-rw-r--r--  .clang-format | 2
-rw-r--r--  .mailmap | 3
-rw-r--r--  Documentation/ABI/stable/sysfs-class-rfkill | 6
-rw-r--r--  Documentation/ABI/testing/procfs-diskstats | 10
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats | 122
-rw-r--r--  Documentation/ABI/testing/sysfs-class-net-queues | 11
-rw-r--r--  Documentation/ABI/testing/sysfs-devices-system-cpu | 24
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-bd9571mwv-regulator | 27
-rw-r--r--  Documentation/PCI/00-INDEX | 2
-rw-r--r--  Documentation/PCI/acpi-info.txt | 187
-rw-r--r--  Documentation/PCI/endpoint/function/binding/pci-test.txt | 2
-rw-r--r--  Documentation/PCI/endpoint/pci-endpoint.txt | 4
-rw-r--r--  Documentation/PCI/endpoint/pci-test-function.txt | 29
-rw-r--r--  Documentation/PCI/endpoint/pci-test-howto.txt | 30
-rw-r--r--  Documentation/PCI/pcieaer-howto.txt | 5
-rw-r--r--  Documentation/RCU/Design/Data-Structures/Data-Structures.html | 118
-rw-r--r--  Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html | 22
-rw-r--r--  Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg | 123
-rw-r--r--  Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg | 16
-rw-r--r--  Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg | 56
-rw-r--r--  Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg | 237
-rw-r--r--  Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg | 12
-rw-r--r--  Documentation/RCU/stallwarn.txt | 24
-rw-r--r--  Documentation/RCU/whatisRCU.txt | 18
-rw-r--r--  Documentation/acpi/dsd/data-node-references.txt | 89
-rw-r--r--  Documentation/acpi/dsd/graph.txt | 72
-rw-r--r--  Documentation/admin-guide/README.rst | 2
-rw-r--r--  Documentation/admin-guide/cgroup-v2.rst | 92
-rw-r--r--  Documentation/admin-guide/index.rst | 9
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 162
-rw-r--r--  Documentation/admin-guide/l1tf.rst | 610
-rw-r--r--  Documentation/block/null_blk.txt | 7
-rw-r--r--  Documentation/block/stat.txt | 28
-rw-r--r--  Documentation/bpf/bpf_devel_QA.rst | 21
-rw-r--r--  Documentation/bpf/index.rst (renamed from Documentation/bpf/README.rst) | 10
-rw-r--r--  Documentation/conf.py | 2
-rw-r--r--  Documentation/console/console.txt | 15
-rw-r--r--  Documentation/core-api/atomic_ops.rst | 2
-rw-r--r--  Documentation/core-api/boot-time-mm.rst | 92
-rw-r--r--  Documentation/core-api/idr.rst | 2
-rw-r--r--  Documentation/core-api/index.rst | 2
-rw-r--r--  Documentation/core-api/timekeeping.rst | 185
-rw-r--r--  Documentation/crypto/api-samples.rst | 2
-rw-r--r--  Documentation/dev-tools/kselftest.rst | 5
-rw-r--r--  Documentation/devicetree/bindings/arm/freescale/fsl,vf610-mscm-ir.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt | 48
-rw-r--r--  Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt | 15
-rw-r--r--  Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt (renamed from Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt) | 61
-rw-r--r--  Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt | 26
-rw-r--r--  Documentation/devicetree/bindings/arm/omap/crossbar.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/arm/samsung/pmu.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/ata/fsl-sata.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/ata/pata-arasan.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/board/fsl-board.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/actions,owl-cmu.txt (renamed from Documentation/devicetree/bindings/clock/actions,s900-cmu.txt) | 20
-rw-r--r--  Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt | 56
-rw-r--r--  Documentation/devicetree/bindings/clock/at91-clock.txt | 42
-rw-r--r--  Documentation/devicetree/bindings/clock/maxim,max9485.txt | 59
-rw-r--r--  Documentation/devicetree/bindings/clock/qcom,dispcc.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.txt | 43
-rw-r--r--  Documentation/devicetree/bindings/clock/rockchip,px30-cru.txt | 65
-rw-r--r--  Documentation/devicetree/bindings/clock/sun8i-de2.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/crypto/amd-ccp.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/crypto/arm-cryptocell.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/crypto/fsl-sec2.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/crypto/fsl-sec4.txt | 21
-rw-r--r--  Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt | 67
-rw-r--r--  Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt | 15
-rw-r--r--  Documentation/devicetree/bindings/crypto/picochip-spacc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/crypto/qcom,prng.txt (renamed from Documentation/devicetree/bindings/rng/qcom,prng.txt) | 4
-rw-r--r--  Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt | 211
-rw-r--r--  Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/analogix_dp.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/anx7814.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/sii902x.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/sii9234.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/sil-sii8620.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/exynos/exynos7-decon.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/display/exynos/exynos_dp.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/display/ht16k33.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/ilitek,ili9341.txt | 27
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/msm/dpu.txt | 131
-rw-r--r--  Documentation/devicetree/bindings/display/msm/dsi.txt | 18
-rw-r--r--  Documentation/devicetree/bindings/display/msm/edp.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/display/msm/mdp5.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt | 29
-rw-r--r--  Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt | 28
-rw-r--r--  Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.txt | 13
-rw-r--r--  Documentation/devicetree/bindings/display/panel/edt,et-series.txt | 39
-rw-r--r--  Documentation/devicetree/bindings/display/panel/edt,et070080dh6.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/display/panel/edt,etm0700g0dh6.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt | 20
-rw-r--r--  Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt | 24
-rw-r--r--  Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt | 20
-rw-r--r--  Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt | 22
-rw-r--r--  Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt (renamed from Documentation/devicetree/bindings/display/panel/edt,et057090dhu.txt) | 4
-rw-r--r--  Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/renesas,du.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/display/sm501fb.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt | 64
-rw-r--r--  Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/dma/jz4780-dma.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/dma/snps-dma.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/dma/ti-edma.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/extcon/extcon-rt8973a.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/extcon/extcon-sm5502.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/gpio/8xxx_gpio.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/gpio/abilis,tb10x-gpio.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-adnp.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-aspeed.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-ath79.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-davinci.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-max732x.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-pca953x.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-uniphier.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-xgene-sb.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-xilinx.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-xlp.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-zynq.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/gpio/mediatek,mt7621-gpio.txt | 35
-rw-r--r--  Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.txt | 32
-rw-r--r--  Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/hsi/omap-ssi.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/hwmon/npcm750-pwm-fan.txt | 84
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-aspeed.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-jz4780.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-mpc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-pca-platform.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-pnx.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-pxa.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/iio/accel/adxl345.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/iio/accel/bma180.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/iio/accel/mma8452.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/cpcap-adc.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/fsl,imx25-gcq.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/max1027-adc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/chemical/atlas,ec-sm.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/chemical/atlas,orp-sm.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/health/afe4403.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/health/afe4404.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/health/max30100.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/health/max30102.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/humidity/hts221.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/imu/bmi160.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/light/apds9300.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/light/apds9960.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/light/isl29018.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/light/opt3001.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/light/tsl2583.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/light/uvis25.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/magnetometer/bmc150_magn.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/pressure/bmp085.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/pressure/zpa2326.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/iio/proximity/as3935.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/proximity/sx9500.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/sensorhub.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/temperature/tmp007.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/cypress,cyapa.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/e3x0-button.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/elan_i2c.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/elants_i2c.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/hid-over-i2c.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/raydium_i2c_ts.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/rmi4/rmi_i2c.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/rmi4/rmi_spi.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/ti,palmas-pwrbutton.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/ad7879.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/ads7846.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/ar1021.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/exc3000.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/fsl-mx25-tcq.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/goodix.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/hideep.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/max11801-ts.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/sis_i2c.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/sx8654.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/tsc2007.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/zet6223.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/hisilicon,mbigen-v2.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/mediatek,cirq.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/mscc,ocelot-icpu-intr.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/nxp,lpc3220-mic.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/ti,c64x+megamod-pic.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu | 2
-rw-r--r--  Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/leds/common.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-lm3692x.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-lt3593.txt | 32
-rw-r--r--  Documentation/devicetree/bindings/mailbox/altera-mailbox.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mailbox/fsl,mu.txt | 54
-rw-r--r--  Documentation/devicetree/bindings/mailbox/mtk-gce.txt | 57
-rw-r--r--  Documentation/devicetree/bindings/mailbox/ti,secure-proxy.txt | 50
-rw-r--r--  Documentation/devicetree/bindings/media/cec-gpio.txt | 22
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/ak7375.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.txt | 46
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/nokia,smia.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/ov2680.txt | 46
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/ov5640.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/tc358743.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/media/nvidia,tegra-vde.txt | 11
-rw-r--r--  Documentation/devicetree/bindings/media/qcom,camss.txt | 128
-rw-r--r--  Documentation/devicetree/bindings/media/qcom,venus.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/media/rcar_vin.txt | 54
-rw-r--r--  Documentation/devicetree/bindings/media/sh_mobile_ceu.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/media/video-interfaces.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/mfd/ac100.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/mfd/altera-a10sr.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/arizona.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/axp20x.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/bd9571mwv.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/bfticu.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/da9055.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mfd/da9062.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mfd/da9063.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mfd/da9150.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mfd/max14577.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/max77686.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/max77693.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/max77802.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/max8998.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mfd/motorola-cpcap.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/palmas.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/retu.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/rk808.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/samsung,sec-core.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mfd/stmpe.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/tc3589x.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/tps65086.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/tps65912.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/twl-familly.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/twl6040.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/wm831x.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mips/cavium/cib.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/misc/aspeed,cvic.txt | 35
-rw-r--r--  Documentation/devicetree/bindings/mmc/arasan,sdhci.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mmc/fsl-esdhc.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mtd/denali-nand.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/mtd/gpmc-nand.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/mtd/nand.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/mtd/nvidia-tegra20-nand.txt | 64
-rw-r--r--  Documentation/devicetree/bindings/mtd/partition.txt | 46
-rw-r--r--  Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt | 37
-rw-r--r--  Documentation/devicetree/bindings/mtd/qcom_nandc.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/mtd/spear_smi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mtd/spi-nand.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/net/amd-xgbe.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/net/btusb.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/can/holt_hi311x.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/can/xilinx_can.txt | 36
-rw-r--r--  Documentation/devicetree/bindings/net/cpsw.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/davicom-dm9000.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/b53.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/marvell.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/realtek-smi.txt | 153
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt | 81
-rw-r--r--  Documentation/devicetree/bindings/net/fsl-fman.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/ibm,emac.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/mediatek-bluetooth.txt | 35
-rw-r--r--  Documentation/devicetree/bindings/net/mediatek-net.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/net/microchip,enc28j60.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/nxp-nci.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/pn544.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/st21nfca.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/st95hf.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/trf7970a.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/phy.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/qca,qca7000.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt | 29
-rw-r--r--  Documentation/devicetree/bindings/net/ralink,rt2880-net.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/renesas,ravb.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/rockchip-dwmac.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/samsung-sxgbe.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/sh_eth.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/stmmac.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pci/altera-pcie-msi.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/pci/altera-pcie.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/pci/faraday,ftpci100.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/pci/mobiveil-pcie.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/pci/pci-keystone.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/pci/ralink,rt3883-pci.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/phy/phy-ath79-usb.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.txt | 36
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-pinmux.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/pinctrl-sx150x.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,ipq8074-pinctrl.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,mdm9615-pinctrl.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,msm8916-pinctrl.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,msm8994-pinctrl.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt | 17
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt | 11
-rw-r--r--  Documentation/devicetree/bindings/power/power_domain.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/power/supply/act8945a-charger.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/power/supply/bq24257.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/power/supply/lp8727_charger.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/power/supply/maxim,max14656.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/power/supply/rt9455_charger.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/power/supply/sbs_sbs-charger.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/powerpc/4xx/akebono.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/powerpc/4xx/hsta.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/powerpc/fsl/dcsr.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/powerpc/fsl/diu.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/powerpc/fsl/dma.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/powerpc/fsl/ecm.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/powerpc/fsl/mcm.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/powerpc/fsl/pamu.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/powerpc/nintendo/wii.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/ptp/ptp-qoriq.txt | 15
-rw-r--r--  Documentation/devicetree/bindings/regulator/cpcap-regulator.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/regulator/max8997-regulator.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/regulator/palmas-pmic.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/regulator/pfuze100.txt | 86
-rw-r--r--  Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt | 160
-rw-r--r--  Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/regulator/uniphier-regulator.txt | 57
-rw-r--r--  Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/rtc/isil,isl12057.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/rtc/rtc-cmos.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/rtc/rtc-ds1307.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/rtc/rtc-m41t80.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/rtc/rtc-omap.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/rtc/rtc-palmas.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/rtc/spear-rtc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/serial/maxim,max310x.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/soc/fsl/cpm_qe/gpio.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/ucc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/usb.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/rpmh-rsc.txt | 137
-rw-r--r--  Documentation/devicetree/bindings/sound/ac97-bus.txt | 32
-rw-r--r--  Documentation/devicetree/bindings/sound/amlogic,axg-fifo.txt | 23
-rw-r--r--  Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.txt | 124
-rw-r--r--  Documentation/devicetree/bindings/sound/amlogic,axg-spdifout.txt | 20
-rw-r--r--  Documentation/devicetree/bindings/sound/amlogic,axg-tdm-formatters.txt | 28
-rw-r--r--  Documentation/devicetree/bindings/sound/amlogic,axg-tdm-iface.txt | 22
-rw-r--r--  Documentation/devicetree/bindings/sound/atmel-i2s.txt | 11
-rw-r--r--  Documentation/devicetree/bindings/sound/audio-graph-card.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/cs35l33.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/cs35l34.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/cs35l35.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/cs42l42.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/sound/da7218.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/da7219.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/dioo,dio2125.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/sound/everest,es7134.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/sound/everest,es7241.txt | 28
-rw-r--r--  Documentation/devicetree/bindings/sound/fsl,ssi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/marvell,pxa2xx-ac97.txt | 27
-rw-r--r--  Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/sound/mrvl,pxa2xx-pcm.txt | 15
-rw-r--r--  Documentation/devicetree/bindings/sound/name-prefix.txt | 24
-rw-r--r--  Documentation/devicetree/bindings/sound/omap-dmic.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/sound/omap-mcbsp.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/sound/omap-mcpdm.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/sound/qcom,apq8096.txt | 15
-rw-r--r--  Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/sound/qcom,q6adm.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/sound/qcom,q6afe.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/sound/qcom,q6asm.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/sound/qcom,sdm845.txt | 80
-rw-r--r--  Documentation/devicetree/bindings/sound/qcom,wcd9335.txt | 123
-rw-r--r--  Documentation/devicetree/bindings/sound/renesas,rsnd.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/sound/rockchip-i2s.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/sound/rt5514.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/sound/rt5682.txt | 50
-rw-r--r--  Documentation/devicetree/bindings/sound/sgtl5000.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/simple-amplifier.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/sound/tas571x.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/sound/ts3a227e.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/sound/ux500-msp.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/sound/wm8994.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/spi/fsl-spi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/spi/sh-hspi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/spi/sh-msiof.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-cadence.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-rockchip.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-rspi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-uniphier.txt | 22
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-xilinx.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-xlp.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/sram/sram.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/staging/iio/adc/spear-adc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/thermal/armada-thermal.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/thermal/exynos-thermal.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/thermal/qcom-tsens.txt | 31
-rw-r--r--  Documentation/devicetree/bindings/timer/altr,timer-1.0.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/timer/fsl,gtm.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/timer/marvell,orion-timer.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt | 34
-rw-r--r--  Documentation/devicetree/bindings/timer/snps,arc-timer.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/timer/st,spear-timer.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/ufs/ufs-hisi.txt | 41
-rw-r--r--  Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/usb/fsl-usb.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/usb/maxim,max3421.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/usb/richtek,rt1711h.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/usb/samsung-hsotg.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/usb/spear-usb.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/watchdog/cadence-wdt.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/xilinx.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/xillybus/xillybus.txt | 2
-rw-r--r--  Documentation/devicetree/dynamic-resolution-notes.txt | 5
-rw-r--r--  Documentation/doc-guide/kernel-doc.rst | 9
-rw-r--r--  Documentation/doc-guide/parse-headers.rst | 4
-rw-r--r--  Documentation/doc-guide/sphinx.rst | 2
-rw-r--r--  Documentation/driver-api/device_link.rst | 12
-rw-r--r--  Documentation/driver-api/dma-buf.rst | 6
-rw-r--r--  Documentation/driver-api/mtdnand.rst | 4
-rw-r--r--  Documentation/fb/fbcon.txt | 7
-rw-r--r--  Documentation/features/sched/membarrier-sync-core/arch-support.txt | 8
-rw-r--r--  Documentation/filesystems/Locking | 8
-rw-r--r--  Documentation/filesystems/cifs/CHANGES | 1072
-rw-r--r--  Documentation/filesystems/cifs/README | 22
-rw-r--r--  Documentation/filesystems/ext4/ext4.rst (renamed from Documentation/filesystems/ext4.txt) | 142
-rw-r--r--  Documentation/filesystems/ext4/index.rst | 17
-rw-r--r--  Documentation/filesystems/ext4/ondisk/about.rst | 44
-rw-r--r--  Documentation/filesystems/ext4/ondisk/allocators.rst | 56
-rw-r--r--  Documentation/filesystems/ext4/ondisk/attributes.rst | 191
-rw-r--r--  Documentation/filesystems/ext4/ondisk/bigalloc.rst | 22
-rw-r--r--  Documentation/filesystems/ext4/ondisk/bitmaps.rst | 28
-rw-r--r--  Documentation/filesystems/ext4/ondisk/blockgroup.rst | 135
-rw-r--r--  Documentation/filesystems/ext4/ondisk/blockmap.rst | 49
-rw-r--r--  Documentation/filesystems/ext4/ondisk/blocks.rst | 142
-rw-r--r--  Documentation/filesystems/ext4/ondisk/checksums.rst | 73
-rw-r--r--  Documentation/filesystems/ext4/ondisk/directory.rst | 426
-rw-r--r--  Documentation/filesystems/ext4/ondisk/dynamic.rst | 12
-rw-r--r--  Documentation/filesystems/ext4/ondisk/eainode.rst | 18
-rw-r--r--  Documentation/filesystems/ext4/ondisk/globals.rst | 13
-rw-r--r--  Documentation/filesystems/ext4/ondisk/group_descr.rst | 170
-rw-r--r--  Documentation/filesystems/ext4/ondisk/ifork.rst | 194
-rw-r--r--  Documentation/filesystems/ext4/ondisk/index.rst | 9
-rw-r--r--  Documentation/filesystems/ext4/ondisk/inlinedata.rst | 37
-rw-r--r--  Documentation/filesystems/ext4/ondisk/inodes.rst | 575
-rw-r--r--  Documentation/filesystems/ext4/ondisk/journal.rst | 611
-rw-r--r--  Documentation/filesystems/ext4/ondisk/mmp.rst | 77
-rw-r--r--  Documentation/filesystems/ext4/ondisk/overview.rst | 26
-rw-r--r--  Documentation/filesystems/ext4/ondisk/special_inodes.rst | 38
-rw-r--r--  Documentation/filesystems/ext4/ondisk/super.rst | 801
-rw-r--r--  Documentation/filesystems/index.rst | 33
-rw-r--r--  Documentation/filesystems/porting | 20
-rw-r--r--  Documentation/filesystems/proc.txt | 3
-rw-r--r--  Documentation/filesystems/relay.txt | 4
-rw-r--r--  Documentation/filesystems/vfs.txt | 18
-rw-r--r--  Documentation/filesystems/xfs.txt | 4
-rw-r--r--  Documentation/gpu/amdgpu.rst | 129
-rw-r--r--  Documentation/gpu/drivers.rst | 1
-rw-r--r--  Documentation/gpu/drm-client.rst | 12
-rw-r--r--  Documentation/gpu/drm-kms-helpers.rst | 26
-rw-r--r--  Documentation/gpu/drm-kms.rst | 28
-rw-r--r--  Documentation/gpu/drm-mm.rst | 20
-rw-r--r--  Documentation/gpu/index.rst | 1
-rw-r--r--  Documentation/gpu/kms-properties.csv | 1
-rw-r--r--  Documentation/gpu/msm-crash-dump.rst | 96
-rw-r--r--  Documentation/gpu/v3d.rst | 28
-rw-r--r--  Documentation/hwmon/max34440 | 16
-rw-r--r--  Documentation/hwmon/mlxreg-fan | 60
-rw-r--r--  Documentation/hwmon/npcm750-pwm-fan | 22
-rw-r--r--  Documentation/hwmon/sysfs-interface | 48
-rw-r--r--  Documentation/index.rst | 28
-rw-r--r--  Documentation/ioctl/ioctl-number.txt | 2
-rw-r--r--  Documentation/iostats.txt | 15
-rw-r--r--  Documentation/kbuild/kbuild.txt | 16
-rw-r--r--  Documentation/kbuild/kconfig-language.txt | 4
-rw-r--r--  Documentation/kbuild/makefiles.txt | 8
-rw-r--r--  Documentation/kernel-hacking/hacking.rst | 2
-rw-r--r--  Documentation/kernel-hacking/index.rst | 2
-rw-r--r--  Documentation/kernel-hacking/locking.rst | 6
-rw-r--r--  Documentation/kprobes.txt | 35
-rw-r--r--  Documentation/locking/ww-mutex-design.txt | 65
-rw-r--r--  Documentation/media/audio.h.rst.exceptions | 3
-rw-r--r--  Documentation/media/media.h.rst.exceptions | 2
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-dqevent.rst | 18
-rw-r--r--  Documentation/media/uapi/dvb/audio-get-pts.rst | 65
-rw-r--r--  Documentation/media/uapi/dvb/audio-set-attributes.rst | 67
-rw-r--r--  Documentation/media/uapi/dvb/audio-set-ext-id.rst | 66
-rw-r--r--  Documentation/media/uapi/dvb/audio-set-karaoke.rst | 66
-rw-r--r--  Documentation/media/uapi/dvb/audio_data_types.rst | 37
-rw-r--r--  Documentation/media/uapi/dvb/audio_function_calls.rst | 4
-rw-r--r--  Documentation/media/uapi/dvb/video-get-frame-rate.rst | 61
-rw-r--r--  Documentation/media/uapi/dvb/video-get-navi.rst | 84
-rw-r--r--  Documentation/media/uapi/dvb/video-set-attributes.rst | 93
-rw-r--r--  Documentation/media/uapi/dvb/video-set-highlight.rst | 86
-rw-r--r--  Documentation/media/uapi/dvb/video-set-id.rst | 75
-rw-r--r--  Documentation/media/uapi/dvb/video-set-spu-palette.rst | 82
-rw-r--r--  Documentation/media/uapi/dvb/video-set-spu.rst | 85
-rw-r--r--  Documentation/media/uapi/dvb/video-set-system.rst | 77
-rw-r--r--  Documentation/media/uapi/dvb/video_function_calls.rst | 7
-rw-r--r--  Documentation/media/uapi/dvb/video_types.rst | 131
-rw-r--r--  Documentation/media/uapi/mediactl/media-ioc-device-info.rst | 48
-rw-r--r--  Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst | 92
-rw-r--r--  Documentation/media/uapi/mediactl/media-ioc-enum-links.rst | 72
-rw-r--r--  Documentation/media/uapi/mediactl/media-ioc-g-topology.rst | 240
-rw-r--r--  Documentation/media/uapi/mediactl/media-types.rst | 515
-rw-r--r--  Documentation/media/uapi/v4l/extended-controls.rst | 48
-rw-r--r--  Documentation/media/uapi/v4l/pixfmt-compressed.rst | 7
-rw-r--r--  Documentation/media/uapi/v4l/pixfmt-rgb.rst | 1
-rw-r--r--  Documentation/media/uapi/v4l/pixfmt-srggb14p.rst | 127
-rw-r--r--  Documentation/media/uapi/v4l/pixfmt-y10p.rst | 33
-rw-r--r--  Documentation/media/uapi/v4l/subdev-formats.rst | 87
-rw-r--r--  Documentation/media/uapi/v4l/vidioc-enumstd.rst | 11
-rw-r--r--  Documentation/media/uapi/v4l/vidioc-g-std.rst | 14
-rw-r--r--  Documentation/media/uapi/v4l/vidioc-querystd.rst | 11
-rw-r--r--  Documentation/media/uapi/v4l/yuv-formats.rst | 1
-rw-r--r--  Documentation/media/v4l-drivers/qcom_camss.rst | 93
-rw-r--r--  Documentation/media/v4l-drivers/qcom_camss_8x96_graph.dot | 104
-rw-r--r--  Documentation/media/video.h.rst.exceptions | 3
-rw-r--r--  Documentation/media/videodev2.h.rst.exceptions | 1
-rw-r--r--  Documentation/memory-barriers.txt | 43
-rw-r--r--  Documentation/misc-devices/pci-endpoint-test.txt | 6
-rw-r--r--  Documentation/networking/00-INDEX | 4
-rw-r--r--  Documentation/networking/alias.rst | 49
-rw-r--r--  Documentation/networking/alias.txt | 40
-rw-r--r--  Documentation/networking/bridge.rst (renamed from Documentation/networking/bridge.txt) | 6
-rw-r--r--  Documentation/networking/can_ucan_protocol.rst | 332
-rw-r--r--  Documentation/networking/index.rst | 6
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 34
-rw-r--r--  Documentation/networking/net_failover.rst | 111
-rw-r--r--  Documentation/networking/netdev-FAQ.rst | 259
-rw-r--r--  Documentation/networking/netdev-FAQ.txt | 244
-rw-r--r--  Documentation/networking/scaling.txt | 61
-rw-r--r--  Documentation/networking/ti-cpsw.txt | 540
-rw-r--r--  Documentation/nommu-mmap.txt | 2
-rw-r--r--  Documentation/power/freezing-of-tasks.txt | 12
-rw-r--r--  Documentation/power/suspend-and-cpuhotplug.txt | 6
-rw-r--r--  Documentation/process/2.Process.rst | 2
-rw-r--r--  Documentation/process/changes.rst | 27
-rw-r--r--  Documentation/process/howto.rst | 2
-rw-r--r--  Documentation/process/management-style.rst | 25
-rw-r--r--  Documentation/process/stable-kernel-rules.rst | 2
-rw-r--r--  Documentation/process/submitting-patches.rst | 1
-rw-r--r--  Documentation/rfkill.txt | 18
-rw-r--r--  Documentation/sound/alsa-configuration.rst | 2
-rwxr-xr-x  Documentation/sound/cards/multisound.sh | 1139
-rw-r--r--  Documentation/sound/hd-audio/models.rst | 264
-rw-r--r--  Documentation/sound/soc/dpcm.rst | 4
-rw-r--r--  Documentation/sphinx/kerneldoc.py | 10
-rwxr-xr-x  Documentation/sphinx/parse-headers.pl | 2
-rw-r--r--  Documentation/sysctl/vm.txt | 23
-rw-r--r--  Documentation/timers/timekeeping.txt | 2
-rw-r--r--  Documentation/trace/events-power.rst | 1
-rw-r--r--  Documentation/trace/events.rst | 2
-rw-r--r--  Documentation/trace/ftrace.rst | 4
-rw-r--r--  Documentation/trace/histogram.rst (renamed from Documentation/trace/histogram.txt) | 1250
-rw-r--r--  Documentation/trace/index.rst | 1
-rw-r--r--  Documentation/trace/kprobetrace.rst | 9
-rw-r--r--  Documentation/trace/uprobetracer.rst | 2
-rw-r--r--  Documentation/translations/index.rst | 13
-rw-r--r--  Documentation/translations/it_IT/disclaimer-ita.rst | 13
-rw-r--r--  Documentation/translations/it_IT/doc-guide/index.rst | 24
-rw-r--r--  Documentation/translations/it_IT/doc-guide/kernel-doc.rst | 554
-rw-r--r--  Documentation/translations/it_IT/doc-guide/parse-headers.rst | 196
-rw-r--r--  Documentation/translations/it_IT/doc-guide/sphinx.rst | 457
-rw-r--r--  Documentation/translations/it_IT/index.rst | 118
-rw-r--r--  Documentation/translations/it_IT/kernel-hacking/hacking.rst | 855
-rw-r--r--  Documentation/translations/it_IT/kernel-hacking/index.rst | 16
-rw-r--r--  Documentation/translations/it_IT/kernel-hacking/locking.rst | 1493
-rw-r--r--  Documentation/translations/ko_KR/memory-barriers.txt | 22
-rw-r--r--  Documentation/translations/zh_CN/oops-tracing.txt | 4
-rw-r--r--  Documentation/virtual/kvm/api.txt | 16
-rw-r--r--  Documentation/x86/intel_rdt_ui.txt | 380
-rw-r--r--  Documentation/x86/x86_64/boot-options.txt | 8
-rw-r--r--  Kconfig | 22
-rw-r--r--  MAINTAINERS | 210
-rw-r--r--  Makefile | 68
-rw-r--r--  arch/Kconfig | 162
-rw-r--r--  arch/alpha/Kconfig | 23
-rw-r--r--  arch/alpha/Kconfig.debug | 5
-rw-r--r--  arch/alpha/boot/Makefile | 2
-rw-r--r--  arch/alpha/include/asm/atomic.h | 64
-rw-r--r--  arch/alpha/include/uapi/asm/socket.h | 3
-rw-r--r--  arch/arc/Kconfig | 19
-rw-r--r--  arch/arc/Kconfig.debug | 5
-rw-r--r--  arch/arc/include/asm/atomic.h | 86
-rw-r--r--  arch/arc/include/asm/kprobes.h | 2
-rw-r--r--  arch/arc/kernel/kprobes.c | 50
-rw-r--r--  arch/arm/Kconfig | 47
-rw-r--r--  arch/arm/Kconfig.debug | 5
-rw-r--r--  arch/arm/Makefile | 7
-rw-r--r--  arch/arm/boot/dts/gemini-dlink-dir-685.dts | 140
-rw-r--r--  arch/arm/crypto/chacha20-neon-core.S | 10
-rw-r--r--  arch/arm/crypto/ghash-ce-glue.c | 5
-rw-r--r--  arch/arm/crypto/sha1-ce-glue.c | 1
-rw-r--r--  arch/arm/crypto/sha1_glue.c | 1
-rw-r--r--  arch/arm/crypto/sha1_neon_glue.c | 1
-rw-r--r--  arch/arm/crypto/sha2-ce-glue.c | 2
-rw-r--r--  arch/arm/crypto/sha256_glue.c | 2
-rw-r--r--  arch/arm/crypto/sha256_neon_glue.c | 2
-rw-r--r--  arch/arm/crypto/sha512-glue.c | 2
-rw-r--r--  arch/arm/crypto/sha512-neon-glue.c | 2
-rw-r--r--  arch/arm/include/asm/assembler.h | 4
-rw-r--r--  arch/arm/include/asm/atomic.h | 55
-rw-r--r--  arch/arm/include/asm/bitops.h | 92
-rw-r--r--  arch/arm/include/asm/efi.h | 3
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h | 7
-rw-r--r--  arch/arm/include/asm/irq.h | 5
-rw-r--r--  arch/arm/include/asm/kprobes.h | 2
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h | 14
-rw-r--r--  arch/arm/include/asm/mach/arch.h | 2
-rw-r--r--  arch/arm/include/asm/mach/time.h | 3
-rw-r--r--  arch/arm/include/asm/probes.h | 1
-rw-r--r--  arch/arm/include/asm/thread_info.h | 4
-rw-r--r--  arch/arm/include/asm/tlb.h | 8
-rw-r--r--  arch/arm/include/asm/uaccess.h | 26
-rw-r--r--  arch/arm/kernel/entry-armv.S | 10
-rw-r--r--  arch/arm/kernel/head-nommu.S | 12
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 78
-rw-r--r--  arch/arm/kernel/irq.c | 10
-rw-r--r--  arch/arm/kernel/perf_event_v6.c | 14
-rw-r--r--  arch/arm/kernel/perf_event_v7.c | 15
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c | 18
-rw-r--r--  arch/arm/kernel/setup.c | 2
-rw-r--r--  arch/arm/kernel/signal.c | 58
-rw-r--r--  arch/arm/kernel/sys_oabi-compat.c | 8
-rw-r--r--  arch/arm/kernel/time.c | 15
-rw-r--r--  arch/arm/lib/copy_from_user.S | 9
-rw-r--r--  arch/arm/mach-at91/Kconfig | 4
-rw-r--r--  arch/arm/mach-at91/Makefile | 25
-rw-r--r--  arch/arm/mach-exynos/suspend.c | 2
-rw-r--r--  arch/arm/mach-pxa/balloon3.c | 1
-rw-r--r--  arch/arm/mach-pxa/devices.c | 148
-rw-r--r--  arch/arm/mach-pxa/devices.h | 6
-rw-r--r--  arch/arm/mach-pxa/em-x270.c | 1
-rw-r--r--  arch/arm/mach-pxa/pxa25x.c | 38
-rw-r--r--  arch/arm/mach-pxa/pxa27x.c | 39
-rw-r--r--  arch/arm/mach-pxa/pxa3xx.c | 41
-rw-r--r--  arch/arm/mm/Kconfig | 1
-rw-r--r--  arch/arm/mm/dma-mapping.c | 12
-rw-r--r--  arch/arm/mm/nommu.c | 3
-rw-r--r--  arch/arm/mm/tcm.h | 2
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 1062
-rw-r--r--  arch/arm/net/bpf_jit_32.h | 42
-rw-r--r--  arch/arm/plat-omap/counter_32k.c | 2
-rw-r--r--  arch/arm/plat-pxa/ssp.c | 47
-rw-r--r--  arch/arm/probes/kprobes/core.c | 139
-rw-r--r--  arch/arm/probes/kprobes/test-core.c | 1
-rw-r--r--  arch/arm/vfp/Makefile | 5
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 17
-rw-r--r--  arch/arm64/Kconfig | 48
-rw-r--r--  arch/arm64/Kconfig.debug | 5
-rw-r--r--  arch/arm64/Makefile | 9
-rw-r--r--  arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi | 15
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hi3660.dtsi | 18
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hip07.dtsi | 284
-rw-r--r--  arch/arm64/configs/defconfig | 1
-rw-r--r--  arch/arm64/crypto/aes-glue.c | 3
-rw-r--r--  arch/arm64/crypto/ghash-ce-core.S | 271
-rw-r--r--  arch/arm64/crypto/ghash-ce-glue.c | 204
-rw-r--r--  arch/arm64/crypto/sha1-ce-glue.c | 1
-rw-r--r--  arch/arm64/crypto/sha2-ce-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sha256-glue.c | 8
-rw-r--r--  arch/arm64/crypto/sha3-ce-glue.c | 4
-rw-r--r--  arch/arm64/crypto/sha512-ce-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sha512-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sm3-ce-glue.c | 1
-rw-r--r--  arch/arm64/include/asm/Kbuild | 1
-rw-r--r--  arch/arm64/include/asm/acpi.h | 29
-rw-r--r--  arch/arm64/include/asm/atomic.h | 47
-rw-r--r--  arch/arm64/include/asm/barrier.h | 13
-rw-r--r--  arch/arm64/include/asm/bitops.h | 21
-rw-r--r--  arch/arm64/include/asm/cache.h | 4
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 27
-rw-r--r--  arch/arm64/include/asm/cpucaps.h | 3
-rw-r--r--  arch/arm64/include/asm/efi.h | 3
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 17
-rw-r--r--  arch/arm64/include/asm/hw_breakpoint.h | 7
-rw-r--r--  arch/arm64/include/asm/insn.h | 2
-rw-r--r--  arch/arm64/include/asm/irq.h | 2
-rw-r--r--  arch/arm64/include/asm/kprobes.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 10
-rw-r--r--  arch/arm64/include/asm/neon.h | 7
-rw-r--r--  arch/arm64/include/asm/numa.h | 4
-rw-r--r--  arch/arm64/include/asm/processor.h | 21
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 79
-rw-r--r--  arch/arm64/include/asm/sdei.h | 9
-rw-r--r--  arch/arm64/include/asm/spinlock.h | 117
-rw-r--r--  arch/arm64/include/asm/spinlock_types.h | 17
-rw-r--r--  arch/arm64/include/asm/stacktrace.h | 73
-rw-r--r--  arch/arm64/include/asm/syscall.h | 8
-rw-r--r--  arch/arm64/include/asm/syscall_wrapper.h | 80
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 30
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 7
-rw-r--r--  arch/arm64/include/asm/topology.h | 4
-rw-r--r--  arch/arm64/include/asm/unistd.h | 2
-rw-r--r--  arch/arm64/include/asm/unistd32.h | 28
-rw-r--r--  arch/arm64/kernel/Makefile | 5
-rw-r--r--  arch/arm64/kernel/acpi.c | 11
-rw-r--r--  arch/arm64/kernel/acpi_numa.c | 88
-rw-r--r--  arch/arm64/kernel/alternative.c | 4
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 32
-rw-r--r--  arch/arm64/kernel/cpu-reset.h | 9
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 30
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 4
-rw-r--r--  arch/arm64/kernel/entry.S | 160
-rw-r--r--  arch/arm64/kernel/entry32.S | 121
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 19
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 86
-rw-r--r--  arch/arm64/kernel/insn.c | 70
-rw-r--r--  arch/arm64/kernel/irq.c | 10
-rw-r--r--  arch/arm64/kernel/machine_kexec.c | 15
-rw-r--r--  arch/arm64/kernel/perf_event.c | 281
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c | 88
-rw-r--r--  arch/arm64/kernel/process.c | 42
-rw-r--r--  arch/arm64/kernel/ptrace.c | 61
-rw-r--r--  arch/arm64/kernel/sdei.c | 48
-rw-r--r--  arch/arm64/kernel/setup.c | 38
-rw-r--r--  arch/arm64/kernel/signal.c | 8
-rw-r--r--  arch/arm64/kernel/signal32.c | 24
-rw-r--r--  arch/arm64/kernel/smp.c | 44
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 2
-rw-r--r--  arch/arm64/kernel/sys.c | 29
-rw-r--r--  arch/arm64/kernel/sys32.c | 135
-rw-r--r--  arch/arm64/kernel/syscall.c | 139
-rw-r--r--  arch/arm64/kernel/topology.c | 58
-rw-r--r--  arch/arm64/kernel/traps.c | 18
-rw-r--r--  arch/arm64/kernel/vdso/note.S | 3
-rw-r--r--  arch/arm64/kvm/guest.c | 14
-rw-r--r--  arch/arm64/kvm/hyp/Makefile | 3
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c | 2
-rw-r--r--  arch/arm64/kvm/regmap.c | 22
-rw-r--r--  arch/arm64/kvm/reset.c | 4
-rw-r--r--  arch/arm64/lib/Makefile | 2
-rw-r--r--  arch/arm64/lib/bitops.S | 76
-rw-r--r--  arch/arm64/mm/cache.S | 4
-rw-r--r--  arch/arm64/mm/fault.c | 9
-rw-r--r--  arch/arm64/mm/flush.c | 3
-rw-r--r--  arch/arm64/mm/mmu.c | 48
-rw-r--r--  arch/arm64/mm/numa.c | 29
-rw-r--r--  arch/arm64/mm/ptdump_debugfs.c | 13
-rw-r--r--  arch/c6x/Kconfig | 41
-rw-r--r--  arch/c6x/Kconfig.debug | 10
-rw-r--r--  arch/h8300/Kconfig | 36
-rw-r--r--  arch/h8300/Kconfig.debug | 1
-rw-r--r--  arch/h8300/include/asm/atomic.h | 19
-rw-r--r--  arch/hexagon/Kconfig | 21
-rw-r--r--  arch/hexagon/Kconfig.debug | 1
-rw-r--r--  arch/hexagon/include/asm/atomic.h | 18
-rw-r--r--  arch/ia64/Kconfig | 25
-rw-r--r--  arch/ia64/Kconfig.debug | 5
-rw-r--r--  arch/ia64/include/asm/atomic.h | 81
-rw-r--r--  arch/ia64/include/asm/io.h | 13
-rw-r--r--  arch/ia64/include/asm/kprobes.h | 2
-rw-r--r--  arch/ia64/include/uapi/asm/break.h | 1
-rw-r--r--  arch/ia64/include/uapi/asm/socket.h | 3
-rw-r--r--  arch/ia64/kernel/Makefile | 2
-rw-r--r--  arch/ia64/kernel/jprobes.S | 90
-rw-r--r--  arch/ia64/kernel/kprobes.c | 93
-rw-r--r--  arch/m68k/Kconfig | 36
-rw-r--r--  arch/m68k/Kconfig.debug | 5
-rw-r--r--  arch/m68k/apollo/config.c | 8
-rw-r--r--  arch/m68k/atari/config.c | 5
-rw-r--r--  arch/m68k/atari/time.c | 63
-rw-r--r--  arch/m68k/bvme6000/config.c | 45
-rw-r--r--  arch/m68k/configs/amiga_defconfig | 32
-rw-r--r--  arch/m68k/configs/apollo_defconfig | 30
-rw-r--r--  arch/m68k/configs/atari_defconfig | 29
-rw-r--r--  arch/m68k/configs/bvme6000_defconfig | 30
-rw-r--r--  arch/m68k/configs/hp300_defconfig | 30
-rw-r--r--  arch/m68k/configs/mac_defconfig | 30
-rw-r--r--  arch/m68k/configs/multi_defconfig | 32
-rw-r--r--  arch/m68k/configs/mvme147_defconfig | 30
-rw-r--r--  arch/m68k/configs/mvme16x_defconfig | 30
-rw-r--r--  arch/m68k/configs/q40_defconfig | 30
-rw-r--r--  arch/m68k/configs/sun3_defconfig | 28
-rw-r--r--  arch/m68k/configs/sun3x_defconfig | 30
-rw-r--r--  arch/m68k/include/asm/Kbuild | 1
-rw-r--r--  arch/m68k/include/asm/atomic.h | 24
-rw-r--r--  arch/m68k/include/asm/bitops.h | 14
-rw-r--r--  arch/m68k/include/asm/dma-mapping.h | 12
-rw-r--r--  arch/m68k/include/asm/io.h | 7
-rw-r--r--  arch/m68k/include/asm/io_mm.h | 42
-rw-r--r--  arch/m68k/include/asm/io_no.h | 12
-rw-r--r--  arch/m68k/include/asm/kmap.h | 9
-rw-r--r--  arch/m68k/include/asm/machdep.h | 1
-rw-r--r--  arch/m68k/include/asm/macintosh.h | 1
-rw-r--r--  arch/m68k/include/asm/page_no.h | 2
-rw-r--r--  arch/m68k/kernel/dma.c | 68
-rw-r--r--  arch/m68k/kernel/setup_mm.c | 15
-rw-r--r--  arch/m68k/kernel/setup_no.c | 21
-rw-r--r--  arch/m68k/mac/config.c | 21
-rw-r--r--  arch/m68k/mac/misc.c | 80
-rw-r--r--  arch/m68k/mm/init.c | 1
-rw-r--r--  arch/m68k/mm/mcfmmu.c | 13
-rw-r--r--  arch/m68k/mm/motorola.c | 35
-rw-r--r--  arch/m68k/mvme147/config.c | 7
-rw-r--r--  arch/m68k/mvme16x/config.c | 8
-rw-r--r--  arch/m68k/q40/config.c | 30
-rw-r--r--  arch/m68k/sun3/config.c | 4
-rw-r--r--  arch/microblaze/Kconfig | 32
-rw-r--r--  arch/microblaze/Kconfig.debug | 6
-rw-r--r--  arch/mips/Kconfig | 82
-rw-r--r--  arch/mips/Kconfig.debug | 5
-rw-r--r--  arch/mips/Makefile | 22
-rw-r--r--  arch/mips/alchemy/board-gpr.c | 3
-rw-r--r--  arch/mips/alchemy/board-mtx1.c | 3
-rw-r--r--  arch/mips/alchemy/board-xxs1500.c | 3
-rw-r--r--  arch/mips/alchemy/devboards/platform.c | 3
-rw-r--r--  arch/mips/ar7/clock.c | 29
-rw-r--r--  arch/mips/ar7/prom.c | 4
-rw-r--r--  arch/mips/ath25/Kconfig | 1
-rw-r--r--  arch/mips/ath25/board.c | 6
-rw-r--r--  arch/mips/ath25/early_printk.c | 5
-rw-r--r--  arch/mips/ath79/clock.c | 193
-rw-r--r--  arch/mips/ath79/common.c | 8
-rw-r--r--  arch/mips/ath79/early_printk.c | 64
-rw-r--r--  arch/mips/ath79/setup.c | 35
-rw-r--r--  arch/mips/bcm63xx/early_printk.c | 1
-rw-r--r--  arch/mips/bmips/dma.c | 32
-rw-r--r--  arch/mips/bmips/setup.c | 7
-rw-r--r--  arch/mips/boot/Makefile | 52
-rw-r--r--  arch/mips/boot/compressed/uart-prom.c | 3
-rw-r--r--  arch/mips/boot/dts/ingenic/jz4780.dtsi | 19
-rw-r--r--  arch/mips/boot/dts/mscc/Makefile | 2
-rw-r--r--  arch/mips/boot/dts/mscc/ocelot.dtsi | 32
-rw-r--r--  arch/mips/boot/dts/mscc/ocelot_pcb123.dts | 10
-rw-r--r--  arch/mips/boot/dts/qca/ar9132.dtsi | 2
-rw-r--r--  arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts | 3
-rw-r--r--  arch/mips/boot/dts/qca/ar9331.dtsi | 2
-rw-r--r--  arch/mips/boot/dts/qca/ar9331_dpt_module.dts | 5
-rw-r--r--  arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts | 5
-rw-r--r--  arch/mips/boot/dts/qca/ar9331_omega.dts | 5
-rw-r--r--  arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts | 5
-rw-r--r--  arch/mips/boot/ecoff.h | 61
-rw-r--r--  arch/mips/boot/elf2ecoff.c | 31
-rw-r--r--  arch/mips/cavium-octeon/crypto/octeon-md5.c | 1
-rw-r--r--  arch/mips/cavium-octeon/crypto/octeon-sha1.c | 1
-rw-r--r--  arch/mips/cavium-octeon/crypto/octeon-sha256.c | 2
-rw-r--r--  arch/mips/cavium-octeon/crypto/octeon-sha512.c | 2
-rw-r--r--  arch/mips/cavium-octeon/dma-octeon.c | 191
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c | 5
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c | 7
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-helper-spi.c | 8
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c | 7
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c | 2
-rw-r--r--  arch/mips/cavium-octeon/octeon-platform.c | 4
-rw-r--r--  arch/mips/cavium-octeon/setup.c | 8
-rw-r--r--  arch/mips/configs/bcm47xx_defconfig | 1
-rw-r--r--  arch/mips/configs/ci20_defconfig | 2
-rw-r--r--  arch/mips/configs/generic_defconfig | 3
-rw-r--r--  arch/mips/configs/malta_defconfig | 1
-rw-r--r--  arch/mips/configs/malta_kvm_defconfig | 1
-rw-r--r--  arch/mips/configs/malta_kvm_guest_defconfig | 1
-rw-r--r--  arch/mips/configs/malta_qemu_32r6_defconfig | 1
-rw-r--r--  arch/mips/configs/maltaaprp_defconfig | 1
-rw-r--r--  arch/mips/configs/maltasmvp_defconfig | 1
-rw-r--r--  arch/mips/configs/maltasmvp_eva_defconfig | 1
-rw-r--r--  arch/mips/configs/maltaup_defconfig | 1
-rw-r--r--  arch/mips/configs/maltaup_xpa_defconfig | 1
-rw-r--r--  arch/mips/fw/arc/arc_con.c | 1
-rw-r--r--  arch/mips/fw/arc/promlib.c | 1
-rw-r--r--  arch/mips/fw/sni/sniprom.c | 1
-rw-r--r--  arch/mips/generic/Kconfig | 12
-rw-r--r--  arch/mips/generic/Platform | 1
-rw-r--r--  arch/mips/generic/board-ocelot_pcb123.its.S | 23
-rw-r--r--  arch/mips/generic/init.c | 14
-rw-r--r--  arch/mips/generic/yamon-dt.c | 4
-rw-r--r--  arch/mips/include/asm/Kbuild | 1
-rw-r--r--  arch/mips/include/asm/atomic.h | 367
-rw-r--r--  arch/mips/include/asm/bmips.h | 16
-rw-r--r--  arch/mips/include/asm/cpu-features.h | 176
-rw-r--r--  arch/mips/include/asm/cpu.h | 51
-rw-r--r--  arch/mips/include/asm/dma-coherence.h | 6
-rw-r--r--  arch/mips/include/asm/dma-direct.h | 17
-rw-r--r--  arch/mips/include/asm/dma-mapping.h | 20
-rw-r--r--  arch/mips/include/asm/io.h | 40
-rw-r--r--  arch/mips/include/asm/kprobes.h | 13
-rw-r--r--  arch/mips/include/asm/mach-ar7/spaces.h | 3
-rw-r--r--  arch/mips/include/asm/mach-ath25/dma-coherence.h | 76
-rw-r--r--  arch/mips/include/asm/mach-ath79/ar71xx_regs.h | 771
-rw-r--r--  arch/mips/include/asm/mach-ath79/ath79.h | 34
-rw-r--r--  arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h | 6
-rw-r--r--  arch/mips/include/asm/mach-bmips/dma-coherence.h | 54
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h | 79
-rw-r--r--  arch/mips/include/asm/mach-generic/dma-coherence.h | 73
-rw-r--r--  arch/mips/include/asm/mach-generic/kmalloc.h | 3
-rw-r--r--  arch/mips/include/asm/mach-generic/spaces.h | 10
-rw-r--r--  arch/mips/include/asm/mach-ip27/dma-coherence.h | 70
-rw-r--r--  arch/mips/include/asm/mach-ip32/dma-coherence.h | 92
-rw-r--r--  arch/mips/include/asm/mach-jazz/dma-coherence.h | 60
-rw-r--r--  arch/mips/include/asm/mach-loongson64/dma-coherence.h | 93
-rw-r--r--  arch/mips/include/asm/mach-loongson64/kernel-entry-init.h | 24
-rw-r--r--  arch/mips/include/asm/mach-pic32/spaces.h | 1
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 29
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 2
-rw-r--r--  arch/mips/include/asm/netlogic/xlr/fmn.h | 2
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-asxx-defs.h | 4
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-ciu-defs.h | 10062
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-gmxx-defs.h | 4
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-pcsx-defs.h | 4
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h | 4
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-spxx-defs.h | 4
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-stxx-defs.h | 4
-rw-r--r--  arch/mips/include/asm/octeon/octeon.h | 9
-rw-r--r--  arch/mips/include/asm/octeon/pci-octeon.h | 3
-rw-r--r--  arch/mips/include/asm/page.h | 11
-rw-r--r--  arch/mips/include/asm/processor.h | 15
-rw-r--r--  arch/mips/include/asm/setup.h | 2
-rw-r--r--  arch/mips/include/asm/sgialib.h | 1
-rw-r--r--  arch/mips/include/asm/sim.h | 12
-rw-r--r--  arch/mips/include/asm/smp.h | 12
-rw-r--r--  arch/mips/include/asm/txx9/generic.h | 1
-rw-r--r--  arch/mips/include/asm/txx9/tx4939.h | 29
-rw-r--r--  arch/mips/include/uapi/asm/socket.h | 3
-rw-r--r--  arch/mips/jazz/jazzdma.c | 141
-rw-r--r--  arch/mips/jazz/setup.c | 17
-rw-r--r--  arch/mips/jz4740/Platform | 2
-rw-r--r--arch/mips/jz4740/board-qi_lb60.c3
-rw-r--r--arch/mips/kernel/cpu-probe.c3
-rw-r--r--arch/mips/kernel/early_printk.c2
-rw-r--r--arch/mips/kernel/early_printk_8250.c1
-rw-r--r--arch/mips/kernel/genex.S46
-rw-r--r--arch/mips/kernel/idle.c12
-rw-r--r--arch/mips/kernel/kprobes.c70
-rw-r--r--arch/mips/kernel/linux32.c29
-rw-r--r--arch/mips/kernel/process.c80
-rw-r--r--arch/mips/kernel/ptrace.c254
-rw-r--r--arch/mips/kernel/ptrace32.c2
-rw-r--r--arch/mips/kernel/relocate_kernel.S4
-rw-r--r--arch/mips/kernel/setup.c38
-rw-r--r--arch/mips/kernel/signal.c24
-rw-r--r--arch/mips/kernel/signal_n32.c12
-rw-r--r--arch/mips/kernel/signal_o32.c24
-rw-r--r--arch/mips/kernel/traps.c7
-rw-r--r--arch/mips/kvm/mips.c4
-rw-r--r--arch/mips/lantiq/early_printk.c1
-rw-r--r--arch/mips/lantiq/prom.c8
-rw-r--r--arch/mips/lantiq/xway/dma.c3
-rw-r--r--arch/mips/lasat/prom.c1
-rw-r--r--arch/mips/lib/memset.S21
-rw-r--r--arch/mips/loongson32/Platform8
-rw-r--r--arch/mips/loongson64/Kconfig5
-rw-r--r--arch/mips/loongson64/common/Makefile6
-rw-r--r--arch/mips/loongson64/common/cs5536/cs5536_ohci.c2
-rw-r--r--arch/mips/loongson64/common/dma-swiotlb.c109
-rw-r--r--arch/mips/loongson64/common/dma.c18
-rw-r--r--arch/mips/loongson64/common/early_printk.c1
-rw-r--r--arch/mips/loongson64/common/env.c3
-rw-r--r--arch/mips/loongson64/loongson-3/Makefile2
-rw-r--r--arch/mips/loongson64/loongson-3/dma.c25
-rw-r--r--arch/mips/loongson64/loongson-3/smp.c3
-rw-r--r--arch/mips/mm/Makefile3
-rw-r--r--arch/mips/mm/c-r4k.c18
-rw-r--r--arch/mips/mm/cache.c4
-rw-r--r--arch/mips/mm/dma-default.c404
-rw-r--r--arch/mips/mm/dma-noncoherent.c208
-rw-r--r--arch/mips/mm/page.c15
-rw-r--r--arch/mips/mm/tlbex.c2
-rw-r--r--arch/mips/mm/uasm-micromips.c1
-rw-r--r--arch/mips/mm/uasm-mips.c1
-rw-r--r--arch/mips/mti-malta/Makefile2
-rw-r--r--arch/mips/mti-malta/malta-pm.c96
-rw-r--r--arch/mips/mti-malta/malta-reset.c30
-rw-r--r--arch/mips/mti-malta/malta-setup.c39
-rw-r--r--arch/mips/netlogic/common/earlycons.c1
-rw-r--r--arch/mips/netlogic/xlp/dt.c14
-rw-r--r--arch/mips/paravirt/serial.c5
-rw-r--r--arch/mips/pci/ops-bridge.c4
-rw-r--r--arch/mips/pci/pci-ar2315.c24
-rw-r--r--arch/mips/pci/pci-ar724x.c42
-rw-r--r--arch/mips/pci/pci-ip27.c25
-rw-r--r--arch/mips/pci/pci-octeon.c4
-rw-r--r--arch/mips/pci/pcie-octeon.c6
-rw-r--r--arch/mips/pic32/pic32mzda/early_console.c5
-rw-r--r--arch/mips/ralink/early_printk.c7
-rw-r--r--arch/mips/sgi-ip27/ip27-console.c1
-rw-r--r--arch/mips/sgi-ip32/Makefile2
-rw-r--r--arch/mips/sgi-ip32/ip32-dma.c37
-rw-r--r--arch/mips/sibyte/Kconfig1
-rw-r--r--arch/mips/sibyte/common/cfe.c1
-rw-r--r--arch/mips/txx9/generic/setup.c3
-rw-r--r--arch/mips/txx9/generic/setup_tx4938.c2
-rw-r--r--arch/mips/txx9/generic/setup_tx4939.c2
-rw-r--r--arch/mips/vdso/Makefile9
-rw-r--r--arch/mips/vdso/genvdso.h51
-rw-r--r--arch/mips/vr41xx/common/pmu.c3
-rw-r--r--arch/nds32/Kconfig21
-rw-r--r--arch/nds32/Kconfig.debug1
-rw-r--r--arch/nios2/Kconfig35
-rw-r--r--arch/nios2/Kconfig.debug5
-rw-r--r--arch/nios2/include/asm/Kbuild1
-rw-r--r--arch/nios2/include/asm/dma-mapping.h20
-rw-r--r--arch/nios2/mm/dma-mapping.c139
-rw-r--r--arch/openrisc/Kconfig35
-rw-r--r--arch/openrisc/Kconfig.debug1
-rw-r--r--arch/openrisc/Makefile1
-rw-r--r--arch/openrisc/include/asm/atomic.h4
-rw-r--r--arch/openrisc/include/asm/cmpxchg.h3
-rw-r--r--arch/openrisc/include/asm/irq.h2
-rw-r--r--arch/openrisc/kernel/irq.c7
-rw-r--r--arch/parisc/Kconfig32
-rw-r--r--arch/parisc/Kconfig.debug5
-rw-r--r--arch/parisc/include/asm/assembly.h2
-rw-r--r--arch/parisc/include/asm/atomic.h107
-rw-r--r--arch/parisc/include/asm/dma-mapping.h5
-rw-r--r--arch/parisc/include/asm/linkage.h17
-rw-r--r--arch/parisc/include/asm/ptrace.h11
-rw-r--r--arch/parisc/include/asm/spinlock.h8
-rw-r--r--arch/parisc/include/asm/unwind.h3
-rw-r--r--arch/parisc/include/uapi/asm/errno.h1
-rw-r--r--arch/parisc/include/uapi/asm/socket.h3
-rw-r--r--arch/parisc/kernel/entry.S53
-rw-r--r--arch/parisc/kernel/pacache.S125
-rw-r--r--arch/parisc/kernel/pci-dma.c199
-rw-r--r--arch/parisc/kernel/process.c2
-rw-r--r--arch/parisc/kernel/ptrace.c100
-rw-r--r--arch/parisc/kernel/real2.S6
-rw-r--r--arch/parisc/kernel/setup.c8
-rw-r--r--arch/parisc/kernel/syscall.S24
-rw-r--r--arch/parisc/kernel/traps.c2
-rw-r--r--arch/parisc/kernel/unwind.c93
-rw-r--r--arch/parisc/lib/lusercopy.S21
-rw-r--r--arch/parisc/mm/init.c11
-rw-r--r--arch/powerpc/Kconfig22
-rw-r--r--arch/powerpc/Kconfig.debug5
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi15
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi15
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi15
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi15
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi15
-rw-r--r--arch/powerpc/configs/wii_defconfig1
-rw-r--r--arch/powerpc/crypto/md5-glue.c1
-rw-r--r--arch/powerpc/crypto/sha1-spe-glue.c1
-rw-r--r--arch/powerpc/crypto/sha1.c1
-rw-r--r--arch/powerpc/crypto/sha256-spe-glue.c2
-rw-r--r--arch/powerpc/include/asm/atomic.h69
-rw-r--r--arch/powerpc/include/asm/hw_breakpoint.h7
-rw-r--r--arch/powerpc/include/asm/kprobes.h12
-rw-r--r--arch/powerpc/kernel/dma.c3
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c47
-rw-r--r--arch/powerpc/kernel/kprobes-ftrace.c72
-rw-r--r--arch/powerpc/kernel/kprobes.c92
-rw-r--r--arch/powerpc/kernel/trace/ftrace_64_mprofile.S39
-rw-r--r--arch/powerpc/kernel/vdso32/note.S3
-rw-r--r--arch/powerpc/kvm/book3s_hv.c6
-rw-r--r--arch/powerpc/perf/core-book3s.c6
-rw-r--r--arch/powerpc/purgatory/Makefile3
-rw-r--r--arch/riscv/Kconfig71
-rw-r--r--arch/riscv/Kconfig.debug37
-rw-r--r--arch/riscv/Makefile1
-rw-r--r--arch/riscv/include/asm/atomic.h166
-rw-r--r--arch/s390/Kconfig29
-rw-r--r--arch/s390/Kconfig.debug5
-rw-r--r--arch/s390/Makefile42
-rw-r--r--arch/s390/appldata/appldata_base.c122
-rw-r--r--arch/s390/boot/Makefile45
-rw-r--r--arch/s390/boot/als.c (renamed from arch/s390/kernel/als.c)20
-rw-r--r--arch/s390/boot/compressed/.gitignore1
-rw-r--r--arch/s390/boot/compressed/Makefile55
-rw-r--r--arch/s390/boot/compressed/head.S8
-rw-r--r--arch/s390/boot/compressed/misc.c37
-rw-r--r--arch/s390/boot/compressed/vmlinux.lds.S15
-rw-r--r--arch/s390/boot/compressed/vmlinux.scr.lds.S (renamed from arch/s390/boot/compressed/vmlinux.scr)4
-rw-r--r--arch/s390/boot/ebcdic.c2
-rw-r--r--arch/s390/boot/head.S (renamed from arch/s390/kernel/head.S)11
-rw-r--r--arch/s390/boot/head_kdump.S (renamed from arch/s390/kernel/head_kdump.S)0
-rw-r--r--arch/s390/boot/mem.S2
-rw-r--r--arch/s390/boot/sclp_early_core.c2
-rw-r--r--arch/s390/crypto/aes_s390.c1
-rw-r--r--arch/s390/crypto/ghash_s390.c1
-rw-r--r--arch/s390/crypto/sha1_s390.c1
-rw-r--r--arch/s390/crypto/sha256_s390.c2
-rw-r--r--arch/s390/crypto/sha512_s390.c2
-rw-r--r--arch/s390/hypfs/hypfs_diag.c4
-rw-r--r--arch/s390/hypfs/inode.c6
-rw-r--r--arch/s390/include/asm/ap.h284
-rw-r--r--arch/s390/include/asm/atomic.h65
-rw-r--r--arch/s390/include/asm/cpu_mf.h12
-rw-r--r--arch/s390/include/asm/gmap.h10
-rw-r--r--arch/s390/include/asm/hugetlb.h5
-rw-r--r--arch/s390/include/asm/kprobes.h2
-rw-r--r--arch/s390/include/asm/lowcore.h2
-rw-r--r--arch/s390/include/asm/mmu.h2
-rw-r--r--arch/s390/include/asm/mmu_context.h1
-rw-r--r--arch/s390/include/asm/nospec-insn.h4
-rw-r--r--arch/s390/include/asm/pci.h5
-rw-r--r--arch/s390/include/asm/pgtable.h13
-rw-r--r--arch/s390/include/asm/purgatory.h6
-rw-r--r--arch/s390/include/asm/qdio.h1
-rw-r--r--arch/s390/include/asm/sections.h2
-rw-r--r--arch/s390/include/asm/setup.h4
-rw-r--r--arch/s390/include/uapi/asm/chsc.h10
-rw-r--r--arch/s390/include/uapi/asm/socket.h3
-rw-r--r--arch/s390/kernel/Makefile24
-rw-r--r--arch/s390/kernel/crash_dump.c104
-rw-r--r--arch/s390/kernel/early.c12
-rw-r--r--arch/s390/kernel/entry.h1
-rw-r--r--arch/s390/kernel/head64.S43
-rw-r--r--arch/s390/kernel/kprobes.c86
-rw-r--r--arch/s390/kernel/nospec-branch.c12
-rw-r--r--arch/s390/kernel/nospec-sysfs.c2
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c14
-rw-r--r--arch/s390/kernel/perf_regs.c6
-rw-r--r--arch/s390/kernel/setup.c4
-rw-r--r--arch/s390/kernel/syscalls/Makefile6
-rw-r--r--arch/s390/kernel/sysinfo.c4
-rw-r--r--arch/s390/kernel/time.c15
-rw-r--r--arch/s390/kernel/topology.c44
-rw-r--r--arch/s390/kernel/vdso.c2
-rw-r--r--arch/s390/kernel/vmlinux.lds.S13
-rw-r--r--arch/s390/kvm/interrupt.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c82
-rw-r--r--arch/s390/kvm/priv.c105
-rw-r--r--arch/s390/lib/mem.S16
-rw-r--r--arch/s390/mm/cmm.c74
-rw-r--r--arch/s390/mm/extmem.c4
-rw-r--r--arch/s390/mm/fault.c2
-rw-r--r--arch/s390/mm/gmap.c454
-rw-r--r--arch/s390/mm/hugetlbpage.c24
-rw-r--r--arch/s390/mm/page-states.c2
-rw-r--r--arch/s390/mm/pageattr.c6
-rw-r--r--arch/s390/mm/pgalloc.c2
-rw-r--r--arch/s390/mm/pgtable.c159
-rw-r--r--arch/s390/net/bpf_jit_comp.c2
-rw-r--r--arch/s390/numa/numa.c16
-rw-r--r--arch/s390/pci/pci_debug.c8
-rw-r--r--arch/s390/purgatory/Makefile12
-rw-r--r--arch/s390/purgatory/head.S45
-rw-r--r--arch/s390/purgatory/purgatory.c9
-rw-r--r--arch/s390/scripts/Makefile.chkbss11
-rw-r--r--arch/s390/tools/gen_opcode_table.c4
-rw-r--r--arch/sh/Kconfig30
-rw-r--r--arch/sh/Kconfig.debug5
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c282
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c217
-rw-r--r--arch/sh/boards/mach-migor/setup.c8
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c120
-rw-r--r--arch/sh/drivers/pci/pci.c2
-rw-r--r--arch/sh/include/asm/Kbuild1
-rw-r--r--arch/sh/include/asm/atomic.h35
-rw-r--r--arch/sh/include/asm/cacheflush.h7
-rw-r--r--arch/sh/include/asm/cmpxchg-xchg.h3
-rw-r--r--arch/sh/include/asm/dma-mapping.h23
-rw-r--r--arch/sh/include/asm/hw_breakpoint.h8
-rw-r--r--arch/sh/include/asm/kprobes.h4
-rw-r--r--arch/sh/kernel/Makefile4
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7723.c2
-rw-r--r--arch/sh/kernel/dma-coherent.c83
-rw-r--r--arch/sh/kernel/dma-nommu.c88
-rw-r--r--arch/sh/kernel/hw_breakpoint.c53
-rw-r--r--arch/sh/kernel/kprobes.c72
-rw-r--r--arch/sh/mm/Kconfig2
-rw-r--r--arch/sh/mm/consistent.c87
-rw-r--r--arch/sh/mm/init.c10
-rw-r--r--arch/sparc/Kconfig28
-rw-r--r--arch/sparc/Kconfig.debug5
-rw-r--r--arch/sparc/crypto/md5_glue.c1
-rw-r--r--arch/sparc/crypto/sha1_glue.c1
-rw-r--r--arch/sparc/crypto/sha256_glue.c2
-rw-r--r--arch/sparc/crypto/sha512_glue.c2
-rw-r--r--arch/sparc/include/asm/atomic_32.h24
-rw-r--r--arch/sparc/include/asm/atomic_64.h65
-rw-r--r--arch/sparc/include/asm/io_64.h19
-rw-r--r--arch/sparc/include/asm/kprobes.h1
-rw-r--r--arch/sparc/include/uapi/asm/socket.h3
-rw-r--r--arch/sparc/kernel/kprobes.c65
-rw-r--r--arch/sparc/lib/atomic32.c4
-rw-r--r--arch/um/Kconfig (renamed from arch/um/Kconfig.um)74
-rw-r--r--arch/um/Kconfig.char124
-rw-r--r--arch/um/Kconfig.common60
-rw-r--r--arch/um/Kconfig.debug5
-rw-r--r--arch/um/Kconfig.rest22
-rw-r--r--arch/um/Makefile15
-rw-r--r--arch/um/drivers/Kconfig (renamed from arch/um/Kconfig.net)125
-rw-r--r--arch/um/drivers/Makefile4
-rw-r--r--arch/unicore32/Kconfig28
-rw-r--r--arch/unicore32/Kconfig.debug5
-rw-r--r--arch/x86/Kconfig30
-rw-r--r--arch/x86/Kconfig.debug5
-rw-r--r--arch/x86/Makefile5
-rw-r--r--arch/x86/Makefile.um2
-rw-r--r--arch/x86/boot/bitops.h3
-rw-r--r--arch/x86/boot/compressed/eboot.c545
-rw-r--r--arch/x86/boot/compressed/eboot.h12
-rw-r--r--arch/x86/boot/compressed/kaslr.c98
-rw-r--r--arch/x86/boot/string.c5
-rw-r--r--arch/x86/crypto/aegis128-aesni-asm.S2
-rw-r--r--arch/x86/crypto/aegis128l-aesni-asm.S2
-rw-r--r--arch/x86/crypto/aegis256-aesni-asm.S2
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S8
-rw-r--r--arch/x86/crypto/aesni-intel_avx-x86_64.S4
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c6
-rw-r--r--arch/x86/crypto/morus1280-avx2-asm.S2
-rw-r--r--arch/x86/crypto/morus1280-sse2-asm.S2
-rw-r--r--arch/x86/crypto/morus640-sse2-asm.S2
-rw-r--r--arch/x86/crypto/poly1305_glue.c1
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb.c17
-rw-r--r--arch/x86/crypto/sha1_ssse3_asm.S2
-rw-r--r--arch/x86/crypto/sha1_ssse3_glue.c4
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb.c18
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S2
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c8
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb.c18
-rw-r--r--arch/x86/crypto/sha512_ssse3_glue.c6
-rw-r--r--arch/x86/entry/entry_32.S632
-rw-r--r--arch/x86/entry/entry_64.S5
-rw-r--r--arch/x86/entry/vdso/Makefile26
-rw-r--r--arch/x86/entry/vdso/vdso-note.S3
-rw-r--r--arch/x86/entry/vdso/vdso32/note.S3
-rw-r--r--arch/x86/events/intel/core.c31
-rw-r--r--arch/x86/events/intel/ds.c49
-rw-r--r--arch/x86/events/intel/lbr.c56
-rw-r--r--arch/x86/events/perf_event.h6
-rw-r--r--arch/x86/hyperv/hv_apic.c59
-rw-r--r--arch/x86/hyperv/mmu.c80
-rw-r--r--arch/x86/include/asm/apic.h9
-rw-r--r--arch/x86/include/asm/atomic.h32
-rw-r--r--arch/x86/include/asm/atomic64_32.h61
-rw-r--r--arch/x86/include/asm/atomic64_64.h50
-rw-r--r--arch/x86/include/asm/cmpxchg.h2
-rw-r--r--arch/x86/include/asm/cmpxchg_64.h4
-rw-r--r--arch/x86/include/asm/cpufeatures.h6
-rw-r--r--arch/x86/include/asm/dmi.h2
-rw-r--r--arch/x86/include/asm/hardirq.h26
-rw-r--r--arch/x86/include/asm/hw_breakpoint.h7
-rw-r--r--arch/x86/include/asm/i8259.h1
-rw-r--r--arch/x86/include/asm/intel-family.h13
-rw-r--r--arch/x86/include/asm/intel-mid.h43
-rw-r--r--arch/x86/include/asm/intel_ds.h3
-rw-r--r--arch/x86/include/asm/irqflags.h2
-rw-r--r--arch/x86/include/asm/kprobes.h5
-rw-r--r--arch/x86/include/asm/kvm_guest.h7
-rw-r--r--arch/x86/include/asm/kvm_host.h6
-rw-r--r--arch/x86/include/asm/kvm_para.h1
-rw-r--r--arch/x86/include/asm/mmu_context.h5
-rw-r--r--arch/x86/include/asm/mshyperv.h34
-rw-r--r--arch/x86/include/asm/msr-index.h7
-rw-r--r--arch/x86/include/asm/nospec-branch.h2
-rw-r--r--arch/x86/include/asm/orc_types.h2
-rw-r--r--arch/x86/include/asm/page_32_types.h9
-rw-r--r--arch/x86/include/asm/pci-direct.h4
-rw-r--r--arch/x86/include/asm/percpu.h7
-rw-r--r--arch/x86/include/asm/pgtable-2level.h26
-rw-r--r--arch/x86/include/asm/pgtable-2level_types.h3
-rw-r--r--arch/x86/include/asm/pgtable-3level.h44
-rw-r--r--arch/x86/include/asm/pgtable-3level_types.h6
-rw-r--r--arch/x86/include/asm/pgtable-invert.h32
-rw-r--r--arch/x86/include/asm/pgtable.h168
-rw-r--r--arch/x86/include/asm/pgtable_32.h2
-rw-r--r--arch/x86/include/asm/pgtable_32_types.h9
-rw-r--r--arch/x86/include/asm/pgtable_64.h127
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h3
-rw-r--r--arch/x86/include/asm/pgtable_types.h28
-rw-r--r--arch/x86/include/asm/processor-flags.h8
-rw-r--r--arch/x86/include/asm/processor.h18
-rw-r--r--arch/x86/include/asm/pti.h3
-rw-r--r--arch/x86/include/asm/refcount.h1
-rw-r--r--arch/x86/include/asm/sections.h1
-rw-r--r--arch/x86/include/asm/set_memory.h1
-rw-r--r--arch/x86/include/asm/switch_to.h16
-rw-r--r--arch/x86/include/asm/text-patching.h1
-rw-r--r--arch/x86/include/asm/tlbflush.h21
-rw-r--r--arch/x86/include/asm/topology.h6
-rw-r--r--arch/x86/include/asm/trace/hyperv.h15
-rw-r--r--arch/x86/include/asm/tsc.h4
-rw-r--r--arch/x86/include/asm/unwind_hints.h16
-rw-r--r--arch/x86/include/asm/vmx.h11
-rw-r--r--arch/x86/include/asm/xen/hypercall.h25
-rw-r--r--arch/x86/kernel/alternative.c7
-rw-r--r--arch/x86/kernel/apic/apic.c20
-rw-r--r--arch/x86/kernel/apic/io_apic.c1
-rw-r--r--arch/x86/kernel/apic/msi.c1
-rw-r--r--arch/x86/kernel/apic/vector.c20
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c4
-rw-r--r--arch/x86/kernel/asm-offsets.c5
-rw-r--r--arch/x86/kernel/asm-offsets_32.c10
-rw-r--r--arch/x86/kernel/asm-offsets_64.c2
-rw-r--r--arch/x86/kernel/cpu/Makefile4
-rw-r--r--arch/x86/kernel/cpu/amd.c64
-rw-r--r--arch/x86/kernel/cpu/bugs.c190
-rw-r--r--arch/x86/kernel/cpu/common.c106
-rw-r--r--arch/x86/kernel/cpu/cpu.h3
-rw-r--r--arch/x86/kernel/cpu/intel.c17
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.c11
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.h143
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c129
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c1522
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h43
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_rdtgroup.c812
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c202
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c16
-rw-r--r--arch/x86/kernel/cpu/topology.c41
-rw-r--r--arch/x86/kernel/dumpstack.c28
-rw-r--r--arch/x86/kernel/early-quirks.c18
-rw-r--r--arch/x86/kernel/fpu/core.c1
-rw-r--r--arch/x86/kernel/head_32.S20
-rw-r--r--arch/x86/kernel/head_64.S2
-rw-r--r--arch/x86/kernel/hpet.c1
-rw-r--r--arch/x86/kernel/hw_breakpoint.c131
-rw-r--r--arch/x86/kernel/i8259.c1
-rw-r--r--arch/x86/kernel/idt.c1
-rw-r--r--arch/x86/kernel/irq.c1
-rw-r--r--arch/x86/kernel/irq_32.c1
-rw-r--r--arch/x86/kernel/irq_64.c1
-rw-r--r--arch/x86/kernel/irqinit.c1
-rw-r--r--arch/x86/kernel/jump_label.c11
-rw-r--r--arch/x86/kernel/kexec-bzimage64.c2
-rw-r--r--arch/x86/kernel/kprobes/common.h10
-rw-r--r--arch/x86/kernel/kprobes/core.c124
-rw-r--r--arch/x86/kernel/kprobes/ftrace.c49
-rw-r--r--arch/x86/kernel/kprobes/opt.c1
-rw-r--r--arch/x86/kernel/kvm.c18
-rw-r--r--arch/x86/kernel/kvmclock.c259
-rw-r--r--arch/x86/kernel/ldt.c137
-rw-r--r--arch/x86/kernel/machine_kexec_32.c5
-rw-r--r--arch/x86/kernel/paravirt.c14
-rw-r--r--arch/x86/kernel/paravirt_patch_64.c2
-rw-r--r--arch/x86/kernel/pci-dma.c5
-rw-r--r--arch/x86/kernel/pci-iommu_table.c2
-rw-r--r--arch/x86/kernel/pcspeaker.c2
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/kernel/process_32.c2
-rw-r--r--arch/x86/kernel/process_64.c2
-rw-r--r--arch/x86/kernel/setup.c21
-rw-r--r--arch/x86/kernel/smp.c1
-rw-r--r--arch/x86/kernel/smpboot.c18
-rw-r--r--arch/x86/kernel/stacktrace.c42
-rw-r--r--arch/x86/kernel/time.c1
-rw-r--r--arch/x86/kernel/tsc.c259
-rw-r--r--arch/x86/kernel/tsc_msr.c96
-rw-r--r--arch/x86/kernel/unwind_orc.c52
-rw-r--r--arch/x86/kernel/vm86_32.c4
-rw-r--r--arch/x86/kernel/vmlinux.lds.S17
-rw-r--r--arch/x86/kernel/x86_init.c2
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/mmu.c1
-rw-r--r--arch/x86/kvm/vmx.c455
-rw-r--r--arch/x86/kvm/x86.c34
-rw-r--r--arch/x86/lib/memcpy_64.S2
-rw-r--r--arch/x86/mm/dump_pagetables.c27
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/x86/mm/init.c62
-rw-r--r--arch/x86/mm/init_64.c14
-rw-r--r--arch/x86/mm/kmmio.c25
-rw-r--r--arch/x86/mm/mmap.c21
-rw-r--r--arch/x86/mm/numa_emulation.c107
-rw-r--r--arch/x86/mm/pageattr.c27
-rw-r--r--arch/x86/mm/pgtable.c169
-rw-r--r--arch/x86/mm/pti.c262
-rw-r--r--arch/x86/mm/tlb.c224
-rw-r--r--arch/x86/pci/common.c4
-rw-r--r--arch/x86/pci/early.c44
-rw-r--r--arch/x86/platform/efi/efi_64.c101
-rw-r--r--arch/x86/platform/efi/quirks.c14
-rw-r--r--arch/x86/platform/intel-mid/Makefile2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c1
-rw-r--r--arch/x86/platform/intel-mid/intel-mid.c23
-rw-r--r--arch/x86/platform/intel-mid/intel_mid_weak_decls.h18
-rw-r--r--arch/x86/platform/intel-mid/mfld.c70
-rw-r--r--arch/x86/platform/intel-mid/mrfld.c105
-rw-r--r--arch/x86/platform/olpc/olpc.c4
-rw-r--r--arch/x86/platform/uv/tlb_uv.c3
-rw-r--r--arch/x86/power/hibernate_64.c36
-rw-r--r--arch/x86/power/hibernate_asm_64.S2
-rw-r--r--arch/x86/purgatory/Makefile3
-rw-r--r--arch/x86/tools/relocs.c1
-rw-r--r--arch/x86/um/Kconfig15
-rw-r--r--arch/x86/um/vdso/.gitignore1
-rw-r--r--arch/x86/um/vdso/Makefile16
-rw-r--r--arch/x86/xen/enlighten.c1
-rw-r--r--arch/x86/xen/enlighten_pv.c54
-rw-r--r--arch/x86/xen/mmu_pv.c6
-rw-r--r--arch/x86/xen/multicalls.c6
-rw-r--r--arch/x86/xen/spinlock.c4
-rw-r--r--arch/x86/xen/suspend_pv.c5
-rw-r--r--arch/x86/xen/time.c18
-rw-r--r--arch/x86/xen/xen-ops.h6
-rw-r--r--arch/xtensa/Kconfig29
-rw-r--r--arch/xtensa/Kconfig.debug5
-rw-r--r--arch/xtensa/boot/Makefile3
-rw-r--r--arch/xtensa/include/asm/atomic.h98
-rw-r--r--arch/xtensa/include/asm/hw_breakpoint.h7
-rw-r--r--arch/xtensa/include/uapi/asm/socket.h3
-rw-r--r--arch/xtensa/kernel/hw_breakpoint.c40
-rw-r--r--block/Kconfig16
-rw-r--r--block/Makefile4
-rw-r--r--block/bfq-iosched.c131
-rw-r--r--block/bfq-iosched.h7
-rw-r--r--block/bfq-wf2q.c30
-rw-r--r--block/bio-integrity.c22
-rw-r--r--block/bio.c208
-rw-r--r--block/blk-cgroup.c284
-rw-r--r--block/blk-core.c106
-rw-r--r--block/blk-ioc.c2
-rw-r--r--block/blk-iolatency.c955
-rw-r--r--block/blk-lib.c10
-rw-r--r--block/blk-mq-debugfs-zoned.c24
-rw-r--r--block/blk-mq-debugfs.c24
-rw-r--r--block/blk-mq-debugfs.h9
-rw-r--r--block/blk-mq-pci.c5
-rw-r--r--block/blk-mq-sched.c112
-rw-r--r--block/blk-mq-tag.c11
-rw-r--r--block/blk-mq.c173
-rw-r--r--block/blk-mq.h13
-rw-r--r--block/blk-rq-qos.c194
-rw-r--r--block/blk-rq-qos.h109
-rw-r--r--block/blk-settings.c6
-rw-r--r--block/blk-stat.c16
-rw-r--r--block/blk-stat.h4
-rw-r--r--block/blk-sysfs.c37
-rw-r--r--block/blk-throttle.c32
-rw-r--r--block/blk-wbt.c425
-rw-r--r--block/blk-wbt.h68
-rw-r--r--block/blk-zoned.c2
-rw-r--r--block/blk.h7
-rw-r--r--block/bounce.c69
-rw-r--r--block/bsg-lib.c5
-rw-r--r--block/bsg.c460
-rw-r--r--block/cfq-iosched.c23
-rw-r--r--block/genhd.c29
-rw-r--r--block/partition-generic.c25
-rw-r--r--block/partitions/aix.c13
-rw-r--r--block/partitions/ldm.c3
-rw-r--r--block/t10-pi.c110
-rw-r--r--certs/system_keyring.c3
-rw-r--r--crypto/ablkcipher.c59
-rw-r--r--crypto/aegis128.c1
-rw-r--r--crypto/aegis128l.c3
-rw-r--r--crypto/aegis256.c1
-rw-r--r--crypto/af_alg.c2
-rw-r--r--crypto/api.c2
-rw-r--r--crypto/asymmetric_keys/pkcs7_key_type.c2
-rw-r--r--crypto/blkcipher.c55
-rw-r--r--crypto/crypto_null.c1
-rw-r--r--crypto/dh.c66
-rw-r--r--crypto/dh_helper.c43
-rw-r--r--crypto/drbg.c39
-rw-r--r--crypto/ecc.c42
-rw-r--r--crypto/ecc_curve_defs.h22
-rw-r--r--crypto/ghash-generic.c1
-rw-r--r--crypto/lrw.c4
-rw-r--r--crypto/md4.c1
-rw-r--r--crypto/md5.c1
-rw-r--r--crypto/morus1280.c1
-rw-r--r--crypto/morus640.c1
-rw-r--r--crypto/poly1305_generic.c1
-rw-r--r--crypto/rmd128.c1
-rw-r--r--crypto/rmd160.c1
-rw-r--r--crypto/rmd256.c11
-rw-r--r--crypto/rmd320.c13
-rw-r--r--crypto/scatterwalk.c2
-rw-r--r--crypto/sha1_generic.c2
-rw-r--r--crypto/sha256_generic.c4
-rw-r--r--crypto/sha3_generic.c4
-rw-r--r--crypto/sha512_generic.c26
-rw-r--r--crypto/skcipher.c57
-rw-r--r--crypto/sm3_generic.c1
-rw-r--r--crypto/tcrypt.c38
-rw-r--r--crypto/testmgr.c59
-rw-r--r--crypto/testmgr.h233
-rw-r--r--crypto/tgr192.c3
-rw-r--r--crypto/vmac.c444
-rw-r--r--crypto/wp512.c3
-rw-r--r--crypto/xts.c4
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig8
-rw-r--r--drivers/acpi/acpica/aclocal.h1
-rw-r--r--drivers/acpi/acpica/nsaccess.c7
-rw-r--r--drivers/acpi/acpica/nssearch.c1
-rw-r--r--drivers/acpi/arm64/iort.c48
-rw-r--r--drivers/acpi/battery.c69
-rw-r--r--drivers/acpi/bus.c40
-rw-r--r--drivers/acpi/button.c17
-rw-r--r--drivers/acpi/ec.c14
-rw-r--r--drivers/acpi/osi.c8
-rw-r--r--drivers/acpi/property.c209
-rw-r--r--drivers/acpi/scan.c23
-rw-r--r--drivers/acpi/sleep.c30
-rw-r--r--drivers/acpi/x86/utils.c22
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/ata/libata-scsi.c18
-rw-r--r--drivers/ata/libata.h2
-rw-r--r--drivers/ata/pata_pxa.c10
-rw-r--r--drivers/atm/zatm.c2
-rw-r--r--drivers/auxdisplay/arm-charlcd.c6
-rw-r--r--drivers/auxdisplay/charlcd.c5
-rw-r--r--drivers/base/core.c64
-rw-r--r--drivers/base/cpu.c8
-rw-r--r--drivers/base/firmware_loader/fallback.c7
-rw-r--r--drivers/base/power/common.c17
-rw-r--r--drivers/base/power/domain.c24
-rw-r--r--drivers/base/regmap/Kconfig4
-rw-r--r--drivers/base/regmap/Makefile1
-rw-r--r--drivers/base/regmap/internal.h3
-rw-r--r--drivers/base/regmap/regmap-sccb.c128
-rw-r--r--drivers/base/regmap/regmap-slimbus.c23
-rw-r--r--drivers/base/regmap/regmap.c79
-rw-r--r--drivers/bcma/Kconfig3
-rw-r--r--drivers/block/DAC960.c9
-rw-r--r--drivers/block/Kconfig2
-rw-r--r--drivers/block/Makefile5
-rw-r--r--drivers/block/aoe/aoecmd.c1
-rw-r--r--drivers/block/aoe/aoedev.c4
-rw-r--r--drivers/block/brd.c14
-rw-r--r--drivers/block/drbd/drbd_int.h2
-rw-r--r--drivers/block/drbd/drbd_main.c12
-rw-r--r--drivers/block/drbd/drbd_receiver.c6
-rw-r--r--drivers/block/drbd/drbd_req.c4
-rw-r--r--drivers/block/drbd/drbd_worker.c4
-rw-r--r--drivers/block/floppy.c3
-rw-r--r--drivers/block/loop.c3
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c3
-rw-r--r--drivers/block/nbd.c6
-rw-r--r--drivers/block/null_blk.h108
-rw-r--r--drivers/block/null_blk_main.c (renamed from drivers/block/null_blk.c)129
-rw-r--r--drivers/block/null_blk_zoned.c149
-rw-r--r--drivers/block/paride/bpck.c3
-rw-r--r--drivers/block/paride/pd.c2
-rw-r--r--drivers/block/pktcdvd.c109
-rw-r--r--drivers/block/rbd.c2
-rw-r--r--drivers/block/rsxx/dev.c6
-rw-r--r--drivers/block/skd_main.c16
-rw-r--r--drivers/block/xen-blkfront.c9
-rw-r--r--drivers/block/zram/zram_drv.c19
-rw-r--r--drivers/bluetooth/Kconfig25
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/bfusb.c2
-rw-r--r--drivers/bluetooth/bluecard_cs.c2
-rw-r--r--drivers/bluetooth/bpa10x.c6
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c2
-rw-r--r--drivers/bluetooth/btmtkuart.c629
-rw-r--r--drivers/bluetooth/btqca.c123
-rw-r--r--drivers/bluetooth/btqca.h22
-rw-r--r--drivers/bluetooth/btrtl.c512
-rw-r--r--drivers/bluetooth/btrtl.h53
-rw-r--r--drivers/bluetooth/btusb.c116
-rw-r--r--drivers/bluetooth/hci_h5.c206
-rw-r--r--drivers/bluetooth/hci_intel.c2
-rw-r--r--drivers/bluetooth/hci_qca.c490
-rw-r--r--drivers/cdrom/cdrom.c30
-rw-r--r--drivers/char/Kconfig14
-rw-r--r--drivers/char/hw_random/Kconfig13
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/msm-rng.c183
-rw-r--r--drivers/char/random.c49
-rw-r--r--drivers/char/tpm/tpm-chip.c68
-rw-r--r--drivers/char/tpm/tpm-interface.c72
-rw-r--r--drivers/char/tpm/tpm.h31
-rw-r--r--drivers/char/tpm/tpm2-cmd.c258
-rw-r--r--drivers/char/tpm/tpm2-space.c12
-rw-r--r--drivers/char/tpm/tpm_crb.c101
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c8
-rw-r--r--drivers/char/tpm/tpm_tis_core.c2
-rw-r--r--drivers/char/tpm/tpm_tis_core.h1
-rw-r--r--drivers/char/tpm/tpm_tis_spi.c9
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c2
-rw-r--r--drivers/clk/Kconfig6
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/actions/Kconfig7
-rw-r--r--drivers/clk/actions/Makefile1
-rw-r--r--drivers/clk/actions/owl-s700.c606
-rw-r--r--drivers/clk/at91/Makefile1
-rw-r--r--drivers/clk/at91/clk-i2s-mux.c116
-rw-r--r--drivers/clk/clk-aspeed.c2
-rw-r--r--drivers/clk/clk-cs2000-cp.c5
-rw-r--r--drivers/clk/clk-fixed-factor.c9
-rw-r--r--drivers/clk/clk-max9485.c387
-rw-r--r--drivers/clk/clk-scmi.c5
-rw-r--r--drivers/clk/clk-si514.c38
-rw-r--r--drivers/clk/clk-si544.c38
-rw-r--r--drivers/clk/clk.c218
-rw-r--r--drivers/clk/clkdev.c5
-rw-r--r--drivers/clk/imx/clk-imx51-imx53.c44
-rw-r--r--drivers/clk/imx/clk-imx6q.c16
-rw-r--r--drivers/clk/imx/clk-imx6sl.c12
-rw-r--r--drivers/clk/imx/clk-imx6sll.c7
-rw-r--r--drivers/clk/imx/clk-imx6sx.c41
-rw-r--r--drivers/clk/imx/clk-imx6ul.c29
-rw-r--r--drivers/clk/imx/clk-imx7d.c1
-rw-r--r--drivers/clk/ingenic/jz4740-cgu.c4
-rw-r--r--drivers/clk/meson/Kconfig28
-rw-r--r--drivers/clk/meson/Makefile4
-rw-r--r--drivers/clk/meson/axg-audio.c845
-rw-r--r--drivers/clk/meson/axg-audio.h127
-rw-r--r--drivers/clk/meson/axg.c244
-rw-r--r--drivers/clk/meson/axg.h8
-rw-r--r--drivers/clk/meson/clk-audio-divider.c110
-rw-r--r--drivers/clk/meson/clk-phase.c63
-rw-r--r--drivers/clk/meson/clk-triphase.c68
-rw-r--r--drivers/clk/meson/clkc-audio.h28
-rw-r--r--drivers/clk/meson/clkc.h11
-rw-r--r--drivers/clk/meson/gxbb.c118
-rw-r--r--drivers/clk/meson/gxbb.h5
-rw-r--r--drivers/clk/meson/sclk-div.c243
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c9
-rw-r--r--drivers/clk/pxa/clk-pxa25x.c6
-rw-r--r--drivers/clk/pxa/clk-pxa27x.c7
-rw-r--r--drivers/clk/pxa/clk-pxa3xx.c7
-rw-r--r--drivers/clk/qcom/Kconfig19
-rw-r--r--drivers/clk/qcom/Makefile2
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c10
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h14
-rw-r--r--drivers/clk/qcom/clk-branch.c10
-rw-r--r--drivers/clk/qcom/clk-branch.h14
-rw-r--r--drivers/clk/qcom/clk-rcg.h2
-rw-r--r--drivers/clk/qcom/clk-regmap.c10
-rw-r--r--drivers/clk/qcom/clk-regmap.h14
-rw-r--r--drivers/clk/qcom/clk-rpmh.c329
-rw-r--r--drivers/clk/qcom/common.c10
-rw-r--r--drivers/clk/qcom/common.h15
-rw-r--r--drivers/clk/qcom/dispcc-sdm845.c685
-rw-r--r--drivers/clk/qcom/gcc-apq8084.c2
-rw-r--r--drivers/clk/qcom/gcc-ipq4019.c2
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c3
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c2
-rw-r--r--drivers/clk/qcom/gcc-mdm9615.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c5
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c5
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8994.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c2
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c45
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c2
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c2
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c2
-rw-r--r--drivers/clk/qcom/videocc-sdm845.c2
-rw-r--r--drivers/clk/renesas/Kconfig6
-rw-r--r--drivers/clk/renesas/Makefile1
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c893
-rw-r--r--drivers/clk/rockchip/Makefile2
-rw-r--r--drivers/clk/rockchip/clk-half-divider.c227
-rw-r--r--drivers/clk/rockchip/clk-px30.c1039
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c3
-rw-r--r--drivers/clk/rockchip/clk.c10
-rw-r--r--drivers/clk/rockchip/clk.h126
-rw-r--r--drivers/clk/samsung/clk-exynos4412-isp.c2
-rw-r--r--drivers/clk/socfpga/clk-s10.c9
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c11
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c58
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.h8
-rw-r--r--drivers/clk/tegra/Makefile2
-rw-r--r--drivers/clk/tegra/clk-bpmp.c12
-rw-r--r--drivers/clk/tegra/clk-divider.c35
-rw-r--r--drivers/clk/tegra/clk-emc.c2
-rw-r--r--drivers/clk/tegra/clk-id.h2
-rw-r--r--drivers/clk/tegra/clk-sdmmc-mux.c251
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c11
-rw-r--r--drivers/clk/tegra/clk-tegra124.c3
-rw-r--r--drivers/clk/tegra/clk-tegra210.c14
-rw-r--r--drivers/clk/tegra/clk-utils.c43
-rw-r--r--drivers/clk/tegra/clk.h30
-rw-r--r--drivers/clk/uniphier/clk-uniphier-peri.c9
-rw-r--r--drivers/clk/uniphier/clk-uniphier-sys.c58
-rw-r--r--drivers/clocksource/Makefile2
-rw-r--r--drivers/clocksource/mtk_timer.c268
-rw-r--r--drivers/clocksource/tegra20_timer.c4
-rw-r--r--drivers/clocksource/timer-atcpit100.c2
-rw-r--r--drivers/clocksource/timer-keystone.c2
-rw-r--r--drivers/clocksource/timer-mediatek.c328
-rw-r--r--drivers/clocksource/timer-sprd.c50
-rw-r--r--drivers/clocksource/timer-ti-32k.c3
-rw-r--r--drivers/clocksource/zevio-timer.c2
-rw-r--r--drivers/connector/connector.c3
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c163
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c52
-rw-r--r--drivers/cpufreq/cpufreq.c8
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c21
-rw-r--r--drivers/cpufreq/intel_pstate.c27
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c9
-rw-r--r--drivers/cpufreq/qcom-cpufreq-kryo.c5
-rw-r--r--drivers/cpuidle/cpuidle-arm.c3
-rw-r--r--drivers/crypto/Kconfig15
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c18
-rw-r--r--drivers/crypto/atmel-ecc.c35
-rw-r--r--drivers/crypto/atmel-sha.c4
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c28
-rw-r--r--drivers/crypto/bcm/cipher.c8
-rw-r--r--drivers/crypto/caam/caamhash.c3
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c2
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c3
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c3
-rw-r--r--drivers/crypto/ccp/psp-dev.c35
-rw-r--r--drivers/crypto/ccp/psp-dev.h19
-rw-r--r--drivers/crypto/ccp/sp-dev.h7
-rw-r--r--drivers/crypto/ccp/sp-pci.c36
-rw-r--r--drivers/crypto/ccree/cc_aead.c16
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c8
-rw-r--r--drivers/crypto/ccree/cc_cipher.c170
-rw-r--r--drivers/crypto/ccree/cc_cipher.h1
-rw-r--r--drivers/crypto/ccree/cc_driver.c4
-rw-r--r--drivers/crypto/ccree/cc_driver.h1
-rw-r--r--drivers/crypto/ccree/cc_hash.c85
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c7
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c2
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_hw.c2
-rw-r--r--drivers/crypto/hisilicon/Kconfig14
-rw-r--r--drivers/crypto/hisilicon/Makefile2
-rw-r--r--drivers/crypto/hisilicon/sec/Makefile3
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c1122
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.c1323
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.h428
-rw-r--r--drivers/crypto/inside-secure/safexcel.c474
-rw-r--r--drivers/crypto/inside-secure/safexcel.h201
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c492
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c560
-rw-r--r--drivers/crypto/inside-secure/safexcel_ring.c63
-rw-r--r--drivers/crypto/marvell/hash.c3
-rw-r--r--drivers/crypto/n2_core.c3
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c1
-rw-r--r--drivers/crypto/nx/nx-sha256.c1
-rw-r--r--drivers/crypto/nx/nx-sha512.c1
-rw-r--r--drivers/crypto/omap-sham.c36
-rw-r--r--drivers/crypto/padlock-sha.c8
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c2
-rw-r--r--drivers/crypto/qce/sha.c3
-rw-r--r--drivers/crypto/qcom-rng.c229
-rw-r--r--drivers/crypto/s5p-sss.c9
-rw-r--r--drivers/crypto/sahara.c10
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c62
-rw-r--r--drivers/crypto/stm32/stm32-hash.c95
-rw-r--r--drivers/crypto/stm32/stm32_crc32.c71
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-core.c20
-rw-r--r--drivers/crypto/talitos.c37
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c15
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c116
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h25
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c33
-rw-r--r--drivers/crypto/virtio/virtio_crypto_mgr.c81
-rw-r--r--drivers/crypto/vmx/ghash.c2
-rw-r--r--drivers/devfreq/devfreq.c16
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c6
-rw-r--r--drivers/devfreq/rk3399_dmc.c102
-rw-r--r--drivers/dma-buf/dma-buf.c56
-rw-r--r--drivers/dma-buf/dma-fence-array.c1
-rw-r--r--drivers/dma-buf/dma-fence.c167
-rw-r--r--drivers/dma-buf/reservation.c8
-rw-r--r--drivers/dma-buf/sw_sync.c1
-rw-r--r--drivers/dma/pxa_dma.c15
-rw-r--r--drivers/edac/altera_edac.c3
-rw-r--r--drivers/edac/edac_mc_sysfs.c6
-rw-r--r--drivers/edac/i7core_edac.c22
-rw-r--r--drivers/edac/sb_edac.c17
-rw-r--r--drivers/edac/thunderx_edac.c14
-rw-r--r--drivers/firmware/efi/Kconfig12
-rw-r--r--drivers/firmware/efi/arm-init.c1
-rw-r--r--drivers/firmware/efi/arm-runtime.c18
-rw-r--r--drivers/firmware/efi/cper.c19
-rw-r--r--drivers/firmware/efi/efi.c23
-rw-r--r--drivers/firmware/efi/esrt.c8
-rw-r--r--drivers/firmware/efi/libstub/Makefile7
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c32
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c31
-rw-r--r--drivers/firmware/efi/libstub/efistub.h3
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c202
-rw-r--r--drivers/firmware/qemu_fw_cfg.c1
-rw-r--r--drivers/gpio/Kconfig12
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/gpio-aspeed.c426
-rw-r--r--drivers/gpio/gpio-ath79.c2
-rw-r--r--drivers/gpio/gpio-bcm-kona.c6
-rw-r--r--drivers/gpio/gpio-davinci.c70
-rw-r--r--drivers/gpio/gpio-dwapb.c6
-rw-r--r--drivers/gpio/gpio-em.c6
-rw-r--r--drivers/gpio/gpio-it87.c13
-rw-r--r--drivers/gpio/gpio-max732x.c12
-rw-r--r--drivers/gpio/gpio-menz127.c4
-rw-r--r--drivers/gpio/gpio-ml-ioh.c3
-rw-r--r--drivers/gpio/gpio-mmio.c108
-rw-r--r--drivers/gpio/gpio-mt7621.c332
-rw-r--r--drivers/gpio/gpio-mxc.c73
-rw-r--r--drivers/gpio/gpio-mxs.c3
-rw-r--r--drivers/gpio/gpio-omap.c88
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-pisosr.c22
-rw-r--r--drivers/gpio/gpio-pxa.c42
-rw-r--r--drivers/gpio/gpio-rc5t583.c2
-rw-r--r--drivers/gpio/gpio-rcar.c10
-rw-r--r--drivers/gpio/gpio-rdc321x.c2
-rw-r--r--drivers/gpio/gpio-sa1100.c2
-rw-r--r--drivers/gpio/gpio-sch.c11
-rw-r--r--drivers/gpio/gpio-sch311x.c70
-rw-r--r--drivers/gpio/gpio-spear-spics.c2
-rw-r--r--drivers/gpio/gpio-sta2x11.c41
-rw-r--r--drivers/gpio/gpio-stmpe.c2
-rw-r--r--drivers/gpio/gpio-stp-xway.c18
-rw-r--r--drivers/gpio/gpio-syscon.c33
-rw-r--r--drivers/gpio/gpio-tb10x.c3
-rw-r--r--drivers/gpio/gpio-tegra.c30
-rw-r--r--drivers/gpio/gpio-tegra186.c74
-rw-r--r--drivers/gpio/gpio-timberdale.c2
-rw-r--r--drivers/gpio/gpio-uniphier.c3
-rw-r--r--drivers/gpio/gpio-vr41xx.c8
-rw-r--r--drivers/gpio/gpio-xgene-sb.c6
-rw-r--r--drivers/gpio/gpio-xilinx.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c11
-rw-r--r--drivers/gpio/gpiolib-of.c3
-rw-r--r--drivers/gpio/gpiolib.c82
-rw-r--r--drivers/gpio/gpiolib.h2
-rw-r--r--drivers/gpu/drm/Kconfig21
-rw-r--r--drivers/gpu/drm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ObjectID.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c267
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c259
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c400
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c266
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c202
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c207
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c418
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c219
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c127
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c185
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c211
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c143
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c396
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c299
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15d.h23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c467
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c54
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_int.h7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h458
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c69
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c57
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h37
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c48
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c122
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c283
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h32
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c118
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c22
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c41
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c29
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c31
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c29
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c26
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h40
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c43
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig17
-rw-r--r--drivers/gpu/drm/amd/display/TODO8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c104
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c42
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c722
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h)18
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c49
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c104
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c562
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c330
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/log_helpers.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.c405
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c196
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c220
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c215
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c110
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c174
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c117
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c133
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c462
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_sink.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c937
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.h111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c724
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h89
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c427
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c102
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c102
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c362
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c518
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c100
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c126
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c88
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c79
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c159
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/engine.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h180
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h7
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_defs.h46
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h16
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h136
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h59
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c147
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/luts_1d.h51
-rw-r--r--drivers/gpu/drm/amd/display/modules/stats/stats.c4
-rw-r--r--drivers/gpu/drm/amd/include/amd_pcie.h2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h46
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h20
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h15
-rw-r--r--drivers/gpu/drm/amd/include/dm_pp_interface.h37
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h55
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h98
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h50
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h50
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h (renamed from drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h)11
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h33
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h34
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h34
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h34
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h37
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h40
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h15
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c109
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c56
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c155
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c43
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c11
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c57
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c179
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c1133
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h13
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h11
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h18
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/Makefile2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c119
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c74
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c108
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c101
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c150
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h32
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c80
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c168
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c220
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c85
-rw-r--r--drivers/gpu/drm/arc/arcpgu_crtc.c5
-rw-r--r--drivers/gpu/drm/arc/arcpgu_sim.c2
-rw-r--r--drivers/gpu/drm/arm/Makefile1
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c35
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c76
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.h2
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c10
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c187
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h26
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c297
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h40
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c250
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.h14
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h24
-rw-r--r--drivers/gpu/drm/armada/Makefile2
-rw-r--r--drivers/gpu/drm/armada/armada_510.c24
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c1008
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h56
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h14
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c54
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c30
-rw-r--r--drivers/gpu/drm/armada/armada_fb.h6
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c4
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c17
-rw-r--r--drivers/gpu/drm/armada/armada_hw.h15
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c665
-rw-r--r--drivers/gpu/drm/armada/armada_plane.c289
-rw-r--r--drivers/gpu/drm/armada/armada_plane.h15
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c6
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c19
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c2
-rw-r--r--drivers/gpu/drm/bridge/Kconfig4
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c4
-rw-r--r--drivers/gpu/drm/bridge/analogix-anx78xx.c8
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c6
-rw-r--r--drivers/gpu/drm/bridge/cdns-dsi.c6
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c4
-rw-r--r--drivers/gpu/drm/bridge/lvds-encoder.c4
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c4
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c4
-rw-r--r--drivers/gpu/drm/bridge/panel.c2
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c2
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c4
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h10
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c20
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c43
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c14
-rw-r--r--drivers/gpu/drm/drm_atomic.c408
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c114
-rw-r--r--drivers/gpu/drm/drm_client.c406
-rw-r--r--drivers/gpu/drm/drm_connector.c219
-rw-r--r--drivers/gpu/drm/drm_crtc.c53
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h28
-rw-r--r--drivers/gpu/drm/drm_debugfs.c11
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c9
-rw-r--r--drivers/gpu/drm/drm_dp_cec.c428
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c33
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c4
-rw-r--r--drivers/gpu/drm/drm_drv.c23
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c29
-rw-r--r--drivers/gpu/drm/drm_edid.c279
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c355
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c359
-rw-r--r--drivers/gpu/drm/drm_file.c306
-rw-r--r--drivers/gpu/drm/drm_fourcc.c42
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c49
-rw-r--r--drivers/gpu/drm/drm_gem.c9
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c2
-rw-r--r--drivers/gpu/drm/drm_global.c2
-rw-r--r--drivers/gpu/drm/drm_internal.h2
-rw-r--r--drivers/gpu/drm/drm_ioctl.c13
-rw-r--r--drivers/gpu/drm/drm_lease.c2
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c2
-rw-r--r--drivers/gpu/drm/drm_mm.c91
-rw-r--r--drivers/gpu/drm/drm_mode_config.c5
-rw-r--r--drivers/gpu/drm/drm_mode_object.c3
-rw-r--r--drivers/gpu/drm/drm_modes.c23
-rw-r--r--drivers/gpu/drm/drm_of.c27
-rw-r--r--drivers/gpu/drm/drm_panel.c27
-rw-r--r--drivers/gpu/drm/drm_pci.c58
-rw-r--r--drivers/gpu/drm/drm_plane.c169
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c12
-rw-r--r--drivers/gpu/drm/drm_prime.c34
-rw-r--r--drivers/gpu/drm/drm_print.c111
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c19
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c6
-rw-r--r--drivers/gpu/drm/drm_syncobj.c1
-rw-r--r--drivers/gpu/drm/drm_vm.c10
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c1
-rw-r--r--drivers/gpu/drm/drm_writeback.c353
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c37
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c10
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c9
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c28
-rw-r--r--drivers/gpu/drm/exynos/Makefile2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c119
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h47
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c300
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.h11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c58
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h24
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c4
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c7
-rw-r--r--drivers/gpu/drm/gma500/accel_2d.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c62
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.h1
-rw-r--r--drivers/gpu/drm/gma500/gem.c2
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c20
-rw-r--r--drivers/gpu/drm/gma500/gtt.h2
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h38
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c3
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c3
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c13
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c2
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c1
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c387
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug12
-rw-r--r--drivers/gpu/drm/i915/Makefile7
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c20
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c22
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c26
-rw-r--r--drivers/gpu/drm/i915/dvo_ns2501.c44
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c10
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c16
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c43
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c62
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c26
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c20
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h13
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c27
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c434
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h9
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c31
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h20
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c438
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c17
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c130
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h11
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h7
-rw-r--r--drivers/gpu/drm/i915/gvt/page_track.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c36
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c208
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c57
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c374
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c177
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h127
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c593
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h21
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c174
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h28
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c11
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c115
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c1282
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h85
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h27
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c25
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c45
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c92
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h4
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c648
-rw-r--r--drivers/gpu/drm/i915/i915_params.c9
-rw-r--r--drivers/gpu/drm/i915/i915_params.h2
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c20
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c144
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c68
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h8
-rw-r--r--drivers/gpu/drm/i915/i915_pvinfo.h6
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4088
-rw-r--r--drivers/gpu/drm/i915/i915_request.c100
-rw-r--r--drivers/gpu/drm/i915/i915_request.h14
-rw-r--r--drivers/gpu/drm/i915/i915_selftest.h2
-rw-r--r--drivers/gpu/drm/i915/i915_timeline.h2
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h148
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c8
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h6
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c385
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h53
-rw-r--r--drivers/gpu/drm/i915/icl_dsi.c127
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c27
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c7
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c18
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c70
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c129
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c16
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c61
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c55
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c178
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c2
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h1
-rw-r--r--drivers/gpu/drm/i915/intel_display.c782
-rw-r--r--drivers/gpu/drm/i915/intel_display.h26
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c531
-rw-r--r--drivers/gpu/drm/i915/intel_dp_aux_backlight.c12
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c39
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c17
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c205
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.h18
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h99
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h34
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_vbt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c15
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c288
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c129
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c9
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/intel_guc.c160
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h5
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h20
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.c70
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.h26
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.c138
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c5
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c17
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c140
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c118
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c8
-rw-r--r--drivers/gpu/drm/i915/intel_huc.h6
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c84
-rw-r--r--drivers/gpu/drm/i915/intel_lpe_audio.c5
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c979
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h7
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c204
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c2
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c31
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.h1
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c28
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.c445
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c109
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c631
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c405
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h139
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c333
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c49
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c311
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c23
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c165
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c28
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h22
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h14
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds.c167
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c82
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_coherency.c47
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c301
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c39
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c174
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_object.c62
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c34
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c33
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c55
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_wedge_me.h58
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_guc.c10
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c260
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_lrc.c133
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_workarounds.c24
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_context.c7
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_dmabuf.c14
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_engine.c49
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c20
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c65
-rw-r--r--drivers/gpu/drm/i915/vlv_dsi.c (renamed from drivers/gpu/drm/i915/intel_dsi.c)117
-rw-r--r--drivers/gpu/drm/i915/vlv_dsi_pll.c (renamed from drivers/gpu/drm/i915/intel_dsi_pll.c)98
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c47
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h1
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c5
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c4
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c3
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.c235
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c15
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h10
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c102
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_fb.c76
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_fb.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c4
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c22
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c656
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.h4
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c378
-rw-r--r--drivers/gpu/drm/meson/meson_venc.h3
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c4
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/Makefile31
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c30
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c22
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c242
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c187
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c479
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h153
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c637
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h133
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c2138
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h423
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c2393
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h103
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c2498
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h177
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h430
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c905
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c922
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c1173
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h88
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c155
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h53
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c511
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h804
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h168
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c323
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h139
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c540
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h218
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c1183
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h257
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c349
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h128
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c261
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h122
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h465
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c250
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h136
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c753
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h424
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c398
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h202
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c368
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h348
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c275
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h128
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h56
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c203
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h57
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c66
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h59
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c1345
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h290
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c245
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c1963
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h175
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c249
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h225
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c1079
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h199
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h1007
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c384
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h94
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h1376
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c7
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c52
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c12
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c154
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h23
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c56
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h12
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c431
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c135
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c2
-rw-r--r--drivers/gpu/drm/msm/edp/edp_connector.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c4
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c7
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c93
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c230
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h97
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c54
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c145
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h68
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h29
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c46
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/engine.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/core.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/pll.c73
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/video-pll.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c109
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c8
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c286
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c19
-rw-r--r--drivers/gpu/drm/panel/Kconfig9
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c503
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c352
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c1
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c1
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c58
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c1
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c2
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c1
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c1
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c1
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c298
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c1
-rw-r--r--drivers/gpu/drm/pl111/Makefile1
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c56
-rw-r--r--drivers/gpu/drm/pl111/pl111_drm.h5
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c61
-rw-r--r--drivers/gpu/drm/pl111/pl111_nomadik.c36
-rw-r--r--drivers/gpu/drm/pl111/pl111_nomadik.h18
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c7
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c20
-rw-r--r--drivers/gpu/drm/radeon/cik.c22
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c94
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c8
-rw-r--r--drivers/gpu/drm/radeon/si.c22
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c20
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c6
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c4
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c16
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c4
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c86
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.h3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c99
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h23
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c8
-rw-r--r--drivers/gpu/drm/savage/savage_state.c2
-rw-r--r--drivers/gpu/drm/scheduler/Makefile1
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler.c386
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c13
-rw-r--r--drivers/gpu/drm/selftests/drm_mm_selftests.h2
-rw-r--r--drivers/gpu/drm/selftests/test-drm_mm.c71
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c8
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c6
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c8
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c4
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c2
-rw-r--r--drivers/gpu/drm/stm/drv.c10
-rw-r--r--drivers/gpu/drm/stm/ltdc.c21
-rw-r--r--drivers/gpu/drm/stm/ltdc.h1
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig7
-rw-r--r--drivers/gpu/drm/sun4i/Makefile1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c127
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c67
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c117
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c44
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h8
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c54
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c90
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c81
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c274
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.h44
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c61
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c61
-rw-r--r--drivers/gpu/drm/tegra/dc.c2
-rw-r--r--drivers/gpu/drm/tegra/dsi.c5
-rw-r--r--drivers/gpu/drm/tegra/gem.c14
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c2
-rw-r--r--drivers/gpu/drm/tegra/output.c6
-rw-r--r--drivers/gpu/drm/tegra/rgb.c2
-rw-r--r--drivers/gpu/drm/tegra/sor.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c9
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c4
-rw-r--r--drivers/gpu/drm/tinydrm/Kconfig11
-rw-r--r--drivers/gpu/drm/tinydrm/Makefile1
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c3
-rw-r--r--drivers/gpu/drm/tinydrm/ili9225.c1
-rw-r--r--drivers/gpu/drm/tinydrm/ili9341.c232
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c1
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c2
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c1
-rw-r--r--drivers/gpu/drm/tinydrm/st7735r.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c17
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c11
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c63
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c62
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c59
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c25
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c4
-rw-r--r--drivers/gpu/drm/udl/udl_dmabuf.c18
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h5
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c23
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c15
-rw-r--r--drivers/gpu/drm/udl/udl_main.c45
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c7
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c46
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c28
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h11
-rw-r--r--drivers/gpu/drm/v3d/v3d_fence.c17
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c13
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c3
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h1
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c26
-rw-r--r--drivers/gpu/drm/vc4/Makefile1
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c147
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h10
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c57
-rw-r--r--drivers/gpu/drm/vc4/vc4_fence.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c11
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c98
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h6
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c477
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c34
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c4
-rw-r--r--drivers/gpu/drm/vkms/Makefile3
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c130
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c156
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h78
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c179
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c111
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c57
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig1
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile4
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h233
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h86
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h300
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h1094
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h334
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_escape.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_reg.h211
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_types.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c1123
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c376
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c72
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h186
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c122
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c41
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c42
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c604
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h80
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_marker.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.h35
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_reg.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c709
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c146
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c109
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c553
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c (renamed from drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c)10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_va.c4
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c2
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.h4
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_shbuf.c2
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c4
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c2
-rw-r--r--drivers/gpu/drm/zte/zx_tvenc.c2
-rw-r--r--drivers/gpu/drm/zte/zx_vga.c4
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c29
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c34
-rw-r--r--drivers/gpu/ipu-v3/ipu-image-convert.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c3
-rw-r--r--drivers/gpu/ipu-v3/ipu-prg.c3
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c63
-rw-r--r--drivers/hwmon/Kconfig22
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/adt7475.c340
-rw-r--r--drivers/hwmon/emc1403.c2
-rw-r--r--drivers/hwmon/hwmon.c4
-rw-r--r--drivers/hwmon/iio_hwmon.c67
-rw-r--r--drivers/hwmon/k10temp.c8
-rw-r--r--drivers/hwmon/mlxreg-fan.c489
-rw-r--r--drivers/hwmon/nct6775.c6
-rw-r--r--drivers/hwmon/nct7904.c68
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c1057
-rw-r--r--drivers/hwmon/pmbus/Kconfig2
-rw-r--r--drivers/hwmon/pmbus/max34440.c93
-rw-r--r--drivers/i2c/i2c-core-smbus.c28
-rw-r--r--drivers/ide/ide-cd.c58
-rw-r--r--drivers/ide/ide-cd.h6
-rw-r--r--drivers/ide/ide-cd_ioctl.c62
-rw-r--r--drivers/infiniband/core/rdma_core.c2
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c4
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c10
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c4
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c11
-rw-r--r--drivers/input/keyboard/hilkbd.c4
-rw-r--r--drivers/iommu/dma-iommu.c3
-rw-r--r--drivers/irqchip/Kconfig16
-rw-r--r--drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c3
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c16
-rw-r--r--drivers/irqchip/irq-gic-v3-its-platform-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c243
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-ingenic.c1
-rw-r--r--drivers/irqchip/irq-stm32-exti.c1
-rw-r--r--drivers/isdn/capi/capi.c5
-rw-r--r--drivers/isdn/capi/capidrv.c3
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c29
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c1
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c3
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c36
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c1
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c4
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/isdn/hisax/avm_pci.c1
-rw-r--r--drivers/isdn/hisax/callc.c3
-rw-r--r--drivers/isdn/hisax/config.c9
-rw-r--r--drivers/isdn/hisax/gazel.c4
-rw-r--r--drivers/isdn/hisax/hfc_usb.c10
-rw-r--r--drivers/isdn/hisax/isar.c2
-rw-r--r--drivers/isdn/hisax/l3_1tr6.c1
-rw-r--r--drivers/isdn/hisax/l3dss1.c1
-rw-r--r--drivers/isdn/hisax/st5481_usb.c11
-rw-r--r--drivers/isdn/hysdn/hysdn_boot.c2
-rw-r--r--drivers/isdn/i4l/isdn_tty.c4
-rw-r--r--drivers/isdn/i4l/isdn_v110.c9
-rw-r--r--drivers/isdn/mISDN/stack.c1
-rw-r--r--drivers/leds/Kconfig5
-rw-r--r--drivers/leds/led-triggers.c39
-rw-r--r--drivers/leds/leds-apu.c44
-rw-r--r--drivers/leds/leds-lm3692x.c181
-rw-r--r--drivers/leds/leds-lt3593.c190
-rw-r--r--drivers/leds/leds-max8997.c2
-rw-r--r--drivers/leds/leds-ns2.c4
-rw-r--r--drivers/leds/trigger/Kconfig15
-rw-r--r--drivers/leds/trigger/ledtrig-activity.c51
-rw-r--r--drivers/leds/trigger/ledtrig-backlight.c64
-rw-r--r--drivers/leds/trigger/ledtrig-camera.c3
-rw-r--r--drivers/leds/trigger/ledtrig-default-on.c20
-rw-r--r--drivers/leds/trigger/ledtrig-gpio.c92
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c49
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c101
-rw-r--r--drivers/leds/trigger/ledtrig-oneshot.c91
-rw-r--r--drivers/leds/trigger/ledtrig-timer.c58
-rw-r--r--drivers/leds/trigger/ledtrig-transient.c102
-rw-r--r--drivers/lightnvm/Kconfig30
-rw-r--r--drivers/lightnvm/pblk-cache.c9
-rw-r--r--drivers/lightnvm/pblk-core.c78
-rw-r--r--drivers/lightnvm/pblk-gc.c34
-rw-r--r--drivers/lightnvm/pblk-init.c98
-rw-r--r--drivers/lightnvm/pblk-rb.c24
-rw-r--r--drivers/lightnvm/pblk-read.c247
-rw-r--r--drivers/lightnvm/pblk-recovery.c47
-rw-r--r--drivers/lightnvm/pblk-sysfs.c13
-rw-r--r--drivers/lightnvm/pblk-write.c35
-rw-r--r--drivers/lightnvm/pblk.h48
-rw-r--r--drivers/mailbox/Kconfig16
-rw-r--r--drivers/mailbox/Makefile4
-rw-r--r--drivers/mailbox/imx-mailbox.c358
-rw-r--r--drivers/mailbox/mailbox-xgene-slimpro.c6
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c571
-rw-r--r--drivers/mailbox/omap-mailbox.c31
-rw-r--r--drivers/mailbox/ti-msgmgr.c353
-rw-r--r--drivers/md/bcache/bcache.h24
-rw-r--r--drivers/md/bcache/bset.c63
-rw-r--r--drivers/md/bcache/btree.c63
-rw-r--r--drivers/md/bcache/btree.h2
-rw-r--r--drivers/md/bcache/closure.c13
-rw-r--r--drivers/md/bcache/closure.h4
-rw-r--r--drivers/md/bcache/debug.c17
-rw-r--r--drivers/md/bcache/journal.c1
-rw-r--r--drivers/md/bcache/request.c75
-rw-r--r--drivers/md/bcache/super.c59
-rw-r--r--drivers/md/bcache/sysfs.c48
-rw-r--r--drivers/md/bcache/util.c2
-rw-r--r--drivers/md/bcache/util.h2
-rw-r--r--drivers/md/bcache/writeback.c125
-rw-r--r--drivers/md/bcache/writeback.h19
-rw-r--r--drivers/md/dm.c6
-rw-r--r--drivers/md/md-cluster.c47
-rw-r--r--drivers/md/md.c29
-rw-r--r--drivers/md/md.h1
-rw-r--r--drivers/md/raid5-cache.c2
-rw-r--r--drivers/md/raid5.c12
-rw-r--r--drivers/media/cec/cec-adap.c18
-rw-r--r--drivers/media/cec/cec-api.c8
-rw-r--r--drivers/media/common/siano/smsdvb-debugfs.c10
-rw-r--r--drivers/media/common/siano/smsdvb-main.c6
-rw-r--r--drivers/media/common/siano/smsdvb.h7
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c5
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c3
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c3
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c3
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c2
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c84
-rw-r--r--drivers/media/dvb-core/dvbdev.c18
-rw-r--r--drivers/media/dvb-frontends/Kconfig10
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/af9013.c7
-rw-r--r--drivers/media/dvb-frontends/af9033.c7
-rw-r--r--drivers/media/dvb-frontends/as102_fe.c6
-rw-r--r--drivers/media/dvb-frontends/ascot2e.c6
-rw-r--r--drivers/media/dvb-frontends/atbm8830.c6
-rw-r--r--drivers/media/dvb-frontends/au8522_dig.c6
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c6
-rw-r--r--drivers/media/dvb-frontends/cx22700.c6
-rw-r--r--drivers/media/dvb-frontends/cx22702.c6
-rw-r--r--drivers/media/dvb-frontends/cx24110.c8
-rw-r--r--drivers/media/dvb-frontends/cx24113.c8
-rw-r--r--drivers/media/dvb-frontends/cx24116.c8
-rw-r--r--drivers/media/dvb-frontends/cx24117.c8
-rw-r--r--drivers/media/dvb-frontends/cx24120.c8
-rw-r--r--drivers/media/dvb-frontends/cx24123.c8
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t2.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c9
-rw-r--r--drivers/media/dvb-frontends/cxd2880/cxd2880_top.c6
-rw-r--r--drivers/media/dvb-frontends/dib0070.c8
-rw-r--r--drivers/media/dvb-frontends/dib0090.c12
-rw-r--r--drivers/media/dvb-frontends/dib3000mb.c6
-rw-r--r--drivers/media/dvb-frontends/dib3000mc.c6
-rw-r--r--drivers/media/dvb-frontends/dib7000m.c6
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c6
-rw-r--r--drivers/media/dvb-frontends/dib8000.c6
-rw-r--r--drivers/media/dvb-frontends/dib9000.c6
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c25
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c13
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c26
-rw-r--r--drivers/media/dvb-frontends/ds3000.c8
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c27
-rw-r--r--drivers/media/dvb-frontends/dvb_dummy_fe.c24
-rw-r--r--drivers/media/dvb-frontends/gp8psk-fe.c6
-rw-r--r--drivers/media/dvb-frontends/helene.c105
-rw-r--r--drivers/media/dvb-frontends/helene.h3
-rw-r--r--drivers/media/dvb-frontends/horus3a.c6
-rw-r--r--drivers/media/dvb-frontends/itd1000.c8
-rw-r--r--drivers/media/dvb-frontends/ix2505v.c8
-rw-r--r--drivers/media/dvb-frontends/l64781.c7
-rw-r--r--drivers/media/dvb-frontends/lg2160.c12
-rw-r--r--drivers/media/dvb-frontends/lgdt3305.c12
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c6
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c12
-rw-r--r--drivers/media/dvb-frontends/lgs8gl5.c7
-rw-r--r--drivers/media/dvb-frontends/lgs8gxx.c6
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c6
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c8
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c7
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c6
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c802
-rw-r--r--drivers/media/dvb-frontends/mn88443x.h27
-rw-r--r--drivers/media/dvb-frontends/mt312.c10
-rw-r--r--drivers/media/dvb-frontends/mt352.c7
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c6
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c6
-rw-r--r--drivers/media/dvb-frontends/nxt6000.c6
-rw-r--r--drivers/media/dvb-frontends/or51132.c6
-rw-r--r--drivers/media/dvb-frontends/or51211.c8
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c10
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c6
-rw-r--r--drivers/media/dvb-frontends/s5h1409.c6
-rw-r--r--drivers/media/dvb-frontends/s5h1411.c6
-rw-r--r--drivers/media/dvb-frontends/s5h1420.c8
-rw-r--r--drivers/media/dvb-frontends/s5h1432.c6
-rw-r--r--drivers/media/dvb-frontends/s921.c7
-rw-r--r--drivers/media/dvb-frontends/si2165.c2
-rw-r--r--drivers/media/dvb-frontends/si21xx.c7
-rw-r--r--drivers/media/dvb-frontends/sp8870.c6
-rw-r--r--drivers/media/dvb-frontends/sp887x.c6
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c6
-rw-r--r--drivers/media/dvb-frontends/stb6000.c4
-rw-r--r--drivers/media/dvb-frontends/stb6100.c5
-rw-r--r--drivers/media/dvb-frontends/stv0288.c7
-rw-r--r--drivers/media/dvb-frontends/stv0297.c6
-rw-r--r--drivers/media/dvb-frontends/stv0299.c7
-rw-r--r--drivers/media/dvb-frontends/stv0367.c20
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c7
-rw-r--r--drivers/media/dvb-frontends/stv090x.c6
-rw-r--r--drivers/media/dvb-frontends/stv0910.c10
-rw-r--r--drivers/media/dvb-frontends/stv6110.c6
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c7
-rw-r--r--drivers/media/dvb-frontends/stv6111.c5
-rw-r--r--drivers/media/dvb-frontends/tc90522.c10
-rw-r--r--drivers/media/dvb-frontends/tda10021.c10
-rw-r--r--drivers/media/dvb-frontends/tda10023.c6
-rw-r--r--drivers/media/dvb-frontends/tda10048.c6
-rw-r--r--drivers/media/dvb-frontends/tda1004x.c12
-rw-r--r--drivers/media/dvb-frontends/tda10071.c10
-rw-r--r--drivers/media/dvb-frontends/tda10086.c6
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c7
-rw-r--r--drivers/media/dvb-frontends/tda665x.c6
-rw-r--r--drivers/media/dvb-frontends/tda8083.c7
-rw-r--r--drivers/media/dvb-frontends/tda8261.c9
-rw-r--r--drivers/media/dvb-frontends/tda826x.c4
-rw-r--r--drivers/media/dvb-frontends/ts2020.c4
-rw-r--r--drivers/media/dvb-frontends/tua6100.c6
-rw-r--r--drivers/media/dvb-frontends/ves1820.c6
-rw-r--r--drivers/media/dvb-frontends/ves1x93.c8
-rw-r--r--drivers/media/dvb-frontends/zl10036.c8
-rw-r--r--drivers/media/dvb-frontends/zl10353.c7
-rw-r--r--drivers/media/firewire/firedtv-fe.c26
-rw-r--r--drivers/media/i2c/Kconfig115
-rw-r--r--drivers/media/i2c/Makefile5
-rw-r--r--drivers/media/i2c/ad9389b.c1
-rw-r--r--drivers/media/i2c/adv7180.c32
-rw-r--r--drivers/media/i2c/adv748x/adv748x-csi2.c2
-rw-r--r--drivers/media/i2c/adv7511.c1
-rw-r--r--drivers/media/i2c/adv7604.c8
-rw-r--r--drivers/media/i2c/adv7842.c9
-rw-r--r--drivers/media/i2c/ak7375.c292
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.h33
-rw-r--r--drivers/media/i2c/dw9807-vcm.c329
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c1
-rw-r--r--drivers/media/i2c/imx258.c8
-rw-r--r--drivers/media/i2c/imx274.c742
-rw-r--r--drivers/media/i2c/lm3560.c3
-rw-r--r--drivers/media/i2c/mt9m032.c1
-rw-r--r--drivers/media/i2c/mt9p031.c1
-rw-r--r--drivers/media/i2c/mt9t001.c1
-rw-r--r--drivers/media/i2c/mt9v032.c1
-rw-r--r--drivers/media/i2c/mt9v111.c1298
-rw-r--r--drivers/media/i2c/ov2680.c1186
-rw-r--r--drivers/media/i2c/ov5640.c175
-rw-r--r--drivers/media/i2c/ov5645.c13
-rw-r--r--drivers/media/i2c/ov7670.c6
-rw-r--r--drivers/media/i2c/ov772x.c353
-rw-r--r--drivers/media/i2c/rj54n1cb0c.c1437
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c20
-rw-r--r--drivers/media/i2c/soc_camera/ov772x.c2
-rw-r--r--drivers/media/i2c/tc358743.c5
-rw-r--r--drivers/media/i2c/tda1997x.c2
-rw-r--r--drivers/media/i2c/tvp514x.c2
-rw-r--r--drivers/media/i2c/tvp5150.c2
-rw-r--r--drivers/media/i2c/tvp7002.c2
-rw-r--r--drivers/media/i2c/video-i2c.c81
-rw-r--r--drivers/media/i2c/vs6624.c4
-rw-r--r--drivers/media/media-device.c16
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c2
-rw-r--r--drivers/media/pci/bt8xx/dst.c26
-rw-r--r--drivers/media/pci/bt8xx/dvb-bt8xx.c12
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c2
-rw-r--r--drivers/media/pci/cx23885/altera-ci.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c82
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-audio-upstream.c679
-rw-r--r--drivers/media/pci/cx25821/cx25821-audio-upstream.h58
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-gpio.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.c673
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.h135
-rw-r--r--drivers/media/pci/cx25821/cx25821.h12
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c7
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c4
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c20
-rw-r--r--drivers/media/pci/ddbridge/Makefile3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c45
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-hw.c3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-i2c.c5
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-max.c18
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-max.h2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-mci.c409
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-mci.h192
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-regs.h8
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-sx8.c488
-rw-r--r--drivers/media/pci/ddbridge/ddbridge.h14
-rw-r--r--drivers/media/pci/dm1105/dm1105.c3
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c1
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/pci/mantis/mantis_vp3030.c4
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c5
-rw-r--r--drivers/media/pci/pt1/pt1.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c6
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c7
-rw-r--r--drivers/media/pci/tw686x/tw686x-video.c11
-rw-r--r--drivers/media/platform/Kconfig9
-rw-r--r--drivers/media/platform/Makefile3
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c27
-rw-r--r--drivers/media/platform/cadence/Kconfig2
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c1
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c1
-rw-r--r--drivers/media/platform/cec-gpio/cec-gpio.c54
-rw-r--r--drivers/media/platform/coda/coda-bit.c123
-rw-r--r--drivers/media/platform/coda/coda-common.c189
-rw-r--r--drivers/media/platform/coda/coda-h264.c319
-rw-r--r--drivers/media/platform/coda/coda.h4
-rw-r--r--drivers/media/platform/coda/coda_regs.h1
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c1
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c1
-rw-r--r--drivers/media/platform/davinci/vpif_display.c24
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c11
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c6
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c6
-rw-r--r--drivers/media/platform/fsl-viu.c38
-rw-r--r--drivers/media/platform/m2m-deinterlace.c25
-rw-r--r--drivers/media/platform/meson/ao-cec.c2
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c5
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c25
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c23
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c16
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c2
-rw-r--r--drivers/media/platform/mx2_emmaprp.c21
-rw-r--r--drivers/media/platform/omap/Kconfig1
-rw-r--r--drivers/media/platform/omap3isp/isp.c6
-rw-r--r--drivers/media/platform/pxa_camera.c22
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.h123
-rw-r--r--drivers/media/platform/qcom/camss/Makefile (renamed from drivers/media/platform/qcom/camss-8x16/Makefile)4
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-csid.c)471
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h (renamed from drivers/media/platform/qcom/camss-8x16/camss-csid.h)17
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c176
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c256
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-csiphy.c)363
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h (renamed from drivers/media/platform/qcom/camss-8x16/camss-csiphy.h)37
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-ispif.c)264
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.h (renamed from drivers/media/platform/qcom/camss-8x16/camss-ispif.h)23
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c1018
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c1140
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-vfe.c)1569
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h186
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c (renamed from drivers/media/platform/qcom/camss-8x16/camss-video.c)133
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.h (renamed from drivers/media/platform/qcom/camss-8x16/camss-video.h)12
-rw-r--r--drivers/media/platform/qcom/camss/camss.c (renamed from drivers/media/platform/qcom/camss-8x16/camss.c)450
-rw-r--r--drivers/media/platform/qcom/camss/camss.h (renamed from drivers/media/platform/qcom/camss-8x16/camss.h)43
-rw-r--r--drivers/media/platform/qcom/venus/Makefile3
-rw-r--r--drivers/media/platform/qcom/venus/core.c107
-rw-r--r--drivers/media/platform/qcom/venus/core.h100
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c568
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h23
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c12
-rw-r--r--drivers/media/platform/qcom/venus/hfi.h10
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c62
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h112
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c407
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c278
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.h110
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c108
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h10
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c329
-rw-r--r--drivers/media/platform/qcom/venus/vdec_ctrls.c10
-rw-r--r--drivers/media/platform/qcom/venus/venc.c227
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c10
-rw-r--r--drivers/media/platform/rcar-fcp.c6
-rw-r--r--drivers/media/platform/rcar-vin/Kconfig1
-rw-r--r--drivers/media/platform/rcar-vin/Makefile1
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c321
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c20
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c63
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c18
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h37
-rw-r--r--drivers/media/platform/rcar_drif.c8
-rw-r--r--drivers/media/platform/rcar_fdp1.c6
-rw-r--r--drivers/media/platform/rcar_jpu.c27
-rw-r--r--drivers/media/platform/renesas-ceu.c91
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c45
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c20
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h2
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c4
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c19
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.h1
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c7
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c29
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c15
-rw-r--r--drivers/media/platform/sh_veu.c5
-rw-r--r--drivers/media/platform/sh_vou.c5
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c6
-rw-r--r--drivers/media/platform/soc_camera/soc_camera_platform.c5
-rw-r--r--drivers/media/platform/sti/delta/delta-v4l2.c18
-rw-r--r--drivers/media/platform/sti/hva/hva-v4l2.c1
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c259
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c20
-rw-r--r--drivers/media/platform/vicodec/Kconfig13
-rw-r--r--drivers/media/platform/vicodec/Makefile4
-rw-r--r--drivers/media/platform/vicodec/vicodec-codec.c797
-rw-r--r--drivers/media/platform/vicodec/vicodec-codec.h129
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c1506
-rw-r--r--drivers/media/platform/video-mux.c119
-rw-r--r--drivers/media/platform/vim2m.c42
-rw-r--r--drivers/media/platform/vimc/vimc-core.c1
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c2
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1.h3
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c433
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.h28
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c8
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c20
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h5
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c72
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c6
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si4713/radio-usb-si4713.c3
-rw-r--r--drivers/media/rc/bpf-lirc.c10
-rw-r--r--drivers/media/tuners/e4000.c6
-rw-r--r--drivers/media/tuners/fc0011.c6
-rw-r--r--drivers/media/tuners/fc0012.c7
-rw-r--r--drivers/media/tuners/fc0013.c7
-rw-r--r--drivers/media/tuners/fc2580.c6
-rw-r--r--drivers/media/tuners/it913x.c6
-rw-r--r--drivers/media/tuners/m88rs6000t.c6
-rw-r--r--drivers/media/tuners/max2165.c8
-rw-r--r--drivers/media/tuners/mc44s803.c8
-rw-r--r--drivers/media/tuners/mt2060.c8
-rw-r--r--drivers/media/tuners/mt2063.c7
-rw-r--r--drivers/media/tuners/mt2131.c8
-rw-r--r--drivers/media/tuners/mt2266.c8
-rw-r--r--drivers/media/tuners/mxl301rf.c4
-rw-r--r--drivers/media/tuners/mxl5005s.c8
-rw-r--r--drivers/media/tuners/mxl5007t.c2
-rw-r--r--drivers/media/tuners/qm1d1b0004.c4
-rw-r--r--drivers/media/tuners/qm1d1c0042.c4
-rw-r--r--drivers/media/tuners/qt1010.c8
-rw-r--r--drivers/media/tuners/qt1010_priv.h14
-rw-r--r--drivers/media/tuners/r820t.c6
-rw-r--r--drivers/media/tuners/si2157.c6
-rw-r--r--drivers/media/tuners/tda18212.c8
-rw-r--r--drivers/media/tuners/tda18218.c8
-rw-r--r--drivers/media/tuners/tda18250.c6
-rw-r--r--drivers/media/tuners/tda18271-fe.c6
-rw-r--r--drivers/media/tuners/tda827x.c12
-rw-r--r--drivers/media/tuners/tua9001.c6
-rw-r--r--drivers/media/tuners/tuner-simple.c5
-rw-r--r--drivers/media/tuners/tuner-xc2028.c15
-rw-r--r--drivers/media/tuners/xc4000.c16
-rw-r--r--drivers/media/tuners/xc5000.c12
-rw-r--r--drivers/media/usb/au0828/au0828-video.c2
-rw-r--r--drivers/media/usb/cx231xx/Kconfig2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c14
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c10
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c7
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig5
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c492
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.h1
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/usb_urb.c4
-rw-r--r--drivers/media/usb/dvb-usb/Kconfig6
-rw-r--r--drivers/media/usb/dvb-usb/Makefile3
-rw-r--r--drivers/media/usb/dvb-usb/af9005-fe.c6
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-fe.c6
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c1
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u-fe.c6
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c19
-rw-r--r--drivers/media/usb/dvb-usb/friio-fe.c11
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c3
-rw-r--r--drivers/media/usb/dvb-usb/usb-urb.c4
-rw-r--r--drivers/media/usb/dvb-usb/vp702x-fe.c7
-rw-r--r--drivers/media/usb/dvb-usb/vp7045-fe.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c39
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c4
-rw-r--r--drivers/media/usb/go7007/go7007-driver.c9
-rw-r--r--drivers/media/usb/go7007/snd-go7007.c11
-rw-r--r--drivers/media/usb/gspca/kinect.c2
-rw-r--r--drivers/media/usb/hackrf/hackrf.c6
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c3
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c5
-rw-r--r--drivers/media/usb/tm6000/tm6000-i2c.c2
-rw-r--r--drivers/media/usb/ttusb-dec/ttusbdecfe.c12
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c5
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c215
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c5
-rw-r--r--drivers/media/usb/uvc/uvc_status.c121
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c4
-rw-r--r--drivers/media/usb/uvc/uvc_video.c62
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h18
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c38
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c128
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c266
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c22
-rw-r--r--drivers/memory/Kconfig6
-rw-r--r--drivers/message/fusion/mptbase.c5
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/message/fusion/mptfc.c2
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/misc/cxl/api.c22
-rw-r--r--drivers/misc/pci_endpoint_test.c290
-rw-r--r--drivers/mmc/host/pxamci.c29
-rw-r--r--drivers/mtd/Kconfig24
-rw-r--r--drivers/mtd/chips/Kconfig2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c2
-rw-r--r--drivers/mtd/chips/gen_probe.c7
-rw-r--r--drivers/mtd/devices/Kconfig4
-rw-r--r--drivers/mtd/devices/m25p80.c8
-rw-r--r--drivers/mtd/devices/powernv_flash.c1
-rw-r--r--drivers/mtd/devices/sst25l.c5
-rw-r--r--drivers/mtd/lpddr/Kconfig8
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c2
-rw-r--r--drivers/mtd/maps/Kconfig24
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c3
-rw-r--r--drivers/mtd/maps/impa7.c5
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c2
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c5
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c3
-rw-r--r--drivers/mtd/maps/solutionengine.c6
-rw-r--r--drivers/mtd/mtdchar.c10
-rw-r--r--drivers/mtd/mtdcore.c28
-rw-r--r--drivers/mtd/mtdpart.c33
-rw-r--r--drivers/mtd/nand/Kconfig1
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/onenand/generic.c5
-rw-r--r--drivers/mtd/nand/onenand/samsung.c5
-rw-r--r--drivers/mtd/nand/raw/Kconfig157
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c175
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c1
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c67
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c143
-rw-r--r--drivers/mtd/nand/raw/cmx270_nand.c4
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c3
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c231
-rw-r--r--drivers/mtd/nand/raw/denali.c216
-rw-r--r--drivers/mtd/nand/raw/denali.h1
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c70
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c1
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c4
-rw-r--r--drivers/mtd/nand/raw/docg4.c89
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c25
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c25
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c183
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c15
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c76
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h11
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c78
-rw-r--r--drivers/mtd/nand/raw/jz4740_nand.c51
-rw-r--r--drivers/mtd/nand/raw/jz4780_nand.c41
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c61
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c77
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c319
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c79
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c157
-rw-r--r--drivers/mtd/nand/raw/nand_base.c345
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c10
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c23
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c351
-rw-r--r--drivers/mtd/nand/raw/nand_timings.c32
-rw-r--r--drivers/mtd/nand/raw/nandsim.c84
-rw-r--r--drivers/mtd/nand/raw/ndfc.c4
-rw-r--r--drivers/mtd/nand/raw/omap2.c463
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c9
-rw-r--r--drivers/mtd/nand/raw/oxnas_nand.c4
-rw-r--r--drivers/mtd/nand/raw/plat_nand.c2
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c591
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c52
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c36
-rw-r--r--drivers/mtd/nand/raw/sharpsl.c5
-rw-r--r--drivers/mtd/nand/raw/sm_common.c39
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c55
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c44
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c1246
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c44
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c127
-rw-r--r--drivers/mtd/nand/spi/Kconfig7
-rw-r--r--drivers/mtd/nand/spi/Makefile3
-rw-r--r--drivers/mtd/nand/spi/core.c1155
-rw-r--r--drivers/mtd/nand/spi/macronix.c144
-rw-r--r--drivers/mtd/nand/spi/micron.c133
-rw-r--r--drivers/mtd/nand/spi/winbond.c141
-rw-r--r--drivers/mtd/nftlmount.c3
-rw-r--r--drivers/mtd/parsers/parser_trx.c7
-rw-r--r--drivers/mtd/spi-nor/atmel-quadspi.c23
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c20
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c2
-rw-r--r--drivers/mtd/spi-nor/nxp-spifi.c1
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c18
-rw-r--r--drivers/mtd/spi-nor/stm32-quadspi.c6
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/bonding/bond_sysfs.c7
-rw-r--r--drivers/net/can/Kconfig6
-rw-r--r--drivers/net/can/cc770/cc770.c2
-rw-r--r--drivers/net/can/dev.c14
-rw-r--r--drivers/net/can/flexcan.c33
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c2
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c20
-rw-r--r--drivers/net/can/sja1000/peak_pci.c2
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/sun4i_can.c2
-rw-r--r--drivers/net/can/usb/Kconfig48
-rw-r--r--drivers/net/can/usb/Makefile7
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2085
-rw-r--r--drivers/net/can/usb/kvaser_usb/Makefile2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h188
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c835
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c2028
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c1358
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c1
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c1
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c2
-rw-r--r--drivers/net/can/usb/ucan.c1613
-rw-r--r--drivers/net/can/xilinx_can.c512
-rw-r--r--drivers/net/dsa/Kconfig24
-rw-r--r--drivers/net/dsa/Makefile3
-rw-r--r--drivers/net/dsa/b53/b53_srab.c1
-rw-r--r--drivers/net/dsa/bcm_sf2.c19
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c46
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c243
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h47
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h3
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2_avb.c25
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c134
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h14
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c109
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h23
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c101
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.h71
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c439
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h24
-rw-r--r--drivers/net/dsa/realtek-smi.c489
-rw-r--r--drivers/net/dsa/realtek-smi.h144
-rw-r--r--drivers/net/dsa/rtl8366.c515
-rw-r--r--drivers/net/dsa/rtl8366rb.c1454
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx.c1365
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c1
-rw-r--r--drivers/net/ethernet/8390/mac8390.c20
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile9
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c2
-rw-r--r--drivers/net/ethernet/alteon/acenic.c6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c5
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c7
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c4
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c6
-rw-r--r--drivers/net/ethernet/apple/bmac.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c117
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h20
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c47
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c51
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c52
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h35
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c69
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/ver.h4
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c2
-rw-r--r--drivers/net/ethernet/aurora/Kconfig1
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig12
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c223
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h11
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c233
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h33
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h66
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c89
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c121
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c375
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h37
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h1214
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c33
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c1
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c11
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c17
-rw-r--r--drivers/net/ethernet/cadence/Kconfig6
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c88
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c1
-rw-r--r--drivers/net/ethernet/cavium/Kconfig6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c47
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c35
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c35
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c40
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h9
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c26
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c57
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h33
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c708
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c186
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c163
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c376
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c54
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h29
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h12
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c135
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig40
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h57
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c96
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c88
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c39
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c22
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c27
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c5
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c12
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c21
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fec.h3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c18
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c9
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig15
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c30
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c56
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h28
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c408
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c115
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h120
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c676
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h48
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c30
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c75
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c27
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c26
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c177
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c36
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c18
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c168
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h33
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c347
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c3
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c69
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c13
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h16
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c267
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c365
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1
-rw-r--r--drivers/net/ethernet/jme.c4
-rw-r--r--drivers/net/ethernet/lantiq_etop.c10
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c440
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.c15
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.h8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h134
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c973
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h203
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c703
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c179
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c223
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h75
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c71
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/crdump.c239
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c220
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c947
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h175
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h274
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h210
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c307
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c117
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c603
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c278
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c269
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c207
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c230
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/vxlan.h)43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c190
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c128
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h57
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c117
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h91
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/emad.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/ib.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h973
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c473
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h260
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c428
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c342
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c239
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c271
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c132
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c536
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c196
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c1168
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c116
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c285
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h124
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c438
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h228
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c302
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c193
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c463
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h46
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c354
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c344
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h36
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c3
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c3
-rw-r--r--drivers/net/ethernet/microchip/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c723
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.h11
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c284
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h234
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c1160
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h74
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c445
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c2
-rw-r--r--drivers/net/ethernet/neterion/Kconfig23
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c31
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c25
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/fw.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c313
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c57
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h58
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c79
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c92
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c213
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h37
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c51
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c19
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h26
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.h29
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c20
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h19
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c169
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c58
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c6
-rw-r--r--drivers/net/ethernet/ni/nixge.c12
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Makefile2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h40
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c262
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h35
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c19
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c193
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h2
-rw-r--r--drivers/net/ethernet/packetengines/Kconfig6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c10
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c127
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c28
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c67
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c56
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h20
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c138
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c423
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c31
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c195
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_mpi.c1
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c2
-rw-r--r--drivers/net/ethernet/realtek/Kconfig2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1082
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c9
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c47
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h14
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c5
-rw-r--r--drivers/net/ethernet/sfc/efx.c12
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c1
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h3
-rw-r--r--drivers/net/ethernet/sfc/rx.c7
-rw-r--r--drivers/net/ethernet/smsc/epic100.c2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c69
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h228
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c371
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c280
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c411
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c71
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c133
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c60
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c3
-rw-r--r--drivers/net/ethernet/sun/niu.c16
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c3
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c4
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c463
-rw-r--r--drivers/net/ethernet/ti/cpts.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c31
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c13
-rw-r--r--drivers/net/ethernet/ti/tlan.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c112
-rw-r--r--drivers/net/fjes/fjes_main.c8
-rw-r--r--drivers/net/geneve.c17
-rw-r--r--drivers/net/gtp.c2
-rw-r--r--drivers/net/hamradio/6pack.c5
-rw-r--r--drivers/net/hyperv/hyperv_net.h11
-rw-r--r--drivers/net/hyperv/netvsc_drv.c112
-rw-r--r--drivers/net/hyperv/rndis_filter.c2
-rw-r--r--drivers/net/ieee802154/Kconfig11
-rw-r--r--drivers/net/ieee802154/Makefile1
-rw-r--r--drivers/net/ieee802154/fakelb.c3
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c937
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.h73
-rw-r--r--drivers/net/macvlan.c11
-rw-r--r--drivers/net/net_failover.c11
-rw-r--r--drivers/net/netdevsim/Makefile4
-rw-r--r--drivers/net/netdevsim/bpf.c91
-rw-r--r--drivers/net/netdevsim/ipsec.c297
-rw-r--r--drivers/net/netdevsim/netdev.c119
-rw-r--r--drivers/net/netdevsim/netdevsim.h71
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/dp83640.c5
-rw-r--r--drivers/net/phy/dp83tc811.c46
-rw-r--r--drivers/net/phy/fixed_phy.c7
-rw-r--r--drivers/net/phy/marvell.c2
-rw-r--r--drivers/net/phy/mdio-mux-bcm-iproc.c108
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c22
-rw-r--r--drivers/net/phy/mscc.c2
-rw-r--r--drivers/net/phy/phy.c105
-rw-r--r--drivers/net/phy/phy_device.c8
-rw-r--r--drivers/net/phy/phylink.c30
-rw-r--r--drivers/net/phy/realtek.c80
-rw-r--r--drivers/net/phy/sfp.c803
-rw-r--r--drivers/net/phy/vitesse.c175
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c20
-rw-r--r--drivers/net/ppp/ppp_mppe.c56
-rw-r--r--drivers/net/team/team.c8
-rw-r--r--drivers/net/tun.c7
-rw-r--r--drivers/net/usb/asix_devices.c38
-rw-r--r--drivers/net/usb/catc.c1
-rw-r--r--drivers/net/usb/cdc-phonet.c6
-rw-r--r--drivers/net/usb/hso.c44
-rw-r--r--drivers/net/usb/kaweth.c8
-rw-r--r--drivers/net/usb/lan78xx.c4
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/r8152.c15
-rw-r--r--drivers/net/usb/rtl8150.c5
-rw-r--r--drivers/net/usb/sr9700.c2
-rw-r--r--drivers/net/veth.c750
-rw-r--r--drivers/net/virtio_net.c234
-rw-r--r--drivers/net/vxlan.c14
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c6
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wimax/i2400m/control.c3
-rw-r--r--drivers/net/wimax/i2400m/fw.c3
-rw-r--r--drivers/net/wimax/i2400m/netdev.c2
-rw-r--r--drivers/net/wimax/i2400m/usb-fw.c2
-rw-r--r--drivers/net/wimax/i2400m/usb-tx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig24
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h42
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c70
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c79
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h42
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c47
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h12
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c85
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h17
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c101
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h23
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c67
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c30
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c95
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c80
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c847
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c289
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c425
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c73
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c129
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c53
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h59
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c708
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h112
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c1608
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h568
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h316
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.h1
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c723
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h824
-rw-r--r--drivers/net/wireless/atmel/atmel.c18
-rw-r--r--drivers/net/wireless/broadcom/b43/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c48
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h43
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c2
-rw-r--r--drivers/net/wireless/cisco/airo.c8
-rw-r--r--drivers/net/wireless/cisco/airo_cs.c3
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c25
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.h12
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_wx.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-debug.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c10
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c163
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h172
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h250
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/common_rx.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c284
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h286
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c211
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c364
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c207
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h294
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c388
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c235
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c192
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c92
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c8
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c17
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c10
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c123
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c12
-rw-r--r--drivers/net/wireless/marvell/libertas/dev.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c30
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c7
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c95
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c17
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c34
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c36
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h17
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c55
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c25
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig27
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile20
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c77
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h174
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/Makefile7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/core.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c166
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/dma.c522
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/dma.h126
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c445
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h149
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c720
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h282
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h772
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mac.c658
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mac.h154
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c403
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c656
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h101
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h330
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c1008
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.h81
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/regs.h651
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/trace.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/trace.h313
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/tx.c270
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c381
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/util.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2.h107
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_common.c350
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c377
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h64
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.h38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_init.c305
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c259
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.c641
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c699
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_main.c326
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_phy.c360
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c349
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_regs.h30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_tx.c161
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c149
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_usb.c142
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u.h83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_core.c108
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_init.c318
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c240
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_main.c185
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c463
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c303
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c85
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c845
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_mcu.c242
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.h71
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c11
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c103
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c211
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c26
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h105
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c18
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00pci.h6
-rw-r--r--drivers/net/wireless/ray_cs.c6
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c180
-rw-r--r--drivers/net/wireless/rndis_wlan.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c38
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c23
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h3
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h3
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c29
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c90
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c538
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c146
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c30
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h1
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_chip.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c21
-rw-r--r--drivers/net/xen-netback/interface.c4
-rw-r--r--drivers/net/xen-netback/netback.c4
-rw-r--r--drivers/net/xen-netfront.c9
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c3
-rw-r--r--drivers/nubus/bus.c3
-rw-r--r--drivers/nvdimm/btt.c12
-rw-r--r--drivers/nvdimm/nd.h7
-rw-r--r--drivers/nvdimm/pmem.c13
-rw-r--r--drivers/nvme/host/core.c108
-rw-r--r--drivers/nvme/host/fabrics.c2
-rw-r--r--drivers/nvme/host/fc.c1
-rw-r--r--drivers/nvme/host/lightnvm.c27
-rw-r--r--drivers/nvme/host/multipath.c349
-rw-r--r--drivers/nvme/host/nvme.h78
-rw-r--r--drivers/nvme/host/pci.c77
-rw-r--r--drivers/nvme/host/rdma.c234
-rw-r--r--drivers/nvme/host/trace.c11
-rw-r--r--drivers/nvme/host/trace.h142
-rw-r--r--drivers/nvme/target/admin-cmd.c221
-rw-r--r--drivers/nvme/target/configfs.c250
-rw-r--r--drivers/nvme/target/core.c104
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c7
-rw-r--r--drivers/nvme/target/io-cmd-file.c80
-rw-r--r--drivers/nvme/target/loop.c1
-rw-r--r--drivers/nvme/target/nvmet.h62
-rw-r--r--drivers/nvme/target/rdma.c198
-rw-r--r--drivers/of/address.c6
-rw-r--r--drivers/of/device.c21
-rw-r--r--drivers/of/fdt.c9
-rw-r--r--drivers/of/of_mdio.c17
-rw-r--r--drivers/of/platform.c3
-rw-r--r--drivers/pci/ats.c3
-rw-r--r--drivers/pci/controller/Kconfig12
-rw-r--r--drivers/pci/controller/Makefile1
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c2
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c1
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c1
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c3
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c210
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c12
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h29
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c28
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c1
-rw-r--r--drivers/pci/controller/pci-aardvark.c79
-rw-r--r--drivers/pci/controller/pci-hyperv.c3
-rw-r--r--drivers/pci/controller/pci-mvebu.c153
-rw-r--r--drivers/pci/controller/pcie-cadence-ep.c21
-rw-r--r--drivers/pci/controller/pcie-cadence-host.c33
-rw-r--r--drivers/pci/controller/pcie-cadence.c123
-rw-r--r--drivers/pci/controller/pcie-cadence.h13
-rw-r--r--drivers/pci/controller/pcie-iproc.c159
-rw-r--r--drivers/pci/controller/pcie-iproc.h8
-rw-r--r--drivers/pci/controller/pcie-mobiveil.c4
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c2
-rw-r--r--drivers/pci/controller/vmd.c13
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c86
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c24
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c68
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c36
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c22
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c14
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c16
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c15
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c20
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c134
-rw-r--r--drivers/pci/hotplug/pciehp.h117
-rw-r--r--drivers/pci/hotplug/pciehp_core.c127
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c351
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c281
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c4
-rw-r--r--drivers/pci/hotplug/pcihp_skeleton.c348
-rw-r--r--drivers/pci/hotplug/pnv_php.c5
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c2
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c13
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c13
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c9
-rw-r--r--drivers/pci/hotplug/shpchp_core.c41
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c2
-rw-r--r--drivers/pci/iov.c4
-rw-r--r--drivers/pci/irq.c6
-rw-r--r--drivers/pci/msi.c3
-rw-r--r--drivers/pci/of.c14
-rw-r--r--drivers/pci/pci-acpi.c19
-rw-r--r--drivers/pci/pci-driver.c2
-rw-r--r--drivers/pci/pci-sysfs.c6
-rw-r--r--drivers/pci/pci.c505
-rw-r--r--drivers/pci/pci.h69
-rw-r--r--drivers/pci/pcie/aer.c341
-rw-r--r--drivers/pci/pcie/aspm.c8
-rw-r--r--drivers/pci/pcie/dpc.c107
-rw-r--r--drivers/pci/pcie/err.c21
-rw-r--r--drivers/pci/pcie/portdrv.h2
-rw-r--r--drivers/pci/pcie/portdrv_core.c30
-rw-r--r--drivers/pci/pcie/portdrv_pci.c27
-rw-r--r--drivers/pci/probe.c102
-rw-r--r--drivers/pci/quirks.c386
-rw-r--r--drivers/pci/remove.c1
-rw-r--r--drivers/pci/rom.c11
-rw-r--r--drivers/pci/switch/switchtec.c14
-rw-r--r--drivers/pci/vpd.c7
-rw-r--r--drivers/perf/arm-cci.c38
-rw-r--r--drivers/perf/arm-ccn.c14
-rw-r--r--drivers/perf/arm_pmu.c38
-rw-r--r--drivers/perf/arm_pmu_platform.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c12
-rw-r--r--drivers/pinctrl/Kconfig9
-rw-r--r--drivers/pinctrl/actions/Kconfig1
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c270
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.h22
-rw-r--r--drivers/pinctrl/actions/pinctrl-s900.c31
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c4
-rw-r--r--drivers/pinctrl/berlin/Kconfig5
-rw-r--r--drivers/pinctrl/berlin/Makefile1
-rw-r--r--drivers/pinctrl/berlin/berlin.c14
-rw-r--r--drivers/pinctrl/berlin/pinctrl-as370.c368
-rw-r--r--drivers/pinctrl/core.c36
-rw-r--r--drivers/pinctrl/core.h6
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mq.c351
-rw-r--r--drivers/pinctrl/intel/Kconfig12
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c17
-rw-r--r--drivers/pinctrl/intel/pinctrl-broxton.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-cedarfork.c97
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-geminilake.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-icelake.c436
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c37
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h5
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c5
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7622.c5
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c9
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c8
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c118
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c11
-rw-r--r--drivers/pinctrl/pinctrl-amd.c17
-rw-r--r--drivers/pinctrl/pinctrl-amd.h4
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c46
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c26
-rw-r--r--drivers/pinctrl/pinctrl-gemini.c2
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c2
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c104
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c24
-rw-r--r--drivers/pinctrl/pinctrl-single.c127
-rw-r--r--drivers/pinctrl/pinmux.c17
-rw-r--r--drivers/pinctrl/pinmux.h7
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c14
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c32
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm.c16
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c68
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h11
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77965.c333
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77990.c69
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c43
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c6
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.h1
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra114.c8
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra124.c8
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra20.c8
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra210.c8
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra30.c8
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c10
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c20
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c5
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c10
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c10
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c15
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c10
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c10
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c5
-rw-r--r--drivers/platform/mips/cpu_hwmon.c3
-rw-r--r--drivers/platform/x86/Kconfig11
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/asus-wmi.c12
-rw-r--r--drivers/platform/x86/eeepc-laptop.c12
-rw-r--r--drivers/platform/x86/i2c-multi-instantiate.c132
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c1
-rw-r--r--drivers/powercap/Kconfig10
-rw-r--r--drivers/powercap/Makefile1
-rw-r--r--drivers/powercap/idle_inject.c356
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/ptp/ptp_qoriq.c217
-rw-r--r--drivers/regulator/Kconfig25
-rw-r--r--drivers/regulator/Makefile4
-rw-r--r--drivers/regulator/arizona-ldo1.c27
-rw-r--r--drivers/regulator/bd71837-regulator.c44
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c72
-rw-r--r--drivers/regulator/core.c44
-rw-r--r--drivers/regulator/cpcap-regulator.c103
-rw-r--r--drivers/regulator/max14577-regulator.c22
-rw-r--r--drivers/regulator/max77686-regulator.c32
-rw-r--r--drivers/regulator/max77693-regulator.c32
-rw-r--r--drivers/regulator/max77802-regulator.c34
-rw-r--r--drivers/regulator/max8997-regulator.c33
-rw-r--r--drivers/regulator/max8998.c28
-rw-r--r--drivers/regulator/pfuze100-regulator.c112
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c769
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c38
-rw-r--r--drivers/regulator/s2mpa01.c14
-rw-r--r--drivers/regulator/s2mps11.c21
-rw-r--r--drivers/regulator/s5m8767.c16
-rw-r--r--drivers/regulator/tps65217-regulator.c2
-rw-r--r--drivers/regulator/uniphier-regulator.c213
-rw-r--r--drivers/s390/block/dasd.c17
-rw-r--r--drivers/s390/block/dasd_alias.c2
-rw-r--r--drivers/s390/block/dasd_devmap.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c12
-rw-r--r--drivers/s390/block/dasd_eer.c4
-rw-r--r--drivers/s390/block/scm_blk.c1
-rw-r--r--drivers/s390/char/Makefile5
-rw-r--r--drivers/s390/char/keyboard.c28
-rw-r--r--drivers/s390/char/monwriter.c2
-rw-r--r--drivers/s390/char/sclp_async.c38
-rw-r--r--drivers/s390/char/tape_3590.c8
-rw-r--r--drivers/s390/char/tape_class.c6
-rw-r--r--drivers/s390/cio/chp.c21
-rw-r--r--drivers/s390/cio/chsc.c18
-rw-r--r--drivers/s390/cio/chsc.h18
-rw-r--r--drivers/s390/cio/cio.c77
-rw-r--r--drivers/s390/cio/cio.h1
-rw-r--r--drivers/s390/cio/css.c82
-rw-r--r--drivers/s390/cio/css.h3
-rw-r--r--drivers/s390/cio/qdio_main.c5
-rw-r--r--drivers/s390/cio/trace.h102
-rw-r--r--drivers/s390/crypto/ap_asm.h236
-rw-r--r--drivers/s390/crypto/ap_bus.c25
-rw-r--r--drivers/s390/crypto/ap_bus.h1
-rw-r--r--drivers/s390/crypto/ap_card.c1
-rw-r--r--drivers/s390/crypto/ap_queue.c20
-rw-r--r--drivers/s390/crypto/pkey_api.c2
-rw-r--r--drivers/s390/crypto/zcrypt_card.c12
-rw-r--r--drivers/s390/crypto/zcrypt_cca_key.h20
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c20
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c12
-rw-r--r--drivers/s390/net/Kconfig10
-rw-r--r--drivers/s390/net/Makefile3
-rw-r--r--drivers/s390/net/ism.h221
-rw-r--r--drivers/s390/net/ism_drv.c623
-rw-r--r--drivers/s390/net/qeth_core.h39
-rw-r--r--drivers/s390/net/qeth_core_main.c614
-rw-r--r--drivers/s390/net/qeth_core_mpc.c11
-rw-r--r--drivers/s390/net/qeth_core_mpc.h7
-rw-r--r--drivers/s390/net/qeth_core_sys.c18
-rw-r--r--drivers/s390/net/qeth_l2.h5
-rw-r--r--drivers/s390/net/qeth_l2_main.c211
-rw-r--r--drivers/s390/net/qeth_l3_main.c400
-rw-r--r--drivers/s390/net/qeth_l3_sys.c6
-rw-r--r--drivers/s390/scsi/zfcp_aux.c2
-rw-r--r--drivers/scsi/3w-9xxx.c6
-rw-r--r--drivers/scsi/3w-sas.c3
-rw-r--r--drivers/scsi/3w-xxxx.c4
-rw-r--r--drivers/scsi/Kconfig33
-rw-r--r--drivers/scsi/Makefile5
-rw-r--r--drivers/scsi/NCR_D700.c405
-rw-r--r--drivers/scsi/NCR_D700.h30
-rw-r--r--drivers/scsi/NCR_Q720.c376
-rw-r--r--drivers/scsi/NCR_Q720.h29
-rw-r--r--drivers/scsi/a100u2w.c4
-rw-r--r--drivers/scsi/aacraid/aachba.c41
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/aacraid/dpcsup.c2
-rw-r--r--drivers/scsi/aacraid/rx.c2
-rw-r--r--drivers/scsi/aacraid/sa.c2
-rw-r--r--drivers/scsi/aacraid/src.c6
-rw-r--r--drivers/scsi/advansys.c10
-rw-r--r--drivers/scsi/aha152x.c71
-rw-r--r--drivers/scsi/aha1740.c9
-rw-r--r--drivers/scsi/aha1740.h4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c4
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c7
-rw-r--r--drivers/scsi/atp870u.c16
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c2
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c15
-rw-r--r--drivers/scsi/be2iscsi/be_main.c23
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c23
-rw-r--r--drivers/scsi/bfa/bfad_im.c19
-rw-r--r--drivers/scsi/bfa/bfad_im.h1
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/ch.c2
-rw-r--r--drivers/scsi/csiostor/csio_hw.c115
-rw-r--r--drivers/scsi/csiostor/csio_wr.c84
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c29
-rw-r--r--drivers/scsi/cxlflash/superpipe.c14
-rw-r--r--drivers/scsi/cxlflash/vlun.c7
-rw-r--r--drivers/scsi/dc395x.c5
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c12
-rw-r--r--drivers/scsi/gdth.c67
-rw-r--r--drivers/scsi/gdth.h10
-rw-r--r--drivers/scsi/gdth_proc.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h14
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c319
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c23
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c21
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c290
-rw-r--r--drivers/scsi/hosts.c32
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c12
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c6
-rw-r--r--drivers/scsi/imm.c2
-rw-r--r--drivers/scsi/ipr.c25
-rw-r--r--drivers/scsi/ipr.h7
-rw-r--r--drivers/scsi/libfc/fc_disc.c47
-rw-r--r--drivers/scsi/libfc/fc_lport.c108
-rw-r--r--drivers/scsi/libfc/fc_rport.c100
-rw-r--r--drivers/scsi/libiscsi.c2
-rw-r--r--drivers/scsi/libiscsi_tcp.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c47
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c4
-rw-r--r--drivers/scsi/lpfc/Makefile4
-rw-r--r--drivers/scsi/lpfc/lpfc.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c462
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_compat.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c68
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h20
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h46
-rw-r--r--drivers/scsi/lpfc/lpfc_ids.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nl.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c76
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c60
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c181
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c50
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.h4
-rw-r--r--drivers/scsi/megaraid.c29
-rw-r--r--drivers/scsi/megaraid.h14
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h33
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c61
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c36
-rw-r--r--drivers/scsi/mesh.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c438
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h30
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c81
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c395
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c463
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c62
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c18
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_warpdrive.c3
-rw-r--r--drivers/scsi/ncr53c8xx.c10
-rw-r--r--drivers/scsi/nsp32_debug.c2
-rw-r--r--drivers/scsi/qedi/qedi_fw_api.c30
-rw-r--r--drivers/scsi/qedi/qedi_main.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c33
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h22
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c68
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c731
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h12
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c154
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c155
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c133
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c29
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c20
-rw-r--r--drivers/scsi/qlogicpti.c2
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi.h3
-rw-r--r--drivers/scsi/scsi_debug.c57
-rw-r--r--drivers/scsi/scsi_error.c6
-rw-r--r--drivers/scsi/scsi_ioctl.c4
-rw-r--r--drivers/scsi/scsi_lib.c411
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c22
-rw-r--r--drivers/scsi/scsi_transport_fc.c4
-rw-r--r--drivers/scsi/scsi_transport_spi.c2
-rw-r--r--drivers/scsi/scsi_typedefs.h2
-rw-r--r--drivers/scsi/sd.c23
-rw-r--r--drivers/scsi/sd.h9
-rw-r--r--drivers/scsi/sd_dif.c113
-rw-r--r--drivers/scsi/sd_zbc.c6
-rw-r--r--drivers/scsi/sg.c16
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h12
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c160
-rw-r--r--drivers/scsi/snic/snic_debugfs.c10
-rw-r--r--drivers/scsi/snic/snic_trc.c6
-rw-r--r--drivers/scsi/sr_ioctl.c22
-rw-r--r--drivers/scsi/st.c3
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_fw.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.h2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c3
-rw-r--r--drivers/scsi/ufs/Kconfig9
-rw-r--r--drivers/scsi/ufs/Makefile1
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c619
-rw-r--r--drivers/scsi/ufs/ufs-hisi.h115
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c21
-rw-r--r--drivers/scsi/ufs/ufshcd.c52
-rw-r--r--drivers/scsi/ufs/ufshcd.h3
-rw-r--r--drivers/scsi/virtio_scsi.c8
-rw-r--r--drivers/sh/maple/maple.c7
-rw-r--r--drivers/soc/imx/gpc.c2
-rw-r--r--drivers/soc/qcom/Kconfig27
-rw-r--r--drivers/soc/qcom/Makefile6
-rw-r--r--drivers/soc/qcom/llcc-sdm845.c94
-rw-r--r--drivers/soc/qcom/llcc-slice.c338
-rw-r--r--drivers/soc/qcom/rmtfs_mem.c3
-rw-r--r--drivers/soc/qcom/rpmh-internal.h114
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c693
-rw-r--r--drivers/soc/qcom/rpmh.c513
-rw-r--r--drivers/soc/qcom/smem.c10
-rw-r--r--drivers/soc/qcom/trace-rpmh.h82
-rw-r--r--drivers/spi/Kconfig13
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-ath79.c2
-rw-r--r--drivers/spi/spi-bitbang.c50
-rw-r--r--drivers/spi/spi-butterfly.c4
-rw-r--r--drivers/spi/spi-cadence.c4
-rw-r--r--drivers/spi/spi-davinci.c2
-rw-r--r--drivers/spi/spi-dw-mmio.c90
-rw-r--r--drivers/spi/spi-dw.c9
-rw-r--r--drivers/spi/spi-dw.h2
-rw-r--r--drivers/spi/spi-fsl-dspi.c505
-rw-r--r--drivers/spi/spi-fsl-espi.c5
-rw-r--r--drivers/spi/spi-gpio.c49
-rw-r--r--drivers/spi/spi-img-spfi.c3
-rw-r--r--drivers/spi/spi-imx.c162
-rw-r--r--drivers/spi/spi-lm70llp.c5
-rw-r--r--drivers/spi/spi-mem.c28
-rw-r--r--drivers/spi/spi-omap2-mcspi.c9
-rw-r--r--drivers/spi/spi-orion.c77
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-sh-msiof.c53
-rw-r--r--drivers/spi/spi-sh-sci.c20
-rw-r--r--drivers/spi/spi-uniphier.c523
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c2
-rw-r--r--drivers/ssb/Kconfig21
-rw-r--r--drivers/ssb/b43_pci_bridge.c4
-rw-r--r--drivers/ssb/bridge_pcmcia_80211.c6
-rw-r--r--drivers/ssb/driver_chipcommon.c14
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c40
-rw-r--r--drivers/ssb/driver_chipcommon_sflash.c6
-rw-r--r--drivers/ssb/driver_extif.c4
-rw-r--r--drivers/ssb/driver_gige.c2
-rw-r--r--drivers/ssb/driver_gpio.c8
-rw-r--r--drivers/ssb/driver_mipscore.c17
-rw-r--r--drivers/ssb/driver_pcicore.c23
-rw-r--r--drivers/ssb/embedded.c18
-rw-r--r--drivers/ssb/host_soc.c16
-rw-r--r--drivers/ssb/main.c83
-rw-r--r--drivers/ssb/pci.c75
-rw-r--r--drivers/ssb/pcmcia.c62
-rw-r--r--drivers/ssb/scan.c38
-rw-r--r--drivers/ssb/sdio.c16
-rw-r--r--drivers/ssb/sprom.c4
-rw-r--r--drivers/ssb/ssb_private.h39
-rw-r--r--drivers/staging/android/ion/ion.c6
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c2
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c6
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.h2
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c5
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c38
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c112
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c1
-rw-r--r--drivers/staging/media/imx/imx-media.h2
-rw-r--r--drivers/staging/most/sound/sound.c1
-rw-r--r--drivers/staging/netlogic/xlr_net.c9
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c3
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c5
-rw-r--r--drivers/staging/rtl8712/wifi.h1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c5
-rw-r--r--drivers/staging/rtl8723bs/include/wifi.h1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c7
-rw-r--r--drivers/staging/rtlwifi/base.c2
-rw-r--r--drivers/staging/skein/skein_generic.c3
-rw-r--r--drivers/staging/vboxvideo/vbox_mode.c4
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c24
-rw-r--r--drivers/target/Kconfig5
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c55
-rw-r--r--drivers/target/loopback/Kconfig1
-rw-r--r--drivers/target/loopback/tcm_loop.c15
-rw-r--r--drivers/target/sbp/sbp_target.c18
-rw-r--r--drivers/target/target_core_configfs.c12
-rw-r--r--drivers/target/target_core_device.c53
-rw-r--r--drivers/target/target_core_fabric_configfs.c5
-rw-r--r--drivers/target/target_core_internal.h2
-rw-r--r--drivers/target/target_core_sbc.c7
-rw-r--r--drivers/target/target_core_tmr.c30
-rw-r--r--drivers/target/target_core_transport.c285
-rw-r--r--drivers/target/target_core_ua.c43
-rw-r--r--drivers/target/target_core_ua.h3
-rw-r--r--drivers/target/target_core_user.c377
-rw-r--r--drivers/target/target_core_xcopy.c5
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c10
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c5
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c5
-rw-r--r--drivers/tee/tee_shm.c6
-rw-r--r--drivers/thermal/armada_thermal.c534
-rw-r--r--drivers/thermal/imx_thermal.c33
-rw-r--r--drivers/thermal/qcom/Makefile2
-rw-r--r--drivers/thermal/qcom/tsens-8996.c84
-rw-r--r--drivers/thermal/qcom/tsens-common.c29
-rw-r--r--drivers/thermal/qcom/tsens-v2.c77
-rw-r--r--drivers/thermal/qcom/tsens.c3
-rw-r--r--drivers/thermal/qcom/tsens.h8
-rw-r--r--drivers/thermal/rcar_thermal.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c5
-rw-r--r--drivers/thermal/thermal_hwmon.c3
-rw-r--r--drivers/thermal/ti-soc-thermal/dra752-bandgap.h68
-rw-r--r--drivers/thermal/ti-soc-thermal/dra752-thermal-data.c65
-rw-r--r--drivers/thermal/ti-soc-thermal/omap3-thermal-data.c6
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4-thermal-data.c10
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h10
-rw-r--r--drivers/thermal/ti-soc-thermal/omap5-thermal-data.c46
-rw-r--r--drivers/thermal/ti-soc-thermal/omap5xxx-bandgap.h41
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c370
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.h43
-rw-r--r--drivers/tty/tty_audit.c2
-rw-r--r--drivers/tty/vt/keyboard.c4
-rw-r--r--drivers/usb/core/ledtrig-usbport.c34
-rw-r--r--drivers/usb/gadget/function/f_tcm.c19
-rw-r--r--drivers/vfio/pci/vfio_pci.c21
-rw-r--r--drivers/vfio/vfio_iommu_type1.c1
-rw-r--r--drivers/vhost/net.c370
-rw-r--r--drivers/vhost/scsi.c16
-rw-r--r--drivers/vhost/vhost.c71
-rw-r--r--drivers/vhost/vhost.h11
-rw-r--r--drivers/video/console/Kconfig11
-rw-r--r--drivers/video/console/dummycon.c69
-rw-r--r--drivers/video/fbdev/core/fbcon.c83
-rw-r--r--drivers/video/fbdev/efifb.c51
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss_features.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/virtio/virtio_pci_common.c7
-rw-r--r--drivers/virtio/virtio_pci_common.h2
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/xen/Kconfig24
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/balloon.c75
-rw-r--r--drivers/xen/biomerge.c2
-rw-r--r--drivers/xen/gntdev-common.h94
-rw-r--r--drivers/xen/gntdev-dmabuf.c856
-rw-r--r--drivers/xen/gntdev-dmabuf.h33
-rw-r--r--drivers/xen/gntdev.c220
-rw-r--r--drivers/xen/grant-table.c151
-rw-r--r--drivers/xen/mem-reservation.c118
-rw-r--r--drivers/xen/xen-balloon.c2
-rw-r--r--drivers/xen/xen-scsiback.c17
-rw-r--r--fs/9p/vfs_inode.c7
-rw-r--r--fs/9p/vfs_inode_dotl.c7
-rw-r--r--fs/Kconfig.binfmt5
-rw-r--r--fs/adfs/inode.c2
-rw-r--r--fs/adfs/super.c1
-rw-r--r--fs/afs/dir.c45
-rw-r--r--fs/afs/dynroot.c25
-rw-r--r--fs/afs/rxrpc.c30
-rw-r--r--fs/aio.c232
-rw-r--r--fs/anon_inodes.c30
-rw-r--r--fs/attr.c5
-rw-r--r--fs/bad_inode.c2
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/binfmt_misc.c2
-rw-r--r--fs/block_dev.c6
-rw-r--r--fs/btrfs/acl.c13
-rw-r--r--fs/btrfs/backref.c6
-rw-r--r--fs/btrfs/btrfs_inode.h2
-rw-r--r--fs/btrfs/check-integrity.c9
-rw-r--r--fs/btrfs/compression.c18
-rw-r--r--fs/btrfs/ctree.c53
-rw-r--r--fs/btrfs/ctree.h89
-rw-r--r--fs/btrfs/delayed-inode.c14
-rw-r--r--fs/btrfs/delayed-inode.h2
-rw-r--r--fs/btrfs/delayed-ref.c43
-rw-r--r--fs/btrfs/delayed-ref.h6
-rw-r--r--fs/btrfs/dev-replace.c29
-rw-r--r--fs/btrfs/dir-item.c4
-rw-r--r--fs/btrfs/disk-io.c113
-rw-r--r--fs/btrfs/disk-io.h5
-rw-r--r--fs/btrfs/extent-tree.c883
-rw-r--r--fs/btrfs/extent_io.c156
-rw-r--r--fs/btrfs/extent_io.h16
-rw-r--r--fs/btrfs/file-item.c4
-rw-r--r--fs/btrfs/file.c128
-rw-r--r--fs/btrfs/free-space-cache.c19
-rw-r--r--fs/btrfs/free-space-tree.c2
-rw-r--r--fs/btrfs/inode-map.c12
-rw-r--r--fs/btrfs/inode.c267
-rw-r--r--fs/btrfs/ioctl.c69
-rw-r--r--fs/btrfs/ordered-data.c138
-rw-r--r--fs/btrfs/ordered-data.h23
-rw-r--r--fs/btrfs/print-tree.c39
-rw-r--r--fs/btrfs/qgroup.c270
-rw-r--r--fs/btrfs/qgroup.h46
-rw-r--r--fs/btrfs/raid56.c109
-rw-r--r--fs/btrfs/reada.c3
-rw-r--r--fs/btrfs/relocation.c216
-rw-r--r--fs/btrfs/root-tree.c22
-rw-r--r--fs/btrfs/scrub.c679
-rw-r--r--fs/btrfs/send.c172
-rw-r--r--fs/btrfs/struct-funcs.c1
-rw-r--r--fs/btrfs/super.c115
-rw-r--r--fs/btrfs/sysfs.c2
-rw-r--r--fs/btrfs/tests/qgroup-tests.c24
-rw-r--r--fs/btrfs/transaction.c11
-rw-r--r--fs/btrfs/transaction.h2
-rw-r--r--fs/btrfs/tree-checker.c115
-rw-r--r--fs/btrfs/tree-log.c270
-rw-r--r--fs/btrfs/volumes.c611
-rw-r--r--fs/btrfs/volumes.h31
-rw-r--r--fs/buffer.c76
-rw-r--r--fs/ceph/file.c7
-rw-r--r--fs/ceph/super.h3
-rw-r--r--fs/cifs/Kconfig59
-rw-r--r--fs/cifs/cache.c6
-rw-r--r--fs/cifs/cifs_debug.c66
-rw-r--r--fs/cifs/cifsencrypt.c12
-rw-r--r--fs/cifs/cifsfs.c33
-rw-r--r--fs/cifs/cifsfs.h3
-rw-r--r--fs/cifs/cifsglob.h54
-rw-r--r--fs/cifs/cifsproto.h10
-rw-r--r--fs/cifs/cifssmb.c12
-rw-r--r--fs/cifs/connect.c111
-rw-r--r--fs/cifs/dir.c7
-rw-r--r--fs/cifs/fscache.c12
-rw-r--r--fs/cifs/fscache.h8
-rw-r--r--fs/cifs/inode.c38
-rw-r--r--fs/cifs/link.c4
-rw-r--r--fs/cifs/misc.c2
-rw-r--r--fs/cifs/netmisc.c19
-rw-r--r--fs/cifs/smb1ops.c4
-rw-r--r--fs/cifs/smb2inode.c6
-rw-r--r--fs/cifs/smb2misc.c13
-rw-r--r--fs/cifs/smb2ops.c540
-rw-r--r--fs/cifs/smb2pdu.c557
-rw-r--r--fs/cifs/smb2pdu.h24
-rw-r--r--fs/cifs/smb2proto.h24
-rw-r--r--fs/cifs/smb2transport.c9
-rw-r--r--fs/cifs/trace.h64
-rw-r--r--fs/cifs/transport.c211
-rw-r--r--fs/cifs/xattr.c28
-rw-r--r--fs/compat_ioctl.c40
-rw-r--r--fs/configfs/dir.c11
-rw-r--r--fs/configfs/item.c24
-rw-r--r--fs/configfs/symlink.c2
-rw-r--r--fs/dax.c10
-rw-r--r--fs/dcache.c68
-rw-r--r--fs/efivarfs/inode.c4
-rw-r--r--fs/exofs/ore.c4
-rw-r--r--fs/ext2/ialloc.c3
-rw-r--r--fs/ext2/namei.c9
-rw-r--r--fs/ext4/balloc.c6
-rw-r--r--fs/ext4/ext4.h32
-rw-r--r--fs/ext4/extents.c17
-rw-r--r--fs/ext4/ialloc.c8
-rw-r--r--fs/ext4/inode.c65
-rw-r--r--fs/ext4/mballoc.c7
-rw-r--r--fs/ext4/mmp.c6
-rw-r--r--fs/ext4/move_extent.c4
-rw-r--r--fs/ext4/namei.c1
-rw-r--r--fs/ext4/super.c75
-rw-r--r--fs/ext4/sysfs.c38
-rw-r--r--fs/ext4/truncate.h4
-rw-r--r--fs/ext4/xattr.c2
-rw-r--r--fs/f2fs/f2fs.h2
-rw-r--r--fs/f2fs/super.c3
-rw-r--r--fs/file_table.c85
-rw-r--r--fs/fuse/dir.c25
-rw-r--r--fs/gfs2/acl.c6
-rw-r--r--fs/gfs2/aops.c337
-rw-r--r--fs/gfs2/aops.h19
-rw-r--r--fs/gfs2/bmap.c414
-rw-r--r--fs/gfs2/dir.c4
-rw-r--r--fs/gfs2/file.c161
-rw-r--r--fs/gfs2/incore.h23
-rw-r--r--fs/gfs2/inode.c32
-rw-r--r--fs/gfs2/lock_dlm.c20
-rw-r--r--fs/gfs2/log.c28
-rw-r--r--fs/gfs2/lops.c2
-rw-r--r--fs/gfs2/meta_io.c4
-rw-r--r--fs/gfs2/recovery.c7
-rw-r--r--fs/gfs2/rgrp.c169
-rw-r--r--fs/gfs2/super.c1
-rw-r--r--fs/gfs2/sys.c11
-rw-r--r--fs/gfs2/trace_gfs2.h3
-rw-r--r--fs/gfs2/trans.h6
-rw-r--r--fs/gfs2/util.c38
-rw-r--r--fs/gfs2/util.h10
-rw-r--r--fs/gfs2/xattr.c59
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hostfs/hostfs_kern.c28
-rw-r--r--fs/hpfs/dir.c23
-rw-r--r--fs/hugetlbfs/inode.c54
-rw-r--r--fs/inode.c53
-rw-r--r--fs/internal.h7
-rw-r--r--fs/iomap.c780
-rw-r--r--fs/jbd2/commit.c3
-rw-r--r--fs/jffs2/dir.c32
-rw-r--r--fs/jffs2/file.c6
-rw-r--r--fs/jffs2/fs.c12
-rw-r--r--fs/jffs2/os-linux.h13
-rw-r--r--fs/jfs/jfs_imap.c8
-rw-r--r--fs/jfs/jfs_incore.h2
-rw-r--r--fs/jfs/jfs_inode.c10
-rw-r--r--fs/jfs/namei.c12
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/kernfs/dir.c29
-rw-r--r--fs/kernfs/file.c8
-rw-r--r--fs/kernfs/inode.c2
-rw-r--r--fs/kernfs/kernfs-internal.h2
-rw-r--r--fs/kernfs/symlink.c11
-rw-r--r--fs/locks.c20
-rw-r--r--fs/mpage.c4
-rw-r--r--fs/namei.c261
-rw-r--r--fs/nfs/dir.c23
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4proc.c2
-rw-r--r--fs/nfsd/vfs.c2
-rw-r--r--fs/open.c88
-rw-r--r--fs/orangefs/file.c19
-rw-r--r--fs/orangefs/inode.c3
-rw-r--r--fs/pipe.c43
-rw-r--r--fs/pstore/Kconfig17
-rw-r--r--fs/pstore/platform.c16
-rw-r--r--fs/statfs.c14
-rw-r--r--fs/sysfs/dir.c7
-rw-r--r--fs/sysfs/file.c77
-rw-r--r--fs/sysfs/group.c23
-rw-r--r--fs/sysfs/sysfs.h5
-rw-r--r--fs/timerfd.c14
-rw-r--r--fs/udf/namei.c12
-rw-r--r--fs/ufs/ialloc.c3
-rw-r--r--fs/ufs/namei.c9
-rw-r--r--fs/xfs/Makefile1
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c13
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.h4
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c41
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h1
-rw-r--r--fs/xfs/libxfs/xfs_attr.c120
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c70
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c26
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c444
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h48
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c22
-rw-r--r--fs/xfs/libxfs/xfs_btree.h4
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c37
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.h3
-rw-r--r--fs/xfs/libxfs/xfs_defer.c323
-rw-r--r--fs/xfs/libxfs/xfs_defer.h41
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c74
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h10
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c17
-rw-r--r--fs/xfs/libxfs/xfs_errortag.h4
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c33
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.h1
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c12
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.h4
-rw-r--r--fs/xfs/libxfs/xfs_iext_tree.c20
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c74
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h6
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h13
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c107
-rw-r--r--fs/xfs/libxfs/xfs_refcount.h25
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c13
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.h7
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c99
-rw-r--r--fs/xfs/libxfs/xfs_rmap.h22
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c5
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.h2
-rw-r--r--fs/xfs/libxfs/xfs_sb.c278
-rw-r--r--fs/xfs/libxfs/xfs_shared.h12
-rw-r--r--fs/xfs/libxfs/xfs_types.c34
-rw-r--r--fs/xfs/libxfs/xfs_types.h1
-rw-r--r--fs/xfs/scrub/agheader.c518
-rw-r--r--fs/xfs/scrub/agheader_repair.c889
-rw-r--r--fs/xfs/scrub/alloc.c116
-rw-r--r--fs/xfs/scrub/attr.c130
-rw-r--r--fs/xfs/scrub/bitmap.c303
-rw-r--r--fs/xfs/scrub/bitmap.h36
-rw-r--r--fs/xfs/scrub/bmap.c331
-rw-r--r--fs/xfs/scrub/btree.c334
-rw-r--r--fs/xfs/scrub/btree.h45
-rw-r--r--fs/xfs/scrub/common.c394
-rw-r--r--fs/xfs/scrub/common.h115
-rw-r--r--fs/xfs/scrub/dabtree.c170
-rw-r--r--fs/xfs/scrub/dabtree.h33
-rw-r--r--fs/xfs/scrub/dir.c264
-rw-r--r--fs/xfs/scrub/ialloc.c214
-rw-r--r--fs/xfs/scrub/inode.c282
-rw-r--r--fs/xfs/scrub/parent.c132
-rw-r--r--fs/xfs/scrub/quota.c140
-rw-r--r--fs/xfs/scrub/refcount.c194
-rw-r--r--fs/xfs/scrub/repair.c519
-rw-r--r--fs/xfs/scrub/repair.h95
-rw-r--r--fs/xfs/scrub/rmap.c172
-rw-r--r--fs/xfs/scrub/rtbitmap.c78
-rw-r--r--fs/xfs/scrub/scrub.c210
-rw-r--r--fs/xfs/scrub/scrub.h134
-rw-r--r--fs/xfs/scrub/symlink.c28
-rw-r--r--fs/xfs/scrub/trace.c6
-rw-r--r--fs/xfs/scrub/trace.h141
-rw-r--r--fs/xfs/xfs.h1
-rw-r--r--fs/xfs/xfs_aops.c1007
-rw-r--r--fs/xfs/xfs_aops.h4
-rw-r--r--fs/xfs/xfs_attr_inactive.c1
-rw-r--r--fs/xfs/xfs_attr_list.c2
-rw-r--r--fs/xfs/xfs_bmap_item.c25
-rw-r--r--fs/xfs/xfs_bmap_item.h3
-rw-r--r--fs/xfs/xfs_bmap_util.c118
-rw-r--r--fs/xfs/xfs_buf.c212
-rw-r--r--fs/xfs/xfs_buf.h15
-rw-r--r--fs/xfs/xfs_discard.c2
-rw-r--r--fs/xfs/xfs_dquot.c49
-rw-r--r--fs/xfs/xfs_error.c3
-rw-r--r--fs/xfs/xfs_export.c2
-rw-r--r--fs/xfs/xfs_file.c12
-rw-r--r--fs/xfs/xfs_filestream.c8
-rw-r--r--fs/xfs/xfs_fsmap.c4
-rw-r--r--fs/xfs/xfs_fsops.c2
-rw-r--r--fs/xfs/xfs_icache.c16
-rw-r--r--fs/xfs/xfs_inode.c177
-rw-r--r--fs/xfs/xfs_inode.h32
-rw-r--r--fs/xfs/xfs_inode_item.c4
-rw-r--r--fs/xfs/xfs_iomap.c56
-rw-r--r--fs/xfs/xfs_iomap.h2
-rw-r--r--fs/xfs/xfs_iops.c7
-rw-r--r--fs/xfs/xfs_itable.c8
-rw-r--r--fs/xfs/xfs_log.c156
-rw-r--r--fs/xfs/xfs_log.h1
-rw-r--r--fs/xfs/xfs_log_recover.c68
-rw-r--r--fs/xfs/xfs_mount.c100
-rw-r--r--fs/xfs/xfs_mount.h3
-rw-r--r--fs/xfs/xfs_qm.c22
-rw-r--r--fs/xfs/xfs_qm_syscalls.c9
-rw-r--r--fs/xfs/xfs_quotaops.c2
-rw-r--r--fs/xfs/xfs_refcount_item.c28
-rw-r--r--fs/xfs/xfs_refcount_item.h3
-rw-r--r--fs/xfs/xfs_reflink.c173
-rw-r--r--fs/xfs/xfs_reflink.h4
-rw-r--r--fs/xfs/xfs_rtalloc.c20
-rw-r--r--fs/xfs/xfs_super.c38
-rw-r--r--fs/xfs/xfs_symlink.c51
-rw-r--r--fs/xfs/xfs_trace.h78
-rw-r--r--fs/xfs/xfs_trans.c26
-rw-r--r--fs/xfs/xfs_trans.h21
-rw-r--r--fs/xfs/xfs_trans_bmap.c6
-rw-r--r--fs/xfs/xfs_trans_extfree.c2
-rw-r--r--fs/xfs/xfs_trans_refcount.c6
-rw-r--r--fs/xfs/xfs_trans_rmap.c1
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/ghes.h4
-rw-r--r--include/asm-generic/atomic-instrumented.h197
-rw-r--r--include/asm-generic/atomic.h33
-rw-r--r--include/asm-generic/atomic64.h15
-rw-r--r--include/asm-generic/bitops/atomic.h188
-rw-r--r--include/asm-generic/bitops/lock.h68
-rw-r--r--include/asm-generic/pgtable.h20
-rw-r--r--include/asm-generic/tlb.h10
-rw-r--r--include/crypto/dh.h4
-rw-r--r--include/crypto/drbg.h3
-rw-r--r--include/crypto/scatterwalk.h15
-rw-r--r--include/crypto/sha.h4
-rw-r--r--include/crypto/vmac.h63
-rw-r--r--include/drm/drmP.h18
-rw-r--r--include/drm/drm_atomic.h14
-rw-r--r--include/drm/drm_atomic_helper.h1
-rw-r--r--include/drm/drm_audio_component.h118
-rw-r--r--include/drm/drm_bridge.h48
-rw-r--r--include/drm/drm_client.h139
-rw-r--r--include/drm/drm_connector.h279
-rw-r--r--include/drm/drm_crtc.h276
-rw-r--r--include/drm/drm_debugfs_crc.h3
-rw-r--r--include/drm/drm_device.h21
-rw-r--r--include/drm/drm_dp_helper.h56
-rw-r--r--include/drm/drm_drv.h29
-rw-r--r--include/drm/drm_encoder.h16
-rw-r--r--include/drm/drm_fb_cma_helper.h6
-rw-r--r--include/drm/drm_fb_helper.h38
-rw-r--r--include/drm/drm_file.h7
-rw-r--r--include/drm/drm_fourcc.h2
-rw-r--r--include/drm/drm_mm.h34
-rw-r--r--include/drm/drm_mode_config.h36
-rw-r--r--include/drm/drm_modes.h2
-rw-r--r--include/drm/drm_modeset_helper_vtables.h17
-rw-r--r--include/drm/drm_of.h8
-rw-r--r--include/drm/drm_panel.h3
-rw-r--r--include/drm/drm_pci.h7
-rw-r--r--include/drm/drm_plane.h197
-rw-r--r--include/drm/drm_plane_helper.h6
-rw-r--r--include/drm/drm_prime.h6
-rw-r--r--include/drm/drm_print.h77
-rw-r--r--include/drm/drm_property.h4
-rw-r--r--include/drm/drm_vma_manager.h1
-rw-r--r--include/drm/drm_writeback.h136
-rw-r--r--include/drm/gpu_scheduler.h174
-rw-r--r--include/drm/i915_component.h85
-rw-r--r--include/drm/i915_drm.h4
-rw-r--r--include/drm/i915_pciids.h37
-rw-r--r--include/drm/tinydrm/tinydrm.h23
-rw-r--r--include/drm/ttm/ttm_bo_api.h25
-rw-r--r--include/drm/ttm/ttm_set_memory.h150
-rw-r--r--include/dt-bindings/clock/actions,s700-cmu.h118
-rw-r--r--include/dt-bindings/clock/aspeed-clock.h2
-rw-r--r--include/dt-bindings/clock/axg-audio-clkc.h94
-rw-r--r--include/dt-bindings/clock/axg-clkc.h4
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h1
-rw-r--r--include/dt-bindings/clock/imx6sll-clock.h9
-rw-r--r--include/dt-bindings/clock/imx6ul-clock.h8
-rw-r--r--include/dt-bindings/clock/maxim,max9485.h18
-rw-r--r--include/dt-bindings/clock/px30-cru.h389
-rw-r--r--include/dt-bindings/clock/pxa-clock.h3
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sdm845.h45
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm845.h2
-rw-r--r--include/dt-bindings/clock/r9a06g032-sysctrl.h148
-rw-r--r--include/dt-bindings/clock/rk3399-ddr.h56
-rw-r--r--include/dt-bindings/clock/sun8i-r40-ccu.h4
-rw-r--r--include/dt-bindings/clock/sun8i-tcon-top.h11
-rw-r--r--include/dt-bindings/gce/mt8173-gce.h44
-rw-r--r--include/dt-bindings/pinctrl/at91.h4
-rw-r--r--include/dt-bindings/pinctrl/samsung.h7
-rw-r--r--include/dt-bindings/regulator/maxim,max77802.h5
-rw-r--r--include/dt-bindings/regulator/qcom,rpmh-regulator.h36
-rw-r--r--include/dt-bindings/soc/qcom,rpmh-rsc.h14
-rw-r--r--include/linux/acpi.h25
-rw-r--r--include/linux/ascii85.h38
-rw-r--r--include/linux/atomic.h453
-rw-r--r--include/linux/audit.h5
-rw-r--r--include/linux/bio.h19
-rw-r--r--include/linux/bitfield.h7
-rw-r--r--include/linux/bitops.h22
-rw-r--r--include/linux/bits.h26
-rw-r--r--include/linux/blk-cgroup.h146
-rw-r--r--include/linux/blk-mq.h4
-rw-r--r--include/linux/blk_types.h27
-rw-r--r--include/linux/blkdev.h66
-rw-r--r--include/linux/bootmem.h17
-rw-r--r--include/linux/bpf-cgroup.h54
-rw-r--r--include/linux/bpf.h79
-rw-r--r--include/linux/bpf_types.h9
-rw-r--r--include/linux/brcmphy.h1
-rw-r--r--include/linux/build-salt.h20
-rw-r--r--include/linux/can/dev.h7
-rw-r--r--include/linux/cdrom.h3
-rw-r--r--include/linux/cgroup-defs.h3
-rw-r--r--include/linux/cgroup.h30
-rw-r--r--include/linux/clk-provider.h26
-rw-r--r--include/linux/clk.h33
-rw-r--r--include/linux/clocksource.h3
-rw-r--r--include/linux/compat.h20
-rw-r--r--include/linux/compat_time.h9
-rw-r--r--include/linux/console.h5
-rw-r--r--include/linux/cpu.h23
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpumask.h18
-rw-r--r--include/linux/crc32poly.h20
-rw-r--r--include/linux/cred.h15
-rw-r--r--include/linux/crypto.h5
-rw-r--r--include/linux/dcache.h3
-rw-r--r--include/linux/device.h26
-rw-r--r--include/linux/dma-buf.h21
-rw-r--r--include/linux/dma-direction.h6
-rw-r--r--include/linux/dma-fence.h32
-rw-r--r--include/linux/dma-mapping.h9
-rw-r--r--include/linux/dma-noncoherent.h8
-rw-r--r--include/linux/dma/pxa-dma.h9
-rw-r--r--include/linux/efi.h15
-rw-r--r--include/linux/etherdevice.h3
-rw-r--r--include/linux/file.h8
-rw-r--r--include/linux/filter.h51
-rw-r--r--include/linux/fs.h41
-rw-r--r--include/linux/fsl/ptp_qoriq.h44
-rw-r--r--include/linux/fwnode.h2
-rw-r--r--include/linux/genhd.h14
-rw-r--r--include/linux/gpio.h2
-rw-r--r--include/linux/gpio/aspeed.h15
-rw-r--r--include/linux/gpio/consumer.h14
-rw-r--r--include/linux/gpio/driver.h3
-rw-r--r--include/linux/hwmon.h32
-rw-r--r--include/linux/i2c.h11
-rw-r--r--include/linux/idle_inject.h29
-rw-r--r--include/linux/ieee80211.h437
-rw-r--r--include/linux/if_team.h18
-rw-r--r--include/linux/ima.h11
-rw-r--r--include/linux/inetdevice.h1
-rw-r--r--include/linux/integrity.h13
-rw-r--r--include/linux/iomap.h47
-rw-r--r--include/linux/ipc.h2
-rw-r--r--include/linux/ipc_namespace.h2
-rw-r--r--include/linux/irqchip/arm-gic-v3.h3
-rw-r--r--include/linux/jump_label.h6
-rw-r--r--include/linux/kernfs.h28
-rw-r--r--include/linux/kobject.h4
-rw-r--r--include/linux/kprobes.h53
-rw-r--r--include/linux/ktime.h7
-rw-r--r--include/linux/leds.h36
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/list.h30
-rw-r--r--include/linux/lsm_hooks.h8
-rw-r--r--include/linux/mailbox/mtk-cmdq-mailbox.h77
-rw-r--r--include/linux/memblock.h76
-rw-r--r--include/linux/memcontrol.h13
-rw-r--r--include/linux/mlx4/device.h8
-rw-r--r--include/linux/mlx5/device.h13
-rw-r--r--include/linux/mlx5/driver.h5
-rw-r--r--include/linux/mlx5/fs.h2
-rw-r--r--include/linux/mlx5/mlx5_ifc.h13
-rw-r--r--include/linux/mlx5/mlx5_ifc_fpga.h1
-rw-r--r--include/linux/mlx5/vport.h2
-rw-r--r--include/linux/mm_types.h241
-rw-r--r--include/linux/mroute_base.h3
-rw-r--r--include/linux/mtd/mtd.h8
-rw-r--r--include/linux/mtd/rawnand.h126
-rw-r--r--include/linux/mtd/spi-nor.h1
-rw-r--r--include/linux/mtd/spinand.h421
-rw-r--r--include/linux/net.h2
-rw-r--r--include/linux/netdev_features.h2
-rw-r--r--include/linux/netdevice.h219
-rw-r--r--include/linux/netfilter.h37
-rw-r--r--include/linux/netfilter/nfnetlink.h1
-rw-r--r--include/linux/netfilter/nfnetlink_osf.h (renamed from include/linux/netfilter/nf_osf.h)23
-rw-r--r--include/linux/netfilter_bridge.h11
-rw-r--r--include/linux/netfilter_ipv4.h11
-rw-r--r--include/linux/netfilter_ipv6.h5
-rw-r--r--include/linux/netlink.h1
-rw-r--r--include/linux/nmi.h10
-rw-r--r--include/linux/nvme.h72
-rw-r--r--include/linux/omap-mailbox.h5
-rw-r--r--include/linux/openvswitch.h5
-rw-r--r--include/linux/pci-dma-compat.h8
-rw-r--r--include/linux/pci-epc.h16
-rw-r--r--include/linux/pci-epf.h1
-rw-r--r--include/linux/pci.h44
-rw-r--r--include/linux/pci_hotplug.h15
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/percpu_ida.h83
-rw-r--r--include/linux/perf/arm_pmu.h11
-rw-r--r--include/linux/perf_event.h2
-rw-r--r--include/linux/phy.h12
-rw-r--r--include/linux/phylink.h1
-rw-r--r--include/linux/pinctrl/pinconf.h3
-rw-r--r--include/linux/platform_data/bt-nokia-h4p.h38
-rw-r--r--include/linux/platform_data/gpio-davinci.h3
-rw-r--r--include/linux/platform_data/jz4740/jz4740_nand.h (renamed from arch/mips/include/asm/mach-jz4740/jz4740_nand.h)4
-rw-r--r--include/linux/platform_data/media/sii9234.h24
-rw-r--r--include/linux/platform_data/mmp_dma.h4
-rw-r--r--include/linux/platform_data/mtd-orion_nand.h1
-rw-r--r--include/linux/platform_data/txx9/ndfmc.h (renamed from arch/mips/include/asm/txx9/ndfmc.h)6
-rw-r--r--include/linux/pm_domain.h15
-rw-r--r--include/linux/posix-timers.h4
-rw-r--r--include/linux/printk.h10
-rw-r--r--include/linux/pti.h1
-rw-r--r--include/linux/pxa2xx_ssp.h10
-rw-r--r--include/linux/qcom_scm.h4
-rw-r--r--include/linux/qed/qed_eth_if.h6
-rw-r--r--include/linux/qed/qed_if.h15
-rw-r--r--include/linux/random.h3
-rw-r--r--include/linux/rculist.h19
-rw-r--r--include/linux/rcupdate.h20
-rw-r--r--include/linux/rcutiny.h2
-rw-r--r--include/linux/reciprocal_div.h68
-rw-r--r--include/linux/refcount.h34
-rw-r--r--include/linux/regmap.h54
-rw-r--r--include/linux/regulator/driver.h4
-rw-r--r--include/linux/regulator/pfuze100.h11
-rw-r--r--include/linux/rfkill.h20
-rw-r--r--include/linux/rhashtable-types.h137
-rw-r--r--include/linux/rhashtable.h164
-rw-r--r--include/linux/sbitmap.h2
-rw-r--r--include/linux/sched.h13
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/sched_clock.h5
-rw-r--r--include/linux/sctp.h7
-rw-r--r--include/linux/security.h32
-rw-r--r--include/linux/sfp.h72
-rw-r--r--include/linux/skbuff.h19
-rw-r--r--include/linux/smpboot.h15
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h180
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h8
-rw-r--r--include/linux/spi/adi_spi3.h254
-rw-r--r--include/linux/spi/spi-mem.h18
-rw-r--r--include/linux/spi/spi_bitbang.h5
-rw-r--r--include/linux/spinlock.h53
-rw-r--r--include/linux/srcu.h17
-rw-r--r--include/linux/ssb/ssb.h2
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swait.h36
-rw-r--r--include/linux/swap.h11
-rw-r--r--include/linux/swapfile.h2
-rw-r--r--include/linux/switchtec.h4
-rw-r--r--include/linux/syscalls.h21
-rw-r--r--include/linux/sysfs.h14
-rw-r--r--include/linux/t10-pi.h24
-rw-r--r--include/linux/tcp.h18
-rw-r--r--include/linux/time.h4
-rw-r--r--include/linux/time64.h1
-rw-r--r--include/linux/timekeeping.h20
-rw-r--r--include/linux/torture.h4
-rw-r--r--include/linux/tpm.h7
-rw-r--r--include/linux/tracehook.h2
-rw-r--r--include/linux/udp.h4
-rw-r--r--include/linux/usb/audio-v3.h19
-rw-r--r--include/linux/verification.h6
-rw-r--r--include/linux/vga_switcheroo.h8
-rw-r--r--include/linux/virtio_config.h7
-rw-r--r--include/linux/ww_mutex.h45
-rw-r--r--include/media/cec-pin.h4
-rw-r--r--include/media/cec.h12
-rw-r--r--include/media/dvb_frontend.h49
-rw-r--r--include/media/i2c/lm3560.h1
-rw-r--r--include/media/v4l2-common.h2
-rw-r--r--include/media/v4l2-ctrls.h4
-rw-r--r--include/media/v4l2-ioctl.h15
-rw-r--r--include/media/v4l2-mediabus.h2
-rw-r--r--include/media/v4l2-mem2mem.h56
-rw-r--r--include/media/vsp1.h2
-rw-r--r--include/net/act_api.h31
-rw-r--r--include/net/addrconf.h1
-rw-r--r--include/net/af_ieee802154.h1
-rw-r--r--include/net/af_rxrpc.h2
-rw-r--r--include/net/bluetooth/hci.h224
-rw-r--r--include/net/bluetooth/hci_core.h34
-rw-r--r--include/net/bluetooth/mgmt.h55
-rw-r--r--include/net/bond_3ad.h2
-rw-r--r--include/net/bonding.h13
-rw-r--r--include/net/busy_poll.h16
-rw-r--r--include/net/cfg80211.h106
-rw-r--r--include/net/dcbnl.h13
-rw-r--r--include/net/devlink.h195
-rw-r--r--include/net/dsa.h3
-rw-r--r--include/net/dst.h14
-rw-r--r--include/net/flow_dissector.h21
-rw-r--r--include/net/gen_stats.h4
-rw-r--r--include/net/ieee80211_radiotap.h123
-rw-r--r--include/net/inet_common.h2
-rw-r--r--include/net/inet_connection_sock.h6
-rw-r--r--include/net/inet_frag.h11
-rw-r--r--include/net/inet_sock.h9
-rw-r--r--include/net/ip.h27
-rw-r--r--include/net/ip_tunnels.h8
-rw-r--r--include/net/ip_vs.h18
-rw-r--r--include/net/ipv6.h63
-rw-r--r--include/net/ipv6_frag.h104
-rw-r--r--include/net/lag.h17
-rw-r--r--include/net/mac80211.h64
-rw-r--r--include/net/net_namespace.h10
-rw-r--r--include/net/netevent.h1
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h7
-rw-r--r--include/net/netfilter/nf_conntrack_core.h15
-rw-r--r--include/net/netfilter/nf_conntrack_count.h37
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h4
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h84
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h16
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h39
-rw-r--r--include/net/netfilter/nf_flow_table.h2
-rw-r--r--include/net/netfilter/nf_log.h3
-rw-r--r--include/net/netfilter/nf_tables_core.h7
-rw-r--r--include/net/netfilter/nf_tproxy.h8
-rw-r--r--include/net/netns/hash.h7
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/netns/nftables.h1
-rw-r--r--include/net/pkt_cls.h35
-rw-r--r--include/net/pkt_sched.h7
-rw-r--r--include/net/sch_generic.h70
-rw-r--r--include/net/sctp/structs.h52
-rw-r--r--include/net/seg6.h2
-rw-r--r--include/net/seg6_hmac.h2
-rw-r--r--include/net/seg6_local.h4
-rw-r--r--include/net/smc.h65
-rw-r--r--include/net/sock.h81
-rw-r--r--include/net/sock_reuseport.h19
-rw-r--r--include/net/tc_act/tc_pedit.h1
-rw-r--r--include/net/tc_act/tc_skbedit.h37
-rw-r--r--include/net/tcp.h58
-rw-r--r--include/net/tls.h86
-rw-r--r--include/net/transp_v6.h3
-rw-r--r--include/net/udp.h4
-rw-r--r--include/net/udp_tunnel.h6
-rw-r--r--include/net/xdp.h20
-rw-r--r--include/net/xfrm.h61
-rw-r--r--include/scsi/libsas.h2
-rw-r--r--include/scsi/scsi_cmnd.h13
-rw-r--r--include/scsi/scsi_device.h14
-rw-r--r--include/scsi/scsi_host.h1
-rw-r--r--include/soc/qcom/rpmh.h51
-rw-r--r--include/soc/qcom/tcs.h56
-rw-r--r--include/sound/ac97/codec.h8
-rw-r--r--include/sound/ac97/compat.h9
-rw-r--r--include/sound/ac97/controller.h8
-rw-r--r--include/sound/ac97/regs.h20
-rw-r--r--include/sound/ac97_codec.h25
-rw-r--r--include/sound/compress_driver.h21
-rw-r--r--include/sound/dmaengine_pcm.h14
-rw-r--r--include/sound/hda_component.h61
-rw-r--r--include/sound/hda_i915.h37
-rw-r--r--include/sound/hdaudio.h65
-rw-r--r--include/sound/hdaudio_ext.h123
-rw-r--r--include/sound/memalloc.h18
-rw-r--r--include/sound/pcm.h7
-rw-r--r--include/sound/pcm_params.h10
-rw-r--r--include/sound/pxa2xx-lib.h13
-rw-r--r--include/sound/rt5682.h40
-rw-r--r--include/sound/sb16_csp.h2
-rw-r--r--include/sound/seq_midi_event.h6
-rw-r--r--include/sound/seq_virmidi.h3
-rw-r--r--include/sound/sh_fsi.h13
-rw-r--r--include/sound/simple_card.h7
-rw-r--r--include/sound/simple_card_utils.h23
-rw-r--r--include/sound/soc-acpi-intel-match.h19
-rw-r--r--include/sound/soc-acpi.h13
-rw-r--r--include/sound/soc-dai.h15
-rw-r--r--include/sound/soc-dapm.h11
-rw-r--r--include/sound/soc-dpcm.h7
-rw-r--r--include/sound/soc-topology.h37
-rw-r--r--include/sound/soc.h31
-rw-r--r--include/target/iscsi/iscsi_target_core.h1
-rw-r--r--include/target/target_core_backend.h6
-rw-r--r--include/target/target_core_base.h16
-rw-r--r--include/target/target_core_fabric.h10
-rw-r--r--include/trace/events/btrfs.h3
-rw-r--r--include/trace/events/clk.h36
-rw-r--r--include/trace/events/fib.h2
-rw-r--r--include/trace/events/filelock.h5
-rw-r--r--include/trace/events/net.h7
-rw-r--r--include/trace/events/power.h25
-rw-r--r--include/trace/events/rcu.h112
-rw-r--r--include/trace/events/rxrpc.h129
-rw-r--r--include/trace/events/sock.h30
-rw-r--r--include/uapi/asm-generic/socket.h3
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/drm/amdgpu_drm.h27
-rw-r--r--include/uapi/drm/drm.h9
-rw-r--r--include/uapi/drm/drm_fourcc.h176
-rw-r--r--include/uapi/drm/drm_mode.h8
-rw-r--r--include/uapi/drm/vmwgfx_drm.h166
-rw-r--r--include/uapi/linux/aio_abi.h6
-rw-r--r--include/uapi/linux/audit.h4
-rw-r--r--include/uapi/linux/bcache.h4
-rw-r--r--include/uapi/linux/blkzoned.h2
-rw-r--r--include/uapi/linux/bpf.h104
-rw-r--r--include/uapi/linux/can.h2
-rw-r--r--include/uapi/linux/cec.h2
-rw-r--r--include/uapi/linux/dcbnl.h3
-rw-r--r--include/uapi/linux/devlink.h42
-rw-r--r--include/uapi/linux/dvb/audio.h37
-rw-r--r--include/uapi/linux/dvb/video.h58
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/errqueue.h4
-rw-r--r--include/uapi/linux/ethtool.h9
-rw-r--r--include/uapi/linux/if_link.h17
-rw-r--r--include/uapi/linux/ila.h1
-rw-r--r--include/uapi/linux/ip.h1
-rw-r--r--include/uapi/linux/kfd_ioctl.h33
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/uapi/linux/l2tp.h15
-rw-r--r--include/uapi/linux/media-bus-format.h3
-rw-r--r--include/uapi/linux/media.h46
-rw-r--r--include/uapi/linux/mii.h1
-rw-r--r--include/uapi/linux/mroute.h2
-rw-r--r--include/uapi/linux/net_tstamp.h18
-rw-r--r--include/uapi/linux/netconf.h1
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h124
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_osf.h (renamed from include/uapi/linux/netfilter/nf_osf.h)32
-rw-r--r--include/uapi/linux/netfilter/xt_osf.h22
-rw-r--r--include/uapi/linux/netfilter_bridge.h11
-rw-r--r--include/uapi/linux/nl80211.h102
-rw-r--r--include/uapi/linux/openvswitch.h3
-rw-r--r--include/uapi/linux/pci_regs.h4
-rw-r--r--include/uapi/linux/pcitest.h3
-rw-r--r--include/uapi/linux/pkt_cls.h41
-rw-r--r--include/uapi/linux/pkt_sched.h150
-rw-r--r--include/uapi/linux/ppp-ioctl.h2
-rw-r--r--include/uapi/linux/rds.h69
-rw-r--r--include/uapi/linux/rtnetlink.h7
-rw-r--r--include/uapi/linux/sctp.h5
-rw-r--r--include/uapi/linux/smc_diag.h25
-rw-r--r--include/uapi/linux/snmp.h3
-rw-r--r--include/uapi/linux/sysctl.h3
-rw-r--r--include/uapi/linux/tc_act/tc_pedit.h9
-rw-r--r--include/uapi/linux/tc_act/tc_skbedit.h2
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h28
-rw-r--r--include/uapi/linux/tcp.h10
-rw-r--r--include/uapi/linux/time.h7
-rw-r--r--include/uapi/linux/tipc_netlink.h14
-rw-r--r--include/uapi/linux/usb/audio.h49
-rw-r--r--include/uapi/linux/uvcvideo.h2
-rw-r--r--include/uapi/linux/v4l2-controls.h20
-rw-r--r--include/uapi/linux/v4l2-subdev.h4
-rw-r--r--include/uapi/linux/vhost.h18
-rw-r--r--include/uapi/linux/videodev2.h8
-rw-r--r--include/uapi/linux/xfrm.h5
-rw-r--r--include/uapi/xen/gntdev.h106
-rw-r--r--include/video/mipi_display.h3
-rw-r--r--include/xen/grant_table.h21
-rw-r--r--include/xen/mem-reservation.h59
-rw-r--r--init/Kconfig42
-rw-r--r--init/main.c11
-rw-r--r--init/version.c3
-rw-r--r--ipc/msg.c1
-rw-r--r--ipc/sem.c1
-rw-r--r--ipc/shm.c46
-rw-r--r--ipc/util.c1
-rw-r--r--kernel/Kconfig.preempt2
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/audit.c7
-rw-r--r--kernel/audit_tree.c2
-rw-r--r--kernel/audit_watch.c41
-rw-r--r--kernel/auditfilter.c17
-rw-r--r--kernel/auditsc.c14
-rw-r--r--kernel/bpf/Makefile4
-rw-r--r--kernel/bpf/arraymap.c28
-rw-r--r--kernel/bpf/cgroup.c162
-rw-r--r--kernel/bpf/core.c79
-rw-r--r--kernel/bpf/cpumap.c1
-rw-r--r--kernel/bpf/devmap.c1
-rw-r--r--kernel/bpf/hashtab.c26
-rw-r--r--kernel/bpf/helpers.c20
-rw-r--r--kernel/bpf/inode.c11
-rw-r--r--kernel/bpf/local_storage.c379
-rw-r--r--kernel/bpf/lpm_trie.c12
-rw-r--r--kernel/bpf/map_in_map.c3
-rw-r--r--kernel/bpf/offload.c223
-rw-r--r--kernel/bpf/reuseport_array.c363
-rw-r--r--kernel/bpf/sockmap.c5
-rw-r--r--kernel/bpf/stackmap.c1
-rw-r--r--kernel/bpf/syscall.c107
-rw-r--r--kernel/bpf/verifier.c55
-rw-r--r--kernel/bpf/xskmap.c3
-rw-r--r--kernel/cgroup/cgroup.c4
-rw-r--r--kernel/compat.c29
-rw-r--r--kernel/cpu.c295
-rw-r--r--kernel/dma/direct.c6
-rw-r--r--kernel/dma/noncoherent.c8
-rw-r--r--kernel/dma/swiotlb.c18
-rw-r--r--kernel/events/core.c24
-rw-r--r--kernel/events/hw_breakpoint.c92
-rw-r--r--kernel/events/uprobes.c6
-rw-r--r--kernel/fail_function.c3
-rw-r--r--kernel/fork.c20
-rw-r--r--kernel/freezer.c4
-rw-r--r--kernel/irq/Kconfig1
-rw-r--r--kernel/irq/irqdesc.c13
-rw-r--r--kernel/irq/manage.c47
-rw-r--r--kernel/irq/proc.c22
-rw-r--r--kernel/kexec.c8
-rw-r--r--kernel/kprobes.c167
-rw-r--r--kernel/kthread.c6
-rw-r--r--kernel/locking/locktorture.c7
-rw-r--r--kernel/locking/mutex.c345
-rw-r--r--kernel/locking/test-ww_mutex.c2
-rw-r--r--kernel/module.c2
-rw-r--r--kernel/power/hibernate.c16
-rw-r--r--kernel/power/main.c12
-rw-r--r--kernel/power/suspend.c8
-rw-r--r--kernel/power/swap.c4
-rw-r--r--kernel/power/user.c4
-rw-r--r--kernel/printk/internal.h9
-rw-r--r--kernel/printk/printk.c182
-rw-r--r--kernel/printk/printk_safe.c58
-rw-r--r--kernel/rcu/rcu.h104
-rw-r--r--kernel/rcu/rcuperf.c57
-rw-r--r--kernel/rcu/rcutorture.c462
-rw-r--r--kernel/rcu/srcutiny.c4
-rw-r--r--kernel/rcu/srcutree.c39
-rw-r--r--kernel/rcu/tiny.c4
-rw-r--r--kernel/rcu/tree.c1027
-rw-r--r--kernel/rcu/tree.h71
-rw-r--r--kernel/rcu/tree_exp.h18
-rw-r--r--kernel/rcu/tree_plugin.h188
-rw-r--r--kernel/rcu/update.c45
-rw-r--r--kernel/reboot.c6
-rw-r--r--kernel/sched/Makefile2
-rw-r--r--kernel/sched/clock.c57
-rw-r--r--kernel/sched/completion.c8
-rw-r--r--kernel/sched/core.c174
-rw-r--r--kernel/sched/cpufreq_schedutil.c103
-rw-r--r--kernel/sched/deadline.c8
-rw-r--r--kernel/sched/debug.c37
-rw-r--r--kernel/sched/fair.c664
-rw-r--r--kernel/sched/pelt.c399
-rw-r--r--kernel/sched/pelt.h72
-rw-r--r--kernel/sched/rt.c15
-rw-r--r--kernel/sched/sched.h87
-rw-r--r--kernel/sched/swait.c32
-rw-r--r--kernel/sched/wait.c55
-rw-r--r--kernel/smp.c2
-rw-r--r--kernel/smpboot.c54
-rw-r--r--kernel/stop_machine.c43
-rw-r--r--kernel/sys.c4
-rw-r--r--kernel/sysctl.c8
-rw-r--r--kernel/test_kprobes.c94
-rw-r--r--kernel/time/alarmtimer.c7
-rw-r--r--kernel/time/clockevents.c6
-rw-r--r--kernel/time/clocksource.c149
-rw-r--r--kernel/time/hrtimer.c7
-rw-r--r--kernel/time/ntp.c23
-rw-r--r--kernel/time/ntp_internal.h4
-rw-r--r--kernel/time/posix-cpu-timers.c2
-rw-r--r--kernel/time/posix-stubs.c2
-rw-r--r--kernel/time/posix-timers.c68
-rw-r--r--kernel/time/posix-timers.h2
-rw-r--r--kernel/time/sched_clock.c2
-rw-r--r--kernel/time/tick-broadcast-hrtimer.c2
-rw-r--r--kernel/time/time.c31
-rw-r--r--kernel/time/timekeeping.c183
-rw-r--r--kernel/time/timekeeping_debug.c2
-rw-r--r--kernel/time/timekeeping_internal.h2
-rw-r--r--kernel/time/timer.c31
-rw-r--r--kernel/torture.c15
-rw-r--r--kernel/trace/Kconfig4
-rw-r--r--kernel/trace/blktrace.c6
-rw-r--r--kernel/trace/trace.c4
-rw-r--r--kernel/trace/trace_kprobe.c11
-rw-r--r--kernel/watchdog.c147
-rw-r--r--kernel/watchdog_hld.c4
-rw-r--r--lib/Kconfig.debug27
-rw-r--r--lib/Makefile4
-rw-r--r--lib/atomic64.c14
-rw-r--r--lib/bch.c23
-rw-r--r--lib/crc32.c11
-rw-r--r--lib/crc32defs.h14
-rw-r--r--lib/debugobjects.c10
-rw-r--r--lib/decompress_bunzip2.c3
-rw-r--r--lib/gen_crc32table.c5
-rw-r--r--lib/ioremap.c4
-rw-r--r--lib/klist.c10
-rw-r--r--lib/kobject.c28
-rw-r--r--lib/locking-selftest.c2
-rw-r--r--lib/mpi/mpi-pow.c3
-rw-r--r--lib/nlattr.c4
-rw-r--r--lib/nmi_backtrace.c3
-rw-r--r--lib/percpu_ida.c370
-rw-r--r--lib/raid6/s390vx.uc34
-rw-r--r--lib/reciprocal_div.c41
-rw-r--r--lib/reed_solomon/reed_solomon.c2
-rw-r--r--lib/refcount.c55
-rw-r--r--lib/rhashtable.c58
-rw-r--r--lib/test_bitfield.c168
-rw-r--r--lib/test_printf.c24
-rw-r--r--lib/test_rhashtable.c8
-rw-r--r--lib/vsprintf.c28
-rw-r--r--lib/xz/xz_crc32.c3
-rw-r--r--mm/Kconfig5
-rw-r--r--mm/bootmem.c159
-rw-r--r--mm/huge_memory.c6
-rw-r--r--mm/init-mm.c11
-rw-r--r--mm/memblock.c203
-rw-r--r--mm/memcontrol.c13
-rw-r--r--mm/memfd.c2
-rw-r--r--mm/memory.c70
-rw-r--r--mm/mprotect.c49
-rw-r--r--mm/nobootmem.c20
-rw-r--r--mm/page_alloc.c27
-rw-r--r--mm/page_io.c3
-rw-r--r--mm/readahead.c19
-rw-r--r--mm/shmem.c59
-rw-r--r--mm/swapfile.c77
-rw-r--r--mm/usercopy.c25
-rw-r--r--net/6lowpan/iphc.c1
-rw-r--r--net/8021q/Makefile1
-rw-r--r--net/8021q/vlan.c13
-rw-r--r--net/9p/mod.c4
-rw-r--r--net/Kconfig2
-rw-r--r--net/atm/common.c2
-rw-r--r--net/atm/mpoa_proc.c6
-rw-r--r--net/atm/pppoatm.c2
-rw-r--r--net/ax25/ax25_addr.c1
-rw-r--r--net/ax25/ax25_ds_in.c1
-rw-r--r--net/ax25/ax25_ds_subr.c1
-rw-r--r--net/ax25/ax25_ip.c1
-rw-r--r--net/ax25/ax25_out.c1
-rw-r--r--net/batman-adv/Kconfig8
-rw-r--r--net/batman-adv/bat_iv_ogm.h6
-rw-r--r--net/batman-adv/bat_v_ogm.h6
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c2
-rw-r--r--net/batman-adv/debugfs.c2
-rw-r--r--net/batman-adv/originator.c17
-rw-r--r--net/batman-adv/types.h7
-rw-r--r--net/bluetooth/af_bluetooth.c2
-rw-r--r--net/bluetooth/hci_conn.c189
-rw-r--r--net/bluetooth/hci_core.c105
-rw-r--r--net/bluetooth/hci_debugfs.c19
-rw-r--r--net/bluetooth/hci_event.c579
-rw-r--r--net/bluetooth/hci_request.c616
-rw-r--r--net/bluetooth/hci_request.h8
-rw-r--r--net/bluetooth/hidp/core.c6
-rw-r--r--net/bluetooth/leds.c6
-rw-r--r--net/bluetooth/mgmt.c402
-rw-r--r--net/bluetooth/sco.c3
-rw-r--r--net/bpf/test_run.c13
-rw-r--r--net/bpfilter/Kconfig1
-rw-r--r--net/bpfilter/Makefile4
-rw-r--r--net/bridge/br_forward.c16
-rw-r--r--net/bridge/br_if.c62
-rw-r--r--net/bridge/br_multicast.c12
-rw-r--r--net/bridge/br_netfilter_hooks.c1
-rw-r--r--net/bridge/br_netlink.c30
-rw-r--r--net/bridge/br_private.h5
-rw-r--r--net/bridge/br_sysfs_if.c94
-rw-r--r--net/bridge/netfilter/ebtable_filter.c1
-rw-r--r--net/bridge/netfilter/ebtable_nat.c1
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c3
-rw-r--r--net/caif/caif_socket.c2
-rw-r--r--net/compat.c6
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/dev.c874
-rw-r--r--net/core/dev_ioctl.c7
-rw-r--r--net/core/devlink.c1322
-rw-r--r--net/core/dst.c1
-rw-r--r--net/core/ethtool.c1
-rw-r--r--net/core/fib_rules.c3
-rw-r--r--net/core/filter.c554
-rw-r--r--net/core/flow_dissector.c65
-rw-r--r--net/core/gen_estimator.c21
-rw-r--r--net/core/lwt_bpf.c2
-rw-r--r--net/core/neighbour.c4
-rw-r--r--net/core/net-sysfs.c159
-rw-r--r--net/core/net_namespace.c28
-rw-r--r--net/core/pktgen.c12
-rw-r--r--net/core/rtnetlink.c82
-rw-r--r--net/core/skbuff.c18
-rw-r--r--net/core/sock.c106
-rw-r--r--net/core/sock_diag.c2
-rw-r--r--net/core/sock_reuseport.c92
-rw-r--r--net/core/utils.c2
-rw-r--r--net/core/xdp.c47
-rw-r--r--net/dcb/dcbnl.c97
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/decnet/Kconfig1
-rw-r--r--net/decnet/Makefile1
-rw-r--r--net/decnet/TODO5
-rw-r--r--net/decnet/dn_fib.c2
-rw-r--r--net/decnet/dn_nsp_in.c2
-rw-r--r--net/decnet/dn_nsp_out.c1
-rw-r--r--net/decnet/dn_route.c5
-rw-r--r--net/decnet/dn_rules.c2
-rw-r--r--net/decnet/netfilter/Makefile1
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c1
-rw-r--r--net/dns_resolver/dns_key.c1
-rw-r--r--net/dsa/dsa2.c14
-rw-r--r--net/dsa/slave.c2
-rw-r--r--net/dsa/switch.c22
-rw-r--r--net/ethernet/eth.c12
-rw-r--r--net/ieee802154/6lowpan/reassembly.c7
-rw-r--r--net/ieee802154/6lowpan/tx.c21
-rw-r--r--net/ieee802154/core.c1
-rw-r--r--net/ieee802154/nl_policy.c1
-rw-r--r--net/ieee802154/socket.c17
-rw-r--r--net/ipv4/Kconfig4
-rw-r--r--net/ipv4/Makefile2
-rw-r--r--net/ipv4/af_inet.c18
-rw-r--r--net/ipv4/bpfilter/Makefile1
-rw-r--r--net/ipv4/devinet.c11
-rw-r--r--net/ipv4/esp4_offload.c10
-rw-r--r--net/ipv4/fou.c20
-rw-r--r--net/ipv4/gre_offload.c8
-rw-r--r--net/ipv4/icmp.c9
-rw-r--r--net/ipv4/igmp.c10
-rw-r--r--net/ipv4/inet_connection_sock.c9
-rw-r--r--net/ipv4/inet_fragment.c17
-rw-r--r--net/ipv4/inet_hashtables.c19
-rw-r--r--net/ipv4/ip_forward.c3
-rw-r--r--net/ipv4/ip_fragment.c360
-rw-r--r--net/ipv4/ip_gre.c7
-rw-r--r--net/ipv4/ip_input.c147
-rw-r--r--net/ipv4/ip_output.c22
-rw-r--r--net/ipv4/ipmr.c22
-rw-r--r--net/ipv4/ipmr_base.c1
-rw-r--r--net/ipv4/netfilter.c53
-rw-r--r--net/ipv4/netfilter/Kconfig22
-rw-r--r--net/ipv4/netfilter/Makefile6
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c472
-rw-r--r--net/ipv4/netfilter/nf_log_ipv4.c8
-rw-r--r--net/ipv4/ping.c16
-rw-r--r--net/ipv4/proc.c3
-rw-r--r--net/ipv4/raw.c11
-rw-r--r--net/ipv4/route.c6
-rw-r--r--net/ipv4/sysctl_net_ipv4.c26
-rw-r--r--net/ipv4/tcp.c75
-rw-r--r--net/ipv4/tcp_bbr.c6
-rw-r--r--net/ipv4/tcp_dctcp.c4
-rw-r--r--net/ipv4/tcp_input.c68
-rw-r--r--net/ipv4/tcp_ipv4.c3
-rw-r--r--net/ipv4/tcp_minisocks.c229
-rw-r--r--net/ipv4/tcp_offload.c17
-rw-r--r--net/ipv4/tcp_output.c19
-rw-r--r--net/ipv4/tcp_rate.c4
-rw-r--r--net/ipv4/tcp_recovery.c2
-rw-r--r--net/ipv4/tcp_timer.c51
-rw-r--r--net/ipv4/udp.c20
-rw-r--r--net/ipv4/udp_offload.c13
-rw-r--r--net/ipv6/Kconfig2
-rw-r--r--net/ipv6/addrconf.c45
-rw-r--r--net/ipv6/af_inet6.c8
-rw-r--r--net/ipv6/datagram.c6
-rw-r--r--net/ipv6/esp6_offload.c10
-rw-r--r--net/ipv6/icmp.c32
-rw-r--r--net/ipv6/ila/Makefile2
-rw-r--r--net/ipv6/ila/ila.h27
-rw-r--r--net/ipv6/ila/ila_common.c31
-rw-r--r--net/ipv6/ila/ila_main.c121
-rw-r--r--net/ipv6/ila/ila_xlat.c292
-rw-r--r--net/ipv6/inet6_hashtables.c14
-rw-r--r--net/ipv6/ip6_flowlabel.c3
-rw-r--r--net/ipv6/ip6_gre.c8
-rw-r--r--net/ipv6/ip6_input.c131
-rw-r--r--net/ipv6/ip6_offload.c16
-rw-r--r--net/ipv6/ip6_output.c38
-rw-r--r--net/ipv6/ip6_tunnel.c4
-rw-r--r--net/ipv6/ip6mr.c1
-rw-r--r--net/ipv6/ipv6_sockglue.c3
-rw-r--r--net/ipv6/mcast.c8
-rw-r--r--net/ipv6/netfilter.c62
-rw-r--r--net/ipv6/netfilter/Kconfig27
-rw-r--r--net/ipv6/netfilter/Makefile6
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c460
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c22
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c4
-rw-r--r--net/ipv6/netfilter/nf_log_ipv6.c8
-rw-r--r--net/ipv6/ping.c7
-rw-r--r--net/ipv6/raw.c18
-rw-r--r--net/ipv6/reassembly.c97
-rw-r--r--net/ipv6/seg6.c1
-rw-r--r--net/ipv6/seg6_hmac.c1
-rw-r--r--net/ipv6/seg6_local.c54
-rw-r--r--net/ipv6/tcpv6_offload.c4
-rw-r--r--net/ipv6/udp.c17
-rw-r--r--net/ipv6/udp_offload.c4
-rw-r--r--net/ipv6/xfrm6_mode_ro.c2
-rw-r--r--net/iucv/af_iucv.c6
-rw-r--r--net/kcm/Kconfig1
-rw-r--r--net/kcm/kcmsock.c1
-rw-r--r--net/key/af_key.c6
-rw-r--r--net/l2tp/l2tp_core.c86
-rw-r--r--net/l2tp/l2tp_core.h73
-rw-r--r--net/l2tp/l2tp_debugfs.c8
-rw-r--r--net/l2tp/l2tp_eth.c32
-rw-r--r--net/l2tp/l2tp_ip.c4
-rw-r--r--net/l2tp/l2tp_ip6.c15
-rw-r--r--net/l2tp/l2tp_netlink.c37
-rw-r--r--net/l2tp/l2tp_ppp.c560
-rw-r--r--net/llc/Kconfig2
-rw-r--r--net/llc/Makefile2
-rw-r--r--net/llc/llc_if.c1
-rw-r--r--net/mac80211/Makefile1
-rw-r--r--net/mac80211/agg-rx.c10
-rw-r--r--net/mac80211/agg-tx.c19
-rw-r--r--net/mac80211/cfg.c9
-rw-r--r--net/mac80211/ethtool.c6
-rw-r--r--net/mac80211/he.c55
-rw-r--r--net/mac80211/ht.c2
-rw-r--r--net/mac80211/ieee80211_i.h47
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/mac80211/key.c24
-rw-r--r--net/mac80211/led.c20
-rw-r--r--net/mac80211/main.c36
-rw-r--r--net/mac80211/mlme.c312
-rw-r--r--net/mac80211/offchannel.c2
-rw-r--r--net/mac80211/rc80211_minstrel.c1
-rw-r--r--net/mac80211/rx.c129
-rw-r--r--net/mac80211/scan.c56
-rw-r--r--net/mac80211/sta_info.c101
-rw-r--r--net/mac80211/sta_info.h20
-rw-r--r--net/mac80211/trace.h2
-rw-r--r--net/mac80211/tx.c23
-rw-r--r--net/mac80211/util.c159
-rw-r--r--net/mac802154/tx.c15
-rw-r--r--net/mpls/mpls_iptunnel.c2
-rw-r--r--net/netfilter/Kconfig57
-rw-r--r--net/netfilter/Makefile12
-rw-r--r--net/netfilter/core.c15
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c67
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c19
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_udp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c18
-rw-r--r--net/netfilter/nf_conncount.c386
-rw-r--r--net/netfilter/nf_conntrack_broadcast.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c317
-rw-r--r--net/netfilter/nf_conntrack_expect.c3
-rw-r--r--net/netfilter/nf_conntrack_helper.c10
-rw-r--r--net/netfilter/nf_conntrack_l3proto_generic.c66
-rw-r--r--net/netfilter/nf_conntrack_netlink.c98
-rw-r--r--net/netfilter/nf_conntrack_proto.c844
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c44
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c32
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c24
-rw-r--r--net/netfilter/nf_conntrack_proto_icmp.c (renamed from net/ipv4/netfilter/nf_conntrack_proto_icmp.c)19
-rw-r--r--net/netfilter/nf_conntrack_proto_icmpv6.c (renamed from net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c)17
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c46
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c52
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c55
-rw-r--r--net/netfilter/nf_conntrack_standalone.c28
-rw-r--r--net/netfilter/nf_conntrack_timeout.c21
-rw-r--r--net/netfilter/nf_flow_table_core.c13
-rw-r--r--net/netfilter/nf_log_common.c5
-rw-r--r--net/netfilter/nf_nat_core.c18
-rw-r--r--net/netfilter/nf_osf.c218
-rw-r--r--net/netfilter/nf_tables_api.c226
-rw-r--r--net/netfilter/nf_tables_core.c16
-rw-r--r--net/netfilter/nfnetlink.c23
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c74
-rw-r--r--net/netfilter/nfnetlink_osf.c436
-rw-r--r--net/netfilter/nft_chain_filter.c4
-rw-r--r--net/netfilter/nft_connlimit.c36
-rw-r--r--net/netfilter/nft_ct.c220
-rw-r--r--net/netfilter/nft_dynset.c2
-rw-r--r--net/netfilter/nft_lookup.c6
-rw-r--r--net/netfilter/nft_meta.c15
-rw-r--r--net/netfilter/nft_numgen.c4
-rw-r--r--net/netfilter/nft_osf.c104
-rw-r--r--net/netfilter/nft_socket.c22
-rw-r--r--net/netfilter/nft_tproxy.c316
-rw-r--r--net/netfilter/nft_tunnel.c566
-rw-r--r--net/netfilter/utils.c131
-rw-r--r--net/netfilter/xt_AUDIT.c2
-rw-r--r--net/netfilter/xt_CT.c6
-rw-r--r--net/netfilter/xt_TEE.c4
-rw-r--r--net/netfilter/xt_TPROXY.c9
-rw-r--r--net/netfilter/xt_cgroup.c6
-rw-r--r--net/netfilter/xt_connlimit.c4
-rw-r--r--net/netfilter/xt_osf.c149
-rw-r--r--net/netfilter/xt_owner.c2
-rw-r--r--net/netfilter/xt_recent.c3
-rw-r--r--net/netfilter/xt_socket.c8
-rw-r--r--net/netlabel/netlabel_user.c2
-rw-r--r--net/netlink/af_netlink.c5
-rw-r--r--net/nfc/llcp_sock.c2
-rw-r--r--net/openvswitch/actions.c33
-rw-r--r--net/openvswitch/conntrack.c20
-rw-r--r--net/openvswitch/flow_netlink.c80
-rw-r--r--net/packet/af_packet.c64
-rw-r--r--net/packet/internal.h1
-rw-r--r--net/rds/Kconfig2
-rw-r--r--net/rds/Makefile1
-rw-r--r--net/rds/af_rds.c205
-rw-r--r--net/rds/bind.c138
-rw-r--r--net/rds/cong.c23
-rw-r--r--net/rds/connection.c283
-rw-r--r--net/rds/ib.c136
-rw-r--r--net/rds/ib.h53
-rw-r--r--net/rds/ib_cm.c320
-rw-r--r--net/rds/ib_frmr.c1
-rw-r--r--net/rds/ib_mr.h2
-rw-r--r--net/rds/ib_rdma.c26
-rw-r--r--net/rds/ib_recv.c33
-rw-r--r--net/rds/ib_send.c13
-rw-r--r--net/rds/loop.c7
-rw-r--r--net/rds/message.c1
-rw-r--r--net/rds/rdma.c6
-rw-r--r--net/rds/rdma_transport.c95
-rw-r--r--net/rds/rdma_transport.h5
-rw-r--r--net/rds/rds.h88
-rw-r--r--net/rds/recv.c78
-rw-r--r--net/rds/send.c116
-rw-r--r--net/rds/tcp.c154
-rw-r--r--net/rds/tcp.h2
-rw-r--r--net/rds/tcp_connect.c68
-rw-r--r--net/rds/tcp_listen.c87
-rw-r--r--net/rds/tcp_recv.c9
-rw-r--r--net/rds/tcp_send.c4
-rw-r--r--net/rds/threads.c69
-rw-r--r--net/rds/transport.c16
-rw-r--r--net/rfkill/core.c4
-rw-r--r--net/rxrpc/af_rxrpc.c2
-rw-r--r--net/rxrpc/ar-internal.h4
-rw-r--r--net/rxrpc/call_event.c2
-rw-r--r--net/rxrpc/call_object.c2
-rw-r--r--net/rxrpc/conn_client.c3
-rw-r--r--net/rxrpc/conn_event.c17
-rw-r--r--net/rxrpc/conn_object.c4
-rw-r--r--net/rxrpc/input.c15
-rw-r--r--net/rxrpc/local_event.c5
-rw-r--r--net/rxrpc/local_object.c2
-rw-r--r--net/rxrpc/output.c32
-rw-r--r--net/rxrpc/peer_object.c2
-rw-r--r--net/rxrpc/proc.c22
-rw-r--r--net/rxrpc/recvmsg.c56
-rw-r--r--net/rxrpc/rxkad.c31
-rw-r--r--net/rxrpc/sysctl.c1
-rw-r--r--net/sched/Kconfig39
-rw-r--r--net/sched/Makefile5
-rw-r--r--net/sched/act_api.c442
-rw-r--r--net/sched/act_bpf.c54
-rw-r--r--net/sched/act_connmark.c36
-rw-r--r--net/sched/act_csum.c76
-rw-r--r--net/sched/act_gact.c47
-rw-r--r--net/sched/act_ife.c98
-rw-r--r--net/sched/act_ipt.c55
-rw-r--r--net/sched/act_mirred.c185
-rw-r--r--net/sched/act_nat.c36
-rw-r--r--net/sched/act_pedit.c142
-rw-r--r--net/sched/act_police.c56
-rw-r--r--net/sched/act_sample.c50
-rw-r--r--net/sched/act_simple.c43
-rw-r--r--net/sched/act_skbedit.c175
-rw-r--r--net/sched/act_skbmod.c73
-rw-r--r--net/sched/act_tunnel_key.c327
-rw-r--r--net/sched/act_vlan.c92
-rw-r--r--net/sched/cls_api.c717
-rw-r--r--net/sched/cls_basic.c1
-rw-r--r--net/sched/cls_bpf.c43
-rw-r--r--net/sched/cls_flower.c647
-rw-r--r--net/sched/cls_matchall.c32
-rw-r--r--net/sched/cls_tcindex.c8
-rw-r--r--net/sched/cls_u32.c111
-rw-r--r--net/sched/sch_api.c11
-rw-r--r--net/sched/sch_cake.c3020
-rw-r--r--net/sched/sch_cbs.c134
-rw-r--r--net/sched/sch_etf.c484
-rw-r--r--net/sched/sch_htb.c13
-rw-r--r--net/sched/sch_netem.c73
-rw-r--r--net/sched/sch_skbprio.c320
-rw-r--r--net/sctp/Kconfig4
-rw-r--r--net/sctp/associola.c15
-rw-r--r--net/sctp/chunk.c6
-rw-r--r--net/sctp/input.c1
-rw-r--r--net/sctp/ipv6.c20
-rw-r--r--net/sctp/outqueue.c11
-rw-r--r--net/sctp/protocol.c16
-rw-r--r--net/sctp/sm_sideeffect.c1
-rw-r--r--net/sctp/socket.c249
-rw-r--r--net/sctp/stream.c153
-rw-r--r--net/sctp/stream_interleave.c20
-rw-r--r--net/sctp/stream_sched.c13
-rw-r--r--net/sctp/stream_sched_prio.c22
-rw-r--r--net/sctp/stream_sched_rr.c8
-rw-r--r--net/smc/Makefile2
-rw-r--r--net/smc/af_smc.c310
-rw-r--r--net/smc/smc.h9
-rw-r--r--net/smc/smc_cdc.c113
-rw-r--r--net/smc/smc_cdc.h86
-rw-r--r--net/smc/smc_clc.c197
-rw-r--r--net/smc/smc_clc.h99
-rw-r--r--net/smc/smc_core.c349
-rw-r--r--net/smc/smc_core.h85
-rw-r--r--net/smc/smc_diag.c33
-rw-r--r--net/smc/smc_ib.c170
-rw-r--r--net/smc/smc_ib.h7
-rw-r--r--net/smc/smc_ism.c348
-rw-r--r--net/smc/smc_ism.h48
-rw-r--r--net/smc/smc_llc.c80
-rw-r--r--net/smc/smc_llc.h7
-rw-r--r--net/smc/smc_pnet.c171
-rw-r--r--net/smc/smc_pnet.h19
-rw-r--r--net/smc/smc_rx.c21
-rw-r--r--net/smc/smc_tx.c239
-rw-r--r--net/smc/smc_tx.h6
-rw-r--r--net/smc/smc_wr.c32
-rw-r--r--net/socket.c57
-rw-r--r--net/strparser/strparser.c30
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c2
-rw-r--r--net/tipc/bcast.c2
-rw-r--r--net/tipc/bearer.c2
-rw-r--r--net/tipc/group.c41
-rw-r--r--net/tipc/group.h1
-rw-r--r--net/tipc/link.c134
-rw-r--r--net/tipc/link.h2
-rw-r--r--net/tipc/monitor.c3
-rw-r--r--net/tipc/msg.c35
-rw-r--r--net/tipc/name_table.c2
-rw-r--r--net/tipc/node.c90
-rw-r--r--net/tipc/node.h14
-rw-r--r--net/tipc/socket.c12
-rw-r--r--net/tls/tls_device.c304
-rw-r--r--net/tls/tls_device_fallback.c11
-rw-r--r--net/tls/tls_main.c32
-rw-r--r--net/tls/tls_sw.c347
-rw-r--r--net/unix/af_unix.c11
-rw-r--r--net/wimax/Makefile2
-rw-r--r--net/wimax/debugfs.c2
-rw-r--r--net/wimax/op-msg.c1
-rw-r--r--net/wimax/stack.c4
-rw-r--r--net/wireless/core.c21
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/lib80211_crypt_tkip.c55
-rw-r--r--net/wireless/nl80211.c194
-rw-r--r--net/wireless/sysfs.c4
-rw-r--r--net/wireless/util.c87
-rw-r--r--net/wireless/wext-compat.c10
-rw-r--r--net/x25/Kconfig2
-rw-r--r--net/x25/x25_subr.c1
-rw-r--r--net/xdp/xdp_umem.c70
-rw-r--r--net/xfrm/Kconfig9
-rw-r--r--net/xfrm/Makefile1
-rw-r--r--net/xfrm/xfrm_device.c19
-rw-r--r--net/xfrm/xfrm_input.c5
-rw-r--r--net/xfrm/xfrm_interface.c975
-rw-r--r--net/xfrm/xfrm_output.c3
-rw-r--r--net/xfrm/xfrm_policy.c314
-rw-r--r--net/xfrm/xfrm_state.c48
-rw-r--r--net/xfrm/xfrm_user.c95
-rw-r--r--samples/bpf/Makefile47
-rw-r--r--samples/bpf/bpf_load.c3
-rw-r--r--samples/bpf/hash_func01.h55
-rw-r--r--samples/bpf/test_cgrp2_attach2.c21
-rw-r--r--samples/bpf/test_cgrp2_sock2.c2
-rw-r--r--samples/bpf/xdp_fwd_user.c34
-rw-r--r--samples/bpf/xdp_redirect_cpu_kern.c114
-rw-r--r--samples/bpf/xdp_redirect_cpu_user.c4
-rw-r--r--samples/bpf/xdp_rxq_info_kern.c43
-rw-r--r--samples/bpf/xdp_rxq_info_user.c45
-rw-r--r--samples/bpf/xdp_sample_pkts_kern.c66
-rw-r--r--samples/bpf/xdp_sample_pkts_user.c169
-rw-r--r--samples/bpf/xdpsock_user.c43
-rw-r--r--samples/seccomp/Makefile6
-rw-r--r--scripts/.gitignore1
-rw-r--r--scripts/Kbuild.include11
-rw-r--r--scripts/Makefile5
-rw-r--r--scripts/Makefile.build7
-rw-r--r--scripts/Makefile.clean1
-rw-r--r--scripts/Makefile.gcc-plugins37
-rw-r--r--scripts/Makefile.host24
-rw-r--r--scripts/Makefile.lib7
-rw-r--r--scripts/Makefile.modbuiltin4
-rw-r--r--scripts/basic/.gitignore1
-rw-r--r--scripts/basic/Makefile1
-rw-r--r--scripts/bin2c.c (renamed from scripts/basic/bin2c.c)0
-rwxr-xr-xscripts/coccicheck5
-rw-r--r--scripts/coccinelle/api/atomic_as_refcounter.cocci129
-rw-r--r--scripts/coccinelle/tests/doubletest.cocci34
-rwxr-xr-xscripts/depmod.sh8
-rwxr-xr-xscripts/documentation-file-ref-check6
-rw-r--r--scripts/gcc-plugins/Kconfig142
-rw-r--r--scripts/gcc-plugins/Makefile5
-rw-r--r--scripts/gcc-plugins/gcc-common.h26
-rw-r--r--scripts/kconfig/Makefile25
-rw-r--r--scripts/kconfig/check-pkgconfig.sh8
-rw-r--r--scripts/kconfig/conf.c39
-rw-r--r--scripts/kconfig/confdata.c156
-rw-r--r--scripts/kconfig/expr.h3
-rw-r--r--scripts/kconfig/gconf.c5
-rw-r--r--scripts/kconfig/lkc.h2
-rw-r--r--scripts/kconfig/lkc_proto.h4
-rw-r--r--scripts/kconfig/mconf.c10
-rw-r--r--scripts/kconfig/menu.c2
-rw-r--r--scripts/kconfig/nconf.c8
-rw-r--r--scripts/kconfig/qconf.cc3
-rw-r--r--scripts/kconfig/symbol.c13
-rw-r--r--scripts/kconfig/util.c30
-rw-r--r--scripts/kconfig/zconf.y6
-rwxr-xr-xscripts/kernel-doc20
-rw-r--r--scripts/mod/modpost.c3
-rwxr-xr-xscripts/package/buildtar12
-rwxr-xr-xscripts/package/mkdebian68
-rwxr-xr-xscripts/tracing/draw_functrace.py2
-rw-r--r--security/Kconfig3
-rw-r--r--security/apparmor/lsm.c4
-rw-r--r--security/integrity/digsig_asymmetric.c23
-rw-r--r--security/integrity/evm/Kconfig1
-rw-r--r--security/integrity/evm/evm.h10
-rw-r--r--security/integrity/evm/evm_crypto.c50
-rw-r--r--security/integrity/evm/evm_main.c19
-rw-r--r--security/integrity/evm/evm_secfs.c4
-rw-r--r--security/integrity/iint.c9
-rw-r--r--security/integrity/ima/Kconfig59
-rw-r--r--security/integrity/ima/ima.h7
-rw-r--r--security/integrity/ima/ima_appraise.c4
-rw-r--r--security/integrity/ima/ima_crypto.c4
-rw-r--r--security/integrity/ima/ima_init.c16
-rw-r--r--security/integrity/ima/ima_main.c84
-rw-r--r--security/integrity/ima/ima_policy.c57
-rw-r--r--security/integrity/ima/ima_queue.c4
-rw-r--r--security/integrity/integrity.h15
-rw-r--r--security/integrity/integrity_audit.c6
-rw-r--r--security/keys/dh.c2
-rw-r--r--security/loadpin/loadpin.c6
-rw-r--r--security/security.c24
-rw-r--r--security/selinux/avc.c2
-rw-r--r--security/selinux/hooks.c87
-rw-r--r--security/selinux/netif.c11
-rw-r--r--security/selinux/netlink.c2
-rw-r--r--security/selinux/netnode.c5
-rw-r--r--security/selinux/netport.c5
-rw-r--r--security/selinux/nlmsgtab.c2
-rw-r--r--security/selinux/selinuxfs.c45
-rw-r--r--security/selinux/ss/avtab.c51
-rw-r--r--security/selinux/ss/conditional.c16
-rw-r--r--security/selinux/ss/ebitmap.c15
-rw-r--r--security/selinux/ss/policydb.c91
-rw-r--r--security/selinux/ss/services.c71
-rw-r--r--security/selinux/ss/sidtab.c5
-rw-r--r--security/smack/smack_lsm.c28
-rw-r--r--security/tomoyo/Makefile2
-rw-r--r--security/tomoyo/tomoyo.c2
-rw-r--r--sound/ac97/bus.c26
-rw-r--r--sound/aoa/core/gpio-feature.c4
-rw-r--r--sound/arm/Kconfig5
-rw-r--r--sound/arm/Makefile3
-rw-r--r--sound/arm/pxa2xx-ac97-lib.c12
-rw-r--r--sound/arm/pxa2xx-ac97.c124
-rw-r--r--sound/arm/pxa2xx-pcm-lib.c75
-rw-r--r--sound/arm/pxa2xx-pcm.c129
-rw-r--r--sound/arm/pxa2xx-pcm.h27
-rw-r--r--sound/core/compress_offload.c12
-rw-r--r--sound/core/memalloc.c8
-rw-r--r--sound/core/oss/pcm_oss.c2
-rw-r--r--sound/core/oss/pcm_plugin.c9
-rw-r--r--sound/core/pcm.c7
-rw-r--r--sound/core/pcm_lib.c38
-rw-r--r--sound/core/pcm_local.h2
-rw-r--r--sound/core/pcm_native.c10
-rw-r--r--sound/core/rawmidi.c249
-rw-r--r--sound/core/seq/oss/seq_oss.c2
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_timer.c2
-rw-r--r--sound/core/seq/seq.c33
-rw-r--r--sound/core/seq/seq_clientmgr.c30
-rw-r--r--sound/core/seq/seq_info.c10
-rw-r--r--sound/core/seq/seq_info.h6
-rw-r--r--sound/core/seq/seq_memory.c12
-rw-r--r--sound/core/seq/seq_memory.h6
-rw-r--r--sound/core/seq/seq_midi.c24
-rw-r--r--sound/core/seq/seq_midi_emul.c14
-rw-r--r--sound/core/seq/seq_midi_event.c87
-rw-r--r--sound/core/seq/seq_queue.c12
-rw-r--r--sound/core/seq/seq_queue.h27
-rw-r--r--sound/core/seq/seq_virmidi.c133
-rw-r--r--sound/core/timer.c5
-rw-r--r--sound/drivers/aloop.c1
-rw-r--r--sound/drivers/mpu401/mpu401_uart.c16
-rw-r--r--sound/drivers/opl3/opl3_drums.c2
-rw-r--r--sound/drivers/opl3/opl3_lib.c19
-rw-r--r--sound/drivers/opl3/opl3_midi.c17
-rw-r--r--sound/drivers/opl3/opl3_oss.c8
-rw-r--r--sound/drivers/opl3/opl3_synth.c1
-rw-r--r--sound/drivers/opl3/opl3_voice.h4
-rw-r--r--sound/drivers/opl4/opl4_lib.c12
-rw-r--r--sound/drivers/vx/vx_core.c15
-rw-r--r--sound/drivers/vx/vx_pcm.c2
-rw-r--r--sound/firewire/bebob/bebob_pcm.c1
-rw-r--r--sound/firewire/dice/dice-alesis.c2
-rw-r--r--sound/firewire/dice/dice-pcm.c2
-rw-r--r--sound/firewire/digi00x/digi00x-pcm.c1
-rw-r--r--sound/firewire/fireface/ff-pcm.c1
-rw-r--r--sound/firewire/fireworks/fireworks_pcm.c1
-rw-r--r--sound/firewire/isight.c1
-rw-r--r--sound/firewire/motu/motu-pcm.c2
-rw-r--r--sound/firewire/motu/motu-protocol-v2.c64
-rw-r--r--sound/firewire/motu/motu-protocol-v3.c19
-rw-r--r--sound/firewire/motu/motu.c19
-rw-r--r--sound/firewire/motu/motu.h5
-rw-r--r--sound/firewire/oxfw/oxfw-pcm.c2
-rw-r--r--sound/firewire/tascam/tascam-pcm.c1
-rw-r--r--sound/hda/Kconfig7
-rw-r--r--sound/hda/Makefile1
-rw-r--r--sound/hda/ext/hdac_ext_bus.c80
-rw-r--r--sound/hda/ext/hdac_ext_controller.c64
-rw-r--r--sound/hda/ext/hdac_ext_stream.c104
-rw-r--r--sound/hda/hdac_component.c335
-rw-r--r--sound/hda/hdac_device.c2
-rw-r--r--sound/hda/hdac_i915.c353
-rw-r--r--sound/hda/hdac_stream.c4
-rw-r--r--sound/i2c/cs8427.c12
-rw-r--r--sound/i2c/i2c.c13
-rw-r--r--sound/i2c/other/ak4xxx-adda.c12
-rw-r--r--sound/i2c/tea6330t.c16
-rw-r--r--sound/isa/Kconfig2
-rw-r--r--sound/isa/ad1816a/ad1816a_lib.c3
-rw-r--r--sound/isa/es1688/es1688.c2
-rw-r--r--sound/isa/es1688/es1688_lib.c16
-rw-r--r--sound/isa/es18xx.c1
-rw-r--r--sound/isa/galaxy/galaxy.c3
-rw-r--r--sound/isa/gus/gus_io.c2
-rw-r--r--sound/isa/gus/gus_main.c16
-rw-r--r--sound/isa/gus/gus_reset.c2
-rw-r--r--sound/isa/msnd/msnd.c18
-rw-r--r--sound/isa/msnd/msnd.h2
-rw-r--r--sound/isa/msnd/msnd_midi.c2
-rw-r--r--sound/isa/msnd/msnd_pinnacle.c8
-rw-r--r--sound/isa/opti9xx/miro.c5
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c3
-rw-r--r--sound/isa/sb/emu8000_patch.c7
-rw-r--r--sound/isa/sb/emu8000_pcm.c2
-rw-r--r--sound/isa/sb/sb16_csp.c52
-rw-r--r--sound/isa/sb/sb16_main.c25
-rw-r--r--sound/isa/sb/sb8_main.c19
-rw-r--r--sound/isa/sb/sb_common.c16
-rw-r--r--sound/isa/wss/wss_lib.c18
-rw-r--r--sound/mips/sgio2audio.c3
-rw-r--r--sound/pci/ac97/ac97_codec.c16
-rw-r--r--sound/pci/ali5451/ali5451.c5
-rw-r--r--sound/pci/asihpi/asihpi.c24
-rw-r--r--sound/pci/asihpi/hpi6205.c5
-rw-r--r--sound/pci/atiixp.c4
-rw-r--r--sound/pci/atiixp_modem.c4
-rw-r--r--sound/pci/au88x0/au88x0.h2
-rw-r--r--sound/pci/au88x0/au88x0_core.c2
-rw-r--r--sound/pci/bt87x.c4
-rw-r--r--sound/pci/cs46xx/dsp_spos_scb_lib.c3
-rw-r--r--sound/pci/cs5535audio/cs5535audio.c7
-rw-r--r--sound/pci/cs5535audio/cs5535audio.h6
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c4
-rw-r--r--sound/pci/ctxfi/cthw20k1.c6
-rw-r--r--sound/pci/ctxfi/cthw20k2.c12
-rw-r--r--sound/pci/ctxfi/ctmixer.c15
-rw-r--r--sound/pci/echoaudio/echoaudio.c2
-rw-r--r--sound/pci/echoaudio/echoaudio.h2
-rw-r--r--sound/pci/echoaudio/echoaudio_3g.c14
-rw-r--r--sound/pci/echoaudio/echoaudio_dsp.c6
-rw-r--r--sound/pci/echoaudio/echoaudio_dsp.h50
-rw-r--r--sound/pci/echoaudio/echoaudio_gml.c8
-rw-r--r--sound/pci/emu10k1/emu10k1_patch.c7
-rw-r--r--sound/pci/emu10k1/emufx.c24
-rw-r--r--sound/pci/emu10k1/emupcm.c7
-rw-r--r--sound/pci/ens1370.c3
-rw-r--r--sound/pci/hda/dell_wmi_helper.c116
-rw-r--r--sound/pci/hda/hda_codec.c97
-rw-r--r--sound/pci/hda/hda_codec.h6
-rw-r--r--sound/pci/hda/hda_generic.c144
-rw-r--r--sound/pci/hda/hda_generic.h16
-rw-r--r--sound/pci/hda/hda_intel.c18
-rw-r--r--sound/pci/hda/patch_analog.c4
-rw-r--r--sound/pci/hda/patch_ca0132.c279
-rw-r--r--sound/pci/hda/patch_cirrus.c29
-rw-r--r--sound/pci/hda/patch_conexant.c104
-rw-r--r--sound/pci/hda/patch_hdmi.c128
-rw-r--r--sound/pci/hda/patch_realtek.c853
-rw-r--r--sound/pci/hda/patch_sigmatel.c31
-rw-r--r--sound/pci/hda/patch_via.c294
-rw-r--r--sound/pci/hda/thinkpad_helper.c27
-rw-r--r--sound/pci/ice1712/ak4xxx.c12
-rw-r--r--sound/pci/ice1712/prodigy_hifi.c21
-rw-r--r--sound/pci/intel8x0.c6
-rw-r--r--sound/pci/intel8x0m.c6
-rw-r--r--sound/pci/korg1212/korg1212.c4
-rw-r--r--sound/pci/lola/lola.c4
-rw-r--r--sound/pci/lola/lola.h4
-rw-r--r--sound/pci/lola/lola_pcm.c8
-rw-r--r--sound/pci/maestro3.c6
-rw-r--r--sound/pci/mixart/mixart.c1
-rw-r--r--sound/pci/mixart/mixart_core.c6
-rw-r--r--sound/pci/mixart/mixart_hwdep.c42
-rw-r--r--sound/pci/mixart/mixart_hwdep.h8
-rw-r--r--sound/pci/riptide/riptide.c10
-rw-r--r--sound/pci/sonicvibes.c2
-rw-r--r--sound/pci/trident/trident.c2
-rw-r--r--sound/pci/trident/trident.h2
-rw-r--r--sound/pci/trident/trident_main.c2
-rw-r--r--sound/pci/vx222/vx222_ops.c8
-rw-r--r--sound/pci/ymfpci/ymfpci.h78
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c6
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c1
-rw-r--r--sound/pcmcia/vx/vxp_ops.c10
-rw-r--r--sound/soc/Kconfig1
-rw-r--r--sound/soc/Makefile1
-rw-r--r--sound/soc/amd/Kconfig1
-rw-r--r--sound/soc/amd/acp-da7219-max98357a.c109
-rw-r--r--sound/soc/amd/acp-pcm-dma.c214
-rw-r--r--sound/soc/amd/acp.h13
-rw-r--r--sound/soc/atmel/atmel-i2s.c46
-rw-r--r--sound/soc/codecs/Kconfig25
-rw-r--r--sound/soc/codecs/Makefile8
-rw-r--r--sound/soc/codecs/adau17x1.c1
-rw-r--r--sound/soc/codecs/adav80x.c1
-rw-r--r--sound/soc/codecs/ak4458.c2
-rw-r--r--sound/soc/codecs/ak4554.c17
-rw-r--r--sound/soc/codecs/ak4613.c26
-rw-r--r--sound/soc/codecs/ak4642.c26
-rw-r--r--sound/soc/codecs/ak5558.c4
-rw-r--r--sound/soc/codecs/cs4270.c2
-rw-r--r--sound/soc/codecs/cs47l24.c11
-rw-r--r--sound/soc/codecs/cx20442.c23
-rw-r--r--sound/soc/codecs/da7210.c27
-rw-r--r--sound/soc/codecs/da7213.c4
-rw-r--r--sound/soc/codecs/da7219-aad.c5
-rw-r--r--sound/soc/codecs/da7219.c48
-rw-r--r--sound/soc/codecs/da7219.h8
-rw-r--r--sound/soc/codecs/da9055.c4
-rw-r--r--sound/soc/codecs/es7134.c227
-rw-r--r--sound/soc/codecs/es7241.c322
-rw-r--r--sound/soc/codecs/hdac_hdmi.c495
-rw-r--r--sound/soc/codecs/hdmi-codec.c21
-rw-r--r--sound/soc/codecs/max98373.c1
-rw-r--r--sound/soc/codecs/max9850.c4
-rw-r--r--sound/soc/codecs/nau8540.c3
-rw-r--r--sound/soc/codecs/nau8824.c2
-rw-r--r--sound/soc/codecs/nau8825.c2
-rw-r--r--sound/soc/codecs/pcm1789.c3
-rw-r--r--sound/soc/codecs/pcm186x.c2
-rw-r--r--sound/soc/codecs/rt1305.c15
-rw-r--r--sound/soc/codecs/rt5514-spi.c1
-rw-r--r--sound/soc/codecs/rt5514.c8
-rw-r--r--sound/soc/codecs/rt5631.c12
-rw-r--r--sound/soc/codecs/rt5640.c2
-rw-r--r--sound/soc/codecs/rt5651.c235
-rw-r--r--sound/soc/codecs/rt5651.h8
-rw-r--r--sound/soc/codecs/rt5677.c3
-rw-r--r--sound/soc/codecs/rt5682.c2681
-rw-r--r--sound/soc/codecs/rt5682.h1324
-rw-r--r--sound/soc/codecs/simple-amplifier.c (renamed from sound/soc/codecs/dio2125.c)42
-rw-r--r--sound/soc/codecs/tas571x.c110
-rw-r--r--sound/soc/codecs/tas571x.h16
-rw-r--r--sound/soc/codecs/tda7419.c4
-rw-r--r--sound/soc/codecs/tscs42xx.c37
-rw-r--r--sound/soc/codecs/tscs42xx.h8
-rw-r--r--sound/soc/codecs/twl6040.c2
-rw-r--r--sound/soc/codecs/wm2200.c10
-rw-r--r--sound/soc/codecs/wm5100-tables.c12
-rw-r--r--sound/soc/codecs/wm5102.c10
-rw-r--r--sound/soc/codecs/wm5110.c13
-rw-r--r--sound/soc/codecs/wm8903.c4
-rw-r--r--sound/soc/codecs/wm8904.c1
-rw-r--r--sound/soc/codecs/wm8955.c1
-rw-r--r--sound/soc/codecs/wm8960.c1
-rw-r--r--sound/soc/codecs/wm8961.c1
-rw-r--r--sound/soc/codecs/wm8962.c1
-rw-r--r--sound/soc/codecs/wm8988.c4
-rw-r--r--sound/soc/codecs/wm8990.c4
-rw-r--r--sound/soc/codecs/wm8994.c1
-rw-r--r--sound/soc/codecs/wm8995.c1
-rw-r--r--sound/soc/codecs/wm8996.c9
-rw-r--r--sound/soc/codecs/wm9081.c1
-rw-r--r--sound/soc/codecs/wm_adsp.c216
-rw-r--r--sound/soc/codecs/wm_adsp.h12
-rw-r--r--sound/soc/codecs/wmfw.h1
-rw-r--r--sound/soc/davinci/davinci-i2s.c1
-rw-r--r--sound/soc/davinci/davinci-mcasp.c16
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c20
-rw-r--r--sound/soc/fsl/fsl_asrc.c18
-rw-r--r--sound/soc/fsl/fsl_asrc.h5
-rw-r--r--sound/soc/fsl/fsl_asrc_dma.c18
-rw-r--r--sound/soc/fsl/fsl_esai.c1
-rw-r--r--sound/soc/fsl/fsl_spdif.c2
-rw-r--r--sound/soc/fsl/fsl_utils.c18
-rw-r--r--sound/soc/fsl/fsl_utils.h7
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c15
-rw-r--r--sound/soc/generic/audio-graph-card.c41
-rw-r--r--sound/soc/generic/audio-graph-scu-card.c25
-rw-r--r--sound/soc/generic/simple-card-utils.c74
-rw-r--r--sound/soc/generic/simple-card.c106
-rw-r--r--sound/soc/generic/simple-scu-card.c21
-rw-r--r--sound/soc/intel/atom/sst/sst_drv_interface.c29
-rw-r--r--sound/soc/intel/atom/sst/sst_loader.c6
-rw-r--r--sound/soc/intel/boards/Kconfig14
-rw-r--r--sound/soc/intel/boards/Makefile2
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c4
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c20
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c2
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c55
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c364
-rw-r--r--sound/soc/intel/boards/glk_rt5682_max98357a.c643
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98357a.c3
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c4
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c4
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_max98357a.c2
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_ssm4567.c2
-rw-r--r--sound/soc/intel/boards/skl_rt286.c2
-rw-r--r--sound/soc/intel/common/Makefile6
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-bxt-match.c59
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-byt-match.c40
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cht-match.c56
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cnl-match.c32
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-glk-match.c41
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c16
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-kbl-match.c91
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-skl-match.c47
-rw-r--r--sound/soc/intel/common/sst-firmware.c6
-rw-r--r--sound/soc/intel/haswell/sst-haswell-dsp.c53
-rw-r--r--sound/soc/intel/skylake/skl-messages.c50
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c8
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c119
-rw-r--r--sound/soc/intel/skylake/skl-sst-cldma.c8
-rw-r--r--sound/soc/intel/skylake/skl-sst-cldma.h2
-rw-r--r--sound/soc/intel/skylake/skl-topology.c28
-rw-r--r--sound/soc/intel/skylake/skl-topology.h11
-rw-r--r--sound/soc/intel/skylake/skl.c360
-rw-r--r--sound/soc/intel/skylake/skl.h7
-rw-r--r--sound/soc/mediatek/common/mtk-afe-platform-driver.c64
-rw-r--r--sound/soc/mediatek/common/mtk-base-afe.h6
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-afe-common.h1
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-afe-pcm.c65
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-dai-adda.c20
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-dai-hostless.c16
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-dai-pcm.c19
-rw-r--r--sound/soc/meson/Kconfig65
-rw-r--r--sound/soc/meson/Makefile21
-rw-r--r--sound/soc/meson/axg-card.c671
-rw-r--r--sound/soc/meson/axg-fifo.c341
-rw-r--r--sound/soc/meson/axg-fifo.h80
-rw-r--r--sound/soc/meson/axg-frddr.c141
-rw-r--r--sound/soc/meson/axg-spdifout.c456
-rw-r--r--sound/soc/meson/axg-tdm-formatter.c381
-rw-r--r--sound/soc/meson/axg-tdm-formatter.h39
-rw-r--r--sound/soc/meson/axg-tdm-interface.c542
-rw-r--r--sound/soc/meson/axg-tdm.h78
-rw-r--r--sound/soc/meson/axg-tdmin.c229
-rw-r--r--sound/soc/meson/axg-tdmout.c259
-rw-r--r--sound/soc/meson/axg-toddr.c199
-rw-r--r--sound/soc/omap/omap-abe-twl6040.c2
-rw-r--r--sound/soc/omap/omap-dmic.c2
-rw-r--r--sound/soc/omap/omap-mcpdm.c4
-rw-r--r--sound/soc/pxa/Kconfig6
-rw-r--r--sound/soc/pxa/magician.c106
-rw-r--r--sound/soc/pxa/pxa-ssp.c181
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c47
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c9
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c73
-rw-r--r--sound/soc/pxa/zylonite.c9
-rw-r--r--sound/soc/qcom/Kconfig14
-rw-r--r--sound/soc/qcom/Makefile4
-rw-r--r--sound/soc/qcom/apq8096.c188
-rw-r--r--sound/soc/qcom/common.c112
-rw-r--r--sound/soc/qcom/common.h11
-rw-r--r--sound/soc/qcom/lpass-platform.c2
-rw-r--r--sound/soc/qcom/qdsp6/q6adm.c16
-rw-r--r--sound/soc/qcom/qdsp6/q6afe-dai.c225
-rw-r--r--sound/soc/qcom/qdsp6/q6afe.c43
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c42
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.c17
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c71
-rw-r--r--sound/soc/qcom/sdm845.c285
-rw-r--r--sound/soc/rockchip/Makefile3
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c3
-rw-r--r--sound/soc/rockchip/rockchip_pcm.c45
-rw-r--r--sound/soc/rockchip/rockchip_pcm.h14
-rw-r--r--sound/soc/rockchip/rockchip_rt5645.c27
-rw-r--r--sound/soc/samsung/i2s.c1
-rw-r--r--sound/soc/sh/Kconfig1
-rw-r--r--sound/soc/sh/dma-sh7760.c26
-rw-r--r--sound/soc/sh/fsi.c22
-rw-r--r--sound/soc/sh/hac.c20
-rw-r--r--sound/soc/sh/migor.c14
-rw-r--r--sound/soc/sh/rcar/Makefile1
-rw-r--r--sound/soc/sh/rcar/adg.c15
-rw-r--r--sound/soc/sh/rcar/cmd.c19
-rw-r--r--sound/soc/sh/rcar/core.c41
-rw-r--r--sound/soc/sh/rcar/ctu.c15
-rw-r--r--sound/soc/sh/rcar/dma.c17
-rw-r--r--sound/soc/sh/rcar/dvc.c16
-rw-r--r--sound/soc/sh/rcar/gen.c16
-rw-r--r--sound/soc/sh/rcar/mix.c14
-rw-r--r--sound/soc/sh/rcar/rsnd.h17
-rw-r--r--sound/soc/sh/rcar/src.c16
-rw-r--r--sound/soc/sh/rcar/ssi.c59
-rw-r--r--sound/soc/sh/rcar/ssiu.c15
-rw-r--r--sound/soc/sh/sh7760-ac97.c14
-rw-r--r--sound/soc/sh/siu.h26
-rw-r--r--sound/soc/sh/siu_dai.c26
-rw-r--r--sound/soc/sh/siu_pcm.c27
-rw-r--r--sound/soc/sh/ssi.c21
-rw-r--r--sound/soc/sirf/sirf-usp.c7
-rw-r--r--sound/soc/soc-ac97.c29
-rw-r--r--sound/soc/soc-acpi.c20
-rw-r--r--sound/soc/soc-compress.c120
-rw-r--r--sound/soc/soc-core.c200
-rw-r--r--sound/soc/soc-dapm.c51
-rw-r--r--sound/soc/soc-devres.c15
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c30
-rw-r--r--sound/soc/soc-io.c19
-rw-r--r--sound/soc/soc-jack.c19
-rw-r--r--sound/soc/soc-ops.c29
-rw-r--r--sound/soc/soc-pcm.c469
-rw-r--r--sound/soc/soc-topology.c91
-rw-r--r--sound/soc/soc-utils.c24
-rw-r--r--sound/soc/sti/uniperif_player.c6
-rw-r--r--sound/soc/sti/uniperif_reader.c2
-rw-r--r--sound/soc/stm/Kconfig1
-rw-r--r--sound/soc/stm/stm32_adfsdm.c10
-rw-r--r--sound/soc/stm/stm32_sai_sub.c146
-rw-r--r--sound/soc/tegra/tegra20_ac97.c2
-rw-r--r--sound/soc/tegra/tegra30_i2s.h2
-rw-r--r--sound/soc/tegra/tegra_alc5632.c17
-rw-r--r--sound/soc/tegra/tegra_rt5677.c17
-rw-r--r--sound/soc/uniphier/aio-core.c84
-rw-r--r--sound/soc/uniphier/aio-cpu.c5
-rw-r--r--sound/soc/uniphier/aio-ld11.c2
-rw-r--r--sound/soc/uniphier/aio-reg.h1
-rw-r--r--sound/soc/uniphier/aio.h6
-rw-r--r--sound/soc/zte/zx-tdm.c4
-rw-r--r--sound/synth/emux/emux.c17
-rw-r--r--sound/synth/util_mem.c16
-rw-r--r--sound/usb/6fire/pcm.c1
-rw-r--r--sound/usb/Makefile1
-rw-r--r--sound/usb/caiaq/audio.c6
-rw-r--r--sound/usb/card.c9
-rw-r--r--sound/usb/card.h2
-rw-r--r--sound/usb/clock.c24
-rw-r--r--sound/usb/endpoint.c2
-rw-r--r--sound/usb/hiface/pcm.c1
-rw-r--r--sound/usb/line6/toneport.c5
-rw-r--r--sound/usb/midi.c5
-rw-r--r--sound/usb/misc/ua101.c2
-rw-r--r--sound/usb/mixer.c214
-rw-r--r--sound/usb/mixer.h2
-rw-r--r--sound/usb/mixer_quirks.c2
-rw-r--r--sound/usb/pcm.c71
-rw-r--r--sound/usb/pcm.h2
-rw-r--r--sound/usb/power.c104
-rw-r--r--sound/usb/power.h19
-rw-r--r--sound/usb/quirks-table.h3
-rw-r--r--sound/usb/quirks.c16
-rw-r--r--sound/usb/stream.c70
-rw-r--r--sound/x86/intel_hdmi_audio.c4
-rw-r--r--sound/xen/xen_snd_front_alsa.c4
-rw-r--r--tools/arch/arm64/include/uapi/asm/unistd.h20
-rw-r--r--tools/arch/parisc/include/uapi/asm/errno.h1
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h3
-rw-r--r--tools/bpf/.gitignore5
-rw-r--r--tools/bpf/Makefile.helpers59
-rw-r--r--tools/bpf/bpftool/.gitignore2
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile13
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst12
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst33
-rw-r--r--tools/bpf/bpftool/Makefile10
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool134
-rw-r--r--tools/bpf/bpftool/btf_dumper.c251
-rw-r--r--tools/bpf/bpftool/cgroup.c170
-rw-r--r--tools/bpf/bpftool/common.c2
-rw-r--r--tools/bpf/bpftool/main.c4
-rw-r--r--tools/bpf/bpftool/main.h36
-rw-r--r--tools/bpf/bpftool/map.c224
-rw-r--r--tools/bpf/bpftool/prog.c249
-rw-r--r--tools/bpf/bpftool/xlated_dumper.c6
-rw-r--r--tools/build/Build.include2
-rw-r--r--tools/build/Makefile2
-rw-r--r--tools/build/Makefile.feature1
-rw-r--r--tools/build/feature/Makefile4
-rw-r--r--tools/build/feature/test-reallocarray.c8
-rw-r--r--tools/include/linux/compiler-gcc.h4
-rw-r--r--tools/include/linux/overflow.h278
-rw-r--r--tools/include/tools/libc_compat.h20
-rw-r--r--tools/include/uapi/asm-generic/unistd.h783
-rw-r--r--tools/include/uapi/linux/bpf.h104
-rw-r--r--tools/include/uapi/linux/in.h301
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/Makefile6
-rw-r--r--tools/lib/bpf/bpf.c1
-rw-r--r--tools/lib/bpf/bpf.h1
-rw-r--r--tools/lib/bpf/btf.c43
-rw-r--r--tools/lib/bpf/btf.h2
-rw-r--r--tools/lib/bpf/libbpf.c295
-rw-r--r--tools/lib/bpf/libbpf.h16
-rw-r--r--tools/lib/bpf/libbpf_errno.c74
-rw-r--r--tools/memory-model/Documentation/explanation.txt2
-rw-r--r--tools/memory-model/Documentation/recipes.txt12
-rw-r--r--tools/memory-model/README20
-rw-r--r--tools/memory-model/linux-kernel.bell2
-rw-r--r--tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus (renamed from tools/memory-model/litmus-tests/IRIW+mbonceonces+OnceOnce.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus2
-rw-r--r--tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus (renamed from tools/memory-model/litmus-tests/LB+ctrlonceonce+mbonceonce.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus (renamed from tools/memory-model/litmus-tests/MP+wmbonceonce+rmbonceonce.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/R+fencembonceonces.litmus (renamed from tools/memory-model/litmus-tests/R+mbonceonces.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/README25
-rw-r--r--tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus (renamed from tools/memory-model/litmus-tests/S+wmbonceonce+poacquireonce.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/SB+fencembonceonces.litmus (renamed from tools/memory-model/litmus-tests/SB+mbonceonces.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus32
-rw-r--r--tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus (renamed from tools/memory-model/litmus-tests/WRC+pooncerelease+rmbonceonce+Once.litmus)2
-rw-r--r--tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus (renamed from tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+mbonceonce.litmus)2
-rwxr-xr-x[-rw-r--r--]tools/memory-model/scripts/checkalllitmus.sh2
-rwxr-xr-x[-rw-r--r--]tools/memory-model/scripts/checklitmus.sh2
-rw-r--r--tools/objtool/Makefile4
-rw-r--r--tools/objtool/arch/x86/include/asm/orc_types.h2
-rw-r--r--tools/objtool/check.c1
-rw-r--r--tools/objtool/check.h2
-rw-r--r--tools/objtool/orc_dump.c3
-rw-r--r--tools/objtool/orc_gen.c2
-rw-r--r--tools/pci/pcitest.c51
-rw-r--r--tools/pci/pcitest.sh15
-rw-r--r--tools/perf/Documentation/perf-list.txt10
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/Makefile.config6
-rw-r--r--tools/perf/Makefile.perf10
-rw-r--r--tools/perf/arch/arm64/Makefile21
-rwxr-xr-xtools/perf/arch/arm64/entry/syscalls/mksyscalltbl62
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c10
-rw-r--r--tools/perf/arch/s390/util/kvm-stat.c2
-rw-r--r--tools/perf/builtin-c2c.c7
-rw-r--r--tools/perf/builtin-diff.c2
-rw-r--r--tools/perf/builtin-report.c4
-rw-r--r--tools/perf/builtin-stat.c60
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/builtin-trace.c19
-rwxr-xr-xtools/perf/check-headers.sh3
-rw-r--r--tools/perf/include/bpf/bpf.h3
-rw-r--r--tools/perf/perf.h2
-rw-r--r--tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json87
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/basic.json12
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/extended.json18
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/basic.json12
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/extended.json56
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/transaction.json7
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/basic.json8
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/extended.json53
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/transaction.json7
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/basic.json12
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/extended.json24
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/basic.json12
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json16
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/extended.json35
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json7
-rw-r--r--tools/perf/pmu-events/jevents.c2
-rw-r--r--tools/perf/tests/builtin-test.c2
-rw-r--r--tools/perf/tests/parse-events.c18
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh36
-rw-r--r--tools/perf/trace/beauty/Build1
-rw-r--r--tools/perf/trace/beauty/beauty.h3
-rwxr-xr-xtools/perf/trace/beauty/drm_ioctl.sh9
-rwxr-xr-xtools/perf/trace/beauty/kcmp_type.sh2
-rwxr-xr-xtools/perf/trace/beauty/kvm_ioctl.sh4
-rwxr-xr-xtools/perf/trace/beauty/madvise_behavior.sh2
-rwxr-xr-xtools/perf/trace/beauty/perf_ioctl.sh2
-rwxr-xr-xtools/perf/trace/beauty/pkey_alloc_access_rights.sh2
-rwxr-xr-xtools/perf/trace/beauty/sndrv_ctl_ioctl.sh4
-rwxr-xr-xtools/perf/trace/beauty/sndrv_pcm_ioctl.sh4
-rw-r--r--tools/perf/trace/beauty/socket.c28
-rwxr-xr-xtools/perf/trace/beauty/socket_ipproto.sh11
-rwxr-xr-xtools/perf/trace/beauty/vhost_virtio_ioctl.sh6
-rw-r--r--tools/perf/ui/stdio/hist.c8
-rw-r--r--tools/perf/util/bpf-loader.c4
-rw-r--r--tools/perf/util/comm.c16
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c10
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.h1
-rw-r--r--tools/perf/util/cs-etm.c71
-rw-r--r--tools/perf/util/evsel.c25
-rw-r--r--tools/perf/util/evsel.h9
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/hist.h2
-rw-r--r--tools/perf/util/machine.c79
-rw-r--r--tools/perf/util/metricgroup.c26
-rw-r--r--tools/perf/util/metricgroup.h1
-rw-r--r--tools/perf/util/pmu.c6
-rw-r--r--tools/perf/util/stat-shadow.c5
-rw-r--r--tools/perf/util/syscalltbl.c4
-rw-r--r--tools/perf/util/unwind-libdw.c2
-rw-r--r--tools/perf/util/unwind-libunwind-local.c2
-rw-r--r--tools/testing/selftests/bpf/Makefile15
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h12
-rw-r--r--tools/testing/selftests/bpf/bpf_util.h4
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.c6
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.h6
-rw-r--r--tools/testing/selftests/bpf/socket_cookie_prog.c60
-rwxr-xr-xtools/testing/selftests/bpf/tcp_client.py12
-rwxr-xr-xtools/testing/selftests/bpf/tcp_server.py16
-rw-r--r--tools/testing/selftests/bpf/test_align.c5
-rw-r--r--tools/testing/selftests/bpf/test_btf.c92
-rw-r--r--tools/testing/selftests/bpf/test_cgroup_storage.c130
-rw-r--r--tools/testing/selftests/bpf/test_maps.c262
-rwxr-xr-xtools/testing/selftests/bpf/test_offload.py232
-rw-r--r--tools/testing/selftests/bpf/test_select_reuseport.c688
-rw-r--r--tools/testing/selftests/bpf/test_select_reuseport_common.h36
-rw-r--r--tools/testing/selftests/bpf/test_select_reuseport_kern.c180
-rwxr-xr-xtools/testing/selftests/bpf/test_skb_cgroup_id.sh62
-rw-r--r--tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c47
-rw-r--r--tools/testing/selftests/bpf/test_skb_cgroup_id_user.c187
-rw-r--r--tools/testing/selftests/bpf/test_sock.c5
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c42
-rw-r--r--tools/testing/selftests/bpf/test_socket_cookie.c225
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf.h1
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf_kern.c17
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf_user.c119
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c177
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c48
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.h4
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh217
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh197
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh189
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh233
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/router_scale.sh167
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh366
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh119
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh117
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh13
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh55
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh18
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh19
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh134
-rw-r--r--tools/testing/selftests/gpio/gpio-mockup-chardev.c6
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile2
-rw-r--r--tools/testing/selftests/net/forwarding/README2
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_port_isolation.sh151
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh108
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_multipath.sh253
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh291
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh132
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh6
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh126
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh283
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_changes.sh11
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh285
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_gre_lib.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_nh.sh4
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh21
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_lib.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_bridge.sh113
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_bridge_vlan.sh132
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_broadcast.sh233
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_multipath.sh39
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_chains.sh86
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_shblocks.sh2
-rwxr-xr-xtools/testing/selftests/net/ip6_gre_headroom.sh65
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh128
-rw-r--r--tools/testing/selftests/net/tls.c692
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh26
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh11
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh5
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh7
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot4
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh2
-rw-r--r--tools/testing/selftests/rseq/param_test.c44
-rw-r--r--tools/testing/selftests/rseq/rseq-arm64.h594
-rw-r--r--tools/testing/selftests/rseq/rseq-s390.h513
-rw-r--r--tools/testing/selftests/rseq/rseq.h4
-rw-r--r--tools/testing/selftests/tc-testing/README16
-rw-r--r--tools/testing/selftests/tc-testing/config48
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/csum.json24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json3
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/nat.json593
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json26
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json917
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/fw.json1049
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/tests.json4
-rw-r--r--tools/testing/selftests/timers/raw_skew.c5
-rw-r--r--virt/kvm/arm/aarch32.c20
-rw-r--r--virt/kvm/arm/arm.c4
-rw-r--r--virt/kvm/arm/psci.c2
-rw-r--r--virt/kvm/async_pf.c2
-rw-r--r--virt/kvm/kvm_main.c4
7051 files changed, 339123 insertions, 128656 deletions
diff --git a/.clang-format b/.clang-format
index faffc0d5af4e..1d5da22e0ba5 100644
--- a/.clang-format
+++ b/.clang-format
@@ -382,7 +382,7 @@ IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: false
#IndentPPDirectives: None # Unknown to clang-format-5.0
IndentWidth: 8
-IndentWrappedFunctionNames: true
+IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
diff --git a/.mailmap b/.mailmap
index a2f4c8595835..2a6f685bf706 100644
--- a/.mailmap
+++ b/.mailmap
@@ -83,6 +83,9 @@ Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
Jean Tourrilhes <jt@hpl.hp.com>
Jeff Garzik <jgarzik@pretzel.yyz.us>
+Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
+Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
+Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
Jens Axboe <axboe@suse.de>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
diff --git a/Documentation/ABI/stable/sysfs-class-rfkill b/Documentation/ABI/stable/sysfs-class-rfkill
index e1ba4a104753..80151a409d67 100644
--- a/Documentation/ABI/stable/sysfs-class-rfkill
+++ b/Documentation/ABI/stable/sysfs-class-rfkill
@@ -11,7 +11,7 @@ KernelVersion: v2.6.22
Contact: linux-wireless@vger.kernel.org,
Description: The rfkill class subsystem folder.
Each registered rfkill driver is represented by an rfkillX
- subfolder (X being an integer > 0).
+ subfolder (X being an integer >= 0).
What: /sys/class/rfkill/rfkill[0-9]+/name
@@ -48,8 +48,8 @@ Contact: linux-wireless@vger.kernel.org
Description: Current state of the transmitter.
This file was scheduled to be removed in 2014, but due to its
large number of users it will be sticking around for a bit
- longer. Despite it being marked as stabe, the newer "hard" and
- "soft" interfaces should be preffered, since it is not possible
+ longer. Despite it being marked as stable, the newer "hard" and
+ "soft" interfaces should be preferred, since it is not possible
to express the 'soft and hard block' state of the rfkill driver
through this interface. There will likely be another attempt to
remove it in the future.
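
As an aside for consumers of this interface: a minimal sketch (Python, userspace) of reading the preferred "hard" and "soft" attributes instead of the deprecated "state" file. The rfkill0 instance is illustrative; a real reader would enumerate /sys/class/rfkill/.

    # Minimal sketch: query the preferred "hard"/"soft" rfkill attributes
    # rather than the deprecated "state" file. "rfkill0" is illustrative;
    # real code would enumerate /sys/class/rfkill/.
    base = "/sys/class/rfkill/rfkill0"

    def read_attr(name):
        with open(f"{base}/{name}") as f:
            return f.read().strip()

    print("name:", read_attr("name"))
    print("hard blocked:", read_attr("hard") == "1")
    print("soft blocked:", read_attr("soft") == "1")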
diff --git a/Documentation/ABI/testing/procfs-diskstats b/Documentation/ABI/testing/procfs-diskstats
index f91a973a37fe..abac31d216de 100644
--- a/Documentation/ABI/testing/procfs-diskstats
+++ b/Documentation/ABI/testing/procfs-diskstats
@@ -5,6 +5,7 @@ Description:
The /proc/diskstats file displays the I/O statistics
of block devices. Each line contains the following 14
fields:
+
1 - major number
2 - minor number
3 - device name
@@ -19,4 +20,13 @@ Description:
12 - I/Os currently in progress
13 - time spent doing I/Os (ms)
14 - weighted time spent doing I/Os (ms)
+
+ Kernel 4.18+ appends four more fields for discard
+ tracking, putting the total at 18:
+
+ 15 - discards completed successfully
+ 16 - discards merged
+ 17 - sectors discarded
+ 18 - time spent discarding
+
For more details refer to Documentation/iostats.txt
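
A minimal sketch (Python, with hypothetical dict labels) of a parser that tolerates both the classic 14-field layout and the 18-field layout with the discard counters described above:

    # Minimal sketch: parse /proc/diskstats, handling both the classic
    # 14-field layout and the 18-field layout (kernel 4.18+). The dict
    # keys are descriptive labels chosen for illustration.
    with open("/proc/diskstats") as f:
        for line in f:
            fields = line.split()
            stats = [int(x) for x in fields[3:]]
            entry = {
                "major": int(fields[0]),
                "minor": int(fields[1]),
                "name": fields[2],
                "reads_completed": stats[0],   # field 4
                "writes_completed": stats[4],  # field 8
                "io_time_ms": stats[9],        # field 13
            }
            if len(stats) >= 15:  # discard fields appended in 4.18+
                entry["discards_completed"] = stats[11]  # field 15
                entry["discards_merged"] = stats[12]     # field 16
                entry["sectors_discarded"] = stats[13]   # field 17
                entry["discard_time_ms"] = stats[14]     # field 18
            print(entry)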
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats b/Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats
new file mode 100644
index 000000000000..4b0318c99507
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats
@@ -0,0 +1,122 @@
+==========================
+PCIe Device AER statistics
+==========================
+These attributes show up under all the devices that are AER capable. These
+statistical counters indicate the errors "as seen/reported by the device".
+Note that if an endpoint is causing problems, the AER counters may
+increment at its link partner (e.g. the root port) rather than at the
+endpoint itself, because the errors are "seen"/reported by the link
+partner; the problematic endpoint may report all counters as 0, having
+never seen any problems.
+
+Where: /sys/bus/pci/devices/<dev>/aer_dev_correctable
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: List of correctable errors seen and reported by this
+ PCI device using ERR_COR. Because multiple errors may
+ be reported using a single ERR_COR message,
+ TOTAL_ERR_COR at the end of the file may not match the actual
+ total of all the errors in the file. Sample output:
+-------------------------------------------------------------------------
+localhost /sys/devices/pci0000:00/0000:00:1c.0 # cat aer_dev_correctable
+Receiver Error 2
+Bad TLP 0
+Bad DLLP 0
+RELAY_NUM Rollover 0
+Replay Timer Timeout 0
+Advisory Non-Fatal 0
+Corrected Internal Error 0
+Header Log Overflow 0
+TOTAL_ERR_COR 2
+-------------------------------------------------------------------------
+
+Where: /sys/bus/pci/devices/<dev>/aer_dev_fatal
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: List of uncorrectable fatal errors seen and reported by this
+ PCI device using ERR_FATAL. Because multiple errors may
+ be reported using a single ERR_FATAL message,
+ TOTAL_ERR_FATAL at the end of the file may not match the actual
+ total of all the errors in the file. Sample output:
+-------------------------------------------------------------------------
+localhost /sys/devices/pci0000:00/0000:00:1c.0 # cat aer_dev_fatal
+Undefined 0
+Data Link Protocol 0
+Surprise Down Error 0
+Poisoned TLP 0
+Flow Control Protocol 0
+Completion Timeout 0
+Completer Abort 0
+Unexpected Completion 0
+Receiver Overflow 0
+Malformed TLP 0
+ECRC 0
+Unsupported Request 0
+ACS Violation 0
+Uncorrectable Internal Error 0
+MC Blocked TLP 0
+AtomicOp Egress Blocked 0
+TLP Prefix Blocked Error 0
+TOTAL_ERR_FATAL 0
+-------------------------------------------------------------------------
+
+Where: /sys/bus/pci/devices/<dev>/aer_dev_nonfatal
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: List of uncorrectable nonfatal errors seen and reported by this
+ PCI device using ERR_NONFATAL. Because multiple errors
+ may be reported using a single ERR_NONFATAL message,
+ TOTAL_ERR_NONFATAL at the end of the file may not match the
+ actual total of all the errors in the file. Sample output:
+-------------------------------------------------------------------------
+localhost /sys/devices/pci0000:00/0000:00:1c.0 # cat aer_dev_nonfatal
+Undefined 0
+Data Link Protocol 0
+Surprise Down Error 0
+Poisoned TLP 0
+Flow Control Protocol 0
+Completion Timeout 0
+Completer Abort 0
+Unexpected Completion 0
+Receiver Overflow 0
+Malformed TLP 0
+ECRC 0
+Unsupported Request 0
+ACS Violation 0
+Uncorrectable Internal Error 0
+MC Blocked TLP 0
+AtomicOp Egress Blocked 0
+TLP Prefix Blocked Error 0
+TOTAL_ERR_NONFATAL 0
+-------------------------------------------------------------------------
+
+============================
+PCIe Rootport AER statistics
+============================
+These attributes show up only under root ports (or root complex event
+collectors) that are AER capable. They indicate the number of error
+messages "reported to" the root port. Note that a root port also transmits
+(internally) ERR_* messages for errors seen by its own internal PCI
+device, so these counters include those and are thus cumulative over all
+the error messages in the PCI hierarchy originating at that root port.
+
+Where: /sys/bus/pci/devices/<dev>/aer_stats/aer_rootport_total_err_cor
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description:	Total number of ERR_COR messages reported to the root port.
+
+Where: /sys/bus/pci/devices/<dev>/aer_stats/aer_rootport_total_err_fatal
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description:	Total number of ERR_FATAL messages reported to the root port.
+
+Where: /sys/bus/pci/devices/<dev>/aer_stats/aer_rootport_total_err_nonfatal
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description:	Total number of ERR_NONFATAL messages reported to the root port.
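+
+As an illustration, all three counters can be read in one go; the device
+address below is hypothetical and the values shown are only an example:
+-------------------------------------------------------------------------
+localhost /sys/bus/pci/devices/0000:00:1c.0/aer_stats # grep . *
+aer_rootport_total_err_cor:2
+aer_rootport_total_err_fatal:0
+aer_rootport_total_err_nonfatal:0
+-------------------------------------------------------------------------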
diff --git a/Documentation/ABI/testing/sysfs-class-net-queues b/Documentation/ABI/testing/sysfs-class-net-queues
index 0c0df91b1516..978b76358661 100644
--- a/Documentation/ABI/testing/sysfs-class-net-queues
+++ b/Documentation/ABI/testing/sysfs-class-net-queues
@@ -42,6 +42,17 @@ Description:
		network device transmit queue. Possible values depend on the
number of available CPU(s) in the system.
+What: /sys/class/<iface>/queues/tx-<queue>/xps_rxqs
+Date: June 2018
+KernelVersion: 4.18.0
+Contact: netdev@vger.kernel.org
+Description:
+		Mask of the receive queue(s) currently enabled to participate
+		in the Transmit Packet Steering packet processing flow for this
+ network device transmit queue. Possible values depend on the
+ number of available receive queue(s) in the network device.
+ Default is disabled.
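+
+		For example, assuming a hypothetical interface eth0, the
+		following (run as root) associates receive queues 0 and 1
+		(mask 0x3) with transmit queue 0:
+
+		# echo 3 > /sys/class/net/eth0/queues/tx-0/xps_rxqs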
+
What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
Date: November 2011
KernelVersion: 3.3
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 9c5e7732d249..73318225a368 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -476,6 +476,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/spectre_v1
/sys/devices/system/cpu/vulnerabilities/spectre_v2
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+ /sys/devices/system/cpu/vulnerabilities/l1tf
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities
@@ -487,3 +488,26 @@ Description: Information about CPU vulnerabilities
"Not affected" CPU is not affected by the vulnerability
"Vulnerable" CPU is affected and no mitigation in effect
"Mitigation: $M" CPU is affected and mitigation $M is in effect
+
+ Details about the l1tf file can be found in
+ Documentation/admin-guide/l1tf.rst
+
+What: /sys/devices/system/cpu/smt
+ /sys/devices/system/cpu/smt/active
+ /sys/devices/system/cpu/smt/control
+Date: June 2018
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:	Control Symmetric Multi Threading (SMT)
+
+ active: Tells whether SMT is active (enabled and siblings online)
+
+ control: Read/write interface to control SMT. Possible
+ values:
+
+ "on" SMT is enabled
+ "off" SMT is disabled
+ "forceoff" SMT is force disabled. Cannot be changed.
+ "notsupported" SMT is not supported by the CPU
+
+ If control status is "forceoff" or "notsupported" writes
+ are rejected.
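+
+		A minimal usage sketch (the output values are illustrative
+		and depend on the CPU):
+
+		# cat /sys/devices/system/cpu/smt/control
+		on
+		# echo off > /sys/devices/system/cpu/smt/control
+		# cat /sys/devices/system/cpu/smt/active
+		0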
diff --git a/Documentation/ABI/testing/sysfs-driver-bd9571mwv-regulator b/Documentation/ABI/testing/sysfs-driver-bd9571mwv-regulator
new file mode 100644
index 000000000000..4d63a7904b94
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-bd9571mwv-regulator
@@ -0,0 +1,27 @@
+What: /sys/bus/i2c/devices/.../bd9571mwv-regulator.*.auto/backup_mode
+Date: Jul 2018
+KernelVersion: 4.19
+Contact: Geert Uytterhoeven <geert+renesas@glider.be>
+Description:	Read/write the current state of DDR Backup Mode, which controls
+		whether DDR power rails will be kept powered during system suspend.
+ ("on"/"1" = enabled, "off"/"0" = disabled).
+ Two types of power switches (or control signals) can be used:
+ A. With a momentary power switch (or pulse signal), DDR
+ Backup Mode is enabled by default when available, as the
+ PMIC will be configured only during system suspend.
+ B. With a toggle power switch (or level signal), the
+ following steps must be followed exactly:
+ 1. Configure PMIC for backup mode, to change the role of
+ the accessory power switch from a power switch to a
+ wake-up switch,
+ 2. Switch accessory power switch off, to prepare for
+ system suspend, which is a manual step not controlled
+ by software,
+ 3. Suspend system,
+ 4. Switch accessory power switch on, to resume the
+ system.
+ DDR Backup Mode must be explicitly enabled by the user,
+ to invoke step 1.
+ See also Documentation/devicetree/bindings/mfd/bd9571mwv.txt.
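+		Usage sketch (the exact device path varies per board and is
+		deliberately left unexpanded here):
+
+		# echo on > /sys/bus/i2c/devices/.../bd9571mwv-regulator.*.auto/backup_mode
+		# cat /sys/bus/i2c/devices/.../bd9571mwv-regulator.*.auto/backup_mode
+		on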
+Users: User space applications for embedded boards equipped with a
+ BD9571MWV PMIC.
diff --git a/Documentation/PCI/00-INDEX b/Documentation/PCI/00-INDEX
index 00c9a90b6f38..206b1d5c1e71 100644
--- a/Documentation/PCI/00-INDEX
+++ b/Documentation/PCI/00-INDEX
@@ -1,5 +1,7 @@
00-INDEX
- this file
+acpi-info.txt
+ - info on how PCI host bridges are represented in ACPI
MSI-HOWTO.txt
- the Message Signaled Interrupts (MSI) Driver Guide HOWTO and FAQ.
PCIEBUS-HOWTO.txt
diff --git a/Documentation/PCI/acpi-info.txt b/Documentation/PCI/acpi-info.txt
new file mode 100644
index 000000000000..3ffa3b03970e
--- /dev/null
+++ b/Documentation/PCI/acpi-info.txt
@@ -0,0 +1,187 @@
+ ACPI considerations for PCI host bridges
+
+The general rule is that the ACPI namespace should describe everything the
+OS might use unless there's another way for the OS to find it [1, 2].
+
+For example, there's no standard hardware mechanism for enumerating PCI
+host bridges, so the ACPI namespace must describe each host bridge, the
+method for accessing PCI config space below it, the address space windows
+the host bridge forwards to PCI (using _CRS), and the routing of legacy
+INTx interrupts (using _PRT).
+
+PCI devices, which are below the host bridge, generally do not need to be
+described via ACPI. The OS can discover them via the standard PCI
+enumeration mechanism, using config accesses to discover and identify
+devices and read and size their BARs. However, ACPI may describe PCI
+devices if it provides power management or hotplug functionality for them
+or if the device has INTx interrupts connected by platform interrupt
+controllers and a _PRT is needed to describe those connections.
+
+ACPI resource description is done via _CRS objects of devices in the ACPI
+namespace [2].  The _CRS is like a generalized PCI BAR: the OS can read
+_CRS and figure out what resource is being consumed even if it doesn't have
+a driver for the device [3].  That's important because it means an old OS
+can work correctly even on a system with new devices unknown to the OS.
+The new devices might not do anything, but the OS can at least make sure no
+resources conflict with them.
+
+Static tables like MCFG, HPET, ECDT, etc., are *not* mechanisms for
+reserving address space. The static tables are for things the OS needs to
+know early in boot, before it can parse the ACPI namespace. If a new table
+is defined, an old OS needs to operate correctly even though it ignores the
+table. _CRS allows that because it is generic and understood by the old
+OS; a static table does not.
+
+If the OS is expected to manage a non-discoverable device described via
+ACPI, that device will have a specific _HID/_CID that tells the OS what
+driver to bind to it, and the _CRS tells the OS and the driver where the
+device's registers are.
+
+PCI host bridges are PNP0A03 or PNP0A08 devices.  Their _CRS should
+describe all the address space they consume.  This includes all the windows
+they forward down to the PCI bus, as well as registers of the host bridge
+itself that are not forwarded to PCI.  The host bridge registers include
+things like secondary/subordinate bus registers that determine the bus
+range below the bridge, window registers that describe the apertures, etc.
+These are all device-specific, non-architected things, so the only way a
+PNP0A03/PNP0A08 driver can manage them is via _PRS/_CRS/_SRS, which contain
+the device-specific details.  The host bridge registers also include ECAM
+space, since it is consumed by the host bridge.
+
+ACPI defines a Consumer/Producer bit to distinguish the bridge registers
+("Consumer") from the bridge apertures ("Producer") [4, 5], but early
+BIOSes didn't use that bit correctly. The result is that the current ACPI
+spec defines Consumer/Producer only for the Extended Address Space
+descriptors; the bit should be ignored in the older QWord/DWord/Word
+Address Space descriptors. Consequently, OSes have to assume all
+QWord/DWord/Word descriptors are windows.
+
+Prior to the addition of Extended Address Space descriptors, the failure of
+Consumer/Producer meant there was no way to describe bridge registers in
+the PNP0A03/PNP0A08 device itself. The workaround was to describe the
+bridge registers (including ECAM space) in PNP0C02 catch-all devices [6].
+With the exception of ECAM, the bridge register space is device-specific
+anyway, so the generic PNP0A03/PNP0A08 driver (pci_root.c) has no need to
+know about it.
+
+New architectures should be able to use "Consumer" Extended Address Space
+descriptors in the PNP0A03 device for bridge registers, including ECAM,
+although a strict interpretation of [6] might prohibit this. Old x86 and
+ia64 kernels assume all address space descriptors, including "Consumer"
+Extended Address Space ones, are windows, so it would not be safe to
+describe bridge registers this way on those architectures.
+
+PNP0C02 "motherboard" devices are basically a catch-all.  There's no
+programming model for them other than "don't use these resources for
+anything else."  So a PNP0C02 _CRS should claim any address space that is
+(1) not claimed by _CRS under any other device object in the ACPI namespace
+and (2) not meant to be assigned by the OS to something else.
+
+The PCIe spec requires the Enhanced Configuration Access Method (ECAM)
+unless there's a standard firmware interface for config access, e.g., the
+ia64 SAL interface [7]. A host bridge consumes ECAM memory address space
+and converts memory accesses into PCI configuration accesses. The spec
+defines the ECAM address space layout and functionality; only the base of
+the address space is device-specific. An ACPI OS learns the base address
+from either the static MCFG table or a _CBA method in the PNP0A03 device.
+
+The MCFG table must describe the ECAM space of non-hot-pluggable host
+bridges [8]. Since MCFG is a static table and can't be updated by hotplug,
+a _CBA method in the PNP0A03 device describes the ECAM space of a
+hot-pluggable host bridge [9]. Note that for both MCFG and _CBA, the base
+address always corresponds to bus 0, even if the bus range below the bridge
+(which is reported via _CRS) doesn't start at 0.
+
+
+[1] ACPI 6.2, sec 6.1:
+ For any device that is on a non-enumerable type of bus (for example, an
+ ISA bus), OSPM enumerates the devices' identifier(s) and the ACPI
+ system firmware must supply an _HID object ... for each device to
+ enable OSPM to do that.
+
+[2] ACPI 6.2, sec 3.7:
+ The OS enumerates motherboard devices simply by reading through the
+ ACPI Namespace looking for devices with hardware IDs.
+
+ Each device enumerated by ACPI includes ACPI-defined objects in the
+ ACPI Namespace that report the hardware resources the device could
+ occupy [_PRS], an object that reports the resources that are currently
+ used by the device [_CRS], and objects for configuring those resources
+ [_SRS]. The information is used by the Plug and Play OS (OSPM) to
+ configure the devices.
+
+[3] ACPI 6.2, sec 6.2:
+ OSPM uses device configuration objects to configure hardware resources
+ for devices enumerated via ACPI. Device configuration objects provide
+ information about current and possible resource requirements, the
+ relationship between shared resources, and methods for configuring
+ hardware resources.
+
+ When OSPM enumerates a device, it calls _PRS to determine the resource
+ requirements of the device. It may also call _CRS to find the current
+ resource settings for the device. Using this information, the Plug and
+ Play system determines what resources the device should consume and
+ sets those resources by calling the device’s _SRS control method.
+
+ In ACPI, devices can consume resources (for example, legacy keyboards),
+ provide resources (for example, a proprietary PCI bridge), or do both.
+ Unless otherwise specified, resources for a device are assumed to be
+ taken from the nearest matching resource above the device in the device
+ hierarchy.
+
+[4] ACPI 6.2, sec 6.4.3.5.1, 2, 3, 4:
+ QWord/DWord/Word Address Space Descriptor (.1, .2, .3)
+ General Flags: Bit [0] Ignored
+
+ Extended Address Space Descriptor (.4)
+ General Flags: Bit [0] Consumer/Producer:
+ 1–This device consumes this resource
+ 0–This device produces and consumes this resource
+
+[5] ACPI 6.2, sec 19.6.43:
+ ResourceUsage specifies whether the Memory range is consumed by
+ this device (ResourceConsumer) or passed on to child devices
+ (ResourceProducer). If nothing is specified, then
+ ResourceConsumer is assumed.
+
+[6] PCI Firmware 3.2, sec 4.1.2:
+ If the operating system does not natively comprehend reserving the
+ MMCFG region, the MMCFG region must be reserved by firmware. The
+ address range reported in the MCFG table or by _CBA method (see Section
+ 4.1.3) must be reserved by declaring a motherboard resource. For most
+ systems, the motherboard resource would appear at the root of the ACPI
+ namespace (under \_SB) in a node with a _HID of EISAID (PNP0C02), and
+ the resources in this case should not be claimed in the root PCI bus’s
+ _CRS. The resources can optionally be returned in Int15 E820 or
+ EFIGetMemoryMap as reserved memory but must always be reported through
+ ACPI as a motherboard resource.
+
+[7] PCI Express 4.0, sec 7.2.2:
+ For systems that are PC-compatible, or that do not implement a
+ processor-architecture-specific firmware interface standard that allows
+ access to the Configuration Space, the ECAM is required as defined in
+ this section.
+
+[8] PCI Firmware 3.2, sec 4.1.2:
+ The MCFG table is an ACPI table that is used to communicate the base
+ addresses corresponding to the non-hot removable PCI Segment Groups
+ range within a PCI Segment Group available to the operating system at
+ boot. This is required for the PC-compatible systems.
+
+ The MCFG table is only used to communicate the base addresses
+ corresponding to the PCI Segment Groups available to the system at
+ boot.
+
+[9] PCI Firmware 3.2, sec 4.1.3:
+ The _CBA (Memory mapped Configuration Base Address) control method is
+ an optional ACPI object that returns the 64-bit memory mapped
+ configuration base address for the hot plug capable host bridge. The
+ base address returned by _CBA is processor-relative address. The _CBA
+ control method evaluates to an Integer.
+
+ This control method appears under a host bridge object. When the _CBA
+ method appears under an active host bridge object, the operating system
+ evaluates this structure to identify the memory mapped configuration
+ base address corresponding to the PCI Segment Group for the bus number
+ range specified in _CRS method. An ACPI name space object that contains
+ the _CBA method must also contain a corresponding _SEG method.
diff --git a/Documentation/PCI/endpoint/function/binding/pci-test.txt b/Documentation/PCI/endpoint/function/binding/pci-test.txt
index 3b68b955fb50..cd76ba47394b 100644
--- a/Documentation/PCI/endpoint/function/binding/pci-test.txt
+++ b/Documentation/PCI/endpoint/function/binding/pci-test.txt
@@ -15,3 +15,5 @@ subsys_id : don't care
interrupt_pin : Should be 1 - INTA, 2 - INTB, 3 - INTC, 4 -INTD
msi_interrupts : Should be 1 to 32 depending on the number of MSI interrupts
to test
+msix_interrupts : Should be 1 to 2048 depending on the number of MSI-X
+ interrupts to test
diff --git a/Documentation/PCI/endpoint/pci-endpoint.txt b/Documentation/PCI/endpoint/pci-endpoint.txt
index 9b1d66829290..e86a96b66a6a 100644
--- a/Documentation/PCI/endpoint/pci-endpoint.txt
+++ b/Documentation/PCI/endpoint/pci-endpoint.txt
@@ -44,7 +44,7 @@ by the PCI controller driver.
* clear_bar: ops to reset the BAR
* alloc_addr_space: ops to allocate in PCI controller address space
* free_addr_space: ops to free the allocated address space
- * raise_irq: ops to raise a legacy or MSI interrupt
+ * raise_irq: ops to raise a legacy, MSI or MSI-X interrupt
* start: ops to start the PCI link
* stop: ops to stop the PCI link
@@ -96,7 +96,7 @@ by the PCI endpoint function driver.
*) pci_epc_raise_irq()
The PCI endpoint function driver should use pci_epc_raise_irq() to raise
- Legacy Interrupt or MSI Interrupt.
+ Legacy Interrupt, MSI or MSI-X Interrupt.
*) pci_epc_mem_alloc_addr()
diff --git a/Documentation/PCI/endpoint/pci-test-function.txt b/Documentation/PCI/endpoint/pci-test-function.txt
index 0c519c9bf94a..5916f1f592bb 100644
--- a/Documentation/PCI/endpoint/pci-test-function.txt
+++ b/Documentation/PCI/endpoint/pci-test-function.txt
@@ -20,6 +20,8 @@ The PCI endpoint test device has the following registers:
5) PCI_ENDPOINT_TEST_DST_ADDR
6) PCI_ENDPOINT_TEST_SIZE
7) PCI_ENDPOINT_TEST_CHECKSUM
+ 8) PCI_ENDPOINT_TEST_IRQ_TYPE
+ 9) PCI_ENDPOINT_TEST_IRQ_NUMBER
*) PCI_ENDPOINT_TEST_MAGIC
@@ -34,10 +36,10 @@ that the endpoint device must perform.
Bitfield Description:
Bit 0 : raise legacy IRQ
Bit 1 : raise MSI IRQ
- Bit 2 - 7 : MSI interrupt number
- Bit 8 : read command (read data from RC buffer)
- Bit 9 : write command (write data to RC buffer)
- Bit 10 : copy command (copy data from one RC buffer to another
+ Bit 2 : raise MSI-X IRQ
+ Bit 3 : read command (read data from RC buffer)
+ Bit 4 : write command (write data to RC buffer)
+ Bit 5 : copy command (copy data from one RC buffer to another
RC buffer)
*) PCI_ENDPOINT_TEST_STATUS
@@ -64,3 +66,22 @@ COPY/READ command.
This register contains the destination address (RC buffer address) for
the COPY/WRITE command.
+
+*) PCI_ENDPOINT_TEST_IRQ_TYPE
+
+This register contains the interrupt type (Legacy/MSI/MSI-X) triggered
+for the READ/WRITE/COPY and raise IRQ (Legacy/MSI/MSI-X) commands.
+
+Possible types:
+ - Legacy : 0
+ - MSI : 1
+ - MSI-X : 2
+
+*) PCI_ENDPOINT_TEST_IRQ_NUMBER
+
+This register contains the ID of the interrupt to be triggered.
+
+Admissible values:
+ - Legacy : 0
+ - MSI : [1 .. 32]
+ - MSI-X : [1 .. 2048]
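+
+From user space these registers are normally exercised via the
+pci_endpoint_test driver using the pcitest tool (tools/pci/pcitest.c).
+A sketch of a session; the option letters are assumed from pcitest of
+the same kernel series, so verify them against your tree:
+
+	# pcitest -i 2		(select MSI-X in PCI_ENDPOINT_TEST_IRQ_TYPE)
+	# pcitest -x 8		(raise MSI-X interrupt number 8)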
diff --git a/Documentation/PCI/endpoint/pci-test-howto.txt b/Documentation/PCI/endpoint/pci-test-howto.txt
index 75f48c3bb191..e40cf0fb58d7 100644
--- a/Documentation/PCI/endpoint/pci-test-howto.txt
+++ b/Documentation/PCI/endpoint/pci-test-howto.txt
@@ -45,9 +45,9 @@ The PCI endpoint framework populates the directory with the following
configurable fields.
# ls functions/pci_epf_test/func1
- baseclass_code interrupt_pin revid subsys_vendor_id
- cache_line_size msi_interrupts subclass_code vendorid
- deviceid progif_code subsys_id
+ baseclass_code interrupt_pin progif_code subsys_id
+     cache_line_size	msi_interrupts	revid		subsys_vendor_id
+ deviceid msix_interrupts subclass_code vendorid
The PCI endpoint function driver populates these entries with default values
when the device is bound to the driver. The pci-epf-test driver populates
@@ -67,6 +67,7 @@ device, the following commands can be used.
# echo 0x104c > functions/pci_epf_test/func1/vendorid
# echo 0xb500 > functions/pci_epf_test/func1/deviceid
# echo 16 > functions/pci_epf_test/func1/msi_interrupts
+ # echo 8 > functions/pci_epf_test/func1/msix_interrupts
1.5 Binding pci-epf-test Device to EP Controller
@@ -120,7 +121,9 @@ following commands.
Interrupt tests
+ SET IRQ TYPE TO LEGACY: OKAY
LEGACY IRQ: NOT OKAY
+ SET IRQ TYPE TO MSI: OKAY
MSI1: OKAY
MSI2: OKAY
MSI3: OKAY
@@ -153,9 +156,30 @@ following commands.
MSI30: NOT OKAY
MSI31: NOT OKAY
MSI32: NOT OKAY
+ SET IRQ TYPE TO MSI-X: OKAY
+ MSI-X1: OKAY
+ MSI-X2: OKAY
+ MSI-X3: OKAY
+ MSI-X4: OKAY
+ MSI-X5: OKAY
+ MSI-X6: OKAY
+ MSI-X7: OKAY
+ MSI-X8: OKAY
+ MSI-X9: NOT OKAY
+ MSI-X10: NOT OKAY
+ MSI-X11: NOT OKAY
+ MSI-X12: NOT OKAY
+ MSI-X13: NOT OKAY
+ MSI-X14: NOT OKAY
+ MSI-X15: NOT OKAY
+ MSI-X16: NOT OKAY
+ [...]
+ MSI-X2047: NOT OKAY
+ MSI-X2048: NOT OKAY
Read Tests
+ SET IRQ TYPE TO MSI: OKAY
READ ( 1 bytes): OKAY
READ ( 1024 bytes): OKAY
READ ( 1025 bytes): OKAY
diff --git a/Documentation/PCI/pcieaer-howto.txt b/Documentation/PCI/pcieaer-howto.txt
index acd0dddd6bb8..48ce7903e3c6 100644
--- a/Documentation/PCI/pcieaer-howto.txt
+++ b/Documentation/PCI/pcieaer-howto.txt
@@ -73,6 +73,11 @@ In the example, 'Requester ID' means the ID of the device that sends
the error message to the root port. Please refer to the PCI Express
specifications for other fields.
+2.4 AER Statistics / Counters
+
+When PCIe AER errors are captured, the counters / statistics are also
+exposed as sysfs attributes, which are documented in
+Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats.
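+
+For example, the per-device counters can be read directly (the device
+address below is hypothetical):
+
+	# cat /sys/bus/pci/devices/0000:00:1c.0/aer_dev_fatal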
3. Developer Guide
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
index 6c06e10bd04b..f5120a00f511 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -380,31 +380,26 @@ and therefore need no protection.
as follows:
<pre>
- 1 unsigned long gpnum;
- 2 unsigned long completed;
+ 1 unsigned long gp_seq;
</pre>
<p>RCU grace periods are numbered, and
-the <tt>-&gt;gpnum</tt> field contains the number of the grace
-period that started most recently.
-The <tt>-&gt;completed</tt> field contains the number of the
-grace period that completed most recently.
-If the two fields are equal, the RCU grace period that most recently
-started has already completed, and therefore the corresponding
-flavor of RCU is idle.
-If <tt>-&gt;gpnum</tt> is one greater than <tt>-&gt;completed</tt>,
-then <tt>-&gt;gpnum</tt> gives the number of the current RCU
-grace period, which has not yet completed.
-Any other combination of values indicates that something is broken.
-These two fields are protected by the root <tt>rcu_node</tt>'s
+the <tt>-&gt;gp_seq</tt> field contains the current grace-period
+sequence number.
+The bottom two bits are the state of the current grace period,
+which can be zero for not yet started or one for in progress.
+In other words, if the bottom two bits of <tt>-&gt;gp_seq</tt> are
+zero, the corresponding flavor of RCU is idle.
+Any value other than zero or one in those bits indicates that something is broken.
+This field is protected by the root <tt>rcu_node</tt> structure's
<tt>-&gt;lock</tt> field.
-</p><p>There are <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt> fields
+</p><p>There are <tt>-&gt;gp_seq</tt> fields
in the <tt>rcu_node</tt> and <tt>rcu_data</tt> structures
as well.
The field in the <tt>rcu_state</tt> structure represents the
-most current values, and those of the other structures are compared
-in order to detect the start of a new grace period in a distributed
+most current value, and those of the other structures are compared
+in order to detect the beginnings and ends of grace periods in a distributed
fashion.
The values flow from <tt>rcu_state</tt> to <tt>rcu_node</tt>
(down the tree from the root to the leaves) to <tt>rcu_data</tt>.
@@ -512,27 +507,47 @@ than to be heisenbugged out of existence.
as follows:
<pre>
- 1 unsigned long gpnum;
- 2 unsigned long completed;
+ 1 unsigned long gp_seq;
+ 2 unsigned long gp_seq_needed;
</pre>
-<p>These fields are the counterparts of the fields of the same name in
-the <tt>rcu_state</tt> structure.
-They each may lag up to one behind their <tt>rcu_state</tt>
-counterparts.
-If a given <tt>rcu_node</tt> structure's <tt>-&gt;gpnum</tt> and
-<tt>-&gt;complete</tt> fields are equal, then this <tt>rcu_node</tt>
+<p>The <tt>rcu_node</tt> structures' <tt>-&gt;gp_seq</tt> fields are
+the counterparts of the field of the same name in the <tt>rcu_state</tt>
+structure.
+They each may lag up to one step behind their <tt>rcu_state</tt>
+counterpart.
+If the bottom two bits of a given <tt>rcu_node</tt> structure's
+<tt>-&gt;gp_seq</tt> field are zero, then this <tt>rcu_node</tt>
structure believes that RCU is idle.
-Otherwise, as with the <tt>rcu_state</tt> structure,
-the <tt>-&gt;gpnum</tt> field will be one greater than the
-<tt>-&gt;complete</tt> fields, with <tt>-&gt;gpnum</tt>
-indicating which grace period this <tt>rcu_node</tt> believes
-is still being waited for.
+</p><p>The <tt>-&gt;gp_seq</tt> field of each <tt>rcu_node</tt>
+structure is updated at the beginning and the end
+of each grace period.
+
+<p>The <tt>-&gt;gp_seq_needed</tt> fields record the
+furthest-in-the-future grace period request seen by the corresponding
+<tt>rcu_node</tt> structure. The request is considered fulfilled when
+the value of the <tt>-&gt;gp_seq</tt> field equals or exceeds that of
+the <tt>-&gt;gp_seq_needed</tt> field.
-</p><p>The <tt>&gt;gpnum</tt> field of each <tt>rcu_node</tt>
-structure is updated at the beginning
-of each grace period, and the <tt>-&gt;completed</tt> fields are
-updated at the end of each grace period.
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+ Suppose that this <tt>rcu_node</tt> structure doesn't see
+ a request for a very long time.
+ Won't wrapping of the <tt>-&gt;gp_seq</tt> field cause
+ problems?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+ No, because if the <tt>-&gt;gp_seq_needed</tt> field lags behind the
+ <tt>-&gt;gp_seq</tt> field, the <tt>-&gt;gp_seq_needed</tt> field
+ will be updated at the end of the grace period.
+ Modulo-arithmetic comparisons therefore will always get the
+ correct answer, even with wrapping.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
<h5>Quiescent-State Tracking</h5>
@@ -626,9 +641,8 @@ normal and expedited grace periods, respectively.
</ol>
<p><font color="ffffff">So the locking is absolutely required in
- order to coordinate
- clearing of the bits with the grace-period numbers in
- <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt>.
+ order to coordinate clearing of the bits with updating of the
+ grace-period sequence number in <tt>-&gt;gp_seq</tt>.
</font></td></tr>
<tr><td>&nbsp;</td></tr>
</table>
@@ -1038,15 +1052,15 @@ out any <tt>rcu_data</tt> structure for which this flag is not set.
as follows:
<pre>
- 1 unsigned long completed;
- 2 unsigned long gpnum;
+ 1 unsigned long gp_seq;
+ 2 unsigned long gp_seq_needed;
3 bool cpu_no_qs;
4 bool core_needs_qs;
5 bool gpwrap;
6 unsigned long rcu_qs_ctr_snap;
</pre>
-<p>The <tt>completed</tt> and <tt>gpnum</tt>
+<p>The <tt>-&gt;gp_seq</tt> and <tt>-&gt;gp_seq_needed</tt>
fields are the counterparts of the fields of the same name
in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures.
They may each lag up to one behind their <tt>rcu_node</tt>
@@ -1054,15 +1068,9 @@ counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and
<tt>CONFIG_NO_HZ_FULL</tt> kernels can lag
arbitrarily far behind for CPUs in dyntick-idle mode (but these counters
will catch up upon exit from dyntick-idle mode).
-If a given <tt>rcu_data</tt> structure's <tt>-&gt;gpnum</tt> and
-<tt>-&gt;complete</tt> fields are equal, then this <tt>rcu_data</tt>
+If the lower two bits of a given <tt>rcu_data</tt> structure's
+<tt>-&gt;gp_seq</tt> are zero, then this <tt>rcu_data</tt>
structure believes that RCU is idle.
-Otherwise, as with the <tt>rcu_state</tt> and <tt>rcu_node</tt>
-structure,
-the <tt>-&gt;gpnum</tt> field will be one greater than the
-<tt>-&gt;complete</tt> fields, with <tt>-&gt;gpnum</tt>
-indicating which grace period this <tt>rcu_data</tt> believes
-is still being waited for.
<table>
<tr><th>&nbsp;</th></tr>
@@ -1070,13 +1078,13 @@ is still being waited for.
<tr><td>
All this replication of the grace period numbers can only cause
massive confusion.
- Why not just keep a global pair of counters and be done with it???
+ Why not just keep a global sequence number and be done with it???
</td></tr>
<tr><th align="left">Answer:</th></tr>
<tr><td bgcolor="#ffffff"><font color="ffffff">
- Because if there was only a single global pair of grace-period
+	  Because if there were only a single global sequence
	  number, there would need to be a single global lock to allow
- safely accessing and updating them.
+ safely accessing and updating it.
And if we are not going to have a single global lock, we need
to carefully manage the numbers on a per-node basis.
Recall from the answer to a previous Quick Quiz that the consequences
@@ -1091,8 +1099,8 @@ CPU has not yet passed through a quiescent state,
while the <tt>-&gt;core_needs_qs</tt> flag indicates that the
RCU core needs a quiescent state from the corresponding CPU.
The <tt>-&gt;gpwrap</tt> field indicates that the corresponding
-CPU has remained idle for so long that the <tt>completed</tt>
-and <tt>gpnum</tt> counters are in danger of overflow, which
+CPU has remained idle for so long that the
+<tt>gp_seq</tt> counter is in danger of overflow, which
will cause the CPU to disregard the values of its counters on
its next exit from idle.
Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
@@ -1130,10 +1138,10 @@ The CPU advances the callbacks in its <tt>rcu_data</tt> structure
whenever it notices that another RCU grace period has completed.
The CPU detects the completion of an RCU grace period by noticing
that the value of its <tt>rcu_data</tt> structure's
-<tt>-&gt;completed</tt> field differs from that of its leaf
+<tt>-&gt;gp_seq</tt> field differs from that of its leaf
<tt>rcu_node</tt> structure.
Recall that each <tt>rcu_node</tt> structure's
-<tt>-&gt;completed</tt> field is updated at the end of each
+<tt>-&gt;gp_seq</tt> field is updated at the beginning and end of each
grace period.
<p>
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
index 8651b0b4fd79..a346ce0116eb 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
@@ -357,7 +357,7 @@ parts, starting in this section with the various phases of
grace-period initialization.
<p>The first ordering-related grace-period initialization action is to
-increment the <tt>rcu_state</tt> structure's <tt>-&gt;gpnum</tt>
+advance the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt>
grace-period sequence counter, as shown below:
</p><p><img src="TreeRCU-gp-init-1.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
@@ -388,7 +388,7 @@ its last CPU and if the next <tt>rcu_node</tt> structure has no online CPUs).
<p>The final <tt>rcu_gp_init()</tt> pass through the <tt>rcu_node</tt>
tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
-<tt>-&gt;gpnum</tt> field to the newly incremented value from the
+<tt>-&gt;gp_seq</tt> field to the newly advanced value from the
<tt>rcu_state</tt> structure, as shown in the following diagram.
</p><p><img src="TreeRCU-gp-init-3.svg" alt="TreeRCU-gp-init-3.svg" width="75%">
@@ -398,9 +398,9 @@ tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
to notice that a new grace period has started, as described in the next
section.
But because the grace-period kthread started the grace period at the
-root (with the increment of the <tt>rcu_state</tt> structure's
-<tt>-&gt;gpnum</tt> field) before setting each leaf <tt>rcu_node</tt>
-structure's <tt>-&gt;gpnum</tt> field, each CPU's observation of
+root (by advancing the <tt>rcu_state</tt> structure's
+<tt>-&gt;gp_seq</tt> field) before setting each leaf <tt>rcu_node</tt>
+structure's <tt>-&gt;gp_seq</tt> field, each CPU's observation of
the start of the grace period will happen after the actual start
of the grace period.
@@ -466,7 +466,7 @@ section that the grace period must wait on.
<tr><td>
	But an RCU read-side critical section might have started
after the beginning of the grace period
- (the <tt>-&gt;gpnum++</tt> from earlier), so why should
+	(the advance of <tt>-&gt;gp_seq</tt> from earlier), so why should
the grace period wait on such a critical section?
</td></tr>
<tr><th align="left">Answer:</th></tr>
@@ -609,10 +609,8 @@ states outstanding from other CPUs.
<h4><a name="Grace-Period Cleanup">Grace-Period Cleanup</a></h4>
<p>Grace-period cleanup first scans the <tt>rcu_node</tt> tree
-breadth-first setting all the <tt>-&gt;completed</tt> fields equal
-to the number of the newly completed grace period, then it sets
-the <tt>rcu_state</tt> structure's <tt>-&gt;completed</tt> field,
-again to the number of the newly completed grace period.
+breadth-first advancing all the <tt>-&gt;gp_seq</tt> fields, then it
+advances the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt> field.
The ordering effects are shown below:
</p><p><img src="TreeRCU-gp-cleanup.svg" alt="TreeRCU-gp-cleanup.svg" width="75%">
@@ -634,7 +632,7 @@ grace-period cleanup is complete, the next grace period can begin.
CPU has reported its quiescent state, but it may be some
milliseconds before RCU becomes aware of this.
The latest reasonable candidate is once the <tt>rcu_state</tt>
- structure's <tt>-&gt;completed</tt> field has been updated,
+ structure's <tt>-&gt;gp_seq</tt> field has been updated,
but it is quite possible that some CPUs have already completed
phase two of their updates by that time.
In short, if you are going to work with RCU, you need to
@@ -647,7 +645,7 @@ grace-period cleanup is complete, the next grace period can begin.
<h4><a name="Callback Invocation">Callback Invocation</a></h4>
<p>Once a given CPU's leaf <tt>rcu_node</tt> structure's
-<tt>-&gt;completed</tt> field has been updated, that CPU can begin
+<tt>-&gt;gp_seq</tt> field has been updated, that CPU can begin
invoking its RCU callbacks that were waiting for this grace period
to end.
These callbacks are identified by <tt>rcu_advance_cbs()</tt>,
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
index 754f426b297a..bf84fbab27ee 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
@@ -384,11 +384,11 @@
inkscape:window-height="1144"
id="namedview208"
showgrid="true"
- inkscape:zoom="0.70710678"
- inkscape:cx="617.89017"
- inkscape:cy="542.52419"
- inkscape:window-x="86"
- inkscape:window-y="28"
+ inkscape:zoom="0.78716603"
+ inkscape:cx="513.06403"
+ inkscape:cy="623.1214"
+ inkscape:window-x="102"
+ inkscape:window-y="38"
inkscape:window-maximized="0"
inkscape:current-layer="g3188-3"
fit-margin-top="5"
@@ -417,13 +417,15 @@
id="g3188">
<text
xml:space="preserve"
- x="3199.1516"
+ x="3145.9592"
y="13255.592"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3143">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
<g
id="g3107"
transform="translate(947.90548,11584.029)">
@@ -502,13 +504,15 @@
</g>
<text
xml:space="preserve"
- x="5324.5371"
- y="15414.598"
+ x="5264.4731"
+ y="15428.84"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-753"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-5">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -547,15 +551,6 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-6-0">Leaf</tspan></text>
- <text
- xml:space="preserve"
- x="7479.5796"
- y="17699.943"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-9"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<path
sodipodi:nodetypes="cc"
inkscape:connector-curvature="0"
@@ -566,15 +561,6 @@
style="fill:none;stroke-width:0.025in"
transform="translate(-737.93887,7732.6672)"
id="g3188-3">
- <text
- xml:space="preserve"
- x="3225.7478"
- y="13175.802"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-60"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;completed =</text>
<g
id="g3107-62"
transform="translate(947.90548,11584.029)">
@@ -607,15 +593,6 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-7">Root</tspan></text>
- <text
- xml:space="preserve"
- x="3225.7478"
- y="13390.038"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-60-3"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"> rnp-&gt;completed</text>
<flowRoot
xml:space="preserve"
id="flowRoot3356"
@@ -627,7 +604,18 @@
height="63.63961"
x="332.34018"
y="681.87292" /></flowRegion><flowPara
- id="flowPara3362" /></flowRoot> </g>
+ id="flowPara3362" /></flowRoot> <text
+ xml:space="preserve"
+ x="3156.6121"
+ y="13317.754"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-0">rcu_seq_end(&amp;rsp-&gt;gp_seq)</tspan></text>
+ </g>
<g
style="fill:none;stroke-width:0.025in"
transform="translate(-858.40227,7769.0342)"
@@ -859,6 +847,17 @@
id="path3414-8-3-6-6"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ x="7418.769"
+ y="17646.104"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-70"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-93">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-1642.5377,-11611.245)"
@@ -887,13 +886,15 @@
</g>
<text
xml:space="preserve"
- x="5327.3057"
+ x="5274.1133"
y="15428.84"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-36"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-151.71746,-11647.612)"
@@ -972,13 +973,15 @@
id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7486.4907"
- y="17670.119"
+ x="7408.5918"
+ y="17619.504"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-6"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-9">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-6817.1997,-11647.612)"
@@ -1019,13 +1022,15 @@
id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7474.1382"
- y="17688.926"
+ x="7416.8003"
+ y="17619.504"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-5"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-56">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<path
style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -1059,15 +1064,6 @@
id="path3414-8-3-6"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" />
- <text
- xml:space="preserve"
- x="7318.9653"
- y="6031.6353"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-2"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<g
style="fill:none;stroke-width:0.025in"
id="g4504-3-9"
@@ -1123,4 +1119,15 @@
id="path3134-9-0-3-5"
d="m 6875.6003,15833.906 1595.7755,0"
style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send-36)" />
+ <text
+ xml:space="preserve"
+ x="7275.2612"
+ y="5971.8916"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-2">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
index 0161262904ec..8c207550818f 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
@@ -272,13 +272,13 @@
inkscape:window-height="1144"
id="namedview208"
showgrid="true"
- inkscape:zoom="0.70710678"
- inkscape:cx="617.89019"
- inkscape:cy="636.57143"
- inkscape:window-x="697"
+ inkscape:zoom="2.6330492"
+ inkscape:cx="524.82797"
+ inkscape:cy="519.31194"
+ inkscape:window-x="79"
inkscape:window-y="28"
inkscape:window-maximized="0"
- inkscape:current-layer="svg2"
+ inkscape:current-layer="g3188"
fit-margin-top="5"
fit-margin-right="5"
fit-margin-left="5"
@@ -305,13 +305,15 @@
id="g3188">
<text
xml:space="preserve"
- x="3305.5364"
+ x="3119.363"
y="13255.592"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3071">rcu_seq_start(rsp-&gt;gp_seq)</tspan></text>
<g
id="g3107"
transform="translate(947.90548,11584.029)">
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
index de6ecc51b00e..d24d7d555dbc 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
@@ -19,7 +19,7 @@
id="svg2"
version="1.1"
inkscape:version="0.48.4 r9939"
- sodipodi:docname="TreeRCU-gp-init-2.svg">
+ sodipodi:docname="TreeRCU-gp-init-3.svg">
<metadata
id="metadata212">
<rdf:RDF>
@@ -257,18 +257,22 @@
inkscape:window-width="1087"
inkscape:window-height="1144"
id="namedview208"
- showgrid="false"
- inkscape:zoom="0.70710678"
+ showgrid="true"
+ inkscape:zoom="0.68224756"
inkscape:cx="617.89019"
inkscape:cy="625.84293"
- inkscape:window-x="697"
+ inkscape:window-x="54"
inkscape:window-y="28"
inkscape:window-maximized="0"
- inkscape:current-layer="svg2"
+ inkscape:current-layer="g3153"
fit-margin-top="5"
fit-margin-right="5"
fit-margin-left="5"
- fit-margin-bottom="5" />
+ fit-margin-bottom="5">
+ <inkscape:grid
+ type="xygrid"
+ id="grid3090" />
+ </sodipodi:namedview>
<path
sodipodi:nodetypes="cccccccccccccccccccccccc"
inkscape:connector-curvature="0"
@@ -281,13 +285,13 @@
id="g3188">
<text
xml:space="preserve"
- x="3305.5364"
+ x="3145.9592"
y="13255.592"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
<g
id="g3107"
transform="translate(947.90548,11584.029)">
@@ -366,13 +370,13 @@
</g>
<text
xml:space="preserve"
- x="5392.3345"
- y="15407.104"
+ x="5253.6904"
+ y="15407.032"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-6"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -413,13 +417,13 @@
id="tspan3104-6-5-6-0">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7536.4883"
- y="17640.934"
+ x="7415.4365"
+ y="17670.572"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-9"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-1642.5375,-11610.962)"
@@ -448,13 +452,13 @@
</g>
<text
xml:space="preserve"
- x="5378.4146"
- y="15436.927"
+ x="5258.0688"
+ y="15412.313"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-3"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-151.71726,-11647.329)"
@@ -533,13 +537,13 @@
id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7520.1294"
- y="17673.639"
+ x="7405.2607"
+ y="17670.572"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-35"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-6817.1998,-11647.329)"
@@ -580,13 +584,13 @@
id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7521.4663"
- y="17666.062"
+ x="7413.4688"
+ y="17670.566"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-75"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<path
style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -622,11 +626,11 @@
sodipodi:nodetypes="cc" />
<text
xml:space="preserve"
- x="7370.856"
- y="5997.5972"
+ x="7271.9297"
+ y="6023.2412"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-62"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
index b13b7b01bb3a..acd73c7ad0f4 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
@@ -1070,13 +1070,13 @@
inkscape:window-height="1144"
id="namedview208"
showgrid="true"
- inkscape:zoom="0.6004608"
- inkscape:cx="826.65969"
- inkscape:cy="483.3047"
- inkscape:window-x="66"
- inkscape:window-y="28"
+ inkscape:zoom="0.81932583"
+ inkscape:cx="840.45848"
+ inkscape:cy="5052.4242"
+ inkscape:window-x="787"
+ inkscape:window-y="24"
inkscape:window-maximized="0"
- inkscape:current-layer="svg2"
+ inkscape:current-layer="g4"
fit-margin-top="5"
fit-margin-right="5"
fit-margin-left="5"
@@ -1543,15 +1543,6 @@
style="fill:none;stroke-width:0.025in"
transform="translate(1749.0282,658.72243)"
id="g3188">
- <text
- xml:space="preserve"
- x="3305.5364"
- y="13255.592"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-5"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
<g
id="g3107-62"
transform="translate(947.90548,11584.029)">
@@ -1584,6 +1575,17 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-7">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="3137.9988"
+ y="13271.316"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-626"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3071">rcu_seq_start(rsp-&gt;gp_seq)</tspan></text>
</g>
<rect
ry="0"
@@ -2318,15 +2320,6 @@
style="fill:none;stroke-width:0.025in"
transform="translate(1739.0986,17188.625)"
id="g3188-6">
- <text
- xml:space="preserve"
- x="3305.5364"
- y="13255.592"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-1"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
<g
id="g3107-5"
transform="translate(947.90548,11584.029)">
@@ -2359,6 +2352,15 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-1">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="3147.9268"
+ y="13240.524"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -2387,13 +2389,13 @@
</g>
<text
xml:space="preserve"
- x="5392.3345"
- y="15407.104"
+ x="5263.1094"
+ y="15411.646"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-6-7"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-92"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -2434,13 +2436,13 @@
id="tspan3104-6-5-6-0-94">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7536.4883"
- y="17640.934"
+ x="7417.4053"
+ y="17655.502"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-9"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-759"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-2353.8462,17224.992)"
@@ -2469,13 +2471,13 @@
</g>
<text
xml:space="preserve"
- x="5378.4146"
- y="15436.927"
+ x="5246.1548"
+ y="15411.648"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-3"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-87"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-863.02613,17188.625)"
@@ -2554,13 +2556,13 @@
id="tspan3104-6-5-6-0-92-6">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7520.1294"
- y="17673.639"
+ x="7433.8257"
+ y="17682.098"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-35"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<g
transform="translate(-7528.5085,17188.625)"
@@ -2601,13 +2603,13 @@
id="tspan3104-6-5-6-0-1-8">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7521.4663"
- y="17666.062"
+ x="7415.4404"
+ y="17682.098"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-75-1"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ id="text202-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
<path
style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -2641,15 +2643,6 @@
id="path3414-8-3-6-4"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" />
- <text
- xml:space="preserve"
- x="6659.5469"
- y="34833.551"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-62"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
<path
sodipodi:nodetypes="ccc"
inkscape:connector-curvature="0"
@@ -3844,7 +3837,7 @@
font-weight="bold"
font-size="192"
id="text202-6-6-5"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gp_seq</text>
<text
xml:space="preserve"
x="5035.4155"
@@ -4284,15 +4277,6 @@
style="fill:none;stroke-width:0.025in"
transform="translate(1874.038,53203.538)"
id="g3188-7">
- <text
- xml:space="preserve"
- x="3199.1516"
- y="13255.592"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-82"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<g
id="g3107-53"
transform="translate(947.90548,11584.029)">
@@ -4325,6 +4309,17 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-19">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="3175.896"
+ y="13240.11"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<rect
ry="0"
@@ -4371,13 +4366,15 @@
</g>
<text
xml:space="preserve"
- x="5324.5371"
- y="15414.598"
+ x="5264.4829"
+ y="15411.231"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-753"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-5">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
style="fill:none;stroke-width:0.025in"
@@ -4412,30 +4409,12 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-6-0-4">Leaf</tspan></text>
- <text
- xml:space="preserve"
- x="10084.225"
- y="70903.312"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-9-0"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<path
sodipodi:nodetypes="ccc"
inkscape:connector-curvature="0"
id="path3134-9-0-3-9"
d="m 6315.6122,72629.054 -20.9533,8108.684 1648.968,0"
style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
- <text
- xml:space="preserve"
- x="5092.4683"
- y="74111.672"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-60"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rsp-&gt;completed =</text>
<g
style="fill:none;stroke-width:0.025in"
id="g3107-62-6"
@@ -4469,15 +4448,6 @@
sodipodi:linespacing="125%"><tspan
style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
id="tspan3104-6-5-7-7">Root</tspan></text>
- <text
- xml:space="preserve"
- x="5092.4683"
- y="74325.906"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-60-3"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"> rnp-&gt;completed</text>
<g
style="fill:none;stroke-width:0.025in"
transform="translate(1746.2528,60972.572)"
@@ -4736,13 +4706,15 @@
</g>
<text
xml:space="preserve"
- x="5327.3057"
- y="15428.84"
+ x="5274.1216"
+ y="15411.231"
font-style="normal"
font-weight="bold"
font-size="192"
id="text202-36"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-6">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-728.08545,53203.538)"
@@ -4821,13 +4793,15 @@
id="tspan3104-6-5-6-0-92-5">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7486.4907"
- y="17670.119"
+ x="7435.1987"
+ y="17708.281"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-6-2"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-9"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-1">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<g
transform="translate(-7393.5687,53203.538)"
@@ -4868,13 +4842,15 @@
id="tspan3104-6-5-6-0-1-5">Leaf</tspan></text>
<text
xml:space="preserve"
- x="7474.1382"
- y="17688.926"
+ x="7416.8125"
+ y="17708.281"
font-style="normal"
font-weight="bold"
font-size="192"
- id="text202-5-1"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ id="text202-36-35"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-62">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
</g>
<path
style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -4908,15 +4884,6 @@
id="path3414-8-3-6-67"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" />
- <text
- xml:space="preserve"
- x="6742.6001"
- y="70882.617"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- id="text202-2"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
<g
style="fill:none;stroke-width:0.025in"
id="g4504-3-9-6"
@@ -5131,5 +5098,47 @@
font-size="192"
id="text202-7-9-6-6-7"
style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_do_batch()</text>
+ <text
+ xml:space="preserve"
+ x="6698.9019"
+ y="70885.211"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-7">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
+ <text
+ xml:space="preserve"
+ x="10023.457"
+ y="70885.234"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-9">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
+ <text
+ xml:space="preserve"
+ x="5023.3389"
+ y="74209.773"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36-36"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+ style="font-size:172.87567139px"
+ id="tspan3166-0">rcu_seq_end(&amp;rsp-&gt;gp_seq)</tspan></text>
+ <text
+ xml:space="preserve"
+ x="6562.5884"
+ y="34870.727"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
</g>
</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
index de3992f4cbe1..149bec2a4493 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
@@ -300,13 +300,13 @@
inkscape:window-height="1144"
id="namedview208"
showgrid="true"
- inkscape:zoom="0.70710678"
- inkscape:cx="616.47598"
- inkscape:cy="595.41964"
- inkscape:window-x="813"
+ inkscape:zoom="0.96484375"
+ inkscape:cx="507.0191"
+ inkscape:cy="885.62207"
+ inkscape:window-x="47"
inkscape:window-y="28"
inkscape:window-maximized="0"
- inkscape:current-layer="g4405"
+ inkscape:current-layer="g3115"
fit-margin-top="5"
fit-margin-right="5"
fit-margin-left="5"
@@ -710,7 +710,7 @@
font-weight="bold"
font-size="192"
id="text202-6-6"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gp_seq</text>
<text
xml:space="preserve"
x="5035.4155"
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 4259f95c3261..f99cf11b314b 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -172,7 +172,7 @@ it will print a message similar to the following:
INFO: rcu_sched detected stalls on CPUs/tasks:
2-...: (3 GPs behind) idle=06c/0/0 softirq=1453/1455 fqs=0
16-...: (0 ticks this GP) idle=81c/0/0 softirq=764/764 fqs=0
- (detected by 32, t=2603 jiffies, g=7073, c=7072, q=625)
+ (detected by 32, t=2603 jiffies, g=7075, q=625)
This message indicates that CPU 32 detected that CPUs 2 and 16 were both
causing stalls, and that the stall was affecting RCU-sched. This message
@@ -215,11 +215,10 @@ CPU since the last time that this CPU noted the beginning of a grace
period.
The "detected by" line indicates which CPU detected the stall (in this
-case, CPU 32), how many jiffies have elapsed since the start of the
-grace period (in this case 2603), the number of the last grace period
-to start and to complete (7073 and 7072, respectively), and an estimate
-of the total number of RCU callbacks queued across all CPUs (625 in
-this case).
+case, CPU 32), how many jiffies have elapsed since the start of the grace
+period (in this case 2603), the grace-period sequence number (7075), and
+an estimate of the total number of RCU callbacks queued across all CPUs
+(625 in this case).
In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed
for each CPU:
@@ -266,15 +265,16 @@ If the relevant grace-period kthread has been unable to run prior to
the stall warning, as was the case in the "All QSes seen" line above,
the following additional line is printed:
- kthread starved for 23807 jiffies! g7073 c7072 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1
+ kthread starved for 23807 jiffies! g7075 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1 ->cpu=5
Starving the grace-period kthreads of CPU time can of course result
in RCU CPU stall warnings even when all CPUs and tasks have passed
-through the required quiescent states. The "g" and "c" numbers flag the
-number of the last grace period started and completed, respectively,
-the "f" precedes the ->gp_flags command to the grace-period kthread,
-the "RCU_GP_WAIT_FQS" indicates that the kthread is waiting for a short
-timeout, and the "state" precedes value of the task_struct ->state field.
+through the required quiescent states. The "g" number shows the current
+grace-period sequence number, the "f" precedes the ->gp_flags command
+to the grace-period kthread, the "RCU_GP_WAIT_FQS" indicates that the
+kthread is waiting for a short timeout, the "state" precedes the value of the
+task_struct ->state field, and the "cpu" indicates that the grace-period
+kthread last ran on CPU 5.
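+
+The grace-period sequence number reported by "g" is not a plain count:
+its low-order bits encode the phase of the current grace period and the
+remaining bits count grace periods. A sketch of the helpers, believed
+consistent with kernel/rcu/rcu.h in this era (consult that file for the
+authoritative definitions):
+
+	#define RCU_SEQ_CTR_SHIFT	2
+	#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)
+
+	/* Extract the grace-period count from a sequence number. */
+	static inline unsigned long rcu_seq_ctr(unsigned long s)
+	{
+		return s >> RCU_SEQ_CTR_SHIFT;
+	}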
Multiple Warnings From One Stall
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 65eb856526b7..c2a7facf7ff9 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -588,6 +588,7 @@ It is extremely simple:
void synchronize_rcu(void)
{
write_lock(&rcu_gp_mutex);
+ smp_mb__after_spinlock();
write_unlock(&rcu_gp_mutex);
}
@@ -609,12 +610,15 @@ don't forget about them when submitting patches making use of RCU!]
The rcu_read_lock() and rcu_read_unlock() primitive read-acquire
and release a global reader-writer lock. The synchronize_rcu()
-primitive write-acquires this same lock, then immediately releases
-it. This means that once synchronize_rcu() exits, all RCU read-side
-critical sections that were in progress before synchronize_rcu() was
-called are guaranteed to have completed -- there is no way that
-synchronize_rcu() would have been able to write-acquire the lock
-otherwise.
+primitive write-acquires this same lock, then releases it. This means
+that once synchronize_rcu() exits, all RCU read-side critical sections
+that were in progress before synchronize_rcu() was called are guaranteed
+to have completed -- there is no way that synchronize_rcu() would have
+been able to write-acquire the lock otherwise. The smp_mb__after_spinlock()
+promotes synchronize_rcu() to a full memory barrier in compliance with
+the "Memory-Barrier Guarantees" listed in:
+
+ Documentation/RCU/Design/Requirements/Requirements.html.
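+
+For completeness, the matching toy reader-side primitives are simply the
+read-acquire and read-release of the same global lock (a sketch only;
+the real rcu_read_lock() is far cheaper and never blocks):
+
+	void rcu_read_lock(void)
+	{
+		read_lock(&rcu_gp_mutex);
+	}
+
+	void rcu_read_unlock(void)
+	{
+		read_unlock(&rcu_gp_mutex);
+	}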
It is possible to nest rcu_read_lock(), since reader-writer locks may
be recursively acquired. Note also that rcu_read_lock() is immune
@@ -816,11 +820,13 @@ RCU list traversal:
list_next_rcu
list_for_each_entry_rcu
list_for_each_entry_continue_rcu
+ list_for_each_entry_from_rcu
hlist_first_rcu
hlist_next_rcu
hlist_pprev_rcu
hlist_for_each_entry_rcu
hlist_for_each_entry_rcu_bh
+ hlist_for_each_entry_from_rcu
hlist_for_each_entry_continue_rcu
hlist_for_each_entry_continue_rcu_bh
hlist_nulls_first_rcu
diff --git a/Documentation/acpi/dsd/data-node-references.txt b/Documentation/acpi/dsd/data-node-references.txt
new file mode 100644
index 000000000000..c3871565c8cf
--- /dev/null
+++ b/Documentation/acpi/dsd/data-node-references.txt
@@ -0,0 +1,89 @@
+Copyright (C) 2018 Intel Corporation
+Author: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+
+Referencing hierarchical data nodes
+-----------------------------------
+
+ACPI in general allows referring to device objects in the tree only.
+Hierarchical data extension nodes may not be referred to directly, hence this
+document defines a scheme to implement such references.
+
+A reference consists of the device object name followed by one or more
+hierarchical data extension [1] keys. Specifically, the hierarchical data
+extension node which is referred to by the key shall lie directly under the
+parent object, i.e. either the device object or another hierarchical data
+extension node.
+
+The keys in the hierarchical data nodes shall consist of the name of the node,
+"@" character and the number of the node in hexadecimal notation (without pre-
+or postfixes). The same ACPI object shall include the _DSD property extension
+with a property "reg" that shall have the same numerical value as the number of
+the node.
+
+In case a hierarchical data extension node has no numerical value, then the
+"reg" property shall be omitted from the ACPI object's _DSD properties and the
+"@" character and the number shall be omitted from the hierarchical data
+extension key.
+
+
+Example
+-------
+
+ In the ASL snippet below, the "reference" _DSD property [2] contains a
+ device object reference to DEV0 and under that device object, a
+ hierarchical data extension key "node@1" referring to the NOD1 object
+ and lastly, a hierarchical data extension key "anothernode" referring to
+ the ANOD object which is also the final target node of the reference.
+
+ Device (DEV0)
+ {
+ Name (_DSD, Package () {
+ ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ Package () {
+ Package () { "node@0", NOD0 },
+ Package () { "node@1", NOD1 },
+ }
+ })
+ Name (NOD0, Package() {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () { "random-property", 3 },
+ }
+ })
+ Name (NOD1, Package() {
+ ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ Package () {
+ Package () { "anothernode", ANOD },
+ }
+ })
+ Name (ANOD, Package() {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () { "random-property", 0 },
+ }
+ })
+ }
+
+ Device (DEV1)
+ {
+ Name (_DSD, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () { "reference", ^DEV0, "node@1", "anothernode" },
+ }
+ })
+ }
+
+Please also see a graph example in graph.txt.
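+
+On the Linux side, such references are typically consumed through the
+fwnode property framework. A minimal sketch (hypothetical "dev" pointing
+at DEV1 above; error handling elided):
+
+	struct fwnode_reference_args args;
+	int ret;
+
+	/* Resolve the "reference" property to its final data node. */
+	ret = fwnode_property_get_reference_args(dev_fwnode(dev),
+						 "reference", NULL, 0, 0,
+						 &args);
+	if (!ret)
+		fwnode_handle_put(args.fwnode);	/* the target node (ANOD) */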
+
+References
+----------
+
+[1] Hierarchical Data Extension UUID For _DSD.
+ <URL:http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf>,
+ referenced 2018-07-17.
+
+[2] Device Properties UUID For _DSD.
+ <URL:http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf>,
+ referenced 2016-10-04.
diff --git a/Documentation/acpi/dsd/graph.txt b/Documentation/acpi/dsd/graph.txt
index ac09e3138b79..b9ce910781dc 100644
--- a/Documentation/acpi/dsd/graph.txt
+++ b/Documentation/acpi/dsd/graph.txt
@@ -36,29 +36,41 @@ The port and endpoint concepts are very similar to those in Devicetree
[3]. A port represents an interface in a device, and an endpoint
represents a connection to that interface.
-All port nodes are located under the device's "_DSD" node in the
-hierarchical data extension tree. The property extension related to
-each port node must contain the key "port" and an integer value which
-is the number of the port. The object it refers to should be called "PRTX",
-where "X" is the number of the port.
-
-Further on, endpoints are located under the individual port nodes. The
-first hierarchical data extension package list entry of the endpoint
-nodes must begin with "endpoint" and must be followed by the number
-of the endpoint. The object it refers to should be called "EPXY", where
-"X" is the number of the port and "Y" is the number of the endpoint.
-
-Each port node contains a property extension key "port", the value of
-which is the number of the port node. The each endpoint is similarly numbered
-with a property extension key "endpoint". Port numbers must be unique within a
-device and endpoint numbers must be unique within a port.
+All port nodes are located under the device's "_DSD" node in the hierarchical
+data extension tree. The data extension related to each port node must begin
+with "port" and must be followed by the "@" character and the number of the port
+as its key. The target object it refers to should be called "PRTX", where "X" is
+the number of the port. An example of such a package would be:
+
+ Package() { "port@4", PRT4 }
+
+Further on, endpoints are located under the port nodes. The hierarchical
+data extension key of the endpoint nodes must begin with
+"endpoint" and must be followed by the "@" character and the number of the
+endpoint. The object it refers to should be called "EPXY", where "X" is the
+number of the port and "Y" is the number of the endpoint. An example of such a
+package would be:
+
+ Package() { "endpoint@0", EP40 }
+
+Each port node contains a property extension key "port", the value of which is
+the number of the port. Each endpoint is similarly numbered with a property
+extension key "reg", the value of which is the number of the endpoint. Port
+numbers must be unique within a device and endpoint numbers must be unique
+within a port. If a device object may only has a single port, then the number
+of that port shall be zero. Similarly, if a port may only have a single
+endpoint, the number of that endpoint shall be zero.
The endpoint reference uses property extension with "remote-endpoint" property
name followed by a reference in the same package. Such references consist of the
-the remote device reference, number of the port in the device and finally the
-number of the endpoint in that port. Individual references thus appear as:
+remote device reference, the first package entry of the port data extension
+reference under the device and finally the first package entry of the endpoint
+data extension reference under the port. Individual references thus appear as:
- Package() { device, port_number, endpoint_number }
+ Package() { device, "port@X", "endpoint@Y" }
+
+In the above example, "X" is the number of the port and "Y" is the number of the
+endpoint.
The references to endpoints must always be done both ways, to the
remote endpoint and back from the referred remote endpoint node.
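+
+On the Linux side, such graphs can be walked with the generic fwnode
+graph helpers. A minimal sketch (hypothetical "dev"; error handling and
+endpoint parsing elided):
+
+	struct fwnode_handle *ep = NULL, *remote;
+
+	/* Visit every endpoint under the device's ports. */
+	while ((ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), ep))) {
+		/* Follow the "remote-endpoint" reference described above. */
+		remote = fwnode_graph_get_remote_endpoint(ep);
+		fwnode_handle_put(remote);
+	}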
@@ -76,24 +88,24 @@ A simple example of this is shown below:
},
ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
Package () {
- Package () { "port0", "PRT0" },
+ Package () { "port@0", PRT0 },
}
})
Name (PRT0, Package() {
ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
Package () {
- Package () { "port", 0 },
+ Package () { "reg", 0 },
},
ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
Package () {
- Package () { "endpoint0", "EP00" },
+ Package () { "endpoint@0", EP00 },
}
})
Name (EP00, Package() {
ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
Package () {
- Package () { "endpoint", 0 },
- Package () { "remote-endpoint", Package() { \_SB.PCI0.ISP, 4, 0 } },
+ Package () { "reg", 0 },
+ Package () { "remote-endpoint", Package() { \_SB.PCI0.ISP, "port@4", "endpoint@0" } },
}
})
}
@@ -106,26 +118,26 @@ A simple example of this is shown below:
Name (_DSD, Package () {
ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
Package () {
- Package () { "port4", "PRT4" },
+ Package () { "port@4", PRT4 },
}
})
Name (PRT4, Package() {
ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
Package () {
- Package () { "port", 4 }, /* CSI-2 port number */
+ Package () { "reg", 4 }, /* CSI-2 port number */
},
ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
Package () {
- Package () { "endpoint0", "EP40" },
+ Package () { "endpoint@0", EP40 },
}
})
Name (EP40, Package() {
ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
Package () {
- Package () { "endpoint", 0 },
- Package () { "remote-endpoint", Package () { \_SB.PCI0.I2C2.CAM0, 0, 0 } },
+ Package () { "reg", 0 },
+ Package () { "remote-endpoint", Package () { \_SB.PCI0.I2C2.CAM0, "port@0", "endpoint@0" } },
}
})
}
@@ -151,7 +163,7 @@ References
referenced 2016-10-04.
[5] Hierarchical Data Extension UUID For _DSD.
- <URL:http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.pdf>,
+ <URL:http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf>,
referenced 2016-10-04.
[6] Advanced Configuration and Power Interface Specification.
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 02caa7efd5ef..15ea785b2dfa 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -1,3 +1,5 @@
+.. _readme:
+
Linux kernel release 4.x <http://kernel.org/>
=============================================
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 8a2c52d5c53b..1746131bc9cb 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -51,6 +51,9 @@ v1 is available under Documentation/cgroup-v1/.
5-3. IO
5-3-1. IO Interface Files
5-3-2. Writeback
+ 5-3-3. IO Latency
+ 5-3-3-1. How IO Latency Throttling Works
+ 5-3-3-2. IO Latency Interface Files
5-4. PID
5-4-1. PID Interface Files
5-5. Device
@@ -1314,17 +1317,19 @@ IO Interface Files
Lines are keyed by $MAJ:$MIN device numbers and not ordered.
The following nested keys are defined.
- ====== ===================
+ ====== =====================
rbytes Bytes read
wbytes Bytes written
rios Number of read IOs
wios Number of write IOs
- ====== ===================
+ dbytes Bytes discarded
+ dios Number of discard IOs
+ ====== =====================
An example read output follows:
- 8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353
- 8:0 rbytes=90430464 wbytes=299008000 rios=8950 wios=1252
+ 8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353 dbytes=0 dios=0
+ 8:0 rbytes=90430464 wbytes=299008000 rios=8950 wios=1252 dbytes=50331648 dios=3021
io.weight
A read-write flat-keyed file which exists on non-root cgroups.
@@ -1446,6 +1451,85 @@ writeback as follows.
vm.dirty[_background]_ratio.
+IO Latency
+~~~~~~~~~~
+
+This is a cgroup v2 controller for IO workload protection. You provide a group
+with a latency target, and if the average latency exceeds that target the
+controller will throttle any peers that have a higher (i.e. less strict)
+latency target than the protected workload.
+
+The limits are only applied at the peer level in the hierarchy. This means that
+in the diagram below, only groups A, B, and C will influence each other, and
+groups D and F will influence each other. Group G will influence nobody.
+
+ [root]
+ / | \
+ A B C
+ / \ |
+ D F G
+
+
+So the ideal way to configure this is to set io.latency in groups A, B, and C.
+Generally you do not want to set a value lower than the latency your device
+supports. Experiment to find the value that works best for your workload.
+Start at higher than the expected latency for your device and watch the
+avg_lat value in io.stat for your workload group to get an idea of the
+latency you see during normal operation. Use the avg_lat value as a basis for
+your real setting, choosing a value 10-15% higher than the one in io.stat.
+
+How IO Latency Throttling Works
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+io.latency is work conserving: as long as everybody is meeting their latency
+target the controller doesn't do anything. Once a group starts missing its
+target it begins throttling any peer group that has a higher target than itself.
+This throttling takes two forms:
+
+- Queue depth throttling. This is the number of outstanding IOs a group is
+ allowed to have. We will clamp down relatively quickly, starting at no limit
+ and going all the way down to 1 IO at a time.
+
+- Artificial delay induction. There are certain types of IO that cannot be
+ throttled without possibly adversely affecting higher priority groups. This
+ includes swapping and metadata IO. These types of IO are allowed to occur
+ normally; however, they are "charged" to the originating group. If the
+ originating group is being throttled, you will see the use_delay and delay
+ fields in io.stat increase. The delay value is the number of microseconds
+ being added to any process that runs in this group. Because this number can
+ grow quite large if there is a lot of swapping or metadata IO occurring, we
+ limit the individual delay events to 1 second at a time.
+
+Once the victimized group starts meeting its latency target again it will start
+unthrottling any peer groups that were throttled previously. If the victimized
+group simply stops doing IO the global counter will unthrottle appropriately.
+
+IO Latency Interface Files
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ io.latency
+ This takes a format similar to the other controllers.
+
+ "MAJOR:MINOR target=<target time in microseconds>"
+
+ io.stat
+ If the controller is enabled you will see extra stats in io.stat in
+ addition to the normal ones.
+
+ depth
+ This is the current queue depth for the group.
+
+ avg_lat
+ This is an exponential moving average with a decay rate of 1/exp
+ bound by the sampling interval. The decay rate interval can be
+ calculated by multiplying the win value in io.stat by the
+ corresponding number of samples based on the win value.
+
+ win
+ The sampling window size in milliseconds. This is the minimum
+ duration of time between evaluation events. Windows only elapse
+ with IO activity. Idle periods extend the most recent window.
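+
+ As an illustration (made-up device and target), a 20ms latency
+ target for device 8:0 would be configured with:
+
+ echo "8:0 target=20000" > io.latency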
+
PID
---
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index 48d70af11652..0873685bab0f 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -17,6 +17,15 @@ etc.
kernel-parameters
devices
+This section describes CPU vulnerabilities and provides an overview of the
+possible mitigations along with guidance for selecting mitigations if they
+are configurable at compile, boot or run time.
+
+.. toctree::
+ :maxdepth: 1
+
+ l1tf
+
Here is a set of documents aimed at users who are trying to track down
problems and bugs in particular.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 533ff5c68970..adafe47ac376 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -748,6 +748,14 @@
debug [KNL] Enable kernel debugging (events log level).
+ debug_boot_weak_hash
+ [KNL] Enable printing [hashed] pointers early in the
+ boot sequence. If enabled, we use a weak hash instead
+ of siphash to hash pointers. Use this option if you are
+ seeing instances of '(___ptrval___)' and need to see a
+ value (hashed pointer) instead. Cryptographically
+ insecure, please do not use on production kernels.
+
debug_locks_verbose=
[KNL] verbose self-tests
Format=<0|1>
@@ -816,6 +824,17 @@
disable= [IPV6]
See Documentation/networking/ipv6.txt.
+ hardened_usercopy=
+ [KNL] Under CONFIG_HARDENED_USERCOPY, whether
+ hardening is enabled for this boot. Hardened
+ usercopy checking is used to protect the kernel
+ from reading or writing beyond known memory
+ allocation boundaries as a proactive defense
+ against bounds-checking flaws in the kernel's
+ copy_to_user()/copy_from_user() interface.
+ on Perform hardened usercopy checks (default).
+ off Disable hardened usercopy checks.
+
disable_radix [PPC]
Disable RADIX MMU mode on POWER9
@@ -1967,10 +1986,84 @@
(virtualized real and unpaged mode) on capable
Intel chips. Default is 1 (enabled)
+ kvm-intel.vmentry_l1d_flush=[KVM,Intel] Mitigation for L1 Terminal Fault
+ CVE-2018-3620.
+
+ Valid arguments: never, cond, always
+
+ always: L1D cache flush on every VMENTER.
+ cond: Flush L1D on VMENTER only when the code between
+ VMEXIT and VMENTER can leak host memory.
+ never: Disables the mitigation.
+
+ Default is cond (do L1 cache flush in specific instances)
+
kvm-intel.vpid= [KVM,Intel] Disable Virtual Processor Identification
feature (tagged TLBs) on capable Intel chips.
Default is 1 (enabled)
+ l1tf= [X86] Control mitigation of the L1TF vulnerability on
+ affected CPUs
+
+ The kernel PTE inversion protection is unconditionally
+ enabled and cannot be disabled.
+
+ full
+ Provides all available mitigations for the
+ L1TF vulnerability. Disables SMT and
+ enables all mitigations in the
+ hypervisors, i.e. unconditional L1D flush.
+
+ SMT control and L1D flush control via the
+ sysfs interface is still possible after
+ boot. Hypervisors will issue a warning
+ when the first VM is started in a
+ potentially insecure configuration,
+ i.e. SMT enabled or L1D flush disabled.
+
+ full,force
+ Same as 'full', but disables SMT and L1D
+ flush runtime control. Implies the
+ 'nosmt=force' command line option.
+ (i.e. sysfs control of SMT is disabled.)
+
+ flush
+ Leaves SMT enabled and enables the default
+ hypervisor mitigation, i.e. conditional
+ L1D flush.
+
+ SMT control and L1D flush control via the
+ sysfs interface is still possible after
+ boot. Hypervisors will issue a warning
+ when the first VM is started in a
+ potentially insecure configuration,
+ i.e. SMT enabled or L1D flush disabled.
+
+ flush,nosmt
+
+ Disables SMT and enables the default
+ hypervisor mitigation.
+
+ SMT control and L1D flush control via the
+ sysfs interface is still possible after
+ boot. Hypervisors will issue a warning
+ when the first VM is started in a
+ potentially insecure configuration,
+ i.e. SMT enabled or L1D flush disabled.
+
+ flush,nowarn
+ Same as 'flush', but hypervisors will not
+ warn when a VM is started in a potentially
+ insecure configuration.
+
+ off
+ Disables hypervisor mitigations and doesn't
+ emit any warnings.
+
+ Default is 'flush'.
+
+ For details see: Documentation/admin-guide/l1tf.rst
+
l2cr= [PPC]
l3cr= [PPC]
@@ -2687,6 +2780,10 @@
nosmt [KNL,S390] Disable symmetric multithreading (SMT).
Equivalent to smt=1.
+ [KNL,x86] Disable symmetric multithreading (SMT).
+ nosmt=force: Force disable SMT, cannot be undone
+ via the sysfs control file.
+
nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
(indirect branch prediction) vulnerability. System may
allow data leaks with this option, which is equivalent
@@ -2835,8 +2932,6 @@
nosync [HW,M68K] Disables sync negotiation for all devices.
- notsc [BUGS=X86-32] Disable Time Stamp Counter
-
nowatchdog [KNL] Disable both lockup detectors, i.e.
soft-lockup and NMI watchdog (hard-lockup).
@@ -2994,8 +3089,31 @@
See header of drivers/block/paride/pcd.c.
See also Documentation/blockdev/paride.txt.
- pci=option[,option...] [PCI] various PCI subsystem options:
- earlydump [X86] dump PCI config space before the kernel
+ pci=option[,option...] [PCI] various PCI subsystem options.
+
+ Some options herein operate on a specific device
+ or a set of devices (<pci_dev>). These are
+ specified in one of the following formats:
+
+ [<domain>:]<bus>:<dev>.<func>[/<dev>.<func>]*
+ pci:<vendor>:<device>[:<subvendor>:<subdevice>]
+
+ Note: the first format specifies a PCI
+ bus/device/function address which may change
+ if new hardware is inserted, if motherboard
+ firmware changes, or due to changes caused
+ by other kernel parameters. If the
+ domain is left unspecified, it is
+ taken to be zero. Optionally, a path
+ to a device through multiple device/function
+ addresses can be specified after the base
+ address (this is more robust against
+ renumbering issues). The second format
+ selects devices using IDs from the
+ configuration space which may match multiple
+ devices in the system.
+
+ earlydump dump PCI config space before the kernel
changes anything
off [X86] don't probe for the PCI bus
bios [X86-32] force use of PCI BIOS, don't access
@@ -3123,11 +3241,10 @@
window. The default value is 64 megabytes.
resource_alignment=
Format:
- [<order of align>@][<domain>:]<bus>:<slot>.<func>[; ...]
- [<order of align>@]pci:<vendor>:<device>\
- [:<subvendor>:<subdevice>][; ...]
+ [<order of align>@]<pci_dev>[; ...]
Specifies alignment and device to reassign
- aligned memory resources.
+ aligned memory resources. How to
+ specify the device is described above.
If <order of align> is not specified,
PAGE_SIZE is used as alignment.
PCI-PCI bridge can be specified, if resource
@@ -3170,6 +3287,15 @@
Adding the window is slightly risky (it may
conflict with unreported devices), so this
taints the kernel.
+ disable_acs_redir=<pci_dev>[; ...]
+ Specify one or more PCI devices (in the format
+ specified above) separated by semicolons.
+ Each device specified will have the PCI ACS
+ redirect capabilities forced off which will
+ allow P2P traffic between devices through
+ bridges without forcing it upstream. Note:
+ this removes isolation between devices and
+ may put more devices in an IOMMU group.
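+ For example (made-up address and IDs), ACS
+ redirection could be disabled on one device by
+ address and on all matching devices by ID with:
+
+ pci=disable_acs_redir=0000:03:00.1;pci:8086:1533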
pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
Management.
@@ -3632,8 +3758,8 @@
Set time (s) after boot for CPU-hotplug testing.
rcutorture.onoff_interval= [KNL]
- Set time (s) between CPU-hotplug operations, or
- zero to disable CPU-hotplug testing.
+ Set time (jiffies) between CPU-hotplug operations,
+ or zero to disable CPU-hotplug testing.
rcutorture.shuffle_interval= [KNL]
Set task-shuffle interval (s). Shuffling tasks
@@ -4060,6 +4186,8 @@
This parameter controls whether the Speculative Store
Bypass optimization is used.
+ On x86 the options are:
+
on - Unconditionally disable Speculative Store Bypass
off - Unconditionally enable Speculative Store Bypass
auto - Kernel detects whether the CPU model contains an
@@ -4075,12 +4203,20 @@
seccomp - Same as "prctl" above, but all seccomp threads
will disable SSB unless they explicitly opt out.
- Not specifying this option is equivalent to
- spec_store_bypass_disable=auto.
-
Default mitigations:
X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
+ On powerpc the options are:
+
+ on,auto - On Power8 and Power9 insert a store-forwarding
+ barrier on kernel entry and exit. On Power7
+ perform a software flush on kernel entry and
+ exit.
+ off - No action.
+
+ Not specifying this option is equivalent to
+ spec_store_bypass_disable=auto.
+
spia_io_base= [HW,MTD]
spia_fio_base=
spia_pedr=
diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst
new file mode 100644
index 000000000000..bae52b845de0
--- /dev/null
+++ b/Documentation/admin-guide/l1tf.rst
@@ -0,0 +1,610 @@
+L1TF - L1 Terminal Fault
+========================
+
+L1 Terminal Fault is a hardware vulnerability which allows unprivileged
+speculative access to data which is available in the Level 1 Data Cache
+when the page table entry controlling the virtual address, which is used
+for the access, has the Present bit cleared or other reserved bits set.
+
+Affected processors
+-------------------
+
+This vulnerability affects a wide range of Intel processors. The
+vulnerability is not present on:
+
+ - Processors from AMD, Centaur and other non-Intel vendors
+
+ - Older processor models, where the CPU family is < 6
+
+ - A range of Intel ATOM processors (Cedarview, Cloverview, Lincroft,
+ Penwell, Pineview, Silvermont, Airmont, Merrifield)
+
+ - The Intel XEON PHI family
+
+ - Intel processors which have the ARCH_CAP_RDCL_NO bit set in the
+ IA32_ARCH_CAPABILITIES MSR. If the bit is set the CPU is not affected
+ by the Meltdown vulnerability either. These CPUs should become
+ available by end of 2018.
+
+Whether a processor is affected or not can be read out from the L1TF
+vulnerability file in sysfs. See :ref:`l1tf_sys_info`.
+
+Related CVEs
+------------
+
+The following CVE entries are related to the L1TF vulnerability:
+
+ ============= ================= ==============================
+ CVE-2018-3615 L1 Terminal Fault SGX related aspects
+ CVE-2018-3620 L1 Terminal Fault OS, SMM related aspects
+ CVE-2018-3646 L1 Terminal Fault Virtualization related aspects
+ ============= ================= ==============================
+
+Problem
+-------
+
+If an instruction accesses a virtual address for which the relevant page
+table entry (PTE) has the Present bit cleared or other reserved bits set,
+then speculative execution ignores the invalid PTE and loads the referenced
+data if it is present in the Level 1 Data Cache, as if the page referenced
+by the address bits in the PTE was still present and accessible.
+
+While this is a purely speculative mechanism and the instruction will raise
+a page fault when it is retired eventually, the pure act of loading the
+data and making it available to other speculative instructions opens up the
+opportunity for side channel attacks to unprivileged malicious code,
+similar to the Meltdown attack.
+
+While Meltdown breaks the user space to kernel space protection, L1TF
+allows attacks on any physical memory address in the system, and the attack
+works across all protection domains. It allows attacks on SGX and also
+works from inside virtual machines because the speculation bypasses the
+extended page table (EPT) protection mechanism.
+
+
+Attack scenarios
+----------------
+
+1. Malicious user space
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ Operating Systems store arbitrary information in the address bits of a
+ PTE which is marked non-present. This allows a malicious user space
+ application to attack the physical memory to which these PTEs resolve.
+ In some cases user-space can maliciously influence the information
+ encoded in the address bits of the PTE, thus making attacks more
+ deterministic and more practical.
+
+ The Linux kernel contains a mitigation for this attack vector, PTE
+ inversion, which is permanently enabled and has no performance
+ impact. The kernel ensures that the address bits of PTEs, which are not
+ marked present, never point to cacheable physical memory space.
+
+ A system with an up-to-date kernel is protected against attacks from
+ malicious user space applications.
+
+2. Malicious guest in a virtual machine
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The fact that L1TF breaks all domain protections allows malicious guest
+ OSes, which can control the PTEs directly, and malicious guest user
+ space applications, which run on an unprotected guest kernel lacking the
+ PTE inversion mitigation for L1TF, to attack physical host memory.
+
+ A special aspect of L1TF in the context of virtualization is symmetric
+ multi threading (SMT). The Intel implementation of SMT is called
+ HyperThreading. The fact that Hyperthreads on the affected processors
+ share the L1 Data Cache (L1D) is important for this. As the flaw only
+ allows attacking data which is present in L1D, a malicious guest running
+ on one Hyperthread can attack the data which is brought into the L1D by
+ the context which runs on the sibling Hyperthread of the same physical
+ core. This context can be host OS, host user space or a different guest.
+
+ If the processor does not support Extended Page Tables, the attack is
+ only possible when the hypervisor does not sanitize the content of the
+ effective (shadow) page tables.
+
+ While solutions exist to mitigate these attack vectors fully, these
+ mitigations are not enabled by default in the Linux kernel because they
+ can affect performance significantly. The kernel provides several
+ mechanisms which can be utilized to address the problem depending on the
+ deployment scenario. The mitigations, their protection scope and impact
+ are described in the next sections.
+
+ The default mitigations and the rationale for choosing them are explained
+ at the end of this document. See :ref:`default_mitigations`.
+
+.. _l1tf_sys_info:
+
+L1TF system information
+-----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current L1TF
+status of the system: whether the system is vulnerable, and which
+mitigations are active. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/l1tf
+
+The possible values in this file are:
+
+ =========================== ===============================
+ 'Not affected' The processor is not vulnerable
+ 'Mitigation: PTE Inversion' The host protection is active
+ =========================== ===============================
+
+If KVM/VMX is enabled and the processor is vulnerable then the following
+information is appended to the 'Mitigation: PTE Inversion' part:
+
+ - SMT status:
+
+ ===================== ================
+ 'VMX: SMT vulnerable' SMT is enabled
+ 'VMX: SMT disabled' SMT is disabled
+ ===================== ================
+
+ - L1D Flush mode:
+
+ ================================ ====================================
+ 'L1D vulnerable' L1D flushing is disabled
+
+ 'L1D conditional cache flushes' L1D flush is conditionally enabled
+
+ 'L1D cache flushes' L1D flush is unconditionally enabled
+ ================================ ====================================
+
+The resulting grade of protection is discussed in the following sections.
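+
+For illustration, on one hypothetical vulnerable host with KVM/VMX
+enabled and default settings the file might contain::
+
+   Mitigation: PTE Inversion; VMX: conditional cache flushes, SMT vulnerable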
+
+
+Host mitigation mechanism
+-------------------------
+
+The kernel is unconditionally protected against L1TF attacks from malicious
+user space running on the host.
+
+
+Guest mitigation mechanisms
+---------------------------
+
+.. _l1d_flush:
+
+1. L1D flush on VMENTER
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ To make sure that a guest cannot attack data which is present in the L1D
+ the hypervisor flushes the L1D before entering the guest.
+
+ Flushing the L1D evicts not only the data which should not be accessed
+ by a potentially malicious guest, but also the guest
+ data. Flushing the L1D has a performance impact as the processor has to
+ bring the flushed guest data back into the L1D. Depending on the
+ frequency of VMEXIT/VMENTER and the type of computations in the guest
+ performance degradation in the range of 1% to 50% has been observed. For
+ scenarios where guest VMEXIT/VMENTER are rare the performance impact is
+ minimal. Virtio and mechanisms like posted interrupts are designed to
+ confine the VMEXITs to a bare minimum, but specific configurations and
+ application scenarios might still suffer from a high VMEXIT rate.
+
+ The kernel provides two L1D flush modes:
+ - conditional ('cond')
+ - unconditional ('always')
+
+ The conditional mode avoids L1D flushing after VMEXITs which execute
+ only audited code paths before the corresponding VMENTER. These code
+ paths have been verified not to expose secrets or other
+ interesting data to an attacker, but they can leak information about the
+ address space layout of the hypervisor.
+
+ Unconditional mode flushes L1D on all VMENTER invocations and provides
+ maximum protection. It has a higher overhead than the conditional
+ mode. The overhead cannot be quantified correctly as it depends on the
+ workload scenario and the resulting number of VMEXITs.
+
+ The general recommendation is to enable L1D flush on VMENTER. The kernel
+ defaults to conditional mode on affected processors.
+
+ **Note** that L1D flush does not prevent the SMT problem because the
+ sibling thread will also bring back its data into the L1D which makes it
+ attackable again.
+
+ L1D flush can be controlled by the administrator via the kernel command
+ line and sysfs control files. See :ref:`mitigation_control_command_line`
+ and :ref:`mitigation_control_kvm`.
+
+.. _guest_confinement:
+
+2. Guest VCPU confinement to dedicated physical cores
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ To address the SMT problem, it is possible to make a guest or a group of
+ guests affine to one or more physical cores. The proper mechanism for
+ that is to utilize exclusive cpusets to ensure that no other guest or
+ host tasks can run on these cores.
+
+ If only a single guest or related guests run on sibling SMT threads on
+ the same physical core then they can only attack their own memory and
+ restricted parts of the host memory.
+
+ Host memory is attackable when one of the sibling SMT threads runs in
+ host OS (hypervisor) context and the other in guest context. The amount
+ of valuable information from the host OS context depends on the context
+ in which the host OS executes, i.e. interrupts, soft interrupts and kernel
+ threads. The amount of valuable data from these contexts cannot be
+ declared as non-interesting for an attacker without deep inspection of
+ the code.
+
+ **Note** that assigning guests to a fixed set of physical cores affects
+ the ability of the scheduler to do load balancing and might have
+ negative effects on CPU utilization depending on the hosting
+ scenario. Disabling SMT might be a viable alternative for particular
+ scenarios.
+
+ For further information about confining guests to a single or to a group
+ of cores consult the cpusets documentation:
+
+ https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt
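+
+ As an illustration only (made-up group name and CPU numbers, cgroup v1
+ cpuset interface), a guest's vCPUs could be confined to an exclusive
+ pair of cores like this::
+
+   mkdir /sys/fs/cgroup/cpuset/guest0
+   echo 4-5 > /sys/fs/cgroup/cpuset/guest0/cpuset.cpus
+   echo 0   > /sys/fs/cgroup/cpuset/guest0/cpuset.mems
+   echo 1   > /sys/fs/cgroup/cpuset/guest0/cpuset.cpu_exclusive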
+
+.. _interrupt_isolation:
+
+3. Interrupt affinity
+^^^^^^^^^^^^^^^^^^^^^
+
+ Interrupts can be made affine to logical CPUs. This is not universally
+ true because there are types of interrupts which are truly per CPU
+ interrupts, e.g. the local timer interrupt. Aside from that, multi-queue
+ devices affine their interrupts to single CPUs or groups of CPUs per
+ queue without allowing the administrator to control the affinities.
+
+ Moving the interrupts, which can be affinity controlled, away from CPUs
+ which run untrusted guests, reduces the attack vector space.
+
+ Whether the interrupts which are affine to CPUs that run untrusted
+ guests provide interesting data for an attacker depends on the system
+ configuration and the scenarios which run on the system. While for some
+ of the interrupts it can be assumed that they won't expose interesting
+ information beyond exposing hints about the host OS memory layout, there
+ is no way to make general assumptions.
+
+ Interrupt affinity can be controlled by the administrator via the
+ /proc/irq/$NR/smp_affinity[_list] files. Limited documentation is
+ available at:
+
+ https://www.kernel.org/doc/Documentation/IRQ-affinity.txt
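+
+ As an illustration (made-up IRQ number), an interrupt can be kept on
+ CPUs 0-1, away from the CPUs running untrusted guests, with::
+
+   echo 0-1 > /proc/irq/42/smp_affinity_list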
+
+.. _smt_control:
+
+4. SMT control
+^^^^^^^^^^^^^^
+
+ To prevent the SMT issues of L1TF it might be necessary to disable SMT
+ completely. Disabling SMT can have a significant performance impact, but
+ the impact depends on the hosting scenario and the type of workloads.
+ The impact of disabling SMT also needs to be weighed against the impact
+ of other mitigation solutions like confining guests to dedicated cores.
+
+ The kernel provides a sysfs interface to retrieve the status of SMT and
+ to control it. It also provides a kernel command line interface to
+ control SMT.
+
+ The kernel command line interface consists of the following options:
+
+ =========== ==========================================================
+ nosmt Affects the bring up of the secondary CPUs during boot. The
+ kernel tries to bring all present CPUs online during the
+ boot process. "nosmt" makes sure that from each physical
+ core only one, the so-called primary (hyper) thread, is
+ activated. Due to a design flaw of Intel processors related
+ to Machine Check Exceptions the non-primary siblings have
+ to be brought up at least partially and are then shut down
+ again. "nosmt" can be undone via the sysfs interface.
+
+ nosmt=force Has the same effect as "nosmt" but does not allow
+ undoing the SMT disable via the sysfs interface.
+ =========== ==========================================================
+
+ The sysfs interface provides two files:
+
+ - /sys/devices/system/cpu/smt/control
+ - /sys/devices/system/cpu/smt/active
+
+ /sys/devices/system/cpu/smt/control:
+
+ This file allows reading out the SMT control state and provides the
+ ability to disable or (re)enable SMT. The possible states are:
+
+ ============== ===================================================
+ on SMT is supported by the CPU and enabled. All
+ logical CPUs can be onlined and offlined without
+ restrictions.
+
+ off SMT is supported by the CPU and disabled. Only
+ the so called primary SMT threads can be onlined
+ and offlined without restrictions. An attempt to
+ online a non-primary sibling is rejected.
+
+ forceoff Same as 'off' but the state cannot be controlled.
+ Attempts to write to the control file are rejected.
+
+ notsupported The processor does not support SMT. It's therefore
+ not affected by the SMT implications of L1TF.
+ Attempts to write to the control file are rejected.
+ ============== ===================================================
+
+ The possible states which can be written into this file to control SMT
+ state are:
+
+ - on
+ - off
+ - forceoff
+
+ /sys/devices/system/cpu/smt/active:
+
+ This file reports whether SMT is enabled and active, i.e. if on any
+ physical core two or more sibling threads are online.
+
+ SMT control is also possible at boot time via the l1tf kernel command
+ line parameter in combination with L1D flush control. See
+ :ref:`mitigation_control_command_line`.
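+
+ For example, SMT can be disabled at runtime and the result verified
+ with::
+
+   echo off > /sys/devices/system/cpu/smt/control
+   cat /sys/devices/system/cpu/smt/active
+   0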
+
+5. Disabling EPT
+^^^^^^^^^^^^^^^^
+
+ Disabling EPT for virtual machines provides full mitigation for L1TF even
+ with SMT enabled, because the effective page tables for guests are
+ managed and sanitized by the hypervisor. Disabling EPT, however, has a
+ significant performance impact, especially when the Meltdown mitigation
+ KPTI is enabled.
+
+ EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter.
+
+There is ongoing research and development for new mitigation mechanisms to
+address the performance impact of disabling SMT or EPT.
+
+.. _mitigation_control_command_line:
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows controlling the L1TF mitigations at boot
+time with the option "l1tf=". The valid arguments for this option are:
+
+ ============ =============================================================
+ full Provides all available mitigations for the L1TF
+ vulnerability. Disables SMT and enables all mitigations in
+ the hypervisors, i.e. unconditional L1D flushing
+
+ SMT control and L1D flush control via the sysfs interface
+ is still possible after boot. Hypervisors will issue a
+ warning when the first VM is started in a potentially
+ insecure configuration, i.e. SMT enabled or L1D flush
+ disabled.
+
+ full,force Same as 'full', but disables SMT and L1D flush runtime
+ control. Implies the 'nosmt=force' command line option.
+ (i.e. sysfs control of SMT is disabled.)
+
+ flush Leaves SMT enabled and enables the default hypervisor
+ mitigation, i.e. conditional L1D flushing
+
+ SMT control and L1D flush control via the sysfs interface
+ is still possible after boot. Hypervisors will issue a
+ warning when the first VM is started in a potentially
+ insecure configuration, i.e. SMT enabled or L1D flush
+ disabled.
+
+ flush,nosmt Disables SMT and enables the default hypervisor mitigation,
+ i.e. conditional L1D flushing.
+
+ SMT control and L1D flush control via the sysfs interface
+ is still possible after boot. Hypervisors will issue a
+ warning when the first VM is started in a potentially
+ insecure configuration, i.e. SMT enabled or L1D flush
+ disabled.
+
+ flush,nowarn Same as 'flush', but hypervisors will not warn when a VM is
+ started in a potentially insecure configuration.
+
+ off Disables hypervisor mitigations and doesn't emit any
+ warnings.
+ ============ =============================================================
+
+The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
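+
+As an illustration, a host running untrusted guests that can tolerate
+losing SMT might boot with::
+
+   l1tf=flush,nosmt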
+
+
+.. _mitigation_control_kvm:
+
+Mitigation control for KVM - module parameter
+-------------------------------------------------------------
+
+The KVM hypervisor mitigation mechanism, flushing the L1D cache when
+entering a guest, can be controlled with a module parameter.
+
+The option/parameter is "kvm-intel.vmentry_l1d_flush=". It takes the
+following arguments:
+
+ ============ ==============================================================
+ always L1D cache flush on every VMENTER.
+
+ cond Flush L1D on VMENTER only when the code between VMEXIT and
+ VMENTER can leak host memory which is considered
+ interesting for an attacker. This can still leak host memory
+ which allows, e.g., determining the host's address space layout.
+
+ never Disables the mitigation.
+ ============ ==============================================================
+
+The parameter can be provided on the kernel command line, as a module
+parameter when loading the module, and modified at runtime via the sysfs
+file:
+
+/sys/module/kvm_intel/parameters/vmentry_l1d_flush
+
+The default is 'cond'. If 'l1tf=full,force' is given on the kernel command
+line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush
+module parameter is ignored and writes to the sysfs file are rejected.
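+
+For example (illustrative), the unconditional flush can be selected at
+module load time or at runtime with::
+
+   modprobe kvm_intel vmentry_l1d_flush=always
+   echo always > /sys/module/kvm_intel/parameters/vmentry_l1d_flush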
+
+
+Mitigation selection guide
+--------------------------
+
+1. No virtualization in use
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The system is protected by the kernel unconditionally and no further
+ action is required.
+
+2. Virtualization with trusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ If the guest comes from a trusted source and the guest OS kernel is
+ guaranteed to have the L1TF mitigations in place the system is fully
+ protected against L1TF and no further action is required.
+
+ To avoid the overhead of the default L1D flushing on VMENTER the
+ administrator can disable the flushing via the kernel command line and
+ sysfs control files. See :ref:`mitigation_control_command_line` and
+ :ref:`mitigation_control_kvm`.
+
+
+3. Virtualization with untrusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+3.1. SMT not supported or disabled
+""""""""""""""""""""""""""""""""""
+
+ If SMT is not supported by the processor or disabled in the BIOS or by
+ the kernel, it's only required to enforce L1D flushing on VMENTER.
+
+ Conditional L1D flushing is the default behaviour and can be tuned. See
+ :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`.
+
+3.2. EPT not supported or disabled
+""""""""""""""""""""""""""""""""""
+
+ If EPT is not supported by the processor or disabled in the hypervisor,
+ the system is fully protected. SMT can stay enabled and L1D flushing on
+ VMENTER is not required.
+
+ EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter.
+
+3.3. SMT and EPT supported and active
+"""""""""""""""""""""""""""""""""""""
+
+ If SMT and EPT are supported and active then various degrees of
+ mitigations can be employed:
+
+ - L1D flushing on VMENTER:
+
+ L1D flushing on VMENTER is the minimal protection requirement, but it
+ is only potent in combination with other mitigation methods.
+
+ Conditional L1D flushing is the default behaviour and can be tuned. See
+ :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`.
+
+ - Guest confinement:
+
+ Confinement of guests to a single or a group of physical cores which
+ are not running any other processes can reduce the attack surface
+ significantly, but interrupts, soft interrupts and kernel threads can
+ still expose valuable data to a potential attacker. See
+ :ref:`guest_confinement`.
+
+ - Interrupt isolation:
+
+ Isolating the guest CPUs from interrupts can reduce the attack surface
+ further, but still allows a malicious guest to explore a limited amount
+ of host physical memory. This can at least be used to gain knowledge
+ about the host address space layout. The interrupts which have a fixed
+ affinity to the CPUs which run the untrusted guests can, depending on
+ the scenario, still trigger soft interrupts and schedule kernel threads
+ which might expose valuable information. See
+ :ref:`interrupt_isolation`.
+
+The above three mitigation methods combined can provide protection to a
+certain degree, but the risk of the remaining attack surface has to be
+carefully analyzed. For full protection the following methods are
+available:
+
+ - Disabling SMT:
+
+ Disabling SMT and enforcing the L1D flushing provides the maximum
+ amount of protection. This mitigation does not depend on any of the
+ above mitigation methods.
+
+ SMT control and L1D flushing can be tuned by the command line
+ parameters 'nosmt', 'l1tf', 'kvm-intel.vmentry_l1d_flush' and at run
+ time with the matching sysfs control files. See :ref:`smt_control`,
+ :ref:`mitigation_control_command_line` and
+ :ref:`mitigation_control_kvm`.
+
+ - Disabling EPT:
+
+ Disabling EPT provides the maximum amount of protection as well. It
+ does not depend on any of the above mitigation methods. SMT can stay
+ enabled and L1D flushing is not required, but the performance impact is
+ significant.
+
+ EPT can be disabled in the hypervisor via the 'kvm-intel.ept'
+ parameter.
+
+3.4. Nested virtual machines
+""""""""""""""""""""""""""""
+
+When nested virtualization is in use, three operating systems are involved:
+the bare metal hypervisor, the nested hypervisor and the nested virtual
+machine. VMENTER operations from the nested hypervisor into the nested
+guest will always be processed by the bare metal hypervisor. If KVM is the
+bare metal hypervisor it will:
+
+ - Flush the L1D cache on every switch from the nested hypervisor to the
+ nested virtual machine, so that the nested hypervisor's secrets are not
+ exposed to the nested virtual machine;
+
+ - Flush the L1D cache on every switch from the nested virtual machine to
+ the nested hypervisor; this is a complex operation, and flushing the L1D
+ cache prevents the bare metal hypervisor's secrets from being exposed
+ to the nested virtual machine;
+
+ - Instruct the nested hypervisor to not perform any L1D cache flush. This
+ is an optimization to avoid double L1D flushing.
+
+
+.. _default_mitigations:
+
+Default mitigations
+-------------------
+
+ The kernel default mitigations for vulnerable processors are:
+
+ - PTE inversion to protect against malicious user space. This is done
+ unconditionally and cannot be controlled.
+
+ - L1D conditional flushing on VMENTER when EPT is enabled for
+ a guest.
+
+ The kernel does not by default enforce the disabling of SMT, which leaves
+ SMT systems vulnerable when running untrusted guests with EPT enabled.
+
+ The rationale for this choice is:
+
+ - Force disabling SMT can break existing setups, especially with
+ unattended updates.
+
+ - If regular users run untrusted guests on their machine, then L1TF is
+ just an add-on to other malware which might be embedded in an untrusted
+ guest, e.g. spam-bots or attacks on the local network.
+
+ There is no technical way to prevent a user from running untrusted code
+ on their machines blindly.
+
+ - It's technically extremely unlikely and from today's knowledge even
+ impossible that L1TF can be exploited via the most popular attack
+ mechanisms like JavaScript because these mechanisms have no way to
+ control PTEs. If this were possible and no other mitigation were
+ available, then the default might be different.
+
+ - The administrators of cloud and hosting setups have to carefully
+ analyze the risk for their scenarios and make the appropriate
+ mitigation choices, which might even vary across their deployed
+ machines and also result in other changes of their overall setup.
+ There is no way for the kernel to provide a sensible default for this
+ kind of scenario.
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index 07f147381f32..ea2dafe49ae8 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -85,3 +85,10 @@ shared_tags=[0/1]: Default: 0
0: Tag set is not shared.
1: Tag set shared between devices for blk-mq. Only makes sense with
nr_devices > 1, otherwise there's no tag set to share.
+
+zoned=[0/1]: Default: 0
+ 0: Block device is exposed as a random-access block device.
+ 1: Block device is exposed as a host-managed zoned block device.
+
+zone_size=[MB]: Default: 256
+ Per zone size when exposed as a zoned block device. Must be a power of two.
diff --git a/Documentation/block/stat.txt b/Documentation/block/stat.txt
index 0dbc946de2ea..0aace9cc536c 100644
--- a/Documentation/block/stat.txt
+++ b/Documentation/block/stat.txt
@@ -31,28 +31,32 @@ write ticks milliseconds total wait time for write requests
in_flight requests number of I/Os currently in flight
io_ticks milliseconds total time this block device has been active
time_in_queue milliseconds total wait time for all requests
+discard I/Os requests number of discard I/Os processed
+discard merges requests number of discard I/Os merged with in-queue I/O
+discard sectors sectors number of sectors discarded
+discard ticks milliseconds total wait time for discard requests
-read I/Os, write I/Os
-=====================
+read I/Os, write I/Os, discard I/Os
+===================================
These values increment when an I/O request completes.
-read merges, write merges
-=========================
+read merges, write merges, discard merges
+=========================================
These values increment when an I/O request is merged with an
already-queued I/O request.
-read sectors, write sectors
-===========================
+read sectors, write sectors, discard sectors
+============================================
-These values count the number of sectors read from or written to this
-block device. The "sectors" in question are the standard UNIX 512-byte
-sectors, not any device- or filesystem-specific block size. The
-counters are incremented when the I/O completes.
+These values count the number of sectors read from, written to, or
+discarded from this block device. The "sectors" in question are the
+standard UNIX 512-byte sectors, not any device- or filesystem-specific
+block size. The counters are incremented when the I/O completes.
-read ticks, write ticks
-=======================
+read ticks, write ticks, discard ticks
+======================================
These values count the number of milliseconds that I/O requests have
waited on this block device. If there are multiple I/O requests waiting,
diff --git a/Documentation/bpf/bpf_devel_QA.rst b/Documentation/bpf/bpf_devel_QA.rst
index 0e7c1d946e83..c9856b927055 100644
--- a/Documentation/bpf/bpf_devel_QA.rst
+++ b/Documentation/bpf/bpf_devel_QA.rst
@@ -106,9 +106,9 @@ into the bpf-next tree will make their way into net-next tree. net and
net-next are both run by David S. Miller. From there, they will go
into the kernel mainline tree run by Linus Torvalds. To read up on the
process of net and net-next being merged into the mainline tree, see
-the `netdev FAQ`_ under:
+the :ref:`netdev-FAQ`.
+
- `Documentation/networking/netdev-FAQ.txt`_
Occasionally, to prevent merge conflicts, we might send pull requests
to other trees (e.g. tracing) with a small subset of the patches, but
@@ -125,8 +125,8 @@ request)::
Q: How do I indicate which tree (bpf vs. bpf-next) my patch should be applied to?
---------------------------------------------------------------------------------
-A: The process is the very same as described in the `netdev FAQ`_, so
-please read up on it. The subject line must indicate whether the
+A: The process is the very same as described in the :ref:`netdev-FAQ`,
+so please read up on it. The subject line must indicate whether the
patch is a fix or rather "next-like" content in order to let the
maintainers know whether it is targeted at bpf or bpf-next.
@@ -184,7 +184,7 @@ ii) run extensive BPF test suite and
Once the BPF pull request was accepted by David S. Miller, then
the patches end up in net or net-next tree, respectively, and
make their way from there further into mainline. Again, see the
-`netdev FAQ`_ for additional information e.g. on how often they are
+:ref:`netdev-FAQ` for additional information e.g. on how often they are
merged to mainline.
Q: How long do I need to wait for feedback on my BPF patches?
@@ -208,7 +208,7 @@ Q: Are patches applied to bpf-next when the merge window is open?
-----------------------------------------------------------------
A: For the time when the merge window is open, bpf-next will not be
processed. This is roughly analogous to net-next patch processing,
+so feel free to read up on the :ref:`netdev-FAQ` for further details.
+so feel free to read up on the :ref:`netdev-FAQ` about further details.
During those two weeks of merge window, we might ask you to resend
your patch series once bpf-next is open again. Once Linus released
@@ -372,7 +372,7 @@ netdev kernel mailing list in Cc and ask for the fix to be queued up:
netdev@vger.kernel.org
The process in general is the same as on netdev itself, see also the
-`netdev FAQ`_ document.
+:ref:`netdev-FAQ`.
Q: Do you also backport to kernels not currently maintained as stable?
----------------------------------------------------------------------
@@ -388,9 +388,7 @@ Q: The BPF patch I am about to submit needs to go to stable as well
What should I do?
A: The same rules apply as with netdev patch submissions in general, see
-`netdev FAQ`_ under:
-
- `Documentation/networking/netdev-FAQ.txt`_
+the :ref:`netdev-FAQ`.
Never add "``Cc: stable@vger.kernel.org``" to the patch description, but
ask the BPF maintainers to queue the patches instead. This can be done
@@ -630,8 +628,7 @@ when:
.. Links
.. _Documentation/process/: https://www.kernel.org/doc/html/latest/process/
.. _MAINTAINERS: ../../MAINTAINERS
-.. _Documentation/networking/netdev-FAQ.txt: ../networking/netdev-FAQ.txt
-.. _netdev FAQ: ../networking/netdev-FAQ.txt
+.. _netdev-FAQ: ../networking/netdev-FAQ.rst
.. _samples/bpf/: ../../samples/bpf/
.. _selftests: ../../tools/testing/selftests/bpf/
.. _Documentation/dev-tools/kselftest.rst:
diff --git a/Documentation/bpf/README.rst b/Documentation/bpf/index.rst
index b9a80c9e9392..00a8450a602f 100644
--- a/Documentation/bpf/README.rst
+++ b/Documentation/bpf/index.rst
@@ -1,5 +1,5 @@
=================
-BPF documentation
+BPF Documentation
=================
This directory contains documentation for the BPF (Berkeley Packet
@@ -22,14 +22,14 @@ Frequently asked questions (FAQ)
Two sets of Questions and Answers (Q&A) are maintained.
-* QA for common questions about BPF see: bpf_design_QA_
+.. toctree::
+ :maxdepth: 1
-* QA for developers interacting with BPF subsystem: bpf_devel_QA_
+ bpf_design_QA
+ bpf_devel_QA
.. Links:
-.. _bpf_design_QA: bpf_design_QA.rst
-.. _bpf_devel_QA: bpf_devel_QA.rst
.. _Documentation/networking/filter.txt: ../networking/filter.txt
.. _man-pages: https://www.kernel.org/doc/man-pages/
.. _bpf(2): http://man7.org/linux/man-pages/man2/bpf.2.html
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 62ac5a9f3a9f..b691af4831fa 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -34,7 +34,7 @@ needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include', 'cdomain', 'kfigure']
+extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include', 'cdomain', 'kfigure', 'sphinx.ext.ifconfig']
# The name of the math extension changed on Sphinx 1.4
if major == 1 and minor > 3:
diff --git a/Documentation/console/console.txt b/Documentation/console/console.txt
index f93810d599ad..d73c2ab4beda 100644
--- a/Documentation/console/console.txt
+++ b/Documentation/console/console.txt
@@ -1,7 +1,7 @@
Console Drivers
===============
-The linux kernel has 2 general types of console drivers. The first type is
+The Linux kernel has 2 general types of console drivers. The first type is
assigned by the kernel to all the virtual consoles during the boot process.
This type will be called 'system driver', and only one system driver is allowed
to exist. The system driver is persistent and it can never be unloaded, though
@@ -17,10 +17,11 @@ of driver occupying the consoles.) They can only take over the console that is
occupied by the system driver. In the same token, if the modular driver is
released by the console, the system driver will take over.
-Modular drivers, from the programmer's point of view, has to call:
+Modular drivers, from the programmer's point of view, have to call:
do_take_over_console() - load and bind driver to console layer
- give_up_console() - unload driver, it will only work if driver is fully unbond
+ give_up_console() - unload driver; it will only work if driver
+ is fully unbound
In newer kernels, the following are also available:
@@ -56,7 +57,7 @@ What do these files signify?
cat /sys/class/vtconsole/vtcon0/name
(S) VGA+
- '(S)' stands for a (S)ystem driver, ie, it cannot be directly
+ '(S)' stands for a (S)ystem driver, i.e., it cannot be directly
commanded to bind or unbind
'VGA+' is the name of the driver
@@ -89,7 +90,7 @@ driver, make changes, recompile, reload and rebind the driver without any need
for rebooting the kernel. For regular users who may want to switch from
framebuffer console to VGA console and vice versa, this feature also makes
this possible. (NOTE NOTE NOTE: Please read fbcon.txt under Documentation/fb
-for more details).
+for more details.)
Notes for developers:
=====================
@@ -110,8 +111,8 @@ In order for binding to and unbinding from the console to properly work,
console drivers must follow these guidelines:
1. All drivers, except system drivers, must call either do_register_con_driver()
- or do_take_over_console(). do_register_con_driver() will just add the driver to
- the console's internal list. It won't take over the
+ or do_take_over_console(). do_register_con_driver() will just add the driver
+ to the console's internal list. It won't take over the
console. do_take_over_console(), as its name implies, will also take over (or
bind to) the console.
diff --git a/Documentation/core-api/atomic_ops.rst b/Documentation/core-api/atomic_ops.rst
index 2e7165f86f55..724583453e1f 100644
--- a/Documentation/core-api/atomic_ops.rst
+++ b/Documentation/core-api/atomic_ops.rst
@@ -29,7 +29,7 @@ updated by one CPU, local_t is probably more appropriate. Please see
local_t.
The first operations to implement for atomic_t's are the initializers and
-plain reads. ::
+plain writes. ::
#define ATOMIC_INIT(i) { (i) }
#define atomic_set(v, i) ((v)->counter = (i))
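+
+As a brief illustration (a sketch added here for clarity, not part of
+the kernel's example set), a driver-private counter would use these
+two operations as::
+
+    static atomic_t pending = ATOMIC_INIT(0);   /* build-time initializer */
+
+    static void reset_pending(void)
+    {
+        atomic_set(&pending, 0);                /* plain, unordered write */
+    }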
diff --git a/Documentation/core-api/boot-time-mm.rst b/Documentation/core-api/boot-time-mm.rst
new file mode 100644
index 000000000000..03cb1643f46f
--- /dev/null
+++ b/Documentation/core-api/boot-time-mm.rst
@@ -0,0 +1,92 @@
+===========================
+Boot time memory management
+===========================
+
+Early system initialization cannot use "normal" memory management
+simply because it is not set up yet. But there is still a need to
+allocate memory for various data structures, for instance for the
+physical page allocator. To address this, a specialized allocator
+called the :ref:`Boot Memory Allocator <bootmem>`, or bootmem, was
+introduced. Several years later PowerPC developers added a "Logical
+Memory Blocks" allocator, which was later adopted by other
+architectures and renamed to :ref:`memblock <memblock>`. There is also
+a compatibility layer called `nobootmem` that translates bootmem
+allocation interfaces to memblock calls.
+
+The selection of the early allocator is done using
+``CONFIG_NO_BOOTMEM`` and ``CONFIG_HAVE_MEMBLOCK`` kernel
+configuration options. These options are enabled or disabled
+statically by the architectures' Kconfig files.
+
+* Architectures that rely only on bootmem select
+ ``CONFIG_NO_BOOTMEM=n && CONFIG_HAVE_MEMBLOCK=n``.
+* The users of memblock with the nobootmem compatibility layer set
+ ``CONFIG_NO_BOOTMEM=y && CONFIG_HAVE_MEMBLOCK=y``.
+* And for those that use both memblock and bootmem the configuration
+ includes ``CONFIG_NO_BOOTMEM=n && CONFIG_HAVE_MEMBLOCK=y``.
+
+Whichever allocator is used, it is the responsibility of the
+architecture specific initialization to set it up in the
+:c:func:`setup_arch` function and tear it down in :c:func:`mem_init`.
+
+Once the early memory management is available it offers a variety of
+functions and macros for memory allocations. The allocation request
+may be directed to the first (and probably the only) node or to a
+particular node in a NUMA system. There are API variants that panic
+when an allocation fails and those that don't. The more recent and more
+capable memblock allocator even allows controlling its own behaviour.
+
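+As a hedged sketch (assuming a memblock-based configuration and the
+memblock_virt_alloc() helper declared in include/linux/bootmem.h;
+setup_my_table() is a hypothetical consumer), an early allocation in
+architecture setup code might look like::
+
+    #include <linux/bootmem.h>
+
+    static void __init alloc_my_table(void)
+    {
+        void *table;
+
+        /* page-aligned early allocation; panics if memory is exhausted */
+        table = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
+        setup_my_table(table);
+    }
+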
+.. _bootmem:
+
+Bootmem
+=======
+
+(mostly stolen from Mel Gorman's "Understanding the Linux Virtual
+Memory Manager" `book`_)
+
+.. _book: https://www.kernel.org/doc/gorman/
+
+.. kernel-doc:: mm/bootmem.c
+ :doc: bootmem overview
+
+.. _memblock:
+
+Memblock
+========
+
+.. kernel-doc:: mm/memblock.c
+ :doc: memblock overview
+
+
+Functions and structures
+========================
+
+Common API
+----------
+
+The functions that are described in this section are available
+regardless of what early memory manager is enabled.
+
+.. kernel-doc:: mm/nobootmem.c
+
+Bootmem specific API
+--------------------
+
+These interfaces are available only with bootmem, i.e. when ``CONFIG_NO_BOOTMEM=n``.
+
+.. kernel-doc:: include/linux/bootmem.h
+.. kernel-doc:: mm/bootmem.c
+ :nodocs:
+
+Memblock specific API
+---------------------
+
+Here is the description of memblock data structures, functions and
+macros. Some of them are actually internal, but since they are
+documented it would be silly to omit them. Besides, reading the
+descriptions for the internal functions can help to understand what
+really happens under the hood.
+
+.. kernel-doc:: include/linux/memblock.h
+.. kernel-doc:: mm/memblock.c
+ :nodocs:
diff --git a/Documentation/core-api/idr.rst b/Documentation/core-api/idr.rst
index 9078a5c3ac95..d351e880a2f6 100644
--- a/Documentation/core-api/idr.rst
+++ b/Documentation/core-api/idr.rst
@@ -76,4 +76,6 @@ Functions and structures
========================
.. kernel-doc:: include/linux/idr.h
+ :functions:
.. kernel-doc:: lib/idr.c
+ :functions:
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index f5a66b72f984..b5379fb740a5 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -28,6 +28,8 @@ Core utilities
printk-formats
circular-buffers
gfp_mask-from-fs-io
+ timekeeping
+ boot-time-mm
Interfaces for kernel debugging
===============================
diff --git a/Documentation/core-api/timekeeping.rst b/Documentation/core-api/timekeeping.rst
new file mode 100644
index 000000000000..93cbeb9daec0
--- /dev/null
+++ b/Documentation/core-api/timekeeping.rst
@@ -0,0 +1,185 @@
+ktime accessors
+===============
+
+Device drivers can read the current time using ktime_get() and the many
+related functions declared in linux/timekeeping.h. As a rule of thumb,
+using an accessor with a shorter name is preferred over one with a longer
+name if both are equally fit for a particular use case.
+
+Basic ktime_t based interfaces
+------------------------------
+
+The recommended simplest form returns an opaque ktime_t, with variants
+that return time for different clock references:
+
+
+.. c:function:: ktime_t ktime_get( void )
+
+ CLOCK_MONOTONIC
+
+ Useful for reliable timestamps and measuring short time intervals
+ accurately. Starts at system boot time but stops during suspend.
+
+.. c:function:: ktime_t ktime_get_boottime( void )
+
+ CLOCK_BOOTTIME
+
+ Like ktime_get(), but does not stop when suspended. This can be
+ used e.g. for key expiration times that need to be synchronized
+ with other machines across a suspend operation.
+
+.. c:function:: ktime_t ktime_get_real( void )
+
+ CLOCK_REALTIME
+
+ Returns the time relative to the UNIX epoch starting in 1970
+ using Coordinated Universal Time (UTC), same as gettimeofday()
+ in user space. This is used for all timestamps that need to
+ persist across a reboot, like inode times, but should be avoided
+ for internal uses, since it can jump backwards due to a leap
+ second update, NTP adjustment or settimeofday() operation from
+ user space.
+
+.. c:function:: ktime_t ktime_get_clocktai( void )
+
+ CLOCK_TAI
+
+ Like ktime_get_real(), but uses the International Atomic Time (TAI)
+ reference instead of UTC to avoid jumping on leap second updates.
+ This is rarely useful in the kernel.
+
+.. c:function:: ktime_t ktime_get_raw( void )
+
+ CLOCK_MONOTONIC_RAW
+
+ Like ktime_get(), but runs at the same rate as the hardware
+ clocksource without (NTP) adjustments for clock drift. This is
+ also rarely needed in the kernel.
+
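+For instance, a driver can measure how long an operation took with the
+monotonic clock (a minimal sketch; do_transfer() stands in for the
+driver's own work)::
+
+    ktime_t start, delta;
+
+    start = ktime_get();
+    do_transfer();
+    delta = ktime_sub(ktime_get(), start);
+    pr_debug("transfer took %lld ns\n", ktime_to_ns(delta));
+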
+nanosecond, timespec64, and second output
+-----------------------------------------
+
+For all of the above, there are variants that return the time in a
+different format depending on what is required by the user:
+
+.. c:function:: u64 ktime_get_ns( void )
+ u64 ktime_get_boottime_ns( void )
+ u64 ktime_get_real_ns( void )
+ u64 ktime_get_tai_ns( void )
+ u64 ktime_get_raw_ns( void )
+
+ Same as the plain ktime_get functions, but returning a u64 number
+ of nanoseconds in the respective time reference, which may be
+ more convenient for some callers.
+
+.. c:function:: void ktime_get_ts64( struct timespec64 * )
+ void ktime_get_boottime_ts64( struct timespec64 * )
+ void ktime_get_real_ts64( struct timespec64 * )
+ void ktime_get_clocktai_ts64( struct timespec64 * )
+ void ktime_get_raw_ts64( struct timespec64 * )
+
+ Same as above, but returns the time in a 'struct timespec64', split
+ into seconds and nanoseconds. This can avoid an extra division
+ when printing the time, or when passing it into an external
+ interface that expects a 'timespec' or 'timeval' structure.
+
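+For example, printing a wall-clock timestamp without a separate division
+(a sketch, not taken from an in-tree driver)::
+
+    struct timespec64 ts;
+
+    ktime_get_real_ts64(&ts);
+    pr_info("wall time: %lld.%09ld\n",
+            (long long)ts.tv_sec, ts.tv_nsec);
+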
+.. c:function:: time64_t ktime_get_seconds( void )
+ time64_t ktime_get_boottime_seconds( void )
+ time64_t ktime_get_real_seconds( void )
+ time64_t ktime_get_clocktai_seconds( void )
+ time64_t ktime_get_raw_seconds( void )
+
+ Return a coarse-grained version of the time as a scalar
+ time64_t. This avoids accessing the clock hardware and rounds
+ down the seconds to the full seconds of the last timer tick
+ using the respective reference.
+
+Coarse and fast_ns access
+-------------------------
+
+Some additional variants exist for more specialized cases:
+
+.. c:function:: ktime_t ktime_get_coarse_boottime( void )
+ ktime_t ktime_get_coarse_real( void )
+ ktime_t ktime_get_coarse_clocktai( void )
+ ktime_t ktime_get_coarse_raw( void )
+
+.. c:function:: void ktime_get_coarse_ts64( struct timespec64 * )
+ void ktime_get_coarse_boottime_ts64( struct timespec64 * )
+ void ktime_get_coarse_real_ts64( struct timespec64 * )
+ void ktime_get_coarse_clocktai_ts64( struct timespec64 * )
+ void ktime_get_coarse_raw_ts64( struct timespec64 * )
+
+ These are quicker than the non-coarse versions, but less accurate,
+ corresponding to CLOCK_MONOTONIC_COARSE and CLOCK_REALTIME_COARSE
+ in user space, along with the equivalent boottime/tai/raw
+ timebases that are not available in user space.
+
+ The time returned here corresponds to the last timer tick, which
+ may be as much as 10ms in the past (for CONFIG_HZ=100), same as
+ reading the 'jiffies' variable. These are only useful when called
+ in a fast path and one still expects better than second accuracy,
+ but can't easily use 'jiffies', e.g. for inode timestamps.
+ Skipping the hardware clock access saves around 100 CPU cycles
+ on most modern machines with a reliable cycle counter, but
+ up to several microseconds on older hardware with an external
+ clocksource.
+
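+A sketch of the inode-timestamp style of use mentioned above (the
+surrounding filesystem code is hypothetical)::
+
+    struct timespec64 now;
+
+    /* tick-granular wall time; cheap enough for hot paths */
+    ktime_get_coarse_real_ts64(&now);
+    inode->i_mtime = now;
+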
+.. c:function:: u64 ktime_get_mono_fast_ns( void )
+ u64 ktime_get_raw_fast_ns( void )
+ u64 ktime_get_boot_fast_ns( void )
+ u64 ktime_get_real_fast_ns( void )
+
+ These variants are safe to call from any context, including from
+ a non-maskable interrupt (NMI) during a timekeeper update, and
+ while we are entering suspend with the clocksource powered down.
+ This is useful in some tracing or debugging code as well as
+ machine check reporting, but most drivers should never call them,
+ since the time is allowed to jump under certain conditions.
+
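+As a hedged example, tracing code might timestamp an event from any
+context (record_event() is a hypothetical ring-buffer helper)::
+
+    /* safe even in NMI context; may jump during timekeeper updates */
+    u64 ts = ktime_get_mono_fast_ns();
+
+    record_event(ts);
+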
+Deprecated time interfaces
+--------------------------
+
+Older kernels used some other interfaces that are now being phased out
+but may appear in third-party drivers being ported here. In particular,
+all interfaces returning a 'struct timeval' or 'struct timespec' have
+been replaced because the tv_sec member overflows in year 2038 on 32-bit
+architectures. These are the recommended replacements:
+
+.. c:function:: void ktime_get_ts( struct timespec * )
+
+ Use ktime_get() or ktime_get_ts64() instead.
+
+.. c:function:: struct timeval do_gettimeofday( void )
+ struct timespec getnstimeofday( void )
+ struct timespec64 getnstimeofday64( void )
+ void ktime_get_real_ts( struct timespec * )
+
+ ktime_get_real_ts64() is a direct replacement, but consider using
+ monotonic time (ktime_get_ts64()) and/or a ktime_t based interface
+ (ktime_get()/ktime_get_real()).
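+
+ A typical conversion might look like this (sketch only)::
+
+    struct timeval tv;           /* deprecated, not y2038-safe */
+    do_gettimeofday(&tv);
+
+    /* becomes: */
+    struct timespec64 ts;
+    ktime_get_real_ts64(&ts);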
+
+.. c:function:: struct timespec current_kernel_time( void )
+ struct timespec64 current_kernel_time64( void )
+ struct timespec get_monotonic_coarse( void )
+ struct timespec64 get_monotonic_coarse64( void )
+
+ These are replaced by ktime_get_coarse_real_ts64() and
+ ktime_get_coarse_ts64(). However, a lot of code that wants
+ coarse-grained times can use the simple 'jiffies' instead, while
+ some drivers may actually want the higher resolution accessors
+ these days.
+
+.. c:function:: struct timespec getrawmonotonic( void )
+ struct timespec64 getrawmonotonic64( void )
+ struct timespec timekeeping_clocktai( void )
+ struct timespec64 timekeeping_clocktai64( void )
+ struct timespec get_monotonic_boottime( void )
+ struct timespec64 get_monotonic_boottime64( void )
+
+ These are replaced by ktime_get_raw()/ktime_get_raw_ts64(),
+ ktime_get_clocktai()/ktime_get_clocktai_ts64() as well
+ as ktime_get_boottime()/ktime_get_boottime_ts64().
+ However, if the particular choice of clock source is not
+ important for the user, consider converting to
+ ktime_get()/ktime_get_ts64() instead for consistency.
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
index 006827e30d06..0f6ca8b7261e 100644
--- a/Documentation/crypto/api-samples.rst
+++ b/Documentation/crypto/api-samples.rst
@@ -162,7 +162,7 @@ Code Example For Use of Operational State Memory With SHASH
char *hash_alg_name = "sha1-padlock-nano";
int ret;
- alg = crypto_alloc_shash(hash_alg_name, CRYPTO_ALG_TYPE_SHASH, 0);
+ alg = crypto_alloc_shash(hash_alg_name, 0, 0);
if (IS_ERR(alg)) {
pr_info("can't alloc alg %s\n", hash_alg_name);
return PTR_ERR(alg);
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index 3bf371a938d0..6f653acea248 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -156,6 +156,11 @@ Contributing new tests (details)
installed by the distro on the system should be the primary focus to be able
to find regressions.
+ * If a test needs specific kernel config options enabled, add a config file in
+ the test directory to enable them.
+
+ e.g.: tools/testing/selftests/android/ion/config
+
Test Harness
============
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,vf610-mscm-ir.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,vf610-mscm-ir.txt
index 669808b2af49..6dd6f399236d 100644
--- a/Documentation/devicetree/bindings/arm/freescale/fsl,vf610-mscm-ir.txt
+++ b/Documentation/devicetree/bindings/arm/freescale/fsl,vf610-mscm-ir.txt
@@ -18,9 +18,6 @@ Required properties:
assignment of the interrupt router is required.
Flags get passed only when using GIC as parent. Flags
encoding as documented by the GIC bindings.
-- interrupt-parent: Should be the phandle for the interrupt controller of
- the CPU the device tree is intended to be used on. This
- is either the node of the GIC or NVIC controller.
Example:
mscm_ir: interrupt-controller@40001800 {
diff --git a/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt b/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
index 0b887440e08a..3fd21bb7cb37 100644
--- a/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
@@ -2,14 +2,17 @@ Marvell Armada AP806 System Controller
======================================
The AP806 is one of the two core HW blocks of the Marvell Armada 7K/8K
-SoCs. It contains a system controller, which provides a number
-registers giving access to numerous features: clocks, pin-muxing and
-many other SoC configuration items. This DT binding allows to describe
-this system controller.
+SoCs. It contains system controllers, which provide several registers
+giving access to numerous features: clocks, pin-muxing and many other
+SoC configuration items. This DT binding allows describing these
+system controllers.
For the top level node:
- compatible: must be: "syscon", "simple-mfd";
- - reg: register area of the AP806 system controller
+ - reg: register area of the AP806 system controller
+
+SYSTEM CONTROLLER 0
+===================
Clocks:
-------
@@ -98,3 +101,38 @@ ap_syscon: system-controller@6f4000 {
gpio-ranges = <&ap_pinctrl 0 0 19>;
};
};
+
+SYSTEM CONTROLLER 1
+===================
+
+Thermal:
+--------
+
+For common binding part and usage, refer to
+Documentation/devicetree/bindings/thermal/thermal.txt
+
+The thermal IP can probe the temperature all around the processor. It
+may feature several channels, each of them wired to one sensor.
+
+Required properties:
+- compatible: must be one of:
+ * marvell,armada-ap806-thermal
+- reg: register range associated with the thermal functions.
+
+Optional properties:
+- #thermal-sensor-cells: shall be <1> when thermal-zones subnodes refer
+ to this IP and represents the channel ID. There is one sensor per
+ channel. 0 refers to the thermal IP internal channel, while positive
+ IDs refer to each CPU.
+
+Example:
+ap_syscon1: system-controller@6f8000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x6f8000 0x1000>;
+
+ ap_thermal: thermal-sensor@80 {
+ compatible = "marvell,armada-ap806-thermal";
+ reg = <0x80 0x10>;
+ #thermal-sensor-cells = <1>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt b/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
index 35c3c3460d17..eddde4faef01 100644
--- a/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
@@ -33,3 +33,18 @@ nb_pm: syscon@14000 {
compatible = "marvell,armada-3700-nb-pm", "syscon";
reg = <0x14000 0x60>;
}
+
+AVS
+---
+
+For AVS, another component is needed:
+
+Required properties:
+- compatible : should contain "marvell,armada-3700-avs", "syscon";
+- reg : the register start and length for the AVS
+
+Example:
+avs: avs@11500 {
+ compatible = "marvell,armada-3700-avs", "syscon";
+ reg = <0x11500 0x40>;
+}
diff --git a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt
index 29cdbae6c5ac..81ce742d2760 100644
--- a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt
@@ -1,15 +1,18 @@
-Marvell Armada CP110 System Controller 0
-========================================
+Marvell Armada CP110 System Controller
+======================================
The CP110 is one of the two core HW blocks of the Marvell Armada 7K/8K
-SoCs. It contains two sets of system control registers, System
-Controller 0 and System Controller 1. This Device Tree binding allows
-to describe the first system controller, which provides registers to
-configure various aspects of the SoC.
+SoCs. It contains system controllers, which provide several registers
+giving access to numerous features: clocks, pin-muxing and many other
+SoC configuration items. This DT binding allows describing these
+system controllers.
For the top level node:
- compatible: must be: "syscon", "simple-mfd";
- - reg: register area of the CP110 system controller 0
+ - reg: register area of the CP110 system controller
+
+SYSTEM CONTROLLER 0
+===================
Clocks:
-------
@@ -163,26 +166,60 @@ Required properties:
Example:
-cpm_syscon0: system-controller@440000 {
+CP110_LABEL(syscon0): system-controller@440000 {
compatible = "syscon", "simple-mfd";
reg = <0x440000 0x1000>;
- cpm_clk: clock {
+ CP110_LABEL(clk): clock {
compatible = "marvell,cp110-clock";
#clock-cells = <2>;
};
- cpm_pinctrl: pinctrl {
+ CP110_LABEL(pinctrl): pinctrl {
compatible = "marvell,armada-8k-cpm-pinctrl";
};
- cpm_gpio1: gpio@100 {
+ CP110_LABEL(gpio1): gpio@100 {
compatible = "marvell,armada-8k-gpio";
offset = <0x100>;
ngpios = <32>;
gpio-controller;
#gpio-cells = <2>;
- gpio-ranges = <&cpm_pinctrl 0 0 32>;
+ gpio-ranges = <&CP110_LABEL(pinctrl) 0 0 32>;
};
};
+
+SYSTEM CONTROLLER 1
+===================
+
+Thermal:
+--------
+
+The thermal IP can probe the temperature all around the processor. It
+may feature several channels, each of them wired to one sensor.
+
+For common binding part and usage, refer to
+Documentation/devicetree/bindings/thermal/thermal.txt
+
+Required properties:
+- compatible: must be one of:
+ * marvell,armada-cp110-thermal
+- reg: register range associated with the thermal functions.
+
+Optional properties:
+- #thermal-sensor-cells: shall be <1> when thermal-zones subnodes refer
+ to this IP and represents the channel ID. There is one sensor per
+ channel. 0 refers to the thermal IP internal channel.
+
+Example:
+CP110_LABEL(syscon1): system-controller@6f8000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x6f8000 0x1000>;
+
+ CP110_LABEL(thermal): thermal-sensor@70 {
+ compatible = "marvell,armada-cp110-thermal";
+ reg = <0x70 0x10>;
+ #thermal-sensor-cells = <1>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
new file mode 100644
index 000000000000..5e85749262ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
@@ -0,0 +1,26 @@
+== Introduction ==
+
+LLCC (Last Level Cache Controller) provides the last level of cache memory in
+the SoC, which can be shared by multiple clients. Clients here are different
+cores in the SoC; the idea is to minimize the local caches at the clients and
+migrate to a common pool of memory. Cache memory is divided into partitions
+called slices which are assigned to clients. Clients can query the slice
+details, and activate and deactivate them.
+
+Properties:
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,sdm845-llcc"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Start address and the size of the register region.
+
+Example:
+
+ cache-controller@1100000 {
+ compatible = "qcom,sdm845-llcc";
+ reg = <0x1100000 0x250000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/omap/crossbar.txt b/Documentation/devicetree/bindings/arm/omap/crossbar.txt
index ecb360ed0e33..4cd5d873fc3a 100644
--- a/Documentation/devicetree/bindings/arm/omap/crossbar.txt
+++ b/Documentation/devicetree/bindings/arm/omap/crossbar.txt
@@ -10,7 +10,6 @@ Required properties:
- compatible : Should be "ti,irq-crossbar"
- reg: Base address and the size of the crossbar registers.
- interrupt-controller: indicates that this block is an interrupt controller.
-- interrupt-parent: the interrupt controller this block is connected to.
- ti,max-irqs: Total number of irqs available at the parent interrupt controller.
- ti,max-crossbar-sources: Maximum number of crossbar sources that can be routed.
- ti,reg-size: Size of a individual register in bytes. Every individual
diff --git a/Documentation/devicetree/bindings/arm/samsung/pmu.txt b/Documentation/devicetree/bindings/arm/samsung/pmu.txt
index 16685787d2bd..433bfd7593ac 100644
--- a/Documentation/devicetree/bindings/arm/samsung/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/pmu.txt
@@ -40,9 +40,6 @@ following properties:
- #interrupt-cells: must be identical to the that of the parent interrupt
controller.
-- interrupt-parent: a phandle indicating which interrupt controller
- this PMU signals interrupts to.
-
Optional nodes:
diff --git a/Documentation/devicetree/bindings/ata/fsl-sata.txt b/Documentation/devicetree/bindings/ata/fsl-sata.txt
index b46bcf46c3d8..fd63bb3becc9 100644
--- a/Documentation/devicetree/bindings/ata/fsl-sata.txt
+++ b/Documentation/devicetree/bindings/ata/fsl-sata.txt
@@ -16,7 +16,6 @@ Required properties:
4 for controller @ 0x1b000
Optional properties:
-- interrupt-parent : optional, if needed for interrupt mapping
- reg : <registers mapping>
Example:
diff --git a/Documentation/devicetree/bindings/ata/pata-arasan.txt b/Documentation/devicetree/bindings/ata/pata-arasan.txt
index 2aff154be84e..872edc105680 100644
--- a/Documentation/devicetree/bindings/ata/pata-arasan.txt
+++ b/Documentation/devicetree/bindings/ata/pata-arasan.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible: "arasan,cf-spear1340"
- reg: Address range of the CF registers
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupt: Should contain the CF interrupt number
- clock-frequency: Interface clock rate, in Hz, one of
25000000
diff --git a/Documentation/devicetree/bindings/board/fsl-board.txt b/Documentation/devicetree/bindings/board/fsl-board.txt
index fb7b03ec2071..eb52f6b35159 100644
--- a/Documentation/devicetree/bindings/board/fsl-board.txt
+++ b/Documentation/devicetree/bindings/board/fsl-board.txt
@@ -29,7 +29,6 @@ Required properties:
- reg: should contain the address and the length of the FPGA register set.
Optional properties:
-- interrupt-parent: should specify phandle for the interrupt controller.
- interrupts: should specify event (wakeup) IRQ.
Example (P1022DS):
diff --git a/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt b/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt
index 8a6c3c2e58fe..729def62f0c5 100644
--- a/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt
+++ b/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt
@@ -9,8 +9,6 @@ Required properties:
"brcm,bcm7400-gisb-arb" for older 40nm chips and all 65nm chips
"brcm,bcm7038-gisb-arb" for 130nm chips
- reg: specifies the base physical address and size of the registers
-- interrupt-parent: specifies the phandle to the parent interrupt controller
- this arbiter gets interrupt line from
- interrupts: specifies the two interrupts (timeout and TEA) to be used from
the parent interrupt controller
diff --git a/Documentation/devicetree/bindings/clock/actions,s900-cmu.txt b/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
index 93e4fb827cd6..d1e60d297387 100644
--- a/Documentation/devicetree/bindings/clock/actions,s900-cmu.txt
+++ b/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
@@ -1,12 +1,14 @@
-* Actions S900 Clock Management Unit (CMU)
+* Actions Semi Owl Clock Management Unit (CMU)
-The Actions S900 clock management unit generates and supplies clock to various
-controllers within the SoC. The clock binding described here is applicable to
-S900 SoC.
+The Actions Semi Owl Clock Management Unit generates and supplies clock
+to various controllers within the SoC. The clock binding described here is
+applicable to the S900 and S700 SoCs.
Required Properties:
-- compatible: should be "actions,s900-cmu"
+- compatible: should be one of the following,
+ "actions,s900-cmu"
+ "actions,s700-cmu"
- reg: physical base address of the controller and length of memory mapped
region.
- clocks: Reference to the parent clocks ("hosc", "losc")
@@ -15,16 +17,16 @@ Required Properties:
Each clock is assigned an identifier, and client nodes can use this identifier
to specify the clock which they consume.
-All available clocks are defined as preprocessor macros in
-dt-bindings/clock/actions,s900-cmu.h header and can be used in device
-tree sources.
+All available clocks are defined as preprocessor macros in corresponding
+dt-bindings/clock/actions,s900-cmu.h or actions,s700-cmu.h header and can be
+used in device tree sources.
External clocks:
The hosc clock used as input for the plls is generated outside the SoC. It is
expected that it is defined using standard clock bindings as "hosc".
-Actions S900 CMU also requires one more clock:
+Actions Semi S900 CMU also requires one more clock:
- "losc" - internal low frequency oscillator
Example: Clock Management Unit node:
diff --git a/Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt
new file mode 100644
index 000000000000..61777ad24f61
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt
@@ -0,0 +1,56 @@
+* Amlogic AXG Audio Clock Controllers
+
+The Amlogic AXG audio clock controller generates and supplies clock to the
+other elements of the audio subsystem, such as fifos, i2s, spdif and pdm
+devices.
+
+Required Properties:
+
+- compatible : should be "amlogic,axg-audio-clkc" for the A113X and A113D
+- reg : physical base address of the clock controller and length of
+ memory mapped region.
+- clocks : a list of phandle + clock-specifier pairs for the clocks listed
+ in clock-names.
+- clock-names : must contain the following:
+ * "pclk" - Main peripheral bus clock
+ may contain the following:
+ * "mst_in[0-7]" - 8 input plls to generate clock signals
+ * "slv_sclk[0-9]" - 10 slave bit clocks provided by external
+ components.
+ * "slv_lrclk[0-9]" - 10 slave sample clocks provided by external
+ components.
+- resets : phandle of the internal reset line
+- #clock-cells : should be 1.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. All available clocks are defined as
+preprocessor macros in the dt-bindings/clock/axg-audio-clkc.h header and can be
+used in device tree sources.
+
+Example:
+
+clkc_audio: clock-controller@0 {
+ compatible = "amlogic,axg-audio-clkc";
+ reg = <0x0 0x0 0x0 0xb4>;
+ #clock-cells = <1>;
+
+ clocks = <&clkc CLKID_AUDIO>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL3>,
+ <&clkc CLKID_HIFI_PLL>,
+ <&clkc CLKID_FCLK_DIV3>,
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_GP0_PLL>;
+ clock-names = "pclk",
+ "mst_in0",
+ "mst_in1",
+ "mst_in2",
+ "mst_in3",
+ "mst_in4",
+ "mst_in5",
+ "mst_in6",
+ "mst_in7";
+ resets = <&reset RESET_AUDIO>;
+};
diff --git a/Documentation/devicetree/bindings/clock/at91-clock.txt b/Documentation/devicetree/bindings/clock/at91-clock.txt
index 51c259a92d02..09968ee224d8 100644
--- a/Documentation/devicetree/bindings/clock/at91-clock.txt
+++ b/Documentation/devicetree/bindings/clock/at91-clock.txt
@@ -91,6 +91,9 @@ Required properties:
at91 audio pll output on AUDIOPLLCLK that feeds the PMC
and can be used by peripheral clock or generic clock
+ "atmel,sama5d2-clk-i2s-mux" (under pmc node):
+ at91 I2S clock source selection
+
Required properties for SCKC node:
- reg : defines the IO memory reserved for the SCKC.
- #size-cells : shall be 0 (reg is used to encode clk id).
@@ -180,7 +183,6 @@ For example:
};
Required properties for main clock internal RC oscillator:
-- interrupt-parent : must reference the PMC node.
- interrupts : shall be set to "<0>".
- clock-frequency : define the internal RC oscillator frequency.
@@ -197,7 +199,6 @@ For example:
};
Required properties for main clock oscillator:
-- interrupt-parent : must reference the PMC node.
- interrupts : shall be set to "<0>".
- #clock-cells : from common clock binding; shall be set to 0.
- clocks : shall encode the main osc source clk sources (see atmel datasheet).
@@ -218,7 +219,6 @@ For example:
};
Required properties for main clock:
-- interrupt-parent : must reference the PMC node.
- interrupts : shall be set to "<0>".
- #clock-cells : from common clock binding; shall be set to 0.
- clocks : shall encode the main clk sources (see atmel datasheet).
@@ -233,7 +233,6 @@ For example:
};
Required properties for master clock:
-- interrupt-parent : must reference the PMC node.
- interrupts : shall be set to "<3>".
- #clock-cells : from common clock binding; shall be set to 0.
- clocks : shall be the master clock sources (see atmel datasheet) phandles.
@@ -292,7 +291,6 @@ For example:
Required properties for pll clocks:
-- interrupt-parent : must reference the PMC node.
- interrupts : shall be set to "<1>".
- #clock-cells : from common clock binding; shall be set to 0.
- clocks : shall be the main clock phandle.
@@ -348,7 +346,6 @@ For example:
};
Required properties for programmable clocks:
-- interrupt-parent : must reference the PMC node.
- #size-cells : shall be 0 (reg is used to encode clk id).
- #address-cells : shall be 1 (reg is used to encode clk id).
- clocks : shall be the programmable clock source phandles.
@@ -451,7 +448,6 @@ For example:
Required properties for utmi clock:
-- interrupt-parent : must reference the PMC node.
- interrupts : shall be set to "<AT91_PMC_LOCKU IRQ_TYPE_LEVEL_HIGH>".
- #clock-cells : from common clock binding; shall be set to 0.
- clocks : shall be the main clock source phandle.
@@ -507,3 +503,35 @@ For example:
atmel,clk-output-range = <0 83000000>;
};
};
+
+Required properties for I2S mux clocks:
+- #size-cells : shall be 0 (reg is used to encode I2S bus id).
+- #address-cells : shall be 1 (reg is used to encode I2S bus id).
+- name: device tree node describing a specific mux clock.
+ * #clock-cells : from common clock binding; shall be set to 0.
+ * clocks : shall be the mux clock parent phandles; shall be 2 phandles:
+ peripheral and generated clock; the first phandle shall belong to the
+ peripheral clock and the second one shall belong to the generated
+ clock; "clock-indices" property can be user to specify
+ the correct order.
+ * reg: I2S bus id of the corresponding mux clock.
+ e.g. reg = <0>; for i2s0, reg = <1>; for i2s1
+
+For example:
+ i2s_clkmux {
+ compatible = "atmel,sama5d2-clk-i2s-mux";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2s0muxck: i2s0_muxclk {
+ clocks = <&i2s0_clk>, <&i2s0_gclk>;
+ #clock-cells = <0>;
+ reg = <0>;
+ };
+
+ i2s1muxck: i2s1_muxclk {
+ clocks = <&i2s1_clk>, <&i2s1_gclk>;
+ #clock-cells = <0>;
+ reg = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/maxim,max9485.txt b/Documentation/devicetree/bindings/clock/maxim,max9485.txt
new file mode 100644
index 000000000000..61bec1100a94
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/maxim,max9485.txt
@@ -0,0 +1,59 @@
+Devicetree bindings for Maxim MAX9485 Programmable Audio Clock Generator
+
+This device exposes 4 clocks in total:
+
+- MAX9485_MCLKOUT: A gated, buffered output of the input clock of 27 MHz
+- MAX9485_CLKOUT: A PLL that can be configured to 16 different discrete
+ frequencies
+- MAX9485_CLKOUT[1,2]: Two gated outputs for MAX9485_CLKOUT
+
+MAX9485_CLKOUT[1,2] are children of MAX9485_CLKOUT which upchain all rate set
+requests.
+
+Required properties:
+- compatible: "maxim,max9485"
+- clocks: Input clock, must provide 27.000 MHz
+- clock-names: Must be set to "xclk"
+- #clock-cells: From common clock binding; shall be set to 1
+
+Optional properties:
+- reset-gpios: GPIO descriptor connected to the #RESET input pin
+- vdd-supply: A regulator node for Vdd
+- clock-output-names: Name of output clocks, as defined in common clock
+ bindings
+
+If not explicitly set, the output names are "mclkout", "clkout", "clkout1"
+and "clkout2".
+
+Clocks are defined as preprocessor macros in the dt-binding header.
+
+Example:
+
+ #include <dt-bindings/clock/maxim,max9485.h>
+
+ xo-27mhz: xo-27mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+
+ &i2c0 {
+ max9485: audio-clock@63 {
+ reg = <0x63>;
+ compatible = "maxim,max9485";
+ clock-names = "xclk";
+ clocks = <&xo-27mhz>;
+ reset-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
+ vdd-supply = <&3v3-reg>;
+ #clock-cells = <1>;
+ };
+ };
+
+ // Clock consumer node
+
+ foo@0 {
+ compatible = "bar,foo";
+ /* ... */
+ clock-names = "foo-input-clk";
+ clocks = <&max9485 MAX9485_CLKOUT1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc.txt b/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
new file mode 100644
index 000000000000..d639e18d0b85
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
@@ -0,0 +1,19 @@
+Qualcomm Technologies, Inc. Display Clock Controller Binding
+------------------------------------------------------------
+
+Required properties :
+
+- compatible : shall contain "qcom,sdm845-dispcc"
+- reg : shall contain base register location and length.
+- #clock-cells : from common clock binding, shall contain 1.
+- #reset-cells : from common reset binding, shall contain 1.
+- #power-domain-cells : from generic power domain binding, shall contain 1.
+
+Example:
+ dispcc: clock-controller@af00000 {
+ compatible = "qcom,sdm845-dispcc";
+ reg = <0xaf00000 0x100000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.txt b/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.txt
new file mode 100644
index 000000000000..d60b99756bb9
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.txt
@@ -0,0 +1,43 @@
+* Renesas R9A06G032 SYSCTRL
+
+Required Properties:
+
+ - compatible: Must be:
+ - "renesas,r9a06g032-sysctrl"
+ - reg: Base address and length of the SYSCTRL IO block.
+ - #clock-cells: Must be 1
+ - clocks: References to the parent clocks:
+ - external 40 MHz crystal.
+ - external (optional) 32.768 kHz
+ - external (optional) jtag input
+ - external (optional) RGMII_REFCLK
+ - clock-names: Must be:
+ clock-names = "mclk", "rtc", "jtag", "rgmii_ref_ext";
+
+Examples
+--------
+
+ - SYSCTRL node:
+
+ sysctrl: system-controller@4000c000 {
+ compatible = "renesas,r9a06g032-sysctrl";
+ reg = <0x4000c000 0x1000>;
+ #clock-cells = <1>;
+
+ clocks = <&ext_mclk>, <&ext_rtc_clk>,
+ <&ext_jtag_clk>, <&ext_rgmii_ref>;
+ clock-names = "mclk", "rtc", "jtag", "rgmii_ref_ext";
+ };
+
+ - Other nodes can use the clocks provided by SYSCTRL as in:
+
+ #include <dt-bindings/clock/r9a06g032-sysctrl.h>
+ uart0: serial@40060000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x40060000 0x400>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&sysctrl R9A06G032_CLK_UART0>;
+ clock-names = "baudclk";
+ };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,px30-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,px30-cru.txt
new file mode 100644
index 000000000000..39f0c1ac84ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,px30-cru.txt
@@ -0,0 +1,65 @@
+* Rockchip PX30 Clock and Reset Unit
+
+The PX30 clock controller generates and supplies clock to various
+controllers within the SoC and also implements a reset controller for SoC
+peripherals.
+
+Required Properties:
+
+- compatible: PMU for CRU should be "rockchip,px30-pmucru"
+- compatible: CRU should be "rockchip,px30-cru"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- #clock-cells: should be 1.
+- #reset-cells: should be 1.
+
+Optional Properties:
+
+- rockchip,grf: phandle to the syscon managing the "general register files"
+ If missing, PLL rates are not changeable due to the missing PLL lock status.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. All available clocks are defined as
+preprocessor macros in the dt-bindings/clock/px30-cru.h headers and can be
+used in device tree sources. Similar macros exist for the reset sources in
+these files.
+
+External clocks:
+
+There are several clocks that are generated outside the SoC. It is expected
+that they are defined using standard clock bindings with the following
+clock-output-names:
+ - "xin24m" - crystal input - required,
+ - "xin32k" - rtc clock - optional,
+ - "i2sx_clkin" - external I2S clock - optional,
+ - "gmac_clkin" - external GMAC clock - optional
+
+Example: Clock controller node:
+
+ pmucru: clock-controller@ff2bc000 {
+ compatible = "rockchip,px30-pmucru";
+ reg = <0x0 0xff2bc000 0x0 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ cru: clock-controller@ff2b0000 {
+ compatible = "rockchip,px30-cru";
+ reg = <0x0 0xff2b0000 0x0 0x1000>;
+ rockchip,grf = <&grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+Example: UART controller node that consumes the clock generated by the clock
+ controller:
+
+ uart0: serial@ff030000 {
+ compatible = "rockchip,px30-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff030000 0x0 0x100>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmucru SCLK_UART0_PMU>, <&pmucru PCLK_UART0_PMU>;
+ clock-names = "baudclk", "apb_pclk";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/sun8i-de2.txt b/Documentation/devicetree/bindings/clock/sun8i-de2.txt
index f2fa87c4765c..e94582e8b8a9 100644
--- a/Documentation/devicetree/bindings/clock/sun8i-de2.txt
+++ b/Documentation/devicetree/bindings/clock/sun8i-de2.txt
@@ -6,6 +6,7 @@ Required properties :
- "allwinner,sun8i-a83t-de2-clk"
- "allwinner,sun8i-h3-de2-clk"
- "allwinner,sun8i-v3s-de2-clk"
+ - "allwinner,sun50i-a64-de2-clk"
- "allwinner,sun50i-h5-de2-clk"
- reg: Must contain the registers base address and length
diff --git a/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt b/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
index af2385795d78..73470ecd1f12 100644
--- a/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
+++ b/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
@@ -29,8 +29,6 @@ Required properties:
- reg: Specifies base physical address and size of the registers.
- interrupts: The interrupt that the AVS CPU will use to interrupt the host
when a command completed.
-- interrupt-parent: The interrupt controller the above interrupt is routed
- through.
- interrupt-names: The name of the interrupt used to interrupt the host.
Optional properties:
diff --git a/Documentation/devicetree/bindings/crypto/amd-ccp.txt b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
index 8c61183b41e0..d87579d63da6 100644
--- a/Documentation/devicetree/bindings/crypto/amd-ccp.txt
+++ b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible: Should be "amd,ccp-seattle-v1a"
- reg: Address and length of the register set for the device
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the CCP interrupt
Optional properties:
diff --git a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
index c2598ab27f2e..999fb2a810f6 100644
--- a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
+++ b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
@@ -7,8 +7,6 @@ Required properties:
- interrupts: Interrupt number for the device.
Optional properties:
-- interrupt-parent: The phandle for the interrupt controller that services
- interrupts for this device.
- clocks: Reference to the crypto engine clock.
- dma-coherent: Present if dma operations are coherent.
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec2.txt b/Documentation/devicetree/bindings/crypto/fsl-sec2.txt
index f0d926bf9f36..125f155d00d0 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec2.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec2.txt
@@ -50,11 +50,6 @@ remaining bits are reserved for future SEC EUs.
..and so on and so forth.
-Optional properties:
-
-- interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
-
Example:
/* MPC8548E */
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
index 3c1f3a229eab..2fe245ca816a 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
@@ -99,13 +99,6 @@ PROPERTIES
of the specifier is defined by the binding document
describing the node's interrupt parent.
- - interrupt-parent
- Usage: (required if interrupt property is defined)
- Value type: <phandle>
- Definition: A single <phandle> value that points
- to the interrupt parent to which the child domain
- is being mapped.
-
- clocks
Usage: required if SEC 4.0 requires explicit enablement of clocks
Value type: <prop_encoded-array>
@@ -199,13 +192,6 @@ Job Ring (JR) Node
of the specifier is defined by the binding document
describing the node's interrupt parent.
- - interrupt-parent
- Usage: (required if interrupt property is defined)
- Value type: <phandle>
- Definition: A single <phandle> value that points
- to the interrupt parent to which the child domain
- is being mapped.
-
EXAMPLE
jr@1000 {
compatible = "fsl,sec-v4.0-job-ring";
@@ -370,13 +356,6 @@ Secure Non-Volatile Storage (SNVS) Node
of the specifier is defined by the binding document
describing the node's interrupt parent.
- - interrupt-parent
- Usage: (required if interrupt property is defined)
- Value type: <phandle>
- Definition: A single <phandle> value that points
- to the interrupt parent to which the child domain
- is being mapped.
-
EXAMPLE
sec_mon@314000 {
compatible = "fsl,sec-v4.0-mon", "syscon";
diff --git a/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt b/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt
new file mode 100644
index 000000000000..78d2db9d4de5
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt
@@ -0,0 +1,67 @@
+* Hisilicon hip07 Security Accelerator (SEC)
+
+Required properties:
+- compatible: Must contain one of
+ - "hisilicon,hip06-sec"
+ - "hisilicon,hip07-sec"
+- reg: Memory addresses and lengths of the memory regions through which
+ this device is controlled.
+ Region 0 has registers to control the backend processing engines.
+ Region 1 has registers for functionality common to all queues.
+ Regions 2-17 have registers for the 16 individual queues, which are isolated
+ both in hardware and within the driver.
+- interrupts: Interrupt specifiers.
+ Refer to interrupt-controller/interrupts.txt for generic interrupt client node
+ bindings.
+ Interrupt 0 is for the SEC unit error queue.
+ Interrupt 2N + 1 is the completion interrupt for queue N.
+ Interrupt 2N + 2 is the error interrupt for queue N.
+ For example, queue 0 uses interrupts 1 and 2, and queue 1 uses 3 and 4.
+- dma-coherent: The driver assumes coherent DMA is possible.
+
+Optional properties:
+- iommus: The SEC units are behind smmu-v3 iommus.
+ Refer to iommu/arm,smmu-v3.txt for more information.
+
+Example:
+
+p1_sec_a: crypto@400,d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x400 0xd0000000 0x0 0x10000
+ 0x400 0xd2000000 0x0 0x10000
+ 0x400 0xd2010000 0x0 0x10000
+ 0x400 0xd2020000 0x0 0x10000
+ 0x400 0xd2030000 0x0 0x10000
+ 0x400 0xd2040000 0x0 0x10000
+ 0x400 0xd2050000 0x0 0x10000
+ 0x400 0xd2060000 0x0 0x10000
+ 0x400 0xd2070000 0x0 0x10000
+ 0x400 0xd2080000 0x0 0x10000
+ 0x400 0xd2090000 0x0 0x10000
+ 0x400 0xd20a0000 0x0 0x10000
+ 0x400 0xd20b0000 0x0 0x10000
+ 0x400 0xd20c0000 0x0 0x10000
+ 0x400 0xd20d0000 0x0 0x10000
+ 0x400 0xd20e0000 0x0 0x10000
+ 0x400 0xd20f0000 0x0 0x10000
+ 0x400 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p1_mbigen_sec_a>;
+ iommus = <&p1_smmu_alg_a 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+};
diff --git a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
index 5dba55cdfa63..3bbf144c9988 100644
--- a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
+++ b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
@@ -1,8 +1,9 @@
Inside Secure SafeXcel cryptographic engine
Required properties:
-- compatible: Should be "inside-secure,safexcel-eip197" or
- "inside-secure,safexcel-eip97".
+- compatible: Should be "inside-secure,safexcel-eip197b",
+ "inside-secure,safexcel-eip197d" or
+ "inside-secure,safexcel-eip97ies".
- reg: Base physical address of the engine and length of memory mapped region.
- interrupts: Interrupt numbers for the rings and engine.
- interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem".
@@ -14,10 +15,18 @@ Optional properties:
name must be "core" for the first clock and "reg" for
the second one.
+Backward compatibility:
+Two compatibles are kept for backward compatibility, but shouldn't be used for
+new submissions:
+- "inside-secure,safexcel-eip197" is equivalent to
+ "inside-secure,safexcel-eip197b".
+- "inside-secure,safexcel-eip97" is equivalent to
+ "inside-secure,safexcel-eip97ies".
+
Example:
crypto: crypto@800000 {
- compatible = "inside-secure,safexcel-eip197";
+ compatible = "inside-secure,safexcel-eip197b";
reg = <0x800000 0x200000>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/crypto/picochip-spacc.txt b/Documentation/devicetree/bindings/crypto/picochip-spacc.txt
index d8609ece1f4c..df1151f87745 100644
--- a/Documentation/devicetree/bindings/crypto/picochip-spacc.txt
+++ b/Documentation/devicetree/bindings/crypto/picochip-spacc.txt
@@ -7,8 +7,6 @@ Required properties:
- compatible : "picochip,spacc-ipsec" for the IPSEC offload engine
"picochip,spacc-l2" for the femtocell layer 2 ciphering engine.
- reg : Offset and length of the register set for this device
- - interrupt-parent : The interrupt controller that controls the SPAcc
- interrupt.
- interrupts : The interrupt line from the SPAcc.
- ref-clock : The input clock that drives the SPAcc.
diff --git a/Documentation/devicetree/bindings/rng/qcom,prng.txt b/Documentation/devicetree/bindings/crypto/qcom,prng.txt
index 8e5853c2879b..7ee0e9eac973 100644
--- a/Documentation/devicetree/bindings/rng/qcom,prng.txt
+++ b/Documentation/devicetree/bindings/crypto/qcom,prng.txt
@@ -2,7 +2,9 @@ Qualcomm MSM pseudo random number generator.
Required properties:
-- compatible : should be "qcom,prng"
+- compatible : should be "qcom,prng" for MSM8916 and similar SoCs
+ : should be "qcom,prng-ee" for MSM8996 and later SoCs using the EE
+ (Execution Environment) slice of the PRNG
- reg : specifies base physical address and size of the registers map
- clocks : phandle to clock-controller plus clock-specifier pair
- clock-names : "core" clocks all registers, FIFO and circuits in PRNG IP block
diff --git a/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt
index fc2bcbe26b1e..0ec68141f85a 100644
--- a/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt
+++ b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt
@@ -1,14 +1,10 @@
-* Rockchip rk3399 DMC(Dynamic Memory Controller) device
+* Rockchip rk3399 DMC (Dynamic Memory Controller) device
Required properties:
- compatible: Must be "rockchip,rk3399-dmc".
- devfreq-events: Node to get DDR loading, Refer to
- Documentation/devicetree/bindings/devfreq/
+ Documentation/devicetree/bindings/devfreq/event/
rockchip-dfi.txt
-- interrupts: The interrupt number to the CPU. The interrupt
- specifier format depends on the interrupt controller.
- It should be DCF interrupts, when DDR dvfs finish,
- it will happen.
- clocks: Phandles for clock specified in "clock-names" property
- clock-names : The name of clock used by the DFI, must be
"pclk_ddr_mon";
@@ -17,139 +13,148 @@ Required properties:
- center-supply: DMC supply node.
- status: Marks the node enabled/disabled.
-Following properties are ddr timing:
-
-- rockchip,dram_speed_bin : Value reference include/dt-bindings/clock/ddr.h,
- it select ddr3 cl-trp-trcd type, default value
- "DDR3_DEFAULT".it must selected according to
- "Speed Bin" in ddr3 datasheet, DO NOT use
- smaller "Speed Bin" than ddr3 exactly is.
-
-- rockchip,pd_idle : Config the PD_IDLE value, defined the power-down
- idle period, memories are places into power-down
- mode if bus is idle for PD_IDLE DFI clocks.
-
-- rockchip,sr_idle : Configure the SR_IDLE value, defined the
- selfrefresh idle period, memories are places
- into self-refresh mode if bus is idle for
- SR_IDLE*1024 DFI clocks (DFI clocks freq is
- half of dram's clocks), defaule value is "0".
-
-- rockchip,sr_mc_gate_idle : Defined the self-refresh with memory and
- controller clock gating idle period, memories
- are places into self-refresh mode and memory
- controller clock arg gating if bus is idle for
- sr_mc_gate_idle*1024 DFI clocks.
-
-- rockchip,srpd_lite_idle : Defined the self-refresh power down idle
- period, memories are places into self-refresh
- power down mode if bus is idle for
- srpd_lite_idle*1024 DFI clocks. This parameter
- is for LPDDR4 only.
-
-- rockchip,standby_idle : Defined the standby idle period, memories are
- places into self-refresh than controller, pi,
- phy and dram clock will gating if bus is idle
- for standby_idle * DFI clocks.
-
-- rockchip,dram_dll_disb_freq : It's defined the DDR3 dll bypass frequency in
- MHz, when ddr freq less than DRAM_DLL_DISB_FREQ,
- ddr3 dll will bypssed note: if dll was bypassed,
- the odt also stop working.
-
-- rockchip,phy_dll_disb_freq : Defined the PHY dll bypass frequency in
- MHz (Mega Hz), when ddr freq less than
- DRAM_DLL_DISB_FREQ, phy dll will bypssed.
- note: phy dll and phy odt are independent.
-
-- rockchip,ddr3_odt_disb_freq : When dram type is DDR3, this parameter defined
- the odt disable frequency in MHz (Mega Hz),
- when ddr frequency less then ddr3_odt_disb_freq,
- the odt on dram side and controller side are
+Optional properties:
+- interrupts: The CPU interrupt number. The interrupt specifier
+ format depends on the interrupt controller.
+ It should be a DCF interrupt. When DDR DVFS finishes,
+ a DCF interrupt is triggered.
+
+The following properties relate to DDR timing:
+
+- rockchip,dram_speed_bin : Value reference include/dt-bindings/clock/rk3399-ddr.h,
+ it selects the DDR3 cl-trp-trcd type. It must be
+ set according to "Speed Bin" in the DDR3 datasheet;
+ DO NOT use a smaller "Speed Bin" than specified
+ for the DDR3 being used.
+
+- rockchip,pd_idle : Configure the PD_IDLE value. Defines the
+ power-down idle period in which memories are
+ placed into power-down mode if the bus is idle
+ for PD_IDLE DFI clock cycles.
+
+- rockchip,sr_idle : Configure the SR_IDLE value. Defines the
+ self-refresh idle period in which memories are
+ placed into self-refresh mode if the bus is idle
+ for SR_IDLE * 1024 DFI clock cycles (the DFI
+ clock frequency is half the DRAM clock frequency);
+ default value is "0".
+
+- rockchip,sr_mc_gate_idle : Defines the memory self-refresh and controller
+ clock gating idle period. Memories are placed
+ into self-refresh mode and memory controller
+ clock gating is started if the bus is idle for
+ sr_mc_gate_idle * 1024 DFI clock cycles.
+
+- rockchip,srpd_lite_idle : Defines the self-refresh power down idle
+ period in which memories are placed into
+ self-refresh power down mode if the bus is idle
+ for srpd_lite_idle * 1024 DFI clock cycles.
+ This parameter is for LPDDR4 only.
+
+- rockchip,standby_idle : Defines the standby idle period in which
+ memories are placed into self-refresh mode.
+ The controller, PI, PHY and DRAM clocks will
+ be gated if the bus is idle for standby_idle * DFI
+ clock cycles.
+
+- rockchip,dram_dll_dis_freq : Defines the DDR3 DLL bypass frequency in MHz.
+ When the DDR frequency is less than dram_dll_dis_freq,
+ the DDR3 DLL will be bypassed. Note: if the DLL is
+ bypassed, the ODT will also stop working.
+
+- rockchip,phy_dll_dis_freq : Defines the PHY DLL bypass frequency in
+ MHz (Mega Hz). When the DDR frequency is less than
+ phy_dll_dis_freq, the PHY DLL will be bypassed.
+ Note: PHY DLL and PHY ODT are independent.
+
+- rockchip,ddr3_odt_dis_freq : When the DRAM type is DDR3, this parameter defines
+ the ODT disable frequency in MHz (Mega Hz).
+ When the DDR frequency is less than ddr3_odt_dis_freq,
+ the ODT on the DRAM side and controller side are
both disabled.
-- rockchip,ddr3_drv : When dram type is DDR3, this parameter define
- the dram side driver stength in ohm, default
+- rockchip,ddr3_drv : When the DRAM type is DDR3, this parameter defines
+ the DRAM side driver strength in ohms. Default
value is DDR3_DS_40ohm.
-- rockchip,ddr3_odt : When dram type is DDR3, this parameter define
- the dram side ODT stength in ohm, default value
+- rockchip,ddr3_odt : When the DRAM type is DDR3, this parameter defines
+ the DRAM side ODT strength in ohms. Default value
is DDR3_ODT_120ohm.
-- rockchip,phy_ddr3_ca_drv : When dram type is DDR3, this parameter define
- the phy side CA line(incluing command line,
+- rockchip,phy_ddr3_ca_drv : When the DRAM type is DDR3, this parameter defines
+ the PHY side CA line (including command line,
address line and clock line) driver strength.
Default value is PHY_DRV_ODT_40.
-- rockchip,phy_ddr3_dq_drv : When dram type is DDR3, this parameter define
- the phy side DQ line(incluing DQS/DQ/DM line)
- driver strength. default value is PHY_DRV_ODT_40.
+- rockchip,phy_ddr3_dq_drv : When the DRAM type is DDR3, this parameter defines
+ the PHY side DQ line (including DQS/DQ/DM line)
+ driver strength. Default value is PHY_DRV_ODT_40.
-- rockchip,phy_ddr3_odt : When dram type is DDR3, this parameter define the
- phy side odt strength, default value is
+- rockchip,phy_ddr3_odt : When the DRAM type is DDR3, this parameter defines
+ the PHY side ODT strength. Default value is
PHY_DRV_ODT_240.
-- rockchip,lpddr3_odt_disb_freq : When dram type is LPDDR3, this parameter defined
- then odt disable frequency in MHz (Mega Hz),
- when ddr frequency less then ddr3_odt_disb_freq,
- the odt on dram side and controller side are
+- rockchip,lpddr3_odt_dis_freq : When the DRAM type is LPDDR3, this parameter defines
+ the ODT disable frequency in MHz (Mega Hz).
+ When the DDR frequency is less than lpddr3_odt_dis_freq,
+ the ODT on the DRAM side and controller side are
both disabled.
-- rockchip,lpddr3_drv : When dram type is LPDDR3, this parameter define
- the dram side driver stength in ohm, default
+- rockchip,lpddr3_drv : When the DRAM type is LPDDR3, this parameter defines
+ the DRAM side driver strength in ohms. Default
value is LP3_DS_34ohm.
-- rockchip,lpddr3_odt : When dram type is LPDDR3, this parameter define
- the dram side ODT stength in ohm, default value
+- rockchip,lpddr3_odt : When the DRAM type is LPDDR3, this parameter defines
+ the DRAM side ODT strength in ohms. Default value
is LP3_ODT_240ohm.
-- rockchip,phy_lpddr3_ca_drv : When dram type is LPDDR3, this parameter define
- the phy side CA line(incluing command line,
+- rockchip,phy_lpddr3_ca_drv : When the DRAM type is LPDDR3, this parameter defines
+ the PHY side CA line (including command line,
address line and clock line) driver strength.
- default value is PHY_DRV_ODT_40.
+ Default value is PHY_DRV_ODT_40.
-- rockchip,phy_lpddr3_dq_drv : When dram type is LPDDR3, this parameter define
- the phy side DQ line(incluing DQS/DQ/DM line)
- driver strength. default value is
+- rockchip,phy_lpddr3_dq_drv : When the DRAM type is LPDDR3, this parameter defines
+ the PHY side DQ line (including DQS/DQ/DM line)
+ driver strength. Default value is
PHY_DRV_ODT_40.
- rockchip,phy_lpddr3_odt : When dram type is LPDDR3, this parameter define
the phy side odt strength, default value is
PHY_DRV_ODT_240.
-- rockchip,lpddr4_odt_disb_freq : When dram type is LPDDR4, this parameter
- defined the odt disable frequency in
- MHz (Mega Hz), when ddr frequency less then
- ddr3_odt_disb_freq, the odt on dram side and
+- rockchip,lpddr4_odt_dis_freq : When the DRAM type is LPDDR4, this parameter
+ defines the ODT disable frequency in
+ MHz (Mega Hz). When the DDR frequency is less than
+ lpddr4_odt_dis_freq, the ODT on the DRAM side and
controller side are both disabled.
-- rockchip,lpddr4_drv : When dram type is LPDDR4, this parameter define
- the dram side driver stength in ohm, default
+- rockchip,lpddr4_drv : When the DRAM type is LPDDR4, this parameter defines
+ the DRAM side driver strength in ohms. Default
value is LP4_PDDS_60ohm.
-- rockchip,lpddr4_dq_odt : When dram type is LPDDR4, this parameter define
- the dram side ODT on dqs/dq line stength in ohm,
- default value is LP4_DQ_ODT_40ohm.
+- rockchip,lpddr4_dq_odt : When the DRAM type is LPDDR4, this parameter defines
+ the DRAM side ODT on DQS/DQ line strength in ohms.
+ Default value is LP4_DQ_ODT_40ohm.
-- rockchip,lpddr4_ca_odt : When dram type is LPDDR4, this parameter define
- the dram side ODT on ca line stength in ohm,
- default value is LP4_CA_ODT_40ohm.
+- rockchip,lpddr4_ca_odt : When the DRAM type is LPDDR4, this parameter defines
+ the DRAM side ODT on CA line strength in ohms.
+ Default value is LP4_CA_ODT_40ohm.
-- rockchip,phy_lpddr4_ca_drv : When dram type is LPDDR4, this parameter define
- the phy side CA line(incluing command address
- line) driver strength. default value is
+- rockchip,phy_lpddr4_ca_drv : When the DRAM type is LPDDR4, this parameter defines
+ the PHY side CA line (including command address
+ line) driver strength. Default value is
PHY_DRV_ODT_40.
-- rockchip,phy_lpddr4_ck_cs_drv : When dram type is LPDDR4, this parameter define
- the phy side clock line and cs line driver
- strength. default value is PHY_DRV_ODT_80.
+- rockchip,phy_lpddr4_ck_cs_drv : When the DRAM type is LPDDR4, this parameter defines
+ the PHY side clock line and CS line driver
+ strength. Default value is PHY_DRV_ODT_80.
-- rockchip,phy_lpddr4_dq_drv : When dram type is LPDDR4, this parameter define
- the phy side DQ line(incluing DQS/DQ/DM line)
- driver strength. default value is PHY_DRV_ODT_80.
+- rockchip,phy_lpddr4_dq_drv : When the DRAM type is LPDDR4, this parameter defines
+ the PHY side DQ line (including DQS/DQ/DM line)
+ driver strength. Default value is PHY_DRV_ODT_80.
-- rockchip,phy_lpddr4_odt : When dram type is LPDDR4, this parameter define
- the phy side odt strength, default value is
+- rockchip,phy_lpddr4_odt : When the DRAM type is LPDDR4, this parameter defines
+ the PHY side ODT strength. Default value is
PHY_DRV_ODT_60.
Example:
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
index 284e2b14cfbe..26649b4c4dd8 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
+++ b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
@@ -74,6 +74,12 @@ Required properties for DSI:
The 3 clocks output from the DSI analog PHY: dsi[01]_byte,
dsi[01]_ddr2, and dsi[01]_ddr
+Required properties for the TXP (writeback) block:
+- compatible: Should be "brcm,bcm2835-txp"
+- reg: Physical base address and length of the TXP block's registers
+- interrupts: The interrupt number
+ See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+
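+Example (a minimal sketch; the unit address and interrupt number are
+assumptions for illustration):
+
+ txp: txp@7e004000 {
+ compatible = "brcm,bcm2835-txp";
+ reg = <0x7e004000 0x20>;
+ interrupts = <1 11>;
+ };
+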
[1] Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt b/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
index 0c7473dd0e51..027d76c27a41 100644
--- a/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
+++ b/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
@@ -15,8 +15,6 @@ Required properties for dp-controller:
from common clock binding: handle to dp clock.
-clock-names:
from common clock binding: Shall be "dp".
- -interrupt-parent:
- phandle to Interrupt combiner node.
-phys:
from general PHY binding: the phandle for the PHY device.
-phy-names:
diff --git a/Documentation/devicetree/bindings/display/bridge/anx7814.txt b/Documentation/devicetree/bindings/display/bridge/anx7814.txt
index b2a22c28c9b3..dbd7c84ee584 100644
--- a/Documentation/devicetree/bindings/display/bridge/anx7814.txt
+++ b/Documentation/devicetree/bindings/display/bridge/anx7814.txt
@@ -8,8 +8,6 @@ Required properties:
- compatible : "analogix,anx7814"
- reg : I2C address of the device
- - interrupt-parent : Should be the phandle of the interrupt controller
- that services interrupts for this device
- interrupts : Should contain the INTP interrupt
- hpd-gpios : Which GPIO to use for hpd
- pd-gpios : Which GPIO to use for power down
diff --git a/Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt b/Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt
index aacc8b92968c..09e0a21f705e 100644
--- a/Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt
+++ b/Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt
@@ -19,8 +19,6 @@ hardware are EDID, HPD, and interrupts.
stdp4028-ge-b850v3-fw required properties:
- compatible : "megachips,stdp4028-ge-b850v3-fw"
- reg : I2C bus address
- - interrupt-parent : phandle of the interrupt controller that services
- interrupts to the device
- interrupts : one interrupt should be described here, as in
<0 IRQ_TYPE_LEVEL_HIGH>
- ports : One input port(reg = <0>) and one output port(reg = <1>)
diff --git a/Documentation/devicetree/bindings/display/bridge/sii902x.txt b/Documentation/devicetree/bindings/display/bridge/sii902x.txt
index 56a3e68ccb80..72d2dc6c3e6b 100644
--- a/Documentation/devicetree/bindings/display/bridge/sii902x.txt
+++ b/Documentation/devicetree/bindings/display/bridge/sii902x.txt
@@ -5,8 +5,8 @@ Required properties:
- reg: i2c address of the bridge
Optional properties:
- - interrupts-extended or interrupt-parent + interrupts: describe
- the interrupt line used to inform the host about hotplug events.
+ - interrupts: describe the interrupt line used to inform the host
+ about hotplug events.
- reset-gpios: OF device-tree gpio specification for RST_N pin.
Optional subnodes:
diff --git a/Documentation/devicetree/bindings/display/bridge/sii9234.txt b/Documentation/devicetree/bindings/display/bridge/sii9234.txt
index 88041ba23d56..a55bf77bd960 100644
--- a/Documentation/devicetree/bindings/display/bridge/sii9234.txt
+++ b/Documentation/devicetree/bindings/display/bridge/sii9234.txt
@@ -7,7 +7,7 @@ Required properties:
- iovcc18-supply : I/O Supply Voltage (1.8V)
- avcc12-supply : TMDS Analog Supply Voltage (1.2V)
- cvcc12-supply : Digital Core Supply Voltage (1.2V)
- - interrupts, interrupt-parent: interrupt specifier of INT pin
+ - interrupts: interrupt specifier of INT pin
- reset-gpios: gpio specifier of RESET pin (active low)
- video interfaces: Device node can contain two video interface port
nodes for HDMI encoder and connector according to [1].
diff --git a/Documentation/devicetree/bindings/display/bridge/sil-sii8620.txt b/Documentation/devicetree/bindings/display/bridge/sil-sii8620.txt
index 9409d9c6a260..b05052f7d62f 100644
--- a/Documentation/devicetree/bindings/display/bridge/sil-sii8620.txt
+++ b/Documentation/devicetree/bindings/display/bridge/sil-sii8620.txt
@@ -5,7 +5,7 @@ Required properties:
- reg: i2c address of the bridge
- cvcc10-supply: Digital Core Supply Voltage (1.0V)
- iovcc18-supply: I/O Supply Voltage (1.8V)
- - interrupts, interrupt-parent: interrupt specifier of INT pin
+ - interrupts: interrupt specifier of INT pin
- reset-gpios: gpio specifier of RESET pin
- clocks, clock-names: specification and name of "xtal" clock
- video interfaces: Device node can contain video interface port
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos7-decon.txt b/Documentation/devicetree/bindings/display/exynos/exynos7-decon.txt
index 9e2e7f6f7609..53912c99ec38 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos7-decon.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos7-decon.txt
@@ -9,9 +9,6 @@ Required properties:
- reg: physical base address and length of the DECON registers set.
-- interrupt-parent: should be the phandle of the decon controller's
- parent interrupt controller.
-
- interrupts: should contain a list of all DECON IP block interrupts in the
order: FIFO Level, VSYNC, LCD_SYSTEM. The interrupt specifier
format depends on the interrupt controller used.
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
index ade5d8eebf85..9b6cba3f82af 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
@@ -25,8 +25,6 @@ Required properties for dp-controller:
from common clock binding: handle to dp clock.
-clock-names:
from common clock binding: Shall be "dp".
- -interrupt-parent:
- phandle to Interrupt combiner node.
-phys:
from general PHY binding: the phandle for the PHY device.
-phy-names:
diff --git a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
index 5837402c3ade..b3096421d42b 100644
--- a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
@@ -16,9 +16,6 @@ Required properties:
- reg: physical base address and length of the FIMD registers set.
-- interrupt-parent: should be the phandle of the fimd controller's
- parent interrupt controller.
-
- interrupts: should contain a list of all FIMD IP block interrupts in the
order: FIFO Level, VSYNC, LCD_SYSTEM. The interrupt specifier
format depends on the interrupt controller used.
diff --git a/Documentation/devicetree/bindings/display/ht16k33.txt b/Documentation/devicetree/bindings/display/ht16k33.txt
index 8e5b30b87754..d5a8b070b467 100644
--- a/Documentation/devicetree/bindings/display/ht16k33.txt
+++ b/Documentation/devicetree/bindings/display/ht16k33.txt
@@ -4,8 +4,6 @@ Holtek ht16k33 RAM mapping 16*8 LED controller driver with keyscan
Required properties:
- compatible: "holtek,ht16k33"
- reg: I2C slave address of the chip.
-- interrupt-parent: A phandle pointing to the interrupt controller
- serving the interrupt for this chip.
- interrupts: Interrupt specification for the key pressed interrupt.
- refresh-rate-hz: Display update interval in HZ.
- debounce-delay-ms: Debouncing interval time in milliseconds.
diff --git a/Documentation/devicetree/bindings/display/ilitek,ili9341.txt b/Documentation/devicetree/bindings/display/ilitek,ili9341.txt
new file mode 100644
index 000000000000..169b32e4ee4e
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ilitek,ili9341.txt
@@ -0,0 +1,27 @@
+Ilitek ILI9341 display panels
+
+This binding is for display panels using an Ilitek ILI9341 controller in SPI
+mode.
+
+Required properties:
+- compatible: "adafruit,yx240qv29", "ilitek,ili9341"
+- dc-gpios: D/C pin
+- reset-gpios: Reset pin
+
+The node for this driver must be a child node of an SPI controller, hence
+all mandatory properties described in ../spi/spi-bus.txt must be specified.
+
+Optional properties:
+- rotation: panel rotation in degrees counter-clockwise (0, 90, 180, 270)
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+ display@0 {
+ compatible = "adafruit,yx240qv29", "ilitek,ili9341";
+ reg = <0>;
+ spi-max-frequency = <32000000>;
+ dc-gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>;
+ rotation = <270>;
+ backlight = <&backlight>;
+ };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
index 383183a89164..8469de510001 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
@@ -40,7 +40,7 @@ Required properties (all function blocks):
"mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt
"mediatek,<chip>-disp-mutex" - display mutex
"mediatek,<chip>-disp-od" - overdrive
- the supported chips are mt2701 and mt8173.
+ the supported chips are mt2701, mt2712 and mt8173.
- reg: Physical base address and length of the function block register space
- interrupts: The interrupt signal from the function block (required, except for
merge and split function blocks).
diff --git a/Documentation/devicetree/bindings/display/msm/dpu.txt b/Documentation/devicetree/bindings/display/msm/dpu.txt
new file mode 100644
index 000000000000..ad2e8830324e
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/msm/dpu.txt
@@ -0,0 +1,131 @@
+Qualcomm Technologies, Inc. DPU KMS
+
+Description:
+
+Device tree bindings for the MSM Mobile Display Subsystem (MDSS) that
+encapsulates sub-blocks like the DPU display controller, DSI and DP
+interfaces, etc. The DPU display controller is found in the SDM845 SoC.
+
+MDSS:
+Required properties:
+- compatible: "qcom,sdm845-mdss"
+- reg: physical base address and length of controller's registers.
+- reg-names: register region names. The following region is required:
+ * "mdss"
+- power-domains: a power domain consumer specifier according to
+ Documentation/devicetree/bindings/power/power_domain.txt
+- clocks: list of clock specifiers for clocks needed by the device.
+- clock-names: device clock names, must be in same order as clocks property.
+ The following clocks are required:
+ * "iface"
+ * "bus"
+ * "core"
+- interrupts: interrupt signal from MDSS.
+- interrupt-controller: identifies the node as an interrupt controller.
+- #interrupt-cells: specifies the number of cells needed to encode an interrupt
+ source, should be 1.
+- iommus: phandle of iommu device node.
+- #address-cells: number of address cells for the MDSS children. Should be 1.
+- #size-cells: Should be 1.
+- ranges: parent bus address space is the same as the child bus address space.
+
+Optional properties:
+- assigned-clocks: list of clock specifiers for clocks needing rate assignment
+- assigned-clock-rates: list of clock frequencies sorted in the same order as
+ the assigned-clocks property.
+
+MDP:
+Required properties:
+- compatible: "qcom,sdm845-dpu"
+- reg: physical base address and length of controller's registers.
+- reg-names : register region names. The following regions are required:
+ * "mdp"
+ * "vbif"
+- clocks: list of clock specifiers for clocks needed by the device.
+- clock-names: device clock names, must be in same order as clocks property.
+ The following clocks are required:
+ * "bus"
+ * "iface"
+ * "core"
+ * "vsync"
+- interrupts: interrupt line from DPU to MDSS.
+- ports: contains the list of output ports from the DPU device. These ports connect
+ to interfaces that are external to the DPU hardware, such as DSI, DP etc.
+
+ Each output port contains an endpoint that describes how it is connected to an
+ external interface. These are described by the standard properties documented
+ here:
+ Documentation/devicetree/bindings/graph.txt
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+
+ Port 0 -> DPU_INTF1 (DSI1)
+ Port 1 -> DPU_INTF2 (DSI2)
+
+Optional properties:
+- assigned-clocks: list of clock specifiers for clocks needing rate assignment
+- assigned-clock-rates: list of clock frequencies sorted in the same order as
+ the assigned-clocks property.
+
+Example:
+
+ mdss: mdss@ae00000 {
+ compatible = "qcom,sdm845-mdss";
+ reg = <0xae00000 0x1000>;
+ reg-names = "mdss";
+
+ power-domains = <&clock_dispcc 0>;
+
+ clocks = <&gcc GCC_DISP_AHB_CLK>, <&gcc GCC_DISP_AXI_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_MDP_CLK>;
+ clock-names = "iface", "bus", "core";
+
+ assigned-clocks = <&clock_dispcc DISP_CC_MDSS_MDP_CLK>;
+ assigned-clock-rates = <300000000>;
+
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ iommus = <&apps_iommu 0>;
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0xae00000 0xb2008>;
+
+ mdss_mdp: mdp@ae01000 {
+ compatible = "qcom,sdm845-dpu";
+ reg = <0 0x1000 0x8f000>, <0 0xb0000 0x2008>;
+ reg-names = "mdp", "vbif";
+
+ clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_AXI_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_MDP_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ clock-names = "iface", "bus", "core", "vsync";
+
+ assigned-clocks = <&clock_dispcc DISP_CC_MDSS_MDP_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ assigned-clock-rates = <0 0 300000000 19200000>;
+
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dpu_intf1_out: endpoint {
+ remote-endpoint = <&dsi0_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dpu_intf2_out: endpoint {
+ remote-endpoint = <&dsi1_in>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
index 518e9cdf0d4b..dfc743219bd8 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
@@ -43,8 +43,6 @@ Optional properties:
the master link of the 2-DSI panel.
- qcom,sync-dual-dsi: Boolean value indicating if the DSI controller is
driving a 2-DSI panel whose 2 links need receive command simultaneously.
-- interrupt-parent: phandle to the MDP block if the interrupt signal is routed
- through MDP block
- pinctrl-names: the pin control state names; should contain "default"
- pinctrl-0: the default pinctrl state (active)
- pinctrl-n: the "sleep" pinctrl state
@@ -121,6 +119,20 @@ Required properties:
Optional properties:
- qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
regulator is wanted.
+- qcom,mdss-mdp-transfer-time-us: Specifies the DSI transfer time for command mode
+ panels in microseconds. The driver uses this number to adjust
+ the clock rate according to the expected transfer time.
+ Increasing this value slows down the MDP processing
+ and can result in slower performance.
+ Decreasing this value can speed up the MDP processing,
+ but this can also impact power consumption.
+ As a rule this time should not be higher than the time
+ that would be expected with the processing at the
+ DSI link rate, since that is the maximum
+ transfer time that could be achieved.
+ If ping pong split is enabled, this time should not be higher
+ than two times the DSI link rate time.
+ If the property is not specified, the default value is 14000 us.
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
[2] Documentation/devicetree/bindings/graph.txt
@@ -171,6 +183,8 @@ Example:
qcom,master-dsi;
qcom,sync-dual-dsi;
+ qcom,mdss-mdp-transfer-time-us = <12000>;
+
pinctrl-names = "default", "sleep";
pinctrl-0 = <&dsi_active>;
pinctrl-1 = <&dsi_suspend>;
diff --git a/Documentation/devicetree/bindings/display/msm/edp.txt b/Documentation/devicetree/bindings/display/msm/edp.txt
index 95ce19ca7bc5..eff9daff418c 100644
--- a/Documentation/devicetree/bindings/display/msm/edp.txt
+++ b/Documentation/devicetree/bindings/display/msm/edp.txt
@@ -25,10 +25,6 @@ Required properties:
- panel-hpd-gpios: GPIO pin used for eDP hpd.
-Optional properties:
-- interrupt-parent: phandle to the MDP block if the interrupt signal is routed
- through MDP block
-
Example:
mdss_edp: qcom,mdss_edp@fd923400 {
compatible = "qcom,mdss-edp";
diff --git a/Documentation/devicetree/bindings/display/msm/mdp5.txt b/Documentation/devicetree/bindings/display/msm/mdp5.txt
index 1b31977a68ba..4e11338548aa 100644
--- a/Documentation/devicetree/bindings/display/msm/mdp5.txt
+++ b/Documentation/devicetree/bindings/display/msm/mdp5.txt
@@ -41,8 +41,6 @@ Required properties:
- reg-names: The names of register regions. The following regions are required:
* "mdp_phys"
- interrupts: Interrupt line from MDP5 to MDSS interrupt controller.
-- interrupt-parent: phandle to the MDSS block
- through MDP block
- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required.
- * "bus"
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt b/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt
new file mode 100644
index 000000000000..49e4105378f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt
@@ -0,0 +1,29 @@
+AU Optronics Corporation 7.0" FHD (800 x 480) TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,g070vvn01"
+- backlight: phandle of the backlight device attached to the panel
+- power-supply: single regulator to provide the supply voltage
+
+Required nodes:
+- port: Parallel port mapping to connect this display
+
+This panel needs a single power supply voltage. Its backlight is controlled
+via a PWM signal.
+
+Example:
+--------
+
+Example device-tree definition when connected to an iMX6Q based board
+
+ lcd_panel: lcd-panel {
+ compatible = "auo,g070vvn01";
+ backlight = <&backlight_lcd>;
+ power-supply = <&reg_display>;
+
+ port {
+ lcd_panel_in: endpoint {
+ remote-endpoint = <&lcd_display_out>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt b/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt
new file mode 100644
index 000000000000..55183d360032
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt
@@ -0,0 +1,28 @@
+BOE HV070WSA-100 7.01" WSVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "boe,hv070wsa-100"
+- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
+- enable-gpios: GPIO pin to enable and disable panel (active high)
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+The device node can contain one 'port' child node with one child
+'endpoint' node, according to the bindings defined in [1]. This
+node should describe the panel's video bus.
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+
+ panel: panel {
+ compatible = "boe,hv070wsa-100";
+ power-supply = <&vcc_3v3_reg>;
+ enable-gpios = <&gpd1 3 GPIO_ACTIVE_HIGH>;
+ port {
+ panel_ep: endpoint {
+ remote-endpoint = <&bridge_out_ep>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt b/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt
new file mode 100644
index 000000000000..897085ee3cd4
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt
@@ -0,0 +1,8 @@
+DataImage, Inc. 7" WVGA (800x480) TFT LCD panel with 24-bit parallel interface.
+
+Required properties:
+- compatible: should be "dataimage,scf0700c48ggu18"
+- power-supply: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
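+
+Example (a minimal sketch; the regulator phandle is an assumption for
+illustration):
+
+ panel {
+ compatible = "dataimage,scf0700c48ggu18";
+ power-supply = <&reg_display>;
+ };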
diff --git a/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.txt b/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.txt
new file mode 100644
index 000000000000..bf06bb025b08
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.txt
@@ -0,0 +1,13 @@
+DLC Display Co. DLC0700YZG-1 7.0" WSVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "dlc,dlc0700yzg-1"
+- power-supply: See simple-panel.txt
+
+Optional properties:
+- reset-gpios: See panel-common.txt
+- enable-gpios: See simple-panel.txt
+- backlight: See simple-panel.txt
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
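+
+Example (a minimal sketch; the phandles are assumptions for illustration):
+
+ panel {
+ compatible = "dlc,dlc0700yzg-1";
+ power-supply = <&vcc_3v3>;
+ backlight = <&backlight_lcd>;
+ };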
diff --git a/Documentation/devicetree/bindings/display/panel/edt,et-series.txt b/Documentation/devicetree/bindings/display/panel/edt,et-series.txt
new file mode 100644
index 000000000000..f56b99ebd9be
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/edt,et-series.txt
@@ -0,0 +1,39 @@
+Emerging Display Technology Corp. Displays
+==========================================
+
+
+Display bindings for EDT Display Technology Corp. Displays which are
+compatible with the simple-panel binding, which is specified in
+simple-panel.txt
+
+
+5,7" WVGA TFT Panels
+--------------------
+
++-----------------+---------------------+-------------------------------------+
+| Identifier | compatible | description |
++=================+=====================+=====================================+
+| ET057090DHU | edt,et057090dhu | 5.7" VGA TFT LCD panel |
++-----------------+---------------------+-------------------------------------+
+
+
+7,0" WVGA TFT Panels
+--------------------
+
++-----------------+---------------------+-------------------------------------+
+| Identifier | compatible | description |
++=================+=====================+=====================================+
+| ETM0700G0DH6 | edt,etm0700g0dh6 | WVGA TFT Display with capacitive |
+| | | Touchscreen |
++-----------------+---------------------+-------------------------------------+
+| ETM0700G0BDH6 | edt,etm0700g0bdh6 | Same as ETM0700G0DH6 but with |
+| | | inverted pixel clock. |
++-----------------+---------------------+-------------------------------------+
+| ETM0700G0EDH6 | edt,etm0700g0edh6 | Same display as the ETM0700G0BDH6, |
+| | | but with changed hardware for the |
+| | | backlight and the touch interface |
++-----------------+---------------------+-------------------------------------+
+| ET070080DH6 | edt,et070080dh6 | Same timings as the ETM0700G0DH6, |
+| | | but with resistive touch. |
++-----------------+---------------------+-------------------------------------+
+
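+Example (a minimal sketch using one of the compatibles above; the backlight
+phandle is an assumption for illustration):
+
+ panel {
+ compatible = "edt,et057090dhu";
+ backlight = <&backlight>;
+ };
+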
diff --git a/Documentation/devicetree/bindings/display/panel/edt,et070080dh6.txt b/Documentation/devicetree/bindings/display/panel/edt,et070080dh6.txt
deleted file mode 100644
index 20cb38e836e4..000000000000
--- a/Documentation/devicetree/bindings/display/panel/edt,et070080dh6.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-Emerging Display Technology Corp. ET070080DH6 7.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "edt,et070080dh6"
-
-This panel is the same as ETM0700G0DH6 except for the touchscreen.
-ET070080DH6 is the model with resistive touch.
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/edt,etm0700g0dh6.txt b/Documentation/devicetree/bindings/display/panel/edt,etm0700g0dh6.txt
deleted file mode 100644
index ee4b18053e40..000000000000
--- a/Documentation/devicetree/bindings/display/panel/edt,etm0700g0dh6.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-Emerging Display Technology Corp. ETM0700G0DH6 7.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "edt,etm0700g0dh6"
-
-This panel is the same as ET070080DH6 except for the touchscreen.
-ETM0700G0DH6 is the model with capacitive multitouch.
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt
new file mode 100644
index 000000000000..4a041acb4e18
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt
@@ -0,0 +1,20 @@
+Ilitek ILI9881c based MIPI-DSI panels
+
+Required properties:
+ - compatible: must be "ilitek,ili9881c" and one of:
+ * "bananapi,lhr050h41"
+ - reg: DSI virtual channel used by that screen
+ - power-supply: phandle to the power regulator
+ - reset-gpios: a GPIO phandle for the reset pin
+
+Optional properties:
+ - backlight: phandle to the backlight used
+
+Example:
+panel@0 {
+ compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
+ reg = <0>;
+ power-supply = <&reg_display>;
+ reset-gpios = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL05 */
+ backlight = <&pwm_bl>;
+};
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt
new file mode 100644
index 000000000000..7c234cf68e11
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt
@@ -0,0 +1,12 @@
+Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,g070y2-l01"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
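+
+Example (a minimal sketch; the phandles are assumptions for illustration):
+
+ panel {
+ compatible = "innolux,g070y2-l01";
+ power-supply = <&vcc_3v3>;
+ backlight = <&backlight>;
+ };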
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt
new file mode 100644
index 000000000000..595d9dfeffd3
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt
@@ -0,0 +1,24 @@
+Innolux P097PFG 9.7" 1536x2048 TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,p097pfg"
+- reg: DSI virtual channel of the peripheral
+- avdd-supply: phandle of the regulator that provides positive voltage
+- avee-supply: phandle of the regulator that provides negative voltage
+- enable-gpios: panel enable gpio
+
+Optional properties:
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+ &mipi_dsi {
+ panel {
+ compatible = "innolux,p079zca";
+ reg = <0>;
+ avdd-supply = <...>;
+ avee-supply = <...>;
+ backlight = <&backlight>;
+ enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt b/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt
new file mode 100644
index 000000000000..a9b35265fa13
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt
@@ -0,0 +1,20 @@
+Innolux TV123WAM 12.3 inch eDP 2K display panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Required properties:
+- compatible: should be "innolux,tv123wam"
+- power-supply: regulator to provide the supply voltage
+
+Optional properties:
+- enable-gpios: GPIO pin to enable or disable the panel
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+ panel_edp: panel-edp {
+ compatible = "innolux,tv123wam";
+ enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>;
+ power-supply = <&pm8916_l2>;
+ backlight = <&backlight>;
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt
new file mode 100644
index 000000000000..164a5fa236da
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt
@@ -0,0 +1,22 @@
+Kingdisplay KD097D04 9.7" 1536x2048 TFT LCD panel
+
+Required properties:
+- compatible: should be "kingdisplay,kd097d04"
+- reg: DSI virtual channel of the peripheral
+- power-supply: phandle of the regulator that provides the supply voltage
+- enable-gpios: panel enable gpio
+
+Optional properties:
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+ &mipi_dsi {
+ panel {
+ compatible = "kingdisplay,kd097d04";
+ reg = <0>;
+ power-supply = <...>;
+ backlight = <&backlight>;
+ enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/edt,et057090dhu.txt b/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt
index 4903d7b1d947..e78292b1a131 100644
--- a/Documentation/devicetree/bindings/display/panel/edt,et057090dhu.txt
+++ b/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt
@@ -1,7 +1,7 @@
-Emerging Display Technology Corp. 5.7" VGA TFT LCD panel
+Newhaven Display International 480 x 272 TFT LCD panel
Required properties:
-- compatible: should be "edt,et057090dhu"
+- compatible: should be "newhaven,nhd-4.3-480272ef-atxl"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt b/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt
new file mode 100644
index 000000000000..eb1fb9f8d1f4
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt
@@ -0,0 +1,25 @@
+Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Required properties:
+- compatible: should be "rocktech,rk070er9427"
+
+Optional properties:
+- backlight: phandle of the backlight device attached to the panel
+
+Optional nodes:
+- Video port for LCD panel input.
+
+Example:
+ panel {
+ compatible = "rocktech,rk070er9427";
+ backlight = <&backlight_lcd>;
+
+ port {
+ lcd_panel_in: endpoint {
+ remote-endpoint = <&lcd_display_out>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt
new file mode 100644
index 000000000000..0753f6967279
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt
@@ -0,0 +1,12 @@
+Sharp LQ035Q7DB03 3.5" QVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "sharp,lq035q7db03"
+- power-supply: phandle of the regulator that provides the supply voltage
+
+Optional properties:
+- enable-gpios: GPIO pin to enable or disable the panel
+- backlight: phandle of the backlight device attached to the panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
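+
+Example (a minimal sketch; the phandles are assumptions for illustration):
+
+ panel {
+ compatible = "sharp,lq035q7db03";
+ power-supply = <&vcc_3v3>;
+ enable-gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>;
+ };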
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index 7c6854bd0a04..ec9d34be2ff7 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -19,7 +19,6 @@ Required Properties:
- reg: the memory-mapped I/O registers base address and length
- - interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifiers for the DU interrupts.
- clocks: A list of phandles + clock-specifier pairs, one for each entry in
diff --git a/Documentation/devicetree/bindings/display/sm501fb.txt b/Documentation/devicetree/bindings/display/sm501fb.txt
index 9d9f0098092b..1c79c267a57f 100644
--- a/Documentation/devicetree/bindings/display/sm501fb.txt
+++ b/Documentation/devicetree/bindings/display/sm501fb.txt
@@ -9,8 +9,6 @@ Required properties:
- First entry: System Configuration register
- Second entry: IO space (Display Controller register)
- interrupts : SMI interrupt to the cpu should be described here.
-- interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
Optional properties:
- mode : select a video mode:
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index 3346c1e2a7a0..f8773ecb7525 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -103,6 +103,7 @@ Required properties:
- compatible: value must be one of:
* allwinner,sun8i-a83t-hdmi-phy
* allwinner,sun8i-h3-hdmi-phy
+ * allwinner,sun50i-a64-hdmi-phy
- reg: base address and size of memory-mapped region
- clocks: phandles to the clocks feeding the HDMI PHY
* bus: the HDMI PHY interface clock
@@ -111,8 +112,9 @@ Required properties:
- resets: phandle to the reset controller driving the PHY
- reset-names: must be "phy"
-H3 HDMI PHY requires additional clock:
+H3 and A64 HDMI PHY require additional clocks:
- pll-0: parent of phy clock
+ - pll-1: second possible phy clock parent (A64 only)
TV Encoder
----------
@@ -145,6 +147,7 @@ Required properties:
* allwinner,sun8i-a33-tcon
* allwinner,sun8i-a83t-tcon-lcd
* allwinner,sun8i-a83t-tcon-tv
+ * allwinner,sun8i-r40-tcon-tv
* allwinner,sun8i-v3s-tcon
* allwinner,sun9i-a80-tcon-lcd
* allwinner,sun9i-a80-tcon-tv
@@ -179,7 +182,7 @@ For TCONs with channel 0, there is one more clock required:
For TCONs with channel 1, there is one more clock required:
- 'tcon-ch1': The clock driving the TCON channel 1
-When TCON support LVDS (all TCONs except TV TCON on A83T and those found
+When the TCON supports LVDS (all TCONs except TV TCONs on A83T, R40 and those found
in A13, H3, H5 and V3s SoCs), you need one more reset line:
- 'lvds': The reset line driving the LVDS logic
@@ -187,6 +190,62 @@ And on the A23, A31, A31s and A33, you need one more clock line:
- 'lvds-alt': An alternative clock source, separate from the TCON channel 0
clock, that can be used to drive the LVDS clock
+TCON TOP
+--------
+
+TCON TOP's main purpose is to configure the whole display pipeline. It
+determines the relationships between mixers and TCONs, selects the source
+TCON for HDMI, muxes LCD and TV encoder GPIO output, selects the TV encoder
+clock source and contains additional TV TCON and DSI gates.
+
+It allows the display pipeline to be configured in very different ways:
+
+ / LCD0/LVDS0
+ / [0] TCON-LCD0
+ | \ MIPI DSI
+ mixer0 |
+ \ / [1] TCON-LCD1 - LCD1/LVDS1
+ TCON-TOP
+ / \ [2] TCON-TV0 [0] - TVE0/RGB
+ mixer1 | \
+ | TCON-TOP - HDMI
+ | /
+ \ [3] TCON-TV1 [1] - TVE1/RGB
+
+Note that both TCON TOP boxes in the graph above refer to the same physical
+unit. Both mixers can be connected to any TCON.
+
+Required properties:
+ - compatible: value must be one of:
+ * allwinner,sun8i-r40-tcon-top
+ - reg: base address and size of the memory-mapped region.
+ - clocks: phandles to the clocks feeding the TCON TOP
+ * bus: TCON TOP interface clock
+ * tcon-tv0: TCON TV0 clock
+ * tve0: TVE0 clock
+ * tcon-tv1: TCON TV1 clock
+ * tve1: TVE1 clock
+ * dsi: MIPI DSI clock
+ - clock-names: clock names mentioned above
+ - resets: phandle to the reset line driving the TCON TOP
+ - #clock-cells : must contain 1
+ - clock-output-names: Names of clocks created for TCON TV0 channel clock,
+ TCON TV1 channel clock and DSI channel clock, in that order.
+
+- ports: A ports node with endpoint definitions as defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt. 6 ports should
+ be defined:
+ * port 0 is input for mixer0 mux
+ * port 1 is output for mixer0 mux
+ * port 2 is input for mixer1 mux
+ * port 3 is output for mixer1 mux
+ * port 4 is input for HDMI mux
+ * port 5 is output for HDMI mux
+ All output endpoints for mixer muxes and input endpoints for HDMI mux should
+ have a reg property with the id of the target TCON, as shown in the graph
+ above (0-3 for mixer muxes and 0-1 for HDMI mux). All ports should have
+ only one endpoint connected to a remote endpoint.
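+
+A minimal sketch of a TCON TOP node (the unit address, clock and reset
+specifiers and remote endpoints are assumptions for illustration; ports 2-5
+follow the same pattern as ports 0 and 1):
+
+ tcon_top: tcon-top@1c70000 {
+ compatible = "allwinner,sun8i-r40-tcon-top";
+ reg = <0x01c70000 0x1000>;
+ clocks = <&ccu CLK_BUS_TCON_TOP>,
+ <&ccu CLK_TCON_TV0>,
+ <&ccu CLK_TVE0>,
+ <&ccu CLK_TCON_TV1>,
+ <&ccu CLK_TVE1>,
+ <&ccu CLK_DSI_DPHY>;
+ clock-names = "bus", "tcon-tv0", "tve0",
+ "tcon-tv1", "tve1", "dsi";
+ resets = <&ccu RST_BUS_TCON_TOP>;
+ #clock-cells = <1>;
+ clock-output-names = "tcon-top-tv0",
+ "tcon-top-tv1",
+ "tcon-top-dsi";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ tcon_top_mixer0_in: endpoint {
+ remote-endpoint = <&mixer0_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ tcon_top_mixer0_out_tcon_lcd0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&tcon_lcd0_in>;
+ };
+ };
+ };
+ };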
+
DRC
---
@@ -341,6 +400,7 @@ Required properties:
* allwinner,sun8i-a33-display-engine
* allwinner,sun8i-a83t-display-engine
* allwinner,sun8i-h3-display-engine
+ * allwinner,sun8i-r40-display-engine
* allwinner,sun8i-v3s-display-engine
* allwinner,sun9i-a80-display-engine
diff --git a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
index 3055d5c2c04e..7bf1bb444812 100644
--- a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
+++ b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
@@ -8,8 +8,6 @@ Required properties:
- reg: base address and size of the LCDC device
Recommended properties:
- - interrupt-parent: the phandle for the interrupt controller that
- services interrupts for this device.
- ti,hwmods: Name of the hwmod associated to the LCDC
Optional properties:
diff --git a/Documentation/devicetree/bindings/dma/jz4780-dma.txt b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
index f25feee62b15..03e9cf7b42e0 100644
--- a/Documentation/devicetree/bindings/dma/jz4780-dma.txt
+++ b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
@@ -5,7 +5,6 @@ Required properties:
- compatible: Should be "ingenic,jz4780-dma"
- reg: Should contain the DMA controller registers location and length.
- interrupts: Should contain the interrupt specifier of the DMA controller.
-- interrupt-parent: Should be the phandle of the interrupt controller that
- clocks: Should contain a clock specifier for the JZ4780 PDMA clock.
- #dma-cells: Must be <2>. Number of integer cells in the dmas property of
DMA clients (see below).
diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt
index 1e1dc8f972e4..2f35b047f772 100644
--- a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt
+++ b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt
@@ -8,7 +8,6 @@ Required properties:
- reg: Should contain DMA registers location and length. This should be
a single entry that includes all of the per-channel registers in one
contiguous bank.
-- interrupt-parent: Phandle to the interrupt parent controller.
- interrupts: Should contain all of the per-channel DMA interrupts in
ascending order with respect to the DMA channel index.
- clocks: Must contain one entry for the ADMA module clock
diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
index f237b7928283..dbe160400adc 100644
--- a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
@@ -5,8 +5,6 @@ Required properties:
- reg: Address range of the DMAC registers. This should include
all of the per-channel registers.
- interrupt: Should contain the DMAC interrupt number.
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device.
- dma-channels: Number of channels supported by hardware.
- snps,dma-masters: Number of AXI masters supported by the hardware.
- snps,data-width: Maximum AXI data width supported by hardware.
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index 99acc712f83a..39e2b26be344 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -23,8 +23,6 @@ Deprecated properties:
Optional properties:
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- is_private: The device channels should be marked as private and not for use by the
general purpose DMA channel allocator. False if not passed.
- multi-block: Multi block transfers supported by hardware. Array property with
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 3f15f6644527..4bbc94d829c8 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -201,7 +201,6 @@ Required properties:
- #dma-cells: Should be set to <1>
Clients should use a single channel number per DMA request.
- reg: Memory map for accessing module
-- interrupt-parent: Interrupt controller the interrupt is routed through
- interrupts: Exactly 3 interrupts need to be specified in the order:
1. Transfer completion interrupt.
2. Memory protection interrupt.
diff --git a/Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt
index a784cdd94790..07a5a7aa9ea0 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt
@@ -5,7 +5,6 @@ control and rate control support for slave/peripheral dma access.
Required properties:
- compatible : Should be "xlnx,zynqmp-dma-1.0"
- reg : Memory map for gdma/adma module access.
-- interrupt-parent : Interrupt controller the interrupt is routed through
- interrupts : Should contain DMA channel interrupt.
- xlnx,bus-width : Axi buswidth in bits. Should contain 128 or 64
- clock-names : List of input clocks "clk_main", "clk_apb"
diff --git a/Documentation/devicetree/bindings/extcon/extcon-rt8973a.txt b/Documentation/devicetree/bindings/extcon/extcon-rt8973a.txt
index 6dede7d11532..cfcf455ad4de 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-rt8973a.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-rt8973a.txt
@@ -11,8 +11,6 @@ for USB D-/D+ switching.
Required properties:
- compatible: Should be "richtek,rt8973a-muic"
- reg: Specifies the I2C slave address of the MUIC block. It should be 0x14
-- interrupt-parent: Specifies the phandle of the interrupt controller to which
- the interrupts from rt8973a are delivered to.
- interrupts: Interrupt specifiers for detection interrupt sources.
Example:
diff --git a/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt b/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt
index 4ecda224955f..fc3888e09549 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt
@@ -9,8 +9,6 @@ the host controller using an I2C interface.
Required properties:
- compatible: Should be "siliconmitus,sm5502-muic"
- reg: Specifies the I2C slave address of the MUIC block. It should be 0x25
-- interrupt-parent: Specifies the phandle of the interrupt controller to which
- the interrupts from sm5502 are delivered to.
- interrupts: Interrupt specifiers for detection interrupt sources.
Example:
diff --git a/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
index 798cfc9d3839..973362eb3f1e 100644
--- a/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
@@ -25,8 +25,6 @@ Required properties:
- #gpio-cells: Should be two. The first cell is the pin number
and the second cell is used to specify optional
parameters (currently unused).
-- interrupt-parent: Phandle for the interrupt controller that
- services interrupts for this device.
- interrupts: Interrupt mapping for GPIO IRQ.
- gpio-controller: Marks the port as GPIO controller.
diff --git a/Documentation/devicetree/bindings/gpio/abilis,tb10x-gpio.txt b/Documentation/devicetree/bindings/gpio/abilis,tb10x-gpio.txt
index a25c87b650e5..ce19c5660aca 100644
--- a/Documentation/devicetree/bindings/gpio/abilis,tb10x-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/abilis,tb10x-gpio.txt
@@ -14,7 +14,6 @@ Optional Properties:
- #interrupt-cells: Should be <1>. Interrupts are triggered on both edges.
- interrupts: Defines the interrupt line connecting this GPIO controller to
its parent interrupt controller.
-- interrupt-parent: Defines the parent interrupt controller.
GPIO ranges are specified as described in
Documentation/devicetree/bindings/gpio/gpio.txt
diff --git a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
index b405b4410bfb..5d468ecd1809 100644
--- a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
@@ -30,9 +30,6 @@ Optional properties:
- interrupts:
The interrupt shared by all GPIO lines for this controller.
-- interrupt-parent:
- phandle of the parent interrupt controller
-
- interrupts-extended:
Alternate form of specifying interrupts and parents that allows for
multiple parents. This takes precedence over 'interrupts' and
diff --git a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
index dbd22e0df21e..b4cd9f906c24 100644
--- a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
@@ -19,6 +19,9 @@ Required properties:
4 = active high level-sensitive.
8 = active low level-sensitive.
+Optional properties:
+- clocks: the clock feeding the GPIO block
+
Example:
gpio0: gpio@73f84000 {
diff --git a/Documentation/devicetree/bindings/gpio/gpio-adnp.txt b/Documentation/devicetree/bindings/gpio/gpio-adnp.txt
index af66b2724837..a28902a65a62 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-adnp.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-adnp.txt
@@ -3,7 +3,6 @@ Avionic Design N-bit GPIO expander bindings
Required properties:
- compatible: should be "ad,gpio-adnp"
- reg: The I2C slave address for this device.
-- interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifier for the controllers interrupt.
- #gpio-cells: Should be 2. The first cell is the GPIO number and the
second cell is used to specify optional parameters:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt b/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
index fc6378c778c5..7e9b586770b0 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
@@ -17,7 +17,6 @@ Required properties:
Optional properties:
-- interrupt-parent : The parent interrupt controller, optional if inherited
- clocks : A phandle to the clock to use for debounce timings
The gpio and interrupt properties are further described in their respective
diff --git a/Documentation/devicetree/bindings/gpio/gpio-ath79.txt b/Documentation/devicetree/bindings/gpio/gpio-ath79.txt
index c522851017ae..cf71f3ec969d 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-ath79.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-ath79.txt
@@ -12,7 +12,6 @@ Required properties:
- ngpios: Should be set to the number of GPIOs available on the SoC.
Optional properties:
-- interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifier for the controllers interrupt.
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode interrupt
diff --git a/Documentation/devicetree/bindings/gpio/gpio-davinci.txt b/Documentation/devicetree/bindings/gpio/gpio-davinci.txt
index 8beb0539b6d8..553b92a7e87b 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-davinci.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-davinci.txt
@@ -15,8 +15,6 @@ Required Properties:
- first cell is the pin number
- second cell is used to specify optional parameters (unused)
-- interrupt-parent: phandle of the parent interrupt controller.
-
- interrupts: Array of GPIO interrupt number. Only banked or unbanked IRQs are
supported at a time.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-max732x.txt b/Documentation/devicetree/bindings/gpio/gpio-max732x.txt
index 5fdc843b4542..b3a9c0c32823 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-max732x.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-max732x.txt
@@ -30,7 +30,6 @@ Optional properties:
- #interrupt-cells: Number of cells to encode an interrupt source, shall be 2.
- first cell is the pin number
- second cell is used to specify flags
- - interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifier for the controllers interrupt.
Please refer to gpio.txt in this directory for details of the common GPIO
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
index 88f228665507..4e3c550e319a 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
@@ -37,6 +37,7 @@ Required properties:
- #interrupt-cells: if to be used as interrupt expander.
Optional properties:
+ - interrupts: interrupt specifier for the device's interrupt output.
- reset-gpios: GPIO specification for the RESET input. This is an
active low signal to the PCA953x.
- vcc-supply: power supply regulator.
@@ -49,6 +50,8 @@ Example:
reg = <0x20>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pca9505>;
+ gpio-controller;
+ #gpio-cells = <2>;
interrupt-parent = <&gpio3>;
interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt b/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt
index 7d3bd631d011..a482455a205b 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt
@@ -49,7 +49,6 @@ Optional Properties:
- interrupt-controller: Identifies the node as an interrupt controller.
- #interrupt-cells: Number of cells to encode an interrupt source, shall be 2.
- - interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifier for the controllers interrupt.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-uniphier.txt b/Documentation/devicetree/bindings/gpio/gpio-uniphier.txt
index fed9158dd913..f281f12dac18 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-uniphier.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-uniphier.txt
@@ -6,7 +6,6 @@ Required properties:
- gpio-controller: Marks the device node as a GPIO controller.
- #gpio-cells: Should be 2. The first cell is the pin number and the second
cell is used to specify optional parameters.
-- interrupt-parent: Specifies the parent interrupt controller.
- interrupt-controller: Marks the device node as an interrupt controller.
- #interrupt-cells: Should be 2. The first cell defines the interrupt number.
The second cell bits[3:0] is used to specify trigger type as follows:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-xgene-sb.txt b/Documentation/devicetree/bindings/gpio/gpio-xgene-sb.txt
index 5490c1d68981..e90fb987e25f 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-xgene-sb.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-xgene-sb.txt
@@ -26,7 +26,6 @@ Required properties:
1 = active low
- gpio-controller: Marks the device node as a GPIO controller.
- interrupts: The EXT_INT_0 parent interrupt resource must be listed first.
-- interrupt-parent: Phandle of the parent interrupt controller.
- interrupt-cells: Should be two.
- first cell is 0-N corresponding to EXT_INT_0 to EXT_INT_N.
- second cell is used to specify flags.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt b/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
index 63bf4becd5f0..08eed2335db0 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
@@ -14,8 +14,6 @@ Required properties:
Optional properties:
- interrupts : Interrupt mapping for GPIO IRQ.
-- interrupt-parent : Phandle for the interrupt controller that
- services interrupts for this device.
- xlnx,all-inputs : if n-th bit is setup, GPIO-n is input
- xlnx,dout-default : if n-th bit is 1, GPIO-n default value is 1
- xlnx,gpio-width : gpio width
diff --git a/Documentation/devicetree/bindings/gpio/gpio-xlp.txt b/Documentation/devicetree/bindings/gpio/gpio-xlp.txt
index 28662d83a43e..47fc64922fe0 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-xlp.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-xlp.txt
@@ -30,7 +30,6 @@ Required properties:
4 = active high level-sensitive.
8 = active low level-sensitive.
- interrupts: Interrupt number for this device.
-- interrupt-parent: phandle of the parent interrupt controller.
- interrupt-controller: Identifies the node as an interrupt controller.
Example:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-zynq.txt b/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
index 7b542657f259..4fa4eb5507cd 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
@@ -11,7 +11,6 @@ Required properties:
- gpio-controller : Marks the device node as a GPIO controller.
- interrupts : Interrupt specifier (see interrupt bindings for
details)
-- interrupt-parent : Must be core interrupt controller
- interrupt-controller : Marks the device node as an interrupt controller.
- #interrupt-cells : Should be 2. The first cell is the GPIO number.
The second cell bits[3:0] is used to specify trigger type and level flags:
diff --git a/Documentation/devicetree/bindings/gpio/mediatek,mt7621-gpio.txt b/Documentation/devicetree/bindings/gpio/mediatek,mt7621-gpio.txt
new file mode 100644
index 000000000000..ba455589f869
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/mediatek,mt7621-gpio.txt
@@ -0,0 +1,35 @@
+Mediatek MT7621 SoC GPIO controller bindings
+
+The IP core used inside these SoCs has 3 banks of 32 GPIOs each.
+The registers of all the banks are interwoven inside one single IO range.
+One GPIO controller instance is loaded per bank. The GPIO controller can also
+receive interrupts on any of the GPIOs, either edge- or level-triggered. It
+then interrupts the CPU using GIC INT12.
+
+Required properties for the top level node:
+- #gpio-cells : Should be two. The first cell is the GPIO pin number and the
+ second cell specifies GPIO flags, as defined in <dt-bindings/gpio/gpio.h>.
+ Only the GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt. Should be 2. The first cell defines the interrupt number,
+ the second encodes the trigger flags as described in
+ Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+- compatible:
+ - "mediatek,mt7621-gpio" for Mediatek controllers
+- reg : Physical base address and length of the controller's registers
+- interrupt-parent : phandle of the parent interrupt controller.
+- interrupts : Interrupt specifier for the controller's interrupt.
+- interrupt-controller : Mark the device node as an interrupt controller.
+- gpio-controller : Marks the device node as a GPIO controller.
+
+Example:
+ gpio@600 {
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ compatible = "mediatek,mt7621-gpio";
+ gpio-controller;
+ interrupt-controller;
+ reg = <0x600 0x100>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 12 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt b/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt
index 45a61b462287..df63da46309c 100644
--- a/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt
@@ -14,7 +14,6 @@ Optional properties:
- #interrupt-cells: Should be two.
- interrupts: Interrupt specifier for the controller's Broadway (PowerPC)
interrupt.
-- interrupt-parent: phandle of the parent interrupt controller.
Example:
diff --git a/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.txt b/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.txt
index c82a2e221bc1..adff16c71d21 100644
--- a/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.txt
@@ -68,6 +68,8 @@ Required properties:
One of:
- "nvidia,tegra186-gpio".
- "nvidia,tegra186-gpio-aon".
+ - "nvidia,tegra194-gpio".
+ - "nvidia,tegra194-gpio-aon".
- reg-names
Array of strings.
Contains a list of names for the register spaces described by the reg
@@ -91,6 +93,8 @@ Required properties:
depending on compatible value:
- "nvidia,tegra186-gpio": 6 entries.
- "nvidia,tegra186-gpio-aon": 1 entry.
+ - "nvidia,tegra194-gpio": 6 entries.
+ - "nvidia,tegra194-gpio-aon": 1 entry.
- gpio-controller
Boolean.
Marks the device node as a GPIO controller/provider.
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index 378f1322211e..4018ee57a6af 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -17,6 +17,7 @@ Required Properties:
- "renesas,gpio-r8a7796": for R8A7796 (R-Car M3-W) compatible GPIO controller.
- "renesas,gpio-r8a77965": for R8A77965 (R-Car M3-N) compatible GPIO controller.
- "renesas,gpio-r8a77970": for R8A77970 (R-Car V3M) compatible GPIO controller.
+ - "renesas,gpio-r8a77980": for R8A77980 (R-Car V3H) compatible GPIO controller.
- "renesas,gpio-r8a77990": for R8A77990 (R-Car E3) compatible GPIO controller.
- "renesas,gpio-r8a77995": for R8A77995 (R-Car D3) compatible GPIO controller.
- "renesas,rcar-gen1-gpio": for a generic R-Car Gen1 GPIO controller.
@@ -31,7 +32,6 @@ Required Properties:
- reg: Base address and length of each memory resource used by the GPIO
controller hardware module.
- - interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifier for the controllers interrupt.
- gpio-controller: Marks the device node as a gpio controller.
diff --git a/Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.txt b/Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.txt
new file mode 100644
index 000000000000..f9231df17c2b
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/rockchip,rk3328-grf-gpio.txt
@@ -0,0 +1,32 @@
+Rockchip RK3328 GRF (General Register Files) GPIO controller.
+
+In Rockchip RK3328, the output-only GPIO_MUTE pin, originally for codec mute
+control, can also be used as a general-purpose output. It is controlled by the
+GRF_SOC_CON10 register in the GRF. Aside from the GPIO_MUTE pin, the HDMI pins
+can also be set in the same way.
+
+Currently this GPIO controller only supports the mute pin. If needed in the
+future, support for the HDMI pins can also be added.
+
+Required properties:
+- compatible: Should contain "rockchip,rk3328-grf-gpio".
+- gpio-controller: Marks the device node as a gpio controller.
+- #gpio-cells: Should be 2. The first cell is the pin number and
+ the second cell is used to specify the gpio polarity:
+ 0 = Active high,
+ 1 = Active low.
+
+Example:
+
+ grf: syscon@ff100000 {
+ compatible = "rockchip,rk3328-grf", "syscon", "simple-mfd";
+
+ grf_gpio: grf-gpio {
+ compatible = "rockchip,rk3328-grf-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
+
+Note: The grf_gpio node should be declared as a child of the GRF (General
+Register Files) node. The GPIO_MUTE pin is referred to as <&grf_gpio 0>.
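+
+As a hedged usage sketch (the hog node below relies on the generic GPIO hog
+bindings from gpio.txt, not on anything defined here; the line name is an
+assumption), the mute pin could be hogged low at boot like this:
+
+	&grf_gpio {
+		codec-mute-hog {
+			gpio-hog;
+			/* GPIO_MUTE is pin 0; second cell 0 = active high */
+			gpios = <0 0>;
+			output-low;
+			line-name = "codec-mute";
+		};
+	};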
diff --git a/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt b/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
index 3c1118bc67f5..7276b50c3506 100644
--- a/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
@@ -25,7 +25,6 @@ controller.
interrupt. Shall be set to 2. The first cell defines the interrupt number,
 the second encodes the trigger flags as described in
Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-- interrupt-parent : The parent interrupt controller.
- interrupts : The interrupts to the parent controller raised when GPIOs
generate the interrupts. If the controller provides one combined interrupt
for all GPIOs, specify a single interrupt. If the controller provides one
diff --git a/Documentation/devicetree/bindings/hsi/omap-ssi.txt b/Documentation/devicetree/bindings/hsi/omap-ssi.txt
index 955e335e7e56..77a0c3c3036e 100644
--- a/Documentation/devicetree/bindings/hsi/omap-ssi.txt
+++ b/Documentation/devicetree/bindings/hsi/omap-ssi.txt
@@ -33,7 +33,6 @@ Required Port sub-node properties:
- reg-names: Contains the values "tx" and "rx" (in this order).
- reg: Contains a matching register specifier for each entry
in reg-names.
-- interrupt-parent Should be a phandle for the interrupt controller
- interrupts: Should contain interrupt specifiers for mpu interrupts
0 and 1 (in this order).
- ti,ssi-cawake-gpio: Defines which GPIO pin is used to signify CAWAKE
diff --git a/Documentation/devicetree/bindings/hwmon/npcm750-pwm-fan.txt b/Documentation/devicetree/bindings/hwmon/npcm750-pwm-fan.txt
new file mode 100644
index 000000000000..28f43e929f6d
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/npcm750-pwm-fan.txt
@@ -0,0 +1,84 @@
+Nuvoton NPCM7xx PWM and Fan Tacho controller device
+
+The Nuvoton BMC NPCM7XX supports 8 Pulse-width modulation (PWM)
+controller outputs and 16 Fan tachometer controller inputs.
+
+Required properties for pwm-fan node
+- #address-cells : should be 1.
+- #size-cells : should be 0.
+- compatible : "nuvoton,npcm750-pwm-fan" for Poleg NPCM7XX.
+- reg : specifies physical base address and size of the registers.
+- reg-names : must contain:
+ * "pwm" for the PWM registers.
+ * "fan" for the Fan registers.
+- clocks : phandle of reference clocks.
+- clock-names : must contain
+ * "pwm" for PWM controller operating clock.
+ * "fan" for Fan controller operating clock.
+- interrupts : contain the Fan interrupts with flags for falling edge.
+- pinctrl-names : a pinctrl state named "default" must be defined.
+- pinctrl-0 : phandle referencing pin configuration of the PWM and Fan
+ controller ports.
+
+fan subnode format:
+===================
+The fan subnode can contain up to 8 child nodes, each representing a fan.
+Each fan child node must have one PWM channel and at least one Fan tach channel.
+
+A PWM channel can be given cooling-levels to create a cooling device, which
+can then be bound to a thermal zone for thermal control.
+
+Required properties for each child node:
+- reg : specifies the PWM output channel:
+	an integer value in the range 0 through 7 that represents
+	the PWM channel number used.
+
+- fan-tach-ch : specifies the Fan tach input channel(s):
+	integer values in the range 0 through 15 that represent
+	the fan tach channel numbers used.
+
+ At least one Fan tach input channel is required
+
+Optional property for each child node:
+- cooling-levels: PWM duty cycle values in a range from 0 to 255
+ which correspond to thermal cooling states.
+
+Examples:
+
+pwm_fan:pwm-fan-controller@103000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "nuvoton,npcm750-pwm-fan";
+ reg = <0x103000 0x2000>,
+ <0x180000 0x8000>;
+ reg-names = "pwm", "fan";
+ clocks = <&clk NPCM7XX_CLK_APB3>,
+ <&clk NPCM7XX_CLK_APB4>;
+ clock-names = "pwm","fan";
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_pins &pwm1_pins &pwm2_pins
+ &fanin0_pins &fanin1_pins &fanin2_pins
+ &fanin3_pins &fanin4_pins>;
+ fan@0 {
+ reg = <0x00>;
+ fan-tach-ch = /bits/ 8 <0x00 0x01>;
+ cooling-levels = <127 255>;
+ };
+ fan@1 {
+ reg = <0x01>;
+ fan-tach-ch = /bits/ 8 <0x02 0x03>;
+ };
+ fan@2 {
+ reg = <0x02>;
+ fan-tach-ch = /bits/ 8 <0x04>;
+ };
+
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt b/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
index e7106bfc1f13..8fbd8633a387 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
@@ -11,9 +11,6 @@ Required Properties:
- resets : phandle to reset controller with the reset number in
the second cell
- interrupts : interrupt number
-- interrupt-parent : interrupt controller for bus, should reference a
- aspeed,ast2400-i2c-ic or aspeed,ast2500-i2c-ic
- interrupt controller
Optional Properties:
- bus-frequency : frequency of the bus clock in Hz defaults to 100 kHz when not
diff --git a/Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt b/Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt
index aeceaceba3c5..0380609b177a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt
@@ -10,8 +10,6 @@ Required properties:
Optional properties :
-- interrupt-parent: specifies the phandle to the parent interrupt controller
- this one is cascaded from
- interrupts: specifies the interrupt number, the irq line to be used
- interrupt-names: Interrupt name string
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
index 60fe90d69f4e..00e4365d7206 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
@@ -5,7 +5,6 @@ Required properties:
- "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc
- "fsl,imx8dv-lpi2c" for LPI2C compatible with the one integrated on i.MX8DV soc
- reg : address and length of the lpi2c master registers
-- interrupt-parent : core interrupt controller
- interrupts : lpi2c interrupt
- clocks : lpi2c clock specifier
diff --git a/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt b/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
index d4a082acf92f..3738cfbf863f 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
@@ -11,10 +11,6 @@ Recommended properties:
- pinctrl-names: should be "default";
- pinctrl-0: phandle to pinctrl function
-Optional properties:
-- interrupt-parent: Should be the phandle of the interrupt controller that
- delivers interrupts to the I2C block.
-
Example
/ {
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mpc.txt b/Documentation/devicetree/bindings/i2c/i2c-mpc.txt
index 1eacd6b20ed5..42a390526957 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mpc.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mpc.txt
@@ -15,8 +15,6 @@ Recommended properties :
information for the interrupt. This should be encoded based on
the information in section 2) depending on the type of interrupt
controller you have.
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
- fsl,preserve-clocking : boolean; if defined, the clock settings
from the bootloader are preserved (not touched).
- clock-frequency : desired I2C bus clock frequency in Hz.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
index 34d91501342e..ccf6c86ed076 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
@@ -28,8 +28,6 @@ Optional Properties:
- i2c-mux-idle-disconnect: Boolean; if defined, forces mux to disconnect all
children in idle state. This is necessary for example, if there are several
multiplexers on the bus and the devices behind them use same I2C addresses.
- - interrupt-parent: Phandle for the interrupt controller that services
- interrupts for this device.
- interrupts: Interrupt mapping for IRQ.
- interrupt-controller: Marks the device node as an interrupt controller.
- #interrupt-cells : Should be two.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-pca-platform.txt b/Documentation/devicetree/bindings/i2c/i2c-pca-platform.txt
index f1f3876bb8e8..73a693d66ef7 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-pca-platform.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-pca-platform.txt
@@ -12,8 +12,6 @@ Required properties :
Optional properties
- interrupts : the interrupt number
- - interrupt-parent : the phandle for the interrupt controller.
- If an interrupt is not specified polling will be used.
- reset-gpios : gpio specifier for gpio connected to RESET_N pin. As the line
is active low, it should be marked GPIO_ACTIVE_LOW.
- clock-frequency : I2C bus frequency.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-pnx.txt b/Documentation/devicetree/bindings/i2c/i2c-pnx.txt
index fe98ada33ee4..2a59006cf79e 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-pnx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-pnx.txt
@@ -7,8 +7,6 @@ Required properties:
- interrupts: configure one interrupt line
- #address-cells: always 1 (for i2c addresses)
- #size-cells: always 0
- - interrupt-parent: the phandle for the interrupt controller that
- services interrupts for this device.
Optional properties:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-pxa.txt b/Documentation/devicetree/bindings/i2c/i2c-pxa.txt
index d30f0b11d853..c30783c0eca0 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-pxa.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-pxa.txt
@@ -12,9 +12,6 @@ Required properties :
Recommended properties :
- interrupts : the interrupt number
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device. If the parent is the default
- interrupt controller in device tree, it could be ignored.
- mrvl,i2c-polling : Disable interrupt of i2c controller. Polling
status register of i2c controller instead.
- mrvl,i2c-fast-mode : Enable fast mode of i2c controller.
diff --git a/Documentation/devicetree/bindings/iio/accel/adxl345.txt b/Documentation/devicetree/bindings/iio/accel/adxl345.txt
index e7111b02c02c..ae09a046bc72 100644
--- a/Documentation/devicetree/bindings/iio/accel/adxl345.txt
+++ b/Documentation/devicetree/bindings/iio/accel/adxl345.txt
@@ -11,8 +11,6 @@ Required properties for SPI bus usage:
- spi-cpol and spi-cpha : must be defined for adxl345 to enable SPI mode 3
Optional properties:
- - interrupt-parent : phandle to the parent interrupt controller as documented
- in Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
- interrupts: interrupt mapping for IRQ as documented in
Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
diff --git a/Documentation/devicetree/bindings/iio/accel/bma180.txt b/Documentation/devicetree/bindings/iio/accel/bma180.txt
index 4a3679d54457..3b25b4c4d446 100644
--- a/Documentation/devicetree/bindings/iio/accel/bma180.txt
+++ b/Documentation/devicetree/bindings/iio/accel/bma180.txt
@@ -10,8 +10,6 @@ Required properties:
Optional properties:
- - interrupt-parent : should be the phandle for the interrupt controller
-
- interrupts : interrupt mapping for GPIO IRQ, it should by configured with
flags IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING
For the bma250 the first interrupt listed must be the one
diff --git a/Documentation/devicetree/bindings/iio/accel/mma8452.txt b/Documentation/devicetree/bindings/iio/accel/mma8452.txt
index 45f5c5c5929c..2100e9af379c 100644
--- a/Documentation/devicetree/bindings/iio/accel/mma8452.txt
+++ b/Documentation/devicetree/bindings/iio/accel/mma8452.txt
@@ -15,8 +15,6 @@ Required properties:
Optional properties:
- - interrupt-parent: should be the phandle for the interrupt controller
-
- interrupts: interrupt mapping for GPIO IRQ
- interrupt-names: should contain "INT1" and/or "INT2", the accelerometer's
diff --git a/Documentation/devicetree/bindings/iio/adc/cpcap-adc.txt b/Documentation/devicetree/bindings/iio/adc/cpcap-adc.txt
index 487ea966858e..ec04008e8f4f 100644
--- a/Documentation/devicetree/bindings/iio/adc/cpcap-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/cpcap-adc.txt
@@ -2,7 +2,6 @@ Motorola CPCAP PMIC ADC binding
Required properties:
- compatible: Should be "motorola,cpcap-adc" or "motorola,mapphone-cpcap-adc"
-- interrupt-parent: The interrupt controller
- interrupts: The interrupt number for the ADC device
- interrupt-names: Should be "adcdone"
- #io-channel-cells: Number of cells in an IIO specifier
diff --git a/Documentation/devicetree/bindings/iio/adc/fsl,imx25-gcq.txt b/Documentation/devicetree/bindings/iio/adc/fsl,imx25-gcq.txt
index b0866d36a307..eebdcec3dab5 100644
--- a/Documentation/devicetree/bindings/iio/adc/fsl,imx25-gcq.txt
+++ b/Documentation/devicetree/bindings/iio/adc/fsl,imx25-gcq.txt
@@ -8,7 +8,6 @@ Required properties:
- reg: Should be the register range of the module.
- interrupts: Should be the interrupt number of the module.
Typically this is <1>.
- - interrupt-parent: phandle to the tsadc module of the i.MX25.
- #address-cells: Should be <1> (setting for the subnodes)
- #size-cells: Should be <0> (setting for the subnodes)
diff --git a/Documentation/devicetree/bindings/iio/adc/max1027-adc.txt b/Documentation/devicetree/bindings/iio/adc/max1027-adc.txt
index a8770cc6bcad..e680c61dfb84 100644
--- a/Documentation/devicetree/bindings/iio/adc/max1027-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/max1027-adc.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible: Should be "maxim,max1027" or "maxim,max1029" or "maxim,max1031"
- reg: SPI chip select number for the device
- - interrupt-parent: phandle to the parent interrupt controller
- see: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
- interrupts: IRQ line for the ADC
see: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
index f1ead43a1a95..8346bcb04ad7 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
@@ -60,7 +60,6 @@ Required properties:
- reg: Offset of ADC instance in ADC block (e.g. may be 0x0, 0x100, 0x200).
- clocks: Input clock private to this ADC instance. It's required only on
stm32f4, that has per instance clock input for registers access.
-- interrupt-parent: Phandle to the parent interrupt controller.
- interrupts: IRQ Line for the ADC (e.g. may be 0 for adc@0, 1 for adc@100 or
2 for adc@200).
- st,adc-channels: List of single-ended channels muxed for this ADC.
diff --git a/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt b/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
index d71258e2d456..e0e0755cabd8 100644
--- a/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
@@ -22,7 +22,6 @@ Required properties:
clock to the AXI bus interface of the core.
Optional properties:
- - interrupt-parent: phandle to the parent interrupt controller
- xlnx,external-mux:
* "none": No external multiplexer is used, this is the default
if the property is omitted.
diff --git a/Documentation/devicetree/bindings/iio/chemical/atlas,ec-sm.txt b/Documentation/devicetree/bindings/iio/chemical/atlas,ec-sm.txt
index 2962bd9a2b3d..f4320595b851 100644
--- a/Documentation/devicetree/bindings/iio/chemical/atlas,ec-sm.txt
+++ b/Documentation/devicetree/bindings/iio/chemical/atlas,ec-sm.txt
@@ -6,7 +6,6 @@ Required properties:
- compatible: must be "atlas,ec-sm"
- reg: the I2C address of the sensor
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic interrupt client
diff --git a/Documentation/devicetree/bindings/iio/chemical/atlas,orp-sm.txt b/Documentation/devicetree/bindings/iio/chemical/atlas,orp-sm.txt
index 5d8b687d5edc..af1f5a9aa4da 100644
--- a/Documentation/devicetree/bindings/iio/chemical/atlas,orp-sm.txt
+++ b/Documentation/devicetree/bindings/iio/chemical/atlas,orp-sm.txt
@@ -6,7 +6,6 @@ Required properties:
- compatible: must be "atlas,orp-sm"
- reg: the I2C address of the sensor
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic interrupt client
diff --git a/Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt b/Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt
index cffa1907463a..79d90f060327 100644
--- a/Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt
+++ b/Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt
@@ -6,7 +6,6 @@ Required properties:
- compatible: must be "atlas,ph-sm"
- reg: the I2C address of the sensor
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic interrupt client
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
index b0d3b59966bc..233fe207aded 100644
--- a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
+++ b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
@@ -5,7 +5,6 @@ Required properties:
- reg : the I2C address of the sensor
Optional properties:
- - interrupt-parent : should be the phandle for the interrupt controller
- interrupts : interrupt mapping for the trigger interrupt from the
internal oscillator. The following IRQ modes are supported:
IRQ_TYPE_EDGE_RISING, IRQ_TYPE_EDGE_FALLING, IRQ_TYPE_LEVEL_HIGH and
diff --git a/Documentation/devicetree/bindings/iio/health/afe4403.txt b/Documentation/devicetree/bindings/iio/health/afe4403.txt
index 2fffd70336ba..8e412054d6d5 100644
--- a/Documentation/devicetree/bindings/iio/health/afe4403.txt
+++ b/Documentation/devicetree/bindings/iio/health/afe4403.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible : Should be "ti,afe4403".
- reg : SPI chip select address of device.
- tx-supply : Regulator supply to transmitting LEDs.
- - interrupt-parent : Phandle to he parent interrupt controller.
- interrupts : The interrupt line the device ADC_RDY pin is
connected to. For details refer to,
../../interrupt-controller/interrupts.txt.
diff --git a/Documentation/devicetree/bindings/iio/health/afe4404.txt b/Documentation/devicetree/bindings/iio/health/afe4404.txt
index de69f203edfa..0b52830a0d9c 100644
--- a/Documentation/devicetree/bindings/iio/health/afe4404.txt
+++ b/Documentation/devicetree/bindings/iio/health/afe4404.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible : Should be "ti,afe4404".
- reg : I2C address of the device.
- tx-supply : Regulator supply to transmitting LEDs.
- - interrupt-parent : Phandle to he parent interrupt controller.
- interrupts : The interrupt line the device ADC_RDY pin is
connected to. For details refer to,
../interrupt-controller/interrupts.txt.
diff --git a/Documentation/devicetree/bindings/iio/health/max30100.txt b/Documentation/devicetree/bindings/iio/health/max30100.txt
index 8d8176459d09..0054908a6e74 100644
--- a/Documentation/devicetree/bindings/iio/health/max30100.txt
+++ b/Documentation/devicetree/bindings/iio/health/max30100.txt
@@ -5,7 +5,6 @@ Maxim MAX30100 heart rate and pulse oximeter sensor
Required properties:
- compatible: must be "maxim,max30100"
- reg: the I2C address of the sensor
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic
diff --git a/Documentation/devicetree/bindings/iio/health/max30102.txt b/Documentation/devicetree/bindings/iio/health/max30102.txt
index ef2ca0a0306f..7ef7ae40ae4f 100644
--- a/Documentation/devicetree/bindings/iio/health/max30102.txt
+++ b/Documentation/devicetree/bindings/iio/health/max30102.txt
@@ -7,7 +7,6 @@ Maxim MAX30105 optical particle-sensing module
Required properties:
- compatible: must be "maxim,max30102" or "maxim,max30105"
- reg: the I2C address of the sensor
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic
diff --git a/Documentation/devicetree/bindings/iio/humidity/hts221.txt b/Documentation/devicetree/bindings/iio/humidity/hts221.txt
index 10adeb0d703d..84d029372260 100644
--- a/Documentation/devicetree/bindings/iio/humidity/hts221.txt
+++ b/Documentation/devicetree/bindings/iio/humidity/hts221.txt
@@ -13,7 +13,6 @@ Optional properties:
when it is not active, whereas a pull-up one is needed when interrupt
line is configured as IRQ_TYPE_LEVEL_LOW or IRQ_TYPE_EDGE_FALLING.
Refer to pinctrl/pinctrl-bindings.txt for the property description.
-- interrupt-parent: should be the phandle for the interrupt controller
- interrupts: interrupt mapping for IRQ. It should be configured with
flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
IRQ_TYPE_EDGE_FALLING.
diff --git a/Documentation/devicetree/bindings/iio/imu/bmi160.txt b/Documentation/devicetree/bindings/iio/imu/bmi160.txt
index ae0112c7debc..0c1c105fb503 100644
--- a/Documentation/devicetree/bindings/iio/imu/bmi160.txt
+++ b/Documentation/devicetree/bindings/iio/imu/bmi160.txt
@@ -9,7 +9,6 @@ Required properties:
- spi-max-frequency : set maximum clock frequency (only for SPI)
Optional properties:
- - interrupt-parent : should be the phandle of the interrupt controller
- interrupts : interrupt mapping for IRQ, must be IRQ_TYPE_LEVEL_LOW
- interrupt-names : set to "INT1" if INT1 pin should be used as interrupt
input, set to "INT2" if INT2 pin should be used instead
diff --git a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
index 5f4777e8cc9e..d33c19d68de3 100644
--- a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
+++ b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
@@ -11,7 +11,6 @@ Required properties:
"invensense,mpu9255"
"invensense,icm20608"
- reg : the I2C address of the sensor
- - interrupt-parent : should be the phandle for the interrupt controller
- interrupts: interrupt mapping for IRQ. It should be configured with flags
IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
IRQ_TYPE_EDGE_FALLING.
diff --git a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
index ef8a8566c63f..ea2d6e0ae4c5 100644
--- a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
+++ b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
@@ -20,7 +20,6 @@ Optional properties:
IRQ_TYPE_EDGE_RISING a pull-down resistor is needed to drive the line
when it is not active, whereas a pull-up one is needed when interrupt
line is configured as IRQ_TYPE_LEVEL_LOW or IRQ_TYPE_EDGE_FALLING.
-- interrupt-parent: should be the phandle for the interrupt controller
- interrupts: interrupt mapping for IRQ. It should be configured with
flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
IRQ_TYPE_EDGE_FALLING.
diff --git a/Documentation/devicetree/bindings/iio/light/apds9300.txt b/Documentation/devicetree/bindings/iio/light/apds9300.txt
index d6f66c73ddbf..aa199e09a493 100644
--- a/Documentation/devicetree/bindings/iio/light/apds9300.txt
+++ b/Documentation/devicetree/bindings/iio/light/apds9300.txt
@@ -9,7 +9,6 @@ Required properties:
Optional properties:
- - interrupt-parent : should be the phandle for the interrupt controller
- interrupts : interrupt mapping for GPIO IRQ
Example:
diff --git a/Documentation/devicetree/bindings/iio/light/apds9960.txt b/Documentation/devicetree/bindings/iio/light/apds9960.txt
index 174b709f16db..3af325ad194b 100644
--- a/Documentation/devicetree/bindings/iio/light/apds9960.txt
+++ b/Documentation/devicetree/bindings/iio/light/apds9960.txt
@@ -6,7 +6,6 @@ Required properties:
- compatible: must be "avago,apds9960"
- reg: the I2C address of the sensor
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts : the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic interrupt client
diff --git a/Documentation/devicetree/bindings/iio/light/isl29018.txt b/Documentation/devicetree/bindings/iio/light/isl29018.txt
index 425ab459e209..b9bbde3e13ed 100644
--- a/Documentation/devicetree/bindings/iio/light/isl29018.txt
+++ b/Documentation/devicetree/bindings/iio/light/isl29018.txt
@@ -10,7 +10,6 @@ Required properties:
Optional properties:
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic interrupt client
diff --git a/Documentation/devicetree/bindings/iio/light/opt3001.txt b/Documentation/devicetree/bindings/iio/light/opt3001.txt
index eac30d508849..47b13eb8f4ec 100644
--- a/Documentation/devicetree/bindings/iio/light/opt3001.txt
+++ b/Documentation/devicetree/bindings/iio/light/opt3001.txt
@@ -13,7 +13,6 @@ Required properties:
- reg: the I2C address of the sensor
Optional properties:
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: interrupt mapping for GPIO IRQ (configure for falling edge)
Example:
diff --git a/Documentation/devicetree/bindings/iio/light/tsl2583.txt b/Documentation/devicetree/bindings/iio/light/tsl2583.txt
index 8e2066c83f70..059dffa1829a 100644
--- a/Documentation/devicetree/bindings/iio/light/tsl2583.txt
+++ b/Documentation/devicetree/bindings/iio/light/tsl2583.txt
@@ -10,7 +10,6 @@ Required properties:
Optional properties:
- - interrupt-parent: should be the phandle for the interrupt controller
- interrupts: the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic interrupt client
diff --git a/Documentation/devicetree/bindings/iio/light/uvis25.txt b/Documentation/devicetree/bindings/iio/light/uvis25.txt
index 3041207e3f3c..043c139d91e6 100644
--- a/Documentation/devicetree/bindings/iio/light/uvis25.txt
+++ b/Documentation/devicetree/bindings/iio/light/uvis25.txt
@@ -5,7 +5,6 @@ Required properties:
- reg: i2c address of the sensor / spi cs line
Optional properties:
-- interrupt-parent: should be the phandle for the interrupt controller
- interrupts: interrupt mapping for IRQ. It should be configured with
flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
IRQ_TYPE_EDGE_FALLING.
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/bmc150_magn.txt b/Documentation/devicetree/bindings/iio/magnetometer/bmc150_magn.txt
index 9f263b7df162..fd5fca90fb39 100644
--- a/Documentation/devicetree/bindings/iio/magnetometer/bmc150_magn.txt
+++ b/Documentation/devicetree/bindings/iio/magnetometer/bmc150_magn.txt
@@ -9,7 +9,6 @@ Required properties:
Optional properties:
- - interrupt-parent : phandle to the parent interrupt controller
- interrupts : interrupt mapping for GPIO IRQ
Example:
diff --git a/Documentation/devicetree/bindings/iio/pressure/bmp085.txt b/Documentation/devicetree/bindings/iio/pressure/bmp085.txt
index c7198a03c906..5498b0688b6f 100644
--- a/Documentation/devicetree/bindings/iio/pressure/bmp085.txt
+++ b/Documentation/devicetree/bindings/iio/pressure/bmp085.txt
@@ -12,7 +12,6 @@ Optional properties:
- temp-measurement-period: temperature measurement period (milliseconds)
- default-oversampling: default oversampling value to be used at startup,
value range is 0-3 with rising sensitivity.
-- interrupt-parent: should be the phandle for the interrupt controller
- interrupts: interrupt mapping for IRQ
- reset-gpios: a GPIO line handling reset of the sensor: as the line is
active low, it should be marked GPIO_ACTIVE_LOW (see gpio/gpio.txt)
diff --git a/Documentation/devicetree/bindings/iio/pressure/zpa2326.txt b/Documentation/devicetree/bindings/iio/pressure/zpa2326.txt
index fb85de676e03..a36ab3e0c3f7 100644
--- a/Documentation/devicetree/bindings/iio/pressure/zpa2326.txt
+++ b/Documentation/devicetree/bindings/iio/pressure/zpa2326.txt
@@ -15,8 +15,6 @@ Optional properties:
power to the sensor
- vdd-supply: an optional regulator that needs to be on to provide VDD
power to the sensor
-- interrupt-parent: phandle to the parent interrupt controller as documented in
- Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
- interrupts: interrupt mapping for IRQ as documented in
Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
diff --git a/Documentation/devicetree/bindings/iio/proximity/as3935.txt b/Documentation/devicetree/bindings/iio/proximity/as3935.txt
index b6c1afa6f02d..849115585d55 100644
--- a/Documentation/devicetree/bindings/iio/proximity/as3935.txt
+++ b/Documentation/devicetree/bindings/iio/proximity/as3935.txt
@@ -6,7 +6,6 @@ Required properties:
- spi-max-frequency: specifies maximum SPI clock frequency
- spi-cpha: SPI Mode 1. Refer to spi/spi-bus.txt for generic SPI
slave node bindings.
- - interrupt-parent : should be the phandle for the interrupt controller
- interrupts : the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic
diff --git a/Documentation/devicetree/bindings/iio/proximity/sx9500.txt b/Documentation/devicetree/bindings/iio/proximity/sx9500.txt
index b301dd2b35da..c54455db3bec 100644
--- a/Documentation/devicetree/bindings/iio/proximity/sx9500.txt
+++ b/Documentation/devicetree/bindings/iio/proximity/sx9500.txt
@@ -3,7 +3,6 @@ Semtech's SX9500 capacitive proximity button device driver
Required properties:
- compatible: must be "semtech,sx9500"
- reg: i2c address where to find the device
- - interrupt-parent : should be the phandle for the interrupt controller
- interrupts : the sole interrupt generated by the device
Refer to interrupt-controller/interrupts.txt for generic
diff --git a/Documentation/devicetree/bindings/iio/sensorhub.txt b/Documentation/devicetree/bindings/iio/sensorhub.txt
index 8d57571d5c0b..b6ac0457d4ea 100644
--- a/Documentation/devicetree/bindings/iio/sensorhub.txt
+++ b/Documentation/devicetree/bindings/iio/sensorhub.txt
@@ -6,7 +6,6 @@ of a virtual sensor device.
Required properties:
- compatible: "samsung,sensorhub-rinato" or "samsung,sensorhub-thermostat"
- spi-max-frequency: max SPI clock frequency
-- interrupt-parent: interrupt parent
- interrupts: communication interrupt
- ap-mcu-gpios: [out] ap to sensorhub line - used during communication
- mcu-ap-gpios: [in] sensorhub to ap - used during communication
diff --git a/Documentation/devicetree/bindings/iio/temperature/tmp007.txt b/Documentation/devicetree/bindings/iio/temperature/tmp007.txt
index b63aba91ef03..da0af234a357 100644
--- a/Documentation/devicetree/bindings/iio/temperature/tmp007.txt
+++ b/Documentation/devicetree/bindings/iio/temperature/tmp007.txt
@@ -20,8 +20,6 @@ Required properties:
Optional properties:
- - interrupt-parent: should be the phandle for the interrupt controller
-
- interrupts: interrupt mapping for GPIO IRQ (level active low)
Example:
diff --git a/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt b/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
index d3b273e4336a..84f1a1b505d2 100644
--- a/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
+++ b/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
@@ -19,7 +19,6 @@ representing a dsaf device.
- #size-cells: must be 2
Optional properties:
- dma-coherent: Present if DMA operations are coherent.
-- interrupt-parent: the interrupt parent of this device.
- interrupts: should contain 32 completion event irqs, 1 async event irq
 and 1 event overflow irq.
- interrupt-names: should be one of 34 irqs for roce device
diff --git a/Documentation/devicetree/bindings/input/cypress,cyapa.txt b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
index 8d91ba9ff2fd..d3db65916a36 100644
--- a/Documentation/devicetree/bindings/input/cypress,cyapa.txt
+++ b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
@@ -3,8 +3,6 @@ Cypress I2C Touchpad
Required properties:
- compatible: must be "cypress,cyapa".
- reg: I2C address of the chip.
-- interrupt-parent: a phandle for the interrupt controller (see interrupt
- binding[0]).
- interrupts: interrupt to which the chip is connected (see interrupt
binding[0]).
diff --git a/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt b/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt
index 635f62c756ee..0c252d9306da 100644
--- a/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt
+++ b/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt
@@ -3,8 +3,6 @@ Samsung tm2-touchkey
Required properties:
- compatible: must be "cypress,tm2-touchkey"
- reg: I2C address of the chip.
-- interrupt-parent: a phandle for the interrupt controller (see interrupt
- binding[0]).
- interrupts: interrupt to which the chip is connected (see interrupt
binding[0]).
- vcc-supply : internal regulator output. 1.8V
diff --git a/Documentation/devicetree/bindings/input/e3x0-button.txt b/Documentation/devicetree/bindings/input/e3x0-button.txt
index 751665e8e47a..907b195f2eaa 100644
--- a/Documentation/devicetree/bindings/input/e3x0-button.txt
+++ b/Documentation/devicetree/bindings/input/e3x0-button.txt
@@ -7,8 +7,6 @@ This module provides a simple power button event via two interrupts.
Required properties:
- compatible: should be one of the following
- "ettus,e3x0-button": For devices such as the NI Ettus Research USRP E3x0
-- interrupt-parent:
- - a phandle to the interrupt controller that it is attached to.
- interrupts: should be one of the following
- <0 30 1>, <0 31 1>: For devices such as the NI Ettus Research USRP E3x0
- interrupt-names: should be one of the following
diff --git a/Documentation/devicetree/bindings/input/elan_i2c.txt b/Documentation/devicetree/bindings/input/elan_i2c.txt
index d80a83583238..797607460735 100644
--- a/Documentation/devicetree/bindings/input/elan_i2c.txt
+++ b/Documentation/devicetree/bindings/input/elan_i2c.txt
@@ -3,8 +3,6 @@ Elantech I2C Touchpad
Required properties:
- compatible: must be "elan,ekth3000".
- reg: I2C address of the chip.
-- interrupt-parent: a phandle for the interrupt controller (see interrupt
- binding[0]).
- interrupts: interrupt to which the chip is connected (see interrupt
binding[0]).
diff --git a/Documentation/devicetree/bindings/input/elants_i2c.txt b/Documentation/devicetree/bindings/input/elants_i2c.txt
index 8a71038f3489..5edac8be0802 100644
--- a/Documentation/devicetree/bindings/input/elants_i2c.txt
+++ b/Documentation/devicetree/bindings/input/elants_i2c.txt
@@ -3,8 +3,6 @@ Elantech I2C Touchscreen
Required properties:
- compatible: must be "elan,ekth3500".
- reg: I2C address of the chip.
-- interrupt-parent: a phandle for the interrupt controller (see interrupt
- binding[0]).
- interrupts: interrupt to which the chip is connected (see interrupt
binding[0]).
diff --git a/Documentation/devicetree/bindings/input/hid-over-i2c.txt b/Documentation/devicetree/bindings/input/hid-over-i2c.txt
index 4d3da9d91de4..f1cb9f8d7692 100644
--- a/Documentation/devicetree/bindings/input/hid-over-i2c.txt
+++ b/Documentation/devicetree/bindings/input/hid-over-i2c.txt
@@ -14,7 +14,6 @@ Required properties:
- compatible: must be "hid-over-i2c"
- reg: i2c slave address
- hid-descr-addr: HID descriptor address
-- interrupt-parent: the phandle for the interrupt controller
- interrupts: interrupt line
Additional optional properties:
diff --git a/Documentation/devicetree/bindings/input/raydium_i2c_ts.txt b/Documentation/devicetree/bindings/input/raydium_i2c_ts.txt
index 5b6232db7c61..99a4f2ab5557 100644
--- a/Documentation/devicetree/bindings/input/raydium_i2c_ts.txt
+++ b/Documentation/devicetree/bindings/input/raydium_i2c_ts.txt
@@ -3,7 +3,6 @@ Raydium I2C touchscreen
Required properties:
- compatible: must be "raydium,rm32380"
- reg: The I2C address of the device
-- interrupt-parent: the phandle for the interrupt controller
- interrupts: interrupt to which the chip is connected
See ../interrupt-controller/interrupts.txt
Optional properties:
diff --git a/Documentation/devicetree/bindings/input/rmi4/rmi_i2c.txt b/Documentation/devicetree/bindings/input/rmi4/rmi_i2c.txt
index ec908b91fd90..dcb012f5b3ee 100644
--- a/Documentation/devicetree/bindings/input/rmi4/rmi_i2c.txt
+++ b/Documentation/devicetree/bindings/input/rmi4/rmi_i2c.txt
@@ -16,7 +16,6 @@ Required Properties:
Optional Properties:
- interrupts: interrupt which the rmi device is connected to.
-- interrupt-parent: The interrupt controller.
See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
- syna,reset-delay-ms: The number of milliseconds to wait after resetting the
diff --git a/Documentation/devicetree/bindings/input/rmi4/rmi_spi.txt b/Documentation/devicetree/bindings/input/rmi4/rmi_spi.txt
index a4ca7828f21d..632f473db65b 100644
--- a/Documentation/devicetree/bindings/input/rmi4/rmi_spi.txt
+++ b/Documentation/devicetree/bindings/input/rmi4/rmi_spi.txt
@@ -16,7 +16,6 @@ Required Properties:
Optional Properties:
- interrupts: interrupt which the rmi device is connected to.
-- interrupt-parent: The interrupt controller.
See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
- spi-rx-delay-us: microsecond delay after a read transfer.
diff --git a/Documentation/devicetree/bindings/input/ti,palmas-pwrbutton.txt b/Documentation/devicetree/bindings/input/ti,palmas-pwrbutton.txt
index a3dde8c30e67..c829e18e1a05 100644
--- a/Documentation/devicetree/bindings/input/ti,palmas-pwrbutton.txt
+++ b/Documentation/devicetree/bindings/input/ti,palmas-pwrbutton.txt
@@ -9,7 +9,6 @@ This module provides a simple power button event via an Interrupt.
Required properties:
- compatible: should be one of the following
- "ti,palmas-pwrbutton": For Palmas compatible power on button
-- interrupt-parent: Parent interrupt device, must be handle of palmas node.
- interrupts: Interrupt number of power button submodule on device.
Optional Properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt b/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt
index 3c8614c451f2..cdd743a1f2d5 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt
@@ -5,7 +5,6 @@ Required properties:
for I2C slave, use "adi,ad7879-1"
- reg : SPI chipselect/I2C slave address
See spi-bus.txt for more SPI slave properties
-- interrupt-parent : the phandle for the interrupt controller
- interrupts : touch controller interrupt
- touchscreen-max-pressure : maximum reported pressure
- adi,resistance-plate-x : total resistance of X-plate (for pressure
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt b/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt
index 9fc47b006fd1..04413da51391 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt
@@ -18,7 +18,6 @@ Additional required properties:
"ti,ads7846"
"ti,ads7873"
- interrupt-parent
interrupts An interrupt node describing the IRQ line the chip's
!PENIRQ pin is connected to.
vcc-supply A regulator node for the supply voltage.
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ar1021.txt b/Documentation/devicetree/bindings/input/touchscreen/ar1021.txt
index e459e8546f34..82019bd6094e 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/ar1021.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/ar1021.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible : "microchip,ar1021-i2c"
- reg : I2C slave address
-- interrupt-parent : the phandle for the interrupt controller
- interrupts : touch controller interrupt
Example:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt b/Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt
index d11f8d615b5d..38b0603f65f3 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible : "chipone,icn8318"
- reg : I2C slave address of the chip (0x40)
- - interrupt-parent : a phandle pointing to the interrupt controller
- serving the interrupt for this chip
- interrupts : interrupt specification for the icn8318 interrupt
- wake-gpios : GPIO specification for the WAKE input
- touchscreen-size-x : horizontal resolution of touchscreen (in pixels)
diff --git a/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt
index df531b5b6a0d..2e1490a8fe74 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt
@@ -7,7 +7,6 @@ Required Properties:
- xm-gpios: FET gate driver for input of X-
- yp-gpios: FET gate driver for input of Y+
- ym-gpios: FET gate driver for input of Y-
-- interrupt-parent: phandle for the interrupt controller
- interrupts: pen irq interrupt for touch detection
- pinctrl-names: "idle", "default", "gpios"
- pinctrl-0: pinctrl node for pen/touch detection state pinmux
diff --git a/Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt b/Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt
index b75d4cfd2c36..6ee274aa8b03 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt
@@ -4,8 +4,6 @@ Required properties:
- compatible : must be "cypress,cyttsp-i2c" or "cypress,cyttsp-spi"
- reg : Device I2C address or SPI chip select number
- spi-max-frequency : Maximum SPI clocking speed of the device (for cyttsp-spi)
- - interrupt-parent : the phandle for the gpio controller
- (see interrupt binding[0]).
- interrupts : (gpio) interrupt to which the chip is connected
(see interrupt binding[0]).
- bootloader-key : the 8-byte bootloader key that is required to switch
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
index 025cf8c9324a..da2dc5d6c98b 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
@@ -22,8 +22,6 @@ Required properties:
or: "focaltech,ft6236"
- reg: I2C slave address of the chip (0x38)
- - interrupt-parent: a phandle pointing to the interrupt controller
- serving the interrupt for this chip
- interrupts: interrupt specification for the touchdetect
interrupt
diff --git a/Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt
index 298e3442f143..92fb2620f5e2 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: must be "eeti,egalax_ts"
- reg: i2c slave address
-- interrupt-parent: the phandle for the interrupt controller
- interrupts: touch controller interrupt
- wakeup-gpios: the gpio pin to be used for waking up the controller
and also used as irq pin
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt b/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt
index 5a19f4c3e9d7..94c4fc644940 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible : "elan,ektf2127"
- reg : I2C slave address of the chip (0x40)
- - interrupt-parent : a phandle pointing to the interrupt controller
- serving the interrupt for this chip
- interrupts : interrupt specification for the ektf2127 interrupt
- power-gpios : GPIO specification for the pin connected to the
ektf2127's wake input. This needs to be driven high
diff --git a/Documentation/devicetree/bindings/input/touchscreen/exc3000.txt b/Documentation/devicetree/bindings/input/touchscreen/exc3000.txt
index 1dcff4a43eaa..68291b94fec2 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/exc3000.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/exc3000.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: must be "eeti,exc3000"
- reg: i2c slave address
-- interrupt-parent: the phandle for the interrupt controller
- interrupts: touch controller interrupt
- touchscreen-size-x: See touchscreen.txt
- touchscreen-size-y: See touchscreen.txt
diff --git a/Documentation/devicetree/bindings/input/touchscreen/fsl-mx25-tcq.txt b/Documentation/devicetree/bindings/input/touchscreen/fsl-mx25-tcq.txt
index abfcab3edc66..99d6f9d25335 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/fsl-mx25-tcq.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/fsl-mx25-tcq.txt
@@ -8,7 +8,6 @@ Required properties:
- reg: Memory range of the device.
- interrupts: Should be the interrupt number associated with this module within
the tscadc unit (<0>).
- - interrupt-parent: Should be a phandle to the tscadc unit.
- fsl,wires: Should be '<4>' or '<5>'
Optional properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
index 0c369d8ebcab..f7e95c52f3c7 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
@@ -11,7 +11,6 @@ Required properties:
or "goodix,gt928"
or "goodix,gt967"
- reg : I2C address of the chip. Should be 0x5d or 0x14
- - interrupt-parent : Interrupt controller to which the chip is connected
- interrupts : Interrupt to which the chip is connected
Optional properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/hideep.txt b/Documentation/devicetree/bindings/input/touchscreen/hideep.txt
index 1063c30d53f7..a47c36190b01 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/hideep.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/hideep.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible : must be "hideep,hideep-ts"
- reg : I2C slave address, (e.g. 0x6C).
-- interrupt-parent : Interrupt controller to which the chip is connected.
- interrupts : Interrupt to which the chip is connected.
Optional properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/max11801-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/max11801-ts.txt
index 40ac0fe94df6..05e982c3454e 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/max11801-ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/max11801-ts.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: must be "maxim,max11801"
- reg: i2c slave address
-- interrupt-parent: the phandle for the interrupt controller
- interrupts: touch controller interrupt
Example:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt b/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
index 7b8944c2cb31..b2ab5498e519 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: must be "melfas,mip4_ts"
- reg: I2C slave address of the chip (0x48 or 0x34)
-- interrupt-parent: interrupt controller to which the chip is connected
- interrupts: interrupt to which the chip is connected
Optional properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt b/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt
index d9b7c2ff611e..6805d10d226d 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible : must be "samsung,s6sy761"
- reg : I2C slave address, (e.g. 0x48)
-- interrupt-parent : the phandle to the interrupt controller which provides
- the interrupt
- interrupts : interrupt specification
- avdd-supply : analogic power supply
- vdd-supply : power supply
diff --git a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
index 84752de12412..d67e558e5ab5 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
@@ -8,8 +8,6 @@ Required properties:
"silead,gsl3675"
"silead,gsl3692"
- reg : I2C slave address of the chip (0x40)
-- interrupt-parent : a phandle pointing to the interrupt controller
- serving the interrupt for this chip
- interrupts : interrupt specification for the gsl1680 interrupt
- power-gpios : Specification for the pin connected to the gsl1680's
shutdown input. This needs to be driven high to take the
diff --git a/Documentation/devicetree/bindings/input/touchscreen/sis_i2c.txt b/Documentation/devicetree/bindings/input/touchscreen/sis_i2c.txt
index d87ad14f1efe..8f5322e01024 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/sis_i2c.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/sis_i2c.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible: must be "sis,9200-ts"
- reg: i2c slave address
-- interrupt-parent: the phandle for the interrupt controller
- (see interrupt binding [0])
- interrupts: touch controller interrupt (see interrupt
binding [0])
diff --git a/Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt b/Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt
index 9683595cd0f5..0a5d0cb4a280 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt
@@ -10,8 +10,6 @@ coordinates.
Required properties:
- compatible : must be "st,stmfts"
- reg : I2C slave address, (e.g. 0x49)
-- interrupt-parent : the phandle to the interrupt controller which provides
- the interrupt
- interrupts : interrupt specification
- avdd-supply : analogic power supply
- vdd-supply : power supply
diff --git a/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt b/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt
index 5aaa6b3aa90c..4886c4aa2906 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: must be "semtech,sx8654"
- reg: i2c slave address
-- interrupt-parent: the phandle for the interrupt controller
- interrupts: touch controller interrupt
Example:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/tsc2007.txt b/Documentation/devicetree/bindings/input/touchscreen/tsc2007.txt
index ec365e172236..ed00f61b8c08 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/tsc2007.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/tsc2007.txt
@@ -9,8 +9,6 @@ Optional properties:
- gpios: the interrupt gpio the chip is connected to (through the penirq pin).
The penirq pin goes low when the panel is touched.
(see GPIO binding[1] for more details).
-- interrupt-parent: the phandle for the gpio controller
- (see interrupt binding[0]).
- interrupts: (gpio) interrupt to which the chip is connected
(see interrupt binding[0]).
- ti,max-rt: maximum pressure.
diff --git a/Documentation/devicetree/bindings/input/touchscreen/zet6223.txt b/Documentation/devicetree/bindings/input/touchscreen/zet6223.txt
index fe6a1feef703..27d55a506f18 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/zet6223.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/zet6223.txt
@@ -3,8 +3,6 @@ Zeitec ZET6223 I2C touchscreen controller
Required properties:
- compatible : "zeitec,zet6223"
- reg : I2C slave address of the chip (0x76)
-- interrupt-parent : a phandle pointing to the interrupt controller
- serving the interrupt for this chip
- interrupts : interrupt specification for the zet6223 interrupt
Optional properties:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.txt b/Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.txt
index 9d52d5afe3e9..5a4dd263fc12 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/abilis,tb10x-ictl.txt
@@ -13,7 +13,6 @@ Required properties
- interrupt-controller: Identifies the node as an interrupt controller.
- #interrupt-cells: Specifies the number of cells used to encode an interrupt
source connected to this controller. The value shall be 2.
-- interrupt-parent: Specifies the parent interrupt controller.
- interrupts: Specifies the list of interrupt lines which are handled by
the interrupt controller in the parent controller's notation. Interrupts
are mapped one-to-one to parent interrupts.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt b/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt
index f6f1c14bf99b..5669764f9cc9 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt
@@ -6,7 +6,6 @@ Required properties:
- compatible: should be "al,alpine-msix"
- reg: physical base address and size of the registers
-- interrupt-parent: specifies the parent interrupt controller.
- interrupt-controller: identifies the node as an interrupt controller
- msi-controller: identifies the node as a PCI Message Signaled Interrupt
controller
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt
index 4903fb72d883..24beadf7ba83 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt
@@ -13,7 +13,6 @@ Required properties:
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value shall be 2. The first cell is the IRQ number, the
second cell the trigger type as defined in interrupt.txt in this directory.
-- interrupt-parent: Specifies the parent interrupt controller.
- interrupts: Specifies the interrupt line (NMI) which is handled by
the interrupt controller in the parent controller's notation. This value
shall be the NMI.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt
index 89674ad8a097..1502a51548bb 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt
@@ -15,9 +15,6 @@ Required properties:
"amlogic,meson-gxbb-gpio-intc" for GXBB SoCs (S905) or
"amlogic,meson-gxl-gpio-intc" for GXL SoCs (S905X, S912)
"amlogic,meson-axg-gpio-intc" for AXG SoCs (A113D, A113X)
-- interrupt-parent : a phandle to the GIC the interrupts are routed to.
- Usually this is provided at the root level of the device tree as it is
- common to most of the SoC.
- reg : Specifies base physical address and size of the registers.
- interrupt-controller : Identifies the node as an interrupt controller.
- #interrupt-cells : Specifies the number of cells needed to encode an
diff --git a/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt b/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt
index 0e9f09a6a2fe..f4c5d34c4111 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible: Should be "atmel,<chip>-aic"
<chip> can be "at91rm9200", "sama5d2", "sama5d3" or "sama5d4"
- interrupt-controller: Identifies the node as an interrupt controller.
-- interrupt-parent: For single AIC system, it is an empty property.
- #interrupt-cells: The number of cells to define the interrupts. It should be 3.
The first cell is the IRQ number (aka "Peripheral IDentifier" on datasheet).
The second cell is used to specify flags:
@@ -27,7 +26,6 @@ Examples:
aic: interrupt-controller@fffff000 {
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
- interrupt-parent;
#interrupt-cells = <3>;
reg = <0xfffff000 0x200>;
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
index 6428a6ba9f4a..0f1af5a1c12e 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
@@ -26,8 +26,6 @@ Required properties:
are 0..7 for bank 0, and 0..31 for bank 1.
Additional required properties for brcm,bcm2836-armctrl-ic:
-- interrupt-parent : Specifies the parent interrupt controller when this
- controller is the second level.
- interrupts : Specifies the interrupt on the parent for this interrupt
controller to handle.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt
index 8f48aad50868..37aea40d5430 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt
@@ -18,8 +18,6 @@ Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an interrupt
source, should be 1.
-- interrupt-parent: specifies the phandle to the parent interrupt controller
- this one is cascaded from
- interrupts: specifies the interrupt line in the interrupt-parent controller
node, valid values depend on the type of parent interrupt controller
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt
index 4040905388d9..2bc19b1ac877 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt
@@ -29,8 +29,6 @@ Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an interrupt
source, should be 1.
-- interrupt-parent: specifies the phandle to the parent interrupt controller(s)
- this one is cascaded from
- interrupts: specifies the interrupt line(s) in the interrupt-parent controller
node; valid values depend on the type of parent interrupt controller
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt
index cc217b22dccd..2117d4ac1ae5 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt
@@ -28,8 +28,6 @@ Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an interrupt
source, should be 1.
-- interrupt-parent: specifies the phandle to the parent interrupt controller(s)
- this one is cascaded from
- interrupts: specifies the interrupt line(s) in the interrupt-parent controller
node; valid values depend on the type of parent interrupt controller
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt
index 44a9bb15dd56..addd86b6ca2f 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt
@@ -56,8 +56,6 @@ Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an interrupt
source, should be 1.
-- interrupt-parent: specifies the phandle to the parent interrupt controller
- this one is cascaded from
- interrupts: specifies the interrupt line(s) in the interrupt-parent controller
node, valid values depend on the type of parent interrupt controller
- brcm,int-map-mask: 32-bit mask describing how many and which interrupts
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
index 36df06c5c567..d514ec060a4a 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
@@ -8,8 +8,6 @@ Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an
interrupt source. Should be 1.
-- interrupt-parent: specifies the phandle to the parent interrupt controller
- this controller is cacaded from
- interrupts: specifies the interrupt line in the interrupt-parent irq space
to be used for cascading
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
index a4ff93d6b7f3..454ce04d6787 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
@@ -13,9 +13,6 @@ Required properties:
- reg: physical base address of the controller and length of memory mapped.
- interrupts: an interrupt to the parent interrupt controller.
-Optional properties:
-- interrupt-parent: the phandle to the parent interrupt controller.
-
This interrupt controller hardware is a second level interrupt controller that
is hooked to a parent interrupt controller: e.g: ARM GIC for ARM-based
platforms. If interrupt-parent is not provided, the default parent interrupt
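A hedged sketch of that default-parent behaviour (register values and
interrupt numbers are hypothetical): the MSI node itself carries no
interrupt-parent, and a PCIe controller consumes it via msi-parent:

	msi1: msi-controller@1571000 {
		compatible = "fsl,ls1043a-msi";
		reg = <0x0 0x1571000 0x0 0x8>;
		msi-controller;
		interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
	};

	pcie@3400000 {
		...
		msi-parent = <&msi1>;
	};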
diff --git a/Documentation/devicetree/bindings/interrupt-controller/hisilicon,mbigen-v2.txt b/Documentation/devicetree/bindings/interrupt-controller/hisilicon,mbigen-v2.txt
index 3b2f4c43ad8d..a6813a071f15 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/hisilicon,mbigen-v2.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/hisilicon,mbigen-v2.txt
@@ -68,8 +68,6 @@ Examples:
Devices connect to mbigen required properties:
----------------------------------------------------
--interrupt-parent: Specifies the mbigen device node which device connected.
-
-interrupts: Specifies the interrupt source.
For the specific information of each cell in this property, please refer to
the "interrupt-cells" description mentioned above.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
index 5f89fb635a1b..d4373d0f7121 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible : should be "ingenic,<socname>-intc". Valid strings are:
ingenic,jz4740-intc
+ ingenic,jz4725b-intc
ingenic,jz4770-intc
ingenic,jz4775-intc
ingenic,jz4780-intc
@@ -11,7 +12,6 @@ Required properties:
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value shall be 1.
-- interrupt-parent : phandle of the CPU interrupt controller.
- interrupts : Specifies the CPU interrupt the controller is connected to.
Example:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
index 3f6442c7f867..930fb462fd9f 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
@@ -26,8 +26,6 @@ Required properties:
See Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
for details about the GIC Device Tree binding.
-- interrupt-parent : Reference to the parent interrupt controller.
-
Example:
odmi: odmi@300000 {
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mediatek,cirq.txt b/Documentation/devicetree/bindings/interrupt-controller/mediatek,cirq.txt
index a7efdbc3de5b..5865f4f2c69d 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mediatek,cirq.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mediatek,cirq.txt
@@ -16,8 +16,6 @@ Required properties:
and "mediatek,cirq" as a fallback.
- interrupt-controller : Identifies the node as an interrupt controller.
- #interrupt-cells : Use the same format as specified by GIC in arm,gic.txt.
-- interrupt-parent: phandle of irq parent for cirq. The parent must
- use the same interrupt-cells format as GIC.
- reg: Physical base address of the cirq registers and length of memory
mapped region.
- mediatek,ext-irq-range: Identifies external irq number range in different
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
index 07bf0b9a5139..6a32922a55b8 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
@@ -21,8 +21,6 @@ Required properties:
"mediatek,mt2701-sysirq", "mediatek,mt6577-sysirq": for MT2701
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Use the same format as specified by GIC in arm,gic.txt.
-- interrupt-parent: phandle of irq parent for sysirq. The parent must
- use the same interrupt-cells format as GIC.
- reg: Physical base address of the intpol registers and length of memory
mapped region. Could be multiple bases here. Ex: mt6797 needs 2 reg, others
need 1.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mscc,ocelot-icpu-intr.txt b/Documentation/devicetree/bindings/interrupt-controller/mscc,ocelot-icpu-intr.txt
index b47a8a02b17b..f5baeccb689f 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mscc,ocelot-icpu-intr.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mscc,ocelot-icpu-intr.txt
@@ -7,7 +7,6 @@ Required properties:
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value shall be 1.
-- interrupt-parent : phandle of the CPU interrupt controller.
- interrupts : Specifies the CPU interrupt the controller is connected to.
Example:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt b/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
index f246ccbf8838..2ff356640100 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
@@ -19,8 +19,6 @@ Required properties:
- interrupt-controller : Identifies the node as an interrupt controller.
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value must be 3.
-- interrupt-parent : a phandle to the GIC these interrupts are routed
- to.
Notes:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/nxp,lpc3220-mic.txt b/Documentation/devicetree/bindings/interrupt-controller/nxp,lpc3220-mic.txt
index 38211f344dc8..0bfb3ba55f4c 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/nxp,lpc3220-mic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/nxp,lpc3220-mic.txt
@@ -14,8 +14,6 @@ Required properties:
Reset value is IRQ_TYPE_LEVEL_LOW.
Optional properties:
-- interrupt-parent: empty for MIC interrupt controller, link to parent
- MIC interrupt controller for SIC1 and SIC2
- interrupts: empty for MIC interrupt controller, cascaded MIC
hardware interrupts for SIC1 and SIC2
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
index 475ae9bd562b..ad70006c1848 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
@@ -7,7 +7,6 @@ Required Properties:
- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" or
"qca,<soctype>-cpu-intc", "qca,ar7240-misc-intc"
- reg: Base address and size of the controller's memory area
-- interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifier for the controller's interrupt.
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode interrupt
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
index 0b2c97ddb520..8e0797cb1487 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
@@ -35,12 +35,6 @@ Properties:
interrupt.
The second element is the trigger type.
-- interrupt-parent:
- Usage: required
- Value type: <phandle>
- Definition: Specifies the interrupt parent necessary for hierarchical
- domain to operate.
-
- interrupt-controller:
Usage: required
Value type: <bool>
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
index 20f121daa910..697ca2f26d1b 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
@@ -7,6 +7,7 @@ Required properties:
- "renesas,irqc-r8a73a4" (R-Mobile APE6)
- "renesas,irqc-r8a7743" (RZ/G1M)
- "renesas,irqc-r8a7745" (RZ/G1E)
+ - "renesas,irqc-r8a77470" (RZ/G1C)
- "renesas,irqc-r8a7790" (R-Car H2)
- "renesas,irqc-r8a7791" (R-Car M2-W)
- "renesas,irqc-r8a7792" (R-Car V2H)
@@ -16,6 +17,7 @@ Required properties:
- "renesas,intc-ex-r8a7796" (R-Car M3-W)
- "renesas,intc-ex-r8a77965" (R-Car M3-N)
- "renesas,intc-ex-r8a77970" (R-Car V3M)
+ - "renesas,intc-ex-r8a77980" (R-Car V3H)
- "renesas,intc-ex-r8a77995" (R-Car D3)
- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
interrupts.txt in this directory
diff --git a/Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.txt b/Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.txt
index 9e5f73412cd7..19af687858a1 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.txt
@@ -32,8 +32,6 @@ Optional properties:
- samsung,combiner-nr: The number of interrupt combiners supported. If this
property is not specified, the default number of combiners is assumed
to be 16.
-- interrupt-parent: pHandle of the parent interrupt controller, if not
- inherited from the parent node.
Example:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt
index 1f441fa0ad40..355c18a3a4d3 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible: should be "sigma,smp8642-intc"
- reg: physical address of MMIO region
- ranges: address space mapping of child nodes
-- interrupt-parent: phandle of parent interrupt controller
- interrupt-controller: boolean
- #address-cells: should be <1>
- #size-cells: should be <1>
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
index 8b46a34e05f1..09fc02b99845 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
@@ -7,7 +7,6 @@ Properties:
- compatible: "snps,archs-idu-intc"
- interrupt-controller: This is an interrupt controller.
-- interrupt-parent: <reference to parent core intc>
- #interrupt-cells: Must be <1>.
Value of the cell specifies the "common" IRQ from peripheral to IDU. Number N
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.txt
index 492911744ca3..086ff08322db 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,dw-apb-ictl.txt
@@ -11,7 +11,6 @@ Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: number of cells to encode an interrupt-specifier, shall be 1
- interrupts: interrupt reference to primary interrupt controller
-- interrupt-parent: (optional) reference specific primary interrupt controller
The interrupt sources map to the corresponding bits in the interrupt
registers, i.e.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt b/Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt
index 8b2faefe29ca..dac0846fe789 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt
@@ -12,7 +12,6 @@ Required properties:
- interrupt-controller : Identifies the node as an interrupt controller.
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value must be 3.
-- interrupt-parent : phandle of the GIC these interrupts are routed to.
- socionext,spi-base : The SPI number of the first SPI of the 32 adjacent
ones the EXIU forwards its interrupts to.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt b/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt
index 2ab0ea39867b..a407c499b3cc 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,spear3xx-shirq.txt
@@ -31,10 +31,6 @@ Required properties:
parent) is equal to the number of groups. The format of the interrupt
specifier depends on the interrupt parent controller.
- Optional properties:
- - interrupt-parent: pHandle of the parent interrupt controller, if not
- inherited from the parent node.
-
Example:
The following is an example from the SPEAr320 SoC dtsi file.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800.txt b/Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800.txt
index 7f15f1b0325b..341ae5909333 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/technologic,ts4800.txt
@@ -11,6 +11,4 @@ Required properties:
region
- #interrupt-cells: specifies the number of cells needed to encode an interrupt
source, should be 1.
-- interrupt-parent: phandle to the parent interrupt controller this one is
- cascaded from
- interrupts: specifies the interrupt line in the interrupt-parent controller
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,c64x+megamod-pic.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,c64x+megamod-pic.txt
index 42bb796cc4ad..ee3f9c351501 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,c64x+megamod-pic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,c64x+megamod-pic.txt
@@ -46,7 +46,6 @@ C6X Interrupt Chips
- interrupt-controller
- #interrupt-cells: <1>
- reg: base address and size of register area
- - interrupt-parent: must be core interrupt controller
- interrupts: This should have four cells; one for each interrupt combiner.
The cells contain the core priority interrupt to which the
corresponding combiner output is wired.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu b/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu
index 18d4f407bf0e..422d6908f8b2 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,omap4-wugen-mpu
@@ -12,8 +12,6 @@ Required properties:
- interrupt-controller : Identifies the node as an interrupt controller.
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value must be 3.
-- interrupt-parent : a phandle to the GIC these interrupts are routed
- to.
Notes:
diff --git a/Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt b/Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt
index b1682c80b490..525ec82615a6 100644
--- a/Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt
+++ b/Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt
@@ -31,7 +31,6 @@ Required properties:
- compatible: Should be "samsung,exynos-sysmmu"
- reg: A tuple of base address and size of System MMU registers.
- #iommu-cells: Should be <0>.
-- interrupt-parent: The phandle of the interrupt controller of System MMU
- interrupts: An interrupt specifier for interrupt signal of System MMU,
according to the format defined by a particular interrupt
controller.
diff --git a/Documentation/devicetree/bindings/leds/common.txt b/Documentation/devicetree/bindings/leds/common.txt
index 1d4afe9644b6..aa1399814a2a 100644
--- a/Documentation/devicetree/bindings/leds/common.txt
+++ b/Documentation/devicetree/bindings/leds/common.txt
@@ -31,7 +31,7 @@ Optional properties for child nodes:
"backlight" - LED will act as a back-light, controlled by the framebuffer
system
"default-on" - LED will turn on (but for leds-gpio see "default-state"
- property in Documentation/devicetree/bindings/gpio/led.txt)
+ property in Documentation/devicetree/bindings/leds/leds-gpio.txt)
"heartbeat" - LED "double" flashes at a load average based rate
"disk-activity" - LED indicates disk activity
"ide-disk" - LED indicates IDE disk activity (deprecated),
diff --git a/Documentation/devicetree/bindings/leds/leds-lm3692x.txt b/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
index 6c9074f84a51..08b352840bd7 100644
--- a/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
+++ b/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
@@ -20,7 +20,10 @@ Optional properties:
- vled-supply : LED supply
Required child properties:
- - reg : 0
+ - reg : 0 - Will enable all LED sync paths
+ 1 - Will enable the LED1 sync
+ 2 - Will enable the LED2 sync
+ 3 - Will enable the LED3 sync (LM36923 only)
Optional child properties:
- label : see Documentation/devicetree/bindings/leds/common.txt
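A hedged sketch of how those child reg values might be used (the I2C address
and labels are hypothetical):

	led-controller@36 {
		compatible = "ti,lm36923";
		reg = <0x36>;
		#address-cells = <1>;
		#size-cells = <0>;

		led@1 {
			reg = <1>;	/* LED1 sync path only */
			label = "white:backlight-left";
		};

		led@2 {
			reg = <2>;	/* LED2 sync path only */
			label = "white:backlight-right";
		};
	};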
diff --git a/Documentation/devicetree/bindings/leds/leds-lt3593.txt b/Documentation/devicetree/bindings/leds/leds-lt3593.txt
new file mode 100644
index 000000000000..6b2cabc36c0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-lt3593.txt
@@ -0,0 +1,32 @@
+Bindings for Linear Technologies LT3593 LED controller
+
+Required properties:
+- compatible: Should be "lltc,lt3593".
+- lltc,ctrl-gpios: A handle to the GPIO that is connected to the 'CTRL'
+ pin of the chip.
+
+The hardware supports only one LED. The properties of this LED are
+configured in a sub-node in the device node.
+
+Optional sub-node properties:
+- label: A label for the LED. If none is given, the LED will be
+ named "lt3595::".
+- linux,default-trigger: The default trigger for the LED.
+ See Documentation/devicetree/bindings/leds/common.txt
+- default-state: The initial state of the LED.
+ See Documentation/devicetree/bindings/leds/common.txt
+
+If multiple chips of this type are found in a design, each one needs to
+be handled by its own device node.
+
+Example:
+
+led-controller {
+ compatible = "lltc,lt3593";
+ lltc,ctrl-gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
+
+ led {
+ label = "white:backlight";
+ default-state = "on";
+ };
+};
diff --git a/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt b/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt
index 49cfc8c337c4..c4dd93f1fed2 100644
--- a/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt
+++ b/Documentation/devicetree/bindings/mailbox/altera-mailbox.txt
@@ -9,7 +9,6 @@ Required properties:
of cells required for the mailbox specifier. Should be 1.
Optional properties:
-- interrupt-parent : interrupt source phandle.
- interrupts : interrupt number. The interrupt specifier format
depends on the interrupt controller parent.
diff --git a/Documentation/devicetree/bindings/mailbox/fsl,mu.txt b/Documentation/devicetree/bindings/mailbox/fsl,mu.txt
new file mode 100644
index 000000000000..f3cf77eb5ab4
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/fsl,mu.txt
@@ -0,0 +1,54 @@
+NXP i.MX Messaging Unit (MU)
+--------------------------------------------------------------------
+
+The Messaging Unit module enables two processors within the SoC to
+communicate and coordinate by passing messages (e.g. data, status
+and control) through the MU interface. The MU also provides the ability
+for one processor to signal the other processor using interrupts.
+
+Because the MU manages the messaging between processors, the MU uses
+different clocks (from each side of the different peripheral buses).
+Therefore, the MU must synchronize the accesses from one side to the
+other. The MU accomplishes synchronization using two sets of matching
+registers (Processor A-facing, Processor B-facing).
+
+Messaging Unit Device Node:
+=============================
+
+Required properties:
+-------------------
+- compatible : should be "fsl,<chip>-mu", the supported chips include
+ imx6sx, imx7s, imx8qxp, imx8qm.
+ The "fsl,imx6sx-mu" compatible is seen as generic and should
+ be included together with SoC specific compatible.
+- reg : Should contain the registers location and length
+- interrupts : Interrupt number. The interrupt specifier format depends
+ on the interrupt controller parent.
+- #mbox-cells: Must be 2.
+ <&phandle type channel>
+ phandle : Label name of controller
+ type : Channel type
+ channel : Channel number
+
+ This MU supports 4 types of unidirectional channels, each type
+ has 4 channels, giving a total of 16 channels. The following
+ types are supported:
+ 0 - TX channel with 32bit transmit register and IRQ transmit
+ acknowledgment support.
+ 1 - RX channel with 32bit receive register and IRQ support
+ 2 - TX doorbell channel, without its own register and with no ACK support.
+ 3 - RX doorbell channel.
+
+Optional properties:
+-------------------
+- clocks : phandle to the input clock.
+- fsl,mu-side-b : Should be set for side B MU.
+
+Examples:
+--------
+lsio_mu0: mailbox@5d1b0000 {
+ compatible = "fsl,imx8qxp-mu";
+ reg = <0x0 0x5d1b0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+};
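The example above shows only the controller; a hedged sketch of a client node
consuming the two-cell specifier (the consumer node and channel numbers are
hypothetical):

	client {
		/* type 0 = TX with ACK on channel 0, type 1 = RX on channel 0 */
		mboxes = <&lsio_mu0 0 0>, <&lsio_mu0 1 0>;
		mbox-names = "tx", "rx";
	};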
diff --git a/Documentation/devicetree/bindings/mailbox/mtk-gce.txt b/Documentation/devicetree/bindings/mailbox/mtk-gce.txt
new file mode 100644
index 000000000000..7d72b21c9e94
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/mtk-gce.txt
@@ -0,0 +1,57 @@
+MediaTek GCE
+===============
+
+The Global Command Engine (GCE) is used to help read/write registers under
+critical timing constraints, such as updating the display configuration
+during vblank. The GCE can be used to implement the Command Queue (CMDQ)
+driver.
+
+The CMDQ driver uses the mailbox framework for communication. Please refer
+to mailbox.txt for generic information about mailbox device-tree bindings.
+
+Required properties:
+- compatible: Must be "mediatek,mt8173-gce"
+- reg: Address range of the GCE unit
+- interrupts: The interrupt signal from the GCE block
+- clock: Clocks according to the common clock binding
+- clock-names: Must be "gce" to stand for GCE clock
+- #mbox-cells: Should be 3.
+ <&phandle channel priority atomic_exec>
+ phandle: Label name of a gce node.
+ channel: Channel of the mailbox; equal to the thread id of the GCE.
+ priority: Priority of the GCE thread.
+ atomic_exec: whether the GCE processes continuous packets of
+ commands atomically.
+
+Required properties for a client device:
+- mboxes: Clients use the mailbox framework to communicate with the GCE;
+ this property shall contain a list of phandle and mailbox specifier pairs.
+- mediatek,gce-subsys: u32, specifies the sub-system id corresponding to
+ the register address.
+
+Some property values, such as sub-system ids, thread priorities and event
+ids, are defined in 'dt-bindings/gce/mt8173-gce.h'.
+
+Example:
+
+ gce: gce@10212000 {
+ compatible = "mediatek,mt8173-gce";
+ reg = <0 0x10212000 0 0x1000>;
+ interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infracfg CLK_INFRA_GCE>;
+ clock-names = "gce";
+ thread-num = <CMDQ_THR_MAX_COUNT>;
+ #mbox-cells = <3>;
+ };
+
+Example for a client device:
+
+ mmsys: clock-controller@14000000 {
+ compatible = "mediatek,mt8173-mmsys";
+ mboxes = <&gce 0 CMDQ_THR_PRIO_LOWEST 1>,
+ <&gce 1 CMDQ_THR_PRIO_LOWEST 1>;
+ mediatek,gce-subsys = <SUBSYS_1400XXXX>;
+ mutex-event-eof = <CMDQ_EVENT_MUTEX0_STREAM_EOF
+ CMDQ_EVENT_MUTEX1_STREAM_EOF>;
+
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/mailbox/ti,secure-proxy.txt b/Documentation/devicetree/bindings/mailbox/ti,secure-proxy.txt
new file mode 100644
index 000000000000..6c9c7daf0f5c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/ti,secure-proxy.txt
@@ -0,0 +1,50 @@
+Texas Instruments' Secure Proxy
+========================================
+
+The Texas Instruments' secure proxy is a mailbox controller that has
+configurable queues selectable at SoC (System on Chip) integration. The
+secure proxy is broken up into different address regions that are
+called "threads" or "proxies" - each instance is unidirectional and is
+instantiated at SoC integration level by the system controller to indicate
+the receive or transmit path.
+
+Secure Proxy Device Node:
+=========================
+Required properties:
+--------------------
+- compatible: Shall be "ti,am654-secure-proxy"
+- reg-names: target_data - Map the proxy data region
+ rt - Map the realtime status region
+ scfg - Map the configuration region
+- reg: Contains the register map per reg-names.
+- #mbox-cells: Shall be 1 and shall refer to the transfer path
+ called thread.
+- interrupt-names: Contains interrupt names matching the rx transfer path
+ for a given SoC. Receive interrupts shall be of the
+ format: "rx_<PID>".
+- interrupts: Contains the interrupt information corresponding to
+ interrupt-names property.
+
+Example(AM654):
+------------
+
+ secure_proxy: mailbox@32c00000 {
+ compatible = "ti,am654-secure-proxy";
+ #mbox-cells = <1>;
+ reg-names = "target_data", "rt", "scfg";
+ reg = <0x0 0x32c00000 0x0 0x100000>,
+ <0x0 0x32400000 0x0 0x100000>,
+ <0x0 0x32800000 0x0 0x100000>;
+ interrupt-names = "rx_011";
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ dmsc: dmsc {
+ [...]
+ mbox-names = "rx", "tx";
+ /* RX Thread ID is 11 */
+ /* TX Thread ID is 13 */
+ mboxes = <&secure_proxy 11>,
+ <&secure_proxy 13>;
+ [...]
+ };
diff --git a/Documentation/devicetree/bindings/media/cec-gpio.txt b/Documentation/devicetree/bindings/media/cec-gpio.txt
index 12fcd55ed153..47e8d73d32a3 100644
--- a/Documentation/devicetree/bindings/media/cec-gpio.txt
+++ b/Documentation/devicetree/bindings/media/cec-gpio.txt
@@ -4,8 +4,8 @@ The HDMI CEC GPIO module supports CEC implementations where the CEC line
is hooked up to a pull-up GPIO line and - optionally - the HPD line is
hooked up to another GPIO line.
-Please note: the maximum voltage for the CEC line is 3.63V, for the HPD
-line it is 5.3V. So you may need some sort of level conversion circuitry
+Please note: the maximum voltage for the CEC line is 3.63V, for the HPD and
+5V lines it is 5.3V. So you may need some sort of level conversion circuitry
when connecting them to a GPIO line.
Required properties:
@@ -19,18 +19,24 @@ following property is also required:
- hdmi-phandle - phandle to the HDMI controller, see also cec.txt.
If the CEC line is not associated with an HDMI receiver/transmitter, then
-the following property is optional:
+the following property is optional and can be used for debugging HPD changes:
- hpd-gpios: gpio that the HPD line is connected to.
+This property is optional and can be used for debugging changes on the 5V line:
+
+ - v5-gpios: gpio that the 5V line is connected to.
+
Example for the Raspberry Pi 3 where the CEC line is connected to
-pin 26 aka BCM7 aka CE1 on the GPIO pin header and the HPD line is
-connected to pin 11 aka BCM17 (some level shifter is needed for this!):
+pin 26 aka BCM7 aka CE1 on the GPIO pin header, the HPD line is
+connected to pin 11 aka BCM17 and the 5V line is connected to pin
+15 aka BCM22 (some level shifter is needed for the HPD and 5V lines!):
#include <dt-bindings/gpio/gpio.h>
cec-gpio {
- compatible = "cec-gpio";
- cec-gpios = <&gpio 7 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
- hpd-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
+ compatible = "cec-gpio";
+ cec-gpios = <&gpio 7 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ hpd-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
+ v5-gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
};
diff --git a/Documentation/devicetree/bindings/media/i2c/ak7375.txt b/Documentation/devicetree/bindings/media/i2c/ak7375.txt
new file mode 100644
index 000000000000..aa3e24b41241
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ak7375.txt
@@ -0,0 +1,8 @@
+Asahi Kasei Microdevices AK7375 voice coil lens driver
+
+The AK7375 is a camera voice coil lens actuator.
+
+Mandatory properties:
+
+- compatible: "asahi-kasei,ak7375"
+- reg: I2C slave address
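+
+Example (a minimal sketch; the parent I2C bus label and the 0x0c slave
+address are assumptions, not taken from a datasheet):
+
+	&i2c1 {
+		ak7375: camera-lens@c {
+			compatible = "asahi-kasei,ak7375";
+			reg = <0x0c>;
+		};
+	};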
diff --git a/Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.txt b/Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.txt
new file mode 100644
index 000000000000..bd896e9f67d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.txt
@@ -0,0 +1,46 @@
+* Aptina MT9V111 CMOS sensor
+----------------------------
+
+The Aptina MT9V111 is a 1/4-inch VGA-format digital image sensor with a core
+based on the Aptina MT9V011 sensor and an integrated Image Flow Processor
+(IFP).
+
+The sensor has an active pixel array of 640x480 pixels and can output a
+number of image resolutions and formats, controllable through a simple
+two-wire interface.
+
+Required properties:
+--------------------
+
+- compatible: shall be "aptina,mt9v111".
+- clocks: reference to the system clock input provider.
+
+Optional properties:
+--------------------
+
+- enable-gpios: output enable signal, pin name "OE#". Active low.
+- standby-gpios: low power state control signal, pin name "STANDBY".
+ Active high.
+- reset-gpios: chip reset signal, pin name "RESET#". Active low.
+
+The device node must contain one 'port' child node with one 'endpoint' child
+sub-node for its digital output video port, in accordance with the video
+interface bindings defined in:
+Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+--------
+
+ &i2c1 {
+ camera@48 {
+ compatible = "aptina,mt9v111";
+ reg = <0x48>;
+
+ clocks = <&camera_clk>;
+
+ port {
+ mt9v111_out: endpoint {
+ remote-endpoint = <&ceu_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807.txt b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807.txt
new file mode 100644
index 000000000000..c4701f1eaaf6
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807.txt
@@ -0,0 +1,9 @@
+Dongwoon Anatech DW9807 voice coil lens driver
+
+DW9807 is a 10-bit DAC with current sink capability. It is intended for
+controlling voice coil lenses.
+
+Mandatory properties:
+
+- compatible: "dongwoon,dw9807-vcm"
+- reg: I2C slave address
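+
+Example (a minimal sketch; the parent I2C bus label and the 0x0c slave
+address are assumptions):
+
+	&i2c0 {
+		dw9807: vcm@c {
+			compatible = "dongwoon,dw9807-vcm";
+			reg = <0x0c>;
+		};
+	};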
diff --git a/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt b/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
index 33f10a94c381..8ee7c7972ac7 100644
--- a/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
+++ b/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
@@ -29,6 +29,9 @@ Optional properties
- reset-gpios: XSHUTDOWN GPIO
- flash-leds: See ../video-interfaces.txt
- lens-focus: See ../video-interfaces.txt
+- rotation: Integer property; valid values are 0 (sensor mounted upright)
+ and 180 (sensor mounted upside down). See
+ ../video-interfaces.txt .
Endpoint node mandatory properties
diff --git a/Documentation/devicetree/bindings/media/i2c/ov2680.txt b/Documentation/devicetree/bindings/media/i2c/ov2680.txt
new file mode 100644
index 000000000000..11e925ed9dad
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ov2680.txt
@@ -0,0 +1,46 @@
+* Omnivision OV2680 MIPI CSI-2 sensor
+
+Required Properties:
+- compatible: should be "ovti,ov2680".
+- clocks: reference to the xvclk input clock.
+- clock-names: should be "xvclk".
+- DOVDD-supply: Digital I/O voltage supply.
+- DVDD-supply: Digital core voltage supply.
+- AVDD-supply: Analog voltage supply.
+
+Optional Properties:
+- reset-gpios: reference to the GPIO connected to the powerdown/reset pin,
+ if any. This is an active low signal to the OV2680.
+
+The device node must contain one 'port' child node for its digital output
+video port, and this port must have a single endpoint in accordance with
+the video interface bindings defined in
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Endpoint node required properties for CSI-2 connection are:
+- remote-endpoint: a phandle to the bus receiver's endpoint node.
+- clock-lanes: should be set to <0> (clock lane on hardware lane 0).
+- data-lanes: should be set to <1> (one CSI-2 lane supported).
+
+Example:
+
+&i2c2 {
+ ov2680: camera-sensor@36 {
+ compatible = "ovti,ov2680";
+ reg = <0x36>;
+ clocks = <&osc>;
+ clock-names = "xvclk";
+ reset-gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
+ DOVDD-supply = <&sw2_reg>;
+ DVDD-supply = <&sw2_reg>;
+ AVDD-supply = <&reg_peri_3p15v>;
+
+ port {
+ ov2680_to_mipi: endpoint {
+ remote-endpoint = <&mipi_from_sensor>;
+ clock-lanes = <0>;
+ data-lanes = <1>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/media/i2c/ov5640.txt b/Documentation/devicetree/bindings/media/i2c/ov5640.txt
index 8e36da0d8406..c97c2f2da12d 100644
--- a/Documentation/devicetree/bindings/media/i2c/ov5640.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ov5640.txt
@@ -13,6 +13,10 @@ Optional Properties:
This is an active low signal to the OV5640.
- powerdown-gpios: reference to the GPIO connected to the powerdown pin,
if any. This is an active high signal to the OV5640.
+- rotation: as defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt,
+ valid values are 0 (sensor mounted upright) and 180 (sensor
+ mounted upside down).
The device node must contain one 'port' child node for its digital output
video port, in accordance with the video interface bindings defined in
@@ -51,6 +55,7 @@ Examples:
DVDD-supply = <&vgen2_reg>; /* 1.5v */
powerdown-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio1 20 GPIO_ACTIVE_LOW>;
+ rotation = <180>;
port {
/* MIPI CSI-2 bus endpoint */
diff --git a/Documentation/devicetree/bindings/media/i2c/tc358743.txt b/Documentation/devicetree/bindings/media/i2c/tc358743.txt
index 49f8bcc2ea4d..59102edcf01e 100644
--- a/Documentation/devicetree/bindings/media/i2c/tc358743.txt
+++ b/Documentation/devicetree/bindings/media/i2c/tc358743.txt
@@ -12,7 +12,7 @@ Required Properties:
Optional Properties:
- reset-gpios: gpio phandle GPIO connected to the reset pin
-- interrupts, interrupt-parent: GPIO connected to the interrupt pin
+- interrupts: GPIO connected to the interrupt pin
- data-lanes: should be <1 2 3 4> for four-lane operation,
or <1 2> for two-lane operation
- clock-lanes: should be <0>
diff --git a/Documentation/devicetree/bindings/media/nvidia,tegra-vde.txt b/Documentation/devicetree/bindings/media/nvidia,tegra-vde.txt
index 470237ed6fe5..7302e949e662 100644
--- a/Documentation/devicetree/bindings/media/nvidia,tegra-vde.txt
+++ b/Documentation/devicetree/bindings/media/nvidia,tegra-vde.txt
@@ -27,9 +27,15 @@ Required properties:
- sxe
- clocks : Must include the following entries:
- vde
-- resets : Must include the following entries:
+- resets : Must contain an entry for each entry in reset-names.
+- reset-names : Should include the following entries:
- vde
+Optional properties:
+- resets : Must contain an entry for each entry in reset-names.
+- reset-names : Must include the following entries:
+ - mc
+
Example:
video-codec@6001a000 {
@@ -51,5 +57,6 @@ video-codec@6001a000 {
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; /* SXE interrupt */
interrupt-names = "sync-token", "bsev", "sxe";
clocks = <&tegra_car TEGRA20_CLK_VDE>;
- resets = <&tegra_car 61>;
+ reset-names = "vde", "mc";
+ resets = <&tegra_car 61>, <&mc TEGRA20_MC_RESET_VDE>;
};
diff --git a/Documentation/devicetree/bindings/media/qcom,camss.txt b/Documentation/devicetree/bindings/media/qcom,camss.txt
index cadecebc73f7..09eb6ed99114 100644
--- a/Documentation/devicetree/bindings/media/qcom,camss.txt
+++ b/Documentation/devicetree/bindings/media/qcom,camss.txt
@@ -5,8 +5,9 @@ Qualcomm Camera Subsystem
- compatible:
Usage: required
Value type: <stringlist>
- Definition: Should contain:
+ Definition: Should contain one of:
- "qcom,msm8916-camss"
+ - "qcom,msm8996-camss"
- reg:
Usage: required
Value type: <prop-encoded-array>
@@ -19,11 +20,16 @@ Qualcomm Camera Subsystem
- "csiphy0_clk_mux"
- "csiphy1"
- "csiphy1_clk_mux"
+ - "csiphy2" (8996 only)
+ - "csiphy2_clk_mux" (8996 only)
- "csid0"
- "csid1"
+ - "csid2" (8996 only)
+ - "csid3" (8996 only)
- "ispif"
- "csi_clk_mux"
- "vfe0"
+ - "vfe1" (8996 only)
- interrupts:
Usage: required
Value type: <prop-encoded-array>
@@ -34,10 +40,14 @@ Qualcomm Camera Subsystem
Definition: Should contain the following entries:
- "csiphy0"
- "csiphy1"
+ - "csiphy2" (8996 only)
- "csid0"
- "csid1"
+ - "csid2" (8996 only)
+ - "csid3" (8996 only)
- "ispif"
- "vfe0"
+ - "vfe1" (8996 only)
- power-domains:
Usage: required
Value type: <prop-encoded-array>
@@ -53,25 +63,42 @@ Qualcomm Camera Subsystem
Usage: required
Value type: <stringlist>
Definition: Should contain the following entries:
- - "camss_top_ahb"
- - "ispif_ahb"
- - "csiphy0_timer"
- - "csiphy1_timer"
- - "csi0_ahb"
- - "csi0"
- - "csi0_phy"
- - "csi0_pix"
- - "csi0_rdi"
- - "csi1_ahb"
- - "csi1"
- - "csi1_phy"
- - "csi1_pix"
- - "csi1_rdi"
- - "camss_ahb"
- - "camss_vfe_vfe"
- - "camss_csi_vfe"
- - "iface"
- - "bus"
+ - "top_ahb"
+ - "ispif_ahb"
+ - "csiphy0_timer"
+ - "csiphy1_timer"
+ - "csiphy2_timer" (8996 only)
+ - "csi0_ahb"
+ - "csi0"
+ - "csi0_phy"
+ - "csi0_pix"
+ - "csi0_rdi"
+ - "csi1_ahb"
+ - "csi1"
+ - "csi1_phy"
+ - "csi1_pix"
+ - "csi1_rdi"
+ - "csi2_ahb" (8996 only)
+ - "csi2" (8996 only)
+ - "csi2_phy" (8996 only)
+ - "csi2_pix" (8996 only)
+ - "csi2_rdi" (8996 only)
+ - "csi3_ahb" (8996 only)
+ - "csi3" (8996 only)
+ - "csi3_phy" (8996 only)
+ - "csi3_pix" (8996 only)
+ - "csi3_rdi" (8996 only)
+ - "ahb"
+ - "vfe0"
+ - "csi_vfe0"
+					- "vfe0_ahb" (8996 only)
+					- "vfe0_stream" (8996 only)
+					- "vfe1" (8996 only)
+					- "csi_vfe1" (8996 only)
+					- "vfe1_ahb" (8996 only)
+					- "vfe1_stream" (8996 only)
+ - "vfe_ahb"
+ - "vfe_axi"
- vdda-supply:
Usage: required
Value type: <phandle>
@@ -90,22 +117,27 @@ Qualcomm Camera Subsystem
- reg:
Usage: required
Value type: <u32>
- Definition: Selects CSI2 PHY interface - PHY0 or PHY1.
+ Definition: Selects CSI2 PHY interface - PHY0, PHY1
+ or PHY2 (8996 only)
Endpoint node properties:
- clock-lanes:
Usage: required
Value type: <u32>
- Definition: The physical clock lane index. The value
- must always be <1> as the physical clock
- lane is lane 1.
+ Definition: The physical clock lane index. On 8916
+ the value must always be <1> as the physical
+ clock lane is lane 1. On 8996 the value must
+ always be <7> as the hardware supports D-PHY
+ and C-PHY, indexes are in a common set and
+ D-PHY physical clock lane is labeled as 7.
- data-lanes:
Usage: required
Value type: <prop-encoded-array>
- Definition: An array of physical data lanes indexes.
- Position of an entry determines the logical
- lane number, while the value of an entry
- indicates physical lane index. Lane swapping
- is supported.
+ Definition: An array of physical data lanes indexes.
+ Position of an entry determines the logical
+ lane number, while the value of an entry
+ indicates physical lane index. Lane swapping
+ is supported. Physical lane indexes for
+ 8916: 0, 2, 3, 4; for 8996: 0, 1, 2, 3.
* An Example
@@ -161,25 +193,25 @@ Qualcomm Camera Subsystem
<&gcc GCC_CAMSS_CSI_VFE0_CLK>,
<&gcc GCC_CAMSS_VFE_AHB_CLK>,
<&gcc GCC_CAMSS_VFE_AXI_CLK>;
- clock-names = "camss_top_ahb",
- "ispif_ahb",
- "csiphy0_timer",
- "csiphy1_timer",
- "csi0_ahb",
- "csi0",
- "csi0_phy",
- "csi0_pix",
- "csi0_rdi",
- "csi1_ahb",
- "csi1",
- "csi1_phy",
- "csi1_pix",
- "csi1_rdi",
- "camss_ahb",
- "camss_vfe_vfe",
- "camss_csi_vfe",
- "iface",
- "bus";
+ clock-names = "top_ahb",
+ "ispif_ahb",
+ "csiphy0_timer",
+ "csiphy1_timer",
+ "csi0_ahb",
+ "csi0",
+ "csi0_phy",
+ "csi0_pix",
+ "csi0_rdi",
+ "csi1_ahb",
+ "csi1",
+ "csi1_phy",
+ "csi1_pix",
+ "csi1_rdi",
+ "ahb",
+ "vfe0",
+ "csi_vfe0",
+ "vfe_ahb",
+ "vfe_axi";
vdda-supply = <&pm8916_l2>;
iommus = <&apps_iommu 3>;
ports {
diff --git a/Documentation/devicetree/bindings/media/qcom,venus.txt b/Documentation/devicetree/bindings/media/qcom,venus.txt
index 2693449daf73..00d0d1bf7647 100644
--- a/Documentation/devicetree/bindings/media/qcom,venus.txt
+++ b/Documentation/devicetree/bindings/media/qcom,venus.txt
@@ -6,6 +6,7 @@
Definition: Value should contain one of:
- "qcom,msm8916-venus"
- "qcom,msm8996-venus"
+ - "qcom,sdm845-venus"
- reg:
Usage: required
Value type: <prop-encoded-array>
diff --git a/Documentation/devicetree/bindings/media/rcar_vin.txt b/Documentation/devicetree/bindings/media/rcar_vin.txt
index a19517e1c669..2f420050d57f 100644
--- a/Documentation/devicetree/bindings/media/rcar_vin.txt
+++ b/Documentation/devicetree/bindings/media/rcar_vin.txt
@@ -21,7 +21,9 @@ on Gen3 platforms to a CSI-2 receiver.
- "renesas,vin-r8a7794" for the R8A7794 device
- "renesas,vin-r8a7795" for the R8A7795 device
- "renesas,vin-r8a7796" for the R8A7796 device
+ - "renesas,vin-r8a77965" for the R8A77965 device
- "renesas,vin-r8a77970" for the R8A77970 device
+ - "renesas,vin-r8a77995" for the R8A77995 device
- "renesas,rcar-gen2-vin" for a generic R-Car Gen2 or RZ/G1 compatible
device.
@@ -37,30 +39,51 @@ Additionally, an alias named vinX will need to be created to specify
which video input device this is.
The per-board settings Gen2 platforms:
- - port sub-node describing a single endpoint connected to the vin
- as described in video-interfaces.txt[1]. Only the first one will
- be considered as each vin interface has one input port.
+
+- port - sub-node describing a single endpoint connected to the VIN
+ from external SoC pins as described in video-interfaces.txt[1].
+  Only the first one will be considered as each VIN interface has one
+ input port.
+
+ - Optional properties for endpoint nodes:
+ - hsync-active: see [1] for description. Default is active high.
+ - vsync-active: see [1] for description. Default is active high.
+ If both HSYNC and VSYNC polarities are not specified, embedded
+ synchronization is selected.
+   - field-even-active: see [1] for description. Default is active high.
+ - bus-width: see [1] for description. The selected bus width depends on
+ the SoC type and selected input image format.
+ Valid values are: 8, 10, 12, 16, 24 and 32.
+ - data-shift: see [1] for description. Valid values are 0 and 8.
+ - data-enable-active: polarity of CLKENB signal, see [1] for
+ description. Default is active high.
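+
+  A parallel input endpoint using these properties could look as follows
+  (a minimal sketch; the remote-endpoint label is an assumption):
+
+	vin0ep: endpoint {
+		remote-endpoint = <&decoder_out>;
+		hsync-active = <0>;
+		vsync-active = <0>;
+		bus-width = <8>;
+	};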
The per-board settings Gen3 platforms:
Gen3 platforms can support both a single connected parallel input source
-from external SoC pins (port0) and/or multiple parallel input sources
-from local SoC CSI-2 receivers (port1) depending on SoC.
+from external SoC pins (port@0) and/or multiple parallel input sources
+from local SoC CSI-2 receivers (port@1) depending on SoC.
- renesas,id - ID number of the VIN, VINx in the documentation.
- ports
- - port 0 - sub-node describing a single endpoint connected to the VIN
- from external SoC pins described in video-interfaces.txt[1].
- Describing more then one endpoint in port 0 is invalid. Only VIN
- instances that are connected to external pins should have port 0.
- - port 1 - sub-nodes describing one or more endpoints connected to
+ - port@0 - sub-node describing a single endpoint connected to the VIN
+ from external SoC pins as described in video-interfaces.txt[1].
+ Describing more than one endpoint in port@0 is invalid. Only VIN
+ instances that are connected to external pins should have port@0.
+
+ Endpoint nodes of port@0 support the optional properties listed in
+ the Gen2 per-board settings description.
+
+ - port@1 - sub-nodes describing one or more endpoints connected to
the VIN from local SoC CSI-2 receivers. The endpoint numbers must
use the following schema.
- - Endpoint 0 - sub-node describing the endpoint connected to CSI20
- - Endpoint 1 - sub-node describing the endpoint connected to CSI21
- - Endpoint 2 - sub-node describing the endpoint connected to CSI40
- - Endpoint 3 - sub-node describing the endpoint connected to CSI41
+ - endpoint@0 - sub-node describing the endpoint connected to CSI20
+ - endpoint@1 - sub-node describing the endpoint connected to CSI21
+ - endpoint@2 - sub-node describing the endpoint connected to CSI40
+ - endpoint@3 - sub-node describing the endpoint connected to CSI41
+
+ Endpoint nodes of port@1 do not support any optional endpoint property.
Device node example for Gen2 platforms
--------------------------------------
@@ -107,9 +130,6 @@ Board setup example for Gen2 platforms (vin1 composite video input)
status = "okay";
port {
- #address-cells = <1>;
- #size-cells = <0>;
-
vin1ep0: endpoint {
remote-endpoint = <&adv7180>;
bus-width = <8>;
diff --git a/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt b/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
index 17a8e81ca0cc..cfa4ffada8ae 100644
--- a/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
+++ b/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
@@ -2,7 +2,6 @@ Bindings, specific for the sh_mobile_ceu_camera.c driver:
- compatible: Should be "renesas,sh-mobile-ceu"
- reg: register base and size
- interrupts: the interrupt number
- - interrupt-parent: the interrupt controller
- renesas,max-width: maximum image width, supported on this SoC
- renesas,max-height: maximum image height, supported on this SoC
diff --git a/Documentation/devicetree/bindings/media/video-interfaces.txt b/Documentation/devicetree/bindings/media/video-interfaces.txt
index 258b8dfddf48..baf9d9756b3c 100644
--- a/Documentation/devicetree/bindings/media/video-interfaces.txt
+++ b/Documentation/devicetree/bindings/media/video-interfaces.txt
@@ -85,6 +85,10 @@ Optional properties
- lens-focus: A phandle to the node of the focus lens controller.
+- rotation: The rotation of the device, typically an image sensor, relative
+  to its upright mounting position, expressed in degrees counter clockwise.
+  Typical values are 0 (upright) and 180 (upside down).
+
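+For instance, a sensor mounted upside down on a board could be described
+with (a one-line sketch):
+
+	rotation = <180>;
+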
Optional endpoint properties
----------------------------
@@ -109,6 +113,8 @@ Optional endpoint properties
Note, that if HSYNC and VSYNC polarities are not specified, embedded
synchronization may be required, where supported.
- data-active: similar to HSYNC and VSYNC, specifies data line polarity.
+- data-enable-active: similar to HSYNC and VSYNC, specifies the data enable
+ signal polarity.
- field-even-active: field signal level during the even field data transmission.
- pclk-sample: sample data on rising (1) or falling (0) edge of the pixel clock
signal.
diff --git a/Documentation/devicetree/bindings/mfd/ac100.txt b/Documentation/devicetree/bindings/mfd/ac100.txt
index b8ef00667599..dff219f07493 100644
--- a/Documentation/devicetree/bindings/mfd/ac100.txt
+++ b/Documentation/devicetree/bindings/mfd/ac100.txt
@@ -10,7 +10,6 @@ Required properties:
- sub-nodes:
- codec
- compatible: "x-powers,ac100-codec"
- - interrupt-parent: The parent interrupt controller
- interrupts: SoC NMI / GPIO interrupt connected to the
IRQ_AUDIO pin
- #clock-cells: Shall be 0
@@ -20,9 +19,6 @@ Required properties:
- rtc
- compatible: "x-powers,ac100-rtc"
- - interrupt-parent: The parent interrupt controller
- - interrupts: SoC NMI / GPIO interrupt connected to the
- IRQ_RTC pin
- clocks: A phandle to the codec's "4M_adda" clock
- #clock-cells: Shall be 1
- clock-output-names: "cko1_rtc", "cko2_rtc", "cko3_rtc"
diff --git a/Documentation/devicetree/bindings/mfd/altera-a10sr.txt b/Documentation/devicetree/bindings/mfd/altera-a10sr.txt
index c8a736554b4b..a688520dd87d 100644
--- a/Documentation/devicetree/bindings/mfd/altera-a10sr.txt
+++ b/Documentation/devicetree/bindings/mfd/altera-a10sr.txt
@@ -5,7 +5,6 @@ Required parent device properties:
- spi-max-frequency : Maximum SPI frequency.
- reg : The SPI Chip Select address for the Arria10
System Resource chip
-- interrupt-parent : The parent interrupt controller.
- interrupts : The interrupt line the device is connected to.
- interrupt-controller : Marks the device node as an interrupt controller.
- #interrupt-cells : The number of cells to describe an IRQ, should be 2.
diff --git a/Documentation/devicetree/bindings/mfd/arizona.txt b/Documentation/devicetree/bindings/mfd/arizona.txt
index a014afb07902..9b62831fdf3e 100644
--- a/Documentation/devicetree/bindings/mfd/arizona.txt
+++ b/Documentation/devicetree/bindings/mfd/arizona.txt
@@ -22,7 +22,6 @@ Required properties:
connected to.
- interrupt-controller : Arizona class devices contain interrupt controllers
and may provide interrupt services to other devices.
- - interrupt-parent : The parent interrupt controller.
- #interrupt-cells: the number of cells to describe an IRQ, this should be 2.
The first cell is the IRQ number.
The second cell is the flags, encoded as the trigger masks from
diff --git a/Documentation/devicetree/bindings/mfd/axp20x.txt b/Documentation/devicetree/bindings/mfd/axp20x.txt
index d1762f3b30af..62091f6b025e 100644
--- a/Documentation/devicetree/bindings/mfd/axp20x.txt
+++ b/Documentation/devicetree/bindings/mfd/axp20x.txt
@@ -28,7 +28,6 @@ Required properties:
* "x-powers,axp809"
* "x-powers,axp813"
- reg: The I2C slave address or RSB hardware address for the AXP chip
-- interrupt-parent: The parent interrupt controller
- interrupts: SoC NMI / GPIO interrupt connected to the PMIC's IRQ pin
- interrupt-controller: The PMIC has its own internal IRQs
- #interrupt-cells: Should be set to 1
diff --git a/Documentation/devicetree/bindings/mfd/bd9571mwv.txt b/Documentation/devicetree/bindings/mfd/bd9571mwv.txt
index 25d1f697eb25..8c4678650d1a 100644
--- a/Documentation/devicetree/bindings/mfd/bd9571mwv.txt
+++ b/Documentation/devicetree/bindings/mfd/bd9571mwv.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible : Should be "rohm,bd9571mwv".
- reg : I2C slave address.
- - interrupt-parent : Phandle to the parent interrupt controller.
- interrupts : The interrupt line the device is connected to.
- interrupt-controller : Marks the device node as an interrupt controller.
- #interrupt-cells : The number of cells to describe an IRQ, should be 2.
diff --git a/Documentation/devicetree/bindings/mfd/bfticu.txt b/Documentation/devicetree/bindings/mfd/bfticu.txt
index 65c90776c620..538192fda9ae 100644
--- a/Documentation/devicetree/bindings/mfd/bfticu.txt
+++ b/Documentation/devicetree/bindings/mfd/bfticu.txt
@@ -10,7 +10,6 @@ Required properties:
- interrupts: the main IRQ line to signal the collected IRQs
- #interrupt-cells : is 2 and their usage is compliant to the 2 cells variant
of Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-- interrupt-parent: the parent IRQ ctrl the main IRQ is connected to
- reg: access on the parent local bus (chip select, offset in chip select, size)
Example:
diff --git a/Documentation/devicetree/bindings/mfd/da9055.txt b/Documentation/devicetree/bindings/mfd/da9055.txt
index 6dab34d34fce..131a53283e17 100644
--- a/Documentation/devicetree/bindings/mfd/da9055.txt
+++ b/Documentation/devicetree/bindings/mfd/da9055.txt
@@ -22,8 +22,6 @@ Documentation/devicetree/bindings/sound/da9055.txt
Required properties:
- compatible : Should be "dlg,da9055-pmic"
- reg: Specifies the I2C slave address (defaults to 0x5a but can be modified)
-- interrupt-parent: Specifies the phandle of the interrupt controller to which
- the IRQs from da9055 are delivered to.
- interrupts: IRQ line info for da9055 chip.
- interrupt-controller: da9055 has internal IRQs (has own IRQ domain).
- #interrupt-cells: Should be 1, is the local IRQ number for da9055.
diff --git a/Documentation/devicetree/bindings/mfd/da9062.txt b/Documentation/devicetree/bindings/mfd/da9062.txt
index c0a418c27e9d..edca653a5777 100644
--- a/Documentation/devicetree/bindings/mfd/da9062.txt
+++ b/Documentation/devicetree/bindings/mfd/da9062.txt
@@ -32,8 +32,6 @@ Required properties:
"dlg,da9061" for DA9061
- reg : Specifies the I2C slave address (this defaults to 0x58 but it can be
modified to match the chip's OTP settings).
-- interrupt-parent : Specifies the reference to the interrupt controller for
- the DA9062 or DA9061.
- interrupts : IRQ line information.
- interrupt-controller
diff --git a/Documentation/devicetree/bindings/mfd/da9063.txt b/Documentation/devicetree/bindings/mfd/da9063.txt
index 443e68286957..8da879935c59 100644
--- a/Documentation/devicetree/bindings/mfd/da9063.txt
+++ b/Documentation/devicetree/bindings/mfd/da9063.txt
@@ -16,8 +16,6 @@ Required properties:
- compatible : Should be "dlg,da9063" or "dlg,da9063l"
- reg : Specifies the I2C slave address (this defaults to 0x58 but it can be
modified to match the chip's OTP settings).
-- interrupt-parent : Specifies the reference to the interrupt controller for
- the DA9063.
- interrupts : IRQ line information.
- interrupt-controller
diff --git a/Documentation/devicetree/bindings/mfd/da9150.txt b/Documentation/devicetree/bindings/mfd/da9150.txt
index fd4dca7f4aba..f09b41fbdf47 100644
--- a/Documentation/devicetree/bindings/mfd/da9150.txt
+++ b/Documentation/devicetree/bindings/mfd/da9150.txt
@@ -13,8 +13,6 @@ da9150-fg : Battery Fuel-Gauge
Required properties:
- compatible : Should be "dlg,da9150"
- reg: Specifies the I2C slave address
-- interrupt-parent: Specifies the phandle of the interrupt controller to which
- the IRQs from da9150 are delivered to.
- interrupts: IRQ line info for da9150 chip.
- interrupt-controller: da9150 has internal IRQs (own IRQ domain).
(See ../interrupt-controller/interrupts.txt for
diff --git a/Documentation/devicetree/bindings/mfd/max14577.txt b/Documentation/devicetree/bindings/mfd/max14577.txt
index 236264c10b92..fc6f0f4e8beb 100644
--- a/Documentation/devicetree/bindings/mfd/max14577.txt
+++ b/Documentation/devicetree/bindings/mfd/max14577.txt
@@ -11,7 +11,6 @@ Required properties:
- compatible : Must be "maxim,max14577" or "maxim,max77836".
- reg : I2C slave address for the max14577 chip (0x25 for max14577/max77836)
- interrupts : IRQ line for the chip.
-- interrupt-parent : The parent interrupt controller.
Required nodes:
diff --git a/Documentation/devicetree/bindings/mfd/max77686.txt b/Documentation/devicetree/bindings/mfd/max77686.txt
index 0f2587fa42cb..42968b7144e0 100644
--- a/Documentation/devicetree/bindings/mfd/max77686.txt
+++ b/Documentation/devicetree/bindings/mfd/max77686.txt
@@ -15,7 +15,6 @@ Required properties:
- compatible : Must be "maxim,max77686";
- reg : Specifies the i2c slave address of PMIC block.
- interrupts : This i2c device has an IRQ line connected to the main SoC.
-- interrupt-parent : The parent interrupt controller.
Example:
diff --git a/Documentation/devicetree/bindings/mfd/max77693.txt b/Documentation/devicetree/bindings/mfd/max77693.txt
index e6754974a745..a3c60a7a3be1 100644
--- a/Documentation/devicetree/bindings/mfd/max77693.txt
+++ b/Documentation/devicetree/bindings/mfd/max77693.txt
@@ -14,7 +14,6 @@ Required properties:
- compatible : Must be "maxim,max77693".
- reg : Specifies the i2c slave address of PMIC block.
- interrupts : This i2c device has an IRQ line connected to the main SoC.
-- interrupt-parent : The parent interrupt controller.
Optional properties:
- regulators : The regulators of max77693 have to be instantiated under subnode
diff --git a/Documentation/devicetree/bindings/mfd/max77802.txt b/Documentation/devicetree/bindings/mfd/max77802.txt
index f2f3fe75901c..09decac20d91 100644
--- a/Documentation/devicetree/bindings/mfd/max77802.txt
+++ b/Documentation/devicetree/bindings/mfd/max77802.txt
@@ -14,7 +14,6 @@ Required properties:
- compatible : Must be "maxim,max77802"
- reg : Specifies the I2C slave address of PMIC block.
- interrupts : I2C device IRQ line connected to the main SoC.
-- interrupt-parent : The parent interrupt controller.
Example:
diff --git a/Documentation/devicetree/bindings/mfd/max8998.txt b/Documentation/devicetree/bindings/mfd/max8998.txt
index 23a3650ff2a2..5f2f07c09c90 100644
--- a/Documentation/devicetree/bindings/mfd/max8998.txt
+++ b/Documentation/devicetree/bindings/mfd/max8998.txt
@@ -20,8 +20,6 @@ Required properties:
- reg: Specifies the i2c slave address of the pmic block. It should be 0x66.
Optional properties:
-- interrupt-parent: Specifies the phandle of the interrupt controller to which
- the interrupts from MAX8998 are routed to.
- interrupts: Interrupt specifiers for two interrupt sources.
- First interrupt specifier is for main interrupt.
- Second interrupt specifier is for power-on/-off interrupt.
diff --git a/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt b/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
index c639705a98ef..5ddcc8f4febc 100644
--- a/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
+++ b/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
@@ -3,7 +3,6 @@ Motorola CPCAP PMIC device tree binding
Required properties:
- compatible : One or both of "motorola,cpcap" or "ste,6556002"
- reg : SPI chip select
-- interrupt-parent : The parent interrupt controller
- interrupts : The interrupt line the device is connected to
- interrupt-controller : Marks the device node as an interrupt controller
- #interrupt-cells : The number of cells to describe an IRQ, should be 2
diff --git a/Documentation/devicetree/bindings/mfd/palmas.txt b/Documentation/devicetree/bindings/mfd/palmas.txt
index 8ae1a32bfb7e..e736ab3012a6 100644
--- a/Documentation/devicetree/bindings/mfd/palmas.txt
+++ b/Documentation/devicetree/bindings/mfd/palmas.txt
@@ -25,7 +25,6 @@ and also the generic series names
The first cell is the IRQ number.
The second cell is the flags, encoded as the trigger masks from
Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-- interrupt-parent : The parent interrupt controller.
Optional properties:
ti,mux-padX : set the pad register X (1-2) to the correct muxing for the
diff --git a/Documentation/devicetree/bindings/mfd/retu.txt b/Documentation/devicetree/bindings/mfd/retu.txt
index 876242394a16..df3005dd3e3c 100644
--- a/Documentation/devicetree/bindings/mfd/retu.txt
+++ b/Documentation/devicetree/bindings/mfd/retu.txt
@@ -9,7 +9,6 @@ Required properties:
- compatible: "nokia,retu" or "nokia,tahvo"
- reg: Specifies the CBUS slave address of the ASIC chip
- interrupts: The interrupt line the device is connected to
-- interrupt-parent: The parent interrupt controller
Example:
diff --git a/Documentation/devicetree/bindings/mfd/rk808.txt b/Documentation/devicetree/bindings/mfd/rk808.txt
index 91b65227afeb..1683ec3245bc 100644
--- a/Documentation/devicetree/bindings/mfd/rk808.txt
+++ b/Documentation/devicetree/bindings/mfd/rk808.txt
@@ -10,7 +10,6 @@ Required properties:
- compatible: "rockchip,rk808"
- compatible: "rockchip,rk818"
- reg: I2C slave address
-- interrupt-parent: The parent interrupt controller.
- interrupts: the interrupt outputs of the controller.
- #clock-cells: from common clock binding; shall be set to 1 (multiple clock
outputs). See <dt-bindings/clock/rockchip,rk808.h> for clock IDs.
diff --git a/Documentation/devicetree/bindings/mfd/samsung,sec-core.txt b/Documentation/devicetree/bindings/mfd/samsung,sec-core.txt
index cdd079bfc287..c68cdd365153 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,sec-core.txt
+++ b/Documentation/devicetree/bindings/mfd/samsung,sec-core.txt
@@ -31,8 +31,6 @@ Required properties:
- reg: Specifies the I2C slave address of the pmic block. It should be 0x66.
Optional properties:
- - interrupt-parent: Specifies the phandle of the interrupt controller to which
- the interrupts from s2mps11 are delivered to.
- interrupts: Interrupt specifiers for interrupt sources.
- samsung,s2mps11-wrstbi-ground: Indicates that WRSTBI pin of PMIC is pulled
down. When the system is suspended it will always go down thus triggerring
diff --git a/Documentation/devicetree/bindings/mfd/stmpe.txt b/Documentation/devicetree/bindings/mfd/stmpe.txt
index f9065a5781a2..c797c05cd3c2 100644
--- a/Documentation/devicetree/bindings/mfd/stmpe.txt
+++ b/Documentation/devicetree/bindings/mfd/stmpe.txt
@@ -10,7 +10,6 @@ Required properties:
Optional properties:
- interrupts : The interrupt outputs from the controller
- interrupt-controller : Marks the device node as an interrupt controller
- - interrupt-parent : Specifies which IRQ controller we're connected to
- wakeup-source : Marks the input device as wakable
- st,autosleep-timeout : Valid entries (ms); 4, 16, 32, 64, 128, 256, 512 and 1024
- irq-gpio : If present, which GPIO to use for event IRQ
diff --git a/Documentation/devicetree/bindings/mfd/tc3589x.txt b/Documentation/devicetree/bindings/mfd/tc3589x.txt
index 23fc2f21f5a4..4f22b2b07dc5 100644
--- a/Documentation/devicetree/bindings/mfd/tc3589x.txt
+++ b/Documentation/devicetree/bindings/mfd/tc3589x.txt
@@ -15,7 +15,6 @@ Required properties:
- compatible : must be "toshiba,tc35890", "toshiba,tc35892", "toshiba,tc35893",
"toshiba,tc35894", "toshiba,tc35895" or "toshiba,tc35896"
- reg : I2C address of the device
- - interrupt-parent : specifies which IRQ controller we're connected to
- interrupts : the interrupt on the parent the controller is connected to
- interrupt-controller : marks the device node as an interrupt controller
- #interrupt-cells : should be <1>, the first cell is the IRQ offset on this
diff --git a/Documentation/devicetree/bindings/mfd/tps65086.txt b/Documentation/devicetree/bindings/mfd/tps65086.txt
index 9cfa886fe99f..67eac0ed32df 100644
--- a/Documentation/devicetree/bindings/mfd/tps65086.txt
+++ b/Documentation/devicetree/bindings/mfd/tps65086.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible : Should be "ti,tps65086".
- reg : I2C slave address.
- - interrupt-parent : Phandle to the parent interrupt controller.
- interrupts : The interrupt line the device is connected to.
- interrupt-controller : Marks the device node as an interrupt controller.
- #interrupt-cells : The number of cells to describe an IRQ, should be 2.
diff --git a/Documentation/devicetree/bindings/mfd/tps65912.txt b/Documentation/devicetree/bindings/mfd/tps65912.txt
index 717e66d23142..8becb183a48e 100644
--- a/Documentation/devicetree/bindings/mfd/tps65912.txt
+++ b/Documentation/devicetree/bindings/mfd/tps65912.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible : Should be "ti,tps65912".
- reg : Slave address or chip select number (I2C / SPI).
- - interrupt-parent : The parent interrupt controller.
- interrupts : The interrupt line the device is connected to.
- interrupt-controller : Marks the device node as an interrupt controller.
- #interrupt-cells : The number of cells to describe an IRQ, should be 2.
diff --git a/Documentation/devicetree/bindings/mfd/twl-familly.txt b/Documentation/devicetree/bindings/mfd/twl-familly.txt
index a66fcf946759..56f244b5d8a4 100644
--- a/Documentation/devicetree/bindings/mfd/twl-familly.txt
+++ b/Documentation/devicetree/bindings/mfd/twl-familly.txt
@@ -16,7 +16,6 @@ Required properties:
- interrupt-controller : Since the twl support several interrupts internally,
it is considered as an interrupt controller cascaded to the SoC one.
- #interrupt-cells = <1>;
-- interrupt-parent : The parent interrupt controller.
Optional node:
- Child nodes contain in the twl. The twl family is made of several variants
diff --git a/Documentation/devicetree/bindings/mfd/twl6040.txt b/Documentation/devicetree/bindings/mfd/twl6040.txt
index 9a98ee7c323d..06e9dd7a0d96 100644
--- a/Documentation/devicetree/bindings/mfd/twl6040.txt
+++ b/Documentation/devicetree/bindings/mfd/twl6040.txt
@@ -9,7 +9,6 @@ Required properties:
- compatible : "ti,twl6040" for twl6040, "ti,twl6041" for twl6041
- reg: must be 0x4b for i2c address
- interrupts: twl6040 has one interrupt line connecteded to the main SoC
-- interrupt-parent: The parent interrupt controller
- gpio-controller:
- #gpio-cells = <1>: twl6040 provides GPO lines.
- #clock-cells = <0>; twl6040 is a provider of pdmclk which is used by McPDM
diff --git a/Documentation/devicetree/bindings/mfd/wm831x.txt b/Documentation/devicetree/bindings/mfd/wm831x.txt
index 505709403d3f..6b84b1b0d018 100644
--- a/Documentation/devicetree/bindings/mfd/wm831x.txt
+++ b/Documentation/devicetree/bindings/mfd/wm831x.txt
@@ -22,7 +22,6 @@ Required properties:
- interrupts : The interrupt line the IRQ signal for the device is
connected to.
- - interrupt-parent : The parent interrupt controller.
- interrupt-controller : wm831x devices contain interrupt controllers and
may provide interrupt services to other devices.
diff --git a/Documentation/devicetree/bindings/mips/cavium/cib.txt b/Documentation/devicetree/bindings/mips/cavium/cib.txt
index f39a1aa2852b..410efa322254 100644
--- a/Documentation/devicetree/bindings/mips/cavium/cib.txt
+++ b/Documentation/devicetree/bindings/mips/cavium/cib.txt
@@ -13,8 +13,6 @@ Properties:
- cavium,max-bits: The index (zero based) of the highest numbered bit
in the CIB block.
-- interrupt-parent: Always the CIU on the SoC.
-
- interrupts: The CIU line to which the CIB block is connected.
- #interrupt-cells: Must be <2>. The first cell is the bit within the
diff --git a/Documentation/devicetree/bindings/misc/aspeed,cvic.txt b/Documentation/devicetree/bindings/misc/aspeed,cvic.txt
new file mode 100644
index 000000000000..d62c783d1d5e
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/aspeed,cvic.txt
@@ -0,0 +1,35 @@
+* ASPEED AST2400 and AST2500 coprocessor interrupt controller
+
+This file describes the bindings for the interrupt controller present
+in the AST2400 and AST2500 BMC SoCs which provides interrupt to the
+ColdFire coprocessor.
+
+It is not a normal interrupt controller, and it would be rather
+inconvenient to create an interrupt tree for it, as it shares some of
+the same sources as the main ARM interrupt controller but uses
+different interrupt numbers.
+
+The AST2500 also supports a SW generated interrupt.
+
+Required properties:
+- reg: address and length of the register for the device.
+- compatible: "aspeed,cvic" and one of:
+   "aspeed,ast2400-cvic" or
+   "aspeed,ast2500-cvic"
+
+- valid-sources: One cell, bitmap of supported sources for the implementation
+
+Optional properties:
+- copro-sw-interrupts: List of interrupt numbers that can be used as
+ SW interrupts from the ARM to the coprocessor.
+ (AST2500 only)
+
+Example:
+
+ cvic: copro-interrupt-controller@1e6c2000 {
+ compatible = "aspeed,ast2500-cvic";
+ valid-sources = <0xffffffff>;
+ copro-sw-interrupts = <1>;
+ reg = <0x1e6c2000 0x80>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
index 60481bfc3d31..5f3c02522c17 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
@@ -19,8 +19,6 @@ Required Properties:
- clocks: From clock bindings: Handles to clock inputs.
- clock-names: From clock bindings: Tuple including "clk_xin" and "clk_ahb"
- interrupts: Interrupt specifier
- - interrupt-parent: Phandle for the interrupt controller that services
- interrupts for this device.
Required Properties for "arasan,sdhci-5.1":
- phys: From PHY bindings: Phandle for the Generic PHY for arasan.
diff --git a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
index a2cf5e1c87d8..99c5cf8507e8 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
@@ -21,7 +21,6 @@ Required properties:
"fsl,ls1043a-esdhc"
"fsl,ls1046a-esdhc"
"fsl,ls2080a-esdhc"
- - interrupt-parent : interrupt source phandle.
- clock-frequency : specifies eSDHC base clock frequency.
Optional properties:
diff --git a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
index 0e5e2ec4001d..75486cca8054 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
@@ -13,8 +13,6 @@ Optional properties:
- gpios : may specify GPIOs in this order: Card-Detect GPIO,
Write-Protect GPIO. Note that this does not follow the
binding from mmc.txt, for historical reasons.
-- interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
Example:
diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
index b40f3a492800..bcda1dfc4bac 100644
--- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
+++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
@@ -42,7 +42,6 @@ Required properties:
May be "nand", if the SoC has the individual NAND
interrupts multiplexed behind another custom piece of
hardware
-- interrupt-parent : See standard interrupt bindings
- #address-cells : <1> - subnodes give the chip-select number
- #size-cells : <0>
diff --git a/Documentation/devicetree/bindings/mtd/denali-nand.txt b/Documentation/devicetree/bindings/mtd/denali-nand.txt
index 0ee8edb60efc..f33da8782741 100644
--- a/Documentation/devicetree/bindings/mtd/denali-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/denali-nand.txt
@@ -8,6 +8,9 @@ Required properties:
- reg : should contain registers location and length for data and reg.
- reg-names: Should contain the reg names "nand_data" and "denali_reg"
- interrupts : The interrupt number.
+ - clocks: should contain phandles of the controller core clock, the bus
+ interface clock, and the ECC circuit clock.
+ - clock-names: should contain "nand", "nand_x", "ecc"
Optional properties:
- nand-ecc-step-size: see nand.txt for details. If present, the value must be
@@ -31,5 +34,7 @@ nand: nand@ff900000 {
compatible = "altr,socfpga-denali-nand";
reg = <0xff900000 0x20>, <0xffb80000 0x1000>;
reg-names = "nand_data", "denali_reg";
+ clocks = <&nand_clk>, <&nand_x_clk>, <&nand_ecc_clk>;
+ clock-names = "nand", "nand_x", "ecc";
interrupts = <0 144 4>;
};
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
index dd559045593d..c059ab74ed88 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
@@ -16,7 +16,6 @@ Required properties:
- compatible: "ti,omap2-nand"
- reg: range id (CS number), base offset and length of the
NAND I/O space
- - interrupt-parent: must point to gpmc node
- interrupts: Two interrupt specifiers, one for fifoevent, one for termcount.
Optional properties:
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
index 956bb046e599..f03be904d3c2 100644
--- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
+++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
@@ -69,6 +69,15 @@ Optional properties:
all chips and support for it can not be detected at runtime.
Refer to your chips' datasheet to check if this is supported
by your chip.
+- broken-flash-reset : Some flash devices utilize stateful addressing modes
+ (e.g., for 32-bit addressing) which need to be managed
+ carefully by a system. Because these sorts of flash don't
+ have a standardized software reset command, and because some
+ systems don't toggle the flash RESET# pin upon system reset
+ (if the pin even exists at all), there are systems which
+ cannot reboot properly if the flash is left in the "wrong"
+ state. This boolean flag can be used on such systems, to
+ denote the absence of a reliable reset mechanism.
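+
+A flash node using this flag could look like this (a minimal sketch; the
+chip-select and the SPI frequency shown are assumptions):
+
+	flash@0 {
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <50000000>;
+		broken-flash-reset;
+	};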
Example:
diff --git a/Documentation/devicetree/bindings/mtd/nand.txt b/Documentation/devicetree/bindings/mtd/nand.txt
index 8bb11d809429..e949c778e983 100644
--- a/Documentation/devicetree/bindings/mtd/nand.txt
+++ b/Documentation/devicetree/bindings/mtd/nand.txt
@@ -25,7 +25,7 @@ Optional NAND chip properties:
Deprecated values:
"soft_bch": use "soft" and nand-ecc-algo instead
- nand-ecc-algo: string, algorithm of NAND ECC.
- Supported values are: "hamming", "bch".
+ Valid values are: "hamming", "bch", "rs".
- nand-bus-width : 8 or 16 bus width if not present 8
- nand-on-flash-bbt: boolean to enable on flash bbt option if not present false
@@ -43,6 +43,10 @@ Optional NAND chip properties:
This is particularly useful when only the in-band area is
used by the upper layers, and you want to make your NAND
as reliable as possible.
+- nand-is-boot-medium: Whether the NAND chip is a boot medium. Drivers might use
+ this information to select ECC algorithms supported by
+ the boot ROM or similar restrictions.
+
- nand-rb: shall contain the native Ready/Busy ids.
The ECC strength and ECC step size properties define the correction capability
diff --git a/Documentation/devicetree/bindings/mtd/nvidia-tegra20-nand.txt b/Documentation/devicetree/bindings/mtd/nvidia-tegra20-nand.txt
new file mode 100644
index 000000000000..b2f2ca12f9e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/nvidia-tegra20-nand.txt
@@ -0,0 +1,64 @@
+NVIDIA Tegra NAND Flash controller
+
+Required properties:
+- compatible: Must be one of:
+ - "nvidia,tegra20-nand"
+- reg: MMIO address range
+- interrupts: interrupt output of the NFC controller
+- clocks: Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+ - nand
+- resets: Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names: Must include the following entries:
+ - nand
+
+Optional child nodes:
+Individual NAND chips are children of the NAND controller node. Currently
+only one NAND chip is supported.
+
+Required child node properties:
+- reg: An integer ranging from 1 to 6 representing the CS line to use.
+
+Optional child node properties:
+- nand-ecc-mode: String, operation mode of the NAND ecc mode. Currently only
+ "hw" is supported.
+- nand-ecc-algo: string, algorithm of NAND ECC.
+ Supported values with "hw" ECC mode are: "rs", "bch".
+- nand-bus-width : See nand.txt
+- nand-on-flash-bbt: See nand.txt
+- nand-ecc-strength: integer representing the number of bits to correct
+                     per ECC step (the ECC step size is always 512). The
+                     supported strengths using HW ECC modes are:
+ - RS: 4, 6, 8
+ - BCH: 4, 8, 14, 16
+- nand-ecc-maximize: See nand.txt
+- nand-is-boot-medium: Makes sure only ECC strengths supported by the boot ROM
+ are chosen.
+- wp-gpios: GPIO specifier for the write protect pin.
+
+Optional child node of NAND chip nodes:
+Partitions: see partition.txt
+
+ Example:
+ nand-controller@70008000 {
+ compatible = "nvidia,tegra20-nand";
+ reg = <0x70008000 0x100>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA20_CLK_NDFLASH>;
+ clock-names = "nand";
+ resets = <&tegra_car 13>;
+ reset-names = "nand";
+
+ nand@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ nand-bus-width = <8>;
+ nand-on-flash-bbt;
+ nand-ecc-algo = "bch";
+ nand-ecc-strength = <8>;
+ wp-gpios = <&gpio TEGRA_GPIO(S, 0) GPIO_ACTIVE_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt
index a8f382642ba9..afbbd870496d 100644
--- a/Documentation/devicetree/bindings/mtd/partition.txt
+++ b/Documentation/devicetree/bindings/mtd/partition.txt
@@ -14,6 +14,13 @@ method is used for a given flash device. To describe the method there should be
a subnode of the flash device that is named 'partitions'. It must have a
'compatible' property, which is used to identify the method to use.
+When a single partition is represented with a DT node (whether it is depends
+on the format used), it may also be described using the above rules
+('compatible' and optionally some extra properties / subnodes). This allows
+describing more complex, hierarchical (multi-level) layouts, and should be
+used if there is some significant relation between partitions or if a
+partition internally uses another partitioning method.
+
Available bindings are listed in the "partitions" subdirectory.
@@ -109,3 +116,42 @@ flash@2 {
};
};
};
+
+flash@3 {
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "bootloader";
+ reg = <0x000000 0x100000>;
+ read-only;
+ };
+
+ firmware@100000 {
+ label = "firmware";
+ reg = <0x100000 0xe00000>;
+ compatible = "brcm,trx";
+ };
+
+ calibration@f00000 {
+ label = "calibration";
+ reg = <0xf00000 0x100000>;
+ compatible = "fixed-partitions";
+ ranges = <0 0xf00000 0x100000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "wifi0";
+ reg = <0x000000 0x080000>;
+ };
+
+ partition@80000 {
+ label = "wifi1";
+ reg = <0x080000 0x080000>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt b/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt
new file mode 100644
index 000000000000..b677147ca4e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt
@@ -0,0 +1,37 @@
+Broadcom TRX Container Partition
+================================
+
+TRX is Broadcom's official firmware format for the BCM947xx boards. It's used by
+most of the vendors building devices based on Broadcom's BCM47xx SoCs and is
+supported by the CFE bootloader.
+
+The design of the TRX format is very minimalistic. Its header contains
+identification fields, a CRC32 checksum and the locations of embedded
+partitions. Its purpose is to store a few partitions in a format that can be
+distributed as a standalone file and written to flash memory.
+
+A container can hold up to 4 partitions. The first partition has to contain a
+device executable binary (e.g. a kernel), as it's what the CFE bootloader
+starts executing. Other partitions can be used for operating system purposes.
+This is useful for systems that keep the kernel and rootfs separated.
+
+TRX doesn't enforce any strict partition boundaries. The only size limit is
+that each partition has to be smaller than 4 GiB.
+
+There are two existing/known TRX variants:
+1) v1 which contains 3 partitions
+2) v2 which contains 4 partitions
+
+There are no separate compatible strings for them, as the version can be
+trivially detected by software parsing the TRX header.
+
+Required properties:
+- compatible : must be "brcm,trx"
+
+Example:
+
+flash@0 {
+ partitions {
+ compatible = "brcm,trx";
+ };
+};
diff --git a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
index 73d336befa08..1123cc6d56ef 100644
--- a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
+++ b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
@@ -45,11 +45,12 @@ Required properties:
number (e.g., 0, 1, 2, etc.)
- #address-cells: see partition.txt
- #size-cells: see partition.txt
-- nand-ecc-strength: see nand.txt
-- nand-ecc-step-size: must be 512. see nand.txt for more details.
Optional properties:
- nand-bus-width: see nand.txt
+- nand-ecc-strength: see nand.txt. If not specified, the ECC strength is
+                     chosen according to the chip requirement and the
+                     available OOB size.
Each nandcs device node may optionally contain a 'partitions' sub-node, which
further contains sub-nodes describing the flash partition mapping. See
@@ -77,7 +78,6 @@ nand-controller@1ac00000 {
reg = <0>;
nand-ecc-strength = <4>;
- nand-ecc-step-size = <512>;
nand-bus-width = <8>;
partitions {
@@ -117,7 +117,6 @@ nand-controller@79b0000 {
nand@0 {
reg = <0>;
nand-ecc-strength = <4>;
- nand-ecc-step-size = <512>;
nand-bus-width = <8>;
partitions {
diff --git a/Documentation/devicetree/bindings/mtd/spear_smi.txt b/Documentation/devicetree/bindings/mtd/spear_smi.txt
index 7248aadd89e4..c41873e92d26 100644
--- a/Documentation/devicetree/bindings/mtd/spear_smi.txt
+++ b/Documentation/devicetree/bindings/mtd/spear_smi.txt
@@ -5,8 +5,6 @@ Required properties:
- reg : Address range of the mtd chip
- #address-cells, #size-cells : Must be present if the device has sub-nodes
representing partitions.
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the STMMAC interrupts
- clock-rate : Functional clock rate of SMI in Hz
diff --git a/Documentation/devicetree/bindings/mtd/spi-nand.txt b/Documentation/devicetree/bindings/mtd/spi-nand.txt
new file mode 100644
index 000000000000..8b51f3b6d55c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/spi-nand.txt
@@ -0,0 +1,5 @@
+SPI NAND flash
+
+Required properties:
+- compatible: should be "spi-nand"
+- reg: should encode the chip-select line used to access the NAND chip
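+
+Example (a minimal sketch; the SPI controller label and chip-select 0 are
+assumptions):
+
+	&spi0 {
+		flash@0 {
+			compatible = "spi-nand";
+			reg = <0>;
+		};
+	};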
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe.txt b/Documentation/devicetree/bindings/net/amd-xgbe.txt
index 4bb624a73b54..93dcb79a5f16 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe.txt
@@ -8,8 +8,6 @@ Required properties:
- SerDes Rx/Tx registers
- SerDes integration registers (1/2)
- SerDes integration registers (2/2)
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the amd-xgbe interrupt(s). The first interrupt
listed is required and is the general device interrupt. If the optional
amd,per-channel-interrupt property is specified, then one additional
diff --git a/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt
index dfe287a5d6f2..b58843f29591 100644
--- a/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt
+++ b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt
@@ -13,14 +13,17 @@ MDIO multiplexer node:
Every non-ethernet PHY requires a compatible so that it could be probed based
on this compatible string.
+Optional properties:
+- clocks: phandle of the core clock which drives the MDIO block.
+
Additional information regarding generic multiplexer properties can be found
at- Documentation/devicetree/bindings/net/mdio-mux.txt
for example:
- mdio_mux_iproc: mdio-mux@6602023c {
+ mdio_mux_iproc: mdio-mux@66020000 {
compatible = "brcm,mdio-mux-iproc";
- reg = <0x6602023c 0x14>;
+ reg = <0x66020000 0x250>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/btusb.txt b/Documentation/devicetree/bindings/net/btusb.txt
index 9c5e663fa1af..37d67926dd6d 100644
--- a/Documentation/devicetree/bindings/net/btusb.txt
+++ b/Documentation/devicetree/bindings/net/btusb.txt
@@ -15,7 +15,6 @@ Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt
Optional properties:
- - interrupt-parent: phandle of the parent interrupt controller
- interrupt-names: (see below)
- interrupts : The interrupt specified by the name "wakeup" is the interrupt
that shall be used for out-of-band wake-on-bt. Driver will
diff --git a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
index 23aa94eab207..903a78da65be 100644
--- a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
+++ b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
@@ -5,7 +5,6 @@ Required properties:
- "holt,hi3110" for HI-3110
- reg: SPI chip select.
- clocks: The clock feeding the CAN controller.
- - interrupt-parent: The parent interrupt controller.
- interrupts: Should contain IRQ line for the CAN controller.
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
index ee3723beb701..188c8bd4eb67 100644
--- a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
+++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
@@ -6,7 +6,6 @@ Required properties:
- "microchip,mcp2515" for MCP2515.
- reg: SPI chip select.
- clocks: The clock feeding the CAN controller.
- - interrupt-parent: The parent interrupt controller.
- interrupts: Should contain IRQ line for the CAN controller.
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/can/xilinx_can.txt b/Documentation/devicetree/bindings/net/can/xilinx_can.txt
index fe38847d8e26..060e2d46bad9 100644
--- a/Documentation/devicetree/bindings/net/can/xilinx_can.txt
+++ b/Documentation/devicetree/bindings/net/can/xilinx_can.txt
@@ -2,20 +2,25 @@ Xilinx Axi CAN/Zynq CANPS controller Device Tree Bindings
---------------------------------------------------------
Required properties:
-- compatible : Should be "xlnx,zynq-can-1.0" for Zynq CAN
- controllers and "xlnx,axi-can-1.00.a" for Axi CAN
- controllers.
-- reg : Physical base address and size of the Axi CAN/Zynq
- CANPS registers map.
+- compatible : Should be:
+ - "xlnx,zynq-can-1.0" for Zynq CAN controllers
+ - "xlnx,axi-can-1.00.a" for Axi CAN controllers
+ - "xlnx,canfd-1.0" for CAN FD controllers
+- reg : Physical base address and size of the controller
+ registers map.
- interrupts : Property with a value describing the interrupt
number.
-- interrupt-parent : Must be core interrupt controller
-- clock-names : List of input clock names - "can_clk", "pclk"
- (For CANPS), "can_clk" , "s_axi_aclk"(For AXI CAN)
+- clock-names : List of input clock names
+ - "can_clk", "pclk" (For CANPS),
+ - "can_clk", "s_axi_aclk" (For AXI CAN and CAN FD).
(See clock bindings for details).
- clocks : Clock phandles (see clock bindings for details).
-- tx-fifo-depth : Can Tx fifo depth.
-- rx-fifo-depth : Can Rx fifo depth.
+- tx-fifo-depth : Can Tx fifo depth (Zynq, Axi CAN).
+- rx-fifo-depth : Can Rx fifo depth (Zynq, Axi CAN, CAN FD in
+ sequential Rx mode).
+- tx-mailbox-count : Can Tx mailbox buffer count (CAN FD).
+- rx-mailbox-count : Can Rx mailbox buffer count (CAN FD in mailbox Rx
+ mode).
Example:
@@ -42,3 +47,14 @@ For Axi CAN Dts file:
tx-fifo-depth = <0x40>;
rx-fifo-depth = <0x40>;
};
+For CAN FD Dts file:
+ canfd_0: canfd@40000000 {
+ compatible = "xlnx,canfd-1.0";
+ clocks = <&clkc 0>, <&clkc 1>;
+ clock-names = "can_clk", "s_axi_aclk";
+ reg = <0x40000000 0x2000>;
+ interrupt-parent = <&intc>;
+ interrupts = <0 59 1>;
+ tx-mailbox-count = <0x20>;
+ rx-fifo-depth = <0x20>;
+ };
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 4cb4925a28ab..41089369f891 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -11,7 +11,6 @@ Required properties:
registers map
- interrupts : property with a value describing the interrupt
number
-- interrupt-parent : The parent interrupt controller
- cpdma_channels : Specifies number of channels in CPDMA
- ale_entries : Specifies the number of entries the ALE can hold
- bd_ram_size : Specifies internal descriptor RAM size
diff --git a/Documentation/devicetree/bindings/net/davicom-dm9000.txt b/Documentation/devicetree/bindings/net/davicom-dm9000.txt
index 5224bf05f6f8..64c159e9cbf7 100644
--- a/Documentation/devicetree/bindings/net/davicom-dm9000.txt
+++ b/Documentation/devicetree/bindings/net/davicom-dm9000.txt
@@ -5,7 +5,6 @@ Required properties:
- reg : physical addresses and sizes of registers, must contain 2 entries:
first entry : address register,
second entry : data register.
-- interrupt-parent : interrupt controller to which the device is connected
- interrupts : interrupt specifier specific to interrupt controller
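Not part of this patch: a minimal sketch with the two register ranges (bus
addresses and interrupt line are assumptions):

	ethernet@18000000 {
		compatible = "davicom,dm9000";
		reg = <0x18000000 0x2>, <0x18000004 0x2>;
		interrupt-parent = <&gpn>;
		interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
	};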
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
index 47a6a7fe0b86..1811e1972a7a 100644
--- a/Documentation/devicetree/bindings/net/dsa/b53.txt
+++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
@@ -24,6 +24,14 @@ Required properties:
"brcm,bcm53018-srab"
"brcm,bcm53019-srab" and the mandatory "brcm,bcm5301x-srab" string
+ For the BCM5831X/BCM1140x SoCs with an integrated switch, must be one of:
+ "brcm,bcm11404-srab"
+ "brcm,bcm11407-srab"
+ "brcm,bcm11409-srab"
+ "brcm,bcm58310-srab"
+ "brcm,bcm58311-srab"
+ "brcm,bcm58313-srab" and the mandatory "brcm,omega-srab" string
+
For the BCM585xx/586XX/88312 SoCs with an integrated switch, must be one of:
"brcm,bcm58522-srab"
"brcm,bcm58523-srab"
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
index 60d50a2b0323..feb007af13cb 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -30,7 +30,6 @@ Required properties:
Optional properties:
- reset-gpios : Should be a gpio specifier for a reset line
-- interrupt-parent : Parent interrupt controller
- interrupts : Interrupt from the switch
- interrupt-controller : Indicates the switch is itself an interrupt
controller. This is used for the PHY interrupts.
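Not part of this patch: a sketch of the optional interrupt wiring on a switch
node (the parent GPIO controller and line number are assumptions):

	switch@0 {
		compatible = "marvell,mv88e6085";
		reg = <0>;
		interrupt-parent = <&gpio0>;
		interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
		interrupt-controller;
		#interrupt-cells = <2>;
	};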
diff --git a/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt b/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
new file mode 100644
index 000000000000..b6ae8541bd55
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
@@ -0,0 +1,153 @@
+Realtek SMI-based Switches
+==========================
+
+The SMI "Simple Management Interface" is a two-wire protocol using
+bit-banged GPIO that, while it reuses the MDIO lines MCK and MDIO,
+does not use the MDIO protocol. This binding defines how to specify
+the SMI-based Realtek devices.
+
+Required properties:
+
+- compatible: must be exactly one of:
+ "realtek,rtl8366"
+ "realtek,rtl8366rb" (4+1 ports)
+ "realtek,rtl8366s" (4+1 ports)
+ "realtek,rtl8367"
+ "realtek,rtl8367b"
+ "realtek,rtl8368s" (8 port)
+ "realtek,rtl8369"
+ "realtek,rtl8370" (8 port)
+
+Required properties:
+- mdc-gpios: GPIO line for the MDC clock line.
+- mdio-gpios: GPIO line for the MDIO data line.
+- reset-gpios: GPIO line for the reset signal.
+
+Optional properties:
+- realtek,disable-leds: if the LED drivers are not used in the
+  hardware design, this disables them so they are not left turned
+  on, wasting power.
+
+Required subnodes:
+
+- interrupt-controller
+
+ This defines an interrupt controller with an IRQ line (typically
+ a GPIO) that will demultiplex and handle the interrupt from the single
+ interrupt line coming out of one of the SMI-based chips. It most
+ importantly provides link up/down interrupts to the PHY blocks inside
+ the ASIC.
+
+Required properties of interrupt-controller:
+
+- interrupt: parent interrupt, see interrupt-controller/interrupts.txt
+- interrupt-controller: see interrupt-controller/interrupts.txt
+- #address-cells: should be <0>
+- #interrupt-cells: should be <1>
+
+- mdio
+
+ This defines the internal MDIO bus of the SMI device, mostly for the
+ purpose of being able to hook the interrupts to the right PHY and
+ the right PHY to the corresponding port.
+
+Required properties of mdio:
+
+- compatible: should be set to "realtek,smi-mdio" for all SMI devices
+
+See net/mdio.txt for additional MDIO bus properties.
+
+See net/dsa/dsa.txt for a list of additional required and optional properties
+and subnodes of DSA switches.
+
+Examples:
+
+switch {
+ compatible = "realtek,rtl8366rb";
+ /* 22 = MDIO (has input reads), 21 = MDC (clock, output only) */
+ mdc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>;
+ mdio-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
+
+ switch_intc: interrupt-controller {
+ /* GPIO 15 provides the interrupt */
+ interrupt-parent = <&gpio0>;
+ interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ phy-handle = <&phy0>;
+ };
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ phy-handle = <&phy1>;
+ };
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ phy-handle = <&phy2>;
+ };
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ phy-handle = <&phy3>;
+ };
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ phy-handle = <&phy4>;
+ };
+ port@5 {
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+
+ mdio {
+ compatible = "realtek,smi-mdio", "dsa-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: phy@0 {
+ reg = <0>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <0>;
+ };
+ phy1: phy@1 {
+ reg = <1>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <1>;
+ };
+ phy2: phy@2 {
+ reg = <2>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <2>;
+ };
+ phy3: phy@3 {
+ reg = <3>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <3>;
+ };
+ phy4: phy@4 {
+ reg = <4>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <12>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt
new file mode 100644
index 000000000000..ed4710c40641
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt
@@ -0,0 +1,81 @@
+Vitesse VSC73xx Switches
+========================
+
+This defines device tree bindings for the Vitesse VSC73xx switch chips.
+Vitesse has been acquired by Microsemi, and Microsemi in turn by
+Microchip, but the chips retain this vendor branding.
+
+The currently supported switch chips are:
+Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
+Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
+Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
+Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
+
+The device tree node is an SPI device, so it must reside inside an SPI
+bus device tree node, see spi/spi-bus.txt
+
+Required properties:
+
+- compatible: must be exactly one of:
+ "vitesse,vsc7385"
+ "vitesse,vsc7388"
+ "vitesse,vsc7395"
+ "vitesse,vsc7398"
+- gpio-controller: indicates that this switch is also a GPIO controller,
+ see gpio/gpio.txt
+- #gpio-cells: this must be set to <2> and indicates that this is a
+  two-cell GPIO controller, see gpio/gpio.txt
+
+Optional properties:
+
+- reset-gpios: a handle to a GPIO line that can issue reset of the chip.
+ It should be tagged as active low.
+
+Required subnodes:
+
+See net/dsa/dsa.txt for a list of additional required and optional properties
+and subnodes of DSA switches.
+
+Examples:
+
+switch@0 {
+ compatible = "vitesse,vsc7395";
+ reg = <0>;
+ /* Specified for 2.5 MHz or below */
+ spi-max-frequency = <2500000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+ port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+ port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+ port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+ vsc: port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac1>;
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt
index f8c33890bc29..299c0dcd67db 100644
--- a/Documentation/devicetree/bindings/net/fsl-fman.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fman.txt
@@ -356,30 +356,7 @@ ethernet@e0000 {
============================================================================
FMan IEEE 1588 Node
-DESCRIPTION
-
-The FMan interface to support IEEE 1588
-
-
-PROPERTIES
-
-- compatible
- Usage: required
- Value type: <stringlist>
- Definition: A standard property.
- Must include "fsl,fman-ptp-timer".
-
-- reg
- Usage: required
- Value type: <prop-encoded-array>
- Definition: A standard property.
-
-EXAMPLE
-
-ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
-};
+Refer to Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
=============================================================================
FMan MDIO Node
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
index abfbeecbcf39..8ee4b1cedae8 100644
--- a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
+++ b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
@@ -9,7 +9,6 @@ Required properties:
"6port-16rss",
"6port-16vf",
"single-port".
-- interrupt-parent: the interrupt parent of this device.
- interrupts: should contain the DSA Fabric and rcb interrupt.
- reg: specifies base physical address(es) and size of the device registers.
The first region is external interface control register base and size(optional,
diff --git a/Documentation/devicetree/bindings/net/ibm,emac.txt b/Documentation/devicetree/bindings/net/ibm,emac.txt
index 44b842b6ca15..c0c14aa3f97c 100644
--- a/Documentation/devicetree/bindings/net/ibm,emac.txt
+++ b/Documentation/devicetree/bindings/net/ibm,emac.txt
@@ -18,7 +18,6 @@
"ibm,emac4". For Axon, thus, we have: "ibm,emac-axon",
"ibm,emac4"
- interrupts : <interrupt mapping for EMAC IRQ and WOL IRQ>
- - interrupt-parent : optional, if needed for interrupt mapping
- reg : <registers mapping>
- local-mac-address : 6 bytes, MAC address
- mal-device : phandle of the associated McMAL node
diff --git a/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt b/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt
index 3d27c68613a6..957e5e5c2927 100644
--- a/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt
+++ b/Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt
@@ -22,7 +22,6 @@ Optional properties:
- marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host
platform. The value will be configured to firmware. This
is needed to work chip's sleep feature as expected (u16).
- - interrupt-parent: phandle of the parent interrupt controller
- interrupt-names: Used only for USB based devices (See below)
- interrupts : specifies the interrupt pin number to the cpu. For SDIO, the
driver will use the first interrupt specified in the interrupt
diff --git a/Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt b/Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt
index aa6313024176..358fed2fab43 100644
--- a/Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt
+++ b/Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt
@@ -6,8 +6,6 @@ The mediatek gigabit switch can be found on Mediatek SoCs (mt7620, mt7621).
Required properties:
- compatible: Should be "mediatek,mt7620-gsw" or "mediatek,mt7621-gsw"
- reg: Address and length of the register set for the device
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the gigabit switches interrupt
- resets: Should contain the gigabit switches resets
- reset-names: Should contain the reset names "gsw"
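Not part of this patch: a minimal sketch of the switch node (register address,
interrupt number and reset specifier are assumptions):

	gsw: switch@10110000 {
		compatible = "mediatek,mt7620-gsw";
		reg = <0x10110000 0x8000>;
		interrupt-parent = <&intc>;
		interrupts = <17>;
		resets = <&rstctrl 23>;
		reset-names = "gsw";
	};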
diff --git a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
new file mode 100644
index 000000000000..14ceb2a5b4e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
@@ -0,0 +1,35 @@
+MediaTek SoC built-in Bluetooth Devices
+=======================================
+
+This device is a serial-attached device to the BTIF device and thus it must
+be a child node of the serial node with BTIF. The dt-binding details for the
+BTIF device can be found in Documentation/devicetree/bindings/serial/8250.txt.
+
+Required properties:
+
+- compatible: Must be
+ "mediatek,mt7622-bluetooth": for MT7622 SoC
+- clocks: Should be the clock specifiers corresponding to the entry in
+ clock-names property.
+- clock-names: Should contain "ref" entries.
+- power-domains: Phandle to the power domain that the device is part of
+
+Example:
+
+ btif: serial@1100c000 {
+ compatible = "mediatek,mt7622-btif",
+ "mediatek,mtk-btif";
+ reg = <0 0x1100c000 0 0x1000>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&pericfg CLK_PERI_BTIF_PD>;
+ clock-names = "main";
+ reg-shift = <2>;
+ reg-io-width = <4>;
+
+ bluetooth {
+ compatible = "mediatek,mt7622-bluetooth";
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
+ clocks = <&clk25m>;
+ clock-names = "ref";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index 53c13ee384a4..503f2b9194e2 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -30,9 +30,6 @@ Required properties:
- mediatek,pctl: phandle to the syscon node that handles the ports slew rate
and driver current: only for MT2701 and MT7623 SoC
-Optional properties:
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
* Ethernet MAC node
Required properties:
diff --git a/Documentation/devicetree/bindings/net/microchip,enc28j60.txt b/Documentation/devicetree/bindings/net/microchip,enc28j60.txt
index 44dff53d4dda..24626e082b83 100644
--- a/Documentation/devicetree/bindings/net/microchip,enc28j60.txt
+++ b/Documentation/devicetree/bindings/net/microchip,enc28j60.txt
@@ -8,9 +8,6 @@ the SPI master node.
Required properties:
- compatible: Should be "microchip,enc28j60"
- reg: Specify the SPI chip select the ENC28J60 is wired to
-- interrupt-parent: Specify the phandle of the source interrupt, see interrupt
- binding documentation for details. Usually this is the GPIO bank
- the interrupt line is wired to.
- interrupts: Specify the interrupt index within the interrupt controller (referred
to above in interrupt-parent) and interrupt type. The ENC28J60 natively
generates falling edge interrupts, however, additional board logic
diff --git a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
index 92486733df71..cfaf88998918 100644
--- a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
+++ b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible: Should be "nxp,nxp-nci-i2c".
- clock-frequency: I²C work frequency.
- reg: address on the bus
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
- enable-gpios: Output GPIO pin used for enabling/disabling the chip
- firmware-gpios: Output GPIO pin used to enter firmware download mode
diff --git a/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt b/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt
index 122460e42e3c..2efe3886b95b 100644
--- a/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt
+++ b/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible: Should be "nxp,pn532-i2c" or "nxp,pn533-i2c".
- clock-frequency: I²C work frequency.
- reg: address on the bus
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
Optional SoC Specific Properties:
diff --git a/Documentation/devicetree/bindings/net/nfc/pn544.txt b/Documentation/devicetree/bindings/net/nfc/pn544.txt
index 538a86f7b2b0..5b937403fed6 100644
--- a/Documentation/devicetree/bindings/net/nfc/pn544.txt
+++ b/Documentation/devicetree/bindings/net/nfc/pn544.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible: Should be "nxp,pn544-i2c".
- clock-frequency: I²C work frequency.
- reg: address on the bus
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
- enable-gpios: Output GPIO pin used for enabling/disabling the PN544
- firmware-gpios: Output GPIO pin used to enter firmware download mode
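Not part of this patch: a minimal sketch of the device on an I2C bus (bus
address, GPIO banks and line numbers are assumptions):

	nfc@28 {
		compatible = "nxp,pn544-i2c";
		reg = <0x28>;
		clock-frequency = <400000>;
		interrupt-parent = <&gpio1>;
		interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
		enable-gpios = <&gpio0 10 GPIO_ACTIVE_HIGH>;
		firmware-gpios = <&gpio0 11 GPIO_ACTIVE_HIGH>;
	};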
diff --git a/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt b/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt
index ed5b3eaadb39..f02f6fb7f81c 100644
--- a/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt
+++ b/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: Should be "samsung,s3fwrn5-i2c".
- reg: address on the bus
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
- s3fwrn5,en-gpios: Output GPIO pin used for enabling/disabling the chip
- s3fwrn5,fw-gpios: Output GPIO pin used to enter firmware mode and
diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt b/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt
index b46d473be425..baa8f8133d19 100644
--- a/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt
+++ b/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible: Should be "st,st21nfcb-i2c" or "st,st21nfcc-i2c".
- clock-frequency: I²C work frequency.
- reg: address on the bus
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
- reset-gpios: Output GPIO pin used to reset the ST21NFCB
diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt b/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt
index 54ce8e7ac681..d33343330b94 100644
--- a/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt
+++ b/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: Should be "st,st21nfcb-spi"
- spi-max-frequency: Maximum SPI frequency (<= 4000000).
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
- reset-gpios: Output GPIO pin used to reset the ST21NFCB
diff --git a/Documentation/devicetree/bindings/net/nfc/st21nfca.txt b/Documentation/devicetree/bindings/net/nfc/st21nfca.txt
index 5ee9440fa9ad..b8bd90f80e12 100644
--- a/Documentation/devicetree/bindings/net/nfc/st21nfca.txt
+++ b/Documentation/devicetree/bindings/net/nfc/st21nfca.txt
@@ -4,8 +4,6 @@ Required properties:
- compatible: Should be "st,st21nfca-i2c".
- clock-frequency: I²C work frequency.
- reg: address on the bus
-- interrupt-parent: phandle for the interrupt gpio controller
-- interrupts: GPIO interrupt to which the chip is connected
- enable-gpios: Output GPIO pin used for enabling/disabling the ST21NFCA
Optional SoC Specific Properties:
diff --git a/Documentation/devicetree/bindings/net/nfc/st95hf.txt b/Documentation/devicetree/bindings/net/nfc/st95hf.txt
index 08a202e00d47..3f373a1e20ff 100644
--- a/Documentation/devicetree/bindings/net/nfc/st95hf.txt
+++ b/Documentation/devicetree/bindings/net/nfc/st95hf.txt
@@ -17,9 +17,6 @@ Required properties:
- enable-gpio: GPIO line to enable ST95HF transceiver.
-- interrupt-parent : Standard way to specify the controller to which
- ST95HF transceiver's interrupt is routed.
-
- interrupts : Standard way to define ST95HF transceiver's out
interrupt.
diff --git a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
index 5ca9362ef127..ba1934b950e5 100644
--- a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
+++ b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible: Should be "ti,trf7970a".
- spi-max-frequency: Maximum SPI frequency (<= 2000000).
-- interrupt-parent: phandle of parent interrupt handler.
- interrupts: A single interrupt specifier.
- ti,enable-gpios: One or two GPIO entries used for 'EN' and 'EN2' pins on the
TRF7970A. EN2 is optional.
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index d2169a56f5e3..17c1d2bd00f6 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -3,8 +3,6 @@ PHY nodes
Required properties:
- interrupts : interrupt specifier for the sole interrupt.
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
- reg : The ID number for the phy, usually a small integer
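Not part of this patch: a minimal sketch of a PHY node on an MDIO bus (the
interrupt controller phandle and line number are assumptions):

	ethernet-phy@0 {
		compatible = "ethernet-phy-ieee802.3-c22";
		reg = <0>;
		interrupt-parent = <&PIC>;
		interrupts = <35 IRQ_TYPE_EDGE_RISING>;
	};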
Optional Properties:
diff --git a/Documentation/devicetree/bindings/net/qca,qca7000.txt b/Documentation/devicetree/bindings/net/qca,qca7000.txt
index 3987846b3fd3..e4a8a51086df 100644
--- a/Documentation/devicetree/bindings/net/qca,qca7000.txt
+++ b/Documentation/devicetree/bindings/net/qca,qca7000.txt
@@ -19,7 +19,6 @@ Required properties:
- spi-cpol : Must be set
Optional properties:
-- interrupt-parent : Specify the pHandle of the source interrupt
- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at.
Numbers smaller than 1000000 or greater than 16000000
are invalid. Missing the property will set the SPI
diff --git a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
index 0ea18a53cc29..824c0e23c544 100644
--- a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
@@ -10,12 +10,25 @@ device the slave device is attached to.
Required properties:
- compatible: should contain one of the following:
* "qcom,qca6174-bt"
+ * "qcom,wcn3990-bt"
+
+Optional properties for compatible string qcom,qca6174-bt:
-Optional properties:
- enable-gpios: gpio specifier used to enable chip
- clocks: clock provided to the controller (SUSCLK_32KHZ)
-Example:
+Required properties for compatible string qcom,wcn3990-bt:
+
+ - vddio-supply: VDD_IO supply regulator handle.
+ - vddxo-supply: VDD_XO supply regulator handle.
+ - vddrf-supply: VDD_RF supply regulator handle.
+ - vddch0-supply: VDD_CH0 supply regulator handle.
+
+Optional properties for compatible string qcom,wcn3990-bt:
+
+ - max-speed: see Documentation/devicetree/bindings/serial/slave-device.txt
+
+Examples:
serial@7570000 {
label = "BT-UART";
@@ -28,3 +41,15 @@ serial@7570000 {
clocks = <&divclk4>;
};
};
+
+serial@898000 {
+ bluetooth {
+ compatible = "qcom,wcn3990-bt";
+
+ vddio-supply = <&vreg_s4a_1p8>;
+ vddxo-supply = <&vreg_l7a_1p8>;
+ vddrf-supply = <&vreg_l17a_1p3>;
+ vddch0-supply = <&vreg_l25a_3p3>;
+ max-speed = <3200000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/ralink,rt2880-net.txt b/Documentation/devicetree/bindings/net/ralink,rt2880-net.txt
index 88b095d1f13b..9fe1a0a22e44 100644
--- a/Documentation/devicetree/bindings/net/ralink,rt2880-net.txt
+++ b/Documentation/devicetree/bindings/net/ralink,rt2880-net.txt
@@ -14,8 +14,6 @@ Required properties:
"ralink,rt3050-eth", "ralink,rt3883-eth", "ralink,rt5350-eth",
"mediatek,mt7620-eth", "mediatek,mt7621-eth"
- reg: Address and length of the register set for the device
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the frame engines interrupt
- resets: Should contain the frame engines resets
- reset-names: Should contain the reset names "fe". If a switch is present
diff --git a/Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt b/Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt
index 2e79bd376c56..87e315856efa 100644
--- a/Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt
+++ b/Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt
@@ -7,8 +7,6 @@ SoCs (RT3x5x, RT5350, MT76x8).
Required properties:
- compatible: Should be "ralink,rt3050-esw"
- reg: Address and length of the register set for the device
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the embedded switches interrupt
- resets: Should contain the embedded switches resets
- reset-names: Should contain the reset names "esw"
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index fac897d54423..19740d01cab0 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -47,8 +47,6 @@ Required properties:
- pinctrl-0: phandle, referring to a default pin configuration node.
Optional properties:
-- interrupt-parent: the phandle for the interrupt controller that services
- interrupts for this device.
- interrupt-names: A list of interrupt names.
For the R-Car Gen 3 SoCs this property is mandatory;
it should include one entry per channel, named "ch%u",
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
index 9c16ee2965a2..3b71da7e8742 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
@@ -4,6 +4,7 @@ The device node has following properties.
Required properties:
- compatible: should be "rockchip,<name>-gmac"
+ "rockchip,px30-gmac": found on PX30 SoCs
"rockchip,rk3128-gmac": found on RK312x SoCs
"rockchip,rk3228-gmac": found on RK322x SoCs
"rockchip,rk3288-gmac": found on RK3288 SoCs
diff --git a/Documentation/devicetree/bindings/net/samsung-sxgbe.txt b/Documentation/devicetree/bindings/net/samsung-sxgbe.txt
index 888c250197fe..46e591178911 100644
--- a/Documentation/devicetree/bindings/net/samsung-sxgbe.txt
+++ b/Documentation/devicetree/bindings/net/samsung-sxgbe.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible: Should be "samsung,sxgbe-v2.0a"
- reg: Address and length of the register set for the device
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the SXGBE interrupts
These interrupts are ordered with the fixed interrupts first, followed by the
variable transmit DMA, receive DMA and lpi interrupts.
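Not part of this patch: a minimal sketch (register address and interrupt
numbers are placeholders; the fixed interrupt comes first, the per-channel
ones after):

	ethernet@1a040000 {
		compatible = "samsung,sxgbe-v2.0a";
		reg = <0 0x1a040000 0 0x10000>;
		interrupts = <0 209 4>, <0 185 4>, <0 186 4>, <0 187 4>;
	};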
diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt
index 82a4cf2c145d..76db9f13ad96 100644
--- a/Documentation/devicetree/bindings/net/sh_eth.txt
+++ b/Documentation/devicetree/bindings/net/sh_eth.txt
@@ -35,8 +35,6 @@ Required properties:
- pinctrl-0: phandle, referring to a default pin configuration node.
Optional properties:
-- interrupt-parent: the phandle for the interrupt controller that services
- interrupts for this device.
- pinctrl-names: pin configuration state name ("default").
- renesas,no-ether-link: boolean, specify when a board does not provide a proper
Ether LINK signal.
diff --git a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
index 21d27aa4c68c..36f1aef585f0 100644
--- a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
+++ b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
@@ -83,8 +83,6 @@ Required properties:
- "snps,dwc-qos-ethernet-4.10" (deprecated):
- "phy_ref_clk"
- "apb_clk"
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the core's combined interrupt signal
- phy-mode: See ethernet.txt file in the same directory
- resets: Phandle and reset specifiers for each entry in reset-names, in the
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 3a28a5d8857d..cb694062afff 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -1,11 +1,10 @@
-* STMicroelectronics 10/100/1000 Ethernet driver (GMAC)
+* STMicroelectronics 10/100/1000/2500/10000 Ethernet (GMAC/XGMAC)
Required properties:
-- compatible: Should be "snps,dwmac-<ip_version>", "snps,dwmac"
+- compatible: Should be "snps,dwmac-<ip_version>", "snps,dwmac" or
+ "snps,dwxgmac-<ip_version>", "snps,dwxgmac".
For backwards compatibility: "st,spear600-gmac" is also supported.
- reg: Address and length of the register set for the device
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the STMMAC interrupts
- interrupt-names: Should contain a list of interrupt names corresponding to
the interrupts in the interrupts property, if available.
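Not part of this patch: a minimal sketch of an XGMAC instance using the new
compatible scheme (the 2.10 version string, address and interrupt are
assumptions):

	ethernet@1f0000 {
		compatible = "snps,dwxgmac-2.10", "snps,dwxgmac";
		reg = <0x1f0000 0x10000>;
		interrupts = <0 55 4>;
		interrupt-names = "macirq";
		phy-mode = "xgmii";
	};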
diff --git a/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt b/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt
index 86602f264dce..cffb2d6876e3 100644
--- a/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt
+++ b/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt
@@ -11,8 +11,6 @@ Required properties:
Optional properties:
- brcm,drive-strength : drive strength used for SDIO pins on device in mA
(default = 6).
- - interrupt-parent : the phandle for the interrupt controller to which the
- device interrupts are connected.
- interrupts : specifies attributes for the out-of-band interrupt (host-wake).
When not specified the device will use in-band SDIO interrupts.
- interrupt-names : name of the out-of-band interrupt, which must be set
diff --git a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
index 59de8646862d..9bf9bbac16e2 100644
--- a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
+++ b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
@@ -29,7 +29,6 @@ Optional properties:
- marvell,wakeup-pin : a wakeup pin number of wifi chip which will be configured
to firmware. Firmware will wakeup the host using this pin
during suspend/resume.
- - interrupt-parent: phandle of the parent interrupt controller
- interrupts : interrupt pin number to the cpu. The driver will request an irq
based on this interrupt number. During system suspend, the irq will be enabled
so that the wifi chip can wake up the host platform under certain conditions.
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt b/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
index 189ae5cad8f7..bb2fcde6f7ff 100644
--- a/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
+++ b/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
@@ -8,8 +8,6 @@ Required properties:
- reg : Chip select address of device
- spi-max-frequency : Maximum SPI clocking speed of device in Hz
- interrupts : Should contain interrupt line
-- interrupt-parent : Should be the phandle for the interrupt controller
- that services interrupts for this device
- vio-supply : phandle to regulator providing VIO
- ti,power-gpio : GPIO connected to chip's PMEN pin
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt b/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt
index 8f9ced076fe1..cb5c9e1569ca 100644
--- a/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt
+++ b/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt
@@ -17,9 +17,7 @@ Required properties:
* "ti,wl1837"
- reg : Chip select address of device
- spi-max-frequency : Maximum SPI clocking speed of device in Hz
-- interrupt-parent, interrupts :
- Should contain parameters for 1 interrupt line.
- Interrupt parameters: parent, line number, type.
+- interrupts : Should contain parameters for 1 interrupt line.
- vwlan-supply : Points to the node of the regulator that powers/enables the
		 wl12xx/wl18xx chip
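Not part of this patch: a minimal sketch of a wlcore node on an SPI bus (chip
select, frequency, interrupt line and regulator phandle are assumptions; the
interrupt parent is inherited or given explicitly):

	wlcore: wlcore@1 {
		compatible = "ti,wl1837";
		reg = <1>;
		spi-max-frequency = <48000000>;
		interrupts = <27 IRQ_TYPE_EDGE_RISING>;
		vwlan-supply = <&wlan_en_reg>;
	};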
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt b/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt
index f42f6b0f1bc7..9306c4dadd46 100644
--- a/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt
+++ b/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt
@@ -20,8 +20,6 @@ Required properties:
- interrupts : specifies attributes for the out-of-band interrupt.
Optional properties:
- - interrupt-parent : the phandle for the interrupt controller to which the
- device interrupts are connected.
- ref-clock-frequency : ref clock frequency in Hz
- tcxo-clock-frequency : tcxo clock frequency in Hz
diff --git a/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt b/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
index 09cd3bc4d038..9514c327d31b 100644
--- a/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
+++ b/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
@@ -7,7 +7,6 @@ Required properties:
- reg-names: must include the following entries:
"csr": CSR registers
"vector_slave": vectors slave port region
-- interrupt-parent: interrupt source phandle.
- interrupts: specifies the interrupt source of the parent interrupt
controller. The format of the interrupt specifier depends on the
parent interrupt controller.
diff --git a/Documentation/devicetree/bindings/pci/altera-pcie.txt b/Documentation/devicetree/bindings/pci/altera-pcie.txt
index a1dc9366a8fc..6c396f17c91a 100644
--- a/Documentation/devicetree/bindings/pci/altera-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/altera-pcie.txt
@@ -6,7 +6,6 @@ Required properties:
- reg-names: must include the following entries:
"Txs": TX slave port region
"Cra": Control register access region
-- interrupt-parent: interrupt source phandle.
- interrupts: specifies the interrupt source of the parent interrupt
controller. The format of the interrupt specifier depends
on the parent interrupt controller.
diff --git a/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt b/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt
index b8e48b4762b2..df065aa53a83 100644
--- a/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt
@@ -65,7 +65,6 @@ When the iProc event queue based MSI is used, one needs to define the
following properties in the MSI device node:
- compatible: Must be "brcm,iproc-msi"
- msi-controller: claims itself as an MSI controller
-- interrupt-parent: Link to its parent interrupt device
- interrupts: List of interrupt IDs from its parent interrupt device
Optional properties:
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt
index 9a305237fa6e..4a0475e2ba7e 100644
--- a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt
+++ b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt
@@ -9,6 +9,9 @@ Required properties:
Optional properties:
- max-functions: Maximum number of functions that can be configured (default 1).
+- phys: From PHY bindings: List of Generic PHY phandles. One per lane if more
+ than one in the list. If only one PHY listed it must manage all lanes.
+- phy-names: List of names to identify the PHY.
Example:
@@ -19,4 +22,6 @@ pcie@fc000000 {
reg-names = "reg", "mem";
cdns,max-outbound-regions = <16>;
max-functions = /bits/ 8 <8>;
+ phys = <&ep_phy0 &ep_phy1>;
+ phy-names = "pcie-lane0","pcie-lane1";
};
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt
index 20a33f38f69d..91de69c713a9 100644
--- a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt
+++ b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt
@@ -24,6 +24,9 @@ Optional properties:
translations (default 32)
- vendor-id: The PCI vendor ID (16 bits, default is design dependent)
- device-id: The PCI device ID (16 bits, default is design dependent)
+- phys: From PHY bindings: List of Generic PHY phandles. One per lane if more
+ than one in the list. If only one PHY listed it must manage all lanes.
+- phy-names: List of names to identify the PHY.
Example:
@@ -57,4 +60,7 @@ pcie@fb000000 {
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
msi-parent = <&its_pci>;
+
+ phys = <&pcie_phy0>;
+ phy-names = "pcie-phy";
};
diff --git a/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
index 89a84f8aa621..5f8cb4962f8d 100644
--- a/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
+++ b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
@@ -41,7 +41,6 @@ Mandatory subnodes:
- For "faraday,ftpci100" a node representing the interrupt-controller inside the
host bridge is mandatory. It has the following mandatory properties:
- interrupt: see interrupt-controller/interrupts.txt
- - interrupt-parent: see interrupt-controller/interrupts.txt
- interrupt-controller: see interrupt-controller/interrupts.txt
- #address-cells: set to <0>
- #interrupt-cells: set to <1>
diff --git a/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt b/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
index 65038aa642e5..a618d4787dd7 100644
--- a/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
@@ -26,9 +26,6 @@ Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an
interrupt source. The value must be 1.
-- interrupt-parent : phandle to the interrupt controller that
- it is attached to, it should be set to gic to point to
- ARM's Generic Interrupt Controller node in system DT.
- interrupts: The interrupt line of the PCIe controller
last cell of this field is set to 4 to
denote it as IRQ_TYPE_LEVEL_HIGH type interrupt.
diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt
index 3d4a209b0fd0..4dd17de549a7 100644
--- a/Documentation/devicetree/bindings/pci/pci-keystone.txt
+++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt
@@ -17,7 +17,6 @@ reg: index 1 is the base address and length of DW application registers.
pcie_msi_intc : Interrupt controller device node for MSI IRQ chip
interrupt-cells: should be set to 1
- interrupt-parent: Parent interrupt controller phandle
interrupts: GIC interrupt lines connected to PCI MSI interrupt lines
Example:
@@ -37,8 +36,6 @@ pcie_msi_intc : Interrupt controller device node for MSI IRQ chip
pcie_intc: Interrupt controller device node for Legacy IRQ chip
interrupt-cells: should be set to 1
- interrupt-parent: Parent interrupt controller phandle
- interrupts: GIC interrupt lines connected to PCI Legacy interrupt lines
Example:
pcie_intc: legacy-interrupt-controller {
diff --git a/Documentation/devicetree/bindings/pci/ralink,rt3883-pci.txt b/Documentation/devicetree/bindings/pci/ralink,rt3883-pci.txt
index a04ab1b76211..ffba4f63d71f 100644
--- a/Documentation/devicetree/bindings/pci/ralink,rt3883-pci.txt
+++ b/Documentation/devicetree/bindings/pci/ralink,rt3883-pci.txt
@@ -41,9 +41,6 @@
- #interrupt-cells: specifies the number of cells needed to encode an
interrupt source. The value must be 1.
- - interrupt-parent: the phandle for the interrupt controller that
- services interrupts for this device.
-
- interrupts: specifies the interrupt source of the parent interrupt
controller. The format of the interrupt specifier depends on the
parent interrupt controller.
diff --git a/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt b/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
index cafe2197dad9..c3a29c5feea3 100644
--- a/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
+++ b/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
@@ -3,7 +3,7 @@
Required properties:
- compatible: "qca,ar7100-usb-phy"
- #phy-cells: should be 0
-- reset-names: "usb-phy"[, "usb-suspend-override"]
+- reset-names: "phy"[, "suspend-override"]
- resets: references to the reset controllers
Example:
@@ -11,7 +11,7 @@ Example:
usb-phy {
compatible = "qca,ar7100-usb-phy";
- reset-names = "usb-phy", "usb-suspend-override";
+ reset-names = "phy", "suspend-override";
resets = <&rst 4>, <&rst 3>;
#phy-cells = <0>;
diff --git a/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
index 8fb5a53775e8..81b58dddd3ed 100644
--- a/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
@@ -19,6 +19,10 @@ Required Properties:
defines the interrupt number, the second encodes
the trigger flags described in
bindings/interrupt-controller/interrupts.txt
+- interrupts: The interrupt outputs from the controller. There is one GPIO
+ interrupt per GPIO bank. The number of interrupts listed depends
+ on the number of GPIO banks on the SoC. The interrupts must be
+ ordered by bank, starting with bank 0.
Please refer to pinctrl-bindings.txt in this directory for details of the
common pinctrl bindings used by client devices, including the meaning of the
@@ -180,6 +184,12 @@ Example:
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
uart2-default: uart2-default {
pinmux {
diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt
index 61ac75706cc9..04d16fb69eb7 100644
--- a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt
@@ -36,6 +36,8 @@ Optional properties:
- GENERIC_PINCONFIG: generic pinconfig options to use, bias-disable,
bias-pull-down, bias-pull-up, drive-open-drain, input-schmitt-enable,
input-debounce, output-low, output-high.
+- atmel,drive-strength: 0 or 1 for low drive, 2 for medium drive and 3 for
+high drive. The default value is low drive.
Example:
@@ -66,6 +68,7 @@ Example:
pinmux = <PIN_PB0>,
<PIN_PB5>;
bias-pull-up;
+ atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
};
pinctrl_sdmmc1_default: sdmmc1_default {
diff --git a/Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt
index f8fa28ce163e..0a2d5516e1f3 100644
--- a/Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/berlin,pinctrl.txt
@@ -23,7 +23,8 @@ Required properties:
"marvell,berlin2q-system-pinctrl",
"marvell,berlin4ct-avio-pinctrl",
"marvell,berlin4ct-soc-pinctrl",
- "marvell,berlin4ct-system-pinctrl"
+ "marvell,berlin4ct-system-pinctrl",
+ "syna,as370-soc-pinctrl"
Required subnode-properties:
- groups: a list of strings describing the group names.
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.txt
new file mode 100644
index 000000000000..66de75090458
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.txt
@@ -0,0 +1,36 @@
+* Freescale IMX8MQ IOMUX Controller
+
+Please refer to fsl,imx-pinctrl.txt and pinctrl-bindings.txt in this directory
+for common binding part and usage.
+
+Required properties:
+- compatible: "fsl,imx8mq-iomuxc"
+- reg: should contain the base physical address and size of the iomuxc
+ registers.
+
+Required properties in sub-nodes:
+- fsl,pins: each entry consists of 6 integers and represents the mux and config
+ setting for one pin. The first 5 integers <mux_reg conf_reg input_reg mux_val
+ input_val> are specified using a PIN_FUNC_ID macro, which can be found in
+ imx8mq-pinfunc.h under device tree source folder. The last integer CONFIG is
+ the pad setting value like pull-up on this pin. Please refer to i.MX8M Quad
+ Reference Manual for detailed CONFIG settings.
+
+Examples:
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+};
+
+iomuxc: pinctrl@30330000 {
+ compatible = "fsl,imx8mq-iomuxc";
+ reg = <0x0 0x30330000 0x0 0x10000>;
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_UART1_RXD_UART1_DCE_RX 0x49
+ MX8MQ_IOMUXC_UART1_TXD_UART1_DCE_TX 0x49
+ >;
+ };
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-pinmux.txt
index ecb5c0d25218..f4d06bb0b55a 100644
--- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-pinmux.txt
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-pinmux.txt
@@ -17,7 +17,7 @@ Tegra124 adds the following optional properties for pin configuration subnodes.
The macros for options are defined in the
include/dt-binding/pinctrl/pinctrl-tegra.h.
- nvidia,enable-input: Integer. Enable the pin's input path.
- enable :TEGRA_PIN_ENABLE0 and
+ enable :TEGRA_PIN_ENABLE and
disable or output only: TEGRA_PIN_DISABLE.
- nvidia,open-drain: Integer.
enable: TEGRA_PIN_ENABLE.
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt
index a62d82d5fbe9..85f211436b8e 100644
--- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt
@@ -44,7 +44,7 @@ Optional subnode-properties:
- nvidia,tristate: Integer.
0: drive, 1: tristate.
- nvidia,enable-input: Integer. Enable the pin's input path.
- enable :TEGRA_PIN_ENABLE0 and
+ enable :TEGRA_PIN_ENABLE and
disable or output only: TEGRA_PIN_DISABLE.
- nvidia,open-drain: Integer.
enable: TEGRA_PIN_ENABLE.
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
index def8fcad8941..3b695131c51b 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
@@ -16,8 +16,6 @@ If the property interrupt-controller is defined, following property is required
- reg-names: A string describing the "reg" entries. Must contain "eint".
- interrupts : The interrupt output from the controller.
- #interrupt-cells: Should be two.
-- interrupt-parent: Phandle of the interrupt parent to which the external
- GPIO interrupts are forwarded to.
Please refer to pinctrl-bindings.txt in this directory for details of the
common pinctrl bindings used by client devices, including the meaning of the
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-sx150x.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-sx150x.txt
index bf76867168e9..4023bad2fe39 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-sx150x.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-sx150x.txt
@@ -25,8 +25,6 @@ Required properties:
- gpio-controller: Marks the device as a GPIO controller.
Optional properties :
-- interrupt-parent: phandle of the parent interrupt controller.
-
- interrupts: Interrupt specifier for the controllers interrupt.
- interrupt-controller: Marks the device as an interrupt controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
index a752a4716486..c2dbb3e8d840 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
@@ -10,6 +10,11 @@ Required properties:
- #gpio-cells : Should be two.
The first cell is the gpio pin number and the
second cell is used for optional parameters.
+- gpio-ranges: see ../gpio/gpio.txt
+
+Optional properties:
+
+- gpio-reserved-ranges: see ../gpio/gpio.txt
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -67,6 +72,7 @@ Example:
pinctrl-names = "default";
pinctrl-0 = <&gsbi5_uart_default>;
+ gpio-ranges = <&msmgpio 0 0 90>;
gsbi5_uart_default: gsbi5_uart_default {
mux {
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
index c4ea61ac56f2..68e93d5b7ede 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
@@ -40,6 +40,14 @@ MSM8960 platform.
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -154,6 +162,7 @@ Example:
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 147>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <0 208 0>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
index 93374f478b9e..991be0cd0948 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
@@ -13,6 +13,11 @@ Required properties:
- #gpio-cells : Should be two.
The first cell is the gpio pin number and the
second cell is used for optional parameters.
+- gpio-ranges: see ../gpio/gpio.txt
+
+Optional properties:
+
+- gpio-reserved-ranges: see ../gpio/gpio.txt
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -64,6 +69,7 @@ Example:
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 100>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <0 208 0>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
index 6e88e91feb11..7ed56a1b70fc 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
@@ -10,6 +10,11 @@ Required properties:
- #gpio-cells : Should be two.
The first cell is the gpio pin number and the
second cell is used for optional parameters.
+- gpio-ranges: see ../gpio/gpio.txt
+
+Optional properties:
+
+- gpio-reserved-ranges: see ../gpio/gpio.txt
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -67,6 +72,7 @@ Example:
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&pinmux 0 0 69>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <0 32 0x4>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8074-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8074-pinctrl.txt
index 407b9443629d..6dd72f8599e9 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8074-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8074-pinctrl.txt
@@ -40,6 +40,14 @@ IPQ8074 platform.
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -148,6 +156,7 @@ Example:
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 70>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,mdm9615-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,mdm9615-pinctrl.txt
index 1b52f01ddcb7..86ecdcfc4fb8 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,mdm9615-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,mdm9615-pinctrl.txt
@@ -40,6 +40,14 @@ MDM9615 platform.
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -127,6 +135,7 @@ Example:
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&msmgpio 0 0 88>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <0 16 0x4>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.txt
index df9a838ec5f9..cdc4787e59d2 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.txt
@@ -10,6 +10,11 @@ Required properties:
- #gpio-cells : Should be two.
The first cell is the gpio pin number and the
second cell is used for optional parameters.
+- gpio-ranges: see ../gpio/gpio.txt
+
+Optional properties:
+
+- gpio-reserved-ranges: see ../gpio/gpio.txt
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -62,6 +67,7 @@ Example:
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&msmgpio 0 0 173>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <0 16 0x4>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8916-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8916-pinctrl.txt
index 498caff6029e..195a7a0ef0cc 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8916-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8916-pinctrl.txt
@@ -40,6 +40,14 @@ MSM8916 platform.
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -162,6 +170,7 @@ Example:
interrupts = <0 208 0>;
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 122>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt
index eb8d8aa41f20..5034eb6653c7 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt
@@ -40,6 +40,14 @@ MSM8960 platform.
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -156,6 +164,7 @@ Example:
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&msmgpio 0 0 152>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <0 16 0x4>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
index 453bd7c76d6b..c22e6c425d0b 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
@@ -10,6 +10,11 @@ Required properties:
- #gpio-cells : Should be two.
The first cell is the gpio pin number and the
second cell is used for optional parameters.
+- gpio-ranges: see ../gpio/gpio.txt
+
+Optional properties:
+
+- gpio-reserved-ranges: see ../gpio/gpio.txt
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -87,6 +92,7 @@ Example:
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&msmgpio 0 0 146>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <0 208 0>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8994-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8994-pinctrl.txt
index 13cd629f896e..f15443f6e78e 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8994-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8994-pinctrl.txt
@@ -42,6 +42,14 @@ MSM8994 platform.
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -160,6 +168,7 @@ Example:
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&msmgpio 0 0 146>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.txt
index aaf01e929eea..fa97f609fe45 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.txt
@@ -40,6 +40,14 @@ MSM8996 platform.
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -180,6 +188,7 @@ Example:
reg = <0x01010000 0x300000>;
interrupts = <0 208 0>;
gpio-controller;
+ gpio-ranges = <&tlmm 0 0 150>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
index 5c25fcb29fb5..ffd4345415f3 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
@@ -7,6 +7,7 @@ PMIC's from Qualcomm.
Usage: required
Value type: <string>
Definition: must be one of:
+ "qcom,pm8005-gpio"
"qcom,pm8018-gpio"
"qcom,pm8038-gpio"
"qcom,pm8058-gpio"
@@ -15,7 +16,7 @@ PMIC's from Qualcomm.
"qcom,pm8921-gpio"
"qcom,pm8941-gpio"
"qcom,pm8994-gpio"
- "qcom,pmi8994-gpio"
+ "qcom,pm8998-gpio"
"qcom,pma8084-gpio"
"qcom,pmi8994-gpio"
@@ -78,6 +79,7 @@ to specify in a pin configuration subnode:
Value type: <string-array>
Definition: List of gpio pins affected by the properties specified in
this subnode. Valid pins are:
+ gpio1-gpio4 for pm8005
gpio1-gpio6 for pm8018
gpio1-gpio12 for pm8038
gpio1-gpio40 for pm8058
@@ -86,7 +88,7 @@ to specify in a pin configuration subnode:
gpio1-gpio44 for pm8921
gpio1-gpio36 for pm8941
gpio1-gpio22 for pm8994
- gpio1-gpio10 for pmi8994
+ gpio1-gpio26 for pm8998
gpio1-gpio22 for pma8084
gpio1-gpio10 for pmi8994
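
A minimal sketch of a pin configuration subnode for the newly added pm8998
entry (the unit address, reg value and pin choice are illustrative
assumptions; only gpio1-gpio26 are valid for pm8998):

	pm8998_gpio: gpios@c000 {
		compatible = "qcom,pm8998-gpio";
		reg = <0xc000>;
		gpio-controller;
		#gpio-cells = <2>;

		vol_up: vol-up {
			/* any of gpio1-gpio26 is valid on pm8998 */
			pins = "gpio6";
			function = "normal";
		};
	};
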
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
index 5e00a21de2bf..70659c917bdc 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
@@ -124,8 +124,6 @@ used as system wakeup events.
A. External GPIO Interrupts: For supporting external gpio interrupts, the
following properties should be specified in the pin-controller device node.
- - interrupt-parent: phandle of the interrupt parent to which the external
- GPIO interrupts are forwarded to.
- interrupts: interrupt specifier for the controller. The format and value of
the interrupt specifier depends on the interrupt parent for the controller.
@@ -145,8 +143,13 @@ A. External GPIO Interrupts: For supporting external gpio interrupts, the
B. External Wakeup Interrupts: For supporting external wakeup interrupts, a
child node representing the external wakeup interrupt controller should be
- included in the pin-controller device node. This child node should include
- the following properties.
+ included in the pin-controller device node.
+
+  Only one pin-controller device node can include an external wakeup
+  interrupts child node (in other words, only one External Wakeup Interrupts
+  pin-controller is supported).
+
+  This child node should include the following properties:
- compatible: identifies the type of the external wakeup interrupt controller
The possible values are:
@@ -156,12 +159,12 @@ B. External Wakeup Interrupts: For supporting external wakeup interrupts, a
found on Samsung S3C2412 and S3C2413 SoCs,
- samsung,s3c64xx-wakeup-eint: represents wakeup interrupt controller
found on Samsung S3C64xx SoCs,
+ - samsung,s5pv210-wakeup-eint: represents wakeup interrupt controller
+ found on Samsung S5Pv210 SoCs,
- samsung,exynos4210-wakeup-eint: represents wakeup interrupt controller
found on Samsung Exynos4210 and S5PC110/S5PV210 SoCs.
- samsung,exynos7-wakeup-eint: represents wakeup interrupt controller
found on Samsung Exynos7 SoC.
- - interrupt-parent: phandle of the interrupt parent to which the external
- wakeup interrupts are forwarded to.
- interrupts: interrupt used by multiplexed wakeup interrupts.
In addition, following properties must be present in node of every bank
@@ -181,8 +184,6 @@ B. External Wakeup Interrupts: For supporting external wakeup interrupts, a
Node of every bank of pins supporting direct wake-up interrupts (without
multiplexing) must contain following properties:
- - interrupt-parent: phandle of the interrupt parent to which the external
- wakeup interrupts are forwarded to.
- interrupts: interrupts of the interrupt parent which are used for external
wakeup interrupts from pins of the bank, must contain interrupts for all
pins of the bank.
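
A minimal sketch of the single external wakeup interrupts child node
described above (the child node name and interrupt specifier are
illustrative assumptions; the compatible is one of the documented values):

	pinctrl@11000000 {
		/* ... other pin-controller properties ... */

		wakeup-interrupt-controller {
			compatible = "samsung,exynos4210-wakeup-eint";
			interrupts = <0 32 0>;
		};
	};
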
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
index 9a06e1fdbc42..ef4f2ff4a1aa 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
@@ -37,11 +37,10 @@ Required properties:
Optional properties:
- reset: : Reference to the reset controller
- - interrupt-parent: phandle of the interrupt parent to which the external
- GPIO interrupts are forwarded to.
- - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
- which includes IRQ mux selection register, and the offset of the IRQ mux
- selection register.
+ - st,syscfg: Should be a phandle/offset/mask triplet:
+	-The phandle to the syscon node which includes the IRQ mux selection register.
+	-The offset of the IRQ mux selection register.
+	-The field mask of the IRQ mux, needed if different from 0xf.
- gpio-ranges: Define a dedicated mapping between a pin-controller and
a gpio controller. Format is <&phandle a b c> with:
-(phandle): phandle of pin-controller.
@@ -55,6 +54,8 @@ Optional properties:
NOTE: If "gpio-ranges" is used for a gpio controller, all gpio-controller
have to use a "gpio-ranges" entry.
More details in Documentation/devicetree/bindings/gpio/gpio.txt.
+ - st,bank-ioport: should correspond to the EXTI IOport selection (EXTI line
+ used to select GPIOs as interrupts).
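
A sketch of the reworked st,syscfg triplet, as described above (the phandle
label, offset and mask values are illustrative assumptions, not values from
a real board):

	pinctrl: pin-controller {
		...
		/* <syscon phandle, IRQ mux register offset, IRQ mux field mask> */
		st,syscfg = <&syscfg 0x8 0xff>;
	};
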
Example 1:
#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 7dec508987c7..8f8b25a24b8f 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -114,18 +114,26 @@ Required properties:
- power-domains : A list of PM domain specifiers, as defined by bindings of
the power controller that is the PM domain provider.
+Optional properties:
+ - power-domain-names : A list of power domain name strings sorted in the same
+	order as the power-domains property. Consumer drivers will use
+ power-domain-names to match power domains with power-domains
+ specifiers.
+
Example:
leaky-device@12350000 {
compatible = "foo,i-leak-current";
reg = <0x12350000 0x1000>;
power-domains = <&power 0>;
+ power-domain-names = "io";
};
leaky-device@12351000 {
compatible = "foo,i-leak-current";
reg = <0x12351000 0x1000>;
power-domains = <&power 0>, <&power 1> ;
+ power-domain-names = "io", "clk";
};
The first example above defines a typical PM domain consumer device, which is
diff --git a/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt b/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt
index b86ecada4f84..c7dfb7cecf40 100644
--- a/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/act8945a-charger.txt
@@ -9,8 +9,6 @@ Required properties:
- interrupts: <a b> where a is the interrupt number and b is a
field that represents an encoding of the sense and level
information for the interrupt.
- - interrupt-parent: the phandle for the interrupt controller that
- services interrupts for this device.
Optional properties:
- active-semi,input-voltage-threshold-microvolt: unit: mV;
diff --git a/Documentation/devicetree/bindings/power/supply/bq24257.txt b/Documentation/devicetree/bindings/power/supply/bq24257.txt
index d693702c9c1e..f8f5a1685bb9 100644
--- a/Documentation/devicetree/bindings/power/supply/bq24257.txt
+++ b/Documentation/devicetree/bindings/power/supply/bq24257.txt
@@ -6,8 +6,6 @@ Required properties:
* "ti,bq24251"
* "ti,bq24257"
- reg: integer, i2c address of the device.
-- interrupt-parent: Should be the phandle for the interrupt controller. Use in
- conjunction with "interrupts".
- interrupts: Interrupt mapping for GPIO IRQ (configure for both edges). Use in
conjunction with "interrupt-parent".
- ti,battery-regulation-voltage: integer, maximum charging voltage in uV.
diff --git a/Documentation/devicetree/bindings/power/supply/lp8727_charger.txt b/Documentation/devicetree/bindings/power/supply/lp8727_charger.txt
index 2246bc5c874b..0355a4b68f79 100644
--- a/Documentation/devicetree/bindings/power/supply/lp8727_charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/lp8727_charger.txt
@@ -5,7 +5,6 @@ Required properties:
- reg: I2C slave address 27h
Optional properties:
-- interrupt-parent: interrupt controller node (see interrupt binding[0])
- interrupts: interrupt specifier (see interrupt binding[0])
- debounce-ms: interrupt debounce time. (u32)
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max14656.txt b/Documentation/devicetree/bindings/power/supply/maxim,max14656.txt
index d6e8dfd0a581..f956247d493e 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,max14656.txt
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max14656.txt
@@ -3,7 +3,6 @@ Maxim MAX14656 / AL32 USB Charger Detector
Required properties :
- compatible : "maxim,max14656";
- reg: i2c slave address
-- interrupt-parent: the phandle for the interrupt controller
- interrupts: interrupt line
Example:
diff --git a/Documentation/devicetree/bindings/power/supply/rt9455_charger.txt b/Documentation/devicetree/bindings/power/supply/rt9455_charger.txt
index 5d9ad5cf2c5a..1e6107c7578e 100644
--- a/Documentation/devicetree/bindings/power/supply/rt9455_charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/rt9455_charger.txt
@@ -4,8 +4,6 @@ Required properties:
- compatible: it should contain one of the following:
"richtek,rt9455".
- reg: integer, i2c address of the device.
-- interrupt-parent: the phandle for the interrupt controller that
- services interrupts for this device.
- interrupts: interrupt mapping for GPIO IRQ, it should be
configured with IRQ_TYPE_LEVEL_LOW flag.
- richtek,output-charge-current: integer, output current from the charger to the
diff --git a/Documentation/devicetree/bindings/power/supply/sbs_sbs-charger.txt b/Documentation/devicetree/bindings/power/supply/sbs_sbs-charger.txt
index a3719623a94f..84e74151eef2 100644
--- a/Documentation/devicetree/bindings/power/supply/sbs_sbs-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/sbs_sbs-charger.txt
@@ -7,8 +7,6 @@ Required properties:
specific registers.
Optional properties:
-- interrupt-parent: Should be the phandle for the interrupt controller. Use in
- conjunction with "interrupts".
- interrupts: Interrupt mapping for GPIO IRQ. Use in conjunction with
"interrupt-parent". If an interrupt is not provided the driver will switch
automatically to polling.
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/akebono.txt b/Documentation/devicetree/bindings/powerpc/4xx/akebono.txt
index db939210e29d..940fd78e3363 100644
--- a/Documentation/devicetree/bindings/powerpc/4xx/akebono.txt
+++ b/Documentation/devicetree/bindings/powerpc/4xx/akebono.txt
@@ -19,7 +19,6 @@ The IBM Akebono board is a development board for the PPC476GTR SoC.
- compatible : should be "ibm,476gtr-sdhci","generic-sdhci".
- reg : should contain the SDHCI registers location and length.
- - interrupt-parent : a phandle for the interrupt controller.
- interrupts : should contain the SDHCI interrupt.
1.b) The Advanced Host Controller Interface (AHCI) SATA node
@@ -30,7 +29,6 @@ The IBM Akebono board is a development board for the PPC476GTR SoC.
- compatible : should be "ibm,476gtr-ahci".
- reg : should contain the AHCI registers location and length.
- - interrupt-parent : a phandle for the interrupt controller.
- interrupts : should contain the AHCI interrupt.
1.c) The FPGA node
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/hsta.txt b/Documentation/devicetree/bindings/powerpc/4xx/hsta.txt
index c737c8338705..66dbd9ff56f7 100644
--- a/Documentation/devicetree/bindings/powerpc/4xx/hsta.txt
+++ b/Documentation/devicetree/bindings/powerpc/4xx/hsta.txt
@@ -13,7 +13,6 @@ device tree entries:
Require properties:
- compatible : "ibm,476gtr-hsta-msi", "ibm,hsta-msi"
- reg : register mapping for the HSTA MSI space
-- interrupt-parent : parent controller for mapping interrupts
- interrupts : ordered interrupt mapping for each MSI in the register
space. The first interrupt should be associated with a
register offset of 0x00, the second to 0x10, etc.
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt b/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt
index 515ebcf1b97d..de6a5f7d4aa4 100644
--- a/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt
+++ b/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt
@@ -38,7 +38,6 @@ DMA devices.
2 sources: DMAx CS FIFO Needs Service IRQ (on UIC0)
and DMA Error IRQ (on UIC1). The latter is common
for both DMA engines>.
- - interrupt-parent : needed for interrupt mapping
Example:
@@ -65,7 +64,6 @@ DMA devices.
- compatible : "amcc,xor-accelerator";
- reg : <registers mapping>
- interrupts : <interrupt mapping for XOR interrupt source>
- - interrupt-parent : for interrupt mapping
Example:
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/dcsr.txt b/Documentation/devicetree/bindings/powerpc/fsl/dcsr.txt
index 18a88100af94..4b01e1afafda 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/dcsr.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/dcsr.txt
@@ -84,13 +84,6 @@ PROPERTIES
Interrupt numbers are listed in order (perfmon, event0, event1).
- - interrupt-parent
- Usage: required
- Value type: <phandle>
- Definition: A single <phandle> value that points
- to the interrupt parent to which the child domain
- is being mapped. Value must be "&mpic"
-
- reg
Usage: required
Value type: <prop-encoded-array>
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/diu.txt b/Documentation/devicetree/bindings/powerpc/fsl/diu.txt
index b66cb6d31d69..eb45db1ecee5 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/diu.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/diu.txt
@@ -8,8 +8,6 @@ Required properties:
- reg : should contain at least address and length of the DIU register
set.
- interrupts : one DIU interrupt should be described here.
-- interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
Optional properties:
- edid : verbatim EDID data block describing attached display.
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/dma.txt b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
index 7fc1b010fa75..c11ad5c6db21 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
@@ -13,7 +13,6 @@ Required properties:
DMA channels and the address space of the DMA controller
- cell-index : controller index. 0 for controller @ 0x8100
- interrupts : interrupt specifier for DMA IRQ
-- interrupt-parent : optional, if needed for interrupt mapping
- DMA channel nodes:
- compatible : must include "fsl,elo-dma-channel"
@@ -25,7 +24,6 @@ Optional properties:
- interrupts : interrupt specifier for DMA channel IRQ
(on 83xx this is expected to be identical to
the interrupts property of the parent node)
- - interrupt-parent : optional, if needed for interrupt mapping
Example:
dma@82a8 {
@@ -88,7 +86,6 @@ Required properties:
- cell-index : DMA channel index starts at 0.
- reg : DMA channel specific registers
- interrupts : interrupt specifier for DMA channel IRQ
- - interrupt-parent : optional, if needed for interrupt mapping
Example:
dma@21300 {
@@ -146,7 +143,6 @@ Required properties:
- compatible : must include "fsl,eloplus-dma-channel"
- reg : DMA channel specific registers
- interrupts : interrupt specifier for DMA channel IRQ
- - interrupt-parent : optional, if needed for interrupt mapping
Example:
dma@100300 {
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt b/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt
index f514f29c67d6..76dc547bc492 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt
@@ -57,8 +57,4 @@ PROPERTIES
Usage: required
Value type: <prop-encoded-array>
- - interrupt-parent
- Usage: required
- Value type: <phandle>
-
=====================================================================
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt b/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt
index 4ceda9b3b413..a5dae6b1f545 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt
@@ -57,8 +57,4 @@ PROPERTIES
Usage: required
Value type: <prop-encoded-array>
- - interrupt-parent
- Usage: required
- Value type: <phandle>
-
=====================================================================
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
index 647817527c88..5dfd68f1a423 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
@@ -18,8 +18,6 @@ Required properties :
- interrupts : <a b> where a is the interrupt number of the
PSC FIFO Controller and b is a field that represents an
encoding of the sense and level information for the interrupt.
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
Recommended properties :
- fsl,rx-fifo-size : the size of the RX fifo slice (a multiple of 4)
@@ -45,8 +43,6 @@ Required properties :
- interrupts : <a b> where a is the interrupt number of the
PSC FIFO Controller and b is a field that represents an
encoding of the sense and level information for the interrupt.
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
Recommended properties :
- clocks : specifies the clock needed to operate the fifo controller
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt b/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt
index 82dd5b65cf48..f8d2b7fe06d6 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt
@@ -21,11 +21,6 @@ Required properties:
be set as edge sensitive. If msi-available-ranges is present, only
the interrupts that correspond to available ranges shall be present.
-- interrupt-parent: the phandle for the interrupt controller
- that services interrupts for this device. for 83xx cpu, the interrupts
- are routed to IPIC, and for 85xx/86xx cpu the interrupts are routed
- to MPIC.
-
Optional properties:
- msi-available-ranges: use <start count> style section to define which
msi interrupt can be used in the 256 msi interrupts. This property is
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/pamu.txt b/Documentation/devicetree/bindings/powerpc/fsl/pamu.txt
index c2b2899885f2..b21ab85de6e7 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/pamu.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/pamu.txt
@@ -32,8 +32,6 @@ Optional properties:
A standard property. It represents the CCSR registers of
all child PAMUs combined. Include it to provide support
for legacy drivers.
-- interrupt-parent : <phandle>
- Phandle to interrupt controller
- fsl,portid-mapping : <u32>
The Coherency Subdomain ID Port Mapping Registers and
Snoop ID Port Mapping registers, which are part of the
diff --git a/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt b/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt
index a3dc4b9fa11a..c4d78f28d23c 100644
--- a/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt
+++ b/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt
@@ -148,7 +148,6 @@ Nintendo Wii device tree
- reg : should contain the controller registers location and length
- interrupt-controller
- interrupts : should contain the cascade interrupt of the "flipper" pic
- - interrupt-parent: should contain the phandle of the "flipper" pic
1.l) The General Purpose I/O (GPIO) controller node
diff --git a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
index 0f569d8e73a3..c5d0e7998e2b 100644
--- a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
+++ b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
@@ -2,7 +2,8 @@
General Properties:
- - compatible Should be "fsl,etsec-ptp"
+ - compatible Should be "fsl,etsec-ptp" for eTSEC
+ Should be "fsl,fman-ptp-timer" for DPAA FMan
- reg Offset and length of the register set for the device
- interrupts There should be at least two interrupts. Some devices
have as many as four PTP related interrupts.
@@ -43,14 +44,22 @@ Clock Properties:
value, which will be directly written in those bits, that is why,
according to reference manual, the next clock sources can be used:
+ For eTSEC,
<0> - external high precision timer reference clock (TSEC_TMR_CLK
input is used for this purpose);
<1> - eTSEC system clock;
<2> - eTSEC1 transmit clock;
<3> - RTC clock input.
- When this attribute is not used, eTSEC system clock will serve as
- IEEE 1588 timer reference clock.
+ For DPAA FMan,
+ <0> - external high precision timer reference clock (TMR_1588_CLK)
+ <1> - MAC system clock (1/2 FMan clock)
+ <2> - reserved
+ <3> - RTC clock oscillator
+
+ When this attribute is not used, the IEEE 1588 timer reference clock
+ will use the eTSEC system clock (for Gianfar) or the MAC system
+ clock (for DPAA).
Example:
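
(A hedged sketch of a DPAA FMan timer node using the new compatible; the
unit address, register size and clock selection are illustrative
assumptions:)

	ptp_timer1: ptp-timer@fe000 {
		compatible = "fsl,fman-ptp-timer";
		reg = <0xfe000 0x1000>;
		/* assumption: <3> selects the RTC clock oscillator,
		   per the DPAA FMan list above */
		fsl,cksel = <3>;
	};
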
diff --git a/Documentation/devicetree/bindings/regulator/cpcap-regulator.txt b/Documentation/devicetree/bindings/regulator/cpcap-regulator.txt
index 675f4437ce92..36f5e2f5cc0f 100644
--- a/Documentation/devicetree/bindings/regulator/cpcap-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cpcap-regulator.txt
@@ -5,6 +5,7 @@ Requires node properties:
- "compatible" value one of:
"motorola,cpcap-regulator"
"motorola,mapphone-cpcap-regulator"
+ "motorola,xoom-cpcap-regulator"
Required regulator properties:
- "regulator-name"
diff --git a/Documentation/devicetree/bindings/regulator/max8997-regulator.txt b/Documentation/devicetree/bindings/regulator/max8997-regulator.txt
index 5c186a7a77ba..6fe825b8ac1b 100644
--- a/Documentation/devicetree/bindings/regulator/max8997-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/max8997-regulator.txt
@@ -32,8 +32,6 @@ Required properties:
'max8997,pmic-buck[1/2/5]-dvs-voltage' should be specified.
Optional properties:
-- interrupt-parent: Specifies the phandle of the interrupt controller to which
- the interrupts from max8997 are delivered to.
- interrupts: Interrupt specifiers for two interrupt sources.
- First interrupt specifier is for 'irq1' interrupt.
- Second interrupt specifier is for 'alert' interrupt.
diff --git a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
index 99872819604f..84bc76a7c39e 100644
--- a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
+++ b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
@@ -18,7 +18,6 @@ Required properties:
ti,tps659038-pmic
and also the generic series names
ti,palmas-pmic
-- interrupt-parent : The parent interrupt controller which is palmas.
- interrupts : The interrupt number and the type which can be looked up here:
arch/arm/boot/dts/include/dt-bindings/interrupt-controller/irq.h
- interrupts-name: The names of the individual interrupts.
diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt
index f0ada3b14d70..c7610718adff 100644
--- a/Documentation/devicetree/bindings/regulator/pfuze100.txt
+++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt
@@ -1,9 +1,18 @@
PFUZE100 family of regulators
Required properties:
-- compatible: "fsl,pfuze100", "fsl,pfuze200", "fsl,pfuze3000"
+- compatible: "fsl,pfuze100", "fsl,pfuze200", "fsl,pfuze3000", "fsl,pfuze3001"
- reg: I2C slave address
+Optional properties:
+- fsl,pfuze-support-disable-sw: Boolean, if present, disable all unused switch
+  regulators to save power consumption. Attention: ensure that all important
+  regulators (e.g. DDR ref, DDR supply) have the "regulator-always-on"
+  property set. If not present, the switched regulators are always on and
+  can't be disabled. This binding is a workaround to keep backward
+  compatibility with old dtbs which rely on the switched regulators being
+  always on and do not mark them explicitly as "regulator-always-on".
+
Required child node:
- regulators: This is the list of child nodes that specify the regulator
initialization data for defined regulators. Please refer to below doc
@@ -16,6 +25,8 @@ Required child node:
sw1ab,sw2,sw3a,sw3b,swbst,vsnvs,vrefddr,vgen1~vgen6,coin
--PFUZE3000
sw1a,sw1b,sw2,sw3,swbst,vsnvs,vrefddr,vldo1,vldo2,vccsd,v33,vldo3,vldo4
+ --PFUZE3001
+ sw1,sw2,sw3,vsnvs,vldo1,vldo2,vccsd,v33,vldo3,vldo4
Each regulator is defined using the standard binding for regulators.
@@ -303,3 +314,76 @@ Example 3: PFUZE3000
};
};
};
+
+Example 4: PFUZE 3001
+
+ pfuze3001: pmic@8 {
+ compatible = "fsl,pfuze3001";
+ reg = <0x08>;
+
+ regulators {
+ sw1_reg: sw1 {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3_reg: sw3 {
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1650000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vgen1_reg: vldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen2_reg: vldo2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ regulator-always-on;
+ };
+
+ vgen3_reg: vccsd {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen4_reg: v33 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vldo3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vldo4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+ };
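
A short sketch of the new fsl,pfuze-support-disable-sw flag in context (the
node is abbreviated; which rails need regulator-always-on is board-specific):

	pfuze100: pmic@8 {
		compatible = "fsl,pfuze100";
		reg = <0x08>;
		/* allow unused switched regulators to be disabled;
		   critical rails such as DDR supplies must then set
		   regulator-always-on explicitly */
		fsl,pfuze-support-disable-sw;

		regulators {
			sw3a_reg: sw3a {
				regulator-always-on;
			};
			/* ... */
		};
	};
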
diff --git a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
new file mode 100644
index 000000000000..7ef2dbe48e8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
@@ -0,0 +1,160 @@
+Qualcomm Technologies, Inc. RPMh Regulators
+
+rpmh-regulator devices support PMIC regulator management via the Voltage
+Regulator Manager (VRM) and Oscillator Buffer (XOB) RPMh accelerators. The APPS
+processor communicates with these hardware blocks via a Resource State
+Coordinator (RSC) using command packets. The VRM allows changing three
+parameters for a given regulator: enable state, output voltage, and operating
+mode. The XOB allows changing only a single parameter for a given regulator:
+its enable state. Despite its name, the XOB is capable of controlling the
+enable state of any PMIC peripheral. It is used for clock buffers, low-voltage
+switches, and LDO/SMPS regulators which have a fixed voltage and mode.
+
+=======================
+Required Node Structure
+=======================
+
+RPMh regulators must be described in two levels of device nodes. The first
+level describes the PMIC containing the regulators and must reside within an
+RPMh device node. The second level describes each regulator within the PMIC
+which is to be used on the board. Each of these regulators maps to a single
+RPMh resource.
+
+The names used for regulator nodes must match those supported by a given PMIC.
+Supported regulator node names:
+ PM8998: smps1 - smps13, ldo1 - ldo28, lvs1 - lvs2
+ PMI8998: bob
+ PM8005: smps1 - smps4
+
+========================
+First Level Nodes - PMIC
+========================
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Must be one of: "qcom,pm8998-rpmh-regulators",
+ "qcom,pmi8998-rpmh-regulators" or
+ "qcom,pm8005-rpmh-regulators".
+
+- qcom,pmic-id
+ Usage: required
+ Value type: <string>
+ Definition: RPMh resource name suffix used for the regulators found on
+ this PMIC. Typical values: "a", "b", "c", "d", "e", "f".
+
+- vdd-s1-supply
+- vdd-s2-supply
+- vdd-s3-supply
+- vdd-s4-supply
+ Usage: optional (PM8998 and PM8005 only)
+ Value type: <phandle>
+ Definition: phandle of the parent supply regulator of one or more of the
+ regulators for this PMIC.
+
+- vdd-s5-supply
+- vdd-s6-supply
+- vdd-s7-supply
+- vdd-s8-supply
+- vdd-s9-supply
+- vdd-s10-supply
+- vdd-s11-supply
+- vdd-s12-supply
+- vdd-s13-supply
+- vdd-l1-l27-supply
+- vdd-l2-l8-l17-supply
+- vdd-l3-l11-supply
+- vdd-l4-l5-supply
+- vdd-l6-supply
+- vdd-l7-l12-l14-l15-supply
+- vdd-l9-supply
+- vdd-l10-l23-l25-supply
+- vdd-l13-l19-l21-supply
+- vdd-l16-l28-supply
+- vdd-l18-l22-supply
+- vdd-l20-l24-supply
+- vdd-l26-supply
+- vin-lvs-1-2-supply
+ Usage: optional (PM8998 only)
+ Value type: <phandle>
+ Definition: phandle of the parent supply regulator of one or more of the
+ regulators for this PMIC.
+
+- vdd-bob-supply
+ Usage: optional (PMI8998 only)
+ Value type: <phandle>
+ Definition: BOB regulator parent supply phandle
+
+===============================
+Second Level Nodes - Regulators
+===============================
+
+- qcom,always-wait-for-ack
+ Usage: optional
+ Value type: <empty>
+ Definition: Boolean flag which indicates that the application processor
+ must wait for an ACK or a NACK from RPMh for every request
+ sent for this regulator including those which are for a
+ strictly lower power state.
+
+Other properties defined in Documentation/devicetree/bindings/regulator/regulator.txt
+may also be used. regulator-initial-mode and regulator-allowed-modes may be
+specified for VRM regulators using mode values from
+include/dt-bindings/regulator/qcom,rpmh-regulator.h. regulator-allow-bypass
+may be specified for BOB type regulators managed via VRM.
+regulator-allow-set-load may be specified for LDO type regulators managed via
+VRM.
+
+========
+Examples
+========
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+&apps_rsc {
+ pm8998-rpmh-regulators {
+ compatible = "qcom,pm8998-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ vdd-l7-l12-l14-l15-supply = <&pm8998_s5>;
+
+ smps2 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ pm8998_s5: smps5 {
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2040000>;
+ };
+
+ ldo7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_LPM
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ };
+
+ lvs1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+
+ pmi8998-rpmh-regulators {
+ compatible = "qcom,pmi8998-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ bob {
+ regulator-min-microvolt = <3312000>;
+ regulator-max-microvolt = <3600000>;
+ regulator-allowed-modes =
+ <RPMH_REGULATOR_MODE_AUTO
+ RPMH_REGULATOR_MODE_HPM>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt
index 4edf3137d9f7..76ead07072b1 100644
--- a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt
@@ -1,13 +1,5 @@
ROHM BD71837 Power Management Integrated Circuit (PMIC) regulator bindings
-BD71837MWV is a programmable Power Management
-IC (PMIC) for powering single-core, dual-core, and
-quad-core SoC’s such as NXP-i.MX 8M. It is optimized
-for low BOM cost and compact solution footprint. It
-integrates 8 Buck regulators and 7 LDO’s to provide all
-the power rails required by the SoC and the commonly
-used peripherals.
-
Required properties:
- regulator-name: should be "buck1", ..., "buck8" and "ldo1", ..., "ldo7"
diff --git a/Documentation/devicetree/bindings/regulator/uniphier-regulator.txt b/Documentation/devicetree/bindings/regulator/uniphier-regulator.txt
new file mode 100644
index 000000000000..c9919f4b92d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/uniphier-regulator.txt
@@ -0,0 +1,57 @@
+Socionext UniPhier Regulator Controller
+
+This describes the devicetree bindings for the regulator controller implemented
+on Socionext UniPhier SoCs.
+
+USB3 Controller
+---------------
+
+This regulator controls VBUS and belongs to the USB3 glue layer. Before using
+the regulator, it is necessary to control the clocks and resets to enable
+this layer. These clocks and resets should be described in each property.
+
+Required properties:
+- compatible: Should be
+ "socionext,uniphier-pro4-usb3-regulator" - for Pro4 SoC
+ "socionext,uniphier-pxs2-usb3-regulator" - for PXs2 SoC
+ "socionext,uniphier-ld20-usb3-regulator" - for LD20 SoC
+ "socionext,uniphier-pxs3-usb3-regulator" - for PXs3 SoC
+- reg: Specifies offset and length of the register set for the device.
+- clocks: A list of phandles to the clock gate for USB3 glue layer.
+ According to the clock-names, appropriate clocks are required.
+- clock-names: Should contain
+ "gio", "link" - for Pro4 SoC
+ "link" - for others
+- resets: A list of phandles to the reset control for USB3 glue layer.
+ According to the reset-names, appropriate resets are required.
+- reset-names: Should contain
+ "gio", "link" - for Pro4 SoC
+ "link" - for others
+
+See Documentation/devicetree/bindings/regulator/regulator.txt
+for more details about the regulator properties.
+
+Example:
+
+ usb-glue@65b00000 {
+ compatible = "socionext,uniphier-ld20-dwc3-glue",
+ "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x65b00000 0x400>;
+
+ usb_vbus0: regulators@100 {
+ compatible = "socionext,uniphier-ld20-usb3-regulator";
+ reg = <0x100 0x10>;
+ clock-names = "link";
+ clocks = <&sys_clk 14>;
+ reset-names = "link";
+ resets = <&sys_rst 14>;
+ };
+
+ phy {
+ ...
+ phy-supply = <&usb_vbus0>;
+ };
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt b/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt
index e44a97e21164..25f8658e216f 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt
+++ b/Documentation/devicetree/bindings/remoteproc/ti,davinci-rproc.txt
@@ -45,12 +45,6 @@ The following are the mandatory properties:
per the bindings in
Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
-Optional properties:
---------------------
-- interrupt-parent: phandle to the interrupt controller node. This property
- is needed if the device node hierarchy doesn't have an
- interrupt controller.
-
Example:
--------
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt b/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt
index 1eb72874130b..461dc1d8d570 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt
+++ b/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt
@@ -51,12 +51,6 @@ The following are the mandatory properties:
Documentation/devicetree/bindings/reset/ti,sci-reset.txt
for 66AK2G SoCs
-- interrupt-parent: Should contain a phandle to the Keystone 2 IRQ controller
- IP node that is used by the ARM CorePac processor to
- receive interrupts from the DSP remote processors. See
- Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.txt
- for details.
-
- interrupts: Should contain an entry for each value in 'interrupt-names'.
Each entry should have the interrupt source number used by
the remote processor to the host processor. The values should
diff --git a/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt b/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt
index 1d990bcc0baf..d946f28502b3 100644
--- a/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt
+++ b/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt
@@ -7,8 +7,6 @@ Required properties:
- compatible : should contain "brcm,brcmstb-waketimer"
- reg : the register start and length for the WKTMR block
- interrupts : The TIMER interrupt
-- interrupt-parent: The phandle to the Always-On (AON) Power Management (PM) L2
- interrupt controller node
- clocks : The phandle to the UPG fixed clock (27Mhz domain)
Example:
diff --git a/Documentation/devicetree/bindings/rtc/isil,isl12057.txt b/Documentation/devicetree/bindings/rtc/isil,isl12057.txt
index fbbdd92e5af9..ff7c43555199 100644
--- a/Documentation/devicetree/bindings/rtc/isil,isl12057.txt
+++ b/Documentation/devicetree/bindings/rtc/isil,isl12057.txt
@@ -25,9 +25,6 @@ Optional properties:
- "wakeup-source": mark the chip as a wakeup source, independently of
the availability of an IRQ line connected to the SoC.
- - "interrupt-parent", "interrupts": for passing the interrupt line
- of the SoC connected to IRQ#2 of the RTC chip.
-
Example isl12057 node without IRQ#2 pin connected (no alarm support):
diff --git a/Documentation/devicetree/bindings/rtc/rtc-cmos.txt b/Documentation/devicetree/bindings/rtc/rtc-cmos.txt
index 7382989b3052..b94b35f3600b 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-cmos.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-cmos.txt
@@ -7,7 +7,6 @@ Required properties:
Optional properties:
- interrupts : should contain interrupt.
- - interrupt-parent : interrupt source phandle.
- ctrl-reg : Contains the initial value of the control register also
called "Register B".
- freq-reg : Contains the initial value of the frequency register also
diff --git a/Documentation/devicetree/bindings/rtc/rtc-ds1307.txt b/Documentation/devicetree/bindings/rtc/rtc-ds1307.txt
index d28d6e7f6ae8..226cc93df875 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-ds1307.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-ds1307.txt
@@ -21,7 +21,6 @@ Required properties:
- reg: I2C bus address of the device
Optional properties:
-- interrupt-parent: phandle for the interrupt controller.
- interrupts: rtc alarm interrupt.
- clock-output-names: From common clock binding to override the default output
clock name
diff --git a/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt b/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt
index 717d93860af1..c746cb221210 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt
@@ -16,7 +16,6 @@ Required properties:
- reg: I2C bus address of the device
Optional properties:
-- interrupt-parent: phandle for the interrupt controller.
- interrupts: rtc alarm interrupt.
- clock-output-names: From common clock binding to override the default output
clock name
diff --git a/Documentation/devicetree/bindings/rtc/rtc-omap.txt b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
index bee41f97044e..062ebb14cecf 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-omap.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
@@ -11,7 +11,6 @@ Required properties:
- "ti,am4372-rtc" - for RTC IP used similar to that on AM437X SoC family.
- reg: Address range of rtc register set
- interrupts: rtc timer, alarm interrupts in order
-- interrupt-parent: phandle for the interrupt controller
Optional properties:
- system-power-controller: whether the rtc is controlling the system power
diff --git a/Documentation/devicetree/bindings/rtc/rtc-palmas.txt b/Documentation/devicetree/bindings/rtc/rtc-palmas.txt
index eb1c7fdeb413..c6cf37758a77 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-palmas.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-palmas.txt
@@ -3,7 +3,6 @@ Palmas RTC controller bindings
Required properties:
- compatible:
- "ti,palmas-rtc" for palma series of the RTC controller
-- interrupt-parent: Parent interrupt device, must be handle of palmas node.
- interrupts: Interrupt number of RTC submodule on device.
Optional properties:
diff --git a/Documentation/devicetree/bindings/rtc/spear-rtc.txt b/Documentation/devicetree/bindings/rtc/spear-rtc.txt
index ca67ac62108e..fecf8e4ad4b4 100644
--- a/Documentation/devicetree/bindings/rtc/spear-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/spear-rtc.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible : "st,spear600-rtc"
- reg : Address range of the rtc registers
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupt: Should contain the rtc interrupt number
Example:
diff --git a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt b/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
index 7c170da0d4b7..1f5754299d31 100644
--- a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
@@ -3,7 +3,6 @@ Spreadtrum SC27xx Real Time Clock
Required properties:
- compatible: should be "sprd,sc2731-rtc".
- reg: address offset of rtc register.
-- interrupt-parent: phandle for the interrupt controller.
- interrupts: rtc alarm interrupt.
Example:
diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt
index c920e2736991..130ca5b98253 100644
--- a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.txt
@@ -13,8 +13,6 @@ Required properties:
It is required on stm32(h7/mp1).
- clock-names: must be "rtc_ck" and "pclk".
It is required on stm32(h7/mp1).
-- interrupt-parent: phandle for the interrupt controller.
- It is required on stm32(f4/f7/h7).
- interrupts: rtc alarm interrupt. On stm32mp1, a second interrupt is required
for rtc alarm wakeup interrupt.
- st,syscfg: phandle/offset/mask triplet. The phandle to pwrcfg used to
diff --git a/Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt b/Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt
index 3ebeb311335f..e615a897b20e 100644
--- a/Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt
+++ b/Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt
@@ -3,7 +3,6 @@ ST-Ericsson COH 901 331 Real Time Clock
Required properties:
- compatible: must be "stericsson,coh901331"
- reg: address range of rtc register set.
-- interrupt-parent: phandle for the interrupt controller.
- interrupts: rtc alarm interrupt.
- clocks: phandle to the rtc clock source
diff --git a/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt b/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt
index 6a4e0d30d8c4..0dc121b6eace 100644
--- a/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt
+++ b/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt
@@ -6,7 +6,6 @@ Required properties:
- reg: address on the bus
Optional ST33ZP24 Properties:
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
- lpcpd-gpios: Output GPIO pin used for ST33ZP24 power management D1/D2 state.
If set, power must be present when the platform is going into sleep/hibernate mode.
diff --git a/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt b/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt
index 604dce901b60..37198971f17b 100644
--- a/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt
+++ b/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt
@@ -5,7 +5,6 @@ Required properties:
- spi-max-frequency: Maximum SPI frequency (<= 10000000).
Optional ST33ZP24 Properties:
-- interrupt-parent: phandle for the interrupt gpio controller
- interrupts: GPIO interrupt to which the chip is connected
- lpcpd-gpios: Output GPIO pin used for ST33ZP24 power management D1/D2 state.
If set, power must be present when the platform is going into sleep/hibernate mode.
diff --git a/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt b/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt
index 41d740545189..7c6304426da1 100644
--- a/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt
+++ b/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt
@@ -13,7 +13,7 @@ Required properties:
"tcg,tpm-tis-mmio". Valid chip strings are:
* "atmel,at97sc3204"
- reg: The location of the MMIO registers, should be at least 0x5000 bytes
-- interrupt-parent/interrupts: An optional interrupt indicating command completion.
+- interrupts: An optional interrupt indicating command completion.
Example:
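
(A hedged sketch of such a node; the unit address, register size and
interrupt specifier are illustrative assumptions:)

	tpm@90000 {
		compatible = "atmel,at97sc3204", "tcg,tpm-tis-mmio";
		reg = <0x90000 0x5000>;
		/* optional: command-completion interrupt */
		interrupts = <1 2>;
	};
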
diff --git a/Documentation/devicetree/bindings/serial/maxim,max310x.txt b/Documentation/devicetree/bindings/serial/maxim,max310x.txt
index 823f77dd7978..79e10a05a96a 100644
--- a/Documentation/devicetree/bindings/serial/maxim,max310x.txt
+++ b/Documentation/devicetree/bindings/serial/maxim,max310x.txt
@@ -7,8 +7,6 @@ Required properties:
- "maxim,max3109" for Maxim MAX3109,
- "maxim,max14830" for Maxim MAX14830.
- reg: SPI chip select number.
-- interrupt-parent: The phandle for the interrupt controller that
- services interrupts for this IC.
- interrupts: Specifies the interrupt source of the parent interrupt
controller. The format of the interrupt specifier depends on the
parent interrupt controller.
diff --git a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
index fbfe53635a3a..e7921a8e276b 100644
--- a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
+++ b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
@@ -10,8 +10,6 @@ Required properties:
- "nxp,sc16is760" for NXP SC16IS760,
- "nxp,sc16is762" for NXP SC16IS762.
- reg: I2C address of the SC16IS7xx device.
-- interrupt-parent: The phandle for the interrupt controller that
- services interrupts for this IC.
- interrupts: Should contain the UART interrupt
- clocks: Reference to the IC source clock.
@@ -44,8 +42,6 @@ Required properties:
- "nxp,sc16is760" for NXP SC16IS760,
- "nxp,sc16is762" for NXP SC16IS762.
- reg: SPI chip select number.
-- interrupt-parent: The phandle for the interrupt controller that
- services interrupts for this IC.
- interrupts: Specifies the interrupt source of the parent interrupt
controller. The format of the interrupt specifier depends on the
parent interrupt controller.
diff --git a/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
index c5e032c85bf9..7d65126bd1d7 100644
--- a/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
+++ b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
@@ -7,9 +7,6 @@ Required properties:
- reg: Specifies the physical base address of the controller and
the length of the memory mapped region.
-- interrupt-parent: The phandle for the interrupt controller that
- services interrupts for this device.
-
- interrupts: Specifies the interrupt source of the parent interrupt
controller. The format of the interrupt specifier depends on the
parent interrupt controller.
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/gpio.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/gpio.txt
index 626e1afa64a6..cce3cd71e85a 100644
--- a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/gpio.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/gpio.txt
@@ -21,7 +21,6 @@ Optional properties:
one as described by the fsl,cpm1-gpio-irq-mask property. There should be as
many interrupts as number of ones in the mask property. The first interrupt in
the list corresponds to the most significant bit of the mask.
-- interrupt-parent : Parent for the above interrupt property.
Example of four SOC GPIO banks defined as gpio-controller nodes:
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/ucc.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/ucc.txt
index e47734bee3f0..5efb7ac94c79 100644
--- a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/ucc.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/ucc.txt
@@ -11,8 +11,6 @@ Required properties:
information for the interrupt. This should be encoded based on
the information in section 2) depending on the type of interrupt
controller you have.
-- interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
- pio-handle : The phandle for the Parallel I/O port configuration.
- port-number : for UART drivers, the port number to use, between 0 and 3.
This usually corresponds to the /dev/ttyQE device, e.g. <0> = /dev/ttyQE0.
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/usb.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/usb.txt
index 9ccd5f30405b..da13999337a4 100644
--- a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/usb.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/usb.txt
@@ -6,7 +6,6 @@ Required properties:
length, the next two cells should contain PRAM location and
length.
- interrupts : should contain USB interrupt.
-- interrupt-parent : interrupt source phandle.
- fsl,fullspeed-clock : specifies the full speed USB clock source:
"none": clock source is disabled
"brg1" through "brg16": clock source is BRG1-BRG16, respectively
diff --git a/Documentation/devicetree/bindings/soc/qcom/rpmh-rsc.txt b/Documentation/devicetree/bindings/soc/qcom/rpmh-rsc.txt
new file mode 100644
index 000000000000..9b86d1eff219
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/rpmh-rsc.txt
@@ -0,0 +1,137 @@
+RPMH RSC:
+------------
+
+Resource Power Manager Hardened (RPMH) is the mechanism for communicating with
+the hardened resource accelerators on Qualcomm SoCs. Requests to the resources
+can be written to the Trigger Command Set (TCS) registers as (addr, val)
+pairs and then triggered. Messages in the TCS are then sent in sequence over an
+internal bus.
+
+The hardware block (Direct Resource Voter or DRV) is a part of the h/w entity
+(Resource State Coordinator a.k.a RSC) that can handle multiple sleep and
+active/wake resource requests. Multiple such DRVs can exist in a SoC and can
+be written to from Linux. The structure of each DRV follows the same template
+with a few variations that are captured by the properties here.
+
+A TCS may be triggered from Linux or triggered by the F/W after all the CPUs
+have powered off to facilitate idle power saving. A TCS can be classified as -
+
+ ACTIVE /* Triggered by Linux */
+ SLEEP /* Triggered by F/W */
+ WAKE /* Triggered by F/W */
+ CONTROL /* Triggered by F/W */
+
+The order in which they are described in the DT should match the hardware
+configuration.
+
+Requests can be made for the state of a resource, when the subsystem is active
+or idle. When all subsystems like Modem, GPU, CPU are idle, the resource state
+will be an aggregate of the sleep votes from each of those subsystems. Clients
+may request a sleep value for their shared resources in addition to the active
+mode requests.
+
+Properties:
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,rpmh-rsc".
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: The first register specifies the base address of the
+	            DRV(s). The number of DRVs is dependent on the RSC.
+ The tcs-offset specifies the start address of the
+ TCS in the DRVs.
+
+- reg-names:
+ Usage: required
+ Value type: <string>
+ Definition: Maps the register specified in the reg property. Must be
+ "drv-0", "drv-1", "drv-2" etc and "tcs-offset". The
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-interrupt>
+ Definition: The interrupt that trips when a message complete/response
+ is received for this DRV from the accelerators.
+
+- qcom,drv-id:
+ Usage: required
+ Value type: <u32>
+ Definition: The id of the DRV in the RSC block that will be used by
+ this controller.
+
+- qcom,tcs-config:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: The tuple defining the configuration of TCS.
+ Must have 2 cells which describe each TCS type.
+ <type number_of_tcs>.
+ The order of the TCS must match the hardware
+ configuration.
+ - Cell #1 (TCS Type): TCS types to be specified -
+ ACTIVE_TCS
+ SLEEP_TCS
+ WAKE_TCS
+ CONTROL_TCS
+ - Cell #2 (Number of TCS): <u32>
+
+- label:
+ Usage: optional
+ Value type: <string>
+ Definition: Name for the RSC. The name would be used in trace logs.
+
+Drivers that want to use the RSC to communicate with RPMH must specify their
+bindings as child nodes of the RSC controllers they wish to communicate with
+(see Example 3 below).
+
+Example 1:
+
+For a TCS whose RSC base address is 0x179C0000 and is at a DRV id of 2, the
+register offsets for DRV2 start at 0xD00, the register calculations are like
+this -
+DRV0: 0x179C0000
+DRV1: 0x179C0000 + 0x10000 = 0x179D0000
+DRV2: 0x179C0000 + 0x10000 * 2 = 0x179E0000
+TCS-OFFSET: 0xD00
+
+ apps_rsc: rsc@179c0000 {
+ label = "apps_rsc";
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x179c0000 0x10000>,
+ <0x179d0000 0x10000>,
+ <0x179e0000 0x10000>;
+ reg-names = "drv-0", "drv-1", "drv-2";
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,tcs-offset = <0xd00>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <ACTIVE_TCS 2>,
+ <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <CONTROL_TCS 1>;
+ };
+
+Example 2:
+
+For a TCS whose RSC base address is 0xAF20000 and is at DRV id of 0, the
+register offsets for DRV0 start at 0x1C00, the register calculations are like
+this -
+DRV0: 0xAF20000
+TCS-OFFSET: 0x1C00
+
+ disp_rsc: rsc@af20000 {
+ label = "disp_rsc";
+ compatible = "qcom,rpmh-rsc";
+ reg = <0xaf20000 0x10000>;
+ reg-names = "drv-0";
+ interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,tcs-offset = <0x1c00>;
+ qcom,drv-id = <0>;
+ qcom,tcs-config = <ACTIVE_TCS 0>,
+ <SLEEP_TCS 1>,
+ <WAKE_TCS 1>,
+ <CONTROL_TCS 0>;
+ };
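+
+Example 3:
+
+A client is described as a child node of its RSC. As a minimal sketch only
+(the child compatible and cells here are illustrative assumptions; the real
+child bindings are defined by the respective client documentation):
+
+	apps_rsc: rsc@179c0000 {
+		compatible = "qcom,rpmh-rsc";
+		/* ... properties as in Example 1 ... */
+
+		rpmh_clk: clock-controller {
+			compatible = "qcom,sdm845-rpmh-clk";
+			#clock-cells = <1>;
+		};
+	};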
diff --git a/Documentation/devicetree/bindings/sound/ac97-bus.txt b/Documentation/devicetree/bindings/sound/ac97-bus.txt
new file mode 100644
index 000000000000..103c428f2595
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ac97-bus.txt
@@ -0,0 +1,32 @@
+Generic AC97 Device Properties
+
+This document describes the devicetree bindings for an ac97 controller child
+node describing ac97 codecs.
+
+Required properties:
+-compatible : Must be "ac97,vendor_id1,vendor_id2"
+  The ids shall be the 4-character hexadecimal encoding, as given by
+  the "%04x" printf format.
+-reg : Must be the ac97 codec number, between 0 and 3
+
+Example:
+ac97: sound@40500000 {
+ compatible = "marvell,pxa270-ac97";
+ reg = < 0x40500000 0x1000 >;
+ interrupts = <14>;
+ reset-gpios = <&gpio 95 GPIO_ACTIVE_HIGH>;
+ #sound-dai-cells = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = < &pinctrl_ac97_default >;
+ clocks = <&clks CLK_AC97>, <&clks CLK_AC97CONF>;
+ clock-names = "AC97CLK", "AC97CONFCLK";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ audio-codec@0 {
+ reg = <0>;
+ compatible = "ac97,574d,4c13";
+ clocks = <&fixed_wm9713_clock>;
+ clock-names = "ac97_clk";
+	};
+};
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-fifo.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-fifo.txt
new file mode 100644
index 000000000000..3dfc2515e5c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-fifo.txt
@@ -0,0 +1,23 @@
+* Amlogic Audio FIFO controllers
+
+Required properties:
+- compatible: 'amlogic,axg-toddr' or
+ 'amlogic,axg-frddr'
+- reg: physical base address of the controller and length of memory
+ mapped region.
+- interrupts: interrupt specifier for the fifo.
+- clocks: phandle to the fifo peripheral clock provided by the audio
+ clock controller.
+- resets: phandle to memory ARB line provided by the arb reset controller.
+- #sound-dai-cells: must be 0.
+
+Example of FRDDR A on the A113 SoC:
+
+frddr_a: audio-controller@1c0 {
+ compatible = "amlogic,axg-frddr";
+ reg = <0x0 0x1c0 0x0 0x1c>;
+ #sound-dai-cells = <0>;
+ interrupts = <GIC_SPI 88 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc_audio AUD_CLKID_FRDDR_A>;
+ resets = <&arb AXG_ARB_FRDDR_A>;
+};
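+
+A TODDR instance follows the same template. As a hedged sketch (the unit
+address, register offset and interrupt below are assumptions for
+illustration):
+
+toddr_a: audio-controller@100 {
+	compatible = "amlogic,axg-toddr";
+	reg = <0x0 0x100 0x0 0x1c>;
+	#sound-dai-cells = <0>;
+	interrupts = <GIC_SPI 84 IRQ_TYPE_EDGE_RISING>;
+	clocks = <&clkc_audio AUD_CLKID_TODDR_A>;
+	resets = <&arb AXG_ARB_TODDR_A>;
+};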
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.txt
new file mode 100644
index 000000000000..80b411296480
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-sound-card.txt
@@ -0,0 +1,124 @@
+Amlogic AXG sound card:
+
+Required properties:
+
+- compatible: "amlogic,axg-sound-card"
+- model : User specified audio sound card name, one string
+
+Optional properties:
+
+- audio-aux-devs : List of phandles pointing to auxiliary devices
+- audio-widgets : Please refer to widgets.txt.
+- audio-routing : A list of the connections between audio components.
+
+Subnodes:
+
+- dai-link: Container for dai-link level properties and the CODEC
+ sub-nodes. There should be at least one (and probably more)
+ subnode of this type.
+
+Required dai-link properties:
+
+- sound-dai: phandle and port of the CPU DAI.
+
+Required TDM Backend dai-link properties:
+- dai-format : CPU/CODEC common audio format
+
+Optional TDM Backend dai-link properties:
+- dai-tdm-slot-rx-mask-{0,1,2,3}: Receive direction slot masks
+- dai-tdm-slot-tx-mask-{0,1,2,3}: Transmit direction slot masks
+				  When omitted, the mask is assumed to have no
+				  slots. A valid link must have at least one
+				  slot, so at least one of these masks should
+				  be provided with an enabled slot.
+- dai-tdm-slot-num : Please refer to tdm-slot.txt.
+ If omitted, slot number is set to accommodate the largest
+ mask provided.
+- dai-tdm-slot-width : Please refer to tdm-slot.txt. Defaults to 32 if omitted.
+- mclk-fs : Multiplication factor between stream rate and mclk
+
+Backend dai-link subnodes:
+
+- codec: dai-links representing backend links should have at least one
+	 subnode, one for each codec of the dai-link.
+	 dai-links representing frontend links have no codec, and therefore
+	 have no subnodes.
+
+Required codec subnodes properties:
+
+- sound-dai: phandle and port of the CODEC DAI.
+
+Optional codec subnodes properties:
+
+- dai-tdm-slot-tx-mask : Please refer to tdm-slot.txt.
+- dai-tdm-slot-rx-mask : Please refer to tdm-slot.txt.
+
+Example:
+
+sound {
+ compatible = "amlogic,axg-sound-card";
+ model = "AXG-S420";
+ audio-aux-devs = <&tdmin_a>, <&tdmout_c>;
+ audio-widgets = "Line", "Lineout",
+ "Line", "Linein",
+ "Speaker", "Speaker1 Left",
+			"Speaker", "Speaker1 Right",
+ "Speaker", "Speaker2 Left",
+ "Speaker", "Speaker2 Right";
+ audio-routing = "TDMOUT_C IN 0", "FRDDR_A OUT 2",
+ "SPDIFOUT IN 0", "FRDDR_A OUT 3",
+ "TDM_C Playback", "TDMOUT_C OUT",
+ "TDMIN_A IN 2", "TDM_C Capture",
+ "TDMIN_A IN 5", "TDM_C Loopback",
+ "TODDR_A IN 0", "TDMIN_A OUT",
+ "Lineout", "Lineout AOUTL",
+ "Lineout", "Lineout AOUTR",
+ "Speaker1 Left", "SPK1 OUT_A",
+ "Speaker2 Left", "SPK2 OUT_A",
+ "Speaker1 Right", "SPK1 OUT_B",
+ "Speaker2 Right", "SPK2 OUT_B",
+ "Linein AINL", "Linein",
+ "Linein AINR", "Linein";
+
+ dai-link@0 {
+ sound-dai = <&frddr_a>;
+ };
+
+ dai-link@1 {
+ sound-dai = <&toddr_a>;
+ };
+
+ dai-link@2 {
+ sound-dai = <&tdmif_c>;
+ dai-format = "i2s";
+ dai-tdm-slot-tx-mask-2 = <1 1>;
+ dai-tdm-slot-tx-mask-3 = <1 1>;
+ dai-tdm-slot-rx-mask-1 = <1 1>;
+ mclk-fs = <256>;
+
+ codec@0 {
+ sound-dai = <&lineout>;
+ };
+
+ codec@1 {
+ sound-dai = <&speaker_amp1>;
+ };
+
+ codec@2 {
+ sound-dai = <&speaker_amp2>;
+ };
+
+ codec@3 {
+ sound-dai = <&linein>;
+ };
+
+ };
+
+ dai-link@3 {
+ sound-dai = <&spdifout>;
+
+ codec {
+ sound-dai = <&spdif_dit>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-spdifout.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-spdifout.txt
new file mode 100644
index 000000000000..521c38ad89e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-spdifout.txt
@@ -0,0 +1,20 @@
+* Amlogic Audio SPDIF Output
+
+Required properties:
+- compatible: 'amlogic,axg-spdifout'
+- clocks: list of clock phandles, one for each entry in clock-names.
+- clock-names: should contain the following:
+ * "pclk" : peripheral clock.
+  * "mclk" : master clock.
+- #sound-dai-cells: must be 0.
+
+Example on the A113 SoC:
+
+spdifout: audio-controller@480 {
+ compatible = "amlogic,axg-spdifout";
+ reg = <0x0 0x480 0x0 0x50>;
+ #sound-dai-cells = <0>;
+ clocks = <&clkc_audio AUD_CLKID_SPDIFOUT>,
+ <&clkc_audio AUD_CLKID_SPDIFOUT_CLK>;
+ clock-names = "pclk", "mclk";
+};
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-formatters.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-formatters.txt
new file mode 100644
index 000000000000..1c1b7490554e
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-formatters.txt
@@ -0,0 +1,28 @@
+* Amlogic Audio TDM formatters
+
+Required properties:
+- compatible: 'amlogic,axg-tdmin' or
+ 'amlogic,axg-tdmout'
+- reg: physical base address of the controller and length of memory
+ mapped region.
+- clocks: list of clock phandles, one for each entry in clock-names.
+- clock-names: should contain the following:
+ * "pclk" : peripheral clock.
+ * "sclk" : bit clock.
+ * "sclk_sel" : bit clock input multiplexer.
+  * "lrclk"    : sample clock.
+  * "lrclk_sel": sample clock input multiplexer.
+
+Example of TDMOUT_A on the A113 SoC:
+
+tdmout_a: audio-controller@500 {
+ compatible = "amlogic,axg-tdmout";
+ reg = <0x0 0x500 0x0 0x40>;
+ clocks = <&clkc_audio AUD_CLKID_TDMOUT_A>,
+ <&clkc_audio AUD_CLKID_TDMOUT_A_SCLK>,
+ <&clkc_audio AUD_CLKID_TDMOUT_A_SCLK_SEL>,
+ <&clkc_audio AUD_CLKID_TDMOUT_A_LRCLK>,
+		 <&clkc_audio AUD_CLKID_TDMOUT_A_LRCLK_SEL>;
+ clock-names = "pclk", "sclk", "sclk_sel",
+ "lrclk", "lrclk_sel";
+};
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-iface.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-iface.txt
new file mode 100644
index 000000000000..cabfb26a5f22
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-iface.txt
@@ -0,0 +1,22 @@
+* Amlogic Audio TDM Interfaces
+
+Required properties:
+- compatible: 'amlogic,axg-tdm-iface'
+- clocks: list of clock phandles, one for each entry in clock-names.
+- clock-names: should contain the following:
+ * "sclk" : bit clock.
+  * "lrclk": sample clock.
+  * "mclk" : master clock.
+ -> optional if the interface is in clock slave mode.
+- #sound-dai-cells: must be 0.
+
+Example of TDM_A on the A113 SoC:
+
+tdmif_a: audio-controller@0 {
+ compatible = "amlogic,axg-tdm-iface";
+ #sound-dai-cells = <0>;
+ clocks = <&clkc_audio AUD_CLKID_MST_A_MCLK>,
+ <&clkc_audio AUD_CLKID_MST_A_SCLK>,
+ <&clkc_audio AUD_CLKID_MST_A_LRCLK>;
+ clock-names = "mclk", "sclk", "lrclk";
+};
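+
+A hypothetical interface in clock slave mode simply omits "mclk" (the
+master clock IDs below are assumptions for illustration):
+
+tdmif_b: audio-controller@1 {
+	compatible = "amlogic,axg-tdm-iface";
+	#sound-dai-cells = <0>;
+	clocks = <&clkc_audio AUD_CLKID_MST_B_SCLK>,
+		 <&clkc_audio AUD_CLKID_MST_B_LRCLK>;
+	clock-names = "sclk", "lrclk";
+};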
diff --git a/Documentation/devicetree/bindings/sound/atmel-i2s.txt b/Documentation/devicetree/bindings/sound/atmel-i2s.txt
index 735368b8a73f..40549f496a81 100644
--- a/Documentation/devicetree/bindings/sound/atmel-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/atmel-i2s.txt
@@ -15,7 +15,6 @@ Required properties:
- clock-names: Should be one of each entry matching the clocks phandles list:
- "pclk" (peripheral clock) Required.
- "gclk" (generated clock) Optional (1).
- - "aclk" (Audio PLL clock) Optional (1).
- "muxclk" (I2S mux clock) Optional (1).
Optional properties:
@@ -23,9 +22,9 @@ Optional properties:
- pinctrl-names: Should contain only one value - "default".
-(1) : Only the peripheral clock is required. The generated clock, the Audio
- PLL clock adn the I2S mux clock are optional and should only be set
- together, when Master Mode is required.
+(1) : Only the peripheral clock is required. The generated clock and the I2S
+ mux clock are optional and should only be set together, when Master Mode
+ is required.
Example:
@@ -40,8 +39,8 @@ Example:
(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
AT91_XDMAC_DT_PERID(32))>;
dma-names = "tx", "rx";
- clocks = <&i2s0_clk>, <&i2s0_gclk>, <&audio_pll_pmc>, <&i2s0muxck>;
- clock-names = "pclk", "gclk", "aclk", "muxclk";
+ clocks = <&i2s0_clk>, <&i2s0_gclk>, <&i2s0muxck>;
+ clock-names = "pclk", "gclk", "muxclk";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2s0_default>;
};
diff --git a/Documentation/devicetree/bindings/sound/audio-graph-card.txt b/Documentation/devicetree/bindings/sound/audio-graph-card.txt
index d04ea3b1a1dd..7e63e53a901c 100644
--- a/Documentation/devicetree/bindings/sound/audio-graph-card.txt
+++ b/Documentation/devicetree/bindings/sound/audio-graph-card.txt
@@ -18,6 +18,8 @@ Below are same as Simple-Card.
- bitclock-inversion
- frame-inversion
- mclk-fs
+- hp-det-gpio
+- mic-det-gpio
- dai-tdm-slot-num
- dai-tdm-slot-width
- clocks / system-clock-frequency
diff --git a/Documentation/devicetree/bindings/sound/cs35l33.txt b/Documentation/devicetree/bindings/sound/cs35l33.txt
index acfb47525b49..dc5a355d1a19 100644
--- a/Documentation/devicetree/bindings/sound/cs35l33.txt
+++ b/Documentation/devicetree/bindings/sound/cs35l33.txt
@@ -14,8 +14,6 @@ Optional properties:
- reset-gpios : gpio used to reset the amplifier
- - interrupt-parent : Specifies the phandle of the interrupt controller to
- which the IRQs from CS35L33 are delivered to.
- interrupts : IRQ line info CS35L33.
(See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
for further information relating to interrupt properties)
diff --git a/Documentation/devicetree/bindings/sound/cs35l34.txt b/Documentation/devicetree/bindings/sound/cs35l34.txt
index b218ead2e68e..2f7606b7d542 100644
--- a/Documentation/devicetree/bindings/sound/cs35l34.txt
+++ b/Documentation/devicetree/bindings/sound/cs35l34.txt
@@ -21,8 +21,6 @@ Optional properties:
- reset-gpios: GPIO used to reset the amplifier.
- - interrupt-parent : Specifies the phandle of the interrupt controller to
- which the IRQs from CS35L34 are delivered to.
- interrupts : IRQ line info CS35L34.
(See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
for further information relating to interrupt properties)
diff --git a/Documentation/devicetree/bindings/sound/cs35l35.txt b/Documentation/devicetree/bindings/sound/cs35l35.txt
index 77ee75c39233..7915897f8a81 100644
--- a/Documentation/devicetree/bindings/sound/cs35l35.txt
+++ b/Documentation/devicetree/bindings/sound/cs35l35.txt
@@ -10,8 +10,6 @@ Required properties:
as covered in
Documentation/devicetree/bindings/regulator/regulator.txt.
- - interrupt-parent : Specifies the phandle of the interrupt controller to
- which the IRQs from CS35L35 are delivered to.
- interrupts : IRQ line info CS35L35.
(See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
for further information relating to interrupt properties)
diff --git a/Documentation/devicetree/bindings/sound/cs42l42.txt b/Documentation/devicetree/bindings/sound/cs42l42.txt
index 9a2c5e2423d5..7dfaa2ab906f 100644
--- a/Documentation/devicetree/bindings/sound/cs42l42.txt
+++ b/Documentation/devicetree/bindings/sound/cs42l42.txt
@@ -15,9 +15,6 @@ Optional properties:
- reset-gpios : a GPIO spec for the reset pin. If specified, it will be
deasserted before communication to the codec starts.
- - interrupt-parent : Specifies the phandle of the interrupt controller to
- which the IRQs from CS42L42 are delivered to.
-
- interrupts : IRQ line info CS42L42.
(See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
for further information relating to interrupt properties)
@@ -107,4 +104,4 @@ cs42l42: cs42l42@48 {
cirrus,btn-det-event-dbnce = <10>;
cirrus,bias-lvls = <0x0F 0x08 0x04 0x01>;
cirrus,hs-bias-ramp-rate = <0x02>;
-}; \ No newline at end of file
+};
diff --git a/Documentation/devicetree/bindings/sound/da7218.txt b/Documentation/devicetree/bindings/sound/da7218.txt
index 3ab9dfef38d1..2cf30899bd0d 100644
--- a/Documentation/devicetree/bindings/sound/da7218.txt
+++ b/Documentation/devicetree/bindings/sound/da7218.txt
@@ -15,8 +15,6 @@ Required properties:
information relating to regulators)
Optional properties:
-- interrupt-parent: Specifies the phandle of the interrupt controller to which
- the IRQs from DA7218 are delivered to.
- interrupts: IRQ line info for DA7218 chip.
(See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt for
further information relating to interrupt properties)
diff --git a/Documentation/devicetree/bindings/sound/da7219.txt b/Documentation/devicetree/bindings/sound/da7219.txt
index c3df92d31c4b..e9d0baeb94e2 100644
--- a/Documentation/devicetree/bindings/sound/da7219.txt
+++ b/Documentation/devicetree/bindings/sound/da7219.txt
@@ -8,8 +8,6 @@ Required properties:
- compatible : Should be "dlg,da7219"
- reg: Specifies the I2C slave address
-- interrupt-parent : Specifies the phandle of the interrupt controller to which
- the IRQs from DA7219 are delivered to.
- interrupts : IRQ line info for DA7219.
(See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt for
further information relating to interrupt properties)
diff --git a/Documentation/devicetree/bindings/sound/dioo,dio2125.txt b/Documentation/devicetree/bindings/sound/dioo,dio2125.txt
deleted file mode 100644
index 63dbfe0f11d0..000000000000
--- a/Documentation/devicetree/bindings/sound/dioo,dio2125.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-DIO2125 Audio Driver
-
-Required properties:
-- compatible : "dioo,dio2125"
-- enable-gpios : the gpio connected to the enable pin of the dio2125
-
-Example:
-
-amp: analog-amplifier {
- compatible = "dioo,dio2125";
- enable-gpios = <&gpio GPIOH_3 0>;
-};
diff --git a/Documentation/devicetree/bindings/sound/everest,es7134.txt b/Documentation/devicetree/bindings/sound/everest,es7134.txt
index 5495a3cb8b7b..091666069bde 100644
--- a/Documentation/devicetree/bindings/sound/everest,es7134.txt
+++ b/Documentation/devicetree/bindings/sound/everest,es7134.txt
@@ -1,10 +1,15 @@
ES7134 i2s DA converter
Required properties:
-- compatible : "everest,es7134" or "everest,es7144"
+- compatible : "everest,es7134" or
+ "everest,es7144" or
+ "everest,es7154"
+- VDD-supply : regulator phandle for the VDD supply
+- PVDD-supply: regulator phandle for the PVDD supply for the es7154
Example:
i2s_codec: external-codec {
compatible = "everest,es7134";
+ VDD-supply = <&vcc_5v>;
};
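+
+A hypothetical es7154 node additionally provides the PVDD supply (the
+regulator phandles are assumptions for illustration):
+
+i2s_codec: external-codec {
+	compatible = "everest,es7154";
+	VDD-supply = <&vcc_3v3>;
+	PVDD-supply = <&vcc_5v>;
+};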
diff --git a/Documentation/devicetree/bindings/sound/everest,es7241.txt b/Documentation/devicetree/bindings/sound/everest,es7241.txt
new file mode 100644
index 000000000000..28f82cf4959f
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/everest,es7241.txt
@@ -0,0 +1,28 @@
+ES7241 i2s AD converter
+
+Required properties:
+- compatible : "everest,es7241"
+- VDDP-supply: regulator phandle for the VDDP supply
+- VDDA-supply: regulator phandle for the VDDA supply
+- VDDD-supply: regulator phandle for the VDDD supply
+
+Optional properties:
+- reset-gpios: gpio connected to the reset pin
+- m0-gpios : gpio connected to the m0 pin
+- m1-gpios : gpio connected to the m1 pin
+- everest,sdout-pull-down:
+ Format used by the serial interface is controlled by pulling
+ the sdout. If the sdout is pulled down, leftj format is used.
+	If this property is not provided, sdout is assumed to be pulled
+	up and i2s format is used.
+
+Example:
+
+linein: audio-codec@2 {
+ #sound-dai-cells = <0>;
+ compatible = "everest,es7241";
+ VDDA-supply = <&vcc_3v3>;
+ VDDP-supply = <&vcc_3v3>;
+ VDDD-supply = <&vcc_3v3>;
+ reset-gpios = <&gpio GPIOH_42>;
+};
diff --git a/Documentation/devicetree/bindings/sound/fsl,ssi.txt b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
index d415888e1316..7e15a85cecd2 100644
--- a/Documentation/devicetree/bindings/sound/fsl,ssi.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
@@ -18,8 +18,6 @@ Required properties:
encoded based on the information in section 2)
depending on the type of interrupt controller you
have.
-- interrupt-parent: The phandle for the interrupt controller that
- services interrupts for this device.
- fsl,fifo-depth: The number of elements in the transmit and receive FIFOs.
This number is the maximum allowed value for SFCSR[TFWM0].
- clocks: "ipg" - Required clock for the SSI unit
diff --git a/Documentation/devicetree/bindings/sound/marvell,pxa2xx-ac97.txt b/Documentation/devicetree/bindings/sound/marvell,pxa2xx-ac97.txt
new file mode 100644
index 000000000000..2ea85d5be6a4
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/marvell,pxa2xx-ac97.txt
@@ -0,0 +1,27 @@
+Marvell PXA2xx audio complex
+
+This description matches the AC97 controller found in the pxa2xx and pxa3xx
+series.
+
+Required properties:
+ - compatible: should be one of the following:
+ "marvell,pxa250-ac97"
+ "marvell,pxa270-ac97"
+ "marvell,pxa300-ac97"
+ - reg: device MMIO address space
+ - interrupts: single interrupt generated by AC97 IP
+ - clocks: input clock of the AC97 IP, refer to clock-bindings.txt
+
+Optional properties:
+ - pinctrl-names, pinctrl-0: refer to pinctrl-bindings.txt
+ - reset-gpios: gpio used for AC97 reset, refer to gpio.txt
+
+Example:
+ ac97: sound@40500000 {
+ compatible = "marvell,pxa250-ac97";
+ reg = < 0x40500000 0x1000 >;
+ interrupts = <14>;
+ reset-gpios = <&gpio 113 GPIO_ACTIVE_HIGH>;
+ #sound-dai-cells = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = < &pmux_ac97_default >;
+ };
diff --git a/Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt b/Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt
index 74c9ba6c2823..93b982e9419f 100644
--- a/Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt
+++ b/Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt
@@ -5,6 +5,14 @@ Required properties:
compatible Must be "mrvl,pxa-ssp-dai"
port A phandle reference to a PXA ssp upstream device
+Optional properties:
+
+ clock-names
+ clocks Through "clock-names" and "clocks", external clocks
+			can be configured. If a clock named "extclk" exists,
+			it will be set to the mclk rate of the audio stream
+			and be used as the clock provider of the DAI.
+
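+  For instance, a DAI node using an external clock could look like this
+  (a sketch; the node label and "extclk" provider phandle are assumptions):
+
+	ssp_dai0: ssp-dai {
+		compatible = "mrvl,pxa-ssp-dai";
+		port = <&ssp0>;
+		clocks = <&extclk>;
+		clock-names = "extclk";
+	};
+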
Example:
/* upstream device */
diff --git a/Documentation/devicetree/bindings/sound/mrvl,pxa2xx-pcm.txt b/Documentation/devicetree/bindings/sound/mrvl,pxa2xx-pcm.txt
deleted file mode 100644
index 551fbb8348c2..000000000000
--- a/Documentation/devicetree/bindings/sound/mrvl,pxa2xx-pcm.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-DT bindings for ARM PXA2xx PCM platform driver
-
-This is just a dummy driver that registers the PXA ASoC platform driver.
-It does not have any resources assigned.
-
-Required properties:
-
- - compatible 'mrvl,pxa-pcm-audio'
-
-Example:
-
- pxa_pcm_audio: snd_soc_pxa_audio {
- compatible = "mrvl,pxa-pcm-audio";
- };
-
diff --git a/Documentation/devicetree/bindings/sound/name-prefix.txt b/Documentation/devicetree/bindings/sound/name-prefix.txt
new file mode 100644
index 000000000000..645775908657
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/name-prefix.txt
@@ -0,0 +1,24 @@
+Name prefix:
+
+Cards implementing the routing property define the connections between
+audio components as a list of string pairs. Components using the same
+sink/source names may use the name prefix property to prepend the
+names of their sinks/sources with the provided string.
+
+Optional name prefix property:
+- sound-name-prefix : string used as a prefix for the sink/source names of
+		      the component.
+
+Example: Two instances of the same component.
+
+amp0: analog-amplifier@0 {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpio GPIOH_3 0>;
+ sound-name-prefix = "FRONT";
+};
+
+amp1: analog-amplifier@1 {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpio GPIOH_4 0>;
+ sound-name-prefix = "BACK";
+};
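+
+With the prefixes above, a card routing list would then reference the
+amplifier widgets by their prefixed names. A sketch (the codec pin names
+are assumptions):
+
+	audio-routing = "FRONT INL", "AOUTL",
+			"BACK INL", "AOUTR";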
diff --git a/Documentation/devicetree/bindings/sound/omap-dmic.txt b/Documentation/devicetree/bindings/sound/omap-dmic.txt
index fd8105f18978..418e30e72e89 100644
--- a/Documentation/devicetree/bindings/sound/omap-dmic.txt
+++ b/Documentation/devicetree/bindings/sound/omap-dmic.txt
@@ -6,7 +6,6 @@ Required properties:
<MPU access base address, size>,
<L3 interconnect address, size>;
- interrupts: Interrupt number for DMIC
-- interrupt-parent: The parent interrupt controller
- ti,hwmods: Name of the hwmod associated with OMAP dmic IP
Example:
diff --git a/Documentation/devicetree/bindings/sound/omap-mcbsp.txt b/Documentation/devicetree/bindings/sound/omap-mcbsp.txt
index 17cce4490456..ae8bf703ce7a 100644
--- a/Documentation/devicetree/bindings/sound/omap-mcbsp.txt
+++ b/Documentation/devicetree/bindings/sound/omap-mcbsp.txt
@@ -15,7 +15,6 @@ Required properties:
<TX irq>,
<RX irq>;
- interrupt-names: Array of strings associated with the interrupt numbers
-- interrupt-parent: The parent interrupt controller
- ti,buffer-size: Size of the FIFO on the port (OMAP2430 and newer SoC)
- ti,hwmods: Name of the hwmod associated to the McBSP port
diff --git a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
index 0741dff048dd..5f4e68ca228c 100644
--- a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
+++ b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
@@ -6,7 +6,6 @@ Required properties:
<MPU access base address, size>,
<L3 interconnect address, size>;
- interrupts: Interrupt number for McPDM
-- interrupt-parent: The parent interrupt controller
- ti,hwmods: Name of the hwmod associated to the McPDM
Example:
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8096.txt b/Documentation/devicetree/bindings/sound/qcom,apq8096.txt
index c7600a93ab39..c814e867850f 100644
--- a/Documentation/devicetree/bindings/sound/qcom,apq8096.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,apq8096.txt
@@ -7,7 +7,7 @@ This binding describes the APQ8096 sound card, which uses qdsp for audio.
Value type: <stringlist>
Definition: must be "qcom,apq8096-sndcard"
-- qcom,audio-routing:
+- audio-routing:
Usage: Optional
Value type: <stringlist>
Definition: A list of the connections between audio components.
@@ -49,6 +49,12 @@ This binding describes the APQ8096 sound card, which uses qdsp for audio.
"DMIC1"
"DMIC2"
"DMIC3"
+
+- model:
+ Usage: required
+ Value type: <stringlist>
+ Definition: The user-visible name of this sound card.
+
= dailinks
Each subnode of sndcard represents either a dailink, and subnodes of each
dailinks would be cpu/codec/platform dais.
@@ -79,11 +85,16 @@ dailinks would be cpu/codec/platform dais.
Value type: <phandle with arguments>
Definition: dai phandle/s and port of CPU/CODEC/PLATFORM node.
+Obsolete:
+ qcom,model: String for soundcard name (Use model instead)
+ qcom,audio-routing: A list of the connections between audio components.
+ (Use audio-routing instead)
+
Example:
audio {
compatible = "qcom,apq8096-sndcard";
- qcom,model = "DB820c";
+ model = "DB820c";
mm1-dai-link {
link-name = "MultiMedia1";
diff --git a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
index 551ecab67efe..fdcea3d12ee5 100644
--- a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
@@ -7,7 +7,6 @@ Bindings for codec Analog IP which is integrated in pmic pm8916,
Required properties
- compatible = "qcom,pm8916-wcd-analog-codec";
- reg: represents the slave base address provided to the peripheral.
- - interrupt-parent : The parent interrupt controller.
- interrupts: List of interrupts in given SPMI peripheral.
- interrupt-names: Names specified to above list of interrupts in same
order. List of supported interrupt names are:
diff --git a/Documentation/devicetree/bindings/sound/qcom,q6adm.txt b/Documentation/devicetree/bindings/sound/qcom,q6adm.txt
index cb709e5dbc44..bbae426cdfb1 100644
--- a/Documentation/devicetree/bindings/sound/qcom,q6adm.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,q6adm.txt
@@ -18,6 +18,11 @@ used by the apr service device.
= ADM routing
"routing" subnode of the ADM node represents adm routing specific configuration
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,q6adm-routing".
+
- #sound-dai-cells
Usage: required
Value type: <u32>
@@ -28,6 +33,7 @@ q6adm@8 {
compatible = "qcom,q6adm";
reg = <APR_SVC_ADM>;
q6routing: routing {
+ compatible = "qcom,q6adm-routing";
#sound-dai-cells = <0>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/qcom,q6afe.txt b/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
index bdbf87df8c0b..a8179409c194 100644
--- a/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
@@ -17,6 +17,11 @@ used by all apr services. Must contain the following properties.
subnode of "dais" representing board specific dai setup.
"dais" node should have following properties followed by dai children.
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,q6afe-dais"
+
- #sound-dai-cells
Usage: required
Value type: <u32>
@@ -100,6 +105,7 @@ q6afe@4 {
reg = <APR_SVC_AFE>;
dais {
+ compatible = "qcom,q6afe-dais";
#sound-dai-cells = <1>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/sound/qcom,q6asm.txt b/Documentation/devicetree/bindings/sound/qcom,q6asm.txt
index 2178eb91146f..f9c7bd8c1bc0 100644
--- a/Documentation/devicetree/bindings/sound/qcom,q6asm.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,q6asm.txt
@@ -17,6 +17,11 @@ used by the apr service device.
= ASM DAIs (Digital Audio Interface)
"dais" subnode of the ASM node represents dai specific configuration
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,q6asm-dais".
+
- #sound-dai-cells
Usage: required
Value type: <u32>
@@ -28,6 +33,7 @@ q6asm@7 {
compatible = "qcom,q6asm";
reg = <APR_SVC_ASM>;
q6asmdai: dais {
+ compatible = "qcom,q6asm-dais";
#sound-dai-cells = <1>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/qcom,sdm845.txt b/Documentation/devicetree/bindings/sound/qcom,sdm845.txt
new file mode 100644
index 000000000000..408c4837e6d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,sdm845.txt
@@ -0,0 +1,80 @@
+* Qualcomm Technologies Inc. SDM845 ASoC sound card driver
+
+This binding describes the SDM845 sound card, which uses qdsp for audio.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,sdm845-sndcard"
+
+- audio-routing:
+ Usage: Optional
+ Value type: <stringlist>
+ Definition: A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. Valid names could be power supplies, MicBias
+ of codec and the jacks on the board.
+		    of the codec and the jacks on the board.
+- model:
+ Usage: required
+ Value type: <stringlist>
+ Definition: The user-visible name of this sound card.
+
+= dailinks
+Each subnode of sndcard represents a dai link, and subnodes of each
+dai link are cpu/codec/platform dais.
+
+- link-name:
+ Usage: required
+ Value type: <string>
+ Definition: User friendly name for dai link
+
+= CPU, PLATFORM, CODEC dais subnodes
+- cpu:
+ Usage: required
+ Value type: <subnode>
+ Definition: cpu dai sub-node
+
+- codec:
+ Usage: required
+ Value type: <subnode>
+ Definition: codec dai sub-node
+
+- platform:
+ Usage: Optional
+ Value type: <subnode>
+ Definition: platform dai sub-node
+
+- sound-dai:
+ Usage: required
+ Value type: <phandle>
+ Definition: dai phandle/s and port of CPU/CODEC/PLATFORM node.
+
+Example:
+
+audio {
+ compatible = "qcom,sdm845-sndcard";
+ model = "sdm845-snd-card";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pri_mi2s_active &pri_mi2s_ws_active>;
+ pinctrl-1 = <&pri_mi2s_sleep &pri_mi2s_ws_sleep>;
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ pri-mi2s-dai-link {
+ link-name = "PRI MI2S Playback";
+ cpu {
+ sound-dai = <&q6afedai PRIMARY_MI2S_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
new file mode 100644
index 000000000000..1d8d49e30af7
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
@@ -0,0 +1,123 @@
+QCOM WCD9335 Codec
+
+Qualcomm WCD9335 Codec is a standalone Hi-Fi audio codec IC that supports
+Qualcomm Technologies, Inc. (QTI) multimedia solutions, including
+the MSM8996, MSM8976, and MSM8956 chipsets. It has a built-in
+Soundwire controller and interrupt mux. It supports both I2S/I2C and
+SLIMbus audio interfaces.
+
+Required properties with SLIMbus Interface:
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+	Definition: For the SLIMbus interface it should be "slimMID,PID",
+		    the textual representation of the Manufacturer ID and
+		    Product Code, in lower case hexadecimal with leading
+		    zeroes suppressed. Refer to slimbus/bus.txt for details.
+ Should be:
+ "slim217,1a0" for MSM8996 and APQ8096 SoCs with SLIMbus.
+
+- reg
+ Usage: required
+ Value type: <u32 u32>
+ Definition: Should be ('Device index', 'Instance ID')
+
+- interrupts
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Interrupts via WCD INTR1 and INTR2 pins
+
+- interrupt-names:
+ Usage: required
+ Value type: <String array>
+ Definition: Interrupt names of WCD INTR1 and INTR2
+ Should be: "intr1", "intr2"
+
+- reset-gpio:
+ Usage: required
+	Value type: <prop-encoded-array>
+ Definition: Reset gpio line
+
+- qcom,ifd:
+ Usage: required
+ Value type: <phandle>
+ Definition: SLIM interface device
+
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: See clock-bindings.txt section "consumers". List of
+ three clock specifiers for mclk, mclk2 and slimbus clock.
+
+- clock-names:
+ Usage: required
+ Value type: <string>
+ Definition: Must contain "mclk", "mclk2" and "slimbus" strings.
+
+- vdd-buck-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain a reference to the 1.8V buck supply
+
+- vdd-buck-sido-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain a reference to the 1.8V SIDO buck supply
+
+- vdd-rx-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain a reference to the 1.8V rx supply
+
+- vdd-tx-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain a reference to the 1.8V tx supply
+
+- vdd-vbat-supply:
+ Usage: Optional
+ Value type: <phandle>
+ Definition: Should contain a reference to the vbat supply
+
+- vdd-micbias-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain a reference to the micbias supply
+
+- vdd-io-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain a reference to the 1.8V io supply
+
+- interrupt-controller:
+ Usage: required
+	Definition: Indicating that this is an interrupt controller
+
+- #interrupt-cells:
+ Usage: required
+ Value type: <int>
+ Definition: should be 1
+
+#sound-dai-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 1
+
+Example:
+
+codec@1 {
+ compatible = "slim217,1a0";
+ reg = <1 0>;
+ interrupts = <&msmgpio 54 IRQ_TYPE_LEVEL_HIGH>;
+	interrupt-names = "intr2";
+ reset-gpio = <&msmgpio 64 0>;
+ qcom,ifd = <&wc9335_ifd>;
+ clock-names = "mclk", "native";
+ clocks = <&rpmcc RPM_SMD_DIV_CLK1>,
+ <&rpmcc RPM_SMD_BB_CLK1>;
+ vdd-buck-supply = <&pm8994_s4>;
+ vdd-rx-supply = <&pm8994_s4>;
+ vdd-buck-sido-supply = <&pm8994_s4>;
+ vdd-tx-supply = <&pm8994_s4>;
+ vdd-io-supply = <&pm8994_s4>;
+ #sound-dai-cells = <1>;
+};
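+
+The SLIM interface device referenced by qcom,ifd is itself a SLIMbus
+device node. A minimal sketch (the enumeration address below is an
+assumption for this example):
+
+wc9335_ifd: ifd@0 {
+	compatible = "slim217,1a0";
+	reg = <0 0>;
+};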
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index b86d790f630f..9e764270c36b 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -352,6 +352,7 @@ Required properties:
- "renesas,rcar_sound-r8a7794" (R-Car E2)
- "renesas,rcar_sound-r8a7795" (R-Car H3)
- "renesas,rcar_sound-r8a7796" (R-Car M3-W)
+ - "renesas,rcar_sound-r8a77965" (R-Car M3-N)
- reg : Should contain the register physical address.
required register is
SRU/ADG/SSI if generation1
diff --git a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
index b208a752576c..54aefab71f2c 100644
--- a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
@@ -7,6 +7,7 @@ Required properties:
- compatible: should be one of the following:
- "rockchip,rk3066-i2s": for rk3066
+ - "rockchip,px30-i2s", "rockchip,rk3066-i2s": for px30
- "rockchip,rk3036-i2s", "rockchip,rk3066-i2s": for rk3036
- "rockchip,rk3188-i2s", "rockchip,rk3066-i2s": for rk3188
- "rockchip,rk3228-i2s", "rockchip,rk3066-i2s": for rk3228
diff --git a/Documentation/devicetree/bindings/sound/rt5514.txt b/Documentation/devicetree/bindings/sound/rt5514.txt
index b25ed08c7a5a..d2cc171f22f2 100644
--- a/Documentation/devicetree/bindings/sound/rt5514.txt
+++ b/Documentation/devicetree/bindings/sound/rt5514.txt
@@ -14,7 +14,6 @@ Optional properties:
- clocks: The phandle of the master clock to the CODEC
- clock-names: Should be "mclk"
-- interrupt-parent: The phandle for the interrupt controller.
- interrupts: The interrupt number to the cpu. The interrupt specifier format
depends on the interrupt controller.
diff --git a/Documentation/devicetree/bindings/sound/rt5682.txt b/Documentation/devicetree/bindings/sound/rt5682.txt
new file mode 100644
index 000000000000..312e9a129530
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rt5682.txt
@@ -0,0 +1,50 @@
+RT5682 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+- compatible : "realtek,rt5682" or "realtek,rt5682i"
+
+- reg : The I2C address of the device.
+
+Optional properties:
+
+- interrupts : The CODEC's interrupt output.
+
+- realtek,dmic1-data-pin
+ 0: dmic1 is not used
+ 1: using GPIO2 pin as dmic1 data pin
+ 2: using GPIO5 pin as dmic1 data pin
+
+- realtek,dmic1-clk-pin
+ 0: using GPIO1 pin as dmic1 clock pin
+ 1: using GPIO3 pin as dmic1 clock pin
+
+- realtek,jd-src
+ 0: No JD is used
+ 1: using JD1 as JD source
+
+- realtek,ldo1-en-gpios : The GPIO that controls the CODEC's LDO1_EN pin.
+
+Pins on the device (for linking into audio routes) for RT5682:
+
+ * DMIC L1
+ * DMIC R1
+ * IN1P
+ * HPOL
+ * HPOR
+
+Example:
+
+rt5682 {
+ compatible = "realtek,rt5682i";
+ reg = <0x1a>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(U, 6) GPIO_ACTIVE_HIGH>;
+ realtek,ldo1-en-gpios =
+ <&gpio TEGRA_GPIO(R, 2) GPIO_ACTIVE_HIGH>;
+ realtek,dmic1-data-pin = <1>;
+ realtek,dmic1-clk-pin = <1>;
+ realtek,jd-src = <1>;
+};
diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.txt b/Documentation/devicetree/bindings/sound/sgtl5000.txt
index 0f214457476f..9c58f724396a 100644
--- a/Documentation/devicetree/bindings/sound/sgtl5000.txt
+++ b/Documentation/devicetree/bindings/sound/sgtl5000.txt
@@ -17,7 +17,7 @@ Optional properties:
- VDDD-supply : the regulator provider of VDDD
-- micbias-resistor-k-ohms : the bias resistor to be used in kOmhs
+- micbias-resistor-k-ohms : the bias resistor to be used in kOhms
The resistor can take values of 2k, 4k or 8k.
If set to 0 it will be off.
If this node is not mentioned or if the value is unknown, then
diff --git a/Documentation/devicetree/bindings/sound/simple-amplifier.txt b/Documentation/devicetree/bindings/sound/simple-amplifier.txt
new file mode 100644
index 000000000000..8647edae7af0
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/simple-amplifier.txt
@@ -0,0 +1,12 @@
+Simple Amplifier Audio Driver
+
+Required properties:
+- compatible : "dioo,dio2125" or "simple-audio-amplifier"
+- enable-gpios : the gpio connected to the enable pin of the simple amplifier
+
+Example:
+
+amp: analog-amplifier {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpio GPIOH_3 0>;
+};
diff --git a/Documentation/devicetree/bindings/sound/tas571x.txt b/Documentation/devicetree/bindings/sound/tas571x.txt
index b4959f10b74b..7c8fd37c2f9e 100644
--- a/Documentation/devicetree/bindings/sound/tas571x.txt
+++ b/Documentation/devicetree/bindings/sound/tas571x.txt
@@ -7,6 +7,7 @@ powerdown (optional).
Required properties:
- compatible: should be one of the following:
+ - "ti,tas5707"
- "ti,tas5711",
- "ti,tas5717",
- "ti,tas5719",
diff --git a/Documentation/devicetree/bindings/sound/ts3a227e.txt b/Documentation/devicetree/bindings/sound/ts3a227e.txt
index a836881d9608..3ed8359144d3 100644
--- a/Documentation/devicetree/bindings/sound/ts3a227e.txt
+++ b/Documentation/devicetree/bindings/sound/ts3a227e.txt
@@ -10,7 +10,6 @@ Required properties:
- compatible: Should contain "ti,ts3a227e".
- reg: The i2c address. Should contain <0x3b>.
- - interrupt-parent: The parent interrupt controller
- interrupts: Interrupt number for /INT pin from the 227e
Optional properies:
diff --git a/Documentation/devicetree/bindings/sound/ux500-msp.txt b/Documentation/devicetree/bindings/sound/ux500-msp.txt
index 99acd9c774e1..7dd1b96160f5 100644
--- a/Documentation/devicetree/bindings/sound/ux500-msp.txt
+++ b/Documentation/devicetree/bindings/sound/ux500-msp.txt
@@ -6,7 +6,6 @@ Required properties:
Optional properties:
- interrupts : The interrupt output from the device.
- - interrupt-parent : The parent interrupt controller.
- <name>-supply : Phandle to the regulator <name> supply
Example:
diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt
index 4a9dead1b7d3..68cccc4653ba 100644
--- a/Documentation/devicetree/bindings/sound/wm8994.txt
+++ b/Documentation/devicetree/bindings/sound/wm8994.txt
@@ -26,7 +26,6 @@ Optional properties:
- interrupt-controller : These devices contain interrupt controllers
and may provide interrupt services to other devices if they have an
interrupt line connected.
- - interrupt-parent : The parent interrupt controller.
- #interrupt-cells: the number of cells to describe an IRQ, this should be 2.
The first cell is the IRQ number.
The second cell is the flags, encoded as the trigger masks from
@@ -57,6 +56,12 @@ Optional properties:
- wlf,ldoena-always-driven : If present LDOENA is always driven.
+ - wlf,spkmode-pu : If present enable the internal pull-up resistor on
+ the SPKMODE pin.
+
+ - wlf,csnaddr-pd : If present enable the internal pull-down resistor on
+ the CS/ADDR pin.
+
Example:
wm8994: codec@1a {
diff --git a/Documentation/devicetree/bindings/spi/fsl-spi.txt b/Documentation/devicetree/bindings/spi/fsl-spi.txt
index a2331372068c..8854004a1d3a 100644
--- a/Documentation/devicetree/bindings/spi/fsl-spi.txt
+++ b/Documentation/devicetree/bindings/spi/fsl-spi.txt
@@ -12,8 +12,6 @@ Required properties:
information for the interrupt. This should be encoded based on
the information in section 2) depending on the type of interrupt
controller you have.
-- interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
- clock-frequency : input clock frequency to non FSL_SOC cores
Optional properties:
diff --git a/Documentation/devicetree/bindings/spi/sh-hspi.txt b/Documentation/devicetree/bindings/spi/sh-hspi.txt
index 585fed90376e..b9d1e4d11a77 100644
--- a/Documentation/devicetree/bindings/spi/sh-hspi.txt
+++ b/Documentation/devicetree/bindings/spi/sh-hspi.txt
@@ -6,8 +6,6 @@ Required properties:
- "renesas,hspi-r8a7778" (R-Car M1)
- "renesas,hspi-r8a7779" (R-Car H1)
- reg : Offset and length of the register set for the device
-- interrupt-parent : The phandle for the interrupt controller that
- services interrupts for this device
- interrupts : Interrupt specifier
- #address-cells : Must be <1>
- #size-cells : Must be <0>
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index 39806329c193..bfbc2035fb6b 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -29,8 +29,6 @@ Required properties:
If two register sets are present, the first is to be
used by the CPU, and the second is to be used by the
DMA engine.
-- interrupt-parent : The phandle for the interrupt controller that
- services interrupts for this device
- interrupts : Interrupt specifier
- #address-cells : Must be <1>
- #size-cells : Must be <0>
diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
index 204b311e0400..642d3fb1ef85 100644
--- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
+++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
@@ -1,8 +1,10 @@
Synopsys DesignWare AMBA 2.0 Synchronous Serial Interface.
Required properties:
-- compatible : "snps,dw-apb-ssi"
-- reg : The register base for the controller.
+- compatible : "snps,dw-apb-ssi" or "mscc,<soc>-spi", where soc is "ocelot" or
+ "jaguar2"
+- reg : The register base for the controller. For "mscc,<soc>-spi", a second
+ register set is required (named ICPU_CFG:SPI_MST)
- interrupts : One interrupt, used by the controller.
- #address-cells : <1>, as required by generic SPI binding.
- #size-cells : <0>, also as required by generic SPI binding.
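+
+Example (a sketch of the "mscc,ocelot-spi" variant; the addresses, sizes
+and interrupt below are assumptions for illustration):
+
+spi@101000 {
+	compatible = "mscc,ocelot-spi";
+	reg = <0x101000 0x100>, <0x3c 0x18>;
+	interrupts = <9>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+};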
diff --git a/Documentation/devicetree/bindings/spi/spi-cadence.txt b/Documentation/devicetree/bindings/spi/spi-cadence.txt
index 94f09141a4f0..05a2ef945664 100644
--- a/Documentation/devicetree/bindings/spi/spi-cadence.txt
+++ b/Documentation/devicetree/bindings/spi/spi-cadence.txt
@@ -6,7 +6,6 @@ Required properties:
- reg : Physical base address and size of SPI registers map.
- interrupts : Property with a value describing the interrupt
number.
-- interrupt-parent : Must be core interrupt controller
- clock-names : List of input clock names - "ref_clk", "pclk"
(See clock bindings for details).
- clocks : Clock phandles (see clock bindings for details).
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
index 225ace1d0c65..4af132606b37 100644
--- a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible :
- "fsl,imx7ulp-spi" for LPSPI compatible with the one integrated on i.MX7ULP soc
- reg : address and length of the lpspi master registers
-- interrupt-parent : core interrupt controller
- interrupts : lpspi interrupt
- clocks : lpspi clock specifier
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.txt b/Documentation/devicetree/bindings/spi/spi-rockchip.txt
index 6e3ffacbba32..a0edac12d8df 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.txt
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.txt
@@ -7,6 +7,7 @@ Required Properties:
- compatible: should be one of the following.
"rockchip,rv1108-spi" for rv1108 SoCs.
+ "rockchip,px30-spi", "rockchip,rk3066-spi" for px30 SoCs.
"rockchip,rk3036-spi" for rk3036 SoCS.
"rockchip,rk3066-spi" for rk3066 SoCs.
"rockchip,rk3188-spi" for rk3188 SoCs.
diff --git a/Documentation/devicetree/bindings/spi/spi-rspi.txt b/Documentation/devicetree/bindings/spi/spi-rspi.txt
index 3b02b3a7cfb2..96fd58548f69 100644
--- a/Documentation/devicetree/bindings/spi/spi-rspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-rspi.txt
@@ -28,8 +28,6 @@ Required properties:
- "rx" for SPRI,
- "tx" to SPTI,
- "mux" for a single muxed interrupt.
-- interrupt-parent : The phandle for the interrupt controller that
- services interrupts for this device.
- num-cs : Number of chip selects. Some RSPI cores have more than 1.
- #address-cells : Must be <1>
- #size-cells : Must be <0>
diff --git a/Documentation/devicetree/bindings/spi/spi-uniphier.txt b/Documentation/devicetree/bindings/spi/spi-uniphier.txt
new file mode 100644
index 000000000000..504a4ecfc7b1
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-uniphier.txt
@@ -0,0 +1,22 @@
+Socionext UniPhier SPI controller driver
+
+UniPhier SoCs have SCSSI, which supports a single SPI channel.
+
+Required properties:
+ - compatible: should be "socionext,uniphier-scssi"
+ - reg: address and length of the spi master registers
+ - #address-cells: must be <1>, see spi-bus.txt
+ - #size-cells: must be <0>, see spi-bus.txt
+ - clocks: A phandle to the clock for the device.
+ - resets: A phandle to the reset control for the device.
+
+Example:
+
+spi0: spi@54006000 {
+ compatible = "socionext,uniphier-scssi";
+ reg = <0x54006000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&peri_clk 11>;
+ resets = <&peri_rst 11>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.txt b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
index 7bf61efc66c8..dc924a5f71db 100644
--- a/Documentation/devicetree/bindings/spi/spi-xilinx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
@@ -6,7 +6,6 @@ Required properties:
- reg : Physical base address and size of SPI registers map.
- interrupts : Property with a value describing the interrupt
number.
-- interrupt-parent : Must be core interrupt controller
Optional properties:
- xlnx,num-ss-bits : Number of chip selects used.
diff --git a/Documentation/devicetree/bindings/spi/spi-xlp.txt b/Documentation/devicetree/bindings/spi/spi-xlp.txt
index 40e82d51efec..f4925ec0ed33 100644
--- a/Documentation/devicetree/bindings/spi/spi-xlp.txt
+++ b/Documentation/devicetree/bindings/spi/spi-xlp.txt
@@ -13,7 +13,6 @@ Required properties:
- reg : Should contain register location and length.
- clocks : Phandle of the spi clock
- interrupts : Interrupt number used by this controller.
-- interrupt-parent : Phandle of the parent interrupt controller.
SPI slave nodes must be children of the SPI master node and can contain
properties described in Documentation/devicetree/bindings/spi/spi-bus.txt.
diff --git a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
index c8f50e5cf70b..0f6d37ff541c 100644
--- a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
@@ -6,7 +6,6 @@ Required properties:
- reg : Physical base address and size of GQSPI registers map.
- interrupts : Property with a value describing the interrupt
number.
-- interrupt-parent : Must be core interrupt controller.
- clock-names : List of input clock names - "ref_clk", "pclk"
(See clock bindings for details).
- clocks : Clock phandles (see clock bindings for details).
diff --git a/Documentation/devicetree/bindings/sram/sram.txt b/Documentation/devicetree/bindings/sram/sram.txt
index 267da4410aef..e98908bd4227 100644
--- a/Documentation/devicetree/bindings/sram/sram.txt
+++ b/Documentation/devicetree/bindings/sram/sram.txt
@@ -50,6 +50,8 @@ Optional properties in the area nodes:
manipulation of the page attributes.
- label : the name for the reserved partition, if omitted, the label
is taken from the node name excluding the unit address.
+- clocks : a list of phandle and clock specifier pairs that control the
+  single SRAM clock.
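+
+  For instance, an area node gating its part of the SRAM could look like
+  this (a sketch; the clock specifier is an assumption):
+
+	sram-section@0 {
+		reg = <0x0 0x1000>;
+		clocks = <&clkc 17>;
+	};
+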
Example:
diff --git a/Documentation/devicetree/bindings/staging/iio/adc/spear-adc.txt b/Documentation/devicetree/bindings/staging/iio/adc/spear-adc.txt
index 02ea23a63f20..88bc94fe1f6d 100644
--- a/Documentation/devicetree/bindings/staging/iio/adc/spear-adc.txt
+++ b/Documentation/devicetree/bindings/staging/iio/adc/spear-adc.txt
@@ -3,8 +3,6 @@
Required properties:
- compatible: Should be "st,spear600-adc"
- reg: Address and length of the register set for the device
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the ADC interrupt
- sampling-frequency: Default sampling frequency
diff --git a/Documentation/devicetree/bindings/thermal/armada-thermal.txt b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
index e0d013a2e66d..f3b441100890 100644
--- a/Documentation/devicetree/bindings/thermal/armada-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
@@ -10,6 +10,11 @@ Required properties:
* marvell,armada-ap806-thermal
* marvell,armada-cp110-thermal
+Note: these bindings are deprecated for AP806/CP110 and should instead
+follow the rules described in:
+Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
+Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt
+
- reg: Device's register space.
Two entries are expected, see the examples below. The first one points
to the status register (4B). The second one points to the control
diff --git a/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt b/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt
index 9d43553a8d39..43a9ed545944 100644
--- a/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt
+++ b/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt
@@ -7,7 +7,6 @@ Required properties:
- reg: address range for the AVS TMON registers
- interrupts: temperature monitor interrupt, for high/low threshold triggers
- interrupt-names: should be "tmon"
-- interrupt-parent: the parent interrupt controller
Example:
diff --git a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
index ad648d93d961..33004ce7e5df 100644
--- a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
@@ -13,7 +13,6 @@
Exynos5420 (Must pass triminfo base and triminfo clock)
"samsung,exynos5433-tmu"
"samsung,exynos7-tmu"
-- interrupt-parent : The phandle for the interrupt controller
- reg : Address range of the thermal registers. For soc's which has multiple
instances of TMU and some registers are shared across all TMU's like
interrupt related then 2 set of register has to supplied. First set
diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.txt b/Documentation/devicetree/bindings/thermal/qcom-tsens.txt
index 06195e8f35e2..1d9e8cf61018 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.txt
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.txt
@@ -1,18 +1,28 @@
* QCOM SoC Temperature Sensor (TSENS)
Required properties:
-- compatible :
- - "qcom,msm8916-tsens" : For 8916 Family of SoCs
- - "qcom,msm8974-tsens" : For 8974 Family of SoCs
- - "qcom,msm8996-tsens" : For 8996 Family of SoCs
+- compatible:
+ Must be one of the following:
+ - "qcom,msm8916-tsens" (MSM8916)
+ - "qcom,msm8974-tsens" (MSM8974)
+ - "qcom,msm8996-tsens" (MSM8996)
+ - "qcom,msm8998-tsens", "qcom,tsens-v2" (MSM8998)
+ - "qcom,sdm845-tsens", "qcom,tsens-v2" (SDM845)
+  The generic "qcom,tsens-v2" compatible must be used as a fallback for any
+  SoC with version 2 of the TSENS IP. MSM8996 is the only exception because
+  the generic compatible did not exist when support was added.
+
+- reg: Address range of the thermal registers.
+ New platforms containing v2.x.y of the TSENS IP must specify the SROT and TM
+ register spaces separately, with order being TM before SROT.
+ See Example 2, below.
-- reg: Address range of the thermal registers
- #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description.
- #qcom,sensors: Number of sensors in tsens block
- Refer to Documentation/devicetree/bindings/nvmem/nvmem.txt to know how to specify
nvmem cells
-Example:
+Example 1 (legacy support before a fallback tsens-v2 compatible was introduced):
tsens: thermal-sensor@900000 {
compatible = "qcom,msm8916-tsens";
reg = <0x4a8000 0x2000>;
@@ -20,3 +30,12 @@ tsens: thermal-sensor@900000 {
nvmem-cell-names = "caldata", "calsel";
#thermal-sensor-cells = <1>;
};
+
+Example 2 (for any platform containing v2 of the TSENS IP):
+tsens0: thermal-sensor@c263000 {
+ compatible = "qcom,sdm845-tsens", "qcom,tsens-v2";
+ reg = <0xc263000 0x1ff>, /* TM */
+ <0xc222000 0x1ff>; /* SROT */
+ #qcom,sensors = <13>;
+ #thermal-sensor-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt b/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt
index 904a5846d7ac..e698e3488735 100644
--- a/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt
+++ b/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt
@@ -4,7 +4,6 @@ Required properties:
- compatible : should be "altr,timer-1.0"
- reg : Specifies base physical address and size of the registers.
-- interrupt-parent: phandle of the interrupt controller
- interrupts : Should contain the timer interrupt number
- clock-frequency : The frequency of the clock that drives the counter, in Hz.
diff --git a/Documentation/devicetree/bindings/timer/fsl,gtm.txt b/Documentation/devicetree/bindings/timer/fsl,gtm.txt
index 9a33efded4bc..fc1c571f7412 100644
--- a/Documentation/devicetree/bindings/timer/fsl,gtm.txt
+++ b/Documentation/devicetree/bindings/timer/fsl,gtm.txt
@@ -7,7 +7,6 @@ Required properties:
"fsl,<chip>-cpm2-gtm", "fsl,cpm2-gtm", "fsl,gtm" for CPM2 GTMs
- reg : should contain gtm registers location and length (0x40).
- interrupts : should contain four interrupts.
- - interrupt-parent : interrupt source phandle.
- clock-frequency : specifies the frequency driving the timer.
Example:
diff --git a/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt b/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt
index 62bb8260cf6a..cd1a0c256f94 100644
--- a/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt
+++ b/Documentation/devicetree/bindings/timer/marvell,orion-timer.txt
@@ -3,7 +3,6 @@ Marvell Orion SoC timer
Required properties:
- compatible: shall be "marvell,orion-timer"
- reg: base address of the timer register starting with TIMERS CONTROL register
-- interrupt-parent: phandle of the bridge interrupt controller
- interrupts: should contain the interrupts for Timer0 and Timer1
- clocks: phandle of timer reference clock (tclk)
diff --git a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
index b1fe7e9de1b4..18d4d0166c76 100644
--- a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
+++ b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
@@ -1,19 +1,25 @@
-Mediatek MT6577, MT6572 and MT6589 Timers
----------------------------------------
+Mediatek Timers
+---------------
+
+Mediatek SoCs provide two different timers, depending on the platform:
+- GPT (General Purpose Timer)
+- SYST (System Timer)
+
+The proper timer is selected automatically by the driver (a SYST node is
+sketched after the GPT example below).
Required properties:
- compatible should contain:
- * "mediatek,mt2701-timer" for MT2701 compatible timers
- * "mediatek,mt6580-timer" for MT6580 compatible timers
- * "mediatek,mt6589-timer" for MT6589 compatible timers
- * "mediatek,mt7623-timer" for MT7623 compatible timers
- * "mediatek,mt8127-timer" for MT8127 compatible timers
- * "mediatek,mt8135-timer" for MT8135 compatible timers
- * "mediatek,mt8173-timer" for MT8173 compatible timers
- * "mediatek,mt6577-timer" for MT6577 and all above compatible timers
-- reg: Should contain location and length for timers register.
-- clocks: Clocks driving the timer hardware. This list should include two
- clocks. The order is system clock and as second clock the RTC clock.
+ * "mediatek,mt2701-timer" for MT2701 compatible timers (GPT)
+ * "mediatek,mt6580-timer" for MT6580 compatible timers (GPT)
+ * "mediatek,mt6589-timer" for MT6589 compatible timers (GPT)
+ * "mediatek,mt7623-timer" for MT7623 compatible timers (GPT)
+ * "mediatek,mt8127-timer" for MT8127 compatible timers (GPT)
+ * "mediatek,mt8135-timer" for MT8135 compatible timers (GPT)
+ * "mediatek,mt8173-timer" for MT8173 compatible timers (GPT)
+ * "mediatek,mt6577-timer" for MT6577 and all above compatible timers (GPT)
+ * "mediatek,mt6765-timer" for MT6765 compatible timers (SYST)
+- reg: Should contain the location and length of the timer registers.
+- clocks: Should contain the system clock.
Examples:
@@ -21,5 +27,5 @@ Examples:
compatible = "mediatek,mt6577-timer";
reg = <0x10008000 0x80>;
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&system_clk>, <&rtc_clk>;
+ clocks = <&system_clk>;
};
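
As referenced above, a SYST node differs from the GPT example only in its
compatible string. A minimal sketch for MT6765 (the base address, interrupt
and clock phandle are illustrative placeholders, not taken from this patch):

	timer: timer@10017000 {
		compatible = "mediatek,mt6765-timer";
		reg = <0x10017000 0x1000>;	/* placeholder address */
		interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>; /* placeholder */
		clocks = <&system_clk>;
	};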
diff --git a/Documentation/devicetree/bindings/timer/snps,arc-timer.txt b/Documentation/devicetree/bindings/timer/snps,arc-timer.txt
index 4ef024630d61..147ef3e74452 100644
--- a/Documentation/devicetree/bindings/timer/snps,arc-timer.txt
+++ b/Documentation/devicetree/bindings/timer/snps,arc-timer.txt
@@ -12,10 +12,6 @@ Required properties:
(16 for ARCHS cores, 3 for ARC700 cores)
- clocks : phandle to the source clock
-Optional properties:
-
-- interrupt-parent : phandle to parent intc
-
Example:
timer0 {
diff --git a/Documentation/devicetree/bindings/timer/st,spear-timer.txt b/Documentation/devicetree/bindings/timer/st,spear-timer.txt
index c0017221cf55..b5238a07da17 100644
--- a/Documentation/devicetree/bindings/timer/st,spear-timer.txt
+++ b/Documentation/devicetree/bindings/timer/st,spear-timer.txt
@@ -5,8 +5,6 @@
- compatible : Should be:
"st,spear-timer"
- reg: Address range of the timer registers
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupt: Should contain the timer interrupt number
Example:
diff --git a/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt b/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt
index 95911fe70224..d96c1e283e73 100644
--- a/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt
+++ b/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt
@@ -7,7 +7,6 @@ Required properties:
- compatible: must be "ti,c64x+timer64"
- reg: base address and size of register region
-- interrupt-parent: interrupt controller
- interrupts: interrupt id
Optional properties:
diff --git a/Documentation/devicetree/bindings/ufs/ufs-hisi.txt b/Documentation/devicetree/bindings/ufs/ufs-hisi.txt
new file mode 100644
index 000000000000..a48c44817367
--- /dev/null
+++ b/Documentation/devicetree/bindings/ufs/ufs-hisi.txt
@@ -0,0 +1,41 @@
+* Hisilicon Universal Flash Storage (UFS) Host Controller
+
+UFS nodes are defined to describe the on-chip UFS hardware macro.
+Each UFS Host Controller should have its own node.
+
+Required properties:
+- compatible : compatible list; must contain one of the following -
+ "hisilicon,hi3660-ufs", "jedec,ufs-1.1" for the HiSilicon UFS
+ host controller present on Hi36xx chipsets.
+- reg : should contain the UFS register address space and the UFS SYS CTRL register address.
+- interrupt-parent : phandle of the interrupt controller
+- interrupts : interrupt number
+- clocks : List of phandle and clock specifier pairs
+- clock-names : List of clock input name strings sorted in the same
+ order as the clocks property; "ref_clk" and "phy_clk" are optional.
+- freq-table-hz : Array of <min max> operating frequencies stored in the same
+ order as the clocks property. If this property is not
+ defined or a value in the array is "0" then it is assumed
+ that the frequency is set by the parent clock or a
+ fixed rate clock source.
+- resets : phandle and specifier of the reset controller for the UFS IP
+- reset-names : should be "rst"; it corresponds to resetting the whole UFS IP.
+
+Example:
+
+ ufs: ufs@ff3b0000 {
+ compatible = "hisilicon,hi3660-ufs", "jedec,ufs-1.1";
+ /* 0: HCI standard */
+ /* 1: UFS SYS CTRL */
+ reg = <0x0 0xff3b0000 0x0 0x1000>,
+ <0x0 0xff3b1000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_UFSIO_REF>,
+ <&crg_ctrl HI3660_CLK_GATE_UFSPHY_CFG>;
+ clock-names = "ref_clk", "phy_clk";
+ freq-table-hz = <0 0>, <0 0>;
+ /* offset: 0x84; bit: 12 */
+ resets = <&crg_rst 0x84 12>;
+ reset-names = "rst";
+ };
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index c39dfef76a18..2df00524bd21 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -41,6 +41,8 @@ Optional properties:
-lanes-per-direction : number of lanes available per direction - either 1 or 2.
Note that it is assumed that the same number of lanes is used in both
directions at once. If not specified, the default is 2 lanes per direction.
+- resets : phandle and specifier of the reset controller for the UFS IP
+- reset-names : should be "rst"; it corresponds to resetting the whole UFS IP.
Note: If the above properties are not defined, it can be assumed that the supply
regulators or clocks are always on.
@@ -61,9 +63,11 @@ Example:
vccq-max-microamp = 200000;
vccq2-max-microamp = 200000;
- clocks = <&core 0>, <&ref 0>, <&iface 0>;
- clock-names = "core_clk", "ref_clk", "iface_clk";
- freq-table-hz = <100000000 200000000>, <0 0>, <0 0>;
+ clocks = <&core 0>, <&ref 0>, <&phy 0>, <&iface 0>;
+ clock-names = "core_clk", "ref_clk", "phy_clk", "iface_clk";
+ freq-table-hz = <100000000 200000000>, <0 0>, <0 0>, <0 0>;
+ resets = <&reset 0 1>;
+ reset-names = "rst";
phys = <&ufsphy1>;
phy-names = "ufsphy";
};
diff --git a/Documentation/devicetree/bindings/usb/fsl-usb.txt b/Documentation/devicetree/bindings/usb/fsl-usb.txt
index 4779c029b675..0b08b006c5ea 100644
--- a/Documentation/devicetree/bindings/usb/fsl-usb.txt
+++ b/Documentation/devicetree/bindings/usb/fsl-usb.txt
@@ -33,8 +33,6 @@ Recommended properties :
information for the interrupt. This should be encoded based on
the information in section 2) depending on the type of interrupt
controller you have.
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
Optional properties :
- fsl,invert-drvvbus : boolean; for MPC5121 USB0 only. Indicates the
diff --git a/Documentation/devicetree/bindings/usb/maxim,max3421.txt b/Documentation/devicetree/bindings/usb/maxim,max3421.txt
index 8cdbe0c85188..90495b1aeec2 100644
--- a/Documentation/devicetree/bindings/usb/maxim,max3421.txt
+++ b/Documentation/devicetree/bindings/usb/maxim,max3421.txt
@@ -11,9 +11,6 @@ Required properties:
The driver configures the MAX3421E for active low, level triggered interrupts;
configure your interrupt line accordingly.
-Optional property:
- - interrupt-parent: the phandle to the associated interrupt controller.
-
Example:
usb@0 {
diff --git a/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt b/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
index 09e847e92e5e..d4cf53c071d9 100644
--- a/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
+++ b/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
@@ -3,8 +3,6 @@ Richtek RT1711H TypeC PD Controller.
Required properties:
- compatible : Must be "richtek,rt1711h".
- reg : Must be 0x4e, the slave address of the RT1711H.
- - interrupt-parent : the phandle for the interrupt controller that
- provides interrupts for this device.
- interrupts : <a b> where a is the interrupt number and b represents an
encoding of the sense and level information for the interrupt.
diff --git a/Documentation/devicetree/bindings/usb/samsung-hsotg.txt b/Documentation/devicetree/bindings/usb/samsung-hsotg.txt
index b83d428a265e..0388634598ce 100644
--- a/Documentation/devicetree/bindings/usb/samsung-hsotg.txt
+++ b/Documentation/devicetree/bindings/usb/samsung-hsotg.txt
@@ -14,8 +14,6 @@ Binding details
Required properties:
- compatible: "samsung,s3c6400-hsotg" should be used for all currently
supported SoCs,
-- interrupt-parent: phandle for the interrupt controller to which the
- interrupt signal of the HSOTG block is routed,
- interrupts: specifier of interrupt signal of interrupt controller,
according to bindings of interrupt controller,
- clocks: contains an array of clock specifiers:
diff --git a/Documentation/devicetree/bindings/usb/spear-usb.txt b/Documentation/devicetree/bindings/usb/spear-usb.txt
index f8a464a25653..1dc91cc459c0 100644
--- a/Documentation/devicetree/bindings/usb/spear-usb.txt
+++ b/Documentation/devicetree/bindings/usb/spear-usb.txt
@@ -6,8 +6,6 @@ EHCI:
Required properties:
- compatible: "st,spear600-ehci"
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the EHCI interrupt
Example:
@@ -25,8 +23,6 @@ OHCI:
Required properties:
- compatible: "st,spear600-ohci"
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupts: Should contain the OHCI interrupt
Example:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 7cad066191ee..f32b79814dd7 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -8,6 +8,7 @@ abracon Abracon Corporation
actions Actions Semiconductor Co., Ltd.
active-semi Active-Semi International Inc
ad Avionic Design GmbH
+adafruit Adafruit Industries, LLC
adapteva Adapteva, Inc.
adaptrum Adaptrum, Inc.
adh AD Holdings Plc.
@@ -41,6 +42,7 @@ arrow Arrow Electronics
artesyn Artesyn Embedded Technologies Inc.
asahi-kasei Asahi Kasei Corp.
aspeed ASPEED Technology Inc.
+asus AsusTek Computer Inc.
atlas Atlas Scientific LLC
atmel Atmel Corporation
auo AU Optronics Corporation
@@ -53,6 +55,7 @@ axentia Axentia Technologies AB
axis Axis Communications AB
bananapi BIPAI KEJI LIMITED
bhf Beckhoff Automation GmbH & Co. KG
+bitmain Bitmain Technologies
boe BOE Technology Group Co., Ltd.
bosch Bosch Sensortec GmbH
boundary Boundary Devices Inc.
@@ -85,6 +88,7 @@ cubietech Cubietech, Ltd.
cypress Cypress Semiconductor Corporation
cznic CZ.NIC, z.s.p.o.
dallas Maxim Integrated Products (formerly Dallas Semiconductor)
+dataimage DataImage, Inc.
davicom DAVICOM Semiconductor, Inc.
delta Delta Electronics, Inc.
denx Denx Software Engineering
@@ -93,6 +97,7 @@ dh DH electronics GmbH
digi Digi International Inc.
digilent Digilent, Inc.
dioo Dioo Microcircuit Co., Ltd
+dlc DLC Display Co., Ltd.
dlg Dialog Semiconductor
dlink D-Link Corporation
dmo Data Modul AG
@@ -188,6 +193,7 @@ keymile Keymile GmbH
khadas Khadas
kiebackpeter Kieback & Peter GmbH
kinetic Kinetic Technologies
+kingdisplay King & Display Technology Co., Ltd.
kingnovel Kingnovel Technology Co., Ltd.
koe Kaohsiung Opto-Electronics Inc.
kosagi Sutajio Ko-Usagi PTE Ltd.
@@ -395,6 +401,7 @@ v3 V3 Semiconductor
variscite Variscite Ltd.
via VIA Technologies, Inc.
virtio Virtual I/O Device Specification, developed by the OASIS consortium
+vitesse Vitesse Semiconductor Corporation
vivante Vivante Corporation
vocore VoCore Studio
voipac Voipac Technologies s.r.o.
@@ -412,6 +419,7 @@ xes Extreme Engineering Solutions (X-ES)
xillybus Xillybus Ltd.
xlnx Xilinx
xunlong Shenzhen Xunlong Software CO.,Limited
+ysoft Y Soft Corporation a.s.
zarlink Zarlink Semiconductor
zeitec ZEITEC Semiconductor Co., LTD.
zidoo Shenzhen Zidoo Technology Co., Ltd.
diff --git a/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt b/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt
index c3a36ee45552..750a87657448 100644
--- a/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt
@@ -5,7 +5,6 @@ Required properties:
- compatible : Should be "cdns,wdt-r1p2".
- clocks : This is pclk (APB clock).
- interrupts : This is wd_irq - watchdog timeout interrupt.
-- interrupt-parent : Must be core interrupt controller.
Optional properties
- reset-on-timeout : If this property exists, then a reset is done
diff --git a/Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt b/Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt
index d7bab3db9d1f..05b95bfa2a89 100644
--- a/Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/rt2880-wdt.txt
@@ -5,7 +5,6 @@ Required properties:
- reg: physical base address of the controller and length of the register range
Optional properties:
-- interrupt-parent: phandle to the INTC device node
- interrupts: Specify the INTC interrupt number
Example:
diff --git a/Documentation/devicetree/bindings/xilinx.txt b/Documentation/devicetree/bindings/xilinx.txt
index 1d11b9002ef8..d058ace29345 100644
--- a/Documentation/devicetree/bindings/xilinx.txt
+++ b/Documentation/devicetree/bindings/xilinx.txt
@@ -49,7 +49,7 @@
followed by an older IP core version which implements the same
interface or any other device with the same interface.
- 'reg', 'interrupt-parent' and 'interrupts' are all optional properties.
+ 'reg' and 'interrupts' are both optional properties.
For example, the following block from system.mhs:
diff --git a/Documentation/devicetree/bindings/xillybus/xillybus.txt b/Documentation/devicetree/bindings/xillybus/xillybus.txt
index 9e316dc2e40f..e65d1f94b49c 100644
--- a/Documentation/devicetree/bindings/xillybus/xillybus.txt
+++ b/Documentation/devicetree/bindings/xillybus/xillybus.txt
@@ -4,8 +4,6 @@ Required properties:
- compatible: Should be "xillybus,xillybus-1.00.a"
- reg: Address and length of the register set for the device
- interrupts: Contains one interrupt node, typically consisting of three cells.
-- interrupt-parent: the phandle for the interrupt controller that
- services interrupts for this device.
Optional properties:
- dma-coherent: Present if DMA operations are coherent
diff --git a/Documentation/devicetree/dynamic-resolution-notes.txt b/Documentation/devicetree/dynamic-resolution-notes.txt
index 083d23262abe..c24ec366c5dc 100644
--- a/Documentation/devicetree/dynamic-resolution-notes.txt
+++ b/Documentation/devicetree/dynamic-resolution-notes.txt
@@ -2,15 +2,14 @@ Device Tree Dynamic Resolver Notes
----------------------------------
This document describes the implementation of the in-kernel
-Device Tree resolver, residing in drivers/of/resolver.c and is a
-companion document to Documentation/devicetree/dt-object-internal.txt[1]
+Device Tree resolver, residing in drivers/of/resolver.c.
How the resolver works
----------------------
The resolver is given an arbitrary tree as input, compiled with the
proper dtc option and carrying a /plugin/ tag. This generates the
-appropriate __fixups__ & __local_fixups__ nodes as described in [1].
+appropriate __fixups__ & __local_fixups__ nodes.
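
For reference, an input blob of this kind is typically produced by compiling
the overlay source with symbol/fixup generation enabled (a sketch of the
invocation; dtc's -@ option is what emits the fixup nodes):

	$ dtc -@ -I dts -O dtb -o example-overlay.dtbo example-overlay.dts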
In sequence the resolver works by the following steps:
diff --git a/Documentation/doc-guide/kernel-doc.rst b/Documentation/doc-guide/kernel-doc.rst
index 80383b1a574a..8db53cdc225f 100644
--- a/Documentation/doc-guide/kernel-doc.rst
+++ b/Documentation/doc-guide/kernel-doc.rst
@@ -488,14 +488,19 @@ doc: *title*
.. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
:doc: High Definition Audio over HDMI and Display Port
-functions: *function* *[...]*
+functions: *[ function ...]*
Include documentation for each *function* in *source*.
+ If no *function* is specified, the documentation for all functions
+ and types in the *source* will be included.
- Example::
+ Examples::
.. kernel-doc:: lib/bitmap.c
:functions: bitmap_parselist bitmap_parselist_user
+ .. kernel-doc:: lib/idr.c
+ :functions:
+
Without options, the kernel-doc directive includes all documentation comments
from the source file.
diff --git a/Documentation/doc-guide/parse-headers.rst b/Documentation/doc-guide/parse-headers.rst
index 82a3e43b6864..24cfaa15dd81 100644
--- a/Documentation/doc-guide/parse-headers.rst
+++ b/Documentation/doc-guide/parse-headers.rst
@@ -32,7 +32,7 @@ SYNOPSIS
\ **parse_headers.pl**\ [<options>] <C_FILE> <OUT_FILE> [<EXCEPTIONS_FILE>]
-Where <options> can be: --debug, --help or --man.
+Where <options> can be: --debug, --help or --usage.
OPTIONS
@@ -133,7 +133,7 @@ For both statements, \ **type**\ can be either one of the following:
\ **symbol**\
- The ignore or replace statement will apply to the name of enum statements
+ The ignore or replace statement will apply to the name of the enum value
at C_FILE.
For replace statements, \ **new_value**\ will automatically use :c:type:
diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst
index a2417633fdd8..f0796daa95b4 100644
--- a/Documentation/doc-guide/sphinx.rst
+++ b/Documentation/doc-guide/sphinx.rst
@@ -28,7 +28,7 @@ The ReST markups currently used by the Documentation/ files are meant to be
built with ``Sphinx`` version 1.3 or later. If you want to build
PDF output, it is recommended to use version 1.4.6 or later.
-There's a script that checks for the Spinx requirements. Please see
+There's a script that checks for the Sphinx requirements. Please see
:ref:`sphinx-pre-install` for further details.
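
That check can be run directly from the top of a kernel source tree (a sketch
of the invocation; the script lives under scripts/):

	$ ./scripts/sphinx-pre-install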
Most distributions are shipped with Sphinx, but its toolchain is fragile,
diff --git a/Documentation/driver-api/device_link.rst b/Documentation/driver-api/device_link.rst
index 70e328e16aad..d6763272e747 100644
--- a/Documentation/driver-api/device_link.rst
+++ b/Documentation/driver-api/device_link.rst
@@ -81,10 +81,14 @@ integration is desired.
Two other flags are specifically targeted at use cases where the device
link is added from the consumer's ``->probe`` callback: ``DL_FLAG_RPM_ACTIVE``
can be specified to runtime resume the supplier upon addition of the
-device link. ``DL_FLAG_AUTOREMOVE`` causes the device link to be automatically
-purged when the consumer fails to probe or later unbinds. This obviates
-the need to explicitly delete the link in the ``->remove`` callback or in
-the error path of the ``->probe`` callback.
+device link. ``DL_FLAG_AUTOREMOVE_CONSUMER`` causes the device link to be
+automatically purged when the consumer fails to probe or later unbinds.
+This obviates the need to explicitly delete the link in the ``->remove``
+callback or in the error path of the ``->probe`` callback.
+
+Similarly, when the device link is added from the supplier's ``->probe`` callback,
+``DL_FLAG_AUTOREMOVE_SUPPLIER`` causes the device link to be automatically
+purged when the supplier fails to probe or later unbinds.
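
A minimal sketch (not part of this patch) of how a consumer's ``->probe``
might combine the two flags described above; error handling is elided and
``example_get_supplier()`` is a hypothetical lookup helper:

	#include <linux/device.h>

	static int consumer_probe(struct device *dev)
	{
		/* Hypothetical helper that finds the supplier device. */
		struct device *supplier = example_get_supplier(dev);
		struct device_link *link;

		/* Runtime-resume the supplier now, and let the driver core
		 * drop the link automatically when this consumer fails to
		 * probe or later unbinds. */
		link = device_link_add(dev, supplier,
				       DL_FLAG_RPM_ACTIVE |
				       DL_FLAG_AUTOREMOVE_CONSUMER);
		if (!link)
			return -EINVAL;

		return 0;	/* no device_link_del() needed in ->remove() */
	}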
Limitations
===========
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
index dc384f2f7f34..b541e97c7ab1 100644
--- a/Documentation/driver-api/dma-buf.rst
+++ b/Documentation/driver-api/dma-buf.rst
@@ -131,6 +131,12 @@ DMA Fences
----------
.. kernel-doc:: drivers/dma-buf/dma-fence.c
+ :doc: DMA fences overview
+
+DMA Fences Functions Reference
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/dma-fence.c
:export:
.. kernel-doc:: include/linux/dma-fence.h
diff --git a/Documentation/driver-api/mtdnand.rst b/Documentation/driver-api/mtdnand.rst
index dcd63599f700..c55a6034c397 100644
--- a/Documentation/driver-api/mtdnand.rst
+++ b/Documentation/driver-api/mtdnand.rst
@@ -374,7 +374,7 @@ The nand driver supports three different types of hardware ECC.
- NAND_ECC_HW8_512
- Hardware ECC generator providing 6 bytes ECC per 512 byte.
+ Hardware ECC generator providing 8 bytes of ECC per 512 bytes of data.
If your hardware generator has a different functionality add it at the
appropriate place in nand_base.c
@@ -889,7 +889,7 @@ Use these constants to select the ECC algorithm::
#define NAND_ECC_HW3_512 3
/* Hardware ECC 6 byte ECC per 512 Byte data */
#define NAND_ECC_HW6_512 4
- /* Hardware ECC 6 byte ECC per 512 Byte data */
+ /* Hardware ECC 8 byte ECC per 512 Byte data */
#define NAND_ECC_HW8_512 6
diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt
index 79c22d096bbc..d4d642e1ce9c 100644
--- a/Documentation/fb/fbcon.txt
+++ b/Documentation/fb/fbcon.txt
@@ -155,6 +155,13 @@ C. Boot options
used by text. By default, this area will be black. The 'color' value
is an integer number that depends on the framebuffer driver being used.
+6. fbcon=nodefer
+
+ If the kernel is compiled with deferred fbcon takeover support, the
+ framebuffer contents, left in place by the firmware/bootloader, will
+ normally be preserved until some text is actually output to the console.
+ This option causes fbcon to bind immediately to the fbdev device instead.
+
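+
+ For instance (an illustrative kernel command line, not from this patch),
+ the option is appended like any other boot parameter:
+
+	root=/dev/sda1 quiet fbcon=nodefer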
C. Attaching, Detaching and Unloading
Before going on how to attach, detach and unload the framebuffer console, an
diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
index dbdf62907703..c7858dd1ea8f 100644
--- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt
+++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
@@ -5,10 +5,10 @@
#
# Architecture requirements
#
-# * arm64
+# * arm/arm64
#
-# Rely on eret context synchronization when returning from IPI handler, and
-# when returning to user-space.
+# Rely on implicit context synchronization as a result of exception return
+# when returning from IPI handler, and when returning to user-space.
#
# * x86
#
@@ -31,7 +31,7 @@
-----------------------
| alpha: | TODO |
| arc: | TODO |
- | arm: | TODO |
+ | arm: | ok |
| arm64: | ok |
| c6x: | TODO |
| h8300: | TODO |
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 37bf0a9de75c..9e6f19eaef89 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -64,7 +64,7 @@ prototypes:
void (*update_time)(struct inode *, struct timespec *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
- umode_t create_mode, int *opened);
+ umode_t create_mode);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
locking rules:
@@ -532,9 +532,9 @@ More details about quota locking can be found in fs/dquot.c.
prototypes:
void (*open)(struct vm_area_struct*);
void (*close)(struct vm_area_struct*);
- int (*fault)(struct vm_area_struct*, struct vm_fault *);
- int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
- int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *);
+ vm_fault_t (*fault)(struct vm_area_struct*, struct vm_fault *);
+ vm_fault_t (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
+ vm_fault_t (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *);
int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);
locking rules:
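
Relating to the ->fault prototypes above: handlers now return vm_fault_t
rather than int, so VM_FAULT_* codes and helper results are returned
directly. A minimal sketch (not part of this patch; the private structure
and page array are hypothetical, and current mainline passes only a
struct vm_fault *, reachable via vmf->vma, rather than the two-argument
form shown in the prototype list):

	#include <linux/mm.h>

	struct example_priv {
		struct page **pages;	/* hypothetical backing pages */
		unsigned long npages;
	};

	static vm_fault_t example_fault(struct vm_fault *vmf)
	{
		struct example_priv *priv = vmf->vma->vm_private_data;

		if (vmf->pgoff >= priv->npages)
			return VM_FAULT_SIGBUS;

		/* vmf_insert_page() already returns vm_fault_t, so its
		 * result can be handed straight back to the core VM. */
		return vmf_insert_page(vmf->vma, vmf->address,
				       priv->pages[vmf->pgoff]);
	}

	static const struct vm_operations_struct example_vm_ops = {
		.fault = example_fault,
	};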
diff --git a/Documentation/filesystems/cifs/CHANGES b/Documentation/filesystems/cifs/CHANGES
index 455e1cc494a9..1df7f4910eb2 100644
--- a/Documentation/filesystems/cifs/CHANGES
+++ b/Documentation/filesystems/cifs/CHANGES
@@ -1,1068 +1,4 @@
-See https://wiki.samba.org/index.php/LinuxCIFSKernel for
-more current information.
-
-Version 1.62
-------------
-Add sockopt=TCP_NODELAY mount option. EA (xattr) routines hardened
-to more strictly handle corrupt frames.
-
-Version 1.61
-------------
-Fix append problem to Samba servers (files opened with O_APPEND could
-have duplicated data). Fix oops in cifs_lookup. Workaround problem
-mounting to OS/400 Netserve. Fix oops in cifs_get_tcp_session.
-Disable use of server inode numbers when server only
-partially supports them (e.g. for one server querying inode numbers on
-FindFirst fails but QPathInfo queries works). Fix oops with dfs in
-cifs_put_smb_ses. Fix mmap to work on directio mounts (needed
-for OpenOffice when on forcedirectio mount e.g.)
-
-Version 1.60
--------------
-Fix memory leak in reconnect. Fix oops in DFS mount error path.
-Set s_maxbytes to smaller (the max that vfs can handle) so that
-sendfile will now work over cifs mounts again. Add noforcegid
-and noforceuid mount parameters. Fix small mem leak when using
-ntlmv2. Fix 2nd mount to same server but with different port to
-be allowed (rather than reusing the 1st port) - only when the
-user explicitly overrides the port on the 2nd mount.
-
-Version 1.59
-------------
-Client uses server inode numbers (which are persistent) rather than
-client generated ones by default (mount option "serverino" turned
-on by default if server supports it). Add forceuid and forcegid
-mount options (so that when negotiating unix extensions specifying
-which uid mounted does not immediately force the server's reported
-uids to be overridden). Add support for scope mount parm. Improve
-hard link detection to use same inode for both. Do not set
-read-only dos attribute on directories (for chmod) since Windows
-explorer special cases this attribute bit for directories for
-a different purpose.
-
-Version 1.58
-------------
-Guard against buffer overruns in various UCS-2 to UTF-8 string conversions
-when the UTF-8 string is composed of unusually long (more than 4 byte) converted
-characters. Add support for mounting root of a share which redirects immediately
-to DFS target. Convert string conversion functions from Unicode to more
-accurately mark string length before allocating memory (which may help the
-rare cases where a UTF-8 string is much larger than the UCS2 string that
-we converted from). Fix endianness of the vcnum field used during
-session setup to distinguish multiple mounts to same server from different
-userids. Raw NTLMSSP fixed (it requires /proc/fs/cifs/experimental
-flag to be set to 2, and mount must enable krb5 to turn on extended security).
-Performance of file create to Samba improved (posix create on lookup
-removes 1 of 2 network requests sent on file create)
-
-Version 1.57
-------------
-Improve support for multiple security contexts to the same server. We
-used to use the same "vcnumber" for all connections which could cause
-the server to treat subsequent connections, especially those that
-are authenticated as guest, as reconnections, invalidating the earlier
-user's smb session. This fix allows cifs to mount multiple times to the
-same server with different userids without risking invalidating earlier
-established security contexts. fsync now sends SMB Flush operation
-to better ensure that we wait for server to write all of the data to
-server disk (not just write it over the network). Add new mount
-parameter to allow user to disable sending the (slow) SMB flush on
-fsync if desired (fsync still flushes all cached write data to the server).
-Posix file open support added (turned off after one attempt if server
-fails to support it properly, as with Samba server versions prior to 3.3.2)
-Fix "redzone overwritten" bug in cifs_put_tcon (CIFSTcon may allocate too
-little memory for the "nativeFileSystem" field returned by the server
-during mount). Endian convert inode numbers if necessary (makes it easier
-to compare inode numbers on network files from big endian systems).
-
-Version 1.56
-------------
-Add "forcemandatorylock" mount option to allow user to use mandatory
-rather than posix (advisory) byte range locks, even though server would
-support posix byte range locks. Fix query of root inode when prefixpath
-specified and user does not have access to query information about the
-top of the share. Fix problem in 2.6.28 resolving DFS paths to
-Samba servers (worked to Windows). Fix rmdir so that pending search
-(readdir) requests do not get invalid results which include the now
-removed directory. Fix oops in cifs_dfs_ref.c when prefixpath is not reachable
-when using DFS. Add better file create support to servers which support
-the CIFS POSIX protocol extensions (this adds support for new flags
-on create, and improves semantics for write of locked ranges).
-
-Version 1.55
-------------
-Various fixes to make delete of open files behavior more predictable
-(when delete of an open file fails we mark the file as "delete-on-close"
-in a way that more servers accept, but only if we can first rename the
-file to a temporary name). Add experimental support for more safely
-handling fcntl(F_SETLEASE). Convert cifs to using blocking tcp
-sends, and also let tcp autotune the socket send and receive buffers.
-This reduces the number of EAGAIN errors returned by TCP/IP in
-high stress workloads (and the number of retries on socket writes
-when sending large SMBWriteX requests). Fix case in which a portion of
-data can in some cases not get written to the file on the server before the
-file is closed. Fix DFS parsing to properly handle path consumed field,
-and to handle certain codepage conversions better. Fix mount and
-umount race that can cause oops in mount or umount or reconnect.
-
-Version 1.54
-------------
-Fix premature write failure on congested networks (we would give up
-on EAGAIN from the socket too quickly on large writes).
-Cifs_mkdir and cifs_create now respect the setgid bit on parent dir.
-Fix endian problems in acl (mode from/to cifs acl) on bigendian
-architectures. Fix problems with preserving timestamps on copying open
-files (e.g. "cp -a") to Windows servers. For mkdir and create honor setgid bit
-on parent directory when server supports Unix Extensions but not POSIX
-create. Update cifs.upcall version to handle new Kerberos sec flags
-(this requires update of cifs.upcall program from Samba). Fix memory leak
-on dns_upcall (resolving DFS referralls). Fix plain text password
-authentication (requires setting SecurityFlags to 0x30030 to enable
-lanman and plain text though). Fix writes to be at correct offset when
-file is open with O_APPEND and file is on a directio (forcediretio) mount.
-Fix bug in rewinding readdir directory searches. Add nodfs mount option.
-
-Version 1.53
-------------
-DFS support added (Microsoft Distributed File System client support needed
-for referrals which enable a hierarchical name space among servers).
-Disable temporary caching of mode bits to servers which do not support
-storing of mode (e.g. Windows servers, when client mounts without cifsacl
-mount option) and add new "dynperm" mount option to enable temporary caching
-of mode (enable old behavior). Fix hang on mount caused when server crashes
-tcp session during negotiate protocol.
-
-Version 1.52
-------------
-Fix oops on second mount to server when null auth is used.
-Enable experimental Kerberos support. Return writebehind errors on flush
-and sync so that events like out of disk space get reported properly on
-cached files. Fix setxattr failure to certain Samba versions. Fix mount
-of second share to disconnected server session (autoreconnect on this).
-Add ability to modify cifs acls for handling chmod (when mounted with
-cifsacl flag). Fix prefixpath path separator so we can handle mounts
-with prefixpaths longer than one directory (one path component) when
-mounted to Windows servers. Fix slow file open when cifsacl
-enabled. Fix memory leak in FindNext when the SMB call returns -EBADF.
-
-
-Version 1.51
-------------
-Fix memory leak in statfs when mounted to very old servers (e.g.
-Windows 9x). Add new feature "POSIX open" which allows servers
-which support the current POSIX Extensions to provide better semantics
-(e.g. delete for open files opened with posix open). Take into
-account umask on posix mkdir not just older style mkdir. Add
-ability to mount to IPC$ share (which allows CIFS named pipes to be
-opened, read and written as if they were files). When 1st tree
-connect fails (e.g. due to signing negotiation failure) fix
-leak that causes cifsd not to stop and rmmod to fail to cleanup
-cifs_request_buffers pool. Fix problem with POSIX Open/Mkdir on
-bigendian architectures. Fix possible memory corruption when
-EAGAIN returned on kern_recvmsg. Return better error if server
-requires packet signing but client has disabled it. When mounted
-with cifsacl mount option - mode bits are approximated based
-on the contents of the ACL of the file or directory. When cifs
-mount helper is missing convert make sure that UNC name
-has backslash (not forward slash) between ip address of server
-and the share name.
-
-Version 1.50
-------------
-Fix NTLMv2 signing. NFS server mounted over cifs works (if cifs mount is
-done with "serverino" mount option). Add support for POSIX Unlink
-(helps with certain sharing violation cases when server such as
-Samba supports newer POSIX CIFS Protocol Extensions). Add "nounix"
-mount option to allow disabling the CIFS Unix Extensions for just
-that mount. Fix hang on spinlock in find_writable_file (race when
-reopening file after session crash). Byte range unlock request to
-windows server could unlock more bytes (on server copy of file)
-than intended if start of unlock request is well before start of
-a previous byte range lock that we issued.
-
-Version 1.49
-------------
-IPv6 support. Enable ipv6 addresses to be passed on mount (put the ipv6
-address after the "ip=" mount option, at least until mount.cifs is fixed to
-handle DNS host to ipv6 name translation). Accept override of uid or gid
-on mount even when Unix Extensions are negotiated (it used to be ignored
-when Unix Extensions were ignored). This allows users to override the
-default uid and gid for files when they are certain that the uids or
-gids on the server do not match those of the client. Make "sec=none"
-mount override username (so that null user connection is attempted)
-to match what documentation said. Support for very large reads, over 127K,
-available to some newer servers (such as Samba 3.0.26 and later but
-note that it also requires setting CIFSMaxBufSize at module install
-time to a larger value which may hurt performance in some cases).
-Make sign option force signing (or fail if server does not support it).
-
-Version 1.48
-------------
-Fix mtime bouncing around from local idea of last write times to remote time.
-Fix hang (in i_size_read) when simultaneous size update of same remote file
-on smp system corrupts sequence number. Do not reread unnecessarily partial page
-(which we are about to overwrite anyway) when writing out file opened rw.
-When DOS attribute of file on non-Unix server's file changes on the server side
-from read-only back to read-write, reflect this change in default file mode
-(we had been leaving a file's mode read-only until the inode were reloaded).
-Allow setting of attribute back to ATTR_NORMAL (removing readonly dos attribute
-when archive dos attribute not set and we are changing mode back to writeable
-on server which does not support the Unix Extensions). Remove read only dos
-attribute on chmod when adding any write permission (ie on any of
-user/group/other (not all of user/group/other ie 0222) when
-mounted to windows. Add support for POSIX MkDir (slight performance
-enhancement and eliminates the network race between the mkdir and set
-path info of the mode).
-
-
-Version 1.47
-------------
-Fix oops in list_del during mount caused by unaligned string.
-Fix file corruption which could occur on some large file
-copies caused by writepages page i/o completion bug.
-Seek to SEEK_END forces check for update of file size for non-cached
-files. Allow file size to be updated on remote extend of locally open,
-non-cached file. Fix reconnect to newer Samba servers (or other servers
-which support the CIFS Unix/POSIX extensions) so that we again tell the
-server the Unix/POSIX cifs capabilities which we support (SetFSInfo).
-Add experimental support for new POSIX Open/Mkdir (which returns
-stat information on the open, and allows setting the mode).
-
-Version 1.46
-------------
-Support deep tree mounts. Better support OS/2, Win9x (DOS) time stamps.
-Allow null user to be specified on mount ("username="). Do not return
-EINVAL on readdir when filldir fails due to overwritten blocksize
-(fixes FC problem). Return error in rename 2nd attempt retry (ie report
-if rename by handle also fails, after rename by path fails, we were
-not reporting whether the retry worked or not). Fix NTLMv2 to
-work to Windows servers (mount with option "sec=ntlmv2").
-
-Version 1.45
-------------
-Do not time out lockw calls when using posix extensions. Do not
-time out requests if server still responding reasonably fast
-on requests on other threads. Improve POSIX locking emulation,
-(lock cancel now works, and unlock of merged range works even
-to Windows servers now). Fix oops on mount to lanman servers
-(win9x, os/2 etc.) when null password. Do not send listxattr
-(SMB to query all EAs) if nouser_xattr specified. Fix SE Linux
-problem (instantiate inodes/dentries in right order for readdir).
-
-Version 1.44
-------------
-Rewritten sessionsetup support, including support for legacy SMB
-session setup needed for OS/2 and older servers such as Windows 95 and 98.
-Fix oops on ls to OS/2 servers. Add support for level 1 FindFirst
-so we can do search (ls etc.) to OS/2. Do not send NTCreateX
-or recent levels of FindFirst unless server says it supports NT SMBs
-(instead use legacy equivalents from LANMAN dialect). Fix to allow
-NTLMv2 authentication support (now can use stronger password hashing
-on mount if corresponding /proc/fs/cifs/SecurityFlags is set (0x4004).
-Allow override of global cifs security flags on mount via "sec=" option(s).
-
-Version 1.43
-------------
-POSIX locking to servers which support CIFS POSIX Extensions
-(disabled by default controlled by proc/fs/cifs/Experimental).
-Handle conversion of long share names (especially Asian languages)
-to Unicode during mount. Fix memory leak in sess struct on reconnect.
-Fix rare oops after acpi suspend. Fix O_TRUNC opens to overwrite on
-cifs open which helps rare case when setpathinfo fails or server does
-not support it.
-
-Version 1.42
-------------
-Fix slow oplock break when mounted to different servers at the same time and
-the tids match and we try to find matching fid on wrong server. Fix read
-looping when signing required by server (2.6.16 kernel only). Fix readdir
-vs. rename race which could cause each to hang. Return . and .. even
-if server does not. Allow searches to skip first three entries and
-begin at any location. Fix oops in find_writeable_file.
-
-Version 1.41
-------------
-Fix NTLMv2 security (can be enabled in /proc/fs/cifs) so customers can
-configure stronger authentication. Fix sfu symlinks so they can
-be followed (not just recognized). Fix wraparound of bcc on
-read responses when buffer size over 64K and also fix wrap of
-max smb buffer size when CIFSMaxBufSize over 64K. Fix oops in
-cifs_user_read and cifs_readpages (when EAGAIN on send of smb
-on socket is returned over and over). Add POSIX (advisory) byte range
-locking support (requires server with newest CIFS UNIX Extensions
-to the protocol implemented). Slow down negprot slightly in port 139
-RFC1001 case to give session_init time on buggy servers.
-
-Version 1.40
-------------
-Use fsuid (fsgid) more consistently instead of uid (gid). Improve performance
-of readpages by eliminating one extra memcpy. Allow update of file size
-from remote server even if file is open for write as long as mount is
-directio. Recognize share mode security and send NTLM encrypted password
-on tree connect if share mode negotiated.
-
-Version 1.39
-------------
-Defer close of a file handle slightly if pending writes depend on that handle
-(this reduces the EBADF bad file handle errors that can be logged under heavy
-stress on writes). Modify cifs Kconfig options to expose CONFIG_CIFS_STATS2
-Fix SFU style symlinks and mknod needed for servers which do not support the
-CIFS Unix Extensions. Fix setfacl/getfacl on bigendian. Timeout negative
-dentries so files that the client sees as deleted but that later get created
-on the server will be recognized. Add client side permission check on setattr.
-Timeout stuck requests better (where server has never responded or sent corrupt
-responses)
-
-Version 1.38
-------------
-Fix tcp socket retransmission timeouts (e.g. on ENOSPACE from the socket)
-to be smaller at first (but increasing) so large write performance performance
-over GigE is better. Do not hang thread on illegal byte range lock response
-from Windows (Windows can send an RFC1001 size which does not match smb size) by
-allowing an SMBs TCP length to be up to a few bytes longer than it should be.
-wsize and rsize can now be larger than negotiated buffer size if server
-supports large readx/writex, even when directio mount flag not specified.
-Write size will in many cases now be 16K instead of 4K which greatly helps
-file copy performance on lightly loaded networks. Fix oops in dnotify
-when experimental config flag enabled. Make cifsFYI more granular.
-
-Version 1.37
-------------
-Fix readdir caching when unlink removes file in current search buffer,
-and this is followed by a rewind search to just before the deleted entry.
-Do not attempt to set ctime unless atime and/or mtime change requested
-(most servers throw it away anyway). Fix length check of received smbs
-to be more accurate. Fix big endian problem with mapchars mount option,
-and with a field returned by statfs.
-
-Version 1.36
-------------
-Add support for mounting to older pre-CIFS servers such as Windows9x and ME.
-For these older servers, add option for passing netbios name of server in
-on mount (servernetbiosname). Add suspend support for power management, to
-avoid cifsd thread preventing software suspend from working.
-Add mount option for disabling the default behavior of sending byte range lock
-requests to the server (necessary for certain applications which break with
-mandatory lock behavior such as Evolution), and also mount option for
-requesting case insensitive matching for path based requests (requesting
-case sensitive is the default).
-
-Version 1.35
-------------
-Add writepage performance improvements. Fix path name conversions
-for long filenames on mounts which were done with "mapchars" mount option
-specified. Ensure multiplex ids do not collide. Fix case in which
-rmmod can oops if done soon after last unmount. Fix truncated
-search (readdir) output when resume filename was a long filename.
-Fix filename conversion when mapchars mount option was specified and
-filename was a long filename.
-
-Version 1.34
-------------
-Fix error mapping of the TOO_MANY_LINKS (hardlinks) case.
-Do not oops if root user kills cifs oplock kernel thread or
-kills the cifsd thread (NB: killing the cifs kernel threads is not
-recommended, unmount and rmmod cifs will kill them when they are
-no longer needed). Fix readdir to ASCII servers (ie older servers
-which do not support Unicode) and also require asterisk.
-Fix out of memory case in which data could be written one page
-off in the page cache.
-
-Version 1.33
-------------
-Fix caching problem, in which readdir of directory containing a file
-which was cached could cause the file's time stamp to be updated
-without invalidating the readahead data (so we could get stale
-file data on the client for that file even as the server copy changed).
-Cleanup response processing so cifsd can not loop when abnormally
-terminated.
-
-
-Version 1.32
-------------
-Fix oops in ls when Transact2 FindFirst (or FindNext) returns more than one
-transact response for an SMB request and search entry split across two frames.
-Add support for lsattr (getting ext2/ext3/reiserfs attr flags from the server)
-as new protocol extensions. Do not send Get/Set calls for POSIX ACLs
-unless server explicitly claims to support them in CIFS Unix extensions
-POSIX ACL capability bit. Fix packet signing when multiuser mounting with
-different users from the same client to the same server. Fix oops in
-cifs_close. Add mount option for remapping reserved characters in
-filenames (also allow recognizing files with created by SFU which have any
-of these seven reserved characters, except backslash, to be recognized).
-Fix invalid transact2 message (we were sometimes trying to interpret
-oplock breaks as SMB responses). Add ioctl for checking that the
-current uid matches the uid of the mounter (needed by umount.cifs).
-Reduce the number of large buffer allocations in cifs response processing
-(significantly reduces memory pressure under heavy stress with multiple
-processes accessing the same server at the same time).
-
-Version 1.31
-------------
-Fix updates of DOS attributes and time fields so that files on NT4 servers
-do not get marked delete on close. Display sizes of cifs buffer pools in
-cifs stats. Fix oops in unmount when cifsd thread being killed by
-shutdown. Add generic readv/writev and aio support. Report inode numbers
-consistently in readdir and lookup (when serverino mount option is
-specified use the inode number that the server reports - for both lookup
-and readdir, otherwise by default the locally generated inode number is used
-for inodes created in either path since servers are not always able to
-provide unique inode numbers when exporting multiple volumes from under one
-sharename).
-
-Version 1.30
-------------
-Allow new nouser_xattr mount parm to disable xattr support for user namespace.
-Do not flag user_xattr mount parm in dmesg. Retry failures setting file time
-(mostly affects NT4 servers) by retry with handle based network operation.
-Add new POSIX Query FS Info for returning statfs info more accurately.
-Handle passwords with multiple commas in them.
-
-Version 1.29
-------------
-Fix default mode in sysfs of cifs module parms. Remove old readdir routine.
-Fix capabilities flags for large readx so as to allow reads larger than 64K.
-
-Version 1.28
-------------
-Add module init parm for large SMB buffer size (to allow it to be changed
-from its default of 16K) which is especially useful for large file copy
-when mounting with the directio mount option. Fix oops after
-returning from mount when experimental ExtendedSecurity enabled and
-SpnegoNegotiated returning invalid error. Fix case to retry better when
-peek returns from 1 to 3 bytes on socket which should have more data.
-Fixed path based calls (such as cifs lookup) to handle path names
-longer than 530 (now can handle PATH_MAX). Fix pass through authentication
-from Samba server to DC (Samba required dummy LM password).
-
-Version 1.27
-------------
-Turn off DNOTIFY (directory change notification support) by default
-(unless built with the experimental flag) to fix hang with KDE
-file browser. Fix DNOTIFY flag mappings. Fix hang (in wait_event
-waiting on an SMB response) in SendReceive when session dies but
-reconnects quickly from another task. Add module init parms for
-minimum number of large and small network buffers in the buffer pools,
-and for the maximum number of simultaneous requests.
-
-Version 1.26
-------------
-Add setfacl support to allow setting of ACLs remotely to Samba 3.10 and later
-and other POSIX CIFS compliant servers. Fix error mapping for getfacl
-to EOPNOTSUPP when server does not support posix acls on the wire. Fix
-improperly zeroed buffer in CIFS Unix extensions set times call.
-
-Version 1.25
-------------
-Fix internationalization problem in cifs readdir with filenames that map to
-longer UTF-8 strings than the string on the wire was in Unicode. Add workaround
-for readdir to netapp servers. Fix search rewind (seek into readdir to return
-non-consecutive entries). Do not do readdir when server negotiates
-buffer size to small to fit filename. Add support for reading POSIX ACLs from
-the server (add also acl and noacl mount options).
-
-Version 1.24
-------------
-Optionally allow using server side inode numbers, rather than client generated
-ones by specifying mount option "serverino" - this is required for some apps
-to work which double check hardlinked files and have persistent inode numbers.
-
-Version 1.23
-------------
-Multiple bigendian fixes. On little endian systems (for reconnect after
-network failure) fix tcp session reconnect code so we do not try first
-to reconnect on reverse of port 445. Treat reparse points (NTFS junctions)
-as directories rather than symlinks because we can do follow link on them.
-
-Version 1.22
-------------
-Add config option to enable XATTR (extended attribute) support, mapping
-xattr names in the "user." namespace space to SMB/CIFS EAs. Lots of
-minor fixes pointed out by the Stanford SWAT checker (mostly missing
-or out of order NULL pointer checks in little used error paths).
-
-Version 1.21
-------------
-Add new mount parm to control whether mode check (generic_permission) is done
-on the client. If Unix extensions are enabled and the uids on the client
-and server do not match, client permission checks are meaningless on
-server uids that do not exist on the client (this does not affect the
-normal ACL check which occurs on the server). Fix default uid
-on mknod to match create and mkdir. Add optional mount parm to allow
-override of the default uid behavior (in which the server sets the uid
-and gid of newly created files). Normally for network filesystem mounts
-user want the server to set the uid/gid on newly created files (rather than
-using uid of the client processes you would in a local filesystem).
-
-Version 1.20
-------------
-Make transaction counts more consistent. Merge /proc/fs/cifs/SimultaneousOps
-info into /proc/fs/cifs/DebugData. Fix oops in rare oops in readdir
-(in build_wildcard_path_from_dentry). Fix mknod to pass type field
-(block/char/fifo) properly. Remove spurious mount warning log entry when
-credentials passed as mount argument. Set major/minor device number in
-inode for block and char devices when unix extensions enabled.
-
-Version 1.19
-------------
-Fix /proc/fs/cifs/Stats and DebugData display to handle larger
-amounts of return data. Properly limit requests to MAX_REQ (50
-is the usual maximum active multiplex SMB/CIFS requests per server).
-Do not kill cifsd (and thus hurt the other SMB session) when more than one
-session to the same server (but with different userids) exists and one
-of the two user's smb sessions is being removed while leaving the other.
-Do not loop reconnecting in cifsd demultiplex thread when admin
-kills the thread without going through unmount.
-
-Version 1.18
-------------
-Do not rename hardlinked files (since that should be a noop). Flush
-cached write behind data when reopening a file after session abend,
-except when already in write. Grab per socket sem during reconnect
-to avoid oops in sendmsg if overlapping with reconnect. Do not
-reset cached inode file size on readdir for files open for write on
-client.
-
-
-Version 1.17
-------------
-Update number of blocks in file so du command is happier (in Linux a fake
-blocksize of 512 is required for calculating number of blocks in inode).
-Fix prepare write of partial pages to read in data from server if possible.
-Fix race on tcpStatus field between unmount and reconnection code, causing
-cifsd process sometimes to hang around forever. Improve out of memory
-checks in cifs_filldir
-
-Version 1.16
-------------
-Fix incorrect file size in file handle based setattr on big endian hardware.
-Fix oops in build_path_from_dentry when out of memory. Add checks for invalid
-and closing file structs in writepage/partialpagewrite. Add statistics
-for each mounted share (new menuconfig option). Fix endianness problem in
-volume information displayed in /proc/fs/cifs/DebugData (only affects
-affects big endian architectures). Prevent renames while constructing
-path names for open, mkdir and rmdir.
-
-Version 1.15
-------------
-Change to mempools for alloc smb request buffers and multiplex structs
-to better handle low memory problems (and potential deadlocks).
-
-Version 1.14
-------------
-Fix incomplete listings of large directories on Samba servers when Unix
-extensions enabled. Fix oops when smb_buffer can not be allocated. Fix
-rename deadlock when writing out dirty pages at same time.
-
-Version 1.13
-------------
-Fix open of files in which O_CREATE can cause the mode to change in
-some cases. Fix case in which retry of write overlaps file close.
-Fix PPC64 build error. Reduce excessive stack usage in smb password
-hashing. Fix overwrite of Linux user's view of file mode to Windows servers.
-
-Version 1.12
-------------
-Fixes for large file copy, signal handling, socket retry, buffer
-allocation and low memory situations.
-
-Version 1.11
-------------
-Better port 139 support to Windows servers (RFC1001/RFC1002 Session_Initialize)
-also now allowing support for specifying client netbiosname. NT4 support added.
-
-Version 1.10
-------------
-Fix reconnection (and certain failed mounts) to properly wake up the
-blocked users thread so it does not seem hung (in some cases was blocked
-until the cifs receive timeout expired). Fix spurious error logging
-to kernel log when application with open network files killed.
-
-Version 1.09
-------------
-Fix /proc/fs module unload warning message (that could be logged
-to the kernel log). Fix intermittent failure in connectathon
-test7 (hardlink count not immediately refreshed in case in which
-inode metadata can be incorrectly kept cached when time near zero)
-
-Version 1.08
-------------
-Allow file_mode and dir_mode (specified at mount time) to be enforced
-locally (the server already enforced its own ACLs too) for servers
-that do not report the correct mode (do not support the
-CIFS Unix Extensions).
-
-Version 1.07
-------------
-Fix some small memory leaks in some unmount error paths. Fix major leak
-of cache pages in readpages causing multiple read oriented stress
-testcases (including fsx, and even large file copy) to fail over time.
-
-Version 1.06
-------------
-Send NTCreateX with ATTR_POSIX if Linux/Unix extensions negotiated with server.
-This allows files that differ only in case and improves performance of file
-creation and file open to such servers. Fix semaphore conflict which causes
-slow delete of open file to Samba (which unfortunately can cause an oplock
-break to self while vfs_unlink held i_sem) which can hang for 20 seconds.
-
-Version 1.05
-------------
-fixes to cifs_readpages for fsx test case
-
-Version 1.04
-------------
-Fix caching data integrity bug when extending file size especially when no
-oplock on file. Fix spurious logging of valid already parsed mount options
-that are parsed outside of the cifs vfs such as nosuid.
-
-
-Version 1.03
-------------
-Connect to server when port number override not specified, and tcp port
-unitialized. Reset search to restart at correct file when kernel routine
-filldir returns error during large directory searches (readdir).
-
-Version 1.02
-------------
-Fix caching problem when files opened by multiple clients in which
-page cache could contain stale data, and write through did
-not occur often enough while file was still open when read ahead
-(read oplock) not allowed. Treat "sep=" when first mount option
-as an override of comma as the default separator between mount
-options.
-
-Version 1.01
-------------
-Allow passwords longer than 16 bytes. Allow null password string.
-
-Version 1.00
-------------
-Gracefully clean up failed mounts when attempting to mount to servers such as
-Windows 98 that terminate tcp sessions during protocol negotiation. Handle
-embedded commas in mount parsing of passwords.
-
-Version 0.99
-------------
-Invalidate local inode cached pages on oplock break and when last file
-instance is closed so that the client does not continue using stale local
-copy rather than later modified server copy of file. Do not reconnect
-when server drops the tcp session prematurely before negotiate
-protocol response. Fix oops in reopen_file when dentry freed. Allow
-the support for CIFS Unix Extensions to be disabled via proc interface.
-
-Version 0.98
-------------
-Fix hang in commit_write during reconnection of open files under heavy load.
-Fix unload_nls oops in a mount failure path. Serialize writes to same socket
-which also fixes any possible races when cifs signatures are enabled in SMBs
-being sent out of signature sequence number order.
-
-Version 0.97
-------------
-Fix byte range locking bug (endian problem) causing bad offset and
-length.
-
-Version 0.96
-------------
-Fix oops (in send_sig) caused by CIFS unmount code trying to
-wake up the demultiplex thread after it had exited. Do not log
-error on harmless oplock release of closed handle.
-
-Version 0.95
-------------
-Fix unsafe global variable usage and password hash failure on gcc 3.3.1.
-Fix problem reconnecting secondary mounts to same server after session
-failure. Fix invalid dentry race in mkdir when directory gets created
-by another client between the lookup and mkdir.
-
-Version 0.94
-------------
-Fix to list processing in reopen_files. Fix reconnection when server hung
-but tcpip session still alive. Set proper timeout on socket read.
-
-Version 0.93
-------------
-Add missing mount options including iocharset. SMP fixes in write and open.
-Fix errors in reconnecting after TCP session failure. Fix module unloading
-of default nls codepage.
-
-Version 0.92
-------------
-Active smb transactions should never go negative (fix double FreeXid). Fix
-list processing in file routines. Check return code on kmalloc in open.
-Fix spinlock usage for SMP.
-
-Version 0.91
-------------
-Fix oops in reopen_files when invalid dentry. Drop dentry on server rename
-and on revalidate errors. Fix cases where pid is now tgid. Fix return code
-on create hard link when server does not support them.
-
-Version 0.90
-------------
-Fix scheduling while atomic error in getting inode info on newly created file.
-Fix truncate of existing files opened with O_CREAT but not O_TRUNC set.
-
-Version 0.89
-------------
-Fix oops on write to dead tcp session. Remove error log write for the case when a file
-is opened O_CREAT but not O_EXCL.
-
-Version 0.88
-------------
-Fix non-POSIX behavior on rename of open file and delete of open file by taking
-advantage of trans2 SetFileInfo rename facility if available on target server.
-Retry on ENOSPC and EAGAIN socket errors.
-
-Version 0.87
-------------
-Fix oops on big endian readdir. Set blksize to be even power of two (2**blkbits) to fix
-allocation size miscalculation. After oplock token lost do not read through
-cache.
-
-Version 0.86
-------------
-Fix oops on empty file readahead. Fix for file size handling for locally cached files.
-
-Version 0.85
-------------
-Fix oops in mkdir when server fails to return inode info. Fix oops in reopen_files
-during auto reconnection to server after server recovered from failure.
-
-Version 0.84
-------------
-Finish support for Linux 2.5 open/create changes, which removes the
-redundant NTCreate/QPathInfo/close that was sent during file create.
-Enable oplock by default. Enable packet signing by default (needed to
-access many recent Windows servers).
-
-Version 0.83
-------------
-Fix oops when mounting to long server names caused by inverted parms to kmalloc.
-Fix MultiuserMount (/proc/fs/cifs configuration setting) so that when enabled
-we will choose a cifs user session (smb uid) that better matches the local
-uid if a) the mount uid does not match the current uid and b) we have another
-session to the same server (ip address) for a different mount which
-matches the current local uid.
-
-Version 0.82
-------------
-Add support for mknod of block or character devices. Fix oplock
-code (distributed caching) to properly send response to oplock
-break from server.
-
-Version 0.81
-------------
-Finish up CIFS packet digital signing for the default
-NTLM security case. This should help Windows 2003
-network interoperability since it is common for
-packet signing to be required now. Fix statfs (stat -f)
-which recently started returning errors due to
-invalid value (-1 instead of 0) being set in the
-struct kstatfs f_ffiles field.
-
-Version 0.80
-------------
-Fix oops on stopping oplock thread when removing cifs when
-built as module.
-
-Version 0.79
-------------
-Fix mount options for ro (readonly), uid, gid and file and directory mode.
-
-Version 0.78
-------------
-Fix errors displayed on failed mounts to be more understandable.
-Fixed various incorrect or misleading smb to posix error code mappings.
-
-Version 0.77
-------------
-Fix display of NTFS DFS junctions to display as symlinks.
-They are the network equivalent. Fix oops in
-cifs_partialpagewrite caused by missing spinlock protection
-of openfile linked list. Allow writebehind caching errors to
-be returned to the application at file close.
-
-Version 0.76
-------------
-Clean up options displayed in /proc/mounts by show_options to
-be more consistent with other filesystems.
-
-Version 0.75
-------------
-Fix delete of readonly file to Windows servers. Reflect
-presence or absence of read only dos attribute in mode
-bits for servers that do not support CIFS Unix extensions.
-Fix shortened results on readdir of large directories to
-servers supporting CIFS Unix extensions (caused by
-incorrect resume key).
-
-Version 0.74
-------------
-Fix truncate bug (set file size) that could cause hangs e.g. when running fsx.
-
-Version 0.73
-------------
-Unload nls if mount fails.
-
-Version 0.72
-------------
-Add resume key support to search (readdir) code to workaround
-Windows bug. Add /proc/fs/cifs/LookupCacheEnable which
-allows disabling caching of attribute information for
-lookups.
-
-Version 0.71
-------------
-Add more oplock handling (distributed caching code). Remove
-dead code. Remove excessive stack space utilization from
-symlink routines.
-
-Version 0.70
-------------
-Fix oops in get dfs referral (triggered when null path sent in to
-mount). Add support for overriding rsize at mount time.
-
-Version 0.69
-------------
-Fix buffer overrun in readdir which caused intermittent kernel oopses.
-Fix writepage code to release kmap on write data. Allow the new "-ip="
-mount option to be passed in as a parameter distinct from the first part
-(server name portion) of the UNC name. Allow override of the
-tcp port of the target server via the new mount option "-port=".
-
-Version 0.68
-------------
-Fix search handle leak on rewind. Fix setuid and gid so that they are
-reflected in the local inode immediately. Cleanup of whitespace
-to make 2.4 and 2.5 versions more consistent.
-
-
-Version 0.67
-------------
-Fix signal sending so that captive thread (cifsd) exits on umount
-(which was causing the warning in kmem_cache_free of the request buffers
-at rmmod time). This had broken as a side effect of the recent global
-kernel change to daemonize. Fix memory leak in readdir code which
-showed up in "ls -R" (and applications that did search rewinding).
-
-Version 0.66
-------------
-Reconnect tids and fids after session reconnection (still do not
-reconnect byte range locks though). Fix problem caching
-lookup information for directory inodes, improving performance,
-especially in deep directory trees. Fix various build warnings.
-
-Version 0.65
-------------
-Finish fixes to commit write for caching/readahead consistency. fsx
-now works to Samba servers. Fix oops caused when readahead
-was interrupted by a signal.
-
-Version 0.64
-------------
-Fix data corruption (in partial page after truncate) that caused fsx to
-fail to Windows servers. Cleaned up some extraneous error logging in
-common error paths. Add generic sendfile support.
-
-Version 0.63
-------------
-Fix memory leak in AllocMidQEntry.
-Finish reconnection logic, so connection with server can be dropped
-(or server rebooted) and the cifs client will reconnect.
-
-Version 0.62
-------------
-Fix temporary socket leak when bad userid or password specified
-(or other SMBSessSetup failure). Increase maximum buffer size to slightly
-over 16K to allow negotiation of up to the Samba and Windows server default read
-sizes. Add support for readpages.
-
-Version 0.61
-------------
-Fix oops when username not passed in on mount. Extensive fixes and improvements
-to error logging (strip redundant newlines, change debug macros to ensure newline
-passed in and to be more consistent). Fix writepage wrong file handle problem:
-a readonly file handle could be incorrectly used to attempt to write out
-file updates through the page cache to multiply open files. This could cause
-the iozone benchmark to fail on the fwrite test. Fix bug mounting two different
-shares to the same Windows server when using different usernames
-(doing this to Samba servers worked but Windows was rejecting it) - now it is
-possible to use different userids when connecting to the same server from a
-Linux client. Fix oops when treeDisconnect called during unmount on
-previously freed socket.
-
-Version 0.60
-------------
-Fix oops in readpages caused by not setting address space operations in inode in
-rare code path.
-
-Version 0.59
-------------
-Includes support for deletion of open files and renaming over existing files (per POSIX
-requirement). Add readlink support for Windows junction points (directory symlinks).
-
-Version 0.58
-------------
-Changed read and write to go through pagecache. Added additional address space operations.
-Memory mapped operations now working.
-
-Version 0.57
-------------
-Added writepage code for additional memory mapping support. Fixed leak in xids causing
-the simultaneous operations counter (/proc/fs/cifs/SimultaneousOps) to increase on
-every stat call. Additional formatting cleanup.
-
-Version 0.56
-------------
-Fix big endian bug in order of time conversion. Merge 2.5 to 2.4 version. Formatting cleanup.
-
-Version 0.55
-------------
-Fixes from Zwane Mwaikambo for adding missing return code checking in a few places.
-Also included a modified version of his fix to protect global list manipulation of
-the smb session and tree connection and mid related global variables.
-
-Version 0.54
-------------
-Fix problem with captive thread hanging around at unmount time. Adjust to 2.5.42-pre
-changes to superblock layout. Remove wasteful allocation of smb buffers (now the send
-buffer is reused for responses). Add more oplock handling. Additional minor cleanup.
-
-Version 0.53
-------------
-More stylistic updates to better match kernel style. Add additional statistics
-for filesystem which can be viewed via /proc/fs/cifs. Add more pieces of NTLMv2
-and CIFS Packet Signing enablement.
-
-Version 0.52
-------------
-Replace call to sleep_on with safer wait_on_event.
-Make stylistic changes to better match kernel style recommendations.
-Remove most typedef usage (except for the PDUs themselves).
-
-Version 0.51
-------------
-Update mount so the -unc mount option is no longer required (the ip address can be specified
-in a UNC style device name). Implementation of readpage/writepage started.
-
-Version 0.50
-------------
-Fix intermittent problem with incorrect smb header checking on badly
-fragmented tcp responses.
-
-Version 0.49
-------------
-Fixes to setting of allocation size and file size.
-
-Version 0.48
-------------
-Various 2.5.38 fixes. Now works on 2.5.38.
-
-Version 0.47
-------------
-Prepare for 2.5 kernel merge. Remove ifdefs.
-
-Version 0.46
-------------
-Socket buffer management fixes. Fix dual free.
-
-Version 0.45
-------------
-Various big endian fixes for hardlinks and symlinks and also for dfs.
-
-Version 0.44
-------------
-Various big endian fixes for servers with Unix extensions such as Samba
-
-Version 0.43
-------------
-Various FindNext fixes for incorrect filenames on large directory searches on big endian
-clients. Basic posix file i/o tests now work on big endian machines, not just little endian.
-
-Version 0.42
-------------
-SessionSetup and NegotiateProtocol now work from Big Endian machines.
-Various Big Endian fixes found during testing on the Linux on 390. Various fixes for compatibility with older
-versions of 2.4 kernel (now builds and works again on kernels at least as early as 2.4.7).
-
-Version 0.41
-------------
-Various minor fixes for Connectathon Posix "basic" file i/o test suite. Directory caching fixed so hardlinked
-files now return the correct number of links on fstat as they are repeatedly linked and unlinked.
-
-Version 0.40
-------------
-Implemented "Raw" (i.e. not encapsulated in SPNEGO) NTLMSSP (i.e. the Security Provider Interface used to negotiate
-session advanced session authentication). Raw NTLMSSP is preferred by Windows 2000 Professional and Windows XP.
-Began implementing support for SPNEGO encapsulation of NTLMSSP based session authentication blobs
-(which is the mechanism preferred by Windows 2000 server in the absence of Kerberos).
-
-Version 0.38
-------------
-Introduced optional mount helper utility mount.cifs and made corequisite changes to cifs vfs to enable
-it. Fixed a few bugs in the DFS code (e.g. bcc two bytes too short and incorrect uid in PDU).
-
-Version 0.37
-------------
-Rewrote much of connection and mount/unmount logic to handle bugs with
-multiple uses to same share, multiple users to same server etc.
-
-Version 0.36
-------------
-Fixed major problem with dentry corruption (missing call to dput).
-
-Version 0.35
-------------
-Rewrite of readdir code to fix bug. Various fixes for bigendian machines.
-Begin adding oplock support. Multiusermount and oplockEnabled flags added to /proc/fs/cifs
-although corresponding function not fully implemented in the vfs yet.
-
-Version 0.34
-------------
-Fixed dentry caching bug, misc. cleanup.
-
-Version 0.33
-------------
-Fixed 2.5 support to handle build and configure changes as well as misc. 2.5 changes. Now can build
-on current 2.5 beta version (2.5.24) of the Linux kernel as well as on 2.4 Linux kernels.
-Support for STATUS codes (newer 32 bit NT error codes) added. DFS support begun to be added.
-
-Version 0.32
-------------
-Unix extensions (symlink, readlink, hardlink, chmod and some chgrp and chown) implemented
-and tested against Samba 2.2.5.
-
-
-Version 0.31
-------------
-1) Fixed lockrange to be correct (it was one byte too short)
-
-2) Fixed GETLK (i.e. the fcntl call to test a range of bytes in a file to see if locked) to correctly
-show range as locked when there is a conflict with an existing lock.
-
-3) Default file perms are now 2767 (indicating support for mandatory locks) instead of 777 for directories
-in most cases. Eventually will offer optional ability to query server for the correct perms.
-
-4) Fixed eventual trap when mounting twice to different shares on the same server when the first succeeded
-but the second one was invalid and failed (the second one was incorrectly disconnecting the tcp and smb
-session).
-
-5) Fixed error logging of valid mount options.
-
-6) Removed logging of password field.
-
-7) Moved negotiate, treeDisconnect and uloggoffX (only tConx and SessSetup remain in connect.c) to cifssmb.c
-and cleaned them up and made them more consistent with other cifs functions.
-
-8) Server support for Unix extensions is now fully detected and FindFirst is implemented both ways
-(with or without Unix extensions) but FindNext and QueryPathInfo with the Unix extensions are not completed,
-nor is the symlink support using the Unix extensions.
-
-9) Started adding the readlink and follow_link code.
-
-Version 0.3
------------
-Initial drop
-
+See https://wiki.samba.org/index.php/LinuxCIFSKernel for summary
+information (that may be easier to read than parsing the output of
+"git log fs/cifs") about fixes/improvements to CIFS/SMB2/SMB3 support (changes
+to cifs.ko module) by kernel version (and cifs internal module version).
diff --git a/Documentation/filesystems/cifs/README b/Documentation/filesystems/cifs/README
index 99ce3d25003d..4a804619cff2 100644
--- a/Documentation/filesystems/cifs/README
+++ b/Documentation/filesystems/cifs/README
@@ -603,8 +603,7 @@ DebugData Displays information about active CIFS sessions and
shares, features enabled as well as the cifs.ko
version.
Stats Lists summary resource usage information as well as per
- share statistics, if CONFIG_CIFS_STATS in enabled
- in the kernel configuration.
+ share statistics.
Configuration pseudo-files:
SecurityFlags Flags which control security negotiation and
@@ -687,23 +686,22 @@ cifsFYI functions as a bit mask. Setting it to 1 enables additional kernel
logging of various informational messages. 2 enables logging of non-zero
SMB return codes while 4 enables logging of requests that take longer
than one second to complete (except for byte range lock requests).
-Setting it to 4 requires defining CONFIG_CIFS_STATS2 manually in the
-source code (typically by setting it in the beginning of cifsglob.h),
-and setting it to seven enables all three. Finally, tracing
+Setting it to 4 requires CONFIG_CIFS_STATS2 to be set in the kernel configuration
+(.config). Setting it to seven enables all three. Finally, tracing
the start of smb requests and responses can be enabled via:
echo 1 > /proc/fs/cifs/traceSMB
-Per share (per client mount) statistics are available in /proc/fs/cifs/Stats
-if the kernel was configured with cifs statistics enabled. The statistics
-represent the number of successful (ie non-zero return code from the server)
-SMB responses to some of the more common commands (open, delete, mkdir etc.).
+Per share (per client mount) statistics are available in /proc/fs/cifs/Stats.
+Additional information is available if CONFIG_CIFS_STATS2 is enabled in the
+kernel configuration (.config). The statistics returned include counters which
+represent the number of attempted and failed (ie non-zero return code from the
+server) SMB3 (or cifs) requests grouped by request type (read, write, close etc.).
Also recorded is the total bytes read and bytes written to the server for
that share. Note that due to client caching effects this can be less than the
number of bytes read and written by the application running on the client.
-The statistics for the number of total SMBs and oplock breaks are different in
-that they represent all for that share, not just those for which the server
-returned success.
+Statistics can be reset to zero with "echo 0 > /proc/fs/cifs/Stats", which may be
+useful if comparing performance of two different scenarios.
Also note that "cat /proc/fs/cifs/DebugData" will display information about
the active sessions and the shares that are mounted.
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4/ext4.rst
index 7f628b9f7c4b..9d4368d591fa 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4/ext4.rst
@@ -1,6 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
-Ext4 Filesystem
-===============
+========================
+General Information
+========================
Ext4 is an advanced level of the ext3 filesystem which incorporates
scalability and reliability enhancements for supporting large filesystems
@@ -11,37 +13,30 @@ Mailing list: linux-ext4@vger.kernel.org
Web site: http://ext4.wiki.kernel.org
-1. Quick usage instructions:
-===========================
+Quick usage instructions
+========================
Note: More extensive information for getting started with ext4 can be
- found at the ext4 wiki site at the URL:
- http://ext4.wiki.kernel.org/index.php/Ext4_Howto
+found at the ext4 wiki site at the URL:
+http://ext4.wiki.kernel.org/index.php/Ext4_Howto
- - Compile and install the latest version of e2fsprogs (as of this
- writing version 1.41.3) from:
+ - The latest version of e2fsprogs can be found at:
+
+ https://www.kernel.org/pub/linux/kernel/people/tytso/e2fsprogs/
- http://sourceforge.net/project/showfiles.php?group_id=2406
-
or
- https://www.kernel.org/pub/linux/kernel/people/tytso/e2fsprogs/
+ http://sourceforge.net/project/showfiles.php?group_id=2406
or grab the latest git repository from:
- git://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git
-
- - Note that it is highly important to install the mke2fs.conf file
- that comes with the e2fsprogs 1.41.x sources in /etc/mke2fs.conf. If
- you have edited the /etc/mke2fs.conf file installed on your system,
- you will need to merge your changes with the version from e2fsprogs
- 1.41.x.
+ https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git
- Create a new filesystem using the ext4 filesystem type:
- # mke2fs -t ext4 /dev/hda1
+ # mke2fs -t ext4 /dev/hda1
- Or to configure an existing ext3 filesystem to support extents:
+ Or to configure an existing ext3 filesystem to support extents:
# tune2fs -O extents /dev/hda1
@@ -50,10 +45,6 @@ Note: More extensive information for getting started with ext4 can be
# tune2fs -I 256 /dev/hda1
- (Note: we currently do not have tools to convert an ext4
- filesystem back to ext3; so please do not do try this on production
- filesystems.)
-
- Mounting:
# mount -t ext4 /dev/hda1 /wherever
@@ -75,10 +66,11 @@ Note: More extensive information for getting started with ext4 can be
the filesystem with a large journal can also be helpful for
metadata-intensive workloads.
-2. Features
-===========
+Features
+========
-2.1 Currently available
+Currently Available
+-------------------
* ability to use filesystems > 16TB (e2fsprogs support not available yet)
* extent format reduces metadata overhead (RAM, IO for access, transactions)
@@ -103,31 +95,15 @@ Note: More extensive information for getting started with ext4 can be
[1] Filesystems with a block size of 1k may see a limit imposed by the
directory hash tree having a maximum depth of two.
-2.2 Candidate features for future inclusion
-
-* online defrag (patches available but not well tested)
-* reduced mke2fs time via lazy itable initialization in conjunction with
- the uninit_bg feature (capability to do this is available in e2fsprogs
- but a kernel thread to do lazy zeroing of unused inode table blocks
- after filesystem is first mounted is required for safety)
-
-There are several others under discussion, whether they all make it in is
-partly a function of how much time everyone has to work on them. Features like
-metadata checksumming have been discussed and planned for a bit but no patches
-exist yet so I'm not sure they're in the near-term roadmap.
-
-The big performance win will come with mballoc, delalloc and flex_bg
-grouping of bitmaps and inode tables. Some test results available here:
-
- - http://www.bullopensource.org/ext4/20080818-ffsb/ffsb-write-2.6.27-rc1.html
- - http://www.bullopensource.org/ext4/20080818-ffsb/ffsb-readwrite-2.6.27-rc1.html
-
-3. Options
-==========
+Options
+=======
When mounting an ext4 filesystem, the following options are accepted:
(*) == default
+======================= =======================================================
+Mount Option Description
+======================= =======================================================
ro Mount filesystem read only. Note that ext4 will
replay the journal (and thus write to the
partition) even when mounted "read only". The
@@ -387,33 +363,38 @@ i_version Enable 64-bit inode version support. This option is
dax Use direct access (no page cache). See
Documentation/filesystems/dax.txt. Note that
this option is incompatible with data=journal.
+======================= =======================================================
Data Mode
=========
There are 3 different data modes:
* writeback mode
-In data=writeback mode, ext4 does not journal data at all. This mode provides
-a similar level of journaling as that of XFS, JFS, and ReiserFS in its default
-mode - metadata journaling. A crash+recovery can cause incorrect data to
-appear in files which were written shortly before the crash. This mode will
-typically provide the best ext4 performance.
+
+ In data=writeback mode, ext4 does not journal data at all. This mode provides
+ a similar level of journaling as that of XFS, JFS, and ReiserFS in its default
+ mode - metadata journaling. A crash+recovery can cause incorrect data to
+ appear in files which were written shortly before the crash. This mode will
+ typically provide the best ext4 performance.
* ordered mode
-In data=ordered mode, ext4 only officially journals metadata, but it logically
-groups metadata information related to data changes with the data blocks into a
-single unit called a transaction. When it's time to write the new metadata
-out to disk, the associated data blocks are written first. In general,
-this mode performs slightly slower than writeback but significantly faster than journal mode.
+
+ In data=ordered mode, ext4 only officially journals metadata, but it logically
+ groups metadata information related to data changes with the data blocks into
+ a single unit called a transaction. When it's time to write the new metadata
+ out to disk, the associated data blocks are written first. In general, this
+ mode performs slightly slower than writeback but significantly faster than
+ journal mode.
* journal mode
-data=journal mode provides full data and metadata journaling. All new data is
-written to the journal first, and then to its final location.
-In the event of a crash, the journal can be replayed, bringing both data and
-metadata into a consistent state. This mode is the slowest except when data
-needs to be read from and written to disk at the same time where it
-outperforms all others modes. Enabling this mode will disable delayed
-allocation and O_DIRECT support.
+
+ data=journal mode provides full data and metadata journaling. All new data is
+ written to the journal first, and then to its final location. In the event of
+ a crash, the journal can be replayed, bringing both data and metadata into a
+ consistent state. This mode is the slowest except when data needs to be read
+ from and written to disk at the same time, where it outperforms all other
+ modes. Enabling this mode will disable delayed allocation and O_DIRECT
+ support.
/proc entries
=============
@@ -425,10 +406,12 @@ Information about mounted ext4 file systems can be found in
in table below.
Files in /proc/fs/ext4/<devname>
-..............................................................................
+
+================ =======
File Content
+================ =======
mb_groups details of multiblock allocator buddy cache of free blocks
-..............................................................................
+================ =======
/sys entries
============
@@ -439,28 +422,30 @@ Information about mounted ext4 file systems can be found in
/sys/fs/ext4/dm-0). The files in each per-device directory are shown
in table below.
-Files in /sys/fs/ext4/<devname>
+Files in /sys/fs/ext4/<devname>:
+
(see also Documentation/ABI/testing/sysfs-fs-ext4)
-..............................................................................
- File Content
+============================= =================================================
+File Content
+============================= =================================================
delayed_allocation_blocks This file is read-only and shows the number of
blocks that are dirty in the page cache, but
which do not have their location in the
filesystem allocated yet.
- inode_goal Tuning parameter which (if non-zero) controls
+inode_goal Tuning parameter which (if non-zero) controls
the goal inode used by the inode allocator in
preference to all other allocation heuristics.
This is intended for debugging use only, and
should be 0 on production systems.
- inode_readahead_blks Tuning parameter which controls the maximum
+inode_readahead_blks Tuning parameter which controls the maximum
number of inode table blocks that ext4's inode
table readahead algorithm will pre-read into
the buffer cache
- lifetime_write_kbytes This file is read-only and shows the number of
+lifetime_write_kbytes This file is read-only and shows the number of
kilobytes of data that have been written to this
filesystem since it was created.
@@ -508,7 +493,7 @@ Files in /sys/fs/ext4/<devname>
in the file system. If there is not enough space
for the reserved space when mounting the file
mount will _not_ fail.
-..............................................................................
+============================= =================================================
Ioctls
======
@@ -518,8 +503,10 @@ through the system call interfaces. The list of all Ext4 specific ioctls are
shown in the table below.
Table of Ext4 specific ioctls
-..............................................................................
- Ioctl Description
+
+============================= =================================================
+Ioctl Description
+============================= =================================================
EXT4_IOC_GETFLAGS Get additional attributes associated with inode.
The ioctl argument is an integer bitfield, with
bit values described in ext4.h. This ioctl is an
@@ -610,8 +597,7 @@ Table of Ext4 specific ioctls
normal user by accident.
The data blocks of the previous boot loader
will be associated with the given inode.
-
-..............................................................................
+============================= =================================================
References
==========
diff --git a/Documentation/filesystems/ext4/index.rst b/Documentation/filesystems/ext4/index.rst
new file mode 100644
index 000000000000..71121605558c
--- /dev/null
+++ b/Documentation/filesystems/ext4/index.rst
@@ -0,0 +1,17 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+ext4 Filesystem
+===============
+
+General usage and on-disk artifacts written by ext4. More documentation may
+be ported from the wiki as time permits. This should be considered the
+canonical source of information as the details here have been reviewed by
+the ext4 community.
+
+.. toctree::
+ :maxdepth: 5
+ :numbered:
+
+ ext4
+ ondisk/index
diff --git a/Documentation/filesystems/ext4/ondisk/about.rst b/Documentation/filesystems/ext4/ondisk/about.rst
new file mode 100644
index 000000000000..0aadba052264
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/about.rst
@@ -0,0 +1,44 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+About this Book
+===============
+
+This document attempts to describe the on-disk format for ext4
+filesystems. The same general ideas should apply to ext2/3 filesystems
+as well, though they do not support all the features that ext4 supports,
+and the fields will be shorter.
+
+**NOTE**: This is a work in progress, based on notes that the author
+(djwong) made while picking apart a filesystem by hand. The data
+structure definitions should be current as of Linux 4.18 and
+e2fsprogs-1.44. All comments and corrections are welcome, since there is
+undoubtedly plenty of lore that might not be reflected in freshly
+created demonstration filesystems.
+
+License
+-------
+This book is licensed under the terms of the GNU General Public License, v2.
+
+Terminology
+-----------
+
+ext4 divides a storage device into an array of logical blocks both to
+reduce bookkeeping overhead and to increase throughput by forcing larger
+transfer sizes. Generally, the block size will be 4KiB (the same size as
+pages on x86 and the block layer's default block size), though the
+actual size is calculated as 2 ^ (10 + ``sb.s_log_block_size``) bytes.
+Throughout this document, disk locations are given in terms of these
+logical blocks, not raw LBAs, and not 1024-byte blocks. For the sake of
+convenience, the logical block size will be referred to as
+``$block_size`` throughout the rest of the document.
+
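+As a quick illustration, here is a hypothetical user-space C sketch of
+that formula (not kernel code; the helper name is made up)::
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /* 2 ^ (10 + s_log_block_size) bytes, per the formula above */
+    static uint64_t ext4_block_size(uint32_t s_log_block_size)
+    {
+            return 1024ULL << s_log_block_size;
+    }
+
+    int main(void)
+    {
+            /* s_log_block_size = 2 yields the common 4KiB block size */
+            printf("%llu\n", (unsigned long long)ext4_block_size(2));
+            return 0;
+    }
+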
+When referenced in ``preformatted text`` blocks, ``sb`` refers to fields
+in the super block, and ``inode`` refers to fields in an inode table
+entry.
+
+Other References
+----------------
+
+Also see http://www.nongnu.org/ext2-doc/ for quite a collection of
+information about ext2/3. Here's another old reference:
+http://wiki.osdev.org/Ext2
diff --git a/Documentation/filesystems/ext4/ondisk/allocators.rst b/Documentation/filesystems/ext4/ondisk/allocators.rst
new file mode 100644
index 000000000000..7aa85152ace3
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/allocators.rst
@@ -0,0 +1,56 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Block and Inode Allocation Policy
+---------------------------------
+
+ext4 recognizes (better than ext3, anyway) that data locality is
+generally a desirable quality of a filesystem. On a spinning disk,
+keeping related blocks near each other reduces the amount of movement
+that the head actuator and disk must perform to access a data block,
+thus speeding up disk IO. On an SSD there of course are no moving parts,
+but locality can increase the size of each transfer request while
+reducing the total number of requests. This locality may also have the
+effect of concentrating writes on a single erase block, which can speed
+up file rewrites significantly. Therefore, it is useful to reduce
+fragmentation whenever possible.
+
+The first tool that ext4 uses to combat fragmentation is the multi-block
+allocator. When a file is first created, the block allocator
+speculatively allocates 8KiB of disk space to the file on the assumption
+that the space will get written soon. When the file is closed, the
+unused speculative allocations are of course freed, but if the
+speculation is correct (typically the case for full writes of small
+files) then the file data gets written out in a single multi-block
+extent. A second related trick that ext4 uses is delayed allocation.
+Under this scheme, when a file needs more blocks to absorb file writes,
+the filesystem defers deciding the exact placement on the disk until all
+the dirty buffers are being written out to disk. By not committing to a
+particular placement until it's absolutely necessary (the commit timeout
+is hit, or sync() is called, or the kernel runs out of memory), the hope
+is that the filesystem can make better location decisions.
+
+The third trick that ext4 (and ext3) uses is that it tries to keep a
+file's data blocks in the same block group as its inode. This cuts down
+on the seek penalty when the filesystem first has to read a file's inode
+to learn where the file's data blocks live and then seek over to the
+file's data blocks to begin I/O operations.
+
+The fourth trick is that all the inodes in a directory are placed in the
+same block group as the directory, when feasible. The working assumption
+here is that all the files in a directory might be related, therefore it
+is useful to try to keep them all together.
+
+The fifth trick is that the disk volume is cut up into 128MiB block
+groups; these mini-containers are used as outlined above to try to
+maintain data locality. However, there is a deliberate quirk -- when a
+directory is created in the root directory, the inode allocator scans
+the block groups and puts that directory into the least heavily loaded
+block group that it can find. This encourages directories to spread out
+over a disk; as the top-level directory/file blobs fill up one block
+group, the allocators simply move on to the next block group. Allegedly
+this scheme evens out the loading on the block groups, though the author
+suspects that the directories which are so unlucky as to land towards
+the end of a spinning drive get a raw deal performance-wise.
+
+Of course if all of these mechanisms fail, one can always use e4defrag
+to defragment files.
diff --git a/Documentation/filesystems/ext4/ondisk/attributes.rst b/Documentation/filesystems/ext4/ondisk/attributes.rst
new file mode 100644
index 000000000000..0b01b67b81fe
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/attributes.rst
@@ -0,0 +1,191 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Extended Attributes
+-------------------
+
+Extended attributes (xattrs) are typically stored in a separate data
+block on the disk and referenced from inodes via ``inode.i_file_acl*``.
+The first use of extended attributes seems to have been for storing file
+ACLs and other security data (selinux). With the ``user_xattr`` mount
+option it is possible for users to store extended attributes so long as
+all attribute names begin with “user”; this restriction seems to have
+disappeared as of Linux 3.0.
+
+There are two places where extended attributes can be found. The first
+place is between the end of each inode entry and the beginning of the
+next inode entry. For example, if inode.i\_extra\_isize = 28 and
+sb.inode\_size = 256, then there are 256 - (128 + 28) = 100 bytes
+available for in-inode extended attribute storage. The second place
+where extended attributes can be found is in the block pointed to by
+``inode.i_file_acl``. As of Linux 3.11, it is not possible for this
+block to contain a pointer to a second extended attribute block (or even
+the remaining blocks of a cluster). In theory it is possible for each
+attribute's value to be stored in a separate data block, though as of
+Linux 3.11 the code does not permit this.
+
+Keys are generally assumed to be ASCIIZ strings, whereas values can be
+strings or binary data.
+
+Extended attributes, when stored after the inode, have a header
+``ext4_xattr_ibody_header`` that is 4 bytes long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - h\_magic
+ - Magic number for identification, 0xEA020000. This value is set by the
+ Linux driver, though e2fsprogs doesn't seem to check it(?)
+
+The beginning of an extended attribute block is in
+``struct ext4_xattr_header``, which is 32 bytes long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - h\_magic
+ - Magic number for identification, 0xEA020000.
+ * - 0x4
+ - \_\_le32
+ - h\_refcount
+ - Reference count.
+ * - 0x8
+ - \_\_le32
+ - h\_blocks
+ - Number of disk blocks used.
+ * - 0xC
+ - \_\_le32
+ - h\_hash
+ - Hash value of all attributes.
+ * - 0x10
+ - \_\_le32
+ - h\_checksum
+ - Checksum of the extended attribute block.
+ * - 0x14
+ - \_\_u32
+ - h\_reserved[3]
+ - Zero.
+
+The checksum is calculated against the FS UUID, the 64-bit block number
+of the extended attribute block, and the entire block (header +
+entries).
+
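+For reference, these two headers correspond to the following C
+declarations (as in ``fs/ext4/xattr.h``; ``__le32`` and ``__u32`` are
+the kernel's fixed-endianness integer types)::
+
+    struct ext4_xattr_header {
+            __le32  h_magic;        /* magic number for identification */
+            __le32  h_refcount;     /* reference count */
+            __le32  h_blocks;       /* number of disk blocks used */
+            __le32  h_hash;         /* hash value of all attributes */
+            __le32  h_checksum;     /* checksum of the xattr block */
+            __u32   h_reserved[3];  /* zero right now */
+    };
+
+    struct ext4_xattr_ibody_header {
+            __le32  h_magic;        /* magic number for identification */
+    };
+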
+Following the ``struct ext4_xattr_header`` or
+``struct ext4_xattr_ibody_header`` is an array of
+``struct ext4_xattr_entry``; each of these entries is at least 16 bytes
+long. When stored in an external block, the ``struct ext4_xattr_entry``
+entries must be stored in sorted order. The sort order is
+``e_name_index``, then ``e_name_len``, and finally ``e_name``.
+Attributes stored inside an inode do not need to be stored in sorted order.
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_u8
+ - e\_name\_len
+ - Length of name.
+ * - 0x1
+ - \_\_u8
+ - e\_name\_index
+ - Attribute name index. There is a discussion of this below.
+ * - 0x2
+ - \_\_le16
+ - e\_value\_offs
+ - Location of this attribute's value on the disk block where it is stored.
+ Multiple attributes can share the same value. For an inode attribute
+ this value is relative to the start of the first entry; for a block this
+ value is relative to the start of the block (i.e. the header).
+ * - 0x4
+ - \_\_le32
+ - e\_value\_inum
+ - The inode where the value is stored. Zero indicates the value is in the
+ same block as this entry. This field is only used if the
+ INCOMPAT\_EA\_INODE feature is enabled.
+ * - 0x8
+ - \_\_le32
+ - e\_value\_size
+ - Length of attribute value.
+ * - 0xC
+ - \_\_le32
+ - e\_hash
+ - Hash value of attribute name and attribute value. The kernel doesn't
+ update the hash for in-inode attributes, so for that case this value
+ must be zero, because e2fsck validates any non-zero hash regardless of
+ where the xattr lives.
+ * - 0x10
+ - char
+ - e\_name[e\_name\_len]
+ - Attribute name. Does not include trailing NULL.
+
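+The entry described by this table corresponds to
+``struct ext4_xattr_entry`` in ``fs/ext4/xattr.h``::
+
+    struct ext4_xattr_entry {
+            __u8    e_name_len;     /* length of name */
+            __u8    e_name_index;   /* attribute name index */
+            __le16  e_value_offs;   /* offset of value within its block */
+            __le32  e_value_inum;   /* inode holding the value, or zero */
+            __le32  e_value_size;   /* size of attribute value */
+            __le32  e_hash;         /* hash of name and value */
+            char    e_name[0];      /* attribute name, not NULL-terminated */
+    };
+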
+Attribute values can follow the end of the entry table. There appears to
+be a requirement that they be aligned to 4-byte boundaries. The values
+are stored starting at the end of the block and grow towards the
+xattr\_header/xattr\_entry table. When the two collide, the overflow is
+put into a separate disk block. If the disk block fills up, the
+filesystem returns -ENOSPC.
+
+The first four fields of the ``ext4_xattr_entry`` are set to zero to
+mark the end of the key list.
+
+Attribute Name Indices
+~~~~~~~~~~~~~~~~~~~~~~
+
+Logically speaking, extended attributes are a series of key=value pairs.
+The keys are assumed to be NULL-terminated strings. To reduce the amount
+of on-disk space that the keys consume, the beginning of the key string
+is matched against the attribute name index. If a match is found, the
+attribute name index field is set, and the matching string is removed from
+the key name. Here is a map of name index values to key prefixes:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Name Index
+ - Key Prefix
+ * - 0
+ - (no prefix)
+ * - 1
+ - “user.”
+ * - 2
+ - “system.posix\_acl\_access”
+ * - 3
+ - “system.posix\_acl\_default”
+ * - 4
+ - “trusted.”
+ * - 6
+ - “security.”
+ * - 7
+ - “system.” (inline\_data only?)
+ * - 8
+ - “system.richacl” (SuSE kernels only?)
+
+For example, if the attribute key is “user.fubar”, the attribute name
+index is set to 1 and the “fubar” name is recorded on disk.
+
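+Since this is just a lookup table, a hypothetical C rendering of the
+map above might look as follows (not a kernel definition; index 5 does
+not appear in the table)::
+
+    /* attribute name index -> key prefix */
+    static const char *const xattr_prefix[] = {
+            [0] = "",
+            [1] = "user.",
+            [2] = "system.posix_acl_access",
+            [3] = "system.posix_acl_default",
+            [4] = "trusted.",
+            [6] = "security.",
+            [7] = "system.",
+            [8] = "system.richacl",
+    };
+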
+POSIX ACLs
+~~~~~~~~~~
+
+POSIX ACLs are stored in a reduced version of the Linux kernel (and
+libacl's) internal ACL format. The key difference is that the version
+number is different (1) and the ``e_id`` field is only stored for named
+user and group ACLs.
diff --git a/Documentation/filesystems/ext4/ondisk/bigalloc.rst b/Documentation/filesystems/ext4/ondisk/bigalloc.rst
new file mode 100644
index 000000000000..c6d88557553c
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/bigalloc.rst
@@ -0,0 +1,22 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Bigalloc
+--------
+
+At the moment, the default size of a block is 4KiB, which is a commonly
+supported page size on most MMU-capable hardware. This is fortunate, as
+ext4 code is not prepared to handle the case where the block size
+exceeds the page size. However, for a filesystem of mostly huge files,
+it is desirable to be able to allocate disk blocks in units of multiple
+blocks to reduce both fragmentation and metadata overhead. The
+`bigalloc <Bigalloc>`__ feature provides exactly this ability. The
+administrator can set a block cluster size at mkfs time (which is stored
+in the s\_log\_cluster\_size field in the superblock); from then on, the
+block bitmaps track clusters, not individual blocks. This means that
+block groups can be several gigabytes in size (instead of just 128MiB);
+however, the minimum allocation unit becomes a cluster, not a block,
+even for directories. TaoBao had a patchset to extend the “use units of
+clusters instead of blocks” to the extent tree, though it is not clear
+where those patches went -- they eventually morphed into “extent tree v2”
+but that code has not landed as of May 2015.
+
diff --git a/Documentation/filesystems/ext4/ondisk/bitmaps.rst b/Documentation/filesystems/ext4/ondisk/bitmaps.rst
new file mode 100644
index 000000000000..c7546dbc197a
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/bitmaps.rst
@@ -0,0 +1,28 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Block and inode Bitmaps
+-----------------------
+
+The data block bitmap tracks the usage of data blocks within the block
+group.
+
+The inode bitmap records which entries in the inode table are in use.
+
+As with most bitmaps, one bit represents the usage status of one data
+block or inode table entry. This implies a block group size of 8 \*
+number\_of\_bytes\_in\_a\_logical\_block.
+
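+As a sanity check, here is a small hypothetical C program reproducing
+that arithmetic for the usual block sizes; the results match the
+per-group figures quoted in the Blocks chapter::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+            unsigned long sizes[] = { 1024, 2048, 4096, 65536 };
+            int i;
+
+            /* one bitmap block of block_size bytes covers 8 * block_size
+             * blocks (or inodes) */
+            for (i = 0; i < 4; i++)
+                    printf("%lu-byte blocks -> %lu per group\n",
+                           sizes[i], 8 * sizes[i]);
+            return 0;
+    }
+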
+NOTE: If ``BLOCK_UNINIT`` is set for a given block group, various parts
+of the kernel and e2fsprogs code pretend that the block bitmap contains
+zeros (i.e. all blocks in the group are free). However, it is not
+necessarily the case that no blocks are in use -- if ``meta_bg`` is set,
+the bitmaps and group descriptor live inside the group. Unfortunately,
+ext2fs\_test\_block\_bitmap2() will return '0' for those locations,
+which produces confusing debugfs output.
+
+Inode Table
+-----------
+Inode tables are statically allocated at mkfs time. Each block group
+descriptor points to the start of the table, and the superblock records
+the number of inodes per group. See the section on inodes for more
+information.
diff --git a/Documentation/filesystems/ext4/ondisk/blockgroup.rst b/Documentation/filesystems/ext4/ondisk/blockgroup.rst
new file mode 100644
index 000000000000..baf888e4c06a
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/blockgroup.rst
@@ -0,0 +1,135 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Layout
+------
+
+The layout of a standard block group is approximately as follows (each
+of these fields is discussed in a separate section below):
+
+.. list-table::
+ :widths: 1 1 1 1 1 1 1 1
+ :header-rows: 1
+
+ * - Group 0 Padding
+ - ext4 Super Block
+ - Group Descriptors
+ - Reserved GDT Blocks
+ - Data Block Bitmap
+ - inode Bitmap
+ - inode Table
+ - Data Blocks
+ * - 1024 bytes
+ - 1 block
+ - many blocks
+ - many blocks
+ - 1 block
+ - 1 block
+ - many blocks
+ - many more blocks
+
+For the special case of block group 0, the first 1024 bytes are unused,
+to allow for the installation of x86 boot sectors and other oddities.
+The superblock will start at offset 1024 bytes, whichever block that
+happens to be (usually 0). However, if for some reason the block size =
+1024, then block 0 is marked in use and the superblock goes in block 1.
+For all other block groups, there is no padding.
+
+The ext4 driver primarily works with the superblock and the group
+descriptors that are found in block group 0. Redundant copies of the
+superblock and group descriptors are written to some of the block groups
+across the disk in case the beginning of the disk gets trashed, though
+not all block groups necessarily host a redundant copy (see following
+paragraph for more details). If the group does not have a redundant
+copy, the block group begins with the data block bitmap. Note also that
+when the filesystem is freshly formatted, mkfs will allocate “reserve
+GDT block” space after the block group descriptors and before the start
+of the block bitmaps to allow for future expansion of the filesystem. By
+default, a filesystem is allowed to increase in size by a factor of
+1024x over the original filesystem size.
+
+The location of the inode table is given by ``grp.bg_inode_table_*``. It
+is a contiguous range of blocks large enough to contain
+``sb.s_inodes_per_group * sb.s_inode_size`` bytes.
+
+As for the ordering of items in a block group, it is generally
+established that the super block and the group descriptor table, if
+present, will be at the beginning of the block group. The bitmaps and
+the inode table can be anywhere, and it is quite possible for the
+bitmaps to come after the inode table, or for both to be in different
+groups (flex\_bg). Leftover space is used for file data blocks, indirect
+block maps, extent tree blocks, and extended attributes.
+
+Flexible Block Groups
+---------------------
+
+Starting in ext4, there is a new feature called flexible block groups
+(flex\_bg). In a flex\_bg, several block groups are tied together as one
+logical block group; the bitmap spaces and the inode table space in the
+first block group of the flex\_bg are expanded to include the bitmaps
+and inode tables of all other block groups in the flex\_bg. For example,
+if the flex\_bg size is 4, then group 0 will contain (in order) the
+superblock, group descriptors, data block bitmaps for groups 0-3, inode
+bitmaps for groups 0-3, inode tables for groups 0-3, and the remaining
+space in group 0 is for file data. The effect of this is to group the
+block metadata close together for faster loading, and to enable large
+files to be continuous on disk. Backup copies of the superblock and
+group descriptors are always at the beginning of block groups, even if
+flex\_bg is enabled. The number of block groups that make up a flex\_bg
+is given by 2 ^ ``sb.s_log_groups_per_flex``.
+
+Meta Block Groups
+-----------------
+
+Without the option META\_BG, for safety concerns, all block group
+descriptor copies are kept in the first block group. Given the default
+128MiB (2^27 bytes) block group size and 64-byte group descriptors, ext4
+can have at most 2^27/64 = 2^21 block groups. This limits the entire
+filesystem size to 2^21 ∗ 2^27 = 2^48 bytes or 256TiB.
+
+The solution to this problem is to use the metablock group feature
+(META\_BG), which is already in ext3 for all 2.6 releases. With the
+META\_BG feature, ext4 filesystems are partitioned into many metablock
+groups. Each metablock group is a cluster of block groups whose group
+descriptor structures can be stored in a single disk block. For ext4
+filesystems with 4 KB block size, a single metablock group partition
+includes 64 block groups, or 8 GiB of disk space. The metablock group
+feature moves the location of the group descriptors from the congested
+first block group of the whole filesystem into the first group of each
+metablock group itself. The backups are in the second and last group of
+each metablock group. This increases the 2^21 maximum block groups limit
+to the hard limit 2^32, allowing support for a 512PiB filesystem.
+
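+That arithmetic can be checked with a small hypothetical C program
+(standalone, assuming the 64-byte descriptors and 128MiB block groups
+stated above)::
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+            uint64_t group_size = 128ULL << 20;  /* 2^27 bytes per group */
+            uint64_t desc_size = 64;             /* bytes per descriptor */
+
+            /* without META_BG, every descriptor must fit in group 0 */
+            uint64_t max_groups = group_size / desc_size;  /* 2^21 */
+
+            printf("max filesystem size: %llu TiB\n",
+                   (unsigned long long)((max_groups * group_size) >> 40));
+            return 0;
+    }
+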
+The change in the filesystem format replaces the current scheme where
+the superblock is followed by a variable-length set of block group
+descriptors. Instead, the superblock and a single block group descriptor
+block is placed at the beginning of the first, second, and last block
+groups in a meta-block group. A meta-block group is a collection of
+block groups which can be described by a single block group descriptor
+block. Since the size of the block group descriptor structure is 32
+bytes, a meta-block group contains 32 block groups for filesystems with
+a 1KB block size, and 128 block groups for filesystems with a 4KB
+blocksize. Filesystems can either be created using this new block group
+descriptor layout, or existing filesystems can be resized on-line, and
+the field s\_first\_meta\_bg in the superblock will indicate the first
+block group using this new layout.
+
+Please see an important note about ``BLOCK_UNINIT`` in the section about
+block and inode bitmaps.
+
+Lazy Block Group Initialization
+-------------------------------
+
+A new feature for ext4 is a set of three block group descriptor flags that
+enable mkfs to skip initializing other parts of the block group
+metadata. Specifically, the INODE\_UNINIT and BLOCK\_UNINIT flags mean
+that the inode and block bitmaps for that group can be calculated and
+therefore the on-disk bitmap blocks are not initialized. This is
+generally the case for an empty block group or a block group containing
+only fixed-location block group metadata. The INODE\_ZEROED flag means
+that the inode table has been initialized; mkfs will unset this flag and
+rely on the kernel to initialize the inode tables in the background.
+
+By not writing zeroes to the bitmaps and inode table, mkfs time is
+reduced considerably. Note the feature flag is RO\_COMPAT\_GDT\_CSUM,
+but the dumpe2fs output prints this as “uninit\_bg”. They are the same
+thing.
diff --git a/Documentation/filesystems/ext4/ondisk/blockmap.rst b/Documentation/filesystems/ext4/ondisk/blockmap.rst
new file mode 100644
index 000000000000..30e25750d88a
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/blockmap.rst
@@ -0,0 +1,49 @@
+.. SPDX-License-Identifier: GPL-2.0
+
++---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| i.i\_block Offset | Where It Points |
++=====================+==============================================================================================================================================================================================================================+
+| 0 to 11 | Direct map to file blocks 0 to 11. |
++---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| 12 | Indirect block: (file blocks 12 to (``$block_size`` / 4) + 11, or 12 to 1035 if 4KiB blocks) |
+| | |
+| | +------------------------------+--------------------------------------------------------------------+ |
+| | | Indirect Block Offset | Where It Points | |
+| | +==============================+====================================================================+ |
+| | | 0 to (``$block_size`` / 4) | Direct map to (``$block_size`` / 4) blocks (1024 if 4KiB blocks) | |
+| | +------------------------------+--------------------------------------------------------------------+ |
++---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| 13 | Double-indirect block: (file blocks ``$block_size``/4 + 12 to (``$block_size`` / 4) ^ 2 + (``$block_size`` / 4) + 11, or 1036 to 1049611 if 4KiB blocks) |
+| | |
+| | +--------------------------------+---------------------------------------------------------------------------------------------------------+ |
+| | | Double Indirect Block Offset | Where It Points | |
+| | +================================+=========================================================================================================+ |
+| | | 0 to (``$block_size`` / 4) | Map to (``$block_size`` / 4) indirect blocks (1024 if 4KiB blocks) | |
+| | | | | |
+| | | | +------------------------------+--------------------------------------------------------------------+ | |
+| | | | | Indirect Block Offset | Where It Points | | |
+| | | | +==============================+====================================================================+ | |
+| | | | | 0 to (``$block_size`` / 4) | Direct map to (``$block_size`` / 4) blocks (1024 if 4KiB blocks) | | |
+| | | | +------------------------------+--------------------------------------------------------------------+ | |
+| | +--------------------------------+---------------------------------------------------------------------------------------------------------+ |
++---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| 14 | Triple-indirect block: (file blocks (``$block_size`` / 4) ^ 2 + (``$block_size`` / 4) + 12 to (``$block_size`` / 4) ^ 3 + (``$block_size`` / 4) ^ 2 + (``$block_size`` / 4) + 11, or 1049612 to 1074791435 if 4KiB blocks) |
+| | |
+| | +--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+ |
+| | | Triple Indirect Block Offset | Where It Points | |
+| | +================================+================================================================================================================================================+ |
+| | | 0 to (``$block_size`` / 4) | Map to (``$block_size`` / 4) double indirect blocks (1024 if 4KiB blocks) | |
+| | | | | |
+| | | | +--------------------------------+---------------------------------------------------------------------------------------------------------+ | |
+| | | | | Double Indirect Block Offset | Where It Points | | |
+| | | | +================================+=========================================================================================================+ | |
+| | | | | 0 to (``$block_size`` / 4) | Map to (``$block_size`` / 4) indirect blocks (1024 if 4KiB blocks) | | |
+| | | | | | | | |
+| | | | | | +------------------------------+--------------------------------------------------------------------+ | | |
+| | | | | | | Indirect Block Offset | Where It Points | | | |
+| | | | | | +==============================+====================================================================+ | | |
+| | | | | | | 0 to (``$block_size`` / 4) | Direct map to (``$block_size`` / 4) blocks (1024 if 4KiB blocks) | | | |
+| | | | | | +------------------------------+--------------------------------------------------------------------+ | | |
+| | | | +--------------------------------+---------------------------------------------------------------------------------------------------------+ | |
+| | +--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+ |
++---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/Documentation/filesystems/ext4/ondisk/blocks.rst b/Documentation/filesystems/ext4/ondisk/blocks.rst
new file mode 100644
index 000000000000..73d4dc0f7bda
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/blocks.rst
@@ -0,0 +1,142 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Blocks
+------
+
+ext4 allocates storage space in units of “blocks”. A block is a group of
+sectors between 1KiB and 64KiB, and the number of sectors must be an
+integral power of 2. Blocks are in turn grouped into larger units called
+block groups. Block size is specified at mkfs time and typically is
+4KiB. You may experience mounting problems if block size is greater than
+page size (i.e. 64KiB blocks on an i386, which only has 4KiB memory
+pages). By default a filesystem can contain 2^32 blocks; if the '64bit'
+feature is enabled, then a filesystem can have 2^64 blocks.
+
+For 32-bit filesystems, limits are as follows:
+
+.. list-table::
+ :widths: 1 1 1 1 1
+ :header-rows: 1
+
+ * - Item
+ - 1KiB
+ - 2KiB
+ - 4KiB
+ - 64KiB
+ * - Blocks
+ - 2^32
+ - 2^32
+ - 2^32
+ - 2^32
+ * - Inodes
+ - 2^32
+ - 2^32
+ - 2^32
+ - 2^32
+ * - File System Size
+ - 4TiB
+ - 8TiB
+ - 16TiB
+ - 256PiB
+ * - Blocks Per Block Group
+ - 8,192
+ - 16,384
+ - 32,768
+ - 524,288
+ * - Inodes Per Block Group
+ - 8,192
+ - 16,384
+ - 32,768
+ - 524,288
+ * - Block Group Size
+ - 8MiB
+ - 32MiB
+ - 128MiB
+ - 32GiB
+ * - Blocks Per File, Extents
+ - 2^32
+ - 2^32
+ - 2^32
+ - 2^32
+ * - Blocks Per File, Block Maps
+ - 16,843,020
+ - 134,480,396
+ - 1,074,791,436
+ - 4,398,314,962,956 (really 2^32 due to field size limitations)
+ * - File Size, Extents
+ - 4TiB
+ - 8TiB
+ - 16TiB
+ - 256TiB
+ * - File Size, Block Maps
+ - 16GiB
+ - 256GiB
+ - 4TiB
+ - 256TiB
+
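+Two of these rows follow directly from the on-disk structures. A block
+group's block bitmap occupies exactly one block, so a group can track at
+most ``8 * block_size`` blocks (32,768 for 4KiB blocks, matching “Blocks
+Per Block Group” above). Likewise, the “Blocks Per File, Block Maps”
+figure is the capacity of the (up to) three-level indirect map described
+later in this document: with 4KiB blocks, each map block holds 1024
+4-byte block pointers, so
+
+.. math::
+
+   12 + 1024 + 1024^2 + 1024^3 = 1{,}074{,}791{,}436
+
+which, at 4KiB per block, yields (roughly) the 4TiB “File Size, Block
+Maps” limit.
+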
+For 64-bit filesystems, limits are as follows:
+
+.. list-table::
+ :widths: 1 1 1 1 1
+ :header-rows: 1
+
+ * - Item
+ - 1KiB
+ - 2KiB
+ - 4KiB
+ - 64KiB
+ * - Blocks
+ - 2^64
+ - 2^64
+ - 2^64
+ - 2^64
+ * - Inodes
+ - 2^32
+ - 2^32
+ - 2^32
+ - 2^32
+ * - File System Size
+ - 16ZiB
+ - 32ZiB
+ - 64ZiB
+ - 1YiB
+ * - Blocks Per Block Group
+ - 8,192
+ - 16,384
+ - 32,768
+ - 524,288
+ * - Inodes Per Block Group
+ - 8,192
+ - 16,384
+ - 32,768
+ - 524,288
+ * - Block Group Size
+ - 8MiB
+ - 32MiB
+ - 128MiB
+ - 32GiB
+ * - Blocks Per File, Extents
+ - 2^32
+ - 2^32
+ - 2^32
+ - 2^32
+ * - Blocks Per File, Block Maps
+ - 16,843,020
+ - 134,480,396
+ - 1,074,791,436
+ - 4,398,314,962,956 (really 2^32 due to field size limitations)
+ * - File Size, Extents
+ - 4TiB
+ - 8TiB
+ - 16TiB
+ - 256TiB
+ * - File Size, Block Maps
+ - 16GiB
+ - 256GiB
+ - 4TiB
+ - 256TiB
+
+Note: Files not using extents (i.e. files using block maps) must be
+placed within the first 2^32 blocks of a filesystem. Files with extents
+must be placed within the first 2^48 blocks of a filesystem. It's not
+clear what happens with larger filesystems.
diff --git a/Documentation/filesystems/ext4/ondisk/checksums.rst b/Documentation/filesystems/ext4/ondisk/checksums.rst
new file mode 100644
index 000000000000..9d6a793b2e03
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/checksums.rst
@@ -0,0 +1,73 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Checksums
+---------
+
+Starting in early 2012, metadata checksums were added to all major ext4
+and jbd2 data structures. The associated feature flag is metadata\_csum.
+The desired checksum algorithm is indicated in the superblock, though as
+of October 2012 the only supported algorithm is crc32c. Some data
+structures did not have space to fit a full 32-bit checksum, so only the
+lower 16 bits are stored. Enabling the 64bit feature increases the data
+structure size so that full 32-bit checksums can be stored for many data
+structures. However, existing 32-bit filesystems cannot be extended to
+enable 64bit mode, at least not without the experimental resize2fs
+patches to do so.
+
+Existing filesystems can have checksumming added by running
+``tune2fs -O metadata_csum`` against the underlying device. If tune2fs
+encounters directory blocks that lack sufficient empty space to add a
+checksum, it will request that you run ``e2fsck -D`` to have the
+directories rebuilt with checksums. This has the added benefit of
+removing slack space from the directory files and rebalancing the htree
+indexes. If you \_ignore\_ this step, your directories will not be
+protected by a checksum!
+
+The following table describes the data elements that go into each type
+of checksum. The checksum function is whatever the superblock describes
+(crc32c as of October 2013) unless noted otherwise.
+
+.. list-table::
+ :widths: 1 1 4
+ :header-rows: 1
+
+ * - Metadata
+ - Length
+ - Ingredients
+ * - Superblock
+ - \_\_le32
+ - The entire superblock up to the checksum field. The UUID lives inside
+ the superblock.
+ * - MMP
+ - \_\_le32
+ - UUID + the entire MMP block up to the checksum field.
+ * - Extended Attributes
+ - \_\_le32
+ - UUID + the entire extended attribute block. The checksum field is set to
+ zero.
+ * - Directory Entries
+ - \_\_le32
+ - UUID + inode number + inode generation + the directory block up to the
+ fake entry enclosing the checksum field.
+ * - HTREE Nodes
+ - \_\_le32
+ - UUID + inode number + inode generation + all valid entries + HTREE tail.
+ The checksum field is set to zero.
+ * - Extents
+ - \_\_le32
+ - UUID + inode number + inode generation + the entire extent block up to
+ the checksum field.
+ * - Bitmaps
+ - \_\_le32 or \_\_le16
+ - UUID + the entire bitmap. Checksums are stored in the group descriptor,
+ and truncated to 16 bits if the group descriptor size is 32 bytes
+ (i.e., if the 64bit feature is not enabled).
+ * - Inodes
+ - \_\_le32
+ - UUID + inode number + inode generation + the entire inode. The checksum
+ field is set to zero. Each inode has its own checksum.
+ * - Group Descriptors
+ - \_\_le16
+ - If metadata\_csum, then UUID + group number + the entire descriptor;
+ else if gdt\_csum, then crc16(UUID + group number + the entire
+ descriptor). In all cases, only the lower 16 bits are stored.
+
diff --git a/Documentation/filesystems/ext4/ondisk/directory.rst b/Documentation/filesystems/ext4/ondisk/directory.rst
new file mode 100644
index 000000000000..8fcba68c2884
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/directory.rst
@@ -0,0 +1,426 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Directory Entries
+-----------------
+
+In an ext4 filesystem, a directory is more or less a flat file that maps
+an arbitrary byte string (usually ASCII) to an inode number on the
+filesystem. There can be many directory entries across the filesystem
+that reference the same inode number--these are known as hard links, and
+that is why hard links cannot reference files on other filesystems. As
+such, directory entries are found by reading the data block(s)
+associated with a directory file for the particular directory entry that
+is desired.
+
+Linear (Classic) Directories
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default, each directory lists its entries in an “almost-linear”
+array. I write “almost” because it's not a linear array in the memory
+sense, since directory entries are not split across filesystem blocks.
+Therefore, it is more accurate to say that a directory is a series of
+data blocks and that each block contains a linear array of directory
+entries. The end of each per-block array is signified by reaching the
+end of the block; the last entry in the block has a record length that
+takes it all the way to the end of the block. The end of the entire
+directory is of course signified by reaching the end of the file. Unused
+directory entries are signified by inode = 0. By default the filesystem
+uses ``struct ext4_dir_entry_2`` for directory entries unless the
+“filetype” feature flag is not set, in which case it uses
+``struct ext4_dir_entry``.
+
+The original directory entry format is ``struct ext4_dir_entry``, which
+is at most 263 bytes long, though on disk you'll need to reference
+``dirent.rec_len`` to know for sure.
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - inode
+ - Number of the inode that this directory entry points to.
+ * - 0x4
+ - \_\_le16
+ - rec\_len
+ - Length of this directory entry. Must be a multiple of 4.
+ * - 0x6
+ - \_\_le16
+ - name\_len
+ - Length of the file name.
+ * - 0x8
+ - char
+ - name[EXT4\_NAME\_LEN]
+ - File name.
+
+Since file names cannot be longer than 255 bytes, the new directory
+entry format shortens the name\_len field and uses the space for a file
+type flag, probably to avoid having to load every inode during directory
+tree traversal. This format is ``ext4_dir_entry_2``, which is at most
+263 bytes long, though on disk you'll need to reference
+``dirent.rec_len`` to know for sure.
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - inode
+ - Number of the inode that this directory entry points to.
+ * - 0x4
+ - \_\_le16
+ - rec\_len
+ - Length of this directory entry.
+ * - 0x6
+ - \_\_u8
+ - name\_len
+ - Length of the file name.
+ * - 0x7
+ - \_\_u8
+ - file\_type
+ - File type code, see ftype_ table below.
+ * - 0x8
+ - char
+ - name[EXT4\_NAME\_LEN]
+ - File name.
+
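+For reference, a sketch of this layout as a C struct (the kernel declares
+an equivalent ``struct ext4_dir_entry_2`` in ``fs/ext4/ext4.h``):
+
+.. code-block:: c
+
+    struct ext4_dir_entry_2 {
+        __le32 inode;               /* inode number; 0 if unused */
+        __le16 rec_len;             /* length of this entire entry */
+        __u8   name_len;            /* length of the name below */
+        __u8   file_type;           /* see the ftype table below */
+        char   name[EXT4_NAME_LEN]; /* only name_len bytes are valid */
+    };
+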
+.. _ftype:
+
+The directory file type is one of the following values:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x0
+ - Unknown.
+ * - 0x1
+ - Regular file.
+ * - 0x2
+ - Directory.
+ * - 0x3
+ - Character device file.
+ * - 0x4
+ - Block device file.
+ * - 0x5
+ - FIFO.
+ * - 0x6
+ - Socket.
+ * - 0x7
+ - Symbolic link.
+
+In order to add checksums to these classic directory blocks, a phony
+``struct ext4_dir_entry`` is placed at the end of each leaf block to
+hold the checksum. The directory entry is 12 bytes long. The inode
+number and name\_len fields are set to zero to fool old software into
+ignoring an apparently empty directory entry, and the checksum is stored
+in the place where the name normally goes. The structure is
+``struct ext4_dir_entry_tail``:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - det\_reserved\_zero1
+ - Inode number, which must be zero.
+ * - 0x4
+ - \_\_le16
+ - det\_rec\_len
+ - Length of this directory entry, which must be 12.
+ * - 0x6
+ - \_\_u8
+ - det\_reserved\_zero2
+ - Length of the file name, which must be zero.
+ * - 0x7
+ - \_\_u8
+ - det\_reserved\_ft
+ - File type, which must be 0xDE.
+ * - 0x8
+ - \_\_le32
+ - det\_checksum
+ - Directory leaf block checksum.
+
+The leaf directory block checksum is calculated against the FS UUID, the
+directory's inode number, the directory's inode generation number, and
+the entire directory entry block up to (but not including) the fake
+directory entry.
+
+Hash Tree Directories
+~~~~~~~~~~~~~~~~~~~~~
+
+A linear array of directory entries isn't great for performance, so a
+new feature was added to ext3 to provide a faster (but peculiar)
+balanced tree keyed off a hash of the directory entry name. If the
+EXT4\_INDEX\_FL (0x1000) flag is set in the inode, this directory uses a
+hashed btree (htree) to organize and find directory entries. For
+backwards read-only compatibility with ext2, this tree is actually
+hidden inside the directory file, masquerading as “empty” directory data
+blocks! It was stated previously that the end of the linear directory
+entry table was signified with an entry pointing to inode 0; this is
+(ab)used to fool the old linear-scan algorithm into thinking that the
+rest of the directory block is empty so that it moves on.
+
+The root of the tree always lives in the first data block of the
+directory. By ext2 custom, the '.' and '..' entries must appear at the
+beginning of this first block, so they are put here as two
+``struct ext4_dir_entry_2``\ s and not stored in the tree. The rest of
+the root node contains metadata about the tree and finally a hash->block
+map to find nodes that are lower in the htree. If
+``dx_root.info.indirect_levels`` is non-zero then the htree has two
+levels; the data block pointed to by the root node's map is an interior
+node, which is indexed by a minor hash. Interior nodes in this tree
+contain a zeroed out ``struct ext4_dir_entry_2`` followed by a
+minor\_hash->block map to find leaf nodes. Leaf nodes contain a linear
+array of all ``struct ext4_dir_entry_2``; all of these entries
+(presumably) hash to the same value. If there is an overflow, the
+entries simply overflow into the next leaf node, and the
+least-significant bit of the hash (in the interior node map) that gets
+us to this next leaf node is set.
+
+To traverse the directory as a htree, the code calculates the hash of
+the desired file name and uses it to find the corresponding block
+number. If the tree is flat, the block is a linear array of directory
+entries that can be searched; otherwise, the minor hash of the file name
+is computed and used against this second block to find the corresponding
+third block number. That third block number will be a linear array of
+directory entries.
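+
+Within a single dx\_root or dx\_node, the hash->block lookup is a binary
+search over the dx\_entry map described below. A minimal sketch of that
+search, assuming an in-memory array of hash/block pairs and ignoring the
+limit/count fields that overlay entries[0]:
+
+.. code-block:: c
+
+    #include <stdint.h>
+
+    struct dx_entry { uint32_t hash; uint32_t block; };
+
+    /* Return the block whose range covers 'hash': the last map entry
+     * with entry.hash <= hash. entries[0] implicitly covers hash 0,
+     * so the search runs over entries[1..count-1]. */
+    static uint32_t dx_find_block(const struct dx_entry *entries,
+                                  int count, uint32_t hash)
+    {
+        int lo = 1, hi = count - 1;
+
+        while (lo <= hi) {
+            int mid = (lo + hi) / 2;
+            if (entries[mid].hash > hash)
+                hi = mid - 1;
+            else
+                lo = mid + 1;
+        }
+        return entries[hi].block; /* hi == 0 means the hash=0 block */
+    }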
+
+To traverse the directory as a linear array (such as the old code does),
+the code simply reads every data block in the directory. The blocks used
+for the htree will appear to have no entries (aside from '.' and '..')
+and so only the leaf nodes will appear to have any interesting content.
+
+The root of the htree is in ``struct dx_root``, which is the full length
+of a data block:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - dot.inode
+ - inode number of this directory.
+ * - 0x4
+ - \_\_le16
+ - dot.rec\_len
+ - Length of this record, 12.
+ * - 0x6
+ - u8
+ - dot.name\_len
+ - Length of the name, 1.
+ * - 0x7
+ - u8
+ - dot.file\_type
+ - File type of this entry, 0x2 (directory) (if the feature flag is set).
+ * - 0x8
+ - char
+ - dot.name[4]
+ - “.\\0\\0\\0”
+ * - 0xC
+ - \_\_le32
+ - dotdot.inode
+ - inode number of parent directory.
+ * - 0x10
+ - \_\_le16
+ - dotdot.rec\_len
+ - block\_size - 12. The record length is long enough to cover all htree
+ data.
+ * - 0x12
+ - u8
+ - dotdot.name\_len
+ - Length of the name, 2.
+ * - 0x13
+ - u8
+ - dotdot.file\_type
+ - File type of this entry, 0x2 (directory) (if the feature flag is set).
+ * - 0x14
+ - char
+ - dotdot\_name[4]
+ - “..\\0\\0”
+ * - 0x18
+ - \_\_le32
+ - struct dx\_root\_info.reserved\_zero
+ - Zero.
+ * - 0x1C
+ - u8
+ - struct dx\_root\_info.hash\_version
+ - Hash type, see dirhash_ table below.
+ * - 0x1D
+ - u8
+ - struct dx\_root\_info.info\_length
+ - Length of the tree information, 0x8.
+ * - 0x1E
+ - u8
+ - struct dx\_root\_info.indirect\_levels
+ - Depth of the htree. Cannot be larger than 3 if the INCOMPAT\_LARGEDIR
+ feature is set; cannot be larger than 2 otherwise.
+ * - 0x1F
+ - u8
+ - struct dx\_root\_info.unused\_flags
+ -
+ * - 0x20
+ - \_\_le16
+ - limit
+ - Maximum number of dx\_entries that can follow this header, plus 1 for
+ the header itself.
+ * - 0x22
+ - \_\_le16
+ - count
+ - Actual number of dx\_entries that follow this header, plus 1 for the
+ header itself.
+ * - 0x24
+ - \_\_le32
+ - block
+ - The block number (within the directory file) that goes with hash=0.
+ * - 0x28
+ - struct dx\_entry
+ - entries[0]
+ - As many 8-byte ``struct dx_entry`` as fits in the rest of the data block.
+
+.. _dirhash:
+
+The directory hash is one of the following values:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x0
+ - Legacy.
+ * - 0x1
+ - Half MD4.
+ * - 0x2
+ - Tea.
+ * - 0x3
+ - Legacy, unsigned.
+ * - 0x4
+ - Half MD4, unsigned.
+ * - 0x5
+ - Tea, unsigned.
+
+Interior nodes of an htree are recorded as ``struct dx_node``, which is
+also the full length of a data block:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - fake.inode
+ - Zero, to make it look like this entry is not in use.
+ * - 0x4
+ - \_\_le16
+ - fake.rec\_len
+ - The size of the block, in order to hide all of the dx\_node data.
+ * - 0x6
+ - u8
+ - name\_len
+ - Zero. There is no name for this “unused” directory entry.
+ * - 0x7
+ - u8
+ - file\_type
+ - Zero. There is no file type for this “unused” directory entry.
+ * - 0x8
+ - \_\_le16
+ - limit
+ - Maximum number of dx\_entries that can follow this header, plus 1 for
+ the header itself.
+ * - 0xA
+ - \_\_le16
+ - count
+ - Actual number of dx\_entries that follow this header, plus 1 for the
+ header itself.
+ * - 0xE
+ - \_\_le32
+ - block
+ - The block number (within the directory file) that goes with the lowest
+ hash value of this block. This value is stored in the parent block.
+ * - 0x12
+ - struct dx\_entry
+ - entries[0]
+ - As many 8-byte ``struct dx_entry`` as fits in the rest of the data block.
+
+The hash maps that exist in both ``struct dx_root`` and
+``struct dx_node`` are recorded as ``struct dx_entry``, which is 8 bytes
+long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - hash
+ - Hash code.
+ * - 0x4
+ - \_\_le32
+ - block
+ - Block number (within the directory file, not filesystem blocks) of the
+ next node in the htree.
+
+(If you think this is all quite clever and peculiar, so does the
+author.)
+
+If metadata checksums are enabled, the last 8 bytes of the directory
+block (precisely the length of one dx\_entry) are used to store a
+``struct dx_tail``, which contains the checksum. The ``limit`` and
+``count`` entries in the dx\_root/dx\_node structures are adjusted as
+necessary to fit the dx\_tail into the block. If there is no space for
+the dx\_tail, the user is notified to run ``e2fsck -D`` to rebuild the
+directory index (which will ensure that there's space for the checksum).
+The dx\_tail structure is 8 bytes long and looks like this:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - u32
+ - dt\_reserved
+ - Zero.
+ * - 0x4
+ - \_\_le32
+ - dt\_checksum
+ - Checksum of the htree directory block.
+
+The checksum is calculated against the FS UUID, the htree index header
+(dx\_root or dx\_node), all of the htree indices (dx\_entry) that are in
+use, and the tail block (dx\_tail).
diff --git a/Documentation/filesystems/ext4/ondisk/dynamic.rst b/Documentation/filesystems/ext4/ondisk/dynamic.rst
new file mode 100644
index 000000000000..bb0c84333341
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/dynamic.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Dynamic Structures
+==================
+
+Dynamic metadata are created on the fly when files and blocks are
+allocated to files.
+
+.. include:: inodes.rst
+.. include:: ifork.rst
+.. include:: directory.rst
+.. include:: attributes.rst
diff --git a/Documentation/filesystems/ext4/ondisk/eainode.rst b/Documentation/filesystems/ext4/ondisk/eainode.rst
new file mode 100644
index 000000000000..ecc0d01a0a72
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/eainode.rst
@@ -0,0 +1,18 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Large Extended Attribute Values
+-------------------------------
+
+To enable ext4 to store extended attribute values that do not fit in the
+inode or in the single extended attribute block attached to an inode,
+the EA\_INODE feature allows us to store the value in the data blocks of
+a regular file inode. This “EA inode” is linked only from the extended
+attribute name index and must not appear in a directory entry. The
+inode's i\_atime field is used to store a checksum of the xattr value;
+and i\_ctime/i\_version store a 64-bit reference count, which enables
+sharing of large xattr values between multiple owning inodes. For
+backward compatibility with older versions of this feature, the
+i\_mtime/i\_generation *may* store a back-reference to the inode number
+and i\_generation of the **one** owning inode (in cases where the EA
+inode is not referenced by multiple inodes) to verify that the EA inode
+is the correct one being accessed.
diff --git a/Documentation/filesystems/ext4/ondisk/globals.rst b/Documentation/filesystems/ext4/ondisk/globals.rst
new file mode 100644
index 000000000000..368bf7662b96
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/globals.rst
@@ -0,0 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Global Structures
+=================
+
+The filesystem is sharded into a number of block groups, each of which
+has static metadata at fixed locations.
+
+.. include:: super.rst
+.. include:: group_descr.rst
+.. include:: bitmaps.rst
+.. include:: mmp.rst
+.. include:: journal.rst
diff --git a/Documentation/filesystems/ext4/ondisk/group_descr.rst b/Documentation/filesystems/ext4/ondisk/group_descr.rst
new file mode 100644
index 000000000000..759827e5d2cf
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/group_descr.rst
@@ -0,0 +1,170 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Block Group Descriptors
+-----------------------
+
+Each block group on the filesystem has one of these descriptors
+associated with it. As noted in the Layout section above, the group
+descriptors (if present) are the second item in the block group. The
+standard configuration is for each block group to contain a full copy of
+the block group descriptor table unless the sparse\_super feature flag
+is set.
+
+Notice how the group descriptor records the location of both bitmaps and
+the inode table (i.e. they can float). This means that within a block
+group, the only data structures with fixed locations are the superblock
+and the group descriptor table. The flex\_bg mechanism uses this
+property to group several block groups into a flex group and lay out all
+of the groups' bitmaps and inode tables into one long run in the first
+group of the flex group.
+
+If the meta\_bg feature flag is set, then several block groups are
+grouped together into a meta group. Note that in the meta\_bg case,
+however, the first and last two block groups within the larger meta
+group contain only group descriptors for the groups inside the meta
+group.
+
+flex\_bg and meta\_bg do not appear to be mutually exclusive features.
+
+In ext2, ext3, and ext4 (when the 64bit feature is not enabled), the
+block group descriptor was only 32 bytes long and therefore ends at
+bg\_checksum. On an ext4 filesystem with the 64bit feature enabled, the
+block group descriptor expands to at least the 64 bytes described below;
+the size is stored in the superblock.
+
+If gdt\_csum is set and metadata\_csum is not set, the block group
+checksum is the crc16 of the FS UUID, the group number, and the group
+descriptor structure. If metadata\_csum is set, then the block group
+checksum is the lower 16 bits of the checksum of the FS UUID, the group
+number, and the group descriptor structure. Both block and inode bitmap
+checksums are calculated against the FS UUID, the group number, and the
+entire bitmap.
+
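+A sketch of the group descriptor checksum logic described above, with
+``crc16()``/``crc32c()`` stand-ins for the kernel's implementations
+(seeding and field-skipping details are simplified here; ``desc`` is
+assumed to have its checksum field zeroed for the computation):
+
+.. code-block:: c
+
+    #include <stddef.h>
+    #include <stdint.h>
+
+    /* Assumed helper prototypes (stand-ins, not the kernel's API):
+     *   uint16_t crc16(uint16_t seed, const void *buf, size_t len);
+     *   uint32_t crc32c(uint32_t seed, const void *buf, size_t len);
+     */
+
+    uint16_t bg_checksum(int has_metadata_csum, const uint8_t uuid[16],
+                         uint32_t group, const void *desc, size_t len)
+    {
+        if (has_metadata_csum) {
+            uint32_t crc = crc32c(~0u, uuid, 16);
+            crc = crc32c(crc, &group, sizeof(group));
+            crc = crc32c(crc, desc, len);
+            return crc & 0xFFFF;      /* lower 16 bits are stored */
+        }
+        /* gdt_csum flavor: crc16 over the same ingredients */
+        uint16_t crc = crc16(~0, uuid, 16);
+        crc = crc16(crc, &group, sizeof(group));
+        return crc16(crc, desc, len);
+    }
+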
+The block group descriptor is laid out in ``struct ext4_group_desc``.
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - bg\_block\_bitmap\_lo
+ - Lower 32-bits of location of block bitmap.
+ * - 0x4
+ - \_\_le32
+ - bg\_inode\_bitmap\_lo
+ - Lower 32-bits of location of inode bitmap.
+ * - 0x8
+ - \_\_le32
+ - bg\_inode\_table\_lo
+ - Lower 32-bits of location of inode table.
+ * - 0xC
+ - \_\_le16
+ - bg\_free\_blocks\_count\_lo
+ - Lower 16-bits of free block count.
+ * - 0xE
+ - \_\_le16
+ - bg\_free\_inodes\_count\_lo
+ - Lower 16-bits of free inode count.
+ * - 0x10
+ - \_\_le16
+ - bg\_used\_dirs\_count\_lo
+ - Lower 16-bits of directory count.
+ * - 0x12
+ - \_\_le16
+ - bg\_flags
+ - Block group flags. See the bgflags_ table below.
+ * - 0x14
+ - \_\_le32
+ - bg\_exclude\_bitmap\_lo
+ - Lower 32-bits of location of snapshot exclusion bitmap.
+ * - 0x18
+ - \_\_le16
+ - bg\_block\_bitmap\_csum\_lo
+ - Lower 16-bits of the block bitmap checksum.
+ * - 0x1A
+ - \_\_le16
+ - bg\_inode\_bitmap\_csum\_lo
+ - Lower 16-bits of the inode bitmap checksum.
+ * - 0x1C
+ - \_\_le16
+ - bg\_itable\_unused\_lo
+ - Lower 16-bits of unused inode count. If set, we needn't scan past the
+ ``(sb.s_inodes_per_group - gdt.bg_itable_unused)``\ th entry in the
+ inode table for this group.
+ * - 0x1E
+ - \_\_le16
+ - bg\_checksum
+ - Group descriptor checksum; crc16(sb\_uuid+group+desc) if the
+ RO\_COMPAT\_GDT\_CSUM feature is set, or crc32c(sb\_uuid+group\_desc) &
+ 0xFFFF if the RO\_COMPAT\_METADATA\_CSUM feature is set.
+ * -
+ -
+ -
+ - These fields only exist if the 64bit feature is enabled and s_desc_size
+ > 32.
+ * - 0x20
+ - \_\_le32
+ - bg\_block\_bitmap\_hi
+ - Upper 32-bits of location of block bitmap.
+ * - 0x24
+ - \_\_le32
+ - bg\_inode\_bitmap\_hi
+ - Upper 32-bits of location of inode bitmap.
+ * - 0x28
+ - \_\_le32
+ - bg\_inode\_table\_hi
+ - Upper 32-bits of location of inode table.
+ * - 0x2C
+ - \_\_le16
+ - bg\_free\_blocks\_count\_hi
+ - Upper 16-bits of free block count.
+ * - 0x2E
+ - \_\_le16
+ - bg\_free\_inodes\_count\_hi
+ - Upper 16-bits of free inode count.
+ * - 0x30
+ - \_\_le16
+ - bg\_used\_dirs\_count\_hi
+ - Upper 16-bits of directory count.
+ * - 0x32
+ - \_\_le16
+ - bg\_itable\_unused\_hi
+ - Upper 16-bits of unused inode count.
+ * - 0x34
+ - \_\_le32
+ - bg\_exclude\_bitmap\_hi
+ - Upper 32-bits of location of snapshot exclusion bitmap.
+ * - 0x38
+ - \_\_le16
+ - bg\_block\_bitmap\_csum\_hi
+ - Upper 16-bits of the block bitmap checksum.
+ * - 0x3A
+ - \_\_le16
+ - bg\_inode\_bitmap\_csum\_hi
+ - Upper 16-bits of the inode bitmap checksum.
+ * - 0x3C
+ - \_\_u32
+ - bg\_reserved
+ - Padding to 64 bytes.
+
+.. _bgflags:
+
+Block group flags can be any combination of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - inode table and bitmap are not initialized (EXT4\_BG\_INODE\_UNINIT).
+ * - 0x2
+ - block bitmap is not initialized (EXT4\_BG\_BLOCK\_UNINIT).
+ * - 0x4
+ - inode table is zeroed (EXT4\_BG\_INODE\_ZEROED).
diff --git a/Documentation/filesystems/ext4/ondisk/ifork.rst b/Documentation/filesystems/ext4/ondisk/ifork.rst
new file mode 100644
index 000000000000..5dbe3b2b121a
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/ifork.rst
@@ -0,0 +1,194 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+The Contents of inode.i\_block
+------------------------------
+
+Depending on the type of file an inode describes, the 60 bytes of
+storage in ``inode.i_block`` can be used in different ways. In general,
+regular files and directories will use it for file block indexing
+information, and special files will use it for special purposes.
+
+Symbolic Links
+~~~~~~~~~~~~~~
+
+The target of a symbolic link will be stored in this field if the target
+string is less than 60 bytes long. Otherwise, either extents or block
+maps will be used to allocate data blocks to store the link target.
+
+Direct/Indirect Block Addressing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In ext2/3, file block numbers were mapped to logical block numbers by
+means of an (up to) three level 1-1 block map. To find the logical block
+that stores a particular file block, the code would navigate through
+this increasingly complicated structure. Notice that there is neither a
+magic number nor a checksum to provide any level of confidence that the
+block isn't full of garbage.
+
+.. ifconfig:: builder != 'latex'
+
+ .. include:: blockmap.rst
+
+.. ifconfig:: builder == 'latex'
+
+ [Table omitted because LaTeX doesn't support nested tables.]
+
+Note that with this block mapping scheme, it is necessary to fill out a
+lot of mapping data even for a large contiguous file! This inefficiency
+led to the creation of the extent mapping scheme, discussed below.
+
+Notice also that a file using this mapping scheme cannot be placed
+higher than 2^32 blocks.
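+
+A sketch of the lookup cost this scheme implies (not the kernel's actual
+code), assuming 4-byte block pointers so that each map block holds
+``block_size / 4`` entries:
+
+.. code-block:: c
+
+    #include <stdint.h>
+
+    /* How many map blocks must be read to locate 'file_block'? */
+    static int blockmap_depth(uint64_t file_block, uint64_t per_block)
+    {
+        if (file_block < 12)
+            return 0;                 /* direct: i_block[0..11] */
+        file_block -= 12;
+        if (file_block < per_block)
+            return 1;                 /* indirect: i_block[12] */
+        file_block -= per_block;
+        if (file_block < per_block * per_block)
+            return 2;                 /* double indirect: i_block[13] */
+        return 3;                     /* triple indirect: i_block[14] */
+    }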
+
+Extent Tree
+~~~~~~~~~~~
+
+In ext4, the file to logical block map has been replaced with an extent
+tree. Under the old scheme, allocating a contiguous run of 1,000 blocks
+requires an indirect block to map all 1,000 entries; with extents, the
+mapping is reduced to a single ``struct ext4_extent`` with
+``ee_len = 1000``. If flex\_bg is enabled, it is possible to allocate
+very large files with a single extent, at a considerable reduction in
+metadata block use, and some improvement in disk efficiency. The inode
+must have the extents flag (0x80000) set for this feature to be in
+use.
+
+Extents are arranged as a tree. Each node of the tree begins with a
+``struct ext4_extent_header``. If the node is an interior node
+(``eh.eh_depth`` > 0), the header is followed by ``eh.eh_entries``
+instances of ``struct ext4_extent_idx``; each of these index entries
+points to a block containing more nodes in the extent tree. If the node
+is a leaf node (``eh.eh_depth == 0``), then the header is followed by
+``eh.eh_entries`` instances of ``struct ext4_extent``; these instances
+point to the file's data blocks. The root node of the extent tree is
+stored in ``inode.i_block``, which allows for the first four extents to
+be recorded without the use of extra metadata blocks.
+
+The extent tree header is recorded in ``struct ext4_extent_header``,
+which is 12 bytes long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le16
+ - eh\_magic
+ - Magic number, 0xF30A.
+ * - 0x2
+ - \_\_le16
+ - eh\_entries
+ - Number of valid entries following the header.
+ * - 0x4
+ - \_\_le16
+ - eh\_max
+ - Maximum number of entries that could follow the header.
+ * - 0x6
+ - \_\_le16
+ - eh\_depth
+ - Depth of this extent node in the extent tree. 0 = this extent node
+ points to data blocks; otherwise, this extent node points to other
+ extent nodes. The extent tree can be at most 5 levels deep: a logical
+ block number can be at most ``2^32``, and the smallest ``n`` that
+ satisfies ``4*(((blocksize - 12)/12)^n) >= 2^32`` is 5.
+ * - 0x8
+ - \_\_le32
+ - eh\_generation
+ - Generation of the tree. (Used by Lustre, but not standard ext4).
+
+Internal nodes of the extent tree, also known as index nodes, are
+recorded as ``struct ext4_extent_idx``, and are 12 bytes long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - ei\_block
+ - This index node covers file blocks from 'block' onward.
+ * - 0x4
+ - \_\_le32
+ - ei\_leaf\_lo
+ - Lower 32-bits of the block number of the extent node that is the next
+ level lower in the tree. The tree node pointed to can be either another
+ internal node or a leaf node, described below.
+ * - 0x8
+ - \_\_le16
+ - ei\_leaf\_hi
+ - Upper 16-bits of the previous field.
+ * - 0xA
+ - \_\_u16
+ - ei\_unused
+ -
+
+Leaf nodes of the extent tree are recorded as ``struct ext4_extent``,
+and are also 12 bytes long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - ee\_block
+ - First file block number that this extent covers.
+ * - 0x4
+ - \_\_le16
+ - ee\_len
+ - Number of blocks covered by extent. If the value of this field is <=
+ 32768, the extent is initialized. If the value of the field is > 32768,
+ the extent is uninitialized and the actual extent length is ``ee_len`` -
+ 32768. Therefore, the maximum length of an initialized extent is 32768
+ blocks, and the maximum length of an uninitialized extent is 32767
+ blocks. (A decoding sketch follows this table.)
+ * - 0x6
+ - \_\_le16
+ - ee\_start\_hi
+ - Upper 16-bits of the block number to which this extent points.
+ * - 0x8
+ - \_\_le32
+ - ee\_start\_lo
+ - Lower 32-bits of the block number to which this extent points.
+
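+As a sketch of how the extent fields above combine (portable stand-ins
+for the kernel's accessors; the field names follow the table):
+
+.. code-block:: c
+
+    #include <endian.h>
+    #include <stdint.h>
+
+    struct ext4_extent {
+        uint32_t ee_block;    /* all fields little-endian on disk */
+        uint16_t ee_len;
+        uint16_t ee_start_hi;
+        uint32_t ee_start_lo;
+    };
+
+    /* 48-bit physical start block, split across two fields. */
+    static uint64_t ext_start(const struct ext4_extent *ex)
+    {
+        return ((uint64_t)le16toh(ex->ee_start_hi) << 32) |
+               le32toh(ex->ee_start_lo);
+    }
+
+    /* Length, with the "uninitialized" encoding peeled off. */
+    static unsigned int ext_len(const struct ext4_extent *ex, int *uninit)
+    {
+        unsigned int len = le16toh(ex->ee_len);
+
+        *uninit = len > 32768;
+        return *uninit ? len - 32768 : len;
+    }
+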
+Prior to the introduction of metadata checksums, the extent header +
+extent entries always left at least 4 bytes of unallocated space at the
+end of each extent tree data block (because (2^x % 12) >= 4). Therefore,
+the 32-bit checksum is inserted into this space. The 4 extents in the
+inode do not need checksumming, since the inode is already checksummed.
+The checksum is calculated against the FS UUID, the inode number, the
+inode generation, and the entire extent block leading up to (but not
+including) the checksum itself.
+
+``struct ext4_extent_tail`` is 4 bytes long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - eb\_checksum
+ - Checksum of the extent block, crc32c(uuid+inum+igeneration+extentblock)
+
+Inline Data
+~~~~~~~~~~~
+
+If the inline data feature is enabled for the filesystem and the flag is
+set for the inode, it is possible that the first 60 bytes of the file
+data are stored here.
diff --git a/Documentation/filesystems/ext4/ondisk/index.rst b/Documentation/filesystems/ext4/ondisk/index.rst
new file mode 100644
index 000000000000..f7d082c3a435
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/index.rst
@@ -0,0 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================
+Data Structures and Algorithms
+==============================
+.. include:: about.rst
+.. include:: overview.rst
+.. include:: globals.rst
+.. include:: dynamic.rst
diff --git a/Documentation/filesystems/ext4/ondisk/inlinedata.rst b/Documentation/filesystems/ext4/ondisk/inlinedata.rst
new file mode 100644
index 000000000000..d1075178ce0b
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/inlinedata.rst
@@ -0,0 +1,37 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Inline Data
+-----------
+
+The inline data feature was designed to handle the case that a file's
+data is so tiny that it readily fits inside the inode, which
+(theoretically) reduces disk block consumption and reduces seeks. If the
+file is smaller than 60 bytes, then the data are stored inline in
+``inode.i_block``. If the rest of the file would fit inside the extended
+attribute space, then it might be found as an extended attribute
+“system.data” within the inode body (“ibody EA”). This of course
+constrains the amount of extended attributes one can attach to an inode.
+If the data size increases beyond i\_block + ibody EA, a regular block
+is allocated and the contents moved to that block.
+
+Pending a change to compact the extended attribute key used to store
+inline data, one ought to be able to store 160 bytes of data in a
+256-byte inode (as of June 2015, when i\_extra\_isize is 28). Prior to
+that, the limit was 156 bytes due to inefficient use of inode space.
+
+The inline data feature requires the presence of an extended attribute
+for “system.data”, even if the attribute value is zero length.
+
+Inline Directories
+~~~~~~~~~~~~~~~~~~
+
+The first four bytes of i\_block are the inode number of the parent
+directory. Following that is a 56-byte space for an array of directory
+entries; see ``struct ext4_dir_entry``. If there is a “system.data”
+attribute in the inode body, the EA value is an array of
+``struct ext4_dir_entry`` as well. Note that for inline directories, the
+i\_block and EA space are treated as separate dirent blocks; directory
+entries cannot span the two.
+
+Inline directory entries are not checksummed, as the inode checksum
+should protect all inline data contents.
diff --git a/Documentation/filesystems/ext4/ondisk/inodes.rst b/Documentation/filesystems/ext4/ondisk/inodes.rst
new file mode 100644
index 000000000000..655ce898f3f5
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/inodes.rst
@@ -0,0 +1,575 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Index Nodes
+-----------
+
+In a regular UNIX filesystem, the inode stores all the metadata
+pertaining to the file (time stamps, block maps, extended attributes,
+etc), not the directory entry. To find the information associated with a
+file, one must traverse the directory files to find the directory entry
+associated with a file, then load the inode to find the metadata for
+that file. ext4 appears to cheat (for performance reasons) a little bit
+by storing a copy of the file type (normally stored in the inode) in the
+directory entry. (Compare all this to FAT, which stores all the file
+information directly in the directory entry, but does not support hard
+links and is in general more seek-happy than ext4 due to its simpler
+block allocator and extensive use of linked lists.)
+
+The inode table is a linear array of ``struct ext4_inode``. The table is
+sized to have enough blocks to store at least
+``sb.s_inode_size * sb.s_inodes_per_group`` bytes. The number of the
+block group containing an inode can be calculated as
+``(inode_number - 1) / sb.s_inodes_per_group``, and the offset into the
+group's table is ``(inode_number - 1) % sb.s_inodes_per_group``. There
+is no inode 0.
+
+The inode checksum is calculated against the FS UUID, the inode number,
+and the inode structure itself.
+
+The inode table entry is laid out in ``struct ext4_inode``.
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le16
+ - i\_mode
+ - File mode. See the table i_mode_ below.
+ * - 0x2
+ - \_\_le16
+ - i\_uid
+ - Lower 16-bits of Owner UID.
+ * - 0x4
+ - \_\_le32
+ - i\_size\_lo
+ - Lower 32-bits of size in bytes.
+ * - 0x8
+ - \_\_le32
+ - i\_atime
+ - Last access time, in seconds since the epoch. However, if the EA\_INODE
+ inode flag is set, this inode stores an extended attribute value and
+ this field contains the checksum of the value.
+ * - 0xC
+ - \_\_le32
+ - i\_ctime
+ - Last inode change time, in seconds since the epoch. However, if the
+ EA\_INODE inode flag is set, this inode stores an extended attribute
+ value and this field contains the lower 32 bits of the attribute value's
+ reference count.
+ * - 0x10
+ - \_\_le32
+ - i\_mtime
+ - Last data modification time, in seconds since the epoch. However, if the
+ EA\_INODE inode flag is set, this inode stores an extended attribute
+ value and this field contains the number of the inode that owns the
+ extended attribute.
+ * - 0x14
+ - \_\_le32
+ - i\_dtime
+ - Deletion Time, in seconds since the epoch.
+ * - 0x18
+ - \_\_le16
+ - i\_gid
+ - Lower 16-bits of GID.
+ * - 0x1A
+ - \_\_le16
+ - i\_links\_count
+ - Hard link count. Normally, ext4 does not permit an inode to have more
+ than 65,000 hard links. This applies to files as well as directories,
+ which means that there cannot be more than 64,998 subdirectories in a
+ directory (each subdirectory's '..' entry counts as a hard link, as does
+ the '.' entry in the directory itself). With the DIR\_NLINK feature
+ enabled, ext4 supports more than 64,998 subdirectories by setting this
+ field to 1 to indicate that the number of hard links is not known.
+ * - 0x1C
+ - \_\_le32
+ - i\_blocks\_lo
+ - Lower 32-bits of “block” count. If the huge\_file feature flag is not
+ set on the filesystem, the file consumes ``i_blocks_lo`` 512-byte blocks
+ on disk. If huge\_file is set and EXT4\_HUGE\_FILE\_FL is NOT set in
+ ``inode.i_flags``, then the file consumes ``i_blocks_lo + (i_blocks_hi
+ << 32)`` 512-byte blocks on disk. If huge\_file is set and
+ EXT4\_HUGE\_FILE\_FL IS set in ``inode.i_flags``, then this file
+ consumes ``i_blocks_lo + (i_blocks_hi << 32)`` filesystem blocks on
+ disk.
+ * - 0x20
+ - \_\_le32
+ - i\_flags
+ - Inode flags. See the table i_flags_ below.
+ * - 0x24
+ - 4 bytes
+ - i\_osd1
+ - See the table i_osd1_ for more details.
+ * - 0x28
+ - 60 bytes
+ - i\_block[EXT4\_N\_BLOCKS=15]
+ - Block map or extent tree. See the section “The Contents of inode.i\_block”.
+ * - 0x64
+ - \_\_le32
+ - i\_generation
+ - File version (for NFS).
+ * - 0x68
+ - \_\_le32
+ - i\_file\_acl\_lo
+ - Lower 32-bits of extended attribute block. ACLs are of course one of
+ many possible extended attributes; I think the name of this field is a
+ result of the first use of extended attributes being for ACLs.
+ * - 0x6C
+ - \_\_le32
+ - i\_size\_high / i\_dir\_acl
+ - Upper 32-bits of file/directory size. In ext2/3 this field was named
+ i\_dir\_acl, though it was usually set to zero and never used.
+ * - 0x70
+ - \_\_le32
+ - i\_obso\_faddr
+ - (Obsolete) fragment address.
+ * - 0x74
+ - 12 bytes
+ - i\_osd2
+ - See the table i_osd2_ for more details.
+ * - 0x80
+ - \_\_le16
+ - i\_extra\_isize
+ - Size of this inode - 128. Alternately, the size of the extended inode
+ fields beyond the original ext2 inode, including this field.
+ * - 0x82
+ - \_\_le16
+ - i\_checksum\_hi
+ - Upper 16-bits of the inode checksum.
+ * - 0x84
+ - \_\_le32
+ - i\_ctime\_extra
+ - Extra change time bits. This provides sub-second precision. See Inode
+ Timestamps section.
+ * - 0x88
+ - \_\_le32
+ - i\_mtime\_extra
+ - Extra modification time bits. This provides sub-second precision.
+ * - 0x8C
+ - \_\_le32
+ - i\_atime\_extra
+ - Extra access time bits. This provides sub-second precision.
+ * - 0x90
+ - \_\_le32
+ - i\_crtime
+ - File creation time, in seconds since the epoch.
+ * - 0x94
+ - \_\_le32
+ - i\_crtime\_extra
+ - Extra file creation time bits. This provides sub-second precision.
+ * - 0x98
+ - \_\_le32
+ - i\_version\_hi
+ - Upper 32-bits for version number.
+ * - 0x9C
+ - \_\_le32
+ - i\_projid
+ - Project ID.
+
+.. _i_mode:
+
+The ``i_mode`` value is a combination of the following flags:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - S\_IXOTH (Others may execute)
+ * - 0x2
+ - S\_IWOTH (Others may write)
+ * - 0x4
+ - S\_IROTH (Others may read)
+ * - 0x8
+ - S\_IXGRP (Group members may execute)
+ * - 0x10
+ - S\_IWGRP (Group members may write)
+ * - 0x20
+ - S\_IRGRP (Group members may read)
+ * - 0x40
+ - S\_IXUSR (Owner may execute)
+ * - 0x80
+ - S\_IWUSR (Owner may write)
+ * - 0x100
+ - S\_IRUSR (Owner may read)
+ * - 0x200
+ - S\_ISVTX (Sticky bit)
+ * - 0x400
+ - S\_ISGID (Set GID)
+ * - 0x800
+ - S\_ISUID (Set UID)
+ * -
+ - These are mutually-exclusive file types:
+ * - 0x1000
+ - S\_IFIFO (FIFO)
+ * - 0x2000
+ - S\_IFCHR (Character device)
+ * - 0x4000
+ - S\_IFDIR (Directory)
+ * - 0x6000
+ - S\_IFBLK (Block device)
+ * - 0x8000
+ - S\_IFREG (Regular file)
+ * - 0xA000
+ - S\_IFLNK (Symbolic link)
+ * - 0xC000
+ - S\_IFSOCK (Socket)
+
+.. _i_flags:
+
+The ``i_flags`` field is a combination of these values:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - This file requires secure deletion (EXT4\_SECRM\_FL). (not implemented)
+ * - 0x2
+ - This file should be preserved, should undeletion be desired
+ (EXT4\_UNRM\_FL). (not implemented)
+ * - 0x4
+ - File is compressed (EXT4\_COMPR\_FL). (not really implemented)
+ * - 0x8
+ - All writes to the file must be synchronous (EXT4\_SYNC\_FL).
+ * - 0x10
+ - File is immutable (EXT4\_IMMUTABLE\_FL).
+ * - 0x20
+ - File can only be appended (EXT4\_APPEND\_FL).
+ * - 0x40
+ - The dump(1) utility should not dump this file (EXT4\_NODUMP\_FL).
+ * - 0x80
+ - Do not update access time (EXT4\_NOATIME\_FL).
+ * - 0x100
+ - Dirty compressed file (EXT4\_DIRTY\_FL). (not used)
+ * - 0x200
+ - File has one or more compressed clusters (EXT4\_COMPRBLK\_FL). (not used)
+ * - 0x400
+ - Do not compress file (EXT4\_NOCOMPR\_FL). (not used)
+ * - 0x800
+ - Encrypted inode (EXT4\_ENCRYPT\_FL). This bit value previously was
+ EXT4\_ECOMPR\_FL (compression error), which was never used.
+ * - 0x1000
+ - Directory has hashed indexes (EXT4\_INDEX\_FL).
+ * - 0x2000
+ - AFS magic directory (EXT4\_IMAGIC\_FL).
+ * - 0x4000
+ - File data must always be written through the journal
+ (EXT4\_JOURNAL\_DATA\_FL).
+ * - 0x8000
+ - File tail should not be merged (EXT4\_NOTAIL\_FL). (not used by ext4)
+ * - 0x10000
+ - All directory entry data should be written synchronously (see
+ ``dirsync``) (EXT4\_DIRSYNC\_FL).
+ * - 0x20000
+ - Top of directory hierarchy (EXT4\_TOPDIR\_FL).
+ * - 0x40000
+ - This is a huge file (EXT4\_HUGE\_FILE\_FL).
+ * - 0x80000
+ - Inode uses extents (EXT4\_EXTENTS\_FL).
+ * - 0x200000
+ - Inode stores a large extended attribute value in its data blocks
+ (EXT4\_EA\_INODE\_FL).
+ * - 0x400000
+ - This file has blocks allocated past EOF (EXT4\_EOFBLOCKS\_FL).
+ (deprecated)
+ * - 0x01000000
+ - Inode is a snapshot (``EXT4_SNAPFILE_FL``). (not in mainline)
+ * - 0x04000000
+ - Snapshot is being deleted (``EXT4_SNAPFILE_DELETED_FL``). (not in
+ mainline)
+ * - 0x08000000
+ - Snapshot shrink has completed (``EXT4_SNAPFILE_SHRUNK_FL``). (not in
+ mainline)
+ * - 0x10000000
+ - Inode has inline data (EXT4\_INLINE\_DATA\_FL).
+ * - 0x20000000
+ - Create children with the same project ID (EXT4\_PROJINHERIT\_FL).
+ * - 0x80000000
+ - Reserved for ext4 library (EXT4\_RESERVED\_FL).
+ * -
+ - Aggregate flags:
+ * - 0x4BDFFF
+ - User-visible flags.
+ * - 0x4B80FF
+ - User-modifiable flags. Note that while EXT4\_JOURNAL\_DATA\_FL and
+ EXT4\_EXTENTS\_FL can be set with setattr, they are not in the kernel's
+ EXT4\_FL\_USER\_MODIFIABLE mask, since it needs to handle the setting of
+ these flags in a special manner and they are masked out of the set of
+ flags that are saved directly to i\_flags.
+
+.. _i_osd1:
+
+The ``osd1`` field has multiple meanings depending on the creator:
+
+Linux:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - l\_i\_version
+ - Inode version. However, if the EA\_INODE inode flag is set, this inode
+ stores an extended attribute value and this field contains the upper 32
+ bits of the attribute value's reference count.
+
+Hurd:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - h\_i\_translator
+ - ??
+
+Masix:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - m\_i\_reserved
+ - ??
+
+.. _i_osd2:
+
+The ``osd2`` field has multiple meanings depending on the filesystem creator:
+
+Linux:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le16
+ - l\_i\_blocks\_high
+ - Upper 16-bits of the block count. Please see the note attached to
+ i\_blocks\_lo.
+ * - 0x2
+ - \_\_le16
+ - l\_i\_file\_acl\_high
+ - Upper 16-bits of the extended attribute block (historically, the file
+ ACL location). See the Extended Attributes section below.
+ * - 0x4
+ - \_\_le16
+ - l\_i\_uid\_high
+ - Upper 16-bits of the Owner UID.
+ * - 0x6
+ - \_\_le16
+ - l\_i\_gid\_high
+ - Upper 16-bits of the GID.
+ * - 0x8
+ - \_\_le16
+ - l\_i\_checksum\_lo
+ - Lower 16-bits of the inode checksum.
+ * - 0xA
+ - \_\_le16
+ - l\_i\_reserved
+ - Unused.
+
+Hurd:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le16
+ - h\_i\_reserved1
+ - ??
+ * - 0x2
+ - \_\_u16
+ - h\_i\_mode\_high
+ - Upper 16-bits of the file mode.
+ * - 0x4
+ - \_\_le16
+ - h\_i\_uid\_high
+ - Upper 16-bits of the Owner UID.
+ * - 0x6
+ - \_\_le16
+ - h\_i\_gid\_high
+ - Upper 16-bits of the GID.
+ * - 0x8
+ - \_\_u32
+ - h\_i\_author
+ - Author code?
+
+Masix:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Size
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le16
+ - h\_i\_reserved1
+ - ??
+ * - 0x2
+ - \_\_u16
+ - m\_i\_file\_acl\_high
+ - Upper 16-bits of the extended attribute block (historically, the file
+ ACL location).
+ * - 0x4
+ - \_\_u32
+ - m\_i\_reserved2[2]
+ - ??
+
+Inode Size
+~~~~~~~~~~
+
+In ext2 and ext3, the inode structure size was fixed at 128 bytes
+(``EXT2_GOOD_OLD_INODE_SIZE``) and each inode had a disk record size of
+128 bytes. Starting with ext4, it is possible to allocate a larger
+on-disk inode at format time for all inodes in the filesystem to provide
+space beyond the end of the original ext2 inode. The on-disk inode
+record size is recorded in the superblock as ``s_inode_size``. The
+number of bytes actually used by struct ext4\_inode beyond the original
+128-byte ext2 inode is recorded in the ``i_extra_isize`` field for each
+inode, which allows struct ext4\_inode to grow for a new kernel without
+having to upgrade all of the on-disk inodes. Access to fields beyond
+EXT2\_GOOD\_OLD\_INODE\_SIZE should be verified to be within
+``i_extra_isize``. By default, ext4 inode records are 256 bytes, and (as
+of October 2013) the inode structure is 156 bytes
+(``i_extra_isize = 28``). The extra space between the end of the inode
+structure and the end of the inode record can be used to store extended
+attributes. Each inode record can be as large as the filesystem block
+size, though this is not terribly efficient.
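+
+A sketch of the bounds check this implies for a reader of the on-disk
+inode (the helper name is illustrative, not a kernel function):
+
+.. code-block:: c
+
+    #include <stddef.h>
+    #include <stdint.h>
+
+    /* Does this inode record carry a field that ends at byte offset
+     * 'field_end' within struct ext4_inode? Fields past the original
+     * 128 bytes exist only if i_extra_isize covers them. */
+    static int inode_has_field(uint16_t i_extra_isize, size_t field_end)
+    {
+        return field_end <= (size_t)128 + i_extra_isize;
+    }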
+
+Finding an Inode
+~~~~~~~~~~~~~~~~
+
+Each block group contains ``sb->s_inodes_per_group`` inodes. Because
+inode 0 is defined not to exist, this formula can be used to find the
+block group that an inode lives in:
+``bg = (inode_num - 1) / sb->s_inodes_per_group``. The particular inode
+can be found within the block group's inode table at
+``index = (inode_num - 1) % sb->s_inodes_per_group``. To get the byte
+address within the inode table, use
+``offset = index * sb->s_inode_size``.
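+
+The same arithmetic as a sketch (the names mirror the superblock fields;
+the helper itself is illustrative):
+
+.. code-block:: c
+
+    #include <stdint.h>
+
+    static void locate_inode(uint32_t inode_num,
+                             uint32_t inodes_per_group,
+                             uint16_t inode_size, uint32_t *bg,
+                             uint32_t *index, uint64_t *byte_off)
+    {
+        *bg = (inode_num - 1) / inodes_per_group;    /* block group */
+        *index = (inode_num - 1) % inodes_per_group; /* table slot */
+        *byte_off = (uint64_t)*index * inode_size;   /* byte offset */
+    }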
+
+Inode Timestamps
+~~~~~~~~~~~~~~~~
+
+Four timestamps are recorded in the lower 128 bytes of the inode
+structure -- inode change time (ctime), access time (atime), data
+modification time (mtime), and deletion time (dtime). The four fields
+are 32-bit signed integers that represent seconds since the Unix epoch
+(1970-01-01 00:00:00 GMT), which means that the fields will overflow in
+January 2038. For inodes that are not linked from any directory but are
+still open (orphan inodes), the dtime field is overloaded for use with
+the orphan list. The superblock field ``s_last_orphan`` points to the
+first inode in the orphan list; dtime is then the number of the next
+orphaned inode, or zero if there are no more orphans.
+
+If the inode structure size ``sb->s_inode_size`` is larger than 128
+bytes and the ``i_extra_isize`` field is large enough to encompass the
+respective ``i_[cma]time_extra`` field, the ctime, atime, and mtime
+inode fields are widened to 64 bits. Within this “extra” 32-bit field,
+the lower two bits are used to extend the 32-bit seconds field to be 34
+bits wide; the upper 30 bits are used to provide nanosecond timestamp
+accuracy. Therefore, timestamps should not overflow until May 2446.
+dtime was not widened. There is also a fifth timestamp to record inode
+creation time (crtime); this field is 64-bits wide and decoded in the
+same manner as 64-bit [cma]time. Neither crtime nor dtime are accessible
+through the regular stat() interface, though debugfs will report them.
+
+We use the 32-bit signed time value plus (2^32 \* (extra epoch bits)).
+In other words:
+
+.. list-table::
+ :widths: 20 20 20 20 20
+ :header-rows: 1
+
+ * - Extra epoch bits
+ - MSB of 32-bit time
+ - Adjustment for signed 32-bit to 64-bit tv\_sec
+ - Decoded 64-bit tv\_sec
+ - valid time range
+ * - 0 0
+ - 1
+ - 0
+ - ``-0x80000000 - -0x00000001``
+ - 1901-12-13 to 1969-12-31
+ * - 0 0
+ - 0
+ - 0
+ - ``0x000000000 - 0x07fffffff``
+ - 1970-01-01 to 2038-01-19
+ * - 0 1
+ - 1
+ - 0x100000000
+ - ``0x080000000 - 0x0ffffffff``
+ - 2038-01-19 to 2106-02-07
+ * - 0 1
+ - 0
+ - 0x100000000
+ - ``0x100000000 - 0x17fffffff``
+ - 2106-02-07 to 2174-02-25
+ * - 1 0
+ - 1
+ - 0x200000000
+ - ``0x180000000 - 0x1ffffffff``
+ - 2174-02-25 to 2242-03-16
+ * - 1 0
+ - 0
+ - 0x200000000
+ - ``0x200000000 - 0x27fffffff``
+ - 2242-03-16 to 2310-04-04
+ * - 1 1
+ - 1
+ - 0x300000000
+ - ``0x280000000 - 0x2ffffffff``
+ - 2310-04-04 to 2378-04-22
+ * - 1 1
+ - 0
+ - 0x300000000
+ - ``0x300000000 - 0x37fffffff``
+ - 2378-04-22 to 2446-05-10
+
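+A sketch of the decode rule the table expresses (illustrative, not the
+kernel's code):
+
+.. code-block:: c
+
+    #include <stdint.h>
+
+    /* 64-bit seconds: sign-extended 32-bit seconds plus 2^32 times
+     * the two extra epoch bits (the low bits of the _extra field). */
+    static int64_t decode_tv_sec(uint32_t seconds, uint32_t extra)
+    {
+        return (int64_t)(int32_t)seconds +
+               ((int64_t)(extra & 0x3) << 32);
+    }
+
+    /* The upper 30 bits of the _extra field hold nanoseconds. */
+    static uint32_t decode_tv_nsec(uint32_t extra)
+    {
+        return extra >> 2;
+    }
+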
+This is a somewhat odd encoding since there are effectively seven times
+as many positive values as negative values. There have also been
+long-standing bugs decoding and encoding dates beyond 2038, which don't
+seem to be fixed as of kernel 3.12 and e2fsprogs 1.42.8. 64-bit kernels
+incorrectly use the extra epoch bits 1,1 for dates between 1901 and
+1970. At some point the kernel will be fixed and e2fsck will fix this
+situation, assuming that it is run before 2310.
diff --git a/Documentation/filesystems/ext4/ondisk/journal.rst b/Documentation/filesystems/ext4/ondisk/journal.rst
new file mode 100644
index 000000000000..e7031af86876
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/journal.rst
@@ -0,0 +1,611 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Journal (jbd2)
+--------------
+
+Introduced in ext3, the ext4 filesystem employs a journal to protect the
+filesystem against corruption in the case of a system crash. A small
+continuous region of disk (default 128MiB) is reserved inside the
+filesystem as a place to land “important” data writes on-disk as quickly
+as possible. Once the important data transaction is fully written to the
+disk and flushed from the disk write cache, a record of the data being
+committed is also written to the journal. At some later point in time,
+the journal code writes the transactions to their final locations on
+disk (this could involve a lot of seeking or a lot of small
+read-write-erases) before erasing the commit record. Should the system
+crash during the second slow write, the journal can be replayed all the
+way to the latest commit record, guaranteeing the atomicity of whatever
+gets written through the journal to the disk. The effect of this is to
+guarantee that the filesystem does not become stuck midway through a
+metadata update.
+
+For performance reasons, ext4 by default only writes filesystem metadata
+through the journal. This means that file data blocks are /not/
+guaranteed to be in any consistent state after a crash. If this default
+guarantee level (``data=ordered``) is not satisfactory, there is a mount
+option to control journal behavior. If ``data=journal``, all data and
+metadata are written to disk through the journal. This is slower but
+safest. If ``data=writeback``, dirty data blocks are not flushed to the
+disk before the metadata are written to disk through the journal.
+
+The journal inode is typically inode 8. The first 68 bytes of the
+journal inode are replicated in the ext4 superblock. The journal itself
+is a normal (but hidden) file within the filesystem. The file usually
+consumes an entire block group, though mke2fs tries to put it in the
+middle of the disk.
+
+All fields in jbd2 are written to disk in big-endian order. This is the
+opposite of ext4.
+
+NOTE: Both ext4 and ocfs2 use jbd2.
+
+The maximum size of a journal embedded in an ext4 filesystem is 2^32
+blocks. jbd2 itself does not seem to care.
+
+Layout
+~~~~~~
+
+Generally speaking, the journal has this format:
+
+.. list-table::
+ :widths: 1 1 78
+ :header-rows: 1
+
+ * - Superblock
+ - descriptor\_block (data\_blocks or revocation\_block) [more data or
+ revocations] commit\_block
+ - [more transactions...]
+ * -
+ - One transaction
+ -
+
+Notice that a transaction begins with either a descriptor and some data,
+or a block revocation list. A finished transaction always ends with a
+commit. If there is no commit record (or the checksums don't match), the
+transaction will be discarded during replay.
+
+External Journal
+~~~~~~~~~~~~~~~~
+
+Optionally, an ext4 filesystem can be created with an external journal
+device (as opposed to an internal journal, which uses a reserved inode).
+In this case, on the filesystem device, ``s_journal_inum`` should be
+zero and ``s_journal_uuid`` should be set. On the journal device there
+will be an ext4 super block in the usual place, with a matching UUID.
+The journal superblock will be in the next full block after the
+superblock.
+
+.. list-table::
+ :widths: 1 1 1 1 76
+ :header-rows: 1
+
+ * - 1024 bytes of padding
+ - ext4 Superblock
+ - Journal Superblock
+ - descriptor\_block (data\_blocks or revocation\_block) [more data or
+ revocations] commit\_block
+ - [more transactions...]
+ * -
+ -
+ -
+ - One transaction
+ -
+
+Block Header
+~~~~~~~~~~~~
+
+Every block in the journal starts with a common 12-byte header
+``struct journal_header_s``:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_be32
+ - h\_magic
+ - jbd2 magic number, 0xC03B3998.
+ * - 0x4
+ - \_\_be32
+ - h\_blocktype
+ - Description of what this block contains. See the jbd2_blocktype_ table
+ below.
+ * - 0x8
+ - \_\_be32
+ - h\_sequence
+ - The transaction ID that goes with this block.
+
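+Expressed as C, the header is simply three big-endian words (this
+mirrors the table above; the kernel's own definition lives in
+``include/linux/jbd2.h``)::
+
+    typedef struct journal_header_s {
+            __be32 h_magic;     /* jbd2 magic number, 0xC03B3998 */
+            __be32 h_blocktype; /* see the blocktype table below */
+            __be32 h_sequence;  /* transaction ID */
+    } journal_header_t;
+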
+.. _jbd2_blocktype:
+
+The journal block type can be any one of:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 1
+ - Descriptor. This block precedes a series of data blocks that were
+ written through the journal during a transaction.
+ * - 2
+ - Block commit record. This block signifies the completion of a
+ transaction.
+ * - 3
+ - Journal superblock, v1.
+ * - 4
+ - Journal superblock, v2.
+ * - 5
+ - Block revocation records. This speeds up recovery by enabling the
+ journal to skip writing blocks that were subsequently rewritten.
+
+Super Block
+~~~~~~~~~~~
+
+The superblock for the journal is much simpler than ext4's. The key
+data kept within are the size of the journal and where to find the
+start of the log of transactions.
+
+The journal superblock is recorded as ``struct journal_superblock_s``,
+which is 1024 bytes long:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * -
+ -
+ -
+ - Static information describing the journal.
+ * - 0x0
+ - journal\_header\_t (12 bytes)
+ - s\_header
+ - Common header identifying this as a superblock.
+ * - 0xC
+ - \_\_be32
+ - s\_blocksize
+ - Journal device block size.
+ * - 0x10
+ - \_\_be32
+ - s\_maxlen
+ - Total number of blocks in this journal.
+ * - 0x14
+ - \_\_be32
+ - s\_first
+ - First block of log information.
+ * -
+ -
+ -
+ - Dynamic information describing the current state of the log.
+ * - 0x18
+ - \_\_be32
+ - s\_sequence
+ - First commit ID expected in log.
+ * - 0x1C
+ - \_\_be32
+ - s\_start
+ - Block number of the start of log. Contrary to the comments, this field
+ being zero does not imply that the journal is clean!
+ * - 0x20
+ - \_\_be32
+ - s\_errno
+ - Error value, as set by jbd2\_journal\_abort().
+ * -
+ -
+ -
+ - The remaining fields are only valid in a v2 superblock.
+ * - 0x24
+ - \_\_be32
+ - s\_feature\_compat
+ - Compatible feature set. See the table jbd2_compat_ below.
+ * - 0x28
+ - \_\_be32
+ - s\_feature\_incompat
+ - Incompatible feature set. See the table jbd2_incompat_ below.
+ * - 0x2C
+ - \_\_be32
+ - s\_feature\_ro\_compat
+ - Read-only compatible feature set. There aren't any of these currently.
+ * - 0x30
+ - \_\_u8
+ - s\_uuid[16]
+ - 128-bit uuid for journal. This is compared against the copy in the ext4
+ super block at mount time.
+ * - 0x40
+ - \_\_be32
+ - s\_nr\_users
+ - Number of file systems sharing this journal.
+ * - 0x44
+ - \_\_be32
+ - s\_dynsuper
+ - Location of dynamic super block copy. (Not used?)
+ * - 0x48
+ - \_\_be32
+ - s\_max\_transaction
+ - Limit of journal blocks per transaction. (Not used?)
+ * - 0x4C
+ - \_\_be32
+ - s\_max\_trans\_data
+ - Limit of data blocks per transaction. (Not used?)
+ * - 0x50
+ - \_\_u8
+ - s\_checksum\_type
+ - Checksum algorithm used for the journal. See jbd2_checksum_type_ for
+ more info.
+ * - 0x51
+ - \_\_u8[3]
+ - s\_padding2
+ -
+ * - 0x54
+ - \_\_u32
+ - s\_padding[42]
+ -
+ * - 0xFC
+ - \_\_be32
+ - s\_checksum
+ - Checksum of the entire superblock, with this field set to zero.
+ * - 0x100
+ - \_\_u8
+ - s\_users[16\*48]
+ - ids of all file systems sharing the log. e2fsprogs/Linux don't allow
+ shared external journals, but I imagine Lustre (or ocfs2?), which use
+ the jbd2 code, might.
+
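+As a sketch of the ``s_checksum`` computation described above
+(illustrative C; this assumes crc32c, the usual algorithm choice, and
+treats the superblock as its full 1024 bytes)::
+
+    __be32 sb_checksum(struct journal_superblock_s *sb)
+    {
+            __be32 saved = sb->s_checksum;
+            u32 csum;
+
+            sb->s_checksum = 0;          /* field zeroed for the calculation */
+            csum = crc32c(~0, sb, 1024); /* entire 1024-byte superblock */
+            sb->s_checksum = saved;
+            return cpu_to_be32(csum);
+    }
+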
+.. _jbd2_compat:
+
+The journal compat features are any combination of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - Journal maintains checksums on the data blocks.
+ (JBD2\_FEATURE\_COMPAT\_CHECKSUM)
+
+.. _jbd2_incompat:
+
+The journal incompat features are any combination of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - Journal has block revocation records. (JBD2\_FEATURE\_INCOMPAT\_REVOKE)
+ * - 0x2
+ - Journal can deal with 64-bit block numbers.
+ (JBD2\_FEATURE\_INCOMPAT\_64BIT)
+ * - 0x4
+ - Journal commits asynchronously. (JBD2\_FEATURE\_INCOMPAT\_ASYNC\_COMMIT)
+ * - 0x8
+ - This journal uses v2 of the checksum on-disk format. Each journal
+ metadata block gets its own checksum, and the block tags in the
+ descriptor table contain checksums for each of the data blocks in the
+ journal. (JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2)
+ * - 0x10
+ - This journal uses v3 of the checksum on-disk format. This is the same as
+ v2, but the journal block tag size is fixed regardless of the size of
+ block numbers. (JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3)
+
+.. _jbd2_checksum_type:
+
+Journal checksum type codes are one of the following. crc32 or crc32c are the
+most likely choices.
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 1
+ - CRC32
+ * - 2
+ - MD5
+ * - 3
+ - SHA1
+ * - 4
+ - CRC32C
+
+Descriptor Block
+~~~~~~~~~~~~~~~~
+
+The descriptor block contains an array of journal block tags that
+describe the final locations of the data blocks that follow in the
+journal. Descriptor blocks are open-coded instead of being completely
+described by a data structure, but here is the block structure anyway.
+Descriptor blocks consume at least 36 bytes, but use a full block:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - journal\_header\_t
+ - (open coded)
+ - Common block header.
+ * - 0xC
+ - struct journal\_block\_tag\_s
+ - open coded array[]
+ - Enough tags either to fill up the block or to describe all the data
+ blocks that follow this descriptor block.
+
+Journal block tags have any of the following formats, depending on which
+journal feature and block tag flags are set.
+
+If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 is set, the journal block tag is
+defined as ``struct journal_block_tag3_s``, which looks like the
+following. The size is 16 or 32 bytes.
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_be32
+ - t\_blocknr
+ - Lower 32-bits of the location of where the corresponding data block
+ should end up on disk.
+ * - 0x4
+ - \_\_be32
+ - t\_flags
+ - Flags that go with the descriptor. See the table jbd2_tag_flags_ for
+ more info.
+ * - 0x8
+ - \_\_be32
+ - t\_blocknr\_high
+ - Upper 32-bits of the location of where the corresponding data block
+ should end up on disk. This is zero if JBD2\_FEATURE\_INCOMPAT\_64BIT is
+ not enabled.
+ * - 0xC
+ - \_\_be32
+ - t\_checksum
+ - Checksum of the journal UUID, the sequence number, and the data block.
+ * -
+ -
+ -
+ - This field appears to be open coded. It always comes at the end of the
+ tag, after t_checksum. This field is not present if the "same UUID" flag
+ is set.
+ * - 0x8 or 0xC
+ - char
+ - uuid[16]
+ - A UUID to go with this tag. This field appears to be copied from the
+ ``j_uuid`` field in ``struct journal_s``, but only tune2fs touches that
+ field.
+
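+A sketch of the ``t_checksum`` derivation for CSUM\_V3, chaining the
+three inputs named in the field description above (illustrative;
+``journal_uuid``, ``sequence``, ``data`` and ``blocksize`` stand in for
+values the journal writer already has)::
+
+    __be32 seq = cpu_to_be32(sequence);
+    u32 csum;
+
+    csum = crc32c(~0, journal_uuid, 16);    /* journal UUID */
+    csum = crc32c(csum, &seq, sizeof(seq)); /* transaction ID */
+    csum = crc32c(csum, data, blocksize);   /* the data block itself */
+    tag3->t_checksum = cpu_to_be32(csum);
+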
+.. _jbd2_tag_flags:
+
+The journal tag flags are any combination of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - On-disk block is escaped. The first four bytes of the data block just
+ happened to match the jbd2 magic number.
+ * - 0x2
+ - This block has the same UUID as previous, therefore the UUID field is
+ omitted.
+ * - 0x4
+ - The data block was deleted by the transaction. (Not used?)
+ * - 0x8
+ - This is the last tag in this descriptor block.
+
+If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 is NOT set, the journal block tag
+is defined as ``struct journal_block_tag_s``, which looks like the
+following. The size is 8, 12, 24, or 28 bytes:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_be32
+ - t\_blocknr
+ - Lower 32-bits of the location of where the corresponding data block
+ should end up on disk.
+ * - 0x4
+ - \_\_be16
+ - t\_checksum
+ - Checksum of the journal UUID, the sequence number, and the data block.
+ Note that only the lower 16 bits are stored.
+ * - 0x6
+ - \_\_be16
+ - t\_flags
+ - Flags that go with the descriptor. See the table jbd2_tag_flags_ for
+ more info.
+ * -
+ -
+ -
+ - This next field is only present if the super block indicates support for
+ 64-bit block numbers.
+ * - 0x8
+ - \_\_be32
+ - t\_blocknr\_high
+ - Upper 32-bits of the location of where the corresponding data block
+ should end up on disk.
+ * -
+ -
+ -
+ - This field appears to be open coded. It always comes at the end of the
+ tag, after t_flags or t_blocknr_high. This field is not present if the
+ "same UUID" flag is set.
+ * - 0x8 or 0xC
+ - char
+ - uuid[16]
+ - A UUID to go with this tag. This field appears to be copied from the
+ ``j_uuid`` field in ``struct journal_s``, but only tune2fs touches that
+ field.
+
+If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2 or
+JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 are set, the end of the block is a
+``struct jbd2_journal_block_tail``, which looks like this:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_be32
+ - t\_checksum
+ - Checksum of the journal UUID + the descriptor block, with this field set
+ to zero.
+
+Data Block
+~~~~~~~~~~
+
+In general, the data blocks being written to disk through the journal
+are written verbatim into the journal file after the descriptor block.
+However, if the first four bytes of the block match the jbd2 magic
+number then those four bytes are replaced with zeroes and the “escaped”
+flag is set in the descriptor block tag.
+
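+In C, the escaping rule reads roughly as follows (illustrative sketch;
+``data`` is the block being logged and ``tag_flags`` is the block tag's
+flag word)::
+
+    if (be32_to_cpu(*(__be32 *)data) == 0xC03B3998) { /* jbd2 magic */
+            *(__be32 *)data = 0;  /* zero the magic in the journal copy */
+            tag_flags |= 0x1;     /* escaped; replay restores the magic */
+    }
+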
+Revocation Block
+~~~~~~~~~~~~~~~~
+
+A revocation block is used to prevent replay of a block in an earlier
+transaction. This is used to mark blocks that were journalled at one
+time but are no longer journalled. Typically this happens if a metadata
+block is freed and re-allocated as a file data block; in this case, a
+journal replay after the file block was written to disk will cause
+corruption.
+
+**NOTE**: This mechanism is NOT used to express “this journal block is
+superseded by this other journal block”, as the author (djwong)
+mistakenly thought. Any block being added to a transaction will cause
+the removal of all existing revocation records for that block.
+
+Revocation blocks are described by
+``struct jbd2_journal_revoke_header_s``; they are at least 16 bytes in
+length, but use a full block:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - journal\_header\_t
+ - r\_header
+ - Common block header.
+ * - 0xC
+ - \_\_be32
+ - r\_count
+ - Number of bytes used in this block.
+ * - 0x10
+ - \_\_be32 or \_\_be64
+ - blocks[0]
+ - Blocks to revoke.
+
+After r\_count is a linear array of block numbers that are effectively
+revoked by this transaction. The size of each block number is 8 bytes if
+the superblock advertises 64-bit block number support, or 4 bytes
+otherwise.
+
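+A sketch of walking those records (illustrative; ``has_64bit`` reflects
+JBD2\_FEATURE\_INCOMPAT\_64BIT and ``buf`` points at the start of the
+revocation block)::
+
+    int offset = 16;                     /* records follow the header */
+    int end = be32_to_cpu(hdr->r_count); /* bytes used in this block */
+
+    while (offset < end) {
+            u64 blocknr;
+
+            if (has_64bit) {
+                    blocknr = be64_to_cpup((__be64 *)(buf + offset));
+                    offset += 8;
+            } else {
+                    blocknr = be32_to_cpup((__be32 *)(buf + offset));
+                    offset += 4;
+            }
+            /* record blocknr as revoked for this sequence number */
+    }
+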
+If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2 or
+JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 are set, the end of the revocation
+block is a ``struct jbd2_journal_revoke_tail``, which has this format:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_be32
+ - r\_checksum
+ - Checksum of the journal UUID + revocation block
+
+Commit Block
+~~~~~~~~~~~~
+
+The commit block is a sentinel that indicates that a transaction has been
+completely written to the journal. Once this commit block reaches the
+journal, the data stored with this transaction can be written to their
+final locations on disk.
+
+The commit block is described by ``struct commit_header``, which is 32
+bytes long (but uses a full block):
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - journal\_header\_s
+ - (open coded)
+ - Common block header.
+ * - 0xC
+ - unsigned char
+ - h\_chksum\_type
+ - The type of checksum to use to verify the integrity of the data blocks
+ in the transaction. See jbd2_checksum_type_ for more info.
+ * - 0xD
+ - unsigned char
+ - h\_chksum\_size
+ - The number of bytes used by the checksum. Most likely 4.
+ * - 0xE
+ - unsigned char
+ - h\_padding[2]
+ -
+ * - 0x10
+ - \_\_be32
+ - h\_chksum[JBD2\_CHECKSUM\_BYTES]
+ - 32 bytes of space to store checksums. If
+ JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2 or JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3
+ are set, the first ``__be32`` is the checksum of the journal UUID and
+ the entire commit block, with this field zeroed. If
+ JBD2\_FEATURE\_COMPAT\_CHECKSUM is set, the first ``__be32`` is the
+ crc32 of all the blocks already written to the transaction.
+ * - 0x30
+ - \_\_be64
+ - h\_commit\_sec
+ - The time that the transaction was committed, in seconds since the epoch.
+ * - 0x38
+ - \_\_be32
+ - h\_commit\_nsec
+ - Nanoseconds component of the above timestamp.
+
diff --git a/Documentation/filesystems/ext4/ondisk/mmp.rst b/Documentation/filesystems/ext4/ondisk/mmp.rst
new file mode 100644
index 000000000000..b7d7a3137f80
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/mmp.rst
@@ -0,0 +1,77 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Multiple Mount Protection
+-------------------------
+
+Multiple mount protection (MMP) is a feature that protects the
+filesystem against multiple hosts trying to use the filesystem
+simultaneously. When a filesystem is opened (for mounting, or fsck,
+etc.), the MMP code running on the node (call it node A) checks a
+sequence number. If the sequence number is EXT4\_MMP\_SEQ\_CLEAN, the
+open continues. If the sequence number is EXT4\_MMP\_SEQ\_FSCK, then
+fsck is (hopefully) running, and open fails immediately. Otherwise, the
+open code will wait for twice the specified MMP check interval and check
+the sequence number again. If the sequence number has changed, then the
+filesystem is active on another machine and the open fails. If the MMP
+code passes all of those checks, a new MMP sequence number is generated
+and written to the MMP block, and the mount proceeds.
+
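+Reduced to a sketch, that open-time check looks like this (illustrative;
+``read_mmp()``, ``wait_seconds()`` and ``new_seq()`` are stand-ins for
+the real I/O, delay, and sequence-generation helpers)::
+
+    u32 seq = le32_to_cpu(mmp->mmp_seq);
+
+    if (seq == EXT4_MMP_SEQ_FSCK)
+            return -EBUSY;                  /* fsck is (hopefully) running */
+    if (seq != EXT4_MMP_SEQ_CLEAN) {
+            wait_seconds(2 * check_interval);
+            if (le32_to_cpu(read_mmp()->mmp_seq) != seq)
+                    return -EBUSY;          /* active on another node */
+    }
+    mmp->mmp_seq = cpu_to_le32(new_seq());  /* claim it and write back */
+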
+While the filesystem is live, the kernel sets up a timer to re-check the
+MMP block at the specified MMP check interval. To perform the re-check,
+the MMP sequence number is re-read; if it does not match the in-memory
+MMP sequence number, then another node (node B) has mounted the
+filesystem, and node A remounts the filesystem read-only. If the
+sequence numbers match, the sequence number is incremented both in
+memory and on disk, and the re-check is complete.
+
+The hostname and device filename are written into the MMP block whenever
+an open operation succeeds. The MMP code does not use these values; they
+are provided purely for informational purposes.
+
+The checksum is calculated against the FS UUID and the MMP structure.
+The MMP structure (``struct mmp_struct``) is as follows:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - mmp\_magic
+ - Magic number for MMP, 0x004D4D50 (“MMP”).
+ * - 0x4
+ - \_\_le32
+ - mmp\_seq
+ - Sequence number, updated periodically.
+ * - 0x8
+ - \_\_le64
+ - mmp\_time
+ - Time that the MMP block was last updated.
+ * - 0x10
+ - char[64]
+ - mmp\_nodename
+ - Hostname of the node that opened the filesystem.
+ * - 0x50
+ - char[32]
+ - mmp\_bdevname
+ - Block device name of the filesystem.
+ * - 0x70
+ - \_\_le16
+ - mmp\_check\_interval
+ - The MMP re-check interval, in seconds.
+ * - 0x72
+ - \_\_le16
+ - mmp\_pad1
+ - Zero.
+ * - 0x74
+ - \_\_le32[226]
+ - mmp\_pad2
+ - Zero.
+ * - 0x3FC
+ - \_\_le32
+ - mmp\_checksum
+ - Checksum of the MMP block.
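+
+The ``mmp_checksum`` calculation, sketched per the description above
+(illustrative; the seed folds in the FS UUID)::
+
+    u32 seed = crc32c(~0, fs_uuid, 16);  /* FS UUID */
+    u32 csum = crc32c(seed, mmp,
+                      offsetof(struct mmp_struct, mmp_checksum));
+
+    mmp->mmp_checksum = cpu_to_le32(csum);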
diff --git a/Documentation/filesystems/ext4/ondisk/overview.rst b/Documentation/filesystems/ext4/ondisk/overview.rst
new file mode 100644
index 000000000000..cbab18baba12
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/overview.rst
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+High Level Design
+=================
+
+An ext4 file system is split into a series of block groups. To reduce
+performance difficulties due to fragmentation, the block allocator tries
+very hard to keep each file's blocks within the same group, thereby
+reducing seek times. The size of a block group is specified in
+``sb.s_blocks_per_group`` blocks, though it can also be calculated as 8 \*
+``block_size_in_bytes``. With the default block size of 4KiB, each group
+will contain 32,768 blocks, for a length of 128MiB. The number of block
+groups is the size of the device divided by the size of a block group.
+
+All fields in ext4 are written to disk in little-endian order. HOWEVER,
+all fields in jbd2 (the journal) are written to disk in big-endian
+order.
+
+.. include:: blocks.rst
+.. include:: blockgroup.rst
+.. include:: special_inodes.rst
+.. include:: allocators.rst
+.. include:: checksums.rst
+.. include:: bigalloc.rst
+.. include:: inlinedata.rst
+.. include:: eainode.rst
diff --git a/Documentation/filesystems/ext4/ondisk/special_inodes.rst b/Documentation/filesystems/ext4/ondisk/special_inodes.rst
new file mode 100644
index 000000000000..a82f70c9baeb
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/special_inodes.rst
@@ -0,0 +1,38 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Special inodes
+--------------
+
+ext4 reserves some inodes for special features, as follows:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - inode Number
+ - Purpose
+ * - 0
+ - Doesn't exist; there is no inode 0.
+ * - 1
+ - List of defective blocks.
+ * - 2
+ - Root directory.
+ * - 3
+ - User quota.
+ * - 4
+ - Group quota.
+ * - 5
+ - Boot loader.
+ * - 6
+ - Undelete directory.
+ * - 7
+ - Reserved group descriptors inode. (“resize inode”)
+ * - 8
+ - Journal inode.
+ * - 9
+ - The “exclude” inode, for snapshots(?)
+ * - 10
+ - Replica inode, used for some non-upstream feature?
+ * - 11
+ - Traditional first non-reserved inode. Usually this is the lost+found directory. See s\_first\_ino in the superblock.
+
diff --git a/Documentation/filesystems/ext4/ondisk/super.rst b/Documentation/filesystems/ext4/ondisk/super.rst
new file mode 100644
index 000000000000..5f81dd87e0b9
--- /dev/null
+++ b/Documentation/filesystems/ext4/ondisk/super.rst
@@ -0,0 +1,801 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Super Block
+-----------
+
+The superblock records various information about the enclosing
+filesystem, such as block counts, inode counts, supported features,
+maintenance information, and more.
+
+If the sparse\_super feature flag is set, redundant copies of the
+superblock and group descriptors are kept only in the groups whose group
+number is either 0 or a power of 3, 5, or 7. If the flag is not set,
+redundant copies are kept in all groups.
+
+The superblock checksum is calculated against the superblock structure,
+which includes the FS UUID.
+
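+As a sketch, that works out to crc32c over every byte that precedes the
+``s_checksum`` field at offset 0x3FC (illustrative C; the FS UUID is
+covered because ``s_uuid`` is a field within the structure)::
+
+    u32 csum = crc32c(~0, es,
+                      offsetof(struct ext4_super_block, s_checksum));
+
+    es->s_checksum = cpu_to_le32(csum);
+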
+The ext4 superblock is laid out as follows in
+``struct ext4_super_block``:
+
+.. list-table::
+ :widths: 1 1 1 77
+ :header-rows: 1
+
+ * - Offset
+ - Type
+ - Name
+ - Description
+ * - 0x0
+ - \_\_le32
+ - s\_inodes\_count
+ - Total inode count.
+ * - 0x4
+ - \_\_le32
+ - s\_blocks\_count\_lo
+ - Total block count.
+ * - 0x8
+ - \_\_le32
+ - s\_r\_blocks\_count\_lo
+ - This number of blocks can only be allocated by the super-user.
+ * - 0xC
+ - \_\_le32
+ - s\_free\_blocks\_count\_lo
+ - Free block count.
+ * - 0x10
+ - \_\_le32
+ - s\_free\_inodes\_count
+ - Free inode count.
+ * - 0x14
+ - \_\_le32
+ - s\_first\_data\_block
+ - First data block. This must be at least 1 for 1k-block filesystems and
+ is typically 0 for all other block sizes.
+ * - 0x18
+ - \_\_le32
+ - s\_log\_block\_size
+ - Block size is 2 ^ (10 + s\_log\_block\_size).
+ * - 0x1C
+ - \_\_le32
+ - s\_log\_cluster\_size
+ - Cluster size is (2 ^ s\_log\_cluster\_size) blocks if bigalloc is
+ enabled. Otherwise s\_log\_cluster\_size must equal s\_log\_block\_size.
+ * - 0x20
+ - \_\_le32
+ - s\_blocks\_per\_group
+ - Blocks per group.
+ * - 0x24
+ - \_\_le32
+ - s\_clusters\_per\_group
+ - Clusters per group, if bigalloc is enabled. Otherwise
+ s\_clusters\_per\_group must equal s\_blocks\_per\_group.
+ * - 0x28
+ - \_\_le32
+ - s\_inodes\_per\_group
+ - Inodes per group.
+ * - 0x2C
+ - \_\_le32
+ - s\_mtime
+ - Mount time, in seconds since the epoch.
+ * - 0x30
+ - \_\_le32
+ - s\_wtime
+ - Write time, in seconds since the epoch.
+ * - 0x34
+ - \_\_le16
+ - s\_mnt\_count
+ - Number of mounts since the last fsck.
+ * - 0x36
+ - \_\_le16
+ - s\_max\_mnt\_count
+ - Number of mounts beyond which a fsck is needed.
+ * - 0x38
+ - \_\_le16
+ - s\_magic
+ - Magic signature, 0xEF53
+ * - 0x3A
+ - \_\_le16
+ - s\_state
+ - File system state. See super_state_ for more info.
+ * - 0x3C
+ - \_\_le16
+ - s\_errors
+ - Behaviour when detecting errors. See super_errors_ for more info.
+ * - 0x3E
+ - \_\_le16
+ - s\_minor\_rev\_level
+ - Minor revision level.
+ * - 0x40
+ - \_\_le32
+ - s\_lastcheck
+ - Time of last check, in seconds since the epoch.
+ * - 0x44
+ - \_\_le32
+ - s\_checkinterval
+ - Maximum time between checks, in seconds.
+ * - 0x48
+ - \_\_le32
+ - s\_creator\_os
+ - Creator OS. See the table super_creator_ for more info.
+ * - 0x4C
+ - \_\_le32
+ - s\_rev\_level
+ - Revision level. See the table super_revision_ for more info.
+ * - 0x50
+ - \_\_le16
+ - s\_def\_resuid
+ - Default uid for reserved blocks.
+ * - 0x52
+ - \_\_le16
+ - s\_def\_resgid
+ - Default gid for reserved blocks.
+ * -
+ -
+ -
+ - These fields are for EXT4_DYNAMIC_REV superblocks only.
+
+ Note: the difference between the compatible feature set and the
+ incompatible feature set is that if there is a bit set in the
+ incompatible feature set that the kernel doesn't know about, it should
+ refuse to mount the filesystem.
+
+ e2fsck's requirements are more strict; if it doesn't know
+ about a feature in either the compatible or incompatible feature set, it
+ must abort and not try to meddle with things it doesn't understand...
+ * - 0x54
+ - \_\_le32
+ - s\_first\_ino
+ - First non-reserved inode.
+ * - 0x58
+ - \_\_le16
+ - s\_inode\_size
+ - Size of inode structure, in bytes.
+ * - 0x5A
+ - \_\_le16
+ - s\_block\_group\_nr
+ - Block group # of this superblock.
+ * - 0x5C
+ - \_\_le32
+ - s\_feature\_compat
+ - Compatible feature set flags. Kernel can still read/write this fs even
+ if it doesn't understand a flag; fsck should not do that. See the
+ super_compat_ table for more info.
+ * - 0x60
+ - \_\_le32
+ - s\_feature\_incompat
+ - Incompatible feature set. If the kernel or fsck doesn't understand one
+ of these bits, it should stop. See the super_incompat_ table for more
+ info.
+ * - 0x64
+ - \_\_le32
+ - s\_feature\_ro\_compat
+ - Readonly-compatible feature set. If the kernel doesn't understand one of
+ these bits, it can still mount read-only. See the super_rocompat_ table
+ for more info.
+ * - 0x68
+ - \_\_u8
+ - s\_uuid[16]
+ - 128-bit UUID for volume.
+ * - 0x78
+ - char
+ - s\_volume\_name[16]
+ - Volume label.
+ * - 0x88
+ - char
+ - s\_last\_mounted[64]
+ - Directory where filesystem was last mounted.
+ * - 0xC8
+ - \_\_le32
+ - s\_algorithm\_usage\_bitmap
+ - For compression (Not used in e2fsprogs/Linux)
+ * -
+ -
+ -
+ - Performance hints. Directory preallocation should only happen if the
+ EXT4_FEATURE_COMPAT_DIR_PREALLOC flag is on.
+ * - 0xCC
+ - \_\_u8
+ - s\_prealloc\_blocks
+ - #. of blocks to try to preallocate for ... files? (Not used in
+ e2fsprogs/Linux)
+ * - 0xCD
+ - \_\_u8
+ - s\_prealloc\_dir\_blocks
+ - #. of blocks to preallocate for directories. (Not used in
+ e2fsprogs/Linux)
+ * - 0xCE
+ - \_\_le16
+ - s\_reserved\_gdt\_blocks
+ - Number of reserved GDT entries for future filesystem expansion.
+ * -
+ -
+ -
+ - Journalling support is valid only if EXT4_FEATURE_COMPAT_HAS_JOURNAL is
+ set.
+ * - 0xD0
+ - \_\_u8
+ - s\_journal\_uuid[16]
+ - UUID of journal superblock
+ * - 0xE0
+ - \_\_le32
+ - s\_journal\_inum
+ - inode number of journal file.
+ * - 0xE4
+ - \_\_le32
+ - s\_journal\_dev
+ - Device number of journal file, if the external journal feature flag is
+ set.
+ * - 0xE8
+ - \_\_le32
+ - s\_last\_orphan
+ - Start of list of orphaned inodes to delete.
+ * - 0xEC
+ - \_\_le32
+ - s\_hash\_seed[4]
+ - HTREE hash seed.
+ * - 0xFC
+ - \_\_u8
+ - s\_def\_hash\_version
+ - Default hash algorithm to use for directory hashes. See super_def_hash_
+ for more info.
+ * - 0xFD
+ - \_\_u8
+ - s\_jnl\_backup\_type
+ - If this value is 0 or EXT3\_JNL\_BACKUP\_BLOCKS (1), then the
+ ``s_jnl_blocks`` field contains a duplicate copy of the inode's
+ ``i_block[]`` array and ``i_size``.
+ * - 0xFE
+ - \_\_le16
+ - s\_desc\_size
+ - Size of group descriptors, in bytes, if the 64bit incompat feature flag
+ is set.
+ * - 0x100
+ - \_\_le32
+ - s\_default\_mount\_opts
+ - Default mount options. See the super_mountopts_ table for more info.
+ * - 0x104
+ - \_\_le32
+ - s\_first\_meta\_bg
+ - First metablock block group, if the meta\_bg feature is enabled.
+ * - 0x108
+ - \_\_le32
+ - s\_mkfs\_time
+ - When the filesystem was created, in seconds since the epoch.
+ * - 0x10C
+ - \_\_le32
+ - s\_jnl\_blocks[17]
+ - Backup copy of the journal inode's ``i_block[]`` array in the first 15
+ elements and i\_size\_high and i\_size in the 16th and 17th elements,
+ respectively.
+ * -
+ -
+ -
+ - 64bit support is valid only if EXT4_FEATURE_COMPAT_64BIT is set.
+ * - 0x150
+ - \_\_le32
+ - s\_blocks\_count\_hi
+ - High 32-bits of the block count.
+ * - 0x154
+ - \_\_le32
+ - s\_r\_blocks\_count\_hi
+ - High 32-bits of the reserved block count.
+ * - 0x158
+ - \_\_le32
+ - s\_free\_blocks\_count\_hi
+ - High 32-bits of the free block count.
+ * - 0x15C
+ - \_\_le16
+ - s\_min\_extra\_isize
+ - All inodes have at least # bytes.
+ * - 0x15E
+ - \_\_le16
+ - s\_want\_extra\_isize
+ - New inodes should reserve # bytes.
+ * - 0x160
+ - \_\_le32
+ - s\_flags
+ - Miscellaneous flags. See the super_flags_ table for more info.
+ * - 0x164
+ - \_\_le16
+ - s\_raid\_stride
+ - RAID stride. This is the number of logical blocks read from or written
+ to the disk before moving to the next disk. This affects the placement
+ of filesystem metadata, which will hopefully make RAID storage faster.
+ * - 0x166
+ - \_\_le16
+ - s\_mmp\_interval
+ - #. seconds to wait in multi-mount prevention (MMP) checking. In theory,
+ MMP is a mechanism to record in the superblock which host and device
+ have mounted the filesystem, in order to prevent multiple mounts. This
+ feature does not seem to be implemented...
+ * - 0x168
+ - \_\_le64
+ - s\_mmp\_block
+ - Block # for multi-mount protection data.
+ * - 0x170
+ - \_\_le32
+ - s\_raid\_stripe\_width
+ - RAID stripe width. This is the number of logical blocks read from or
+ written to the disk before coming back to the current disk. This is used
+ by the block allocator to try to reduce the number of read-modify-write
+ operations in a RAID5/6.
+ * - 0x174
+ - \_\_u8
+ - s\_log\_groups\_per\_flex
+ - Size of a flexible block group is 2 ^ ``s_log_groups_per_flex``.
+ * - 0x175
+ - \_\_u8
+ - s\_checksum\_type
+ - Metadata checksum algorithm type. The only valid value is 1 (crc32c).
+ * - 0x176
+ - \_\_le16
+ - s\_reserved\_pad
+ -
+ * - 0x178
+ - \_\_le64
+ - s\_kbytes\_written
+ - Number of KiB written to this filesystem over its lifetime.
+ * - 0x180
+ - \_\_le32
+ - s\_snapshot\_inum
+ - inode number of active snapshot. (Not used in e2fsprogs/Linux.)
+ * - 0x184
+ - \_\_le32
+ - s\_snapshot\_id
+ - Sequential ID of active snapshot. (Not used in e2fsprogs/Linux.)
+ * - 0x188
+ - \_\_le64
+ - s\_snapshot\_r\_blocks\_count
+ - Number of blocks reserved for active snapshot's future use. (Not used in
+ e2fsprogs/Linux.)
+ * - 0x190
+ - \_\_le32
+ - s\_snapshot\_list
+ - inode number of the head of the on-disk snapshot list. (Not used in
+ e2fsprogs/Linux.)
+ * - 0x194
+ - \_\_le32
+ - s\_error\_count
+ - Number of errors seen.
+ * - 0x198
+ - \_\_le32
+ - s\_first\_error\_time
+ - First time an error happened, in seconds since the epoch.
+ * - 0x19C
+ - \_\_le32
+ - s\_first\_error\_ino
+ - inode involved in first error.
+ * - 0x1A0
+ - \_\_le64
+ - s\_first\_error\_block
+ - Number of the block involved in the first error.
+ * - 0x1A8
+ - \_\_u8
+ - s\_first\_error\_func[32]
+ - Name of function where the error happened.
+ * - 0x1C8
+ - \_\_le32
+ - s\_first\_error\_line
+ - Line number where error happened.
+ * - 0x1CC
+ - \_\_le32
+ - s\_last\_error\_time
+ - Time of most recent error, in seconds since the epoch.
+ * - 0x1D0
+ - \_\_le32
+ - s\_last\_error\_ino
+ - inode involved in most recent error.
+ * - 0x1D4
+ - \_\_le32
+ - s\_last\_error\_line
+ - Line number where most recent error happened.
+ * - 0x1D8
+ - \_\_le64
+ - s\_last\_error\_block
+ - Number of the block involved in the most recent error.
+ * - 0x1E0
+ - \_\_u8
+ - s\_last\_error\_func[32]
+ - Name of function where the most recent error happened.
+ * - 0x200
+ - \_\_u8
+ - s\_mount\_opts[64]
+ - ASCIIZ string of mount options.
+ * - 0x240
+ - \_\_le32
+ - s\_usr\_quota\_inum
+ - Inode number of user `quota <quota>`__ file.
+ * - 0x244
+ - \_\_le32
+ - s\_grp\_quota\_inum
+ - Inode number of group `quota <quota>`__ file.
+ * - 0x248
+ - \_\_le32
+ - s\_overhead\_blocks
+ - Overhead blocks/clusters in fs. (Huh? This field is always zero, which
+ means that the kernel calculates it dynamically.)
+ * - 0x24C
+ - \_\_le32
+ - s\_backup\_bgs[2]
+ - Block groups containing superblock backups (if sparse\_super2)
+ * - 0x254
+ - \_\_u8
+ - s\_encrypt\_algos[4]
+ - Encryption algorithms in use. There can be up to four algorithms in use
+ at any time; valid algorithm codes are given in the super_encrypt_ table
+ below.
+ * - 0x258
+ - \_\_u8
+ - s\_encrypt\_pw\_salt[16]
+ - Salt for the string2key algorithm for encryption.
+ * - 0x268
+ - \_\_le32
+ - s\_lpf\_ino
+ - Inode number of lost+found
+ * - 0x26C
+ - \_\_le32
+ - s\_prj\_quota\_inum
+ - Inode that tracks project quotas.
+ * - 0x270
+ - \_\_le32
+ - s\_checksum\_seed
+ - Checksum seed used for metadata\_csum calculations. This value is
+ crc32c(~0, $orig\_fs\_uuid).
+ * - 0x274
+ - \_\_u8
+ - s\_wtime_hi
+ - Upper 8 bits of the s_wtime field.
+ * - 0x275
+ - \_\_u8
+ - s\_mtime_hi
+ - Upper 8 bits of the s_mtime field.
+ * - 0x276
+ - \_\_u8
+ - s\_mkfs_time_hi
+ - Upper 8 bits of the s_mkfs_time field.
+ * - 0x277
+ - \_\_u8
+ - s\_lastcheck_hi
+ - Upper 8 bits of the s_lastcheck field.
+ * - 0x278
+ - \_\_u8
+ - s\_first_error_time_hi
+ - Upper 8 bits of the s_first_error_time field.
+ * - 0x279
+ - \_\_u8
+ - s\_last_error_time_hi
+ - Upper 8 bits of the s_last_error_time field.
+ * - 0x27A
+ - \_\_u8[2]
+ - s\_pad
+ - Zero padding.
+ * - 0x27C
+ - \_\_le32
+ - s\_reserved[96]
+ - Padding to the end of the block.
+ * - 0x3FC
+ - \_\_le32
+ - s\_checksum
+ - Superblock checksum.
+
+.. _super_state:
+
+The superblock state is some combination of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x0001
+ - Cleanly umounted
+ * - 0x0002
+ - Errors detected
+ * - 0x0004
+ - Orphans being recovered
+
+.. _super_errors:
+
+The superblock error policy is one of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 1
+ - Continue
+ * - 2
+ - Remount read-only
+ * - 3
+ - Panic
+
+.. _super_creator:
+
+The filesystem creator is one of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0
+ - Linux
+ * - 1
+ - Hurd
+ * - 2
+ - Masix
+ * - 3
+ - FreeBSD
+ * - 4
+ - Lites
+
+.. _super_revision:
+
+The superblock revision is one of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0
+ - Original format
+ * - 1
+ - v2 format w/ dynamic inode sizes
+
+Note that ``EXT4_DYNAMIC_REV`` refers to a revision 1 or newer filesystem.
+
+.. _super_compat:
+
+The superblock compatible features field is a combination of any of the
+following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - Directory preallocation (COMPAT\_DIR\_PREALLOC).
+ * - 0x2
+ - “imagic inodes”. Not clear from the code what this does
+ (COMPAT\_IMAGIC\_INODES).
+ * - 0x4
+ - Has a journal (COMPAT\_HAS\_JOURNAL).
+ * - 0x8
+ - Supports extended attributes (COMPAT\_EXT\_ATTR).
+ * - 0x10
+ - Has reserved GDT blocks for filesystem expansion
+ (COMPAT\_RESIZE\_INODE). Requires RO\_COMPAT\_SPARSE\_SUPER.
+ * - 0x20
+ - Has directory indices (COMPAT\_DIR\_INDEX).
+ * - 0x40
+ - “Lazy BG”. Not in Linux kernel, seems to have been for uninitialized
+ block groups? (COMPAT\_LAZY\_BG)
+ * - 0x80
+ - “Exclude inode”. Not used. (COMPAT\_EXCLUDE\_INODE).
+ * - 0x100
+ - “Exclude bitmap”. Seems to be used to indicate the presence of
+ snapshot-related exclude bitmaps? Not defined in kernel or used in
+ e2fsprogs (COMPAT\_EXCLUDE\_BITMAP).
+ * - 0x200
+ - Sparse Super Block, v2. If this flag is set, the SB field s\_backup\_bgs
+ points to the two block groups that contain backup superblocks
+ (COMPAT\_SPARSE\_SUPER2).
+
+.. _super_incompat:
+
+The superblock incompatible features field is a combination of any of the
+following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - Compression (INCOMPAT\_COMPRESSION).
+ * - 0x2
+ - Directory entries record the file type. See ext4\_dir\_entry\_2 below
+ (INCOMPAT\_FILETYPE).
+ * - 0x4
+ - Filesystem needs recovery (INCOMPAT\_RECOVER).
+ * - 0x8
+ - Filesystem has a separate journal device (INCOMPAT\_JOURNAL\_DEV).
+ * - 0x10
+ - Meta block groups. See the earlier discussion of this feature
+ (INCOMPAT\_META\_BG).
+ * - 0x40
+ - Files in this filesystem use extents (INCOMPAT\_EXTENTS).
+ * - 0x80
+ - Enable a filesystem size of 2^64 blocks (INCOMPAT\_64BIT).
+ * - 0x100
+ - Multiple mount protection. Not implemented (INCOMPAT\_MMP).
+ * - 0x200
+ - Flexible block groups. See the earlier discussion of this feature
+ (INCOMPAT\_FLEX\_BG).
+ * - 0x400
+ - Inodes can be used to store large extended attribute values
+ (INCOMPAT\_EA\_INODE).
+ * - 0x1000
+ - Data in directory entry (INCOMPAT\_DIRDATA). (Not implemented?)
+ * - 0x2000
+ - Metadata checksum seed is stored in the superblock. This feature enables
+ the administrator to change the UUID of a metadata\_csum filesystem
+ while the filesystem is mounted; without it, the checksum definition
+ requires all metadata blocks to be rewritten (INCOMPAT\_CSUM\_SEED).
+ * - 0x4000
+ - Large directory >2GB or 3-level htree (INCOMPAT\_LARGEDIR). Prior to
+ this feature, directories could not be larger than 4GiB and could not
+ have an htree more than 2 levels deep. If this feature is enabled,
+ directories can be larger than 4GiB and have a maximum htree depth of 3.
+ * - 0x8000
+ - Data in inode (INCOMPAT\_INLINE\_DATA).
+ * - 0x10000
+ - Encrypted inodes are present on the filesystem. (INCOMPAT\_ENCRYPT).
+
+.. _super_rocompat:
+
+The superblock read-only compatible features field is a combination of any of
+the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x1
+ - Sparse superblocks. See the earlier discussion of this feature
+ (RO\_COMPAT\_SPARSE\_SUPER).
+ * - 0x2
+ - This filesystem has been used to store a file greater than 2GiB
+ (RO\_COMPAT\_LARGE\_FILE).
+ * - 0x4
+ - Not used in kernel or e2fsprogs (RO\_COMPAT\_BTREE\_DIR).
+ * - 0x8
+ - This filesystem has files whose sizes are represented in units of
+ logical blocks, not 512-byte sectors. This implies a very large file
+ indeed! (RO\_COMPAT\_HUGE\_FILE)
+ * - 0x10
+ - Group descriptors have checksums. In addition to detecting corruption,
+ this is useful for lazy formatting with uninitialized groups
+ (RO\_COMPAT\_GDT\_CSUM).
+ * - 0x20
+ - Indicates that the old ext3 32,000 subdirectory limit no longer applies
+ (RO\_COMPAT\_DIR\_NLINK). A directory's i\_links\_count will be set to 1
+ if it is incremented past 64,999.
+ * - 0x40
+ - Indicates that large inodes exist on this filesystem
+ (RO\_COMPAT\_EXTRA\_ISIZE).
+ * - 0x80
+ - This filesystem has a snapshot (RO\_COMPAT\_HAS\_SNAPSHOT).
+ * - 0x100
+ - `Quota <Quota>`__ (RO\_COMPAT\_QUOTA).
+ * - 0x200
+ - This filesystem supports “bigalloc”, which means that file extents are
+ tracked in units of clusters (of blocks) instead of blocks
+ (RO\_COMPAT\_BIGALLOC).
+ * - 0x400
+ - This filesystem supports metadata checksumming.
+ (RO\_COMPAT\_METADATA\_CSUM; implies RO\_COMPAT\_GDT\_CSUM, though
+ GDT\_CSUM must not be set)
+ * - 0x800
+ - Filesystem supports replicas. This feature is neither in the kernel nor
+ e2fsprogs. (RO\_COMPAT\_REPLICA)
+ * - 0x1000
+ - Read-only filesystem image; the kernel will not mount this image
+ read-write and most tools will refuse to write to the image.
+ (RO\_COMPAT\_READONLY)
+ * - 0x2000
+ - Filesystem tracks project quotas. (RO\_COMPAT\_PROJECT)
+
+.. _super_def_hash:
+
+The ``s_def_hash_version`` field is one of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x0
+ - Legacy.
+ * - 0x1
+ - Half MD4.
+ * - 0x2
+ - Tea.
+ * - 0x3
+ - Legacy, unsigned.
+ * - 0x4
+ - Half MD4, unsigned.
+ * - 0x5
+ - Tea, unsigned.
+
+.. _super_mountopts:
+
+The ``s_default_mount_opts`` field is any combination of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x0001
+ - Print debugging info upon (re)mount. (EXT4\_DEFM\_DEBUG)
+ * - 0x0002
+ - New files take the gid of the containing directory (instead of the fsgid
+ of the current process). (EXT4\_DEFM\_BSDGROUPS)
+ * - 0x0004
+ - Support userspace-provided extended attributes. (EXT4\_DEFM\_XATTR\_USER)
+ * - 0x0008
+ - Support POSIX access control lists (ACLs). (EXT4\_DEFM\_ACL)
+ * - 0x0010
+ - Do not support 32-bit UIDs. (EXT4\_DEFM\_UID16)
+ * - 0x0020
+ - All data and metadata are committed to the journal.
+ (EXT4\_DEFM\_JMODE\_DATA)
+ * - 0x0040
+ - All data are flushed to the disk before metadata are committed to the
+ journal. (EXT4\_DEFM\_JMODE\_ORDERED)
+ * - 0x0060
+ - Data ordering is not preserved; data may be written after the metadata
+ has been written. (EXT4\_DEFM\_JMODE\_WBACK)
+ * - 0x0100
+ - Disable write flushes. (EXT4\_DEFM\_NOBARRIER)
+ * - 0x0200
+ - Track which blocks in a filesystem are metadata and therefore should not
+ be used as data blocks. This option will be enabled by default on 3.18,
+ hopefully. (EXT4\_DEFM\_BLOCK\_VALIDITY)
+ * - 0x0400
+ - Enable DISCARD support, where the storage device is told about blocks
+ becoming unused. (EXT4\_DEFM\_DISCARD)
+ * - 0x0800
+ - Disable delayed allocation. (EXT4\_DEFM\_NODELALLOC)
+
+.. _super_flags:
+
+The ``s_flags`` field is any combination of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0x0001
+ - Signed directory hash in use.
+ * - 0x0002
+ - Unsigned directory hash in use.
+ * - 0x0004
+ - To test development code.
+
+.. _super_encrypt:
+
+The ``s_encrypt_algos`` list can contain any of the following:
+
+.. list-table::
+ :widths: 1 79
+ :header-rows: 1
+
+ * - Value
+ - Description
+ * - 0
+ - Invalid algorithm (ENCRYPTION\_MODE\_INVALID).
+ * - 1
+ - 256-bit AES in XTS mode (ENCRYPTION\_MODE\_AES\_256\_XTS).
+ * - 2
+ - 256-bit AES in GCM mode (ENCRYPTION\_MODE\_AES\_256\_GCM).
+ * - 3
+ - 256-bit AES in CBC mode (ENCRYPTION\_MODE\_AES\_256\_CBC).
+
+Total size of the superblock is 1024 bytes.
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index 53b89d0edc15..46d1b1be3a51 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -71,6 +71,39 @@ Other Functions
.. kernel-doc:: fs/block_dev.c
:export:
+.. kernel-doc:: fs/anon_inodes.c
+ :export:
+
+.. kernel-doc:: fs/attr.c
+ :export:
+
+.. kernel-doc:: fs/d_path.c
+ :export:
+
+.. kernel-doc:: fs/dax.c
+ :export:
+
+.. kernel-doc:: fs/direct-io.c
+ :export:
+
+.. kernel-doc:: fs/file_table.c
+ :export:
+
+.. kernel-doc:: fs/libfs.c
+ :export:
+
+.. kernel-doc:: fs/posix_acl.c
+ :export:
+
+.. kernel-doc:: fs/stat.c
+ :export:
+
+.. kernel-doc:: fs/sync.c
+ :export:
+
+.. kernel-doc:: fs/xattr.c
+ :export:
+
The proc filesystem
===================
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 17bb4dc28fae..7b7b845c490a 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -602,3 +602,23 @@ in your dentry operations instead.
dentry separately, and it now has request_mask and query_flags arguments
to specify the fields and sync type requested by statx. Filesystems not
supporting any statx-specific features may ignore the new arguments.
+--
+[mandatory]
+ ->atomic_open() calling conventions have changed. Gone is int *opened,
+ along with FILE_OPENED/FILE_CREATED. In place of those we have
+ FMODE_OPENED/FMODE_CREATED, set in file->f_mode. Additionally, return
+ value for 'called finish_no_open(), open it yourself' case has become
+ 0, not 1. Since finish_no_open() itself is returning 0 now, that part
+ does not need any changes in ->atomic_open() instances.
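+ A sketch of the resulting shape (illustrative; the foofs_* names are
+ made up and error handling is elided):
+
+     static int foofs_atomic_open(struct inode *dir, struct dentry *dentry,
+                                  struct file *file, unsigned open_flag,
+                                  umode_t create_mode)
+     {
+             if (!can_open_atomically(dir, dentry)) /* e.g. a symlink */
+                     return finish_no_open(file,
+                                           foofs_lookup(dir, dentry, 0));
+             /* ... create and/or open the file here ... */
+             file->f_mode |= FMODE_CREATED; /* only if we created it */
+             return finish_open(file, dentry, foofs_open);
+     }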
+--
+[mandatory]
+ alloc_file() has become static now; two wrappers are to be used instead.
+ alloc_file_pseudo(inode, vfsmount, name, flags, ops) is for the cases
+ when dentry needs to be created; that's the majority of old alloc_file()
+ users. Calling conventions: on success a reference to the new struct
+ file is returned and the caller's reference to the inode is subsumed by
+ that. On
+ failure, ERR_PTR() is returned and no caller's references are affected,
+ so the caller needs to drop the inode reference it held.
+ alloc_file_clone(file, flags, ops) does not affect any caller's references.
+ On success you get a new struct file sharing the mount/dentry with the
+ original, on failure - ERR_PTR().
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 520f6a84cf50..1605acbb23b6 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -870,6 +870,7 @@ Committed_AS: 100056 kB
VmallocTotal: 112216 kB
VmallocUsed: 428 kB
VmallocChunk: 111088 kB
+HardwareCorrupted: 0 kB
AnonHugePages: 49152 kB
ShmemHugePages: 0 kB
ShmemPmdMapped: 0 kB
@@ -915,6 +916,8 @@ MemAvailable: An estimate of how much memory is available for starting new
Dirty: Memory which is waiting to get written back to the disk
Writeback: Memory which is actively being written back to the disk
AnonPages: Non-file backed pages mapped into userspace page tables
+HardwareCorrupted: The amount of RAM, in KB, that the kernel identifies as
+ corrupted.
AnonHugePages: Non-file backed huge pages mapped into userspace page tables
Mapped: files which have been mmaped, such as libraries
Shmem: Total memory used by shared memory (shmem) and tmpfs
diff --git a/Documentation/filesystems/relay.txt b/Documentation/filesystems/relay.txt
index 33e2f3694733..cd709a94d054 100644
--- a/Documentation/filesystems/relay.txt
+++ b/Documentation/filesystems/relay.txt
@@ -222,7 +222,7 @@ using debugfs:
*/
static struct dentry *create_buf_file_handler(const char *filename,
struct dentry *parent,
- int mode,
+ umode_t mode,
struct rchan_buf *buf,
int *is_global)
{
@@ -375,7 +375,7 @@ would be very similar:
static int subbuf_start(struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
- unsigned int prev_padding)
+ size_t prev_padding)
{
if (prev_subbuf)
*((unsigned *)prev_subbuf) = prev_padding;
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index f608180ad59d..85907d5b9c2c 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -386,7 +386,7 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
void (*update_time)(struct inode *, struct timespec *, int);
int (*atomic_open)(struct inode *, struct dentry *, struct file *,
- unsigned open_flag, umode_t create_mode, int *opened);
+ unsigned open_flag, umode_t create_mode);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
};
@@ -496,13 +496,15 @@ otherwise noted.
atomic_open: called on the last component of an open. Using this optional
method the filesystem can look up, possibly create and open the file in
- one atomic operation. If it cannot perform this (e.g. the file type
- turned out to be wrong) it may signal this by returning 1 instead of
- usual 0 or -ve . This method is only called if the last component is
- negative or needs lookup. Cached positive dentries are still handled by
- f_op->open(). If the file was created, the FILE_CREATED flag should be
- set in "opened". In case of O_EXCL the method must only succeed if the
- file didn't exist and hence FILE_CREATED shall always be set on success.
+ one atomic operation. If it wants to leave actual opening to the
+ caller (e.g. if the file turned out to be a symlink, device, or just
+ something the filesystem won't do atomic open for), it may signal this by
+ returning finish_no_open(file, dentry). This method is only called if
+ the last component is negative or needs lookup. Cached positive dentries
+ are still handled by f_op->open(). If the file was created,
+ the FMODE_CREATED flag should be set in file->f_mode. In case of O_EXCL
+ the method must only succeed if the file didn't exist and hence FMODE_CREATED
+ shall always be set on success.
tmpfile: called in the end of O_TMPFILE open(). Optional, equivalent to
atomically creating, opening and unlinking a file in given directory.
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 4d9ff0a7f8e1..a9ae82fb9d13 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -223,8 +223,6 @@ Deprecated Mount Options
Name Removal Schedule
---- ----------------
- barrier no earlier than v4.15
- nobarrier no earlier than v4.15
Removed Mount Options
@@ -236,6 +234,8 @@ Removed Mount Options
ihashsize v4.0
irixsgid v4.0
osyncisdsync/osyncisosync v4.0
+ barrier v4.19
+ nobarrier v4.19
sysctls
diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst
new file mode 100644
index 000000000000..a740e491dfcc
--- /dev/null
+++ b/Documentation/gpu/amdgpu.rst
@@ -0,0 +1,129 @@
+=========================
+ drm/amdgpu AMDgpu driver
+=========================
+
+The drm/amdgpu driver supports all AMD Radeon GPUs based on the Graphics Core
+Next (GCN) architecture.
+
+Module Parameters
+=================
+
+The amdgpu driver supports the following module parameters:
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+
+Core Driver Infrastructure
+==========================
+
+This section covers core driver infrastructure.
+
+.. _amdgpu_memory_domains:
+
+Memory Domains
+--------------
+
+.. kernel-doc:: include/uapi/drm/amdgpu_drm.h
+ :doc: memory domains
+
+Buffer Objects
+--------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+ :doc: amdgpu_object
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+ :internal:
+
+PRIME Buffer Sharing
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+ :doc: PRIME Buffer Sharing
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+ :internal:
+
+MMU Notifier
+------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+ :doc: MMU Notifier
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+ :internal:
+
+AMDGPU Virtual Memory
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+ :doc: GPUVM
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+ :internal:
+
+Interrupt Handling
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+ :doc: Interrupt Handling
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+ :internal:
+
+GPU Power/Thermal Controls and Monitoring
+=========================================
+
+This section covers hwmon and power/thermal controls.
+
+HWMON Interfaces
+----------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: hwmon
+
+GPU sysfs Power State Interfaces
+--------------------------------
+
+GPU power controls are exposed via sysfs files.
+
+power_dpm_state
+~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: power_dpm_state
+
+power_dpm_force_performance_level
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: power_dpm_force_performance_level
+
+pp_table
+~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: pp_table
+
+pp_od_clk_voltage
+~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: pp_od_clk_voltage
+
+pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
+
+pp_power_profile_mode
+~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: pp_power_profile_mode
+
+busy_percent
+~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+ :doc: busy_percent
diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index f982558fc25d..65be325bf282 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -4,6 +4,7 @@ GPU Driver Documentation
.. toctree::
+ amdgpu
i915
meson
pl111
diff --git a/Documentation/gpu/drm-client.rst b/Documentation/gpu/drm-client.rst
new file mode 100644
index 000000000000..7e672063e7eb
--- /dev/null
+++ b/Documentation/gpu/drm-client.rst
@@ -0,0 +1,12 @@
+=================
+Kernel clients
+=================
+
+.. kernel-doc:: drivers/gpu/drm/drm_client.c
+ :doc: overview
+
+.. kernel-doc:: include/drm/drm_client.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_client.c
+ :export:
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index e37557b30f62..f9cfcdcdf024 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -109,6 +109,15 @@ Framebuffer CMA Helper Functions Reference
.. _drm_bridges:
+Framebuffer GEM Helper Reference
+================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
+ :export:
+
Bridges
=======
@@ -169,6 +178,15 @@ Display Port Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_dp_helper.c
:export:
+Display Port CEC Helper Functions Reference
+===========================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_cec.c
+ :doc: dp cec helpers
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_cec.c
+ :export:
+
Display Port Dual Mode Adaptor Helper Functions Reference
=========================================================
@@ -282,13 +300,13 @@ Auxiliary Modeset Helpers
.. kernel-doc:: drivers/gpu/drm/drm_modeset_helper.c
:export:
-Framebuffer GEM Helper Reference
-================================
+OF/DT Helpers
+=============
-.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_of.c
:doc: overview
-.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_of.c
:export:
Legacy Plane Helper Reference
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 1dffd1ac4cd4..5dee6b8a4c12 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -56,11 +56,12 @@ Overview
The basic object structure KMS presents to userspace is fairly simple.
Framebuffers (represented by :c:type:`struct drm_framebuffer <drm_framebuffer>`,
-see `Frame Buffer Abstraction`_) feed into planes. One or more (or even no)
-planes feed their pixel data into a CRTC (represented by :c:type:`struct
-drm_crtc <drm_crtc>`, see `CRTC Abstraction`_) for blending. The precise
-blending step is explained in more detail in `Plane Composition Properties`_ and
-related chapters.
+see `Frame Buffer Abstraction`_) feed into planes. Planes are represented by
+:c:type:`struct drm_plane <drm_plane>`, see `Plane Abstraction`_ for more
+details. One or more (or even no) planes feed their pixel data into a CRTC
+(represented by :c:type:`struct drm_crtc <drm_crtc>`, see `CRTC Abstraction`_)
+for blending. The precise blending step is explained in more detail in `Plane
+Composition Properties`_ and related chapters.
For the output routing the first step is encoders (represented by
:c:type:`struct drm_encoder <drm_encoder>`, see `Encoder Abstraction`_). Those
@@ -373,6 +374,15 @@ Connector Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_connector.c
:export:
+Writeback Connectors
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_writeback.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_writeback.c
+ :export:
+
Encoder Abstraction
===================
@@ -457,7 +467,7 @@ Output discovery and initialization example
drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
- drm_mode_connector_attach_encoder(&intel_output->base,
+ drm_connector_attach_encoder(&intel_output->base,
&intel_output->enc);
/* Set up the DDC bus. */
@@ -517,6 +527,12 @@ Standard Connector Properties
.. kernel-doc:: drivers/gpu/drm/drm_connector.c
:doc: standard connector properties
+HDMI Specific Connector Properties
+----------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+ :doc: HDMI connector properties
+
Plane Composition Properties
----------------------------
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index b08e9dcd9177..21b6b72a9ba8 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -395,6 +395,8 @@ VMA Offset Manager
.. kernel-doc:: drivers/gpu/drm/drm_vma_manager.c
:export:
+.. _prime_buffer_sharing:
+
PRIME Buffer Sharing
====================
@@ -496,3 +498,21 @@ DRM Sync Objects
.. kernel-doc:: drivers/gpu/drm/drm_syncobj.c
:export:
+
+GPU Scheduler
+=============
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/scheduler/gpu_scheduler.c
+ :doc: Overview
+
+Scheduler Function References
+-----------------------------
+
+.. kernel-doc:: include/drm/gpu_scheduler.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/scheduler/gpu_scheduler.c
+ :export:
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
index 00288f34c5a6..1fcf8e851e15 100644
--- a/Documentation/gpu/index.rst
+++ b/Documentation/gpu/index.rst
@@ -10,6 +10,7 @@ Linux GPU Driver Developer's Guide
drm-kms
drm-kms-helpers
drm-uapi
+ drm-client
drivers
vga-switcheroo
vgaarbiter
diff --git a/Documentation/gpu/kms-properties.csv b/Documentation/gpu/kms-properties.csv
index 07ed22ea3bd6..bfde04eddd14 100644
--- a/Documentation/gpu/kms-properties.csv
+++ b/Documentation/gpu/kms-properties.csv
@@ -17,6 +17,7 @@ Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,De
,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest a Y offset for a connector
,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TDB
+,Optional,"""content type""",ENUM,"{ ""No Data"", ""Graphics"", ""Photo"", ""Cinema"", ""Game"" }",Connector,TBD
i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normaly in the range 0..1.0 are remapped to the range 16/255..235/255."
,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
diff --git a/Documentation/gpu/msm-crash-dump.rst b/Documentation/gpu/msm-crash-dump.rst
new file mode 100644
index 000000000000..757cd257e0d8
--- /dev/null
+++ b/Documentation/gpu/msm-crash-dump.rst
@@ -0,0 +1,96 @@
+=====================
+MSM Crash Dump Format
+=====================
+
+Following a GPU hang, the MSM driver outputs debugging information via
+/sys/kernel/debug/dri/X/show or via devcoredump (/sys/class/devcoredump/dcdX/data).
+This document describes how the output is formatted.
+
+Each entry is in the form key: value. Section headers will not have a value,
+and all the contents of a section will be indented two spaces from the header.
+A section might have multiple array entries, the start of each designated by
+a (-).
+
+Mappings
+--------
+
+kernel
+ The kernel version that generated the dump (UTS_RELEASE).
+
+module
+ The module that generated the crashdump.
+
+time
+ The kernel time at crash formatted as seconds.microseconds.
+
+comm
+ Comm string for the binary that generated the fault.
+
+cmdline
+ Command line for the binary that generated the fault.
+
+revision
+ ID of the GPU that generated the crash formatted as
+ core.major.minor.patchlevel separated by dots.
+
+rbbm-status
+ The current value of RBBM_STATUS which shows what top level GPU
+ components are in use at the time of crash.
+
+ringbuffer
+ Section containing the contents of each ringbuffer. Each ringbuffer is
+ identified with an id number.
+
+ id
+ Ringbuffer ID (0-based index). Each ringbuffer in the section
+ will have its own unique id.
+ iova
+ GPU address of the ringbuffer.
+
+ last-fence
+ The last fence that was issued on the ringbuffer.
+
+ retired-fence
+ The last fence retired on the ringbuffer.
+
+ rptr
+ The current read pointer (rptr) for the ringbuffer.
+
+ wptr
+ The current write pointer (wptr) for the ringbuffer.
+
+ size
+ Maximum size of the ringbuffer programmed in the hardware.
+
+ data
+ The contents of the ring encoded as ascii85. Only the used
+ portions of the ring will be printed.
+
+bo
+ List of buffers from the hanging submission if available.
+ Each buffer object will have a unique iova.
+
+ iova
+ GPU address of the buffer object.
+
+ size
+ Allocated size of the buffer object.
+
+ data
+ The contents of the buffer object encoded with ascii85.
+ Trailing zeros at the end of the buffer will be skipped.
+
+registers
+ Set of register values. Each entry is on its own line, enclosed
+ in brackets { }.
+
+ offset
+ Byte offset of the register from the start of the
+ GPU memory region.
+
+ value
+ Hexadecimal value of the register.
+
+registers-hlsq
+ (5xx only) Register values from the HLSQ aperture.
+ Same format as the register section.
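To make the line format above concrete, here is a hedged userspace sketch that
classifies each line of a dump according to the rules just described (section
header, key/value pair, array entry). The devcoredump path follows the dcdX
naming above; parse_line() is a hypothetical helper, not a kernel API:

.. code-block:: c

    #include <stdio.h>
    #include <string.h>

    static void parse_line(char *line)
    {
        int indent = 0;
        char *colon;

        line[strcspn(line, "\n")] = '\0';
        while (line[indent] == ' ')
            indent++;

        if (line[indent] == '-') {      /* "-" starts a new array entry */
            printf("%*s<array entry>\n", indent, "");
            indent += 2;                /* skip "- " and parse the key */
        }

        colon = strchr(line + indent, ':');
        if (!colon)
            return;
        if (colon[1] == '\0')           /* bare "key:": a section header */
            printf("%*ssection %.*s\n", indent, "",
                   (int)(colon - (line + indent)), line + indent);
        else                            /* ordinary "key: value" entry */
            printf("%*s%.*s = %s\n", indent, "",
                   (int)(colon - (line + indent)), line + indent,
                   colon + 2);
    }

    int main(void)
    {
        char buf[4096];
        FILE *f = fopen("/sys/class/devcoredump/dcd0/data", "r");

        if (!f)
            return 1;
        while (fgets(buf, sizeof(buf), f))
            parse_line(buf);
        fclose(f);
        return 0;
    }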
diff --git a/Documentation/gpu/v3d.rst b/Documentation/gpu/v3d.rst
new file mode 100644
index 000000000000..543f7fbf526e
--- /dev/null
+++ b/Documentation/gpu/v3d.rst
@@ -0,0 +1,28 @@
+=====================================
+ drm/v3d Broadcom V3D Graphics Driver
+=====================================
+
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_drv.c
+ :doc: Broadcom V3D Graphics Driver
+
+GPU buffer object (BO) management
+---------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_bo.c
+ :doc: V3D GEM BO management support
+
+Address space management
+------------------------
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_mmu.c
+ :doc: Broadcom V3D MMU
+
+GPU Scheduling
+--------------
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_sched.c
+ :doc: Broadcom V3D scheduling
+
+Interrupts
+----------
+
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_irq.c
+ :doc: Interrupt management for the V3D engine
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440
index 9ba6587b7657..b2de8fa49273 100644
--- a/Documentation/hwmon/max34440
+++ b/Documentation/hwmon/max34440
@@ -16,6 +16,11 @@ Supported chips:
Prefixes: 'max34446'
Addresses scanned: -
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34446.pdf
+ * Maxim MAX34451
+ PMBus 16-Channel V/I Monitor and 12-Channel Sequencer/Marginer
+ Prefixes: 'max34451'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34451.pdf
* Maxim MAX34460
PMBus 12-Channel Voltage Monitor & Sequencer
Prefix: 'max34460'
@@ -36,9 +41,10 @@ Description
This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel
Power-Supply Manager, MAX34441 PMBus 5-Channel Power-Supply Manager
and Intelligent Fan Controller, and MAX34446 PMBus Power-Supply Data Logger.
-It also supports the MAX34460 and MAX34461 PMBus Voltage Monitor & Sequencers.
-The MAX34460 supports 12 voltage channels, and the MAX34461 supports 16 voltage
-channels.
+It also supports the MAX34451, MAX34460, and MAX34461 PMBus Voltage Monitor &
+Sequencers. The MAX34451 supports monitoring voltage or current of 12 channels
+based on GIN pins. The MAX34460 supports 12 voltage channels, and the MAX34461
+supports 16 voltage channels.
The driver is a client driver to the core PMBus driver. Please see
Documentation/hwmon/pmbus for details on PMBus client drivers.
@@ -93,7 +99,7 @@ curr[1-6]_max Maximum current. From IOUT_OC_WARN_LIMIT register.
curr[1-6]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
curr[1-6]_max_alarm Current high alarm. From IOUT_OC_WARNING status.
curr[1-6]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
-curr[1-4]_average Historical average current (MAX34446 only).
+curr[1-4]_average Historical average current (MAX34446/34451 only).
curr[1-6]_highest Historical maximum current.
curr[1-6]_reset_history Write any value to reset history.
@@ -123,5 +129,7 @@ temp[1-8]_reset_history Write any value to reset history.
temp7 and temp8 attributes only exist for MAX34440.
MAX34446 only supports temp[1-3].
+MAX34451 supports attribute groups in[1-16] (or curr[1-16] based on input pins)
+and temp[1-5].
MAX34460 supports attribute groups in[1-12] and temp[1-5].
MAX34461 supports attribute groups in[1-16] and temp[1-5].
diff --git a/Documentation/hwmon/mlxreg-fan b/Documentation/hwmon/mlxreg-fan
new file mode 100644
index 000000000000..fc531c6978d4
--- /dev/null
+++ b/Documentation/hwmon/mlxreg-fan
@@ -0,0 +1,60 @@
+Kernel driver mlxreg-fan
+========================
+
+Provides fan control for the following Mellanox systems:
+QMB700, equipped with 40x200GbE InfiniBand ports;
+MSN3700, equipped with 32x200GbE or 16x400GbE Ethernet ports;
+MSN3410, equipped with 6x400GbE plus 48x50GbE Ethernet ports;
+MSN3800, equipped with 64x100GbE Ethernet ports;
+These are Top-of-Rack systems, equipped with a Mellanox switch board based
+on Mellanox Quantum or Spectrum-2 devices.
+The fan controller is implemented in the programmable device logic.
+
+The default register offsets within the programmable device are as
+follows:
+- pwm1 0xe3
+- fan1 (tacho1) 0xe4
+- fan2 (tacho2) 0xe5
+- fan3 (tacho3) 0xe6
+- fan4 (tacho4) 0xe7
+- fan5 (tacho5) 0xe8
+- fan6 (tacho6) 0xe9
+- fan7 (tacho7) 0xea
+- fan8 (tacho8) 0xeb
+- fan9 (tacho9) 0xec
+- fan10 (tacho10) 0xed
+- fan11 (tacho11) 0xee
+- fan12 (tacho12) 0xef
+This setup can be re-programmed with other registers.
+
+Author: Vadim Pasternak <vadimp@mellanox.com>
+
+Description
+-----------
+
+The driver implements a simple interface for driving a fan connected to
+a PWM output and tachometer inputs.
+This driver obtains the PWM and tachometer register locations according to
+the system configuration and creates fan/PWM hwmon objects and a cooling
+device. PWM and tachometers are sensed through the on-board programmable
+device, which exports its register map. This device could be attached to
+any bus type for which register mapping is supported.
+A single instance is created with one PWM control, up to 12 tachometers and
+one cooling device. There can be as many instances as the programmable
+device supports.
+The driver exposes the fans to user space through the hwmon and thermal
+sysfs interfaces.
+
+/sys files in hwmon subsystem
+-----------------------------
+
+fan[1-12]_fault - RO files for tachometers TACH1-TACH12 fault indication
+fan[1-12]_input - RO files for tachometers TACH1-TACH12 input (in RPM)
+pwm1 - RW file for fan[1-12] target duty cycle (0..255)
+
+/sys files in thermal subsystem
+-------------------------------
+
+cur_state - RW file for current cooling state of the cooling device
+ (0..max_state)
+max_state - RO file for maximum cooling state of the cooling device
diff --git a/Documentation/hwmon/npcm750-pwm-fan b/Documentation/hwmon/npcm750-pwm-fan
new file mode 100644
index 000000000000..6156ef7398e6
--- /dev/null
+++ b/Documentation/hwmon/npcm750-pwm-fan
@@ -0,0 +1,22 @@
+Kernel driver npcm750-pwm-fan
+=============================
+
+Supported chips:
+ NUVOTON NPCM750/730/715/705
+
+Authors:
+ <tomer.maimon@nuvoton.com>
+
+Description:
+------------
+This driver implements support for the NUVOTON NPCM7XX PWM and fan tacho
+controller. The PWM controller supports up to 8 PWM outputs. The fan tacho
+controller supports up to 16 tachometer inputs.
+
+The driver provides the following sensor accesses in sysfs:
+
+fanX_input ro provides the current fan rotation speed in RPM as
+ reported by the fan to the device.
+
+pwmX rw gets or sets the PWM fan control value. This is an
+ integer value between 0 (off) and 255 (full speed).
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index fc337c317c67..2b9e1005d88b 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -171,6 +171,13 @@ in[0-*]_label Suggested voltage channel label.
user-space.
RO
+in[0-*]_enable
+ Enable or disable the sensor.
+ When disabled, the sensor read will return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
cpu[0-*]_vid CPU core reference voltage.
Unit: millivolt
RO
@@ -236,6 +243,13 @@ fan[1-*]_label Suggested fan channel label.
In all other cases, the label is provided by user-space.
RO
+fan[1-*]_enable
+ Enable or disable the sensor.
+ When disabled, the sensor read will return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
Also see the Alarms section for status flags associated with fans.
@@ -409,6 +423,13 @@ temp_reset_history
Reset temp_lowest and temp_highest for all sensors
WO
+temp[1-*]_enable
+ Enable or disable the sensor.
+ When disabled, the sensor read will return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
Some chips measure temperature using external thermistors and an ADC, and
report the temperature measurement as a voltage. Converting this voltage
back to a temperature (or the other way around for limits) requires
@@ -468,6 +489,13 @@ curr_reset_history
Reset currX_lowest and currX_highest for all sensors
WO
+curr[1-*]_enable
+ Enable or disable the sensor.
+ When disabled, the sensor read will return -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
Also see the Alarms section for status flags associated with currents.
*********
@@ -566,6 +594,13 @@ power[1-*]_crit Critical maximum power.
Unit: microWatt
RW
+power[1-*]_enable Enable or disable the sensor.
+ When disabled, the sensor read will return
+ -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
Also see the Alarms section for status flags associated with power readings.
**********
@@ -576,6 +611,12 @@ energy[1-*]_input Cumulative energy use
Unit: microJoule
RO
+energy[1-*]_enable Enable or disable the sensor.
+ When disabled, the sensor read will return
+ -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
************
* Humidity *
@@ -586,6 +627,13 @@ humidity[1-*]_input Humidity
RO
+humidity[1-*]_enable Enable or disable the sensor.
+ When disabled, the sensor read will return
+ -ENODATA.
+ 1: Enable
+ 0: Disable
+ RW
+
**********
* Alarms *
**********
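A hedged userspace sketch of the new *_enable semantics added above; the
hwmon0/temp1 node names are assumptions and vary per device:

.. code-block:: c

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[32];
        int fd;

        /* Disable the sensor (node names are assumptions). */
        fd = open("/sys/class/hwmon/hwmon0/temp1_enable", O_WRONLY);
        if (fd < 0 || write(fd, "0", 1) != 1)
            return 1;
        close(fd);

        /* Reading a disabled sensor is documented to fail with ENODATA. */
        fd = open("/sys/class/hwmon/hwmon0/temp1_input", O_RDONLY);
        if (fd >= 0 && read(fd, buf, sizeof(buf)) < 0 && errno == ENODATA)
            printf("read on disabled sensor returned ENODATA\n");
        if (fd >= 0)
            close(fd);

        /* Re-enable the sensor. */
        fd = open("/sys/class/hwmon/hwmon0/temp1_enable", O_WRONLY);
        if (fd >= 0 && write(fd, "1", 1) == 1)
            printf("sensor re-enabled\n");
        if (fd >= 0)
            close(fd);
        return 0;
    }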
diff --git a/Documentation/index.rst b/Documentation/index.rst
index fdc585703498..5db7e87c7cb1 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -3,6 +3,8 @@
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
+.. _linux_doc:
+
The Linux Kernel documentation
==============================
@@ -90,6 +92,7 @@ needed).
crypto/index
filesystems/index
vm/index
+ bpf/index
Architecture-specific documentation
-----------------------------------
@@ -102,29 +105,24 @@ implementation.
sh/index
-Korean translations
--------------------
-
-.. toctree::
- :maxdepth: 1
-
- translations/ko_KR/index
+Filesystem Documentation
+------------------------
-Chinese translations
---------------------
+The documentation in this section is provided by specific filesystem
+subprojects.
.. toctree::
- :maxdepth: 1
+ :maxdepth: 2
- translations/zh_CN/index
+ filesystems/ext4/index
-Japanese translations
----------------------
+Translations
+------------
.. toctree::
- :maxdepth: 1
+ :maxdepth: 2
- translations/ja_JP/index
+ translations/index
Indices and tables
==================
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 480c8609dc58..d6ed527985cf 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -166,6 +166,7 @@ Code Seq#(hex) Include File Comments
'P' all linux/soundcard.h conflict!
'P' 60-6F sound/sscape_ioctl.h conflict!
'P' 00-0F drivers/usb/class/usblp.c conflict!
+'P' 01-09 drivers/misc/pci_endpoint_test.c conflict!
'Q' all linux/soundcard.h
'R' 00-1F linux/random.h conflict!
'R' 01 linux/rfkill.h conflict!
@@ -274,6 +275,7 @@ Code Seq#(hex) Include File Comments
'v' 00-1F linux/ext2_fs.h conflict!
'v' 00-1F linux/fs.h conflict!
'v' 00-0F linux/sonypi.h conflict!
+'v' 00-0F media/v4l2-subdev.h conflict!
'v' C0-FF linux/meye.h conflict!
'w' all CERN SCI driver
'y' 00-1F packet based user level communications
diff --git a/Documentation/iostats.txt b/Documentation/iostats.txt
index 04d394a2e06c..49df45f90e8a 100644
--- a/Documentation/iostats.txt
+++ b/Documentation/iostats.txt
@@ -31,6 +31,9 @@ Here are examples of these different formats::
3 0 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160
3 1 hda1 35486 38030 38030 38030
+ 4.18+ diskstats:
+ 3 0 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160 0 0 0 0
+
On 2.4 you might execute ``grep 'hda ' /proc/partitions``. On 2.6+, you have
a choice of ``cat /sys/block/hda/stat`` or ``grep 'hda ' /proc/diskstats``.
@@ -101,6 +104,18 @@ Field 11 -- weighted # of milliseconds spent doing I/Os
last update of this field. This can provide an easy measure of both
I/O completion time and the backlog that may be accumulating.
+Field 12 -- # of discards completed
+ This is the total number of discards completed successfully.
+
+Field 13 -- # of discards merged
+ See the description of field 2.
+
+Field 14 -- # of sectors discarded
+ This is the total number of sectors discarded successfully.
+
+Field 15 -- # of milliseconds spent discarding
+ This is the total number of milliseconds spent by all discards (as
+ measured from __make_request() to end_that_request_last()).
To avoid introducing performance bottlenecks, no locks are held while
modifying these counters. This implies that minor inaccuracies may be
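A hedged C sketch of consuming the new fields: parse each /proc/diskstats
line, accept only the 18-column 4.18+ layout documented above, and print the
four discard counters:

.. code-block:: c

    #include <stdio.h>

    int main(void)
    {
        char line[512], name[32];
        unsigned int major, minor;
        unsigned long long s[15];
        FILE *f = fopen("/proc/diskstats", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f)) {
            /* 3 identifier columns + 15 stat fields = 18 columns */
            if (sscanf(line,
                       "%u %u %31s %llu %llu %llu %llu %llu %llu %llu"
                       " %llu %llu %llu %llu %llu %llu %llu %llu",
                       &major, &minor, name,
                       &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6],
                       &s[7], &s[8], &s[9], &s[10], &s[11], &s[12],
                       &s[13], &s[14]) != 18)
                continue;   /* older kernels expose only 14 columns */
            /* s[11]..s[14] are fields 12-15 described above */
            printf("%s: discards=%llu merged=%llu sectors=%llu ms=%llu\n",
                   name, s[11], s[12], s[13], s[14]);
        }
        fclose(f);
        return 0;
    }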
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 114c7ce7b58d..8390c360d4b3 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -50,6 +50,22 @@ LDFLAGS_MODULE
--------------------------------------------------
Additional options used for $(LD) when linking modules.
+HOSTCFLAGS
+--------------------------------------------------
+Additional flags to be passed to $(HOSTCC) when building host programs.
+
+HOSTCXXFLAGS
+--------------------------------------------------
+Additional flags to be passed to $(HOSTCXX) when building host programs.
+
+HOSTLDFLAGS
+--------------------------------------------------
+Additional flags to be passed when linking host programs.
+
+HOSTLDLIBS
+--------------------------------------------------
+Additional libraries to link against when building host programs.
+
KBUILD_KCONFIG
--------------------------------------------------
Set the top-level Kconfig file to the value of this environment
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 64e0775a62d4..c54cb7cb9ff4 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -370,7 +370,7 @@ choices:
This defines a choice group and accepts any of the above attributes as
options. A choice can only be of type bool or tristate. If no type is
-specified for a choice, it's type will be determined by the type of
+specified for a choice, its type will be determined by the type of
the first choice element in the group or remain unknown if none of the
choice elements have a type specified, as well.
@@ -384,7 +384,7 @@ A choice accepts another option "optional", which allows to set the
choice to 'n' and no entry needs to be selected.
If no [symbol] is associated with a choice, then you can not have multiple
definitions of that choice. If a [symbol] is associated to the choice,
-then you may define the same choice (ie. with the same entries) in another
+then you may define the same choice (i.e. with the same entries) in another
place.
comment:
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 048fc39a6b91..766355b1d221 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -661,7 +661,7 @@ Both possibilities are described in the following.
When compiling host programs, it is possible to set specific flags.
The programs will always be compiled utilising $(HOSTCC) passed
- the options specified in $(HOSTCFLAGS).
+ the options specified in $(KBUILD_HOSTCFLAGS).
To set flags that will take effect for all host programs created
in that Makefile, use the variable HOST_EXTRACFLAGS.
@@ -1105,6 +1105,12 @@ When kbuild executes, the following steps are followed (roughly):
target: source(s) FORCE
#WRONG!# $(call if_changed, ld/objcopy/gzip/...)
+ Note: if_changed should not be used more than once per target.
+ It stores the executed command in a corresponding .cmd file,
+ so multiple calls would overwrite each other's records and
+ cause unwanted results when the target is up to date and
+ only the changed-command test triggers re-execution.
+
ld
Link target. Often, LDFLAGS_$@ is used to set specific options to ld.
diff --git a/Documentation/kernel-hacking/hacking.rst b/Documentation/kernel-hacking/hacking.rst
index 9999c8468293..d824e4feaff3 100644
--- a/Documentation/kernel-hacking/hacking.rst
+++ b/Documentation/kernel-hacking/hacking.rst
@@ -1,3 +1,5 @@
+.. _kernel_hacking_hack:
+
============================================
Unreliable Guide To Hacking The Linux Kernel
============================================
diff --git a/Documentation/kernel-hacking/index.rst b/Documentation/kernel-hacking/index.rst
index fcb0eda3cca3..f53027652290 100644
--- a/Documentation/kernel-hacking/index.rst
+++ b/Documentation/kernel-hacking/index.rst
@@ -1,3 +1,5 @@
+.. _kernel_hacking:
+
=====================
Kernel Hacking Guides
=====================
diff --git a/Documentation/kernel-hacking/locking.rst b/Documentation/kernel-hacking/locking.rst
index f937c0fd11aa..519673df0e82 100644
--- a/Documentation/kernel-hacking/locking.rst
+++ b/Documentation/kernel-hacking/locking.rst
@@ -1,3 +1,5 @@
+.. _kernel_hacking_lock:
+
===========================
Unreliable Guide To Locking
===========================
@@ -177,7 +179,7 @@ perfect world).
Note that you can also use :c:func:`spin_lock_irq()` or
:c:func:`spin_lock_irqsave()` here, which stop hardware interrupts
-as well: see `Hard IRQ Context <#hardirq-context>`__.
+as well: see `Hard IRQ Context <#hard-irq-context>`__.
This works perfectly for UP as well: the spin lock vanishes, and this
macro simply becomes :c:func:`local_bh_disable()`
@@ -228,7 +230,7 @@ The Same Softirq
~~~~~~~~~~~~~~~~
The same softirq can run on the other CPUs: you can use a per-CPU array
-(see `Per-CPU Data <#per-cpu>`__) for better performance. If you're
+(see `Per-CPU Data <#per-cpu-data>`__) for better performance. If you're
going so far as to use a softirq, you probably care about scalable
performance enough to justify the extra complexity.
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index cb3b0de83fc6..10f4499e677c 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -80,6 +80,26 @@ After the instruction is single-stepped, Kprobes executes the
"post_handler," if any, that is associated with the kprobe.
Execution then continues with the instruction following the probepoint.
+Changing Execution Path
+-----------------------
+
+Since kprobes can probe running kernel code, it can change the
+register set, including the instruction pointer. This operation requires
+maximum care, such as keeping the stack frame intact and recovering the
+execution path. Since it operates on a running kernel and needs deep
+knowledge of computer architecture and concurrent computing, you can
+easily shoot yourself in the foot.
+
+If you change the instruction pointer (and set up other related
+registers) in pre_handler, you must return !0 so that kprobes stops
+single-stepping and just returns to the given address.
+This also means the post_handler will not be called anymore.
+
+Note that this operation may be harder on some architectures which use
+a TOC (Table of Contents) for function calls, since you have to set up a
+new TOC for your function in your module, and restore the old one after
+returning from it.
+
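For illustration, a hedged module fragment in the style of samples/kprobes/;
the probed symbol and replacement function are hypothetical, and the sketch
assumes an architecture without a TOC and a target whose return value is
unused:

.. code-block:: c

    #include <linux/kprobes.h>
    #include <linux/module.h>

    /* Hypothetical replacement: runs instead of the probed function and
     * returns to the original caller via the untouched stack frame. */
    static void my_replacement(void)
    {
        pr_info("diverted here instead of the probed function\n");
    }

    static int handler_pre(struct kprobe *p, struct pt_regs *regs)
    {
        instruction_pointer_set(regs, (unsigned long)my_replacement);
        /* Return !0: kprobes skips single-stepping and resumes at the
         * new address; the post_handler is not called. */
        return 1;
    }

    static struct kprobe kp = {
        .symbol_name = "hypothetical_target",   /* assumption */
        .pre_handler = handler_pre,
    };

    static int __init redirect_init(void)
    {
        return register_kprobe(&kp);
    }

    static void __exit redirect_exit(void)
    {
        unregister_kprobe(&kp);
    }

    module_init(redirect_init);
    module_exit(redirect_exit);
    MODULE_LICENSE("GPL");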
Return Probes
-------------
@@ -262,7 +282,7 @@ is optimized, that modification is ignored. Thus, if you want to
tweak the kernel's execution path, you need to suppress optimization,
using one of the following techniques:
-- Specify an empty function for the kprobe's post_handler or break_handler.
+- Specify an empty function for the kprobe's post_handler.
or
@@ -474,7 +494,7 @@ error occurs during registration, all probes in the array, up to
the bad probe, are safely unregistered before the register_*probes
function returns.
-- kps/rps/jps: an array of pointers to ``*probe`` data structures
+- kps/rps: an array of pointers to ``*probe`` data structures
- num: the number of the array entries.
.. note::
@@ -566,12 +586,11 @@ the same handler) may run concurrently on different CPUs.
Kprobes does not use mutexes or allocate memory except during
registration and unregistration.
-Probe handlers are run with preemption disabled. Depending on the
-architecture and optimization state, handlers may also run with
-interrupts disabled (e.g., kretprobe handlers and optimized kprobe
-handlers run without interrupt disabled on x86/x86-64). In any case,
-your handler should not yield the CPU (e.g., by attempting to acquire
-a semaphore).
+Probe handlers are run with preemption disabled or with interrupts
+disabled, depending on the architecture and optimization state (e.g.,
+kretprobe handlers and optimized kprobe handlers run with interrupts
+enabled on x86/x86-64). In any case, your handler should not yield
+the CPU (e.g., by attempting to acquire a semaphore, or waiting for I/O).
Since a return probe is implemented by replacing the return
address with the trampoline's address, stack backtraces and calls
diff --git a/Documentation/locking/ww-mutex-design.txt b/Documentation/locking/ww-mutex-design.txt
index 34c3a1b50b9a..f0ed7c30e695 100644
--- a/Documentation/locking/ww-mutex-design.txt
+++ b/Documentation/locking/ww-mutex-design.txt
@@ -1,4 +1,4 @@
-Wait/Wound Deadlock-Proof Mutex Design
+Wound/Wait Deadlock-Proof Mutex Design
======================================
Please read mutex-design.txt first, as it applies to wait/wound mutexes too.
@@ -32,10 +32,26 @@ the oldest task) wins, and the one with the higher reservation id (i.e. the
younger task) unlocks all of the buffers that it has already locked, and then
tries again.
-In the RDBMS literature this deadlock handling approach is called wait/wound:
-The older tasks waits until it can acquire the contended lock. The younger tasks
-needs to back off and drop all the locks it is currently holding, i.e. the
-younger task is wounded.
+In the RDBMS literature, a reservation ticket is associated with a transaction,
+and the deadlock handling approach is called Wait-Die. The name is based on
+the actions of a locking thread when it encounters an already locked mutex.
+If the transaction holding the lock is younger, the locking transaction waits.
+If the transaction holding the lock is older, the locking transaction backs off
+and dies. Hence Wait-Die.
+There is also another algorithm called Wound-Wait:
+If the transaction holding the lock is younger, the locking transaction
+wounds the transaction holding the lock, requesting it to die.
+If the transaction holding the lock is older, it waits for the other
+transaction. Hence Wound-Wait.
+Both algorithms are fair in that a transaction will eventually succeed.
+However, the Wound-Wait algorithm is typically stated to generate fewer
+backoffs than Wait-Die, but on the other hand requires more work than
+Wait-Die when recovering from a backoff. Wound-Wait is also a preemptive
+algorithm in that transactions are wounded by other transactions, and that
+requires a reliable way to pick up the wounded condition and preempt the
+running transaction. Note that this is not the same as process preemption. A
+Wound-Wait transaction is considered preempted when it dies (returning
+-EDEADLK) following a wound.
Concepts
--------
@@ -47,18 +63,20 @@ Acquire context: To ensure eventual forward progress it is important the a task
trying to acquire locks doesn't grab a new reservation id, but keeps the one it
acquired when starting the lock acquisition. This ticket is stored in the
acquire context. Furthermore the acquire context keeps track of debugging state
-to catch w/w mutex interface abuse.
+to catch w/w mutex interface abuse. An acquire context represents a
+transaction.
W/w class: In contrast to normal mutexes the lock class needs to be explicit for
-w/w mutexes, since it is required to initialize the acquire context.
+w/w mutexes, since it is required to initialize the acquire context. The lock
+class also specifies what algorithm to use, Wound-Wait or Wait-Die.
Furthermore there are three different class of w/w lock acquire functions:
* Normal lock acquisition with a context, using ww_mutex_lock.
-* Slowpath lock acquisition on the contending lock, used by the wounded task
- after having dropped all already acquired locks. These functions have the
- _slow postfix.
+* Slowpath lock acquisition on the contending lock, used by the task that just
+ killed its transaction after having dropped all already acquired locks.
+ These functions have the _slow postfix.
From a simple semantics point-of-view the _slow functions are not strictly
required, since simply calling the normal ww_mutex_lock functions on the
@@ -90,6 +108,12 @@ provided.
Usage
-----
+The algorithm (Wait-Die vs Wound-Wait) is chosen by using either
+DEFINE_WW_CLASS() (Wound-Wait) or DEFINE_WD_CLASS() (Wait-Die).
+As a rough rule of thumb, use Wound-Wait iff you
+expect the number of simultaneous competing transactions to be typically small,
+and you want to reduce the number of rollbacks.
+
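A minimal sketch of the resulting retry loop, assuming a hypothetical lock
class (the ww_mutex calls themselves are the real interface described in this
document):

.. code-block:: c

    #include <linux/kernel.h>
    #include <linux/ww_mutex.h>

    static DEFINE_WD_CLASS(hypo_ww_class);  /* Wait-Die; DEFINE_WW_CLASS()
                                               would select Wound-Wait */

    static void lock_both(struct ww_mutex *a, struct ww_mutex *b)
    {
        struct ww_acquire_ctx ctx;

        ww_acquire_init(&ctx, &hypo_ww_class);

        /* The first lock cannot back off: nothing is held yet. */
        WARN_ON(ww_mutex_lock(a, &ctx));
        while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
            /* We lost against an older transaction: drop what we hold, */
            ww_mutex_unlock(a);
            /* sleep on the contended lock with the _slow variant, */
            ww_mutex_lock_slow(b, &ctx);
            /* and retry, taking the formerly contended lock first. */
            swap(a, b);
        }
        ww_acquire_done(&ctx);
        /* ... critical section; then ww_mutex_unlock() both locks and
         * call ww_acquire_fini(&ctx). */
    }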
Three different ways to acquire locks within the same w/w class. Common
definitions for methods #1 and #2:
@@ -220,7 +244,7 @@ mutexes are a natural fit for such a case for two reasons:
Note that this approach differs in two important ways from the above methods:
- Since the list of objects is dynamically constructed (and might very well be
- different when retrying due to hitting the -EDEADLK wound condition) there's
+ different when retrying due to hitting the -EDEADLK die condition) there's
no need to keep any object on a persistent list when it's not locked. We can
therefore move the list_head into the object itself.
- On the other hand the dynamic object list construction also means that the -EALREADY return
@@ -312,12 +336,23 @@ Design:
We maintain the following invariants for the wait list:
(1) Waiters with an acquire context are sorted by stamp order; waiters
without an acquire context are interspersed in FIFO order.
- (2) Among waiters with contexts, only the first one can have other locks
- acquired already (ctx->acquired > 0). Note that this waiter may come
- after other waiters without contexts in the list.
+ (2) For Wait-Die, among waiters with contexts, only the first one can have
+ other locks acquired already (ctx->acquired > 0). Note that this waiter
+ may come after other waiters without contexts in the list.
+
+ The Wound-Wait preemption is implemented with a lazy-preemption scheme:
+ The wounded status of the transaction is checked only when there is
+ contention for a new lock and hence a true chance of deadlock. In that
+ situation, if the transaction is wounded, it backs off, clears the
+ wounded status and retries. A great benefit of implementing preemption in
+ this way is that the wounded transaction can identify a contending lock to
+ wait for before restarting the transaction. Just blindly restarting the
+ transaction would likely make the transaction end up in a situation where
+ it would have to back off again.
In general, not much contention is expected. The locks are typically used to
- serialize access to resources for devices.
+ serialize access to resources for devices, and optimization focus should
+ therefore be directed towards the uncontended cases.
Lockdep:
Special care has been taken to warn for as many cases of api abuse
diff --git a/Documentation/media/audio.h.rst.exceptions b/Documentation/media/audio.h.rst.exceptions
index f40f3cbfe4c9..940458774cf6 100644
--- a/Documentation/media/audio.h.rst.exceptions
+++ b/Documentation/media/audio.h.rst.exceptions
@@ -1,9 +1,6 @@
# Ignore header name
ignore define _DVBAUDIO_H_
-# Typedef pointing to structs
-replace typedef audio_karaoke_t :c:type:`audio_karaoke`
-
# Undocumented audio caps, as this is a deprecated API anyway
ignore define AUDIO_CAP_DTS
ignore define AUDIO_CAP_LPCM
diff --git a/Documentation/media/media.h.rst.exceptions b/Documentation/media/media.h.rst.exceptions
index 83d7f7c722fb..684fe9c86dee 100644
--- a/Documentation/media/media.h.rst.exceptions
+++ b/Documentation/media/media.h.rst.exceptions
@@ -6,10 +6,10 @@ ignore define MEDIA_API_VERSION
ignore define MEDIA_ENT_F_BASE
ignore define MEDIA_ENT_F_OLD_BASE
ignore define MEDIA_ENT_F_OLD_SUBDEV_BASE
+ignore define MEDIA_ENT_F_DTV_DECODER
ignore define MEDIA_INTF_T_DVB_BASE
ignore define MEDIA_INTF_T_V4L_BASE
ignore define MEDIA_INTF_T_ALSA_BASE
-
#ignore legacy entity type macros
ignore define MEDIA_ENT_TYPE_SHIFT
ignore define MEDIA_ENT_TYPE_MASK
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index b6fd86424fbb..8d5633e6ae04 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -179,6 +179,24 @@ it is guaranteed that the state did change in between the two events.
capability set. When open() is called, the HPD pin can be read and
if the HPD is high, then an initial event will be generated for that
filehandle.
+ * .. _`CEC-EVENT-PIN-5V-LOW`:
+
+ - ``CEC_EVENT_PIN_5V_LOW``
+ - 6
+ - Generated if the 5V pin goes from a high voltage to a low voltage.
+ Only applies to adapters that have the ``CEC_CAP_MONITOR_PIN``
+ capability set. When open() is called, the 5V pin can be read and
+ if the 5V is low, then an initial event will be generated for that
+ filehandle.
+ * .. _`CEC-EVENT-PIN-5V-HIGH`:
+
+ - ``CEC_EVENT_PIN_5V_HIGH``
+ - 7
+ - Generated if the 5V pin goes from a low voltage to a high voltage.
+ Only applies to adapters that have the ``CEC_CAP_MONITOR_PIN``
+ capability set. When open() is called, the 5V pin can be read and
+ if the 5V is high, then an initial event will be generated for that
+ filehandle.
.. tabularcolumns:: |p{6.0cm}|p{0.6cm}|p{10.9cm}|
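A hedged userspace sketch of consuming the new 5V pin events (the /dev/cec0
node name is an assumption):

.. code-block:: c

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/cec.h>

    int main(void)
    {
        struct cec_event ev;
        int fd = open("/dev/cec0", O_RDONLY);

        if (fd < 0)
            return 1;
        /* Blocking CEC_DQEVENT: waits for the next queued event. */
        while (ioctl(fd, CEC_DQEVENT, &ev) == 0) {
            if (ev.event == CEC_EVENT_PIN_5V_HIGH)
                printf("5V pin high at %llu ns\n",
                       (unsigned long long)ev.ts);
            else if (ev.event == CEC_EVENT_PIN_5V_LOW)
                printf("5V pin low at %llu ns\n",
                       (unsigned long long)ev.ts);
        }
        return 0;
    }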
diff --git a/Documentation/media/uapi/dvb/audio-get-pts.rst b/Documentation/media/uapi/dvb/audio-get-pts.rst
deleted file mode 100644
index 2d1396b003de..000000000000
--- a/Documentation/media/uapi/dvb/audio-get-pts.rst
+++ /dev/null
@@ -1,65 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _AUDIO_GET_PTS:
-
-=============
-AUDIO_GET_PTS
-=============
-
-Name
-----
-
-AUDIO_GET_PTS
-
-.. attention:: This ioctl is deprecated
-
-Synopsis
---------
-
-.. c:function:: int ioctl(int fd, AUDIO_GET_PTS, __u64 *pts)
- :name: AUDIO_GET_PTS
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- -
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- -
-
- - __u64 \*pts
-
- - Returns the 33-bit timestamp as defined in ITU T-REC-H.222.0 /
- ISO/IEC 13818-1.
-
- The PTS should belong to the currently played frame if possible,
- but may also be a value close to it like the PTS of the last
- decoded frame or the last PTS extracted by the PES parser.
-
-
-Description
------------
-
-This ioctl is obsolete. Do not use in new drivers. If you need this
-functionality, then please contact the linux-media mailing list
-(`https://linuxtv.org/lists.php <https://linuxtv.org/lists.php>`__).
-
-This ioctl call asks the Audio Device to return the current PTS
-timestamp.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/audio-set-attributes.rst b/Documentation/media/uapi/dvb/audio-set-attributes.rst
deleted file mode 100644
index f0c6153ca80f..000000000000
--- a/Documentation/media/uapi/dvb/audio-set-attributes.rst
+++ /dev/null
@@ -1,67 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _AUDIO_SET_ATTRIBUTES:
-
-====================
-AUDIO_SET_ATTRIBUTES
-====================
-
-Name
-----
-
-AUDIO_SET_ATTRIBUTES
-
-.. attention:: This ioctl is deprecated
-
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, AUDIO_SET_ATTRIBUTES, struct audio_attributes *attr )
- :name: AUDIO_SET_ATTRIBUTES
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- -
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- -
-
- - audio_attributes_t attr
-
- - audio attributes according to section ??
-
-
-Description
------------
-
-This ioctl is intended for DVD playback and allows you to set certain
-information about the audio stream.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - attr is not a valid or supported attribute setting.
diff --git a/Documentation/media/uapi/dvb/audio-set-ext-id.rst b/Documentation/media/uapi/dvb/audio-set-ext-id.rst
deleted file mode 100644
index 8503c47f26bd..000000000000
--- a/Documentation/media/uapi/dvb/audio-set-ext-id.rst
+++ /dev/null
@@ -1,66 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _AUDIO_SET_EXT_ID:
-
-================
-AUDIO_SET_EXT_ID
-================
-
-Name
-----
-
-AUDIO_SET_EXT_ID
-
-.. attention:: This ioctl is deprecated
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, AUDIO_SET_EXT_ID, int id)
- :name: AUDIO_SET_EXT_ID
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- -
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- -
-
- - int id
-
- - audio sub_stream_id
-
-
-Description
------------
-
-This ioctl can be used to set the extension id for MPEG streams in DVD
-playback. Only the first 3 bits are recognized.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - id is not a valid id.
diff --git a/Documentation/media/uapi/dvb/audio-set-karaoke.rst b/Documentation/media/uapi/dvb/audio-set-karaoke.rst
deleted file mode 100644
index c759952d88aa..000000000000
--- a/Documentation/media/uapi/dvb/audio-set-karaoke.rst
+++ /dev/null
@@ -1,66 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _AUDIO_SET_KARAOKE:
-
-=================
-AUDIO_SET_KARAOKE
-=================
-
-Name
-----
-
-AUDIO_SET_KARAOKE
-
-.. attention:: This ioctl is deprecated
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, AUDIO_SET_KARAOKE, struct audio_karaoke *karaoke)
- :name: AUDIO_SET_KARAOKE
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- -
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- -
-
- - audio_karaoke_t \*karaoke
-
- - karaoke settings according to section ??.
-
-
-Description
------------
-
-This ioctl allows one to set the mixer settings for a karaoke DVD.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - karaoke is not a valid or supported karaoke setting.
diff --git a/Documentation/media/uapi/dvb/audio_data_types.rst b/Documentation/media/uapi/dvb/audio_data_types.rst
index 6b93359d64f7..5bffa2c98a24 100644
--- a/Documentation/media/uapi/dvb/audio_data_types.rst
+++ b/Documentation/media/uapi/dvb/audio_data_types.rst
@@ -114,40 +114,3 @@ following bits set according to the hardwares capabilities.
#define AUDIO_CAP_OGG 64
#define AUDIO_CAP_SDDS 128
#define AUDIO_CAP_AC3 256
-
-.. c:type:: audio_karaoke
-
-The ioctl AUDIO_SET_KARAOKE uses the following format:
-
-
-.. code-block:: c
-
- typedef
- struct audio_karaoke {
- int vocal1;
- int vocal2;
- int melody;
- } audio_karaoke_t;
-
-If Vocal1 or Vocal2 are non-zero, they get mixed into left and right t
-at 70% each. If both, Vocal1 and Vocal2 are non-zero, Vocal1 gets mixed
-into the left channel and Vocal2 into the right channel at 100% each. Ff
-Melody is non-zero, the melody channel gets mixed into left and right.
-
-
-.. c:type:: audio_attributes
-
-The following attributes can be set by a call to AUDIO_SET_ATTRIBUTES:
-
-
-.. code-block:: c
-
- typedef uint16_t audio_attributes_t;
- /* bits: descr. */
- /* 15-13 audio coding mode (0=ac3, 2=mpeg1, 3=mpeg2ext, 4=LPCM, 6=DTS, */
- /* 12 multichannel extension */
- /* 11-10 audio type (0=not spec, 1=language included) */
- /* 9- 8 audio application mode (0=not spec, 1=karaoke, 2=surround) */
- /* 7- 6 Quantization / DRC (mpeg audio: 1=DRC exists)(lpcm: 0=16bit, */
- /* 5- 4 Sample frequency fs (0=48kHz, 1=96kHz) */
- /* 2- 0 number of audio channels (n+1 channels) */
diff --git a/Documentation/media/uapi/dvb/audio_function_calls.rst b/Documentation/media/uapi/dvb/audio_function_calls.rst
index 0bb56f0cfed4..7dba16285dab 100644
--- a/Documentation/media/uapi/dvb/audio_function_calls.rst
+++ b/Documentation/media/uapi/dvb/audio_function_calls.rst
@@ -22,13 +22,9 @@ Audio Function Calls
audio-set-bypass-mode
audio-channel-select
audio-bilingual-channel-select
- audio-get-pts
audio-get-status
audio-get-capabilities
audio-clear-buffer
audio-set-id
audio-set-mixer
audio-set-streamtype
- audio-set-ext-id
- audio-set-attributes
- audio-set-karaoke
diff --git a/Documentation/media/uapi/dvb/video-get-frame-rate.rst b/Documentation/media/uapi/dvb/video-get-frame-rate.rst
deleted file mode 100644
index 400042a854cf..000000000000
--- a/Documentation/media/uapi/dvb/video-get-frame-rate.rst
+++ /dev/null
@@ -1,61 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_GET_FRAME_RATE:
-
-====================
-VIDEO_GET_FRAME_RATE
-====================
-
-Name
-----
-
-VIDEO_GET_FRAME_RATE
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(int fd, VIDEO_GET_FRAME_RATE, unsigned int *rate)
- :name: VIDEO_GET_FRAME_RATE
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_GET_FRAME_RATE for this command.
-
- - .. row 3
-
- - unsigned int \*rate
-
- - Returns the framerate in number of frames per 1000 seconds.
-
-
-Description
------------
-
-This ioctl call asks the Video Device to return the current framerate.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/video-get-navi.rst b/Documentation/media/uapi/dvb/video-get-navi.rst
deleted file mode 100644
index 114a9ac48b9e..000000000000
--- a/Documentation/media/uapi/dvb/video-get-navi.rst
+++ /dev/null
@@ -1,84 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_GET_NAVI:
-
-==============
-VIDEO_GET_NAVI
-==============
-
-Name
-----
-
-VIDEO_GET_NAVI
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, VIDEO_GET_NAVI , struct video_navi_pack *navipack)
- :name: VIDEO_GET_NAVI
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_GET_NAVI for this command.
-
- - .. row 3
-
- - video_navi_pack_t \*navipack
-
- - PCI or DSI pack (private stream 2) according to section ??.
-
-
-Description
------------
-
-This ioctl returns navigational information from the DVD stream. This is
-especially needed if an encoded stream has to be decoded by the
-hardware.
-
-.. c:type:: video_navi_pack
-
-.. code-block::c
-
- typedef struct video_navi_pack {
- int length; /* 0 ... 1024 */
- __u8 data[1024];
- } video_navi_pack_t;
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EFAULT``
-
- - driver is not able to return navigational information
diff --git a/Documentation/media/uapi/dvb/video-set-attributes.rst b/Documentation/media/uapi/dvb/video-set-attributes.rst
deleted file mode 100644
index b2f11a6746e9..000000000000
--- a/Documentation/media/uapi/dvb/video-set-attributes.rst
+++ /dev/null
@@ -1,93 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_SET_ATTRIBUTES:
-
-====================
-VIDEO_SET_ATTRIBUTES
-====================
-
-Name
-----
-
-VIDEO_SET_ATTRIBUTES
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, VIDEO_SET_ATTRIBUTE ,video_attributes_t vattr)
- :name: VIDEO_SET_ATTRIBUTE
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_SET_ATTRIBUTE for this command.
-
- - .. row 3
-
- - video_attributes_t vattr
-
- - video attributes according to section ??.
-
-
-Description
------------
-
-This ioctl is intended for DVD playback and allows you to set certain
-information about the stream. Some hardware may not need this
-information, but the call also tells the hardware to prepare for DVD
-playback.
-
-.. c:type:: video_attributes_t
-
-.. code-block::c
-
- typedef __u16 video_attributes_t;
- /* bits: descr. */
- /* 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
- /* 13-12 TV system (0=525/60, 1=625/50) */
- /* 11-10 Aspect ratio (0=4:3, 3=16:9) */
- /* 9- 8 permitted display mode on 4:3 monitor (0=both, 1=only pan-sca */
- /* 7 line 21-1 data present in GOP (1=yes, 0=no) */
- /* 6 line 21-2 data present in GOP (1=yes, 0=no) */
- /* 5- 3 source resolution (0=720x480/576, 1=704x480/576, 2=352x480/57 */
- /* 2 source letterboxed (1=yes, 0=no) */
- /* 0 film/camera mode (0=camera, 1=film (625/50 only)) */
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - input is not a valid attribute setting.
diff --git a/Documentation/media/uapi/dvb/video-set-highlight.rst b/Documentation/media/uapi/dvb/video-set-highlight.rst
deleted file mode 100644
index 90aeafd923b7..000000000000
--- a/Documentation/media/uapi/dvb/video-set-highlight.rst
+++ /dev/null
@@ -1,86 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_SET_HIGHLIGHT:
-
-===================
-VIDEO_SET_HIGHLIGHT
-===================
-
-Name
-----
-
-VIDEO_SET_HIGHLIGHT
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, VIDEO_SET_HIGHLIGHT, struct video_highlight *vhilite)
- :name: VIDEO_SET_HIGHLIGHT
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_SET_HIGHLIGHT for this command.
-
- - .. row 3
-
- - video_highlight_t \*vhilite
-
- - SPU Highlight information according to section ??.
-
-
-Description
------------
-
-This ioctl sets the SPU highlight information for the menu access of a
-DVD.
-
-.. c:type:: video_highlight
-
-.. code-block:: c
-
- typedef
- struct video_highlight {
- int active; /* 1=show highlight, 0=hide highlight */
- __u8 contrast1; /* 7- 4 Pattern pixel contrast */
- /* 3- 0 Background pixel contrast */
- __u8 contrast2; /* 7- 4 Emphasis pixel-2 contrast */
- /* 3- 0 Emphasis pixel-1 contrast */
- __u8 color1; /* 7- 4 Pattern pixel color */
- /* 3- 0 Background pixel color */
- __u8 color2; /* 7- 4 Emphasis pixel-2 color */
- /* 3- 0 Emphasis pixel-1 color */
- __u32 ypos; /* 23-22 auto action mode */
- /* 21-12 start y */
- /* 9- 0 end y */
- __u32 xpos; /* 23-22 button color number */
- /* 21-12 start x */
- /* 9- 0 end x */
- } video_highlight_t;
-
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/video-set-id.rst b/Documentation/media/uapi/dvb/video-set-id.rst
deleted file mode 100644
index 18f66875ae3f..000000000000
--- a/Documentation/media/uapi/dvb/video-set-id.rst
+++ /dev/null
@@ -1,75 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_SET_ID:
-
-============
-VIDEO_SET_ID
-============
-
-Name
-----
-
-VIDEO_SET_ID
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(int fd, VIDEO_SET_ID, int id)
- :name: VIDEO_SET_ID
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_SET_ID for this command.
-
- - .. row 3
-
- - int id
-
- - video sub-stream id
-
-
-Description
------------
-
-This ioctl selects which sub-stream is to be decoded if a program or
-system stream is sent to the video device.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - Invalid sub-stream id.
diff --git a/Documentation/media/uapi/dvb/video-set-spu-palette.rst b/Documentation/media/uapi/dvb/video-set-spu-palette.rst
deleted file mode 100644
index 51a1913d21d2..000000000000
--- a/Documentation/media/uapi/dvb/video-set-spu-palette.rst
+++ /dev/null
@@ -1,82 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_SET_SPU_PALETTE:
-
-=====================
-VIDEO_SET_SPU_PALETTE
-=====================
-
-Name
-----
-
-VIDEO_SET_SPU_PALETTE
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, VIDEO_SET_SPU_PALETTE, struct video_spu_palette *palette )
- :name: VIDEO_SET_SPU_PALETTE
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_SET_SPU_PALETTE for this command.
-
- - .. row 3
-
- - video_spu_palette_t \*palette
-
- - SPU palette according to section ??.
-
-
-Description
------------
-
-This ioctl sets the SPU color palette.
-
-.. c:type:: video_spu_palette
-
-.. code-block::c
-
- typedef struct video_spu_palette { /* SPU Palette information */
- int length;
- __u8 __user *palette;
- } video_spu_palette_t;
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - input is not a valid palette or driver doesn’t handle SPU.
diff --git a/Documentation/media/uapi/dvb/video-set-spu.rst b/Documentation/media/uapi/dvb/video-set-spu.rst
deleted file mode 100644
index 739e5e7bd133..000000000000
--- a/Documentation/media/uapi/dvb/video-set-spu.rst
+++ /dev/null
@@ -1,85 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_SET_SPU:
-
-=============
-VIDEO_SET_SPU
-=============
-
-Name
-----
-
-VIDEO_SET_SPU
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, VIDEO_SET_SPU , struct video_spu *spu)
- :name: VIDEO_SET_SPU
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_SET_SPU for this command.
-
- - .. row 3
-
- - video_spu_t \*spu
-
- - SPU decoding (de)activation and subid setting according to section
- ??.
-
-
-Description
------------
-
-This ioctl activates or deactivates SPU decoding in a DVD input stream.
-It can only be used, if the driver is able to handle a DVD stream.
-
-.. c:type:: struct video_spu
-
-.. code-block:: c
-
- typedef struct video_spu {
- int active;
- int stream_id;
- } video_spu_t;
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - input is not a valid spu setting or driver cannot handle SPU.
diff --git a/Documentation/media/uapi/dvb/video-set-system.rst b/Documentation/media/uapi/dvb/video-set-system.rst
deleted file mode 100644
index e39cbe080ef7..000000000000
--- a/Documentation/media/uapi/dvb/video-set-system.rst
+++ /dev/null
@@ -1,77 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _VIDEO_SET_SYSTEM:
-
-================
-VIDEO_SET_SYSTEM
-================
-
-Name
-----
-
-VIDEO_SET_SYSTEM
-
-.. attention:: This ioctl is deprecated.
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, VIDEO_SET_SYSTEM , video_system_t system)
- :name: VIDEO_SET_SYSTEM
-
-
-Arguments
----------
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - int fd
-
- - File descriptor returned by a previous call to open().
-
- - .. row 2
-
- - int request
-
- - Equals VIDEO_SET_FORMAT for this command.
-
- - .. row 3
-
- - video_system_t system
-
- - video system of TV output.
-
-
-Description
------------
-
-This ioctl sets the television output format. The format (see section
-??) may vary from the color format of the displayed MPEG stream. If the
-hardware is not able to display the requested format the call will
-return an error.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EINVAL``
-
- - system is not a valid or supported video system.
diff --git a/Documentation/media/uapi/dvb/video_function_calls.rst b/Documentation/media/uapi/dvb/video_function_calls.rst
index 68588ac7fecb..3f4f6c9ffad7 100644
--- a/Documentation/media/uapi/dvb/video_function_calls.rst
+++ b/Documentation/media/uapi/dvb/video_function_calls.rst
@@ -21,7 +21,6 @@ Video Function Calls
video-get-status
video-get-frame-count
video-get-pts
- video-get-frame-rate
video-get-event
video-command
video-try-command
@@ -31,13 +30,7 @@ Video Function Calls
video-fast-forward
video-slowmotion
video-get-capabilities
- video-set-id
video-clear-buffer
video-set-streamtype
video-set-format
- video-set-system
- video-set-highlight
- video-set-spu
- video-set-spu-palette
- video-get-navi
video-set-attributes
diff --git a/Documentation/media/uapi/dvb/video_types.rst b/Documentation/media/uapi/dvb/video_types.rst
index 640a21de6b8a..a0942171596c 100644
--- a/Documentation/media/uapi/dvb/video_types.rst
+++ b/Documentation/media/uapi/dvb/video_types.rst
@@ -246,134 +246,3 @@ following bits set according to the hardwares capabilities.
#define VIDEO_CAP_SPU 16
#define VIDEO_CAP_NAVI 32
#define VIDEO_CAP_CSS 64
-
-
-.. _video-system:
-
-video_system_t
-==============
-
-A call to VIDEO_SET_SYSTEM sets the desired video system for TV
-output. The following system types can be set:
-
-
-.. code-block:: c
-
- typedef enum {
- VIDEO_SYSTEM_PAL,
- VIDEO_SYSTEM_NTSC,
- VIDEO_SYSTEM_PALN,
- VIDEO_SYSTEM_PALNc,
- VIDEO_SYSTEM_PALM,
- VIDEO_SYSTEM_NTSC60,
- VIDEO_SYSTEM_PAL60,
- VIDEO_SYSTEM_PALM60
- } video_system_t;
-
-
-.. c:type:: video_highlight
-
-struct video_highlight
-======================
-
-Calling the ioctl VIDEO_SET_HIGHLIGHTS posts the SPU highlight
-information. The call expects the following format for that information:
-
-
-.. code-block:: c
-
- typedef
- struct video_highlight {
- boolean active; /* 1=show highlight, 0=hide highlight */
- uint8_t contrast1; /* 7- 4 Pattern pixel contrast */
- /* 3- 0 Background pixel contrast */
- uint8_t contrast2; /* 7- 4 Emphasis pixel-2 contrast */
- /* 3- 0 Emphasis pixel-1 contrast */
- uint8_t color1; /* 7- 4 Pattern pixel color */
- /* 3- 0 Background pixel color */
- uint8_t color2; /* 7- 4 Emphasis pixel-2 color */
- /* 3- 0 Emphasis pixel-1 color */
- uint32_t ypos; /* 23-22 auto action mode */
- /* 21-12 start y */
- /* 9- 0 end y */
- uint32_t xpos; /* 23-22 button color number */
- /* 21-12 start x */
- /* 9- 0 end x */
- } video_highlight_t;
-
-
-.. c:type:: video_spu
-
-struct video_spu
-================
-
-Calling VIDEO_SET_SPU deactivates or activates SPU decoding, according
-to the following format:
-
-
-.. code-block:: c
-
- typedef
- struct video_spu {
- boolean active;
- int stream_id;
- } video_spu_t;
-
-
-.. c:type:: video_spu_palette
-
-struct video_spu_palette
-========================
-
-The following structure is used to set the SPU palette by calling
-VIDEO_SPU_PALETTE:
-
-
-.. code-block:: c
-
- typedef
- struct video_spu_palette {
- int length;
- uint8_t *palette;
- } video_spu_palette_t;
-
-
-.. c:type:: video_navi_pack
-
-struct video_navi_pack
-======================
-
-In order to get the navigational data the following structure has to be
-passed to the ioctl VIDEO_GET_NAVI:
-
-
-.. code-block:: c
-
- typedef
- struct video_navi_pack {
- int length; /* 0 ... 1024 */
- uint8_t data[1024];
- } video_navi_pack_t;
-
-
-.. _video-attributes-t:
-
-video_attributes_t
-==================
-
-The following attributes can be set by a call to VIDEO_SET_ATTRIBUTES:
-
-
-.. code-block:: c
-
- typedef uint16_t video_attributes_t;
- /* bits: descr. */
- /* 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
- /* 13-12 TV system (0=525/60, 1=625/50) */
- /* 11-10 Aspect ratio (0=4:3, 3=16:9) */
- /* 9- 8 permitted display mode on 4:3 monitor (0=both, 1=only pan-sca */
- /* 7 line 21-1 data present in GOP (1=yes, 0=no) */
- /* 6 line 21-2 data present in GOP (1=yes, 0=no) */
- /* 5- 3 source resolution (0=720x480/576, 1=704x480/576, 2=352x480/57 */
- /* 2 source letterboxed (1=yes, 0=no) */
- /* 0 film/camera mode (0=camera, 1=film (625/50 only)) */
diff --git a/Documentation/media/uapi/mediactl/media-ioc-device-info.rst b/Documentation/media/uapi/mediactl/media-ioc-device-info.rst
index f690f9afc470..649cb3d9e058 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-device-info.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-device-info.rst
@@ -48,12 +48,8 @@ ioctl never fails.
:widths: 1 1 2
- - .. row 1
-
- - char
-
+ * - char
- ``driver``\ [16]
-
- Name of the driver implementing the media API as a NUL-terminated
ASCII string. The driver version is stored in the
``driver_version`` field.
@@ -62,66 +58,38 @@ ioctl never fails.
the driver identity. It is also useful to work around known bugs,
or to identify drivers in error reports.
- - .. row 2
-
- - char
-
+ * - char
- ``model``\ [32]
-
- Device model name as a NUL-terminated UTF-8 string. The device
version is stored in the ``device_version`` field and is not
appended to the model name.
- - .. row 3
-
- - char
-
+ * - char
- ``serial``\ [40]
-
- Serial number as a NUL-terminated ASCII string.
- - .. row 4
-
- - char
-
+ * - char
- ``bus_info``\ [32]
-
- Location of the device in the system as a NUL-terminated ASCII
string. This includes the bus type name (PCI, USB, ...) and a
bus-specific identifier.
- - .. row 5
-
- - __u32
-
+ * - __u32
- ``media_version``
-
- Media API version, formatted with the ``KERNEL_VERSION()`` macro.
- - .. row 6
-
- - __u32
-
+ * - __u32
- ``hw_revision``
-
- Hardware device revision in a driver-specific format.
- - .. row 7
-
- - __u32
-
+ * - __u32
- ``driver_version``
-
- Media device driver version, formatted with the
``KERNEL_VERSION()`` macro. Together with the ``driver`` field
this identifies a particular driver.
- - .. row 8
-
- - __u32
-
+ * - __u32
- ``reserved``\ [31]
-
- Reserved for future extensions. Drivers and applications must set
this array to zero.
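
For context, a minimal user-space sketch of querying these fields might look
as follows; the ``/dev/media0`` path and the bare-bones error handling are
illustrative assumptions, not part of the documented API.

.. code-block:: c

    /* Hedged sketch: query and print the media device information. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/media.h>

    int main(void)
    {
        struct media_device_info info;
        int fd = open("/dev/media0", O_RDWR); /* assumed device path */

        if (fd < 0)
            return 1;

        memset(&info, 0, sizeof(info));
        if (ioctl(fd, MEDIA_IOC_DEVICE_INFO, &info) == 0)
            printf("driver %s, model %s, media API %u.%u.%u\n",
                   info.driver, info.model,
                   (info.media_version >> 16) & 0xff,
                   (info.media_version >> 8) & 0xff,
                   info.media_version & 0xff);
        return 0;
    }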
diff --git a/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst b/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
index 582fda488810..fc2e39c070c9 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
@@ -58,142 +58,90 @@ id's until they get an error.
:stub-columns: 0
:widths: 1 1 1 1 8
-
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``id``
-
-
-
- - Entity id, set by the application. When the id is or'ed with
+ - Entity ID, set by the application. When the ID is or'ed with
``MEDIA_ENT_ID_FLAG_NEXT``, the driver clears the flag and returns
- the first entity with a larger id.
-
- - .. row 2
-
- - char
+ the first entity with a larger ID. Do not expect that the ID will
+ always be the same for each instance of the device. In other words,
+ do not hardcode entity IDs in an application.
+ * - char
- ``name``\ [32]
-
-
-
- - Entity name as an UTF-8 NULL-terminated string.
-
- - .. row 3
-
- - __u32
+ - Entity name as a UTF-8 NULL-terminated string. This name must be unique
+ within the media topology.
+ * - __u32
- ``type``
-
-
-
- Entity type, see :ref:`media-entity-functions` for details.
- - .. row 4
-
- - __u32
-
+ * - __u32
- ``revision``
-
-
-
- Entity revision. Always zero (obsolete)
- - .. row 5
-
- - __u32
-
+ * - __u32
- ``flags``
-
-
-
- Entity flags, see :ref:`media-entity-flag` for details.
- - .. row 6
-
- - __u32
-
+ * - __u32
- ``group_id``
-
-
-
- Entity group ID. Always zero (obsolete)
- - .. row 7
-
- - __u16
-
+ * - __u16
- ``pads``
-
-
-
- Number of pads
- - .. row 8
-
- - __u16
-
+ * - __u16
- ``links``
-
-
-
- Total number of outbound links. Inbound links are not counted in
this field.
- - .. row 9
-
- - __u32
-
+ * - __u32
- ``reserved[4]``
-
-
-
- Reserved for future extensions. Drivers and applications must set
the array to zero.
- - .. row 10
-
- - union
-
- - .. row 11
+ * - union
- -
+ * -
- struct
-
- ``dev``
-
-
- Valid for (sub-)devices that create a single device node.
- - .. row 12
-
- -
+ * -
-
- __u32
-
- ``major``
-
- Device node major number.
- - .. row 13
-
- -
+ * -
-
- __u32
-
- ``minor``
-
- Device node minor number.
- - .. row 14
-
- -
+ * -
- __u8
-
- ``raw``\ [184]
-
-
-
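
To make the enumeration pattern above concrete, here is a minimal sketch
(assuming an open media device descriptor ``fd`` and the usual headers); note
how ``MEDIA_ENT_ID_FLAG_NEXT`` lets the loop avoid hardcoding entity IDs:

.. code-block:: c

    /* Sketch: walk all entities without hardcoding their IDs. */
    struct media_entity_desc desc;
    __u32 id = 0;

    for (;;) {
        memset(&desc, 0, sizeof(desc));
        desc.id = id | MEDIA_ENT_ID_FLAG_NEXT;
        if (ioctl(fd, MEDIA_IOC_ENUM_ENTITIES, &desc) < 0)
            break; /* no entity with a larger ID is left */
        printf("entity %u: %s (%u pads, %u links)\n",
               desc.id, desc.name, desc.pads, desc.links);
        id = desc.id;
    }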
diff --git a/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst b/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst
index 256168b3c3be..f158c134e9b0 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst
@@ -62,35 +62,21 @@ returned during the enumeration process.
:stub-columns: 0
:widths: 1 1 2
-
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``entity``
-
- Entity id, set by the application.
- - .. row 2
-
- - struct :c:type:`media_pad_desc`
-
+ * - struct :c:type:`media_pad_desc`
- \*\ ``pads``
-
- Pointer to a pads array allocated by the application. Ignored if
NULL.
- - .. row 3
-
- - struct :c:type:`media_link_desc`
-
+ * - struct :c:type:`media_link_desc`
- \*\ ``links``
-
- Pointer to a links array allocated by the application. Ignored if
NULL.
-
.. c:type:: media_pad_desc
.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
@@ -100,37 +86,20 @@ returned during the enumeration process.
:stub-columns: 0
:widths: 1 1 2
-
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``entity``
-
- ID of the entity this pad belongs to.
- - .. row 2
-
- - __u16
-
+ * - __u16
- ``index``
+ - Pad index, starts at 0.
- - 0-based pad index.
-
- - .. row 3
-
- - __u32
-
+ * - __u32
- ``flags``
-
- Pad flags, see :ref:`media-pad-flag` for more details.
- - .. row 4
-
- - __u32
-
+ * - __u32
- ``reserved[2]``
-
- Reserved for future extensions. Drivers and applications must set
the array to zero.
@@ -145,37 +114,20 @@ returned during the enumeration process.
:stub-columns: 0
:widths: 1 1 2
-
- - .. row 1
-
- - struct :c:type:`media_pad_desc`
-
+ * - struct :c:type:`media_pad_desc`
- ``source``
-
- Pad at the origin of this link.
- - .. row 2
-
- - struct :c:type:`media_pad_desc`
-
+ * - struct :c:type:`media_pad_desc`
- ``sink``
-
- Pad at the target of this link.
- - .. row 3
-
- - __u32
-
+ * - __u32
- ``flags``
-
- Link flags, see :ref:`media-link-flag` for more details.
- - .. row 4
-
- - __u32
-
+ * - __u32
- ``reserved[4]``
-
- Reserved for future extensions. Drivers and applications must set
the array to zero.
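
A short sketch of the allocation contract described above; the fixed array
sizes here are illustrative assumptions, and a real application would size
them from the ``pads`` and ``links`` counts returned by
MEDIA_IOC_ENUM_ENTITIES:

.. code-block:: c

    /* Sketch: fetch the pads and outbound links of one entity. */
    struct media_links_enum links_enum;
    struct media_pad_desc pads[16];   /* assumed upper bound */
    struct media_link_desc links[32]; /* assumed upper bound */

    memset(&links_enum, 0, sizeof(links_enum));
    links_enum.entity = entity_id;    /* ID from a prior enumeration */
    links_enum.pads = pads;
    links_enum.links = links;
    if (ioctl(fd, MEDIA_IOC_ENUM_LINKS, &links_enum) == 0)
        printf("pad 0 flags: 0x%x\n", pads[0].flags); /* assumes >= 1 pad */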
diff --git a/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst b/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
index c4055ddf070a..bac128c7eda9 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
@@ -55,119 +55,66 @@ desired arrays with the media graph elements.
:stub-columns: 0
:widths: 1 2 8
-
- - .. row 1
-
- - __u64
-
+ * - __u64
- ``topology_version``
-
- Version of the media graph topology. When the graph is created,
this field starts with zero. Every time a graph element is added
or removed, this field is incremented.
- - .. row 2
-
- - __u32
-
+ * - __u32
- ``num_entities``
-
- Number of entities in the graph
- - .. row 3
-
- - __u32
-
+ * - __u32
- ``reserved1``
-
- Applications and drivers shall set this to 0.
- - .. row 4
-
- - __u64
-
+ * - __u64
- ``ptr_entities``
-
- A pointer to a memory area where the entities array will be
stored, converted to a 64-bit integer. It can be zero; if zero,
the ioctl won't store the entities. It will just update
``num_entities``
- - .. row 5
-
- - __u32
-
+ * - __u32
- ``num_interfaces``
-
- Number of interfaces in the graph
- - .. row 6
-
- - __u32
-
+ * - __u32
- ``reserved2``
-
- Applications and drivers shall set this to 0.
- - .. row 7
-
- - __u64
-
+ * - __u64
- ``ptr_interfaces``
-
- A pointer to a memory area where the interfaces array will be
stored, converted to a 64-bit integer. It can be zero; if zero,
the ioctl won't store the interfaces. It will just update
``num_interfaces``
- - .. row 8
-
- - __u32
-
+ * - __u32
- ``num_pads``
-
- Total number of pads in the graph
- - .. row 9
-
- - __u32
-
+ * - __u32
- ``reserved3``
-
- Applications and drivers shall set this to 0.
- - .. row 10
-
- - __u64
-
+ * - __u64
- ``ptr_pads``
-
- A pointer to a memory area where the pads array will be stored,
converted to a 64-bit integer. It can be zero; if zero, the ioctl
won't store the pads. It will just update ``num_pads``
- - .. row 11
-
- - __u32
-
+ * - __u32
- ``num_links``
-
- Total number of data and interface links in the graph
- - .. row 12
-
- - __u32
-
+ * - __u32
- ``reserved4``
-
- Applications and drivers shall set this to 0.
- - .. row 13
-
- - __u64
-
+ * - __u64
- ``ptr_links``
-
- A pointer to a memory area where the links array will be stored,
converted to a 64-bit integer. It can be zero; if zero, the ioctl
won't store the links. It will just update ``num_links``
@@ -182,37 +129,31 @@ desired arrays with the media graph elements.
:stub-columns: 0
:widths: 1 2 8
-
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``id``
+ - Unique ID for the entity. Do not expect that the ID will
+ always be the same for each instance of the device. In other words,
+ do not hardcode entity IDs in an application.
- - Unique ID for the entity.
-
- - .. row 2
-
- - char
-
+ * - char
- ``name``\ [64]
+ - Entity name as a UTF-8 NULL-terminated string. This name must be unique
+ within the media topology.
- - Entity name as an UTF-8 NULL-terminated string.
-
- - .. row 3
-
- - __u32
-
+ * - __u32
- ``function``
-
- Entity main function, see :ref:`media-entity-functions` for details.
- - .. row 4
-
- - __u32
-
- - ``reserved``\ [6]
+ * - __u32
+ - ``flags``
+ - Entity flags, see :ref:`media-entity-flag` for details.
+ Only valid if ``MEDIA_V2_ENTITY_HAS_FLAGS(media_version)``
+ returns true. The ``media_version`` is defined in struct
+ :c:type:`media_device_info` and can be retrieved using
+ :ref:`MEDIA_IOC_DEVICE_INFO`.
+ * - __u32
+ - ``reserved``\ [5]
- Reserved for future extensions. Drivers and applications must set
this array to zero.
@@ -226,47 +167,29 @@ desired arrays with the media graph elements.
:stub-columns: 0
:widths: 1 2 8
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``id``
+ - Unique ID for the interface. Do not expect that the ID will
+ always be the same for each instance of the device. In other words,
+ do not hardcode interface IDs in an application.
- - Unique ID for the interface.
-
- - .. row 2
-
- - __u32
-
+ * - __u32
- ``intf_type``
-
- Interface type, see :ref:`media-intf-type` for details.
- - .. row 3
-
- - __u32
-
+ * - __u32
- ``flags``
-
- Interface flags. Currently unused.
- - .. row 4
-
- - __u32
-
+ * - __u32
- ``reserved``\ [9]
-
- Reserved for future extensions. Drivers and applications must set
this array to zero.
- - .. row 5
-
- - struct media_v2_intf_devnode
-
+ * - struct media_v2_intf_devnode
- ``devnode``
-
- Used only for device node interfaces. See
- :c:type:`media_v2_intf_devnode` for details..
+ :c:type:`media_v2_intf_devnode` for details.
.. tabularcolumns:: |p{1.6cm}|p{3.2cm}|p{12.7cm}|
@@ -278,24 +201,14 @@ desired arrays with the media graph elements.
:stub-columns: 0
:widths: 1 2 8
-
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``major``
-
- Device node major number.
- - .. row 2
-
- - __u32
-
+ * - __u32
- ``minor``
-
- Device node minor number.
-
.. tabularcolumns:: |p{1.6cm}|p{3.2cm}|p{12.7cm}|
.. c:type:: media_v2_pad
@@ -305,37 +218,29 @@ desired arrays with the media graph elements.
:stub-columns: 0
:widths: 1 2 8
-
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``id``
+ - Unique ID for the pad. Do not expect that the ID will
+ always be the same for each instance of the device. In other words,
+ do not hardcode pad IDs in an application.
- - Unique ID for the pad.
-
- - .. row 2
-
- - __u32
-
+ * - __u32
- ``entity_id``
-
- Unique ID for the entity where this pad belongs.
- - .. row 3
-
- - __u32
-
+ * - __u32
- ``flags``
-
- Pad flags, see :ref:`media-pad-flag` for more details.
- - .. row 4
-
- - __u32
-
- - ``reserved``\ [5]
+ * - __u32
+ - ``index``
+ - Pad index, starts at 0. Only valid if ``MEDIA_V2_PAD_HAS_INDEX(media_version)``
+ returns true. The ``media_version`` is defined in struct
+ :c:type:`media_device_info` and can be retrieved using
+ :ref:`MEDIA_IOC_DEVICE_INFO`.
+ * - __u32
+ - ``reserved``\ [4]
- Reserved for future extensions. Drivers and applications must set
this array to zero.
@@ -349,49 +254,30 @@ desired arrays with the media graph elements.
:stub-columns: 0
:widths: 1 2 8
-
- - .. row 1
-
- - __u32
-
+ * - __u32
- ``id``
+ - Unique ID for the link. Do not expect that the ID will
+ always be the same for each instance of the device. In other words,
+ do not hardcode link IDs in an application.
- - Unique ID for the link.
-
- - .. row 2
-
- - __u32
-
+ * - __u32
- ``source_id``
-
- On pad to pad links: unique ID for the source pad.
On interface to entity links: unique ID for the interface.
- - .. row 3
-
- - __u32
-
+ * - __u32
- ``sink_id``
-
- On pad to pad links: unique ID for the sink pad.
On interface to entity links: unique ID for the entity.
- - .. row 4
-
- - __u32
-
+ * - __u32
- ``flags``
-
- Link flags, see :ref:`media-link-flag` for more details.
- - .. row 5
-
- - __u32
-
+ * - __u32
- ``reserved``\ [6]
-
- Reserved for future extensions. Drivers and applications must set
this array to zero.
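
The ``ptr_*``/``num_*`` pairs above are designed for a two-pass call
sequence. A hedged sketch, entities only (interfaces, pads and links follow
the same pattern, and a robust caller would also re-check
``topology_version`` between the two calls); ``fd`` and the usual C headers
are assumed:

.. code-block:: c

    /* Sketch: two-pass topology query, entities array only. */
    struct media_v2_topology topo;
    struct media_v2_entity *ents;

    memset(&topo, 0, sizeof(topo));
    if (ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo) < 0)
        return -1;                    /* first pass: counts only */

    ents = calloc(topo.num_entities, sizeof(*ents));
    if (!ents)
        return -1;
    topo.ptr_entities = (__u64)(uintptr_t)ents;
    if (ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo) == 0) {
        for (__u32 i = 0; i < topo.num_entities; i++)
            printf("entity %u: %s\n", ents[i].id, ents[i].name);
    }
    free(ents);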
diff --git a/Documentation/media/uapi/mediactl/media-types.rst b/Documentation/media/uapi/mediactl/media-types.rst
index 2dda14bd89b7..e4c57c8f4553 100644
--- a/Documentation/media/uapi/mediactl/media-types.rst
+++ b/Documentation/media/uapi/mediactl/media-types.rst
@@ -8,6 +8,41 @@ Types and flags used to represent the media graph elements
.. tabularcolumns:: |p{8.2cm}|p{10.3cm}|
.. _media-entity-functions:
+.. _MEDIA-ENT-F-UNKNOWN:
+.. _MEDIA-ENT-F-V4L2-SUBDEV-UNKNOWN:
+.. _MEDIA-ENT-F-IO-V4L:
+.. _MEDIA-ENT-F-IO-VBI:
+.. _MEDIA-ENT-F-IO-SWRADIO:
+.. _MEDIA-ENT-F-IO-DTV:
+.. _MEDIA-ENT-F-DTV-DEMOD:
+.. _MEDIA-ENT-F-TS-DEMUX:
+.. _MEDIA-ENT-F-DTV-CA:
+.. _MEDIA-ENT-F-DTV-NET-DECAP:
+.. _MEDIA-ENT-F-CONN-RF:
+.. _MEDIA-ENT-F-CONN-SVIDEO:
+.. _MEDIA-ENT-F-CONN-COMPOSITE:
+.. _MEDIA-ENT-F-CAM-SENSOR:
+.. _MEDIA-ENT-F-FLASH:
+.. _MEDIA-ENT-F-LENS:
+.. _MEDIA-ENT-F-ATV-DECODER:
+.. _MEDIA-ENT-F-TUNER:
+.. _MEDIA-ENT-F-IF-VID-DECODER:
+.. _MEDIA-ENT-F-IF-AUD-DECODER:
+.. _MEDIA-ENT-F-AUDIO-CAPTURE:
+.. _MEDIA-ENT-F-AUDIO-PLAYBACK:
+.. _MEDIA-ENT-F-AUDIO-MIXER:
+.. _MEDIA-ENT-F-PROC-VIDEO-COMPOSER:
+.. _MEDIA-ENT-F-PROC-VIDEO-PIXEL-FORMATTER:
+.. _MEDIA-ENT-F-PROC-VIDEO-PIXEL-ENC-CONV:
+.. _MEDIA-ENT-F-PROC-VIDEO-LUT:
+.. _MEDIA-ENT-F-PROC-VIDEO-SCALER:
+.. _MEDIA-ENT-F-PROC-VIDEO-STATISTICS:
+.. _MEDIA-ENT-F-PROC-VIDEO-ENCODER:
+.. _MEDIA-ENT-F-PROC-VIDEO-DECODER:
+.. _MEDIA-ENT-F-VID-MUX:
+.. _MEDIA-ENT-F-VID-IF-BRIDGE:
+.. _MEDIA-ENT-F-DV-DECODER:
+.. _MEDIA-ENT-F-DV-ENCODER:
.. cssclass:: longtable
@@ -15,139 +50,56 @@ Types and flags used to represent the media graph elements
:header-rows: 0
:stub-columns: 0
-
- - .. row 1
-
- .. _MEDIA-ENT-F-UNKNOWN:
- .. _MEDIA-ENT-F-V4L2-SUBDEV-UNKNOWN:
-
- - ``MEDIA_ENT_F_UNKNOWN`` and
-
+ * - ``MEDIA_ENT_F_UNKNOWN`` and
``MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN``
-
- Unknown entity. That generally indicates that a driver didn't
properly initialize the entity, which is a kernel bug.
- - .. row 2
-
- .. _MEDIA-ENT-F-IO-V4L:
-
- - ``MEDIA_ENT_F_IO_V4L``
-
+ * - ``MEDIA_ENT_F_IO_V4L``
- Data streaming input and/or output entity.
- - .. row 3
-
- .. _MEDIA-ENT-F-IO-VBI:
-
- - ``MEDIA_ENT_F_IO_VBI``
-
+ * - ``MEDIA_ENT_F_IO_VBI``
- V4L VBI streaming input or output entity
- - .. row 4
-
- .. _MEDIA-ENT-F-IO-SWRADIO:
-
- - ``MEDIA_ENT_F_IO_SWRADIO``
-
+ * - ``MEDIA_ENT_F_IO_SWRADIO``
- V4L Software Digital Radio (SDR) streaming input or output entity
- - .. row 5
-
- .. _MEDIA-ENT-F-IO-DTV:
-
- - ``MEDIA_ENT_F_IO_DTV``
-
+ * - ``MEDIA_ENT_F_IO_DTV``
- DVB Digital TV streaming input or output entity
- - .. row 6
-
- .. _MEDIA-ENT-F-DTV-DEMOD:
-
- - ``MEDIA_ENT_F_DTV_DEMOD``
-
+ * - ``MEDIA_ENT_F_DTV_DEMOD``
- Digital TV demodulator entity.
- - .. row 7
-
- .. _MEDIA-ENT-F-TS-DEMUX:
-
- - ``MEDIA_ENT_F_TS_DEMUX``
-
+ * - ``MEDIA_ENT_F_TS_DEMUX``
- MPEG Transport stream demux entity. Could be implemented in
hardware or in kernelspace by the Linux DVB subsystem.
- - .. row 8
-
- .. _MEDIA-ENT-F-DTV-CA:
-
- - ``MEDIA_ENT_F_DTV_CA``
-
+ * - ``MEDIA_ENT_F_DTV_CA``
- Digital TV Conditional Access module (CAM) entity
- - .. row 9
-
- .. _MEDIA-ENT-F-DTV-NET-DECAP:
-
- - ``MEDIA_ENT_F_DTV_NET_DECAP``
-
+ * - ``MEDIA_ENT_F_DTV_NET_DECAP``
- Digital TV network ULE/MLE decapsulation entity. Could be
implemented in hardware or in kernelspace
- - .. row 10
-
- .. _MEDIA-ENT-F-CONN-RF:
-
- - ``MEDIA_ENT_F_CONN_RF``
-
+ * - ``MEDIA_ENT_F_CONN_RF``
- Connector for a Radio Frequency (RF) signal.
- - .. row 11
-
- .. _MEDIA-ENT-F-CONN-SVIDEO:
-
- - ``MEDIA_ENT_F_CONN_SVIDEO``
-
+ * - ``MEDIA_ENT_F_CONN_SVIDEO``
- Connector for an S-Video signal.
- - .. row 12
-
- .. _MEDIA-ENT-F-CONN-COMPOSITE:
-
- - ``MEDIA_ENT_F_CONN_COMPOSITE``
-
+ * - ``MEDIA_ENT_F_CONN_COMPOSITE``
- Connector for an RGB composite signal.
- - .. row 13
-
- .. _MEDIA-ENT-F-CAM-SENSOR:
-
- - ``MEDIA_ENT_F_CAM_SENSOR``
-
+ * - ``MEDIA_ENT_F_CAM_SENSOR``
- Camera video sensor entity.
- - .. row 14
-
- .. _MEDIA-ENT-F-FLASH:
-
- - ``MEDIA_ENT_F_FLASH``
-
+ * - ``MEDIA_ENT_F_FLASH``
- Flash controller entity.
- - .. row 15
-
- .. _MEDIA-ENT-F-LENS:
-
- - ``MEDIA_ENT_F_LENS``
-
+ * - ``MEDIA_ENT_F_LENS``
- Lens controller entity.
- - .. row 16
-
- .. _MEDIA-ENT-F-ATV-DECODER:
-
- - ``MEDIA_ENT_F_ATV_DECODER``
-
+ * - ``MEDIA_ENT_F_ATV_DECODER``
- Analog video decoder, the basic function of the video decoder is
to accept analogue video from a wide variety of sources such as
broadcast, DVD players, cameras and video cassette recorders, in
@@ -155,36 +107,21 @@ Types and flags used to represent the media graph elements
its component parts, luminance and chrominance, and output it in
some digital video standard, with appropriate timing signals.
- - .. row 17
-
- .. _MEDIA-ENT-F-TUNER:
-
- - ``MEDIA_ENT_F_TUNER``
-
+ * - ``MEDIA_ENT_F_TUNER``
- Digital TV, analog TV, radio and/or software radio tuner, which
consists of a PLL tuning stage that converts a radio frequency (RF)
signal into an Intermediate Frequency (IF). Modern tuners have
internal IF-PLL decoders for audio and video, but older models
have those stages implemented as separate entities.
- - .. row 18
-
- .. _MEDIA-ENT-F-IF-VID-DECODER:
-
- - ``MEDIA_ENT_F_IF_VID_DECODER``
-
+ * - ``MEDIA_ENT_F_IF_VID_DECODER``
- IF-PLL video decoder. It receives the IF from a PLL and decodes
the analog TV video signal. This is commonly found on some very
old analog tuners, like Philips MK3 designs. They all contain a
tda9887 (or some software compatible similar chip, like tda9885).
Those devices use a different I2C address than the tuner PLL.
- - .. row 19
-
- .. _MEDIA-ENT-F-IF-AUD-DECODER:
-
- - ``MEDIA_ENT_F_IF_AUD_DECODER``
-
+ * - ``MEDIA_ENT_F_IF_AUD_DECODER``
- IF-PLL sound decoder. It receives the IF from a PLL and decodes
the analog TV audio signal. This is commonly found on some very
old analog hardware, like Micronas msp3400, Philips tda9840,
@@ -192,36 +129,16 @@ Types and flags used to represent the media graph elements
tuner PLL and should be controlled together with the IF-PLL video
decoder.
- - .. row 20
-
- .. _MEDIA-ENT-F-AUDIO-CAPTURE:
-
- - ``MEDIA_ENT_F_AUDIO_CAPTURE``
-
+ * - ``MEDIA_ENT_F_AUDIO_CAPTURE``
- Audio Capture Function Entity.
- - .. row 21
-
- .. _MEDIA-ENT-F-AUDIO-PLAYBACK:
-
- - ``MEDIA_ENT_F_AUDIO_PLAYBACK``
-
+ * - ``MEDIA_ENT_F_AUDIO_PLAYBACK``
- Audio Playback Function Entity.
- - .. row 22
-
- .. _MEDIA-ENT-F-AUDIO-MIXER:
-
- - ``MEDIA_ENT_F_AUDIO_MIXER``
-
+ * - ``MEDIA_ENT_F_AUDIO_MIXER``
- Audio Mixer Function Entity.
- - .. row 23
-
- .. _MEDIA-ENT-F-PROC-VIDEO-COMPOSER:
-
- - ``MEDIA_ENT_F_PROC_VIDEO_COMPOSER``
-
+ * - ``MEDIA_ENT_F_PROC_VIDEO_COMPOSER``
- Video composer (blender). An entity capable of video
composing must have at least two sink pads and one source
pad, and composes input video frames onto output video
@@ -229,12 +146,7 @@ Types and flags used to represent the media graph elements
color keying, raster operations (ROP), stitching or any other
means.
- - .. row 24
-
- .. _MEDIA-ENT-F-PROC-VIDEO-PIXEL-FORMATTER:
-
- - ``MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER``
-
+ * - ``MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER``
- Video pixel formatter. An entity capable of pixel formatting
must have at least one sink pad and one source pad. Read
pixel formatters read pixels from memory and perform a subset
@@ -243,12 +155,7 @@ Types and flags used to represent the media graph elements
a subset of dithering, pixel encoding conversion and packing
and write pixels to memory.
- - .. row 25
-
- .. _MEDIA-ENT-F-PROC-VIDEO-PIXEL-ENC-CONV:
-
- - ``MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV``
-
+ * - ``MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV``
- Video pixel encoding converter. An entity capable of pixel
encoding conversion must have at least one sink pad and one
source pad, and convert the encoding of pixels received on
@@ -257,12 +164,7 @@ Types and flags used to represent the media graph elements
to RGB to/from HSV, RGB to/from YUV and CFA (Bayer) to RGB
conversions.
- - .. row 26
-
- .. _MEDIA-ENT-F-PROC-VIDEO-LUT:
-
- - ``MEDIA_ENT_F_PROC_VIDEO_LUT``
-
+ * - ``MEDIA_ENT_F_PROC_VIDEO_LUT``
- Video look-up table. An entity capable of video lookup table
processing must have one sink pad and one source pad. It uses
the values of the pixels received on its sink pad to look up
@@ -271,12 +173,7 @@ Types and flags used to represent the media graph elements
separately or combine them for multi-dimensional table
lookups.
- - .. row 27
-
- .. _MEDIA-ENT-F-PROC-VIDEO-SCALER:
-
- - ``MEDIA_ENT_F_PROC_VIDEO_SCALER``
-
+ * - ``MEDIA_ENT_F_PROC_VIDEO_SCALER``
- Video scaler. An entity capable of video scaling must have
at least one sink pad and one source pad, and scale the
video frame(s) received on its sink pad(s) to a different
@@ -287,311 +184,190 @@ Types and flags used to represent the media graph elements
sub-sampling (occasionally also referred to as skipping) are
considered as scaling.
- - .. row 28
-
- .. _MEDIA-ENT-F-PROC-VIDEO-STATISTICS:
-
- - ``MEDIA_ENT_F_PROC_VIDEO_STATISTICS``
-
+ * - ``MEDIA_ENT_F_PROC_VIDEO_STATISTICS``
- Video statistics computation (histogram, 3A, etc.). An entity
capable of statistics computation must have one sink pad and
one source pad. It computes statistics over the frames
received on its sink pad and outputs the statistics data on
its source pad.
- - .. row 29
+ * - ``MEDIA_ENT_F_PROC_VIDEO_ENCODER``
+ - Video (MPEG, HEVC, VPx, etc.) encoder. An entity capable of
+ compressing video frames. Must have one sink pad and at least
+ one source pad.
- .. _MEDIA-ENT-F-VID-MUX:
-
- - ``MEDIA_ENT_F_VID_MUX``
+ * - ``MEDIA_ENT_F_PROC_VIDEO_DECODER``
+ - Video (MPEG, HEVC, VPx, etc.) decoder. An entity capable of
+ decompressing a compressed video stream into uncompressed video
+ frames. Must have one sink pad and at least one source pad.
+ * - ``MEDIA_ENT_F_VID_MUX``
- Video multiplexer. An entity capable of multiplexing must have at
least two sink pads and one source pad, and must pass the video
frame(s) received from the active sink pad to the source pad.
- - .. row 30
-
- .. _MEDIA-ENT-F-VID-IF-BRIDGE:
-
- - ``MEDIA_ENT_F_VID_IF_BRIDGE``
-
+ * - ``MEDIA_ENT_F_VID_IF_BRIDGE``
- Video interface bridge. A video interface bridge entity must have at
least one sink pad and at least one source pad. It receives video
frames on its sink pad from an input video bus of one type (HDMI, eDP,
MIPI CSI-2, etc.), and outputs them on its source pad to an output
video bus of another type (eDP, MIPI CSI-2, parallel, etc.).
- - .. row 31
-
- .. _MEDIA-ENT-F-DTV-DECODER:
-
- - ``MEDIA_ENT_F_DTV_DECODER``
-
+ * - ``MEDIA_ENT_F_DV_DECODER``
- Digital video decoder. The basic function of the video decoder is
to accept digital video from a wide variety of sources
and output it in some digital video standard, with appropriate
timing signals.
+ * - ``MEDIA_ENT_F_DV_ENCODER``
+ - Digital video encoder. The basic function of the video encoder is
+ to accept digital video from some digital video standard with
+ appropriate timing signals (usually a parallel video bus with sync
+ signals) and output this to a digital video output connector such
+ as HDMI or DisplayPort.
+
.. tabularcolumns:: |p{5.5cm}|p{12.0cm}|
.. _media-entity-flag:
+.. _MEDIA-ENT-FL-DEFAULT:
+.. _MEDIA-ENT-FL-CONNECTOR:
.. flat-table:: Media entity flags
:header-rows: 0
:stub-columns: 0
-
- - .. row 1
-
- .. _MEDIA-ENT-FL-DEFAULT:
-
- - ``MEDIA_ENT_FL_DEFAULT``
-
+ * - ``MEDIA_ENT_FL_DEFAULT``
- Default entity for its type. Used to discover the default audio,
VBI and video devices, the default camera sensor, etc.
- - .. row 2
-
- .. _MEDIA-ENT-FL-CONNECTOR:
-
- - ``MEDIA_ENT_FL_CONNECTOR``
-
+ * - ``MEDIA_ENT_FL_CONNECTOR``
- The entity represents a connector.
.. tabularcolumns:: |p{6.5cm}|p{6.0cm}|p{5.0cm}|
.. _media-intf-type:
+.. _MEDIA-INTF-T-DVB-FE:
+.. _MEDIA-INTF-T-DVB-DEMUX:
+.. _MEDIA-INTF-T-DVB-DVR:
+.. _MEDIA-INTF-T-DVB-CA:
+.. _MEDIA-INTF-T-DVB-NET:
+.. _MEDIA-INTF-T-V4L-VIDEO:
+.. _MEDIA-INTF-T-V4L-VBI:
+.. _MEDIA-INTF-T-V4L-RADIO:
+.. _MEDIA-INTF-T-V4L-SUBDEV:
+.. _MEDIA-INTF-T-V4L-SWRADIO:
+.. _MEDIA-INTF-T-V4L-TOUCH:
+.. _MEDIA-INTF-T-ALSA-PCM-CAPTURE:
+.. _MEDIA-INTF-T-ALSA-PCM-PLAYBACK:
+.. _MEDIA-INTF-T-ALSA-CONTROL:
+.. _MEDIA-INTF-T-ALSA-COMPRESS:
+.. _MEDIA-INTF-T-ALSA-RAWMIDI:
+.. _MEDIA-INTF-T-ALSA-HWDEP:
+.. _MEDIA-INTF-T-ALSA-SEQUENCER:
+.. _MEDIA-INTF-T-ALSA-TIMER:
.. flat-table:: Media interface types
:header-rows: 0
:stub-columns: 0
-
- - .. row 1
-
- .. _MEDIA-INTF-T-DVB-FE:
-
- - ``MEDIA_INTF_T_DVB_FE``
-
+ * - ``MEDIA_INTF_T_DVB_FE``
- Device node interface for the Digital TV frontend
-
- typically, /dev/dvb/adapter?/frontend?
- - .. row 2
-
- .. _MEDIA-INTF-T-DVB-DEMUX:
-
- - ``MEDIA_INTF_T_DVB_DEMUX``
-
+ * - ``MEDIA_INTF_T_DVB_DEMUX``
- Device node interface for the Digital TV demux
-
- typically, /dev/dvb/adapter?/demux?
- - .. row 3
-
- .. _MEDIA-INTF-T-DVB-DVR:
-
- - ``MEDIA_INTF_T_DVB_DVR``
-
+ * - ``MEDIA_INTF_T_DVB_DVR``
- Device node interface for the Digital TV DVR
-
- typically, /dev/dvb/adapter?/dvr?
- - .. row 4
-
- .. _MEDIA-INTF-T-DVB-CA:
-
- - ``MEDIA_INTF_T_DVB_CA``
-
+ * - ``MEDIA_INTF_T_DVB_CA``
- Device node interface for the Digital TV Conditional Access
-
- typically, /dev/dvb/adapter?/ca?
- - .. row 5
-
- .. _MEDIA-INTF-T-DVB-NET:
-
- - ``MEDIA_INTF_T_DVB_NET``
-
+ * - ``MEDIA_INTF_T_DVB_NET``
- Device node interface for the Digital TV network control
-
- typically, /dev/dvb/adapter?/net?
- - .. row 6
-
- .. _MEDIA-INTF-T-V4L-VIDEO:
-
- - ``MEDIA_INTF_T_V4L_VIDEO``
-
+ * - ``MEDIA_INTF_T_V4L_VIDEO``
- Device node interface for video (V4L)
-
- typically, /dev/video?
- - .. row 7
-
- .. _MEDIA-INTF-T-V4L-VBI:
-
- - ``MEDIA_INTF_T_V4L_VBI``
-
+ * - ``MEDIA_INTF_T_V4L_VBI``
- Device node interface for VBI (V4L)
-
- typically, /dev/vbi?
- - .. row 8
-
- .. _MEDIA-INTF-T-V4L-RADIO:
-
- - ``MEDIA_INTF_T_V4L_RADIO``
-
+ * - ``MEDIA_INTF_T_V4L_RADIO``
- Device node interface for radio (V4L)
-
- typically, /dev/radio?
- - .. row 9
-
- .. _MEDIA-INTF-T-V4L-SUBDEV:
-
- - ``MEDIA_INTF_T_V4L_SUBDEV``
-
+ * - ``MEDIA_INTF_T_V4L_SUBDEV``
- Device node interface for a V4L subdevice
-
- typically, /dev/v4l-subdev?
- - .. row 10
-
- .. _MEDIA-INTF-T-V4L-SWRADIO:
-
- - ``MEDIA_INTF_T_V4L_SWRADIO``
-
+ * - ``MEDIA_INTF_T_V4L_SWRADIO``
- Device node interface for Software Defined Radio (V4L)
-
- typically, /dev/swradio?
- - .. row 11
-
- .. _MEDIA-INTF-T-V4L-TOUCH:
-
- - ``MEDIA_INTF_T_V4L_TOUCH``
-
+ * - ``MEDIA_INTF_T_V4L_TOUCH``
- Device node interface for Touch device (V4L)
-
- typically, /dev/v4l-touch?
- - .. row 12
-
- .. _MEDIA-INTF-T-ALSA-PCM-CAPTURE:
-
- - ``MEDIA_INTF_T_ALSA_PCM_CAPTURE``
-
+ * - ``MEDIA_INTF_T_ALSA_PCM_CAPTURE``
- Device node interface for ALSA PCM Capture
-
- typically, /dev/snd/pcmC?D?c
- - .. row 13
-
- .. _MEDIA-INTF-T-ALSA-PCM-PLAYBACK:
-
- - ``MEDIA_INTF_T_ALSA_PCM_PLAYBACK``
-
+ * - ``MEDIA_INTF_T_ALSA_PCM_PLAYBACK``
- Device node interface for ALSA PCM Playback
-
- typically, /dev/snd/pcmC?D?p
- - .. row 14
-
- .. _MEDIA-INTF-T-ALSA-CONTROL:
-
- - ``MEDIA_INTF_T_ALSA_CONTROL``
-
+ * - ``MEDIA_INTF_T_ALSA_CONTROL``
- Device node interface for ALSA Control
-
- typically, /dev/snd/controlC?
- - .. row 15
-
- .. _MEDIA-INTF-T-ALSA-COMPRESS:
-
- - ``MEDIA_INTF_T_ALSA_COMPRESS``
-
+ * - ``MEDIA_INTF_T_ALSA_COMPRESS``
- Device node interface for ALSA Compress
-
- typically, /dev/snd/compr?
- - .. row 16
-
- .. _MEDIA-INTF-T-ALSA-RAWMIDI:
-
- - ``MEDIA_INTF_T_ALSA_RAWMIDI``
-
+ * - ``MEDIA_INTF_T_ALSA_RAWMIDI``
- Device node interface for ALSA Raw MIDI
-
- typically, /dev/snd/midi?
- - .. row 17
-
- .. _MEDIA-INTF-T-ALSA-HWDEP:
-
- - ``MEDIA_INTF_T_ALSA_HWDEP``
-
+ * - ``MEDIA_INTF_T_ALSA_HWDEP``
- Device node interface for ALSA Hardware Dependent
-
- typically, /dev/snd/hwC?D?
- - .. row 18
-
- .. _MEDIA-INTF-T-ALSA-SEQUENCER:
-
- - ``MEDIA_INTF_T_ALSA_SEQUENCER``
-
+ * - ``MEDIA_INTF_T_ALSA_SEQUENCER``
- Device node interface for ALSA Sequencer
-
- typically, /dev/snd/seq
- - .. row 19
-
- .. _MEDIA-INTF-T-ALSA-TIMER:
-
- - ``MEDIA_INTF_T_ALSA_TIMER``
-
+ * - ``MEDIA_INTF_T_ALSA_TIMER``
- Device node interface for ALSA Timer
-
- typically, /dev/snd/timer
.. tabularcolumns:: |p{5.5cm}|p{12.0cm}|
.. _media-pad-flag:
+.. _MEDIA-PAD-FL-SINK:
+.. _MEDIA-PAD-FL-SOURCE:
+.. _MEDIA-PAD-FL-MUST-CONNECT:
.. flat-table:: Media pad flags
:header-rows: 0
:stub-columns: 0
-
- - .. row 1
-
- .. _MEDIA-PAD-FL-SINK:
-
- - ``MEDIA_PAD_FL_SINK``
-
+ * - ``MEDIA_PAD_FL_SINK``
- Input pad, relative to the entity. Input pads sink data and are
targets of links.
- - .. row 2
-
- .. _MEDIA-PAD-FL-SOURCE:
-
- - ``MEDIA_PAD_FL_SOURCE``
-
+ * - ``MEDIA_PAD_FL_SOURCE``
- Output pad, relative to the entity. Output pads source data and
are origins of links.
- - .. row 3
-
- .. _MEDIA-PAD-FL-MUST-CONNECT:
-
- - ``MEDIA_PAD_FL_MUST_CONNECT``
-
+ * - ``MEDIA_PAD_FL_MUST_CONNECT``
- If this flag is set and the pad is linked to any other pad, then
at least one of those links must be enabled for the entity to be
able to stream. There could be temporary reasons (e.g. device
@@ -606,46 +382,29 @@ must be set for every pad.
.. tabularcolumns:: |p{5.5cm}|p{12.0cm}|
.. _media-link-flag:
+.. _MEDIA-LNK-FL-ENABLED:
+.. _MEDIA-LNK-FL-IMMUTABLE:
+.. _MEDIA-LNK-FL-DYNAMIC:
+.. _MEDIA-LNK-FL-LINK-TYPE:
.. flat-table:: Media link flags
:header-rows: 0
:stub-columns: 0
-
- - .. row 1
-
- .. _MEDIA-LNK-FL-ENABLED:
-
- - ``MEDIA_LNK_FL_ENABLED``
-
+ * - ``MEDIA_LNK_FL_ENABLED``
- The link is enabled and can be used to transfer media data. When
two or more links target a sink pad, only one of them can be
enabled at a time.
- - .. row 2
-
- .. _MEDIA-LNK-FL-IMMUTABLE:
-
- - ``MEDIA_LNK_FL_IMMUTABLE``
-
+ * - ``MEDIA_LNK_FL_IMMUTABLE``
- The link enabled state can't be modified at runtime. An immutable
link is always enabled.
- - .. row 3
-
- .. _MEDIA-LNK-FL-DYNAMIC:
-
- - ``MEDIA_LNK_FL_DYNAMIC``
-
+ * - ``MEDIA_LNK_FL_DYNAMIC``
- The link enabled state can be modified during streaming. This flag
is set by drivers and is read-only for applications.
- - .. row 4
-
- .. _MEDIA-LNK-FL-LINK-TYPE:
-
- - ``MEDIA_LNK_FL_LINK_TYPE``
-
+ * - ``MEDIA_LNK_FL_LINK_TYPE``
- This is a bitmask that defines the type of the link. Currently,
two types of links are supported:
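
As a sketch of how an application might test this bitmask; the two type
values assumed here are the ``MEDIA_LNK_FL_DATA_LINK`` and
``MEDIA_LNK_FL_INTERFACE_LINK`` defines from the media uapi header:

.. code-block:: c

    /* Sketch: classify a struct media_v2_link by its type bits. */
    __u32 type = link.flags & MEDIA_LNK_FL_LINK_TYPE;

    if (type == MEDIA_LNK_FL_DATA_LINK)
        printf("pad-to-pad data link\n");
    else if (type == MEDIA_LNK_FL_INTERFACE_LINK)
        printf("interface-to-entity link\n");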
diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst
index 03931f9b1285..9f7312bf3365 100644
--- a/Documentation/media/uapi/v4l/extended-controls.rst
+++ b/Documentation/media/uapi/v4l/extended-controls.rst
@@ -1955,9 +1955,51 @@ enum v4l2_vp8_golden_frame_sel -
``V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (integer)``
Quantization parameter for a P frame for VP8.
-``V4L2_CID_MPEG_VIDEO_VPX_PROFILE (integer)``
- Select the desired profile for VPx encoder. Acceptable values are 0,
- 1, 2 and 3 corresponding to encoder profiles 0, 1, 2 and 3.
+.. _v4l2-mpeg-video-vp8-profile:
+
+``V4L2_CID_MPEG_VIDEO_VP8_PROFILE``
+ (enum)
+
+enum v4l2_mpeg_video_vp8_profile -
+ This control allows selecting the profile for the VP8 encoder.
+ It is also used to enumerate the profiles supported by a VP8 encoder or decoder.
+ Possible values are:
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_0``
+ - Profile 0
+ * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_1``
+ - Profile 1
+ * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_2``
+ - Profile 2
+ * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_3``
+ - Profile 3
+
+.. _v4l2-mpeg-video-vp9-profile:
+
+``V4L2_CID_MPEG_VIDEO_VP9_PROFILE``
+ (enum)
+
+enum v4l2_mpeg_video_vp9_profile -
+ This control allows selecting the profile for the VP9 encoder.
+ It is also used to enumerate the profiles supported by a VP9 encoder or decoder.
+ Possible values are:
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_0``
+ - Profile 0
+ * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_1``
+ - Profile 1
+ * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_2``
+ - Profile 2
+ * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_3``
+ - Profile 3
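
A minimal sketch of selecting one of these profiles through the extended
controls interface, assuming ``fd`` is an open encoder video node that
exposes the control:

.. code-block:: c

    /* Sketch: request VP9 profile 2 from an encoder. */
    struct v4l2_ext_control ctrl;
    struct v4l2_ext_controls ctrls;

    memset(&ctrl, 0, sizeof(ctrl));
    memset(&ctrls, 0, sizeof(ctrls));
    ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE;
    ctrl.value = V4L2_MPEG_VIDEO_VP9_PROFILE_2;
    ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
    ctrls.count = 1;
    ctrls.controls = &ctrl;
    if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
        perror("VIDIOC_S_EXT_CTRLS");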
High Efficiency Video Coding (HEVC/H.265) Control Reference
diff --git a/Documentation/media/uapi/v4l/pixfmt-compressed.rst b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
index abec03937bb3..d382e7a5c38e 100644
--- a/Documentation/media/uapi/v4l/pixfmt-compressed.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
@@ -95,3 +95,10 @@ Compressed Formats
- ``V4L2_PIX_FMT_HEVC``
- 'HEVC'
- HEVC/H.265 video elementary stream.
+ * .. _V4L2-PIX-FMT-FWHT:
+
+ - ``V4L2_PIX_FMT_FWHT``
+ - 'FWHT'
+ - Video elementary stream using a codec based on the Fast Walsh Hadamard
+ Transform. This codec is implemented by the vicodec ('Virtual Codec')
+ driver. See the vicodec-codec.h header for more details.
diff --git a/Documentation/media/uapi/v4l/pixfmt-rgb.rst b/Documentation/media/uapi/v4l/pixfmt-rgb.rst
index cf2ef7df9616..1f9a7e3a07c9 100644
--- a/Documentation/media/uapi/v4l/pixfmt-rgb.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-rgb.rst
@@ -19,4 +19,5 @@ RGB Formats
pixfmt-srggb10-ipu3
pixfmt-srggb12
pixfmt-srggb12p
+ pixfmt-srggb14p
pixfmt-srggb16
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst
new file mode 100644
index 000000000000..88d20c0e4282
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst
@@ -0,0 +1,127 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _V4L2-PIX-FMT-SRGGB14P:
+.. _v4l2-pix-fmt-sbggr14p:
+.. _v4l2-pix-fmt-sgbrg14p:
+.. _v4l2-pix-fmt-sgrbg14p:
+
+*******************************************************************************************************************************
+V4L2_PIX_FMT_SRGGB14P ('pRCC'), V4L2_PIX_FMT_SGRBG14P ('pgCC'), V4L2_PIX_FMT_SGBRG14P ('pGCC'), V4L2_PIX_FMT_SBGGR14P ('pBCC')
+*******************************************************************************************************************************
+
+*man V4L2_PIX_FMT_SRGGB14P(2)*
+
+V4L2_PIX_FMT_SGRBG14P
+V4L2_PIX_FMT_SGBRG14P
+V4L2_PIX_FMT_SBGGR14P
+14-bit packed Bayer formats
+
+
+Description
+===========
+
+These four pixel formats are packed raw sRGB / Bayer formats with 14
+bits per colour. Every four consecutive samples are packed into seven
+bytes. Each of the first four bytes contains the eight high order bits
+of the pixels, and the three following bytes contain the six least
+significant bits of each pixel, in the same order.
+
+Each n-pixel row contains n/2 green samples and n/2 blue or red samples,
+with alternating green-red and green-blue rows. They are conventionally
+described as GRGR... BGBG..., RGRG... GBGB..., etc. Below is an example
+of one of these formats:
+
+**Byte Order.**
+Each cell is one byte.
+
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 2 1 1 1 1 1 1 1
+
+
+ - .. row 1
+
+ - start + 0:
+
+ - B\ :sub:`00high`
+
+ - G\ :sub:`01high`
+
+ - B\ :sub:`02high`
+
+ - G\ :sub:`03high`
+
+ - G\ :sub:`01low bits 1--0`\ (bits 7--6)
+ B\ :sub:`00low bits 5--0`\ (bits 5--0)
+
+ - B\ :sub:`02low bits 3--0`\ (bits 7--4)
+ G\ :sub:`01low bits 5--2`\ (bits 3--0)
+
+ - G\ :sub:`03low bits 5--0`\ (bits 7--2)
+ B\ :sub:`02low bits 5--4`\ (bits 1--0)
+
+ - .. row 2
+
+ - start + 7:
+
+ - G\ :sub:`00high`
+
+ - R\ :sub:`01high`
+
+ - G\ :sub:`02high`
+
+ - R\ :sub:`03high`
+
+ - R\ :sub:`01low bits 1--0`\ (bits 7--6)
+ G\ :sub:`00low bits 5--0`\ (bits 5--0)
+
+ - G\ :sub:`02low bits 3--0`\ (bits 7--4)
+ R\ :sub:`01low bits 5--2`\ (bits 3--0)
+
+ - R\ :sub:`03low bits 5--0`\ (bits 7--2)
+ G\ :sub:`02low bits 5--4`\ (bits 1--0)
+
+ - .. row 3
+
+ - start + 14
+
+ - B\ :sub:`20high`
+
+ - G\ :sub:`21high`
+
+ - B\ :sub:`22high`
+
+ - G\ :sub:`23high`
+
+ - G\ :sub:`21low bits 1--0`\ (bits 7--6)
+ B\ :sub:`20low bits 5--0`\ (bits 5--0)
+
+ - B\ :sub:`22low bits 3--0`\ (bits 7--4)
+ G\ :sub:`21low bits 5--2`\ (bits 3--0)
+
+ - G\ :sub:`23low bits 5--0`\ (bits 7--2)
+ B\ :sub:`22low bits 5--4`\ (bits 1--0)
+
+ - .. row 4
+
+ - start + 21
+
+ - G\ :sub:`30high`
+
+ - R\ :sub:`31high`
+
+ - G\ :sub:`32high`
+
+ - R\ :sub:`33high`
+
+ - R\ :sub:`31low bits 1--0`\ (bits 7--6)
+ G\ :sub:`30low bits 5--0`\ (bits 5--0)
+
+ - G\ :sub:`32low bits 3--0`\ (bits 7--4)
+ R\ :sub:`31low bits 5--2`\ (bits 3--0)
+
+ - R\ :sub:`33low bits 5--0`\ (bits 7--2)
+ G\ :sub:`32low bits 5--4`\ (bits 1--0)
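
The byte layout above maps to the following hedged unpacking sketch for one
group of four samples (a plain C fragment for illustration, not part of the
uapi):

.. code-block:: c

    /* Sketch: expand seven packed bytes into four 14-bit samples.
     * Bytes 0-3 hold the high 8 bits; bytes 4-6 hold the 6 LSBs. */
    static void unpack14(const uint8_t *p, uint16_t s[4])
    {
        s[0] = (p[0] << 6) | (p[4] & 0x3f);
        s[1] = (p[1] << 6) | ((p[5] & 0x0f) << 2) | (p[4] >> 6);
        s[2] = (p[2] << 6) | ((p[6] & 0x03) << 4) | (p[5] >> 4);
        s[3] = (p[3] << 6) | (p[6] >> 2);
    }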
diff --git a/Documentation/media/uapi/v4l/pixfmt-y10p.rst b/Documentation/media/uapi/v4l/pixfmt-y10p.rst
new file mode 100644
index 000000000000..13b571306915
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-y10p.rst
@@ -0,0 +1,33 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _V4L2-PIX-FMT-Y10P:
+
+******************************
+V4L2_PIX_FMT_Y10P ('Y10P')
+******************************
+
+Grey-scale image as a MIPI RAW10 packed array
+
+
+Description
+===========
+
+This is a packed grey-scale image format with a depth of 10 bits per
+pixel. Every four consecutive pixels are packed into 5 bytes. Each of
+the first 4 bytes contains the 8 high order bits of the pixels, and
+the 5th byte contains the 2 least significant bits of each pixel,
+in the same order.
+
+**Bit-packed representation.**
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 8 8 8 8 64
+
+ * - Y'\ :sub:`00[9:2]`
+ - Y'\ :sub:`01[9:2]`
+ - Y'\ :sub:`02[9:2]`
+ - Y'\ :sub:`03[9:2]`
+ - Y'\ :sub:`03[1:0]`\ (bits 7--6) Y'\ :sub:`02[1:0]`\ (bits 5--4)
+ Y'\ :sub:`01[1:0]`\ (bits 3--2) Y'\ :sub:`00[1:0]`\ (bits 1--0)
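
The packing is easiest to see as code; a hedged sketch unpacking one
five-byte group into four 10-bit luma values:

.. code-block:: c

    /* Sketch: bytes 0-3 carry bits [9:2]; byte 4 carries the two
     * LSBs of each pixel, pixel 0 in bits 1-0 up to pixel 3 in 7-6. */
    static void unpack_y10p(const uint8_t *p, uint16_t y[4])
    {
        for (int i = 0; i < 4; i++)
            y[i] = (p[i] << 2) | ((p[4] >> (2 * i)) & 0x3);
    }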
diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst
index 9fcabe7f9367..8e73fcfc6900 100644
--- a/Documentation/media/uapi/v4l/subdev-formats.rst
+++ b/Documentation/media/uapi/v4l/subdev-formats.rst
@@ -37,19 +37,22 @@ Media Bus Formats
- Image colorspace, from enum
:c:type:`v4l2_colorspace`. See
:ref:`colorspaces` for details.
- * - enum :c:type:`v4l2_ycbcr_encoding`
+ * - __u16
- ``ycbcr_enc``
- - This information supplements the ``colorspace`` and must be set by
+ - Y'CbCr encoding, from enum :c:type:`v4l2_ycbcr_encoding`.
+ This information supplements the ``colorspace`` and must be set by
the driver for capture streams and by the application for output
streams, see :ref:`colorspaces`.
- * - enum :c:type:`v4l2_quantization`
+ * - __u16
- ``quantization``
- - This information supplements the ``colorspace`` and must be set by
+ - Quantization range, from enum :c:type:`v4l2_quantization`.
+ This information supplements the ``colorspace`` and must be set by
the driver for capture streams and by the application for output
streams, see :ref:`colorspaces`.
- * - enum :c:type:`v4l2_xfer_func`
+ * - __u16
- ``xfer_func``
- - This information supplements the ``colorspace`` and must be set by
+ - Transfer function, from enum :c:type:`v4l2_xfer_func`.
+ This information supplements the ``colorspace`` and must be set by
the driver for capture streams and by the application for output
streams, see :ref:`colorspaces`.
* - __u16
@@ -4315,6 +4318,78 @@ the following codes.
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-Y10-2X8-PADHI_LE:
+
+ - MEDIA_BUS_FMT_Y10_2X8_PADHI_LE
+ - 0x202c
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - 0
+ - 0
+ - 0
+ - 0
+ - 0
+ - 0
+ - y\ :sub:`9`
+ - y\ :sub:`8`
* .. _MEDIA-BUS-FMT-UYVY10-2X10:
- MEDIA_BUS_FMT_UYVY10_2X10
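
For the ``ycbcr_enc``, ``quantization`` and ``xfer_func`` fields documented
above, a hedged sketch of an application configuring a subdev pad format;
the resolution, media bus code and colorspace values are illustrative
assumptions:

.. code-block:: c

    /* Sketch: set an active pad format, filling the supplemental
     * colorspace fields alongside the colorspace itself. */
    struct v4l2_subdev_format fmt;

    memset(&fmt, 0, sizeof(fmt));
    fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
    fmt.pad = 0;
    fmt.format.width = 1920;
    fmt.format.height = 1080;
    fmt.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
    fmt.format.colorspace = V4L2_COLORSPACE_REC709;
    fmt.format.ycbcr_enc = V4L2_YCBCR_ENC_709;
    fmt.format.quantization = V4L2_QUANTIZATION_LIM_RANGE;
    fmt.format.xfer_func = V4L2_XFER_FUNC_709;
    if (ioctl(fd, VIDIOC_SUBDEV_S_FMT, &fmt) < 0)
        perror("VIDIOC_SUBDEV_S_FMT");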
diff --git a/Documentation/media/uapi/v4l/vidioc-enumstd.rst b/Documentation/media/uapi/v4l/vidioc-enumstd.rst
index b7fda29f46a1..2644a62acd4b 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumstd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumstd.rst
@@ -2,14 +2,14 @@
.. _VIDIOC_ENUMSTD:
-********************
-ioctl VIDIOC_ENUMSTD
-********************
+*******************************************
+ioctl VIDIOC_ENUMSTD, VIDIOC_SUBDEV_ENUMSTD
+*******************************************
Name
====
-VIDIOC_ENUMSTD - Enumerate supported video standards
+VIDIOC_ENUMSTD - VIDIOC_SUBDEV_ENUMSTD - Enumerate supported video standards
Synopsis
@@ -18,6 +18,9 @@ Synopsis
.. c:function:: int ioctl( int fd, VIDIOC_ENUMSTD, struct v4l2_standard *argp )
:name: VIDIOC_ENUMSTD
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_ENUMSTD, struct v4l2_standard *argp )
+ :name: VIDIOC_SUBDEV_ENUMSTD
+
Arguments
=========
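
A hedged sketch of the enumeration loop, usable with either ioctl; ``fd``
here is assumed to be an open subdev node for the VIDIOC_SUBDEV_ENUMSTD
variant:

.. code-block:: c

    /* Sketch: list every supported standard until EINVAL. */
    struct v4l2_standard std;

    for (__u32 i = 0; ; i++) {
        memset(&std, 0, sizeof(std));
        std.index = i;
        if (ioctl(fd, VIDIOC_SUBDEV_ENUMSTD, &std) < 0)
            break; /* index out of range */
        printf("standard %u: %s (0x%llx)\n", i,
               (const char *)std.name, (unsigned long long)std.id);
    }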
diff --git a/Documentation/media/uapi/v4l/vidioc-g-std.rst b/Documentation/media/uapi/v4l/vidioc-g-std.rst
index 90791ab51a53..8d94f0404df2 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-std.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-std.rst
@@ -2,14 +2,14 @@
.. _VIDIOC_G_STD:
-********************************
-ioctl VIDIOC_G_STD, VIDIOC_S_STD
-********************************
+**************************************************************************
+ioctl VIDIOC_G_STD, VIDIOC_S_STD, VIDIOC_SUBDEV_G_STD, VIDIOC_SUBDEV_S_STD
+**************************************************************************
Name
====
-VIDIOC_G_STD - VIDIOC_S_STD - Query or select the video standard of the current input
+VIDIOC_G_STD - VIDIOC_S_STD - VIDIOC_SUBDEV_G_STD - VIDIOC_SUBDEV_S_STD - Query or select the video standard of the current input
Synopsis
@@ -21,6 +21,12 @@ Synopsis
.. c:function:: int ioctl( int fd, VIDIOC_S_STD, const v4l2_std_id *argp )
:name: VIDIOC_S_STD
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_G_STD, v4l2_std_id *argp )
+ :name: VIDIOC_SUBDEV_G_STD
+
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_S_STD, const v4l2_std_id *argp )
+ :name: VIDIOC_SUBDEV_S_STD
+
Arguments
=========
diff --git a/Documentation/media/uapi/v4l/vidioc-querystd.rst b/Documentation/media/uapi/v4l/vidioc-querystd.rst
index cf40bca19b9f..a8385cc74818 100644
--- a/Documentation/media/uapi/v4l/vidioc-querystd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-querystd.rst
@@ -2,14 +2,14 @@
.. _VIDIOC_QUERYSTD:
-*********************
-ioctl VIDIOC_QUERYSTD
-*********************
+*********************************************
+ioctl VIDIOC_QUERYSTD, VIDIOC_SUBDEV_QUERYSTD
+*********************************************
Name
====
-VIDIOC_QUERYSTD - Sense the video standard received by the current input
+VIDIOC_QUERYSTD - VIDIOC_SUBDEV_QUERYSTD - Sense the video standard received by the current input
Synopsis
@@ -18,6 +18,9 @@ Synopsis
.. c:function:: int ioctl( int fd, VIDIOC_QUERYSTD, v4l2_std_id *argp )
:name: VIDIOC_QUERYSTD
+.. c:function:: int ioctl( int fd, VIDIOC_SUBDEV_QUERYSTD, v4l2_std_id *argp )
+ :name: VIDIOC_SUBDEV_QUERYSTD
+
Arguments
=========
diff --git a/Documentation/media/uapi/v4l/yuv-formats.rst b/Documentation/media/uapi/v4l/yuv-formats.rst
index 3334ea445657..9ab0592d08da 100644
--- a/Documentation/media/uapi/v4l/yuv-formats.rst
+++ b/Documentation/media/uapi/v4l/yuv-formats.rst
@@ -29,6 +29,7 @@ to brightness information.
pixfmt-y10
pixfmt-y12
pixfmt-y10b
+ pixfmt-y10p
pixfmt-y16
pixfmt-y16-be
pixfmt-y8i
diff --git a/Documentation/media/v4l-drivers/qcom_camss.rst b/Documentation/media/v4l-drivers/qcom_camss.rst
index 9e66b7b5770f..f27c8df20b2b 100644
--- a/Documentation/media/v4l-drivers/qcom_camss.rst
+++ b/Documentation/media/v4l-drivers/qcom_camss.rst
@@ -7,34 +7,34 @@ Introduction
------------
This file documents the Qualcomm Camera Subsystem driver located under
-drivers/media/platform/qcom/camss-8x16.
+drivers/media/platform/qcom/camss.
The current version of the driver supports the Camera Subsystem found on
-Qualcomm MSM8916 and APQ8016 processors.
+Qualcomm MSM8916/APQ8016 and MSM8996/APQ8096 processors.
The driver implements V4L2, Media controller and V4L2 subdev interfaces.
Camera sensors using the V4L2 subdev interface in the kernel are supported.
The driver is implemented using, as a reference, the Qualcomm Camera Subsystem
-driver for Android as found in Code Aurora [#f1]_.
+driver for Android as found in Code Aurora [#f1]_ [#f2]_.
Qualcomm Camera Subsystem hardware
----------------------------------
-The Camera Subsystem hardware found on 8x16 processors and supported by the
-driver consists of:
+The Camera Subsystem hardware found on 8x16 / 8x96 processors and supported by
+the driver consists of:
-- 2 CSIPHY modules. They handle the Physical layer of the CSI2 receivers.
+- 2 / 3 CSIPHY modules. They handle the Physical layer of the CSI2 receivers.
A separate camera sensor can be connected to each of the CSIPHY module;
-- 2 CSID (CSI Decoder) modules. They handle the Protocol and Application layer
- of the CSI2 receivers. A CSID can decode data stream from any of the CSIPHY.
- Each CSID also contains a TG (Test Generator) block which can generate
+- 2 / 4 CSID (CSI Decoder) modules. They handle the Protocol and Application
+ layer of the CSI2 receivers. A CSID can decode a data stream from any of the
+ CSIPHY. Each CSID also contains a TG (Test Generator) block which can generate
artificial input data for test purposes;
- ISPIF (ISP Interface) module. Handles the routing of the data streams from
the CSIDs to the inputs of the VFE;
-- VFE (Video Front End) module. Contains a pipeline of image processing hardware
- blocks. The VFE has different input interfaces. The PIX (Pixel) input
+- 1 / 2 VFE (Video Front End) module(s). Each contains a pipeline of image processing
+ hardware blocks. The VFE has different input interfaces. The PIX (Pixel) input
interface feeds the input data to the image processing pipeline. The image
processing pipeline contains also a scale and crop module at the end. Three
RDI (Raw Dump Interface) input interfaces bypass the image processing
@@ -49,18 +49,33 @@ The current version of the driver supports:
- Input from camera sensor via CSIPHY;
- Generation of test input data by the TG in CSID;
-- RDI interface of VFE - raw dump of the input data to memory.
+- RDI interface of VFE
- Supported formats:
+ - Raw dump of the input data to memory.
- - YUYV/UYVY/YVYU/VYUY (packed YUV 4:2:2 - V4L2_PIX_FMT_YUYV /
- V4L2_PIX_FMT_UYVY / V4L2_PIX_FMT_YVYU / V4L2_PIX_FMT_VYUY);
- - MIPI RAW8 (8bit Bayer RAW - V4L2_PIX_FMT_SRGGB8 /
- V4L2_PIX_FMT_SGRBG8 / V4L2_PIX_FMT_SGBRG8 / V4L2_PIX_FMT_SBGGR8);
- - MIPI RAW10 (10bit packed Bayer RAW - V4L2_PIX_FMT_SBGGR10P /
- V4L2_PIX_FMT_SGBRG10P / V4L2_PIX_FMT_SGRBG10P / V4L2_PIX_FMT_SRGGB10P);
- - MIPI RAW12 (12bit packed Bayer RAW - V4L2_PIX_FMT_SRGGB12P /
- V4L2_PIX_FMT_SGBRG12P / V4L2_PIX_FMT_SGRBG12P / V4L2_PIX_FMT_SRGGB12P).
+ Supported formats:
+
+ - YUYV/UYVY/YVYU/VYUY (packed YUV 4:2:2 - V4L2_PIX_FMT_YUYV /
+ V4L2_PIX_FMT_UYVY / V4L2_PIX_FMT_YVYU / V4L2_PIX_FMT_VYUY);
+ - MIPI RAW8 (8bit Bayer RAW - V4L2_PIX_FMT_SRGGB8 /
+ V4L2_PIX_FMT_SGRBG8 / V4L2_PIX_FMT_SGBRG8 / V4L2_PIX_FMT_SBGGR8);
+ - MIPI RAW10 (10bit packed Bayer RAW - V4L2_PIX_FMT_SBGGR10P /
+ V4L2_PIX_FMT_SGBRG10P / V4L2_PIX_FMT_SGRBG10P / V4L2_PIX_FMT_SRGGB10P /
+ V4L2_PIX_FMT_Y10P);
+ - MIPI RAW12 (12bit packed Bayer RAW - V4L2_PIX_FMT_SRGGB12P /
+ V4L2_PIX_FMT_SGBRG12P / V4L2_PIX_FMT_SGRBG12P / V4L2_PIX_FMT_SBGGR12P).
+ - (8x96 only) MIPI RAW14 (14bit packed Bayer RAW - V4L2_PIX_FMT_SRGGB14P /
+ V4L2_PIX_FMT_SGBRG14P / V4L2_PIX_FMT_SGRBG14P / V4L2_PIX_FMT_SBGGR14P).
+
+ - (8x96 only) Format conversion of the input data.
+
+ Supported input formats:
+
+ - MIPI RAW10 (10bit packed Bayer RAW - V4L2_PIX_FMT_SBGGR10P / V4L2_PIX_FMT_Y10P).
+
+ Supported output formats:
+
+ - Plain16 RAW10 (10bit unpacked Bayer RAW - V4L2_PIX_FMT_SBGGR10 / V4L2_PIX_FMT_Y10).
- PIX interface of VFE
@@ -75,14 +90,16 @@ The current version of the driver supports:
- NV12/NV21 (two plane YUV 4:2:0 - V4L2_PIX_FMT_NV12 / V4L2_PIX_FMT_NV21);
- NV16/NV61 (two plane YUV 4:2:2 - V4L2_PIX_FMT_NV16 / V4L2_PIX_FMT_NV61).
+ - (8x96 only) YUYV/UYVY/YVYU/VYUY (packed YUV 4:2:2 - V4L2_PIX_FMT_YUYV /
+ V4L2_PIX_FMT_UYVY / V4L2_PIX_FMT_YVYU / V4L2_PIX_FMT_VYUY).
- Scaling support. Configuration of the VFE Encoder Scale module
for downscaling with a ratio of up to 16x.
- Cropping support. Configuration of the VFE Encoder Crop module.
-- Concurrent and independent usage of two data inputs - could be camera sensors
- and/or TG.
+- Concurrent and independent usage of two (8x96: three) data inputs -
+ could be camera sensors and/or TG.
Driver Architecture and Design
@@ -90,14 +107,14 @@ Driver Architecture and Design
The driver implements the V4L2 subdev interface. With the goal of modeling the
hardware links between the modules and exposing a clean, logical and usable
-interface, the driver is split into V4L2 sub-devices as follows:
+interface, the driver is split into V4L2 sub-devices as follows (8x16 / 8x96):
-- 2 CSIPHY sub-devices - each CSIPHY is represented by a single sub-device;
-- 2 CSID sub-devices - each CSID is represented by a single sub-device;
-- 2 ISPIF sub-devices - ISPIF is represented by a number of sub-devices equal
- to the number of CSID sub-devices;
-- 4 VFE sub-devices - VFE is represented by a number of sub-devices equal to
- the number of the input interfaces (3 RDI and 1 PIX).
+- 2 / 3 CSIPHY sub-devices - each CSIPHY is represented by a single sub-device;
+- 2 / 4 CSID sub-devices - each CSID is represented by a single sub-device;
+- 2 / 4 ISPIF sub-devices - ISPIF is represented by a number of sub-devices
+ equal to the number of CSID sub-devices;
+- 4 / 8 VFE sub-devices - VFE is represented by a number of sub-devices equal to
+ the number of input interfaces (3 RDI and 1 PIX for each VFE).
The considerations to split the driver in this particular way are as follows:
@@ -115,8 +132,8 @@ The considerations to split the driver in this particular way are as follows:
Each VFE sub-device is linked to a separate video device node.
-The media controller pipeline graph is as follows (with connected two OV5645
-camera sensors):
+The media controller pipeline graph is as follows (with two / three OV5645
+camera sensors connected):
.. _qcom_camss_graph:
@@ -124,7 +141,13 @@ camera sensors):
:alt: qcom_camss_graph.dot
:align: center
- Media pipeline graph
+ Media pipeline graph 8x16
+
+.. kernel-figure:: qcom_camss_8x96_graph.dot
+ :alt: qcom_camss_8x96_graph.dot
+ :align: center
+
+ Media pipeline graph 8x96
Implementation
@@ -149,8 +172,12 @@ APQ8016 Specification:
https://developer.qualcomm.com/download/sd410/snapdragon-410-processor-device-specification.pdf
Referenced 2016-11-24.
+APQ8096 Specification:
+https://developer.qualcomm.com/download/sd820e/qualcomm-snapdragon-820e-processor-apq8096sge-device-specification.pdf
+Referenced 2018-06-22.
References
----------
.. [#f1] https://source.codeaurora.org/quic/la/kernel/msm-3.10/
+.. [#f2] https://source.codeaurora.org/quic/la/kernel/msm-3.18/
diff --git a/Documentation/media/v4l-drivers/qcom_camss_8x96_graph.dot b/Documentation/media/v4l-drivers/qcom_camss_8x96_graph.dot
new file mode 100644
index 000000000000..de34f0a7afdc
--- /dev/null
+++ b/Documentation/media/v4l-drivers/qcom_camss_8x96_graph.dot
@@ -0,0 +1,104 @@
+digraph board {
+ rankdir=TB
+ n00000001 [label="{{<port0> 0} | msm_csiphy0\n/dev/v4l-subdev0 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000001:port1 -> n0000000a:port0 [style=dashed]
+ n00000001:port1 -> n0000000d:port0 [style=dashed]
+ n00000001:port1 -> n00000010:port0 [style=dashed]
+ n00000001:port1 -> n00000013:port0 [style=dashed]
+ n00000004 [label="{{<port0> 0} | msm_csiphy1\n/dev/v4l-subdev1 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000004:port1 -> n0000000a:port0 [style=dashed]
+ n00000004:port1 -> n0000000d:port0 [style=dashed]
+ n00000004:port1 -> n00000010:port0 [style=dashed]
+ n00000004:port1 -> n00000013:port0 [style=dashed]
+ n00000007 [label="{{<port0> 0} | msm_csiphy2\n/dev/v4l-subdev2 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000007:port1 -> n0000000a:port0 [style=dashed]
+ n00000007:port1 -> n0000000d:port0 [style=dashed]
+ n00000007:port1 -> n00000010:port0 [style=dashed]
+ n00000007:port1 -> n00000013:port0 [style=dashed]
+ n0000000a [label="{{<port0> 0} | msm_csid0\n/dev/v4l-subdev3 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000000a:port1 -> n00000016:port0 [style=dashed]
+ n0000000a:port1 -> n00000019:port0 [style=dashed]
+ n0000000a:port1 -> n0000001c:port0 [style=dashed]
+ n0000000a:port1 -> n0000001f:port0 [style=dashed]
+ n0000000d [label="{{<port0> 0} | msm_csid1\n/dev/v4l-subdev4 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000000d:port1 -> n00000016:port0 [style=dashed]
+ n0000000d:port1 -> n00000019:port0 [style=dashed]
+ n0000000d:port1 -> n0000001c:port0 [style=dashed]
+ n0000000d:port1 -> n0000001f:port0 [style=dashed]
+ n00000010 [label="{{<port0> 0} | msm_csid2\n/dev/v4l-subdev5 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000010:port1 -> n00000016:port0 [style=dashed]
+ n00000010:port1 -> n00000019:port0 [style=dashed]
+ n00000010:port1 -> n0000001c:port0 [style=dashed]
+ n00000010:port1 -> n0000001f:port0 [style=dashed]
+ n00000013 [label="{{<port0> 0} | msm_csid3\n/dev/v4l-subdev6 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000013:port1 -> n00000016:port0 [style=dashed]
+ n00000013:port1 -> n00000019:port0 [style=dashed]
+ n00000013:port1 -> n0000001c:port0 [style=dashed]
+ n00000013:port1 -> n0000001f:port0 [style=dashed]
+ n00000016 [label="{{<port0> 0} | msm_ispif0\n/dev/v4l-subdev7 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000016:port1 -> n00000022:port0 [style=dashed]
+ n00000016:port1 -> n0000002b:port0 [style=dashed]
+ n00000016:port1 -> n00000034:port0 [style=dashed]
+ n00000016:port1 -> n0000003d:port0 [style=dashed]
+ n00000016:port1 -> n00000046:port0 [style=dashed]
+ n00000016:port1 -> n0000004f:port0 [style=dashed]
+ n00000016:port1 -> n00000058:port0 [style=dashed]
+ n00000016:port1 -> n00000061:port0 [style=dashed]
+ n00000019 [label="{{<port0> 0} | msm_ispif1\n/dev/v4l-subdev8 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000019:port1 -> n00000022:port0 [style=dashed]
+ n00000019:port1 -> n0000002b:port0 [style=dashed]
+ n00000019:port1 -> n00000034:port0 [style=dashed]
+ n00000019:port1 -> n0000003d:port0 [style=dashed]
+ n00000019:port1 -> n00000046:port0 [style=dashed]
+ n00000019:port1 -> n0000004f:port0 [style=dashed]
+ n00000019:port1 -> n00000058:port0 [style=dashed]
+ n00000019:port1 -> n00000061:port0 [style=dashed]
+ n0000001c [label="{{<port0> 0} | msm_ispif2\n/dev/v4l-subdev9 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000001c:port1 -> n00000022:port0 [style=dashed]
+ n0000001c:port1 -> n0000002b:port0 [style=dashed]
+ n0000001c:port1 -> n00000034:port0 [style=dashed]
+ n0000001c:port1 -> n0000003d:port0 [style=dashed]
+ n0000001c:port1 -> n00000046:port0 [style=dashed]
+ n0000001c:port1 -> n0000004f:port0 [style=dashed]
+ n0000001c:port1 -> n00000058:port0 [style=dashed]
+ n0000001c:port1 -> n00000061:port0 [style=dashed]
+ n0000001f [label="{{<port0> 0} | msm_ispif3\n/dev/v4l-subdev10 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000001f:port1 -> n00000022:port0 [style=dashed]
+ n0000001f:port1 -> n0000002b:port0 [style=dashed]
+ n0000001f:port1 -> n00000034:port0 [style=dashed]
+ n0000001f:port1 -> n0000003d:port0 [style=dashed]
+ n0000001f:port1 -> n00000046:port0 [style=dashed]
+ n0000001f:port1 -> n0000004f:port0 [style=dashed]
+ n0000001f:port1 -> n00000058:port0 [style=dashed]
+ n0000001f:port1 -> n00000061:port0 [style=dashed]
+ n00000022 [label="{{<port0> 0} | msm_vfe0_rdi0\n/dev/v4l-subdev11 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000022:port1 -> n00000025 [style=bold]
+ n00000025 [label="msm_vfe0_video0\n/dev/video0", shape=box, style=filled, fillcolor=yellow]
+ n0000002b [label="{{<port0> 0} | msm_vfe0_rdi1\n/dev/v4l-subdev12 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000002b:port1 -> n0000002e [style=bold]
+ n0000002e [label="msm_vfe0_video1\n/dev/video1", shape=box, style=filled, fillcolor=yellow]
+ n00000034 [label="{{<port0> 0} | msm_vfe0_rdi2\n/dev/v4l-subdev13 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000034:port1 -> n00000037 [style=bold]
+ n00000037 [label="msm_vfe0_video2\n/dev/video2", shape=box, style=filled, fillcolor=yellow]
+ n0000003d [label="{{<port0> 0} | msm_vfe0_pix\n/dev/v4l-subdev14 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000003d:port1 -> n00000040 [style=bold]
+ n00000040 [label="msm_vfe0_video3\n/dev/video3", shape=box, style=filled, fillcolor=yellow]
+ n00000046 [label="{{<port0> 0} | msm_vfe1_rdi0\n/dev/v4l-subdev15 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000046:port1 -> n00000049 [style=bold]
+ n00000049 [label="msm_vfe1_video0\n/dev/video4", shape=box, style=filled, fillcolor=yellow]
+ n0000004f [label="{{<port0> 0} | msm_vfe1_rdi1\n/dev/v4l-subdev16 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000004f:port1 -> n00000052 [style=bold]
+ n00000052 [label="msm_vfe1_video1\n/dev/video5", shape=box, style=filled, fillcolor=yellow]
+ n00000058 [label="{{<port0> 0} | msm_vfe1_rdi2\n/dev/v4l-subdev17 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000058:port1 -> n0000005b [style=bold]
+ n0000005b [label="msm_vfe1_video2\n/dev/video6", shape=box, style=filled, fillcolor=yellow]
+ n00000061 [label="{{<port0> 0} | msm_vfe1_pix\n/dev/v4l-subdev18 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000061:port1 -> n00000064 [style=bold]
+ n00000064 [label="msm_vfe1_video3\n/dev/video7", shape=box, style=filled, fillcolor=yellow]
+ n000000e2 [label="{{} | ov5645 3-0039\n/dev/v4l-subdev19 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
+ n000000e2:port0 -> n00000004:port0 [style=bold]
+ n000000e4 [label="{{} | ov5645 3-003a\n/dev/v4l-subdev20 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
+ n000000e4:port0 -> n00000007:port0 [style=bold]
+ n000000e6 [label="{{} | ov5645 3-003b\n/dev/v4l-subdev21 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
+ n000000e6:port0 -> n00000001:port0 [style=bold]
+}
diff --git a/Documentation/media/video.h.rst.exceptions b/Documentation/media/video.h.rst.exceptions
index a91aa884ce0e..371cdbd7d062 100644
--- a/Documentation/media/video.h.rst.exceptions
+++ b/Documentation/media/video.h.rst.exceptions
@@ -34,7 +34,4 @@ replace typedef video_displayformat_t :c:type:`video_displayformat`
replace typedef video_size_t :c:type:`video_size`
replace typedef video_stream_source_t :c:type:`video_stream_source`
replace typedef video_play_state_t :c:type:`video_play_state`
-replace typedef video_highlight_t :c:type:`video_highlight`
-replace typedef video_spu_t :c:type:`video_spu`
-replace typedef video_spu_palette_t :c:type:`video_spu_palette`
replace typedef video_navi_pack_t :c:type:`video_navi_pack`
diff --git a/Documentation/media/videodev2.h.rst.exceptions b/Documentation/media/videodev2.h.rst.exceptions
index a5cb0a8686ac..ca9f0edc579e 100644
--- a/Documentation/media/videodev2.h.rst.exceptions
+++ b/Documentation/media/videodev2.h.rst.exceptions
@@ -517,7 +517,6 @@ ignore define V4L2_CTRL_WHICH_DEF_VAL
ignore define V4L2_OUT_CAP_CUSTOM_TIMINGS
ignore define V4L2_CID_MAX_CTRLS
-ignore ioctl VIDIOC_RESERVED
ignore define BASE_VIDIOC_PRIVATE
# Associate ioctls with their counterparts
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index a02d6bbfc9d0..0d8d7ef131e9 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -2179,32 +2179,41 @@ or:
event_indicated = 1;
wake_up_process(event_daemon);
-A write memory barrier is implied by wake_up() and co. if and only if they
-wake something up. The barrier occurs before the task state is cleared, and so
-sits between the STORE to indicate the event and the STORE to set TASK_RUNNING:
+A general memory barrier is executed by wake_up() if it wakes something up.
+If it doesn't wake anything up then a memory barrier may or may not be
+executed; you must not rely on it. The barrier occurs before the task state
+is accessed, in particular, it sits between the STORE to indicate the event
+and the STORE to set TASK_RUNNING:
- CPU 1 CPU 2
+ CPU 1 (Sleeper) CPU 2 (Waker)
=============================== ===============================
set_current_state(); STORE event_indicated
smp_store_mb(); wake_up();
- STORE current->state <write barrier>
- <general barrier> STORE current->state
- LOAD event_indicated
+ STORE current->state ...
+ <general barrier> <general barrier>
+ LOAD event_indicated if ((LOAD task->state) & TASK_NORMAL)
+ STORE task->state
-To repeat, this write memory barrier is present if and only if something
-is actually awakened. To see this, consider the following sequence of
-events, where X and Y are both initially zero:
+where "task" is the thread being woken up and it equals CPU 1's "current".
+
+To repeat, a general memory barrier is guaranteed to be executed by wake_up()
+if something is actually awakened, but otherwise there is no such guarantee.
+To see this, consider the following sequence of events, where X and Y are both
+initially zero:
CPU 1 CPU 2
=============================== ===============================
- X = 1; STORE event_indicated
+ X = 1; Y = 1;
smp_mb(); wake_up();
- Y = 1; wait_event(wq, Y == 1);
- wake_up(); load from Y sees 1, no memory barrier
- load from X might see 0
+ LOAD Y LOAD X
+
+If a wakeup does occur, one (at least) of the two loads must see 1. If, on
+the other hand, a wakeup does not occur, both loads might see 0.
-In contrast, if a wakeup does occur, CPU 2's load from X would be guaranteed
-to see 1.
+wake_up_process() always executes a general memory barrier. The barrier again
+occurs before the task state is accessed. In particular, if the wake_up() in
+the previous snippet were replaced by a call to wake_up_process() then one of
+the two loads would be guaranteed to see 1.
The available waker functions include:
@@ -2224,6 +2233,8 @@ The available waker functions include:
wake_up_poll();
wake_up_process();
+In terms of memory ordering, these functions all provide the same guarantees as
+a wake_up() (or stronger).
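+
+For illustration, a minimal sketch of the sleeper/waker pattern discussed
+above (schematic only; real code would normally use the higher-level
+wait_event()/wake_up() helpers, which embed the required barriers):
+
+	/* Sleeper */
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (event_indicated)
+			break;
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+
+	/* Waker: the general barrier implied by wake_up_process() orders
+	 * the STORE to event_indicated against the sleeper's state access.
+	 */
+	event_indicated = 1;
+	wake_up_process(event_daemon);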
[!] Note that the memory barriers implied by the sleeper and the waker do _not_
order multiple stores before the wake-up with respect to loads of those stored
diff --git a/Documentation/misc-devices/pci-endpoint-test.txt b/Documentation/misc-devices/pci-endpoint-test.txt
index 4ebc3594b32c..58ccca4416b1 100644
--- a/Documentation/misc-devices/pci-endpoint-test.txt
+++ b/Documentation/misc-devices/pci-endpoint-test.txt
@@ -10,6 +10,7 @@ The PCI driver for the test device performs the following tests
*) verifying addresses programmed in BAR
*) raise legacy IRQ
*) raise MSI IRQ
+ *) raise MSI-X IRQ
*) read data
*) write data
*) copy data
@@ -25,6 +26,11 @@ ioctl
PCITEST_LEGACY_IRQ: Tests legacy IRQ
PCITEST_MSI: Tests message signalled interrupts. The MSI number
to be tested should be passed as argument.
+ PCITEST_MSIX: Tests message signalled interrupts. The MSI-X number
+ to be tested should be passed as argument.
+ PCITEST_SET_IRQTYPE: Changes driver IRQ type configuration. The IRQ type
+ should be passed as argument (0: Legacy, 1:MSI, 2:MSI-X).
+ PCITEST_GET_IRQTYPE: Gets driver IRQ type configuration.
PCITEST_WRITE: Perform write tests. The size of the buffer should be passed
as argument.
PCITEST_READ: Perform read tests. The size of the buffer should be passed
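+
+For illustration, a minimal user-space sketch (the device node name is an
+example, and error handling is trimmed) that switches the driver to MSI-X
+and then tests MSI-X vector 1:
+
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <unistd.h>
+	#include <sys/ioctl.h>
+	#include <linux/pcitest.h>
+
+	int main(void)
+	{
+		int fd = open("/dev/pci-endpoint-test.0", O_RDWR);
+
+		if (fd < 0)
+			return 1;
+		/* 2 selects MSI-X, per the PCITEST_SET_IRQTYPE values above */
+		if (ioctl(fd, PCITEST_SET_IRQTYPE, 2) < 0)
+			perror("PCITEST_SET_IRQTYPE");
+		if (ioctl(fd, PCITEST_MSIX, 1) < 0)
+			perror("PCITEST_MSIX");
+		close(fd);
+		return 0;
+	}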
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 2b89d91b376f..02a323c43261 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -18,8 +18,6 @@ README.ipw2200
- README for the Intel PRO/Wireless 2915ABG and 2200BG driver.
README.sb1000
- info on General Instrument/NextLevel SURFboard1000 cable modem.
-alias.txt
- - info on using alias network devices.
altera_tse.txt
- Altera Triple-Speed Ethernet controller.
arcnet-hardware.txt
@@ -140,8 +138,6 @@ multiqueue.txt
- HOWTO for multiqueue network device support.
netconsole.txt
- The network console module netconsole.ko: configuration and notes.
-netdev-FAQ.txt
- - FAQ describing how to submit net changes to netdev mailing list.
netdev-features.txt
- Network interface features API description.
netdevices.txt
diff --git a/Documentation/networking/alias.rst b/Documentation/networking/alias.rst
new file mode 100644
index 000000000000..af7c5ee92014
--- /dev/null
+++ b/Documentation/networking/alias.rst
@@ -0,0 +1,49 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========
+IP-Aliasing
+===========
+
+IP-aliases are an obsolete way to manage multiple IP-addresses/masks
+per interface. Newer tools such as iproute2 support multiple
+address/prefixes per interface, but aliases are still supported
+for backwards compatibility.
+
+An alias is formed by adding a colon and a string when running ifconfig.
+This string is usually numeric, but this is not required.
+
+
+Alias creation
+==============
+
+Alias creation is done by 'magic' interface naming: e.g. to create a
+200.1.1.1 alias for eth0 ...
+::
+
+ # ifconfig eth0:0 200.1.1.1 etc,etc....
+ ~~ -> request alias #0 creation (if not yet exists) for eth0
+
+The corresponding route is also set up by this command. Please note:
+The route always points to the base interface.
+
+
+Alias deletion
+==============
+
+The alias is removed by shutting the alias down::
+
+ # ifconfig eth0:0 down
+ ~~~~~~~~~~ -> will delete alias
+
+
+Alias (re-)configuring
+======================
+
+Aliases are not real devices, but programs should be able to configure
+and refer to them as usual (ifconfig, route, etc).
+
+
+Relationship with main device
+=============================
+
+If the base device is shut down, the added aliases will be deleted too.
diff --git a/Documentation/networking/alias.txt b/Documentation/networking/alias.txt
deleted file mode 100644
index 85046f53fcfc..000000000000
--- a/Documentation/networking/alias.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-
-IP-Aliasing:
-============
-
-IP-aliases are an obsolete way to manage multiple IP-addresses/masks
-per interface. Newer tools such as iproute2 support multiple
-address/prefixes per interface, but aliases are still supported
-for backwards compatibility.
-
-An alias is formed by adding a colon and a string when running ifconfig.
-This string is usually numeric, but this is not a must.
-
-o Alias creation.
- Alias creation is done by 'magic' interface naming: eg. to create a
- 200.1.1.1 alias for eth0 ...
-
- # ifconfig eth0:0 200.1.1.1 etc,etc....
- ~~ -> request alias #0 creation (if not yet exists) for eth0
-
- The corresponding route is also set up by this command.
- Please note: The route always points to the base interface.
-
-
-o Alias deletion.
- The alias is removed by shutting the alias down:
-
- # ifconfig eth0:0 down
- ~~~~~~~~~~ -> will delete alias
-
-
-o Alias (re-)configuring
-
- Aliases are not real devices, but programs should be able to configure and
- refer to them as usual (ifconfig, route, etc).
-
-
-o Relationship with main device
-
- If the base device is shut down the added aliases will be deleted
- too.
diff --git a/Documentation/networking/bridge.txt b/Documentation/networking/bridge.rst
index a27cb6214ed7..4aef9cddde2f 100644
--- a/Documentation/networking/bridge.txt
+++ b/Documentation/networking/bridge.rst
@@ -1,3 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+Ethernet Bridging
+=================
+
In order to use the Ethernet bridging functionality, you'll need the
userspace tools.
diff --git a/Documentation/networking/can_ucan_protocol.rst b/Documentation/networking/can_ucan_protocol.rst
new file mode 100644
index 000000000000..4cef88d24fc7
--- /dev/null
+++ b/Documentation/networking/can_ucan_protocol.rst
@@ -0,0 +1,332 @@
+=================
+The UCAN Protocol
+=================
+
+UCAN is the protocol used by the microcontroller-based USB-CAN
+adapter that is integrated on System-on-Modules from Theobroma Systems
+and that is also available as a standalone USB stick.
+
+The UCAN protocol has been designed to be hardware-independent.
+It is modeled closely after how Linux represents CAN devices
+internally. All multi-byte integers are encoded as Little Endian.
+
+All structures mentioned in this document are defined in
+``drivers/net/can/usb/ucan.c``.
+
+USB Endpoints
+=============
+
+UCAN devices use three USB endpoints:
+
+CONTROL endpoint
+ The driver sends device management commands on this endpoint
+
+IN endpoint
+ The device sends CAN data frames and CAN error frames
+
+OUT endpoint
+ The driver sends CAN data frames on the out endpoint
+
+
+CONTROL Messages
+================
+
+UCAN devices are configured using vendor requests on the control pipe.
+
+To support multiple CAN interfaces in a single USB device all
+configuration commands target the corresponding interface in the USB
+descriptor.
+
+The driver uses ``ucan_ctrl_command_in/out`` and
+``ucan_device_request_in`` to deliver commands to the device.
+
+Setup Packet
+------------
+
+================= =====================================================
+``bmRequestType`` Direction | Vendor | (Interface or Device)
+``bRequest`` Command Number
+``wValue`` Subcommand Number (16 Bit) or 0 if not used
+``wIndex`` USB Interface Index (0 for device commands)
+``wLength``       * Host to Device - Number of bytes to transmit
+                  * Device to Host - Maximum number of bytes to
+                    receive. If the device sends less, common ZLP
+                    semantics are used.
+================= =====================================================
+
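+For illustration, a sketch of how such a vendor request could be issued
+on the driver side (``udev``, ``cmd``, ``subcmd``, ``intf_index``,
+``buf`` and ``len`` are placeholders, and the timeout is an example
+value)::
+
+    ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                          cmd,                         /* bRequest */
+                          USB_DIR_OUT | USB_TYPE_VENDOR |
+                          USB_RECIP_INTERFACE,         /* bmRequestType */
+                          subcmd,                      /* wValue */
+                          intf_index,                  /* wIndex */
+                          buf, len,                    /* data, wLength */
+                          1000);                       /* timeout in ms */
+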
+Error Handling
+--------------
+
+The device indicates failed control commands by stalling the
+pipe.
+
+Device Commands
+---------------
+
+UCAN_DEVICE_GET_FW_STRING
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+*Dev2Host; optional*
+
+Request the device firmware string.
+
+
+Interface Commands
+------------------
+
+UCAN_COMMAND_START
+~~~~~~~~~~~~~~~~~~
+
+*Host2Dev; mandatory*
+
+Bring the CAN interface up.
+
+Payload Format
+ ``ucan_ctl_payload_t.cmd_start``
+
+==== ============================
+mode or mask of ``UCAN_MODE_*``
+==== ============================
+
+UCAN_COMMAND_STOP
+~~~~~~~~~~~~~~~~~~
+
+*Host2Dev; mandatory*
+
+Stop the CAN interface
+
+Payload Format
+ *empty*
+
+UCAN_COMMAND_RESET
+~~~~~~~~~~~~~~~~~~
+
+*Host2Dev; mandatory*
+
+Reset the CAN controller (including error counters)
+
+Payload Format
+ *empty*
+
+UCAN_COMMAND_GET
+~~~~~~~~~~~~~~~~
+
+*Host2Dev; mandatory*
+
+Get Information from the Device
+
+Subcommands
+^^^^^^^^^^^
+
+UCAN_COMMAND_GET_INFO
+ Request the device information structure ``ucan_ctl_payload_t.device_info``.
+
+ See the ``device_info`` field for details, and
+ ``uapi/linux/can/netlink.h`` for an explanation of the
+ ``can_bittiming fields``.
+
+ Payload Format
+ ``ucan_ctl_payload_t.device_info``
+
+UCAN_COMMAND_GET_PROTOCOL_VERSION
+ Request the device protocol version
+ ``ucan_ctl_payload_t.protocol_version``. The current protocol version is 3.
+
+ Payload Format
+ ``ucan_ctl_payload_t.protocol_version``
+
+.. note:: Devices that do not implement this command use the old
+ protocol version 1
+
+UCAN_COMMAND_SET_BITTIMING
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+*Host2Dev; mandatory*
+
+Set up bittiming by sending the structure
+``ucan_ctl_payload_t.cmd_set_bittiming`` (see ``struct bittiming`` for
+details)
+
+Payload Format
+ ``ucan_ctl_payload_t.cmd_set_bittiming``.
+
+UCAN_SLEEP/WAKE
+~~~~~~~~~~~~~~~
+
+*Host2Dev; optional*
+
+Configure sleep and wake modes. Not yet supported by the driver.
+
+UCAN_FILTER
+~~~~~~~~~~~
+
+*Host2Dev; optional*
+
+Setup hardware CAN filters. Not yet supported by the driver.
+
+Allowed interface commands
+--------------------------
+
+================== =================== ==================
+Legal Device State Command New Device State
+================== =================== ==================
+stopped SET_BITTIMING stopped
+stopped START started
+started STOP or RESET stopped
+stopped STOP or RESET stopped
+started RESTART started
+any GET *no change*
+================== =================== ==================
+
+IN Message Format
+=================
+
+A data packet on the USB IN endpoint contains one or more
+``ucan_message_in`` values. If multiple messages are batched in a USB
+data packet, the ``len`` field can be used to jump to the next
+``ucan_message_in`` value (take care to sanity-check the ``len`` value
+against the actual data size).
+
+.. _can_ucan_in_message_len:
+
+``len`` field
+-------------
+
+Each ``ucan_message_in`` must be aligned to a 4-byte boundary (relative
+to the start of the data buffer). That means that there
+may be padding bytes between multiple ``ucan_message_in`` values:
+
+.. code::
+
+ +----------------------------+ < 0
+ | |
+ | struct ucan_message_in |
+ | |
+ +----------------------------+ < len
+ [padding]
+ +----------------------------+ < round_up(len, 4)
+ | |
+ | struct ucan_message_in |
+ | |
+ +----------------------------+
+ [...]
+
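+For illustration, a sketch of how a receiver could walk such a batch
+(``UCAN_IN_HDR_SIZE`` and ``handle_message()`` are hypothetical names,
+not taken from the driver)::
+
+    u8 *ptr = data;
+    u8 *end = data + data_len;
+
+    while (ptr + UCAN_IN_HDR_SIZE <= end) {
+        struct ucan_message_in *msg = (struct ucan_message_in *)ptr;
+        u16 len = le16_to_cpu(msg->len);
+
+        /* sanity-check len against the remaining buffer space */
+        if (len < UCAN_IN_HDR_SIZE || len > end - ptr)
+            break;
+        handle_message(msg);
+        /* the next message starts at the next 4-byte boundary */
+        ptr += round_up(len, 4);
+    }
+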
+``type`` field
+--------------
+
+The ``type`` field specifies the type of the message.
+
+UCAN_IN_RX
+~~~~~~~~~~
+
+``subtype``
+ zero
+
+Data received from the CAN bus (ID + payload).
+
+UCAN_IN_TX_COMPLETE
+~~~~~~~~~~~~~~~~~~~
+
+``subtype``
+ zero
+
+The CAN device has sent a message to the CAN bus. It answers with a
+list of tuples <echo-ids, flags>.
+
+The echo-id identifies the frame (it echoes the id from a previous
+UCAN_OUT_TX message). The flags indicate the result of the
+transmission: a set bit 0 indicates success. All other bits
+are reserved and set to zero.
+
+Flow Control
+------------
+
+When receiving CAN messages there is no flow control on the USB
+buffer. The driver has to handle inbound messages quickly enough to
+avoid drops. In case the device buffer overflows, the condition is
+reported by sending corresponding error frames (see
+:ref:`can_ucan_error_handling`).
+
+
+OUT Message Format
+==================
+
+A data packet on the USB OUT endpoint contains one or more ``struct
+ucan_message_out`` values. If multiple messages are batched into one
+data packet, the device uses the ``len`` field to jump to the next
+ucan_message_out value. Each ucan_message_out must be aligned to 4
+bytes (relative to the start of the data buffer). The mechanism is the
+same as described in :ref:`can_ucan_in_message_len`.
+
+.. code::
+
+ +----------------------------+ < 0
+ | |
+ | struct ucan_message_out |
+ | |
+ +----------------------------+ < len
+ [padding]
+ +----------------------------+ < round_up(len, 4)
+ | |
+ | struct ucan_message_out |
+ | |
+ +----------------------------+
+ [...]
+
+``type`` field
+--------------
+
+In protocol version 3 only ``UCAN_OUT_TX`` is defined; the others are
+used only by legacy devices (protocol version 1).
+
+UCAN_OUT_TX
+~~~~~~~~~~~
+``subtype``
+    echo id to be replied within a UCAN_IN_TX_COMPLETE message
+
+Transmit a CAN frame. (parameters: ``id``, ``data``)
+
+Flow Control
+------------
+
+When the device's outbound buffers are full it starts sending *NAKs* on
+the *OUT* pipe until more buffers are available. The driver stops the
+queue when a certain threshold of outgoing packets is still incomplete.
+
+.. _can_ucan_error_handling:
+
+CAN Error Handling
+==================
+
+If error reporting is turned on, the device encodes errors into CAN
+error frames (see ``uapi/linux/can/error.h``) and sends them using the
+IN endpoint. The driver updates its error statistics and forwards
+them.
+
+Although UCAN devices can suppress error frames completely, in Linux
+the driver is always interested. Hence, the device is always started with
+``UCAN_MODE_BERR_REPORT`` set. Filtering those messages for
+user space is done by the driver.
+
+Bus OFF
+-------
+
+- The device does not recover from Bus OFF automatically.
+- Bus OFF is indicated by an error frame (see ``uapi/linux/can/error.h``)
+- Bus OFF recovery is started by ``UCAN_COMMAND_RESTART``
+- Once Bus OFF recovery is completed the device sends an error frame
+  indicating that it is in ERROR-ACTIVE state.
+- During Bus OFF no frames are sent by the device.
+- During Bus OFF, transmission requests from the host are completed
+  immediately with the success bit left unset.
+
+Example Conversation
+====================
+
+#) Device is connected to USB
+#) Host sends command ``UCAN_COMMAND_RESET``, subcmd 0
+#) Host sends command ``UCAN_COMMAND_GET``, subcmd ``UCAN_COMMAND_GET_INFO``
+#) Device sends ``UCAN_IN_DEVICE_INFO``
+#) Host sends command ``UCAN_OUT_SET_BITTIMING``
+#) Host sends command ``UCAN_COMMAND_START``, subcmd 0, mode ``UCAN_MODE_BERR_REPORT``
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index fec8588a588e..fcd710f2cc7a 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -6,15 +6,21 @@ Contents:
.. toctree::
:maxdepth: 2
+ netdev-FAQ
af_xdp
batman-adv
can
+ can_ucan_protocol
dpaa2/index
e100
e1000
kapi
z8530book
msg_zerocopy
+ failover
+ net_failover
+ alias
+ bridge
.. only:: subproject
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ce8fbf5aa63c..8313a636dd53 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -81,6 +81,15 @@ fib_multipath_hash_policy - INTEGER
0 - Layer 3
1 - Layer 4
+ip_forward_update_priority - INTEGER
+ Whether to update SKB priority from "TOS" field in IPv4 header after it
+ is forwarded. The new SKB priority is mapped from TOS field value
+ according to an rt_tos2priority table (see e.g. man tc-prio).
+ Default: 1 (Update priority.)
+ Possible values:
+ 0 - Do not update priority.
+ 1 - Update priority.
+
route/max_size - INTEGER
Maximum number of routes allowed in the kernel. Increase
this when using large numbers of interfaces and/or routes.
@@ -733,11 +742,11 @@ tcp_limit_output_bytes - INTEGER
Controls TCP Small Queue limit per tcp socket.
TCP bulk sender tends to increase packets in flight until it
gets losses notifications. With SNDBUF autotuning, this can
- result in a large amount of packets queued in qdisc/device
- on the local machine, hurting latency of other flows, for
- typical pfifo_fast qdiscs.
- tcp_limit_output_bytes limits the number of bytes on qdisc
- or device to reduce artificial RTT/cwnd and reduce bufferbloat.
+ result in a large amount of packets queued on the local machine
+ (e.g.: qdiscs, CPU backlog, or device) hurting latency of other
+ flows, for typical pfifo_fast qdiscs. tcp_limit_output_bytes
+ limits the number of bytes on qdisc or device to reduce artificial
+ RTT/cwnd and reduce bufferbloat.
Default: 262144
tcp_challenge_ack_limit - INTEGER
@@ -1834,6 +1843,16 @@ stable_secret - IPv6 address
By default the stable secret is unset.
+addr_gen_mode - INTEGER
+ Defines how link-local and autoconf addresses are generated.
+
+ 0: generate address based on EUI64 (default)
+	1: do not generate a link-local address, use EUI64 for addresses generated
+ from autoconf
+ 2: generate stable privacy addresses, using the secret from
+ stable_secret (RFC7217)
+ 3: generate stable privacy addresses, using a random secret if unset
+
drop_unicast_in_l2_multicast - BOOLEAN
Drop any unicast IPv6 packets that are received in link-layer
multicast (or broadcast) frames.
@@ -1863,6 +1882,11 @@ ratelimit - INTEGER
otherwise the minimal space between responses in milliseconds.
Default: 1000
+echo_ignore_all - BOOLEAN
+ If set non-zero, then the kernel will ignore all ICMP ECHO
+ requests sent to it over the IPv6 protocol.
+ Default: 0
+
xfrm6_gc_thresh - INTEGER
The threshold at which we will start garbage collecting for IPv6
destination cache entries. At twice this value the system will
diff --git a/Documentation/networking/net_failover.rst b/Documentation/networking/net_failover.rst
index 70ca2f5800c4..06c97dcb57ca 100644
--- a/Documentation/networking/net_failover.rst
+++ b/Documentation/networking/net_failover.rst
@@ -36,37 +36,39 @@ feature on the virtio-net interface and assign the same MAC address to both
virtio-net and VF interfaces.
Here is an example XML snippet that shows such configuration.
-
- <interface type='network'>
- <mac address='52:54:00:00:12:53'/>
- <source network='enp66s0f0_br'/>
- <target dev='tap01'/>
- <model type='virtio'/>
- <driver name='vhost' queues='4'/>
- <link state='down'/>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x0a' function='0x0'/>
- </interface>
- <interface type='hostdev' managed='yes'>
- <mac address='52:54:00:00:12:53'/>
- <source>
- <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/>
- </source>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/>
- </interface>
+::
+
+ <interface type='network'>
+ <mac address='52:54:00:00:12:53'/>
+ <source network='enp66s0f0_br'/>
+ <target dev='tap01'/>
+ <model type='virtio'/>
+ <driver name='vhost' queues='4'/>
+ <link state='down'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x0a' function='0x0'/>
+ </interface>
+ <interface type='hostdev' managed='yes'>
+ <mac address='52:54:00:00:12:53'/>
+ <source>
+ <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/>
+ </source>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/>
+ </interface>
Booting a VM with the above configuration will result in the following 3
netdevs created in the VM.
-
-4: ens10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
- link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
- inet 192.168.12.53/24 brd 192.168.12.255 scope global dynamic ens10
- valid_lft 42482sec preferred_lft 42482sec
- inet6 fe80::97d8:db2:8c10:b6d6/64 scope link
- valid_lft forever preferred_lft forever
-5: ens10nsby: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel master ens10 state UP group default qlen 1000
- link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
-7: ens11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ens10 state UP group default qlen 1000
- link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
+::
+
+ 4: ens10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
+ link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
+ inet 192.168.12.53/24 brd 192.168.12.255 scope global dynamic ens10
+ valid_lft 42482sec preferred_lft 42482sec
+ inet6 fe80::97d8:db2:8c10:b6d6/64 scope link
+ valid_lft forever preferred_lft forever
+ 5: ens10nsby: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel master ens10 state UP group default qlen 1000
+ link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
+ 7: ens11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ens10 state UP group default qlen 1000
+ link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
ens10 is the 'failover' master netdev, ens10nsby and ens11 are the slave
'standby' and 'primary' netdevs respectively.
@@ -80,37 +82,38 @@ the paravirtual datapath when the VF is unplugged.
Here is a sample script that shows the steps to initiate live migration on
the source hypervisor.
+::
-# cat vf_xml
-<interface type='hostdev' managed='yes'>
- <mac address='52:54:00:00:12:53'/>
- <source>
- <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/>
- </source>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/>
-</interface>
+ # cat vf_xml
+ <interface type='hostdev' managed='yes'>
+ <mac address='52:54:00:00:12:53'/>
+ <source>
+ <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/>
+ </source>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/>
+ </interface>
-# Source Hypervisor
-#!/bin/bash
+ # Source Hypervisor
+ #!/bin/bash
-DOMAIN=fedora27-tap01
-PF=enp66s0f0
-VF_NUM=5
-TAP_IF=tap01
-VF_XML=
+ DOMAIN=fedora27-tap01
+ PF=enp66s0f0
+ VF_NUM=5
+ TAP_IF=tap01
+ VF_XML=
-MAC=52:54:00:00:12:53
-ZERO_MAC=00:00:00:00:00:00
+ MAC=52:54:00:00:12:53
+ ZERO_MAC=00:00:00:00:00:00
-virsh domif-setlink $DOMAIN $TAP_IF up
-bridge fdb del $MAC dev $PF master
-virsh detach-device $DOMAIN $VF_XML
-ip link set $PF vf $VF_NUM mac $ZERO_MAC
+ virsh domif-setlink $DOMAIN $TAP_IF up
+ bridge fdb del $MAC dev $PF master
+ virsh detach-device $DOMAIN $VF_XML
+ ip link set $PF vf $VF_NUM mac $ZERO_MAC
-virsh migrate --live $DOMAIN qemu+ssh://$REMOTE_HOST/system
+ virsh migrate --live $DOMAIN qemu+ssh://$REMOTE_HOST/system
-# Destination Hypervisor
-#!/bin/bash
+ # Destination Hypervisor
+ #!/bin/bash
-virsh attach-device $DOMAIN $VF_XML
-virsh domif-setlink $DOMAIN $TAP_IF down
+ virsh attach-device $DOMAIN $VF_XML
+ virsh domif-setlink $DOMAIN $TAP_IF down
diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst
new file mode 100644
index 000000000000..0ac5fa77f501
--- /dev/null
+++ b/Documentation/networking/netdev-FAQ.rst
@@ -0,0 +1,259 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _netdev-FAQ:
+
+==========
+netdev FAQ
+==========
+
+Q: What is netdev?
+------------------
+A: It is a mailing list for all network-related Linux stuff. This
+includes anything found under net/ (i.e. core code like IPv6) and
+drivers/net (i.e. hardware specific drivers) in the Linux source tree.
+
+Note that some subsystems (e.g. wireless drivers) which have a high
+volume of traffic have their own specific mailing lists.
+
+The netdev list is managed (like many other Linux mailing lists) through
+VGER (http://vger.kernel.org/) and archives can be found below:
+
+- http://marc.info/?l=linux-netdev
+- http://www.spinics.net/lists/netdev/
+
+Aside from subsystems like those mentioned above, all network-related
+Linux development (i.e. RFC, review, comments, etc.) takes place on
+netdev.
+
+Q: How do the changes posted to netdev make their way into Linux?
+-----------------------------------------------------------------
+A: There are always two trees (git repositories) in play. Both are
+driven by David Miller, the main network maintainer. There is the
+``net`` tree, and the ``net-next`` tree. As you can probably guess from
+the names, the ``net`` tree is for fixes to existing code already in the
+mainline tree from Linus, and ``net-next`` is where the new code goes
+for the future release. You can find the trees here:
+
+- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+
+Q: How often do changes from these trees make it to the mainline Linus tree?
+----------------------------------------------------------------------------
+A: To understand this, you need to know a bit of background information on
+the cadence of Linux development. Each new release starts off with a
+two week "merge window" where the main maintainers feed their new stuff
+to Linus for merging into the mainline tree. After the two weeks, the
+merge window is closed, and it is called/tagged ``-rc1``. No new
+features get mainlined after this -- only fixes to the rc1 content are
+expected. After roughly a week of collecting fixes to the rc1 content,
+rc2 is released. This repeats on a roughly weekly basis until rc7
+(typically; sometimes rc6 if things are quiet, or rc8 if things are in a
+state of churn), and a week after the last vX.Y-rcN was done, the
+official vX.Y is released.
+
+Relating that to netdev: At the beginning of the 2-week merge window,
+the ``net-next`` tree will be closed - no new changes/features. The
+accumulated new content of the past ~10 weeks will be passed onto
+mainline/Linus via a pull request for vX.Y -- at the same time, the
+``net`` tree will start accumulating fixes for this pulled content
+relating to vX.Y
+
+An announcement indicating when ``net-next`` has been closed is usually
+sent to netdev, but knowing the above, you can predict that in advance.
+
+IMPORTANT: Do not send new ``net-next`` content to netdev during the
+period during which ``net-next`` tree is closed.
+
+Shortly after the two weeks have passed (and vX.Y-rc1 is released), the
+tree for ``net-next`` reopens to collect content for the next (vX.Y+1)
+release.
+
+If you aren't subscribed to netdev and/or are simply unsure if
+``net-next`` has re-opened yet, simply check the ``net-next`` git
+repository link above for any new networking-related commits. You may
+also check the following website for the current status:
+
+ http://vger.kernel.org/~davem/net-next.html
+
+The ``net`` tree continues to collect fixes for the vX.Y content, and is
+fed back to Linus at regular (~weekly) intervals. Meaning that the
+focus for ``net`` is on stabilization and bug fixes.
+
+Finally, the vX.Y gets released, and the whole cycle starts over.
+
+Q: So where are we now in this cycle?
+-------------------------------------
+A: Load the mainline (Linus) page here:
+
+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+and note the top of the "tags" section. If it is rc1, it is early in
+the dev cycle. If it was tagged rc7 a week ago, then a release is
+probably imminent.
+
+Q: How do I indicate which tree (net vs. net-next) my patch should be in?
+-------------------------------------------------------------------------
+A: Firstly, think whether you have a bug fix or new "next-like" content.
+Then once decided, assuming that you use git, use the prefix flag, i.e.
+::
+
+ git format-patch --subject-prefix='PATCH net-next' start..finish
+
+Use ``net`` instead of ``net-next`` (always lower case) in the above for
+bug-fix ``net`` content. If you don't use git, then note the only magic
+in the above is just the subject text of the outgoing e-mail, and you
+can manually change it yourself with whatever MUA you are comfortable
+with.
+
+Q: I sent a patch and I'm wondering what happened to it?
+--------------------------------------------------------
+Q: How can I tell whether it got merged?
+A: Start by looking at the main patchworks queue for netdev:
+
+ http://patchwork.ozlabs.org/project/netdev/list/
+
+The "State" field will tell you exactly where things are at with your
+patch.
+
+Q: The above only says "Under Review". How can I find out more?
+----------------------------------------------------------------
+A: Generally speaking, the patches get triaged quickly (in less than
+48h). So be patient. Asking the maintainer for status updates on your
+patch is a good way to ensure your patch is ignored or pushed to the
+bottom of the priority list.
+
+Q: I submitted multiple versions of the patch series
+----------------------------------------------------
+Q: Should I directly update patchwork for the previous versions of these
+patch series?
+A: No, please don't interfere with the patch status on patchwork, leave
+it to the maintainer to figure out what is the most recent and current
+version that should be applied. If there is any doubt, the maintainer
+will reply and ask what should be done.
+
+Q: How can I tell what patches are queued up for backporting to the various stable releases?
+--------------------------------------------------------------------------------------------
+A: Normally Greg Kroah-Hartman collects stable commits himself, but for
+networking, Dave collects up patches he deems critical for the
+networking subsystem, and then hands them off to Greg.
+
+There is a patchworks queue that you can see here:
+
+ http://patchwork.ozlabs.org/bundle/davem/stable/?state=*
+
+It contains the patches which Dave has selected, but not yet handed off
+to Greg. If Greg already has the patch, then it will be here:
+
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
+
+A quick way to find whether the patch is in this stable-queue is to
+simply clone the repo, and then git grep the mainline commit ID, e.g.
+::
+
+ stable-queue$ git grep -l 284041ef21fdf2e
+ releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+ releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+ releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+ stable/stable-queue$
+
+Q: I see a network patch and I think it should be backported to stable.
+-----------------------------------------------------------------------
+Q: Should I request it via stable@vger.kernel.org like the references in
+the kernel's Documentation/process/stable-kernel-rules.rst file say?
+A: No, not for networking. Check the stable queues as per above first
+to see if it is already queued. If not, then send a mail to netdev,
+listing the upstream commit ID and why you think it should be a stable
+candidate.
+
+Before you jump to go do the above, do note that the normal stable rules
+in :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
+still apply. So you need to explicitly indicate why it is a critical
+fix and exactly what users are impacted. In addition, you need to
+convince yourself that you *really* think it has been overlooked,
+vs. having been considered and rejected.
+
+Generally speaking, the longer it has had a chance to "soak" in
+mainline, the better the odds that it is an OK candidate for stable. So
+scrambling to request a commit be added the day after it appears should
+be avoided.
+
+Q: I have created a network patch and I think it should be backported to stable.
+--------------------------------------------------------------------------------
+Q: Should I add a Cc: stable@vger.kernel.org like the references in the
+kernel's Documentation/ directory say?
+A: No. See above answer. In short, if you think it really belongs in
+stable, then ensure you write a decent commit log that describes who
+gets impacted by the bug fix and how it manifests itself, and when the
+bug was introduced. If you do that properly, then the commit will get
+handled appropriately and most likely get put in the patchworks stable
+queue if it really warrants it.
+
+If you think there is some valid information relating to it being in
+stable that does *not* belong in the commit log, then use the three dash
+marker line as described in
+:ref:`Documentation/process/submitting-patches.rst <the_canonical_patch_format>`
+to temporarily embed that information into the patch that you send.
+
+Q: Are all networking bug fixes backported to all stable releases?
+------------------------------------------------------------------
+A: Due to capacity, Dave could only take care of the backports for the
+last two stable releases. For earlier stable releases, each stable
+branch maintainer is supposed to take care of them. If you find any
+patch is missing from an earlier stable branch, please notify
+stable@vger.kernel.org with either a commit ID or a formal patch
+backported, and CC Dave and other relevant networking developers.
+
+Q: Is the comment style convention different for the networking content?
+------------------------------------------------------------------------
+A: Yes, in a largely trivial way. Instead of this::
+
+ /*
+ * foobar blah blah blah
+ * another line of text
+ */
+
+it is requested that you make it look like this::
+
+ /* foobar blah blah blah
+ * another line of text
+ */
+
+Q: I am working in existing code that has the former comment style and not the latter.
+--------------------------------------------------------------------------------------
+Q: Should I submit new code in the former style or the latter?
+A: Make it the latter style, so that eventually all code in the domain
+of netdev is of this format.
+
+Q: I found a bug that might have possible security implications or similar.
+---------------------------------------------------------------------------
+Q: Should I mail the main netdev maintainer off-list?
+A: No. The current netdev maintainer has consistently requested that
+people use the mailing lists and not reach out directly. If you aren't
+OK with that, then perhaps consider mailing security@kernel.org or
+reading about http://oss-security.openwall.org/wiki/mailing-lists/distros
+as possible alternative mechanisms.
+
+Q: What level of testing is expected before I submit my change?
+---------------------------------------------------------------
+A: If your changes are against ``net-next``, the expectation is that you
+have tested by layering your changes on top of ``net-next``. Ideally
+you will have done run-time testing specific to your change, but at a
+minimum, your changes should survive an ``allyesconfig`` and an
+``allmodconfig`` build without new warnings or failures.
+
+Q: Any other tips to help ensure my net/net-next patch gets OK'd?
+-----------------------------------------------------------------
+A: Attention to detail. Re-read your own work as if you were the
+reviewer. You can start with using ``checkpatch.pl``, perhaps even with
+the ``--strict`` flag. But do not be mindlessly robotic in doing so.
+If your change is a bug fix, make sure your commit log indicates the
+end-user visible symptom, the underlying reason as to why it happens,
+and then if necessary, explain why the fix proposed is the best way to
+get things done. Don't mangle whitespace, and as is common, don't
+mis-indent function arguments that span multiple lines. If it is your
+first patch, mail it to yourself so you can test apply it to an
+unpatched tree to confirm infrastructure didn't mangle it.
+
+Finally, go back and read
+:ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
+to be sure you are not repeating some common mistake documented there.
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
deleted file mode 100644
index fa951b820b25..000000000000
--- a/Documentation/networking/netdev-FAQ.txt
+++ /dev/null
@@ -1,244 +0,0 @@
-
-Information you need to know about netdev
------------------------------------------
-
-Q: What is netdev?
-
-A: It is a mailing list for all network-related Linux stuff. This includes
- anything found under net/ (i.e. core code like IPv6) and drivers/net
- (i.e. hardware specific drivers) in the Linux source tree.
-
- Note that some subsystems (e.g. wireless drivers) which have a high volume
- of traffic have their own specific mailing lists.
-
- The netdev list is managed (like many other Linux mailing lists) through
- VGER ( http://vger.kernel.org/ ) and archives can be found below:
-
- http://marc.info/?l=linux-netdev
- http://www.spinics.net/lists/netdev/
-
- Aside from subsystems like that mentioned above, all network-related Linux
- development (i.e. RFC, review, comments, etc.) takes place on netdev.
-
-Q: How do the changes posted to netdev make their way into Linux?
-
-A: There are always two trees (git repositories) in play. Both are driven
- by David Miller, the main network maintainer. There is the "net" tree,
- and the "net-next" tree. As you can probably guess from the names, the
- net tree is for fixes to existing code already in the mainline tree from
- Linus, and net-next is where the new code goes for the future release.
- You can find the trees here:
-
- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
-
-Q: How often do changes from these trees make it to the mainline Linus tree?
-
-A: To understand this, you need to know a bit of background information
- on the cadence of Linux development. Each new release starts off with
- a two week "merge window" where the main maintainers feed their new
- stuff to Linus for merging into the mainline tree. After the two weeks,
- the merge window is closed, and it is called/tagged "-rc1". No new
- features get mainlined after this -- only fixes to the rc1 content
- are expected. After roughly a week of collecting fixes to the rc1
- content, rc2 is released. This repeats on a roughly weekly basis
- until rc7 (typically; sometimes rc6 if things are quiet, or rc8 if
- things are in a state of churn), and a week after the last vX.Y-rcN
- was done, the official "vX.Y" is released.
-
- Relating that to netdev: At the beginning of the 2-week merge window,
- the net-next tree will be closed - no new changes/features. The
- accumulated new content of the past ~10 weeks will be passed onto
- mainline/Linus via a pull request for vX.Y -- at the same time,
- the "net" tree will start accumulating fixes for this pulled content
- relating to vX.Y
-
- An announcement indicating when net-next has been closed is usually
- sent to netdev, but knowing the above, you can predict that in advance.
-
- IMPORTANT: Do not send new net-next content to netdev during the
- period during which net-next tree is closed.
-
- Shortly after the two weeks have passed (and vX.Y-rc1 is released), the
- tree for net-next reopens to collect content for the next (vX.Y+1) release.
-
- If you aren't subscribed to netdev and/or are simply unsure if net-next
- has re-opened yet, simply check the net-next git repository link above for
- any new networking-related commits. You may also check the following
- website for the current status:
-
- http://vger.kernel.org/~davem/net-next.html
-
- The "net" tree continues to collect fixes for the vX.Y content, and
- is fed back to Linus at regular (~weekly) intervals. Meaning that the
- focus for "net" is on stabilization and bugfixes.
-
- Finally, the vX.Y gets released, and the whole cycle starts over.
-
-Q: So where are we now in this cycle?
-
-A: Load the mainline (Linus) page here:
-
- https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
-
- and note the top of the "tags" section. If it is rc1, it is early
- in the dev cycle. If it was tagged rc7 a week ago, then a release
- is probably imminent.
-
-Q: How do I indicate which tree (net vs. net-next) my patch should be in?
-
-A: Firstly, think whether you have a bug fix or new "next-like" content.
- Then once decided, assuming that you use git, use the prefix flag, i.e.
-
- git format-patch --subject-prefix='PATCH net-next' start..finish
-
- Use "net" instead of "net-next" (always lower case) in the above for
- bug-fix net content. If you don't use git, then note the only magic in
- the above is just the subject text of the outgoing e-mail, and you can
- manually change it yourself with whatever MUA you are comfortable with.
-
-Q: I sent a patch and I'm wondering what happened to it. How can I tell
- whether it got merged?
-
-A: Start by looking at the main patchworks queue for netdev:
-
- http://patchwork.ozlabs.org/project/netdev/list/
-
- The "State" field will tell you exactly where things are at with
- your patch.
-
-Q: The above only says "Under Review". How can I find out more?
-
-A: Generally speaking, the patches get triaged quickly (in less than 48h).
- So be patient. Asking the maintainer for status updates on your
- patch is a good way to ensure your patch is ignored or pushed to
- the bottom of the priority list.
-
-Q: I submitted multiple versions of the patch series, should I directly update
- patchwork for the previous versions of these patch series?
-
-A: No, please don't interfere with the patch status on patchwork, leave it to
- the maintainer to figure out what is the most recent and current version that
- should be applied. If there is any doubt, the maintainer will reply and ask
- what should be done.
-
-Q: How can I tell what patches are queued up for backporting to the
- various stable releases?
-
-A: Normally Greg Kroah-Hartman collects stable commits himself, but
- for networking, Dave collects up patches he deems critical for the
- networking subsystem, and then hands them off to Greg.
-
- There is a patchworks queue that you can see here:
- http://patchwork.ozlabs.org/bundle/davem/stable/?state=*
-
- It contains the patches which Dave has selected, but not yet handed
- off to Greg. If Greg already has the patch, then it will be here:
- https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
-
- A quick way to find whether the patch is in this stable-queue is
- to simply clone the repo, and then git grep the mainline commit ID, e.g.
-
- stable-queue$ git grep -l 284041ef21fdf2e
- releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
- releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
- releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
- stable/stable-queue$
-
-Q: I see a network patch and I think it should be backported to stable.
- Should I request it via "stable@vger.kernel.org" like the references in
- the kernel's Documentation/process/stable-kernel-rules.rst file say?
-
-A: No, not for networking. Check the stable queues as per above 1st to see
- if it is already queued. If not, then send a mail to netdev, listing
- the upstream commit ID and why you think it should be a stable candidate.
-
- Before you jump to go do the above, do note that the normal stable rules
- in Documentation/process/stable-kernel-rules.rst still apply. So you need to
- explicitly indicate why it is a critical fix and exactly what users are
- impacted. In addition, you need to convince yourself that you _really_
- think it has been overlooked, vs. having been considered and rejected.
-
- Generally speaking, the longer it has had a chance to "soak" in mainline,
- the better the odds that it is an OK candidate for stable. So scrambling
- to request a commit be added the day after it appears should be avoided.
-
-Q: I have created a network patch and I think it should be backported to
- stable. Should I add a "Cc: stable@vger.kernel.org" like the references
- in the kernel's Documentation/ directory say?
-
-A: No. See above answer. In short, if you think it really belongs in
- stable, then ensure you write a decent commit log that describes who
- gets impacted by the bugfix and how it manifests itself, and when the
- bug was introduced. If you do that properly, then the commit will
- get handled appropriately and most likely get put in the patchworks
- stable queue if it really warrants it.
-
- If you think there is some valid information relating to it being in
- stable that does _not_ belong in the commit log, then use the three
- dash marker line as described in Documentation/process/submitting-patches.rst to
- temporarily embed that information into the patch that you send.
-
-Q: Are all networking bug fixes backported to all stable releases?
-
-A: Due to capacity, Dave could only take care of the backports for the last
- 2 stable releases. For earlier stable releases, each stable branch maintainer
- is supposed to take care of them. If you find any patch is missing from an
- earlier stable branch, please notify stable@vger.kernel.org with either a
- commit ID or a formal patch backported, and CC Dave and other relevant
- networking developers.
-
-Q: Someone said that the comment style and coding convention is different
- for the networking content. Is this true?
-
-A: Yes, in a largely trivial way. Instead of this:
-
- /*
- * foobar blah blah blah
- * another line of text
- */
-
- it is requested that you make it look like this:
-
- /* foobar blah blah blah
- * another line of text
- */
-
-Q: I am working in existing code that has the former comment style and not the
- latter. Should I submit new code in the former style or the latter?
-
-A: Make it the latter style, so that eventually all code in the domain of
- netdev is of this format.
-
-Q: I found a bug that might have possible security implications or similar.
- Should I mail the main netdev maintainer off-list?
-
-A: No. The current netdev maintainer has consistently requested that people
- use the mailing lists and not reach out directly. If you aren't OK with
- that, then perhaps consider mailing "security@kernel.org" or reading about
- http://oss-security.openwall.org/wiki/mailing-lists/distros
- as possible alternative mechanisms.
-
-Q: What level of testing is expected before I submit my change?
-
-A: If your changes are against net-next, the expectation is that you
- have tested by layering your changes on top of net-next. Ideally you
- will have done run-time testing specific to your change, but at a
- minimum, your changes should survive an "allyesconfig" and an
- "allmodconfig" build without new warnings or failures.
-
-Q: Any other tips to help ensure my net/net-next patch gets OK'd?
-
-A: Attention to detail. Re-read your own work as if you were the
- reviewer. You can start with using checkpatch.pl, perhaps even
- with the "--strict" flag. But do not be mindlessly robotic in
- doing so. If your change is a bug-fix, make sure your commit log
- indicates the end-user visible symptom, the underlying reason as
- to why it happens, and then if necessary, explain why the fix proposed
- is the best way to get things done. Don't mangle whitespace, and as
- is common, don't mis-indent function arguments that span multiple lines.
- If it is your first patch, mail it to yourself so you can test apply
- it to an unpatched tree to confirm infrastructure didn't mangle it.
-
- Finally, go back and read Documentation/process/submitting-patches.rst to be
- sure you are not repeating some common mistake documented there.
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index f55639d71d35..b7056a8a0540 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -366,8 +366,13 @@ XPS: Transmit Packet Steering
Transmit Packet Steering is a mechanism for intelligently selecting
which transmit queue to use when transmitting a packet on a multi-queue
-device. To accomplish this, a mapping from CPU to hardware queue(s) is
-recorded. The goal of this mapping is usually to assign queues
+device. This can be accomplished by recording two kinds of maps, either
+a mapping of CPU to hardware queue(s) or a mapping of receive queue(s)
+to hardware transmit queue(s).
+
+1. XPS using CPUs map
+
+The goal of this mapping is usually to assign queues
exclusively to a subset of CPUs, where the transmit completions for
these queues are processed on a CPU within this set. This choice
provides two benefits. First, contention on the device queue lock is
@@ -377,15 +382,40 @@ transmit queue). Secondly, cache miss rate on transmit completion is
reduced, in particular for data cache lines that hold the sk_buff
structures.
-XPS is configured per transmit queue by setting a bitmap of CPUs that
-may use that queue to transmit. The reverse mapping, from CPUs to
-transmit queues, is computed and maintained for each network device.
-When transmitting the first packet in a flow, the function
-get_xps_queue() is called to select a queue. This function uses the ID
-of the running CPU as a key into the CPU-to-queue lookup table. If the
+2. XPS using receive queues map
+
+This mapping is used to pick transmit queue based on the receive
+queue(s) map configuration set by the administrator. A set of receive
+queues can be mapped to a set of transmit queues (many:many), although
+the common use case is a 1:1 mapping. This enables packets to be sent
+on the same queue association for transmit and receive. This is useful for
+busy-polling multi-threaded workloads where there are challenges in
+associating a given CPU to a given application thread. The application
+threads are not pinned to CPUs and each thread handles packets
+received on a single queue. The receive queue number is cached in the
+socket for the connection. In this model, sending the packets on the
+transmit queue corresponding to the associated receive queue helps keep
+the CPU overhead low. Transmit completion work is locked into
+the same queue-association that a given application is polling on. This
+avoids the overhead of triggering an interrupt on another CPU. When the
+application cleans up the packets during the busy poll, transmit completion
+may be processed along with it in the same thread context and so result in
+reduced latency.
+
+XPS is configured per transmit queue by setting a bitmap of
+CPUs/receive-queues that may use that queue to transmit. The reverse
+mapping, from CPUs to transmit queues or from receive-queues to transmit
+queues, is computed and maintained for each network device. When
+transmitting the first packet in a flow, the function get_xps_queue() is
+called to select a queue. This function uses the ID of the receive queue
+for the socket connection for a match in the receive queue-to-transmit queue
+lookup table. Alternatively, this function can also use the ID of the
+running CPU as a key into the CPU-to-queue lookup table. If the
ID matches a single queue, that is used for transmission. If multiple
queues match, one is selected by using the flow hash to compute an index
-into the set.
+into the set. When selecting the transmit queue based on receive queue(s)
+map, the transmit device is not validated against the receive device, as
+that would require an expensive lookup operation in the datapath.
The queue chosen for transmitting a particular flow is saved in the
corresponding socket structure for the flow (e.g. a TCP connection).
@@ -404,11 +434,15 @@ acknowledged.
XPS is only available if the kconfig symbol CONFIG_XPS is enabled (on by
default for SMP). The functionality remains disabled until explicitly
-configured. To enable XPS, the bitmap of CPUs that may use a transmit
-queue is configured using the sysfs file entry:
+configured. To enable XPS, the bitmap of CPUs/receive-queues that may
+use a transmit queue is configured using the sysfs file entry:
+For selection based on CPUs map:
/sys/class/net/<dev>/queues/tx-<n>/xps_cpus
+For selection based on receive-queues map:
+/sys/class/net/<dev>/queues/tx-<n>/xps_rxqs
+
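+For example (a sketch only; the device name "eth0" and the queue
+numbers are illustrative, and both bitmaps are written in hexadecimal),
+CPUs 0-3 could be allowed to transmit on tx-0, or receive queue 0 could
+be mapped to transmit queue 0:
+
+echo f > /sys/class/net/eth0/queues/tx-0/xps_cpus
+echo 1 > /sys/class/net/eth0/queues/tx-0/xps_rxqs
+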
== Suggested Configuration
For a network device with a single transmission queue, XPS configuration
@@ -421,6 +455,11 @@ best CPUs to share a given queue are probably those that share the cache
with the CPU that processes transmit completions for that queue
(transmit interrupts).
+For transmit queue selection based on receive queue(s), XPS has to be
+explicitly configured by mapping receive-queue(s) to transmit queue(s). If
+the user configuration for the receive-queue map does not apply, then the
+transmit queue is selected based on the CPUs map.
+
Per TX Queue rate limitation:
=============================
diff --git a/Documentation/networking/ti-cpsw.txt b/Documentation/networking/ti-cpsw.txt
new file mode 100644
index 000000000000..67039205bd69
--- /dev/null
+++ b/Documentation/networking/ti-cpsw.txt
@@ -0,0 +1,540 @@
+* Texas Instruments CPSW ethernet driver
+
+Multiqueue & CBS & MQPRIO
+=====================================================================
+=====================================================================
+
+The cpsw has 3 CBS shapers for each external port. This document
+describes MQPRIO and CBS Qdisc offload configuration for the cpsw driver
+based on examples. It can potentially be used in audio video bridging
+(AVB) and time sensitive networking (TSN).
+
+The following examples were tested on AM572x EVM and BBB boards.
+
+Test setup
+==========
+
+Two examples are considered below, with an AM572x EVM running the cpsw
+driver in dual_emac mode.
+
+Several prerequisites:
+- TX queues must be rated starting from txq0, which has the highest priority
+- Traffic classes are used starting from 0, which has the highest priority
+- CBS shapers should be used with rated queues
+- The bandwidth for CBS shapers has to be set a little more than the
+ potential incoming rate; thus, the rate of all incoming tx queues has
+ to be a little less (see the note on CBS parameters after this list)
+- Real rates can differ, due to discreteness
+- Mapping skb-priority to txq is not enough; an skb-priority to l2 prio
+ map also has to be created with the ip or vconfig tool
+- Any l2/socket prio (0 - 7) can be used for the classes, but for
+ simplicity the default values are used: 3 and 2
+- Only 2 classes were tested, A and B, but it was checked that more can
+ work; the maximum allowed is 4, but a rate can be set for only 3 of them
+
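+A note on the CBS parameters used in the examples below: for the cbs
+qdisc, sendslope is normally idleslope - port_transmit_rate (all in
+kbit/s), which is how the sendslope values here were derived assuming a
+1Gb link: e.g. idleslope 41000 gives sendslope 41000 - 1000000 = -959000.
+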
+Test setup for examples
+=======================
+ +-------------------------------+
+ |--+ |
+ | | Workstation0 |
+ |E | MAC 18:03:73:66:87:42 |
++-----------------------------+ +--|t | |
+| | 1 | E | | |h |./tsn_listener -d \ |
+| Target board: | 0 | t |--+ |0 | 18:03:73:66:87:42 -i eth0 \|
+| AM572x EVM | 0 | h | | | -s 1500 |
+| | 0 | 0 | |--+ |
+| Only 2 classes: |Mb +---| +-------------------------------+
+| class A, class B | |
+| | +---| +-------------------------------+
+| | 1 | E | |--+ |
+| | 0 | t | | | Workstation1 |
+| | 0 | h |--+ |E | MAC 20:cf:30:85:7d:fd |
+| |Mb | 1 | +--|t | |
++-----------------------------+ |h |./tsn_listener -d \ |
+ |0 | 20:cf:30:85:7d:fd -i eth0 \|
+ | | -s 1500 |
+ |--+ |
+ +-------------------------------+
+
+*********************************************************************
+*********************************************************************
+*********************************************************************
+Example 1: One port tx AVB configuration scheme for target board
+----------------------------------------------------------------------
+(prints and scheme for AM572x evm, applicable for single port boards)
+
+tc - traffic class
+txq - transmit queue
+p - priority
+f - fifo (cpsw fifo)
+S - shaper configured
+
++------------------------------------------------------------------+ u
+| +---------------+ +---------------+ +------+ +------+ | s
+| | | | | | | | | | e
+| | App 1 | | App 2 | | Apps | | Apps | | r
+| | Class A | | Class B | | Rest | | Rest | |
+| | Eth0 | | Eth0 | | Eth0 | | Eth1 | | s
+| | VLAN100 | | VLAN100 | | | | | | | | p
+| | 40 Mb/s | | 20 Mb/s | | | | | | | | a
+| | SO_PRIORITY=3 | | SO_PRIORITY=2 | | | | | | | | c
+| | | | | | | | | | | | | | e
+| +---|-----------+ +---|-----------+ +---|--+ +---|--+ |
++-----|------------------|------------------|--------|-------------+
+ +-+ +------------+ | |
+ | | +-----------------+ +--+
+ | | | |
++---|-------|-------------|-----------------------|----------------+
+| +----+ +----+ +----+ +----+ +----+ |
+| | p3 | | p2 | | p1 | | p0 | | p0 | | k
+| \ / \ / \ / \ / \ / | e
+| \ / \ / \ / \ / \ / | r
+| \/ \/ \/ \/ \/ | n
+| | | | | | e
+| | | +-----+ | | l
+| | | | | |
+| +----+ +----+ +----+ +----+ | s
+| |tc0 | |tc1 | |tc2 | |tc0 | | p
+| \ / \ / \ / \ / | a
+| \ / \ / \ / \ / | c
+| \/ \/ \/ \/ | e
+| | | +-----+ | |
+| | | | | | |
+| | | | | | |
+| | | | | | |
+| +----+ +----+ +----+ +----+ +----+ |
+| |txq0| |txq1| |txq2| |txq3| |txq4| |
+| \ / \ / \ / \ / \ / |
+| \ / \ / \ / \ / \ / |
+| \/ \/ \/ \/ \/ |
+| +-|------|------|------|--+ +--|--------------+ |
+| | | | | | | Eth0.100 | | Eth1 | |
++---|------|------|------|------------------------|----------------+
+ | | | | |
+ p p p p |
+ 3 2 0-1, 4-7 <- L2 priority |
+ | | | | |
+ | | | | |
++---|------|------|------|------------------------|----------------+
+| | | | | |----------+ |
+| +----+ +----+ +----+ +----+ +----+ |
+| |dma7| |dma6| |dma5| |dma4| |dma3| |
+| \ / \ / \ / \ / \ / | c
+| \S / \S / \ / \ / \ / | p
+| \/ \/ \/ \/ \/ | s
+| | | | +----- | | w
+| | | | | | |
+| | | | | | | d
+| +----+ +----+ +----+p p+----+ | r
+| | | | | | |o o| | | i
+| | f3 | | f2 | | f0 |r r| f0 | | v
+| |tc0 | |tc1 | |tc2 |t t|tc0 | | e
+| \CBS / \CBS / \CBS /1 2\CBS / | r
+| \S / \S / \ / \ / |
+| \/ \/ \/ \/ |
++------------------------------------------------------------------+
+========================================Eth==========================>
+
+1)
+// Add 4 tx queues, for interface Eth0, and 1 tx queue for Eth1
+$ ethtool -L eth0 rx 1 tx 5
+rx unmodified, ignoring
+
+2)
+// Check if num of queues is set correctly:
+$ ethtool -l eth0
+Channel parameters for eth0:
+Pre-set maximums:
+RX: 8
+TX: 8
+Other: 0
+Combined: 0
+Current hardware settings:
+RX: 1
+TX: 5
+Other: 0
+Combined: 0
+
+3)
+// TX queues must be rated starting from 0, so set bws for tx0 and tx1
+// Set rates 40 and 20 Mb/s appropriately.
+// Pay attention, real speed can differ a bit due to discreteness.
+// Leave last 2 tx queues not rated.
+$ echo 40 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
+$ echo 20 > /sys/class/net/eth0/queues/tx-1/tx_maxrate
+
+4)
+// Check maximum rate of tx (cpdma) queues:
+$ cat /sys/class/net/eth0/queues/tx-*/tx_maxrate
+40
+20
+0
+0
+0
+
+5)
+// Map skb->priority to traffic class:
+// 3pri -> tc0, 2pri -> tc1, (0,1,4-7)pri -> tc2
+// Map traffic class to transmit queue:
+// tc0 -> txq0, tc1 -> txq1, tc2 -> (txq2, txq3)
+$ tc qdisc replace dev eth0 handle 100: parent root mqprio num_tc 3 \
+map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 1
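+// (In the mqprio parameters above, the 16 "map" values give the
+// skb->priority -> traffic class mapping for priorities 0..15, and
+// each "queues" entry is count@offset of txqs assigned to a tc,
+// e.g. 2@2 means txq2 and txq3.)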
+
+5a)
+// As the two interfaces share the same set of tx queues, assign all
+// traffic coming to interface Eth1 to a separate queue, in order not to
+// mix it with traffic from interface Eth0. So use a separate txq to
+// send packets to Eth1: all prio -> tc0 and tc0 -> txq4
+// Here hw is 0, so the default configuration is still used for eth1 in hw
+$ tc qdisc replace dev eth1 handle 100: parent root mqprio num_tc 1 \
+map 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 queues 1@4 hw 0
+
+6)
+// Check classes settings
+$ tc -g class show dev eth0
++---(100:ffe2) mqprio
+| +---(100:3) mqprio
+| +---(100:4) mqprio
+|
++---(100:ffe1) mqprio
+| +---(100:2) mqprio
+|
++---(100:ffe0) mqprio
+ +---(100:1) mqprio
+
+$ tc -g class show dev eth1
++---(100:ffe0) mqprio
+ +---(100:5) mqprio
+
+7)
+// Set rate for class A - 41 Mbit (tc0, txq0) using CBS Qdisc
+// Set it +1 Mb for reserve (important!)
+// here only idle slope is important, other args are ignored
+// Pay attention, real speed can differ a bit due to discreteness
+$ tc qdisc add dev eth0 parent 100:1 cbs locredit -1438 \
+hicredit 62 sendslope -959000 idleslope 41000 offload 1
+net eth0: set FIFO3 bw = 50
+
+8)
+// Set rate for class B - 21 Mbit (tc1, txq1) using CBS Qdisc:
+// Set it +1 Mb for reserve (important!)
+$ tc qdisc add dev eth0 parent 100:2 cbs locredit -1468 \
+hicredit 65 sendslope -979000 idleslope 21000 offload 1
+net eth0: set FIFO2 bw = 30
+
+9)
+// Create vlan 100 to map sk->priority to vlan qos
+$ ip link add link eth0 name eth0.100 type vlan id 100
+8021q: 802.1Q VLAN Support v1.8
+8021q: adding VLAN 0 to HW filter on device eth0
+8021q: adding VLAN 0 to HW filter on device eth1
+net eth0: Adding vlanid 100 to vlan filter
+
+10)
+// Map skb->priority to L2 prio, 1 to 1
+$ ip link set eth0.100 type vlan \
+egress 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+
+11)
+// Check egress map for vlan 100
+$ cat /proc/net/vlan/eth0.100
+[...]
+INGRESS priority mappings: 0:0 1:0 2:0 3:0 4:0 5:0 6:0 7:0
+EGRESS priority mappings: 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+
+12)
+// Run your appropriate tools with socket option "SO_PRIORITY"
+// to 3 for class A and/or to 2 for class B
+// (taken from https://www.spinics.net/lists/netdev/msg460869.html)
+./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p3 -s 1500&
+./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p2 -s 1500&
+
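+// For reference, SO_PRIORITY is set by the talker via setsockopt(); a
+// minimal sketch (variable names illustrative):
+//     int prio = 3; /* 3 = class A, 2 = class B here */
+//     setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
+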
+13)
+// run your listener on workstation (should be in same vlan)
+// (taken from https://www.spinics.net/lists/netdev/msg460869.html)
+./tsn_listener -d 18:03:73:66:87:42 -i enp5s0 -s 1500
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39000 kbps
+
+14)
+// Restore default configuration if needed
+$ ip link del eth0.100
+$ tc qdisc del dev eth1 root
+$ tc qdisc del dev eth0 root
+net eth0: Prev FIFO2 is shaped
+net eth0: set FIFO3 bw = 0
+net eth0: set FIFO2 bw = 0
+$ ethtool -L eth0 rx 1 tx 1
+
+*********************************************************************
+*********************************************************************
+*********************************************************************
+Example 2: Two port tx AVB configuration scheme for target board
+----------------------------------------------------------------------
+(prints and scheme for AM572x evm, for dual emac boards only)
+
++------------------------------------------------------------------+ u
+| +----------+ +----------+ +------+ +----------+ +----------+ | s
+| | | | | | | | | | | | e
+| | App 1 | | App 2 | | Apps | | App 3 | | App 4 | | r
+| | Class A | | Class B | | Rest | | Class B | | Class A | |
+| | Eth0 | | Eth0 | | | | | Eth1 | | Eth1 | | s
+| | VLAN100 | | VLAN100 | | | | | VLAN100 | | VLAN100 | | p
+| | 40 Mb/s | | 20 Mb/s | | | | | 10 Mb/s | | 30 Mb/s | | a
+| | SO_PRI=3 | | SO_PRI=2 | | | | | SO_PRI=3 | | SO_PRI=2 | | c
+| | | | | | | | | | | | | | | | | e
+| +---|------+ +---|------+ +---|--+ +---|------+ +---|------+ |
++-----|-------------|-------------|---------|-------------|--------+
+ +-+ +-------+ | +----------+ +----+
+ | | +-------+------+ | |
+ | | | | | |
++---|-------|-------------|--------------|-------------|-------|---+
+| +----+ +----+ +----+ +----+ +----+ +----+ +----+ +----+ |
+| | p3 | | p2 | | p1 | | p0 | | p0 | | p1 | | p2 | | p3 | | k
+| \ / \ / \ / \ / \ / \ / \ / \ / | e
+| \ / \ / \ / \ / \ / \ / \ / \ / | r
+| \/ \/ \/ \/ \/ \/ \/ \/ | n
+| | | | | | | | e
+| | | +----+ +----+ | | | l
+| | | | | | | |
+| +----+ +----+ +----+ +----+ +----+ +----+ | s
+| |tc0 | |tc1 | |tc2 | |tc2 | |tc1 | |tc0 | | p
+| \ / \ / \ / \ / \ / \ / | a
+| \ / \ / \ / \ / \ / \ / | c
+| \/ \/ \/ \/ \/ \/ | e
+| | | +-----+ +-----+ | | |
+| | | | | | | | | |
+| | | | | | | | | |
+| | | | | E E | | | | |
+| +----+ +----+ +----+ +----+ t t +----+ +----+ +----+ +----+ |
+| |txq0| |txq1| |txq4| |txq5| h h |txq6| |txq7| |txq3| |txq2| |
+| \ / \ / \ / \ / 0 1 \ / \ / \ / \ / |
+| \ / \ / \ / \ / . . \ / \ / \ / \ / |
+| \/ \/ \/ \/ 1 1 \/ \/ \/ \/ |
+| +-|------|------|------|--+ 0 0 +-|------|------|------|--+ |
+| | | | | | | 0 0 | | | | | | |
++---|------|------|------|---------------|------|------|------|----+
+ | | | | | | | |
+ p p p p p p p p
+ 3 2 0-1, 4-7 <-L2 pri-> 0-1, 4-7 2 3
+ | | | | | | | |
+ | | | | | | | |
++---|------|------|------|---------------|------|------|------|----+
+| | | | | | | | | |
+| +----+ +----+ +----+ +----+ +----+ +----+ +----+ +----+ |
+| |dma7| |dma6| |dma3| |dma2| |dma1| |dma0| |dma4| |dma5| |
+| \ / \ / \ / \ / \ / \ / \ / \ / | c
+| \S / \S / \ / \ / \ / \ / \S / \S / | p
+| \/ \/ \/ \/ \/ \/ \/ \/ | s
+| | | | +----- | | | | | w
+| | | | | +----+ | | | |
+| | | | | | | | | | d
+| +----+ +----+ +----+p p+----+ +----+ +----+ | r
+| | | | | | |o o| | | | | | | i
+| | f3 | | f2 | | f0 |r CPSW r| f3 | | f2 | | f0 | | v
+| |tc0 | |tc1 | |tc2 |t t|tc0 | |tc1 | |tc2 | | e
+| \CBS / \CBS / \CBS /1 2\CBS / \CBS / \CBS / | r
+| \S / \S / \ / \S / \S / \ / |
+| \/ \/ \/ \/ \/ \/ |
++------------------------------------------------------------------+
+========================================Eth==========================>
+
+1)
+// Add 8 tx queues for interface Eth0; they are common, so they are
+// accessed by both interfaces Eth0 and Eth1.
+$ ethtool -L eth1 rx 1 tx 8
+rx unmodified, ignoring
+
+2)
+// Check if num of queues is set correctly:
+$ ethtool -l eth0
+Channel parameters for eth0:
+Pre-set maximums:
+RX: 8
+TX: 8
+Other: 0
+Combined: 0
+Current hardware settings:
+RX: 1
+TX: 8
+Other: 0
+Combined: 0
+
+3)
+// TX queues must be rated starting from 0, so set bws for tx0 and tx1 for Eth0
+// and for tx2 and tx3 for Eth1. That is, rates 40 and 20 Mb/s appropriately
+// for Eth0 and 30 and 10 Mb/s for Eth1.
+// Real speed can differ a bit due to discreteness
+// Leave the last 4 tx queues not rated
+$ echo 40 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
+$ echo 20 > /sys/class/net/eth0/queues/tx-1/tx_maxrate
+$ echo 30 > /sys/class/net/eth1/queues/tx-2/tx_maxrate
+$ echo 10 > /sys/class/net/eth1/queues/tx-3/tx_maxrate
+
+4)
+// Check maximum rate of tx (cpdma) queues:
+$ cat /sys/class/net/eth0/queues/tx-*/tx_maxrate
+40
+20
+30
+10
+0
+0
+0
+0
+
+5)
+// Map skb->priority to traffic class for Eth0:
+// 3pri -> tc0, 2pri -> tc1, (0,1,4-7)pri -> tc2
+// Map traffic class to transmit queue:
+// tc0 -> txq0, tc1 -> txq1, tc2 -> (txq4, txq5)
+$ tc qdisc replace dev eth0 handle 100: parent root mqprio num_tc 3 \
+map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@4 hw 1
+
+6)
+// Check classes settings
+$ tc -g class show dev eth0
++---(100:ffe2) mqprio
+| +---(100:5) mqprio
+| +---(100:6) mqprio
+|
++---(100:ffe1) mqprio
+| +---(100:2) mqprio
+|
++---(100:ffe0) mqprio
+ +---(100:1) mqprio
+
+7)
+// Set rate for class A - 41 Mbit (tc0, txq0) using CBS Qdisc for Eth0
+// here only idle slope is important, the others are ignored
+// Real speed can differ a bit due to discreteness
+$ tc qdisc add dev eth0 parent 100:1 cbs locredit -1470 \
+hicredit 62 sendslope -959000 idleslope 41000 offload 1
+net eth0: set FIFO3 bw = 50
+
+8)
+// Set rate for class B - 21 Mbit (tc1, txq1) using CBS Qdisc for Eth0
+$ tc qdisc add dev eth0 parent 100:2 cbs locredit -1470 \
+hicredit 65 sendslope -979000 idleslope 21000 offload 1
+net eth0: set FIFO2 bw = 30
+
+9)
+// Create vlan 100 to map sk->priority to vlan qos for Eth0
+$ ip link add link eth0 name eth0.100 type vlan id 100
+net eth0: Adding vlanid 100 to vlan filter
+
+10)
+// Map skb->priority to L2 prio for Eth0.100, one to one
+$ ip link set eth0.100 type vlan \
+egress 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+
+11)
+// Check egress map for vlan 100
+$ cat /proc/net/vlan/eth0.100
+[...]
+INGRESS priority mappings: 0:0 1:0 2:0 3:0 4:0 5:0 6:0 7:0
+EGRESS priority mappings: 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+
+12)
+// Map skb->priority to traffic class for Eth1:
+// 3pri -> tc0, 2pri -> tc1, (0,1,4-7)pri -> tc2
+// Map traffic class to transmit queue:
+// tc0 -> txq2, tc1 -> txq3, tc2 -> (txq6, txq7)
+$ tc qdisc replace dev eth1 handle 100: parent root mqprio num_tc 3 \
+map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@2 1@3 2@6 hw 1
+
+13)
+// Check classes settings
+$ tc -g class show dev eth1
++---(100:ffe2) mqprio
+| +---(100:7) mqprio
+| +---(100:8) mqprio
+|
++---(100:ffe1) mqprio
+| +---(100:4) mqprio
+|
++---(100:ffe0) mqprio
+ +---(100:3) mqprio
+
+14)
+// Set rate for class A - 31 Mbit (tc0, txq2) using CBS Qdisc for Eth1
+// here only idle slope is important, the others are ignored
+// Set it +1 Mb for reserve (important!)
+$ tc qdisc add dev eth1 parent 100:3 cbs locredit -1453 \
+hicredit 47 sendslope -969000 idleslope 31000 offload 1
+net eth1: set FIFO3 bw = 31
+
+15)
+// Set rate for class B - 11 Mbit (tc1, txq3) using CBS Qdisc for Eth1
+// Set it +1 Mb for reserve (important!)
+$ tc qdisc add dev eth1 parent 100:4 cbs locredit -1483 \
+hicredit 34 sendslope -989000 idleslope 11000 offload 1
+net eth1: set FIFO2 bw = 11
+
+16)
+// Create vlan 100 to map sk->priority to vlan qos for Eth1
+$ ip link add link eth1 name eth1.100 type vlan id 100
+net eth1: Adding vlanid 100 to vlan filter
+
+17)
+// Map skb->priority to L2 prio for Eth1.100, one to one
+$ ip link set eth1.100 type vlan \
+egress 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+
+18)
+// Check egress map for vlan 100
+$ cat /proc/net/vlan/eth1.100
+[...]
+INGRESS priority mappings: 0:0 1:0 2:0 3:0 4:0 5:0 6:0 7:0
+EGRESS priority mappings: 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+
+19)
+// Run appropriate tools with socket option "SO_PRIORITY" to 3
+// for class A and to 2 for class B. For both interfaces
+./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p2 -s 1500&
+./tsn_talker -d 18:03:73:66:87:42 -i eth0.100 -p3 -s 1500&
+./tsn_talker -d 20:cf:30:85:7d:fd -i eth1.100 -p2 -s 1500&
+./tsn_talker -d 20:cf:30:85:7d:fd -i eth1.100 -p3 -s 1500&
+
+20)
+// run your listener on workstation (should be in same vlan)
+// (taken from https://www.spinics.net/lists/netdev/msg460869.html)
+./tsn_listener -d 18:03:73:66:87:42 -i enp5s0 -s 1500
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39012 kbps
+Receiving data rate: 39000 kbps
+
+21)
+// Restore default configuration if needed
+$ ip link del eth1.100
+$ ip link del eth0.100
+$ tc qdisc del dev eth1 root
+net eth1: Prev FIFO2 is shaped
+net eth1: set FIFO3 bw = 0
+net eth1: set FIFO2 bw = 0
+$ tc qdisc del dev eth0 root
+net eth0: Prev FIFO2 is shaped
+net eth0: set FIFO3 bw = 0
+net eth0: set FIFO2 bw = 0
+$ ethtool -L eth0 rx 1 tx 1
diff --git a/Documentation/nommu-mmap.txt b/Documentation/nommu-mmap.txt
index 69556f0d494b..530fed08de2c 100644
--- a/Documentation/nommu-mmap.txt
+++ b/Documentation/nommu-mmap.txt
@@ -47,7 +47,7 @@ and it's also much more restricted in the latter case:
appropriate mapping protection capabilities. Ramfs, romfs, cramfs
and mtd might all permit this.
- - If the backing device device can't or won't permit direct sharing,
+ - If the backing device can't or won't permit direct sharing,
but does have the NOMMU_MAP_COPY capability, then a copy of the
appropriate bit of the file will be read into a contiguous bit of
memory and any extraneous space beyond the EOF will be cleared
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index af005770e767..cd283190855a 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -204,26 +204,26 @@ VI. Are there any precautions to be taken to prevent freezing failures?
Yes, there are.
-First of all, grabbing the 'pm_mutex' lock to mutually exclude a piece of code
+First of all, grabbing the 'system_transition_mutex' lock to mutually exclude a piece of code
from system-wide sleep such as suspend/hibernation is not encouraged.
If possible, that piece of code must instead hook onto the suspend/hibernation
notifiers to achieve mutual exclusion. Look at the CPU-Hotplug code
(kernel/cpu.c) for an example.
-However, if that is not feasible, and grabbing 'pm_mutex' is deemed necessary,
-it is strongly discouraged to directly call mutex_[un]lock(&pm_mutex) since
+However, if that is not feasible, and grabbing 'system_transition_mutex' is deemed necessary,
+it is strongly discouraged to directly call mutex_[un]lock(&system_transition_mutex) since
that could lead to freezing failures, because if the suspend/hibernate code
-successfully acquired the 'pm_mutex' lock, and hence that other entity failed
+successfully acquired the 'system_transition_mutex' lock, and hence that other entity failed
to acquire the lock, then that task would get blocked in TASK_UNINTERRUPTIBLE
state. As a consequence, the freezer would not be able to freeze that task,
leading to freezing failure.
However, the [un]lock_system_sleep() APIs are safe to use in this scenario,
since they ask the freezer to skip freezing this task, since it is anyway
-"frozen enough" as it is blocked on 'pm_mutex', which will be released
+"frozen enough" as it is blocked on 'system_transition_mutex', which will be released
only after the entire suspend/hibernation sequence is complete.
So, to summarize, use [un]lock_system_sleep() instead of directly using
-mutex_[un]lock(&pm_mutex). That would prevent freezing failures.
+mutex_[un]lock(&system_transition_mutex). That would prevent freezing failures.
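+
+For example, a minimal sketch of the recommended pattern (the critical
+section stands for whatever must not race with suspend/hibernation):
+
+	lock_system_sleep();
+	/* ... code that must not run concurrently with system sleep ... */
+	unlock_system_sleep();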
V. Miscellaneous
/sys/power/pm_freeze_timeout controls how long it will cost at most to freeze
diff --git a/Documentation/power/suspend-and-cpuhotplug.txt b/Documentation/power/suspend-and-cpuhotplug.txt
index 6f55eb960a6d..a8751b8df10e 100644
--- a/Documentation/power/suspend-and-cpuhotplug.txt
+++ b/Documentation/power/suspend-and-cpuhotplug.txt
@@ -32,7 +32,7 @@ More details follow:
sysfs file
|
v
- Acquire pm_mutex lock
+ Acquire system_transition_mutex lock
|
v
Send PM_SUSPEND_PREPARE
@@ -96,10 +96,10 @@ execution during resume):
* thaw tasks
* send PM_POST_SUSPEND notifications
-* Release pm_mutex lock.
+* Release system_transition_mutex lock.
-It is to be noted here that the pm_mutex lock is acquired at the very
+It is to be noted here that the system_transition_mutex lock is acquired at the very
beginning, when we are just starting out to suspend, and then released only
after the entire cycle is complete (i.e., suspend + resume).
diff --git a/Documentation/process/2.Process.rst b/Documentation/process/2.Process.rst
index a9c46dd0706b..51d0349c7809 100644
--- a/Documentation/process/2.Process.rst
+++ b/Documentation/process/2.Process.rst
@@ -134,7 +134,7 @@ and their maintainers are:
4.4 Greg Kroah-Hartman (very long-term stable kernel)
4.9 Greg Kroah-Hartman
4.14 Greg Kroah-Hartman
- ====== ====================== ===========================
+ ====== ====================== ==============================
The selection of a kernel for long-term support is purely a matter of a
maintainer having the need and the time to maintain that release. There
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index ddc029734b25..7a92a06f90de 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -35,7 +35,7 @@ binutils 2.20 ld -v
flex 2.5.35 flex --version
bison 2.0 bison --version
util-linux 2.10o fdformat --version
-module-init-tools 0.9.10 depmod -V
+kmod 13 depmod -V
e2fsprogs 1.41.4 e2fsck -V
jfsutils 1.1.3 fsck.jfs -V
reiserfsprogs 3.6.3 reiserfsck -V
@@ -81,6 +81,14 @@ The build system has, as of 4.13, switched to using thin archives (`ar T`)
rather than incremental linking (`ld -r`) for built-in.a intermediate steps.
This requires binutils 2.20 or newer.
+pkg-config
+----------
+
+The build system, as of 4.18, requires pkg-config to check for installed
+kconfig tools and to determine flags settings for use in
+'make {menu,n,g,x}config'. Previously pkg-config was being used but not
+verified or documented.
+
Flex
----
@@ -156,12 +164,6 @@ is not build with ``CONFIG_KALLSYMS`` and you have no way to rebuild and
reproduce the Oops with that option, then you can still decode that Oops
with ksymoops.
-Module-Init-Tools
------------------
-
-A new module loader is now in the kernel that requires ``module-init-tools``
-to use. It is backward compatible with the 2.4.x series kernels.
-
Mkinitrd
--------
@@ -371,16 +373,17 @@ Util-linux
- <https://www.kernel.org/pub/linux/utils/util-linux/>
+Kmod
+----
+
+- <https://www.kernel.org/pub/linux/utils/kernel/kmod/>
+- <https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git>
+
Ksymoops
--------
- <https://www.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
-Module-Init-Tools
------------------
-
-- <https://www.kernel.org/pub/linux/utils/kernel/module-init-tools/>
-
Mkinitrd
--------
diff --git a/Documentation/process/howto.rst b/Documentation/process/howto.rst
index 3df55811b916..130bf0f48875 100644
--- a/Documentation/process/howto.rst
+++ b/Documentation/process/howto.rst
@@ -85,7 +85,7 @@ linux-api@vger.kernel.org.
Here is a list of files that are in the kernel source tree that are
required reading:
- README
+ :ref:`Documentation/admin-guide/README.rst <readme>`
This file gives a short background on the Linux kernel and describes
what is necessary to do to configure and build the kernel. People
who are new to the kernel should start here.
diff --git a/Documentation/process/management-style.rst b/Documentation/process/management-style.rst
index 45595fd8a66b..85ef8ca8f639 100644
--- a/Documentation/process/management-style.rst
+++ b/Documentation/process/management-style.rst
@@ -105,7 +105,7 @@ to admit that you are stupid when you haven't **yet** done the really
stupid thing.
Then, when it really does turn out to be stupid, people just roll their
-eyes and say "Oops, he did it again".
+eyes and say "Oops, not again".
This preemptive admission of incompetence might also make the people who
actually do the work also think twice about whether it's worth doing or
@@ -172,10 +172,10 @@ To solve this problem, you really only have two options:
might even be amused.
The option of being unfailingly polite really doesn't exist. Nobody will
-trust somebody who is so clearly hiding his true character.
+trust somebody who is so clearly hiding their true character.
.. [#f2] Paul Simon sang "Fifty Ways to Leave Your Lover", because quite
- frankly, "A Million Ways to Tell a Developer He Is a D*ckhead" doesn't
+ frankly, "A Million Ways to Tell a Developer They're a D*ckhead" doesn't
scan nearly as well. But I'm sure he thought about it.
@@ -219,15 +219,16 @@ Things will go wrong, and people want somebody to blame. Tag, you're it.
It's not actually that hard to accept the blame, especially if people
kind of realize that it wasn't **all** your fault. Which brings us to the
-best way of taking the blame: do it for another guy. You'll feel good
-for taking the fall, he'll feel good about not getting blamed, and the
-guy who lost his whole 36GB porn-collection because of your incompetence
-will grudgingly admit that you at least didn't try to weasel out of it.
-
-Then make the developer who really screwed up (if you can find him) know
-**in_private** that he screwed up. Not just so he can avoid it in the
-future, but so that he knows he owes you one. And, perhaps even more
-importantly, he's also likely the person who can fix it. Because, let's
+best way of taking the blame: do it for someone else. You'll feel good
+for taking the fall, they'll feel good about not getting blamed, and the
+person who lost their whole 36GB porn-collection because of your
+incompetence will grudgingly admit that you at least didn't try to weasel
+out of it.
+
+Then make the developer who really screwed up (if you can find them) know
+**in_private** that they screwed up. Not just so they can avoid it in the
+future, but so that they know they owe you one. And, perhaps even more
+importantly, they're also likely the person who can fix it. Because, let's
face it, it sure ain't you.
Taking the blame is also why you get to be manager in the first place.
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 36a2dded525b..0de6f6145cc6 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -37,7 +37,7 @@ Procedure for submitting patches to the -stable tree
- If the patch covers files in net/ or drivers/net please follow netdev stable
submission guidelines as described in
- Documentation/networking/netdev-FAQ.txt
+ :ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
- Security patches should not be handled (solely) by the -stable review
process but should follow the procedures in
:ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index 908bb55be407..c0917107b90a 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -611,6 +611,7 @@ which stable kernel versions should receive your fix. This is the preferred
method for indicating a bug fixed by the patch. See :ref:`describe_changes`
for more details.
+.. _the_canonical_patch_format:
14) The canonical patch format
------------------------------
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index a289285d2412..7d3684e81df6 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -9,7 +9,7 @@ rfkill - RF kill switch support
Introduction
============
-The rfkill subsystem provides a generic interface to disabling any radio
+The rfkill subsystem provides a generic interface for disabling any radio
transmitter in the system. When a transmitter is blocked, it shall not
radiate any power.
@@ -45,7 +45,7 @@ The rfkill subsystem is composed of three main components:
* the rfkill drivers.
The rfkill core provides API for kernel drivers to register their radio
-transmitter with the kernel, methods for turning it on and off and, letting
+transmitter with the kernel, methods for turning it on and off, and letting
the system know about hardware-disabled states that may be implemented on
the device.
@@ -54,7 +54,7 @@ ways for userspace to query the current states. See the "Userspace support"
section below.
When the device is hard-blocked (either by a call to rfkill_set_hw_state()
-or from query_hw_block) set_block() will be invoked for additional software
+or from query_hw_block), set_block() will be invoked for additional software
block, but drivers can ignore the method call since they can use the return
value of the function rfkill_set_hw_state() to sync the software state
instead of keeping track of calls to set_block(). In fact, drivers should
@@ -65,7 +65,6 @@ keeps track of soft and hard block separately.
Kernel API
==========
-
Drivers for radio transmitters normally implement an rfkill driver.
Platform drivers might implement input devices if the rfkill button is just
@@ -75,14 +74,14 @@ a way to turn on/off the transmitter(s).
For some platforms, it is possible that the hardware state changes during
suspend/hibernation, in which case it will be necessary to update the rfkill
-core with the current state is at resume time.
+core with the current state at resume time.
To create an rfkill driver, driver's Kconfig needs to have::
depends on RFKILL || !RFKILL
to ensure the driver cannot be built-in when rfkill is modular. The !RFKILL
-case allows the driver to be built when rfkill is not configured, which
+case allows the driver to be built when rfkill is not configured, in which
case all rfkill API can still be used but will be provided by static inlines
which compile to almost nothing.
@@ -91,7 +90,7 @@ rfkill drivers that control devices that can be hard-blocked unless they also
assign the poll_hw_block() callback (then the rfkill core will poll the
device). Don't do this unless you cannot get the event in any other way.
-RFKill provides per-switch LED triggers, which can be used to drive LEDs
+rfkill provides per-switch LED triggers, which can be used to drive LEDs
according to the switch state (LED_FULL when blocked, LED_OFF otherwise).
@@ -114,7 +113,7 @@ a specified type) into a state which also updates the default state for
hotplugged devices.
After an application opens /dev/rfkill, it can read the current state of all
-devices. Changes can be either obtained by either polling the descriptor for
+devices. Changes can be obtained by either polling the descriptor for
hotplug or state change events or by listening for uevents emitted by the
rfkill core framework.
@@ -127,8 +126,7 @@ environment variables set::
RFKILL_STATE
RFKILL_TYPE
-The contents of these variables corresponds to the "name", "state" and
+The content of these variables corresponds to the "name", "state" and
"type" sysfs files explained above.
-
For further details consult Documentation/ABI/stable/sysfs-class-rfkill.
diff --git a/Documentation/sound/alsa-configuration.rst b/Documentation/sound/alsa-configuration.rst
index 4d83c1c0ca04..4a3cecc8ad38 100644
--- a/Documentation/sound/alsa-configuration.rst
+++ b/Documentation/sound/alsa-configuration.rst
@@ -1568,7 +1568,7 @@ joystick_io
The driver requires firmware files ``turtlebeach/msndinit.bin`` and
``turtlebeach/msndperm.bin`` in the proper firmware directory.
-See Documentation/sound/oss/MultiSound for important information
+See Documentation/sound/cards/multisound.sh for important information
about this driver. Note that it has been discontinued, but the
Voyetra Turtle Beach knowledge base entry for it is still available
at
diff --git a/Documentation/sound/cards/multisound.sh b/Documentation/sound/cards/multisound.sh
new file mode 100755
index 000000000000..a915a1affcde
--- /dev/null
+++ b/Documentation/sound/cards/multisound.sh
@@ -0,0 +1,1139 @@
+#! /bin/sh
+#
+# Turtle Beach MultiSound Driver Notes
+# -- Andrew Veliath <andrewtv@usa.net>
+#
+# Last update: September 10, 1998
+# Corresponding msnd driver: 0.8.3
+#
+# ** This file is a README (top part) and shell archive (bottom part).
+# The corresponding archived utility sources can be unpacked by
+# running `sh MultiSound' (the utilities are only needed for the
+# Pinnacle and Fiji cards). **
+#
+#
+# -=-=- Getting Firmware -=-=-
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# See the section `Obtaining and Creating Firmware Files' in this
+# document for instructions on obtaining the necessary firmware
+# files.
+#
+#
+# Supported Features
+# ~~~~~~~~~~~~~~~~~~
+#
+# Currently, full-duplex digital audio (/dev/dsp only, /dev/audio is
+# not currently available) and mixer functionality (/dev/mixer) are
+# supported (memory mapped digital audio is not yet supported).
+# Digital transfers and monitoring can be done as well if you have
+# the digital daughterboard (see the section on using the S/PDIF port
+# for more information).
+#
+# Support for the Turtle Beach MultiSound Hurricane architecture is
+# composed of the following modules (these can also operate compiled
+# into the kernel):
+#
+# snd-msnd-lib - MultiSound base (requires snd)
+#
+# snd-msnd-classic - Base audio/mixer support for Classic, Monterey and
+# Tahiti cards
+#
+# snd-msnd-pinnacle - Base audio/mixer support for Pinnacle and Fiji cards
+#
+#
+# Important Notes - Read Before Using
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# The firmware files are not included (may change in future). You
+# must obtain these images from Turtle Beach (they are included in
+# the MultiSound Development Kits), and place them in /etc/sound for
+# example, and give the full paths in the Linux configuration. If
+# you are compiling in support for the MultiSound driver rather than
+# using it as a module, these firmware files must be accessible
+# during kernel compilation.
+#
+# Please note these files must be binary files, not assembler. See
+# the section later in this document for instructions to obtain these
+# files.
+#
+#
+# Configuring Card Resources
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# ** This section is very important, as your card may not work at all
+# or your machine may crash if you do not do this correctly. **
+#
+# * Classic/Monterey/Tahiti
+#
+# These cards are configured through the driver snd-msnd-classic. You must
+# know the io port, then the driver will select the irq and memory resources
+# on the card. It is up to you to know if these locations are free or
+# not; a conflict can lock the machine up.
+#
+# * Pinnacle/Fiji
+#
+# The Pinnacle and Fiji cards have an extra config port, either
+# 0x250, 0x260 or 0x270. This port can be disabled to have the card
+# configured strictly through PnP, however you lose the ability to
+# access the IDE controller and joystick devices on this card when
+# using PnP. The included pinnaclecfg program in this shell archive
+# can be used to configure the card in non-PnP mode, and in PnP mode
+# you can use isapnptools. These are described briefly here.
+#
+# pinnaclecfg is not required; you can use the snd-msnd-pinnacle module
+# to fully configure the card as well. However, pinnaclecfg can be
+# used to change the resource values of a particular device after the
+# snd-msnd-pinnacle module has been loaded. If you are compiling the
+# driver into the kernel, you must set these values during compile
+# time; however, other peripheral resource values can be changed with
+# the pinnaclecfg program after the kernel is loaded.
+#
+#
+# *** PnP mode
+#
+# Use pnpdump to obtain a sample configuration if you can; I was able
+# to obtain one with the command `pnpdump 1 0x203' -- this may vary
+# for you (running pnpdump by itself did not work for me). Then,
+# edit this file and use isapnp to uncomment and set the card values.
+# Use these values when inserting the snd-msnd-pinnacle module. Using
+# this method, you can set the resources for the DSP and the Kurzweil
+# synth (Pinnacle). Since Linux does not directly support PnP
+# devices, you may have difficulty when using the card in PnP mode
+# when the driver is compiled into the kernel. Using non-PnP mode
+# is preferable in this case.
+#
+# Here is an example mypinnacle.conf for isapnp that sets the card to
+# io base 0x210, irq 5 and mem 0xd8000, and also sets the Kurzweil
+# synth to 0x330 and irq 9 (may need editing for your system):
+#
+# (READPORT 0x0203)
+# (CSN 2)
+# (IDENTIFY *)
+#
+# # DSP
+# (CONFIGURE BVJ0440/-1 (LD 0
+# (INT 0 (IRQ 5 (MODE +E))) (IO 0 (BASE 0x0210)) (MEM 0 (BASE 0x0d8000))
+# (ACT Y)))
+#
+# # Kurzweil Synth (Pinnacle Only)
+# (CONFIGURE BVJ0440/-1 (LD 1
+# (IO 0 (BASE 0x0330)) (INT 0 (IRQ 9 (MODE +E)))
+# (ACT Y)))
+#
+# (WAITFORKEY)
+#
+#
+# *** Non-PnP mode
+#
+# The second way is by running the card in non-PnP mode. This
+# actually has some advantages in that you can access some other
+# devices on the card, such as the joystick and IDE controller. To
+# configure the card, unpack this shell archive and build the
+# pinnaclecfg program. Using this program, you can assign the
+# resource values to the card's devices, or disable the devices. As
+# an alternative to using pinnaclecfg, you can specify many of the
+# configuration values when loading the snd-msnd-pinnacle module (or
+# during kernel configuration when compiling the driver into the
+# kernel).
+#
+# If you specify cfg=0x250 for the snd-msnd-pinnacle module, it will
+# automatically configure the card to the given io, irq and memory
+# values using that config port (the config port is jumper selectable
+# on the card to 0x250, 0x260 or 0x270).
+#
+# See the `snd-msnd-pinnacle Additional Options' section below for more
+# information on these parameters (also, if you compile the driver
+# directly into the kernel, these extra parameters can be useful
+# here).
+#
+#
+# ** It is very easy to cause problems in your machine if you choose a
+# resource value which is incorrect. **
+#
+#
+# Examples
+# ~~~~~~~~
+#
+# * MultiSound Classic/Monterey/Tahiti:
+#
+# modprobe snd
+# insmod snd-msnd-lib
+# insmod snd-msnd-classic io=0x290 irq=7 mem=0xd0000
+#
+# * MultiSound Pinnacle in PnP mode:
+#
+# modprobe snd
+# insmod snd-msnd-lib
+# isapnp mypinnacle.conf
+# insmod snd-msnd-pinnacle io=0x210 irq=5 mem=0xd8000 <-- match mypinnacle.conf values
+#
+# * MultiSound Pinnacle in non-PnP mode (replace 0x250 with your configuration port,
+# one of 0x250, 0x260 or 0x270):
+#
+# modprobe snd
+# insmod snd-msnd-lib
+# insmod snd-msnd-pinnacle cfg=0x250 io=0x290 irq=5 mem=0xd0000
+#
+# * To use the MPU-compatible Kurzweil synth on the Pinnacle in PnP
+# mode, add the following (assumes you did `isapnp mypinnacle.conf'):
+#
+# insmod snd
+# insmod mpu401 io=0x330 irq=9 <-- match mypinnacle.conf values
+#
+# * To use the MPU-compatible Kurzweil synth on the Pinnacle in non-PnP
+# mode, add the following. Note how we first configure the peripheral's
+# resources, _then_ install a Linux driver for it:
+#
+# insmod snd
+# pinnaclecfg 0x250 mpu 0x330 9
+# insmod mpu401 io=0x330 irq=9
+#
+# -- OR you can use the following sequence without pinnaclecfg in non-PnP mode:
+#
+# modprobe snd
+# insmod snd-msnd-lib
+# insmod snd-msnd-pinnacle cfg=0x250 io=0x290 irq=5 mem=0xd0000 mpu_io=0x330 mpu_irq=9
+# insmod snd
+# insmod mpu401 io=0x330 irq=9
+#
+# * To set up the joystick port on the Pinnacle in non-PnP mode (though
+# you have to find the actual Linux joystick driver elsewhere), you
+# can use pinnaclecfg:
+#
+# pinnaclecfg 0x250 joystick 0x200
+#
+# -- OR you can configure this using snd-msnd-pinnacle with the following:
+#
+# modprobe snd
+# insmod snd-msnd-lib
+# insmod snd-msnd-pinnacle cfg=0x250 io=0x290 irq=5 mem=0xd0000 joystick_io=0x200
+#
+#
+# snd-msnd-classic, snd-msnd-pinnacle Required Options
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# If the following options are not given, the module will not load.
+# Examine the kernel message log for informative error messages.
+# WARNING--probing isn't supported so try to make sure you have the
+# correct shared memory area, otherwise you may experience problems.
+#
+# io I/O base of DSP, e.g. io=0x210
+# irq IRQ number, e.g. irq=5
+# mem Shared memory area, e.g. mem=0xd8000
+#
+#
+# snd-msnd-classic, snd-msnd-pinnacle Additional Options
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# fifosize The digital audio FIFOs, in kilobytes. If not
+# specified, the default will be used. Increasing
+# this value will reduce the chance of a FIFO
+# underflow at the expense of increasing overall
+# latency. For example, fifosize=512 will
+# allocate 512kB read and write FIFOs (1MB total).
+# While this may reduce dropouts, a heavy machine
+# load will undoubtedly starve the FIFO of data
+# and you will eventually get dropouts. One
+# option is to alter the scheduling priority of
+# the playback process, using `nice' or some form
+# of POSIX soft real-time scheduling.
+#
+# calibrate_signal Setting this to one calibrates the ADCs to the
+# signal, zero calibrates to the card (defaults
+# to zero).
+#
+#
+# snd-msnd-pinnacle Additional Options
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# digital Specify digital=1 to enable the S/PDIF input
+# if you have the digital daughterboard
+# adapter. This will enable access to the
+# DIGITAL1 input for the soundcard in the mixer.
+# Some mixer programs might have trouble setting
+# the DIGITAL1 source as an input. If you have
+# trouble, you can try the setdigital.c program
+# at the bottom of this document.
+#
+# cfg Non-PnP configuration port for the Pinnacle
+# and Fiji (typically 0x250, 0x260 or 0x270,
+# depending on the jumper configuration). If
+# this option is omitted, then it is assumed
+# that the card is in PnP mode, and that the
+# specified DSP resource values are already
+# configured with PnP (i.e. it won't attempt to
+# do any sort of configuration).
+#
+# When the Pinnacle is in non-PnP mode, you can use the following
+# options to configure particular devices. If a full specification
+# for a device is not given, then the device is not configured. Note
+# that you still must use a Linux driver for any of these devices
+# once their resources are setup (such as the Linux joystick driver,
+# or the MPU401 driver from OSS for the Kurzweil synth).
+#
+# mpu_io I/O port of MPU (on-board Kurzweil synth)
+# mpu_irq IRQ of MPU (on-board Kurzweil synth)
+# ide_io0 First I/O port of IDE controller
+# ide_io1 Second I/O port of IDE controller
+# ide_irq IRQ IDE controller
+# joystick_io I/O port of joystick
+#
+#
+# Obtaining and Creating Firmware Files
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# For the Classic/Tahiti/Monterey
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# Download to /tmp and unzip the following file from Turtle Beach:
+#
+# ftp://ftp.voyetra.com/pub/tbs/msndcl/msndvkit.zip
+#
+# When unzipped, unzip the file named MsndFiles.zip. Then copy the
+# following firmware files to /etc/sound (note the file renaming):
+#
+# cp DSPCODE/MSNDINIT.BIN /etc/sound/msndinit.bin
+# cp DSPCODE/MSNDPERM.REB /etc/sound/msndperm.bin
+#
+# When configuring the Linux kernel, specify /etc/sound/msndinit.bin and
+# /etc/sound/msndperm.bin for the two firmware files (Linux kernel
+# versions older than 2.2 do not ask for firmware paths, and are
+# hardcoded to /etc/sound).
+#
+# If you are compiling the driver into the kernel, these files must
+# be accessible during compilation, but will not be needed later.
+# The files must remain, however, if the driver is used as a module.
+#
+#
+# For the Pinnacle/Fiji
+# ~~~~~~~~~~~~~~~~~~~~~
+#
+# Download to /tmp and unzip the following file from Turtle Beach (be
+# sure to use the entire URL; some have had trouble navigating to the
+# URL):
+#
+# ftp://ftp.voyetra.com/pub/tbs/pinn/pnddk100.zip
+#
+# Unpack this shell archive, and run make in the created directory
+# (you need a C compiler and flex to build the utilities). This
+# should give you the executables conv, pinnaclecfg and setdigital.
+# conv is only used temporarily here to create the firmware files,
+# while pinnaclecfg is used to configure the Pinnacle or Fiji card in
+# non-PnP mode, and setdigital can be used to set the S/PDIF input on
+# the mixer (pinnaclecfg and setdigital should be copied to a
+# convenient place, possibly run during system initialization).
+#
+# To generate the firmware files with the `conv' program, we create
+# the binary firmware files by doing the following conversion
+# (assuming the archive unpacked into a directory named PINNDDK):
+#
+# ./conv < PINNDDK/dspcode/pndspini.asm > /etc/sound/pndspini.bin
+# ./conv < PINNDDK/dspcode/pndsperm.asm > /etc/sound/pndsperm.bin
+#
+# The conv (and conv.l) program is not needed after conversion and can
+# be safely deleted. Then, when configuring the Linux kernel, specify
+# /etc/sound/pndspini.bin and /etc/sound/pndsperm.bin for the two
+# firmware files (Linux kernel versions older than 2.2 do not ask for
+# firmware paths, and are hardcoded to /etc/sound).
+#
+# If you are compiling the driver into the kernel, these files must
+# be accessible during compilation, but will not be needed later.
+# The files must remain, however, if the driver is used as a module.
+#
+#
+# Using Digital I/O with the S/PDIF Port
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# If you have a Pinnacle or Fiji with the digital daughterboard and
+# want to set it as the input source, you can use this program if you
+# have trouble trying to do it with a mixer program (be sure to
+# insert the module with the digital=1 option, or say Y to the option
+# during compiled-in kernel operation). Upon selection of the S/PDIF
+# port, you should be able to monitor and record from it.
+#
+# There is something to note about using the S/PDIF port. Digital
+# timing is taken from the digital signal, so if a signal is not
+# connected to the port and it is selected as recording input, you
+# will find PCM playback to be distorted in playback rate. Also,
+# attempting to record at a sampling rate other than the DAT rate may
+# be problematic (e.g. trying to record at 8000Hz when the DAT signal
+# is 44100Hz). If you have a problem with this and need to record at a
+# rate other than the DAT rate, set the recording input to analog.
+#
+#
+# -- Shell archive attached below, just run `sh MultiSound' to extract.
+# Contains Pinnacle/Fiji utilities to convert firmware, configure
+# in non-PnP mode, and select the DIGITAL1 input for the mixer.
+#
+#
+#!/bin/sh
+# This is a shell archive (produced by GNU sharutils 4.2).
+# To extract the files from this archive, save it to some FILE, remove
+# everything before the `!/bin/sh' line above, then type `sh FILE'.
+#
+# Made on 1998-12-04 10:07 EST by <andrewtv@ztransform.velsoft.com>.
+# Source directory was `/home/andrewtv/programming/pinnacle/pinnacle'.
+#
+# Existing files will *not* be overwritten unless `-c' is specified.
+#
+# This shar contains:
+# length mode name
+# ------ ---------- ------------------------------------------
+# 2064 -rw-rw-r-- MultiSound.d/setdigital.c
+# 10224 -rw-rw-r-- MultiSound.d/pinnaclecfg.c
+# 106 -rw-rw-r-- MultiSound.d/Makefile
+# 146 -rw-rw-r-- MultiSound.d/conv.l
+# 1491 -rw-rw-r-- MultiSound.d/msndreset.c
+#
+save_IFS="${IFS}"
+IFS="${IFS}:"
+gettext_dir=FAILED
+locale_dir=FAILED
+first_param="$1"
+for dir in $PATH
+do
+ if test "$gettext_dir" = FAILED && test -f $dir/gettext \
+ && ($dir/gettext --version >/dev/null 2>&1)
+ then
+ set `$dir/gettext --version 2>&1`
+ if test "$3" = GNU
+ then
+ gettext_dir=$dir
+ fi
+ fi
+ if test "$locale_dir" = FAILED && test -f $dir/shar \
+ && ($dir/shar --print-text-domain-dir >/dev/null 2>&1)
+ then
+ locale_dir=`$dir/shar --print-text-domain-dir`
+ fi
+done
+IFS="$save_IFS"
+if test "$locale_dir" = FAILED || test "$gettext_dir" = FAILED
+then
+ echo=echo
+else
+ TEXTDOMAINDIR=$locale_dir
+ export TEXTDOMAINDIR
+ TEXTDOMAIN=sharutils
+ export TEXTDOMAIN
+ echo="$gettext_dir/gettext -s"
+fi
+touch -am 1231235999 $$.touch >/dev/null 2>&1
+if test ! -f 1231235999 && test -f $$.touch; then
+ shar_touch=touch
+else
+ shar_touch=:
+ echo
+ $echo 'WARNING: not restoring timestamps. Consider getting and'
+ $echo "installing GNU \`touch', distributed in GNU File Utilities..."
+ echo
+fi
+rm -f 1231235999 $$.touch
+#
+if mkdir _sh01426; then
+ $echo 'x -' 'creating lock directory'
+else
+ $echo 'failed to create lock directory'
+ exit 1
+fi
+# ============= MultiSound.d/setdigital.c ==============
+if test ! -d 'MultiSound.d'; then
+ $echo 'x -' 'creating directory' 'MultiSound.d'
+ mkdir 'MultiSound.d'
+fi
+if test -f 'MultiSound.d/setdigital.c' && test "$first_param" != -c; then
+ $echo 'x -' SKIPPING 'MultiSound.d/setdigital.c' '(file already exists)'
+else
+ $echo 'x -' extracting 'MultiSound.d/setdigital.c' '(text)'
+ sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/setdigital.c' &&
+/*********************************************************************
+X *
+X * setdigital.c - sets the DIGITAL1 input for a mixer
+X *
+X * Copyright (C) 1998 Andrew Veliath
+X *
+X * This program is free software; you can redistribute it and/or modify
+X * it under the terms of the GNU General Public License as published by
+X * the Free Software Foundation; either version 2 of the License, or
+X * (at your option) any later version.
+X *
+X * This program is distributed in the hope that it will be useful,
+X * but WITHOUT ANY WARRANTY; without even the implied warranty of
+X * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+X * GNU General Public License for more details.
+X *
+X * You should have received a copy of the GNU General Public License
+X * along with this program; if not, write to the Free Software
+X * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+X *
+X ********************************************************************/
+X
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/soundcard.h>
+X
+int main(int argc, char *argv[])
+{
+X int fd;
+X unsigned long recmask, recsrc;
+X
+X if (argc != 2) {
+X fprintf(stderr, "usage: setdigital <mixer device>\n");
+X exit(1);
+X }
+X
+X if ((fd = open(argv[1], O_RDWR)) < 0) {
+X perror(argv[1]);
+X exit(1);
+X }
+X
+X if (ioctl(fd, SOUND_MIXER_READ_RECMASK, &recmask) < 0) {
+X fprintf(stderr, "error: ioctl read recording mask failed\n");
+X perror("ioctl");
+X close(fd);
+X exit(1);
+X }
+X
+X if (!(recmask & SOUND_MASK_DIGITAL1)) {
+X fprintf(stderr, "error: cannot find DIGITAL1 device in mixer\n");
+X close(fd);
+X exit(1);
+X }
+X
+X if (ioctl(fd, SOUND_MIXER_READ_RECSRC, &recsrc) < 0) {
+X fprintf(stderr, "error: ioctl read recording source failed\n");
+X perror("ioctl");
+X close(fd);
+X exit(1);
+X }
+X
+X recsrc |= SOUND_MASK_DIGITAL1;
+X
+X if (ioctl(fd, SOUND_MIXER_WRITE_RECSRC, &recsrc) < 0) {
+X fprintf(stderr, "error: ioctl write recording source failed\n");
+X perror("ioctl");
+X close(fd);
+X exit(1);
+X }
+X
+X close(fd);
+X
+X return 0;
+}
+SHAR_EOF
+ $shar_touch -am 1204092598 'MultiSound.d/setdigital.c' &&
+ chmod 0664 'MultiSound.d/setdigital.c' ||
+ $echo 'restore of' 'MultiSound.d/setdigital.c' 'failed'
+ if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
+ && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
+ md5sum -c << SHAR_EOF >/dev/null 2>&1 \
+ || $echo 'MultiSound.d/setdigital.c:' 'MD5 check failed'
+e87217fc3e71288102ba41fd81f71ec4 MultiSound.d/setdigital.c
+SHAR_EOF
+ else
+ shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/setdigital.c'`"
+ test 2064 -eq "$shar_count" ||
+ $echo 'MultiSound.d/setdigital.c:' 'original size' '2064,' 'current size' "$shar_count!"
+ fi
+fi
+# ============= MultiSound.d/pinnaclecfg.c ==============
+if test -f 'MultiSound.d/pinnaclecfg.c' && test "$first_param" != -c; then
+ $echo 'x -' SKIPPING 'MultiSound.d/pinnaclecfg.c' '(file already exists)'
+else
+ $echo 'x -' extracting 'MultiSound.d/pinnaclecfg.c' '(text)'
+ sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/pinnaclecfg.c' &&
+/*********************************************************************
+X *
+X * pinnaclecfg.c - Pinnacle/Fiji Device Configuration Program
+X *
+X * This is for NON-PnP mode only. For PnP mode, use isapnptools.
+X *
+X * This is Linux-specific, and must be run with root permissions.
+X *
+X * Part of the Turtle Beach MultiSound Sound Card Driver for Linux
+X *
+X * Copyright (C) 1998 Andrew Veliath
+X *
+X * This program is free software; you can redistribute it and/or modify
+X * it under the terms of the GNU General Public License as published by
+X * the Free Software Foundation; either version 2 of the License, or
+X * (at your option) any later version.
+X *
+X * This program is distributed in the hope that it will be useful,
+X * but WITHOUT ANY WARRANTY; without even the implied warranty of
+X * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+X * GNU General Public License for more details.
+X *
+X * You should have received a copy of the GNU General Public License
+X * along with this program; if not, write to the Free Software
+X * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+X *
+X ********************************************************************/
+X
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <asm/types.h>
+#include <sys/io.h>
+X
+#define IREG_LOGDEVICE 0x07
+#define IREG_ACTIVATE 0x30
+#define LD_ACTIVATE 0x01
+#define LD_DISACTIVATE 0x00
+#define IREG_EECONTROL 0x3F
+#define IREG_MEMBASEHI 0x40
+#define IREG_MEMBASELO 0x41
+#define IREG_MEMCONTROL 0x42
+#define IREG_MEMRANGEHI 0x43
+#define IREG_MEMRANGELO 0x44
+#define MEMTYPE_8BIT 0x00
+#define MEMTYPE_16BIT 0x02
+#define MEMTYPE_RANGE 0x00
+#define MEMTYPE_HIADDR 0x01
+#define IREG_IO0_BASEHI 0x60
+#define IREG_IO0_BASELO 0x61
+#define IREG_IO1_BASEHI 0x62
+#define IREG_IO1_BASELO 0x63
+#define IREG_IRQ_NUMBER 0x70
+#define IREG_IRQ_TYPE 0x71
+#define IRQTYPE_HIGH 0x02
+#define IRQTYPE_LOW 0x00
+#define IRQTYPE_LEVEL 0x01
+#define IRQTYPE_EDGE 0x00
+X
+#define HIBYTE(w) ((BYTE)(((WORD)(w) >> 8) & 0xFF))
+#define LOBYTE(w) ((BYTE)(w))
+#define MAKEWORD(low,hi) ((WORD)(((BYTE)(low))|(((WORD)((BYTE)(hi)))<<8)))
+X
+typedef __u8 BYTE;
+typedef __u16 USHORT;
+typedef __u16 WORD;
+X
+static int config_port = -1;
+X
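+/*
+X * The card's configuration registers are reached through an index/data
+X * port pair: write the register index to the config port, then read or
+X * write the register's value at config port + 1, verifying a write by
+X * reading the value back.
+X */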
+static int msnd_write_cfg(int cfg, int reg, int value)
+{
+X outb(reg, cfg);
+X outb(value, cfg + 1);
+X if (value != inb(cfg + 1)) {
+X fprintf(stderr, "error: msnd_write_cfg: I/O error\n");
+X return -EIO;
+X }
+X return 0;
+}
+X
+static int msnd_read_cfg(int cfg, int reg)
+{
+X outb(reg, cfg);
+X return inb(cfg + 1);
+}
+X
+static int msnd_write_cfg_io0(int cfg, int num, WORD io)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_IO0_BASEHI, HIBYTE(io)))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_IO0_BASELO, LOBYTE(io)))
+X return -EIO;
+X return 0;
+}
+X
+static int msnd_read_cfg_io0(int cfg, int num, WORD *io)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X
+X *io = MAKEWORD(msnd_read_cfg(cfg, IREG_IO0_BASELO),
+X msnd_read_cfg(cfg, IREG_IO0_BASEHI));
+X
+X return 0;
+}
+X
+static int msnd_write_cfg_io1(int cfg, int num, WORD io)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_IO1_BASEHI, HIBYTE(io)))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_IO1_BASELO, LOBYTE(io)))
+X return -EIO;
+X return 0;
+}
+X
+static int msnd_read_cfg_io1(int cfg, int num, WORD *io)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X
+X *io = MAKEWORD(msnd_read_cfg(cfg, IREG_IO1_BASELO),
+X msnd_read_cfg(cfg, IREG_IO1_BASEHI));
+X
+X return 0;
+}
+X
+static int msnd_write_cfg_irq(int cfg, int num, WORD irq)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_IRQ_NUMBER, LOBYTE(irq)))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_IRQ_TYPE, IRQTYPE_EDGE))
+X return -EIO;
+X return 0;
+}
+X
+static int msnd_read_cfg_irq(int cfg, int num, WORD *irq)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X
+X *irq = msnd_read_cfg(cfg, IREG_IRQ_NUMBER);
+X
+X return 0;
+}
+X
+static int msnd_write_cfg_mem(int cfg, int num, int mem)
+{
+X WORD wmem;
+X
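+X	/* the configuration registers hold bits 8..19 of the memory base */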
+X mem >>= 8;
+X mem &= 0xfff;
+X wmem = (WORD)mem;
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_MEMBASEHI, HIBYTE(wmem)))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_MEMBASELO, LOBYTE(wmem)))
+X return -EIO;
+X if (wmem && msnd_write_cfg(cfg, IREG_MEMCONTROL, (MEMTYPE_HIADDR | MEMTYPE_16BIT)))
+X return -EIO;
+X return 0;
+}
+X
+static int msnd_read_cfg_mem(int cfg, int num, int *mem)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X
+X *mem = MAKEWORD(msnd_read_cfg(cfg, IREG_MEMBASELO),
+X msnd_read_cfg(cfg, IREG_MEMBASEHI));
+X *mem <<= 8;
+X
+X return 0;
+}
+X
+static int msnd_activate_logical(int cfg, int num)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X if (msnd_write_cfg(cfg, IREG_ACTIVATE, LD_ACTIVATE))
+X return -EIO;
+X return 0;
+}
+X
+static int msnd_write_cfg_logical(int cfg, int num, WORD io0, WORD io1, WORD irq, int mem)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X if (msnd_write_cfg_io0(cfg, num, io0))
+X return -EIO;
+X if (msnd_write_cfg_io1(cfg, num, io1))
+X return -EIO;
+X if (msnd_write_cfg_irq(cfg, num, irq))
+X return -EIO;
+X if (msnd_write_cfg_mem(cfg, num, mem))
+X return -EIO;
+X if (msnd_activate_logical(cfg, num))
+X return -EIO;
+X return 0;
+}
+X
+static int msnd_read_cfg_logical(int cfg, int num, WORD *io0, WORD *io1, WORD *irq, int *mem)
+{
+X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
+X return -EIO;
+X if (msnd_read_cfg_io0(cfg, num, io0))
+X return -EIO;
+X if (msnd_read_cfg_io1(cfg, num, io1))
+X return -EIO;
+X if (msnd_read_cfg_irq(cfg, num, irq))
+X return -EIO;
+X if (msnd_read_cfg_mem(cfg, num, mem))
+X return -EIO;
+X return 0;
+}
+X
+static void usage(void)
+{
+X fprintf(stderr,
+X "\n"
+X "pinnaclecfg 1.0\n"
+X "\n"
+X "usage: pinnaclecfg <config port> [device config]\n"
+X "\n"
+X "This is for use with the card in NON-PnP mode only.\n"
+X "\n"
+X "Available devices (not all available for Fiji):\n"
+X "\n"
+X " Device Description\n"
+X " -------------------------------------------------------------------\n"
+X " reset Reset all devices (i.e. disable)\n"
+X " show Display current device configurations\n"
+X "\n"
+X " dsp <io> <irq> <mem> Audio device\n"
+X " mpu <io> <irq> Internal Kurzweil synth\n"
+X " ide <io0> <io1> <irq> On-board IDE controller\n"
+X " joystick <io> Joystick port\n"
+X "\n");
+X exit(1);
+}
+X
+static int cfg_reset(void)
+{
+X int i;
+X
+X for (i = 0; i < 4; ++i)
+X msnd_write_cfg_logical(config_port, i, 0, 0, 0, 0);
+X
+X return 0;
+}
+X
+static int cfg_show(void)
+{
+X int i;
+X int count = 0;
+X
+X for (i = 0; i < 4; ++i) {
+X WORD io0, io1, irq;
+X int mem;
+X msnd_read_cfg_logical(config_port, i, &io0, &io1, &irq, &mem);
+X switch (i) {
+X case 0:
+X if (io0 || irq || mem) {
+X printf("dsp 0x%x %d 0x%x\n", io0, irq, mem);
+X ++count;
+X }
+X break;
+X case 1:
+X if (io0 || irq) {
+X printf("mpu 0x%x %d\n", io0, irq);
+X ++count;
+X }
+X break;
+X case 2:
+X if (io0 || io1 || irq) {
+X printf("ide 0x%x 0x%x %d\n", io0, io1, irq);
+X ++count;
+X }
+X break;
+X case 3:
+X if (io0) {
+X printf("joystick 0x%x\n", io0);
+X ++count;
+X }
+X break;
+X }
+X }
+X
+X if (count == 0)
+X fprintf(stderr, "no devices configured\n");
+X
+X return 0;
+}
+X
+static int cfg_dsp(int argc, char *argv[])
+{
+X int io, irq, mem;
+X
+X if (argc < 3 ||
+X sscanf(argv[0], "0x%x", &io) != 1 ||
+X sscanf(argv[1], "%d", &irq) != 1 ||
+X sscanf(argv[2], "0x%x", &mem) != 1)
+X usage();
+X
+X if (!(io == 0x290 ||
+X io == 0x260 ||
+X io == 0x250 ||
+X io == 0x240 ||
+X io == 0x230 ||
+X io == 0x220 ||
+X io == 0x210 ||
+X io == 0x3e0)) {
+X fprintf(stderr, "error: io must be one of "
+X "210, 220, 230, 240, 250, 260, 290, or 3E0\n");
+X usage();
+X }
+X
+X if (!(irq == 5 ||
+X irq == 7 ||
+X irq == 9 ||
+X irq == 10 ||
+X irq == 11 ||
+X irq == 12)) {
+X fprintf(stderr, "error: irq must be one of "
+X "5, 7, 9, 10, 11 or 12\n");
+X usage();
+X }
+X
+X if (!(mem == 0xb0000 ||
+X mem == 0xc8000 ||
+X mem == 0xd0000 ||
+X mem == 0xd8000 ||
+X mem == 0xe0000 ||
+X mem == 0xe8000)) {
+X fprintf(stderr, "error: mem must be one of "
+X "0xb0000, 0xc8000, 0xd0000, 0xd8000, 0xe0000 or 0xe8000\n");
+X usage();
+X }
+X
+X return msnd_write_cfg_logical(config_port, 0, io, 0, irq, mem);
+}
+X
+static int cfg_mpu(int argc, char *argv[])
+{
+X int io, irq;
+X
+X if (argc < 2 ||
+X sscanf(argv[0], "0x%x", &io) != 1 ||
+X sscanf(argv[1], "%d", &irq) != 1)
+X usage();
+X
+X return msnd_write_cfg_logical(config_port, 1, io, 0, irq, 0);
+}
+X
+static int cfg_ide(int argc, char *argv[])
+{
+X int io0, io1, irq;
+X
+X if (argc < 3 ||
+X sscanf(argv[0], "0x%x", &io0) != 1 ||
+X	    sscanf(argv[1], "0x%x", &io1) != 1 ||
+X	    sscanf(argv[2], "%d", &irq) != 1)
+X usage();
+X
+X return msnd_write_cfg_logical(config_port, 2, io0, io1, irq, 0);
+}
+X
+static int cfg_joystick(int argc, char *argv[])
+{
+X int io;
+X
+X if (argc < 1 ||
+X sscanf(argv[0], "0x%x", &io) != 1)
+X usage();
+X
+X return msnd_write_cfg_logical(config_port, 3, io, 0, 0, 0);
+}
+X
+int main(int argc, char *argv[])
+{
+X char *device;
+X int rv = 0;
+X
+X --argc; ++argv;
+X
+X if (argc < 2)
+X usage();
+X
+X sscanf(argv[0], "0x%x", &config_port);
+X if (config_port != 0x250 && config_port != 0x260 && config_port != 0x270) {
+X fprintf(stderr, "error: <config port> must be 0x250, 0x260 or 0x270\n");
+X exit(1);
+X }
+X if (ioperm(config_port, 2, 1)) {
+X perror("ioperm");
+X fprintf(stderr, "note: pinnaclecfg must be run as root\n");
+X exit(1);
+X }
+X device = argv[1];
+X
+X argc -= 2; argv += 2;
+X
+X if (strcmp(device, "reset") == 0)
+X rv = cfg_reset();
+X else if (strcmp(device, "show") == 0)
+X rv = cfg_show();
+X else if (strcmp(device, "dsp") == 0)
+X rv = cfg_dsp(argc, argv);
+X else if (strcmp(device, "mpu") == 0)
+X rv = cfg_mpu(argc, argv);
+X else if (strcmp(device, "ide") == 0)
+X rv = cfg_ide(argc, argv);
+X else if (strcmp(device, "joystick") == 0)
+X rv = cfg_joystick(argc, argv);
+X else {
+X fprintf(stderr, "error: unknown device %s\n", device);
+X usage();
+X }
+X
+X if (rv)
+X fprintf(stderr, "error: device configuration failed\n");
+X
+X return 0;
+}
+SHAR_EOF
+ $shar_touch -am 1204092598 'MultiSound.d/pinnaclecfg.c' &&
+ chmod 0664 'MultiSound.d/pinnaclecfg.c' ||
+ $echo 'restore of' 'MultiSound.d/pinnaclecfg.c' 'failed'
+ if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
+ && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
+ md5sum -c << SHAR_EOF >/dev/null 2>&1 \
+ || $echo 'MultiSound.d/pinnaclecfg.c:' 'MD5 check failed'
+366bdf27f0db767a3c7921d0a6db20fe MultiSound.d/pinnaclecfg.c
+SHAR_EOF
+ else
+ shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/pinnaclecfg.c'`"
+ test 10224 -eq "$shar_count" ||
+ $echo 'MultiSound.d/pinnaclecfg.c:' 'original size' '10224,' 'current size' "$shar_count!"
+ fi
+fi
+# ============= MultiSound.d/Makefile ==============
+if test -f 'MultiSound.d/Makefile' && test "$first_param" != -c; then
+ $echo 'x -' SKIPPING 'MultiSound.d/Makefile' '(file already exists)'
+else
+ $echo 'x -' extracting 'MultiSound.d/Makefile' '(text)'
+ sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/Makefile' &&
+CC = gcc
+CFLAGS = -O
+PROGS = setdigital msndreset pinnaclecfg conv
+X
+all: $(PROGS)
+X
+clean:
+X rm -f $(PROGS)
+SHAR_EOF
+ $shar_touch -am 1204092398 'MultiSound.d/Makefile' &&
+ chmod 0664 'MultiSound.d/Makefile' ||
+ $echo 'restore of' 'MultiSound.d/Makefile' 'failed'
+ if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
+ && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
+ md5sum -c << SHAR_EOF >/dev/null 2>&1 \
+ || $echo 'MultiSound.d/Makefile:' 'MD5 check failed'
+76ca8bb44e3882edcf79c97df6c81845 MultiSound.d/Makefile
+SHAR_EOF
+ else
+ shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/Makefile'`"
+ test 106 -eq "$shar_count" ||
+ $echo 'MultiSound.d/Makefile:' 'original size' '106,' 'current size' "$shar_count!"
+ fi
+fi
+# ============= MultiSound.d/conv.l ==============
+if test -f 'MultiSound.d/conv.l' && test "$first_param" != -c; then
+ $echo 'x -' SKIPPING 'MultiSound.d/conv.l' '(file already exists)'
+else
+ $echo 'x -' extracting 'MultiSound.d/conv.l' '(text)'
+ sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/conv.l' &&
+%%
+[ \n\t,\r]
+\;.*
+DB
+[0-9A-Fa-f]+H { int n; sscanf(yytext, "%xH", &n); printf("%c", n); }
+%%
+int yywrap() { return 1; }
+int main() { yylex(); return 0; }
+SHAR_EOF
+ $shar_touch -am 0828231798 'MultiSound.d/conv.l' &&
+ chmod 0664 'MultiSound.d/conv.l' ||
+ $echo 'restore of' 'MultiSound.d/conv.l' 'failed'
+ if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
+ && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
+ md5sum -c << SHAR_EOF >/dev/null 2>&1 \
+ || $echo 'MultiSound.d/conv.l:' 'MD5 check failed'
+d2411fc32cd71a00dcdc1f009e858dd2 MultiSound.d/conv.l
+SHAR_EOF
+ else
+ shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/conv.l'`"
+ test 146 -eq "$shar_count" ||
+ $echo 'MultiSound.d/conv.l:' 'original size' '146,' 'current size' "$shar_count!"
+ fi
+fi
+# ============= MultiSound.d/msndreset.c ==============
+if test -f 'MultiSound.d/msndreset.c' && test "$first_param" != -c; then
+ $echo 'x -' SKIPPING 'MultiSound.d/msndreset.c' '(file already exists)'
+else
+ $echo 'x -' extracting 'MultiSound.d/msndreset.c' '(text)'
+ sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/msndreset.c' &&
+/*********************************************************************
+X *
+X * msndreset.c - resets the MultiSound card
+X *
+X * Copyright (C) 1998 Andrew Veliath
+X *
+X * This program is free software; you can redistribute it and/or modify
+X * it under the terms of the GNU General Public License as published by
+X * the Free Software Foundation; either version 2 of the License, or
+X * (at your option) any later version.
+X *
+X * This program is distributed in the hope that it will be useful,
+X * but WITHOUT ANY WARRANTY; without even the implied warranty of
+X * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+X * GNU General Public License for more details.
+X *
+X * You should have received a copy of the GNU General Public License
+X * along with this program; if not, write to the Free Software
+X * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+X *
+X ********************************************************************/
+X
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/soundcard.h>
+X
+int main(int argc, char *argv[])
+{
+X int fd;
+X
+X if (argc != 2) {
+X fprintf(stderr, "usage: msndreset <mixer device>\n");
+X exit(1);
+X }
+X
+X if ((fd = open(argv[1], O_RDWR)) < 0) {
+X perror(argv[1]);
+X exit(1);
+X }
+X
+X if (ioctl(fd, SOUND_MIXER_PRIVATE1, 0) < 0) {
+X fprintf(stderr, "error: msnd ioctl reset failed\n");
+X perror("ioctl");
+X close(fd);
+X exit(1);
+X }
+X
+X close(fd);
+X
+X return 0;
+}
+SHAR_EOF
+ $shar_touch -am 1204100698 'MultiSound.d/msndreset.c' &&
+ chmod 0664 'MultiSound.d/msndreset.c' ||
+ $echo 'restore of' 'MultiSound.d/msndreset.c' 'failed'
+ if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
+ && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
+ md5sum -c << SHAR_EOF >/dev/null 2>&1 \
+ || $echo 'MultiSound.d/msndreset.c:' 'MD5 check failed'
+c52f876521084e8eb25e12e01dcccb8a MultiSound.d/msndreset.c
+SHAR_EOF
+ else
+ shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/msndreset.c'`"
+ test 1491 -eq "$shar_count" ||
+ $echo 'MultiSound.d/msndreset.c:' 'original size' '1491,' 'current size' "$shar_count!"
+ fi
+fi
+rm -fr _sh01426
+exit 0
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
index 7c2d37571af0..e06238131f77 100644
--- a/Documentation/sound/hd-audio/models.rst
+++ b/Documentation/sound/hd-audio/models.rst
@@ -34,6 +34,22 @@ ALC262
======
inv-dmic
Inverted internal mic workaround
+fsc-h270
+ Fixups for Fujitsu-Siemens Celsius H270
+fsc-s7110
+ Fixups for Fujitsu-Siemens Lifebook S7110
+hp-z200
+ Fixups for HP Z200
+tyan
+ Fixups for Tyan Thunder n6650W
+lenovo-3000
+ Fixups for Lenovo 3000
+benq
+ Fixups for Benq ED8
+benq-t31
+ Fixups for Benq T31
+bayleybay
+ Fixups for Intel BayleyBay
ALC267/268
==========
@@ -41,6 +57,8 @@ inv-dmic
Inverted internal mic workaround
hp-eapd
Disable HP EAPD on NID 0x15
+spdif
+ Enable SPDIF output on NID 0x1e
ALC22x/23x/25x/269/27x/28x/29x (and vendor-specific ALC3xxx models)
===================================================================
@@ -70,6 +88,10 @@ dell-headset-multi
Headset jack, which can also be used as mic-in
dell-headset-dock
Headset jack (without mic-in), and also dock I/O
+dell-headset3
+ Headset jack (without mic-in), and also dock I/O, variant 3
+dell-headset4
+ Headset jack (without mic-in), and also dock I/O, variant 4
alc283-dac-wcaps
Fixups for Chromebook with ALC283
alc283-sense-combo
@@ -80,15 +102,173 @@ tpt440
Lenovo Thinkpad T440s setup
tpt460
Lenovo Thinkpad T460/560 setup
+tpt470-dock
+ Lenovo Thinkpad T470 dock setup
dual-codecs
Lenovo laptops with dual codecs
alc700-ref
Intel reference board with ALC700 codec
+vaio
+ Pin fixups for Sony VAIO laptops
+dell-m101z
+ COEF setup for Dell M101z
+asus-g73jw
+ Subwoofer pin fixup for ASUS G73JW
+lenovo-eapd
+    Inverted EAPD setup for Lenovo laptops
+sony-hweq
+ H/W EQ COEF setup for Sony laptops
+pcm44k
+ Fixed PCM 44kHz constraints (for buggy devices)
+lifebook
+ Dock pin fixups for Fujitsu Lifebook
+lifebook-extmic
+ Headset mic fixup for Fujitsu Lifebook
+lifebook-hp-pin
+ Headphone pin fixup for Fujitsu Lifebook
+lifebook-u7x7
+ Lifebook U7x7 fixups
+alc269vb-amic
+ ALC269VB analog mic pin fixups
+alc269vb-dmic
+ ALC269VB digital mic pin fixups
+hp-mute-led-mic1
+ Mute LED via Mic1 pin on HP
+hp-mute-led-mic2
+ Mute LED via Mic2 pin on HP
+hp-mute-led-mic3
+ Mute LED via Mic3 pin on HP
+hp-gpio-mic1
+ GPIO + Mic1 pin LED on HP
+hp-line1-mic1
+ Mute LED via Line1 + Mic1 pins on HP
+noshutup
+ Skip shutup callback
+sony-nomic
+ Headset mic fixup for Sony laptops
+aspire-headset-mic
+ Headset pin fixup for Acer Aspire
+asus-x101
+ ASUS X101 fixups
+acer-ao7xx
+ Acer AO7xx fixups
+acer-aspire-e1
+ Acer Aspire E1 fixups
+acer-ac700
+ Acer AC700 fixups
+limit-mic-boost
+ Limit internal mic boost on Lenovo machines
+asus-zenbook
+ ASUS Zenbook fixups
+asus-zenbook-ux31a
+ ASUS Zenbook UX31A fixups
+ordissimo
+ Ordissimo EVE2 (or Malata PC-B1303) fixups
+asus-tx300
+ ASUS TX300 fixups
+alc283-int-mic
+ ALC283 COEF setup for Lenovo machines
+mono-speakers
+    Subwoofer and headset fixups for Dell Inspiron
+alc290-subwoofer
+ Subwoofer fixups for Dell Vostro
+thinkpad
+ Binding with thinkpad_acpi driver for Lenovo machines
+dmic-thinkpad
+ thinkpad_acpi binding + digital mic support
+alc255-acer
+ ALC255 fixups on Acer machines
+alc255-asus
+ ALC255 fixups on ASUS machines
+alc255-dell1
+ ALC255 fixups on Dell machines
+alc255-dell2
+ ALC255 fixups on Dell machines, variant 2
+alc293-dell1
+ ALC293 fixups on Dell machines
+alc283-headset
+ Headset pin fixups on ALC283
+aspire-v5
+ Acer Aspire V5 fixups
+hp-gpio4
+ GPIO and Mic1 pin mute LED fixups for HP
+hp-gpio-led
+ GPIO mute LEDs on HP
+hp-gpio2-hotkey
+ GPIO mute LED with hot key handling on HP
+hp-dock-pins
+ GPIO mute LEDs and dock support on HP
+hp-dock-gpio-mic
+ GPIO, Mic mute LED and dock support on HP
+hp-9480m
+ HP 9480m fixups
+alc288-dell1
+ ALC288 fixups on Dell machines
+alc288-dell-xps13
+ ALC288 fixups on Dell XPS13
+dell-e7x
+ Dell E7x fixups
+alc293-dell
+ ALC293 fixups on Dell machines
+alc298-dell1
+ ALC298 fixups on Dell machines
+alc298-dell-aio
+ ALC298 fixups on Dell AIO machines
+alc275-dell-xps
+ ALC275 fixups on Dell XPS models
+alc256-dell-xps13
+ ALC256 fixups on Dell XPS13
+lenovo-spk-noise
+ Workaround for speaker noise on Lenovo machines
+lenovo-hotkey
+ Hot-key support via Mic2 pin on Lenovo machines
+dell-spk-noise
+ Workaround for speaker noise on Dell machines
+alc255-dell1
+ ALC255 fixups on Dell machines
+alc295-disable-dac3
+ Disable DAC3 routing on ALC295
+alc280-hp-headset
+ HP Elitebook fixups
+alc221-hp-mic
+ Front mic pin fixup on HP machines
+alc298-spk-volume
+ Speaker pin routing workaround on ALC298
+dell-inspiron-7559
+ Dell Inspiron 7559 fixups
+ativ-book
+ Samsung Ativ book 8 fixups
+alc221-hp-mic
+ ALC221 headset fixups on HP machines
+alc256-asus-mic
+ ALC256 fixups on ASUS machines
+alc256-asus-aio
+ ALC256 fixups on ASUS AIO machines
+alc233-eapd
+ ALC233 fixups on ASUS machines
+alc294-lenovo-mic
+ ALC294 Mic pin fixup for Lenovo AIO machines
+alc225-wyse
+ Dell Wyse fixups
+alc274-dell-aio
+ ALC274 fixups on Dell AIO machines
+alc255-dummy-lineout
+ Dell Precision 3930 fixups
+alc255-dell-headset
+ Dell Precision 3630 fixups
+alc295-hp-x360
+ HP Spectre X360 fixups
ALC66x/67x/892
==============
+aspire
+ Subwoofer pin fixup for Aspire laptops
+ideapad
+ Subwoofer pin fixup for Ideapad laptops
mario
Chromebook mario model fixup
+hp-rp5800
+ Headphone pin fixup for HP RP5800
asus-mode1
ASUS
asus-mode2
@@ -105,10 +285,40 @@ asus-mode7
ASUS
asus-mode8
ASUS
+zotac-z68
+ Front HP fixup for Zotac Z68
inv-dmic
Inverted internal mic workaround
+alc662-headset-multi
+ Dell headset jack, which can also be used as mic-in (ALC662)
dell-headset-multi
Headset jack, which can also be used as mic-in
+alc662-headset
+ Headset mode support on ALC662
+alc668-headset
+ Headset mode support on ALC668
+bass16
+ Bass speaker fixup on pin 0x16
+bass1a
+ Bass speaker fixup on pin 0x1a
+automute
+ Auto-mute fixups for ALC668
+dell-xps13
+ Dell XPS13 fixups
+asus-nx50
+ ASUS Nx50 fixups
+asus-nx51
+ ASUS Nx51 fixups
+alc891-headset
+ Headset mode support on ALC891
+alc891-headset-multi
+ Dell headset jack, which can also be used as mic-in (ALC891)
+acer-veriton
+ Acer Veriton speaker pin fixup
+asrock-mobo
+ Fix invalid 0x15 / 0x16 pins
+usi-headset
+ Headset support on USI machines
dual-codecs
Lenovo laptops with dual codecs
@@ -116,20 +326,70 @@ ALC680
======
N/A
-ALC88x/898/1150
-======================
+ALC88x/898/1150/1220
+====================
+abit-aw9d
+ Pin fixups for Abit AW9D-MAX
+lenovo-y530
+ Pin fixups for Lenovo Y530
+acer-aspire-7736
+ Fixup for Acer Aspire 7736
+asus-w90v
+ Pin fixup for ASUS W90V
+cd
+ Enable audio CD pin NID 0x1c
+no-front-hp
+ Disable front HP pin NID 0x1b
+vaio-tt
+ Pin fixup for VAIO TT
+eee1601
+ COEF setups for ASUS Eee 1601
+alc882-eapd
+ Change EAPD COEF mode on ALC882
+alc883-eapd
+ Change EAPD COEF mode on ALC883
+gpio1
+ Enable GPIO1
+gpio2
+ Enable GPIO2
+gpio3
+ Enable GPIO3
+alc889-coef
+ Setup ALC889 COEF
+asus-w2jc
+ Fixups for ASUS W2JC
acer-aspire-4930g
Acer Aspire 4930G/5930G/6530G/6930G/7730G
acer-aspire-8930g
Acer Aspire 8330G/6935G
acer-aspire
Acer Aspire others
+macpro-gpio
+ GPIO setup for Mac Pro
+dac-route
+ Workaround for DAC routing on Acer Aspire
+mbp-vref
+ Vref setup for Macbook Pro
+imac91-vref
+ Vref setup for iMac 9,1
+mba11-vref
+ Vref setup for MacBook Air 1,1
+mba21-vref
+ Vref setup for MacBook Air 2,1
+mp11-vref
+ Vref setup for Mac Pro 1,1
+mp41-vref
+ Vref setup for Mac Pro 4,1
inv-dmic
Inverted internal mic workaround
no-primary-hp
VAIO Z/VGC-LN51JGB workaround (for fixed speaker DAC)
+asus-bass
+ Bass speaker setup for ASUS ET2700
dual-codecs
ALC1220 dual codecs for Gaming mobos
+clevo-p950
+ Fixups for Clevo P950
ALC861/660
==========
diff --git a/Documentation/sound/soc/dpcm.rst b/Documentation/sound/soc/dpcm.rst
index 395e5a516282..fe61e02277f8 100644
--- a/Documentation/sound/soc/dpcm.rst
+++ b/Documentation/sound/soc/dpcm.rst
@@ -254,9 +254,7 @@ configuration.
channels->min = channels->max = 2;
/* set DAI0 to 16 bit */
- snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
- SNDRV_PCM_HW_PARAM_FIRST_MASK],
- SNDRV_PCM_FORMAT_S16_LE);
+ params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
return 0;
}
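
For reference, here is a minimal sketch of the complete fixup callback this
hunk is excerpted from. The function name, rate, and DAI are illustrative
assumptions; hw_param_interval() and params_set_format() are the helpers
declared in include/sound/pcm_params.h:

	/* Hypothetical back-end fixup: pin DAI0 to stereo, 48kHz, S16_LE. */
	static int dai0_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
					struct snd_pcm_hw_params *params)
	{
		struct snd_interval *rate = hw_param_interval(params,
				SNDRV_PCM_HW_PARAM_RATE);
		struct snd_interval *channels = hw_param_interval(params,
				SNDRV_PCM_HW_PARAM_CHANNELS);

		/* force a fixed stereo, 48kHz back-end configuration */
		rate->min = rate->max = 48000;
		channels->min = channels->max = 2;

		/* set DAI0 to 16 bit, via the helper instead of open-coded masks */
		params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);

		return 0;
	}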
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
index fbedcc39460b..9d0a7f08f93b 100644
--- a/Documentation/sphinx/kerneldoc.py
+++ b/Documentation/sphinx/kerneldoc.py
@@ -47,7 +47,7 @@ class KernelDocDirective(Directive):
optional_arguments = 4
option_spec = {
'doc': directives.unchanged_required,
- 'functions': directives.unchanged_required,
+ 'functions': directives.unchanged,
'export': directives.unchanged,
'internal': directives.unchanged,
}
@@ -75,8 +75,12 @@ class KernelDocDirective(Directive):
elif 'doc' in self.options:
cmd += ['-function', str(self.options.get('doc'))]
elif 'functions' in self.options:
- for f in str(self.options.get('functions')).split():
- cmd += ['-function', f]
+ functions = self.options.get('functions').split()
+ if functions:
+ for f in functions:
+ cmd += ['-function', f]
+ else:
+ cmd += ['-no-doc-sections']
for pattern in export_file_patterns:
for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
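
With this relaxation an empty functions option becomes meaningful: a directive
such as the following (the file path is a hypothetical example) documents every
kerneldoc comment in the file while suppressing its free-form DOC: sections via
the -no-doc-sections flag added above:

	.. kernel-doc:: drivers/foo/bar.c
	   :functions: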
diff --git a/Documentation/sphinx/parse-headers.pl b/Documentation/sphinx/parse-headers.pl
index d410f47567e9..c518050ffc3f 100755
--- a/Documentation/sphinx/parse-headers.pl
+++ b/Documentation/sphinx/parse-headers.pl
@@ -344,7 +344,7 @@ enums and defines and create cross-references to a Sphinx book.
B<parse_headers.pl> [<options>] <C_FILE> <OUT_FILE> [<EXCEPTIONS_FILE>]
-Where <options> can be: --debug, --help or --man.
+Where <options> can be: --debug, --help or --usage.
=head1 OPTIONS
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 697ef8c225df..e72853b6d725 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -27,6 +27,7 @@ Currently, these files are in /proc/sys/vm:
- dirty_bytes
- dirty_expire_centisecs
- dirty_ratio
+- dirtytime_expire_seconds
- dirty_writeback_centisecs
- drop_caches
- extfrag_threshold
@@ -44,6 +45,7 @@ Currently, these files are in /proc/sys/vm:
- mmap_rnd_bits
- mmap_rnd_compat_bits
- nr_hugepages
+- nr_hugepages_mempolicy
- nr_overcommit_hugepages
- nr_trim_pages (only if CONFIG_MMU=n)
- numa_zonelist_order
@@ -178,6 +180,18 @@ The total available memory is not equal to total system memory.
==============================================================
+dirtytime_expire_seconds
+
+When a lazytime inode is constantly having its pages dirtied, the inode with
+an updated timestamp will never get a chance to be written out. And, if the
+only thing that has happened on the file system is a dirtytime inode caused
+by an atime update, a worker will be scheduled to make sure that inode
+eventually gets pushed out to disk. This tunable defines when a dirty inode
+is old enough to be eligible for writeback by the kernel flusher threads,
+and it is also used as the wakeup interval for the dirtytime_writeback thread.
+
+==============================================================
+
dirty_writeback_centisecs
The kernel flusher threads will periodically wake up and write `old' data
@@ -519,6 +533,15 @@ See Documentation/admin-guide/mm/hugetlbpage.rst
==============================================================
+nr_hugepages_mempolicy
+
+Change the size of the hugepage pool at run-time on a specific
+set of NUMA nodes.
+
+See Documentation/admin-guide/mm/hugetlbpage.rst
+
+==============================================================
+
nr_overcommit_hugepages
Change the maximum size of the hugepage pool. The maximum is
diff --git a/Documentation/timers/timekeeping.txt b/Documentation/timers/timekeeping.txt
index f3a8cf28f802..2d1732b0a868 100644
--- a/Documentation/timers/timekeeping.txt
+++ b/Documentation/timers/timekeeping.txt
@@ -27,7 +27,7 @@ a Linux system will eventually read the clock source to determine exactly
what time it is.
Typically the clock source is a monotonic, atomic counter which will provide
-n bits which count from 0 to 2^(n-1) and then wraps around to 0 and start over.
+n bits which count from 0 to (2^n)-1 and then wrap around to 0 and start over.
It will ideally NEVER stop ticking as long as the system is running. It
may stop during system suspend.
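
To make the wraparound arithmetic concrete, here is a hedged sketch in plain C
(the 24-bit counter width is an assumption for illustration): unsigned
subtraction modulo 2^n recovers the elapsed tick count across a wrap, because
the subtraction wraps exactly the way the counter does.

	#include <stdint.h>

	#define COUNTER_BITS 24
	#define COUNTER_MASK ((1u << COUNTER_BITS) - 1)	/* (2^n)-1 */

	/* Elapsed ticks between two raw counter reads; valid as long as
	 * fewer than one full wrap (2^n ticks) separates the two reads. */
	static uint32_t ticks_elapsed(uint32_t then, uint32_t now)
	{
		return (now - then) & COUNTER_MASK;
	}

For example, with then = 0xfffff0 and now = 0x000010 the masked difference is
0x20: the 16 ticks up to the wrap plus the 16 ticks after it.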
diff --git a/Documentation/trace/events-power.rst b/Documentation/trace/events-power.rst
index a77daca75e30..2ef318962e29 100644
--- a/Documentation/trace/events-power.rst
+++ b/Documentation/trace/events-power.rst
@@ -27,6 +27,7 @@ cpufreq.
cpu_idle "state=%lu cpu_id=%lu"
cpu_frequency "state=%lu cpu_id=%lu"
+ cpu_frequency_limits "min=%lu max=%lu cpu_id=%lu"
A suspend event is used to indicate the system going in and out of the
suspend mode:
diff --git a/Documentation/trace/events.rst b/Documentation/trace/events.rst
index 696dc69b8158..f7e1fcc0953c 100644
--- a/Documentation/trace/events.rst
+++ b/Documentation/trace/events.rst
@@ -524,4 +524,4 @@ The following commands are supported:
totals derived from one or more trace event format fields and/or
event counts (hitcount).
- See Documentation/trace/histogram.txt for details and examples.
+ See Documentation/trace/histogram.rst for details and examples.
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index a20d34955333..7ea16a0ceffc 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -329,9 +329,9 @@ of ftrace. Here is a list of some of the key files:
track of the time spent in those functions. The histogram
content can be displayed in the files:
- trace_stats/function<cpu> ( function0, function1, etc).
+ trace_stat/function<cpu> ( function0, function1, etc).
- trace_stats:
+ trace_stat:
A directory that holds different tracing stats.
diff --git a/Documentation/trace/histogram.txt b/Documentation/trace/histogram.rst
index 7ffea6aa22e3..5ac724baea7d 100644
--- a/Documentation/trace/histogram.txt
+++ b/Documentation/trace/histogram.rst
@@ -1,6 +1,8 @@
- Event Histograms
+================
+Event Histograms
+================
- Documentation written by Tom Zanussi
+Documentation written by Tom Zanussi
1. Introduction
===============
@@ -19,7 +21,7 @@
derived from one or more trace event format fields and/or event
counts (hitcount).
- The format of a hist trigger is as follows:
+ The format of a hist trigger is as follows::
hist:keys=<field1[,field2,...]>[:values=<field1[,field2,...]>]
[:sort=<field1[,field2,...]>][:size=#entries][:pause][:continue]
@@ -68,6 +70,7 @@
modified by appending any of the following modifiers to the field
name:
+ =========== ==========================================
.hex display a number as a hex value
.sym display an address as a symbol
.sym-offset display an address as a symbol and offset
@@ -75,6 +78,7 @@
.execname display a common_pid as a program name
.log2 display log2 value rather than raw number
.usecs display a common_timestamp in microseconds
+ =========== ==========================================
Note that in general the semantics of a given field aren't
interpreted when applying a modifier to it, but there are some
@@ -92,15 +96,15 @@
pid-specific comm fields in the event itself.
A typical usage scenario would be the following to enable a hist
- trigger, read its current contents, and then turn it off:
+ trigger, read its current contents, and then turn it off::
- # echo 'hist:keys=skbaddr.hex:vals=len' > \
- /sys/kernel/debug/tracing/events/net/netif_rx/trigger
+ # echo 'hist:keys=skbaddr.hex:vals=len' > \
+ /sys/kernel/debug/tracing/events/net/netif_rx/trigger
- # cat /sys/kernel/debug/tracing/events/net/netif_rx/hist
+ # cat /sys/kernel/debug/tracing/events/net/netif_rx/hist
- # echo '!hist:keys=skbaddr.hex:vals=len' > \
- /sys/kernel/debug/tracing/events/net/netif_rx/trigger
+ # echo '!hist:keys=skbaddr.hex:vals=len' > \
+ /sys/kernel/debug/tracing/events/net/netif_rx/trigger
The trigger file itself can be read to show the details of the
currently attached hist trigger. This information is also displayed
@@ -140,7 +144,7 @@
can be attached to a given event, allowing that event to kick off
and stop aggregations on a host of other events.
- The format is very similar to the enable/disable_event triggers:
+ The format is very similar to the enable/disable_event triggers::
enable_hist:<system>:<event>[:count]
disable_hist:<system>:<event>[:count]
@@ -153,16 +157,16 @@
A typical usage scenario for the enable_hist/disable_hist triggers
would be to first set up a paused hist trigger on some event,
followed by an enable_hist/disable_hist pair that turns the hist
- aggregation on and off when conditions of interest are hit:
+ aggregation on and off when conditions of interest are hit::
- # echo 'hist:keys=skbaddr.hex:vals=len:pause' > \
- /sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
+ # echo 'hist:keys=skbaddr.hex:vals=len:pause' > \
+ /sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
- # echo 'enable_hist:net:netif_receive_skb if filename==/usr/bin/wget' > \
- /sys/kernel/debug/tracing/events/sched/sched_process_exec/trigger
+ # echo 'enable_hist:net:netif_receive_skb if filename==/usr/bin/wget' > \
+ /sys/kernel/debug/tracing/events/sched/sched_process_exec/trigger
- # echo 'disable_hist:net:netif_receive_skb if comm==wget' > \
- /sys/kernel/debug/tracing/events/sched/sched_process_exit/trigger
+ # echo 'disable_hist:net:netif_receive_skb if comm==wget' > \
+ /sys/kernel/debug/tracing/events/sched/sched_process_exit/trigger
The above sets up an initially paused hist trigger which is unpaused
and starts aggregating events when a given program is executed, and
@@ -172,8 +176,8 @@
The examples below provide a more concrete illustration of the
concepts and typical usage patterns discussed above.
- 'special' event fields
- ------------------------
+'special' event fields
+------------------------
There are a number of 'special event fields' available for use as
keys or values in a hist trigger. These look like and behave as if
@@ -182,14 +186,16 @@
event, and can be used anywhere an actual event field could be.
They are:
- common_timestamp u64 - timestamp (from ring buffer) associated
- with the event, in nanoseconds. May be
- modified by .usecs to have timestamps
- interpreted as microseconds.
- cpu int - the cpu on which the event occurred.
+ ====================== ==== =======================================
+ common_timestamp u64 timestamp (from ring buffer) associated
+ with the event, in nanoseconds. May be
+ modified by .usecs to have timestamps
+ interpreted as microseconds.
+ cpu int the cpu on which the event occurred.
+ ====================== ==== =======================================
- Extended error information
- --------------------------
+Extended error information
+--------------------------
For some error conditions encountered when invoking a hist trigger
command, extended error information is available via the
@@ -199,7 +205,7 @@
be available until the next hist trigger command for that event.
If available for a given error condition, the extended error
- information and usage takes the following form:
+ information and usage takes the following form::
# echo xxx > /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger
echo: write error: Invalid argument
@@ -213,7 +219,7 @@
The first set of examples creates aggregations using the kmalloc
event. The fields that can be used for the hist trigger are listed
- in the kmalloc event's format file:
+ in the kmalloc event's format file::
# cat /sys/kernel/debug/tracing/events/kmem/kmalloc/format
name: kmalloc
@@ -232,7 +238,7 @@
We'll start by creating a hist trigger that generates a simple table
that lists the total number of bytes requested for each function in
- the kernel that made one or more calls to kmalloc:
+ the kernel that made one or more calls to kmalloc::
# echo 'hist:key=call_site:val=bytes_req' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -247,7 +253,7 @@
We'll let it run for a while and then dump the contents of the 'hist'
file in the kmalloc event's subdirectory (for readability, a number
- of entries have been omitted):
+ of entries have been omitted)::
# cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist
# trigger info: hist:keys=call_site:vals=bytes_req:sort=hitcount:size=2048 [active]
@@ -287,7 +293,7 @@
specified in the trigger, followed by the value(s) also specified in
the trigger. At the beginning of the output is a line that displays
the trigger info, which can also be displayed by reading the
- 'trigger' file:
+ 'trigger' file::
# cat /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
hist:keys=call_site:vals=bytes_req:sort=hitcount:size=2048 [active]
@@ -317,7 +323,7 @@
frequencies.
To turn the hist trigger off, simply call up the trigger in the
- command history and re-execute it with a '!' prepended:
+ command history and re-execute it with a '!' prepended::
# echo '!hist:key=call_site:val=bytes_req' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -325,7 +331,7 @@
Finally, notice that the call_site as displayed in the output above
isn't really very useful. It's an address, but normally addresses
are displayed in hex. To have a numeric field displayed as a hex
- value, simply append '.hex' to the field name in the trigger:
+ value, simply append '.hex' to the field name in the trigger::
# echo 'hist:key=call_site.hex:val=bytes_req' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -370,7 +376,7 @@
when looking at text addresses are the corresponding symbols
instead. To have an address displayed as symbolic value instead,
simply append '.sym' or '.sym-offset' to the field name in the
- trigger:
+ trigger::
# echo 'hist:key=call_site.sym:val=bytes_req' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -420,7 +426,7 @@
run. If instead we wanted to see the top kmalloc callers in
terms of the number of bytes requested rather than the number of
calls, and we wanted the top caller to appear at the top, we can use
- the 'sort' parameter, along with the 'descending' modifier:
+ the 'sort' parameter, along with the 'descending' modifier::
# echo 'hist:key=call_site.sym:val=bytes_req:sort=bytes_req.descending' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -461,7 +467,7 @@
Dropped: 0
To display the offset and size information in addition to the symbol
- name, just use 'sym-offset' instead:
+ name, just use 'sym-offset' instead::
# echo 'hist:key=call_site.sym-offset:val=bytes_req:sort=bytes_req.descending' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -500,7 +506,7 @@
We can also add multiple fields to the 'values' parameter. For
example, we might want to see the total number of bytes allocated
alongside bytes requested, and display the result sorted by bytes
- allocated in a descending order:
+ allocated in a descending order::
# echo 'hist:keys=call_site.sym:values=bytes_req,bytes_alloc:sort=bytes_alloc.descending' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -543,7 +549,7 @@
the hist trigger display symbolic call_sites, we can have the hist
trigger additionally display the complete set of kernel stack traces
that led to each call_site. To do that, we simply use the special
- value 'stacktrace' for the key parameter:
+ value 'stacktrace' for the key parameter::
# echo 'hist:keys=stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \
/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
@@ -554,7 +560,7 @@
event, along with a running total of any of the event fields for
that event. Here we tally bytes requested and bytes allocated for
every callpath in the system that led up to a kmalloc (in this case
- every callpath to a kmalloc for a kernel compile):
+ every callpath to a kmalloc for a kernel compile)::
# cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist
# trigger info: hist:keys=stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active]
@@ -652,7 +658,7 @@
gather and display sorted totals for each process, you can use the
special .execname modifier to display the executable names for the
processes in the table rather than raw pids. The example below
- keeps a per-process sum of total bytes read:
+ keeps a per-process sum of total bytes read::
# echo 'hist:key=common_pid.execname:val=count:sort=count.descending' > \
/sys/kernel/debug/tracing/events/syscalls/sys_enter_read/trigger
@@ -693,7 +699,7 @@
gather and display a list of systemwide syscall hits, you can use
the special .syscall modifier to display the syscall names rather
than raw ids. The example below keeps a running total of syscall
- counts for the system during the run:
+ counts for the system during the run::
# echo 'hist:key=id.syscall:val=hitcount' > \
/sys/kernel/debug/tracing/events/raw_syscalls/sys_enter/trigger
@@ -735,19 +741,19 @@
Entries: 72
Dropped: 0
- The syscall counts above provide a rough overall picture of system
- call activity on the system; we can see for example that the most
- popular system call on this system was the 'sys_ioctl' system call.
+ The syscall counts above provide a rough overall picture of system
+ call activity on the system; we can see for example that the most
+ popular system call on this system was the 'sys_ioctl' system call.
- We can use 'compound' keys to refine that number and provide some
- further insight as to which processes exactly contribute to the
- overall ioctl count.
+ We can use 'compound' keys to refine that number and provide some
+ further insight as to which processes exactly contribute to the
+ overall ioctl count.
- The command below keeps a hitcount for every unique combination of
- system call id and pid - the end result is essentially a table
- that keeps a per-pid sum of system call hits. The results are
- sorted using the system call id as the primary key, and the
- hitcount sum as the secondary key:
+ The command below keeps a hitcount for every unique combination of
+ system call id and pid - the end result is essentially a table
+ that keeps a per-pid sum of system call hits. The results are
+ sorted using the system call id as the primary key, and the
+ hitcount sum as the secondary key::
# echo 'hist:key=id.syscall,common_pid.execname:val=hitcount:sort=id,hitcount' > \
/sys/kernel/debug/tracing/events/raw_syscalls/sys_enter/trigger
@@ -793,11 +799,11 @@
Entries: 323
Dropped: 0
- The above list does give us a breakdown of the ioctl syscall by
- pid, but it also gives us quite a bit more than that, which we
- don't really care about at the moment. Since we know the syscall
- id for sys_ioctl (16, displayed next to the sys_ioctl name), we
- can use that to filter out all the other syscalls:
+ The above list does give us a breakdown of the ioctl syscall by
+ pid, but it also gives us quite a bit more than that, which we
+ don't really care about at the moment. Since we know the syscall
+ id for sys_ioctl (16, displayed next to the sys_ioctl name), we
+ can use that to filter out all the other syscalls::
# echo 'hist:key=id.syscall,common_pid.execname:val=hitcount:sort=id,hitcount if id == 16' > \
/sys/kernel/debug/tracing/events/raw_syscalls/sys_enter/trigger
@@ -829,18 +835,18 @@
Entries: 103
Dropped: 0
- The above output shows that 'compiz' and 'Xorg' are far and away
- the heaviest ioctl callers (which might lead to questions about
- whether they really need to be making all those calls and to
- possible avenues for further investigation.)
+ The above output shows that 'compiz' and 'Xorg' are far and away
+ the heaviest ioctl callers (which might lead to questions about
+ whether they really need to be making all those calls and to
+ possible avenues for further investigation.)
- The compound key examples used a key and a sum value (hitcount) to
- sort the output, but we can just as easily use two keys instead.
- Here's an example where we use a compound key composed of the the
- common_pid and size event fields. Sorting with pid as the primary
- key and 'size' as the secondary key allows us to display an
- ordered summary of the recvfrom sizes, with counts, received by
- each process:
+ The compound key examples used a key and a sum value (hitcount) to
+ sort the output, but we can just as easily use two keys instead.
+  Here's an example where we use a compound key composed of the
+ common_pid and size event fields. Sorting with pid as the primary
+ key and 'size' as the secondary key allows us to display an
+ ordered summary of the recvfrom sizes, with counts, received by
+ each process::
# echo 'hist:key=common_pid.execname,size:val=hitcount:sort=common_pid,size' > \
/sys/kernel/debug/tracing/events/syscalls/sys_enter_recvfrom/trigger
@@ -893,7 +899,7 @@
demonstrates how you can manually pause and continue a hist trigger.
In this example, we'll aggregate fork counts and don't expect a
large number of entries in the hash table, so we'll drop it to a
- much smaller number, say 256:
+ much smaller number, say 256::
# echo 'hist:key=child_comm:val=hitcount:size=256' > \
/sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
@@ -929,7 +935,7 @@
If we want to pause the hist trigger, we can simply append :pause to
the command that started the trigger. Notice that the trigger info
- displays as [paused]:
+ displays as [paused]::
# echo 'hist:key=child_comm:val=hitcount:size=256:pause' >> \
/sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
@@ -966,7 +972,7 @@
To manually continue having the trigger aggregate events, append
:cont instead. Notice that the trigger info displays as [active]
- again, and the data has changed:
+ again, and the data has changed::
# echo 'hist:key=child_comm:val=hitcount:size=256:cont' >> \
/sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
@@ -1020,7 +1026,7 @@
wget.
First we set up an initially paused stacktrace trigger on the
- netif_receive_skb event:
+ netif_receive_skb event::
# echo 'hist:key=stacktrace:vals=len:pause' > \
/sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
@@ -1031,7 +1037,7 @@
set up on netif_receive_skb if and only if it sees a
sched_process_exec event with a filename of '/usr/bin/wget'. When
that happens, all netif_receive_skb events are aggregated into a
- hash table keyed on stacktrace:
+ hash table keyed on stacktrace::
# echo 'enable_hist:net:netif_receive_skb if filename==/usr/bin/wget' > \
/sys/kernel/debug/tracing/events/sched/sched_process_exec/trigger
@@ -1039,7 +1045,7 @@
The aggregation continues until the netif_receive_skb is paused
again, which is what the following disable_hist event does by
creating a similar setup on the sched_process_exit event, using the
- filter 'comm==wget':
+ filter 'comm==wget'::
# echo 'disable_hist:net:netif_receive_skb if comm==wget' > \
/sys/kernel/debug/tracing/events/sched/sched_process_exit/trigger
@@ -1051,7 +1057,7 @@
The overall effect is that netif_receive_skb events are aggregated
into the hash table for only the duration of the wget. Executing a
wget command and then listing the 'hist' file will display the
- output generated by the wget command:
+ output generated by the wget command::
$ wget https://www.kernel.org/pub/linux/kernel/v3.x/patch-3.19.xz
@@ -1136,13 +1142,13 @@
Suppose we wanted to try another run of the previous example but
this time also wanted to see the complete list of events that went
into the histogram. In order to avoid having to set everything up
- again, we can just clear the histogram first:
+ again, we can just clear the histogram first::
# echo 'hist:key=stacktrace:vals=len:clear' >> \
/sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
Just to verify that it is in fact cleared, here's what we now see in
- the hist file:
+ the hist file::
# cat /sys/kernel/debug/tracing/events/net/netif_receive_skb/hist
# trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused]
@@ -1156,7 +1162,7 @@
event occurring during the new run, which are in fact the same
events being aggregated into the hash table, we add some additional
'enable_event' events to the triggering sched_process_exec and
- sched_process_exit events as such:
+ sched_process_exit events as such::
# echo 'enable_event:net:netif_receive_skb if filename==/usr/bin/wget' > \
/sys/kernel/debug/tracing/events/sched/sched_process_exec/trigger
@@ -1167,7 +1173,7 @@
If you read the trigger files for the sched_process_exec and
sched_process_exit triggers, you should see two triggers for each:
one enabling/disabling the hist aggregation and the other
- enabling/disabling the logging of events:
+ enabling/disabling the logging of events::
# cat /sys/kernel/debug/tracing/events/sched/sched_process_exec/trigger
enable_event:net:netif_receive_skb:unlimited if filename==/usr/bin/wget
@@ -1181,13 +1187,13 @@
sched_process_exit events is hit and matches 'wget', it enables or
disables both the histogram and the event log, and what you end up
with is a hash table and set of events just covering the specified
- duration. Run the wget command again:
+ duration. Run the wget command again::
$ wget https://www.kernel.org/pub/linux/kernel/v3.x/patch-3.19.xz
Displaying the 'hist' file should show something similar to what you
saw in the last run, but this time you should also see the
- individual events in the trace file:
+ individual events in the trace file::
# cat /sys/kernel/debug/tracing/trace
@@ -1220,7 +1226,7 @@
attached to a given event. This capability can be useful for
creating a set of different summaries derived from the same set of
events, or for comparing the effects of different filters, among
- other things.
+ other things::
# echo 'hist:keys=skbaddr.hex:vals=len if len < 0' >> \
/sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
@@ -1241,7 +1247,7 @@
any existing hist triggers beforehand).
Displaying the contents of the 'hist' file for the event shows the
- contents of all five histograms:
+ contents of all five histograms::
# cat /sys/kernel/debug/tracing/events/net/netif_receive_skb/hist
@@ -1361,7 +1367,7 @@
output of events generated by tracepoints contained inside inline
functions, but names can be used in a hist trigger on any event.
For example, these two triggers when hit will update the same 'len'
- field in the shared 'foo' histogram data:
+ field in the shared 'foo' histogram data::
# echo 'hist:name=foo:keys=skbaddr.hex:vals=len' > \
/sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
@@ -1369,7 +1375,7 @@
/sys/kernel/debug/tracing/events/net/netif_rx/trigger
You can see that they're updating common histogram data by reading
- each event's hist files at the same time:
+ each event's hist files at the same time::
# cat /sys/kernel/debug/tracing/events/net/netif_receive_skb/hist;
cat /sys/kernel/debug/tracing/events/net/netif_rx/hist
@@ -1482,7 +1488,7 @@
And here's an example that shows how to combine histogram data from
any two events even if they don't share any 'compatible' fields
other than 'hitcount' and 'stacktrace'. These commands create a
- couple of triggers named 'bar' using those fields:
+ couple of triggers named 'bar' using those fields::
# echo 'hist:name=bar:key=stacktrace:val=hitcount' > \
/sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
@@ -1490,7 +1496,7 @@
/sys/kernel/debug/tracing/events/net/netif_rx/trigger
And displaying the output of either shows some interesting if
- somewhat confusing output:
+ somewhat confusing output::
# cat /sys/kernel/debug/tracing/events/sched/sched_process_fork/hist
# cat /sys/kernel/debug/tracing/events/net/netif_rx/hist
@@ -1705,7 +1711,7 @@ to any event field.
Either keys or values can be saved and retrieved in this way. This
creates a variable named 'ts0' for a histogram entry with the key
-'next_pid':
+'next_pid'::
# echo 'hist:keys=next_pid:vals=$ts0:ts0=common_timestamp ... >> \
event/trigger
@@ -1721,40 +1727,40 @@ Because 'vals=' is used, the common_timestamp variable value above
will also be summed as a normal histogram value would (though for a
timestamp it makes little sense).
-The below shows that a key value can also be saved in the same way:
+The below shows that a key value can also be saved in the same way::
# echo 'hist:timer_pid=common_pid:key=timer_pid ...' >> event/trigger
If a variable isn't a key variable or prefixed with 'vals=', the
associated event field will be saved in a variable but won't be summed
-as a value:
+as a value::
# echo 'hist:keys=next_pid:ts1=common_timestamp ...' >> event/trigger
Multiple variables can be assigned at the same time. The below would
result in both ts0 and b being created as variables, with both
-common_timestamp and field1 additionally being summed as values:
+common_timestamp and field1 additionally being summed as values::
# echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ...' >> \
event/trigger
Note that variable assignments can appear either preceding or
following their use. The command below behaves identically to the
-command above:
+command above::
# echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ...' >> \
event/trigger
Any number of variables not bound to a 'vals=' prefix can also be
assigned by simply separating them with colons. Below is the same
-thing but without the values being summed in the histogram:
+thing but without the values being summed in the histogram::
# echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ...' >> event/trigger
Variables set as above can be referenced and used in expressions on
another event.
-For example, here's how a latency can be calculated:
+For example, here's how a latency can be calculated::
# echo 'hist:keys=pid,prio:ts0=common_timestamp ...' >> event1/trigger
# echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ...' >> event2/trigger
@@ -1764,7 +1770,7 @@ variable ts0. In the next line, ts0 is subtracted from the second
event's timestamp to produce the latency, which is then assigned into
yet another variable, 'wakeup_lat'. The hist trigger below in turn
makes use of the wakeup_lat variable to compute a combined latency
-using the same key and variable from yet another event:
+using the same key and variable from yet another event::
# echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ...' >> event3/trigger
@@ -1784,7 +1790,7 @@ separated by semicolons, to the tracing/synthetic_events file.
For instance, the following creates a new event named 'wakeup_latency'
with 3 fields: lat, pid, and prio. Each of those fields is simply a
-variable reference to a variable on another event:
+variable reference to a variable on another event::
# echo 'wakeup_latency \
u64 lat; \
@@ -1793,13 +1799,13 @@ variable reference to a variable on another event:
/sys/kernel/debug/tracing/synthetic_events
Reading the tracing/synthetic_events file lists all the currently
-defined synthetic events, in this case the event defined above:
+defined synthetic events, in this case the event defined above::
# cat /sys/kernel/debug/tracing/synthetic_events
wakeup_latency u64 lat; pid_t pid; int prio
An existing synthetic event definition can be removed by prepending
-the command that defined it with a '!':
+the command that defined it with a '!'::
# echo '!wakeup_latency u64 lat pid_t pid int prio' >> \
/sys/kernel/debug/tracing/synthetic_events
@@ -1811,13 +1817,13 @@ and variables defined on other events (see Section 2.2.3 below on
how that is done using hist trigger 'onmatch' action). Once that is
done, the 'wakeup_latency' synthetic event instance is created.
-A histogram can now be defined for the new synthetic event:
+A histogram can now be defined for the new synthetic event::
# echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \
/sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
The new event is created under the tracing/events/synthetic/ directory
-and looks and behaves just like any other event:
+and looks and behaves just like any other event::
# ls /sys/kernel/debug/tracing/events/synthetic/wakeup_latency
enable filter format hist id trigger
@@ -1872,74 +1878,74 @@ hist trigger specification.
As an example the below defines a simple synthetic event and uses
a variable defined on the sched_wakeup_new event as a parameter
when invoking the synthetic event. Here we define the synthetic
- event:
+ event::
- # echo 'wakeup_new_test pid_t pid' >> \
- /sys/kernel/debug/tracing/synthetic_events
+ # echo 'wakeup_new_test pid_t pid' >> \
+ /sys/kernel/debug/tracing/synthetic_events
- # cat /sys/kernel/debug/tracing/synthetic_events
- wakeup_new_test pid_t pid
+ # cat /sys/kernel/debug/tracing/synthetic_events
+ wakeup_new_test pid_t pid
The following hist trigger both defines the missing testpid
variable and specifies an onmatch() action that generates a
wakeup_new_test synthetic event whenever a sched_wakeup_new event
occurs, which because of the 'if comm == "cyclictest"' filter only
- happens when the executable is cyclictest:
+ happens when the executable is cyclictest::
- # echo 'hist:keys=$testpid:testpid=pid:onmatch(sched.sched_wakeup_new).\
- wakeup_new_test($testpid) if comm=="cyclictest"' >> \
- /sys/kernel/debug/tracing/events/sched/sched_wakeup_new/trigger
+ # echo 'hist:keys=$testpid:testpid=pid:onmatch(sched.sched_wakeup_new).\
+ wakeup_new_test($testpid) if comm=="cyclictest"' >> \
+ /sys/kernel/debug/tracing/events/sched/sched_wakeup_new/trigger
Creating and displaying a histogram based on those events is now
just a matter of using the fields and new synthetic event in the
- tracing/events/synthetic directory, as usual:
+ tracing/events/synthetic directory, as usual::
- # echo 'hist:keys=pid:sort=pid' >> \
- /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/trigger
+ # echo 'hist:keys=pid:sort=pid' >> \
+ /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/trigger
Running 'cyclictest' should cause wakeup_new events to generate
wakeup_new_test synthetic events which should result in histogram
- output in the wakeup_new_test event's hist file:
+ output in the wakeup_new_test event's hist file::
- # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/hist
+ # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_new_test/hist
A more typical usage would be to use two events to calculate a
latency. The following example uses a set of hist triggers to
- produce a 'wakeup_latency' histogram:
+ produce a 'wakeup_latency' histogram.
- First, we define a 'wakeup_latency' synthetic event:
+ First, we define a 'wakeup_latency' synthetic event::
- # echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> \
- /sys/kernel/debug/tracing/synthetic_events
+ # echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> \
+ /sys/kernel/debug/tracing/synthetic_events
Next, we specify that whenever we see a sched_waking event for a
- cyclictest thread, save the timestamp in a 'ts0' variable:
+ cyclictest thread, save the timestamp in a 'ts0' variable::
- # echo 'hist:keys=$saved_pid:saved_pid=pid:ts0=common_timestamp.usecs \
- if comm=="cyclictest"' >> \
- /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+ # echo 'hist:keys=$saved_pid:saved_pid=pid:ts0=common_timestamp.usecs \
+ if comm=="cyclictest"' >> \
+ /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
Then, when the corresponding thread is actually scheduled onto the
CPU by a sched_switch event, calculate the latency and use that
along with another variable and an event field to generate a
- wakeup_latency synthetic event:
+ wakeup_latency synthetic event::
- # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:\
- onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,\
- $saved_pid,next_prio) if next_comm=="cyclictest"' >> \
- /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:\
+ onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,\
+ $saved_pid,next_prio) if next_comm=="cyclictest"' >> \
+ /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
We also need to create a histogram on the wakeup_latency synthetic
- event in order to aggregate the generated synthetic event data:
+ event in order to aggregate the generated synthetic event data::
- # echo 'hist:keys=pid,prio,lat:sort=pid,lat' >> \
- /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
+ # echo 'hist:keys=pid,prio,lat:sort=pid,lat' >> \
+ /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
Finally, once we've run cyclictest to actually generate some
events, we can see the output by looking at the wakeup_latency
- synthetic event's hist file:
+ synthetic event's hist file::
- # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/hist
+ # cat /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/hist
- onmax(var).save(field,...)
@@ -1961,38 +1967,38 @@ hist trigger specification.
back to that pid, the timestamp difference is calculated. If the
resulting latency, stored in wakeup_lat, exceeds the current
maximum latency, the values specified in the save() fields are
- recorded:
+ recorded::
- # echo 'hist:keys=pid:ts0=common_timestamp.usecs \
- if comm=="cyclictest"' >> \
- /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+ # echo 'hist:keys=pid:ts0=common_timestamp.usecs \
+ if comm=="cyclictest"' >> \
+ /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
- # echo 'hist:keys=next_pid:\
- wakeup_lat=common_timestamp.usecs-$ts0:\
- onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm) \
- if next_comm=="cyclictest"' >> \
- /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
+ # echo 'hist:keys=next_pid:\
+ wakeup_lat=common_timestamp.usecs-$ts0:\
+ onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm) \
+ if next_comm=="cyclictest"' >> \
+ /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
When the histogram is displayed, the max value and the saved
values corresponding to the max are displayed following the rest
- of the fields:
+ of the fields::
- # cat /sys/kernel/debug/tracing/events/sched/sched_switch/hist
- { next_pid: 2255 } hitcount: 239
- common_timestamp-ts0: 0
- max: 27
- next_comm: cyclictest
- prev_pid: 0 prev_prio: 120 prev_comm: swapper/1
+ # cat /sys/kernel/debug/tracing/events/sched/sched_switch/hist
+ { next_pid: 2255 } hitcount: 239
+ common_timestamp-ts0: 0
+ max: 27
+ next_comm: cyclictest
+ prev_pid: 0 prev_prio: 120 prev_comm: swapper/1
- { next_pid: 2256 } hitcount: 2355
- common_timestamp-ts0: 0
- max: 49 next_comm: cyclictest
- prev_pid: 0 prev_prio: 120 prev_comm: swapper/0
+ { next_pid: 2256 } hitcount: 2355
+ common_timestamp-ts0: 0
+ max: 49 next_comm: cyclictest
+ prev_pid: 0 prev_prio: 120 prev_comm: swapper/0
- Totals:
- Hits: 12970
- Entries: 2
- Dropped: 0
+ Totals:
+ Hits: 12970
+ Entries: 2
+ Dropped: 0
3. User space creating a trigger
--------------------------------
@@ -2002,24 +2008,24 @@ ring buffer. This can also act like an event, by writing into the trigger
file located in /sys/kernel/tracing/events/ftrace/print/
Modifying cyclictest to write into the trace_marker file before it sleeps
-and after it wakes up, something like this:
+and after it wakes up, something like this::
-static void traceputs(char *str)
-{
+ static void traceputs(char *str)
+ {
/* tracemark_fd is the trace_marker file descriptor */
if (tracemark_fd < 0)
return;
/* write the tracemark message */
write(tracemark_fd, str, strlen(str));
-}
+ }
-And later add something like:
+And later add something like::
traceputs("start");
clock_nanosleep(...);
traceputs("end");
-We can make a histogram from this:
+We can make a histogram from this::
# cd /sys/kernel/tracing
# echo 'latency u64 lat' > synthetic_events
@@ -2034,7 +2040,7 @@ it will call the "latency" synthetic event with the calculated latency as its
parameter. Finally, a histogram is added to the latency synthetic event to
record the calculated latency along with the pid.
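For reference, the unchanged trigger commands elided from this hunk
would look roughly like the sketch below; the 'buf' field test and the
exact strings are assumptions based on the surrounding description::

  # echo 'hist:keys=common_pid:ts0=common_timestamp.usecs if buf == "start"' > \
        events/ftrace/print/trigger
  # echo 'hist:keys=common_pid:lat=common_timestamp.usecs-$ts0:\
        onmatch(ftrace.print).latency($lat) if buf == "end"' >> \
        events/ftrace/print/trigger
  # echo 'hist:keys=lat,common_pid:sort=lat' > events/synthetic/latency/trigger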
-Now running cyclictest with:
+Now running cyclictest with::
# ./cyclictest -p80 -d0 -i250 -n -a -t --tracemark -b 1000
@@ -2049,297 +2055,297 @@ Now running cyclictest with:
Note, the -b 1000 is used just to make --tracemark available.
-Then we can see the histogram created by this with:
+Then we can see the histogram created by this with::
# cat events/synthetic/latency/hist
-# event histogram
-#
-# trigger info: hist:keys=lat,common_pid:vals=hitcount:sort=lat:size=2048 [active]
-#
-
-{ lat: 107, common_pid: 2039 } hitcount: 1
-{ lat: 122, common_pid: 2041 } hitcount: 1
-{ lat: 166, common_pid: 2039 } hitcount: 1
-{ lat: 174, common_pid: 2039 } hitcount: 1
-{ lat: 194, common_pid: 2041 } hitcount: 1
-{ lat: 196, common_pid: 2036 } hitcount: 1
-{ lat: 197, common_pid: 2038 } hitcount: 1
-{ lat: 198, common_pid: 2039 } hitcount: 1
-{ lat: 199, common_pid: 2039 } hitcount: 1
-{ lat: 200, common_pid: 2041 } hitcount: 1
-{ lat: 201, common_pid: 2039 } hitcount: 2
-{ lat: 202, common_pid: 2038 } hitcount: 1
-{ lat: 202, common_pid: 2043 } hitcount: 1
-{ lat: 203, common_pid: 2039 } hitcount: 1
-{ lat: 203, common_pid: 2036 } hitcount: 1
-{ lat: 203, common_pid: 2041 } hitcount: 1
-{ lat: 206, common_pid: 2038 } hitcount: 2
-{ lat: 207, common_pid: 2039 } hitcount: 1
-{ lat: 207, common_pid: 2036 } hitcount: 1
-{ lat: 208, common_pid: 2040 } hitcount: 1
-{ lat: 209, common_pid: 2043 } hitcount: 1
-{ lat: 210, common_pid: 2039 } hitcount: 1
-{ lat: 211, common_pid: 2039 } hitcount: 4
-{ lat: 212, common_pid: 2043 } hitcount: 1
-{ lat: 212, common_pid: 2039 } hitcount: 2
-{ lat: 213, common_pid: 2039 } hitcount: 1
-{ lat: 214, common_pid: 2038 } hitcount: 1
-{ lat: 214, common_pid: 2039 } hitcount: 2
-{ lat: 214, common_pid: 2042 } hitcount: 1
-{ lat: 215, common_pid: 2039 } hitcount: 1
-{ lat: 217, common_pid: 2036 } hitcount: 1
-{ lat: 217, common_pid: 2040 } hitcount: 1
-{ lat: 217, common_pid: 2039 } hitcount: 1
-{ lat: 218, common_pid: 2039 } hitcount: 6
-{ lat: 219, common_pid: 2039 } hitcount: 9
-{ lat: 220, common_pid: 2039 } hitcount: 11
-{ lat: 221, common_pid: 2039 } hitcount: 5
-{ lat: 221, common_pid: 2042 } hitcount: 1
-{ lat: 222, common_pid: 2039 } hitcount: 7
-{ lat: 223, common_pid: 2036 } hitcount: 1
-{ lat: 223, common_pid: 2039 } hitcount: 3
-{ lat: 224, common_pid: 2039 } hitcount: 4
-{ lat: 224, common_pid: 2037 } hitcount: 1
-{ lat: 224, common_pid: 2036 } hitcount: 2
-{ lat: 225, common_pid: 2039 } hitcount: 5
-{ lat: 225, common_pid: 2042 } hitcount: 1
-{ lat: 226, common_pid: 2039 } hitcount: 7
-{ lat: 226, common_pid: 2036 } hitcount: 4
-{ lat: 227, common_pid: 2039 } hitcount: 6
-{ lat: 227, common_pid: 2036 } hitcount: 12
-{ lat: 227, common_pid: 2043 } hitcount: 1
-{ lat: 228, common_pid: 2039 } hitcount: 7
-{ lat: 228, common_pid: 2036 } hitcount: 14
-{ lat: 229, common_pid: 2039 } hitcount: 9
-{ lat: 229, common_pid: 2036 } hitcount: 8
-{ lat: 229, common_pid: 2038 } hitcount: 1
-{ lat: 230, common_pid: 2039 } hitcount: 11
-{ lat: 230, common_pid: 2036 } hitcount: 6
-{ lat: 230, common_pid: 2043 } hitcount: 1
-{ lat: 230, common_pid: 2042 } hitcount: 2
-{ lat: 231, common_pid: 2041 } hitcount: 1
-{ lat: 231, common_pid: 2036 } hitcount: 6
-{ lat: 231, common_pid: 2043 } hitcount: 1
-{ lat: 231, common_pid: 2039 } hitcount: 8
-{ lat: 232, common_pid: 2037 } hitcount: 1
-{ lat: 232, common_pid: 2039 } hitcount: 6
-{ lat: 232, common_pid: 2040 } hitcount: 2
-{ lat: 232, common_pid: 2036 } hitcount: 5
-{ lat: 232, common_pid: 2043 } hitcount: 1
-{ lat: 233, common_pid: 2036 } hitcount: 5
-{ lat: 233, common_pid: 2039 } hitcount: 11
-{ lat: 234, common_pid: 2039 } hitcount: 4
-{ lat: 234, common_pid: 2038 } hitcount: 2
-{ lat: 234, common_pid: 2043 } hitcount: 2
-{ lat: 234, common_pid: 2036 } hitcount: 11
-{ lat: 234, common_pid: 2040 } hitcount: 1
-{ lat: 235, common_pid: 2037 } hitcount: 2
-{ lat: 235, common_pid: 2036 } hitcount: 8
-{ lat: 235, common_pid: 2043 } hitcount: 2
-{ lat: 235, common_pid: 2039 } hitcount: 5
-{ lat: 235, common_pid: 2042 } hitcount: 2
-{ lat: 235, common_pid: 2040 } hitcount: 4
-{ lat: 235, common_pid: 2041 } hitcount: 1
-{ lat: 236, common_pid: 2036 } hitcount: 7
-{ lat: 236, common_pid: 2037 } hitcount: 1
-{ lat: 236, common_pid: 2041 } hitcount: 5
-{ lat: 236, common_pid: 2039 } hitcount: 3
-{ lat: 236, common_pid: 2043 } hitcount: 9
-{ lat: 236, common_pid: 2040 } hitcount: 7
-{ lat: 237, common_pid: 2037 } hitcount: 1
-{ lat: 237, common_pid: 2040 } hitcount: 1
-{ lat: 237, common_pid: 2036 } hitcount: 9
-{ lat: 237, common_pid: 2039 } hitcount: 3
-{ lat: 237, common_pid: 2043 } hitcount: 8
-{ lat: 237, common_pid: 2042 } hitcount: 2
-{ lat: 237, common_pid: 2041 } hitcount: 2
-{ lat: 238, common_pid: 2043 } hitcount: 10
-{ lat: 238, common_pid: 2040 } hitcount: 1
-{ lat: 238, common_pid: 2037 } hitcount: 9
-{ lat: 238, common_pid: 2038 } hitcount: 1
-{ lat: 238, common_pid: 2039 } hitcount: 1
-{ lat: 238, common_pid: 2042 } hitcount: 3
-{ lat: 238, common_pid: 2036 } hitcount: 7
-{ lat: 239, common_pid: 2041 } hitcount: 1
-{ lat: 239, common_pid: 2043 } hitcount: 11
-{ lat: 239, common_pid: 2037 } hitcount: 11
-{ lat: 239, common_pid: 2038 } hitcount: 6
-{ lat: 239, common_pid: 2036 } hitcount: 7
-{ lat: 239, common_pid: 2040 } hitcount: 1
-{ lat: 239, common_pid: 2042 } hitcount: 9
-{ lat: 240, common_pid: 2037 } hitcount: 29
-{ lat: 240, common_pid: 2043 } hitcount: 15
-{ lat: 240, common_pid: 2040 } hitcount: 44
-{ lat: 240, common_pid: 2039 } hitcount: 1
-{ lat: 240, common_pid: 2041 } hitcount: 2
-{ lat: 240, common_pid: 2038 } hitcount: 1
-{ lat: 240, common_pid: 2036 } hitcount: 10
-{ lat: 240, common_pid: 2042 } hitcount: 13
-{ lat: 241, common_pid: 2036 } hitcount: 21
-{ lat: 241, common_pid: 2041 } hitcount: 36
-{ lat: 241, common_pid: 2037 } hitcount: 34
-{ lat: 241, common_pid: 2042 } hitcount: 14
-{ lat: 241, common_pid: 2040 } hitcount: 94
-{ lat: 241, common_pid: 2039 } hitcount: 12
-{ lat: 241, common_pid: 2038 } hitcount: 2
-{ lat: 241, common_pid: 2043 } hitcount: 28
-{ lat: 242, common_pid: 2040 } hitcount: 109
-{ lat: 242, common_pid: 2041 } hitcount: 506
-{ lat: 242, common_pid: 2039 } hitcount: 155
-{ lat: 242, common_pid: 2042 } hitcount: 21
-{ lat: 242, common_pid: 2037 } hitcount: 52
-{ lat: 242, common_pid: 2043 } hitcount: 21
-{ lat: 242, common_pid: 2036 } hitcount: 16
-{ lat: 242, common_pid: 2038 } hitcount: 156
-{ lat: 243, common_pid: 2037 } hitcount: 46
-{ lat: 243, common_pid: 2039 } hitcount: 40
-{ lat: 243, common_pid: 2042 } hitcount: 119
-{ lat: 243, common_pid: 2041 } hitcount: 611
-{ lat: 243, common_pid: 2036 } hitcount: 69
-{ lat: 243, common_pid: 2038 } hitcount: 784
-{ lat: 243, common_pid: 2040 } hitcount: 323
-{ lat: 243, common_pid: 2043 } hitcount: 14
-{ lat: 244, common_pid: 2043 } hitcount: 35
-{ lat: 244, common_pid: 2042 } hitcount: 305
-{ lat: 244, common_pid: 2039 } hitcount: 8
-{ lat: 244, common_pid: 2040 } hitcount: 4515
-{ lat: 244, common_pid: 2038 } hitcount: 371
-{ lat: 244, common_pid: 2037 } hitcount: 31
-{ lat: 244, common_pid: 2036 } hitcount: 114
-{ lat: 244, common_pid: 2041 } hitcount: 3396
-{ lat: 245, common_pid: 2036 } hitcount: 700
-{ lat: 245, common_pid: 2041 } hitcount: 2772
-{ lat: 245, common_pid: 2037 } hitcount: 268
-{ lat: 245, common_pid: 2039 } hitcount: 472
-{ lat: 245, common_pid: 2038 } hitcount: 2758
-{ lat: 245, common_pid: 2042 } hitcount: 3833
-{ lat: 245, common_pid: 2040 } hitcount: 3105
-{ lat: 245, common_pid: 2043 } hitcount: 645
-{ lat: 246, common_pid: 2038 } hitcount: 3451
-{ lat: 246, common_pid: 2041 } hitcount: 142
-{ lat: 246, common_pid: 2037 } hitcount: 5101
-{ lat: 246, common_pid: 2040 } hitcount: 68
-{ lat: 246, common_pid: 2043 } hitcount: 5099
-{ lat: 246, common_pid: 2039 } hitcount: 5608
-{ lat: 246, common_pid: 2042 } hitcount: 3723
-{ lat: 246, common_pid: 2036 } hitcount: 4738
-{ lat: 247, common_pid: 2042 } hitcount: 312
-{ lat: 247, common_pid: 2043 } hitcount: 2385
-{ lat: 247, common_pid: 2041 } hitcount: 452
-{ lat: 247, common_pid: 2038 } hitcount: 792
-{ lat: 247, common_pid: 2040 } hitcount: 78
-{ lat: 247, common_pid: 2036 } hitcount: 2375
-{ lat: 247, common_pid: 2039 } hitcount: 1834
-{ lat: 247, common_pid: 2037 } hitcount: 2655
-{ lat: 248, common_pid: 2037 } hitcount: 36
-{ lat: 248, common_pid: 2042 } hitcount: 11
-{ lat: 248, common_pid: 2038 } hitcount: 122
-{ lat: 248, common_pid: 2036 } hitcount: 135
-{ lat: 248, common_pid: 2039 } hitcount: 26
-{ lat: 248, common_pid: 2041 } hitcount: 503
-{ lat: 248, common_pid: 2043 } hitcount: 66
-{ lat: 248, common_pid: 2040 } hitcount: 46
-{ lat: 249, common_pid: 2037 } hitcount: 29
-{ lat: 249, common_pid: 2038 } hitcount: 1
-{ lat: 249, common_pid: 2043 } hitcount: 29
-{ lat: 249, common_pid: 2039 } hitcount: 8
-{ lat: 249, common_pid: 2042 } hitcount: 56
-{ lat: 249, common_pid: 2040 } hitcount: 27
-{ lat: 249, common_pid: 2041 } hitcount: 11
-{ lat: 249, common_pid: 2036 } hitcount: 27
-{ lat: 250, common_pid: 2038 } hitcount: 1
-{ lat: 250, common_pid: 2036 } hitcount: 30
-{ lat: 250, common_pid: 2040 } hitcount: 19
-{ lat: 250, common_pid: 2043 } hitcount: 22
-{ lat: 250, common_pid: 2042 } hitcount: 20
-{ lat: 250, common_pid: 2041 } hitcount: 1
-{ lat: 250, common_pid: 2039 } hitcount: 6
-{ lat: 250, common_pid: 2037 } hitcount: 48
-{ lat: 251, common_pid: 2037 } hitcount: 43
-{ lat: 251, common_pid: 2039 } hitcount: 1
-{ lat: 251, common_pid: 2036 } hitcount: 12
-{ lat: 251, common_pid: 2042 } hitcount: 2
-{ lat: 251, common_pid: 2041 } hitcount: 1
-{ lat: 251, common_pid: 2043 } hitcount: 15
-{ lat: 251, common_pid: 2040 } hitcount: 3
-{ lat: 252, common_pid: 2040 } hitcount: 1
-{ lat: 252, common_pid: 2036 } hitcount: 12
-{ lat: 252, common_pid: 2037 } hitcount: 21
-{ lat: 252, common_pid: 2043 } hitcount: 14
-{ lat: 253, common_pid: 2037 } hitcount: 21
-{ lat: 253, common_pid: 2039 } hitcount: 2
-{ lat: 253, common_pid: 2036 } hitcount: 9
-{ lat: 253, common_pid: 2043 } hitcount: 6
-{ lat: 253, common_pid: 2040 } hitcount: 1
-{ lat: 254, common_pid: 2036 } hitcount: 8
-{ lat: 254, common_pid: 2043 } hitcount: 3
-{ lat: 254, common_pid: 2041 } hitcount: 1
-{ lat: 254, common_pid: 2042 } hitcount: 1
-{ lat: 254, common_pid: 2039 } hitcount: 1
-{ lat: 254, common_pid: 2037 } hitcount: 12
-{ lat: 255, common_pid: 2043 } hitcount: 1
-{ lat: 255, common_pid: 2037 } hitcount: 2
-{ lat: 255, common_pid: 2036 } hitcount: 2
-{ lat: 255, common_pid: 2039 } hitcount: 8
-{ lat: 256, common_pid: 2043 } hitcount: 1
-{ lat: 256, common_pid: 2036 } hitcount: 4
-{ lat: 256, common_pid: 2039 } hitcount: 6
-{ lat: 257, common_pid: 2039 } hitcount: 5
-{ lat: 257, common_pid: 2036 } hitcount: 4
-{ lat: 258, common_pid: 2039 } hitcount: 5
-{ lat: 258, common_pid: 2036 } hitcount: 2
-{ lat: 259, common_pid: 2036 } hitcount: 7
-{ lat: 259, common_pid: 2039 } hitcount: 7
-{ lat: 260, common_pid: 2036 } hitcount: 8
-{ lat: 260, common_pid: 2039 } hitcount: 6
-{ lat: 261, common_pid: 2036 } hitcount: 5
-{ lat: 261, common_pid: 2039 } hitcount: 7
-{ lat: 262, common_pid: 2039 } hitcount: 5
-{ lat: 262, common_pid: 2036 } hitcount: 5
-{ lat: 263, common_pid: 2039 } hitcount: 7
-{ lat: 263, common_pid: 2036 } hitcount: 7
-{ lat: 264, common_pid: 2039 } hitcount: 9
-{ lat: 264, common_pid: 2036 } hitcount: 9
-{ lat: 265, common_pid: 2036 } hitcount: 5
-{ lat: 265, common_pid: 2039 } hitcount: 1
-{ lat: 266, common_pid: 2036 } hitcount: 1
-{ lat: 266, common_pid: 2039 } hitcount: 3
-{ lat: 267, common_pid: 2036 } hitcount: 1
-{ lat: 267, common_pid: 2039 } hitcount: 3
-{ lat: 268, common_pid: 2036 } hitcount: 1
-{ lat: 268, common_pid: 2039 } hitcount: 6
-{ lat: 269, common_pid: 2036 } hitcount: 1
-{ lat: 269, common_pid: 2043 } hitcount: 1
-{ lat: 269, common_pid: 2039 } hitcount: 2
-{ lat: 270, common_pid: 2040 } hitcount: 1
-{ lat: 270, common_pid: 2039 } hitcount: 6
-{ lat: 271, common_pid: 2041 } hitcount: 1
-{ lat: 271, common_pid: 2039 } hitcount: 5
-{ lat: 272, common_pid: 2039 } hitcount: 10
-{ lat: 273, common_pid: 2039 } hitcount: 8
-{ lat: 274, common_pid: 2039 } hitcount: 2
-{ lat: 275, common_pid: 2039 } hitcount: 1
-{ lat: 276, common_pid: 2039 } hitcount: 2
-{ lat: 276, common_pid: 2037 } hitcount: 1
-{ lat: 276, common_pid: 2038 } hitcount: 1
-{ lat: 277, common_pid: 2039 } hitcount: 1
-{ lat: 277, common_pid: 2042 } hitcount: 1
-{ lat: 278, common_pid: 2039 } hitcount: 1
-{ lat: 279, common_pid: 2039 } hitcount: 4
-{ lat: 279, common_pid: 2043 } hitcount: 1
-{ lat: 280, common_pid: 2039 } hitcount: 3
-{ lat: 283, common_pid: 2036 } hitcount: 2
-{ lat: 284, common_pid: 2039 } hitcount: 1
-{ lat: 284, common_pid: 2043 } hitcount: 1
-{ lat: 288, common_pid: 2039 } hitcount: 1
-{ lat: 289, common_pid: 2039 } hitcount: 1
-{ lat: 300, common_pid: 2039 } hitcount: 1
-{ lat: 384, common_pid: 2039 } hitcount: 1
-
-Totals:
- Hits: 67625
- Entries: 278
- Dropped: 0
+ # event histogram
+ #
+ # trigger info: hist:keys=lat,common_pid:vals=hitcount:sort=lat:size=2048 [active]
+ #
+
+ { lat: 107, common_pid: 2039 } hitcount: 1
+ { lat: 122, common_pid: 2041 } hitcount: 1
+ { lat: 166, common_pid: 2039 } hitcount: 1
+ { lat: 174, common_pid: 2039 } hitcount: 1
+ { lat: 194, common_pid: 2041 } hitcount: 1
+ { lat: 196, common_pid: 2036 } hitcount: 1
+ { lat: 197, common_pid: 2038 } hitcount: 1
+ { lat: 198, common_pid: 2039 } hitcount: 1
+ { lat: 199, common_pid: 2039 } hitcount: 1
+ { lat: 200, common_pid: 2041 } hitcount: 1
+ { lat: 201, common_pid: 2039 } hitcount: 2
+ { lat: 202, common_pid: 2038 } hitcount: 1
+ { lat: 202, common_pid: 2043 } hitcount: 1
+ { lat: 203, common_pid: 2039 } hitcount: 1
+ { lat: 203, common_pid: 2036 } hitcount: 1
+ { lat: 203, common_pid: 2041 } hitcount: 1
+ { lat: 206, common_pid: 2038 } hitcount: 2
+ { lat: 207, common_pid: 2039 } hitcount: 1
+ { lat: 207, common_pid: 2036 } hitcount: 1
+ { lat: 208, common_pid: 2040 } hitcount: 1
+ { lat: 209, common_pid: 2043 } hitcount: 1
+ { lat: 210, common_pid: 2039 } hitcount: 1
+ { lat: 211, common_pid: 2039 } hitcount: 4
+ { lat: 212, common_pid: 2043 } hitcount: 1
+ { lat: 212, common_pid: 2039 } hitcount: 2
+ { lat: 213, common_pid: 2039 } hitcount: 1
+ { lat: 214, common_pid: 2038 } hitcount: 1
+ { lat: 214, common_pid: 2039 } hitcount: 2
+ { lat: 214, common_pid: 2042 } hitcount: 1
+ { lat: 215, common_pid: 2039 } hitcount: 1
+ { lat: 217, common_pid: 2036 } hitcount: 1
+ { lat: 217, common_pid: 2040 } hitcount: 1
+ { lat: 217, common_pid: 2039 } hitcount: 1
+ { lat: 218, common_pid: 2039 } hitcount: 6
+ { lat: 219, common_pid: 2039 } hitcount: 9
+ { lat: 220, common_pid: 2039 } hitcount: 11
+ { lat: 221, common_pid: 2039 } hitcount: 5
+ { lat: 221, common_pid: 2042 } hitcount: 1
+ { lat: 222, common_pid: 2039 } hitcount: 7
+ { lat: 223, common_pid: 2036 } hitcount: 1
+ { lat: 223, common_pid: 2039 } hitcount: 3
+ { lat: 224, common_pid: 2039 } hitcount: 4
+ { lat: 224, common_pid: 2037 } hitcount: 1
+ { lat: 224, common_pid: 2036 } hitcount: 2
+ { lat: 225, common_pid: 2039 } hitcount: 5
+ { lat: 225, common_pid: 2042 } hitcount: 1
+ { lat: 226, common_pid: 2039 } hitcount: 7
+ { lat: 226, common_pid: 2036 } hitcount: 4
+ { lat: 227, common_pid: 2039 } hitcount: 6
+ { lat: 227, common_pid: 2036 } hitcount: 12
+ { lat: 227, common_pid: 2043 } hitcount: 1
+ { lat: 228, common_pid: 2039 } hitcount: 7
+ { lat: 228, common_pid: 2036 } hitcount: 14
+ { lat: 229, common_pid: 2039 } hitcount: 9
+ { lat: 229, common_pid: 2036 } hitcount: 8
+ { lat: 229, common_pid: 2038 } hitcount: 1
+ { lat: 230, common_pid: 2039 } hitcount: 11
+ { lat: 230, common_pid: 2036 } hitcount: 6
+ { lat: 230, common_pid: 2043 } hitcount: 1
+ { lat: 230, common_pid: 2042 } hitcount: 2
+ { lat: 231, common_pid: 2041 } hitcount: 1
+ { lat: 231, common_pid: 2036 } hitcount: 6
+ { lat: 231, common_pid: 2043 } hitcount: 1
+ { lat: 231, common_pid: 2039 } hitcount: 8
+ { lat: 232, common_pid: 2037 } hitcount: 1
+ { lat: 232, common_pid: 2039 } hitcount: 6
+ { lat: 232, common_pid: 2040 } hitcount: 2
+ { lat: 232, common_pid: 2036 } hitcount: 5
+ { lat: 232, common_pid: 2043 } hitcount: 1
+ { lat: 233, common_pid: 2036 } hitcount: 5
+ { lat: 233, common_pid: 2039 } hitcount: 11
+ { lat: 234, common_pid: 2039 } hitcount: 4
+ { lat: 234, common_pid: 2038 } hitcount: 2
+ { lat: 234, common_pid: 2043 } hitcount: 2
+ { lat: 234, common_pid: 2036 } hitcount: 11
+ { lat: 234, common_pid: 2040 } hitcount: 1
+ { lat: 235, common_pid: 2037 } hitcount: 2
+ { lat: 235, common_pid: 2036 } hitcount: 8
+ { lat: 235, common_pid: 2043 } hitcount: 2
+ { lat: 235, common_pid: 2039 } hitcount: 5
+ { lat: 235, common_pid: 2042 } hitcount: 2
+ { lat: 235, common_pid: 2040 } hitcount: 4
+ { lat: 235, common_pid: 2041 } hitcount: 1
+ { lat: 236, common_pid: 2036 } hitcount: 7
+ { lat: 236, common_pid: 2037 } hitcount: 1
+ { lat: 236, common_pid: 2041 } hitcount: 5
+ { lat: 236, common_pid: 2039 } hitcount: 3
+ { lat: 236, common_pid: 2043 } hitcount: 9
+ { lat: 236, common_pid: 2040 } hitcount: 7
+ { lat: 237, common_pid: 2037 } hitcount: 1
+ { lat: 237, common_pid: 2040 } hitcount: 1
+ { lat: 237, common_pid: 2036 } hitcount: 9
+ { lat: 237, common_pid: 2039 } hitcount: 3
+ { lat: 237, common_pid: 2043 } hitcount: 8
+ { lat: 237, common_pid: 2042 } hitcount: 2
+ { lat: 237, common_pid: 2041 } hitcount: 2
+ { lat: 238, common_pid: 2043 } hitcount: 10
+ { lat: 238, common_pid: 2040 } hitcount: 1
+ { lat: 238, common_pid: 2037 } hitcount: 9
+ { lat: 238, common_pid: 2038 } hitcount: 1
+ { lat: 238, common_pid: 2039 } hitcount: 1
+ { lat: 238, common_pid: 2042 } hitcount: 3
+ { lat: 238, common_pid: 2036 } hitcount: 7
+ { lat: 239, common_pid: 2041 } hitcount: 1
+ { lat: 239, common_pid: 2043 } hitcount: 11
+ { lat: 239, common_pid: 2037 } hitcount: 11
+ { lat: 239, common_pid: 2038 } hitcount: 6
+ { lat: 239, common_pid: 2036 } hitcount: 7
+ { lat: 239, common_pid: 2040 } hitcount: 1
+ { lat: 239, common_pid: 2042 } hitcount: 9
+ { lat: 240, common_pid: 2037 } hitcount: 29
+ { lat: 240, common_pid: 2043 } hitcount: 15
+ { lat: 240, common_pid: 2040 } hitcount: 44
+ { lat: 240, common_pid: 2039 } hitcount: 1
+ { lat: 240, common_pid: 2041 } hitcount: 2
+ { lat: 240, common_pid: 2038 } hitcount: 1
+ { lat: 240, common_pid: 2036 } hitcount: 10
+ { lat: 240, common_pid: 2042 } hitcount: 13
+ { lat: 241, common_pid: 2036 } hitcount: 21
+ { lat: 241, common_pid: 2041 } hitcount: 36
+ { lat: 241, common_pid: 2037 } hitcount: 34
+ { lat: 241, common_pid: 2042 } hitcount: 14
+ { lat: 241, common_pid: 2040 } hitcount: 94
+ { lat: 241, common_pid: 2039 } hitcount: 12
+ { lat: 241, common_pid: 2038 } hitcount: 2
+ { lat: 241, common_pid: 2043 } hitcount: 28
+ { lat: 242, common_pid: 2040 } hitcount: 109
+ { lat: 242, common_pid: 2041 } hitcount: 506
+ { lat: 242, common_pid: 2039 } hitcount: 155
+ { lat: 242, common_pid: 2042 } hitcount: 21
+ { lat: 242, common_pid: 2037 } hitcount: 52
+ { lat: 242, common_pid: 2043 } hitcount: 21
+ { lat: 242, common_pid: 2036 } hitcount: 16
+ { lat: 242, common_pid: 2038 } hitcount: 156
+ { lat: 243, common_pid: 2037 } hitcount: 46
+ { lat: 243, common_pid: 2039 } hitcount: 40
+ { lat: 243, common_pid: 2042 } hitcount: 119
+ { lat: 243, common_pid: 2041 } hitcount: 611
+ { lat: 243, common_pid: 2036 } hitcount: 69
+ { lat: 243, common_pid: 2038 } hitcount: 784
+ { lat: 243, common_pid: 2040 } hitcount: 323
+ { lat: 243, common_pid: 2043 } hitcount: 14
+ { lat: 244, common_pid: 2043 } hitcount: 35
+ { lat: 244, common_pid: 2042 } hitcount: 305
+ { lat: 244, common_pid: 2039 } hitcount: 8
+ { lat: 244, common_pid: 2040 } hitcount: 4515
+ { lat: 244, common_pid: 2038 } hitcount: 371
+ { lat: 244, common_pid: 2037 } hitcount: 31
+ { lat: 244, common_pid: 2036 } hitcount: 114
+ { lat: 244, common_pid: 2041 } hitcount: 3396
+ { lat: 245, common_pid: 2036 } hitcount: 700
+ { lat: 245, common_pid: 2041 } hitcount: 2772
+ { lat: 245, common_pid: 2037 } hitcount: 268
+ { lat: 245, common_pid: 2039 } hitcount: 472
+ { lat: 245, common_pid: 2038 } hitcount: 2758
+ { lat: 245, common_pid: 2042 } hitcount: 3833
+ { lat: 245, common_pid: 2040 } hitcount: 3105
+ { lat: 245, common_pid: 2043 } hitcount: 645
+ { lat: 246, common_pid: 2038 } hitcount: 3451
+ { lat: 246, common_pid: 2041 } hitcount: 142
+ { lat: 246, common_pid: 2037 } hitcount: 5101
+ { lat: 246, common_pid: 2040 } hitcount: 68
+ { lat: 246, common_pid: 2043 } hitcount: 5099
+ { lat: 246, common_pid: 2039 } hitcount: 5608
+ { lat: 246, common_pid: 2042 } hitcount: 3723
+ { lat: 246, common_pid: 2036 } hitcount: 4738
+ { lat: 247, common_pid: 2042 } hitcount: 312
+ { lat: 247, common_pid: 2043 } hitcount: 2385
+ { lat: 247, common_pid: 2041 } hitcount: 452
+ { lat: 247, common_pid: 2038 } hitcount: 792
+ { lat: 247, common_pid: 2040 } hitcount: 78
+ { lat: 247, common_pid: 2036 } hitcount: 2375
+ { lat: 247, common_pid: 2039 } hitcount: 1834
+ { lat: 247, common_pid: 2037 } hitcount: 2655
+ { lat: 248, common_pid: 2037 } hitcount: 36
+ { lat: 248, common_pid: 2042 } hitcount: 11
+ { lat: 248, common_pid: 2038 } hitcount: 122
+ { lat: 248, common_pid: 2036 } hitcount: 135
+ { lat: 248, common_pid: 2039 } hitcount: 26
+ { lat: 248, common_pid: 2041 } hitcount: 503
+ { lat: 248, common_pid: 2043 } hitcount: 66
+ { lat: 248, common_pid: 2040 } hitcount: 46
+ { lat: 249, common_pid: 2037 } hitcount: 29
+ { lat: 249, common_pid: 2038 } hitcount: 1
+ { lat: 249, common_pid: 2043 } hitcount: 29
+ { lat: 249, common_pid: 2039 } hitcount: 8
+ { lat: 249, common_pid: 2042 } hitcount: 56
+ { lat: 249, common_pid: 2040 } hitcount: 27
+ { lat: 249, common_pid: 2041 } hitcount: 11
+ { lat: 249, common_pid: 2036 } hitcount: 27
+ { lat: 250, common_pid: 2038 } hitcount: 1
+ { lat: 250, common_pid: 2036 } hitcount: 30
+ { lat: 250, common_pid: 2040 } hitcount: 19
+ { lat: 250, common_pid: 2043 } hitcount: 22
+ { lat: 250, common_pid: 2042 } hitcount: 20
+ { lat: 250, common_pid: 2041 } hitcount: 1
+ { lat: 250, common_pid: 2039 } hitcount: 6
+ { lat: 250, common_pid: 2037 } hitcount: 48
+ { lat: 251, common_pid: 2037 } hitcount: 43
+ { lat: 251, common_pid: 2039 } hitcount: 1
+ { lat: 251, common_pid: 2036 } hitcount: 12
+ { lat: 251, common_pid: 2042 } hitcount: 2
+ { lat: 251, common_pid: 2041 } hitcount: 1
+ { lat: 251, common_pid: 2043 } hitcount: 15
+ { lat: 251, common_pid: 2040 } hitcount: 3
+ { lat: 252, common_pid: 2040 } hitcount: 1
+ { lat: 252, common_pid: 2036 } hitcount: 12
+ { lat: 252, common_pid: 2037 } hitcount: 21
+ { lat: 252, common_pid: 2043 } hitcount: 14
+ { lat: 253, common_pid: 2037 } hitcount: 21
+ { lat: 253, common_pid: 2039 } hitcount: 2
+ { lat: 253, common_pid: 2036 } hitcount: 9
+ { lat: 253, common_pid: 2043 } hitcount: 6
+ { lat: 253, common_pid: 2040 } hitcount: 1
+ { lat: 254, common_pid: 2036 } hitcount: 8
+ { lat: 254, common_pid: 2043 } hitcount: 3
+ { lat: 254, common_pid: 2041 } hitcount: 1
+ { lat: 254, common_pid: 2042 } hitcount: 1
+ { lat: 254, common_pid: 2039 } hitcount: 1
+ { lat: 254, common_pid: 2037 } hitcount: 12
+ { lat: 255, common_pid: 2043 } hitcount: 1
+ { lat: 255, common_pid: 2037 } hitcount: 2
+ { lat: 255, common_pid: 2036 } hitcount: 2
+ { lat: 255, common_pid: 2039 } hitcount: 8
+ { lat: 256, common_pid: 2043 } hitcount: 1
+ { lat: 256, common_pid: 2036 } hitcount: 4
+ { lat: 256, common_pid: 2039 } hitcount: 6
+ { lat: 257, common_pid: 2039 } hitcount: 5
+ { lat: 257, common_pid: 2036 } hitcount: 4
+ { lat: 258, common_pid: 2039 } hitcount: 5
+ { lat: 258, common_pid: 2036 } hitcount: 2
+ { lat: 259, common_pid: 2036 } hitcount: 7
+ { lat: 259, common_pid: 2039 } hitcount: 7
+ { lat: 260, common_pid: 2036 } hitcount: 8
+ { lat: 260, common_pid: 2039 } hitcount: 6
+ { lat: 261, common_pid: 2036 } hitcount: 5
+ { lat: 261, common_pid: 2039 } hitcount: 7
+ { lat: 262, common_pid: 2039 } hitcount: 5
+ { lat: 262, common_pid: 2036 } hitcount: 5
+ { lat: 263, common_pid: 2039 } hitcount: 7
+ { lat: 263, common_pid: 2036 } hitcount: 7
+ { lat: 264, common_pid: 2039 } hitcount: 9
+ { lat: 264, common_pid: 2036 } hitcount: 9
+ { lat: 265, common_pid: 2036 } hitcount: 5
+ { lat: 265, common_pid: 2039 } hitcount: 1
+ { lat: 266, common_pid: 2036 } hitcount: 1
+ { lat: 266, common_pid: 2039 } hitcount: 3
+ { lat: 267, common_pid: 2036 } hitcount: 1
+ { lat: 267, common_pid: 2039 } hitcount: 3
+ { lat: 268, common_pid: 2036 } hitcount: 1
+ { lat: 268, common_pid: 2039 } hitcount: 6
+ { lat: 269, common_pid: 2036 } hitcount: 1
+ { lat: 269, common_pid: 2043 } hitcount: 1
+ { lat: 269, common_pid: 2039 } hitcount: 2
+ { lat: 270, common_pid: 2040 } hitcount: 1
+ { lat: 270, common_pid: 2039 } hitcount: 6
+ { lat: 271, common_pid: 2041 } hitcount: 1
+ { lat: 271, common_pid: 2039 } hitcount: 5
+ { lat: 272, common_pid: 2039 } hitcount: 10
+ { lat: 273, common_pid: 2039 } hitcount: 8
+ { lat: 274, common_pid: 2039 } hitcount: 2
+ { lat: 275, common_pid: 2039 } hitcount: 1
+ { lat: 276, common_pid: 2039 } hitcount: 2
+ { lat: 276, common_pid: 2037 } hitcount: 1
+ { lat: 276, common_pid: 2038 } hitcount: 1
+ { lat: 277, common_pid: 2039 } hitcount: 1
+ { lat: 277, common_pid: 2042 } hitcount: 1
+ { lat: 278, common_pid: 2039 } hitcount: 1
+ { lat: 279, common_pid: 2039 } hitcount: 4
+ { lat: 279, common_pid: 2043 } hitcount: 1
+ { lat: 280, common_pid: 2039 } hitcount: 3
+ { lat: 283, common_pid: 2036 } hitcount: 2
+ { lat: 284, common_pid: 2039 } hitcount: 1
+ { lat: 284, common_pid: 2043 } hitcount: 1
+ { lat: 288, common_pid: 2039 } hitcount: 1
+ { lat: 289, common_pid: 2039 } hitcount: 1
+ { lat: 300, common_pid: 2039 } hitcount: 1
+ { lat: 384, common_pid: 2039 } hitcount: 1
+
+ Totals:
+ Hits: 67625
+ Entries: 278
+ Dropped: 0
Note, the writes are around the sleep, so ideally they will all be about 250
microseconds. If you are wondering how there are several that are under
@@ -2350,7 +2356,7 @@ will be at 200 microseconds.
But this could easily be done in userspace. To make this even more
interesting, we can mix the histogram between events that happened in the
-kernel with trace_marker.
+kernel with trace_marker::
# cd /sys/kernel/tracing
# echo 'latency u64 lat' > synthetic_events
@@ -2362,177 +2368,177 @@ The difference this time is that instead of using the trace_marker to start
the latency, the sched_waking event is used, matching the common_pid for the
trace_marker write with the pid that is being woken by sched_waking.
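As a hedged sketch of the commands elided from this hunk, the start
timestamp would now be taken on sched_waking rather than on the first
trace_marker write, something like::

  # echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm == "cyclictest"' > \
        events/sched/sched_waking/trigger
  # echo 'hist:keys=common_pid:lat=common_timestamp.usecs-$ts0:\
        onmatch(sched.sched_waking).latency($lat) if buf == "end"' > \
        events/ftrace/print/trigger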
-After running cyclictest again with the same parameters, we now have:
+After running cyclictest again with the same parameters, we now have::
# cat events/synthetic/latency/hist
-# event histogram
-#
-# trigger info: hist:keys=lat,common_pid:vals=hitcount:sort=lat:size=2048 [active]
-#
-
-{ lat: 7, common_pid: 2302 } hitcount: 640
-{ lat: 7, common_pid: 2299 } hitcount: 42
-{ lat: 7, common_pid: 2303 } hitcount: 18
-{ lat: 7, common_pid: 2305 } hitcount: 166
-{ lat: 7, common_pid: 2306 } hitcount: 1
-{ lat: 7, common_pid: 2301 } hitcount: 91
-{ lat: 7, common_pid: 2300 } hitcount: 17
-{ lat: 8, common_pid: 2303 } hitcount: 8296
-{ lat: 8, common_pid: 2304 } hitcount: 6864
-{ lat: 8, common_pid: 2305 } hitcount: 9464
-{ lat: 8, common_pid: 2301 } hitcount: 9213
-{ lat: 8, common_pid: 2306 } hitcount: 6246
-{ lat: 8, common_pid: 2302 } hitcount: 8797
-{ lat: 8, common_pid: 2299 } hitcount: 8771
-{ lat: 8, common_pid: 2300 } hitcount: 8119
-{ lat: 9, common_pid: 2305 } hitcount: 1519
-{ lat: 9, common_pid: 2299 } hitcount: 2346
-{ lat: 9, common_pid: 2303 } hitcount: 2841
-{ lat: 9, common_pid: 2301 } hitcount: 1846
-{ lat: 9, common_pid: 2304 } hitcount: 3861
-{ lat: 9, common_pid: 2302 } hitcount: 1210
-{ lat: 9, common_pid: 2300 } hitcount: 2762
-{ lat: 9, common_pid: 2306 } hitcount: 4247
-{ lat: 10, common_pid: 2299 } hitcount: 16
-{ lat: 10, common_pid: 2306 } hitcount: 333
-{ lat: 10, common_pid: 2303 } hitcount: 16
-{ lat: 10, common_pid: 2304 } hitcount: 168
-{ lat: 10, common_pid: 2302 } hitcount: 240
-{ lat: 10, common_pid: 2301 } hitcount: 28
-{ lat: 10, common_pid: 2300 } hitcount: 95
-{ lat: 10, common_pid: 2305 } hitcount: 18
-{ lat: 11, common_pid: 2303 } hitcount: 5
-{ lat: 11, common_pid: 2305 } hitcount: 8
-{ lat: 11, common_pid: 2306 } hitcount: 221
-{ lat: 11, common_pid: 2302 } hitcount: 76
-{ lat: 11, common_pid: 2304 } hitcount: 26
-{ lat: 11, common_pid: 2300 } hitcount: 125
-{ lat: 11, common_pid: 2299 } hitcount: 2
-{ lat: 12, common_pid: 2305 } hitcount: 3
-{ lat: 12, common_pid: 2300 } hitcount: 6
-{ lat: 12, common_pid: 2306 } hitcount: 90
-{ lat: 12, common_pid: 2302 } hitcount: 4
-{ lat: 12, common_pid: 2303 } hitcount: 1
-{ lat: 12, common_pid: 2304 } hitcount: 122
-{ lat: 13, common_pid: 2300 } hitcount: 12
-{ lat: 13, common_pid: 2301 } hitcount: 1
-{ lat: 13, common_pid: 2306 } hitcount: 32
-{ lat: 13, common_pid: 2302 } hitcount: 5
-{ lat: 13, common_pid: 2305 } hitcount: 1
-{ lat: 13, common_pid: 2303 } hitcount: 1
-{ lat: 13, common_pid: 2304 } hitcount: 61
-{ lat: 14, common_pid: 2303 } hitcount: 4
-{ lat: 14, common_pid: 2306 } hitcount: 5
-{ lat: 14, common_pid: 2305 } hitcount: 4
-{ lat: 14, common_pid: 2304 } hitcount: 62
-{ lat: 14, common_pid: 2302 } hitcount: 19
-{ lat: 14, common_pid: 2300 } hitcount: 33
-{ lat: 14, common_pid: 2299 } hitcount: 1
-{ lat: 14, common_pid: 2301 } hitcount: 4
-{ lat: 15, common_pid: 2305 } hitcount: 1
-{ lat: 15, common_pid: 2302 } hitcount: 25
-{ lat: 15, common_pid: 2300 } hitcount: 11
-{ lat: 15, common_pid: 2299 } hitcount: 5
-{ lat: 15, common_pid: 2301 } hitcount: 1
-{ lat: 15, common_pid: 2304 } hitcount: 8
-{ lat: 15, common_pid: 2303 } hitcount: 1
-{ lat: 15, common_pid: 2306 } hitcount: 6
-{ lat: 16, common_pid: 2302 } hitcount: 31
-{ lat: 16, common_pid: 2306 } hitcount: 3
-{ lat: 16, common_pid: 2300 } hitcount: 5
-{ lat: 17, common_pid: 2302 } hitcount: 6
-{ lat: 17, common_pid: 2303 } hitcount: 1
-{ lat: 18, common_pid: 2304 } hitcount: 1
-{ lat: 18, common_pid: 2302 } hitcount: 8
-{ lat: 18, common_pid: 2299 } hitcount: 1
-{ lat: 18, common_pid: 2301 } hitcount: 1
-{ lat: 19, common_pid: 2303 } hitcount: 4
-{ lat: 19, common_pid: 2304 } hitcount: 5
-{ lat: 19, common_pid: 2302 } hitcount: 4
-{ lat: 19, common_pid: 2299 } hitcount: 3
-{ lat: 19, common_pid: 2306 } hitcount: 1
-{ lat: 19, common_pid: 2300 } hitcount: 4
-{ lat: 19, common_pid: 2305 } hitcount: 5
-{ lat: 20, common_pid: 2299 } hitcount: 2
-{ lat: 20, common_pid: 2302 } hitcount: 3
-{ lat: 20, common_pid: 2305 } hitcount: 1
-{ lat: 20, common_pid: 2300 } hitcount: 2
-{ lat: 20, common_pid: 2301 } hitcount: 2
-{ lat: 20, common_pid: 2303 } hitcount: 3
-{ lat: 21, common_pid: 2305 } hitcount: 1
-{ lat: 21, common_pid: 2299 } hitcount: 5
-{ lat: 21, common_pid: 2303 } hitcount: 4
-{ lat: 21, common_pid: 2302 } hitcount: 7
-{ lat: 21, common_pid: 2300 } hitcount: 1
-{ lat: 21, common_pid: 2301 } hitcount: 5
-{ lat: 21, common_pid: 2304 } hitcount: 2
-{ lat: 22, common_pid: 2302 } hitcount: 5
-{ lat: 22, common_pid: 2303 } hitcount: 1
-{ lat: 22, common_pid: 2306 } hitcount: 3
-{ lat: 22, common_pid: 2301 } hitcount: 2
-{ lat: 22, common_pid: 2300 } hitcount: 1
-{ lat: 22, common_pid: 2299 } hitcount: 1
-{ lat: 22, common_pid: 2305 } hitcount: 1
-{ lat: 22, common_pid: 2304 } hitcount: 1
-{ lat: 23, common_pid: 2299 } hitcount: 1
-{ lat: 23, common_pid: 2306 } hitcount: 2
-{ lat: 23, common_pid: 2302 } hitcount: 6
-{ lat: 24, common_pid: 2302 } hitcount: 3
-{ lat: 24, common_pid: 2300 } hitcount: 1
-{ lat: 24, common_pid: 2306 } hitcount: 2
-{ lat: 24, common_pid: 2305 } hitcount: 1
-{ lat: 24, common_pid: 2299 } hitcount: 1
-{ lat: 25, common_pid: 2300 } hitcount: 1
-{ lat: 25, common_pid: 2302 } hitcount: 4
-{ lat: 26, common_pid: 2302 } hitcount: 2
-{ lat: 27, common_pid: 2305 } hitcount: 1
-{ lat: 27, common_pid: 2300 } hitcount: 1
-{ lat: 27, common_pid: 2302 } hitcount: 3
-{ lat: 28, common_pid: 2306 } hitcount: 1
-{ lat: 28, common_pid: 2302 } hitcount: 4
-{ lat: 29, common_pid: 2302 } hitcount: 1
-{ lat: 29, common_pid: 2300 } hitcount: 2
-{ lat: 29, common_pid: 2306 } hitcount: 1
-{ lat: 29, common_pid: 2304 } hitcount: 1
-{ lat: 30, common_pid: 2302 } hitcount: 4
-{ lat: 31, common_pid: 2302 } hitcount: 6
-{ lat: 32, common_pid: 2302 } hitcount: 1
-{ lat: 33, common_pid: 2299 } hitcount: 1
-{ lat: 33, common_pid: 2302 } hitcount: 3
-{ lat: 34, common_pid: 2302 } hitcount: 2
-{ lat: 35, common_pid: 2302 } hitcount: 1
-{ lat: 35, common_pid: 2304 } hitcount: 1
-{ lat: 36, common_pid: 2302 } hitcount: 4
-{ lat: 37, common_pid: 2302 } hitcount: 6
-{ lat: 38, common_pid: 2302 } hitcount: 2
-{ lat: 39, common_pid: 2302 } hitcount: 2
-{ lat: 39, common_pid: 2304 } hitcount: 1
-{ lat: 40, common_pid: 2304 } hitcount: 2
-{ lat: 40, common_pid: 2302 } hitcount: 5
-{ lat: 41, common_pid: 2304 } hitcount: 1
-{ lat: 41, common_pid: 2302 } hitcount: 8
-{ lat: 42, common_pid: 2302 } hitcount: 6
-{ lat: 42, common_pid: 2304 } hitcount: 1
-{ lat: 43, common_pid: 2302 } hitcount: 3
-{ lat: 43, common_pid: 2304 } hitcount: 4
-{ lat: 44, common_pid: 2302 } hitcount: 6
-{ lat: 45, common_pid: 2302 } hitcount: 5
-{ lat: 46, common_pid: 2302 } hitcount: 5
-{ lat: 47, common_pid: 2302 } hitcount: 7
-{ lat: 48, common_pid: 2301 } hitcount: 1
-{ lat: 48, common_pid: 2302 } hitcount: 9
-{ lat: 49, common_pid: 2302 } hitcount: 3
-{ lat: 50, common_pid: 2302 } hitcount: 1
-{ lat: 50, common_pid: 2301 } hitcount: 1
-{ lat: 51, common_pid: 2302 } hitcount: 2
-{ lat: 51, common_pid: 2301 } hitcount: 1
-{ lat: 61, common_pid: 2302 } hitcount: 1
-{ lat: 110, common_pid: 2302 } hitcount: 1
-
-Totals:
- Hits: 89565
- Entries: 158
- Dropped: 0
+ # event histogram
+ #
+ # trigger info: hist:keys=lat,common_pid:vals=hitcount:sort=lat:size=2048 [active]
+ #
+
+ { lat: 7, common_pid: 2302 } hitcount: 640
+ { lat: 7, common_pid: 2299 } hitcount: 42
+ { lat: 7, common_pid: 2303 } hitcount: 18
+ { lat: 7, common_pid: 2305 } hitcount: 166
+ { lat: 7, common_pid: 2306 } hitcount: 1
+ { lat: 7, common_pid: 2301 } hitcount: 91
+ { lat: 7, common_pid: 2300 } hitcount: 17
+ { lat: 8, common_pid: 2303 } hitcount: 8296
+ { lat: 8, common_pid: 2304 } hitcount: 6864
+ { lat: 8, common_pid: 2305 } hitcount: 9464
+ { lat: 8, common_pid: 2301 } hitcount: 9213
+ { lat: 8, common_pid: 2306 } hitcount: 6246
+ { lat: 8, common_pid: 2302 } hitcount: 8797
+ { lat: 8, common_pid: 2299 } hitcount: 8771
+ { lat: 8, common_pid: 2300 } hitcount: 8119
+ { lat: 9, common_pid: 2305 } hitcount: 1519
+ { lat: 9, common_pid: 2299 } hitcount: 2346
+ { lat: 9, common_pid: 2303 } hitcount: 2841
+ { lat: 9, common_pid: 2301 } hitcount: 1846
+ { lat: 9, common_pid: 2304 } hitcount: 3861
+ { lat: 9, common_pid: 2302 } hitcount: 1210
+ { lat: 9, common_pid: 2300 } hitcount: 2762
+ { lat: 9, common_pid: 2306 } hitcount: 4247
+ { lat: 10, common_pid: 2299 } hitcount: 16
+ { lat: 10, common_pid: 2306 } hitcount: 333
+ { lat: 10, common_pid: 2303 } hitcount: 16
+ { lat: 10, common_pid: 2304 } hitcount: 168
+ { lat: 10, common_pid: 2302 } hitcount: 240
+ { lat: 10, common_pid: 2301 } hitcount: 28
+ { lat: 10, common_pid: 2300 } hitcount: 95
+ { lat: 10, common_pid: 2305 } hitcount: 18
+ { lat: 11, common_pid: 2303 } hitcount: 5
+ { lat: 11, common_pid: 2305 } hitcount: 8
+ { lat: 11, common_pid: 2306 } hitcount: 221
+ { lat: 11, common_pid: 2302 } hitcount: 76
+ { lat: 11, common_pid: 2304 } hitcount: 26
+ { lat: 11, common_pid: 2300 } hitcount: 125
+ { lat: 11, common_pid: 2299 } hitcount: 2
+ { lat: 12, common_pid: 2305 } hitcount: 3
+ { lat: 12, common_pid: 2300 } hitcount: 6
+ { lat: 12, common_pid: 2306 } hitcount: 90
+ { lat: 12, common_pid: 2302 } hitcount: 4
+ { lat: 12, common_pid: 2303 } hitcount: 1
+ { lat: 12, common_pid: 2304 } hitcount: 122
+ { lat: 13, common_pid: 2300 } hitcount: 12
+ { lat: 13, common_pid: 2301 } hitcount: 1
+ { lat: 13, common_pid: 2306 } hitcount: 32
+ { lat: 13, common_pid: 2302 } hitcount: 5
+ { lat: 13, common_pid: 2305 } hitcount: 1
+ { lat: 13, common_pid: 2303 } hitcount: 1
+ { lat: 13, common_pid: 2304 } hitcount: 61
+ { lat: 14, common_pid: 2303 } hitcount: 4
+ { lat: 14, common_pid: 2306 } hitcount: 5
+ { lat: 14, common_pid: 2305 } hitcount: 4
+ { lat: 14, common_pid: 2304 } hitcount: 62
+ { lat: 14, common_pid: 2302 } hitcount: 19
+ { lat: 14, common_pid: 2300 } hitcount: 33
+ { lat: 14, common_pid: 2299 } hitcount: 1
+ { lat: 14, common_pid: 2301 } hitcount: 4
+ { lat: 15, common_pid: 2305 } hitcount: 1
+ { lat: 15, common_pid: 2302 } hitcount: 25
+ { lat: 15, common_pid: 2300 } hitcount: 11
+ { lat: 15, common_pid: 2299 } hitcount: 5
+ { lat: 15, common_pid: 2301 } hitcount: 1
+ { lat: 15, common_pid: 2304 } hitcount: 8
+ { lat: 15, common_pid: 2303 } hitcount: 1
+ { lat: 15, common_pid: 2306 } hitcount: 6
+ { lat: 16, common_pid: 2302 } hitcount: 31
+ { lat: 16, common_pid: 2306 } hitcount: 3
+ { lat: 16, common_pid: 2300 } hitcount: 5
+ { lat: 17, common_pid: 2302 } hitcount: 6
+ { lat: 17, common_pid: 2303 } hitcount: 1
+ { lat: 18, common_pid: 2304 } hitcount: 1
+ { lat: 18, common_pid: 2302 } hitcount: 8
+ { lat: 18, common_pid: 2299 } hitcount: 1
+ { lat: 18, common_pid: 2301 } hitcount: 1
+ { lat: 19, common_pid: 2303 } hitcount: 4
+ { lat: 19, common_pid: 2304 } hitcount: 5
+ { lat: 19, common_pid: 2302 } hitcount: 4
+ { lat: 19, common_pid: 2299 } hitcount: 3
+ { lat: 19, common_pid: 2306 } hitcount: 1
+ { lat: 19, common_pid: 2300 } hitcount: 4
+ { lat: 19, common_pid: 2305 } hitcount: 5
+ { lat: 20, common_pid: 2299 } hitcount: 2
+ { lat: 20, common_pid: 2302 } hitcount: 3
+ { lat: 20, common_pid: 2305 } hitcount: 1
+ { lat: 20, common_pid: 2300 } hitcount: 2
+ { lat: 20, common_pid: 2301 } hitcount: 2
+ { lat: 20, common_pid: 2303 } hitcount: 3
+ { lat: 21, common_pid: 2305 } hitcount: 1
+ { lat: 21, common_pid: 2299 } hitcount: 5
+ { lat: 21, common_pid: 2303 } hitcount: 4
+ { lat: 21, common_pid: 2302 } hitcount: 7
+ { lat: 21, common_pid: 2300 } hitcount: 1
+ { lat: 21, common_pid: 2301 } hitcount: 5
+ { lat: 21, common_pid: 2304 } hitcount: 2
+ { lat: 22, common_pid: 2302 } hitcount: 5
+ { lat: 22, common_pid: 2303 } hitcount: 1
+ { lat: 22, common_pid: 2306 } hitcount: 3
+ { lat: 22, common_pid: 2301 } hitcount: 2
+ { lat: 22, common_pid: 2300 } hitcount: 1
+ { lat: 22, common_pid: 2299 } hitcount: 1
+ { lat: 22, common_pid: 2305 } hitcount: 1
+ { lat: 22, common_pid: 2304 } hitcount: 1
+ { lat: 23, common_pid: 2299 } hitcount: 1
+ { lat: 23, common_pid: 2306 } hitcount: 2
+ { lat: 23, common_pid: 2302 } hitcount: 6
+ { lat: 24, common_pid: 2302 } hitcount: 3
+ { lat: 24, common_pid: 2300 } hitcount: 1
+ { lat: 24, common_pid: 2306 } hitcount: 2
+ { lat: 24, common_pid: 2305 } hitcount: 1
+ { lat: 24, common_pid: 2299 } hitcount: 1
+ { lat: 25, common_pid: 2300 } hitcount: 1
+ { lat: 25, common_pid: 2302 } hitcount: 4
+ { lat: 26, common_pid: 2302 } hitcount: 2
+ { lat: 27, common_pid: 2305 } hitcount: 1
+ { lat: 27, common_pid: 2300 } hitcount: 1
+ { lat: 27, common_pid: 2302 } hitcount: 3
+ { lat: 28, common_pid: 2306 } hitcount: 1
+ { lat: 28, common_pid: 2302 } hitcount: 4
+ { lat: 29, common_pid: 2302 } hitcount: 1
+ { lat: 29, common_pid: 2300 } hitcount: 2
+ { lat: 29, common_pid: 2306 } hitcount: 1
+ { lat: 29, common_pid: 2304 } hitcount: 1
+ { lat: 30, common_pid: 2302 } hitcount: 4
+ { lat: 31, common_pid: 2302 } hitcount: 6
+ { lat: 32, common_pid: 2302 } hitcount: 1
+ { lat: 33, common_pid: 2299 } hitcount: 1
+ { lat: 33, common_pid: 2302 } hitcount: 3
+ { lat: 34, common_pid: 2302 } hitcount: 2
+ { lat: 35, common_pid: 2302 } hitcount: 1
+ { lat: 35, common_pid: 2304 } hitcount: 1
+ { lat: 36, common_pid: 2302 } hitcount: 4
+ { lat: 37, common_pid: 2302 } hitcount: 6
+ { lat: 38, common_pid: 2302 } hitcount: 2
+ { lat: 39, common_pid: 2302 } hitcount: 2
+ { lat: 39, common_pid: 2304 } hitcount: 1
+ { lat: 40, common_pid: 2304 } hitcount: 2
+ { lat: 40, common_pid: 2302 } hitcount: 5
+ { lat: 41, common_pid: 2304 } hitcount: 1
+ { lat: 41, common_pid: 2302 } hitcount: 8
+ { lat: 42, common_pid: 2302 } hitcount: 6
+ { lat: 42, common_pid: 2304 } hitcount: 1
+ { lat: 43, common_pid: 2302 } hitcount: 3
+ { lat: 43, common_pid: 2304 } hitcount: 4
+ { lat: 44, common_pid: 2302 } hitcount: 6
+ { lat: 45, common_pid: 2302 } hitcount: 5
+ { lat: 46, common_pid: 2302 } hitcount: 5
+ { lat: 47, common_pid: 2302 } hitcount: 7
+ { lat: 48, common_pid: 2301 } hitcount: 1
+ { lat: 48, common_pid: 2302 } hitcount: 9
+ { lat: 49, common_pid: 2302 } hitcount: 3
+ { lat: 50, common_pid: 2302 } hitcount: 1
+ { lat: 50, common_pid: 2301 } hitcount: 1
+ { lat: 51, common_pid: 2302 } hitcount: 2
+ { lat: 51, common_pid: 2301 } hitcount: 1
+ { lat: 61, common_pid: 2302 } hitcount: 1
+ { lat: 110, common_pid: 2302 } hitcount: 1
+
+ Totals:
+ Hits: 89565
+ Entries: 158
+ Dropped: 0
This doesn't give us any information about how late cyclictest may have
woken up, but it does show us a nice histogram of how long it took from
diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst
index b58c10b04e27..306997941ba1 100644
--- a/Documentation/trace/index.rst
+++ b/Documentation/trace/index.rst
@@ -18,6 +18,7 @@ Linux Tracing Technologies
events-nmi
events-msr
mmiotrace
+ histogram
hwlat_detector
intel_th
stm
diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst
index 3e0f971b12de..8bfc75c90806 100644
--- a/Documentation/trace/kprobetrace.rst
+++ b/Documentation/trace/kprobetrace.rst
@@ -18,7 +18,7 @@ To enable this feature, build your kernel with CONFIG_KPROBE_EVENTS=y.
Similar to the events tracer, this doesn't need to be activated via
current_tracer. Instead, add probe points via
/sys/kernel/debug/tracing/kprobe_events, and enable it via
-/sys/kernel/debug/tracing/events/kprobes/<EVENT>/enabled.
+/sys/kernel/debug/tracing/events/kprobes/<EVENT>/enable.
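Putting the two steps together, a minimal session might look like this
(the probe name and fetch arguments are only an illustration)::

  # echo 'p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)' > \
        /sys/kernel/debug/tracing/kprobe_events
  # echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable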
Synopsis of kprobe_events
@@ -81,9 +81,9 @@ Per-probe event filtering feature allows you to set different filter on each
probe and to choose which arguments will be shown in the trace buffer. If an
event name is specified right after 'p:' or 'r:' in kprobe_events, it adds an
event under tracing/events/kprobes/<EVENT>; in that directory you can see 'id',
-'enabled', 'format' and 'filter'.
+'enable', 'format', 'filter' and 'trigger'.
-enabled:
+enable:
  You can enable/disable the probe by writing 1 or 0 to it.
format:
@@ -95,6 +95,9 @@ filter:
id:
This shows the id of this probe event.
+trigger:
+  This allows you to install trigger commands which are executed when the
+  event is hit (for details, see Documentation/trace/events.rst, section 6).
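As an illustration, assuming the hypothetical 'myprobe' event from the
example above exists, a histogram trigger could be attached to it with
something like::

  # echo 'hist:keys=common_pid' > \
        /sys/kernel/debug/tracing/events/kprobes/myprobe/trigger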
Event Profiling
---------------
diff --git a/Documentation/trace/uprobetracer.rst b/Documentation/trace/uprobetracer.rst
index 98d3f692957a..d0822811527a 100644
--- a/Documentation/trace/uprobetracer.rst
+++ b/Documentation/trace/uprobetracer.rst
@@ -13,7 +13,7 @@ To enable this feature, build your kernel with CONFIG_UPROBE_EVENTS=y.
Similar to the kprobe-event tracer, this doesn't need to be activated via
current_tracer. Instead, add probe points via
/sys/kernel/debug/tracing/uprobe_events, and enable it via
-/sys/kernel/debug/tracing/events/uprobes/<EVENT>/enabled.
+/sys/kernel/debug/tracing/events/uprobes/<EVENT>/enable.
However, unlike the kprobe-event tracer, the uprobe event interface expects
the user to calculate the offset of the probepoint in the object.
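A sketch of that calculation: look up the symbol's address with objdump
and subtract the object's load address, 0x400000 in this illustration
(the binary, symbol and numbers below are only an example)::

  # objdump -T /bin/zsh | grep -w zfree
  0000000000446420 g    DF .text  0000000000000012  Base        zfree
  # echo 'p:zfree_entry /bin/zsh:0x46420' > /sys/kernel/debug/tracing/uprobe_events
  # echo 1 > /sys/kernel/debug/tracing/events/uprobes/zfree_entry/enable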
diff --git a/Documentation/translations/index.rst b/Documentation/translations/index.rst
new file mode 100644
index 000000000000..7f77c52d33aa
--- /dev/null
+++ b/Documentation/translations/index.rst
@@ -0,0 +1,13 @@
+.. _translations:
+
+============
+Translations
+============
+
+.. toctree::
+ :maxdepth: 1
+
+ zh_CN/index
+ it_IT/index
+ ko_KR/index
+ ja_JP/index
diff --git a/Documentation/translations/it_IT/disclaimer-ita.rst b/Documentation/translations/it_IT/disclaimer-ita.rst
new file mode 100644
index 000000000000..d68e52de6a5d
--- /dev/null
+++ b/Documentation/translations/it_IT/disclaimer-ita.rst
@@ -0,0 +1,13 @@
+:orphan:
+
+.. note::
+ This document is maintained by Federico Vaga <federico.vaga@vaga.pv.it>.
+ If you find any difference between this document and the original file or a
+ problem with the translation, please contact the maintainer of this file.
+   The following people helped to translate or review:
+ Alessia Mantegazza <amantegazza@vaga.pv.it>
+
+.. warning::
+   The purpose of this file is to make the documentation easier to read
+   and understand for Italian speakers; it is not intended as a fork. So,
+   if you have any comments or updates for this file, please try to
+   update the original English file first.
diff --git a/Documentation/translations/it_IT/doc-guide/index.rst b/Documentation/translations/it_IT/doc-guide/index.rst
new file mode 100644
index 000000000000..7a6562b547ee
--- /dev/null
+++ b/Documentation/translations/it_IT/doc-guide/index.rst
@@ -0,0 +1,24 @@
+.. include:: ../disclaimer-ita.rst
+
+.. note:: To read the original English documentation:
+   :ref:`Documentation/doc-guide/index.rst <doc_guide>`
+
+.. _it_doc_guide:
+
+=================================
+How to write kernel documentation
+=================================
+
+.. toctree::
+ :maxdepth: 1
+
+ sphinx.rst
+ kernel-doc.rst
+ parse-headers.rst
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/translations/it_IT/doc-guide/kernel-doc.rst b/Documentation/translations/it_IT/doc-guide/kernel-doc.rst
new file mode 100644
index 000000000000..2bf1c1e2f394
--- /dev/null
+++ b/Documentation/translations/it_IT/doc-guide/kernel-doc.rst
@@ -0,0 +1,554 @@
+.. include:: ../disclaimer-ita.rst
+
+.. note:: To read the original English documentation:
+   :ref:`Documentation/doc-guide/index.rst <doc_guide>`
+
+.. _it_kernel_doc:
+
+Writing kernel-doc comments
+===========================
+
+In the Linux kernel source files you will find documentation comments
+structured in the kernel-doc format. They can describe functions, data
+types, and the design of the code.
+
+.. note:: The kernel-doc format may look similar to gtk-doc or Doxygen,
+   but for historical reasons it is actually quite different. The kernel
+   sources contain tens of thousands of kernel-doc comments. Please
+   stick to the style described here.
+
+The kernel-doc structure is extracted from the comments; from these, the
+`Sphinx C Domain`_ is generated, with suitable descriptions and
+cross-references for functions and data types. The descriptions are
+filtered for special kernel-doc highlights and references.
+
+See below for details.
+
+.. _`Sphinx C Domain`: http://www.sphinx-doc.org/en/stable/domains.html
+
+Every function that is exported to loadable modules using
+``EXPORT_SYMBOL`` or ``EXPORT_SYMBOL_GPL`` should have a kernel-doc
+comment. Functions and data structures in header files which are
+intended to be used by modules should also have kernel-doc comments.
+
+It is good practice to also provide kernel-doc formatted documentation
+for functions externally visible to other kernel files (that is, not
+declared ``static``). We also recommend providing kernel-doc
+documentation for private (file ``static``) routines, to give the
+sources a more consistent structure. This latter recommendation has a
+lower priority and is at the discretion of the MAINTAINER of that
+kernel source file.
+
+Data structures visible in kernel header files should also be documented
+using kernel-doc formatted comments.
+
+How to format kernel-doc comments
+---------------------------------
+
+kernel-doc comments begin with the ``/**`` marker. The ``kernel-doc``
+tool will extract the comments marked this way. The rest of the comment
+is formatted like a normal multi-line comment, that is, with an asterisk
+at the start of each line, ending with ``*/`` on a separate line.
+
+The kernel-doc comments for functions and types should be placed just
+above the function or type being described. This is done to maximise the
+chance that whoever changes the code will also remember to update the
+documentation. More general kernel-doc comments may be placed anywhere
+in the file.
+
+To verify that the comments are correctly formatted, you can run the
+``kernel-doc`` tool with a high verbosity level and without producing
+any documentation output. For example::
+
+ scripts/kernel-doc -v -none drivers/foo/bar.c
+
+The documentation format is verified by the kernel build when it is
+asked to perform extra GCC checks::
+
+ make W=n
+
+Documenting functions
+---------------------
+
+The general format of a kernel-doc comment for functions and
+function-like macros is::
+
+ /**
+ * function_name() - Brief description of function.
+ * @arg1: Describe the first argument.
+ * @arg2: Describe the second argument.
+ * One can provide multiple line descriptions
+ * for arguments.
+ *
+ * A longer description, with more discussion of the function function_name()
+ * that might be useful to those using or modifying it. Begins with an
+ * empty comment line, and may include additional embedded empty
+ * comment lines.
+ *
+ * The longer description may have multiple paragraphs.
+ *
+ * Context: Describes whether the function can sleep, what locks it takes,
+ * releases, or expects to be held. It can extend over multiple
+ * lines.
+ * Return: Describe the return value of foobar.
+ *
+ * The return value description can also have multiple paragraphs, and should
+ * be placed at the end of the comment block.
+ */
+
+The brief description following the function name may span multiple
+lines, and ends with an argument description, a blank comment line, or
+the end of the comment block.
+
+Function parameters
+~~~~~~~~~~~~~~~~~~~
+
+Each function argument should be described in order, immediately
+following the brief description. Do not leave a blank line between the
+brief description and the arguments, nor between the arguments.
+
+Each ``@argument:`` description may span multiple lines.
+
+.. note::
+
+   If the ``@argument:`` description spans multiple lines, the
+   continuation should start at the same column as the previous
+   line::
+
+ * @argument: some long description
+ * that continues on next lines
+
+ oppure::
+
+ * @argument:
+ * some long description
+ * that continues on next lines
+
+Se una funzione ha un numero variabile di argomenti, la sua descrizione
+dovrebbe essere scritta con la notazione kernel-doc::
+
+ * @...: description
+
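+Per esempio, ecco un abbozzo di commento per una funzione variadica (il nome
+``foo_log()`` è di pura fantasia)::
+
+ /**
+  * foo_log() - Print a formatted log message.
+  * @level: severity level of the message.
+  * @fmt: printf-like format string.
+  * @...: arguments for the format string, as specified by @fmt.
+  */
+ int foo_log(int level, const char *fmt, ...);
+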
+Contesto delle funzioni
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Il contesto in cui le funzioni vengono chiamate viene descritto in una
+sezione chiamata ``Context``. Questo dovrebbe informare sulla possibilità
+che una funzione dorma (*sleep*) o che possa essere chiamata in un contesto
+d'interruzione, così come i *lock* che prende, rilascia e che si aspetta che
+vengano presi dal chiamante.
+
+Esempi::
+
+ * Context: Any context.
+ * Context: Any context. Takes and releases the RCU lock.
+ * Context: Any context. Expects <lock> to be held by caller.
+ * Context: Process context. May sleep if @gfp flags permit.
+ * Context: Process context. Takes and releases <mutex>.
+ * Context: Softirq or process context. Takes and releases <lock>, BH-safe.
+ * Context: Interrupt context.
+
+Valore di ritorno
+~~~~~~~~~~~~~~~~~
+
+Il valore di ritorno, se c'è, viene descritto in una sezione dedicata di nome
+``Return``.
+
+.. note::
+
+ #) La descrizione multiriga non riconosce le interruzioni di riga, per cui
+ se provate a formattare bene il vostro testo come nel seguente esempio::
+
+ * Return:
+ * 0 - OK
+ * -EINVAL - invalid argument
+ * -ENOMEM - out of memory
+
+ le righe verranno unite e il risultato sarà::
+
+ Return: 0 - OK -EINVAL - invalid argument -ENOMEM - out of memory
+
+ Quindi, se volete che le righe vengano effettivamente generate, dovete
+ utilizzare una lista ReST, ad esempio::
+
+ * Return:
+ * * 0 - OK to runtime suspend the device
+ * * -EBUSY - Device should not be runtime suspended
+
+ #) Se il vostro testo ha delle righe che iniziano con una frase seguita dai
+ due punti, allora ognuna di queste frasi verrà considerata come il nome
+ di una nuova sezione, e probabilmente non produrrà gli effetti desiderati.
+
+Documentare strutture, unioni ed enumerazioni
+---------------------------------------------
+
+Generalmente il formato di un commento kernel-doc per struct, union ed enum è::
+
+ /**
+ * struct struct_name - Brief description.
+ * @member1: Description of member1.
+ * @member2: Description of member2.
+ * One can provide multiple line descriptions
+ * for members.
+ *
+ * Description of the structure.
+ */
+
+Nell'esempio qui sopra, potete sostituire ``struct`` con ``union`` o ``enum``
+per descrivere unioni ed enumerati. ``member`` viene usato per indicare i
+membri di strutture ed unioni, ma anche i valori di un tipo enumerato.
+
+La descrizione introduttiva (*brief description*) che segue il nome della
+struttura può continuare su righe successive e termina con la descrizione di
+un membro, una linea di commento vuota, oppure la fine del commento.
+
+Membri
+~~~~~~
+
+I membri di strutture, unioni ed enumerati devono essere documentati come i
+parametri delle funzioni; seguono la descrizione introduttiva e possono
+estendersi su più righe.
+
+All'interno d'una struttura o d'un'unione, potete utilizzare le etichette
+``private:`` e ``public:``. I campi che sono nell'area ``private:`` non
+verranno inclusi nella documentazione finale.
+
+Le etichette ``private:`` e ``public:`` devono essere messe subito dopo
+il marcatore di un commento ``/*``. Opzionalmente, possono includere commenti
+fra ``:`` e il marcatore di fine commento ``*/``.
+
+Esempio::
+
+ /**
+ * struct my_struct - short description
+ * @a: first member
+ * @b: second member
+ * @d: fourth member
+ *
+ * Longer description
+ */
+ struct my_struct {
+ int a;
+ int b;
+ /* private: internal use only */
+ int c;
+ /* public: the next one is public */
+ int d;
+ };
+
+Strutture ed unioni annidate
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+È possibile documentare strutture ed unioni annidate, ad esempio::
+
+ /**
+ * struct nested_foobar - a struct with nested unions and structs
+ * @memb1: first member of anonymous union/anonymous struct
+ * @memb2: second member of anonymous union/anonymous struct
+ * @memb3: third member of anonymous union/anonymous struct
+ * @memb4: fourth member of anonymous union/anonymous struct
+ * @bar: non-anonymous union
+ * @bar.st1: struct st1 inside @bar
+ * @bar.st2: struct st2 inside @bar
+ * @bar.st1.memb1: first member of struct st1 on union bar
+ * @bar.st1.memb2: second member of struct st1 on union bar
+ * @bar.st2.memb1: first member of struct st2 on union bar
+ * @bar.st2.memb2: second member of struct st2 on union bar
+ */
+ struct nested_foobar {
+   /* Anonymous union/struct */
+   union {
+     struct {
+       int memb1;
+       int memb2;
+     };
+     struct {
+       void *memb3;
+       int memb4;
+     };
+   };
+   union {
+     struct {
+       int memb1;
+       int memb2;
+     } st1;
+     struct {
+       void *memb1;
+       int memb2;
+     } st2;
+   } bar;
+ };
+
+.. note::
+
+ #) Quando documentate una struttura od unione annidata, ad esempio
+ di nome ``foo``, il suo campo ``bar`` dev'essere documentato
+ usando ``@foo.bar:``
+ #) Quando la struttura od unione annidata è anonima, il suo campo
+ ``bar`` dev'essere documentato usando ``@bar:``
+
+Commenti in linea per la documentazione dei membri
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+I membri d'una struttura possono essere documentati in linea all'interno
+della definizione stessa. Ci sono due stili: una singola riga di commento
+che inizia con ``/**`` e finisce con ``*/``; commenti multi riga come
+qualsiasi altro commento kernel-doc::
+
+ /**
+ * struct foo - Brief description.
+ * @foo: The Foo member.
+ */
+ struct foo {
+ int foo;
+ /**
+ * @bar: The Bar member.
+ */
+ int bar;
+ /**
+ * @baz: The Baz member.
+ *
+ * Here, the member description may contain several paragraphs.
+ */
+ int baz;
+ union {
+ /** @foobar: Single line description. */
+ int foobar;
+ };
+ /** @bar2: Description for struct @bar2 inside @foo */
+ struct {
+ /**
+ * @bar2.barbar: Description for @barbar inside @foo.bar2
+ */
+ int barbar;
+ } bar2;
+ };
+
+
+Documentazione dei tipi di dato
+-------------------------------
+Generalmente il formato di un commento kernel-doc per typedef è
+il seguente::
+
+ /**
+ * typedef type_name - Brief description.
+ *
+ * Description of the type.
+ */
+
+Anche i tipi di dato per prototipi di funzione possono essere documentati::
+
+ /**
+ * typedef type_name - Brief description.
+ * @arg1: description of arg1
+ * @arg2: description of arg2
+ *
+ * Description of the type.
+ *
+ * Context: Locking context.
+ * Return: Meaning of the return value.
+ */
+ typedef void (*type_name)(struct v4l2_ctrl *arg1, void *arg2);
+
+Marcatori e riferimenti
+-----------------------
+
+All'interno dei commenti di tipo kernel-doc vengono riconosciuti i seguenti
+*pattern* che vengono convertiti in marcatori reStructuredText ed in riferimenti
+del `dominio Sphinx per il C`_.
+
+.. attention:: Questi sono riconosciuti **solo** all'interno di commenti
+ kernel-doc, e **non** all'interno di documenti reStructuredText.
+
+``funcname()``
+ Riferimento ad una funzione.
+
+``@parameter``
+ Nome di un parametro di una funzione (nessun riferimento, solo formattazione).
+
+``%CONST``
+ Il nome di una costante (nessun riferimento, solo formattazione)
+
+````literal````
+ Un blocco di testo che deve essere riportato così com'è. La rappresentazione
+ finale utilizzerà caratteri a ``spaziatura fissa``.
+
+ Questo è utile se dovete utilizzare caratteri speciali che altrimenti
+ potrebbero assumere un significato diverso in kernel-doc o in reStructuredText
+
+ Questo è particolarmente utile se dovete scrivere qualcosa come ``%ph``
+ all'interno della descrizione di una funzione.
+
+``$ENVVAR``
+ Il nome di una variabile d'ambiente (nessun riferimento, solo formattazione).
+
+``&struct name``
+ Riferimento ad una struttura.
+
+``&enum name``
+ Riferimento ad un'enumerazione.
+
+``&typedef name``
+ Riferimento ad un tipo di dato.
+
+``&struct_name->member`` oppure ``&struct_name.member``
+ Riferimento ad un membro di una struttura o di un'unione. Il riferimento sarà
+ la struttura o l'unione, non il membro.
+
+``&name``
+ Un generico riferimento ad un tipo. Usate, preferibilmente, il riferimento
+ completo come descritto sopra. Questo è dedicato ai commenti obsoleti.
+
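+A titolo d'esempio (tutti i nomi sono di fantasia), un commento che combina
+questi marcatori::
+
+ /**
+  * foo_reset() - Reset a foo device.
+  * @dev: pointer to the &struct foo_device to reset.
+  *
+  * Unlike foo_shutdown(), this function leaves @dev powered on and
+  * simply returns it to the %FOO_STATE_IDLE state.
+  */
+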
+Riferimenti usando reStructuredText
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Per fare riferimento a funzioni e tipi di dato definiti nei commenti kernel-doc
+all'interno dei documenti reStructuredText, utilizzate i riferimenti dal
+`dominio Sphinx per il C`_. Per esempio::
+
+ See function :c:func:`foo` and struct/union/enum/typedef :c:type:`bar`.
+
+Nonostante il riferimento ai tipi di dato funzioni col solo nome,
+ovvero senza specificare struct/union/enum/typedef, potreste preferire il
+seguente::
+
+ See :c:type:`struct foo <foo>`.
+ See :c:type:`union bar <bar>`.
+ See :c:type:`enum baz <baz>`.
+ See :c:type:`typedef meh <meh>`.
+
+Questo produce dei collegamenti migliori, ed è in linea con il modo in cui
+kernel-doc gestisce i riferimenti.
+
+Per maggiori informazioni, siete pregati di consultare la documentazione
+del `dominio Sphinx per il C`_.
+
+Commenti per una documentazione generale
+----------------------------------------
+
+Al fine d'avere il codice ed i commenti nello stesso file, potete includere
+dei blocchi di documentazione kernel-doc con un formato libero invece
+che nel formato specifico per funzioni, strutture, unioni, enumerati o tipi
+di dato. Per esempio, questo tipo di commento potrebbe essere usato per la
+spiegazione delle operazioni di un driver o di una libreria.
+
+Questo s'ottiene utilizzando la parola chiave ``DOC:`` a cui viene associato
+un titolo.
+
+Generalmente il formato di un commento generico o di visione d'insieme è
+il seguente::
+
+ /**
+ * DOC: Theory of Operation
+ *
+ * The whizbang foobar is a dilly of a gizmo. It can do whatever you
+ * want it to do, at any time. It reads your mind. Here's how it works.
+ *
+ * foo bar splat
+ *
+ * The only drawback to this gizmo is that is can sometimes damage
+ * hardware, software, or its subject(s).
+ */
+
+Il titolo che segue ``DOC:`` funziona da intestazione all'interno del file
+sorgente, ma anche come identificatore per l'estrazione di questi commenti di
+documentazione. Quindi, il titolo dev'essere unico all'interno del file.
+
+Includere i commenti di tipo kernel-doc
+=======================================
+
+I commenti di documentazione possono essere inclusi in un qualsiasi documento
+di tipo reStructuredText mediante l'apposita direttiva nell'estensione
+kernel-doc per Sphinx.
+
+Le direttive kernel-doc sono nel formato::
+
+ .. kernel-doc:: source
+ :option:
+
+Il campo *source* è il percorso ad un file sorgente, relativo alla cartella
+principale dei sorgenti del kernel. La direttiva supporta le seguenti opzioni:
+
+export: *[source-pattern ...]*
+ Include la documentazione per tutte le funzioni presenti nel file sorgente
+ (*source*) che sono state esportate utilizzando ``EXPORT_SYMBOL`` o
+ ``EXPORT_SYMBOL_GPL`` in *source* o in qualsiasi altro *source-pattern*
+ specificato.
+
+ Il campo *source-pattern* è utile quando i commenti kernel-doc sono stati
+ scritti nei file d'intestazione, mentre ``EXPORT_SYMBOL`` e
+ ``EXPORT_SYMBOL_GPL`` si trovano vicino alla definizione delle funzioni.
+
+ Esempi::
+
+ .. kernel-doc:: lib/bitmap.c
+ :export:
+
+ .. kernel-doc:: include/net/mac80211.h
+ :export: net/mac80211/*.c
+
+internal: *[source-pattern ...]*
+ Include la documentazione per tutte le funzioni ed i tipi presenti nel file
+ sorgente (*source*) che **non** sono stati esportati utilizzando
+ ``EXPORT_SYMBOL`` o ``EXPORT_SYMBOL_GPL`` né in *source* né in qualsiasi
+ altro *source-pattern* specificato.
+
+ Esempio::
+
+ .. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
+ :internal:
+
+doc: *title*
+ Include la documentazione del paragrafo ``DOC:`` identificato dal titolo
+ (*title*) all'interno del file sorgente (*source*). Gli spazi in *title* sono
+ permessi; non racchiudete *title* fra virgolette. Il campo *title* è utilizzato per
+ identificare un paragrafo e per questo non viene incluso nella documentazione
+ finale. Verificate d'avere l'intestazione appropriata nei documenti
+ reStructuredText.
+
+ Esempio::
+
+ .. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
+ :doc: High Definition Audio over HDMI and Display Port
+
+functions: *function* *[...]*
+ Dal file sorgente (*source*) include la documentazione per le funzioni
+ elencate (*function*).
+
+ Esempio::
+
+ .. kernel-doc:: lib/bitmap.c
+ :functions: bitmap_parselist bitmap_parselist_user
+
+Senza alcuna opzione, la direttiva kernel-doc include tutti i commenti di
+documentazione presenti nel file sorgente (*source*).
+
+L'estensione kernel-doc fa parte dei sorgenti del kernel, la si può trovare
+in ``Documentation/sphinx/kerneldoc.py``. Internamente, viene utilizzato
+lo script ``scripts/kernel-doc`` per estrarre i commenti di documentazione
+dai file sorgenti.
+
+Come utilizzare kernel-doc per generare pagine man
+--------------------------------------------------
+
+Se volete utilizzare kernel-doc solo per generare delle pagine man, potete
+farlo direttamente dai sorgenti del kernel::
+
+ $ scripts/kernel-doc -man $(git grep -l '/\*\*' -- :^Documentation :^tools) | scripts/split-man.pl /tmp/man
diff --git a/Documentation/translations/it_IT/doc-guide/parse-headers.rst b/Documentation/translations/it_IT/doc-guide/parse-headers.rst
new file mode 100644
index 000000000000..b38918ca637e
--- /dev/null
+++ b/Documentation/translations/it_IT/doc-guide/parse-headers.rst
@@ -0,0 +1,196 @@
+.. include:: ../disclaimer-ita.rst
+
+.. note:: Per leggere la documentazione originale in inglese:
+ :ref:`Documentation/doc-guide/index.rst <doc_guide>`
+
+=========================================
+Includere i file di intestazione uAPI
+=========================================
+
+Qualche volta è utile includere dei file di intestazione e degli esempi di codice C
+al fine di descrivere l'API per lo spazio utente e per generare dei riferimenti
+fra il codice e la documentazione. Aggiungere i riferimenti ai file dell'API
+dello spazio utente ha ulteriori vantaggi: Sphinx genererà dei messaggi
+d'avviso se un simbolo non viene trovato nella documentazione. Questo permette
+di mantenere allineata la documentazione della uAPI (API spazio utente)
+con le modifiche del kernel.
+Il programma :ref:`parse_headers.pl <it_parse_headers>` genera questi riferimenti.
+Esso dev'essere invocato attraverso un Makefile, mentre si genera la
+documentazione. Per avere un esempio su come utilizzarlo all'interno del kernel
+consultate ``Documentation/media/Makefile``.
+
+.. _it_parse_headers:
+
+parse_headers.pl
+^^^^^^^^^^^^^^^^
+
+NOME
+****
+
+
+parse_headers.pl - analizza i file C al fine di identificare funzioni,
+strutture, enumerati e definizioni, e creare riferimenti per Sphinx
+
+SINTASSI
+********
+
+
+\ **parse_headers.pl**\ [<options>] <C_FILE> <OUT_FILE> [<EXCEPTIONS_FILE>]
+
+Dove <options> può essere: --debug, --usage o --help.
+
+
+OPZIONI
+*******
+
+
+
+\ **--debug**\
+
+ Lo script viene messo in modalità verbosa, utile per il debugging.
+
+
+\ **--usage**\
+
+ Mostra un messaggio d'aiuto breve e termina.
+
+
+\ **--help**\
+
+ Mostra un messaggio d'aiuto dettagliato e termina.
+
+
+DESCRIZIONE
+***********
+
+Converte un file d'intestazione o un file sorgente C (C_FILE) in un testo
+reStructuredText incluso mediante il blocco ``parsed-literal``
+con riferimenti alla documentazione che descrive l'API. Opzionalmente,
+il programma accetta anche un altro file (EXCEPTIONS_FILE) che
+descrive quali elementi debbano essere ignorati o il cui riferimento
+deve puntare ad un elemento diverso dal predefinito.
+
+Il file generato sarà disponibile in (OUT_FILE).
+
+Il programma è capace di identificare *define*, funzioni, strutture,
+tipi di dato, enumerati e valori di enumerati, e di creare i riferimenti
+per ognuno di loro. Inoltre, esso è capace di distinguere le #define
+utilizzate per specificare i comandi ioctl di Linux.
+
+Il file EXCEPTIONS_FILE contiene due tipi di dichiarazioni:
+\ **ignore**\ o \ **replace**\ .
+
+La sintassi per ignore è:
+
+ignore \ **tipo**\ \ **nome**\
+
+La dichiarazione \ **ignore**\ significa che non verrà generato alcun
+riferimento per il simbolo \ **nome**\ di tipo \ **tipo**\ .
+
+
+La sintassi per replace è:
+
+replace \ **tipo**\ \ **nome**\ \ **nuovo_valore**\
+
+La dichiarazione \ **replace**\ significa che verrà generato un
+riferimento per il simbolo \ **nome**\ di tipo \ **tipo**\ , ma, invece
+di utilizzare il valore predefinito, verrà utilizzato il valore
+\ **nuovo_valore**\ .
+
+Per entrambe le dichiarazioni, il \ **tipo**\ può essere uno dei seguenti:
+
+
+\ **ioctl**\
+
+ La dichiarazione ignore o replace verrà applicata su definizioni di ioctl
+ come la seguente:
+
+ #define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
+
+
+
+\ **define**\
+
+ La dichiarazione ignore o replace verrà applicata su una qualsiasi #define
+ trovata in C_FILE.
+
+
+
+\ **typedef**\
+
+ La dichiarazione ignore o replace verrà applicata ad una dichiarazione typedef
+ in C_FILE.
+
+
+
+\ **struct**\
+
+ La dichiarazione ignore o replace verrà applicata ai nomi di strutture
+ in C_FILE.
+
+
+
+\ **enum**\
+
+ La dichiarazione ignore o replace verrà applicata ai nomi di enumerati
+ in C_FILE.
+
+
+
+\ **symbol**\
+
+ La dichiarazione ignore o replace verrà applicata ai nomi di valori di
+ enumerati in C_FILE.
+
+ Per le dichiarazioni di tipo replace, il campo \ **nuovo_valore**\ utilizzerà
+ automaticamente i riferimenti :c:type: per \ **typedef**\ , \ **enum**\ e
+ \ **struct**\. Invece, utilizzerà :ref: per \ **ioctl**\ , \ **define**\ e
+ \ **symbol**\. Il tipo di riferimento può essere definito esplicitamente
+ nella dichiarazione stessa.
+
+
+ESEMPI
+******
+
+
+ignore define _VIDEODEV2_H
+
+
+Ignora una definizione #define _VIDEODEV2_H nel file C_FILE.
+
+ignore symbol PRIVATE
+
+
+In un enumerato come il seguente:
+
+enum foo { BAR1, BAR2, PRIVATE };
+
+Non genererà alcun riferimento per \ **PRIVATE**\ .
+
+replace symbol BAR1 :c:type:\`foo\`
+replace symbol BAR2 :c:type:\`foo\`
+
+
+In un enumerato come il seguente:
+
+enum foo { BAR1, BAR2, PRIVATE };
+
+Genererà un riferimento ai valori BAR1 e BAR2 dal simbolo foo nel dominio C.
+
+
+BUGS
+****
+
+Segnalate ogni malfunzionamento a Mauro Carvalho Chehab <mchehab@s-opensource.com>
+
+
+COPYRIGHT
+*********
+
+
+Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@s-opensource.com>.
+
+Licenza GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>.
+
+Questo è software libero: siete liberi di cambiarlo e ridistribuirlo.
+Non c'è alcuna garanzia, nei limiti permessi dalla legge.
diff --git a/Documentation/translations/it_IT/doc-guide/sphinx.rst b/Documentation/translations/it_IT/doc-guide/sphinx.rst
new file mode 100644
index 000000000000..474b7e127893
--- /dev/null
+++ b/Documentation/translations/it_IT/doc-guide/sphinx.rst
@@ -0,0 +1,457 @@
+.. include:: ../disclaimer-ita.rst
+
+.. note:: Per leggere la documentazione originale in inglese:
+ :ref:`Documentation/doc-guide/index.rst <doc_guide>`
+
+Introduzione
+============
+
+Il kernel Linux usa `Sphinx`_ per la generazione della documentazione a partire
+dai file `reStructuredText`_ che si trovano nella cartella ``Documentation``.
+Per generare la documentazione in HTML o PDF, usate i comandi ``make htmldocs`` o
+``make pdfdocs``. La documentazione così generata sarà disponibile nella
+cartella ``Documentation/output``.
+
+.. _Sphinx: http://www.sphinx-doc.org/
+.. _reStructuredText: http://docutils.sourceforge.net/rst.html
+
+I file reStructuredText possono contenere delle direttive che permettono di
+includere i commenti di documentazione, o di tipo kernel-doc, dai file
+sorgenti.
+Solitamente questi commenti sono utilizzati per descrivere le funzioni, i tipi
+e l'architettura del codice. I commenti di tipo kernel-doc hanno una struttura
+e formato speciale, ma a parte questo vengono processati come reStructuredText.
+
+Inoltre, ci sono migliaia di altri documenti in formato testo sparsi nella
+cartella ``Documentation``. Alcuni di questi verranno probabilmente convertiti,
+nel tempo, in formato reStructuredText, ma la maggior parte di questi rimarranno
+in formato testo.
+
+.. _it_sphinx_install:
+
+Installazione Sphinx
+====================
+
+I marcatori ReST utilizzati nei file in Documentation/ sono pensati per essere
+processati da ``Sphinx`` nella versione 1.3 o superiore. Se desiderate produrre
+un documento PDF è raccomandato l'utilizzo di una versione superiore alla 1.4.6.
+
+Esiste uno script che verifica i requisiti Sphinx. Per ulteriori dettagli
+consultate :ref:`it_sphinx-pre-install`.
+
+La maggior parte delle distribuzioni Linux fornisce Sphinx, ma l'insieme dei
+programmi e librerie è fragile e non è raro che dopo un aggiornamento di
+Sphinx, o qualche altro pacchetto Python, la documentazione non venga più
+generata correttamente.
+
+Un modo per evitare questo genere di problemi è quello di utilizzare una
+versione diversa da quella fornita dalla vostra distribuzione. Per fare questo,
+vi raccomandiamo di installare Sphinx dentro ad un ambiente virtuale usando
+``virtualenv-3`` o ``virtualenv`` a seconda di come Python 3 è stato
+pacchettizzato dalla vostra distribuzione.
+
+.. note::
+
+ #) Le versioni di Sphinx inferiori alla 1.5 non funzionano bene
+ con il pacchetto Python docutils versione 0.13.1 o superiore.
+ Se volete usare queste versioni, allora dovrete eseguire
+ ``pip install 'docutils==0.12'``.
+
+ #) Viene raccomandato l'uso del tema RTD per la documentazione in HTML.
+ A seconda della versione di Sphinx, potrebbe essere necessaria
+ l'installazione tramite il comando ``pip install sphinx_rtd_theme``.
+
+ #) Alcune pagine ReST contengono delle formule matematiche. A causa del
+ modo in cui Sphinx funziona, queste espressioni sono scritte
+ utilizzando LaTeX. Per una corretta interpretazione, è necessario aver
+ installato texlive con i pacchetti amsfonts e amsmath.
+
+Riassumendo, se volete installare la versione 1.4.9 di Sphinx dovete eseguire::
+
+ $ virtualenv sphinx_1.4
+ $ . sphinx_1.4/bin/activate
+ (sphinx_1.4) $ pip install -r Documentation/sphinx/requirements.txt
+
+Dopo aver eseguito ``. sphinx_1.4/bin/activate``, il prompt cambierà per
+indicare che state usando il nuovo ambiente. Se aprite una nuova sessione,
+prima di generare la documentazione, dovrete rieseguire questo comando per
+rientrare nell'ambiente virtuale.
+
+Generazione d'immagini
+----------------------
+
+Il meccanismo che genera la documentazione del kernel contiene un'estensione
+capace di gestire immagini in formato Graphviz e SVG (per maggiori informazioni
+vedere :ref:`it_sphinx_kfigure`).
+
+Per far sì che questo funzioni, dovete installare entrambi i pacchetti
+Graphviz e ImageMagick. Il sistema di generazione della documentazione è in
+grado di procedere anche se questi pacchetti non sono installati, ma il
+risultato, ovviamente, non includerà le immagini.
+
+Generazione in PDF e LaTeX
+--------------------------
+
+Al momento, la generazione di questi documenti è supportata solo dalle
+versioni di Sphinx superiori alla 1.4.
+
+Per la generazione di PDF e LaTeX, avrete bisogno anche del pacchetto
+``XeLaTeX`` nella versione 3.14159265.
+
+Per alcune distribuzioni Linux potrebbe essere necessario installare
+anche una serie di pacchetti ``texlive`` in modo da fornire il supporto
+minimo per il funzionamento di ``XeLaTeX``.
+
+.. _it_sphinx-pre-install:
+
+Verificare le dipendenze Sphinx
+-------------------------------
+
+Esiste uno script che permette di verificare automaticamente le dipendenze di
+Sphinx. Se lo script riesce a riconoscere la vostra distribuzione, allora
+sarà in grado di darvi dei suggerimenti su come procedere per completare
+l'installazione::
+
+ $ ./scripts/sphinx-pre-install
+ Checking if the needed tools for Fedora release 26 (Twenty Six) are available
+ Warning: better to also install "texlive-luatex85".
+ You should run:
+
+ sudo dnf install -y texlive-luatex85
+ /usr/bin/virtualenv sphinx_1.4
+ . sphinx_1.4/bin/activate
+ pip install -r Documentation/sphinx/requirements.txt
+
+ Can't build as 1 mandatory dependency is missing at ./scripts/sphinx-pre-install line 468.
+
+L'impostazione predefinita prevede il controllo dei requisiti per la generazione
+di documenti html e PDF, includendo anche il supporto per le immagini, le
+espressioni matematiche e LaTeX; inoltre, presume che venga utilizzato un
+ambiente virtuale per Python. I requisiti per generare i documenti html
+sono considerati obbligatori, gli altri sono opzionali.
+
+Questo script ha i seguenti parametri:
+
+``--no-pdf``
+ Disabilita i controlli per la generazione di PDF;
+
+``--no-virtualenv``
+ Utilizza l'ambiente predefinito dal sistema operativo invece che
+ l'ambiente virtuale per Python;
+
+
+Generazione della documentazione Sphinx
+=======================================
+
+Per generare la documentazione in formato HTML o PDF si eseguono i rispettivi
+comandi ``make htmldocs`` o ``make pdfdocs``. Esistono anche altri formati
+in cui è possibile generare la documentazione; per maggiori informazioni
+potete eseguire il comando ``make help``.
+La documentazione così generata sarà disponibile nella sottocartella
+``Documentation/output``.
+
+Ovviamente, per generare la documentazione, Sphinx (``sphinx-build``)
+dev'essere installato. Se disponibile, il tema *Read the Docs* per Sphinx
+verrà utilizzato per ottenere una documentazione HTML più gradevole.
+Per la documentazione in formato PDF, invece, avrete bisogno di ``XeLaTeX``
+e di ``convert(1)`` disponibile in ImageMagick (https://www.imagemagick.org).
+Tipicamente, tutti questi pacchetti sono disponibili e pacchettizzati nelle
+distribuzioni Linux.
+
+Per poter passare ulteriori opzioni a Sphinx potete utilizzare la variabile
+make ``SPHINXOPTS``. Per esempio, se volete che Sphinx sia più verboso durante
+la generazione potete usare il seguente comando ``make SPHINXOPTS=-v htmldocs``.
+
+Potete eliminare la documentazione generata tramite il comando
+``make cleandocs``.
+
+Scrivere la documentazione
+==========================
+
+Aggiungere nuova documentazione è semplice:
+
+1. aggiungete un file ``.rst`` nella sottocartella ``Documentation``
+2. aggiungete un riferimento ad esso nell'indice (`TOC tree`_) in
+ ``Documentation/index.rst``.
+
+.. _TOC tree: http://www.sphinx-doc.org/en/stable/markup/toctree.html
+
+Questo, di solito, è sufficiente per la documentazione più semplice (come
+quella che state leggendo ora), ma per una documentazione più elaborata è
+consigliato creare una sottocartella dedicata (o, quando possibile, utilizzarne
+una già esistente). Per esempio, il sottosistema grafico è documentato nella
+sottocartella ``Documentation/gpu``; questa documentazione è divisa in
+diversi file ``.rst`` ed un indice ``index.rst`` (con un ``toctree``
+dedicato) a cui si fa riferimento nell'indice principale.
+
+Consultate la documentazione di `Sphinx`_ e `reStructuredText`_ per maggiori
+informazioni circa le loro potenzialità. In particolare, il
+`manuale introduttivo a reStructuredText`_ di Sphinx è un buon punto da
+cui cominciare. Esistono, inoltre, anche alcuni
+`costruttori specifici per Sphinx`_.
+
+.. _`manuale introduttivo a reStructuredText`: http://www.sphinx-doc.org/en/stable/rest.html
+.. _`costruttori specifici per Sphinx`: http://www.sphinx-doc.org/en/stable/markup/index.html
+
+Linee guida per la documentazione del kernel
+--------------------------------------------
+
+In questa sezione troverete alcune linee guida specifiche per la documentazione
+del kernel:
+
+* Non esagerate con i costrutti di reStructuredText. Mantenete la
+ documentazione semplice. La maggior parte della documentazione dovrebbe
+ essere testo semplice con una strutturazione minima che permetta la
+ conversione in diversi formati.
+
+* Mantenete la strutturazione il più fedele possibile all'originale quando
+ convertite un documento in formato reStructuredText.
+
+* Aggiornate i contenuti quando convertite della documentazione, non limitatevi
+ solo alla formattazione.
+
+* Mantenete la decorazione dei livelli di intestazione come segue:
+
+ 1. ``=`` con una linea superiore per il titolo del documento::
+
+ ======
+ Titolo
+ ======
+
+ 2. ``=`` per i capitoli::
+
+ Capitoli
+ ========
+
+ 3. ``-`` per le sezioni::
+
+ Sezioni
+ -------
+
+ 4. ``~`` per le sottosezioni::
+
+ Sottosezioni
+ ~~~~~~~~~~~~
+
+ Sebbene RST non forzi alcun ordine specifico (*Piuttosto che imporre
+ un numero ed un ordine fisso di decorazioni, l'ordine utilizzato sarà
+ quello incontrato*), avere uniformità dei livelli principali rende più
+ semplice la lettura dei documenti.
+
+* Per inserire blocchi di testo con caratteri a dimensione fissa (codici di
+ esempio, casi d'uso, eccetera): utilizzate ``::`` quando non è necessario
+ evidenziare la sintassi, specialmente per piccoli frammenti; invece,
+ utilizzate ``.. code-block:: <language>`` per blocchi più lunghi che
+ potranno beneficiare dell'avere la sintassi evidenziata.
+
+
+Il dominio C
+------------
+
+Il **Dominio Sphinx C** (denominato c) è adatto alla documentazione delle API C.
+Per esempio, un prototipo di una funzione:
+
+.. code-block:: rst
+
+ .. c:function:: int ioctl( int fd, int request )
+
+Il dominio C per kernel-doc ha delle funzionalità aggiuntive. Per esempio,
+potete assegnare un nuovo nome di riferimento ad una funzione con un nome
+molto comune come ``open`` o ``ioctl``:
+
+.. code-block:: rst
+
+ .. c:function:: int ioctl( int fd, int request )
+ :name: VIDIOC_LOG_STATUS
+
+Il nome della funzione (per esempio ioctl) rimane nel testo ma il nome del suo
+riferimento cambia da ``ioctl`` a ``VIDIOC_LOG_STATUS``. Anche la voce
+nell'indice cambia in ``VIDIOC_LOG_STATUS`` e si potrà quindi fare riferimento
+a questa funzione scrivendo:
+
+.. code-block:: rst
+
+ :c:func:`VIDIOC_LOG_STATUS`
+
+
+Tabelle a liste
+---------------
+
+Raccomandiamo l'uso delle tabelle in formato lista (*list table*). Le tabelle
+in formato lista sono liste di liste. In confronto all'ASCII-art potrebbero
+non apparire di facile lettura nei file in formato testo. Il loro vantaggio è
+che sono facili da creare e da modificare, e che il *diff* di una modifica è
+molto più significativo perché limitato alle sole modifiche del contenuto.
+
+La ``flat-table`` è anch'essa una lista di liste simile alle ``list-table``
+ma con delle funzionalità aggiuntive:
+
+* column-span: col ruolo ``cspan`` una cella può essere estesa attraverso
+ colonne successive
+
+* row-span: col ruolo ``rspan`` una cella può essere estesa attraverso
+ righe successive
+
+* auto-span: la cella più a destra viene estesa verso destra per compensare
+ la mancanza di celle. Con l'opzione ``:fill-cells:`` questo comportamento
+ può essere cambiato da *auto-span* ad *auto-fill*, il quale inserisce
+ automaticamente celle (vuote) invece che estendere l'ultima.
+
+opzioni:
+
+* ``:header-rows:`` [int] conta le righe di intestazione
+* ``:stub-columns:`` [int] conta le colonne di stub
+* ``:widths:`` [[int] [int] ... ] larghezza delle colonne
+* ``:fill-cells:`` invece di estendere automaticamente una cella su quelle
+ mancanti, ne crea di vuote.
+
+ruoli:
+
+* ``:cspan:`` [int] colonne successive (*morecols*)
+* ``:rspan:`` [int] righe successive (*morerows*)
+
+L'esempio successivo mostra come usare questo marcatore. Il primo livello della
+nostra lista di liste è la *riga*. In una *riga* è possibile inserire solamente
+la lista di celle che compongono la *riga* stessa. Fanno eccezione i *commenti*
+( ``..`` ) ed i *collegamenti* (per esempio, un riferimento a
+``:ref:`last row <last row>``` / :ref:`last row <it last row>`)
+
+.. code-block:: rst
+
+ .. flat-table:: table title
+ :widths: 2 1 1 3
+
+ * - head col 1
+ - head col 2
+ - head col 3
+ - head col 4
+
+ * - column 1
+ - field 1.1
+ - field 1.2 with autospan
+
+ * - column 2
+ - field 2.1
+ - :rspan:`1` :cspan:`1` field 2.2 - 3.3
+
+ * .. _`it last row`:
+
+ - column 3
+
+Che verrà rappresentata nel seguente modo:
+
+ .. flat-table:: table title
+ :widths: 2 1 1 3
+
+ * - head col 1
+ - head col 2
+ - head col 3
+ - head col 4
+
+ * - column 1
+ - field 1.1
+ - field 1.2 with autospan
+
+ * - column 2
+ - field 2.1
+ - :rspan:`1` :cspan:`1` field 2.2 - 3.3
+
+ * .. _`it last row`:
+
+ - column 3
+
+.. _it_sphinx_kfigure:
+
+Figure ed immagini
+==================
+
+Se volete aggiungere un'immagine, utilizzate le direttive ``kernel-figure``
+e ``kernel-image``. Per esempio, per inserire una figura di un'immagine in
+formato SVG::
+
+ .. kernel-figure:: ../../../doc-guide/svg_image.svg
+ :alt: una semplice immagine SVG
+
+ Una semplice immagine SVG
+
+.. _it_svg_image_example:
+
+.. kernel-figure:: ../../../doc-guide/svg_image.svg
+ :alt: una semplice immagine SVG
+
+ Una semplice immagine SVG
+
+Le direttive del kernel per figure ed immagini supportano il formato **DOT**;
+per maggiori informazioni consultate:
+
+* DOT: http://graphviz.org/pdf/dotguide.pdf
+* Graphviz: http://www.graphviz.org/content/dot-language
+
+Un piccolo esempio (:ref:`it_hello_dot_file`)::
+
+ .. kernel-figure:: ../../../doc-guide/hello.dot
+ :alt: ciao mondo
+
+ Esempio DOT
+
+.. _it_hello_dot_file:
+
+.. kernel-figure:: ../../../doc-guide/hello.dot
+ :alt: ciao mondo
+
+ Esempio DOT
+
+Tramite la direttiva ``kernel-render`` è possibile aggiungere codice specifico;
+ad esempio nel formato **DOT** di Graphviz::
+
+ .. kernel-render:: DOT
+ :alt: foobar digraph
+ :caption: Codice **DOT** (Graphviz) integrato
+
+ digraph foo {
+ "bar" -> "baz";
+ }
+
+La rappresentazione dipenderà dai programmi installati. Se avete Graphviz
+installato, vedrete un'immagine vettoriale. In caso contrario, il codice grezzo
+verrà rappresentato come *blocco testuale* (:ref:`it_hello_dot_render`).
+
+.. _it_hello_dot_render:
+
+.. kernel-render:: DOT
+ :alt: foobar digraph
+ :caption: Codice **DOT** (Graphviz) integrato
+
+ digraph foo {
+ "bar" -> "baz";
+ }
+
+La direttiva *render* ha tutte le opzioni della direttiva *figure*, con
+l'aggiunta dell'opzione ``caption``. Se ``caption`` ha un valore allora
+un nodo *figure* viene aggiunto. Altrimenti verrà aggiunto un nodo *image*.
+L'opzione ``caption`` è necessaria in caso si vogliano aggiungere dei
+riferimenti (:ref:`it_hello_svg_render`).
+
+Per la scrittura di codice **SVG**::
+
+ .. kernel-render:: SVG
+ :caption: Integrare codice **SVG**
+ :alt: so-nw-arrow
+
+ <?xml version="1.0" encoding="UTF-8"?>
+ <svg xmlns="http://www.w3.org/2000/svg" version="1.1" ...>
+ ...
+ </svg>
+
+.. _it_hello_svg_render:
+
+.. kernel-render:: SVG
+ :caption: Integrare codice **SVG**
+ :alt: so-nw-arrow
+
+ <?xml version="1.0" encoding="UTF-8"?>
+ <svg xmlns="http://www.w3.org/2000/svg"
+ version="1.1" baseProfile="full" width="70px" height="40px" viewBox="0 0 700 400">
+ <line x1="180" y1="370" x2="500" y2="50" stroke="black" stroke-width="15px"/>
+ <polygon points="585 0 525 25 585 50" transform="rotate(135 525 25)"/>
+ </svg>
diff --git a/Documentation/translations/it_IT/index.rst b/Documentation/translations/it_IT/index.rst
new file mode 100644
index 000000000000..898a7823a6f4
--- /dev/null
+++ b/Documentation/translations/it_IT/index.rst
@@ -0,0 +1,118 @@
+.. _it_linux_doc:
+
+===================
+Traduzione italiana
+===================
+
+L'obiettivo di questa traduzione è di rendere più facile la lettura e
+la comprensione per chi preferisce leggere in lingua italiana.
+Tenete presente che la documentazione di riferimento rimane comunque
+quella in lingua inglese: :ref:`linux_doc`
+
+Questa traduzione cerca di essere il più fedele possibile all'originale ma
+è ovvio che alcune frasi vadano trasformate: non aspettatevi una traduzione
+letterale. Quando possibile, si eviteranno gli inglesismi ed al loro posto
+verranno utilizzate le corrispettive parole italiane.
+
+Se notate che la traduzione non è più aggiornata potete contattare
+direttamente il manutentore della traduzione italiana.
+
+Se notate che la documentazione contiene errori o dimenticanze, allora
+verificate la documentazione di riferimento in lingua inglese. Se il problema
+è presente anche nella documentazione di riferimento, contattate il suo
+manutentore. Se avete problemi a scrivere in inglese, potete comunque
+riportare il problema al manutentore della traduzione italiana.
+
+Manutentore della traduzione italiana: Federico Vaga <federico.vaga@vaga.pv.it>
+
+La documentazione del kernel Linux
+==================================
+
+Questo è il livello principale della documentazione del kernel in
+lingua italiana. La traduzione è incompleta, noterete degli avvisi
+che vi segnaleranno la mancanza di una traduzione o di un gruppo di
+traduzioni.
+
+Più in generale, la documentazione, come il kernel stesso, è in
+costante sviluppo; ciò è particolarmente vero ora che stiamo lavorando
+alla riorganizzazione della documentazione in modo più coerente.
+I miglioramenti alla documentazione sono sempre i benvenuti; per cui,
+se vuoi aiutare, iscriviti alla lista di discussione linux-doc presso
+vger.kernel.org.
+
+Documentazione sulla licenza dei sorgenti
+-----------------------------------------
+
+I seguenti documenti descrivono la licenza usata nei sorgenti del kernel Linux
+(GPLv2), come licenziare i singoli file; inoltre troverete i riferimenti al
+testo integrale della licenza.
+
+.. warning::
+
+ TODO ancora da tradurre
+
+Documentazione per gli utenti
+-----------------------------
+
+I seguenti manuali sono scritti per gli *utenti* del kernel - ovvero,
+coloro che cercano di farlo funzionare in modo ottimale su un dato sistema
+
+.. warning::
+
+ TODO ancora da tradurre
+
+Documentazione per gli sviluppatori di applicazioni
+---------------------------------------------------
+
+Il manuale delle API verso lo spazio utente è una collezione di documenti
+che descrivono le interfacce del kernel viste dagli sviluppatori
+di applicazioni.
+
+.. warning::
+
+ TODO ancora da tradurre
+
+
+Introduzione allo sviluppo del kernel
+-------------------------------------
+
+Questi manuali contengono informazioni su come contribuire allo sviluppo
+del kernel.
+Attorno al kernel Linux gira una comunità molto grande con migliaia di
+sviluppatori che contribuiscono ogni anno. Come in ogni grande comunità,
+sapere come le cose vengono fatte renderà il processo di integrazione delle
+vostre modifiche molto più semplice.
+
+.. toctree::
+ :maxdepth: 2
+
+ doc-guide/index
+ kernel-hacking/index
+
+.. warning::
+
+ TODO ancora da tradurre
+
+Documentazione della API del kernel
+-----------------------------------
+
+Questi manuali forniscono dettagli su come funzionano i sottosistemi del
+kernel dal punto di vista degli sviluppatori del kernel. Molte delle
+informazioni contenute in questi manuali sono prese direttamente dai
+file sorgenti, informazioni aggiuntive vengono aggiunte solo se necessarie
+(o almeno ci proviamo — probabilmente *non* tutto quello che è davvero
+necessario).
+
+.. warning::
+
+ TODO ancora da tradurre
+
+Documentazione specifica per architettura
+-----------------------------------------
+
+Questi manuali forniscono dettagli di programmazione per le diverse
+implementazioni d'architettura.
+
+.. warning::
+
+ TODO ancora da tradurre
diff --git a/Documentation/translations/it_IT/kernel-hacking/hacking.rst b/Documentation/translations/it_IT/kernel-hacking/hacking.rst
new file mode 100644
index 000000000000..7178e517af0a
--- /dev/null
+++ b/Documentation/translations/it_IT/kernel-hacking/hacking.rst
@@ -0,0 +1,855 @@
+.. include:: ../disclaimer-ita.rst
+
+.. note:: Per leggere la documentazione originale in inglese:
+ :ref:`Documentation/kernel-hacking/hacking.rst <kernel_hacking_hack>`
+
+:Original: :ref:`Documentation/kernel-hacking/hacking.rst <kernel_hacking_hack>`
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
+
+.. _it_kernel_hacking_hack:
+
+=================================================
+L'inaffidabile guida all'hacking del kernel Linux
+=================================================
+
+:Author: Rusty Russell
+
+Introduzione
+============
+
+Benvenuto, gentile lettore, alla notevole ed inaffidabile guida all'hacking
+del kernel Linux ad opera di Rusty. Questo documento descrive le procedure
+più usate ed i concetti necessari per scrivere codice per il kernel: lo scopo
+è di fornire ai programmatori C più esperti un manuale di base per lo sviluppo.
+Eviterò dettagli implementativi: per questo abbiamo il codice,
+ed ignorerò intere parti di alcune procedure.
+
+Prima di leggere questa guida, sappiate che non ho mai voluto scriverla,
+essendo esageratamente sottoqualificato, ma ho sempre voluto leggere
+qualcosa di simile, e quindi questa era l'unica via. Spero che possa
+crescere e diventare un compendio di buone pratiche, punti di partenza
+e generiche informazioni.
+
+Gli attori
+==========
+
+In qualsiasi momento ognuna delle CPU di un sistema può essere:
+
+- non associata ad alcun processo, servendo un'interruzione hardware;
+
+- non associata ad alcun processo, servendo un softirq o tasklet;
+
+- in esecuzione nello spazio kernel, associata ad un processo
+ (contesto utente);
+
+- in esecuzione di un processo nello spazio utente;
+
+Esiste un ordine fra questi casi. Gli ultimi due possono avvicendarsi (preempt)
+l'un l'altro, ma a parte questo esiste una gerarchia rigida: ognuno di questi
+può avvicendarsi solo ad uno di quelli sottostanti. Per esempio, mentre un
+softirq è in esecuzione su una CPU, nessun altro softirq può avvicendarsi
+nell'esecuzione, ma un'interruzione hardware può. Ciò nonostante, le altre CPU
+del sistema operano indipendentemente.
+
+Più avanti vedremo alcuni modi in cui dal contesto utente è possibile bloccare
+le interruzioni, così da escludere davvero il diritto di prelazione.
+
+Contesto utente
+---------------
+
+Ci si trova nel contesto utente quando si arriva da una chiamata di sistema
+od altre eccezioni: come nello spazio utente, altre procedure più importanti,
+o le interruzioni, possono far valere il proprio diritto di prelazione sul
+vostro processo. Potete sospendere l'esecuzione chiamando :c:func:`schedule()`.
+
+.. note::
+
+ Si è sempre in contesto utente quando un modulo viene caricato o rimosso,
+ e durante le operazioni nello strato dei dispositivi a blocchi
+ (*block layer*).
+
+Nel contesto utente, il puntatore ``current`` (il quale indica il processo al
+momento in esecuzione) è valido, e :c:func:`in_interrupt()`
+(``include/linux/preempt.h``) è falsa.
+
+.. warning::
+
+ Attenzione che se avete la prelazione o i softirq disabilitati (vedere
+ di seguito), :c:func:`in_interrupt()` ritornerà un falso positivo.
+
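+Per esempio, uno schizzo minimale e puramente ipotetico che adatta il proprio
+comportamento al contesto::
+
+ if (in_interrupt()) {
+         /* contesto d'interruzione: non si può dormire */
+         return -EAGAIN;
+ }
+ /* contesto utente: current è valido e si può dormire */
+ pr_debug("in esecuzione per %s (pid %d)\n", current->comm, current->pid);
+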
+Interruzioni hardware (Hard IRQs)
+---------------------------------
+
+Temporizzatori, schede di rete e tastiere sono esempi di vero hardware
+che possono produrre interruzioni in un qualsiasi momento. Il kernel esegue
+i gestori d'interruzione che prestano un servizio all'hardware. Il kernel
+garantisce che questi gestori non vengano mai interrotti: se arriva di nuovo
+la stessa interruzione, questa verrà accodata (o scartata).
+Dato che durante la loro esecuzione le interruzioni vengono disabilitate,
+i gestori d'interruzioni devono essere veloci: spesso si limitano
+esclusivamente a notificare la presa in carico dell'interruzione,
+programmare una 'interruzione software' per l'esecuzione e quindi terminare.
+
+Potete dire d'essere in una interruzione hardware perché :c:func:`in_irq()`
+ritorna vero.
+
+.. warning::
+
+ Attenzione, questa ritornerà un falso positivo se le interruzioni
+ sono disabilitate (vedere di seguito).
+
+Contesto d'interruzione software: softirq e tasklet
+---------------------------------------------------
+
+Quando una chiamata di sistema sta per tornare allo spazio utente,
+oppure un gestore d'interruzioni termina, qualsiasi 'interruzione software'
+marcata come pendente (solitamente da un'interruzione hardware) viene
+eseguita (``kernel/softirq.c``).
+
+La maggior parte del lavoro utile alla gestione di un'interruzione avviene qui.
+All'inizio della transizione ai sistemi multiprocessore, c'erano solo i
+cosiddetti 'bottom half' (BH), i quali non traevano alcun vantaggio da questi
+sistemi. Non appena abbandonammo i computer raffazzonati con fiammiferi e
+cicche, abbandonammo anche questa limitazione e migrammo alle interruzioni
+software 'softirqs'.
+
+Il file ``include/linux/interrupt.h`` elenca i differenti tipi di 'softirq'.
+Un tipo di softirq molto importante è il timer (``include/linux/timer.h``):
+potete programmarlo per far si che esegua funzioni dopo un determinato
+periodo di tempo.
+
+Dato che i softirq possono essere eseguiti simultaneamente su più di un
+processore, spesso diventa estenuante l'averci a che fare. Per questa ragione,
+i tasklet (``include/linux/interrupt.h``) vengono usati più di frequente:
+possono essere registrati dinamicamente (il che significa che potete averne
+quanti ne volete), e garantiscono che un qualsiasi tasklet verrà eseguito
+solo su un processore alla volta, sebbene diversi tasklet possono essere
+eseguiti simultaneamente.
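+
+Per esempio, uno schizzo minimale (i nomi ``foo_*`` sono di fantasia) che
+dichiara un tasklet e ne programma l'esecuzione::
+
+ static void foo_do_work(unsigned long data)
+ {
+         /* eseguito in contesto softirq, mai su due processori assieme */
+ }
+
+ static DECLARE_TASKLET(foo_tasklet, foo_do_work, 0);
+
+ /* tipicamente invocato da un gestore d'interruzione hardware */
+ tasklet_schedule(&foo_tasklet);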
+
+.. warning::
+
+ Il nome 'tasklet' è ingannevole: non hanno niente a che fare
+ con i 'processi' ('tasks'), e probabilmente hanno più a che vedere
+ con qualche pessima vodka che Alexey Kuznetsov si fece a quel tempo.
+
+Potete determinare se siete in un softirq (o tasklet) utilizzando la
+macro :c:func:`in_softirq()` (``include/linux/preempt.h``).
+
+.. warning::
+
+ State attenti che questa macro ritornerà un falso positivo
+ se :ref:`bottom half lock <it_local_bh_disable>` è bloccato.
+
+Alcune regole basilari
+======================
+
+Nessuna protezione della memoria
+ Se corrompete la memoria, che sia in contesto utente o d'interruzione,
+ la macchina si pianterà. Siete sicuri che quello che volete fare
+ non possa essere fatto nello spazio utente?
+
+Nessun numero in virgola mobile o MMX
+ Il contesto della FPU non è salvato; anche se siete in contesto utente
+ lo stato dell'FPU probabilmente non corrisponde a quello del processo
+ corrente: vi incasinerete con lo stato di qualche altro processo. Se
+ volete davvero usare la virgola mobile, allora dovrete salvare e recuperare
+ lo stato dell'FPU (ed evitare cambi di contesto). Generalmente è una
+ cattiva idea; usate l'aritmetica a virgola fissa.
+
+Un limite rigido dello stack
+ A seconda della configurazione del kernel lo stack è fra 3K e 6K per la
+ maggior parte delle architetture a 32-bit; è di 14K per la maggior
+ parte di quelle a 64-bit; e spesso è condiviso con le interruzioni,
+ per cui non si può usare.
+ Evitate le ricorsioni profonde e gli enormi array locali nello stack
+ (allocateli dinamicamente).
+
+Il kernel Linux è portabile
+ Quindi mantenetelo tale. Il vostro codice dovrebbe essere a 64-bit ed
+indipendente dall'ordine dei byte (endianness) di un processore. Inoltre,
+ dovreste minimizzare il codice specifico per un processore; per esempio
+ il codice assembly dovrebbe essere incapsulato in modo pulito e minimizzato
+ per facilitarne la migrazione. Generalmente questo codice dovrebbe essere
+ limitato alla parte di kernel specifica per un'architettura.
+
+ioctl: non scrivere nuove chiamate di sistema
+=============================================
+
+Una chiamata di sistema, generalmente, è scritta così::
+
+ asmlinkage long sys_mycall(int arg)
+ {
+ return 0;
+ }
+
+Primo, nella maggior parte dei casi non volete creare nuove chiamate di
+sistema.
+Create un dispositivo a caratteri ed implementate l'appropriata chiamata ioctl.
+Questo meccanismo è molto più flessibile delle chiamate di sistema: esso non
+dev'essere dichiarato in tutte le architetture nei file
+``include/asm/unistd.h`` e ``arch/kernel/entry.S``; inoltre, è improbabile
+che questo venga accettato da Linus.
+
+Se tutto quello che il vostro codice fa è leggere o scrivere alcuni parametri,
+considerate l'implementazione di un'interfaccia sysfs.
+
+All'interno di una ioctl vi trovate nel contesto utente di un processo. Quando
+avviene un errore dovete ritornare un valore negativo di errno (consultate
+``include/uapi/asm-generic/errno-base.h``,
+``include/uapi/asm-generic/errno.h`` e ``include/linux/errno.h``), altrimenti
+ritornate 0.
+
+Dopo aver dormito dovreste verificare se ci sono stati dei segnali: il modo
+Unix/Linux di gestire un segnale è di uscire temporaneamente dalla chiamata
+di sistema con l'errore ``-ERESTARTSYS``. La chiamata di sistema ritornerà
+al contesto utente, eseguirà il gestore del segnale e poi la vostra chiamata
+di sistema riprenderà (a meno che l'utente non l'abbia disabilitata). Quindi,
+dovreste essere pronti per continuare l'esecuzione, per esempio nel mezzo
+della manipolazione di una struttura dati.
+
+::
+
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
+Se dovete eseguire dei calcoli molto lunghi: pensate allo spazio utente.
+Se **davvero** volete farlo nel kernel ricordatevi di verificare periodicamente
+se dovete *lasciare* il processore (ricordatevi che, su ogni processore, il
+multitasking è cooperativo, senza diritto di prelazione).
+Esempio::
+
+ cond_resched(); /* Will sleep */
+
+Una breve nota sulla progettazione delle interfacce: il motto dei sistemi
+UNIX è "fornite meccanismi e non politiche"
+
+La ricetta per uno stallo
+=========================
+
+Non è permesso invocare una procedura che potrebbe dormire, fanno eccezione
+i seguenti casi:
+
+- Siete in un contesto utente.
+
+- Non trattenete alcuno spinlock.
+
+- Avete abilitato le interruzioni (in realtà, Andy Kleen dice che
+ lo schedulatore le abiliterà per voi, ma probabilmente questo non è quello
+ che volete).
+
+Da tener presente che alcune funzioni potrebbero dormire implicitamente:
+le più comuni sono quelle per l'accesso allo spazio utente (\*_user) e
+quelle per l'allocazione della memoria senza l'opzione ``GFP_ATOMIC``.
+
+Dovreste sempre compilare il kernel con l'opzione ``CONFIG_DEBUG_ATOMIC_SLEEP``
+attiva, questa vi avviserà se infrangete una di queste regole.
+Se **infrangete** le regole, allora potreste bloccare il vostro scatolotto.
+
+Veramente.
+
+Alcune delle procedure più comuni
+=================================
+
+:c:func:`printk()`
+------------------
+
+Definita in ``include/linux/printk.h``
+
+:c:func:`printk()` fornisce messaggi alla console, dmesg, e al demone syslog.
+Essa è utile per il debugging o per la notifica di errori; può essere
+utilizzata anche all'interno del contesto d'interruzione, ma usatela con
+cautela: una macchina che ha la propria console inondata da messaggi diventa
+inutilizzabile. La funzione utilizza un formato stringa quasi compatibile con
+la printf ANSI C, e la concatenazione di una stringa C come primo argomento
+per indicare la "priorità"::
+
+ printk(KERN_INFO "i = %u\n", i);
+
+Consultate ``include/linux/kern_levels.h`` per gli altri valori ``KERN_``;
+questi sono interpretati da syslog come livelli. Un caso speciale:
+per stampare un indirizzo IP usate::
+
+ __be32 ipaddress;
+ printk(KERN_INFO "my ip: %pI4\n", &ipaddress);
+
+
+:c:func:`printk()` utilizza un buffer interno di 1K e non s'accorge di
+eventuali sforamenti. Accertatevi che vi basti.
+
+.. note::
+
+ Saprete di essere un vero hacker del kernel quando inizierete a digitare
+ nei vostri programmi utenti le printf come se fossero printk :)
+
+.. note::
+
+ Un'altra nota a parte: la versione originale di Unix 6 aveva un commento
+ sopra alla funzione printf: "Printf non dovrebbe essere usata per il
+ chiacchiericcio". Dovreste seguire questo consiglio.
+
+:c:func:`copy_to_user()` / :c:func:`copy_from_user()` / :c:func:`get_user()` / :c:func:`put_user()`
+---------------------------------------------------------------------------------------------------
+
+Definite in ``include/linux/uaccess.h`` / ``asm/uaccess.h``
+
+**[DORMONO]**
+
+:c:func:`put_user()` e :c:func:`get_user()` sono usate per ricevere ed
+impostare singoli valori (come int, char, o long) da e verso lo spazio utente.
+Un puntatore nello spazio utente non dovrebbe mai essere dereferenziato: i dati
+dovrebbero essere copiati usando suddette procedure. Entrambe ritornano
+``-EFAULT`` oppure 0.
+
+:c:func:`copy_to_user()` e :c:func:`copy_from_user()` sono più generiche:
+esse copiano una quantità arbitraria di dati da e verso lo spazio utente.
+
+.. warning::
+
+ Al contrario di :c:func:`put_user()` e :c:func:`get_user()`, queste
+ funzioni ritornano la quantità di dati copiati (0 è comunque un successo).
+
+[Sì, questa stupida interfaccia mi imbarazza. La battaglia torna in auge anno
+dopo anno. --RR]
+
+Le funzioni potrebbero dormire implicitamente. Queste non dovrebbero mai essere
+invocate fuori dal contesto utente (non ha senso), con le interruzioni
+disabilitate, o con uno spinlock trattenuto.
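+
+Uno schizzo minimale, ipotizzando una ioctl che riceve dallo spazio utente un
+puntatore ``arg`` ad un intero::
+
+ int value;
+
+ if (get_user(value, (int __user *)arg))
+         return -EFAULT;
+
+ value += 1;
+
+ if (put_user(value, (int __user *)arg))
+         return -EFAULT;
+ return 0;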
+
+:c:func:`kmalloc()`/:c:func:`kfree()`
+-------------------------------------
+
+Definite in ``include/linux/slab.h``
+
+**[POTREBBERO DORMIRE: LEGGI SOTTO]**
+
+Queste procedure sono utilizzate per la richiesta dinamica di un puntatore ad
+un pezzo di memoria allineato, esattamente come malloc e free nello spazio
+utente, ma :c:func:`kmalloc()` ha un argomento aggiuntivo per indicare alcune
+opzioni. Le opzioni più importanti sono:
+
+``GFP_KERNEL``
+ Potrebbe dormire per liberare della memoria. L'opzione fornisce il modo
+ più affidabile per allocare memoria, ma il suo uso è consentito solo
+ nel contesto utente.
+
+``GFP_ATOMIC``
+ Non dorme. Meno affidabile di ``GFP_KERNEL``, ma può essere usata in un
+ contesto d'interruzione. Dovreste avere **davvero** una buona strategia
+ per la gestione degli errori in caso di mancanza di memoria.
+
+``GFP_DMA``
+ Alloca memoria per il DMA sul bus ISA nello spazio d'indirizzamento
+ inferiore ai 16MB. Se non sapete cos'è allora non vi serve.
+ Molto inaffidabile.
+
+Se vedete un messaggio d'avviso per una funzione dormiente che viene chiamata
+da un contesto errato, allora probabilmente avete usato una funzione
+d'allocazione dormiente da un contesto d'interruzione senza ``GFP_ATOMIC``.
+Dovreste correggerlo. Sbrigatevi, non cincischiate.
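+
+Uno schizzo minimale d'uso (la struttura ``foo`` è di fantasia)::
+
+ struct foo *f;
+
+ f = kmalloc(sizeof(*f), GFP_KERNEL); /* può dormire: solo contesto utente */
+ if (!f)
+         return -ENOMEM;
+ /* ... uso di f ... */
+ kfree(f);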
+
+Se allocate almeno ``PAGE_SIZE``(``asm/page.h`` o ``asm/page_types.h``) byte,
+considerate l'uso di :c:func:`__get_free_pages()` (``include/linux/gfp.h``).
+Accetta un argomento che definisce l'ordine (0 per la dimensione di una
+pagina, 1 per una doppia pagina, 2 per quattro pagine, eccetera) e le stesse
+opzioni d'allocazione viste precedentemente.
+
+Se state allocando un numero di byte notevolmente superiore ad una pagina
+potete usare :c:func:`vmalloc()`. Essa allocherà memoria virtuale all'interno
+dello spazio kernel. Questo è un blocco di memoria fisica non contiguo, ma
+la MMU vi darà l'impressione che lo sia (quindi, sarà contiguo solo dal punto
+di vista dei processori, non dal punto di vista dei driver dei dispositivi
+esterni).
+Se per qualche strana ragione avete davvero bisogno di una grossa quantità di
+memoria fisica contigua, avete un problema: Linux non ha un buon supporto per
+questo caso d'uso perché, dopo un po' di tempo, la frammentazione della memoria
+rende l'operazione difficile. Il modo migliore per allocare un simile blocco
+all'inizio dell'avvio del sistema è attraverso la procedura
+:c:func:`alloc_bootmem()`.
+
+Prima di inventare la vostra cache per gli oggetti più usati, considerate
+l'uso di una cache slab disponibile in ``include/linux/slab.h``.
+
+:c:func:`current()`
+-------------------
+
+Definita in ``include/asm/current.h``
+
+Questa variabile globale (in realtà una macro) contiene un puntatore alla
+struttura del processo corrente, quindi è valido solo dal contesto utente.
+Per esempio, quando un processo esegue una chiamata di sistema, questo
+punterà alla struttura dati del processo chiamante.
+Nel contesto d'interruzione il suo valore **non è NULL**.
+
+:c:func:`mdelay()`/:c:func:`udelay()`
+-------------------------------------
+
+Definite in ``include/asm/delay.h`` / ``include/linux/delay.h``
+
+Le funzioni :c:func:`udelay()` e :c:func:`ndelay()` possono essere utilizzate
+per brevi pause. Non usate grandi valori perché rischiate d'avere un
+overflow - in questo contesto la funzione :c:func:`mdelay()` è utile,
+oppure considerate :c:func:`msleep()`.
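+
+Per esempio::
+
+ udelay(10); /* pausa attiva di 10 microsecondi */
+ mdelay(5);  /* 5 millisecondi; se potete dormire, preferite msleep(5) */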
+
+:c:func:`cpu_to_be32()`/:c:func:`be32_to_cpu()`/:c:func:`cpu_to_le32()`/:c:func:`le32_to_cpu()`
+-----------------------------------------------------------------------------------------------
+
+Definite in ``include/asm/byteorder.h``
+
+La famiglia di funzioni :c:func:`cpu_to_be32()` (dove "32" può essere
+sostituito da 64 o 16, e "be" con "le") forniscono un modo generico
+per fare conversioni sull'ordine dei byte (endianness): esse ritornano
+il valore convertito. Tutte le varianti supportano anche il processo inverso:
+:c:func:`be32_to_cpu()`, eccetera.
+
+Queste funzioni hanno principalmente due varianti: la variante per
+puntatori, come :c:func:`cpu_to_be32p()`, che prende un puntatore
+ad un tipo e ritorna il valore convertito; l'altra variante è la
+famiglia di conversioni "in-situ", come :c:func:`cpu_to_be32s()`,
+che converte il valore puntato da un puntatore e ritorna void.
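+
+Una bozza che mostra le varianti (valori e nomi ipotetici)::
+
+    #include <linux/types.h>
+    #include <asm/byteorder.h>
+
+    u32 valore = 0x12345678;
+
+    __be32 rete = cpu_to_be32(valore);     /* ritorna il valore convertito */
+    u32 host = be32_to_cpu(rete);          /* processo inverso */
+
+    __be32 rete2 = cpu_to_be32p(&valore);  /* variante per puntatori */
+    cpu_to_be32s(&valore);                 /* variante "in-situ": converte
+                                            * sul posto e ritorna void */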
+
+:c:func:`local_irq_save()`/:c:func:`local_irq_restore()`
+--------------------------------------------------------
+
+Definite in ``include/linux/irqflags.h``
+
+Queste funzioni abilitano e disabilitano le interruzioni hardware
+sul processore locale. Entrambe sono rientranti; esse salvano lo stato
+precedente nel proprio argomento ``unsigned long flags``. Se sapete
+che le interruzioni sono abilitate, potete semplicemente utilizzare
+:c:func:`local_irq_disable()` e :c:func:`local_irq_enable()`.
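+
+Uno schema d'uso tipico (bozza)::
+
+    unsigned long flags;
+
+    local_irq_save(flags);      /* salva lo stato e disabilita le interruzioni */
+    /* ... sezione critica ... */
+    local_irq_restore(flags);   /* ripristina lo stato salvato */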
+
+.. _it_local_bh_disable:
+
+:c:func:`local_bh_disable()`/:c:func:`local_bh_enable()`
+--------------------------------------------------------
+
+Definite in ``include/linux/bottom_half.h``
+
+
+Queste funzioni abilitano e disabilitano le interruzioni software
+sul processore locale. Entrambe sono rientranti; se le interruzioni
+software erano già state disabilitate in precedenza, rimarranno
+disabilitate anche dopo aver invocato questa coppia di funzioni.
+Lo scopo è di prevenire l'esecuzione di softirq e tasklet sul processore
+attuale.
+
+:c:func:`smp_processor_id()`
+----------------------------
+
+Definita in ``include/linux/smp.h``
+
+:c:func:`get_cpu()` nega il diritto di prelazione (quindi non potete essere
+spostati su un altro processore all'improvviso) e ritorna il numero
+del processore attuale, fra 0 e ``NR_CPUS``. Da notare che non è detto
+che la numerazione dei processori sia continua. Quando avete terminato,
+ritornate allo stato precedente con :c:func:`put_cpu()`.
+
+Se sapete che non potete essere interrotti da altri processi (per esempio,
+se siete in un contesto d'interruzione, o se la prelazione è disabilitata)
+potete utilizzare :c:func:`smp_processor_id()`.
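+
+Una bozza d'esempio::
+
+    int cpu;
+
+    cpu = get_cpu();    /* disabilita la prelazione */
+    printk(KERN_INFO "in esecuzione sul processore %d\n", cpu);
+    put_cpu();          /* ripristina lo stato precedente */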
+
+
+``__init``/``__exit``/``__initdata``
+------------------------------------
+
+Definite in ``include/linux/init.h``
+
+Dopo l'avvio, il kernel libera una sezione speciale; le funzioni marcate
+con ``__init`` e le strutture dati marcate con ``__initdata`` vengono
+eliminate dopo il completamento dell'avvio: in modo simile i moduli eliminano
+questa memoria dopo l'inizializzazione. ``__exit`` viene utilizzato per
+dichiarare che una funzione verrà utilizzata solo in fase di rimozione:
+la funzione in questione verrà eliminata quando il file che la contiene
+non viene compilato come modulo. Guardate l'header file per maggiori
+informazioni. Da notare che
+non ha senso avere una funzione marcata come ``__init`` e al tempo stesso
+esportata ai moduli utilizzando :c:func:`EXPORT_SYMBOL()` o
+:c:func:`EXPORT_SYMBOL_GPL()` - non funzionerà.
+
+
+:c:func:`__initcall()`/:c:func:`module_init()`
+----------------------------------------------
+
+Definite in ``include/linux/init.h`` / ``include/linux/module.h``
+
+Molte parti del kernel funzionano bene come moduli (componenti del kernel
+caricabili dinamicamente). L'utilizzo delle macro :c:func:`module_init()`
+e :c:func:`module_exit()` semplifica la scrittura di codice che può funzionare
+sia come modulo, sia come parte del kernel, senza l'ausilio di #ifdef.
+
+La macro :c:func:`module_init()` definisce quale funzione dev'essere
+chiamata quando il modulo viene inserito (se il file è stato compilato come
+tale), o in fase di avvio: se il file non è stato compilato come modulo la
+macro :c:func:`module_init()` diventa equivalente a :c:func:`__initcall()`,
+la quale, tramite qualche magia del linker, s'assicura che la funzione venga
+chiamata durante l'avvio.
+
+La funzione può ritornare un numero d'errore negativo per scatenare un
+fallimento del caricamento (sfortunatamente, questo non ha effetto se il
+modulo è compilato come parte integrante del kernel). Questa funzione è chiamata
+in contesto utente con le interruzioni abilitate, quindi potrebbe dormire.
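+
+Una bozza di scheletro (nomi ipotetici) che unisce quanto detto su
+``__init``, ``__exit``, :c:func:`module_init()` e :c:func:`module_exit()`
+(descritta di seguito)::
+
+    #include <linux/init.h>
+    #include <linux/module.h>
+
+    static int __init esempio_init(void)
+    {
+            pr_info("esempio: caricato\n");
+            return 0;   /* un valore negativo fa fallire il caricamento */
+    }
+
+    static void __exit esempio_exit(void)
+    {
+            pr_info("esempio: rimosso\n");
+    }
+
+    module_init(esempio_init);
+    module_exit(esempio_exit);
+    MODULE_LICENSE("GPL");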
+
+
+:c:func:`module_exit()`
+-----------------------
+
+
+Definita in ``include/linux/module.h``
+
+Questa macro definisce la funzione che dev'essere chiamata al momento della
+rimozione (o mai, nel caso in cui il file sia parte integrante del kernel).
+Essa verrà chiamata solo quando il contatore d'uso del modulo raggiunge lo
+zero. Questa funzione può anche dormire, ma non può fallire: tutto dev'essere
+ripulito prima che la funzione ritorni.
+
+Da notare che questa macro è opzionale: se non presente, il modulo non sarà
+removibile (a meno che non usiate 'rmmod -f').
+
+
+:c:func:`try_module_get()`/:c:func:`module_put()`
+-------------------------------------------------
+
+Definite in ``include/linux/module.h``
+
+Queste funzioni maneggiano il contatore d'uso del modulo per proteggerlo dalla
+rimozione (in aggiunta, un modulo non può essere rimosso se un altro modulo
+utilizza uno dei suoi simboli esportati: vedere di seguito). Prima di eseguire
+codice del modulo, dovreste chiamare :c:func:`try_module_get()` su quel modulo:
+se fallisce significa che il modulo è stato rimosso e dovete agire come se
+non fosse presente. Altrimenti, potete accedere al modulo in sicurezza, e
+chiamare :c:func:`module_put()` quando avete finito.
+
+La maggior parte delle strutture registrabili hanno un campo owner
+(proprietario), come nella struttura
+:c:type:`struct file_operations <file_operations>`.
+Impostate questo campo al valore della macro ``THIS_MODULE``.
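+
+Una bozza dei due casi descritti (i nomi ``esempio_fops``, ``esempio_open``
+e ``mod`` sono ipotetici)::
+
+    /* campo owner in una struttura registrabile */
+    static const struct file_operations esempio_fops = {
+            .owner = THIS_MODULE,
+            .open  = esempio_open,
+    };
+
+    /* uso esplicito del contatore su un modulo "mod" */
+    if (!try_module_get(mod))
+            return -ENODEV;     /* il modulo è in fase di rimozione */
+    /* ... esecuzione di codice del modulo ... */
+    module_put(mod);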
+
+
+Code d'attesa ``include/linux/wait.h``
+======================================
+
+**[DORMONO]**
+
+Una coda d'attesa è usata per aspettare che qualcuno vi attivi quando una
+certa condizione s'avvera. Per evitare corse critiche, devono essere usate
+con cautela. Dichiarate una :c:type:`wait_queue_head_t`, e poi i processi
+che vogliono attendere il verificarsi di quella condizione dichiareranno
+una :c:type:`wait_queue_entry_t` facendo riferimento a loro stessi, poi
+metteranno questa in coda.
+
+Dichiarazione
+-------------
+
+Potete dichiarare una ``wait_queue_head_t`` utilizzando la macro
+:c:func:`DECLARE_WAIT_QUEUE_HEAD()` oppure utilizzando la procedura
+:c:func:`init_waitqueue_head()` nel vostro codice d'inizializzazione.
+
+Accodamento
+-----------
+
+Mettersi in una coda d'attesa è piuttosto complesso, perché dovete
+mettervi in coda prima di verificare la condizione. Esiste una macro
+a questo scopo: :c:func:`wait_event_interruptible()` (``include/linux/wait.h``).
+Il primo argomento è la testa della coda d'attesa, e il secondo è
+un'espressione che dev'essere valutata; la macro ritorna 0 quando questa
+espressione è vera, altrimenti ``-ERESTARTSYS`` se è stato ricevuto un segnale.
+La versione :c:func:`wait_event()` ignora i segnali.
+
+Svegliare una procedura in coda
+-------------------------------
+
+Chiamate :c:func:`wake_up()` (``include/linux/wait.h``); questa attiverà tutti
+i processi in coda. Fa eccezione il caso in cui uno di questi sia impostato
+come ``TASK_EXCLUSIVE``: i rimanenti non verranno svegliati.
+Nello stesso header file esistono altre varianti di questa funzione.
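+
+Una bozza minima (nomi ipotetici) con entrambi i lati della coda::
+
+    static DECLARE_WAIT_QUEUE_HEAD(coda);
+    static int pronto;
+
+    /* chi attende */
+    if (wait_event_interruptible(coda, pronto))
+            return -ERESTARTSYS;    /* ricevuto un segnale */
+
+    /* chi sveglia */
+    pronto = 1;
+    wake_up(&coda);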
+
+Operazioni atomiche
+===================
+
+Certe operazioni sono garantite come atomiche su tutte le piattaforme.
+Il primo gruppo di operazioni utilizza :c:type:`atomic_t`
+(``include/asm/atomic.h``); questo contiene un intero con segno (minimo 32bit),
+e dovete utilizzare queste funzioni per modificare o leggere variabili di tipo
+:c:type:`atomic_t`. :c:func:`atomic_read()` e :c:func:`atomic_set()` leggono ed
+impostano il contatore, :c:func:`atomic_add()`, :c:func:`atomic_sub()`,
+:c:func:`atomic_inc()`, :c:func:`atomic_dec()`, e
+:c:func:`atomic_dec_and_test()` (ritorna vero se raggiunge zero dopo essere
+stata decrementata).
+
+Sì. Ritorna vero (ovvero != 0) se la variabile atomica è zero.
+
+Da notare che queste funzioni sono più lente rispetto alla normale aritmetica,
+e quindi non dovrebbero essere usate a sproposito.
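+
+Una bozza d'uso (nomi ipotetici)::
+
+    static atomic_t utilizzatori = ATOMIC_INIT(0);
+
+    atomic_inc(&utilizzatori);
+    /* ... */
+    if (atomic_dec_and_test(&utilizzatori))
+            pr_info("il contatore è tornato a zero\n");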
+
+Il secondo gruppo di operazioni atomiche sono definite in
+``include/linux/bitops.h`` ed agiscono sui bit d'una variabile di tipo
+``unsigned long``. Queste operazioni prendono come argomento un puntatore
+alla variabile, e un numero di bit dove 0 è quello meno significativo.
+:c:func:`set_bit()`, :c:func:`clear_bit()` e :c:func:`change_bit()`
+impostano, cancellano, ed invertono il bit indicato.
+:c:func:`test_and_set_bit()`, :c:func:`test_and_clear_bit()` e
+:c:func:`test_and_change_bit()` fanno la stessa cosa, ad eccezione che
+ritornano vero se il bit era impostato; queste sono particolarmente
+utili quando si vuole impostare atomicamente dei flag.
+
+Con queste operazioni è possibile utilizzare indici di bit che eccedono
+il valore ``BITS_PER_LONG``. Il comportamento è strano sulle piattaforme
+big-endian quindi è meglio evitarlo.
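+
+Una bozza d'uso delle operazioni sui bit (nomi ipotetici)::
+
+    static unsigned long stato;
+
+    set_bit(0, &stato);                 /* imposta il bit 0 */
+    clear_bit(0, &stato);               /* cancella il bit 0 */
+    if (test_and_set_bit(1, &stato))
+            pr_info("il bit 1 era già impostato\n");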
+
+Simboli
+=======
+
+All'interno del kernel, si seguono le normali regole del linker (ovvero,
+a meno che un simbolo non venga dichiarato con visibilità limitata ad un
+file con la parola chiave ``static``, esso può essere utilizzato in qualsiasi
+parte del kernel). Nonostante ciò, per i moduli, esiste una tabella dei
+simboli esportati che limita i punti di accesso al kernel. Anche i moduli
+possono esportare simboli.
+
+:c:func:`EXPORT_SYMBOL()`
+-------------------------
+
+Definita in ``include/linux/export.h``
+
+Questo è il classico metodo per esportare un simbolo: i moduli caricati
+dinamicamente potranno utilizzare normalmente il simbolo.
+
+:c:func:`EXPORT_SYMBOL_GPL()`
+-----------------------------
+
+Definita in ``include/linux/export.h``
+
+Essa è simile a :c:func:`EXPORT_SYMBOL()` ad eccezione del fatto che i
+simboli esportati con :c:func:`EXPORT_SYMBOL_GPL()` possono essere
+utilizzati solo dai moduli che hanno dichiarato una licenza compatibile
+con la GPL attraverso :c:func:`MODULE_LICENSE()`. Questo implica che la
+funzione esportata è considerata interna, e non una vera e propria interfaccia.
+Alcuni manutentori e sviluppatori potrebbero comunque richiedere
+:c:func:`EXPORT_SYMBOL_GPL()` quando si aggiungono nuove funzionalità o
+interfacce.
+
+Procedure e convenzioni
+=======================
+
+Liste doppiamente concatenate ``include/linux/list.h``
+------------------------------------------------------
+
+Un tempo negli header del kernel c'erano tre gruppi di funzioni per
+le liste concatenate, ma questa è stata la vincente. Se non avete particolari
+necessità per una semplice lista concatenata, allora questa è una buona scelta.
+
+In particolare, :c:func:`list_for_each_entry()` è utile.
+
+Convenzione dei valori di ritorno
+---------------------------------
+
+Per codice chiamato in contesto utente, è molto comune sfidare le convenzioni
+C e ritornare 0 in caso di successo, ed un codice di errore negativo
+(eg. ``-EFAULT``) nei casi fallimentari. Questo potrebbe essere controintuitivo
+a prima vista, ma è abbastanza diffuso nel kernel.
+
+Utilizzate :c:func:`ERR_PTR()` (``include/linux/err.h``) per codificare
+un numero d'errore negativo in un puntatore, e :c:func:`IS_ERR()` e
+:c:func:`PTR_ERR()` per recuperarlo di nuovo: così si evita d'avere un
+puntatore dedicato per il numero d'errore. Da brividi, ma in senso positivo.
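+
+Una bozza (la funzione ``trova_oggetto()`` e la struttura sono ipotetiche)::
+
+    #include <linux/err.h>
+
+    struct oggetto *trova_oggetto(int id)
+    {
+            if (id < 0)
+                    return ERR_PTR(-EINVAL);  /* errore codificato nel puntatore */
+            /* ... ricerca vera e propria ... */
+    }
+
+    /* il chiamante */
+    struct oggetto *obj = trova_oggetto(id);
+
+    if (IS_ERR(obj))
+            return PTR_ERR(obj);  /* recupera il numero d'errore */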
+
+Rompere la compilazione
+-----------------------
+
+Linus e gli altri sviluppatori a volte cambiano i nomi delle funzioni e
+delle strutture nei kernel in sviluppo; questo non è solo per tenere
+tutti sulle spine: questo riflette cambiamenti fondamentali (eg. la funzione
+non può più essere chiamata con le interruzioni abilitate, o fa controlli
+aggiuntivi, o non fa più controlli che venivano fatti in precedenza).
+Solitamente a questo s'accompagna un'adeguata e completa nota sulla lista
+di discussione linux-kernel; cercate negli archivi.
+Solitamente eseguire una semplice sostituzione su tutto un file rende
+le cose **peggiori**.
+
+Inizializzazione dei campi d'una struttura
+------------------------------------------
+
+Il metodo preferito per l'inizializzazione delle strutture è quello
+di utilizzare gli inizializzatori designati, come definiti nello
+standard ISO C99, eg::
+
+    static struct block_device_operations opt_fops = {
+            .open               = opt_open,
+            .release            = opt_release,
+            .ioctl              = opt_ioctl,
+            .check_media_change = opt_media_change,
+    };
+
+Questo rende più facile la ricerca con grep, e rende più chiaro quale campo
+viene impostato. Dovreste fare così perché si mostra meglio.
+
+Estensioni GNU
+--------------
+
+Le estensioni GNU sono esplicitamente permesse nel kernel Linux. Da notare
+che alcune delle più complesse non sono ben supportate, per via dello scarso
+sviluppo, ma le seguenti sono da considerarsi la norma (per maggiori dettagli,
+leggete la sezione "C Extensions" nella pagina info di GCC - Sì, davvero
+la pagina info, la pagina man è solo un breve riassunto delle cose nella
+pagina info).
+
+- Funzioni inline
+
+- Istruzioni in espressioni (ie. il costrutto ``({`` e ``})``).
+
+- Dichiarazione di attributi per funzioni / variabili / tipi
+ (__attribute__)
+
+- typeof
+
+- Array con lunghezza zero
+
+- Macro varargs
+
+- Aritmetica sui puntatori void
+
+- Inizializzatori non costanti
+
+- Istruzioni assembler (non al di fuori di 'arch/' e 'include/asm/')
+
+- Nomi delle funzioni come stringhe (__func__).
+
+- __builtin_constant_p()
+
+Siate sospettosi quando utilizzate long long nel kernel, il codice generato
+da gcc è orribile ed anche peggio: le divisioni e le moltiplicazioni non
+funzionano sulle piattaforme i386 perché le rispettive funzioni di runtime
+di GCC non sono incluse nell'ambiente del kernel.
+
+C++
+---
+
+Solitamente utilizzare il C++ nel kernel è una cattiva idea perché
+il kernel non fornisce il necessario ambiente di runtime e gli header file
+non sono stati verificati. Rimane comunque possibile, ma non consigliato.
+Se davvero volete usarlo, almeno evitate le eccezioni.
+
+#if
+---
+
+Viene generalmente considerato più pulito l'uso delle macro negli header file
+(o all'inizio dei file .c) per astrarre funzioni piuttosto che utilizzare
+l'istruzione di pre-processore \`#if' all'interno del codice sorgente.
+
+Mettere le vostre cose nel kernel
+=================================
+
+Al fine d'avere le vostre cose in ordine per l'inclusione ufficiale, o
+anche per avere patch pulite, c'è del lavoro amministrativo da fare:
+
+- Trovare di chi è lo stagno in cui state pisciando. Guardare in cima
+ ai file sorgenti, all'interno del file ``MAINTAINERS``, ed alla fine
+ di tutti nel file ``CREDITS``. Dovreste coordinarvi con queste persone
+ per evitare di duplicare gli sforzi, o provare qualcosa che è già stato
+ rigettato.
+
+ Assicuratevi di mettere il vostro nome ed indirizzo email in cima a
+ tutti i file che create o che maneggiate significativamente. Questo è
+ il primo posto dove le persone guarderanno quando troveranno un baco,
+ o quando **loro** vorranno fare una modifica.
+
+- Solitamente vorrete un'opzione di configurazione per la vostra modifica
+ al kernel. Modificate ``Kconfig`` nella cartella giusta. Il linguaggio
+ Config è facile con copia ed incolla, e c'è una completa documentazione
+ nel file ``Documentation/kbuild/kconfig-language.txt``.
+
+ Nella descrizione della vostra opzione, assicuratevi di parlare sia agli
+ utenti esperti sia agli utenti che non sanno nulla del vostro lavoro.
+ Menzionate qui le incompatibilità ed i problemi. Chiaramente la
+ descrizione deve terminare con “if in doubt, say N” (se siete in dubbio,
+ dite N) (oppure, occasionalmente, \`Y'); questo è per le persone che non
+ hanno idea di che cosa voi stiate parlando.
+
+- Modificate il file ``Makefile``: le variabili CONFIG sono esportate qui,
+ quindi potete solitamente aggiungere una riga come la seguente
+ "obj-$(CONFIG_xxx) += xxx.o". La sintassi è documentata nel file
+ ``Documentation/kbuild/makefiles.txt``.
+
+- Aggiungete voi stessi in ``CREDITS`` se avete fatto qualcosa di notevole,
+ solitamente qualcosa che supera il singolo file (comunque il vostro nome
+ dovrebbe essere all'inizio dei file sorgenti). ``MAINTAINERS`` significa
+ che volete essere consultati quando vengono fatte delle modifiche ad un
+ sottosistema, e quando ci sono dei bachi; questo implica molto di più
+ di un semplice impegno su una parte del codice.
+
+- Infine, non dimenticatevi di leggere
+ ``Documentation/process/submitting-patches.rst`` e possibilmente anche
+ ``Documentation/process/submitting-drivers.rst``.
+
+Trucchetti del kernel
+=====================
+
+Dopo una rapida occhiata al codice, questi sono i preferiti. Sentitevi liberi
+di aggiungerne altri.
+
+``arch/x86/include/asm/delay.h``::
+
+ #define ndelay(n) (__builtin_constant_p(n) ? \
+ ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
+ __ndelay(n))
+
+
+``include/linux/fs.h``::
+
+ /*
+ * Kernel pointers have redundant information, so we can use a
+ * scheme where we can return either an error code or a dentry
+ * pointer with the same return value.
+ *
+ * This should be a per-architecture thing, to allow different
+ * error and pointer decisions.
+ */
+ #define ERR_PTR(err) ((void *)((long)(err)))
+ #define PTR_ERR(ptr) ((long)(ptr))
+ #define IS_ERR(ptr) ((unsigned long)(ptr) > (unsigned long)(-1000))
+
+``arch/x86/include/asm/uaccess_32.h:``::
+
+ #define copy_to_user(to,from,n) \
+ (__builtin_constant_p(n) ? \
+ __constant_copy_to_user((to),(from),(n)) : \
+ __generic_copy_to_user((to),(from),(n)))
+
+
+``arch/sparc/kernel/head.S:``::
+
+ /*
+ * Sun people can't spell worth damn. "compatability" indeed.
+ * At least we *know* we can't spell, and use a spell-checker.
+ */
+
+ /* Uh, actually Linus it is I who cannot spell. Too much murky
+ * Sparc assembly will do this to ya.
+ */
+ C_LABEL(cputypvar):
+ .asciz "compatibility"
+
+ /* Tested on SS-5, SS-10. Probably someone at Sun applied a spell-checker. */
+ .align 4
+ C_LABEL(cputypvar_sun4m):
+ .asciz "compatible"
+
+
+``arch/sparc/lib/checksum.S:``::
+
+ /* Sun, you just can't beat me, you just can't. Stop trying,
+ * give up. I'm serious, I am going to kick the living shit
+ * out of you, game over, lights out.
+ */
+
+
+Ringraziamenti
+==============
+
+Ringrazio Andi Kleen per le sue idee, le risposte alle mie domande,
+le correzioni dei miei errori, l'aggiunta di contenuti, eccetera.
+Philipp Rumpf per l'ortografia e per aver reso più chiaro il testo, e
+per alcuni eccellenti punti tutt'altro che ovvi. Werner Almesberger
+per avermi fornito un ottimo riassunto di :c:func:`disable_irq()`,
+e Jes Sorensen e Andrea Arcangeli per le precisazioni. Michael Elizabeth
+Chastain per aver verificato ed aggiunto la sezione configurazione.
+Telsa Gwynne per avermi insegnato DocBook.
diff --git a/Documentation/translations/it_IT/kernel-hacking/index.rst b/Documentation/translations/it_IT/kernel-hacking/index.rst
new file mode 100644
index 000000000000..50228380bd50
--- /dev/null
+++ b/Documentation/translations/it_IT/kernel-hacking/index.rst
@@ -0,0 +1,16 @@
+.. include:: ../disclaimer-ita.rst
+
+:Original: :ref:`Documentation/kernel-hacking/index.rst <kernel_hacking>`
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
+
+.. _it_kernel_hacking:
+
+============================
+Guida all'hacking del kernel
+============================
+
+.. toctree::
+ :maxdepth: 2
+
+ hacking
+ locking
diff --git a/Documentation/translations/it_IT/kernel-hacking/locking.rst b/Documentation/translations/it_IT/kernel-hacking/locking.rst
new file mode 100644
index 000000000000..753643622c23
--- /dev/null
+++ b/Documentation/translations/it_IT/kernel-hacking/locking.rst
@@ -0,0 +1,1493 @@
+.. include:: ../disclaimer-ita.rst
+
+:Original: :ref:`Documentation/kernel-hacking/locking.rst <kernel_hacking_lock>`
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
+
+.. _it_kernel_hacking_lock:
+
+==========================================
+L'inaffidabile guida alla sincronizzazione
+==========================================
+
+:Author: Rusty Russell
+
+Introduzione
+============
+
+Benvenuti alla notevole ed inaffidabile guida ai problemi di sincronizzazione
+(locking) nel kernel. Questo documento descrive il sistema di sincronizzazione
+nel kernel Linux 2.6.
+
+Dato il largo utilizzo del multi-threading e della prelazione nel kernel
+Linux, chiunque voglia dilettarsi col kernel deve conoscere i concetti
+fondamentali della concorrenza e della sincronizzazione nei sistemi
+multi-processore.
+
+Il problema con la concorrenza
+==============================
+
+(Saltatelo se sapete già cos'è una corsa critica).
+
+In un normale programma, potete incrementare un contatore nel seguente modo:
+
+::
+
+ contatore++;
+
+Questo è quello che vi aspettereste che accada sempre:
+
+
+.. table:: Risultati attesi
+
+ +------------------------------------+------------------------------------+
+ | Istanza 1 | Istanza 2 |
+ +====================================+====================================+
+ | leggi contatore (5) | |
+ +------------------------------------+------------------------------------+
+ | aggiungi 1 (6) | |
+ +------------------------------------+------------------------------------+
+ | scrivi contatore (6) | |
+ +------------------------------------+------------------------------------+
+ | | leggi contatore (6) |
+ +------------------------------------+------------------------------------+
+ | | aggiungi 1 (7) |
+ +------------------------------------+------------------------------------+
+ | | scrivi contatore (7) |
+ +------------------------------------+------------------------------------+
+
+Questo è quello che potrebbe succedere in realtà:
+
+.. table:: Possibile risultato
+
+ +------------------------------------+------------------------------------+
+ | Istanza 1 | Istanza 2 |
+ +====================================+====================================+
+ | leggi contatore (5) | |
+ +------------------------------------+------------------------------------+
+ | | leggi contatore (5) |
+ +------------------------------------+------------------------------------+
+ | aggiungi 1 (6) | |
+ +------------------------------------+------------------------------------+
+ | | aggiungi 1 (6) |
+ +------------------------------------+------------------------------------+
+ | scrivi contatore (6) | |
+ +------------------------------------+------------------------------------+
+ | | scrivi contatore (6) |
+ +------------------------------------+------------------------------------+
+
+
+Corse critiche e sezioni critiche
+---------------------------------
+
+Questa sovrapposizione, ovvero quando un risultato dipende dal tempo che
+intercorre fra processi diversi, è chiamata corsa critica. La porzione
+di codice che contiene questo problema è chiamata sezione critica.
+In particolar modo da quando Linux ha incominciato a girare su
+macchine multi-processore, le sezioni critiche sono diventate uno dei
+maggiori problemi di progettazione ed implementazione del kernel.
+
+La prelazione può sortire gli stessi effetti, anche se c'è una sola CPU:
+interrompendo un processo nella sua sezione critica otterremo comunque
+la stessa corsa critica. In questo caso, il thread che si avvicenda
+nell'esecuzione potrebbe eseguire anch'esso la sezione critica.
+
+La soluzione è quella di riconoscere quando avvengono questi accessi
+simultanei, ed utilizzare i *lock* per accertarsi che solo un'istanza
+per volta possa entrare nella sezione critica. Il kernel offre delle buone
+funzioni a questo scopo. E poi ci sono quelle meno buone, ma farò finta
+che non esistano.
+
+Sincronizzazione nel kernel Linux
+=================================
+
+Se posso darvi un suggerimento: non dormite mai con qualcuno più pazzo di
+voi. Ma se dovessi darvi un suggerimento sulla sincronizzazione:
+**mantenetela semplice**.
+
+Siate riluttanti nell'introduzione di nuovi *lock*.
+
+Abbastanza strano, quest'ultimo è l'esatto opposto del mio suggerimento
+su quando **avete** dormito con qualcuno più pazzo di voi. E dovreste
+pensare a prendervi un cane bello grande.
+
+I due principali tipi di *lock* nel kernel: spinlock e mutex
+------------------------------------------------------------
+
+Ci sono due tipi principali di *lock* nel kernel. Il tipo fondamentale è lo
+spinlock (``include/asm/spinlock.h``), un semplice *lock* che può essere
+trattenuto solo da un processo: se non si può trattenere lo spinlock, allora
+rimane in attesa attiva (in inglese *spinning*) finché non ci riesce.
+Gli spinlock sono molto piccoli e rapidi, possono essere utilizzati ovunque.
+
+Il secondo tipo è il mutex (``include/linux/mutex.h``): è come uno spinlock,
+ma potreste bloccarvi trattenendolo. Se non potete trattenere un mutex
+il vostro processo si auto-sospenderà; verrà riattivato quando il mutex
+verrà rilasciato. Questo significa che il processore potrà occuparsi d'altro
+mentre il vostro processo è in attesa. Esistono molti casi in cui non potete
+permettervi di sospendere un processo (vedere
+:ref:`Quali funzioni possono essere chiamate in modo sicuro dalle interruzioni? <it_sleeping-things>`)
+e quindi dovrete utilizzare gli spinlock.
+
+Nessuno di questi *lock* è ricorsivo: vedere
+:ref:`Stallo: semplice ed avanzato <it_deadlock>`
+
+I *lock* e i kernel per sistemi monoprocessore
+----------------------------------------------
+
+Per i kernel compilati senza ``CONFIG_SMP`` e senza ``CONFIG_PREEMPT``
+gli spinlock non esistono. Questa è un'ottima scelta di progettazione:
+quando nessun altro processo può essere eseguito in simultanea, allora
+non c'è la necessità di avere un *lock*.
+
+Se il kernel è compilato senza ``CONFIG_SMP`` ma con ``CONFIG_PREEMPT``,
+allora gli spinlock disabilitano la prelazione; questo è sufficiente a
+prevenire le corse critiche. Nella maggior parte dei casi, possiamo considerare
+la prelazione equivalente ad un sistema multi-processore senza preoccuparci
+di trattarla indipendentemente.
+
+Dovreste verificare sempre la sincronizzazione con le opzioni ``CONFIG_SMP`` e
+``CONFIG_PREEMPT`` abilitate, anche quando non avete un sistema
+multi-processore, questo vi permetterà di identificare alcuni problemi
+di sincronizzazione.
+
+Come vedremo di seguito, i mutex continuano ad esistere perché sono necessari
+per la sincronizzazione fra processi in contesto utente.
+
+Sincronizzazione in contesto utente
+-----------------------------------
+
+Se avete una struttura dati che verrà utilizzata solo dal contesto utente,
+allora, per proteggerla, potete utilizzare un semplice mutex
+(``include/linux/mutex.h``). Questo è il caso più semplice: inizializzate il
+mutex; invocate :c:func:`mutex_lock_interruptible()` per trattenerlo e
+:c:func:`mutex_unlock()` per rilasciarlo. C'è anche :c:func:`mutex_lock()`
+ma questa dovrebbe essere evitata perché non ritorna in caso di segnali.
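+
+Lo schema di base (``dati_lock`` è un nome ipotetico)::
+
+    static DEFINE_MUTEX(dati_lock);
+
+    if (mutex_lock_interruptible(&dati_lock))
+            return -ERESTARTSYS;    /* interrotti da un segnale */
+    /* ... manipolazione dei dati condivisi ... */
+    mutex_unlock(&dati_lock);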
+
+Per esempio: ``net/netfilter/nf_sockopt.c`` permette la registrazione
+di nuove chiamate per :c:func:`setsockopt()` e :c:func:`getsockopt()`
+usando la funzione :c:func:`nf_register_sockopt()`. La registrazione e
+la rimozione vengono eseguite solamente quando il modulo viene caricato
+o scaricato (e durante l'avvio del sistema, qui non abbiamo concorrenza),
+e la lista delle funzioni registrate viene consultata solamente quando
+:c:func:`setsockopt()` o :c:func:`getsockopt()` sono sconosciute al sistema.
+In questo caso ``nf_sockopt_mutex`` è perfetto allo scopo, in particolar modo
+visto che setsockopt e getsockopt potrebbero dormire.
+
+Sincronizzazione fra il contesto utente e i softirq
+---------------------------------------------------
+
+Se un softirq condivide dati col contesto utente, avete due problemi.
+Primo, il contesto utente corrente potrebbe essere interrotto da un softirq,
+e secondo, la sezione critica potrebbe essere eseguita da un altro
+processore. Questo è quando :c:func:`spin_lock_bh()`
+(``include/linux/spinlock.h``) viene utilizzato. Questo disabilita i softirq
+sul processore e trattiene il *lock*. Invece, :c:func:`spin_unlock_bh()` fa
+l'opposto. (Il suffisso '_bh' è un residuo storico che fa riferimento al
+"Bottom Halves", il vecchio nome delle interruzioni software. In un mondo
+perfetto questa funzione si chiamerebbe 'spin_lock_softirq()').
+
+Da notare che in questo caso potete utilizzare anche :c:func:`spin_lock_irq()`
+o :c:func:`spin_lock_irqsave()`, queste fermano anche le interruzioni hardware:
+vedere :ref:`Contesto di interruzione hardware <it_hardirq-context>`.
+
+Questo funziona alla perfezione anche sui sistemi monoprocessore: gli spinlock
+svaniscono e questa macro diventa semplicemente :c:func:`local_bh_disable()`
+(``include/linux/interrupt.h``), la quale impedisce ai softirq d'essere
+eseguiti.
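+
+Lo schema d'uso dal contesto utente (``dati_lock`` è ipotetico)::
+
+    static DEFINE_SPINLOCK(dati_lock);
+
+    spin_lock_bh(&dati_lock);    /* blocca i softirq locali e trattiene il lock */
+    /* ... dati condivisi con un softirq ... */
+    spin_unlock_bh(&dati_lock);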
+
+Sincronizzazione fra contesto utente e i tasklet
+------------------------------------------------
+
+Questo caso è uguale al precedente, un tasklet viene eseguito da un softirq.
+
+Sincronizzazione fra contesto utente e i timer
+----------------------------------------------
+
+Anche questo caso è uguale al precedente, un timer viene eseguito da un
+softirq.
+Dal punto di vista della sincronizzazione, tasklet e timer sono identici.
+
+Sincronizzazione fra tasklet e timer
+------------------------------------
+
+Qualche volta un tasklet od un timer potrebbero condividere i dati con
+un altro tasklet o timer.
+
+Lo stesso tasklet/timer
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Dato che un tasklet non viene mai eseguito contemporaneamente su due
+processori, non dovete preoccuparvi che sia rientrante (ovvero eseguito
+più volte in contemporanea), perfino su sistemi multi-processore.
+
+Differenti tasklet/timer
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Se un altro tasklet/timer vuole condividere dati col vostro tasklet o timer,
+allora avrete entrambi bisogno di :c:func:`spin_lock()` e
+:c:func:`spin_unlock()`. Qui :c:func:`spin_lock_bh()` è inutile, siete già
+in un tasklet ed avete la garanzia che nessun altro verrà eseguito sullo
+stesso processore.
+
+Sincronizzazione fra softirq
+----------------------------
+
+Spesso un softirq potrebbe condividere dati con se stesso o un tasklet/timer.
+
+Lo stesso softirq
+~~~~~~~~~~~~~~~~~
+
+Lo stesso softirq può essere eseguito su un diverso processore: allo scopo
+di migliorare le prestazioni potete utilizzare dati riservati ad ogni
+processore (vedere :ref:`Dati per processore <it_per-cpu>`). Se siete arrivati
+fino a questo punto nell'uso dei softirq, probabilmente tenete alla scalabilità
+delle prestazioni abbastanza da giustificarne la complessità aggiuntiva.
+
+Dovete utilizzare :c:func:`spin_lock()` e :c:func:`spin_unlock()` per
+proteggere i dati condivisi.
+
+Diversi softirq
+~~~~~~~~~~~~~~~~
+
+Dovete utilizzare :c:func:`spin_lock()` e :c:func:`spin_unlock()` per
+proteggere i dati condivisi, che siano timer, tasklet, diversi softirq o
+lo stesso o altri softirq: uno qualsiasi di essi potrebbe essere in esecuzione
+su un diverso processore.
+
+.. _`it_hardirq-context`:
+
+Contesto di interruzione hardware
+=================================
+
+Solitamente le interruzioni hardware comunicano con un tasklet o un softirq.
+Spesso questo si traduce nel mettere in coda qualcosa da fare che verrà
+preso in carico da un softirq.
+
+Sincronizzazione fra interruzioni hardware e softirq/tasklet
+------------------------------------------------------------
+
+Se un gestore di interruzioni hardware condivide dati con un softirq, allora
+avrete due preoccupazioni. Primo, il softirq può essere interrotto da
+un'interruzione hardware, e secondo, la sezione critica potrebbe essere
+eseguita da un'interruzione hardware su un processore diverso. Questo è il caso
+dove :c:func:`spin_lock_irq()` viene utilizzato. Disabilita le interruzioni
+sul processore che l'esegue, poi trattiene il lock. :c:func:`spin_unlock_irq()`
+fa l'opposto.
+
+Il gestore d'interruzione hardware non usa :c:func:`spin_lock_irq()` perché
+i softirq non possono essere eseguiti quando il gestore d'interruzione hardware
+è in esecuzione: per questo si può usare :c:func:`spin_lock()`, che è un po'
+più veloce. L'unica eccezione è quando un altro gestore d'interruzioni
+hardware utilizza lo stesso *lock*: :c:func:`spin_lock_irq()` impedirà a questo
+secondo gestore di interrompere quello in esecuzione.
+
+Questo funziona alla perfezione anche sui sistemi monoprocessore: gli spinlock
+svaniscono e questa macro diventa semplicemente :c:func:`local_irq_disable()`
+(``include/asm/smp.h``), la quale impedisce a softirq/tasklet/BH d'essere
+eseguiti.
+
+:c:func:`spin_lock_irqsave()` (``include/linux/spinlock.h``) è una variante che
+salva lo stato delle interruzioni in una variabile, questa verrà poi passata
+a :c:func:`spin_unlock_irqrestore()`. Questo significa che lo stesso codice
+potrà essere utilizzato in un'interruzione hardware (dove le interruzioni sono
+già disabilitate) e in un softirq (dove la disabilitazione delle interruzioni
+è richiesta).
+
+Da notare che i softirq (e quindi tasklet e timer) sono eseguiti al ritorno
+da un'interruzione hardware, quindi :c:func:`spin_lock_irq()` interrompe
+anche questi. Tenuto conto di questo si può dire che
+:c:func:`spin_lock_irqsave()` è la funzione di sincronizzazione più generica
+e potente.
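+
+Una bozza che riassume i due lati (``dati_lock`` è un *lock* ipotetico)::
+
+    /* contesto utente: disabilita le interruzioni locali */
+    spin_lock_irq(&dati_lock);
+    /* ... */
+    spin_unlock_irq(&dati_lock);
+
+    /* gestore d'interruzione hardware: interruzioni già disabilitate */
+    spin_lock(&dati_lock);
+    /* ... */
+    spin_unlock(&dati_lock);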
+
+Sincronizzazione fra due gestori d'interruzioni hardware
+--------------------------------------------------------
+
+Condividere dati fra due gestori di interruzione hardware è molto raro, ma se
+succede, dovreste usare :c:func:`spin_lock_irqsave()`: dipende
+dall'architettura se tutte le interruzioni siano già disabilitate
+durante l'esecuzione dei gestori d'interruzione.
+
+Bigino della sincronizzazione
+=============================
+
+Pete Zaitcev ci offre il seguente riassunto:
+
+- Se siete in un contesto utente (una qualsiasi chiamata di sistema)
+ e volete sincronizzarvi con altri processi, usate i mutex. Potete trattenere
+ il mutex e dormire (``copy_from_user*()`` o ``kmalloc(x,GFP_KERNEL)``).
+
+- Altrimenti (== i dati possono essere manipolati da un'interruzione) usate
+ :c:func:`spin_lock_irqsave()` e :c:func:`spin_unlock_irqrestore()`.
+
+- Evitate di trattenere uno spinlock per più di 5 righe di codice incluse
+ le chiamate a funzione (ad eccezione di quelle per l'accesso come
+ :c:func:`readb()`).
+
+Tabella dei requisiti minimi
+----------------------------
+
+La tabella seguente illustra i requisiti **minimi** per la sincronizzazione fra
+diversi contesti. In alcuni casi, lo stesso contesto può essere eseguito solo
+da un processore per volta, quindi non ci sono requisiti per la
+sincronizzazione (per esempio, un thread può essere eseguito solo su un
+processore alla volta, ma se deve condividere dati con un altro thread, allora
+la sincronizzazione è necessaria).
+
+Ricordatevi il suggerimento qui sopra: potete sempre usare
+:c:func:`spin_lock_irqsave()`, che è un sovrainsieme di tutte le altre funzioni
+per spinlock.
+
+============== ============= ============= ========= ========= ========= ========= ======= ======= ============== ==============
+. IRQ Handler A IRQ Handler B Softirq A Softirq B Tasklet A Tasklet B Timer A Timer B User Context A User Context B
+============== ============= ============= ========= ========= ========= ========= ======= ======= ============== ==============
+IRQ Handler A None
+IRQ Handler B SLIS None
+Softirq A SLI SLI SL
+Softirq B SLI SLI SL SL
+Tasklet A SLI SLI SL SL None
+Tasklet B SLI SLI SL SL SL None
+Timer A SLI SLI SL SL SL SL None
+Timer B SLI SLI SL SL SL SL SL None
+User Context A SLI SLI SLBH SLBH SLBH SLBH SLBH SLBH None
+User Context B SLI SLI SLBH SLBH SLBH SLBH SLBH SLBH MLI None
+============== ============= ============= ========= ========= ========= ========= ======= ======= ============== ==============
+
+Table: Tabella dei requisiti per la sincronizzazione
+
++--------+----------------------------+
+| SLIS | spin_lock_irqsave |
++--------+----------------------------+
+| SLI | spin_lock_irq |
++--------+----------------------------+
+| SL | spin_lock |
++--------+----------------------------+
+| SLBH | spin_lock_bh |
++--------+----------------------------+
+| MLI | mutex_lock_interruptible |
++--------+----------------------------+
+
+Table: Legenda per la tabella dei requisiti per la sincronizzazione
+
+Le funzioni *trylock*
+=====================
+
+Ci sono funzioni che provano a trattenere un *lock* solo una volta e
+ritornano immediatamente comunicando il successo od il fallimento
+dell'operazione. Possono essere usate quando non serve accedere ai dati
+protetti dal *lock* quando qualche altro thread lo sta già facendo
+trattenendo il *lock*. Potrete acquisire il *lock* più tardi se vi
+serve accedere ai dati protetti da questo *lock*.
+
+La funzione :c:func:`spin_trylock()` non rimane in attesa attiva: se riesce
+ad acquisire il *lock* al primo colpo ritorna un valore diverso da zero,
+altrimenti ritorna 0. Questa funzione può essere utilizzata in un qualunque
+contesto, ma come :c:func:`spin_lock()`: dovete disabilitare i contesti che
+potrebbero interrompervi e quindi trattenere lo spinlock.
+
+La funzione :c:func:`mutex_trylock()` invece di sospendere il vostro processo
+ritorna un valore diverso da zero se è possibile trattenere il lock al primo
+colpo, altrimenti se fallisce ritorna 0. Nonostante non dorma, questa funzione
+non può essere usata in modo sicuro in contesti di interruzione hardware o
+software.
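+
+Una bozza d'uso (``dati_lock`` è ipotetico)::
+
+    static DEFINE_SPINLOCK(dati_lock);
+
+    if (spin_trylock(&dati_lock)) {
+            /* acquisito al primo colpo */
+            /* ... sezione critica ... */
+            spin_unlock(&dati_lock);
+    } else {
+            /* qualcun altro lo trattiene: rimandiamo il lavoro */
+    }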
+
+Esempi più comuni
+=================
+
+Guardiamo un semplice esempio: una memoria che associa nomi a numeri.
+La memoria tiene traccia di quanto spesso viene utilizzato ogni oggetto;
+quando è piena, l'oggetto meno usato viene eliminato.
+
+Tutto in contesto utente
+------------------------
+
+Nel primo esempio, supponiamo che tutte le operazioni avvengano in contesto
+utente (in soldoni, da una chiamata di sistema), quindi possiamo dormire.
+Questo significa che possiamo usare i mutex per proteggere la nostra memoria
+e tutti gli oggetti che contiene. Ecco il codice::
+
+    #include <linux/list.h>
+    #include <linux/slab.h>
+    #include <linux/string.h>
+    #include <linux/mutex.h>
+    #include <asm/errno.h>
+
+    struct object
+    {
+            struct list_head list;
+            int id;
+            char name[32];
+            int popularity;
+    };
+
+    /* Protects the cache, cache_num, and the objects within it */
+    static DEFINE_MUTEX(cache_lock);
+    static LIST_HEAD(cache);
+    static unsigned int cache_num = 0;
+    #define MAX_CACHE_SIZE 10
+
+    /* Must be holding cache_lock */
+    static struct object *__cache_find(int id)
+    {
+            struct object *i;
+
+            list_for_each_entry(i, &cache, list)
+                    if (i->id == id) {
+                            i->popularity++;
+                            return i;
+                    }
+            return NULL;
+    }
+
+    /* Must be holding cache_lock */
+    static void __cache_delete(struct object *obj)
+    {
+            BUG_ON(!obj);
+            list_del(&obj->list);
+            kfree(obj);
+            cache_num--;
+    }
+
+    /* Must be holding cache_lock */
+    static void __cache_add(struct object *obj)
+    {
+            list_add(&obj->list, &cache);
+            if (++cache_num > MAX_CACHE_SIZE) {
+                    struct object *i, *outcast = NULL;
+                    list_for_each_entry(i, &cache, list) {
+                            if (!outcast || i->popularity < outcast->popularity)
+                                    outcast = i;
+                    }
+                    __cache_delete(outcast);
+            }
+    }
+
+    int cache_add(int id, const char *name)
+    {
+            struct object *obj;
+
+            if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL)
+                    return -ENOMEM;
+
+            strlcpy(obj->name, name, sizeof(obj->name));
+            obj->id = id;
+            obj->popularity = 0;
+
+            mutex_lock(&cache_lock);
+            __cache_add(obj);
+            mutex_unlock(&cache_lock);
+            return 0;
+    }
+
+    void cache_delete(int id)
+    {
+            mutex_lock(&cache_lock);
+            __cache_delete(__cache_find(id));
+            mutex_unlock(&cache_lock);
+    }
+
+    int cache_find(int id, char *name)
+    {
+            struct object *obj;
+            int ret = -ENOENT;
+
+            mutex_lock(&cache_lock);
+            obj = __cache_find(id);
+            if (obj) {
+                    ret = 0;
+                    strcpy(name, obj->name);
+            }
+            mutex_unlock(&cache_lock);
+            return ret;
+    }
+
+Da notare che ci assicuriamo sempre di trattenere cache_lock quando
+aggiungiamo, rimuoviamo od ispezioniamo la memoria: sia la struttura
+della memoria che il suo contenuto sono protetti dal *lock*. Questo
+caso è semplice dato che copiamo i dati dall'utente e non permettiamo
+mai loro di accedere direttamente agli oggetti.
+
+C'è una piccola ottimizzazione qui: nella funzione :c:func:`cache_add()`
+impostiamo i campi dell'oggetto prima di acquisire il *lock*. Questo è
+sicuro perché nessun altro potrà accedervi finché non lo inseriremo
+nella memoria.
+
+Accesso dal contesto d'interruzione
+-----------------------------------
+
+Ora consideriamo il caso in cui :c:func:`cache_find()` può essere invocata
+dal contesto d'interruzione: sia hardware che software. Un esempio potrebbe
+essere un timer che elimina oggetti dalla memoria.
+
+Qui di seguito troverete la modifica nel formato *patch*: le righe ``-``
+sono quelle rimosse, mentre quelle ``+`` sono quelle aggiunte.
+
+::
+
+ --- cache.c.usercontext 2003-12-09 13:58:54.000000000 +1100
+ +++ cache.c.interrupt 2003-12-09 14:07:49.000000000 +1100
+ @@ -12,7 +12,7 @@
+ int popularity;
+ };
+
+ -static DEFINE_MUTEX(cache_lock);
+ +static DEFINE_SPINLOCK(cache_lock);
+ static LIST_HEAD(cache);
+ static unsigned int cache_num = 0;
+ #define MAX_CACHE_SIZE 10
+ @@ -55,6 +55,7 @@
+ int cache_add(int id, const char *name)
+ {
+ struct object *obj;
+ + unsigned long flags;
+
+ if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ @@ -63,30 +64,33 @@
+ obj->id = id;
+ obj->popularity = 0;
+
+ - mutex_lock(&cache_lock);
+ + spin_lock_irqsave(&cache_lock, flags);
+ __cache_add(obj);
+ - mutex_unlock(&cache_lock);
+ + spin_unlock_irqrestore(&cache_lock, flags);
+ return 0;
+ }
+
+ void cache_delete(int id)
+ {
+ - mutex_lock(&cache_lock);
+ + unsigned long flags;
+ +
+ + spin_lock_irqsave(&cache_lock, flags);
+ __cache_delete(__cache_find(id));
+ - mutex_unlock(&cache_lock);
+ + spin_unlock_irqrestore(&cache_lock, flags);
+ }
+
+ int cache_find(int id, char *name)
+ {
+ struct object *obj;
+ int ret = -ENOENT;
+ + unsigned long flags;
+
+ - mutex_lock(&cache_lock);
+ + spin_lock_irqsave(&cache_lock, flags);
+ obj = __cache_find(id);
+ if (obj) {
+ ret = 0;
+ strcpy(name, obj->name);
+ }
+ - mutex_unlock(&cache_lock);
+ + spin_unlock_irqrestore(&cache_lock, flags);
+ return ret;
+ }
+
+Da notare che :c:func:`spin_lock_irqsave()` disabiliterà le interruzioni
+se erano attive, altrimenti non farà niente (quando siamo già in un contesto
+d'interruzione); dunque queste funzioni possono essere chiamate in
+sicurezza da qualsiasi contesto.
+
+Sfortunatamente, :c:func:`cache_add()` invoca :c:func:`kmalloc()` con
+l'opzione ``GFP_KERNEL`` che è permessa solo in contesto utente. Ho supposto
+che :c:func:`cache_add()` venga chiamata dal contesto utente, altrimenti
+questa opzione deve diventare un parametro di :c:func:`cache_add()`.
+
+Esporre gli oggetti al di fuori di questo file
+----------------------------------------------
+
+Se i vostri oggetti contengono più informazioni, potrebbe non essere
+sufficiente copiare i dati avanti e indietro: per esempio, altre parti del
+codice potrebbero avere un puntatore a questi oggetti piuttosto che cercarli
+ogni volta. Questo introduce due problemi.
+
+Il primo problema è che utilizziamo ``cache_lock`` per proteggere gli oggetti:
+dobbiamo renderlo dinamico così che il resto del codice possa usarlo. Questo
+rende la sincronizzazione più complicata dato che non avviene più in un unico
+posto.
+
+Il secondo problema è il problema del ciclo di vita: se un'altra struttura
+mantiene un puntatore ad un oggetto, presumibilmente si aspetta che questo
+puntatore rimanga valido. Sfortunatamente, questo è garantito solo mentre
+si trattiene il *lock*, altrimenti qualcuno potrebbe chiamare
+:c:func:`cache_delete()` o peggio, aggiungere un oggetto che riutilizza lo
+stesso indirizzo.
+
+Dato che c'è un solo *lock*, non potete trattenerlo a vita: altrimenti
+nessun altro potrà eseguire il proprio lavoro.
+
+La soluzione a questo problema è l'uso di un contatore di riferimenti:
+chiunque punti ad un oggetto deve incrementare il contatore, e decrementarlo
+quando il puntatore non viene più usato. Quando il contatore raggiunge lo zero
+significa che non è più usato e l'oggetto può essere rimosso.
+
+Ecco il codice::
+
+ --- cache.c.interrupt 2003-12-09 14:25:43.000000000 +1100
+ +++ cache.c.refcnt 2003-12-09 14:33:05.000000000 +1100
+ @@ -7,6 +7,7 @@
+ struct object
+ {
+ struct list_head list;
+ + unsigned int refcnt;
+ int id;
+ char name[32];
+ int popularity;
+ @@ -17,6 +18,35 @@
+ static unsigned int cache_num = 0;
+ #define MAX_CACHE_SIZE 10
+
+ +static void __object_put(struct object *obj)
+ +{
+ + if (--obj->refcnt == 0)
+ + kfree(obj);
+ +}
+ +
+ +static void __object_get(struct object *obj)
+ +{
+ + obj->refcnt++;
+ +}
+ +
+ +void object_put(struct object *obj)
+ +{
+ + unsigned long flags;
+ +
+ + spin_lock_irqsave(&cache_lock, flags);
+ + __object_put(obj);
+ + spin_unlock_irqrestore(&cache_lock, flags);
+ +}
+ +
+ +void object_get(struct object *obj)
+ +{
+ + unsigned long flags;
+ +
+ + spin_lock_irqsave(&cache_lock, flags);
+ + __object_get(obj);
+ + spin_unlock_irqrestore(&cache_lock, flags);
+ +}
+ +
+ /* Must be holding cache_lock */
+ static struct object *__cache_find(int id)
+ {
+ @@ -35,6 +65,7 @@
+ {
+ BUG_ON(!obj);
+ list_del(&obj->list);
+ + __object_put(obj);
+ cache_num--;
+ }
+
+ @@ -63,6 +94,7 @@
+ strlcpy(obj->name, name, sizeof(obj->name));
+ obj->id = id;
+ obj->popularity = 0;
+ + obj->refcnt = 1; /* The cache holds a reference */
+
+ spin_lock_irqsave(&cache_lock, flags);
+ __cache_add(obj);
+ @@ -79,18 +111,15 @@
+ spin_unlock_irqrestore(&cache_lock, flags);
+ }
+
+ -int cache_find(int id, char *name)
+ +struct object *cache_find(int id)
+ {
+ struct object *obj;
+ - int ret = -ENOENT;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache_lock, flags);
+ obj = __cache_find(id);
+ - if (obj) {
+ - ret = 0;
+ - strcpy(name, obj->name);
+ - }
+ + if (obj)
+ + __object_get(obj);
+ spin_unlock_irqrestore(&cache_lock, flags);
+ - return ret;
+ + return obj;
+ }
+
+Abbiamo incapsulato il contatore di riferimenti nelle tipiche funzioni
+di 'get' e 'put'. Ora possiamo ritornare l'oggetto da :c:func:`cache_find()`
+col vantaggio che l'utente può dormire trattenendo l'oggetto (per esempio,
+:c:func:`copy_to_user()` per copiare il nome verso lo spazio utente).
+
+Un altro punto da notare è che ho detto che il contatore dovrebbe incrementarsi
+per ogni puntatore ad un oggetto: quindi il contatore di riferimenti è 1
+quando l'oggetto viene inserito nella memoria. In altre versioni il framework
+non trattiene un riferimento per sé, ma queste sono più complicate.
+
+Usare operazioni atomiche per il contatore di riferimenti
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In sostanza, :c:type:`atomic_t` viene usato come contatore di riferimenti.
+Ci sono un certo numero di operazioni atomiche definite
+in ``include/asm/atomic.h``: queste sono garantite come atomiche su qualsiasi
+processore del sistema, quindi non sono necessari i *lock*. In questo caso è
+più semplice rispetto all'uso degli spinlock, benché l'uso degli spinlock
+sia più elegante per casi non banali. Le funzioni :c:func:`atomic_inc()` e
+:c:func:`atomic_dec_and_test()` vengono usate al posto dei tipici operatori di
+incremento e decremento, e i *lock* non sono più necessari per proteggere il
+contatore stesso.
+
+::
+
+ --- cache.c.refcnt 2003-12-09 15:00:35.000000000 +1100
+ +++ cache.c.refcnt-atomic 2003-12-11 15:49:42.000000000 +1100
+ @@ -7,7 +7,7 @@
+ struct object
+ {
+ struct list_head list;
+ - unsigned int refcnt;
+ + atomic_t refcnt;
+ int id;
+ char name[32];
+ int popularity;
+ @@ -18,33 +18,15 @@
+ static unsigned int cache_num = 0;
+ #define MAX_CACHE_SIZE 10
+
+ -static void __object_put(struct object *obj)
+ -{
+ - if (--obj->refcnt == 0)
+ - kfree(obj);
+ -}
+ -
+ -static void __object_get(struct object *obj)
+ -{
+ - obj->refcnt++;
+ -}
+ -
+ void object_put(struct object *obj)
+ {
+ - unsigned long flags;
+ -
+ - spin_lock_irqsave(&cache_lock, flags);
+ - __object_put(obj);
+ - spin_unlock_irqrestore(&cache_lock, flags);
+ + if (atomic_dec_and_test(&obj->refcnt))
+ + kfree(obj);
+ }
+
+ void object_get(struct object *obj)
+ {
+ - unsigned long flags;
+ -
+ - spin_lock_irqsave(&cache_lock, flags);
+ - __object_get(obj);
+ - spin_unlock_irqrestore(&cache_lock, flags);
+ + atomic_inc(&obj->refcnt);
+ }
+
+ /* Must be holding cache_lock */
+ @@ -65,7 +47,7 @@
+ {
+ BUG_ON(!obj);
+ list_del(&obj->list);
+ - __object_put(obj);
+ + object_put(obj);
+ cache_num--;
+ }
+
+ @@ -94,7 +76,7 @@
+ strlcpy(obj->name, name, sizeof(obj->name));
+ obj->id = id;
+ obj->popularity = 0;
+ - obj->refcnt = 1; /* The cache holds a reference */
+ + atomic_set(&obj->refcnt, 1); /* The cache holds a reference */
+
+ spin_lock_irqsave(&cache_lock, flags);
+ __cache_add(obj);
+ @@ -119,7 +101,7 @@
+ spin_lock_irqsave(&cache_lock, flags);
+ obj = __cache_find(id);
+ if (obj)
+ - __object_get(obj);
+ + object_get(obj);
+ spin_unlock_irqrestore(&cache_lock, flags);
+ return obj;
+ }
+
+Proteggere l'oggetto stesso
+---------------------------
+
+In questo esempio, assumiamo che gli oggetti (ad eccezione del contatore
+di riferimenti) non cambino mai dopo la loro creazione. Se vogliamo permettere
+al nome di cambiare abbiamo tre possibilità:
+
+- Si può togliere static da ``cache_lock`` e dire agli utenti che devono
+ trattenere il *lock* prima di modificare il nome di un oggetto.
+
+- Si può fornire una funzione :c:func:`cache_obj_rename()` che prende il
+ *lock* e cambia il nome per conto del chiamante; si dirà poi agli utenti
+ di usare questa funzione.
+
+- Si può decidere che ``cache_lock`` protegge solo la memoria stessa, ed
+ un altro *lock* è necessario per la protezione del nome.
+
+Teoricamente, possiamo avere un *lock* per ogni campo e per ogni oggetto.
+In pratica, le varianti più comuni sono:
+
+- un *lock* che protegge l'infrastruttura (la lista ``cache`` di questo
+ esempio) e gli oggetti. Questo è quello che abbiamo fatto finora.
+
+- un *lock* che protegge l'infrastruttura (inclusi i puntatori alla lista
+ negli oggetti), e un *lock* nell'oggetto per proteggere il resto
+ dell'oggetto stesso.
+
+- *lock* multipli per proteggere l'infrastruttura (per esempio un *lock*
+ per ogni lista), possibilmente con un *lock* per oggetto.
+
+Qui di seguito un'implementazione con "un lock per oggetto":
+
+::
+
+ --- cache.c.refcnt-atomic 2003-12-11 15:50:54.000000000 +1100
+ +++ cache.c.perobjectlock 2003-12-11 17:15:03.000000000 +1100
+ @@ -6,11 +6,17 @@
+
+ struct object
+ {
+ + /* These two protected by cache_lock. */
+ struct list_head list;
+ + int popularity;
+ +
+ atomic_t refcnt;
+ +
+ + /* Doesn't change once created. */
+ int id;
+ +
+ + spinlock_t lock; /* Protects the name */
+ char name[32];
+ - int popularity;
+ };
+
+ static DEFINE_SPINLOCK(cache_lock);
+ @@ -77,6 +84,7 @@
+ obj->id = id;
+ obj->popularity = 0;
+ atomic_set(&obj->refcnt, 1); /* The cache holds a reference */
+ + spin_lock_init(&obj->lock);
+
+ spin_lock_irqsave(&cache_lock, flags);
+ __cache_add(obj);
+
+Da notare che ho deciso che il contatore di popolarità dovesse essere
+protetto da ``cache_lock`` piuttosto che dal *lock* dell'oggetto; questo
+perché è logicamente parte dell'infrastruttura (come
+:c:type:`struct list_head <list_head>` nell'oggetto). In questo modo,
+in :c:func:`__cache_add()`, non ho bisogno di trattenere il *lock* di ogni
+oggetto mentre si cerca il meno popolare.
+
+Ho anche deciso che il campo id è immutabile, quindi non ho bisogno di
+trattenere il lock dell'oggetto quando si usa :c:func:`__cache_find()`
+per leggere questo campo; il *lock* dell'oggetto è usato solo dal chiamante
+che vuole leggere o scrivere il campo name.
+
+Inoltre, da notare che ho aggiunto un commento che descrive i dati che sono
+protetti dal *lock*. Questo è estremamente importante in quanto descrive il
+comportamento del codice, che altrimenti sarebbe di difficile comprensione
+leggendo solamente il codice. E come dice Alan Cox: “Lock data, not code”.
+
+Problemi comuni
+===============
+
+.. _`it_deadlock`:
+
+Stallo: semplice ed avanzato
+----------------------------
+
+Esiste un tipo di baco dove un pezzo di codice tenta di trattenere uno
+spinlock due volte: questo rimarrà in attesa attiva per sempre aspettando che
+il *lock* venga rilasciato (in Linux spinlocks, rwlocks e mutex non sono
+ricorsivi).
+Questo è facile da diagnosticare: non è uno di quei problemi che ti tengono
+sveglio 5 notti a parlare da solo.
+
+Un caso un pochino più complesso; immaginate d'avere uno spazio condiviso
+fra un softirq ed il contesto utente. Se usate :c:func:`spin_lock()` per
+proteggerlo, il contesto utente potrebbe essere interrotto da un softirq
+mentre trattiene il lock, da qui il softirq rimarrà in attesa attiva provando
+ad acquisire il *lock* già trattenuto nel contesto utente.
+
+Questi casi sono chiamati stalli (*deadlock*), e come mostrato qui sopra,
+può succedere anche con un solo processore (ma non sui sistemi
+monoprocessore, perché gli spinlock spariscono quando il kernel è compilato
+con ``CONFIG_SMP``\ =n. Nonostante ciò, nel secondo caso avrete comunque
+una corruzione dei dati).
+
+Questi casi sono facili da diagnosticare; sui sistemi multi-processore
+il supervisore (*watchdog*) o l'opzione di compilazione ``DEBUG_SPINLOCK``
+(``include/linux/spinlock.h``) permettono di scovare immediatamente quando
+succedono.
+
+Esiste un caso più complesso che è conosciuto come l'abbraccio della morte;
+questo coinvolge due o più *lock*. Diciamo che avete un vettore di hash in cui
+ogni elemento è uno spinlock a cui è associata una lista di elementi con lo
+stesso hash. In un gestore di interruzioni software, dovete modificare un
+oggetto e spostarlo su un altro hash; quindi dovrete trattenere lo spinlock
+del vecchio hash e di quello nuovo, quindi rimuovere l'oggetto dal vecchio ed
+inserirlo nel nuovo.
+
+Qui abbiamo due problemi. Primo, se il vostro codice prova a spostare un
+oggetto all'interno della stessa lista, otterrete uno stallo visto che
+tenterà di trattenere lo stesso *lock* due volte. Secondo, se la stessa
+interruzione software su un altro processore sta tentando di spostare
+un altro oggetto nella direzione opposta, potrebbe accadere quanto segue:
+
++---------------------------------+---------------------------------+
+| CPU 1 | CPU 2 |
++=================================+=================================+
+| Trattiene *lock* A -> OK | Trattiene *lock* B -> OK |
++---------------------------------+---------------------------------+
+| Trattiene *lock* B -> attesa | Trattiene *lock* A -> attesa |
++---------------------------------+---------------------------------+
+
+Table: Conseguenze
+
+Entrambi i processori rimarranno in attesa attiva sul *lock* per sempre,
+aspettando che l'altro lo rilasci. Sembra e puzza come un blocco totale.
+
+Prevenire gli stalli
+--------------------
+
+I libri di testo vi diranno che se trattenete i *lock* sempre nello stesso
+ordine non avrete mai un simile stallo. La pratica vi dirà che questo
+approccio non funziona all'ingrandirsi del sistema: quando creo un nuovo
+*lock* non ne capisco abbastanza del kernel per dire in quale dei 5000 *lock*
+si incastrerà.
+
+I *lock* migliori sono quelli incapsulati: non vengono esposti nei file di
+intestazione, e non vengono mai trattenuti fuori dallo stesso file. Potete
+rileggere questo codice e vedere che non ci sarà mai uno stallo perché
+non tenterà mai di trattenere un altro *lock* quando lo ha già.
+Le persone che usano il vostro codice non devono nemmeno sapere che voi
+state usando dei *lock*.
+
+Un classico problema deriva dall'uso di *callback* e di *hook*: se li
+chiamate mentre trattenete un *lock*, rischiate uno stallo o un abbraccio
+della morte (chi lo sa cosa farà una *callback*?).
+
+Ossessiva prevenzione degli stalli
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Gli stalli sono un problema, ma non così terribile come la corruzione dei dati.
+Un pezzo di codice trattiene un *lock* di lettura, cerca in una lista,
+fallisce nel trovare quello che vuole, quindi rilascia il *lock* di lettura,
+trattiene un *lock* di scrittura ed inserisce un oggetto; questo genere di
+codice presenta una corsa critica.
+
+Se non riuscite a capire il perché, per favore state alla larga dal mio
+codice.
+
+Corsa fra temporizzatori: un passatempo del kernel
+--------------------------------------------------
+
+I temporizzatori potrebbero avere dei problemi con le corse critiche.
+Considerate una collezione di oggetti (liste, hash, eccetera) dove ogni oggetto
+ha un temporizzatore che sta per distruggerlo.
+
+Se volete eliminare l'intera collezione (diciamo quando rimuovete un modulo),
+potreste fare come segue::
+
+    /* THIS CODE BAD BAD BAD BAD: IF IT WAS ANY WORSE IT WOULD USE
+       HUNGARIAN NOTATION */
+    spin_lock_bh(&list_lock);
+
+    while (list) {
+            struct foo *next = list->next;
+            del_timer(&list->timer);
+            kfree(list);
+            list = next;
+    }
+
+    spin_unlock_bh(&list_lock);
+
+Sooner or later, this will blow up on a multi-processor system, because
+a timer could have gone off just before :c:func:`spin_lock_bh()`; it
+will only get the lock after our :c:func:`spin_unlock_bh()`, and will
+then try to free the element (which has already been freed).
+
+This can be avoided by checking the return value of
+:c:func:`del_timer()`: if it returns 1, the timer has been removed. If
+it returns 0, it means (in this case) that the timer is currently
+running, so we can do the following::
+
+ retry:
+ spin_lock_bh(&list_lock);
+
+ while (list) {
+ struct foo *next = list->next;
+ if (!del_timer(&list->timer)) {
+ /* Give timer a chance to delete this */
+ spin_unlock_bh(&list_lock);
+ goto retry;
+ }
+ kfree(list);
+ list = next;
+ }
+
+ spin_unlock_bh(&list_lock);
+
+Another common problem is deleting timers which restart themselves (by
+calling :c:func:`add_timer()` at the end of their handler). Because
+this is a fairly common case which is prone to races, you should use
+:c:func:`del_timer_sync()` (``include/linux/timer.h``) to handle it.
+It returns the number of times the timer had to be removed before we
+finally stopped it from re-adding itself.
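+
+Following that description, tearing down one self-restarting timer
+(here on the illustrative ``struct foo`` from the earlier examples, and
+assuming no lock that its handler needs is held) reduces to::
+
+    /* Returns only once the handler is no longer running and the
+       timer is no longer queued, even if it kept re-adding itself
+       in the meantime. */
+    del_timer_sync(&obj->timer);
+    kfree(obj);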
+
+Locking speed
+=============
+
+There are three important things to consider when evaluating the speed
+of a piece of code that needs locking. First is concurrency: how many
+things are kept waiting while somebody else holds a lock. Second is the
+time taken to acquire (without contention) and release a lock. Third is
+using fewer, or smarter, locks. I assume here that the lock is used
+fairly often: otherwise, you would not be concerned about efficiency.
+
+Concurrency depends on how long a lock is held: you should hold a lock
+only for as long as strictly necessary, and not an instant longer. In
+the cache example above, we create the object without holding the lock,
+and take the lock only when we are ready to insert it into the list.
+
+Acquisition time depends on how much damage the operation does to the
+pipeline (pipeline stalls) and how likely it is that the current
+processor was also the last one to take the lock (in other words, is
+the lock cache-hot for this processor?): on multi-processor systems
+this likelihood drops off quickly. Consider a 700MHz Intel Pentium III:
+it executes an instruction in about 0.7ns, an atomic increment takes
+about 58ns, taking a lock that is cache-hot on this processor takes
+160ns, and a transfer from another processor's cache takes a further
+170 to 360ns (see Paul McKenney's `Linux Journal RCU
+article <http://www.linuxjournal.com/article.php?sid=6993>`__).
+
+The first two aims conflict: holding a lock for the shortest possible
+time may require splitting it into several locks for the different
+parts (as in our final example with one lock per object), but this
+increases the number of lock acquisitions, and the result is often
+slower than having a single lock. This is another argument in favour of
+simplicity when it comes to locking.
+
+The third point is discussed below: there are some techniques for
+reducing the amount of locking that needs to be done.
+
+Read/Write Lock Variants
+------------------------
+
+Both spinlocks and mutexes have read/write variants: ``rwlock_t`` and
+:c:type:`struct rw_semaphore <rw_semaphore>`. These divide users into
+two classes: readers and writers. If you are only reading the data, you
+can take the read lock, but to write you need the write lock. Many
+people may hold the read lock at once, but only one writer at a time
+may hold the write lock.
+
+If your code divides neatly into reader code and writer code (as ours
+does), and the read lock is held for significant lengths of time, then
+using this kind of lock can help. They are slightly slower than the
+normal variants, though, so in practice ``rwlock_t`` is usually not
+worth it.
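+
+As a minimal sketch of the pattern (the names are invented for
+illustration)::
+
+    static DEFINE_RWLOCK(cache_rwlock);
+
+    /* Readers may run concurrently with one another. */
+    read_lock(&cache_rwlock);
+    obj = search_cache(id);
+    read_unlock(&cache_rwlock);
+
+    /* A writer excludes readers and other writers. */
+    write_lock(&cache_rwlock);
+    list_add(&new->list, &cache);
+    write_unlock(&cache_rwlock);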
+
+Avoiding Locks: Read Copy Update
+--------------------------------
+
+There is a special method of read/write synchronization called Read
+Copy Update. Using RCU, readers can forget about taking locks
+altogether; since in our example we expect many more readers than
+writers (otherwise this cache would be a waste of time), this mechanism
+is a candidate for such an optimization.
+
+How do we get rid of the read locks? Getting rid of them means that a
+writer may change the list right under the readers' noses. That is
+actually quite simple: we can read a linked list while elements are
+being added, as long as the writer adds them very carefully. For
+example, adding ``new`` to a linked list called ``list``::
+
+ new->next = list->next;
+ wmb();
+ list->next = new;
+
+The :c:func:`wmb()` here is a write memory barrier. It ensures that the
+first operation (setting the new element's ``next`` pointer) is
+complete and visible to all processors before the second operation
+(putting the new element into the list) is performed. This matters
+because modern compilers and modern processors can both reorder
+instructions unless told otherwise: we want readers either not to see
+the new element at all, or to see it correctly, with its ``next``
+pointer already pointing at the rest of the list.
+
+Fortunately, there is a function that does this for standard
+:c:type:`struct list_head <list_head>` lists: :c:func:`list_add_rcu()`
+(``include/linux/list.h``).
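+
+So the writer side of an insertion into our cache, under whatever lock
+serializes the writers, could be sketched as::
+
+    spin_lock(&cache_lock);
+    list_add_rcu(&new->list, &cache);
+    spin_unlock(&cache_lock);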
+
+Removing an element from the list is even simpler: we replace the
+pointer to the old element with a pointer to its successor, and readers
+will either see it or skip over it.
+
+::
+
+ list->next = old->next;
+
+The function :c:func:`list_del_rcu()` (``include/linux/list.h``) does
+exactly this (the normal version poisons the old object, which we do
+not want here).
+
+Readers must also be careful: some processors may follow the ``next``
+pointer and start reading the contents of the next element too early,
+without noticing that the pre-fetched contents are wrong when the
+``next`` pointer changes behind their backs. Once again, there is a
+function that comes to your aid: :c:func:`list_for_each_entry_rcu()`
+(``include/linux/list.h``). Writers, of course, can just use
+:c:func:`list_for_each_entry()`, since there cannot be two simultaneous
+writers.
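+
+Putting the reader side together (anticipating the
+:c:func:`rcu_read_lock()`/:c:func:`rcu_read_unlock()` pair explained
+below), a lookup might be sketched like this::
+
+    struct object *i;
+
+    rcu_read_lock();
+    list_for_each_entry_rcu(i, &cache, list) {
+            if (i->id == id) {
+                    /* ... use i while still inside the read side ... */
+                    break;
+            }
+    }
+    rcu_read_unlock();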
+
+Our final dilemma is this: when can we actually destroy the removed
+element? Remember, a reader might be stepping through this very element
+right now: if we free it and the ``next`` pointer changes, the reader
+will jump off into garbage and crash. We need to wait until every
+reader that was traversing the list has finished. We use
+:c:func:`call_rcu()` to register a callback that will actually destroy
+the object once all current readers have finished. Alternatively,
+:c:func:`synchronize_rcu()` may be used to block until all current
+readers are done inspecting the list.
+
+But how does RCU know when the readers are finished? The mechanism is
+this: first, readers only access the list between a
+:c:func:`rcu_read_lock()`/:c:func:`rcu_read_unlock()` pair, which
+disables preemption so that readers will not be suspended while reading
+the list.
+
+RCU then waits until every other processor has slept at least once;
+since readers cannot sleep, we can deduce that any reader that was
+consulting the list during the removal has already finished, and the
+callback is triggered. The real RCU code is rather more optimized than
+this, but this is the fundamental idea.
+
+::
+
+ --- cache.c.perobjectlock 2003-12-11 17:15:03.000000000 +1100
+ +++ cache.c.rcupdate 2003-12-11 17:55:14.000000000 +1100
+ @@ -1,15 +1,18 @@
+ #include <linux/list.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
+ +#include <linux/rcupdate.h>
+ #include <linux/mutex.h>
+ #include <asm/errno.h>
+
+ struct object
+ {
+ - /* These two protected by cache_lock. */
+ + /* This is protected by RCU */
+ struct list_head list;
+ int popularity;
+
+ + struct rcu_head rcu;
+ +
+ atomic_t refcnt;
+
+ /* Doesn't change once created. */
+ @@ -40,7 +43,7 @@
+ {
+ struct object *i;
+
+ - list_for_each_entry(i, &cache, list) {
+ + list_for_each_entry_rcu(i, &cache, list) {
+ if (i->id == id) {
+ i->popularity++;
+ return i;
+ @@ -49,19 +52,25 @@
+ return NULL;
+ }
+
+ +/* Final discard done once we know no readers are looking. */
+ +static void cache_delete_rcu(void *arg)
+ +{
+ + object_put(arg);
+ +}
+ +
+ /* Must be holding cache_lock */
+ static void __cache_delete(struct object *obj)
+ {
+ BUG_ON(!obj);
+ - list_del(&obj->list);
+ - object_put(obj);
+ + list_del_rcu(&obj->list);
+ cache_num--;
+ + call_rcu(&obj->rcu, cache_delete_rcu);
+ }
+
+ /* Must be holding cache_lock */
+ static void __cache_add(struct object *obj)
+ {
+ - list_add(&obj->list, &cache);
+ + list_add_rcu(&obj->list, &cache);
+ if (++cache_num > MAX_CACHE_SIZE) {
+ struct object *i, *outcast = NULL;
+ list_for_each_entry(i, &cache, list) {
+ @@ -104,12 +114,11 @@
+ struct object *cache_find(int id)
+ {
+ struct object *obj;
+ - unsigned long flags;
+
+ - spin_lock_irqsave(&cache_lock, flags);
+ + rcu_read_lock();
+ obj = __cache_find(id);
+ if (obj)
+ object_get(obj);
+ - spin_unlock_irqrestore(&cache_lock, flags);
+ + rcu_read_unlock();
+ return obj;
+ }
+
+Note that readers modify the ``popularity`` member in
+:c:func:`__cache_find()`, which now holds no lock. One solution would
+be to make it an ``atomic_t``, but for this usage we do not really care
+about such races: an approximate result is good enough, so I did not
+change it.
+
+The result is that :c:func:`cache_find()` requires no synchronization
+with any other function, so it is almost as fast on a multi-processor
+system as it would be on a uni-processor one.
+
+There is a further optimization possible here: remember our original
+cache code, where there were no reference counts and the caller simply
+held the lock before accessing an object? This is still possible: if
+you hold a lock, nobody can delete the object, so you do not need to
+get and put the reference count.
+
+Now, because the 'read lock' in RCU does nothing more than disable
+preemption, a caller that always has preemption disabled between
+calling :c:func:`cache_find()` and :c:func:`object_put()` does not need
+to get and put the reference count. We could expose
+:c:func:`__cache_find()` by making it non-static, and such callers
+could call it directly.
+
+The benefit here is that the reference count is never written to: the
+object is not altered in any way, which makes it much faster on
+multi-processor systems thanks to their caches.
+
+.. _`it_per-cpu`:
+
+Per-CPU data
+------------
+
+Another widely used technique for avoiding synchronization is to
+duplicate the information for each processor. For example, if you want
+to keep a count of something, you could use a spinlock and a single
+counter. Nice and simple.
+
+If that is too slow (it usually is not, but if you have proved that it
+really is), you could instead use a counter for each processor, so that
+none of them needs mutual exclusion. See :c:func:`DEFINE_PER_CPU()`,
+:c:func:`get_cpu_var()` and :c:func:`put_cpu_var()`
+(``include/linux/percpu.h``).
+
+Of particular use for simple per-processor counters are the ``local_t``
+type and the :c:func:`cpu_local_inc()` function with its relatives; on
+some architectures they are even more efficient
+(``include/asm/local.h``).
+
+Note that there is no simple, reliable way to get the exact value of
+such a counter without introducing more locks. For some uses this is
+not a problem.
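+
+A sketch of such a per-CPU counter (the summing loop at the end is the
+part that can only ever be approximate)::
+
+    static DEFINE_PER_CPU(unsigned long, hit_count);
+
+    /* Update side: no lock, only preemption disabled. */
+    get_cpu_var(hit_count)++;
+    put_cpu_var(hit_count);
+
+    /* Approximate read side. */
+    unsigned long sum = 0;
+    int cpu;
+
+    for_each_possible_cpu(cpu)
+            sum += per_cpu(hit_count, cpu);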
+
+Data that is mostly used by an interrupt handler
+------------------------------------------------
+
+If data is always accessed from within the same interrupt handler, you
+do not need a lock at all: the kernel already guarantees that the
+handler will not run simultaneously on multiple processors.
+
+Manfred Spraul points out that you can still do this even if the data
+is occasionally accessed from user context or from a software
+interrupt. The interrupt handler does not take any lock, and all other
+accesses are done like this::
+
+ spin_lock(&lock);
+ disable_irq(irq);
+ ...
+ enable_irq(irq);
+ spin_unlock(&lock);
+
+The :c:func:`disable_irq()` prevents the interrupt handler from running
+(and waits for it to finish if it is currently running on another
+processor). The spinlock prevents any other simultaneous accesses.
+Naturally, this is slower than a simple :c:func:`spin_lock_irq()` call,
+so it only makes sense if this kind of access is extremely rare.
+
+.. _`it_sleeping-things`:
+
+Which functions are safe to call from interrupts?
+=================================================
+
+Many functions in the kernel sleep (that is, call ``schedule()``)
+directly or indirectly: you can never call them while holding a
+spinlock or with preemption disabled. This also means you need to be in
+user context: calling them from interrupt context is illegal.
+
+Some functions that sleep
+-------------------------
+
+The most common ones are listed below, but you usually have to read the
+code to find out whether other calls are safe. If everyone else who
+calls a function can sleep, you probably need to be able to sleep too.
+In particular, registration and deregistration functions usually expect
+to be called from user context, and can sleep.
+
+- Accesses to userspace:
+
+ - :c:func:`copy_from_user()`
+
+ - :c:func:`copy_to_user()`
+
+ - :c:func:`get_user()`
+
+ - :c:func:`put_user()`
+
+- :c:func:`kmalloc(GFP_KERNEL) <kmalloc>`
+
+- :c:func:`mutex_lock_interruptible()` and
+ :c:func:`mutex_lock()`
+
+  There is also :c:func:`mutex_trylock()`, which does not sleep.
+  Still, it must not be used in interrupt context, since its
+  implementation is not safe there. Likewise, :c:func:`mutex_unlock()`
+  never sleeps, but it cannot be used in interrupt context either,
+  because a mutex must be released by the same process that acquired
+  it.
+
+Some functions that do not sleep
+--------------------------------
+
+Some functions can safely be called from any context, or while holding
+almost any lock.
+
+- :c:func:`printk()`
+
+- :c:func:`kfree()`
+
+- :c:func:`add_timer()` and :c:func:`del_timer()`
+
+Mutex API reference
+===================
+
+.. kernel-doc:: include/linux/mutex.h
+ :internal:
+
+.. kernel-doc:: kernel/locking/mutex.c
+ :export:
+
+Futex API reference
+===================
+
+.. kernel-doc:: kernel/futex.c
+ :internal:
+
+Further reading
+===============
+
+- ``Documentation/locking/spinlocks.txt``: Linus Torvalds' guide to the
+  kernel's spinlocks.
+
+- Unix Systems for Modern Architectures: Symmetric Multiprocessing and
+  Caching for Kernel Programmers.
+
+  Curt Schimmel's introduction to kernel-level synchronization is
+  really excellent (it is not written for Linux, but nearly everything
+  applies). The book is expensive, but truly worth every penny for
+  understanding multi-processor locking. [ISBN: 0201633388]
+
+Thanks
+======
+
+Thanks to Telsa Gwynne for converting this guide to DocBook, cleaning
+it up and adding a bit of style.
+
+Thanks to Martin Pool, Philipp Rumpf, Stephen Rothwell, Paul Mackerras,
+Ruedi Aschwanden, Alan Cox, Manfred Spraul, Tim Waugh, Pete Zaitcev,
+James Morris, Robert Love, Paul McKenney, John Ashby for proofreading,
+correcting, flaming and commenting.
+
+Thanks to the cabal for having no influence on this document.
+
+Glossary
+========
+
+preemption
+  Prior to kernel 2.5, or when ``CONFIG_PREEMPT`` is unset, processes
+  in user context do not preempt one another (in effect, a process
+  keeps the processor until it gives it up, except for interrupts).
+  With the addition of ``CONFIG_PREEMPT`` in 2.5.4 this changed: when
+  in user context, higher-priority processes can cut in; spinlocks were
+  changed to disable preemption, even on uni-processor systems.
+
+bh
+  Bottom half: for historical reasons, functions with '_bh' in their
+  name now refer to any software interrupt; for example,
+  :c:func:`spin_lock_bh()` blocks any software interrupt on the current
+  processor. Bottom halves are deprecated and will probably be replaced
+  by tasklets. Only one bottom half can run at any given time.
+
+interrupt context
+  Not user context: this is where hardware and software interrupts are
+  processed. The :c:func:`in_interrupt()` macro returns true here.
+
+user context
+  The kernel running on behalf of a particular process (for example, a
+  system call) or of a kernel thread. You can tell which process with
+  the ``current`` macro. Not to be confused with userspace. It can be
+  interrupted by both software and hardware interrupts.
+
+hardware interrupt
+  A hardware interrupt request. :c:func:`in_irq()` returns true inside
+  a hardware interrupt handler.
+
+software interrupt / softirq
+  A software interrupt handler: :c:func:`in_irq()` returns false, while
+  :c:func:`in_softirq()` returns true. Tasklets and softirqs are both
+  considered 'software interrupts'.
+
+  Strictly speaking, a softirq is one of up to 32 software interrupts
+  that can run on several processors at once. The term is sometimes
+  also used for tasklets (that is, for all software interrupts).
+
+uni-processor / UP
+  A single processor, that is, not SMP (``CONFIG_SMP=n``).
+
+multi-processor / SMP
+  (Symmetric Multi-Processor) kernels compiled for multi-processor
+  systems (``CONFIG_SMP=y``).
+
+userspace
+  A process executing its own code outside the kernel.
+
+tasklet
+  A dynamically registrable software interrupt that is guaranteed to
+  run on only one processor at a time.
+
+timer
+  A dynamically registrable software interrupt that runs at (or around)
+  a given time. When running, it behaves like a tasklet (in fact, they
+  are invoked from ``TIMER_SOFTIRQ``).
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index 921739d00f69..7f01fb1c1084 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -1891,22 +1891,22 @@ Mandatory barriers control SMP effects on both SMP and UP systems
 	/* assign ownership */
 	desc->status = DEVICE_OWN;
-	/* sync memory before notifying the device via MMIO */
-	wmb();
-
 	/* notify the device of the updated descriptor */
 	writel(DESC_NOTIFY, doorbell);
 }
 	dma_rmb() guarantees that the device has released ownership before
-	we read the data from the descriptor, and dma_wmb() guarantees that
-	the data has been written to the descriptor before the device can see
-	that it now has ownership.  wmb() is needed to guarantee that writes
-	to cache coherent memory have completed before attempting a write to
-	the cache incoherent MMIO region.
-
-	For details on consistent memory, see the Documentation/DMA-API.txt
-	documentation.
+	we read the data from the descriptor, and dma_wmb() guarantees that
+	the data has been written to the descriptor before the device can see
+	that it now has ownership.  Note that there is no need to execute
+	wmb() before writel(), because writel() guarantees that writes to
+	cache coherent memory complete before the write to the MMIO region.
+	The cheaper writel_relaxed() does not provide this guarantee and must
+	not be used here.
+
+	For details on relaxed I/O accessors such as writel_relaxed(), see
+	the "Kernel I/O barrier effects" section; for details on consistent
+	memory, see the Documentation/DMA-API.txt documentation.
 MMIO WRITE BARRIER
diff --git a/Documentation/translations/zh_CN/oops-tracing.txt b/Documentation/translations/zh_CN/oops-tracing.txt
index 41ab53cc0e83..a893f04dfd5d 100644
--- a/Documentation/translations/zh_CN/oops-tracing.txt
+++ b/Documentation/translations/zh_CN/oops-tracing.txt
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/admin-guide/oops-tracing.rst
+Chinese translated version of Documentation/admin-guide/bug-hunting.rst
If you have any comment or update to the content, please contact the
original document maintainer directly. However, if you have a problem
@@ -8,7 +8,7 @@ or if there is a problem with the translation.
Chinese maintainer: Dave Young <hidave.darkstar@gmail.com>
---------------------------------------------------------------------
-Chinese translation of Documentation/admin-guide/oops-tracing.rst
+Chinese translation of Documentation/admin-guide/bug-hunting.rst
 If you want to comment on or update the content of this document, please
 contact the original document maintainer directly. If you have difficulty
 communicating in English, you can also ask the Chinese maintainer for help.
 If this translation is out of date or
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index d10944e619d3..cb8db4f9d097 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -4391,6 +4391,22 @@ all such vmexits.
Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
+7.14 KVM_CAP_S390_HPAGE_1M
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success, -EINVAL if hpage module parameter was not set
+ or cmma is enabled
+
+With this capability, KVM support for memory backing with 1M pages
+through hugetlbfs can be enabled for a VM. After the capability is
+enabled, cmma can't be enabled anymore, and pfmfi and storage key
+interpretation are disabled. If cmma has already been enabled or the
+hpage module parameter is not set to 1, -EINVAL is returned.
+
+While it is generally possible to create a huge page backed VM without
+this capability, the VM will not be able to run.
+
8. Other capabilities.
----------------------
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt
index a16aa2113840..f662d3c530e5 100644
--- a/Documentation/x86/intel_rdt_ui.txt
+++ b/Documentation/x86/intel_rdt_ui.txt
@@ -29,7 +29,11 @@ mount options are:
 L2 and L3 CDP are controlled separately.
RDT features are orthogonal. A particular system may support only
-monitoring, only control, or both monitoring and control.
+monitoring, only control, or both monitoring and control. Cache
+pseudo-locking is a unique way of using cache control to "pin" or
+"lock" data in the cache. Details can be found in
+"Cache Pseudo-Locking".
+
 The mount succeeds if either allocation or monitoring is present, but
only those files and directories supported by the system will be created.
@@ -65,6 +69,29 @@ related to allocation:
some platforms support devices that have their
own settings for cache use which can over-ride
these bits.
+"bit_usage": Annotated capacity bitmasks showing how all
+ instances of the resource are used. The legend is:
+ "0" - Corresponding region is unused. When the system's
+ resources have been allocated and a "0" is found
+ in "bit_usage" it is a sign that resources are
+ wasted.
+			"H" - Corresponding region is used by hardware only
+			      but available for software use. If a resource
+			      has bits set in "shareable_bits" but not all
+			      of these bits appear in the resource groups'
+			      schematas, then the bits appearing in
+			      "shareable_bits" but in no resource group will
+			      be marked as "H".
+ "X" - Corresponding region is available for sharing and
+ used by hardware and software. These are the
+ bits that appear in "shareable_bits" as
+ well as a resource group's allocation.
+ "S" - Corresponding region is used by software
+ and available for sharing.
+ "E" - Corresponding region is used exclusively by
+ one resource group. No sharing allowed.
+ "P" - Corresponding region is pseudo-locked. No
+ sharing allowed.
 Memory bandwidth (MB) subdirectory contains the following files
with respect to allocation:
@@ -151,6 +178,9 @@ All groups contain the following files:
CPUs to/from this group. As with the tasks file a hierarchy is
maintained where MON groups may only include CPUs owned by the
parent CTRL_MON group.
+	When the resource group is in pseudo-locked mode this file will
+ only be readable, reflecting the CPUs associated with the
+ pseudo-locked region.
"cpus_list":
@@ -163,6 +193,21 @@ When control is enabled all CTRL_MON groups will also contain:
A list of all the resources available to this group.
Each resource has its own line and format - see below for details.
+"size":
+ Mirrors the display of the "schemata" file to display the size in
+ bytes of each allocation instead of the bits representing the
+ allocation.
+
+"mode":
+ The "mode" of the resource group dictates the sharing of its
+ allocations. A "shareable" resource group allows sharing of its
+ allocations while an "exclusive" resource group does not. A
+ cache pseudo-locked region is created by first writing
+ "pseudo-locksetup" to the "mode" file before writing the cache
+ pseudo-locked region's schemata to the resource group's "schemata"
+ file. On successful pseudo-locked region creation the mode will
+ automatically change to "pseudo-locked".
+
When monitoring is enabled all MON groups will also contain:
"mon_data":
@@ -379,6 +424,170 @@ L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
L3DATA:0=fffff;1=fffff;2=3c0;3=fffff
L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
+Cache Pseudo-Locking
+--------------------
+CAT enables a user to specify the amount of cache space that an
+application can fill. Cache pseudo-locking builds on the fact that a
+CPU can still read and write data pre-allocated outside its current
+allocated area on a cache hit. With cache pseudo-locking, data can be
+preloaded into a reserved portion of cache that no application can
+fill, and from that point on will only serve cache hits. The cache
+pseudo-locked memory is made accessible to user space where an
+application can map it into its virtual address space and thus have
+a region of memory with reduced average read latency.
+
+The creation of a cache pseudo-locked region is triggered by a user request
+accompanied by a schemata of the region to be pseudo-locked. The cache
+pseudo-locked region is created as follows:
+- Create a CAT allocation CLOSNEW with a CBM matching the schemata
+ from the user of the cache region that will contain the pseudo-locked
+ memory. This region must not overlap with any current CAT allocation/CLOS
+ on the system and no future overlap with this cache region is allowed
+ while the pseudo-locked region exists.
+- Create a contiguous region of memory of the same size as the cache
+ region.
+- Flush the cache, disable hardware prefetchers, disable preemption.
+- Make CLOSNEW the active CLOS and touch the allocated memory to load
+ it into the cache.
+- Set the previous CLOS as active.
+- At this point the closid CLOSNEW can be released - the cache
+ pseudo-locked region is protected as long as its CBM does not appear in
+ any CAT allocation. Even though the cache pseudo-locked region will from
+  this point on not appear in any CBM of any CLOS, an application running with
+  any CLOS will be able to access the memory in the pseudo-locked region, since
+ the region continues to serve cache hits.
+- The contiguous region of memory loaded into the cache is exposed to
+ user-space as a character device.
+
+Cache pseudo-locking increases the probability that data will remain
+in the cache via carefully configuring the CAT feature and controlling
+application behavior. There is no guarantee that data is placed in
+cache. Instructions like INVD, WBINVD, CLFLUSH, etc. can still evict
+"locked" data from cache. Power management C-states may shrink or
+power off cache. Deeper C-states will automatically be restricted on
+pseudo-locked region creation.
+
+It is required that an application using a pseudo-locked region runs
+with affinity to the cores (or a subset of the cores) associated
+with the cache on which the pseudo-locked region resides. A sanity check
+within the code will not allow an application to map pseudo-locked memory
+unless it runs with affinity to cores associated with the cache on which the
+pseudo-locked region resides. The sanity check is only done during the
+initial mmap() handling; there is no enforcement afterwards, and the
+application itself needs to ensure it remains affine to the correct cores.
+
+Pseudo-locking is accomplished in two stages:
+1) During the first stage the system administrator allocates a portion
+ of cache that should be dedicated to pseudo-locking. At this time an
+ equivalent portion of memory is allocated, loaded into allocated
+ cache portion, and exposed as a character device.
+2) During the second stage a user-space application maps (mmap()) the
+ pseudo-locked memory into its address space.
+
+Cache Pseudo-Locking Interface
+------------------------------
+A pseudo-locked region is created using the resctrl interface as follows:
+
+1) Create a new resource group by creating a new directory in /sys/fs/resctrl.
+2) Change the new resource group's mode to "pseudo-locksetup" by writing
+ "pseudo-locksetup" to the "mode" file.
+3) Write the schemata of the pseudo-locked region to the "schemata" file. All
+ bits within the schemata should be "unused" according to the "bit_usage"
+ file.
+
+On successful pseudo-locked region creation the "mode" file will contain
+"pseudo-locked" and a new character device with the same name as the resource
+group will exist in /dev/pseudo_lock. This character device can be mmap()'ed
+by user space in order to obtain access to the pseudo-locked memory region.
+
+An example of cache pseudo-locked region creation and usage can be found below.
+
+Cache Pseudo-Locking Debugging Interface
+----------------------------------------
+The pseudo-locking debugging interface is enabled by default (if
+CONFIG_DEBUG_FS is enabled) and can be found in /sys/kernel/debug/resctrl.
+
+There is no explicit way for the kernel to test if a provided memory
+location is present in the cache. The pseudo-locking debugging interface uses
+the tracing infrastructure to provide two ways to measure cache residency of
+the pseudo-locked region:
+1) Memory access latency using the pseudo_lock_mem_latency tracepoint. Data
+ from these measurements are best visualized using a hist trigger (see
+ example below). In this test the pseudo-locked region is traversed at
+ a stride of 32 bytes while hardware prefetchers and preemption
+ are disabled. This also provides a substitute visualization of cache
+ hits and misses.
+2) Cache hit and miss measurements using model specific precision counters if
+ available. Depending on the levels of cache on the system the pseudo_lock_l2
+ and pseudo_lock_l3 tracepoints are available.
+ WARNING: triggering this measurement uses from two (for just L2
+ measurements) to four (for L2 and L3 measurements) precision counters on
+ the system, if any other measurements are in progress the counters and
+ their corresponding event registers will be clobbered.
+
+When a pseudo-locked region is created a new debugfs directory is created for
+it in debugfs as /sys/kernel/debug/resctrl/<newdir>. A single
+write-only file, pseudo_lock_measure, is present in this directory. The
+measurement on the pseudo-locked region depends on the number, 1 or 2,
+written to this debugfs file. Since the measurements are recorded with the
+tracing infrastructure the relevant tracepoints need to be enabled before the
+measurement is triggered.
+
+Example of latency debugging interface:
+In this example a pseudo-locked region named "newlock" was created. Here is
+how we can measure the latency in cycles of reading from this region and
+visualize this data with a histogram that is available if CONFIG_HIST_TRIGGERS
+is set:
+# :> /sys/kernel/debug/tracing/trace
+# echo 'hist:keys=latency' > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/trigger
+# echo 1 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/enable
+# echo 1 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
+# echo 0 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/enable
+# cat /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/hist
+
+# event histogram
+#
+# trigger info: hist:keys=latency:vals=hitcount:sort=hitcount:size=2048 [active]
+#
+
+{ latency: 456 } hitcount: 1
+{ latency: 50 } hitcount: 83
+{ latency: 36 } hitcount: 96
+{ latency: 44 } hitcount: 174
+{ latency: 48 } hitcount: 195
+{ latency: 46 } hitcount: 262
+{ latency: 42 } hitcount: 693
+{ latency: 40 } hitcount: 3204
+{ latency: 38 } hitcount: 3484
+
+Totals:
+ Hits: 8192
+ Entries: 9
+ Dropped: 0
+
+Example of cache hits/misses debugging:
+In this example a pseudo-locked region named "newlock" was created on the L2
+cache of a platform. Here is how we can obtain details of the cache hits
+and misses using the platform's precision counters.
+
+# :> /sys/kernel/debug/tracing/trace
+# echo 1 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_l2/enable
+# echo 2 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
+# echo 0 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_l2/enable
+# cat /sys/kernel/debug/tracing/trace
+
+# tracer: nop
+#
+# _-----=> irqs-off
+# / _----=> need-resched
+# | / _---=> hardirq/softirq
+# || / _--=> preempt-depth
+# ||| / delay
+# TASK-PID CPU# |||| TIMESTAMP FUNCTION
+# | | | |||| | |
+ pseudo_lock_mea-1672 [002] .... 3132.860500: pseudo_lock_l2: hits=4097 miss=0
+
+
Examples for RDT allocation usage:
Example 1
@@ -502,7 +711,172 @@ siblings and only the real time threads are scheduled on the cores 4-7.
# echo F0 > p0/cpus
-4) Locking between applications
+Example 4
+---------
+
+The resource groups in previous examples were all in the default "shareable"
+mode allowing sharing of their cache allocations. If one resource group
+configures a cache allocation then nothing prevents another resource group
+from overlapping with that allocation.
+
+In this example a new exclusive resource group will be created on a L2 CAT
+system with two L2 cache instances that can be configured with an 8-bit
+capacity bitmask. The new exclusive resource group will be configured to use
+25% of each cache instance.
+
+# mount -t resctrl resctrl /sys/fs/resctrl/
+# cd /sys/fs/resctrl
+
+First, we observe that the default group is configured to allocate the entire
+L2 cache:
+
+# cat schemata
+L2:0=ff;1=ff
+
+We could attempt to create the new resource group at this point, but it will
+fail because of the overlap with the schemata of the default group:
+# mkdir p0
+# echo 'L2:0=0x3;1=0x3' > p0/schemata
+# cat p0/mode
+shareable
+# echo exclusive > p0/mode
+-sh: echo: write error: Invalid argument
+# cat info/last_cmd_status
+schemata overlaps
+
+To ensure that there is no overlap with another resource group the default
+resource group's schemata has to change, making it possible for the new
+resource group to become exclusive.
+# echo 'L2:0=0xfc;1=0xfc' > schemata
+# echo exclusive > p0/mode
+# grep . p0/*
+p0/cpus:0
+p0/mode:exclusive
+p0/schemata:L2:0=03;1=03
+p0/size:L2:0=262144;1=262144
+
+A new resource group will on creation not overlap with an exclusive resource
+group:
+# mkdir p1
+# grep . p1/*
+p1/cpus:0
+p1/mode:shareable
+p1/schemata:L2:0=fc;1=fc
+p1/size:L2:0=786432;1=786432
+
+The bit_usage will reflect how the cache is used:
+# cat info/L2/bit_usage
+0=SSSSSSEE;1=SSSSSSEE
+
+A resource group cannot be forced to overlap with an exclusive resource group:
+# echo 'L2:0=0x1;1=0x1' > p1/schemata
+-sh: echo: write error: Invalid argument
+# cat info/last_cmd_status
+overlaps with exclusive group
+
+Example of Cache Pseudo-Locking
+-------------------------------
+Lock portion of L2 cache from cache id 1 using CBM 0x3. Pseudo-locked
+region is exposed at /dev/pseudo_lock/newlock that can be provided to
+application for argument to mmap().
+
+# mount -t resctrl resctrl /sys/fs/resctrl/
+# cd /sys/fs/resctrl
+
+Ensure that there are bits available that can be pseudo-locked; since only
+unused bits can be pseudo-locked, the bits to be pseudo-locked need to be
+removed from the default resource group's schemata:
+# cat info/L2/bit_usage
+0=SSSSSSSS;1=SSSSSSSS
+# echo 'L2:1=0xfc' > schemata
+# cat info/L2/bit_usage
+0=SSSSSSSS;1=SSSSSS00
+
+Create a new resource group that will be associated with the pseudo-locked
+region, indicate that it will be used for a pseudo-locked region, and
+configure the requested pseudo-locked region capacity bitmask:
+
+# mkdir newlock
+# echo pseudo-locksetup > newlock/mode
+# echo 'L2:1=0x3' > newlock/schemata
+
+On success the resource group's mode will change to pseudo-locked, the
+bit_usage will reflect the pseudo-locked region, and the character device
+exposing the pseudo-locked region will exist:
+
+# cat newlock/mode
+pseudo-locked
+# cat info/L2/bit_usage
+0=SSSSSSSS;1=SSSSSSPP
+# ls -l /dev/pseudo_lock/newlock
+crw------- 1 root root 243, 0 Apr 3 05:01 /dev/pseudo_lock/newlock
+
+/*
+ * Example code to access one page of pseudo-locked cache region
+ * from user space.
+ */
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+/*
+ * It is required that the application runs with affinity to only
+ * cores associated with the pseudo-locked region. Here the cpu
+ * is hardcoded for convenience of example.
+ */
+static int cpuid = 2;
+
+int main(int argc, char *argv[])
+{
+ cpu_set_t cpuset;
+ long page_size;
+ void *mapping;
+ int dev_fd;
+ int ret;
+
+ page_size = sysconf(_SC_PAGESIZE);
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpuid, &cpuset);
+ ret = sched_setaffinity(0, sizeof(cpuset), &cpuset);
+ if (ret < 0) {
+ perror("sched_setaffinity");
+ exit(EXIT_FAILURE);
+ }
+
+ dev_fd = open("/dev/pseudo_lock/newlock", O_RDWR);
+ if (dev_fd < 0) {
+ perror("open");
+ exit(EXIT_FAILURE);
+ }
+
+ mapping = mmap(0, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ dev_fd, 0);
+ if (mapping == MAP_FAILED) {
+ perror("mmap");
+ close(dev_fd);
+ exit(EXIT_FAILURE);
+ }
+
+ /* Application interacts with pseudo-locked memory @mapping */
+
+ ret = munmap(mapping, page_size);
+ if (ret < 0) {
+ perror("munmap");
+ close(dev_fd);
+ exit(EXIT_FAILURE);
+ }
+
+ close(dev_fd);
+ exit(EXIT_SUCCESS);
+}
+
+Locking between applications
+----------------------------
Certain operations on the resctrl filesystem, composed of read/writes
to/from multiple files, must be atomic.
@@ -510,7 +884,7 @@ to/from multiple files, must be atomic.
As an example, the allocation of an exclusive reservation of L3 cache
involves:
- 1. Read the cbmmasks from each directory
+ 1. Read the cbmmasks from each directory or the per-resource "bit_usage"
2. Find a contiguous set of bits in the global CBM bitmask that is clear
in any of the directory cbmmasks
3. Create a new directory
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index 8d109ef67ab6..ad6d2a80cf05 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -92,9 +92,7 @@ APICs
Timing
notsc
- Don't use the CPU time stamp counter to read the wall time.
- This can be used to work around timing problems on multiprocessor systems
- with not properly synchronized CPUs.
+ Deprecated, use tsc=unstable instead.
nohpet
Don't use the HPET timer.
@@ -156,6 +154,10 @@ NUMA
If given as an integer, fills all system RAM with N fake nodes
interleaved over physical nodes.
+ numa=fake=<N>U
+ If given as an integer followed by 'U', it will divide each
+ physical node into N emulated nodes.
+
ACPI
acpi=off Don't enable ACPI
diff --git a/Kconfig b/Kconfig
index a90d9f9e268b..48a80beab685 100644
--- a/Kconfig
+++ b/Kconfig
@@ -9,4 +9,24 @@ comment "Compiler: $(CC_VERSION_TEXT)"
source "scripts/Kconfig.include"
-source "arch/$(SRCARCH)/Kconfig"
+source "init/Kconfig"
+
+source "kernel/Kconfig.freezer"
+
+source "fs/Kconfig.binfmt"
+
+source "mm/Kconfig"
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
+
+source "lib/Kconfig.debug"
diff --git a/MAINTAINERS b/MAINTAINERS
index 1b2d54f4dd2b..f72e524445a9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -367,6 +367,12 @@ L: linux-acpi@vger.kernel.org
S: Maintained
F: drivers/acpi/arm64
+ACPI I2C MULTI INSTANTIATE DRIVER
+M: Hans de Goede <hdegoede@redhat.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/i2c-multi-instantiate.c
+
ACPI PMIC DRIVERS
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
M: Len Brown <lenb@kernel.org>
@@ -728,6 +734,14 @@ S: Supported
F: drivers/crypto/ccp/
F: include/linux/ccp.h
+AMD DISPLAY CORE
+M: Harry Wentland <harry.wentland@amd.com>
+M: Leo Li <sunpeng.li@amd.com>
+L: amd-gfx@lists.freedesktop.org
+T: git git://people.freedesktop.org/~agd5f/linux
+S: Supported
+F: drivers/gpu/drm/amd/display/
+
AMD FAM15H PROCESSOR POWER MONITORING DRIVER
M: Huang Rui <ray.huang@amd.com>
L: linux-hwmon@vger.kernel.org
@@ -777,6 +791,14 @@ F: drivers/gpu/drm/amd/include/vi_structs.h
F: drivers/gpu/drm/amd/include/v9_structs.h
F: include/uapi/linux/kfd_ioctl.h
+AMD POWERPLAY
+M: Rex Zhu <rex.zhu@amd.com>
+M: Evan Quan <evan.quan@amd.com>
+L: amd-gfx@lists.freedesktop.org
+S: Supported
+F: drivers/gpu/drm/amd/powerplay/
+T: git git://people.freedesktop.org/~agd5f/linux
+
AMD SEATTLE DEVICE TREE SUPPORT
M: Brijesh Singh <brijeshkumar.singh@amd.com>
M: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
@@ -2264,6 +2286,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
S: Maintained
F: arch/arm64/
+X: arch/arm64/boot/dts/
F: Documentation/arm64/
AS3645A LED FLASH CONTROLLER DRIVER
@@ -2272,6 +2295,14 @@ L: linux-leds@vger.kernel.org
S: Maintained
F: drivers/leds/leds-as3645a.c
+ASAHI KASEI AK7375 LENS VOICE COIL DRIVER
+M: Tianshu Qiu <tian.shu.qiu@intel.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/ak7375.c
+F: Documentation/devicetree/bindings/media/i2c/ak7375.txt
+
ASAHI KASEI AK8974 DRIVER
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-iio@vger.kernel.org
@@ -3513,6 +3544,11 @@ M: Christian Benvenuti <benve@cisco.com>
S: Supported
F: drivers/infiniband/hw/usnic/
+CLANG-FORMAT FILE
+M: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
+S: Maintained
+F: .clang-format
+
CLEANCACHE API
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L: linux-kernel@vger.kernel.org
@@ -4399,6 +4435,12 @@ X: Documentation/spi
X: Documentation/media
T: git git://git.lwn.net/linux.git docs-next
+DOCUMENTATION/ITALIAN
+M: Federico Vaga <federico.vaga@vaga.pv.it>
+L: linux-doc@vger.kernel.org
+S: Maintained
+F: Documentation/translations/it_IT
+
DONGWOON DW9714 LENS VOICE COIL DRIVER
M: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org
@@ -4406,6 +4448,13 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/i2c/dw9714.c
+DONGWOON DW9807 LENS VOICE COIL DRIVER
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/dw9807.c
+
DOUBLETALK DRIVER
M: "James R. Van Zandt" <jrv@vanzandt.mv.com>
L: blinux-list@redhat.com
@@ -4883,7 +4932,8 @@ F: Documentation/gpu/xen-front.rst
DRM TTM SUBSYSTEM
M: Christian Koenig <christian.koenig@amd.com>
-M: Roger He <Hongbo.He@amd.com>
+M: Huang Rui <ray.huang@amd.com>
+M: Junwei Zhang <Jerry.Zhang@amd.com>
T: git git://people.freedesktop.org/~agd5f/linux
S: Maintained
L: dri-devel@lists.freedesktop.org
@@ -5055,6 +5105,18 @@ T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/tuners/e4000*
+EARTH_PT1 MEDIA DRIVER
+M: Akihiro Tsukada <tskd08@gmail.com>
+L: linux-media@vger.kernel.org
+S: Odd Fixes
+F: drivers/media/pci/pt1/
+
+EARTH_PT3 MEDIA DRIVER
+M: Akihiro Tsukada <tskd08@gmail.com>
+L: linux-media@vger.kernel.org
+S: Odd Fixes
+F: drivers/media/pci/pt3/
+
EC100 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -7027,7 +7089,7 @@ M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/ina209
-F: Documentation/devicetree/bindings/i2c/ina209.txt
+F: Documentation/devicetree/bindings/hwmon/ina2xx.txt
F: drivers/hwmon/ina209.c
INA2XX HARDWARE MONITOR DRIVER
@@ -7245,6 +7307,9 @@ F: drivers/dma/iop-adma.c
INTEL IPU3 CSI-2 CIO2 DRIVER
M: Yong Zhi <yong.zhi@intel.com>
M: Sakari Ailus <sakari.ailus@linux.intel.com>
+M: Bingbu Cao <bingbu.cao@intel.com>
+R: Tian Shu Qiu <tian.shu.qiu@intel.com>
+R: Jian Xu Zheng <jian.xu.zheng@intel.com>
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/pci/intel/ipu3/
@@ -7350,7 +7415,7 @@ M: Megha Dey <megha.dey@linux.intel.com>
R: Tim Chen <tim.c.chen@linux.intel.com>
L: linux-crypto@vger.kernel.org
S: Supported
-F: arch/x86/crypto/sha*-mb
+F: arch/x86/crypto/sha*-mb/
F: crypto/mcryptd.c
INTEL TELEMETRY DRIVER
@@ -8315,17 +8380,18 @@ M: Jade Alglave <j.alglave@ucl.ac.uk>
M: Luc Maranget <luc.maranget@inria.fr>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
R: Akira Yokosawa <akiyks@gmail.com>
+R: Daniel Lustig <dlustig@nvidia.com>
L: linux-kernel@vger.kernel.org
+L: linux-arch@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: tools/memory-model/
+F: Documentation/atomic_bitops.txt
+F: Documentation/atomic_t.txt
+F: Documentation/core-api/atomic_ops.rst
+F: Documentation/core-api/refcount-vs-atomic.rst
F: Documentation/memory-barriers.txt
-LINUX SECURITY MODULE (LSM) FRAMEWORK
-M: Chris Wright <chrisw@sous-sol.org>
-L: linux-security-module@vger.kernel.org
-S: Supported
-
LIS3LV02D ACCELEROMETER DRIVER
M: Eric Piel <eric.piel@tremplin-utc.net>
S: Maintained
@@ -8955,6 +9021,14 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/dvb-frontends/stv6111*
+MEDIA DRIVERS FOR STM32 - DCMI
+M: Hugues Fruchet <hugues.fruchet@st.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Supported
+F: Documentation/devicetree/bindings/media/st,stm32-dcmi.txt
+F: drivers/media/platform/stm32/stm32-dcmi.c
+
MEDIA DRIVERS FOR NVIDIA TEGRA - VDE
M: Dmitry Osipenko <digetx@gmail.com>
L: linux-media@vger.kernel.org
@@ -8986,6 +9060,14 @@ F: include/uapi/linux/meye.h
F: include/uapi/linux/ivtv*
F: include/uapi/linux/uvcvideo.h
+MEDIATEK BLUETOOTH DRIVER
+M: Sean Wang <sean.wang@mediatek.com>
+L: linux-bluetooth@vger.kernel.org
+L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
+F: drivers/bluetooth/btmtkuart.c
+
MEDIATEK CIR DRIVER
M: Sean Wang <sean.wang@mediatek.com>
S: Maintained
@@ -9158,6 +9240,7 @@ S: Supported
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlxsw/
+F: tools/testing/selftests/drivers/net/mlxsw/
MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
M: mlxsw@mellanox.com
@@ -9345,7 +9428,6 @@ F: drivers/media/platform/atmel/atmel-isc-regs.h
F: devicetree/bindings/media/atmel-isc.txt
MICROCHIP / ATMEL NAND DRIVER
-M: Wenyou Yang <wenyou.yang@microchip.com>
M: Josh Wu <rainyfeeling@outlook.com>
L: linux-mtd@lists.infradead.org
S: Supported
@@ -9637,6 +9719,14 @@ F: Documentation/devicetree/bindings/media/i2c/mt9v032.txt
F: drivers/media/i2c/mt9v032.c
F: include/media/i2c/mt9v032.h
+MT9V111 APTINA CAMERA SENSOR
+M: Jacopo Mondi <jacopo@jmondi.org>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.txt
+F: drivers/media/i2c/mt9v111.c
+
MULTIFUNCTION DEVICES (MFD)
M: Lee Jones <lee.jones@linaro.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd.git
@@ -9681,6 +9771,12 @@ L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/musb/
+MXL301RF MEDIA DRIVER
+M: Akihiro Tsukada <tskd08@gmail.com>
+L: linux-media@vger.kernel.org
+S: Odd Fixes
+F: drivers/media/tuners/mxl301rf*
+
MXL5007T MEDIA DRIVER
M: Michael Krufky <mkrufky@linuxtv.org>
L: linux-media@vger.kernel.org
@@ -9749,12 +9845,6 @@ F: drivers/scsi/mac_scsi.*
F: drivers/scsi/sun3_scsi.*
F: drivers/scsi/sun3_scsi_vme.c
-NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
-M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
-L: linux-scsi@vger.kernel.org
-S: Maintained
-F: drivers/scsi/NCR_D700.*
-
NCSI LIBRARY:
M: Samuel Mendoza-Jonas <sam@mendozajonas.com>
S: Maintained
@@ -10463,6 +10553,14 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/i2c/ov13858.c
+OMNIVISION OV2680 SENSOR DRIVER
+M: Rui Miguel Silva <rmfrfs@gmail.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/ov2680.c
+F: Documentation/devicetree/bindings/media/i2c/ov2680.txt
+
OMNIVISION OV2685 SENSOR DRIVER
M: Shunqian Zheng <zhengsq@rock-chips.com>
L: linux-media@vger.kernel.org
@@ -11256,7 +11354,7 @@ F: Documentation/devicetree/bindings/pinctrl/fsl,*
PIN CONTROLLER - INTEL
M: Mika Westerberg <mika.westerberg@linux.intel.com>
-M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
S: Maintained
F: drivers/pinctrl/intel/
@@ -11766,6 +11864,18 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlge/
+QM1D1B0004 MEDIA DRIVER
+M: Akihiro Tsukada <tskd08@gmail.com>
+L: linux-media@vger.kernel.org
+S: Odd Fixes
+F: drivers/media/tuners/qm1d1b0004*
+
+QM1D1C0042 MEDIA DRIVER
+M: Akihiro Tsukada <tskd08@gmail.com>
+L: linux-media@vger.kernel.org
+S: Odd Fixes
+F: drivers/media/tuners/qm1d1c0042*
+
QNX4 FILESYSTEM
M: Anders Larsen <al@alarsen.net>
W: http://www.alarsen.net/linux/qnx4fs/
@@ -11814,7 +11924,7 @@ L: linux-media@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/media/qcom,camss.txt
F: Documentation/media/v4l-drivers/qcom_camss.rst
-F: drivers/media/platform/qcom/camss-8x16/
+F: drivers/media/platform/qcom/camss/
QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
M: Ilia Lin <ilia.lin@gmail.com>
@@ -12037,9 +12147,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: Documentation/RCU/
X: Documentation/RCU/torture.txt
F: include/linux/rcu*
-X: include/linux/srcu.h
+X: include/linux/srcu*.h
F: kernel/rcu/
-X: kernel/torture.c
+X: kernel/rcu/srcu*.c
REAL TIME CLOCK (RTC) SUBSYSTEM
M: Alessandro Zummo <a.zummo@towertech.it>
@@ -12064,6 +12174,13 @@ S: Maintained
F: sound/soc/codecs/rt*
F: include/sound/rt*.h
+REALTEK RTL83xx SMI DSA ROUTER CHIPS
+M: Linus Walleij <linus.walleij@linaro.org>
+S: Maintained
+F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
+F: drivers/net/dsa/realtek-smi*
+F: drivers/net/dsa/rtl83*
+
REGISTER MAP ABSTRACTION
M: Mark Brown <broonie@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -12171,6 +12288,8 @@ S: Maintained
F: Documentation/rfkill.txt
F: Documentation/ABI/stable/sysfs-class-rfkill
F: net/rfkill/
+F: include/linux/rfkill.h
+F: include/uapi/linux/rfkill.h
RHASHTABLE
M: Thomas Graf <tgraf@suug.ch>
@@ -12178,7 +12297,9 @@ M: Herbert Xu <herbert@gondor.apana.org.au>
L: netdev@vger.kernel.org
S: Maintained
F: lib/rhashtable.c
+F: lib/test_rhashtable.c
F: include/linux/rhashtable.h
+F: include/linux/rhashtable-types.h
RICOH R5C592 MEMORYSTICK DRIVER
M: Maxim Levitsky <maximlevitsky@gmail.com>
@@ -12400,7 +12521,6 @@ F: drivers/pci/hotplug/s390_pci_hpc.c
S390 VFIO-CCW DRIVER
M: Cornelia Huck <cohuck@redhat.com>
-M: Dong Jia Shi <bjsdjshi@linux.ibm.com>
M: Halil Pasic <pasic@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: kvm@vger.kernel.org
@@ -12791,6 +12911,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
W: http://kernsec.org/
S: Supported
F: security/
+X: security/selinux/
SELINUX SECURITY MODULE
M: Paul Moore <paul@paul-moore.com>
@@ -12867,6 +12988,14 @@ W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: net/smc/
+SHARP RJ54N1CB0C SENSOR DRIVER
+M: Jacopo Mondi <jacopo@jmondi.org>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Odd fixes
+F: drivers/media/i2c/rj54n1cb0c.c
+F: include/media/i2c/rj54n1cb0c.h
+
SH_VEU V4L2 MEM2MEM DRIVER
L: linux-media@vger.kernel.org
S: Orphan
@@ -13082,8 +13211,8 @@ L: linux-kernel@vger.kernel.org
W: http://www.rdrop.com/users/paulmck/RCU/
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
-F: include/linux/srcu.h
-F: kernel/rcu/srcu.c
+F: include/linux/srcu*.h
+F: kernel/rcu/srcu*.c
SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus)
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
@@ -13579,6 +13708,13 @@ L: linux-block@vger.kernel.org
S: Maintained
F: drivers/block/skd*[ch]
+STI AUDIO (ASoC) DRIVERS
+M: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
+F: sound/soc/sti/
+
STI CEC DRIVER
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
S: Maintained
@@ -13592,6 +13728,14 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/stk1160/
+STM32 AUDIO (ASoC) DRIVERS
+M: Olivier Moysan <olivier.moysan@st.com>
+M: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/sound/st,stm32-*.txt
+F: sound/soc/stm/
+
STM32 TIMER/LPTIMER DRIVERS
M: Fabrice Gasnier <fabrice.gasnier@st.com>
S: Maintained
@@ -13868,6 +14012,12 @@ F: include/uapi/linux/tc_act/
F: include/uapi/linux/tc_ematch/
F: net/sched/
+TC90522 MEDIA DRIVER
+M: Akihiro Tsukada <tskd08@gmail.com>
+L: linux-media@vger.kernel.org
+S: Odd Fixes
+F: drivers/media/dvb-frontends/tc90522*
+
TCP LOW PRIORITY MODULE
M: "Wong Hoi Sing, Edison" <hswong3i@gmail.com>
M: "Hung Hing Lun, Mike" <hlhung3i@gmail.com>
@@ -14062,6 +14212,13 @@ M: Laxman Dewangan <ldewangan@nvidia.com>
S: Supported
F: drivers/input/keyboard/tegra-kbc.c
+TEGRA NAND DRIVER
+M: Stefan Agner <stefan@agner.ch>
+M: Lucas Stach <dev@lynxeye.de>
+S: Maintained
+F: Documentation/devicetree/bindings/mtd/nvidia-tegra20-nand.txt
+F: drivers/mtd/nand/raw/tegra_nand.c
+
TEGRA PWM DRIVER
M: Thierry Reding <thierry.reding@gmail.com>
S: Supported
@@ -14442,6 +14599,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: Documentation/RCU/torture.txt
F: kernel/torture.c
F: kernel/rcu/rcutorture.c
+F: kernel/rcu/rcuperf.c
F: kernel/locking/locktorture.c
TOSHIBA ACPI EXTRAS DRIVER
@@ -15149,6 +15307,14 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/via/via-velocity.*
+VICODEC VIRTUAL CODEC DRIVER
+M: Hans Verkuil <hans.verkuil@cisco.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: https://linuxtv.org
+S: Maintained
+F: drivers/media/platform/vicodec/*
+
VIDEO MULTIPLEXER DRIVER
M: Philipp Zabel <p.zabel@pengutronix.de>
L: linux-media@vger.kernel.org
diff --git a/Makefile b/Makefile
index 863f58503bee..a0650bf79606 100644
--- a/Makefile
+++ b/Makefile
@@ -224,11 +224,14 @@ clean-targets := %clean mrproper cleandocs
no-dot-config-targets := $(clean-targets) \
cscope gtags TAGS tags help% %docs check% coccicheck \
$(version_h) headers_% archheaders archscripts \
- kernelversion %src-pkg
+ %asm-generic kernelversion %src-pkg
+no-sync-config-targets := $(no-dot-config-targets) install %install \
+ kernelrelease
-config-targets := 0
-mixed-targets := 0
-dot-config := 1
+config-targets := 0
+mixed-targets := 0
+dot-config := 1
+may-sync-config := 1
ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
@@ -236,6 +239,16 @@ ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
endif
endif
+ifneq ($(filter $(no-sync-config-targets), $(MAKECMDGOALS)),)
+ ifeq ($(filter-out $(no-sync-config-targets), $(MAKECMDGOALS)),)
+ may-sync-config := 0
+ endif
+endif
+
+ifneq ($(KBUILD_EXTMOD),)
+ may-sync-config := 0
+endif
+
ifeq ($(KBUILD_EXTMOD),)
ifneq ($(filter config %config,$(MAKECMDGOALS)),)
config-targets := 1
@@ -359,11 +372,12 @@ HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
HOSTCC = gcc
HOSTCXX = g++
-HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \
- -fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS)
-HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS)
-HOSTLDFLAGS := $(HOST_LFS_LDFLAGS)
-HOST_LOADLIBES := $(HOST_LFS_LIBS)
+KBUILD_HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \
+ -fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS) \
+ $(HOSTCFLAGS)
+KBUILD_HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS) $(HOSTCXXFLAGS)
+KBUILD_HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
+KBUILD_HOSTLDLIBS := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
# Make variables (CC, etc...)
AS = $(CROSS_COMPILE)as
@@ -429,10 +443,10 @@ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
LDFLAGS :=
GCC_PLUGINS_CFLAGS :=
-export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP HOSTLDFLAGS HOST_LOADLIBES
+export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
+export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
export MAKE LEX YACC AWK GENKSYMS INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE
-export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
+export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
@@ -584,7 +598,7 @@ virt-y := virt/
endif # KBUILD_EXTMOD
ifeq ($(dot-config),1)
--include include/config/auto.conf
+include include/config/auto.conf
endif
# The all: target is the default when no target is given on the
@@ -606,7 +620,7 @@ ARCH_CFLAGS :=
include arch/$(SRCARCH)/Makefile
ifeq ($(dot-config),1)
-ifeq ($(KBUILD_EXTMOD),)
+ifeq ($(may-sync-config),1)
# Read in dependencies to all Kconfig* files, make sure to run syncconfig if
# changes are detected. This should be included after arch/$(SRCARCH)/Makefile
# because some architectures define CROSS_COMPILE there.
@@ -621,8 +635,9 @@ $(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
$(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
else
-# external modules needs include/generated/autoconf.h and include/config/auto.conf
-# but do not care if they are up-to-date. Use auto.conf to trigger the test
+# External modules and some install targets need include/generated/autoconf.h
+# and include/config/auto.conf but do not care if they are up-to-date.
+# Use auto.conf to trigger the test
PHONY += include/config/auto.conf
include/config/auto.conf:
@@ -634,11 +649,7 @@ include/config/auto.conf:
echo >&2 ; \
/bin/false)
-endif # KBUILD_EXTMOD
-
-else
-# Dummy target needed, because used as prerequisite
-include/config/auto.conf: ;
+endif # may-sync-config
endif # $(dot-config)
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
@@ -1009,9 +1020,10 @@ ifdef CONFIG_GDB_SCRIPTS
endif
+$(call if_changed,link-vmlinux)
-# Build samples along the rest of the kernel
+# Build samples along with the rest of the kernel. This needs headers_install.
ifdef CONFIG_SAMPLES
vmlinux-dirs += samples
+samples: headers_install
endif
# The actual objects are generated when descending,
@@ -1033,15 +1045,14 @@ define filechk_kernel.release
endef
# Store (new) KERNELRELEASE string in include/config/kernel.release
-include/config/kernel.release: include/config/auto.conf FORCE
+include/config/kernel.release: $(srctree)/Makefile FORCE
$(call filechk,kernel.release)
# Additional helpers built in scripts/
# Carefully list dependencies so we do not try to build scripts twice
# in parallel
PHONY += scripts
-scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
- asm-generic gcc-plugins $(autoksyms_h)
+scripts: scripts_basic asm-generic gcc-plugins $(autoksyms_h)
$(Q)$(MAKE) $(build)=$(@)
# Things we need to do before we recursively start building the kernel
@@ -1071,8 +1082,7 @@ endif
# that need to depend on updated CONFIG_* values can be checked here.
prepare2: prepare3 outputmakefile asm-generic
-prepare1: prepare2 $(version_h) $(autoksyms_h) include/generated/utsrelease.h \
- include/config/auto.conf
+prepare1: prepare2 $(version_h) $(autoksyms_h) include/generated/utsrelease.h
$(cmd_crmodverdir)
archprepare: archheaders archscripts prepare1 scripts_basic
@@ -1116,7 +1126,7 @@ define filechk_version.h
echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';)
endef
-$(version_h): $(srctree)/Makefile FORCE
+$(version_h): FORCE
$(call filechk,version.h)
$(Q)rm -f $(old_version_h)
@@ -1210,7 +1220,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
$(Q)$(AWK) '!x[$$0]++' $^ > $(objtree)/modules.builtin
-%/modules.builtin: include/config/auto.conf
+%/modules.builtin: include/config/auto.conf include/config/tristate.conf
$(Q)$(MAKE) $(modbuiltin)=$*
diff --git a/arch/Kconfig b/arch/Kconfig
index 1aa59063f1fd..c6148166a7b4 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -3,6 +3,14 @@
# General architecture dependent options
#
+#
+# Note: arch/$(SRCARCH)/Kconfig needs to be included first so that it can
+# override the default values in this file.
+#
+source "arch/$(SRCARCH)/Kconfig"
+
+menu "General architecture-dependent options"
+
config CRASH_CORE
bool
@@ -13,6 +21,9 @@ config KEXEC_CORE
config HAVE_IMA_KEXEC
bool
+config HOTPLUG_SMT
+ bool
+
config OPROFILE
tristate "OProfile system profiling"
depends on PROFILING
@@ -405,150 +416,6 @@ config SECCOMP_FILTER
See Documentation/userspace-api/seccomp_filter.rst for details.
-preferred-plugin-hostcc := $(if-success,[ $(gcc-version) -ge 40800 ],$(HOSTCXX),$(HOSTCC))
-
-config PLUGIN_HOSTCC
- string
- default "$(shell,$(srctree)/scripts/gcc-plugin.sh "$(preferred-plugin-hostcc)" "$(HOSTCXX)" "$(CC)")"
- help
- Host compiler used to build GCC plugins. This can be $(HOSTCXX),
-	  $(HOSTCC), or a null string if GCC plugins are unsupported.
-
-config HAVE_GCC_PLUGINS
- bool
- help
- An arch should select this symbol if it supports building with
- GCC plugins.
-
-menuconfig GCC_PLUGINS
- bool "GCC plugins"
- depends on HAVE_GCC_PLUGINS
- depends on PLUGIN_HOSTCC != ""
- help
- GCC plugins are loadable modules that provide extra features to the
- compiler. They are useful for runtime instrumentation and static analysis.
-
- See Documentation/gcc-plugins.txt for details.
-
-config GCC_PLUGIN_CYC_COMPLEXITY
- bool "Compute the cyclomatic complexity of a function" if EXPERT
- depends on GCC_PLUGINS
- depends on !COMPILE_TEST # too noisy
- help
- The complexity M of a function's control flow graph is defined as:
- M = E - N + 2P
- where
-
- E = the number of edges
- N = the number of nodes
- P = the number of connected components (exit nodes).
-
- Enabling this plugin reports the complexity to stderr during the
- build. It mainly serves as a simple example of how to create a
- gcc plugin for the kernel.
-
-config GCC_PLUGIN_SANCOV
- bool
- depends on GCC_PLUGINS
- help
- This plugin inserts a __sanitizer_cov_trace_pc() call at the start of
- basic blocks. It supports all gcc versions with plugin support (from
- gcc-4.5 on). It is based on the commit "Add fuzzing coverage support"
- by Dmitry Vyukov <dvyukov@google.com>.
-
-config GCC_PLUGIN_LATENT_ENTROPY
- bool "Generate some entropy during boot and runtime"
- depends on GCC_PLUGINS
- help
-	  By saying Y here the kernel will instrument some kernel code to
-	  extract some entropy from both original and artificially created
-	  program state. This helps especially on embedded systems, where
-	  there is normally little 'natural' entropy. The cost is some
-	  slowdown of the boot process (about 0.5%) and of fork and irq
-	  processing.
-
- Note that entropy extracted this way is not cryptographically
- secure!
-
- This plugin was ported from grsecurity/PaX. More information at:
- * https://grsecurity.net/
- * https://pax.grsecurity.net/
-
-config GCC_PLUGIN_STRUCTLEAK
- bool "Force initialization of variables containing userspace addresses"
- depends on GCC_PLUGINS
- # Currently STRUCTLEAK inserts initialization out of live scope of
- # variables from KASAN point of view. This leads to KASAN false
- # positive reports. Prohibit this combination for now.
- depends on !KASAN_EXTRA
- help
- This plugin zero-initializes any structures containing a
- __user attribute. This can prevent some classes of information
- exposures.
-
- This plugin was ported from grsecurity/PaX. More information at:
- * https://grsecurity.net/
- * https://pax.grsecurity.net/
-
-config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
- bool "Force initialize all struct type variables passed by reference"
- depends on GCC_PLUGIN_STRUCTLEAK
- depends on !COMPILE_TEST
- help
- Zero initialize any struct type local variable that may be passed by
- reference without having been initialized.
-
-config GCC_PLUGIN_STRUCTLEAK_VERBOSE
- bool "Report forcefully initialized variables"
- depends on GCC_PLUGIN_STRUCTLEAK
- depends on !COMPILE_TEST # too noisy
- help
- This option will cause a warning to be printed each time the
- structleak plugin finds a variable it thinks needs to be
- initialized. Since not all existing initializers are detected
- by the plugin, this can produce false positive warnings.
-
-config GCC_PLUGIN_RANDSTRUCT
- bool "Randomize layout of sensitive kernel structures"
- depends on GCC_PLUGINS
- select MODVERSIONS if MODULES
- help
- If you say Y here, the layouts of structures that are entirely
- function pointers (and have not been manually annotated with
- __no_randomize_layout), or structures that have been explicitly
- marked with __randomize_layout, will be randomized at compile-time.
-	  This means that exploits targeting these structure types
-	  additionally require an information exposure vulnerability in
-	  order to succeed.
-
-	  Enabling this feature will introduce some performance impact,
-	  slightly increase memory usage, and prevent the use of forensic
-	  tools like Volatility against the system (unless the kernel
-	  source tree is kept around after kernel installation).
-
-	  The seed used for compilation is located at
-	  scripts/gcc-plugins/randomize_layout_seed.h. It remains after
-	  a make clean to allow external modules to be compiled with
-	  the existing seed and will be removed by a make mrproper or
-	  make distclean.
-
- Note that the implementation requires gcc 4.7 or newer.
-
- This plugin was ported from grsecurity/PaX. More information at:
- * https://grsecurity.net/
- * https://pax.grsecurity.net/
-
-config GCC_PLUGIN_RANDSTRUCT_PERFORMANCE
- bool "Use cacheline-aware structure randomization"
- depends on GCC_PLUGIN_RANDSTRUCT
- depends on !COMPILE_TEST # do not reduce test coverage
- help
- If you say Y here, the RANDSTRUCT randomization will make a
- best effort at restricting randomization to cacheline-sized
- groups of elements. It will further not randomize bitfields
- in structures. This reduces the performance hit of RANDSTRUCT
- at the cost of weakened randomization.
-
config HAVE_STACKPROTECTOR
bool
help
@@ -875,6 +742,9 @@ config COMPAT_32BIT_TIME
config ARCH_NO_COHERENT_DMA_MMAP
bool
+config ARCH_NO_PREEMPT
+ bool
+
config CPU_NO_EFFICIENT_FFS
def_bool n
@@ -972,3 +842,7 @@ config REFCOUNT_FULL
security flaw exploits.
source "kernel/gcov/Kconfig"
+
+source "scripts/gcc-plugins/Kconfig"
+
+endmenu
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 04a4a138ed13..5b4f88363453 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -4,6 +4,7 @@ config ALPHA
default y
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_NO_PREEMPT
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_AOUT
select HAVE_IDE
@@ -74,9 +75,6 @@ config PGTABLE_LEVELS
int
default 3
-source "init/Kconfig"
-source "kernel/Kconfig.freezer"
-
config AUDIT_ARCH
bool
@@ -573,8 +571,6 @@ config ARCH_DISCONTIGMEM_ENABLE
or have huge holes in the physical address space for other reasons.
See <file:Documentation/vm/numa.rst> for more.
-source "mm/Kconfig"
-
config NUMA
bool "NUMA Support (EXPERIMENTAL)"
depends on DISCONTIGMEM && BROKEN
@@ -713,28 +709,11 @@ config SRM_ENV
This driver is also available as a module and will be called
srm_env then.
-source "fs/Kconfig.binfmt"
-
endmenu
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/alpha/Kconfig.debug"
-
# DUMMY_CONSOLE may be defined in drivers/video/console/Kconfig
# but we also need it if VGA_HOSE is set
config DUMMY_CONSOLE
bool
depends on VGA_HOSE
default y
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
diff --git a/arch/alpha/Kconfig.debug b/arch/alpha/Kconfig.debug
index 5e93dffb818a..b88c7b641d72 100644
--- a/arch/alpha/Kconfig.debug
+++ b/arch/alpha/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config EARLY_PRINTK
bool
@@ -39,5 +36,3 @@ config MATHEMU
This option is required for IEEE compliant floating point arithmetic
on the Alpha. The only time you would ever not say Y is to say M in
order to debug the code. Say Y unless you know what you are doing.
-
-endmenu
diff --git a/arch/alpha/boot/Makefile b/arch/alpha/boot/Makefile
index 0cbe4c59d3ce..991e023a6fc4 100644
--- a/arch/alpha/boot/Makefile
+++ b/arch/alpha/boot/Makefile
@@ -14,7 +14,7 @@ targets := vmlinux.gz vmlinux \
tools/bootpzh bootloader bootpheader bootpzheader
OBJSTRIP := $(obj)/tools/objstrip
-HOSTCFLAGS := -Wall -I$(objtree)/usr/include
+KBUILD_HOSTCFLAGS := -Wall -I$(objtree)/usr/include
BOOTCFLAGS += -I$(objtree)/$(obj) -I$(srctree)/$(obj)
# SRM bootable image. Copy to offset 512 of a partition.
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 767bfdd42992..150a1c5d6a2c 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -18,11 +18,11 @@
* To ensure dependency ordering is preserved for the _relaxed and
* _release atomics, an smp_read_barrier_depends() is unconditionally
* inserted into the _relaxed variants, which are used to build the
- * barriered versions. To avoid redundant back-to-back fences, we can
- * define the _acquire and _fence versions explicitly.
+ * barriered versions. Avoid redundant back-to-back fences in the
+ * _acquire and _fence versions.
*/
-#define __atomic_op_acquire(op, args...) op##_relaxed(args)
-#define __atomic_op_fence __atomic_op_release
+#define __atomic_acquire_fence()
+#define __atomic_post_full_fence()
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
@@ -206,7 +206,7 @@ ATOMIC_OPS(xor, xor)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -214,7 +214,7 @@ ATOMIC_OPS(xor, xor)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c, new, old;
smp_mb();
@@ -235,38 +235,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
smp_mb();
return old;
}
-
+#define atomic_fetch_add_unless atomic_fetch_add_unless
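(The kernel-doc above spells out the fetch-style contract: the old value comes
back whether or not the add happened. A minimal sketch of the same contract in
portable C11, assuming a plain compare-exchange loop instead of Alpha's LL/SC
assembly; the names here are illustrative, not the kernel's:

	#include <stdatomic.h>

	/* Illustrative only: add @a to *v unless *v == @u, and return the
	 * old value either way, mirroring atomic_fetch_add_unless() above. */
	static int fetch_add_unless(atomic_int *v, int a, int u)
	{
		int old = atomic_load(v);

		while (old != u &&
		       !atomic_compare_exchange_weak(v, &old, old + a))
			;	/* old is reloaded on CAS failure */
		return old;
	}

A caller recovers the old boolean atomic_add_unless() behaviour by testing the
returned value against @u, which is why the boolean per-arch helpers can be
deleted further down.)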
/**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns true iff @v was not @u.
+ * Returns the old value of @v.
*/
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
- long c, tmp;
+ long c, new, old;
smp_mb();
__asm__ __volatile__(
- "1: ldq_l %[tmp],%[mem]\n"
- " cmpeq %[tmp],%[u],%[c]\n"
- " addq %[tmp],%[a],%[tmp]\n"
+ "1: ldq_l %[old],%[mem]\n"
+ " cmpeq %[old],%[u],%[c]\n"
+ " addq %[old],%[a],%[new]\n"
" bne %[c],2f\n"
- " stq_c %[tmp],%[mem]\n"
- " beq %[tmp],3f\n"
+ " stq_c %[new],%[mem]\n"
+ " beq %[new],3f\n"
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
- : [tmp] "=&r"(tmp), [c] "=&r"(c)
+ : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
: "memory");
smp_mb();
- return !c;
+ return old;
}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
/*
* atomic64_dec_if_positive - decrement by 1 if old value positive
@@ -295,31 +296,6 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
smp_mb();
return old - 1;
}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
-
-#define atomic_inc_return(v) atomic_add_return(1,(v))
-#define atomic64_inc_return(v) atomic64_add_return(1,(v))
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
-
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
-
-#define atomic_inc(v) atomic_add(1,(v))
-#define atomic64_inc(v) atomic64_add(1,(v))
-
-#define atomic_dec(v) atomic_sub(1,(v))
-#define atomic64_dec(v) atomic64_sub(1,(v))
+#define atomic64_dec_if_positive atomic64_dec_if_positive
#endif /* _ALPHA_ATOMIC_H */
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index be14f16149d5..065fb372e355 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -112,4 +112,7 @@
#define SO_ZEROCOPY 60
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
#endif /* _UAPI_ASM_SOCKET_H */
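(SO_TXTIME is enabled per socket with setsockopt(). The sketch below shows the
expected usage; treat the struct layout and the CLOCK_TAI choice as
assumptions about the 4.19-era uapi rather than guarantees:

	#include <sys/socket.h>
	#include <time.h>

	#ifndef SO_TXTIME
	#define SO_TXTIME 61			/* value added above for Alpha */
	#endif

	struct sock_txtime {			/* assumed uapi layout */
		clockid_t clockid;		/* reference clock for txtime */
		unsigned int flags;
	};

	static int enable_txtime(int fd)
	{
		struct sock_txtime cfg = {
			.clockid = CLOCK_TAI,
			.flags   = 0,
		};

		return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
	}
)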
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 5151d81476a1..6d5eb8267e42 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -97,9 +97,6 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE
def_bool y
depends on ARC_MMU_V4
-source "init/Kconfig"
-source "kernel/Kconfig.freezer"
-
menu "ARC Architecture Configuration"
menu "ARC Platform/SoC/Board"
@@ -551,24 +548,13 @@ config ARC_BUILTIN_DTB_NAME
Set the name of the DTB to embed in the vmlinux binary.
Leaving it blank selects the minimal "skeleton" dtb.
-source "kernel/Kconfig.preempt"
-
-menu "Executable file formats"
-source "fs/Kconfig.binfmt"
-endmenu
-
endmenu # "ARC Architecture Configuration"
-source "mm/Kconfig"
-
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "12" if ARC_HUGEPAGE_16M
default "11"
-source "net/Kconfig"
-source "drivers/Kconfig"
-
menu "Bus Support"
config PCI
@@ -589,9 +575,4 @@ source "drivers/pci/Kconfig"
endmenu
-source "fs/Kconfig"
-source "arch/arc/Kconfig.debug"
-source "security/Kconfig"
-source "crypto/Kconfig"
-source "lib/Kconfig"
source "kernel/power/Kconfig"
diff --git a/arch/arc/Kconfig.debug b/arch/arc/Kconfig.debug
index 03da1a6b3072..45add86decd5 100644
--- a/arch/arc/Kconfig.debug
+++ b/arch/arc/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config 16KSTACKS
bool "Use 16Kb for kernel stacks instead of 8Kb"
@@ -11,5 +8,3 @@ config 16KSTACKS
This increases the resident kernel footprint and will cause fewer
threads to run on the system, while also increasing the pressure
on the VM subsystem for higher-order allocations.
-
-endmenu
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 11859287c52a..4e0072730241 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -187,7 +187,8 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-#define atomic_andnot atomic_andnot
+#define atomic_andnot atomic_andnot
+#define atomic_fetch_andnot atomic_fetch_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
@@ -296,8 +297,6 @@ ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
-#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
-#define atomic_fetch_andnot(mask, v) atomic_fetch_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
@@ -308,48 +307,6 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v
- */
-#define __atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- \
- /* \
- * Explicit full memory barrier needed before/after as \
-	 * LLOCK/SCOND themselves don't provide any such semantics	\
- */ \
- smp_mb(); \
- \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
- c = old; \
- \
- smp_mb(); \
- \
- c; \
-})
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic_dec(v) atomic_sub(1, v)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
-
-
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
@@ -472,7 +429,8 @@ static inline long long atomic64_fetch_##op(long long a, atomic64_t *v) \
ATOMIC64_OP_RETURN(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
-#define atomic64_andnot atomic64_andnot
+#define atomic64_andnot atomic64_andnot
+#define atomic64_fetch_andnot atomic64_fetch_andnot
ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
@@ -559,53 +517,43 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
return val;
}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
/**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
- * if (v != u) { v += a; ret = 1} else {ret = 0}
- * Returns 1 iff @v was not @u (i.e. if add actually happened)
+ * Atomically adds @a to @v, if it was not @u.
+ * Returns the old value of @v
*/
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+ long long u)
{
- long long val;
- int op_done;
+ long long old, temp;
smp_mb();
__asm__ __volatile__(
"1: llockd %0, [%2] \n"
- " mov %1, 1 \n"
" brne %L0, %L4, 2f # continue to add since v != u \n"
" breq.d %H0, %H4, 3f # return since v == u \n"
- " mov %1, 0 \n"
"2: \n"
- " add.f %L0, %L0, %L3 \n"
- " adc %H0, %H0, %H3 \n"
- " scondd %0, [%2] \n"
+ " add.f %L1, %L0, %L3 \n"
+ " adc %H1, %H0, %H3 \n"
+ " scondd %1, [%2] \n"
" bnz 1b \n"
"3: \n"
- : "=&r"(val), "=&r" (op_done)
+ : "=&r"(old), "=&r" (temp)
: "r"(&v->counter), "r"(a), "r"(u)
: "cc"); /* memory clobber comes from smp_mb() */
smp_mb();
- return op_done;
+ return old;
}
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
index 2e52d18e6bc7..2c1b479d5aea 100644
--- a/arch/arc/include/asm/kprobes.h
+++ b/arch/arc/include/asm/kprobes.h
@@ -45,8 +45,6 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned int kprobe_status;
- struct pt_regs jprobe_saved_regs;
- char jprobes_stack[MAX_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index 42b05046fad9..df35d4c0b0b8 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -225,24 +225,18 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
/* If we have no pre-handler or it returned 0, we continue with
* normal processing. If we have a pre-handler and it returned
- * non-zero - which is expected from setjmp_pre_handler for
- * jprobe, we return without single stepping and leave that to
- * the break-handler which is invoked by a kprobe from
- * jprobe_return
+	 * non-zero - which means the user handler set up the registers to
+	 * resume at another instruction - we must skip the single stepping.
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
setup_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
+ } else {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
}
return 1;
- } else if (kprobe_running()) {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- setup_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_HIT_SS;
- return 1;
- }
}
/* no_kprobe: */
@@ -386,38 +380,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
return ret;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long sp_addr = regs->sp;
-
- kcb->jprobe_saved_regs = *regs;
- memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
- regs->ret = (unsigned long)(jp->entry);
-
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- __asm__ __volatile__("unimp_s");
- return;
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long sp_addr;
-
- *regs = kcb->jprobe_saved_regs;
- sp_addr = regs->sp;
- memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
- preempt_enable_no_resched();
-
- return 1;
-}
-
static void __used kretprobe_trampoline_holder(void)
{
__asm__ __volatile__(".global kretprobe_trampoline\n"
@@ -483,9 +445,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
kretprobe_assert(ri, orig_ret_address, trampoline_address);
regs->ret = orig_ret_address;
- reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
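(With the jprobe entry points gone, the only way a pre-handler influences
control flow is through its return value, as the rewritten comment in
arc_kprobe_handler() explains. A minimal module sketch using the unchanged
kprobes API; the probed symbol is only an example:

	#include <linux/kprobes.h>
	#include <linux/module.h>

	static int my_pre(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("kprobe hit at %pS\n", p->addr);
		return 0;	/* 0: let the core single-step as usual;
				 * non-zero would mean we rewrote regs to
				 * resume elsewhere */
	}

	static struct kprobe kp = {
		.symbol_name = "do_sys_open",	/* illustrative target */
		.pre_handler = my_pre,
	};

	static int __init probe_init(void)
	{
		return register_kprobe(&kp);
	}

	static void __exit probe_exit(void)
	{
		unregister_kprobe(&kp);
	}

	module_init(probe_init);
	module_exit(probe_exit);
	MODULE_LICENSE("GPL");
)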
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 843edfd000be..f69613fd4e68 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -9,6 +9,7 @@ config ARM
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
+ select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_SET_MEMORY
@@ -298,10 +299,6 @@ config PGTABLE_LEVELS
default 3 if ARM_LPAE
default 2
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
menu "System Type"
config MMU
@@ -337,8 +334,8 @@ config ARCH_MULTIPLATFORM
select TIMER_OF
select COMMON_CLK
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select MIGHT_HAVE_PCI
- select MULTI_IRQ_HANDLER
select PCI_DOMAINS if PCI
select SPARSE_IRQ
select USE_OF
@@ -465,9 +462,9 @@ config ARCH_DOVE
bool "Marvell Dove"
select CPU_PJ4
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select MIGHT_HAVE_PCI
- select MULTI_IRQ_HANDLER
select MVEBU_MBUS
select PINCTRL
select PINCTRL_DOVE
@@ -512,8 +509,8 @@ config ARCH_LPC32XX
select COMMON_CLK
select CPU_ARM926T
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
- select MULTI_IRQ_HANDLER
select SPARSE_IRQ
select USE_OF
help
@@ -532,11 +529,11 @@ config ARCH_PXA
select TIMER_OF
select CPU_XSCALE if !CPU_XSC3
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIO_PXA
select GPIOLIB
select HAVE_IDE
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
select PLAT_PXA
select SPARSE_IRQ
help
@@ -572,11 +569,11 @@ config ARCH_SA1100
select CPU_FREQ
select CPU_SA1100
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_IDE
select IRQ_DOMAIN
select ISA
- select MULTI_IRQ_HANDLER
select NEED_MACH_MEMORY_H
select SPARSE_IRQ
help
@@ -590,10 +587,10 @@ config ARCH_S3C24XX
select GENERIC_CLOCKEVENTS
select GPIO_SAMSUNG
select GPIOLIB
+ select GENERIC_IRQ_MULTI_HANDLER
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_S3C_RTC if RTC_CLASS
- select MULTI_IRQ_HANDLER
select NEED_MACH_IO_H
select SAMSUNG_ATAGS
select USE_OF
@@ -627,10 +624,10 @@ config ARCH_OMAP1
select CLKSRC_MMIO
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_IDE
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
select NEED_MACH_IO_H if PCCARD
select NEED_MACH_MEMORY_H
select SPARSE_IRQ
@@ -921,11 +918,6 @@ config IWMMXT
Enable support for iWMMXt context switching at run time if
running on a CPU that supports it.
-config MULTI_IRQ_HANDLER
- bool
- help
-	  Allow each machine to specify its own IRQ handler at run time.
-
if !MMU
source "arch/arm/Kconfig-nommu"
endif
@@ -1485,8 +1477,6 @@ config ARCH_NR_GPIO
If unsure, leave the default value.
-source kernel/Kconfig.preempt
-
config HZ_FIXED
int
default 200 if ARCH_EBSA110
@@ -1721,8 +1711,6 @@ config ARM_MODULE_PLTS
Disabling this is usually safe for small single-platform
configurations. If unsure, say y.
-source "mm/Kconfig"
-
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "12" if SOC_AM33XX
@@ -2175,12 +2163,6 @@ config KERNEL_MODE_NEON
endmenu
-menu "Userspace binary formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
menu "Power management options"
source "kernel/power/Kconfig"
@@ -2201,23 +2183,10 @@ config ARCH_HIBERNATION_POSSIBLE
endmenu
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
source "drivers/firmware/Kconfig"
-source "fs/Kconfig"
-
-source "arch/arm/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
if CRYPTO
source "arch/arm/crypto/Kconfig"
endif
-source "lib/Kconfig"
-
source "arch/arm/kvm/Kconfig"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 693f84392f1b..b48dc083d1b1 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config ARM_PTDUMP_CORE
def_bool n
@@ -1863,5 +1860,3 @@ config PID_IN_CONTEXTIDR
are planning to use hardware trace tools with this kernel.
source "drivers/hwtracing/coresight/Kconfig"
-
-endmenu
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index fc26c3d7b9b6..e7d703d8fac3 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -10,9 +10,6 @@
#
# Copyright (C) 1995-2001 by Russell King
-# Ensure linker flags are correct
-LDFLAGS :=
-
LDFLAGS_vmlinux :=-p --no-undefined -X --pic-veneer
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
@@ -46,12 +43,12 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__ARMEB__
AS += -EB
-LD += -EB
+LDFLAGS += -EB
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__ARMEL__
AS += -EL
-LD += -EL
+LDFLAGS += -EL
endif
#
diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
index fb5c954ab95a..6f258b50eb44 100644
--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
@@ -156,6 +156,100 @@
};
};
+ /* This is a RealTek RTL8366RB switch and PHY using SMI over GPIO */
+ switch {
+ compatible = "realtek,rtl8366rb";
+ /* 22 = MDIO (has input reads), 21 = MDC (clock, output only) */
+ mdc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>;
+ mdio-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
+ realtek,disable-leds;
+
+ switch_intc: interrupt-controller {
+ /* GPIO 15 provides the interrupt */
+ interrupt-parent = <&gpio0>;
+ interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ phy-handle = <&phy0>;
+ };
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ phy-handle = <&phy1>;
+ };
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ phy-handle = <&phy2>;
+ };
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ phy-handle = <&phy3>;
+ };
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ phy-handle = <&phy4>;
+ };
+ rtl8366rb_cpu_port: port@5 {
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+
+ };
+
+ mdio {
+ compatible = "realtek,smi-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: phy@0 {
+ reg = <0>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <0>;
+ };
+ phy1: phy@1 {
+ reg = <1>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <1>;
+ };
+ phy2: phy@2 {
+ reg = <2>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <2>;
+ };
+ phy3: phy@3 {
+ reg = <3>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <3>;
+ };
+ phy4: phy@4 {
+ reg = <4>;
+ interrupt-parent = <&switch_intc>;
+ interrupts = <12>;
+ };
+ };
+ };
+
soc {
flash@30000000 {
/*
@@ -223,10 +317,12 @@
* gpio0bgrp cover line 7 used by WPS LED
* gpio0cgrp cover line 8, 13 used by keys
* and 11, 12 used by the HD LEDs
+ * and line 14, 15 used by RTL8366
+ * RESET and phy ready
* gpio0egrp cover line 16 used by VDISP
* gpio0fgrp cover line 17 used by TK IRQ
* gpio0ggrp cover line 20 used by panel CS
- * gpio0hgrp cover line 21,22 used by RTL8366RB
+ * gpio0hgrp cover line 21,22 used by RTL8366RB MDIO
*/
gpio0_default_pins: pinctrl-gpio0 {
mux {
@@ -250,6 +346,32 @@
groups = "gpio1bgrp";
};
};
+ pinctrl-gmii {
+ mux {
+ function = "gmii";
+ groups = "gmii_gmac0_grp";
+ };
+ conf0 {
+ pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV",
+ "Y7 GMAC0 RXC", "Y11 GMAC1 RXC",
+ "T8 GMAC0 TXEN", "W11 GMAC1 TXEN",
+ "U8 GMAC0 TXC", "V11 GMAC1 TXC",
+ "W8 GMAC0 RXD0", "V9 GMAC0 RXD1",
+ "Y8 GMAC0 RXD2", "U9 GMAC0 RXD3",
+ "T7 GMAC0 TXD0", "U6 GMAC0 TXD1",
+ "V7 GMAC0 TXD2", "U7 GMAC0 TXD3",
+ "Y12 GMAC1 RXD0", "V12 GMAC1 RXD1",
+ "T11 GMAC1 RXD2", "W12 GMAC1 RXD3",
+ "U10 GMAC1 TXD0", "Y10 GMAC1 TXD1",
+ "W10 GMAC1 TXD2", "T9 GMAC1 TXD3";
+ skew-delay = <7>;
+ };
+ /* Set up drive strength on GMAC0 to 16 mA */
+ conf1 {
+ groups = "gmii_gmac0_grp";
+ drive-strength = <16>;
+ };
+ };
};
};
@@ -291,6 +413,22 @@
<0x6000 0 0 4 &pci_intc 2>;
};
+ ethernet@60000000 {
+ status = "okay";
+
+ ethernet-port@0 {
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ ethernet-port@1 {
+ /* Not used in this platform */
+ };
+ };
+
ata@63000000 {
status = "okay";
};
diff --git a/arch/arm/crypto/chacha20-neon-core.S b/arch/arm/crypto/chacha20-neon-core.S
index 3fecb2124c35..451a849ad518 100644
--- a/arch/arm/crypto/chacha20-neon-core.S
+++ b/arch/arm/crypto/chacha20-neon-core.S
@@ -51,9 +51,8 @@ ENTRY(chacha20_block_xor_neon)
.Ldoubleround:
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
vadd.i32 q0, q0, q1
- veor q4, q3, q0
- vshl.u32 q3, q4, #16
- vsri.u32 q3, q4, #16
+ veor q3, q3, q0
+ vrev32.16 q3, q3
// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
vadd.i32 q2, q2, q3
@@ -82,9 +81,8 @@ ENTRY(chacha20_block_xor_neon)
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
vadd.i32 q0, q0, q1
- veor q4, q3, q0
- vshl.u32 q3, q4, #16
- vsri.u32 q3, q4, #16
+ veor q3, q3, q0
+ vrev32.16 q3, q3
// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
vadd.i32 q2, q2, q3
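(The rewrite works because rotating a 32-bit lane left by 16 is exactly a swap
of its two 16-bit halves, which vrev32.16 performs in one instruction per
lane, replacing the vshl.u32/vsri.u32 pair and freeing q4. A quick host-side
check of the identity, in plain C for illustration:

	#include <assert.h>
	#include <stdint.h>

	static uint32_t rotl_16(uint32_t x)
	{
		return (x << 16) | (x >> 16);	/* what vshl.u32 + vsri.u32 built */
	}

	int main(void)
	{
		/* rotl by 16 == halfword swap, per lane */
		assert(rotl_16(0x12345678u) == 0x56781234u);
		return 0;
	}
)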
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index d9bb52cae2ac..8930fc4e7c22 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -152,7 +152,7 @@ static struct shash_alg ghash_alg = {
.cra_name = "__ghash",
.cra_driver_name = "__driver-ghash-ce",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_key),
.cra_module = THIS_MODULE,
@@ -308,9 +308,8 @@ static struct ahash_alg ghash_async_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-ce",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = GHASH_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_ctxsize = sizeof(struct ghash_async_ctx),
.cra_module = THIS_MODULE,
.cra_init = ghash_async_init_tfm,
diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c
index 555f72b5e659..b732522e20f8 100644
--- a/arch/arm/crypto/sha1-ce-glue.c
+++ b/arch/arm/crypto/sha1-ce-glue.c
@@ -75,7 +75,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ce",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index 6fc73bf8766d..98ab8239f919 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -67,7 +67,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-asm",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index 4e22f122f966..d15e0ea2c95e 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -83,7 +83,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-neon",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
index df4dcef054ae..1211a5c129fc 100644
--- a/arch/arm/crypto/sha2-ce-glue.c
+++ b/arch/arm/crypto/sha2-ce-glue.c
@@ -78,7 +78,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ce",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -93,7 +92,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ce",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
index a84e869ef900..bf8ccff2c9d0 100644
--- a/arch/arm/crypto/sha256_glue.c
+++ b/arch/arm/crypto/sha256_glue.c
@@ -71,7 +71,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-asm",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -86,7 +85,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-asm",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
index 39ccd658817e..9bbee56fbdc8 100644
--- a/arch/arm/crypto/sha256_neon_glue.c
+++ b/arch/arm/crypto/sha256_neon_glue.c
@@ -79,7 +79,6 @@ struct shash_alg sha256_neon_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-neon",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -94,7 +93,6 @@ struct shash_alg sha256_neon_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-neon",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
index 269a394e4a53..86540cd4a6fa 100644
--- a/arch/arm/crypto/sha512-glue.c
+++ b/arch/arm/crypto/sha512-glue.c
@@ -63,7 +63,6 @@ static struct shash_alg sha512_arm_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-arm",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -78,7 +77,6 @@ static struct shash_alg sha512_arm_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-arm",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
index 32693684a3ab..8a5642b41fd6 100644
--- a/arch/arm/crypto/sha512-neon-glue.c
+++ b/arch/arm/crypto/sha512-neon-glue.c
@@ -75,7 +75,6 @@ struct shash_alg sha512_neon_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-neon",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
@@ -91,7 +90,6 @@ struct shash_alg sha512_neon_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-neon",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 0cd4dccbae78..b17ee03d280b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -460,6 +460,10 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
adds \tmp, \addr, #\size - 1
sbcccs \tmp, \tmp, \limit
bcs \bad
+#ifdef CONFIG_CPU_SPECTRE
+ movcs \addr, #0
+ csdb
+#endif
#endif
.endm
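(Under CONFIG_CPU_SPECTRE the macro now clamps a failing address to zero and
issues csdb, so a mispredicted bounds check cannot feed a speculative load. A
rough C analogue of the clamping idea, for illustration only; the real
mitigation has to stay in assembly so the compiler cannot reorder or elide it:

	#include <stdint.h>

	/* Sketch: return NULL when [addr, addr + size - 1] wraps or exceeds
	 * limit, mirroring the adds/sbcccs check plus the movcs/csdb clamp. */
	static const void *mask_user_ptr(uintptr_t addr, uintptr_t size,
					 uintptr_t limit)
	{
		uintptr_t end = addr + size - 1;

		if (end < addr || end > limit)
			return 0;
		return (const void *)addr;
	}
)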
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 66d0e215a773..f74756641410 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -130,7 +130,7 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
}
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int oldval, newval;
unsigned long tmp;
@@ -156,6 +156,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return oldval;
}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#else /* ARM_ARCH_6 */
@@ -215,15 +216,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
-
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
- c = old;
- return c;
-}
+#define atomic_fetch_andnot atomic_fetch_andnot
#endif /* __LINUX_ARM_ARCH__ */
@@ -254,17 +247,6 @@ ATOMIC_OPS(xor, ^=, eor)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic_dec(v) atomic_sub(1, v)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return_relaxed(v) (atomic_add_return_relaxed(1, v))
-#define atomic_dec_return_relaxed(v) (atomic_sub_return_relaxed(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
-
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
long long counter;
@@ -494,12 +476,13 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
return result;
}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+ long long u)
{
- long long val;
+ long long oldval, newval;
unsigned long tmp;
- int ret = 1;
smp_mb();
prefetchw(&v->counter);
@@ -508,33 +491,23 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
"1: ldrexd %0, %H0, [%4]\n"
" teq %0, %5\n"
" teqeq %H0, %H5\n"
-" moveq %1, #0\n"
" beq 2f\n"
-" adds %Q0, %Q0, %Q6\n"
-" adc %R0, %R0, %R6\n"
-" strexd %2, %0, %H0, [%4]\n"
+" adds %Q1, %Q0, %Q6\n"
+" adc %R1, %R0, %R6\n"
+" strexd %2, %1, %H1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
- : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
- if (ret)
+ if (oldval != u)
smp_mb();
- return ret;
+ return oldval;
}
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 4cab9bb823fb..c92e42a5c8f7 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -215,7 +215,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#if __LINUX_ARM_ARCH__ < 5
-#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
@@ -223,93 +222,20 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#else
-static inline int constant_fls(int x)
-{
- int r = 32;
-
- if (!x)
- return 0;
- if (!(x & 0xffff0000u)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xff000000u)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xf0000000u)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xc0000000u)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000u)) {
- x <<= 1;
- r -= 1;
- }
- return r;
-}
-
-/*
- * On ARMv5 and above those functions can be implemented around the
- * clz instruction for much better code efficiency. __clz returns
- * the number of leading zeros, zero input will return 32, and
- * 0x80000000 will return 0.
- */
-static inline unsigned int __clz(unsigned int x)
-{
- unsigned int ret;
-
- asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
-
- return ret;
-}
-
-/*
- * fls() returns zero if the input is zero, otherwise returns the bit
- * position of the last set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int fls(int x)
-{
- if (__builtin_constant_p(x))
- return constant_fls(x);
-
- return 32 - __clz(x);
-}
-
-/*
- * __fls() returns the bit position of the last bit set, where the
- * LSB is 0 and MSB is 31. Zero input is undefined.
- */
-static inline unsigned long __fls(unsigned long x)
-{
- return fls(x) - 1;
-}
-
-/*
- * ffs() returns zero if the input was zero, otherwise returns the bit
- * position of the first set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int ffs(int x)
-{
- return fls(x & -x);
-}
-
/*
- * __ffs() returns the bit position of the first bit set, where the
- * LSB is 0 and MSB is 31. Zero input is undefined.
+ * On ARMv5 and above, the gcc built-ins may rely on the clz instruction
+ * and produce optimal inlined code in all cases. On ARMv7 the result is
+ * even better, as the rbit instruction is used as well.
*/
-static inline unsigned long __ffs(unsigned long x)
-{
- return ffs(x) - 1;
-}
-
-#define ffz(x) __ffs( ~(x) )
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/builtin-ffs.h>
#endif
+#include <asm-generic/bitops/ffz.h>
+
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
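(For reference, the asm-generic builtin headers pulled in above reduce to the
compiler intrinsics; a sketch of the resulting semantics, assuming the usual
__builtin_clz/__builtin_ctz mapping:

	#include <assert.h>

	static int sketch_fls(int x)	/* last set bit, 1..32; 0 for x == 0 */
	{
		return x ? 32 - __builtin_clz((unsigned int)x) : 0;
	}

	static int sketch_ffs(int x)	/* first set bit, 1..32; 0 for x == 0 */
	{
		return x ? __builtin_ctz((unsigned int)x) + 1 : 0;
	}

	int main(void)
	{
		assert(sketch_fls(0) == 0 && sketch_fls(1) == 1);
		assert(sketch_fls(-1) == 32);
		assert(sketch_ffs(0) == 0 && sketch_ffs(8) == 4);
		return 0;
	}
)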
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 17f1f1a814ff..38badaae8d9d 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -58,6 +58,9 @@ void efi_virtmap_unload(void);
#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__)
#define efi_is_64bit() (false)
+#define efi_table_attr(table, attr, instance) \
+ ((table##_t *)instance)->attr
+
#define efi_call_proto(protocol, f, instance, ...) \
((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index e46e4e7bdba3..ac54c06764e6 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -111,14 +111,17 @@ static inline void decode_ctrl_reg(u32 reg,
asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
} while (0)
+struct perf_event_attr;
struct notifier_block;
struct perf_event;
struct pmu;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index b6f319606e30..c883fcbe93b6 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -31,11 +31,6 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-extern void (*handle_arch_irq)(struct pt_regs *);
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-#endif
-
#ifdef CONFIG_SMP
extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
bool exclude_self);
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index 59655459da59..82290f212d8e 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -44,8 +44,6 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned int kprobe_status;
struct prev_kprobe prev_kprobe;
- struct pt_regs jprobe_saved_regs;
- char jprobes_stack[MAX_STACK_SIZE];
};
void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 6493bd479ddc..fe2fb1ddd771 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -26,13 +26,13 @@
#include <asm/cputype.h>
/* arm64 compatibility macros */
-#define COMPAT_PSR_MODE_ABT ABT_MODE
-#define COMPAT_PSR_MODE_UND UND_MODE
-#define COMPAT_PSR_T_BIT PSR_T_BIT
-#define COMPAT_PSR_I_BIT PSR_I_BIT
-#define COMPAT_PSR_A_BIT PSR_A_BIT
-#define COMPAT_PSR_E_BIT PSR_E_BIT
-#define COMPAT_PSR_IT_MASK PSR_IT_MASK
+#define PSR_AA32_MODE_ABT ABT_MODE
+#define PSR_AA32_MODE_UND UND_MODE
+#define PSR_AA32_T_BIT PSR_T_BIT
+#define PSR_AA32_I_BIT PSR_I_BIT
+#define PSR_AA32_A_BIT PSR_A_BIT
+#define PSR_AA32_E_BIT PSR_E_BIT
+#define PSR_AA32_IT_MASK PSR_IT_MASK
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 5c1ad11aa392..bb8851208e17 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -59,7 +59,7 @@ struct machine_desc {
void (*init_time)(void);
void (*init_machine)(void);
void (*init_late)(void);
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
void (*handle_irq)(struct pt_regs *);
#endif
void (*restart)(enum reboot_mode, const char *);
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 0f79e4dec7f9..4ac3a019a46f 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -13,7 +13,6 @@
extern void timer_tick(void);
typedef void (*clock_access_fn)(struct timespec64 *);
-extern int register_persistent_clock(clock_access_fn read_boot,
- clock_access_fn read_persistent);
+extern int register_persistent_clock(clock_access_fn read_persistent);
#endif
diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h
index 1e5b9bb92270..991c9127c650 100644
--- a/arch/arm/include/asm/probes.h
+++ b/arch/arm/include/asm/probes.h
@@ -51,7 +51,6 @@ struct arch_probes_insn {
* We assume one instruction can consume at most 64 bytes stack, which is
 * 'push {r0-r15}'. Instructions that consume more or unknown stack space,
 * like 'str r0, [sp, #-80]' and 'str r0, [sp, r1]', must not be probed.
- * Both kprobe and jprobe use this macro.
*/
#define MAX_STACK_SIZE 64
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index e71cc35de163..9b37b6ab27fe 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -123,8 +123,8 @@ struct user_vfp_exc;
extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
struct user_vfp_exc __user *);
-extern int vfp_restore_user_hwstate(struct user_vfp __user *,
- struct user_vfp_exc __user *);
+extern int vfp_restore_user_hwstate(struct user_vfp *,
+ struct user_vfp_exc *);
#endif
/*
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index d5562f9ce600..f854148c8d7c 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -292,5 +292,13 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
{
}
+static inline void tlb_flush_remove_tables(struct mm_struct *mm)
+{
+}
+
+static inline void tlb_flush_remove_tables_local(void *arg)
+{
+}
+
#endif /* CONFIG_MMU */
#endif
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 3d614e90c19f..5451e1f05a19 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -85,6 +85,13 @@ static inline void set_fs(mm_segment_t fs)
flag; })
/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __inttype(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
+/*
* Single-value transfer routines. They automatically use the right
* size if we just have the right pointer type. Note that the functions
* which read from user space (*get_*) need to take care not to leak
@@ -153,7 +160,7 @@ extern int __get_user_64t_4(void *);
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
register typeof(*(p)) __user *__p asm("r0") = (p); \
- register typeof(x) __r2 asm("r2"); \
+ register __inttype(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
unsigned int __ua_flags = uaccess_save_and_enable(); \
@@ -243,6 +250,16 @@ static inline void set_fs(mm_segment_t fs)
#define user_addr_max() \
(uaccess_kernel() ? ~0UL : get_fs())
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1, it is not worth fixing the non-
+ * verifying accessors, because we need to add verification of the
+ * address space there. Force these to use the standard get_user()
+ * version instead.
+ */
+#define __get_user(x, ptr) get_user(x, ptr)
+#else
+
/*
* The "__xxx" versions of the user access functions do not verify the
* address space - it must have been done previously with a separate
@@ -259,12 +276,6 @@ static inline void set_fs(mm_segment_t fs)
__gu_err; \
})
-#define __get_user_error(x, ptr, err) \
-({ \
- __get_user_err((x), (ptr), err); \
- (void) 0; \
-})
-
#define __get_user_err(x, ptr, err) \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
@@ -324,6 +335,7 @@ do { \
#define __get_user_asm_word(x, addr, err) \
__get_user_asm(x, addr, err, ldr)
+#endif
#define __put_user_switch(x, ptr, __err, __fn) \
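
The __inttype() macro added above picks a type wide enough for the access:
unsigned long for arguments that fit, unsigned long long otherwise, so the
__r2 register variable in the get_user() path can hold 64-bit values. A
minimal standalone sketch (GNU C, not kernel code) of how it resolves:

	#include <stdio.h>

	#define __inttype(x) \
		__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

	int main(void)
	{
		char c = 0;
		long long ll = 0;

		/* on 32-bit ARM this prints "4 8": char selects unsigned
		 * long, long long selects unsigned long long */
		printf("%zu %zu\n", sizeof(__inttype(c)), sizeof(__inttype(ll)));
		return 0;
	}
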
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 179a9f6bd1e3..e85a3af9ddeb 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -22,7 +22,7 @@
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
-#ifndef CONFIG_MULTI_IRQ_HANDLER
+#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
@@ -39,7 +39,7 @@
* Interrupt handling.
*/
.macro irq_handler
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
ldr r1, =handle_arch_irq
mov r0, sp
badr lr, 9997f
@@ -1226,9 +1226,3 @@ vector_addrexcptn:
.globl cr_alignment
cr_alignment:
.space 4
-
-#ifdef CONFIG_MULTI_IRQ_HANDLER
- .globl handle_arch_irq
-handle_arch_irq:
- .space 4
-#endif
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 7a9b86978ee1..ec29de250076 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -53,7 +53,11 @@ ENTRY(stext)
THUMB(1: )
#endif
- setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install
+#endif
+ @ ensure svc mode and all interrupts masked
+ safe_svcmode_maskall r9
-	@ and irqs disabled
#if defined(CONFIG_CPU_CP15)
mrc p15, 0, r9, c0, c0 @ get processor id
@@ -89,7 +93,11 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
- setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install_secondary
+#endif
+ safe_svcmode_maskall r9
+
#ifndef CONFIG_CPU_CP15
ldr r9, =CONFIG_PROCESSOR_ID
#else
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 629e25152c0d..1d5fbf1d1c67 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -456,14 +456,13 @@ static int get_hbp_len(u8 hbp_len)
/*
* Check whether bp virtual address is in kernel space.
*/
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- va = info->address;
- len = get_hbp_len(info->ctrl.len);
+ va = hw->address;
+ len = get_hbp_len(hw->ctrl.len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -518,42 +517,42 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
/*
* Construct an arch_hw_breakpoint from a perf_event.
*/
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_X:
- info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+ hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
break;
case HW_BREAKPOINT_R:
- info->ctrl.type = ARM_BREAKPOINT_LOAD;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD;
break;
case HW_BREAKPOINT_W:
- info->ctrl.type = ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_STORE;
break;
case HW_BREAKPOINT_RW:
- info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
break;
default:
return -EINVAL;
}
/* Len */
- switch (bp->attr.bp_len) {
+ switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
- info->ctrl.len = ARM_BREAKPOINT_LEN_1;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
- info->ctrl.len = ARM_BREAKPOINT_LEN_2;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
- info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
break;
case HW_BREAKPOINT_LEN_8:
- info->ctrl.len = ARM_BREAKPOINT_LEN_8;
- if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
+ if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
&& max_watchpoint_len >= 8)
break;
default:
@@ -566,24 +565,24 @@ static int arch_build_bp_info(struct perf_event *bp)
* by the hardware and must be aligned to the appropriate number of
* bytes.
*/
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
- info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
- info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
+ hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+ hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
return -EINVAL;
/* Address */
- info->address = bp->attr.bp_addr;
+ hw->address = attr->bp_addr;
/* Privilege */
- info->ctrl.privilege = ARM_BREAKPOINT_USER;
- if (arch_check_bp_in_kernelspace(bp))
- info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
+ hw->ctrl.privilege = ARM_BREAKPOINT_USER;
+ if (arch_check_bp_in_kernelspace(hw))
+ hw->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
/* Enabled? */
- info->ctrl.enabled = !bp->attr.disabled;
+ hw->ctrl.enabled = !attr->disabled;
/* Mismatch */
- info->ctrl.mismatch = 0;
+ hw->ctrl.mismatch = 0;
return 0;
}
@@ -591,9 +590,10 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings.
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
int ret = 0;
u32 offset, alignment_mask = 0x3;
@@ -602,14 +602,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
return -ENODEV;
/* Build the arch_hw_breakpoint. */
- ret = arch_build_bp_info(bp);
+ ret = arch_build_bp_info(bp, attr, hw);
if (ret)
goto out;
/* Check address alignment. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
alignment_mask = 0x7;
- offset = info->address & alignment_mask;
+ offset = hw->address & alignment_mask;
switch (offset) {
case 0:
/* Aligned */
@@ -617,19 +617,19 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
case 1:
case 2:
/* Allow halfword watchpoints and breakpoints. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
case 3:
/* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
default:
ret = -EINVAL;
goto out;
}
- info->address &= ~alignment_mask;
- info->ctrl.len <<= offset;
+ hw->address &= ~alignment_mask;
+ hw->ctrl.len <<= offset;
if (is_default_overflow_handler(bp)) {
/*
@@ -640,7 +640,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
return -EINVAL;
/* We don't allow mismatch breakpoints in kernel space. */
- if (arch_check_bp_in_kernelspace(bp))
+ if (arch_check_bp_in_kernelspace(hw))
return -EPERM;
/*
@@ -655,8 +655,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* reports them.
*/
if (!debug_exception_updates_fsr() &&
- (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
- info->ctrl.type == ARM_BREAKPOINT_STORE))
+ (hw->ctrl.type == ARM_BREAKPOINT_LOAD ||
+ hw->ctrl.type == ARM_BREAKPOINT_STORE))
return -EINVAL;
}
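
The alignment handling in hw_breakpoint_arch_parse() above folds the low
address bits into the byte-address-select field: the address is aligned down
and the length bitmask is shifted left by the offset. A standalone sketch
(plain C, values illustrative) for a byte watchpoint at an unaligned address:

	#include <stdio.h>

	int main(void)
	{
		unsigned long address = 0x1003; /* byte watchpoint, offset 3 */
		unsigned int len = 0x1;		/* i.e. ARM_BREAKPOINT_LEN_1 */
		unsigned long alignment_mask = 0x3;
		unsigned int offset = address & alignment_mask;

		address &= ~alignment_mask;	/* word-align the address */
		len <<= offset;			/* select byte 3 in the word */
		printf("addr=%#lx bas=%#x\n", address, len); /* 0x1000, 0x8 */
		return 0;
	}
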
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ece04a457486..9908dacf9229 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -102,16 +102,6 @@ void __init init_IRQ(void)
uniphier_cache_init();
}
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
- if (handle_arch_irq)
- return;
-
- handle_arch_irq = handle_irq;
-}
-#endif
-
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index be42c4f66a40..1ae99deeec54 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -233,7 +233,7 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
return ret;
}
-static inline u32 armv6pmu_read_counter(struct perf_event *event)
+static inline u64 armv6pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -251,7 +251,7 @@ static inline u32 armv6pmu_read_counter(struct perf_event *event)
return value;
}
-static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
+static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -411,6 +411,12 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
}
}
+static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
static void armv6pmu_disable_event(struct perf_event *event)
{
unsigned long val, mask, evt, flags;
@@ -491,11 +497,11 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv6pmu_read_counter;
cpu_pmu->write_counter = armv6pmu_write_counter;
cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
cpu_pmu->start = armv6pmu_start;
cpu_pmu->stop = armv6pmu_stop;
cpu_pmu->map_event = armv6_map_event;
cpu_pmu->num_events = 3;
- cpu_pmu->max_period = (1LLU << 32) - 1;
}
static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
@@ -542,11 +548,11 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv6pmu_read_counter;
cpu_pmu->write_counter = armv6pmu_write_counter;
cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
cpu_pmu->start = armv6pmu_start;
cpu_pmu->stop = armv6pmu_stop;
cpu_pmu->map_event = armv6mpcore_map_event;
cpu_pmu->num_events = 3;
- cpu_pmu->max_period = (1LLU << 32) - 1;
return 0;
}
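
Every PMU backend gains the same trivial clear_event_idx hook: it undoes the
used_mask bit that the corresponding get_event_idx claimed, and the dropped
max_period assignments are consistent with counter width now being handled in
the arm_pmu core alongside the new u64 read/write prototypes. A toy model of
the claim/release pairing (plain C, not the arm_pmu API):

	#include <stdio.h>

	static unsigned long used_mask;

	static int get_event_idx(void)
	{
		for (int idx = 0; idx < 32; idx++) {
			if (!(used_mask & (1UL << idx))) {
				used_mask |= 1UL << idx; /* claim counter */
				return idx;
			}
		}
		return -1;	/* all busy; -EAGAIN in the kernel */
	}

	static void clear_event_idx(int idx)
	{
		used_mask &= ~(1UL << idx);	/* release counter */
	}

	int main(void)
	{
		int idx = get_event_idx();

		printf("claimed %d, mask=%#lx\n", idx, used_mask);
		clear_event_idx(idx);
		printf("released, mask=%#lx\n", used_mask);
		return 0;
	}
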
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 57f01e059f39..a4fb0f8b8f84 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -743,7 +743,7 @@ static inline void armv7_pmnc_select_counter(int idx)
isb();
}
-static inline u32 armv7pmu_read_counter(struct perf_event *event)
+static inline u64 armv7pmu_read_counter(struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -763,7 +763,7 @@ static inline u32 armv7pmu_read_counter(struct perf_event *event)
return value;
}
-static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
+static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -1058,6 +1058,12 @@ static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
return -EAGAIN;
}
+static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
/*
* Add an event filter to a given event. This will only work for PMUv2 PMUs.
*/
@@ -1167,10 +1173,10 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv7pmu_read_counter;
cpu_pmu->write_counter = armv7pmu_write_counter;
cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
cpu_pmu->start = armv7pmu_start;
cpu_pmu->stop = armv7pmu_stop;
cpu_pmu->reset = armv7pmu_reset;
- cpu_pmu->max_period = (1LLU << 32) - 1;
};
static void armv7_read_num_pmnc_events(void *info)
@@ -1638,6 +1644,7 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
bool venum_event = EVENT_VENUM(hwc->config_base);
bool krait_event = EVENT_CPU(hwc->config_base);
+ armv7pmu_clear_event_idx(cpuc, event);
if (venum_event || krait_event) {
bit = krait_event_to_bit(event, region, group);
clear_bit(bit, cpuc->used_mask);
@@ -1967,6 +1974,7 @@ static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
bool venum_event = EVENT_VENUM(hwc->config_base);
bool scorpion_event = EVENT_CPU(hwc->config_base);
+ armv7pmu_clear_event_idx(cpuc, event);
if (venum_event || scorpion_event) {
bit = scorpion_event_to_bit(event, region, group);
clear_bit(bit, cpuc->used_mask);
@@ -2030,6 +2038,7 @@ static struct platform_driver armv7_pmu_driver = {
.driver = {
.name = "armv7-pmu",
.of_match_table = armv7_pmu_of_device_ids,
+ .suppress_bind_attrs = true,
},
.probe = armv7_pmu_device_probe,
};
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 88d1a76f5367..f6cdcacfb96d 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -292,6 +292,12 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
}
}
+static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
@@ -316,7 +322,7 @@ static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static inline u32 xscale1pmu_read_counter(struct perf_event *event)
+static inline u64 xscale1pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -337,7 +343,7 @@ static inline u32 xscale1pmu_read_counter(struct perf_event *event)
return val;
}
-static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
+static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -370,11 +376,11 @@ static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = xscale1pmu_read_counter;
cpu_pmu->write_counter = xscale1pmu_write_counter;
cpu_pmu->get_event_idx = xscale1pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
cpu_pmu->start = xscale1pmu_start;
cpu_pmu->stop = xscale1pmu_stop;
cpu_pmu->map_event = xscale_map_event;
cpu_pmu->num_events = 3;
- cpu_pmu->max_period = (1LLU << 32) - 1;
return 0;
}
@@ -679,7 +685,7 @@ static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static inline u32 xscale2pmu_read_counter(struct perf_event *event)
+static inline u64 xscale2pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -706,7 +712,7 @@ static inline u32 xscale2pmu_read_counter(struct perf_event *event)
return val;
}
-static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
+static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -739,11 +745,11 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = xscale2pmu_read_counter;
cpu_pmu->write_counter = xscale2pmu_write_counter;
cpu_pmu->get_event_idx = xscale2pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
cpu_pmu->start = xscale2pmu_start;
cpu_pmu->stop = xscale2pmu_stop;
cpu_pmu->map_event = xscale_map_event;
cpu_pmu->num_events = 5;
- cpu_pmu->max_period = (1LLU << 32) - 1;
return 0;
}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 35ca494c028c..4c249cb261f3 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1145,7 +1145,7 @@ void __init setup_arch(char **cmdline_p)
reserve_crashkernel();
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
handle_arch_irq = mdesc->handle_irq;
#endif
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index dec130e7078c..b8f766cf3a90 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -150,22 +150,18 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
static int restore_vfp_context(char __user **auxp)
{
- struct vfp_sigframe __user *frame =
- (struct vfp_sigframe __user *)*auxp;
- unsigned long magic;
- unsigned long size;
- int err = 0;
-
- __get_user_error(magic, &frame->magic, err);
- __get_user_error(size, &frame->size, err);
+ struct vfp_sigframe frame;
+ int err;
+ err = __copy_from_user(&frame, *auxp, sizeof(frame));
if (err)
- return -EFAULT;
- if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+ return err;
+
+ if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
return -EINVAL;
- *auxp += size;
- return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
+ *auxp += sizeof(frame);
+ return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
}
#endif
@@ -176,6 +172,7 @@ static int restore_vfp_context(char __user **auxp)
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
+ struct sigcontext context;
char __user *aux;
sigset_t set;
int err;
@@ -184,23 +181,26 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
if (err == 0)
set_current_blocked(&set);
- __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
- __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
- __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
- __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
- __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
- __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
- __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
- __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
- __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
- __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
- __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
- __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
- __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
- __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
- __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
- __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
- __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+ err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
+ if (err == 0) {
+ regs->ARM_r0 = context.arm_r0;
+ regs->ARM_r1 = context.arm_r1;
+ regs->ARM_r2 = context.arm_r2;
+ regs->ARM_r3 = context.arm_r3;
+ regs->ARM_r4 = context.arm_r4;
+ regs->ARM_r5 = context.arm_r5;
+ regs->ARM_r6 = context.arm_r6;
+ regs->ARM_r7 = context.arm_r7;
+ regs->ARM_r8 = context.arm_r8;
+ regs->ARM_r9 = context.arm_r9;
+ regs->ARM_r10 = context.arm_r10;
+ regs->ARM_fp = context.arm_fp;
+ regs->ARM_ip = context.arm_ip;
+ regs->ARM_sp = context.arm_sp;
+ regs->ARM_lr = context.arm_lr;
+ regs->ARM_pc = context.arm_pc;
+ regs->ARM_cpsr = context.arm_cpsr;
+ }
err |= !valid_user_regs(regs);
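
The pattern here, repeated in sys_oabi_semtimedop() below, replaces a long
run of __get_user_error() calls with one __copy_from_user() of the whole
structure into a kernel-stack copy followed by plain assignments. A
userspace-flavoured sketch of the shape, with memcpy() standing in for the
fallible user copy and an illustrative three-field context:

	#include <stdio.h>
	#include <string.h>

	struct sigctx { unsigned long r0, r1, pc; }; /* illustrative subset */
	struct regs { unsigned long r0, r1, pc; };

	static int restore(struct regs *regs, const struct sigctx *user_frame)
	{
		struct sigctx context;

		/* one bulk copy; __copy_from_user() returns nonzero on fault */
		memcpy(&context, user_frame, sizeof(context));

		regs->r0 = context.r0;
		regs->r1 = context.r1;
		regs->pc = context.pc;
		return 0;
	}

	int main(void)
	{
		struct sigctx frame = { 1, 2, 0x8000 };
		struct regs regs;

		restore(&regs, &frame);
		printf("pc=%#lx\n", regs.pc);
		return 0;
	}
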
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 1df21a61e379..f0dd4b6ebb63 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -329,9 +329,11 @@ asmlinkage long sys_oabi_semtimedop(int semid,
return -ENOMEM;
err = 0;
for (i = 0; i < nsops; i++) {
- __get_user_error(sops[i].sem_num, &tsops->sem_num, err);
- __get_user_error(sops[i].sem_op, &tsops->sem_op, err);
- __get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
+ struct oabi_sembuf osb;
+ err |= __copy_from_user(&osb, tsops, sizeof(osb));
+ sops[i].sem_num = osb.sem_num;
+ sops[i].sem_op = osb.sem_op;
+ sops[i].sem_flg = osb.sem_flg;
tsops++;
}
if (timeout) {
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index cf2701cb0de8..078b259ead4e 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -83,29 +83,18 @@ static void dummy_clock_access(struct timespec64 *ts)
}
static clock_access_fn __read_persistent_clock = dummy_clock_access;
-static clock_access_fn __read_boot_clock = dummy_clock_access;
void read_persistent_clock64(struct timespec64 *ts)
{
__read_persistent_clock(ts);
}
-void read_boot_clock64(struct timespec64 *ts)
-{
- __read_boot_clock(ts);
-}
-
-int __init register_persistent_clock(clock_access_fn read_boot,
- clock_access_fn read_persistent)
+int __init register_persistent_clock(clock_access_fn read_persistent)
{
/* Only allow the clockaccess functions to be registered once */
- if (__read_persistent_clock == dummy_clock_access &&
- __read_boot_clock == dummy_clock_access) {
- if (read_boot)
- __read_boot_clock = read_boot;
+ if (__read_persistent_clock == dummy_clock_access) {
if (read_persistent)
__read_persistent_clock = read_persistent;
-
return 0;
}
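
With the boot-clock hook gone, registration reduces to a single guarded
assignment: the first caller wins and later calls are rejected (the kernel
presumably returns -EINVAL past the hunk shown). A standalone sketch with
the kernel types mocked up and a hypothetical rtc_read() callback:

	#include <stdio.h>

	struct timespec64 { long long tv_sec; long tv_nsec; };
	typedef void (*clock_access_fn)(struct timespec64 *);

	static void dummy_clock_access(struct timespec64 *ts)
	{
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}

	static clock_access_fn __read_persistent_clock = dummy_clock_access;

	static int register_persistent_clock(clock_access_fn read_persistent)
	{
		/* only allow the clock access function to be registered once */
		if (__read_persistent_clock == dummy_clock_access) {
			if (read_persistent)
				__read_persistent_clock = read_persistent;
			return 0;
		}
		return -1;
	}

	static void rtc_read(struct timespec64 *ts)
	{
		ts->tv_sec = 42;
		ts->tv_nsec = 0;
	}

	int main(void)
	{
		printf("first:  %d\n", register_persistent_clock(rtc_read));
		printf("second: %d\n", register_persistent_clock(rtc_read));
		return 0;
	}
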
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a4b06049001..a826df3d3814 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -90,6 +90,15 @@
.text
ENTRY(arm_copy_from_user)
+#ifdef CONFIG_CPU_SPECTRE
+ get_thread_info r3
+ ldr r3, [r3, #TI_ADDR_LIMIT]
+ adds ip, r1, r2 @ ip=addr+size
+ sub r3, r3, #1 @ addr_limit - 1
+ cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
+ movcs r1, #0 @ addr = NULL
+ csdb
+#endif
#include "copy_template.S"
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 1254bf9d91b4..903f23c309df 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -27,6 +27,7 @@ config SOC_SAMA5D2
select HAVE_AT91_H32MX
select HAVE_AT91_GENERATED_CLK
select HAVE_AT91_AUDIO_PLL
+ select HAVE_AT91_I2S_MUX_CLK
select PINCTRL_AT91PIO4
help
Select this if you are using one of Microchip's SAMA5D2 family SoCs.
@@ -129,6 +130,9 @@ config HAVE_AT91_GENERATED_CLK
config HAVE_AT91_AUDIO_PLL
bool
+config HAVE_AT91_I2S_MUX_CLK
+ bool
+
config SOC_SAM_V4_V5
bool
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 4ea93c9df77b..7415f181907b 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -19,31 +19,6 @@ ifeq ($(CONFIG_PM_DEBUG),y)
CFLAGS_pm.o += -DDEBUG
endif
-# Default sed regexp - multiline due to syntax constraints
-define sed-y
- "/^->/{s:->#\(.*\):/* \1 */:; \
- s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
- s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
- s:->::; p;}"
-endef
-
-# Use filechk to avoid rebuilds when a header changes, but the resulting file
-# does not
-define filechk_offsets
- (set -e; \
- echo "#ifndef $2"; \
- echo "#define $2"; \
- echo "/*"; \
- echo " * DO NOT MODIFY."; \
- echo " *"; \
- echo " * This file was generated by Kbuild"; \
- echo " */"; \
- echo ""; \
- sed -ne $(sed-y); \
- echo ""; \
- echo "#endif" )
-endef
-
arch/arm/mach-at91/pm_data-offsets.s: arch/arm/mach-at91/pm_data-offsets.c
$(call if_changed_dep,cc_s_c)
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index d3db306a5a70..f3384e3a675d 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -272,7 +272,7 @@ static int exynos5420_cpu_suspend(unsigned long arg)
static void exynos_pm_set_wakeup_mask(void)
{
/* Set wake-up mask registers */
- pmu_raw_writel(exynos_get_eint_wake_mask(), S5P_EINT_WAKEUP_MASK);
+ pmu_raw_writel(exynos_get_eint_wake_mask(), EXYNOS_EINT_WAKEUP_MASK);
pmu_raw_writel(exynos_irqwake_intmask & ~(1 << 31), S5P_WAKEUP_MASK);
}
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index f4f8f23bda8c..af46d2182533 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -688,7 +688,6 @@ struct platform_nand_data balloon3_nand_pdata = {
.chip_delay = 50,
},
.ctrl = {
- .hwcontrol = 0,
.dev_ready = balloon3_nand_dev_ready,
.select_chip = balloon3_nand_select_chip,
.cmd_ctrl = balloon3_nand_cmd_ctl,
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index d7c9a8476d57..5a16ea74e28a 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -4,6 +4,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/platform_data/i2c-pxa.h>
@@ -59,16 +60,6 @@ static struct resource pxamci_resources[] = {
.end = IRQ_MMC,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- .start = 21,
- .end = 21,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = 22,
- .end = 22,
- .flags = IORESOURCE_DMA,
- },
};
static u64 pxamci_dmamask = 0xffffffffUL;
@@ -406,16 +397,6 @@ static struct resource pxa_ir_resources[] = {
.end = 0x40700023,
.flags = IORESOURCE_MEM,
},
- [5] = {
- .start = 17,
- .end = 17,
- .flags = IORESOURCE_DMA,
- },
- [6] = {
- .start = 18,
- .end = 18,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa_device_ficp = {
@@ -544,18 +525,6 @@ static struct resource pxa25x_resource_ssp[] = {
.end = IRQ_SSP,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for RX */
- .start = 13,
- .end = 13,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for TX */
- .start = 14,
- .end = 14,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa25x_device_ssp = {
@@ -582,18 +551,6 @@ static struct resource pxa25x_resource_nssp[] = {
.end = IRQ_NSSP,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for RX */
- .start = 15,
- .end = 15,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for TX */
- .start = 16,
- .end = 16,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa25x_device_nssp = {
@@ -620,18 +577,6 @@ static struct resource pxa25x_resource_assp[] = {
.end = IRQ_ASSP,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for RX */
- .start = 23,
- .end = 23,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for TX */
- .start = 24,
- .end = 24,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa25x_device_assp = {
@@ -750,18 +695,6 @@ static struct resource pxa27x_resource_ssp1[] = {
.end = IRQ_SSP,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for RX */
- .start = 13,
- .end = 13,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for TX */
- .start = 14,
- .end = 14,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa27x_device_ssp1 = {
@@ -788,18 +721,6 @@ static struct resource pxa27x_resource_ssp2[] = {
.end = IRQ_SSP2,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for RX */
- .start = 15,
- .end = 15,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for TX */
- .start = 16,
- .end = 16,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa27x_device_ssp2 = {
@@ -826,18 +747,6 @@ static struct resource pxa27x_resource_ssp3[] = {
.end = IRQ_SSP3,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for RX */
- .start = 66,
- .end = 66,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for TX */
- .start = 67,
- .end = 67,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa27x_device_ssp3 = {
@@ -894,16 +803,6 @@ static struct resource pxa3xx_resources_mci2[] = {
.end = IRQ_MMC2,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- .start = 93,
- .end = 93,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = 94,
- .end = 94,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa3xx_device_mci2 = {
@@ -933,16 +832,6 @@ static struct resource pxa3xx_resources_mci3[] = {
.end = IRQ_MMC3,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- .start = 100,
- .end = 100,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = 101,
- .end = 101,
- .flags = IORESOURCE_DMA,
- },
};
struct platform_device pxa3xx_device_mci3 = {
@@ -1020,18 +909,6 @@ static struct resource pxa3xx_resources_nand[] = {
.end = IRQ_NAND,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for Data DMA */
- .start = 97,
- .end = 97,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for Command DMA */
- .start = 99,
- .end = 99,
- .flags = IORESOURCE_DMA,
- },
};
static u64 pxa3xx_nand_dma_mask = DMA_BIT_MASK(32);
@@ -1065,18 +942,6 @@ static struct resource pxa3xx_resource_ssp4[] = {
.end = IRQ_SSP4,
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* DRCMR for RX */
- .start = 2,
- .end = 2,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- /* DRCMR for TX */
- .start = 3,
- .end = 3,
- .flags = IORESOURCE_DMA,
- },
};
/*
@@ -1202,11 +1067,6 @@ void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info)
platform_device_add(pd);
}
-static struct mmp_dma_platdata pxa_dma_pdata = {
- .dma_channels = 0,
- .nb_requestors = 0,
-};
-
static struct resource pxa_dma_resource[] = {
[0] = {
.start = 0x40000000,
@@ -1233,9 +1093,7 @@ static struct platform_device pxa2xx_pxa_dma = {
.resource = pxa_dma_resource,
};
-void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors)
+void __init pxa2xx_set_dmac_info(struct mmp_dma_platdata *dma_pdata)
{
- pxa_dma_pdata.dma_channels = nb_channels;
- pxa_dma_pdata.nb_requestors = nb_requestors;
- pxa_register_device(&pxa2xx_pxa_dma, &pxa_dma_pdata);
+ pxa_register_device(&pxa2xx_pxa_dma, dma_pdata);
}
diff --git a/arch/arm/mach-pxa/devices.h b/arch/arm/mach-pxa/devices.h
index 11263f7c455b..498b07bc6a3e 100644
--- a/arch/arm/mach-pxa/devices.h
+++ b/arch/arm/mach-pxa/devices.h
@@ -1,4 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#define PDMA_FILTER_PARAM(_prio, _requestor) (&(struct pxad_param) { \
+ .prio = PXAD_PRIO_##_prio, .drcmr = _requestor })
+struct mmp_dma_platdata;
+
extern struct platform_device pxa_device_pmu;
extern struct platform_device pxa_device_mci;
extern struct platform_device pxa3xx_device_mci2;
@@ -55,7 +59,7 @@ extern struct platform_device pxa3xx_device_gpio;
extern struct platform_device pxa93x_device_gpio;
void __init pxa_register_device(struct platform_device *dev, void *data);
-void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors);
+void __init pxa2xx_set_dmac_info(struct mmp_dma_platdata *dma_pdata);
struct i2c_pxa_platform_data;
extern void pxa_set_i2c_info(struct i2c_pxa_platform_data *info);
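
PDMA_FILTER_PARAM() builds a pointer to an anonymous struct pxad_param via a
compound literal; the dma_slave_map tables added to pxa25x.c, pxa27x.c and
pxa3xx.c store that pointer in their .param slots so the pxa-dma driver can
recover the priority and DRCMR requestor line. A standalone sketch of the
expansion (enum values illustrative):

	#include <stdio.h>

	struct pxad_param { int prio; unsigned int drcmr; };
	enum { PXAD_PRIO_HIGHEST, PXAD_PRIO_NORMAL,
	       PXAD_PRIO_LOW, PXAD_PRIO_LOWEST };

	#define PDMA_FILTER_PARAM(_prio, _requestor) (&(struct pxad_param) { \
		.prio = PXAD_PRIO_##_prio, .drcmr = _requestor })

	int main(void)
	{
		/* the "pxa2xx-mci.0" rx entry: lowest priority, DRCMR 21 */
		struct pxad_param *p = PDMA_FILTER_PARAM(LOWEST, 21);

		printf("prio=%d drcmr=%u\n", p->prio, p->drcmr);
		return 0;
	}
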
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 49022ad338e9..29be04c6cc48 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -346,7 +346,6 @@ struct platform_nand_data em_x270_nand_platdata = {
.chip_delay = 20,
},
.ctrl = {
- .hwcontrol = 0,
.dev_ready = em_x270_nand_device_ready,
.select_chip = 0,
.cmd_ctrl = em_x270_nand_cmd_ctl,
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index ba431fad5c47..ab8808ce7e21 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -16,6 +16,8 @@
* initialization stuff for PXA machines which can be overridden later if
* need be.
*/
+#include <linux/dmaengine.h>
+#include <linux/dma/pxa-dma.h>
#include <linux/gpio.h>
#include <linux/gpio-pxa.h>
#include <linux/module.h>
@@ -26,6 +28,7 @@
#include <linux/syscore_ops.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/platform_data/mmp_dma.h>
#include <asm/mach/map.h>
#include <asm/suspend.h>
@@ -201,6 +204,39 @@ static struct platform_device *pxa25x_devices[] __initdata = {
&pxa_device_asoc_platform,
};
+static const struct dma_slave_map pxa25x_slave_map[] = {
+ /* PXA25x, PXA27x and PXA3xx common entries */
+ { "pxa2xx-ac97", "pcm_pcm_mic_mono", PDMA_FILTER_PARAM(LOWEST, 8) },
+ { "pxa2xx-ac97", "pcm_pcm_aux_mono_in", PDMA_FILTER_PARAM(LOWEST, 9) },
+ { "pxa2xx-ac97", "pcm_pcm_aux_mono_out",
+ PDMA_FILTER_PARAM(LOWEST, 10) },
+ { "pxa2xx-ac97", "pcm_pcm_stereo_in", PDMA_FILTER_PARAM(LOWEST, 11) },
+ { "pxa2xx-ac97", "pcm_pcm_stereo_out", PDMA_FILTER_PARAM(LOWEST, 12) },
+ { "pxa-ssp-dai.1", "rx", PDMA_FILTER_PARAM(LOWEST, 13) },
+ { "pxa-ssp-dai.1", "tx", PDMA_FILTER_PARAM(LOWEST, 14) },
+ { "pxa-ssp-dai.2", "rx", PDMA_FILTER_PARAM(LOWEST, 15) },
+ { "pxa-ssp-dai.2", "tx", PDMA_FILTER_PARAM(LOWEST, 16) },
+ { "pxa2xx-ir", "rx", PDMA_FILTER_PARAM(LOWEST, 17) },
+ { "pxa2xx-ir", "tx", PDMA_FILTER_PARAM(LOWEST, 18) },
+ { "pxa2xx-mci.0", "rx", PDMA_FILTER_PARAM(LOWEST, 21) },
+ { "pxa2xx-mci.0", "tx", PDMA_FILTER_PARAM(LOWEST, 22) },
+
+ /* PXA25x specific map */
+ { "pxa25x-ssp.0", "rx", PDMA_FILTER_PARAM(LOWEST, 13) },
+ { "pxa25x-ssp.0", "tx", PDMA_FILTER_PARAM(LOWEST, 14) },
+ { "pxa25x-nssp.1", "rx", PDMA_FILTER_PARAM(LOWEST, 15) },
+ { "pxa25x-nssp.1", "tx", PDMA_FILTER_PARAM(LOWEST, 16) },
+ { "pxa25x-nssp.2", "rx", PDMA_FILTER_PARAM(LOWEST, 23) },
+ { "pxa25x-nssp.2", "tx", PDMA_FILTER_PARAM(LOWEST, 24) },
+};
+
+static struct mmp_dma_platdata pxa25x_dma_pdata = {
+ .dma_channels = 16,
+ .nb_requestors = 40,
+ .slave_map = pxa25x_slave_map,
+ .slave_map_cnt = ARRAY_SIZE(pxa25x_slave_map),
+};
+
static int __init pxa25x_init(void)
{
int ret = 0;
@@ -215,7 +251,7 @@ static int __init pxa25x_init(void)
register_syscore_ops(&pxa2xx_mfp_syscore_ops);
if (!of_have_populated_dt()) {
- pxa2xx_set_dmac_info(16, 40);
+ pxa2xx_set_dmac_info(&pxa25x_dma_pdata);
pxa_register_device(&pxa25x_device_gpio, &pxa25x_gpio_info);
ret = platform_add_devices(pxa25x_devices,
ARRAY_SIZE(pxa25x_devices));
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 0c06f383ad52..5a8990a9313d 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -11,6 +11,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/dmaengine.h>
+#include <linux/dma/pxa-dma.h>
#include <linux/gpio.h>
#include <linux/gpio-pxa.h>
#include <linux/module.h>
@@ -23,6 +25,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_data/i2c-pxa.h>
+#include <linux/platform_data/mmp_dma.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
@@ -297,6 +300,40 @@ static struct platform_device *devices[] __initdata = {
&pxa27x_device_pwm1,
};
+static const struct dma_slave_map pxa27x_slave_map[] = {
+ /* PXA25x, PXA27x and PXA3xx common entries */
+ { "pxa2xx-ac97", "pcm_pcm_mic_mono", PDMA_FILTER_PARAM(LOWEST, 8) },
+ { "pxa2xx-ac97", "pcm_pcm_aux_mono_in", PDMA_FILTER_PARAM(LOWEST, 9) },
+ { "pxa2xx-ac97", "pcm_pcm_aux_mono_out",
+ PDMA_FILTER_PARAM(LOWEST, 10) },
+ { "pxa2xx-ac97", "pcm_pcm_stereo_in", PDMA_FILTER_PARAM(LOWEST, 11) },
+ { "pxa2xx-ac97", "pcm_pcm_stereo_out", PDMA_FILTER_PARAM(LOWEST, 12) },
+ { "pxa-ssp-dai.0", "rx", PDMA_FILTER_PARAM(LOWEST, 13) },
+ { "pxa-ssp-dai.0", "tx", PDMA_FILTER_PARAM(LOWEST, 14) },
+ { "pxa-ssp-dai.1", "rx", PDMA_FILTER_PARAM(LOWEST, 15) },
+ { "pxa-ssp-dai.1", "tx", PDMA_FILTER_PARAM(LOWEST, 16) },
+ { "pxa2xx-ir", "rx", PDMA_FILTER_PARAM(LOWEST, 17) },
+ { "pxa2xx-ir", "tx", PDMA_FILTER_PARAM(LOWEST, 18) },
+ { "pxa2xx-mci.0", "rx", PDMA_FILTER_PARAM(LOWEST, 21) },
+ { "pxa2xx-mci.0", "tx", PDMA_FILTER_PARAM(LOWEST, 22) },
+ { "pxa-ssp-dai.2", "rx", PDMA_FILTER_PARAM(LOWEST, 66) },
+ { "pxa-ssp-dai.2", "tx", PDMA_FILTER_PARAM(LOWEST, 67) },
+
+ /* PXA27x specific map */
+ { "pxa2xx-i2s", "rx", PDMA_FILTER_PARAM(LOWEST, 2) },
+ { "pxa2xx-i2s", "tx", PDMA_FILTER_PARAM(LOWEST, 3) },
+ { "pxa27x-camera.0", "CI_Y", PDMA_FILTER_PARAM(HIGHEST, 68) },
+ { "pxa27x-camera.0", "CI_U", PDMA_FILTER_PARAM(HIGHEST, 69) },
+ { "pxa27x-camera.0", "CI_V", PDMA_FILTER_PARAM(HIGHEST, 70) },
+};
+
+static struct mmp_dma_platdata pxa27x_dma_pdata = {
+ .dma_channels = 32,
+ .nb_requestors = 75,
+ .slave_map = pxa27x_slave_map,
+ .slave_map_cnt = ARRAY_SIZE(pxa27x_slave_map),
+};
+
static int __init pxa27x_init(void)
{
int ret = 0;
@@ -313,7 +350,7 @@ static int __init pxa27x_init(void)
if (!of_have_populated_dt()) {
pxa_register_device(&pxa27x_device_gpio,
&pxa27x_gpio_info);
- pxa2xx_set_dmac_info(32, 75);
+ pxa2xx_set_dmac_info(&pxa27x_dma_pdata);
ret = platform_add_devices(devices,
ARRAY_SIZE(devices));
}
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 8c64f93b669b..df9c8970adcf 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -12,6 +12,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/dmaengine.h>
+#include <linux/dma/pxa-dma.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -24,6 +26,7 @@
#include <linux/of.h>
#include <linux/syscore_ops.h>
#include <linux/platform_data/i2c-pxa.h>
+#include <linux/platform_data/mmp_dma.h>
#include <asm/mach/map.h>
#include <asm/suspend.h>
@@ -421,6 +424,42 @@ static struct platform_device *devices[] __initdata = {
&pxa27x_device_pwm1,
};
+static const struct dma_slave_map pxa3xx_slave_map[] = {
+ /* PXA25x, PXA27x and PXA3xx common entries */
+ { "pxa2xx-ac97", "pcm_pcm_mic_mono", PDMA_FILTER_PARAM(LOWEST, 8) },
+ { "pxa2xx-ac97", "pcm_pcm_aux_mono_in", PDMA_FILTER_PARAM(LOWEST, 9) },
+ { "pxa2xx-ac97", "pcm_pcm_aux_mono_out",
+ PDMA_FILTER_PARAM(LOWEST, 10) },
+ { "pxa2xx-ac97", "pcm_pcm_stereo_in", PDMA_FILTER_PARAM(LOWEST, 11) },
+ { "pxa2xx-ac97", "pcm_pcm_stereo_out", PDMA_FILTER_PARAM(LOWEST, 12) },
+ { "pxa-ssp-dai.0", "rx", PDMA_FILTER_PARAM(LOWEST, 13) },
+ { "pxa-ssp-dai.0", "tx", PDMA_FILTER_PARAM(LOWEST, 14) },
+ { "pxa-ssp-dai.1", "rx", PDMA_FILTER_PARAM(LOWEST, 15) },
+ { "pxa-ssp-dai.1", "tx", PDMA_FILTER_PARAM(LOWEST, 16) },
+ { "pxa2xx-ir", "rx", PDMA_FILTER_PARAM(LOWEST, 17) },
+ { "pxa2xx-ir", "tx", PDMA_FILTER_PARAM(LOWEST, 18) },
+ { "pxa2xx-mci.0", "rx", PDMA_FILTER_PARAM(LOWEST, 21) },
+ { "pxa2xx-mci.0", "tx", PDMA_FILTER_PARAM(LOWEST, 22) },
+ { "pxa-ssp-dai.2", "rx", PDMA_FILTER_PARAM(LOWEST, 66) },
+ { "pxa-ssp-dai.2", "tx", PDMA_FILTER_PARAM(LOWEST, 67) },
+
+ /* PXA3xx specific map */
+ { "pxa-ssp-dai.3", "rx", PDMA_FILTER_PARAM(LOWEST, 2) },
+ { "pxa-ssp-dai.3", "tx", PDMA_FILTER_PARAM(LOWEST, 3) },
+ { "pxa2xx-mci.1", "rx", PDMA_FILTER_PARAM(LOWEST, 93) },
+ { "pxa2xx-mci.1", "tx", PDMA_FILTER_PARAM(LOWEST, 94) },
+ { "pxa3xx-nand", "data", PDMA_FILTER_PARAM(LOWEST, 97) },
+ { "pxa2xx-mci.2", "rx", PDMA_FILTER_PARAM(LOWEST, 100) },
+ { "pxa2xx-mci.2", "tx", PDMA_FILTER_PARAM(LOWEST, 101) },
+};
+
+static struct mmp_dma_platdata pxa3xx_dma_pdata = {
+ .dma_channels = 32,
+ .nb_requestors = 100,
+ .slave_map = pxa3xx_slave_map,
+ .slave_map_cnt = ARRAY_SIZE(pxa3xx_slave_map),
+};
+
static int __init pxa3xx_init(void)
{
int ret = 0;
@@ -456,7 +495,7 @@ static int __init pxa3xx_init(void)
if (of_have_populated_dt())
return 0;
- pxa2xx_set_dmac_info(32, 100);
+ pxa2xx_set_dmac_info(&pxa3xx_dma_pdata);
ret = platform_add_devices(devices, ARRAY_SIZE(devices));
if (ret)
return ret;
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 96a7b6cf459b..b169e580bf82 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -702,7 +702,6 @@ config ARM_THUMBEE
config ARM_VIRT_EXT
bool
- depends on MMU
default y if CPU_V7
help
Enable the kernel to make use of the ARM Virtualization
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index be0fa7e39c26..ba0e786c952e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1151,6 +1151,11 @@ int arm_dma_supported(struct device *dev, u64 mask)
return __dma_supported(dev, mask, false);
}
+static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
+{
+ return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
+}
+
#ifdef CONFIG_ARM_DMA_USE_IOMMU
static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
@@ -2296,7 +2301,7 @@ void arm_iommu_detach_device(struct device *dev)
iommu_detach_device(mapping->domain, dev);
kref_put(&mapping->kref, release_iommu_mapping);
to_dma_iommu_mapping(dev) = NULL;
- set_dma_ops(dev, NULL);
+ set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
@@ -2357,11 +2362,6 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
#endif /* CONFIG_ARM_DMA_USE_IOMMU */
-static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
-{
- return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
-}
-
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 5dd6c58d653b..7d67c70bbded 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -53,7 +53,8 @@ static inline bool security_extensions_enabled(void)
{
/* Check CPUID Identification Scheme before ID_PFR1 read */
if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
- return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+ return cpuid_feature_extract(CPUID_EXT_PFR1, 4) ||
+ cpuid_feature_extract(CPUID_EXT_PFR1, 20);
return 0;
}
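
The widened check reads two ID_PFR1 fields instead of one: Security at bits
[7:4] and, on newer cores, what appears to be the Security fraction field at
bits [23:20]. A sketch of the extraction, assuming cpuid_feature_extract()
follows the usual signed 4-bit ID-register convention:

	#include <stdio.h>

	static int feature_extract(unsigned int reg, int pos)
	{
		int field = (reg >> pos) & 0xf;

		return field > 7 ? field - 16 : field; /* ID fields are signed */
	}

	int main(void)
	{
		unsigned int id_pfr1 = (1u << 20) | (1u << 4);

		printf("sec=%d frac=%d\n",
		       feature_extract(id_pfr1, 4),
		       feature_extract(id_pfr1, 20));
		return 0;
	}
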
diff --git a/arch/arm/mm/tcm.h b/arch/arm/mm/tcm.h
index 8015ad434a40..24101925fe64 100644
--- a/arch/arm/mm/tcm.h
+++ b/arch/arm/mm/tcm.h
@@ -11,7 +11,7 @@
void __init tcm_init(void);
#else
/* No TCM support, just blank inlines to be optimized out */
-inline void tcm_init(void)
+static inline void tcm_init(void)
{
}
#endif
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index f6a62ae44a65..25b3ee85066e 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -22,6 +22,7 @@
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>
+#include <asm/system_info.h>
#include "bpf_jit_32.h"
@@ -47,32 +48,73 @@
* The callee saved registers depends on whether frame pointers are enabled.
* With frame pointers (to be compliant with the ABI):
*
- * high
- * original ARM_SP => +------------------+ \
- * | pc | |
- * current ARM_FP => +------------------+ } callee saved registers
- * |r4-r8,r10,fp,ip,lr| |
- * +------------------+ /
- * low
+ * high
+ * original ARM_SP => +--------------+ \
+ * | pc | |
+ * current ARM_FP => +--------------+ } callee saved registers
+ * |r4-r9,fp,ip,lr| |
+ * +--------------+ /
+ * low
*
* Without frame pointers:
*
- * high
- * original ARM_SP => +------------------+
- * | r4-r8,r10,fp,lr | callee saved registers
- * current ARM_FP => +------------------+
- * low
+ * high
+ * original ARM_SP => +--------------+
+ * | r4-r9,fp,lr | callee saved registers
+ * current ARM_FP => +--------------+
+ * low
*
* When popping registers off the stack at the end of a BPF function, we
* reference them via the current ARM_FP register.
*/
#define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
- 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \
+ 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
1 << ARM_FP)
#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
#define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC)
-#define STACK_OFFSET(k) (k)
+enum {
+ /* Stack layout - these are offsets from (top of stack - 4) */
+ BPF_R2_HI,
+ BPF_R2_LO,
+ BPF_R3_HI,
+ BPF_R3_LO,
+ BPF_R4_HI,
+ BPF_R4_LO,
+ BPF_R5_HI,
+ BPF_R5_LO,
+ BPF_R7_HI,
+ BPF_R7_LO,
+ BPF_R8_HI,
+ BPF_R8_LO,
+ BPF_R9_HI,
+ BPF_R9_LO,
+ BPF_FP_HI,
+ BPF_FP_LO,
+ BPF_TC_HI,
+ BPF_TC_LO,
+ BPF_AX_HI,
+ BPF_AX_LO,
+ /* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
+ * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
+ * BPF_REG_FP and Tail call counts.
+ */
+ BPF_JIT_SCRATCH_REGS,
+};
+
+/*
+ * Negative "register" values indicate that the register is stored on the
+ * stack; the value is the offset from the top of the eBPF JIT scratch space.
+ */
+#define STACK_OFFSET(k) (-4 - (k) * 4)
+#define SCRATCH_SIZE (BPF_JIT_SCRATCH_REGS * 4)
+
+#ifdef CONFIG_FRAME_POINTER
+#define EBPF_SCRATCH_TO_ARM_FP(x) ((x) - 4 * hweight16(CALLEE_PUSH_MASK) - 4)
+#else
+#define EBPF_SCRATCH_TO_ARM_FP(x) (x)
+#endif
+
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */
#define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */
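
The enum above names each scratch slot, and STACK_OFFSET() turns a slot index
into a negative FP-relative byte offset: slot 0 lands at -4 and each further
slot sits 4 bytes lower. A quick standalone check of the arithmetic:

	#include <stdio.h>

	#define STACK_OFFSET(k)	(-4 - (k) * 4)

	int main(void)
	{
		/* BPF_R2_HI = 0, BPF_R2_LO = 1, ... as in the enum above */
		for (int k = 0; k < 4; k++)
			printf("slot %d -> %d\n", k, STACK_OFFSET(k));
		return 0;	/* -4, -8, -12, -16 */
	}
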
@@ -94,35 +136,35 @@
* scratch memory space and we have to build eBPF 64 bit register from those.
*
*/
-static const u8 bpf2a32[][2] = {
+static const s8 bpf2a32[][2] = {
/* return value from in-kernel function, and exit value from eBPF */
[BPF_REG_0] = {ARM_R1, ARM_R0},
/* arguments from eBPF program to in-kernel function */
[BPF_REG_1] = {ARM_R3, ARM_R2},
/* Stored on stack scratch space */
- [BPF_REG_2] = {STACK_OFFSET(0), STACK_OFFSET(4)},
- [BPF_REG_3] = {STACK_OFFSET(8), STACK_OFFSET(12)},
- [BPF_REG_4] = {STACK_OFFSET(16), STACK_OFFSET(20)},
- [BPF_REG_5] = {STACK_OFFSET(24), STACK_OFFSET(28)},
+ [BPF_REG_2] = {STACK_OFFSET(BPF_R2_HI), STACK_OFFSET(BPF_R2_LO)},
+ [BPF_REG_3] = {STACK_OFFSET(BPF_R3_HI), STACK_OFFSET(BPF_R3_LO)},
+ [BPF_REG_4] = {STACK_OFFSET(BPF_R4_HI), STACK_OFFSET(BPF_R4_LO)},
+ [BPF_REG_5] = {STACK_OFFSET(BPF_R5_HI), STACK_OFFSET(BPF_R5_LO)},
/* callee saved registers that in-kernel function will preserve */
[BPF_REG_6] = {ARM_R5, ARM_R4},
/* Stored on stack scratch space */
- [BPF_REG_7] = {STACK_OFFSET(32), STACK_OFFSET(36)},
- [BPF_REG_8] = {STACK_OFFSET(40), STACK_OFFSET(44)},
- [BPF_REG_9] = {STACK_OFFSET(48), STACK_OFFSET(52)},
+ [BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)},
+ [BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
+ [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
/* Read only Frame Pointer to access Stack */
- [BPF_REG_FP] = {STACK_OFFSET(56), STACK_OFFSET(60)},
+ [BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)},
/* Temporary Register for internal BPF JIT, can be used
* for constant blindings and others.
*/
[TMP_REG_1] = {ARM_R7, ARM_R6},
- [TMP_REG_2] = {ARM_R10, ARM_R8},
+ [TMP_REG_2] = {ARM_R9, ARM_R8},
/* Tail call count. Stored on stack scratch space. */
- [TCALL_CNT] = {STACK_OFFSET(64), STACK_OFFSET(68)},
+ [TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)},
/* temporary register for blinding constants.
* Stored on stack scratch space.
*/
- [BPF_REG_AX] = {STACK_OFFSET(72), STACK_OFFSET(76)},
+ [BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
};
#define dst_lo dst[1]
@@ -151,6 +193,7 @@ struct jit_ctx {
unsigned int idx;
unsigned int prologue_bytes;
unsigned int epilogue_offset;
+ unsigned int cpu_architecture;
u32 flags;
u32 *offsets;
u32 *target;
@@ -196,9 +239,55 @@ static inline void emit(u32 inst, struct jit_ctx *ctx)
}
/*
+ * This is rather horrid, but necessary to convert an integer constant
+ * to an immediate operand for the opcodes, and be able to detect at
+ * build time whether the constant can't be converted (iow, usable in
+ * BUILD_BUG_ON()).
+ */
+#define imm12val(v, s) (rol32(v, (s)) | (s) << 7)
+#define const_imm8m(x) \
+ ({ int r; \
+ u32 v = (x); \
+ if (!(v & ~0x000000ff)) \
+ r = imm12val(v, 0); \
+ else if (!(v & ~0xc000003f)) \
+ r = imm12val(v, 2); \
+ else if (!(v & ~0xf000000f)) \
+ r = imm12val(v, 4); \
+ else if (!(v & ~0xfc000003)) \
+ r = imm12val(v, 6); \
+ else if (!(v & ~0xff000000)) \
+ r = imm12val(v, 8); \
+ else if (!(v & ~0x3fc00000)) \
+ r = imm12val(v, 10); \
+ else if (!(v & ~0x0ff00000)) \
+ r = imm12val(v, 12); \
+ else if (!(v & ~0x03fc0000)) \
+ r = imm12val(v, 14); \
+ else if (!(v & ~0x00ff0000)) \
+ r = imm12val(v, 16); \
+ else if (!(v & ~0x003fc000)) \
+ r = imm12val(v, 18); \
+ else if (!(v & ~0x000ff000)) \
+ r = imm12val(v, 20); \
+ else if (!(v & ~0x0003fc00)) \
+ r = imm12val(v, 22); \
+ else if (!(v & ~0x0000ff00)) \
+ r = imm12val(v, 24); \
+ else if (!(v & ~0x00003fc0)) \
+ r = imm12val(v, 26); \
+ else if (!(v & ~0x00000ff0)) \
+ r = imm12val(v, 28); \
+ else if (!(v & ~0x000003fc)) \
+ r = imm12val(v, 30); \
+ else \
+ r = -1; \
+ r; })
+
+/*
* Checks if immediate value can be converted to imm12(12 bits) value.
*/
-static int16_t imm8m(u32 x)
+static int imm8m(u32 x)
{
u32 rot;
@@ -208,6 +297,38 @@ static int16_t imm8m(u32 x)
return -1;
}
+#define imm8m(x) (__builtin_constant_p(x) ? const_imm8m(x) : imm8m(x))
+
+static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12)
+{
+ op |= rt << 12 | rn << 16;
+ if (imm12 >= 0)
+ op |= ARM_INST_LDST__U;
+ else
+ imm12 = -imm12;
+ return op | (imm12 & ARM_INST_LDST__IMM12);
+}
+
+static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8)
+{
+ op |= rt << 12 | rn << 16;
+ if (imm8 >= 0)
+ op |= ARM_INST_LDST__U;
+ else
+ imm8 = -imm8;
+ return op | (imm8 & 0xf0) << 4 | (imm8 & 0x0f);
+}
+
+#define ARM_LDR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off)
+#define ARM_LDRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off)
+#define ARM_LDRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off)
+#define ARM_LDRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off)
+
+#define ARM_STR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off)
+#define ARM_STRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off)
+#define ARM_STRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off)
+#define ARM_STRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off)
+
/*
* Initializes the JIT space with undefined instructions.
*/
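
const_imm8m() enumerates every even rotation of an 8-bit value, which is
exactly the set of constants an ARM data-processing instruction can encode,
and imm12val() packs half the rotate amount into bits [11:8] (the (s) << 7
term). A standalone spot-check of one branch (GNU C):

	#include <stdio.h>

	static unsigned int rol32(unsigned int v, int s)
	{
		return s ? (v << s) | (v >> (32 - s)) : v;
	}

	#define imm12val(v, s)	(rol32(v, (s)) | (s) << 7)

	int main(void)
	{
		/* the (!(v & ~0xff000000)) branch: 0xff rotated right by 8,
		 * encoded as rotation field 4 and immediate 0xff */
		printf("%#x\n", imm12val(0xff000000u, 8));	/* 0x4ff */
		return 0;
	}
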
@@ -227,19 +348,10 @@ static void jit_fill_hole(void *area, unsigned int size)
#define STACK_ALIGNMENT 4
#endif
-/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
- * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
- * BPF_REG_FP and Tail call counts.
- */
-#define SCRATCH_SIZE 80
-
/* total stack size used in JITed code */
#define _STACK_SIZE (ctx->prog->aux->stack_depth + SCRATCH_SIZE)
#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
-/* Get the offset of eBPF REGISTERs stored on scratch space. */
-#define STACK_VAR(off) (STACK_SIZE - off)
-
#if __LINUX_ARM_ARCH__ < 7
static u16 imm_offset(u32 k, struct jit_ctx *ctx)
@@ -355,7 +467,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
{
- const u8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp = bpf2a32[TMP_REG_1];
#if __LINUX_ARM_ARCH__ == 7
if (elf_hwcap & HWCAP_IDIVA) {
@@ -402,44 +514,110 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
}
-/* Checks whether BPF register is on scratch stack space or not. */
-static inline bool is_on_stack(u8 bpf_reg)
+/* Is the translated BPF register on stack? */
+static bool is_stacked(s8 reg)
+{
+ return reg < 0;
+}
+
+/* If a BPF register is on the stack (is_stacked() returns true), load it
+ * to the supplied temporary register and return the temporary register
+ * for subsequent operations; otherwise just use the CPU register.
+ */
+static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx)
+{
+ if (is_stacked(reg)) {
+ emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
+ reg = tmp;
+ }
+ return reg;
+}
+
+static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp,
+ struct jit_ctx *ctx)
{
- static u8 stack_regs[] = {BPF_REG_AX, BPF_REG_3, BPF_REG_4, BPF_REG_5,
- BPF_REG_7, BPF_REG_8, BPF_REG_9, TCALL_CNT,
- BPF_REG_2, BPF_REG_FP};
- int i, reg_len = sizeof(stack_regs);
-
- for (i = 0 ; i < reg_len ; i++) {
- if (bpf_reg == stack_regs[i])
- return true;
+ if (is_stacked(reg[1])) {
+ if (__LINUX_ARM_ARCH__ >= 6 ||
+ ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
+ emit(ARM_LDRD_I(tmp[1], ARM_FP,
+ EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
+ } else {
+ emit(ARM_LDR_I(tmp[1], ARM_FP,
+ EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
+ emit(ARM_LDR_I(tmp[0], ARM_FP,
+ EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
+ }
+ reg = tmp;
+ }
+ return reg;
+}
+
+/* If a BPF register is on the stack (is_stacked() returns true), save the
+ * register back to the stack. If the source register is not the same,
+ * then move it into the correct register.
+ */
+static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx)
+{
+ if (is_stacked(reg))
+ emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
+ else if (reg != src)
+ emit(ARM_MOV_R(reg, src), ctx);
+}
+
+static void arm_bpf_put_reg64(const s8 *reg, const s8 *src,
+ struct jit_ctx *ctx)
+{
+ if (is_stacked(reg[1])) {
+ if (__LINUX_ARM_ARCH__ >= 6 ||
+ ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
+ emit(ARM_STRD_I(src[1], ARM_FP,
+ EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
+ } else {
+ emit(ARM_STR_I(src[1], ARM_FP,
+ EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
+ emit(ARM_STR_I(src[0], ARM_FP,
+ EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
+ }
+ } else {
+ if (reg[1] != src[1])
+ emit(ARM_MOV_R(reg[1], src[1]), ctx);
+ if (reg[0] != src[0])
+ emit(ARM_MOV_R(reg[0], src[0]), ctx);
}
- return false;
}
-static inline void emit_a32_mov_i(const u8 dst, const u32 val,
- bool dstk, struct jit_ctx *ctx)
+static inline void emit_a32_mov_i(const s8 dst, const u32 val,
+ struct jit_ctx *ctx)
{
- const u8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp = bpf2a32[TMP_REG_1];
- if (dstk) {
+ if (is_stacked(dst)) {
emit_mov_i(tmp[1], val, ctx);
- emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(dst)), ctx);
+ arm_bpf_put_reg32(dst, tmp[1], ctx);
} else {
emit_mov_i(dst, val, ctx);
}
}
+static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx)
+{
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
+
+ emit_mov_i(rd[1], (u32)val, ctx);
+ emit_mov_i(rd[0], val >> 32, ctx);
+
+ arm_bpf_put_reg64(dst, rd, ctx);
+}
+
/* Sign extended move */
-static inline void emit_a32_mov_i64(const bool is64, const u8 dst[],
- const u32 val, bool dstk,
- struct jit_ctx *ctx) {
- u32 hi = 0;
+static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[],
+ const u32 val, struct jit_ctx *ctx) {
+ u64 val64 = val;
if (is64 && (val & (1<<31)))
- hi = (u32)~0;
- emit_a32_mov_i(dst_lo, val, dstk, ctx);
- emit_a32_mov_i(dst_hi, hi, dstk, ctx);
+ val64 |= 0xffffffff00000000ULL;
+ emit_a32_mov_i64(dst, val64, ctx);
}
static inline void emit_a32_add_r(const u8 dst, const u8 src,
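
The four accessors above replace the open-coded dstk/sstk load/store pairs:
a caller asks for a register and receives either the real CPU register or a
temporary freshly loaded from the FP-relative scratch area, operates on it,
then puts the result back. A toy model of the protocol (plain C, arrays
standing in for machine registers and the scratch area):

	#include <stdio.h>

	static int spill[8];	/* scratch slots for stacked registers */
	static int cpu_reg[8];	/* real machine registers */

	static int is_stacked(int reg) { return reg < 0; }

	/* negative regs -1..-8 map to spill slots 0..7 */
	static int get_reg32(int reg, int tmp)
	{
		if (is_stacked(reg)) {
			cpu_reg[tmp] = spill[-reg - 1];	/* JIT emits LDR */
			return tmp;
		}
		return reg;
	}

	static void put_reg32(int reg, int src)
	{
		if (is_stacked(reg))
			spill[-reg - 1] = cpu_reg[src];	/* JIT emits STR */
		else if (reg != src)
			cpu_reg[reg] = cpu_reg[src];	/* MOV only if needed */
	}

	int main(void)
	{
		spill[0] = 42;

		int r = get_reg32(-1, 6);	/* stacked: value lands in r6 */
		cpu_reg[r] += 1;
		put_reg32(-1, r);
		printf("%d\n", spill[0]);	/* 43 */
		return 0;
	}
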
@@ -521,75 +699,94 @@ static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
/* ALU operation (32 bit)
* dst = dst (op) src
*/
-static inline void emit_a32_alu_r(const u8 dst, const u8 src,
- bool dstk, bool sstk,
+static inline void emit_a32_alu_r(const s8 dst, const s8 src,
struct jit_ctx *ctx, const bool is64,
const bool hi, const u8 op) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- u8 rn = sstk ? tmp[1] : src;
-
- if (sstk)
- emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src)), ctx);
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ s8 rn, rd;
+ rn = arm_bpf_get_reg32(src, tmp[1], ctx);
+ rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
/* ALU operation */
- if (dstk) {
- emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx);
- emit_alu_r(tmp[0], rn, is64, hi, op, ctx);
- emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx);
- } else {
- emit_alu_r(dst, rn, is64, hi, op, ctx);
- }
+ emit_alu_r(rd, rn, is64, hi, op, ctx);
+ arm_bpf_put_reg32(dst, rd, ctx);
}
/* ALU operation (64 bit) */
-static inline void emit_a32_alu_r64(const bool is64, const u8 dst[],
- const u8 src[], bool dstk,
- bool sstk, struct jit_ctx *ctx,
+static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
+ const s8 src[], struct jit_ctx *ctx,
const u8 op) {
- emit_a32_alu_r(dst_lo, src_lo, dstk, sstk, ctx, is64, false, op);
- if (is64)
- emit_a32_alu_r(dst_hi, src_hi, dstk, sstk, ctx, is64, true, op);
- else
- emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd;
+
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
+ if (is64) {
+ const s8 *rs;
+
+ rs = arm_bpf_get_reg64(src, tmp2, ctx);
+
+ /* ALU operation */
+ emit_alu_r(rd[1], rs[1], true, false, op, ctx);
+ emit_alu_r(rd[0], rs[0], true, true, op, ctx);
+ } else {
+ s8 rs;
+
+ rs = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+
+ /* ALU operation */
+ emit_alu_r(rd[1], rs, true, false, op, ctx);
+ emit_a32_mov_i(rd[0], 0, ctx);
+ }
+
+ arm_bpf_put_reg64(dst, rd, ctx);
}
-/* dst = imm (4 bytes)*/
-static inline void emit_a32_mov_r(const u8 dst, const u8 src,
- bool dstk, bool sstk,
+/* dst = src (4 bytes)*/
+static inline void emit_a32_mov_r(const s8 dst, const s8 src,
struct jit_ctx *ctx) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- u8 rt = sstk ? tmp[0] : src;
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ s8 rt;
- if (sstk)
- emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(src)), ctx);
- if (dstk)
- emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst)), ctx);
- else
- emit(ARM_MOV_R(dst, rt), ctx);
+ rt = arm_bpf_get_reg32(src, tmp[0], ctx);
+ arm_bpf_put_reg32(dst, rt, ctx);
}
/* dst = src */
-static inline void emit_a32_mov_r64(const bool is64, const u8 dst[],
- const u8 src[], bool dstk,
- bool sstk, struct jit_ctx *ctx) {
- emit_a32_mov_r(dst_lo, src_lo, dstk, sstk, ctx);
- if (is64) {
+static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
+ const s8 src[],
+ struct jit_ctx *ctx) {
+ if (!is64) {
+ emit_a32_mov_r(dst_lo, src_lo, ctx);
+ /* Zero out high 4 bytes */
+ emit_a32_mov_i(dst_hi, 0, ctx);
+ } else if (__LINUX_ARM_ARCH__ < 6 &&
+ ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
/* complete 8 byte move */
- emit_a32_mov_r(dst_hi, src_hi, dstk, sstk, ctx);
+ emit_a32_mov_r(dst_lo, src_lo, ctx);
+ emit_a32_mov_r(dst_hi, src_hi, ctx);
+ } else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+
+ emit(ARM_LDRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
+ emit(ARM_STRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
+ } else if (is_stacked(src_lo)) {
+ emit(ARM_LDRD_I(dst[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
+ } else if (is_stacked(dst_lo)) {
+ emit(ARM_STRD_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
} else {
- /* Zero out high 4 bytes */
- emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+ emit(ARM_MOV_R(dst[0], src[0]), ctx);
+ emit(ARM_MOV_R(dst[1], src[1]), ctx);
}
}
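
Editor's note: this hunk relies on is_stacked() and EBPF_SCRATCH_TO_ARM_FP(), which are introduced earlier in the patch and not shown here. As a hedged sketch of what they imply, negative s8 register numbers denote BPF registers spilled to the ARM stack frame:

	/* Sketch only; the real definitions live earlier in this patch.
	 * Stacked BPF registers are encoded as negative word indices,
	 * turned into byte offsets relative to the ARM frame pointer.
	 */
	static bool is_stacked(s8 reg)
	{
		return reg < 0;
	}

	#define EBPF_SCRATCH_TO_ARM_FP(x)	((int)(x) * 4)

Because the index is negative, the macro yields a negative displacement from ARM_FP, which the U-bit-aware LDRD/STRD encodings added by this patch can express directly.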
/* Shift operations */
-static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk,
+static inline void emit_a32_alu_i(const s8 dst, const u32 val,
struct jit_ctx *ctx, const u8 op) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- u8 rd = dstk ? tmp[0] : dst;
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ s8 rd;
- if (dstk)
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
+ rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
/* Do shift operation */
switch (op) {
@@ -604,303 +801,245 @@ static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk,
break;
}
- if (dstk)
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
+ arm_bpf_put_reg32(dst, rd, ctx);
}
/* dst = ~dst (64 bit) */
-static inline void emit_a32_neg64(const u8 dst[], bool dstk,
+static inline void emit_a32_neg64(const s8 dst[],
struct jit_ctx *ctx){
- const u8 *tmp = bpf2a32[TMP_REG_1];
- u8 rd = dstk ? tmp[1] : dst[1];
- u8 rm = dstk ? tmp[0] : dst[0];
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *rd;
/* Setup Operand */
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do Negate Operation */
- emit(ARM_RSBS_I(rd, rd, 0), ctx);
- emit(ARM_RSC_I(rm, rm, 0), ctx);
+ emit(ARM_RSBS_I(rd[1], rd[1], 0), ctx);
+ emit(ARM_RSC_I(rd[0], rd[0], 0), ctx);
- if (dstk) {
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ arm_bpf_put_reg64(dst, rd, ctx);
}
/* dst = dst << src */
-static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk,
- bool sstk, struct jit_ctx *ctx) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[],
+ struct jit_ctx *ctx) {
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd;
+ s8 rt;
/* Setup Operands */
- u8 rt = sstk ? tmp2[1] : src_lo;
- u8 rd = dstk ? tmp[1] : dst_lo;
- u8 rm = dstk ? tmp[0] : dst_hi;
-
- if (sstk)
- emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do LSH operation */
emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
- emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx);
- emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx);
- emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx);
- emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_ASL, rt), ctx);
-
- if (dstk) {
- emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
- } else {
- emit(ARM_MOV_R(rd, ARM_LR), ctx);
- emit(ARM_MOV_R(rm, ARM_IP), ctx);
- }
+ emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx);
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[1], SRTYPE_ASL, ARM_IP), ctx);
+ emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd[1], SRTYPE_LSR, tmp2[0]), ctx);
+ emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx);
+
+ arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
+ arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}
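
Editor's note: the four shift/or instructions compose a full 64-bit left shift out of 32-bit operations, relying on ARM register-specified shifts yielding zero once the amount reaches 32. A C model of the emitted computation (helper names are illustrative):

	#include <stdint.h>

	/* ARM LSL/LSR by register use the bottom byte of the shift amount
	 * and produce 0 once that byte reaches 32; shl32()/shr32() model
	 * this so the out-of-range terms vanish instead of invoking UB.
	 */
	static uint32_t shl32(uint32_t v, uint32_t n)
	{
		uint32_t s = n & 0xff;

		return s < 32 ? v << s : 0;
	}

	static uint32_t shr32(uint32_t v, uint32_t n)
	{
		uint32_t s = n & 0xff;

		return s < 32 ? v >> s : 0;
	}

	static uint64_t lsh64(uint32_t hi, uint32_t lo, uint32_t n)
	{
		uint32_t new_hi, new_lo;

		new_hi = shl32(hi, n) | shl32(lo, n - 32) | shr32(lo, 32 - n);
		new_lo = shl32(lo, n);
		return (uint64_t)new_hi << 32 | new_lo;
	}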
/* dst = dst >> src (signed)*/
-static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
- bool sstk, struct jit_ctx *ctx) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[],
+ struct jit_ctx *ctx) {
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd;
+ s8 rt;
+
/* Setup Operands */
- u8 rt = sstk ? tmp2[1] : src_lo;
- u8 rd = dstk ? tmp[1] : dst_lo;
- u8 rm = dstk ? tmp[0] : dst_hi;
-
- if (sstk)
- emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do the ARSH operation */
emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
- emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
- emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
+ emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
_emit(ARM_COND_MI, ARM_B(0), ctx);
- emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASR, tmp2[0]), ctx);
- emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_ASR, rt), ctx);
- if (dstk) {
- emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
- } else {
- emit(ARM_MOV_R(rd, ARM_LR), ctx);
- emit(ARM_MOV_R(rm, ARM_IP), ctx);
- }
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx);
+ emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx);
+
+ arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
+ arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}
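
Editor's note: unlike LSL/LSR, ARM's ASR by register saturates: amounts of 32 or more fill the result with the sign bit rather than zero. That is why the ARM_COND_MI branch above skips the (rt - 32) term when rt < 32. Reusing shl32()/shr32() from the left-shift sketch, a C model:

	#include <stdint.h>

	/* ASR by register: amounts >= 32 give all sign bits, not zero. */
	static uint32_t asr32(uint32_t v, uint32_t n)
	{
		uint32_t s = n & 0xff;

		if (s >= 32)
			s = 31;
		return (uint32_t)((int32_t)v >> s);
	}

	static uint64_t arsh64(uint32_t hi, uint32_t lo, uint32_t n)
	{
		uint32_t new_lo = shr32(lo, n) | shl32(hi, 32 - n);

		if ((int32_t)(n - 32) >= 0)	/* the MI branch skips this term */
			new_lo |= asr32(hi, n - 32);
		return (uint64_t)asr32(hi, n) << 32 | new_lo;
	}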
/* dst = dst >> src */
-static inline void emit_a32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
- bool sstk, struct jit_ctx *ctx) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[],
+ struct jit_ctx *ctx) {
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd;
+ s8 rt;
+
/* Setup Operands */
- u8 rt = sstk ? tmp2[1] : src_lo;
- u8 rd = dstk ? tmp[1] : dst_lo;
- u8 rm = dstk ? tmp[0] : dst_hi;
-
- if (sstk)
- emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do RSH operation */
emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
- emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
- emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
- emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx);
- emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_LSR, rt), ctx);
- if (dstk) {
- emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
- } else {
- emit(ARM_MOV_R(rd, ARM_LR), ctx);
- emit(ARM_MOV_R(rm, ARM_IP), ctx);
- }
+ emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_LSR, tmp2[0]), ctx);
+ emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx);
+
+ arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
+ arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}
/* dst = dst << val */
-static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk,
- const u32 val, struct jit_ctx *ctx){
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
- /* Setup operands */
- u8 rd = dstk ? tmp[1] : dst_lo;
- u8 rm = dstk ? tmp[0] : dst_hi;
+static inline void emit_a32_lsh_i64(const s8 dst[],
+ const u32 val, struct jit_ctx *ctx){
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd;
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ /* Setup operands */
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do LSH operation */
if (val < 32) {
- emit(ARM_MOV_SI(tmp2[0], rm, SRTYPE_ASL, val), ctx);
- emit(ARM_ORR_SI(rm, tmp2[0], rd, SRTYPE_LSR, 32 - val), ctx);
- emit(ARM_MOV_SI(rd, rd, SRTYPE_ASL, val), ctx);
+ emit(ARM_MOV_SI(tmp2[0], rd[0], SRTYPE_ASL, val), ctx);
+ emit(ARM_ORR_SI(rd[0], tmp2[0], rd[1], SRTYPE_LSR, 32 - val), ctx);
+ emit(ARM_MOV_SI(rd[1], rd[1], SRTYPE_ASL, val), ctx);
} else {
if (val == 32)
- emit(ARM_MOV_R(rm, rd), ctx);
+ emit(ARM_MOV_R(rd[0], rd[1]), ctx);
else
- emit(ARM_MOV_SI(rm, rd, SRTYPE_ASL, val - 32), ctx);
- emit(ARM_EOR_R(rd, rd, rd), ctx);
+ emit(ARM_MOV_SI(rd[0], rd[1], SRTYPE_ASL, val - 32), ctx);
+ emit(ARM_EOR_R(rd[1], rd[1], rd[1]), ctx);
}
- if (dstk) {
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ arm_bpf_put_reg64(dst, rd, ctx);
}
/* dst = dst >> val */
-static inline void emit_a32_rsh_i64(const u8 dst[], bool dstk,
+static inline void emit_a32_rsh_i64(const s8 dst[],
const u32 val, struct jit_ctx *ctx) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
- /* Setup operands */
- u8 rd = dstk ? tmp[1] : dst_lo;
- u8 rm = dstk ? tmp[0] : dst_hi;
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd;
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ /* Setup operands */
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do LSR operation */
if (val < 32) {
- emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx);
- emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx);
- emit(ARM_MOV_SI(rm, rm, SRTYPE_LSR, val), ctx);
+ emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
+ emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
+ emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
} else if (val == 32) {
- emit(ARM_MOV_R(rd, rm), ctx);
- emit(ARM_MOV_I(rm, 0), ctx);
+ emit(ARM_MOV_R(rd[1], rd[0]), ctx);
+ emit(ARM_MOV_I(rd[0], 0), ctx);
} else {
- emit(ARM_MOV_SI(rd, rm, SRTYPE_LSR, val - 32), ctx);
- emit(ARM_MOV_I(rm, 0), ctx);
+ emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_LSR, val - 32), ctx);
+ emit(ARM_MOV_I(rd[0], 0), ctx);
}
- if (dstk) {
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ arm_bpf_put_reg64(dst, rd, ctx);
}
/* dst = dst >> val (signed) */
-static inline void emit_a32_arsh_i64(const u8 dst[], bool dstk,
+static inline void emit_a32_arsh_i64(const s8 dst[],
const u32 val, struct jit_ctx *ctx){
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
- /* Setup operands */
- u8 rd = dstk ? tmp[1] : dst_lo;
- u8 rm = dstk ? tmp[0] : dst_hi;
-
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd;
+
+ /* Setup operands */
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Do ARSH operation */
if (val < 32) {
- emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx);
- emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx);
- emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, val), ctx);
+ emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
+ emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
+ emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
} else if (val == 32) {
- emit(ARM_MOV_R(rd, rm), ctx);
- emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx);
+ emit(ARM_MOV_R(rd[1], rd[0]), ctx);
+ emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
} else {
- emit(ARM_MOV_SI(rd, rm, SRTYPE_ASR, val - 32), ctx);
- emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx);
+ emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_ASR, val - 32), ctx);
+ emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
}
- if (dstk) {
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ arm_bpf_put_reg64(dst, rd, ctx);
}
-static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
- bool sstk, struct jit_ctx *ctx) {
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
+ struct jit_ctx *ctx) {
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rd, *rt;
+
/* Setup operands for multiplication */
- u8 rd = dstk ? tmp[1] : dst_lo;
- u8 rm = dstk ? tmp[0] : dst_hi;
- u8 rt = sstk ? tmp2[1] : src_lo;
- u8 rn = sstk ? tmp2[0] : src_hi;
-
- if (dstk) {
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
- if (sstk) {
- emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
- emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_hi)), ctx);
- }
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
+ rt = arm_bpf_get_reg64(src, tmp2, ctx);
/* Do Multiplication */
- emit(ARM_MUL(ARM_IP, rd, rn), ctx);
- emit(ARM_MUL(ARM_LR, rm, rt), ctx);
+ emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx);
+ emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx);
emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
- emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx);
- emit(ARM_ADD_R(rm, ARM_LR, rm), ctx);
- if (dstk) {
- emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
- } else {
- emit(ARM_MOV_R(rd, ARM_IP), ctx);
- }
+ emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx);
+ emit(ARM_ADD_R(rd[0], ARM_LR, rd[0]), ctx);
+
+ arm_bpf_put_reg32(dst_lo, ARM_IP, ctx);
+ arm_bpf_put_reg32(dst_hi, rd[0], ctx);
}
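
Editor's note: the five-instruction sequence computes the low 64 bits of a 64x64 product from 32-bit halves: two cross products, one widening multiply, and two adds. Equivalent C (a sketch; the truncation to 64 bits is exactly what eBPF's 64-bit MUL requires):

	#include <stdint.h>

	/* cross  = dst_hi * src_lo + dst_lo * src_hi   (two MULs + ADD)
	 * p      = (u64)dst_lo * src_lo                (UMULL)
	 * result = ((u32)(p >> 32) + cross) : (u32)p   (final ADD)
	 */
	static uint64_t mul64(uint32_t dst_hi, uint32_t dst_lo,
			      uint32_t src_hi, uint32_t src_lo)
	{
		uint32_t cross = dst_hi * src_lo + dst_lo * src_hi;
		uint64_t p = (uint64_t)dst_lo * src_lo;
		uint32_t hi = (uint32_t)(p >> 32) + cross;

		return (uint64_t)hi << 32 | (uint32_t)p;
	}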
/* *(size *)(dst + off) = src */
-static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
- const s32 off, struct jit_ctx *ctx, const u8 sz){
- const u8 *tmp = bpf2a32[TMP_REG_1];
- u8 rd = dstk ? tmp[1] : dst;
-
- if (dstk)
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
- if (off) {
- emit_a32_mov_i(tmp[0], off, false, ctx);
- emit(ARM_ADD_R(tmp[0], rd, tmp[0]), ctx);
+static inline void emit_str_r(const s8 dst, const s8 src[],
+ s32 off, struct jit_ctx *ctx, const u8 sz){
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ s32 off_max;
+ s8 rd;
+
+ rd = arm_bpf_get_reg32(dst, tmp[1], ctx);
+
+ if (sz == BPF_H)
+ off_max = 0xff;
+ else
+ off_max = 0xfff;
+
+ if (off < 0 || off > off_max) {
+ emit_a32_mov_i(tmp[0], off, ctx);
+ emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
rd = tmp[0];
+ off = 0;
}
switch (sz) {
- case BPF_W:
- /* Store a Word */
- emit(ARM_STR_I(src, rd, 0), ctx);
+ case BPF_B:
+ /* Store a Byte */
+ emit(ARM_STRB_I(src_lo, rd, off), ctx);
break;
case BPF_H:
/* Store a HalfWord */
- emit(ARM_STRH_I(src, rd, 0), ctx);
+ emit(ARM_STRH_I(src_lo, rd, off), ctx);
break;
- case BPF_B:
- /* Store a Byte */
- emit(ARM_STRB_I(src, rd, 0), ctx);
+ case BPF_W:
+ /* Store a Word */
+ emit(ARM_STR_I(src_lo, rd, off), ctx);
+ break;
+ case BPF_DW:
+ /* Store a Double Word */
+ emit(ARM_STR_I(src_lo, rd, off), ctx);
+ emit(ARM_STR_I(src_hi, rd, off + 4), ctx);
break;
}
}
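
Editor's note: the off_max split reflects the ARM encodings: word and byte loads/stores carry a 12-bit immediate offset, while the halfword forms only carry 8 bits, so anything outside the range is materialised into a scratch register first. A minimal sketch of the check:

	#include <stdint.h>

	/* Returns nonzero when off fits the immediate field of the chosen
	 * access size; otherwise the JIT adds off into a scratch register
	 * and uses a zero displacement, as emit_str_r() above does.
	 */
	static int offset_encodable(int32_t off, int is_halfword)
	{
		int32_t off_max = is_halfword ? 0xff : 0xfff;

		return off >= 0 && off <= off_max;
	}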
/* dst = *(size*)(src + off) */
-static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
+static inline void emit_ldx_r(const s8 dst[], const s8 src,
s32 off, struct jit_ctx *ctx, const u8 sz){
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *rd = dstk ? tmp : dst;
- u8 rm = src;
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
+ s8 rm = src;
s32 off_max;
if (sz == BPF_H)
@@ -909,7 +1048,7 @@ static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
off_max = 0xfff;
if (off < 0 || off > off_max) {
- emit_a32_mov_i(tmp[0], off, false, ctx);
+ emit_a32_mov_i(tmp[0], off, ctx);
emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
rm = tmp[0];
off = 0;
@@ -921,17 +1060,17 @@ static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
case BPF_B:
/* Load a Byte */
emit(ARM_LDRB_I(rd[1], rm, off), ctx);
- emit_a32_mov_i(dst[0], 0, dstk, ctx);
+ emit_a32_mov_i(rd[0], 0, ctx);
break;
case BPF_H:
/* Load a HalfWord */
emit(ARM_LDRH_I(rd[1], rm, off), ctx);
- emit_a32_mov_i(dst[0], 0, dstk, ctx);
+ emit_a32_mov_i(rd[0], 0, ctx);
break;
case BPF_W:
/* Load a Word */
emit(ARM_LDR_I(rd[1], rm, off), ctx);
- emit_a32_mov_i(dst[0], 0, dstk, ctx);
+ emit_a32_mov_i(rd[0], 0, ctx);
break;
case BPF_DW:
/* Load a Double Word */
@@ -939,10 +1078,7 @@ static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
break;
}
- if (dstk)
- emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx);
- if (dstk && sz == BPF_DW)
- emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx);
+ arm_bpf_put_reg64(dst, rd, ctx);
}
/* Arithmetic Operation */
@@ -981,64 +1117,66 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
- const u8 *r2 = bpf2a32[BPF_REG_2];
- const u8 *r3 = bpf2a32[BPF_REG_3];
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
- const u8 *tcc = bpf2a32[TCALL_CNT];
+ const s8 *r2 = bpf2a32[BPF_REG_2];
+ const s8 *r3 = bpf2a32[BPF_REG_3];
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *tcc = bpf2a32[TCALL_CNT];
+ const s8 *tc;
const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset) - 2)
- u32 off, lo, hi;
+ u32 lo, hi;
+ s8 r_array, r_index;
+ int off;
/* if (index >= array->map.max_entries)
* goto out;
*/
+ BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) >
+ ARM_INST_LDST__IMM12);
off = offsetof(struct bpf_array, map.max_entries);
- /* array->map.max_entries */
- emit_a32_mov_i(tmp[1], off, false, ctx);
- emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
- emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx);
+ r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx);
/* index is 32-bit for arrays */
- emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
+ r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx);
+ /* array->map.max_entries */
+ emit(ARM_LDR_I(tmp[1], r_array, off), ctx);
/* index >= array->map.max_entries */
- emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx);
+ emit(ARM_CMP_R(r_index, tmp[1]), ctx);
_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
+ /* tmp2[0] = array, tmp2[1] = index */
+
/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
* tail_call_cnt++;
*/
lo = (u32)MAX_TAIL_CALL_CNT;
hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
- emit(ARM_LDR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx);
- emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx);
- emit(ARM_CMP_I(tmp[0], hi), ctx);
- _emit(ARM_COND_EQ, ARM_CMP_I(tmp[1], lo), ctx);
+ tc = arm_bpf_get_reg64(tcc, tmp, ctx);
+ emit(ARM_CMP_I(tc[0], hi), ctx);
+ _emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx);
_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
- emit(ARM_ADDS_I(tmp[1], tmp[1], 1), ctx);
- emit(ARM_ADC_I(tmp[0], tmp[0], 0), ctx);
- emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx);
- emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx);
+ emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx);
+ emit(ARM_ADC_I(tc[0], tc[0], 0), ctx);
+ arm_bpf_put_reg64(tcc, tmp, ctx);
/* prog = array->ptrs[index]
* if (prog == NULL)
* goto out;
*/
- off = offsetof(struct bpf_array, ptrs);
- emit_a32_mov_i(tmp[1], off, false, ctx);
- emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
- emit(ARM_ADD_R(tmp[1], tmp2[1], tmp[1]), ctx);
- emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
- emit(ARM_MOV_SI(tmp[0], tmp2[1], SRTYPE_ASL, 2), ctx);
- emit(ARM_LDR_R(tmp[1], tmp[1], tmp[0]), ctx);
+ BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0);
+ off = imm8m(offsetof(struct bpf_array, ptrs));
+ emit(ARM_ADD_I(tmp[1], r_array, off), ctx);
+ emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx);
emit(ARM_CMP_I(tmp[1], 0), ctx);
_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
/* goto *(prog->bpf_func + prologue_size); */
+ BUILD_BUG_ON(offsetof(struct bpf_prog, bpf_func) >
+ ARM_INST_LDST__IMM12);
off = offsetof(struct bpf_prog, bpf_func);
- emit_a32_mov_i(tmp2[1], off, false, ctx);
- emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx);
+ emit(ARM_LDR_I(tmp[1], tmp[1], off), ctx);
emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
emit_bx_r(tmp[1], ctx);
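
Editor's note: taken together, the emitted instructions implement the usual eBPF tail-call contract. A hedged C rendering of that control flow (structures and the constant are stand-ins mirroring struct bpf_array/struct bpf_prog; MAX_TAIL_CALL_CNT is 32 in this kernel):

	#include <stdint.h>
	#include <stddef.h>

	#define MAX_TAIL_CALL_CNT 32

	struct prog { void *bpf_func; };
	struct array {
		struct { uint32_t max_entries; } map;
		struct prog *ptrs[];
	};

	/* Returns the branch target past the callee's prologue, or NULL
	 * for the fall-through ("out") path. The prologue is skipped
	 * because a tail call reuses the caller's existing stack frame.
	 */
	static void *tail_call_target(struct array *array, uint32_t index,
				      uint64_t *tail_call_cnt,
				      uint32_t prologue_bytes)
	{
		struct prog *prog;

		if (index >= array->map.max_entries)
			return NULL;
		if (*tail_call_cnt > MAX_TAIL_CALL_CNT)
			return NULL;
		(*tail_call_cnt)++;
		prog = array->ptrs[index];
		if (!prog)
			return NULL;
		return (char *)prog->bpf_func + prologue_bytes;
	}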
@@ -1059,7 +1197,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
@@ -1074,7 +1212,7 @@ static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
@@ -1094,28 +1232,27 @@ static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
}
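
Editor's note: on pre-ARMv6 cores there is no REV instruction, so the byte swap is composed from AND/shift/OR. The emitted sequence is equivalent to the classic:

	#include <stdint.h>

	/* 32-bit byte swap without REV: isolate each byte and move it to
	 * its mirrored position.
	 */
	static uint32_t bswap32(uint32_t x)
	{
		return (x & 0x000000ff) << 24 |
		       (x & 0x0000ff00) <<  8 |
		       (x & 0x00ff0000) >>  8 |
		       (x & 0xff000000) >> 24;
	}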
// push the scratch stack register on top of the stack
-static inline void emit_push_r64(const u8 src[], const u8 shift,
- struct jit_ctx *ctx)
+static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx)
{
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *rt;
u16 reg_set = 0;
- emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(src[1]+shift)), ctx);
- emit(ARM_LDR_I(tmp2[0], ARM_SP, STACK_VAR(src[0]+shift)), ctx);
+ rt = arm_bpf_get_reg64(src, tmp2, ctx);
- reg_set = (1 << tmp2[1]) | (1 << tmp2[0]);
+ reg_set = (1 << rt[1]) | (1 << rt[0]);
emit(ARM_PUSH(reg_set), ctx);
}
static void build_prologue(struct jit_ctx *ctx)
{
- const u8 r0 = bpf2a32[BPF_REG_0][1];
- const u8 r2 = bpf2a32[BPF_REG_1][1];
- const u8 r3 = bpf2a32[BPF_REG_1][0];
- const u8 r4 = bpf2a32[BPF_REG_6][1];
- const u8 fplo = bpf2a32[BPF_REG_FP][1];
- const u8 fphi = bpf2a32[BPF_REG_FP][0];
- const u8 *tcc = bpf2a32[TCALL_CNT];
+ const s8 r0 = bpf2a32[BPF_REG_0][1];
+ const s8 r2 = bpf2a32[BPF_REG_1][1];
+ const s8 r3 = bpf2a32[BPF_REG_1][0];
+ const s8 r4 = bpf2a32[BPF_REG_6][1];
+ const s8 fplo = bpf2a32[BPF_REG_FP][1];
+ const s8 fphi = bpf2a32[BPF_REG_FP][0];
+ const s8 *tcc = bpf2a32[TCALL_CNT];
/* Save callee saved registers. */
#ifdef CONFIG_FRAME_POINTER
@@ -1136,8 +1273,8 @@ static void build_prologue(struct jit_ctx *ctx)
emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
/* Set up BPF prog stack base register */
- emit_a32_mov_r(fplo, ARM_IP, true, false, ctx);
- emit_a32_mov_i(fphi, 0, true, ctx);
+ emit_a32_mov_r(fplo, ARM_IP, ctx);
+ emit_a32_mov_i(fphi, 0, ctx);
/* mov r4, 0 */
emit(ARM_MOV_I(r4, 0), ctx);
@@ -1146,8 +1283,8 @@ static void build_prologue(struct jit_ctx *ctx)
emit(ARM_MOV_R(r3, r4), ctx);
emit(ARM_MOV_R(r2, r0), ctx);
/* Initialize Tail Count */
- emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[0])), ctx);
- emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[1])), ctx);
+ emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[0])), ctx);
+ emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[1])), ctx);
/* end of prologue */
}
@@ -1178,17 +1315,16 @@ static void build_epilogue(struct jit_ctx *ctx)
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
const u8 code = insn->code;
- const u8 *dst = bpf2a32[insn->dst_reg];
- const u8 *src = bpf2a32[insn->src_reg];
- const u8 *tmp = bpf2a32[TMP_REG_1];
- const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s8 *dst = bpf2a32[insn->dst_reg];
+ const s8 *src = bpf2a32[insn->src_reg];
+ const s8 *tmp = bpf2a32[TMP_REG_1];
+ const s8 *tmp2 = bpf2a32[TMP_REG_2];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
const bool is64 = BPF_CLASS(code) == BPF_ALU64;
- const bool dstk = is_on_stack(insn->dst_reg);
- const bool sstk = is_on_stack(insn->src_reg);
- u8 rd, rt, rm, rn;
+ const s8 *rd, *rs;
+ s8 rd_lo, rt, rm, rn;
s32 jmp_offset;
#define check_imm(bits, imm) do { \
@@ -1211,11 +1347,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
case BPF_ALU64 | BPF_MOV | BPF_X:
switch (BPF_SRC(code)) {
case BPF_X:
- emit_a32_mov_r64(is64, dst, src, dstk, sstk, ctx);
+ emit_a32_mov_r64(is64, dst, src, ctx);
break;
case BPF_K:
/* Sign-extend immediate value to destination reg */
- emit_a32_mov_i64(is64, dst, imm, dstk, ctx);
+ emit_a32_mov_se_i64(is64, dst, imm, ctx);
break;
}
break;
@@ -1255,8 +1391,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
case BPF_ALU64 | BPF_XOR | BPF_X:
switch (BPF_SRC(code)) {
case BPF_X:
- emit_a32_alu_r64(is64, dst, src, dstk, sstk,
- ctx, BPF_OP(code));
+ emit_a32_alu_r64(is64, dst, src, ctx, BPF_OP(code));
break;
case BPF_K:
/* Move immediate value to the temporary register
@@ -1265,9 +1400,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
* value into temporary reg and then it would be
* safe to do the operation on it.
*/
- emit_a32_mov_i64(is64, tmp2, imm, false, ctx);
- emit_a32_alu_r64(is64, dst, tmp2, dstk, false,
- ctx, BPF_OP(code));
+ emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
+ emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code));
break;
}
break;
@@ -1277,26 +1411,22 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU | BPF_MOD | BPF_X:
- rt = src_lo;
- rd = dstk ? tmp2[1] : dst_lo;
- if (dstk)
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ rd_lo = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx);
switch (BPF_SRC(code)) {
case BPF_X:
- rt = sstk ? tmp2[0] : rt;
- if (sstk)
- emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)),
- ctx);
+ rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx);
break;
case BPF_K:
rt = tmp2[0];
- emit_a32_mov_i(rt, imm, false, ctx);
+ emit_a32_mov_i(rt, imm, ctx);
+ break;
+ default:
+ rt = src_lo;
break;
}
- emit_udivmod(rd, rd, rt, ctx, BPF_OP(code));
- if (dstk)
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+ emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code));
+ arm_bpf_put_reg32(dst_lo, rd_lo, ctx);
+ emit_a32_mov_i(dst_hi, 0, ctx);
break;
case BPF_ALU64 | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_X:
@@ -1310,54 +1440,54 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
if (unlikely(imm > 31))
return -EINVAL;
if (imm)
- emit_a32_alu_i(dst_lo, imm, dstk, ctx, BPF_OP(code));
- emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+ emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code));
+ emit_a32_mov_i(dst_hi, 0, ctx);
break;
/* dst = dst << imm */
case BPF_ALU64 | BPF_LSH | BPF_K:
if (unlikely(imm > 63))
return -EINVAL;
- emit_a32_lsh_i64(dst, dstk, imm, ctx);
+ emit_a32_lsh_i64(dst, imm, ctx);
break;
/* dst = dst >> imm */
case BPF_ALU64 | BPF_RSH | BPF_K:
if (unlikely(imm > 63))
return -EINVAL;
- emit_a32_rsh_i64(dst, dstk, imm, ctx);
+ emit_a32_rsh_i64(dst, imm, ctx);
break;
/* dst = dst << src */
case BPF_ALU64 | BPF_LSH | BPF_X:
- emit_a32_lsh_r64(dst, src, dstk, sstk, ctx);
+ emit_a32_lsh_r64(dst, src, ctx);
break;
/* dst = dst >> src */
case BPF_ALU64 | BPF_RSH | BPF_X:
- emit_a32_rsh_r64(dst, src, dstk, sstk, ctx);
+ emit_a32_rsh_r64(dst, src, ctx);
break;
/* dst = dst >> src (signed) */
case BPF_ALU64 | BPF_ARSH | BPF_X:
- emit_a32_arsh_r64(dst, src, dstk, sstk, ctx);
+ emit_a32_arsh_r64(dst, src, ctx);
break;
/* dst = dst >> imm (signed) */
case BPF_ALU64 | BPF_ARSH | BPF_K:
if (unlikely(imm > 63))
return -EINVAL;
- emit_a32_arsh_i64(dst, dstk, imm, ctx);
+ emit_a32_arsh_i64(dst, imm, ctx);
break;
/* dst = ~dst */
case BPF_ALU | BPF_NEG:
- emit_a32_alu_i(dst_lo, 0, dstk, ctx, BPF_OP(code));
- emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+ emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code));
+ emit_a32_mov_i(dst_hi, 0, ctx);
break;
/* dst = ~dst (64 bit) */
case BPF_ALU64 | BPF_NEG:
- emit_a32_neg64(dst, dstk, ctx);
+ emit_a32_neg64(dst, ctx);
break;
/* dst = dst * src/imm */
case BPF_ALU64 | BPF_MUL | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_K:
switch (BPF_SRC(code)) {
case BPF_X:
- emit_a32_mul_r64(dst, src, dstk, sstk, ctx);
+ emit_a32_mul_r64(dst, src, ctx);
break;
case BPF_K:
/* Move immediate value to the temporary register
@@ -1366,8 +1496,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
* reg then it would be safe to do the operation
* on it.
*/
- emit_a32_mov_i64(is64, tmp2, imm, false, ctx);
- emit_a32_mul_r64(dst, tmp2, dstk, false, ctx);
+ emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
+ emit_a32_mul_r64(dst, tmp2, ctx);
break;
}
break;
@@ -1375,25 +1505,20 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
/* dst = htobe(dst) */
case BPF_ALU | BPF_END | BPF_FROM_LE:
case BPF_ALU | BPF_END | BPF_FROM_BE:
- rd = dstk ? tmp[0] : dst_hi;
- rt = dstk ? tmp[1] : dst_lo;
- if (dstk) {
- emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
if (BPF_SRC(code) == BPF_FROM_LE)
goto emit_bswap_uxt;
switch (imm) {
case 16:
- emit_rev16(rt, rt, ctx);
+ emit_rev16(rd[1], rd[1], ctx);
goto emit_bswap_uxt;
case 32:
- emit_rev32(rt, rt, ctx);
+ emit_rev32(rd[1], rd[1], ctx);
goto emit_bswap_uxt;
case 64:
- emit_rev32(ARM_LR, rt, ctx);
- emit_rev32(rt, rd, ctx);
- emit(ARM_MOV_R(rd, ARM_LR), ctx);
+ emit_rev32(ARM_LR, rd[1], ctx);
+ emit_rev32(rd[1], rd[0], ctx);
+ emit(ARM_MOV_R(rd[0], ARM_LR), ctx);
break;
}
goto exit;
@@ -1402,36 +1527,30 @@ emit_bswap_uxt:
case 16:
/* zero-extend 16 bits into 64 bits */
#if __LINUX_ARM_ARCH__ < 6
- emit_a32_mov_i(tmp2[1], 0xffff, false, ctx);
- emit(ARM_AND_R(rt, rt, tmp2[1]), ctx);
+ emit_a32_mov_i(tmp2[1], 0xffff, ctx);
+ emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx);
#else /* ARMv6+ */
- emit(ARM_UXTH(rt, rt), ctx);
+ emit(ARM_UXTH(rd[1], rd[1]), ctx);
#endif
- emit(ARM_EOR_R(rd, rd, rd), ctx);
+ emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
break;
case 32:
/* zero-extend 32 bits into 64 bits */
- emit(ARM_EOR_R(rd, rd, rd), ctx);
+ emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
break;
case 64:
/* nop */
break;
}
exit:
- if (dstk) {
- emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ arm_bpf_put_reg64(dst, rd, ctx);
break;
/* dst = imm64 */
case BPF_LD | BPF_IMM | BPF_DW:
{
- const struct bpf_insn insn1 = insn[1];
- u32 hi, lo = imm;
+ u64 val = (u32)imm | (u64)insn[1].imm << 32;
- hi = insn1.imm;
- emit_a32_mov_i(dst_lo, lo, dstk, ctx);
- emit_a32_mov_i(dst_hi, hi, dstk, ctx);
+ emit_a32_mov_i64(dst, val, ctx);
return 1;
}
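
Editor's note: BPF_LD | BPF_IMM | BPF_DW is the one eBPF instruction that spans two insn slots; the second slot carries the upper 32 bits in its imm field, and the return value of 1 tells the build loop to skip that pseudo-instruction. Assembling the constant in C:

	#include <stdint.h>

	struct insn { int32_t imm; };

	/* Combine the two halves; the (uint32_t) cast keeps the low word
	 * from being sign-extended before the OR.
	 */
	static uint64_t ld_imm64(const struct insn *insn)
	{
		return (uint32_t)insn[0].imm | (uint64_t)insn[1].imm << 32;
	}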
@@ -1440,10 +1559,8 @@ exit:
case BPF_LDX | BPF_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_DW:
- rn = sstk ? tmp2[1] : src_lo;
- if (sstk)
- emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
- emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code));
+ rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+ emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
@@ -1453,18 +1570,15 @@ exit:
switch (BPF_SIZE(code)) {
case BPF_DW:
/* Sign-extend immediate value into temp reg */
- emit_a32_mov_i64(true, tmp2, imm, false, ctx);
- emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, BPF_W);
- emit_str_r(dst_lo, tmp2[0], dstk, off+4, ctx, BPF_W);
+ emit_a32_mov_se_i64(true, tmp2, imm, ctx);
break;
case BPF_W:
case BPF_H:
case BPF_B:
- emit_a32_mov_i(tmp2[1], imm, false, ctx);
- emit_str_r(dst_lo, tmp2[1], dstk, off, ctx,
- BPF_SIZE(code));
+ emit_a32_mov_i(tmp2[1], imm, ctx);
break;
}
+ emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
break;
/* STX XADD: lock *(u32 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_W:
@@ -1476,25 +1590,9 @@ exit:
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_B:
case BPF_STX | BPF_MEM | BPF_DW:
- {
- u8 sz = BPF_SIZE(code);
-
- rn = sstk ? tmp2[1] : src_lo;
- rm = sstk ? tmp2[0] : src_hi;
- if (sstk) {
- emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx);
- }
-
- /* Store the value */
- if (BPF_SIZE(code) == BPF_DW) {
- emit_str_r(dst_lo, rn, dstk, off, ctx, BPF_W);
- emit_str_r(dst_lo, rm, dstk, off+4, ctx, BPF_W);
- } else {
- emit_str_r(dst_lo, rn, dstk, off, ctx, sz);
- }
+ rs = arm_bpf_get_reg64(src, tmp2, ctx);
+ emit_str_r(dst_lo, rs, off, ctx, BPF_SIZE(code));
break;
- }
/* PC += off if dst == src */
/* PC += off if dst > src */
/* PC += off if dst >= src */
@@ -1518,12 +1616,8 @@ exit:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
/* Setup source registers */
- rm = sstk ? tmp2[0] : src_hi;
- rn = sstk ? tmp2[1] : src_lo;
- if (sstk) {
- emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
- emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx);
- }
+ rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx);
+ rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
goto go_jmp;
/* PC += off if dst == imm */
/* PC += off if dst > imm */
@@ -1552,18 +1646,13 @@ exit:
rm = tmp2[0];
rn = tmp2[1];
/* Sign-extend immediate value */
- emit_a32_mov_i64(true, tmp2, imm, false, ctx);
+ emit_a32_mov_se_i64(true, tmp2, imm, ctx);
go_jmp:
/* Setup destination register */
- rd = dstk ? tmp[0] : dst_hi;
- rt = dstk ? tmp[1] : dst_lo;
- if (dstk) {
- emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
- emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
- }
+ rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Check for the condition */
- emit_ar_r(rd, rt, rm, rn, ctx, BPF_OP(code));
+ emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code));
/* Setup JUMP instruction */
jmp_offset = bpf2a32_offset(i+off, i, ctx);
@@ -1619,21 +1708,21 @@ go_jmp:
/* function call */
case BPF_JMP | BPF_CALL:
{
- const u8 *r0 = bpf2a32[BPF_REG_0];
- const u8 *r1 = bpf2a32[BPF_REG_1];
- const u8 *r2 = bpf2a32[BPF_REG_2];
- const u8 *r3 = bpf2a32[BPF_REG_3];
- const u8 *r4 = bpf2a32[BPF_REG_4];
- const u8 *r5 = bpf2a32[BPF_REG_5];
+ const s8 *r0 = bpf2a32[BPF_REG_0];
+ const s8 *r1 = bpf2a32[BPF_REG_1];
+ const s8 *r2 = bpf2a32[BPF_REG_2];
+ const s8 *r3 = bpf2a32[BPF_REG_3];
+ const s8 *r4 = bpf2a32[BPF_REG_4];
+ const s8 *r5 = bpf2a32[BPF_REG_5];
const u32 func = (u32)__bpf_call_base + (u32)imm;
- emit_a32_mov_r64(true, r0, r1, false, false, ctx);
- emit_a32_mov_r64(true, r1, r2, false, true, ctx);
- emit_push_r64(r5, 0, ctx);
- emit_push_r64(r4, 8, ctx);
- emit_push_r64(r3, 16, ctx);
+ emit_a32_mov_r64(true, r0, r1, ctx);
+ emit_a32_mov_r64(true, r1, r2, ctx);
+ emit_push_r64(r5, ctx);
+ emit_push_r64(r4, ctx);
+ emit_push_r64(r3, ctx);
- emit_a32_mov_i(tmp[1], func, false, ctx);
+ emit_a32_mov_i(tmp[1], func, ctx);
emit_blx_r(tmp[1], ctx);
emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // callee clean
@@ -1745,6 +1834,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
+ ctx.cpu_architecture = cpu_architecture();
	/* If we are not able to allocate memory for offsets[], then
	 * we must fall back to the interpreter
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index d5cf5f6208aa..f4e58bcdaa43 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -77,11 +77,14 @@
#define ARM_INST_EOR_R 0x00200000
#define ARM_INST_EOR_I 0x02200000
-#define ARM_INST_LDRB_I 0x05d00000
+#define ARM_INST_LDST__U 0x00800000
+#define ARM_INST_LDST__IMM12 0x00000fff
+#define ARM_INST_LDRB_I 0x05500000
#define ARM_INST_LDRB_R 0x07d00000
-#define ARM_INST_LDRH_I 0x01d000b0
+#define ARM_INST_LDRD_I 0x014000d0
+#define ARM_INST_LDRH_I 0x015000b0
#define ARM_INST_LDRH_R 0x019000b0
-#define ARM_INST_LDR_I 0x05900000
+#define ARM_INST_LDR_I 0x05100000
#define ARM_INST_LDR_R 0x07900000
#define ARM_INST_LDM 0x08900000
@@ -124,9 +127,10 @@
#define ARM_INST_SBC_R 0x00c00000
#define ARM_INST_SBCS_R 0x00d00000
-#define ARM_INST_STR_I 0x05800000
-#define ARM_INST_STRB_I 0x05c00000
-#define ARM_INST_STRH_I 0x01c000b0
+#define ARM_INST_STR_I 0x05000000
+#define ARM_INST_STRB_I 0x05400000
+#define ARM_INST_STRD_I 0x014000f0
+#define ARM_INST_STRH_I 0x014000b0
#define ARM_INST_TST_R 0x01100000
#define ARM_INST_TST_I 0x03100000
@@ -183,17 +187,18 @@
#define ARM_EOR_R(rd, rn, rm) _AL3_R(ARM_INST_EOR, rd, rn, rm)
#define ARM_EOR_I(rd, rn, imm) _AL3_I(ARM_INST_EOR, rd, rn, imm)
-#define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
- | ((off) & 0xfff))
-#define ARM_LDR_R(rt, rn, rm) (ARM_INST_LDR_R | (rt) << 12 | (rn) << 16 \
+#define ARM_LDR_R(rt, rn, rm) (ARM_INST_LDR_R | ARM_INST_LDST__U \
+ | (rt) << 12 | (rn) << 16 \
| (rm))
-#define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \
- | (off))
-#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \
+#define ARM_LDR_R_SI(rt, rn, rm, type, imm) \
+ (ARM_INST_LDR_R | ARM_INST_LDST__U \
+ | (rt) << 12 | (rn) << 16 \
+ | (imm) << 7 | (type) << 5 | (rm))
+#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | ARM_INST_LDST__U \
+ | (rt) << 12 | (rn) << 16 \
| (rm))
-#define ARM_LDRH_I(rt, rn, off) (ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \
- | (((off) & 0xf0) << 4) | ((off) & 0xf))
-#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | (rt) << 12 | (rn) << 16 \
+#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | ARM_INST_LDST__U \
+ | (rt) << 12 | (rn) << 16 \
| (rm))
#define ARM_LDM(rn, regs) (ARM_INST_LDM | (rn) << 16 | (regs))
@@ -254,13 +259,6 @@
#define ARM_SUBS_I(rd, rn, imm) _AL3_I(ARM_INST_SUBS, rd, rn, imm)
#define ARM_SBC_I(rd, rn, imm) _AL3_I(ARM_INST_SBC, rd, rn, imm)
-#define ARM_STR_I(rt, rn, off) (ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \
- | ((off) & 0xfff))
-#define ARM_STRH_I(rt, rn, off) (ARM_INST_STRH_I | (rt) << 12 | (rn) << 16 \
- | (((off) & 0xf0) << 4) | ((off) & 0xf))
-#define ARM_STRB_I(rt, rn, off) (ARM_INST_STRB_I | (rt) << 12 | (rn) << 16 \
- | (((off) & 0xf0) << 4) | ((off) & 0xf))
-
#define ARM_TST_R(rn, rm) _AL3_R(ARM_INST_TST, 0, rn, rm)
#define ARM_TST_I(rn, imm) _AL3_I(ARM_INST_TST, 0, rn, imm)
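
Editor's note: clearing the U (up/down) bit in the base opcodes above is what lets the rewritten JIT address scratch words at negative offsets from ARM_FP: the encoder sets U for non-negative displacements and stores the magnitude otherwise. A sketch of the imm12 case (the real helper sits in bpf_jit_32.c, outside this hunk):

	#include <stdint.h>

	#define LDST__U		0x00800000	/* mirrors ARM_INST_LDST__U */
	#define LDST__IMM12	0x00000fff	/* mirrors ARM_INST_LDST__IMM12 */

	/* Fold a signed displacement into an LDR/STR-class opcode: set U
	 * to add a positive offset, clear it and encode |off| to subtract.
	 */
	static uint32_t ldst_imm12(uint32_t op, int32_t off)
	{
		if (off >= 0)
			op |= LDST__U;
		else
			off = -off;
		return op | ((uint32_t)off & LDST__IMM12);
	}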
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 2438b96004c1..fcc5bfec8bd1 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -110,7 +110,7 @@ int __init omap_init_clocksource_32k(void __iomem *vbase)
}
sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
- register_persistent_clock(NULL, omap_read_persistent_clock64);
+ register_persistent_clock(omap_read_persistent_clock64);
pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
return 0;
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index ba13f793fbce..ed36dcab80f1 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -127,53 +127,6 @@ static int pxa_ssp_probe(struct platform_device *pdev)
if (IS_ERR(ssp->clk))
return PTR_ERR(ssp->clk);
- if (dev->of_node) {
- struct of_phandle_args dma_spec;
- struct device_node *np = dev->of_node;
- int ret;
-
- /*
- * FIXME: we should allocate the DMA channel from this
- * context and pass the channel down to the ssp users.
- * For now, we lookup the rx and tx indices manually
- */
-
- /* rx */
- ret = of_parse_phandle_with_args(np, "dmas", "#dma-cells",
- 0, &dma_spec);
-
- if (ret) {
- dev_err(dev, "Can't parse dmas property\n");
- return -ENODEV;
- }
- ssp->drcmr_rx = dma_spec.args[0];
- of_node_put(dma_spec.np);
-
- /* tx */
- ret = of_parse_phandle_with_args(np, "dmas", "#dma-cells",
- 1, &dma_spec);
- if (ret) {
- dev_err(dev, "Can't parse dmas property\n");
- return -ENODEV;
- }
- ssp->drcmr_tx = dma_spec.args[0];
- of_node_put(dma_spec.np);
- } else {
- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (res == NULL) {
- dev_err(dev, "no SSP RX DRCMR defined\n");
- return -ENODEV;
- }
- ssp->drcmr_rx = res->start;
-
- res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (res == NULL) {
- dev_err(dev, "no SSP TX DRCMR defined\n");
- return -ENODEV;
- }
- ssp->drcmr_tx = res->start;
- }
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "no memory resource defined\n");
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index e90cc8a08186..f8bd523d64d1 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -47,9 +47,6 @@
(unsigned long)(addr) + \
(size))
-/* Used as a marker in ARM_pc to note when we're in a jprobe. */
-#define JPROBE_MAGIC_ADDR 0xffffffff
-
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -289,8 +286,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
break;
case KPROBE_REENTER:
/* A nested probe was hit in FIQ, it is a BUG */
- pr_warn("Unrecoverable kprobe detected at %p.\n",
- p->addr);
+ pr_warn("Unrecoverable kprobe detected.\n");
+ dump_kprobe(p);
/* fall through */
default:
/* impossible cases */
@@ -303,10 +300,10 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
/*
* If we have no pre-handler or it returned 0, we
- * continue with normal processing. If we have a
- * pre-handler and it returned non-zero, it prepped
- * for calling the break_handler below on re-entry,
- * so get out doing nothing more here.
+ * continue with normal processing. If we have a
+	 * pre-handler and it returned non-zero, it will
+	 * modify the execution path, so there is no need for
+	 * single-stepping. Let's just reset the current kprobe
+	 * and exit.
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
kcb->kprobe_status = KPROBE_HIT_SS;
@@ -315,20 +312,9 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
- reset_current_kprobe();
- }
- }
- } else if (cur) {
- /* We probably hit a jprobe. Call its break handler. */
- if (cur->break_handler && cur->break_handler(cur, regs)) {
- kcb->kprobe_status = KPROBE_HIT_SS;
- singlestep(cur, regs, kcb);
- if (cur->post_handler) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- cur->post_handler(cur, regs, 0);
}
+ reset_current_kprobe();
}
- reset_current_kprobe();
} else {
/*
* The probe was removed and a race is in progress.
@@ -521,117 +507,6 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- long sp_addr = regs->ARM_sp;
- long cpsr;
-
- kcb->jprobe_saved_regs = *regs;
- memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
- regs->ARM_pc = (long)jp->entry;
-
- cpsr = regs->ARM_cpsr | PSR_I_BIT;
-#ifdef CONFIG_THUMB2_KERNEL
- /* Set correct Thumb state in cpsr */
- if (regs->ARM_pc & 1)
- cpsr |= PSR_T_BIT;
- else
- cpsr &= ~PSR_T_BIT;
-#endif
- regs->ARM_cpsr = cpsr;
-
- preempt_disable();
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- __asm__ __volatile__ (
- /*
- * Setup an empty pt_regs. Fill SP and PC fields as
- * they're needed by longjmp_break_handler.
- *
- * We allocate some slack between the original SP and start of
- * our fabricated regs. To be precise we want to have worst case
- * covered which is STMFD with all 16 regs so we allocate 2 *
- * sizeof(struct_pt_regs)).
- *
- * This is to prevent any simulated instruction from writing
- * over the regs when they are accessing the stack.
- */
-#ifdef CONFIG_THUMB2_KERNEL
- "sub r0, %0, %1 \n\t"
- "mov sp, r0 \n\t"
-#else
- "sub sp, %0, %1 \n\t"
-#endif
- "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
- "str %0, [sp, %2] \n\t"
- "str r0, [sp, %3] \n\t"
- "mov r0, sp \n\t"
- "bl kprobe_handler \n\t"
-
- /*
- * Return to the context saved by setjmp_pre_handler
- * and restored by longjmp_break_handler.
- */
-#ifdef CONFIG_THUMB2_KERNEL
- "ldr lr, [sp, %2] \n\t" /* lr = saved sp */
- "ldrd r0, r1, [sp, %5] \n\t" /* r0,r1 = saved lr,pc */
- "ldr r2, [sp, %4] \n\t" /* r2 = saved psr */
- "stmdb lr!, {r0, r1, r2} \n\t" /* push saved lr and */
- /* rfe context */
- "ldmia sp, {r0 - r12} \n\t"
- "mov sp, lr \n\t"
- "ldr lr, [sp], #4 \n\t"
- "rfeia sp! \n\t"
-#else
- "ldr r0, [sp, %4] \n\t"
- "msr cpsr_cxsf, r0 \n\t"
- "ldmia sp, {r0 - pc} \n\t"
-#endif
- :
- : "r" (kcb->jprobe_saved_regs.ARM_sp),
- "I" (sizeof(struct pt_regs) * 2),
- "J" (offsetof(struct pt_regs, ARM_sp)),
- "J" (offsetof(struct pt_regs, ARM_pc)),
- "J" (offsetof(struct pt_regs, ARM_cpsr)),
- "J" (offsetof(struct pt_regs, ARM_lr))
- : "memory", "cc");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- long stack_addr = kcb->jprobe_saved_regs.ARM_sp;
- long orig_sp = regs->ARM_sp;
- struct jprobe *jp = container_of(p, struct jprobe, kp);
-
- if (regs->ARM_pc == JPROBE_MAGIC_ADDR) {
- if (orig_sp != stack_addr) {
- struct pt_regs *saved_regs =
- (struct pt_regs *)kcb->jprobe_saved_regs.ARM_sp;
- printk("current sp %lx does not match saved sp %lx\n",
- orig_sp, stack_addr);
- printk("Saved registers for jprobe %p\n", jp);
- show_regs(saved_regs);
- printk("Current registers\n");
- show_regs(regs);
- BUG();
- }
- *regs = kcb->jprobe_saved_regs;
- memcpy((void *)stack_addr, kcb->jprobes_stack,
- MIN_STACK_SIZE(stack_addr));
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
return 0;
diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
index 14db14152909..cc237fa9b90f 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -1461,7 +1461,6 @@ fail:
print_registers(&result_regs);
if (mem) {
- pr_err("current_stack=%p\n", current_stack);
pr_err("expected_memory:\n");
print_memory(expected_memory, mem_size);
pr_err("result_memory:\n");
diff --git a/arch/arm/vfp/Makefile b/arch/arm/vfp/Makefile
index a81404c09d5d..94516c40ebd3 100644
--- a/arch/arm/vfp/Makefile
+++ b/arch/arm/vfp/Makefile
@@ -8,8 +8,5 @@
# asflags-y := -DDEBUG
KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp -mfloat-abi=soft)
-LDFLAGS +=--no-warn-mismatch
-obj-y += vfp.o
-
-vfp-$(CONFIG_VFP) += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
+obj-y += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 35d0f823e823..dc7e6b50ef67 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -596,13 +596,11 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
}
/* Sanitise and restore the current VFP state from the provided structures. */
-int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
- struct user_vfp_exc __user *ufp_exc)
+int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
unsigned long fpexc;
- int err = 0;
/* Disable VFP to avoid corrupting the new thread state. */
vfp_flush_hwstate(thread);
@@ -611,17 +609,16 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
* Copy the floating point registers. There can be unused
* registers see asm/hwcap.h for details.
*/
- err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
- sizeof(hwstate->fpregs));
+ memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
/*
* Copy the status and control register.
*/
- __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+ hwstate->fpscr = ufp->fpscr;
/*
* Sanitise and restore the exception registers.
*/
- __get_user_error(fpexc, &ufp_exc->fpexc, err);
+ fpexc = ufp_exc->fpexc;
/* Ensure the VFP is enabled. */
fpexc |= FPEXC_EN;
@@ -630,10 +627,10 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
hwstate->fpexc = fpexc;
- __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
- __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+ hwstate->fpinst = ufp_exc->fpinst;
+ hwstate->fpinst2 = ufp_exc->fpinst2;
- return err ? -EFAULT : 0;
+ return 0;
}
/*
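
Editor's note: with the __user accessors removed, the contract changes: callers must now copy both structures into kernel memory before calling in. A hedged sketch of the expected caller-side pattern (variable and field names are illustrative; the actual call site is in the signal-handling code, not shown here):

	struct user_vfp ufp;
	struct user_vfp_exc ufp_exc;

	/* Pull both blocks out of the user frame in one go; only this
	 * copy can fault, so vfp_restore_user_hwstate() itself no longer
	 * needs to return -EFAULT.
	 */
	if (copy_from_user(&ufp, &frame->ufp, sizeof(ufp)) ||
	    copy_from_user(&ufp_exc, &frame->ufp_exc, sizeof(ufp_exc)))
		return -EFAULT;

	err = vfp_restore_user_hwstate(&ufp, &ufp_exc);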
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 42c090cf0292..d0a53cc6293a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -24,6 +24,7 @@ config ARM64
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
+ select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK if !PREEMPT
@@ -42,8 +43,19 @@ config ARM64
select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
+ select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
+ select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
+ select ARCH_INLINE_SPIN_LOCK if !PREEMPT
+ select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
+ select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
+ select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
+ select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
+ select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
@@ -74,6 +86,7 @@ config ARM64
select GENERIC_CPU_AUTOPROBE
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
+ select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
@@ -96,6 +109,7 @@ config ARM64
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
@@ -127,6 +141,7 @@ config ARM64
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RCU_TABLE_FREE
+ select HAVE_RSEQ
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
@@ -264,13 +279,6 @@ config ARCH_SUPPORTS_UPROBES
config ARCH_PROC_KCORE_TEXT
def_bool y
-config MULTI_IRQ_HANDLER
- def_bool y
-
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
source "arch/arm64/Kconfig.platforms"
menu "Bus support"
@@ -756,7 +764,6 @@ config HOLES_IN_ZONE
def_bool y
depends on NUMA
-source kernel/Kconfig.preempt
source kernel/Kconfig.hz
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
@@ -775,6 +782,9 @@ config ARCH_SPARSEMEM_DEFAULT
config ARCH_SELECT_MEMORY_MODEL
def_bool ARCH_SPARSEMEM_ENABLE
+config ARCH_FLATMEM_ENABLE
+ def_bool !NUMA
+
config HAVE_ARCH_PFN_VALID
def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
@@ -791,8 +801,6 @@ config ARCH_WANT_HUGE_PMD_SHARE
config ARCH_HAS_CACHE_LINE_SIZE
def_bool y
-source "mm/Kconfig"
-
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
---help---
@@ -1246,6 +1254,7 @@ config EFI
bool "UEFI runtime support"
depends on OF && !CPU_BIG_ENDIAN
depends on KERNEL_MODE_NEON
+ select ARCH_SUPPORTS_ACPI
select LIBFDT
select UCS2_STRING
select EFI_PARAMS_FROM_FDT
@@ -1273,10 +1282,6 @@ config DMI
endmenu
-menu "Userspace binary formats"
-
-source "fs/Kconfig.binfmt"
-
config COMPAT
bool "Kernel support for 32-bit EL0"
depends on ARM64_4K_PAGES || EXPERT
@@ -1300,8 +1305,6 @@ config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
-endmenu
-
menu "Power management options"
source "kernel/power/Kconfig"
@@ -1327,25 +1330,12 @@ source "drivers/cpufreq/Kconfig"
endmenu
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
source "drivers/firmware/Kconfig"
source "drivers/acpi/Kconfig"
-source "fs/Kconfig"
-
source "arch/arm64/kvm/Kconfig"
-source "arch/arm64/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
if CRYPTO
source "arch/arm64/crypto/Kconfig"
endif
-
-source "lib/Kconfig"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index cc6bd559af85..69c9170bdd24 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -1,6 +1,3 @@
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config ARM64_PTDUMP_CORE
def_bool n
@@ -97,5 +94,3 @@ config ARM64_RELOC_TEST
tristate "Relocation testing module"
source "drivers/hwtracing/coresight/Kconfig"
-
-endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index e7101b19d590..efe61a2e4b5e 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -60,15 +60,16 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
AS += -EB
-# We must use the linux target here, since distributions don't tend to package
-# the ELF linker scripts with binutils, and this results in a build failure.
-LDFLAGS += -EB -maarch64linuxb
+# Prefer the bare-metal ELF build target, but not all toolchains
+# include it, so fall back to the standard linux target if needed.
+LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
UTS_MACHINE := aarch64_be
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__AARCH64EL__
AS += -EL
-LDFLAGS += -EL -maarch64linux # See comment above
+# Same as above, prefer ELF but fall back to linux target if needed.
+LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
UTS_MACHINE := aarch64
endif
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
index 4057197048dc..1a406a76c86a 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
@@ -482,9 +482,9 @@
status = "disabled";
};
- mdio_mux_iproc: mdio-mux@6602023c {
+ mdio_mux_iproc: mdio-mux@66020000 {
compatible = "brcm,mdio-mux-iproc";
- reg = <0x6602023c 0x14>;
+ reg = <0x66020000 0x250>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
index b203152ad67c..a70e8ddbd66f 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
@@ -278,9 +278,9 @@
#include "stingray-pinctrl.dtsi"
- mdio_mux_iproc: mdio-mux@2023c {
+ mdio_mux_iproc: mdio-mux@20000 {
compatible = "brcm,mdio-mux-iproc";
- reg = <0x0002023c 0x14>;
+ reg = <0x00020000 0x250>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
index 4dd06767f839..4664c33e0763 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
@@ -11,13 +11,14 @@ fman0: fman@1a00000 {
#size-cells = <1>;
cell-index = <0>;
compatible = "fsl,fman";
- ranges = <0x0 0x0 0x1a00000 0x100000>;
- reg = <0x0 0x1a00000 0x0 0x100000>;
+ ranges = <0x0 0x0 0x1a00000 0xfe000>;
+ reg = <0x0 0x1a00000 0x0 0xfe000>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen 3 0>;
clock-names = "fmanclk";
fsl,qman-channel-range = <0x800 0x10>;
+ ptimer-handle = <&ptp_timer0>;
muram@0 {
compatible = "fsl,fman-muram";
@@ -73,9 +74,11 @@ fman0: fman@1a00000 {
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xfd000 0x1000>;
};
+};
- ptp_timer0: ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
- };
+ptp_timer0: ptp-timer@1afe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0x0 0x1afe000 0x0 0x1000>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 3 0>;
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
index 8d477dcbfa58..851190a719ea 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
@@ -1000,6 +1000,24 @@
reset-gpios = <&gpio11 1 0 >;
};
+ /* UFS */
+ ufs: ufs@ff3b0000 {
+ compatible = "hisilicon,hi3660-ufs", "jedec,ufs-1.1";
+ /* 0: HCI standard */
+ /* 1: UFS SYS CTRL */
+ reg = <0x0 0xff3b0000 0x0 0x1000>,
+ <0x0 0xff3b1000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_UFSIO_REF>,
+ <&crg_ctrl HI3660_CLK_GATE_UFSPHY_CFG>;
+ clock-names = "ref_clk", "phy_clk";
+ freq-table-hz = <0 0>, <0 0>;
+ /* offset: 0x84; bit: 12 */
+ resets = <&crg_rst 0x84 12>;
+ reset-names = "rst";
+ };
+
/* SD */
dwmmc1: dwmmc1@ff37f000 {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/hisilicon/hip07.dtsi b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
index 9c10030a07f8..c33adefc3061 100644
--- a/arch/arm64/boot/dts/hisilicon/hip07.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
@@ -1049,7 +1049,74 @@
num-pins = <2>;
};
};
+ p0_mbigen_alg_a:interrupt-controller@d0080000 {
+ compatible = "hisilicon,mbigen-v2";
+ reg = <0x0 0xd0080000 0x0 0x10000>;
+ p0_mbigen_sec_a: intc_sec {
+ msi-parent = <&p0_its_dsa_a 0x40400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <33>;
+ };
+ p0_mbigen_smmu_alg_a: intc_smmu_alg {
+ msi-parent = <&p0_its_dsa_a 0x40b1b>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <3>;
+ };
+ };
+ p0_mbigen_alg_b: interrupt-controller@8,d0080000 {
+ compatible = "hisilicon,mbigen-v2";
+ reg = <0x8 0xd0080000 0x0 0x10000>;
+
+ p0_mbigen_sec_b: intc_sec {
+ msi-parent = <&p0_its_dsa_b 0x42400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <33>;
+ };
+ p0_mbigen_smmu_alg_b: intc_smmu_alg {
+ msi-parent = <&p0_its_dsa_b 0x42b1b>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <3>;
+ };
+ };
+ p1_mbigen_alg_a: interrupt-controller@400,d0080000 {
+ compatible = "hisilicon,mbigen-v2";
+ reg = <0x400 0xd0080000 0x0 0x10000>;
+
+ p1_mbigen_sec_a: intc_sec {
+ msi-parent = <&p1_its_dsa_a 0x44400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <33>;
+ };
+ p1_mbigen_smmu_alg_a: intc_smmu_alg {
+ msi-parent = <&p1_its_dsa_a 0x44b1b>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <3>;
+ };
+ };
+ p1_mbigen_alg_b: interrupt-controller@408,d0080000 {
+ compatible = "hisilicon,mbigen-v2";
+ reg = <0x408 0xd0080000 0x0 0x10000>;
+
+ p1_mbigen_sec_b: intc_sec {
+ msi-parent = <&p1_its_dsa_b 0x46400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <33>;
+ };
+ p1_mbigen_smmu_alg_b: intc_smmu_alg {
+ msi-parent = <&p1_its_dsa_b 0x46b1b>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <3>;
+ };
+ };
p0_mbigen_dsa_a: interrupt-controller@c0080000 {
compatible = "hisilicon,mbigen-v2";
reg = <0x0 0xc0080000 0x0 0x10000>;
@@ -1107,6 +1174,58 @@
hisilicon,broken-prefetch-cmd;
status = "disabled";
};
+ p0_smmu_alg_a: smmu_alg@d0040000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x0 0xd0040000 0x0 0x20000>;
+ interrupt-parent = <&p0_mbigen_smmu_alg_a>;
+ interrupts = <733 1>,
+ <734 1>,
+ <735 1>;
+ interrupt-names = "eventq", "gerror", "priq";
+ #iommu-cells = <1>;
+ dma-coherent;
+ hisilicon,broken-prefetch-cmd;
+ /* smmu-cb-memtype = <0x0 0x1>;*/
+ };
+ p0_smmu_alg_b: smmu_alg@8,d0040000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x8 0xd0040000 0x0 0x20000>;
+ interrupt-parent = <&p0_mbigen_smmu_alg_b>;
+ interrupts = <733 1>,
+ <734 1>,
+ <735 1>;
+ interrupt-names = "eventq", "gerror", "priq";
+ #iommu-cells = <1>;
+ dma-coherent;
+ hisilicon,broken-prefetch-cmd;
+ /* smmu-cb-memtype = <0x0 0x1>;*/
+ };
+ p1_smmu_alg_a: smmu_alg@400,d0040000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x400 0xd0040000 0x0 0x20000>;
+ interrupt-parent = <&p1_mbigen_smmu_alg_a>;
+ interrupts = <733 1>,
+ <734 1>,
+ <735 1>;
+ interrupt-names = "eventq", "gerror", "priq";
+ #iommu-cells = <1>;
+ dma-coherent;
+ hisilicon,broken-prefetch-cmd;
+ /* smmu-cb-memtype = <0x0 0x1>;*/
+ };
+ p1_smmu_alg_b: smmu_alg@408,d0040000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x408 0xd0040000 0x0 0x20000>;
+ interrupt-parent = <&p1_mbigen_smmu_alg_b>;
+ interrupts = <733 1>,
+ <734 1>,
+ <735 1>;
+ interrupt-names = "eventq", "gerror", "priq";
+ #iommu-cells = <1>;
+ dma-coherent;
+ hisilicon,broken-prefetch-cmd;
+ /* smmu-cb-memtype = <0x0 0x1>;*/
+ };
soc {
compatible = "simple-bus";
@@ -1603,5 +1722,170 @@
0x0 0 0 4 &mbigen_pcie2_a 671 4>;
status = "disabled";
};
+ p0_sec_a: crypto@d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x0 0xd0000000 0x0 0x10000
+ 0x0 0xd2000000 0x0 0x10000
+ 0x0 0xd2010000 0x0 0x10000
+ 0x0 0xd2020000 0x0 0x10000
+ 0x0 0xd2030000 0x0 0x10000
+ 0x0 0xd2040000 0x0 0x10000
+ 0x0 0xd2050000 0x0 0x10000
+ 0x0 0xd2060000 0x0 0x10000
+ 0x0 0xd2070000 0x0 0x10000
+ 0x0 0xd2080000 0x0 0x10000
+ 0x0 0xd2090000 0x0 0x10000
+ 0x0 0xd20a0000 0x0 0x10000
+ 0x0 0xd20b0000 0x0 0x10000
+ 0x0 0xd20c0000 0x0 0x10000
+ 0x0 0xd20d0000 0x0 0x10000
+ 0x0 0xd20e0000 0x0 0x10000
+ 0x0 0xd20f0000 0x0 0x10000
+ 0x0 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p0_mbigen_sec_a>;
+ iommus = <&p0_smmu_alg_a 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+ };
+ p0_sec_b: crypto@8,d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x8 0xd0000000 0x0 0x10000
+ 0x8 0xd2000000 0x0 0x10000
+ 0x8 0xd2010000 0x0 0x10000
+ 0x8 0xd2020000 0x0 0x10000
+ 0x8 0xd2030000 0x0 0x10000
+ 0x8 0xd2040000 0x0 0x10000
+ 0x8 0xd2050000 0x0 0x10000
+ 0x8 0xd2060000 0x0 0x10000
+ 0x8 0xd2070000 0x0 0x10000
+ 0x8 0xd2080000 0x0 0x10000
+ 0x8 0xd2090000 0x0 0x10000
+ 0x8 0xd20a0000 0x0 0x10000
+ 0x8 0xd20b0000 0x0 0x10000
+ 0x8 0xd20c0000 0x0 0x10000
+ 0x8 0xd20d0000 0x0 0x10000
+ 0x8 0xd20e0000 0x0 0x10000
+ 0x8 0xd20f0000 0x0 0x10000
+ 0x8 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p0_mbigen_sec_b>;
+ iommus = <&p0_smmu_alg_b 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+ };
+ p1_sec_a: crypto@400,d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x400 0xd0000000 0x0 0x10000
+ 0x400 0xd2000000 0x0 0x10000
+ 0x400 0xd2010000 0x0 0x10000
+ 0x400 0xd2020000 0x0 0x10000
+ 0x400 0xd2030000 0x0 0x10000
+ 0x400 0xd2040000 0x0 0x10000
+ 0x400 0xd2050000 0x0 0x10000
+ 0x400 0xd2060000 0x0 0x10000
+ 0x400 0xd2070000 0x0 0x10000
+ 0x400 0xd2080000 0x0 0x10000
+ 0x400 0xd2090000 0x0 0x10000
+ 0x400 0xd20a0000 0x0 0x10000
+ 0x400 0xd20b0000 0x0 0x10000
+ 0x400 0xd20c0000 0x0 0x10000
+ 0x400 0xd20d0000 0x0 0x10000
+ 0x400 0xd20e0000 0x0 0x10000
+ 0x400 0xd20f0000 0x0 0x10000
+ 0x400 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p1_mbigen_sec_a>;
+ iommus = <&p1_smmu_alg_a 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+ };
+ p1_sec_b: crypto@408,d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x408 0xd0000000 0x0 0x10000
+ 0x408 0xd2000000 0x0 0x10000
+ 0x408 0xd2010000 0x0 0x10000
+ 0x408 0xd2020000 0x0 0x10000
+ 0x408 0xd2030000 0x0 0x10000
+ 0x408 0xd2040000 0x0 0x10000
+ 0x408 0xd2050000 0x0 0x10000
+ 0x408 0xd2060000 0x0 0x10000
+ 0x408 0xd2070000 0x0 0x10000
+ 0x408 0xd2080000 0x0 0x10000
+ 0x408 0xd2090000 0x0 0x10000
+ 0x408 0xd20a0000 0x0 0x10000
+ 0x408 0xd20b0000 0x0 0x10000
+ 0x408 0xd20c0000 0x0 0x10000
+ 0x408 0xd20d0000 0x0 0x10000
+ 0x408 0xd20e0000 0x0 0x10000
+ 0x408 0xd20f0000 0x0 0x10000
+ 0x408 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p1_mbigen_sec_b>;
+ iommus = <&p1_smmu_alg_b 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+ };
+
};
};
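
Each SEC node above carries eighteen reg windows (one control block at 0xd0000000 plus seventeen 64 KiB queue blocks from 0xd2000000 to 0xd2100000), all indexed in order. A sketch of how a driver would map them in turn (hypothetical names, not the actual hip07 SEC driver):

	#include <linux/platform_device.h>
	#include <linux/io.h>
	#include <linux/err.h>

	#define SEC_NUM_WINDOWS 18

	static int sec_probe_sketch(struct platform_device *pdev)
	{
		void __iomem *regs[SEC_NUM_WINDOWS];
		int i;

		for (i = 0; i < SEC_NUM_WINDOWS; i++) {
			struct resource *res;

			/* reg entries are fetched by index, in node order */
			res = platform_get_resource(pdev, IORESOURCE_MEM, i);
			regs[i] = devm_ioremap_resource(&pdev->dev, res);
			if (IS_ERR(regs[i]))
				return PTR_ERR(regs[i]);
		}
		return 0;
	}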
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index f9a186f6af8a..2c07e233012b 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -193,6 +193,7 @@ CONFIG_SCSI_HISI_SAS=y
CONFIG_SCSI_HISI_SAS_PCI=y
CONFIG_SCSI_UFSHCD=m
CONFIG_SCSI_UFSHCD_PLATFORM=m
+CONFIG_SCSI_UFS_HISI=y
CONFIG_SCSI_UFS_QCOM=m
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index e3e50950a863..adcb83eb683c 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -567,7 +567,6 @@ static struct shash_alg mac_algs[] = { {
.base.cra_name = "cmac(aes)",
.base.cra_driver_name = "cmac-aes-" MODE,
.base.cra_priority = PRIO,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx) +
2 * AES_BLOCK_SIZE,
@@ -583,7 +582,6 @@ static struct shash_alg mac_algs[] = { {
.base.cra_name = "xcbc(aes)",
.base.cra_driver_name = "xcbc-aes-" MODE,
.base.cra_priority = PRIO,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx) +
2 * AES_BLOCK_SIZE,
@@ -599,7 +597,6 @@ static struct shash_alg mac_algs[] = { {
.base.cra_name = "cbcmac(aes)",
.base.cra_driver_name = "cbcmac-aes-" MODE,
.base.cra_priority = PRIO,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx),
.base.cra_module = THIS_MODULE,
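
The dropped cra_flags lines are redundant rather than functional: crypto_register_shash() marks the algorithm as CRYPTO_ALG_TYPE_SHASH itself, so spelling the flag out buys nothing (the same cleanup repeats in the ghash/sha*/sm3 glue files below). A minimal registration sketch with placeholder callbacks, to show the flag-free form:

	#include <linux/module.h>
	#include <crypto/internal/hash.h>

	static int sketch_init(struct shash_desc *desc) { return 0; }
	static int sketch_update(struct shash_desc *desc, const u8 *data,
				 unsigned int len) { return 0; }
	static int sketch_final(struct shash_desc *desc, u8 *out) { return 0; }

	static struct shash_alg sketch_alg = {
		.digestsize	= 16,
		.init		= sketch_init,
		.update		= sketch_update,
		.final		= sketch_final,
		.descsize	= 8,
		.base		= {
			.cra_name		= "sketch",
			.cra_driver_name	= "sketch-none",
			.cra_priority		= 100,
			/* no .cra_flags: crypto_register_shash() supplies
			 * CRYPTO_ALG_TYPE_SHASH */
			.cra_blocksize		= 16,
			.cra_module		= THIS_MODULE,
		},
	};
	/* registered with crypto_register_shash(&sketch_alg) from module init */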
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index c723647b37db..1b319b716d5e 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -1,7 +1,7 @@
/*
* Accelerated GHASH implementation with ARMv8 PMULL instructions.
*
- * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -46,6 +46,19 @@
ss3 .req v26
ss4 .req v27
+ XL2 .req v8
+ XM2 .req v9
+ XH2 .req v10
+ XL3 .req v11
+ XM3 .req v12
+ XH3 .req v13
+ TT3 .req v14
+ TT4 .req v15
+ HH .req v16
+ HH3 .req v17
+ HH4 .req v18
+ HH34 .req v19
+
.text
.arch armv8-a+crypto
@@ -134,11 +147,25 @@
.endm
.macro __pmull_pre_p64
+ add x8, x3, #16
+ ld1 {HH.2d-HH4.2d}, [x8]
+
+ trn1 SHASH2.2d, SHASH.2d, HH.2d
+ trn2 T1.2d, SHASH.2d, HH.2d
+ eor SHASH2.16b, SHASH2.16b, T1.16b
+
+ trn1 HH34.2d, HH3.2d, HH4.2d
+ trn2 T1.2d, HH3.2d, HH4.2d
+ eor HH34.16b, HH34.16b, T1.16b
+
movi MASK.16b, #0xe1
shl MASK.2d, MASK.2d, #57
.endm
.macro __pmull_pre_p8
+ ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
+ eor SHASH2.16b, SHASH2.16b, SHASH.16b
+
// k00_16 := 0x0000000000000000_000000000000ffff
// k32_48 := 0x00000000ffffffff_0000ffffffffffff
movi k32_48.2d, #0xffffffff
@@ -213,31 +240,88 @@
.endm
.macro __pmull_ghash, pn
- frame_push 5
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
-
-0: ld1 {SHASH.2d}, [x22]
- ld1 {XL.2d}, [x20]
- ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
- eor SHASH2.16b, SHASH2.16b, SHASH.16b
+ ld1 {SHASH.2d}, [x3]
+ ld1 {XL.2d}, [x1]
__pmull_pre_\pn
/* do the head block first, if supplied */
- cbz x23, 1f
- ld1 {T1.2d}, [x23]
- mov x23, xzr
- b 2f
+ cbz x4, 0f
+ ld1 {T1.2d}, [x4]
+ mov x4, xzr
+ b 3f
+
+0: .ifc \pn, p64
+ tbnz w0, #0, 2f // skip until #blocks is a
+ tbnz w0, #1, 2f // round multiple of 4
+
+1: ld1 {XM3.16b-TT4.16b}, [x2], #64
+
+ sub w0, w0, #4
+
+ rev64 T1.16b, XM3.16b
+ rev64 T2.16b, XH3.16b
+ rev64 TT4.16b, TT4.16b
+ rev64 TT3.16b, TT3.16b
+
+ ext IN1.16b, TT4.16b, TT4.16b, #8
+ ext XL3.16b, TT3.16b, TT3.16b, #8
+
+ eor TT4.16b, TT4.16b, IN1.16b
+ pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1
+ pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0
+ pmull XM2.1q, SHASH2.1d, TT4.1d // (a1 + a0)(b1 + b0)
+
+ eor TT3.16b, TT3.16b, XL3.16b
+ pmull2 XH3.1q, HH.2d, XL3.2d // a1 * b1
+ pmull XL3.1q, HH.1d, XL3.1d // a0 * b0
+ pmull2 XM3.1q, SHASH2.2d, TT3.2d // (a1 + a0)(b1 + b0)
-1: ld1 {T1.2d}, [x21], #16
- sub w19, w19, #1
+ ext IN1.16b, T2.16b, T2.16b, #8
+ eor XL2.16b, XL2.16b, XL3.16b
+ eor XH2.16b, XH2.16b, XH3.16b
+ eor XM2.16b, XM2.16b, XM3.16b
+
+ eor T2.16b, T2.16b, IN1.16b
+ pmull2 XH3.1q, HH3.2d, IN1.2d // a1 * b1
+ pmull XL3.1q, HH3.1d, IN1.1d // a0 * b0
+ pmull XM3.1q, HH34.1d, T2.1d // (a1 + a0)(b1 + b0)
+
+ eor XL2.16b, XL2.16b, XL3.16b
+ eor XH2.16b, XH2.16b, XH3.16b
+ eor XM2.16b, XM2.16b, XM3.16b
+
+ ext IN1.16b, T1.16b, T1.16b, #8
+ ext TT3.16b, XL.16b, XL.16b, #8
+ eor XL.16b, XL.16b, IN1.16b
+ eor T1.16b, T1.16b, TT3.16b
+
+ pmull2 XH.1q, HH4.2d, XL.2d // a1 * b1
+ eor T1.16b, T1.16b, XL.16b
+ pmull XL.1q, HH4.1d, XL.1d // a0 * b0
+ pmull2 XM.1q, HH34.2d, T1.2d // (a1 + a0)(b1 + b0)
+
+ eor XL.16b, XL.16b, XL2.16b
+ eor XH.16b, XH.16b, XH2.16b
+ eor XM.16b, XM.16b, XM2.16b
+
+ eor T2.16b, XL.16b, XH.16b
+ ext T1.16b, XL.16b, XH.16b, #8
+ eor XM.16b, XM.16b, T2.16b
+
+ __pmull_reduce_p64
+
+ eor T2.16b, T2.16b, XH.16b
+ eor XL.16b, XL.16b, T2.16b
+
+ cbz w0, 5f
+ b 1b
+ .endif
-2: /* multiply XL by SHASH in GF(2^128) */
+2: ld1 {T1.2d}, [x2], #16
+ sub w0, w0, #1
+
+3: /* multiply XL by SHASH in GF(2^128) */
CPU_LE( rev64 T1.16b, T1.16b )
ext T2.16b, XL.16b, XL.16b, #8
@@ -250,7 +334,7 @@ CPU_LE( rev64 T1.16b, T1.16b )
__pmull_\pn XL, XL, SHASH // a0 * b0
__pmull_\pn XM, T1, SHASH2 // (a1 + a0)(b1 + b0)
- eor T2.16b, XL.16b, XH.16b
+4: eor T2.16b, XL.16b, XH.16b
ext T1.16b, XL.16b, XH.16b, #8
eor XM.16b, XM.16b, T2.16b
@@ -259,18 +343,9 @@ CPU_LE( rev64 T1.16b, T1.16b )
eor T2.16b, T2.16b, XH.16b
eor XL.16b, XL.16b, T2.16b
- cbz w19, 3f
-
- if_will_cond_yield_neon
- st1 {XL.2d}, [x20]
- do_cond_yield_neon
- b 0b
- endif_yield_neon
-
- b 1b
+ cbnz w0, 0b
-3: st1 {XL.2d}, [x20]
- frame_pop
+5: st1 {XL.2d}, [x1]
ret
.endm
@@ -286,9 +361,10 @@ ENTRY(pmull_ghash_update_p8)
__pmull_ghash p8
ENDPROC(pmull_ghash_update_p8)
- KS .req v8
- CTR .req v9
- INP .req v10
+ KS0 .req v12
+ KS1 .req v13
+ INP0 .req v14
+ INP1 .req v15
.macro load_round_keys, rounds, rk
cmp \rounds, #12
@@ -322,98 +398,128 @@ ENDPROC(pmull_ghash_update_p8)
.endm
.macro pmull_gcm_do_crypt, enc
- ld1 {SHASH.2d}, [x4]
+ ld1 {SHASH.2d}, [x4], #16
+ ld1 {HH.2d}, [x4]
ld1 {XL.2d}, [x1]
ldr x8, [x5, #8] // load lower counter
- load_round_keys w7, x6
-
movi MASK.16b, #0xe1
- ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
+ trn1 SHASH2.2d, SHASH.2d, HH.2d
+ trn2 T1.2d, SHASH.2d, HH.2d
CPU_LE( rev x8, x8 )
shl MASK.2d, MASK.2d, #57
- eor SHASH2.16b, SHASH2.16b, SHASH.16b
+ eor SHASH2.16b, SHASH2.16b, T1.16b
.if \enc == 1
ldr x10, [sp]
- ld1 {KS.16b}, [x10]
+ ld1 {KS0.16b-KS1.16b}, [x10]
.endif
-0: ld1 {CTR.8b}, [x5] // load upper counter
- ld1 {INP.16b}, [x3], #16
+ cbnz x6, 4f
+
+0: ld1 {INP0.16b-INP1.16b}, [x3], #32
+
rev x9, x8
- add x8, x8, #1
- sub w0, w0, #1
- ins CTR.d[1], x9 // set lower counter
+ add x11, x8, #1
+ add x8, x8, #2
.if \enc == 1
- eor INP.16b, INP.16b, KS.16b // encrypt input
- st1 {INP.16b}, [x2], #16
+ eor INP0.16b, INP0.16b, KS0.16b // encrypt input
+ eor INP1.16b, INP1.16b, KS1.16b
.endif
- rev64 T1.16b, INP.16b
+ ld1 {KS0.8b}, [x5] // load upper counter
+ rev x11, x11
+ sub w0, w0, #2
+ mov KS1.8b, KS0.8b
+ ins KS0.d[1], x9 // set lower counter
+ ins KS1.d[1], x11
+
+ rev64 T1.16b, INP1.16b
cmp w7, #12
b.ge 2f // AES-192/256?
-1: enc_round CTR, v21
-
- ext T2.16b, XL.16b, XL.16b, #8
+1: enc_round KS0, v21
ext IN1.16b, T1.16b, T1.16b, #8
- enc_round CTR, v22
+ enc_round KS1, v21
+ pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1
+
+ enc_round KS0, v22
+ eor T1.16b, T1.16b, IN1.16b
+
+ enc_round KS1, v22
+ pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0
+ enc_round KS0, v23
+ pmull XM2.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
+
+ enc_round KS1, v23
+ rev64 T1.16b, INP0.16b
+ ext T2.16b, XL.16b, XL.16b, #8
+
+ enc_round KS0, v24
+ ext IN1.16b, T1.16b, T1.16b, #8
eor T1.16b, T1.16b, T2.16b
- eor XL.16b, XL.16b, IN1.16b
- enc_round CTR, v23
+ enc_round KS1, v24
+ eor XL.16b, XL.16b, IN1.16b
- pmull2 XH.1q, SHASH.2d, XL.2d // a1 * b1
+ enc_round KS0, v25
eor T1.16b, T1.16b, XL.16b
- enc_round CTR, v24
+ enc_round KS1, v25
+ pmull2 XH.1q, HH.2d, XL.2d // a1 * b1
+
+ enc_round KS0, v26
+ pmull XL.1q, HH.1d, XL.1d // a0 * b0
- pmull XL.1q, SHASH.1d, XL.1d // a0 * b0
- pmull XM.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
+ enc_round KS1, v26
+ pmull2 XM.1q, SHASH2.2d, T1.2d // (a1 + a0)(b1 + b0)
- enc_round CTR, v25
+ enc_round KS0, v27
+ eor XL.16b, XL.16b, XL2.16b
+ eor XH.16b, XH.16b, XH2.16b
+ enc_round KS1, v27
+ eor XM.16b, XM.16b, XM2.16b
ext T1.16b, XL.16b, XH.16b, #8
+
+ enc_round KS0, v28
eor T2.16b, XL.16b, XH.16b
eor XM.16b, XM.16b, T1.16b
- enc_round CTR, v26
-
+ enc_round KS1, v28
eor XM.16b, XM.16b, T2.16b
- pmull T2.1q, XL.1d, MASK.1d
- enc_round CTR, v27
+ enc_round KS0, v29
+ pmull T2.1q, XL.1d, MASK.1d
+ enc_round KS1, v29
mov XH.d[0], XM.d[1]
mov XM.d[1], XL.d[0]
- enc_round CTR, v28
-
+ aese KS0.16b, v30.16b
eor XL.16b, XM.16b, T2.16b
- enc_round CTR, v29
-
+ aese KS1.16b, v30.16b
ext T2.16b, XL.16b, XL.16b, #8
- aese CTR.16b, v30.16b
-
+ eor KS0.16b, KS0.16b, v31.16b
pmull XL.1q, XL.1d, MASK.1d
eor T2.16b, T2.16b, XH.16b
- eor KS.16b, CTR.16b, v31.16b
-
+ eor KS1.16b, KS1.16b, v31.16b
eor XL.16b, XL.16b, T2.16b
.if \enc == 0
- eor INP.16b, INP.16b, KS.16b
- st1 {INP.16b}, [x2], #16
+ eor INP0.16b, INP0.16b, KS0.16b
+ eor INP1.16b, INP1.16b, KS1.16b
.endif
+ st1 {INP0.16b-INP1.16b}, [x2], #32
+
cbnz w0, 0b
CPU_LE( rev x8, x8 )
@@ -421,17 +527,24 @@ CPU_LE( rev x8, x8 )
str x8, [x5, #8] // store lower counter
.if \enc == 1
- st1 {KS.16b}, [x10]
+ st1 {KS0.16b-KS1.16b}, [x10]
.endif
ret
2: b.eq 3f // AES-192?
- enc_round CTR, v17
- enc_round CTR, v18
-3: enc_round CTR, v19
- enc_round CTR, v20
+ enc_round KS0, v17
+ enc_round KS1, v17
+ enc_round KS0, v18
+ enc_round KS1, v18
+3: enc_round KS0, v19
+ enc_round KS1, v19
+ enc_round KS0, v20
+ enc_round KS1, v20
b 1b
+
+4: load_round_keys w7, x6
+ b 0b
.endm
/*
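
The rewritten inner loop consumes four blocks per iteration using the precomputed powers of H loaded from the enlarged key at x3 + 16. Sketching the algebra: unrolling the GHASH recurrence X_j = (X_{j-1} ^ C_j) * H four times in GF(2^128) gives

	X_{i+4} = (X_i ^ C_{i+1})*H^4 ^ C_{i+2}*H^3 ^ C_{i+3}*H^2 ^ C_{i+4}*H

so the four PMULL products are independent and can be folded together with a single reduction; SHASH2 and HH34 hold the Karatsuba-style (hi ^ lo) halves for the H/H^2 and H^3/H^4 pairs.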
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 8a10f1d7199a..6e9f33d14930 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -1,7 +1,7 @@
/*
* Accelerated GHASH implementation with ARMv8 PMULL instructions.
*
- * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -33,9 +33,12 @@ MODULE_ALIAS_CRYPTO("ghash");
#define GCM_IV_SIZE 12
struct ghash_key {
- u64 a;
- u64 b;
- be128 k;
+ u64 h[2];
+ u64 h2[2];
+ u64 h3[2];
+ u64 h4[2];
+
+ be128 k;
};
struct ghash_desc_ctx {
@@ -113,6 +116,9 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
}
}
+/* avoid hogging the CPU for too long */
+#define MAX_BLOCKS (SZ_64K / GHASH_BLOCK_SIZE)
+
static int ghash_update(struct shash_desc *desc, const u8 *src,
unsigned int len)
{
@@ -136,11 +142,16 @@ static int ghash_update(struct shash_desc *desc, const u8 *src,
blocks = len / GHASH_BLOCK_SIZE;
len %= GHASH_BLOCK_SIZE;
- ghash_do_update(blocks, ctx->digest, src, key,
- partial ? ctx->buf : NULL);
+ do {
+ int chunk = min(blocks, MAX_BLOCKS);
- src += blocks * GHASH_BLOCK_SIZE;
- partial = 0;
+ ghash_do_update(chunk, ctx->digest, src, key,
+ partial ? ctx->buf : NULL);
+
+ blocks -= chunk;
+ src += chunk * GHASH_BLOCK_SIZE;
+ partial = 0;
+ } while (unlikely(blocks > 0));
}
if (len)
memcpy(ctx->buf + partial, src, len);
@@ -166,23 +177,36 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
return 0;
}
+static void ghash_reflect(u64 h[], const be128 *k)
+{
+ u64 carry = be64_to_cpu(k->a) & BIT(63) ? 1 : 0;
+
+ h[0] = (be64_to_cpu(k->b) << 1) | carry;
+ h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63);
+
+ if (carry)
+ h[1] ^= 0xc200000000000000UL;
+}
+
static int __ghash_setkey(struct ghash_key *key,
const u8 *inkey, unsigned int keylen)
{
- u64 a, b;
+ be128 h;
/* needed for the fallback */
memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
- /* perform multiplication by 'x' in GF(2^128) */
- b = get_unaligned_be64(inkey);
- a = get_unaligned_be64(inkey + 8);
+ ghash_reflect(key->h, &key->k);
- key->a = (a << 1) | (b >> 63);
- key->b = (b << 1) | (a >> 63);
+ h = key->k;
+ gf128mul_lle(&h, &key->k);
+ ghash_reflect(key->h2, &h);
- if (b >> 63)
- key->b ^= 0xc200000000000000UL;
+ gf128mul_lle(&h, &key->k);
+ ghash_reflect(key->h3, &h);
+
+ gf128mul_lle(&h, &key->k);
+ ghash_reflect(key->h4, &h);
return 0;
}
@@ -204,7 +228,6 @@ static struct shash_alg ghash_alg = {
.base.cra_name = "ghash",
.base.cra_driver_name = "ghash-ce",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = GHASH_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ghash_key),
.base.cra_module = THIS_MODULE,
@@ -245,7 +268,7 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
__aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){},
num_rounds(&ctx->aes_key));
- return __ghash_setkey(&ctx->ghash_key, key, sizeof(key));
+ return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));
}
static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
@@ -349,9 +372,10 @@ static int gcm_encrypt(struct aead_request *req)
struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
struct skcipher_walk walk;
u8 iv[AES_BLOCK_SIZE];
- u8 ks[AES_BLOCK_SIZE];
+ u8 ks[2 * AES_BLOCK_SIZE];
u8 tag[AES_BLOCK_SIZE];
u64 dg[2] = {};
+ int nrounds = num_rounds(&ctx->aes_key);
int err;
if (req->assoclen)
@@ -360,39 +384,39 @@ static int gcm_encrypt(struct aead_request *req)
memcpy(iv, req->iv, GCM_IV_SIZE);
put_unaligned_be32(1, iv + GCM_IV_SIZE);
- if (likely(may_use_simd())) {
- kernel_neon_begin();
+ err = skcipher_walk_aead_encrypt(&walk, req, false);
- pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc,
- num_rounds(&ctx->aes_key));
+ if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+ u32 const *rk = NULL;
+
+ kernel_neon_begin();
+ pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- pmull_gcm_encrypt_block(ks, iv, NULL,
- num_rounds(&ctx->aes_key));
+ pmull_gcm_encrypt_block(ks, iv, NULL, nrounds);
put_unaligned_be32(3, iv + GCM_IV_SIZE);
- kernel_neon_end();
+ pmull_gcm_encrypt_block(ks + AES_BLOCK_SIZE, iv, NULL, nrounds);
+ put_unaligned_be32(4, iv + GCM_IV_SIZE);
- err = skcipher_walk_aead_encrypt(&walk, req, false);
+ do {
+ int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
- while (walk.nbytes >= AES_BLOCK_SIZE) {
- int blocks = walk.nbytes / AES_BLOCK_SIZE;
+ if (rk)
+ kernel_neon_begin();
- kernel_neon_begin();
pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
walk.src.virt.addr, &ctx->ghash_key,
- iv, ctx->aes_key.key_enc,
- num_rounds(&ctx->aes_key), ks);
+ iv, rk, nrounds, ks);
kernel_neon_end();
err = skcipher_walk_done(&walk,
- walk.nbytes % AES_BLOCK_SIZE);
- }
+ walk.nbytes % (2 * AES_BLOCK_SIZE));
+
+ rk = ctx->aes_key.key_enc;
+ } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
} else {
- __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
- num_rounds(&ctx->aes_key));
+ __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- err = skcipher_walk_aead_encrypt(&walk, req, false);
-
while (walk.nbytes >= AES_BLOCK_SIZE) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
@@ -400,8 +424,7 @@ static int gcm_encrypt(struct aead_request *req)
do {
__aes_arm64_encrypt(ctx->aes_key.key_enc,
- ks, iv,
- num_rounds(&ctx->aes_key));
+ ks, iv, nrounds);
crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
crypto_inc(iv, AES_BLOCK_SIZE);
@@ -418,19 +441,28 @@ static int gcm_encrypt(struct aead_request *req)
}
if (walk.nbytes)
__aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
- num_rounds(&ctx->aes_key));
+ nrounds);
}
/* handle the tail */
if (walk.nbytes) {
u8 buf[GHASH_BLOCK_SIZE];
+ unsigned int nbytes = walk.nbytes;
+ u8 *dst = walk.dst.virt.addr;
+ u8 *head = NULL;
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, ks,
walk.nbytes);
- memcpy(buf, walk.dst.virt.addr, walk.nbytes);
- memset(buf + walk.nbytes, 0, GHASH_BLOCK_SIZE - walk.nbytes);
- ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL);
+ if (walk.nbytes > GHASH_BLOCK_SIZE) {
+ head = dst;
+ dst += GHASH_BLOCK_SIZE;
+ nbytes %= GHASH_BLOCK_SIZE;
+ }
+
+ memcpy(buf, dst, nbytes);
+ memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
+ ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head);
err = skcipher_walk_done(&walk, 0);
}
@@ -453,10 +485,11 @@ static int gcm_decrypt(struct aead_request *req)
struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
unsigned int authsize = crypto_aead_authsize(aead);
struct skcipher_walk walk;
- u8 iv[AES_BLOCK_SIZE];
+ u8 iv[2 * AES_BLOCK_SIZE];
u8 tag[AES_BLOCK_SIZE];
- u8 buf[GHASH_BLOCK_SIZE];
+ u8 buf[2 * GHASH_BLOCK_SIZE];
u64 dg[2] = {};
+ int nrounds = num_rounds(&ctx->aes_key);
int err;
if (req->assoclen)
@@ -465,43 +498,53 @@ static int gcm_decrypt(struct aead_request *req)
memcpy(iv, req->iv, GCM_IV_SIZE);
put_unaligned_be32(1, iv + GCM_IV_SIZE);
- if (likely(may_use_simd())) {
- kernel_neon_begin();
+ err = skcipher_walk_aead_decrypt(&walk, req, false);
+
+ if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+ u32 const *rk = NULL;
- pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc,
- num_rounds(&ctx->aes_key));
+ kernel_neon_begin();
+ pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- kernel_neon_end();
- err = skcipher_walk_aead_decrypt(&walk, req, false);
+ do {
+ int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
+ int rem = walk.total - blocks * AES_BLOCK_SIZE;
- while (walk.nbytes >= AES_BLOCK_SIZE) {
- int blocks = walk.nbytes / AES_BLOCK_SIZE;
+ if (rk)
+ kernel_neon_begin();
- kernel_neon_begin();
pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
walk.src.virt.addr, &ctx->ghash_key,
- iv, ctx->aes_key.key_enc,
- num_rounds(&ctx->aes_key));
+ iv, rk, nrounds);
+
+ /* check if this is the final iteration of the loop */
+ if (rem < (2 * AES_BLOCK_SIZE)) {
+ u8 *iv2 = iv + AES_BLOCK_SIZE;
+
+ if (rem > AES_BLOCK_SIZE) {
+ memcpy(iv2, iv, AES_BLOCK_SIZE);
+ crypto_inc(iv2, AES_BLOCK_SIZE);
+ }
+
+ pmull_gcm_encrypt_block(iv, iv, NULL, nrounds);
+
+ if (rem > AES_BLOCK_SIZE)
+ pmull_gcm_encrypt_block(iv2, iv2, NULL,
+ nrounds);
+ }
+
kernel_neon_end();
err = skcipher_walk_done(&walk,
- walk.nbytes % AES_BLOCK_SIZE);
- }
- if (walk.nbytes) {
- kernel_neon_begin();
- pmull_gcm_encrypt_block(iv, iv, ctx->aes_key.key_enc,
- num_rounds(&ctx->aes_key));
- kernel_neon_end();
- }
+ walk.nbytes % (2 * AES_BLOCK_SIZE));
+ rk = ctx->aes_key.key_enc;
+ } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
} else {
- __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
- num_rounds(&ctx->aes_key));
+ __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- err = skcipher_walk_aead_decrypt(&walk, req, false);
-
while (walk.nbytes >= AES_BLOCK_SIZE) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
@@ -512,8 +555,7 @@ static int gcm_decrypt(struct aead_request *req)
do {
__aes_arm64_encrypt(ctx->aes_key.key_enc,
- buf, iv,
- num_rounds(&ctx->aes_key));
+ buf, iv, nrounds);
crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
crypto_inc(iv, AES_BLOCK_SIZE);
@@ -526,14 +568,24 @@ static int gcm_decrypt(struct aead_request *req)
}
if (walk.nbytes)
__aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
- num_rounds(&ctx->aes_key));
+ nrounds);
}
/* handle the tail */
if (walk.nbytes) {
- memcpy(buf, walk.src.virt.addr, walk.nbytes);
- memset(buf + walk.nbytes, 0, GHASH_BLOCK_SIZE - walk.nbytes);
- ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL);
+ const u8 *src = walk.src.virt.addr;
+ const u8 *head = NULL;
+ unsigned int nbytes = walk.nbytes;
+
+ if (walk.nbytes > GHASH_BLOCK_SIZE) {
+ head = src;
+ src += GHASH_BLOCK_SIZE;
+ nbytes %= GHASH_BLOCK_SIZE;
+ }
+
+ memcpy(buf, src, nbytes);
+ memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
+ ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head);
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv,
walk.nbytes);
@@ -558,7 +610,7 @@ static int gcm_decrypt(struct aead_request *req)
static struct aead_alg gcm_aes_alg = {
.ivsize = GCM_IV_SIZE,
- .chunksize = AES_BLOCK_SIZE,
+ .chunksize = 2 * AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = gcm_setkey,
.setauthsize = gcm_setauthsize,
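
On the C side, ghash_update() now feeds the NEON helper at most MAX_BLOCKS (64 KiB worth) of input per call, since the yield-based bodge was removed from the assembly. A hedged sketch of the pattern, with a hypothetical process() callback standing in for ghash_do_update():

	#include <linux/types.h>
	#include <linux/kernel.h>
	#include <linux/sizes.h>
	#include <asm/neon.h>

	#define GHASH_BLOCK_SIZE 16
	#define MAX_BLOCKS (SZ_64K / GHASH_BLOCK_SIZE)

	static void chunked_update_sketch(int blocks, const u8 *src,
					  void (*process)(int n, const u8 *p))
	{
		while (blocks > 0) {
			int chunk = min(blocks, MAX_BLOCKS);

			kernel_neon_begin();	/* SIMD usable, preemption off */
			process(chunk, src);
			kernel_neon_end();	/* preemption point between chunks */

			blocks -= chunk;
			src += chunk * GHASH_BLOCK_SIZE;
		}
	}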
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index efbeb3e0dcfb..17fac2889f56 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -99,7 +99,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ce",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index fd1ff2b13dfa..261f5195cab7 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -114,7 +114,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ce",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -129,7 +128,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ce",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index e8880ccdc71f..4aedeaefd61f 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -67,8 +67,7 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha256_state),
.base.cra_name = "sha256",
.base.cra_driver_name = "sha256-arm64",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_priority = 125,
.base.cra_blocksize = SHA256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -80,8 +79,7 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha256_state),
.base.cra_name = "sha224",
.base.cra_driver_name = "sha224-arm64",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_priority = 125,
.base.cra_blocksize = SHA224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
@@ -153,7 +151,6 @@ static struct shash_alg neon_algs[] = { {
.base.cra_name = "sha256",
.base.cra_driver_name = "sha256-arm64-neon",
.base.cra_priority = 150,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -166,7 +163,6 @@ static struct shash_alg neon_algs[] = { {
.base.cra_name = "sha224",
.base.cra_driver_name = "sha224-arm64-neon",
.base.cra_priority = 150,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
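
The priority bump from 100 to 125 keeps the scalar arm64 assembly ahead of the generic C sha256 (priority 100) while staying below the NEON (150) and Crypto Extensions (200) drivers. Resolution happens at allocation time; a sketch:

	#include <crypto/hash.h>

	/* "sha256" resolves to the registered driver with the highest
	 * cra_priority: sha256-ce (200) > sha256-arm64-neon (150) >
	 * sha256-arm64 (125) > sha256-generic (100) */
	static struct crypto_shash *alloc_best_sha256(void)
	{
		return crypto_alloc_shash("sha256", 0, 0);
	}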
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index da8222e528bd..a336feac0f59 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -105,7 +105,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-224",
.base.cra_driver_name = "sha3-224-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@@ -117,7 +116,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-256",
.base.cra_driver_name = "sha3-256-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@@ -129,7 +127,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-384",
.base.cra_driver_name = "sha3-384-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@@ -141,7 +138,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-512",
.base.cra_driver_name = "sha3-512-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index a77c8632a589..f2c5f28c622a 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -87,7 +87,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha384",
.base.cra_driver_name = "sha384-ce",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -100,7 +99,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha512",
.base.cra_driver_name = "sha512-ce",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index 27db4851e380..325b23b43a9b 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -63,7 +63,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha512",
.base.cra_driver_name = "sha512-arm64",
.base.cra_priority = 150,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -76,7 +75,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha384",
.base.cra_driver_name = "sha384-arm64",
.base.cra_priority = 150,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c
index 3b4948f7e26f..88938a20d9b2 100644
--- a/arch/arm64/crypto/sm3-ce-glue.c
+++ b/arch/arm64/crypto/sm3-ce-glue.c
@@ -72,7 +72,6 @@ static struct shash_alg sm3_alg = {
.descsize = sizeof(struct sm3_state),
.base.cra_name = "sm3",
.base.cra_driver_name = "sm3-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SM3_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 3a9b84d39d71..6cd5d77b6b44 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += mm-arch-hooks.h
generic-y += msi.h
generic-y += preempt.h
generic-y += qrwlock.h
+generic-y += qspinlock.h
generic-y += rwsem.h
generic-y += segment.h
generic-y += serial.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 0db62a4cbce2..709208dfdc8b 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -12,10 +12,12 @@
#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H
+#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/psci.h>
#include <asm/cputype.h>
+#include <asm/io.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
@@ -29,18 +31,22 @@
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI
+pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
+
/* ACPI table mapping after acpi_permanent_mmap is set */
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
acpi_size size)
{
+ /* For normal memory we already have a cacheable mapping. */
+ if (memblock_is_map_memory(phys))
+ return (void __iomem *)__phys_to_virt(phys);
+
/*
- * EFI's reserve_regions() call adds memory with the WB attribute
- * to memblock via early_init_dt_add_memory_arch().
+ * We should still honor the memory's attribute here because the
+ * crash dump kernel may exclude some ACPI (reclaim) regions
+ * from the memblock list.
*/
- if (!memblock_is_memory(phys))
- return ioremap(phys, size);
-
- return ioremap_cache(phys, size);
+ return __ioremap(phys, size, __acpi_get_mem_attribute(phys));
}
#define acpi_os_ioremap acpi_os_ioremap
@@ -129,15 +135,20 @@ static inline const char *acpi_get_enable_method(int cpu)
* for compatibility.
*/
#define acpi_disable_cmcff 1
-pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr);
+static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
+{
+ return __acpi_get_mem_attribute(addr);
+}
#endif /* CONFIG_ACPI_APEI */
#ifdef CONFIG_ACPI_NUMA
int arm64_acpi_numa_init(void);
-int acpi_numa_get_nid(unsigned int cpu, u64 hwid);
+int acpi_numa_get_nid(unsigned int cpu);
+void acpi_map_cpus_to_nodes(void);
#else
static inline int arm64_acpi_numa_init(void) { return -ENOSYS; }
-static inline int acpi_numa_get_nid(unsigned int cpu, u64 hwid) { return NUMA_NO_NODE; }
+static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
+static inline void acpi_map_cpus_to_nodes(void) { }
#endif /* CONFIG_ACPI_NUMA */
#define ACPI_TABLE_UPGRADE_MAX_PHYS MEMBLOCK_ALLOC_ACCESSIBLE
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index c0235e0ff849..9bca54dda75c 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -40,17 +40,6 @@
#include <asm/cmpxchg.h>
-#define ___atomic_add_unless(v, a, u, sfx) \
-({ \
- typeof((v)->counter) c, old; \
- \
- c = atomic##sfx##_read(v); \
- while (c != (u) && \
- (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c; \
- })
-
#define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) READ_ONCE((v)->counter)
@@ -61,21 +50,11 @@
#define atomic_add_return_release atomic_add_return_release
#define atomic_add_return atomic_add_return
-#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
-#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v))
-#define atomic_inc_return_release(v) atomic_add_return_release(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_sub_return_acquire atomic_sub_return_acquire
#define atomic_sub_return_release atomic_sub_return_release
#define atomic_sub_return atomic_sub_return
-#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
-#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v))
-#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v))
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_add_acquire atomic_fetch_add_acquire
#define atomic_fetch_add_release atomic_fetch_add_release
@@ -119,13 +98,6 @@
cmpxchg_release(&((v)->counter), (old), (new))
#define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new))
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
-#define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,)
#define atomic_andnot atomic_andnot
/*
@@ -140,21 +112,11 @@
#define atomic64_add_return_release atomic64_add_return_release
#define atomic64_add_return atomic64_add_return
-#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
-#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
-#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_sub_return_acquire atomic64_sub_return_acquire
#define atomic64_sub_return_release atomic64_sub_return_release
#define atomic64_sub_return atomic64_sub_return
-#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
-#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
-#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
#define atomic64_fetch_add_release atomic64_fetch_add_release
@@ -195,16 +157,9 @@
#define atomic64_cmpxchg_release atomic_cmpxchg_release
#define atomic64_cmpxchg atomic_cmpxchg
-#define atomic64_inc(v) atomic64_add(1, (v))
-#define atomic64_dec(v) atomic64_sub(1, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
-#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
-#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
-#define atomic64_add_unless(v, a, u) (___atomic_add_unless(v, a, u, 64) != u)
#define atomic64_andnot atomic64_andnot
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_if_positive atomic64_dec_if_positive
#endif
#endif
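
The derived operations deleted here (atomic_inc(), atomic_dec_and_test(), the *_return variants, and so on) are now generated by the generic fallbacks in <linux/atomic.h> instead of being open-coded per architecture. They are roughly equivalent to (illustrative _sketch names, not the actual fallback macros):

	#include <linux/types.h>
	#include <linux/atomic.h>

	static inline void atomic_inc_sketch(atomic_t *v)
	{
		atomic_add(1, v);
	}

	static inline bool atomic_dec_and_test_sketch(atomic_t *v)
	{
		return atomic_sub_return(1, v) == 0;
	}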
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index f11518af96a9..822a9192c551 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -128,6 +128,19 @@ do { \
__u.__val; \
})
+#define smp_cond_load_relaxed(ptr, cond_expr) \
+({ \
+ typeof(ptr) __PTR = (ptr); \
+ typeof(*ptr) VAL; \
+ for (;;) { \
+ VAL = READ_ONCE(*__PTR); \
+ if (cond_expr) \
+ break; \
+ __cmpwait_relaxed(__PTR, VAL); \
+ } \
+ VAL; \
+})
+
#define smp_cond_load_acquire(ptr, cond_expr) \
({ \
typeof(ptr) __PTR = (ptr); \
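
smp_cond_load_relaxed() mirrors the acquire variant alongside it but without the ordering guarantee, still using __cmpwait_relaxed() so arm64 can wait in WFE instead of busy-polling. A usage sketch (VAL is the variable the macro binds to the loaded value):

	#include <linux/types.h>
	#include <linux/atomic.h>

	/* spin (WFE-assisted on arm64) until *flag becomes nonzero,
	 * with no acquire semantics on the returned value */
	static u32 wait_for_flag_sketch(u32 *flag)
	{
		return smp_cond_load_relaxed(flag, VAL != 0);
	}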
diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h
index 9c19594ce7cb..10d536b1af74 100644
--- a/arch/arm64/include/asm/bitops.h
+++ b/arch/arm64/include/asm/bitops.h
@@ -17,22 +17,11 @@
#define __ASM_BITOPS_H
#include <linux/compiler.h>
-#include <asm/barrier.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
-/*
- * Little endian assembly atomic bitops.
- */
-extern void set_bit(int nr, volatile unsigned long *p);
-extern void clear_bit(int nr, volatile unsigned long *p);
-extern void change_bit(int nr, volatile unsigned long *p);
-extern int test_and_set_bit(int nr, volatile unsigned long *p);
-extern int test_and_clear_bit(int nr, volatile unsigned long *p);
-extern int test_and_change_bit(int nr, volatile unsigned long *p);
-
#include <asm-generic/bitops/builtin-__ffs.h>
#include <asm-generic/bitops/builtin-ffs.h>
#include <asm-generic/bitops/builtin-__fls.h>
@@ -44,15 +33,11 @@ extern int test_and_change_bit(int nr, volatile unsigned long *p);
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
-
-/*
- * Ext2 is defined to use little-endian byte ordering.
- */
-#define ext2_set_bit_atomic(lock, nr, p) test_and_set_bit_le(nr, p)
-#define ext2_clear_bit_atomic(lock, nr, p) test_and_clear_bit_le(nr, p)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* __ASM_BITOPS_H */
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 5df5cfe1c143..5ee5bca8c24b 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -21,12 +21,16 @@
#define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3
#define CTR_DMINLINE_SHIFT 16
+#define CTR_IMINLINE_SHIFT 0
#define CTR_ERG_SHIFT 20
#define CTR_CWG_SHIFT 24
#define CTR_CWG_MASK 15
#define CTR_IDC_SHIFT 28
#define CTR_DIC_SHIFT 29
+#define CTR_CACHE_MINLINE_MASK \
+ (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
+
#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
#define ICACHE_POLICY_VPIPT 0
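
CTR_CACHE_MINLINE_MASK covers both minimum-line fields of CTR_EL0: each holds log2 of the line size in 4-byte words, so the mismatched-cache-type check can compare the masked values across CPUs. A standalone decoding sketch, with a made-up register value:

	#include <stdint.h>
	#include <stdio.h>

	#define CTR_DMINLINE_SHIFT 16
	#define CTR_IMINLINE_SHIFT 0

	static unsigned int ctr_line_bytes(uint32_t ctr, unsigned int shift)
	{
		/* field is log2(line size in 4-byte words) */
		return 4U << ((ctr >> shift) & 0xf);
	}

	int main(void)
	{
		uint32_t ctr = 0x8444c004;	/* hypothetical CTR_EL0 value */

		printf("D: %u bytes, I: %u bytes\n",
		       ctr_line_bytes(ctr, CTR_DMINLINE_SHIFT),
		       ctr_line_bytes(ctr, CTR_IMINLINE_SHIFT));
		return 0;
	}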
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index d264a7274811..19844211a4e6 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -19,6 +19,7 @@
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H
+#include <linux/kgdb.h>
#include <linux/mm.h>
/*
@@ -71,7 +72,7 @@
* - kaddr - page address
* - size - region size
*/
-extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void __flush_icache_range(unsigned long start, unsigned long end);
extern int invalidate_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __inval_dcache_area(void *addr, size_t len);
@@ -81,6 +82,30 @@ extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
extern void sync_icache_aliases(void *kaddr, unsigned long len);
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+ __flush_icache_range(start, end);
+
+ /*
+ * IPI all online CPUs so that they undergo a context synchronization
+ * event and are forced to refetch the new instructions.
+ */
+#ifdef CONFIG_KGDB
+ /*
+ * KGDB performs cache maintenance with interrupts disabled, so we
+ * will deadlock trying to IPI the secondary CPUs. In theory, we can
+ * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
+ * just means that KGDB will elide the maintenance altogether! As it
+ * turns out, KGDB uses IPIs to round up the secondary CPUs during
+ * the patching operation, so we don't need extra IPIs here anyway.
+ * In which case, add a KGDB-specific bodge and return early.
+ */
+ if (kgdb_connected && irqs_disabled())
+ return;
+#endif
+ kick_all_cpus_sync();
+}
+
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 8a699c708fc9..be3bf3d08916 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -49,7 +49,8 @@
#define ARM64_HAS_CACHE_DIC 28
#define ARM64_HW_DBM 29
#define ARM64_SSBD 30
+#define ARM64_MISMATCHED_CACHE_TYPE 31
-#define ARM64_NCAPS 31
+#define ARM64_NCAPS 32
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 192d791f1103..7ed320895d1f 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -87,6 +87,9 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__)
#define efi_is_64bit() (true)
+#define efi_table_attr(table, attr, instance) \
+ ((table##_t *)instance)->attr
+
#define efi_call_proto(protocol, f, instance, ...) \
((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index fa92747a49c8..dd1ad3950ef5 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -16,13 +16,15 @@
#ifndef __ASM_FP_H
#define __ASM_FP_H
-#include <asm/ptrace.h>
#include <asm/errno.h>
+#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
+#include <asm/sysreg.h>
#ifndef __ASSEMBLY__
+#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
@@ -102,6 +104,16 @@ extern int sve_set_vector_length(struct task_struct *task,
extern int sve_set_current_vl(unsigned long arg);
extern int sve_get_current_vl(void);
+static inline void sve_user_disable(void)
+{
+ sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
+}
+
+static inline void sve_user_enable(void)
+{
+ sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
+}
+
/*
* Probing and setup functions.
* Calls to these functions must be serialised with one another.
@@ -128,6 +140,9 @@ static inline int sve_get_current_vl(void)
return -EINVAL;
}
+static inline void sve_user_disable(void) { BUILD_BUG(); }
+static inline void sve_user_enable(void) { BUILD_BUG(); }
+
static inline void sve_init_vq_map(void) { }
static inline void sve_update_vq_map(void) { }
static inline int sve_verify_vq_map(void) { return 0; }
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 41770766d964..6a53e59ced95 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -119,13 +119,16 @@ static inline void decode_ctrl_reg(u32 reg,
struct task_struct;
struct notifier_block;
+struct perf_event_attr;
struct perf_event;
struct pmu;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type, int *offset);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index f62c56b1793f..c6802dea6cab 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -446,8 +446,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset);
-bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
-
int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index a0fee6985e6a..b2b0c6405eb0 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -8,8 +8,6 @@
struct pt_regs;
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-
static inline int nr_legacy_irqs(void)
{
return 0;
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 6deb8d726041..d5a44cf859e9 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -48,7 +48,6 @@ struct kprobe_ctlblk {
unsigned long saved_irqflag;
struct prev_kprobe prev_kprobe;
struct kprobe_step_ctx ss_ctx;
- struct pt_regs jprobe_saved_regs;
};
void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 1dab3a984608..0c97e45d1dc3 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -140,7 +140,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
- *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
+ *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}
/*
@@ -190,8 +190,8 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
u32 mode;
if (vcpu_mode_is_32bit(vcpu)) {
- mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
- return mode > COMPAT_PSR_MODE_USR;
+ mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
+ return mode > PSR_AA32_MODE_USR;
}
mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
@@ -329,7 +329,7 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu)) {
- *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
+ *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
} else {
u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
sctlr |= (1 << 25);
@@ -340,7 +340,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu))
- return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
+ return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);
return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
index f922eaf780f9..fb9d137256a6 100644
--- a/arch/arm64/include/asm/neon.h
+++ b/arch/arm64/include/asm/neon.h
@@ -19,11 +19,4 @@
void kernel_neon_begin(void);
void kernel_neon_end(void);
-/*
- * Temporary macro to allow the crypto code to compile. Note that the
- * semantics of kernel_neon_begin_partial() are now different from the
- * original as it does not allow being called in an interrupt context.
- */
-#define kernel_neon_begin_partial(num_regs) kernel_neon_begin()
-
#endif /* ! __ASM_NEON_H */
diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
index 01bc46d5b43a..626ad01e83bf 100644
--- a/arch/arm64/include/asm/numa.h
+++ b/arch/arm64/include/asm/numa.h
@@ -35,10 +35,14 @@ void __init numa_set_distance(int from, int to, int distance);
void __init numa_free_distance(void);
void __init early_map_cpu_to_node(unsigned int cpu, int nid);
void numa_store_cpu_info(unsigned int cpu);
+void numa_add_cpu(unsigned int cpu);
+void numa_remove_cpu(unsigned int cpu);
#else /* CONFIG_NUMA */
static inline void numa_store_cpu_info(unsigned int cpu) { }
+static inline void numa_add_cpu(unsigned int cpu) { }
+static inline void numa_remove_cpu(unsigned int cpu) { }
static inline void arm64_numa_init(void) { }
static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index a73ae1e49200..79657ad91397 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -182,12 +182,12 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
start_thread_common(regs, pc);
- regs->pstate = COMPAT_PSR_MODE_USR;
+ regs->pstate = PSR_AA32_MODE_USR;
if (pc & 1)
- regs->pstate |= COMPAT_PSR_T_BIT;
+ regs->pstate |= PSR_AA32_T_BIT;
#ifdef __AARCH64EB__
- regs->pstate |= COMPAT_PSR_E_BIT;
+ regs->pstate |= PSR_AA32_E_BIT;
#endif
regs->compat_sp = sp;
@@ -266,5 +266,20 @@ extern void __init minsigstksz_setup(void);
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
#define SVE_GET_VL() sve_get_current_vl()
+/*
+ * For CONFIG_GCC_PLUGIN_STACKLEAK
+ *
+ * These need to be macros because otherwise we get stuck in a nightmare
+ * of header dependencies just to use task_stack_page().
+ */
+
+#define current_top_of_stack() \
+({ \
+ struct stack_info _info; \
+ BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info)); \
+ _info.high; \
+})
+#define on_thread_stack() (on_task_stack(current, current_stack_pointer, NULL))
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 6069d66e0bc2..177b851ca6d9 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -35,36 +35,39 @@
#define COMPAT_PTRACE_GETHBPREGS 29
#define COMPAT_PTRACE_SETHBPREGS 30
-/* AArch32 CPSR bits */
-#define COMPAT_PSR_MODE_MASK 0x0000001f
-#define COMPAT_PSR_MODE_USR 0x00000010
-#define COMPAT_PSR_MODE_FIQ 0x00000011
-#define COMPAT_PSR_MODE_IRQ 0x00000012
-#define COMPAT_PSR_MODE_SVC 0x00000013
-#define COMPAT_PSR_MODE_ABT 0x00000017
-#define COMPAT_PSR_MODE_HYP 0x0000001a
-#define COMPAT_PSR_MODE_UND 0x0000001b
-#define COMPAT_PSR_MODE_SYS 0x0000001f
-#define COMPAT_PSR_T_BIT 0x00000020
-#define COMPAT_PSR_F_BIT 0x00000040
-#define COMPAT_PSR_I_BIT 0x00000080
-#define COMPAT_PSR_A_BIT 0x00000100
-#define COMPAT_PSR_E_BIT 0x00000200
-#define COMPAT_PSR_J_BIT 0x01000000
-#define COMPAT_PSR_Q_BIT 0x08000000
-#define COMPAT_PSR_V_BIT 0x10000000
-#define COMPAT_PSR_C_BIT 0x20000000
-#define COMPAT_PSR_Z_BIT 0x40000000
-#define COMPAT_PSR_N_BIT 0x80000000
-#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
-#define COMPAT_PSR_GE_MASK 0x000f0000
+/* SPSR_ELx bits for exceptions taken from AArch32 */
+#define PSR_AA32_MODE_MASK 0x0000001f
+#define PSR_AA32_MODE_USR 0x00000010
+#define PSR_AA32_MODE_FIQ 0x00000011
+#define PSR_AA32_MODE_IRQ 0x00000012
+#define PSR_AA32_MODE_SVC 0x00000013
+#define PSR_AA32_MODE_ABT 0x00000017
+#define PSR_AA32_MODE_HYP 0x0000001a
+#define PSR_AA32_MODE_UND 0x0000001b
+#define PSR_AA32_MODE_SYS 0x0000001f
+#define PSR_AA32_T_BIT 0x00000020
+#define PSR_AA32_F_BIT 0x00000040
+#define PSR_AA32_I_BIT 0x00000080
+#define PSR_AA32_A_BIT 0x00000100
+#define PSR_AA32_E_BIT 0x00000200
+#define PSR_AA32_DIT_BIT 0x01000000
+#define PSR_AA32_Q_BIT 0x08000000
+#define PSR_AA32_V_BIT 0x10000000
+#define PSR_AA32_C_BIT 0x20000000
+#define PSR_AA32_Z_BIT 0x40000000
+#define PSR_AA32_N_BIT 0x80000000
+#define PSR_AA32_IT_MASK 0x0600fc00 /* If-Then execution state mask */
+#define PSR_AA32_GE_MASK 0x000f0000
#ifdef CONFIG_CPU_BIG_ENDIAN
-#define COMPAT_PSR_ENDSTATE COMPAT_PSR_E_BIT
+#define PSR_AA32_ENDSTATE PSR_AA32_E_BIT
#else
-#define COMPAT_PSR_ENDSTATE 0
+#define PSR_AA32_ENDSTATE 0
#endif
+/* AArch32 CPSR bits, as seen in AArch32 */
+#define COMPAT_PSR_DIT_BIT 0x00200000
+
/*
* These are 'magic' values for PTRACE_PEEKUSR that return info about where a
* process is located in memory.
@@ -111,6 +114,30 @@
#define compat_sp_fiq regs[29]
#define compat_lr_fiq regs[30]
+static inline unsigned long compat_psr_to_pstate(const unsigned long psr)
+{
+ unsigned long pstate;
+
+ pstate = psr & ~COMPAT_PSR_DIT_BIT;
+
+ if (psr & COMPAT_PSR_DIT_BIT)
+ pstate |= PSR_AA32_DIT_BIT;
+
+ return pstate;
+}
+
+static inline unsigned long pstate_to_compat_psr(const unsigned long pstate)
+{
+ unsigned long psr;
+
+ psr = pstate & ~PSR_AA32_DIT_BIT;
+
+ if (pstate & PSR_AA32_DIT_BIT)
+ psr |= COMPAT_PSR_DIT_BIT;
+
+ return psr;
+}
+
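
The bit relocation performed by these helpers can be checked in isolation;
the following userspace sketch (not kernel code) replicates the logic with
the same constants:

	#include <assert.h>
	#include <stdio.h>

	#define COMPAT_PSR_DIT_BIT 0x00200000UL /* bit 21, AArch32 CPSR */
	#define PSR_AA32_DIT_BIT   0x01000000UL /* bit 24, SPSR_ELx */

	static unsigned long demo_compat_psr_to_pstate(unsigned long psr)
	{
		unsigned long pstate = psr & ~COMPAT_PSR_DIT_BIT;

		if (psr & COMPAT_PSR_DIT_BIT)
			pstate |= PSR_AA32_DIT_BIT;
		return pstate;
	}

	int main(void)
	{
		unsigned long psr = 0x00200010UL; /* USR mode, DIT set */
		unsigned long pstate = demo_compat_psr_to_pstate(psr);

		assert(pstate == 0x01000010UL); /* DIT moved: bit 21 -> 24 */
		printf("psr=%#lx -> pstate=%#lx\n", psr, pstate);
		return 0;
	}
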
/*
* This struct defines the way the registers are stored on the stack during an
* exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for
@@ -156,7 +183,7 @@ static inline void forget_syscall(struct pt_regs *regs)
#ifdef CONFIG_COMPAT
#define compat_thumb_mode(regs) \
- (((regs)->pstate & COMPAT_PSR_T_BIT))
+ (((regs)->pstate & PSR_AA32_T_BIT))
#else
#define compat_thumb_mode(regs) (0)
#endif
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
index e073e6886685..ffe47d766c25 100644
--- a/arch/arm64/include/asm/sdei.h
+++ b/arch/arm64/include/asm/sdei.h
@@ -40,15 +40,18 @@ asmlinkage unsigned long __sdei_handler(struct pt_regs *regs,
unsigned long sdei_arch_get_entry_point(int conduit);
#define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x)
-bool _on_sdei_stack(unsigned long sp);
-static inline bool on_sdei_stack(unsigned long sp)
+struct stack_info;
+
+bool _on_sdei_stack(unsigned long sp, struct stack_info *info);
+static inline bool on_sdei_stack(unsigned long sp,
+ struct stack_info *info)
{
if (!IS_ENABLED(CONFIG_VMAP_STACK))
return false;
if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
return false;
if (in_nmi())
- return _on_sdei_stack(sp);
+ return _on_sdei_stack(sp, info);
return false;
}
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 26c5bd7d88d8..38116008d18b 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -16,123 +16,8 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
-#include <asm/lse.h>
-#include <asm/spinlock_types.h>
-#include <asm/processor.h>
-
-/*
- * Spinlock implementation.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- */
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- unsigned int tmp;
- arch_spinlock_t lockval, newval;
-
- asm volatile(
- /* Atomically increment the next ticket. */
- ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
-" prfm pstl1strm, %3\n"
-"1: ldaxr %w0, %3\n"
-" add %w1, %w0, %w5\n"
-" stxr %w2, %w1, %3\n"
-" cbnz %w2, 1b\n",
- /* LSE atomics */
-" mov %w2, %w5\n"
-" ldadda %w2, %w0, %3\n"
- __nops(3)
- )
-
- /* Did we get the lock? */
-" eor %w1, %w0, %w0, ror #16\n"
-" cbz %w1, 3f\n"
- /*
- * No: spin on the owner. Send a local event to avoid missing an
- * unlock before the exclusive load.
- */
-" sevl\n"
-"2: wfe\n"
-" ldaxrh %w2, %4\n"
-" eor %w1, %w2, %w0, lsr #16\n"
-" cbnz %w1, 2b\n"
- /* We got the lock. Critical section starts here. */
-"3:"
- : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
- : "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
- : "memory");
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
- unsigned int tmp;
- arch_spinlock_t lockval;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- " prfm pstl1strm, %2\n"
- "1: ldaxr %w0, %2\n"
- " eor %w1, %w0, %w0, ror #16\n"
- " cbnz %w1, 2f\n"
- " add %w0, %w0, %3\n"
- " stxr %w1, %w0, %2\n"
- " cbnz %w1, 1b\n"
- "2:",
- /* LSE atomics */
- " ldr %w0, %2\n"
- " eor %w1, %w0, %w0, ror #16\n"
- " cbnz %w1, 1f\n"
- " add %w1, %w0, %3\n"
- " casa %w0, %w1, %2\n"
- " sub %w1, %w1, %3\n"
- " eor %w1, %w1, %w0\n"
- "1:")
- : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
- : "I" (1 << TICKET_SHIFT)
- : "memory");
-
- return !tmp;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- unsigned long tmp;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- " ldrh %w1, %0\n"
- " add %w1, %w1, #1\n"
- " stlrh %w1, %0",
- /* LSE atomics */
- " mov %w1, #1\n"
- " staddlh %w1, %0\n"
- __nops(1))
- : "=Q" (lock->owner), "=&r" (tmp)
- :
- : "memory");
-}
-
-static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
- return lock.owner == lock.next;
-}
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
- return !arch_spin_value_unlocked(READ_ONCE(*lock));
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
- arch_spinlock_t lockval = READ_ONCE(*lock);
- return (lockval.next - lockval.owner) > 1;
-}
-#define arch_spin_is_contended arch_spin_is_contended
-
#include <asm/qrwlock.h>
+#include <asm/qspinlock.h>
/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 6b856012c51b..a157ff465e27 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,22 +20,7 @@
# error "please don't include this file directly"
#endif
-#include <linux/types.h>
-
-#define TICKET_SHIFT 16
-
-typedef struct {
-#ifdef __AARCH64EB__
- u16 next;
- u16 owner;
-#else
- u16 owner;
- u16 next;
-#endif
-} __aligned(4) arch_spinlock_t;
-
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
-
+#include <asm-generic/qspinlock_types.h>
#include <asm-generic/qrwlock_types.h>
#endif
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 902f9edacbea..e86737b7c924 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -32,6 +32,21 @@ struct stackframe {
#endif
};
+enum stack_type {
+ STACK_TYPE_UNKNOWN,
+ STACK_TYPE_TASK,
+ STACK_TYPE_IRQ,
+ STACK_TYPE_OVERFLOW,
+ STACK_TYPE_SDEI_NORMAL,
+ STACK_TYPE_SDEI_CRITICAL,
+};
+
+struct stack_info {
+ unsigned long low;
+ unsigned long high;
+ enum stack_type type;
+};
+
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
@@ -39,7 +54,8 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
-static inline bool on_irq_stack(unsigned long sp)
+static inline bool on_irq_stack(unsigned long sp,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
unsigned long high = low + IRQ_STACK_SIZE;
@@ -47,46 +63,79 @@ static inline bool on_irq_stack(unsigned long sp)
if (!low)
return false;
- return (low <= sp && sp < high);
+ if (sp < low || sp >= high)
+ return false;
+
+ if (info) {
+ info->low = low;
+ info->high = high;
+ info->type = STACK_TYPE_IRQ;
+ }
+
+ return true;
}
-static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
+static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)task_stack_page(tsk);
unsigned long high = low + THREAD_SIZE;
- return (low <= sp && sp < high);
+ if (sp < low || sp >= high)
+ return false;
+
+ if (info) {
+ info->low = low;
+ info->high = high;
+ info->type = STACK_TYPE_TASK;
+ }
+
+ return true;
}
#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
-static inline bool on_overflow_stack(unsigned long sp)
+static inline bool on_overflow_stack(unsigned long sp,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
unsigned long high = low + OVERFLOW_STACK_SIZE;
- return (low <= sp && sp < high);
+ if (sp < low || sp >= high)
+ return false;
+
+ if (info) {
+ info->low = low;
+ info->high = high;
+ info->type = STACK_TYPE_OVERFLOW;
+ }
+
+ return true;
}
#else
-static inline bool on_overflow_stack(unsigned long sp) { return false; }
+static inline bool on_overflow_stack(unsigned long sp,
+ struct stack_info *info) { return false; }
#endif
+
/*
* We can only safely access per-cpu stacks from current in a non-preemptible
* context.
*/
-static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp)
+static inline bool on_accessible_stack(struct task_struct *tsk,
+ unsigned long sp,
+ struct stack_info *info)
{
- if (on_task_stack(tsk, sp))
+ if (on_task_stack(tsk, sp, info))
return true;
if (tsk != current || preemptible())
return false;
- if (on_irq_stack(sp))
+ if (on_irq_stack(sp, info))
return true;
- if (on_overflow_stack(sp))
+ if (on_overflow_stack(sp, info))
return true;
- if (on_sdei_stack(sp))
+ if (on_sdei_stack(sp, info))
return true;
return false;
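
A hedged sketch of the kind of consumer this enables (helper name and
policy hypothetical, not part of this patch): with the filled-in
stack_info, an unwinder can insist that stack transitions are one-way,
something the bare booleans could not express:

	static bool demo_frame_ok(struct task_struct *tsk, unsigned long fp,
				  unsigned long *visited, enum stack_type *cur)
	{
		struct stack_info info;

		if (!on_accessible_stack(tsk, fp, &info))
			return false;		/* outside every known stack */

		if (info.type == *cur)
			return true;		/* still on the same stack */

		/* Revisiting a stack type we already left implies a loop
		 * or a corrupt frame chain; reject the walk. */
		if (*visited & BIT(info.type))
			return false;

		*visited |= BIT(info.type);
		*cur = info.type;
		return true;
	}
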
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 709a574468f0..ad8be16a39c9 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -20,7 +20,13 @@
#include <linux/compat.h>
#include <linux/err.h>
-extern const void *sys_call_table[];
+typedef long (*syscall_fn_t)(struct pt_regs *regs);
+
+extern const syscall_fn_t sys_call_table[];
+
+#ifdef CONFIG_COMPAT
+extern const syscall_fn_t compat_sys_call_table[];
+#endif
static inline int syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h
new file mode 100644
index 000000000000..a4477e515b79
--- /dev/null
+++ b/arch/arm64/include/asm/syscall_wrapper.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * syscall_wrapper.h - arm64 specific wrappers to syscall definitions
+ *
+ * Based on arch/x86/include/asm/syscall_wrapper.h
+ */
+
+#ifndef __ASM_SYSCALL_WRAPPER_H
+#define __ASM_SYSCALL_WRAPPER_H
+
+#define SC_ARM64_REGS_TO_ARGS(x, ...) \
+ __MAP(x,__SC_ARGS \
+ ,,regs->regs[0],,regs->regs[1],,regs->regs[2] \
+ ,,regs->regs[3],,regs->regs[4],,regs->regs[5])
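
To make the __MAP() plumbing concrete, an illustrative expansion (argument
types and names arbitrary): SC_ARM64_REGS_TO_ARGS(2, int, fd, int, flags)
ignores the named arguments entirely and produces the list

	regs->regs[0], regs->regs[1]

i.e. the first two AArch64 argument registers, read back out of the saved
pt_regs rather than out of live registers.
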
+
+#ifdef CONFIG_COMPAT
+
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long __arm64_compat_sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__arm64_compat_sys##name, ERRNO); \
+ static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long __arm64_compat_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_compat_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
+ static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
+ } \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#define COMPAT_SYSCALL_DEFINE0(sname) \
+ asmlinkage long __arm64_compat_sys_##sname(void); \
+ ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \
+ asmlinkage long __arm64_compat_sys_##sname(void)
+
+#define COND_SYSCALL_COMPAT(name) \
+ cond_syscall(__arm64_compat_sys_##name);
+
+#define COMPAT_SYS_NI(name) \
+ SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers);
+
+#endif /* CONFIG_COMPAT */
+
+#define __SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
+ return ret; \
+ } \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
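
For illustration (syscall name arbitrary, expansion simplified), the
wrapper turns one definition into three functions, and only the
pt_regs-taking entry point is ever seen by the syscall table:

	SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)

	/* roughly becomes: */
	asmlinkage long __arm64_sys_dup2(const struct pt_regs *regs)
	{
		return __se_sys_dup2(regs->regs[0], regs->regs[1]);
	}
	static long __se_sys_dup2(long oldfd, long newfd)
	{
		return __do_sys_dup2((unsigned int)oldfd,
				     (unsigned int)newfd);
	}
	static inline long __do_sys_dup2(unsigned int oldfd,
					 unsigned int newfd)
	{
		/* ... the body written after SYSCALL_DEFINE2(...) ... */
	}
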
+
+#ifndef SYSCALL_DEFINE0
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long __arm64_sys_##sname(void); \
+ ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \
+ asmlinkage long __arm64_sys_##sname(void)
+#endif
+
+#ifndef COND_SYSCALL
+#define COND_SYSCALL(name) cond_syscall(__arm64_sys_##name)
+#endif
+
+#ifndef SYS_NI
+#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers);
+#endif
+
+#endif /* __ASM_SYSCALL_WRAPPER_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index a8f84812c6e8..e205ec8489e9 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -436,7 +436,8 @@
#define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \
(1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
(1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
- (1 << 27) | (1 << 30) | (1 << 31))
+ (1 << 27) | (1 << 30) | (1 << 31) | \
+ (0xffffffffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL2 SCTLR_ELx_EE
@@ -452,9 +453,9 @@
SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
-/* Check all the bits are accounted for */
-#define SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != ~0)
-
+#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
+#error "Inconsistent SCTLR_EL2 set/clear bits"
+#endif
/* SCTLR_EL1 specific flags. */
#define SCTLR_EL1_UCI (1 << 26)
@@ -473,7 +474,8 @@
#define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \
(1 << 29))
#define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
- (1 << 27) | (1 << 30) | (1 << 31))
+ (1 << 27) | (1 << 30) | (1 << 31) | \
+ (0xffffffffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
@@ -492,8 +494,9 @@
SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
SCTLR_EL1_RES0)
-/* Check all the bits are accounted for */
-#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
+#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
+#error "Inconsistent SCTLR_EL1 set/clear bits"
+#endif
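
The preprocessor check enforces that every one of the 64 SCTLR bits is
claimed by exactly one of the SET/CLEAR masks, which is precisely when
their XOR is all-ones. A standalone sketch of the invariant (mask values
made up for illustration):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t set   = 0x00000000ffff0000ULL; /* hypothetical */
		uint64_t clear = 0xffffffff0000ffffULL; /* hypothetical */

		/* Each bit set in exactly one mask <=> XOR is all-ones. */
		assert((set ^ clear) == 0xffffffffffffffffULL);
		return 0;
	}
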
/* id_aa64isar0 */
#define ID_AA64ISAR0_TS_SHIFT 52
@@ -739,19 +742,6 @@ asm(
write_sysreg(__scs_new, sysreg); \
} while (0)
-static inline void config_sctlr_el1(u32 clear, u32 set)
-{
- u32 val;
-
- SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS;
- SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS;
-
- val = read_sysreg(sctlr_el1);
- val &= ~clear;
- val |= set;
- write_sysreg(val, sctlr_el1);
-}
-
#endif
#endif /* __ASM_SYSREG_H */
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index dfc61d73f740..a4a1901140ee 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -218,6 +218,13 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
dsb(ish);
}
+static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
+{
+ unsigned long addr = __TLBI_VADDR(kaddr, 0);
+
+ __tlbi(vaae1is, addr);
+ dsb(ish);
+}
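
A hedged sketch of the intended caller (helper name hypothetical): after
unlinking an intermediate kernel page-table page, the cached table walks
for that address must be dropped across all ASIDs, which the vaae1is
operation above provides:

	static void demo_unmap_kernel_pmd(pmd_t *pmdp, unsigned long addr)
	{
		pmd_clear(pmdp);			/* unlink table page */
		__flush_tlb_kernel_pgtable(addr);	/* flush stale walks */
	}
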
#endif
#endif
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index df48212f767b..49a0fee4f89b 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -11,7 +11,7 @@ struct cpu_topology {
int llc_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
- cpumask_t llc_siblings;
+ cpumask_t llc_sibling;
};
extern struct cpu_topology cpu_topology[NR_CPUS];
@@ -20,9 +20,11 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
+void remove_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
#ifdef CONFIG_NUMA
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index a0baa9af5487..e0d0f5b856e7 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -43,7 +43,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 398
+#define __NR_compat_syscalls 399
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index ef292160748c..2cd6dcf8d246 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -260,7 +260,7 @@ __SYSCALL(117, sys_ni_syscall)
#define __NR_fsync 118
__SYSCALL(__NR_fsync, sys_fsync)
#define __NR_sigreturn 119
-__SYSCALL(__NR_sigreturn, compat_sys_sigreturn_wrapper)
+__SYSCALL(__NR_sigreturn, compat_sys_sigreturn)
#define __NR_clone 120
__SYSCALL(__NR_clone, sys_clone)
#define __NR_setdomainname 121
@@ -368,7 +368,7 @@ __SYSCALL(__NR_getresgid, sys_getresgid16)
#define __NR_prctl 172
__SYSCALL(__NR_prctl, sys_prctl)
#define __NR_rt_sigreturn 173
-__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn_wrapper)
+__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn)
#define __NR_rt_sigaction 174
__SYSCALL(__NR_rt_sigaction, compat_sys_rt_sigaction)
#define __NR_rt_sigprocmask 175
@@ -382,9 +382,9 @@ __SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo)
#define __NR_rt_sigsuspend 179
__SYSCALL(__NR_rt_sigsuspend, compat_sys_rt_sigsuspend)
#define __NR_pread64 180
-__SYSCALL(__NR_pread64, compat_sys_pread64_wrapper)
+__SYSCALL(__NR_pread64, compat_sys_aarch32_pread64)
#define __NR_pwrite64 181
-__SYSCALL(__NR_pwrite64, compat_sys_pwrite64_wrapper)
+__SYSCALL(__NR_pwrite64, compat_sys_aarch32_pwrite64)
#define __NR_chown 182
__SYSCALL(__NR_chown, sys_chown16)
#define __NR_getcwd 183
@@ -406,11 +406,11 @@ __SYSCALL(__NR_vfork, sys_vfork)
#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
__SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */
#define __NR_mmap2 192
-__SYSCALL(__NR_mmap2, compat_sys_mmap2_wrapper)
+__SYSCALL(__NR_mmap2, compat_sys_aarch32_mmap2)
#define __NR_truncate64 193
-__SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper)
+__SYSCALL(__NR_truncate64, compat_sys_aarch32_truncate64)
#define __NR_ftruncate64 194
-__SYSCALL(__NR_ftruncate64, compat_sys_ftruncate64_wrapper)
+__SYSCALL(__NR_ftruncate64, compat_sys_aarch32_ftruncate64)
#define __NR_stat64 195
__SYSCALL(__NR_stat64, sys_stat64)
#define __NR_lstat64 196
@@ -472,7 +472,7 @@ __SYSCALL(223, sys_ni_syscall)
#define __NR_gettid 224
__SYSCALL(__NR_gettid, sys_gettid)
#define __NR_readahead 225
-__SYSCALL(__NR_readahead, compat_sys_readahead_wrapper)
+__SYSCALL(__NR_readahead, compat_sys_aarch32_readahead)
#define __NR_setxattr 226
__SYSCALL(__NR_setxattr, sys_setxattr)
#define __NR_lsetxattr 227
@@ -554,15 +554,15 @@ __SYSCALL(__NR_clock_getres, compat_sys_clock_getres)
#define __NR_clock_nanosleep 265
__SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep)
#define __NR_statfs64 266
-__SYSCALL(__NR_statfs64, compat_sys_statfs64_wrapper)
+__SYSCALL(__NR_statfs64, compat_sys_aarch32_statfs64)
#define __NR_fstatfs64 267
-__SYSCALL(__NR_fstatfs64, compat_sys_fstatfs64_wrapper)
+__SYSCALL(__NR_fstatfs64, compat_sys_aarch32_fstatfs64)
#define __NR_tgkill 268
__SYSCALL(__NR_tgkill, sys_tgkill)
#define __NR_utimes 269
__SYSCALL(__NR_utimes, compat_sys_utimes)
#define __NR_arm_fadvise64_64 270
-__SYSCALL(__NR_arm_fadvise64_64, compat_sys_fadvise64_64_wrapper)
+__SYSCALL(__NR_arm_fadvise64_64, compat_sys_aarch32_fadvise64_64)
#define __NR_pciconfig_iobase 271
__SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase)
#define __NR_pciconfig_read 272
@@ -704,7 +704,7 @@ __SYSCALL(__NR_get_robust_list, compat_sys_get_robust_list)
#define __NR_splice 340
__SYSCALL(__NR_splice, sys_splice)
#define __NR_sync_file_range2 341
-__SYSCALL(__NR_sync_file_range2, compat_sys_sync_file_range2_wrapper)
+__SYSCALL(__NR_sync_file_range2, compat_sys_aarch32_sync_file_range2)
#define __NR_tee 342
__SYSCALL(__NR_tee, sys_tee)
#define __NR_vmsplice 343
@@ -726,7 +726,7 @@ __SYSCALL(__NR_timerfd_create, sys_timerfd_create)
#define __NR_eventfd 351
__SYSCALL(__NR_eventfd, sys_eventfd)
#define __NR_fallocate 352
-__SYSCALL(__NR_fallocate, compat_sys_fallocate_wrapper)
+__SYSCALL(__NR_fallocate, compat_sys_aarch32_fallocate)
#define __NR_timerfd_settime 353
__SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime)
#define __NR_timerfd_gettime 354
@@ -817,6 +817,8 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 397
__SYSCALL(__NR_statx, sys_statx)
+#define __NR_rseq 398
+__SYSCALL(__NR_rseq, sys_rseq)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 0025f8691046..95ac7374d723 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -18,7 +18,8 @@ arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
hyp-stub.o psci.o cpu_ops.o insn.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
- smp.o smp_spin_table.o topology.o smccc-call.o
+ smp.o smp_spin_table.o topology.o smccc-call.o \
+ syscall.o
extra-$(CONFIG_EFI) := efi-entry.o
@@ -27,7 +28,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
$(call if_changed,objcopy)
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
- sys_compat.o entry32.o
+ sys_compat.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 7b09487ff8fb..ed46dc188b22 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -18,6 +18,7 @@
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cpumask.h>
+#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/init.h>
#include <linux/irq.h>
@@ -29,13 +30,9 @@
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
+#include <asm/pgtable.h>
#include <asm/smp_plat.h>
-#ifdef CONFIG_ACPI_APEI
-# include <linux/efi.h>
-# include <asm/pgtable.h>
-#endif
-
int acpi_noirq = 1; /* skip ACPI IRQ initialization */
int acpi_disabled = 1;
EXPORT_SYMBOL(acpi_disabled);
@@ -239,8 +236,7 @@ done:
}
}
-#ifdef CONFIG_ACPI_APEI
-pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
+pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
{
/*
* According to "Table 8 Map: EFI memory types to AArch64 memory
@@ -261,4 +257,3 @@ pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
return __pgprot(PROT_NORMAL_NC);
return __pgprot(PROT_DEVICE_nGnRnE);
}
-#endif
diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
index d190a7b231bf..4f4f1815e047 100644
--- a/arch/arm64/kernel/acpi_numa.c
+++ b/arch/arm64/kernel/acpi_numa.c
@@ -26,36 +26,73 @@
#include <linux/module.h>
#include <linux/topology.h>
-#include <acpi/processor.h>
#include <asm/numa.h>
-static int cpus_in_srat;
+static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };
-struct __node_cpu_hwid {
- u32 node_id; /* logical node containing this CPU */
- u64 cpu_hwid; /* MPIDR for this CPU */
-};
+int __init acpi_numa_get_nid(unsigned int cpu)
+{
+ return acpi_early_node_map[cpu];
+}
+
+static inline int get_cpu_for_acpi_id(u32 uid)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ if (uid == get_acpi_id_for_cpu(cpu))
+ return cpu;
-static struct __node_cpu_hwid early_node_cpu_hwid[NR_CPUS] = {
-[0 ... NR_CPUS - 1] = {NUMA_NO_NODE, PHYS_CPUID_INVALID} };
+ return -EINVAL;
+}
-int acpi_numa_get_nid(unsigned int cpu, u64 hwid)
+static int __init acpi_parse_gicc_pxm(struct acpi_subtable_header *header,
+ const unsigned long end)
{
- int i;
+ struct acpi_srat_gicc_affinity *pa;
+ int cpu, pxm, node;
- for (i = 0; i < cpus_in_srat; i++) {
- if (hwid == early_node_cpu_hwid[i].cpu_hwid)
- return early_node_cpu_hwid[i].node_id;
- }
+ if (srat_disabled())
+ return -EINVAL;
+
+ pa = (struct acpi_srat_gicc_affinity *)header;
+ if (!pa)
+ return -EINVAL;
+
+ if (!(pa->flags & ACPI_SRAT_GICC_ENABLED))
+ return 0;
- return NUMA_NO_NODE;
+ pxm = pa->proximity_domain;
+ node = pxm_to_node(pxm);
+
+ /*
+	 * If we can't map the UID to a logical CPU, the UID is not
+	 * part of the possible CPUs, so we do not need a NUMA mapping
+	 * for it; skip this SRAT entry and keep parsing.
+ */
+ cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid);
+ if (cpu < 0)
+ return 0;
+
+ acpi_early_node_map[cpu] = node;
+ pr_info("SRAT: PXM %d -> MPIDR 0x%llx -> Node %d\n", pxm,
+ cpu_logical_map(cpu), node);
+
+ return 0;
+}
+
+void __init acpi_map_cpus_to_nodes(void)
+{
+ acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat),
+ ACPI_SRAT_TYPE_GICC_AFFINITY,
+ acpi_parse_gicc_pxm, 0);
}
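
A sketch of how the early map is meant to be consumed during SMP bring-up
(helper name hypothetical; the real call site is outside this hunk): each
possible CPU's node can now be looked up by logical CPU number instead of
by MPIDR:

	static void __init demo_map_possible_cpus(void)
	{
		int cpu;

		for_each_possible_cpu(cpu)
			early_map_cpu_to_node(cpu, acpi_numa_get_nid(cpu));
	}
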
/* Callback for Proximity Domain -> ACPI processor UID mapping */
void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa)
{
int pxm, node;
- phys_cpuid_t mpidr;
if (srat_disabled())
return;
@@ -70,12 +107,6 @@ void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa)
if (!(pa->flags & ACPI_SRAT_GICC_ENABLED))
return;
- if (cpus_in_srat >= NR_CPUS) {
- pr_warn_once("SRAT: cpu_to_node_map[%d] is too small, may not be able to use all cpus\n",
- NR_CPUS);
- return;
- }
-
pxm = pa->proximity_domain;
node = acpi_map_pxm_to_node(pxm);
@@ -85,20 +116,7 @@ void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa)
return;
}
- mpidr = acpi_map_madt_entry(pa->acpi_processor_uid);
- if (mpidr == PHYS_CPUID_INVALID) {
- pr_err("SRAT: PXM %d with ACPI ID %d has no valid MPIDR in MADT\n",
- pxm, pa->acpi_processor_uid);
- bad_srat();
- return;
- }
-
- early_node_cpu_hwid[cpus_in_srat].node_id = node;
- early_node_cpu_hwid[cpus_in_srat].cpu_hwid = mpidr;
node_set(node, numa_nodes_parsed);
- cpus_in_srat++;
- pr_info("SRAT: PXM %d -> MPIDR 0x%Lx -> Node %d\n",
- pxm, mpidr, node);
}
int __init arm64_acpi_numa_init(void)
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 36fb069fd049..b5d603992d40 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -47,11 +47,11 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
unsigned long replptr;
if (kernel_text_address(pc))
- return 1;
+ return true;
replptr = (unsigned long)ALT_REPL_PTR(alt);
if (pc >= replptr && pc <= (replptr + alt->alt_len))
- return 0;
+ return false;
/*
* Branching into *another* alternate sequence is doomed, and
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index d4707abb2f16..92be1d12d590 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -441,8 +441,8 @@ static struct undef_hook swp_hooks[] = {
{
.instr_mask = 0x0fb00ff0,
.instr_val = 0x01000090,
- .pstate_mask = COMPAT_PSR_MODE_MASK,
- .pstate_val = COMPAT_PSR_MODE_USR,
+ .pstate_mask = PSR_AA32_MODE_MASK,
+ .pstate_val = PSR_AA32_MODE_USR,
.fn = swp_handler
},
{ }
@@ -511,9 +511,9 @@ ret:
static int cp15_barrier_set_hw_mode(bool enable)
{
if (enable)
- config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_CP15BEN);
else
- config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_CP15BEN, 0);
return 0;
}
@@ -521,15 +521,15 @@ static struct undef_hook cp15_barrier_hooks[] = {
{
.instr_mask = 0x0fff0fdf,
.instr_val = 0x0e070f9a,
- .pstate_mask = COMPAT_PSR_MODE_MASK,
- .pstate_val = COMPAT_PSR_MODE_USR,
+ .pstate_mask = PSR_AA32_MODE_MASK,
+ .pstate_val = PSR_AA32_MODE_USR,
.fn = cp15barrier_handler,
},
{
.instr_mask = 0x0fff0fff,
.instr_val = 0x0e070f95,
- .pstate_mask = COMPAT_PSR_MODE_MASK,
- .pstate_val = COMPAT_PSR_MODE_USR,
+ .pstate_mask = PSR_AA32_MODE_MASK,
+ .pstate_val = PSR_AA32_MODE_USR,
.fn = cp15barrier_handler,
},
{ }
@@ -548,9 +548,9 @@ static int setend_set_hw_mode(bool enable)
return -EINVAL;
if (enable)
- config_sctlr_el1(SCTLR_EL1_SED, 0);
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_SED, 0);
else
- config_sctlr_el1(0, SCTLR_EL1_SED);
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_SED);
return 0;
}
@@ -562,10 +562,10 @@ static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
if (big_endian) {
insn = "setend be";
- regs->pstate |= COMPAT_PSR_E_BIT;
+ regs->pstate |= PSR_AA32_E_BIT;
} else {
insn = "setend le";
- regs->pstate &= ~COMPAT_PSR_E_BIT;
+ regs->pstate &= ~PSR_AA32_E_BIT;
}
trace_instruction_emulation(insn, regs->pc);
@@ -593,16 +593,16 @@ static struct undef_hook setend_hooks[] = {
{
.instr_mask = 0xfffffdff,
.instr_val = 0xf1010000,
- .pstate_mask = COMPAT_PSR_MODE_MASK,
- .pstate_val = COMPAT_PSR_MODE_USR,
+ .pstate_mask = PSR_AA32_MODE_MASK,
+ .pstate_val = PSR_AA32_MODE_USR,
.fn = a32_setend_handler,
},
{
/* Thumb mode */
.instr_mask = 0x0000fff7,
.instr_val = 0x0000b650,
- .pstate_mask = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK),
- .pstate_val = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR),
+ .pstate_mask = (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK),
+ .pstate_val = (PSR_AA32_T_BIT | PSR_AA32_MODE_USR),
.fn = t16_setend_handler,
},
{}
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
index 6c2b1b4f57c9..fad90e4935fb 100644
--- a/arch/arm64/kernel/cpu-reset.h
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -16,13 +16,14 @@
void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
unsigned long arg0, unsigned long arg1, unsigned long arg2);
-static inline void __noreturn cpu_soft_restart(unsigned long el2_switch,
- unsigned long entry, unsigned long arg0, unsigned long arg1,
- unsigned long arg2)
+static inline void __noreturn cpu_soft_restart(unsigned long entry,
+ unsigned long arg0,
+ unsigned long arg1,
+ unsigned long arg2)
{
typeof(__cpu_soft_restart) *restart;
- el2_switch = el2_switch && !is_kernel_in_hyp_mode() &&
+ unsigned long el2_switch = !is_kernel_in_hyp_mode() &&
is_hyp_mode_available();
restart = (void *)__pa_symbol(__cpu_soft_restart);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 1d2b6d768efe..dec10898d688 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -65,19 +65,24 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
}
static bool
-has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
- int scope)
+has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
+ int scope)
{
+ u64 mask = CTR_CACHE_MINLINE_MASK;
+
+ /* Skip matching the min line sizes for cache type check */
+ if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
+ mask ^= arm64_ftr_reg_ctrel0.strict_mask;
+
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
- (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
+ return (read_cpuid_cachetype() & mask) !=
+ (arm64_ftr_reg_ctrel0.sys_val & mask);
}
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
- /* Clear SCTLR_EL1.UCT */
- config_sctlr_el1(SCTLR_EL1_UCT, 0);
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
@@ -101,7 +106,7 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
for (i = 0; i < SZ_2K; i += 0x80)
memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
- flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+ __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
@@ -613,7 +618,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.desc = "Mismatched cache line size",
.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
- .matches = has_mismatched_cache_line_size,
+ .matches = has_mismatched_cache_type,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .cpu_enable = cpu_enable_trap_ctr_access,
+ },
+ {
+ .desc = "Mismatched cache type",
+ .capability = ARM64_MISMATCHED_CACHE_TYPE,
+ .matches = has_mismatched_cache_type,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.cpu_enable = cpu_enable_trap_ctr_access,
},
@@ -649,7 +661,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
- .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.cpu_enable = enable_smccc_arch_workaround_1,
ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
},
@@ -658,7 +669,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.desc = "EL2 vector hardening",
.capability = ARM64_HARDEN_EL2_VECTORS,
- .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
},
#endif
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c6d80743f4ed..611e8921c3d4 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -214,7 +214,7 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
* If we have differing I-cache policies, report it as the weakest - VIPT.
*/
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -1723,7 +1723,7 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn)
static struct undef_hook mrs_hook = {
.instr_mask = 0xfff00000,
.instr_val = 0xd5300000,
- .pstate_mask = COMPAT_PSR_MODE_MASK,
+ .pstate_mask = PSR_AA32_MODE_MASK,
.pstate_val = PSR_MODE_EL0t,
.fn = emulate_mrs,
};
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 28ad8799406f..09dbea221a27 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -41,19 +41,9 @@
* Context tracking subsystem. Used to instrument transitions
* between user and kernel mode.
*/
- .macro ct_user_exit, syscall = 0
+ .macro ct_user_exit
#ifdef CONFIG_CONTEXT_TRACKING
bl context_tracking_user_exit
- .if \syscall == 1
- /*
- * Save/restore needed during syscalls. Restore syscall arguments from
- * the values already saved on stack during kernel_entry.
- */
- ldp x0, x1, [sp]
- ldp x2, x3, [sp, #S_X2]
- ldp x4, x5, [sp, #S_X4]
- ldp x6, x7, [sp, #S_X6]
- .endif
#endif
.endm
@@ -63,6 +53,12 @@
#endif
.endm
+ .macro clear_gp_regs
+ .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
+ mov x\n, xzr
+ .endr
+ .endm
+
/*
* Bad Abort numbers
*-----------------
@@ -140,20 +136,21 @@ alternative_else_nop_endif
// This macro corrupts x0-x3. It is the caller's duty
// to save/restore them if required.
- .macro apply_ssbd, state, targ, tmp1, tmp2
+ .macro apply_ssbd, state, tmp1, tmp2
#ifdef CONFIG_ARM64_SSBD
alternative_cb arm64_enable_wa2_handling
- b \targ
+ b .L__asm_ssbd_skip\@
alternative_cb_end
ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
- cbz \tmp2, \targ
+ cbz \tmp2, .L__asm_ssbd_skip\@
ldr \tmp2, [tsk, #TSK_TI_FLAGS]
- tbnz \tmp2, #TIF_SSBD, \targ
+ tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
mov w1, #\state
alternative_cb arm64_update_smccc_conduit
nop // Patched to SMC/HVC #0
alternative_cb_end
+.L__asm_ssbd_skip\@:
#endif
.endm
@@ -178,20 +175,14 @@ alternative_cb_end
stp x28, x29, [sp, #16 * 14]
.if \el == 0
+ clear_gp_regs
mrs x21, sp_el0
ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear,
ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
disable_step_tsk x19, x20 // exceptions when scheduling.
- apply_ssbd 1, 1f, x22, x23
-
-#ifdef CONFIG_ARM64_SSBD
- ldp x0, x1, [sp, #16 * 0]
- ldp x2, x3, [sp, #16 * 1]
-#endif
-1:
+ apply_ssbd 1, x22, x23
- mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
get_thread_info tsk
@@ -331,8 +322,7 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
- apply_ssbd 0, 5f, x0, x1
-5:
+ apply_ssbd 0, x0, x1
.endif
msr elr_el1, x21 // set up the return data
@@ -720,14 +710,9 @@ el0_sync_compat:
b.ge el0_dbg
b el0_inv
el0_svc_compat:
- /*
- * AArch32 syscall handling
- */
- ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags
- adrp stbl, compat_sys_call_table // load compat syscall table pointer
- mov wscno, w7 // syscall number in w7 (r7)
- mov wsc_nr, #__NR_compat_syscalls
- b el0_svc_naked
+ mov x0, sp
+ bl el0_svc_compat_handler
+ b ret_to_user
.align 6
el0_irq_compat:
@@ -896,25 +881,6 @@ el0_error_naked:
b ret_to_user
ENDPROC(el0_error)
-
-/*
- * This is the fast syscall return path. We do as little as possible here,
- * and this includes saving x0 back into the kernel stack.
- */
-ret_fast_syscall:
- disable_daif
- str x0, [sp, #S_X0] // returned x0
- ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing
- and x2, x1, #_TIF_SYSCALL_WORK
- cbnz x2, ret_fast_syscall_trace
- and x2, x1, #_TIF_WORK_MASK
- cbnz x2, work_pending
- enable_step_tsk x1, x2
- kernel_exit 0
-ret_fast_syscall_trace:
- enable_daif
- b __sys_trace_return_skipped // we already saved x0
-
/*
* Ok, we need to do extra processing, enter the slow path.
*/
@@ -936,6 +902,9 @@ ret_to_user:
cbnz x2, work_pending
finish_ret_to_user:
enable_step_tsk x1, x2
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ bl stackleak_erase
+#endif
kernel_exit 0
ENDPROC(ret_to_user)
@@ -944,85 +913,10 @@ ENDPROC(ret_to_user)
*/
.align 6
el0_svc:
- ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags
- adrp stbl, sys_call_table // load syscall table pointer
- mov wscno, w8 // syscall number in w8
- mov wsc_nr, #__NR_syscalls
-
-#ifdef CONFIG_ARM64_SVE
-alternative_if_not ARM64_SVE
- b el0_svc_naked
-alternative_else_nop_endif
- tbz x16, #TIF_SVE, el0_svc_naked // Skip unless TIF_SVE set:
- bic x16, x16, #_TIF_SVE // discard SVE state
- str x16, [tsk, #TSK_TI_FLAGS]
-
- /*
- * task_fpsimd_load() won't be called to update CPACR_EL1 in
- * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
- * happens if a context switch or kernel_neon_begin() or context
- * modification (sigreturn, ptrace) intervenes.
- * So, ensure that CPACR_EL1 is already correct for the fast-path case:
- */
- mrs x9, cpacr_el1
- bic x9, x9, #CPACR_EL1_ZEN_EL0EN // disable SVE for el0
- msr cpacr_el1, x9 // synchronised by eret to el0
-#endif
-
-el0_svc_naked: // compat entry point
- stp x0, xscno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
- enable_daif
- ct_user_exit 1
-
- tst x16, #_TIF_SYSCALL_WORK // check for syscall hooks
- b.ne __sys_trace
- cmp wscno, wsc_nr // check upper syscall limit
- b.hs ni_sys
- mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number
- ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
- blr x16 // call sys_* routine
- b ret_fast_syscall
-ni_sys:
- mov x0, sp
- bl do_ni_syscall
- b ret_fast_syscall
-ENDPROC(el0_svc)
-
- /*
- * This is the really slow path. We're going to be doing context
- * switches, and waiting for our parent to respond.
- */
-__sys_trace:
- cmp wscno, #NO_SYSCALL // user-issued syscall(-1)?
- b.ne 1f
- mov x0, #-ENOSYS // set default errno if so
- str x0, [sp, #S_X0]
-1: mov x0, sp
- bl syscall_trace_enter
- cmp w0, #NO_SYSCALL // skip the syscall?
- b.eq __sys_trace_return_skipped
- mov wscno, w0 // syscall number (possibly new)
- mov x1, sp // pointer to regs
- cmp wscno, wsc_nr // check upper syscall limit
- b.hs __ni_sys_trace
- ldp x0, x1, [sp] // restore the syscall args
- ldp x2, x3, [sp, #S_X2]
- ldp x4, x5, [sp, #S_X4]
- ldp x6, x7, [sp, #S_X6]
- ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
- blr x16 // call sys_* routine
-
-__sys_trace_return:
- str x0, [sp, #S_X0] // save returned x0
-__sys_trace_return_skipped:
mov x0, sp
- bl syscall_trace_exit
+ bl el0_svc_handler
b ret_to_user
-
-__ni_sys_trace:
- mov x0, sp
- bl do_ni_syscall
- b __sys_trace_return
+ENDPROC(el0_svc)
.popsection // .entry.text
@@ -1138,14 +1032,6 @@ __entry_tramp_data_start:
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
- * Special system call wrappers.
- */
-ENTRY(sys_rt_sigreturn_wrapper)
- mov x0, sp
- b sys_rt_sigreturn
-ENDPROC(sys_rt_sigreturn_wrapper)
-
-/*
* Register switch for AArch64. The callee-saved registers need to be saved
* and restored. On entry:
* x0 = previous task_struct (must be preserved across the switch)
diff --git a/arch/arm64/kernel/entry32.S b/arch/arm64/kernel/entry32.S
deleted file mode 100644
index f332d5d1f6b4..000000000000
--- a/arch/arm64/kernel/entry32.S
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Compat system call wrappers
- *
- * Copyright (C) 2012 ARM Ltd.
- * Authors: Will Deacon <will.deacon@arm.com>
- * Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <linux/const.h>
-
-#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
-#include <asm/errno.h>
-#include <asm/page.h>
-
-/*
- * System call wrappers for the AArch32 compatibility layer.
- */
-
-ENTRY(compat_sys_sigreturn_wrapper)
- mov x0, sp
- b compat_sys_sigreturn
-ENDPROC(compat_sys_sigreturn_wrapper)
-
-ENTRY(compat_sys_rt_sigreturn_wrapper)
- mov x0, sp
- b compat_sys_rt_sigreturn
-ENDPROC(compat_sys_rt_sigreturn_wrapper)
-
-ENTRY(compat_sys_statfs64_wrapper)
- mov w3, #84
- cmp w1, #88
- csel w1, w3, w1, eq
- b compat_sys_statfs64
-ENDPROC(compat_sys_statfs64_wrapper)
-
-ENTRY(compat_sys_fstatfs64_wrapper)
- mov w3, #84
- cmp w1, #88
- csel w1, w3, w1, eq
- b compat_sys_fstatfs64
-ENDPROC(compat_sys_fstatfs64_wrapper)
-
-/*
- * Note: off_4k (w5) is always in units of 4K. If we can't do the
- * requested offset because it is not page-aligned, we return -EINVAL.
- */
-ENTRY(compat_sys_mmap2_wrapper)
-#if PAGE_SHIFT > 12
- tst w5, #~PAGE_MASK >> 12
- b.ne 1f
- lsr w5, w5, #PAGE_SHIFT - 12
-#endif
- b sys_mmap_pgoff
-1: mov x0, #-EINVAL
- ret
-ENDPROC(compat_sys_mmap2_wrapper)
-
-/*
- * Wrappers for AArch32 syscalls that either take 64-bit parameters
- * in registers or that take 32-bit parameters which require sign
- * extension.
- */
-ENTRY(compat_sys_pread64_wrapper)
- regs_to_64 x3, x4, x5
- b sys_pread64
-ENDPROC(compat_sys_pread64_wrapper)
-
-ENTRY(compat_sys_pwrite64_wrapper)
- regs_to_64 x3, x4, x5
- b sys_pwrite64
-ENDPROC(compat_sys_pwrite64_wrapper)
-
-ENTRY(compat_sys_truncate64_wrapper)
- regs_to_64 x1, x2, x3
- b sys_truncate
-ENDPROC(compat_sys_truncate64_wrapper)
-
-ENTRY(compat_sys_ftruncate64_wrapper)
- regs_to_64 x1, x2, x3
- b sys_ftruncate
-ENDPROC(compat_sys_ftruncate64_wrapper)
-
-ENTRY(compat_sys_readahead_wrapper)
- regs_to_64 x1, x2, x3
- mov w2, w4
- b sys_readahead
-ENDPROC(compat_sys_readahead_wrapper)
-
-ENTRY(compat_sys_fadvise64_64_wrapper)
- mov w6, w1
- regs_to_64 x1, x2, x3
- regs_to_64 x2, x4, x5
- mov w3, w6
- b sys_fadvise64_64
-ENDPROC(compat_sys_fadvise64_64_wrapper)
-
-ENTRY(compat_sys_sync_file_range2_wrapper)
- regs_to_64 x2, x2, x3
- regs_to_64 x3, x4, x5
- b sys_sync_file_range2
-ENDPROC(compat_sys_sync_file_range2_wrapper)
-
-ENTRY(compat_sys_fallocate_wrapper)
- regs_to_64 x2, x2, x3
- regs_to_64 x3, x4, x5
- b sys_fallocate
-ENDPROC(compat_sys_fallocate_wrapper)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 84c68b14f1b2..58c53bc96928 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -159,25 +159,6 @@ static void sve_free(struct task_struct *task)
__sve_free(task);
}
-static void change_cpacr(u64 val, u64 mask)
-{
- u64 cpacr = read_sysreg(CPACR_EL1);
- u64 new = (cpacr & ~mask) | val;
-
- if (new != cpacr)
- write_sysreg(new, CPACR_EL1);
-}
-
-static void sve_user_disable(void)
-{
- change_cpacr(0, CPACR_EL1_ZEN_EL0EN);
-}
-
-static void sve_user_enable(void)
-{
- change_cpacr(CPACR_EL1_ZEN_EL0EN, CPACR_EL1_ZEN_EL0EN);
-}
-
/*
* TIF_SVE controls whether a task can use SVE without trapping while
* in userspace, and also the way a task's FPSIMD/SVE state is stored
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 413dbe530da8..8c9644376326 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -343,14 +343,13 @@ static int get_hbp_len(u8 hbp_len)
/*
* Check whether bp virtual address is in kernel space.
*/
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- va = info->address;
- len = get_hbp_len(info->ctrl.len);
+ va = hw->address;
+ len = get_hbp_len(hw->ctrl.len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -421,53 +420,53 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
/*
* Construct an arch_hw_breakpoint from a perf_event.
*/
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_X:
- info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+ hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
break;
case HW_BREAKPOINT_R:
- info->ctrl.type = ARM_BREAKPOINT_LOAD;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD;
break;
case HW_BREAKPOINT_W:
- info->ctrl.type = ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_STORE;
break;
case HW_BREAKPOINT_RW:
- info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
break;
default:
return -EINVAL;
}
/* Len */
- switch (bp->attr.bp_len) {
+ switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
- info->ctrl.len = ARM_BREAKPOINT_LEN_1;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
- info->ctrl.len = ARM_BREAKPOINT_LEN_2;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_3:
- info->ctrl.len = ARM_BREAKPOINT_LEN_3;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_3;
break;
case HW_BREAKPOINT_LEN_4:
- info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
break;
case HW_BREAKPOINT_LEN_5:
- info->ctrl.len = ARM_BREAKPOINT_LEN_5;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_5;
break;
case HW_BREAKPOINT_LEN_6:
- info->ctrl.len = ARM_BREAKPOINT_LEN_6;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_6;
break;
case HW_BREAKPOINT_LEN_7:
- info->ctrl.len = ARM_BREAKPOINT_LEN_7;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_7;
break;
case HW_BREAKPOINT_LEN_8:
- info->ctrl.len = ARM_BREAKPOINT_LEN_8;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
break;
default:
return -EINVAL;
@@ -478,37 +477,37 @@ static int arch_build_bp_info(struct perf_event *bp)
* AArch32 also requires breakpoints of length 2 for Thumb.
* Watchpoints can be of length 1, 2, 4 or 8 bytes.
*/
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
+ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
if (is_compat_bp(bp)) {
- if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
- info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+ if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+ hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
return -EINVAL;
- } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
+ } else if (hw->ctrl.len != ARM_BREAKPOINT_LEN_4) {
/*
* FIXME: Some tools (I'm looking at you perf) assume
* that breakpoints should be sizeof(long). This
* is nonsense. For now, we fix up the parameter
* but we should probably return -EINVAL instead.
*/
- info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
}
}
/* Address */
- info->address = bp->attr.bp_addr;
+ hw->address = attr->bp_addr;
/*
* Privilege
* Note that we disallow combined EL0/EL1 breakpoints because
* that would complicate the stepping code.
*/
- if (arch_check_bp_in_kernelspace(bp))
- info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
+ if (arch_check_bp_in_kernelspace(hw))
+ hw->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
else
- info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;
+ hw->ctrl.privilege = AARCH64_BREAKPOINT_EL0;
/* Enabled? */
- info->ctrl.enabled = !bp->attr.disabled;
+ hw->ctrl.enabled = !attr->disabled;
return 0;
}
@@ -516,14 +515,15 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings.
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
int ret;
u64 alignment_mask, offset;
/* Build the arch_hw_breakpoint. */
- ret = arch_build_bp_info(bp);
+ ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
@@ -537,42 +537,42 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* that here.
*/
if (is_compat_bp(bp)) {
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
alignment_mask = 0x7;
else
alignment_mask = 0x3;
- offset = info->address & alignment_mask;
+ offset = hw->address & alignment_mask;
switch (offset) {
case 0:
/* Aligned */
break;
case 1:
/* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
default:
return -EINVAL;
}
} else {
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
+ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE)
alignment_mask = 0x3;
else
alignment_mask = 0x7;
- offset = info->address & alignment_mask;
+ offset = hw->address & alignment_mask;
}
- info->address &= ~alignment_mask;
- info->ctrl.len <<= offset;
+ hw->address &= ~alignment_mask;
+ hw->ctrl.len <<= offset;
/*
* Disallow per-task kernel breakpoints since these would
* complicate the stepping code.
*/
- if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
+ if (hw->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
return -EINVAL;
return 0;
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 816d03c4c913..2b3413549734 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -149,20 +149,6 @@ int __kprobes aarch64_insn_write(void *addr, u32 insn)
return __aarch64_insn_write(addr, cpu_to_le32(insn));
}
-static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
-{
- if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
- return false;
-
- return aarch64_insn_is_b(insn) ||
- aarch64_insn_is_bl(insn) ||
- aarch64_insn_is_svc(insn) ||
- aarch64_insn_is_hvc(insn) ||
- aarch64_insn_is_smc(insn) ||
- aarch64_insn_is_brk(insn) ||
- aarch64_insn_is_nop(insn);
-}
-
bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
/* ldr/ldrsw (literal), prfm */
@@ -189,22 +175,6 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
aarch64_insn_is_bcond(insn);
}
-/*
- * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
- * Section B2.6.5 "Concurrent modification and execution of instructions":
- * Concurrent modification and execution of instructions can lead to the
- * resulting instruction performing any behavior that can be achieved by
- * executing any sequence of instructions that can be executed from the
- * same Exception level, except where the instruction before modification
- * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
- * or SMC instruction.
- */
-bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
-{
- return __aarch64_insn_hotpatch_safe(old_insn) &&
- __aarch64_insn_hotpatch_safe(new_insn);
-}
-
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
u32 *tp = addr;
@@ -216,8 +186,8 @@ int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
ret = aarch64_insn_write(tp, insn);
if (ret == 0)
- flush_icache_range((uintptr_t)tp,
- (uintptr_t)tp + AARCH64_INSN_SIZE);
+ __flush_icache_range((uintptr_t)tp,
+ (uintptr_t)tp + AARCH64_INSN_SIZE);
return ret;
}
@@ -239,11 +209,6 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
pp->new_insns[i]);
- /*
- * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
- * which ends with "dsb; isb" pair guaranteeing global
- * visibility.
- */
/* Notify other processors with an additional increment. */
atomic_inc(&pp->cpu_count);
} else {
@@ -255,8 +220,7 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
return ret;
}
-static
-int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
struct aarch64_insn_patch patch = {
.text_addrs = addrs,
@@ -272,34 +236,6 @@ int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
cpu_online_mask);
}
-int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
-{
- int ret;
- u32 insn;
-
-	/* Unsafe to patch multiple instructions without synchronization */
- if (cnt == 1) {
- ret = aarch64_insn_read(addrs[0], &insn);
- if (ret)
- return ret;
-
- if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
- /*
- * ARMv8 architecture doesn't guarantee all CPUs see
- * the new instruction after returning from function
- * aarch64_insn_patch_text_nosync(). So send IPIs to
- * all other CPUs to achieve instruction
- * synchronization.
- */
- ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
- kick_all_cpus_sync();
- return ret;
- }
- }
-
- return aarch64_insn_patch_text_sync(addrs, insns, cnt);
-}
-
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
u32 *maskp, int *shiftp)
{
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 60e5fc661f74..780a12f59a8f 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -42,16 +42,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
return 0;
}
-void (*handle_arch_irq)(struct pt_regs *) = NULL;
-
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
- if (handle_arch_irq)
- return;
-
- handle_arch_irq = handle_irq;
-}
-
#ifdef CONFIG_VMAP_STACK
static void init_irq_stacks(void)
{
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index f76ea92dff91..f6a5c6bc1434 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -184,8 +184,15 @@ void machine_kexec(struct kimage *kimage)
/* Flush the reboot_code_buffer in preparation for its execution. */
__flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
- flush_icache_range((uintptr_t)reboot_code_buffer,
- arm64_relocate_new_kernel_size);
+
+ /*
+ * Although we've killed off the secondary CPUs, we don't update
+ * the online mask if we're handling a crash kernel and consequently
+ * need to avoid flush_icache_range(), which will attempt to IPI
+ * the offline CPUs. Therefore, we must use the __* variant here.
+ */
+ __flush_icache_range((uintptr_t)reboot_code_buffer,
+ arm64_relocate_new_kernel_size);
/* Flush the kimage list and its buffers. */
kexec_list_flush(kimage);
@@ -207,8 +214,7 @@ void machine_kexec(struct kimage *kimage)
* relocation is complete.
*/
- cpu_soft_restart(kimage != kexec_crash_image,
- reboot_code_buffer_phys, kimage->head, kimage->start, 0);
+ cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start, 0);
BUG(); /* Should never get here. */
}
@@ -361,4 +367,5 @@ void arch_crash_save_vmcoreinfo(void)
kimage_voffset);
vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
PHYS_OFFSET);
+ vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 33147aacdafd..8e38d5267f22 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -25,6 +25,7 @@
#include <asm/virt.h>
#include <linux/acpi.h>
+#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
@@ -446,9 +447,16 @@ static struct attribute_group armv8_pmuv3_events_attr_group = {
};
PMU_FORMAT_ATTR(event, "config:0-15");
+PMU_FORMAT_ATTR(long, "config1:0");
+
+static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
+{
+ return event->attr.config1 & 0x1;
+}
static struct attribute *armv8_pmuv3_format_attrs[] = {
&format_attr_event.attr,
+ &format_attr_long.attr,
NULL,
};
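
From userspace, the new format bit is selected with the perf event string
syntax; illustratively (the PMU instance name is platform-dependent):

	perf stat -e armv8_pmuv3/inst_retired,long=1/ -- <workload>

This sets attr.config1 bit 0, which armv8pmu_event_is_64bit() above
decodes as a request for a 64-bit (chained) counter.
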
@@ -466,6 +474,21 @@ static struct attribute_group armv8_pmuv3_format_attr_group = {
(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
/*
+ * We must chain two programmable counters for 64-bit events,
+ * except when we have allocated the 64-bit cycle counter (for the CPU
+ * cycles event). This must be called only when the event has
+ * a counter allocated.
+ */
+static inline bool armv8pmu_event_is_chained(struct perf_event *event)
+{
+ int idx = event->hw.idx;
+
+ return !WARN_ON(idx < 0) &&
+ armv8pmu_event_is_64bit(event) &&
+ (idx != ARMV8_IDX_CYCLE_COUNTER);
+}
+
+/*
* ARMv8 low level PMU access
*/
@@ -503,34 +526,68 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
-static inline int armv8pmu_select_counter(int idx)
+static inline void armv8pmu_select_counter(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
write_sysreg(counter, pmselr_el0);
isb();
+}
- return idx;
+static inline u32 armv8pmu_read_evcntr(int idx)
+{
+ armv8pmu_select_counter(idx);
+ return read_sysreg(pmxevcntr_el0);
+}
+
+static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
+{
+ int idx = event->hw.idx;
+ u64 val = 0;
+
+ val = armv8pmu_read_evcntr(idx);
+ if (armv8pmu_event_is_chained(event))
+ val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
+ return val;
}
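
The read path above composes the 64-bit value from two adjacent 32-bit counters: the odd (high) counter is read first and shifted over the even (low) one. A standalone sketch of the same composition, with stand-ins for the kernel's upper_32_bits()/lower_32_bits():

/* Standalone sketch of chained-counter value handling: the odd counter
 * holds the high 32 bits, the even counter below it the low 32 bits. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t compose(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

static void split(uint64_t val, uint32_t *hi, uint32_t *lo)
{
	*hi = val >> 32;	/* upper_32_bits() */
	*lo = (uint32_t)val;	/* lower_32_bits() */
}

int main(void)
{
	uint32_t hi, lo;
	uint64_t val = 0x1122334455667788ULL;

	split(val, &hi, &lo);
	assert(compose(hi, lo) == val);
	printf("hi=%#x lo=%#x -> %#llx\n", hi, lo,
	       (unsigned long long)compose(hi, lo));
	return 0;
}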
-static inline u32 armv8pmu_read_counter(struct perf_event *event)
+static inline u64 armv8pmu_read_counter(struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- u32 value = 0;
+ u64 value = 0;
if (!armv8pmu_counter_valid(cpu_pmu, idx))
pr_err("CPU%u reading wrong counter %d\n",
smp_processor_id(), idx);
else if (idx == ARMV8_IDX_CYCLE_COUNTER)
value = read_sysreg(pmccntr_el0);
- else if (armv8pmu_select_counter(idx) == idx)
- value = read_sysreg(pmxevcntr_el0);
+ else
+ value = armv8pmu_read_hw_counter(event);
return value;
}
-static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
+static inline void armv8pmu_write_evcntr(int idx, u32 value)
+{
+ armv8pmu_select_counter(idx);
+ write_sysreg(value, pmxevcntr_el0);
+}
+
+static inline void armv8pmu_write_hw_counter(struct perf_event *event,
+ u64 value)
+{
+ int idx = event->hw.idx;
+
+ if (armv8pmu_event_is_chained(event)) {
+ armv8pmu_write_evcntr(idx, upper_32_bits(value));
+ armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
+ } else {
+ armv8pmu_write_evcntr(idx, value);
+ }
+}
+
+static inline void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -541,22 +598,43 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
smp_processor_id(), idx);
else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
/*
- * Set the upper 32bits as this is a 64bit counter but we only
- * count using the lower 32bits and we want an interrupt when
- * it overflows.
+ * The cycles counter is really a 64-bit counter.
+ * When treating it as a 32-bit counter, we only count
+ * the lower 32 bits, and set the upper 32-bits so that
+ * we get an interrupt upon 32-bit overflow.
*/
- u64 value64 = 0xffffffff00000000ULL | value;
-
- write_sysreg(value64, pmccntr_el0);
- } else if (armv8pmu_select_counter(idx) == idx)
- write_sysreg(value, pmxevcntr_el0);
+ if (!armv8pmu_event_is_64bit(event))
+ value |= 0xffffffff00000000ULL;
+ write_sysreg(value, pmccntr_el0);
+ } else
+ armv8pmu_write_hw_counter(event, value);
}
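
Presetting the upper 32 bits is what makes the 64-bit cycle counter behave like a 32-bit one: the hardware raises the overflow interrupt only when the full 64-bit value wraps. A standalone model of that arithmetic:

/* Sketch: preset the upper 32 bits of a 64-bit counter so it wraps
 * (i.e. would raise the overflow IRQ) after 2^32 - value increments. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t value = 0xfffffff0;	/* want overflow in 16 ticks */
	uint64_t ctr = 0xffffffff00000000ULL | value;
	unsigned int ticks = 0;

	while (ctr != 0) {		/* wrap == overflow interrupt */
		ctr++;
		ticks++;
	}
	printf("overflowed after %u increments\n", ticks);
	return 0;
}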
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
- if (armv8pmu_select_counter(idx) == idx) {
- val &= ARMV8_PMU_EVTYPE_MASK;
- write_sysreg(val, pmxevtyper_el0);
+ armv8pmu_select_counter(idx);
+ val &= ARMV8_PMU_EVTYPE_MASK;
+ write_sysreg(val, pmxevtyper_el0);
+}
+
+static inline void armv8pmu_write_event_type(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ /*
+ * For chained events, the low counter is programmed to count
+ * the event of interest and the high counter is programmed
+ * with CHAIN event code with filters set to count at all ELs.
+ */
+ if (armv8pmu_event_is_chained(event)) {
+ u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
+ ARMV8_PMU_INCLUDE_EL2;
+
+ armv8pmu_write_evtype(idx - 1, hwc->config_base);
+ armv8pmu_write_evtype(idx, chain_evt);
+ } else {
+ armv8pmu_write_evtype(idx, hwc->config_base);
}
}
@@ -567,6 +645,16 @@ static inline int armv8pmu_enable_counter(int idx)
return idx;
}
+static inline void armv8pmu_enable_event_counter(struct perf_event *event)
+{
+ int idx = event->hw.idx;
+
+ armv8pmu_enable_counter(idx);
+ if (armv8pmu_event_is_chained(event))
+ armv8pmu_enable_counter(idx - 1);
+ isb();
+}
+
static inline int armv8pmu_disable_counter(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
@@ -574,6 +662,16 @@ static inline int armv8pmu_disable_counter(int idx)
return idx;
}
+static inline void armv8pmu_disable_event_counter(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (armv8pmu_event_is_chained(event))
+ armv8pmu_disable_counter(idx - 1);
+ armv8pmu_disable_counter(idx);
+}
+
static inline int armv8pmu_enable_intens(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
@@ -581,6 +679,11 @@ static inline int armv8pmu_enable_intens(int idx)
return idx;
}
+static inline int armv8pmu_enable_event_irq(struct perf_event *event)
+{
+ return armv8pmu_enable_intens(event->hw.idx);
+}
+
static inline int armv8pmu_disable_intens(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
@@ -593,6 +696,11 @@ static inline int armv8pmu_disable_intens(int idx)
return idx;
}
+static inline int armv8pmu_disable_event_irq(struct perf_event *event)
+{
+ return armv8pmu_disable_intens(event->hw.idx);
+}
+
static inline u32 armv8pmu_getreset_flags(void)
{
u32 value;
@@ -610,10 +718,8 @@ static inline u32 armv8pmu_getreset_flags(void)
static void armv8pmu_enable_event(struct perf_event *event)
{
unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
/*
* Enable counter and interrupt, and set the counter to count
@@ -624,22 +730,22 @@ static void armv8pmu_enable_event(struct perf_event *event)
/*
* Disable counter
*/
- armv8pmu_disable_counter(idx);
+ armv8pmu_disable_event_counter(event);
/*
* Set event (if destined for PMNx counters).
*/
- armv8pmu_write_evtype(idx, hwc->config_base);
+ armv8pmu_write_event_type(event);
/*
* Enable interrupt for this counter
*/
- armv8pmu_enable_intens(idx);
+ armv8pmu_enable_event_irq(event);
/*
* Enable counter
*/
- armv8pmu_enable_counter(idx);
+ armv8pmu_enable_event_counter(event);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
@@ -647,10 +753,8 @@ static void armv8pmu_enable_event(struct perf_event *event)
static void armv8pmu_disable_event(struct perf_event *event)
{
unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
/*
* Disable counter and interrupt
@@ -660,16 +764,38 @@ static void armv8pmu_disable_event(struct perf_event *event)
/*
* Disable counter
*/
- armv8pmu_disable_counter(idx);
+ armv8pmu_disable_event_counter(event);
/*
* Disable interrupt for this counter
*/
- armv8pmu_disable_intens(idx);
+ armv8pmu_disable_event_irq(event);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
+static void armv8pmu_start(struct arm_pmu *cpu_pmu)
+{
+ unsigned long flags;
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ /* Enable all counters */
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
+{
+ unsigned long flags;
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ /* Disable all counters */
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
u32 pmovsr;
@@ -694,6 +820,11 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
*/
regs = get_irq_regs();
+ /*
+ * Stop the PMU while processing the counter overflows
+ * to prevent skews in group events.
+ */
+ armv8pmu_stop(cpu_pmu);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -718,6 +849,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
if (perf_event_overflow(event, &data, regs))
cpu_pmu->disable(event);
}
+ armv8pmu_start(cpu_pmu);
/*
* Handle the pending perf events.
@@ -731,32 +863,42 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
return IRQ_HANDLED;
}
-static void armv8pmu_start(struct arm_pmu *cpu_pmu)
+static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
+ struct arm_pmu *cpu_pmu)
{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ int idx;
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /* Enable all counters */
- armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
+ if (!test_and_set_bit(idx, cpuc->used_mask))
+ return idx;
+ }
+ return -EAGAIN;
}
-static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
+static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
+ struct arm_pmu *cpu_pmu)
{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ int idx;
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /* Disable all counters */
- armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ /*
+ * Chaining requires two consecutive event counters, where
+ * the lower idx must be even.
+ */
+ for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
+ if (!test_and_set_bit(idx, cpuc->used_mask)) {
+ /* Check if the preceding even counter is available */
+ if (!test_and_set_bit(idx - 1, cpuc->used_mask))
+ return idx;
+			/* Release the odd counter */
+ clear_bit(idx, cpuc->used_mask);
+ }
+ }
+ return -EAGAIN;
}
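
The allocator walks only the odd indices and then tries to claim the even neighbour below, backing out if the pair cannot be completed. A single-threaded model of that strategy (a plain bool array stands in for the atomic used_mask bitmap):

/* Standalone sketch of the chained-counter allocator: claim an odd
 * index, then try the even index below it; back out on failure. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_COUNTERS 6

static bool used[NUM_COUNTERS];

static bool test_and_set(int idx)
{
	bool was = used[idx];
	used[idx] = true;
	return was;
}

static int get_chain_idx(void)
{
	for (int idx = 1; idx < NUM_COUNTERS; idx += 2) {
		if (!test_and_set(idx)) {
			if (!test_and_set(idx - 1))
				return idx;	/* got the pair */
			used[idx] = false;	/* release the odd one */
		}
	}
	return -1;				/* -EAGAIN */
}

int main(void)
{
	used[0] = true;				/* counter 0 already taken */
	printf("chained pair high idx: %d\n", get_chain_idx()); /* 3 */
	return 0;
}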
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
- int idx;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
@@ -770,13 +912,20 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
/*
* Otherwise use events counters
*/
- for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
- if (!test_and_set_bit(idx, cpuc->used_mask))
- return idx;
- }
+ if (armv8pmu_event_is_64bit(event))
+ return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
+ else
+ return armv8pmu_get_single_idx(cpuc, cpu_pmu);
+}
- /* The counters are all in use. */
- return -EAGAIN;
+static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ int idx = event->hw.idx;
+
+ clear_bit(idx, cpuc->used_mask);
+ if (armv8pmu_event_is_chained(event))
+ clear_bit(idx - 1, cpuc->used_mask);
}
/*
@@ -851,6 +1000,9 @@ static int __armv8_pmuv3_map_event(struct perf_event *event,
&armv8_pmuv3_perf_cache_map,
ARMV8_PMU_EVTYPE_EVENT);
+ if (armv8pmu_event_is_64bit(event))
+ event->hw.flags |= ARMPMU_EVT_64BIT;
+
 /* Only expose micro/arch events supported by this PMU */
if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
&& test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
@@ -957,10 +1109,10 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv8pmu_read_counter,
cpu_pmu->write_counter = armv8pmu_write_counter,
cpu_pmu->get_event_idx = armv8pmu_get_event_idx,
+ cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx,
cpu_pmu->start = armv8pmu_start,
cpu_pmu->stop = armv8pmu_stop,
cpu_pmu->reset = armv8pmu_reset,
- cpu_pmu->max_period = (1LLU << 32) - 1,
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
return 0;
@@ -1127,3 +1279,32 @@ static int __init armv8_pmu_driver_init(void)
return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)
+
+void arch_perf_update_userpage(struct perf_event *event,
+ struct perf_event_mmap_page *userpg, u64 now)
+{
+ u32 freq;
+ u32 shift;
+
+ /*
+ * Internal timekeeping for enabled/running/stopped times
+ * is always computed with the sched_clock.
+ */
+ freq = arch_timer_get_rate();
+ userpg->cap_user_time = 1;
+
+ clocks_calc_mult_shift(&userpg->time_mult, &shift, freq,
+ NSEC_PER_SEC, 0);
+ /*
+	 * time_shift is not expected to be greater than 31, because
+	 * the originally published conversion algorithm shifted a
+	 * 32-bit value (it now specifies a 64-bit value) - see the
+	 * perf_event_mmap_page documentation in perf_event.h.
+ */
+ if (shift == 32) {
+ shift = 31;
+ userpg->time_mult >>= 1;
+ }
+ userpg->time_shift = (u16)shift;
+ userpg->time_offset = -now;
+}
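
clocks_calc_mult_shift() picks a (mult, shift) pair such that ns = (cycles * mult) >> shift; the fixup above then keeps shift at or below 31 for the userpage. A deliberately simplified stand-in for that computation (the 50 MHz timer frequency is an arbitrary example, and the real kernel helper also bounds the error over a requested interval):

/* Simplified stand-in for clocks_calc_mult_shift(): pick the largest
 * shift <= 32 whose mult fits in 32 bits, then apply the patch's
 * shift == 32 fixup. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint32_t freq = 50000000;	/* e.g. a 50 MHz arch timer */
	uint32_t shift, mult = 0;

	for (shift = 32; shift > 0; shift--) {
		uint64_t m = (NSEC_PER_SEC << shift) / freq;
		if (m <= 0xffffffffULL) {
			mult = (uint32_t)m;
			break;
		}
	}
	if (shift == 32) {		/* keep shift <= 31, as above */
		shift = 31;
		mult >>= 1;
	}

	uint64_t cycles = freq;		/* one second's worth */
	printf("mult=%u shift=%u -> %llu ns\n", mult, shift,
	       (unsigned long long)((cycles * mult) >> shift));
	return 0;
}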
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index d849d9804011..e78c3ef04d95 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
break;
case KPROBE_HIT_SS:
case KPROBE_REENTER:
- pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
+ pr_warn("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
BUG();
break;
@@ -395,9 +395,9 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
/*
* If we have no pre-handler or it returned 0, we
* continue with normal processing. If we have a
- * pre-handler and it returned non-zero, it prepped
- * for calling the break_handler below on re-entry,
- * so get out doing nothing more here.
+	 * pre-handler and it returned non-zero, it has already
+	 * modified the execution path and there is no need to
+	 * single-step. Just reset the current kprobe and exit.
*
 * pre_handler can hit a breakpoint and can step through
* before return, keep PSTATE D-flag enabled until
@@ -405,16 +405,8 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
setup_singlestep(p, regs, kcb, 0);
- return;
- }
- }
- } else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) ==
- BRK64_OPCODE_KPROBES) && cur_kprobe) {
- /* We probably hit a jprobe. Call its break handler. */
- if (cur_kprobe->break_handler &&
- cur_kprobe->break_handler(cur_kprobe, regs)) {
- setup_singlestep(cur_kprobe, regs, kcb, 0);
- return;
+ } else
+ reset_current_kprobe();
}
}
/*
@@ -465,74 +457,6 @@ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
return DBG_HOOK_HANDLED;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- /*
- * Since we can't be sure where in the stack frame "stacked"
- * pass-by-value arguments are stored we just don't try to
- * duplicate any of the stack. Do not use jprobes on functions that
- * use more than 64 bytes (after padding each to an 8 byte boundary)
- * of arguments, or pass individual arguments larger than 16 bytes.
- */
-
- instruction_pointer_set(regs, (unsigned long) jp->entry);
- preempt_disable();
- pause_graph_tracing();
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- /*
- * Jprobe handler return by entering break exception,
- * encoded same as kprobe, but with following conditions
- * -a special PC to identify it from the other kprobes.
- * -restore stack addr to original saved pt_regs
- */
- asm volatile(" mov sp, %0 \n"
- "jprobe_return_break: brk %1 \n"
- :
- : "r" (kcb->jprobe_saved_regs.sp),
- "I" (BRK64_ESR_KPROBES)
- : "memory");
-
- unreachable();
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- long stack_addr = kcb->jprobe_saved_regs.sp;
- long orig_sp = kernel_stack_pointer(regs);
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- extern const char jprobe_return_break[];
-
- if (instruction_pointer(regs) != (u64) jprobe_return_break)
- return 0;
-
- if (orig_sp != stack_addr) {
- struct pt_regs *saved_regs =
- (struct pt_regs *)kcb->jprobe_saved_regs.sp;
- pr_err("current sp %lx does not match saved sp %lx\n",
- orig_sp, stack_addr);
- pr_err("Saved registers for jprobe %p\n", jp);
- __show_regs(saved_regs);
- pr_err("Current registers\n");
- __show_regs(regs);
- BUG();
- }
- unpause_graph_tracing();
- *regs = kcb->jprobe_saved_regs;
- preempt_enable_no_resched();
- return 1;
-}
-
bool arch_within_kprobe_blacklist(unsigned long addr)
{
if ((addr >= (unsigned long)__kprobes_text_start &&
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index e10bc363f533..7f1628effe6d 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -177,16 +177,16 @@ static void print_pstate(struct pt_regs *regs)
if (compat_user_mode(regs)) {
printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
pstate,
- pstate & COMPAT_PSR_N_BIT ? 'N' : 'n',
- pstate & COMPAT_PSR_Z_BIT ? 'Z' : 'z',
- pstate & COMPAT_PSR_C_BIT ? 'C' : 'c',
- pstate & COMPAT_PSR_V_BIT ? 'V' : 'v',
- pstate & COMPAT_PSR_Q_BIT ? 'Q' : 'q',
- pstate & COMPAT_PSR_T_BIT ? "T32" : "A32",
- pstate & COMPAT_PSR_E_BIT ? "BE" : "LE",
- pstate & COMPAT_PSR_A_BIT ? 'A' : 'a',
- pstate & COMPAT_PSR_I_BIT ? 'I' : 'i',
- pstate & COMPAT_PSR_F_BIT ? 'F' : 'f');
+ pstate & PSR_AA32_N_BIT ? 'N' : 'n',
+ pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
+ pstate & PSR_AA32_C_BIT ? 'C' : 'c',
+ pstate & PSR_AA32_V_BIT ? 'V' : 'v',
+ pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
+ pstate & PSR_AA32_T_BIT ? "T32" : "A32",
+ pstate & PSR_AA32_E_BIT ? "BE" : "LE",
+ pstate & PSR_AA32_A_BIT ? 'A' : 'a',
+ pstate & PSR_AA32_I_BIT ? 'I' : 'i',
+ pstate & PSR_AA32_F_BIT ? 'F' : 'f');
} else {
printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n",
pstate,
@@ -493,3 +493,25 @@ void arch_setup_new_exec(void)
{
current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
}
+
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+void __used stackleak_check_alloca(unsigned long size)
+{
+ unsigned long stack_left;
+ unsigned long current_sp = current_stack_pointer;
+ struct stack_info info;
+
+ BUG_ON(!on_accessible_stack(current, current_sp, &info));
+
+ stack_left = current_sp - info.low;
+
+ /*
+ * There's a good chance we're almost out of stack space if this
+ * is true. Using panic() over BUG() is more likely to give
+ * reliable debugging output.
+ */
+ if (size >= stack_left)
+ panic("alloca() over the kernel stack boundary\n");
+}
+EXPORT_SYMBOL(stackleak_check_alloca);
+#endif
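
The check itself is a simple comparison of the requested allocation size against the bytes remaining between the stack pointer and the stack's low bound. A standalone model, with abort() standing in for panic():

/* Model of the stackleak alloca check: bail out if the requested size
 * reaches past the low bound of the current stack. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct stack_info {
	uintptr_t low;
	uintptr_t high;
};

static void check_alloca(uintptr_t sp, const struct stack_info *info,
			 size_t size)
{
	size_t stack_left = sp - info->low;

	if (size >= stack_left) {
		fprintf(stderr, "alloca() over the stack boundary\n");
		abort();
	}
}

int main(void)
{
	struct stack_info info = { .low = 0x1000, .high = 0x5000 };

	check_alloca(0x3000, &info, 0x1000);	/* fine: 0x2000 left */
	printf("small alloca ok\n");
	check_alloca(0x3000, &info, 0x2000);	/* aborts: at the limit */
	return 0;
}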
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 5c338ce5a7fa..6219486fa25f 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -132,7 +132,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
- on_irq_stack(addr);
+ on_irq_stack(addr, NULL);
}
/**
@@ -277,19 +277,22 @@ static int ptrace_hbp_set_event(unsigned int note_type,
switch (note_type) {
case NT_ARM_HW_BREAK:
- if (idx < ARM_MAX_BRP) {
- tsk->thread.debug.hbp_break[idx] = bp;
- err = 0;
- }
+ if (idx >= ARM_MAX_BRP)
+ goto out;
+ idx = array_index_nospec(idx, ARM_MAX_BRP);
+ tsk->thread.debug.hbp_break[idx] = bp;
+ err = 0;
break;
case NT_ARM_HW_WATCH:
- if (idx < ARM_MAX_WRP) {
- tsk->thread.debug.hbp_watch[idx] = bp;
- err = 0;
- }
+ if (idx >= ARM_MAX_WRP)
+ goto out;
+ idx = array_index_nospec(idx, ARM_MAX_WRP);
+ tsk->thread.debug.hbp_watch[idx] = bp;
+ err = 0;
break;
}
+out:
return err;
}
@@ -1076,6 +1079,7 @@ static int compat_gpr_get(struct task_struct *target,
break;
case 16:
reg = task_pt_regs(target)->pstate;
+ reg = pstate_to_compat_psr(reg);
break;
case 17:
reg = task_pt_regs(target)->orig_x0;
@@ -1143,6 +1147,7 @@ static int compat_gpr_set(struct task_struct *target,
newregs.pc = reg;
break;
case 16:
+ reg = compat_psr_to_pstate(reg);
newregs.pstate = reg;
break;
case 17:
@@ -1629,7 +1634,7 @@ static void tracehook_report_syscall(struct pt_regs *regs,
regs->regs[regno] = saved_reg;
}
-asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+int syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
@@ -1647,7 +1652,7 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
return regs->syscallno;
}
-asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+void syscall_trace_exit(struct pt_regs *regs)
{
audit_syscall_exit(regs);
@@ -1656,18 +1661,24 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
+
+ rseq_syscall(regs);
}
/*
- * Bits which are always architecturally RES0 per ARM DDI 0487A.h
+ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
+ * We also take into account DIT (bit 24), which is not yet documented, and
+ * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
+ * allocated an EL0 meaning in future.
* Userspace cannot use these until they have an architectural meaning.
+ * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
* We also reserve IL for the kernel; SS is handled dynamically.
*/
#define SPSR_EL1_AARCH64_RES0_BITS \
- (GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
- GENMASK_ULL(5, 5))
+ (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+ GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
- (GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20))
+ (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
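
Both masks are built from GENMASK_ULL(h, l) bit ranges. For anyone wanting to sanity-check the new bit layout, here is a userspace re-creation of the macro that prints the updated AArch64 mask:

/* Userspace re-creation of GENMASK_ULL() to visualize the SPSR_EL1
 * AArch64 RES0 mask defined above. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t res0 = GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) |
			GENMASK_ULL(23, 22) | GENMASK_ULL(20, 10) |
			GENMASK_ULL(5, 5);

	printf("SPSR_EL1_AARCH64_RES0_BITS = %#018llx\n",
	       (unsigned long long)res0);
	return 0;
}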
static int valid_compat_regs(struct user_pt_regs *regs)
{
@@ -1675,15 +1686,15 @@ static int valid_compat_regs(struct user_pt_regs *regs)
if (!system_supports_mixed_endian_el0()) {
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
- regs->pstate |= COMPAT_PSR_E_BIT;
+ regs->pstate |= PSR_AA32_E_BIT;
else
- regs->pstate &= ~COMPAT_PSR_E_BIT;
+ regs->pstate &= ~PSR_AA32_E_BIT;
}
if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
- (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
- (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
- (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
+ (regs->pstate & PSR_AA32_A_BIT) == 0 &&
+ (regs->pstate & PSR_AA32_I_BIT) == 0 &&
+ (regs->pstate & PSR_AA32_F_BIT) == 0) {
return 1;
}
@@ -1691,11 +1702,11 @@ static int valid_compat_regs(struct user_pt_regs *regs)
* Force PSR to a valid 32-bit EL0t, preserving the same bits as
* arch/arm.
*/
- regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
- COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
- COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
- COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
- COMPAT_PSR_T_BIT;
+ regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
+ PSR_AA32_C_BIT | PSR_AA32_V_BIT |
+ PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
+ PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
+ PSR_AA32_T_BIT;
regs->pstate |= PSR_MODE32_BIT;
return 0;
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 6b8d90d5ceae..5ba4465e44f0 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -13,6 +13,7 @@
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
+#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>
@@ -88,23 +89,52 @@ static int init_sdei_stacks(void)
return err;
}
-bool _on_sdei_stack(unsigned long sp)
+static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
{
- unsigned long low, high;
+ unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
+ unsigned long high = low + SDEI_STACK_SIZE;
- if (!IS_ENABLED(CONFIG_VMAP_STACK))
+ if (sp < low || sp >= high)
return false;
- low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
- high = low + SDEI_STACK_SIZE;
+ if (info) {
+ info->low = low;
+ info->high = high;
+ info->type = STACK_TYPE_SDEI_NORMAL;
+ }
- if (low <= sp && sp < high)
+ return true;
+}
+
+static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
+{
+ unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
+ unsigned long high = low + SDEI_STACK_SIZE;
+
+ if (sp < low || sp >= high)
+ return false;
+
+ if (info) {
+ info->low = low;
+ info->high = high;
+ info->type = STACK_TYPE_SDEI_CRITICAL;
+ }
+
+ return true;
+}
+
+bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
+{
+ if (!IS_ENABLED(CONFIG_VMAP_STACK))
+ return false;
+
+ if (on_sdei_critical_stack(sp, info))
return true;
- low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
- high = low + SDEI_STACK_SIZE;
+ if (on_sdei_normal_stack(sp, info))
+ return true;
- return (low <= sp && sp < high);
+ return false;
}
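
Both helpers follow the same pattern: a half-open [low, low + SDEI_STACK_SIZE) range check that optionally reports which stack matched. A consolidated standalone sketch (the addresses are made up):

/* Sketch of the on_sdei_*_stack() pattern: a half-open [low, high)
 * range check that optionally reports which stack was matched. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum stack_type { STACK_TYPE_SDEI_NORMAL, STACK_TYPE_SDEI_CRITICAL };

struct stack_info {
	uintptr_t low, high;
	enum stack_type type;
};

static bool on_stack(uintptr_t sp, uintptr_t low, size_t size,
		     enum stack_type type, struct stack_info *info)
{
	uintptr_t high = low + size;

	if (sp < low || sp >= high)
		return false;
	if (info) {
		info->low = low;
		info->high = high;
		info->type = type;
	}
	return true;
}

int main(void)
{
	struct stack_info info;

	if (on_stack(0x2100, 0x2000, 0x1000, STACK_TYPE_SDEI_NORMAL, &info))
		printf("sp on stack type %d [%#lx, %#lx)\n", info.type,
		       (unsigned long)info.low, (unsigned long)info.high);
	return 0;
}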
unsigned long sdei_arch_get_entry_point(int conduit)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 30ad2f085d1f..5b4fac434c84 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -241,6 +241,44 @@ static void __init request_standard_resources(void)
}
}
+static int __init reserve_memblock_reserved_regions(void)
+{
+ phys_addr_t start, end, roundup_end = 0;
+ struct resource *mem, *res;
+ u64 i;
+
+ for_each_reserved_mem_region(i, &start, &end) {
+ if (end <= roundup_end)
+ continue; /* done already */
+
+ start = __pfn_to_phys(PFN_DOWN(start));
+ end = __pfn_to_phys(PFN_UP(end)) - 1;
+ roundup_end = end;
+
+ res = kzalloc(sizeof(*res), GFP_ATOMIC);
+ if (WARN_ON(!res))
+ return -ENOMEM;
+ res->start = start;
+ res->end = end;
+ res->name = "reserved";
+ res->flags = IORESOURCE_MEM;
+
+ mem = request_resource_conflict(&iomem_resource, res);
+ /*
+	 * We expect memblock_reserve() regions to conflict with
+	 * memory regions created by request_standard_resources().
+ */
+ if (WARN_ON_ONCE(!mem))
+ continue;
+ kfree(res);
+
+ reserve_region_with_split(mem, start, end, "reserved");
+ }
+
+ return 0;
+}
+arch_initcall(reserve_memblock_reserved_regions);
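
Each reserved range is first rounded outward to page boundaries via PFN_DOWN() on the start and PFN_UP() on the end. A standalone model of just that arithmetic, assuming 4K pages:

/* Model of the page rounding used above: expand [start, end) outward
 * to page boundaries, i.e. PFN_DOWN(start) and PFN_UP(end). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	uint64_t start = 0x80001234, end = 0x80005678;

	uint64_t res_start = (start >> PAGE_SHIFT) << PAGE_SHIFT;
	uint64_t res_end = ((end + PAGE_SIZE - 1) >> PAGE_SHIFT)
				<< PAGE_SHIFT;

	printf("[%#llx, %#llx) -> [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end,
	       (unsigned long long)res_start, (unsigned long long)res_end);
	return 0;
}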
+
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
void __init setup_arch(char **cmdline_p)
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 511af13e8d8f..5dcc942906db 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -539,8 +539,9 @@ static int restore_sigframe(struct pt_regs *regs,
return err;
}
-asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+SYSCALL_DEFINE0(rt_sigreturn)
{
+ struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
@@ -802,6 +803,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
int usig = ksig->sig;
int ret;
+ rseq_signal_deliver(ksig, regs);
+
/*
* Set up the stack frame
*/
@@ -910,7 +913,7 @@ static void do_signal(struct pt_regs *regs)
}
asmlinkage void do_notify_resume(struct pt_regs *regs,
- unsigned int thread_flags)
+ unsigned long thread_flags)
{
/*
* The assembly code enters us with IRQs off, but it hasn't
@@ -940,6 +943,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
if (thread_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
}
if (thread_flags & _TIF_FOREIGN_FPSTATE)
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 77b91f478995..24b09003f821 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -243,6 +243,7 @@ static int compat_restore_sigframe(struct pt_regs *regs,
int err;
sigset_t set;
struct compat_aux_sigframe __user *aux;
+ unsigned long psr;
err = get_sigset_t(&set, &sf->uc.uc_sigmask);
if (err == 0) {
@@ -266,7 +267,9 @@ static int compat_restore_sigframe(struct pt_regs *regs,
__get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
__get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
__get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
- __get_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);
+ __get_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err);
+
+ regs->pstate = compat_psr_to_pstate(psr);
/*
* Avoid compat_sys_sigreturn() restarting.
@@ -282,8 +285,9 @@ static int compat_restore_sigframe(struct pt_regs *regs,
return err;
}
-asmlinkage int compat_sys_sigreturn(struct pt_regs *regs)
+COMPAT_SYSCALL_DEFINE0(sigreturn)
{
+ struct pt_regs *regs = current_pt_regs();
struct compat_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
@@ -312,8 +316,9 @@ badframe:
return 0;
}
-asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
+COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
+ struct pt_regs *regs = current_pt_regs();
struct compat_rt_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
@@ -372,22 +377,22 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
{
compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler);
compat_ulong_t retcode;
- compat_ulong_t spsr = regs->pstate & ~(PSR_f | COMPAT_PSR_E_BIT);
+ compat_ulong_t spsr = regs->pstate & ~(PSR_f | PSR_AA32_E_BIT);
int thumb;
/* Check if the handler is written for ARM or Thumb */
thumb = handler & 1;
if (thumb)
- spsr |= COMPAT_PSR_T_BIT;
+ spsr |= PSR_AA32_T_BIT;
else
- spsr &= ~COMPAT_PSR_T_BIT;
+ spsr &= ~PSR_AA32_T_BIT;
/* The IT state must be cleared for both ARM and Thumb-2 */
- spsr &= ~COMPAT_PSR_IT_MASK;
+ spsr &= ~PSR_AA32_IT_MASK;
/* Restore the original endianness */
- spsr |= COMPAT_PSR_ENDSTATE;
+ spsr |= PSR_AA32_ENDSTATE;
if (ka->sa.sa_flags & SA_RESTORER) {
retcode = ptr_to_compat(ka->sa.sa_restorer);
@@ -414,6 +419,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
struct pt_regs *regs, sigset_t *set)
{
struct compat_aux_sigframe __user *aux;
+ unsigned long psr = pstate_to_compat_psr(regs->pstate);
int err = 0;
__put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
@@ -432,7 +438,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
__put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
__put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
__put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
- __put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);
+ __put_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err);
__put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
/* set the compat FSR WnR */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 2faa9863d2e5..25fcd22a4bb2 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -225,6 +225,7 @@ asmlinkage notrace void secondary_start_kernel(void)
notify_cpu_starting(cpu);
store_cpu_topology(cpu);
+ numa_add_cpu(cpu);
/*
* OK, now it's safe to let the boot CPU continue. Wait for
@@ -278,6 +279,9 @@ int __cpu_disable(void)
if (ret)
return ret;
+ remove_cpu_topology(cpu);
+ numa_remove_cpu(cpu);
+
/*
* Take this CPU offline. Once we clear this, we can't return,
* and we must not schedule until we're ready to give up the cpu.
@@ -518,7 +522,6 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
}
bootcpu_valid = true;
cpu_madt_gicc[0] = *processor;
- early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
return;
}
@@ -541,8 +544,6 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
*/
acpi_set_mailbox_entry(cpu_count, processor);
- early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count, hwid));
-
cpu_count++;
}
@@ -562,8 +563,34 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
return 0;
}
+
+static void __init acpi_parse_and_init_cpus(void)
+{
+ int i;
+
+ /*
+	 * Do a walk of the MADT to determine how many CPUs we have,
+	 * including disabled CPUs, and get the information we need
+	 * for SMP init.
+ */
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ acpi_parse_gic_cpu_interface, 0);
+
+ /*
+ * In ACPI, SMP and CPU NUMA information is provided in separate
+ * static tables, namely the MADT and the SRAT.
+ *
+ * Thus, it is simpler to first create the cpu logical map through
+ * an MADT walk and then map the logical cpus to their node ids
+ * as separate steps.
+ */
+ acpi_map_cpus_to_nodes();
+
+ for (i = 0; i < nr_cpu_ids; i++)
+ early_map_cpu_to_node(i, acpi_numa_get_nid(i));
+}
#else
-#define acpi_table_parse_madt(...) do { } while (0)
+#define acpi_parse_and_init_cpus(...) do { } while (0)
#endif
/*
@@ -636,13 +663,7 @@ void __init smp_init_cpus(void)
if (acpi_disabled)
of_parse_and_init_cpus();
else
- /*
- * do a walk of MADT to determine how many CPUs
- * we have including disabled CPUs, and get information
- * we need for SMP init
- */
- acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
- acpi_parse_gic_cpu_interface, 0);
+ acpi_parse_and_init_cpus();
if (cpu_count > nr_cpu_ids)
pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
@@ -679,6 +700,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
this_cpu = smp_processor_id();
store_cpu_topology(this_cpu);
numa_store_cpu_info(this_cpu);
+ numa_add_cpu(this_cpu);
/*
* If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index d5718a060672..4989f7ea1e59 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -50,7 +50,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
if (!tsk)
tsk = current;
- if (!on_accessible_stack(tsk, fp))
+ if (!on_accessible_stack(tsk, fp, NULL))
return -EINVAL;
frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index 72981bae10eb..b44065fb1616 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -25,11 +25,13 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
+
#include <asm/cpufeature.h>
+#include <asm/syscall.h>
-asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, off_t off)
+SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, off_t, off)
{
if (offset_in_page(off) != 0)
return -EINVAL;
@@ -42,24 +44,25 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
if (personality(personality) == PER_LINUX32 &&
!system_supports_32bit_el0())
return -EINVAL;
- return sys_personality(personality);
+ return ksys_personality(personality);
}
/*
* Wrappers to pass the pt_regs argument.
*/
-asmlinkage long sys_rt_sigreturn_wrapper(void);
-#define sys_rt_sigreturn sys_rt_sigreturn_wrapper
#define sys_personality sys_arm64_personality
+asmlinkage long sys_ni_syscall(const struct pt_regs *);
+#define __arm64_sys_ni_syscall sys_ni_syscall
+
#undef __SYSCALL
-#define __SYSCALL(nr, sym) [nr] = sym,
+#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
+#include <asm/unistd.h>
-/*
- * The sys_call_table array must be 4K aligned to be accessible from
- * kernel/entry.S.
- */
-void * const sys_call_table[__NR_syscalls] __aligned(4096) = {
- [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
+
+const syscall_fn_t sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
#include <asm/unistd.h>
};
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
index a40b1343b819..0f8bcb7de700 100644
--- a/arch/arm64/kernel/sys32.c
+++ b/arch/arm64/kernel/sys32.c
@@ -22,31 +22,128 @@
*/
#define __COMPAT_SYSCALL_NR
+#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
-asmlinkage long compat_sys_sigreturn_wrapper(void);
-asmlinkage long compat_sys_rt_sigreturn_wrapper(void);
-asmlinkage long compat_sys_statfs64_wrapper(void);
-asmlinkage long compat_sys_fstatfs64_wrapper(void);
-asmlinkage long compat_sys_pread64_wrapper(void);
-asmlinkage long compat_sys_pwrite64_wrapper(void);
-asmlinkage long compat_sys_truncate64_wrapper(void);
-asmlinkage long compat_sys_ftruncate64_wrapper(void);
-asmlinkage long compat_sys_readahead_wrapper(void);
-asmlinkage long compat_sys_fadvise64_64_wrapper(void);
-asmlinkage long compat_sys_sync_file_range2_wrapper(void);
-asmlinkage long compat_sys_fallocate_wrapper(void);
-asmlinkage long compat_sys_mmap2_wrapper(void);
+#include <asm/syscall.h>
-#undef __SYSCALL
-#define __SYSCALL(nr, sym) [nr] = sym,
+asmlinkage long compat_sys_sigreturn(void);
+asmlinkage long compat_sys_rt_sigreturn(void);
+
+COMPAT_SYSCALL_DEFINE3(aarch32_statfs64, const char __user *, pathname,
+ compat_size_t, sz, struct compat_statfs64 __user *, buf)
+{
+ /*
+ * 32-bit ARM applies an OABI compatibility fixup to statfs64 and
+ * fstatfs64 regardless of whether OABI is in use, and therefore
+ * arbitrary binaries may rely upon it, so we must do the same.
+ * For more details, see commit:
+ *
+ * 713c481519f19df9 ("[ARM] 3108/2: old ABI compat: statfs64 and
+ * fstatfs64")
+ */
+ if (sz == 88)
+ sz = 84;
+
+ return kcompat_sys_statfs64(pathname, sz, buf);
+}
+
+COMPAT_SYSCALL_DEFINE3(aarch32_fstatfs64, unsigned int, fd, compat_size_t, sz,
+ struct compat_statfs64 __user *, buf)
+{
+ /* see aarch32_statfs64 */
+ if (sz == 88)
+ sz = 84;
+
+ return kcompat_sys_fstatfs64(fd, sz, buf);
+}
/*
- * The sys_call_table array must be 4K aligned to be accessible from
- * kernel/entry.S.
+ * Note: off_4k is always in units of 4K. If we can't do the
+ * requested offset because it is not page-aligned, we return -EINVAL.
*/
-void * const compat_sys_call_table[__NR_compat_syscalls] __aligned(4096) = {
- [0 ... __NR_compat_syscalls - 1] = sys_ni_syscall,
+COMPAT_SYSCALL_DEFINE6(aarch32_mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, unsigned long, off_4k)
+{
+ if (off_4k & (~PAGE_MASK >> 12))
+ return -EINVAL;
+
+ off_4k >>= (PAGE_SHIFT - 12);
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd, off_4k);
+}
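
The wrapper receives the offset in fixed 4K units, so on kernels with pages larger than 4K it must reject offsets that do not land on a page boundary before rescaling. A standalone model, using a hypothetical 64K page size:

/* Model of the aarch32_mmap2 offset handling: off_4k is in 4K units;
 * reject it if it is not aligned to the kernel page size, otherwise
 * convert it to units of PAGE_SIZE. */
#include <stdint.h>
#include <stdio.h>

static long mmap2_offset(unsigned long off_4k, unsigned int page_shift)
{
	unsigned long page_mask = ~((1UL << page_shift) - 1);

	if (off_4k & (~page_mask >> 12))
		return -1;		/* -EINVAL: not page-aligned */
	return off_4k >> (page_shift - 12);
}

int main(void)
{
	/* 64K pages: offset 0x10 * 4K = 64K is aligned, 0x11 is not */
	printf("aligned:   %ld\n", mmap2_offset(0x10, 16));
	printf("unaligned: %ld\n", mmap2_offset(0x11, 16));
	return 0;
}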
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define arg_u32p(name) u32, name##_hi, u32, name##_lo
+#else
+#define arg_u32p(name) u32, name##_lo, u32, name##_hi
+#endif
+
+#define arg_u64(name) (((u64)name##_hi << 32) | name##_lo)
+
+COMPAT_SYSCALL_DEFINE6(aarch32_pread64, unsigned int, fd, char __user *, buf,
+ size_t, count, u32, __pad, arg_u32p(pos))
+{
+ return ksys_pread64(fd, buf, count, arg_u64(pos));
+}
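
The arg_u32p()/arg_u64() pair reassembles a 64-bit argument that AArch32 passed in two 32-bit registers, with the hi/lo register order depending on endianness. A minimal demo of the reassembly:

/* Demo of the arg_u32p()/arg_u64() pairing: a 64-bit file offset is
 * passed as two u32 register arguments and reassembled in the wrapper. */
#include <stdint.h>
#include <stdio.h>

#define arg_u64(hi, lo)	(((uint64_t)(hi) << 32) | (lo))

static uint64_t pread64_offset(uint32_t pos_hi, uint32_t pos_lo)
{
	return arg_u64(pos_hi, pos_lo);
}

int main(void)
{
	/* a 6 GiB offset: hi = 1, lo = 0x80000000 */
	printf("offset = %llu\n",
	       (unsigned long long)pread64_offset(1, 0x80000000u));
	return 0;
}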
+
+COMPAT_SYSCALL_DEFINE6(aarch32_pwrite64, unsigned int, fd,
+ const char __user *, buf, size_t, count, u32, __pad,
+ arg_u32p(pos))
+{
+ return ksys_pwrite64(fd, buf, count, arg_u64(pos));
+}
+
+COMPAT_SYSCALL_DEFINE4(aarch32_truncate64, const char __user *, pathname,
+ u32, __pad, arg_u32p(length))
+{
+ return ksys_truncate(pathname, arg_u64(length));
+}
+
+COMPAT_SYSCALL_DEFINE4(aarch32_ftruncate64, unsigned int, fd, u32, __pad,
+ arg_u32p(length))
+{
+ return ksys_ftruncate(fd, arg_u64(length));
+}
+
+COMPAT_SYSCALL_DEFINE5(aarch32_readahead, int, fd, u32, __pad,
+ arg_u32p(offset), size_t, count)
+{
+ return ksys_readahead(fd, arg_u64(offset), count);
+}
+
+COMPAT_SYSCALL_DEFINE6(aarch32_fadvise64_64, int, fd, int, advice,
+ arg_u32p(offset), arg_u32p(len))
+{
+ return ksys_fadvise64_64(fd, arg_u64(offset), arg_u64(len), advice);
+}
+
+COMPAT_SYSCALL_DEFINE6(aarch32_sync_file_range2, int, fd, unsigned int, flags,
+ arg_u32p(offset), arg_u32p(nbytes))
+{
+ return ksys_sync_file_range(fd, arg_u64(offset), arg_u64(nbytes),
+ flags);
+}
+
+COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
+ arg_u32p(offset), arg_u32p(len))
+{
+ return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
+}
+
+asmlinkage long sys_ni_syscall(const struct pt_regs *);
+#define __arm64_sys_ni_syscall sys_ni_syscall
+
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
+#include <asm/unistd32.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
+
+const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
+ [0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
#include <asm/unistd32.h>
};
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
new file mode 100644
index 000000000000..032d22312881
--- /dev/null
+++ b/arch/arm64/kernel/syscall.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/errno.h>
+#include <linux/nospec.h>
+#include <linux/ptrace.h>
+#include <linux/syscalls.h>
+
+#include <asm/daifflags.h>
+#include <asm/fpsimd.h>
+#include <asm/syscall.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+
+long compat_arm_syscall(struct pt_regs *regs);
+
+long sys_ni_syscall(void);
+
+asmlinkage long do_ni_syscall(struct pt_regs *regs)
+{
+#ifdef CONFIG_COMPAT
+ long ret;
+ if (is_compat_task()) {
+ ret = compat_arm_syscall(regs);
+ if (ret != -ENOSYS)
+ return ret;
+ }
+#endif
+
+ return sys_ni_syscall();
+}
+
+static long __invoke_syscall(struct pt_regs *regs, syscall_fn_t syscall_fn)
+{
+ return syscall_fn(regs);
+}
+
+static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
+ unsigned int sc_nr,
+ const syscall_fn_t syscall_table[])
+{
+ long ret;
+
+ if (scno < sc_nr) {
+ syscall_fn_t syscall_fn;
+ syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
+ ret = __invoke_syscall(regs, syscall_fn);
+ } else {
+ ret = do_ni_syscall(regs);
+ }
+
+ regs->regs[0] = ret;
+}
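
The dispatcher bounds-checks the syscall number, clamps it against speculative out-of-bounds reads with array_index_nospec(), and falls back to the ni_syscall path otherwise. A standalone model of the control flow (a plain bounds check stands in for the speculation clamp, and the table contents are invented):

/* Model of invoke_syscall(): bounds-check the syscall number, index a
 * function-pointer table, fall back to ni_syscall otherwise. */
#include <stdio.h>

struct pt_regs { long regs[31]; };

typedef long (*syscall_fn_t)(struct pt_regs *);

static long sys_hello(struct pt_regs *regs) { return regs->regs[0] + 1; }
static long ni_syscall(struct pt_regs *regs) { (void)regs; return -38; } /* -ENOSYS */

static const syscall_fn_t table[] = { sys_hello };

static void invoke_syscall(struct pt_regs *regs, unsigned int scno)
{
	long ret;

	if (scno < sizeof(table) / sizeof(table[0]))
		ret = table[scno](regs);
	else
		ret = ni_syscall(regs);

	regs->regs[0] = ret;	/* return value goes back in x0 */
}

int main(void)
{
	struct pt_regs regs = { .regs = { 41 } };

	invoke_syscall(&regs, 0);
	printf("x0 = %ld\n", regs.regs[0]);	/* 42 */
	invoke_syscall(&regs, 99);
	printf("x0 = %ld\n", regs.regs[0]);	/* -38 (ENOSYS) */
	return 0;
}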
+
+static inline bool has_syscall_work(unsigned long flags)
+{
+ return unlikely(flags & _TIF_SYSCALL_WORK);
+}
+
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_exit(struct pt_regs *regs);
+
+static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+ const syscall_fn_t syscall_table[])
+{
+ unsigned long flags = current_thread_info()->flags;
+
+ regs->orig_x0 = regs->regs[0];
+ regs->syscallno = scno;
+
+ local_daif_restore(DAIF_PROCCTX);
+ user_exit();
+
+ if (has_syscall_work(flags)) {
+ /* set default errno for user-issued syscall(-1) */
+ if (scno == NO_SYSCALL)
+ regs->regs[0] = -ENOSYS;
+ scno = syscall_trace_enter(regs);
+ if (scno == NO_SYSCALL)
+ goto trace_exit;
+ }
+
+ invoke_syscall(regs, scno, sc_nr, syscall_table);
+
+ /*
+ * The tracing status may have changed under our feet, so we have to
+ * check again. However, if we were tracing entry, then we always trace
+ * exit regardless, as the old entry assembly did.
+ */
+ if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
+ local_daif_mask();
+ flags = current_thread_info()->flags;
+ if (!has_syscall_work(flags)) {
+ /*
+ * We're off to userspace, where interrupts are
+ * always enabled after we restore the flags from
+ * the SPSR.
+ */
+ trace_hardirqs_on();
+ return;
+ }
+ local_daif_restore(DAIF_PROCCTX);
+ }
+
+trace_exit:
+ syscall_trace_exit(regs);
+}
+
+static inline void sve_user_discard(void)
+{
+ if (!system_supports_sve())
+ return;
+
+ clear_thread_flag(TIF_SVE);
+
+ /*
+ * task_fpsimd_load() won't be called to update CPACR_EL1 in
+ * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
+ * happens if a context switch or kernel_neon_begin() or context
+ * modification (sigreturn, ptrace) intervenes.
+ * So, ensure that CPACR_EL1 is already correct for the fast-path case.
+ */
+ sve_user_disable();
+}
+
+asmlinkage void el0_svc_handler(struct pt_regs *regs)
+{
+ sve_user_discard();
+ el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
+}
+
+#ifdef CONFIG_COMPAT
+asmlinkage void el0_svc_compat_handler(struct pt_regs *regs)
+{
+ el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
+ compat_sys_call_table);
+}
+#endif
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index f845a8617812..0825c4a856e3 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -215,11 +215,16 @@ EXPORT_SYMBOL_GPL(cpu_topology);
const struct cpumask *cpu_coregroup_mask(int cpu)
{
- const cpumask_t *core_mask = &cpu_topology[cpu].core_sibling;
+ const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
+	/* Find the smallest of the NUMA, core, or LLC sibling masks */
+	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
+		/* No NUMA within the package; let's use the package siblings */
+ core_mask = &cpu_topology[cpu].core_sibling;
+ }
if (cpu_topology[cpu].llc_id != -1) {
- if (cpumask_subset(&cpu_topology[cpu].llc_siblings, core_mask))
- core_mask = &cpu_topology[cpu].llc_siblings;
+ if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
+ core_mask = &cpu_topology[cpu].llc_sibling;
}
return core_mask;
@@ -231,27 +236,25 @@ static void update_siblings_masks(unsigned int cpuid)
int cpu;
/* update core and thread sibling masks */
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cpu_topo = &cpu_topology[cpu];
if (cpuid_topo->llc_id == cpu_topo->llc_id) {
- cpumask_set_cpu(cpu, &cpuid_topo->llc_siblings);
- cpumask_set_cpu(cpuid, &cpu_topo->llc_siblings);
+ cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
+ cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
}
if (cpuid_topo->package_id != cpu_topo->package_id)
continue;
cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
- if (cpu != cpuid)
- cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
if (cpuid_topo->core_id != cpu_topo->core_id)
continue;
cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
- if (cpu != cpuid)
- cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
}
}
@@ -293,6 +296,19 @@ topology_populated:
update_siblings_masks(cpuid);
}
+static void clear_cpu_topology(int cpu)
+{
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+ cpumask_clear(&cpu_topo->llc_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
+
+ cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
+ cpumask_clear(&cpu_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
+}
+
static void __init reset_cpu_topology(void)
{
unsigned int cpu;
@@ -303,18 +319,26 @@ static void __init reset_cpu_topology(void)
cpu_topo->thread_id = -1;
cpu_topo->core_id = 0;
cpu_topo->package_id = -1;
-
cpu_topo->llc_id = -1;
- cpumask_clear(&cpu_topo->llc_siblings);
- cpumask_set_cpu(cpu, &cpu_topo->llc_siblings);
- cpumask_clear(&cpu_topo->core_sibling);
- cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
- cpumask_clear(&cpu_topo->thread_sibling);
- cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
+ clear_cpu_topology(cpu);
}
}
+void remove_cpu_topology(unsigned int cpu)
+{
+ int sibling;
+
+ for_each_cpu(sibling, topology_core_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
+ for_each_cpu(sibling, topology_sibling_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
+ for_each_cpu(sibling, topology_llc_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
+
+ clear_cpu_topology(cpu);
+}
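
Hot-unplug now has to undo what update_siblings_masks() did: remove the departing CPU from each sibling's masks, then reset its own masks back to just itself. A bitmask model of that teardown:

/* Bitmask model of remove_cpu_topology(): drop the departing CPU from
 * every sibling's mask, then reset its own mask to just itself. */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

static uint64_t core_sibling[NR_CPUS];

static void remove_cpu(int cpu)
{
	for (int sibling = 0; sibling < NR_CPUS; sibling++)
		if (core_sibling[cpu] & (1ULL << sibling))
			core_sibling[sibling] &= ~(1ULL << cpu);

	core_sibling[cpu] = 1ULL << cpu;	/* clear_cpu_topology() */
}

int main(void)
{
	/* CPUs 0-3 all in one package */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		core_sibling[cpu] = 0xf;

	remove_cpu(3);
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d core_sibling = %#llx\n", cpu,
		       (unsigned long long)core_sibling[cpu]);
	return 0;
}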
+
#ifdef CONFIG_ACPI
/*
* Propagate the topology information of the processor_topology_node tree to the
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index d399d459397b..039e9ff379cc 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -411,7 +411,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
- config_sctlr_el1(SCTLR_EL1_UCI, 0);
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
#define __user_cache_maint(insn, address, res) \
@@ -547,22 +547,6 @@ asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
do_undefinstr(regs);
}
-long compat_arm_syscall(struct pt_regs *regs);
-
-asmlinkage long do_ni_syscall(struct pt_regs *regs)
-{
-#ifdef CONFIG_COMPAT
- long ret;
- if (is_compat_task()) {
- ret = compat_arm_syscall(regs);
- if (ret != -ENOSYS)
- return ret;
- }
-#endif
-
- return sys_ni_syscall();
-}
-
static const char *esr_class_str[] = {
[0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC",
[ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized",
diff --git a/arch/arm64/kernel/vdso/note.S b/arch/arm64/kernel/vdso/note.S
index b82c85e5d972..e20483b104d9 100644
--- a/arch/arm64/kernel/vdso/note.S
+++ b/arch/arm64/kernel/vdso/note.S
@@ -22,7 +22,10 @@
#include <linux/uts.h>
#include <linux/version.h>
#include <linux/elfnote.h>
+#include <linux/build-salt.h>
ELFNOTE_START(Linux, 0, "a")
.long LINUX_VERSION_CODE
ELFNOTE_END
+
+BUILD_SALT
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 56a0260ceb11..cdd4d9d6d575 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -107,14 +107,14 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
}
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
- u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
+ u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
switch (mode) {
- case COMPAT_PSR_MODE_USR:
- case COMPAT_PSR_MODE_FIQ:
- case COMPAT_PSR_MODE_IRQ:
- case COMPAT_PSR_MODE_SVC:
- case COMPAT_PSR_MODE_ABT:
- case COMPAT_PSR_MODE_UND:
+ case PSR_AA32_MODE_USR:
+ case PSR_AA32_MODE_FIQ:
+ case PSR_AA32_MODE_IRQ:
+ case PSR_AA32_MODE_SVC:
+ case PSR_AA32_MODE_ABT:
+ case PSR_AA32_MODE_UND:
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 4313f7475333..2fabc2dc1966 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -3,7 +3,8 @@
# Makefile for Kernel-based Virtual Machine module, HYP part
#
-ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING \
+ $(DISABLE_STACKLEAK_PLUGIN)
KVM=../../../../virt/kvm
diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
index 39be799d0417..215c7c0eb3b0 100644
--- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
@@ -27,7 +27,7 @@
static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu))
- return !!(read_sysreg_el2(spsr) & COMPAT_PSR_E_BIT);
+ return !!(read_sysreg_el2(spsr) & PSR_AA32_E_BIT);
return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
}
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
index eefe403a2e63..7a5173ea2276 100644
--- a/arch/arm64/kvm/regmap.c
+++ b/arch/arm64/kvm/regmap.c
@@ -112,22 +112,22 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
{
unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
- unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
+ unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
switch (mode) {
- case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
+ case PSR_AA32_MODE_USR ... PSR_AA32_MODE_SVC:
mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
break;
- case COMPAT_PSR_MODE_ABT:
+ case PSR_AA32_MODE_ABT:
mode = 4;
break;
- case COMPAT_PSR_MODE_UND:
+ case PSR_AA32_MODE_UND:
mode = 5;
break;
- case COMPAT_PSR_MODE_SYS:
+ case PSR_AA32_MODE_SYS:
mode = 0; /* SYS maps to USR */
break;
@@ -143,13 +143,13 @@ unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
*/
static int vcpu_spsr32_mode(const struct kvm_vcpu *vcpu)
{
- unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
+ unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
switch (mode) {
- case COMPAT_PSR_MODE_SVC: return KVM_SPSR_SVC;
- case COMPAT_PSR_MODE_ABT: return KVM_SPSR_ABT;
- case COMPAT_PSR_MODE_UND: return KVM_SPSR_UND;
- case COMPAT_PSR_MODE_IRQ: return KVM_SPSR_IRQ;
- case COMPAT_PSR_MODE_FIQ: return KVM_SPSR_FIQ;
+ case PSR_AA32_MODE_SVC: return KVM_SPSR_SVC;
+ case PSR_AA32_MODE_ABT: return KVM_SPSR_ABT;
+ case PSR_AA32_MODE_UND: return KVM_SPSR_UND;
+ case PSR_AA32_MODE_IRQ: return KVM_SPSR_IRQ;
+ case PSR_AA32_MODE_FIQ: return KVM_SPSR_FIQ;
default: BUG();
}
}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index a74311beda35..4e4aedaf7ab7 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -42,8 +42,8 @@ static const struct kvm_regs default_regs_reset = {
};
static const struct kvm_regs default_regs_reset32 = {
- .regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT |
- COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
+ .regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
+ PSR_AA32_I_BIT | PSR_AA32_F_BIT),
};
static bool cpu_has_32bit_el1(void)
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 137710f4dac3..68755fd70dcf 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
+lib-y := clear_user.o delay.o copy_from_user.o \
copy_to_user.o copy_in_user.o copy_page.o \
clear_page.o memchr.o memcpy.o memmove.o memset.o \
memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
deleted file mode 100644
index 43ac736baa5b..000000000000
--- a/arch/arm64/lib/bitops.S
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Based on arch/arm/lib/bitops.h
- *
- * Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/lse.h>
-
-/*
- * x0: bits 5:0 bit offset
- * bits 31:6 word offset
- * x1: address
- */
- .macro bitop, name, llsc, lse
-ENTRY( \name )
- and w3, w0, #63 // Get bit offset
- eor w0, w0, w3 // Clear low bits
- mov x2, #1
- add x1, x1, x0, lsr #3 // Get word offset
-alt_lse " prfm pstl1strm, [x1]", "nop"
- lsl x3, x2, x3 // Create mask
-
-alt_lse "1: ldxr x2, [x1]", "\lse x3, [x1]"
-alt_lse " \llsc x2, x2, x3", "nop"
-alt_lse " stxr w0, x2, [x1]", "nop"
-alt_lse " cbnz w0, 1b", "nop"
-
- ret
-ENDPROC(\name )
- .endm
-
- .macro testop, name, llsc, lse
-ENTRY( \name )
- and w3, w0, #63 // Get bit offset
- eor w0, w0, w3 // Clear low bits
- mov x2, #1
- add x1, x1, x0, lsr #3 // Get word offset
-alt_lse " prfm pstl1strm, [x1]", "nop"
- lsl x4, x2, x3 // Create mask
-
-alt_lse "1: ldxr x2, [x1]", "\lse x4, x2, [x1]"
- lsr x0, x2, x3
-alt_lse " \llsc x2, x2, x4", "nop"
-alt_lse " stlxr w5, x2, [x1]", "nop"
-alt_lse " cbnz w5, 1b", "nop"
-alt_lse " dmb ish", "nop"
-
- and x0, x0, #1
- ret
-ENDPROC(\name )
- .endm
-
-/*
- * Atomic bit operations.
- */
- bitop change_bit, eor, steor
- bitop clear_bit, bic, stclr
- bitop set_bit, orr, stset
-
- testop test_and_change_bit, eor, ldeoral
- testop test_and_clear_bit, bic, ldclral
- testop test_and_set_bit, orr, ldsetal
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 30334d81b021..0c22ede52f90 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -35,7 +35,7 @@
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(flush_icache_range)
+ENTRY(__flush_icache_range)
/* FALLTHROUGH */
/*
@@ -77,7 +77,7 @@ alternative_else_nop_endif
9:
mov x0, #-EFAULT
b 1b
-ENDPROC(flush_icache_range)
+ENDPROC(__flush_icache_range)
ENDPROC(__flush_cache_user_range)
/*
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index b8eecc7b9531..9943690a3924 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -727,12 +727,7 @@ static const struct fault_info fault_info[] = {
int handle_guest_sea(phys_addr_t addr, unsigned int esr)
{
- int ret = -ENOENT;
-
- if (IS_ENABLED(CONFIG_ACPI_APEI_SEA))
- ret = ghes_notify_sea();
-
- return ret;
+ return ghes_notify_sea();
}
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
@@ -879,7 +874,7 @@ void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
*/
WARN_ON_ONCE(in_interrupt());
- config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
asm(SET_PSTATE_PAN(1));
}
#endif /* CONFIG_ARM64_PAN */
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 1059884f9a6f..30695a868107 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -66,6 +66,7 @@ void __sync_icache_dcache(pte_t pte)
sync_icache_aliases(page_address(page),
PAGE_SIZE << compound_order(page));
}
+EXPORT_SYMBOL_GPL(__sync_icache_dcache);
/*
* This function is called when a page has been modified by the kernel. Mark
@@ -82,7 +83,7 @@ EXPORT_SYMBOL(flush_dcache_page);
/*
* Additional functions defined in assembly.
*/
-EXPORT_SYMBOL(flush_icache_range);
+EXPORT_SYMBOL(__flush_icache_range);
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 493ff75670ff..65f86271f02b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -45,6 +45,7 @@
#include <asm/memblock.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
+#include <asm/tlbflush.h>
#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)
@@ -977,12 +978,51 @@ int pmd_clear_huge(pmd_t *pmdp)
return 1;
}
-int pud_free_pmd_page(pud_t *pud)
+int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
- return pud_none(*pud);
+ pte_t *table;
+ pmd_t pmd;
+
+ pmd = READ_ONCE(*pmdp);
+
+ /* No-op for empty entry and WARN_ON for valid entry */
+ if (!pmd_present(pmd) || !pmd_table(pmd)) {
+ VM_WARN_ON(!pmd_table(pmd));
+ return 1;
+ }
+
+ table = pte_offset_kernel(pmdp, addr);
+ pmd_clear(pmdp);
+ __flush_tlb_kernel_pgtable(addr);
+ pte_free_kernel(NULL, table);
+ return 1;
}
-int pmd_free_pte_page(pmd_t *pmd)
+int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
- return pmd_none(*pmd);
+ pmd_t *table;
+ pmd_t *pmdp;
+ pud_t pud;
+ unsigned long next, end;
+
+ pud = READ_ONCE(*pudp);
+
+ /* No-op for empty entry and WARN_ON for valid entry */
+ if (!pud_present(pud) || !pud_table(pud)) {
+ VM_WARN_ON(!pud_table(pud));
+ return 1;
+ }
+
+ table = pmd_offset(pudp, addr);
+ pmdp = table;
+ next = addr;
+ end = addr + PUD_SIZE;
+ do {
+ pmd_free_pte_page(pmdp, next);
+ } while (pmdp++, next += PMD_SIZE, next != end);
+
+ pud_clear(pudp);
+ __flush_tlb_kernel_pgtable(addr);
+ pmd_free(NULL, table);
+ return 1;
}
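
Both helpers follow the same break-before-make order: clear the table
entry, flush the TLB for that address, and only then free the
lower-level table page, so a concurrent walker can never reach freed
memory. A hypothetical caller sketch (the generic ioremap path is not
part of this hunk):

	/* Tear down any stale page table before installing a block map. */
	if (pud_free_pmd_page(pudp, addr))
		pud_set_huge(pudp, phys_addr, prot);
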
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index dad128ba98bf..146c04ceaa51 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -70,19 +70,32 @@ EXPORT_SYMBOL(cpumask_of_node);
#endif
-static void map_cpu_to_node(unsigned int cpu, int nid)
+static void numa_update_cpu(unsigned int cpu, bool remove)
{
- set_cpu_numa_node(cpu, nid);
- if (nid >= 0)
+ int nid = cpu_to_node(cpu);
+
+ if (nid == NUMA_NO_NODE)
+ return;
+
+ if (remove)
+ cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);
+ else
cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
}
-void numa_clear_node(unsigned int cpu)
+void numa_add_cpu(unsigned int cpu)
{
- int nid = cpu_to_node(cpu);
+ numa_update_cpu(cpu, false);
+}
- if (nid >= 0)
- cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);
+void numa_remove_cpu(unsigned int cpu)
+{
+ numa_update_cpu(cpu, true);
+}
+
+void numa_clear_node(unsigned int cpu)
+{
+ numa_remove_cpu(cpu);
set_cpu_numa_node(cpu, NUMA_NO_NODE);
}
@@ -116,7 +129,7 @@ static void __init setup_node_to_cpumask_map(void)
*/
void numa_store_cpu_info(unsigned int cpu)
{
- map_cpu_to_node(cpu, cpu_to_node_map[cpu]);
+ set_cpu_numa_node(cpu, cpu_to_node_map[cpu]);
}
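
The refactor splits the old map_cpu_to_node() in two:
numa_store_cpu_info() now only records the node, while numa_add_cpu()
and numa_remove_cpu() keep node_to_cpumask_map in sync as CPUs come and
go. A hypothetical hotplug-side sketch (the call sites are outside this
hunk):

	/* online path */
	set_cpu_numa_node(cpu, nid);
	numa_add_cpu(cpu);

	/* offline path */
	numa_remove_cpu(cpu);
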
void __init early_map_cpu_to_node(unsigned int cpu, int nid)
diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c
index 02b18f8b2905..24d786fc3a4c 100644
--- a/arch/arm64/mm/ptdump_debugfs.c
+++ b/arch/arm64/mm/ptdump_debugfs.c
@@ -10,18 +10,7 @@ static int ptdump_show(struct seq_file *m, void *v)
ptdump_walk_pgd(m, info);
return 0;
}
-
-static int ptdump_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ptdump_show, inode->i_private);
-}
-
-static const struct file_operations ptdump_fops = {
- .open = ptdump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ptdump);
int ptdump_debugfs_register(struct ptdump_info *info, const char *name)
{
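
DEFINE_SHOW_ATTRIBUTE(ptdump) regenerates the boilerplate deleted above
from ptdump_show(). Roughly, the macro from include/linux/seq_file.h
expands to:

	static int ptdump_open(struct inode *inode, struct file *file)
	{
		return single_open(file, ptdump_show, inode->i_private);
	}

	static const struct file_operations ptdump_fops = {
		.owner		= THIS_MODULE,
		.open		= ptdump_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

The only behavioral difference from the open-coded version is the added
.owner initializer.
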
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index bf59855628ac..a641b0bf1611 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -53,12 +53,8 @@ config C6X_BIG_KERNEL
If unsure, say N.
-source "init/Kconfig"
-
# Use the generic interrupt handling code in kernel/irq/
-source "kernel/Kconfig.freezer"
-
config CMDLINE_BOOL
bool "Default bootloader kernel arguments"
@@ -114,43 +110,6 @@ config KERNEL_RAM_BASE_ADDRESS
default 0xe0000000 if SOC_TMS320C6472
default 0x80000000
-source "mm/Kconfig"
-
-source "kernel/Kconfig.preempt"
-
source "kernel/Kconfig.hz"
endmenu
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
-
-config ACCESS_CHECK
- bool "Check the user pointer address"
- default y
- help
- Usually the pointer transfer from user space is checked to see if its
- address is in the kernel space.
-
- Say N here to disable that check to improve the performance.
-
-endmenu
diff --git a/arch/c6x/Kconfig.debug b/arch/c6x/Kconfig.debug
new file mode 100644
index 000000000000..c299e0d8eca3
--- /dev/null
+++ b/arch/c6x/Kconfig.debug
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config ACCESS_CHECK
+ bool "Check the user pointer address"
+ default y
+ help
+ Usually a pointer passed in from user space is checked to see whether
+ its address lies in kernel space.
+
+ Say N here to disable that check and improve performance.
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 091d6d04b5e5..5e89d40be8cd 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -48,40 +48,4 @@ config NR_CPUS
int
default 1
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
source "arch/h8300/Kconfig.cpu"
-
-menu "Kernel Features"
-
-source "kernel/Kconfig.preempt"
-
-source "mm/Kconfig"
-
-endmenu
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
-
-endmenu
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug
new file mode 100644
index 000000000000..22a162cd99e8
--- /dev/null
+++ b/arch/h8300/Kconfig.debug
@@ -0,0 +1 @@
+# dummy file, do not delete
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 941e7554e886..c6b6a06231b2 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -2,8 +2,10 @@
#ifndef __ARCH_H8300_ATOMIC__
#define __ARCH_H8300_ATOMIC__
+#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>
+#include <asm/irqflags.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -15,8 +17,6 @@
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#include <linux/kernel.h>
-
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
@@ -69,18 +69,6 @@ ATOMIC_OPS(sub, -=)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_inc_return(v) atomic_add_return(1, v)
-#define atomic_dec_return(v) atomic_sub_return(1, v)
-
-#define atomic_inc(v) (void)atomic_inc_return(v)
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_dec(v) (void)atomic_dec_return(v)
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
@@ -94,7 +82,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int ret;
h8300flags flags;
@@ -106,5 +94,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
arch_local_irq_restore(flags);
return ret;
}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#endif /* __ARCH_H8300_ATOMIC __ */
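
atomic_fetch_add_unless() returns the old value and performs the add
only when the old value differs from 'u'; the trailing
"#define atomic_fetch_add_unless atomic_fetch_add_unless" tells the
generic atomic headers that this architecture supplies its own
implementation. On architectures that do not, the fallback is a
cmpxchg loop along these lines (sketch):

	static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
	{
		int c = atomic_read(v);

		do {
			if (unlikely(c == u))
				break;
		} while (!atomic_try_cmpxchg(v, &c, c + a));

		return c;
	}
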
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 37adb2003033..89a4b22f34d9 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -4,6 +4,7 @@ comment "Linux Kernel Configuration for Hexagon"
config HEXAGON
def_bool y
+ select ARCH_NO_PREEMPT
select HAVE_OPROFILE
# Other pending projects/to-do items.
# select HAVE_REGS_AND_STACK_ACCESS_API
@@ -146,26 +147,6 @@ config PAGE_SIZE_256KB
endchoice
-source "mm/Kconfig"
-
source "kernel/Kconfig.hz"
endmenu
-
-source "init/Kconfig"
-source "kernel/Kconfig.freezer"
-source "drivers/Kconfig"
-source "fs/Kconfig"
-
-menu "Executable File Formats"
-source "fs/Kconfig.binfmt"
-endmenu
-
-source "net/Kconfig"
-source "security/Kconfig"
-source "crypto/Kconfig"
-source "lib/Kconfig"
-
-menu "Kernel hacking"
-source "lib/Kconfig.debug"
-endmenu
diff --git a/arch/hexagon/Kconfig.debug b/arch/hexagon/Kconfig.debug
new file mode 100644
index 000000000000..22a162cd99e8
--- /dev/null
+++ b/arch/hexagon/Kconfig.debug
@@ -0,0 +1 @@
+# dummy file, do not delete
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index fb3dfb2a667e..311b9894ccc8 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -164,7 +164,7 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP
/**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer to value
* @a: amount to add
* @u: unless value is equal to u
@@ -173,7 +173,7 @@ ATOMIC_OPS(xor)
*
*/
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int __oldval;
register int tmp;
@@ -196,18 +196,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
);
return __oldval;
}
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
-#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
-
-#define atomic_inc_return(v) (atomic_add_return(1, v))
-#define atomic_dec_return(v) (atomic_sub_return(1, v))
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#endif
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ff861420b8f5..2bf4ef792f2c 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -4,10 +4,6 @@ config PGTABLE_LEVELS
range 3 4 if !IA64_PAGE_SIZE_64KB
default 3
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
menu "Processor type and features"
config IA64
@@ -16,6 +12,7 @@ config IA64
select ARCH_MIGHT_HAVE_PC_SERIO
select PCI if (!IA64_HP_SIM)
select ACPI if (!IA64_HP_SIM)
+ select ARCH_SUPPORTS_ACPI if (!IA64_HP_SIM)
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select HAVE_UNSTABLE_SCHED_CLOCK
@@ -368,10 +365,6 @@ config FORCE_CPEI_RETARGET
This option is useful to enable this feature on older BIOSes as well.
You can also enable this by using boot command line option force_cpei=1.
-source "kernel/Kconfig.preempt"
-
-source "mm/Kconfig"
-
config ARCH_SELECT_MEMORY_MODEL
def_bool y
@@ -532,8 +525,6 @@ config CRASH_DUMP
source "drivers/firmware/Kconfig"
-source "fs/Kconfig.binfmt"
-
endmenu
menu "Power management and ACPI options"
@@ -574,10 +565,6 @@ endmenu
endif
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
source "arch/ia64/hp/sim/Kconfig"
config MSPEC
@@ -588,13 +575,3 @@ config MSPEC
If you have an ia64 and you want to enable memory special
operations support (formerly known as fetchop), say Y here,
otherwise say N.
-
-source "fs/Kconfig"
-
-source "arch/ia64/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/ia64/Kconfig.debug b/arch/ia64/Kconfig.debug
index 677c409425df..1371efc9b005 100644
--- a/arch/ia64/Kconfig.debug
+++ b/arch/ia64/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
choice
prompt "Physical memory granularity"
@@ -56,5 +53,3 @@ config IA64_DEBUG_IRQ
Selecting this option turns on bug checking for the IA-64 irq_save
and restore instructions. It's useful for tracking down spinlock
problems, but slow! If you're unsure, select N.
-
-endmenu
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 2524fb60fbc2..206530d0751b 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -215,91 +215,10 @@ ATOMIC64_FETCH_OP(xor, ^)
(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
-
-static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long c, old;
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic64_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
-{
- long c, old, dec;
- c = atomic64_read(v);
- for (;;) {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- old = atomic64_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
- return dec;
-}
-
-/*
- * Atomically add I to V and return TRUE if the resulting value is
- * negative.
- */
-static __inline__ int
-atomic_add_negative (int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-
-static __inline__ long
-atomic64_add_negative (__s64 i, atomic64_t *v)
-{
- return atomic64_add_return(i, v) < 0;
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
-
#define atomic_add(i,v) (void)atomic_add_return((i), (v))
#define atomic_sub(i,v) (void)atomic_sub_return((i), (v))
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
#define atomic64_add(i,v) (void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v) (void)atomic64_sub_return((i), (v))
-#define atomic64_inc(v) atomic64_add(1, (v))
-#define atomic64_dec(v) atomic64_sub(1, (v))
#endif /* _ASM_IA64_ATOMIC_H */
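
The removed macros (atomic_inc/dec, the *_and_test and *_add_negative
forms, atomic64_dec_if_positive, ...) are not lost functionality: this
series moves them into generic fallbacks in the common atomic headers,
which apply whenever an architecture does not define its own. The
fallbacks are guarded roughly like this (sketch):

	#ifndef atomic_inc
	static inline void atomic_inc(atomic_t *v)
	{
		atomic_add(1, v);
	}
	#define atomic_inc atomic_inc
	#endif
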
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index fb0651961e2c..6f952171abf9 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -83,12 +83,14 @@ virt_to_phys (volatile void *address)
{
return (unsigned long) address - PAGE_OFFSET;
}
+#define virt_to_phys virt_to_phys
static inline void*
phys_to_virt (unsigned long address)
{
return (void *) (address + PAGE_OFFSET);
}
+#define phys_to_virt phys_to_virt
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
@@ -433,9 +435,11 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo
{
return ioremap(phys_addr, size);
}
+#define ioremap ioremap
+#define ioremap_nocache ioremap_nocache
#define ioremap_cache ioremap_cache
#define ioremap_uc ioremap_nocache
-
+#define iounmap iounmap
/*
* String version of IO memory access ops:
@@ -444,6 +448,13 @@ extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
extern void memset_io(volatile void __iomem *s, int c, long n);
+#define memcpy_fromio memcpy_fromio
+#define memcpy_toio memcpy_toio
+#define memset_io memset_io
+#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
+#include <asm-generic/io.h>
+
# endif /* __KERNEL__ */
#endif /* _ASM_IA64_IO_H */
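
The new "#define foo foo" lines exist because ia64 now pulls in
<asm-generic/io.h>, which only supplies a fallback for each accessor
when the matching macro is not already defined. The pattern in the
generic header looks roughly like this (sketch):

	#ifndef memset_io
	#define memset_io memset_io
	static inline void memset_io(volatile void __iomem *addr, int value,
				     size_t size)
	{
		memset(__io_virt(addr), value, size);
	}
	#endif
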
diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h
index 0302b3664789..580356a2eea6 100644
--- a/arch/ia64/include/asm/kprobes.h
+++ b/arch/ia64/include/asm/kprobes.h
@@ -82,8 +82,6 @@ struct prev_kprobe {
#define ARCH_PREV_KPROBE_SZ 2
struct kprobe_ctlblk {
unsigned long kprobe_status;
- struct pt_regs jprobe_saved_regs;
- unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
unsigned long *bsp;
unsigned long cfm;
atomic_t prev_kprobe_index;
diff --git a/arch/ia64/include/uapi/asm/break.h b/arch/ia64/include/uapi/asm/break.h
index 5d742bcb0018..4ca110f0a94b 100644
--- a/arch/ia64/include/uapi/asm/break.h
+++ b/arch/ia64/include/uapi/asm/break.h
@@ -14,7 +14,6 @@
*/
#define __IA64_BREAK_KDB 0x80100
#define __IA64_BREAK_KPROBE 0x81000 /* .. 0x81fff */
-#define __IA64_BREAK_JPROBE 0x82000
/*
* OS-specific break numbers:
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 3efba40adc54..c872c4e6bafb 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -114,4 +114,7 @@
#define SO_ZEROCOPY 60
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
#endif /* _ASM_IA64_SOCKET_H */
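
SO_TXTIME lets a sender attach per-packet transmit times. A userspace
sketch of enabling it (struct sock_txtime comes from the uapi
net_tstamp header; treat the details here as an assumption, not part of
this patch):

	struct sock_txtime txt = {
		.clockid = CLOCK_TAI,	/* clock the timestamps use */
		.flags	 = 0,
	};

	setsockopt(fd, SOL_SOCKET, SO_TXTIME, &txt, sizeof(txt));
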
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 498f3da3f225..d0c0ccdd656a 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
-obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
+obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
deleted file mode 100644
index f69389c7be1d..000000000000
--- a/arch/ia64/kernel/jprobes.S
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Jprobe specific operations
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) Intel Corporation, 2005
- *
- * 2005-May Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
- * <anil.s.keshavamurthy@intel.com> initial implementation
- *
- * Jprobes (a.k.a. "jump probes" which is built on-top of kprobes) allow a
- * probe to be inserted into the beginning of a function call. The fundamental
- * difference between a jprobe and a kprobe is the jprobe handler is executed
- * in the same context as the target function, while the kprobe handlers
- * are executed in interrupt context.
- *
- * For jprobes we initially gain control by placing a break point in the
- * first instruction of the targeted function. When we catch that specific
- * break, we:
- * * set the return address to our jprobe_inst_return() function
- * * jump to the jprobe handler function
- *
- * Since we fixed up the return address, the jprobe handler will return to our
- * jprobe_inst_return() function, giving us control again. At this point we
- * are back in the parents frame marker, so we do yet another call to our
- * jprobe_break() function to fix up the frame marker as it would normally
- * exist in the target function.
- *
- * Our jprobe_return function then transfers control back to kprobes.c by
- * executing a break instruction using one of our reserved numbers. When we
- * catch that break in kprobes.c, we continue like we do for a normal kprobe
- * by single stepping the emulated instruction, and then returning execution
- * to the correct location.
- */
-#include <asm/asmmacro.h>
-#include <asm/break.h>
-
- /*
- * void jprobe_break(void)
- */
- .section .kprobes.text, "ax"
-ENTRY(jprobe_break)
- break.m __IA64_BREAK_JPROBE
-END(jprobe_break)
-
- /*
- * void jprobe_inst_return(void)
- */
-GLOBAL_ENTRY(jprobe_inst_return)
- br.call.sptk.many b0=jprobe_break
-END(jprobe_inst_return)
-
-GLOBAL_ENTRY(invalidate_stacked_regs)
- movl r16=invalidate_restore_cfm
- ;;
- mov b6=r16
- ;;
- br.ret.sptk.many b6
- ;;
-invalidate_restore_cfm:
- mov r16=ar.rsc
- ;;
- mov ar.rsc=r0
- ;;
- loadrs
- ;;
- mov ar.rsc=r16
- ;;
- br.cond.sptk.many rp
-END(invalidate_stacked_regs)
-
-GLOBAL_ENTRY(flush_register_stack)
- // flush dirty regs to backing store (must be first in insn group)
- flushrs
- ;;
- br.ret.sptk.many rp
-END(flush_register_stack)
-
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index f5f3a5e6fcd1..aa41bd5cf9b7 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -35,8 +35,6 @@
#include <asm/sections.h>
#include <asm/exception.h>
-extern void jprobe_inst_return(void);
-
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -480,12 +478,9 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
*/
break;
}
-
kretprobe_assert(ri, orig_ret_address, trampoline_address);
- reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
@@ -819,14 +814,6 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
prepare_ss(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
- } else if (args->err == __IA64_BREAK_JPROBE) {
- /*
- * jprobe instrumented function just completed
- */
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- goto ss_probe;
- }
} else if (!is_ia64_break_inst(regs)) {
/* The breakpoint instruction was removed by
* another cpu right after we hit, no further
@@ -861,15 +848,12 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
set_current_kprobe(p, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (p->pre_handler && p->pre_handler(p, regs))
- /*
- * Our pre-handler is specifically requesting that we just
- * do a return. This is used for both the jprobe pre-handler
- * and the kretprobe trampoline
- */
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
+ }
-ss_probe:
#if !defined(CONFIG_PREEMPT)
if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
/* Boost up -- we can execute copied instructions directly */
@@ -992,7 +976,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
case DIE_BREAK:
/* err is break number from ia64_bad_break() */
if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
- || args->err == __IA64_BREAK_JPROBE
|| args->err == 0)
if (pre_kprobes_handler(args))
ret = NOTIFY_STOP;
@@ -1040,74 +1023,6 @@ unsigned long arch_deref_entry_point(void *entry)
return ((struct fnptr *)entry)->ip;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- unsigned long addr = arch_deref_entry_point(jp->entry);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- struct param_bsp_cfm pa;
- int bytes;
-
- /*
- * Callee owns the argument space and could overwrite it, eg
- * tail call optimization. So to be absolutely safe
- * we save the argument space before transferring the control
- * to instrumented jprobe function which runs in
- * the process context
- */
- pa.ip = regs->cr_iip;
- unw_init_running(ia64_get_bsp_cfm, &pa);
- bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
- - (char *)pa.bsp;
- memcpy( kcb->jprobes_saved_stacked_regs,
- pa.bsp,
- bytes );
- kcb->bsp = pa.bsp;
- kcb->cfm = pa.cfm;
-
- /* save architectural state */
- kcb->jprobe_saved_regs = *regs;
-
- /* after rfi, execute the jprobe instrumented function */
- regs->cr_iip = addr & ~0xFULL;
- ia64_psr(regs)->ri = addr & 0xf;
- regs->r1 = ((struct fnptr *)(jp->entry))->gp;
-
- /*
- * fix the return address to our jprobe_inst_return() function
- * in the jprobes.S file
- */
- regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;
-
- return 1;
-}
-
-/* ia64 does not need this */
-void __kprobes jprobe_return(void)
-{
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- int bytes;
-
- /* restoring architectural state */
- *regs = kcb->jprobe_saved_regs;
-
- /* restoring the original argument space */
- flush_register_stack();
- bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
- - (char *)kcb->bsp;
- memcpy( kcb->bsp,
- kcb->jprobes_saved_stacked_regs,
- bytes );
- invalidate_stacked_regs();
-
- preempt_enable_no_resched();
- return 1;
-}
-
static struct kprobe trampoline_p = {
.pre_handler = trampoline_probe_handler
};
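
For context on the removal: a jprobe ran its handler with the probed
function's own argument list, at the cost of the saved-state juggling
deleted above. Before the interface was retired tree-wide, usage looked
roughly like this (sketch of the old API, not code from this patch):

	static long my_handler(unsigned long arg0, unsigned long arg1)
	{
		/* runs in the probed function's context, with its args */
		jprobe_return();	/* never falls through */
		return 0;
	}

	static struct jprobe my_jp = {
		.entry		= my_handler,
		.kp.symbol_name	= "some_function",
	};

	/* register_jprobe(&my_jp); ... unregister_jprobe(&my_jp); */
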
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 785612b576f7..070553791e97 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -2,8 +2,10 @@
config M68K
bool
default y
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
+ select ARCH_NO_PREEMPT if !COLDFIRE
select HAVE_IDE
select HAVE_AOUT if MMU
select HAVE_DEBUG_BUGVERBOSE
@@ -24,6 +26,10 @@ config M68K
select MODULES_USE_ELF_RELA
select OLD_SIGSUSPEND3
select OLD_SIGACTION
+ select DMA_NONCOHERENT_OPS if HAS_DMA
+ select HAVE_MEMBLOCK
+ select ARCH_DISCARD_MEMBLOCK
+ select NO_BOOTMEM
config CPU_BIG_ENDIAN
def_bool y
@@ -75,10 +81,6 @@ config PGTABLE_LEVELS
default 2 if SUN3 || COLDFIRE
default 3
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
config MMU
bool "MMU-based Paged Memory Management Support"
default y
@@ -133,18 +135,6 @@ endmenu
menu "Kernel Features"
-if COLDFIRE
-source "kernel/Kconfig.preempt"
-endif
-
-source "mm/Kconfig"
-
-endmenu
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
endmenu
if !MMU
@@ -158,18 +148,4 @@ config PM
endmenu
endif
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
source "arch/m68k/Kconfig.devices"
-
-source "fs/Kconfig"
-
-source "arch/m68k/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/m68k/Kconfig.debug b/arch/m68k/Kconfig.debug
index 04690b179a6e..f43643111eaf 100644
--- a/arch/m68k/Kconfig.debug
+++ b/arch/m68k/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config BOOTPARAM
bool 'Compiled-in Kernel Boot Parameter'
@@ -51,5 +48,3 @@ config BDM_DISABLE
Disable the ColdFire CPU's BDM signals.
endif
-
-endmenu
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index b2a6bc63f8cd..aef8d42e078d 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -31,7 +31,6 @@ extern void dn_sched_init(irq_handler_t handler);
extern void dn_init_IRQ(void);
extern u32 dn_gettimeoffset(void);
extern int dn_dummy_hwclk(int, struct rtc_time *);
-extern int dn_dummy_set_clock_mmss(unsigned long);
extern void dn_dummy_reset(void);
#ifdef CONFIG_HEARTBEAT
static void dn_heartbeat(int on);
@@ -156,7 +155,6 @@ void __init config_apollo(void)
arch_gettimeoffset = dn_gettimeoffset;
mach_max_dma_address = 0xffffffff;
mach_hwclk = dn_dummy_hwclk; /* */
- mach_set_clock_mmss = dn_dummy_set_clock_mmss; /* */
mach_reset = dn_dummy_reset; /* */
#ifdef CONFIG_HEARTBEAT
mach_heartbeat = dn_heartbeat;
@@ -240,12 +238,6 @@ int dn_dummy_hwclk(int op, struct rtc_time *t) {
}
-int dn_dummy_set_clock_mmss(unsigned long nowtime)
-{
- pr_info("set_clock_mmss\n");
- return 0;
-}
-
void dn_dummy_reset(void) {
dn_serial_print("The end !\n");
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 565c6f06ab0b..bd96702a1ad0 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -81,9 +81,6 @@ extern void atari_sched_init(irq_handler_t);
extern u32 atari_gettimeoffset(void);
extern int atari_mste_hwclk (int, struct rtc_time *);
extern int atari_tt_hwclk (int, struct rtc_time *);
-extern int atari_mste_set_clock_mmss (unsigned long);
-extern int atari_tt_set_clock_mmss (unsigned long);
-
/* ++roman: This is a more elaborate test for an SCC chip, since the plain
* Medusa board generates DTACK at the SCC's standard addresses, but a SCC
@@ -362,13 +359,11 @@ void __init config_atari(void)
ATARIHW_SET(TT_CLK);
pr_cont(" TT_CLK");
mach_hwclk = atari_tt_hwclk;
- mach_set_clock_mmss = atari_tt_set_clock_mmss;
}
if (hwreg_present(&mste_rtc.sec_ones)) {
ATARIHW_SET(MSTE_CLK);
pr_cont(" MSTE_CLK");
mach_hwclk = atari_mste_hwclk;
- mach_set_clock_mmss = atari_mste_set_clock_mmss;
}
if (!MACH_IS_MEDUSA && hwreg_present(&dma_wd.fdc_speed) &&
hwreg_write(&dma_wd.fdc_speed, 0)) {
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index c549b48174ec..9cca64286464 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -285,69 +285,6 @@ int atari_tt_hwclk( int op, struct rtc_time *t )
return( 0 );
}
-
-int atari_mste_set_clock_mmss (unsigned long nowtime)
-{
- short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
- struct MSTE_RTC val;
- unsigned char rtc_minutes;
-
- mste_read(&val);
- rtc_minutes= val.min_ones + val.min_tens * 10;
- if ((rtc_minutes < real_minutes
- ? real_minutes - rtc_minutes
- : rtc_minutes - real_minutes) < 30)
- {
- val.sec_ones = real_seconds % 10;
- val.sec_tens = real_seconds / 10;
- val.min_ones = real_minutes % 10;
- val.min_tens = real_minutes / 10;
- mste_write(&val);
- }
- else
- return -1;
- return 0;
-}
-
-int atari_tt_set_clock_mmss (unsigned long nowtime)
-{
- int retval = 0;
- short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
- unsigned char save_control, save_freq_select, rtc_minutes;
-
- save_control = RTC_READ (RTC_CONTROL); /* tell the clock it's being set */
- RTC_WRITE (RTC_CONTROL, save_control | RTC_SET);
-
- save_freq_select = RTC_READ (RTC_FREQ_SELECT); /* stop and reset prescaler */
- RTC_WRITE (RTC_FREQ_SELECT, save_freq_select | RTC_DIV_RESET2);
-
- rtc_minutes = RTC_READ (RTC_MINUTES);
- if (!(save_control & RTC_DM_BINARY))
- rtc_minutes = bcd2bin(rtc_minutes);
-
- /* Since we're only adjusting minutes and seconds, don't interfere
- with hour overflow. This avoids messing with unknown time zones
- but requires your RTC not to be off by more than 30 minutes. */
- if ((rtc_minutes < real_minutes
- ? real_minutes - rtc_minutes
- : rtc_minutes - real_minutes) < 30)
- {
- if (!(save_control & RTC_DM_BINARY))
- {
- real_seconds = bin2bcd(real_seconds);
- real_minutes = bin2bcd(real_minutes);
- }
- RTC_WRITE (RTC_SECONDS, real_seconds);
- RTC_WRITE (RTC_MINUTES, real_minutes);
- }
- else
- retval = -1;
-
- RTC_WRITE (RTC_FREQ_SELECT, save_freq_select);
- RTC_WRITE (RTC_CONTROL, save_control);
- return retval;
-}
-
/*
* Local variables:
* c-indent-level: 4
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 2cfff4765040..143ee9fa3893 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -41,7 +41,6 @@ static void bvme6000_get_model(char *model);
extern void bvme6000_sched_init(irq_handler_t handler);
extern u32 bvme6000_gettimeoffset(void);
extern int bvme6000_hwclk (int, struct rtc_time *);
-extern int bvme6000_set_clock_mmss (unsigned long);
extern void bvme6000_reset (void);
void bvme6000_set_vectors (void);
@@ -113,7 +112,6 @@ void __init config_bvme6000(void)
mach_init_IRQ = bvme6000_init_IRQ;
arch_gettimeoffset = bvme6000_gettimeoffset;
mach_hwclk = bvme6000_hwclk;
- mach_set_clock_mmss = bvme6000_set_clock_mmss;
mach_reset = bvme6000_reset;
mach_get_model = bvme6000_get_model;
@@ -305,46 +303,3 @@ int bvme6000_hwclk(int op, struct rtc_time *t)
return 0;
}
-
-/*
- * Set the minutes and seconds from seconds value 'nowtime'. Fail if
- * clock is out by > 30 minutes. Logic lifted from atari code.
- * Algorithm is to wait for the 10ms register to change, and then to
- * wait a short while, and then set it.
- */
-
-int bvme6000_set_clock_mmss (unsigned long nowtime)
-{
- int retval = 0;
- short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
- unsigned char rtc_minutes, rtc_tenms;
- volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
- unsigned char msr = rtc->msr & 0xc0;
- unsigned long flags;
- volatile int i;
-
- rtc->msr = 0; /* Ensure clock accessible */
- rtc_minutes = bcd2bin (rtc->bcd_min);
-
- if ((rtc_minutes < real_minutes
- ? real_minutes - rtc_minutes
- : rtc_minutes - real_minutes) < 30)
- {
- local_irq_save(flags);
- rtc_tenms = rtc->bcd_tenms;
- while (rtc_tenms == rtc->bcd_tenms)
- ;
- for (i = 0; i < 1000; i++)
- ;
- rtc->bcd_min = bin2bcd(real_minutes);
- rtc->bcd_sec = bin2bcd(real_seconds);
- local_irq_restore(flags);
- }
- else
- retval = -1;
-
- rtc->msr = msr;
-
- return retval;
-}
-
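
The removed set_clock_mmss() helpers (Apollo, Atari, BVME6000, and the
rest of this series) all shared one guard: only slew the RTC's minutes
and seconds when it is within 30 minutes of system time, so the hour
and time-zone fields are never touched. Distilled into C (sketch):

	static bool rtc_close_enough(int rtc_min, int real_min)
	{
		int delta = rtc_min < real_min ? real_min - rtc_min
					       : rtc_min - real_min;

		return delta < 30;	/* otherwise refuse to adjust */
	}
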
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index a874e54404d1..1d5483f6e457 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -52,6 +52,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -98,18 +99,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -122,6 +119,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -200,7 +198,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -231,7 +228,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -260,7 +256,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -301,6 +296,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -356,6 +352,7 @@ CONFIG_A2091_SCSI=y
CONFIG_GVP11_SCSI=y
CONFIG_SCSI_A4000T=y
CONFIG_SCSI_ZORRO7XX=y
+CONFIG_SCSI_ZORRO_ESP=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -363,6 +360,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -402,8 +400,8 @@ CONFIG_A2065=y
CONFIG_ARIADNE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CIRRUS is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -412,8 +410,10 @@ CONFIG_ARIADNE=y
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
+CONFIG_XSURF100=y
CONFIG_HYDRA=y
CONFIG_APNE=y
CONFIG_ZORRO8390=y
@@ -426,9 +426,9 @@ CONFIG_ZORRO8390=y
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -478,6 +478,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -499,7 +500,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -600,6 +601,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -622,6 +624,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -657,6 +664,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 8ce39e23aa42..52a0af127951 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -345,6 +341,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -381,14 +378,15 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -400,9 +398,9 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -440,6 +438,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -458,7 +457,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -559,6 +558,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -581,6 +581,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -616,6 +621,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 346c4e75edf8..b3103e51268a 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -354,6 +350,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -391,14 +388,15 @@ CONFIG_VETH=m
CONFIG_ATARILANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
CONFIG_NE2000=y
@@ -411,9 +409,9 @@ CONFIG_NE2000=y
CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -480,7 +478,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -581,6 +579,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -603,6 +602,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -638,6 +642,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index fca9c7aa71a3..fb7d651a4cab 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -344,6 +340,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_BVME6000_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_BVME6000_NET=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index f9eab174915c..6b37f5537c39 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -345,6 +341,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -382,14 +379,15 @@ CONFIG_VETH=m
CONFIG_HPLANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -401,9 +399,9 @@ CONFIG_HPLANCE=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -443,6 +441,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -460,7 +459,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -561,6 +560,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -583,6 +583,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -618,6 +623,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index b52e597899eb..930cc2965a11 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -49,6 +49,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -95,18 +96,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -119,6 +116,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -197,7 +195,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -228,7 +225,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -257,7 +253,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -301,6 +296,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -354,6 +350,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -398,8 +395,8 @@ CONFIG_VETH=m
CONFIG_MACMACE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
CONFIG_MAC89x0=y
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -407,6 +404,7 @@ CONFIG_MAC89x0=y
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
CONFIG_MACSONIC=y
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -420,9 +418,9 @@ CONFIG_MAC8390=y
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -465,6 +463,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -482,7 +481,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -583,6 +582,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -605,6 +605,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -640,6 +645,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 2a84eeec5b02..e7dd25300127 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -59,6 +59,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -105,18 +106,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -129,6 +126,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -207,7 +205,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -238,7 +235,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -267,7 +263,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -311,6 +306,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -373,6 +369,7 @@ CONFIG_A2091_SCSI=y
CONFIG_GVP11_SCSI=y
CONFIG_SCSI_A4000T=y
CONFIG_SCSI_ZORRO7XX=y
+CONFIG_SCSI_ZORRO_ESP=y
CONFIG_ATARI_SCSI=y
CONFIG_MAC_SCSI=y
CONFIG_SCSI_MAC_ESP=y
@@ -387,6 +384,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -438,8 +436,8 @@ CONFIG_SUN3LANCE=y
CONFIG_MACMACE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
CONFIG_MAC89x0=y
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -449,9 +447,11 @@ CONFIG_BVME6000_NET=y
CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
CONFIG_MACSONIC=y
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
+CONFIG_XSURF100=y
CONFIG_HYDRA=y
CONFIG_MAC8390=y
CONFIG_NE2000=y
@@ -466,9 +466,9 @@ CONFIG_ZORRO8390=y
CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PLIP=m
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -533,6 +533,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -562,7 +563,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -663,6 +664,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -685,6 +687,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -720,6 +727,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 476e69994340..b383327fd77a 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -47,6 +47,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -93,18 +94,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -117,6 +114,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -195,7 +193,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -226,7 +223,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -255,7 +251,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -296,6 +291,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -343,6 +339,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
CONFIG_MVME147_NET=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_MVME147_NET=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 1477cda9146e..9783d3deb9e9 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -344,6 +340,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index b3a543dc48a0..a35d10ee10cb 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -350,6 +346,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -388,8 +385,8 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CIRRUS is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -398,6 +395,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
CONFIG_NE2000=y
@@ -410,9 +408,9 @@ CONFIG_NE2000=y
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PLIP=m
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -455,6 +453,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -473,7 +472,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -574,6 +573,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -596,6 +596,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -631,6 +636,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index d543ed5dfa96..573bf922d448 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -45,6 +45,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -91,18 +92,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -115,6 +112,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +191,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +221,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -253,7 +249,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -294,6 +289,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -341,6 +337,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -385,6 +382,7 @@ CONFIG_SUN3LANCE=y
CONFIG_SUN3_82586=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -397,9 +395,9 @@ CONFIG_SUN3_82586=y
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -435,6 +433,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -452,7 +451,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -553,6 +552,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -574,6 +574,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -609,6 +614,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index a67e54246023..efb27a7fcc55 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -45,6 +45,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -91,18 +92,14 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
@@ -115,6 +112,7 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +191,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +221,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -253,7 +249,6 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
@@ -294,6 +289,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
@@ -341,6 +337,7 @@ CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_ERA=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
@@ -378,14 +375,15 @@ CONFIG_VETH=m
CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NI is not set
@@ -397,9 +395,9 @@ CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -435,6 +433,7 @@ CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
# CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
@@ -452,7 +451,7 @@ CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
@@ -553,6 +552,7 @@ CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
@@ -575,6 +575,11 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -610,6 +615,7 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 4d8d68c4e3dd..a4b8d3331a9e 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,6 +1,7 @@
generic-y += barrier.h
generic-y += compat.h
generic-y += device.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index e993e2860ee1..47228b0d4163 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -126,11 +126,13 @@ static inline void atomic_inc(atomic_t *v)
{
__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
+#define atomic_inc atomic_inc
static inline void atomic_dec(atomic_t *v)
{
__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
+#define atomic_dec atomic_dec
static inline int atomic_dec_and_test(atomic_t *v)
{
@@ -138,6 +140,7 @@ static inline int atomic_dec_and_test(atomic_t *v)
__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
+#define atomic_dec_and_test atomic_dec_and_test
static inline int atomic_dec_and_test_lt(atomic_t *v)
{
@@ -155,6 +158,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
+#define atomic_inc_and_test atomic_inc_and_test
#ifdef CONFIG_RMW_INSNS
@@ -190,9 +194,6 @@ static inline int atomic_xchg(atomic_t *v, int new)
#endif /* !CONFIG_RMW_INSNS */
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
char c;
@@ -201,6 +202,7 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
: ASM_DI (i));
return c != 0;
}
+#define atomic_sub_and_test atomic_sub_and_test
static inline int atomic_add_negative(int i, atomic_t *v)
{
@@ -210,20 +212,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
: ASM_DI (i));
return c != 0;
}
-
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
+#define atomic_add_negative atomic_add_negative
#endif /* __ARCH_M68K_ATOMIC __ */
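The new "#define atomic_inc atomic_inc" lines above look redundant but are load-bearing: the generic atomic headers wrap every fallback in #ifndef, so defining the name as a macro that points at the arch's own inline is how an architecture opts out of the generic version. It is also why the open-coded __atomic_add_unless() loop can simply be deleted; the generic layer can now supply an equivalent built on atomic_cmpxchg(). A minimal sketch of the detection pattern (simplified, not the actual fallback header):

    /* sketch: generic fallback, compiled only when the arch has not
     * already defined atomic_inc as a macro naming its own inline */
    #ifndef atomic_inc
    static inline void atomic_inc(atomic_t *v)
    {
            atomic_add(1, v);
    }
    #define atomic_inc atomic_inc
    #endif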
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index 93b47b1f6fb4..d979f38af751 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -454,7 +454,7 @@ static inline unsigned long ffz(unsigned long word)
*/
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
!defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
-static inline int __ffs(int x)
+static inline unsigned long __ffs(unsigned long x)
{
__asm__ __volatile__ ("bitrev %0; ff1 %0"
: "=d" (x)
@@ -493,7 +493,11 @@ static inline int ffs(int x)
: "dm" (x & -x));
return 32 - cnt;
}
-#define __ffs(x) (ffs(x) - 1)
+
+static inline unsigned long __ffs(unsigned long x)
+{
+ return ffs(x) - 1;
+}
/*
* fls: find last bit set.
@@ -515,12 +519,16 @@ static inline int __fls(int x)
#endif
+/* Simple test-and-set bit locks */
+#define test_and_set_bit_lock test_and_set_bit
+#define clear_bit_unlock clear_bit
+#define __clear_bit_unlock clear_bit_unlock
+
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
#endif /* _M68K_BITOPS_H */
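Two separate fixes land in bitops.h above. First, __ffs() gets the prototype generic bitmap code expects, unsigned long in and out, instead of the old int-based inline and macro. Second, the lock bitops are spelled directly in terms of the ordinary atomic bitops rather than pulled from asm-generic/bitops/lock.h. The ffs()/__ffs() relation itself is unchanged: for any nonzero word, __ffs(x) == ffs(x) - 1, which a standalone userspace check illustrates (a sketch, not kernel code):

    #include <stdio.h>
    #include <strings.h>    /* POSIX ffs(): 1-based index of the lowest set bit */

    /* model of the kernel's 0-based __ffs(); result undefined for x == 0 */
    static unsigned long ffs0(unsigned long x)
    {
            return ffs(x) - 1;
    }

    int main(void)
    {
            printf("%d %lu\n", ffs(0x8), ffs0(0x8));    /* prints "4 3" */
            return 0;
    }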
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
deleted file mode 100644
index e3722ed04fbb..000000000000
--- a/arch/m68k/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _M68K_DMA_MAPPING_H
-#define _M68K_DMA_MAPPING_H
-
-extern const struct dma_map_ops m68k_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &m68k_dma_ops;
-}
-
-#endif /* _M68K_DMA_MAPPING_H */
diff --git a/arch/m68k/include/asm/io.h b/arch/m68k/include/asm/io.h
index ca2849afb087..aabe6420ead2 100644
--- a/arch/m68k/include/asm/io.h
+++ b/arch/m68k/include/asm/io.h
@@ -1,6 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _M68K_IO_H
+#define _M68K_IO_H
+
#if defined(__uClinux__) || defined(CONFIG_COLDFIRE)
#include <asm/io_no.h>
#else
#include <asm/io_mm.h>
#endif
+
+#include <asm-generic/io.h>
+
+#endif /* _M68K_IO_H */
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index fe485f4f5fac..782b78f8a048 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -16,13 +16,11 @@
* isa_readX(),isa_writeX() are for ISA memory
*/
-#ifndef _IO_H
-#define _IO_H
+#ifndef _M68K_IO_MM_H
+#define _M68K_IO_MM_H
#ifdef __KERNEL__
-#define ARCH_HAS_IOREMAP_WT
-
#include <linux/compiler.h>
#include <asm/raw_io.h>
#include <asm/virtconvert.h>
@@ -369,40 +367,6 @@ static inline void isa_delay(void)
#define writew(val, addr) out_le16((addr), (val))
#endif /* CONFIG_ATARI_ROM_ISA */
-#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA)
-/*
- * We need to define dummy functions for GENERIC_IOMAP support.
- */
-#define inb(port) 0xff
-#define inb_p(port) 0xff
-#define outb(val,port) ((void)0)
-#define outb_p(val,port) ((void)0)
-#define inw(port) 0xffff
-#define inw_p(port) 0xffff
-#define outw(val,port) ((void)0)
-#define outw_p(val,port) ((void)0)
-#define inl(port) 0xffffffffUL
-#define inl_p(port) 0xffffffffUL
-#define outl(val,port) ((void)0)
-#define outl_p(val,port) ((void)0)
-
-#define insb(port,buf,nr) ((void)0)
-#define outsb(port,buf,nr) ((void)0)
-#define insw(port,buf,nr) ((void)0)
-#define outsw(port,buf,nr) ((void)0)
-#define insl(port,buf,nr) ((void)0)
-#define outsl(port,buf,nr) ((void)0)
-
-/*
- * These should be valid on any ioremap()ed region
- */
-#define readb(addr) in_8(addr)
-#define writeb(val,addr) out_8((addr),(val))
-#define readw(addr) in_le16(addr)
-#define writew(val,addr) out_le16((addr),(val))
-
-#endif /* !CONFIG_ISA && !CONFIG_ATARI_ROM_ISA */
-
#define readl(addr) in_le32(addr)
#define writel(val,addr) out_le32((addr),(val))
@@ -444,4 +408,4 @@ static inline void isa_delay(void)
#define writew_relaxed(b, addr) writew(b, addr)
#define writel_relaxed(b, addr) writel(b, addr)
-#endif /* _IO_H */
+#endif /* _M68K_IO_MM_H */
diff --git a/arch/m68k/include/asm/io_no.h b/arch/m68k/include/asm/io_no.h
index 83a0a6d449f4..0498192e1d98 100644
--- a/arch/m68k/include/asm/io_no.h
+++ b/arch/m68k/include/asm/io_no.h
@@ -131,19 +131,7 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define PCI_SPACE_LIMIT PCI_IO_MASK
#endif /* CONFIG_PCI */
-/*
- * These are defined in kmap.h as static inline functions. To maintain
- * previous behavior we put these define guards here so io_mm.h doesn't
- * see them.
- */
-#ifdef CONFIG_MMU
-#define memset_io memset_io
-#define memcpy_fromio memcpy_fromio
-#define memcpy_toio memcpy_toio
-#endif
-
#include <asm/kmap.h>
#include <asm/virtconvert.h>
-#include <asm-generic/io.h>
#endif /* _M68KNOMMU_IO_H */
diff --git a/arch/m68k/include/asm/kmap.h b/arch/m68k/include/asm/kmap.h
index 84b8333db8ad..aac7f045f7f0 100644
--- a/arch/m68k/include/asm/kmap.h
+++ b/arch/m68k/include/asm/kmap.h
@@ -4,6 +4,8 @@
#ifdef CONFIG_MMU
+#define ARCH_HAS_IOREMAP_WT
+
/* Values for nocacheflag and cmode */
#define IOMAP_FULL_CACHING 0
#define IOMAP_NOCACHE_SER 1
@@ -16,6 +18,7 @@
*/
extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
int cacheflag);
+#define iounmap iounmap
extern void iounmap(void __iomem *addr);
extern void __iounmap(void *addr, unsigned long size);
@@ -33,31 +36,35 @@ static inline void __iomem *ioremap_nocache(unsigned long physaddr,
}
#define ioremap_uc ioremap_nocache
+#define ioremap_wt ioremap_wt
static inline void __iomem *ioremap_wt(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
}
-#define ioremap_fillcache ioremap_fullcache
+#define ioremap_fullcache ioremap_fullcache
static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
}
+#define memset_io memset_io
static inline void memset_io(volatile void __iomem *addr, unsigned char val,
int count)
{
__builtin_memset((void __force *) addr, val, count);
}
+#define memcpy_fromio memcpy_fromio
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
int count)
{
__builtin_memcpy(dst, (void __force *) src, count);
}
+#define memcpy_toio memcpy_toio
static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
int count)
{
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 1605da48ebf2..49bd3266b4b1 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -22,7 +22,6 @@ extern int (*mach_hwclk)(int, struct rtc_time*);
extern unsigned int (*mach_get_ss)(void);
extern int (*mach_get_rtc_pll)(struct rtc_pll_info *);
extern int (*mach_set_rtc_pll)(struct rtc_pll_info *);
-extern int (*mach_set_clock_mmss)(unsigned long);
extern void (*mach_reset)( void );
extern void (*mach_halt)( void );
extern void (*mach_power_off)( void );
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 9b840c03ebb7..08cee11180e6 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -57,7 +57,6 @@ struct mac_model
#define MAC_SCSI_IIFX 5
#define MAC_SCSI_DUO 6
#define MAC_SCSI_LC 7
-#define MAC_SCSI_LATE 8
#define MAC_IDE_NONE 0
#define MAC_IDE_QUADRA 1
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index e644c4daf540..6bbe52025de3 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -18,7 +18,7 @@ extern unsigned long memory_end;
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
#define __pa(vaddr) ((unsigned long)(vaddr))
-#define __va(paddr) ((void *)(paddr))
+#define __va(paddr) ((void *)((unsigned long)(paddr)))
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
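The extra cast in __va() above matters once a caller hands in a 64-bit physical address type: converting a u64 directly to a pointer triggers GCC's "cast to pointer from integer of different size" warning on 32-bit targets, whereas going through unsigned long makes the narrowing explicit and silent. A compiler-level sketch (hypothetical function, any 32-bit build):

    #include <stdint.h>

    void sketch(uint64_t paddr)
    {
            void *a = (void *)paddr;                /* warns on 32-bit builds */
            void *b = (void *)(unsigned long)paddr; /* explicit narrowing, quiet */
            (void)a; (void)b;
    }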
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 463572c4943f..e99993c57d6b 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -6,7 +6,7 @@
#undef DEBUG
-#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -19,7 +19,7 @@
#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
-static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t flag, unsigned long attrs)
{
struct page *page, **map;
@@ -62,7 +62,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
return addr;
}
-static void m68k_dma_free(struct device *dev, size_t size, void *addr,
+void arch_dma_free(struct device *dev, size_t size, void *addr,
dma_addr_t handle, unsigned long attrs)
{
pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
@@ -73,8 +73,8 @@ static void m68k_dma_free(struct device *dev, size_t size, void *addr,
#include <asm/cacheflush.h>
-static void *m68k_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -89,7 +89,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size,
return ret;
}
-static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
@@ -97,8 +97,8 @@ static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
-static void m68k_dma_sync_single_for_device(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
+ size_t size, enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
@@ -115,58 +115,6 @@ static void m68k_dma_sync_single_for_device(struct device *dev,
}
}
-static void m68k_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sglist, int nents, enum dma_data_direction dir)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i) {
- dma_sync_single_for_device(dev, sg->dma_address, sg->length,
- dir);
- }
-}
-
-static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t handle = page_to_phys(page) + offset;
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_sync_single_for_device(dev, handle, size, dir);
-
- return handle;
-}
-
-static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i) {
- sg->dma_address = sg_phys(sg);
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- continue;
-
- dma_sync_single_for_device(dev, sg->dma_address, sg->length,
- dir);
- }
- return nents;
-}
-
-const struct dma_map_ops m68k_dma_ops = {
- .alloc = m68k_dma_alloc,
- .free = m68k_dma_free,
- .map_page = m68k_dma_map_page,
- .map_sg = m68k_dma_map_sg,
- .sync_single_for_device = m68k_dma_sync_single_for_device,
- .sync_sg_for_device = m68k_dma_sync_sg_for_device,
-};
-EXPORT_SYMBOL(m68k_dma_ops);
-
void arch_setup_pdev_archdata(struct platform_device *pdev)
{
if (pdev->dev.coherent_dma_mask == DMA_MASK_NONE &&
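What replaces the deleted dma_map_ops boilerplate is common code. With the arch reduced to arch_dma_alloc(), arch_dma_free() and arch_sync_dma_for_device(), the generic direct-mapping layer implements map_page/map_sg once for every IOMMU-less architecture, doing exactly what m68k_dma_map_page() did: compute the bus address and call the cache-maintenance hook. Roughly (a simplified sketch of the kernel/dma shape from this period, not the exact source):

    /* sketch: generic direct mapping built on the arch cache hook */
    static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                    unsigned long offset, size_t size,
                    enum dma_data_direction dir, unsigned long attrs)
    {
            phys_addr_t phys = page_to_phys(page) + offset;

            if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                    arch_sync_dma_for_device(dev, phys, size, dir);
            return phys;    /* no IOMMU: bus address equals physical address */
    }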
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index f35e3ebd6331..5d3596c180f9 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -21,6 +21,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
@@ -88,7 +89,6 @@ void (*mach_get_hardware_list) (struct seq_file *m);
/* machine dependent timer functions */
int (*mach_hwclk) (int, struct rtc_time*);
EXPORT_SYMBOL(mach_hwclk);
-int (*mach_set_clock_mmss) (unsigned long);
unsigned int (*mach_get_ss)(void);
int (*mach_get_rtc_pll)(struct rtc_pll_info *);
int (*mach_set_rtc_pll)(struct rtc_pll_info *);
@@ -165,6 +165,8 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
be32_to_cpu(m->addr);
m68k_memory[m68k_num_memory].size =
be32_to_cpu(m->size);
+ memblock_add(m68k_memory[m68k_num_memory].addr,
+ m68k_memory[m68k_num_memory].size);
m68k_num_memory++;
} else
pr_warn("%s: too many memory chunks\n",
@@ -224,10 +226,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
void __init setup_arch(char **cmdline_p)
{
-#ifndef CONFIG_SUN3
- int i;
-#endif
-
/* The bootinfo is located right after the kernel */
if (!CPU_IS_COLDFIRE)
m68k_parse_bootinfo((const struct bi_record *)_end);
@@ -356,14 +354,9 @@ void __init setup_arch(char **cmdline_p)
#endif
#ifndef CONFIG_SUN3
- for (i = 1; i < m68k_num_memory; i++)
- free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
- m68k_memory[i].size);
#ifdef CONFIG_BLK_DEV_INITRD
if (m68k_ramdisk.size) {
- reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
- m68k_ramdisk.addr, m68k_ramdisk.size,
- BOOTMEM_DEFAULT);
+ memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size);
initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
initrd_end = initrd_start + m68k_ramdisk.size;
pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index a98af1018201..cfd5475bfc31 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/initrd.h>
@@ -51,7 +52,6 @@ char __initdata command_line[COMMAND_LINE_SIZE];
/* machine dependent timer functions */
void (*mach_sched_init)(irq_handler_t handler) __initdata = NULL;
-int (*mach_set_clock_mmss)(unsigned long);
int (*mach_hwclk) (int, struct rtc_time*);
/* machine dependent reboot functions */
@@ -86,8 +86,6 @@ void (*mach_power_off)(void);
void __init setup_arch(char **cmdline_p)
{
- int bootmap_size;
-
memory_start = PAGE_ALIGN(_ramstart);
memory_end = _ramend;
@@ -142,6 +140,8 @@ void __init setup_arch(char **cmdline_p)
pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
__bss_stop, memory_start, memory_start, memory_end);
+ memblock_add(memory_start, memory_end - memory_start);
+
/* Keep a copy of command line */
*cmdline_p = &command_line[0];
memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
@@ -158,23 +158,10 @@ void __init setup_arch(char **cmdline_p)
min_low_pfn = PFN_DOWN(memory_start);
max_pfn = max_low_pfn = PFN_DOWN(memory_end);
- bootmap_size = init_bootmem_node(
- NODE_DATA(0),
- min_low_pfn, /* map goes here */
- PFN_DOWN(PAGE_OFFSET),
- max_pfn);
- /*
- * Free the usable memory, we have to make sure we do not free
- * the bootmem bitmap so we then reserve it after freeing it :-)
- */
- free_bootmem(memory_start, memory_end - memory_start);
- reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
-
#if defined(CONFIG_UBOOT) && defined(CONFIG_BLK_DEV_INITRD)
if ((initrd_start > 0) && (initrd_start < initrd_end) &&
(initrd_end < memory_end))
- reserve_bootmem(initrd_start, initrd_end - initrd_start,
- BOOTMEM_DEFAULT);
+ memblock_reserve(initrd_start, initrd_end - initrd_start);
#endif /* if defined(CONFIG_BLK_DEV_INITRD) */
/*
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index e522307db47c..b02d7254b73a 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -57,7 +57,6 @@ static unsigned long mac_orig_videoaddr;
/* Mac specific timer functions */
extern u32 mac_gettimeoffset(void);
extern int mac_hwclk(int, struct rtc_time *);
-extern int mac_set_clock_mmss(unsigned long);
extern void iop_preinit(void);
extern void iop_init(void);
extern void via_init(void);
@@ -158,7 +157,6 @@ void __init config_mac(void)
mach_get_model = mac_get_model;
arch_gettimeoffset = mac_gettimeoffset;
mach_hwclk = mac_hwclk;
- mach_set_clock_mmss = mac_set_clock_mmss;
mach_reset = mac_reset;
mach_halt = mac_poweroff;
mach_power_off = mac_poweroff;
@@ -709,7 +707,7 @@ static struct mac_model mac_data_table[] = {
.name = "PowerBook 520",
.adb_type = MAC_ADB_PB2,
.via_type = MAC_VIA_QUADRA,
- .scsi_type = MAC_SCSI_LATE,
+ .scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -943,18 +941,6 @@ static const struct resource mac_scsi_old_rsrc[] __initconst = {
},
};
-static const struct resource mac_scsi_late_rsrc[] __initconst = {
- {
- .flags = IORESOURCE_IRQ,
- .start = IRQ_MAC_SCSI,
- .end = IRQ_MAC_SCSI,
- }, {
- .flags = IORESOURCE_MEM,
- .start = 0x50010000,
- .end = 0x50011FFF,
- },
-};
-
static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
{
.flags = IORESOURCE_IRQ,
@@ -1064,11 +1050,6 @@ int __init mac_platform_init(void)
platform_device_register_simple("mac_scsi", 0,
mac_scsi_old_rsrc, ARRAY_SIZE(mac_scsi_old_rsrc));
break;
- case MAC_SCSI_LATE:
- /* XXX PDMA support for PowerBook 500 series needs testing */
- platform_device_register_simple("mac_scsi", 0,
- mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc));
- break;
case MAC_SCSI_LC:
/* Addresses from Mac LC data in Designing Cards & Drivers 3ed.
* Also from the Developer Notes for Classic II, LC III,
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index c68054361615..19e9d8eef1f2 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -26,33 +26,38 @@
#include <asm/machdep.h>
-/* Offset between Unix time (1970-based) and Mac time (1904-based) */
+/*
+ * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU
+ * times wrap in 2040. If we need to handle later times, the read_time functions
+ * need to be changed to interpret wrapped times as post-2040.
+ */
#define RTC_OFFSET 2082844800
static void (*rom_reset)(void);
#ifdef CONFIG_ADB_CUDA
-static long cuda_read_time(void)
+static time64_t cuda_read_time(void)
{
struct adb_request req;
- long time;
+ time64_t time;
if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
return 0;
while (!req.complete)
cuda_poll();
- time = (req.reply[3] << 24) | (req.reply[4] << 16) |
- (req.reply[5] << 8) | req.reply[6];
+ time = (u32)((req.reply[3] << 24) | (req.reply[4] << 16) |
+ (req.reply[5] << 8) | req.reply[6]);
+
return time - RTC_OFFSET;
}
-static void cuda_write_time(long data)
+static void cuda_write_time(time64_t time)
{
struct adb_request req;
+ u32 data = lower_32_bits(time + RTC_OFFSET);
- data += RTC_OFFSET;
if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
(data >> 24) & 0xFF, (data >> 16) & 0xFF,
(data >> 8) & 0xFF, data & 0xFF) < 0)
@@ -86,26 +91,27 @@ static void cuda_write_pram(int offset, __u8 data)
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU68K
-static long pmu_read_time(void)
+static time64_t pmu_read_time(void)
{
struct adb_request req;
- long time;
+ time64_t time;
if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
return 0;
while (!req.complete)
pmu_poll();
- time = (req.reply[1] << 24) | (req.reply[2] << 16) |
- (req.reply[3] << 8) | req.reply[4];
+ time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) |
+ (req.reply[3] << 8) | req.reply[4]);
+
return time - RTC_OFFSET;
}
-static void pmu_write_time(long data)
+static void pmu_write_time(time64_t time)
{
struct adb_request req;
+ u32 data = lower_32_bits(time + RTC_OFFSET);
- data += RTC_OFFSET;
if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
(data >> 24) & 0xFF, (data >> 16) & 0xFF,
(data >> 8) & 0xFF, data & 0xFF) < 0)
@@ -245,11 +251,11 @@ static void via_write_pram(int offset, __u8 data)
* is basically any machine with Mac II-style ADB.
*/
-static long via_read_time(void)
+static time64_t via_read_time(void)
{
union {
__u8 cdata[4];
- long idata;
+ __u32 idata;
} result, last_result;
int count = 1;
@@ -270,7 +276,7 @@ static long via_read_time(void)
via_pram_command(0x8D, &result.cdata[0]);
if (result.idata == last_result.idata)
- return result.idata - RTC_OFFSET;
+ return (time64_t)result.idata - RTC_OFFSET;
if (++count > 10)
break;
@@ -278,8 +284,8 @@ static long via_read_time(void)
last_result.idata = result.idata;
}
- pr_err("via_read_time: failed to read a stable value; got 0x%08lx then 0x%08lx\n",
- last_result.idata, result.idata);
+ pr_err("%s: failed to read a stable value; got 0x%08x then 0x%08x\n",
+ __func__, last_result.idata, result.idata);
return 0;
}
@@ -291,11 +297,11 @@ static long via_read_time(void)
* is basically any machine with Mac II-style ADB.
*/
-static void via_write_time(long time)
+static void via_write_time(time64_t time)
{
union {
__u8 cdata[4];
- long idata;
+ __u32 idata;
} data;
__u8 temp;
@@ -304,7 +310,7 @@ static void via_write_time(long time)
temp = 0x55;
via_pram_command(0x35, &temp);
- data.idata = time + RTC_OFFSET;
+ data.idata = lower_32_bits(time + RTC_OFFSET);
via_pram_command(0x01, &data.cdata[3]);
via_pram_command(0x05, &data.cdata[2]);
via_pram_command(0x09, &data.cdata[1]);
@@ -585,12 +591,15 @@ void mac_reset(void)
* This function translates seconds since 1970 into a proper date.
*
* Algorithm cribbed from glibc2.1, __offtime().
+ *
+ * This is roughly same as rtc_time64_to_tm(), which we should probably
+ * use here, but it's only available when CONFIG_RTC_LIB is enabled.
*/
#define SECS_PER_MINUTE (60)
#define SECS_PER_HOUR (SECS_PER_MINUTE * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
-static void unmktime(unsigned long time, long offset,
+static void unmktime(time64_t time, long offset,
int *yearp, int *monp, int *dayp,
int *hourp, int *minp, int *secp)
{
@@ -602,11 +611,10 @@ static void unmktime(unsigned long time, long offset,
/* Leap years. */
{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
};
- long int days, rem, y, wday, yday;
+ int days, rem, y, wday, yday;
const unsigned short int *ip;
- days = time / SECS_PER_DAY;
- rem = time % SECS_PER_DAY;
+ days = div_u64_rem(time, SECS_PER_DAY, &rem);
rem += offset;
while (rem < 0) {
rem += SECS_PER_DAY;
@@ -657,7 +665,7 @@ static void unmktime(unsigned long time, long offset,
int mac_hwclk(int op, struct rtc_time *t)
{
- unsigned long now;
+ time64_t now;
if (!op) { /* read */
switch (macintosh_config->adb_type) {
@@ -693,8 +701,8 @@ int mac_hwclk(int op, struct rtc_time *t)
__func__, t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
t->tm_hour, t->tm_min, t->tm_sec);
- now = mktime(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
- t->tm_hour, t->tm_min, t->tm_sec);
+ now = mktime64(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
+ t->tm_hour, t->tm_min, t->tm_sec);
switch (macintosh_config->adb_type) {
case MAC_ADB_IOP:
@@ -719,19 +727,3 @@ int mac_hwclk(int op, struct rtc_time *t)
}
return 0;
}
-
-/*
- * Set minutes/seconds in the hardware clock
- */
-
-int mac_set_clock_mmss (unsigned long nowtime)
-{
- struct rtc_time now;
-
- mac_hwclk(0, &now);
- now.tm_sec = nowtime % 60;
- now.tm_min = (nowtime / 60) % 60;
- mac_hwclk(1, &now);
-
- return 0;
-}
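RTC_OFFSET is the span between the Mac epoch (1904-01-01) and the Unix epoch (1970-01-01): 66 years containing 17 leap days is 24107 days, and 24107 * 86400 = 2082844800 seconds. The hardware counts seconds from 1904 in an unsigned 32-bit register, which overflows after roughly 136 years, hence the 2040 wrap the new comment warns about; the (u32) cast on read and lower_32_bits() on write make that width explicit rather than leaving it to the size of long. The arithmetic checks out at compile time (standalone sketch):

    /* sketch: derive the Mac->Unix epoch offset rather than trust the constant */
    #define DAYS_1904_TO_1970  (66u * 365u + 17u)    /* 24107 days */
    _Static_assert(DAYS_1904_TO_1970 * 86400u == 2082844800u,
                   "Mac epoch offset mismatch");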
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 8827b7f91402..38e2b272c220 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -71,7 +71,6 @@ void __init m68k_setup_node(int node)
pg_data_table[i] = pg_data_map + node;
}
#endif
- pg_data_map[node].bdata = bootmem_node_data + node;
node_set_online(node);
}
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 2925d795d71a..70dde040779b 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -153,31 +154,31 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
void __init cf_bootmem_alloc(void)
{
- unsigned long start_pfn;
unsigned long memstart;
/* _rambase and _ramend will be naturally page aligned */
m68k_memory[0].addr = _rambase;
m68k_memory[0].size = _ramend - _rambase;
+ memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+
/* compute total pages in system */
num_pages = PFN_DOWN(_ramend - _rambase);
/* page numbers */
memstart = PAGE_ALIGN(_ramstart);
min_low_pfn = PFN_DOWN(_rambase);
- start_pfn = PFN_DOWN(memstart);
max_pfn = max_low_pfn = PFN_DOWN(_ramend);
high_memory = (void *)_ramend;
+ /* Reserve kernel text/data/bss */
+ memblock_reserve(memstart, memstart - _rambase);
+
m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
module_fixup(NULL, __start_fixup, __stop_fixup);
- /* setup bootmem data */
+ /* setup node data */
m68k_setup_node(0);
- memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
- min_low_pfn, max_low_pfn);
- free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
}
/*
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index e490ecc7842c..4e17ecb5928a 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <asm/setup.h>
@@ -208,7 +209,7 @@ void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = { 0, };
unsigned long min_addr, max_addr;
- unsigned long addr, size, end;
+ unsigned long addr;
int i;
#ifdef DEBUG
@@ -253,34 +254,20 @@ void __init paging_init(void)
min_low_pfn = availmem >> PAGE_SHIFT;
max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
- for (i = 0; i < m68k_num_memory; i++) {
- addr = m68k_memory[i].addr;
- end = addr + m68k_memory[i].size;
- m68k_setup_node(i);
- availmem = PAGE_ALIGN(availmem);
- availmem += init_bootmem_node(NODE_DATA(i),
- availmem >> PAGE_SHIFT,
- addr >> PAGE_SHIFT,
- end >> PAGE_SHIFT);
- }
+ /* Reserve kernel text/data/bss and the memory allocated in head.S */
+ memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
/*
* Map the physical memory available into the kernel virtual
- * address space. First initialize the bootmem allocator with
- * the memory we already mapped, so map_node() has something
- * to allocate.
+ * address space. Make sure memblock will not try to allocate
+ * pages beyond the memory we already mapped in head.S
*/
- addr = m68k_memory[0].addr;
- size = m68k_memory[0].size;
- free_bootmem_node(NODE_DATA(0), availmem,
- min(m68k_init_mapped_size, size) - (availmem - addr));
- map_node(0);
- if (size > m68k_init_mapped_size)
- free_bootmem_node(NODE_DATA(0), addr + m68k_init_mapped_size,
- size - m68k_init_mapped_size);
-
- for (i = 1; i < m68k_num_memory; i++)
+ memblock_set_bottom_up(true);
+
+ for (i = 0; i < m68k_num_memory; i++) {
+ m68k_setup_node(i);
map_node(i);
+ }
flush_tlb_all();
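
The m68k memory-init changes in this series all follow the same bootmem-to-memblock pattern: register all RAM with memblock_add(), carve the kernel image out with memblock_reserve(), and, where only part of RAM is mapped this early, ask memblock for bottom-up allocation so early allocations land in memory head.S has already mapped. A condensed sketch with made-up symbols (ram_start, ram_size, kernel_end) standing in for _rambase/_ramend/availmem:

#include <linux/init.h>
#include <linux/memblock.h>

static void __init early_mem_init_sketch(phys_addr_t ram_start,
					 phys_addr_t ram_size,
					 phys_addr_t kernel_end)
{
	memblock_add(ram_start, ram_size);			/* all of RAM */
	memblock_reserve(ram_start, kernel_end - ram_start);	/* kernel image */

	/*
	 * Prefer low addresses so early allocations come from the
	 * range that is already mapped.
	 */
	memblock_set_bottom_up(true);
}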
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index f8a710fd84cd..adea549d240e 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -40,7 +40,6 @@ static void mvme147_get_model(char *model);
extern void mvme147_sched_init(irq_handler_t handler);
extern u32 mvme147_gettimeoffset(void);
extern int mvme147_hwclk (int, struct rtc_time *);
-extern int mvme147_set_clock_mmss (unsigned long);
extern void mvme147_reset (void);
@@ -92,7 +91,6 @@ void __init config_mvme147(void)
mach_init_IRQ = mvme147_init_IRQ;
arch_gettimeoffset = mvme147_gettimeoffset;
mach_hwclk = mvme147_hwclk;
- mach_set_clock_mmss = mvme147_set_clock_mmss;
mach_reset = mvme147_reset;
mach_get_model = mvme147_get_model;
@@ -164,8 +162,3 @@ int mvme147_hwclk(int op, struct rtc_time *t)
}
return 0;
}
-
-int mvme147_set_clock_mmss (unsigned long nowtime)
-{
- return 0;
-}
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 4ffd9ef98de4..6ee36a5b528d 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -46,7 +46,6 @@ static void mvme16x_get_model(char *model);
extern void mvme16x_sched_init(irq_handler_t handler);
extern u32 mvme16x_gettimeoffset(void);
extern int mvme16x_hwclk (int, struct rtc_time *);
-extern int mvme16x_set_clock_mmss (unsigned long);
extern void mvme16x_reset (void);
int bcd2int (unsigned char b);
@@ -280,7 +279,6 @@ void __init config_mvme16x(void)
mach_init_IRQ = mvme16x_init_IRQ;
arch_gettimeoffset = mvme16x_gettimeoffset;
mach_hwclk = mvme16x_hwclk;
- mach_set_clock_mmss = mvme16x_set_clock_mmss;
mach_reset = mvme16x_reset;
mach_get_model = mvme16x_get_model;
mach_get_hardware_list = mvme16x_get_hardware_list;
@@ -411,9 +409,3 @@ int mvme16x_hwclk(int op, struct rtc_time *t)
}
return 0;
}
-
-int mvme16x_set_clock_mmss (unsigned long nowtime)
-{
- return 0;
-}
-
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 71c0867ecf20..96810d91da2b 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -43,7 +43,6 @@ extern void q40_sched_init(irq_handler_t handler);
static u32 q40_gettimeoffset(void);
static int q40_hwclk(int, struct rtc_time *);
static unsigned int q40_get_ss(void);
-static int q40_set_clock_mmss(unsigned long);
static int q40_get_rtc_pll(struct rtc_pll_info *pll);
static int q40_set_rtc_pll(struct rtc_pll_info *pll);
@@ -175,7 +174,6 @@ void __init config_q40(void)
mach_get_ss = q40_get_ss;
mach_get_rtc_pll = q40_get_rtc_pll;
mach_set_rtc_pll = q40_set_rtc_pll;
- mach_set_clock_mmss = q40_set_clock_mmss;
mach_reset = q40_reset;
mach_get_model = q40_get_model;
@@ -267,34 +265,6 @@ static unsigned int q40_get_ss(void)
return bcd2bin(Q40_RTC_SECS);
}
-/*
- * Set the minutes and seconds from seconds value 'nowtime'. Fail if
- * clock is out by > 30 minutes. Logic lifted from atari code.
- */
-
-static int q40_set_clock_mmss(unsigned long nowtime)
-{
- int retval = 0;
- short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-
- int rtc_minutes;
-
- rtc_minutes = bcd2bin(Q40_RTC_MINS);
-
- if ((rtc_minutes < real_minutes ?
- real_minutes - rtc_minutes :
- rtc_minutes - real_minutes) < 30) {
- Q40_RTC_CTRL |= Q40_RTC_WRITE;
- Q40_RTC_MINS = bin2bcd(real_minutes);
- Q40_RTC_SECS = bin2bcd(real_seconds);
- Q40_RTC_CTRL &= ~(Q40_RTC_WRITE);
- } else
- retval = -1;
-
- return retval;
-}
-
-
/* get and set PLL calibration of RTC clock */
#define Q40_RTC_PLL_MASK ((1<<5)-1)
#define Q40_RTC_PLL_SIGN (1<<5)
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 1d28d380e8cc..79a2bb857906 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -123,10 +123,6 @@ static void __init sun3_bootmem_alloc(unsigned long memory_start,
availmem = memory_start;
m68k_setup_node(0);
- availmem += init_bootmem(start_page, num_pages);
- availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK;
-
- free_bootmem(__pa(availmem), memory_end - (availmem));
}
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index d14782100088..6163a39ddeb6 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -1,5 +1,6 @@
config MICROBLAZE
def_bool y
+ select ARCH_NO_SWAP
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
@@ -52,9 +53,6 @@ config CPU_LITTLE_ENDIAN
endchoice
-config SWAP
- def_bool n
-
config RWSEM_GENERIC_SPINLOCK
def_bool y
@@ -85,16 +83,10 @@ config STACKTRACE_SUPPORT
config LOCKDEP_SUPPORT
def_bool y
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
source "arch/microblaze/Kconfig.platform"
menu "Processor type and features"
-source "kernel/Kconfig.preempt"
-
source "kernel/Kconfig.hz"
config MMU
@@ -268,14 +260,6 @@ config MICROBLAZE_64K_PAGES
endchoice
-source "mm/Kconfig"
-
-endmenu
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
endmenu
menu "Bus Options"
@@ -299,17 +283,3 @@ config PCI_XILINX
source "drivers/pci/Kconfig"
endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/microblaze/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug
index 93a737c8d1a6..dc2e3c45e8a2 100644
--- a/arch/microblaze/Kconfig.debug
+++ b/arch/microblaze/Kconfig.debug
@@ -1,11 +1,5 @@
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/kconfig-language.txt.
-menu "Kernel hacking"
-
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-
-source "lib/Kconfig.debug"
-
-endmenu
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 08c10c518f83..2af13b162e5e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -16,6 +16,7 @@ config MIPS
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select CPU_PM if CPU_IDLE
+ select DMA_DIRECT_OPS
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
@@ -97,6 +98,7 @@ config MIPS_GENERIC
select HW_HAS_PCI
select IRQ_MIPS_CPU
select LIBFDT
+ select MIPS_AUTO_PFN_OFFSET
select MIPS_CPU_SCACHE
select MIPS_GIC
select MIPS_L1_CACHE_SHIFT_7
@@ -193,6 +195,7 @@ config ATH79
select CSRC_R4K
select DMA_NONCOHERENT
select GPIOLIB
+ select PINCTRL
select HAVE_CLK
select COMMON_CLK
select CLKDEV_LOOKUP
@@ -211,6 +214,8 @@ config ATH79
config BMIPS_GENERIC
bool "Broadcom Generic BMIPS kernel"
+ select ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+ select ARCH_HAS_PHYS_TO_DMA
select BOOT_RAW
select NO_EXCEPT_FILL
select USE_OF
@@ -438,7 +443,6 @@ config MACH_LOONGSON32
config MACH_LOONGSON64
bool "Loongson-2/3 family of machines"
- select ARCH_HAS_PHYS_TO_DMA
select SYS_SUPPORTS_ZBOOT
help
This enables the support of Loongson-2/3 family of machines.
@@ -662,11 +666,11 @@ config SGI_IP22
config SGI_IP27
bool "SGI IP27 (Origin200/2000)"
+ select ARCH_HAS_PHYS_TO_DMA
select FW_ARC
select FW_ARC64
select BOOT_ELF64
select DEFAULT_SGI_PARTITION
- select DMA_COHERENT
select SYS_HAS_EARLY_PRINTK
select HW_HAS_PCI
select NR_CPUS_DEFAULT_64
@@ -721,6 +725,7 @@ config SGI_IP28
config SGI_IP32
bool "SGI IP32 (O2)"
+ select ARCH_HAS_PHYS_TO_DMA
select FW_ARC
select FW_ARC32
select BOOT_ELF32
@@ -743,7 +748,6 @@ config SGI_IP32
config SIBYTE_CRHINE
bool "Sibyte BCM91120C-CRhine"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_BCM1120
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -753,7 +757,6 @@ config SIBYTE_CRHINE
config SIBYTE_CARMEL
bool "Sibyte BCM91120x-Carmel"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_BCM1120
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -763,7 +766,6 @@ config SIBYTE_CARMEL
config SIBYTE_CRHONE
bool "Sibyte BCM91125C-CRhone"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_BCM1125
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -774,7 +776,6 @@ config SIBYTE_CRHONE
config SIBYTE_RHONE
bool "Sibyte BCM91125E-Rhone"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_BCM1125H
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -784,7 +785,6 @@ config SIBYTE_RHONE
config SIBYTE_SWARM
bool "Sibyte BCM91250A-SWARM"
select BOOT_ELF32
- select DMA_COHERENT
select HAVE_PATA_PLATFORM
select SIBYTE_SB1250
select SWAP_IO_SPACE
@@ -797,7 +797,6 @@ config SIBYTE_SWARM
config SIBYTE_LITTLESUR
bool "Sibyte BCM91250C2-LittleSur"
select BOOT_ELF32
- select DMA_COHERENT
select HAVE_PATA_PLATFORM
select SIBYTE_SB1250
select SWAP_IO_SPACE
@@ -809,7 +808,6 @@ config SIBYTE_LITTLESUR
config SIBYTE_SENTOSA
bool "Sibyte BCM91250E-Sentosa"
select BOOT_ELF32
- select DMA_COHERENT
select SIBYTE_SB1250
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -819,7 +817,6 @@ config SIBYTE_SENTOSA
config SIBYTE_BIGSUR
bool "Sibyte BCM91480B-BigSur"
select BOOT_ELF32
- select DMA_COHERENT
select NR_CPUS_DEFAULT_4
select SIBYTE_BCM1x80
select SWAP_IO_SPACE
@@ -895,8 +892,8 @@ config CAVIUM_OCTEON_SOC
bool "Cavium Networks Octeon SoC based boards"
select CEVT_R4K
select ARCH_HAS_PHYS_TO_DMA
+ select HAS_RAPIDIO
select PHYS_ADDR_T_64BIT
- select DMA_COHERENT
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select EDAC_SUPPORT
@@ -945,7 +942,6 @@ config NLM_XLR_BOARD
select PHYS_ADDR_T_64BIT
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_HIGHMEM
- select DMA_COHERENT
select NR_CPUS_DEFAULT_32
select CEVT_R4K
select CSRC_R4K
@@ -973,7 +969,6 @@ config NLM_XLP_BOARD
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_HIGHMEM
- select DMA_COHERENT
select NR_CPUS_DEFAULT_32
select CEVT_R4K
select CSRC_R4K
@@ -992,7 +987,6 @@ config MIPS_PARAVIRT
bool "Para-Virtualized guest system"
select CEVT_R4K
select CSRC_R4K
- select DMA_COHERENT
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
@@ -1118,12 +1112,14 @@ config DMA_PERDEV_COHERENT
bool
select DMA_MAYBE_COHERENT
-config DMA_COHERENT
- bool
-
config DMA_NONCOHERENT
bool
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
select NEED_DMA_MAP_STATE
+ select DMA_NONCOHERENT_MMAP
+ select DMA_NONCOHERENT_CACHE_SYNC
+ select DMA_NONCOHERENT_OPS
config SYS_HAS_EARLY_PRINTK
bool
@@ -1365,6 +1361,7 @@ choice
config CPU_LOONGSON3
bool "Loongson 3 CPU"
depends on SYS_HAS_CPU_LOONGSON3
+ select ARCH_HAS_PHYS_TO_DMA
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
@@ -1427,7 +1424,8 @@ config CPU_LOONGSON1B
select LEDS_GPIO_REGISTER
help
The Loongson 1B is a 32-bit SoC, which implements the MIPS32
- release 2 instruction set.
+ Release 1 instruction set and part of the MIPS32 Release 2
+ instruction set.
config CPU_LOONGSON1C
bool "Loongson 1C"
@@ -1436,7 +1434,8 @@ config CPU_LOONGSON1C
select LEDS_GPIO_REGISTER
help
The Loongson 1C is a 32-bit SoC, which implements the MIPS32
- release 2 instruction set.
+ Release 1 instruction set and part of the MIPS32 Release 2
+ instruction set.
config CPU_MIPS32_R1
bool "MIPS32 Release 1"
@@ -1831,11 +1830,12 @@ config CPU_LOONGSON2
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
+ select ARCH_HAS_PHYS_TO_DMA
config CPU_LOONGSON1
bool
select CPU_MIPS32
- select CPU_MIPSR2
+ select CPU_MIPSR1
select CPU_HAS_PREFETCH
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
@@ -1979,12 +1979,6 @@ config SYS_HAS_CPU_XLR
config SYS_HAS_CPU_XLP
bool
-config MIPS_MALTA_PM
- depends on MIPS_MALTA
- depends on PCI
- bool
- default y
-
#
# CPU may reorder R->R, R->W, W->R, W->W
# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
@@ -2641,8 +2635,6 @@ config HW_PERF_EVENTS
Enable hardware performance counter support for perf events. If
disabled, perf events will use software events only.
-source "mm/Kconfig"
-
config SMP
bool "Multi-Processing support"
depends on SYS_SUPPORTS_SMP
@@ -2820,8 +2812,6 @@ config HZ
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
-source "kernel/Kconfig.preempt"
-
config KEXEC
bool "Kexec system call"
select KEXEC_CORE
@@ -2994,9 +2984,8 @@ config PGTABLE_LEVELS
default 3 if 64BIT && !PAGE_SIZE_64KB
default 2
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
+config MIPS_AUTO_PFN_OFFSET
+ bool
menu "Bus options (PCI, PCMCIA, EISA, ISA, TC)"
@@ -3115,10 +3104,13 @@ config ZONE_DMA32
source "drivers/pcmcia/Kconfig"
+config HAS_RAPIDIO
+ bool
+ default n
+
config RAPIDIO
tristate "RapidIO support"
- depends on PCI
- default n
+ depends on HAS_RAPIDIO || PCI
help
If you say Y here, the kernel will include drivers and
infrastructure code to support RapidIO interconnect devices.
@@ -3127,10 +3119,6 @@ source "drivers/rapidio/Kconfig"
endmenu
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
config TRAD_SIGNALS
bool
@@ -3176,8 +3164,6 @@ config BINFMT_ELF32
default y if MIPS32_O32 || MIPS32_N32
select ELFCORE
-endmenu
-
menu "Power management options"
config ARCH_HIBERNATION_POSSIBLE
@@ -3205,20 +3191,6 @@ source "drivers/cpuidle/Kconfig"
endmenu
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
source "drivers/firmware/Kconfig"
-source "fs/Kconfig"
-
-source "arch/mips/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
source "arch/mips/kvm/Kconfig"
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 0749c3724543..0c86b2a2adfc 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -1,12 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
bool
default y
-source "lib/Kconfig.debug"
-
config EARLY_PRINTK
bool "Early printk" if EXPERT
depends on SYS_HAS_EARLY_PRINTK
@@ -155,5 +152,3 @@ config MIPS_CPS_NS16550_SHIFT
adjacent ns16550 registers in the system.
endif # MIPS_CPS_NS16550_BOOL
-
-endmenu
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index e2122cca4ae2..5425df002a6b 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -122,12 +122,22 @@ cflags-y += -ffreestanding
# are used, so we kludge that here. A bug has been filed at
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=29413.
#
+# clang doesn't suffer from these issues and our checks against -dumpmachine
+# don't work so well when cross compiling, since without providing --target
+# clang's output will be based upon the build machine. So for clang we simply
+# unconditionally specify -EB or -EL as appropriate.
+#
+ifeq ($(cc-name),clang)
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -EL
+else
undef-all += -UMIPSEB -U_MIPSEB -U__MIPSEB -U__MIPSEB__
undef-all += -UMIPSEL -U_MIPSEL -U__MIPSEL -U__MIPSEL__
predef-be += -DMIPSEB -D_MIPSEB -D__MIPSEB -D__MIPSEB__
predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
+endif
cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
-fno-omit-frame-pointer
@@ -155,15 +165,11 @@ cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap
cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap
cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
- -Wa,-mips32 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
- -Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg
-cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
- -Wa,-mips64 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
- -Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap
cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index fa75d75b5ba9..ddff9a02513d 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -34,6 +34,7 @@
#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <prom.h>
@@ -60,7 +61,7 @@ void __init prom_init(void)
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
{
alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
}
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index aab55aaf3d62..d625e6f99ae7 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -31,6 +31,7 @@
#include <mtd/mtd-abi.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <asm/mach-au1x00/au1xxx_eth.h>
@@ -58,7 +59,7 @@ void __init prom_init(void)
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
{
alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
}
diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c
index 0fc53e08a894..5f05b8714385 100644
--- a/arch/mips/alchemy/board-xxs1500.c
+++ b/arch/mips/alchemy/board-xxs1500.c
@@ -29,6 +29,7 @@
#include <linux/pm.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <prom.h>
@@ -55,7 +56,7 @@ void __init prom_init(void)
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
{
alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
}
diff --git a/arch/mips/alchemy/devboards/platform.c b/arch/mips/alchemy/devboards/platform.c
index 203854ddd1bb..8d4b65c3268a 100644
--- a/arch/mips/alchemy/devboards/platform.c
+++ b/arch/mips/alchemy/devboards/platform.c
@@ -14,6 +14,7 @@
#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-db1x00/bcsr.h>
@@ -36,7 +37,7 @@ void __init prom_init(void)
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
-void prom_putchar(unsigned char c)
+void prom_putchar(char c)
{
if (alchemy_get_cputype() == ALCHEMY_CPU_AU1300)
alchemy_uart_putchar(AU1300_UART2_PHYS_ADDR, c);
diff --git a/arch/mips/ar7/clock.c b/arch/mips/ar7/clock.c
index 0137656107a9..6b64fd96dba8 100644
--- a/arch/mips/ar7/clock.c
+++ b/arch/mips/ar7/clock.c
@@ -476,3 +476,32 @@ void __init ar7_init_clocks(void)
/* adjust vbus clock rate */
vbus_clk.rate = bus_clk.rate / 2;
}
+
+/* dummy functions, should not be called */
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+ WARN_ON(clk);
+ return NULL;
+}
+EXPORT_SYMBOL(clk_get_parent);
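
These stubs exist so that generic code referencing the common clk API still links on ar7, which carries its own minimal clock implementation; the WARN_ON() fires if anything passes a real clock to an operation ar7 does not support. A hypothetical consumer sketch, assuming a clock named "cpu" is registered (only clk_get()/clk_get_rate() are expected to be used here):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/printk.h>

static void ar7_clk_usage_sketch(void)
{
	struct clk *clk = clk_get(NULL, "cpu");

	if (IS_ERR(clk))
		return;
	pr_info("cpu clock: %lu Hz\n", clk_get_rate(clk));
	clk_put(clk);
}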
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
index dd53987a690f..2ec8d9ac91ec 100644
--- a/arch/mips/ar7/prom.c
+++ b/arch/mips/ar7/prom.c
@@ -25,6 +25,7 @@
#include <linux/string.h>
#include <linux/io.h>
#include <asm/bootinfo.h>
+#include <asm/setup.h>
#include <asm/mach-ar7/ar7.h>
#include <asm/mach-ar7/prom.h>
@@ -259,10 +260,9 @@ static inline void serial_out(int offset, int value)
writel(value, (void *)PORT(offset));
}
-int prom_putchar(char c)
+void prom_putchar(char c)
{
while ((serial_in(UART_LSR) & UART_LSR_TEMT) == 0)
;
serial_out(UART_TX, c);
- return 1;
}
diff --git a/arch/mips/ath25/Kconfig b/arch/mips/ath25/Kconfig
index 7070b4bcd01d..2c1dfd06c366 100644
--- a/arch/mips/ath25/Kconfig
+++ b/arch/mips/ath25/Kconfig
@@ -12,6 +12,7 @@ config SOC_AR2315
config PCI_AR2315
bool "Atheros AR2315 PCI controller support"
depends on SOC_AR2315
+ select ARCH_HAS_PHYS_TO_DMA
select HW_HAS_PCI
select PCI
default y
diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
index 6d11ae581ea7..989e71015ee6 100644
--- a/arch/mips/ath25/board.c
+++ b/arch/mips/ath25/board.c
@@ -146,10 +146,10 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
pr_info("Fixing up empty mac addresses\n");
config->reset_config_gpio = 0xffff;
config->sys_led_gpio = 0xffff;
- random_ether_addr(config->wlan0_mac);
+ eth_random_addr(config->wlan0_mac);
config->wlan0_mac[0] &= ~0x06;
- random_ether_addr(config->enet0_mac);
- random_ether_addr(config->enet1_mac);
+ eth_random_addr(config->enet0_mac);
+ eth_random_addr(config->enet1_mac);
}
}
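
random_ether_addr() was simply the older name for the same helper: eth_random_addr() fills in six random bytes, clears the multicast bit and sets the locally-administered bit of the first octet, so the result is always a valid unicast MAC (the board code above then clears further bits on wlan0 by hand). A minimal sketch:

#include <linux/etherdevice.h>

static void random_mac_sketch(u8 mac[ETH_ALEN])
{
	eth_random_addr(mac);	/* random, unicast, locally administered */
}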
diff --git a/arch/mips/ath25/early_printk.c b/arch/mips/ath25/early_printk.c
index 36035b628161..d534761e9cda 100644
--- a/arch/mips/ath25/early_printk.c
+++ b/arch/mips/ath25/early_printk.c
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/serial_reg.h>
+#include <asm/setup.h>
#include "devices.h"
#include "ar2315_regs.h"
@@ -25,7 +26,7 @@ static inline unsigned char prom_uart_rr(void __iomem *base, unsigned reg)
return __raw_readl(base + 4 * reg);
}
-void prom_putchar(unsigned char ch)
+void prom_putchar(char ch)
{
static void __iomem *base;
@@ -38,7 +39,7 @@ void prom_putchar(unsigned char ch)
while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
;
- prom_uart_wr(base, UART_TX, ch);
+ prom_uart_wr(base, UART_TX, (unsigned char)ch);
while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
;
}
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index 6b1000b6a6a6..cf9158e3c2d9 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -355,6 +355,91 @@ static void __init ar934x_clocks_init(void)
iounmap(dpll_base);
}
+static void __init qca953x_clocks_init(void)
+{
+ unsigned long ref_rate;
+ unsigned long cpu_rate;
+ unsigned long ddr_rate;
+ unsigned long ahb_rate;
+ u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
+
+ bootstrap = ath79_reset_rr(QCA953X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & QCA953X_BOOTSTRAP_REF_CLK_40)
+ ref_rate = 40 * 1000 * 1000;
+ else
+ ref_rate = 25 * 1000 * 1000;
+
+ pll = ath79_pll_rr(QCA953X_PLL_CPU_CONFIG_REG);
+ out_div = (pll >> QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_REFDIV_MASK;
+ nint = (pll >> QCA953X_PLL_CPU_CONFIG_NINT_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_NINT_MASK;
+ frac = (pll >> QCA953X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_NFRAC_MASK;
+
+ cpu_pll = nint * ref_rate / ref_div;
+ cpu_pll += frac * (ref_rate >> 6) / ref_div;
+ cpu_pll /= (1 << out_div);
+
+ pll = ath79_pll_rr(QCA953X_PLL_DDR_CONFIG_REG);
+ out_div = (pll >> QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_REFDIV_MASK;
+ nint = (pll >> QCA953X_PLL_DDR_CONFIG_NINT_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_NINT_MASK;
+ frac = (pll >> QCA953X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_NFRAC_MASK;
+
+ ddr_pll = nint * ref_rate / ref_div;
+ ddr_pll += frac * (ref_rate >> 6) / (ref_div << 4);
+ ddr_pll /= (1 << out_div);
+
+ clk_ctrl = ath79_pll_rr(QCA953X_PLL_CLK_CTRL_REG);
+
+ postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+ QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA953X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
+ cpu_rate = ref_rate;
+ else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL)
+ cpu_rate = cpu_pll / (postdiv + 1);
+ else
+ cpu_rate = ddr_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+ QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA953X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
+ ddr_rate = ref_rate;
+ else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL)
+ ddr_rate = ddr_pll / (postdiv + 1);
+ else
+ ddr_rate = cpu_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+ QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA953X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
+ ahb_rate = ref_rate;
+ else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+ ahb_rate = ddr_pll / (postdiv + 1);
+ else
+ ahb_rate = cpu_pll / (postdiv + 1);
+
+ ath79_add_sys_clkdev("ref", ref_rate);
+ ath79_add_sys_clkdev("cpu", cpu_rate);
+ ath79_add_sys_clkdev("ddr", ddr_rate);
+ ath79_add_sys_clkdev("ahb", ahb_rate);
+
+ clk_add_alias("wdt", NULL, "ref", NULL);
+ clk_add_alias("uart", NULL, "ref", NULL);
+}
+
static void __init qca955x_clocks_init(void)
{
unsigned long ref_rate;
@@ -440,6 +525,110 @@ static void __init qca955x_clocks_init(void)
clk_add_alias("uart", NULL, "ref", NULL);
}
+static void __init qca956x_clocks_init(void)
+{
+ unsigned long ref_rate;
+ unsigned long cpu_rate;
+ unsigned long ddr_rate;
+ unsigned long ahb_rate;
+ u32 pll, out_div, ref_div, nint, hfrac, lfrac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
+
+ /*
+	 * The QCA956x timer init workaround has to be applied right before
+	 * the clocks are set up; otherwise there will be no jiffies.
+ */
+ u32 misc;
+
+ misc = ath79_reset_rr(AR71XX_RESET_REG_MISC_INT_ENABLE);
+ misc |= MISC_INT_MIPS_SI_TIMERINT_MASK;
+ ath79_reset_wr(AR71XX_RESET_REG_MISC_INT_ENABLE, misc);
+
+ bootstrap = ath79_reset_rr(QCA956X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & QCA956X_BOOTSTRAP_REF_CLK_40)
+ ref_rate = 40 * 1000 * 1000;
+ else
+ ref_rate = 25 * 1000 * 1000;
+
+ pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG_REG);
+ out_div = (pll >> QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG_REFDIV_MASK;
+
+ pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG1_REG);
+ nint = (pll >> QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG1_NINT_MASK;
+ hfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG1_NFRAC_H_MASK;
+ lfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_L_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG1_NFRAC_L_MASK;
+
+ cpu_pll = nint * ref_rate / ref_div;
+ cpu_pll += (lfrac * ref_rate) / ((ref_div * 25) << 13);
+ cpu_pll += (hfrac >> 13) * ref_rate / ref_div;
+ cpu_pll /= (1 << out_div);
+
+ pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG_REG);
+ out_div = (pll >> QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG_REFDIV_MASK;
+ pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG1_REG);
+ nint = (pll >> QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG1_NINT_MASK;
+ hfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG1_NFRAC_H_MASK;
+ lfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_L_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG1_NFRAC_L_MASK;
+
+ ddr_pll = nint * ref_rate / ref_div;
+ ddr_pll += (lfrac * ref_rate) / ((ref_div * 25) << 13);
+ ddr_pll += (hfrac >> 13) * ref_rate / ref_div;
+ ddr_pll /= (1 << out_div);
+
+ clk_ctrl = ath79_pll_rr(QCA956X_PLL_CLK_CTRL_REG);
+
+ postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+ QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
+ cpu_rate = ref_rate;
+ else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_CPUPLL)
+ cpu_rate = ddr_pll / (postdiv + 1);
+ else
+ cpu_rate = cpu_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+ QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA956X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
+ ddr_rate = ref_rate;
+ else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_DDRPLL)
+ ddr_rate = cpu_pll / (postdiv + 1);
+ else
+ ddr_rate = ddr_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+ QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA956X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
+ ahb_rate = ref_rate;
+ else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+ ahb_rate = ddr_pll / (postdiv + 1);
+ else
+ ahb_rate = cpu_pll / (postdiv + 1);
+
+ ath79_add_sys_clkdev("ref", ref_rate);
+ ath79_add_sys_clkdev("cpu", cpu_rate);
+ ath79_add_sys_clkdev("ddr", ddr_rate);
+ ath79_add_sys_clkdev("ahb", ahb_rate);
+
+ clk_add_alias("wdt", NULL, "ref", NULL);
+ clk_add_alias("uart", NULL, "ref", NULL);
+}
+
void __init ath79_clocks_init(void)
{
if (soc_is_ar71xx())
@@ -450,8 +639,12 @@ void __init ath79_clocks_init(void)
ar933x_clocks_init();
else if (soc_is_ar934x())
ar934x_clocks_init();
+ else if (soc_is_qca953x())
+ qca953x_clocks_init();
else if (soc_is_qca955x())
qca955x_clocks_init();
+ else if (soc_is_qca956x() || soc_is_tp9343())
+ qca956x_clocks_init();
else
BUG();
}
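
All of these clock-probe routines share the same PLL arithmetic: rate = (nint + frac / 2^shift) * ref / ref_div, divided by 2^out_div. A worked sketch of the QCA953x CPU path with assumed field values (not read from hardware): ref = 25 MHz, nint = 26, frac = 0, ref_div = 1, out_div = 0 yields 650 MHz.

#include <linux/types.h>

static unsigned long qca953x_cpu_pll_sketch(void)
{
	unsigned long ref_rate = 25 * 1000 * 1000;
	u32 nint = 26, frac = 0, ref_div = 1, out_div = 0;
	u32 cpu_pll;

	cpu_pll = nint * ref_rate / ref_div;		/* 650000000 */
	cpu_pll += frac * (ref_rate >> 6) / ref_div;	/* fractional part */
	cpu_pll /= (1 << out_div);			/* output divider */

	return cpu_pll;
}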
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index c782b10ddf50..cd6055f9e7a0 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -103,8 +103,12 @@ void ath79_device_reset_set(u32 mask)
reg = AR933X_RESET_REG_RESET_MODULE;
else if (soc_is_ar934x())
reg = AR934X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca953x())
+ reg = QCA953X_RESET_REG_RESET_MODULE;
else if (soc_is_qca955x())
reg = QCA955X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca956x() || soc_is_tp9343())
+ reg = QCA956X_RESET_REG_RESET_MODULE;
else
BUG();
@@ -131,8 +135,12 @@ void ath79_device_reset_clear(u32 mask)
reg = AR933X_RESET_REG_RESET_MODULE;
else if (soc_is_ar934x())
reg = AR934X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca953x())
+ reg = QCA953X_RESET_REG_RESET_MODULE;
else if (soc_is_qca955x())
reg = QCA955X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca956x() || soc_is_tp9343())
+ reg = QCA956X_RESET_REG_RESET_MODULE;
else
BUG();
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
index d1adc59af5bf..4b1063117ef7 100644
--- a/arch/mips/ath79/early_printk.c
+++ b/arch/mips/ath79/early_printk.c
@@ -13,12 +13,13 @@
#include <linux/errno.h>
#include <linux/serial_reg.h>
#include <asm/addrspace.h>
+#include <asm/setup.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ar933x_uart.h>
-static void (*_prom_putchar) (unsigned char);
+static void (*_prom_putchar)(char);
static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
{
@@ -33,31 +34,72 @@ static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-static void prom_putchar_ar71xx(unsigned char ch)
+static void prom_putchar_ar71xx(char ch)
{
void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));
prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
- __raw_writel(ch, base + UART_TX * 4);
+ __raw_writel((unsigned char)ch, base + UART_TX * 4);
prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
}
-static void prom_putchar_ar933x(unsigned char ch)
+static void prom_putchar_ar933x(char ch)
{
void __iomem *base = (void __iomem *)(KSEG1ADDR(AR933X_UART_BASE));
prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR,
AR933X_UART_DATA_TX_CSR);
- __raw_writel(AR933X_UART_DATA_TX_CSR | ch, base + AR933X_UART_DATA_REG);
+ __raw_writel(AR933X_UART_DATA_TX_CSR | (unsigned char)ch,
+ base + AR933X_UART_DATA_REG);
prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR,
AR933X_UART_DATA_TX_CSR);
}
-static void prom_putchar_dummy(unsigned char ch)
+static void prom_putchar_dummy(char ch)
{
/* nothing to do */
}
+static void prom_enable_uart(u32 id)
+{
+ void __iomem *gpio_base;
+ u32 uart_en;
+ u32 t;
+
+ switch (id) {
+ case REV_ID_MAJOR_AR71XX:
+ uart_en = AR71XX_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR7240:
+ case REV_ID_MAJOR_AR7241:
+ case REV_ID_MAJOR_AR7242:
+ uart_en = AR724X_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR913X:
+ uart_en = AR913X_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR9330:
+ case REV_ID_MAJOR_AR9331:
+ uart_en = AR933X_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR9341:
+ case REV_ID_MAJOR_AR9342:
+ case REV_ID_MAJOR_AR9344:
+ /* TODO */
+ default:
+ return;
+ }
+
+ gpio_base = (void __iomem *)KSEG1ADDR(AR71XX_GPIO_BASE);
+ t = __raw_readl(gpio_base + AR71XX_GPIO_REG_FUNC);
+ t |= uart_en;
+ __raw_writel(t, gpio_base + AR71XX_GPIO_REG_FUNC);
+}
+
static void prom_putchar_init(void)
{
void __iomem *base;
@@ -76,8 +118,12 @@ static void prom_putchar_init(void)
case REV_ID_MAJOR_AR9341:
case REV_ID_MAJOR_AR9342:
case REV_ID_MAJOR_AR9344:
+ case REV_ID_MAJOR_QCA9533:
+ case REV_ID_MAJOR_QCA9533_V2:
case REV_ID_MAJOR_QCA9556:
case REV_ID_MAJOR_QCA9558:
+ case REV_ID_MAJOR_TP9343:
+ case REV_ID_MAJOR_QCA956X:
_prom_putchar = prom_putchar_ar71xx;
break;
@@ -88,11 +134,13 @@ static void prom_putchar_init(void)
default:
_prom_putchar = prom_putchar_dummy;
- break;
+ return;
}
+
+ prom_enable_uart(id);
}
-void prom_putchar(unsigned char ch)
+void prom_putchar(char ch)
{
if (!_prom_putchar)
prom_putchar_init();
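
The pattern here is a function pointer resolved on first use, so the very first early-printk character can trigger SoC detection long before any initcalls have run. Reduced to a self-contained sketch with illustrative names:

static void putchar_null(char ch) { }		/* placeholder backend */
static void (*putchar_impl)(char);

static void putchar_dispatch(char ch)
{
	if (!putchar_impl)
		putchar_impl = putchar_null;	/* real code probes the SoC */
	putchar_impl(ch);
}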
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index f206dafbb0a3..4c7a93f4039a 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -40,6 +40,7 @@ static char ath79_sys_type[ATH79_SYS_TYPE_LEN];
static void ath79_restart(char *command)
{
+ local_irq_disable();
ath79_device_reset_set(AR71XX_RESET_FULL_CHIP);
for (;;)
if (cpu_wait)
@@ -59,6 +60,7 @@ static void __init ath79_detect_sys_type(void)
u32 major;
u32 minor;
u32 rev = 0;
+ u32 ver = 1;
id = ath79_reset_rr(AR71XX_RESET_REG_REV_ID);
major = id & REV_ID_MAJOR_MASK;
@@ -151,6 +153,17 @@ static void __init ath79_detect_sys_type(void)
rev = id & AR934X_REV_ID_REVISION_MASK;
break;
+ case REV_ID_MAJOR_QCA9533_V2:
+ ver = 2;
+ ath79_soc_rev = 2;
+		/* fall through */
+
+ case REV_ID_MAJOR_QCA9533:
+ ath79_soc = ATH79_SOC_QCA9533;
+ chip = "9533";
+ rev = id & QCA953X_REV_ID_REVISION_MASK;
+ break;
+
case REV_ID_MAJOR_QCA9556:
ath79_soc = ATH79_SOC_QCA9556;
chip = "9556";
@@ -163,14 +176,30 @@ static void __init ath79_detect_sys_type(void)
rev = id & QCA955X_REV_ID_REVISION_MASK;
break;
+ case REV_ID_MAJOR_QCA956X:
+ ath79_soc = ATH79_SOC_QCA956X;
+ chip = "956X";
+ rev = id & QCA956X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_TP9343:
+ ath79_soc = ATH79_SOC_TP9343;
+ chip = "9343";
+ rev = id & QCA956X_REV_ID_REVISION_MASK;
+ break;
+
default:
panic("ath79: unknown SoC, id:0x%08x", id);
}
- ath79_soc_rev = rev;
+ if (ver == 1)
+ ath79_soc_rev = rev;
- if (soc_is_qca955x())
- sprintf(ath79_sys_type, "Qualcomm Atheros QCA%s rev %u",
+ if (soc_is_qca953x() || soc_is_qca955x() || soc_is_qca956x())
+ sprintf(ath79_sys_type, "Qualcomm Atheros QCA%s ver %u rev %u",
+ chip, ver, rev);
+ else if (soc_is_tp9343())
+ sprintf(ath79_sys_type, "Qualcomm Atheros TP%s rev %u",
chip, rev);
else
sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev);
diff --git a/arch/mips/bcm63xx/early_printk.c b/arch/mips/bcm63xx/early_printk.c
index 6092226a6d76..9e9ec27c282f 100644
--- a/arch/mips/bcm63xx/early_printk.c
+++ b/arch/mips/bcm63xx/early_printk.c
@@ -8,6 +8,7 @@
#include <bcm63xx_io.h>
#include <linux/serial_bcm63xx.h>
+#include <asm/setup.h>
static void wait_xfered(void)
{
diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
index 6dec30842b2f..3d13c77c125f 100644
--- a/arch/mips/bmips/dma.c
+++ b/arch/mips/bmips/dma.c
@@ -17,7 +17,7 @@
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <dma-coherence.h>
+#include <asm/bmips.h>
/*
* BCM338x has configurable address translation windows which allow the
@@ -40,7 +40,7 @@ static struct bmips_dma_range *bmips_dma_ranges;
#define FLUSH_RAC 0x100
-static dma_addr_t bmips_phys_to_dma(struct device *dev, phys_addr_t pa)
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t pa)
{
struct bmips_dma_range *r;
@@ -52,17 +52,7 @@ static dma_addr_t bmips_phys_to_dma(struct device *dev, phys_addr_t pa)
return pa;
}
-dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
-{
- return bmips_phys_to_dma(dev, virt_to_phys(addr));
-}
-
-dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
-{
- return bmips_phys_to_dma(dev, page_to_phys(page));
-}
-
-unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
struct bmips_dma_range *r;
@@ -74,6 +64,22 @@ unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
return dma_addr;
}
+void arch_sync_dma_for_cpu_all(struct device *dev)
+{
+ void __iomem *cbr = BMIPS_GET_CBR();
+ u32 cfg;
+
+ if (boot_cpu_type() != CPU_BMIPS3300 &&
+ boot_cpu_type() != CPU_BMIPS4350 &&
+ boot_cpu_type() != CPU_BMIPS4380)
+ return;
+
+ /* Flush stale data out of the readahead cache */
+ cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
+ __raw_readl(cbr + BMIPS_RAC_CONFIG);
+}
+
static int __init bmips_init_dma_ranges(void)
{
struct device_node *np =
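
With ARCH_HAS_PHYS_TO_DMA the platform now supplies only the address-translation pair (__phys_to_dma()/__dma_to_phys()) and lets the generic DMA code do the rest, while arch_sync_dma_for_cpu_all() keeps the readahead-cache flush. The window translation reduces to this shape, here with one made-up window instead of the device-tree supplied bmips_dma_ranges list:

#include <linux/types.h>

struct dma_window {
	u64 child_addr;		/* physical base */
	u64 parent_addr;	/* bus base */
	u64 size;
};

static dma_addr_t window_phys_to_dma_sketch(const struct dma_window *w,
					    phys_addr_t pa)
{
	if (pa >= w->child_addr && pa < w->child_addr + w->size)
		return pa - w->child_addr + w->parent_addr;
	return pa;	/* outside the window: identity mapping */
}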
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index 3b6f687f177c..231fc5ce375e 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -202,13 +202,6 @@ void __init device_tree_init(void)
of_node_put(np);
}
-int __init plat_of_setup(void)
-{
- return __dt_register_buses("simple-bus", NULL);
-}
-
-arch_initcall(plat_of_setup);
-
static int __init plat_dev_init(void)
{
of_clk_init(NULL);
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index c22da16d67b8..35704c28a28b 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -105,28 +105,29 @@ $(obj)/uImage: $(obj)/uImage.$(suffix-y)
# Flattened Image Tree (.itb) images
#
-targets += vmlinux.itb
-targets += vmlinux.gz.itb
-targets += vmlinux.bz2.itb
-targets += vmlinux.lzma.itb
-targets += vmlinux.lzo.itb
-
ifeq ($(ADDR_BITS),32)
- itb_addr_cells = 1
+itb_addr_cells = 1
endif
ifeq ($(ADDR_BITS),64)
- itb_addr_cells = 2
+itb_addr_cells = 2
endif
+targets += vmlinux.its.S
+
quiet_cmd_its_cat = CAT $@
- cmd_its_cat = cat $^ >$@
+ cmd_its_cat = cat $(filter-out $(PHONY), $^) >$@
-$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS))
+$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS)) FORCE
$(call if_changed,its_cat)
+targets += vmlinux.its
+targets += vmlinux.gz.its
+targets += vmlinux.bz2.its
+targets += vmlinux.lzma.its
+targets += vmlinux.lzo.its
+
quiet_cmd_cpp_its_S = ITS $@
- cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \
- -D__ASSEMBLY__ \
+ cmd_cpp_its_S = $(CPP) -P -C -o $@ $< \
-DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \
-DVMLINUX_BINARY="\"$(3)\"" \
-DVMLINUX_COMPRESSION="\"$(2)\"" \
@@ -136,19 +137,25 @@ quiet_cmd_cpp_its_S = ITS $@
-DADDR_CELLS=$(itb_addr_cells)
$(obj)/vmlinux.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
+ $(call if_changed,cpp_its_S,none,vmlinux.bin)
$(obj)/vmlinux.gz.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
+ $(call if_changed,cpp_its_S,gzip,vmlinux.bin.gz)
$(obj)/vmlinux.bz2.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
+ $(call if_changed,cpp_its_S,bzip2,vmlinux.bin.bz2)
$(obj)/vmlinux.lzma.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
+ $(call if_changed,cpp_its_S,lzma,vmlinux.bin.lzma)
$(obj)/vmlinux.lzo.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
- $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
+ $(call if_changed,cpp_its_S,lzo,vmlinux.bin.lzo)
+
+targets += vmlinux.itb
+targets += vmlinux.gz.itb
+targets += vmlinux.bz2.itb
+targets += vmlinux.lzma.itb
+targets += vmlinux.lzo.itb
quiet_cmd_itb-image = ITB $@
cmd_itb-image = \
@@ -162,14 +169,5 @@ quiet_cmd_itb-image = ITB $@
$(obj)/vmlinux.itb: $(obj)/vmlinux.its $(obj)/vmlinux.bin FORCE
$(call if_changed,itb-image,$<)
-$(obj)/vmlinux.gz.itb: $(obj)/vmlinux.gz.its $(obj)/vmlinux.bin.gz FORCE
- $(call if_changed,itb-image,$<)
-
-$(obj)/vmlinux.bz2.itb: $(obj)/vmlinux.bz2.its $(obj)/vmlinux.bin.bz2 FORCE
- $(call if_changed,itb-image,$<)
-
-$(obj)/vmlinux.lzma.itb: $(obj)/vmlinux.lzma.its $(obj)/vmlinux.bin.lzma FORCE
- $(call if_changed,itb-image,$<)
-
-$(obj)/vmlinux.lzo.itb: $(obj)/vmlinux.lzo.its $(obj)/vmlinux.bin.lzo FORCE
+$(obj)/vmlinux.%.itb: $(obj)/vmlinux.%.its $(obj)/vmlinux.bin.% FORCE
$(call if_changed,itb-image,$<)
diff --git a/arch/mips/boot/compressed/uart-prom.c b/arch/mips/boot/compressed/uart-prom.c
index d6f0fee0a151..a8a0a32e05d1 100644
--- a/arch/mips/boot/compressed/uart-prom.c
+++ b/arch/mips/boot/compressed/uart-prom.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-
-extern void prom_putchar(unsigned char ch);
+#include <asm/setup.h>
void putc(char c)
{
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
index aa4e8f75ff5d..ce93d57f1b4d 100644
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -155,6 +155,25 @@
};
};
+ spi_gpio {
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ num-chipselects = <2>;
+
+ gpio-miso = <&gpe 14 0>;
+ gpio-sck = <&gpe 15 0>;
+ gpio-mosi = <&gpe 17 0>;
+ cs-gpios = <&gpe 16 0
+ &gpe 18 0>;
+
+ spidev@0 {
+ compatible = "spidev";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
uart0: serial@10030000 {
compatible = "ingenic,jz4780-uart";
reg = <0x10030000 0x100>;
diff --git a/arch/mips/boot/dts/mscc/Makefile b/arch/mips/boot/dts/mscc/Makefile
index 3c6aed9f5439..9a9bb7ea0503 100644
--- a/arch/mips/boot/dts/mscc/Makefile
+++ b/arch/mips/boot/dts/mscc/Makefile
@@ -1,3 +1,3 @@
-dtb-$(CONFIG_LEGACY_BOARD_OCELOT) += ocelot_pcb123.dtb
+dtb-$(CONFIG_MSCC_OCELOT) += ocelot_pcb123.dtb
obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi
index 4f33dbc67348..f7eb612b46ba 100644
--- a/arch/mips/boot/dts/mscc/ocelot.dtsi
+++ b/arch/mips/boot/dts/mscc/ocelot.dtsi
@@ -91,6 +91,17 @@
status = "disabled";
};
+ spi: spi@101000 {
+ compatible = "mscc,ocelot-spi", "snps,dw-apb-ssi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x101000 0x100>, <0x3c 0x18>;
+ interrupts = <9>;
+ clocks = <&ahb_clk>;
+
+ status = "disabled";
+ };
+
switch@1010000 {
compatible = "mscc,vsc7514-switch";
reg = <0x1010000 0x10000>,
@@ -168,6 +179,9 @@
gpio-controller;
#gpio-cells = <2>;
gpio-ranges = <&gpio 0 0 22>;
+ interrupt-controller;
+ interrupts = <13>;
+ #interrupt-cells = <2>;
uart_pins: uart-pins {
pins = "GPIO_6", "GPIO_7";
@@ -178,13 +192,18 @@
pins = "GPIO_12", "GPIO_13";
function = "uart2";
};
+
+ miim1: miim1 {
+ pins = "GPIO_14", "GPIO_15";
+ function = "miim1";
+ };
};
mdio0: mdio@107009c {
#address-cells = <1>;
#size-cells = <0>;
compatible = "mscc,ocelot-miim";
- reg = <0x107009c 0x36>, <0x10700f0 0x8>;
+ reg = <0x107009c 0x24>, <0x10700f0 0x8>;
interrupts = <14>;
status = "disabled";
@@ -201,5 +220,16 @@
reg = <3>;
};
};
+
+ mdio1: mdio@10700c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "mscc,ocelot-miim";
+ reg = <0x10700c0 0x24>;
+ interrupts = <15>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&miim1>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/mips/boot/dts/mscc/ocelot_pcb123.dts b/arch/mips/boot/dts/mscc/ocelot_pcb123.dts
index 4ccd65379059..2266027759f9 100644
--- a/arch/mips/boot/dts/mscc/ocelot_pcb123.dts
+++ b/arch/mips/boot/dts/mscc/ocelot_pcb123.dts
@@ -26,6 +26,16 @@
status = "okay";
};
+&spi {
+ status = "okay";
+
+ flash@0 {
+ compatible = "macronix,mx25l25635f", "jedec,spi-nor";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
+};
+
&mdio0 {
status = "okay";
};
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index 1fe561c5f90e..61dcfa5b6ca7 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -161,7 +161,7 @@
usb_phy: usb-phy {
compatible = "qca,ar7100-usb-phy";
- reset-names = "usb-phy", "usb-suspend-override";
+ reset-names = "phy", "suspend-override";
resets = <&rst 4>, <&rst 3>;
#phy-cells = <0>;
diff --git a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
index 3931033e47c8..7fccf6357225 100644
--- a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
+++ b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
@@ -22,11 +22,10 @@
};
gpio-keys {
- compatible = "gpio-keys-polled";
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <20>;
button@0 {
label = "reset";
linux,code = <KEY_RESTART>;
diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
index efd5f0722206..2bae201aa365 100644
--- a/arch/mips/boot/dts/qca/ar9331.dtsi
+++ b/arch/mips/boot/dts/qca/ar9331.dtsi
@@ -146,7 +146,7 @@
usb_phy: usb-phy {
compatible = "qca,ar7100-usb-phy";
- reset-names = "usb-phy", "usb-suspend-override";
+ reset-names = "phy", "suspend-override";
resets = <&rst 4>, <&rst 3>;
#phy-cells = <0>;
diff --git a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
index d4e4502daaa8..e7af2cf5f4c1 100644
--- a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
+++ b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
@@ -29,11 +29,10 @@
};
};
- gpio-keys-polled {
- compatible = "gpio-keys-polled";
+ gpio-keys {
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <100>;
button@0 {
label = "reset";
diff --git a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
index 4f95ccf17c4c..d38aa73f1a2e 100644
--- a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
+++ b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
@@ -47,11 +47,10 @@
};
};
- gpio-keys-polled {
- compatible = "gpio-keys-polled";
+ gpio-keys {
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <100>;
button@0 {
label = "jumpstart";
diff --git a/arch/mips/boot/dts/qca/ar9331_omega.dts b/arch/mips/boot/dts/qca/ar9331_omega.dts
index f70f79c4d0d5..11778abacf66 100644
--- a/arch/mips/boot/dts/qca/ar9331_omega.dts
+++ b/arch/mips/boot/dts/qca/ar9331_omega.dts
@@ -29,11 +29,10 @@
};
};
- gpio-keys-polled {
- compatible = "gpio-keys-polled";
+ gpio-keys {
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <100>;
button@0 {
label = "reset";
diff --git a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
index 748131aea22e..c8290d36cfbe 100644
--- a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
+++ b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
@@ -47,11 +47,10 @@
};
};
- gpio-keys-polled {
- compatible = "gpio-keys-polled";
+ gpio-keys {
+ compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
- poll-interval = <100>;
button@0 {
label = "wps";
diff --git a/arch/mips/boot/ecoff.h b/arch/mips/boot/ecoff.h
index b3e73c22c345..5be79ebfc3f8 100644
--- a/arch/mips/boot/ecoff.h
+++ b/arch/mips/boot/ecoff.h
@@ -2,14 +2,17 @@
/*
* Some ECOFF definitions.
*/
+
+#include <stdint.h>
+
typedef struct filehdr {
- unsigned short f_magic; /* magic number */
- unsigned short f_nscns; /* number of sections */
- long f_timdat; /* time & date stamp */
- long f_symptr; /* file pointer to symbolic header */
- long f_nsyms; /* sizeof(symbolic hdr) */
- unsigned short f_opthdr; /* sizeof(optional hdr) */
- unsigned short f_flags; /* flags */
+ uint16_t f_magic; /* magic number */
+ uint16_t f_nscns; /* number of sections */
+ int32_t f_timdat; /* time & date stamp */
+ int32_t f_symptr; /* file pointer to symbolic header */
+ int32_t f_nsyms; /* sizeof(symbolic hdr) */
+ uint16_t f_opthdr; /* sizeof(optional hdr) */
+ uint16_t f_flags; /* flags */
} FILHDR;
#define FILHSZ sizeof(FILHDR)
@@ -18,32 +21,32 @@ typedef struct filehdr {
typedef struct scnhdr {
char s_name[8]; /* section name */
- long s_paddr; /* physical address, aliased s_nlib */
- long s_vaddr; /* virtual address */
- long s_size; /* section size */
- long s_scnptr; /* file ptr to raw data for section */
- long s_relptr; /* file ptr to relocation */
- long s_lnnoptr; /* file ptr to gp histogram */
- unsigned short s_nreloc; /* number of relocation entries */
- unsigned short s_nlnno; /* number of gp histogram entries */
- long s_flags; /* flags */
+ int32_t s_paddr; /* physical address, aliased s_nlib */
+ int32_t s_vaddr; /* virtual address */
+ int32_t s_size; /* section size */
+ int32_t s_scnptr; /* file ptr to raw data for section */
+ int32_t s_relptr; /* file ptr to relocation */
+ int32_t s_lnnoptr; /* file ptr to gp histogram */
+ uint16_t s_nreloc; /* number of relocation entries */
+ uint16_t s_nlnno; /* number of gp histogram entries */
+ int32_t s_flags; /* flags */
} SCNHDR;
#define SCNHSZ sizeof(SCNHDR)
-#define SCNROUND ((long)16)
+#define SCNROUND ((int32_t)16)
typedef struct aouthdr {
- short magic; /* see above */
- short vstamp; /* version stamp */
- long tsize; /* text size in bytes, padded to DW bdry*/
- long dsize; /* initialized data " " */
- long bsize; /* uninitialized data " " */
- long entry; /* entry pt. */
- long text_start; /* base of text used for this file */
- long data_start; /* base of data used for this file */
- long bss_start; /* base of bss used for this file */
- long gprmask; /* general purpose register mask */
- long cprmask[4]; /* co-processor register masks */
- long gp_value; /* the gp value used for this object */
+ int16_t magic; /* see above */
+ int16_t vstamp; /* version stamp */
+ int32_t tsize; /* text size in bytes, padded to DW bdry*/
+ int32_t dsize; /* initialized data " " */
+ int32_t bsize; /* uninitialized data " " */
+ int32_t entry; /* entry pt. */
+ int32_t text_start; /* base of text used for this file */
+ int32_t data_start; /* base of data used for this file */
+ int32_t bss_start; /* base of bss used for this file */
+ int32_t gprmask; /* general purpose register mask */
+ int32_t cprmask[4]; /* co-processor register masks */
+ int32_t gp_value; /* the gp value used for this object */
} AOUTHDR;
#define AOUTHSZ sizeof(AOUTHDR)
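
The point of the int32_t/uint16_t conversion: elf2ecoff is a host tool, and on an LP64 host "long" is eight bytes, so the old headers no longer matched the on-disk ECOFF layout. With fixed-width types the expected sizes can be pinned down at build time, e.g. (a sketch, not part of the patch):

#include <stdint.h>

struct ecoff_filehdr_sketch {
	uint16_t f_magic;
	uint16_t f_nscns;
	int32_t  f_timdat;
	int32_t  f_symptr;
	int32_t  f_nsyms;
	uint16_t f_opthdr;
	uint16_t f_flags;
};

_Static_assert(sizeof(struct ecoff_filehdr_sketch) == 20,
	       "FILHDR must be the 20-byte on-disk layout");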
diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c
index 266c8137e859..6972b97235da 100644
--- a/arch/mips/boot/elf2ecoff.c
+++ b/arch/mips/boot/elf2ecoff.c
@@ -43,6 +43,8 @@
#include <limits.h>
#include <netinet/in.h>
#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
#include "ecoff.h"
@@ -55,8 +57,8 @@
/* -------------------------------------------------------------------- */
struct sect {
- unsigned long vaddr;
- unsigned long len;
+ uint32_t vaddr;
+ uint32_t len;
};
int *symTypeTable;
@@ -153,16 +155,16 @@ static char *saveRead(int file, off_t offset, off_t len, char *name)
}
#define swab16(x) \
- ((unsigned short)( \
- (((unsigned short)(x) & (unsigned short)0x00ffU) << 8) | \
- (((unsigned short)(x) & (unsigned short)0xff00U) >> 8) ))
+ ((uint16_t)( \
+ (((uint16_t)(x) & (uint16_t)0x00ffU) << 8) | \
+ (((uint16_t)(x) & (uint16_t)0xff00U) >> 8) ))
#define swab32(x) \
((unsigned int)( \
- (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \
- (((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \
- (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8) | \
- (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) ))
+ (((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
+ (((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
+ (((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
+ (((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24) ))
static void convert_elf_hdr(Elf32_Ehdr * e)
{
@@ -274,7 +276,7 @@ int main(int argc, char *argv[])
struct aouthdr eah;
struct scnhdr esecs[6];
int infile, outfile;
- unsigned long cur_vma = ULONG_MAX;
+ uint32_t cur_vma = UINT32_MAX;
int addflag = 0;
int nosecs;
@@ -518,7 +520,7 @@ int main(int argc, char *argv[])
for (i = 0; i < nosecs; i++) {
printf
- ("Section %d: %s phys %lx size %lx file offset %lx\n",
+ ("Section %d: %s phys %"PRIx32" size %"PRIx32"\t file offset %"PRIx32"\n",
i, esecs[i].s_name, esecs[i].s_paddr,
esecs[i].s_size, esecs[i].s_scnptr);
}
@@ -564,17 +566,16 @@ int main(int argc, char *argv[])
the section can be loaded before copying. */
if (ph[i].p_type == PT_LOAD && ph[i].p_filesz) {
if (cur_vma != ph[i].p_vaddr) {
- unsigned long gap =
- ph[i].p_vaddr - cur_vma;
+ uint32_t gap = ph[i].p_vaddr - cur_vma;
char obuf[1024];
if (gap > 65536) {
fprintf(stderr,
- "Intersegment gap (%ld bytes) too large.\n",
+ "Intersegment gap (%"PRId32" bytes) too large.\n",
gap);
exit(1);
}
fprintf(stderr,
- "Warning: %ld byte intersegment gap.\n",
+					"Warning: %"PRId32" byte intersegment gap.\n",
gap);
memset(obuf, 0, sizeof obuf);
while (gap) {
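
The swab macros are now fixed-width as well; they flip the byte order of the multi-byte header fields when host and target endianness differ. The same operation in function form, with a made-up test value:

#include <stdint.h>

static uint32_t swab32_sketch(uint32_t x)
{
	return ((x & 0x000000ffU) << 24) |
	       ((x & 0x0000ff00U) <<  8) |
	       ((x & 0x00ff0000U) >>  8) |
	       ((x & 0xff000000U) >> 24);	/* 0x12345678 -> 0x78563412 */
}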
diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c
index af4c712f7afc..d1ed066e1a17 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-md5.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -182,7 +182,6 @@ static struct shash_alg alg = {
.cra_name = "md5",
.cra_driver_name= "octeon-md5",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha1.c b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
index 2b74b5b67cae..80d71e775936 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha1.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
@@ -215,7 +215,6 @@ static struct shash_alg octeon_sha1_alg = {
.cra_name = "sha1",
.cra_driver_name= "octeon-sha1",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha256.c b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
index 97e96fead08a..8b931e640926 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha256.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
@@ -239,7 +239,6 @@ static struct shash_alg octeon_sha256_algs[2] = { {
.cra_name = "sha256",
.cra_driver_name= "octeon-sha256",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -252,7 +251,6 @@ static struct shash_alg octeon_sha256_algs[2] = { {
.base = {
.cra_name = "sha224",
.cra_driver_name= "octeon-sha224",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha512.c b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
index d5fb3c6f22ae..6c9561496257 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha512.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
@@ -235,7 +235,6 @@ static struct shash_alg octeon_sha512_algs[2] = { {
.cra_name = "sha512",
.cra_driver_name= "octeon-sha512",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -249,7 +248,6 @@ static struct shash_alg octeon_sha512_algs[2] = { {
.cra_name = "sha384",
.cra_driver_name= "octeon-sha384",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 7b335ab21697..236833be6fbe 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -11,9 +11,7 @@
* Copyright (C) 2010 Cavium Networks, Inc.
*/
#include <linux/dma-direct.h>
-#include <linux/scatterlist.h>
#include <linux/bootmem.h>
-#include <linux/export.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -24,10 +22,16 @@
#include <asm/octeon/octeon.h>
#ifdef CONFIG_PCI
+#include <linux/pci.h>
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>
+struct octeon_dma_map_ops {
+ dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
+ phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
+};
+
static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
{
if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
@@ -61,6 +65,11 @@ static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
return daddr;
}
+static const struct octeon_dma_map_ops octeon_gen1_ops = {
+ .phys_to_dma = octeon_gen1_phys_to_dma,
+ .dma_to_phys = octeon_gen1_dma_to_phys,
+};
+
static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return octeon_hole_phys_to_dma(paddr);
@@ -71,6 +80,11 @@ static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
return octeon_hole_dma_to_phys(daddr);
}
+static const struct octeon_dma_map_ops octeon_gen2_ops = {
+ .phys_to_dma = octeon_gen2_phys_to_dma,
+ .dma_to_phys = octeon_gen2_dma_to_phys,
+};
+
static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
@@ -93,6 +107,11 @@ static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
return daddr;
}
+static const struct octeon_dma_map_ops octeon_big_ops = {
+ .phys_to_dma = octeon_big_phys_to_dma,
+ .dma_to_phys = octeon_big_dma_to_phys,
+};
+
static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
phys_addr_t paddr)
{
@@ -121,105 +140,51 @@ static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
return daddr;
}
-#endif /* CONFIG_PCI */
-
-static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction,
- unsigned long attrs)
-{
- dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
- direction, attrs);
- mb();
-
- return daddr;
-}
-
-static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction, unsigned long attrs)
-{
- int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
- mb();
- return r;
-}
-
-static void octeon_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
- mb();
-}
-
-static void octeon_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
- swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
- mb();
-}
-
-static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
- void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
-
- mb();
+static const struct octeon_dma_map_ops octeon_small_ops = {
+ .phys_to_dma = octeon_small_phys_to_dma,
+ .dma_to_phys = octeon_small_dma_to_phys,
+};
- return ret;
-}
+static const struct octeon_dma_map_ops *octeon_pci_dma_ops;
-static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return paddr;
-}
-
-static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
+void __init octeon_pci_dma_init(void)
{
- return daddr;
+ switch (octeon_dma_bar_type) {
+ case OCTEON_DMA_BAR_TYPE_PCIE:
+ octeon_pci_dma_ops = &octeon_gen1_ops;
+ break;
+ case OCTEON_DMA_BAR_TYPE_PCIE2:
+ octeon_pci_dma_ops = &octeon_gen2_ops;
+ break;
+ case OCTEON_DMA_BAR_TYPE_BIG:
+ octeon_pci_dma_ops = &octeon_big_ops;
+ break;
+ case OCTEON_DMA_BAR_TYPE_SMALL:
+ octeon_pci_dma_ops = &octeon_small_ops;
+ break;
+ default:
+ BUG();
+ }
}
-
-struct octeon_dma_map_ops {
- const struct dma_map_ops dma_map_ops;
- dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
- phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
-};
+#endif /* CONFIG_PCI */
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
- struct octeon_dma_map_ops,
- dma_map_ops);
-
- return ops->phys_to_dma(dev, paddr);
+#ifdef CONFIG_PCI
+ if (dev && dev_is_pci(dev))
+ return octeon_pci_dma_ops->phys_to_dma(dev, paddr);
+#endif
+ return paddr;
}
-EXPORT_SYMBOL(__phys_to_dma);
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
{
- struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
- struct octeon_dma_map_ops,
- dma_map_ops);
-
- return ops->dma_to_phys(dev, daddr);
+#ifdef CONFIG_PCI
+ if (dev && dev_is_pci(dev))
+ return octeon_pci_dma_ops->dma_to_phys(dev, daddr);
+#endif
+ return daddr;
}
-EXPORT_SYMBOL(__dma_to_phys);
-
-static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
- .dma_map_ops = {
- .alloc = octeon_dma_alloc_coherent,
- .free = swiotlb_free,
- .map_page = octeon_dma_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = octeon_dma_map_sg,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = octeon_dma_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = octeon_dma_sync_sg_for_device,
- .mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = swiotlb_dma_supported
- },
- .phys_to_dma = octeon_unity_phys_to_dma,
- .dma_to_phys = octeon_unity_dma_to_phys
-};
char *octeon_swiotlb;
@@ -283,52 +248,4 @@ void __init plat_swiotlb_setup(void)
if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
panic("Cannot allocate SWIOTLB buffer");
-
- mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
}
-
-#ifdef CONFIG_PCI
-static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
- .dma_map_ops = {
- .alloc = octeon_dma_alloc_coherent,
- .free = swiotlb_free,
- .map_page = octeon_dma_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = octeon_dma_map_sg,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = octeon_dma_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = octeon_dma_sync_sg_for_device,
- .mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = swiotlb_dma_supported
- },
-};
-
-const struct dma_map_ops *octeon_pci_dma_map_ops;
-
-void __init octeon_pci_dma_init(void)
-{
- switch (octeon_dma_bar_type) {
- case OCTEON_DMA_BAR_TYPE_PCIE2:
- _octeon_pci_dma_map_ops.phys_to_dma = octeon_gen2_phys_to_dma;
- _octeon_pci_dma_map_ops.dma_to_phys = octeon_gen2_dma_to_phys;
- break;
- case OCTEON_DMA_BAR_TYPE_PCIE:
- _octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
- _octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
- break;
- case OCTEON_DMA_BAR_TYPE_BIG:
- _octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
- _octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
- break;
- case OCTEON_DMA_BAR_TYPE_SMALL:
- _octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
- _octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
- break;
- default:
- BUG();
- }
- octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
-}
-#endif /* CONFIG_PCI */
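The dma-octeon.c rewrite above drops the per-platform dma_map_ops clones in favour of one small table of address-translation hooks, chosen once in octeon_pci_dma_init() and consulted by __phys_to_dma()/__dma_to_phys() only for PCI devices. A stand-alone sketch of that function-pointer-table pattern (hypothetical types and offsets):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

struct dma_xlate_ops {
	dma_addr_t  (*phys_to_dma)(phys_addr_t paddr);
	phys_addr_t (*dma_to_phys)(dma_addr_t daddr);
};

/* One hypothetical BAR layout; a real system has one table per BAR type. */
static dma_addr_t  gen1_phys_to_dma(phys_addr_t p) { return p + 0x10000000ull; }
static phys_addr_t gen1_dma_to_phys(dma_addr_t d)  { return d - 0x10000000ull; }

static const struct dma_xlate_ops gen1_ops = {
	.phys_to_dma = gen1_phys_to_dma,
	.dma_to_phys = gen1_dma_to_phys,
};

/* Set once at init, cf. octeon_pci_dma_init(); dispatched through later. */
static const struct dma_xlate_ops *xlate;

int main(void)
{
	xlate = &gen1_ops;
	printf("%" PRIx64 "\n", xlate->phys_to_dma(0x1000));
	return 0;
}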
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
index d18ed5af62f4..b8898e2b8a6f 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -42,9 +42,6 @@
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-dbg-defs.h>
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_asxx_enable(int block);
-
/**
* Probe RGMII ports and determine the number present
*
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
index 578283350776..a176358c5a21 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -39,10 +39,7 @@
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-pcsx-defs.h>
-
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
-void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
+#include <asm/octeon/cvmx-pcsxx-defs.h>
/**
* Perform initialization required only once for an SGMII port.
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c b/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
index ef16aa00167b..2a574d272671 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -25,10 +25,6 @@
* Contact Cavium Networks for more information
***********************license end**************************************/
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_spxx_int_msk_enable(int index);
-void __cvmx_interrupt_stxx_int_msk_enable(int index);
-
/*
* Functions for SPI initialization, configuration,
* and monitoring.
@@ -41,6 +37,8 @@ void __cvmx_interrupt_stxx_int_msk_enable(int index);
#include <asm/octeon/cvmx-pip-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
/*
* CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
index 19d54e02c185..2bb6912a580d 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -39,12 +39,9 @@
#include <asm/octeon/cvmx-pko-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-pcsx-defs.h>
#include <asm/octeon/cvmx-pcsxx-defs.h>
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
-void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
-
int __cvmx_helper_xaui_enumerate(int interface)
{
union cvmx_gmxx_hg2_control gmx_hg2_control;
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index b3aec101a65d..8272d8c648ca 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -814,7 +814,7 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
if (cpumask_test_cpu(cpu, dest) && enable_one) {
- enable_one = 0;
+ enable_one = false;
__set_bit(cd->bit, pen);
} else {
__clear_bit(cd->bit, pen);
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 8505db478904..807cadaf554e 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -322,6 +322,7 @@ static int __init octeon_ehci_device_init(void)
return 0;
pd = of_find_device_by_node(ehci_node);
+ of_node_put(ehci_node);
if (!pd)
return 0;
@@ -384,6 +385,7 @@ static int __init octeon_ohci_device_init(void)
return 0;
pd = of_find_device_by_node(ohci_node);
+ of_node_put(ohci_node);
if (!pd)
return 0;
@@ -1067,6 +1069,6 @@ end_led:
static int __init octeon_publish_devices(void)
{
- return of_platform_bus_probe(NULL, octeon_ids, NULL);
+ return of_platform_populate(NULL, octeon_ids, NULL, NULL);
}
arch_initcall(octeon_publish_devices);
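The of_node_put() calls added above plug a device-node refcount leak: of_find_device_by_node() does not consume the reference the earlier lookup took, so the caller must drop it once the platform device is in hand. A hedged sketch of the corrected pattern (hypothetical node name):

#include <linux/of.h>
#include <linux/of_platform.h>

static int __init example_device_init(void)
{
	struct device_node *np;
	struct platform_device *pd;

	np = of_find_node_by_name(NULL, "ehci");	/* takes a reference */
	if (!np)
		return 0;

	pd = of_find_device_by_node(np);	/* does not consume it... */
	of_node_put(np);			/* ...so drop it here */
	if (!pd)
		return 0;

	/* ... use pd ... */
	return 0;
}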
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index a8034d0dcade..c2426232db06 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -36,6 +36,7 @@
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
+#include <asm/setup.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
@@ -1108,7 +1109,7 @@ void __init plat_mem_setup(void)
* Emit one character to the boot UART. Exported for use by the
* watchdog timer.
*/
-int prom_putchar(char c)
+void prom_putchar(char c)
{
uint64_t lsrval;
@@ -1119,7 +1120,6 @@ int prom_putchar(char c)
/* Write the byte */
cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
- return 1;
}
EXPORT_SYMBOL(prom_putchar);
@@ -1154,11 +1154,7 @@ void __init prom_free_prom_memory(void)
}
void __init octeon_fill_mac_addresses(void);
-int octeon_prune_device_tree(void);
-extern const char __appended_dtb;
-extern const char __dtb_octeon_3xxx_begin;
-extern const char __dtb_octeon_68xx_begin;
void __init device_tree_init(void)
{
const void *fdt;
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index fad8e964f14c..ba800a892384 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -66,7 +66,6 @@ CONFIG_HW_RANDOM=y
CONFIG_GPIO_SYSFS=y
CONFIG_WATCHDOG=y
CONFIG_BCM47XX_WDT=y
-CONFIG_SSB_DEBUG=y
CONFIG_SSB_DRIVER_GIGE=y
CONFIG_BCMA_DRIVER_GMAC_CMN=y
CONFIG_USB=y
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
index be23fd25eeaa..030ff9c205fb 100644
--- a/arch/mips/configs/ci20_defconfig
+++ b/arch/mips/configs/ci20_defconfig
@@ -92,6 +92,8 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_JZ4780=y
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_INGENIC=y
# CONFIG_HWMON is not set
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
index 26b1cd5ffbf5..684c9dcba126 100644
--- a/arch/mips/configs/generic_defconfig
+++ b/arch/mips/configs/generic_defconfig
@@ -43,9 +43,6 @@ CONFIG_NETFILTER=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_SCSI=y
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_HW_RANDOM=y
# CONFIG_HWMON is not set
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index df8a9a15ca83..81058295d35f 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -317,6 +317,7 @@ CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index 14df9ef15d40..5c10cddc39d3 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -328,6 +328,7 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
index 25092e344574..bb694f5065f1 100644
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -330,6 +330,7 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
index 210bf609f785..5b5306b80576 100644
--- a/arch/mips/configs/malta_qemu_32r6_defconfig
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -133,6 +133,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
index e5934aa98397..85543599448f 100644
--- a/arch/mips/configs/maltaaprp_defconfig
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -133,6 +133,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index cb2ca11c1789..067bb84ac916 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -134,6 +134,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index be29fcec69fc..dfc78c3172a3 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -137,6 +137,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index 40462d4c90a0..50a2288c69f8 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -132,6 +132,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 4e50176cb3df..99a19cf5f9ba 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -326,6 +326,7 @@ CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/mips/fw/arc/arc_con.c b/arch/mips/fw/arc/arc_con.c
index 769d4b9ac82e..365e3913231e 100644
--- a/arch/mips/fw/arc/arc_con.c
+++ b/arch/mips/fw/arc/arc_con.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/fs.h>
+#include <asm/setup.h>
#include <asm/sgialib.h>
static void prom_console_write(struct console *co, const char *s,
diff --git a/arch/mips/fw/arc/promlib.c b/arch/mips/fw/arc/promlib.c
index 7e8ba5ce95be..be381307fbb0 100644
--- a/arch/mips/fw/arc/promlib.c
+++ b/arch/mips/fw/arc/promlib.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <asm/sgialib.h>
#include <asm/bcache.h>
+#include <asm/setup.h>
/*
* IP22 boardcache is not compatible with board caches. Thus we disable it
diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c
index 6aa264b9856a..8772617b64ce 100644
--- a/arch/mips/fw/sni/sniprom.c
+++ b/arch/mips/fw/sni/sniprom.c
@@ -19,6 +19,7 @@
#include <asm/mipsprom.h>
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
+#include <asm/setup.h>
/* special SNI prom calls */
/*
diff --git a/arch/mips/generic/Kconfig b/arch/mips/generic/Kconfig
index ba9b2c8cce68..08e33c6b2539 100644
--- a/arch/mips/generic/Kconfig
+++ b/arch/mips/generic/Kconfig
@@ -35,13 +35,13 @@ config LEGACY_BOARD_OCELOT
depends on LEGACY_BOARD_SEAD3=n
select LEGACY_BOARDS
select MSCC_OCELOT
+ select SYS_HAS_EARLY_PRINTK
+ select USE_GENERIC_EARLY_PRINTK_8250
config MSCC_OCELOT
bool
select GPIOLIB
select MSCC_OCELOT_IRQ
- select SYS_HAS_EARLY_PRINTK
- select USE_GENERIC_EARLY_PRINTK_8250
comment "FIT/UHI Boards"
@@ -65,6 +65,14 @@ config FIT_IMAGE_FDT_XILFPGA
Enable this to include the FDT for the MIPSfpga platform
from Imagination Technologies in the FIT kernel image.
+config FIT_IMAGE_FDT_OCELOT_PCB123
+ bool "Include FDT for Microsemi Ocelot PCB123"
+ select MSCC_OCELOT
+ help
+ Enable this to include the FDT for the Ocelot PCB123 platform
+ from Microsemi in the FIT kernel image.
+ This requires u-boot on the platform.
+
config VIRT_BOARD_RANCHU
bool "Support Ranchu platform for Android emulator"
help
diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform
index 0dd0d5d460a5..879cb80396c8 100644
--- a/arch/mips/generic/Platform
+++ b/arch/mips/generic/Platform
@@ -16,4 +16,5 @@ all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb
its-y := vmlinux.its.S
its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S
its-$(CONFIG_FIT_IMAGE_FDT_NI169445) += board-ni169445.its.S
+its-$(CONFIG_FIT_IMAGE_FDT_OCELOT_PCB123) += board-ocelot_pcb123.its.S
its-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += board-xilfpga.its.S
diff --git a/arch/mips/generic/board-ocelot_pcb123.its.S b/arch/mips/generic/board-ocelot_pcb123.its.S
new file mode 100644
index 000000000000..5a7d5e1c878a
--- /dev/null
+++ b/arch/mips/generic/board-ocelot_pcb123.its.S
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/ {
+ images {
+ fdt@ocelot_pcb123 {
+ description = "MSCC Ocelot PCB123 Device Tree";
+ data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb");
+ type = "flat_dt";
+ arch = "mips";
+ compression = "none";
+ hash@0 {
+ algo = "sha1";
+ };
+ };
+ };
+
+ configurations {
+ conf@ocelot_pcb123 {
+ description = "Ocelot Linux kernel";
+ kernel = "kernel@0";
+ fdt = "fdt@ocelot_pcb123";
+ };
+ };
+};
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 5ba6fcc26fa7..a106f8113842 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <asm/bootinfo.h>
#include <asm/fw/fw.h>
@@ -204,22 +203,11 @@ void __init arch_init_irq(void)
"mti,cpu-interrupt-controller");
if (!cpu_has_veic && !intc_node)
mips_cpu_irq_init();
+ of_node_put(intc_node);
irqchip_init();
}
-static int __init publish_devices(void)
-{
- if (!of_have_populated_dt())
- panic("Device-tree not present");
-
- if (of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL))
- panic("Failed to populate DT");
-
- return 0;
-}
-arch_initcall(publish_devices);
-
void __init prom_free_prom_memory(void)
{
}
diff --git a/arch/mips/generic/yamon-dt.c b/arch/mips/generic/yamon-dt.c
index b408dac722ac..7ba4ad5cc1d6 100644
--- a/arch/mips/generic/yamon-dt.c
+++ b/arch/mips/generic/yamon-dt.c
@@ -28,8 +28,6 @@ __init int yamon_dt_append_cmdline(void *fdt)
/* find or add chosen node */
chosen_off = fdt_path_offset(fdt, "/chosen");
if (chosen_off == -FDT_ERR_NOTFOUND)
- chosen_off = fdt_path_offset(fdt, "/chosen@0");
- if (chosen_off == -FDT_ERR_NOTFOUND)
chosen_off = fdt_add_subnode(fdt, 0, "chosen");
if (chosen_off < 0) {
pr_err("Unable to find or add DT chosen node: %d\n",
@@ -221,8 +219,6 @@ __init int yamon_dt_serial_config(void *fdt)
/* find or add chosen node */
chosen_off = fdt_path_offset(fdt, "/chosen");
if (chosen_off == -FDT_ERR_NOTFOUND)
- chosen_off = fdt_path_offset(fdt, "/chosen@0");
- if (chosen_off == -FDT_ERR_NOTFOUND)
chosen_off = fdt_add_subnode(fdt, 0, "chosen");
if (chosen_off < 0) {
pr_err("Unable to find or add DT chosen node: %d\n",
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 45d541baf359..58351e48421e 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -8,6 +8,7 @@ generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += msi.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 0ab176bdb8e8..0269b3de8b51 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -22,6 +22,17 @@
#include <asm/cmpxchg.h>
#include <asm/war.h>
+/*
+ * Using a branch-likely instruction to check the result of an sc instruction
+ * works around a bug present in R10000 CPUs prior to revision 3.0 that could
+ * cause ll-sc sequences to execute non-atomically.
+ */
+#if R10000_LLSC_WAR
+# define __scbeqz "beqzl"
+#else
+# define __scbeqz "beqz"
+#endif
+
#define ATOMIC_INIT(i) { (i) }
/*
@@ -44,31 +55,18 @@
#define ATOMIC_OP(op, c_op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
int temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: ll %0, %1 # atomic_" #op " \n" \
" " #asm_op " %0, %2 \n" \
" sc %0, %1 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" .set mips0 \n" \
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- int temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " ll %0, %1 # atomic_" #op "\n" \
- " " #asm_op " %0, %2 \n" \
- " sc %0, %1 \n" \
- " .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!temp)); \
} else { \
unsigned long flags; \
\
@@ -83,36 +81,20 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
{ \
int result; \
\
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
int temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: ll %1, %2 # atomic_" #op "_return \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- int temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " ll %1, %2 # atomic_" #op "_return \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!result)); \
- \
- result = temp; result c_op i; \
} else { \
unsigned long flags; \
\
@@ -131,36 +113,20 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
{ \
int result; \
\
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
int temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: ll %1, %2 # atomic_fetch_" #op " \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" move %0, %1 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- int temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " ll %1, %2 # atomic_fetch_" #op " \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!result)); \
- \
- result = temp; \
} else { \
unsigned long flags; \
\
@@ -218,38 +184,17 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
smp_mb__before_llsc();
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %1, %2 # atomic_sub_if_positive\n"
- " subu %0, %1, %3 \n"
- " bltz %0, 1f \n"
- " sc %0, %2 \n"
- " .set noreorder \n"
- " beqzl %0, 1b \n"
- " subu %0, %1, %3 \n"
- " .set reorder \n"
- "1: \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp),
- "+" GCC_OFF_SMALL_ASM() (v->counter)
- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
- : "memory");
- } else if (kernel_uses_llsc) {
+ if (kernel_uses_llsc) {
int temp;
__asm__ __volatile__(
" .set "MIPS_ISA_LEVEL" \n"
"1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
+ " move %1, %0 \n"
" bltz %0, 1f \n"
- " sc %0, %2 \n"
- " .set noreorder \n"
- " beqz %0, 1b \n"
- " subu %0, %1, %3 \n"
- " .set reorder \n"
+ " sc %1, %2 \n"
+ "\t" __scbeqz " %1, 1b \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp),
@@ -274,97 +219,12 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
-/*
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-/*
- * atomic_dec_and_test - decrement by 1 and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-
/*
* atomic_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*/
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
-/*
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc(v) atomic_add(1, (v))
-
-/*
- * atomic_dec - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec(v) atomic_sub(1, (v))
-
-/*
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
-
#ifdef CONFIG_64BIT
#define ATOMIC64_INIT(i) { (i) }
@@ -386,31 +246,18 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC64_OP(op, c_op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
long temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: lld %0, %1 # atomic64_" #op " \n" \
" " #asm_op " %0, %2 \n" \
" scd %0, %1 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" .set mips0 \n" \
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- long temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " lld %0, %1 # atomic64_" #op "\n" \
- " " #asm_op " %0, %2 \n" \
- " scd %0, %1 \n" \
- " .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } while (unlikely(!temp)); \
} else { \
unsigned long flags; \
\
@@ -425,37 +272,20 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{ \
long result; \
\
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
long temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: lld %1, %2 # atomic64_" #op "_return\n" \
" " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- long temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " lld %1, %2 # atomic64_" #op "_return\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), \
- "=" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
- : "memory"); \
- } while (unlikely(!result)); \
- \
- result = temp; result c_op i; \
} else { \
unsigned long flags; \
\
@@ -478,33 +308,16 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
long temp; \
\
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
"1: lld %1, %2 # atomic64_fetch_" #op "\n" \
" " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \
- " beqzl %0, 1b \n" \
+ "\t" __scbeqz " %0, 1b \n" \
" move %0, %1 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- long temp; \
- \
- do { \
- __asm__ __volatile__( \
- " .set "MIPS_ISA_LEVEL" \n" \
- " lld %1, %2 # atomic64_fetch_" #op "\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), \
- "=" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
- : "memory"); \
- } while (unlikely(!result)); \
- \
- result = temp; \
} else { \
unsigned long flags; \
\
@@ -563,38 +376,17 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
smp_mb__before_llsc();
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %1, %2 # atomic64_sub_if_positive\n"
- " dsubu %0, %1, %3 \n"
- " bltz %0, 1f \n"
- " scd %0, %2 \n"
- " .set noreorder \n"
- " beqzl %0, 1b \n"
- " dsubu %0, %1, %3 \n"
- " .set reorder \n"
- "1: \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp),
- "=" GCC_OFF_SMALL_ASM() (v->counter)
- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
- : "memory");
- } else if (kernel_uses_llsc) {
+ if (kernel_uses_llsc) {
long temp;
__asm__ __volatile__(
" .set "MIPS_ISA_LEVEL" \n"
"1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n"
+ " move %1, %0 \n"
" bltz %0, 1f \n"
- " scd %0, %2 \n"
- " .set noreorder \n"
- " beqz %0, 1b \n"
- " dsubu %0, %1, %3 \n"
- " .set reorder \n"
+ " scd %1, %2 \n"
+ "\t" __scbeqz " %1, 1b \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp),
@@ -620,99 +412,12 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
-/**
- * atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns true iff @v was not @u.
- */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long c, old;
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic64_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
-/*
- * atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
-
-/*
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
-/*
- * atomic64_dec_and_test - decrement by 1 and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
-
/*
* atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic64_t
*/
#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)
-/*
- * atomic64_inc - increment atomic variable
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic64_inc(v) atomic64_add(1, (v))
-
-/*
- * atomic64_dec - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic64_dec(v) atomic64_sub(1, (v))
-
-/*
- * atomic64_add_negative - add and test if negative
- * @v: pointer of type atomic64_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
-
#endif /* CONFIG_64BIT */
#endif /* _ASM_ATOMIC_H */
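The atomic.h rework above folds the old R10000_LLSC_WAR and plain ll/sc variants of every operation into a single loop; the only difference between the two was the branch-back instruction, which the new __scbeqz macro now selects at preprocessing time. A hedged sketch of what one generated operation looks like after expansion (illustrative assembly, simplified):

#if R10000_LLSC_WAR
# define __scbeqz "beqzl"	/* branch-likely papers over the R10000 sc bug */
#else
# define __scbeqz "beqz"
#endif

/* atomic_add(i, v) then expands to one loop either way:
 *
 *	1:	ll	%0, %1		# load-linked v->counter
 *		addu	%0, %2		# apply the operation
 *		sc	%0, %1		# store-conditional
 *		beqz(l)	%0, 1b		# retry if the sc failed
 *
 * instead of carrying two near-identical asm bodies per operation.
 */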
diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h
index b3e2975f83d3..bf6a8afd7ad2 100644
--- a/arch/mips/include/asm/bmips.h
+++ b/arch/mips/include/asm/bmips.h
@@ -123,22 +123,6 @@ static inline void bmips_write_zscm_reg(unsigned int offset, unsigned long data)
barrier();
}
-static inline void bmips_post_dma_flush(struct device *dev)
-{
- void __iomem *cbr = BMIPS_GET_CBR();
- u32 cfg;
-
- if (boot_cpu_type() != CPU_BMIPS3300 &&
- boot_cpu_type() != CPU_BMIPS4350 &&
- boot_cpu_type() != CPU_BMIPS4380)
- return;
-
- /* Flush stale data out of the readahead cache */
- cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
- __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
- __raw_readl(cbr + BMIPS_RAC_CONFIG);
-}
-
#endif /* !defined(__ASSEMBLY__) */
#endif /* _ASM_BMIPS_H */
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 9cdb4e4ce258..0edba3e75747 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -14,39 +14,77 @@
#include <asm/isa-rev.h>
#include <cpu-feature-overrides.h>
+#define __ase(ase) (cpu_data[0].ases & (ase))
+#define __opt(opt) (cpu_data[0].options & (opt))
+
+/*
+ * Check if MIPS_ISA_REV is >= isa *and* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * Note that these should only be used in cases where a kernel built for an
+ * older ISA *cannot* run on a CPU which supports the feature in question. For
+ * example this may be used for features introduced with MIPSr6, since a kernel
+ * built for an older ISA cannot run on a MIPSr6 CPU. This should not be used
+ * for MIPSr2 features however, since a MIPSr1 or earlier kernel might run on a
+ * MIPSr2 CPU.
+ */
+#define __isa_ge_and_ase(isa, ase) ((MIPS_ISA_REV >= (isa)) && __ase(ase))
+#define __isa_ge_and_opt(isa, opt) ((MIPS_ISA_REV >= (isa)) && __opt(opt))
+
+/*
+ * Check if MIPS_ISA_REV is >= isa *or* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * These are for use with features that are optional up until a particular ISA
+ * revision & then become required.
+ */
+#define __isa_ge_or_ase(isa, ase) ((MIPS_ISA_REV >= (isa)) || __ase(ase))
+#define __isa_ge_or_opt(isa, opt) ((MIPS_ISA_REV >= (isa)) || __opt(opt))
+
+/*
+ * Check if MIPS_ISA_REV is < isa *and* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * These are for use with features that are optional up until a particular ISA
+ * revision & are then removed - ie. no longer present in any CPU implementing
+ * the given ISA revision.
+ */
+#define __isa_lt_and_ase(isa, ase) ((MIPS_ISA_REV < (isa)) && __ase(ase))
+#define __isa_lt_and_opt(isa, opt) ((MIPS_ISA_REV < (isa)) && __opt(opt))
+
/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
* This is true for all known MIPS systems.
*/
#ifndef cpu_has_tlb
-#define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB)
+#define cpu_has_tlb __opt(MIPS_CPU_TLB)
#endif
#ifndef cpu_has_ftlb
-#define cpu_has_ftlb (cpu_data[0].options & MIPS_CPU_FTLB)
+#define cpu_has_ftlb __opt(MIPS_CPU_FTLB)
#endif
#ifndef cpu_has_tlbinv
-#define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV)
+#define cpu_has_tlbinv __opt(MIPS_CPU_TLBINV)
#endif
#ifndef cpu_has_segments
-#define cpu_has_segments (cpu_data[0].options & MIPS_CPU_SEGMENTS)
+#define cpu_has_segments __opt(MIPS_CPU_SEGMENTS)
#endif
#ifndef cpu_has_eva
-#define cpu_has_eva (cpu_data[0].options & MIPS_CPU_EVA)
+#define cpu_has_eva __opt(MIPS_CPU_EVA)
#endif
#ifndef cpu_has_htw
-#define cpu_has_htw (cpu_data[0].options & MIPS_CPU_HTW)
+#define cpu_has_htw __opt(MIPS_CPU_HTW)
#endif
#ifndef cpu_has_ldpte
-#define cpu_has_ldpte (cpu_data[0].options & MIPS_CPU_LDPTE)
+#define cpu_has_ldpte __opt(MIPS_CPU_LDPTE)
#endif
#ifndef cpu_has_rixiex
-#define cpu_has_rixiex (cpu_data[0].options & MIPS_CPU_RIXIEX)
+#define cpu_has_rixiex __isa_ge_or_opt(6, MIPS_CPU_RIXIEX)
#endif
#ifndef cpu_has_maar
-#define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR)
+#define cpu_has_maar __opt(MIPS_CPU_MAAR)
#endif
#ifndef cpu_has_rw_llb
-#define cpu_has_rw_llb (cpu_data[0].options & MIPS_CPU_RW_LLB)
+#define cpu_has_rw_llb __isa_ge_or_opt(6, MIPS_CPU_RW_LLB)
#endif
/*
@@ -59,18 +97,18 @@
#define cpu_has_3kex (!cpu_has_4kex)
#endif
#ifndef cpu_has_4kex
-#define cpu_has_4kex (cpu_data[0].options & MIPS_CPU_4KEX)
+#define cpu_has_4kex __isa_ge_or_opt(1, MIPS_CPU_4KEX)
#endif
#ifndef cpu_has_3k_cache
-#define cpu_has_3k_cache (cpu_data[0].options & MIPS_CPU_3K_CACHE)
+#define cpu_has_3k_cache __isa_lt_and_opt(1, MIPS_CPU_3K_CACHE)
#endif
#define cpu_has_6k_cache 0
#define cpu_has_8k_cache 0
#ifndef cpu_has_4k_cache
-#define cpu_has_4k_cache (cpu_data[0].options & MIPS_CPU_4K_CACHE)
+#define cpu_has_4k_cache __isa_ge_or_opt(1, MIPS_CPU_4K_CACHE)
#endif
#ifndef cpu_has_tx39_cache
-#define cpu_has_tx39_cache (cpu_data[0].options & MIPS_CPU_TX39_CACHE)
+#define cpu_has_tx39_cache __opt(MIPS_CPU_TX39_CACHE)
#endif
#ifndef cpu_has_octeon_cache
#define cpu_has_octeon_cache 0
@@ -83,92 +121,92 @@
#define raw_cpu_has_fpu cpu_has_fpu
#endif
#ifndef cpu_has_32fpr
-#define cpu_has_32fpr (cpu_data[0].options & MIPS_CPU_32FPR)
+#define cpu_has_32fpr __isa_ge_or_opt(1, MIPS_CPU_32FPR)
#endif
#ifndef cpu_has_counter
-#define cpu_has_counter (cpu_data[0].options & MIPS_CPU_COUNTER)
+#define cpu_has_counter __opt(MIPS_CPU_COUNTER)
#endif
#ifndef cpu_has_watch
-#define cpu_has_watch (cpu_data[0].options & MIPS_CPU_WATCH)
+#define cpu_has_watch __opt(MIPS_CPU_WATCH)
#endif
#ifndef cpu_has_divec
-#define cpu_has_divec (cpu_data[0].options & MIPS_CPU_DIVEC)
+#define cpu_has_divec __isa_ge_or_opt(1, MIPS_CPU_DIVEC)
#endif
#ifndef cpu_has_vce
-#define cpu_has_vce (cpu_data[0].options & MIPS_CPU_VCE)
+#define cpu_has_vce __opt(MIPS_CPU_VCE)
#endif
#ifndef cpu_has_cache_cdex_p
-#define cpu_has_cache_cdex_p (cpu_data[0].options & MIPS_CPU_CACHE_CDEX_P)
+#define cpu_has_cache_cdex_p __opt(MIPS_CPU_CACHE_CDEX_P)
#endif
#ifndef cpu_has_cache_cdex_s
-#define cpu_has_cache_cdex_s (cpu_data[0].options & MIPS_CPU_CACHE_CDEX_S)
+#define cpu_has_cache_cdex_s __opt(MIPS_CPU_CACHE_CDEX_S)
#endif
#ifndef cpu_has_prefetch
-#define cpu_has_prefetch (cpu_data[0].options & MIPS_CPU_PREFETCH)
+#define cpu_has_prefetch __isa_ge_or_opt(1, MIPS_CPU_PREFETCH)
#endif
#ifndef cpu_has_mcheck
-#define cpu_has_mcheck (cpu_data[0].options & MIPS_CPU_MCHECK)
+#define cpu_has_mcheck __isa_ge_or_opt(1, MIPS_CPU_MCHECK)
#endif
#ifndef cpu_has_ejtag
-#define cpu_has_ejtag (cpu_data[0].options & MIPS_CPU_EJTAG)
+#define cpu_has_ejtag __opt(MIPS_CPU_EJTAG)
#endif
#ifndef cpu_has_llsc
-#define cpu_has_llsc (cpu_data[0].options & MIPS_CPU_LLSC)
+#define cpu_has_llsc __isa_ge_or_opt(1, MIPS_CPU_LLSC)
#endif
#ifndef cpu_has_bp_ghist
-#define cpu_has_bp_ghist (cpu_data[0].options & MIPS_CPU_BP_GHIST)
+#define cpu_has_bp_ghist __opt(MIPS_CPU_BP_GHIST)
#endif
#ifndef kernel_uses_llsc
#define kernel_uses_llsc cpu_has_llsc
#endif
#ifndef cpu_has_guestctl0ext
-#define cpu_has_guestctl0ext (cpu_data[0].options & MIPS_CPU_GUESTCTL0EXT)
+#define cpu_has_guestctl0ext __opt(MIPS_CPU_GUESTCTL0EXT)
#endif
#ifndef cpu_has_guestctl1
-#define cpu_has_guestctl1 (cpu_data[0].options & MIPS_CPU_GUESTCTL1)
+#define cpu_has_guestctl1 __opt(MIPS_CPU_GUESTCTL1)
#endif
#ifndef cpu_has_guestctl2
-#define cpu_has_guestctl2 (cpu_data[0].options & MIPS_CPU_GUESTCTL2)
+#define cpu_has_guestctl2 __opt(MIPS_CPU_GUESTCTL2)
#endif
#ifndef cpu_has_guestid
-#define cpu_has_guestid (cpu_data[0].options & MIPS_CPU_GUESTID)
+#define cpu_has_guestid __opt(MIPS_CPU_GUESTID)
#endif
#ifndef cpu_has_drg
-#define cpu_has_drg (cpu_data[0].options & MIPS_CPU_DRG)
+#define cpu_has_drg __opt(MIPS_CPU_DRG)
#endif
#ifndef cpu_has_mips16
-#define cpu_has_mips16 (cpu_data[0].ases & MIPS_ASE_MIPS16)
+#define cpu_has_mips16 __isa_lt_and_ase(6, MIPS_ASE_MIPS16)
#endif
#ifndef cpu_has_mips16e2
-#define cpu_has_mips16e2 (cpu_data[0].ases & MIPS_ASE_MIPS16E2)
+#define cpu_has_mips16e2 __isa_lt_and_ase(6, MIPS_ASE_MIPS16E2)
#endif
#ifndef cpu_has_mdmx
-#define cpu_has_mdmx (cpu_data[0].ases & MIPS_ASE_MDMX)
+#define cpu_has_mdmx __isa_lt_and_ase(6, MIPS_ASE_MDMX)
#endif
#ifndef cpu_has_mips3d
-#define cpu_has_mips3d (cpu_data[0].ases & MIPS_ASE_MIPS3D)
+#define cpu_has_mips3d __isa_lt_and_ase(6, MIPS_ASE_MIPS3D)
#endif
#ifndef cpu_has_smartmips
-#define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS)
+#define cpu_has_smartmips __isa_lt_and_ase(6, MIPS_ASE_SMARTMIPS)
#endif
#ifndef cpu_has_rixi
-#define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI)
+#define cpu_has_rixi __isa_ge_or_opt(6, MIPS_CPU_RIXI)
#endif
#ifndef cpu_has_mmips
# ifdef CONFIG_SYS_SUPPORTS_MICROMIPS
-# define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS)
+# define cpu_has_mmips __opt(MIPS_CPU_MICROMIPS)
# else
# define cpu_has_mmips 0
# endif
#endif
#ifndef cpu_has_lpa
-#define cpu_has_lpa (cpu_data[0].options & MIPS_CPU_LPA)
+#define cpu_has_lpa __opt(MIPS_CPU_LPA)
#endif
#ifndef cpu_has_mvh
-#define cpu_has_mvh (cpu_data[0].options & MIPS_CPU_MVH)
+#define cpu_has_mvh __opt(MIPS_CPU_MVH)
#endif
#ifndef cpu_has_xpa
#define cpu_has_xpa (cpu_has_lpa && cpu_has_mvh)
@@ -338,32 +376,32 @@
#endif
#ifndef cpu_has_dsp
-#define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP)
+#define cpu_has_dsp __ase(MIPS_ASE_DSP)
#endif
#ifndef cpu_has_dsp2
-#define cpu_has_dsp2 (cpu_data[0].ases & MIPS_ASE_DSP2P)
+#define cpu_has_dsp2 __ase(MIPS_ASE_DSP2P)
#endif
#ifndef cpu_has_dsp3
-#define cpu_has_dsp3 (cpu_data[0].ases & MIPS_ASE_DSP3)
+#define cpu_has_dsp3 __ase(MIPS_ASE_DSP3)
#endif
#ifndef cpu_has_mipsmt
-#define cpu_has_mipsmt (cpu_data[0].ases & MIPS_ASE_MIPSMT)
+#define cpu_has_mipsmt __isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
#endif
#ifndef cpu_has_vp
-#define cpu_has_vp (cpu_data[0].options & MIPS_CPU_VP)
+#define cpu_has_vp __isa_ge_and_opt(6, MIPS_CPU_VP)
#endif
#ifndef cpu_has_userlocal
-#define cpu_has_userlocal (cpu_data[0].options & MIPS_CPU_ULRI)
+#define cpu_has_userlocal __isa_ge_or_opt(6, MIPS_CPU_ULRI)
#endif
#ifdef CONFIG_32BIT
# ifndef cpu_has_nofpuex
-# define cpu_has_nofpuex (cpu_data[0].options & MIPS_CPU_NOFPUEX)
+# define cpu_has_nofpuex __isa_lt_and_opt(1, MIPS_CPU_NOFPUEX)
# endif
# ifndef cpu_has_64bits
# define cpu_has_64bits (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
@@ -405,19 +443,19 @@
#endif
#if defined(CONFIG_CPU_MIPSR2_IRQ_VI) && !defined(cpu_has_vint)
-# define cpu_has_vint (cpu_data[0].options & MIPS_CPU_VINT)
+# define cpu_has_vint __opt(MIPS_CPU_VINT)
#elif !defined(cpu_has_vint)
# define cpu_has_vint 0
#endif
#if defined(CONFIG_CPU_MIPSR2_IRQ_EI) && !defined(cpu_has_veic)
-# define cpu_has_veic (cpu_data[0].options & MIPS_CPU_VEIC)
+# define cpu_has_veic __opt(MIPS_CPU_VEIC)
#elif !defined(cpu_has_veic)
# define cpu_has_veic 0
#endif
#ifndef cpu_has_inclusive_pcaches
-#define cpu_has_inclusive_pcaches (cpu_data[0].options & MIPS_CPU_INCLUSIVE_CACHES)
+#define cpu_has_inclusive_pcaches __opt(MIPS_CPU_INCLUSIVE_CACHES)
#endif
#ifndef cpu_dcache_line_size
@@ -438,63 +476,63 @@
#endif
#ifndef cpu_has_perf_cntr_intr_bit
-#define cpu_has_perf_cntr_intr_bit (cpu_data[0].options & MIPS_CPU_PCI)
+#define cpu_has_perf_cntr_intr_bit __opt(MIPS_CPU_PCI)
#endif
#ifndef cpu_has_vz
-#define cpu_has_vz (cpu_data[0].ases & MIPS_ASE_VZ)
+#define cpu_has_vz __ase(MIPS_ASE_VZ)
#endif
#if defined(CONFIG_CPU_HAS_MSA) && !defined(cpu_has_msa)
-# define cpu_has_msa (cpu_data[0].ases & MIPS_ASE_MSA)
+# define cpu_has_msa __ase(MIPS_ASE_MSA)
#elif !defined(cpu_has_msa)
# define cpu_has_msa 0
#endif
#ifndef cpu_has_ufr
-# define cpu_has_ufr (cpu_data[0].options & MIPS_CPU_UFR)
+# define cpu_has_ufr __opt(MIPS_CPU_UFR)
#endif
#ifndef cpu_has_fre
-# define cpu_has_fre (cpu_data[0].options & MIPS_CPU_FRE)
+# define cpu_has_fre __opt(MIPS_CPU_FRE)
#endif
#ifndef cpu_has_cdmm
-# define cpu_has_cdmm (cpu_data[0].options & MIPS_CPU_CDMM)
+# define cpu_has_cdmm __opt(MIPS_CPU_CDMM)
#endif
#ifndef cpu_has_small_pages
-# define cpu_has_small_pages (cpu_data[0].options & MIPS_CPU_SP)
+# define cpu_has_small_pages __opt(MIPS_CPU_SP)
#endif
#ifndef cpu_has_nan_legacy
-#define cpu_has_nan_legacy (cpu_data[0].options & MIPS_CPU_NAN_LEGACY)
+#define cpu_has_nan_legacy __isa_lt_and_opt(6, MIPS_CPU_NAN_LEGACY)
#endif
#ifndef cpu_has_nan_2008
-#define cpu_has_nan_2008 (cpu_data[0].options & MIPS_CPU_NAN_2008)
+#define cpu_has_nan_2008 __isa_ge_or_opt(6, MIPS_CPU_NAN_2008)
#endif
#ifndef cpu_has_ebase_wg
-# define cpu_has_ebase_wg (cpu_data[0].options & MIPS_CPU_EBASE_WG)
+# define cpu_has_ebase_wg __opt(MIPS_CPU_EBASE_WG)
#endif
#ifndef cpu_has_badinstr
-# define cpu_has_badinstr (cpu_data[0].options & MIPS_CPU_BADINSTR)
+# define cpu_has_badinstr __isa_ge_or_opt(6, MIPS_CPU_BADINSTR)
#endif
#ifndef cpu_has_badinstrp
-# define cpu_has_badinstrp (cpu_data[0].options & MIPS_CPU_BADINSTRP)
+# define cpu_has_badinstrp __isa_ge_or_opt(6, MIPS_CPU_BADINSTRP)
#endif
#ifndef cpu_has_contextconfig
-# define cpu_has_contextconfig (cpu_data[0].options & MIPS_CPU_CTXTC)
+# define cpu_has_contextconfig __opt(MIPS_CPU_CTXTC)
#endif
#ifndef cpu_has_perf
-# define cpu_has_perf (cpu_data[0].options & MIPS_CPU_PERF)
+# define cpu_has_perf __opt(MIPS_CPU_PERF)
#endif
-#if defined(CONFIG_SMP) && (MIPS_ISA_REV >= 6)
+#ifdef CONFIG_SMP
/*
* Some systems share FTLB RAMs between threads within a core (siblings in
* kernel parlance). This means that FTLB entries may become invalid at almost
@@ -507,7 +545,7 @@
*/
# ifndef cpu_has_shared_ftlb_ram
# define cpu_has_shared_ftlb_ram \
- (current_cpu_data.options & MIPS_CPU_SHARED_FTLB_RAM)
+ __isa_ge_and_opt(6, MIPS_CPU_SHARED_FTLB_RAM)
# endif
/*
@@ -524,9 +562,9 @@
*/
# ifndef cpu_has_shared_ftlb_entries
# define cpu_has_shared_ftlb_entries \
- (current_cpu_data.options & MIPS_CPU_SHARED_FTLB_ENTRIES)
+ __isa_ge_and_opt(6, MIPS_CPU_SHARED_FTLB_ENTRIES)
# endif
-#endif /* SMP && MIPS_ISA_REV >= 6 */
+#endif /* SMP */
#ifndef cpu_has_shared_ftlb_ram
# define cpu_has_shared_ftlb_ram 0
@@ -537,7 +575,7 @@
#ifdef CONFIG_MIPS_MT_SMP
# define cpu_has_mipsmt_pertccounters \
- (cpu_data[0].options & MIPS_CPU_MT_PER_TC_PERF_COUNTERS)
+ __isa_lt_and_opt(6, MIPS_CPU_MT_PER_TC_PERF_COUNTERS)
#else
# define cpu_has_mipsmt_pertccounters 0
#endif /* CONFIG_MIPS_MT_SMP */
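A practical consequence of the __isa_* helpers above is that many cpu_has_* checks now constant-fold away at build time. A hedged, simplified illustration (stand-in macros; the real definitions live in cpu-features.h):

#define MIPS_ISA_REV 6				/* assume a MIPSr6 build */
#define __opt(opt)   (cpu_options & (opt))	/* simplified stand-in */
#define __isa_ge_or_opt(isa, opt) ((MIPS_ISA_REV >= (isa)) || __opt(opt))

/* cpu_has_rixi == __isa_ge_or_opt(6, MIPS_CPU_RIXI)
 *	        == ((6 >= 6) || __opt(MIPS_CPU_RIXI))
 *	        == 1
 * so the compiler can delete the runtime options test, and any code
 * guarded by !cpu_has_rixi, outright.
 */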
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 5b9d02ef4f60..dacbdb84516a 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -225,31 +225,32 @@
* Definitions for 7:0 on legacy processors
*/
-#define PRID_REV_TX4927 0x0022
-#define PRID_REV_TX4937 0x0030
-#define PRID_REV_R4400 0x0040
-#define PRID_REV_R3000A 0x0030
-#define PRID_REV_R3000 0x0020
-#define PRID_REV_R2000A 0x0010
-#define PRID_REV_TX3912 0x0010
-#define PRID_REV_TX3922 0x0030
-#define PRID_REV_TX3927 0x0040
-#define PRID_REV_VR4111 0x0050
-#define PRID_REV_VR4181 0x0050 /* Same as VR4111 */
-#define PRID_REV_VR4121 0x0060
-#define PRID_REV_VR4122 0x0070
-#define PRID_REV_VR4181A 0x0070 /* Same as VR4122 */
-#define PRID_REV_VR4130 0x0080
-#define PRID_REV_34K_V1_0_2 0x0022
-#define PRID_REV_LOONGSON1B 0x0020
-#define PRID_REV_LOONGSON1C 0x0020 /* Same as Loongson-1B */
-#define PRID_REV_LOONGSON2E 0x0002
-#define PRID_REV_LOONGSON2F 0x0003
-#define PRID_REV_LOONGSON3A_R1 0x0005
-#define PRID_REV_LOONGSON3B_R1 0x0006
-#define PRID_REV_LOONGSON3B_R2 0x0007
-#define PRID_REV_LOONGSON3A_R2 0x0008
-#define PRID_REV_LOONGSON3A_R3 0x0009
+#define PRID_REV_TX4927 0x0022
+#define PRID_REV_TX4937 0x0030
+#define PRID_REV_R4400 0x0040
+#define PRID_REV_R3000A 0x0030
+#define PRID_REV_R3000 0x0020
+#define PRID_REV_R2000A 0x0010
+#define PRID_REV_TX3912 0x0010
+#define PRID_REV_TX3922 0x0030
+#define PRID_REV_TX3927 0x0040
+#define PRID_REV_VR4111 0x0050
+#define PRID_REV_VR4181 0x0050 /* Same as VR4111 */
+#define PRID_REV_VR4121 0x0060
+#define PRID_REV_VR4122 0x0070
+#define PRID_REV_VR4181A 0x0070 /* Same as VR4122 */
+#define PRID_REV_VR4130 0x0080
+#define PRID_REV_34K_V1_0_2 0x0022
+#define PRID_REV_LOONGSON1B 0x0020
+#define PRID_REV_LOONGSON1C 0x0020 /* Same as Loongson-1B */
+#define PRID_REV_LOONGSON2E 0x0002
+#define PRID_REV_LOONGSON2F 0x0003
+#define PRID_REV_LOONGSON3A_R1 0x0005
+#define PRID_REV_LOONGSON3B_R1 0x0006
+#define PRID_REV_LOONGSON3B_R2 0x0007
+#define PRID_REV_LOONGSON3A_R2 0x0008
+#define PRID_REV_LOONGSON3A_R3_0 0x0009
+#define PRID_REV_LOONGSON3A_R3_1 0x000d
/*
* Older processors used to encode processor version and revision in two
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
index 72d0eab02afc..8eda48748ed5 100644
--- a/arch/mips/include/asm/dma-coherence.h
+++ b/arch/mips/include/asm/dma-coherence.h
@@ -21,10 +21,10 @@ enum coherent_io_user_state {
extern enum coherent_io_user_state coherentio;
extern int hw_coherentio;
#else
-#ifdef CONFIG_DMA_COHERENT
-#define coherentio IO_COHERENCE_ENABLED
-#else
+#ifdef CONFIG_DMA_NONCOHERENT
#define coherentio IO_COHERENCE_DISABLED
+#else
+#define coherentio IO_COHERENCE_ENABLED
#endif
#define hw_coherentio 0
#endif /* CONFIG_DMA_MAYBE_COHERENT */
diff --git a/arch/mips/include/asm/dma-direct.h b/arch/mips/include/asm/dma-direct.h
index f32f15530aba..b5c240806e1b 100644
--- a/arch/mips/include/asm/dma-direct.h
+++ b/arch/mips/include/asm/dma-direct.h
@@ -1 +1,16 @@
-#include <asm/dma-coherence.h>
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MIPS_DMA_DIRECT_H
+#define _MIPS_DMA_DIRECT_H 1
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+#endif /* _MIPS_DMA_DIRECT_H */
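The dma_capable() helper added above accepts a transfer only if its last byte, addr + size - 1, still fits under the device's DMA mask. A tiny stand-alone model of the check (hypothetical values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool capable(uint64_t mask, uint64_t addr, uint64_t size)
{
	return addr + size - 1 <= mask;	/* compare the *last* byte */
}

int main(void)
{
	/* 32-bit mask: a 4 KiB buffer ending at 0xffffffff is fine... */
	printf("%d\n", capable(0xffffffffull, 0xfffff000ull, 0x1000));
	/* ...one byte later it crosses 4 GiB and must be rejected. */
	printf("%d\n", capable(0xffffffffull, 0xfffff001ull, 0x1000));
	return 0;
}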
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 886e75a383f2..e81c4e97ff1a 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -2,19 +2,21 @@
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
-#include <linux/scatterlist.h>
-#include <asm/dma-coherence.h>
-#include <asm/cache.h>
+#include <linux/swiotlb.h>
-#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
-#include <dma-coherence.h>
-#endif
-
-extern const struct dma_map_ops *mips_dma_map_ops;
+extern const struct dma_map_ops jazz_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return mips_dma_map_ops;
+#if defined(CONFIG_MACH_JAZZ)
+ return &jazz_dma_ops;
+#elif defined(CONFIG_SWIOTLB)
+ return &swiotlb_dma_ops;
+#elif defined(CONFIG_DMA_NONCOHERENT_OPS)
+ return &dma_noncoherent_ops;
+#else
+ return &dma_direct_ops;
+#endif
}
#define arch_setup_dma_ops arch_setup_dma_ops
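The DMA operations are now selected at compile time from Kconfig instead of
through the old mips_dma_map_ops pointer. For context, the generic DMA core of
this era consumes get_arch_dma_ops() roughly as follows (simplified from
include/linux/dma-mapping.h; a per-device dma_ops override still takes
precedence):

    static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
    {
            if (dev && dev->dma_ops)
                    return dev->dma_ops;
            return get_arch_dma_ops(dev ? dev->bus : NULL);
    }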
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index cea8ad864b3f..54c730aed327 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -12,6 +12,8 @@
#ifndef _ASM_IO_H
#define _ASM_IO_H
+#define ARCH_HAS_IOREMAP_WC
+
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -141,14 +143,14 @@ static inline void * phys_to_virt(unsigned long address)
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
-static inline unsigned long isa_virt_to_bus(volatile void * address)
+static inline unsigned long isa_virt_to_bus(volatile void *address)
{
- return (unsigned long)address - PAGE_OFFSET;
+ return virt_to_phys(address);
}
-static inline void * isa_bus_to_virt(unsigned long address)
+static inline void *isa_bus_to_virt(unsigned long address)
{
- return (void *)(address + PAGE_OFFSET);
+ return phys_to_virt(address);
}
#define isa_page_to_bus page_to_phys
@@ -278,15 +280,25 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
#define ioremap_cache ioremap_cachable
/*
- * These two are MIPS specific ioremap variant. ioremap_cacheable_cow
- * requests a cachable mapping, ioremap_uncached_accelerated requests a
- * mapping using the uncached accelerated mode which isn't supported on
- * all processors.
+ * ioremap_wc - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_wc performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * but accelerated by means of the write-combining feature. It is
+ * specifically useful for PCIe prefetchable windows, where it may vastly
+ * improve communication performance. If it is determined at boot that
+ * the CPU CCA doesn't support UCA, the mapping falls back to the
+ * _CACHE_UNCACHED option (see cpu_probe()).
*/
-#define ioremap_cacheable_cow(offset, size) \
- __ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
-#define ioremap_uncached_accelerated(offset, size) \
- __ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)
+#define ioremap_wc(offset, size) \
+ __ioremap_mode((offset), (size), boot_cpu_data.writecombine)
static inline void iounmap(const volatile void __iomem *addr)
{
@@ -590,7 +602,7 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
*
* This API used to be exported; it now is for arch code internal use only.
*/
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
+#ifdef CONFIG_DMA_NONCOHERENT
extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
@@ -609,7 +621,7 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
#define dma_cache_inv(start,size) \
do { (void) (start); (void) (size); } while (0)
-#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT */
/*
* Read a 32-bit register that requires a 64-bit read cycle on the bus.
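A typical consumer of the new ioremap_wc() is a driver mapping a prefetchable
PCI BAR, e.g. a framebuffer. A minimal sketch assuming <linux/pci.h>
(map_wc_bar() and its parameters are hypothetical):

    static int map_wc_bar(struct pci_dev *pdev, int bar, void __iomem **out)
    {
            void __iomem *p = ioremap_wc(pci_resource_start(pdev, bar),
                                         pci_resource_len(pdev, bar));

            if (!p)
                    return -ENOMEM;
            /* streaming writes through p may now combine, subject to
             * the CCA support probed at boot */
            *out = p;
            return 0;
    }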
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index ad1a99948f27..a72dfbf1babb 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -68,16 +68,6 @@ struct prev_kprobe {
unsigned long saved_epc;
};
-#define MAX_JPROBES_STACK_SIZE 128
-#define MAX_JPROBES_STACK_ADDR \
- (((unsigned long)current_thread_info()) + THREAD_SIZE - 32 - sizeof(struct pt_regs))
-
-#define MIN_JPROBES_STACK_SIZE(ADDR) \
- ((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR) \
- ? MAX_JPROBES_STACK_ADDR - (ADDR) \
- : MAX_JPROBES_STACK_SIZE)
-
-
#define SKIP_DELAYSLOT 0x0001
/* per-cpu kprobe control block */
@@ -86,12 +76,9 @@ struct kprobe_ctlblk {
unsigned long kprobe_old_SR;
unsigned long kprobe_saved_SR;
unsigned long kprobe_saved_epc;
- unsigned long jprobe_saved_sp;
- struct pt_regs jprobe_saved_regs;
/* Per-thread fields, used while emulating branches */
unsigned long flags;
unsigned long target_epc;
- u8 jprobes_stack[MAX_JPROBES_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/mips/include/asm/mach-ar7/spaces.h b/arch/mips/include/asm/mach-ar7/spaces.h
index 660ab64c0fc9..a004d94dfbdd 100644
--- a/arch/mips/include/asm/mach-ar7/spaces.h
+++ b/arch/mips/include/asm/mach-ar7/spaces.h
@@ -17,9 +17,6 @@
#define PAGE_OFFSET _AC(0x94000000, UL)
#define PHYS_OFFSET _AC(0x14000000, UL)
-#define UNCAC_BASE _AC(0xb4000000, UL) /* 0xa0000000 + PHYS_OFFSET */
-#define IO_BASE UNCAC_BASE
-
#include <asm/mach-generic/spaces.h>
#endif /* __ASM_AR7_SPACES_H */
diff --git a/arch/mips/include/asm/mach-ath25/dma-coherence.h b/arch/mips/include/asm/mach-ath25/dma-coherence.h
deleted file mode 100644
index d5defdde32db..000000000000
--- a/arch/mips/include/asm/mach-ath25/dma-coherence.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
- *
- */
-#ifndef __ASM_MACH_ATH25_DMA_COHERENCE_H
-#define __ASM_MACH_ATH25_DMA_COHERENCE_H
-
-#include <linux/device.h>
-
-/*
- * We need some arbitrary non-zero value to be programmed to the BAR1 register
- * of PCI host controller to enable DMA. The same value should be used as the
- * offset to calculate the physical address of DMA buffer for PCI devices.
- */
-#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000
-
-static inline dma_addr_t ath25_dev_offset(struct device *dev)
-{
-#ifdef CONFIG_PCI
- extern struct bus_type pci_bus_type;
-
- if (dev && dev->bus == &pci_bus_type)
- return AR2315_PCI_HOST_SDRAM_BASEADDR;
-#endif
- return 0;
-}
-
-static inline dma_addr_t
-plat_map_dma_mem(struct device *dev, void *addr, size_t size)
-{
- return virt_to_phys(addr) + ath25_dev_offset(dev);
-}
-
-static inline dma_addr_t
-plat_map_dma_mem_page(struct device *dev, struct page *page)
-{
- return page_to_phys(page) + ath25_dev_offset(dev);
-}
-
-static inline unsigned long
-plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr - ath25_dev_offset(dev);
-}
-
-static inline void
-plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-#ifdef CONFIG_DMA_COHERENT
- return 1;
-#endif
-#ifdef CONFIG_DMA_NONCOHERENT
- return 0;
-#endif
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-#endif /* __ASM_MACH_ATH25_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index d99ca862dae3..284b4fa23e03 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -20,6 +20,10 @@
#include <linux/bitops.h>
#define AR71XX_APB_BASE 0x18000000
+#define AR71XX_GE0_BASE 0x19000000
+#define AR71XX_GE0_SIZE 0x10000
+#define AR71XX_GE1_BASE 0x1a000000
+#define AR71XX_GE1_SIZE 0x10000
#define AR71XX_EHCI_BASE 0x1b000000
#define AR71XX_EHCI_SIZE 0x1000
#define AR71XX_OHCI_BASE 0x1c000000
@@ -39,6 +43,8 @@
#define AR71XX_PLL_SIZE 0x100
#define AR71XX_RESET_BASE (AR71XX_APB_BASE + 0x00060000)
#define AR71XX_RESET_SIZE 0x100
+#define AR71XX_MII_BASE (AR71XX_APB_BASE + 0x00070000)
+#define AR71XX_MII_SIZE 0x100
#define AR71XX_PCI_MEM_BASE 0x10000000
#define AR71XX_PCI_MEM_SIZE 0x07000000
@@ -81,18 +87,39 @@
#define AR933X_UART_BASE (AR71XX_APB_BASE + 0x00020000)
#define AR933X_UART_SIZE 0x14
+#define AR933X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define AR933X_GMAC_SIZE 0x04
#define AR933X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
#define AR933X_WMAC_SIZE 0x20000
#define AR933X_EHCI_BASE 0x1b000000
#define AR933X_EHCI_SIZE 0x1000
+#define AR934X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define AR934X_GMAC_SIZE 0x14
#define AR934X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
#define AR934X_WMAC_SIZE 0x20000
#define AR934X_EHCI_BASE 0x1b000000
#define AR934X_EHCI_SIZE 0x200
+#define AR934X_NFC_BASE 0x1b000200
+#define AR934X_NFC_SIZE 0xb8
#define AR934X_SRIF_BASE (AR71XX_APB_BASE + 0x00116000)
#define AR934X_SRIF_SIZE 0x1000
+#define QCA953X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA953X_GMAC_SIZE 0x14
+#define QCA953X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define QCA953X_WMAC_SIZE 0x20000
+#define QCA953X_EHCI_BASE 0x1b000000
+#define QCA953X_EHCI_SIZE 0x200
+#define QCA953X_SRIF_BASE (AR71XX_APB_BASE + 0x00116000)
+#define QCA953X_SRIF_SIZE 0x1000
+
+#define QCA953X_PCI_CFG_BASE0 0x14000000
+#define QCA953X_PCI_CTRL_BASE0 (AR71XX_APB_BASE + 0x000f0000)
+#define QCA953X_PCI_CRP_BASE0 (AR71XX_APB_BASE + 0x000c0000)
+#define QCA953X_PCI_MEM_BASE0 0x10000000
+#define QCA953X_PCI_MEM_SIZE 0x02000000
+
#define QCA955X_PCI_MEM_BASE0 0x10000000
#define QCA955X_PCI_MEM_BASE1 0x12000000
#define QCA955X_PCI_MEM_SIZE 0x02000000
@@ -106,11 +133,72 @@
#define QCA955X_PCI_CTRL_BASE1 (AR71XX_APB_BASE + 0x00280000)
#define QCA955X_PCI_CTRL_SIZE 0x100
+#define QCA955X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA955X_GMAC_SIZE 0x40
#define QCA955X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
#define QCA955X_WMAC_SIZE 0x20000
#define QCA955X_EHCI0_BASE 0x1b000000
#define QCA955X_EHCI1_BASE 0x1b400000
#define QCA955X_EHCI_SIZE 0x1000
+#define QCA955X_NFC_BASE 0x1b800200
+#define QCA955X_NFC_SIZE 0xb8
+
+#define QCA956X_PCI_MEM_BASE1 0x12000000
+#define QCA956X_PCI_MEM_SIZE 0x02000000
+#define QCA956X_PCI_CFG_BASE1 0x16000000
+#define QCA956X_PCI_CFG_SIZE 0x1000
+#define QCA956X_PCI_CRP_BASE1 (AR71XX_APB_BASE + 0x00250000)
+#define QCA956X_PCI_CRP_SIZE 0x1000
+#define QCA956X_PCI_CTRL_BASE1 (AR71XX_APB_BASE + 0x00280000)
+#define QCA956X_PCI_CTRL_SIZE 0x100
+
+#define QCA956X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define QCA956X_WMAC_SIZE 0x20000
+#define QCA956X_EHCI0_BASE 0x1b000000
+#define QCA956X_EHCI1_BASE 0x1b400000
+#define QCA956X_EHCI_SIZE 0x200
+#define QCA956X_GMAC_SGMII_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA956X_GMAC_SGMII_SIZE 0x64
+#define QCA956X_PLL_BASE (AR71XX_APB_BASE + 0x00050000)
+#define QCA956X_PLL_SIZE 0x50
+#define QCA956X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA956X_GMAC_SIZE 0x64
+
+/*
+ * Hidden Registers
+ */
+#define QCA956X_MAC_CFG_BASE 0xb9000000
+#define QCA956X_MAC_CFG_SIZE 0x64
+
+#define QCA956X_MAC_CFG1_REG 0x00
+#define QCA956X_MAC_CFG1_SOFT_RST BIT(31)
+#define QCA956X_MAC_CFG1_RX_RST BIT(19)
+#define QCA956X_MAC_CFG1_TX_RST BIT(18)
+#define QCA956X_MAC_CFG1_LOOPBACK BIT(8)
+#define QCA956X_MAC_CFG1_RX_EN BIT(2)
+#define QCA956X_MAC_CFG1_TX_EN BIT(0)
+
+#define QCA956X_MAC_CFG2_REG 0x04
+#define QCA956X_MAC_CFG2_IF_1000 BIT(9)
+#define QCA956X_MAC_CFG2_IF_10_100 BIT(8)
+#define QCA956X_MAC_CFG2_HUGE_FRAME_EN BIT(5)
+#define QCA956X_MAC_CFG2_LEN_CHECK BIT(4)
+#define QCA956X_MAC_CFG2_PAD_CRC_EN BIT(2)
+#define QCA956X_MAC_CFG2_FDX BIT(0)
+
+#define QCA956X_MAC_MII_MGMT_CFG_REG 0x20
+#define QCA956X_MGMT_CFG_CLK_DIV_20 0x07
+
+#define QCA956X_MAC_FIFO_CFG0_REG 0x48
+#define QCA956X_MAC_FIFO_CFG1_REG 0x4c
+#define QCA956X_MAC_FIFO_CFG2_REG 0x50
+#define QCA956X_MAC_FIFO_CFG3_REG 0x54
+#define QCA956X_MAC_FIFO_CFG4_REG 0x58
+#define QCA956X_MAC_FIFO_CFG5_REG 0x5c
+
+#define QCA956X_DAM_RESET_OFFSET 0xb90001bc
+#define QCA956X_DAM_RESET_SIZE 0x4
+#define QCA956X_INLINE_CHKSUM_ENG BIT(27)
/*
* DDR_CTRL block
@@ -149,6 +237,12 @@
#define AR934X_DDR_REG_FLUSH_PCIE 0xa8
#define AR934X_DDR_REG_FLUSH_WMAC 0xac
+#define QCA953X_DDR_REG_FLUSH_GE0 0x9c
+#define QCA953X_DDR_REG_FLUSH_GE1 0xa0
+#define QCA953X_DDR_REG_FLUSH_USB 0xa4
+#define QCA953X_DDR_REG_FLUSH_PCIE 0xa8
+#define QCA953X_DDR_REG_FLUSH_WMAC 0xac
+
/*
* PLL block
*/
@@ -166,9 +260,15 @@
#define AR71XX_AHB_DIV_SHIFT 20
#define AR71XX_AHB_DIV_MASK 0x7
+#define AR71XX_ETH0_PLL_SHIFT 17
+#define AR71XX_ETH1_PLL_SHIFT 19
+
#define AR724X_PLL_REG_CPU_CONFIG 0x00
#define AR724X_PLL_REG_PCIE_CONFIG 0x10
+#define AR724X_PLL_REG_PCIE_CONFIG_PPL_BYPASS BIT(16)
+#define AR724X_PLL_REG_PCIE_CONFIG_PPL_RESET BIT(25)
+
#define AR724X_PLL_FB_SHIFT 0
#define AR724X_PLL_FB_MASK 0x3ff
#define AR724X_PLL_REF_DIV_SHIFT 10
@@ -178,6 +278,8 @@
#define AR724X_DDR_DIV_SHIFT 22
#define AR724X_DDR_DIV_MASK 0x3
+#define AR7242_PLL_REG_ETH0_INT_CLOCK 0x2c
+
#define AR913X_PLL_REG_CPU_CONFIG 0x00
#define AR913X_PLL_REG_ETH_CONFIG 0x04
#define AR913X_PLL_REG_ETH0_INT_CLOCK 0x14
@@ -190,6 +292,9 @@
#define AR913X_AHB_DIV_SHIFT 19
#define AR913X_AHB_DIV_MASK 0x1
+#define AR913X_ETH0_PLL_SHIFT 20
+#define AR913X_ETH1_PLL_SHIFT 22
+
#define AR933X_PLL_CPU_CONFIG_REG 0x00
#define AR933X_PLL_CLOCK_CTRL_REG 0x08
@@ -211,6 +316,8 @@
#define AR934X_PLL_CPU_CONFIG_REG 0x00
#define AR934X_PLL_DDR_CONFIG_REG 0x04
#define AR934X_PLL_CPU_DDR_CLK_CTRL_REG 0x08
+#define AR934X_PLL_SWITCH_CLOCK_CONTROL_REG 0x24
+#define AR934X_PLL_ETH_XMII_CONTROL_REG 0x2c
#define AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
#define AR934X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
@@ -243,9 +350,52 @@
#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+#define AR934X_PLL_SWITCH_CLOCK_CONTROL_MDIO_CLK_SEL BIT(6)
+
+#define QCA953X_PLL_CPU_CONFIG_REG 0x00
+#define QCA953X_PLL_DDR_CONFIG_REG 0x04
+#define QCA953X_PLL_CLK_CTRL_REG 0x08
+#define QCA953X_PLL_SWITCH_CLOCK_CONTROL_REG 0x24
+#define QCA953X_PLL_ETH_XMII_CONTROL_REG 0x2c
+#define QCA953X_PLL_ETH_SGMII_CONTROL_REG 0x48
+
+#define QCA953X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
+#define QCA953X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
+#define QCA953X_PLL_CPU_CONFIG_NINT_SHIFT 6
+#define QCA953X_PLL_CPU_CONFIG_NINT_MASK 0x3f
+#define QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
+#define QCA953X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
+#define QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA953X_PLL_DDR_CONFIG_NFRAC_SHIFT 0
+#define QCA953X_PLL_DDR_CONFIG_NFRAC_MASK 0x3ff
+#define QCA953X_PLL_DDR_CONFIG_NINT_SHIFT 10
+#define QCA953X_PLL_DDR_CONFIG_NINT_MASK 0x3f
+#define QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
+#define QCA953X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
+#define QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
+#define QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA953X_PLL_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
+#define QCA953X_PLL_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
+#define QCA953X_PLL_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
+#define QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
+#define QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
+#define QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
+#define QCA953X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL BIT(20)
+#define QCA953X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
+#define QCA953X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
#define QCA955X_PLL_CPU_CONFIG_REG 0x00
#define QCA955X_PLL_DDR_CONFIG_REG 0x04
#define QCA955X_PLL_CLK_CTRL_REG 0x08
+#define QCA955X_PLL_ETH_XMII_CONTROL_REG 0x28
+#define QCA955X_PLL_ETH_SGMII_CONTROL_REG 0x48
+#define QCA955X_PLL_ETH_SGMII_SERDES_REG 0x4c
#define QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
#define QCA955X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
@@ -278,6 +428,81 @@
#define QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
#define QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+#define QCA955X_PLL_ETH_SGMII_SERDES_LOCK_DETECT BIT(2)
+#define QCA955X_PLL_ETH_SGMII_SERDES_PLL_REFCLK BIT(1)
+#define QCA955X_PLL_ETH_SGMII_SERDES_EN_PLL BIT(0)
+
+#define QCA956X_PLL_CPU_CONFIG_REG 0x00
+#define QCA956X_PLL_CPU_CONFIG1_REG 0x04
+#define QCA956X_PLL_DDR_CONFIG_REG 0x08
+#define QCA956X_PLL_DDR_CONFIG1_REG 0x0c
+#define QCA956X_PLL_CLK_CTRL_REG 0x10
+#define QCA956X_PLL_SWITCH_CLOCK_CONTROL_REG 0x28
+#define QCA956X_PLL_ETH_XMII_CONTROL_REG 0x30
+#define QCA956X_PLL_ETH_SGMII_SERDES_REG 0x4c
+
+#define QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
+#define QCA956X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
+#define QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_L_SHIFT 0
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_L_MASK 0x1f
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT 5
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_H_MASK 0x1fff
+#define QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT 18
+#define QCA956X_PLL_CPU_CONFIG1_NINT_MASK 0x1ff
+
+#define QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
+#define QCA956X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
+#define QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
+#define QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_L_SHIFT 0
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_L_MASK 0x1f
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT 5
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_H_MASK 0x1fff
+#define QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT 18
+#define QCA956X_PLL_DDR_CONFIG1_NINT_MASK 0x1ff
+
+#define QCA956X_PLL_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
+#define QCA956X_PLL_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
+#define QCA956X_PLL_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
+#define QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
+#define QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
+#define QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
+#define QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_DDRPLL BIT(20)
+#define QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_CPUPLL BIT(21)
+#define QCA956X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_I2C_CLK_SELB BIT(5)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL0_1 BIT(6)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_UART1_CLK_SEL BIT(7)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_USB_REFCLK_FREQ_SEL_SHIFT 8
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_USB_REFCLK_FREQ_SEL_MASK 0xf
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_EN_PLL_TOP BIT(12)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL0_2 BIT(13)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL1_1 BIT(14)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL1_2 BIT(15)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_SWITCH_FUNC_TST_MODE BIT(16)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_EEE_ENABLE BIT(17)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_OEN_CLK125M_PLL BIT(18)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_SWITCHCLK_SEL BIT(19)
+
+#define QCA956X_PLL_ETH_XMII_TX_INVERT BIT(1)
+#define QCA956X_PLL_ETH_XMII_GIGE BIT(25)
+#define QCA956X_PLL_ETH_XMII_RX_DELAY_SHIFT 28
+#define QCA956X_PLL_ETH_XMII_RX_DELAY_MASK 0x3
+#define QCA956X_PLL_ETH_XMII_TX_DELAY_SHIFT 26
+#define QCA956X_PLL_ETH_XMII_TX_DELAY_MASK 3
+
+#define QCA956X_PLL_ETH_SGMII_SERDES_LOCK_DETECT BIT(2)
+#define QCA956X_PLL_ETH_SGMII_SERDES_PLL_REFCLK BIT(1)
+#define QCA956X_PLL_ETH_SGMII_SERDES_EN_PLL BIT(0)
+
/*
* USB_CONFIG block
*/
@@ -317,10 +542,19 @@
#define AR934X_RESET_REG_BOOTSTRAP 0xb0
#define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac
+#define QCA953X_RESET_REG_RESET_MODULE 0x1c
+#define QCA953X_RESET_REG_BOOTSTRAP 0xb0
+#define QCA953X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac
+
#define QCA955X_RESET_REG_RESET_MODULE 0x1c
#define QCA955X_RESET_REG_BOOTSTRAP 0xb0
#define QCA955X_RESET_REG_EXT_INT_STATUS 0xac
+#define QCA956X_RESET_REG_RESET_MODULE 0x1c
+#define QCA956X_RESET_REG_BOOTSTRAP 0xb0
+#define QCA956X_RESET_REG_EXT_INT_STATUS 0xac
+
+#define MISC_INT_MIPS_SI_TIMERINT_MASK BIT(28)
#define MISC_INT_ETHSW BIT(12)
#define MISC_INT_TIMER4 BIT(10)
#define MISC_INT_TIMER3 BIT(9)
@@ -370,16 +604,123 @@
#define AR913X_RESET_USB_HOST BIT(5)
#define AR913X_RESET_USB_PHY BIT(4)
+#define AR933X_RESET_GE1_MDIO BIT(23)
+#define AR933X_RESET_GE0_MDIO BIT(22)
+#define AR933X_RESET_GE1_MAC BIT(13)
#define AR933X_RESET_WMAC BIT(11)
+#define AR933X_RESET_GE0_MAC BIT(9)
#define AR933X_RESET_USB_HOST BIT(5)
#define AR933X_RESET_USB_PHY BIT(4)
#define AR933X_RESET_USBSUS_OVERRIDE BIT(3)
+#define AR934X_RESET_HOST BIT(31)
+#define AR934X_RESET_SLIC BIT(30)
+#define AR934X_RESET_HDMA BIT(29)
+#define AR934X_RESET_EXTERNAL BIT(28)
+#define AR934X_RESET_RTC BIT(27)
+#define AR934X_RESET_PCIE_EP_INT BIT(26)
+#define AR934X_RESET_CHKSUM_ACC BIT(25)
+#define AR934X_RESET_FULL_CHIP BIT(24)
+#define AR934X_RESET_GE1_MDIO BIT(23)
+#define AR934X_RESET_GE0_MDIO BIT(22)
+#define AR934X_RESET_CPU_NMI BIT(21)
+#define AR934X_RESET_CPU_COLD BIT(20)
+#define AR934X_RESET_HOST_RESET_INT BIT(19)
+#define AR934X_RESET_PCIE_EP BIT(18)
+#define AR934X_RESET_UART1 BIT(17)
+#define AR934X_RESET_DDR BIT(16)
+#define AR934X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define AR934X_RESET_NANDF BIT(14)
+#define AR934X_RESET_GE1_MAC BIT(13)
+#define AR934X_RESET_ETH_SWITCH_ANALOG BIT(12)
#define AR934X_RESET_USB_PHY_ANALOG BIT(11)
+#define AR934X_RESET_HOST_DMA_INT BIT(10)
+#define AR934X_RESET_GE0_MAC BIT(9)
+#define AR934X_RESET_ETH_SWITCH BIT(8)
+#define AR934X_RESET_PCIE_PHY BIT(7)
+#define AR934X_RESET_PCIE BIT(6)
#define AR934X_RESET_USB_HOST BIT(5)
#define AR934X_RESET_USB_PHY BIT(4)
#define AR934X_RESET_USBSUS_OVERRIDE BIT(3)
-
+#define AR934X_RESET_LUT BIT(2)
+#define AR934X_RESET_MBOX BIT(1)
+#define AR934X_RESET_I2S BIT(0)
+
+#define QCA953X_RESET_USB_EXT_PWR BIT(29)
+#define QCA953X_RESET_EXTERNAL BIT(28)
+#define QCA953X_RESET_RTC BIT(27)
+#define QCA953X_RESET_FULL_CHIP BIT(24)
+#define QCA953X_RESET_GE1_MDIO BIT(23)
+#define QCA953X_RESET_GE0_MDIO BIT(22)
+#define QCA953X_RESET_CPU_NMI BIT(21)
+#define QCA953X_RESET_CPU_COLD BIT(20)
+#define QCA953X_RESET_DDR BIT(16)
+#define QCA953X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define QCA953X_RESET_GE1_MAC BIT(13)
+#define QCA953X_RESET_ETH_SWITCH_ANALOG BIT(12)
+#define QCA953X_RESET_USB_PHY_ANALOG BIT(11)
+#define QCA953X_RESET_GE0_MAC BIT(9)
+#define QCA953X_RESET_ETH_SWITCH BIT(8)
+#define QCA953X_RESET_PCIE_PHY BIT(7)
+#define QCA953X_RESET_PCIE BIT(6)
+#define QCA953X_RESET_USB_HOST BIT(5)
+#define QCA953X_RESET_USB_PHY BIT(4)
+#define QCA953X_RESET_USBSUS_OVERRIDE BIT(3)
+
+#define QCA955X_RESET_HOST BIT(31)
+#define QCA955X_RESET_SLIC BIT(30)
+#define QCA955X_RESET_HDMA BIT(29)
+#define QCA955X_RESET_EXTERNAL BIT(28)
+#define QCA955X_RESET_RTC BIT(27)
+#define QCA955X_RESET_PCIE_EP_INT BIT(26)
+#define QCA955X_RESET_CHKSUM_ACC BIT(25)
+#define QCA955X_RESET_FULL_CHIP BIT(24)
+#define QCA955X_RESET_GE1_MDIO BIT(23)
+#define QCA955X_RESET_GE0_MDIO BIT(22)
+#define QCA955X_RESET_CPU_NMI BIT(21)
+#define QCA955X_RESET_CPU_COLD BIT(20)
+#define QCA955X_RESET_HOST_RESET_INT BIT(19)
+#define QCA955X_RESET_PCIE_EP BIT(18)
+#define QCA955X_RESET_UART1 BIT(17)
+#define QCA955X_RESET_DDR BIT(16)
+#define QCA955X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define QCA955X_RESET_NANDF BIT(14)
+#define QCA955X_RESET_GE1_MAC BIT(13)
+#define QCA955X_RESET_SGMII_ANALOG BIT(12)
+#define QCA955X_RESET_USB_PHY_ANALOG BIT(11)
+#define QCA955X_RESET_HOST_DMA_INT BIT(10)
+#define QCA955X_RESET_GE0_MAC BIT(9)
+#define QCA955X_RESET_SGMII BIT(8)
+#define QCA955X_RESET_PCIE_PHY BIT(7)
+#define QCA955X_RESET_PCIE BIT(6)
+#define QCA955X_RESET_USB_HOST BIT(5)
+#define QCA955X_RESET_USB_PHY BIT(4)
+#define QCA955X_RESET_USBSUS_OVERRIDE BIT(3)
+#define QCA955X_RESET_LUT BIT(2)
+#define QCA955X_RESET_MBOX BIT(1)
+#define QCA955X_RESET_I2S BIT(0)
+
+#define QCA956X_RESET_EXTERNAL BIT(28)
+#define QCA956X_RESET_FULL_CHIP BIT(24)
+#define QCA956X_RESET_GE1_MDIO BIT(23)
+#define QCA956X_RESET_GE0_MDIO BIT(22)
+#define QCA956X_RESET_CPU_NMI BIT(21)
+#define QCA956X_RESET_CPU_COLD BIT(20)
+#define QCA956X_RESET_DMA BIT(19)
+#define QCA956X_RESET_DDR BIT(16)
+#define QCA956X_RESET_GE1_MAC BIT(13)
+#define QCA956X_RESET_SGMII_ANALOG BIT(12)
+#define QCA956X_RESET_USB_PHY_ANALOG BIT(11)
+#define QCA956X_RESET_GE0_MAC BIT(9)
+#define QCA956X_RESET_SGMII BIT(8)
+#define QCA956X_RESET_USB_HOST BIT(5)
+#define QCA956X_RESET_USB_PHY BIT(4)
+#define QCA956X_RESET_USBSUS_OVERRIDE BIT(3)
+#define QCA956X_RESET_SWITCH_ANALOG BIT(2)
+#define QCA956X_RESET_SWITCH BIT(0)
+
+#define AR933X_BOOTSTRAP_MDIO_GPIO_EN BIT(18)
+#define AR933X_BOOTSTRAP_EEPBUSY BIT(4)
#define AR933X_BOOTSTRAP_REF_CLK_40 BIT(0)
#define AR934X_BOOTSTRAP_SW_OPTION8 BIT(23)
@@ -398,8 +739,17 @@
#define AR934X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
#define AR934X_BOOTSTRAP_DDR1 BIT(0)
+#define QCA953X_BOOTSTRAP_SW_OPTION2 BIT(12)
+#define QCA953X_BOOTSTRAP_SW_OPTION1 BIT(11)
+#define QCA953X_BOOTSTRAP_EJTAG_MODE BIT(5)
+#define QCA953X_BOOTSTRAP_REF_CLK_40 BIT(4)
+#define QCA953X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
+#define QCA953X_BOOTSTRAP_DDR1 BIT(0)
+
#define QCA955X_BOOTSTRAP_REF_CLK_40 BIT(4)
+#define QCA956X_BOOTSTRAP_REF_CLK_40 BIT(2)
+
#define AR934X_PCIE_WMAC_INT_WMAC_MISC BIT(0)
#define AR934X_PCIE_WMAC_INT_WMAC_TX BIT(1)
#define AR934X_PCIE_WMAC_INT_WMAC_RXLP BIT(2)
@@ -418,6 +768,24 @@
AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \
AR934X_PCIE_WMAC_INT_PCIE_RC3)
+#define QCA953X_PCIE_WMAC_INT_WMAC_MISC BIT(0)
+#define QCA953X_PCIE_WMAC_INT_WMAC_TX BIT(1)
+#define QCA953X_PCIE_WMAC_INT_WMAC_RXLP BIT(2)
+#define QCA953X_PCIE_WMAC_INT_WMAC_RXHP BIT(3)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC BIT(4)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC0 BIT(5)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC1 BIT(6)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC2 BIT(7)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC3 BIT(8)
+#define QCA953X_PCIE_WMAC_INT_WMAC_ALL \
+ (QCA953X_PCIE_WMAC_INT_WMAC_MISC | QCA953X_PCIE_WMAC_INT_WMAC_TX | \
+ QCA953X_PCIE_WMAC_INT_WMAC_RXLP | QCA953X_PCIE_WMAC_INT_WMAC_RXHP)
+
+#define QCA953X_PCIE_WMAC_INT_PCIE_ALL \
+ (QCA953X_PCIE_WMAC_INT_PCIE_RC | QCA953X_PCIE_WMAC_INT_PCIE_RC0 | \
+ QCA953X_PCIE_WMAC_INT_PCIE_RC1 | QCA953X_PCIE_WMAC_INT_PCIE_RC2 | \
+ QCA953X_PCIE_WMAC_INT_PCIE_RC3)
+
#define QCA955X_EXT_INT_WMAC_MISC BIT(0)
#define QCA955X_EXT_INT_WMAC_TX BIT(1)
#define QCA955X_EXT_INT_WMAC_RXLP BIT(2)
@@ -449,6 +817,37 @@
QCA955X_EXT_INT_PCIE_RC2_INT1 | QCA955X_EXT_INT_PCIE_RC2_INT2 | \
QCA955X_EXT_INT_PCIE_RC2_INT3)
+#define QCA956X_EXT_INT_WMAC_MISC BIT(0)
+#define QCA956X_EXT_INT_WMAC_TX BIT(1)
+#define QCA956X_EXT_INT_WMAC_RXLP BIT(2)
+#define QCA956X_EXT_INT_WMAC_RXHP BIT(3)
+#define QCA956X_EXT_INT_PCIE_RC1 BIT(4)
+#define QCA956X_EXT_INT_PCIE_RC1_INT0 BIT(5)
+#define QCA956X_EXT_INT_PCIE_RC1_INT1 BIT(6)
+#define QCA956X_EXT_INT_PCIE_RC1_INT2 BIT(7)
+#define QCA956X_EXT_INT_PCIE_RC1_INT3 BIT(8)
+#define QCA956X_EXT_INT_PCIE_RC2 BIT(12)
+#define QCA956X_EXT_INT_PCIE_RC2_INT0 BIT(13)
+#define QCA956X_EXT_INT_PCIE_RC2_INT1 BIT(14)
+#define QCA956X_EXT_INT_PCIE_RC2_INT2 BIT(15)
+#define QCA956X_EXT_INT_PCIE_RC2_INT3 BIT(16)
+#define QCA956X_EXT_INT_USB1 BIT(24)
+#define QCA956X_EXT_INT_USB2 BIT(28)
+
+#define QCA956X_EXT_INT_WMAC_ALL \
+ (QCA956X_EXT_INT_WMAC_MISC | QCA956X_EXT_INT_WMAC_TX | \
+ QCA956X_EXT_INT_WMAC_RXLP | QCA956X_EXT_INT_WMAC_RXHP)
+
+#define QCA956X_EXT_INT_PCIE_RC1_ALL \
+ (QCA956X_EXT_INT_PCIE_RC1 | QCA956X_EXT_INT_PCIE_RC1_INT0 | \
+ QCA956X_EXT_INT_PCIE_RC1_INT1 | QCA956X_EXT_INT_PCIE_RC1_INT2 | \
+ QCA956X_EXT_INT_PCIE_RC1_INT3)
+
+#define QCA956X_EXT_INT_PCIE_RC2_ALL \
+ (QCA956X_EXT_INT_PCIE_RC2 | QCA956X_EXT_INT_PCIE_RC2_INT0 | \
+ QCA956X_EXT_INT_PCIE_RC2_INT1 | QCA956X_EXT_INT_PCIE_RC2_INT2 | \
+ QCA956X_EXT_INT_PCIE_RC2_INT3)
+
#define REV_ID_MAJOR_MASK 0xfff0
#define REV_ID_MAJOR_AR71XX 0x00a0
#define REV_ID_MAJOR_AR913X 0x00b0
@@ -460,8 +859,12 @@
#define REV_ID_MAJOR_AR9341 0x0120
#define REV_ID_MAJOR_AR9342 0x1120
#define REV_ID_MAJOR_AR9344 0x2120
+#define REV_ID_MAJOR_QCA9533 0x0140
+#define REV_ID_MAJOR_QCA9533_V2 0x0160
#define REV_ID_MAJOR_QCA9556 0x0130
#define REV_ID_MAJOR_QCA9558 0x1130
+#define REV_ID_MAJOR_TP9343 0x0150
+#define REV_ID_MAJOR_QCA956X 0x1150
#define AR71XX_REV_ID_MINOR_MASK 0x3
#define AR71XX_REV_ID_MINOR_AR7130 0x0
@@ -482,8 +885,12 @@
#define AR934X_REV_ID_REVISION_MASK 0xf
+#define QCA953X_REV_ID_REVISION_MASK 0xf
+
#define QCA955X_REV_ID_REVISION_MASK 0xf
+#define QCA956X_REV_ID_REVISION_MASK 0xf
+
/*
* SPI block
*/
@@ -521,15 +928,63 @@
#define AR71XX_GPIO_REG_INT_ENABLE 0x24
#define AR71XX_GPIO_REG_FUNC 0x28
+#define AR934X_GPIO_REG_OUT_FUNC0 0x2c
+#define AR934X_GPIO_REG_OUT_FUNC1 0x30
+#define AR934X_GPIO_REG_OUT_FUNC2 0x34
+#define AR934X_GPIO_REG_OUT_FUNC3 0x38
+#define AR934X_GPIO_REG_OUT_FUNC4 0x3c
+#define AR934X_GPIO_REG_OUT_FUNC5 0x40
#define AR934X_GPIO_REG_FUNC 0x6c
+#define QCA953X_GPIO_REG_OUT_FUNC0 0x2c
+#define QCA953X_GPIO_REG_OUT_FUNC1 0x30
+#define QCA953X_GPIO_REG_OUT_FUNC2 0x34
+#define QCA953X_GPIO_REG_OUT_FUNC3 0x38
+#define QCA953X_GPIO_REG_OUT_FUNC4 0x3c
+#define QCA953X_GPIO_REG_IN_ENABLE0 0x44
+#define QCA953X_GPIO_REG_FUNC 0x6c
+
+#define QCA953X_GPIO_OUT_MUX_SPI_CS1 10
+#define QCA953X_GPIO_OUT_MUX_SPI_CS2 11
+#define QCA953X_GPIO_OUT_MUX_SPI_CS0 9
+#define QCA953X_GPIO_OUT_MUX_SPI_CLK 8
+#define QCA953X_GPIO_OUT_MUX_SPI_MOSI 12
+#define QCA953X_GPIO_OUT_MUX_LED_LINK1 41
+#define QCA953X_GPIO_OUT_MUX_LED_LINK2 42
+#define QCA953X_GPIO_OUT_MUX_LED_LINK3 43
+#define QCA953X_GPIO_OUT_MUX_LED_LINK4 44
+#define QCA953X_GPIO_OUT_MUX_LED_LINK5 45
+
+#define QCA955X_GPIO_REG_OUT_FUNC0 0x2c
+#define QCA955X_GPIO_REG_OUT_FUNC1 0x30
+#define QCA955X_GPIO_REG_OUT_FUNC2 0x34
+#define QCA955X_GPIO_REG_OUT_FUNC3 0x38
+#define QCA955X_GPIO_REG_OUT_FUNC4 0x3c
+#define QCA955X_GPIO_REG_OUT_FUNC5 0x40
+#define QCA955X_GPIO_REG_FUNC 0x6c
+
+#define QCA956X_GPIO_REG_OUT_FUNC0 0x2c
+#define QCA956X_GPIO_REG_OUT_FUNC1 0x30
+#define QCA956X_GPIO_REG_OUT_FUNC2 0x34
+#define QCA956X_GPIO_REG_OUT_FUNC3 0x38
+#define QCA956X_GPIO_REG_OUT_FUNC4 0x3c
+#define QCA956X_GPIO_REG_OUT_FUNC5 0x40
+#define QCA956X_GPIO_REG_IN_ENABLE0 0x44
+#define QCA956X_GPIO_REG_IN_ENABLE3 0x50
+#define QCA956X_GPIO_REG_FUNC 0x6c
+
+#define QCA956X_GPIO_OUT_MUX_GE0_MDO 32
+#define QCA956X_GPIO_OUT_MUX_GE0_MDC 33
+
#define AR71XX_GPIO_COUNT 16
#define AR7240_GPIO_COUNT 18
#define AR7241_GPIO_COUNT 20
#define AR913X_GPIO_COUNT 22
#define AR933X_GPIO_COUNT 30
#define AR934X_GPIO_COUNT 23
+#define QCA953X_GPIO_COUNT 18
#define QCA955X_GPIO_COUNT 24
+#define QCA956X_GPIO_COUNT 23
/*
* SRIF block
@@ -552,4 +1007,318 @@
#define AR934X_SRIF_DPLL2_OUTDIV_SHIFT 13
#define AR934X_SRIF_DPLL2_OUTDIV_MASK 0x7
+#define QCA953X_SRIF_CPU_DPLL1_REG 0x1c0
+#define QCA953X_SRIF_CPU_DPLL2_REG 0x1c4
+#define QCA953X_SRIF_CPU_DPLL3_REG 0x1c8
+
+#define QCA953X_SRIF_DDR_DPLL1_REG 0x240
+#define QCA953X_SRIF_DDR_DPLL2_REG 0x244
+#define QCA953X_SRIF_DDR_DPLL3_REG 0x248
+
+#define QCA953X_SRIF_DPLL1_REFDIV_SHIFT 27
+#define QCA953X_SRIF_DPLL1_REFDIV_MASK 0x1f
+#define QCA953X_SRIF_DPLL1_NINT_SHIFT 18
+#define QCA953X_SRIF_DPLL1_NINT_MASK 0x1ff
+#define QCA953X_SRIF_DPLL1_NFRAC_MASK 0x0003ffff
+
+#define QCA953X_SRIF_DPLL2_LOCAL_PLL BIT(30)
+#define QCA953X_SRIF_DPLL2_OUTDIV_SHIFT 13
+#define QCA953X_SRIF_DPLL2_OUTDIV_MASK 0x7
+
+#define AR71XX_GPIO_FUNC_STEREO_EN BIT(17)
+#define AR71XX_GPIO_FUNC_SLIC_EN BIT(16)
+#define AR71XX_GPIO_FUNC_SPI_CS2_EN BIT(13)
+#define AR71XX_GPIO_FUNC_SPI_CS1_EN BIT(12)
+#define AR71XX_GPIO_FUNC_UART_EN BIT(8)
+#define AR71XX_GPIO_FUNC_USB_OC_EN BIT(4)
+#define AR71XX_GPIO_FUNC_USB_CLK_EN BIT(0)
+
+#define AR724X_GPIO_FUNC_GE0_MII_CLK_EN BIT(19)
+#define AR724X_GPIO_FUNC_SPI_EN BIT(18)
+#define AR724X_GPIO_FUNC_SPI_CS_EN2 BIT(14)
+#define AR724X_GPIO_FUNC_SPI_CS_EN1 BIT(13)
+#define AR724X_GPIO_FUNC_CLK_OBS5_EN BIT(12)
+#define AR724X_GPIO_FUNC_CLK_OBS4_EN BIT(11)
+#define AR724X_GPIO_FUNC_CLK_OBS3_EN BIT(10)
+#define AR724X_GPIO_FUNC_CLK_OBS2_EN BIT(9)
+#define AR724X_GPIO_FUNC_CLK_OBS1_EN BIT(8)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED4_EN BIT(7)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED3_EN BIT(6)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED2_EN BIT(5)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED1_EN BIT(4)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED0_EN BIT(3)
+#define AR724X_GPIO_FUNC_UART_RTS_CTS_EN BIT(2)
+#define AR724X_GPIO_FUNC_UART_EN BIT(1)
+#define AR724X_GPIO_FUNC_JTAG_DISABLE BIT(0)
+
+#define AR913X_GPIO_FUNC_WMAC_LED_EN BIT(22)
+#define AR913X_GPIO_FUNC_EXP_PORT_CS_EN BIT(21)
+#define AR913X_GPIO_FUNC_I2S_REFCLKEN BIT(20)
+#define AR913X_GPIO_FUNC_I2S_MCKEN BIT(19)
+#define AR913X_GPIO_FUNC_I2S1_EN BIT(18)
+#define AR913X_GPIO_FUNC_I2S0_EN BIT(17)
+#define AR913X_GPIO_FUNC_SLIC_EN BIT(16)
+#define AR913X_GPIO_FUNC_UART_RTSCTS_EN BIT(9)
+#define AR913X_GPIO_FUNC_UART_EN BIT(8)
+#define AR913X_GPIO_FUNC_USB_CLK_EN BIT(4)
+
+#define AR933X_GPIO_FUNC_SPDIF2TCK BIT(31)
+#define AR933X_GPIO_FUNC_SPDIF_EN BIT(30)
+#define AR933X_GPIO_FUNC_I2SO_22_18_EN BIT(29)
+#define AR933X_GPIO_FUNC_I2S_MCK_EN BIT(27)
+#define AR933X_GPIO_FUNC_I2SO_EN BIT(26)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_DUPL BIT(25)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_COLL BIT(24)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_ACT BIT(23)
+#define AR933X_GPIO_FUNC_SPI_EN BIT(18)
+#define AR933X_GPIO_FUNC_SPI_CS_EN2 BIT(14)
+#define AR933X_GPIO_FUNC_SPI_CS_EN1 BIT(13)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED4_EN BIT(7)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED3_EN BIT(6)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED2_EN BIT(5)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED1_EN BIT(4)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED0_EN BIT(3)
+#define AR933X_GPIO_FUNC_UART_RTS_CTS_EN BIT(2)
+#define AR933X_GPIO_FUNC_UART_EN BIT(1)
+#define AR933X_GPIO_FUNC_JTAG_DISABLE BIT(0)
+
+#define AR934X_GPIO_FUNC_CLK_OBS7_EN BIT(9)
+#define AR934X_GPIO_FUNC_CLK_OBS6_EN BIT(8)
+#define AR934X_GPIO_FUNC_CLK_OBS5_EN BIT(7)
+#define AR934X_GPIO_FUNC_CLK_OBS4_EN BIT(6)
+#define AR934X_GPIO_FUNC_CLK_OBS3_EN BIT(5)
+#define AR934X_GPIO_FUNC_CLK_OBS2_EN BIT(4)
+#define AR934X_GPIO_FUNC_CLK_OBS1_EN BIT(3)
+#define AR934X_GPIO_FUNC_CLK_OBS0_EN BIT(2)
+#define AR934X_GPIO_FUNC_JTAG_DISABLE BIT(1)
+
+#define AR934X_GPIO_OUT_GPIO 0
+#define AR934X_GPIO_OUT_SPI_CS1 7
+#define AR934X_GPIO_OUT_LED_LINK0 41
+#define AR934X_GPIO_OUT_LED_LINK1 42
+#define AR934X_GPIO_OUT_LED_LINK2 43
+#define AR934X_GPIO_OUT_LED_LINK3 44
+#define AR934X_GPIO_OUT_LED_LINK4 45
+#define AR934X_GPIO_OUT_EXT_LNA0 46
+#define AR934X_GPIO_OUT_EXT_LNA1 47
+
+#define QCA955X_GPIO_FUNC_CLK_OBS7_EN BIT(9)
+#define QCA955X_GPIO_FUNC_CLK_OBS6_EN BIT(8)
+#define QCA955X_GPIO_FUNC_CLK_OBS5_EN BIT(7)
+#define QCA955X_GPIO_FUNC_CLK_OBS4_EN BIT(6)
+#define QCA955X_GPIO_FUNC_CLK_OBS3_EN BIT(5)
+#define QCA955X_GPIO_FUNC_CLK_OBS2_EN BIT(4)
+#define QCA955X_GPIO_FUNC_CLK_OBS1_EN BIT(3)
+#define QCA955X_GPIO_FUNC_JTAG_DISABLE BIT(1)
+
+#define QCA955X_GPIO_OUT_GPIO 0
+#define QCA955X_MII_EXT_MDI 1
+#define QCA955X_SLIC_DATA_OUT 3
+#define QCA955X_SLIC_PCM_FS 4
+#define QCA955X_SLIC_PCM_CLK 5
+#define QCA955X_SPI_CLK 8
+#define QCA955X_SPI_CS_0 9
+#define QCA955X_SPI_CS_1 10
+#define QCA955X_SPI_CS_2 11
+#define QCA955X_SPI_MISO 12
+#define QCA955X_I2S_CLK 13
+#define QCA955X_I2S_WS 14
+#define QCA955X_I2S_SD 15
+#define QCA955X_I2S_MCK 16
+#define QCA955X_SPDIF_OUT 17
+#define QCA955X_UART1_TD 18
+#define QCA955X_UART1_RTS 19
+#define QCA955X_UART1_RD 20
+#define QCA955X_UART1_CTS 21
+#define QCA955X_UART0_SOUT 22
+#define QCA955X_SPDIF2_OUT 23
+#define QCA955X_LED_SGMII_SPEED0 24
+#define QCA955X_LED_SGMII_SPEED1 25
+#define QCA955X_LED_SGMII_DUPLEX 26
+#define QCA955X_LED_SGMII_LINK_UP 27
+#define QCA955X_SGMII_SPEED0_INVERT 28
+#define QCA955X_SGMII_SPEED1_INVERT 29
+#define QCA955X_SGMII_DUPLEX_INVERT 30
+#define QCA955X_SGMII_LINK_UP_INVERT 31
+#define QCA955X_GE1_MII_MDO 32
+#define QCA955X_GE1_MII_MDC 33
+#define QCA955X_SWCOM2 38
+#define QCA955X_SWCOM3 39
+#define QCA955X_MAC2_GPIO 40
+#define QCA955X_MAC3_GPIO 41
+#define QCA955X_ATT_LED 42
+#define QCA955X_PWR_LED 43
+#define QCA955X_TX_FRAME 44
+#define QCA955X_RX_CLEAR_EXTERNAL 45
+#define QCA955X_LED_NETWORK_EN 46
+#define QCA955X_LED_POWER_EN 47
+#define QCA955X_WMAC_GLUE_WOW 68
+#define QCA955X_RX_CLEAR_EXTENSION 70
+#define QCA955X_CP_NAND_CS1 73
+#define QCA955X_USB_SUSPEND 74
+#define QCA955X_ETH_TX_ERR 75
+#define QCA955X_DDR_DQ_OE 76
+#define QCA955X_CLKREQ_N_EP 77
+#define QCA955X_CLKREQ_N_RC 78
+#define QCA955X_CLK_OBS0 79
+#define QCA955X_CLK_OBS1 80
+#define QCA955X_CLK_OBS2 81
+#define QCA955X_CLK_OBS3 82
+#define QCA955X_CLK_OBS4 83
+#define QCA955X_CLK_OBS5 84
+
+/*
+ * MII_CTRL block
+ */
+#define AR71XX_MII_REG_MII0_CTRL 0x00
+#define AR71XX_MII_REG_MII1_CTRL 0x04
+
+#define AR71XX_MII_CTRL_IF_MASK 3
+#define AR71XX_MII_CTRL_SPEED_SHIFT 4
+#define AR71XX_MII_CTRL_SPEED_MASK 3
+#define AR71XX_MII_CTRL_SPEED_10 0
+#define AR71XX_MII_CTRL_SPEED_100 1
+#define AR71XX_MII_CTRL_SPEED_1000 2
+
+#define AR71XX_MII0_CTRL_IF_GMII 0
+#define AR71XX_MII0_CTRL_IF_MII 1
+#define AR71XX_MII0_CTRL_IF_RGMII 2
+#define AR71XX_MII0_CTRL_IF_RMII 3
+
+#define AR71XX_MII1_CTRL_IF_RGMII 0
+#define AR71XX_MII1_CTRL_IF_RMII 1
+
+/*
+ * AR933X GMAC interface
+ */
+#define AR933X_GMAC_REG_ETH_CFG 0x00
+
+#define AR933X_ETH_CFG_RGMII_GE0 BIT(0)
+#define AR933X_ETH_CFG_MII_GE0 BIT(1)
+#define AR933X_ETH_CFG_GMII_GE0 BIT(2)
+#define AR933X_ETH_CFG_MII_GE0_MASTER BIT(3)
+#define AR933X_ETH_CFG_MII_GE0_SLAVE BIT(4)
+#define AR933X_ETH_CFG_MII_GE0_ERR_EN BIT(5)
+#define AR933X_ETH_CFG_SW_PHY_SWAP BIT(7)
+#define AR933X_ETH_CFG_SW_PHY_ADDR_SWAP BIT(8)
+#define AR933X_ETH_CFG_RMII_GE0 BIT(9)
+#define AR933X_ETH_CFG_RMII_GE0_SPD_10 0
+#define AR933X_ETH_CFG_RMII_GE0_SPD_100 BIT(10)
+
+/*
+ * AR934X GMAC Interface
+ */
+#define AR934X_GMAC_REG_ETH_CFG 0x00
+
+#define AR934X_ETH_CFG_RGMII_GMAC0 BIT(0)
+#define AR934X_ETH_CFG_MII_GMAC0 BIT(1)
+#define AR934X_ETH_CFG_GMII_GMAC0 BIT(2)
+#define AR934X_ETH_CFG_MII_GMAC0_MASTER BIT(3)
+#define AR934X_ETH_CFG_MII_GMAC0_SLAVE BIT(4)
+#define AR934X_ETH_CFG_MII_GMAC0_ERR_EN BIT(5)
+#define AR934X_ETH_CFG_SW_ONLY_MODE BIT(6)
+#define AR934X_ETH_CFG_SW_PHY_SWAP BIT(7)
+#define AR934X_ETH_CFG_SW_APB_ACCESS BIT(9)
+#define AR934X_ETH_CFG_RMII_GMAC0 BIT(10)
+#define AR933X_ETH_CFG_MII_CNTL_SPEED BIT(11)
+#define AR934X_ETH_CFG_RMII_GMAC0_MASTER BIT(12)
+#define AR933X_ETH_CFG_SW_ACC_MSB_FIRST BIT(13)
+#define AR934X_ETH_CFG_RXD_DELAY BIT(14)
+#define AR934X_ETH_CFG_RXD_DELAY_MASK 0x3
+#define AR934X_ETH_CFG_RXD_DELAY_SHIFT 14
+#define AR934X_ETH_CFG_RDV_DELAY BIT(16)
+#define AR934X_ETH_CFG_RDV_DELAY_MASK 0x3
+#define AR934X_ETH_CFG_RDV_DELAY_SHIFT 16
+
+/*
+ * QCA953X GMAC Interface
+ */
+#define QCA953X_GMAC_REG_ETH_CFG 0x00
+
+#define QCA953X_ETH_CFG_SW_ONLY_MODE BIT(6)
+#define QCA953X_ETH_CFG_SW_PHY_SWAP BIT(7)
+#define QCA953X_ETH_CFG_SW_APB_ACCESS BIT(9)
+#define QCA953X_ETH_CFG_SW_ACC_MSB_FIRST BIT(13)
+
+/*
+ * QCA955X GMAC Interface
+ */
+
+#define QCA955X_GMAC_REG_ETH_CFG 0x00
+#define QCA955X_GMAC_REG_SGMII_SERDES 0x18
+
+#define QCA955X_ETH_CFG_RGMII_EN BIT(0)
+#define QCA955X_ETH_CFG_MII_GE0 BIT(1)
+#define QCA955X_ETH_CFG_GMII_GE0 BIT(2)
+#define QCA955X_ETH_CFG_MII_GE0_MASTER BIT(3)
+#define QCA955X_ETH_CFG_MII_GE0_SLAVE BIT(4)
+#define QCA955X_ETH_CFG_GE0_ERR_EN BIT(5)
+#define QCA955X_ETH_CFG_GE0_SGMII BIT(6)
+#define QCA955X_ETH_CFG_RMII_GE0 BIT(10)
+#define QCA955X_ETH_CFG_MII_CNTL_SPEED BIT(11)
+#define QCA955X_ETH_CFG_RMII_GE0_MASTER BIT(12)
+#define QCA955X_ETH_CFG_RXD_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_RXD_DELAY_SHIFT 14
+#define QCA955X_ETH_CFG_RDV_DELAY BIT(16)
+#define QCA955X_ETH_CFG_RDV_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_RDV_DELAY_SHIFT 16
+#define QCA955X_ETH_CFG_TXD_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_TXD_DELAY_SHIFT 18
+#define QCA955X_ETH_CFG_TXE_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_TXE_DELAY_SHIFT 20
+
+#define QCA955X_SGMII_SERDES_LOCK_DETECT_STATUS BIT(15)
+#define QCA955X_SGMII_SERDES_RES_CALIBRATION_SHIFT 23
+#define QCA955X_SGMII_SERDES_RES_CALIBRATION_MASK 0xf
+/*
+ * QCA956X GMAC Interface
+ */
+
+#define QCA956X_GMAC_REG_ETH_CFG 0x00
+#define QCA956X_GMAC_REG_SGMII_RESET 0x14
+#define QCA956X_GMAC_REG_SGMII_SERDES 0x18
+#define QCA956X_GMAC_REG_MR_AN_CONTROL 0x1c
+#define QCA956X_GMAC_REG_SGMII_CONFIG 0x34
+#define QCA956X_GMAC_REG_SGMII_DEBUG 0x58
+
+#define QCA956X_ETH_CFG_RGMII_EN BIT(0)
+#define QCA956X_ETH_CFG_GE0_SGMII BIT(6)
+#define QCA956X_ETH_CFG_SW_ONLY_MODE BIT(7)
+#define QCA956X_ETH_CFG_SW_PHY_SWAP BIT(8)
+#define QCA956X_ETH_CFG_SW_PHY_ADDR_SWAP BIT(9)
+#define QCA956X_ETH_CFG_SW_APB_ACCESS BIT(10)
+#define QCA956X_ETH_CFG_SW_ACC_MSB_FIRST BIT(13)
+#define QCA956X_ETH_CFG_RXD_DELAY_MASK 0x3
+#define QCA956X_ETH_CFG_RXD_DELAY_SHIFT 14
+#define QCA956X_ETH_CFG_RDV_DELAY_MASK 0x3
+#define QCA956X_ETH_CFG_RDV_DELAY_SHIFT 16
+
+#define QCA956X_SGMII_RESET_RX_CLK_N_RESET 0x0
+#define QCA956X_SGMII_RESET_RX_CLK_N BIT(0)
+#define QCA956X_SGMII_RESET_TX_CLK_N BIT(1)
+#define QCA956X_SGMII_RESET_RX_125M_N BIT(2)
+#define QCA956X_SGMII_RESET_TX_125M_N BIT(3)
+#define QCA956X_SGMII_RESET_HW_RX_125M_N BIT(4)
+
+#define QCA956X_SGMII_SERDES_CDR_BW_MASK 0x3
+#define QCA956X_SGMII_SERDES_CDR_BW_SHIFT 1
+#define QCA956X_SGMII_SERDES_TX_DR_CTRL_MASK 0x7
+#define QCA956X_SGMII_SERDES_TX_DR_CTRL_SHIFT 4
+#define QCA956X_SGMII_SERDES_PLL_BW BIT(8)
+#define QCA956X_SGMII_SERDES_VCO_FAST BIT(9)
+#define QCA956X_SGMII_SERDES_VCO_SLOW BIT(10)
+#define QCA956X_SGMII_SERDES_LOCK_DETECT_STATUS BIT(15)
+#define QCA956X_SGMII_SERDES_EN_SIGNAL_DETECT BIT(16)
+#define QCA956X_SGMII_SERDES_FIBER_SDO BIT(17)
+#define QCA956X_SGMII_SERDES_RES_CALIBRATION_SHIFT 23
+#define QCA956X_SGMII_SERDES_RES_CALIBRATION_MASK 0xf
+#define QCA956X_SGMII_SERDES_VCO_REG_SHIFT 27
+#define QCA956X_SGMII_SERDES_VCO_REG_MASK 0xf
+
+#define QCA956X_MR_AN_CONTROL_AN_ENABLE BIT(12)
+#define QCA956X_MR_AN_CONTROL_PHY_RESET BIT(15)
+
+#define QCA956X_SGMII_CONFIG_MODE_CTRL_SHIFT 0
+#define QCA956X_SGMII_CONFIG_MODE_CTRL_MASK 0x7
+
#endif /* __ASM_MACH_AR71XX_REGS_H */
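The *_SHIFT/*_MASK pairs above follow the usual shift-then-mask convention,
with the mask applied after the shift. For example, reading the QCA956X CPU
post-divider (qca956x_cpu_post_div() is a hypothetical helper; ath79_pll_rr()
is the accessor from mach-ath79/ath79.h):

    static u32 qca956x_cpu_post_div(void)
    {
            u32 clk_ctrl = ath79_pll_rr(QCA956X_PLL_CLK_CTRL_REG);

            return (clk_ctrl >> QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
                   QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
    }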
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 441faa92c3cd..73dcd63b8243 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -32,8 +32,11 @@ enum ath79_soc_type {
ATH79_SOC_AR9341,
ATH79_SOC_AR9342,
ATH79_SOC_AR9344,
+ ATH79_SOC_QCA9533,
ATH79_SOC_QCA9556,
ATH79_SOC_QCA9558,
+ ATH79_SOC_TP9343,
+ ATH79_SOC_QCA956X,
};
extern enum ath79_soc_type ath79_soc;
@@ -100,6 +103,16 @@ static inline int soc_is_ar934x(void)
return soc_is_ar9341() || soc_is_ar9342() || soc_is_ar9344();
}
+static inline int soc_is_qca9533(void)
+{
+ return ath79_soc == ATH79_SOC_QCA9533;
+}
+
+static inline int soc_is_qca953x(void)
+{
+ return soc_is_qca9533();
+}
+
static inline int soc_is_qca9556(void)
{
return ath79_soc == ATH79_SOC_QCA9556;
@@ -115,6 +128,26 @@ static inline int soc_is_qca955x(void)
return soc_is_qca9556() || soc_is_qca9558();
}
+static inline int soc_is_tp9343(void)
+{
+ return ath79_soc == ATH79_SOC_TP9343;
+}
+
+static inline int soc_is_qca9561(void)
+{
+ return ath79_soc == ATH79_SOC_QCA956X;
+}
+
+static inline int soc_is_qca9563(void)
+{
+ return ath79_soc == ATH79_SOC_QCA956X;
+}
+
+static inline int soc_is_qca956x(void)
+{
+ return soc_is_qca9561() || soc_is_qca9563();
+}
+
void ath79_ddr_wb_flush(unsigned int reg);
void ath79_ddr_set_pci_windows(void);
@@ -134,6 +167,7 @@ static inline u32 ath79_pll_rr(unsigned reg)
static inline void ath79_reset_wr(unsigned reg, u32 val)
{
__raw_writel(val, ath79_reset_base + reg);
+ (void) __raw_readl(ath79_reset_base + reg); /* flush */
}
static inline u32 ath79_reset_rr(unsigned reg)
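The read-back added to ath79_reset_wr() flushes the posted write so the
register update reaches the reset block before the caller continues, which
matters when a delay or dependent access follows the write. A minimal sketch
of the kind of sequence that relies on it (the pulse helper is hypothetical;
assumes <linux/delay.h>):

    static void ath79_reset_pulse(u32 mask)
    {
            u32 reg = QCA955X_RESET_REG_RESET_MODULE;

            ath79_reset_wr(reg, ath79_reset_rr(reg) | mask);
            udelay(10);
            ath79_reset_wr(reg, ath79_reset_rr(reg) & ~mask);
    }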
diff --git a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
index 0089a740e5ae..026ad90c8ac0 100644
--- a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
@@ -36,6 +36,7 @@
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0
+#define cpu_has_rixi 0
#define cpu_has_mips32r1 1
#define cpu_has_mips32r2 1
@@ -43,6 +44,7 @@
#define cpu_has_mips64r2 0
#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
#define cpu_has_64bits 0
#define cpu_has_64bit_zero_reg 0
@@ -51,5 +53,9 @@
#define cpu_dcache_line_size() 32
#define cpu_icache_line_size() 32
+#define cpu_has_vtag_icache 0
+#define cpu_has_dc_aliases 1
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_pindexed_dcache 0
#endif /* __ASM_MACH_ATH79_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-bmips/dma-coherence.h b/arch/mips/include/asm/mach-bmips/dma-coherence.h
deleted file mode 100644
index d29781f02285..000000000000
--- a/arch/mips/include/asm/mach-bmips/dma-coherence.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2009 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_MACH_BMIPS_DMA_COHERENCE_H
-#define __ASM_MACH_BMIPS_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-#include <asm/cpu-type.h>
-#include <asm/cpu.h>
-
-struct device;
-
-extern dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size);
-extern dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page);
-extern unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr);
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 0;
-}
-
-#define plat_post_dma_flush bmips_post_dma_flush
-
-#endif /* __ASM_MACH_BMIPS_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
deleted file mode 100644
index 6eb1ee548b11..000000000000
--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- *
- *
- * Similar to mach-generic/dma-coherence.h except
- * plat_device_is_coherent hard coded to return 1.
- *
- */
-#ifndef __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H
-#define __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H
-
-#include <linux/bug.h>
-
-struct device;
-
-extern void octeon_pci_dma_init(void);
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
- BUG();
- return 0;
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- BUG();
- return 0;
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- BUG();
- return 0;
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
- BUG();
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- BUG();
- return 0;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
-
-struct dma_map_ops;
-extern const struct dma_map_ops *octeon_pci_dma_map_ops;
-extern char *octeon_swiotlb;
-
-#endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
deleted file mode 100644
index 8ad7a40ca786..000000000000
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- *
- */
-#ifndef __ASM_MACH_GENERIC_DMA_COHERENCE_H
-#define __ASM_MACH_GENERIC_DMA_COHERENCE_H
-
-struct device;
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
- return virt_to_phys(addr);
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- return page_to_phys(page);
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- return dma_addr;
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-#ifdef CONFIG_DMA_PERDEV_COHERENT
- return dev->archdata.dma_coherent;
-#else
- switch (coherentio) {
- default:
- case IO_COHERENCE_DEFAULT:
- return hw_coherentio;
- case IO_COHERENCE_ENABLED:
- return 1;
- case IO_COHERENCE_DISABLED:
- return 0;
- }
-#endif
-}
-
-#ifndef plat_post_dma_flush
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-#endif
-
-#endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-generic/kmalloc.h b/arch/mips/include/asm/mach-generic/kmalloc.h
index 74207c7bd00d..649a98338886 100644
--- a/arch/mips/include/asm/mach-generic/kmalloc.h
+++ b/arch/mips/include/asm/mach-generic/kmalloc.h
@@ -2,8 +2,7 @@
#ifndef __ASM_MACH_GENERIC_KMALLOC_H
#define __ASM_MACH_GENERIC_KMALLOC_H
-
-#ifndef CONFIG_DMA_COHERENT
+#ifdef CONFIG_DMA_NONCOHERENT
/*
* Total overkill for most systems but needed as a safe default.
* Set this one if any device in the system might do non-coherent DMA.
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 952b0fdfda0e..ee5ebe98f6cf 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -17,9 +17,13 @@
/*
* This gives the physical RAM offset.
*/
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET _AC(0, UL)
-#endif
+#ifndef __ASSEMBLY__
+# if defined(CONFIG_MIPS_AUTO_PFN_OFFSET)
+# define PHYS_OFFSET ((unsigned long)PFN_PHYS(ARCH_PFN_OFFSET))
+# elif !defined(PHYS_OFFSET)
+# define PHYS_OFFSET _AC(0, UL)
+# endif
+#endif /* __ASSEMBLY__ */
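With CONFIG_MIPS_AUTO_PFN_OFFSET the physical RAM offset is derived from the
page-frame offset instead of being hard-coded per platform. PFN_PHYS() is the
generic shift from <linux/pfn.h>, so the new definition reduces to
ARCH_PFN_OFFSET scaled by the page size:

    #define PFN_PHYS(x)     ((phys_addr_t)(x) << PAGE_SHIFT)
    /* i.e. PHYS_OFFSET == ARCH_PFN_OFFSET * PAGE_SIZE */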
#ifdef CONFIG_32BIT
#ifdef CONFIG_KVM_GUEST
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
deleted file mode 100644
index 04d862020ac9..000000000000
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- *
- */
-#ifndef __ASM_MACH_IP27_DMA_COHERENCE_H
-#define __ASM_MACH_IP27_DMA_COHERENCE_H
-
-#include <asm/pci/bridge.h>
-
-#define pdev_to_baddr(pdev, addr) \
- (BRIDGE_CONTROLLER(pdev->bus)->baddr + (addr))
-#define dev_to_baddr(dev, addr) \
- pdev_to_baddr(to_pci_dev(dev), (addr))
-
-struct device;
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
- dma_addr_t pa = dev_to_baddr(dev, virt_to_phys(addr));
-
- return pa;
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- dma_addr_t pa = dev_to_baddr(dev, page_to_phys(page));
-
- return pa;
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- return dma_addr & ~(0xffUL << 56);
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 1; /* IP27 non-coherent mode is unsupported */
-}
-
-#endif /* __ASM_MACH_IP27_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
deleted file mode 100644
index 7bdf212587a0..000000000000
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- *
- */
-#ifndef __ASM_MACH_IP32_DMA_COHERENCE_H
-#define __ASM_MACH_IP32_DMA_COHERENCE_H
-
-#include <asm/ip32/crime.h>
-
-struct device;
-
-/*
- * Few notes.
- * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
- * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for
- * native-endian)
- * 3. All other devices see memory as one big chunk at 0x40000000
- * 4. Non-PCI devices will pass NULL as struct device*
- *
- * Thus we translate differently, depending on device.
- */
-
-#define RAM_OFFSET_MASK 0x3fffffffUL
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
- dma_addr_t pa = virt_to_phys(addr) & RAM_OFFSET_MASK;
-
- if (dev == NULL)
- pa += CRIME_HI_MEM_BASE;
-
- return pa;
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- dma_addr_t pa;
-
- pa = page_to_phys(page) & RAM_OFFSET_MASK;
-
- if (dev == NULL)
- pa += CRIME_HI_MEM_BASE;
-
- return pa;
-}
-
-/* This is almost certainly wrong but it's what dma-ip32.c used to use */
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- unsigned long addr = dma_addr & RAM_OFFSET_MASK;
-
- if (dma_addr >= 256*1024*1024)
- addr += CRIME_HI_MEM_BASE;
-
- return addr;
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 0; /* IP32 is non-coherent */
-}
-
-#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h
deleted file mode 100644
index dc347c25c343..000000000000
--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MACH_JAZZ_DMA_COHERENCE_H
-#define __ASM_MACH_JAZZ_DMA_COHERENCE_H
-
-#include <asm/jazzdma.h>
-
-struct device;
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
-{
- return vdma_alloc(virt_to_phys(addr), size);
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
- return vdma_alloc(page_to_phys(page), PAGE_SIZE);
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
- return vdma_log2phys(dma_addr);
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
- vdma_free(dma_addr);
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
- return 0;
-}
-
-#endif /* __ASM_MACH_JAZZ_DMA_COHERENCE_H */
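
Jazz is different again: DMA addresses are logical indices into a real translation table, so map and unmap are vdma_alloc()/vdma_free() calls rather than pure arithmetic (the actual table management lives in arch/mips/jazz/jazzdma.c). A toy model of that indirection, illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    #define NSLOTS 16
    #define PAGESZ 4096

    static uint64_t table[NSLOTS]; /* logical slot -> physical address */

    static int64_t toy_vdma_alloc(uint64_t phys)
    {
            for (int i = 0; i < NSLOTS; i++) {
                    if (!table[i]) {
                            table[i] = phys;
                            return (int64_t)i * PAGESZ; /* logical DMA address */
                    }
            }
            return -1;
    }

    static uint64_t toy_vdma_log2phys(int64_t dma)
    {
            return table[dma / PAGESZ];
    }

    int main(void)
    {
            int64_t dma = toy_vdma_alloc(0x12345000);

            printf("dma 0x%llx -> phys 0x%llx\n", (unsigned long long)dma,
                   (unsigned long long)toy_vdma_log2phys(dma));
            return 0;
    }
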
diff --git a/arch/mips/include/asm/mach-loongson64/dma-coherence.h b/arch/mips/include/asm/mach-loongson64/dma-coherence.h
deleted file mode 100644
index 64fc44dec0a8..000000000000
--- a/arch/mips/include/asm/mach-loongson64/dma-coherence.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006, 07 Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
- * Author: Fuxin Zhang, zhangfx@lemote.com
- *
- */
-#ifndef __ASM_MACH_LOONGSON64_DMA_COHERENCE_H
-#define __ASM_MACH_LOONGSON64_DMA_COHERENCE_H
-
-#ifdef CONFIG_SWIOTLB
-#include <linux/swiotlb.h>
-#endif
-
-struct device;
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-extern dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
-extern phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
- size_t size)
-{
-#ifdef CONFIG_CPU_LOONGSON3
- return __phys_to_dma(dev, virt_to_phys(addr));
-#else
- return virt_to_phys(addr) | 0x80000000;
-#endif
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
- struct page *page)
-{
-#ifdef CONFIG_CPU_LOONGSON3
- return __phys_to_dma(dev, page_to_phys(page));
-#else
- return page_to_phys(page) | 0x80000000;
-#endif
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
- dma_addr_t dma_addr)
-{
-#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
- return __dma_to_phys(dev, dma_addr);
-#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
- return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
-#else
- return dma_addr & 0x7fffffff;
-#endif
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-#ifdef CONFIG_DMA_NONCOHERENT
- return 0;
-#else
- return 1;
-#endif /* CONFIG_DMA_NONCOHERENT */
-}
-
-static inline void plat_post_dma_flush(struct device *dev)
-{
-}
-
-#endif /* __ASM_MACH_LOONGSON64_DMA_COHERENCE_H */
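
The Loongson64 header had three build-time reverse translations; the CONFIG_CPU_LOONGSON3 case already delegated to __phys_to_dma()/__dma_to_phys(), which is exactly the dma-direct interface that replaces all of this. The fallback case was a fixed window: devices reach RAM through bit 31, and the reverse mapping clears it. A sketch of that round trip:

    #include <stdint.h>
    #include <stdio.h>

    /* The fallback (non-LOONGSON3) path from the deleted header: OR in
     * the 0x80000000 device window on map, mask it off on the way back. */
    static uint64_t ls_phys_to_dma(uint64_t phys) { return phys | 0x80000000ULL; }
    static uint64_t ls_dma_to_phys(uint64_t dma)  { return dma & 0x7fffffffULL; }

    int main(void)
    {
            uint64_t dma = ls_phys_to_dma(0x01000000);

            printf("dma 0x%llx -> phys 0x%llx\n", (unsigned long long)dma,
                   (unsigned long long)ls_dma_to_phys(dma));
            return 0;
    }
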
diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
index 8393bc548987..312739117bb0 100644
--- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
@@ -19,18 +19,18 @@
.set push
.set mips64
/* Set LPA on LOONGSON3 config3 */
- mfc0 t0, $16, 3
+ mfc0 t0, CP0_CONFIG3
or t0, (0x1 << 7)
- mtc0 t0, $16, 3
+ mtc0 t0, CP0_CONFIG3
/* Set ELPA on LOONGSON3 pagegrain */
- mfc0 t0, $5, 1
+ mfc0 t0, CP0_PAGEGRAIN
or t0, (0x1 << 29)
- mtc0 t0, $5, 1
+ mtc0 t0, CP0_PAGEGRAIN
#ifdef CONFIG_LOONGSON3_ENHANCEMENT
/* Enable STFill Buffer */
- mfc0 t0, $16, 6
+ mfc0 t0, CP0_CONFIG6
or t0, 0x100
- mtc0 t0, $16, 6
+ mtc0 t0, CP0_CONFIG6
#endif
_ehb
.set pop
@@ -45,18 +45,18 @@
.set push
.set mips64
/* Set LPA on LOONGSON3 config3 */
- mfc0 t0, $16, 3
+ mfc0 t0, CP0_CONFIG3
or t0, (0x1 << 7)
- mtc0 t0, $16, 3
+ mtc0 t0, CP0_CONFIG3
/* Set ELPA on LOONGSON3 pagegrain */
- mfc0 t0, $5, 1
+ mfc0 t0, CP0_PAGEGRAIN
or t0, (0x1 << 29)
- mtc0 t0, $5, 1
+ mtc0 t0, CP0_PAGEGRAIN
#ifdef CONFIG_LOONGSON3_ENHANCEMENT
/* Enable STFill Buffer */
- mfc0 t0, $16, 6
+ mfc0 t0, CP0_CONFIG6
or t0, 0x100
- mtc0 t0, $16, 6
+ mtc0 t0, CP0_CONFIG6
#endif
_ehb
.set pop
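
This hunk is purely mechanical: each raw coprocessor-0 (register, select) pair is replaced by the symbolic name this same patch adds to mipsregs.h below, so CP0_CONFIG3 expands to $16, 3, CP0_PAGEGRAIN to $5, 1 and CP0_CONFIG6 to $16, 6. The generated instructions are unchanged.
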
diff --git a/arch/mips/include/asm/mach-pic32/spaces.h b/arch/mips/include/asm/mach-pic32/spaces.h
index 046a0a9aa8b3..a1b9783b76ea 100644
--- a/arch/mips/include/asm/mach-pic32/spaces.h
+++ b/arch/mips/include/asm/mach-pic32/spaces.h
@@ -16,7 +16,6 @@
#ifdef CONFIG_PIC32MZDA
#define PHYS_OFFSET _AC(0x08000000, UL)
-#define UNCAC_BASE _AC(0xa8000000, UL)
#endif
#include <asm/mach-generic/spaces.h>
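
With PHYS_OFFSET at 0x08000000, the deleted UNCAC_BASE of 0xa8000000 was just the default KSEG1 base 0xa0000000 plus that offset, which suggests the generic uncached-address helpers now account for PHYS_OFFSET themselves, making the per-platform override redundant.
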
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index ae461d91cd1f..01df9ad62fb8 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -16,6 +16,7 @@
#include <linux/linkage.h>
#include <linux/types.h>
#include <asm/hazards.h>
+#include <asm/isa-rev.h>
#include <asm/war.h>
/*
@@ -51,6 +52,7 @@
#define CP0_GLOBALNUMBER $3, 1
#define CP0_CONTEXT $4
#define CP0_PAGEMASK $5
+#define CP0_PAGEGRAIN $5, 1
#define CP0_SEGCTL0 $5, 2
#define CP0_SEGCTL1 $5, 3
#define CP0_SEGCTL2 $5, 4
@@ -77,6 +79,7 @@
#define CP0_CONFIG $16
#define CP0_CONFIG3 $16, 3
#define CP0_CONFIG5 $16, 5
+#define CP0_CONFIG6 $16, 6
#define CP0_LLADDR $17
#define CP0_WATCHLO $18
#define CP0_WATCHHI $19
@@ -1481,32 +1484,38 @@ do { \
#define __write_64bit_c0_split(source, sel, val) \
do { \
- unsigned long long __tmp; \
+ unsigned long long __tmp = (val); \
unsigned long __flags; \
\
local_irq_save(__flags); \
- if (sel == 0) \
+ if (MIPS_ISA_REV >= 2) \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\t" MIPS_ISA_LEVEL "\n\t" \
+ "dins\t%L0, %M0, 32, 32\n\t" \
+ "dmtc0\t%L0, " #source ", " #sel "\n\t" \
+ ".set\tpop" \
+ : "+r" (__tmp)); \
+ else if (sel == 0) \
__asm__ __volatile__( \
".set\tmips64\n\t" \
- "dsll\t%L0, %L1, 32\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
- "dsll\t%M0, %M1, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc0\t%L0, " #source "\n\t" \
".set\tmips0" \
- : "=&r,r" (__tmp) \
- : "r,0" (val)); \
+ : "+r" (__tmp)); \
else \
__asm__ __volatile__( \
".set\tmips64\n\t" \
- "dsll\t%L0, %L1, 32\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
- "dsll\t%M0, %M1, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc0\t%L0, " #source ", " #sel "\n\t" \
".set\tmips0" \
- : "=&r,r" (__tmp) \
- : "r,0" (val)); \
+ : "+r" (__tmp)); \
local_irq_restore(__flags); \
} while (0)
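
Two changes to __write_64bit_c0_split() here. The multi-alternative "=&r,r" / "r,0" constraints give way to a single "+r" read-write operand, with val copied into __tmp up front, and on MIPS r2 and later the four-instruction shift/or sequence collapses to a single dins before the dmtc0: dins %L0, %M0, 32, 32 inserts the low 32 bits of the high half at bit 32 of the low half. In plain C the merge it performs is:

    #include <stdint.h>
    #include <stdio.h>

    /* Equivalent of "dins %L0, %M0, 32, 32" on a register pair (lo, hi)
     * holding a 64-bit value split into 32-bit halves. */
    static uint64_t merge_halves(uint32_t lo, uint32_t hi)
    {
            return (uint64_t)lo | ((uint64_t)hi << 32);
    }

    int main(void)
    {
            printf("0x%llx\n",
                   (unsigned long long)merge_halves(0xdeadbeef, 0x1234abcd));
            return 0;
    }
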
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index da2004cef2d5..b509371a6b0c 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -126,8 +126,6 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
for_each_possible_cpu(i)
cpu_context(i, mm) = 0;
- atomic_set(&mm->context.fp_mode_switching, 0);
-
mm->context.bd_emupage_allocmap = NULL;
spin_lock_init(&mm->context.bd_emupage_lock);
init_waitqueue_head(&mm->context.bd_emupage_queue);
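
The per-mm fp_mode_switching counter is no longer initialised here, consistent with the FP mode-switch serialisation being reworked elsewhere in this series so that the field and its remaining users go away together.
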
diff --git a/arch/mips/include/asm/netlogic/xlr/fmn.h b/arch/mips/include/asm/netlogic/xlr/fmn.h
index 5604db3d1836..d79c68fa78d9 100644
--- a/arch/mips/include/asm/netlogic/xlr/fmn.h
+++ b/arch/mips/include/asm/netlogic/xlr/fmn.h
@@ -301,8 +301,6 @@ static inline int nlm_fmn_send(unsigned int size, unsigned int code,
for (i = 0; i < 8; i++) {
nlm_msgsnd(dest);
status = nlm_read_c2_status0();
- if ((status & 0x2) == 1)
- pr_info("Send pending fail!\n");
if ((status & 0x4) == 0)
return 0;
}
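
The deleted diagnostic was dead code: status & 0x2 evaluates to 0 or 2, never 1, so the comparison was always false and the pr_info() could not fire. Dropping it changes no behaviour.
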
diff --git a/arch/mips/include/asm/octeon/cvmx-asxx-defs.h b/arch/mips/include/asm/octeon/cvmx-asxx-defs.h
index a1e21a3854cf..1eef155979f3 100644
--- a/arch/mips/include/asm/octeon/cvmx-asxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-asxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -55,6 +55,8 @@
#define CVMX_ASXX_TX_HI_WATERX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#define CVMX_ASXX_TX_PRT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000008ull) + ((block_id) & 1) * 0x8000000ull)
+void __cvmx_interrupt_asxx_enable(int block);
+
union cvmx_asxx_gmii_rx_clk_set {
uint64_t u64;
struct cvmx_asxx_gmii_rx_clk_set_s {
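
Only a declaration is added here: exposing __cvmx_interrupt_asxx_enable() in the header lets callers drop local extern declarations and gives them prototype checking, alongside the copyright-notice refresh.
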
diff --git a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
index 6e61792d9248..1d18be8cdddc 100644
--- a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
@@ -1,10014 +1,176 @@
-/***********************license start***************
- * Author: Cavium Networks
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Octeon CIU definitions
*
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2012 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
+ * Copyright (C) 2003-2018 Cavium, Inc.
+ */
#ifndef __CVMX_CIU_DEFS_H__
#define __CVMX_CIU_DEFS_H__
-#define CVMX_CIU_BIST (CVMX_ADD_IO_SEG(0x0001070000000730ull))
-#define CVMX_CIU_BLOCK_INT (CVMX_ADD_IO_SEG(0x00010700000007C0ull))
-#define CVMX_CIU_DINT (CVMX_ADD_IO_SEG(0x0001070000000720ull))
-#define CVMX_CIU_EN2_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x000107000000A600ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_EN2_IOX_INT_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CE00ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_EN2_IOX_INT_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AE00ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_EN2_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x000107000000A000ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP2_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000C800ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP2_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000A800ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x000107000000A200ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP3_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CA00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP3_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AA00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x000107000000A400ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP4_W1C(offset) (CVMX_ADD_IO_SEG(0x000107000000CC00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_EN2_PPX_IP4_W1S(offset) (CVMX_ADD_IO_SEG(0x000107000000AC00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_FUSE (CVMX_ADD_IO_SEG(0x0001070000000728ull))
-#define CVMX_CIU_GSTOP (CVMX_ADD_IO_SEG(0x0001070000000710ull))
-#define CVMX_CIU_INT33_SUM0 (CVMX_ADD_IO_SEG(0x0001070000000110ull))
-#define CVMX_CIU_INTX_EN0(offset) (CVMX_ADD_IO_SEG(0x0001070000000200ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002200ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006200ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN1(offset) (CVMX_ADD_IO_SEG(0x0001070000000208ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002208ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006208ull) + ((offset) & 63) * 16)
-#define CVMX_CIU_INTX_EN4_0(offset) (CVMX_ADD_IO_SEG(0x0001070000000C80ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C80ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C80ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_1(offset) (CVMX_ADD_IO_SEG(0x0001070000000C88ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C88ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_EN4_1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C88ull) + ((offset) & 15) * 16)
-#define CVMX_CIU_INTX_SUM0(offset) (CVMX_ADD_IO_SEG(0x0001070000000000ull) + ((offset) & 63) * 8)
-#define CVMX_CIU_INTX_SUM4(offset) (CVMX_ADD_IO_SEG(0x0001070000000C00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_INT_DBG_SEL (CVMX_ADD_IO_SEG(0x00010700000007D0ull))
-#define CVMX_CIU_INT_SUM1 (CVMX_ADD_IO_SEG(0x0001070000000108ull))
-static inline uint64_t CVMX_CIU_MBOX_CLRX(unsigned long offset)
+#include <asm/bitfield.h>
+
+#define CVMX_CIU_ADDR(addr, coreid, coremask, offset) \
+ (CVMX_ADD_IO_SEG(0x0001070000000000ull + addr##ull) + \
+ (((coreid) & (coremask)) * offset))
+
+#define CVMX_CIU_EN2_PPX_IP4(c) CVMX_CIU_ADDR(0xA400, c, 0x0F, 8)
+#define CVMX_CIU_EN2_PPX_IP4_W1C(c) CVMX_CIU_ADDR(0xCC00, c, 0x0F, 8)
+#define CVMX_CIU_EN2_PPX_IP4_W1S(c) CVMX_CIU_ADDR(0xAC00, c, 0x0F, 8)
+#define CVMX_CIU_FUSE CVMX_CIU_ADDR(0x0728, 0, 0x00, 0)
+#define CVMX_CIU_INT_SUM1 CVMX_CIU_ADDR(0x0108, 0, 0x00, 0)
+#define CVMX_CIU_INTX_EN0(c) CVMX_CIU_ADDR(0x0200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN0_W1C(c) CVMX_CIU_ADDR(0x2200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN0_W1S(c) CVMX_CIU_ADDR(0x6200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1(c) CVMX_CIU_ADDR(0x0208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1_W1C(c) CVMX_CIU_ADDR(0x2208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1_W1S(c) CVMX_CIU_ADDR(0x6208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_SUM0(c) CVMX_CIU_ADDR(0x0000, c, 0x3F, 8)
+#define CVMX_CIU_NMI CVMX_CIU_ADDR(0x0718, 0, 0x00, 0)
+#define CVMX_CIU_PCI_INTA CVMX_CIU_ADDR(0x0750, 0, 0x00, 0)
+#define CVMX_CIU_PP_BIST_STAT CVMX_CIU_ADDR(0x07E0, 0, 0x00, 0)
+#define CVMX_CIU_PP_DBG CVMX_CIU_ADDR(0x0708, 0, 0x00, 0)
+#define CVMX_CIU_PP_RST CVMX_CIU_ADDR(0x0700, 0, 0x00, 0)
+#define CVMX_CIU_QLM0 CVMX_CIU_ADDR(0x0780, 0, 0x00, 0)
+#define CVMX_CIU_QLM1 CVMX_CIU_ADDR(0x0788, 0, 0x00, 0)
+#define CVMX_CIU_QLM_JTGC CVMX_CIU_ADDR(0x0768, 0, 0x00, 0)
+#define CVMX_CIU_QLM_JTGD CVMX_CIU_ADDR(0x0770, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_BIST CVMX_CIU_ADDR(0x0738, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_PRST1 CVMX_CIU_ADDR(0x0758, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_PRST CVMX_CIU_ADDR(0x0748, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_RST CVMX_CIU_ADDR(0x0740, 0, 0x00, 0)
+#define CVMX_CIU_SUM2_PPX_IP4(c) CVMX_CIU_ADDR(0x8C00, c, 0x0F, 8)
+#define CVMX_CIU_TIM_MULTI_CAST CVMX_CIU_ADDR(0xC200, 0, 0x00, 0)
+#define CVMX_CIU_TIMX(c) CVMX_CIU_ADDR(0x0480, c, 0x0F, 8)
+
+static inline uint64_t CVMX_CIU_MBOX_CLRX(unsigned int coreid)
{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070100100600ull) + (offset) * 8;
- }
- return CVMX_ADD_IO_SEG(0x0001070000000680ull) + (offset) * 8;
+ if (cvmx_get_octeon_family() == (OCTEON_CN68XX & OCTEON_FAMILY_MASK))
+ return CVMX_CIU_ADDR(0x100100600, coreid, 0x0F, 8);
+ else
+ return CVMX_CIU_ADDR(0x000000680, coreid, 0x0F, 8);
}
-static inline uint64_t CVMX_CIU_MBOX_SETX(unsigned long offset)
+static inline uint64_t CVMX_CIU_MBOX_SETX(unsigned int coreid)
{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070100100400ull) + (offset) * 8;
- }
- return CVMX_ADD_IO_SEG(0x0001070000000600ull) + (offset) * 8;
+ if (cvmx_get_octeon_family() == (OCTEON_CN68XX & OCTEON_FAMILY_MASK))
+ return CVMX_CIU_ADDR(0x100100400, coreid, 0x0F, 8);
+ else
+ return CVMX_CIU_ADDR(0x000000600, coreid, 0x0F, 8);
}
-#define CVMX_CIU_NMI (CVMX_ADD_IO_SEG(0x0001070000000718ull))
-#define CVMX_CIU_PCI_INTA (CVMX_ADD_IO_SEG(0x0001070000000750ull))
-#define CVMX_CIU_PP_BIST_STAT (CVMX_ADD_IO_SEG(0x00010700000007E0ull))
-#define CVMX_CIU_PP_DBG (CVMX_ADD_IO_SEG(0x0001070000000708ull))
-static inline uint64_t CVMX_CIU_PP_POKEX(unsigned long offset)
+static inline uint64_t CVMX_CIU_PP_POKEX(unsigned int coreid)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070100100200ull) + (offset) * 8;
+ return CVMX_CIU_ADDR(0x100100200, coreid, 0x0F, 8);
case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001010000030000ull) + (offset) * 8;
+ return CVMX_CIU_ADDR(0x000030000, coreid, 0x0F, 8) -
+ 0x60000000000ull;
+ default:
+ return CVMX_CIU_ADDR(0x000000580, coreid, 0x0F, 8);
}
- return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8;
}
-#define CVMX_CIU_PP_RST (CVMX_ADD_IO_SEG(0x0001070000000700ull))
-#define CVMX_CIU_QLM0 (CVMX_ADD_IO_SEG(0x0001070000000780ull))
-#define CVMX_CIU_QLM1 (CVMX_ADD_IO_SEG(0x0001070000000788ull))
-#define CVMX_CIU_QLM2 (CVMX_ADD_IO_SEG(0x0001070000000790ull))
-#define CVMX_CIU_QLM3 (CVMX_ADD_IO_SEG(0x0001070000000798ull))
-#define CVMX_CIU_QLM4 (CVMX_ADD_IO_SEG(0x00010700000007A0ull))
-#define CVMX_CIU_QLM_DCOK (CVMX_ADD_IO_SEG(0x0001070000000760ull))
-#define CVMX_CIU_QLM_JTGC (CVMX_ADD_IO_SEG(0x0001070000000768ull))
-#define CVMX_CIU_QLM_JTGD (CVMX_ADD_IO_SEG(0x0001070000000770ull))
-#define CVMX_CIU_SOFT_BIST (CVMX_ADD_IO_SEG(0x0001070000000738ull))
-#define CVMX_CIU_SOFT_PRST (CVMX_ADD_IO_SEG(0x0001070000000748ull))
-#define CVMX_CIU_SOFT_PRST1 (CVMX_ADD_IO_SEG(0x0001070000000758ull))
-#define CVMX_CIU_SOFT_PRST2 (CVMX_ADD_IO_SEG(0x00010700000007D8ull))
-#define CVMX_CIU_SOFT_PRST3 (CVMX_ADD_IO_SEG(0x00010700000007E0ull))
-#define CVMX_CIU_SOFT_RST (CVMX_ADD_IO_SEG(0x0001070000000740ull))
-#define CVMX_CIU_SUM1_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x0001070000008600ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_SUM1_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x0001070000008000ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM1_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x0001070000008200ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM1_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x0001070000008400ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM2_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x0001070000008E00ull) + ((offset) & 1) * 8)
-#define CVMX_CIU_SUM2_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x0001070000008800ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM2_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x0001070000008A00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_SUM2_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x0001070000008C00ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001070000000480ull) + ((offset) & 15) * 8)
-#define CVMX_CIU_TIM_MULTI_CAST (CVMX_ADD_IO_SEG(0x000107000000C200ull))
-static inline uint64_t CVMX_CIU_WDOGX(unsigned long offset)
+static inline uint64_t CVMX_CIU_WDOGX(unsigned int coreid)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001070100100000ull) + (offset) * 8;
+ return CVMX_CIU_ADDR(0x100100000, coreid, 0x0F, 8);
case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001010000020000ull) + (offset) * 8;
+ return CVMX_CIU_ADDR(0x000020000, coreid, 0x0F, 8) -
+ 0x60000000000ull;
+ default:
+ return CVMX_CIU_ADDR(0x000000500, coreid, 0x0F, 8);
}
- return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8;
}
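
The roughly 10,000 lines this header loses come from two moves visible above. Register addresses are re-expressed through the single CVMX_CIU_ADDR(block offset, coreid, coremask, stride) helper, and the per-family switch statements collapse to the two layouts that actually differ: CN68XX's relocated CIU block, and the CN7xxx parts whose block sits 0x60000000000 below the common base (0x0001070000030000 - 0x60000000000 = 0x0001010000030000, matching the old literal). The long run of deletions that follows drops the per-chip union definitions; the new #include of asm/bitfield.h suggests the few layouts still needed are rebuilt with those macros further down the file. A standalone check of the macro arithmetic against one of the old literals, with CVMX_ADD_IO_SEG() stubbed as the identity (in the real headers it ORs in the I/O segment bits):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stub: enough to check the offset arithmetic. */
    #define CVMX_ADD_IO_SEG(x) (x)

    #define CVMX_CIU_ADDR(addr, coreid, coremask, offset) \
            (CVMX_ADD_IO_SEG(0x0001070000000000ull + addr##ull) + \
             (((coreid) & (coremask)) * offset))

    int main(void)
    {
            unsigned int core = 3;
            /* Old-style literal for CVMX_CIU_INTX_EN0(core)... */
            uint64_t old = 0x0001070000000200ull + (core & 63) * 16;
            /* ...and the new table-driven form. */
            uint64_t new = CVMX_CIU_ADDR(0x0200, core, 0x3F, 16);

            assert(old == new);
            printf("CVMX_CIU_INTX_EN0(%u) = 0x%llx\n", core,
                   (unsigned long long)new);
            return 0;
    }
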
-union cvmx_ciu_bist {
- uint64_t u64;
- struct cvmx_ciu_bist_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_7_63:57;
- uint64_t bist:7;
-#else
- uint64_t bist:7;
- uint64_t reserved_7_63:57;
-#endif
- } s;
- struct cvmx_ciu_bist_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t bist:4;
-#else
- uint64_t bist:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn30xx;
- struct cvmx_ciu_bist_cn30xx cn31xx;
- struct cvmx_ciu_bist_cn30xx cn38xx;
- struct cvmx_ciu_bist_cn30xx cn38xxp2;
- struct cvmx_ciu_bist_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t bist:2;
-#else
- uint64_t bist:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn50xx;
- struct cvmx_ciu_bist_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_3_63:61;
- uint64_t bist:3;
-#else
- uint64_t bist:3;
- uint64_t reserved_3_63:61;
-#endif
- } cn52xx;
- struct cvmx_ciu_bist_cn52xx cn52xxp1;
- struct cvmx_ciu_bist_cn30xx cn56xx;
- struct cvmx_ciu_bist_cn30xx cn56xxp1;
- struct cvmx_ciu_bist_cn30xx cn58xx;
- struct cvmx_ciu_bist_cn30xx cn58xxp1;
- struct cvmx_ciu_bist_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t bist:6;
-#else
- uint64_t bist:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn61xx;
- struct cvmx_ciu_bist_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t bist:5;
-#else
- uint64_t bist:5;
- uint64_t reserved_5_63:59;
-#endif
- } cn63xx;
- struct cvmx_ciu_bist_cn63xx cn63xxp1;
- struct cvmx_ciu_bist_cn61xx cn66xx;
- struct cvmx_ciu_bist_s cn68xx;
- struct cvmx_ciu_bist_s cn68xxp1;
- struct cvmx_ciu_bist_cn61xx cnf71xx;
-};
-
-union cvmx_ciu_block_int {
- uint64_t u64;
- struct cvmx_ciu_block_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_43_59:17;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t dfm:1;
- uint64_t reserved_34_39:6;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t reserved_31_31:1;
- uint64_t iob:1;
- uint64_t reserved_29_29:1;
- uint64_t agl:1;
- uint64_t reserved_27_27:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_24_24:1;
- uint64_t asxpcs1:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t gmx1:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t dfa:1;
- uint64_t zip:1;
- uint64_t reserved_8_8:1;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t asxpcs1:1;
- uint64_t reserved_24_24:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_27:1;
- uint64_t agl:1;
- uint64_t reserved_29_29:1;
- uint64_t iob:1;
- uint64_t reserved_31_31:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t reserved_34_39:6;
- uint64_t dfm:1;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_59:17;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_63:2;
-#endif
- } s;
- struct cvmx_ciu_block_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t reserved_31_40:10;
- uint64_t iob:1;
- uint64_t reserved_29_29:1;
- uint64_t agl:1;
- uint64_t reserved_27_27:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_24_24:1;
- uint64_t asxpcs1:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t gmx1:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t dfa:1;
- uint64_t zip:1;
- uint64_t reserved_8_8:1;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t asxpcs1:1;
- uint64_t reserved_24_24:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_27:1;
- uint64_t agl:1;
- uint64_t reserved_29_29:1;
- uint64_t iob:1;
- uint64_t reserved_31_40:10;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_63:21;
-#endif
- } cn61xx;
- struct cvmx_ciu_block_int_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t dfm:1;
- uint64_t reserved_34_39:6;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t reserved_31_31:1;
- uint64_t iob:1;
- uint64_t reserved_29_29:1;
- uint64_t agl:1;
- uint64_t reserved_27_27:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_23_24:2;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t reserved_2_2:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t reserved_2_2:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t dfa:1;
- uint64_t zip:1;
- uint64_t reserved_8_8:1;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_23_24:2;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_27:1;
- uint64_t agl:1;
- uint64_t reserved_29_29:1;
- uint64_t iob:1;
- uint64_t reserved_31_31:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t reserved_34_39:6;
- uint64_t dfm:1;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_63:21;
-#endif
- } cn63xx;
- struct cvmx_ciu_block_int_cn63xx cn63xxp1;
- struct cvmx_ciu_block_int_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_43_59:17;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t dfm:1;
- uint64_t reserved_33_39:7;
- uint64_t srio0:1;
- uint64_t reserved_31_31:1;
- uint64_t iob:1;
- uint64_t reserved_29_29:1;
- uint64_t agl:1;
- uint64_t reserved_27_27:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_24_24:1;
- uint64_t asxpcs1:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t gmx1:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t dfa:1;
- uint64_t zip:1;
- uint64_t reserved_8_8:1;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t asxpcs1:1;
- uint64_t reserved_24_24:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_27:1;
- uint64_t agl:1;
- uint64_t reserved_29_29:1;
- uint64_t iob:1;
- uint64_t reserved_31_31:1;
- uint64_t srio0:1;
- uint64_t reserved_33_39:7;
- uint64_t dfm:1;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_59:17;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_63:2;
-#endif
- } cn66xx;
- struct cvmx_ciu_block_int_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t ptp:1;
- uint64_t dpi:1;
- uint64_t reserved_31_40:10;
- uint64_t iob:1;
- uint64_t reserved_27_29:3;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t reserved_23_24:2;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_6_8:3;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t sli:1;
- uint64_t reserved_2_2:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
-#else
- uint64_t mio:1;
- uint64_t gmx0:1;
- uint64_t reserved_2_2:1;
- uint64_t sli:1;
- uint64_t key:1;
- uint64_t fpa:1;
- uint64_t reserved_6_8:3;
- uint64_t ipd:1;
- uint64_t pko:1;
- uint64_t tim:1;
- uint64_t pow:1;
- uint64_t usb:1;
- uint64_t rad:1;
- uint64_t reserved_15_15:1;
- uint64_t l2c:1;
- uint64_t lmc0:1;
- uint64_t reserved_18_19:2;
- uint64_t pip:1;
- uint64_t reserved_21_21:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_23_24:2;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_27_29:3;
- uint64_t iob:1;
- uint64_t reserved_31_40:10;
- uint64_t dpi:1;
- uint64_t ptp:1;
- uint64_t reserved_43_63:21;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_dint {
- uint64_t u64;
- struct cvmx_ciu_dint_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t dint:32;
-#else
- uint64_t dint:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_dint_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t dint:1;
-#else
- uint64_t dint:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_dint_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t dint:2;
-#else
- uint64_t dint:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_dint_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t dint:16;
-#else
- uint64_t dint:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_dint_cn38xx cn38xxp2;
- struct cvmx_ciu_dint_cn31xx cn50xx;
- struct cvmx_ciu_dint_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t dint:4;
-#else
- uint64_t dint:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_dint_cn52xx cn52xxp1;
- struct cvmx_ciu_dint_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t dint:12;
-#else
- uint64_t dint:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_dint_cn56xx cn56xxp1;
- struct cvmx_ciu_dint_cn38xx cn58xx;
- struct cvmx_ciu_dint_cn38xx cn58xxp1;
- struct cvmx_ciu_dint_cn52xx cn61xx;
- struct cvmx_ciu_dint_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t dint:6;
-#else
- uint64_t dint:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_dint_cn63xx cn63xxp1;
- struct cvmx_ciu_dint_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t dint:10;
-#else
- uint64_t dint:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_dint_s cn68xx;
- struct cvmx_ciu_dint_s cn68xxp1;
- struct cvmx_ciu_dint_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_en2_iox_int {
- uint64_t u64;
- struct cvmx_ciu_en2_iox_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_iox_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_iox_int_cn61xx cn66xx;
- struct cvmx_ciu_en2_iox_int_s cnf71xx;
-};
-
-union cvmx_ciu_en2_iox_int_w1c {
- uint64_t u64;
- struct cvmx_ciu_en2_iox_int_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_iox_int_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_iox_int_w1c_cn61xx cn66xx;
- struct cvmx_ciu_en2_iox_int_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_iox_int_w1s {
- uint64_t u64;
- struct cvmx_ciu_en2_iox_int_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_iox_int_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_iox_int_w1s_cn61xx cn66xx;
- struct cvmx_ciu_en2_iox_int_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip2 {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip2_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip2_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip2_w1c {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip2_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip2_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip2_w1c_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip2_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip2_w1s {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip2_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip2_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip2_w1s_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip2_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip3 {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip3_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip3_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip3_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip3_w1c {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip3_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip3_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip3_w1c_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip3_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip3_w1s {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip3_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip3_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip3_w1s_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip3_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip4 {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip4_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip4_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip4_w1c {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip4_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip4_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip4_w1c_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip4_w1c_s cnf71xx;
-};
-
-union cvmx_ciu_en2_ppx_ip4_w1s {
- uint64_t u64;
- struct cvmx_ciu_en2_ppx_ip4_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_en2_ppx_ip4_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_en2_ppx_ip4_w1s_cn61xx cn66xx;
- struct cvmx_ciu_en2_ppx_ip4_w1s_s cnf71xx;
-};
-
-union cvmx_ciu_fuse {
- uint64_t u64;
- struct cvmx_ciu_fuse_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t fuse:32;
-#else
- uint64_t fuse:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_fuse_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t fuse:1;
-#else
- uint64_t fuse:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_fuse_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t fuse:2;
-#else
- uint64_t fuse:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_fuse_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t fuse:16;
-#else
- uint64_t fuse:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_fuse_cn38xx cn38xxp2;
- struct cvmx_ciu_fuse_cn31xx cn50xx;
- struct cvmx_ciu_fuse_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t fuse:4;
-#else
- uint64_t fuse:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_fuse_cn52xx cn52xxp1;
- struct cvmx_ciu_fuse_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t fuse:12;
-#else
- uint64_t fuse:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_fuse_cn56xx cn56xxp1;
- struct cvmx_ciu_fuse_cn38xx cn58xx;
- struct cvmx_ciu_fuse_cn38xx cn58xxp1;
- struct cvmx_ciu_fuse_cn52xx cn61xx;
- struct cvmx_ciu_fuse_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t fuse:6;
-#else
- uint64_t fuse:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_fuse_cn63xx cn63xxp1;
- struct cvmx_ciu_fuse_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t fuse:10;
-#else
- uint64_t fuse:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_fuse_s cn68xx;
- struct cvmx_ciu_fuse_s cn68xxp1;
- struct cvmx_ciu_fuse_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_gstop {
- uint64_t u64;
- struct cvmx_ciu_gstop_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t gstop:1;
-#else
- uint64_t gstop:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_gstop_s cn30xx;
- struct cvmx_ciu_gstop_s cn31xx;
- struct cvmx_ciu_gstop_s cn38xx;
- struct cvmx_ciu_gstop_s cn38xxp2;
- struct cvmx_ciu_gstop_s cn50xx;
- struct cvmx_ciu_gstop_s cn52xx;
- struct cvmx_ciu_gstop_s cn52xxp1;
- struct cvmx_ciu_gstop_s cn56xx;
- struct cvmx_ciu_gstop_s cn56xxp1;
- struct cvmx_ciu_gstop_s cn58xx;
- struct cvmx_ciu_gstop_s cn58xxp1;
- struct cvmx_ciu_gstop_s cn61xx;
- struct cvmx_ciu_gstop_s cn63xx;
- struct cvmx_ciu_gstop_s cn63xxp1;
- struct cvmx_ciu_gstop_s cn66xx;
- struct cvmx_ciu_gstop_s cn68xx;
- struct cvmx_ciu_gstop_s cn68xxp1;
- struct cvmx_ciu_gstop_s cnf71xx;
-};
-
-union cvmx_ciu_intx_en0 {
- uint64_t u64;
- struct cvmx_ciu_intx_en0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en0_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_47_47:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t reserved_47_47:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn30xx;
- struct cvmx_ciu_intx_en0_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn31xx;
- struct cvmx_ciu_intx_en0_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn38xx;
- struct cvmx_ciu_intx_en0_cn38xx cn38xxp2;
- struct cvmx_ciu_intx_en0_cn30xx cn50xx;
- struct cvmx_ciu_intx_en0_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en0_cn52xx cn52xxp1;
- struct cvmx_ciu_intx_en0_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en0_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_en0_cn38xx cn58xx;
- struct cvmx_ciu_intx_en0_cn38xx cn58xxp1;
- struct cvmx_ciu_intx_en0_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en0_cn52xx cn63xx;
- struct cvmx_ciu_intx_en0_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
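/*
 * Illustrative sketch only (not part of the removed header): how these
 * register unions are typically used. The cvmx_read_csr() helper and the
 * CVMX_CIU_INTX_EN0(offset) address macro are assumed from the surrounding
 * Octeon headers. Each union overlays the raw 64-bit CSR value (u64) with
 * per-SoC bitfield views; every view is declared twice, under
 * __BIG_ENDIAN_BITFIELD and its inverse, so the field order matches the
 * hardware layout on both big- and little-endian builds.
 */
union cvmx_ciu_intx_en0 en0;

en0.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(0));	/* read IP2 enables for core 0 */
if (en0.s.uart & 0x1)				/* 2-bit field: UART0 enable */
	pr_debug("UART0 interrupt enabled\n");	/* hypothetical consumer */
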
-union cvmx_ciu_intx_en0_w1c {
- uint64_t u64;
- struct cvmx_ciu_intx_en0_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en0_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en0_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en0_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en0_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xx;
- struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en0_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en0_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en0_w1s {
- uint64_t u64;
- struct cvmx_ciu_intx_en0_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en0_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en0_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en0_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en0_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xx;
- struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en0_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en0_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
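/*
 * Sketch of the write-1-to-clear / write-1-to-set aliases above, under the
 * same assumptions (cvmx_write_csr() and the CVMX_CIU_INTX_EN0_W1S() macro
 * come from the surrounding Octeon headers). Writing a mask to the _W1C
 * address clears only the enable bits that are set in the mask, and writing
 * to the _W1S address sets them, so a single interrupt source can be
 * enabled or disabled without a read-modify-write of CIU_INTX_EN0.
 */
union cvmx_ciu_intx_en0_w1s en0_w1s;

en0_w1s.u64 = 0;
en0_w1s.s.mbox = 0x1;	/* select mailbox interrupt 0 only */
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(0), en0_w1s.u64);
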
-union cvmx_ciu_intx_en1 {
- uint64_t u64;
- struct cvmx_ciu_intx_en1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en1_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t wdog:1;
-#else
- uint64_t wdog:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_intx_en1_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t wdog:2;
-#else
- uint64_t wdog:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_intx_en1_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_intx_en1_cn38xx cn38xxp2;
- struct cvmx_ciu_intx_en1_cn31xx cn50xx;
- struct cvmx_ciu_intx_en1_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en1_cn52xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t reserved_19_63:45;
-#endif
- } cn52xxp1;
- struct cvmx_ciu_intx_en1_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en1_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_en1_cn38xx cn58xx;
- struct cvmx_ciu_intx_en1_cn38xx cn58xxp1;
- struct cvmx_ciu_intx_en1_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en1_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en1_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en1_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en1_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en1_w1c {
- uint64_t u64;
- struct cvmx_ciu_intx_en1_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en1_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en1_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en1_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en1_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en1_w1c_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en1_w1c_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en1_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en1_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en1_w1s {
- uint64_t u64;
- struct cvmx_ciu_intx_en1_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en1_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en1_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en1_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en1_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en1_w1s_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en1_w1s_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en1_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en1_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_0 {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_0_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_47_47:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t reserved_47_47:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn50xx;
- struct cvmx_ciu_intx_en4_0_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_0_cn52xx cn52xxp1;
- struct cvmx_ciu_intx_en4_0_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_0_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_en4_0_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_0_cn58xx cn58xxp1;
- struct cvmx_ciu_intx_en4_0_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_0_cn52xx cn63xx;
- struct cvmx_ciu_intx_en4_0_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en4_0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_0_w1c {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_0_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_0_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xx;
- struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en4_0_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_0_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_0_w1s {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_0_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_0_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xx;
- struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_en4_0_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_0_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t reserved_44_44:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t reserved_44_44:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
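Every layout in this file is written twice because C leaves bit-field allocation order to the implementation: GCC on big-endian MIPS packs the first declared member into the most significant bits, while little-endian packing starts at bit 0, hence the mirrored field lists on either side of __BIG_ENDIAN_BITFIELD. A hypothetical two-field union (not part of the header) makes the pattern visible:

	union demo_reg {
		uint64_t u64;
		struct {
	#ifdef __BIG_ENDIAN_BITFIELD
			uint64_t hi:32;		/* hardware bits 63..32 */
			uint64_t lo:32;		/* hardware bits 31..0  */
	#else
			uint64_t lo:32;		/* hardware bits 31..0  */
			uint64_t hi:32;		/* hardware bits 63..32 */
	#endif
		} s;
	};

On either endianness, s.lo then refers to the same numeric bits 31..0 of the value loaded into u64, which is what the register accessors rely on.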
-union cvmx_ciu_intx_en4_1 {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_1_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t wdog:2;
-#else
- uint64_t wdog:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn50xx;
- struct cvmx_ciu_intx_en4_1_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_1_cn52xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t reserved_19_63:45;
-#endif
- } cn52xxp1;
- struct cvmx_ciu_intx_en4_1_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_1_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_en4_1_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_1_cn58xx cn58xxp1;
- struct cvmx_ciu_intx_en4_1_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_1_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en4_1_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en4_1_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_1_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
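Within each union, the per-model members (cn52xx, cn61xx, cnf71xx, ...) alias the same 64-bit storage; only the set of valid fields differs, and a reserved_M_N:K member names an unused run from bit M through bit N (K = N - M + 1 wide). Code that needs a model-specific field picks the variant at run time. A sketch against the CIU_INTX_EN4_1 union just above, assuming OCTEON_IS_MODEL() from the companion octeon-model.h and the usual CVMX_CIU_INTX_EN4_1() address macro:

	union cvmx_ciu_intx_en4_1 en;

	en.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN4_1(0));
	if (OCTEON_IS_MODEL(OCTEON_CN52XX))		/* 4 WDOG enables */
		pr_info("wdog: %#x\n", (unsigned int)en.cn52xx.wdog);
	else if (OCTEON_IS_MODEL(OCTEON_CN58XX))	/* all 16 WDOG enables */
		pr_info("wdog: %#x\n", (unsigned int)en.cn58xx.wdog);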
-union cvmx_ciu_intx_en4_1_w1c {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_1_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_1_w1c_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en4_1_w1c_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en4_1_w1c_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_1_w1c_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_en4_1_w1s {
- uint64_t u64;
- struct cvmx_ciu_intx_en4_1_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_intx_en4_1_w1s_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_intx_en4_1_w1s_cn63xx cn63xxp1;
- struct cvmx_ciu_intx_en4_1_w1s_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_en4_1_w1s_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_intx_sum0 {
- uint64_t u64;
- struct cvmx_ciu_intx_sum0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_sum0_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_47_47:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t reserved_47_47:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn30xx;
- struct cvmx_ciu_intx_sum0_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn31xx;
- struct cvmx_ciu_intx_sum0_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn38xx;
- struct cvmx_ciu_intx_sum0_cn38xx cn38xxp2;
- struct cvmx_ciu_intx_sum0_cn30xx cn50xx;
- struct cvmx_ciu_intx_sum0_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_sum0_cn52xx cn52xxp1;
- struct cvmx_ciu_intx_sum0_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_sum0_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_sum0_cn38xx cn58xx;
- struct cvmx_ciu_intx_sum0_cn38xx cn58xxp1;
- struct cvmx_ciu_intx_sum0_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_sum0_cn52xx cn63xx;
- struct cvmx_ciu_intx_sum0_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_sum0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_sum0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
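Where the EN4_0 layouts earlier keep bit 44 as reserved_44_44, the SUM0 layouts above name it wdog_sum, a one-bit summary that, going by the register naming, folds the second interrupt word (CIU_INT_SUM1, defined further below) into this first word. A hedged read-and-dispatch sketch, with CVMX_CIU_INTX_SUM0() assumed as the address macro and handle_timers()/scan_sum1() as hypothetical handlers:

	union cvmx_ciu_intx_sum0 sum;

	sum.u64 = cvmx_read_csr(CVMX_CIU_INTX_SUM0(0));
	if (sum.s.timer)	/* any of the four general-purpose timers */
		handle_timers(sum.s.timer);
	if (sum.s.wdog_sum)	/* something is pending in the SUM1 word */
		scan_sum1();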
-union cvmx_ciu_intx_sum4 {
- uint64_t u64;
- struct cvmx_ciu_intx_sum4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_intx_sum4_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_59_63:5;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_47_47:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t reserved_47_47:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t reserved_59_63:5;
-#endif
- } cn50xx;
- struct cvmx_ciu_intx_sum4_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_intx_sum4_cn52xx cn52xxp1;
- struct cvmx_ciu_intx_sum4_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_intx_sum4_cn56xx cn56xxp1;
- struct cvmx_ciu_intx_sum4_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t timer:4;
- uint64_t key_zero:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t key_zero:1;
- uint64_t timer:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn58xx;
- struct cvmx_ciu_intx_sum4_cn58xx cn58xxp1;
- struct cvmx_ciu_intx_sum4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_intx_sum4_cn52xx cn63xx;
- struct cvmx_ciu_intx_sum4_cn52xx cn63xxp1;
- struct cvmx_ciu_intx_sum4_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_intx_sum4_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_int33_sum0 {
- uint64_t u64;
- struct cvmx_ciu_int33_sum0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } s;
- struct cvmx_ciu_int33_sum0_s cn61xx;
- struct cvmx_ciu_int33_sum0_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t reserved_57_58:2;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t reserved_51_51:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_51_51:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_58:2;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_int33_sum0_cn63xx cn63xxp1;
- struct cvmx_ciu_int33_sum0_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t mii:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t reserved_57_57:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t gmx_drp:2;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:2;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t reserved_57_57:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t mii:1;
- uint64_t bootdma:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_int33_sum0_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t bootdma:1;
- uint64_t reserved_62_62:1;
- uint64_t ipdppthr:1;
- uint64_t powiq:1;
- uint64_t twsi2:1;
- uint64_t mpi:1;
- uint64_t pcm:1;
- uint64_t usb:1;
- uint64_t timer:4;
- uint64_t sum2:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t gmx_drp:1;
- uint64_t trace:1;
- uint64_t rml:1;
- uint64_t twsi:1;
- uint64_t wdog_sum:1;
- uint64_t pci_msi:4;
- uint64_t pci_int:4;
- uint64_t uart:2;
- uint64_t mbox:2;
- uint64_t gpio:16;
- uint64_t workq:16;
-#else
- uint64_t workq:16;
- uint64_t gpio:16;
- uint64_t mbox:2;
- uint64_t uart:2;
- uint64_t pci_int:4;
- uint64_t pci_msi:4;
- uint64_t wdog_sum:1;
- uint64_t twsi:1;
- uint64_t rml:1;
- uint64_t trace:1;
- uint64_t gmx_drp:1;
- uint64_t reserved_49_49:1;
- uint64_t ipd_drp:1;
- uint64_t sum2:1;
- uint64_t timer:4;
- uint64_t usb:1;
- uint64_t pcm:1;
- uint64_t mpi:1;
- uint64_t twsi2:1;
- uint64_t powiq:1;
- uint64_t ipdppthr:1;
- uint64_t reserved_62_62:1;
- uint64_t bootdma:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_int_dbg_sel {
- uint64_t u64;
- struct cvmx_ciu_int_dbg_sel_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t sel:3;
- uint64_t reserved_10_15:6;
- uint64_t irq:2;
- uint64_t reserved_5_7:3;
- uint64_t pp:5;
-#else
- uint64_t pp:5;
- uint64_t reserved_5_7:3;
- uint64_t irq:2;
- uint64_t reserved_10_15:6;
- uint64_t sel:3;
- uint64_t reserved_19_63:45;
-#endif
- } s;
- struct cvmx_ciu_int_dbg_sel_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t sel:3;
- uint64_t reserved_10_15:6;
- uint64_t irq:2;
- uint64_t reserved_4_7:4;
- uint64_t pp:4;
-#else
- uint64_t pp:4;
- uint64_t reserved_4_7:4;
- uint64_t irq:2;
- uint64_t reserved_10_15:6;
- uint64_t sel:3;
- uint64_t reserved_19_63:45;
-#endif
- } cn61xx;
- struct cvmx_ciu_int_dbg_sel_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t sel:3;
- uint64_t reserved_10_15:6;
- uint64_t irq:2;
- uint64_t reserved_3_7:5;
- uint64_t pp:3;
-#else
- uint64_t pp:3;
- uint64_t reserved_3_7:5;
- uint64_t irq:2;
- uint64_t reserved_10_15:6;
- uint64_t sel:3;
- uint64_t reserved_19_63:45;
-#endif
- } cn63xx;
- struct cvmx_ciu_int_dbg_sel_cn61xx cn66xx;
- struct cvmx_ciu_int_dbg_sel_s cn68xx;
- struct cvmx_ciu_int_dbg_sel_s cn68xxp1;
- struct cvmx_ciu_int_dbg_sel_cn61xx cnf71xx;
-};
-
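CIU_INT_DBG_SEL above appears to steer the CIU's debug observation point: pp selects a core, irq one of its interrupt lines, and sel a signal group, with the pp width tracking each model's core count (3 bits on CN63XX, 4 on CN61XX/CN66XX/CNF71XX, 5 on CN68XX). A sketch of programming it, with the CVMX_CIU_INT_DBG_SEL address macro assumed and the field meanings as read from the layout, not from a datasheet:

	union cvmx_ciu_int_dbg_sel dbg;

	dbg.u64 = 0;
	dbg.s.pp = 1;	/* observe core 1 */
	dbg.s.irq = 0;	/* ... on its first CIU interrupt line */
	dbg.s.sel = 0;	/* signal group; meaning is model-specific */
	cvmx_write_csr(CVMX_CIU_INT_DBG_SEL, dbg.u64);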
-union cvmx_ciu_int_sum1 {
- uint64_t u64;
- struct cvmx_ciu_int_sum1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_int_sum1_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t wdog:1;
-#else
- uint64_t wdog:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_int_sum1_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t wdog:2;
-#else
- uint64_t wdog:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_int_sum1_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t wdog:16;
-#else
- uint64_t wdog:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_int_sum1_cn38xx cn38xxp2;
- struct cvmx_ciu_int_sum1_cn31xx cn50xx;
- struct cvmx_ciu_int_sum1_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_ciu_int_sum1_cn52xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t mii1:1;
- uint64_t usb1:1;
- uint64_t uart2:1;
- uint64_t reserved_4_15:12;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_15:12;
- uint64_t uart2:1;
- uint64_t usb1:1;
- uint64_t mii1:1;
- uint64_t reserved_19_63:45;
-#endif
- } cn52xxp1;
- struct cvmx_ciu_int_sum1_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t wdog:12;
-#else
- uint64_t wdog:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_int_sum1_cn56xx cn56xxp1;
- struct cvmx_ciu_int_sum1_cn38xx cn58xx;
- struct cvmx_ciu_int_sum1_cn38xx cn58xxp1;
- struct cvmx_ciu_int_sum1_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_int_sum1_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_57_62:6;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t srio1:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_37_45:9;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_6_17:12;
- uint64_t wdog:6;
-#else
- uint64_t wdog:6;
- uint64_t reserved_6_17:12;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_45:9;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t srio1:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_62:6;
- uint64_t rst:1;
-#endif
- } cn63xx;
- struct cvmx_ciu_int_sum1_cn63xx cn63xxp1;
- struct cvmx_ciu_int_sum1_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_int_sum1_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_37_46:10;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_46:10;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_mbox_clrx {
- uint64_t u64;
- struct cvmx_ciu_mbox_clrx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t bits:32;
-#else
- uint64_t bits:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_mbox_clrx_s cn30xx;
- struct cvmx_ciu_mbox_clrx_s cn31xx;
- struct cvmx_ciu_mbox_clrx_s cn38xx;
- struct cvmx_ciu_mbox_clrx_s cn38xxp2;
- struct cvmx_ciu_mbox_clrx_s cn50xx;
- struct cvmx_ciu_mbox_clrx_s cn52xx;
- struct cvmx_ciu_mbox_clrx_s cn52xxp1;
- struct cvmx_ciu_mbox_clrx_s cn56xx;
- struct cvmx_ciu_mbox_clrx_s cn56xxp1;
- struct cvmx_ciu_mbox_clrx_s cn58xx;
- struct cvmx_ciu_mbox_clrx_s cn58xxp1;
- struct cvmx_ciu_mbox_clrx_s cn61xx;
- struct cvmx_ciu_mbox_clrx_s cn63xx;
- struct cvmx_ciu_mbox_clrx_s cn63xxp1;
- struct cvmx_ciu_mbox_clrx_s cn66xx;
- struct cvmx_ciu_mbox_clrx_s cn68xx;
- struct cvmx_ciu_mbox_clrx_s cn68xxp1;
- struct cvmx_ciu_mbox_clrx_s cnf71xx;
-};
-
-union cvmx_ciu_mbox_setx {
- uint64_t u64;
- struct cvmx_ciu_mbox_setx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t bits:32;
-#else
- uint64_t bits:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_mbox_setx_s cn30xx;
- struct cvmx_ciu_mbox_setx_s cn31xx;
- struct cvmx_ciu_mbox_setx_s cn38xx;
- struct cvmx_ciu_mbox_setx_s cn38xxp2;
- struct cvmx_ciu_mbox_setx_s cn50xx;
- struct cvmx_ciu_mbox_setx_s cn52xx;
- struct cvmx_ciu_mbox_setx_s cn52xxp1;
- struct cvmx_ciu_mbox_setx_s cn56xx;
- struct cvmx_ciu_mbox_setx_s cn56xxp1;
- struct cvmx_ciu_mbox_setx_s cn58xx;
- struct cvmx_ciu_mbox_setx_s cn58xxp1;
- struct cvmx_ciu_mbox_setx_s cn61xx;
- struct cvmx_ciu_mbox_setx_s cn63xx;
- struct cvmx_ciu_mbox_setx_s cn63xxp1;
- struct cvmx_ciu_mbox_setx_s cn66xx;
- struct cvmx_ciu_mbox_setx_s cn68xx;
- struct cvmx_ciu_mbox_setx_s cn68xxp1;
- struct cvmx_ciu_mbox_setx_s cnf71xx;
-};
-
-union cvmx_ciu_nmi {
- uint64_t u64;
- struct cvmx_ciu_nmi_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t nmi:32;
-#else
- uint64_t nmi:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_nmi_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t nmi:1;
-#else
- uint64_t nmi:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_nmi_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t nmi:2;
-#else
- uint64_t nmi:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_nmi_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t nmi:16;
-#else
- uint64_t nmi:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_nmi_cn38xx cn38xxp2;
- struct cvmx_ciu_nmi_cn31xx cn50xx;
- struct cvmx_ciu_nmi_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t nmi:4;
-#else
- uint64_t nmi:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_nmi_cn52xx cn52xxp1;
- struct cvmx_ciu_nmi_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t nmi:12;
-#else
- uint64_t nmi:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_nmi_cn56xx cn56xxp1;
- struct cvmx_ciu_nmi_cn38xx cn58xx;
- struct cvmx_ciu_nmi_cn38xx cn58xxp1;
- struct cvmx_ciu_nmi_cn52xx cn61xx;
- struct cvmx_ciu_nmi_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t nmi:6;
-#else
- uint64_t nmi:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_nmi_cn63xx cn63xxp1;
- struct cvmx_ciu_nmi_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t nmi:10;
-#else
- uint64_t nmi:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_nmi_s cn68xx;
- struct cvmx_ciu_nmi_s cn68xxp1;
- struct cvmx_ciu_nmi_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_pci_inta {
- uint64_t u64;
- struct cvmx_ciu_pci_inta_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t intr:2;
-#else
- uint64_t intr:2;
- uint64_t reserved_2_63:62;
-#endif
- } s;
- struct cvmx_ciu_pci_inta_s cn30xx;
- struct cvmx_ciu_pci_inta_s cn31xx;
- struct cvmx_ciu_pci_inta_s cn38xx;
- struct cvmx_ciu_pci_inta_s cn38xxp2;
- struct cvmx_ciu_pci_inta_s cn50xx;
- struct cvmx_ciu_pci_inta_s cn52xx;
- struct cvmx_ciu_pci_inta_s cn52xxp1;
- struct cvmx_ciu_pci_inta_s cn56xx;
- struct cvmx_ciu_pci_inta_s cn56xxp1;
- struct cvmx_ciu_pci_inta_s cn58xx;
- struct cvmx_ciu_pci_inta_s cn58xxp1;
- struct cvmx_ciu_pci_inta_s cn61xx;
- struct cvmx_ciu_pci_inta_s cn63xx;
- struct cvmx_ciu_pci_inta_s cn63xxp1;
- struct cvmx_ciu_pci_inta_s cn66xx;
- struct cvmx_ciu_pci_inta_s cn68xx;
- struct cvmx_ciu_pci_inta_s cn68xxp1;
- struct cvmx_ciu_pci_inta_s cnf71xx;
-};
-
-union cvmx_ciu_pp_bist_stat {
- uint64_t u64;
- struct cvmx_ciu_pp_bist_stat_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t pp_bist:32;
-#else
- uint64_t pp_bist:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_pp_bist_stat_s cn68xx;
- struct cvmx_ciu_pp_bist_stat_s cn68xxp1;
-};
-
-union cvmx_ciu_pp_dbg {
- uint64_t u64;
- struct cvmx_ciu_pp_dbg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t ppdbg:32;
-#else
- uint64_t ppdbg:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_pp_dbg_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t ppdbg:1;
-#else
- uint64_t ppdbg:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_pp_dbg_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t ppdbg:2;
-#else
- uint64_t ppdbg:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_pp_dbg_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t ppdbg:16;
-#else
- uint64_t ppdbg:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_pp_dbg_cn38xx cn38xxp2;
- struct cvmx_ciu_pp_dbg_cn31xx cn50xx;
- struct cvmx_ciu_pp_dbg_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t ppdbg:4;
-#else
- uint64_t ppdbg:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_pp_dbg_cn52xx cn52xxp1;
- struct cvmx_ciu_pp_dbg_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t ppdbg:12;
-#else
- uint64_t ppdbg:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_pp_dbg_cn56xx cn56xxp1;
- struct cvmx_ciu_pp_dbg_cn38xx cn58xx;
- struct cvmx_ciu_pp_dbg_cn38xx cn58xxp1;
- struct cvmx_ciu_pp_dbg_cn52xx cn61xx;
- struct cvmx_ciu_pp_dbg_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t ppdbg:6;
-#else
- uint64_t ppdbg:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_pp_dbg_cn63xx cn63xxp1;
- struct cvmx_ciu_pp_dbg_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t ppdbg:10;
-#else
- uint64_t ppdbg:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_pp_dbg_s cn68xx;
- struct cvmx_ciu_pp_dbg_s cn68xxp1;
- struct cvmx_ciu_pp_dbg_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_pp_pokex {
- uint64_t u64;
- struct cvmx_ciu_pp_pokex_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t poke:64;
-#else
- uint64_t poke:64;
-#endif
- } s;
- struct cvmx_ciu_pp_pokex_s cn30xx;
- struct cvmx_ciu_pp_pokex_s cn31xx;
- struct cvmx_ciu_pp_pokex_s cn38xx;
- struct cvmx_ciu_pp_pokex_s cn38xxp2;
- struct cvmx_ciu_pp_pokex_s cn50xx;
- struct cvmx_ciu_pp_pokex_s cn52xx;
- struct cvmx_ciu_pp_pokex_s cn52xxp1;
- struct cvmx_ciu_pp_pokex_s cn56xx;
- struct cvmx_ciu_pp_pokex_s cn56xxp1;
- struct cvmx_ciu_pp_pokex_s cn58xx;
- struct cvmx_ciu_pp_pokex_s cn58xxp1;
- struct cvmx_ciu_pp_pokex_s cn61xx;
- struct cvmx_ciu_pp_pokex_s cn63xx;
- struct cvmx_ciu_pp_pokex_s cn63xxp1;
- struct cvmx_ciu_pp_pokex_s cn66xx;
- struct cvmx_ciu_pp_pokex_s cn68xx;
- struct cvmx_ciu_pp_pokex_s cn68xxp1;
- struct cvmx_ciu_pp_pokex_s cnf71xx;
-};
-
-union cvmx_ciu_pp_rst {
- uint64_t u64;
- struct cvmx_ciu_pp_rst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t rst:31;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:31;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu_pp_rst_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn30xx;
- struct cvmx_ciu_pp_rst_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t rst:1;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:1;
- uint64_t reserved_2_63:62;
-#endif
- } cn31xx;
- struct cvmx_ciu_pp_rst_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t rst:15;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:15;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_ciu_pp_rst_cn38xx cn38xxp2;
- struct cvmx_ciu_pp_rst_cn31xx cn50xx;
- struct cvmx_ciu_pp_rst_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t rst:3;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:3;
- uint64_t reserved_4_63:60;
-#endif
- } cn52xx;
- struct cvmx_ciu_pp_rst_cn52xx cn52xxp1;
- struct cvmx_ciu_pp_rst_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t rst:11;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:11;
- uint64_t reserved_12_63:52;
-#endif
- } cn56xx;
- struct cvmx_ciu_pp_rst_cn56xx cn56xxp1;
- struct cvmx_ciu_pp_rst_cn38xx cn58xx;
- struct cvmx_ciu_pp_rst_cn38xx cn58xxp1;
- struct cvmx_ciu_pp_rst_cn52xx cn61xx;
- struct cvmx_ciu_pp_rst_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t rst:5;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:5;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_ciu_pp_rst_cn63xx cn63xxp1;
- struct cvmx_ciu_pp_rst_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t rst:9;
- uint64_t rst0:1;
-#else
- uint64_t rst0:1;
- uint64_t rst:9;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_ciu_pp_rst_s cn68xx;
- struct cvmx_ciu_pp_rst_s cn68xxp1;
- struct cvmx_ciu_pp_rst_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_qlm0 {
- uint64_t u64;
- struct cvmx_ciu_qlm0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
- } s;
- struct cvmx_ciu_qlm0_s cn61xx;
- struct cvmx_ciu_qlm0_s cn63xx;
- struct cvmx_ciu_qlm0_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_20_30:11;
- uint64_t txdeemph:4;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:4;
- uint64_t reserved_20_30:11;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn63xxp1;
- struct cvmx_ciu_qlm0_s cn66xx;
- struct cvmx_ciu_qlm0_cn68xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn68xx;
- struct cvmx_ciu_qlm0_cn68xx cn68xxp1;
- struct cvmx_ciu_qlm0_s cnf71xx;
-};
-
-union cvmx_ciu_qlm1 {
- uint64_t u64;
- struct cvmx_ciu_qlm1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
- } s;
- struct cvmx_ciu_qlm1_s cn61xx;
- struct cvmx_ciu_qlm1_s cn63xx;
- struct cvmx_ciu_qlm1_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_20_30:11;
- uint64_t txdeemph:4;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:4;
- uint64_t reserved_20_30:11;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn63xxp1;
- struct cvmx_ciu_qlm1_s cn66xx;
- struct cvmx_ciu_qlm1_s cn68xx;
- struct cvmx_ciu_qlm1_s cn68xxp1;
- struct cvmx_ciu_qlm1_s cnf71xx;
-};
-
-union cvmx_ciu_qlm2 {
- uint64_t u64;
- struct cvmx_ciu_qlm2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
- } s;
- struct cvmx_ciu_qlm2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn61xx;
- struct cvmx_ciu_qlm2_cn61xx cn63xx;
- struct cvmx_ciu_qlm2_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t txbypass:1;
- uint64_t reserved_20_30:11;
- uint64_t txdeemph:4;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:4;
- uint64_t reserved_20_30:11;
- uint64_t txbypass:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn63xxp1;
- struct cvmx_ciu_qlm2_cn61xx cn66xx;
- struct cvmx_ciu_qlm2_s cn68xx;
- struct cvmx_ciu_qlm2_s cn68xxp1;
- struct cvmx_ciu_qlm2_cn61xx cnf71xx;
-};
-union cvmx_ciu_qlm3 {
+union cvmx_ciu_qlm {
uint64_t u64;
- struct cvmx_ciu_qlm3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
+ struct cvmx_ciu_qlm_s {
+ __BITFIELD_FIELD(uint64_t g2bypass:1,
+ __BITFIELD_FIELD(uint64_t reserved_53_62:10,
+ __BITFIELD_FIELD(uint64_t g2deemph:5,
+ __BITFIELD_FIELD(uint64_t reserved_45_47:3,
+ __BITFIELD_FIELD(uint64_t g2margin:5,
+ __BITFIELD_FIELD(uint64_t reserved_32_39:8,
+ __BITFIELD_FIELD(uint64_t txbypass:1,
+ __BITFIELD_FIELD(uint64_t reserved_21_30:10,
+ __BITFIELD_FIELD(uint64_t txdeemph:5,
+ __BITFIELD_FIELD(uint64_t reserved_13_15:3,
+ __BITFIELD_FIELD(uint64_t txmargin:5,
+ __BITFIELD_FIELD(uint64_t reserved_4_7:4,
+ __BITFIELD_FIELD(uint64_t lane_en:4,
+ ;)))))))))))))
} s;
- struct cvmx_ciu_qlm3_s cn68xx;
- struct cvmx_ciu_qlm3_s cn68xxp1;
-};
-
-union cvmx_ciu_qlm4 {
- uint64_t u64;
- struct cvmx_ciu_qlm4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t g2bypass:1;
- uint64_t reserved_53_62:10;
- uint64_t g2deemph:5;
- uint64_t reserved_45_47:3;
- uint64_t g2margin:5;
- uint64_t reserved_32_39:8;
- uint64_t txbypass:1;
- uint64_t reserved_21_30:10;
- uint64_t txdeemph:5;
- uint64_t reserved_13_15:3;
- uint64_t txmargin:5;
- uint64_t reserved_4_7:4;
- uint64_t lane_en:4;
-#else
- uint64_t lane_en:4;
- uint64_t reserved_4_7:4;
- uint64_t txmargin:5;
- uint64_t reserved_13_15:3;
- uint64_t txdeemph:5;
- uint64_t reserved_21_30:10;
- uint64_t txbypass:1;
- uint64_t reserved_32_39:8;
- uint64_t g2margin:5;
- uint64_t reserved_45_47:3;
- uint64_t g2deemph:5;
- uint64_t reserved_53_62:10;
- uint64_t g2bypass:1;
-#endif
- } s;
- struct cvmx_ciu_qlm4_s cn68xx;
- struct cvmx_ciu_qlm4_s cn68xxp1;
-};
-
-union cvmx_ciu_qlm_dcok {
- uint64_t u64;
- struct cvmx_ciu_qlm_dcok_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t qlm_dcok:4;
-#else
- uint64_t qlm_dcok:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu_qlm_dcok_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t qlm_dcok:2;
-#else
- uint64_t qlm_dcok:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn52xx;
- struct cvmx_ciu_qlm_dcok_cn52xx cn52xxp1;
- struct cvmx_ciu_qlm_dcok_s cn56xx;
- struct cvmx_ciu_qlm_dcok_s cn56xxp1;
};
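
The __BITFIELD_FIELD() helper used in the replacement struct above comes
from arch/mips/include/uapi/asm/bitfield.h. It lays the listed fields out
in declaration order on big-endian builds and in reverse order on
little-endian builds, which is what lets each pair of mirror-image
#ifdef __BIG_ENDIAN_BITFIELD blocks collapse into a single layout. A
sketch of the macro for orientation (the header is authoritative; note it
keys off the compiler's __MIPSEB__/__MIPSEL__ rather than
__BIG_ENDIAN_BITFIELD, which is equivalent for MIPS kernel builds):

	#ifdef __MIPSEB__
	#define __BITFIELD_FIELD(field, more)	\
		field;				\
		more
	#elif defined(__MIPSEL__)
	#define __BITFIELD_FIELD(field, more)	\
		more				\
		field;
	#else
	#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
	#endif

	/*
	 * __BITFIELD_FIELD(uint64_t a:1, __BITFIELD_FIELD(uint64_t b:63, ;))
	 * thus expands to "uint64_t a:1; uint64_t b:63;" on big-endian and
	 * to "uint64_t b:63; uint64_t a:1;" on little-endian; the lone ';'
	 * passed as the innermost argument becomes a harmless empty
	 * declaration in either ordering.
	 */
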
union cvmx_ciu_qlm_jtgc {
uint64_t u64;
struct cvmx_ciu_qlm_jtgc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t bypass_ext:1;
- uint64_t reserved_11_15:5;
- uint64_t clk_div:3;
- uint64_t reserved_7_7:1;
- uint64_t mux_sel:3;
- uint64_t bypass:4;
-#else
- uint64_t bypass:4;
- uint64_t mux_sel:3;
- uint64_t reserved_7_7:1;
- uint64_t clk_div:3;
- uint64_t reserved_11_15:5;
- uint64_t bypass_ext:1;
- uint64_t reserved_17_63:47;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_17_63:47,
+ __BITFIELD_FIELD(uint64_t bypass_ext:1,
+ __BITFIELD_FIELD(uint64_t reserved_11_15:5,
+ __BITFIELD_FIELD(uint64_t clk_div:3,
+ __BITFIELD_FIELD(uint64_t reserved_7_7:1,
+ __BITFIELD_FIELD(uint64_t mux_sel:3,
+ __BITFIELD_FIELD(uint64_t bypass:4,
+ ;)))))))
} s;
- struct cvmx_ciu_qlm_jtgc_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_11_63:53;
- uint64_t clk_div:3;
- uint64_t reserved_5_7:3;
- uint64_t mux_sel:1;
- uint64_t reserved_2_3:2;
- uint64_t bypass:2;
-#else
- uint64_t bypass:2;
- uint64_t reserved_2_3:2;
- uint64_t mux_sel:1;
- uint64_t reserved_5_7:3;
- uint64_t clk_div:3;
- uint64_t reserved_11_63:53;
-#endif
- } cn52xx;
- struct cvmx_ciu_qlm_jtgc_cn52xx cn52xxp1;
- struct cvmx_ciu_qlm_jtgc_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_11_63:53;
- uint64_t clk_div:3;
- uint64_t reserved_6_7:2;
- uint64_t mux_sel:2;
- uint64_t bypass:4;
-#else
- uint64_t bypass:4;
- uint64_t mux_sel:2;
- uint64_t reserved_6_7:2;
- uint64_t clk_div:3;
- uint64_t reserved_11_63:53;
-#endif
- } cn56xx;
- struct cvmx_ciu_qlm_jtgc_cn56xx cn56xxp1;
- struct cvmx_ciu_qlm_jtgc_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_11_63:53;
- uint64_t clk_div:3;
- uint64_t reserved_6_7:2;
- uint64_t mux_sel:2;
- uint64_t reserved_3_3:1;
- uint64_t bypass:3;
-#else
- uint64_t bypass:3;
- uint64_t reserved_3_3:1;
- uint64_t mux_sel:2;
- uint64_t reserved_6_7:2;
- uint64_t clk_div:3;
- uint64_t reserved_11_63:53;
-#endif
- } cn61xx;
- struct cvmx_ciu_qlm_jtgc_cn61xx cn63xx;
- struct cvmx_ciu_qlm_jtgc_cn61xx cn63xxp1;
- struct cvmx_ciu_qlm_jtgc_cn61xx cn66xx;
- struct cvmx_ciu_qlm_jtgc_s cn68xx;
- struct cvmx_ciu_qlm_jtgc_s cn68xxp1;
- struct cvmx_ciu_qlm_jtgc_cn61xx cnf71xx;
};
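
Callers are unaffected by the conversion as long as they go through the
's' view. A minimal, hypothetical read-modify-write of the JTAG clock
divider, assuming the cvmx_read_csr()/cvmx_write_csr() accessors and the
CVMX_CIU_QLM_JTGC address constant that the Octeon support code already
provides:

	union cvmx_ciu_qlm_jtgc jtgc;

	jtgc.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGC);	/* fetch whole CSR */
	jtgc.s.clk_div = 3;	/* one field layout, either endianness */
	cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
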
union cvmx_ciu_qlm_jtgd {
uint64_t u64;
struct cvmx_ciu_qlm_jtgd_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_45_60:16;
- uint64_t select:5;
- uint64_t reserved_37_39:3;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_39:3;
- uint64_t select:5;
- uint64_t reserved_45_60:16;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } s;
- struct cvmx_ciu_qlm_jtgd_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_42_60:19;
- uint64_t select:2;
- uint64_t reserved_37_39:3;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_39:3;
- uint64_t select:2;
- uint64_t reserved_42_60:19;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } cn52xx;
- struct cvmx_ciu_qlm_jtgd_cn52xx cn52xxp1;
- struct cvmx_ciu_qlm_jtgd_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_44_60:17;
- uint64_t select:4;
- uint64_t reserved_37_39:3;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_39:3;
- uint64_t select:4;
- uint64_t reserved_44_60:17;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } cn56xx;
- struct cvmx_ciu_qlm_jtgd_cn56xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_37_60:24;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_60:24;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } cn56xxp1;
- struct cvmx_ciu_qlm_jtgd_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t capture:1;
- uint64_t shift:1;
- uint64_t update:1;
- uint64_t reserved_43_60:18;
- uint64_t select:3;
- uint64_t reserved_37_39:3;
- uint64_t shft_cnt:5;
- uint64_t shft_reg:32;
-#else
- uint64_t shft_reg:32;
- uint64_t shft_cnt:5;
- uint64_t reserved_37_39:3;
- uint64_t select:3;
- uint64_t reserved_43_60:18;
- uint64_t update:1;
- uint64_t shift:1;
- uint64_t capture:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_qlm_jtgd_cn61xx cn63xx;
- struct cvmx_ciu_qlm_jtgd_cn61xx cn63xxp1;
- struct cvmx_ciu_qlm_jtgd_cn61xx cn66xx;
- struct cvmx_ciu_qlm_jtgd_s cn68xx;
- struct cvmx_ciu_qlm_jtgd_s cn68xxp1;
- struct cvmx_ciu_qlm_jtgd_cn61xx cnf71xx;
-};
-
-union cvmx_ciu_soft_bist {
- uint64_t u64;
- struct cvmx_ciu_soft_bist_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_bist:1;
-#else
- uint64_t soft_bist:1;
- uint64_t reserved_1_63:63;
-#endif
+ __BITFIELD_FIELD(uint64_t capture:1,
+ __BITFIELD_FIELD(uint64_t shift:1,
+ __BITFIELD_FIELD(uint64_t update:1,
+ __BITFIELD_FIELD(uint64_t reserved_45_60:16,
+ __BITFIELD_FIELD(uint64_t select:5,
+ __BITFIELD_FIELD(uint64_t reserved_37_39:3,
+ __BITFIELD_FIELD(uint64_t shft_cnt:5,
+ __BITFIELD_FIELD(uint64_t shft_reg:32,
+ ;))))))))
} s;
- struct cvmx_ciu_soft_bist_s cn30xx;
- struct cvmx_ciu_soft_bist_s cn31xx;
- struct cvmx_ciu_soft_bist_s cn38xx;
- struct cvmx_ciu_soft_bist_s cn38xxp2;
- struct cvmx_ciu_soft_bist_s cn50xx;
- struct cvmx_ciu_soft_bist_s cn52xx;
- struct cvmx_ciu_soft_bist_s cn52xxp1;
- struct cvmx_ciu_soft_bist_s cn56xx;
- struct cvmx_ciu_soft_bist_s cn56xxp1;
- struct cvmx_ciu_soft_bist_s cn58xx;
- struct cvmx_ciu_soft_bist_s cn58xxp1;
- struct cvmx_ciu_soft_bist_s cn61xx;
- struct cvmx_ciu_soft_bist_s cn63xx;
- struct cvmx_ciu_soft_bist_s cn63xxp1;
- struct cvmx_ciu_soft_bist_s cn66xx;
- struct cvmx_ciu_soft_bist_s cn68xx;
- struct cvmx_ciu_soft_bist_s cn68xxp1;
- struct cvmx_ciu_soft_bist_s cnf71xx;
};
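
The per-model views deleted here (cn52xx, cn56xx, cn61xx, ...) only
narrowed fields such as 'select'; the surviving _s struct is the widest
superset, so code that used a model-specific member migrates
mechanically. A hypothetical before/after:

	union cvmx_ciu_qlm_jtgd jtgd = { .u64 = 0 };

	/* previously jtgd.cn52xx.select or jtgd.cn61xx.select */
	jtgd.s.select = 2;
	jtgd.s.shift = 1;

Keeping the written value in range for the actual part remains the
caller's responsibility, as it already was whenever the generic _s view
was used.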
union cvmx_ciu_soft_prst {
uint64_t u64;
struct cvmx_ciu_soft_prst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_3_63:61;
- uint64_t host64:1;
- uint64_t npi:1;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t npi:1;
- uint64_t host64:1;
- uint64_t reserved_3_63:61;
-#endif
- } s;
- struct cvmx_ciu_soft_prst_s cn30xx;
- struct cvmx_ciu_soft_prst_s cn31xx;
- struct cvmx_ciu_soft_prst_s cn38xx;
- struct cvmx_ciu_soft_prst_s cn38xxp2;
- struct cvmx_ciu_soft_prst_s cn50xx;
- struct cvmx_ciu_soft_prst_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn52xx;
- struct cvmx_ciu_soft_prst_cn52xx cn52xxp1;
- struct cvmx_ciu_soft_prst_cn52xx cn56xx;
- struct cvmx_ciu_soft_prst_cn52xx cn56xxp1;
- struct cvmx_ciu_soft_prst_s cn58xx;
- struct cvmx_ciu_soft_prst_s cn58xxp1;
- struct cvmx_ciu_soft_prst_cn52xx cn61xx;
- struct cvmx_ciu_soft_prst_cn52xx cn63xx;
- struct cvmx_ciu_soft_prst_cn52xx cn63xxp1;
- struct cvmx_ciu_soft_prst_cn52xx cn66xx;
- struct cvmx_ciu_soft_prst_cn52xx cn68xx;
- struct cvmx_ciu_soft_prst_cn52xx cn68xxp1;
- struct cvmx_ciu_soft_prst_cn52xx cnf71xx;
-};
-
-union cvmx_ciu_soft_prst1 {
- uint64_t u64;
- struct cvmx_ciu_soft_prst1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_soft_prst1_s cn52xx;
- struct cvmx_ciu_soft_prst1_s cn52xxp1;
- struct cvmx_ciu_soft_prst1_s cn56xx;
- struct cvmx_ciu_soft_prst1_s cn56xxp1;
- struct cvmx_ciu_soft_prst1_s cn61xx;
- struct cvmx_ciu_soft_prst1_s cn63xx;
- struct cvmx_ciu_soft_prst1_s cn63xxp1;
- struct cvmx_ciu_soft_prst1_s cn66xx;
- struct cvmx_ciu_soft_prst1_s cn68xx;
- struct cvmx_ciu_soft_prst1_s cn68xxp1;
- struct cvmx_ciu_soft_prst1_s cnf71xx;
-};
-
-union cvmx_ciu_soft_prst2 {
- uint64_t u64;
- struct cvmx_ciu_soft_prst2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_soft_prst2_s cn66xx;
-};
-
-union cvmx_ciu_soft_prst3 {
- uint64_t u64;
- struct cvmx_ciu_soft_prst3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_prst:1;
-#else
- uint64_t soft_prst:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_soft_prst3_s cn66xx;
-};
-
-union cvmx_ciu_soft_rst {
- uint64_t u64;
- struct cvmx_ciu_soft_rst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t soft_rst:1;
-#else
- uint64_t soft_rst:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu_soft_rst_s cn30xx;
- struct cvmx_ciu_soft_rst_s cn31xx;
- struct cvmx_ciu_soft_rst_s cn38xx;
- struct cvmx_ciu_soft_rst_s cn38xxp2;
- struct cvmx_ciu_soft_rst_s cn50xx;
- struct cvmx_ciu_soft_rst_s cn52xx;
- struct cvmx_ciu_soft_rst_s cn52xxp1;
- struct cvmx_ciu_soft_rst_s cn56xx;
- struct cvmx_ciu_soft_rst_s cn56xxp1;
- struct cvmx_ciu_soft_rst_s cn58xx;
- struct cvmx_ciu_soft_rst_s cn58xxp1;
- struct cvmx_ciu_soft_rst_s cn61xx;
- struct cvmx_ciu_soft_rst_s cn63xx;
- struct cvmx_ciu_soft_rst_s cn63xxp1;
- struct cvmx_ciu_soft_rst_s cn66xx;
- struct cvmx_ciu_soft_rst_s cn68xx;
- struct cvmx_ciu_soft_rst_s cn68xxp1;
- struct cvmx_ciu_soft_rst_s cnf71xx;
-};
-
-union cvmx_ciu_sum1_iox_int {
- uint64_t u64;
- struct cvmx_ciu_sum1_iox_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_3_63:61,
+ __BITFIELD_FIELD(uint64_t host64:1,
+ __BITFIELD_FIELD(uint64_t npi:1,
+ __BITFIELD_FIELD(uint64_t soft_prst:1,
+ ;))))
} s;
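
CIU_SOFT_PRST gets the same single-layout treatment. For illustration,
the reset-assert pattern familiar from the Octeon PCIe setup code,
sketched here with the CVMX_CIU_SOFT_PRST address constant and the CSR
accessors assumed from the surrounding Octeon support code:

	union cvmx_ciu_soft_prst soft_prst;

	soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
	soft_prst.s.soft_prst = 1;	/* hold the PCIe port in reset */
	cvmx_write_csr(CVMX_CIU_SOFT_PRST, soft_prst.u64);
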
- struct cvmx_ciu_sum1_iox_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum1_iox_int_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_sum1_iox_int_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_sum1_ppx_ip2 {
- uint64_t u64;
- struct cvmx_ciu_sum1_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_sum1_ppx_ip2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum1_ppx_ip2_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_sum1_ppx_ip2_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_sum1_ppx_ip3 {
- uint64_t u64;
- struct cvmx_ciu_sum1_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_sum1_ppx_ip3_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum1_ppx_ip3_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_sum1_ppx_ip3_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_sum1_ppx_ip4 {
- uint64_t u64;
- struct cvmx_ciu_sum1_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu_sum1_ppx_ip4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_41_45:5;
- uint64_t dpi_dma:1;
- uint64_t reserved_38_39:2;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_4_17:14;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_17:14;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_39:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_45:5;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum1_ppx_ip4_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_62_62:1;
- uint64_t srio3:1;
- uint64_t srio2:1;
- uint64_t reserved_57_59:3;
- uint64_t dfm:1;
- uint64_t reserved_53_55:3;
- uint64_t lmc0:1;
- uint64_t reserved_51_51:1;
- uint64_t srio0:1;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t agl:1;
- uint64_t reserved_38_45:8;
- uint64_t agx1:1;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t dfa:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t zip:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t mii1:1;
- uint64_t reserved_10_17:8;
- uint64_t wdog:10;
-#else
- uint64_t wdog:10;
- uint64_t reserved_10_17:8;
- uint64_t mii1:1;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t zip:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t dfa:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t agx1:1;
- uint64_t reserved_38_45:8;
- uint64_t agl:1;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t srio0:1;
- uint64_t reserved_51_51:1;
- uint64_t lmc0:1;
- uint64_t reserved_53_55:3;
- uint64_t dfm:1;
- uint64_t reserved_57_59:3;
- uint64_t srio2:1;
- uint64_t srio3:1;
- uint64_t reserved_62_62:1;
- uint64_t rst:1;
-#endif
- } cn66xx;
- struct cvmx_ciu_sum1_ppx_ip4_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_53_62:10;
- uint64_t lmc0:1;
- uint64_t reserved_50_51:2;
- uint64_t pem1:1;
- uint64_t pem0:1;
- uint64_t ptp:1;
- uint64_t reserved_41_46:6;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t agx0:1;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t usb:1;
- uint64_t reserved_32_32:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_28_28:1;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t l2c:1;
- uint64_t pow:1;
- uint64_t fpa:1;
- uint64_t iob:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_4_18:15;
- uint64_t wdog:4;
-#else
- uint64_t wdog:4;
- uint64_t reserved_4_18:15;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t iob:1;
- uint64_t fpa:1;
- uint64_t pow:1;
- uint64_t l2c:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_28_28:1;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_32_32:1;
- uint64_t usb:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t agx0:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_41_46:6;
- uint64_t ptp:1;
- uint64_t pem0:1;
- uint64_t pem1:1;
- uint64_t reserved_50_51:2;
- uint64_t lmc0:1;
- uint64_t reserved_53_62:10;
- uint64_t rst:1;
-#endif
- } cnf71xx;
-};
-
-union cvmx_ciu_sum2_iox_int {
- uint64_t u64;
- struct cvmx_ciu_sum2_iox_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_sum2_iox_int_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum2_iox_int_cn61xx cn66xx;
- struct cvmx_ciu_sum2_iox_int_s cnf71xx;
-};
-
-union cvmx_ciu_sum2_ppx_ip2 {
- uint64_t u64;
- struct cvmx_ciu_sum2_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_sum2_ppx_ip2_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum2_ppx_ip2_cn61xx cn66xx;
- struct cvmx_ciu_sum2_ppx_ip2_s cnf71xx;
-};
-
-union cvmx_ciu_sum2_ppx_ip3 {
- uint64_t u64;
- struct cvmx_ciu_sum2_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_sum2_ppx_ip3_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum2_ppx_ip3_cn61xx cn66xx;
- struct cvmx_ciu_sum2_ppx_ip3_s cnf71xx;
-};
-
-union cvmx_ciu_sum2_ppx_ip4 {
- uint64_t u64;
- struct cvmx_ciu_sum2_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t endor:2;
- uint64_t eoi:1;
- uint64_t reserved_10_11:2;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_11:2;
- uint64_t eoi:1;
- uint64_t endor:2;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_ciu_sum2_ppx_ip4_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t timer:6;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t timer:6;
- uint64_t reserved_10_63:54;
-#endif
- } cn61xx;
- struct cvmx_ciu_sum2_ppx_ip4_cn61xx cn66xx;
- struct cvmx_ciu_sum2_ppx_ip4_s cnf71xx;
};
union cvmx_ciu_timx {
uint64_t u64;
struct cvmx_ciu_timx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_37_63:27;
- uint64_t one_shot:1;
- uint64_t len:36;
-#else
- uint64_t len:36;
- uint64_t one_shot:1;
- uint64_t reserved_37_63:27;
-#endif
- } s;
- struct cvmx_ciu_timx_s cn30xx;
- struct cvmx_ciu_timx_s cn31xx;
- struct cvmx_ciu_timx_s cn38xx;
- struct cvmx_ciu_timx_s cn38xxp2;
- struct cvmx_ciu_timx_s cn50xx;
- struct cvmx_ciu_timx_s cn52xx;
- struct cvmx_ciu_timx_s cn52xxp1;
- struct cvmx_ciu_timx_s cn56xx;
- struct cvmx_ciu_timx_s cn56xxp1;
- struct cvmx_ciu_timx_s cn58xx;
- struct cvmx_ciu_timx_s cn58xxp1;
- struct cvmx_ciu_timx_s cn61xx;
- struct cvmx_ciu_timx_s cn63xx;
- struct cvmx_ciu_timx_s cn63xxp1;
- struct cvmx_ciu_timx_s cn66xx;
- struct cvmx_ciu_timx_s cn68xx;
- struct cvmx_ciu_timx_s cn68xxp1;
- struct cvmx_ciu_timx_s cnf71xx;
-};
-
-union cvmx_ciu_tim_multi_cast {
- uint64_t u64;
- struct cvmx_ciu_tim_multi_cast_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t en:1;
-#else
- uint64_t en:1;
- uint64_t reserved_1_63:63;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_37_63:27,
+ __BITFIELD_FIELD(uint64_t one_shot:1,
+ __BITFIELD_FIELD(uint64_t len:36,
+ ;)))
} s;
- struct cvmx_ciu_tim_multi_cast_s cn61xx;
- struct cvmx_ciu_tim_multi_cast_s cn66xx;
- struct cvmx_ciu_tim_multi_cast_s cnf71xx;
};
union cvmx_ciu_wdogx {
uint64_t u64;
struct cvmx_ciu_wdogx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_46_63:18;
- uint64_t gstopen:1;
- uint64_t dstop:1;
- uint64_t cnt:24;
- uint64_t len:16;
- uint64_t state:2;
- uint64_t mode:2;
-#else
- uint64_t mode:2;
- uint64_t state:2;
- uint64_t len:16;
- uint64_t cnt:24;
- uint64_t dstop:1;
- uint64_t gstopen:1;
- uint64_t reserved_46_63:18;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_46_63:18,
+ __BITFIELD_FIELD(uint64_t gstopen:1,
+ __BITFIELD_FIELD(uint64_t dstop:1,
+ __BITFIELD_FIELD(uint64_t cnt:24,
+ __BITFIELD_FIELD(uint64_t len:16,
+ __BITFIELD_FIELD(uint64_t state:2,
+ __BITFIELD_FIELD(uint64_t mode:2,
+ ;)))))))
} s;
- struct cvmx_ciu_wdogx_s cn30xx;
- struct cvmx_ciu_wdogx_s cn31xx;
- struct cvmx_ciu_wdogx_s cn38xx;
- struct cvmx_ciu_wdogx_s cn38xxp2;
- struct cvmx_ciu_wdogx_s cn50xx;
- struct cvmx_ciu_wdogx_s cn52xx;
- struct cvmx_ciu_wdogx_s cn52xxp1;
- struct cvmx_ciu_wdogx_s cn56xx;
- struct cvmx_ciu_wdogx_s cn56xxp1;
- struct cvmx_ciu_wdogx_s cn58xx;
- struct cvmx_ciu_wdogx_s cn58xxp1;
- struct cvmx_ciu_wdogx_s cn61xx;
- struct cvmx_ciu_wdogx_s cn63xx;
- struct cvmx_ciu_wdogx_s cn63xxp1;
- struct cvmx_ciu_wdogx_s cn66xx;
- struct cvmx_ciu_wdogx_s cn68xx;
- struct cvmx_ciu_wdogx_s cn68xxp1;
- struct cvmx_ciu_wdogx_s cnf71xx;
};
-#endif
+#endif /* __CVMX_CIU_DEFS_H__ */
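The __BITFIELD_FIELD() helper used in the rewritten declarations above comes from arch/mips/include/uapi/asm/bitfield.h: it emits its first argument before the rest on big-endian builds and after it otherwise, which is exactly what the deleted #ifdef __BIG_ENDIAN_BITFIELD pairs spelled out twice. The trailing ;))) closes the nested invocations, with the bare ; serving as the innermost "more" argument. A sketch of the helper (shown approximately; see the header for the exact form) and of the little-endian expansion of cvmx_ciu_timx_s:
	#ifdef __BIG_ENDIAN_BITFIELD
	#define __BITFIELD_FIELD(field, more)	\
		field;				\
		more
	#else
	#define __BITFIELD_FIELD(field, more)	\
		more				\
		field;
	#endif

	/* little-endian expansion: same fields, reverse order */
	struct cvmx_ciu_timx_s {
		uint64_t len:36;
		uint64_t one_shot:1;
		uint64_t reserved_37_63:27;
	};
Each field is now written once, so the two byte-order variants can no longer drift apart.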
diff --git a/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h b/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
index e347496a33c3..80e4f8358b81 100644
--- a/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -2070,6 +2070,8 @@ static inline uint64_t CVMX_GMXX_XAUI_EXT_LOOPBACK(unsigned long block_id)
return CVMX_ADD_IO_SEG(0x0001180008000540ull) + (block_id) * 0x8000000ull;
}
+void __cvmx_interrupt_gmxx_enable(int interface);
+
union cvmx_gmxx_bad_reg {
uint64_t u64;
struct cvmx_gmxx_bad_reg_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h b/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
index a5e8fd861c37..39da7f9d7b3f 100644
--- a/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -334,6 +334,8 @@ static inline uint64_t CVMX_PCSX_TX_RXX_POLARITY_REG(unsigned long offset, unsig
return CVMX_ADD_IO_SEG(0x00011800B0001048ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
}
+void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
+
union cvmx_pcsx_anx_adv_reg {
uint64_t u64;
struct cvmx_pcsx_anx_adv_reg_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h b/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
index b5b45d26f1c5..847dd9dca6ea 100644
--- a/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -268,6 +268,8 @@ static inline uint64_t CVMX_PCSXX_TX_RX_STATES_REG(unsigned long block_id)
return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + (block_id) * 0x1000000ull;
}
+void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
+
union cvmx_pcsxx_10gbx_status_reg {
uint64_t u64;
struct cvmx_pcsxx_10gbx_status_reg_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-spxx-defs.h b/arch/mips/include/asm/octeon/cvmx-spxx-defs.h
index c7d601d9446e..f4c4e8051160 100644
--- a/arch/mips/include/asm/octeon/cvmx-spxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-spxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -45,6 +45,8 @@
#define CVMX_SPXX_TPA_SEL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000328ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_TRN4_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000360ull) + ((block_id) & 1) * 0x8000000ull)
+void __cvmx_interrupt_spxx_int_msk_enable(int index);
+
union cvmx_spxx_bckprs_cnt {
uint64_t u64;
struct cvmx_spxx_bckprs_cnt_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-stxx-defs.h b/arch/mips/include/asm/octeon/cvmx-stxx-defs.h
index 146354005d3b..3c409a854d91 100644
--- a/arch/mips/include/asm/octeon/cvmx-stxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-stxx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -45,6 +45,8 @@
#define CVMX_STXX_STAT_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000638ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_STAT_PKT_XMT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000640ull) + ((block_id) & 1) * 0x8000000ull)
+void __cvmx_interrupt_stxx_int_msk_enable(int index);
+
union cvmx_stxx_arb_ctl {
uint64_t u64;
struct cvmx_stxx_arb_ctl_s {
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index c99c4b6a79f4..60481502826a 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -279,13 +279,12 @@ union octeon_cvmemctl {
} s;
};
-extern void octeon_write_lcd(const char *s);
extern void octeon_check_cpu_bist(void);
-extern int octeon_get_boot_uart(void);
-struct uart_port;
-extern unsigned int octeon_serial_in(struct uart_port *, int);
-extern void octeon_serial_out(struct uart_port *, int, int);
+int octeon_prune_device_tree(void);
+extern const char __appended_dtb;
+extern const char __dtb_octeon_3xxx_begin;
+extern const char __dtb_octeon_68xx_begin;
/**
* Write a 32bit value to the Octeon NPI register space
diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
index 1884609741a8..b12d9a3fbfb6 100644
--- a/arch/mips/include/asm/octeon/pci-octeon.h
+++ b/arch/mips/include/asm/octeon/pci-octeon.h
@@ -63,4 +63,7 @@ enum octeon_dma_bar_type {
*/
extern enum octeon_dma_bar_type octeon_dma_bar_type;
+void octeon_pci_dma_init(void);
+extern char *octeon_swiotlb;
+
#endif
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index ad461216b5a1..e8cc328fce2d 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -80,7 +80,12 @@ extern void build_copy_page(void);
* used in our early mem init code for all memory models.
* So always define it.
*/
-#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+extern unsigned long ARCH_PFN_OFFSET;
+# define ARCH_PFN_OFFSET ARCH_PFN_OFFSET
+#else
+# define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
+#endif
extern void clear_page(void * page);
extern void copy_page(void * to, void * from);
@@ -252,8 +257,8 @@ extern int __virt_addr_valid(const volatile void *kaddr);
((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE)
-#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)
+#define UNCAC_ADDR(addr) (UNCAC_BASE + __pa(addr))
+#define CAC_ADDR(addr) ((unsigned long)__va((addr) - UNCAC_BASE))
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
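The self-referential define above is deliberate: generic code only tests whether an ARCH_PFN_OFFSET macro exists, so pointing the macro at an identically named variable lets CONFIG_MIPS_AUTO_PFN_OFFSET kernels choose the offset at boot (see the bootmem_init() hunk later in this patch, where it is set from ramstart) while every consumer compiles unchanged. The flatmem helpers in asm-generic/memory_model.h, for example, keep working as-is:
	#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
	#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
					 ARCH_PFN_OFFSET)
With the variable form they simply read whatever value setup.c computed.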
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index af34afbc32d9..b2fa62922d88 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -141,7 +141,7 @@ struct mips_fpu_struct {
#define NUM_DSP_REGS 6
-typedef __u32 dspreg_t;
+typedef unsigned long dspreg_t;
struct mips_dsp_state {
dspreg_t dspr[NUM_DSP_REGS];
@@ -386,7 +386,20 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
+#ifdef CONFIG_CPU_LOONGSON3
+/*
+ * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
+ * tight read loop is executed, because reads take priority over writes & the
+ * hardware (incorrectly) doesn't ensure that writes will eventually occur.
+ *
+ * Since spin loops of any kind should have a cpu_relax() in them, force an SFB
+ * flush from cpu_relax() such that any pending writes will become visible as
+ * expected.
+ */
+#define cpu_relax() smp_mb()
+#else
#define cpu_relax() barrier()
+#endif
/*
* Return_address is a replacement for __builtin_return_address(count)
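The failure mode the new Loongson-3 cpu_relax() addresses is easiest to see as a two-CPU sketch (x and y are illustrative shared flags):
	/* CPU A */
	WRITE_ONCE(x, 1);	/* may sit in A's SFB indefinitely...	*/
	while (!READ_ONCE(y))	/* ...while this tight read loop runs	*/
		cpu_relax();	/* now smp_mb(): forces the SFB out	*/

	/* CPU B */
	while (!READ_ONCE(x))	/* would otherwise never observe x == 1	*/
		cpu_relax();
	WRITE_ONCE(y, 1);
On every other CPU type cpu_relax() remains a plain compiler barrier, so the change is confined to the affected hardware.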
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index d49d247d48a1..bb36a400203d 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -2,8 +2,10 @@
#ifndef _MIPS_SETUP_H
#define _MIPS_SETUP_H
+#include <linux/types.h>
#include <uapi/asm/setup.h>
+extern void prom_putchar(char);
extern void setup_early_printk(void);
#ifdef CONFIG_EARLY_PRINTK_8250
diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h
index 195db5045ae5..0d9fad5915fe 100644
--- a/arch/mips/include/asm/sgialib.h
+++ b/arch/mips/include/asm/sgialib.h
@@ -31,7 +31,6 @@ extern int prom_flags;
#define PROM_FLAG_DONT_FREE_TEMP 4
/* Simple char-by-char console I/O. */
-extern void prom_putchar(char c);
extern char prom_getchar(void);
/* Get next memory descriptor after CURR, returns first descriptor
diff --git a/arch/mips/include/asm/sim.h b/arch/mips/include/asm/sim.h
index 91831800c480..59f31a95facd 100644
--- a/arch/mips/include/asm/sim.h
+++ b/arch/mips/include/asm/sim.h
@@ -39,8 +39,6 @@ __asm__( \
".end\t__" #symbol "\n\t" \
".size\t__" #symbol",. - __" #symbol)
-#define nabi_no_regargs
-
#endif /* CONFIG_32BIT */
#ifdef CONFIG_64BIT
@@ -67,16 +65,6 @@ __asm__( \
".end\t__" #symbol "\n\t" \
".size\t__" #symbol",. - __" #symbol)
-#define nabi_no_regargs \
- unsigned long __dummy0, \
- unsigned long __dummy1, \
- unsigned long __dummy2, \
- unsigned long __dummy3, \
- unsigned long __dummy4, \
- unsigned long __dummy5, \
- unsigned long __dummy6, \
- unsigned long __dummy7,
-
#endif /* CONFIG_64BIT */
#endif /* _ASM_SIM_H */
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 88ebd83b3bf9..056a6bf13491 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -25,7 +25,17 @@ extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+static inline int raw_smp_processor_id(void)
+{
+#if defined(__VDSO__)
+ extern int vdso_smp_processor_id(void)
+ __compiletime_error("VDSO should not call smp_processor_id()");
+ return vdso_smp_processor_id();
+#else
+ return current_thread_info()->cpu;
+#endif
+}
+#define raw_smp_processor_id raw_smp_processor_id
/* Map from cpu id to sequential logical cpu number. This will only
   be non-idempotent when cpus have failed to come on-line. */
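Turning the macro into a function makes misuse fail at build time: __compiletime_error() fires only if a call to the marked declaration survives optimization, so VDSO code (built with __VDSO__, where the thread_info register is not usable) can still include the header freely. A hypothetical offender:
	/* built with -D__VDSO__; pick_cpu() is illustrative */
	static int pick_cpu(void)
	{
		return raw_smp_processor_id();
		/* error: call to 'vdso_smp_processor_id' declared with
		   attribute error: VDSO should not call smp_processor_id() */
	}
The old macro would have compiled here and then read through a bogus thread_info pointer at run time.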
diff --git a/arch/mips/include/asm/txx9/generic.h b/arch/mips/include/asm/txx9/generic.h
index 64887d3c7ec3..9a2c47bf3c40 100644
--- a/arch/mips/include/asm/txx9/generic.h
+++ b/arch/mips/include/asm/txx9/generic.h
@@ -49,7 +49,6 @@ void txx9_spi_init(int busid, unsigned long base, int irq);
void txx9_ethaddr_init(unsigned int id, unsigned char *ethaddr);
void txx9_sio_init(unsigned long baseaddr, int irq,
unsigned int line, unsigned int sclk, int nocts);
-void prom_putchar(char c);
#ifdef CONFIG_EARLY_PRINTK
extern void (*txx9_prom_putchar)(char c);
void txx9_sio_putchar_init(unsigned long baseaddr);
diff --git a/arch/mips/include/asm/txx9/tx4939.h b/arch/mips/include/asm/txx9/tx4939.h
index 6d667087f2aa..00805ac6e9fc 100644
--- a/arch/mips/include/asm/txx9/tx4939.h
+++ b/arch/mips/include/asm/txx9/tx4939.h
@@ -101,13 +101,6 @@ struct tx4939_irc_reg {
struct tx4939_le_reg maskext;
};
-struct tx4939_rtc_reg {
- __u32 ctl;
- __u32 adr;
- __u32 dat;
- __u32 tbc;
-};
-
struct tx4939_crypto_reg {
struct tx4939_le_reg csr;
struct tx4939_le_reg idesptr;
@@ -370,26 +363,6 @@ struct tx4939_vpc_desc {
#define TX4939_CLKCTR_CYPRST 0x00000001
/*
- * RTC
- */
-#define TX4939_RTCCTL_ALME 0x00000080
-#define TX4939_RTCCTL_ALMD 0x00000040
-#define TX4939_RTCCTL_BUSY 0x00000020
-
-#define TX4939_RTCCTL_COMMAND 0x00000007
-#define TX4939_RTCCTL_COMMAND_NOP 0x00000000
-#define TX4939_RTCCTL_COMMAND_GETTIME 0x00000001
-#define TX4939_RTCCTL_COMMAND_SETTIME 0x00000002
-#define TX4939_RTCCTL_COMMAND_GETALARM 0x00000003
-#define TX4939_RTCCTL_COMMAND_SETALARM 0x00000004
-
-#define TX4939_RTCTBC_PM 0x00000080
-#define TX4939_RTCTBC_COMP 0x0000007f
-
-#define TX4939_RTC_REG_RAMSIZE 0x00000100
-#define TX4939_RTC_REG_RWBSIZE 0x00000006
-
-/*
* CRYPTO
*/
#define TX4939_CRYPTO_CSR_SAESO 0x08000000
@@ -498,8 +471,6 @@ struct tx4939_vpc_desc {
#define tx4939_ccfgptr \
((struct tx4939_ccfg_reg __iomem *)TX4939_CCFG_REG)
#define tx4939_sramcptr tx4938_sramcptr
-#define tx4939_rtcptr \
- ((struct tx4939_rtc_reg __iomem *)TX4939_RTC_REG)
#define tx4939_cryptoptr \
((struct tx4939_crypto_reg __iomem *)TX4939_CRYPTO_REG)
#define tx4939_vpcptr ((struct tx4939_vpc_reg __iomem *)TX4939_VPC_REG)
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 49c3d4795963..71370fb3ceef 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -123,4 +123,7 @@
#define SO_ZEROCOPY 60
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index d626a9a391cc..d31bc2f01208 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -16,6 +16,8 @@
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
#include <asm/mipsregs.h>
#include <asm/jazz.h>
#include <asm/io.h>
@@ -86,6 +88,7 @@ static int __init vdma_init(void)
printk(KERN_INFO "VDMA: R4030 DMA pagetables initialized.\n");
return 0;
}
+arch_initcall(vdma_init);
/*
* Allocate DMA pagetables using a simple first-fit algorithm
@@ -556,4 +559,140 @@ int vdma_get_enable(int channel)
return enable;
}
-arch_initcall(vdma_init);
+static void *jazz_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+ void *ret;
+
+ ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+ if (!ret)
+ return NULL;
+
+ *dma_handle = vdma_alloc(virt_to_phys(ret), size);
+ if (*dma_handle == VDMA_ERROR) {
+ dma_direct_free(dev, size, ret, *dma_handle, attrs);
+ return NULL;
+ }
+
+ if (!(attrs & DMA_ATTR_NON_CONSISTENT)) {
+ dma_cache_wback_inv((unsigned long)ret, size);
+ ret = (void *)UNCAC_ADDR(ret);
+ }
+ return ret;
+}
+
+static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ vdma_free(dma_handle);
+ if (!(attrs & DMA_ATTR_NON_CONSISTENT))
+ vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
+ return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+}
+
+static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ phys_addr_t phys = page_to_phys(page) + offset;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(dev, phys, size, dir);
+ return vdma_alloc(phys, size);
+}
+
+static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir);
+ vdma_free(dma_addr);
+}
+
+static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nents, i) {
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+ dir);
+ sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
+ if (sg->dma_address == VDMA_ERROR)
+ return 0;
+ sg_dma_len(sg) = sg->length;
+ }
+
+ return nents;
+}
+
+static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nents, i) {
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length,
+ dir);
+ vdma_free(sg->dma_address);
+ }
+}
+
+static void jazz_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir);
+}
+
+static void jazz_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir);
+}
+
+static void jazz_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void jazz_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == VDMA_ERROR;
+}
+
+const struct dma_map_ops jazz_dma_ops = {
+ .alloc = jazz_dma_alloc,
+ .free = jazz_dma_free,
+ .mmap = arch_dma_mmap,
+ .map_page = jazz_dma_map_page,
+ .unmap_page = jazz_dma_unmap_page,
+ .map_sg = jazz_dma_map_sg,
+ .unmap_sg = jazz_dma_unmap_sg,
+ .sync_single_for_cpu = jazz_dma_sync_single_for_cpu,
+ .sync_single_for_device = jazz_dma_sync_single_for_device,
+ .sync_sg_for_cpu = jazz_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = jazz_dma_sync_sg_for_device,
+ .dma_supported = dma_direct_supported,
+ .cache_sync = arch_dma_cache_sync,
+ .mapping_error = jazz_dma_mapping_error,
+};
+EXPORT_SYMBOL(jazz_dma_ops);
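For orientation, a sketch of how these ops are reached through the generic DMA API once a device's dma_map_ops points at jazz_dma_ops (dev and size are illustrative):
	dma_addr_t handle;
	void *buf;

	buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
	/* -> jazz_dma_alloc(): dma_direct_alloc(), vdma_alloc() to
	 *    install R4030 pagetable entries, UNCAC_ADDR() remap */

	dma_free_coherent(dev, size, buf, handle);
	/* -> jazz_dma_free(): vdma_free(), CAC_ADDR(), dma_direct_free() */
The handle a driver sees is therefore a VDMA bus address, and every map/unmap path above keeps the R4030 translation table in sync with it.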
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index 448fd41792e4..1b5e121c3f0d 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -16,6 +16,7 @@
#include <linux/screen_info.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
+#include <linux/dma-mapping.h>
#include <asm/jazz.h>
#include <asm/jazzdma.h>
@@ -136,10 +137,16 @@ static struct resource jazz_esp_rsrc[] = {
}
};
+static u64 jazz_esp_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device jazz_esp_pdev = {
.name = "jazz_esp",
.num_resources = ARRAY_SIZE(jazz_esp_rsrc),
- .resource = jazz_esp_rsrc
+ .resource = jazz_esp_rsrc,
+ .dev = {
+ .dma_mask = &jazz_esp_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
};
static struct resource jazz_sonic_rsrc[] = {
@@ -155,10 +162,16 @@ static struct resource jazz_sonic_rsrc[] = {
}
};
+static u64 jazz_sonic_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device jazz_sonic_pdev = {
.name = "jazzsonic",
.num_resources = ARRAY_SIZE(jazz_sonic_rsrc),
- .resource = jazz_sonic_rsrc
+ .resource = jazz_sonic_rsrc,
+ .dev = {
+ .dma_mask = &jazz_sonic_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
};
static struct resource jazz_cmos_rsrc[] = {
diff --git a/arch/mips/jz4740/Platform b/arch/mips/jz4740/Platform
index 28448d358c10..a2a5a85ea1f9 100644
--- a/arch/mips/jz4740/Platform
+++ b/arch/mips/jz4740/Platform
@@ -1,4 +1,4 @@
platform-$(CONFIG_MACH_INGENIC) += jz4740/
cflags-$(CONFIG_MACH_INGENIC) += -I$(srctree)/arch/mips/include/asm/mach-jz4740
load-$(CONFIG_MACH_INGENIC) += 0xffffffff80010000
-zload-$(CONFIG_MACH_INGENIC) += 0xffffffff80600000
+zload-$(CONFIG_MACH_INGENIC) += 0xffffffff81000000
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 60f0767507c6..af0c8ace0141 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -29,10 +29,11 @@
#include <linux/power/gpio-charger.h>
#include <linux/pwm.h>
+#include <linux/platform_data/jz4740/jz4740_nand.h>
+
#include <asm/mach-jz4740/gpio.h>
#include <asm/mach-jz4740/jz4740_fb.h>
#include <asm/mach-jz4740/jz4740_mmc.h>
-#include <asm/mach-jz4740/jz4740_nand.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b2509c19cfb5..d535fc706a8b 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1849,7 +1849,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
- case PRID_REV_LOONGSON3A_R3:
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
c->cputype = CPU_LOONGSON3;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
index 505cb77d1280..4a1647ddfbd9 100644
--- a/arch/mips/kernel/early_printk.c
+++ b/arch/mips/kernel/early_printk.c
@@ -14,8 +14,6 @@
#include <asm/setup.h>
-extern void prom_putchar(char);
-
static void early_console_write(struct console *con, const char *s, unsigned n)
{
while (n-- && *s) {
diff --git a/arch/mips/kernel/early_printk_8250.c b/arch/mips/kernel/early_printk_8250.c
index 83cea3767556..ea26614afac6 100644
--- a/arch/mips/kernel/early_printk_8250.c
+++ b/arch/mips/kernel/early_printk_8250.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
+#include <asm/setup.h>
static void __iomem *serial8250_base;
static unsigned int serial8250_reg_shift;
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 37b9383eacd3..6c257b52f57f 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -354,16 +354,56 @@ NESTED(ejtag_debug_handler, PT_SIZE, sp)
sll k0, k0, 30 # Check for SDBBP.
bgez k0, ejtag_return
+#ifdef CONFIG_SMP
+1: PTR_LA k0, ejtag_debug_buffer_spinlock
+ ll k0, 0(k0)
+ bnez k0, 1b
+ PTR_LA k0, ejtag_debug_buffer_spinlock
+ sc k0, 0(k0)
+ beqz k0, 1b
+# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
+ sync
+# endif
+
+ PTR_LA k0, ejtag_debug_buffer
+ LONG_S k1, 0(k0)
+
+ ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
+ PTR_SRL k1, SMP_CPUID_PTRSHIFT
+ PTR_SLL k1, LONGLOG
+ PTR_LA k0, ejtag_debug_buffer_per_cpu
+ PTR_ADDU k0, k1
+
+ PTR_LA k1, ejtag_debug_buffer
+ LONG_L k1, 0(k1)
+ LONG_S k1, 0(k0)
+
+ PTR_LA k0, ejtag_debug_buffer_spinlock
+ sw zero, 0(k0)
+#else
PTR_LA k0, ejtag_debug_buffer
LONG_S k1, 0(k0)
+#endif
+
SAVE_ALL
move a0, sp
jal ejtag_exception_handler
RESTORE_ALL
+
+#ifdef CONFIG_SMP
+ ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
+ PTR_SRL k1, SMP_CPUID_PTRSHIFT
+ PTR_SLL k1, LONGLOG
+ PTR_LA k0, ejtag_debug_buffer_per_cpu
+ PTR_ADDU k0, k1
+ LONG_L k1, 0(k0)
+#else
PTR_LA k0, ejtag_debug_buffer
LONG_L k1, 0(k0)
+#endif
ejtag_return:
+ back_to_back_c0_hazard
MFC0 k0, CP0_DESAVE
.set mips32
deret
@@ -377,6 +417,12 @@ ejtag_return:
.data
EXPORT(ejtag_debug_buffer)
.fill LONGSIZE
+#ifdef CONFIG_SMP
+EXPORT(ejtag_debug_buffer_spinlock)
+ .fill LONGSIZE
+EXPORT(ejtag_debug_buffer_per_cpu)
+ .fill LONGSIZE * NR_CPUS
+#endif
.previous
__INIT
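Only k0/k1 are usable inside the EJTAG debug handler, so the ordinary spinlock API is off limits and the hunk above open-codes a test-and-set lock with ll/sc. A rough C equivalent of the save path (saved_k1 stands in for the live k1 value, and the real lock word is written with a nonzero address rather than 1):
	while (cmpxchg(&ejtag_debug_buffer_spinlock, 0, 1) != 0)
		;				/* ll/sc acquire loop */
	ejtag_debug_buffer = saved_k1;		/* shared scratch slot */
	ejtag_debug_buffer_per_cpu[smp_processor_id()] = ejtag_debug_buffer;
	WRITE_ONCE(ejtag_debug_buffer_spinlock, 0);	/* sw zero: release */
The per-CPU copy is what makes concurrent debug exceptions safe on SMP; the restore path reloads k1 from the current CPU's slot without retaking the lock.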
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 7c246b69c545..046846999efd 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -33,21 +33,21 @@
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);
-static void r3081_wait(void)
+static void __cpuidle r3081_wait(void)
{
unsigned long cfg = read_c0_conf();
write_c0_conf(cfg | R30XX_CONF_HALT);
local_irq_enable();
}
-static void r39xx_wait(void)
+static void __cpuidle r39xx_wait(void)
{
if (!need_resched())
write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
local_irq_enable();
}
-void r4k_wait(void)
+void __cpuidle r4k_wait(void)
{
local_irq_enable();
__r4k_wait();
@@ -60,7 +60,7 @@ void r4k_wait(void)
* interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
* using this version a gamble.
*/
-void r4k_wait_irqoff(void)
+void __cpuidle r4k_wait_irqoff(void)
{
if (!need_resched())
__asm__(
@@ -75,7 +75,7 @@ void r4k_wait_irqoff(void)
* The RM7000 variant has to handle erratum 38. The workaround is to not
* have any pending stores when the WAIT instruction is executed.
*/
-static void rm7k_wait_irqoff(void)
+static void __cpuidle rm7k_wait_irqoff(void)
{
if (!need_resched())
__asm__(
@@ -96,7 +96,7 @@ static void rm7k_wait_irqoff(void)
* since coreclock (and the cp0 counter) stops upon executing it. Only an
* interrupt can wake it, so they must be enabled before entering idle modes.
*/
-static void au1k_wait(void)
+static void __cpuidle au1k_wait(void)
{
unsigned long c0status = read_c0_status() | 1; /* irqs on */
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index f5c8bce70db2..54cd675c5d1d 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -326,19 +326,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
preempt_enable_no_resched();
}
return 1;
- } else {
- if (addr->word != breakpoint_insn.word) {
- /*
- * The breakpoint instruction was removed by
- * another cpu right after we hit, no further
- * handling of this interrupt is appropriate
- */
- ret = 1;
- goto no_kprobe;
- }
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs))
- goto ss_probe;
+ } else if (addr->word != breakpoint_insn.word) {
+ /*
+ * The breakpoint instruction was removed by
+ * another cpu right after we hit it; no further
+ * handling of this interrupt is appropriate.
+ */
+ ret = 1;
}
goto no_kprobe;
}
@@ -364,10 +358,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
if (p->pre_handler && p->pre_handler(p, regs)) {
/* handler has already set things up, so skip ss setup */
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
}
-ss_probe:
prepare_singlestep(p, regs, kcb);
if (kcb->flags & SKIP_DELAYSLOT) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
@@ -468,51 +463,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
return ret;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- kcb->jprobe_saved_sp = regs->regs[29];
-
- memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
- MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
-
- regs->cp0_epc = (unsigned long)(jp->entry);
-
- return 1;
-}
-
-/* Defined in the inline asm below. */
-void jprobe_return_end(void);
-
-void __kprobes jprobe_return(void)
-{
- /* Assembler quirk necessitates this '0,code' business. */
- asm volatile(
- "break 0,%0\n\t"
- ".globl jprobe_return_end\n"
- "jprobe_return_end:\n"
- : : "n" (BRK_KPROBE_BP) : "memory");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (regs->cp0_epc >= (unsigned long)jprobe_return &&
- regs->cp0_epc <= (unsigned long)jprobe_return_end) {
- *regs = kcb->jprobe_saved_regs;
- memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
- MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
- preempt_enable_no_resched();
-
- return 1;
- }
- return 0;
-}
-
/*
* Function return probe trampoline:
* - init_kprobes() establishes a probepoint here
@@ -595,9 +545,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
kretprobe_assert(ri, orig_ret_address, trampoline_address);
instruction_pointer(regs) = orig_ret_address;
- reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 318f1c05c5b3..6b61be486303 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -43,17 +43,6 @@
#include <asm/mmu_context.h>
#include <asm/mman.h>
-/* Use this to get at 32-bit user passed pointers. */
-/* A() macro should be used for places where you e.g.
- have some internal variable u32 and just want to get
- rid of a compiler warning. AA() has to be used in
- places where you want to convert a function argument
- to 32bit pointer or when you e.g. access pt_regs
- structure and want to consider 32bit registers only.
- */
-#define A(__x) ((unsigned long)(__x))
-#define AA(__x) ((unsigned long)((int)__x))
-
#ifdef __MIPSEB__
#define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
#endif
@@ -61,24 +50,6 @@
#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
#endif
-SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len,
- unsigned long, prot, unsigned long, flags, unsigned long, fd,
- unsigned long, pgoff)
-{
- if (pgoff & (~PAGE_MASK >> 12))
- return -EINVAL;
- return ksys_mmap_pgoff(addr, len, prot, flags, fd,
- pgoff >> (PAGE_SHIFT-12));
-}
-
-#define RLIM_INFINITY32 0x7fffffff
-#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
-
-struct rlimit32 {
- int rlim_cur;
- int rlim_max;
-};
-
SYSCALL_DEFINE4(32_truncate64, const char __user *, path,
unsigned long, __dummy, unsigned long, a2, unsigned long, a3)
{
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 9670e70139fd..8fc69891e117 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -30,6 +30,7 @@
#include <linux/random.h>
#include <linux/prctl.h>
#include <linux/nmi.h>
+#include <linux/cpu.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
@@ -706,19 +707,25 @@ int mips_get_process_fp_mode(struct task_struct *task)
return value;
}
-static void prepare_for_fp_mode_switch(void *info)
+static long prepare_for_fp_mode_switch(void *unused)
{
- struct mm_struct *mm = info;
-
- if (current->mm == mm)
- lose_fpu(1);
+ /*
+ * This is icky, but we use this to simply ensure that all CPUs have
+ * context switched, regardless of whether they were previously running
+ * kernel or user code. This ensures that no CPU currently has its FPU
+ * enabled, or is about to attempt to enable it through any path other
+ * than enable_restore_fp_context() which will wait appropriately for
+ * fp_mode_switching to be zero.
+ */
+ return 0;
}
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
struct task_struct *t;
- int max_users;
+ struct cpumask process_cpus;
+ int cpu;
/* If nothing to change, return right away, successfully. */
if (value == mips_get_process_fp_mode(task))
@@ -751,35 +758,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
return -EOPNOTSUPP;
- /* Proceed with the mode switch */
- preempt_disable();
-
- /* Save FP & vector context, then disable FPU & MSA */
- if (task->signal == current->signal)
- lose_fpu(1);
-
- /* Prevent any threads from obtaining live FP context */
- atomic_set(&task->mm->context.fp_mode_switching, 1);
- smp_mb__after_atomic();
-
- /*
- * If there are multiple online CPUs then force any which are running
- * threads in this process to lose their FPU context, which they can't
- * regain until fp_mode_switching is cleared later.
- */
- if (num_online_cpus() > 1) {
- /* No need to send an IPI for the local CPU */
- max_users = (task->mm == current->mm) ? 1 : 0;
-
- if (atomic_read(&current->mm->mm_users) > max_users)
- smp_call_function(prepare_for_fp_mode_switch,
- (void *)current->mm, 1);
- }
-
- /*
- * There are now no threads of the process with live FP context, so it
- * is safe to proceed with the FP mode switch.
- */
+ /* Indicate the new FP mode in each thread */
for_each_thread(task, t) {
/* Update desired FP register width */
if (value & PR_FP_MODE_FR) {
@@ -796,9 +775,34 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
}
- /* Allow threads to use FP again */
- atomic_set(&task->mm->context.fp_mode_switching, 0);
- preempt_enable();
+ /*
+ * We need to ensure that all threads in the process have switched mode
+ * before returning, in order to allow userland to not worry about
+ * races. We can do this by forcing all CPUs that any thread in the
+ * process may be running on to schedule something else - in this case
+ * prepare_for_fp_mode_switch().
+ *
+ * We begin by generating a mask of all CPUs that any thread in the
+ * process may be running on.
+ */
+ cpumask_clear(&process_cpus);
+ for_each_thread(task, t)
+ cpumask_set_cpu(task_cpu(t), &process_cpus);
+
+ /*
+ * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
+ *
+ * The CPUs may have rescheduled already since we switched mode or
+ * generated the cpumask, but that doesn't matter. If the task in this
+ * process is scheduled out then our scheduling of
+ * prepare_for_fp_mode_switch() will simply be redundant. If it's
+ * scheduled in then it will already have picked up the new FP mode
+ * whilst doing so.
+ */
+ get_online_cpus();
+ for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
+ work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
+ put_online_cpus();
wake_up_var(&task->mm->context.fp_mode_switching);
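The quiescing trick above is generic: queueing synchronous work on a CPU proves that whatever was running there has been scheduled out at least once. Stripped to its essentials (target_cpus is illustrative):
	static long quiesce(void *unused)
	{
		return 0;	/* running at all is the useful side effect */
	}

	int cpu;

	get_online_cpus();
	for_each_cpu_and(cpu, &target_cpus, cpu_online_mask)
		work_on_cpu(cpu, quiesce, NULL);	/* blocks until run */
	put_online_cpus();
Because work_on_cpu() is synchronous, the loop returns only once every targeted online CPU has context-switched, which is exactly the guarantee the FP mode switch needs.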
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 9f6c3f2aa2e2..e5ba56c01ee0 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -41,6 +41,7 @@
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
+#include <asm/processor.h>
#include <asm/syscall.h>
#include <linux/uaccess.h>
#include <asm/bootinfo.h>
@@ -589,9 +590,226 @@ static int fpr_set(struct task_struct *target,
return err;
}
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
+
+/*
+ * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
+ */
+static int dsp32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u32 dspregs[NUM_DSP_REGS + 1];
+
+ BUG_ON(count % sizeof(u32));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u32);
+ num_regs = count / sizeof(u32);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ dspregs[i] = target->thread.dsp.dspr[i];
+ break;
+ case NUM_DSP_REGS:
+ dspregs[i] = target->thread.dsp.dspcontrol;
+ break;
+ }
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+}
+
+/*
+ * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
+ */
+static int dsp32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u32 dspregs[NUM_DSP_REGS + 1];
+ int err;
+
+ BUG_ON(count % sizeof(u32));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u32);
+ num_regs = count / sizeof(u32);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ target->thread.dsp.dspr[i] = (s32)dspregs[i];
+ break;
+ case NUM_DSP_REGS:
+ target->thread.dsp.dspcontrol = (s32)dspregs[i];
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
+ */
+static int dsp64_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u64 dspregs[NUM_DSP_REGS + 1];
+
+ BUG_ON(count % sizeof(u64));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u64);
+ num_regs = count / sizeof(u64);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ dspregs[i] = target->thread.dsp.dspr[i];
+ break;
+ case NUM_DSP_REGS:
+ dspregs[i] = target->thread.dsp.dspcontrol;
+ break;
+ }
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+}
+
+/*
+ * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
+ */
+static int dsp64_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u64 dspregs[NUM_DSP_REGS + 1];
+ int err;
+
+ BUG_ON(count % sizeof(u64));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u64);
+ num_regs = count / sizeof(u64);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ target->thread.dsp.dspr[i] = dspregs[i];
+ break;
+ case NUM_DSP_REGS:
+ target->thread.dsp.dspcontrol = dspregs[i];
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_64BIT */
+
+/*
+ * Determine whether the DSP context is present.
+ */
+static int dsp_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
+}
+
+/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
+static int fp_mode_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int fp_mode;
+
+ fp_mode = mips_get_process_fp_mode(target);
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
+ sizeof(fp_mode));
+}
+
+/*
+ * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
+ *
+ * We optimize for the case where `count % sizeof(int) == 0', which
+ * is supposed to have been guaranteed by the kernel before calling
+ * us, e.g. in `ptrace_regset'. We enforce that requirement, so
+ * that we can safely avoid preinitializing temporaries for partial
+ * mode writes.
+ */
+static int fp_mode_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int fp_mode;
+ int err;
+
+ BUG_ON(count % sizeof(int));
+
+ if (pos + count > sizeof(fp_mode))
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
+ sizeof(fp_mode));
+ if (err)
+ return err;
+
+ if (count > 0)
+ err = mips_set_process_fp_mode(target, fp_mode);
+
+ return err;
+}
+
enum mips_regset {
REGSET_GPR,
REGSET_FPR,
+ REGSET_DSP,
+ REGSET_FP_MODE,
};
struct pt_regs_offset {
@@ -697,6 +915,23 @@ static const struct user_regset mips_regsets[] = {
.get = fpr_get,
.set = fpr_set,
},
+ [REGSET_DSP] = {
+ .core_note_type = NT_MIPS_DSP,
+ .n = NUM_DSP_REGS + 1,
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .get = dsp32_get,
+ .set = dsp32_set,
+ .active = dsp_active,
+ },
+ [REGSET_FP_MODE] = {
+ .core_note_type = NT_MIPS_FP_MODE,
+ .n = 1,
+ .size = sizeof(int),
+ .align = sizeof(int),
+ .get = fp_mode_get,
+ .set = fp_mode_set,
+ },
};
static const struct user_regset_view user_mips_view = {
@@ -728,6 +963,23 @@ static const struct user_regset mips64_regsets[] = {
.get = fpr_get,
.set = fpr_set,
},
+ [REGSET_DSP] = {
+ .core_note_type = NT_MIPS_DSP,
+ .n = NUM_DSP_REGS + 1,
+ .size = sizeof(u64),
+ .align = sizeof(u64),
+ .get = dsp64_get,
+ .set = dsp64_set,
+ .active = dsp_active,
+ },
+ [REGSET_FP_MODE] = {
+ .core_note_type = NT_MIPS_FP_MODE,
+ .n = 1,
+ .size = sizeof(int),
+ .align = sizeof(int),
+ .get = fp_mode_get,
+ .set = fp_mode_set,
+ },
};
static const struct user_regset_view user_mips64_view = {
@@ -856,7 +1108,7 @@ long arch_ptrace(struct task_struct *child, long request,
goto out;
}
dregs = __get_dsp_regs(child);
- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+ tmp = dregs[addr - DSP_BASE];
break;
}
case DSP_CONTROL:
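With the regsets registered, a debugger can fetch the whole DSP state in one PTRACE_GETREGSET call instead of one PTRACE_PEEKUSR per register. A userspace sketch against the 32-bit view (assumes the tracee is stopped and NT_MIPS_DSP is visible in the installed headers):
	#include <elf.h>
	#include <stdint.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	static long read_dsp(pid_t pid, uint32_t dsp[7])
	{
		/* dsp[0..5]: dspr[], dsp[6]: dspcontrol */
		struct iovec iov = {
			.iov_base = dsp,
			.iov_len  = 7 * sizeof(uint32_t),
		};

		return ptrace(PTRACE_GETREGSET, pid, NT_MIPS_DSP, &iov);
	}
On a DSP-less CPU the handlers return -EIO, so the call fails cleanly instead of handing back junk.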
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 7edc629304c8..bc348d44d151 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -142,7 +142,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
goto out;
}
dregs = __get_dsp_regs(child);
- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+ tmp = dregs[addr - DSP_BASE];
break;
}
case DSP_CONTROL:
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index c6bbf2165051..419c92197b2f 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -85,7 +85,7 @@ done:
#ifdef CONFIG_CPU_CAVIUM_OCTEON
/* We need to flush I-cache before jumping to new kernel.
- * Unfortunatelly, this code is cpu-specific.
+ * Unfortunately, this code is cpu-specific.
*/
.set push
.set noreorder
@@ -145,7 +145,7 @@ LEAF(kexec_smp_wait)
#endif
/* All parameters to new kernel are passed in registers a0-a3.
- * kexec_args[0..3] are uses to prepare register values.
+ * kexec_args[0..3] are used to prepare register values.
*/
kexec_args:
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 2c96c0c68116..c71d1eb7da59 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -36,6 +36,7 @@
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
+#include <asm/dma-coherence.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
@@ -84,6 +85,11 @@ static struct resource bss_resource = { .name = "Kernel bss", };
static void *detect_magic __initdata = detect_memory_region;
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+unsigned long ARCH_PFN_OFFSET;
+EXPORT_SYMBOL(ARCH_PFN_OFFSET);
+#endif
+
void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
int x = boot_mem_map.nr_map;
@@ -441,6 +447,12 @@ static void __init bootmem_init(void)
mapstart = max(reserved_end, start);
}
+ if (min_low_pfn >= max_low_pfn)
+ panic("Incorrect memory mapping !!!");
+
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+ ARCH_PFN_OFFSET = PFN_UP(ramstart);
+#else
/*
* Reserve any memory between the start of RAM and PHYS_OFFSET
*/
@@ -448,8 +460,6 @@ static void __init bootmem_init(void)
add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
BOOT_MEM_RESERVED);
- if (min_low_pfn >= max_low_pfn)
- panic("Incorrect memory mapping !!!");
if (min_low_pfn > ARCH_PFN_OFFSET) {
pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
@@ -459,6 +469,7 @@ static void __init bootmem_init(void)
ARCH_PFN_OFFSET - min_low_pfn);
}
min_low_pfn = ARCH_PFN_OFFSET;
+#endif
/*
* Determine low and high memory ranges
@@ -1055,3 +1066,26 @@ static int __init debugfs_mips(void)
}
arch_initcall(debugfs_mips);
#endif
+
+#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
+/* User defined DMA coherency from command line. */
+enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
+EXPORT_SYMBOL_GPL(coherentio);
+int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
+
+static int __init setcoherentio(char *str)
+{
+ coherentio = IO_COHERENCE_ENABLED;
+ pr_info("Hardware DMA cache coherency (command line)\n");
+ return 0;
+}
+early_param("coherentio", setcoherentio);
+
+static int __init setnocoherentio(char *str)
+{
+ coherentio = IO_COHERENCE_DISABLED;
+ pr_info("Software DMA cache coherency (command line)\n");
+ return 0;
+}
+early_param("nocoherentio", setnocoherentio);
+#endif
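Both options are flag-style: early_param() matches the bare option name, so the mode is selected by appending a single word to the kernel command line, e.g.:
	coherentio	# assume hardware-maintained DMA coherency
	nocoherentio	# force software cache maintenance
If neither is given, coherentio stays at IO_COHERENCE_DEFAULT and the platform's own detection decides.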
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 0a9cfe7a0372..109ed163a6a6 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -592,13 +592,15 @@ SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
#endif
#ifdef CONFIG_TRAD_SIGNALS
-asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys_sigreturn(void)
{
struct sigframe __user *frame;
+ struct pt_regs *regs;
sigset_t blocked;
int sig;
- frame = (struct sigframe __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct sigframe __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
@@ -606,7 +608,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&blocked);
- sig = restore_sigcontext(&regs, &frame->sf_sc);
+ sig = restore_sigcontext(regs, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
@@ -618,8 +620,8 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
@@ -627,13 +629,15 @@ badframe:
}
#endif /* CONFIG_TRAD_SIGNALS */
-asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys_rt_sigreturn(void)
{
struct rt_sigframe __user *frame;
+ struct pt_regs *regs;
sigset_t set;
int sig;
- frame = (struct rt_sigframe __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
@@ -641,7 +645,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&set);
- sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
+ sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
@@ -656,8 +660,8 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
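All four sigreturn flavours in this patch get the same treatment: the fake by-value struct pt_regs parameter (the nabi_no_regargs trick deleted from asm/sim.h earlier in this diff) is replaced by current_pt_regs(), which locates the saved register file on the current kernel stack. Every converted entry point thus ends up with this shape (sys_xxx_sigreturn is a placeholder):
	asmlinkage void sys_xxx_sigreturn(void)
	{
		struct pt_regs *regs = current_pt_regs();
		void __user *frame = (void __user *)regs->regs[29]; /* $sp */

		/* validate frame, restore_sigcontext(regs, ...), then
		   jump to syscall_exit with $29 pointing at regs */
	}
Operating on the real pt_regs drops the fragile assumption that the by-value argument area happens to overlay the task's saved registers.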
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index b672cebb4a1a..8f65aaf9206d 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -64,13 +64,15 @@ struct rt_sigframe_n32 {
struct ucontextn32 rs_uc;
};
-asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sysn32_rt_sigreturn(void)
{
struct rt_sigframe_n32 __user *frame;
+ struct pt_regs *regs;
sigset_t set;
int sig;
- frame = (struct rt_sigframe_n32 __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe_n32 __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
@@ -78,7 +80,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&set);
- sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
+ sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
@@ -93,8 +95,8 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c
index 2b3572fb5f1b..b6e3ddef48a0 100644
--- a/arch/mips/kernel/signal_o32.c
+++ b/arch/mips/kernel/signal_o32.c
@@ -151,13 +151,15 @@ static int setup_frame_32(void *sig_return, struct ksignal *ksig,
return 0;
}
-asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys32_rt_sigreturn(void)
{
struct rt_sigframe32 __user *frame;
+ struct pt_regs *regs;
sigset_t set;
int sig;
- frame = (struct rt_sigframe32 __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe32 __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
@@ -165,7 +167,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&set);
- sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
+ sig = restore_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
@@ -180,8 +182,8 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
@@ -251,13 +253,15 @@ struct mips_abi mips_abi_32 = {
};
-asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys32_sigreturn(void)
{
struct sigframe32 __user *frame;
+ struct pt_regs *regs;
sigset_t blocked;
int sig;
- frame = (struct sigframe32 __user *) regs.regs[29];
+ regs = current_pt_regs();
+ frame = (struct sigframe32 __user *)regs->regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
@@ -265,7 +269,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
set_current_blocked(&blocked);
- sig = restore_sigcontext32(&regs, &frame->sf_sc);
+ sig = restore_sigcontext32(regs, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
@@ -277,8 +281,8 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
+ : /* no outputs */
+ : "r" (regs));
/* Unreached */
badframe:
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 8d505a21396e..f8871d5b7eb3 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1221,13 +1221,6 @@ static int enable_restore_fp_context(int msa)
{
int err, was_fpu_owner, prior_msa;
- /*
- * If an FP mode switch is currently underway, wait for it to
- * complete before proceeding.
- */
- wait_var_event(&current->mm->context.fp_mode_switching,
- !atomic_read(&current->mm->context.fp_mode_switching));
-
if (!used_math()) {
/* First time FP context user. */
preempt_disable();
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7cd76f93a438..f7ea8e21656b 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -515,7 +515,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
dvcpu->arch.wait = 0;
if (swq_has_sleeper(&dvcpu->wq))
- swake_up(&dvcpu->wq);
+ swake_up_one(&dvcpu->wq);
return 0;
}
@@ -1204,7 +1204,7 @@ static void kvm_mips_comparecount_func(unsigned long data)
vcpu->arch.wait = 0;
if (swq_has_sleeper(&vcpu->wq))
- swake_up(&vcpu->wq);
+ swake_up_one(&vcpu->wq);
}
/* low level hrtimer wake routine */
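
The swake_up() to swake_up_one() rename makes explicit that simple wait queues wake exactly one waiter per call. A minimal, illustrative use of the API (queue name hypothetical):

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);

static void demo_kick(void)
{
	/* Cheap lockless check first, then wake a single waiter. */
	if (swq_has_sleeper(&demo_wq))
		swake_up_one(&demo_wq);
}
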
diff --git a/arch/mips/lantiq/early_printk.c b/arch/mips/lantiq/early_printk.c
index 44bccaee822b..c4aa140b7c91 100644
--- a/arch/mips/lantiq/early_printk.c
+++ b/arch/mips/lantiq/early_printk.c
@@ -8,6 +8,7 @@
#include <linux/cpu.h>
#include <lantiq_soc.h>
+#include <asm/setup.h>
#define ASC_BUF 1024
#define LTQ_ASC_FSTAT ((u32 *)(LTQ_EARLY_ASC + 0x0048))
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 9ff7ccde9de0..d984bd5c2ec5 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -9,7 +9,6 @@
#include <linux/export.h>
#include <linux/clk.h>
#include <linux/bootmem.h>
-#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <asm/bootinfo.h>
@@ -114,10 +113,3 @@ void __init prom_init(void)
panic("failed to register_vsmp_smp_ops()");
#endif
}
-
-int __init plat_of_setup(void)
-{
- return of_platform_default_populate(NULL, NULL, NULL);
-}
-
-arch_initcall(plat_of_setup);
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 805b3a6ab2d6..4b9fbb6744ad 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -130,10 +130,9 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
unsigned long flags;
ch->desc = 0;
- ch->desc_base = dma_alloc_coherent(NULL,
+ ch->desc_base = dma_zalloc_coherent(NULL,
LTQ_DESC_NUM * LTQ_DESC_SIZE,
&ch->phys, GFP_ATOMIC);
- memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
spin_lock_irqsave(&ltq_dma_lock, flags);
ltq_dma_w32(ch->nr, LTQ_DMA_CS);
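
dma_zalloc_coherent() folds the allocation and the removed memset() into one call; at the time of this series it was defined in <linux/dma-mapping.h> essentially as:

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle,
				  flag | __GFP_ZERO);
}
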
diff --git a/arch/mips/lasat/prom.c b/arch/mips/lasat/prom.c
index 17e15b50a551..37b8fc5b9ac9 100644
--- a/arch/mips/lasat/prom.c
+++ b/arch/mips/lasat/prom.c
@@ -13,6 +13,7 @@
#include <asm/bootinfo.h>
#include <asm/lasat/lasat.h>
#include <asm/cpu.h>
+#include <asm/setup.h>
#include "at93c.h"
#include <asm/lasat/eeprom.h>
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 1cc306520a55..3a6f34ef5ffc 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -195,6 +195,7 @@
#endif
#else
PTR_SUBU t0, $0, a2
+ move a2, zero /* No remaining longs */
PTR_ADDIU t0, 1
STORE_BYTE(0)
STORE_BYTE(1)
@@ -231,16 +232,25 @@
#ifdef CONFIG_CPU_MIPSR6
.Lbyte_fixup\@:
- PTR_SUBU a2, $0, t0
+ /*
+ * unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1
+ * a2 = a2 - t0 + 1
+ */
+ PTR_SUBU a2, t0
jr ra
PTR_ADDIU a2, 1
#endif /* CONFIG_CPU_MIPSR6 */
.Lfirst_fixup\@:
+ /* unset_bytes already in a2 */
jr ra
nop
.Lfwd_fixup\@:
+ /*
+ * unset_bytes = partial_start_addr + #bytes - fault_addr
+ * a2 = t1 + (a2 & 3f) - $28->task->BUADDR
+ */
PTR_L t0, TI_TASK($28)
andi a2, 0x3f
LONG_L t0, THREAD_BUADDR(t0)
@@ -249,6 +259,10 @@
LONG_SUBU a2, t0
.Lpartial_fixup\@:
+ /*
+ * unset_bytes = partial_end_addr + #bytes - fault_addr
+ * a2 = a0 + (a2 & STORMASK) - $28->task->BUADDR
+ */
PTR_L t0, TI_TASK($28)
andi a2, STORMASK
LONG_L t0, THREAD_BUADDR(t0)
@@ -257,10 +271,15 @@
LONG_SUBU a2, t0
.Llast_fixup\@:
+ /* unset_bytes already in a2 */
jr ra
nop
.Lsmall_fixup\@:
+ /*
+ * unset_bytes = end_addr - current_addr + 1
+ * a2 = t1 - a0 + 1
+ */
PTR_SUBU a2, t1, a0
jr ra
PTR_ADDIU a2, 1
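
The fixup labels above all share one contract: when a store faults partway through, the handler must leave the number of bytes NOT zeroed in a2, since that value becomes the return value of __clear_user(). A hedged caller-side sketch (helper name hypothetical):

#include <linux/uaccess.h>

static unsigned long demo_zero_user(void __user *dst, unsigned long n)
{
	unsigned long left = __clear_user(dst, n);	/* 0 means fully zeroed */

	return n - left;	/* bytes actually zeroed before any fault */
}
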
diff --git a/arch/mips/loongson32/Platform b/arch/mips/loongson32/Platform
index ffe01c6d0037..a0dbb3b2f2de 100644
--- a/arch/mips/loongson32/Platform
+++ b/arch/mips/loongson32/Platform
@@ -1,8 +1,4 @@
-cflags-$(CONFIG_CPU_LOONGSON1) += \
- $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
- -Wa,-mips32r2 -Wa,--trap
-
+cflags-$(CONFIG_CPU_LOONGSON1) += -march=mips32 -Wa,--trap
platform-$(CONFIG_MACH_LOONGSON32) += loongson32/
cflags-$(CONFIG_MACH_LOONGSON32) += -I$(srctree)/arch/mips/include/asm/mach-loongson32
-load-$(CONFIG_LOONGSON1_LS1B) += 0xffffffff80100000
-load-$(CONFIG_LOONGSON1_LS1C) += 0xffffffff80100000
+load-$(CONFIG_CPU_LOONGSON1) += 0xffffffff80100000
diff --git a/arch/mips/loongson64/Kconfig b/arch/mips/loongson64/Kconfig
index c79e6a565572..c865b4b9b775 100644
--- a/arch/mips/loongson64/Kconfig
+++ b/arch/mips/loongson64/Kconfig
@@ -91,7 +91,6 @@ config LOONGSON_MACH3X
select LOONGSON_MC146818
select ZONE_DMA32
select LEFI_FIRMWARE_INTERFACE
- select PHYS48_TO_HT40
help
Generic Loongson 3 family machines utilize the 3A/3B revision
of Loongson processor and RS780/SBX00 chipset.
@@ -130,10 +129,6 @@ config LOONGSON_UART_BASE
default y
depends on EARLY_PRINTK || SERIAL_8250
-config PHYS48_TO_HT40
- bool
- default y if CPU_LOONGSON3
-
config LOONGSON_MC146818
bool
default n
diff --git a/arch/mips/loongson64/common/Makefile b/arch/mips/loongson64/common/Makefile
index 8235ac7eac95..57ee03022941 100644
--- a/arch/mips/loongson64/common/Makefile
+++ b/arch/mips/loongson64/common/Makefile
@@ -6,6 +6,7 @@
obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
bonito-irq.o mem.o machtype.o platform.o serial.o
obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_CPU_LOONGSON2) += dma.o
#
# Serial port support
@@ -25,8 +26,3 @@ obj-$(CONFIG_CS5536) += cs5536/
#
obj-$(CONFIG_SUSPEND) += pm.o
-
-#
-# Big Memory (SWIOTLB) Support
-#
-obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_ohci.c b/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
index f7c905e50dc4..92dc6bafc127 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
+++ b/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
@@ -138,7 +138,7 @@ u32 pci_ohci_read_reg(int reg)
break;
case PCI_OHCI_INT_REG:
_rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
- if ((lo & 0x00000f00) == CS5536_USB_INTR)
+ if (((lo >> PIC_YSEL_LOW_USB_SHIFT) & 0xf) == CS5536_USB_INTR)
conf_data = 1;
break;
default:
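
The old test masked the register but compared against the unshifted interrupt number, so it could never match. A small userspace check of the arithmetic (macro values assumed for illustration; see the cs5536 headers for the real ones):

#include <assert.h>
#include <stdint.h>

#define PIC_YSEL_LOW_USB_SHIFT	8	/* USB source lives in bits 8..11 */
#define CS5536_USB_INTR		11	/* assumed interrupt number */

int main(void)
{
	uint32_t lo = CS5536_USB_INTR << PIC_YSEL_LOW_USB_SHIFT;

	/* Old comparison: masked but unshifted, never equals a small number. */
	assert((lo & 0x00000f00) != CS5536_USB_INTR);
	/* Fixed comparison: shift the 4-bit field down first. */
	assert(((lo >> PIC_YSEL_LOW_USB_SHIFT) & 0xf) == CS5536_USB_INTR);
	return 0;
}
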
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
deleted file mode 100644
index 6a739f8ae110..000000000000
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ /dev/null
@@ -1,109 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/swiotlb.h>
-#include <linux/bootmem.h>
-
-#include <asm/bootinfo.h>
-#include <boot_param.h>
-#include <dma-coherence.h>
-
-static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
- void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
-
- mb();
- return ret;
-}
-
-static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
- dir, attrs);
- mb();
- return daddr;
-}
-
-static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, attrs);
- mb();
-
- return r;
-}
-
-static void loongson_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir)
-{
- swiotlb_sync_single_for_device(dev, dma_handle, size, dir);
- mb();
-}
-
-static void loongson_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
-{
- swiotlb_sync_sg_for_device(dev, sg, nents, dir);
- mb();
-}
-
-static int loongson_dma_supported(struct device *dev, u64 mask)
-{
- if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits))
- return 0;
- return swiotlb_dma_supported(dev, mask);
-}
-
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- long nid;
-#ifdef CONFIG_PHYS48_TO_HT40
- /* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from
- * Loongson-3's 48bit address space and embed it into 40bit */
- nid = (paddr >> 44) & 0x3;
- paddr = ((nid << 44) ^ paddr) | (nid << 37);
-#endif
- return paddr;
-}
-
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- long nid;
-#ifdef CONFIG_PHYS48_TO_HT40
- /* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from
- * Loongson-3's 48bit address space and embed it into 40bit */
- nid = (daddr >> 37) & 0x3;
- daddr = ((nid << 37) ^ daddr) | (nid << 44);
-#endif
- return daddr;
-}
-
-static const struct dma_map_ops loongson_dma_map_ops = {
- .alloc = loongson_dma_alloc_coherent,
- .free = swiotlb_free,
- .map_page = loongson_dma_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = loongson_dma_map_sg,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = loongson_dma_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = loongson_dma_sync_sg_for_device,
- .mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = loongson_dma_supported,
-};
-
-void __init plat_swiotlb_setup(void)
-{
- swiotlb_init(1);
- mips_dma_map_ops = &loongson_dma_map_ops;
-}
diff --git a/arch/mips/loongson64/common/dma.c b/arch/mips/loongson64/common/dma.c
new file mode 100644
index 000000000000..48f04126bde2
--- /dev/null
+++ b/arch/mips/loongson64/common/dma.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/dma-direct.h>
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr | 0x80000000;
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+#if defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
+ if (dma_addr > 0x8fffffff)
+ return dma_addr;
+ return dma_addr & 0x0fffffff;
+#else
+ return dma_addr & 0x7fffffff;
+#endif
+}
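
A quick userspace restatement of the mapping above shows the round trip for the common (non-Loongson-2F) branch; addresses below the 2 GiB window map back exactly:

#include <assert.h>
#include <stdint.h>

static uint64_t ls_phys_to_dma(uint64_t paddr) { return paddr | 0x80000000; }
static uint64_t ls_dma_to_phys(uint64_t daddr) { return daddr & 0x7fffffff; }

int main(void)
{
	uint64_t paddr = 0x01234567;	/* below 2 GiB */

	assert(ls_phys_to_dma(paddr) == 0x81234567);
	assert(ls_dma_to_phys(ls_phys_to_dma(paddr)) == paddr);
	return 0;
}
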
diff --git a/arch/mips/loongson64/common/early_printk.c b/arch/mips/loongson64/common/early_printk.c
index 6ca632e529dc..a782e2b24747 100644
--- a/arch/mips/loongson64/common/early_printk.c
+++ b/arch/mips/loongson64/common/early_printk.c
@@ -10,6 +10,7 @@
* option) any later version.
*/
#include <linux/serial_reg.h>
+#include <asm/setup.h>
#include <loongson.h>
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index 1e8a955ae5a8..8f68ee02a8c2 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -198,7 +198,8 @@ void __init prom_init_env(void)
break;
case PRID_REV_LOONGSON3A_R1:
case PRID_REV_LOONGSON3A_R2:
- case PRID_REV_LOONGSON3A_R3:
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
cpu_clock_freq = 900000000;
break;
case PRID_REV_LOONGSON3B_R1:
diff --git a/arch/mips/loongson64/loongson-3/Makefile b/arch/mips/loongson64/loongson-3/Makefile
index 44bc1482158b..b5a0c2fa5446 100644
--- a/arch/mips/loongson64/loongson-3/Makefile
+++ b/arch/mips/loongson64/loongson-3/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for Loongson-3 family machines
#
-obj-y += irq.o cop2-ex.o platform.o acpi_init.o
+obj-y += irq.o cop2-ex.o platform.o acpi_init.o dma.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/mips/loongson64/loongson-3/dma.c b/arch/mips/loongson64/loongson-3/dma.c
new file mode 100644
index 000000000000..5e86635f71db
--- /dev/null
+++ b/arch/mips/loongson64/loongson-3/dma.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/dma-direct.h>
+#include <linux/init.h>
+#include <linux/swiotlb.h>
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ /* We extract the 2-bit node id (bits 44~47, only bits 44~45 used now) from
+ * Loongson-3's 48-bit address space and embed it into 40 bits */
+ long nid = (paddr >> 44) & 0x3;
+ return ((nid << 44) ^ paddr) | (nid << 37);
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ /* We extract the 2-bit node id (bits 44~47, only bits 44~45 used now) from
+ * Loongson-3's 48-bit address space and embed it into 40 bits */
+ long nid = (daddr >> 37) & 0x3;
+ return ((nid << 37) ^ daddr) | (nid << 44);
+}
+
+void __init plat_swiotlb_setup(void)
+{
+ swiotlb_init(1);
+}
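
The node id is moved between bit positions 44 and 37 with an XOR-and-OR; a userspace sanity check of the round trip (valid while each node's offset stays below 2^37):

#include <assert.h>
#include <stdint.h>

static uint64_t l3_phys_to_dma(uint64_t paddr)
{
	uint64_t nid = (paddr >> 44) & 0x3;

	return ((nid << 44) ^ paddr) | (nid << 37);
}

static uint64_t l3_dma_to_phys(uint64_t daddr)
{
	uint64_t nid = (daddr >> 37) & 0x3;

	return ((nid << 37) ^ daddr) | (nid << 44);
}

int main(void)
{
	/* Node 1, offset 0x1000: the id moves from bit 44 down to bit 37. */
	uint64_t paddr = (1ULL << 44) | 0x1000;
	uint64_t daddr = l3_phys_to_dma(paddr);

	assert(daddr == ((1ULL << 37) | 0x1000));
	assert(l3_dma_to_phys(daddr) == paddr);
	return 0;
}
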
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 8501109bb0f0..fea95d003269 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -682,7 +682,8 @@ void play_dead(void)
(void *)CKSEG1ADDR((unsigned long)loongson3a_r1_play_dead);
break;
case PRID_REV_LOONGSON3A_R2:
- case PRID_REV_LOONGSON3A_R3:
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
play_dead_at_ckseg1 =
(void *)CKSEG1ADDR((unsigned long)loongson3a_r2r3_play_dead);
break;
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index c463bdad45c7..3e5bb203c95a 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -3,7 +3,7 @@
# Makefile for the Linux/MIPS-specific parts of the memory manager.
#
-obj-y += cache.o dma-default.o extable.o fault.o \
+obj-y += cache.o extable.o fault.o \
gup.o init.o mmap.o page.o page-funcs.o \
pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o
@@ -17,6 +17,7 @@ obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
obj-$(CONFIG_64BIT) += pgtable-64.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_DMA_NONCOHERENT) += dma-noncoherent.o
obj-$(CONFIG_CPU_R4K_CACHE_TLB) += c-r4k.o cex-gen.o tlb-r4k.o
obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index e12dfa48b478..a9ef057c79fe 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -830,12 +830,13 @@ static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
return __r4k_flush_icache_range(start, end, true);
}
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
+#ifdef CONFIG_DMA_NONCOHERENT
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
- BUG_ON(size == 0);
+ if (WARN_ON(size == 0))
+ return;
preempt_disable();
if (cpu_has_inclusive_pcaches) {
@@ -871,7 +872,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
- BUG_ON(size == 0);
+ if (WARN_ON(size == 0))
+ return;
preempt_disable();
if (cpu_has_inclusive_pcaches) {
@@ -904,7 +906,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
bc_inv(addr, size);
__sync();
}
-#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT */
struct flush_cache_sigtramp_args {
struct mm_struct *mm;
@@ -1505,6 +1507,14 @@ static void probe_pcache(void)
if (c->dcache.flags & MIPS_CACHE_PINDEX)
c->dcache.flags &= ~MIPS_CACHE_ALIASES;
+ /*
+ * In systems with CM the icache fills from L2 or closer caches, and
+ * thus sees remote stores without needing to write them back any
+ * further than that.
+ */
+ if (mips_cm_present())
+ c->icache.flags |= MIPS_IC_SNOOPS_REMOTE;
+
switch (current_cpu_type()) {
case CPU_20KC:
/*
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 0d3c656feba0..70a523151ff3 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
+#ifdef CONFIG_DMA_NONCOHERENT
/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
@@ -65,7 +65,7 @@ void (*_dma_cache_inv)(unsigned long start, unsigned long size);
EXPORT_SYMBOL(_dma_cache_wback_inv);
-#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT */
/*
* We could optimize the case where the cache argument is not BCACHE but
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
deleted file mode 100644
index f9fef0028ca2..000000000000
--- a/arch/mips/mm/dma-default.c
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
- * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
- */
-
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/scatterlist.h>
-#include <linux/string.h>
-#include <linux/gfp.h>
-#include <linux/highmem.h>
-#include <linux/dma-contiguous.h>
-
-#include <asm/cache.h>
-#include <asm/cpu-type.h>
-#include <asm/io.h>
-
-#include <dma-coherence.h>
-
-#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
-/* User defined DMA coherency from command line. */
-enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
-EXPORT_SYMBOL_GPL(coherentio);
-int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
-
-static int __init setcoherentio(char *str)
-{
- coherentio = IO_COHERENCE_ENABLED;
- pr_info("Hardware DMA cache coherency (command line)\n");
- return 0;
-}
-early_param("coherentio", setcoherentio);
-
-static int __init setnocoherentio(char *str)
-{
- coherentio = IO_COHERENCE_DISABLED;
- pr_info("Software DMA cache coherency (command line)\n");
- return 0;
-}
-early_param("nocoherentio", setnocoherentio);
-#endif
-
-static inline struct page *dma_addr_to_page(struct device *dev,
- dma_addr_t dma_addr)
-{
- return pfn_to_page(
- plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
-}
-
-/*
- * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
- * speculatively fill random cachelines with stale data at any time,
- * requiring an extra flush post-DMA.
- *
- * Warning on the terminology - Linux calls an uncached area coherent;
- * MIPS terminology calls memory areas with hardware maintained coherency
- * coherent.
- *
- * Note that the R14000 and R16000 should also be checked for in this
- * condition. However this function is only called on non-I/O-coherent
- * systems and only the R10000 and R12000 are used in such systems, the
- * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
- */
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
-{
- if (plat_device_is_coherent(dev))
- return false;
-
- switch (boot_cpu_type()) {
- case CPU_R10000:
- case CPU_R12000:
- case CPU_BMIPS5000:
- return true;
-
- default:
- /*
- * Presence of MAARs suggests that the CPU supports
- * speculatively prefetching data, and therefore requires
- * the post-DMA flush/invalidate.
- */
- return cpu_has_maar;
- }
-}
-
-static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
-{
- gfp_t dma_flag;
-
-#ifdef CONFIG_ISA
- if (dev == NULL)
- dma_flag = __GFP_DMA;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
- if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
- dma_flag = __GFP_DMA;
- else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA32;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
- if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA32;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev == NULL ||
- dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
- dma_flag = __GFP_DMA;
- else
-#endif
- dma_flag = 0;
-
- /* Don't invoke OOM killer */
- gfp |= __GFP_NORETRY;
-
- return gfp | dma_flag;
-}
-
-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
- void *ret;
- struct page *page = NULL;
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- gfp = massage_gfp_flags(dev, gfp);
-
- if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
- page = dma_alloc_from_contiguous(dev, count, get_order(size),
- gfp);
- if (!page)
- page = alloc_pages(gfp, get_order(size));
-
- if (!page)
- return NULL;
-
- ret = page_address(page);
- memset(ret, 0, size);
- *dma_handle = plat_map_dma_mem(dev, ret, size);
- if (!(attrs & DMA_ATTR_NON_CONSISTENT) &&
- !plat_device_is_coherent(dev)) {
- dma_cache_wback_inv((unsigned long) ret, size);
- ret = UNCAC_ADDR(ret);
- }
-
- return ret;
-}
-
-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- unsigned long addr = (unsigned long) vaddr;
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- struct page *page = NULL;
-
- plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
-
- if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !plat_device_is_coherent(dev))
- addr = CAC_ADDR(addr);
-
- page = virt_to_page((void *) addr);
-
- if (!dma_release_from_contiguous(dev, page, count))
- __free_pages(page, get_order(size));
-}
-
-static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
- unsigned long user_count = vma_pages(vma);
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long addr = (unsigned long)cpu_addr;
- unsigned long off = vma->vm_pgoff;
- unsigned long pfn;
- int ret = -ENXIO;
-
- if (!plat_device_is_coherent(dev))
- addr = CAC_ADDR(addr);
-
- pfn = page_to_pfn(virt_to_page((void *)addr));
-
- if (attrs & DMA_ATTR_WRITE_COMBINE)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
- if (off < count && user_count <= (count - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- pfn + off,
- user_count << PAGE_SHIFT,
- vma->vm_page_prot);
- }
-
- return ret;
-}
-
-static inline void __dma_sync_virtual(void *addr, size_t size,
- enum dma_data_direction direction)
-{
- switch (direction) {
- case DMA_TO_DEVICE:
- dma_cache_wback((unsigned long)addr, size);
- break;
-
- case DMA_FROM_DEVICE:
- dma_cache_inv((unsigned long)addr, size);
- break;
-
- case DMA_BIDIRECTIONAL:
- dma_cache_wback_inv((unsigned long)addr, size);
- break;
-
- default:
- BUG();
- }
-}
-
-/*
- * A single sg entry may refer to multiple physically contiguous
- * pages. But we still need to process highmem pages individually.
- * If highmem is not configured then the bulk of this loop gets
- * optimized out.
- */
-static inline void __dma_sync(struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction)
-{
- size_t left = size;
-
- do {
- size_t len = left;
-
- if (PageHighMem(page)) {
- void *addr;
-
- if (offset + len > PAGE_SIZE) {
- if (offset >= PAGE_SIZE) {
- page += offset >> PAGE_SHIFT;
- offset &= ~PAGE_MASK;
- }
- len = PAGE_SIZE - offset;
- }
-
- addr = kmap_atomic(page);
- __dma_sync_virtual(addr + offset, len, direction);
- kunmap_atomic(addr);
- } else
- __dma_sync_virtual(page_address(page) + offset,
- size, direction);
- offset = 0;
- page++;
- left -= len;
- } while (left);
-}
-
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction, unsigned long attrs)
-{
- if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync(dma_addr_to_page(dev, dma_addr),
- dma_addr & ~PAGE_MASK, size, direction);
- plat_post_dma_flush(dev);
- plat_unmap_dma_mem(dev, dma_addr, size, direction);
-}
-
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction, unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i) {
- if (!plat_device_is_coherent(dev) &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
- sg->offset;
- }
-
- return nents;
-}
-
-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction,
- unsigned long attrs)
-{
- if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync(page, offset, size, direction);
-
- return plat_map_dma_mem_page(dev, page) + offset;
-}
-
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nhwentries, enum dma_data_direction direction,
- unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nhwentries, i) {
- if (!plat_device_is_coherent(dev) &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- direction != DMA_TO_DEVICE)
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
- }
-}
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (cpu_needs_post_dma_flush(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
- plat_post_dma_flush(dev);
-}
-
-static void mips_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (!plat_device_is_coherent(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- if (cpu_needs_post_dma_flush(dev)) {
- for_each_sg(sglist, sg, nelems, i) {
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- }
- }
- plat_post_dma_flush(dev);
-}
-
-static void mips_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- if (!plat_device_is_coherent(dev)) {
- for_each_sg(sglist, sg, nelems, i) {
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- }
- }
-}
-
-static int mips_dma_supported(struct device *dev, u64 mask)
-{
- return plat_dma_supported(dev, mask);
-}
-
-static void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- if (!plat_device_is_coherent(dev))
- __dma_sync_virtual(vaddr, size, direction);
-}
-
-static const struct dma_map_ops mips_default_dma_map_ops = {
- .alloc = mips_dma_alloc_coherent,
- .free = mips_dma_free_coherent,
- .mmap = mips_dma_mmap,
- .map_page = mips_dma_map_page,
- .unmap_page = mips_dma_unmap_page,
- .map_sg = mips_dma_map_sg,
- .unmap_sg = mips_dma_unmap_sg,
- .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
- .sync_single_for_device = mips_dma_sync_single_for_device,
- .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
- .sync_sg_for_device = mips_dma_sync_sg_for_device,
- .dma_supported = mips_dma_supported,
- .cache_sync = mips_dma_cache_sync,
-};
-
-const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
-EXPORT_SYMBOL(mips_dma_map_ops);
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
new file mode 100644
index 000000000000..2aca1236af36
--- /dev/null
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
+ * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
+ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
+ */
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/dma-contiguous.h>
+#include <linux/highmem.h>
+
+#include <asm/cache.h>
+#include <asm/cpu-type.h>
+#include <asm/dma-coherence.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+static inline int dev_is_coherent(struct device *dev)
+{
+ return dev->archdata.dma_coherent;
+}
+#else
+static inline int dev_is_coherent(struct device *dev)
+{
+ switch (coherentio) {
+ default:
+ case IO_COHERENCE_DEFAULT:
+ return hw_coherentio;
+ case IO_COHERENCE_ENABLED:
+ return 1;
+ case IO_COHERENCE_DISABLED:
+ return 0;
+ }
+}
+#endif /* CONFIG_DMA_PERDEV_COHERENT */
+
+/*
+ * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
+ * fill random cachelines with stale data at any time, requiring an extra
+ * flush post-DMA.
+ *
+ * Warning on the terminology - Linux calls an uncached area coherent; MIPS
+ * terminology calls memory areas with hardware maintained coherency coherent.
+ *
+ * Note that the R14000 and R16000 should also be checked for in this condition.
+ * However this function is only called on non-I/O-coherent systems and only the
+ * R10000 and R12000 are used in such systems: the SGI IP28 Indigo² and the
+ * SGI IP32 aka O2, respectively.
+ */
+static inline bool cpu_needs_post_dma_flush(struct device *dev)
+{
+ if (dev_is_coherent(dev))
+ return false;
+
+ switch (boot_cpu_type()) {
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_BMIPS5000:
+ return true;
+ default:
+ /*
+ * Presence of MAARs suggests that the CPU supports
+ * speculatively prefetching data, and therefore requires
+ * the post-DMA flush/invalidate.
+ */
+ return cpu_has_maar;
+ }
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+ void *ret;
+
+ ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+ if (!ret)
+ return NULL;
+
+ if (!dev_is_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+ dma_cache_wback_inv((unsigned long) ret, size);
+ ret = (void *)UNCAC_ADDR(ret);
+ }
+
+ return ret;
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs)
+{
+ if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_coherent(dev))
+ cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
+ dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
+}
+
+int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ unsigned long user_count = vma_pages(vma);
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long addr = (unsigned long)cpu_addr;
+ unsigned long off = vma->vm_pgoff;
+ unsigned long pfn;
+ int ret = -ENXIO;
+
+ if (!dev_is_coherent(dev))
+ addr = CAC_ADDR(addr);
+
+ pfn = page_to_pfn(virt_to_page((void *)addr));
+
+ if (attrs & DMA_ATTR_WRITE_COMBINE)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (off < count && user_count <= (count - off)) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + off,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+
+ return ret;
+}
+
+static inline void dma_sync_virt(void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ dma_cache_wback((unsigned long)addr, size);
+ break;
+
+ case DMA_FROM_DEVICE:
+ dma_cache_inv((unsigned long)addr, size);
+ break;
+
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv((unsigned long)addr, size);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/*
+ * A single sg entry may refer to multiple physically contiguous pages. But
+ * we still need to process highmem pages individually. If highmem is not
+ * configured then the bulk of this loop gets optimized out.
+ */
+static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
+ unsigned long offset = paddr & ~PAGE_MASK;
+ size_t left = size;
+
+ do {
+ size_t len = left;
+
+ if (PageHighMem(page)) {
+ void *addr;
+
+ if (offset + len > PAGE_SIZE) {
+ if (offset >= PAGE_SIZE) {
+ page += offset >> PAGE_SHIFT;
+ offset &= ~PAGE_MASK;
+ }
+ len = PAGE_SIZE - offset;
+ }
+
+ addr = kmap_atomic(page);
+ dma_sync_virt(addr + offset, len, dir);
+ kunmap_atomic(addr);
+ } else
+ dma_sync_virt(page_address(page) + offset, size, dir);
+ offset = 0;
+ page++;
+ left -= len;
+ } while (left);
+}
+
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ if (!dev_is_coherent(dev))
+ dma_sync_phys(paddr, size, dir);
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ if (cpu_needs_post_dma_flush(dev))
+ dma_sync_phys(paddr, size, dir);
+}
+
+void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ if (!dev_is_coherent(dev))
+ dma_sync_virt(vaddr, size, direction);
+}
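
For orientation, a rough sketch of how the generic dma-noncoherent core drives the hooks defined above around a streaming mapping (function names hypothetical, not the exact core code):

#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>

static dma_addr_t demo_map_single(struct device *dev, phys_addr_t paddr,
				  size_t size, enum dma_data_direction dir)
{
	/* Writeback/invalidate CPU caches before the device sees the buffer. */
	arch_sync_dma_for_device(dev, paddr, size, dir);
	return __phys_to_dma(dev, paddr);
}

static void demo_unmap_single(struct device *dev, dma_addr_t addr,
			      size_t size, enum dma_data_direction dir)
{
	/* Discard speculatively filled lines after the device wrote to RAM. */
	arch_sync_dma_for_cpu(dev, __dma_to_phys(dev, addr), size, dir);
}
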
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index d5d02993aa21..56e4f8bffd4c 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -623,21 +623,6 @@ struct dmadscr {
u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];
-void sb1_dma_init(void)
-{
- int i;
-
- for (i = 0; i < DM_NUM_CHANNELS; i++) {
- const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
- V_DM_DSCR_BASE_RINGSZ(1);
- void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));
-
- __raw_writeq(base_val, base_reg);
- __raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
- __raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
- }
-}
-
void clear_page(void *page)
{
u64 to_phys = CPHYSADDR((unsigned long)page);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 79b9f2ad3ff5..49312a14cd17 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1509,7 +1509,7 @@ static void setup_pw(void)
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
write_c0_pwctl(1 << 6 | psn);
#endif
- write_c0_kpgd(swapper_pg_dir);
+ write_c0_kpgd((long)swapper_pg_dir);
kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
}
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 9bb6baa45da3..24e5b0d06899 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -19,7 +19,6 @@
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/bugs.h>
-#define UASM_ISA _UASM_ISA_MICROMIPS
#include <asm/uasm.h>
#define RS_MASK 0x1f
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 9fea6c6bbf49..60ceb93c71a0 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -19,7 +19,6 @@
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/bugs.h>
-#define UASM_ISA _UASM_ISA_CLASSIC
#include <asm/uasm.h>
#define RS_MASK 0x1f
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 63940bdce698..17c7fd471a27 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -13,11 +13,9 @@ obj-y += malta-init.o
obj-y += malta-int.o
obj-y += malta-memory.o
obj-y += malta-platform.o
-obj-y += malta-reset.o
obj-y += malta-setup.o
obj-y += malta-time.o
obj-$(CONFIG_MIPS_CMP) += malta-amon.o
-obj-$(CONFIG_MIPS_MALTA_PM) += malta-pm.o
CFLAGS_malta-dtshim.o = -I$(src)/../../../scripts/dtc/libfdt
diff --git a/arch/mips/mti-malta/malta-pm.c b/arch/mips/mti-malta/malta-pm.c
deleted file mode 100644
index efbd659fb602..000000000000
--- a/arch/mips/mti-malta/malta-pm.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2014 Imagination Technologies
- * Author: Paul Burton <paul.burton@mips.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/pci.h>
-
-#include <asm/mach-malta/malta-pm.h>
-
-static struct pci_bus *pm_pci_bus;
-static resource_size_t pm_io_offset;
-
-int mips_pm_suspend(unsigned state)
-{
- int spec_devid;
- u16 sts;
-
- if (!pm_pci_bus || !pm_io_offset)
- return -ENODEV;
-
- /* Ensure the power button status is clear */
- while (1) {
- sts = inw(pm_io_offset + PIIX4_FUNC3IO_PMSTS);
- if (!(sts & PIIX4_FUNC3IO_PMSTS_PWRBTN_STS))
- break;
- outw(sts, pm_io_offset + PIIX4_FUNC3IO_PMSTS);
- }
-
- /* Enable entry to suspend */
- outw(state | PIIX4_FUNC3IO_PMCNTRL_SUS_EN,
- pm_io_offset + PIIX4_FUNC3IO_PMCNTRL);
-
- /* If the special cycle occurs too soon this doesn't work... */
- mdelay(10);
-
- /*
- * The PIIX4 will enter the suspend state only after seeing a special
- * cycle with the correct magic data on the PCI bus. Generate that
- * cycle now.
- */
- spec_devid = PCI_DEVID(0, PCI_DEVFN(0x1f, 0x7));
- pci_bus_write_config_dword(pm_pci_bus, spec_devid, 0,
- PIIX4_SUSPEND_MAGIC);
-
- /* Give the system some time to power down */
- mdelay(1000);
-
- return 0;
-}
-
-static int __init malta_pm_setup(void)
-{
- struct pci_dev *dev;
- int res, io_region = PCI_BRIDGE_RESOURCES;
-
- /* Find a reference to the PCI bus */
- pm_pci_bus = pci_find_next_bus(NULL);
- if (!pm_pci_bus) {
- pr_warn("malta-pm: failed to find reference to PCI bus\n");
- return -ENODEV;
- }
-
- /* Find the PIIX4 PM device */
- dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
- PCI_ANY_ID, NULL);
- if (!dev) {
- pr_warn("malta-pm: failed to find PIIX4 PM\n");
- return -ENODEV;
- }
-
- /* Request access to the PIIX4 PM IO registers */
- res = pci_request_region(dev, io_region, "PIIX4 PM IO registers");
- if (res) {
- pr_warn("malta-pm: failed to request PM IO registers (%d)\n",
- res);
- pci_dev_put(dev);
- return -ENODEV;
- }
-
- /* Find the offset to the PIIX4 PM IO registers */
- pm_io_offset = pci_resource_start(dev, io_region);
-
- pci_dev_put(dev);
- return 0;
-}
-
-late_initcall(malta_pm_setup);
diff --git a/arch/mips/mti-malta/malta-reset.c b/arch/mips/mti-malta/malta-reset.c
deleted file mode 100644
index dd6f62ad4417..000000000000
--- a/arch/mips/mti-malta/malta-reset.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/io.h>
-#include <linux/pm.h>
-#include <linux/reboot.h>
-
-#include <asm/reboot.h>
-#include <asm/mach-malta/malta-pm.h>
-
-static void mips_machine_power_off(void)
-{
- mips_pm_suspend(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF);
-
- pr_info("Failed to power down, resetting\n");
- machine_restart(NULL);
-}
-
-static int __init mips_reboot_setup(void)
-{
- pm_power_off = mips_machine_power_off;
-
- return 0;
-}
-arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 7b63914d2e58..5d4c5e5fbd69 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -26,6 +26,7 @@
#include <linux/screen_info.h>
#include <linux/time.h>
+#include <asm/dma-coherence.h>
#include <asm/fw/fw.h>
#include <asm/mach-malta/malta-dtshim.h>
#include <asm/mips-cps.h>
@@ -144,12 +145,6 @@ static int __init plat_enable_iocoherency(void)
static void __init plat_setup_iocoherency(void)
{
-#ifdef CONFIG_DMA_NONCOHERENT
- /*
- * Kernel has been configured with software coherency
- * but we might choose to turn it off and use hardware
- * coherency instead.
- */
if (plat_enable_iocoherency()) {
if (coherentio == IO_COHERENCE_DISABLED)
pr_info("Hardware DMA cache coherency disabled\n");
@@ -161,10 +156,6 @@ static void __init plat_setup_iocoherency(void)
else
pr_info("Software DMA cache coherency enabled\n");
}
-#else
- if (!plat_enable_iocoherency())
- panic("Hardware DMA cache coherency not supported!");
-#endif
}
static void __init pci_clock_check(void)
@@ -226,29 +217,6 @@ static void __init bonito_quirks_setup(void)
pr_info("Enabled Bonito debug mode\n");
} else
BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE;
-
-#ifdef CONFIG_DMA_COHERENT
- if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
- BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
- pr_info("Enabled Bonito CPU coherency\n");
-
- argptr = fw_getcmdline();
- if (strstr(argptr, "iobcuncached")) {
- BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
- BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
- ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
- BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
- pr_info("Disabled Bonito IOBC coherency\n");
- } else {
- BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
- BONITO_PCIMEMBASECFG |=
- (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
- BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
- pr_info("Enabled Bonito IOBC coherency\n");
- }
- } else
- panic("Hardware DMA cache coherency not supported");
-#endif
}
void __init *plat_get_fdt(void)
@@ -279,11 +247,6 @@ void __init plat_mem_setup(void)
*/
enable_dma(4);
-#ifdef CONFIG_DMA_COHERENT
- if (mips_revision_sconid != MIPS_REVISION_SCON_BONITO)
- panic("Hardware DMA cache coherency not supported");
-#endif
-
if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO)
bonito_quirks_setup();
diff --git a/arch/mips/netlogic/common/earlycons.c b/arch/mips/netlogic/common/earlycons.c
index 769f93032c53..8f5bc1597550 100644
--- a/arch/mips/netlogic/common/earlycons.c
+++ b/arch/mips/netlogic/common/earlycons.c
@@ -36,6 +36,7 @@
#include <linux/serial_reg.h>
#include <asm/mipsregs.h>
+#include <asm/setup.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/common.h>
diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c
index 856a6e6d296e..b5ba83f4c646 100644
--- a/arch/mips/netlogic/xlp/dt.c
+++ b/arch/mips/netlogic/xlp/dt.c
@@ -93,17 +93,3 @@ void __init device_tree_init(void)
{
unflatten_and_copy_device_tree();
}
-
-static struct of_device_id __initdata xlp_ids[] = {
- { .compatible = "simple-bus", },
- {},
-};
-
-int __init xlp8xx_ds_publish_devices(void)
-{
- if (!of_have_populated_dt())
- return 0;
- return of_platform_bus_probe(NULL, xlp_ids, NULL);
-}
-
-device_initcall(xlp8xx_ds_publish_devices);
diff --git a/arch/mips/paravirt/serial.c b/arch/mips/paravirt/serial.c
index 02b665c02272..a37b6f9f0ede 100644
--- a/arch/mips/paravirt/serial.c
+++ b/arch/mips/paravirt/serial.c
@@ -9,16 +9,15 @@
#include <linux/kernel.h>
#include <linux/virtio_console.h>
#include <linux/kvm_para.h>
+#include <asm/setup.h>
/*
* Emit one character to the boot console.
*/
-int prom_putchar(char c)
+void prom_putchar(char c)
{
kvm_hypercall3(KVM_HC_MIPS_CONSOLE_OUTPUT, 0 /* port 0 */,
(unsigned long)&c, 1 /* len == 1 */);
-
- return 1;
}
#ifdef CONFIG_VIRTIO_CONSOLE
diff --git a/arch/mips/pci/ops-bridge.c b/arch/mips/pci/ops-bridge.c
index 57e1463fcd02..a1d2c4ae0d1b 100644
--- a/arch/mips/pci/ops-bridge.c
+++ b/arch/mips/pci/ops-bridge.c
@@ -167,7 +167,7 @@ oh_my_gawd:
static int pci_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 * value)
{
- if (bus->number > 0)
+ if (!pci_is_root_bus(bus))
return pci_conf1_read_config(bus, devfn, where, size, value);
return pci_conf0_read_config(bus, devfn, where, size, value);
@@ -310,7 +310,7 @@ oh_my_gawd:
static int pci_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 value)
{
- if (bus->number > 0)
+ if (!pci_is_root_bus(bus))
return pci_conf1_write_config(bus, devfn, where, size, value);
return pci_conf0_write_config(bus, devfn, where, size, value);
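
pci_is_root_bus() is the more robust predicate here: a root bus is one with no parent bridge, regardless of what number it was assigned. In <linux/pci.h> it is essentially:

static inline bool pci_is_root_bus(struct pci_bus *pbus)
{
	return !(pbus->parent);
}
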
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
index b4fa6413c4e5..c539d0d2b0cf 100644
--- a/arch/mips/pci/pci-ar2315.c
+++ b/arch/mips/pci/pci-ar2315.c
@@ -149,6 +149,13 @@
#define AR2315_PCI_HOST_SLOT 3
#define AR2315_PCI_HOST_DEVID ((0xff18 << 16) | PCI_VENDOR_ID_ATHEROS)
+/*
+ * We need some arbitrary non-zero value to be programmed into the BAR1
+ * register of the PCI host controller to enable DMA. The same value should
+ * be used as the offset to calculate the physical address of the DMA buffer
+ * for PCI devices.
+ */
+#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000
+
/* ??? access BAR */
#define AR2315_PCI_HOST_MBAR0 0x10000000
/* RAM access BAR */
@@ -167,6 +174,23 @@ struct ar2315_pci_ctrl {
struct resource io_res;
};
+static inline dma_addr_t ar2315_dev_offset(struct device *dev)
+{
+ if (dev && dev_is_pci(dev))
+ return AR2315_PCI_HOST_SDRAM_BASEADDR;
+ return 0;
+}
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr + ar2315_dev_offset(dev);
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr - ar2315_dev_offset(dev);
+}
+
static inline struct ar2315_pci_ctrl *ar2315_pci_bus_to_apc(struct pci_bus *bus)
{
struct pci_controller *hose = bus->sysdata;
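
The translation pair added above simply offsets PCI masters by the BAR1 base; a userspace restatement for a quick check (is_pci stands in for dev_is_pci(dev)):

#include <assert.h>
#include <stdint.h>

#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000u

static uint32_t ar2315_phys_to_dma(uint32_t paddr, int is_pci)
{
	return paddr + (is_pci ? AR2315_PCI_HOST_SDRAM_BASEADDR : 0);
}

static uint32_t ar2315_dma_to_phys(uint32_t daddr, int is_pci)
{
	return daddr - (is_pci ? AR2315_PCI_HOST_SDRAM_BASEADDR : 0);
}

int main(void)
{
	uint32_t paddr = 0x00100000;

	assert(ar2315_phys_to_dma(paddr, 1) == 0x20100000u);
	assert(ar2315_dma_to_phys(ar2315_phys_to_dma(paddr, 1), 1) == paddr);
	assert(ar2315_phys_to_dma(paddr, 0) == paddr);	/* non-PCI: identity */
	return 0;
}
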
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 1e23c8d587bd..64b58cc48a91 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -12,14 +12,18 @@
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
+#define AR724X_PCI_REG_APP 0x00
#define AR724X_PCI_REG_RESET 0x18
#define AR724X_PCI_REG_INT_STATUS 0x4c
#define AR724X_PCI_REG_INT_MASK 0x50
+#define AR724X_PCI_APP_LTSSM_ENABLE BIT(0)
+
#define AR724X_PCI_RESET_LINK_UP BIT(0)
#define AR724X_PCI_INT_DEV0 BIT(14)
@@ -325,6 +329,37 @@ static void ar724x_pci_irq_init(struct ar724x_pci_controller *apc,
apc);
}
+static void ar724x_pci_hw_init(struct ar724x_pci_controller *apc)
+{
+ u32 ppl, app;
+ int wait = 0;
+
+ /* deassert PCIe host controller and PCIe PHY reset */
+ ath79_device_reset_clear(AR724X_RESET_PCIE);
+ ath79_device_reset_clear(AR724X_RESET_PCIE_PHY);
+
+ /* remove the reset of the PCIE PLL */
+ ppl = ath79_pll_rr(AR724X_PLL_REG_PCIE_CONFIG);
+ ppl &= ~AR724X_PLL_REG_PCIE_CONFIG_PPL_RESET;
+ ath79_pll_wr(AR724X_PLL_REG_PCIE_CONFIG, ppl);
+
+ /* deassert bypass for the PCIE PLL */
+ ppl = ath79_pll_rr(AR724X_PLL_REG_PCIE_CONFIG);
+ ppl &= ~AR724X_PLL_REG_PCIE_CONFIG_PPL_BYPASS;
+ ath79_pll_wr(AR724X_PLL_REG_PCIE_CONFIG, ppl);
+
+ /* set PCIE Application Control to ready */
+ app = __raw_readl(apc->ctrl_base + AR724X_PCI_REG_APP);
+ app |= AR724X_PCI_APP_LTSSM_ENABLE;
+ __raw_writel(app, apc->ctrl_base + AR724X_PCI_REG_APP);
+
+ /* wait up to 100ms for PHY link up */
+ do {
+ mdelay(10);
+ wait++;
+ } while (wait < 10 && !ar724x_pci_check_link(apc));
+}
+
static int ar724x_pci_probe(struct platform_device *pdev)
{
struct ar724x_pci_controller *apc;
@@ -383,6 +418,13 @@ static int ar724x_pci_probe(struct platform_device *pdev)
apc->pci_controller.io_resource = &apc->io_res;
apc->pci_controller.mem_resource = &apc->mem_res;
+ /*
+ * Do the full PCIE Root Complex Initialization Sequence if the PCIe
+ * host controller is in reset.
+ */
+ if (ath79_reset_rr(AR724X_RESET_REG_RESET_MODULE) & AR724X_RESET_PCIE)
+ ar724x_pci_hw_init(apc);
+
apc->link_up = ar724x_pci_check_link(apc);
if (!apc->link_up)
dev_warn(&pdev->dev, "PCIe link is down\n");
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 0f09eafa5e3a..c94a66070a60 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/smp.h>
+#include <linux/dma-direct.h>
#include <asm/sn/arch.h>
#include <asm/pci/bridge.h>
#include <asm/paccess.h>
@@ -182,6 +183,19 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
return 0;
}
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus);
+
+ return bc->baddr + paddr;
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr & ~(0xffUL << 56);
+}
+
/*
* Device might live on a subordinate PCI bus. XXX Walk up the chain of buses
* to find the slot number in sense of the bridge device register.
@@ -200,17 +214,6 @@ static inline void pci_disable_swapping(struct pci_dev *dev)
bridge->b_widget.w_tflush; /* Flush */
}
-static inline void pci_enable_swapping(struct pci_dev *dev)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
- bridge_t *bridge = bc->base;
- int slot = PCI_SLOT(dev->devfn);
-
- /* Turn on byte swapping */
- bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR;
- bridge->b_widget.w_tflush; /* Flush */
-}
-
static void pci_fixup_ioc3(struct pci_dev *d)
{
pci_disable_swapping(d);
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index 3e92a06fa772..5017d5843c5a 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -21,8 +21,6 @@
#include <asm/octeon/cvmx-pci-defs.h>
#include <asm/octeon/pci-octeon.h>
-#include <dma-coherence.h>
-
#define USE_OCTEON_INTERNAL_ARBITER
/*
@@ -166,8 +164,6 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
}
- dev->dev.dma_ops = octeon_pci_dma_map_ops;
-
return 0;
}
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 87ba86bd8696..d919a0d813a1 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -94,8 +94,6 @@ union cvmx_pcie_address {
static int cvmx_pcie_rc_initialize(int pcie_port);
-#include <dma-coherence.h>
-
/**
* Return the Core virtual base address for PCIe IO access. IOs are
* read/written as an offset from this address.
@@ -1239,14 +1237,14 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
/* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
if (pcie_port) {
- union cvmx_ciu_qlm1 ciu_qlm;
+ union cvmx_ciu_qlm ciu_qlm;
ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
ciu_qlm.s.txbypass = 1;
ciu_qlm.s.txdeemph = 5;
ciu_qlm.s.txmargin = 0x17;
cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
} else {
- union cvmx_ciu_qlm0 ciu_qlm;
+ union cvmx_ciu_qlm ciu_qlm;
ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
ciu_qlm.s.txbypass = 1;
ciu_qlm.s.txdeemph = 5;
diff --git a/arch/mips/pic32/pic32mzda/early_console.c b/arch/mips/pic32/pic32mzda/early_console.c
index d7b783463fac..8ed4961b1271 100644
--- a/arch/mips/pic32/pic32mzda/early_console.c
+++ b/arch/mips/pic32/pic32mzda/early_console.c
@@ -13,6 +13,7 @@
*/
#include <asm/mach-pic32/pic32.h>
#include <asm/fw/fw.h>
+#include <asm/setup.h>
#include "pic32mzda.h"
#include "early_pin.h"
@@ -157,7 +158,7 @@ void __init fw_init_early_console(char port)
setup_early_console(port, baud);
}
-int prom_putchar(char c)
+void prom_putchar(char c)
{
if (console_port >= 0) {
while (__raw_readl(
@@ -166,6 +167,4 @@ int prom_putchar(char c)
__raw_writel(c, uart_base + U_TXR(console_port));
}
-
- return 1;
}
diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c
index 3c59ffe5f5f5..ecd30ddfb3db 100644
--- a/arch/mips/ralink/early_printk.c
+++ b/arch/mips/ralink/early_printk.c
@@ -10,6 +10,7 @@
#include <linux/serial_reg.h>
#include <asm/addrspace.h>
+#include <asm/setup.h>
#ifdef CONFIG_SOC_RT288X
#define EARLY_UART_BASE 0x300c00
@@ -68,7 +69,7 @@ static void find_uart_base(void)
}
}
-void prom_putchar(unsigned char ch)
+void prom_putchar(char ch)
{
if (!init_complete) {
find_uart_base();
@@ -76,13 +77,13 @@ void prom_putchar(unsigned char ch)
}
if (IS_ENABLED(CONFIG_SOC_MT7621) || soc_is_mt7628()) {
- uart_w32(ch, UART_TX);
+ uart_w32((unsigned char)ch, UART_TX);
while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
;
} else {
while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
;
- uart_w32(ch, UART_REG_TX);
+ uart_w32((unsigned char)ch, UART_REG_TX);
while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
;
}
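
Every platform now implements the single prototype from <asm/setup.h>, void prom_putchar(char), so the generic early console code can stay ignorant of the SoC. An illustrative consumer (helper name hypothetical):

void prom_putchar(char c);	/* declared in <asm/setup.h> */

static void demo_early_puts(const char *s)
{
	while (*s) {
		if (*s == '\n')
			prom_putchar('\r');
		prom_putchar(*s++);
	}
}
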
diff --git a/arch/mips/sgi-ip27/ip27-console.c b/arch/mips/sgi-ip27/ip27-console.c
index 45fdfbcbd4c6..6bdb48d41276 100644
--- a/arch/mips/sgi-ip27/ip27-console.c
+++ b/arch/mips/sgi-ip27/ip27-console.c
@@ -7,6 +7,7 @@
*/
#include <asm/page.h>
+#include <asm/setup.h>
#include <asm/sn/addrs.h>
#include <asm/sn/sn0/hub.h>
#include <asm/sn/klconfig.h>
diff --git a/arch/mips/sgi-ip32/Makefile b/arch/mips/sgi-ip32/Makefile
index 60f0227425e7..4745cd94df11 100644
--- a/arch/mips/sgi-ip32/Makefile
+++ b/arch/mips/sgi-ip32/Makefile
@@ -4,4 +4,4 @@
#
obj-y += ip32-berr.o ip32-irq.o ip32-platform.o ip32-setup.o ip32-reset.o \
- crime.o ip32-memory.o
+ crime.o ip32-memory.o ip32-dma.o
diff --git a/arch/mips/sgi-ip32/ip32-dma.c b/arch/mips/sgi-ip32/ip32-dma.c
new file mode 100644
index 000000000000..fa7b17cb5385
--- /dev/null
+++ b/arch/mips/sgi-ip32/ip32-dma.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/dma-direct.h>
+#include <asm/ip32/crime.h>
+
+/*
+ * Few notes.
+ * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
+ * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for
+ * native-endian)
+ * 3. All other devices see memory as one big chunk at 0x40000000
+ * 4. Non-PCI devices will pass NULL as struct device*
+ *
+ * Thus we translate differently, depending on device.
+ */
+
+#define RAM_OFFSET_MASK 0x3fffffffUL
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ dma_addr_t dma_addr = paddr & RAM_OFFSET_MASK;
+
+ if (!dev)
+ dma_addr += CRIME_HI_MEM_BASE;
+ return dma_addr;
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ phys_addr_t paddr = dma_addr & RAM_OFFSET_MASK;
+
+ if (dma_addr >= 256*1024*1024)
+ paddr += CRIME_HI_MEM_BASE;
+ return paddr;
+}
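
A userspace restatement of the CRIME translation makes the two views concrete (CRIME_HI_MEM_BASE value assumed from <asm/ip32/crime.h>); high RAM round-trips through both the PCI and the non-PCI view:

#include <assert.h>
#include <stdint.h>

#define RAM_OFFSET_MASK		0x3fffffffULL
#define CRIME_HI_MEM_BASE	0x40000000ULL	/* assumed value */

static uint64_t ip32_phys_to_dma(uint64_t paddr, int has_dev)
{
	uint64_t dma = paddr & RAM_OFFSET_MASK;

	if (!has_dev)			/* non-PCI: one big chunk at 0x40000000 */
		dma += CRIME_HI_MEM_BASE;
	return dma;
}

static uint64_t ip32_dma_to_phys(uint64_t daddr)
{
	uint64_t paddr = daddr & RAM_OFFSET_MASK;

	if (daddr >= 256 * 1024 * 1024)
		paddr += CRIME_HI_MEM_BASE;
	return paddr;
}

int main(void)
{
	/* High RAM lives above CRIME_HI_MEM_BASE + 256M; low RAM is 0-256M. */
	uint64_t paddr = CRIME_HI_MEM_BASE + 0x10000000 + 0x2000;

	assert(ip32_phys_to_dma(paddr, 1) == 0x10002000);	/* PCI view */
	assert(ip32_dma_to_phys(ip32_phys_to_dma(paddr, 1)) == paddr);
	assert(ip32_dma_to_phys(ip32_phys_to_dma(paddr, 0)) == paddr);
	return 0;
}
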
diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig
index f4dbce25bc6a..7ec278d72096 100644
--- a/arch/mips/sibyte/Kconfig
+++ b/arch/mips/sibyte/Kconfig
@@ -70,7 +70,6 @@ config SIBYTE_BCM1x55
config SIBYTE_SB1xxx_SOC
bool
- select DMA_COHERENT
select IRQ_MIPS_CPU
select SWAP_IO_SPACE
select SYS_SUPPORTS_32BIT_KERNEL
diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c
index 115399202eab..092fb2a6ec4a 100644
--- a/arch/mips/sibyte/common/cfe.c
+++ b/arch/mips/sibyte/common/cfe.c
@@ -27,6 +27,7 @@
#include <asm/bootinfo.h>
#include <asm/reboot.h>
+#include <asm/setup.h>
#include <asm/sibyte/board.h>
#include <asm/smp-ops.h>
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 1791a44ee570..f6d9182ef82a 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -20,6 +20,7 @@
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/txx9/ndfmc.h>
#include <linux/serial_core.h>
#include <linux/mtd/physmap.h>
#include <linux/leds.h>
@@ -32,10 +33,10 @@
#include <asm/reboot.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
+#include <asm/setup.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/pci.h>
#include <asm/txx9tmr.h>
-#include <asm/txx9/ndfmc.h>
#include <asm/txx9/dmac.h>
#ifdef CONFIG_CPU_TX49XX
#include <asm/txx9/tx4938.h>
diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c
index 85d1795652da..17395d5d15ca 100644
--- a/arch/mips/txx9/generic/setup_tx4938.c
+++ b/arch/mips/txx9/generic/setup_tx4938.c
@@ -17,13 +17,13 @@
#include <linux/ptrace.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/txx9/ndfmc.h>
#include <asm/reboot.h>
#include <asm/traps.h>
#include <asm/txx9irq.h>
#include <asm/txx9tmr.h>
#include <asm/txx9pio.h>
#include <asm/txx9/generic.h>
-#include <asm/txx9/ndfmc.h>
#include <asm/txx9/dmac.h>
#include <asm/txx9/tx4938.h>
diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c
index 274928987a21..360c388f4c82 100644
--- a/arch/mips/txx9/generic/setup_tx4939.c
+++ b/arch/mips/txx9/generic/setup_tx4939.c
@@ -21,13 +21,13 @@
#include <linux/ptrace.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/txx9/ndfmc.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/traps.h>
#include <asm/txx9irq.h>
#include <asm/txx9tmr.h>
#include <asm/txx9/generic.h>
-#include <asm/txx9/ndfmc.h>
#include <asm/txx9/dmac.h>
#include <asm/txx9/tx4939.h>
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index ce196046ac3e..34605ca21498 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -7,7 +7,13 @@ ccflags-vdso := \
$(filter -I%,$(KBUILD_CFLAGS)) \
$(filter -E%,$(KBUILD_CFLAGS)) \
$(filter -mmicromips,$(KBUILD_CFLAGS)) \
- $(filter -march=%,$(KBUILD_CFLAGS))
+ $(filter -march=%,$(KBUILD_CFLAGS)) \
+ -D__VDSO__
+
+ifeq ($(cc-name),clang)
+ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
+endif
+
cflags-vdso := $(ccflags-vdso) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
-O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
@@ -38,6 +44,7 @@ endif
# VDSO linker flags.
VDSO_LDFLAGS := \
-Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \
+ $(addprefix -Wl$(comma),$(filter -E%,$(KBUILD_CFLAGS))) \
-nostdlib -shared \
$(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
$(call cc-ldoption, -Wl$(comma)--build-id)
diff --git a/arch/mips/vdso/genvdso.h b/arch/mips/vdso/genvdso.h
index 94334727059a..611b06f01a3c 100644
--- a/arch/mips/vdso/genvdso.h
+++ b/arch/mips/vdso/genvdso.h
@@ -15,8 +15,6 @@ static inline bool FUNC(patch_vdso)(const char *path, void *vdso)
ELF(Shdr) *shdr;
char *shstrtab, *name;
uint16_t sh_count, sh_entsize, i;
- unsigned int local_gotno, symtabno, gotsym;
- ELF(Dyn) *dyn = NULL;
shdrs = vdso + FUNC(swap_uint)(ehdr->e_shoff);
sh_count = swap_uint16(ehdr->e_shnum);
@@ -41,9 +39,6 @@ static inline bool FUNC(patch_vdso)(const char *path, void *vdso)
"%s: '%s' contains relocation sections\n",
program_name, path);
return false;
- case SHT_DYNAMIC:
- dyn = vdso + FUNC(swap_uint)(shdr->sh_offset);
- break;
}
/* Check for existing sections. */
@@ -61,52 +56,6 @@ static inline bool FUNC(patch_vdso)(const char *path, void *vdso)
}
}
- /*
- * Ensure the GOT has no entries other than the standard 2, for the same
- * reason we check that there's no relocation sections above.
- * The standard two entries are:
- * - Lazy resolver
- * - Module pointer
- */
- if (dyn) {
- local_gotno = symtabno = gotsym = 0;
-
- while (FUNC(swap_uint)(dyn->d_tag) != DT_NULL) {
- switch (FUNC(swap_uint)(dyn->d_tag)) {
- /*
- * This member holds the number of local GOT entries.
- */
- case DT_MIPS_LOCAL_GOTNO:
- local_gotno = FUNC(swap_uint)(dyn->d_un.d_val);
- break;
- /*
- * This member holds the number of entries in the
- * .dynsym section.
- */
- case DT_MIPS_SYMTABNO:
- symtabno = FUNC(swap_uint)(dyn->d_un.d_val);
- break;
- /*
- * This member holds the index of the first dynamic
- * symbol table entry that corresponds to an entry in
- * the GOT.
- */
- case DT_MIPS_GOTSYM:
- gotsym = FUNC(swap_uint)(dyn->d_un.d_val);
- break;
- }
-
- dyn++;
- }
-
- if (local_gotno > 2 || symtabno - gotsym) {
- fprintf(stderr,
- "%s: '%s' contains unexpected GOT entries\n",
- program_name, path);
- return false;
- }
- }
-
return true;
}
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
index 39a0db3e2b34..16e684b59875 100644
--- a/arch/mips/vr41xx/common/pmu.c
+++ b/arch/mips/vr41xx/common/pmu.c
@@ -17,6 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -46,7 +47,7 @@ static void __iomem *pmu_base;
#define pmu_read(offset) readw(pmu_base + (offset))
#define pmu_write(offset, value) writew((value), pmu_base + (offset))
-static void vr41xx_cpu_wait(void)
+static void __cpuidle vr41xx_cpu_wait(void)
{
local_irq_disable();
if (!need_resched())
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 34f7222c5efe..1d4248fa55e9 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -71,8 +71,6 @@ config FIX_EARLYCON_MEM
config PGTABLE_LEVELS
default 2
-source "init/Kconfig"
-
menu "System Type"
source "arch/nds32/Kconfig.cpu"
config NR_CPUS
@@ -90,24 +88,5 @@ config NDS32_BUILTIN_DTB
endmenu
menu "Kernel Features"
-source "kernel/Kconfig.preempt"
-source "kernel/Kconfig.freezer"
-source "mm/Kconfig"
source "kernel/Kconfig.hz"
endmenu
-
-menu "Executable file formats"
-source "fs/Kconfig.binfmt"
-endmenu
-
-source "net/Kconfig"
-source "drivers/Kconfig"
-source "fs/Kconfig"
-
-menu "Kernel hacking"
-source "lib/Kconfig.debug"
-endmenu
-
-source "security/Kconfig"
-source "crypto/Kconfig"
-source "lib/Kconfig"
diff --git a/arch/nds32/Kconfig.debug b/arch/nds32/Kconfig.debug
new file mode 100644
index 000000000000..22a162cd99e8
--- /dev/null
+++ b/arch/nds32/Kconfig.debug
@@ -0,0 +1 @@
+# dummy file, do not delete
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 3d4ec88f1db1..f4ad1138e6b9 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
config NIOS2
def_bool y
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_NO_SWAP
+ select DMA_NONCOHERENT_OPS
select TIMER_OF
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
@@ -38,27 +42,16 @@ config HAS_DMA
config FPU
def_bool n
-config SWAP
- def_bool n
-
config RWSEM_GENERIC_SPINLOCK
def_bool y
config TRACE_IRQFLAGS_SUPPORT
def_bool n
-source "init/Kconfig"
-
menu "Kernel features"
-source "kernel/Kconfig.preempt"
-
-source "kernel/Kconfig.freezer"
-
source "kernel/Kconfig.hz"
-source "mm/Kconfig"
-
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
range 9 20
@@ -195,23 +188,3 @@ config NIOS2_IO_REGION_BASE
default "0xe0000000"
endmenu
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/nios2/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug
index edfeef049a51..7a49f0d28d14 100644
--- a/arch/nios2/Kconfig.debug
+++ b/arch/nios2/Kconfig.debug
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-source "lib/Kconfig.debug"
-
config DEBUG_STACK_USAGE
bool "Enable stack utilization instrumentation"
depends on DEBUG_KERNEL
@@ -24,5 +21,3 @@ config EARLY_PRINTK
This is useful for kernel debugging when your machine crashes very
early before the console code is initialized.
You should normally say N here, unless you want to debug such a crash.
-
-endmenu
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 64ed3d656956..8fde4fa2c34f 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += current.h
generic-y += device.h
generic-y += div64.h
generic-y += dma.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
diff --git a/arch/nios2/include/asm/dma-mapping.h b/arch/nios2/include/asm/dma-mapping.h
deleted file mode 100644
index 6ceb92251da0..000000000000
--- a/arch/nios2/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
- * Copyright (C) 2009 Wind River Systems Inc
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file COPYING in the main directory of this
- * archive for more details.
- */
-
-#ifndef _ASM_NIOS2_DMA_MAPPING_H
-#define _ASM_NIOS2_DMA_MAPPING_H
-
-extern const struct dma_map_ops nios2_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &nios2_dma_ops;
-}
-
-#endif /* _ASM_NIOS2_DMA_MAPPING_H */
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index 4be815519dd4..4af9e5b5ba1c 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -12,18 +12,18 @@
#include <linux/types.h>
#include <linux/mm.h>
-#include <linux/export.h>
#include <linux/string.h>
-#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>
-static inline void __dma_sync_for_device(void *vaddr, size_t size,
- enum dma_data_direction direction)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- switch (direction) {
+ void *vaddr = phys_to_virt(paddr);
+
+ switch (dir) {
case DMA_FROM_DEVICE:
invalidate_dcache_range((unsigned long)vaddr,
(unsigned long)(vaddr + size));
@@ -42,10 +42,12 @@ static inline void __dma_sync_for_device(void *vaddr, size_t size,
}
}
-static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
- enum dma_data_direction direction)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- switch (direction) {
+ void *vaddr = phys_to_virt(paddr);
+
+ switch (dir) {
case DMA_BIDIRECTIONAL:
case DMA_FROM_DEVICE:
invalidate_dcache_range((unsigned long)vaddr,
@@ -58,8 +60,8 @@ static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
}
}
-static void *nios2_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -80,125 +82,10 @@ static void *nios2_dma_alloc(struct device *dev, size_t size,
return ret;
}
-static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);
free_pages(addr, get_order(size));
}
-
-static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- int i;
-
- for_each_sg(sg, sg, nents, i) {
- void *addr = sg_virt(sg);
-
- if (!addr)
- continue;
-
- sg->dma_address = sg_phys(sg);
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- continue;
-
- __dma_sync_for_device(addr, sg->length, direction);
- }
-
- return nents;
-}
-
-static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- void *addr = page_address(page) + offset;
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync_for_device(addr, size, direction);
-
- return page_to_phys(page) + offset;
-}
-
-static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
-{
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
-}
-
-static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction,
- unsigned long attrs)
-{
- void *addr;
- int i;
-
- if (direction == DMA_TO_DEVICE)
- return;
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- return;
-
- for_each_sg(sg, sg, nhwentries, i) {
- addr = sg_virt(sg);
- if (addr)
- __dma_sync_for_cpu(addr, sg->length, direction);
- }
-}
-
-static void nios2_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
-}
-
-static void nios2_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- __dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
-}
-
-static void nios2_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- int i;
-
- /* Make sure that gcc doesn't leave the empty loop body. */
- for_each_sg(sg, sg, nelems, i)
- __dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
-}
-
-static void nios2_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- int i;
-
- /* Make sure that gcc doesn't leave the empty loop body. */
- for_each_sg(sg, sg, nelems, i)
- __dma_sync_for_device(sg_virt(sg), sg->length, direction);
-
-}
-
-const struct dma_map_ops nios2_dma_ops = {
- .alloc = nios2_dma_alloc,
- .free = nios2_dma_free,
- .map_page = nios2_dma_map_page,
- .unmap_page = nios2_dma_unmap_page,
- .map_sg = nios2_dma_map_sg,
- .unmap_sg = nios2_dma_unmap_sg,
- .sync_single_for_device = nios2_dma_sync_single_for_device,
- .sync_single_for_cpu = nios2_dma_sync_single_for_cpu,
- .sync_sg_for_cpu = nios2_dma_sync_sg_for_cpu,
- .sync_sg_for_device = nios2_dma_sync_sg_for_device,
-};
-EXPORT_SYMBOL(nios2_dma_ops);
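
The two arch_sync_dma_* hooks above encode the standard cache-maintenance rules for a non-coherent CPU. A hedged sketch of the decision table they implement (the generic pattern, not the nios2 code itself):

#include <stdio.h>

enum dir { TO_DEVICE, FROM_DEVICE, BIDIRECTIONAL };

/* Before the device touches the buffer. */
static const char *sync_for_device(enum dir d)
{
	return d == FROM_DEVICE ? "invalidate dcache"  /* drop soon-stale lines */
				: "writeback dcache";  /* push CPU writes to RAM */
}

/* After the device is done, before the CPU reads the buffer. */
static const char *sync_for_cpu(enum dir d)
{
	return d == TO_DEVICE ? "nothing" : "invalidate dcache";
}

int main(void)
{
	static const char *name[] = { "TO_DEVICE", "FROM_DEVICE", "BIDIRECTIONAL" };

	for (int d = TO_DEVICE; d <= BIDIRECTIONAL; d++)
		printf("%-13s for_device: %-18s for_cpu: %s\n",
		       name[d], sync_for_device(d), sync_for_cpu(d));
	return 0;
}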
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 9ecad05bfc73..ed5f32d8fbd8 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -27,7 +27,6 @@ config OPENRISC
select GENERIC_STRNLEN_USER
select GENERIC_SMP_IDLE_THREAD
select MODULES_USE_ELF_RELA
- select MULTI_IRQ_HANDLER
select HAVE_DEBUG_STACKOVERFLOW
select OR1K_PIC
select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1
@@ -36,6 +35,7 @@ config OPENRISC
select ARCH_USE_QUEUED_RWLOCKS
select OMPIC if SMP
select ARCH_WANT_FRAME_POINTERS
+ select GENERIC_IRQ_MULTI_HANDLER
config CPU_BIG_ENDIAN
def_bool y
@@ -69,13 +69,6 @@ config STACKTRACE_SUPPORT
config LOCKDEP_SUPPORT
def_bool y
-config MULTI_IRQ_HANDLER
- def_bool y
-
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
menu "Processor type and features"
choice
@@ -147,8 +140,6 @@ config SMP
If you don't know what to do here, say N.
source kernel/Kconfig.hz
-source kernel/Kconfig.preempt
-source "mm/Kconfig"
config OPENRISC_NO_SPR_SR_DSX
bool "use SPR_SR_DSX software emulation" if OR1K_1200
@@ -206,27 +197,3 @@ config OPENRISC_ESR_EXCEPTION_BUG_CHECK
endmenu
endmenu
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
-
-endmenu
diff --git a/arch/openrisc/Kconfig.debug b/arch/openrisc/Kconfig.debug
new file mode 100644
index 000000000000..22a162cd99e8
--- /dev/null
+++ b/arch/openrisc/Kconfig.debug
@@ -0,0 +1 @@
+# dummy file, do not delete
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
index 89076a66eee2..70e06d34006c 100644
--- a/arch/openrisc/Makefile
+++ b/arch/openrisc/Makefile
@@ -19,7 +19,6 @@
KBUILD_DEFCONFIG := or1ksim_defconfig
-LDFLAGS :=
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
LDFLAGS_vmlinux :=
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h
index 146e1660f00e..b589fac39b92 100644
--- a/arch/openrisc/include/asm/atomic.h
+++ b/arch/openrisc/include/asm/atomic.h
@@ -100,7 +100,7 @@ ATOMIC_OP(xor)
*
* This is often used through atomic_inc_not_zero()
*/
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int old, tmp;
@@ -119,7 +119,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return old;
}
-#define __atomic_add_unless __atomic_add_unless
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#include <asm-generic/atomic.h>
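
The renamed atomic_fetch_add_unless() above keeps the usual compare-and-swap loop semantics: add a to v unless v equals u, returning the old value. A portable C11 analogue of that loop (a sketch, not the OpenRISC assembly):

#include <stdatomic.h>
#include <stdio.h>

/* Add a to *v unless *v == u; return the value *v had beforehand. */
static int fetch_add_unless(atomic_int *v, int a, int u)
{
	int old = atomic_load(v);

	while (old != u) {
		/* On failure, old is reloaded with the current value. */
		if (atomic_compare_exchange_weak(v, &old, old + a))
			break;
	}
	return old;
}

int main(void)
{
	atomic_int v = 1;

	printf("%d\n", fetch_add_unless(&v, 1, 0)); /* prints 1, v becomes 2 */
	printf("%d\n", fetch_add_unless(&v, 1, 2)); /* prints 2, v unchanged */
	return 0;
}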
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index d29f7db53906..f9cd43a39d72 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -16,8 +16,9 @@
#ifndef __ASM_OPENRISC_CMPXCHG_H
#define __ASM_OPENRISC_CMPXCHG_H
+#include <linux/bits.h>
+#include <linux/compiler.h>
#include <linux/types.h>
-#include <linux/bitops.h>
#define __HAVE_ARCH_CMPXCHG 1
diff --git a/arch/openrisc/include/asm/irq.h b/arch/openrisc/include/asm/irq.h
index d9eee0a2b7b4..eb612b1865d2 100644
--- a/arch/openrisc/include/asm/irq.h
+++ b/arch/openrisc/include/asm/irq.h
@@ -24,6 +24,4 @@
#define NO_IRQ (-1)
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-
#endif /* __ASM_OPENRISC_IRQ_H__ */
diff --git a/arch/openrisc/kernel/irq.c b/arch/openrisc/kernel/irq.c
index 35e478a93116..5f9445effaf8 100644
--- a/arch/openrisc/kernel/irq.c
+++ b/arch/openrisc/kernel/irq.c
@@ -41,13 +41,6 @@ void __init init_IRQ(void)
irqchip_init();
}
-static void (*handle_arch_irq)(struct pt_regs *);
-
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
- handle_arch_irq = handle_irq;
-}
-
void __irq_entry do_IRQ(struct pt_regs *regs)
{
handle_arch_irq(regs);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index e7705dde953f..8e6d83f79e72 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -45,6 +45,7 @@ config PARISC
select HAVE_ARCH_HASH
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
+ select HAVE_REGS_AND_STACK_ACCESS_API
select GENERIC_SCHED_CLOCK
select HAVE_UNSTABLE_SCHED_CLOCK if SMP
select GENERIC_CLOCKEVENTS
@@ -128,10 +129,6 @@ config PGTABLE_LEVELS
config SYS_SUPPORTS_HUGETLBFS
def_bool y if PA20
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
menu "Processor type and features"
@@ -187,6 +184,10 @@ config PA20
config PA11
def_bool y
depends on PA7000 || PA7100LC || PA7200 || PA7300LC
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select DMA_NONCOHERENT_OPS
+ select DMA_NONCOHERENT_CACHE_SYNC
config PREFETCH
def_bool y
@@ -326,9 +327,7 @@ config NODES_SHIFT
default "3"
depends on NEED_MULTIPLE_NODES
-source "kernel/Kconfig.preempt"
source "kernel/Kconfig.hz"
-source "mm/Kconfig"
config COMPAT
def_bool y
@@ -353,21 +352,6 @@ endmenu
source "drivers/parisc/Kconfig"
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/parisc/Kconfig.debug"
-
config SECCOMP
def_bool y
prompt "Enable seccomp to safely compute untrusted bytecode"
@@ -383,9 +367,3 @@ config SECCOMP
defined by each seccomp mode.
If unsure, say Y. Only embedded should say N here.
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index fb3507f9b14a..1478ded0e247 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -1,9 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-
-endmenu
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 60e6f07b7e32..e9c6385ef0d1 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -36,6 +36,7 @@
#define RP_OFFSET 16
#define FRAME_SIZE 128
#define CALLEE_REG_FRAME_SIZE 144
+#define REG_SZ 8
#define ASM_ULONG_INSN .dword
#else /* CONFIG_64BIT */
#define LDREG ldw
@@ -50,6 +51,7 @@
#define RP_OFFSET 20
#define FRAME_SIZE 64
#define CALLEE_REG_FRAME_SIZE 128
+#define REG_SZ 4
#define ASM_ULONG_INSN .word
#endif
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 88bae6676c9b..118953d41763 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -77,30 +77,6 @@ static __inline__ int atomic_read(const atomic_t *v)
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
#define ATOMIC_OP(op, c_op) \
static __inline__ void atomic_##op(int i, atomic_t *v) \
{ \
@@ -160,28 +136,6 @@ ATOMIC_OPS(xor, ^=)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_inc(v) (atomic_add( 1,(v)))
-#define atomic_dec(v) (atomic_add( -1,(v)))
-
-#define atomic_inc_return(v) (atomic_add_return( 1,(v)))
-#define atomic_dec_return(v) (atomic_add_return( -1,(v)))
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
-
#define ATOMIC_INIT(i) { (i) }
#ifdef CONFIG_64BIT
@@ -264,72 +218,11 @@ atomic64_read(const atomic64_t *v)
return READ_ONCE((v)->counter);
}
-#define atomic64_inc(v) (atomic64_add( 1,(v)))
-#define atomic64_dec(v) (atomic64_add( -1,(v)))
-
-#define atomic64_inc_return(v) (atomic64_add_return( 1,(v)))
-#define atomic64_dec_return(v) (atomic64_add_return( -1,(v)))
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i),(v)) == 0)
-
/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-/**
- * atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long c, old;
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic64_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
- long c, old, dec;
- c = atomic64_read(v);
- for (;;) {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- old = atomic64_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
- return dec;
-}
-
#endif /* !CONFIG_64BIT */
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 01e1fc057c83..44a9f97194aa 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -21,11 +21,6 @@
** flush/purge and allocate "regular" cacheable pages for everything.
*/
-#ifdef CONFIG_PA11
-extern const struct dma_map_ops pcxl_dma_ops;
-extern const struct dma_map_ops pcx_dma_ops;
-#endif
-
extern const struct dma_map_ops *hppa_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/parisc/include/asm/linkage.h b/arch/parisc/include/asm/linkage.h
index 9a69bf6fc4b6..49f6f3d772cc 100644
--- a/arch/parisc/include/asm/linkage.h
+++ b/arch/parisc/include/asm/linkage.h
@@ -18,9 +18,9 @@
#ifdef __ASSEMBLY__
#define ENTRY(name) \
- .export name !\
- ALIGN !\
-name:
+ ALIGN !\
+name: ASM_NL\
+ .export name
#ifdef CONFIG_64BIT
#define ENDPROC(name) \
@@ -31,13 +31,18 @@ name:
END(name)
#endif
-#define ENTRY_CFI(name) \
+#define ENTRY_CFI(name, ...) \
ENTRY(name) ASM_NL\
+ .proc ASM_NL\
+ .callinfo __VA_ARGS__ ASM_NL\
+ .entry ASM_NL\
CFI_STARTPROC
#define ENDPROC_CFI(name) \
- ENDPROC(name) ASM_NL\
- CFI_ENDPROC
+ CFI_ENDPROC ASM_NL\
+ .exit ASM_NL\
+ .procend ASM_NL\
+ ENDPROC(name)
#endif /* __ASSEMBLY__ */
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
index 46da07670c2b..2a27b275ab09 100644
--- a/arch/parisc/include/asm/ptrace.h
+++ b/arch/parisc/include/asm/ptrace.h
@@ -25,4 +25,15 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->gr[20];
}
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->iaoq[0] = val;
+}
+
+/* Query offset/name of register from its name/offset */
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, ipsw))
+
#endif
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 6f84b6acc86e..8a63515f03bf 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -20,7 +20,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
{
volatile unsigned int *a;
- mb();
a = __ldcw_align(x);
while (__ldcw(a) == 0)
while (*a == 0)
@@ -30,17 +29,16 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
local_irq_disable();
} else
cpu_relax();
- mb();
}
#define arch_spin_lock_flags arch_spin_lock_flags
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
- mb();
+
a = __ldcw_align(x);
- *a = 1;
mb();
+ *a = 1;
}
static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -48,10 +46,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
volatile unsigned int *a;
int ret;
- mb();
a = __ldcw_align(x);
ret = __ldcw(a) != 0;
- mb();
return ret;
}
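
The unlock reordering above turns arch_spin_unlock() into a proper release: the barrier must execute before the store that publishes the lock word, and the ldcw in the lock path already orders the acquire side. A C11 analogue of the resulting acquire/release pairing (illustrative only, not the parisc primitives):

#include <stdatomic.h>

typedef struct { atomic_uint slot; } toy_spinlock_t;   /* 1 = free, 0 = held */

static void toy_spin_lock(toy_spinlock_t *x)
{
	/* ldcw-style: atomically read the word and clear it; reading a
	 * nonzero value means the lock was free and is now ours. */
	while (atomic_exchange_explicit(&x->slot, 0, memory_order_acquire) == 0)
		while (atomic_load_explicit(&x->slot, memory_order_relaxed) == 0)
			;   /* spin read-only until the lock looks free */
}

static void toy_spin_unlock(toy_spinlock_t *x)
{
	/* Release: everything before this store must be visible first,
	 * mirroring the mb()-before-"*a = 1" ordering in the hunk above. */
	atomic_store_explicit(&x->slot, 1, memory_order_release);
}

int main(void)
{
	toy_spinlock_t l = { 1 };

	toy_spin_lock(&l);
	toy_spin_unlock(&l);
	return 0;
}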
diff --git a/arch/parisc/include/asm/unwind.h b/arch/parisc/include/asm/unwind.h
index c73a3ee20226..f133b7efbebb 100644
--- a/arch/parisc/include/asm/unwind.h
+++ b/arch/parisc/include/asm/unwind.h
@@ -4,6 +4,9 @@
#include <linux/list.h>
+/* Max number of levels to backtrace */
+#define MAX_UNWIND_ENTRIES 30
+
/* From ABI specifications */
struct unwind_table_entry {
unsigned int region_start;
diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
index fc0df353ff0d..87245c584784 100644
--- a/arch/parisc/include/uapi/asm/errno.h
+++ b/arch/parisc/include/uapi/asm/errno.h
@@ -113,7 +113,6 @@
#define ELOOP 249 /* Too many symbolic links encountered */
#define ENOSYS 251 /* Function not implemented */
-#define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */
#define ECANCELLED 253 /* aio request was canceled before complete (POSIX.4 / HPUX) */
#define ECANCELED ECANCELLED /* SuSv3 and Solaris wants one 'L' */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 1d0fdc3b5d22..061b9cf2a779 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -104,4 +104,7 @@
#define SO_ZEROCOPY 0x4035
+#define SO_TXTIME 0x4036
+#define SCM_TXTIME SO_TXTIME
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 1b4732e20137..c7508f5717fb 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -766,7 +766,6 @@ END(fault_vector_11)
#endif
/* Fault vector is separately protected and *must* be on its own page */
.align PAGE_SIZE
-ENTRY(end_fault_vector)
.import handle_interruption,code
.import do_cpu_irq_mask,code
@@ -778,7 +777,6 @@ ENTRY(end_fault_vector)
*/
ENTRY_CFI(ret_from_kernel_thread)
-
/* Call schedule_tail first though */
BL schedule_tail, %r2
nop
@@ -817,8 +815,9 @@ ENTRY_CFI(_switch_to)
LDREG TASK_THREAD_INFO(%r25), %r25
bv %r0(%r2)
mtctl %r25,%cr30
+ENDPROC_CFI(_switch_to)
-_switch_to_ret:
+ENTRY_CFI(_switch_to_ret)
mtctl %r0, %cr0 /* Needed for single stepping */
callee_rest
callee_rest_float
@@ -826,7 +825,7 @@ _switch_to_ret:
LDREG -RP_OFFSET(%r30), %r2
bv %r0(%r2)
copy %r26, %r28
-ENDPROC_CFI(_switch_to)
+ENDPROC_CFI(_switch_to_ret)
/*
* Common rfi return path for interruptions, kernel execve, and
@@ -887,12 +886,14 @@ ENTRY_CFI(syscall_exit_rfi)
STREG %r19,PT_SR5(%r16)
STREG %r19,PT_SR6(%r16)
STREG %r19,PT_SR7(%r16)
+ENDPROC_CFI(syscall_exit_rfi)
-intr_return:
+ENTRY_CFI(intr_return)
/* check for reschedule */
mfctl %cr30,%r1
LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
+ENDPROC_CFI(intr_return)
.import do_notify_resume,code
intr_check_sig:
@@ -1048,7 +1049,6 @@ intr_extint:
b do_cpu_irq_mask
ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
-ENDPROC_CFI(syscall_exit_rfi)
/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
@@ -1999,12 +1999,9 @@ ENDPROC_CFI(syscall_exit)
.align L1_CACHE_BYTES
.globl mcount
.type mcount, @function
-ENTRY(mcount)
+ENTRY_CFI(mcount, caller)
_mcount:
.export _mcount,data
- .proc
- .callinfo caller,frame=0
- .entry
/*
* The 64bit mcount() function pointer needs 4 dwords, of which the
* first two are free. We optimize it here and put 2 instructions for
@@ -2026,18 +2023,13 @@ ftrace_stub:
.dword mcount
.dword 0 /* code in head.S puts value of global gp here */
#endif
- .exit
- .procend
-ENDPROC(mcount)
+ENDPROC_CFI(mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.align 8
.globl return_to_handler
.type return_to_handler, @function
-ENTRY_CFI(return_to_handler)
- .proc
- .callinfo caller,frame=FRAME_SIZE
- .entry
+ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
.export parisc_return_to_handler,data
parisc_return_to_handler:
copy %r3,%r1
@@ -2076,8 +2068,6 @@ parisc_return_to_handler:
bv %r0(%rp)
#endif
LDREGM -FRAME_SIZE(%sp),%r3
- .exit
- .procend
ENDPROC_CFI(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -2087,31 +2077,30 @@ ENDPROC_CFI(return_to_handler)
#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
unsigned long new_stack) */
-ENTRY_CFI(call_on_stack)
+ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
copy %sp, %r1
/* Regarding the HPPA calling conventions for function pointers,
we assume the PIC register is not changed across call. For
CONFIG_64BIT, the argument pointer is left to point at the
argument region allocated for the call to call_on_stack. */
+
+ /* Switch to new stack. We allocate two frames. */
+ ldo 2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
- /* Switch to new stack. We allocate two 128 byte frames. */
- ldo 256(%arg2), %sp
/* Save previous stack pointer and return pointer in frame marker */
- STREG %rp, -144(%sp)
+ STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
/* Calls always use function descriptor */
LDREG 16(%arg1), %arg1
bve,l (%arg1), %rp
- STREG %r1, -136(%sp)
- LDREG -144(%sp), %rp
+ STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
+ LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
bve (%rp)
- LDREG -136(%sp), %sp
+ LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
# else
- /* Switch to new stack. We allocate two 64 byte frames. */
- ldo 128(%arg2), %sp
/* Save previous stack pointer and return pointer in frame marker */
- STREG %r1, -68(%sp)
- STREG %rp, -84(%sp)
+ STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
+ STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
/* Calls use function descriptor if PLABEL bit is set */
bb,>=,n %arg1, 30, 1f
depwi 0,31,2, %arg1
@@ -2119,9 +2108,9 @@ ENTRY_CFI(call_on_stack)
1:
be,l 0(%sr4,%arg1), %sr0, %r31
copy %r31, %rp
- LDREG -84(%sp), %rp
+ LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
bv (%rp)
- LDREG -68(%sp), %sp
+ LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */
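
A quick worked check that the symbolic operands substituted into call_on_stack above reproduce the old hard-coded offsets, using the FRAME_SIZE/RP_OFFSET/REG_SZ values from the assembly.h hunk earlier in this patch:

#include <stdio.h>

int main(void)
{
	/* { FRAME_SIZE, RP_OFFSET, REG_SZ } per the assembly.h hunk above */
	struct { int frame, rp_off, reg_sz; } abi[] = {
		{ 128, 16, 8 },   /* CONFIG_64BIT */
		{  64, 20, 4 },   /* 32-bit */
	};

	for (int i = 0; i < 2; i++)
		printf("rp slot: %4d   sp slot: %4d\n",
		       -abi[i].frame - abi[i].rp_off,   /* -144 and -84 */
		       -abi[i].frame - abi[i].reg_sz);  /* -136 and -68 */
	return 0;
}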
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 97451e67d35b..f33bf2d306d6 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -44,10 +44,6 @@
.align 16
ENTRY_CFI(flush_tlb_all_local)
- .proc
- .callinfo NO_CALLS
- .entry
-
/*
* The pitlbe and pdtlbe instructions should only be used to
* flush the entire tlb. Also, there needs to be no intervening
@@ -189,18 +185,11 @@ fdtdone:
2: bv %r0(%r2)
nop
-
- .exit
- .procend
ENDPROC_CFI(flush_tlb_all_local)
.import cache_info,data
ENTRY_CFI(flush_instruction_cache_local)
- .proc
- .callinfo NO_CALLS
- .entry
-
load32 cache_info, %r1
/* Flush Instruction Cache */
@@ -256,18 +245,11 @@ fisync:
mtsm %r22 /* restore I-bit */
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_instruction_cache_local)
.import cache_info, data
ENTRY_CFI(flush_data_cache_local)
- .proc
- .callinfo NO_CALLS
- .entry
-
load32 cache_info, %r1
/* Flush Data Cache */
@@ -324,9 +306,6 @@ fdsync:
mtsm %r22 /* restore I-bit */
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_data_cache_local)
/* Macros to serialize TLB purge operations on SMP. */
@@ -362,10 +341,6 @@ ENDPROC_CFI(flush_data_cache_local)
/* Clear page using kernel mapping. */
ENTRY_CFI(clear_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
#ifdef CONFIG_64BIT
/* Unroll the loop. */
@@ -424,18 +399,11 @@ ENTRY_CFI(clear_page_asm)
#endif
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(clear_page_asm)
/* Copy page using kernel mapping. */
ENTRY_CFI(copy_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
#ifdef CONFIG_64BIT
/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
* Unroll the loop by hand and arrange insn appropriately.
@@ -542,9 +510,6 @@ ENTRY_CFI(copy_page_asm)
#endif
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(copy_page_asm)
/*
@@ -598,10 +563,6 @@ ENDPROC_CFI(copy_page_asm)
*/
ENTRY_CFI(copy_user_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
/* Convert virtual `to' and `from' addresses to physical addresses.
Move `from' physical address to non shadowed register. */
ldil L%(__PAGE_OFFSET), %r1
@@ -750,16 +711,9 @@ ENTRY_CFI(copy_user_page_asm)
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(copy_user_page_asm)
ENTRY_CFI(clear_user_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
tophys_r1 %r26
ldil L%(TMPALIAS_MAP_START), %r28
@@ -836,16 +790,9 @@ ENTRY_CFI(clear_user_page_asm)
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(clear_user_page_asm)
ENTRY_CFI(flush_dcache_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
@@ -903,16 +850,9 @@ ENTRY_CFI(flush_dcache_page_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_dcache_page_asm)
ENTRY_CFI(flush_icache_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
@@ -977,16 +917,9 @@ ENTRY_CFI(flush_icache_page_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
@@ -1020,16 +953,9 @@ ENTRY_CFI(flush_kernel_dcache_page_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_kernel_dcache_page_asm)
ENTRY_CFI(purge_kernel_dcache_page_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
@@ -1062,16 +988,9 @@ ENTRY_CFI(purge_kernel_dcache_page_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(purge_kernel_dcache_page_asm)
ENTRY_CFI(flush_user_dcache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1083,16 +1002,9 @@ ENTRY_CFI(flush_user_dcache_range_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_user_dcache_range_asm)
ENTRY_CFI(flush_kernel_dcache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1105,16 +1017,9 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
syncdma
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_kernel_dcache_range_asm)
ENTRY_CFI(purge_kernel_dcache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1127,16 +1032,9 @@ ENTRY_CFI(purge_kernel_dcache_range_asm)
syncdma
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(purge_kernel_dcache_range_asm)
ENTRY_CFI(flush_user_icache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1148,16 +1046,9 @@ ENTRY_CFI(flush_user_icache_range_asm)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_user_icache_range_asm)
ENTRY_CFI(flush_kernel_icache_page)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
@@ -1191,16 +1082,9 @@ ENTRY_CFI(flush_kernel_icache_page)
sync
bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(flush_kernel_icache_page)
ENTRY_CFI(flush_kernel_icache_range_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1212,8 +1096,6 @@ ENTRY_CFI(flush_kernel_icache_range_asm)
sync
bv %r0(%r2)
nop
- .exit
- .procend
ENDPROC_CFI(flush_kernel_icache_range_asm)
__INIT
@@ -1223,10 +1105,6 @@ ENDPROC_CFI(flush_kernel_icache_range_asm)
*/
.align 256
ENTRY_CFI(disable_sr_hashing_asm)
- .proc
- .callinfo NO_CALLS
- .entry
-
/*
* Switch to real mode
*/
@@ -1308,9 +1186,6 @@ srdis_done:
2: bv %r0(%r2)
nop
- .exit
-
- .procend
ENDPROC_CFI(disable_sr_hashing_asm)
.end
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 6df07ce4f3c2..04c48f1ef3fb 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -21,13 +21,12 @@
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
-#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/scatterlist.h>
-#include <linux/export.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
#include <asm/cacheflush.h>
#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
@@ -395,7 +394,7 @@ pcxl_dma_init(void)
__initcall(pcxl_dma_init);
-static void *pa11_dma_alloc(struct device *dev, size_t size,
+static void *pcxl_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
unsigned long vaddr;
@@ -422,190 +421,60 @@ static void *pa11_dma_alloc(struct device *dev, size_t size,
return (void *)vaddr;
}
-static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
+static void *pcx_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
- int order;
-
- order = get_order(size);
- size = 1 << (order + PAGE_SHIFT);
- unmap_uncached_pages((unsigned long)vaddr, size);
- pcxl_free_range((unsigned long)vaddr, size);
- free_pages((unsigned long)__va(dma_handle), order);
-}
+ void *addr;
-static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction, unsigned long attrs)
-{
- void *addr = page_address(page) + offset;
- BUG_ON(direction == DMA_NONE);
+ if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
+ return NULL;
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- flush_kernel_dcache_range((unsigned long) addr, size);
+ addr = (void *)__get_free_pages(flag, get_order(size));
+ if (addr)
+ *dma_handle = (dma_addr_t)virt_to_phys(addr);
- return virt_to_phys(addr);
+ return addr;
}
-static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
- BUG_ON(direction == DMA_NONE);
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- return;
- if (direction == DMA_TO_DEVICE)
- return;
-
- /*
- * For PCI_DMA_FROMDEVICE this flush is not necessary for the
- * simple map/unmap case. However, it IS necessary if
- * pci_dma_sync_single_* has been called and the buffer reused.
- */
-
- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
+ if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl)
+ return pcxl_dma_alloc(dev, size, dma_handle, gfp, attrs);
+ else
+ return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
}
-static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
{
- int i;
- struct scatterlist *sg;
-
- BUG_ON(direction == DMA_NONE);
+ int order = get_order(size);
- for_each_sg(sglist, sg, nents, i) {
- unsigned long vaddr = (unsigned long)sg_virt(sg);
+ if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
+ size = 1 << (order + PAGE_SHIFT);
+ unmap_uncached_pages((unsigned long)vaddr, size);
+ pcxl_free_range((unsigned long)vaddr, size);
- sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
- sg_dma_len(sg) = sg->length;
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- continue;
-
- flush_kernel_dcache_range(vaddr, sg->length);
+ vaddr = __va(dma_handle);
}
- return nents;
-}
-
-static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- BUG_ON(direction == DMA_NONE);
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- return;
-
- if (direction == DMA_TO_DEVICE)
- return;
-
- /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
- for_each_sg(sglist, sg, nents, i)
- flush_kernel_vmap_range(sg_virt(sg), sg->length);
-}
-
-static void pa11_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
- size);
-}
-
-static void pa11_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
- size);
+ free_pages((unsigned long)vaddr, get_order(size));
}
-static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- int i;
- struct scatterlist *sg;
-
- /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
- for_each_sg(sglist, sg, nents, i)
- flush_kernel_vmap_range(sg_virt(sg), sg->length);
+ flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
-static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- int i;
- struct scatterlist *sg;
-
- /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
- for_each_sg(sglist, sg, nents, i)
- flush_kernel_vmap_range(sg_virt(sg), sg->length);
+ flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
-static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
flush_kernel_dcache_range((unsigned long)vaddr, size);
}
-
-const struct dma_map_ops pcxl_dma_ops = {
- .alloc = pa11_dma_alloc,
- .free = pa11_dma_free,
- .map_page = pa11_dma_map_page,
- .unmap_page = pa11_dma_unmap_page,
- .map_sg = pa11_dma_map_sg,
- .unmap_sg = pa11_dma_unmap_sg,
- .sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
- .sync_single_for_device = pa11_dma_sync_single_for_device,
- .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
- .sync_sg_for_device = pa11_dma_sync_sg_for_device,
- .cache_sync = pa11_dma_cache_sync,
-};
-
-static void *pcx_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
-{
- void *addr;
-
- if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
- return NULL;
-
- addr = (void *)__get_free_pages(flag, get_order(size));
- if (addr)
- *dma_handle = (dma_addr_t)virt_to_phys(addr);
-
- return addr;
-}
-
-static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t iova, unsigned long attrs)
-{
- free_pages((unsigned long)vaddr, get_order(size));
- return;
-}
-
-const struct dma_map_ops pcx_dma_ops = {
- .alloc = pcx_dma_alloc,
- .free = pcx_dma_free,
- .map_page = pa11_dma_map_page,
- .unmap_page = pa11_dma_unmap_page,
- .map_sg = pa11_dma_map_sg,
- .unmap_sg = pa11_dma_unmap_sg,
- .sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
- .sync_single_for_device = pa11_dma_sync_single_for_device,
- .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
- .sync_sg_for_device = pa11_dma_sync_sg_for_device,
- .cache_sync = pa11_dma_cache_sync,
-};
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index b931745815e0..eb39e7e380d7 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -302,7 +302,7 @@ get_wchan(struct task_struct *p)
ip = info.ip;
if (!in_sched_functions(ip))
return ip;
- } while (count++ < 16);
+ } while (count++ < MAX_UNWIND_ENTRIES);
return 0;
}
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 7aa1d4d0d444..2582df1c529b 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -676,3 +676,103 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
#endif
return &user_parisc_native_view;
}
+
+
+/* HAVE_REGS_AND_STACK_ACCESS_API feature */
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_INDEX(r,i) {.name = #r#i, .offset = offsetof(struct pt_regs, r[i])}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_INDEX(gr,0),
+ REG_OFFSET_INDEX(gr,1),
+ REG_OFFSET_INDEX(gr,2),
+ REG_OFFSET_INDEX(gr,3),
+ REG_OFFSET_INDEX(gr,4),
+ REG_OFFSET_INDEX(gr,5),
+ REG_OFFSET_INDEX(gr,6),
+ REG_OFFSET_INDEX(gr,7),
+ REG_OFFSET_INDEX(gr,8),
+ REG_OFFSET_INDEX(gr,9),
+ REG_OFFSET_INDEX(gr,10),
+ REG_OFFSET_INDEX(gr,11),
+ REG_OFFSET_INDEX(gr,12),
+ REG_OFFSET_INDEX(gr,13),
+ REG_OFFSET_INDEX(gr,14),
+ REG_OFFSET_INDEX(gr,15),
+ REG_OFFSET_INDEX(gr,16),
+ REG_OFFSET_INDEX(gr,17),
+ REG_OFFSET_INDEX(gr,18),
+ REG_OFFSET_INDEX(gr,19),
+ REG_OFFSET_INDEX(gr,20),
+ REG_OFFSET_INDEX(gr,21),
+ REG_OFFSET_INDEX(gr,22),
+ REG_OFFSET_INDEX(gr,23),
+ REG_OFFSET_INDEX(gr,24),
+ REG_OFFSET_INDEX(gr,25),
+ REG_OFFSET_INDEX(gr,26),
+ REG_OFFSET_INDEX(gr,27),
+ REG_OFFSET_INDEX(gr,28),
+ REG_OFFSET_INDEX(gr,29),
+ REG_OFFSET_INDEX(gr,30),
+ REG_OFFSET_INDEX(gr,31),
+ REG_OFFSET_INDEX(sr,0),
+ REG_OFFSET_INDEX(sr,1),
+ REG_OFFSET_INDEX(sr,2),
+ REG_OFFSET_INDEX(sr,3),
+ REG_OFFSET_INDEX(sr,4),
+ REG_OFFSET_INDEX(sr,5),
+ REG_OFFSET_INDEX(sr,6),
+ REG_OFFSET_INDEX(sr,7),
+ REG_OFFSET_INDEX(iasq,0),
+ REG_OFFSET_INDEX(iasq,1),
+ REG_OFFSET_INDEX(iaoq,0),
+ REG_OFFSET_INDEX(iaoq,1),
+ REG_OFFSET_NAME(cr27),
+ REG_OFFSET_NAME(ksp),
+ REG_OFFSET_NAME(kpc),
+ REG_OFFSET_NAME(sar),
+ REG_OFFSET_NAME(iir),
+ REG_OFFSET_NAME(isr),
+ REG_OFFSET_NAME(ior),
+ REG_OFFSET_NAME(ipsw),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_query_register_name() - query register name from its offset
+ * @offset: the offset of a register in struct pt_regs.
+ *
+ * regs_query_register_name() returns the name of a register from its
+ * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
+ */
+const char *regs_query_register_name(unsigned int offset)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (roff->offset == offset)
+ return roff->name;
+ return NULL;
+}
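
The regoffset_table pattern above is symmetric: one static table serves both lookups. A minimal userspace sketch of the same pattern (the toy_regs struct is hypothetical, for illustration only):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_regs { unsigned long pc, sp; };           /* hypothetical layout */

struct reg_off { const char *name; int offset; };

static const struct reg_off table[] = {
	{ "pc", offsetof(struct toy_regs, pc) },
	{ "sp", offsetof(struct toy_regs, sp) },
	{ NULL, 0 },                                 /* REG_OFFSET_END analogue */
};

static int query_offset(const char *name)            /* name -> offset */
{
	for (const struct reg_off *r = table; r->name; r++)
		if (!strcmp(r->name, name))
			return r->offset;
	return -1;
}

static const char *query_name(int offset)             /* offset -> name */
{
	for (const struct reg_off *r = table; r->name; r++)
		if (r->offset == offset)
			return r->name;
	return NULL;
}

int main(void)
{
	int off = query_offset("sp");

	printf("sp: offset %d, name round-trips to %s\n", off, query_name(off));
	return 0;
}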
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index cc9963421a19..2b16d8d6598f 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -35,12 +35,6 @@ real32_stack:
real64_stack:
.block 8192
-#ifdef CONFIG_64BIT
-# define REG_SZ 8
-#else
-# define REG_SZ 4
-#endif
-
#define N_SAVED_REGS 9
save_cr_space:
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 8d3a7b80ac42..4e87c35c22b7 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -97,14 +97,12 @@ void __init dma_ops_init(void)
panic( "PA-RISC Linux currently only supports machines that conform to\n"
"the PA-RISC 1.1 or 2.0 architecture specification.\n");
- case pcxs:
- case pcxt:
- hppa_dma_ops = &pcx_dma_ops;
- break;
case pcxl2:
pa7300lc_init();
case pcxl: /* falls through */
- hppa_dma_ops = &pcxl_dma_ops;
+ case pcxs:
+ case pcxt:
+ hppa_dma_ops = &dma_noncoherent_ops;
break;
default:
break;
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 4886a6db42e9..5f7e57fcaeef 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -629,12 +629,12 @@ cas_action:
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
-1: ldw,ma 0(%r26), %r28
+1: ldw 0(%r26), %r28
sub,<> %r28, %r25, %r0
-2: stw,ma %r24, 0(%r26)
+2: stw %r24, 0(%r26)
/* Free lock */
sync
- stw,ma %r20, 0(%sr2,%r20)
+ stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
@@ -798,30 +798,30 @@ cas2_action:
ldo 1(%r0),%r28
/* 8bit CAS */
-13: ldb,ma 0(%r26), %r29
+13: ldb 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-14: stb,ma %r24, 0(%r26)
+14: stb %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 16bit CAS */
-15: ldh,ma 0(%r26), %r29
+15: ldh 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-16: sth,ma %r24, 0(%r26)
+16: sth %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 32bit CAS */
-17: ldw,ma 0(%r26), %r29
+17: ldw 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-18: stw,ma %r24, 0(%r26)
+18: stw %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
@@ -829,10 +829,10 @@ cas2_action:
/* 64bit CAS */
#ifdef CONFIG_64BIT
-19: ldd,ma 0(%r26), %r29
+19: ldd 0(%r26), %r29
sub,*= %r29, %r25, %r0
b,n cas2_end
-20: std,ma %r24, 0(%r26)
+20: std %r24, 0(%r26)
copy %r0, %r28
#else
/* Compare first word */
@@ -851,7 +851,7 @@ cas2_action:
cas2_end:
/* Free lock */
sync
- stw,ma %r20, 0(%sr2,%r20)
+ stw %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 4309ad31a874..318815212518 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -172,7 +172,7 @@ static void do_show_stack(struct unwind_frame_info *info)
int i = 1;
printk(KERN_CRIT "Backtrace:\n");
- while (i <= 16) {
+ while (i <= MAX_UNWIND_ENTRIES) {
if (unwind_once(info) < 0 || info->ip == 0)
break;
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 2ef83d78eec4..5cdf13069dd9 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/kallsyms.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
@@ -117,7 +116,8 @@ unwind_table_init(struct unwind_table *table, const char *name,
for (; start <= end; start++) {
if (start < end &&
start->region_end > (start+1)->region_start) {
- printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
+ pr_warn("Out of order unwind entry! %px and %px\n",
+ start, start+1);
}
start->region_start += base_addr;
@@ -203,25 +203,60 @@ int __init unwind_init(void)
return 0;
}
-#ifdef CONFIG_64BIT
-#define get_func_addr(fptr) fptr[2]
-#else
-#define get_func_addr(fptr) fptr[0]
-#endif
-
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
- extern void handle_interruption(int, struct pt_regs *);
- static unsigned long *hi = (unsigned long *)&handle_interruption;
-
- if (pc == get_func_addr(hi)) {
+ /*
+ * We have to use void * instead of a function pointer, because
+ * function pointers aren't pointers to the function itself on 64-bit.
+ * Make them const so the compiler knows they live in .text.
+ */
+ extern void * const handle_interruption;
+ extern void * const ret_from_kernel_thread;
+ extern void * const syscall_exit;
+ extern void * const intr_return;
+ extern void * const _switch_to_ret;
+#ifdef CONFIG_IRQSTACKS
+ extern void * const call_on_stack;
+#endif /* CONFIG_IRQSTACKS */
+
+ if (pc == (unsigned long) &handle_interruption) {
struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
dbg("Unwinding through handle_interruption()\n");
info->prev_sp = regs->gr[30];
info->prev_ip = regs->iaoq[0];
+ return 1;
+ }
+
+ if (pc == (unsigned long) &ret_from_kernel_thread ||
+ pc == (unsigned long) &syscall_exit) {
+ info->prev_sp = info->prev_ip = 0;
+ return 1;
+ }
+
+ if (pc == (unsigned long) &intr_return) {
+ struct pt_regs *regs;
+
+ dbg("Found intr_return()\n");
+ regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
+ info->prev_sp = regs->gr[30];
+ info->prev_ip = regs->iaoq[0];
+ info->rp = regs->gr[2];
+ return 1;
+ }
+
+ if (pc == (unsigned long) &_switch_to_ret) {
+ info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
+ info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
+ return 1;
+ }
+#ifdef CONFIG_IRQSTACKS
+ if (pc == (unsigned long) &call_on_stack) {
+ info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
+ info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
return 1;
}
+#endif
return 0;
}
@@ -238,34 +273,8 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
if (e == NULL) {
unsigned long sp;
- dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
-
-#ifdef CONFIG_KALLSYMS
- /* Handle some frequent special cases.... */
- {
- char symname[KSYM_NAME_LEN];
- char *modname;
-
- kallsyms_lookup(info->ip, NULL, NULL, &modname,
- symname);
-
- dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
-
- if (strcmp(symname, "_switch_to_ret") == 0) {
- info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
- info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
- dbg("_switch_to_ret @ %lx - setting "
- "prev_sp=%lx prev_ip=%lx\n",
- info->ip, info->prev_sp,
- info->prev_ip);
- return;
- } else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
- strcmp(symname, "syscall_exit") == 0) {
- info->prev_ip = info->prev_sp = 0;
- return;
- }
- }
-#endif
+ dbg("Cannot find unwind entry for %pS; forced unwinding\n",
+ (void *) info->ip);
/* Since we are doing the unwinding blind, we don't know if
we are adjusting the stack correctly or extracting the rp
@@ -439,8 +448,8 @@ unsigned long return_address(unsigned int level)
/* initialize unwind info */
asm volatile ("copy %%r30, %0" : "=r"(sp));
memset(&r, 0, sizeof(struct pt_regs));
- r.iaoq[0] = (unsigned long) current_text_addr();
- r.gr[2] = (unsigned long) __builtin_return_address(0);
+ r.iaoq[0] = _THIS_IP_;
+ r.gr[2] = _RET_IP_;
r.gr[30] = sp;
unwind_frame_init(&info, current, &r);
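
The removed get_func_addr() macro above read fptr[2] on 64-bit because a parisc64 function pointer refers to a function descriptor rather than to the code itself; declaring the entry points as data objects, as the new code does, sidesteps the descriptor. A hedged sketch of that indirection (descriptor layout hypothetical, for illustration only):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical descriptor: the entry address sits at word index 2,
 * matching the old fptr[2] access in get_func_addr(). */
struct func_desc {
	uintptr_t pad[2];
	uintptr_t entry;   /* actual code address */
	uintptr_t gp;      /* global pointer for the callee */
};

int main(void)
{
	struct func_desc d = { { 0, 0 }, 0x10203040, 0x55667788 };
	uintptr_t *fptr = (uintptr_t *)&d;   /* what a 64-bit fn ptr refers to */

	printf("code address = %#lx\n", (unsigned long)fptr[2]);
	return 0;
}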
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index d4fe19806d57..b53fb6fedf06 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -64,9 +64,6 @@
*/
ENTRY_CFI(lclear_user)
- .proc
- .callinfo NO_CALLS
- .entry
comib,=,n 0,%r25,$lclu_done
get_sr
$lclu_loop:
@@ -81,13 +78,9 @@ $lclu_done:
ldo 1(%r25),%r25
ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
-
- .exit
ENDPROC_CFI(lclear_user)
- .procend
-
/*
* long lstrnlen_user(char *s, long n)
*
@@ -97,9 +90,6 @@ ENDPROC_CFI(lclear_user)
*/
ENTRY_CFI(lstrnlen_user)
- .proc
- .callinfo NO_CALLS
- .entry
comib,= 0,%r25,$lslen_nzero
copy %r26,%r24
get_sr
@@ -111,7 +101,6 @@ $lslen_loop:
$lslen_done:
bv %r0(%r2)
sub %r26,%r24,%r28
- .exit
$lslen_nzero:
b $lslen_done
@@ -125,9 +114,6 @@ $lslen_nzero:
ENDPROC_CFI(lstrnlen_user)
- .procend
-
-
/*
* unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
@@ -186,10 +172,6 @@ ENDPROC_CFI(lstrnlen_user)
save_len = r31
ENTRY_CFI(pa_memcpy)
- .proc
- .callinfo NO_CALLS
- .entry
-
/* Last destination address */
add dst,len,end
@@ -439,9 +421,6 @@ ENTRY_CFI(pa_memcpy)
b .Lcopy_done
10: stw,ma t1,4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
-
- .exit
ENDPROC_CFI(pa_memcpy)
- .procend
.end
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 2607d2d33405..74842d28a7a1 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -19,7 +19,6 @@
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/pci.h> /* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
@@ -616,17 +615,13 @@ void __init mem_init(void)
free_all_bootmem();
#ifdef CONFIG_PA11
- if (hppa_dma_ops == &pcxl_dma_ops) {
+ if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
+ PCXL_DMA_MAP_SIZE);
- } else {
- pcxl_dma_start = 0;
- parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
- }
-#else
- parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
+ } else
#endif
+ parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
mem_init_print_info(NULL);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9f2b75fe2c2d..1c10ff0406f2 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -383,10 +383,6 @@ config PGTABLE_LEVELS
default 3 if PPC_64K_PAGES && !PPC_BOOK3S_64
default 4
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
source "arch/powerpc/sysdev/Kconfig"
source "arch/powerpc/platforms/Kconfig"
@@ -397,8 +393,6 @@ config HIGHMEM
depends on PPC32
source kernel/Kconfig.hz
-source kernel/Kconfig.preempt
-source "fs/Kconfig.binfmt"
config HUGETLB_PAGE_SIZE_VARIABLE
bool
@@ -641,8 +635,6 @@ config ILLEGAL_POINTER_VALUE
default 0x5deadbeef0000000 if PPC64
default 0
-source "mm/Kconfig"
-
config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG
@@ -1201,20 +1193,6 @@ endif
config ARCH_RANDOM
def_bool n
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "lib/Kconfig"
-
-source "arch/powerpc/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
config PPC_LIB_RHEAP
bool
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index c45424c64e19..fd63cd914a74 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config PPC_DISABLE_WERROR
bool "Don't build arch/powerpc code with -Werror"
@@ -379,5 +376,3 @@ config PPC_FAST_ENDIAN_SWITCH
depends on DEBUG_KERNEL && PPC_BOOK3S_64
help
If you're unsure what this is, say N.
-
-endmenu
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi
index abd01d466de4..9b6cf9149937 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi
@@ -37,12 +37,13 @@ fman0: fman@400000 {
#size-cells = <1>;
cell-index = <0>;
compatible = "fsl,fman";
- ranges = <0 0x400000 0x100000>;
- reg = <0x400000 0x100000>;
+ ranges = <0 0x400000 0xfe000>;
+ reg = <0x400000 0xfe000>;
interrupts = <96 2 0 0>, <16 2 1 1>;
clocks = <&clockgen 3 0>;
clock-names = "fmanclk";
fsl,qman-channel-range = <0x40 0xc>;
+ ptimer-handle = <&ptp_timer0>;
muram@0 {
compatible = "fsl,fman-muram";
@@ -93,9 +94,11 @@ fman0: fman@400000 {
reg = <0x87000 0x1000>;
status = "disabled";
};
+};
- ptp_timer0: ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
- };
+ptp_timer0: ptp-timer@4fe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0x4fe000 0x1000>;
+ interrupts = <96 2 0 0>;
+ clocks = <&clockgen 3 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi
index debea75fd3f0..e95c11ff0417 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi
@@ -37,12 +37,13 @@ fman1: fman@500000 {
#size-cells = <1>;
cell-index = <1>;
compatible = "fsl,fman";
- ranges = <0 0x500000 0x100000>;
- reg = <0x500000 0x100000>;
+ ranges = <0 0x500000 0xfe000>;
+ reg = <0x500000 0xfe000>;
interrupts = <97 2 0 0>, <16 2 1 0>;
clocks = <&clockgen 3 1>;
clock-names = "fmanclk";
fsl,qman-channel-range = <0x60 0xc>;
+ ptimer-handle = <&ptp_timer1>;
muram@0 {
compatible = "fsl,fman-muram";
@@ -93,9 +94,11 @@ fman1: fman@500000 {
reg = <0x87000 0x1000>;
status = "disabled";
};
+};
- ptp_timer1: ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
- };
+ptp_timer1: ptp-timer@5fe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0x5fe000 0x1000>;
+ interrupts = <97 2 0 0>;
+ clocks = <&clockgen 3 1>;
};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi
index 3a20e0d1a6d2..d62b36c5a329 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi
@@ -37,12 +37,13 @@ fman0: fman@400000 {
#size-cells = <1>;
cell-index = <0>;
compatible = "fsl,fman";
- ranges = <0 0x400000 0x100000>;
- reg = <0x400000 0x100000>;
+ ranges = <0 0x400000 0xfe000>;
+ reg = <0x400000 0xfe000>;
interrupts = <96 2 0 0>, <16 2 1 1>;
clocks = <&clockgen 3 0>;
clock-names = "fmanclk";
fsl,qman-channel-range = <0x800 0x10>;
+ ptimer-handle = <&ptp_timer0>;
muram@0 {
compatible = "fsl,fman-muram";
@@ -98,9 +99,11 @@ fman0: fman@400000 {
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xfd000 0x1000>;
};
+};
- ptp_timer0: ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
- };
+ptp_timer0: ptp-timer@4fe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0x4fe000 0x1000>;
+ interrupts = <96 2 0 0>;
+ clocks = <&clockgen 3 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi
index 82750ac944c7..310232460500 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi
@@ -37,12 +37,13 @@ fman1: fman@500000 {
#size-cells = <1>;
cell-index = <1>;
compatible = "fsl,fman";
- ranges = <0 0x500000 0x100000>;
- reg = <0x500000 0x100000>;
+ ranges = <0 0x500000 0xfe000>;
+ reg = <0x500000 0xfe000>;
interrupts = <97 2 0 0>, <16 2 1 0>;
clocks = <&clockgen 3 1>;
clock-names = "fmanclk";
fsl,qman-channel-range = <0x820 0x10>;
+ ptimer-handle = <&ptp_timer1>;
muram@0 {
compatible = "fsl,fman-muram";
@@ -98,9 +99,11 @@ fman1: fman@500000 {
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xfd000 0x1000>;
};
+};
- ptp_timer1: ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
- };
+ptp_timer1: ptp-timer@5fe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0x5fe000 0x1000>;
+ interrupts = <97 2 0 0>;
+ clocks = <&clockgen 3 1>;
};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi
index 7f60b6060176..c90702b04a53 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi
@@ -37,12 +37,13 @@ fman0: fman@400000 {
#size-cells = <1>;
cell-index = <0>;
compatible = "fsl,fman";
- ranges = <0 0x400000 0x100000>;
- reg = <0x400000 0x100000>;
+ ranges = <0 0x400000 0xfe000>;
+ reg = <0x400000 0xfe000>;
interrupts = <96 2 0 0>, <16 2 1 1>;
clocks = <&clockgen 3 0>;
clock-names = "fmanclk";
fsl,qman-channel-range = <0x800 0x10>;
+ ptimer-handle = <&ptp_timer0>;
muram@0 {
compatible = "fsl,fman-muram";
@@ -86,9 +87,11 @@ fman0: fman@400000 {
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xfd000 0x1000>;
};
+};
- ptp_timer0: ptp-timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
- };
+ptp_timer0: ptp-timer@4fe000 {
+ compatible = "fsl,fman-ptp-timer";
+ reg = <0x4fe000 0x1000>;
+ interrupts = <96 2 0 0>;
+ clocks = <&clockgen 3 0>;
};
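With the ptp-timer nodes moved out of the fman nodes across these dtsi files, a driver can no longer discover the timer as a FMan child; the new ptimer-handle phandle is the link instead. A hedged sketch of the consumer-side lookup follows (the helper name is an assumption; the property name comes from the diff):

#include <linux/of.h>

/* Resolve the ptp-timer node referenced by a FMan node's
 * "ptimer-handle" property; returns NULL if the property is absent.
 * The caller owns the returned reference and must of_node_put() it. */
static struct device_node *fman_get_ptp_node(struct device_node *fman_np)
{
        return of_parse_phandle(fman_np, "ptimer-handle", 0);
}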
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index 10940533da71..f5c366b02828 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -78,7 +78,6 @@ CONFIG_GPIO_HLWD=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_GPIO=y
# CONFIG_HWMON is not set
-CONFIG_SSB_DEBUG=y
CONFIG_FB=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
diff --git a/arch/powerpc/crypto/md5-glue.c b/arch/powerpc/crypto/md5-glue.c
index 92289679b4c4..7e44cec37bdb 100644
--- a/arch/powerpc/crypto/md5-glue.c
+++ b/arch/powerpc/crypto/md5-glue.c
@@ -139,7 +139,6 @@ static struct shash_alg alg = {
.cra_name = "md5",
.cra_driver_name= "md5-ppc",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c
index f9ebc38d3fe7..9e1814d99318 100644
--- a/arch/powerpc/crypto/sha1-spe-glue.c
+++ b/arch/powerpc/crypto/sha1-spe-glue.c
@@ -185,7 +185,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-ppc-spe",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index c154cebc1041..3911d5c254fa 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -132,7 +132,6 @@ static struct shash_alg alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-powerpc",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c
index 718a079dcdbf..6227888dcf7e 100644
--- a/arch/powerpc/crypto/sha256-spe-glue.c
+++ b/arch/powerpc/crypto/sha256-spe-glue.c
@@ -231,7 +231,6 @@ static struct shash_alg algs[2] = { {
.cra_name = "sha256",
.cra_driver_name= "sha256-ppc-spe",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -248,7 +247,6 @@ static struct shash_alg algs[2] = { {
.cra_name = "sha224",
.cra_driver_name= "sha224-ppc-spe",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
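The cra_flags removals in these crypto glue files share one rationale: the shash registration core sets the CRYPTO_ALG_TYPE_SHASH type bits itself, so the per-algorithm copies were redundant. A hedged skeleton of a registration without the flag; the name, sizes and the elided init/update/final callbacks are placeholders, not a real algorithm:

#include <crypto/internal/hash.h>
#include <linux/module.h>

static struct shash_alg demo_alg = {
        .digestsize     = 16,   /* placeholder */
        /* .init, .update and .final callbacks omitted in this sketch */
        .base           = {
                .cra_name        = "demo",
                .cra_driver_name = "demo-generic",
                .cra_priority    = 100,
                /* no .cra_flags: the type is implied by shash */
                .cra_blocksize   = 64,  /* placeholder */
                .cra_module      = THIS_MODULE,
        },
};

static int __init demo_mod_init(void)
{
        return crypto_register_shash(&demo_alg);
}

static void __exit demo_mod_exit(void)
{
        crypto_unregister_shash(&demo_alg);
}
module_init(demo_mod_init);
module_exit(demo_mod_exit);
MODULE_LICENSE("GPL");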
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 682b3e6a1e21..963abf8bf1c0 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -18,18 +18,11 @@
* a "bne-" instruction at the end, so an isync is enough as a acquire barrier
* on the platform without lwsync.
*/
-#define __atomic_op_acquire(op, args...) \
-({ \
- typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
- __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory"); \
- __ret; \
-})
-
-#define __atomic_op_release(op, args...) \
-({ \
- __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory"); \
- op##_relaxed(args); \
-})
+#define __atomic_acquire_fence() \
+ __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
+
+#define __atomic_release_fence() \
+ __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
static __inline__ int atomic_read(const atomic_t *v)
{
@@ -129,8 +122,6 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
static __inline__ void atomic_inc(atomic_t *v)
{
int t;
@@ -145,6 +136,7 @@ static __inline__ void atomic_inc(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic_inc atomic_inc
static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
@@ -163,16 +155,6 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
return t;
}
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
static __inline__ void atomic_dec(atomic_t *v)
{
int t;
@@ -187,6 +169,7 @@ static __inline__ void atomic_dec(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic_dec atomic_dec
static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
@@ -218,7 +201,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -226,13 +209,13 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int t;
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%1 # __atomic_add_unless\n\
+"1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\
cmpw 0,%0,%3 \n\
beq 2f \n\
add %0,%2,%0 \n"
@@ -248,6 +231,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return t;
}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
/**
* atomic_inc_not_zero - increment unless the number is zero
@@ -280,9 +264,6 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v)
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
-#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
-
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1, even if
@@ -412,8 +393,6 @@ ATOMIC64_OPS(xor, xor)
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
static __inline__ void atomic64_inc(atomic64_t *v)
{
long t;
@@ -427,6 +406,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic64_inc atomic64_inc
static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
@@ -444,16 +424,6 @@ static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
return t;
}
-/*
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
static __inline__ void atomic64_dec(atomic64_t *v)
{
long t;
@@ -467,6 +437,7 @@ static __inline__ void atomic64_dec(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic64_dec atomic64_dec
static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
@@ -487,9 +458,6 @@ static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1.
@@ -513,6 +481,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
return t;
}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
@@ -524,7 +493,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -532,13 +501,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
long t;
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
+"1: ldarx %0,0,%1 # atomic64_fetch_add_unless\n\
cmpd 0,%0,%3 \n\
beq 2f \n\
add %0,%2,%0 \n"
@@ -551,8 +520,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
: "r" (&v->counter), "r" (a), "r" (u)
: "cc", "memory");
- return t != u;
+ return t;
}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
/**
* atomic64_inc_not_zero - increment unless the number is zero
@@ -582,6 +552,7 @@ static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
return t1 != 0;
}
+#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
#endif /* __powerpc64__ */
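The interesting part of the atomic.h hunk is the interface shrink: instead of each arch open-coding __atomic_op_acquire()/__atomic_op_release(), it now supplies only the two fences and lets the generic layer compose every acquire/release variant from the relaxed primitive. A hedged reconstruction of that composition, close to but not quoted from include/linux/atomic.h:

#define __atomic_op_acquire(op, args...)                                \
({                                                                      \
        /* run the relaxed op, then order later accesses after it */   \
        typeof(op##_relaxed(args)) __ret = op##_relaxed(args);          \
        __atomic_acquire_fence();                                       \
        __ret;                                                          \
})

#define __atomic_op_release(op, args...)                                \
({                                                                      \
        /* order earlier accesses before the relaxed op */              \
        __atomic_release_fence();                                       \
        op##_relaxed(args);                                             \
})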
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 8e7b09703ca4..27d6e3c8fde9 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -52,6 +52,7 @@ struct arch_hw_breakpoint {
#include <asm/reg.h>
#include <asm/debug.h>
+struct perf_event_attr;
struct perf_event;
struct pmu;
struct perf_sample_data;
@@ -60,8 +61,10 @@ struct perf_sample_data;
extern int hw_breakpoint_slots(int type);
extern int arch_bp_generic_fields(int type, int *gen_bp_type);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
int arch_install_hw_breakpoint(struct perf_event *bp);
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index 9f3be5c8a4a3..785c464b6588 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -88,7 +88,6 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_saved_msr;
- struct pt_regs jprobe_saved_regs;
struct prev_kprobe prev_kprobe;
};
@@ -103,17 +102,6 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_handler(struct pt_regs *regs);
extern int kprobe_post_handler(struct pt_regs *regs);
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern int __is_active_jprobe(unsigned long addr);
-extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb);
-#else
-static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- return 0;
-}
-#endif
#else
static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 155170d70324..dbfc7056d7df 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -357,9 +357,6 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask);
static int __init dma_init(void)
{
-#ifdef CONFIG_PCI
- dma_debug_add_bus(&pci_bus_type);
-#endif
#ifdef CONFIG_IBMVIO
dma_debug_add_bus(&vio_bus_type);
#endif
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 80547dad37da..fec8a6773119 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -119,11 +119,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
/*
* Check for virtual address in kernel space.
*/
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
- return is_kernel_addr(info->address);
+ return is_kernel_addr(hw->address);
}
int arch_bp_generic_fields(int type, int *gen_bp_type)
@@ -141,30 +139,31 @@ int arch_bp_generic_fields(int type, int *gen_bp_type)
/*
* Validate the arch-specific HW Breakpoint register settings
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
int ret = -EINVAL, length_max;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
if (!bp)
return ret;
- info->type = HW_BRK_TYPE_TRANSLATE;
- if (bp->attr.bp_type & HW_BREAKPOINT_R)
- info->type |= HW_BRK_TYPE_READ;
- if (bp->attr.bp_type & HW_BREAKPOINT_W)
- info->type |= HW_BRK_TYPE_WRITE;
- if (info->type == HW_BRK_TYPE_TRANSLATE)
+ hw->type = HW_BRK_TYPE_TRANSLATE;
+ if (attr->bp_type & HW_BREAKPOINT_R)
+ hw->type |= HW_BRK_TYPE_READ;
+ if (attr->bp_type & HW_BREAKPOINT_W)
+ hw->type |= HW_BRK_TYPE_WRITE;
+ if (hw->type == HW_BRK_TYPE_TRANSLATE)
/* must set at least read or write */
return ret;
- if (!(bp->attr.exclude_user))
- info->type |= HW_BRK_TYPE_USER;
- if (!(bp->attr.exclude_kernel))
- info->type |= HW_BRK_TYPE_KERNEL;
- if (!(bp->attr.exclude_hv))
- info->type |= HW_BRK_TYPE_HYP;
- info->address = bp->attr.bp_addr;
- info->len = bp->attr.bp_len;
+ if (!attr->exclude_user)
+ hw->type |= HW_BRK_TYPE_USER;
+ if (!attr->exclude_kernel)
+ hw->type |= HW_BRK_TYPE_KERNEL;
+ if (!attr->exclude_hv)
+ hw->type |= HW_BRK_TYPE_HYP;
+ hw->address = attr->bp_addr;
+ hw->len = attr->bp_len;
/*
* Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
@@ -178,12 +177,12 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
if (cpu_has_feature(CPU_FTR_DAWR)) {
length_max = 512; /* 64 doublewords */
/* DAWR region can't cross 512 boundary */
- if ((bp->attr.bp_addr >> 9) !=
- ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
+ if ((attr->bp_addr >> 9) !=
+ ((attr->bp_addr + attr->bp_len - 1) >> 9))
return -EINVAL;
}
- if (info->len >
- (length_max - (info->address & HW_BREAKPOINT_ALIGN)))
+ if (hw->len >
+ (length_max - (hw->address & HW_BREAKPOINT_ALIGN)))
return -EINVAL;
return 0;
}
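The API change here splits validation away from the perf_event: hw_breakpoint_arch_parse() reads only the attr and fills a caller-provided arch_hw_breakpoint, which the generic code can then install. In-kernel users are unaffected; a hedged sketch of registering a per-CPU kernel watchpoint, modeled on samples/hw_breakpoint (the watched variable is an assumption):

#include <linux/hw_breakpoint.h>
#include <linux/module.h>
#include <linux/perf_event.h>

static int watched;     /* placeholder watch target */
static struct perf_event * __percpu *wp;

static void wp_handler(struct perf_event *bp, struct perf_sample_data *data,
                       struct pt_regs *regs)
{
        pr_info("watched variable was written\n");
}

static int __init wp_init(void)
{
        struct perf_event_attr attr;

        hw_breakpoint_init(&attr);      /* sane defaults */
        attr.bp_addr = (unsigned long)&watched;
        attr.bp_len = HW_BREAKPOINT_LEN_4;
        attr.bp_type = HW_BREAKPOINT_W;

        wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
        if (IS_ERR((void __force *)wp))
                return PTR_ERR((void __force *)wp);
        return 0;
}

static void __exit wp_exit(void)
{
        unregister_wide_hw_breakpoint(wp);
}
module_init(wp_init);
module_exit(wp_exit);
MODULE_LICENSE("GPL");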
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 7a1f99f1b47f..e4a49c051325 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -25,50 +25,6 @@
#include <linux/preempt.h>
#include <linux/ftrace.h>
-/*
- * This is called from ftrace code after invoking registered handlers to
- * disambiguate regs->nip changes done by jprobes and livepatch. We check if
- * there is an active jprobe at the provided address (mcount location).
- */
-int __is_active_jprobe(unsigned long addr)
-{
- if (!preemptible()) {
- struct kprobe *p = raw_cpu_read(current_kprobe);
- return (p && (unsigned long)p->addr == addr) ? 1 : 0;
- }
-
- return 0;
-}
-
-static nokprobe_inline
-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb, unsigned long orig_nip)
-{
- /*
- * Emulate singlestep (and also recover regs->nip)
- * as if there is a nop
- */
- regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
- if (unlikely(p->post_handler)) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- p->post_handler(p, regs, 0);
- }
- __this_cpu_write(current_kprobe, NULL);
- if (orig_nip)
- regs->nip = orig_nip;
- return 1;
-}
-
-int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- if (kprobe_ftrace(p))
- return __skip_singlestep(p, regs, kcb, 0);
- else
- return 0;
-}
-NOKPROBE_SYMBOL(skip_singlestep);
-
/* Ftrace callback handler for kprobes */
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
struct ftrace_ops *ops, struct pt_regs *regs)
@@ -76,18 +32,14 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
struct kprobe *p;
struct kprobe_ctlblk *kcb;
- preempt_disable();
-
p = get_kprobe((kprobe_opcode_t *)nip);
if (unlikely(!p) || kprobe_disabled(p))
- goto end;
+ return;
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
} else {
- unsigned long orig_nip = regs->nip;
-
/*
* On powerpc, NIP is *before* this instruction for the
* pre handler
@@ -96,19 +48,23 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (!p->pre_handler || !p->pre_handler(p, regs))
- __skip_singlestep(p, regs, kcb, orig_nip);
- else {
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
/*
- * If pre_handler returns !0, it sets regs->nip and
- * resets current kprobe. In this case, we should not
- * re-enable preemption.
+ * Emulate singlestep (and also recover regs->nip)
+ * as if there is a nop
*/
- return;
+ regs->nip += MCOUNT_INSN_SIZE;
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
}
+ /*
+ * If pre_handler returns !0, it changes regs->nip. We have to
+ * skip emulating post_handler.
+ */
+ __this_cpu_write(current_kprobe, NULL);
}
-end:
- preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
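The rework above makes kprobe_ftrace_handler() emulate the probed nop inline: if the pre-handler returns 0 it advances regs->nip by MCOUNT_INSN_SIZE and then runs the post-handler; a nonzero return means the pre-handler already redirected regs->nip, so emulation is skipped. The client-side contract is unchanged. A hedged minimal kprobe module showing that contract (the probed symbol name is an assumption):

#include <linux/kprobes.h>
#include <linux/module.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("pre: nip=%pS\n", (void *)instruction_pointer(regs));
        return 0;       /* 0: let the core single-step/emulate the insn */
}

static void demo_post(struct kprobe *p, struct pt_regs *regs,
                      unsigned long flags)
{
        pr_info("post: probed instruction has been emulated\n");
}

static struct kprobe demo_kp = {
        .symbol_name    = "do_sys_open",        /* assumed probe target */
        .pre_handler    = demo_pre,
        .post_handler   = demo_post,
};

static int __init demo_init(void)
{
        return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
        unregister_kprobe(&demo_kp);
}
module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");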
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index e4c5bf33970b..5c60bb0f927f 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -317,25 +317,17 @@ int kprobe_handler(struct pt_regs *regs)
}
prepare_singlestep(p, regs);
return 1;
- } else {
- if (*addr != BREAKPOINT_INSTRUCTION) {
- /* If trap variant, then it belongs not to us */
- kprobe_opcode_t cur_insn = *addr;
- if (is_trap(cur_insn))
- goto no_kprobe;
- /* The breakpoint instruction was removed by
- * another cpu right after we hit, no further
- * handling of this interrupt is appropriate
- */
- ret = 1;
+ } else if (*addr != BREAKPOINT_INSTRUCTION) {
+ /* If trap variant, then it belongs not to us */
+ kprobe_opcode_t cur_insn = *addr;
+
+ if (is_trap(cur_insn))
goto no_kprobe;
- }
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- if (!skip_singlestep(p, regs, kcb))
- goto ss_probe;
- ret = 1;
- }
+ /* The breakpoint instruction was removed by
+ * another cpu right after we hit it; no further
+ * handling of this interrupt is appropriate
+ */
+ ret = 1;
}
goto no_kprobe;
}
@@ -350,7 +342,7 @@ int kprobe_handler(struct pt_regs *regs)
*/
kprobe_opcode_t cur_insn = *addr;
if (is_trap(cur_insn))
- goto no_kprobe;
+ goto no_kprobe;
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
@@ -366,11 +358,13 @@ int kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
set_current_kprobe(p, regs, kcb);
- if (p->pre_handler && p->pre_handler(p, regs))
- /* handler has already set things up, so skip ss setup */
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ /* handler changed execution path, so skip ss setup */
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
+ }
-ss_probe:
if (p->ainsn.boostable >= 0) {
ret = try_to_emulate(p, regs);
@@ -611,60 +605,6 @@ unsigned long arch_deref_entry_point(void *entry)
}
NOKPROBE_SYMBOL(arch_deref_entry_point);
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
-
- /* setup return addr to the jprobe handler routine */
- regs->nip = arch_deref_entry_point(jp->entry);
-#ifdef PPC64_ELF_ABI_v2
- regs->gpr[12] = (unsigned long)jp->entry;
-#elif defined(PPC64_ELF_ABI_v1)
- regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
-#endif
-
- /*
- * jprobes use jprobe_return() which skips the normal return
- * path of the function, and this messes up the accounting of the
- * function graph tracer.
- *
- * Pause function graph tracing while performing the jprobe function.
- */
- pause_graph_tracing();
-
- return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void __used jprobe_return(void)
-{
- asm volatile("jprobe_return_trap:\n"
- "trap\n"
- ::: "memory");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (regs->nip != ppc_kallsyms_lookup_name("jprobe_return_trap")) {
- pr_debug("longjmp_break_handler NIP (0x%lx) does not match jprobe_return_trap (0x%lx)\n",
- regs->nip, ppc_kallsyms_lookup_name("jprobe_return_trap"));
- return 0;
- }
-
- memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
- /* It's OK to start function graph tracing again */
- unpause_graph_tracing();
- preempt_enable_no_resched();
- return 1;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index 9a5b5a513604..32476a6e4e9c 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -104,39 +104,13 @@ ftrace_regs_call:
bl ftrace_stub
nop
- /* Load the possibly modified NIP */
- ld r15, _NIP(r1)
-
+ /* Load ctr with the possibly modified NIP */
+ ld r3, _NIP(r1)
+ mtctr r3
#ifdef CONFIG_LIVEPATCH
- cmpd r14, r15 /* has NIP been altered? */
+ cmpd r14, r3 /* has NIP been altered? */
#endif
-#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
- /* NIP has not been altered, skip over further checks */
- beq 1f
-
- /* Check if there is an active jprobe on us */
- subi r3, r14, 4
- bl __is_active_jprobe
- nop
-
- /*
- * If r3 == 1, then this is a kprobe/jprobe.
- * else, this is livepatched function.
- *
- * The conditional branch for livepatch_handler below will use the
- * result of this comparison. For kprobe/jprobe, we just need to branch to
- * the new NIP, not call livepatch_handler. The branch below is bne, so we
- * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
- * CR0[EQ] = (r3 == 1).
- */
- cmpdi r3, 1
-1:
-#endif
-
- /* Load CTR with the possibly modified NIP */
- mtctr r15
-
/* Restore gprs */
REST_GPR(0,r1)
REST_10GPRS(2,r1)
@@ -154,10 +128,7 @@ ftrace_regs_call:
addi r1, r1, SWITCH_FRAME_SIZE
#ifdef CONFIG_LIVEPATCH
- /*
- * Based on the cmpd or cmpdi above, if the NIP was altered and we're
- * not on a kprobe/jprobe, then handle livepatch.
- */
+ /* Based on the cmpd above, if the NIP was altered handle livepatch */
bne- livepatch_handler
#endif
diff --git a/arch/powerpc/kernel/vdso32/note.S b/arch/powerpc/kernel/vdso32/note.S
index d4b5be4f3d5f..227a7327399e 100644
--- a/arch/powerpc/kernel/vdso32/note.S
+++ b/arch/powerpc/kernel/vdso32/note.S
@@ -5,6 +5,7 @@
#include <linux/uts.h>
#include <linux/version.h>
+#include <linux/build-salt.h>
#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \
.section name, flags; \
@@ -23,3 +24,5 @@
ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0)
.long LINUX_VERSION_CODE
ASM_ELF_NOTE_END
+
+BUILD_SALT
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index de686b340f4a..ee4a8854985e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -216,7 +216,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
wqp = kvm_arch_vcpu_wq(vcpu);
if (swq_has_sleeper(wqp)) {
- swake_up(wqp);
+ swake_up_one(wqp);
++vcpu->stat.halt_wakeup;
}
@@ -3188,7 +3188,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
}
}
- prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_swait_exclusive(&vc->wq, &wait, TASK_INTERRUPTIBLE);
if (kvmppc_vcore_check_block(vc)) {
finish_swait(&vc->wq, &wait);
@@ -3311,7 +3311,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_start_thread(vcpu, vc);
trace_kvm_guest_enter(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
- swake_up(&vc->wq);
+ swake_up_one(&vc->wq);
}
}
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 3f66fcf8ad99..19d8ab49d1bd 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1469,7 +1469,7 @@ static int collect_events(struct perf_event *group, int max_count,
}
/*
- * Add a event to the PMU.
+ * Add an event to the PMU.
* If all events are not already frozen, then we disable and
* re-enable the PMU in order to get hw_perf_enable to do the
* actual work of reconfiguring the PMU.
@@ -1548,7 +1548,7 @@ nocheck:
}
/*
- * Remove a event from the PMU.
+ * Remove an event from the PMU.
*/
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
@@ -1742,7 +1742,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
/*
* Return 1 if we might be able to put the event on a limited PMC,
* or 0 if not.
- * A event can only go on a limited PMC if it counts something
+ * An event can only go on a limited PMC if it counts something
* that a limited PMC can count, doesn't require interrupts, and
* doesn't exclude any processor mode.
*/
diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile
index 30e05decbb4c..4314ba5baf43 100644
--- a/arch/powerpc/purgatory/Makefile
+++ b/arch/powerpc/purgatory/Makefile
@@ -6,9 +6,8 @@ LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined
$(obj)/purgatory.ro: $(obj)/trampoline.o FORCE
$(call if_changed,ld)
-CMD_BIN2C = $(objtree)/scripts/basic/bin2c
quiet_cmd_bin2c = BIN2C $@
- cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@
+ cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
$(call if_changed,bin2c)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 4764fdeb4f1f..a344980287a5 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -212,10 +212,6 @@ endmenu
menu "Kernel type"
-source "mm/Kconfig"
-
-source "kernel/Kconfig.preempt"
-
source "kernel/Kconfig.hz"
endmenu
@@ -242,75 +238,8 @@ source "drivers/pci/Kconfig"
endmenu
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
menu "Power management options"
source kernel/power/Kconfig
endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-menu "Kernel hacking"
-
-config CMDLINE_BOOL
- bool "Built-in kernel command line"
- help
- For most platforms, it is firmware or second stage bootloader
- that by default specifies the kernel command line options.
- However, it might be necessary or advantageous to either override
- the default kernel command line or add a few extra options to it.
- For such cases, this option allows hardcoding command line options
- directly into the kernel.
-
- For that, choose 'Y' here and fill in the extra boot parameters
- in CONFIG_CMDLINE.
-
- The built-in options will be concatenated to the default command
- line if CMDLINE_FORCE is set to 'N'. Otherwise, the default
- command line will be ignored and replaced by the built-in string.
-
-config CMDLINE
- string "Built-in kernel command string"
- depends on CMDLINE_BOOL
- default ""
- help
- Supply command-line options at build time by entering them here.
-
-config CMDLINE_FORCE
- bool "Built-in command line overrides bootloader arguments"
- depends on CMDLINE_BOOL
- help
- Set this option to 'Y' to have the kernel ignore the bootloader
- or firmware command line. Instead, the built-in command line
- will be used exclusively.
-
- If you don't know what to do here, say N.
-
-config EARLY_PRINTK
- def_bool y
-
-source "lib/Kconfig.debug"
-
-config CMDLINE_BOOL
- bool
-endmenu
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/riscv/Kconfig.debug b/arch/riscv/Kconfig.debug
new file mode 100644
index 000000000000..3224ff6ecf6e
--- /dev/null
+++ b/arch/riscv/Kconfig.debug
@@ -0,0 +1,37 @@
+
+config CMDLINE_BOOL
+ bool "Built-in kernel command line"
+ help
+ For most platforms, it is the firmware or a second-stage bootloader
+ that specifies the kernel command line options by default.
+ However, it might be necessary or advantageous to either override
+ the default kernel command line or add a few extra options to it.
+ For such cases, this option allows hardcoding command line options
+ directly into the kernel.
+
+ For that, choose 'Y' here and fill in the extra boot parameters
+ in CONFIG_CMDLINE.
+
+ The built-in options will be concatenated to the default command
+ line if CMDLINE_FORCE is set to 'N'. Otherwise, the default
+ command line will be ignored and replaced by the built-in string.
+
+config CMDLINE
+ string "Built-in kernel command string"
+ depends on CMDLINE_BOOL
+ default ""
+ help
+ Supply command-line options at build time by entering them here.
+
+config CMDLINE_FORCE
+ bool "Built-in command line overrides bootloader arguments"
+ depends on CMDLINE_BOOL
+ help
+ Set this option to 'Y' to have the kernel ignore the bootloader
+ or firmware command line. Instead, the built-in command line
+ will be used exclusively.
+
+ If you don't know what to do here, say N.
+
+config EARLY_PRINTK
+ def_bool y
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 6d4a5f6c3f4f..2627e4813edf 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -8,7 +8,6 @@
# for more details.
#
-LDFLAGS :=
OBJCOPYFLAGS := -O binary
LDFLAGS_vmlinux :=
ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 855115ace98c..c452359c9cb8 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -25,18 +25,11 @@
#define ATOMIC_INIT(i) { (i) }
-#define __atomic_op_acquire(op, args...) \
-({ \
- typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
- __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory"); \
- __ret; \
-})
-
-#define __atomic_op_release(op, args...) \
-({ \
- __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory"); \
- op##_relaxed(args); \
-})
+#define __atomic_acquire_fence() \
+ __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
+
+#define __atomic_release_fence() \
+ __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
static __always_inline int atomic_read(const atomic_t *v)
{
@@ -209,130 +202,8 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
-/*
- * The extra atomic operations that are constructed from one of the core
- * AMO-based operations above (aside from sub, which is easier to fit above).
- * These are required to perform a full barrier, but they're OK this way
- * because atomic_*_return is also required to perform a full barrier.
- *
- */
-#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix) \
-static __always_inline \
-bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_##func_op##_return(i, v) comp_op I; \
-}
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, func_op, comp_op, I) \
- ATOMIC_OP(op, func_op, comp_op, I, int, )
-#else
-#define ATOMIC_OPS(op, func_op, comp_op, I) \
- ATOMIC_OP(op, func_op, comp_op, I, int, ) \
- ATOMIC_OP(op, func_op, comp_op, I, long, 64)
-#endif
-
-ATOMIC_OPS(add_and_test, add, ==, 0)
-ATOMIC_OPS(sub_and_test, sub, ==, 0)
-ATOMIC_OPS(add_negative, add, <, 0)
-
-#undef ATOMIC_OP
-#undef ATOMIC_OPS
-
-#define ATOMIC_OP(op, func_op, I, c_type, prefix) \
-static __always_inline \
-void atomic##prefix##_##op(atomic##prefix##_t *v) \
-{ \
- atomic##prefix##_##func_op(I, v); \
-}
-
-#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix) \
-static __always_inline \
-c_type atomic##prefix##_fetch_##op##_relaxed(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_fetch_##func_op##_relaxed(I, v); \
-} \
-static __always_inline \
-c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_fetch_##func_op(I, v); \
-}
-
-#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix) \
-static __always_inline \
-c_type atomic##prefix##_##op##_return_relaxed(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_fetch_##op##_relaxed(v) c_op I; \
-} \
-static __always_inline \
-c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_fetch_##op(v) c_op I; \
-}
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I) \
- ATOMIC_OP( op, asm_op, I, int, ) \
- ATOMIC_FETCH_OP( op, asm_op, I, int, ) \
- ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, )
-#else
-#define ATOMIC_OPS(op, asm_op, c_op, I) \
- ATOMIC_OP( op, asm_op, I, int, ) \
- ATOMIC_FETCH_OP( op, asm_op, I, int, ) \
- ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, ) \
- ATOMIC_OP( op, asm_op, I, long, 64) \
- ATOMIC_FETCH_OP( op, asm_op, I, long, 64) \
- ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
-#endif
-
-ATOMIC_OPS(inc, add, +, 1)
-ATOMIC_OPS(dec, add, +, -1)
-
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-#define atomic_inc_return atomic_inc_return
-#define atomic_dec_return atomic_dec_return
-
-#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-#define atomic_fetch_inc atomic_fetch_inc
-#define atomic_fetch_dec atomic_fetch_dec
-
-#ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#define atomic64_inc_return atomic64_inc_return
-#define atomic64_dec_return atomic64_dec_return
-
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-#define atomic64_fetch_inc atomic64_fetch_inc
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP_RETURN
-
-#define ATOMIC_OP(op, func_op, comp_op, I, prefix) \
-static __always_inline \
-bool atomic##prefix##_##op(atomic##prefix##_t *v) \
-{ \
- return atomic##prefix##_##func_op##_return(v) comp_op I; \
-}
-
-ATOMIC_OP(inc_and_test, inc, ==, 0, )
-ATOMIC_OP(dec_and_test, dec, ==, 0, )
-#ifndef CONFIG_GENERIC_ATOMIC64
-ATOMIC_OP(inc_and_test, inc, ==, 0, 64)
-ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
-#endif
-
-#undef ATOMIC_OP
-
/* This is required to provide a full barrier on success. */
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
@@ -349,9 +220,10 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
: "memory");
return prev;
}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
+static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
long prev, rc;
@@ -368,27 +240,7 @@ static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
: "memory");
return prev;
}
-
-static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- return __atomic64_add_unless(v, a, u) != u;
-}
-#endif
-
-/*
- * The extra atomic operations that are constructed from one of the core
- * LR/SC-based operations above.
- */
-static __always_inline int atomic_inc_not_zero(atomic_t *v)
-{
- return __atomic_add_unless(v, 1, 0);
-}
-
-#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
-{
- return atomic64_add_unless(v, 1, 0);
-}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif
/*
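Both this riscv hunk and the earlier powerpc one converge on the same interface: the arch provides only atomic_fetch_add_unless() (returning the old value) and the generic layer derives the helpers that were deleted here. The following is a hedged, runnable userspace C11 model of those semantics; the names mirror the kernel's but this is not kernel code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of atomic_fetch_add_unless(): add @a unless the value is @u,
 * returning the old value either way. */
static int fetch_add_unless(atomic_int *v, int a, int u)
{
        int old = atomic_load(v);

        do {
                if (old == u)
                        break;          /* nothing added */
        } while (!atomic_compare_exchange_weak(v, &old, old + a));
        return old;
}

/* The derived helpers the patch deletes from arch code: */
static bool add_unless(atomic_int *v, int a, int u)
{
        return fetch_add_unless(v, a, u) != u;
}

static bool inc_not_zero(atomic_int *v)
{
        return add_unless(v, 1, 0);
}

int main(void)
{
        atomic_int v = ATOMIC_VAR_INIT(0);

        printf("%d\n", inc_not_zero(&v));           /* 0: stayed zero */
        atomic_store(&v, 3);
        printf("%d\n", inc_not_zero(&v));           /* 1: now 4 */
        printf("%d\n", fetch_add_unless(&v, 2, 4)); /* 4: unchanged */
        return 0;
}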
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 4fe5b2affa23..a6afa60074cb 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -145,6 +145,7 @@ config S390
select HAVE_KERNEL_LZ4
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_UNCOMPRESSED
select HAVE_KERNEL_XZ
select HAVE_KPROBES
select HAVE_KRETPROBES
@@ -183,10 +184,6 @@ config PGTABLE_LEVELS
int
default 5
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
source "kernel/livepatch/Kconfig"
menu "Processor type and features"
@@ -514,8 +511,6 @@ config SCHED_TOPOLOGY
making when dealing with machines that have multi-threading,
multiple cores or multiple books.
-source kernel/Kconfig.preempt
-
source kernel/Kconfig.hz
config KEXEC
@@ -626,8 +621,6 @@ config FORCE_MAX_ZONEORDER
int
default "9"
-source "mm/Kconfig"
-
config MAX_PHYSMEM_BITS
int "Maximum size of supported physical memory in bits (42-53)"
range 42 53
@@ -797,10 +790,6 @@ config CRASH_DUMP
endmenu
-menu "Executable file formats / Emulations"
-
-source "fs/Kconfig.binfmt"
-
config SECCOMP
def_bool y
prompt "Enable seccomp to safely compute untrusted bytecode"
@@ -818,8 +807,6 @@ config SECCOMP
If unsure, say Y.
-endmenu
-
menu "Power Management"
config ARCH_HIBERNATION_POSSIBLE
@@ -829,30 +816,16 @@ source "kernel/power/Kconfig"
endmenu
-source "net/Kconfig"
-
config PCMCIA
def_bool n
config CCW
def_bool y
-source "drivers/Kconfig"
-
config HAVE_PNETID
tristate
default (SMC || CCWGROUP)
-source "fs/Kconfig"
-
-source "arch/s390/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
menu "Virtualization"
config PFAULT
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 2cfdfbf8d320..190527560b2c 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-source "lib/Kconfig.debug"
-
config S390_PTDUMP
bool "Export kernel pagetable layout to userspace via debugfs"
depends on DEBUG_KERNEL
@@ -20,5 +17,3 @@ config S390_PTDUMP
config EARLY_PRINTK
def_bool y
-
-endmenu
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 68a690442be0..eee6703093c3 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -14,8 +14,18 @@ LD_BFD := elf64-s390
LDFLAGS := -m elf64_s390
KBUILD_AFLAGS_MODULE += -fPIC
KBUILD_CFLAGS_MODULE += -fPIC
-KBUILD_CFLAGS += -m64
KBUILD_AFLAGS += -m64
+KBUILD_CFLAGS += -m64
+aflags_dwarf := -Wa,-gdwarf-2
+KBUILD_AFLAGS_DECOMPRESSOR := -m64 -D__ASSEMBLY__
+KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
+KBUILD_CFLAGS_DECOMPRESSOR := -m64 -O2
+KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
+KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float
+KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
+KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding)
+KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
+KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
UTS_MACHINE := s390x
STACK_SIZE := 16384
CHECKFLAGS += -D__s390__ -D__s390x__
@@ -52,18 +62,14 @@ cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
#
cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
-# old style option for packed stacks
-ifeq ($(call cc-option-yn,-mkernel-backchain),y)
-cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK
-aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
-endif
-
-# new style option for packed stacks
ifeq ($(call cc-option-yn,-mpacked-stack),y)
cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK
aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
endif
+KBUILD_AFLAGS_DECOMPRESSOR += $(aflags-y)
+KBUILD_CFLAGS_DECOMPRESSOR += $(cflags-y)
+
ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE)
ifneq ($(call cc-option-yn,-mstack-size=8192),y)
@@ -71,8 +77,11 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
endif
endif
-ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
-cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
+ifdef CONFIG_WARN_DYNAMIC_STACK
+ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
+ KBUILD_CFLAGS += -mwarn-dynamicstack
+ KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
+ endif
endif
ifdef CONFIG_EXPOLINE
@@ -82,6 +91,7 @@ ifdef CONFIG_EXPOLINE
CC_FLAGS_EXPOLINE += -mindirect-branch-table
export CC_FLAGS_EXPOLINE
cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
+ aflags-y += -DCC_USING_EXPOLINE
endif
endif
@@ -102,11 +112,12 @@ KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
KBUILD_CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables $(cfi)
KBUILD_AFLAGS += $(aflags-y) $(cfi)
+export KBUILD_AFLAGS_DECOMPRESSOR
+export KBUILD_CFLAGS_DECOMPRESSOR
OBJCOPYFLAGS := -O binary
-head-y := arch/s390/kernel/head.o
-head-y += arch/s390/kernel/head64.o
+head-y := arch/s390/kernel/head64.o
# See arch/s390/Kbuild for content of core part of the kernel
core-y += arch/s390/
@@ -121,7 +132,7 @@ boot := arch/s390/boot
syscalls := arch/s390/kernel/syscalls
tools := arch/s390/tools
-all: image bzImage
+all: bzImage
#KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg...
KBUILD_IMAGE := $(boot)/bzImage
@@ -129,7 +140,7 @@ KBUILD_IMAGE := $(boot)/bzImage
install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
-image bzImage: vmlinux
+bzImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
zfcpdump:
@@ -152,8 +163,7 @@ archprepare:
# Don't use tabs in echo arguments
define archhelp
- echo '* image - Kernel image for IPL ($(boot)/image)'
- echo '* bzImage - Compressed kernel image for IPL ($(boot)/bzImage)'
+ echo '* bzImage - Kernel image for IPL ($(boot)/bzImage)'
echo ' install - Install kernel using'
echo ' (your) ~/bin/$(INSTALLKERNEL) or'
echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index ee6a9c387c87..9bf8489df6e6 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -206,35 +206,28 @@ static int
appldata_timer_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- unsigned int len;
- char buf[2];
+ int timer_active = appldata_timer_active;
+ int zero = 0;
+ int one = 1;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &timer_active,
+ .maxlen = sizeof(int),
+ .extra1 = &zero,
+ .extra2 = &one,
+ };
+
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
- if (!*lenp || *ppos) {
- *lenp = 0;
- return 0;
- }
- if (!write) {
- strncpy(buf, appldata_timer_active ? "1\n" : "0\n",
- ARRAY_SIZE(buf));
- len = strnlen(buf, ARRAY_SIZE(buf));
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- goto out;
- }
- len = *lenp;
- if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
- return -EFAULT;
spin_lock(&appldata_timer_lock);
- if (buf[0] == '1')
+ if (timer_active)
__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
- else if (buf[0] == '0')
+ else
__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
spin_unlock(&appldata_timer_lock);
-out:
- *lenp = len;
- *ppos += len;
return 0;
}
@@ -248,37 +241,24 @@ static int
appldata_interval_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- unsigned int len;
- int interval;
- char buf[16];
+ int interval = appldata_interval;
+ int one = 1;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &interval,
+ .maxlen = sizeof(int),
+ .extra1 = &one,
+ };
- if (!*lenp || *ppos) {
- *lenp = 0;
- return 0;
- }
- if (!write) {
- len = sprintf(buf, "%i\n", appldata_interval);
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- goto out;
- }
- len = *lenp;
- if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
- return -EFAULT;
- interval = 0;
- sscanf(buf, "%i", &interval);
- if (interval <= 0)
- return -EINVAL;
+ rc = proc_dointvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
spin_lock(&appldata_timer_lock);
appldata_interval = interval;
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock);
-out:
- *lenp = len;
- *ppos += len;
return 0;
}
@@ -293,10 +273,17 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct appldata_ops *ops = NULL, *tmp_ops;
- unsigned int len;
- int rc, found;
- char buf[2];
struct list_head *lh;
+ int rc, found;
+ int active;
+ int zero = 0;
+ int one = 1;
+ struct ctl_table ctl_entry = {
+ .data = &active,
+ .maxlen = sizeof(int),
+ .extra1 = &zero,
+ .extra2 = &one,
+ };
found = 0;
mutex_lock(&appldata_ops_mutex);
@@ -317,31 +304,15 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
}
mutex_unlock(&appldata_ops_mutex);
- if (!*lenp || *ppos) {
- *lenp = 0;
+ active = ops->active;
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write) {
module_put(ops->owner);
- return 0;
- }
- if (!write) {
- strncpy(buf, ops->active ? "1\n" : "0\n", ARRAY_SIZE(buf));
- len = strnlen(buf, ARRAY_SIZE(buf));
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len)) {
- module_put(ops->owner);
- return -EFAULT;
- }
- goto out;
- }
- len = *lenp;
- if (copy_from_user(buf, buffer,
- len > sizeof(buf) ? sizeof(buf) : len)) {
- module_put(ops->owner);
- return -EFAULT;
+ return rc;
}
mutex_lock(&appldata_ops_mutex);
- if ((buf[0] == '1') && (ops->active == 0)) {
+ if (active && (ops->active == 0)) {
// protect work queue callback
if (!try_module_get(ops->owner)) {
mutex_unlock(&appldata_ops_mutex);
@@ -359,7 +330,7 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
module_put(ops->owner);
} else
ops->active = 1;
- } else if ((buf[0] == '0') && (ops->active == 1)) {
+ } else if (!active && (ops->active == 1)) {
ops->active = 0;
rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
(unsigned long) ops->data, ops->size,
@@ -370,9 +341,6 @@ appldata_generic_handler(struct ctl_table *ctl, int write,
module_put(ops->owner);
}
mutex_unlock(&appldata_ops_mutex);
-out:
- *lenp = len;
- *ppos += len;
module_put(ops->owner);
return 0;
}
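All three appldata handlers are converted to the same idiom: snapshot the current value into a stack variable, describe it with a temporary ctl_table whose extra1/extra2 bound the accepted range, delegate parsing to proc_dointvec_minmax()/proc_douintvec_minmax(), and commit only after a successful write. A hedged sketch of that idiom for a generic 0/1 flag (my_flag and the handler name are placeholders):

#include <linux/sysctl.h>

static int my_flag;

static int my_flag_handler(struct ctl_table *ctl, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int val = my_flag;      /* reads see the current value */
        int zero = 0, one = 1;
        struct ctl_table tmp = {
                .procname = ctl->procname,
                .data     = &val,
                .maxlen   = sizeof(int),
                .extra1   = &zero,      /* reject anything below 0 */
                .extra2   = &one,       /* ... or above 1 */
        };
        int rc = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

        if (rc < 0 || !write)
                return rc;
        my_flag = val;          /* commit only on a successful write */
        return 0;
}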
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index d1fa37fcce83..9e6668ee93de 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -3,19 +3,52 @@
# Makefile for the linux s390-specific parts of the memory manager.
#
-targets := image
-targets += bzImage
-subdir- := compressed
+KCOV_INSTRUMENT := n
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
-$(obj)/image: vmlinux FORCE
- $(call if_changed,objcopy)
+KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
+KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
+
+#
+# Use -march=z900 for als.c to be able to print an error
+# message if the kernel is started on a machine which is too old
+#
+ifneq ($(CC_FLAGS_MARCH),-march=z900)
+AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
+AFLAGS_head.o += -march=z900
+AFLAGS_REMOVE_mem.o += $(CC_FLAGS_MARCH)
+AFLAGS_mem.o += -march=z900
+CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
+CFLAGS_als.o += -march=z900
+CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
+CFLAGS_sclp_early_core.o += -march=z900
+endif
+
+CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
+
+obj-y := head.o als.o ebcdic.o sclp_early_core.o mem.o
+targets := bzImage startup.a $(obj-y)
+subdir- := compressed
+
+OBJECTS := $(addprefix $(obj)/,$(obj-y))
$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
-$(obj)/compressed/vmlinux: FORCE
+$(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
+quiet_cmd_ar = AR $@
+ cmd_ar = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(filter $(OBJECTS), $^)
+
+$(obj)/startup.a: $(OBJECTS) FORCE
+ $(call if_changed,ar)
+
install: $(CONFIGURE) $(obj)/bzImage
sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
System.map "$(INSTALL_PATH)"
+
+chkbss := $(OBJECTS)
+chkbss-target := $(obj)/startup.a
+include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/kernel/als.c b/arch/s390/boot/als.c
index d1892bf36cab..d592e0d90d9f 100644
--- a/arch/s390/kernel/als.c
+++ b/arch/s390/boot/als.c
@@ -3,12 +3,10 @@
* Copyright IBM Corp. 2016
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <asm/processor.h>
#include <asm/facility.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
-#include "entry.h"
/*
* The code within this file will be called very early. It may _not_
@@ -18,9 +16,9 @@
* For temporary objects the stack (16k) should be used.
*/
-static unsigned long als[] __initdata = { FACILITIES_ALS };
+static unsigned long als[] = { FACILITIES_ALS };
-static void __init u16_to_hex(char *str, u16 val)
+static void u16_to_hex(char *str, u16 val)
{
int i, num;
@@ -33,9 +31,9 @@ static void __init u16_to_hex(char *str, u16 val)
*str = '\0';
}
-static void __init print_machine_type(void)
+static void print_machine_type(void)
{
- static char mach_str[80] __initdata = "Detected machine-type number: ";
+ static char mach_str[80] = "Detected machine-type number: ";
char type_str[5];
struct cpuid id;
@@ -46,7 +44,7 @@ static void __init print_machine_type(void)
sclp_early_printk(mach_str);
}
-static void __init u16_to_decimal(char *str, u16 val)
+static void u16_to_decimal(char *str, u16 val)
{
int div = 1;
@@ -60,9 +58,9 @@ static void __init u16_to_decimal(char *str, u16 val)
*str = '\0';
}
-static void __init print_missing_facilities(void)
+static void print_missing_facilities(void)
{
- static char als_str[80] __initdata = "Missing facilities: ";
+ static char als_str[80] = "Missing facilities: ";
unsigned long val;
char val_str[6];
int i, j, first;
@@ -95,7 +93,7 @@ static void __init print_missing_facilities(void)
sclp_early_printk("See Principles of Operations for facility bits\n");
}
-static void __init facility_mismatch(void)
+static void facility_mismatch(void)
{
sclp_early_printk("The Linux kernel requires more recent processor hardware\n");
print_machine_type();
@@ -103,7 +101,7 @@ static void __init facility_mismatch(void)
disabled_wait(0x8badcccc);
}
-void __init verify_facilities(void)
+void verify_facilities(void)
{
int i;
diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore
index 2088cc140629..45aeb4f08752 100644
--- a/arch/s390/boot/compressed/.gitignore
+++ b/arch/s390/boot/compressed/.gitignore
@@ -1,4 +1,5 @@
sizes.h
vmlinux
vmlinux.lds
+vmlinux.scr.lds
vmlinux.bin.full
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 5766f7b9b271..04609478d18b 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -6,39 +6,29 @@
#
KCOV_INSTRUMENT := n
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
+obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,head.o misc.o) piggy.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
-targets += misc.o piggy.o sizes.h head.o
-
-KBUILD_CFLAGS := -m64 -D__KERNEL__ -O2
-KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
-KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
-KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+targets += vmlinux.scr.lds $(obj-y) $(if $(CONFIG_KERNEL_UNCOMPRESSED),,sizes.h)
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
+KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
+KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
-OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o ebcdic.o als.o)
-OBJECTS += $(objtree)/drivers/s390/char/sclp_early_core.o
-OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o
+OBJECTS := $(addprefix $(obj)/,$(obj-y))
LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
-$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS)
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS)
$(call if_changed,ld)
-TRIM_HEAD_SIZE := 0x11000
-
-sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 (0x\1 - $(TRIM_HEAD_SIZE))/p'
+# extract required uncompressed vmlinux symbols and adjust them to reflect offsets inside vmlinux.bin
+sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 (0x\1 - 0x100000)/p'
quiet_cmd_sizes = GEN $@
cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
-quiet_cmd_trim_head = TRIM $@
- cmd_trim_head = tail -c +$$(($(TRIM_HEAD_SIZE) + 1)) $< > $@
-
$(obj)/sizes.h: vmlinux
$(call if_changed,sizes)
@@ -48,21 +38,18 @@ $(obj)/head.o: $(obj)/sizes.h
CFLAGS_misc.o += -I$(objtree)/$(obj)
$(obj)/misc.o: $(obj)/sizes.h
-OBJCOPYFLAGS_vmlinux.bin.full := -R .comment -S
-$(obj)/vmlinux.bin.full: vmlinux
+OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
+$(obj)/vmlinux.bin: vmlinux
$(call if_changed,objcopy)
-$(obj)/vmlinux.bin: $(obj)/vmlinux.bin.full
- $(call if_changed,trim_head)
-
vmlinux.bin.all-y := $(obj)/vmlinux.bin
-suffix-$(CONFIG_KERNEL_GZIP) := gz
-suffix-$(CONFIG_KERNEL_BZIP2) := bz2
-suffix-$(CONFIG_KERNEL_LZ4) := lz4
-suffix-$(CONFIG_KERNEL_LZMA) := lzma
-suffix-$(CONFIG_KERNEL_LZO) := lzo
-suffix-$(CONFIG_KERNEL_XZ) := xz
+suffix-$(CONFIG_KERNEL_GZIP) := .gz
+suffix-$(CONFIG_KERNEL_BZIP2) := .bz2
+suffix-$(CONFIG_KERNEL_LZ4) := .lz4
+suffix-$(CONFIG_KERNEL_LZMA) := .lzma
+suffix-$(CONFIG_KERNEL_LZO) := .lzo
+suffix-$(CONFIG_KERNEL_XZ) := .xz
$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
$(call if_changed,gzip)
@@ -78,5 +65,9 @@ $(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
$(call if_changed,xzkern)
LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y)
+$(obj)/piggy.o: $(obj)/vmlinux.scr.lds $(obj)/vmlinux.bin$(suffix-y)
$(call if_changed,ld)
+
+chkbss := $(filter-out $(obj)/misc.o $(obj)/piggy.o,$(OBJECTS))
+chkbss-target := $(obj)/vmlinux.bin
+include $(srctree)/arch/s390/scripts/Makefile.chkbss
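
For orientation, the sed-sizes rule above rewrites nm output into the sizes.h consumed by head.o and misc.o. With a hypothetical bss address, the transformation looks like this:

/* nm vmlinux prints (address invented for illustration):
 *   0000000000abc000 B __bss_start
 * which the sed expression turns into the sizes.h line: */
#define SZ__bss_start (0xabc000 - 0x100000)

head.S then loads SZ__bss_start via .Lmvsize as the number of bytes the memory mover copies to 0x100000; the 0x100000 subtraction converts the vmlinux link address into an offset inside vmlinux.bin.
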
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
index 9f94eca0f467..df8dbbc17bcc 100644
--- a/arch/s390/boot/compressed/head.S
+++ b/arch/s390/boot/compressed/head.S
@@ -15,7 +15,7 @@
#include "sizes.h"
__HEAD
-ENTRY(startup_continue)
+ENTRY(startup_decompressor)
basr %r13,0 # get base
.LPG1:
# setup stack
@@ -23,7 +23,7 @@ ENTRY(startup_continue)
aghi %r15,-160
brasl %r14,decompress_kernel
# Set up registers for memory mover. We move the decompressed image to
- # 0x11000, where startup_continue of the decompressed image is supposed
+ # 0x100000, where startup_continue of the decompressed image is supposed
# to be.
lgr %r4,%r2
lg %r2,.Loffset-.LPG1(%r13)
@@ -33,7 +33,7 @@ ENTRY(startup_continue)
la %r1,0x200
mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
# When the memory mover is done we pass control to
- # arch/s390/kernel/head64.S:startup_continue which lives at 0x11000 in
+ # arch/s390/kernel/head64.S:startup_continue which lives at 0x100000 in
# the decompressed image.
lgr %r6,%r2
br %r1
@@ -47,6 +47,6 @@ mover_end:
.Lstack:
.quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
.Loffset:
- .quad 0x11000
+ .quad 0x100000
.Lmvsize:
.quad SZ__bss_start
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 511b2cc9b91a..f66ad73c205b 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -71,43 +71,6 @@ static int puts(const char *s)
return 0;
}
-void *memset(void *s, int c, size_t n)
-{
- char *xs;
-
- xs = s;
- while (n--)
- *xs++ = c;
- return s;
-}
-
-void *memcpy(void *dest, const void *src, size_t n)
-{
- const char *s = src;
- char *d = dest;
-
- while (n--)
- *d++ = *s++;
- return dest;
-}
-
-void *memmove(void *dest, const void *src, size_t n)
-{
- const char *s = src;
- char *d = dest;
-
- if (d <= s) {
- while (n--)
- *d++ = *s++;
- } else {
- d += n;
- s += n;
- while (n--)
- *--d = *--s;
- }
- return dest;
-}
-
static void error(char *x)
{
unsigned long long psw = 0x000a0000deadbeefULL;
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index d43c2db12d30..b16ac8b3c439 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -23,13 +23,10 @@ SECTIONS
*(.text.*)
_etext = . ;
}
- .rodata.compressed : {
- *(.rodata.compressed)
- }
.rodata : {
_rodata = . ;
*(.rodata) /* read-only data */
- *(.rodata.*)
+ *(EXCLUDE_FILE (*piggy.o) .rodata.compressed)
_erodata = . ;
}
.data : {
@@ -38,6 +35,15 @@ SECTIONS
*(.data.*)
_edata = . ;
}
+ startup_continue = 0x100000;
+#ifdef CONFIG_KERNEL_UNCOMPRESSED
+ . = 0x100000;
+#else
+ . = ALIGN(8);
+#endif
+ .rodata.compressed : {
+ *(.rodata.compressed)
+ }
. = ALIGN(256);
.bss : {
_bss = . ;
@@ -54,5 +60,6 @@ SECTIONS
*(.eh_frame)
*(__ex_table)
*(*__ksymtab*)
+ *(___kcrctab*)
}
}
diff --git a/arch/s390/boot/compressed/vmlinux.scr b/arch/s390/boot/compressed/vmlinux.scr.lds.S
index 42a242597f34..ff01d18c9222 100644
--- a/arch/s390/boot/compressed/vmlinux.scr
+++ b/arch/s390/boot/compressed/vmlinux.scr.lds.S
@@ -2,10 +2,14 @@
SECTIONS
{
.rodata.compressed : {
+#ifndef CONFIG_KERNEL_UNCOMPRESSED
input_len = .;
LONG(input_data_end - input_data) input_data = .;
+#endif
*(.data)
+#ifndef CONFIG_KERNEL_UNCOMPRESSED
output_len = . - 4;
input_data_end = .;
+#endif
}
}
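
The script above brackets the binary blob that $(LD) --format binary wraps into piggy.o: input_len holds the LONG() value and input_data/input_data_end delimit the payload. A hedged consumer sketch — the real user is the decompressor in misc.c; the demo name is hypothetical:

extern unsigned char input_data[];	/* start of compressed payload */
extern unsigned char input_data_end[];	/* end of compressed payload */
extern int input_len;			/* value written by LONG() above */

/* Sanity-check the linker-provided bracketing (illustration only). */
static int demo_payload_consistent(void)
{
	return input_len == (int)(input_data_end - input_data);
}
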
diff --git a/arch/s390/boot/ebcdic.c b/arch/s390/boot/ebcdic.c
new file mode 100644
index 000000000000..7391e7d36086
--- /dev/null
+++ b/arch/s390/boot/ebcdic.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../kernel/ebcdic.c"
diff --git a/arch/s390/kernel/head.S b/arch/s390/boot/head.S
index 5c42f16a54c4..f721913b73f1 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/boot/head.S
@@ -272,14 +272,14 @@ iplstart:
.org 0x10000
ENTRY(startup)
j .Lep_startup_normal
- .org 0x10008
+ .org EP_OFFSET
#
# This is a list of s390 kernel entry points. At address 0x1000f the number of
# valid entry points is stored.
#
# IMPORTANT: Do not change this table, it is s390 kernel ABI!
#
- .ascii "S390EP"
+ .ascii EP_STRING
.byte 0x00,0x01
#
# kdump startup-code at 0x10010, running in 64 bit absolute addressing mode
@@ -310,10 +310,11 @@ ENTRY(startup_kdump)
l %r15,.Lstack-.LPG0(%r13)
ahi %r15,-STACK_FRAME_OVERHEAD
brasl %r14,verify_facilities
-# For uncompressed images, continue in
-# arch/s390/kernel/head64.S. For compressed images, continue in
-# arch/s390/boot/compressed/head.S.
+#ifdef CONFIG_KERNEL_UNCOMPRESSED
jg startup_continue
+#else
+ jg startup_decompressor
+#endif
.Lstack:
.long 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/boot/head_kdump.S
index 174d6959bf5b..174d6959bf5b 100644
--- a/arch/s390/kernel/head_kdump.S
+++ b/arch/s390/boot/head_kdump.S
diff --git a/arch/s390/boot/mem.S b/arch/s390/boot/mem.S
new file mode 100644
index 000000000000..b33463633f03
--- /dev/null
+++ b/arch/s390/boot/mem.S
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include "../lib/mem.S"
diff --git a/arch/s390/boot/sclp_early_core.c b/arch/s390/boot/sclp_early_core.c
new file mode 100644
index 000000000000..5a19fd7020b5
--- /dev/null
+++ b/arch/s390/boot/sclp_early_core.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../drivers/s390/char/sclp_early_core.c"
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index ad47abd08630..c54cb26eb7f5 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -1035,7 +1035,6 @@ static struct aead_alg gcm_aes_aead = {
.chunksize = AES_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_priority = 900,
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 3b7f96c9eead..86aed30fad3a 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -128,7 +128,6 @@ static struct shash_alg ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index a00c17f761c1..009572e8276d 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -78,7 +78,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 944aa6b237cd..62833a1d8724 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -71,7 +71,6 @@ static struct shash_alg sha256_alg = {
.cra_name = "sha256",
.cra_driver_name= "sha256-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -108,7 +107,6 @@ static struct shash_alg sha224_alg = {
.cra_name = "sha224",
.cra_driver_name= "sha224-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index b17eded532b1..be589c340d15 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -76,7 +76,6 @@ static struct shash_alg sha512_alg = {
.cra_name = "sha512",
.cra_driver_name= "sha512-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -115,7 +114,6 @@ static struct shash_alg sha384_alg = {
.cra_name = "sha384",
.cra_driver_name= "sha384-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha_ctx),
.cra_module = THIS_MODULE,
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index a2945b289a29..3452e18bb1ca 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -497,7 +497,7 @@ static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
}
diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer);
rc = hypfs_create_str(cpu_dir, "type", buffer);
- return PTR_RET(rc);
+ return PTR_ERR_OR_ZERO(rc);
}
static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr)
@@ -544,7 +544,7 @@ static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info)
return PTR_ERR(rc);
diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer);
rc = hypfs_create_str(cpu_dir, "type", buffer);
- return PTR_RET(rc);
+ return PTR_ERR_OR_ZERO(rc);
}
static void *hypfs_create_phys_files(struct dentry *parent_dir, void *phys_hdr)
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 06b513d192b9..c681329fdeec 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -36,7 +36,7 @@ struct hypfs_sb_info {
kuid_t uid; /* uid used for files and dirs */
kgid_t gid; /* gid used for files and dirs */
struct dentry *update_file; /* file to trigger update */
- time_t last_update; /* last update time in secs since 1970 */
+ time64_t last_update; /* last update, CLOCK_MONOTONIC time */
struct mutex lock; /* lock to protect update process */
};
@@ -52,7 +52,7 @@ static void hypfs_update_update(struct super_block *sb)
struct hypfs_sb_info *sb_info = sb->s_fs_info;
struct inode *inode = d_inode(sb_info->update_file);
- sb_info->last_update = get_seconds();
+ sb_info->last_update = ktime_get_seconds();
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
}
@@ -179,7 +179,7 @@ static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
* to restart data collection in this case.
*/
mutex_lock(&fs_info->lock);
- if (fs_info->last_update == get_seconds()) {
+ if (fs_info->last_update == ktime_get_seconds()) {
rc = -EBUSY;
goto out;
}
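
Switching last_update from get_seconds() (wall clock, time_t) to ktime_get_seconds() (monotonic, time64_t) makes the once-per-second debounce immune to settimeofday() jumps as well as to the 2038 rollover. The idiom, as a standalone sketch with hypothetical demo_* names:

static time64_t demo_last_update;

static int demo_try_update(void)
{
	if (demo_last_update == ktime_get_seconds())
		return -EBUSY;	/* data already collected this second */
	demo_last_update = ktime_get_seconds();
	return 0;
}
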
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index c1bedb4c8de0..046e044a48d0 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -47,6 +47,50 @@ struct ap_queue_status {
};
/**
+ * ap_instructions_available() - Test if AP instructions are available.
+ *
+ * Returns 0 if the AP instructions are installed.
+ */
+static inline int ap_instructions_available(void)
+{
+ register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
+ register unsigned long reg1 asm ("1") = -ENODEV;
+ register unsigned long reg2 asm ("2");
+
+ asm volatile(
+ " .long 0xb2af0000\n" /* PQAP(TAPQ) */
+ "0: la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (reg1), "=d" (reg2)
+ : "d" (reg0)
+ : "cc");
+ return reg1;
+}
+
+/**
+ * ap_tapq(): Test adjunct processor queue.
+ * @qid: The AP queue number
+ * @info: Pointer to queue descriptor
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
+{
+ register unsigned long reg0 asm ("0") = qid;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2");
+
+ asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
+ : "=d" (reg1), "=d" (reg2)
+ : "d" (reg0)
+ : "cc");
+ if (info)
+ *info = reg2;
+ return reg1;
+}
+
+/**
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
* @tbit: Test facilities bit
@@ -54,10 +98,57 @@ struct ap_queue_status {
*
* Returns AP queue status structure.
*/
-struct ap_queue_status ap_test_queue(ap_qid_t qid,
- int tbit,
- unsigned long *info);
+static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
+ int tbit,
+ unsigned long *info)
+{
+ if (tbit)
+ qid |= 1UL << 23; /* set T bit */
+ return ap_tapq(qid, info);
+}
+
+/**
+ * ap_rapq(): Reset adjunct processor queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
+{
+ register unsigned long reg0 asm ("0") = qid | (1UL << 24);
+ register struct ap_queue_status reg1 asm ("1");
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(RAPQ) */
+ : "=d" (reg1)
+ : "d" (reg0)
+ : "cc");
+ return reg1;
+}
+
+/**
+ * ap_zapq(): Reset and zeroize adjunct processor queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
+{
+ register unsigned long reg0 asm ("0") = qid | (2UL << 24);
+ register struct ap_queue_status reg1 asm ("1");
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(ZAPQ) */
+ : "=d" (reg1)
+ : "d" (reg0)
+ : "cc");
+ return reg1;
+}
+
+/**
+ * struct ap_config_info - convenience struct for AP crypto
+ * config info as returned by the ap_qci() function.
+ */
struct ap_config_info {
unsigned int apsc : 1; /* S bit */
unsigned int apxa : 1; /* N bit */
@@ -74,50 +165,189 @@ struct ap_config_info {
unsigned char _reserved4[16];
} __aligned(8);
-/*
- * ap_query_configuration(): Fetch cryptographic config info
+/**
+ * ap_qci(): Get AP configuration data
*
- * Returns the ap configuration info fetched via PQAP(QCI).
- * On success 0 is returned, on failure a negative errno
- * is returned, e.g. if the PQAP(QCI) instruction is not
- * available, the return value will be -EOPNOTSUPP.
+ * Returns 0 on success, or -EOPNOTSUPP.
*/
-int ap_query_configuration(struct ap_config_info *info);
+static inline int ap_qci(struct ap_config_info *config)
+{
+ register unsigned long reg0 asm ("0") = 4UL << 24;
+ register unsigned long reg1 asm ("1") = -EOPNOTSUPP;
+ register struct ap_config_info *reg2 asm ("2") = config;
+
+ asm volatile(
+ ".long 0xb2af0000\n" /* PQAP(QCI) */
+ "0: la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (reg1)
+ : "d" (reg0), "d" (reg2)
+ : "cc", "memory");
+
+ return reg1;
+}
/*
* struct ap_qirq_ctrl - convenient struct for easy invocation
- * of the ap_queue_irq_ctrl() function. This struct is passed
- * as GR1 parameter to the PQAP(AQIC) instruction. For details
- * please see the AR documentation.
+ * of the ap_aqic() function. This struct is passed as GR1
+ * parameter to the PQAP(AQIC) instruction. For details please
+ * see the AR documentation.
*/
struct ap_qirq_ctrl {
unsigned int _res1 : 8;
- unsigned int zone : 8; /* zone info */
- unsigned int ir : 1; /* ir flag: enable (1) or disable (0) irq */
+ unsigned int zone : 8; /* zone info */
+ unsigned int ir : 1; /* ir flag: enable (1) or disable (0) irq */
unsigned int _res2 : 4;
- unsigned int gisc : 3; /* guest isc field */
+ unsigned int gisc : 3; /* guest isc field */
unsigned int _res3 : 6;
- unsigned int gf : 2; /* gisa format */
+ unsigned int gf : 2; /* gisa format */
unsigned int _res4 : 1;
- unsigned int gisa : 27; /* gisa origin */
+ unsigned int gisa : 27; /* gisa origin */
unsigned int _res5 : 1;
- unsigned int isc : 3; /* irq sub class */
+ unsigned int isc : 3; /* irq sub class */
};
/**
- * ap_queue_irq_ctrl(): Control interruption on a AP queue.
+ * ap_aqic(): Control interruption for a specific AP.
* @qid: The AP queue number
- * @qirqctrl: struct ap_qirq_ctrl, see above
+ * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
* @ind: The notification indicator byte
*
* Returns AP queue status.
+ */
+static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
+ struct ap_qirq_ctrl qirqctrl,
+ void *ind)
+{
+ register unsigned long reg0 asm ("0") = qid | (3UL << 24);
+ register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl;
+ register struct ap_queue_status reg1_out asm ("1");
+ register void *reg2 asm ("2") = ind;
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(AQIC) */
+ : "=d" (reg1_out)
+ : "d" (reg0), "d" (reg1_in), "d" (reg2)
+ : "cc");
+ return reg1_out;
+}
+
+/*
+ * union ap_qact_ap_info - used together with the
+ * ap_qact() function to provide a convenient way
+ * to handle the ap info needed by the qact function.
+ */
+union ap_qact_ap_info {
+ unsigned long val;
+ struct {
+ unsigned int : 3;
+ unsigned int mode : 3;
+ unsigned int : 26;
+ unsigned int cat : 8;
+ unsigned int : 8;
+ unsigned char ver[2];
+ };
+};
+
+/**
+ * ap_qact(): Query AP compatibility type.
+ * @qid: The AP queue number
+ * @apinfo: On input the info about the AP queue. On output the
+ * alternate AP queue info provided by the qact function
+ * in GR2 is stored here.
*
- * Control interruption on the given AP queue.
- * Just a simple wrapper function for the low level PQAP(AQIC)
- * instruction available for other kernel modules.
+ * Returns AP queue status. Check response_code field for failures.
*/
-struct ap_queue_status ap_queue_irq_ctrl(ap_qid_t qid,
- struct ap_qirq_ctrl qirqctrl,
- void *ind);
+static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
+ union ap_qact_ap_info *apinfo)
+{
+ register unsigned long reg0 asm ("0") = qid | (5UL << 24)
+ | ((ifbit & 0x01) << 22);
+ register unsigned long reg1_in asm ("1") = apinfo->val;
+ register struct ap_queue_status reg1_out asm ("1");
+ register unsigned long reg2 asm ("2");
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(QACT) */
+ : "+d" (reg1_in), "=d" (reg1_out), "=d" (reg2)
+ : "d" (reg0)
+ : "cc");
+ apinfo->val = reg2;
+ return reg1_out;
+}
+
+/**
+ * ap_nqap(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on NQAP can't happen because the L bit is 1.
+ * Condition code 2 on NQAP also means the send is incomplete,
+ * because a segment boundary was reached. The NQAP is repeated.
+ */
+static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
+ unsigned long long psmid,
+ void *msg, size_t length)
+{
+ register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = (unsigned long) msg;
+ register unsigned long reg3 asm ("3") = (unsigned long) length;
+ register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
+ register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
+
+ asm volatile (
+ "0: .long 0xb2ad0042\n" /* NQAP */
+ " brc 2,0b"
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
+ : "d" (reg4), "d" (reg5)
+ : "cc", "memory");
+ return reg1;
+}
+
+/**
+ * ap_dqap(): Receive message from adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: Pointer to program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on DQAP means the receive has taken place
+ * but only partially. The response is incomplete, hence the
+ * DQAP is repeated.
+ * Condition code 2 on DQAP also means the receive is incomplete,
+ * this time because a segment boundary was reached. Again, the
+ * DQAP is repeated.
+ * Note that gpr2 is used by the DQAP instruction to keep track of
+ * any 'residual' length, in case the instruction gets interrupted.
+ * Hence it gets zeroed before the instruction.
+ */
+static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
+ unsigned long long *psmid,
+ void *msg, size_t length)
+{
+ register unsigned long reg0 asm("0") = qid | 0x80000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm("2") = 0UL;
+ register unsigned long reg4 asm("4") = (unsigned long) msg;
+ register unsigned long reg5 asm("5") = (unsigned long) length;
+ register unsigned long reg6 asm("6") = 0UL;
+ register unsigned long reg7 asm("7") = 0UL;
+
+ asm volatile(
+ "0: .long 0xb2ae0064\n" /* DQAP */
+ " brc 6,0b\n"
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2),
+ "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7)
+ : : "cc", "memory");
+ *psmid = (((unsigned long long) reg6) << 32) + reg7;
+ return reg1;
+}
#endif /* _ASM_S390_AP_H_ */
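
With the PQAP wrappers now inline, a caller can probe for AP support without going through the ap bus module. A hedged usage sketch — the card/queue numbers and error mapping are illustrative, and response_code is the status field the comments above refer to:

static int demo_probe_ap_queue(int card, int queue)
{
	struct ap_queue_status status;
	unsigned long info;

	if (ap_instructions_available() != 0)
		return -ENODEV;		/* PQAP/NQAP/DQAP not installed */
	status = ap_tapq(AP_MKQID(card, queue), &info);
	return status.response_code ? -EIO : 0;
}
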
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 4b55532f15c4..fd20ab5d4cf7 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -55,17 +55,9 @@ static inline void atomic_add(int i, atomic_t *v)
__atomic_add(i, &v->counter);
}
-#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
-#define atomic_inc(_v) atomic_add(1, _v)
-#define atomic_inc_return(_v) atomic_add_return(1, _v)
-#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v) atomic_fetch_add(-(int)(_i), _v)
-#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
-#define atomic_dec(_v) atomic_sub(1, _v)
-#define atomic_dec_return(_v) atomic_sub_return(1, _v)
-#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
#define ATOMIC_OPS(op) \
static inline void atomic_##op(int i, atomic_t *v) \
@@ -90,21 +82,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return __atomic_cmpxchg(&v->counter, old, new);
}
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == u))
- break;
- old = atomic_cmpxchg(v, c, c + a);
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
#define ATOMIC64_INIT(i) { (i) }
static inline long atomic64_read(const atomic64_t *v)
@@ -168,50 +145,8 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OPS
-static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
-{
- long c, old;
-
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == u))
- break;
- old = atomic64_cmpxchg(v, c, c + i);
- if (likely(old == c))
- break;
- c = old;
- }
- return c != u;
-}
-
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
- long c, old, dec;
-
- c = atomic64_read(v);
- for (;;) {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- old = atomic64_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
- return dec;
-}
-
-#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v) atomic64_add(1, _v)
-#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
-#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long)(_i), _v)
#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long)(_i), _v)
#define atomic64_sub(_i, _v) atomic64_add(-(long)(_i), _v)
-#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v) atomic64_sub(1, _v)
-#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
-#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#endif /* __ARCH_S390_ATOMIC__ */
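
The deleted cmpxchg loops are not lost; after this patch the generic atomic headers are expected to provide atomic_fetch_add_unless() and friends. For reference, this is the retry pattern the removed routines implemented (a sketch of the removed code, not of the generic replacement):

static inline int demo_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	while (c != u) {			/* stop at the forbidden value */
		int old = atomic_cmpxchg(v, c, c + a);

		if (old == c)			/* swap succeeded */
			break;
		c = old;			/* raced, retry with fresh value */
	}
	return c;				/* old value; == u if skipped */
}
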
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index de023a9a88ca..bf2cbff926ef 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -2,7 +2,7 @@
/*
* CPU-measurement facilities
*
- * Copyright IBM Corp. 2012
+ * Copyright IBM Corp. 2012, 2018
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
@@ -139,8 +139,14 @@ struct hws_trailer_entry {
unsigned char timestamp[16]; /* 16 - 31 timestamp */
unsigned long long reserved1; /* 32 -Reserved */
unsigned long long reserved2; /* */
- unsigned long long progusage1; /* 48 - reserved for programming use */
- unsigned long long progusage2; /* */
+ union { /* 48 - reserved for programming use */
+ struct {
+ unsigned int clock_base:1; /* in progusage2 */
+ unsigned long long progusage1:63;
+ unsigned long long progusage2;
+ };
+ unsigned long long progusage[2];
+ };
} __packed;
/* Load program parameter */
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index e07cce88dfb0..fcbd638fb9f4 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -9,6 +9,14 @@
#ifndef _ASM_S390_GMAP_H
#define _ASM_S390_GMAP_H
+/* Generic bits for GMAP notification on DAT table entry changes. */
+#define GMAP_NOTIFY_SHADOW 0x2
+#define GMAP_NOTIFY_MPROT 0x1
+
+/* Status bits only for huge segment entries */
+#define _SEGMENT_ENTRY_GMAP_IN 0x8000 /* invalidation notify bit */
+#define _SEGMENT_ENTRY_GMAP_UC 0x4000 /* dirty (migration) */
+
/**
* struct gmap_struct - guest address space
* @list: list head for the mm->context gmap list
@@ -132,4 +140,6 @@ void gmap_pte_notify(struct mm_struct *, unsigned long addr, pte_t *,
int gmap_mprotect_notify(struct gmap *, unsigned long start,
unsigned long len, int prot);
+void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
+ unsigned long gaddr, unsigned long vmaddr);
#endif /* _ASM_S390_GMAP_H */
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 9c5fc50204dd..2d1afa58a4b6 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -37,7 +37,10 @@ static inline int prepare_hugepage_range(struct file *file,
return 0;
}
-#define arch_clear_hugepage_flags(page) do { } while (0)
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_arch_1, &page->flags);
+}
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 13de80cf741c..b106aa29bf55 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -68,8 +68,6 @@ struct kprobe_ctlblk {
unsigned long kprobe_saved_imask;
unsigned long kprobe_saved_ctl[3];
struct prev_kprobe prev_kprobe;
- struct pt_regs jprobe_saved_regs;
- kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
};
void arch_remove_kprobe(struct kprobe *p);
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 5bc888841eaf..406d940173ab 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -185,7 +185,7 @@ struct lowcore {
/* Transaction abort diagnostic block */
__u8 pgm_tdb[256]; /* 0x1800 */
__u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */
-} __packed;
+} __packed __aligned(8192);
#define S390_lowcore (*((struct lowcore *) 0))
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index f5ff9dbad8ac..f31a15044c24 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -24,6 +24,8 @@ typedef struct {
unsigned int uses_skeys:1;
/* The mmu context uses CMM. */
unsigned int uses_cmm:1;
+ /* The gmaps associated with this context are allowed to use huge pages. */
+ unsigned int allow_gmap_hpage_1m:1;
} mm_context_t;
#define INIT_MM_CONTEXT(name) \
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index d16bc79c30bb..0717ee76885d 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -32,6 +32,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.has_pgste = 0;
mm->context.uses_skeys = 0;
mm->context.uses_cmm = 0;
+ mm->context.allow_gmap_hpage_1m = 0;
#endif
switch (mm->context.asce_limit) {
case _REGION2_SIZE:
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
index a01f81186e86..123dac3717b3 100644
--- a/arch/s390/include/asm/nospec-insn.h
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -8,7 +8,7 @@
#ifdef __ASSEMBLY__
-#ifdef CONFIG_EXPOLINE
+#ifdef CC_USING_EXPOLINE
_LC_BR_R1 = __LC_BR_R1
@@ -189,7 +189,7 @@ _LC_BR_R1 = __LC_BR_R1
.macro BASR_EX rsave,rtarget,ruse=%r1
basr \rsave,\rtarget
.endm
-#endif
+#endif /* CC_USING_EXPOLINE */
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 94f8db468c9b..10fe982f2b4b 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -51,6 +51,10 @@ struct zpci_fmb_fmt2 {
u64 max_work_units;
};
+struct zpci_fmb_fmt3 {
+ u64 tx_bytes;
+};
+
struct zpci_fmb {
u32 format : 8;
u32 fmt_ind : 24;
@@ -66,6 +70,7 @@ struct zpci_fmb {
struct zpci_fmb_fmt0 fmt0;
struct zpci_fmb_fmt1 fmt1;
struct zpci_fmb_fmt2 fmt2;
+ struct zpci_fmb_fmt3 fmt3;
};
} __packed __aligned(128);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 5ab636089c60..0e7cb0dc9c33 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -268,8 +268,10 @@ static inline int is_module_addr(void *addr)
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL
/* Bits in the segment table entry */
-#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
-#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
+#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
+#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
+#define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe30UL
+#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* page table origin */
#define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */
@@ -1101,7 +1103,8 @@ int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
-bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
+bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
+ pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
@@ -1116,6 +1119,10 @@ int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
unsigned long *oldpte, unsigned long *oldpgste);
+void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
/*
* Certain architectures need to do special things when PTEs
diff --git a/arch/s390/include/asm/purgatory.h b/arch/s390/include/asm/purgatory.h
index 6090670df51f..e297bcfc476f 100644
--- a/arch/s390/include/asm/purgatory.h
+++ b/arch/s390/include/asm/purgatory.h
@@ -13,11 +13,5 @@
int verify_sha256_digest(void);
-extern u64 kernel_entry;
-extern u64 kernel_type;
-
-extern u64 crash_start;
-extern u64 crash_size;
-
#endif /* __ASSEMBLY__ */
#endif /* _S390_PURGATORY_H_ */
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index de11ecc99c7c..9c9970a5dfb1 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -262,7 +262,6 @@ struct qdio_outbuf_state {
void *user;
};
-#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00
#define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
#define CHSC_AC1_INITIATE_INPUTQ 0x80
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 54f81f8ed662..724faede8ac5 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -4,6 +4,4 @@
#include <asm-generic/sections.h>
-extern char _ehead[];
-
#endif
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 9c30ebe046f3..1d66016f4170 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -9,8 +9,10 @@
#include <linux/const.h>
#include <uapi/asm/setup.h>
-
+#define EP_OFFSET 0x10008
+#define EP_STRING "S390EP"
#define PARMAREA 0x10400
+#define PARMAREA_END 0x11000
/*
* Machine features detected in early.c
diff --git a/arch/s390/include/uapi/asm/chsc.h b/arch/s390/include/uapi/asm/chsc.h
index dc329aa03f76..83a574e95b3a 100644
--- a/arch/s390/include/uapi/asm/chsc.h
+++ b/arch/s390/include/uapi/asm/chsc.h
@@ -23,29 +23,29 @@ struct chsc_async_header {
__u32 key : 4;
__u32 : 28;
struct subchannel_id sid;
-} __attribute__ ((packed));
+};
struct chsc_async_area {
struct chsc_async_header header;
__u8 data[CHSC_SIZE - sizeof(struct chsc_async_header)];
-} __attribute__ ((packed));
+};
struct chsc_header {
__u16 length;
__u16 code;
-} __attribute__ ((packed));
+};
struct chsc_sync_area {
struct chsc_header header;
__u8 data[CHSC_SIZE - sizeof(struct chsc_header)];
-} __attribute__ ((packed));
+};
struct chsc_response_struct {
__u16 length;
__u16 code;
__u32 parms;
__u8 data[CHSC_SIZE - 2 * sizeof(__u16) - sizeof(__u32)];
-} __attribute__ ((packed));
+};
struct chsc_chp_cd {
struct chp_id chpid;
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 3510c0fd06f4..39d901476ee5 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -111,4 +111,7 @@
#define SO_ZEROCOPY 60
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 2fed39b26b42..dbfd1730e631 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -9,39 +9,21 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
# Do not trace early setup code
-CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_early_nobss.o = $(CC_FLAGS_FTRACE)
endif
-GCOV_PROFILE_als.o := n
GCOV_PROFILE_early.o := n
GCOV_PROFILE_early_nobss.o := n
-KCOV_INSTRUMENT_als.o := n
KCOV_INSTRUMENT_early.o := n
KCOV_INSTRUMENT_early_nobss.o := n
-UBSAN_SANITIZE_als.o := n
UBSAN_SANITIZE_early.o := n
UBSAN_SANITIZE_early_nobss.o := n
#
-# Use -march=z900 for als.c to be able to print an error
-# message if the kernel is started on a machine which is too old
-#
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
-CFLAGS_REMOVE_als.o += $(CC_FLAGS_EXPOLINE)
-CFLAGS_als.o += -march=z900
-AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
-AFLAGS_head.o += -march=z900
-endif
-
-CFLAGS_als.o += -D__NO_FORTIFY
-
-#
# Passing null pointers is ok for smp code, since we access the lowcore here.
#
CFLAGS_smp.o := -Wno-nonnull
@@ -61,13 +43,13 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
-obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o early_nobss.o
+obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
obj-y += nospec-branch.o
-extra-y += head.o head64.o vmlinux.lds
+extra-y += head64.o vmlinux.lds
obj-$(CONFIG_SYSFS) += nospec-sysfs.o
CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
@@ -99,5 +81,5 @@ obj-$(CONFIG_TRACEPOINTS) += trace.o
obj-y += vdso64/
obj-$(CONFIG_COMPAT) += vdso32/
-chkbss := head.o head64.o als.o early_nobss.o
+chkbss := head64.o early_nobss.o
include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 9f5ea9d87069..c3620bafc374 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -306,6 +306,15 @@ static void *kzalloc_panic(int len)
return rc;
}
+static const char *nt_name(Elf64_Word type)
+{
+ const char *name = "LINUX";
+
+ if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
+ name = KEXEC_CORE_NOTE_NAME;
+ return name;
+}
+
/*
* Initialize ELF note
*/
@@ -332,11 +341,26 @@ static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len,
static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len)
{
- const char *note_name = "LINUX";
+ return nt_init_name(buf, type, desc, d_len, nt_name(type));
+}
+
+/*
+ * Calculate the size of ELF note
+ */
+static size_t nt_size_name(int d_len, const char *name)
+{
+ size_t size;
- if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
- note_name = KEXEC_CORE_NOTE_NAME;
- return nt_init_name(buf, type, desc, d_len, note_name);
+ size = sizeof(Elf64_Nhdr);
+ size += roundup(strlen(name) + 1, 4);
+ size += roundup(d_len, 4);
+
+ return size;
+}
+
+static inline size_t nt_size(Elf64_Word type, int d_len)
+{
+ return nt_size_name(d_len, nt_name(type));
}
/*
@@ -375,6 +399,29 @@ static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
}
/*
+ * Calculate size of ELF notes per cpu
+ */
+static size_t get_cpu_elf_notes_size(void)
+{
+ struct save_area *sa = NULL;
+ size_t size;
+
+ size = nt_size(NT_PRSTATUS, sizeof(struct elf_prstatus));
+ size += nt_size(NT_PRFPREG, sizeof(elf_fpregset_t));
+ size += nt_size(NT_S390_TIMER, sizeof(sa->timer));
+ size += nt_size(NT_S390_TODCMP, sizeof(sa->todcmp));
+ size += nt_size(NT_S390_TODPREG, sizeof(sa->todpreg));
+ size += nt_size(NT_S390_CTRS, sizeof(sa->ctrs));
+ size += nt_size(NT_S390_PREFIX, sizeof(sa->prefix));
+ if (MACHINE_HAS_VX) {
+ size += nt_size(NT_S390_VXRS_HIGH, sizeof(sa->vxrs_high));
+ size += nt_size(NT_S390_VXRS_LOW, sizeof(sa->vxrs_low));
+ }
+
+ return size;
+}
+
+/*
* Initialize prpsinfo note (new kernel)
*/
static void *nt_prpsinfo(void *ptr)
@@ -429,6 +476,30 @@ static void *nt_vmcoreinfo(void *ptr)
return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
}
+static size_t nt_vmcoreinfo_size(void)
+{
+ const char *name = "VMCOREINFO";
+ char nt_name[11];
+ Elf64_Nhdr note;
+ void *addr;
+
+ if (copy_oldmem_kernel(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
+ return 0;
+
+ if (copy_oldmem_kernel(&note, addr, sizeof(note)))
+ return 0;
+
+ memset(nt_name, 0, sizeof(nt_name));
+ if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
+ sizeof(nt_name) - 1))
+ return 0;
+
+ if (strcmp(nt_name, name) != 0)
+ return 0;
+
+ return nt_size_name(note.n_descsz, name);
+}
+
/*
* Initialize final note (needed for /proc/vmcore code)
*/
@@ -539,6 +610,27 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
return ptr;
}
+static size_t get_elfcorehdr_size(int mem_chunk_cnt)
+{
+ size_t size;
+
+ size = sizeof(Elf64_Ehdr);
+ /* PT_NOTES */
+ size += sizeof(Elf64_Phdr);
+ /* nt_prpsinfo */
+ size += nt_size(NT_PRPSINFO, sizeof(struct elf_prpsinfo));
+ /* regsets */
+ size += get_cpu_cnt() * get_cpu_elf_notes_size();
+ /* nt_vmcoreinfo */
+ size += nt_vmcoreinfo_size();
+ /* nt_final */
+ size += sizeof(Elf64_Nhdr);
+ /* PT_LOADS */
+ size += mem_chunk_cnt * sizeof(Elf64_Phdr);
+
+ return size;
+}
+
/*
* Create ELF core header (new kernel)
*/
@@ -566,8 +658,8 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
mem_chunk_cnt = get_mem_chunk_cnt();
- alloc_size = 0x1000 + get_cpu_cnt() * 0x4a0 +
- mem_chunk_cnt * sizeof(Elf64_Phdr);
+ alloc_size = get_elfcorehdr_size(mem_chunk_cnt);
+
hdr = kzalloc_panic(alloc_size);
/* Init elf header */
ptr = ehdr_init(hdr, mem_chunk_cnt);
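
The new size helpers replace the old "0x1000 + cpus * 0x4a0" guess with an exact sum. Worked instance of the nt_size_name() arithmetic (values illustrative): an 8-byte descriptor named "LINUX" costs sizeof(Elf64_Nhdr) = 12, plus roundup(strlen("LINUX") + 1, 4) = 8 for the name, plus roundup(8, 4) = 8 for the descriptor — 28 bytes in the notes buffer. A standalone restatement:

#include <string.h>

#define DEMO_ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

/* Mirrors nt_size_name(); 12 == sizeof(Elf64_Nhdr), three 32-bit words. */
static unsigned long demo_nt_size_name(unsigned long d_len, const char *name)
{
	return 12 + DEMO_ROUNDUP(strlen(name) + 1, 4) + DEMO_ROUNDUP(d_len, 4);
}
/* demo_nt_size_name(8, "LINUX") == 12 + 8 + 8 == 28 */
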
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 827699eb48fa..5b28b434f8a1 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -331,8 +331,20 @@ static void __init setup_boot_command_line(void)
append_to_cmdline(append_ipl_scpdata);
}
+static void __init check_image_bootable(void)
+{
+ if (!memcmp(EP_STRING, (void *)EP_OFFSET, strlen(EP_STRING)))
+ return;
+
+ sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
+ sclp_early_printk("This image does not contain all parts necessary for starting up. Use\n");
+ sclp_early_printk("bzImage or arch/s390/boot/compressed/vmlinux instead.\n");
+ disabled_wait(0xbadb007);
+}
+
void __init startup_init(void)
{
+ check_image_bootable();
time_early_init();
init_kernel_storage_key();
lockdep_off();
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 961abfac2c5f..472fa2f1a4a5 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -83,7 +83,6 @@ long sys_s390_sthyi(unsigned long function_code, void __user *buffer, u64 __user
DECLARE_PER_CPU(u64, mt_cycles[8]);
-void verify_facilities(void);
void gs_load_bc_cb(struct pt_regs *regs);
void set_fs_fixup(void);
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 791cb9000e86..6d14ad42ba88 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -48,11 +48,23 @@ ENTRY(startup_continue)
# Early machine initialization and detection functions.
#
brasl %r14,startup_init
- lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space,
- # virtual and never return ...
+
+# check control registers
+ stctg %c0,%c15,0(%r15)
+ oi 6(%r15),0x60 # enable sigp emergency & external call
+ oi 4(%r15),0x10 # switch on low address protection
+ lctlg %c0,%c15,0(%r15)
+
+ lam 0,15,.Laregs-.LPG1(%r13) # load acrs needed by uaccess
+ brasl %r14,start_kernel # go to C code
+#
+# We returned from start_kernel ?!? PANIK
+#
+ basr %r13,0
+ lpswe .Ldw-.(%r13) # load disabled wait psw
+
.align 16
.LPG1:
-.Lentry:.quad 0x0000000180000000,_stext
.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
.quad 0 # cr1: primary space segment table
.quad .Lduct # cr2: dispatchable unit control table
@@ -85,30 +97,5 @@ ENTRY(startup_continue)
.endr
.Llinkage_stack:
.long 0,0,0x89000000,0,0,0,0x8a000000,0
-
-ENTRY(_ehead)
-
- .org 0x100000 - 0x11000 # head.o ends at 0x11000
-#
-# startup-code, running in absolute addressing mode
-#
-ENTRY(_stext)
- basr %r13,0 # get base
-.LPG3:
-# check control registers
- stctg %c0,%c15,0(%r15)
- oi 6(%r15),0x60 # enable sigp emergency & external call
- oi 4(%r15),0x10 # switch on low address proctection
- lctlg %c0,%c15,0(%r15)
-
- lam 0,15,.Laregs-.LPG3(%r13) # load acrs needed by uaccess
- brasl %r14,start_kernel # go to C code
-#
-# We returned from start_kernel ?!? PANIK
-#
- basr %r13,0
- lpswe .Ldw-.(%r13) # load disabled wait psw
-
- .align 8
.Ldw: .quad 0x0002000180000000,0x0000000000000000
.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 60f60afa645c..7c0a095e9c5f 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -321,38 +321,20 @@ static int kprobe_handler(struct pt_regs *regs)
* If we have no pre-handler or it returned 0, we
* continue with single stepping. If we have a
* pre-handler and it returned non-zero, it prepped
- * for calling the break_handler below on re-entry
- * for jprobe processing, so get out doing nothing
- * more here.
+ * for changing execution path, so get out doing
+ * nothing more here.
*/
push_kprobe(kcb, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (p->pre_handler && p->pre_handler(p, regs))
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ pop_kprobe(kcb);
+ preempt_enable_no_resched();
return 1;
+ }
kcb->kprobe_status = KPROBE_HIT_SS;
}
enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
return 1;
- } else if (kprobe_running()) {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- /*
- * Continuation after the jprobe completed and
- * caused the jprobe_return trap. The jprobe
- * break_handler "returns" to the original
- * function that still has the kprobe breakpoint
- * installed. We continue with single stepping.
- */
- kcb->kprobe_status = KPROBE_HIT_SS;
- enable_singlestep(kcb, regs,
- (unsigned long) p->ainsn.insn);
- return 1;
- } /* else:
- * No kprobe at this address and the current kprobe
- * has no break handler (no jprobe!). The kernel just
- * exploded, let the standard trap handler pick up the
- * pieces.
- */
} /* else:
* No kprobe at this address and no active kprobe. The trap has
* not been caused by a kprobe breakpoint. The race of breakpoint
@@ -452,9 +434,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
regs->psw.addr = orig_ret_address;
- pop_kprobe(get_kprobe_ctlblk());
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
@@ -661,60 +641,6 @@ int kprobe_exceptions_notify(struct notifier_block *self,
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long stack;
-
- memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
-
- /* setup return addr to the jprobe handler routine */
- regs->psw.addr = (unsigned long) jp->entry;
- regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
-
- /* r15 is the stack pointer */
- stack = (unsigned long) regs->gprs[15];
-
- memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
-
- /*
- * jprobes use jprobe_return() which skips the normal return
- * path of the function, and this messes up the accounting of the
- * function graph tracer to get messed up.
- *
- * Pause function graph tracing while performing the jprobe function.
- */
- pause_graph_tracing();
- return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void jprobe_return(void)
-{
- asm volatile(".word 0x0002");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long stack;
-
- /* It's OK to start function graph tracing again */
- unpause_graph_tracing();
-
- stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];
-
- /* Put the regs back */
- memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
- /* put the stack back */
- memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
- preempt_enable_no_resched();
- return 1;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
static struct kprobe trampoline = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index 18ae7b9c71d6..bdddaae96559 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -35,6 +35,8 @@ early_param("nospec", nospec_setup_early);
static int __init nospec_report(void)
{
+ if (test_facility(156))
+ pr_info("Spectre V2 mitigation: etokens\n");
if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
pr_info("Spectre V2 mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
@@ -56,7 +58,15 @@ early_param("nospectre_v2", nospectre_v2_setup_early);
void __init nospec_auto_detect(void)
{
- if (IS_ENABLED(CC_USING_EXPOLINE)) {
+ if (test_facility(156)) {
+ /*
+ * The machine supports etokens.
+ * Disable expolines and disable nobp.
+ */
+ if (IS_ENABLED(CC_USING_EXPOLINE))
+ nospec_disable = 1;
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ } else if (IS_ENABLED(CC_USING_EXPOLINE)) {
/*
* The kernel has been compiled with expolines.
* Keep expolines enabled and disable nobp.
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
index 8affad5f18cb..e30e580ae362 100644
--- a/arch/s390/kernel/nospec-sysfs.c
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -13,6 +13,8 @@ ssize_t cpu_show_spectre_v1(struct device *dev,
ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ if (test_facility(156))
+ return sprintf(buf, "Mitigation: etokens\n");
if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
return sprintf(buf, "Mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 0292d68e7dde..cb198d4a6dca 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -2,7 +2,7 @@
/*
* Performance event support for the System z CPU-measurement Sampling Facility
*
- * Copyright IBM Corp. 2013
+ * Copyright IBM Corp. 2013, 2018
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
#define KMSG_COMPONENT "cpum_sf"
@@ -1587,6 +1587,17 @@ static void aux_buffer_free(void *data)
"%lu SDBTs\n", num_sdbt);
}
+static void aux_sdb_init(unsigned long sdb)
+{
+ struct hws_trailer_entry *te;
+
+ te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
+
+ /* Save clock base */
+ te->clock_base = 1;
+ memcpy(&te->progusage2, &tod_clock_base[1], 8);
+}
+
/*
* aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
* @cpu: On which to allocate, -1 means current
@@ -1666,6 +1677,7 @@ static void *aux_buffer_setup(int cpu, void **pages, int nr_pages,
/* Tail is the entry in a SDBT */
*tail = (unsigned long)pages[i];
aux->sdb_index[i] = (unsigned long)pages[i];
+ aux_sdb_init((unsigned long)pages[i]);
}
sfb->num_sdb = nr_pages;
diff --git a/arch/s390/kernel/perf_regs.c b/arch/s390/kernel/perf_regs.c
index 54e2d634b849..4352a504f235 100644
--- a/arch/s390/kernel/perf_regs.c
+++ b/arch/s390/kernel/perf_regs.c
@@ -12,9 +12,6 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
{
freg_t fp;
- if (WARN_ON_ONCE((u32)idx >= PERF_REG_S390_MAX))
- return 0;
-
if (idx >= PERF_REG_S390_R0 && idx <= PERF_REG_S390_R15)
return regs->gprs[idx];
@@ -33,7 +30,8 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
if (idx == PERF_REG_S390_PC)
return regs->psw.addr;
- return regs->gprs[idx];
+ WARN_ON_ONCE((u32)idx >= PERF_REG_S390_MAX);
+ return 0;
}
#define REG_RESERVED (~((1UL << PERF_REG_S390_MAX) - 1))
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index d82a9ec64ea9..c637c12f9e37 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -674,12 +674,12 @@ static void __init reserve_kernel(void)
#ifdef CONFIG_DMA_API_DEBUG
/*
* DMA_API_DEBUG code stumbles over addresses from the
- * range [_ehead, _stext]. Mark the memory as reserved
+ * range [PARMAREA_END, _stext]. Mark the memory as reserved
* so it is not used for CONFIG_DMA_API_DEBUG=y.
*/
memblock_reserve(0, PFN_PHYS(start_pfn));
#else
- memblock_reserve(0, (unsigned long)_ehead);
+ memblock_reserve(0, PARMAREA_END);
memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
- (unsigned long)_stext);
#endif
diff --git a/arch/s390/kernel/syscalls/Makefile b/arch/s390/kernel/syscalls/Makefile
index 8ff96c08955f..4d929edc80a6 100644
--- a/arch/s390/kernel/syscalls/Makefile
+++ b/arch/s390/kernel/syscalls/Makefile
@@ -25,15 +25,15 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
define filechk_syshdr
- $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$2"
+ $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$2" < $<
endef
define filechk_sysnr
- $(CONFIG_SHELL) '$(systbl)' -N -a $(sysnr_abi_$(basetarget))
+ $(CONFIG_SHELL) '$(systbl)' -N -a $(sysnr_abi_$(basetarget)) < $<
endef
define filechk_syscalls
- $(CONFIG_SHELL) '$(systbl)' -S
+ $(CONFIG_SHELL) '$(systbl)' -S < $<
endef
syshdr_abi_unistd_32 := common,32
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 54f5496913fa..12f80d1f0415 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -59,6 +59,8 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2)
}
EXPORT_SYMBOL(stsi);
+#ifdef CONFIG_PROC_FS
+
static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
{
switch (encoding) {
@@ -301,6 +303,8 @@ static int __init sysinfo_create_proc(void)
}
device_initcall(sysinfo_create_proc);
+#endif /* CONFIG_PROC_FS */
+
/*
* Service levels interface.
*/
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index cf561160ea88..e8766beee5ad 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -221,17 +221,22 @@ void read_persistent_clock64(struct timespec64 *ts)
ext_to_timespec64(clk, ts);
}
-void read_boot_clock64(struct timespec64 *ts)
+void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
+ struct timespec64 *boot_offset)
{
unsigned char clk[STORE_CLOCK_EXT_SIZE];
+ struct timespec64 boot_time;
__u64 delta;
delta = initial_leap_seconds + TOD_UNIX_EPOCH;
- memcpy(clk, tod_clock_base, 16);
- *(__u64 *) &clk[1] -= delta;
- if (*(__u64 *) &clk[1] > delta)
+ memcpy(clk, tod_clock_base, STORE_CLOCK_EXT_SIZE);
+ *(__u64 *)&clk[1] -= delta;
+ if (*(__u64 *)&clk[1] > delta)
clk[0]--;
- ext_to_timespec64(clk, ts);
+ ext_to_timespec64(clk, &boot_time);
+
+ read_persistent_clock64(wall_time);
+ *boot_offset = timespec64_sub(*wall_time, boot_time);
}
static u64 read_tod_clock(struct clocksource *cs)
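For context, read_persistent_wall_and_boot_offset() folds two pieces of information into one call: the persistent wall-clock reading and the wall-to-boot offset, where the offset is plain timespec64 arithmetic. A minimal sketch of that subtraction with made-up values; only timespec64_sub() is taken from the kernel API:

	#include <linux/time64.h>

	static void boot_offset_example(void)
	{
		/* hypothetical readings, not taken from the TOD clock */
		struct timespec64 wall_time = { .tv_sec = 1000, .tv_nsec = 100 };
		struct timespec64 boot_time = { .tv_sec = 990, .tv_nsec = 600 };
		struct timespec64 boot_offset;

		/* timespec64_sub() borrows from tv_sec when tv_nsec underflows */
		boot_offset = timespec64_sub(wall_time, boot_time);
		/* boot_offset == { .tv_sec = 9, .tv_nsec = NSEC_PER_SEC - 500 } */
	}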
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 4b6e0397f66d..e8184a15578a 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -579,41 +579,33 @@ early_param("topology", topology_setup);
static int topology_ctl_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- unsigned int len;
+ int enabled = topology_is_enabled();
int new_mode;
- char buf[2];
+ int zero = 0;
+ int one = 1;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &enabled,
+ .maxlen = sizeof(int),
+ .extra1 = &zero,
+ .extra2 = &one,
+ };
+
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
- if (!*lenp || *ppos) {
- *lenp = 0;
- return 0;
- }
- if (!write) {
- strncpy(buf, topology_is_enabled() ? "1\n" : "0\n",
- ARRAY_SIZE(buf));
- len = strnlen(buf, ARRAY_SIZE(buf));
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- goto out;
- }
- len = *lenp;
- if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
- return -EFAULT;
- if (buf[0] != '0' && buf[0] != '1')
- return -EINVAL;
mutex_lock(&smp_cpu_state_mutex);
- new_mode = topology_get_mode(buf[0] == '1');
+ new_mode = topology_get_mode(enabled);
if (topology_mode != new_mode) {
topology_mode = new_mode;
topology_schedule_update();
}
mutex_unlock(&smp_cpu_state_mutex);
topology_flush_work();
-out:
- *lenp = len;
- *ppos += len;
- return 0;
+
+ return rc;
}
static struct ctl_table topology_ctl_table[] = {
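The rewritten handler shows a pattern worth naming: rather than parsing the user buffer by hand, build a temporary ctl_table aimed at a stack variable and delegate parsing, bounds checking, and *ppos bookkeeping to a generic proc_do* helper. A standalone sketch of the shape, assuming hypothetical read_current_value()/apply_new_value() accessors:

	static int example_bool_handler(struct ctl_table *ctl, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
	{
		int val = read_current_value();	/* hypothetical getter */
		int zero = 0;
		int one = 1;
		struct ctl_table tmp = {
			.procname = ctl->procname,
			.data = &val,
			.maxlen = sizeof(int),
			.extra1 = &zero,	/* lower bound */
			.extra2 = &one,		/* upper bound */
		};
		int rc;

		rc = proc_douintvec_minmax(&tmp, write, buffer, lenp, ppos);
		if (rc < 0 || !write)
			return rc;
		apply_new_value(val);		/* hypothetical setter */
		return 0;
	}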
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 09abae40f917..3031cc6dd0ab 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -47,7 +47,7 @@ static struct page **vdso64_pagelist;
*/
unsigned int __read_mostly vdso_enabled = 1;
-static int vdso_fault(const struct vm_special_mapping *sm,
+static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page **vdso_pagelist;
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index f0414f52817b..b43f8d33a369 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -19,7 +19,7 @@
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
-ENTRY(startup)
+ENTRY(startup_continue)
jiffies = jiffies_64;
PHDRS {
@@ -30,16 +30,12 @@ PHDRS {
SECTIONS
{
- . = 0x00000000;
+ . = 0x100000;
+ _stext = .; /* Start of text section */
.text : {
/* Text and read-only data */
+ _text = .;
HEAD_TEXT
- /*
- * E.g. perf doesn't like symbols starting at address zero,
- * therefore skip the initial PSW and channel program located
- * at address zero and let _text start at 0x200.
- */
- _text = 0x200;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
@@ -47,6 +43,7 @@ SECTIONS
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
+ *(.text.*_indirect_*)
*(.fixup)
*(.gnu.warning)
} :text = 0x0700
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index daa09f89ca2d..fcb55b02990e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1145,7 +1145,7 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
* yield-candidate.
*/
vcpu->preempted = true;
- swake_up(&vcpu->wq);
+ swake_up_one(&vcpu->wq);
vcpu->stat.halt_wakeup++;
}
/*
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3b7a5151b6a5..f9d90337e64a 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -172,6 +172,10 @@ static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");
+/* allow 1m huge page guest backing, if !nested */
+static int hpage;
+module_param(hpage, int, 0444);
+MODULE_PARM_DESC(hpage, "1m huge page backing support");
/*
* For now we handle at most 16 double words as this is what the s390 base
@@ -475,6 +479,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_AIS_MIGRATION:
r = 1;
break;
+ case KVM_CAP_S390_HPAGE_1M:
+ r = 0;
+ if (hpage)
+ r = 1;
+ break;
case KVM_CAP_S390_MEM_OP:
r = MEM_OP_MAX_SIZE;
break;
@@ -511,19 +520,30 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
}
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
+ struct kvm_memory_slot *memslot)
{
+ int i;
gfn_t cur_gfn, last_gfn;
- unsigned long address;
+ unsigned long gaddr, vmaddr;
struct gmap *gmap = kvm->arch.gmap;
+ DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);
- /* Loop over all guest pages */
+ /* Loop over all guest segments */
+ cur_gfn = memslot->base_gfn;
last_gfn = memslot->base_gfn + memslot->npages;
- for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
- address = gfn_to_hva_memslot(memslot, cur_gfn);
+ for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
+ gaddr = gfn_to_gpa(cur_gfn);
+ vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
+ if (kvm_is_error_hva(vmaddr))
+ continue;
+
+ bitmap_zero(bitmap, _PAGE_ENTRIES);
+ gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
+ for (i = 0; i < _PAGE_ENTRIES; i++) {
+ if (test_bit(i, bitmap))
+ mark_page_dirty(kvm, cur_gfn + i);
+ }
- if (test_and_clear_guest_dirty(gmap->mm, address))
- mark_page_dirty(kvm, cur_gfn);
if (fatal_signal_pending(current))
return;
cond_resched();
@@ -667,6 +687,27 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
r ? "(not available)" : "(success)");
break;
+ case KVM_CAP_S390_HPAGE_1M:
+ mutex_lock(&kvm->lock);
+ if (kvm->created_vcpus)
+ r = -EBUSY;
+ else if (!hpage || kvm->arch.use_cmma)
+ r = -EINVAL;
+ else {
+ r = 0;
+ kvm->mm->context.allow_gmap_hpage_1m = 1;
+ /*
+ * We might have to create fake 4k page
+ * tables. To prevent the hardware from working
+ * on stale PGSTEs, we emulate these instructions.
+ */
+ kvm->arch.use_skf = 0;
+ kvm->arch.use_pfmfi = 0;
+ }
+ mutex_unlock(&kvm->lock);
+ VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
+ r ? "(not available)" : "(success)");
+ break;
case KVM_CAP_S390_USER_STSI:
VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
kvm->arch.user_stsi = 1;
@@ -714,10 +755,13 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
if (!sclp.has_cmma)
break;
- ret = -EBUSY;
VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
mutex_lock(&kvm->lock);
- if (!kvm->created_vcpus) {
+ if (kvm->created_vcpus)
+ ret = -EBUSY;
+ else if (kvm->mm->context.allow_gmap_hpage_1m)
+ ret = -EINVAL;
+ else {
kvm->arch.use_cmma = 1;
/* Not compatible with cmma. */
kvm->arch.use_pfmfi = 0;
@@ -1540,6 +1584,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
uint8_t *keys;
uint64_t hva;
int srcu_idx, i, r = 0;
+ bool unlocked;
if (args->flags != 0)
return -EINVAL;
@@ -1564,9 +1609,11 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
if (r)
goto out;
+ i = 0;
down_read(&current->mm->mmap_sem);
srcu_idx = srcu_read_lock(&kvm->srcu);
- for (i = 0; i < args->count; i++) {
+ while (i < args->count) {
+ unlocked = false;
hva = gfn_to_hva(kvm, args->start_gfn + i);
if (kvm_is_error_hva(hva)) {
r = -EFAULT;
@@ -1580,8 +1627,14 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
}
r = set_guest_storage_key(current->mm, hva, keys[i], 0);
- if (r)
- break;
+ if (r) {
+ r = fixup_user_fault(current, current->mm, hva,
+ FAULT_FLAG_WRITE, &unlocked);
+ if (r)
+ break;
+ }
+ if (!r)
+ i++;
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
up_read(&current->mm->mmap_sem);
@@ -4082,6 +4135,11 @@ static int __init kvm_s390_init(void)
return -ENODEV;
}
+ if (nested && hpage) {
+ pr_info("nested (vSIE) and hpage (huge page backing) can currently not be activated concurrently");
+ return -EINVAL;
+ }
+
for (i = 0; i < 16; i++)
kvm_s390_fac_base[i] |=
S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
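Seen from userspace, the new capability follows the standard KVM handshake: probe with KVM_CHECK_EXTENSION, then flip it on with KVM_ENABLE_CAP on the VM file descriptor before any vCPU exists (the code above returns -EBUSY otherwise). A hedged sketch; vm_fd is assumed to be an already-open VM fd:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int enable_hpage_backing(int vm_fd)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_S390_HPAGE_1M,
		};

		/* returns 0 when the module was loaded with hpage=0 */
		if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_HPAGE_1M) <= 0)
			return -1;
		/* must run before the first KVM_CREATE_VCPU */
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}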
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index eb0eb60c7be6..cfc5a62329f6 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -246,9 +246,10 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
static int handle_iske(struct kvm_vcpu *vcpu)
{
- unsigned long addr;
+ unsigned long gaddr, vmaddr;
unsigned char key;
int reg1, reg2;
+ bool unlocked;
int rc;
vcpu->stat.instruction_iske++;
@@ -262,18 +263,28 @@ static int handle_iske(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
- addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
- addr = kvm_s390_logical_to_effective(vcpu, addr);
- addr = kvm_s390_real_to_abs(vcpu, addr);
- addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
- if (kvm_is_error_hva(addr))
+ gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+ gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
+ gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
+ vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
+ if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
+retry:
+ unlocked = false;
down_read(&current->mm->mmap_sem);
- rc = get_guest_storage_key(current->mm, addr, &key);
- up_read(&current->mm->mmap_sem);
+ rc = get_guest_storage_key(current->mm, vmaddr, &key);
+
+ if (rc) {
+ rc = fixup_user_fault(current, current->mm, vmaddr,
+ FAULT_FLAG_WRITE, &unlocked);
+ if (!rc) {
+ up_read(&current->mm->mmap_sem);
+ goto retry;
+ }
+ }
if (rc)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ up_read(&current->mm->mmap_sem);
vcpu->run->s.regs.gprs[reg1] &= ~0xff;
vcpu->run->s.regs.gprs[reg1] |= key;
return 0;
@@ -281,8 +292,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
- unsigned long addr;
+ unsigned long vmaddr, gaddr;
int reg1, reg2;
+ bool unlocked;
int rc;
vcpu->stat.instruction_rrbe++;
@@ -296,19 +308,27 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
- addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
- addr = kvm_s390_logical_to_effective(vcpu, addr);
- addr = kvm_s390_real_to_abs(vcpu, addr);
- addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
- if (kvm_is_error_hva(addr))
+ gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+ gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
+ gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
+ vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
+ if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
+retry:
+ unlocked = false;
down_read(&current->mm->mmap_sem);
- rc = reset_guest_reference_bit(current->mm, addr);
- up_read(&current->mm->mmap_sem);
+ rc = reset_guest_reference_bit(current->mm, vmaddr);
+ if (rc < 0) {
+ rc = fixup_user_fault(current, current->mm, vmaddr,
+ FAULT_FLAG_WRITE, &unlocked);
+ if (!rc) {
+ up_read(&current->mm->mmap_sem);
+ goto retry;
+ }
+ }
if (rc < 0)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
+ up_read(&current->mm->mmap_sem);
kvm_s390_set_psw_cc(vcpu, rc);
return 0;
}
@@ -323,6 +343,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
unsigned long start, end;
unsigned char key, oldkey;
int reg1, reg2;
+ bool unlocked;
int rc;
vcpu->stat.instruction_sske++;
@@ -355,19 +376,28 @@ static int handle_sske(struct kvm_vcpu *vcpu)
}
while (start != end) {
- unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
+ unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
+ unlocked = false;
- if (kvm_is_error_hva(addr))
+ if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
down_read(&current->mm->mmap_sem);
- rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
+ rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
m3 & SSKE_NQ, m3 & SSKE_MR,
m3 & SSKE_MC);
- up_read(&current->mm->mmap_sem);
- if (rc < 0)
+
+ if (rc < 0) {
+ rc = fixup_user_fault(current, current->mm, vmaddr,
+ FAULT_FLAG_WRITE, &unlocked);
+ rc = !rc ? -EAGAIN : rc;
+ }
+ if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- start += PAGE_SIZE;
+
+ up_read(&current->mm->mmap_sem);
+ if (rc >= 0)
+ start += PAGE_SIZE;
}
if (m3 & (SSKE_MC | SSKE_MR)) {
@@ -948,15 +978,16 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
}
while (start != end) {
- unsigned long useraddr;
+ unsigned long vmaddr;
+ bool unlocked = false;
/* Translate guest address to host address */
- useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
- if (kvm_is_error_hva(useraddr))
+ vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
+ if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
- if (clear_user((void __user *)useraddr, PAGE_SIZE))
+ if (clear_user((void __user *)vmaddr, PAGE_SIZE))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
@@ -966,14 +997,20 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
if (rc)
return rc;
down_read(&current->mm->mmap_sem);
- rc = cond_set_guest_storage_key(current->mm, useraddr,
+ rc = cond_set_guest_storage_key(current->mm, vmaddr,
key, NULL, nq, mr, mc);
- up_read(&current->mm->mmap_sem);
- if (rc < 0)
+ if (rc < 0) {
+ rc = fixup_user_fault(current, current->mm, vmaddr,
+ FAULT_FLAG_WRITE, &unlocked);
+ rc = !rc ? -EAGAIN : rc;
+ }
+ if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- }
- start += PAGE_SIZE;
+ up_read(&current->mm->mmap_sem);
+ if (rc >= 0)
+ start += PAGE_SIZE;
+ }
}
if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
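handle_iske(), handle_rrbe(), handle_sske() and handle_pfmf() all converge on one retry shape: attempt the storage-key operation under mmap_sem, and on failure let fixup_user_fault() materialize the (possibly huge) mapping before trying again. A condensed sketch of just that loop, with do_key_op() standing in for the real helper:

	static int key_op_with_fixup(struct mm_struct *mm, unsigned long vmaddr)
	{
		bool unlocked;
		int rc;

	retry:
		unlocked = false;
		down_read(&mm->mmap_sem);
		rc = do_key_op(mm, vmaddr);	/* hypothetical key operation */
		if (rc) {
			/* fault the page in; may drop and retake mmap_sem */
			rc = fixup_user_fault(current, mm, vmaddr,
					      FAULT_FLAG_WRITE, &unlocked);
			if (!rc) {
				up_read(&mm->mmap_sem);
				goto retry;
			}
		}
		up_read(&mm->mmap_sem);
		return rc;
	}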
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index 2311f15be9cf..40c4d59c926e 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -17,7 +17,7 @@
ENTRY(memmove)
ltgr %r4,%r4
lgr %r1,%r2
- bzr %r14
+ jz .Lmemmove_exit
aghi %r4,-1
clgr %r2,%r3
jnh .Lmemmove_forward
@@ -36,6 +36,7 @@ ENTRY(memmove)
.Lmemmove_forward_remainder:
larl %r5,.Lmemmove_mvc
ex %r4,0(%r5)
+.Lmemmove_exit:
BR_EX %r14
.Lmemmove_reverse:
ic %r0,0(%r4,%r3)
@@ -65,7 +66,7 @@ EXPORT_SYMBOL(memmove)
*/
ENTRY(memset)
ltgr %r4,%r4
- bzr %r14
+ jz .Lmemset_exit
ltgr %r3,%r3
jnz .Lmemset_fill
aghi %r4,-1
@@ -80,6 +81,7 @@ ENTRY(memset)
.Lmemset_clear_remainder:
larl %r3,.Lmemset_xc
ex %r4,0(%r3)
+.Lmemset_exit:
BR_EX %r14
.Lmemset_fill:
cghi %r4,1
@@ -115,7 +117,7 @@ EXPORT_SYMBOL(memset)
*/
ENTRY(memcpy)
ltgr %r4,%r4
- bzr %r14
+ jz .Lmemcpy_exit
aghi %r4,-1
srlg %r5,%r4,8
ltgr %r5,%r5
@@ -124,6 +126,7 @@ ENTRY(memcpy)
.Lmemcpy_remainder:
larl %r5,.Lmemcpy_mvc
ex %r4,0(%r5)
+.Lmemcpy_exit:
BR_EX %r14
.Lmemcpy_loop:
mvc 0(256,%r1),0(%r3)
@@ -145,9 +148,9 @@ EXPORT_SYMBOL(memcpy)
.macro __MEMSET bits,bytes,insn
ENTRY(__memset\bits)
ltgr %r4,%r4
- bzr %r14
+ jz .L__memset_exit\bits
cghi %r4,\bytes
- je .L__memset_exit\bits
+ je .L__memset_store\bits
aghi %r4,-(\bytes+1)
srlg %r5,%r4,8
ltgr %r5,%r5
@@ -163,8 +166,9 @@ ENTRY(__memset\bits)
larl %r5,.L__memset_mvc\bits
ex %r4,0(%r5)
BR_EX %r14
-.L__memset_exit\bits:
+.L__memset_store\bits:
\insn %r3,0(%r2)
+.L__memset_exit\bits:
BR_EX %r14
.L__memset_mvc\bits:
mvc \bytes(1,%r1),0(%r1)
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 6cf024eb2085..510a18299196 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -191,12 +191,7 @@ static void cmm_set_timer(void)
del_timer(&cmm_timer);
return;
}
- if (timer_pending(&cmm_timer)) {
- if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
- return;
- }
- cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
- add_timer(&cmm_timer);
+ mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds * HZ);
}
static void cmm_timer_fn(struct timer_list *unused)
@@ -251,45 +246,42 @@ static int cmm_skip_blanks(char *cp, char **endp)
return str != cp;
}
-static struct ctl_table cmm_table[];
-
static int cmm_pages_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- char buf[16], *p;
- unsigned int len;
- long nr;
+ long nr = cmm_get_pages();
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &nr,
+ .maxlen = sizeof(long),
+ };
+ int rc;
- if (!*lenp || (*ppos && !write)) {
- *lenp = 0;
- return 0;
- }
+ rc = proc_doulongvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
- if (write) {
- len = *lenp;
- if (copy_from_user(buf, buffer,
- len > sizeof(buf) ? sizeof(buf) : len))
- return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
- cmm_skip_blanks(buf, &p);
- nr = simple_strtoul(p, &p, 0);
- if (ctl == &cmm_table[0])
- cmm_set_pages(nr);
- else
- cmm_add_timed_pages(nr);
- } else {
- if (ctl == &cmm_table[0])
- nr = cmm_get_pages();
- else
- nr = cmm_get_timed_pages();
- len = sprintf(buf, "%ld\n", nr);
- if (len > *lenp)
- len = *lenp;
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
- }
- *lenp = len;
- *ppos += len;
+ cmm_set_pages(nr);
+ return 0;
+}
+
+static int cmm_timed_pages_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ long nr = cmm_get_timed_pages();
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &nr,
+ .maxlen = sizeof(long),
+ };
+ int rc;
+
+ rc = proc_doulongvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
+
+ cmm_add_timed_pages(nr);
return 0;
}
@@ -338,7 +330,7 @@ static struct ctl_table cmm_table[] = {
{
.procname = "cmm_timed_pages",
.mode = 0644,
- .proc_handler = cmm_pages_handler,
+ .proc_handler = cmm_timed_pages_handler,
},
{
.procname = "cmm_timeout",
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 6ad15d3fab81..84111a43ea29 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -80,7 +80,7 @@ struct qin64 {
struct dcss_segment {
struct list_head list;
char dcss_name[8];
- char res_name[15];
+ char res_name[16];
unsigned long start_addr;
unsigned long end;
atomic_t ref_count;
@@ -433,7 +433,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
memcpy(&seg->res_name, seg->dcss_name, 8);
EBCASC(seg->res_name, 8);
seg->res_name[8] = '\0';
- strncat(seg->res_name, " (DCSS)", 7);
+ strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
seg->res->name = seg->res_name;
rc = seg->vm_segtype;
if (rc == SEG_TYPE_SC ||
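The strncat()-to-strlcat() switch is more than style: strncat()'s length argument bounds the bytes read from the source and a terminating NUL is still appended on top, so appending seven characters to an 8-character name needs 16 bytes and overran the old char res_name[15] by one. strlcat() bounds the total destination size instead. A minimal sketch with a made-up segment name:

	#include <linux/string.h>

	static void res_name_example(void)
	{
		char res_name[16];

		strcpy(res_name, "SEGNAME1");	/* 8 chars + NUL: 9 of 16 bytes */
		/*
		 * strncat(res_name, " (DCSS)", 7) would copy 7 chars and then
		 * append a NUL -- 16 bytes in total, one too many for a
		 * 15-byte buffer. strlcat() can never write past the end.
		 */
		strlcat(res_name, " (DCSS)", sizeof(res_name));
	}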
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index e074480d3598..4cc3f06b0ab3 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -502,6 +502,8 @@ retry:
/* No reason to continue if interrupted by SIGKILL. */
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
fault = VM_FAULT_SIGNAL;
+ if (flags & FAULT_FLAG_RETRY_NOWAIT)
+ goto out_up;
goto out;
}
if (unlikely(fault & VM_FAULT_ERROR))
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index bc56ec8abcf7..bb44990c8212 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2,8 +2,10 @@
/*
* KVM guest address space mapping code
*
- * Copyright IBM Corp. 2007, 2016
+ * Copyright IBM Corp. 2007, 2016, 2018
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * David Hildenbrand <david@redhat.com>
+ * Janosch Frank <frankja@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
@@ -521,6 +523,9 @@ void gmap_unlink(struct mm_struct *mm, unsigned long *table,
rcu_read_unlock();
}
+static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
+ unsigned long gaddr);
+
/**
* gmap_link - set up shadow page tables to connect a host to a guest address
* @gmap: pointer to guest mapping meta data structure
@@ -541,6 +546,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
+ u64 unprot;
int rc;
BUG_ON(gmap_is_shadow(gmap));
@@ -584,8 +590,8 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
return -EFAULT;
pmd = pmd_offset(pud, vmaddr);
VM_BUG_ON(pmd_none(*pmd));
- /* large pmds cannot yet be handled */
- if (pmd_large(*pmd))
+ /* Are we allowed to use huge pages? */
+ if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
return -EFAULT;
/* Link gmap segment table entry location to page table. */
rc = radix_tree_preload(GFP_KERNEL);
@@ -596,10 +602,22 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
if (*table == _SEGMENT_ENTRY_EMPTY) {
rc = radix_tree_insert(&gmap->host_to_guest,
vmaddr >> PMD_SHIFT, table);
- if (!rc)
- *table = pmd_val(*pmd);
- } else
- rc = 0;
+ if (!rc) {
+ if (pmd_large(*pmd)) {
+ *table = (pmd_val(*pmd) &
+ _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
+ | _SEGMENT_ENTRY_GMAP_UC;
+ } else
+ *table = pmd_val(*pmd) &
+ _SEGMENT_ENTRY_HARDWARE_BITS;
+ }
+ } else if (*table & _SEGMENT_ENTRY_PROTECT &&
+ !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
+ unprot = (u64)*table;
+ unprot &= ~_SEGMENT_ENTRY_PROTECT;
+ unprot |= _SEGMENT_ENTRY_GMAP_UC;
+ gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
+ }
spin_unlock(&gmap->guest_table_lock);
spin_unlock(ptl);
radix_tree_preload_end();
@@ -690,6 +708,12 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
vmaddr |= gaddr & ~PMD_MASK;
/* Find vma in the parent mm */
vma = find_vma(gmap->mm, vmaddr);
+ /*
+ * We do not discard pages that are backed by
+ * hugetlbfs, so we don't have to refault them.
+ */
+ if (vma && is_vm_hugetlb_page(vma))
+ continue;
size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
zap_page_range(vma, vmaddr, size);
}
@@ -864,7 +888,128 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
*/
static void gmap_pte_op_end(spinlock_t *ptl)
{
- spin_unlock(ptl);
+ if (ptl)
+ spin_unlock(ptl);
+}
+
+/**
+ * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
+ * and return the pmd pointer
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ *
+ * Returns a pointer to the pmd for a guest address, or NULL
+ */
+static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
+{
+ pmd_t *pmdp;
+
+ BUG_ON(gmap_is_shadow(gmap));
+ spin_lock(&gmap->guest_table_lock);
+ pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
+
+ if (!pmdp || pmd_none(*pmdp)) {
+ spin_unlock(&gmap->guest_table_lock);
+ return NULL;
+ }
+
+ /* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
+ if (!pmd_large(*pmdp))
+ spin_unlock(&gmap->guest_table_lock);
+ return pmdp;
+}
+
+/**
+ * gmap_pmd_op_end - release the guest_table_lock if needed
+ * @gmap: pointer to the guest mapping meta data structure
+ * @pmdp: pointer to the pmd
+ */
+static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
+{
+ if (pmd_large(*pmdp))
+ spin_unlock(&gmap->guest_table_lock);
+}
+
+/*
+ * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
+ * @pmdp: pointer to the pmd to be protected
+ * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
+ * @bits: notification bits to set
+ *
+ * Returns:
+ * 0 if successfully protected
+ * -EAGAIN if a fixup is needed
+ * -EINVAL if unsupported notifier bits have been specified
+ *
+ * Expected to be called with sg->mm->mmap_sem held in read mode
+ * and with the guest_table_lock held.
+ */
+static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
+ pmd_t *pmdp, int prot, unsigned long bits)
+{
+ int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
+ int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
+ pmd_t new = *pmdp;
+
+ /* Fixup needed */
+ if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
+ return -EAGAIN;
+
+ if (prot == PROT_NONE && !pmd_i) {
+ pmd_val(new) |= _SEGMENT_ENTRY_INVALID;
+ gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
+ }
+
+ if (prot == PROT_READ && !pmd_p) {
+ pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID;
+ pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
+ gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
+ }
+
+ if (bits & GMAP_NOTIFY_MPROT)
+ pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;
+
+ /* Shadow GMAP protection needs split PMDs */
+ if (bits & GMAP_NOTIFY_SHADOW)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * gmap_protect_pte - remove access rights to memory and set pgste bits
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ * @pmdp: pointer to the pmd associated with the pte
+ * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
+ * @bits: notification bits to set
+ *
+ * Returns 0 if successfully protected, -ENOMEM if out of memory and
+ * -EAGAIN if a fixup is needed.
+ *
+ * Expected to be called with sg->mm->mmap_sem held in read mode
+ */
+static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
+ pmd_t *pmdp, int prot, unsigned long bits)
+{
+ int rc;
+ pte_t *ptep;
+ spinlock_t *ptl = NULL;
+ unsigned long pbits = 0;
+
+ if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+ return -EAGAIN;
+
+ ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
+ if (!ptep)
+ return -ENOMEM;
+
+ pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
+ pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
+ /* Protect and unlock. */
+ rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
+ gmap_pte_op_end(ptl);
+ return rc;
}
/*
@@ -883,30 +1028,45 @@ static void gmap_pte_op_end(spinlock_t *ptl)
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
unsigned long len, int prot, unsigned long bits)
{
- unsigned long vmaddr;
- spinlock_t *ptl;
- pte_t *ptep;
+ unsigned long vmaddr, dist;
+ pmd_t *pmdp;
int rc;
BUG_ON(gmap_is_shadow(gmap));
while (len) {
rc = -EAGAIN;
- ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
- if (ptep) {
- rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
- gmap_pte_op_end(ptl);
+ pmdp = gmap_pmd_op_walk(gmap, gaddr);
+ if (pmdp) {
+ if (!pmd_large(*pmdp)) {
+ rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
+ bits);
+ if (!rc) {
+ len -= PAGE_SIZE;
+ gaddr += PAGE_SIZE;
+ }
+ } else {
+ rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
+ bits);
+ if (!rc) {
+ dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
+ len = len < dist ? 0 : len - dist;
+ gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
+ }
+ }
+ gmap_pmd_op_end(gmap, pmdp);
}
if (rc) {
+ if (rc == -EINVAL)
+ return rc;
+
+ /* -EAGAIN, fixup of userspace mm and gmap */
vmaddr = __gmap_translate(gmap, gaddr);
if (IS_ERR_VALUE(vmaddr))
return vmaddr;
rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
if (rc)
return rc;
- continue;
}
- gaddr += PAGE_SIZE;
- len -= PAGE_SIZE;
}
return 0;
}
@@ -935,7 +1095,7 @@ int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
if (!MACHINE_HAS_ESOP && prot == PROT_READ)
return -EINVAL;
down_read(&gmap->mm->mmap_sem);
- rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
+ rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
up_read(&gmap->mm->mmap_sem);
return rc;
}
@@ -1474,6 +1634,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
unsigned long limit;
int rc;
+ BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
BUG_ON(gmap_is_shadow(parent));
spin_lock(&parent->shadow_lock);
sg = gmap_find_shadow(parent, asce, edat_level);
@@ -1526,7 +1687,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
down_read(&parent->mm->mmap_sem);
rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
- PROT_READ, PGSTE_VSIE_BIT);
+ PROT_READ, GMAP_NOTIFY_SHADOW);
up_read(&parent->mm->mmap_sem);
spin_lock(&parent->shadow_lock);
new->initialized = true;
@@ -2092,6 +2253,225 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
}
EXPORT_SYMBOL_GPL(ptep_notify);
+static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
+ unsigned long gaddr)
+{
+ pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
+ gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
+}
+
+/**
+ * gmap_pmdp_xchg - exchange a gmap pmd with another
+ * @gmap: pointer to the guest address space structure
+ * @pmdp: pointer to the pmd entry
+ * @new: replacement entry
+ * @gaddr: the affected guest address
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
+ unsigned long gaddr)
+{
+ gaddr &= HPAGE_MASK;
+ pmdp_notify_gmap(gmap, pmdp, gaddr);
+ pmd_val(new) &= ~_SEGMENT_ENTRY_GMAP_IN;
+ if (MACHINE_HAS_TLB_GUEST)
+ __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
+ IDTE_GLOBAL);
+ else if (MACHINE_HAS_IDTE)
+ __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
+ else
+ __pmdp_csp(pmdp);
+ *pmdp = new;
+}
+
+static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
+ int purge)
+{
+ pmd_t *pmdp;
+ struct gmap *gmap;
+ unsigned long gaddr;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+ spin_lock(&gmap->guest_table_lock);
+ pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
+ vmaddr >> PMD_SHIFT);
+ if (pmdp) {
+ gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
+ pmdp_notify_gmap(gmap, pmdp, gaddr);
+ WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+ _SEGMENT_ENTRY_GMAP_UC));
+ if (purge)
+ __pmdp_csp(pmdp);
+ pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+ }
+ spin_unlock(&gmap->guest_table_lock);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
+ * flushing
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
+{
+ gmap_pmdp_clear(mm, vmaddr, 0);
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
+
+/**
+ * gmap_pmdp_csp - csp all affected guest pmd entries
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
+{
+ gmap_pmdp_clear(mm, vmaddr, 1);
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
+
+/**
+ * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
+{
+ unsigned long *entry, gaddr;
+ struct gmap *gmap;
+ pmd_t *pmdp;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+ spin_lock(&gmap->guest_table_lock);
+ entry = radix_tree_delete(&gmap->host_to_guest,
+ vmaddr >> PMD_SHIFT);
+ if (entry) {
+ pmdp = (pmd_t *)entry;
+ gaddr = __gmap_segment_gaddr(entry);
+ pmdp_notify_gmap(gmap, pmdp, gaddr);
+ WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+ _SEGMENT_ENTRY_GMAP_UC));
+ if (MACHINE_HAS_TLB_GUEST)
+ __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
+ gmap->asce, IDTE_LOCAL);
+ else if (MACHINE_HAS_IDTE)
+ __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
+ *entry = _SEGMENT_ENTRY_EMPTY;
+ }
+ spin_unlock(&gmap->guest_table_lock);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
+
+/**
+ * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
+{
+ unsigned long *entry, gaddr;
+ struct gmap *gmap;
+ pmd_t *pmdp;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+ spin_lock(&gmap->guest_table_lock);
+ entry = radix_tree_delete(&gmap->host_to_guest,
+ vmaddr >> PMD_SHIFT);
+ if (entry) {
+ pmdp = (pmd_t *)entry;
+ gaddr = __gmap_segment_gaddr(entry);
+ pmdp_notify_gmap(gmap, pmdp, gaddr);
+ WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+ _SEGMENT_ENTRY_GMAP_UC));
+ if (MACHINE_HAS_TLB_GUEST)
+ __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
+ gmap->asce, IDTE_GLOBAL);
+ else if (MACHINE_HAS_IDTE)
+ __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
+ else
+ __pmdp_csp(pmdp);
+ *entry = _SEGMENT_ENTRY_EMPTY;
+ }
+ spin_unlock(&gmap->guest_table_lock);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
+
+/**
+ * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
+ * @gmap: pointer to guest address space
+ * @pmdp: pointer to the pmd to be tested
+ * @gaddr: virtual address in the guest address space
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
+ unsigned long gaddr)
+{
+ if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+ return false;
+
+ /* Already-protected memory that did not change is clean */
+ if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
+ !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
+ return false;
+
+ /* Clear UC indication and reset protection */
+ pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
+ gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
+ return true;
+}
+
+/**
+ * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
+ * @gmap: pointer to guest address space
+ * @bitmap: dirty bitmap for this pmd
+ * @gaddr: virtual address in the guest address space
+ * @vmaddr: virtual address in the host address space
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
+ unsigned long gaddr, unsigned long vmaddr)
+{
+ int i;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ spinlock_t *ptl;
+
+ pmdp = gmap_pmd_op_walk(gmap, gaddr);
+ if (!pmdp)
+ return;
+
+ if (pmd_large(*pmdp)) {
+ if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
+ bitmap_fill(bitmap, _PAGE_ENTRIES);
+ } else {
+ for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
+ ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
+ if (!ptep)
+ continue;
+ if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
+ set_bit(i, bitmap);
+ spin_unlock(ptl);
+ }
+ }
+ gmap_pmd_op_end(gmap, pmdp);
+}
+EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
+
static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -2168,17 +2548,45 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
* Enable storage key handling from now on and initialize the storage
* keys with the default key.
*/
-static int __s390_enable_skey(pte_t *pte, unsigned long addr,
- unsigned long next, struct mm_walk *walk)
+static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
{
/* Clear storage key */
ptep_zap_key(walk->mm, addr, pte);
return 0;
}
+static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
+ unsigned long hmask, unsigned long next,
+ struct mm_walk *walk)
+{
+ pmd_t *pmd = (pmd_t *)pte;
+ unsigned long start, end;
+ struct page *page = pmd_page(*pmd);
+
+ /*
+ * The write check makes sure we do not set a key on shared
+ * memory. This is needed as the walker does not differentiate
+ * between actual guest memory and the process executable or
+ * shared libraries.
+ */
+ if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
+ !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
+ return 0;
+
+ start = pmd_val(*pmd) & HPAGE_MASK;
+ end = start + HPAGE_SIZE - 1;
+ __storage_key_init_range(start, end);
+ set_bit(PG_arch_1, &page->flags);
+ return 0;
+}
+
int s390_enable_skey(void)
{
- struct mm_walk walk = { .pte_entry = __s390_enable_skey };
+ struct mm_walk walk = {
+ .hugetlb_entry = __s390_enable_skey_hugetlb,
+ .pte_entry = __s390_enable_skey_pte,
+ };
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int rc = 0;
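The split into a pte_entry and a hugetlb_entry callback follows the generic mm_walk contract: walk_page_range() dispatches hugetlbfs VMAs to hugetlb_entry and regular mappings to pte_entry. A reduced sketch of driving such a walk over one VMA with no-op callbacks, using the 4.18-era API where start and end are explicit arguments (the caller must hold mm->mmap_sem):

	#include <linux/mm.h>

	static int ex_pte(pte_t *pte, unsigned long addr,
			  unsigned long next, struct mm_walk *walk)
	{
		return 0;	/* handle one 4k entry */
	}

	static int ex_hugetlb(pte_t *pte, unsigned long addr, unsigned long hmask,
			      unsigned long next, struct mm_walk *walk)
	{
		return 0;	/* handle one hugetlb entry */
	}

	static void walk_one_vma(struct vm_area_struct *vma)
	{
		struct mm_walk walk = {
			.pte_entry = ex_pte,
			.hugetlb_entry = ex_hugetlb,
			.mm = vma->vm_mm,
		};

		walk_page_range(vma->vm_start, vma->vm_end, &walk);
	}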
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index e804090f4470..b0246c705a19 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -123,6 +123,29 @@ static inline pte_t __rste_to_pte(unsigned long rste)
return pte;
}
+static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
+{
+ struct page *page;
+ unsigned long size, paddr;
+
+ if (!mm_uses_skeys(mm) ||
+ rste & _SEGMENT_ENTRY_INVALID)
+ return;
+
+ if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
+ page = pud_page(__pud(rste));
+ size = PUD_SIZE;
+ paddr = rste & PUD_MASK;
+ } else {
+ page = pmd_page(__pmd(rste));
+ size = PMD_SIZE;
+ paddr = rste & PMD_MASK;
+ }
+
+ if (!test_and_set_bit(PG_arch_1, &page->flags))
+ __storage_key_init_range(paddr, paddr + size - 1);
+}
+
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
@@ -137,6 +160,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
else
rste |= _SEGMENT_ENTRY_LARGE;
+ clear_huge_pte_skeys(mm, rste);
pte_val(*ptep) = rste;
}
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index 382153ff17e3..dc3cede7f2ec 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -271,7 +271,7 @@ void arch_set_page_states(int make_stable)
list_for_each(l, &zone->free_area[order].free_list[t]) {
page = list_entry(l, struct page, lru);
if (make_stable)
- set_page_stable_dat(page, 0);
+ set_page_stable_dat(page, order);
else
set_page_unused(page, order);
}
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index c44171588d08..f8c6faab41f4 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -14,7 +14,7 @@
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
- asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
+ asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],1,0"
: [addr] "+a" (addr) : [skey] "d" (skey));
return addr;
}
@@ -23,8 +23,6 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
{
unsigned long boundary, size;
- if (!PAGE_DEFAULT_KEY)
- return;
while (start < end) {
if (MACHINE_HAS_EDAT1) {
/* set storage keys for a 1MB frame */
@@ -37,7 +35,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
continue;
}
}
- page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
+ page_set_storage_key(start, PAGE_DEFAULT_KEY, 1);
start += PAGE_SIZE;
}
}
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index e3bd5627afef..76d89ee8b428 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -28,7 +28,7 @@ static struct ctl_table page_table_sysctl[] = {
.data = &page_table_allocate_pgste,
.maxlen = sizeof(int),
.mode = S_IRUGO | S_IWUSR,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &page_table_allocate_pgste_min,
.extra2 = &page_table_allocate_pgste_max,
},
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 301e466e4263..f2cc7da473e4 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -347,18 +347,27 @@ static inline void pmdp_idte_local(struct mm_struct *mm,
mm->context.asce, IDTE_LOCAL);
else
__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+ gmap_pmdp_idte_local(mm, addr);
}
static inline void pmdp_idte_global(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
- if (MACHINE_HAS_TLB_GUEST)
+ if (MACHINE_HAS_TLB_GUEST) {
__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
mm->context.asce, IDTE_GLOBAL);
- else if (MACHINE_HAS_IDTE)
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+ gmap_pmdp_idte_global(mm, addr);
+ } else if (MACHINE_HAS_IDTE) {
__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
- else
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+ gmap_pmdp_idte_global(mm, addr);
+ } else {
__pmdp_csp(pmdp);
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+ gmap_pmdp_csp(mm, addr);
+ }
}
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
@@ -392,6 +401,8 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
cpumask_of(smp_processor_id()))) {
pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
mm->context.flush_mm = 1;
+ if (mm_has_pgste(mm))
+ gmap_pmdp_invalidate(mm, addr);
} else {
pmdp_idte_global(mm, addr, pmdp);
}
@@ -399,6 +410,24 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
return old;
}
+static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ pgd = pgd_offset(mm, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ if (!p4d)
+ return NULL;
+ pud = pud_alloc(mm, p4d, addr);
+ if (!pud)
+ return NULL;
+ pmd = pmd_alloc(mm, pud, addr);
+ return pmd;
+}
+
pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t new)
{
@@ -693,40 +722,14 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
/*
* Test and reset if a guest page is dirty
*/
-bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
+bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
{
- spinlock_t *ptl;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
pgste_t pgste;
- pte_t *ptep;
pte_t pte;
bool dirty;
int nodat;
- pgd = pgd_offset(mm, addr);
- p4d = p4d_alloc(mm, pgd, addr);
- if (!p4d)
- return false;
- pud = pud_alloc(mm, p4d, addr);
- if (!pud)
- return false;
- pmd = pmd_alloc(mm, pud, addr);
- if (!pmd)
- return false;
- /* We can't run guests backed by huge pages, but userspace can
- * still set them up and then try to migrate them without any
- * migration support.
- */
- if (pmd_large(*pmd))
- return true;
-
- ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
- if (unlikely(!ptep))
- return false;
-
pgste = pgste_get_lock(ptep);
dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
pgste_val(pgste) &= ~PGSTE_UC_BIT;
@@ -742,21 +745,43 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
*ptep = pte;
}
pgste_set_unlock(ptep, pgste);
-
- spin_unlock(ptl);
return dirty;
}
-EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);
+EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char key, bool nq)
{
- unsigned long keyul;
+ unsigned long keyul, paddr;
spinlock_t *ptl;
pgste_t old, new;
+ pmd_t *pmdp;
pte_t *ptep;
- ptep = get_locked_pte(mm, addr, &ptl);
+ pmdp = pmd_alloc_map(mm, addr);
+ if (unlikely(!pmdp))
+ return -EFAULT;
+
+ ptl = pmd_lock(mm, pmdp);
+ if (!pmd_present(*pmdp)) {
+ spin_unlock(ptl);
+ return -EFAULT;
+ }
+
+ if (pmd_large(*pmdp)) {
+ paddr = pmd_val(*pmdp) & HPAGE_MASK;
+ paddr |= addr & ~HPAGE_MASK;
+ /*
+ * Huge pmds need quiescing operations; they are
+ * always mapped.
+ */
+ page_set_storage_key(paddr, key, 1);
+ spin_unlock(ptl);
+ return 0;
+ }
+ spin_unlock(ptl);
+
+ ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
if (unlikely(!ptep))
return -EFAULT;
@@ -767,14 +792,14 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
- unsigned long address, bits, skey;
+ unsigned long bits, skey;
- address = pte_val(*ptep) & PAGE_MASK;
- skey = (unsigned long) page_get_storage_key(address);
+ paddr = pte_val(*ptep) & PAGE_MASK;
+ skey = (unsigned long) page_get_storage_key(paddr);
bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
/* Set storage key ACC and FP */
- page_set_storage_key(address, skey, !nq);
+ page_set_storage_key(paddr, skey, !nq);
/* Merge host changed & referenced into pgste */
pgste_val(new) |= bits << 52;
}
@@ -830,11 +855,32 @@ EXPORT_SYMBOL(cond_set_guest_storage_key);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
spinlock_t *ptl;
+ unsigned long paddr;
pgste_t old, new;
+ pmd_t *pmdp;
pte_t *ptep;
int cc = 0;
- ptep = get_locked_pte(mm, addr, &ptl);
+ pmdp = pmd_alloc_map(mm, addr);
+ if (unlikely(!pmdp))
+ return -EFAULT;
+
+ ptl = pmd_lock(mm, pmdp);
+ if (!pmd_present(*pmdp)) {
+ spin_unlock(ptl);
+ return -EFAULT;
+ }
+
+ if (pmd_large(*pmdp)) {
+ paddr = pmd_val(*pmdp) & HPAGE_MASK;
+ paddr |= addr & ~HPAGE_MASK;
+ cc = page_reset_referenced(paddr);
+ spin_unlock(ptl);
+ return cc;
+ }
+ spin_unlock(ptl);
+
+ ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
if (unlikely(!ptep))
return -EFAULT;
@@ -843,7 +889,8 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
pgste_val(new) &= ~PGSTE_GR_BIT;
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
- cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
+ paddr = pte_val(*ptep) & PAGE_MASK;
+ cc = page_reset_referenced(paddr);
/* Merge real referenced bit into host-set */
pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
}
@@ -862,18 +909,42 @@ EXPORT_SYMBOL(reset_guest_reference_bit);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char *key)
{
+ unsigned long paddr;
spinlock_t *ptl;
pgste_t pgste;
+ pmd_t *pmdp;
pte_t *ptep;
- ptep = get_locked_pte(mm, addr, &ptl);
+ pmdp = pmd_alloc_map(mm, addr);
+ if (unlikely(!pmdp))
+ return -EFAULT;
+
+ ptl = pmd_lock(mm, pmdp);
+ if (!pmd_present(*pmdp)) {
+ /* Not yet mapped memory has a zero key */
+ spin_unlock(ptl);
+ *key = 0;
+ return 0;
+ }
+
+ if (pmd_large(*pmdp)) {
+ paddr = pmd_val(*pmdp) & HPAGE_MASK;
+ paddr |= addr & ~HPAGE_MASK;
+ *key = page_get_storage_key(paddr);
+ spin_unlock(ptl);
+ return 0;
+ }
+ spin_unlock(ptl);
+
+ ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
if (unlikely(!ptep))
return -EFAULT;
pgste = pgste_get_lock(ptep);
*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
+ paddr = pte_val(*ptep) & PAGE_MASK;
if (!(pte_val(*ptep) & _PAGE_INVALID))
- *key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
+ *key = page_get_storage_key(paddr);
/* Reflect guest's logical view, not physical */
*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
pgste_set_unlock(ptep, pgste);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 5f0234ec8038..d7052cbe984f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -485,8 +485,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
/* br %r1 */
_EMIT2(0x07f1);
} else {
- /* larl %r1,.+14 */
- EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
/* ex 0,S390_lowcore.br_r1_trampoline */
EMIT4_DISP(0x44000000, REG_0, REG_0,
offsetof(struct lowcore, br_r1_trampoline));
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index 06a80434cfe6..5bd374491f94 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -134,6 +134,8 @@ void __init numa_setup(void)
{
pr_info("NUMA mode: %s\n", mode->name);
nodes_clear(node_possible_map);
+ /* Initially attach all possible CPUs to node 0. */
+ cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
if (mode->setup)
mode->setup();
numa_setup_memory();
@@ -141,20 +143,6 @@ void __init numa_setup(void)
}
/*
- * numa_init_early() - Initialization initcall
- *
- * This runs when only one CPU is online and before the first
- * topology update is called for by the scheduler.
- */
-static int __init numa_init_early(void)
-{
- /* Attach all possible CPUs to node 0 for now. */
- cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
- return 0;
-}
-early_initcall(numa_init_early);
-
-/*
* numa_init_late() - Initialization initcall
*
* Register NUMA nodes.
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index b482e95b6249..57f7cdac70a3 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -48,6 +48,10 @@ static char *pci_fmt2_names[] = {
"Maximum work units",
};
+static char *pci_fmt3_names[] = {
+ "Transmitted bytes",
+};
+
static char *pci_sw_names[] = {
"Allocated pages",
"Mapped pages",
@@ -112,6 +116,10 @@ static int pci_perf_show(struct seq_file *m, void *v)
pci_fmb_show(m, pci_fmt2_names, ARRAY_SIZE(pci_fmt2_names),
&zdev->fmb->fmt2.consumed_work_units);
break;
+ case 3:
+ pci_fmb_show(m, pci_fmt3_names, ARRAY_SIZE(pci_fmt3_names),
+ &zdev->fmb->fmt3.tx_bytes);
+ break;
default:
seq_puts(m, "Unknown format\n");
}
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
index 1ace023cbdce..ce6a3f75065b 100644
--- a/arch/s390/purgatory/Makefile
+++ b/arch/s390/purgatory/Makefile
@@ -7,13 +7,13 @@ purgatory-y := head.o purgatory.o string.o sha256.o mem.o
targets += $(purgatory-y) purgatory.ro kexec-purgatory.c
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
-$(obj)/sha256.o: $(srctree)/lib/sha256.c
+$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
$(call if_changed_rule,cc_o_c)
-$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S
+$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
$(call if_changed_rule,as_o_S)
-$(obj)/string.o: $(srctree)/arch/s390/lib/string.c
+$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE
$(call if_changed_rule,cc_o_c)
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib
@@ -21,15 +21,15 @@ LDFLAGS_purgatory.ro += -z nodefaultlib
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
-KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float
+KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
-CMD_BIN2C = $(objtree)/scripts/basic/bin2c
quiet_cmd_bin2c = BIN2C $@
- cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@
+ cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
$(call if_changed,bin2c)
diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S
index 660c96a05a9b..2e3707b12edd 100644
--- a/arch/s390/purgatory/head.S
+++ b/arch/s390/purgatory/head.S
@@ -243,33 +243,26 @@ gprregs:
.quad 0
.endr
-purgatory_sha256_digest:
- .global purgatory_sha256_digest
- .rept 32 /* SHA256_DIGEST_SIZE */
- .byte 0
- .endr
-
-purgatory_sha_regions:
- .global purgatory_sha_regions
- .rept 16 * __KEXEC_SHA_REGION_SIZE /* KEXEC_SEGMENTS_MAX */
- .byte 0
- .endr
-
-kernel_entry:
- .global kernel_entry
- .quad 0
-
-kernel_type:
- .global kernel_type
- .quad 0
-
-crash_start:
- .global crash_start
- .quad 0
+/* Macro to define a global variable with name and size (in bytes) to be
+ * shared with C code.
+ *
+ * Add the .size and .type attributes to satisfy checks on the Elf_Sym during
+ * purgatory load.
+ */
+.macro GLOBAL_VARIABLE name,size
+\name:
+ .global \name
+ .size \name,\size
+ .type \name,object
+ .skip \size,0
+.endm
-crash_size:
- .global crash_size
- .quad 0
+GLOBAL_VARIABLE purgatory_sha256_digest,32
+GLOBAL_VARIABLE purgatory_sha_regions,16*__KEXEC_SHA_REGION_SIZE
+GLOBAL_VARIABLE kernel_entry,8
+GLOBAL_VARIABLE kernel_type,8
+GLOBAL_VARIABLE crash_start,8
+GLOBAL_VARIABLE crash_size,8
.align PAGE_SIZE
stack:
diff --git a/arch/s390/purgatory/purgatory.c b/arch/s390/purgatory/purgatory.c
index 4e2beb3c29b7..3528e6da4e87 100644
--- a/arch/s390/purgatory/purgatory.c
+++ b/arch/s390/purgatory/purgatory.c
@@ -12,15 +12,6 @@
#include <linux/string.h>
#include <asm/purgatory.h>
-struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX];
-u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE];
-
-u64 kernel_entry;
-u64 kernel_type;
-
-u64 crash_start;
-u64 crash_size;
-
int verify_sha256_digest(void)
{
struct kexec_sha_region *ptr, *end;
diff --git a/arch/s390/scripts/Makefile.chkbss b/arch/s390/scripts/Makefile.chkbss
index d92f2d94a5d9..9bba2c14e0ca 100644
--- a/arch/s390/scripts/Makefile.chkbss
+++ b/arch/s390/scripts/Makefile.chkbss
@@ -2,13 +2,22 @@
quiet_cmd_chkbss = CHKBSS $<
define cmd_chkbss
+ rm -f $@; \
if ! $(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \
echo "error: $< .bss section is not empty" >&2; exit 1; \
fi; \
touch $@;
endef
-$(obj)/built-in.a: $(patsubst %, $(obj)/%.chkbss, $(chkbss))
+chkbss-target ?= $(obj)/built-in.a
+ifneq (,$(findstring /,$(chkbss)))
+chkbss-files := $(patsubst %, %.chkbss, $(chkbss))
+else
+chkbss-files := $(patsubst %, $(obj)/%.chkbss, $(chkbss))
+endif
+
+$(chkbss-target): $(chkbss-files)
+targets += $(notdir $(chkbss-files))
%.o.chkbss: %.o
$(call cmd,chkbss)
diff --git a/arch/s390/tools/gen_opcode_table.c b/arch/s390/tools/gen_opcode_table.c
index 259aa0680d1a..a1bc02b29c81 100644
--- a/arch/s390/tools/gen_opcode_table.c
+++ b/arch/s390/tools/gen_opcode_table.c
@@ -257,7 +257,7 @@ static void add_to_group(struct gen_opcode *desc, struct insn *insn, int offset)
if (!desc->group)
exit(EXIT_FAILURE);
group = &desc->group[desc->nr_groups - 1];
- strncpy(group->opcode, insn->opcode, 2);
+ memcpy(group->opcode, insn->opcode, 2);
group->type = insn->type;
group->offset = offset;
group->count = 1;
@@ -283,7 +283,7 @@ static void print_opcode_table(struct gen_opcode *desc)
continue;
add_to_group(desc, insn, offset);
if (strncmp(opcode, insn->opcode, 2)) {
- strncpy(opcode, insn->opcode, 2);
+ memcpy(opcode, insn->opcode, 2);
printf("\t/* %.2s */ \\\n", opcode);
}
print_opcode(insn, offset);
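The strncpy()-to-memcpy() change is deliberate: opcode is a fixed two-byte field that is never NUL-terminated, and for such fields memcpy() states the intent while strncpy(dst, src, 2) draws -Wstringop-truncation warnings from newer compilers. A small userspace illustration (the struct is hypothetical; the tool itself is ordinary host C):

	#include <stdio.h>
	#include <string.h>

	struct insn_ex {
		char opcode[2];	/* two raw bytes, no terminating NUL */
	};

	int main(void)
	{
		struct insn_ex g;

		memcpy(g.opcode, "b2", 2);	/* exactly two bytes, no NUL */
		printf("%.2s\n", g.opcode);	/* explicit precision, no NUL needed */
		return 0;
	}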
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index dd4f3d3e644f..1fb7b6d72baf 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -51,7 +51,6 @@ config SUPERH
select HAVE_ARCH_AUDITSYSCALL
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_NMI
- select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
help
@@ -159,19 +158,18 @@ config SWAP_IO_SPACE
bool
config DMA_COHERENT
+ select DMA_DIRECT_OPS
bool
config DMA_NONCOHERENT
def_bool !DMA_COHERENT
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select DMA_NONCOHERENT_OPS
config PGTABLE_LEVELS
default 3 if X2TLB
default 2
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
menu "System type"
#
@@ -713,8 +711,6 @@ config HOTPLUG_CPU
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
-source "kernel/Kconfig.preempt"
-
config GUSA
def_bool y
depends on !SMP && SUPERH32
@@ -882,12 +878,6 @@ source "drivers/pcmcia/Kconfig"
endmenu
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
menu "Power management options (EXPERIMENTAL)"
source "kernel/power/Kconfig"
@@ -895,17 +885,3 @@ source "kernel/power/Kconfig"
source "drivers/cpuidle/Kconfig"
endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/sh/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index d0767672640d..010b6c33bbba 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-source "lib/Kconfig.debug"
-
config SH_STANDARD_BIOS
bool "Use LinuxSH standard BIOS"
depends on SUPERH32
@@ -88,5 +85,3 @@ config MCOUNT
def_bool y
depends on SUPERH32
depends on STACK_DEBUG || FUNCTION_TRACER
-
-endmenu
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index de8393cb7313..8f234d0435aa 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -1,40 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas - AP-325RXA
* (Compatible with Algo System ., LTD. - AP-320A)
*
* Copyright (C) 2008 Renesas Solutions Corp.
* Author : Yusuke Goda <goda.yuske@renesas.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
-#include <linux/init.h>
+#include <asm/clock.h>
+#include <asm/io.h>
+#include <asm/suspend.h>
+
+#include <cpu/sh7723.h>
+
+#include <linux/clkdev.h>
+#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
+#include <linux/memblock.h>
+#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/sh_flctl.h>
-#include <linux/mfd/tmio.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
+#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
+#include <linux/sh_intc.h>
#include <linux/smsc911x.h>
-#include <linux/gpio.h>
#include <linux/videodev2.h>
-#include <linux/sh_intc.h>
+
+#include <media/drv-intf/renesas-ceu.h>
#include <media/i2c/ov772x.h>
-#include <media/soc_camera.h>
-#include <linux/platform_data/media/soc_camera_platform.h>
-#include <media/drv-intf/sh_mobile_ceu.h>
+
#include <video/sh_mobile_lcdc.h>
-#include <asm/io.h>
-#include <asm/clock.h>
-#include <asm/suspend.h>
-#include <cpu/sh7723.h>
+
+#define CEU_BUFFER_MEMORY_SIZE (4 << 20)
+static phys_addr_t ceu_dma_membase;
/* Dummy supplies, where voltage doesn't matter */
static struct regulator_consumer_supply dummy_supplies[] = {
@@ -253,150 +258,25 @@ static struct platform_device lcdc_device = {
},
};
-static void camera_power(int val)
-{
- gpio_set_value(GPIO_PTZ5, val); /* RST_CAM/RSTB */
- mdelay(10);
-}
-
-#ifdef CONFIG_I2C
-/* support for the old ncm03j camera */
-static unsigned char camera_ncm03j_magic[] =
-{
- 0x87, 0x00, 0x88, 0x08, 0x89, 0x01, 0x8A, 0xE8,
- 0x1D, 0x00, 0x1E, 0x8A, 0x21, 0x00, 0x33, 0x36,
- 0x36, 0x60, 0x37, 0x08, 0x3B, 0x31, 0x44, 0x0F,
- 0x46, 0xF0, 0x4B, 0x28, 0x4C, 0x21, 0x4D, 0x55,
- 0x4E, 0x1B, 0x4F, 0xC7, 0x50, 0xFC, 0x51, 0x12,
- 0x58, 0x02, 0x66, 0xC0, 0x67, 0x46, 0x6B, 0xA0,
- 0x6C, 0x34, 0x7E, 0x25, 0x7F, 0x25, 0x8D, 0x0F,
- 0x92, 0x40, 0x93, 0x04, 0x94, 0x26, 0x95, 0x0A,
- 0x99, 0x03, 0x9A, 0xF0, 0x9B, 0x14, 0x9D, 0x7A,
- 0xC5, 0x02, 0xD6, 0x07, 0x59, 0x00, 0x5A, 0x1A,
- 0x5B, 0x2A, 0x5C, 0x37, 0x5D, 0x42, 0x5E, 0x56,
- 0xC8, 0x00, 0xC9, 0x1A, 0xCA, 0x2A, 0xCB, 0x37,
- 0xCC, 0x42, 0xCD, 0x56, 0xCE, 0x00, 0xCF, 0x1A,
- 0xD0, 0x2A, 0xD1, 0x37, 0xD2, 0x42, 0xD3, 0x56,
- 0x5F, 0x68, 0x60, 0x87, 0x61, 0xA3, 0x62, 0xBC,
- 0x63, 0xD4, 0x64, 0xEA, 0xD6, 0x0F,
-};
-
-static int camera_probe(void)
-{
- struct i2c_adapter *a = i2c_get_adapter(0);
- struct i2c_msg msg;
- int ret;
-
- if (!a)
- return -ENODEV;
-
- camera_power(1);
- msg.addr = 0x6e;
- msg.buf = camera_ncm03j_magic;
- msg.len = 2;
- msg.flags = 0;
- ret = i2c_transfer(a, &msg, 1);
- camera_power(0);
-
- return ret;
-}
-
-static int camera_set_capture(struct soc_camera_platform_info *info,
- int enable)
-{
- struct i2c_adapter *a = i2c_get_adapter(0);
- struct i2c_msg msg;
- int ret = 0;
- int i;
-
- camera_power(0);
- if (!enable)
- return 0; /* no disable for now */
-
- camera_power(1);
- for (i = 0; i < ARRAY_SIZE(camera_ncm03j_magic); i += 2) {
- u_int8_t buf[8];
-
- msg.addr = 0x6e;
- msg.buf = buf;
- msg.len = 2;
- msg.flags = 0;
-
- buf[0] = camera_ncm03j_magic[i];
- buf[1] = camera_ncm03j_magic[i + 1];
-
- ret = (ret < 0) ? ret : i2c_transfer(a, &msg, 1);
- }
-
- return ret;
-}
-
-static int ap325rxa_camera_add(struct soc_camera_device *icd);
-static void ap325rxa_camera_del(struct soc_camera_device *icd);
-
-static struct soc_camera_platform_info camera_info = {
- .format_name = "UYVY",
- .format_depth = 16,
- .format = {
- .code = MEDIA_BUS_FMT_UYVY8_2X8,
- .colorspace = V4L2_COLORSPACE_SMPTE170M,
- .field = V4L2_FIELD_NONE,
- .width = 640,
- .height = 480,
+/* Powerdown/reset gpios for CEU image sensors */
+static struct gpiod_lookup_table ov7725_gpios = {
+ .dev_id = "0-0021",
+ .table = {
+ GPIO_LOOKUP("sh7723_pfc", GPIO_PTZ5, "reset", GPIO_ACTIVE_LOW),
},
- .mbus_param = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
- V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
- V4L2_MBUS_DATA_ACTIVE_HIGH,
- .mbus_type = V4L2_MBUS_PARALLEL,
- .set_capture = camera_set_capture,
-};
-
-static struct soc_camera_link camera_link = {
- .bus_id = 0,
- .add_device = ap325rxa_camera_add,
- .del_device = ap325rxa_camera_del,
- .module_name = "soc_camera_platform",
- .priv = &camera_info,
};
-static struct platform_device *camera_device;
-
-static void ap325rxa_camera_release(struct device *dev)
-{
- soc_camera_platform_release(&camera_device);
-}
-
-static int ap325rxa_camera_add(struct soc_camera_device *icd)
-{
- int ret = soc_camera_platform_add(icd, &camera_device, &camera_link,
- ap325rxa_camera_release, 0);
- if (ret < 0)
- return ret;
-
- ret = camera_probe();
- if (ret < 0)
- soc_camera_platform_del(icd, camera_device, &camera_link);
-
- return ret;
-}
-
-static void ap325rxa_camera_del(struct soc_camera_device *icd)
-{
- soc_camera_platform_del(icd, camera_device, &camera_link);
-}
-#endif /* CONFIG_I2C */
-
-static int ov7725_power(struct device *dev, int mode)
-{
- camera_power(0);
- if (mode)
- camera_power(1);
-
- return 0;
-}
-
-static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
- .flags = SH_CEU_FLAG_USE_8BIT_BUS,
+static struct ceu_platform_data ceu0_pdata = {
+ .num_subdevs = 1,
+ .subdevs = {
+ { /* [0] = ov7725 */
+ .flags = 0,
+ .bus_width = 8,
+ .bus_shift = 0,
+ .i2c_adapter_id = 0,
+ .i2c_address = 0x21,
+ },
+ },
};
static struct resource ceu_resources[] = {
@@ -410,18 +290,15 @@ static struct resource ceu_resources[] = {
.start = evt2irq(0x880),
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* place holder for contiguous memory */
- },
};
-static struct platform_device ceu_device = {
- .name = "sh_mobile_ceu",
- .id = 0, /* "ceu0" clock */
+static struct platform_device ap325rxa_ceu_device = {
+ .name = "renesas-ceu",
+ .id = 0, /* "ceu.0" clock */
.num_resources = ARRAY_SIZE(ceu_resources),
.resource = ceu_resources,
.dev = {
- .platform_data = &sh_mobile_ceu_info,
+ .platform_data = &ceu0_pdata,
},
};
@@ -488,44 +365,18 @@ static struct platform_device sdhi1_cn7_device = {
},
};
-static struct i2c_board_info __initdata ap325rxa_i2c_devices[] = {
- {
- I2C_BOARD_INFO("pcf8563", 0x51),
- },
-};
-
-static struct i2c_board_info ap325rxa_i2c_camera[] = {
- {
- I2C_BOARD_INFO("ov772x", 0x21),
- },
-};
-
static struct ov772x_camera_info ov7725_info = {
.flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP,
.edgectrl = OV772X_AUTO_EDGECTRL(0xf, 0),
};
-static struct soc_camera_link ov7725_link = {
- .bus_id = 0,
- .power = ov7725_power,
- .board_info = &ap325rxa_i2c_camera[0],
- .i2c_adapter_id = 0,
- .priv = &ov7725_info,
-};
-
-static struct platform_device ap325rxa_camera[] = {
+static struct i2c_board_info ap325rxa_i2c_devices[] __initdata = {
{
- .name = "soc-camera-pdrv",
- .id = 0,
- .dev = {
- .platform_data = &ov7725_link,
- },
- }, {
- .name = "soc-camera-pdrv",
- .id = 1,
- .dev = {
- .platform_data = &camera_link,
- },
+ I2C_BOARD_INFO("pcf8563", 0x51),
+ },
+ {
+ I2C_BOARD_INFO("ov772x", 0x21),
+ .platform_data = &ov7725_info,
},
};
@@ -533,12 +384,9 @@ static struct platform_device *ap325rxa_devices[] __initdata = {
&smsc9118_device,
&ap325rxa_nor_flash_device,
&lcdc_device,
- &ceu_device,
&nand_flash_device,
&sdhi0_cn3_device,
&sdhi1_cn7_device,
- &ap325rxa_camera[0],
- &ap325rxa_camera[1],
};
extern char ap325rxa_sdram_enter_start;
@@ -649,8 +497,6 @@ static int __init ap325rxa_devices_setup(void)
__raw_writew(0xFFFF, PORT_DRVCRA);
__raw_writew(0xFFFF, PORT_DRVCRB);
- platform_resource_setup_memory(&ceu_device, "ceu", 4 << 20);
-
/* SDHI0 - CN3 - SD CARD */
gpio_request(GPIO_FN_SDHI0CD_PTD, NULL);
gpio_request(GPIO_FN_SDHI0WP_PTD, NULL);
@@ -670,9 +516,25 @@ static int __init ap325rxa_devices_setup(void)
gpio_request(GPIO_FN_SDHI1CMD, NULL);
gpio_request(GPIO_FN_SDHI1CLK, NULL);
+ /* Add a clock alias for ov7725 xclk source. */
+ clk_add_alias(NULL, "0-0021", "video_clk", NULL);
+
+ /* Register RSTB gpio for ov7725 camera sensor. */
+ gpiod_add_lookup_table(&ov7725_gpios);
+
i2c_register_board_info(0, ap325rxa_i2c_devices,
ARRAY_SIZE(ap325rxa_i2c_devices));
+ /* Initialize CEU platform device separately to map memory first */
+ device_initialize(&ap325rxa_ceu_device.dev);
+ arch_setup_pdev_archdata(&ap325rxa_ceu_device);
+ dma_declare_coherent_memory(&ap325rxa_ceu_device.dev,
+ ceu_dma_membase, ceu_dma_membase,
+ ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1,
+ DMA_MEMORY_EXCLUSIVE);
+
+ platform_device_add(&ap325rxa_ceu_device);
+
return platform_add_devices(ap325rxa_devices,
ARRAY_SIZE(ap325rxa_devices));
}
@@ -689,7 +551,21 @@ static int ap325rxa_mode_pins(void)
return MODE_PIN5 | MODE_PIN8;
}
+/* Reserve a portion of memory for CEU buffers */
+static void __init ap325rxa_mv_mem_reserve(void)
+{
+ phys_addr_t phys;
+ phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
+
+ phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ memblock_free(phys, size);
+ memblock_remove(phys, size);
+
+ ceu_dma_membase = phys;
+}
+
static struct sh_machine_vector mv_ap325rxa __initmv = {
.mv_name = "AP-325RXA",
.mv_mode_pins = ap325rxa_mode_pins,
+ .mv_mem_reserve = ap325rxa_mv_mem_reserve,
};
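
The new ap325rxa_mv_mem_reserve() hook runs at machine-vector memory-reserve time: memblock_alloc_base() picks a page-aligned 4 MiB range, and the immediate memblock_free() plus memblock_remove() take that range out of the kernel's view entirely, so it never reaches the page allocator. ap325rxa_devices_setup() then hands the same range to the CEU via dma_declare_coherent_memory(), giving the renesas-ceu driver a dedicated per-device coherent pool. A hedged sketch of the driver-side effect (ceu_alloc_buffer_example() is hypothetical, not part of the patch):

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	static int ceu_alloc_buffer_example(struct device *dev)
	{
		dma_addr_t dma;
		void *vaddr;

		/* With a per-device pool declared, this allocation is
		 * carved out of the reserved 4 MiB region rather than
		 * coming from the page allocator. */
		vaddr = dma_alloc_coherent(dev, SZ_1M, &dma, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* ... program 'dma' into the CEU as a capture buffer ... */

		dma_free_coherent(dev, SZ_1M, vaddr, dma);
		return 0;
	}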
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 6af7777332fc..e59c577ed871 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -1,41 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* KFR2R09 board support code
*
* Copyright (C) 2009 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/mmc/host.h>
-#include <linux/mfd/tmio.h>
-#include <linux/mtd/physmap.h>
-#include <linux/mtd/onenand.h>
+
+#include <asm/clock.h>
+#include <asm/io.h>
+#include <asm/machvec.h>
+#include <asm/suspend.h>
+
+#include <cpu/sh7724.h>
+
+#include <linux/clkdev.h>
#include <linux/delay.h>
-#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
-#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/memblock.h>
+#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/physmap.h>
#include <linux/platform_data/lv5207lp.h>
+#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
+#include <linux/sh_intc.h>
#include <linux/usb/r8a66597.h>
#include <linux/videodev2.h>
-#include <linux/sh_intc.h>
+
+#include <mach/kfr2r09.h>
+
+#include <media/drv-intf/renesas-ceu.h>
#include <media/i2c/rj54n1cb0c.h>
-#include <media/soc_camera.h>
-#include <media/drv-intf/sh_mobile_ceu.h>
+
#include <video/sh_mobile_lcdc.h>
-#include <asm/suspend.h>
-#include <asm/clock.h>
-#include <asm/machvec.h>
-#include <asm/io.h>
-#include <cpu/sh7724.h>
-#include <mach/kfr2r09.h>
+
+#define CEU_BUFFER_MEMORY_SIZE (4 << 20)
+static phys_addr_t ceu_dma_membase;
+
+/* set VIO_CKO clock to 25MHz */
+#define CEU_MCLK_FREQ 25000000
+#define DRVCRB 0xA405018C
static struct mtd_partition kfr2r09_nor_flash_partitions[] =
{
@@ -230,8 +242,17 @@ static struct platform_device kfr2r09_usb0_gadget_device = {
.resource = kfr2r09_usb0_gadget_resources,
};
-static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
- .flags = SH_CEU_FLAG_USE_8BIT_BUS,
+static struct ceu_platform_data ceu_pdata = {
+ .num_subdevs = 1,
+ .subdevs = {
+ { /* [0] = rj54n1cb0c */
+ .flags = 0,
+ .bus_width = 8,
+ .bus_shift = 0,
+ .i2c_adapter_id = 1,
+ .i2c_address = 0x50,
+ },
+ },
};
static struct resource kfr2r09_ceu_resources[] = {
@@ -246,109 +267,35 @@ static struct resource kfr2r09_ceu_resources[] = {
.end = evt2irq(0x880),
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* place holder for contiguous memory */
- },
};
static struct platform_device kfr2r09_ceu_device = {
- .name = "sh_mobile_ceu",
+ .name = "renesas-ceu",
.id = 0, /* "ceu0" clock */
.num_resources = ARRAY_SIZE(kfr2r09_ceu_resources),
.resource = kfr2r09_ceu_resources,
.dev = {
- .platform_data = &sh_mobile_ceu_info,
+ .platform_data = &ceu_pdata,
},
};
-static struct i2c_board_info kfr2r09_i2c_camera = {
- I2C_BOARD_INFO("rj54n1cb0c", 0x50),
-};
-
-static struct clk *camera_clk;
-
-/* set VIO_CKO clock to 25MHz */
-#define CEU_MCLK_FREQ 25000000
-
-#define DRVCRB 0xA405018C
-static int camera_power(struct device *dev, int mode)
-{
- int ret;
-
- if (mode) {
- long rate;
-
- camera_clk = clk_get(NULL, "video_clk");
- if (IS_ERR(camera_clk))
- return PTR_ERR(camera_clk);
-
- rate = clk_round_rate(camera_clk, CEU_MCLK_FREQ);
- ret = clk_set_rate(camera_clk, rate);
- if (ret < 0)
- goto eclkrate;
-
- /* set DRVCRB
- *
- * use 1.8 V for VccQ_VIO
- * use 2.85V for VccQ_SR
- */
- __raw_writew((__raw_readw(DRVCRB) & ~0x0003) | 0x0001, DRVCRB);
-
- /* reset clear */
- ret = gpio_request(GPIO_PTB4, NULL);
- if (ret < 0)
- goto eptb4;
- ret = gpio_request(GPIO_PTB7, NULL);
- if (ret < 0)
- goto eptb7;
-
- ret = gpio_direction_output(GPIO_PTB4, 1);
- if (!ret)
- ret = gpio_direction_output(GPIO_PTB7, 1);
- if (ret < 0)
- goto egpioout;
- msleep(1);
-
- ret = clk_enable(camera_clk); /* start VIO_CKO */
- if (ret < 0)
- goto eclkon;
-
- return 0;
- }
-
- ret = 0;
-
- clk_disable(camera_clk);
-eclkon:
- gpio_set_value(GPIO_PTB7, 0);
-egpioout:
- gpio_set_value(GPIO_PTB4, 0);
- gpio_free(GPIO_PTB7);
-eptb7:
- gpio_free(GPIO_PTB4);
-eptb4:
-eclkrate:
- clk_put(camera_clk);
- return ret;
-}
-
static struct rj54n1_pdata rj54n1_priv = {
.mclk_freq = CEU_MCLK_FREQ,
.ioctl_high = false,
};
-static struct soc_camera_link rj54n1_link = {
- .power = camera_power,
- .board_info = &kfr2r09_i2c_camera,
- .i2c_adapter_id = 1,
- .priv = &rj54n1_priv,
+static struct i2c_board_info kfr2r09_i2c_camera = {
+ I2C_BOARD_INFO("rj54n1cb0c", 0x50),
+ .platform_data = &rj54n1_priv,
};
-static struct platform_device kfr2r09_camera = {
- .name = "soc-camera-pdrv",
- .id = 0,
- .dev = {
- .platform_data = &rj54n1_link,
+static struct gpiod_lookup_table rj54n1_gpios = {
+ .dev_id = "1-0050",
+ .table = {
+ GPIO_LOOKUP("sh7724_pfc", GPIO_PTB4, "poweron",
+ GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("sh7724_pfc", GPIO_PTB7, "enable",
+ GPIO_ACTIVE_HIGH),
},
};
@@ -393,8 +340,6 @@ static struct platform_device *kfr2r09_devices[] __initdata = {
&kfr2r09_nand_flash_device,
&kfr2r09_sh_keysc_device,
&kfr2r09_sh_lcdc_device,
- &kfr2r09_ceu_device,
- &kfr2r09_camera,
&kfr2r09_sh_sdhi0_device,
};
@@ -533,6 +478,8 @@ extern char kfr2r09_sdram_leave_end;
static int __init kfr2r09_devices_setup(void)
{
+ static struct clk *camera_clk;
+
/* register board specific self-refresh code */
sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF |
SUSP_SH_RSTANDBY,
@@ -622,8 +569,6 @@ static int __init kfr2r09_devices_setup(void)
gpio_request(GPIO_FN_VIO0_D1, NULL);
gpio_request(GPIO_FN_VIO0_D0, NULL);
- platform_resource_setup_memory(&kfr2r09_ceu_device, "ceu", 4 << 20);
-
/* SDHI0 connected to yc304 */
gpio_request(GPIO_FN_SDHI0CD, NULL);
gpio_request(GPIO_FN_SDHI0D3, NULL);
@@ -635,6 +580,36 @@ static int __init kfr2r09_devices_setup(void)
i2c_register_board_info(0, &kfr2r09_backlight_board_info, 1);
+ /* Set camera clock frequency and register an alias for rj54n1. */
+ camera_clk = clk_get(NULL, "video_clk");
+ if (!IS_ERR(camera_clk)) {
+ clk_set_rate(camera_clk,
+ clk_round_rate(camera_clk, CEU_MCLK_FREQ));
+ clk_put(camera_clk);
+ }
+ clk_add_alias(NULL, "1-0050", "video_clk", NULL);
+
+ /* set DRVCRB
+ *
+ * use 1.8 V for VccQ_VIO
+ * use 2.85V for VccQ_SR
+ */
+ __raw_writew((__raw_readw(DRVCRB) & ~0x0003) | 0x0001, DRVCRB);
+
+ gpiod_add_lookup_table(&rj54n1_gpios);
+
+ i2c_register_board_info(1, &kfr2r09_i2c_camera, 1);
+
+ /* Initialize CEU platform device separately to map memory first */
+ device_initialize(&kfr2r09_ceu_device.dev);
+ arch_setup_pdev_archdata(&kfr2r09_ceu_device);
+ dma_declare_coherent_memory(&kfr2r09_ceu_device.dev,
+ ceu_dma_membase, ceu_dma_membase,
+ ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1,
+ DMA_MEMORY_EXCLUSIVE);
+
+ platform_device_add(&kfr2r09_ceu_device);
+
return platform_add_devices(kfr2r09_devices,
ARRAY_SIZE(kfr2r09_devices));
}
@@ -651,10 +626,24 @@ static int kfr2r09_mode_pins(void)
return MODE_PIN0 | MODE_PIN1 | MODE_PIN5 | MODE_PIN8;
}
+/* Reserve a portion of memory for CEU buffers */
+static void __init kfr2r09_mv_mem_reserve(void)
+{
+ phys_addr_t phys;
+ phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
+
+ phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ memblock_free(phys, size);
+ memblock_remove(phys, size);
+
+ ceu_dma_membase = phys;
+}
+
/*
* The Machine Vector
*/
static struct sh_machine_vector mv_kfr2r09 __initmv = {
.mv_name = "kfr2r09",
.mv_mode_pins = kfr2r09_mode_pins,
+ .mv_mem_reserve = kfr2r09_mv_mem_reserve,
};
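
As on ap325rxa, the kfr2r09 conversion moves camera power handling out of board callbacks: the gpiod_lookup_table keyed by the I2C device name "1-0050" (adapter 1, address 0x50) and the clk_add_alias() call let the rj54n1 driver claim its GPIOs and clock through the ordinary consumer APIs instead of the removed camera_power() hook. A hedged sketch of the consumer side (rj54n1_power_on_example() is illustrative, not the driver's actual code):

	#include <linux/clk.h>
	#include <linux/gpio/consumer.h>

	static int rj54n1_power_on_example(struct device *dev)
	{
		struct gpio_desc *poweron;
		struct clk *clk;

		/* Resolved via the "1-0050" lookup table above. */
		poweron = gpiod_get(dev, "poweron", GPIOD_OUT_HIGH);
		if (IS_ERR(poweron))
			return PTR_ERR(poweron);

		/* Matched through clk_add_alias(NULL, "1-0050", ...). */
		clk = clk_get(dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		clk_prepare_enable(clk);
		/* ... sensor is now powered and clocked ... */
		return 0;
	}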
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 3d7d0046cf49..254f2c662703 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -28,7 +28,6 @@
#include <video/sh_mobile_lcdc.h>
#include <media/drv-intf/renesas-ceu.h>
#include <media/i2c/ov772x.h>
-#include <media/soc_camera.h>
#include <media/i2c/tw9910.h>
#include <asm/clock.h>
#include <asm/machvec.h>
@@ -351,8 +350,9 @@ static struct platform_device migor_ceu_device = {
static struct gpiod_lookup_table ov7725_gpios = {
.dev_id = "0-0021",
.table = {
- GPIO_LOOKUP("sh7722_pfc", GPIO_PTT0, "pwdn", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("sh7722_pfc", GPIO_PTT3, "rstb", GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP("sh7722_pfc", GPIO_PTT0, "powerdown",
+ GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("sh7722_pfc", GPIO_PTT3, "reset", GPIO_ACTIVE_LOW),
},
};
@@ -592,7 +592,7 @@ static int __init migor_devices_setup(void)
}
/* Add a clock alias for ov7725 xclk source. */
- clk_add_alias("xclk", "0-0021", "video_clk", NULL);
+ clk_add_alias(NULL, "0-0021", "video_clk", NULL);
/* Register GPIOs for video sources. */
gpiod_add_lookup_table(&ov7725_gpios);
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 255952555656..fdbec22ae687 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -1,43 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/sh/boards/se/7724/setup.c
*
* Copyright (C) 2009 Renesas Solutions Corp.
*
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
+#include <asm/clock.h>
+#include <asm/heartbeat.h>
+#include <asm/io.h>
+#include <asm/suspend.h>
-#include <linux/init.h>
+#include <cpu/sh7724.h>
+
+#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/input/sh_keysc.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/mmc/host.h>
+#include <linux/memblock.h>
#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
#include <linux/mtd/physmap.h>
-#include <linux/delay.h>
+#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
-#include <linux/smc91x.h>
-#include <linux/gpio.h>
-#include <linux/input.h>
-#include <linux/input/sh_keysc.h>
-#include <linux/usb/r8a66597.h>
#include <linux/sh_eth.h>
#include <linux/sh_intc.h>
+#include <linux/smc91x.h>
+#include <linux/usb/r8a66597.h>
#include <linux/videodev2.h>
-#include <video/sh_mobile_lcdc.h>
-#include <media/drv-intf/sh_mobile_ceu.h>
+
+#include <mach-se/mach/se7724.h>
+#include <media/drv-intf/renesas-ceu.h>
+
#include <sound/sh_fsi.h>
#include <sound/simple_card.h>
-#include <asm/io.h>
-#include <asm/heartbeat.h>
-#include <asm/clock.h>
-#include <asm/suspend.h>
-#include <cpu/sh7724.h>
-#include <mach-se/mach/se7724.h>
+
+#include <video/sh_mobile_lcdc.h>
+
+#define CEU_BUFFER_MEMORY_SIZE (4 << 20)
+static phys_addr_t ceu0_dma_membase;
+static phys_addr_t ceu1_dma_membase;
/*
* SWx 1234 5678
@@ -216,8 +222,8 @@ static struct platform_device lcdc_device = {
};
/* CEU0 */
-static struct sh_mobile_ceu_info sh_mobile_ceu0_info = {
- .flags = SH_CEU_FLAG_USE_8BIT_BUS,
+static struct ceu_platform_data ceu0_pdata = {
+ .num_subdevs = 0,
};
static struct resource ceu0_resources[] = {
@@ -231,24 +237,21 @@ static struct resource ceu0_resources[] = {
.start = evt2irq(0x880),
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* place holder for contiguous memory */
- },
};
static struct platform_device ceu0_device = {
- .name = "sh_mobile_ceu",
- .id = 0, /* "ceu0" clock */
+ .name = "renesas-ceu",
+ .id = 0, /* "ceu.0" clock */
.num_resources = ARRAY_SIZE(ceu0_resources),
.resource = ceu0_resources,
.dev = {
- .platform_data = &sh_mobile_ceu0_info,
+ .platform_data = &ceu0_pdata,
},
};
/* CEU1 */
-static struct sh_mobile_ceu_info sh_mobile_ceu1_info = {
- .flags = SH_CEU_FLAG_USE_8BIT_BUS,
+static struct ceu_platform_data ceu1_pdata = {
+ .num_subdevs = 0,
};
static struct resource ceu1_resources[] = {
@@ -262,18 +265,15 @@ static struct resource ceu1_resources[] = {
.start = evt2irq(0x9e0),
.flags = IORESOURCE_IRQ,
},
- [2] = {
- /* place holder for contiguous memory */
- },
};
static struct platform_device ceu1_device = {
- .name = "sh_mobile_ceu",
- .id = 1, /* "ceu1" clock */
+ .name = "renesas-ceu",
+ .id = 1, /* "ceu.1" clock */
.num_resources = ARRAY_SIZE(ceu1_resources),
.resource = ceu1_resources,
.dev = {
- .platform_data = &sh_mobile_ceu1_info,
+ .platform_data = &ceu1_pdata,
},
};
@@ -574,13 +574,16 @@ static struct platform_device vou_device = {
},
};
+static struct platform_device *ms7724se_ceu_devices[] __initdata = {
+ &ceu0_device,
+ &ceu1_device,
+};
+
static struct platform_device *ms7724se_devices[] __initdata = {
&heartbeat_device,
&smc91x_eth_device,
&lcdc_device,
&nor_flash_device,
- &ceu0_device,
- &ceu1_device,
&keysc_device,
&sh_eth_device,
&sh7724_usb0_host_device,
@@ -797,7 +800,6 @@ static int __init devices_setup(void)
gpio_request(GPIO_FN_VIO0_CLK, NULL);
gpio_request(GPIO_FN_VIO0_FLD, NULL);
gpio_request(GPIO_FN_VIO0_HD, NULL);
- platform_resource_setup_memory(&ceu0_device, "ceu0", 4 << 20);
/* enable CEU1 */
gpio_request(GPIO_FN_VIO1_D7, NULL);
@@ -812,7 +814,6 @@ static int __init devices_setup(void)
gpio_request(GPIO_FN_VIO1_HD, NULL);
gpio_request(GPIO_FN_VIO1_VD, NULL);
gpio_request(GPIO_FN_VIO1_CLK, NULL);
- platform_resource_setup_memory(&ceu1_device, "ceu1", 4 << 20);
/* KEYSC */
gpio_request(GPIO_FN_KEYOUT5_IN5, NULL);
@@ -934,12 +935,49 @@ static int __init devices_setup(void)
gpio_request(GPIO_FN_DV_VSYNC, NULL);
gpio_request(GPIO_FN_DV_HSYNC, NULL);
+ /* Initialize CEU platform devices separately to map memory first */
+ device_initialize(&ms7724se_ceu_devices[0]->dev);
+ arch_setup_pdev_archdata(ms7724se_ceu_devices[0]);
+ dma_declare_coherent_memory(&ms7724se_ceu_devices[0]->dev,
+ ceu0_dma_membase, ceu0_dma_membase,
+ ceu0_dma_membase +
+ CEU_BUFFER_MEMORY_SIZE - 1,
+ DMA_MEMORY_EXCLUSIVE);
+ platform_device_add(ms7724se_ceu_devices[0]);
+
+ device_initialize(&ms7724se_ceu_devices[1]->dev);
+ arch_setup_pdev_archdata(ms7724se_ceu_devices[1]);
+ dma_declare_coherent_memory(&ms7724se_ceu_devices[1]->dev,
+ ceu1_dma_membase, ceu1_dma_membase,
+ ceu1_dma_membase +
+ CEU_BUFFER_MEMORY_SIZE - 1,
+ DMA_MEMORY_EXCLUSIVE);
+ platform_device_add(ms7724se_ceu_devices[1]);
+
return platform_add_devices(ms7724se_devices,
ARRAY_SIZE(ms7724se_devices));
}
device_initcall(devices_setup);
+/* Reserve a portion of memory for CEU 0 and CEU 1 buffers */
+static void __init ms7724se_mv_mem_reserve(void)
+{
+ phys_addr_t phys;
+ phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
+
+ phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ memblock_free(phys, size);
+ memblock_remove(phys, size);
+ ceu0_dma_membase = phys;
+
+ phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ memblock_free(phys, size);
+ memblock_remove(phys, size);
+ ceu1_dma_membase = phys;
+}
+
static struct sh_machine_vector mv_ms7724se __initmv = {
.mv_name = "ms7724se",
.mv_init_irq = init_se7724_IRQ,
+ .mv_mem_reserve = ms7724se_mv_mem_reserve,
};
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index e5b7437ab4af..8256626bc53c 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -160,8 +160,6 @@ static int __init pcibios_init(void)
for (hose = hose_head; hose; hose = hose->next)
pcibios_scanbus(hose);
- dma_debug_add_bus(&pci_bus_type);
-
pci_initialized = 1;
return 0;
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 46dd82ab2c29..6a5609a55965 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -2,6 +2,7 @@ generic-y += compat.h
generic-y += current.h
generic-y += delay.h
generic-y += div64.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += irq_regs.h
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 0fd0099f43cc..f37b95a80232 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -32,44 +32,9 @@
#include <asm/atomic-irq.h>
#endif
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
-
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
-
- return c;
-}
-
#endif /* CONFIG_CPU_J2 */
#endif /* __ASM_SH_ATOMIC_H */
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index d103ab5a4e4b..b932e42ef028 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -101,5 +101,12 @@ void kunmap_coherent(void *kvaddr);
void cpu_cache_init(void);
+static inline void *sh_cacheop_vaddr(void *vaddr)
+{
+ if (__in_29bit_mode())
+ vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
+ return vaddr;
+}
+
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */
diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h
index 1e881f5db659..593a9704782b 100644
--- a/arch/sh/include/asm/cmpxchg-xchg.h
+++ b/arch/sh/include/asm/cmpxchg-xchg.h
@@ -8,7 +8,8 @@
* This work is licensed under the terms of the GNU GPL, version 2. See the
* file "COPYING" in the main directory of this archive for more details.
*/
-#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/compiler.h>
#include <asm/byteorder.h>
/*
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
deleted file mode 100644
index 41167931e5d9..000000000000
--- a/arch/sh/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_DMA_MAPPING_H
-#define __ASM_SH_DMA_MAPPING_H
-
-extern const struct dma_map_ops *dma_ops;
-extern void no_iommu_init(void);
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return dma_ops;
-}
-
-extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag,
- unsigned long attrs);
-extern void dma_generic_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs);
-
-void sh_sync_dma_for_device(void *vaddr, size_t size,
- enum dma_data_direction dir);
-
-#endif /* __ASM_SH_DMA_MAPPING_H */
diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h
index 7431c172c0cb..199d17b765f2 100644
--- a/arch/sh/include/asm/hw_breakpoint.h
+++ b/arch/sh/include/asm/hw_breakpoint.h
@@ -10,7 +10,6 @@
#include <linux/types.h>
struct arch_hw_breakpoint {
- char *name; /* Contains name of the symbol to set bkpt */
unsigned long address;
u16 len;
u16 type;
@@ -41,6 +40,7 @@ struct sh_ubc {
struct clk *clk; /* optional interface clock / MSTP bit */
};
+struct perf_event_attr;
struct perf_event;
struct task_struct;
struct pmu;
@@ -54,8 +54,10 @@ static inline int hw_breakpoint_slots(int type)
}
/* arch/sh/kernel/hw_breakpoint.c */
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h
index 85d8bcaa8493..6171682f7798 100644
--- a/arch/sh/include/asm/kprobes.h
+++ b/arch/sh/include/asm/kprobes.h
@@ -27,7 +27,6 @@ struct kprobe;
void arch_remove_kprobe(struct kprobe *);
void kretprobe_trampoline(void);
-void jprobe_return_end(void);
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
@@ -43,9 +42,6 @@ struct prev_kprobe {
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned long kprobe_status;
- unsigned long jprobe_saved_r15;
- struct pt_regs jprobe_saved_regs;
- kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index dc80041f7363..59673f8a3379 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -12,7 +12,7 @@ endif
CFLAGS_REMOVE_return_address.o = -pg
-obj-y := debugtraps.o dma-nommu.o dumpstack.o \
+obj-y := debugtraps.o dumpstack.o \
idle.o io.o irq.o irq_$(BITS).o kdebugfs.o \
machvec.o nmi_debug.o process.o \
process_$(BITS).o ptrace.o ptrace_$(BITS).o \
@@ -45,7 +45,7 @@ obj-$(CONFIG_DUMP_CODE) += disassemble.o
obj-$(CONFIG_HIBERNATION) += swsusp.o
obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o
-
+obj-$(CONFIG_DMA_NONCOHERENT) += dma-coherent.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
ccflags-y := -Werror
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
index fe844222f3f6..af01664f7b4c 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
@@ -260,7 +260,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU2H1]),
CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]),
- CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[HWBLK_CEU]),
+ CLKDEV_DEV_ID("ceu.0", &mstp_clks[HWBLK_CEU]),
CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU2H0]),
CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
diff --git a/arch/sh/kernel/dma-coherent.c b/arch/sh/kernel/dma-coherent.c
new file mode 100644
index 000000000000..a0021eef956b
--- /dev/null
+++ b/arch/sh/kernel/dma-coherent.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2004 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/module.h>
+#include <asm/cacheflush.h>
+#include <asm/addrspace.h>
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
+{
+ void *ret, *ret_nocache;
+ int order = get_order(size);
+
+ gfp |= __GFP_ZERO;
+
+ ret = (void *)__get_free_pages(gfp, order);
+ if (!ret)
+ return NULL;
+
+ /*
+ * Pages from the page allocator may have data present in
+ * cache. So flush the cache before using uncached memory.
+ */
+ arch_sync_dma_for_device(dev, virt_to_phys(ret), size,
+ DMA_BIDIRECTIONAL);
+
+ ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
+ if (!ret_nocache) {
+ free_pages((unsigned long)ret, order);
+ return NULL;
+ }
+
+ split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
+
+ *dma_handle = virt_to_phys(ret);
+ if (!WARN_ON(!dev))
+ *dma_handle -= PFN_PHYS(dev->dma_pfn_offset);
+
+ return ret_nocache;
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ int order = get_order(size);
+ unsigned long pfn = (dma_handle >> PAGE_SHIFT);
+ int k;
+
+ if (!WARN_ON(!dev))
+ pfn += dev->dma_pfn_offset;
+
+ for (k = 0; k < (1 << order); k++)
+ __free_pages(pfn_to_page(pfn + k), 0);
+
+ iounmap(vaddr);
+}
+
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));
+
+ switch (dir) {
+ case DMA_FROM_DEVICE: /* invalidate only */
+ __flush_invalidate_region(addr, size);
+ break;
+ case DMA_TO_DEVICE: /* writeback only */
+ __flush_wback_region(addr, size);
+ break;
+ case DMA_BIDIRECTIONAL: /* writeback and invalidate */
+ __flush_purge_region(addr, size);
+ break;
+ default:
+ BUG();
+ }
+}
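
The new dma-coherent.c wires SH's noncoherent CPUs into the generic dma-noncoherent framework selected in the Kconfig change above: arch_dma_alloc() grabs zeroed pages, flushes any stale cache lines with arch_sync_dma_for_device(), and returns an ioremap_nocache() view so CPU accesses bypass the cache, while arch_sync_dma_for_device() maps the three DMA directions onto SH's invalidate/writeback/purge region ops. A simplified sketch of how the generic streaming path drives that hook (the real code lives in kernel/dma/; this is an illustration, not the kernel's implementation):

	/* sketch_map_page() is hypothetical, condensing the generic
	 * dma-noncoherent map path down to its cache-maintenance step. */
	static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
					  unsigned long offset, size_t size,
					  enum dma_data_direction dir)
	{
		phys_addr_t paddr = page_to_phys(page) + offset;

		/* CPU caches must be clean before the device reads RAM,
		 * and invalidated before the CPU reads what the device wrote. */
		arch_sync_dma_for_device(dev, paddr, size, dir);

		return paddr - PFN_PHYS(dev->dma_pfn_offset);
	}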
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
deleted file mode 100644
index 3e3a32fc676e..000000000000
--- a/arch/sh/kernel/dma-nommu.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * DMA mapping support for platforms lacking IOMMUs.
- *
- * Copyright (C) 2009 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <asm/cacheflush.h>
-
-static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t addr = page_to_phys(page) + offset
- - PFN_PHYS(dev->dma_pfn_offset);
-
- WARN_ON(size == 0);
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- sh_sync_dma_for_device(page_address(page) + offset, size, dir);
-
- return addr;
-}
-
-static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *s;
- int i;
-
- WARN_ON(nents == 0 || sg[0].length == 0);
-
- for_each_sg(sg, s, nents, i) {
- dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset);
-
- BUG_ON(!sg_page(s));
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- sh_sync_dma_for_device(sg_virt(s), s->length, dir);
-
- s->dma_address = sg_phys(s) - offset;
- s->dma_length = s->length;
- }
-
- return nents;
-}
-
-#ifdef CONFIG_DMA_NONCOHERENT
-static void nommu_sync_single_for_device(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
- sh_sync_dma_for_device(phys_to_virt(addr), size, dir);
-}
-
-static void nommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nelems, i)
- sh_sync_dma_for_device(sg_virt(s), s->length, dir);
-}
-#endif
-
-const struct dma_map_ops nommu_dma_ops = {
- .alloc = dma_generic_alloc_coherent,
- .free = dma_generic_free_coherent,
- .map_page = nommu_map_page,
- .map_sg = nommu_map_sg,
-#ifdef CONFIG_DMA_NONCOHERENT
- .sync_single_for_device = nommu_sync_single_for_device,
- .sync_sg_for_device = nommu_sync_sg_for_device,
-#endif
-};
-
-void __init no_iommu_init(void)
-{
- if (dma_ops)
- return;
- dma_ops = &nommu_dma_ops;
-}
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index 8648ed05ccf0..d9ff3b42da7c 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -124,14 +124,13 @@ static int get_hbp_len(u16 hbp_len)
/*
* Check for virtual address in kernel space.
*/
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- va = info->address;
- len = get_hbp_len(info->len);
+ va = hw->address;
+ len = get_hbp_len(hw->len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -174,40 +173,40 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
return 0;
}
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
- info->address = bp->attr.bp_addr;
+ hw->address = attr->bp_addr;
/* Len */
- switch (bp->attr.bp_len) {
+ switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
- info->len = SH_BREAKPOINT_LEN_1;
+ hw->len = SH_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
- info->len = SH_BREAKPOINT_LEN_2;
+ hw->len = SH_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
- info->len = SH_BREAKPOINT_LEN_4;
+ hw->len = SH_BREAKPOINT_LEN_4;
break;
case HW_BREAKPOINT_LEN_8:
- info->len = SH_BREAKPOINT_LEN_8;
+ hw->len = SH_BREAKPOINT_LEN_8;
break;
default:
return -EINVAL;
}
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_R:
- info->type = SH_BREAKPOINT_READ;
+ hw->type = SH_BREAKPOINT_READ;
break;
case HW_BREAKPOINT_W:
- info->type = SH_BREAKPOINT_WRITE;
+ hw->type = SH_BREAKPOINT_WRITE;
break;
case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
- info->type = SH_BREAKPOINT_RW;
+ hw->type = SH_BREAKPOINT_RW;
break;
default:
return -EINVAL;
@@ -219,19 +218,20 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
unsigned int align;
int ret;
- ret = arch_build_bp_info(bp);
+ ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
ret = -EINVAL;
- switch (info->len) {
+ switch (hw->len) {
case SH_BREAKPOINT_LEN_1:
align = 0;
break;
@@ -249,17 +249,10 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
}
/*
- * For kernel-addresses, either the address or symbol name can be
- * specified.
- */
- if (info->name)
- info->address = (unsigned long)kallsyms_lookup_name(info->name);
-
- /*
* Check that the low-order bits of the address are appropriate
* for the alignment implied by len.
*/
- if (info->address & align)
+ if (hw->address & align)
return -EINVAL;
return 0;
@@ -346,7 +339,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
perf_bp_event(bp, args->regs);
/* Deliver the signal to userspace */
- if (!arch_check_bp_in_kernelspace(bp)) {
+ if (!arch_check_bp_in_kernelspace(&bp->hw.info)) {
force_sig_fault(SIGTRAP, TRAP_HWBKPT,
(void __user *)NULL, current);
}
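
This is the arch side of the hw_breakpoint core rework: instead of arch_validate_hwbkpt_settings() poking at the live event, hw_breakpoint_arch_parse() receives the perf_event_attr plus a scratch arch_hw_breakpoint to fill in, and arch_check_bp_in_kernelspace() now inspects that struct directly. A simplified sketch of the core-side sequence this enables (illustration only; the real code is in kernel/events/hw_breakpoint.c):

	static int sketch_parse(struct perf_event *bp)
	{
		struct arch_hw_breakpoint hw = { };
		int err;

		/* Validate into a scratch struct, not the live event. */
		err = hw_breakpoint_arch_parse(bp, &bp->attr, &hw);
		if (err)
			return err;

		/* Kernel-address breakpoints stay privileged. */
		if (arch_check_bp_in_kernelspace(&hw) &&
		    !capable(CAP_SYS_ADMIN))
			return -EPERM;

		bp->hw.info = hw;	/* commit only after validation */
		return 0;
	}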
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 52a5e11247d1..241e903dd3ee 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -248,11 +248,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
- } else {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- goto ss_probe;
- }
}
goto no_kprobe;
}
@@ -277,11 +272,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (p->pre_handler && p->pre_handler(p, regs))
+ if (p->pre_handler && p->pre_handler(p, regs)) {
/* handler has already set things up, so skip ss setup */
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
+ }
-ss_probe:
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
@@ -358,8 +355,6 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
regs->pc = orig_ret_address;
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
-
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
@@ -508,14 +503,8 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
if (post_kprobe_handler(args->regs))
ret = NOTIFY_STOP;
} else {
- if (kprobe_handler(args->regs)) {
+ if (kprobe_handler(args->regs))
ret = NOTIFY_STOP;
- } else {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler &&
- p->break_handler(p, args->regs))
- ret = NOTIFY_STOP;
- }
}
}
}
@@ -523,57 +512,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
return ret;
}
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- unsigned long addr;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- kcb->jprobe_saved_r15 = regs->regs[15];
- addr = kcb->jprobe_saved_r15;
-
- /*
- * TBD: As Linus pointed out, gcc assumes that the callee
- * owns the argument space and could overwrite it, e.g.
- * tailcall optimization. So, to be absolutely safe
- * we also save and restore enough stack bytes to cover
- * the argument area.
- */
- memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
- MIN_STACK_SIZE(addr));
-
- regs->pc = (unsigned long)(jp->entry);
-
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long stack_addr = kcb->jprobe_saved_r15;
- u8 *addr = (u8 *)regs->pc;
-
- if ((addr >= (u8 *)jprobe_return) &&
- (addr <= (u8 *)jprobe_return_end)) {
- *regs = kcb->jprobe_saved_regs;
-
- memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
- MIN_STACK_SIZE(stack_addr));
-
- kcb->kprobe_status = KPROBE_HIT_SS;
- preempt_enable_no_resched();
- return 1;
- }
-
- return 0;
-}
-
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
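
Beyond deleting the jprobe entry points, the kprobe_handler() changes above encode the new pre-handler contract: a pre_handler returning nonzero now means it rewrote regs itself (as the kretprobe trampoline does) and no single-step will follow, so that path must drop the per-CPU kprobe state and re-enable preemption on its own. A hedged sketch of a module probe written against this contract (example_pre() and the target symbol are illustrative):

	#include <linux/kprobes.h>

	static int example_pre(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("pre-handler hit at %pS\n",
			(void *)instruction_pointer(regs));
		return 0;	/* do not take over; single-stepping proceeds */
	}

	static struct kprobe example_kp = {
		.symbol_name = "do_sys_open",	/* hypothetical target */
		.pre_handler = example_pre,
	};

Registering it with register_kprobe(&example_kp) works as before; only handlers that return nonzero see the new rules.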
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 50cdd1349015..02ed2df25a54 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -225,8 +225,6 @@ config HUGETLB_PAGE_SIZE_512MB
endchoice
-source "mm/Kconfig"
-
config SCHED_MC
bool "Multi-core scheduler support"
depends on SMP
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index fceb2adfcac7..792f36129062 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -1,10 +1,6 @@
/*
- * arch/sh/mm/consistent.c
- *
* Copyright (C) 2004 - 2007 Paul Mundt
*
- * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -13,90 +9,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <linux/dma-debug.h>
#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/gfp.h>
-#include <asm/cacheflush.h>
-#include <asm/addrspace.h>
-
-const struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
-void *dma_generic_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- void *ret, *ret_nocache;
- int order = get_order(size);
-
- gfp |= __GFP_ZERO;
-
- ret = (void *)__get_free_pages(gfp, order);
- if (!ret)
- return NULL;
-
- /*
- * Pages from the page allocator may have data present in
- * cache. So flush the cache before using uncached memory.
- */
- sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL);
-
- ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
- if (!ret_nocache) {
- free_pages((unsigned long)ret, order);
- return NULL;
- }
-
- split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
-
- *dma_handle = virt_to_phys(ret);
- if (!WARN_ON(!dev))
- *dma_handle -= PFN_PHYS(dev->dma_pfn_offset);
-
- return ret_nocache;
-}
-
-void dma_generic_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- int order = get_order(size);
- unsigned long pfn = dma_handle >> PAGE_SHIFT;
- int k;
-
- if (!WARN_ON(!dev))
- pfn += dev->dma_pfn_offset;
-
- for (k = 0; k < (1 << order); k++)
- __free_pages(pfn_to_page(pfn + k), 0);
-
- iounmap(vaddr);
-}
-
-void sh_sync_dma_for_device(void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- void *addr;
-
- addr = __in_29bit_mode() ?
- (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;
-
- switch (direction) {
- case DMA_FROM_DEVICE: /* invalidate only */
- __flush_invalidate_region(addr, size);
- break;
- case DMA_TO_DEVICE: /* writeback only */
- __flush_wback_region(addr, size);
- break;
- case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- __flush_purge_region(addr, size);
- break;
- default:
- BUG();
- }
-}
-EXPORT_SYMBOL(sh_sync_dma_for_device);
static int __init memchunk_setup(char *str)
{
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 4034035fbede..7713c084d040 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -339,22 +339,12 @@ void __init paging_init(void)
free_area_init_nodes(max_zone_pfns);
}
-/*
- * Early initialization for any I/O MMUs we might have.
- */
-static void __init iommu_init(void)
-{
- no_iommu_init();
-}
-
unsigned int mem_init_done = 0;
void __init mem_init(void)
{
pg_data_t *pgdat;
- iommu_init();
-
high_memory = NULL;
for_each_online_pgdat(pgdat)
high_memory = max_t(void *, high_memory,
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 0f535debf802..2d58c26bff9a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -155,10 +155,6 @@ config PGTABLE_LEVELS
config ARCH_SUPPORTS_UPROBES
def_bool y if SPARC64
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
menu "Processor type and features"
config SMP
@@ -331,8 +327,6 @@ config FORCE_MAX_ZONEORDER
This config option is actually maximum order plus one. For example,
a value of 13 means that the largest free memory block is 2^12 pages.
-source "mm/Kconfig"
-
if SPARC64
source "kernel/power/Kconfig"
endif
@@ -355,8 +349,6 @@ config SCHED_MC
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
-source "kernel/Kconfig.preempt"
-
config CMDLINE_BOOL
bool "Default bootloader kernel arguments"
depends on SPARC64
@@ -556,10 +548,6 @@ config SPARC64_PCI_MSI
endmenu
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
config COMPAT
bool
depends on SPARC64
@@ -574,20 +562,4 @@ config SYSVIPC_COMPAT
depends on COMPAT && SYSVIPC
default y
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
source "drivers/sbus/char/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/sparc/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index 4aef29a11925..50a918d496c8 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -1,12 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
bool
default y
-source "lib/Kconfig.debug"
-
config DEBUG_DCFLUSH
bool "D-cache flush debugging"
depends on SPARC64 && DEBUG_KERNEL
@@ -21,5 +18,3 @@ config FRAME_POINTER
bool
depends on MCOUNT
default y
-
-endmenu
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index c9d2b922734b..bc9cc26efa3d 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -144,7 +144,6 @@ static struct shash_alg alg = {
.cra_name = "md5",
.cra_driver_name= "md5-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
index 1b3e47accc74..4d6d7faf728e 100644
--- a/arch/sparc/crypto/sha1_glue.c
+++ b/arch/sparc/crypto/sha1_glue.c
@@ -139,7 +139,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
index 285268ca9279..54c4de2db188 100644
--- a/arch/sparc/crypto/sha256_glue.c
+++ b/arch/sparc/crypto/sha256_glue.c
@@ -169,7 +169,6 @@ static struct shash_alg sha256 = {
.cra_name = "sha256",
.cra_driver_name= "sha256-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -185,7 +184,6 @@ static struct shash_alg sha224 = {
.cra_name = "sha224",
.cra_driver_name= "sha224-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
index 11eb36c3fc8c..4c55e97a4408 100644
--- a/arch/sparc/crypto/sha512_glue.c
+++ b/arch/sparc/crypto/sha512_glue.c
@@ -154,7 +154,6 @@ static struct shash_alg sha512 = {
.cra_name = "sha512",
.cra_driver_name= "sha512-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -170,7 +169,6 @@ static struct shash_alg sha384 = {
.cra_name = "sha384",
.cra_driver_name= "sha384-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index d13ce517f4b9..94c930f0bc62 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -27,17 +27,17 @@ int atomic_fetch_or(int, atomic_t *);
int atomic_fetch_xor(int, atomic_t *);
int atomic_cmpxchg(atomic_t *, int, int);
int atomic_xchg(atomic_t *, int);
-int __atomic_add_unless(atomic_t *, int, int);
+int atomic_fetch_add_unless(atomic_t *, int, int);
void atomic_set(atomic_t *, int);
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+
#define atomic_set_release(v, i) atomic_set((v), (i))
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
-#define atomic_inc(v) ((void)atomic_add_return( 1, (v)))
-#define atomic_dec(v) ((void)atomic_add_return( -1, (v)))
#define atomic_and(i, v) ((void)atomic_fetch_and((i), (v)))
#define atomic_or(i, v) ((void)atomic_fetch_or((i), (v)))
@@ -46,22 +46,4 @@ void atomic_set(atomic_t *, int);
#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
#define atomic_fetch_sub(i, v) (atomic_fetch_add (-(int)(i), (v)))
-#define atomic_inc_return(v) (atomic_add_return( 1, (v)))
-#define atomic_dec_return(v) (atomic_add_return( -1, (v)))
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
#endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 28db058d471b..6963482c81d8 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -50,38 +50,6 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_dec_return(v) atomic_sub_return(1, v)
-#define atomic64_dec_return(v) atomic64_sub_return(1, v)
-
-#define atomic_inc_return(v) atomic_add_return(1, v)
-#define atomic64_inc_return(v) atomic64_add_return(1, v)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-#define atomic64_sub_and_test(i, v) (atomic64_sub_return(i, v) == 0)
-
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
-
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic64_inc(v) atomic64_add(1, v)
-
-#define atomic_dec(v) atomic_sub(1, v)
-#define atomic64_dec(v) atomic64_sub(1, v)
-
-#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
-#define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
-
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
static inline int atomic_xchg(atomic_t *v, int new)
@@ -89,42 +57,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
return xchg(&v->counter, new);
}
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
#define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long c, old;
- c = atomic64_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic64_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
long atomic64_dec_if_positive(atomic64_t *v);
+#define atomic64_dec_if_positive atomic64_dec_if_positive
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
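
The deletions here lean on the generic fallbacks in <linux/atomic.h>: an architecture now defines only the primitives it genuinely accelerates and claims each one with a same-named #define (as done for atomic64_dec_if_positive above, and atomic_fetch_add_unless in the 32-bit header) so the generic layer knows not to supply its own. A simplified sketch of that header convention (not the kernel's exact fallback):

	#ifndef atomic64_dec_if_positive
	/* Generic fallback, compiled only when the arch did not claim it. */
	static inline s64 atomic64_dec_if_positive(atomic64_t *v)
	{
		s64 c = atomic64_read(v);

		do {
			if (c - 1 < 0)	/* would go negative: leave as-is */
				break;
		} while (!atomic64_try_cmpxchg(v, &c, c - 1));

		return c - 1;
	}
	#endif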
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 9a1e9cbc7e6d..b162c23ae8c2 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -243,35 +243,42 @@ void insb(unsigned long, void *, unsigned long);
void insw(unsigned long, void *, unsigned long);
void insl(unsigned long, void *, unsigned long);
-static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
+static inline void readsb(void __iomem *port, void *buf, unsigned long count)
{
insb((unsigned long __force)port, buf, count);
}
-static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
+static inline void readsw(void __iomem *port, void *buf, unsigned long count)
{
insw((unsigned long __force)port, buf, count);
}
-static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
+static inline void readsl(void __iomem *port, void *buf, unsigned long count)
{
insl((unsigned long __force)port, buf, count);
}
-static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
+static inline void writesb(void __iomem *port, const void *buf, unsigned long count)
{
outsb((unsigned long __force)port, buf, count);
}
-static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
+static inline void writesw(void __iomem *port, const void *buf, unsigned long count)
{
outsw((unsigned long __force)port, buf, count);
}
-static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
+static inline void writesl(void __iomem *port, const void *buf, unsigned long count)
{
outsl((unsigned long __force)port, buf, count);
}
+#define ioread8_rep(p,d,l) readsb(p,d,l)
+#define ioread16_rep(p,d,l) readsw(p,d,l)
+#define ioread32_rep(p,d,l) readsl(p,d,l)
+#define iowrite8_rep(p,d,l) writesb(p,d,l)
+#define iowrite16_rep(p,d,l) writesw(p,d,l)
+#define iowrite32_rep(p,d,l) writesl(p,d,l)
+
/* Valid I/O Space regions are anywhere, because each PCI bus supported
* can live in an arbitrary area of the physical address range.
*/
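
sparc64 previously exposed these string accessors only under the ioreadN_rep()/iowriteN_rep() names; giving them the conventional readsb()/readsw()/readsl() and writesb()/writesw()/writesl() spellings, with the _rep forms as thin macros on top, matches what drivers expect from other architectures. A hedged driver-style sketch ('fifo' and 'nwords' are illustrative):

	/* Drain a 16-bit FIFO register: repeated reads from one address,
	 * no address increment, data landing in a CPU buffer. */
	static void drain_fifo_example(void __iomem *fifo, u16 *buf,
				       unsigned long nwords)
	{
		readsw(fifo, buf, nwords);
	}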
diff --git a/arch/sparc/include/asm/kprobes.h b/arch/sparc/include/asm/kprobes.h
index 3704490b4488..bfcaa6326c20 100644
--- a/arch/sparc/include/asm/kprobes.h
+++ b/arch/sparc/include/asm/kprobes.h
@@ -44,7 +44,6 @@ struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_orig_tnpc;
unsigned long kprobe_orig_tstate_pil;
- struct pt_regs jprobe_saved_regs;
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index d58520c2e6ff..7ea35e5601b6 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -101,6 +101,9 @@
#define SO_ZEROCOPY 0x003e
+#define SO_TXTIME 0x003f
+#define SCM_TXTIME SO_TXTIME
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
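SO_TXTIME/SCM_TXTIME wire up the new transmit-time socket API for sparc with its arch-specific numbering (0x003f). A minimal userspace sketch, assuming struct sock_txtime from linux/net_tstamp.h in this series (field names per that header; SO_TXTIME comes from the updated uapi headers; error handling elided):

    #include <linux/net_tstamp.h>
    #include <sys/socket.h>
    #include <time.h>

    static int demo_enable_txtime(int fd)
    {
            struct sock_txtime cfg = {
                    .clockid = CLOCK_TAI,   /* clock the per-packet tx times use */
                    .flags   = 0,
            };

            return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
    }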
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index ab4ba4347941..dfbca2470536 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -147,18 +147,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_REENTER;
prepare_singlestep(p, regs, kcb);
return 1;
- } else {
- if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
+ } else if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
/* The breakpoint instruction was removed by
* another cpu right after we hit, no further
* handling of this interrupt is appropriate
*/
- ret = 1;
- goto no_kprobe;
- }
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs))
- goto ss_probe;
+ ret = 1;
}
goto no_kprobe;
}
@@ -181,10 +175,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (p->pre_handler && p->pre_handler(p, regs))
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
+ }
-ss_probe:
prepare_singlestep(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
@@ -441,53 +437,6 @@ out:
exception_exit(prev_state);
}
-/* Jprobes support. */
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));
-
- regs->tpc = (unsigned long) jp->entry;
- regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
- regs->tstate |= TSTATE_PIL;
-
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- register unsigned long orig_fp asm("g1");
-
- orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
- __asm__ __volatile__("\n"
-"1: cmp %%sp, %0\n\t"
- "blu,a,pt %%xcc, 1b\n\t"
- " restore\n\t"
- ".globl jprobe_return_trap_instruction\n"
-"jprobe_return_trap_instruction:\n\t"
- "ta 0x70"
- : /* no outputs */
- : "r" (orig_fp));
-}
-
-extern void jprobe_return_trap_instruction(void);
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- u32 *addr = (u32 *) regs->tpc;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (addr == (u32 *) jprobe_return_trap_instruction) {
- memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-
/* The value stored in the return address register is actually 2
* instructions before where the callee will return to.
* Sequences usually look something like this
@@ -562,9 +511,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
regs->tpc = orig_ret_address;
regs->tnpc = orig_ret_address + 4;
- reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
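This is the sparc leg of the tree-wide jprobes removal: jprobe_saved_regs and the setjmp/longjmp handlers go away, and a pre_handler that returns nonzero must now undo the kprobe bookkeeping itself, which is why reset_current_kprobe() and preempt_enable_no_resched() are added on that path above. For reference, a minimal sketch of an ordinary kprobe after this change (the probed symbol is an arbitrary example, not prescribed by the patch):

    #include <linux/kprobes.h>

    static int demo_pre(struct kprobe *p, struct pt_regs *regs)
    {
            pr_debug("hit %s, tpc=%lx\n", p->symbol_name, regs->tpc);
            return 0;       /* 0: fall through to single-stepping as usual */
    }

    static struct kprobe demo_kp = {
            .symbol_name = "do_sys_open",   /* example target only */
            .pre_handler = demo_pre,
    };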
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 465a901a0ada..281fa634bb1a 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -95,7 +95,7 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
}
EXPORT_SYMBOL(atomic_cmpxchg);
-int __atomic_add_unless(atomic_t *v, int a, int u)
+int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
@@ -107,7 +107,7 @@ int __atomic_add_unless(atomic_t *v, int a, int u)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(__atomic_add_unless);
+EXPORT_SYMBOL(atomic_fetch_add_unless);
/* Atomic operations are already serializing */
void atomic_set(atomic_t *v, int i)
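The rename reflects what the function already did: it returns the old counter value (a "fetch"), not a success flag. The boolean-style helpers are now derived from it generically; a sketch of that derivation, assuming it matches the generic wrappers (shown for the 32-bit case):

    static inline bool demo_add_unless(atomic_t *v, int a, int u)
    {
            return atomic_fetch_add_unless(v, a, u) != u;
    }

    static inline bool demo_inc_not_zero(atomic_t *v)
    {
            return demo_add_unless(v, 1, 0);
    }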
diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig
index 20da5a8ca949..6b9938919f0b 100644
--- a/arch/um/Kconfig.um
+++ b/arch/um/Kconfig
@@ -1,4 +1,70 @@
# SPDX-License-Identifier: GPL-2.0
+
+menu "UML-specific options"
+
+config UML
+ bool
+ default y
+ select ARCH_HAS_KCOV
+ select ARCH_NO_PREEMPT
+ select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_UID16
+ select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_DEBUG_KMEMLEAK
+ select GENERIC_IRQ_SHOW
+ select GENERIC_CPU_DEVICES
+ select GENERIC_CLOCKEVENTS
+ select HAVE_GCC_PLUGINS
+ select TTY # Needed for line.c
+
+config MMU
+ bool
+ default y
+
+config NO_IOMEM
+ def_bool y
+
+config ISA
+ bool
+
+config SBUS
+ bool
+
+config PCI
+ bool
+
+config PCMCIA
+ bool
+
+config TRACE_IRQFLAGS_SUPPORT
+ bool
+ default y
+
+config LOCKDEP_SUPPORT
+ bool
+ default y
+
+config STACKTRACE_SUPPORT
+ bool
+ default y
+ select STACKTRACE
+
+config GENERIC_CALIBRATE_DELAY
+ bool
+ default y
+
+config HZ
+ int
+ default 100
+
+config NR_CPUS
+ int
+ range 1 1
+ default 1
+
+source "arch/$(HEADER_ARCH)/um/Kconfig"
+
config STATIC_LINK
bool "Force a static link"
default n
@@ -10,8 +76,6 @@ config STATIC_LINK
Additionally, this option enables using higher memory spaces (up to
2.75G) for UML.
-source "mm/Kconfig"
-
config LD_SCRIPT_STATIC
bool
default y
@@ -23,8 +87,6 @@ config LD_SCRIPT_DYN
depends on !LD_SCRIPT_STATIC
select MODULE_REL_CRCS if MODVERSIONS
-source "fs/Kconfig.binfmt"
-
config HOSTFS
tristate "Host filesystem"
help
@@ -122,3 +184,7 @@ config SECCOMP
defined by each seccomp mode.
If unsure, say Y.
+
+endmenu
+
+source "arch/um/drivers/Kconfig"
diff --git a/arch/um/Kconfig.char b/arch/um/Kconfig.char
deleted file mode 100644
index f184bde7030e..000000000000
--- a/arch/um/Kconfig.char
+++ /dev/null
@@ -1,124 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-menu "UML Character Devices"
-
-config STDERR_CONSOLE
- bool "stderr console"
- default y
- help
- console driver which dumps all printk messages to stderr.
-
-config SSL
- bool "Virtual serial line"
- help
- The User-Mode Linux environment allows you to create virtual serial
- lines on the UML that are usually made to show up on the host as
- ttys or ptys.
-
- See <http://user-mode-linux.sourceforge.net/old/input.html> for more
- information and command line examples of how to use this facility.
-
- Unless you have a specific reason for disabling this, say Y.
-
-config NULL_CHAN
- bool "null channel support"
- help
- This option enables support for attaching UML consoles and serial
- lines to a device similar to /dev/null. Data written to it disappears
- and there is never any data to be read.
-
-config PORT_CHAN
- bool "port channel support"
- help
- This option enables support for attaching UML consoles and serial
- lines to host portals. They may be accessed with 'telnet <host>
- <port number>'. Any number of consoles and serial lines may be
- attached to a single portal, although what UML device you get when
- you telnet to that portal will be unpredictable.
- It is safe to say 'Y' here.
-
-config PTY_CHAN
- bool "pty channel support"
- help
- This option enables support for attaching UML consoles and serial
- lines to host pseudo-terminals. Access to both traditional
- pseudo-terminals (/dev/pty*) and pts pseudo-terminals are controlled
- with this option. The assignment of UML devices to host devices
- will be announced in the kernel message log.
- It is safe to say 'Y' here.
-
-config TTY_CHAN
- bool "tty channel support"
- help
- This option enables support for attaching UML consoles and serial
- lines to host terminals. Access to both virtual consoles
- (/dev/tty*) and the slave side of pseudo-terminals (/dev/ttyp* and
- /dev/pts/*) are controlled by this option.
- It is safe to say 'Y' here.
-
-config XTERM_CHAN
- bool "xterm channel support"
- help
- This option enables support for attaching UML consoles and serial
- lines to xterms. Each UML device so assigned will be brought up in
- its own xterm.
- It is safe to say 'Y' here.
-
-config NOCONFIG_CHAN
- bool
- default !(XTERM_CHAN && TTY_CHAN && PTY_CHAN && PORT_CHAN && NULL_CHAN)
-
-config CON_ZERO_CHAN
- string "Default main console channel initialization"
- default "fd:0,fd:1"
- help
- This is the string describing the channel to which the main console
- will be attached by default. This value can be overridden from the
- command line. The default value is "fd:0,fd:1", which attaches the
- main console to stdin and stdout.
- It is safe to leave this unchanged.
-
-config CON_CHAN
- string "Default console channel initialization"
- default "xterm"
- help
- This is the string describing the channel to which all consoles
- except the main console will be attached by default. This value can
- be overridden from the command line. The default value is "xterm",
- which brings them up in xterms.
- It is safe to leave this unchanged, although you may wish to change
- this if you expect the UML that you build to be run in environments
- which don't have X or xterm available.
-
-config SSL_CHAN
- string "Default serial line channel initialization"
- default "pty"
- help
- This is the string describing the channel to which the serial lines
- will be attached by default. This value can be overridden from the
- command line. The default value is "pty", which attaches them to
- traditional pseudo-terminals.
- It is safe to leave this unchanged, although you may wish to change
- this if you expect the UML that you build to be run in environments
- which don't have a set of /dev/pty* devices.
-
-config UML_SOUND
- tristate "Sound support"
- help
- This option enables UML sound support. If enabled, it will pull in
- soundcore and the UML hostaudio relay, which acts as a intermediary
- between the host's dsp and mixer devices and the UML sound system.
- It is safe to say 'Y' here.
-
-config SOUND
- tristate
- default UML_SOUND
-
-config SOUND_OSS_CORE
- bool
- default UML_SOUND
-
-config HOSTAUDIO
- tristate
- default UML_SOUND
-
-endmenu
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
deleted file mode 100644
index 07f84c842cc3..000000000000
--- a/arch/um/Kconfig.common
+++ /dev/null
@@ -1,60 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config UML
- bool
- default y
- select ARCH_HAS_KCOV
- select HAVE_ARCH_AUDITSYSCALL
- select HAVE_ARCH_SECCOMP_FILTER
- select HAVE_UID16
- select HAVE_FUTEX_CMPXCHG if FUTEX
- select HAVE_DEBUG_KMEMLEAK
- select GENERIC_IRQ_SHOW
- select GENERIC_CPU_DEVICES
- select GENERIC_CLOCKEVENTS
- select HAVE_GCC_PLUGINS
- select TTY # Needed for line.c
-
-config MMU
- bool
- default y
-
-config NO_IOMEM
- def_bool y
-
-config ISA
- bool
-
-config SBUS
- bool
-
-config PCI
- bool
-
-config PCMCIA
- bool
-
-config TRACE_IRQFLAGS_SUPPORT
- bool
- default y
-
-config LOCKDEP_SUPPORT
- bool
- default y
-
-config STACKTRACE_SUPPORT
- bool
- default y
- select STACKTRACE
-
-config GENERIC_CALIBRATE_DELAY
- bool
- default y
-
-config HZ
- int
- default 100
-
-config NR_CPUS
- int
- range 1 1
- default 1
diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug
index 967d3109689f..2014597605ea 100644
--- a/arch/um/Kconfig.debug
+++ b/arch/um/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config GPROF
bool "Enable gprof support"
@@ -37,5 +34,3 @@ config EARLY_PRINTK
This is useful for kernel debugging when your machine crashes very
early before the console code is initialized.
-
-endmenu
diff --git a/arch/um/Kconfig.rest b/arch/um/Kconfig.rest
deleted file mode 100644
index 08327b9c0cbe..000000000000
--- a/arch/um/Kconfig.rest
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
-source "arch/um/Kconfig.char"
-
-source "drivers/Kconfig"
-
-source "net/Kconfig"
-
-source "arch/um/Kconfig.net"
-
-source "fs/Kconfig"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
-source "arch/um/Kconfig.debug"
diff --git a/arch/um/Makefile b/arch/um/Makefile
index e54dda8a0363..44ddc3e8fa66 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -113,17 +113,8 @@ define archhelp
echo ' find in the kernel root.'
endef
-KBUILD_KCONFIG := $(HOST_DIR)/um/Kconfig
-
archheaders:
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
- kbuild-file=$(HOST_DIR)/include/asm/Kbuild \
- obj=$(HOST_DIR)/include/generated/asm
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
- kbuild-file=$(HOST_DIR)/include/uapi/asm/Kbuild \
- obj=$(HOST_DIR)/include/generated/uapi/asm
- $(Q)$(MAKE) KBUILD_SRC= ARCH=$(HEADER_ARCH) archheaders
-
+ $(Q)$(MAKE) -f $(srctree)/Makefile ARCH=$(HEADER_ARCH) asm-generic archheaders
archprepare: include/generated/user_constants.h
@@ -169,11 +160,11 @@ define filechk_gen-asm-offsets
echo " *"; \
echo " */"; \
echo ""; \
- sed -ne "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"; \
+ sed -ne "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}" < $<; \
echo ""; )
endef
include/generated/user_constants.h: $(HOST_DIR)/um/user-offsets.s
$(call filechk,gen-asm-offsets)
-export SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING OS DEV_NULL_PATH
+export HEADER_ARCH SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING OS DEV_NULL_PATH
diff --git a/arch/um/Kconfig.net b/arch/um/drivers/Kconfig
index c390f3deb0dc..2b1aaf7755aa 100644
--- a/arch/um/Kconfig.net
+++ b/arch/um/drivers/Kconfig
@@ -1,5 +1,129 @@
# SPDX-License-Identifier: GPL-2.0
+menu "UML Character Devices"
+
+config STDERR_CONSOLE
+ bool "stderr console"
+ default y
+ help
+ console driver which dumps all printk messages to stderr.
+
+config SSL
+ bool "Virtual serial line"
+ help
+ The User-Mode Linux environment allows you to create virtual serial
+ lines on the UML that are usually made to show up on the host as
+ ttys or ptys.
+
+ See <http://user-mode-linux.sourceforge.net/old/input.html> for more
+ information and command line examples of how to use this facility.
+
+ Unless you have a specific reason for disabling this, say Y.
+
+config NULL_CHAN
+ bool "null channel support"
+ help
+ This option enables support for attaching UML consoles and serial
+ lines to a device similar to /dev/null. Data written to it disappears
+ and there is never any data to be read.
+
+config PORT_CHAN
+ bool "port channel support"
+ help
+ This option enables support for attaching UML consoles and serial
+ lines to host portals. They may be accessed with 'telnet <host>
+ <port number>'. Any number of consoles and serial lines may be
+ attached to a single portal, although what UML device you get when
+ you telnet to that portal will be unpredictable.
+ It is safe to say 'Y' here.
+
+config PTY_CHAN
+ bool "pty channel support"
+ help
+ This option enables support for attaching UML consoles and serial
+ lines to host pseudo-terminals. Access to both traditional
+ pseudo-terminals (/dev/pty*) and pts pseudo-terminals are controlled
+ with this option. The assignment of UML devices to host devices
+ will be announced in the kernel message log.
+ It is safe to say 'Y' here.
+
+config TTY_CHAN
+ bool "tty channel support"
+ help
+ This option enables support for attaching UML consoles and serial
+ lines to host terminals. Access to both virtual consoles
+ (/dev/tty*) and the slave side of pseudo-terminals (/dev/ttyp* and
+ /dev/pts/*) are controlled by this option.
+ It is safe to say 'Y' here.
+
+config XTERM_CHAN
+ bool "xterm channel support"
+ help
+ This option enables support for attaching UML consoles and serial
+ lines to xterms. Each UML device so assigned will be brought up in
+ its own xterm.
+ It is safe to say 'Y' here.
+
+config NOCONFIG_CHAN
+ bool
+ default !(XTERM_CHAN && TTY_CHAN && PTY_CHAN && PORT_CHAN && NULL_CHAN)
+
+config CON_ZERO_CHAN
+ string "Default main console channel initialization"
+ default "fd:0,fd:1"
+ help
+ This is the string describing the channel to which the main console
+ will be attached by default. This value can be overridden from the
+ command line. The default value is "fd:0,fd:1", which attaches the
+ main console to stdin and stdout.
+ It is safe to leave this unchanged.
+
+config CON_CHAN
+ string "Default console channel initialization"
+ default "xterm"
+ help
+ This is the string describing the channel to which all consoles
+ except the main console will be attached by default. This value can
+ be overridden from the command line. The default value is "xterm",
+ which brings them up in xterms.
+ It is safe to leave this unchanged, although you may wish to change
+ this if you expect the UML that you build to be run in environments
+ which don't have X or xterm available.
+
+config SSL_CHAN
+ string "Default serial line channel initialization"
+ default "pty"
+ help
+ This is the string describing the channel to which the serial lines
+ will be attached by default. This value can be overridden from the
+ command line. The default value is "pty", which attaches them to
+ traditional pseudo-terminals.
+ It is safe to leave this unchanged, although you may wish to change
+ this if you expect the UML that you build to be run in environments
+ which don't have a set of /dev/pty* devices.
+
+config UML_SOUND
+ tristate "Sound support"
+ help
+ This option enables UML sound support. If enabled, it will pull in
+ soundcore and the UML hostaudio relay, which acts as an intermediary

+ between the host's dsp and mixer devices and the UML sound system.
+ It is safe to say 'Y' here.
+
+config SOUND
+ tristate
+ default UML_SOUND
+
+config SOUND_OSS_CORE
+ bool
+ default UML_SOUND
+
+config HOSTAUDIO
+ tristate
+ default UML_SOUND
+
+endmenu
+
menu "UML Network Devices"
depends on NET
@@ -211,4 +335,3 @@ config UML_NET_SLIRP
Startup example: "eth0=slirp,FE:FD:01:02:03:04,/usr/local/bin/slirp"
endmenu
-
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index 16b3cebddafb..693319839f69 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -25,10 +25,10 @@ LDFLAGS_vde.o := -r $(shell $(CC) $(CFLAGS) -print-file-name=libvdeplug.a)
targets := pcap_kern.o pcap_user.o vde_kern.o vde_user.o
$(obj)/pcap.o: $(obj)/pcap_kern.o $(obj)/pcap_user.o
- $(LD) -r -dp -o $@ $^ $(LDFLAGS) $(LDFLAGS_pcap.o)
+ $(LD) -r -dp -o $@ $^ $(ld_flags)
$(obj)/vde.o: $(obj)/vde_kern.o $(obj)/vde_user.o
- $(LD) -r -dp -o $@ $^ $(LDFLAGS) $(LDFLAGS_vde.o)
+ $(LD) -r -dp -o $@ $^ $(ld_flags)
#XXX: The call below does not work because the flags are added before the
# object name, so nothing from the library gets linked.
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 03f991e44288..60eae744d8fd 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -63,10 +63,6 @@ config ARCH_MAY_HAVE_PC_FDC
config ZONE_DMA
def_bool y
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
menu "System Type"
config MMU
@@ -139,12 +135,8 @@ endmenu
menu "Kernel Features"
-source "kernel/Kconfig.preempt"
-
source "kernel/Kconfig.hz"
-source "mm/Kconfig"
-
config LEDS
def_bool y
depends on GPIOLIB
@@ -181,12 +173,6 @@ config CMDLINE_FORCE
endmenu
-menu "Userspace binary formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
menu "Power management options"
source "kernel/power/Kconfig"
@@ -201,8 +187,6 @@ config ARCH_HIBERNATION_POSSIBLE
endmenu
-source "net/Kconfig"
-
if ARCH_PUV3
config PUV3_GPIO
@@ -236,15 +220,3 @@ endmenu
endif
endif
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/unicore32/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/unicore32/Kconfig.debug b/arch/unicore32/Kconfig.debug
index de8dae3abc0a..ca0ff97657ef 100644
--- a/arch/unicore32/Kconfig.debug
+++ b/arch/unicore32/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config EARLY_PRINTK
def_bool DEBUG_OCD
@@ -30,5 +27,3 @@ config DEBUG_OCD
help
Say Y here if you want the debug print routines to direct their
output to the UniCore On-Chip-Debugger channel using CP #1.
-
-endmenu
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 887d3a7bb646..b0312f8947ce 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -75,6 +75,7 @@ config X86
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_USE_BUILTIN_BSWAP
@@ -180,13 +181,14 @@ config X86
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
+ select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
select HAVE_STACK_VALIDATION if X86_64
select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_USER_RETURN_NOTIFIER
+ select HOTPLUG_SMT if SMP
select IRQ_FORCED_THREADING
select NEED_SG_DMA_LENGTH
select PCI_LOCKLESS_CONFIG
@@ -345,8 +347,6 @@ config PGTABLE_LEVELS
default 3 if X86_PAE
default 2
-source "init/Kconfig"
-
config CC_HAS_SANE_STACKPROTECTOR
bool
default $(success,$(srctree)/scripts/gcc-x86_64-has-stack-protector.sh $(CC)) if 64BIT
@@ -355,8 +355,6 @@ config CC_HAS_SANE_STACKPROTECTOR
We have to make sure stack protector is unconditionally disabled if
the compiler produces broken code.
-source "kernel/Kconfig.freezer"
-
menu "Processor type and features"
config ZONE_DMA
@@ -1043,8 +1041,6 @@ config SCHED_MC_PRIO
If unsure say Y here.
-source "kernel/Kconfig.preempt"
-
config UP_LATE_INIT
def_bool y
depends on !SMP && X86_LOCAL_APIC
@@ -1638,8 +1634,6 @@ config ILLEGAL_POINTER_VALUE
default 0 if X86_32
default 0xdead000000000000 if X86_64
-source "mm/Kconfig"
-
config X86_PMEM_LEGACY_DEVICE
bool
@@ -2865,9 +2859,7 @@ config X86_SYSFB
endmenu
-menu "Executable file formats / Emulations"
-
-source "fs/Kconfig.binfmt"
+menu "Binary Emulations"
config IA32_EMULATION
bool "IA32 Emulation"
@@ -2937,20 +2929,6 @@ config X86_DMA_REMAP
config HAVE_GENERIC_GUP
def_bool y
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
source "drivers/firmware/Kconfig"
-source "fs/Kconfig"
-
-source "arch/x86/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
source "arch/x86/kvm/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index c6dd1d980081..7d68f0c7cfb1 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-source "lib/Kconfig.debug"
-
config EARLY_PRINTK_USB
bool
@@ -410,5 +407,3 @@ endchoice
config FRAME_POINTER
depends on !UNWINDER_ORC && !UNWINDER_GUESS
bool
-
-endmenu
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index a08e82856563..7e3c07d6ad42 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -80,11 +80,6 @@ ifeq ($(CONFIG_X86_32),y)
# alignment instructions.
KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align4))
- # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
- # a lot more stack due to the lack of sharing of stacklots:
- KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0400, \
- $(call cc-option,-fno-unit-at-a-time))
-
# CPU-specific tuning. Anything which can be shared with UML should go here.
include arch/x86/Makefile_32.cpu
KBUILD_CFLAGS += $(cflags-y)
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 45af19921ebd..5296f8c9e7f0 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -13,8 +13,6 @@ KBUILD_CFLAGS += $(call cc-option,-m32)
KBUILD_AFLAGS += $(call cc-option,-m32)
LINK-y += $(call cc-option,-m32)
-export LDFLAGS
-
LDS_EXTRA := -Ui386
export LDS_EXTRA
diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
index 0d41d68131cc..2e1382486e91 100644
--- a/arch/x86/boot/bitops.h
+++ b/arch/x86/boot/bitops.h
@@ -17,6 +17,7 @@
#define _LINUX_BITOPS_H /* Inhibit inclusion of <linux/bitops.h> */
#include <linux/types.h>
+#include <asm/asm.h>
static inline bool constant_test_bit(int nr, const void *addr)
{
@@ -28,7 +29,7 @@ static inline bool variable_test_bit(int nr, const void *addr)
bool v;
const u32 *p = (const u32 *)addr;
- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+ asm("btl %2,%1" CC_SET(c) : CC_OUT(c) (v) : "m" (*p), "Ir" (nr));
return v;
}
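The new <asm/asm.h> include provides CC_SET()/CC_OUT(), which let compilers with flag-output support consume the CPU condition code directly instead of materializing it with setcc into a register. A condensed sketch of what the macros select between (assuming it mirrors arch/x86/include/asm/asm.h; exact spelling may differ):

    #ifdef __GCC_ASM_FLAG_OUTPUTS__
    # define CC_SET(c)  "\n\t/* output condition code " #c " */\n"
    # define CC_OUT(c)  "=@cc" #c               /* flag output, no setcc */
    #else
    # define CC_SET(c)  "\n\tset" #c " %[_cc_" #c "]\n"
    # define CC_OUT(c)  [_cc_ ## c] "=qm"       /* legacy setcc into a reg */
    #endif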
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index e98522ea6f09..1458b1700fc7 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -34,74 +34,13 @@ static void setup_boot_services##bits(struct efi_config *c) \
\
table = (typeof(table))sys_table; \
\
- c->runtime_services = table->runtime; \
- c->boot_services = table->boottime; \
- c->text_output = table->con_out; \
+ c->runtime_services = table->runtime; \
+ c->boot_services = table->boottime; \
+ c->text_output = table->con_out; \
}
BOOT_SERVICES(32);
BOOT_SERVICES(64);
-static inline efi_status_t __open_volume32(void *__image, void **__fh)
-{
- efi_file_io_interface_t *io;
- efi_loaded_image_32_t *image = __image;
- efi_file_handle_32_t *fh;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
- efi_status_t status;
- void *handle = (void *)(unsigned long)image->device_handle;
- unsigned long func;
-
- status = efi_call_early(handle_protocol, handle,
- &fs_proto, (void **)&io);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to handle fs_proto\n");
- return status;
- }
-
- func = (unsigned long)io->open_volume;
- status = efi_early->call(func, io, &fh);
- if (status != EFI_SUCCESS)
- efi_printk(sys_table, "Failed to open volume\n");
-
- *__fh = fh;
- return status;
-}
-
-static inline efi_status_t __open_volume64(void *__image, void **__fh)
-{
- efi_file_io_interface_t *io;
- efi_loaded_image_64_t *image = __image;
- efi_file_handle_64_t *fh;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
- efi_status_t status;
- void *handle = (void *)(unsigned long)image->device_handle;
- unsigned long func;
-
- status = efi_call_early(handle_protocol, handle,
- &fs_proto, (void **)&io);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to handle fs_proto\n");
- return status;
- }
-
- func = (unsigned long)io->open_volume;
- status = efi_early->call(func, io, &fh);
- if (status != EFI_SUCCESS)
- efi_printk(sys_table, "Failed to open volume\n");
-
- *__fh = fh;
- return status;
-}
-
-efi_status_t
-efi_open_volume(efi_system_table_t *sys_table, void *__image, void **__fh)
-{
- if (efi_early->is64)
- return __open_volume64(__image, __fh);
-
- return __open_volume32(__image, __fh);
-}
-
void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
{
efi_call_proto(efi_simple_text_output_protocol, output_string,
@@ -109,7 +48,7 @@ void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
}
static efi_status_t
-__setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
+preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
{
struct pci_setup_rom *rom = NULL;
efi_status_t status;
@@ -134,16 +73,16 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc mem for rom\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'rom'\n");
return status;
}
memset(rom, 0, sizeof(*rom));
- rom->data.type = SETUP_PCI;
- rom->data.len = size - sizeof(struct setup_data);
- rom->data.next = 0;
- rom->pcilen = pci->romsize;
+ rom->data.type = SETUP_PCI;
+ rom->data.len = size - sizeof(struct setup_data);
+ rom->data.next = 0;
+ rom->pcilen = pci->romsize;
*__rom = rom;
status = efi_call_proto(efi_pci_io_protocol, pci.read, pci,
@@ -179,96 +118,6 @@ free_struct:
return status;
}
-static void
-setup_efi_pci32(struct boot_params *params, void **pci_handle,
- unsigned long size)
-{
- efi_pci_io_protocol_t *pci = NULL;
- efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- u32 *handles = (u32 *)(unsigned long)pci_handle;
- efi_status_t status;
- unsigned long nr_pci;
- struct setup_data *data;
- int i;
-
- data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
- while (data && data->next)
- data = (struct setup_data *)(unsigned long)data->next;
-
- nr_pci = size / sizeof(u32);
- for (i = 0; i < nr_pci; i++) {
- struct pci_setup_rom *rom = NULL;
- u32 h = handles[i];
-
- status = efi_call_early(handle_protocol, h,
- &pci_proto, (void **)&pci);
-
- if (status != EFI_SUCCESS)
- continue;
-
- if (!pci)
- continue;
-
- status = __setup_efi_pci(pci, &rom);
- if (status != EFI_SUCCESS)
- continue;
-
- if (data)
- data->next = (unsigned long)rom;
- else
- params->hdr.setup_data = (unsigned long)rom;
-
- data = (struct setup_data *)rom;
-
- }
-}
-
-static void
-setup_efi_pci64(struct boot_params *params, void **pci_handle,
- unsigned long size)
-{
- efi_pci_io_protocol_t *pci = NULL;
- efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- u64 *handles = (u64 *)(unsigned long)pci_handle;
- efi_status_t status;
- unsigned long nr_pci;
- struct setup_data *data;
- int i;
-
- data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
- while (data && data->next)
- data = (struct setup_data *)(unsigned long)data->next;
-
- nr_pci = size / sizeof(u64);
- for (i = 0; i < nr_pci; i++) {
- struct pci_setup_rom *rom = NULL;
- u64 h = handles[i];
-
- status = efi_call_early(handle_protocol, h,
- &pci_proto, (void **)&pci);
-
- if (status != EFI_SUCCESS)
- continue;
-
- if (!pci)
- continue;
-
- status = __setup_efi_pci(pci, &rom);
- if (status != EFI_SUCCESS)
- continue;
-
- if (data)
- data->next = (unsigned long)rom;
- else
- params->hdr.setup_data = (unsigned long)rom;
-
- data = (struct setup_data *)rom;
-
- }
-}
-
/*
* There's no way to return an informative status from this function,
* because any analysis (and printing of error messages) needs to be
@@ -284,6 +133,9 @@ static void setup_efi_pci(struct boot_params *params)
void **pci_handle = NULL;
efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
unsigned long size = 0;
+ unsigned long nr_pci;
+ struct setup_data *data;
+ int i;
status = efi_call_early(locate_handle,
EFI_LOCATE_BY_PROTOCOL,
@@ -295,7 +147,7 @@ static void setup_efi_pci(struct boot_params *params)
size, (void **)&pci_handle);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc mem for pci_handle\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'pci_handle'\n");
return;
}
@@ -307,10 +159,34 @@ static void setup_efi_pci(struct boot_params *params)
if (status != EFI_SUCCESS)
goto free_handle;
- if (efi_early->is64)
- setup_efi_pci64(params, pci_handle, size);
- else
- setup_efi_pci32(params, pci_handle, size);
+ data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+ while (data && data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+
+ nr_pci = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
+ for (i = 0; i < nr_pci; i++) {
+ efi_pci_io_protocol_t *pci = NULL;
+ struct pci_setup_rom *rom;
+
+ status = efi_call_early(handle_protocol,
+ efi_is_64bit() ? ((u64 *)pci_handle)[i]
+ : ((u32 *)pci_handle)[i],
+ &pci_proto, (void **)&pci);
+ if (status != EFI_SUCCESS || !pci)
+ continue;
+
+ status = preserve_pci_rom_image(pci, &rom);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ if (data)
+ data->next = (unsigned long)rom;
+ else
+ params->hdr.setup_data = (unsigned long)rom;
+
+ data = (struct setup_data *)rom;
+ }
free_handle:
efi_call_early(free_pool, pci_handle);
@@ -341,8 +217,7 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
size + sizeof(struct setup_data), &new);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table,
- "Failed to alloc mem for properties\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'properties'\n");
return;
}
@@ -358,9 +233,9 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
new->next = 0;
data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
- if (!data)
+ if (!data) {
boot_params->hdr.setup_data = (unsigned long)new;
- else {
+ } else {
while (data->next)
data = (struct setup_data *)(unsigned long)data->next;
data->next = (unsigned long)new;
@@ -380,81 +255,55 @@ static void setup_quirks(struct boot_params *boot_params)
}
}
+/*
+ * See if we have Universal Graphics Adapter (UGA) protocol
+ */
static efi_status_t
-setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
+setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
{
- struct efi_uga_draw_protocol *uga = NULL, *first_uga;
- efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
+ efi_status_t status;
+ u32 width, height;
+ void **uga_handle = NULL;
+ efi_uga_draw_protocol_t *uga = NULL, *first_uga;
unsigned long nr_ugas;
- u32 *handles = (u32 *)uga_handle;
- efi_status_t status = EFI_INVALID_PARAMETER;
int i;
- first_uga = NULL;
- nr_ugas = size / sizeof(u32);
- for (i = 0; i < nr_ugas; i++) {
- efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
- u32 w, h, depth, refresh;
- void *pciio;
- u32 handle = handles[i];
-
- status = efi_call_early(handle_protocol, handle,
- &uga_proto, (void **)&uga);
- if (status != EFI_SUCCESS)
- continue;
-
- efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
-
- status = efi_early->call((unsigned long)uga->get_mode, uga,
- &w, &h, &depth, &refresh);
- if (status == EFI_SUCCESS && (!first_uga || pciio)) {
- *width = w;
- *height = h;
-
- /*
- * Once we've found a UGA supporting PCIIO,
- * don't bother looking any further.
- */
- if (pciio)
- break;
-
- first_uga = uga;
- }
- }
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ size, (void **)&uga_handle);
+ if (status != EFI_SUCCESS)
+ return status;
- return status;
-}
+ status = efi_call_early(locate_handle,
+ EFI_LOCATE_BY_PROTOCOL,
+ uga_proto, NULL, &size, uga_handle);
+ if (status != EFI_SUCCESS)
+ goto free_handle;
-static efi_status_t
-setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
-{
- struct efi_uga_draw_protocol *uga = NULL, *first_uga;
- efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
- unsigned long nr_ugas;
- u64 *handles = (u64 *)uga_handle;
- efi_status_t status = EFI_INVALID_PARAMETER;
- int i;
+ height = 0;
+ width = 0;
first_uga = NULL;
- nr_ugas = size / sizeof(u64);
+ nr_ugas = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
for (i = 0; i < nr_ugas; i++) {
efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
u32 w, h, depth, refresh;
void *pciio;
- u64 handle = handles[i];
+ unsigned long handle = efi_is_64bit() ? ((u64 *)uga_handle)[i]
+ : ((u32 *)uga_handle)[i];
status = efi_call_early(handle_protocol, handle,
- &uga_proto, (void **)&uga);
+ uga_proto, (void **)&uga);
if (status != EFI_SUCCESS)
continue;
+ pciio = NULL;
efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
- status = efi_early->call((unsigned long)uga->get_mode, uga,
- &w, &h, &depth, &refresh);
+ status = efi_call_proto(efi_uga_draw_protocol, get_mode, uga,
+ &w, &h, &depth, &refresh);
if (status == EFI_SUCCESS && (!first_uga || pciio)) {
- *width = w;
- *height = h;
+ width = w;
+ height = h;
/*
* Once we've found a UGA supporting PCIIO,
@@ -467,59 +316,28 @@ setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
}
}
- return status;
-}
-
-/*
- * See if we have Universal Graphics Adapter (UGA) protocol
- */
-static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto,
- unsigned long size)
-{
- efi_status_t status;
- u32 width, height;
- void **uga_handle = NULL;
-
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- size, (void **)&uga_handle);
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_call_early(locate_handle,
- EFI_LOCATE_BY_PROTOCOL,
- uga_proto, NULL, &size, uga_handle);
- if (status != EFI_SUCCESS)
- goto free_handle;
-
- height = 0;
- width = 0;
-
- if (efi_early->is64)
- status = setup_uga64(uga_handle, size, &width, &height);
- else
- status = setup_uga32(uga_handle, size, &width, &height);
-
if (!width && !height)
goto free_handle;
/* EFI framebuffer */
- si->orig_video_isVGA = VIDEO_TYPE_EFI;
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
- si->lfb_depth = 32;
- si->lfb_width = width;
- si->lfb_height = height;
+ si->lfb_depth = 32;
+ si->lfb_width = width;
+ si->lfb_height = height;
- si->red_size = 8;
- si->red_pos = 16;
- si->green_size = 8;
- si->green_pos = 8;
- si->blue_size = 8;
- si->blue_pos = 0;
- si->rsvd_size = 8;
- si->rsvd_pos = 24;
+ si->red_size = 8;
+ si->red_pos = 16;
+ si->green_size = 8;
+ si->green_pos = 8;
+ si->blue_size = 8;
+ si->blue_pos = 0;
+ si->rsvd_size = 8;
+ si->rsvd_pos = 24;
free_handle:
efi_call_early(free_pool, uga_handle);
+
return status;
}
@@ -586,7 +404,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
return NULL;
- if (efi_early->is64)
+ if (efi_is_64bit())
setup_boot_services64(efi_early);
else
setup_boot_services32(efi_early);
@@ -601,7 +419,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
status = efi_low_alloc(sys_table, 0x4000, 1,
(unsigned long *)&boot_params);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc lowmem for boot params\n");
+ efi_printk(sys_table, "Failed to allocate lowmem for boot params\n");
return NULL;
}
@@ -617,9 +435,9 @@ struct boot_params *make_boot_params(struct efi_config *c)
* Fill out some of the header fields ourselves because the
* EFI firmware loader doesn't load the first sector.
*/
- hdr->root_flags = 1;
- hdr->vid_mode = 0xffff;
- hdr->boot_flag = 0xAA55;
+ hdr->root_flags = 1;
+ hdr->vid_mode = 0xffff;
+ hdr->boot_flag = 0xAA55;
hdr->type_of_loader = 0x21;
@@ -627,6 +445,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
cmdline_ptr = efi_convert_cmdline(sys_table, image, &options_size);
if (!cmdline_ptr)
goto fail;
+
hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
/* Fill in upper bits of command line address, NOP on 32 bit */
boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
@@ -663,10 +482,12 @@ struct boot_params *make_boot_params(struct efi_config *c)
boot_params->ext_ramdisk_size = (u64)ramdisk_size >> 32;
return boot_params;
+
fail2:
efi_free(sys_table, options_size, hdr->cmd_line_ptr);
fail:
efi_free(sys_table, 0x4000, (unsigned long)boot_params);
+
return NULL;
}
@@ -678,7 +499,7 @@ static void add_e820ext(struct boot_params *params,
unsigned long size;
e820ext->type = SETUP_E820_EXT;
- e820ext->len = nr_entries * sizeof(struct boot_e820_entry);
+ e820ext->len = nr_entries * sizeof(struct boot_e820_entry);
e820ext->next = 0;
data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
@@ -692,8 +513,8 @@ static void add_e820ext(struct boot_params *params,
params->hdr.setup_data = (unsigned long)e820ext;
}
-static efi_status_t setup_e820(struct boot_params *params,
- struct setup_data *e820ext, u32 e820ext_size)
+static efi_status_t
+setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_size)
{
struct boot_e820_entry *entry = params->e820_table;
struct efi_info *efi = &params->efi_info;
@@ -814,11 +635,10 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
}
struct exit_boot_struct {
- struct boot_params *boot_params;
- struct efi_info *efi;
- struct setup_data *e820ext;
- __u32 e820ext_size;
- bool is64;
+ struct boot_params *boot_params;
+ struct efi_info *efi;
+ struct setup_data *e820ext;
+ __u32 e820ext_size;
};
static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -845,25 +665,25 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
first = false;
}
- signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+ signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
+ : EFI32_LOADER_SIGNATURE;
memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
- p->efi->efi_systab = (unsigned long)sys_table_arg;
- p->efi->efi_memdesc_size = *map->desc_size;
- p->efi->efi_memdesc_version = *map->desc_ver;
- p->efi->efi_memmap = (unsigned long)*map->map;
- p->efi->efi_memmap_size = *map->map_size;
+ p->efi->efi_systab = (unsigned long)sys_table_arg;
+ p->efi->efi_memdesc_size = *map->desc_size;
+ p->efi->efi_memdesc_version = *map->desc_ver;
+ p->efi->efi_memmap = (unsigned long)*map->map;
+ p->efi->efi_memmap_size = *map->map_size;
#ifdef CONFIG_X86_64
- p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
- p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
+ p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
+ p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
#endif
return EFI_SUCCESS;
}
-static efi_status_t exit_boot(struct boot_params *boot_params,
- void *handle, bool is64)
+static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
{
unsigned long map_sz, key, desc_size, buff_size;
efi_memory_desc_t *mem_map;
@@ -874,17 +694,16 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
struct efi_boot_memmap map;
struct exit_boot_struct priv;
- map.map = &mem_map;
- map.map_size = &map_sz;
- map.desc_size = &desc_size;
- map.desc_ver = &desc_version;
- map.key_ptr = &key;
- map.buff_size = &buff_size;
- priv.boot_params = boot_params;
- priv.efi = &boot_params->efi_info;
- priv.e820ext = NULL;
- priv.e820ext_size = 0;
- priv.is64 = is64;
+ map.map = &mem_map;
+ map.map_size = &map_sz;
+ map.desc_size = &desc_size;
+ map.desc_ver = &desc_version;
+ map.key_ptr = &key;
+ map.buff_size = &buff_size;
+ priv.boot_params = boot_params;
+ priv.efi = &boot_params->efi_info;
+ priv.e820ext = NULL;
+ priv.e820ext_size = 0;
/* Might as well exit boot services now */
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -892,10 +711,11 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
if (status != EFI_SUCCESS)
return status;
- e820ext = priv.e820ext;
- e820ext_size = priv.e820ext_size;
+ e820ext = priv.e820ext;
+ e820ext_size = priv.e820ext_size;
+
/* Historic? */
- boot_params->alt_mem_k = 32 * 1024;
+ boot_params->alt_mem_k = 32 * 1024;
status = setup_e820(boot_params, e820ext, e820ext_size);
if (status != EFI_SUCCESS)
@@ -908,8 +728,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
* On success we return a pointer to a boot_params structure, and NULL
* on failure.
*/
-struct boot_params *efi_main(struct efi_config *c,
- struct boot_params *boot_params)
+struct boot_params *
+efi_main(struct efi_config *c, struct boot_params *boot_params)
{
struct desc_ptr *gdt = NULL;
efi_loaded_image_t *image;
@@ -918,13 +738,11 @@ struct boot_params *efi_main(struct efi_config *c,
struct desc_struct *desc;
void *handle;
efi_system_table_t *_table;
- bool is64;
efi_early = c;
_table = (efi_system_table_t *)(unsigned long)efi_early->table;
handle = (void *)(unsigned long)efi_early->image_handle;
- is64 = efi_early->is64;
sys_table = _table;
@@ -932,7 +750,7 @@ struct boot_params *efi_main(struct efi_config *c,
if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
goto fail;
- if (is64)
+ if (efi_is_64bit())
setup_boot_services64(efi_early);
else
setup_boot_services32(efi_early);
@@ -957,7 +775,7 @@ struct boot_params *efi_main(struct efi_config *c,
status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
sizeof(*gdt), (void **)&gdt);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc mem for gdt structure\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'gdt' structure\n");
goto fail;
}
@@ -965,7 +783,7 @@ struct boot_params *efi_main(struct efi_config *c,
status = efi_low_alloc(sys_table, gdt->size, 8,
(unsigned long *)&gdt->address);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table, "Failed to alloc mem for gdt\n");
+ efi_printk(sys_table, "Failed to allocate memory for 'gdt'\n");
goto fail;
}
@@ -988,7 +806,7 @@ struct boot_params *efi_main(struct efi_config *c,
hdr->code32_start = bzimage_addr;
}
- status = exit_boot(boot_params, handle, is64);
+ status = exit_boot(boot_params, handle);
if (status != EFI_SUCCESS) {
efi_printk(sys_table, "exit_boot() failed!\n");
goto fail;
@@ -1002,19 +820,20 @@ struct boot_params *efi_main(struct efi_config *c,
if (IS_ENABLED(CONFIG_X86_64)) {
/* __KERNEL32_CS */
- desc->limit0 = 0xffff;
- desc->base0 = 0x0000;
- desc->base1 = 0x0000;
- desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
- desc->s = DESC_TYPE_CODE_DATA;
- desc->dpl = 0;
- desc->p = 1;
- desc->limit1 = 0xf;
- desc->avl = 0;
- desc->l = 0;
- desc->d = SEG_OP_SIZE_32BIT;
- desc->g = SEG_GRANULARITY_4KB;
- desc->base2 = 0x00;
+ desc->limit0 = 0xffff;
+ desc->base0 = 0x0000;
+ desc->base1 = 0x0000;
+ desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+ desc->s = DESC_TYPE_CODE_DATA;
+ desc->dpl = 0;
+ desc->p = 1;
+ desc->limit1 = 0xf;
+ desc->avl = 0;
+ desc->l = 0;
+ desc->d = SEG_OP_SIZE_32BIT;
+ desc->g = SEG_GRANULARITY_4KB;
+ desc->base2 = 0x00;
+
desc++;
} else {
/* Second entry is unused on 32-bit */
@@ -1022,15 +841,16 @@ struct boot_params *efi_main(struct efi_config *c,
}
/* __KERNEL_CS */
- desc->limit0 = 0xffff;
- desc->base0 = 0x0000;
- desc->base1 = 0x0000;
- desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
- desc->s = DESC_TYPE_CODE_DATA;
- desc->dpl = 0;
- desc->p = 1;
- desc->limit1 = 0xf;
- desc->avl = 0;
+ desc->limit0 = 0xffff;
+ desc->base0 = 0x0000;
+ desc->base1 = 0x0000;
+ desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+ desc->s = DESC_TYPE_CODE_DATA;
+ desc->dpl = 0;
+ desc->p = 1;
+ desc->limit1 = 0xf;
+ desc->avl = 0;
+
if (IS_ENABLED(CONFIG_X86_64)) {
desc->l = 1;
desc->d = 0;
@@ -1038,41 +858,41 @@ struct boot_params *efi_main(struct efi_config *c,
desc->l = 0;
desc->d = SEG_OP_SIZE_32BIT;
}
- desc->g = SEG_GRANULARITY_4KB;
- desc->base2 = 0x00;
+ desc->g = SEG_GRANULARITY_4KB;
+ desc->base2 = 0x00;
desc++;
/* __KERNEL_DS */
- desc->limit0 = 0xffff;
- desc->base0 = 0x0000;
- desc->base1 = 0x0000;
- desc->type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE;
- desc->s = DESC_TYPE_CODE_DATA;
- desc->dpl = 0;
- desc->p = 1;
- desc->limit1 = 0xf;
- desc->avl = 0;
- desc->l = 0;
- desc->d = SEG_OP_SIZE_32BIT;
- desc->g = SEG_GRANULARITY_4KB;
- desc->base2 = 0x00;
+ desc->limit0 = 0xffff;
+ desc->base0 = 0x0000;
+ desc->base1 = 0x0000;
+ desc->type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE;
+ desc->s = DESC_TYPE_CODE_DATA;
+ desc->dpl = 0;
+ desc->p = 1;
+ desc->limit1 = 0xf;
+ desc->avl = 0;
+ desc->l = 0;
+ desc->d = SEG_OP_SIZE_32BIT;
+ desc->g = SEG_GRANULARITY_4KB;
+ desc->base2 = 0x00;
desc++;
if (IS_ENABLED(CONFIG_X86_64)) {
/* Task segment value */
- desc->limit0 = 0x0000;
- desc->base0 = 0x0000;
- desc->base1 = 0x0000;
- desc->type = SEG_TYPE_TSS;
- desc->s = 0;
- desc->dpl = 0;
- desc->p = 1;
- desc->limit1 = 0x0;
- desc->avl = 0;
- desc->l = 0;
- desc->d = 0;
- desc->g = SEG_GRANULARITY_4KB;
- desc->base2 = 0x00;
+ desc->limit0 = 0x0000;
+ desc->base0 = 0x0000;
+ desc->base1 = 0x0000;
+ desc->type = SEG_TYPE_TSS;
+ desc->s = 0;
+ desc->dpl = 0;
+ desc->p = 1;
+ desc->limit1 = 0x0;
+ desc->avl = 0;
+ desc->l = 0;
+ desc->d = 0;
+ desc->g = SEG_GRANULARITY_4KB;
+ desc->base2 = 0x00;
desc++;
}
@@ -1082,5 +902,6 @@ struct boot_params *efi_main(struct efi_config *c,
return boot_params;
fail:
efi_printk(sys_table, "efi_main() failed!\n");
+
return NULL;
}
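With is64 dropped from struct efi_config and the duplicated 32/64-bit helper pairs folded together, everything above keys off efi_is_64bit(). The recurring idiom is walking a firmware handle array whose element width depends on the firmware bitness; a minimal sketch of that idiom, assuming the simplified form used in the hunks above:

    /* Illustration only: fetch handle i from a mixed-width EFI array. */
    static unsigned long demo_get_handle(void **handles, int i)
    {
            return efi_is_64bit() ? ((u64 *)handles)[i]
                                  : ((u32 *)handles)[i];
    }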
diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h
index e799dc5c6448..8297387c4676 100644
--- a/arch/x86/boot/compressed/eboot.h
+++ b/arch/x86/boot/compressed/eboot.h
@@ -12,22 +12,22 @@
#define DESC_TYPE_CODE_DATA (1 << 0)
-struct efi_uga_draw_protocol_32 {
+typedef struct {
u32 get_mode;
u32 set_mode;
u32 blt;
-};
+} efi_uga_draw_protocol_32_t;
-struct efi_uga_draw_protocol_64 {
+typedef struct {
u64 get_mode;
u64 set_mode;
u64 blt;
-};
+} efi_uga_draw_protocol_64_t;
-struct efi_uga_draw_protocol {
+typedef struct {
void *get_mode;
void *set_mode;
void *blt;
-};
+} efi_uga_draw_protocol_t;
#endif /* BOOT_COMPRESSED_EBOOT_H */
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index b87a7582853d..302517929932 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -102,7 +102,7 @@ static bool memmap_too_large;
/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
-unsigned long long mem_limit = ULLONG_MAX;
+static unsigned long long mem_limit = ULLONG_MAX;
enum mem_avoid_index {
@@ -215,7 +215,36 @@ static void mem_avoid_memmap(char *str)
memmap_too_large = true;
}
-static int handle_mem_memmap(void)
+/* Store the number of 1GB huge pages the user specified: */
+static unsigned long max_gb_huge_pages;
+
+static void parse_gb_huge_pages(char *param, char *val)
+{
+ static bool gbpage_sz;
+ char *p;
+
+ if (!strcmp(param, "hugepagesz")) {
+ p = val;
+ if (memparse(p, &p) != PUD_SIZE) {
+ gbpage_sz = false;
+ return;
+ }
+
+ if (gbpage_sz)
+ warn("Repeatedly set hugeTLB page size of 1G!\n");
+ gbpage_sz = true;
+ return;
+ }
+
+ if (!strcmp(param, "hugepages") && gbpage_sz) {
+ p = val;
+ max_gb_huge_pages = simple_strtoull(p, &p, 0);
+ return;
+ }
+}
+
+
+static int handle_mem_options(void)
{
char *args = (char *)get_cmd_line_ptr();
size_t len = strlen((char *)args);
@@ -223,7 +252,8 @@ static int handle_mem_memmap(void)
char *param, *val;
u64 mem_size;
- if (!strstr(args, "memmap=") && !strstr(args, "mem="))
+ if (!strstr(args, "memmap=") && !strstr(args, "mem=") &&
+ !strstr(args, "hugepages"))
return 0;
tmp_cmdline = malloc(len + 1);
@@ -248,6 +278,8 @@ static int handle_mem_memmap(void)
if (!strcmp(param, "memmap")) {
mem_avoid_memmap(val);
+ } else if (strstr(param, "hugepages")) {
+ parse_gb_huge_pages(param, val);
} else if (!strcmp(param, "mem")) {
char *p = val;
@@ -387,7 +419,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
/* We don't need to set a mapping for setup_data. */
/* Mark the memmap regions we need to avoid */
- handle_mem_memmap();
+ handle_mem_options();
#ifdef CONFIG_X86_VERBOSE_BOOTUP
/* Make sure video RAM can be used. */
@@ -466,6 +498,60 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size)
}
}
+/*
+ * Skip as many 1GB huge pages as possible in the passed region
+ * according to the number which users specified:
+ */
+static void
+process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
+{
+ unsigned long addr, size = 0;
+ struct mem_vector tmp;
+ int i = 0;
+
+ if (!max_gb_huge_pages) {
+ store_slot_info(region, image_size);
+ return;
+ }
+
+ addr = ALIGN(region->start, PUD_SIZE);
+ /* Did we raise the address above the passed-in memory entry? */
+ if (addr < region->start + region->size)
+ size = region->size - (addr - region->start);
+
+ /* Check how many 1GB huge pages can be filtered out: */
+ while (size > PUD_SIZE && max_gb_huge_pages) {
+ size -= PUD_SIZE;
+ max_gb_huge_pages--;
+ i++;
+ }
+
+ /* No good 1GB huge pages found: */
+ if (!i) {
+ store_slot_info(region, image_size);
+ return;
+ }
+
+ /*
+ * Skip those 'i'*1GB good huge pages, and continue checking and
+ * processing the remaining head or tail part of the passed region
+ * if available.
+ */
+
+ if (addr >= region->start + image_size) {
+ tmp.start = region->start;
+ tmp.size = addr - region->start;
+ store_slot_info(&tmp, image_size);
+ }
+
+ size = region->size - (addr - region->start) - i * PUD_SIZE;
+ if (size >= image_size) {
+ tmp.start = addr + i * PUD_SIZE;
+ tmp.size = size;
+ store_slot_info(&tmp, image_size);
+ }
+}
+
static unsigned long slots_fetch_random(void)
{
unsigned long slot;
@@ -546,7 +632,7 @@ static void process_mem_region(struct mem_vector *entry,
/* If nothing overlaps, store the region and return. */
if (!mem_avoid_overlap(&region, &overlap)) {
- store_slot_info(&region, image_size);
+ process_gb_huge_pages(&region, image_size);
return;
}
@@ -556,7 +642,7 @@ static void process_mem_region(struct mem_vector *entry,
beginning.start = region.start;
beginning.size = overlap.start - region.start;
- store_slot_info(&beginning, image_size);
+ process_gb_huge_pages(&beginning, image_size);
}
/* Return if overlap extends to or past end of region. */
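process_gb_huge_pages() withholds whole 1GB-aligned chunks from the KASLR slot pool so the requested number of 1GB hugepages can still be allocated after boot; only the unaligned head and leftover tail of a candidate region remain eligible. A worked example with illustrative numbers (not taken from the patch):

    /* region = [0x30000000, +0xd0000000), max_gb_huge_pages = 2, 16M image:
     *
     *   addr = ALIGN(0x30000000, 1G)   -> 0x40000000
     *   head [0x30000000, 0x40000000)  -> 256M slot, stored
     *   [0x40000000, 0xc0000000)       -> two 1G chunks withheld
     *   tail [0xc0000000, 0x100000000) -> 1G slot, stored
     */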
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 16f49123d747..c4428a176973 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -13,6 +13,7 @@
*/
#include <linux/types.h>
+#include <asm/asm.h>
#include "ctype.h"
#include "string.h"
@@ -28,8 +29,8 @@
int memcmp(const void *s1, const void *s2, size_t len)
{
bool diff;
- asm("repe; cmpsb; setnz %0"
- : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+ asm("repe; cmpsb" CC_SET(nz)
+ : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S
index 717bf0776421..5f7e43d4f64a 100644
--- a/arch/x86/crypto/aegis128-aesni-asm.S
+++ b/arch/x86/crypto/aegis128-aesni-asm.S
@@ -75,7 +75,7 @@
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG, MSG
mov LEN, %r8
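The same one-byte micro-optimization recurs across the crypto .S files below: a 32-bit xor architecturally zero-extends into the full 64-bit register, so "xor %r9d, %r9d" clears %r9 without the REX.W prefix that "xor %r9, %r9" needs. A hedged C illustration via the %k operand modifier (register allocation is left to the compiler, unlike the hand-written asm):

    static inline unsigned long demo_zero(void)
    {
            unsigned long v;

            /* %k0 forces the 32-bit form, e.g. xor %r9d, %r9d. */
            asm ("xor %k0, %k0" : "=r" (v));
            return v;
    }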
diff --git a/arch/x86/crypto/aegis128l-aesni-asm.S b/arch/x86/crypto/aegis128l-aesni-asm.S
index 4eda2b8db9e1..491dd61c845c 100644
--- a/arch/x86/crypto/aegis128l-aesni-asm.S
+++ b/arch/x86/crypto/aegis128l-aesni-asm.S
@@ -66,7 +66,7 @@
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG0, MSG0
pxor MSG1, MSG1
diff --git a/arch/x86/crypto/aegis256-aesni-asm.S b/arch/x86/crypto/aegis256-aesni-asm.S
index 32aae8397268..8870c7c5d9a4 100644
--- a/arch/x86/crypto/aegis256-aesni-asm.S
+++ b/arch/x86/crypto/aegis256-aesni-asm.S
@@ -59,7 +59,7 @@
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG, MSG
mov LEN, %r8
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index e762ef417562..9bd139569b41 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -258,7 +258,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.macro GCM_INIT Iv SUBKEY AAD AADLEN
mov \AADLEN, %r11
mov %r11, AadLen(%arg2) # ctx_data.aad_length = aad_length
- xor %r11, %r11
+ xor %r11d, %r11d
mov %r11, InLen(%arg2) # ctx_data.in_length = 0
mov %r11, PBlockLen(%arg2) # ctx_data.partial_block_length = 0
mov %r11, PBlockEncKey(%arg2) # ctx_data.partial_block_enc_key = 0
@@ -286,7 +286,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
movdqu HashKey(%arg2), %xmm13
add %arg5, InLen(%arg2)
- xor %r11, %r11 # initialise the data pointer offset as zero
+ xor %r11d, %r11d # initialise the data pointer offset as zero
PARTIAL_BLOCK %arg3 %arg4 %arg5 %r11 %xmm8 \operation
sub %r11, %arg5 # sub partial block data used
@@ -702,7 +702,7 @@ _no_extra_mask_1_\@:
# GHASH computation for the last <16 Byte block
GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
- xor %rax,%rax
+ xor %eax, %eax
mov %rax, PBlockLen(%arg2)
jmp _dec_done_\@
@@ -737,7 +737,7 @@ _no_extra_mask_2_\@:
# GHASH computation for the last <16 Byte block
GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
- xor %rax,%rax
+ xor %eax, %eax
mov %rax, PBlockLen(%arg2)
jmp _encode_done_\@
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index faecb1518bf8..1985ea0b551b 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -463,7 +463,7 @@ _get_AAD_rest_final\@:
_get_AAD_done\@:
# initialize the data pointer offset as zero
- xor %r11, %r11
+ xor %r11d, %r11d
# start AES for num_initial_blocks blocks
mov arg5, %rax # rax = *Y0
@@ -1770,7 +1770,7 @@ _get_AAD_rest_final\@:
_get_AAD_done\@:
# initialize the data pointer offset as zero
- xor %r11, %r11
+ xor %r11d, %r11d
# start AES for num_initial_blocks blocks
mov arg5, %rax # rax = *Y0
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 2ddbe3a1868b..3582ae885ee1 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -154,8 +154,7 @@ static struct shash_alg ghash_alg = {
.cra_name = "__ghash",
.cra_driver_name = "__ghash-pclmulqdqni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH |
- CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
@@ -315,9 +314,8 @@ static struct ahash_alg ghash_async_alg = {
.cra_driver_name = "ghash-clmulni",
.cra_priority = 400,
.cra_ctxsize = sizeof(struct ghash_async_ctx),
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = GHASH_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_init = ghash_async_init_tfm,
.cra_exit = ghash_async_exit_tfm,
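
[ Editor's note: the flag removals here and in the glue hunks below follow
  from a crypto API change: crypto_register_shash()/crypto_register_ahash()
  now imply the CRYPTO_ALG_TYPE_* bit and set .cra_type themselves, so a
  driver keeps only modifier flags (or none). A hedged sketch of the
  resulting template; the "example" names and callbacks are placeholders,
  not real kernel symbols:

	#include <crypto/internal/hash.h>
	#include <linux/module.h>

	static int example_init(struct shash_desc *desc);
	static int example_update(struct shash_desc *desc, const u8 *data,
				  unsigned int len);
	static int example_final(struct shash_desc *desc, u8 *out);

	static struct shash_alg example_alg = {
		.digestsize	= 16,
		.init		= example_init,
		.update		= example_update,
		.final		= example_final,
		.descsize	= 64,
		.base		= {
			.cra_name	 = "example",
			.cra_driver_name = "example-generic",
			.cra_priority	 = 100,
			/* no CRYPTO_ALG_TYPE_SHASH, no .cra_type needed */
			.cra_flags	 = CRYPTO_ALG_INTERNAL,
			.cra_blocksize	 = 16,
			.cra_module	 = THIS_MODULE,
		},
	}; ]
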
diff --git a/arch/x86/crypto/morus1280-avx2-asm.S b/arch/x86/crypto/morus1280-avx2-asm.S
index 07653d4582a6..de182c460f82 100644
--- a/arch/x86/crypto/morus1280-avx2-asm.S
+++ b/arch/x86/crypto/morus1280-avx2-asm.S
@@ -113,7 +113,7 @@ ENDPROC(__morus1280_update_zero)
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
vpxor MSG, MSG, MSG
mov %rcx, %r8
diff --git a/arch/x86/crypto/morus1280-sse2-asm.S b/arch/x86/crypto/morus1280-sse2-asm.S
index bd1aa1b60869..da5d2905db60 100644
--- a/arch/x86/crypto/morus1280-sse2-asm.S
+++ b/arch/x86/crypto/morus1280-sse2-asm.S
@@ -235,7 +235,7 @@ ENDPROC(__morus1280_update_zero)
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG_LO, MSG_LO
pxor MSG_HI, MSG_HI
diff --git a/arch/x86/crypto/morus640-sse2-asm.S b/arch/x86/crypto/morus640-sse2-asm.S
index efa02816d921..414db480250e 100644
--- a/arch/x86/crypto/morus640-sse2-asm.S
+++ b/arch/x86/crypto/morus640-sse2-asm.S
@@ -113,7 +113,7 @@ ENDPROC(__morus640_update_zero)
* %r9
*/
__load_partial:
- xor %r9, %r9
+ xor %r9d, %r9d
pxor MSG, MSG
mov %rcx, %r8
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 790377797544..f012b7e28ad1 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -169,7 +169,6 @@ static struct shash_alg alg = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-simd",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
index e17655ffde79..b93805664c1d 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha1-mb/sha1_mb.c
@@ -746,9 +746,8 @@ static struct ahash_alg sha1_mb_areq_alg = {
* algo may not have completed before hashing thread
* sleep
*/
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
@@ -871,10 +870,16 @@ static struct ahash_alg sha1_mb_async_alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1_mb",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ /*
+ * Low priority, since with few concurrent hash requests
+ * this is extremely slow due to the flush delay. Users
+ * whose workloads would benefit from this can request
+ * it explicitly by driver name, or can increase its
+ * priority at runtime using NETLINK_CRYPTO.
+ */
+ .cra_priority = 50,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
.cra_init = sha1_mb_async_init_tfm,
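
[ Editor's note: a hedged sketch of what "request it explicitly by driver
  name" looks like from kernel code - allocation by cra_driver_name bypasses
  priority-based resolution. get_mb_sha1() is a hypothetical helper:

	#include <crypto/hash.h>
	#include <linux/err.h>

	static struct crypto_ahash *get_mb_sha1(void)
	{
		/* "sha1_mb" is the driver name, not the generic "sha1" */
		struct crypto_ahash *tfm = crypto_alloc_ahash("sha1_mb", 0, 0);

		return IS_ERR(tfm) ? NULL : tfm;
	} ]
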
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
index 6204bd53528c..613d0bfc3d84 100644
--- a/arch/x86/crypto/sha1_ssse3_asm.S
+++ b/arch/x86/crypto/sha1_ssse3_asm.S
@@ -96,7 +96,7 @@
# cleanup workspace
mov $8, %ecx
mov %rsp, %rdi
- xor %rax, %rax
+ xor %eax, %eax
rep stosq
mov %rbp, %rsp # deallocate workspace
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index fc61739150e7..7391c7de72c7 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -104,7 +104,6 @@ static struct shash_alg sha1_ssse3_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -157,7 +156,6 @@ static struct shash_alg sha1_avx_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -249,7 +247,6 @@ static struct shash_alg sha1_avx2_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -307,7 +304,6 @@ static struct shash_alg sha1_ni_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ni",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
index 4c46ac1b6653..97c5fc43e115 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -745,9 +745,8 @@ static struct ahash_alg sha256_mb_areq_alg = {
* algo may not have completed before hashing thread
* sleep
*/
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
@@ -870,11 +869,16 @@ static struct ahash_alg sha256_mb_async_alg = {
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256_mb",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ /*
+ * Low priority, since with few concurrent hash requests
+ * this is extremely slow due to the flush delay. Users
+ * whose workloads would benefit from this can request
+ * it explicitly by driver name, or can increase its
+ * priority at runtime using NETLINK_CRYPTO.
+ */
+ .cra_priority = 50,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
(sha256_mb_async_alg.halg.base.cra_list),
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index 16c4ccb1f154..d2364c55bbde 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -265,7 +265,7 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- vmovd _args_digest(state , idx, 4) , %xmm0
+ vmovd _args_digest+4*32(state, idx, 4), %xmm1
vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
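
[ Editor's note: the fix above replaces a load that re-read digest word 0
  into %xmm0 - clobbering the three words just assembled there and leaving
  %xmm1 uninitialized - with the intended load of word 4 into %xmm1. A
  hedged C model of the gather the corrected sequence performs; the names
  are illustrative:

	#include <stdint.h>

	/*
	 * Lane `idx` of the interleaved _args_digest array holds eight
	 * 32-bit digest words spaced 32 bytes apart: words 0-3 go to
	 * %xmm0, words 4-7 to %xmm1.
	 */
	static void gather_digest(uint32_t out[8], const uint32_t *args_digest,
				  unsigned int idx)
	{
		for (int i = 0; i < 8; i++)
			out[i] = args_digest[i * 8 + idx];	/* 32-byte stride */
	} ]
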
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 9e79baf03a4b..773a873d2b28 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -109,7 +109,6 @@ static struct shash_alg sha256_ssse3_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -124,7 +123,6 @@ static struct shash_alg sha256_ssse3_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -177,7 +175,6 @@ static struct shash_alg sha256_avx_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -192,7 +189,6 @@ static struct shash_alg sha256_avx_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -261,7 +257,6 @@ static struct shash_alg sha256_avx2_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -276,7 +271,6 @@ static struct shash_alg sha256_avx2_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -343,7 +337,6 @@ static struct shash_alg sha256_ni_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ni",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -358,7 +351,6 @@ static struct shash_alg sha256_ni_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ni",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
index 39e2bbdc1836..26b85678012d 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -778,9 +778,8 @@ static struct ahash_alg sha512_mb_areq_alg = {
* algo may not have completed before hashing thread
* sleep
*/
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
@@ -904,11 +903,16 @@ static struct ahash_alg sha512_mb_async_alg = {
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512_mb",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ /*
+ * Low priority, since with few concurrent hash requests
+ * this is extremely slow due to the flush delay. Users
+ * whose workloads would benefit from this can request
+ * it explicitly by driver name, or can increase its
+ * priority at runtime using NETLINK_CRYPTO.
+ */
+ .cra_priority = 50,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
(sha512_mb_async_alg.halg.base.cra_list),
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 2b0e2a6825f3..f1b811b60ba6 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -109,7 +109,6 @@ static struct shash_alg sha512_ssse3_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -124,7 +123,6 @@ static struct shash_alg sha512_ssse3_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -188,7 +186,6 @@ static struct shash_alg sha512_avx_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -203,7 +200,6 @@ static struct shash_alg sha512_avx_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -261,7 +257,6 @@ static struct shash_alg sha512_avx2_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -276,7 +271,6 @@ static struct shash_alg sha512_avx2_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index c371bfee137a..2767c625a52c 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -65,7 +65,7 @@
# define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
-# define resume_kernel restore_all
+# define resume_kernel restore_all_kernel
#endif
.macro TRACE_IRQS_IRET
@@ -77,6 +77,8 @@
#endif
.endm
+#define PTI_SWITCH_MASK (1 << PAGE_SHIFT)
+
/*
* User gs save/restore
*
@@ -154,7 +156,52 @@
#endif /* CONFIG_X86_32_LAZY_GS */
-.macro SAVE_ALL pt_regs_ax=%eax
+/* Unconditionally switch to user cr3 */
+.macro SWITCH_TO_USER_CR3 scratch_reg:req
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+
+ movl %cr3, \scratch_reg
+ orl $PTI_SWITCH_MASK, \scratch_reg
+ movl \scratch_reg, %cr3
+.Lend_\@:
+.endm
+
+.macro BUG_IF_WRONG_CR3 no_user_check=0
+#ifdef CONFIG_DEBUG_ENTRY
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+ .if \no_user_check == 0
+ /* coming from usermode? */
+ testl $SEGMENT_RPL_MASK, PT_CS(%esp)
+ jz .Lend_\@
+ .endif
+ /* On user-cr3? */
+ movl %cr3, %eax
+ testl $PTI_SWITCH_MASK, %eax
+ jnz .Lend_\@
+ /* From userspace with kernel cr3 - BUG */
+ ud2
+.Lend_\@:
+#endif
+.endm
+
+/*
+ * Switch to kernel cr3 if not already loaded and return the original
+ * cr3 in \scratch_reg
+ */
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+ movl %cr3, \scratch_reg
+ /* Test if we are already on kernel CR3 */
+ testl $PTI_SWITCH_MASK, \scratch_reg
+ jz .Lend_\@
+ andl $(~PTI_SWITCH_MASK), \scratch_reg
+ movl \scratch_reg, %cr3
+ /* Return original CR3 in \scratch_reg */
+ orl $PTI_SWITCH_MASK, \scratch_reg
+.Lend_\@:
+.endm
+
+.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0
cld
PUSH_GS
pushl %fs
@@ -173,6 +220,29 @@
movl $(__KERNEL_PERCPU), %edx
movl %edx, %fs
SET_KERNEL_GS %edx
+
+ /* Switch to kernel stack if necessary */
+.if \switch_stacks > 0
+ SWITCH_TO_KERNEL_STACK
+.endif
+
+.endm
+
+.macro SAVE_ALL_NMI cr3_reg:req
+ SAVE_ALL
+
+ BUG_IF_WRONG_CR3
+
+ /*
+ * Now switch the CR3 when PTI is enabled.
+ *
+ * We can enter with either user or kernel cr3; the code will
+ * store the old cr3 in \cr3_reg and switch to the kernel cr3
+ * if necessary.
+ */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg
+
+.Lend_\@:
.endm
/*
@@ -221,6 +291,349 @@
POP_GS_EX
.endm
+.macro RESTORE_ALL_NMI cr3_reg:req pop=0
+ /*
+ * Now switch the CR3 when PTI is enabled.
+ *
+ * We enter with kernel cr3 and switch the cr3 to the value
+ * stored in \cr3_reg, which is either a user or a kernel cr3.
+ */
+ ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI
+
+ testl $PTI_SWITCH_MASK, \cr3_reg
+ jz .Lswitched_\@
+
+ /* User cr3 in \cr3_reg - write it to hardware cr3 */
+ movl \cr3_reg, %cr3
+
+.Lswitched_\@:
+
+ BUG_IF_WRONG_CR3
+
+ RESTORE_REGS pop=\pop
+.endm
+
+.macro CHECK_AND_APPLY_ESPFIX
+#ifdef CONFIG_X86_ESPFIX32
+#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
+
+ ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX
+
+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
+ /*
+ * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
+ * are returning to the kernel.
+ * See comments in process.c:copy_thread() for details.
+ */
+ movb PT_OLDSS(%esp), %ah
+ movb PT_CS(%esp), %al
+ andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
+ cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
+ jne .Lend_\@ # returning to user-space with LDT SS
+
+ /*
+ * Set up and switch to the ESPFIX stack
+ *
+ * We're returning to userspace with a 16-bit stack. The CPU will not
+ * restore the high word of ESP for us on executing iret... This is an
+ * "official" bug of all the x86-compatible CPUs, which we can work
+ * around to make dosemu and wine happy. We do this by preloading the
+ * high word of ESP with the high word of the userspace ESP while
+ * compensating for the offset by changing to the ESPFIX segment with
+ * a base address that matches for the difference.
+ */
+ mov %esp, %edx /* load kernel esp */
+ mov PT_OLDESP(%esp), %eax /* load userspace esp */
+ mov %dx, %ax /* eax: new kernel esp */
+ sub %eax, %edx /* offset (low word is 0) */
+ shr $16, %edx
+ mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
+ mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
+ pushl $__ESPFIX_SS
+ pushl %eax /* new kernel esp */
+ /*
+ * Disable interrupts, but do not irqtrace this section: we
+ * will soon execute iret and the tracer was already set to
+ * the irqstate after the IRET:
+ */
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ lss (%esp), %esp /* switch to espfix segment */
+.Lend_\@:
+#endif /* CONFIG_X86_ESPFIX32 */
+.endm
+
+/*
+ * Called with pt_regs fully populated and kernel segments loaded,
+ * so we can access PER_CPU and use the integer registers.
+ *
+ * We need to be very careful here with the %esp switch, because an NMI
+ * can happen anywhere. If the NMI handler finds itself on the
+ * entry-stack, it will overwrite the task-stack and everything we
+ * copied there. So allocate the stack-frame on the task-stack and
+ * switch to it before we do any copying.
+ */
+
+#define CS_FROM_ENTRY_STACK (1 << 31)
+#define CS_FROM_USER_CR3 (1 << 30)
+
+.macro SWITCH_TO_KERNEL_STACK
+
+ ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
+
+ BUG_IF_WRONG_CR3
+
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
+
+ /*
+ * %eax now contains the entry cr3 and we carry it forward in
+ * that register for the duration of this macro
+ */
+
+ /* Are we on the entry stack? Bail out if not! */
+ movl PER_CPU_VAR(cpu_entry_area), %ecx
+ addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
+ subl %esp, %ecx /* ecx = (end of entry_stack) - esp */
+ cmpl $SIZEOF_entry_stack, %ecx
+ jae .Lend_\@
+
+ /* Load stack pointer into %esi and %edi */
+ movl %esp, %esi
+ movl %esi, %edi
+
+ /* Move %edi to the top of the entry stack */
+ andl $(MASK_entry_stack), %edi
+ addl $(SIZEOF_entry_stack), %edi
+
+ /* Load top of task-stack into %edi */
+ movl TSS_entry2task_stack(%edi), %edi
+
+ /*
+ * Clear unused upper bits of the dword containing the word-sized CS
+ * slot in pt_regs in case hardware didn't clear it for us.
+ */
+ andl $(0x0000ffff), PT_CS(%esp)
+
+ /* Special case - entry from kernel mode via entry stack */
+#ifdef CONFIG_VM86
+ movl PT_EFLAGS(%esp), %ecx # mix EFLAGS and CS
+ movb PT_CS(%esp), %cl
+ andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
+#else
+ movl PT_CS(%esp), %ecx
+ andl $SEGMENT_RPL_MASK, %ecx
+#endif
+ cmpl $USER_RPL, %ecx
+ jb .Lentry_from_kernel_\@
+
+ /* Bytes to copy */
+ movl $PTREGS_SIZE, %ecx
+
+#ifdef CONFIG_VM86
+ testl $X86_EFLAGS_VM, PT_EFLAGS(%esi)
+ jz .Lcopy_pt_regs_\@
+
+ /*
+ * Stack-frame contains 4 additional segment registers when
+ * coming from VM86 mode
+ */
+ addl $(4 * 4), %ecx
+
+#endif
+.Lcopy_pt_regs_\@:
+
+ /* Allocate frame on task-stack */
+ subl %ecx, %edi
+
+ /* Switch to task-stack */
+ movl %edi, %esp
+
+ /*
+ * We are now on the task-stack and can safely copy over the
+ * stack-frame
+ */
+ shrl $2, %ecx
+ cld
+ rep movsl
+
+ jmp .Lend_\@
+
+.Lentry_from_kernel_\@:
+
+ /*
+ * This handles the case when we enter the kernel from
+ * kernel-mode and %esp points to the entry-stack. When this
+ * happens we need to switch to the task-stack to run C code,
+ * but switch back to the entry-stack again when we approach
+ * iret and return to the interrupted code-path. This usually
+ * happens when we hit an exception while restoring user-space
+ * segment registers on the way back to user-space or when the
+ * sysenter handler runs with eflags.tf set.
+ *
+ * When we switch to the task-stack here, we can't trust the
+ * contents of the entry-stack anymore, as the exception handler
+ * might be scheduled out or moved to another CPU. Therefore we
+ * copy the complete entry-stack to the task-stack and set a
+ * marker in the iret-frame (bit 31 of the CS dword) to detect
+ * what we've done on the iret path.
+ *
+ * On the iret path we copy everything back and switch to the
+ * entry-stack, so that the interrupted kernel code-path
+ * continues on the same stack it was interrupted with.
+ *
+ * Be aware that an NMI can happen anytime in this code.
+ *
+ * %esi: Entry-Stack pointer (same as %esp)
+ * %edi: Top of the task stack
+ * %eax: CR3 on kernel entry
+ */
+
+ /* Calculate number of bytes on the entry stack in %ecx */
+ movl %esi, %ecx
+
+	/* Point %ecx to the top of the entry-stack */
+ andl $(MASK_entry_stack), %ecx
+ addl $(SIZEOF_entry_stack), %ecx
+
+ /* Number of bytes on the entry stack to %ecx */
+ sub %esi, %ecx
+
+ /* Mark stackframe as coming from entry stack */
+ orl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
+
+ /*
+ * Test the cr3 used to enter the kernel and add a marker
+ * so that we can switch back to it before iret.
+ */
+ testl $PTI_SWITCH_MASK, %eax
+ jz .Lcopy_pt_regs_\@
+ orl $CS_FROM_USER_CR3, PT_CS(%esp)
+
+ /*
+ * %esi and %edi are unchanged, %ecx contains the number of
+ * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
+ * the stack-frame on task-stack and copy everything over
+ */
+ jmp .Lcopy_pt_regs_\@
+
+.Lend_\@:
+.endm
+
+/*
+ * Switch back from the kernel stack to the entry stack.
+ *
+ * The %esp register must point to pt_regs on the task stack. The macro
+ * first calculates the size of the stack-frame to copy, depending on
+ * whether we return to VM86 mode or not, and then uses 'rep movsl'
+ * to copy the contents of the stack over to the entry stack.
+ *
+ * We must be very careful here, as we can't trust the contents of the
+ * task-stack once we have switched to the entry-stack. When an NMI
+ * happens while on the entry-stack, the NMI handler will switch back
+ * to the top of the task stack, overwriting the stack-frame we are
+ * about to copy.
+ * Therefore we switch the stack only after everything is copied over.
+ */
+.macro SWITCH_TO_ENTRY_STACK
+
+ ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
+
+ /* Bytes to copy */
+ movl $PTREGS_SIZE, %ecx
+
+#ifdef CONFIG_VM86
+ testl $(X86_EFLAGS_VM), PT_EFLAGS(%esp)
+ jz .Lcopy_pt_regs_\@
+
+ /* Additional 4 registers to copy when returning to VM86 mode */
+ addl $(4 * 4), %ecx
+
+.Lcopy_pt_regs_\@:
+#endif
+
+ /* Initialize source and destination for movsl */
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
+ subl %ecx, %edi
+ movl %esp, %esi
+
+ /* Save future stack pointer in %ebx */
+ movl %edi, %ebx
+
+ /* Copy over the stack-frame */
+ shrl $2, %ecx
+ cld
+ rep movsl
+
+ /*
+ * Switch to entry-stack - needs to happen after everything is
+ * copied because the NMI handler will overwrite the task-stack
+ * when on entry-stack
+ */
+ movl %ebx, %esp
+
+.Lend_\@:
+.endm
+
+/*
+ * This macro handles the case when we return to kernel-mode on the iret
+ * path and have to switch back to the entry stack and/or user-cr3
+ *
+ * See the comments below the .Lentry_from_kernel_\@ label in the
+ * SWITCH_TO_KERNEL_STACK macro for more details.
+ */
+.macro PARANOID_EXIT_TO_KERNEL_MODE
+
+ /*
+	 * Test if we entered the kernel on the entry-stack. Most
+ * likely we did not, because this code only runs on the
+ * return-to-kernel path.
+ */
+ testl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
+ jz .Lend_\@
+
+ /* Unlikely slow-path */
+
+ /* Clear marker from stack-frame */
+ andl $(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
+
+ /* Copy the remaining task-stack contents to entry-stack */
+ movl %esp, %esi
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
+
+ /* Bytes on the task-stack to ecx */
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
+ subl %esi, %ecx
+
+ /* Allocate stack-frame on entry-stack */
+ subl %ecx, %edi
+
+ /*
+	 * Save the future stack-pointer; we must not switch until the
+	 * copy is done, otherwise the NMI handler could destroy the
+ * contents of the task-stack we are about to copy.
+ */
+ movl %edi, %ebx
+
+ /* Do the copy */
+ shrl $2, %ecx
+ cld
+ rep movsl
+
+ /* Safe to switch to entry-stack now */
+ movl %ebx, %esp
+
+ /*
+	 * We came from the entry-stack and need to check if we also need to
+ * switch back to user cr3.
+ */
+ testl $CS_FROM_USER_CR3, PT_CS(%esp)
+ jz .Lend_\@
+
+ /* Clear marker from stack-frame */
+ andl $(~CS_FROM_USER_CR3), PT_CS(%esp)
+
+ SWITCH_TO_USER_CR3 scratch_reg=%eax
+
+.Lend_\@:
+.endm
/*
* %eax: prev task
* %edx: next task
@@ -351,9 +764,9 @@ ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
cmpl $0, PER_CPU_VAR(__preempt_count)
- jnz restore_all
+ jnz restore_all_kernel
testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
- jz restore_all
+ jz restore_all_kernel
call preempt_schedule_irq
jmp .Lneed_resched
END(resume_kernel)
@@ -412,7 +825,21 @@ ENTRY(xen_sysenter_target)
* 0(%ebp) arg6
*/
ENTRY(entry_SYSENTER_32)
- movl TSS_sysenter_sp0(%esp), %esp
+ /*
+ * On entry-stack with all userspace-regs live - save and
+	 * restore eflags and %eax to use %eax as a scratch register for the cr3
+ * switch.
+ */
+ pushfl
+ pushl %eax
+ BUG_IF_WRONG_CR3 no_user_check=1
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
+ popl %eax
+ popfl
+
+ /* Stack empty again, switch to task stack */
+ movl TSS_entry2task_stack(%esp), %esp
+
.Lsysenter_past_esp:
pushl $__USER_DS /* pt_regs->ss */
pushl %ebp /* pt_regs->sp (stashed in bp) */
@@ -421,7 +848,7 @@ ENTRY(entry_SYSENTER_32)
pushl $__USER_CS /* pt_regs->cs */
pushl $0 /* pt_regs->ip = 0 (placeholder) */
pushl %eax /* pt_regs->orig_ax */
- SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
+ SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest, stack already switched */
/*
* SYSENTER doesn't filter flags, so we need to clear NT, AC
@@ -460,25 +887,49 @@ ENTRY(entry_SYSENTER_32)
/* Opportunistic SYSEXIT */
TRACE_IRQS_ON /* User mode traces as IRQs on. */
+
+ /*
+	 * Set up the entry stack - we keep the pointer in %eax and do the
+ * switch after almost all user-state is restored.
+ */
+
+ /* Load entry stack pointer and allocate frame for eflags/eax */
+ movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
+ subl $(2*4), %eax
+
+ /* Copy eflags and eax to entry stack */
+ movl PT_EFLAGS(%esp), %edi
+ movl PT_EAX(%esp), %esi
+ movl %edi, (%eax)
+ movl %esi, 4(%eax)
+
+ /* Restore user registers and segments */
movl PT_EIP(%esp), %edx /* pt_regs->ip */
movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
1: mov PT_FS(%esp), %fs
PTGS_TO_GS
+
popl %ebx /* pt_regs->bx */
addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
popl %esi /* pt_regs->si */
popl %edi /* pt_regs->di */
popl %ebp /* pt_regs->bp */
- popl %eax /* pt_regs->ax */
+
+ /* Switch to entry stack */
+ movl %eax, %esp
+
+ /* Now ready to switch the cr3 */
+ SWITCH_TO_USER_CR3 scratch_reg=%eax
/*
* Restore all flags except IF. (We restore IF separately because
* STI gives a one-instruction window in which we won't be interrupted,
* whereas POPF does not.)
*/
- addl $PT_EFLAGS-PT_DS, %esp /* point esp at pt_regs->flags */
btrl $X86_EFLAGS_IF_BIT, (%esp)
+ BUG_IF_WRONG_CR3 no_user_check=1
popfl
+ popl %eax
/*
* Return back to the vDSO, which will pop ecx and edx.
@@ -532,7 +983,8 @@ ENDPROC(entry_SYSENTER_32)
ENTRY(entry_INT80_32)
ASM_CLAC
pushl %eax /* pt_regs->orig_ax */
- SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
+
+ SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */
/*
* User mode is traced as though IRQs are on, and the interrupt gate
@@ -546,24 +998,17 @@ ENTRY(entry_INT80_32)
restore_all:
TRACE_IRQS_IRET
+ SWITCH_TO_ENTRY_STACK
.Lrestore_all_notrace:
-#ifdef CONFIG_X86_ESPFIX32
- ALTERNATIVE "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX
-
- movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
- /*
- * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
- * are returning to the kernel.
- * See comments in process.c:copy_thread() for details.
- */
- movb PT_OLDSS(%esp), %ah
- movb PT_CS(%esp), %al
- andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
- cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
- je .Lldt_ss # returning to user-space with LDT SS
-#endif
+ CHECK_AND_APPLY_ESPFIX
.Lrestore_nocheck:
- RESTORE_REGS 4 # skip orig_eax/error_code
+ /* Switch back to user CR3 */
+ SWITCH_TO_USER_CR3 scratch_reg=%eax
+
+ BUG_IF_WRONG_CR3
+
+ /* Restore user state */
+ RESTORE_REGS pop=4 # skip orig_eax/error_code
.Lirq_return:
/*
 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
@@ -572,46 +1017,33 @@ restore_all:
*/
INTERRUPT_RETURN
+restore_all_kernel:
+ TRACE_IRQS_IRET
+ PARANOID_EXIT_TO_KERNEL_MODE
+ BUG_IF_WRONG_CR3
+ RESTORE_REGS 4
+ jmp .Lirq_return
+
.section .fixup, "ax"
ENTRY(iret_exc )
pushl $0 # no error code
pushl $do_iret_error
- jmp common_exception
-.previous
- _ASM_EXTABLE(.Lirq_return, iret_exc)
-#ifdef CONFIG_X86_ESPFIX32
-.Lldt_ss:
-/*
- * Setup and switch to ESPFIX stack
- *
- * We're returning to userspace with a 16 bit stack. The CPU will not
- * restore the high word of ESP for us on executing iret... This is an
- * "official" bug of all the x86-compatible CPUs, which we can work
- * around to make dosemu and wine happy. We do this by preloading the
- * high word of ESP with the high word of the userspace ESP while
- * compensating for the offset by changing to the ESPFIX segment with
- * a base address that matches for the difference.
- */
-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
- mov %esp, %edx /* load kernel esp */
- mov PT_OLDESP(%esp), %eax /* load userspace esp */
- mov %dx, %ax /* eax: new kernel esp */
- sub %eax, %edx /* offset (low word is 0) */
- shr $16, %edx
- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
- pushl $__ESPFIX_SS
- pushl %eax /* new kernel esp */
+#ifdef CONFIG_DEBUG_ENTRY
/*
- * Disable interrupts, but do not irqtrace this section: we
- * will soon execute iret and the tracer was already set to
- * the irqstate after the IRET:
+	 * The stack-frame here is the one that iret faulted on, so it's a
+ * return-to-user frame. We are on kernel-cr3 because we come here from
+ * the fixup code. This confuses the CR3 checker, so switch to user-cr3
+ * as the checker expects it.
*/
- DISABLE_INTERRUPTS(CLBR_ANY)
- lss (%esp), %esp /* switch to espfix segment */
- jmp .Lrestore_nocheck
+ pushl %eax
+ SWITCH_TO_USER_CR3 scratch_reg=%eax
+ popl %eax
#endif
+
+ jmp common_exception
+.previous
+ _ASM_EXTABLE(.Lirq_return, iret_exc)
ENDPROC(entry_INT80_32)
.macro FIXUP_ESPFIX_STACK
@@ -671,7 +1103,8 @@ END(irq_entries_start)
common_interrupt:
ASM_CLAC
addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
- SAVE_ALL
+
+ SAVE_ALL switch_stacks=1
ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
movl %esp, %eax
@@ -679,16 +1112,16 @@ common_interrupt:
jmp ret_from_intr
ENDPROC(common_interrupt)
-#define BUILD_INTERRUPT3(name, nr, fn) \
-ENTRY(name) \
- ASM_CLAC; \
- pushl $~(nr); \
- SAVE_ALL; \
- ENCODE_FRAME_POINTER; \
- TRACE_IRQS_OFF \
- movl %esp, %eax; \
- call fn; \
- jmp ret_from_intr; \
+#define BUILD_INTERRUPT3(name, nr, fn) \
+ENTRY(name) \
+ ASM_CLAC; \
+ pushl $~(nr); \
+ SAVE_ALL switch_stacks=1; \
+ ENCODE_FRAME_POINTER; \
+ TRACE_IRQS_OFF \
+ movl %esp, %eax; \
+ call fn; \
+ jmp ret_from_intr; \
ENDPROC(name)
#define BUILD_INTERRUPT(name, nr) \
@@ -920,16 +1353,20 @@ common_exception:
pushl %es
pushl %ds
pushl %eax
+ movl $(__USER_DS), %eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl $(__KERNEL_PERCPU), %eax
+ movl %eax, %fs
pushl %ebp
pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
+ SWITCH_TO_KERNEL_STACK
ENCODE_FRAME_POINTER
cld
- movl $(__KERNEL_PERCPU), %ecx
- movl %ecx, %fs
UNWIND_ESPFIX_STACK
GS_TO_REG %ecx
movl PT_GS(%esp), %edi # get the function address
@@ -937,9 +1374,6 @@ common_exception:
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
REG_TO_PTGS %ecx
SET_KERNEL_GS %ecx
- movl $(__USER_DS), %ecx
- movl %ecx, %ds
- movl %ecx, %es
TRACE_IRQS_OFF
movl %esp, %eax # pt_regs pointer
CALL_NOSPEC %edi
@@ -948,40 +1382,12 @@ END(common_exception)
ENTRY(debug)
/*
- * #DB can happen at the first instruction of
- * entry_SYSENTER_32 or in Xen's SYSENTER prologue. If this
- * happens, then we will be running on a very small stack. We
- * need to detect this condition and switch to the thread
- * stack before calling any C code at all.
- *
- * If you edit this code, keep in mind that NMIs can happen in here.
+ * Entry from sysenter is now handled in common_exception
*/
ASM_CLAC
pushl $-1 # mark this as an int
- SAVE_ALL
- ENCODE_FRAME_POINTER
- xorl %edx, %edx # error code 0
- movl %esp, %eax # pt_regs pointer
-
- /* Are we currently on the SYSENTER stack? */
- movl PER_CPU_VAR(cpu_entry_area), %ecx
- addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
- subl %eax, %ecx /* ecx = (end of entry_stack) - esp */
- cmpl $SIZEOF_entry_stack, %ecx
- jb .Ldebug_from_sysenter_stack
-
- TRACE_IRQS_OFF
- call do_debug
- jmp ret_from_exception
-
-.Ldebug_from_sysenter_stack:
- /* We're on the SYSENTER stack. Switch off. */
- movl %esp, %ebx
- movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
- TRACE_IRQS_OFF
- call do_debug
- movl %ebx, %esp
- jmp ret_from_exception
+ pushl $do_debug
+ jmp common_exception
END(debug)
/*
@@ -993,6 +1399,7 @@ END(debug)
*/
ENTRY(nmi)
ASM_CLAC
+
#ifdef CONFIG_X86_ESPFIX32
pushl %eax
movl %ss, %eax
@@ -1002,7 +1409,7 @@ ENTRY(nmi)
#endif
pushl %eax # pt_regs->orig_ax
- SAVE_ALL
+ SAVE_ALL_NMI cr3_reg=%edi
ENCODE_FRAME_POINTER
xorl %edx, %edx # zero error code
movl %esp, %eax # pt_regs pointer
@@ -1016,7 +1423,7 @@ ENTRY(nmi)
/* Not on SYSENTER stack. */
call do_nmi
- jmp .Lrestore_all_notrace
+ jmp .Lnmi_return
.Lnmi_from_sysenter_stack:
/*
@@ -1027,7 +1434,11 @@ ENTRY(nmi)
movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
call do_nmi
movl %ebx, %esp
- jmp .Lrestore_all_notrace
+
+.Lnmi_return:
+ CHECK_AND_APPLY_ESPFIX
+ RESTORE_ALL_NMI cr3_reg=%edi pop=4
+ jmp .Lirq_return
#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
@@ -1042,12 +1453,12 @@ ENTRY(nmi)
pushl 16(%esp)
.endr
pushl %eax
- SAVE_ALL
+ SAVE_ALL_NMI cr3_reg=%edi
ENCODE_FRAME_POINTER
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx, %edx # zero error code
call do_nmi
- RESTORE_REGS
+ RESTORE_ALL_NMI cr3_reg=%edi
lss 12+4(%esp), %esp # back to espfix stack
jmp .Lirq_return
#endif
@@ -1056,7 +1467,8 @@ END(nmi)
ENTRY(int3)
ASM_CLAC
pushl $-1 # mark this as an int
- SAVE_ALL
+
+ SAVE_ALL switch_stacks=1
ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
xorl %edx, %edx # zero error code
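
[ Editor's note: a hedged C model of the PTI CR3 handling this entry code
  implements - the user and kernel page directories are allocated in
  adjacent pages, so flipping one CR3 bit switches between them without
  touching memory. The helpers are illustrative, not kernel API:

	#include <stdint.h>

	#define PTI_SWITCH_MASK	(1u << 12)	/* bit 12 == PAGE_SHIFT */

	static inline uint32_t to_kernel_cr3(uint32_t cr3)
	{
		return cr3 & ~PTI_SWITCH_MASK;
	}

	static inline uint32_t to_user_cr3(uint32_t cr3)
	{
		return cr3 | PTI_SWITCH_MASK;
	}

	static inline int on_user_cr3(uint32_t cr3)
	{
		return !!(cr3 & PTI_SWITCH_MASK);
	} ]
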
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 8ae7ffda8f98..957dfb693ecc 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -92,7 +92,7 @@ END(native_usergs_sysret64)
.endm
.macro TRACE_IRQS_IRETQ_DEBUG
- bt $9, EFLAGS(%rsp) /* interrupts off? */
+ btl $9, EFLAGS(%rsp) /* interrupts off? */
jnc 1f
TRACE_IRQS_ON_DEBUG
1:
@@ -408,6 +408,7 @@ ENTRY(ret_from_fork)
1:
/* kernel thread */
+ UNWIND_HINT_EMPTY
movq %r12, %rdi
CALL_NOSPEC %rbx
/*
@@ -701,7 +702,7 @@ retint_kernel:
#ifdef CONFIG_PREEMPT
/* Interrupts are off */
/* Check if we need preemption */
- bt $9, EFLAGS(%rsp) /* were interrupts off? */
+ btl $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
jnz 1f
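
[ Editor's note: with only a memory operand and an immediate bit index,
  the assembler cannot deduce an operand size for a bare `bt`, hence the
  explicit `btl`. A hedged user-space illustration (assumes GCC 6+ for the
  flag-output constraint; the helper is hypothetical):

	#include <stdint.h>

	static inline int irqs_were_enabled(const uint32_t *eflags)
	{
		int cf;

		asm("btl $9, %1"	/* bit 9 == X86_EFLAGS_IF */
		    : "=@ccc"(cf)	/* CF receives the tested bit */
		    : "m"(*eflags));
		return cf;
	} ]
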
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 261802b1cc50..9f695f517747 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -46,10 +46,8 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
CPPFLAGS_vdso.lds += -P -C
-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
- -Wl,--no-undefined \
- -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
- $(DISABLE_LTO)
+VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
+ -z max-page-size=4096 -z common-page-size=4096
$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
@@ -58,9 +56,7 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(src
hostprogs-y += vdso2c
quiet_cmd_vdso2c = VDSO2C $@
-define cmd_vdso2c
- $(obj)/vdso2c $< $(<:%.dbg=%) $@
-endef
+ cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
$(call if_changed,vdso2c)
@@ -95,10 +91,8 @@ CFLAGS_REMOVE_vvar.o = -pg
#
CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
- -Wl,-soname=linux-vdso.so.1 \
- -Wl,-z,max-page-size=4096 \
- -Wl,-z,common-page-size=4096
+VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
+ -z max-page-size=4096 -z common-page-size=4096
# x32-rebranded versions
vobjx32s-y := $(vobjs-y:.o=-x32.o)
@@ -123,7 +117,7 @@ $(obj)/vdsox32.so.dbg: $(obj)/vdsox32.lds $(vobjx32s) FORCE
$(call if_changed,vdso)
CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1
targets += vdso32/vdso32.lds
targets += vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
@@ -157,13 +151,13 @@ $(obj)/vdso32.so.dbg: FORCE \
# The DSO images are built using a special linker script.
#
quiet_cmd_vdso = VDSO $@
- cmd_vdso = $(CC) -nostdlib -o $@ \
+ cmd_vdso = $(LD) -nostdlib -o $@ \
$(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
- -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+ -T $(filter %.lds,$^) $(filter %.o,$^) && \
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
- $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
+VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
+ $(call ld-option, --build-id) -Bsymbolic
GCOV_PROFILE := n
#
diff --git a/arch/x86/entry/vdso/vdso-note.S b/arch/x86/entry/vdso/vdso-note.S
index 79a071e4357e..79423170118f 100644
--- a/arch/x86/entry/vdso/vdso-note.S
+++ b/arch/x86/entry/vdso/vdso-note.S
@@ -3,6 +3,7 @@
* Here we can supply some information useful to userland.
*/
+#include <linux/build-salt.h>
#include <linux/uts.h>
#include <linux/version.h>
#include <linux/elfnote.h>
@@ -10,3 +11,5 @@
ELFNOTE_START(Linux, 0, "a")
.long LINUX_VERSION_CODE
ELFNOTE_END
+
+BUILD_SALT
diff --git a/arch/x86/entry/vdso/vdso32/note.S b/arch/x86/entry/vdso/vdso32/note.S
index 9fd51f206314..e78047d119f6 100644
--- a/arch/x86/entry/vdso/vdso32/note.S
+++ b/arch/x86/entry/vdso/vdso32/note.S
@@ -4,6 +4,7 @@
* Here we can supply some information useful to userland.
*/
+#include <linux/build-salt.h>
#include <linux/version.h>
#include <linux/elfnote.h>
@@ -14,6 +15,8 @@ ELFNOTE_START(Linux, 0, "a")
.long LINUX_VERSION_CODE
ELFNOTE_END
+BUILD_SALT
+
#ifdef CONFIG_XEN
/*
* Add a special note telling glibc's dynamic linker a fake hardware
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 86f0c15dcc2d..035c37481f57 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2041,15 +2041,15 @@ static void intel_pmu_disable_event(struct perf_event *event)
cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
cpuc->intel_cp_status &= ~(1ull << hwc->idx);
+ if (unlikely(event->attr.precise_ip))
+ intel_pmu_pebs_disable(event);
+
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_disable_fixed(hwc);
return;
}
x86_pmu_disable_event(event);
-
- if (unlikely(event->attr.precise_ip))
- intel_pmu_pebs_disable(event);
}
static void intel_pmu_del_event(struct perf_event *event)
@@ -2068,17 +2068,19 @@ static void intel_pmu_read_event(struct perf_event *event)
x86_perf_event_update(event);
}
-static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_enable_fixed(struct perf_event *event)
{
+ struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
- u64 ctrl_val, bits, mask;
+ u64 ctrl_val, mask, bits = 0;
/*
- * Enable IRQ generation (0x8),
+ * Enable IRQ generation (0x8), if not PEBS,
* and enable ring-3 counting (0x2) and ring-0 counting (0x1)
* if requested:
*/
- bits = 0x8ULL;
+ if (!event->attr.precise_ip)
+ bits |= 0x8;
if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
bits |= 0x2;
if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
@@ -2120,14 +2122,14 @@ static void intel_pmu_enable_event(struct perf_event *event)
if (unlikely(event_is_checkpointed(event)))
cpuc->intel_cp_status |= (1ull << hwc->idx);
+ if (unlikely(event->attr.precise_ip))
+ intel_pmu_pebs_enable(event);
+
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
- intel_pmu_enable_fixed(hwc);
+ intel_pmu_enable_fixed(event);
return;
}
- if (unlikely(event->attr.precise_ip))
- intel_pmu_pebs_enable(event);
-
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
@@ -2280,7 +2282,10 @@ again:
* counters from the GLOBAL_STATUS mask and we always process PEBS
* events via drain_pebs().
*/
- status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
+ if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+ status &= ~cpuc->pebs_enabled;
+ else
+ status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
/*
* PEBS overflow sets bit 62 in the global status register
@@ -4072,7 +4077,6 @@ __init int intel_pmu_init(void)
intel_pmu_lbr_init_skl();
x86_pmu.event_constraints = intel_slm_event_constraints;
- x86_pmu.pebs_constraints = intel_glp_pebs_event_constraints;
x86_pmu.extra_regs = intel_glm_extra_regs;
/*
* It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
@@ -4082,6 +4086,7 @@ __init int intel_pmu_init(void)
x86_pmu.pebs_prec_dist = true;
x86_pmu.lbr_pt_coexist = true;
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_PEBS_ALL;
x86_pmu.get_event_constraints = glp_get_event_constraints;
x86_pmu.cpu_events = glm_events_attrs;
/* Goldmont Plus has 4-wide pipeline */
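
[ Editor's note: a hedged restatement of the new GLOBAL_STATUS masking -
  with PMU_FL_PEBS_ALL every counter, fixed ones included, may be in PEBS
  mode, so the whole pebs_enabled mask is hidden from the overflow handler
  rather than only the low general-purpose bits. The 0xff value is an
  assumption (eight general-purpose PEBS counters):

	#include <stdint.h>

	#define PEBS_COUNTER_MASK	0xffULL	/* assumption: 8 GP counters */

	static uint64_t visible_overflows(uint64_t status, uint64_t pebs_enabled,
					  int pebs_all)
	{
		if (pebs_all)
			return status & ~pebs_enabled;
		return status & ~(pebs_enabled & PEBS_COUNTER_MASK);
	} ]
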
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 8dbba77e0518..b7b01d762d32 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -713,12 +713,6 @@ struct event_constraint intel_glm_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
-struct event_constraint intel_glp_pebs_event_constraints[] = {
- /* Allow all events as PEBS with no flags */
- INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
- EVENT_CONSTRAINT_END
-};
-
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
@@ -871,6 +865,13 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
}
}
+ /*
+	 * Extended PEBS support:
+	 * make the PEBS code search the normal constraints.
+ */
+ if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+ return NULL;
+
return &emptyconstraint;
}
@@ -896,10 +897,16 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
struct debug_store *ds = cpuc->ds;
u64 threshold;
+ int reserved;
+
+ if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+ reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
+ else
+ reserved = x86_pmu.max_pebs_events;
if (cpuc->n_pebs == cpuc->n_large_pebs) {
threshold = ds->pebs_absolute_maximum -
- x86_pmu.max_pebs_events * x86_pmu.pebs_record_size;
+ reserved * x86_pmu.pebs_record_size;
} else {
threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
}
@@ -963,7 +970,11 @@ void intel_pmu_pebs_enable(struct perf_event *event)
* This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
*/
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
- ds->pebs_event_reset[hwc->idx] =
+ unsigned int idx = hwc->idx;
+
+ if (idx >= INTEL_PMC_IDX_FIXED)
+ idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
+ ds->pebs_event_reset[idx] =
(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
} else {
ds->pebs_event_reset[hwc->idx] = 0;
@@ -1481,9 +1492,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
struct debug_store *ds = cpuc->ds;
struct perf_event *event;
void *base, *at, *top;
- short counts[MAX_PEBS_EVENTS] = {};
- short error[MAX_PEBS_EVENTS] = {};
- int bit, i;
+ short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+ short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+ int bit, i, size;
+ u64 mask;
if (!x86_pmu.pebs_active)
return;
@@ -1493,6 +1505,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
ds->pebs_index = ds->pebs_buffer_base;
+ mask = (1ULL << x86_pmu.max_pebs_events) - 1;
+ size = x86_pmu.max_pebs_events;
+ if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
+ mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
+ size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
+ }
+
if (unlikely(base >= top)) {
/*
* The drain_pebs() could be called twice in a short period
@@ -1502,7 +1521,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
* update the event->count for this case.
*/
for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
- x86_pmu.max_pebs_events) {
+ size) {
event = cpuc->events[bit];
if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
intel_pmu_save_and_restart_reload(event, 0);
@@ -1515,12 +1534,12 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
u64 pebs_status;
pebs_status = p->status & cpuc->pebs_enabled;
- pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
+ pebs_status &= mask;
/* PEBS v3 has more accurate status bits */
if (x86_pmu.intel_cap.pebs_format >= 3) {
for_each_set_bit(bit, (unsigned long *)&pebs_status,
- x86_pmu.max_pebs_events)
+ size)
counts[bit]++;
continue;
@@ -1568,7 +1587,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
counts[bit]++;
}
- for (bit = 0; bit < x86_pmu.max_pebs_events; bit++) {
+ for (bit = 0; bit < size; bit++) {
if ((counts[bit] == 0) && (error[bit] == 0))
continue;
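
[ Editor's note: a hedged C restatement of the mask/size setup in the
  drain path above - fixed counters, which live at bit INTEL_PMC_IDX_FIXED
  and up in the status word, join the scan only when extended PEBS is
  advertised:

	#include <stdint.h>

	#define INTEL_PMC_IDX_FIXED	32

	static void pebs_scan_range(uint64_t *mask, int *size,
				    int max_pebs_events,
				    int num_counters_fixed, int pebs_all)
	{
		*mask = (1ULL << max_pebs_events) - 1;
		*size = max_pebs_events;
		if (pebs_all) {
			*mask |= ((1ULL << num_counters_fixed) - 1)
					<< INTEL_PMC_IDX_FIXED;
			*size = INTEL_PMC_IDX_FIXED + num_counters_fixed;
		}
	} ]
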
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index cf372b90557e..f3e006bed9a7 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -216,6 +216,8 @@ static void intel_pmu_lbr_reset_64(void)
void intel_pmu_lbr_reset(void)
{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
if (!x86_pmu.lbr_nr)
return;
@@ -223,6 +225,9 @@ void intel_pmu_lbr_reset(void)
intel_pmu_lbr_reset_32();
else
intel_pmu_lbr_reset_64();
+
+ cpuc->last_task_ctx = NULL;
+ cpuc->last_log_id = 0;
}
/*
@@ -334,6 +339,7 @@ static inline u64 rdlbr_to(unsigned int idx)
static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
unsigned lbr_idx, mask;
u64 tos;
@@ -344,9 +350,21 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
return;
}
- mask = x86_pmu.lbr_nr - 1;
tos = task_ctx->tos;
- for (i = 0; i < tos; i++) {
+ /*
+	 * Do not restore the LBR registers if
+	 * - no one else touched them, and
+	 * - the CPU did not enter C6.
+ */
+ if ((task_ctx == cpuc->last_task_ctx) &&
+ (task_ctx->log_id == cpuc->last_log_id) &&
+ rdlbr_from(tos)) {
+ task_ctx->lbr_stack_state = LBR_NONE;
+ return;
+ }
+
+ mask = x86_pmu.lbr_nr - 1;
+ for (i = 0; i < task_ctx->valid_lbrs; i++) {
lbr_idx = (tos - i) & mask;
wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
wrlbr_to (lbr_idx, task_ctx->lbr_to[i]);
@@ -354,14 +372,24 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
+
+ for (; i < x86_pmu.lbr_nr; i++) {
+ lbr_idx = (tos - i) & mask;
+ wrlbr_from(lbr_idx, 0);
+ wrlbr_to(lbr_idx, 0);
+ if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+ wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);
+ }
+
wrmsrl(x86_pmu.lbr_tos, tos);
task_ctx->lbr_stack_state = LBR_NONE;
}
static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
unsigned lbr_idx, mask;
- u64 tos;
+ u64 tos, from;
int i;
if (task_ctx->lbr_callstack_users == 0) {
@@ -371,15 +399,22 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
mask = x86_pmu.lbr_nr - 1;
tos = intel_pmu_lbr_tos();
- for (i = 0; i < tos; i++) {
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
lbr_idx = (tos - i) & mask;
- task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
+ from = rdlbr_from(lbr_idx);
+ if (!from)
+ break;
+ task_ctx->lbr_from[i] = from;
task_ctx->lbr_to[i] = rdlbr_to(lbr_idx);
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
+ task_ctx->valid_lbrs = i;
task_ctx->tos = tos;
task_ctx->lbr_stack_state = LBR_VALID;
+
+ cpuc->last_task_ctx = task_ctx;
+ cpuc->last_log_id = ++task_ctx->log_id;
}
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
@@ -531,7 +566,7 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
*/
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
- bool need_info = false;
+ bool need_info = false, call_stack = false;
unsigned long mask = x86_pmu.lbr_nr - 1;
int lbr_format = x86_pmu.intel_cap.lbr_format;
u64 tos = intel_pmu_lbr_tos();
@@ -542,7 +577,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
if (cpuc->lbr_sel) {
need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
if (cpuc->lbr_sel->config & LBR_CALL_STACK)
- num = tos;
+ call_stack = true;
}
for (i = 0; i < num; i++) {
@@ -555,6 +590,13 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
from = rdlbr_from(lbr_idx);
to = rdlbr_to(lbr_idx);
+ /*
+ * Read LBR call stack entries
+		 * until an invalid entry (all zeros) is detected.
+ */
+ if (call_stack && !from)
+ break;
+
if (lbr_format == LBR_FORMAT_INFO && need_info) {
u64 info;
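
[ Editor's note: a hedged model of the restore-skip test introduced above.
  If this CPU was the last to save exactly this context (same pointer and
  the same log_id) and the top LBR entry still reads back non-zero - i.e.
  a deep C-state did not wipe the registers - the hardware already holds
  the values we would write. The structures are simplified stand-ins:

	#include <stdint.h>

	struct task_lbr_ctx { int log_id; };
	struct cpu_lbr_state {
		struct task_lbr_ctx *last_task_ctx;
		int last_log_id;
	};

	static int lbr_restore_needed(const struct task_lbr_ctx *tc,
				      const struct cpu_lbr_state *cs,
				      uint64_t top_lbr_from)
	{
		return !(tc == cs->last_task_ctx &&
			 tc->log_id == cs->last_log_id &&
			 top_lbr_from != 0);
	} ]
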
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 9f3711470ec1..156286335351 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -163,6 +163,7 @@ struct intel_excl_cntrs {
unsigned core_id; /* per-core: core id */
};
+struct x86_perf_task_context;
#define MAX_LBR_ENTRIES 32
enum {
@@ -214,6 +215,8 @@ struct cpu_hw_events {
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
struct er_account *lbr_sel;
u64 br_sel;
+ struct x86_perf_task_context *last_task_ctx;
+ int last_log_id;
/*
* Intel host/guest exclude bits
@@ -648,8 +651,10 @@ struct x86_perf_task_context {
u64 lbr_to[MAX_LBR_ENTRIES];
u64 lbr_info[MAX_LBR_ENTRIES];
int tos;
+ int valid_lbrs;
int lbr_callstack_users;
int lbr_stack_state;
+ int log_id;
};
#define x86_add_quirk(func_) \
@@ -668,6 +673,7 @@ do { \
#define PMU_FL_HAS_RSP_1 0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
+#define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 402338365651..5b0f613428c2 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -31,6 +31,8 @@
#include <asm/mshyperv.h>
#include <asm/apic.h>
+#include <asm/trace/hyperv.h>
+
static struct apic orig_apic;
static u64 hv_apic_icr_read(void)
@@ -99,6 +101,9 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
int nr_bank = 0;
int ret = 1;
+ if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return false;
+
local_irq_save(flags);
arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
@@ -130,10 +135,10 @@ ipi_mask_ex_done:
static bool __send_ipi_mask(const struct cpumask *mask, int vector)
{
int cur_cpu, vcpu;
- struct ipi_arg_non_ex **arg;
- struct ipi_arg_non_ex *ipi_arg;
+ struct ipi_arg_non_ex ipi_arg;
int ret = 1;
- unsigned long flags;
+
+ trace_hyperv_send_ipi_mask(mask, vector);
if (cpumask_empty(mask))
return true;
@@ -144,40 +149,43 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
return false;
- if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
- return __send_ipi_mask_ex(mask, vector);
-
- local_irq_save(flags);
- arg = (struct ipi_arg_non_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
-
- ipi_arg = *arg;
- if (unlikely(!ipi_arg))
- goto ipi_mask_done;
-
- ipi_arg->vector = vector;
- ipi_arg->reserved = 0;
- ipi_arg->cpu_mask = 0;
+ /*
+	 * From the supplied CPU set we need to figure out if we can get away
+	 * with the cheaper HVCALL_SEND_IPI hypercall. This is possible when the
+	 * highest VP number in the set is < 64. As VP numbers are usually in
+	 * ascending order and match Linux CPU ids, here is an optimization:
+	 * we check the VP number for the highest bit in the supplied set first
+	 * so we can quickly find out if using the HVCALL_SEND_IPI_EX hypercall
+	 * is a must. We will also check all VP numbers when walking the supplied
+ * CPU set to remain correct in all cases.
+ */
+ if (hv_cpu_number_to_vp_number(cpumask_last(mask)) >= 64)
+ goto do_ex_hypercall;
+
+ ipi_arg.vector = vector;
+ ipi_arg.cpu_mask = 0;
for_each_cpu(cur_cpu, mask) {
vcpu = hv_cpu_number_to_vp_number(cur_cpu);
if (vcpu == VP_INVAL)
- goto ipi_mask_done;
+ return false;
/*
* This particular version of the IPI hypercall can
* only target upto 64 CPUs.
*/
if (vcpu >= 64)
- goto ipi_mask_done;
+ goto do_ex_hypercall;
- __set_bit(vcpu, (unsigned long *)&ipi_arg->cpu_mask);
+ __set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
}
- ret = hv_do_hypercall(HVCALL_SEND_IPI, ipi_arg, NULL);
-
-ipi_mask_done:
- local_irq_restore(flags);
+ ret = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
+ ipi_arg.cpu_mask);
return ((ret == 0) ? true : false);
+
+do_ex_hypercall:
+ return __send_ipi_mask_ex(mask, vector);
}
static bool __send_ipi_one(int cpu, int vector)
@@ -233,10 +241,7 @@ static void hv_send_ipi_self(int vector)
void __init hv_apic_init(void)
{
if (ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) {
- if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
- pr_info("Hyper-V: Using ext hypercalls for IPI\n");
- else
- pr_info("Hyper-V: Using IPI hypercalls\n");
+ pr_info("Hyper-V: Using IPI hypercalls\n");
/*
* Set the IPI entry points.
*/
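
[ Editor's note: a hedged sketch of the dispatch rule above - the non-EX
  IPI hypercall carries a single 64-bit VP mask, so it only applies while
  every target VP number is below 64. vp_of_cpu[] is an assumed lookup
  table standing in for hv_cpu_number_to_vp_number():

	#include <stdint.h>

	static int build_ipi_vp_mask(uint64_t *out, const int *vp_of_cpu,
				     const int *cpus, int ncpus)
	{
		uint64_t mask = 0;

		for (int i = 0; i < ncpus; i++) {
			int vp = vp_of_cpu[cpus[i]];

			if (vp < 0 || vp >= 64)
				return -1;	/* fall back to the EX hypercall */
			mask |= 1ULL << vp;
		}
		*out = mask;
		return 0;
	} ]
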
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index de27615c51ea..1147e1fed7ff 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -16,6 +16,8 @@
/* Each gva in gva_list encodes up to 4096 pages to flush */
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
+static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
+ const struct flush_tlb_info *info);
/*
* Fills in gva_list starting from offset. Returns the number of items added.
@@ -93,10 +95,29 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
if (cpumask_equal(cpus, cpu_present_mask)) {
flush->flags |= HV_FLUSH_ALL_PROCESSORS;
} else {
+ /*
+ * From the supplied CPU set we need to figure out if we can get
+ * away with cheaper HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}
+ * hypercalls. This is possible when the highest VP number in
+ * the set is < 64. As VP numbers are usually in ascending order
+ * and match Linux CPU ids, here is an optimization: we check
+ * the VP number for the highest bit in the supplied set first
+ * so we can quickly find out if using *_EX hypercalls is a
+ * must. We will also check all VP numbers when walking the
+ * supplied CPU set to remain correct in all cases.
+ */
+ if (hv_cpu_number_to_vp_number(cpumask_last(cpus)) >= 64)
+ goto do_ex_hypercall;
+
for_each_cpu(cpu, cpus) {
vcpu = hv_cpu_number_to_vp_number(cpu);
- if (vcpu >= 64)
+ if (vcpu == VP_INVAL) {
+ local_irq_restore(flags);
goto do_native;
+ }
+
+ if (vcpu >= 64)
+ goto do_ex_hypercall;
__set_bit(vcpu, (unsigned long *)
&flush->processor_mask);
@@ -123,7 +144,12 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
gva_n, 0, flush, NULL);
}
+ goto check_status;
+
+do_ex_hypercall:
+ status = hyperv_flush_tlb_others_ex(cpus, info);
+check_status:
local_irq_restore(flags);
if (!(status & HV_HYPERCALL_RESULT_MASK))
@@ -132,35 +158,22 @@ do_native:
native_flush_tlb_others(cpus, info);
}
-static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
- const struct flush_tlb_info *info)
+static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
+ const struct flush_tlb_info *info)
{
int nr_bank = 0, max_gvas, gva_n;
struct hv_tlb_flush_ex **flush_pcpu;
struct hv_tlb_flush_ex *flush;
- u64 status = U64_MAX;
- unsigned long flags;
+ u64 status;
- trace_hyperv_mmu_flush_tlb_others(cpus, info);
-
- if (!hv_hypercall_pg)
- goto do_native;
-
- if (cpumask_empty(cpus))
- return;
-
- local_irq_save(flags);
+ if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return U64_MAX;
flush_pcpu = (struct hv_tlb_flush_ex **)
this_cpu_ptr(hyperv_pcpu_input_arg);
flush = *flush_pcpu;
- if (unlikely(!flush)) {
- local_irq_restore(flags);
- goto do_native;
- }
-
if (info->mm) {
/*
* AddressSpace argument must match the CR3 with PCID bits
@@ -176,15 +189,10 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
flush->hv_vp_set.valid_bank_mask = 0;
- if (!cpumask_equal(cpus, cpu_present_mask)) {
- flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
- nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
- }
-
- if (!nr_bank) {
- flush->hv_vp_set.format = HV_GENERIC_SET_ALL;
- flush->flags |= HV_FLUSH_ALL_PROCESSORS;
- }
+ flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
+ if (nr_bank < 0)
+ return U64_MAX;
/*
 * We can flush at most max_gvas with one hypercall. Flush the
@@ -213,12 +221,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
gva_n, nr_bank, flush, NULL);
}
- local_irq_restore(flags);
-
- if (!(status & HV_HYPERCALL_RESULT_MASK))
- return;
-do_native:
- native_flush_tlb_others(cpus, info);
+ return status;
}
void hyperv_setup_mmu_ops(void)
@@ -226,11 +229,6 @@ void hyperv_setup_mmu_ops(void)
if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
return;
- if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) {
- pr_info("Using hypercall for remote TLB flush\n");
- pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
- } else {
- pr_info("Using ext hypercall for remote TLB flush\n");
- pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex;
- }
+ pr_info("Using hypercall for remote TLB flush\n");
+ pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
}
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 74a9e06b6cfd..130e81e10fc7 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -10,6 +10,7 @@
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/msr.h>
+#include <asm/hardirq.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -502,12 +503,19 @@ extern int default_check_phys_apicid_present(int phys_apicid);
#endif /* CONFIG_X86_LOCAL_APIC */
+#ifdef CONFIG_SMP
+bool apic_id_is_primary_thread(unsigned int id);
+#else
+static inline bool apic_id_is_primary_thread(unsigned int id) { return false; }
+#endif
+
extern void irq_enter(void);
extern void irq_exit(void);
static inline void entering_irq(void)
{
irq_enter();
+ kvm_set_cpu_l1tf_flush_l1d();
}
static inline void entering_ack_irq(void)
@@ -520,6 +528,7 @@ static inline void ipi_entering_ack_irq(void)
{
irq_enter();
ack_APIC_irq();
+ kvm_set_cpu_l1tf_flush_l1d();
}
static inline void exiting_irq(void)
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 0db6bec95489..b143717b92b3 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -80,6 +80,7 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
* true if the result is zero, or false for all
* other cases.
*/
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
@@ -91,6 +92,7 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
*
* Atomically increments @v by 1.
*/
+#define arch_atomic_inc arch_atomic_inc
static __always_inline void arch_atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
@@ -103,6 +105,7 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
*
* Atomically decrements @v by 1.
*/
+#define arch_atomic_dec arch_atomic_dec
static __always_inline void arch_atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
@@ -117,6 +120,7 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
@@ -130,6 +134,7 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
@@ -144,6 +149,7 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
+#define arch_atomic_add_negative arch_atomic_add_negative
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
@@ -173,9 +179,6 @@ static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
return arch_atomic_add_return(-i, v);
}
-#define arch_atomic_inc_return(v) (arch_atomic_add_return(1, v))
-#define arch_atomic_dec_return(v) (arch_atomic_sub_return(1, v))
-
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return xadd(&v->counter, i);
@@ -199,7 +202,7 @@ static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int n
static inline int arch_atomic_xchg(atomic_t *v, int new)
{
- return xchg(&v->counter, new);
+ return arch_xchg(&v->counter, new);
}
static inline void arch_atomic_and(int i, atomic_t *v)
@@ -253,27 +256,6 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
return val;
}
-/**
- * __arch_atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the old value of @v.
- */
-static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c = arch_atomic_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 92212bf0484f..ef959f02d070 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -158,6 +158,7 @@ static inline long long arch_atomic64_inc_return(atomic64_t *v)
"S" (v) : "memory", "ecx");
return a;
}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
static inline long long arch_atomic64_dec_return(atomic64_t *v)
{
@@ -166,6 +167,7 @@ static inline long long arch_atomic64_dec_return(atomic64_t *v)
"S" (v) : "memory", "ecx");
return a;
}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
/**
* arch_atomic64_add - add integer to atomic64 variable
@@ -198,25 +200,12 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
}
/**
- * arch_atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer to type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static inline int arch_atomic64_sub_and_test(long long i, atomic64_t *v)
-{
- return arch_atomic64_sub_return(i, v) == 0;
-}
-
-/**
* arch_atomic64_inc - increment atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1.
*/
+#define arch_atomic64_inc arch_atomic64_inc
static inline void arch_atomic64_inc(atomic64_t *v)
{
__alternative_atomic64(inc, inc_return, /* no output */,
@@ -229,6 +218,7 @@ static inline void arch_atomic64_inc(atomic64_t *v)
*
* Atomically decrements @v by 1.
*/
+#define arch_atomic64_dec arch_atomic64_dec
static inline void arch_atomic64_dec(atomic64_t *v)
{
__alternative_atomic64(dec, dec_return, /* no output */,
@@ -236,46 +226,6 @@ static inline void arch_atomic64_dec(atomic64_t *v)
}
/**
- * arch_atomic64_dec_and_test - decrement and test
- * @v: pointer to type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static inline int arch_atomic64_dec_and_test(atomic64_t *v)
-{
- return arch_atomic64_dec_return(v) == 0;
-}
-
-/**
- * atomic64_inc_and_test - increment and test
- * @v: pointer to type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static inline int arch_atomic64_inc_and_test(atomic64_t *v)
-{
- return arch_atomic64_inc_return(v) == 0;
-}
-
-/**
- * arch_atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer to type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static inline int arch_atomic64_add_negative(long long i, atomic64_t *v)
-{
- return arch_atomic64_add_return(i, v) < 0;
-}
-
-/**
* arch_atomic64_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
@@ -295,7 +245,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
return (int)a;
}
-
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
int r;
@@ -304,6 +254,7 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
return r;
}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
{
long long r;
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 6106b59d3260..4343d9b4f30e 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -71,6 +71,7 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
* true if the result is zero, or false for all
* other cases.
*/
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
@@ -82,6 +83,7 @@ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
*
* Atomically increments @v by 1.
*/
+#define arch_atomic64_inc arch_atomic64_inc
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "incq %0"
@@ -95,6 +97,7 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
*
* Atomically decrements @v by 1.
*/
+#define arch_atomic64_dec arch_atomic64_dec
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "decq %0"
@@ -110,6 +113,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
@@ -123,6 +127,7 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
@@ -137,6 +142,7 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
+#define arch_atomic64_add_negative arch_atomic64_add_negative
static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
@@ -169,9 +175,6 @@ static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v)
return xadd(&v->counter, -i);
}
-#define arch_atomic64_inc_return(v) (arch_atomic64_add_return(1, (v)))
-#define arch_atomic64_dec_return(v) (arch_atomic64_sub_return(1, (v)))
-
static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
return arch_cmpxchg(&v->counter, old, new);
@@ -185,46 +188,7 @@ static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, l
static inline long arch_atomic64_xchg(atomic64_t *v, long new)
{
- return xchg(&v->counter, new);
-}
-
-/**
- * arch_atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static inline bool arch_atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- s64 c = arch_atomic64_read(v);
- do {
- if (unlikely(c == u))
- return false;
- } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
- return true;
-}
-
-#define arch_atomic64_inc_not_zero(v) arch_atomic64_add_unless((v), 1, 0)
-
-/*
- * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
-{
- s64 dec, c = arch_atomic64_read(v);
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
- return dec;
+ return arch_xchg(&v->counter, new);
}
static inline void arch_atomic64_and(long i, atomic64_t *v)
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index e3efd8a06066..a55d79b233d3 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -75,7 +75,7 @@ extern void __add_wrong_size(void)
* use "asm volatile" and "memory" clobbers to prevent gcc from moving
* information around.
*/
-#define xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
+#define arch_xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index bfca3b346c74..072e5459fe2f 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -10,13 +10,13 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 5701f5cecd31..89a048c2faec 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -219,6 +219,8 @@
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
+#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -229,7 +231,7 @@
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
-
+#define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
@@ -341,6 +343,7 @@
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
@@ -373,5 +376,6 @@
#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/dmi.h b/arch/x86/include/asm/dmi.h
index 0ab2ab27ad1f..b825cb201251 100644
--- a/arch/x86/include/asm/dmi.h
+++ b/arch/x86/include/asm/dmi.h
@@ -4,8 +4,8 @@
#include <linux/compiler.h>
#include <linux/init.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/setup.h>
static __always_inline __init void *dmi_alloc(unsigned len)
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 740a428acf1e..d9069bb26c7f 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -3,10 +3,12 @@
#define _ASM_X86_HARDIRQ_H
#include <linux/threads.h>
-#include <linux/irq.h>
typedef struct {
- unsigned int __softirq_pending;
+ u16 __softirq_pending;
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+ u8 kvm_cpu_l1tf_flush_l1d;
+#endif
unsigned int __nmi_count; /* arch dependent */
#ifdef CONFIG_X86_LOCAL_APIC
unsigned int apic_timer_irqs; /* arch dependent */
@@ -58,4 +60,24 @@ extern u64 arch_irq_stat_cpu(unsigned int cpu);
extern u64 arch_irq_stat(void);
#define arch_irq_stat arch_irq_stat
+
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+static inline void kvm_set_cpu_l1tf_flush_l1d(void)
+{
+ __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
+}
+
+static inline void kvm_clear_cpu_l1tf_flush_l1d(void)
+{
+ __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
+}
+
+static inline bool kvm_get_cpu_l1tf_flush_l1d(void)
+{
+ return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
+}
+#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
+static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
+#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
+
#endif /* _ASM_X86_HARDIRQ_H */
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index f59c39835a5a..a1f0e90d0818 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -49,11 +49,14 @@ static inline int hw_breakpoint_slots(int type)
return HBP_NUM;
}
+struct perf_event_attr;
struct perf_event;
struct pmu;
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 5cdcdbd4d892..89789e8c80f6 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -3,6 +3,7 @@
#define _ASM_X86_I8259_H
#include <linux/delay.h>
+#include <asm/io.h>
extern unsigned int cached_irq_mask;
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index cf090e584202..7ed08a7c3398 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -76,4 +76,17 @@
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
+/* Useful macros */
+#define INTEL_CPU_FAM_ANY(_family, _model, _driver_data) \
+{ \
+ .vendor = X86_VENDOR_INTEL, \
+ .family = _family, \
+ .model = _model, \
+ .feature = X86_FEATURE_ANY, \
+ .driver_data = (kernel_ulong_t)&_driver_data \
+}
+
+#define INTEL_CPU_FAM6(_model, _driver_data) \
+ INTEL_CPU_FAM_ANY(6, INTEL_FAM6_##_model, _driver_data)
+
#endif /* _ASM_X86_INTEL_FAMILY_H */
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index fe04491130ae..52f815a80539 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -80,35 +80,6 @@ enum intel_mid_cpu_type {
extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
-/**
- * struct intel_mid_ops - Interface between intel-mid & sub archs
- * @arch_setup: arch_setup function to re-initialize platform
- * structures (x86_init, x86_platform_init)
- *
- * This structure can be extended if any new interface is required
- * between intel-mid & its sub arch files.
- */
-struct intel_mid_ops {
- void (*arch_setup)(void);
-};
-
-/* Helper API's for INTEL_MID_OPS_INIT */
-#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \
- [cpuid] = get_##cpuname##_ops
-
-/* Maximum number of CPU ops */
-#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
-
-/*
- * For every new cpu addition, a weak get_<cpuname>_ops() function needs be
- * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
- */
-#define INTEL_MID_OPS_INIT { \
- DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \
- DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \
- DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
-};
-
#ifdef CONFIG_X86_INTEL_MID
static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
@@ -136,20 +107,6 @@ enum intel_mid_timer_options {
extern enum intel_mid_timer_options intel_mid_timer_options;
-/*
- * Penwell uses spread spectrum clock, so the freq number is not exactly
- * the same as reported by MSR based on SDM.
- */
-#define FSB_FREQ_83SKU 83200
-#define FSB_FREQ_100SKU 99840
-#define FSB_FREQ_133SKU 133000
-
-#define FSB_FREQ_167SKU 167000
-#define FSB_FREQ_200SKU 200000
-#define FSB_FREQ_267SKU 267000
-#define FSB_FREQ_333SKU 333000
-#define FSB_FREQ_400SKU 400000
-
/* Bus Select SoC Fuse value */
#define BSEL_SOC_FUSE_MASK 0x7
/* FSB 133MHz */
diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h
index 62a9f4966b42..ae26df1c2789 100644
--- a/arch/x86/include/asm/intel_ds.h
+++ b/arch/x86/include/asm/intel_ds.h
@@ -8,6 +8,7 @@
/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS 8
+#define MAX_FIXED_PEBS_EVENTS 3
/*
* A debug store configuration.
@@ -23,7 +24,7 @@ struct debug_store {
u64 pebs_index;
u64 pebs_absolute_maximum;
u64 pebs_interrupt_threshold;
- u64 pebs_event_reset[MAX_PEBS_EVENTS];
+ u64 pebs_event_reset[MAX_PEBS_EVENTS + MAX_FIXED_PEBS_EVENTS];
} __aligned(PAGE_SIZE);
DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index c4fc17220df9..c14f2a74b2be 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -13,6 +13,8 @@
* Interrupt control:
*/
+/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
+extern inline unsigned long native_save_fl(void);
extern inline unsigned long native_save_fl(void)
{
unsigned long flags;
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 367d99cff426..c8cec1b39b88 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -78,7 +78,7 @@ struct arch_specific_insn {
* boostable = true: This instruction has been boosted: we have
* added a relative jump after the instruction copy in insn,
* so no single-step and fixup are needed (unless there's
- * a post_handler or break_handler).
+ * a post_handler).
*/
bool boostable;
bool if_modifier;
@@ -111,9 +111,6 @@ struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_old_flags;
unsigned long kprobe_saved_flags;
- unsigned long *jprobe_saved_sp;
- struct pt_regs jprobe_saved_regs;
- kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/x86/include/asm/kvm_guest.h b/arch/x86/include/asm/kvm_guest.h
deleted file mode 100644
index 46185263d9c2..000000000000
--- a/arch/x86/include/asm/kvm_guest.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_KVM_GUEST_H
-#define _ASM_X86_KVM_GUEST_H
-
-int kvm_setup_vsyscall_timeinfo(void);
-
-#endif /* _ASM_X86_KVM_GUEST_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c13cd28d9d1b..acebb808c4b5 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -17,6 +17,7 @@
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
+#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
@@ -713,6 +714,9 @@ struct kvm_vcpu_arch {
/* be preempted when it's in kernel-mode(cpl=0) */
bool preempted_in_kernel;
+
+ /* Flush the L1 Data cache for L1TF mitigation on VMENTER */
+ bool l1tf_flush_l1d;
};
struct kvm_lpage_info {
@@ -881,6 +885,7 @@ struct kvm_vcpu_stat {
u64 signal_exits;
u64 irq_window_exits;
u64 nmi_window_exits;
+ u64 l1d_flush;
u64 halt_exits;
u64 halt_successful_poll;
u64 halt_attempted_poll;
@@ -1413,6 +1418,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
+u64 kvm_get_arch_capabilities(void);
void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 3aea2658323a..4c723632c036 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -7,7 +7,6 @@
#include <uapi/asm/kvm_para.h>
extern void kvmclock_init(void);
-extern int kvm_register_clock(char *txt);
#ifdef CONFIG_KVM_GUEST
bool kvm_check_and_clear_guest_paused(void);
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index bbc796eb0a3b..eeeb9289c764 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -71,12 +71,7 @@ struct ldt_struct {
static inline void *ldt_slot_va(int slot)
{
-#ifdef CONFIG_X86_64
return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
-#else
- BUG();
- return (void *)fix_to_virt(FIX_HOLE);
-#endif
}
/*
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 5a7375ed5f7c..19886fef1dfc 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -194,6 +194,40 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
return hv_status;
}
+/* Fast hypercall with 16 bytes of input */
+static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
+{
+ u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;
+
+#ifdef CONFIG_X86_64
+ {
+ __asm__ __volatile__("mov %4, %%r8\n"
+ CALL_NOSPEC
+ : "=a" (hv_status), ASM_CALL_CONSTRAINT,
+ "+c" (control), "+d" (input1)
+ : "r" (input2),
+ THUNK_TARGET(hv_hypercall_pg)
+ : "cc", "r8", "r9", "r10", "r11");
+ }
+#else
+ {
+ u32 input1_hi = upper_32_bits(input1);
+ u32 input1_lo = lower_32_bits(input1);
+ u32 input2_hi = upper_32_bits(input2);
+ u32 input2_lo = lower_32_bits(input2);
+
+ __asm__ __volatile__ (CALL_NOSPEC
+ : "=A"(hv_status),
+ "+c"(input1_lo), ASM_CALL_CONSTRAINT
+ : "A" (control), "b" (input1_hi),
+ "D"(input2_hi), "S"(input2_lo),
+ THUNK_TARGET(hv_hypercall_pg)
+ : "cc");
+ }
+#endif
+ return hv_status;
+}
+
/*
 * Rep hypercalls. Callers of these functions are supposed to ensure that
* rep_count and varhead_size comply with Hyper-V hypercall definition.
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 68b2c3150de1..4731f0cf97c5 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -70,12 +70,19 @@
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */
#define ARCH_CAP_SSB_NO (1 << 4) /*
* Not susceptible to Speculative Store Bypass
* attack, so no Speculative Store Bypass
* control required.
*/
+#define MSR_IA32_FLUSH_CMD 0x0000010b
+#define L1D_FLUSH (1 << 0) /*
+ * Writeback and invalidate the
+ * L1 data cache.
+ */
+
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index f6f6c63da62f..fd2a8c1b88bc 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -214,7 +214,7 @@ enum spectre_v2_mitigation {
SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
SPECTRE_V2_RETPOLINE_GENERIC,
SPECTRE_V2_RETPOLINE_AMD,
- SPECTRE_V2_IBRS,
+ SPECTRE_V2_IBRS_ENHANCED,
};
/* The Speculative Store Bypass disable variants */
diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h
index 9c9dc579bd7d..46f516dd80ce 100644
--- a/arch/x86/include/asm/orc_types.h
+++ b/arch/x86/include/asm/orc_types.h
@@ -88,6 +88,7 @@ struct orc_entry {
unsigned sp_reg:4;
unsigned bp_reg:4;
unsigned type:2;
+ unsigned end:1;
} __packed;
/*
@@ -101,6 +102,7 @@ struct unwind_hint {
s16 sp_offset;
u8 sp_reg;
u8 type;
+ u8 end;
};
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index aa30c3241ea7..0d5c739eebd7 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -29,8 +29,13 @@
#define N_EXCEPTION_STACKS 1
#ifdef CONFIG_X86_PAE
-/* 44=32+12, the limit we can fit into an unsigned long pfn */
-#define __PHYSICAL_MASK_SHIFT 44
+/*
+ * This is beyond the 44-bit limit imposed by 32-bit unsigned long PFNs,
+ * but we need the full mask to make sure inverted PROT_NONE
+ * entries have all the host bits set in a guest.
+ * The real limit is still 44 bits.
+ */
+#define __PHYSICAL_MASK_SHIFT 52
#define __VIRTUAL_MASK_SHIFT 32
#else /* !CONFIG_X86_PAE */
diff --git a/arch/x86/include/asm/pci-direct.h b/arch/x86/include/asm/pci-direct.h
index e1084f71a295..94597a3cf3d0 100644
--- a/arch/x86/include/asm/pci-direct.h
+++ b/arch/x86/include/asm/pci-direct.h
@@ -15,8 +15,4 @@ extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val);
extern void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val);
extern int early_pci_allowed(void);
-
-extern unsigned int pci_early_dump_regs;
-extern void early_dump_pci_device(u8 bus, u8 slot, u8 func);
-extern void early_dump_pci_devices(void);
#endif /* _ASM_X86_PCI_DIRECT_H */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index a06b07399d17..e9202a0de8f0 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -450,9 +450,10 @@ do { \
bool __ret; \
typeof(pcp1) __o1 = (o1), __n1 = (n1); \
typeof(pcp2) __o2 = (o2), __n2 = (n2); \
- asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \
- : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
- : "b" (__n1), "c" (__n2), "a" (__o1)); \
+ asm volatile("cmpxchg8b "__percpu_arg(1) \
+ CC_SET(z) \
+ : CC_OUT(z) (__ret), "+m" (pcp1), "+m" (pcp2), "+a" (__o1), "+d" (__o2) \
+ : "b" (__n1), "c" (__n2)); \
__ret; \
})
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 685ffe8a0eaf..24c6cf5f16b7 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -19,6 +19,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pmd.pud.p4d.pgd = pti_set_user_pgtbl(&pmdp->pud.p4d.pgd, pmd.pud.p4d.pgd);
+#endif
*pmdp = pmd;
}
@@ -58,6 +61,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pti_set_user_pgtbl(&xp->pud.p4d.pgd, __pgd(0));
+#endif
return __pmd(xchg((pmdval_t *)xp, 0));
}
#else
@@ -67,6 +73,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#ifdef CONFIG_SMP
static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pti_set_user_pgtbl(&xp->p4d.pgd, __pgd(0));
+#endif
return __pud(xchg((pudval_t *)xp, 0));
}
#else
@@ -95,4 +104,21 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
+/* No inverted PFNs on 2 level page tables */
+
+static inline u64 protnone_mask(u64 val)
+{
+ return 0;
+}
+
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
+{
+ return val;
+}
+
+static inline bool __pte_needs_invert(u64 val)
+{
+ return false;
+}
+
#endif /* _ASM_X86_PGTABLE_2LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h
index f982ef808e7e..6deb6cd236e3 100644
--- a/arch/x86/include/asm/pgtable-2level_types.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -35,4 +35,7 @@ typedef union {
#define PTRS_PER_PTE 1024
+/* This covers all VMSPLIT_* and VMSPLIT_*_OPT variants */
+#define PGD_KERNEL_START (CONFIG_PAGE_OFFSET >> PGDIR_SHIFT)
+
#endif /* _ASM_X86_PGTABLE_2LEVEL_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index f24df59c40b2..a564084c6141 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -98,6 +98,9 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
+#endif
set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}
@@ -229,6 +232,10 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
union split_pud res, *orig = (union split_pud *)pudp;
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ pti_set_user_pgtbl(&pudp->p4d.pgd, __pgd(0));
+#endif
+
/* xchg acts as a barrier before setting of the high bits */
res.pud_low = xchg(&orig->pud_low, 0);
res.pud_high = orig->pud_high;
@@ -241,12 +248,43 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
#endif
/* Encode and de-code a swap entry */
+#define SWP_TYPE_BITS 5
+
+#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
+
+/* We always extract/encode the offset by shifting it all the way up, and then down again */
+#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)
+
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x) (((x).val) & 0x1f)
#define __swp_offset(x) ((x).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
-#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
-#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
+
+/*
+ * Normally, __swp_entry() converts from arch-independent swp_entry_t to
+ * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
+ * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
+ * whole 64 bits. Thus, the "real" arch-dependent conversion is moved into
+ * __swp_entry_to_pte(), via the following helper macro based on a 64-bit
+ * __swp_entry().
+ */
+#define __swp_pteval_entry(type, offset) ((pteval_t) { \
+ (~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
+ | ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })
+
+#define __swp_entry_to_pte(x) ((pte_t){ .pte = \
+ __swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
+/*
+ * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
+ * swp_entry_t, but also has to convert it from 64bit to the 32bit
+ * intermediate representation, using the following macros based on 64bit
+ * __swp_type() and __swp_offset().
+ */
+#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
+#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))
+
+#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \
+ __pteval_swp_offset(pte)))
#define gup_get_pte gup_get_pte
/*
@@ -295,4 +333,6 @@ static inline pte_t gup_get_pte(pte_t *ptep)
return pte;
}
+#include <asm/pgtable-invert.h>
+
#endif /* _ASM_X86_PGTABLE_3LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h
index 6a59a6d0cc50..858358a82b14 100644
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -21,9 +21,10 @@ typedef union {
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_PARAVIRT
-#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd)
+#define SHARED_KERNEL_PMD ((!static_cpu_has(X86_FEATURE_PTI) && \
+ (pv_info.shared_kernel_pmd)))
#else
-#define SHARED_KERNEL_PMD 1
+#define SHARED_KERNEL_PMD (!static_cpu_has(X86_FEATURE_PTI))
#endif
/*
@@ -45,5 +46,6 @@ typedef union {
#define PTRS_PER_PTE 512
#define MAX_POSSIBLE_PHYSMEM_BITS 36
+#define PGD_KERNEL_START (CONFIG_PAGE_OFFSET >> PGDIR_SHIFT)
#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable-invert.h b/arch/x86/include/asm/pgtable-invert.h
new file mode 100644
index 000000000000..44b1203ece12
--- /dev/null
+++ b/arch/x86/include/asm/pgtable-invert.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PGTABLE_INVERT_H
+#define _ASM_PGTABLE_INVERT_H 1
+
+#ifndef __ASSEMBLY__
+
+static inline bool __pte_needs_invert(u64 val)
+{
+ return !(val & _PAGE_PRESENT);
+}
+
+/* Get a mask to xor with the page table entry to get the correct pfn. */
+static inline u64 protnone_mask(u64 val)
+{
+ return __pte_needs_invert(val) ? ~0ull : 0;
+}
+
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
+{
+ /*
+ * When a PTE transitions from NONE to !NONE or vice-versa
+ * invert the PFN part to stop speculation.
+ * pte_pfn undoes this when needed.
+ */
+ if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
+ val = (val & ~mask) | (~val & mask);
+ return val;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5715647fc4fe..e4ffa565a69f 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -30,11 +30,14 @@ int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
void ptdump_walk_pgd_level_checkwx(void);
+void ptdump_walk_user_pgd_level_checkwx(void);
#ifdef CONFIG_DEBUG_WX
-#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
+#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
+#define debug_checkwx_user() ptdump_walk_user_pgd_level_checkwx()
#else
-#define debug_checkwx() do { } while (0)
+#define debug_checkwx() do { } while (0)
+#define debug_checkwx_user() do { } while (0)
#endif
/*
@@ -185,19 +188,29 @@ static inline int pte_special(pte_t pte)
return pte_flags(pte) & _PAGE_SPECIAL;
}
+/* Entries that were set to PROT_NONE are inverted */
+
+static inline u64 protnone_mask(u64 val);
+
static inline unsigned long pte_pfn(pte_t pte)
{
- return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
+ phys_addr_t pfn = pte_val(pte);
+ pfn ^= protnone_mask(pfn);
+ return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}
static inline unsigned long pmd_pfn(pmd_t pmd)
{
- return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
+ phys_addr_t pfn = pmd_val(pmd);
+ pfn ^= protnone_mask(pfn);
+ return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}
static inline unsigned long pud_pfn(pud_t pud)
{
- return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
+ phys_addr_t pfn = pud_val(pud);
+ pfn ^= protnone_mask(pfn);
+ return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}
static inline unsigned long p4d_pfn(p4d_t p4d)
@@ -400,11 +413,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
return pmd_set_flags(pmd, _PAGE_RW);
}
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
- return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
-}
-
static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
pudval_t v = native_pud_val(pud);
@@ -459,11 +467,6 @@ static inline pud_t pud_mkwrite(pud_t pud)
return pud_set_flags(pud, _PAGE_RW);
}
-static inline pud_t pud_mknotpresent(pud_t pud)
-{
- return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
-}
-
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
@@ -545,25 +548,45 @@ static inline pgprotval_t check_pgprot(pgprot_t pgprot)
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
- return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
- check_pgprot(pgprot));
+ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+ pfn ^= protnone_mask(pgprot_val(pgprot));
+ pfn &= PTE_PFN_MASK;
+ return __pte(pfn | check_pgprot(pgprot));
}
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
- return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
- check_pgprot(pgprot));
+ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+ pfn ^= protnone_mask(pgprot_val(pgprot));
+ pfn &= PHYSICAL_PMD_PAGE_MASK;
+ return __pmd(pfn | check_pgprot(pgprot));
}
static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
- return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
- check_pgprot(pgprot));
+ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+ pfn ^= protnone_mask(pgprot_val(pgprot));
+ pfn &= PHYSICAL_PUD_PAGE_MASK;
+ return __pud(pfn | check_pgprot(pgprot));
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+ return pfn_pmd(pmd_pfn(pmd),
+ __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
+}
+
+static inline pud_t pud_mknotpresent(pud_t pud)
+{
+ return pfn_pud(pud_pfn(pud),
+ __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
+
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- pteval_t val = pte_val(pte);
+ pteval_t val = pte_val(pte), oldval = val;
/*
* Chop off the NX bit (if present), and add the NX portion of
@@ -571,17 +594,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*/
val &= _PAGE_CHG_MASK;
val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
-
+ val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
return __pte(val);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
- pmdval_t val = pmd_val(pmd);
+ pmdval_t val = pmd_val(pmd), oldval = val;
val &= _HPAGE_CHG_MASK;
val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
-
+ val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
return __pmd(val);
}
@@ -640,8 +663,31 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);
+
+/*
+ * Take a PGD location (pgdp) and a pgd value that needs to be set there.
+ * Populates the user and returns the resulting PGD that must be set in
+ * the kernel copy of the page tables.
+ */
+static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
+{
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return pgd;
+ return __pti_set_user_pgtbl(pgdp, pgd);
+}
+#else /* CONFIG_PAGE_TABLE_ISOLATION */
+static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
+{
+ return pgd;
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
+
#endif /* __ASSEMBLY__ */
+
#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
@@ -1154,6 +1200,70 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
}
}
#endif
+/*
+ * Page table pages are page-aligned. The lower half of the top
+ * level is used for userspace and the top half for the kernel.
+ *
+ * Returns true for parts of the PGD that map userspace and
+ * false for the parts that map the kernel.
+ */
+static inline bool pgdp_maps_userspace(void *__ptr)
+{
+ unsigned long ptr = (unsigned long)__ptr;
+
+ return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
+}
+
+static inline int pgd_large(pgd_t pgd) { return 0; }
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
+ * (8k-aligned and 8k in size). The kernel copy sits in the first 4k and
+ * the user copy in the last 4k. To switch between them, you
+ * just need to flip the 12th bit in their addresses.
+ */
+#define PTI_PGTABLE_SWITCH_BIT PAGE_SHIFT
+
+/*
+ * This generates better code than the inline assembly in
+ * __set_bit().
+ */
+static inline void *ptr_set_bit(void *ptr, int bit)
+{
+ unsigned long __ptr = (unsigned long)ptr;
+
+ __ptr |= BIT(bit);
+ return (void *)__ptr;
+}
+static inline void *ptr_clear_bit(void *ptr, int bit)
+{
+ unsigned long __ptr = (unsigned long)ptr;
+
+ __ptr &= ~BIT(bit);
+ return (void *)__ptr;
+}
+
+static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
+{
+ return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
+{
+ return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
+{
+ return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
+{
+ return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
/*
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
@@ -1320,6 +1430,14 @@ static inline bool pud_access_permitted(pud_t pud, bool write)
return __pte_access_permitted(pud_val(pud), write);
}
+#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
+extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+ return boot_cpu_has_bug(X86_BUG_L1TF);
+}
+
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 88a056b01db4..b3ec519e3982 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -34,8 +34,6 @@ static inline void check_pgt_cache(void) { }
void paging_init(void);
void sync_initial_page_table(void);
-static inline int pgd_large(pgd_t pgd) { return 0; }
-
/*
* Define this if things work differently on an i386 and an i486:
* it will (on an i486) warn about kernel memory accesses that are
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
index d9a001a4a872..b0bc0fff5f1f 100644
--- a/arch/x86/include/asm/pgtable_32_types.h
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -50,13 +50,18 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) \
& PMD_MASK)
-#define PKMAP_BASE \
+#define LDT_BASE_ADDR \
((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
+#define LDT_END_ADDR (LDT_BASE_ADDR + PMD_SIZE)
+
+#define PKMAP_BASE \
+ ((LDT_BASE_ADDR - PAGE_SIZE) & PMD_MASK)
+
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
#else
-# define VMALLOC_END (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE)
+# define VMALLOC_END (LDT_BASE_ADDR - 2 * PAGE_SIZE)
#endif
#define MODULES_VADDR VMALLOC_START
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 3c5385f9a88f..f773d5e6c8cc 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -132,90 +132,6 @@ static inline pud_t native_pudp_get_and_clear(pud_t *xp)
#endif
}
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-/*
- * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
- * (8k-aligned and 8k in size). The kernel one is at the beginning 4k and
- * the user one is in the last 4k. To switch between them, you
- * just need to flip the 12th bit in their addresses.
- */
-#define PTI_PGTABLE_SWITCH_BIT PAGE_SHIFT
-
-/*
- * This generates better code than the inline assembly in
- * __set_bit().
- */
-static inline void *ptr_set_bit(void *ptr, int bit)
-{
- unsigned long __ptr = (unsigned long)ptr;
-
- __ptr |= BIT(bit);
- return (void *)__ptr;
-}
-static inline void *ptr_clear_bit(void *ptr, int bit)
-{
- unsigned long __ptr = (unsigned long)ptr;
-
- __ptr &= ~BIT(bit);
- return (void *)__ptr;
-}
-
-static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
-{
- return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
-}
-
-static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
-{
- return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
-}
-
-static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
-{
- return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
-}
-
-static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
-{
- return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
-}
-#endif /* CONFIG_PAGE_TABLE_ISOLATION */
-
-/*
- * Page table pages are page-aligned. The lower half of the top
- * level is used for userspace and the top half for the kernel.
- *
- * Returns true for parts of the PGD that map userspace and
- * false for the parts that map the kernel.
- */
-static inline bool pgdp_maps_userspace(void *__ptr)
-{
- unsigned long ptr = (unsigned long)__ptr;
-
- return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2);
-}
-
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd);
-
-/*
- * Take a PGD location (pgdp) and a pgd value that needs to be set there.
- * Populates the user and returns the resulting PGD that must be set in
- * the kernel copy of the page tables.
- */
-static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
-{
- if (!static_cpu_has(X86_FEATURE_PTI))
- return pgd;
- return __pti_set_user_pgd(pgdp, pgd);
-}
-#else
-static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
-{
- return pgd;
-}
-#endif
-
static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
pgd_t pgd;
@@ -226,7 +142,7 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
}
pgd = native_make_pgd(native_p4d_val(p4d));
- pgd = pti_set_user_pgd((pgd_t *)p4dp, pgd);
+ pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
*p4dp = native_make_p4d(native_pgd_val(pgd));
}
@@ -237,7 +153,7 @@ static inline void native_p4d_clear(p4d_t *p4d)
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
- *pgdp = pti_set_user_pgd(pgdp, pgd);
+ *pgdp = pti_set_user_pgtbl(pgdp, pgd);
}
static inline void native_pgd_clear(pgd_t *pgd)
@@ -255,7 +171,6 @@ extern void sync_global_pgds(unsigned long start, unsigned long end);
/*
* Level 4 access.
*/
-static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
/* PUD - Level3 access */
@@ -273,7 +188,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
*
* | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number
* | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
- * | OFFSET (14->63) | TYPE (9-13) |0|0|X|X| X| X|X|SD|0| <- swp entry
+ * | TYPE (59-63) | ~OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry
*
* G (8) is aliased and used as a PROT_NONE indicator for
* !present ptes. We need to start storing swap entries above
@@ -286,20 +201,34 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
*
* Bit 7 in swp entry should be 0 because pmd_present checks not only P,
* but also L and G.
+ *
+ * The offset is inverted by a binary not operation so that the
+ * high physical bits are set.
*/
-#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
-#define SWP_TYPE_BITS 5
-/* Place the offset above the type: */
-#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)
+#define SWP_TYPE_BITS 5
+
+#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
+
+/* We always extract/encode the offset by shifting it all the way up, and then down again */
+#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
-#define __swp_type(x) (((x).val >> (SWP_TYPE_FIRST_BIT)) \
- & ((1U << SWP_TYPE_BITS) - 1))
-#define __swp_offset(x) ((x).val >> SWP_OFFSET_FIRST_BIT)
-#define __swp_entry(type, offset) ((swp_entry_t) { \
- ((type) << (SWP_TYPE_FIRST_BIT)) \
- | ((offset) << SWP_OFFSET_FIRST_BIT) })
+/* Extract the high bits for type */
+#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
+
+/* Shift up (to get rid of type), then down to get value */
+#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
+
+/*
+ * Shift the offset up "too far" by TYPE bits, then down again.
+ * The offset is inverted by a binary not operation so that the
+ * high physical bits are set.
+ */
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ (~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
+ | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
+
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val((pmd)) })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
@@ -343,5 +272,7 @@ static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
return true;
}
+#include <asm/pgtable-invert.h>
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 054765ab2da2..04edd2d58211 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -115,6 +115,7 @@ extern unsigned int ptrs_per_p4d;
#define LDT_PGD_ENTRY_L5 -112UL
#define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
+#define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE)
#define __VMALLOC_BASE_L4 0xffffc90000000000UL
#define __VMALLOC_BASE_L5 0xffa0000000000000UL
@@ -153,4 +154,6 @@ extern unsigned int ptrs_per_p4d;
#define EARLY_DYNAMIC_PAGE_TABLES 64
+#define PGD_KERNEL_START ((PAGE_SIZE / 2) / sizeof(pgd_t))
+
#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 99fff853c944..b64acb08a62b 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -50,6 +50,7 @@
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
+#define _PAGE_SOFTW3 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW3)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
@@ -266,14 +267,37 @@ typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef struct { pgdval_t pgd; } pgd_t;
+#ifdef CONFIG_X86_PAE
+
+/*
+ * PHYSICAL_PAGE_MASK might be non-constant when SME is compiled in, so we can't
+ * use it here.
+ */
+
+#define PGD_PAE_PAGE_MASK ((signed long)PAGE_MASK)
+#define PGD_PAE_PHYS_MASK (((1ULL << __PHYSICAL_MASK_SHIFT)-1) & PGD_PAE_PAGE_MASK)
+
+/*
+ * PAE allows Base Address, P, PWT, PCD and AVL bits to be set in PGD entries.
+ * All other bits are Reserved MBZ
+ */
+#define PGD_ALLOWED_BITS (PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \
+ _PAGE_PWT | _PAGE_PCD | \
+ _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3)
+
+#else
+/* No need to mask any bits for !PAE */
+#define PGD_ALLOWED_BITS (~0ULL)
+#endif
+
static inline pgd_t native_make_pgd(pgdval_t val)
{
- return (pgd_t) { val };
+ return (pgd_t) { val & PGD_ALLOWED_BITS };
}
static inline pgdval_t native_pgd_val(pgd_t pgd)
{
- return pgd.pgd;
+ return pgd.pgd & PGD_ALLOWED_BITS;
}
static inline pgdval_t pgd_flags(pgd_t pgd)
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 625a52a5594f..02c2cbda4a74 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -39,10 +39,6 @@
#define CR3_PCID_MASK 0xFFFull
#define CR3_NOFLUSH BIT_ULL(63)
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-# define X86_CR3_PTI_PCID_USER_BIT 11
-#endif
-
#else
/*
* CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save
@@ -53,4 +49,8 @@
#define CR3_NOFLUSH 0
#endif
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define X86_CR3_PTI_PCID_USER_BIT 11
+#endif
+
#endif /* _ASM_X86_PROCESSOR_FLAGS_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index cfd29ee8c3da..682286aca881 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -181,6 +181,11 @@ extern const struct seq_operations cpuinfo_op;
extern void cpu_detect(struct cpuinfo_x86 *c);
+static inline unsigned long l1tf_pfn_limit(void)
+{
+ return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
+}
+
extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
@@ -966,6 +971,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+extern void free_kernel_image_pages(void *begin, void *end);
void default_idle(void);
#ifdef CONFIG_XEN
@@ -977,4 +983,16 @@ bool xen_set_default_idle(void);
void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
void microcode_check(void);
+
+enum l1tf_mitigations {
+ L1TF_MITIGATION_OFF,
+ L1TF_MITIGATION_FLUSH_NOWARN,
+ L1TF_MITIGATION_FLUSH,
+ L1TF_MITIGATION_FLUSH_NOSMT,
+ L1TF_MITIGATION_FULL,
+ L1TF_MITIGATION_FULL_FORCE
+};
+
+extern enum l1tf_mitigations l1tf_mitigation;
+
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h
index 38a17f1d5c9d..5df09a0b80b8 100644
--- a/arch/x86/include/asm/pti.h
+++ b/arch/x86/include/asm/pti.h
@@ -6,10 +6,9 @@
#ifdef CONFIG_PAGE_TABLE_ISOLATION
extern void pti_init(void);
extern void pti_check_boottime_disable(void);
-extern void pti_clone_kernel_text(void);
+extern void pti_finalize(void);
#else
static inline void pti_check_boottime_disable(void) { }
-static inline void pti_clone_kernel_text(void) { }
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
index 4cf11d88d3b3..19b90521954c 100644
--- a/arch/x86/include/asm/refcount.h
+++ b/arch/x86/include/asm/refcount.h
@@ -5,6 +5,7 @@
* PaX/grsecurity.
*/
#include <linux/refcount.h>
+#include <asm/bug.h>
/*
* This is the first portion of the refcount error handling, which lives in
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 5c019d23d06b..4a911a382ade 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -7,6 +7,7 @@
extern char __brk_base[], __brk_limit[];
extern struct exception_table_entry __stop___ex_table[];
+extern char __end_rodata_aligned[];
#if defined(CONFIG_X86_64)
extern char __end_rodata_hpage_align[];
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index bd090367236c..34cffcef7375 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -46,6 +46,7 @@ int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
int set_memory_encrypted(unsigned long addr, int numpages);
int set_memory_decrypted(unsigned long addr, int numpages);
+int set_memory_np_noalias(unsigned long addr, int numpages);
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index eb5f7999a893..36bd243843d6 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -87,15 +87,25 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
#endif
/* This is used when switching tasks or entering/exiting vm86 mode. */
-static inline void update_sp0(struct task_struct *task)
+static inline void update_task_stack(struct task_struct *task)
{
- /* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
+ /* sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
- load_sp0(task->thread.sp0);
+ if (static_cpu_has(X86_FEATURE_XENPV))
+ load_sp0(task->thread.sp0);
+ else
+ this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
#else
+ /*
+ * x86-64 updates x86_tss.sp1 via cpu_current_top_of_stack. That
+	 * doesn't work on x86-32 because sp1 and cpu_current_top_of_stack
+	 * have different values (due to the non-zero stack padding on
+	 * 32-bit).
+ */
if (static_cpu_has(X86_FEATURE_XENPV))
load_sp0(task_top_of_stack(task));
#endif
}
#endif /* _ASM_X86_SWITCH_TO_H */
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 2ecd34e2d46c..e85ff65c43c3 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -37,5 +37,6 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len);
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+extern int after_bootmem;
#endif /* _ASM_X86_TEXT_PATCHING_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 6690cd3fc8b1..511bf5fae8b8 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -148,22 +148,6 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
#endif
-static inline bool tlb_defer_switch_to_init_mm(void)
-{
- /*
- * If we have PCID, then switching to init_mm is reasonably
- * fast. If we don't have PCID, then switching to init_mm is
- * quite slow, so we try to defer it in the hopes that we can
- * avoid it entirely. The latter approach runs the risk of
- * receiving otherwise unnecessary IPIs.
- *
- * This choice is just a heuristic. The tlb code can handle this
- * function returning true or false regardless of whether we have
- * PCID.
- */
- return !static_cpu_has(X86_FEATURE_PCID);
-}
-
struct tlb_context {
u64 ctx_id;
u64 tlb_gen;
@@ -554,4 +538,9 @@ extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
native_flush_tlb_others(mask, info)
#endif
+extern void tlb_flush_remove_tables(struct mm_struct *mm);
+extern void tlb_flush_remove_tables_local(void *arg);
+
+#define HAVE_TLB_FLUSH_REMOVE_TABLES
+
#endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index c1d2a9892352..453cf38a1c33 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -123,13 +123,17 @@ static inline int topology_max_smt_threads(void)
}
int topology_update_package_map(unsigned int apicid, unsigned int cpu);
-extern int topology_phys_to_logical_pkg(unsigned int pkg);
+int topology_phys_to_logical_pkg(unsigned int pkg);
+bool topology_is_primary_thread(unsigned int cpu);
+bool topology_smt_supported(void);
#else
#define topology_max_packages() (1)
static inline int
topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
static inline int topology_max_smt_threads(void) { return 1; }
+static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
+static inline bool topology_smt_supported(void) { return false; }
#endif
static inline void arch_fix_phys_package_id(int num, u32 slot)
diff --git a/arch/x86/include/asm/trace/hyperv.h b/arch/x86/include/asm/trace/hyperv.h
index 4253bca99989..9c0d4b588e3f 100644
--- a/arch/x86/include/asm/trace/hyperv.h
+++ b/arch/x86/include/asm/trace/hyperv.h
@@ -28,6 +28,21 @@ TRACE_EVENT(hyperv_mmu_flush_tlb_others,
__entry->addr, __entry->end)
);
+TRACE_EVENT(hyperv_send_ipi_mask,
+ TP_PROTO(const struct cpumask *cpus,
+ int vector),
+ TP_ARGS(cpus, vector),
+ TP_STRUCT__entry(
+ __field(unsigned int, ncpus)
+ __field(int, vector)
+ ),
+ TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
+ __entry->vector = vector;
+ ),
+ TP_printk("ncpus %d vector %x",
+ __entry->ncpus, __entry->vector)
+ );
+
#endif /* CONFIG_HYPERV */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 2701d221583a..eb5bbfeccb66 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -33,13 +33,13 @@ static inline cycles_t get_cycles(void)
extern struct system_counterval_t convert_art_to_tsc(u64 art);
extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
-extern void tsc_early_delay_calibrate(void);
+extern void tsc_early_init(void);
extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
extern void mark_tsc_async_resets(char *reason);
-extern unsigned long native_calibrate_cpu(void);
+extern unsigned long native_calibrate_cpu_early(void);
extern unsigned long native_calibrate_tsc(void);
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
index bae46fc6b9de..0bcdb1279361 100644
--- a/arch/x86/include/asm/unwind_hints.h
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -26,7 +26,7 @@
* the debuginfo as necessary. It will also warn if it sees any
* inconsistencies.
*/
-.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL
+.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL end=0
#ifdef CONFIG_STACK_VALIDATION
.Lunwind_hint_ip_\@:
.pushsection .discard.unwind_hints
@@ -35,12 +35,14 @@
.short \sp_offset
.byte \sp_reg
.byte \type
+ .byte \end
+ .balign 4
.popsection
#endif
.endm
.macro UNWIND_HINT_EMPTY
- UNWIND_HINT sp_reg=ORC_REG_UNDEFINED
+ UNWIND_HINT sp_reg=ORC_REG_UNDEFINED end=1
.endm
.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 iret=0
@@ -86,19 +88,21 @@
#else /* !__ASSEMBLY__ */
-#define UNWIND_HINT(sp_reg, sp_offset, type) \
+#define UNWIND_HINT(sp_reg, sp_offset, type, end) \
"987: \n\t" \
".pushsection .discard.unwind_hints\n\t" \
/* struct unwind_hint */ \
".long 987b - .\n\t" \
- ".short " __stringify(sp_offset) "\n\t" \
+ ".short " __stringify(sp_offset) "\n\t" \
".byte " __stringify(sp_reg) "\n\t" \
".byte " __stringify(type) "\n\t" \
+ ".byte " __stringify(end) "\n\t" \
+ ".balign 4 \n\t" \
".popsection\n\t"
-#define UNWIND_HINT_SAVE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_SAVE)
+#define UNWIND_HINT_SAVE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_SAVE, 0)
-#define UNWIND_HINT_RESTORE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_RESTORE)
+#define UNWIND_HINT_RESTORE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_RESTORE, 0)
#endif /* __ASSEMBLY__ */
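
For reference, a sketch of the record each UNWIND_HINT now emits into .discard.unwind_hints, modelled as a C struct; the field layout mirrors the directives above and the .balign 4 padding, though the real objtool definition may differ:

#include <stdint.h>
#include <stdio.h>

struct unwind_hint {
	int32_t ip;        /* .long 987b - . : IP-relative location of the hint */
	int16_t sp_offset; /* .short \sp_offset */
	uint8_t sp_reg;    /* .byte \sp_reg */
	uint8_t type;      /* .byte \type */
	uint8_t end;       /* .byte \end : the new field */
};                         /* .balign 4 pads each record to 12 bytes */

int main(void)
{
	/* 9 bytes of data plus 3 bytes of padding */
	printf("record size: %zu bytes\n", sizeof(struct unwind_hint));
	return 0;
}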
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 6aa8499e1f62..95f9107449bf 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -576,4 +576,15 @@ enum vm_instruction_error_number {
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
};
+enum vmx_l1d_flush_state {
+ VMENTER_L1D_FLUSH_AUTO,
+ VMENTER_L1D_FLUSH_NEVER,
+ VMENTER_L1D_FLUSH_COND,
+ VMENTER_L1D_FLUSH_ALWAYS,
+ VMENTER_L1D_FLUSH_EPT_DISABLED,
+ VMENTER_L1D_FLUSH_NOT_REQUIRED,
+};
+
+extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
+
#endif
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index bfd882617613..6b2f90a0b149 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -209,24 +209,37 @@ extern struct { char _entry[32]; } hypercall_page[];
})
static inline long
-privcmd_call(unsigned call,
- unsigned long a1, unsigned long a2,
- unsigned long a3, unsigned long a4,
- unsigned long a5)
+xen_single_call(unsigned int call,
+ unsigned long a1, unsigned long a2,
+ unsigned long a3, unsigned long a4,
+ unsigned long a5)
{
__HYPERCALL_DECLS;
__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
- stac();
asm volatile(CALL_NOSPEC
: __HYPERCALL_5PARAM
: [thunk_target] "a" (&hypercall_page[call])
: __HYPERCALL_CLOBBER5);
- clac();
return (long)__res;
}
+static inline long
+privcmd_call(unsigned int call,
+ unsigned long a1, unsigned long a2,
+ unsigned long a3, unsigned long a4,
+ unsigned long a5)
+{
+ long res;
+
+ stac();
+ res = xen_single_call(call, a1, a2, a3, a4, a5);
+ clac();
+
+ return res;
+}
+
static inline int
HYPERVISOR_set_trap_table(struct trap_info *table)
{
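
A user-space model of the refactor above (names suffixed _model are illustrative): the raw hypercall becomes a reusable primitive, and privcmd_call() remains the only place that opens and closes the SMAP window:

#include <stdio.h>

static void stac(void) { puts("stac: user-access window opened"); }
static void clac(void) { puts("clac: user-access window closed"); }

/* stands in for the actual call through hypercall_page */
static long xen_single_call_model(long a1)
{
	return a1 + 1;
}

static long privcmd_call_model(long a1)
{
	long res;

	stac();
	res = xen_single_call_model(a1);
	clac();

	return res;
}

int main(void)
{
	printf("result: %ld\n", privcmd_call_model(41));
	return 0;
}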
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a481763a3776..014f214da581 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -668,6 +668,7 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
local_irq_save(flags);
memcpy(addr, opcode, len);
local_irq_restore(flags);
+ sync_core();
/* Could also do a CLFLUSH here to speed up CPU recovery; but
that causes hangs on some VIA CPUs. */
return addr;
@@ -693,6 +694,12 @@ void *text_poke(void *addr, const void *opcode, size_t len)
struct page *pages[2];
int i;
+ /*
+	 * While the boot memory allocator is running we cannot use struct
+ * pages as they are not yet initialized.
+ */
+ BUG_ON(!after_bootmem);
+
if (!core_kernel_text((unsigned long)addr)) {
pages[0] = vmalloc_to_page(addr);
pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index adbda5847b14..84132eddb5a8 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -56,6 +56,7 @@
#include <asm/hypervisor.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/irq_regs.h>
unsigned int num_processors;
@@ -940,7 +941,7 @@ static int __init calibrate_APIC_clock(void)
if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
pr_warning("APIC timer disabled due to verification failure\n");
- return -1;
+ return -1;
}
return 0;
@@ -2192,6 +2193,23 @@ static int cpuid_to_apicid[] = {
[0 ... NR_CPUS - 1] = -1,
};
+#ifdef CONFIG_SMP
+/**
+ * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
+ * @id: APIC ID to check
+ */
+bool apic_id_is_primary_thread(unsigned int apicid)
+{
+ u32 mask;
+
+ if (smp_num_siblings == 1)
+ return true;
+ /* Isolate the SMT bit(s) in the APICID and check for 0 */
+ mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
+ return !(apicid & mask);
+}
+#endif
+
/*
* Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids
* and cpuid_to_apicid[] synchronized.
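
A standalone sketch of the SMT-bit masking used by apic_id_is_primary_thread() above; fls() is modelled locally, and with two siblings per core the low APIC ID bit selects the thread, so even APIC IDs are the primary threads:

#include <stdbool.h>
#include <stdio.h>

/* find-last-set, modelled locally (the kernel uses fls()) */
static int fls_model(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static bool is_primary_thread(unsigned int apicid, unsigned int siblings)
{
	unsigned int mask;

	if (siblings == 1)
		return true;
	/* isolate the SMT bit(s) in the APIC ID and check for 0 */
	mask = (1U << (fls_model(siblings) - 1)) - 1;
	return !(apicid & mask);
}

int main(void)
{
	for (unsigned int id = 0; id < 4; id++)
		printf("apicid %u: %s\n", id,
		       is_primary_thread(id, 2) ? "primary" : "sibling");
	return 0;
}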
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 3982f79d2377..ff0d14cd9e82 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -33,6 +33,7 @@
#include <linux/mm.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index ce503c99f5c4..72a94401f9e0 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -12,6 +12,7 @@
*/
#include <linux/mm.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/hpet.h>
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 35aaee4fc028..9f148e3d45b4 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -11,6 +11,7 @@
* published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
@@ -218,7 +219,8 @@ static int reserve_irq_vector(struct irq_data *irqd)
return 0;
}
-static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
+static int
+assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
struct apic_chip_data *apicd = apic_chip_data(irqd);
bool resvd = apicd->has_reserved;
@@ -245,22 +247,12 @@ static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
return -EBUSY;
vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
- if (vector > 0)
- apic_update_vector(irqd, vector, cpu);
trace_vector_alloc(irqd->irq, vector, resvd, vector);
- return vector;
-}
-
-static int assign_vector_locked(struct irq_data *irqd,
- const struct cpumask *dest)
-{
- struct apic_chip_data *apicd = apic_chip_data(irqd);
- int vector = allocate_vector(irqd, dest);
-
if (vector < 0)
return vector;
+ apic_update_vector(irqd, vector, cpu);
+ apic_update_irq_cfg(irqd, vector, cpu);
- apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
return 0;
}
@@ -433,7 +425,7 @@ static int activate_managed(struct irq_data *irqd)
pr_err("Managed startup irq %u, no vector available\n",
irqd->irq);
}
- return ret;
+ return ret;
}
static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index d492752f79e1..391f358ebb4c 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -394,10 +394,10 @@ extern int uv_hub_info_version(void)
EXPORT_SYMBOL(uv_hub_info_version);
/* Default UV memory block size is 2GB */
-static unsigned long mem_block_size = (2UL << 30);
+static unsigned long mem_block_size __initdata = (2UL << 30);
/* Kernel parameter to specify UV mem block size */
-static int parse_mem_block_size(char *ptr)
+static int __init parse_mem_block_size(char *ptr)
{
unsigned long size = memparse(ptr, NULL);
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index dcb008c320fe..01de31db300d 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -103,4 +103,9 @@ void common(void) {
OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
+ DEFINE(MASK_entry_stack, (~(sizeof(struct entry_stack) - 1)));
+
+ /* Offset for sp0 and sp1 into the tss_struct */
+ OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
+ OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
}
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index a4a3be399f4b..82826f2275cc 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -46,8 +46,14 @@ void foo(void)
OFFSET(saved_context_gdt_desc, saved_context, gdt_desc);
BLANK();
- /* Offset from the sysenter stack to tss.sp0 */
- DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
+ /*
+ * Offset from the entry stack to task stack stored in TSS. Kernel entry
+ * happens on the per-cpu entry-stack, and the asm code switches to the
+ * task-stack pointer stored in x86_tss.sp1, which is a copy of
+ * task->thread.sp0 where entry code can find it.
+ */
+ DEFINE(TSS_entry2task_stack,
+ offsetof(struct cpu_entry_area, tss.x86_tss.sp1) -
offsetofend(struct cpu_entry_area, entry_stack_page.stack));
#ifdef CONFIG_STACKPROTECTOR
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index b2dcd161f514..3b9405e7ba2b 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -65,8 +65,6 @@ int main(void)
#undef ENTRY
OFFSET(TSS_ist, tss_struct, x86_tss.ist);
- OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
- OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
BLANK();
#ifdef CONFIG_STACKPROTECTOR
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 7a40196967cb..347137e80bf5 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -35,7 +35,9 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
-obj-$(CONFIG_INTEL_RDT) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o intel_rdt_ctrlmondata.o
+obj-$(CONFIG_INTEL_RDT) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o
+obj-$(CONFIG_INTEL_RDT) += intel_rdt_ctrlmondata.o intel_rdt_pseudo_lock.o
+CFLAGS_intel_rdt_pseudo_lock.o = -I$(src)
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 38915fbfae73..22ab408177b2 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -232,8 +232,6 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
}
}
- set_cpu_cap(c, X86_FEATURE_K7);
-
 	/* Is this call coming from identify_secondary_cpu()? */
if (!c->cpu_index)
return;
@@ -315,6 +313,13 @@ static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
c->cpu_core_id %= cus_per_node;
}
+
+static void amd_get_topology_early(struct cpuinfo_x86 *c)
+{
+ if (cpu_has(c, X86_FEATURE_TOPOEXT))
+ smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
+}
+
/*
* Fixup core topology information for
* (1) AMD multi-node processors
@@ -334,7 +339,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
node_id = ecx & 0xff;
- smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
if (c->x86 == 0x15)
c->cu_id = ebx & 0xff;
@@ -613,10 +617,19 @@ clear_sev:
static void early_init_amd(struct cpuinfo_x86 *c)
{
+ u64 value;
u32 dummy;
early_init_amd_mc(c);
+#ifdef CONFIG_X86_32
+ if (c->x86 == 6)
+ set_cpu_cap(c, X86_FEATURE_K7);
+#endif
+
+ if (c->x86 >= 0xf)
+ set_cpu_cap(c, X86_FEATURE_K8);
+
rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
/*
@@ -683,6 +696,22 @@ static void early_init_amd(struct cpuinfo_x86 *c)
set_cpu_bug(c, X86_BUG_AMD_E400);
early_detect_mem_encrypt(c);
+
+ /* Re-enable TopologyExtensions if switched off by BIOS */
+ if (c->x86 == 0x15 &&
+ (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
+ !cpu_has(c, X86_FEATURE_TOPOEXT)) {
+
+ if (msr_set_bit(0xc0011005, 54) > 0) {
+ rdmsrl(0xc0011005, value);
+ if (value & BIT_64(54)) {
+ set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+ pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+ }
+ }
+ }
+
+ amd_get_topology_early(c);
}
static void init_amd_k8(struct cpuinfo_x86 *c)
@@ -774,19 +803,6 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
{
u64 value;
- /* re-enable TopologyExtensions if switched off by BIOS */
- if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
- !cpu_has(c, X86_FEATURE_TOPOEXT)) {
-
- if (msr_set_bit(0xc0011005, 54) > 0) {
- rdmsrl(0xc0011005, value);
- if (value & BIT_64(54)) {
- set_cpu_cap(c, X86_FEATURE_TOPOEXT);
- pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
- }
- }
- }
-
/*
 	 * The cache way access filter has a performance penalty on some workloads.
* Disable it on the affected CPUs.
@@ -850,22 +866,12 @@ static void init_amd(struct cpuinfo_x86 *c)
cpu_detect_cache_sizes(c);
- /* Multi core CPU? */
- if (c->extended_cpuid_level >= 0x80000008) {
- amd_detect_cmp(c);
- amd_get_topology(c);
- srat_detect_node(c);
- }
-
-#ifdef CONFIG_X86_32
- detect_ht(c);
-#endif
+ amd_detect_cmp(c);
+ amd_get_topology(c);
+ srat_detect_node(c);
init_amd_cacheinfo(c);
- if (c->x86 >= 0xf)
- set_cpu_cap(c, X86_FEATURE_K8);
-
if (cpu_has(c, X86_FEATURE_XMM2)) {
unsigned long long val;
int ret;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 5c0ea39311fe..cb4a16292aa7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -22,15 +22,18 @@
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
+#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
+#include <asm/e820/api.h>
#include <asm/hypervisor.h>
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
+static void __init l1tf_select_mitigation(void);
/*
* Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
@@ -56,6 +59,12 @@ void __init check_bugs(void)
{
identify_boot_cpu();
+ /*
+ * identify_boot_cpu() initialized SMT support information, let the
+ * core code know.
+ */
+ cpu_smt_check_topology_early();
+
if (!IS_ENABLED(CONFIG_SMP)) {
pr_info("CPU: ");
print_cpu_info(&boot_cpu_data);
@@ -82,6 +91,8 @@ void __init check_bugs(void)
*/
ssb_select_mitigation();
+ l1tf_select_mitigation();
+
#ifdef CONFIG_X86_32
/*
* Check whether we are able to run this kernel safely on SMP.
@@ -130,6 +141,7 @@ static const char *spectre_v2_strings[] = {
[SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
[SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
[SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
+ [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
};
#undef pr_fmt
@@ -313,23 +325,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return cmd;
}
-/* Check for Skylake-like CPUs (for RSB handling) */
-static bool __init is_skylake_era(void)
-{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6) {
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_SKYLAKE_MOBILE:
- case INTEL_FAM6_SKYLAKE_DESKTOP:
- case INTEL_FAM6_SKYLAKE_X:
- case INTEL_FAM6_KABYLAKE_MOBILE:
- case INTEL_FAM6_KABYLAKE_DESKTOP:
- return true;
- }
- }
- return false;
-}
-
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -349,6 +344,13 @@ static void __init spectre_v2_select_mitigation(void)
case SPECTRE_V2_CMD_FORCE:
case SPECTRE_V2_CMD_AUTO:
+ if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+ mode = SPECTRE_V2_IBRS_ENHANCED;
+ /* Force it so VMEXIT will restore correctly */
+ x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ goto specv2_set_mode;
+ }
if (IS_ENABLED(CONFIG_RETPOLINE))
goto retpoline_auto;
break;
@@ -386,26 +388,20 @@ retpoline_auto:
setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
}
+specv2_set_mode:
spectre_v2_enabled = mode;
pr_info("%s\n", spectre_v2_strings[mode]);
/*
- * If neither SMEP nor PTI are available, there is a risk of
- * hitting userspace addresses in the RSB after a context switch
- * from a shallow call stack to a deeper one. To prevent this fill
- * the entire RSB, even when using IBRS.
+ * If spectre v2 protection has been enabled, unconditionally fill
+ * RSB during a context switch; this protects against two independent
+ * issues:
*
- * Skylake era CPUs have a separate issue with *underflow* of the
- * RSB, when they will predict 'ret' targets from the generic BTB.
- * The proper mitigation for this is IBRS. If IBRS is not supported
- * or deactivated in favour of retpolines the RSB fill on context
- * switch is required.
+ * - RSB underflow (and switch to BTB) on Skylake+
+ * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
*/
- if ((!boot_cpu_has(X86_FEATURE_PTI) &&
- !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
- setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
- pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
- }
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+ pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
/* Initialize Indirect Branch Prediction Barrier if supported */
if (boot_cpu_has(X86_FEATURE_IBPB)) {
@@ -415,9 +411,16 @@ retpoline_auto:
/*
* Retpoline means the kernel is safe because it has no indirect
- * branches. But firmware isn't, so use IBRS to protect that.
+	 * branches. Enhanced IBRS protects firmware too, so enable restricted
+ * speculation around firmware calls only when Enhanced IBRS isn't
+ * supported.
+ *
+ * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
+ * the user might select retpoline on the kernel command line and if
+	 * the CPU supports Enhanced IBRS, the kernel might unintentionally not
+ * enable IBRS around firmware calls.
*/
- if (boot_cpu_has(X86_FEATURE_IBRS)) {
+ if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
@@ -654,8 +657,120 @@ void x86_spec_ctrl_setup_ap(void)
x86_amd_ssb_disable();
}
+#undef pr_fmt
+#define pr_fmt(fmt) "L1TF: " fmt
+
+/* Default mitigation for L1TF-affected CPUs */
+enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+EXPORT_SYMBOL_GPL(l1tf_mitigation);
+#endif
+enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+
+static void __init l1tf_select_mitigation(void)
+{
+ u64 half_pa;
+
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return;
+
+ switch (l1tf_mitigation) {
+ case L1TF_MITIGATION_OFF:
+ case L1TF_MITIGATION_FLUSH_NOWARN:
+ case L1TF_MITIGATION_FLUSH:
+ break;
+ case L1TF_MITIGATION_FLUSH_NOSMT:
+ case L1TF_MITIGATION_FULL:
+ cpu_smt_disable(false);
+ break;
+ case L1TF_MITIGATION_FULL_FORCE:
+ cpu_smt_disable(true);
+ break;
+ }
+
+#if CONFIG_PGTABLE_LEVELS == 2
+ pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
+ return;
+#endif
+
+ /*
+ * This is extremely unlikely to happen because almost all
+	 * This is extremely unlikely to happen because almost all
+	 * systems have a MAX_PA/2 far larger than the amount of RAM
+	 * that can fit into their DIMM slots.
+ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+ if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+ return;
+ }
+
+ setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
+}
+
+static int __init l1tf_cmdline(char *str)
+{
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return 0;
+
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "off"))
+ l1tf_mitigation = L1TF_MITIGATION_OFF;
+ else if (!strcmp(str, "flush,nowarn"))
+ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
+ else if (!strcmp(str, "flush"))
+ l1tf_mitigation = L1TF_MITIGATION_FLUSH;
+ else if (!strcmp(str, "flush,nosmt"))
+ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
+ else if (!strcmp(str, "full"))
+ l1tf_mitigation = L1TF_MITIGATION_FULL;
+ else if (!strcmp(str, "full,force"))
+ l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
+
+ return 0;
+}
+early_param("l1tf", l1tf_cmdline);
+
+#undef pr_fmt
+
#ifdef CONFIG_SYSFS
+#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
+
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+static const char *l1tf_vmx_states[] = {
+ [VMENTER_L1D_FLUSH_AUTO] = "auto",
+ [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
+ [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
+ [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
+ [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
+ [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
+};
+
+static ssize_t l1tf_show_state(char *buf)
+{
+ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
+ return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+
+ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
+ (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
+ cpu_smt_control == CPU_SMT_ENABLED))
+ return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
+ l1tf_vmx_states[l1tf_vmx_mitigation]);
+
+ return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
+ l1tf_vmx_states[l1tf_vmx_mitigation],
+ cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
+}
+#else
+static ssize_t l1tf_show_state(char *buf)
+{
+ return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+}
+#endif
+
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
@@ -684,6 +799,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_SPEC_STORE_BYPASS:
return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+ case X86_BUG_L1TF:
+ if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
+ return l1tf_show_state(buf);
+ break;
default:
break;
}
@@ -710,4 +829,9 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
{
return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
+
+ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
+}
#endif
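
A sketch of the arithmetic behind the MAX_PA/2 check in l1tf_select_mitigation() above, with an assumed 36-bit physical address space and 16 GiB of RAM; the kernel consults the e820 map via e820__mapped_any(), which this model omits:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int phys_bits = 36, page_shift = 12;  /* assumed CPU */
	/* mirrors l1tf_pfn_limit(): highest safe PFN below MAX_PA/2 */
	uint64_t pfn_limit = (1ULL << (phys_bits - 1 - page_shift)) - 1;
	uint64_t half_pa = pfn_limit << page_shift;    /* just below 32 GiB */
	uint64_t highest_ram = 0x400000000ULL;         /* assumed: 16 GiB */

	if (highest_ram > half_pa)
		puts("RAM above MAX_PA/2: L1TF mitigation not fully effective");
	else
		printf("all RAM below %#llx: PTE inversion is effective\n",
		       (unsigned long long)half_pa);
	return 0;
}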
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index eb4cb3efd20e..84dee5ab745a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -661,33 +661,36 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c)
tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}
-void detect_ht(struct cpuinfo_x86 *c)
+int detect_ht_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
u32 eax, ebx, ecx, edx;
- int index_msb, core_bits;
- static bool printed;
if (!cpu_has(c, X86_FEATURE_HT))
- return;
+ return -1;
if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
- goto out;
+ return -1;
if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
- return;
+ return -1;
cpuid(1, &eax, &ebx, &ecx, &edx);
smp_num_siblings = (ebx & 0xff0000) >> 16;
-
- if (smp_num_siblings == 1) {
+ if (smp_num_siblings == 1)
pr_info_once("CPU0: Hyper-Threading is disabled\n");
- goto out;
- }
+#endif
+ return 0;
+}
- if (smp_num_siblings <= 1)
- goto out;
+void detect_ht(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+ int index_msb, core_bits;
+
+ if (detect_ht_early(c) < 0)
+ return;
index_msb = get_count_order(smp_num_siblings);
c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
@@ -700,15 +703,6 @@ void detect_ht(struct cpuinfo_x86 *c)
c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
((1 << core_bits) - 1);
-
-out:
- if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
- pr_info("CPU: Physical Processor ID: %d\n",
- c->phys_proc_id);
- pr_info("CPU: Processor Core ID: %d\n",
- c->cpu_core_id);
- printed = 1;
- }
#endif
}
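
A simplified standalone model of how detect_ht() splits an initial APIC ID into package and core IDs (the kernel routes this through apic->phys_pkg_id(); the topology values here are assumed):

#include <stdio.h>

/* ceil(log2(n)), modelled locally (the kernel uses get_count_order()) */
static int get_count_order_model(unsigned int n)
{
	int order = 0;

	while ((1U << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int apicid = 0x5;      /* hypothetical initial APIC ID */
	unsigned int siblings = 4;      /* threads per package */
	unsigned int cores = 2;         /* cores per package */

	int pkg_shift = get_count_order_model(siblings);
	int smt_shift = get_count_order_model(siblings / cores);
	int core_bits = get_count_order_model(cores);

	unsigned int phys_proc_id = apicid >> pkg_shift;
	unsigned int cpu_core_id =
		(apicid >> smt_shift) & ((1U << core_bits) - 1);

	/* apicid 0x5 -> package 1, core 0, thread 1 */
	printf("apicid %#x -> package %u, core %u, thread %u\n",
	       apicid, phys_proc_id, cpu_core_id,
	       apicid & ((1U << smt_shift) - 1));
	return 0;
}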
@@ -911,7 +905,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
apply_forced_caps(c);
}
-static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
+void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
@@ -987,6 +981,21 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
{}
};
+static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
+ /* in addition to cpu_no_speculation */
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+ {}
+};
+
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 ia32_cap = 0;
@@ -1005,6 +1014,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+ if (ia32_cap & ARCH_CAP_IBRS_ALL)
+ setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+
if (x86_match_cpu(cpu_no_meltdown))
return;
@@ -1013,6 +1025,29 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
return;
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+ if (x86_match_cpu(cpu_no_l1tf))
+ return;
+
+ setup_force_cpu_bug(X86_BUG_L1TF);
+}
+
+/*
+ * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
+ * unfortunately, that's not true in practice because of early VIA
+ * chips and (more importantly) broken virtualizers that are not easy
+ * to detect. In the latter case it doesn't even *fail* reliably, so
+ * probing for it doesn't even work. Disable it completely on 32-bit
+ * unless we can find a reliable way to detect all the broken cases.
+ * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
+ */
+static void detect_nopl(void)
+{
+#ifdef CONFIG_X86_32
+ setup_clear_cpu_cap(X86_FEATURE_NOPL);
+#else
+ setup_force_cpu_cap(X86_FEATURE_NOPL);
+#endif
}
/*
@@ -1089,6 +1124,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
*/
if (!pgtable_l5_enabled())
setup_clear_cpu_cap(X86_FEATURE_LA57);
+
+ detect_nopl();
}
void __init early_cpu_init(void)
@@ -1124,24 +1161,6 @@ void __init early_cpu_init(void)
early_identify_cpu(&boot_cpu_data);
}
-/*
- * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
- * unfortunately, that's not true in practice because of early VIA
- * chips and (more importantly) broken virtualizers that are not easy
- * to detect. In the latter case it doesn't even *fail* reliably, so
- * probing for it doesn't even work. Disable it completely on 32-bit
- * unless we can find a reliable way to detect all the broken cases.
- * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
- */
-static void detect_nopl(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_32
- clear_cpu_cap(c, X86_FEATURE_NOPL);
-#else
- set_cpu_cap(c, X86_FEATURE_NOPL);
-#endif
-}
-
static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
@@ -1204,8 +1223,6 @@ static void generic_identify(struct cpuinfo_x86 *c)
get_model_name(c); /* Default name */
- detect_nopl(c);
-
detect_null_seg_behavior(c);
/*
@@ -1804,11 +1821,12 @@ void cpu_init(void)
enter_lazy_tlb(&init_mm, curr);
/*
- * Initialize the TSS. Don't bother initializing sp0, as the initial
- * task never enters user mode.
+ * Initialize the TSS. sp0 points to the entry trampoline stack
+ * regardless of what task is running.
*/
set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
load_TR_desc();
+ load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
load_mm_ldt(&init_mm);
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 38216f678fc3..7b229afa0a37 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -46,6 +46,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
*const __x86_cpu_dev_end[];
extern void get_cpu_cap(struct cpuinfo_x86 *c);
+extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern u32 get_scattered_cpuid_leaf(unsigned int level,
@@ -55,7 +56,9 @@ extern void init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
extern void detect_num_cpu_cores(struct cpuinfo_x86 *c);
+extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
extern int detect_extended_topology(struct cpuinfo_x86 *c);
+extern int detect_ht_early(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);
unsigned int aperfmperf_get_khz(int cpu);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index eb75564f2d25..401e8c133108 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -301,6 +301,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
}
check_mpx_erratum(c);
+
+ /*
+ * Get the number of SMT siblings early from the extended topology
+ * leaf, if available. Otherwise try the legacy SMT detection.
+ */
+ if (detect_extended_topology_early(c) < 0)
+ detect_ht_early(c);
}
#ifdef CONFIG_X86_32
@@ -465,14 +472,17 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020
+#define x86_VMX_FEATURE_EPT_CAP_AD 0x00200000
u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
+ u32 msr_vpid_cap, msr_ept_cap;
clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
clear_cpu_cap(c, X86_FEATURE_VNMI);
clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
clear_cpu_cap(c, X86_FEATURE_EPT);
clear_cpu_cap(c, X86_FEATURE_VPID);
+ clear_cpu_cap(c, X86_FEATURE_EPT_AD);
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
msr_ctl = vmx_msr_high | vmx_msr_low;
@@ -487,8 +497,13 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
(msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
- if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
+ if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) {
set_cpu_cap(c, X86_FEATURE_EPT);
+ rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
+ msr_ept_cap, msr_vpid_cap);
+ if (msr_ept_cap & x86_VMX_FEATURE_EPT_CAP_AD)
+ set_cpu_cap(c, X86_FEATURE_EPT_AD);
+ }
if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
set_cpu_cap(c, X86_FEATURE_VPID);
}
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index ec4754f81cbd..abb71ac70443 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -859,6 +859,8 @@ static __init bool get_rdt_resources(void)
return (rdt_mon_capable || rdt_alloc_capable);
}
+static enum cpuhp_state rdt_online;
+
static int __init intel_rdt_late_init(void)
{
struct rdt_resource *r;
@@ -880,6 +882,7 @@ static int __init intel_rdt_late_init(void)
cpuhp_remove_state(state);
return ret;
}
+ rdt_online = state;
for_each_alloc_capable_rdt_resource(r)
pr_info("Intel RDT %s allocation detected\n", r->name);
@@ -891,3 +894,11 @@ static int __init intel_rdt_late_init(void)
}
late_initcall(intel_rdt_late_init);
+
+static void __exit intel_rdt_exit(void)
+{
+ cpuhp_remove_state(rdt_online);
+ rdtgroup_exit();
+}
+
+__exitcall(intel_rdt_exit);
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 39752825e376..4e588f36228f 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -81,6 +81,34 @@ enum rdt_group_type {
};
/**
+ * enum rdtgrp_mode - Mode of a RDT resource group
+ * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations
+ * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed
+ * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking
+ * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations
+ * allowed AND the allocations are Cache Pseudo-Locked
+ *
+ * The mode of a resource group enables control over the allowed overlap
+ * between allocations associated with different resource groups (classes
+ * of service). The user can modify the mode of a resource group by
+ * writing to the "mode" resctrl file associated with the resource group.
+ *
+ * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by
+ * writing the appropriate text to the "mode" file. A resource group enters
+ * "pseudo-locked" mode after the schemata is written while the resource
+ * group is in "pseudo-locksetup" mode.
+ */
+enum rdtgrp_mode {
+ RDT_MODE_SHAREABLE = 0,
+ RDT_MODE_EXCLUSIVE,
+ RDT_MODE_PSEUDO_LOCKSETUP,
+ RDT_MODE_PSEUDO_LOCKED,
+
+ /* Must be last */
+ RDT_NUM_MODES,
+};
+
+/**
* struct mongroup - store mon group's data in resctrl fs.
* @mon_data_kn kernlfs node for the mon_data directory
* @parent: parent rdtgrp
@@ -95,6 +123,43 @@ struct mongroup {
};
/**
+ * struct pseudo_lock_region - pseudo-lock region information
+ * @r: RDT resource to which this pseudo-locked region
+ * belongs
+ * @d: RDT domain to which this pseudo-locked region
+ * belongs
+ * @cbm: bitmask of the pseudo-locked region
+ * @lock_thread_wq: waitqueue used to wait on the pseudo-locking thread
+ * completion
+ * @thread_done: variable used by waitqueue to test if pseudo-locking
+ * thread completed
+ * @cpu: core associated with the cache on which the setup code
+ * will be run
+ * @line_size: size of the cache lines
+ * @size: size of pseudo-locked region in bytes
+ * @kmem: the kernel memory associated with pseudo-locked region
+ * @minor: minor number of character device associated with this
+ * region
+ * @debugfs_dir: pointer to this region's directory in the debugfs
+ * filesystem
+ * @pm_reqs: Power management QoS requests related to this region
+ */
+struct pseudo_lock_region {
+ struct rdt_resource *r;
+ struct rdt_domain *d;
+ u32 cbm;
+ wait_queue_head_t lock_thread_wq;
+ int thread_done;
+ int cpu;
+ unsigned int line_size;
+ unsigned int size;
+ void *kmem;
+ unsigned int minor;
+ struct dentry *debugfs_dir;
+ struct list_head pm_reqs;
+};
+
+/**
* struct rdtgroup - store rdtgroup's data in resctrl file system.
* @kn: kernfs node
* @rdtgroup_list: linked list for all rdtgroups
@@ -106,16 +171,20 @@ struct mongroup {
* @type: indicates type of this rdtgroup - either
* monitor only or ctrl_mon group
* @mon: mongroup related data
+ * @mode: mode of resource group
+ * @plr: pseudo-locked region
*/
struct rdtgroup {
- struct kernfs_node *kn;
- struct list_head rdtgroup_list;
- u32 closid;
- struct cpumask cpu_mask;
- int flags;
- atomic_t waitcount;
- enum rdt_group_type type;
- struct mongroup mon;
+ struct kernfs_node *kn;
+ struct list_head rdtgroup_list;
+ u32 closid;
+ struct cpumask cpu_mask;
+ int flags;
+ atomic_t waitcount;
+ enum rdt_group_type type;
+ struct mongroup mon;
+ enum rdtgrp_mode mode;
+ struct pseudo_lock_region *plr;
};
/* rdtgroup.flags */
@@ -148,6 +217,7 @@ extern struct list_head rdt_all_groups;
extern int max_name_width, max_data_width;
int __init rdtgroup_init(void);
+void __exit rdtgroup_exit(void);
/**
* struct rftype - describe each file in the resctrl file system
@@ -216,22 +286,24 @@ struct mbm_state {
* @mbps_val: When mba_sc is enabled, this holds the bandwidth in MBps
* @new_ctrl: new ctrl value to be loaded
* @have_new_ctrl: did user provide new_ctrl for this domain
+ * @plr: pseudo-locked region (if any) associated with domain
*/
struct rdt_domain {
- struct list_head list;
- int id;
- struct cpumask cpu_mask;
- unsigned long *rmid_busy_llc;
- struct mbm_state *mbm_total;
- struct mbm_state *mbm_local;
- struct delayed_work mbm_over;
- struct delayed_work cqm_limbo;
- int mbm_work_cpu;
- int cqm_work_cpu;
- u32 *ctrl_val;
- u32 *mbps_val;
- u32 new_ctrl;
- bool have_new_ctrl;
+ struct list_head list;
+ int id;
+ struct cpumask cpu_mask;
+ unsigned long *rmid_busy_llc;
+ struct mbm_state *mbm_total;
+ struct mbm_state *mbm_local;
+ struct delayed_work mbm_over;
+ struct delayed_work cqm_limbo;
+ int mbm_work_cpu;
+ int cqm_work_cpu;
+ u32 *ctrl_val;
+ u32 *mbps_val;
+ u32 new_ctrl;
+ bool have_new_ctrl;
+ struct pseudo_lock_region *plr;
};
/**
@@ -351,7 +423,7 @@ struct rdt_resource {
struct rdt_cache cache;
struct rdt_membw membw;
const char *format_str;
- int (*parse_ctrlval) (char *buf, struct rdt_resource *r,
+ int (*parse_ctrlval) (void *data, struct rdt_resource *r,
struct rdt_domain *d);
struct list_head evt_list;
int num_rmid;
@@ -359,8 +431,8 @@ struct rdt_resource {
unsigned long fflags;
};
-int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d);
-int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d);
+int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d);
+int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d);
extern struct mutex rdtgroup_mutex;
@@ -368,7 +440,7 @@ extern struct rdt_resource rdt_resources_all[];
extern struct rdtgroup rdtgroup_default;
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
-int __init rdtgroup_init(void);
+extern struct dentry *debugfs_resctrl;
enum {
RDT_RESOURCE_L3,
@@ -439,13 +511,32 @@ void rdt_last_cmd_printf(const char *fmt, ...);
void rdt_ctrl_update(void *arg);
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
void rdtgroup_kn_unlock(struct kernfs_node *kn);
+int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name);
+int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
+ umode_t mask);
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
struct list_head **pos);
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off);
int rdtgroup_schemata_show(struct kernfs_open_file *of,
struct seq_file *s, void *v);
+bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+ u32 _cbm, int closid, bool exclusive);
+unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
+ u32 cbm);
+enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
+int rdtgroup_tasks_assigned(struct rdtgroup *r);
+int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
+int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm);
+bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
+int rdt_pseudo_lock_init(void);
+void rdt_pseudo_lock_release(void);
+int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
+void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
+int update_domains(struct rdt_resource *r, int closid);
+void closid_free(int closid);
int alloc_rmid(void);
void free_rmid(u32 rmid);
int rdt_get_mon_l3_config(struct rdt_resource *r);
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
index 116d57b248d3..af358ca05160 100644
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
@@ -64,9 +64,10 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
return true;
}
-int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d)
+int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d)
{
unsigned long data;
+ char *buf = _buf;
if (d->have_new_ctrl) {
rdt_last_cmd_printf("duplicate domain %d\n", d->id);
@@ -87,7 +88,7 @@ int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d)
* are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
* Additionally Haswell requires at least two bits set.
*/
-static bool cbm_validate(char *buf, unsigned long *data, struct rdt_resource *r)
+static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
unsigned long first_bit, zero_bit, val;
unsigned int cbm_len = r->cache.cbm_len;
@@ -122,22 +123,64 @@ static bool cbm_validate(char *buf, unsigned long *data, struct rdt_resource *r)
return true;
}
+struct rdt_cbm_parse_data {
+ struct rdtgroup *rdtgrp;
+ char *buf;
+};
+
/*
* Read one cache bit mask (hex). Check that it is valid for the current
* resource type.
*/
-int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
+int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
{
- unsigned long data;
+ struct rdt_cbm_parse_data *data = _data;
+ struct rdtgroup *rdtgrp = data->rdtgrp;
+ u32 cbm_val;
if (d->have_new_ctrl) {
rdt_last_cmd_printf("duplicate domain %d\n", d->id);
return -EINVAL;
}
- if(!cbm_validate(buf, &data, r))
+ /*
+ * Cannot set up more than one pseudo-locked region in a cache
+ * hierarchy.
+ */
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
+ rdtgroup_pseudo_locked_in_hierarchy(d)) {
+ rdt_last_cmd_printf("pseudo-locked region in hierarchy\n");
return -EINVAL;
- d->new_ctrl = data;
+ }
+
+ if (!cbm_validate(data->buf, &cbm_val, r))
+ return -EINVAL;
+
+ if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
+ rdtgrp->mode == RDT_MODE_SHAREABLE) &&
+ rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
+ rdt_last_cmd_printf("CBM overlaps with pseudo-locked region\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The CBM may not overlap with the CBM of another closid if
+ * either is exclusive.
+ */
+ if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
+ rdt_last_cmd_printf("overlaps with exclusive group\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
+ if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ rdt_last_cmd_printf("overlaps with other group\n");
+ return -EINVAL;
+ }
+ }
+
+ d->new_ctrl = cbm_val;
d->have_new_ctrl = true;
return 0;
@@ -149,8 +192,10 @@ int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
* separated by ";". The "id" is in decimal, and must match one of
* the "id"s for this resource.
*/
-static int parse_line(char *line, struct rdt_resource *r)
+static int parse_line(char *line, struct rdt_resource *r,
+ struct rdtgroup *rdtgrp)
{
+ struct rdt_cbm_parse_data data;
char *dom = NULL, *id;
struct rdt_domain *d;
unsigned long dom_id;
@@ -167,15 +212,32 @@ next:
dom = strim(dom);
list_for_each_entry(d, &r->domains, list) {
if (d->id == dom_id) {
- if (r->parse_ctrlval(dom, r, d))
+ data.buf = dom;
+ data.rdtgrp = rdtgrp;
+ if (r->parse_ctrlval(&data, r, d))
return -EINVAL;
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ /*
+				 * We are in pseudo-locking setup mode and
+				 * just parsed a valid CBM that should be
+				 * pseudo-locked. Only one locked region is
+				 * allowed per resource group and domain, so
+				 * do the required initialization for this
+				 * single region and return.
+ */
+ rdtgrp->plr->r = r;
+ rdtgrp->plr->d = d;
+ rdtgrp->plr->cbm = d->new_ctrl;
+ d->plr = rdtgrp->plr;
+ return 0;
+ }
goto next;
}
}
return -EINVAL;
}
-static int update_domains(struct rdt_resource *r, int closid)
+int update_domains(struct rdt_resource *r, int closid)
{
struct msr_param msr_param;
cpumask_var_t cpu_mask;
@@ -220,13 +282,14 @@ done:
return 0;
}
-static int rdtgroup_parse_resource(char *resname, char *tok, int closid)
+static int rdtgroup_parse_resource(char *resname, char *tok,
+ struct rdtgroup *rdtgrp)
{
struct rdt_resource *r;
for_each_alloc_enabled_rdt_resource(r) {
- if (!strcmp(resname, r->name) && closid < r->num_closid)
- return parse_line(tok, r);
+ if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
+ return parse_line(tok, r, rdtgrp);
}
rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname);
return -EINVAL;
@@ -239,7 +302,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
struct rdt_domain *dom;
struct rdt_resource *r;
char *tok, *resname;
- int closid, ret = 0;
+ int ret = 0;
/* Valid input requires a trailing newline */
if (nbytes == 0 || buf[nbytes - 1] != '\n')
@@ -253,7 +316,15 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
}
rdt_last_cmd_clear();
- closid = rdtgrp->closid;
+ /*
+	 * No changes to a pseudo-locked region are allowed. It has to be
+	 * removed and re-created instead.
+ */
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ ret = -EINVAL;
+ rdt_last_cmd_puts("resource group is pseudo-locked\n");
+ goto out;
+ }
for_each_alloc_enabled_rdt_resource(r) {
list_for_each_entry(dom, &r->domains, list)
@@ -272,17 +343,27 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
ret = -EINVAL;
goto out;
}
- ret = rdtgroup_parse_resource(resname, tok, closid);
+ ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
if (ret)
goto out;
}
for_each_alloc_enabled_rdt_resource(r) {
- ret = update_domains(r, closid);
+ ret = update_domains(r, rdtgrp->closid);
if (ret)
goto out;
}
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ /*
+ * If pseudo-locking fails we keep the resource group in
+ * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
+ * active and updated for just the domain the pseudo-locked
+ * region was requested for.
+ */
+ ret = rdtgroup_pseudo_lock_create(rdtgrp);
+ }
+
out:
rdtgroup_kn_unlock(of->kn);
return ret ?: nbytes;
@@ -318,10 +399,18 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (rdtgrp) {
- closid = rdtgrp->closid;
- for_each_alloc_enabled_rdt_resource(r) {
- if (closid < r->num_closid)
- show_doms(s, r, closid);
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ for_each_alloc_enabled_rdt_resource(r)
+ seq_printf(s, "%s:uninitialized\n", r->name);
+ } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ seq_printf(s, "%s:%d=%x\n", rdtgrp->plr->r->name,
+ rdtgrp->plr->d->id, rdtgrp->plr->cbm);
+ } else {
+ closid = rdtgrp->closid;
+ for_each_alloc_enabled_rdt_resource(r) {
+ if (closid < r->num_closid)
+ show_doms(s, r, closid);
+ }
}
} else {
ret = -ENOENT;
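
A standalone model of the contiguity rule cbm_validate() enforces (see the hunk above): a valid CBM is a single contiguous run of set bits, e.g. 0x3C, while a mask such as 0x5 leaves a hole and is rejected:

#include <stdbool.h>
#include <stdio.h>

static bool cbm_is_contiguous(unsigned long val, unsigned int cbm_len)
{
	unsigned int first_bit = 0, zero_bit;

	if (val == 0)
		return false;
	/* find the lowest set bit */
	while (!(val & (1UL << first_bit)))
		first_bit++;
	/* walk to the end of the contiguous run */
	zero_bit = first_bit;
	while (zero_bit < cbm_len && (val & (1UL << zero_bit)))
		zero_bit++;
	/* any set bit past the first zero means a hole */
	return !(val >> zero_bit);
}

int main(void)
{
	printf("0x3c: %s\n", cbm_is_contiguous(0x3c, 20) ? "ok" : "rejected");
	printf("0x05: %s\n", cbm_is_contiguous(0x05, 20) ? "ok" : "rejected");
	return 0;
}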
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
new file mode 100644
index 000000000000..40f3903ae5d9
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -0,0 +1,1522 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Resource Director Technology (RDT)
+ *
+ * Pseudo-locking support built on top of Cache Allocation Technology (CAT)
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Author: Reinette Chatre <reinette.chatre@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/debugfs.h>
+#include <linux/kthread.h>
+#include <linux/mman.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/intel-family.h>
+#include <asm/intel_rdt_sched.h>
+#include <asm/perf_event.h>
+
+#include "intel_rdt.h"
+
+#define CREATE_TRACE_POINTS
+#include "intel_rdt_pseudo_lock_event.h"
+
+/*
+ * MSR_MISC_FEATURE_CONTROL register enables the modification of hardware
+ * prefetcher state. Details about this register can be found in the MSR
+ * tables for specific platforms found in Intel's SDM.
+ */
+#define MSR_MISC_FEATURE_CONTROL 0x000001a4
+
+/*
+ * The bits needed to disable hardware prefetching vary based on the
+ * platform. During initialization we will discover which bits to use.
+ */
+static u64 prefetch_disable_bits;
+
+/*
+ * Major number assigned to and shared by all devices exposing
+ * pseudo-locked regions.
+ */
+static unsigned int pseudo_lock_major;
+static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0);
+static struct class *pseudo_lock_class;
+
+/**
+ * get_prefetch_disable_bits - prefetch disable bits of supported platforms
+ *
+ * Capture the list of platforms that have been validated to support
+ * pseudo-locking. This includes testing to ensure pseudo-locked regions
+ * with low cache miss rates can be created under variety of load conditions
+ * as well as that these pseudo-locked regions can maintain their low cache
+ * miss rates under variety of load conditions for significant lengths of time.
+ *
+ * After a platform has been validated to support pseudo-locking its
+ * hardware prefetch disable bits are included here as they are documented
+ * in the SDM.
+ *
+ * When adding a platform here, also add support for its cache events to
+ * measure_cycles_perf_fn()
+ *
+ * Return:
+ * If the platform is supported, the bits to disable hardware prefetchers;
+ * 0 if the platform is not supported.
+ */
+static u64 get_prefetch_disable_bits(void)
+{
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+ boot_cpu_data.x86 != 6)
+ return 0;
+
+ switch (boot_cpu_data.x86_model) {
+ case INTEL_FAM6_BROADWELL_X:
+ /*
+ * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
+ * as:
+ * 0 L2 Hardware Prefetcher Disable (R/W)
+ * 1 L2 Adjacent Cache Line Prefetcher Disable (R/W)
+ * 2 DCU Hardware Prefetcher Disable (R/W)
+ * 3 DCU IP Prefetcher Disable (R/W)
+ * 63:4 Reserved
+ */
+ return 0xF;
+ case INTEL_FAM6_ATOM_GOLDMONT:
+ case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ /*
+ * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
+ * as:
+ * 0 L2 Hardware Prefetcher Disable (R/W)
+ * 1 Reserved
+ * 2 DCU Hardware Prefetcher Disable (R/W)
+ * 63:3 Reserved
+ */
+ return 0x5;
+ }
+
+ return 0;
+}
+
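+/*
+ * Illustrative use of the returned mask (not part of the function
+ * above): on Broadwell server parts the full mask is 0xF, so
+ *
+ *	wrmsrl(MSR_MISC_FEATURE_CONTROL, 0xF);
+ *
+ * would disable all four documented prefetchers at once, and writing
+ * 0x0 would re-enable them - this is how prefetch_disable_bits is
+ * applied in the locking and measurement flows below.
+ */
+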
+/*
+ * Helper to write a 64-bit value to an MSR without tracing. Used when
+ * use of the cache should be restricted and the registers reserved for
+ * local register variables must not be clobbered.
+ */
+static inline void pseudo_wrmsrl_notrace(unsigned int msr, u64 val)
+{
+ __wrmsr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
+}
+
+/**
+ * pseudo_lock_minor_get - Obtain available minor number
+ * @minor: Pointer to where new minor number will be stored
+ *
+ * A bitmask is used to track available minor numbers. Here the next free
+ * minor number is marked as unavailable and returned.
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+static int pseudo_lock_minor_get(unsigned int *minor)
+{
+ unsigned long first_bit;
+
+ first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS);
+
+ if (first_bit == MINORBITS)
+ return -ENOSPC;
+
+ __clear_bit(first_bit, &pseudo_lock_minor_avail);
+ *minor = first_bit;
+
+ return 0;
+}
+
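+/*
+ * Worked example of the bitmap above (illustrative): with all minors
+ * free, pseudo_lock_minor_avail has bits 0..MINORBITS-1 set, so
+ * find_first_bit() returns 0; that bit is cleared and minor 0 handed
+ * out. A later pseudo_lock_minor_release(0) sets the bit again,
+ * making minor 0 the next one returned.
+ */
+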
+/**
+ * pseudo_lock_minor_release - Return minor number to available
+ * @minor: The minor number made available
+ */
+static void pseudo_lock_minor_release(unsigned int minor)
+{
+ __set_bit(minor, &pseudo_lock_minor_avail);
+}
+
+/**
+ * region_find_by_minor - Locate a pseudo-lock region by inode minor number
+ * @minor: The minor number of the device representing pseudo-locked region
+ *
+ * When the character device is accessed we need to determine which
+ * pseudo-locked region it belongs to. This is done by matching the
+ * minor number of the device to the pseudo-locked region to which it
+ * was assigned.
+ *
+ * Minor numbers are assigned at the time a pseudo-locked region is associated
+ * with a cache instance.
+ *
+ * Return: On success return pointer to resource group owning the pseudo-locked
+ * region, NULL on failure.
+ */
+static struct rdtgroup *region_find_by_minor(unsigned int minor)
+{
+ struct rdtgroup *rdtgrp, *rdtgrp_match = NULL;
+
+ list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+ if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
+ rdtgrp_match = rdtgrp;
+ break;
+ }
+ }
+ return rdtgrp_match;
+}
+
+/**
+ * pseudo_lock_pm_req - A power management QoS request list entry
+ * @list: Entry within the @pm_reqs list for a pseudo-locked region
+ * @req: PM QoS request
+ */
+struct pseudo_lock_pm_req {
+ struct list_head list;
+ struct dev_pm_qos_request req;
+};
+
+static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
+{
+ struct pseudo_lock_pm_req *pm_req, *next;
+
+ list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
+ dev_pm_qos_remove_request(&pm_req->req);
+ list_del(&pm_req->list);
+ kfree(pm_req);
+ }
+}
+
+/**
+ * pseudo_lock_cstates_constrain - Restrict cores from entering C6
+ * @plr: Pseudo-locked region for which C-states are constrained
+ *
+ * To prevent the cache from being affected by power management entering
+ * C6 has to be avoided. This is accomplished by requesting a latency
+ * requirement lower than lowest C6 exit latency of all supported
+ * platforms as found in the cpuidle state tables in the intel_idle driver.
+ * At this time it is possible to do so with a single latency requirement
+ * for all supported platforms.
+ *
+ * Since Goldmont is supported, which is affected by X86_BUG_MONITOR,
+ * the ACPI latencies need to be considered while keeping in mind that C2
+ * may be set to map to deeper sleep states. In this case the latency
+ * requirement needs to prevent entering C2 also.
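+ *
+ * As a rough illustration of the headroom: the intel_idle tables list
+ * C6 exit latencies in the hundreds of microseconds for the supported
+ * platforms, so the 30 usec request used below rules out C6 while
+ * still permitting shallow C-states.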
+ */
+static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
+{
+ struct pseudo_lock_pm_req *pm_req;
+ int cpu;
+ int ret;
+
+ for_each_cpu(cpu, &plr->d->cpu_mask) {
+ pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
+ if (!pm_req) {
+ rdt_last_cmd_puts("fail allocating mem for PM QoS\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ ret = dev_pm_qos_add_request(get_cpu_device(cpu),
+ &pm_req->req,
+ DEV_PM_QOS_RESUME_LATENCY,
+ 30);
+ if (ret < 0) {
+ rdt_last_cmd_printf("fail to add latency req cpu%d\n",
+ cpu);
+ kfree(pm_req);
+ ret = -1;
+ goto out_err;
+ }
+ list_add(&pm_req->list, &plr->pm_reqs);
+ }
+
+ return 0;
+
+out_err:
+ pseudo_lock_cstates_relax(plr);
+ return ret;
+}
+
+/**
+ * pseudo_lock_region_clear - Reset pseudo-lock region data
+ * @plr: pseudo-lock region
+ *
+ * All content of the pseudo-locked region is reset - any memory
+ * allocated is freed.
+ *
+ * Return: void
+ */
+static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
+{
+ plr->size = 0;
+ plr->line_size = 0;
+ kfree(plr->kmem);
+ plr->kmem = NULL;
+ plr->r = NULL;
+ if (plr->d)
+ plr->d->plr = NULL;
+ plr->d = NULL;
+ plr->cbm = 0;
+ plr->debugfs_dir = NULL;
+}
+
+/**
+ * pseudo_lock_region_init - Initialize pseudo-lock region information
+ * @plr: pseudo-lock region
+ *
+ * Called after the user provided a schemata to be pseudo-locked. On
+ * entry the &struct pseudo_lock_region has already been initialized from
+ * that schemata with the resource, domain, and capacity bitmask. Here
+ * the information required for pseudo-locking is deduced from this data
+ * and the &struct pseudo_lock_region is initialized further, including:
+ * - size in bytes of the region to be pseudo-locked
+ * - cache line size to know the stride with which data needs to be accessed
+ * to be pseudo-locked
+ * - a cpu associated with the cache instance on which the pseudo-locking
+ * flow can be executed
+ *
+ * Return: 0 on success, <0 on failure. Descriptive error will be written
+ * to last_cmd_status buffer.
+ */
+static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
+{
+ struct cpu_cacheinfo *ci;
+ int ret;
+ int i;
+
+ /* Pick the first cpu we find that is associated with the cache. */
+ plr->cpu = cpumask_first(&plr->d->cpu_mask);
+
+ if (!cpu_online(plr->cpu)) {
+ rdt_last_cmd_printf("cpu %u associated with cache not online\n",
+ plr->cpu);
+ ret = -ENODEV;
+ goto out_region;
+ }
+
+ ci = get_cpu_cacheinfo(plr->cpu);
+
+ plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm);
+
+ for (i = 0; i < ci->num_leaves; i++) {
+ if (ci->info_list[i].level == plr->r->cache_level) {
+ plr->line_size = ci->info_list[i].coherency_line_size;
+ return 0;
+ }
+ }
+
+ ret = -1;
+ rdt_last_cmd_puts("unable to determine cache line size\n");
+out_region:
+ pseudo_lock_region_clear(plr);
+ return ret;
+}
+
+/**
+ * pseudo_lock_init - Initialize a pseudo-lock region
+ * @rdtgrp: resource group to which new pseudo-locked region will belong
+ *
+ * A pseudo-locked region is associated with a resource group. When this
+ * association is created the pseudo-locked region is initialized. The
+ * details of the pseudo-locked region are not known at this time so only
+ * allocation is done and association established.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static int pseudo_lock_init(struct rdtgroup *rdtgrp)
+{
+ struct pseudo_lock_region *plr;
+
+ plr = kzalloc(sizeof(*plr), GFP_KERNEL);
+ if (!plr)
+ return -ENOMEM;
+
+ init_waitqueue_head(&plr->lock_thread_wq);
+ INIT_LIST_HEAD(&plr->pm_reqs);
+ rdtgrp->plr = plr;
+ return 0;
+}
+
+/**
+ * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked
+ * @plr: pseudo-lock region
+ *
+ * Initialize the details required to set up the pseudo-locked region and
+ * allocate the contiguous memory that will be pseudo-locked to the cache.
+ *
+ * Return: 0 on success, <0 on failure. Descriptive error will be written
+ * to last_cmd_status buffer.
+ */
+static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
+{
+ int ret;
+
+ ret = pseudo_lock_region_init(plr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We do not yet support contiguous regions larger than
+ * KMALLOC_MAX_SIZE.
+ */
+ if (plr->size > KMALLOC_MAX_SIZE) {
+ rdt_last_cmd_puts("requested region exceeds maximum size\n");
+ ret = -E2BIG;
+ goto out_region;
+ }
+
+ plr->kmem = kzalloc(plr->size, GFP_KERNEL);
+ if (!plr->kmem) {
+ rdt_last_cmd_puts("unable to allocate memory\n");
+ ret = -ENOMEM;
+ goto out_region;
+ }
+
+ ret = 0;
+ goto out;
+out_region:
+ pseudo_lock_region_clear(plr);
+out:
+ return ret;
+}
+
+/**
+ * pseudo_lock_free - Free a pseudo-locked region
+ * @rdtgrp: resource group to which pseudo-locked region belonged
+ *
+ * The pseudo-locked region's resources either have already been released
+ * or were never created. Now it can be freed and disassociated from the
+ * resource group.
+ *
+ * Return: void
+ */
+static void pseudo_lock_free(struct rdtgroup *rdtgrp)
+{
+ pseudo_lock_region_clear(rdtgrp->plr);
+ kfree(rdtgrp->plr);
+ rdtgrp->plr = NULL;
+}
+
+/**
+ * pseudo_lock_fn - Load kernel memory into cache
+ * @_rdtgrp: resource group to which pseudo-lock region belongs
+ *
+ * This is the core pseudo-locking flow.
+ *
+ * First we ensure that the kernel memory cannot be found in the cache.
+ * Then, while taking care that there will be as little interference as
+ * possible, the memory to be loaded is accessed while the core is running
+ * with class of service set to the bitmask of the pseudo-locked region.
+ * After this is complete no future CAT allocations will be allowed to
+ * overlap with this bitmask.
+ *
+ * Local register variables are utilized to ensure that the memory region
+ * to be locked is the only memory access made during the critical locking
+ * loop.
+ *
+ * Return: 0. Waiter on waitqueue will be woken on completion.
+ */
+static int pseudo_lock_fn(void *_rdtgrp)
+{
+ struct rdtgroup *rdtgrp = _rdtgrp;
+ struct pseudo_lock_region *plr = rdtgrp->plr;
+ u32 rmid_p, closid_p;
+ unsigned long i;
+#ifdef CONFIG_KASAN
+ /*
+ * The registers used for local register variables are also used
+ * when KASAN is active. When KASAN is active we use a regular
+ * variable to ensure we always use a valid pointer, but the cost
+ * is that this variable will enter the cache through evicting the
+ * memory we are trying to lock into the cache. Thus expect lower
+ * pseudo-locking success rate when KASAN is active.
+ */
+ unsigned int line_size;
+ unsigned int size;
+ void *mem_r;
+#else
+ register unsigned int line_size asm("esi");
+ register unsigned int size asm("edi");
+#ifdef CONFIG_X86_64
+ register void *mem_r asm("rbx");
+#else
+ register void *mem_r asm("ebx");
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_KASAN */
+
+ /*
+	 * Make sure none of the allocated memory is cached. If any of it is,
+	 * we will get a cache hit in the loop below from outside of the
+	 * pseudo-locked region.
+	 * wbinvd (as opposed to clflush/clflushopt) is required to
+	 * increase the likelihood that the allocated cache portion will be
+	 * filled with the associated memory.
+ */
+ native_wbinvd();
+
+ /*
+	 * Always called with interrupts enabled. By disabling interrupts we
+	 * ensure that we will not be preempted during this critical section.
+ */
+ local_irq_disable();
+
+ /*
+ * Call wrmsr and rdmsr as directly as possible to avoid tracing
+ * clobbering local register variables or affecting cache accesses.
+ *
+ * Disable the hardware prefetcher so that when the end of the memory
+ * being pseudo-locked is reached the hardware will not read beyond
+ * the buffer and evict pseudo-locked memory read earlier from the
+ * cache.
+ */
+ __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ closid_p = this_cpu_read(pqr_state.cur_closid);
+ rmid_p = this_cpu_read(pqr_state.cur_rmid);
+ mem_r = plr->kmem;
+ size = plr->size;
+ line_size = plr->line_size;
+ /*
+ * Critical section begin: start by writing the closid associated
+ * with the capacity bitmask of the cache region being
+ * pseudo-locked followed by reading of kernel memory to load it
+ * into the cache.
+ */
+ __wrmsr(IA32_PQR_ASSOC, rmid_p, rdtgrp->closid);
+ /*
+ * Cache was flushed earlier. Now access kernel memory to read it
+	 * into the cache region associated with the just-activated closid.
+ * Loop over data twice:
+ * - In first loop the cache region is shared with the page walker
+ * as it populates the paging structure caches (including TLB).
+ * - In the second loop the paging structure caches are used and
+ * cache region is populated with the memory being referenced.
+ */
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ /*
+ * Add a barrier to prevent speculative execution of this
+ * loop reading beyond the end of the buffer.
+ */
+ rmb();
+ asm volatile("mov (%0,%1,1), %%eax\n\t"
+ :
+ : "r" (mem_r), "r" (i)
+ : "%eax", "memory");
+ }
+ for (i = 0; i < size; i += line_size) {
+ /*
+ * Add a barrier to prevent speculative execution of this
+ * loop reading beyond the end of the buffer.
+ */
+ rmb();
+ asm volatile("mov (%0,%1,1), %%eax\n\t"
+ :
+ : "r" (mem_r), "r" (i)
+ : "%eax", "memory");
+ }
+ /*
+ * Critical section end: restore closid with capacity bitmask that
+ * does not overlap with pseudo-locked region.
+ */
+ __wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p);
+
+ /* Re-enable the hardware prefetcher(s) */
+ wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+ local_irq_enable();
+
+ plr->thread_done = 1;
+ wake_up_interruptible(&plr->lock_thread_wq);
+ return 0;
+}
+
+/**
+ * rdtgroup_monitor_in_progress - Test if monitoring in progress
+ * @rdtgrp: resource group being queried
+ *
+ * Return: 1 if monitor groups have been created for this resource
+ * group, 0 otherwise.
+ */
+static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp)
+{
+ return !list_empty(&rdtgrp->mon.crdtgrp_list);
+}
+
+/**
+ * rdtgroup_locksetup_user_restrict - Restrict user access to group
+ * @rdtgrp: resource group needing access restricted
+ *
+ * A resource group used for cache pseudo-locking cannot have cpus or tasks
+ * assigned to it. This is communicated to the user by restricting access
+ * to all the files that can be used to make such changes.
+ *
+ * Permissions are restored with rdtgroup_locksetup_user_restore().
+ *
+ * Return: 0 on success, <0 on failure. If a failure occurs during the
+ * restriction of access an attempt will be made to restore permissions but
+ * the state of the mode of these files will be uncertain when a failure
+ * occurs.
+ */
+static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp)
+{
+ int ret;
+
+ ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
+ if (ret)
+ return ret;
+
+ ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
+ if (ret)
+ goto err_tasks;
+
+ ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
+ if (ret)
+ goto err_cpus;
+
+ if (rdt_mon_capable) {
+ ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups");
+ if (ret)
+ goto err_cpus_list;
+ }
+
+ ret = 0;
+ goto out;
+
+err_cpus_list:
+ rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
+err_cpus:
+ rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
+err_tasks:
+ rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
+out:
+ return ret;
+}
+
+/**
+ * rdtgroup_locksetup_user_restore - Restore user access to group
+ * @rdtgrp: resource group needing access restored
+ *
+ * Restore all file access previously removed using
+ * rdtgroup_locksetup_user_restrict().
+ *
+ * Return: 0 on success, <0 on failure. If a failure occurs during the
+ * restoration of access an attempt will be made to restrict permissions
+ * again but the state of the mode of these files will be uncertain when
+ * a failure occurs.
+ */
+static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp)
+{
+ int ret;
+
+ ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
+ if (ret)
+ return ret;
+
+ ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
+ if (ret)
+ goto err_tasks;
+
+ ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
+ if (ret)
+ goto err_cpus;
+
+ if (rdt_mon_capable) {
+ ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777);
+ if (ret)
+ goto err_cpus_list;
+ }
+
+ ret = 0;
+ goto out;
+
+err_cpus_list:
+ rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
+err_cpus:
+ rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
+err_tasks:
+ rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
+out:
+ return ret;
+}
+
+/**
+ * rdtgroup_locksetup_enter - Resource group enters locksetup mode
+ * @rdtgrp: resource group requested to enter locksetup mode
+ *
+ * A resource group enters locksetup mode to reflect that it would be used
+ * to represent a pseudo-locked region and is in the process of being set
+ * up to do so. A resource group used for a pseudo-locked region would
+ * lose the closid associated with it so we cannot allow it to have any
+ * tasks or cpus assigned nor permit tasks or cpus to be assigned in the
+ * future. Monitoring of a pseudo-locked region is not allowed either.
+ *
+ * The above and more restrictions on a pseudo-locked region are checked
+ * for and enforced before the resource group enters the locksetup mode.
+ *
+ * Return: 0 if the resource group successfully entered locksetup mode, <0
+ * on failure. On failure the last_cmd_status buffer is updated with text to
+ * communicate details of failure to the user.
+ */
+int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
+{
+ int ret;
+
+ /*
+ * The default resource group can neither be removed nor lose the
+ * default closid associated with it.
+ */
+ if (rdtgrp == &rdtgroup_default) {
+ rdt_last_cmd_puts("cannot pseudo-lock default group\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Cache Pseudo-locking not supported when CDP is enabled.
+ *
+ * Some things to consider if you would like to enable this
+ * support (using L3 CDP as example):
+ * - When CDP is enabled two separate resources are exposed,
+ * L3DATA and L3CODE, but they are actually on the same cache.
+ * The implication for pseudo-locking is that if a
+ * pseudo-locked region is created on a domain of one
+ * resource (eg. L3CODE), then a pseudo-locked region cannot
+ * be created on that same domain of the other resource
+ * (eg. L3DATA). This is because the creation of a
+ * pseudo-locked region involves a call to wbinvd that will
+	 *   affect all cache allocations on that particular domain.
+	 * - Considering the previous, it may be possible to expose
+	 *   only one of the CDP resources to pseudo-locking and hide
+	 *   the other. For example, we could consider exposing only
+	 *   L3DATA: since the L3 cache is unified it is still possible
+	 *   to place instructions there and execute them.
+ * - If only one region is exposed to pseudo-locking we should
+ * still keep in mind that availability of a portion of cache
+ * for pseudo-locking should take into account both resources.
+ * Similarly, if a pseudo-locked region is created in one
+ * resource, the portion of cache used by it should be made
+ * unavailable to all future allocations from both resources.
+ */
+ if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled ||
+ rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) {
+ rdt_last_cmd_puts("CDP enabled\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Not knowing the bits to disable prefetching implies that this
+ * platform does not support Cache Pseudo-Locking.
+ */
+ prefetch_disable_bits = get_prefetch_disable_bits();
+ if (prefetch_disable_bits == 0) {
+ rdt_last_cmd_puts("pseudo-locking not supported\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_monitor_in_progress(rdtgrp)) {
+ rdt_last_cmd_puts("monitoring in progress\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_tasks_assigned(rdtgrp)) {
+ rdt_last_cmd_puts("tasks assigned to resource group\n");
+ return -EINVAL;
+ }
+
+ if (!cpumask_empty(&rdtgrp->cpu_mask)) {
+ rdt_last_cmd_puts("CPUs assigned to resource group\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
+ rdt_last_cmd_puts("unable to modify resctrl permissions\n");
+ return -EIO;
+ }
+
+ ret = pseudo_lock_init(rdtgrp);
+ if (ret) {
+ rdt_last_cmd_puts("unable to init pseudo-lock region\n");
+ goto out_release;
+ }
+
+ /*
+	 * If this system is capable of monitoring, an RMID would have been
+	 * allocated when the control group was created. It is not needed
+	 * anymore once this group is used for pseudo-locking. This call
+	 * is safe on platforms not capable of monitoring.
+ */
+ free_rmid(rdtgrp->mon.rmid);
+
+ ret = 0;
+ goto out;
+
+out_release:
+ rdtgroup_locksetup_user_restore(rdtgrp);
+out:
+ return ret;
+}
+
+/**
+ * rdtgroup_locksetup_exit - Resource group exits locksetup mode
+ * @rdtgrp: resource group
+ *
+ * When a resource group exits locksetup mode the earlier restrictions are
+ * lifted.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
+{
+ int ret;
+
+ if (rdt_mon_capable) {
+ ret = alloc_rmid();
+ if (ret < 0) {
+ rdt_last_cmd_puts("out of RMIDs\n");
+ return ret;
+ }
+ rdtgrp->mon.rmid = ret;
+ }
+
+ ret = rdtgroup_locksetup_user_restore(rdtgrp);
+ if (ret) {
+ free_rmid(rdtgrp->mon.rmid);
+ return ret;
+ }
+
+ pseudo_lock_free(rdtgrp);
+ return 0;
+}
+
+/**
+ * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
+ * @d: RDT domain
+ * @_cbm: CBM to test
+ *
+ * @d represents a cache instance and @_cbm a capacity bitmask that is
+ * considered for it. Determine if @_cbm overlaps with any existing
+ * pseudo-locked region on @d.
+ *
+ * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * otherwise.
+ */
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+{
+ unsigned long *cbm = (unsigned long *)&_cbm;
+ unsigned long *cbm_b;
+ unsigned int cbm_len;
+
+ if (d->plr) {
+ cbm_len = d->plr->r->cache.cbm_len;
+ cbm_b = (unsigned long *)&d->plr->cbm;
+ if (bitmap_intersects(cbm, cbm_b, cbm_len))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
+ * @d: RDT domain under test
+ *
+ * The setup of a pseudo-locked region affects all cache instances within
+ * the hierarchy of the region. It is thus essential to know if any
+ * pseudo-locked regions exist within a cache hierarchy to prevent any
+ * attempts to create new pseudo-locked regions in the same hierarchy.
+ *
+ * Return: true if a pseudo-locked region exists in the hierarchy of @d or
+ *         if it is not possible to test due to a memory allocation issue,
+ * false otherwise.
+ */
+bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
+{
+ cpumask_var_t cpu_with_psl;
+ struct rdt_resource *r;
+ struct rdt_domain *d_i;
+ bool ret = false;
+
+ if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL))
+ return true;
+
+ /*
+ * First determine which cpus have pseudo-locked regions
+ * associated with them.
+ */
+ for_each_alloc_enabled_rdt_resource(r) {
+ list_for_each_entry(d_i, &r->domains, list) {
+ if (d_i->plr)
+ cpumask_or(cpu_with_psl, cpu_with_psl,
+ &d_i->cpu_mask);
+ }
+ }
+
+ /*
+ * Next test if new pseudo-locked region would intersect with
+ * existing region.
+ */
+ if (cpumask_intersects(&d->cpu_mask, cpu_with_psl))
+ ret = true;
+
+ free_cpumask_var(cpu_with_psl);
+ return ret;
+}
+
+/**
+ * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
+ * @_plr: pseudo-lock region to measure
+ *
+ * There is no deterministic way to test if a memory region is cached. One
+ * way is to measure how long it takes to read the memory; the access
+ * latency reveals how close to the cpu the data was. Even
+ * more, if the prefetcher is disabled and the memory is read at a stride
+ * of half the cache line, then a cache miss will be easy to spot since the
+ * read of the first half would be significantly slower than the read of
+ * the second half.
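+ *
+ * An illustrative example with a 64 byte cache line: the loop below
+ * reads at a stride of 32 bytes, so every second read starts a new
+ * cache line. On a miss the first 32 byte read of a line is then
+ * measurably slower than the following read within that same line.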
+ *
+ * Return: 0. Waiter on waitqueue will be woken on completion.
+ */
+static int measure_cycles_lat_fn(void *_plr)
+{
+ struct pseudo_lock_region *plr = _plr;
+ unsigned long i;
+ u64 start, end;
+#ifdef CONFIG_KASAN
+ /*
+ * The registers used for local register variables are also used
+ * when KASAN is active. When KASAN is active we use a regular
+ * variable to ensure we always use a valid pointer to access memory.
+ * The cost is that accessing this pointer, which could be in
+ * cache, will be included in the measurement of memory read latency.
+ */
+ void *mem_r;
+#else
+#ifdef CONFIG_X86_64
+ register void *mem_r asm("rbx");
+#else
+ register void *mem_r asm("ebx");
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_KASAN */
+
+ local_irq_disable();
+ /*
+ * The wrmsr call may be reordered with the assignment below it.
+ * Call wrmsr as directly as possible to avoid tracing clobbering
+ * local register variable used for memory pointer.
+ */
+ __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ mem_r = plr->kmem;
+ /*
+ * Dummy execute of the time measurement to load the needed
+ * instructions into the L1 instruction cache.
+ */
+ start = rdtsc_ordered();
+ for (i = 0; i < plr->size; i += 32) {
+ start = rdtsc_ordered();
+ asm volatile("mov (%0,%1,1), %%eax\n\t"
+ :
+ : "r" (mem_r), "r" (i)
+ : "%eax", "memory");
+ end = rdtsc_ordered();
+ trace_pseudo_lock_mem_latency((u32)(end - start));
+ }
+ wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+ local_irq_enable();
+ plr->thread_done = 1;
+ wake_up_interruptible(&plr->lock_thread_wq);
+ return 0;
+}
+
+static int measure_cycles_perf_fn(void *_plr)
+{
+ unsigned long long l3_hits = 0, l3_miss = 0;
+ u64 l3_hit_bits = 0, l3_miss_bits = 0;
+ struct pseudo_lock_region *plr = _plr;
+ unsigned long long l2_hits, l2_miss;
+ u64 l2_hit_bits, l2_miss_bits;
+ unsigned long i;
+#ifdef CONFIG_KASAN
+ /*
+ * The registers used for local register variables are also used
+ * when KASAN is active. When KASAN is active we use regular variables
+ * at the cost of including cache access latency to these variables
+ * in the measurements.
+ */
+ unsigned int line_size;
+ unsigned int size;
+ void *mem_r;
+#else
+ register unsigned int line_size asm("esi");
+ register unsigned int size asm("edi");
+#ifdef CONFIG_X86_64
+ register void *mem_r asm("rbx");
+#else
+ register void *mem_r asm("ebx");
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_KASAN */
+
+ /*
+ * Non-architectural event for the Goldmont Microarchitecture
+ * from Intel x86 Architecture Software Developer Manual (SDM):
+ * MEM_LOAD_UOPS_RETIRED D1H (event number)
+ * Umask values:
+ * L1_HIT 01H
+ * L2_HIT 02H
+ * L1_MISS 08H
+ * L2_MISS 10H
+ *
+ * On Broadwell Microarchitecture the MEM_LOAD_UOPS_RETIRED event
+ * has two "no fix" errata associated with it: BDM35 and BDM100. On
+ * this platform we use the following events instead:
+ * L2_RQSTS 24H (Documented in https://download.01.org/perfmon/BDW/)
+ * REFERENCES FFH
+ * MISS 3FH
+ * LONGEST_LAT_CACHE 2EH (Documented in SDM)
+ * REFERENCE 4FH
+ * MISS 41H
+ */
+
+ /*
+ * Start by setting flags for IA32_PERFEVTSELx:
+ * OS (Operating system mode) 0x2
+ * INT (APIC interrupt enable) 0x10
+ * EN (Enable counter) 0x40
+ *
+ * Then add the Umask value and event number to select performance
+ * event.
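+	 *
+	 * For example, the Goldmont L2_HIT encoding built below is
+	 * (0x52 << 16) | (0x02 << 8) | 0xd1: the OS/INT/EN flags land
+	 * in bits 23:16, the Umask (02H from the table above) in bits
+	 * 15:8, and the event number (D1H) in bits 7:0.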
+ */
+
+ switch (boot_cpu_data.x86_model) {
+ case INTEL_FAM6_ATOM_GOLDMONT:
+ case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ l2_hit_bits = (0x52ULL << 16) | (0x2 << 8) | 0xd1;
+ l2_miss_bits = (0x52ULL << 16) | (0x10 << 8) | 0xd1;
+ break;
+ case INTEL_FAM6_BROADWELL_X:
+ /* On BDW the l2_hit_bits count references, not hits */
+ l2_hit_bits = (0x52ULL << 16) | (0xff << 8) | 0x24;
+ l2_miss_bits = (0x52ULL << 16) | (0x3f << 8) | 0x24;
+ /* On BDW the l3_hit_bits count references, not hits */
+ l3_hit_bits = (0x52ULL << 16) | (0x4f << 8) | 0x2e;
+ l3_miss_bits = (0x52ULL << 16) | (0x41 << 8) | 0x2e;
+ break;
+ default:
+ goto out;
+ }
+
+ local_irq_disable();
+ /*
+	 * Call wrmsr directly to prevent the local register variables from
+	 * being overwritten due to reordering of their assignments with
+	 * the wrmsr calls.
+ */
+ __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ /* Disable events and reset counters */
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 1, 0x0);
+ if (l3_hit_bits > 0) {
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 2, 0x0);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 3, 0x0);
+ }
+ /* Set and enable the L2 counters */
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0, l2_hit_bits);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1, l2_miss_bits);
+ if (l3_hit_bits > 0) {
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2,
+ l3_hit_bits);
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3,
+ l3_miss_bits);
+ }
+ mem_r = plr->kmem;
+ size = plr->size;
+ line_size = plr->line_size;
+ for (i = 0; i < size; i += line_size) {
+ asm volatile("mov (%0,%1,1), %%eax\n\t"
+ :
+ : "r" (mem_r), "r" (i)
+ : "%eax", "memory");
+ }
+ /*
+ * Call wrmsr directly (no tracing) to not influence
+ * the cache access counters as they are disabled.
+ */
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0,
+ l2_hit_bits & ~(0x40ULL << 16));
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1,
+ l2_miss_bits & ~(0x40ULL << 16));
+ if (l3_hit_bits > 0) {
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2,
+ l3_hit_bits & ~(0x40ULL << 16));
+ pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3,
+ l3_miss_bits & ~(0x40ULL << 16));
+ }
+ l2_hits = native_read_pmc(0);
+ l2_miss = native_read_pmc(1);
+ if (l3_hit_bits > 0) {
+ l3_hits = native_read_pmc(2);
+ l3_miss = native_read_pmc(3);
+ }
+ wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+ local_irq_enable();
+ /*
+	 * On BDW we count references and misses, so the hits need to be
+	 * adjusted. Sometimes the miss counter reads a bit more than the
+	 * references, for example, x references but x + 1 misses. To not
+	 * report invalid hit values in this case we treat the misses as
+	 * equal to the references.
+ */
+ if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X)
+ l2_hits -= (l2_miss > l2_hits ? l2_hits : l2_miss);
+ trace_pseudo_lock_l2(l2_hits, l2_miss);
+ if (l3_hit_bits > 0) {
+ if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X)
+ l3_hits -= (l3_miss > l3_hits ? l3_hits : l3_miss);
+ trace_pseudo_lock_l3(l3_hits, l3_miss);
+ }
+
+out:
+ plr->thread_done = 1;
+ wake_up_interruptible(&plr->lock_thread_wq);
+ return 0;
+}
+
+/**
+ * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
+ * @rdtgrp: Resource group to which the pseudo-locked region belongs
+ * @sel: Selector of which measurement to perform on a pseudo-locked region
+ *
+ * The measurement of latency to access a pseudo-locked region should be
+ * done from a cpu that is associated with that pseudo-locked region.
+ * Determine which cpu is associated with this region, start a thread on
+ * that cpu to perform the measurement, and wait for that thread to complete.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
+{
+ struct pseudo_lock_region *plr = rdtgrp->plr;
+ struct task_struct *thread;
+ unsigned int cpu;
+ int ret = -1;
+
+ cpus_read_lock();
+ mutex_lock(&rdtgroup_mutex);
+
+ if (rdtgrp->flags & RDT_DELETED) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ plr->thread_done = 0;
+ cpu = cpumask_first(&plr->d->cpu_mask);
+ if (!cpu_online(cpu)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (sel == 1)
+ thread = kthread_create_on_node(measure_cycles_lat_fn, plr,
+ cpu_to_node(cpu),
+ "pseudo_lock_measure/%u",
+ cpu);
+ else if (sel == 2)
+ thread = kthread_create_on_node(measure_cycles_perf_fn, plr,
+ cpu_to_node(cpu),
+ "pseudo_lock_measure/%u",
+ cpu);
+ else
+ goto out;
+
+ if (IS_ERR(thread)) {
+ ret = PTR_ERR(thread);
+ goto out;
+ }
+ kthread_bind(thread, cpu);
+ wake_up_process(thread);
+
+ ret = wait_event_interruptible(plr->lock_thread_wq,
+ plr->thread_done == 1);
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+
+out:
+ mutex_unlock(&rdtgroup_mutex);
+ cpus_read_unlock();
+ return ret;
+}
+
+static ssize_t pseudo_lock_measure_trigger(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rdtgroup *rdtgrp = file->private_data;
+ size_t buf_size;
+ char buf[32];
+ int ret;
+ int sel;
+
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ ret = kstrtoint(buf, 10, &sel);
+ if (ret == 0) {
+ if (sel != 1)
+ return -EINVAL;
+ ret = debugfs_file_get(file->f_path.dentry);
+ if (ret)
+ return ret;
+ ret = pseudo_lock_measure_cycles(rdtgrp, sel);
+ if (ret == 0)
+ ret = count;
+ debugfs_file_put(file->f_path.dentry);
+ }
+
+ return ret;
+}
+
+static const struct file_operations pseudo_measure_fops = {
+ .write = pseudo_lock_measure_trigger,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
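+/*
+ * Illustrative debugfs usage of the trigger above (group name and
+ * mount points are assumptions):
+ *
+ *	# echo 1 > /sys/kernel/debug/resctrl/<group>/pseudo_lock_measure
+ *	# cat /sys/kernel/debug/tracing/trace_pipe
+ *
+ * The write starts measure_cycles_lat_fn() on a cpu of the region and
+ * the latency samples arrive via the pseudo_lock_mem_latency event.
+ */
+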
+/**
+ * rdtgroup_pseudo_lock_create - Create a pseudo-locked region
+ * @rdtgrp: resource group to which pseudo-lock region belongs
+ *
+ * Called when a resource group in the pseudo-locksetup mode receives a
+ * valid schemata that should be pseudo-locked. Since the resource group is
+ * in pseudo-locksetup mode the &struct pseudo_lock_region has already been
+ * allocated and initialized with the essential information. If a failure
+ * occurs the resource group remains in the pseudo-locksetup mode with the
+ * &struct pseudo_lock_region associated with it, but cleared from all
+ * information and ready for the user to re-attempt pseudo-locking by
+ * writing the schemata again.
+ *
+ * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0
+ * on failure. Descriptive error will be written to last_cmd_status buffer.
+ */
+int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
+{
+ struct pseudo_lock_region *plr = rdtgrp->plr;
+ struct task_struct *thread;
+ unsigned int new_minor;
+ struct device *dev;
+ int ret;
+
+ ret = pseudo_lock_region_alloc(plr);
+ if (ret < 0)
+ return ret;
+
+ ret = pseudo_lock_cstates_constrain(plr);
+ if (ret < 0) {
+ ret = -EINVAL;
+ goto out_region;
+ }
+
+ plr->thread_done = 0;
+
+ thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp,
+ cpu_to_node(plr->cpu),
+ "pseudo_lock/%u", plr->cpu);
+ if (IS_ERR(thread)) {
+ ret = PTR_ERR(thread);
+ rdt_last_cmd_printf("locking thread returned error %d\n", ret);
+ goto out_cstates;
+ }
+
+ kthread_bind(thread, plr->cpu);
+ wake_up_process(thread);
+
+ ret = wait_event_interruptible(plr->lock_thread_wq,
+ plr->thread_done == 1);
+ if (ret < 0) {
+ /*
+ * If the thread does not get on the CPU for whatever
+ * reason and the process which sets up the region is
+ * interrupted then this will leave the thread in runnable
+		 * state and once it gets on the CPU it will dereference
+ * the cleared, but not freed, plr struct resulting in an
+ * empty pseudo-locking loop.
+ */
+ rdt_last_cmd_puts("locking thread interrupted\n");
+ goto out_cstates;
+ }
+
+ ret = pseudo_lock_minor_get(&new_minor);
+ if (ret < 0) {
+ rdt_last_cmd_puts("unable to obtain a new minor number\n");
+ goto out_cstates;
+ }
+
+ /*
+ * Unlock access but do not release the reference. The
+ * pseudo-locked region will still be here on return.
+ *
+ * The mutex has to be released temporarily to avoid a potential
+ * deadlock with the mm->mmap_sem semaphore which is obtained in
+ * the device_create() and debugfs_create_dir() callpath below
+ * as well as before the mmap() callback is called.
+ */
+ mutex_unlock(&rdtgroup_mutex);
+
+ if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
+ plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name,
+ debugfs_resctrl);
+ if (!IS_ERR_OR_NULL(plr->debugfs_dir))
+ debugfs_create_file("pseudo_lock_measure", 0200,
+ plr->debugfs_dir, rdtgrp,
+ &pseudo_measure_fops);
+ }
+
+ dev = device_create(pseudo_lock_class, NULL,
+ MKDEV(pseudo_lock_major, new_minor),
+ rdtgrp, "%s", rdtgrp->kn->name);
+
+ mutex_lock(&rdtgroup_mutex);
+
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ rdt_last_cmd_printf("failed to create character device: %d\n",
+ ret);
+ goto out_debugfs;
+ }
+
+ /* We released the mutex - check if group was removed while we did so */
+ if (rdtgrp->flags & RDT_DELETED) {
+ ret = -ENODEV;
+ goto out_device;
+ }
+
+ plr->minor = new_minor;
+
+ rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
+ closid_free(rdtgrp->closid);
+ rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444);
+ rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444);
+
+ ret = 0;
+ goto out;
+
+out_device:
+ device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
+out_debugfs:
+ debugfs_remove_recursive(plr->debugfs_dir);
+ pseudo_lock_minor_release(new_minor);
+out_cstates:
+ pseudo_lock_cstates_relax(plr);
+out_region:
+ pseudo_lock_region_clear(plr);
+out:
+ return ret;
+}
+
+/**
+ * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
+ * @rdtgrp: resource group to which the pseudo-locked region belongs
+ *
+ * The removal of a pseudo-locked region can be initiated when the resource
+ * group is removed from user space via "rmdir" or by the unmount of the
+ * resctrl filesystem. On removal the resource group does not go back to
+ * pseudo-locksetup mode before it is removed, instead it is removed
+ * directly. There is thus asymmetry with the creation: the
+ * &struct pseudo_lock_region is freed here although it was not allocated in
+ * rdtgroup_pseudo_lock_create().
+ *
+ * Return: void
+ */
+void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
+{
+ struct pseudo_lock_region *plr = rdtgrp->plr;
+
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ /*
+ * Default group cannot be a pseudo-locked region so we can
+ * free closid here.
+ */
+ closid_free(rdtgrp->closid);
+ goto free;
+ }
+
+ pseudo_lock_cstates_relax(plr);
+ debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
+ device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
+ pseudo_lock_minor_release(plr->minor);
+
+free:
+ pseudo_lock_free(rdtgrp);
+}
+
+static int pseudo_lock_dev_open(struct inode *inode, struct file *filp)
+{
+ struct rdtgroup *rdtgrp;
+
+ mutex_lock(&rdtgroup_mutex);
+
+ rdtgrp = region_find_by_minor(iminor(inode));
+ if (!rdtgrp) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENODEV;
+ }
+
+ filp->private_data = rdtgrp;
+ atomic_inc(&rdtgrp->waitcount);
+ /* Perform a non-seekable open - llseek is not supported */
+ filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+
+ mutex_unlock(&rdtgroup_mutex);
+
+ return 0;
+}
+
+static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
+{
+ struct rdtgroup *rdtgrp;
+
+ mutex_lock(&rdtgroup_mutex);
+ rdtgrp = filp->private_data;
+ WARN_ON(!rdtgrp);
+ if (!rdtgrp) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENODEV;
+ }
+ filp->private_data = NULL;
+ atomic_dec(&rdtgrp->waitcount);
+ mutex_unlock(&rdtgroup_mutex);
+ return 0;
+}
+
+static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
+{
+ /* Not supported */
+ return -EINVAL;
+}
+
+static const struct vm_operations_struct pseudo_mmap_ops = {
+ .mremap = pseudo_lock_dev_mremap,
+};
+
+static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+ struct pseudo_lock_region *plr;
+ struct rdtgroup *rdtgrp;
+ unsigned long physical;
+ unsigned long psize;
+
+ mutex_lock(&rdtgroup_mutex);
+
+ rdtgrp = filp->private_data;
+ WARN_ON(!rdtgrp);
+ if (!rdtgrp) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENODEV;
+ }
+
+ plr = rdtgrp->plr;
+
+ /*
+ * Task is required to run with affinity to the cpus associated
+ * with the pseudo-locked region. If this is not the case the task
+ * may be scheduled elsewhere and invalidate entries in the
+ * pseudo-locked region.
+ */
+ if (!cpumask_subset(&current->cpus_allowed, &plr->d->cpu_mask)) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -EINVAL;
+ }
+
+ physical = __pa(plr->kmem) >> PAGE_SHIFT;
+ psize = plr->size - off;
+
+ if (off > plr->size) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENOSPC;
+ }
+
+ /*
+ * Ensure changes are carried directly to the memory being mapped,
+ * do not allow copy-on-write mapping.
+ */
+ if (!(vma->vm_flags & VM_SHARED)) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -EINVAL;
+ }
+
+ if (vsize > psize) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENOSPC;
+ }
+
+ memset(plr->kmem + off, 0, vsize);
+
+ if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
+ vsize, vma->vm_page_prot)) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -EAGAIN;
+ }
+ vma->vm_ops = &pseudo_mmap_ops;
+ mutex_unlock(&rdtgroup_mutex);
+ return 0;
+}
+
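+/*
+ * Sketch of the user-space flow the checks above expect (illustrative,
+ * device and variable names are assumptions):
+ *
+ *	sched_setaffinity(0, sizeof(cpus_of_region), &cpus_of_region);
+ *	fd = open("/dev/pseudo_lock/p0", O_RDWR);
+ *	ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *
+ * A private (copy-on-write) mapping, a mapping beyond the region's
+ * size, or an affinity wider than the region's cpus is rejected.
+ */
+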
+static const struct file_operations pseudo_lock_dev_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = NULL,
+ .write = NULL,
+ .open = pseudo_lock_dev_open,
+ .release = pseudo_lock_dev_release,
+ .mmap = pseudo_lock_dev_mmap,
+};
+
+static char *pseudo_lock_devnode(struct device *dev, umode_t *mode)
+{
+ struct rdtgroup *rdtgrp;
+
+ rdtgrp = dev_get_drvdata(dev);
+ if (mode)
+ *mode = 0600;
+ return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
+}
+
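+/*
+ * The devnode callback above makes the devices appear under
+ * /dev/pseudo_lock/ with owner-only access; e.g. a region named "p0"
+ * surfaces as /dev/pseudo_lock/p0 with mode 0600.
+ */
+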
+int rdt_pseudo_lock_init(void)
+{
+ int ret;
+
+ ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops);
+ if (ret < 0)
+ return ret;
+
+ pseudo_lock_major = ret;
+
+ pseudo_lock_class = class_create(THIS_MODULE, "pseudo_lock");
+ if (IS_ERR(pseudo_lock_class)) {
+ ret = PTR_ERR(pseudo_lock_class);
+ unregister_chrdev(pseudo_lock_major, "pseudo_lock");
+ return ret;
+ }
+
+ pseudo_lock_class->devnode = pseudo_lock_devnode;
+ return 0;
+}
+
+void rdt_pseudo_lock_release(void)
+{
+ class_destroy(pseudo_lock_class);
+ pseudo_lock_class = NULL;
+ unregister_chrdev(pseudo_lock_major, "pseudo_lock");
+ pseudo_lock_major = 0;
+}
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h
new file mode 100644
index 000000000000..2c041e6d9f05
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM resctrl
+
+#if !defined(_TRACE_PSEUDO_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PSEUDO_LOCK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(pseudo_lock_mem_latency,
+ TP_PROTO(u32 latency),
+ TP_ARGS(latency),
+ TP_STRUCT__entry(__field(u32, latency)),
+ TP_fast_assign(__entry->latency = latency),
+ TP_printk("latency=%u", __entry->latency)
+ );
+
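+/* An illustrative rendered sample: "... pseudo_lock_mem_latency: latency=42" */
+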
+TRACE_EVENT(pseudo_lock_l2,
+ TP_PROTO(u64 l2_hits, u64 l2_miss),
+ TP_ARGS(l2_hits, l2_miss),
+ TP_STRUCT__entry(__field(u64, l2_hits)
+ __field(u64, l2_miss)),
+ TP_fast_assign(__entry->l2_hits = l2_hits;
+ __entry->l2_miss = l2_miss;),
+ TP_printk("hits=%llu miss=%llu",
+ __entry->l2_hits, __entry->l2_miss));
+
+TRACE_EVENT(pseudo_lock_l3,
+ TP_PROTO(u64 l3_hits, u64 l3_miss),
+ TP_ARGS(l3_hits, l3_miss),
+ TP_STRUCT__entry(__field(u64, l3_hits)
+ __field(u64, l3_miss)),
+ TP_fast_assign(__entry->l3_hits = l3_hits;
+ __entry->l3_miss = l3_miss;),
+ TP_printk("hits=%llu miss=%llu",
+ __entry->l3_hits, __entry->l3_miss));
+
+#endif /* _TRACE_PSEUDO_LOCK_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE intel_rdt_pseudo_lock_event
+#include <trace/define_trace.h>
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 749856a2e736..b799c00bef09 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -20,7 +20,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cacheinfo.h>
#include <linux/cpu.h>
+#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
@@ -55,6 +57,8 @@ static struct kernfs_node *kn_mondata;
static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];
+struct dentry *debugfs_resctrl;
+
void rdt_last_cmd_clear(void)
{
lockdep_assert_held(&rdtgroup_mutex);
@@ -121,11 +125,65 @@ static int closid_alloc(void)
return closid;
}
-static void closid_free(int closid)
+void closid_free(int closid)
{
closid_free_map |= 1 << closid;
}
+/**
+ * closid_allocated - test if provided closid is in use
+ * @closid: closid to be tested
+ *
+ * Return: true if @closid is currently associated with a resource group,
+ * false if @closid is free
+ */
+static bool closid_allocated(unsigned int closid)
+{
+ return (closid_free_map & (1 << closid)) == 0;
+}
+
+/**
+ * rdtgroup_mode_by_closid - Return mode of resource group with closid
+ * @closid: closid of the resource group
+ *
+ * Each resource group is associated with a @closid. Here the mode
+ * of a resource group can be queried by searching for it using its closid.
+ *
+ * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
+ */
+enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
+{
+ struct rdtgroup *rdtgrp;
+
+ list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+ if (rdtgrp->closid == closid)
+ return rdtgrp->mode;
+ }
+
+ return RDT_NUM_MODES;
+}
+
+static const char * const rdt_mode_str[] = {
+ [RDT_MODE_SHAREABLE] = "shareable",
+ [RDT_MODE_EXCLUSIVE] = "exclusive",
+ [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup",
+ [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked",
+};
+
+/**
+ * rdtgroup_mode_str - Return the string representation of mode
+ * @mode: the resource group mode as &enum rdtgrp_mode
+ *
+ * Return: string representation of valid mode, "unknown" otherwise
+ */
+static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
+{
+ if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
+ return "unknown";
+
+ return rdt_mode_str[mode];
+}
+
/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
@@ -146,6 +204,7 @@ static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
int ret;
kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
0, rft->kf_ops, rft, NULL, NULL);
if (IS_ERR(kn))
return PTR_ERR(kn);
@@ -207,8 +266,12 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (rdtgrp) {
- seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
- cpumask_pr_args(&rdtgrp->cpu_mask));
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+ seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
+ cpumask_pr_args(&rdtgrp->plr->d->cpu_mask));
+ else
+ seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
+ cpumask_pr_args(&rdtgrp->cpu_mask));
} else {
ret = -ENOENT;
}
@@ -394,6 +457,13 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
goto unlock;
}
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ ret = -EINVAL;
+ rdt_last_cmd_puts("pseudo-locking in progress\n");
+ goto unlock;
+ }
+
if (is_cpu_list(of))
ret = cpulist_parse(buf, newmask);
else
@@ -509,6 +579,32 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
return ret;
}
+/**
+ * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
+ * @r: Resource group
+ *
+ * Return: 1 if tasks have been assigned to @r, 0 otherwise
+ */
+int rdtgroup_tasks_assigned(struct rdtgroup *r)
+{
+ struct task_struct *p, *t;
+ int ret = 0;
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ rcu_read_lock();
+ for_each_process_thread(p, t) {
+ if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
+ (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) {
+ ret = 1;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
static int rdtgroup_task_write_permission(struct task_struct *task,
struct kernfs_open_file *of)
{
@@ -570,13 +666,22 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return -EINVAL;
rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
rdt_last_cmd_clear();
- if (rdtgrp)
- ret = rdtgroup_move_task(pid, rdtgrp, of);
- else
- ret = -ENOENT;
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ ret = -EINVAL;
+ rdt_last_cmd_puts("pseudo-locking in progress\n");
+ goto unlock;
+ }
+ ret = rdtgroup_move_task(pid, rdtgrp, of);
+
+unlock:
rdtgroup_kn_unlock(of->kn);
return ret ?: nbytes;
@@ -662,6 +767,94 @@ static int rdt_shareable_bits_show(struct kernfs_open_file *of,
return 0;
}
+/**
+ * rdt_bit_usage_show - Display current usage of resources
+ *
+ * The underlying cache of a domain is a shared resource whose regions
+ * can be allocated differently. Here we display the current usage of
+ * each domain as an annotated bitmask. For each domain of this resource
+ * its allocation bitmask is annotated as below to indicate the current
+ * usage of the corresponding bit:
+ * 0 - currently unused
+ * X - currently available for sharing and used by software and hardware
+ * H - currently used by hardware only but available for software use
+ * S - currently used and shareable by software only
+ * E - currently used exclusively by one resource group
+ * P - currently pseudo-locked by one resource group
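+ *
+ * An illustrative two-domain line for a 20-bit CBM could thus read:
+ *	0=SSSSSSSSSSSSSSSSSSEE;1=SSSSSSSSSSSSSSSSSSPP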
+ */
+static int rdt_bit_usage_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+ u32 sw_shareable = 0, hw_shareable = 0;
+ u32 exclusive = 0, pseudo_locked = 0;
+ struct rdt_domain *dom;
+ int i, hwb, swb, excl, psl;
+ enum rdtgrp_mode mode;
+ bool sep = false;
+ u32 *ctrl;
+
+ mutex_lock(&rdtgroup_mutex);
+ hw_shareable = r->cache.shareable_bits;
+ list_for_each_entry(dom, &r->domains, list) {
+ if (sep)
+ seq_putc(seq, ';');
+ ctrl = dom->ctrl_val;
+ sw_shareable = 0;
+ exclusive = 0;
+ seq_printf(seq, "%d=", dom->id);
+ for (i = 0; i < r->num_closid; i++, ctrl++) {
+ if (!closid_allocated(i))
+ continue;
+ mode = rdtgroup_mode_by_closid(i);
+ switch (mode) {
+ case RDT_MODE_SHAREABLE:
+ sw_shareable |= *ctrl;
+ break;
+ case RDT_MODE_EXCLUSIVE:
+ exclusive |= *ctrl;
+ break;
+ case RDT_MODE_PSEUDO_LOCKSETUP:
+ /*
+ * RDT_MODE_PSEUDO_LOCKSETUP is possible
+ * here but not included since the CBM
+ * associated with this CLOSID in this mode
+ * is not initialized and no task or cpu can be
+ * assigned this CLOSID.
+ */
+ break;
+ case RDT_MODE_PSEUDO_LOCKED:
+ case RDT_NUM_MODES:
+ WARN(1,
+ "invalid mode for closid %d\n", i);
+ break;
+ }
+ }
+ for (i = r->cache.cbm_len - 1; i >= 0; i--) {
+ pseudo_locked = dom->plr ? dom->plr->cbm : 0;
+ hwb = test_bit(i, (unsigned long *)&hw_shareable);
+ swb = test_bit(i, (unsigned long *)&sw_shareable);
+ excl = test_bit(i, (unsigned long *)&exclusive);
+ psl = test_bit(i, (unsigned long *)&pseudo_locked);
+ if (hwb && swb)
+ seq_putc(seq, 'X');
+ else if (hwb && !swb)
+ seq_putc(seq, 'H');
+ else if (!hwb && swb)
+ seq_putc(seq, 'S');
+ else if (excl)
+ seq_putc(seq, 'E');
+ else if (psl)
+ seq_putc(seq, 'P');
+ else /* Unused bits remain */
+ seq_putc(seq, '0');
+ }
+ sep = true;
+ }
+ seq_putc(seq, '\n');
+ mutex_unlock(&rdtgroup_mutex);
+ return 0;
+}
+
static int rdt_min_bw_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
@@ -740,6 +933,269 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
return nbytes;
}
+/*
+ * rdtgroup_mode_show - Display mode of this resource group
+ */
+static int rdtgroup_mode_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdtgroup *rdtgrp;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
+
+ seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));
+
+ rdtgroup_kn_unlock(of->kn);
+ return 0;
+}
+
+/**
+ * rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
+ * @r: Resource to which domain instance @d belongs.
+ * @d: The domain instance for which @closid is being tested.
+ * @_cbm: Capacity bitmask being tested.
+ * @closid: Intended closid for @cbm.
+ * @exclusive: Only check if overlaps with exclusive resource groups
+ *
+ * Checks if provided @cbm intended to be used for @closid on domain
+ * @d overlaps with any other closids or other hardware usage associated
+ * with this domain. If @exclusive is true then only overlaps with
+ * resource groups in exclusive mode will be considered. If @exclusive
+ * is false then overlaps with any resource group or hardware entities
+ * will be considered.
+ *
+ * Return: false if CBM does not overlap, true if it does.
+ */
+bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+ u32 _cbm, int closid, bool exclusive)
+{
+ unsigned long *cbm = (unsigned long *)&_cbm;
+ unsigned long *ctrl_b;
+ enum rdtgrp_mode mode;
+ u32 *ctrl;
+ int i;
+
+ /* Check for any overlap with regions used by hardware directly */
+ if (!exclusive) {
+ if (bitmap_intersects(cbm,
+ (unsigned long *)&r->cache.shareable_bits,
+ r->cache.cbm_len))
+ return true;
+ }
+
+ /* Check for overlap with other resource groups */
+ ctrl = d->ctrl_val;
+ for (i = 0; i < r->num_closid; i++, ctrl++) {
+ ctrl_b = (unsigned long *)ctrl;
+ mode = rdtgroup_mode_by_closid(i);
+ if (closid_allocated(i) && i != closid &&
+ mode != RDT_MODE_PSEUDO_LOCKSETUP) {
+ if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
+ if (exclusive) {
+ if (mode == RDT_MODE_EXCLUSIVE)
+ return true;
+ continue;
+ }
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/**
+ * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
+ *
+ * An exclusive resource group implies that there should be no sharing of
+ * its allocated resources. At the time this group is considered to be
+ * exclusive, this test determines whether its current schemata supports
+ * that setting by testing for overlap with all other resource groups.
+ *
+ * Return: true if resource group can be exclusive, false if there is overlap
+ * with allocations of other resource groups and thus this resource group
+ * cannot be exclusive.
+ */
+static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
+{
+ int closid = rdtgrp->closid;
+ struct rdt_resource *r;
+ struct rdt_domain *d;
+
+ for_each_alloc_enabled_rdt_resource(r) {
+ list_for_each_entry(d, &r->domains, list) {
+ if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
+ rdtgrp->closid, false))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * rdtgroup_mode_write - Modify the resource group's mode
+ *
+ */
+static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct rdtgroup *rdtgrp;
+ enum rdtgrp_mode mode;
+ int ret = 0;
+
+ /* Valid input requires a trailing newline */
+ if (nbytes == 0 || buf[nbytes - 1] != '\n')
+ return -EINVAL;
+ buf[nbytes - 1] = '\0';
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
+
+ rdt_last_cmd_clear();
+
+ mode = rdtgrp->mode;
+
+ if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
+ (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
+ (!strcmp(buf, "pseudo-locksetup") &&
+ mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
+ (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
+ goto out;
+
+ if (mode == RDT_MODE_PSEUDO_LOCKED) {
+ rdt_last_cmd_printf("cannot change pseudo-locked group\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!strcmp(buf, "shareable")) {
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ ret = rdtgroup_locksetup_exit(rdtgrp);
+ if (ret)
+ goto out;
+ }
+ rdtgrp->mode = RDT_MODE_SHAREABLE;
+ } else if (!strcmp(buf, "exclusive")) {
+ if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
+ rdt_last_cmd_printf("schemata overlaps\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ ret = rdtgroup_locksetup_exit(rdtgrp);
+ if (ret)
+ goto out;
+ }
+ rdtgrp->mode = RDT_MODE_EXCLUSIVE;
+ } else if (!strcmp(buf, "pseudo-locksetup")) {
+ ret = rdtgroup_locksetup_enter(rdtgrp);
+ if (ret)
+ goto out;
+ rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
+ } else {
+ rdt_last_cmd_printf("unknown/unsupported mode\n");
+ ret = -EINVAL;
+ }
+
+out:
+ rdtgroup_kn_unlock(of->kn);
+ return ret ?: nbytes;
+}
+
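+/*
+ * Illustrative resctrl sequence driving the mode transitions above
+ * (group name and schemata are assumptions):
+ *
+ *	# mkdir /sys/fs/resctrl/p0
+ *	# echo pseudo-locksetup > /sys/fs/resctrl/p0/mode
+ *	# echo "L2:1=0x3" > /sys/fs/resctrl/p0/schemata
+ *
+ * On success the final write triggers rdtgroup_pseudo_lock_create()
+ * and the mode file subsequently reads "pseudo-locked".
+ */
+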
+/**
+ * rdtgroup_cbm_to_size - Translate CBM to size in bytes
+ * @r: RDT resource to which @d belongs.
+ * @d: RDT domain instance.
+ * @cbm: bitmask for which the size should be computed.
+ *
+ * The provided bitmask associated with the RDT domain instance @d will be
+ * translated into how many bytes it represents. The size in bytes is
+ * computed by first dividing the total cache size by the CBM length to
+ * determine how many bytes each bit in the bitmask represents. The result
+ * is multiplied by the number of bits set in the bitmask.
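+ *
+ * A worked example (illustrative numbers): a 20-bit CBM over a
+ * 20480 KiB L3 cache means each bit spans 20480/20 = 1024 KiB, so a
+ * bitmask with four bits set translates to 4 * 1024 KiB = 4096 KiB.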
+ */
+unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
+ struct rdt_domain *d, u32 cbm)
+{
+ struct cpu_cacheinfo *ci;
+ unsigned int size = 0;
+ int num_b, i;
+
+ num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
+ ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
+ for (i = 0; i < ci->num_leaves; i++) {
+ if (ci->info_list[i].level == r->cache_level) {
+ size = ci->info_list[i].size / r->cache.cbm_len * num_b;
+ break;
+ }
+ }
+
+ return size;
+}
+
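A worked standalone example of the arithmetic above, using invented cache parameters: each CBM bit covers cache_size / cbm_len bytes, and the total is that quantity times the popcount of the mask:

#include <stdio.h>

int main(void)
{
	unsigned int cache_size = 24576 * 1024;	/* assumed L3 size in bytes */
	unsigned int cbm_len = 11;		/* assumed CBM length */
	unsigned int cbm = 0x0f;		/* 4 contiguous ways */
	unsigned int num_b = __builtin_popcount(cbm);

	/* bytes per CBM bit, times the number of bits set */
	printf("%u bytes\n", cache_size / cbm_len * num_b);	/* 9151208 */
	return 0;
}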
+/**
+ * rdtgroup_size_show - Display size in bytes of allocated regions
+ *
+ * The "size" file mirrors the layout of the "schemata" file, printing the
+ * size in bytes of each region instead of the capacity bitmask.
+ *
+ */
+static int rdtgroup_size_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdtgroup *rdtgrp;
+ struct rdt_resource *r;
+ struct rdt_domain *d;
+ unsigned int size;
+ bool sep = false;
+ u32 cbm;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
+
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name);
+ size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
+ rdtgrp->plr->d,
+ rdtgrp->plr->cbm);
+ seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+ goto out;
+ }
+
+ for_each_alloc_enabled_rdt_resource(r) {
+ seq_printf(s, "%*s:", max_name_width, r->name);
+ list_for_each_entry(d, &r->domains, list) {
+ if (sep)
+ seq_putc(s, ';');
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ size = 0;
+ } else {
+ cbm = d->ctrl_val[rdtgrp->closid];
+ size = rdtgroup_cbm_to_size(r, d, cbm);
+ }
+ seq_printf(s, "%d=%u", d->id, size);
+ sep = true;
+ }
+ seq_putc(s, '\n');
+ }
+
+out:
+ rdtgroup_kn_unlock(of->kn);
+
+ return 0;
+}
+
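With one alloc-enabled L3 resource spanning two cache domains and the illustrative numbers from the size sketch above, the resulting file might read:

L3:0=9151208;1=9151208

with one id=size pair per domain, ';'-separated exactly like the schemata file.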
/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
{
@@ -792,6 +1248,13 @@ static struct rftype res_common_files[] = {
.fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
},
{
+ .name = "bit_usage",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdt_bit_usage_show,
+ .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
+ },
+ {
.name = "min_bandwidth",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
@@ -853,6 +1316,22 @@ static struct rftype res_common_files[] = {
.seq_show = rdtgroup_schemata_show,
.fflags = RF_CTRL_BASE,
},
+ {
+ .name = "mode",
+ .mode = 0644,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .write = rdtgroup_mode_write,
+ .seq_show = rdtgroup_mode_show,
+ .fflags = RF_CTRL_BASE,
+ },
+ {
+ .name = "size",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdtgroup_size_show,
+ .fflags = RF_CTRL_BASE,
+ },
+
};
static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
@@ -883,6 +1362,103 @@ error:
return ret;
}
+/**
+ * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
+ * @r: The resource group with which the file is associated.
+ * @name: Name of the file
+ *
+ * The permissions of the named resctrl file, directory, or link are
+ * modified to disallow read, write, or execute by any user.
+ *
+ * WARNING: This function is intended to communicate to the user that the
+ * resctrl file has been locked down - that it is not relevant to the
+ * particular state the system finds itself in. It should not be relied
+ * on to protect from user access because after the file's permissions
+ * are restricted the user can still change the permissions using chmod
+ * from the command line.
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
+{
+ struct iattr iattr = {.ia_valid = ATTR_MODE,};
+ struct kernfs_node *kn;
+ int ret = 0;
+
+ kn = kernfs_find_and_get_ns(r->kn, name, NULL);
+ if (!kn)
+ return -ENOENT;
+
+ switch (kernfs_type(kn)) {
+ case KERNFS_DIR:
+ iattr.ia_mode = S_IFDIR;
+ break;
+ case KERNFS_FILE:
+ iattr.ia_mode = S_IFREG;
+ break;
+ case KERNFS_LINK:
+ iattr.ia_mode = S_IFLNK;
+ break;
+ }
+
+ ret = kernfs_setattr(kn, &iattr);
+ kernfs_put(kn);
+ return ret;
+}
+
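A userspace analogue of the warning above, showing why the restriction is advisory only (the path is hypothetical):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	const char *path = "/sys/fs/resctrl/grp1/schemata";	/* hypothetical */

	if (chmod(path, 0) != 0)	/* restrict: ---------- */
		perror("chmod 0000");
	if (chmod(path, 0644) != 0)	/* ...and trivially undo it */
		perror("chmod 0644");
	return 0;
}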
+/**
+ * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
+ * @r: The resource group with which the file is associated.
+ * @name: Name of the file
+ * @mask: Mask of permissions that should be restored
+ *
+ * Restore the permissions of the named file. If @name is a directory the
+ * permissions of its parent will be used.
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
+ umode_t mask)
+{
+ struct iattr iattr = {.ia_valid = ATTR_MODE,};
+ struct kernfs_node *kn, *parent;
+ struct rftype *rfts, *rft;
+ int ret, len;
+
+ rfts = res_common_files;
+ len = ARRAY_SIZE(res_common_files);
+
+ for (rft = rfts; rft < rfts + len; rft++) {
+ if (!strcmp(rft->name, name))
+ iattr.ia_mode = rft->mode & mask;
+ }
+
+ kn = kernfs_find_and_get_ns(r->kn, name, NULL);
+ if (!kn)
+ return -ENOENT;
+
+ switch (kernfs_type(kn)) {
+ case KERNFS_DIR:
+ parent = kernfs_get_parent(kn);
+ if (parent) {
+ iattr.ia_mode |= parent->mode;
+ kernfs_put(parent);
+ }
+ iattr.ia_mode |= S_IFDIR;
+ break;
+ case KERNFS_FILE:
+ iattr.ia_mode |= S_IFREG;
+ break;
+ case KERNFS_LINK:
+ iattr.ia_mode |= S_IFLNK;
+ break;
+ }
+
+ ret = kernfs_setattr(kn, &iattr);
+ kernfs_put(kn);
+ return ret;
+}
+
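A small sketch of the permission math performed above: the stored rftype mode is filtered through the caller's mask and recombined with the file-type bits. The octal values mirror the table entries in this patch:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	mode_t rft_mode = 0644;			/* e.g. the "mode" file */
	mode_t mask = 0444;			/* restore read bits only */
	mode_t ia_mode = (rft_mode & mask) | S_IFREG;

	printf("restored mode: %o\n", (unsigned int)ia_mode);	/* 100444 */
	return 0;
}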
static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
unsigned long fflags)
{
@@ -1224,6 +1800,9 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
if (atomic_dec_and_test(&rdtgrp->waitcount) &&
(rdtgrp->flags & RDT_DELETED)) {
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+ rdtgroup_pseudo_lock_remove(rdtgrp);
kernfs_unbreak_active_protection(kn);
kernfs_put(rdtgrp->kn);
kfree(rdtgrp);
@@ -1289,10 +1868,16 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
rdtgroup_default.mon.mon_data_kn = kn_mondata;
}
+ ret = rdt_pseudo_lock_init();
+ if (ret) {
+ dentry = ERR_PTR(ret);
+ goto out_mondata;
+ }
+
dentry = kernfs_mount(fs_type, flags, rdt_root,
RDTGROUP_SUPER_MAGIC, NULL);
if (IS_ERR(dentry))
- goto out_mondata;
+ goto out_psl;
if (rdt_alloc_capable)
static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
@@ -1310,6 +1895,8 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
goto out;
+out_psl:
+ rdt_pseudo_lock_release();
out_mondata:
if (rdt_mon_capable)
kernfs_remove(kn_mondata);
@@ -1447,6 +2034,10 @@ static void rmdir_all_sub(void)
if (rdtgrp == &rdtgroup_default)
continue;
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+ rdtgroup_pseudo_lock_remove(rdtgrp);
+
/*
* Give any CPUs back to the default group. We cannot copy
* cpu_online_mask because a CPU might have executed the
@@ -1483,6 +2074,8 @@ static void rdt_kill_sb(struct super_block *sb)
reset_all_ctrls(r);
cdp_disable_all();
rmdir_all_sub();
+ rdt_pseudo_lock_release();
+ rdtgroup_default.mode = RDT_MODE_SHAREABLE;
static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
static_branch_disable_cpuslocked(&rdt_mon_enable_key);
static_branch_disable_cpuslocked(&rdt_enable_key);
@@ -1503,7 +2096,8 @@ static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
struct kernfs_node *kn;
int ret = 0;
- kn = __kernfs_create_file(parent_kn, name, 0444, 0,
+ kn = __kernfs_create_file(parent_kn, name, 0444,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
&kf_mondata_ops, priv, NULL, NULL);
if (IS_ERR(kn))
return PTR_ERR(kn);
@@ -1682,6 +2276,114 @@ out_destroy:
return ret;
}
+/**
+ * cbm_ensure_valid - Enforce validity on provided CBM
+ * @_val: Candidate CBM
+ * @r: RDT resource to which the CBM belongs
+ *
+ * The provided CBM represents all cache portions available for use. This
+ * may be represented by a bitmap that does not consist of contiguous ones
+ * and thus be an invalid CBM.
+ * Here the provided CBM is forced to be a valid CBM by considering only
+ * the first run of contiguous set bits as valid and clearing all
+ * remaining bits.
+ * The intention here is to provide a valid default CBM with which a new
+ * resource group is initialized. The user can follow this with a
+ * modification to the CBM if the default does not satisfy the
+ * requirements.
+ */
+static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
+{
+ /*
+ * Convert the u32 _val to an unsigned long required by all the bit
+ * operations within this function. No more than 32 bits of this
+ * converted value can be accessed because all bit operations are
+ * additionally provided with cbm_len that is initialized during
+ * hardware enumeration using five bits from the EAX register and
+ * thus can never exceed 32 bits.
+ */
+ unsigned long *val = (unsigned long *)_val;
+ unsigned int cbm_len = r->cache.cbm_len;
+ unsigned long first_bit, zero_bit;
+
+ if (*val == 0)
+ return;
+
+ first_bit = find_first_bit(val, cbm_len);
+ zero_bit = find_next_zero_bit(val, cbm_len, first_bit);
+
+ /* Clear any remaining bits to ensure contiguous region */
+ bitmap_clear(val, zero_bit, cbm_len - zero_bit);
+}
+
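A standalone sketch of the trimming performed above, where only the lowest run of contiguous set bits survives; for example 0x35 (0b0110101) becomes 0x1:

#include <stdio.h>

/* Assumes cbm_len < 32, matching the comment in the patch */
static unsigned int trim_to_contiguous(unsigned int val, unsigned int cbm_len)
{
	unsigned int first_bit, zero_bit;

	if (!val)
		return 0;
	first_bit = __builtin_ctz(val);			/* lowest set bit */
	zero_bit = first_bit;
	while (zero_bit < cbm_len && (val & (1u << zero_bit)))
		zero_bit++;				/* next zero bit */
	return val & ((1u << zero_bit) - 1);		/* clear the rest */
}

int main(void)
{
	printf("0x%x\n", trim_to_contiguous(0x35, 7));	/* prints 0x1 */
	return 0;
}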
+/**
+ * rdtgroup_init_alloc - Initialize the new RDT group's allocations
+ *
+ * A new RDT group is being created on an allocation capable (CAT)
+ * supporting system. Set this group up to start off with all usable
+ * allocations. That is, all shareable and unused bits.
+ *
+ * An all-zero CBM is invalid. If no shareable bits are available on
+ * any domain then the entire allocation will fail.
+ */
+static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+{
+ u32 used_b = 0, unused_b = 0;
+ u32 closid = rdtgrp->closid;
+ struct rdt_resource *r;
+ enum rdtgrp_mode mode;
+ struct rdt_domain *d;
+ int i, ret;
+ u32 *ctrl;
+
+ for_each_alloc_enabled_rdt_resource(r) {
+ list_for_each_entry(d, &r->domains, list) {
+ d->have_new_ctrl = false;
+ d->new_ctrl = r->cache.shareable_bits;
+ used_b = r->cache.shareable_bits;
+ ctrl = d->ctrl_val;
+ for (i = 0; i < r->num_closid; i++, ctrl++) {
+ if (closid_allocated(i) && i != closid) {
+ mode = rdtgroup_mode_by_closid(i);
+ if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
+ break;
+ used_b |= *ctrl;
+ if (mode == RDT_MODE_SHAREABLE)
+ d->new_ctrl |= *ctrl;
+ }
+ }
+ if (d->plr && d->plr->cbm > 0)
+ used_b |= d->plr->cbm;
+ unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
+ unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
+ d->new_ctrl |= unused_b;
+ /*
+ * Force the initial CBM to be valid; the user can
+ * modify the CBM based on system availability.
+ */
+ cbm_ensure_valid(&d->new_ctrl, r);
+ if (bitmap_weight((unsigned long *) &d->new_ctrl,
+ r->cache.cbm_len) <
+ r->cache.min_cbm_bits) {
+ rdt_last_cmd_printf("no space on %s:%d\n",
+ r->name, d->id);
+ return -ENOSPC;
+ }
+ d->have_new_ctrl = true;
+ }
+ }
+
+ for_each_alloc_enabled_rdt_resource(r) {
+ ret = update_domains(r, rdtgrp->closid);
+ if (ret < 0) {
+ rdt_last_cmd_puts("failed to initialize allocations\n");
+ return ret;
+ }
+ rdtgrp->mode = RDT_MODE_SHAREABLE;
+ }
+
+ return 0;
+}
+
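A sketch of the unused-bit computation in the loop above, assuming cbm_len < BITS_PER_LONG so that BIT_MASK(cbm_len) - 1 is a mask of cbm_len ones:

#include <stdio.h>

int main(void)
{
	unsigned int cbm_len = 8;
	unsigned int full = (1u << cbm_len) - 1;	/* 0xff */
	unsigned int used_b = 0x3c;			/* assumed allocations */
	unsigned int unused_b = (used_b ^ full) & full;

	printf("unused: 0x%x\n", unused_b);		/* prints 0xc3 */
	return 0;
}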
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
struct kernfs_node *prgrp_kn,
const char *name, umode_t mode,
@@ -1700,6 +2402,14 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
goto out_unlock;
}
+ if (rtype == RDTMON_GROUP &&
+ (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+ prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
+ ret = -EINVAL;
+ rdt_last_cmd_puts("pseudo-locking in progress\n");
+ goto out_unlock;
+ }
+
/* allocate the rdtgroup. */
rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
if (!rdtgrp) {
@@ -1840,6 +2550,10 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
ret = 0;
rdtgrp->closid = closid;
+ ret = rdtgroup_init_alloc(rdtgrp);
+ if (ret < 0)
+ goto out_id_free;
+
list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
if (rdt_mon_capable) {
@@ -1850,15 +2564,16 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
if (ret) {
rdt_last_cmd_puts("kernfs subdir error\n");
- goto out_id_free;
+ goto out_del_list;
}
}
goto out_unlock;
+out_del_list:
+ list_del(&rdtgrp->rdtgroup_list);
out_id_free:
closid_free(closid);
- list_del(&rdtgrp->rdtgroup_list);
out_common_fail:
mkdir_rdt_prepare_clean(rdtgrp);
out_unlock:
@@ -1945,6 +2660,21 @@ static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
return 0;
}
+static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
+ struct rdtgroup *rdtgrp)
+{
+ rdtgrp->flags = RDT_DELETED;
+ list_del(&rdtgrp->rdtgroup_list);
+
+ /*
+ * one extra hold on this, will drop when we kfree(rdtgrp)
+ * in rdtgroup_kn_unlock()
+ */
+ kernfs_get(kn);
+ kernfs_remove(rdtgrp->kn);
+ return 0;
+}
+
static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
cpumask_var_t tmpmask)
{
@@ -1970,7 +2700,6 @@ static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
update_closid_rmid(tmpmask, NULL);
- rdtgrp->flags = RDT_DELETED;
closid_free(rdtgrp->closid);
free_rmid(rdtgrp->mon.rmid);
@@ -1979,14 +2708,7 @@ static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
*/
free_all_child_rdtgrp(rdtgrp);
- list_del(&rdtgrp->rdtgroup_list);
-
- /*
- * one extra hold on this, will drop when we kfree(rdtgrp)
- * in rdtgroup_kn_unlock()
- */
- kernfs_get(kn);
- kernfs_remove(rdtgrp->kn);
+ rdtgroup_ctrl_remove(kn, rdtgrp);
return 0;
}
@@ -2014,13 +2736,19 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
* If the rdtgroup is a mon group and parent directory
* is a valid "mon_groups" directory, remove the mon group.
*/
- if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn)
- ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
- else if (rdtgrp->type == RDTMON_GROUP &&
- is_mon_groups(parent_kn, kn->name))
+ if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) {
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ ret = rdtgroup_ctrl_remove(kn, rdtgrp);
+ } else {
+ ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
+ }
+ } else if (rdtgrp->type == RDTMON_GROUP &&
+ is_mon_groups(parent_kn, kn->name)) {
ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
- else
+ } else {
ret = -EPERM;
+ }
out:
rdtgroup_kn_unlock(kn);
@@ -2046,7 +2774,8 @@ static int __init rdtgroup_setup_root(void)
int ret;
rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
- KERNFS_ROOT_CREATE_DEACTIVATED,
+ KERNFS_ROOT_CREATE_DEACTIVATED |
+ KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
&rdtgroup_default);
if (IS_ERR(rdt_root))
return PTR_ERR(rdt_root);
@@ -2102,6 +2831,29 @@ int __init rdtgroup_init(void)
if (ret)
goto cleanup_mountpoint;
+ /*
+ * Adding the resctrl debugfs directory here may not be ideal since
+ * it would let the resctrl debugfs directory appear on the debugfs
+ * filesystem before the resctrl filesystem is mounted.
+ * It may also be acceptable since it would enable debugging of RDT
+ * before resctrl is mounted.
+ * The reason the debugfs directory is created here and not in
+ * rdt_mount() is that rdt_mount() takes rdtgroup_mutex and, during
+ * the debugfs directory creation, also &sb->s_type->i_mutex_key
+ * (the lockdep class of inode->i_rwsem). Other filesystem
+ * interactions (e.g. SyS_getdents) have the lock ordering:
+ * &sb->s_type->i_mutex_key --> &mm->mmap_sem
+ * During mmap(), called with &mm->mmap_sem, the rdtgroup_mutex
+ * is taken, thus creating the dependency:
+ * &mm->mmap_sem --> rdtgroup_mutex
+ * which can cause deadlock when combined with the other two lock
+ * dependencies.
+ * By creating the debugfs directory here we avoid a dependency
+ * that may cause deadlock (even though file operations cannot
+ * occur until the filesystem is mounted, I do not know how to
+ * tell lockdep that).
+ */
+ debugfs_resctrl = debugfs_create_dir("resctrl", NULL);
+
return 0;
cleanup_mountpoint:
@@ -2111,3 +2863,11 @@ cleanup_root:
return ret;
}
+
+void __exit rdtgroup_exit(void)
+{
+ debugfs_remove_recursive(debugfs_resctrl);
+ unregister_filesystem(&rdt_fs_type);
+ sysfs_remove_mount_point(fs_kobj, "resctrl");
+ kernfs_destroy_root(rdt_root);
+}
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8c50754c09c1..4b767284b7f5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -123,8 +123,8 @@ void mce_setup(struct mce *m)
{
memset(m, 0, sizeof(struct mce));
m->cpu = m->extcpu = smp_processor_id();
- /* We hope get_seconds stays lockless */
- m->time = get_seconds();
+ /* need the internal __ version to avoid deadlocks */
+ m->time = __ktime_get_real_seconds();
m->cpuvendor = boot_cpu_data.x86_vendor;
m->cpuid = cpuid_eax(1);
m->socketid = cpu_data(m->extcpu).phys_proc_id;
@@ -1104,6 +1104,101 @@ static void mce_unmap_kpfn(unsigned long pfn)
}
#endif
+
+/*
+ * Cases where we avoid rendezvous handler timeout:
+ * 1) If this CPU is offline.
+ *
+ * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
+ * skip those CPUs which remain looping in the 1st kernel - see
+ * crash_nmi_callback().
+ *
+ * Note: there still is a small window between kexec-ing and the new,
+ * kdump kernel establishing a new #MC handler where a broadcasted MCE
+ * might not get handled properly.
+ */
+static bool __mc_check_crashing_cpu(int cpu)
+{
+ if (cpu_is_offline(cpu) ||
+ (crashing_cpu != -1 && crashing_cpu != cpu)) {
+ u64 mcgstatus;
+
+ mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
+ if (mcgstatus & MCG_STATUS_RIPV) {
+ mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+ return true;
+ }
+ }
+ return false;
+}
+
+static void __mc_scan_banks(struct mce *m, struct mce *final,
+ unsigned long *toclear, unsigned long *valid_banks,
+ int no_way_out, int *worst)
+{
+ struct mca_config *cfg = &mca_cfg;
+ int severity, i;
+
+ for (i = 0; i < cfg->banks; i++) {
+ __clear_bit(i, toclear);
+ if (!test_bit(i, valid_banks))
+ continue;
+
+ if (!mce_banks[i].ctl)
+ continue;
+
+ m->misc = 0;
+ m->addr = 0;
+ m->bank = i;
+
+ m->status = mce_rdmsrl(msr_ops.status(i));
+ if (!(m->status & MCI_STATUS_VAL))
+ continue;
+
+ /*
+ * Corrected or non-signaled errors are handled by
+ * machine_check_poll(). Leave them alone, unless this panics.
+ */
+ if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
+ !no_way_out)
+ continue;
+
+ /* Set taint even when machine check was not enabled. */
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
+ severity = mce_severity(m, cfg->tolerant, NULL, true);
+
+ /*
+ * If the machine check was for the corrected/deferred handler, don't
+ * touch it unless we're panicking.
+ */
+ if ((severity == MCE_KEEP_SEVERITY ||
+ severity == MCE_UCNA_SEVERITY) && !no_way_out)
+ continue;
+
+ __set_bit(i, toclear);
+
+ /* Machine check event was not enabled. Clear, but ignore. */
+ if (severity == MCE_NO_SEVERITY)
+ continue;
+
+ mce_read_aux(m, i);
+
+ /* assuming valid severity level != 0 */
+ m->severity = severity;
+
+ mce_log(m);
+
+ if (severity > *worst) {
+ *final = *m;
+ *worst = severity;
+ }
+ }
+
+ /* mce_clear_state will clear *final, save locally for use later */
+ *m = *final;
+}
+
/*
* The actual machine check handler. This only handles real
* exceptions when something got corrupted coming in through int 18.
@@ -1118,68 +1213,45 @@ static void mce_unmap_kpfn(unsigned long pfn)
*/
void do_machine_check(struct pt_regs *regs, long error_code)
{
+ DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
+ DECLARE_BITMAP(toclear, MAX_NR_BANKS);
struct mca_config *cfg = &mca_cfg;
+ int cpu = smp_processor_id();
+ char *msg = "Unknown";
struct mce m, *final;
- int i;
int worst = 0;
- int severity;
/*
* Establish sequential order between the CPUs entering the machine
* check handler.
*/
int order = -1;
+
/*
* If no_way_out gets set, there is no safe way to recover from this
* MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
*/
int no_way_out = 0;
+
/*
* If kill_it gets set, there might be a way to recover from this
* error.
*/
int kill_it = 0;
- DECLARE_BITMAP(toclear, MAX_NR_BANKS);
- DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
- char *msg = "Unknown";
/*
* MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
* on Intel.
*/
int lmce = 1;
- int cpu = smp_processor_id();
- /*
- * Cases where we avoid rendezvous handler timeout:
- * 1) If this CPU is offline.
- *
- * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
- * skip those CPUs which remain looping in the 1st kernel - see
- * crash_nmi_callback().
- *
- * Note: there still is a small window between kexec-ing and the new,
- * kdump kernel establishing a new #MC handler where a broadcasted MCE
- * might not get handled properly.
- */
- if (cpu_is_offline(cpu) ||
- (crashing_cpu != -1 && crashing_cpu != cpu)) {
- u64 mcgstatus;
-
- mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
- if (mcgstatus & MCG_STATUS_RIPV) {
- mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
- return;
- }
- }
+ if (__mc_check_crashing_cpu(cpu))
+ return;
ist_enter(regs);
this_cpu_inc(mce_exception_count);
- if (!cfg->banks)
- goto out;
-
mce_gather_info(&m, regs);
m.tsc = rdtsc();
@@ -1220,67 +1292,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
order = mce_start(&no_way_out);
}
- for (i = 0; i < cfg->banks; i++) {
- __clear_bit(i, toclear);
- if (!test_bit(i, valid_banks))
- continue;
- if (!mce_banks[i].ctl)
- continue;
-
- m.misc = 0;
- m.addr = 0;
- m.bank = i;
-
- m.status = mce_rdmsrl(msr_ops.status(i));
- if ((m.status & MCI_STATUS_VAL) == 0)
- continue;
-
- /*
- * Non uncorrected or non signaled errors are handled by
- * machine_check_poll. Leave them alone, unless this panics.
- */
- if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
- !no_way_out)
- continue;
-
- /*
- * Set taint even when machine check was not enabled.
- */
- add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
- severity = mce_severity(&m, cfg->tolerant, NULL, true);
-
- /*
- * When machine check was for corrected/deferred handler don't
- * touch, unless we're panicing.
- */
- if ((severity == MCE_KEEP_SEVERITY ||
- severity == MCE_UCNA_SEVERITY) && !no_way_out)
- continue;
- __set_bit(i, toclear);
- if (severity == MCE_NO_SEVERITY) {
- /*
- * Machine check event was not enabled. Clear, but
- * ignore.
- */
- continue;
- }
-
- mce_read_aux(&m, i);
-
- /* assuming valid severity level != 0 */
- m.severity = severity;
-
- mce_log(&m);
-
- if (severity > worst) {
- *final = m;
- worst = severity;
- }
- }
-
- /* mce_clear_state will clear *final, save locally for use later */
- m = *final;
+ __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst);
if (!no_way_out)
mce_clear_state(toclear);
@@ -1319,7 +1331,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
if (worst > 0)
mce_report_event(regs);
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
-out:
+
sync_core();
if (worst != MCE_AR_SEVERITY && !kill_it)
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 08286269fd24..b9bc8a1a584e 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -509,12 +509,20 @@ static struct platform_device *microcode_pdev;
static int check_online_cpus(void)
{
- if (num_online_cpus() == num_present_cpus())
- return 0;
+ unsigned int cpu;
- pr_err("Not all CPUs online, aborting microcode update.\n");
+ /*
+ * Make sure all CPUs are online. It's fine for SMT to be disabled if
+ * all the primary threads are still online.
+ */
+ for_each_present_cpu(cpu) {
+ if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) {
+ pr_err("Not all CPUs online, aborting microcode update.\n");
+ return -EINVAL;
+ }
+ }
- return -EINVAL;
+ return 0;
}
static atomic_t late_cpus_in;
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 81c0afb39d0a..71ca064e3794 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -22,18 +22,10 @@
#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
-/*
- * Check for extended topology enumeration cpuid leaf 0xb and if it
- * exists, use it for populating initial_apicid and cpu topology
- * detection.
- */
-int detect_extended_topology(struct cpuinfo_x86 *c)
+int detect_extended_topology_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
- unsigned int eax, ebx, ecx, edx, sub_index;
- unsigned int ht_mask_width, core_plus_mask_width;
- unsigned int core_select_mask, core_level_siblings;
- static bool printed;
+ unsigned int eax, ebx, ecx, edx;
if (c->cpuid_level < 0xb)
return -1;
@@ -52,10 +44,30 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
* initial apic id, which also represents 32-bit extended x2apic id.
*/
c->initial_apicid = edx;
+ smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
+#endif
+ return 0;
+}
+
+/*
+ * Check for extended topology enumeration cpuid leaf 0xb and if it
+ * exists, use it to populate initial_apicid and detect the cpu
+ * topology.
+ */
+int detect_extended_topology(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+ unsigned int eax, ebx, ecx, edx, sub_index;
+ unsigned int ht_mask_width, core_plus_mask_width;
+ unsigned int core_select_mask, core_level_siblings;
+
+ if (detect_extended_topology_early(c) < 0)
+ return -1;
/*
* Populate HT related information from sub-leaf level 0.
*/
+ cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
@@ -86,15 +98,6 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
c->x86_max_cores = (core_level_siblings / smp_num_siblings);
-
- if (!printed) {
- pr_info("CPU: Physical Processor ID: %d\n",
- c->phys_proc_id);
- if (c->x86_max_cores > 1)
- pr_info("CPU: Processor Core ID: %d\n",
- c->cpu_core_id);
- printed = 1;
- }
#endif
return 0;
}
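Illustrative arithmetic for the final core count above (the CPUID-derived values are invented): cores per package is the core-level sibling count divided by the SMT sibling count:

#include <stdio.h>

int main(void)
{
	unsigned int core_level_siblings = 16;	/* logical CPUs per package */
	unsigned int smp_num_siblings = 2;	/* SMT threads per core */

	printf("cores: %u\n", core_level_siblings / smp_num_siblings);	/* 8 */
	return 0;
}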
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 666a284116ac..9c8652974f8e 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -22,8 +22,6 @@
#include <asm/stacktrace.h>
#include <asm/unwind.h>
-#define OPCODE_BUFSIZE 64
-
int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
static int die_counter;
@@ -93,26 +91,18 @@ static void printk_stack_address(unsigned long address, int reliable,
*/
void show_opcodes(u8 *rip, const char *loglvl)
{
- unsigned int code_prologue = OPCODE_BUFSIZE * 2 / 3;
+#define PROLOGUE_SIZE 42
+#define EPILOGUE_SIZE 21
+#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
u8 opcodes[OPCODE_BUFSIZE];
- u8 *ip;
- int i;
-
- printk("%sCode: ", loglvl);
-
- ip = (u8 *)rip - code_prologue;
- if (probe_kernel_read(opcodes, ip, OPCODE_BUFSIZE)) {
- pr_cont("Bad RIP value.\n");
- return;
- }
- for (i = 0; i < OPCODE_BUFSIZE; i++, ip++) {
- if (ip == rip)
- pr_cont("<%02x> ", opcodes[i]);
- else
- pr_cont("%02x ", opcodes[i]);
+ if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) {
+ printk("%sCode: Bad RIP value.\n", loglvl);
+ } else {
+ printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
+ __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
+ opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
}
- pr_cont("\n");
}
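A userspace emulation of the one-line format produced above. %ph is a kernel printk extension that dumps a buffer as space-separated hex bytes, so the loops below mimic what "%42ph <%02x> %21ph" renders (buffer sizes shortened, opcode bytes invented):

#include <stdio.h>

#define PROLOGUE_SIZE 4	/* 42 in the patch; shortened here */
#define EPILOGUE_SIZE 2	/* 21 in the patch */

int main(void)
{
	unsigned char opcodes[PROLOGUE_SIZE + 1 + EPILOGUE_SIZE] = {
		0x55, 0x48, 0x89, 0xe5, 0x0f, 0x0b, 0x5d	/* invented */
	};
	int i;

	printf("Code: ");
	for (i = 0; i < PROLOGUE_SIZE; i++)
		printf("%02x ", opcodes[i]);
	printf("<%02x> ", opcodes[PROLOGUE_SIZE]);	/* byte at rip */
	for (i = 0; i < EPILOGUE_SIZE; i++)
		printf("%02x ", opcodes[PROLOGUE_SIZE + 1 + i]);
	printf("\n");
	return 0;
}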
void show_ip(struct pt_regs *regs, const char *loglvl)
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index da5d8ac60062..50d5848bf22e 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -338,6 +338,18 @@ static resource_size_t __init gen3_stolen_base(int num, int slot, int func,
return bsm & INTEL_BSM_MASK;
}
+static resource_size_t __init gen11_stolen_base(int num, int slot, int func,
+ resource_size_t stolen_size)
+{
+ u64 bsm;
+
+ bsm = read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW0);
+ bsm &= INTEL_BSM_MASK;
+ bsm |= (u64)read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW1) << 32;
+
+ return bsm;
+}
+
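A sketch of the 64-bit base assembly above, where the stolen-memory base is split across two PCI config dwords; the mask value and register contents are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

#define INTEL_BSM_MASK	(-(1u << 20))	/* assumed: 1 MiB-aligned base */

int main(void)
{
	uint32_t dw0 = 0x80123456;	/* invented low config dword */
	uint32_t dw1 = 0x00000001;	/* invented high config dword */
	uint64_t bsm = (dw0 & INTEL_BSM_MASK) | ((uint64_t)dw1 << 32);

	printf("stolen base: 0x%llx\n", (unsigned long long)bsm);	/* 0x180100000 */
	return 0;
}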
static resource_size_t __init i830_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
@@ -498,6 +510,11 @@ static const struct intel_early_ops chv_early_ops __initconst = {
.stolen_size = chv_stolen_size,
};
+static const struct intel_early_ops gen11_early_ops __initconst = {
+ .stolen_base = gen11_stolen_base,
+ .stolen_size = gen9_stolen_size,
+};
+
static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_I830_IDS(&i830_early_ops),
INTEL_I845G_IDS(&i845_early_ops),
@@ -529,6 +546,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_CFL_IDS(&gen9_early_ops),
INTEL_GLK_IDS(&gen9_early_ops),
INTEL_CNL_IDS(&gen9_early_ops),
+ INTEL_ICL_11_IDS(&gen11_early_ops),
};
struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index f92a6593de1e..2ea85b32421a 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -10,6 +10,7 @@
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>
+#include <asm/irq_regs.h>
#include <linux/hardirq.h>
#include <linux/pkeys.h>
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index abe6df15a8fb..30f9cb2c0b55 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -512,11 +512,18 @@ ENTRY(initial_code)
ENTRY(setup_once_ref)
.long setup_once
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#define PGD_ALIGN (2 * PAGE_SIZE)
+#define PTI_USER_PGD_FILL 1024
+#else
+#define PGD_ALIGN (PAGE_SIZE)
+#define PTI_USER_PGD_FILL 0
+#endif
/*
* BSS section
*/
__PAGE_ALIGNED_BSS
- .align PAGE_SIZE
+ .align PGD_ALIGN
#ifdef CONFIG_X86_PAE
.globl initial_pg_pmd
initial_pg_pmd:
@@ -526,14 +533,17 @@ initial_pg_pmd:
initial_page_table:
.fill 1024,4,0
#endif
+ .align PGD_ALIGN
initial_pg_fixmap:
.fill 1024,4,0
-.globl empty_zero_page
-empty_zero_page:
- .fill 4096,1,0
.globl swapper_pg_dir
+ .align PGD_ALIGN
swapper_pg_dir:
.fill 1024,4,0
+ .fill PTI_USER_PGD_FILL,4,0
+.globl empty_zero_page
+empty_zero_page:
+ .fill 4096,1,0
EXPORT_SYMBOL(empty_zero_page)
/*
@@ -542,7 +552,7 @@ EXPORT_SYMBOL(empty_zero_page)
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
/* Page-aligned for the benefit of paravirt? */
- .align PAGE_SIZE
+ .align PGD_ALIGN
ENTRY(initial_page_table)
.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
# if KPMDS == 3
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 8344dd2f310a..15ebc2fc166e 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -235,7 +235,7 @@ ENTRY(secondary_startup_64)
* address given in m16:64.
*/
pushq $.Lafter_lret # put return address on stack for unwinder
- xorq %rbp, %rbp # clear frame pointer
+ xorl %ebp, %ebp # clear frame pointer
movq initial_code(%rip), %rax
pushq $__KERNEL_CS # set correct cs
pushq %rax # target address in negative space
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 346b24883911..b0acb22e5a46 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1,6 +1,7 @@
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/errno.h>
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 8771766d46b6..34a5c1715148 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -169,28 +169,29 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
set_dr_addr_mask(0, i);
}
-/*
- * Check for virtual address in kernel space.
- */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+static int arch_bp_generic_len(int x86_len)
{
- unsigned int len;
- unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
- va = info->address;
- len = bp->attr.bp_len;
-
- /*
- * We don't need to worry about va + len - 1 overflowing:
- * we already require that va is aligned to a multiple of len.
- */
- return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
+ switch (x86_len) {
+ case X86_BREAKPOINT_LEN_1:
+ return HW_BREAKPOINT_LEN_1;
+ case X86_BREAKPOINT_LEN_2:
+ return HW_BREAKPOINT_LEN_2;
+ case X86_BREAKPOINT_LEN_4:
+ return HW_BREAKPOINT_LEN_4;
+#ifdef CONFIG_X86_64
+ case X86_BREAKPOINT_LEN_8:
+ return HW_BREAKPOINT_LEN_8;
+#endif
+ default:
+ return -EINVAL;
+ }
}
int arch_bp_generic_fields(int x86_len, int x86_type,
int *gen_len, int *gen_type)
{
+ int len;
+
/* Type */
switch (x86_type) {
case X86_BREAKPOINT_EXECUTE:
@@ -211,42 +212,47 @@ int arch_bp_generic_fields(int x86_len, int x86_type,
}
/* Len */
- switch (x86_len) {
- case X86_BREAKPOINT_LEN_1:
- *gen_len = HW_BREAKPOINT_LEN_1;
- break;
- case X86_BREAKPOINT_LEN_2:
- *gen_len = HW_BREAKPOINT_LEN_2;
- break;
- case X86_BREAKPOINT_LEN_4:
- *gen_len = HW_BREAKPOINT_LEN_4;
- break;
-#ifdef CONFIG_X86_64
- case X86_BREAKPOINT_LEN_8:
- *gen_len = HW_BREAKPOINT_LEN_8;
- break;
-#endif
- default:
+ len = arch_bp_generic_len(x86_len);
+ if (len < 0)
return -EINVAL;
- }
+ *gen_len = len;
return 0;
}
-
-static int arch_build_bp_info(struct perf_event *bp)
+/*
+ * Check for virtual address in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ unsigned long va;
+ int len;
- info->address = bp->attr.bp_addr;
+ va = hw->address;
+ len = arch_bp_generic_len(hw->len);
+ WARN_ON_ONCE(len < 0);
+
+ /*
+ * We don't need to worry about va + len - 1 overflowing:
+ * we already require that va is aligned to a multiple of len.
+ */
+ return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
+}
+
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
+{
+ hw->address = attr->bp_addr;
+ hw->mask = 0;
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_W:
- info->type = X86_BREAKPOINT_WRITE;
+ hw->type = X86_BREAKPOINT_WRITE;
break;
case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
- info->type = X86_BREAKPOINT_RW;
+ hw->type = X86_BREAKPOINT_RW;
break;
case HW_BREAKPOINT_X:
/*
@@ -254,23 +260,23 @@ static int arch_build_bp_info(struct perf_event *bp)
* acceptable for kprobes. On non-kprobes kernels, we don't
* allow kernel breakpoints at all.
*/
- if (bp->attr.bp_addr >= TASK_SIZE_MAX) {
+ if (attr->bp_addr >= TASK_SIZE_MAX) {
#ifdef CONFIG_KPROBES
- if (within_kprobe_blacklist(bp->attr.bp_addr))
+ if (within_kprobe_blacklist(attr->bp_addr))
return -EINVAL;
#else
return -EINVAL;
#endif
}
- info->type = X86_BREAKPOINT_EXECUTE;
+ hw->type = X86_BREAKPOINT_EXECUTE;
/*
* x86 inst breakpoints need to have a specific undefined len.
* But we still need to check userspace is not trying to setup
* an unsupported length, to get a range breakpoint for example.
*/
- if (bp->attr.bp_len == sizeof(long)) {
- info->len = X86_BREAKPOINT_LEN_X;
+ if (attr->bp_len == sizeof(long)) {
+ hw->len = X86_BREAKPOINT_LEN_X;
return 0;
}
default:
@@ -278,28 +284,26 @@ static int arch_build_bp_info(struct perf_event *bp)
}
/* Len */
- info->mask = 0;
-
- switch (bp->attr.bp_len) {
+ switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
- info->len = X86_BREAKPOINT_LEN_1;
+ hw->len = X86_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
- info->len = X86_BREAKPOINT_LEN_2;
+ hw->len = X86_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
- info->len = X86_BREAKPOINT_LEN_4;
+ hw->len = X86_BREAKPOINT_LEN_4;
break;
#ifdef CONFIG_X86_64
case HW_BREAKPOINT_LEN_8:
- info->len = X86_BREAKPOINT_LEN_8;
+ hw->len = X86_BREAKPOINT_LEN_8;
break;
#endif
default:
/* AMD range breakpoint */
- if (!is_power_of_2(bp->attr.bp_len))
+ if (!is_power_of_2(attr->bp_len))
return -EINVAL;
- if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
+ if (attr->bp_addr & (attr->bp_len - 1))
return -EINVAL;
if (!boot_cpu_has(X86_FEATURE_BPEXT))
@@ -312,8 +316,8 @@ static int arch_build_bp_info(struct perf_event *bp)
* breakpoints, then we'll have to check for kprobe-blacklisted
* addresses anywhere in the range.
*/
- info->mask = bp->attr.bp_len - 1;
- info->len = X86_BREAKPOINT_LEN_1;
+ hw->mask = attr->bp_len - 1;
+ hw->len = X86_BREAKPOINT_LEN_1;
}
return 0;
@@ -322,22 +326,23 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
unsigned int align;
int ret;
- ret = arch_build_bp_info(bp);
+ ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
- switch (info->len) {
+ switch (hw->len) {
case X86_BREAKPOINT_LEN_1:
align = 0;
- if (info->mask)
- align = info->mask;
+ if (hw->mask)
+ align = hw->mask;
break;
case X86_BREAKPOINT_LEN_2:
align = 1;
@@ -358,7 +363,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* Check that the low-order bits of the address are appropriate
* for the alignment implied by len.
*/
- if (info->address & align)
+ if (hw->address & align)
return -EINVAL;
return 0;
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 86c4439f9d74..519649ddf100 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/init.h>
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 74383a3780dc..01adea278a71 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -8,6 +8,7 @@
#include <asm/traps.h>
#include <asm/proto.h>
#include <asm/desc.h>
+#include <asm/hw_irq.h>
struct idt_data {
unsigned int vector;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 328d027d829d..59b5f2ea7c2f 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -10,6 +10,7 @@
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/irq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index c1bdbd3d3232..95600a99ae93 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -11,6 +11,7 @@
#include <linux/seq_file.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index d86e344f5b3d..0469cd078db1 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -11,6 +11,7 @@
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 772196c1b8c4..a0693b71cfc1 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/kprobes.h>
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index e56c95be2808..eeea935e9bb5 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -37,15 +37,18 @@ static void bug_at(unsigned char *ip, int line)
BUG();
}
-static void __jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type,
- void *(*poker)(void *, const void *, size_t),
- int init)
+static void __ref __jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ void *(*poker)(void *, const void *, size_t),
+ int init)
{
union jump_code_union code;
const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
+ if (early_boot_irqs_disabled)
+ poker = text_poke_early;
+
if (type == JUMP_LABEL_JMP) {
if (init) {
/*
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 7326078eaa7a..278cd07228dd 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -532,7 +532,7 @@ static int bzImage64_cleanup(void *loader_data)
static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
{
return verify_pefile_signature(kernel, kernel_len,
- NULL,
+ VERIFY_USE_SECONDARY_KEYRING,
VERIFYING_KEXEC_PE_SIGNATURE);
}
#endif
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index ae38dccf0c8f..2b949f4fd4d8 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -105,14 +105,4 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig
}
#endif
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb);
-#else
-static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- return 0;
-}
-#endif
#endif
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 6f4d42377fe5..b0d1e81c96bb 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -66,8 +66,6 @@
#include "common.h"
-void jprobe_return_end(void);
-
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -395,8 +393,6 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
- (u8 *) real;
if ((s64) (s32) newdisp != newdisp) {
pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
- pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
- src, real, insn->displacement.value);
return 0;
}
disp = (u8 *) dest + insn_offset_displacement(insn);
@@ -596,7 +592,6 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
* stepping.
*/
regs->ip = (unsigned long)p->ainsn.insn;
- preempt_enable_no_resched();
return;
}
#endif
@@ -640,8 +635,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
* Raise a BUG or we'll continue in an endless reentering loop
* and eventually a stack overflow.
*/
- printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
- p->addr);
+ pr_err("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
BUG();
default:
@@ -669,12 +663,10 @@ int kprobe_int3_handler(struct pt_regs *regs)
addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
/*
- * We don't want to be preempted for the entire
- * duration of kprobe processing. We conditionally
- * re-enable preemption at the end of this function,
- * and also in reenter_kprobe() and setup_singlestep().
+ * We don't want to be preempted for the entire duration of kprobe
+ * processing. Since the int3 and debug traps disable irqs and we clear
+ * IF while single-stepping, it must not be preemptible.
*/
- preempt_disable();
kcb = get_kprobe_ctlblk();
p = get_kprobe(addr);
@@ -690,13 +682,14 @@ int kprobe_int3_handler(struct pt_regs *regs)
/*
* If we have no pre-handler or it returned 0, we
* continue with normal processing. If we have a
- * pre-handler and it returned non-zero, it prepped
- * for calling the break_handler below on re-entry
- * for jprobe processing, so get out doing nothing
- * more here.
+ * pre-handler and it returned non-zero, that means the
+ * user handler set up the registers to resume at another
+ * instruction, so we must skip the single stepping.
*/
if (!p->pre_handler || !p->pre_handler(p, regs))
setup_singlestep(p, regs, kcb, 0);
+ else
+ reset_current_kprobe();
return 1;
}
} else if (*addr != BREAKPOINT_INSTRUCTION) {
@@ -710,18 +703,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
* the original instruction.
*/
regs->ip = (unsigned long)addr;
- preempt_enable_no_resched();
return 1;
- } else if (kprobe_running()) {
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- if (!skip_singlestep(p, regs, kcb))
- setup_singlestep(p, regs, kcb, 0);
- return 1;
- }
} /* else: not a kprobe fault; let the kernel handle it */
- preempt_enable_no_resched();
return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);
@@ -972,8 +956,6 @@ int kprobe_debug_handler(struct pt_regs *regs)
}
reset_current_kprobe();
out:
- preempt_enable_no_resched();
-
/*
* if somebody else is singlestepping across a probe point, flags
* will have TF set, in which case, continue the remaining processing
@@ -1020,7 +1002,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
- preempt_enable_no_resched();
} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
kcb->kprobe_status == KPROBE_HIT_SSDONE) {
/*
@@ -1083,93 +1064,6 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- unsigned long addr;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- kcb->jprobe_saved_sp = stack_addr(regs);
- addr = (unsigned long)(kcb->jprobe_saved_sp);
-
- /*
- * As Linus pointed out, gcc assumes that the callee
- * owns the argument space and could overwrite it, e.g.
- * tailcall optimization. So, to be absolutely safe
- * we also save and restore enough stack bytes to cover
- * the argument area.
- * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
- * raw stack chunk with redzones:
- */
- __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
- regs->ip = (unsigned long)(jp->entry);
-
- /*
- * jprobes use jprobe_return() which skips the normal return
- * path of the function, and this messes up the accounting of the
- * function graph tracer to get messed up.
- *
- * Pause function graph tracing while performing the jprobe function.
- */
- pause_graph_tracing();
- return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- /* Unpoison stack redzones in the frames we are going to jump over. */
- kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);
-
- asm volatile (
-#ifdef CONFIG_X86_64
- " xchg %%rbx,%%rsp \n"
-#else
- " xchgl %%ebx,%%esp \n"
-#endif
- " int3 \n"
- " .globl jprobe_return_end\n"
- " jprobe_return_end: \n"
- " nop \n"::"b"
- (kcb->jprobe_saved_sp):"memory");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-NOKPROBE_SYMBOL(jprobe_return_end);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- u8 *addr = (u8 *) (regs->ip - 1);
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- void *saved_sp = kcb->jprobe_saved_sp;
-
- if ((addr > (u8 *) jprobe_return) &&
- (addr < (u8 *) jprobe_return_end)) {
- if (stack_addr(regs) != saved_sp) {
- struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
- printk(KERN_ERR
- "current sp %p does not match saved sp %p\n",
- stack_addr(regs), saved_sp);
- printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
- show_regs(saved_regs);
- printk(KERN_ERR "Current registers\n");
- show_regs(regs);
- BUG();
- }
- /* It's OK to start function graph tracing again */
- unpause_graph_tracing();
- *regs = kcb->jprobe_saved_regs;
- __memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
bool arch_within_kprobe_blacklist(unsigned long addr)
{
bool is_in_entry_trampoline_section = false;
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 8dc0161cec8f..ef819e19650b 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -25,36 +25,6 @@
#include "common.h"
-static nokprobe_inline
-void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb, unsigned long orig_ip)
-{
- /*
- * Emulate singlestep (and also recover regs->ip)
- * as if there is a 5byte nop
- */
- regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
- if (unlikely(p->post_handler)) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- p->post_handler(p, regs, 0);
- }
- __this_cpu_write(current_kprobe, NULL);
- if (orig_ip)
- regs->ip = orig_ip;
-}
-
-int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- if (kprobe_ftrace(p)) {
- __skip_singlestep(p, regs, kcb, 0);
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-NOKPROBE_SYMBOL(skip_singlestep);
-
/* Ftrace callback handler for kprobes -- called under preempt disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs)
@@ -75,18 +45,25 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
regs->ip = ip + sizeof(kprobe_opcode_t);
- /* To emulate trap based kprobes, preempt_disable here */
- preempt_disable();
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (!p->pre_handler || !p->pre_handler(p, regs)) {
- __skip_singlestep(p, regs, kcb, orig_ip);
- preempt_enable_no_resched();
+ /*
+ * Emulate singlestep (and also recover regs->ip)
+ * as if there is a 5byte nop
+ */
+ regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ regs->ip = orig_ip;
}
/*
- * If pre_handler returns !0, it sets regs->ip and
- * resets current kprobe, and keep preempt count +1.
+ * If pre_handler returns !0, it changes regs->ip. We have to
+ * skip emulating post_handler.
*/
+ __this_cpu_write(current_kprobe, NULL);
}
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 203d398802a3..eaf02f2e7300 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -491,7 +491,6 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
if (!reenter)
reset_current_kprobe();
- preempt_enable_no_resched();
return 1;
}
return 0;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 5b2300b818af..09aaabb2bbf1 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -45,7 +45,6 @@
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
-#include <asm/kvm_guest.h>
static int kvmapf = 1;
@@ -66,15 +65,6 @@ static int __init parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc);
-static int kvmclock_vsyscall = 1;
-static int __init parse_no_kvmclock_vsyscall(char *arg)
-{
- kvmclock_vsyscall = 0;
- return 0;
-}
-
-early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
-
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;
@@ -154,7 +144,7 @@ void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
for (;;) {
if (!n.halted)
- prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
if (hlist_unhashed(&n.link))
break;
@@ -188,7 +178,7 @@ static void apf_task_wake_one(struct kvm_task_sleep_node *n)
if (n->halted)
smp_send_reschedule(n->cpu);
else if (swq_has_sleeper(&n->wq))
- swake_up(&n->wq);
+ swake_up_one(&n->wq);
}
static void apf_task_wake_all(void)
@@ -560,9 +550,6 @@ static void __init kvm_guest_init(void)
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
apic_set_eoi_write(kvm_guest_apic_eoi_write);
- if (kvmclock_vsyscall)
- kvm_setup_vsyscall_timeinfo();
-
#ifdef CONFIG_SMP
smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
@@ -628,6 +615,7 @@ const __initconst struct hypervisor_x86 x86_hyper_kvm = {
.name = "KVM",
.detect = kvm_detect,
.type = X86_HYPER_KVM,
+ .init.init_platform = kvmclock_init,
.init.guest_late_init = kvm_guest_init,
.init.x2apic_available = kvm_para_available,
};
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 3b8e7c13c614..1e6764648af3 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -23,30 +23,57 @@
#include <asm/apic.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
-#include <linux/memblock.h>
+#include <linux/cpuhotplug.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/hypervisor.h>
#include <asm/mem_encrypt.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/kvmclock.h>
-static int kvmclock __ro_after_init = 1;
-static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
-static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
-static u64 kvm_sched_clock_offset;
+static int kvmclock __initdata = 1;
+static int kvmclock_vsyscall __initdata = 1;
+static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME;
+static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK;
+static u64 kvm_sched_clock_offset __ro_after_init;
-static int parse_no_kvmclock(char *arg)
+static int __init parse_no_kvmclock(char *arg)
{
kvmclock = 0;
return 0;
}
early_param("no-kvmclock", parse_no_kvmclock);
-/* The hypervisor will put information about time periodically here */
-static struct pvclock_vsyscall_time_info *hv_clock;
-static struct pvclock_wall_clock *wall_clock;
+static int __init parse_no_kvmclock_vsyscall(char *arg)
+{
+ kvmclock_vsyscall = 0;
+ return 0;
+}
+early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
+
+/* Aligned to page sizes to match what's mapped via vsyscalls to userspace */
+#define HV_CLOCK_SIZE (sizeof(struct pvclock_vsyscall_time_info) * NR_CPUS)
+#define HVC_BOOT_ARRAY_SIZE \
+ (PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
+
+static struct pvclock_vsyscall_time_info
+ hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE);
+static struct pvclock_wall_clock wall_clock;
+static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+ return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+ return this_cpu_read(hv_clock_per_cpu);
+}
/*
* The wallclock is the time of day when we booted. Since then, some time may
@@ -55,21 +82,10 @@ static struct pvclock_wall_clock *wall_clock;
*/
static void kvm_get_wallclock(struct timespec64 *now)
{
- struct pvclock_vcpu_time_info *vcpu_time;
- int low, high;
- int cpu;
-
- low = (int)slow_virt_to_phys(wall_clock);
- high = ((u64)slow_virt_to_phys(wall_clock) >> 32);
-
- native_write_msr(msr_kvm_wall_clock, low, high);
-
- cpu = get_cpu();
-
- vcpu_time = &hv_clock[cpu].pvti;
- pvclock_read_wallclock(wall_clock, vcpu_time, now);
-
- put_cpu();
+ wrmsrl(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock));
+ preempt_disable();
+ pvclock_read_wallclock(&wall_clock, this_cpu_pvti(), now);
+ preempt_enable();
}
static int kvm_set_wallclock(const struct timespec64 *now)
@@ -79,14 +95,10 @@ static int kvm_set_wallclock(const struct timespec64 *now)
static u64 kvm_clock_read(void)
{
- struct pvclock_vcpu_time_info *src;
u64 ret;
- int cpu;
preempt_disable_notrace();
- cpu = smp_processor_id();
- src = &hv_clock[cpu].pvti;
- ret = pvclock_clocksource_read(src);
+ ret = pvclock_clocksource_read(this_cpu_pvti());
preempt_enable_notrace();
return ret;
}
@@ -112,11 +124,11 @@ static inline void kvm_sched_clock_init(bool stable)
kvm_sched_clock_offset = kvm_clock_read();
pv_time_ops.sched_clock = kvm_sched_clock_read;
- printk(KERN_INFO "kvm-clock: using sched offset of %llu cycles\n",
- kvm_sched_clock_offset);
+ pr_info("kvm-clock: using sched offset of %llu cycles",
+ kvm_sched_clock_offset);
BUILD_BUG_ON(sizeof(kvm_sched_clock_offset) >
- sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
+ sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
}
/*
@@ -130,19 +142,11 @@ static inline void kvm_sched_clock_init(bool stable)
*/
static unsigned long kvm_get_tsc_khz(void)
{
- struct pvclock_vcpu_time_info *src;
- int cpu;
- unsigned long tsc_khz;
-
- cpu = get_cpu();
- src = &hv_clock[cpu].pvti;
- tsc_khz = pvclock_tsc_khz(src);
- put_cpu();
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
- return tsc_khz;
+ return pvclock_tsc_khz(this_cpu_pvti());
}
-static void kvm_get_preset_lpj(void)
+static void __init kvm_get_preset_lpj(void)
{
unsigned long khz;
u64 lpj;
@@ -156,49 +160,40 @@ static void kvm_get_preset_lpj(void)
bool kvm_check_and_clear_guest_paused(void)
{
+ struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
bool ret = false;
- struct pvclock_vcpu_time_info *src;
- int cpu = smp_processor_id();
- if (!hv_clock)
+ if (!src)
return ret;
- src = &hv_clock[cpu].pvti;
- if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
- src->flags &= ~PVCLOCK_GUEST_STOPPED;
+ if ((src->pvti.flags & PVCLOCK_GUEST_STOPPED) != 0) {
+ src->pvti.flags &= ~PVCLOCK_GUEST_STOPPED;
pvclock_touch_watchdogs();
ret = true;
}
-
return ret;
}
struct clocksource kvm_clock = {
- .name = "kvm-clock",
- .read = kvm_clock_get_cycles,
- .rating = 400,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .name = "kvm-clock",
+ .read = kvm_clock_get_cycles,
+ .rating = 400,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
EXPORT_SYMBOL_GPL(kvm_clock);
-int kvm_register_clock(char *txt)
+static void kvm_register_clock(char *txt)
{
- int cpu = smp_processor_id();
- int low, high, ret;
- struct pvclock_vcpu_time_info *src;
-
- if (!hv_clock)
- return 0;
+ struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
+ u64 pa;
- src = &hv_clock[cpu].pvti;
- low = (int)slow_virt_to_phys(src) | 1;
- high = ((u64)slow_virt_to_phys(src) >> 32);
- ret = native_write_msr_safe(msr_kvm_system_time, low, high);
- printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
- cpu, high, low, txt);
+ if (!src)
+ return;
- return ret;
+ pa = slow_virt_to_phys(&src->pvti) | 0x01ULL;
+ wrmsrl(msr_kvm_system_time, pa);
+ pr_info("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt);
}
static void kvm_save_sched_clock_state(void)
@@ -213,11 +208,7 @@ static void kvm_restore_sched_clock_state(void)
#ifdef CONFIG_X86_LOCAL_APIC
static void kvm_setup_secondary_clock(void)
{
- /*
- * Now that the first cpu already had this clocksource initialized,
- * we shouldn't fail.
- */
- WARN_ON(kvm_register_clock("secondary cpu clock"));
+ kvm_register_clock("secondary cpu clock");
}
#endif
@@ -245,100 +236,84 @@ static void kvm_shutdown(void)
native_machine_shutdown();
}
-static phys_addr_t __init kvm_memblock_alloc(phys_addr_t size,
- phys_addr_t align)
+static int __init kvm_setup_vsyscall_timeinfo(void)
{
- phys_addr_t mem;
+#ifdef CONFIG_X86_64
+ u8 flags;
- mem = memblock_alloc(size, align);
- if (!mem)
+ if (!per_cpu(hv_clock_per_cpu, 0) || !kvmclock_vsyscall)
return 0;
- if (sev_active()) {
- if (early_set_memory_decrypted((unsigned long)__va(mem), size))
- goto e_free;
- }
+ flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
+ if (!(flags & PVCLOCK_TSC_STABLE_BIT))
+ return 0;
- return mem;
-e_free:
- memblock_free(mem, size);
+ kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+#endif
return 0;
}
+early_initcall(kvm_setup_vsyscall_timeinfo);
-static void __init kvm_memblock_free(phys_addr_t addr, phys_addr_t size)
+static int kvmclock_setup_percpu(unsigned int cpu)
{
- if (sev_active())
- early_set_memory_encrypted((unsigned long)__va(addr), size);
+ struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu);
- memblock_free(addr, size);
+	/*
+	 * The per-cpu area setup replicates CPU0's data to all CPU
+	 * pointers, so check carefully: CPU0 has already been set up
+	 * in kvmclock_init().
+	 */
+ if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
+ return 0;
+
+ /* Use the static page for the first CPUs, allocate otherwise */
+ if (cpu < HVC_BOOT_ARRAY_SIZE)
+ p = &hv_clock_boot[cpu];
+ else
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+
+ per_cpu(hv_clock_per_cpu, cpu) = p;
+ return p ? 0 : -ENOMEM;
}
void __init kvmclock_init(void)
{
- struct pvclock_vcpu_time_info *vcpu_time;
- unsigned long mem, mem_wall_clock;
- int size, cpu, wall_clock_size;
u8 flags;
- size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
-
- if (!kvm_para_available())
+ if (!kvm_para_available() || !kvmclock)
return;
- if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
+ if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
- } else if (!(kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)))
- return;
-
- wall_clock_size = PAGE_ALIGN(sizeof(struct pvclock_wall_clock));
- mem_wall_clock = kvm_memblock_alloc(wall_clock_size, PAGE_SIZE);
- if (!mem_wall_clock)
- return;
-
- wall_clock = __va(mem_wall_clock);
- memset(wall_clock, 0, wall_clock_size);
-
- mem = kvm_memblock_alloc(size, PAGE_SIZE);
- if (!mem) {
- kvm_memblock_free(mem_wall_clock, wall_clock_size);
- wall_clock = NULL;
+ } else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
return;
}
- hv_clock = __va(mem);
- memset(hv_clock, 0, size);
-
- if (kvm_register_clock("primary cpu clock")) {
- hv_clock = NULL;
- kvm_memblock_free(mem, size);
- kvm_memblock_free(mem_wall_clock, wall_clock_size);
- wall_clock = NULL;
+ if (cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "kvmclock:setup_percpu",
+ kvmclock_setup_percpu, NULL) < 0) {
return;
}
- printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
+ pr_info("kvm-clock: Using msrs %x and %x",
msr_kvm_system_time, msr_kvm_wall_clock);
- pvclock_set_pvti_cpu0_va(hv_clock);
+ this_cpu_write(hv_clock_per_cpu, &hv_clock_boot[0]);
+ kvm_register_clock("primary cpu clock");
+ pvclock_set_pvti_cpu0_va(hv_clock_boot);
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
- cpu = get_cpu();
- vcpu_time = &hv_clock[cpu].pvti;
- flags = pvclock_read_flags(vcpu_time);
-
+ flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
kvm_sched_clock_init(flags & PVCLOCK_TSC_STABLE_BIT);
- put_cpu();
x86_platform.calibrate_tsc = kvm_get_tsc_khz;
x86_platform.calibrate_cpu = kvm_get_tsc_khz;
x86_platform.get_wallclock = kvm_get_wallclock;
x86_platform.set_wallclock = kvm_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
- x86_cpuinit.early_percpu_clock_init =
- kvm_setup_secondary_clock;
+ x86_cpuinit.early_percpu_clock_init = kvm_setup_secondary_clock;
#endif
x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
@@ -350,31 +325,3 @@ void __init kvmclock_init(void)
clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
pv_info.name = "KVM";
}
-
-int __init kvm_setup_vsyscall_timeinfo(void)
-{
-#ifdef CONFIG_X86_64
- int cpu;
- u8 flags;
- struct pvclock_vcpu_time_info *vcpu_time;
- unsigned int size;
-
- if (!hv_clock)
- return 0;
-
- size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
-
- cpu = get_cpu();
-
- vcpu_time = &hv_clock[cpu].pvti;
- flags = pvclock_read_flags(vcpu_time);
-
- put_cpu();
-
- if (!(flags & PVCLOCK_TSC_STABLE_BIT))
- return 1;
-
- kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
-#endif
- return 0;
-}
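For scale: hv_clock_boot above is a single page, so it covers PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info) CPUs before kvmclock_setup_percpu() falls back to kzalloc(). A stand-alone sketch of that split, assuming a 4 KiB page and a 64-byte, cache-line-aligned pvti record (illustrative values, not stated by this patch):

#include <stdio.h>

#define PAGE_SIZE 4096	/* assumed x86 page size */
#define PVTI_SIZE 64	/* assumed size of a cache-line-aligned pvti record */
#define HVC_BOOT_ARRAY_SIZE (PAGE_SIZE / PVTI_SIZE)

int main(void)
{
	unsigned int cpu;

	printf("static boot page covers CPUs 0..%d\n", HVC_BOOT_ARRAY_SIZE - 1);
	for (cpu = 0; cpu < 200; cpu += 32)
		printf("cpu %3u -> %s\n", cpu,
		       cpu < HVC_BOOT_ARRAY_SIZE ? "hv_clock_boot page"
						 : "kzalloc'd entry");
	return 0;
}

With these numbers the first 64 possible CPUs share the static page, and only very large guests pay for the late per-CPU allocations.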
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index c9b14020f4dd..733e6ace0fa4 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -100,6 +100,102 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
return new_ldt;
}
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+
+static void do_sanity_check(struct mm_struct *mm,
+ bool had_kernel_mapping,
+ bool had_user_mapping)
+{
+ if (mm->context.ldt) {
+ /*
+ * We already had an LDT. The top-level entry should already
+ * have been allocated and synchronized with the usermode
+ * tables.
+ */
+ WARN_ON(!had_kernel_mapping);
+ if (static_cpu_has(X86_FEATURE_PTI))
+ WARN_ON(!had_user_mapping);
+ } else {
+ /*
+ * This is the first time we're mapping an LDT for this process.
+ * Sync the pgd to the usermode tables.
+ */
+ WARN_ON(had_kernel_mapping);
+ if (static_cpu_has(X86_FEATURE_PTI))
+ WARN_ON(had_user_mapping);
+ }
+}
+
+#ifdef CONFIG_X86_PAE
+
+static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
+{
+ p4d_t *p4d;
+ pud_t *pud;
+
+ if (pgd->pgd == 0)
+ return NULL;
+
+ p4d = p4d_offset(pgd, va);
+ if (p4d_none(*p4d))
+ return NULL;
+
+ pud = pud_offset(p4d, va);
+ if (pud_none(*pud))
+ return NULL;
+
+ return pmd_offset(pud, va);
+}
+
+static void map_ldt_struct_to_user(struct mm_struct *mm)
+{
+ pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
+ pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
+ pmd_t *k_pmd, *u_pmd;
+
+ k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
+ u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
+
+ if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+ set_pmd(u_pmd, *k_pmd);
+}
+
+static void sanity_check_ldt_mapping(struct mm_struct *mm)
+{
+ pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
+ pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
+ bool had_kernel, had_user;
+ pmd_t *k_pmd, *u_pmd;
+
+ k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
+ u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
+ had_kernel = (k_pmd->pmd != 0);
+ had_user = (u_pmd->pmd != 0);
+
+ do_sanity_check(mm, had_kernel, had_user);
+}
+
+#else /* !CONFIG_X86_PAE */
+
+static void map_ldt_struct_to_user(struct mm_struct *mm)
+{
+ pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
+
+ if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+ set_pgd(kernel_to_user_pgdp(pgd), *pgd);
+}
+
+static void sanity_check_ldt_mapping(struct mm_struct *mm)
+{
+ pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
+ bool had_kernel = (pgd->pgd != 0);
+ bool had_user = (kernel_to_user_pgdp(pgd)->pgd != 0);
+
+ do_sanity_check(mm, had_kernel, had_user);
+}
+
+#endif /* CONFIG_X86_PAE */
+
/*
* If PTI is enabled, this maps the LDT into the kernelmode and
* usermode tables for the given mm.
@@ -115,9 +211,8 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
- bool is_vmalloc, had_top_level_entry;
unsigned long va;
+ bool is_vmalloc;
spinlock_t *ptl;
pgd_t *pgd;
int i;
@@ -131,13 +226,15 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
*/
WARN_ON(ldt->slot != -1);
+ /* Check if the current mappings are sane */
+ sanity_check_ldt_mapping(mm);
+
/*
* Did we already have the top level entry allocated? We can't
	 * use pgd_none() for this because it doesn't do anything on
* 4-level page table kernels.
*/
pgd = pgd_offset(mm, LDT_BASE_ADDR);
- had_top_level_entry = (pgd->pgd != 0);
is_vmalloc = is_vmalloc_addr(ldt->entries);
@@ -172,41 +269,31 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
pte_unmap_unlock(ptep, ptl);
}
- if (mm->context.ldt) {
- /*
- * We already had an LDT. The top-level entry should already
- * have been allocated and synchronized with the usermode
- * tables.
- */
- WARN_ON(!had_top_level_entry);
- if (static_cpu_has(X86_FEATURE_PTI))
- WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
- } else {
- /*
- * This is the first time we're mapping an LDT for this process.
- * Sync the pgd to the usermode tables.
- */
- WARN_ON(had_top_level_entry);
- if (static_cpu_has(X86_FEATURE_PTI)) {
- WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
- set_pgd(kernel_to_user_pgdp(pgd), *pgd);
- }
- }
+ /* Propagate LDT mapping to the user page-table */
+ map_ldt_struct_to_user(mm);
va = (unsigned long)ldt_slot_va(slot);
flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
ldt->slot = slot;
-#endif
return 0;
}
+#else /* !CONFIG_PAGE_TABLE_ISOLATION */
+
+static int
+map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
+{
+ return 0;
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
+
static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
struct mmu_gather tlb;
unsigned long start = LDT_BASE_ADDR;
- unsigned long end = start + (1UL << PGDIR_SHIFT);
+ unsigned long end = LDT_END_ADDR;
if (!static_cpu_has(X86_FEATURE_PTI))
return;
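The invariant do_sanity_check() enforces is compact enough to restate: an mm that already has an LDT must already have both the kernel and (under PTI) the user entry synced, while a first LDT must find neither. A minimal stand-alone rendering, with assert() standing in for WARN_ON() and a plain bool for static_cpu_has(X86_FEATURE_PTI):

#include <assert.h>
#include <stdbool.h>

/* Models do_sanity_check(): mapping state must match whether an LDT exists. */
static void ldt_mapping_invariant(bool have_ldt, bool pti,
				  bool had_kernel, bool had_user)
{
	if (have_ldt) {
		/* Later LDTs: the entries must already be synced. */
		assert(had_kernel);
		if (pti)
			assert(had_user);
	} else {
		/* First LDT for this mm: nothing should be mapped yet. */
		assert(!had_kernel);
		if (pti)
			assert(!had_user);
	}
}

int main(void)
{
	ldt_mapping_invariant(false, true, false, false);	/* first LDT */
	ldt_mapping_invariant(true, true, true, true);		/* later LDT */
	return 0;
}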
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index d1ab07ec8c9a..5409c2800ab5 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -56,7 +56,7 @@ static void load_segments(void)
static void machine_kexec_free_page_tables(struct kimage *image)
{
- free_page((unsigned long)image->arch.pgd);
+ free_pages((unsigned long)image->arch.pgd, PGD_ALLOCATION_ORDER);
image->arch.pgd = NULL;
#ifdef CONFIG_X86_PAE
free_page((unsigned long)image->arch.pmd0);
@@ -72,7 +72,8 @@ static void machine_kexec_free_page_tables(struct kimage *image)
static int machine_kexec_alloc_page_tables(struct kimage *image)
{
- image->arch.pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ image->arch.pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ PGD_ALLOCATION_ORDER);
#ifdef CONFIG_X86_PAE
image->arch.pmd0 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
image->arch.pmd1 = (pmd_t *)get_zeroed_page(GFP_KERNEL);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 99dc79e76bdc..930c88341e4e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -88,10 +88,12 @@ unsigned paravirt_patch_call(void *insnbuf,
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);
- if (tgt_clobbers & ~site_clobbers)
- return len; /* target would clobber too much for this site */
- if (len < 5)
+ if (len < 5) {
+#ifdef CONFIG_RETPOLINE
+ WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
+#endif
return len; /* call too long for patch site */
+ }
b->opcode = 0xe8; /* call */
b->delta = delta;
@@ -106,8 +108,12 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);
- if (len < 5)
+ if (len < 5) {
+#ifdef CONFIG_RETPOLINE
+ WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
+#endif
return len; /* call too long for patch site */
+ }
b->opcode = 0xe9; /* jmp */
b->delta = delta;
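The encoding these patch sites emit is unchanged by the new warning: a near call is the opcode byte 0xe8 followed by a 32-bit displacement measured from the end of the 5-byte instruction, i.e. delta = target - (addr + 5). A small sketch of that arithmetic with made-up addresses (x86 is little-endian, so memcpy of the host integer produces the right byte order):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Encode "call target" at address addr, as paravirt_patch_call() does. */
static void encode_call(uint8_t buf[5], uint64_t addr, uint64_t target)
{
	int32_t delta = (int32_t)(target - (addr + 5)); /* rel32 from next insn */

	buf[0] = 0xe8;			/* CALL rel32 */
	memcpy(&buf[1], &delta, sizeof(delta));
}

int main(void)
{
	uint8_t insn[5];

	encode_call(insn, 0x1000, 0x2000);	/* hypothetical addresses */
	printf("%02x %02x %02x %02x %02x\n",	/* prints: e8 fb 0f 00 00 */
	       insn[0], insn[1], insn[2], insn[3], insn[4]);
	return 0;
}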
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 9edadabf04f6..9cb98f7b07c9 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -20,7 +20,7 @@ DEF_NATIVE(, mov64, "mov %rdi, %rax");
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
-DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
+DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
#endif
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index ab5d9dd668d2..acfd04121da3 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -155,9 +155,6 @@ static int __init pci_iommu_init(void)
{
struct iommu_table_entry *p;
-#ifdef CONFIG_PCI
- dma_debug_add_bus(&pci_bus_type);
-#endif
x86_init.iommu.iommu_init();
for (p = __iommu_table; p < __iommu_table_end; p++) {
@@ -175,7 +172,7 @@ rootfs_initcall(pci_iommu_init);
static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
- pdev->dev.dma_32bit_limit = true;
+ pdev->dev.bus_dma_mask = DMA_BIT_MASK(32);
return 0;
}
diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
index 4dfd90a75e63..2e9006c1e240 100644
--- a/arch/x86/kernel/pci-iommu_table.c
+++ b/arch/x86/kernel/pci-iommu_table.c
@@ -60,7 +60,7 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n",
p->detect, q->detect);
 		/* Heavy-handed way... */
- x->depend = 0;
+ x->depend = NULL;
}
}
diff --git a/arch/x86/kernel/pcspeaker.c b/arch/x86/kernel/pcspeaker.c
index da5190a1ea16..4a710ffffd9a 100644
--- a/arch/x86/kernel/pcspeaker.c
+++ b/arch/x86/kernel/pcspeaker.c
@@ -9,6 +9,6 @@ static __init int add_pcspkr(void)
pd = platform_device_register_simple("pcspkr", -1, NULL, 0);
- return IS_ERR(pd) ? PTR_ERR(pd) : 0;
+ return PTR_ERR_OR_ZERO(pd);
}
device_initcall(add_pcspkr);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 30ca2d1a9231..c93fcfdf1673 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -57,14 +57,12 @@ __visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
*/
.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
-#ifdef CONFIG_X86_64
/*
* .sp1 is cpu_current_top_of_stack. The init task never
* runs user code, but cpu_current_top_of_stack should still
* be well defined before the first context switch.
*/
.sp1 = TOP_OF_INIT_STACK,
-#endif
#ifdef CONFIG_X86_32
.ss0 = __KERNEL_DS,
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 0ae659de21eb..2924fd447e61 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -285,7 +285,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
* current_thread_info(). Refresh the SYSENTER configuration in
* case prev or next is vm86.
*/
- update_sp0(next_p);
+ update_task_stack(next_p);
refresh_sysenter_cs(next);
this_cpu_write(cpu_current_top_of_stack,
(unsigned long)task_stack_page(next_p) +
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 12bb445fb98d..476e3ddf8890 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -478,7 +478,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
/* Reload sp0. */
- update_sp0(next_p);
+ update_task_stack(next_p);
/*
* Now maybe reload the debug registers and handle I/O bitmaps
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 2f86d883dd95..b4866badb235 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -823,6 +823,12 @@ void __init setup_arch(char **cmdline_p)
memblock_reserve(__pa_symbol(_text),
(unsigned long)__bss_stop - (unsigned long)_text);
+ /*
+ * Make sure page 0 is always reserved because on systems with
+ * L1TF its contents can be leaked to user processes.
+ */
+ memblock_reserve(0, PAGE_SIZE);
+
early_reserve_initrd();
/*
@@ -866,6 +872,8 @@ void __init setup_arch(char **cmdline_p)
idt_setup_early_traps();
early_cpu_init();
+ arch_init_ideal_nops();
+ jump_label_init();
early_ioremap_init();
setup_olpc_ofw_pgd();
@@ -991,11 +999,6 @@ void __init setup_arch(char **cmdline_p)
setup_clear_cpu_cap(X86_FEATURE_APIC);
}
-#ifdef CONFIG_PCI
- if (pci_early_dump_regs)
- early_dump_pci_devices();
-#endif
-
e820__reserve_setup_data();
e820__finish_early_params();
@@ -1012,6 +1015,7 @@ void __init setup_arch(char **cmdline_p)
*/
init_hypervisor_platform();
+ tsc_early_init();
x86_init.resources.probe_roms();
/* after parse_early_param, so could debug it */
@@ -1197,11 +1201,6 @@ void __init setup_arch(char **cmdline_p)
memblock_find_dma_reserve();
-#ifdef CONFIG_KVM_GUEST
- kvmclock_init();
-#endif
-
- tsc_early_delay_calibrate();
if (!early_xdbc_setup_hardware())
early_xdbc_register_console();
@@ -1272,8 +1271,6 @@ void __init setup_arch(char **cmdline_p)
mcheck_init();
- arch_init_ideal_nops();
-
register_refined_jiffies(CLOCK_TICK_RATE);
#ifdef CONFIG_EFI
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 5c574dff4c1a..04adc8d60aed 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -261,6 +261,7 @@ __visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
{
ack_APIC_irq();
inc_irq_stat(irq_resched_count);
+ kvm_set_cpu_l1tf_flush_l1d();
if (trace_resched_ipi_enabled()) {
/*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index db9656e13ea0..f02ecaf97904 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -80,6 +80,7 @@
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
+#include <asm/hw_irq.h>
/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
@@ -271,6 +272,23 @@ static void notrace start_secondary(void *unused)
}
/**
+ * topology_is_primary_thread - Check whether CPU is the primary SMT thread
+ * @cpu: CPU to check
+ */
+bool topology_is_primary_thread(unsigned int cpu)
+{
+ return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
+}
+
+/**
+ * topology_smt_supported - Check whether SMT is supported by the CPUs
+ */
+bool topology_smt_supported(void)
+{
+ return smp_num_siblings > 1;
+}
+
+/**
* topology_phys_to_logical_pkg - Map a physical package id to a logical
*
* Returns logical package id or -1 if not found
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 093f2ea5dd56..7627455047c2 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -81,16 +81,6 @@ EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
-#define STACKTRACE_DUMP_ONCE(task) ({ \
- static bool __section(.data.unlikely) __dumped; \
- \
- if (!__dumped) { \
- __dumped = true; \
- WARN_ON(1); \
- show_stack(task, NULL); \
- } \
-})
-
static int __always_inline
__save_stack_trace_reliable(struct stack_trace *trace,
struct task_struct *task)
@@ -99,30 +89,25 @@ __save_stack_trace_reliable(struct stack_trace *trace,
struct pt_regs *regs;
unsigned long addr;
- for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
+ for (unwind_start(&state, task, NULL, NULL);
+ !unwind_done(&state) && !unwind_error(&state);
unwind_next_frame(&state)) {
regs = unwind_get_entry_regs(&state, NULL);
if (regs) {
+ /* Success path for user tasks */
+ if (user_mode(regs))
+ goto success;
+
/*
* Kernel mode registers on the stack indicate an
* in-kernel interrupt or exception (e.g., preemption
* or a page fault), which can make frame pointers
* unreliable.
*/
- if (!user_mode(regs))
- return -EINVAL;
- /*
- * The last frame contains the user mode syscall
- * pt_regs. Skip it and finish the unwind.
- */
- unwind_next_frame(&state);
- if (!unwind_done(&state)) {
- STACKTRACE_DUMP_ONCE(task);
+ if (IS_ENABLED(CONFIG_FRAME_POINTER))
return -EINVAL;
- }
- break;
}
addr = unwind_get_return_address(&state);
@@ -132,21 +117,22 @@ __save_stack_trace_reliable(struct stack_trace *trace,
* generated code which __kernel_text_address() doesn't know
* about.
*/
- if (!addr) {
- STACKTRACE_DUMP_ONCE(task);
+ if (!addr)
return -EINVAL;
- }
if (save_stack_address(trace, addr, false))
return -EINVAL;
}
/* Check for stack corruption */
- if (unwind_error(&state)) {
- STACKTRACE_DUMP_ONCE(task);
+ if (unwind_error(&state))
+ return -EINVAL;
+
+ /* Success path for non-user tasks, i.e. kthreads and idle tasks */
+ if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
return -EINVAL;
- }
+success:
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 774ebafa97c4..be01328eb755 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -12,6 +12,7 @@
#include <linux/clockchips.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/i8253.h>
#include <linux/time.h>
#include <linux/export.h>
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 74392d9d51e0..1463468ba9a0 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -33,16 +33,13 @@ EXPORT_SYMBOL(cpu_khz);
unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);
+#define KHZ 1000
+
/*
* TSC can be unstable due to cpufreq or due to unsynced TSCs
*/
static int __read_mostly tsc_unstable;
-/* native_sched_clock() is called before tsc_init(), so
- we must start with the TSC soft disabled to prevent
- erroneous rdtsc usage on !boot_cpu_has(X86_FEATURE_TSC) processors */
-static int __read_mostly tsc_disabled = -1;
-
static DEFINE_STATIC_KEY_FALSE(__use_tsc);
int tsc_clocksource_reliable;
@@ -106,23 +103,6 @@ void cyc2ns_read_end(void)
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
*/
-static void cyc2ns_data_init(struct cyc2ns_data *data)
-{
- data->cyc2ns_mul = 0;
- data->cyc2ns_shift = 0;
- data->cyc2ns_offset = 0;
-}
-
-static void __init cyc2ns_init(int cpu)
-{
- struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
-
- cyc2ns_data_init(&c2n->data[0]);
- cyc2ns_data_init(&c2n->data[1]);
-
- seqcount_init(&c2n->seq);
-}
-
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
struct cyc2ns_data data;
@@ -138,18 +118,11 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
return ns;
}
-static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
+static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
unsigned long long ns_now;
struct cyc2ns_data data;
struct cyc2ns *c2n;
- unsigned long flags;
-
- local_irq_save(flags);
- sched_clock_idle_sleep_event();
-
- if (!khz)
- goto done;
ns_now = cycles_2_ns(tsc_now);
@@ -181,13 +154,56 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_
c2n->data[0] = data;
raw_write_seqcount_latch(&c2n->seq);
c2n->data[1] = data;
+}
+
+static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sched_clock_idle_sleep_event();
+
+ if (khz)
+ __set_cyc2ns_scale(khz, cpu, tsc_now);
-done:
sched_clock_idle_wakeup_event();
local_irq_restore(flags);
}
/*
+ * Initialize cyc2ns for boot cpu
+ */
+static void __init cyc2ns_init_boot_cpu(void)
+{
+ struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
+
+ seqcount_init(&c2n->seq);
+ __set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
+}
+
+/*
+ * Secondary CPUs do not run through tsc_init(), so set up
+ * all the scale factors for all CPUs, assuming the same
+ * speed as the bootup CPU. (cpufreq notifiers will fix this
+ * up if their speed diverges)
+ */
+static void __init cyc2ns_init_secondary_cpus(void)
+{
+ unsigned int cpu, this_cpu = smp_processor_id();
+ struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
+ struct cyc2ns_data *data = c2n->data;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu != this_cpu) {
+ seqcount_init(&c2n->seq);
+ c2n = per_cpu_ptr(&cyc2ns, cpu);
+ c2n->data[0] = data[0];
+ c2n->data[1] = data[1];
+ }
+ }
+}
+
+/*
* Scheduler clock - returns current time in nanosec units.
*/
u64 native_sched_clock(void)
@@ -248,8 +264,7 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable);
#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
- pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
- tsc_disabled = 1;
+ mark_tsc_unstable("boot parameter notsc");
return 1;
}
#else
@@ -665,30 +680,17 @@ static unsigned long cpu_khz_from_cpuid(void)
return eax_base_mhz * 1000;
}
-/**
- * native_calibrate_cpu - calibrate the cpu on boot
+/*
+ * Calibrate the CPU using the PIT, HPET, and ACPI PM timer methods. These
+ * are only available later in boot, once ACPI has been initialized.
*/
-unsigned long native_calibrate_cpu(void)
+static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
{
u64 tsc1, tsc2, delta, ref1, ref2;
unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
- unsigned long flags, latch, ms, fast_calibrate;
+ unsigned long flags, latch, ms;
int hpet = is_hpet_enabled(), i, loopmin;
- fast_calibrate = cpu_khz_from_cpuid();
- if (fast_calibrate)
- return fast_calibrate;
-
- fast_calibrate = cpu_khz_from_msr();
- if (fast_calibrate)
- return fast_calibrate;
-
- local_irq_save(flags);
- fast_calibrate = quick_pit_calibrate();
- local_irq_restore(flags);
- if (fast_calibrate)
- return fast_calibrate;
-
/*
* Run 5 calibration loops to get the lowest frequency value
* (the best estimate). We use two different calibration modes
@@ -831,6 +833,37 @@ unsigned long native_calibrate_cpu(void)
return tsc_pit_min;
}
+/**
+ * native_calibrate_cpu_early - calibrate the CPU using methods available early in boot
+ */
+unsigned long native_calibrate_cpu_early(void)
+{
+ unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();
+
+ if (!fast_calibrate)
+ fast_calibrate = cpu_khz_from_msr();
+ if (!fast_calibrate) {
+ local_irq_save(flags);
+ fast_calibrate = quick_pit_calibrate();
+ local_irq_restore(flags);
+ }
+ return fast_calibrate;
+}
+
+/**
+ * native_calibrate_cpu - calibrate the cpu
+ */
+static unsigned long native_calibrate_cpu(void)
+{
+ unsigned long tsc_freq = native_calibrate_cpu_early();
+
+ if (!tsc_freq)
+ tsc_freq = pit_hpet_ptimer_calibrate_cpu();
+
+ return tsc_freq;
+}
+
void recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
@@ -1307,7 +1340,7 @@ unreg:
static int __init init_tsc_clocksource(void)
{
- if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
+ if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
return 0;
if (tsc_unstable)
@@ -1341,40 +1374,22 @@ unreg:
*/
device_initcall(init_tsc_clocksource);
-void __init tsc_early_delay_calibrate(void)
+static bool __init determine_cpu_tsc_frequencies(bool early)
{
- unsigned long lpj;
-
- if (!boot_cpu_has(X86_FEATURE_TSC))
- return;
-
- cpu_khz = x86_platform.calibrate_cpu();
- tsc_khz = x86_platform.calibrate_tsc();
-
- tsc_khz = tsc_khz ? : cpu_khz;
- if (!tsc_khz)
- return;
-
- lpj = tsc_khz * 1000;
- do_div(lpj, HZ);
- loops_per_jiffy = lpj;
-}
-
-void __init tsc_init(void)
-{
- u64 lpj, cyc;
- int cpu;
-
- if (!boot_cpu_has(X86_FEATURE_TSC)) {
- setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
- return;
+ /* Make sure that cpu and tsc are not already calibrated */
+ WARN_ON(cpu_khz || tsc_khz);
+
+ if (early) {
+ cpu_khz = x86_platform.calibrate_cpu();
+ tsc_khz = x86_platform.calibrate_tsc();
+ } else {
+ /* We should not be here with non-native cpu calibration */
+ WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
+ cpu_khz = pit_hpet_ptimer_calibrate_cpu();
}
- cpu_khz = x86_platform.calibrate_cpu();
- tsc_khz = x86_platform.calibrate_tsc();
-
/*
- * Trust non-zero tsc_khz as authorative,
+ * Trust non-zero tsc_khz as authoritative,
* and use it to sanity check cpu_khz,
* which will be off if system timer is off.
*/
@@ -1383,52 +1398,78 @@ void __init tsc_init(void)
else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
cpu_khz = tsc_khz;
- if (!tsc_khz) {
- mark_tsc_unstable("could not calculate TSC khz");
- setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
- return;
- }
+ if (tsc_khz == 0)
+ return false;
pr_info("Detected %lu.%03lu MHz processor\n",
- (unsigned long)cpu_khz / 1000,
- (unsigned long)cpu_khz % 1000);
+ (unsigned long)cpu_khz / KHZ,
+ (unsigned long)cpu_khz % KHZ);
if (cpu_khz != tsc_khz) {
pr_info("Detected %lu.%03lu MHz TSC",
- (unsigned long)tsc_khz / 1000,
- (unsigned long)tsc_khz % 1000);
+ (unsigned long)tsc_khz / KHZ,
+ (unsigned long)tsc_khz % KHZ);
}
+ return true;
+}
+
+static unsigned long __init get_loops_per_jiffy(void)
+{
+ unsigned long lpj = tsc_khz * KHZ;
+ do_div(lpj, HZ);
+ return lpj;
+}
+
+static void __init tsc_enable_sched_clock(void)
+{
/* Sanitize TSC ADJUST before cyc2ns gets initialized */
tsc_store_and_check_tsc_adjust(true);
+ cyc2ns_init_boot_cpu();
+ static_branch_enable(&__use_tsc);
+}
+
+void __init tsc_early_init(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_TSC))
+ return;
+ if (!determine_cpu_tsc_frequencies(true))
+ return;
+ loops_per_jiffy = get_loops_per_jiffy();
+ tsc_enable_sched_clock();
+}
+
+void __init tsc_init(void)
+{
/*
- * Secondary CPUs do not run through tsc_init(), so set up
- * all the scale factors for all CPUs, assuming the same
- * speed as the bootup CPU. (cpufreq notifiers will fix this
- * up if their speed diverges)
+ * native_calibrate_cpu_early can only calibrate using methods that are
+ * available early in boot.
*/
- cyc = rdtsc();
- for_each_possible_cpu(cpu) {
- cyc2ns_init(cpu);
- set_cyc2ns_scale(tsc_khz, cpu, cyc);
- }
+ if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
+ x86_platform.calibrate_cpu = native_calibrate_cpu;
- if (tsc_disabled > 0)
+ if (!boot_cpu_has(X86_FEATURE_TSC)) {
+ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
return;
+ }
- /* now allow native_sched_clock() to use rdtsc */
+ if (!tsc_khz) {
+ /* We failed to determine frequencies earlier, try again */
+ if (!determine_cpu_tsc_frequencies(false)) {
+ mark_tsc_unstable("could not calculate TSC khz");
+ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+ return;
+ }
+ tsc_enable_sched_clock();
+ }
- tsc_disabled = 0;
- static_branch_enable(&__use_tsc);
+ cyc2ns_init_secondary_cpus();
if (!no_sched_irq_time)
enable_sched_clock_irqtime();
- lpj = ((u64)tsc_khz * 1000);
- do_div(lpj, HZ);
- lpj_fine = lpj;
-
+ lpj_fine = get_loops_per_jiffy();
use_tsc_delay();
check_system_tsc_reliable();
@@ -1455,7 +1496,7 @@ unsigned long calibrate_delay_is_known(void)
int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
const struct cpumask *mask = topology_core_cpumask(cpu);
- if (tsc_disabled || !constant_tsc || !mask)
+ if (!constant_tsc || !mask)
return 0;
sibling = cpumask_any_but(mask, cpu);
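Two computations above are worth seeing with numbers plugged in: get_loops_per_jiffy() is simply tsc_khz * 1000 / HZ, and the cyc2ns data that cyc2ns_init_secondary_cpus() copies to each CPU encodes cycles-to-nanoseconds as a fixed-point multiply-and-shift. A sketch with a hypothetical 2.4 GHz TSC and HZ=250 (the kernel derives mult/shift via clocks_calc_mult_shift(); a fixed shift of 32 is used here only to keep the example short):

#include <stdint.h>
#include <stdio.h>

#define HZ  250		/* assumed CONFIG_HZ for the example */
#define KHZ 1000

int main(void)
{
	uint64_t tsc_khz = 2400000;		/* hypothetical 2.4 GHz TSC */

	/* loops_per_jiffy: TSC cycles per timer tick. */
	uint64_t lpj = tsc_khz * KHZ / HZ;	/* -> 9600000 */

	/* Fixed-point cycles->ns scale: ns = (cyc * mult) >> shift. */
	uint64_t mult = (1000000ULL << 32) / tsc_khz;
	uint64_t cyc = tsc_khz * KHZ;		/* one second of cycles */
	uint64_t ns = (uint64_t)(((unsigned __int128)cyc * mult) >> 32);

	printf("lpj = %llu\n", (unsigned long long)lpj);
	printf("1s of cycles -> %llu ns\n", (unsigned long long)ns);
	return 0;
}

(unsigned __int128 is a GCC/Clang extension, used here only to keep the 64x64-bit multiply exact.)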
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 19afdbd7d0a7..27ef714d886c 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -1,17 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * tsc_msr.c - TSC frequency enumeration via MSR
+ * TSC frequency enumeration via MSR
*
- * Copyright (C) 2013 Intel Corporation
+ * Copyright (C) 2013, 2018 Intel Corporation
* Author: Bin Gao <bin.gao@intel.com>
- *
- * This file is released under the GPLv2.
*/
#include <linux/kernel.h>
-#include <asm/processor.h>
-#include <asm/setup.h>
+
#include <asm/apic.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/msr.h>
#include <asm/param.h>
+#include <asm/tsc.h>
#define MAX_NUM_FREQS 9
@@ -23,44 +25,48 @@
* field msr_plat does.
*/
struct freq_desc {
- u8 x86_family; /* CPU family */
- u8 x86_model; /* model */
u8 msr_plat; /* 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */
u32 freqs[MAX_NUM_FREQS];
};
-static struct freq_desc freq_desc_tables[] = {
- /* PNW */
- { 6, 0x27, 0, { 0, 0, 0, 0, 0, 99840, 0, 83200 } },
- /* CLV+ */
- { 6, 0x35, 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 } },
- /* TNG - Intel Atom processor Z3400 series */
- { 6, 0x4a, 1, { 0, 100000, 133300, 0, 0, 0, 0, 0 } },
- /* VLV2 - Intel Atom processor E3000, Z3600, Z3700 series */
- { 6, 0x37, 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 } },
- /* ANN - Intel Atom processor Z3500 series */
- { 6, 0x5a, 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 } },
- /* AMT - Intel Atom processor X7-Z8000 and X5-Z8000 series */
- { 6, 0x4c, 1, { 83300, 100000, 133300, 116700,
- 80000, 93300, 90000, 88900, 87500 } },
+/*
+ * Penwell and Clovertrail use spread spectrum clock,
+ * so the freq number is not exactly the same as reported
+ * by MSR based on SDM.
+ */
+static const struct freq_desc freq_desc_pnw = {
+ 0, { 0, 0, 0, 0, 0, 99840, 0, 83200 }
};
-static int match_cpu(u8 family, u8 model)
-{
- int i;
+static const struct freq_desc freq_desc_clv = {
+ 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 }
+};
- for (i = 0; i < ARRAY_SIZE(freq_desc_tables); i++) {
- if ((family == freq_desc_tables[i].x86_family) &&
- (model == freq_desc_tables[i].x86_model))
- return i;
- }
+static const struct freq_desc freq_desc_byt = {
+ 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 }
+};
- return -1;
-}
+static const struct freq_desc freq_desc_cht = {
+ 1, { 83300, 100000, 133300, 116700, 80000, 93300, 90000, 88900, 87500 }
+};
-/* Map CPU reference clock freq ID(0-7) to CPU reference clock freq(KHz) */
-#define id_to_freq(cpu_index, freq_id) \
- (freq_desc_tables[cpu_index].freqs[freq_id])
+static const struct freq_desc freq_desc_tng = {
+ 1, { 0, 100000, 133300, 0, 0, 0, 0, 0 }
+};
+
+static const struct freq_desc freq_desc_ann = {
+ 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 }
+};
+
+static const struct x86_cpu_id tsc_msr_cpu_ids[] = {
+ INTEL_CPU_FAM6(ATOM_PENWELL, freq_desc_pnw),
+ INTEL_CPU_FAM6(ATOM_CLOVERVIEW, freq_desc_clv),
+ INTEL_CPU_FAM6(ATOM_SILVERMONT1, freq_desc_byt),
+ INTEL_CPU_FAM6(ATOM_AIRMONT, freq_desc_cht),
+ INTEL_CPU_FAM6(ATOM_MERRIFIELD, freq_desc_tng),
+ INTEL_CPU_FAM6(ATOM_MOOREFIELD, freq_desc_ann),
+ {}
+};
/*
* MSR-based CPU/TSC frequency discovery for certain CPUs.
@@ -70,18 +76,17 @@ static int match_cpu(u8 family, u8 model)
*/
unsigned long cpu_khz_from_msr(void)
{
- u32 lo, hi, ratio, freq_id, freq;
+ u32 lo, hi, ratio, freq;
+ const struct freq_desc *freq_desc;
+ const struct x86_cpu_id *id;
unsigned long res;
- int cpu_index;
-
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
- return 0;
- cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model);
- if (cpu_index < 0)
+ id = x86_match_cpu(tsc_msr_cpu_ids);
+ if (!id)
return 0;
- if (freq_desc_tables[cpu_index].msr_plat) {
+ freq_desc = (struct freq_desc *)id->driver_data;
+ if (freq_desc->msr_plat) {
rdmsr(MSR_PLATFORM_INFO, lo, hi);
ratio = (lo >> 8) & 0xff;
} else {
@@ -91,8 +96,9 @@ unsigned long cpu_khz_from_msr(void)
/* Get FSB FREQ ID */
rdmsr(MSR_FSB_FREQ, lo, hi);
- freq_id = lo & 0x7;
- freq = id_to_freq(cpu_index, freq_id);
+
+	/* Map the CPU reference clock freq ID (0-7) to the reference clock freq (kHz) */
+ freq = freq_desc->freqs[lo & 0x7];
/* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
res = freq * ratio;
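So the MSR path boils down to a table lookup times a bus ratio. A worked sketch with the Bay Trail table from above and hypothetical register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* freq_desc_byt freqs[] from the patch: kHz per FSB_FREQ id 0-7 */
	const uint32_t freqs[8] = { 83300, 100000, 133300, 116700,
				    80000, 0, 0, 0 };

	uint32_t plat_info = 0x1800;	/* hypothetical MSR_PLATFORM_INFO */
	uint32_t fsb_freq = 0x1;	/* hypothetical MSR_FSB_FREQ */

	uint32_t ratio = (plat_info >> 8) & 0xff;	/* bus ratio: 24 */
	uint32_t freq = freqs[fsb_freq & 0x7];		/* ref clock: 100000 kHz */

	printf("tsc = %u kHz (%u kHz bus * %u)\n", freq * ratio, freq, ratio);
	return 0;
}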
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index feb28fee6cea..26038eacf74a 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -198,7 +198,7 @@ static int orc_sort_cmp(const void *_a, const void *_b)
* whitelisted .o files which didn't get objtool generation.
*/
orc_a = cur_orc_table + (a - cur_orc_ip_table);
- return orc_a->sp_reg == ORC_REG_UNDEFINED ? -1 : 1;
+ return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}
#ifdef CONFIG_MODULES
@@ -352,7 +352,7 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
bool unwind_next_frame(struct unwind_state *state)
{
- unsigned long ip_p, sp, orig_ip, prev_sp = state->sp;
+ unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp;
enum stack_type prev_type = state->stack_info.type;
struct orc_entry *orc;
bool indirect = false;
@@ -363,9 +363,9 @@ bool unwind_next_frame(struct unwind_state *state)
/* Don't let modules unload while we're reading their ORC data. */
preempt_disable();
- /* Have we reached the end? */
+ /* End-of-stack check for user tasks: */
if (state->regs && user_mode(state->regs))
- goto done;
+ goto the_end;
/*
* Find the orc_entry associated with the text address.
@@ -374,9 +374,16 @@ bool unwind_next_frame(struct unwind_state *state)
* calls and calls to noreturn functions.
*/
orc = orc_find(state->signal ? state->ip : state->ip - 1);
- if (!orc || orc->sp_reg == ORC_REG_UNDEFINED)
- goto done;
- orig_ip = state->ip;
+ if (!orc)
+ goto err;
+
+ /* End-of-stack check for kernel threads: */
+ if (orc->sp_reg == ORC_REG_UNDEFINED) {
+ if (!orc->end)
+ goto err;
+
+ goto the_end;
+ }
/* Find the previous frame's stack: */
switch (orc->sp_reg) {
@@ -402,7 +409,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg R10 at ip %pB\n",
(void *)state->ip);
- goto done;
+ goto err;
}
sp = state->regs->r10;
break;
@@ -411,7 +418,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg R13 at ip %pB\n",
(void *)state->ip);
- goto done;
+ goto err;
}
sp = state->regs->r13;
break;
@@ -420,7 +427,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg DI at ip %pB\n",
(void *)state->ip);
- goto done;
+ goto err;
}
sp = state->regs->di;
break;
@@ -429,7 +436,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg DX at ip %pB\n",
(void *)state->ip);
- goto done;
+ goto err;
}
sp = state->regs->dx;
break;
@@ -437,12 +444,12 @@ bool unwind_next_frame(struct unwind_state *state)
default:
orc_warn("unknown SP base reg %d for ip %pB\n",
orc->sp_reg, (void *)state->ip);
- goto done;
+ goto err;
}
if (indirect) {
if (!deref_stack_reg(state, sp, &sp))
- goto done;
+ goto err;
}
/* Find IP, SP and possibly regs: */
@@ -451,7 +458,7 @@ bool unwind_next_frame(struct unwind_state *state)
ip_p = sp - sizeof(long);
if (!deref_stack_reg(state, ip_p, &state->ip))
- goto done;
+ goto err;
state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
state->ip, (void *)ip_p);
@@ -465,7 +472,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
orc_warn("can't dereference registers at %p for ip %pB\n",
(void *)sp, (void *)orig_ip);
- goto done;
+ goto err;
}
state->regs = (struct pt_regs *)sp;
@@ -477,7 +484,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
orc_warn("can't dereference iret registers at %p for ip %pB\n",
(void *)sp, (void *)orig_ip);
- goto done;
+ goto err;
}
state->regs = (void *)sp - IRET_FRAME_OFFSET;
@@ -500,18 +507,18 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_PREV_SP:
if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
- goto done;
+ goto err;
break;
case ORC_REG_BP:
if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
- goto done;
+ goto err;
break;
default:
orc_warn("unknown BP base reg %d for ip %pB\n",
orc->bp_reg, (void *)orig_ip);
- goto done;
+ goto err;
}
/* Prevent a recursive loop due to bad ORC data: */
@@ -520,13 +527,16 @@ bool unwind_next_frame(struct unwind_state *state)
state->sp <= prev_sp) {
orc_warn("stack going in the wrong direction? ip=%pB\n",
(void *)orig_ip);
- goto done;
+ goto err;
}
preempt_enable();
return true;
-done:
+err:
+ state->error = true;
+
+the_end:
preempt_enable();
state->stack_info.type = STACK_TYPE_UNKNOWN;
return false;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 9d0b5af7db91..1c03e4aa6474 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -149,7 +149,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
preempt_disable();
tsk->thread.sp0 = vm86->saved_sp0;
tsk->thread.sysenter_cs = __KERNEL_CS;
- update_sp0(tsk);
+ update_task_stack(tsk);
refresh_sysenter_cs(&tsk->thread);
vm86->saved_sp0 = 0;
preempt_enable();
@@ -374,7 +374,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
refresh_sysenter_cs(&tsk->thread);
}
- update_sp0(tsk);
+ update_task_stack(tsk);
preempt_enable();
if (vm86->flags & VM86_SCREEN_BITMAP)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 5e1458f609a1..8bde0a419f86 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -55,19 +55,22 @@ jiffies_64 = jiffies;
* so we can enable protection checks as well as retain 2MB large page
* mappings for kernel text.
*/
-#define X64_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE);
+#define X86_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE);
-#define X64_ALIGN_RODATA_END \
+#define X86_ALIGN_RODATA_END \
. = ALIGN(HPAGE_SIZE); \
- __end_rodata_hpage_align = .;
+ __end_rodata_hpage_align = .; \
+ __end_rodata_aligned = .;
#define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE);
#else
-#define X64_ALIGN_RODATA_BEGIN
-#define X64_ALIGN_RODATA_END
+#define X86_ALIGN_RODATA_BEGIN
+#define X86_ALIGN_RODATA_END \
+ . = ALIGN(PAGE_SIZE); \
+ __end_rodata_aligned = .;
#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
@@ -141,9 +144,9 @@ SECTIONS
/* .text should occupy whole number of pages */
. = ALIGN(PAGE_SIZE);
- X64_ALIGN_RODATA_BEGIN
+ X86_ALIGN_RODATA_BEGIN
RO_DATA(PAGE_SIZE)
- X64_ALIGN_RODATA_END
+ X86_ALIGN_RODATA_END
/* Data */
.data : AT(ADDR(.data) - LOAD_OFFSET) {
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 3ab867603e81..2792b5573818 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -109,7 +109,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
static void default_nmi_init(void) { };
struct x86_platform_ops x86_platform __ro_after_init = {
- .calibrate_cpu = native_calibrate_cpu,
+ .calibrate_cpu = native_calibrate_cpu_early,
.calibrate_tsc = native_calibrate_tsc,
.get_wallclock = mach_get_cmos_time,
.set_wallclock = mach_set_rtc_mmss,
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b5cd8465d44f..d536d457517b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1379,7 +1379,7 @@ static void apic_timer_expired(struct kvm_lapic *apic)
* using swait_active() is safe.
*/
if (swait_active(q))
- swake_up(q);
+ swake_up_one(q);
if (apic_lvtt_tscdeadline(apic))
ktimer->expired_tscdeadline = ktimer->tscdeadline;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6b8f11521c41..a44e568363a4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3840,6 +3840,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
{
int r = 1;
+ vcpu->arch.l1tf_flush_l1d = true;
switch (vcpu->arch.apf.host_apf_reason) {
default:
trace_kvm_page_fault(fault_address, error_code);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5d8e317c2b04..46b428c0990e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -188,6 +188,150 @@ module_param(ple_window_max, uint, 0444);
extern const ulong vmx_return;
+static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
+static DEFINE_MUTEX(vmx_l1d_flush_mutex);
+
+/* Storage for pre module init parameter parsing */
+static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
+
+static const struct {
+ const char *option;
+ enum vmx_l1d_flush_state cmd;
+} vmentry_l1d_param[] = {
+ {"auto", VMENTER_L1D_FLUSH_AUTO},
+ {"never", VMENTER_L1D_FLUSH_NEVER},
+ {"cond", VMENTER_L1D_FLUSH_COND},
+ {"always", VMENTER_L1D_FLUSH_ALWAYS},
+};
+
+#define L1D_CACHE_ORDER 4
+static void *vmx_l1d_flush_pages;
+
+static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
+{
+ struct page *page;
+ unsigned int i;
+
+ if (!enable_ept) {
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+ return 0;
+ }
+
+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
+ u64 msr;
+
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
+ if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
+ return 0;
+ }
+ }
+
+	/* If set to auto, use the default L1TF mitigation method */
+ if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
+ switch (l1tf_mitigation) {
+ case L1TF_MITIGATION_OFF:
+ l1tf = VMENTER_L1D_FLUSH_NEVER;
+ break;
+ case L1TF_MITIGATION_FLUSH_NOWARN:
+ case L1TF_MITIGATION_FLUSH:
+ case L1TF_MITIGATION_FLUSH_NOSMT:
+ l1tf = VMENTER_L1D_FLUSH_COND;
+ break;
+ case L1TF_MITIGATION_FULL:
+ case L1TF_MITIGATION_FULL_FORCE:
+ l1tf = VMENTER_L1D_FLUSH_ALWAYS;
+ break;
+ }
+ } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
+ l1tf = VMENTER_L1D_FLUSH_ALWAYS;
+ }
+
+ if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
+ !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+ if (!page)
+ return -ENOMEM;
+ vmx_l1d_flush_pages = page_address(page);
+
+ /*
+ * Initialize each page with a different pattern in
+ * order to protect against KSM in the nested
+ * virtualization case.
+ */
+ for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
+ memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
+ PAGE_SIZE);
+ }
+ }
+
+ l1tf_vmx_mitigation = l1tf;
+
+ if (l1tf != VMENTER_L1D_FLUSH_NEVER)
+ static_branch_enable(&vmx_l1d_should_flush);
+ else
+ static_branch_disable(&vmx_l1d_should_flush);
+
+ if (l1tf == VMENTER_L1D_FLUSH_COND)
+ static_branch_enable(&vmx_l1d_flush_cond);
+ else
+ static_branch_disable(&vmx_l1d_flush_cond);
+ return 0;
+}
+
+static int vmentry_l1d_flush_parse(const char *s)
+{
+ unsigned int i;
+
+ if (s) {
+ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+ if (sysfs_streq(s, vmentry_l1d_param[i].option))
+ return vmentry_l1d_param[i].cmd;
+ }
+ }
+ return -EINVAL;
+}
+
+static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+{
+ int l1tf, ret;
+
+ if (!boot_cpu_has(X86_BUG_L1TF))
+ return 0;
+
+ l1tf = vmentry_l1d_flush_parse(s);
+ if (l1tf < 0)
+ return l1tf;
+
+ /*
+ * Has vmx_init() run already? If not then this is the pre init
+ * parameter parsing. In that case just store the value and let
+ * vmx_init() do the proper setup after enable_ept has been
+ * established.
+ */
+ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
+ vmentry_l1d_flush_param = l1tf;
+ return 0;
+ }
+
+ mutex_lock(&vmx_l1d_flush_mutex);
+ ret = vmx_setup_l1d_flush(l1tf);
+ mutex_unlock(&vmx_l1d_flush_mutex);
+ return ret;
+}
+
+static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
+{
+ return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
+}
+
+static const struct kernel_param_ops vmentry_l1d_flush_ops = {
+ .set = vmentry_l1d_flush_set,
+ .get = vmentry_l1d_flush_get,
+};
+module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
+
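Because the parameter is registered with mode 0644, the mitigation mode can presumably also be changed at runtime through /sys/module/kvm_intel/parameters/vmentry_l1d_flush rather than only at load time: writes that arrive before vmx_init() has run merely record the value in vmentry_l1d_flush_param, while later writes reconfigure the flush machinery via vmx_setup_l1d_flush() under vmx_l1d_flush_mutex.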
struct kvm_vmx {
struct kvm kvm;
@@ -757,6 +901,11 @@ static inline int pi_test_sn(struct pi_desc *pi_desc)
(unsigned long *)&pi_desc->control);
}
+struct vmx_msrs {
+ unsigned int nr;
+ struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
+};
+
struct vcpu_vmx {
struct kvm_vcpu vcpu;
unsigned long host_rsp;
@@ -790,9 +939,8 @@ struct vcpu_vmx {
struct loaded_vmcs *loaded_vmcs;
bool __launched; /* temporary, used in vmx_vcpu_run */
struct msr_autoload {
- unsigned nr;
- struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
- struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+ struct vmx_msrs guest;
+ struct vmx_msrs host;
} msr_autoload;
struct {
int loaded;
@@ -2377,9 +2525,20 @@ static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
vm_exit_controls_clearbit(vmx, exit);
}
+static int find_msr(struct vmx_msrs *m, unsigned int msr)
+{
+ unsigned int i;
+
+ for (i = 0; i < m->nr; ++i) {
+ if (m->val[i].index == msr)
+ return i;
+ }
+ return -ENOENT;
+}
+
static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
- unsigned i;
+ int i;
struct msr_autoload *m = &vmx->msr_autoload;
switch (msr) {
@@ -2400,18 +2559,21 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
}
break;
}
+ i = find_msr(&m->guest, msr);
+ if (i < 0)
+ goto skip_guest;
+ --m->guest.nr;
+ m->guest.val[i] = m->guest.val[m->guest.nr];
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
- for (i = 0; i < m->nr; ++i)
- if (m->guest[i].index == msr)
- break;
-
- if (i == m->nr)
+skip_guest:
+ i = find_msr(&m->host, msr);
+ if (i < 0)
return;
- --m->nr;
- m->guest[i] = m->guest[m->nr];
- m->host[i] = m->host[m->nr];
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+
+ --m->host.nr;
+ m->host.val[i] = m->host.val[m->host.nr];
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
}
static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
@@ -2426,9 +2588,9 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
}
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
- u64 guest_val, u64 host_val)
+ u64 guest_val, u64 host_val, bool entry_only)
{
- unsigned i;
+ int i, j = 0;
struct msr_autoload *m = &vmx->msr_autoload;
switch (msr) {
@@ -2463,24 +2625,31 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
- for (i = 0; i < m->nr; ++i)
- if (m->guest[i].index == msr)
- break;
+ i = find_msr(&m->guest, msr);
+ if (!entry_only)
+ j = find_msr(&m->host, msr);
- if (i == NR_AUTOLOAD_MSRS) {
+ if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
printk_once(KERN_WARNING "Not enough msr switch entries. "
"Can't add msr %x\n", msr);
return;
- } else if (i == m->nr) {
- ++m->nr;
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
}
+ if (i < 0) {
+ i = m->guest.nr++;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+ }
+ m->guest.val[i].index = msr;
+ m->guest.val[i].value = guest_val;
+
+ if (entry_only)
+ return;
- m->guest[i].index = msr;
- m->guest[i].value = guest_val;
- m->host[i].index = msr;
- m->host[i].value = host_val;
+ if (j < 0) {
+ j = m->host.nr++;
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+ m->host.val[j].index = msr;
+ m->host.val[j].value = host_val;
}
static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
@@ -2524,7 +2693,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
guest_efer &= ~EFER_LME;
if (guest_efer != host_efer)
add_atomic_switch_msr(vmx, MSR_EFER,
- guest_efer, host_efer);
+ guest_efer, host_efer, false);
return false;
} else {
guest_efer &= ~ignore_bits;
@@ -3987,7 +4156,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu->arch.ia32_xss = data;
if (vcpu->arch.ia32_xss != host_xss)
add_atomic_switch_msr(vmx, MSR_IA32_XSS,
- vcpu->arch.ia32_xss, host_xss);
+ vcpu->arch.ia32_xss, host_xss, false);
else
clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
break;
@@ -6274,9 +6443,9 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
- vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
- vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
@@ -6296,8 +6465,7 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
++vmx->nmsrs;
}
- if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities);
+ vmx->arch_capabilities = kvm_get_arch_capabilities();
vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
@@ -9548,6 +9716,79 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
}
}
+/*
+ * Software based L1D cache flush which is used when microcode providing
+ * the cache control MSR is not loaded.
+ *
+ * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but
+ * flushing it requires reading 64 KiB because the replacement algorithm
+ * is not exactly LRU. This could be sized at runtime via topology
+ * information, but as all relevant affected CPUs have a 32 KiB L1D cache
+ * there is no point in doing so.
+ */
+
+static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
+{
+ int size = PAGE_SIZE << L1D_CACHE_ORDER;
+
+ /*
+	 * This code is only executed when the flush mode is 'cond' or
+ * 'always'
+ */
+ if (static_branch_likely(&vmx_l1d_flush_cond)) {
+ bool flush_l1d;
+
+ /*
+ * Clear the per-vcpu flush bit, it gets set again
+ * either from vcpu_run() or from one of the unsafe
+ * VMEXIT handlers.
+ */
+ flush_l1d = vcpu->arch.l1tf_flush_l1d;
+ vcpu->arch.l1tf_flush_l1d = false;
+
+ /*
+ * Clear the per-cpu flush bit, it gets set again from
+ * the interrupt handlers.
+ */
+ flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
+ kvm_clear_cpu_l1tf_flush_l1d();
+
+ if (!flush_l1d)
+ return;
+ }
+
+ vcpu->stat.l1d_flush++;
+
+ if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+ wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+ return;
+ }
+
+ asm volatile(
+ /* First ensure the pages are in the TLB */
+ "xorl %%eax, %%eax\n"
+ ".Lpopulate_tlb:\n\t"
+ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+ "addl $4096, %%eax\n\t"
+ "cmpl %%eax, %[size]\n\t"
+ "jne .Lpopulate_tlb\n\t"
+ "xorl %%eax, %%eax\n\t"
+ "cpuid\n\t"
+ /* Now fill the cache */
+ "xorl %%eax, %%eax\n"
+ ".Lfill_cache:\n"
+ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+ "addl $64, %%eax\n\t"
+ "cmpl %%eax, %[size]\n\t"
+ "jne .Lfill_cache\n\t"
+ "lfence\n"
+ :: [flush_pages] "r" (vmx_l1d_flush_pages),
+ [size] "r" (size)
+ : "eax", "ebx", "ecx", "edx");
+}
+
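A C rendering of the two-pass walk in that asm block makes the strides easier to see; this is only an illustrative sketch (the real code stays in asm precisely so the compiler cannot reorder, combine, or elide the loads):

/* Hypothetical C equivalent of the .Lpopulate_tlb / .Lfill_cache loops. */
static void l1d_flush_sw(const volatile unsigned char *pages, int size)
{
	int off;

	/* Pass 1: touch one byte per 4 KiB page so every page is in the TLB. */
	for (off = 0; off < size; off += 4096)
		(void)pages[off];

	/* The asm executes cpuid here as a serializing barrier. */

	/* Pass 2: read every 64-byte line to displace the old L1D contents. */
	for (off = 0; off < size; off += 64)
		(void)pages[off];

	/* The asm ends with lfence to order the reads before VM entry. */
}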
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -9949,7 +10190,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
clear_atomic_switch_msr(vmx, msrs[i].msr);
else
add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
- msrs[i].host);
+ msrs[i].host, false);
}
static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
@@ -10044,6 +10285,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
(unsigned long)&current_evmcs->host_rsp : 0;
+ if (static_branch_unlikely(&vmx_l1d_should_flush))
+ vmx_l1d_flush(vcpu);
+
asm(
/* Store host registers */
"push %%" _ASM_DX "; push %%" _ASM_BP ";"
@@ -10403,10 +10647,37 @@ free_vcpu:
return ERR_PTR(err);
}
+#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
+#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
+
static int vmx_vm_init(struct kvm *kvm)
{
if (!ple_gap)
kvm->arch.pause_in_guest = true;
+
+ if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
+ switch (l1tf_mitigation) {
+ case L1TF_MITIGATION_OFF:
+ case L1TF_MITIGATION_FLUSH_NOWARN:
+ /* 'I explicitly don't care' is set */
+ break;
+ case L1TF_MITIGATION_FLUSH:
+ case L1TF_MITIGATION_FLUSH_NOSMT:
+ case L1TF_MITIGATION_FULL:
+ /*
+ * Warn upon starting the first VM in a potentially
+ * insecure environment.
+ */
+ if (cpu_smt_control == CPU_SMT_ENABLED)
+ pr_warn_once(L1TF_MSG_SMT);
+ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
+ pr_warn_once(L1TF_MSG_L1D);
+ break;
+ case L1TF_MITIGATION_FULL_FORCE:
+ /* Flush is enforced */
+ break;
+ }
+ }
return 0;
}
@@ -11260,10 +11531,10 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
* Set the MSR load/store lists to match L0's settings.
*/
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
- vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
- vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
set_cr4_guest_host_mask(vmx);
@@ -11899,6 +12170,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
return ret;
}
+ /* Hide L1D cache contents from the nested guest. */
+ vmx->vcpu.arch.l1tf_flush_l1d = true;
+
/*
* If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
* by event injection, halt vcpu.
@@ -12419,8 +12693,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vmx_segment_cache_clear(vmx);
/* Update any VMCS fields that might have changed while L2 ran */
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (vmx->hv_deadline_tsc == -1)
vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
@@ -13137,6 +13411,51 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.enable_smi_window = enable_smi_window,
};
+static void vmx_cleanup_l1d_flush(void)
+{
+ if (vmx_l1d_flush_pages) {
+ free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
+ vmx_l1d_flush_pages = NULL;
+ }
+ /* Restore state so sysfs ignores VMX */
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+}
+
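+/*
+ * vmx_exit() is defined ahead of vmx_init() and loses its __exit
+ * annotation so that the init path can unwind through it when the
+ * L1TF mitigation setup fails.
+ */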
+static void vmx_exit(void)
+{
+#ifdef CONFIG_KEXEC_CORE
+ RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
+ synchronize_rcu();
+#endif
+
+ kvm_exit();
+
+#if IS_ENABLED(CONFIG_HYPERV)
+ if (static_branch_unlikely(&enable_evmcs)) {
+ int cpu;
+ struct hv_vp_assist_page *vp_ap;
+ /*
+ * Reset everything to support using non-enlightened VMCS
+ * access later (e.g. when we reload the module with
+ * enlightened_vmcs=0)
+ */
+ for_each_online_cpu(cpu) {
+ vp_ap = hv_get_vp_assist_page(cpu);
+
+ if (!vp_ap)
+ continue;
+
+ vp_ap->current_nested_vmcs = 0;
+ vp_ap->enlighten_vmentry = 0;
+ }
+
+ static_branch_disable(&enable_evmcs);
+ }
+#endif
+ vmx_cleanup_l1d_flush();
+}
+module_exit(vmx_exit);
+
static int __init vmx_init(void)
{
int r;
@@ -13171,10 +13490,25 @@ static int __init vmx_init(void)
#endif
r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
- __alignof__(struct vcpu_vmx), THIS_MODULE);
+ __alignof__(struct vcpu_vmx), THIS_MODULE);
if (r)
return r;
+ /*
+ * Must be called after kvm_init() so enable_ept is properly set
+ * up. Hand in the mitigation mode value that was stored by the
+ * pre-module-init parameter parser. If no parameter was given, it
+ * contains 'auto', which is turned into the default 'cond'
+ * mitigation mode.
+ */
+ if (boot_cpu_has(X86_BUG_L1TF)) {
+ r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
+ if (r) {
+ vmx_exit();
+ return r;
+ }
+ }
+
#ifdef CONFIG_KEXEC_CORE
rcu_assign_pointer(crash_vmclear_loaded_vmcss,
crash_vmclear_local_loaded_vmcss);
@@ -13183,39 +13517,4 @@ static int __init vmx_init(void)
return 0;
}
-
-static void __exit vmx_exit(void)
-{
-#ifdef CONFIG_KEXEC_CORE
- RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
- synchronize_rcu();
-#endif
-
- kvm_exit();
-
-#if IS_ENABLED(CONFIG_HYPERV)
- if (static_branch_unlikely(&enable_evmcs)) {
- int cpu;
- struct hv_vp_assist_page *vp_ap;
- /*
- * Reset everything to support using non-enlightened VMCS
- * access later (e.g. when we reload the module with
- * enlightened_vmcs=0)
- */
- for_each_online_cpu(cpu) {
- vp_ap = hv_get_vp_assist_page(cpu);
-
- if (!vp_ap)
- continue;
-
- vp_ap->current_nested_vmcs = 0;
- vp_ap->enlighten_vmentry = 0;
- }
-
- static_branch_disable(&enable_evmcs);
- }
-#endif
-}
-
-module_init(vmx_init)
-module_exit(vmx_exit)
+module_init(vmx_init);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2b812b3c5088..a5caa5e5480c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -195,6 +195,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "irq_injections", VCPU_STAT(irq_injections) },
{ "nmi_injections", VCPU_STAT(nmi_injections) },
{ "req_event", VCPU_STAT(req_event) },
+ { "l1d_flush", VCPU_STAT(l1d_flush) },
{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
@@ -1102,11 +1103,35 @@ static u32 msr_based_features[] = {
static unsigned int num_msr_based_features;
+u64 kvm_get_arch_capabilities(void)
+{
+ u64 data;
+
+ rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data);
+
+ /*
+ * If we're doing cache flushes (either "always" or "cond")
+ * we will do one whenever the guest does a vmlaunch/vmresume.
+ * If an outer hypervisor is doing the cache flush for us
+ * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
+ * capability to the guest too, and if EPT is disabled we're not
+ * vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will
+ * require a nested hypervisor to do a flush of its own.
+ */
+ if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
+ data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
+
static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
{
switch (msr->index) {
- case MSR_IA32_UCODE_REV:
case MSR_IA32_ARCH_CAPABILITIES:
+ msr->data = kvm_get_arch_capabilities();
+ break;
+ case MSR_IA32_UCODE_REV:
rdmsrl_safe(msr->index, &msr->data);
break;
default:
@@ -4876,6 +4901,9 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
unsigned int bytes, struct x86_exception *exception)
{
+ /* kvm_write_guest_virt_system can pull in tons of pages. */
+ vcpu->arch.l1tf_flush_l1d = true;
+
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
PFERR_WRITE_MASK, exception);
}
@@ -6052,6 +6080,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
bool writeback = true;
bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
+ vcpu->arch.l1tf_flush_l1d = true;
+
/*
* Clear write_fault_to_shadow_pgtable here to ensure it is
* never reused.
@@ -7581,6 +7611,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm;
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+ vcpu->arch.l1tf_flush_l1d = true;
for (;;) {
if (kvm_vcpu_running(vcpu)) {
@@ -8700,6 +8731,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
+ vcpu->arch.l1tf_flush_l1d = true;
kvm_x86_ops->sched_in(vcpu, cpu);
}
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 298ef1479240..3b24dc05251c 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -256,7 +256,7 @@ ENTRY(__memcpy_mcsafe)
/* Copy successful. Return zero */
.L_done_memcpy_trap:
- xorq %rax, %rax
+ xorl %eax, %eax
ret
ENDPROC(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 2f3c9196b834..a12afff146d1 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -111,6 +111,8 @@ static struct addr_marker address_markers[] = {
[END_OF_SPACE_NR] = { -1, NULL }
};
+#define INIT_PGD ((pgd_t *) &init_top_pgt)
+
#else /* CONFIG_X86_64 */
enum address_markers_idx {
@@ -121,6 +123,9 @@ enum address_markers_idx {
#ifdef CONFIG_HIGHMEM
PKMAP_BASE_NR,
#endif
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ LDT_NR,
+#endif
CPU_ENTRY_AREA_NR,
FIXADDR_START_NR,
END_OF_SPACE_NR,
@@ -134,11 +139,16 @@ static struct addr_marker address_markers[] = {
#ifdef CONFIG_HIGHMEM
[PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" },
#endif
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ [LDT_NR] = { 0UL, "LDT remap" },
+#endif
[CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" },
[FIXADDR_START_NR] = { 0UL, "Fixmap area" },
[END_OF_SPACE_NR] = { -1, NULL }
};
+#define INIT_PGD (swapper_pg_dir)
+
#endif /* !CONFIG_X86_64 */
/* Multipliers for offsets within the PTEs */
@@ -496,11 +506,7 @@ static inline bool is_hypervisor_range(int idx)
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
bool checkwx, bool dmesg)
{
-#ifdef CONFIG_X86_64
- pgd_t *start = (pgd_t *) &init_top_pgt;
-#else
- pgd_t *start = swapper_pg_dir;
-#endif
+ pgd_t *start = INIT_PGD;
pgprotval_t prot, eff;
int i;
struct pg_state st = {};
@@ -563,12 +569,13 @@ void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
-static void ptdump_walk_user_pgd_level_checkwx(void)
+void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
- pgd_t *pgd = (pgd_t *) &init_top_pgt;
+ pgd_t *pgd = INIT_PGD;
- if (!static_cpu_has(X86_FEATURE_PTI))
+ if (!(__supported_pte_mask & _PAGE_NX) ||
+ !static_cpu_has(X86_FEATURE_PTI))
return;
pr_info("x86/mm: Checking user space page tables\n");
@@ -580,7 +587,6 @@ static void ptdump_walk_user_pgd_level_checkwx(void)
void ptdump_walk_pgd_level_checkwx(void)
{
ptdump_walk_pgd_level_core(NULL, NULL, true, false);
- ptdump_walk_user_pgd_level_checkwx();
}
static int __init pt_dump_init(void)
@@ -609,6 +615,9 @@ static int __init pt_dump_init(void)
# endif
address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
+# ifdef CONFIG_MODIFY_LDT_SYSCALL
+ address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
+# endif
#endif
return 0;
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2aafa6ab6103..db1c042e9853 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -317,8 +317,6 @@ static noinline int vmalloc_fault(unsigned long address)
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
- WARN_ON_ONCE(in_nmi());
-
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index cee58a972cb2..acfab322fbe0 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -4,6 +4,8 @@
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h> /* for max_low_pfn */
+#include <linux/swapfile.h>
+#include <linux/swapops.h>
#include <asm/set_memory.h>
#include <asm/e820/api.h>
@@ -773,13 +775,44 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
}
}
+/*
+ * begin/end can be in the direct map or the "high kernel mapping"
+ * used for the kernel image only. free_init_pages() will do the
+ * right thing for either kind of address.
+ */
+void free_kernel_image_pages(void *begin, void *end)
+{
+ unsigned long begin_ul = (unsigned long)begin;
+ unsigned long end_ul = (unsigned long)end;
+ unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;
+
+ free_init_pages("unused kernel image", begin_ul, end_ul);
+
+ /*
+ * PTI maps some of the kernel into userspace. For performance,
+ * this includes some kernel areas that do not contain secrets.
+ * Those areas might be adjacent to the parts of the kernel image
+ * being freed, which may contain secrets. Remove the "high kernel
+ * image mapping" for these freed areas, ensuring they are not even
+ * potentially vulnerable to Meltdown regardless of the specific
+ * optimizations PTI is currently using.
+ *
+ * The "noalias" prevents unmapping the direct map alias which is
+ * needed to access the freed pages.
+ *
+ * This is only valid for 64-bit kernels. 32-bit has only the one
+ * (direct) mapping of the kernel, which obviously cannot be unmapped
+ * this way.
+ */
+ if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
+ set_memory_np_noalias(begin_ul, len_pages);
+}
+
void __ref free_initmem(void)
{
e820__reallocate_tables();
- free_init_pages("unused kernel",
- (unsigned long)(&__init_begin),
- (unsigned long)(&__init_end));
+ free_kernel_image_pages(&__init_begin, &__init_end);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -880,3 +913,26 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
__pte2cachemode_tbl[entry] = cache;
}
+
+#ifdef CONFIG_SWAP
+unsigned long max_swapfile_size(void)
+{
+ unsigned long pages;
+
+ pages = generic_max_swapfile_size();
+
+ if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+ /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
+ unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
+ /*
+ * Swap offsets are also encoded 3 bits below the pfn bits,
+ * which makes the usable limit higher.
+ */
+#if CONFIG_PGTABLE_LEVELS > 2
+ l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
+#endif
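+ /*
+ * For instance, with PAGE_SHIFT == 12 and SWP_OFFSET_FIRST_BIT == 9
+ * (as on x86-64) the shift above is 3, so the allowed swap size is
+ * 8x the MAX_PA/2 limit applied to RAM.
+ */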
+ pages = min_t(unsigned long, l1tf_limit, pages);
+ }
+ return pages;
+}
+#endif
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a688617c727e..dd519f372169 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1283,20 +1283,10 @@ void mark_rodata_ro(void)
set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
- free_init_pages("unused kernel",
- (unsigned long) __va(__pa_symbol(text_end)),
- (unsigned long) __va(__pa_symbol(rodata_start)));
- free_init_pages("unused kernel",
- (unsigned long) __va(__pa_symbol(rodata_end)),
- (unsigned long) __va(__pa_symbol(_sdata)));
+ free_kernel_image_pages((void *)text_end, (void *)rodata_start);
+ free_kernel_image_pages((void *)rodata_end, (void *)_sdata);
debug_checkwx();
-
- /*
- * Do this after all of the manipulation of the
- * kernel text page tables are complete.
- */
- pti_clone_kernel_text();
}
int kern_addr_valid(unsigned long addr)
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 7c8686709636..79eb55ce69a9 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -126,24 +126,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
+ pmd_t new_pmd;
pmdval_t v = pmd_val(*pmd);
if (clear) {
- *old = v & _PAGE_PRESENT;
- v &= ~_PAGE_PRESENT;
- } else /* presume this has been called with clear==true previously */
- v |= *old;
- set_pmd(pmd, __pmd(v));
+ *old = v;
+ new_pmd = pmd_mknotpresent(*pmd);
+ } else {
+ /* Presume this has been called with clear==true previously */
+ new_pmd = __pmd(*old);
+ }
+ set_pmd(pmd, new_pmd);
}
static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
pteval_t v = pte_val(*pte);
if (clear) {
- *old = v & _PAGE_PRESENT;
- v &= ~_PAGE_PRESENT;
- } else /* presume this has been called with clear==true previously */
- v |= *old;
- set_pte_atomic(pte, __pte(v));
+ *old = v;
+ /* Nothing should care about address */
+ pte_clear(&init_mm, 0, pte);
+ } else {
+ /* Presume this has been called with clear==true previously */
+ set_pte_atomic(pte, __pte(*old));
+ }
}
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 48c591251600..f40ab8185d94 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -240,3 +240,24 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
return phys_addr_valid(addr + count - 1);
}
+
+/*
+ * Only allow root to set high MMIO mappings to PROT_NONE.
+ * This prevents an unprivileged user from setting them to PROT_NONE,
+ * inverting them to point at valid memory for L1TF speculation.
+ *
+ * Note: locked-down kernels may want to disable the root override.
+ */
+bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return true;
+ if (!__pte_needs_invert(pgprot_val(prot)))
+ return true;
+ /* If it's real memory always allow */
+ if (pfn_valid(pfn))
+ return true;
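+ /*
+ * What remains are non-RAM (e.g. MMIO) pfns: refuse the inversion
+ * above the pfn limit unless the caller is privileged.
+ */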
+ if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
+ return false;
+ return true;
+}
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 34a2a3bfde9c..b54d52a2d00a 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -61,7 +61,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
eb->nid = nid;
if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
- emu_nid_to_phys[nid] = nid;
+ emu_nid_to_phys[nid] = pb->nid;
pb->start += size;
if (pb->start >= pb->end) {
@@ -198,40 +198,73 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
return end;
}
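+
+/*
+ * Divides the hole-adjusted span evenly: e.g. uniform_size(16G, 0, 1G, 4)
+ * is (16G - 1G) / 4 in whole pages, roughly 3.75G per emulated node.
+ */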
+static u64 uniform_size(u64 max_addr, u64 base, u64 hole, int nr_nodes)
+{
+ unsigned long max_pfn = PHYS_PFN(max_addr);
+ unsigned long base_pfn = PHYS_PFN(base);
+ unsigned long hole_pfns = PHYS_PFN(hole);
+
+ return PFN_PHYS((max_pfn - base_pfn - hole_pfns) / nr_nodes);
+}
+
/*
* Sets up fake nodes of `size' interleaved over physical nodes ranging from
* `addr' to `max_addr'.
*
* Returns zero on success or negative on error.
*/
-static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
struct numa_meminfo *pi,
- u64 addr, u64 max_addr, u64 size)
+ u64 addr, u64 max_addr, u64 size,
+ int nr_nodes, struct numa_memblk *pblk,
+ int nid)
{
nodemask_t physnode_mask = numa_nodes_parsed;
+ int i, ret, uniform = 0;
u64 min_size;
- int nid = 0;
- int i, ret;
- if (!size)
+ if ((!size && !nr_nodes) || (nr_nodes && !pblk))
return -1;
+
/*
- * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
- * increased accordingly if the requested size is too small. This
- * creates a uniform distribution of node sizes across the entire
- * machine (but not necessarily over physical nodes).
+ * In the 'uniform' case, split the passed-in physical node into
+ * nr_nodes pieces; in the non-uniform case, ignore the passed-in
+ * physical block and try to create nodes of at least size
+ * @size.
+ *
+ * In the uniform case, split the node strictly by physical
+ * capacity, i.e. ignore holes. In the non-uniform case, account
+ * for holes and treat @size as a minimum floor.
*/
- min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;
- min_size = max(min_size, FAKE_NODE_MIN_SIZE);
- if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
- min_size = (min_size + FAKE_NODE_MIN_SIZE) &
- FAKE_NODE_MIN_HASH_MASK;
+ if (!nr_nodes)
+ nr_nodes = MAX_NUMNODES;
+ else {
+ nodes_clear(physnode_mask);
+ node_set(pblk->nid, physnode_mask);
+ uniform = 1;
+ }
+
+ if (uniform) {
+ min_size = uniform_size(max_addr, addr, 0, nr_nodes);
+ size = min_size;
+ } else {
+ /*
+ * The limit on emulated nodes is MAX_NUMNODES, so the
+ * size per node is increased accordingly if the
+ * requested size is too small. This creates a uniform
+ * distribution of node sizes across the entire machine
+ * (but not necessarily over physical nodes).
+ */
+ min_size = uniform_size(max_addr, addr,
+ mem_hole_size(addr, max_addr), nr_nodes);
+ }
+ min_size = ALIGN(max(min_size, FAKE_NODE_MIN_SIZE), FAKE_NODE_MIN_SIZE);
if (size < min_size) {
pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
size >> 20, min_size >> 20);
size = min_size;
}
- size &= FAKE_NODE_MIN_HASH_MASK;
+ size = ALIGN_DOWN(size, FAKE_NODE_MIN_SIZE);
/*
* Fill physical nodes with fake nodes of size until there is no memory
@@ -248,10 +281,14 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
node_clear(i, physnode_mask);
continue;
}
+
start = pi->blk[phys_blk].start;
limit = pi->blk[phys_blk].end;
- end = find_end_of_node(start, limit, size);
+ if (uniform)
+ end = start + size;
+ else
+ end = find_end_of_node(start, limit, size);
/*
* If there won't be at least FAKE_NODE_MIN_SIZE of
* non-reserved memory in ZONE_DMA32 for the next node,
@@ -266,7 +303,8 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
* next node, this one must extend to the end of the
* physical node.
*/
- if (limit - end - mem_hole_size(end, limit) < size)
+ if ((limit - end - mem_hole_size(end, limit) < size)
+ && !uniform)
end = limit;
ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
@@ -276,7 +314,15 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
return ret;
}
}
- return 0;
+ return nid;
+}
+
+static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+ struct numa_meminfo *pi,
+ u64 addr, u64 max_addr, u64 size)
+{
+ return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
+ 0, NULL, NUMA_NO_NODE);
}
int __init setup_emu2phys_nid(int *dfl_phys_nid)
@@ -346,7 +392,28 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
* the fixed node size. Otherwise, if it is just a single number N,
* split the system RAM into N fake nodes.
*/
- if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
+ if (strchr(emu_cmdline, 'U')) {
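+ /*
+ * e.g. numa=fake=2U splits each physical node into two
+ * emulated nodes of equal physical capacity.
+ */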
+ nodemask_t physnode_mask = numa_nodes_parsed;
+ unsigned long n;
+ int nid = 0;
+
+ n = simple_strtoul(emu_cmdline, &emu_cmdline, 0);
+ ret = -1;
+ for_each_node_mask(i, physnode_mask) {
+ ret = split_nodes_size_interleave_uniform(&ei, &pi,
+ pi.blk[i].start, pi.blk[i].end, 0,
+ n, &pi.blk[i], nid);
+ if (ret < 0)
+ break;
+ if (ret < n) {
+ pr_info("%s: phys: %d only got %d of %ld nodes, failing\n",
+ __func__, i, ret, n);
+ ret = -1;
+ break;
+ }
+ nid = ret;
+ }
+ } else if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
u64 size;
size = memparse(emu_cmdline, &emu_cmdline);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3bded76e8d5c..8d6c34fe49be 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -53,6 +53,7 @@ static DEFINE_SPINLOCK(cpa_lock);
#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
+#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */
#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];
@@ -1014,8 +1015,8 @@ static long populate_pmd(struct cpa_data *cpa,
pmd = pmd_offset(pud, start);
- set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
- massage_pgprot(pmd_pgprot)));
+ set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
+ canon_pgprot(pmd_pgprot))));
start += PMD_SIZE;
cpa->pfn += PMD_SIZE >> PAGE_SHIFT;
@@ -1087,8 +1088,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
* Map everything starting from the Gb boundary, possibly with 1G pages
*/
while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
- set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
- massage_pgprot(pud_pgprot)));
+ set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
+ canon_pgprot(pud_pgprot))));
start += PUD_SIZE;
cpa->pfn += PUD_SIZE >> PAGE_SHIFT;
@@ -1486,6 +1487,9 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
/* No alias checking for _NX bit modifications */
checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
+ /* Has caller explicitly disabled alias checking? */
+ if (in_flag & CPA_NO_CHECK_ALIAS)
+ checkalias = 0;
ret = __change_page_attr_set_clr(&cpa, checkalias);
@@ -1772,6 +1776,15 @@ int set_memory_np(unsigned long addr, int numpages)
return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}
+int set_memory_np_noalias(unsigned long addr, int numpages)
+{
+ int cpa_flags = CPA_NO_CHECK_ALIAS;
+
+ return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
+ __pgprot(_PAGE_PRESENT), 0,
+ cpa_flags, NULL);
+}
+
int set_memory_4k(unsigned long addr, int numpages)
{
return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
@@ -1784,6 +1797,12 @@ int set_memory_nonglobal(unsigned long addr, int numpages)
__pgprot(_PAGE_GLOBAL), 0);
}
+int set_memory_global(unsigned long addr, int numpages)
+{
+ return change_page_attr_set(&addr, numpages,
+ __pgprot(_PAGE_GLOBAL), 0);
+}
+
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
{
struct cpa_data cpa;
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 47b5951e592b..3ef095c70ae3 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -182,6 +182,14 @@ static void pgd_dtor(pgd_t *pgd)
*/
#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
+/*
+ * We allocate separate PMDs for the kernel part of the user page-table
+ * when PTI is enabled. We need them to map the per-process LDT into the
+ * user-space page-table.
+ */
+#define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \
+ KERNEL_PGD_PTRS : 0)
+
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
@@ -202,14 +210,14 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS 0
-
+#define PREALLOCATED_USER_PMDS 0
#endif /* CONFIG_X86_PAE */
-static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
+static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
int i;
- for(i = 0; i < PREALLOCATED_PMDS; i++)
+ for (i = 0; i < count; i++)
if (pmds[i]) {
pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
free_page((unsigned long)pmds[i]);
@@ -217,7 +225,7 @@ static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
}
}
-static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
+static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
int i;
bool failed = false;
@@ -226,7 +234,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
if (mm == &init_mm)
gfp &= ~__GFP_ACCOUNT;
- for(i = 0; i < PREALLOCATED_PMDS; i++) {
+ for (i = 0; i < count; i++) {
pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
if (!pmd)
failed = true;
@@ -241,7 +249,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
}
if (failed) {
- free_pmds(mm, pmds);
+ free_pmds(mm, pmds, count);
return -ENOMEM;
}
@@ -254,23 +262,38 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
* preallocate which never got a corresponding vma will need to be
* freed manually.
*/
+static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
+{
+ pgd_t pgd = *pgdp;
+
+ if (pgd_val(pgd) != 0) {
+ pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+
+ *pgdp = native_make_pgd(0);
+
+ paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
+ pmd_free(mm, pmd);
+ mm_dec_nr_pmds(mm);
+ }
+}
+
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
int i;
- for(i = 0; i < PREALLOCATED_PMDS; i++) {
- pgd_t pgd = pgdp[i];
+ for (i = 0; i < PREALLOCATED_PMDS; i++)
+ mop_up_one_pmd(mm, &pgdp[i]);
- if (pgd_val(pgd) != 0) {
- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
- pgdp[i] = native_make_pgd(0);
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return;
- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
- pmd_free(mm, pmd);
- mm_dec_nr_pmds(mm);
- }
- }
+ pgdp = kernel_to_user_pgdp(pgdp);
+
+ for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
+ mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
+#endif
}
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
@@ -296,6 +319,38 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
}
}
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
+ pgd_t *k_pgd, pmd_t *pmds[])
+{
+ pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
+ pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
+ p4d_t *u_p4d;
+ pud_t *u_pud;
+ int i;
+
+ u_p4d = p4d_offset(u_pgd, 0);
+ u_pud = pud_offset(u_p4d, 0);
+
+ s_pgd += KERNEL_PGD_BOUNDARY;
+ u_pud += KERNEL_PGD_BOUNDARY;
+
+ for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
+ pmd_t *pmd = pmds[i];
+
+ memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
+ sizeof(pmd_t) * PTRS_PER_PMD);
+
+ pud_populate(mm, u_pud, pmd);
+ }
+}
+#else
+static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
+ pgd_t *k_pgd, pmd_t *pmds[])
+{
+}
+#endif
/*
* Xen paravirt assumes pgd table should be in one page. 64 bit kernel also
* assumes that pgd should be in one page.
@@ -329,9 +384,6 @@ static int __init pgd_cache_init(void)
*/
pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
SLAB_PANIC, NULL);
- if (!pgd_cache)
- return -ENOMEM;
-
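+	/* SLAB_PANIC means kmem_cache_create() cannot fail here. */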
return 0;
}
core_initcall(pgd_cache_init);
@@ -343,7 +395,8 @@ static inline pgd_t *_pgd_alloc(void)
* We allocate one page for pgd.
*/
if (!SHARED_KERNEL_PMD)
- return (pgd_t *)__get_free_page(PGALLOC_GFP);
+ return (pgd_t *)__get_free_pages(PGALLOC_GFP,
+ PGD_ALLOCATION_ORDER);
/*
* Now PAE kernel is not running as a Xen domain. We can allocate
@@ -355,7 +408,7 @@ static inline pgd_t *_pgd_alloc(void)
static inline void _pgd_free(pgd_t *pgd)
{
if (!SHARED_KERNEL_PMD)
- free_page((unsigned long)pgd);
+ free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
else
kmem_cache_free(pgd_cache, pgd);
}
@@ -375,6 +428,7 @@ static inline void _pgd_free(pgd_t *pgd)
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
+ pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
pmd_t *pmds[PREALLOCATED_PMDS];
pgd = _pgd_alloc();
@@ -384,12 +438,15 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
mm->pgd = pgd;
- if (preallocate_pmds(mm, pmds) != 0)
+ if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
goto out_free_pgd;
- if (paravirt_pgd_alloc(mm) != 0)
+ if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
goto out_free_pmds;
+ if (paravirt_pgd_alloc(mm) != 0)
+ goto out_free_user_pmds;
+
/*
* Make sure that pre-populating the pmds is atomic with
* respect to anything walking the pgd_list, so that they
@@ -399,13 +456,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
pgd_ctor(mm, pgd);
pgd_prepopulate_pmd(mm, pgd, pmds);
+ pgd_prepopulate_user_pmd(mm, pgd, u_pmds);
spin_unlock(&pgd_lock);
return pgd;
+out_free_user_pmds:
+ free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
out_free_pmds:
- free_pmds(mm, pmds);
+ free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
_pgd_free(pgd);
out:
@@ -719,28 +779,50 @@ int pmd_clear_huge(pmd_t *pmd)
return 0;
}
+#ifdef CONFIG_X86_64
/**
* pud_free_pmd_page - Clear pud entry and free pmd page.
* @pud: Pointer to a PUD.
+ * @addr: Virtual address associated with pud.
*
- * Context: The pud range has been unmaped and TLB purged.
+ * Context: The pud range has been unmapped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
+ *
+ * NOTE: Callers must allow a single page allocation.
*/
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
- pmd_t *pmd;
+ pmd_t *pmd, *pmd_sv;
+ pte_t *pte;
int i;
if (pud_none(*pud))
return 1;
pmd = (pmd_t *)pud_page_vaddr(*pud);
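+ /*
+ * Snapshot the PMD entries so their PTE pages can be freed only
+ * after the paging-structure caches have been flushed below.
+ */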
+ pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
+ if (!pmd_sv)
+ return 0;
- for (i = 0; i < PTRS_PER_PMD; i++)
- if (!pmd_free_pte_page(&pmd[i]))
- return 0;
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ pmd_sv[i] = pmd[i];
+ if (!pmd_none(pmd[i]))
+ pmd_clear(&pmd[i]);
+ }
pud_clear(pud);
+
+ /* INVLPG to clear all paging-structure caches */
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ if (!pmd_none(pmd_sv[i])) {
+ pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
+ free_page((unsigned long)pte);
+ }
+ }
+
+ free_page((unsigned long)pmd_sv);
free_page((unsigned long)pmd);
return 1;
@@ -749,11 +831,12 @@ int pud_free_pmd_page(pud_t *pud)
/**
* pmd_free_pte_page - Clear pmd entry and free pte page.
* @pmd: Pointer to a PMD.
+ * @addr: Virtual address associated with pmd.
*
- * Context: The pmd range has been unmaped and TLB purged.
+ * Context: The pmd range has been unmapped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
*/
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
pte_t *pte;
@@ -762,8 +845,30 @@ int pmd_free_pte_page(pmd_t *pmd)
pte = (pte_t *)pmd_page_vaddr(*pmd);
pmd_clear(pmd);
+
+ /* INVLPG to clear all paging-structure caches */
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
free_page((unsigned long)pte);
return 1;
}
+
+#else /* !CONFIG_X86_64 */
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+ return pud_none(*pud);
+}
+
+/*
+ * Disable free page handling on x86-PAE. This ensures that ioremap()
+ * does not update sync'd pmd entries. See vmalloc_sync_one().
+ */
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ return pmd_none(*pmd);
+}
+
+#endif /* CONFIG_X86_64 */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 4d418e705878..31341ae7309f 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -45,6 +45,7 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
+#include <asm/sections.h>
#undef pr_fmt
#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt
@@ -54,6 +55,16 @@
#define __GFP_NOTRACK 0
#endif
+/*
+ * Define the page-table levels we clone for user-space on 32
+ * and 64 bit.
+ */
+#ifdef CONFIG_X86_64
+#define PTI_LEVEL_KERNEL_IMAGE PTI_CLONE_PMD
+#else
+#define PTI_LEVEL_KERNEL_IMAGE PTI_CLONE_PTE
+#endif
+
static void __init pti_print_if_insecure(const char *reason)
{
if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
@@ -117,7 +128,7 @@ enable:
setup_force_cpu_cap(X86_FEATURE_PTI);
}
-pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
+pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
/*
* Changes to the high (kernel) portion of the kernelmode page
@@ -176,7 +187,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
if (pgd_none(*pgd)) {
unsigned long new_p4d_page = __get_free_page(gfp);
- if (!new_p4d_page)
+ if (WARN_ON_ONCE(!new_p4d_page))
return NULL;
set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
@@ -195,13 +206,17 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
- p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
+ p4d_t *p4d;
pud_t *pud;
+ p4d = pti_user_pagetable_walk_p4d(address);
+ if (!p4d)
+ return NULL;
+
BUILD_BUG_ON(p4d_large(*p4d) != 0);
if (p4d_none(*p4d)) {
unsigned long new_pud_page = __get_free_page(gfp);
- if (!new_pud_page)
+ if (WARN_ON_ONCE(!new_pud_page))
return NULL;
set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
@@ -215,7 +230,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
}
if (pud_none(*pud)) {
unsigned long new_pmd_page = __get_free_page(gfp);
- if (!new_pmd_page)
+ if (WARN_ON_ONCE(!new_pmd_page))
return NULL;
set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
@@ -224,7 +239,6 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
return pmd_offset(pud, address);
}
-#ifdef CONFIG_X86_VSYSCALL_EMULATION
/*
* Walk the shadow copy of the page tables (optionally) trying to allocate
* page table pages on the way down. Does not support large pages.
@@ -237,9 +251,13 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
- pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
+ pmd_t *pmd;
pte_t *pte;
+ pmd = pti_user_pagetable_walk_pmd(address);
+ if (!pmd)
+ return NULL;
+
/* We can't do anything sensible if we hit a large mapping. */
if (pmd_large(*pmd)) {
WARN_ON(1);
@@ -262,6 +280,7 @@ static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
return pte;
}
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
pte_t *pte, *target_pte;
@@ -282,8 +301,14 @@ static void __init pti_setup_vsyscall(void)
static void __init pti_setup_vsyscall(void) { }
#endif
+enum pti_clone_level {
+ PTI_CLONE_PMD,
+ PTI_CLONE_PTE,
+};
+
static void
-pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
+pti_clone_pgtable(unsigned long start, unsigned long end,
+ enum pti_clone_level level)
{
unsigned long addr;
@@ -291,59 +316,105 @@ pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
* Clone the populated PMDs which cover start to end. These PMD areas
* can have holes.
*/
- for (addr = start; addr < end; addr += PMD_SIZE) {
+ for (addr = start; addr < end;) {
+ pte_t *pte, *target_pte;
pmd_t *pmd, *target_pmd;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
+ /* Overflow check */
+ if (addr < start)
+ break;
+
pgd = pgd_offset_k(addr);
if (WARN_ON(pgd_none(*pgd)))
return;
p4d = p4d_offset(pgd, addr);
if (WARN_ON(p4d_none(*p4d)))
return;
+
pud = pud_offset(p4d, addr);
- if (pud_none(*pud))
+ if (pud_none(*pud)) {
+ addr += PUD_SIZE;
continue;
+ }
+
pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
+ if (pmd_none(*pmd)) {
+ addr += PMD_SIZE;
continue;
+ }
- target_pmd = pti_user_pagetable_walk_pmd(addr);
- if (WARN_ON(!target_pmd))
- return;
-
- /*
- * Only clone present PMDs. This ensures only setting
- * _PAGE_GLOBAL on present PMDs. This should only be
- * called on well-known addresses anyway, so a non-
- * present PMD would be a surprise.
- */
- if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
- return;
-
- /*
- * Setting 'target_pmd' below creates a mapping in both
- * the user and kernel page tables. It is effectively
- * global, so set it as global in both copies. Note:
- * the X86_FEATURE_PGE check is not _required_ because
- * the CPU ignores _PAGE_GLOBAL when PGE is not
- * supported. The check keeps consistentency with
- * code that only set this bit when supported.
- */
- if (boot_cpu_has(X86_FEATURE_PGE))
- *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
-
- /*
- * Copy the PMD. That is, the kernelmode and usermode
- * tables will share the last-level page tables of this
- * address range
- */
- *target_pmd = pmd_clear_flags(*pmd, clear);
+ if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
+ target_pmd = pti_user_pagetable_walk_pmd(addr);
+ if (WARN_ON(!target_pmd))
+ return;
+
+ /*
+ * Only clone present PMDs. This ensures only setting
+ * _PAGE_GLOBAL on present PMDs. This should only be
+ * called on well-known addresses anyway, so a non-
+ * present PMD would be a surprise.
+ */
+ if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
+ return;
+
+ /*
+ * Setting 'target_pmd' below creates a mapping in both
+ * the user and kernel page tables. It is effectively
+ * global, so set it as global in both copies. Note:
+ * the X86_FEATURE_PGE check is not _required_ because
+ * the CPU ignores _PAGE_GLOBAL when PGE is not
+ * supported. The check keeps consistency with
+ * code that only sets this bit when supported.
+ */
+ if (boot_cpu_has(X86_FEATURE_PGE))
+ *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
+
+ /*
+ * Copy the PMD. That is, the kernelmode and usermode
+ * tables will share the last-level page tables of this
+ * address range
+ */
+ *target_pmd = *pmd;
+
+ addr += PMD_SIZE;
+
+ } else if (level == PTI_CLONE_PTE) {
+
+ /* Walk the page-table down to the pte level */
+ pte = pte_offset_kernel(pmd, addr);
+ if (pte_none(*pte)) {
+ addr += PAGE_SIZE;
+ continue;
+ }
+
+ /* Only clone present PTEs */
+ if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
+ return;
+
+ /* Allocate PTE in the user page-table */
+ target_pte = pti_user_pagetable_walk_pte(addr);
+ if (WARN_ON(!target_pte))
+ return;
+
+ /* Set GLOBAL bit in both PTEs */
+ if (boot_cpu_has(X86_FEATURE_PGE))
+ *pte = pte_set_flags(*pte, _PAGE_GLOBAL);
+
+ /* Clone the PTE */
+ *target_pte = *pte;
+
+ addr += PAGE_SIZE;
+
+ } else {
+ BUG();
+ }
}
}
+#ifdef CONFIG_X86_64
/*
* Clone a single p4d (i.e. a top-level entry on 4-level systems and a
* next-level entry on 5-level systems.
@@ -354,6 +425,9 @@ static void __init pti_clone_p4d(unsigned long addr)
pgd_t *kernel_pgd;
user_p4d = pti_user_pagetable_walk_p4d(addr);
+ if (!user_p4d)
+ return;
+
kernel_pgd = pgd_offset_k(addr);
kernel_p4d = p4d_offset(kernel_pgd, addr);
*user_p4d = *kernel_p4d;
@@ -367,6 +441,25 @@ static void __init pti_clone_user_shared(void)
pti_clone_p4d(CPU_ENTRY_AREA_BASE);
}
+#else /* CONFIG_X86_64 */
+
+/*
+ * On 32-bit PAE systems with 1 GB of kernel address space there is only
+ * one pgd/p4d for the whole kernel. Cloning that would map the whole
+ * address space into the user page-tables, making PTI useless. So clone
+ * the page-table on the PMD level to prevent that.
+ */
+static void __init pti_clone_user_shared(void)
+{
+ unsigned long start, end;
+
+ start = CPU_ENTRY_AREA_BASE;
+ end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
+
+ pti_clone_pgtable(start, end, PTI_CLONE_PMD);
+}
+#endif /* CONFIG_X86_64 */
+
/*
* Clone the ESPFIX P4D into the user space visible page table
*/
@@ -380,11 +473,11 @@ static void __init pti_setup_espfix64(void)
/*
* Clone the populated PMDs of the entry and irqentry text and force it RO.
*/
-static void __init pti_clone_entry_text(void)
+static void pti_clone_entry_text(void)
{
- pti_clone_pmds((unsigned long) __entry_text_start,
- (unsigned long) __irqentry_text_end,
- _PAGE_RW);
+ pti_clone_pgtable((unsigned long) __entry_text_start,
+ (unsigned long) __irqentry_text_end,
+ PTI_CLONE_PMD);
}
/*
@@ -435,10 +528,17 @@ static inline bool pti_kernel_image_global_ok(void)
}
/*
+ * This file is the only user of these, and they are not arch-generic
+ * like the other set_memory.h functions. Just extern them.
+ */
+extern int set_memory_nonglobal(unsigned long addr, int numpages);
+extern int set_memory_global(unsigned long addr, int numpages);
+
+/*
* For some configurations, map all of kernel text into the user page
* tables. This reduces TLB misses, especially on non-PCID systems.
*/
-void pti_clone_kernel_text(void)
+static void pti_clone_kernel_text(void)
{
/*
* rodata is part of the kernel image and is normally
@@ -446,7 +546,8 @@ void pti_clone_kernel_text(void)
* clone the areas past rodata, they might contain secrets.
*/
unsigned long start = PFN_ALIGN(_text);
- unsigned long end = (unsigned long)__end_rodata_hpage_align;
+ unsigned long end_clone = (unsigned long)__end_rodata_aligned;
+ unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table);
if (!pti_kernel_image_global_ok())
return;
@@ -458,14 +559,18 @@ void pti_clone_kernel_text(void)
* pti_set_kernel_image_nonglobal() did to clear the
* global bit.
*/
- pti_clone_pmds(start, end, _PAGE_RW);
+ pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
+
+ /*
+ * pti_clone_pgtable() will set the global bit in any PMDs
+ * that it clones, but we also need to get any PTEs in
+ * the last level for areas that are not huge-page-aligned.
+ */
+
+ /* Set the global bit for normal non-__init kernel text: */
+ set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}
-/*
- * This is the only user for it and it is not arch-generic like
- * the other set_memory.h functions. Just extern it.
- */
-extern int set_memory_nonglobal(unsigned long addr, int numpages);
void pti_set_kernel_image_nonglobal(void)
{
/*
@@ -477,9 +582,11 @@ void pti_set_kernel_image_nonglobal(void)
unsigned long start = PFN_ALIGN(_text);
unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
- if (pti_kernel_image_global_ok())
- return;
-
+ /*
+ * This clears _PAGE_GLOBAL from the entire kernel image.
+ * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
+ * areas that are mapped to userspace.
+ */
set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}
@@ -493,6 +600,28 @@ void __init pti_init(void)
pr_info("enabled\n");
+#ifdef CONFIG_X86_32
+ /*
+ * We check for X86_FEATURE_PCID here. But the init-code will
+ * clear the feature flag on 32 bit because the feature is not
+ * supported on 32 bit anyway. To print the warning we need to
+ * check with cpuid directly again.
+ */
+ if (cpuid_ecx(0x1) & BIT(17)) {
+ /* Use printk to work around pr_fmt() */
+ printk(KERN_WARNING "\n");
+ printk(KERN_WARNING "************************************************************\n");
+ printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! **\n");
+ printk(KERN_WARNING "** **\n");
+ printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
+ printk(KERN_WARNING "** Your performance will increase dramatically if you **\n");
+ printk(KERN_WARNING "** switch to a 64-bit kernel! **\n");
+ printk(KERN_WARNING "** **\n");
+ printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! **\n");
+ printk(KERN_WARNING "************************************************************\n");
+ }
+#endif
+
pti_clone_user_shared();
/* Undo all global bits from the init pagetables in head_64.S: */
@@ -502,3 +631,22 @@ void __init pti_init(void)
pti_setup_espfix64();
pti_setup_vsyscall();
}
+
+/*
+ * Finalize the kernel mappings in the userspace page-table. Some of the
+ * mappings for the kernel image might have changed since pti_init()
+ * cloned them. This is because parts of the kernel image have been
+ * mapped RO and/or NX. These changes need to be cloned again to the
+ * userspace page-table.
+ */
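+/*
+ * The expected call site is late in boot, once the final RO/NX
+ * protections of the kernel image have been applied.
+ */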
+void pti_finalize(void)
+{
+ /*
+ * We need to clone everything (again) that maps parts of the
+ * kernel image.
+ */
+ pti_clone_entry_text();
+ pti_clone_kernel_text();
+
+ debug_checkwx_user();
+}
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 6eb1f34c3c85..752dbf4e0e50 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -7,6 +7,7 @@
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
+#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
@@ -35,7 +36,7 @@
* necessary invalidation by clearing out the 'ctx_id' which
* forces a TLB flush when the context is loaded.
*/
-void clear_asid_other(void)
+static void clear_asid_other(void)
{
u16 asid;
@@ -185,8 +186,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
{
struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+ bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
unsigned cpu = smp_processor_id();
u64 next_tlb_gen;
+ bool need_flush;
+ u16 new_asid;
/*
* NB: The scheduler will call us with prev == next when switching
@@ -240,20 +244,41 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
next->context.ctx_id);
/*
- * We don't currently support having a real mm loaded without
- * our cpu set in mm_cpumask(). We have all the bookkeeping
- * in place to figure out whether we would need to flush
- * if our cpu were cleared in mm_cpumask(), but we don't
- * currently use it.
+ * Even in lazy TLB mode, the CPU should stay set in the
+ * mm_cpumask. The TLB shootdown code can figure out
+ * from cpu_tlbstate.is_lazy whether or not to send an IPI.
*/
if (WARN_ON_ONCE(real_prev != &init_mm &&
!cpumask_test_cpu(cpu, mm_cpumask(next))))
cpumask_set_cpu(cpu, mm_cpumask(next));
- return;
+ /*
+ * If the CPU is not in lazy TLB mode, we are just switching
+ * from one thread in a process to another thread in the same
+ * process. No TLB flush required.
+ */
+ if (!was_lazy)
+ return;
+
+ /*
+ * Read the tlb_gen to check whether a flush is needed.
+ * If the TLB is up to date, just use it.
+ * The barrier synchronizes with the tlb_gen increment in
+ * the TLB shootdown code.
+ */
+ smp_mb();
+ next_tlb_gen = atomic64_read(&next->context.tlb_gen);
+ if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
+ next_tlb_gen)
+ return;
+
+ /*
+ * TLB contents went out of date while we were in lazy
+ * mode. Fall through to the TLB switching code below.
+ */
+ new_asid = prev_asid;
+ need_flush = true;
} else {
- u16 new_asid;
- bool need_flush;
u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
/*
@@ -285,53 +310,60 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
sync_current_stack_to_mm(next);
}
- /* Stop remote flushes for the previous mm */
- VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
- real_prev != &init_mm);
- cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
+ /*
+ * Stop remote flushes for the previous mm.
+ * Skip kernel threads; we never send init_mm TLB flushing IPIs,
+ * but the bitmap manipulation can cause cache line contention.
+ */
+ if (real_prev != &init_mm) {
+ VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
+ mm_cpumask(real_prev)));
+ cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
+ }
/*
* Start remote flushes and then read tlb_gen.
*/
- cpumask_set_cpu(cpu, mm_cpumask(next));
+ if (next != &init_mm)
+ cpumask_set_cpu(cpu, mm_cpumask(next));
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+ }
- if (need_flush) {
- this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
- this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
- load_new_mm_cr3(next->pgd, new_asid, true);
-
- /*
- * NB: This gets called via leave_mm() in the idle path
- * where RCU functions differently. Tracing normally
- * uses RCU, so we need to use the _rcuidle variant.
- *
- * (There is no good reason for this. The idle code should
- * be rearranged to call this before rcu_idle_enter().)
- */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
- } else {
- /* The new ASID is already up to date. */
- load_new_mm_cr3(next->pgd, new_asid, false);
-
- /* See above wrt _rcuidle. */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
- }
+ if (need_flush) {
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
+ load_new_mm_cr3(next->pgd, new_asid, true);
/*
- * Record last user mm's context id, so we can avoid
- * flushing branch buffer with IBPB if we switch back
- * to the same user.
+ * NB: This gets called via leave_mm() in the idle path
+ * where RCU functions differently. Tracing normally
+ * uses RCU, so we need to use the _rcuidle variant.
+ *
+ * (There is no good reason for this. The idle code should
+ * be rearranged to call this before rcu_idle_enter().)
*/
- if (next != &init_mm)
- this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ } else {
+ /* The new ASID is already up to date. */
+ load_new_mm_cr3(next->pgd, new_asid, false);
- this_cpu_write(cpu_tlbstate.loaded_mm, next);
- this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+ /* See above wrt _rcuidle. */
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
+ /*
+ * Record last user mm's context id, so we can avoid
+ * flushing branch buffer with IBPB if we switch back
+ * to the same user.
+ */
+ if (next != &init_mm)
+ this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
+ this_cpu_write(cpu_tlbstate.loaded_mm, next);
+ this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+
load_mm_cr4(next);
switch_ldt(real_prev, next);
}
@@ -354,20 +386,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
return;
- if (tlb_defer_switch_to_init_mm()) {
- /*
- * There's a significant optimization that may be possible
- * here. We have accurate enough TLB flush tracking that we
- * don't need to maintain coherence of TLB per se when we're
- * lazy. We do, however, need to maintain coherence of
- * paging-structure caches. We could, in principle, leave our
- * old mm loaded and only switch to init_mm when
- * tlb_remove_page() happens.
- */
- this_cpu_write(cpu_tlbstate.is_lazy, true);
- } else {
- switch_mm(NULL, &init_mm, NULL);
- }
+ this_cpu_write(cpu_tlbstate.is_lazy, true);
}
/*
@@ -454,6 +473,9 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
* paging-structure cache to avoid speculatively reading
* garbage into our TLB. Since switching to init_mm is barely
* slower than a minimal flush, just switch to init_mm.
+ *
+ * This should be rare, with native_flush_tlb_others skipping
+ * IPIs to lazy TLB mode CPUs.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
return;
@@ -560,6 +582,9 @@ static void flush_tlb_func_remote(void *info)
void native_flush_tlb_others(const struct cpumask *cpumask,
const struct flush_tlb_info *info)
{
+ cpumask_var_t lazymask;
+ unsigned int cpu;
+
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
if (info->end == TLB_FLUSH_ALL)
trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
@@ -583,8 +608,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
* that UV should be updated so that smp_call_function_many(),
* etc, are optimal on UV.
*/
- unsigned int cpu;
-
cpu = smp_processor_id();
cpumask = uv_flush_tlb_others(cpumask, info);
if (cpumask)
@@ -592,8 +615,29 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
(void *)info, 1);
return;
}
- smp_call_function_many(cpumask, flush_tlb_func_remote,
+
+ /*
+ * A temporary cpumask is used in order to skip sending IPIs
+ * to CPUs in lazy TLB state, while keeping them in mm_cpumask(mm).
+ * If the allocation fails, simply IPI every CPU in mm_cpumask.
+ */
+ if (!alloc_cpumask_var(&lazymask, GFP_ATOMIC)) {
+ smp_call_function_many(cpumask, flush_tlb_func_remote,
(void *)info, 1);
+ return;
+ }
+
+ cpumask_copy(lazymask, cpumask);
+
+ for_each_cpu(cpu, lazymask) {
+ if (per_cpu(cpu_tlbstate.is_lazy, cpu))
+ cpumask_clear_cpu(cpu, lazymask);
+ }
+
+ smp_call_function_many(lazymask, flush_tlb_func_remote,
+ (void *)info, 1);
+
+ free_cpumask_var(lazymask);
}
/*
@@ -646,6 +690,68 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
put_cpu();
}
+void tlb_flush_remove_tables_local(void *arg)
+{
+ struct mm_struct *mm = arg;
+
+ if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm &&
+ this_cpu_read(cpu_tlbstate.is_lazy)) {
+ /*
+ * We're in lazy mode. We need to at least flush our
+ * paging-structure cache to avoid speculatively reading
+ * garbage into our TLB. Since switching to init_mm is barely
+ * slower than a minimal flush, just switch to init_mm.
+ */
+ switch_mm_irqs_off(NULL, &init_mm, NULL);
+ }
+}
+
+static void mm_fill_lazy_tlb_cpu_mask(struct mm_struct *mm,
+ struct cpumask *lazy_cpus)
+{
+ int cpu;
+
+ for_each_cpu(cpu, mm_cpumask(mm)) {
+ if (!per_cpu(cpu_tlbstate.is_lazy, cpu))
+ cpumask_set_cpu(cpu, lazy_cpus);
+ }
+}
+
+void tlb_flush_remove_tables(struct mm_struct *mm)
+{
+ int cpu = get_cpu();
+ cpumask_var_t lazy_cpus;
+
+ if (cpumask_any_but(mm_cpumask(mm), cpu) >= nr_cpu_ids) {
+ put_cpu();
+ return;
+ }
+
+ if (!zalloc_cpumask_var(&lazy_cpus, GFP_ATOMIC)) {
+ /*
+ * If the cpumask allocation fails, do a brute force flush
+ * on all the CPUs that have this mm loaded.
+ */
+ smp_call_function_many(mm_cpumask(mm),
+ tlb_flush_remove_tables_local, (void *)mm, 1);
+ put_cpu();
+ return;
+ }
+
+ /*
+ * CPUs with !is_lazy either received a TLB flush IPI while the user
+ * pages in this address range were unmapped, or have context switched
+ * and reloaded %CR3 since then.
+ *
+ * Shootdown IPIs at page table freeing time only need to be sent to
+ * CPUs that may have out of date TLB contents.
+ */
+ mm_fill_lazy_tlb_cpu_mask(mm, lazy_cpus);
+ smp_call_function_many(lazy_cpus,
+ tlb_flush_remove_tables_local, (void *)mm, 1);
+ free_cpumask_var(lazy_cpus);
+ put_cpu();
+}
static void do_flush_tlb_all(void *info)
{
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 563049c483a1..d4ec117c1142 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -22,7 +22,6 @@
unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
PCI_PROBE_MMCONF;
-unsigned int pci_early_dump_regs;
static int pci_bf_sort;
int pci_routeirq;
int noioapicquirk;
@@ -599,9 +598,6 @@ char *__init pcibios_setup(char *str)
pci_probe |= PCI_BIG_ROOT_WINDOW;
return NULL;
#endif
- } else if (!strcmp(str, "earlydump")) {
- pci_early_dump_regs = 1;
- return NULL;
} else if (!strcmp(str, "routeirq")) {
pci_routeirq = 1;
return NULL;
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c
index e5f753cbb1c3..f5fc953e5848 100644
--- a/arch/x86/pci/early.c
+++ b/arch/x86/pci/early.c
@@ -57,47 +57,3 @@ int early_pci_allowed(void)
PCI_PROBE_CONF1;
}
-void early_dump_pci_device(u8 bus, u8 slot, u8 func)
-{
- u32 value[256 / 4];
- int i;
-
- pr_info("pci 0000:%02x:%02x.%d config space:\n", bus, slot, func);
-
- for (i = 0; i < 256; i += 4)
- value[i / 4] = read_pci_config(bus, slot, func, i);
-
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, value, 256, false);
-}
-
-void early_dump_pci_devices(void)
-{
- unsigned bus, slot, func;
-
- if (!early_pci_allowed())
- return;
-
- for (bus = 0; bus < 256; bus++) {
- for (slot = 0; slot < 32; slot++) {
- for (func = 0; func < 8; func++) {
- u32 class;
- u8 type;
-
- class = read_pci_config(bus, slot, func,
- PCI_CLASS_REVISION);
- if (class == 0xffffffff)
- continue;
-
- early_dump_pci_device(bus, slot, func);
-
- if (func == 0) {
- type = read_pci_config_byte(bus, slot,
- func,
- PCI_HEADER_TYPE);
- if (!(type & 0x80))
- break;
- }
- }
- }
- }
-}
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 5f2eb3231607..ee5d08f25ce4 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -636,6 +636,8 @@ void efi_switch_mm(struct mm_struct *mm)
#ifdef CONFIG_EFI_MIXED
extern efi_status_t efi64_thunk(u32, ...);
+static DEFINE_SPINLOCK(efi_runtime_lock);
+
#define runtime_service32(func) \
({ \
u32 table = (u32)(unsigned long)efi.systab; \
@@ -657,17 +659,14 @@ extern efi_status_t efi64_thunk(u32, ...);
#define efi_thunk(f, ...) \
({ \
efi_status_t __s; \
- unsigned long __flags; \
u32 __func; \
\
- local_irq_save(__flags); \
arch_efi_call_virt_setup(); \
\
__func = runtime_service32(f); \
__s = efi64_thunk(__func, __VA_ARGS__); \
\
arch_efi_call_virt_teardown(); \
- local_irq_restore(__flags); \
\
__s; \
})
@@ -702,14 +701,17 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
efi_status_t status;
u32 phys_tm, phys_tc;
+ unsigned long flags;
spin_lock(&rtc_lock);
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_tm = virt_to_phys_or_null(tm);
phys_tc = virt_to_phys_or_null(tc);
status = efi_thunk(get_time, phys_tm, phys_tc);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
spin_unlock(&rtc_lock);
return status;
@@ -719,13 +721,16 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
efi_status_t status;
u32 phys_tm;
+ unsigned long flags;
spin_lock(&rtc_lock);
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(set_time, phys_tm);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
spin_unlock(&rtc_lock);
return status;
@@ -737,8 +742,10 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
{
efi_status_t status;
u32 phys_enabled, phys_pending, phys_tm;
+ unsigned long flags;
spin_lock(&rtc_lock);
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_enabled = virt_to_phys_or_null(enabled);
phys_pending = virt_to_phys_or_null(pending);
@@ -747,6 +754,7 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
status = efi_thunk(get_wakeup_time, phys_enabled,
phys_pending, phys_tm);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
spin_unlock(&rtc_lock);
return status;
@@ -757,13 +765,16 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
efi_status_t status;
u32 phys_tm;
+ unsigned long flags;
spin_lock(&rtc_lock);
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(set_wakeup_time, enabled, phys_tm);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
spin_unlock(&rtc_lock);
return status;
@@ -781,6 +792,9 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
efi_status_t status;
u32 phys_name, phys_vendor, phys_attr;
u32 phys_data_size, phys_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_data_size = virt_to_phys_or_null(data_size);
phys_vendor = virt_to_phys_or_null(vendor);
@@ -791,6 +805,8 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
status = efi_thunk(get_variable, phys_name, phys_vendor,
phys_attr, phys_data_size, phys_data);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
@@ -800,6 +816,34 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
{
u32 phys_name, phys_vendor, phys_data;
efi_status_t status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+ phys_vendor = virt_to_phys_or_null(vendor);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
+
+ /* If data_size is > sizeof(u32) we've got problems */
+ status = efi_thunk(set_variable, phys_name, phys_vendor,
+ attr, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data)
+{
+ u32 phys_name, phys_vendor, phys_data;
+ efi_status_t status;
+ unsigned long flags;
+
+ if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+ return EFI_NOT_READY;
phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
phys_vendor = virt_to_phys_or_null(vendor);
@@ -809,6 +853,8 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
status = efi_thunk(set_variable, phys_name, phys_vendor,
attr, data_size, phys_data);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
@@ -819,6 +865,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
{
efi_status_t status;
u32 phys_name_size, phys_name, phys_vendor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_name_size = virt_to_phys_or_null(name_size);
phys_vendor = virt_to_phys_or_null(vendor);
@@ -827,6 +876,8 @@ efi_thunk_get_next_variable(unsigned long *name_size,
status = efi_thunk(get_next_variable, phys_name_size,
phys_name, phys_vendor);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
@@ -835,10 +886,15 @@ efi_thunk_get_next_high_mono_count(u32 *count)
{
efi_status_t status;
u32 phys_count;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_count = virt_to_phys_or_null(count);
status = efi_thunk(get_next_high_mono_count, phys_count);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
@@ -847,10 +903,15 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
unsigned long data_size, efi_char16_t *data)
{
u32 phys_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
phys_data = virt_to_phys_or_null_size(data, data_size);
efi_thunk(reset_system, reset_type, status, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
}
static efi_status_t
@@ -872,10 +933,13 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
{
efi_status_t status;
u32 phys_storage, phys_remaining, phys_max;
+ unsigned long flags;
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
phys_storage = virt_to_phys_or_null(storage_space);
phys_remaining = virt_to_phys_or_null(remaining_space);
phys_max = virt_to_phys_or_null(max_variable_size);
@@ -883,6 +947,35 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
status = efi_thunk(query_variable_info, attr, phys_storage,
phys_remaining, phys_max);
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
+{
+ efi_status_t status;
+ u32 phys_storage, phys_remaining, phys_max;
+ unsigned long flags;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+ return EFI_NOT_READY;
+
+ phys_storage = virt_to_phys_or_null(storage_space);
+ phys_remaining = virt_to_phys_or_null(remaining_space);
+ phys_max = virt_to_phys_or_null(max_variable_size);
+
+ status = efi_thunk(query_variable_info, attr, phys_storage,
+ phys_remaining, phys_max);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
return status;
}
@@ -908,9 +1001,11 @@ void efi_thunk_runtime_setup(void)
efi.get_variable = efi_thunk_get_variable;
efi.get_next_variable = efi_thunk_get_next_variable;
efi.set_variable = efi_thunk_set_variable;
+ efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
efi.reset_system = efi_thunk_reset_system;
efi.query_variable_info = efi_thunk_query_variable_info;
+ efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
efi.update_capsule = efi_thunk_update_capsule;
efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}
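
The wrappers above all funnel through one pattern: take efi_runtime_lock around the firmware call, with the *_nonblocking variants using a trylock so that callers in atomic context get EFI_NOT_READY back instead of spinning. A minimal user-space sketch of that pattern, with a C11 atomic flag standing in for the kernel spinlock:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_flag runtime_lock = ATOMIC_FLAG_INIT;

	enum status { STATUS_OK, STATUS_NOT_READY };

	static void firmware_call(const char *op)
	{
		printf("firmware: %s\n", op); /* stand-in for efi64_thunk() */
	}

	static enum status do_blocking(const char *op)
	{
		while (atomic_flag_test_and_set(&runtime_lock))
			; /* spin: corresponds to spin_lock_irqsave() */
		firmware_call(op);
		atomic_flag_clear(&runtime_lock);
		return STATUS_OK;
	}

	static enum status do_nonblocking(const char *op)
	{
		/* corresponds to spin_trylock_irqsave(): never wait */
		if (atomic_flag_test_and_set(&runtime_lock))
			return STATUS_NOT_READY;
		firmware_call(op);
		atomic_flag_clear(&runtime_lock);
		return STATUS_OK;
	}

	int main(void)
	{
		do_blocking("get_time");

		/* simulate contention: someone already holds the lock */
		atomic_flag_test_and_set(&runtime_lock);
		if (do_nonblocking("set_variable") == STATUS_NOT_READY)
			puts("busy: caller backs off instead of spinning");
		atomic_flag_clear(&runtime_lock);
		return 0;
	}
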
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 36c1f8b9f7e0..844d31cb8a0c 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -105,12 +105,11 @@ early_param("efi_no_storage_paranoia", setup_storage_paranoia);
*/
void efi_delete_dummy_variable(void)
{
- efi.set_variable((efi_char16_t *)efi_dummy_name,
- &EFI_DUMMY_GUID,
- EFI_VARIABLE_NON_VOLATILE |
- EFI_VARIABLE_BOOTSERVICE_ACCESS |
- EFI_VARIABLE_RUNTIME_ACCESS,
- 0, NULL);
+ efi.set_variable_nonblocking((efi_char16_t *)efi_dummy_name,
+ &EFI_DUMMY_GUID,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS, 0, NULL);
}
/*
@@ -249,7 +248,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
int num_entries;
void *new;
- if (efi_mem_desc_lookup(addr, &md)) {
+ if (efi_mem_desc_lookup(addr, &md) ||
+ md.type != EFI_BOOT_SERVICES_DATA) {
pr_err("Failed to lookup EFI memory descriptor for %pa\n", &addr);
return;
}
diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile
index fa021dfab088..5cf886c867c2 100644
--- a/arch/x86/platform/intel-mid/Makefile
+++ b/arch/x86/platform/intel-mid/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfld.o pwr.o
+obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o pwr.o
# SFI specific code
ifdef CONFIG_X86_INTEL_MID
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index 4f5fa65a1011..2acd6be13375 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -18,6 +18,7 @@
#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
#include <asm/io_apic.h>
+#include <asm/hw_irq.h>
#define TANGIER_EXT_TIMER0_MSI 12
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 2ebdf31d9996..56f66eafb94f 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -36,8 +36,6 @@
#include <asm/apb_timer.h>
#include <asm/reboot.h>
-#include "intel_mid_weak_decls.h"
-
/*
* the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
* cmdline option x86_intel_mid_timer can be used to override the configuration
@@ -61,10 +59,6 @@
enum intel_mid_timer_options intel_mid_timer_options;
-/* intel_mid_ops to store sub arch ops */
-static struct intel_mid_ops *intel_mid_ops;
-/* getter function for sub arch ops*/
-static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
enum intel_mid_cpu_type __intel_mid_cpu_chip;
EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
@@ -82,11 +76,6 @@ static void intel_mid_reboot(void)
intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
}
-static unsigned long __init intel_mid_calibrate_tsc(void)
-{
- return 0;
-}
-
static void __init intel_mid_setup_bp_timer(void)
{
apbt_time_init();
@@ -133,6 +122,7 @@ static void intel_mid_arch_setup(void)
case 0x3C:
case 0x4A:
__intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_TANGIER;
+ x86_platform.legacy.rtc = 1;
break;
case 0x27:
default:
@@ -140,17 +130,7 @@ static void intel_mid_arch_setup(void)
break;
}
- if (__intel_mid_cpu_chip < MAX_CPU_OPS(get_intel_mid_ops))
- intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
- else {
- intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
- pr_info("ARCH: Unknown SoC, assuming Penwell!\n");
- }
-
out:
- if (intel_mid_ops->arch_setup)
- intel_mid_ops->arch_setup();
-
/*
* Intel MID platforms are using explicitly defined regulators.
*
@@ -191,7 +171,6 @@ void __init x86_intel_mid_early_setup(void)
x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
- x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
x86_init.pci.arch_init = intel_mid_pci_init;
diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
deleted file mode 100644
index 3c1c3866d82b..000000000000
--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * intel_mid_weak_decls.h: Weak declarations of intel-mid.c
- *
- * (C) Copyright 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-
-/* For every CPU addition a new get_<cpuname>_ops interface needs
- * to be added.
- */
-extern void *get_penwell_ops(void);
-extern void *get_cloverview_ops(void);
-extern void *get_tangier_ops(void);
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
deleted file mode 100644
index e42978d4deaf..000000000000
--- a/arch/x86/platform/intel-mid/mfld.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * mfld.c: Intel Medfield platform setup code
- *
- * (C) Copyright 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/init.h>
-
-#include <asm/apic.h>
-#include <asm/intel-mid.h>
-#include <asm/intel_mid_vrtc.h>
-
-#include "intel_mid_weak_decls.h"
-
-static unsigned long __init mfld_calibrate_tsc(void)
-{
- unsigned long fast_calibrate;
- u32 lo, hi, ratio, fsb;
-
- rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
- pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
- ratio = (hi >> 8) & 0x1f;
- pr_debug("ratio is %d\n", ratio);
- if (!ratio) {
- pr_err("read a zero ratio, should be incorrect!\n");
- pr_err("force tsc ratio to 16 ...\n");
- ratio = 16;
- }
- rdmsr(MSR_FSB_FREQ, lo, hi);
- if ((lo & 0x7) == 0x7)
- fsb = FSB_FREQ_83SKU;
- else
- fsb = FSB_FREQ_100SKU;
- fast_calibrate = ratio * fsb;
- pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
- lapic_timer_frequency = fsb * 1000 / HZ;
-
- /*
- * TSC on Intel Atom SoCs is reliable and of known frequency.
- * See tsc_msr.c for details.
- */
- setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
- setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
-
- return fast_calibrate;
-}
-
-static void __init penwell_arch_setup(void)
-{
- x86_platform.calibrate_tsc = mfld_calibrate_tsc;
-}
-
-static struct intel_mid_ops penwell_ops = {
- .arch_setup = penwell_arch_setup,
-};
-
-void *get_penwell_ops(void)
-{
- return &penwell_ops;
-}
-
-void *get_cloverview_ops(void)
-{
- return &penwell_ops;
-}
diff --git a/arch/x86/platform/intel-mid/mrfld.c b/arch/x86/platform/intel-mid/mrfld.c
deleted file mode 100644
index ae7bdeb0e507..000000000000
--- a/arch/x86/platform/intel-mid/mrfld.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Intel Merrifield platform specific setup code
- *
- * (C) Copyright 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/init.h>
-
-#include <asm/apic.h>
-#include <asm/intel-mid.h>
-
-#include "intel_mid_weak_decls.h"
-
-static unsigned long __init tangier_calibrate_tsc(void)
-{
- unsigned long fast_calibrate;
- u32 lo, hi, ratio, fsb, bus_freq;
-
- /* *********************** */
- /* Compute TSC:Ratio * FSB */
- /* *********************** */
-
- /* Compute Ratio */
- rdmsr(MSR_PLATFORM_INFO, lo, hi);
- pr_debug("IA32 PLATFORM_INFO is 0x%x : %x\n", hi, lo);
-
- ratio = (lo >> 8) & 0xFF;
- pr_debug("ratio is %d\n", ratio);
- if (!ratio) {
- pr_err("Read a zero ratio, force tsc ratio to 4 ...\n");
- ratio = 4;
- }
-
- /* Compute FSB */
- rdmsr(MSR_FSB_FREQ, lo, hi);
- pr_debug("Actual FSB frequency detected by SOC 0x%x : %x\n",
- hi, lo);
-
- bus_freq = lo & 0x7;
- pr_debug("bus_freq = 0x%x\n", bus_freq);
-
- if (bus_freq == 0)
- fsb = FSB_FREQ_100SKU;
- else if (bus_freq == 1)
- fsb = FSB_FREQ_100SKU;
- else if (bus_freq == 2)
- fsb = FSB_FREQ_133SKU;
- else if (bus_freq == 3)
- fsb = FSB_FREQ_167SKU;
- else if (bus_freq == 4)
- fsb = FSB_FREQ_83SKU;
- else if (bus_freq == 5)
- fsb = FSB_FREQ_400SKU;
- else if (bus_freq == 6)
- fsb = FSB_FREQ_267SKU;
- else if (bus_freq == 7)
- fsb = FSB_FREQ_333SKU;
- else {
- BUG();
- pr_err("Invalid bus_freq! Setting to minimal value!\n");
- fsb = FSB_FREQ_100SKU;
- }
-
- /* TSC = FSB Freq * Resolved HFM Ratio */
- fast_calibrate = ratio * fsb;
- pr_debug("calculate tangier tsc %lu KHz\n", fast_calibrate);
-
- /* ************************************ */
- /* Calculate Local APIC Timer Frequency */
- /* ************************************ */
- lapic_timer_frequency = (fsb * 1000) / HZ;
-
- pr_debug("Setting lapic_timer_frequency = %d\n",
- lapic_timer_frequency);
-
- /*
- * TSC on Intel Atom SoCs is reliable and of known frequency.
- * See tsc_msr.c for details.
- */
- setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
- setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
-
- return fast_calibrate;
-}
-
-static void __init tangier_arch_setup(void)
-{
- x86_platform.calibrate_tsc = tangier_calibrate_tsc;
- x86_platform.legacy.rtc = 1;
-}
-
-/* tangier arch ops */
-static struct intel_mid_ops tangier_ops = {
- .arch_setup = tangier_arch_setup,
-};
-
-void *get_tangier_ops(void)
-{
- return &tangier_ops;
-}
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
index 7c3077e58fa0..f0e920fb98ad 100644
--- a/arch/x86/platform/olpc/olpc.c
+++ b/arch/x86/platform/olpc/olpc.c
@@ -311,10 +311,8 @@ static int __init add_xo1_platform_devices(void)
return PTR_ERR(pdev);
pdev = platform_device_register_simple("olpc-xo1", -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
- return 0;
+ return PTR_ERR_OR_ZERO(pdev);
}
static int olpc_xo1_ec_probe(struct platform_device *pdev)
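
PTR_ERR_OR_ZERO() collapses the "if (IS_ERR(p)) return PTR_ERR(p); return 0;" tail into a single expression. A stand-alone sketch with simplified reimplementations of the error-pointer helpers (not the kernel headers):

	#include <stdio.h>

	#define MAX_ERRNO 4095

	static inline int is_err(const void *ptr)
	{
		/* the top 4095 addresses encode negative errnos */
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static inline long ptr_err_or_zero(const void *ptr)
	{
		return is_err(ptr) ? (long)ptr : 0;
	}

	int main(void)
	{
		int x = 0;
		void *ok = &x;
		void *bad = (void *)(long)-12; /* -ENOMEM encoded in a pointer */

		printf("%ld %ld\n", ptr_err_or_zero(ok), ptr_err_or_zero(bad));
		return 0;
	}
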
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index ca446da48fd2..a4130b84d1ff 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1285,6 +1285,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
struct msg_desc msgdesc;
ack_APIC_irq();
+ kvm_set_cpu_l1tf_flush_l1d();
time_start = get_cycles();
bcp = &per_cpu(bau_control, smp_processor_id());
@@ -1607,8 +1608,6 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
*tunables[cnt].tunp = val;
continue;
}
- if (q == p)
- break;
}
return 0;
}
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 67ccf64c8bd8..f8e3b668d20b 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -233,29 +233,35 @@ struct restore_data_record {
*/
static int get_e820_md5(struct e820_table *table, void *buf)
{
- struct scatterlist sg;
- struct crypto_ahash *tfm;
+ struct crypto_shash *tfm;
+ struct shash_desc *desc;
int size;
int ret = 0;
- tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(tfm))
return -ENOMEM;
- {
- AHASH_REQUEST_ON_STACK(req, tfm);
- size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry) * table->nr_entries;
- ahash_request_set_tfm(req, tfm);
- sg_init_one(&sg, (u8 *)table, size);
- ahash_request_set_callback(req, 0, NULL, NULL);
- ahash_request_set_crypt(req, &sg, buf, size);
-
- if (crypto_ahash_digest(req))
- ret = -EINVAL;
- ahash_request_zero(req);
+ desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto free_tfm;
}
- crypto_free_ahash(tfm);
+ desc->tfm = tfm;
+ desc->flags = 0;
+
+ size = offsetof(struct e820_table, entries) +
+ sizeof(struct e820_entry) * table->nr_entries;
+
+ if (crypto_shash_digest(desc, (u8 *)table, size, buf))
+ ret = -EINVAL;
+
+ kzfree(desc);
+
+free_tfm:
+ crypto_free_shash(tfm);
return ret;
}
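
The hunk above replaces the scatterlist/ahash setup with a synchronous one-shot shash digest over the e820 table bytes. A user-space analogue of that one-shot style, sketched with OpenSSL's EVP API (assumes linking with -lcrypto; this is not the kernel interface, just the same shape of call):

	#include <openssl/evp.h>
	#include <stdio.h>
	#include <string.h>

	static int md5_digest(const void *buf, size_t len, unsigned char *out)
	{
		EVP_MD_CTX *ctx = EVP_MD_CTX_new();
		unsigned int out_len = 0;
		int ok;

		if (!ctx)
			return -1;
		/* one flat buffer in, digest out: no scatterlist, no async */
		ok = EVP_DigestInit_ex(ctx, EVP_md5(), NULL) &&
		     EVP_DigestUpdate(ctx, buf, len) &&
		     EVP_DigestFinal_ex(ctx, out, &out_len);
		EVP_MD_CTX_free(ctx);
		return ok ? 0 : -1;
	}

	int main(void)
	{
		unsigned char md[EVP_MAX_MD_SIZE];
		const char *table = "fake e820 table bytes";

		if (md5_digest(table, strlen(table), md) == 0)
			printf("first digest byte: %02x\n", md[0]);
		return 0;
	}
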
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index ce8da3a0412c..fd369a6e9ff8 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -137,7 +137,7 @@ ENTRY(restore_registers)
/* Saved in save_processor_state. */
lgdt saved_context_gdt_desc(%rax)
- xorq %rax, %rax
+ xorl %eax, %eax
/* tell the hibernation core that we've just restored the memory */
movq %rax, in_suspend(%rip)
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 81a8e33115ad..3cf302b26332 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -28,9 +28,8 @@ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
targets += kexec-purgatory.c
-CMD_BIN2C = $(objtree)/scripts/basic/bin2c
quiet_cmd_bin2c = BIN2C $@
- cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@
+ cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
$(call if_changed,bin2c)
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 220e97841e49..3a6c8ebc8032 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -67,6 +67,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"__tracedata_(start|end)|"
"__(start|stop)_notes|"
"__end_rodata|"
+ "__end_rodata_aligned|"
"__initramfs_start|"
"(jiffies|jiffies_64)|"
#if ELF_BITS == 64
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index 9d529f22fd9d..f518b4744ff8 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -1,13 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-mainmenu "User Mode Linux/$(SUBARCH) $(KERNELVERSION) Kernel Configuration"
-
-comment "Compiler: $(CC_VERSION_TEXT)"
-
-source "scripts/Kconfig.include"
-
-source "arch/um/Kconfig.common"
-
-menu "UML-specific options"
menu "Host processor type and features"
@@ -66,9 +57,3 @@ config ARCH_REUSE_HOST_VSYSCALL_AREA
config GENERIC_HWEIGHT
def_bool y
-
-source "arch/um/Kconfig.um"
-
-endmenu
-
-source "arch/um/Kconfig.rest"
diff --git a/arch/x86/um/vdso/.gitignore b/arch/x86/um/vdso/.gitignore
index 9cac6d072199..f8b69d84238e 100644
--- a/arch/x86/um/vdso/.gitignore
+++ b/arch/x86/um/vdso/.gitignore
@@ -1,2 +1 @@
-vdso-syms.lds
vdso.lds
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile
index b2d6967262b2..822ccdba93ad 100644
--- a/arch/x86/um/vdso/Makefile
+++ b/arch/x86/um/vdso/Makefile
@@ -53,22 +53,6 @@ $(vobjs): KBUILD_CFLAGS += $(CFL)
CFLAGS_REMOVE_vdso-note.o = -pg -fprofile-arcs -ftest-coverage
CFLAGS_REMOVE_um_vdso.o = -pg -fprofile-arcs -ftest-coverage
-targets += vdso-syms.lds
-extra-$(VDSO64-y) += vdso-syms.lds
-
-#
-# Match symbols in the DSO that look like VDSO*; produce a file of constants.
-#
-sed-vdsosym := -e 's/^00*/0/' \
- -e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p'
-quiet_cmd_vdsosym = VDSOSYM $@
-define cmd_vdsosym
- $(NM) $< | LC_ALL=C sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
-endef
-
-$(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
- $(call if_changed,vdsosym)
-
#
# The DSO images are built using a special linker script.
#
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 3b5318505c69..2eeddd814653 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -3,6 +3,7 @@
#endif
#include <linux/cpu.h>
#include <linux/kexec.h>
+#include <linux/slab.h>
#include <xen/features.h>
#include <xen/page.h>
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 439a94bf89ad..ee3b00c7acda 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -119,6 +119,27 @@ static void __init xen_banner(void)
version >> 16, version & 0xffff, extra.extraversion,
xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}
+
+static void __init xen_pv_init_platform(void)
+{
+ set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
+ HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
+
+ /* xen clock uses per-cpu vcpu_info, need to init it for boot cpu */
+ xen_vcpu_info_reset(0);
+
+ /* pvclock is in shared info area */
+ xen_init_time_ops();
+}
+
+static void __init xen_pv_guest_late_init(void)
+{
+#ifndef CONFIG_SMP
+ /* Setup shared vcpu info for non-smp configurations */
+ xen_setup_vcpu_info_placement();
+#endif
+}
+
/* Check if running on Xen version (major, minor) or later */
bool
xen_running_on_version_or_later(unsigned int major, unsigned int minor)
@@ -947,34 +968,8 @@ static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
xen_write_msr_safe(msr, low, high);
}
-void xen_setup_shared_info(void)
-{
- set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
-
- HYPERVISOR_shared_info =
- (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
-
- xen_setup_mfn_list_list();
-
- if (system_state == SYSTEM_BOOTING) {
-#ifndef CONFIG_SMP
- /*
- * In UP this is as good a place as any to set up shared info.
- * Limit this to boot only, at restore vcpu setup is done via
- * xen_vcpu_restore().
- */
- xen_setup_vcpu_info_placement();
-#endif
- /*
- * Now that shared info is set up we can start using routines
- * that point to pvclock area.
- */
- xen_init_time_ops();
- }
-}
-
/* This is called once we have the cpu_possible_mask */
-void __ref xen_setup_vcpu_info_placement(void)
+void __init xen_setup_vcpu_info_placement(void)
{
int cpu;
@@ -1228,6 +1223,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
x86_init.irqs.intr_mode_init = x86_init_noop;
x86_init.oem.arch_setup = xen_arch_setup;
x86_init.oem.banner = xen_banner;
+ x86_init.hyper.init_platform = xen_pv_init_platform;
+ x86_init.hyper.guest_late_init = xen_pv_guest_late_init;
/*
* Set up some pagetable state before starting to set any ptes.
@@ -1259,6 +1256,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
get_cpu_cap(&boot_cpu_data);
x86_configure_nx();
+ /* Determine virtual and physical address sizes */
+ get_cpu_address_sizes(&boot_cpu_data);
+
/* Let's presume PV guests always boot on vCPU with id 0. */
per_cpu(xen_vcpu_id, 0) = 0;
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 2c30cabfda90..52206ad81e4b 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1230,8 +1230,7 @@ static void __init xen_pagetable_p2m_free(void)
* We roundup to the PMD, which means that if anybody at this stage is
* using the __ka address of xen_start_info or
* xen_start_info->shared_info they are going to crash. Fortunately
- * we have already revectored in xen_setup_kernel_pagetable and in
- * xen_setup_shared_info.
+ * we have already revectored in xen_setup_kernel_pagetable.
*/
size = roundup(size, PMD_SIZE);
@@ -1292,8 +1291,7 @@ static void __init xen_pagetable_init(void)
/* Remap memory freed due to conflicts with E820 map */
xen_remap_memory();
-
- xen_setup_shared_info();
+ xen_setup_mfn_list_list();
}
static void xen_write_cr2(unsigned long cr2)
{
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index dc502ca8263e..2bce7958ce8b 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -80,9 +80,9 @@ void xen_mc_flush(void)
and just do the call directly. */
mc = &b->entries[0];
- mc->result = privcmd_call(mc->op,
- mc->args[0], mc->args[1], mc->args[2],
- mc->args[3], mc->args[4]);
+ mc->result = xen_single_call(mc->op, mc->args[0], mc->args[1],
+ mc->args[2], mc->args[3],
+ mc->args[4]);
ret = mc->result < 0;
break;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index cd97a62394e7..973f10e05211 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -130,6 +130,10 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
void __init xen_init_spinlocks(void)
{
+ /* Don't need to use pvqspinlock code if there is only 1 vCPU. */
+ if (num_possible_cpus() == 1)
+ xen_pvspin = false;
+
if (!xen_pvspin) {
printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
return;
diff --git a/arch/x86/xen/suspend_pv.c b/arch/x86/xen/suspend_pv.c
index a2e0f110af56..8303b58c79a9 100644
--- a/arch/x86/xen/suspend_pv.c
+++ b/arch/x86/xen/suspend_pv.c
@@ -27,8 +27,9 @@ void xen_pv_pre_suspend(void)
void xen_pv_post_suspend(int suspend_cancelled)
{
xen_build_mfn_list_list();
-
- xen_setup_shared_info();
+ set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
+ HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
+ xen_setup_mfn_list_list();
if (suspend_cancelled) {
xen_start_info->store_mfn =
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index e0f1bcf01d63..c84f1e039d84 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -31,6 +31,8 @@
/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP 100000
+static u64 xen_sched_clock_offset __read_mostly;
+
/* Get the TSC speed from Xen */
static unsigned long xen_tsc_khz(void)
{
@@ -40,7 +42,7 @@ static unsigned long xen_tsc_khz(void)
return pvclock_tsc_khz(info);
}
-u64 xen_clocksource_read(void)
+static u64 xen_clocksource_read(void)
{
struct pvclock_vcpu_time_info *src;
u64 ret;
@@ -57,6 +59,11 @@ static u64 xen_clocksource_get_cycles(struct clocksource *cs)
return xen_clocksource_read();
}
+static u64 xen_sched_clock(void)
+{
+ return xen_clocksource_read() - xen_sched_clock_offset;
+}
+
static void xen_read_wallclock(struct timespec64 *ts)
{
struct shared_info *s = HYPERVISOR_shared_info;
@@ -367,7 +374,7 @@ void xen_timer_resume(void)
}
static const struct pv_time_ops xen_time_ops __initconst = {
- .sched_clock = xen_clocksource_read,
+ .sched_clock = xen_sched_clock,
.steal_clock = xen_steal_clock,
};
@@ -503,8 +510,9 @@ static void __init xen_time_init(void)
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}
-void __ref xen_init_time_ops(void)
+void __init xen_init_time_ops(void)
{
+ xen_sched_clock_offset = xen_clocksource_read();
pv_time_ops = xen_time_ops;
x86_init.timers.timer_init = xen_time_init;
@@ -542,11 +550,11 @@ void __init xen_hvm_init_time_ops(void)
return;
if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
- printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
- "disable pv timer\n");
+ pr_info("Xen doesn't support pvclock on HVM, disable pv timer");
return;
}
+ xen_sched_clock_offset = xen_clocksource_read();
pv_time_ops = xen_time_ops;
x86_init.timers.setup_percpu_clockev = xen_time_init;
x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
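
The new xen_sched_clock() simply rebases the clocksource: one sample taken at init becomes the offset, so scheduler time starts at zero for the guest instead of at the host's (possibly huge) uptime. A user-space sketch of the same rebasing, with CLOCK_MONOTONIC standing in for the Xen pvclock:

	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	static uint64_t sched_clock_offset;

	static uint64_t clocksource_read(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	}

	static void init_time_ops(void)
	{
		/* mirrors: xen_sched_clock_offset = xen_clocksource_read() */
		sched_clock_offset = clocksource_read();
	}

	static uint64_t sched_clock(void)
	{
		return clocksource_read() - sched_clock_offset;
	}

	int main(void)
	{
		init_time_ops();
		printf("ns since init: %llu\n",
		       (unsigned long long)sched_clock());
		return 0;
	}
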
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 3b34745d0a52..e78684597f57 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -31,7 +31,6 @@ extern struct shared_info xen_dummy_shared_info;
extern struct shared_info *HYPERVISOR_shared_info;
void xen_setup_mfn_list_list(void);
-void xen_setup_shared_info(void);
void xen_build_mfn_list_list(void);
void xen_setup_machphys_mapping(void);
void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
@@ -68,12 +67,11 @@ void xen_init_irq_ops(void);
void xen_setup_timer(int cpu);
void xen_setup_runstate_info(int cpu);
void xen_teardown_timer(int cpu);
-u64 xen_clocksource_read(void);
void xen_setup_cpu_clockevents(void);
void xen_save_time_memory_area(void);
void xen_restore_time_memory_area(void);
-void __ref xen_init_time_ops(void);
-void __init xen_hvm_init_time_ops(void);
+void xen_init_time_ops(void);
+void xen_hvm_init_time_ops(void);
irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index d575e8701955..801491e98890 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -60,9 +60,6 @@ config HZ
int
default 100
-source "init/Kconfig"
-source "kernel/Kconfig.freezer"
-
config LOCKDEP_SUPPORT
def_bool y
@@ -176,8 +173,6 @@ config XTENSA_UNALIGNED_USER
Say Y here to enable unaligned memory access in user space.
-source "kernel/Kconfig.preempt"
-
config HAVE_SMP
bool "System Supports SMP (MX)"
depends on XTENSA_VARIANT_CUSTOM
@@ -491,8 +486,6 @@ config SIMDISK1_FILENAME
Another simulated disk in a host file for a buildroot-independent
storage.
-source "mm/Kconfig"
-
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "11"
@@ -567,30 +560,8 @@ config XTFPGA_LCD_8BIT_ACCESS
endmenu
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
menu "Power management options"
source "kernel/power/Kconfig"
endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/xtensa/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
-
-
diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
index f64c14adadb3..39de98e20018 100644
--- a/arch/xtensa/Kconfig.debug
+++ b/arch/xtensa/Kconfig.debug
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
config DEBUG_TLB_SANITY
bool "Debug TLB sanity"
@@ -34,5 +31,3 @@ config S32C1I_SELFTEST
It is easy to make wrong hardware configuration, this test should catch it early.
Say 'N' on stable hardware.
-
-endmenu
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index 53e4178711e6..dc9e0ba7122c 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -30,8 +30,7 @@ Image: boot-elf
zImage: boot-redboot
uImage: $(obj)/uImage
-boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y)) \
- $(addprefix $(obj)/,$(host-progs))
+boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y))
$(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index e7a23f2a519a..7de0149e1cf7 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -197,107 +197,9 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc(v) atomic_add(1,(v))
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-#define atomic_inc_return(v) atomic_add_return(1,(v))
-
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec(v) atomic_sub(1,(v))
-
-/**
- * atomic_dec_return - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)
-
-/**
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
-
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
#endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */
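
The deleted helpers are now supplied by the generic atomic layer. For reference, the cmpxchg loop that __atomic_add_unless() implemented, re-sketched with C11 atomics:

	#include <stdatomic.h>
	#include <stdio.h>

	/* add @a to *v unless the current value is @u; return the old value */
	static int atomic_fetch_add_unless(atomic_int *v, int a, int u)
	{
		int c = atomic_load(v);

		while (c != u) {
			/* on failure, c is reloaded with the current value */
			if (atomic_compare_exchange_weak(v, &c, c + a))
				break;
		}
		return c;
	}

	int main(void)
	{
		atomic_int v = 3;

		printf("%d\n", atomic_fetch_add_unless(&v, 1, 3)); /* 3, no add */
		printf("%d\n", atomic_fetch_add_unless(&v, 1, 0)); /* 3, adds */
		printf("%d\n", atomic_load(&v));                   /* 4 */
		return 0;
	}
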
diff --git a/arch/xtensa/include/asm/hw_breakpoint.h b/arch/xtensa/include/asm/hw_breakpoint.h
index dbe3053b284a..9f119c1ca0b5 100644
--- a/arch/xtensa/include/asm/hw_breakpoint.h
+++ b/arch/xtensa/include/asm/hw_breakpoint.h
@@ -30,13 +30,16 @@ struct arch_hw_breakpoint {
u16 type;
};
+struct perf_event_attr;
struct perf_event;
struct pt_regs;
struct task_struct;
int hw_breakpoint_slots(int type);
-int arch_check_bp_in_kernelspace(struct perf_event *bp);
-int arch_validate_hwbkpt_settings(struct perf_event *bp);
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index 75a07b8119a9..1de07a7f7680 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -116,4 +116,7 @@
#define SO_ZEROCOPY 60
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
#endif /* _XTENSA_SOCKET_H */
diff --git a/arch/xtensa/kernel/hw_breakpoint.c b/arch/xtensa/kernel/hw_breakpoint.c
index b35656ab7dbd..c2e387c19cda 100644
--- a/arch/xtensa/kernel/hw_breakpoint.c
+++ b/arch/xtensa/kernel/hw_breakpoint.c
@@ -33,14 +33,13 @@ int hw_breakpoint_slots(int type)
}
}
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- va = info->address;
- len = bp->attr.bp_len;
+ va = hw->address;
+ len = hw->len;
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -48,50 +47,41 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
/*
* Construct an arch_hw_breakpoint from a perf_event.
*/
-static int arch_build_bp_info(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_X:
- info->type = XTENSA_BREAKPOINT_EXECUTE;
+ hw->type = XTENSA_BREAKPOINT_EXECUTE;
break;
case HW_BREAKPOINT_R:
- info->type = XTENSA_BREAKPOINT_LOAD;
+ hw->type = XTENSA_BREAKPOINT_LOAD;
break;
case HW_BREAKPOINT_W:
- info->type = XTENSA_BREAKPOINT_STORE;
+ hw->type = XTENSA_BREAKPOINT_STORE;
break;
case HW_BREAKPOINT_RW:
- info->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
+ hw->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
break;
default:
return -EINVAL;
}
/* Len */
- info->len = bp->attr.bp_len;
- if (info->len < 1 || info->len > 64 || !is_power_of_2(info->len))
+ hw->len = attr->bp_len;
+ if (hw->len < 1 || hw->len > 64 || !is_power_of_2(hw->len))
return -EINVAL;
/* Address */
- info->address = bp->attr.bp_addr;
- if (info->address & (info->len - 1))
+ hw->address = attr->bp_addr;
+ if (hw->address & (hw->len - 1))
return -EINVAL;
return 0;
}
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
-{
- int ret;
-
- /* Build the arch_hw_breakpoint. */
- ret = arch_build_bp_info(bp);
- return ret;
-}
-
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data)
{
diff --git a/block/Kconfig b/block/Kconfig
index eb50fd4977c2..1f2469a0123c 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -149,6 +149,18 @@ config BLK_WBT
dynamically on an algorithm loosely based on CoDel, factoring in
the realtime performance of the disk.
+config BLK_CGROUP_IOLATENCY
+ bool "Enable support for latency based cgroup IO protection"
+ depends on BLK_CGROUP=y
+ default n
+ ---help---
+ This option enables the .latency interface for IO throttling.
+ The IO controller will attempt to maintain average IO latencies below
+ the configured latency target, throttling anybody with a higher latency
+ target than the victimized group.
+
+ Note, this is an experimental interface and could be changed someday.
+
config BLK_WBT_SQ
bool "Single queue writeback throttling"
default n
@@ -177,6 +189,10 @@ config BLK_DEBUG_FS
Unless you are building a kernel for a tiny system, you should
say Y here.
+config BLK_DEBUG_FS_ZONED
+ bool
+ default BLK_DEBUG_FS && BLK_DEV_ZONED
+
config BLK_SED_OPAL
bool "Logic for interfacing with Opal enabled SEDs"
---help---
diff --git a/block/Makefile b/block/Makefile
index 6a56303b9925..572b33f32c07 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
genhd.o partition-generic.o ioprio.o \
- badblocks.o partitions/
+ badblocks.o partitions/ blk-rq-qos.o
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_ioctl.o
@@ -17,6 +17,7 @@ obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
+obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
@@ -34,4 +35,5 @@ obj-$(CONFIG_BLK_MQ_RDMA) += blk-mq-rdma.o
obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o
obj-$(CONFIG_BLK_WBT) += blk-wbt.o
obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
+obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 495b9ddb3355..41d9036b1822 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -634,7 +634,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
* The following function returns true if every queue must receive the
* same share of the throughput (this condition is used when deciding
* whether idling may be disabled, see the comments in the function
- * bfq_bfqq_may_idle()).
+ * bfq_better_to_idle()).
*
* Such a scenario occurs when:
* 1) all active queues have the same weight,
@@ -742,8 +742,9 @@ inc_counter:
* See the comments to the function bfq_weights_tree_add() for considerations
* about overhead.
*/
-void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
- struct rb_root *root)
+void __bfq_weights_tree_remove(struct bfq_data *bfqd,
+ struct bfq_entity *entity,
+ struct rb_root *root)
{
if (!entity->weight_counter)
return;
@@ -760,6 +761,43 @@ reset_entity_pointer:
}
/*
+ * Invoke __bfq_weights_tree_remove on bfqq and all its inactive
+ * parent entities.
+ */
+void bfq_weights_tree_remove(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+{
+ struct bfq_entity *entity = bfqq->entity.parent;
+
+ __bfq_weights_tree_remove(bfqd, &bfqq->entity,
+ &bfqd->queue_weights_tree);
+
+ for_each_entity(entity) {
+ struct bfq_sched_data *sd = entity->my_sched_data;
+
+ if (sd->next_in_service || sd->in_service_entity) {
+ /*
+ * entity is still active, because either
+ * next_in_service or in_service_entity is not
+ * NULL (see the comments on the definition of
+ * next_in_service for details on why
+ * in_service_entity must be checked too).
+ *
+ * As a consequence, the weight of entity is
+ * not to be removed. In addition, if entity
+ * is active, then its parent entities are
+ * active as well, and thus their weights are
+ * not to be removed either. In the end, this
+ * loop must stop here.
+ */
+ break;
+ }
+ __bfq_weights_tree_remove(bfqd, entity,
+ &bfqd->group_weights_tree);
+ }
+}
+
+/*
* Return expired entry, or NULL to just start from scratch in rbtree.
*/
static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
@@ -1344,18 +1382,30 @@ static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
* remain unchanged after such an expiration, and the
* following statement therefore assigns to
* entity->budget the remaining budget on such an
- * expiration. For clarity, entity->service is not
- * updated on expiration in any case, and, in normal
- * operation, is reset only when bfqq is selected for
- * service (see bfq_get_next_queue).
+ * expiration.
*/
entity->budget = min_t(unsigned long,
bfq_bfqq_budget_left(bfqq),
bfqq->max_budget);
+ /*
+ * At this point, we have used entity->service to get
+ * the budget left (needed for updating
+ * entity->budget). Thus we finally can, and have to,
+ * reset entity->service. The latter must be reset
+ * because bfqq would otherwise be charged again for
+ * the service it has received during its previous
+ * service slot(s).
+ */
+ entity->service = 0;
+
return true;
}
+ /*
+ * We can finally complete expiration, by setting service to 0.
+ */
+ entity->service = 0;
entity->budget = max_t(unsigned long, bfqq->max_budget,
bfq_serv_to_charge(bfqq->next_rq, bfqq));
bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
@@ -3233,11 +3283,21 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
ref = bfqq->ref;
__bfq_bfqq_expire(bfqd, bfqq);
+ if (ref == 1) /* bfqq is gone, no more actions on it */
+ return;
+
/* mark bfqq as waiting a request only if a bic still points to it */
- if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
+ if (!bfq_bfqq_busy(bfqq) &&
reason != BFQQE_BUDGET_TIMEOUT &&
- reason != BFQQE_BUDGET_EXHAUSTED)
+ reason != BFQQE_BUDGET_EXHAUSTED) {
bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
+ /*
+ * Not setting service to 0, because, if the next rq
+ * arrives in time, the queue will go on receiving
+ * service with this same budget (as if it never expired)
+ */
+ } else
+ entity->service = 0;
}
/*
@@ -3295,7 +3355,7 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
* issues taken into account are not trivial. We discuss these issues
* individually while introducing the variables.
*/
-static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+static bool bfq_better_to_idle(struct bfq_queue *bfqq)
{
struct bfq_data *bfqd = bfqq->bfqd;
bool rot_without_queueing =
@@ -3528,19 +3588,19 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
}
/*
- * If the in-service queue is empty but the function bfq_bfqq_may_idle
+ * If the in-service queue is empty but the function bfq_better_to_idle
* returns true, then:
* 1) the queue must remain in service and cannot be expired, and
* 2) the device must be idled to wait for the possible arrival of a new
* request for the queue.
- * See the comments on the function bfq_bfqq_may_idle for the reasons
+ * See the comments on the function bfq_better_to_idle for the reasons
* why performing device idling is the best choice to boost the throughput
- * and preserve service guarantees when bfq_bfqq_may_idle itself
+ * and preserve service guarantees when bfq_better_to_idle itself
* returns true.
*/
static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
{
- return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq);
+ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
}
/*
@@ -3559,8 +3619,14 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
+ /*
+ * Do not expire bfqq for budget timeout if bfqq may be about
+ * to enjoy device idling. The reason why, in this case, we
+ * prevent bfqq from expiring is the same as in the comments
+ * on the case where bfq_bfqq_must_idle() returns true, in
+ * bfq_completed_request().
+ */
if (bfq_may_expire_for_budg_timeout(bfqq) &&
- !bfq_bfqq_wait_request(bfqq) &&
!bfq_bfqq_must_idle(bfqq))
goto expire;
@@ -3620,7 +3686,7 @@ check_queue:
* may idle after their completion, then keep it anyway.
*/
if (bfq_bfqq_wait_request(bfqq) ||
- (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
+ (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
bfqq = NULL;
goto keep_queue;
}
@@ -4582,8 +4648,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
*/
bfqq->budget_timeout = jiffies;
- bfq_weights_tree_remove(bfqd, &bfqq->entity,
- &bfqd->queue_weights_tree);
+ bfq_weights_tree_remove(bfqd, bfqq);
}
now_ns = ktime_get_ns();
@@ -4637,15 +4702,39 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
* or if we want to idle in case it has no pending requests.
*/
if (bfqd->in_service_queue == bfqq) {
- if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
- bfq_arm_slice_timer(bfqd);
+ if (bfq_bfqq_must_idle(bfqq)) {
+ if (bfqq->dispatched == 0)
+ bfq_arm_slice_timer(bfqd);
+ /*
+ * If we get here, we do not expire bfqq, even
+ * if bfqq was in budget timeout or had no
+ * more requests (as controlled in the next
+ * conditional instructions). The reason for
+ * not expiring bfqq is as follows.
+ *
+ * Here bfqq->dispatched > 0 holds, but
+ * bfq_bfqq_must_idle() returned true. This
+ * implies that, even if no request arrives
+ * for bfqq before bfqq->dispatched reaches 0,
+ * bfqq will, however, not be expired on the
+ * completion event that causes bfqq->dispatch
+ * to reach zero. In contrast, on this event,
+ * bfqq will start enjoying device idling
+ * (I/O-dispatch plugging).
+ *
+ * But, if we expired bfqq here, bfqq would
+ * not have the chance to enjoy device idling
+ * when bfqq->dispatched finally reaches
+ * zero. This would expose bfqq to violation
+ * of its reserved service guarantees.
+ */
return;
} else if (bfq_may_expire_for_budg_timeout(bfqq))
bfq_bfqq_expire(bfqd, bfqq, false,
BFQQE_BUDGET_TIMEOUT);
else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
(bfqq->dispatched == 0 ||
- !bfq_bfqq_may_idle(bfqq)))
+ !bfq_better_to_idle(bfqq)))
bfq_bfqq_expire(bfqd, bfqq, false,
BFQQE_NO_MORE_REQUESTS);
}
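
A simplified model of the new bfq_weights_tree_remove() climb (the removal of the queue's own weight is elided and the types are made up for the sketch): walk the parent entities and stop at the first one that is still active, since all of its ancestors must then be active as well.

	#include <stdbool.h>
	#include <stdio.h>

	struct entity {
		const char *name;
		bool active; /* next_in_service || in_service_entity */
		struct entity *parent;
	};

	static void remove_weights_upwards(struct entity *e)
	{
		for (e = e->parent; e; e = e->parent) {
			if (e->active)
				break; /* everything above is active too */
			printf("remove weight of %s\n", e->name);
		}
	}

	int main(void)
	{
		struct entity root = { "root", true, NULL };
		struct entity group = { "group", false, &root };
		struct entity queue = { "queue", false, &group };

		remove_weights_upwards(&queue); /* removes "group", stops at root */
		return 0;
	}
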
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 0f712e03b035..a8a2e5aca4d4 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -827,8 +827,11 @@ struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
struct rb_root *root);
-void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
- struct rb_root *root);
+void __bfq_weights_tree_remove(struct bfq_data *bfqd,
+ struct bfq_entity *entity,
+ struct rb_root *root);
+void bfq_weights_tree_remove(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq);
void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bool compensate, enum bfqq_expiration reason);
void bfq_put_queue(struct bfq_queue *bfqq);
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 4498c43245e2..dbc07b456059 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -499,9 +499,6 @@ static void bfq_active_insert(struct bfq_service_tree *st,
if (bfqq)
list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
- else /* bfq_group */
- bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
-
if (bfqg != bfqd->root_group)
bfqg->active_entities++;
#endif
@@ -601,10 +598,6 @@ static void bfq_active_extract(struct bfq_service_tree *st,
if (bfqq)
list_del(&bfqq->bfqq_list);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
- else /* bfq_group */
- bfq_weights_tree_remove(bfqd, entity,
- &bfqd->group_weights_tree);
-
if (bfqg != bfqd->root_group)
bfqg->active_entities--;
#endif
@@ -799,7 +792,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
if (prev_weight != new_weight) {
root = bfqq ? &bfqd->queue_weights_tree :
&bfqd->group_weights_tree;
- bfq_weights_tree_remove(bfqd, entity, root);
+ __bfq_weights_tree_remove(bfqd, entity, root);
}
entity->weight = new_weight;
/*
@@ -971,7 +964,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
* one of its children receives a new request.
*
* Basically, this function updates the timestamps of entity and
- * inserts entity into its active tree, ater possibly extracting it
+ * inserts entity into its active tree, after possibly extracting it
* from its idle tree.
*/
static void __bfq_activate_entity(struct bfq_entity *entity,
@@ -1015,6 +1008,16 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
entity->on_st = true;
}
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_weights_tree_add(bfqg->bfqd, entity,
+ &bfqg->bfqd->group_weights_tree);
+ }
+#endif
+
bfq_update_fin_time_enqueue(entity, st, backshifted);
}
@@ -1542,12 +1545,6 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
sd->in_service_entity = entity;
/*
- * Reset the accumulator of the amount of service that
- * the entity is about to receive.
- */
- entity->service = 0;
-
- /*
* If entity is no longer a candidate for next
* service, then it must be extracted from its active
* tree, so as to make sure that it won't be
@@ -1664,8 +1661,7 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd->busy_queues--;
if (!bfqq->dispatched)
- bfq_weights_tree_remove(bfqd, &bfqq->entity,
- &bfqd->queue_weights_tree);
+ bfq_weights_tree_remove(bfqd, bfqq);
if (bfqq->wr_coeff > 1)
bfqd->wr_busy_queues--;
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index add7c7c85335..67b5fb861a51 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -160,28 +160,6 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
EXPORT_SYMBOL(bio_integrity_add_page);
/**
- * bio_integrity_intervals - Return number of integrity intervals for a bio
- * @bi: blk_integrity profile for device
- * @sectors: Size of the bio in 512-byte sectors
- *
- * Description: The block layer calculates everything in 512 byte
- * sectors but integrity metadata is done in terms of the data integrity
- * interval size of the storage device. Convert the block layer sectors
- * to the appropriate number of integrity intervals.
- */
-static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
- unsigned int sectors)
-{
- return sectors >> (bi->interval_exp - 9);
-}
-
-static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
- unsigned int sectors)
-{
- return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
-}
-
-/**
* bio_integrity_process - Process integrity metadata for a bio
* @bio: bio to generate/verify integrity metadata for
* @proc_iter: iterator to process
diff --git a/block/bio.c b/block/bio.c
index 047c5dca6d90..b12966e415d3 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -28,9 +28,11 @@
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
+#include <linux/blk-cgroup.h>
#include <trace/events/block.h>
#include "blk.h"
+#include "blk-rq-qos.h"
/*
* Test patch to inline a certain number of bi_io_vec's inside the bio
@@ -156,7 +158,7 @@ out:
unsigned int bvec_nr_vecs(unsigned short idx)
{
- return bvec_slabs[idx].nr_vecs;
+ return bvec_slabs[--idx].nr_vecs;
}
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
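
The fix above accounts for the pool index being stored biased by one (index 0 means "inline vectors"), so lookups into the slab table must subtract one first. A toy version of the lookup with a hypothetical slab table:

	#include <stdio.h>

	struct biovec_slab { int nr_vecs; };

	static const struct biovec_slab bvec_slabs[] = {
		{ 1 }, { 4 }, { 16 }, { 64 }, { 128 }, { 256 },
	};

	static int bvec_nr_vecs(unsigned short idx)
	{
		/* idx is 1-based: idx == 1 refers to bvec_slabs[0] */
		return bvec_slabs[--idx].nr_vecs;
	}

	int main(void)
	{
		printf("%d\n", bvec_nr_vecs(2)); /* 4, not 16 */
		return 0;
	}
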
@@ -645,83 +647,6 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
EXPORT_SYMBOL(bio_clone_fast);
/**
- * bio_clone_bioset - clone a bio
- * @bio_src: bio to clone
- * @gfp_mask: allocation priority
- * @bs: bio_set to allocate from
- *
- * Clone bio. Caller will own the returned bio, but not the actual data it
- * points to. Reference count of returned bio will be one.
- */
-struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
- struct bio_set *bs)
-{
- struct bvec_iter iter;
- struct bio_vec bv;
- struct bio *bio;
-
- /*
- * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
- * bio_src->bi_io_vec to bio->bi_io_vec.
- *
- * We can't do that anymore, because:
- *
- * - The point of cloning the biovec is to produce a bio with a biovec
- * the caller can modify: bi_idx and bi_bvec_done should be 0.
- *
- * - The original bio could've had more than BIO_MAX_PAGES biovecs; if
- * we tried to clone the whole thing bio_alloc_bioset() would fail.
- * But the clone should succeed as long as the number of biovecs we
- * actually need to allocate is fewer than BIO_MAX_PAGES.
- *
- * - Lastly, bi_vcnt should not be looked at or relied upon by code
- * that does not own the bio - reason being drivers don't use it for
- * iterating over the biovec anymore, so expecting it to be kept up
- * to date (i.e. for clones that share the parent biovec) is just
- * asking for trouble and would force extra work on
- * __bio_clone_fast() anyways.
- */
-
- bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
- if (!bio)
- return NULL;
- bio->bi_disk = bio_src->bi_disk;
- bio->bi_opf = bio_src->bi_opf;
- bio->bi_write_hint = bio_src->bi_write_hint;
- bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
- bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
-
- switch (bio_op(bio)) {
- case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
- case REQ_OP_WRITE_ZEROES:
- break;
- case REQ_OP_WRITE_SAME:
- bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
- break;
- default:
- bio_for_each_segment(bv, bio_src, iter)
- bio->bi_io_vec[bio->bi_vcnt++] = bv;
- break;
- }
-
- if (bio_integrity(bio_src)) {
- int ret;
-
- ret = bio_integrity_clone(bio, bio_src, gfp_mask);
- if (ret < 0) {
- bio_put(bio);
- return NULL;
- }
- }
-
- bio_clone_blkcg_association(bio, bio_src);
-
- return bio;
-}
-EXPORT_SYMBOL(bio_clone_bioset);
-
-/**
* bio_add_pc_page - attempt to add page to bio
* @q: the target queue
* @bio: destination bio
@@ -1661,10 +1586,8 @@ void bio_set_pages_dirty(struct bio *bio)
int i;
bio_for_each_segment_all(bvec, bio, i) {
- struct page *page = bvec->bv_page;
-
- if (page && !PageCompound(page))
- set_page_dirty_lock(page);
+ if (!PageCompound(bvec->bv_page))
+ set_page_dirty_lock(bvec->bv_page);
}
}
EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
@@ -1674,19 +1597,15 @@ static void bio_release_pages(struct bio *bio)
struct bio_vec *bvec;
int i;
- bio_for_each_segment_all(bvec, bio, i) {
- struct page *page = bvec->bv_page;
-
- if (page)
- put_page(page);
- }
+ bio_for_each_segment_all(bvec, bio, i)
+ put_page(bvec->bv_page);
}
/*
* bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
* If they are, then fine. If, however, some pages are clean then they must
* have been written out during the direct-IO read. So we take another ref on
- * the BIO and the offending pages and re-dirty the pages in process context.
+ * the BIO and re-dirty the pages in process context.
*
* It is expected that bio_check_pages_dirty() will wholly own the BIO from
* here on. It will run one put_page() against each page and will run one
@@ -1704,78 +1623,70 @@ static struct bio *bio_dirty_list;
*/
static void bio_dirty_fn(struct work_struct *work)
{
- unsigned long flags;
- struct bio *bio;
+ struct bio *bio, *next;
- spin_lock_irqsave(&bio_dirty_lock, flags);
- bio = bio_dirty_list;
+ spin_lock_irq(&bio_dirty_lock);
+ next = bio_dirty_list;
bio_dirty_list = NULL;
- spin_unlock_irqrestore(&bio_dirty_lock, flags);
+ spin_unlock_irq(&bio_dirty_lock);
- while (bio) {
- struct bio *next = bio->bi_private;
+ while ((bio = next) != NULL) {
+ next = bio->bi_private;
bio_set_pages_dirty(bio);
bio_release_pages(bio);
bio_put(bio);
- bio = next;
}
}
void bio_check_pages_dirty(struct bio *bio)
{
struct bio_vec *bvec;
- int nr_clean_pages = 0;
+ unsigned long flags;
int i;
bio_for_each_segment_all(bvec, bio, i) {
- struct page *page = bvec->bv_page;
-
- if (PageDirty(page) || PageCompound(page)) {
- put_page(page);
- bvec->bv_page = NULL;
- } else {
- nr_clean_pages++;
- }
+ if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
+ goto defer;
}
- if (nr_clean_pages) {
- unsigned long flags;
-
- spin_lock_irqsave(&bio_dirty_lock, flags);
- bio->bi_private = bio_dirty_list;
- bio_dirty_list = bio;
- spin_unlock_irqrestore(&bio_dirty_lock, flags);
- schedule_work(&bio_dirty_work);
- } else {
- bio_put(bio);
- }
+ bio_release_pages(bio);
+ bio_put(bio);
+ return;
+defer:
+ spin_lock_irqsave(&bio_dirty_lock, flags);
+ bio->bi_private = bio_dirty_list;
+ bio_dirty_list = bio;
+ spin_unlock_irqrestore(&bio_dirty_lock, flags);
+ schedule_work(&bio_dirty_work);
}
EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
-void generic_start_io_acct(struct request_queue *q, int rw,
+void generic_start_io_acct(struct request_queue *q, int op,
unsigned long sectors, struct hd_struct *part)
{
+ const int sgrp = op_stat_group(op);
int cpu = part_stat_lock();
part_round_stats(q, cpu, part);
- part_stat_inc(cpu, part, ios[rw]);
- part_stat_add(cpu, part, sectors[rw], sectors);
- part_inc_in_flight(q, part, rw);
+ part_stat_inc(cpu, part, ios[sgrp]);
+ part_stat_add(cpu, part, sectors[sgrp], sectors);
+ part_inc_in_flight(q, part, op_is_write(op));
part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);
-void generic_end_io_acct(struct request_queue *q, int rw,
+void generic_end_io_acct(struct request_queue *q, int req_op,
struct hd_struct *part, unsigned long start_time)
{
unsigned long duration = jiffies - start_time;
+ const int sgrp = op_stat_group(req_op);
int cpu = part_stat_lock();
- part_stat_add(cpu, part, ticks[rw], duration);
+ part_stat_add(cpu, part, ticks[sgrp], duration);
part_round_stats(q, cpu, part);
- part_dec_in_flight(q, part, rw);
+ part_dec_in_flight(q, part, op_is_write(req_op));
part_stat_unlock();
}
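With this change the generic accounting helpers take the request operation instead of a read/write direction; op_stat_group() then folds the op into the read, write or discard stat bucket. A hedged sketch of how a bio-based driver would call them after this series (the driver functions are hypothetical):

static void my_driver_start_acct(struct request_queue *q, struct bio *bio,
                                 struct hd_struct *part)
{
        /* bio_op() replaces the old bio_data_dir()-style rw argument */
        generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), part);
}

static void my_driver_end_acct(struct request_queue *q, struct bio *bio,
                               struct hd_struct *part, unsigned long start)
{
        generic_end_io_acct(q, bio_op(bio), part, start);
}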
@@ -1834,6 +1745,9 @@ again:
if (!bio_integrity_endio(bio))
return;
+ if (bio->bi_disk)
+ rq_qos_done_bio(bio->bi_disk->queue, bio);
+
/*
* Need to have a real endio function for chained bios, otherwise
* various corner cases will break (like stacking block devices that
@@ -2042,6 +1956,30 @@ EXPORT_SYMBOL(bioset_init_from_src);
#ifdef CONFIG_BLK_CGROUP
+#ifdef CONFIG_MEMCG
+/**
+ * bio_associate_blkcg_from_page - associate a bio with the page's blkcg
+ * @bio: target bio
+ * @page: the page to lookup the blkcg from
+ *
+ * Associate @bio with the blkcg from @page's owning memcg. This works like
+ * every other associate function wrt references.
+ */
+int bio_associate_blkcg_from_page(struct bio *bio, struct page *page)
+{
+ struct cgroup_subsys_state *blkcg_css;
+
+ if (unlikely(bio->bi_css))
+ return -EBUSY;
+ if (!page->mem_cgroup)
+ return 0;
+ blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup,
+ &io_cgrp_subsys);
+ bio->bi_css = blkcg_css;
+ return 0;
+}
+#endif /* CONFIG_MEMCG */
+
/**
* bio_associate_blkcg - associate a bio with the specified blkcg
* @bio: target bio
@@ -2065,6 +2003,24 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
EXPORT_SYMBOL_GPL(bio_associate_blkcg);
/**
+ * bio_associate_blkg - associate a bio with the specified blkg
+ * @bio: target bio
+ * @blkg: the blkg to associate
+ *
+ * Associate @bio with the blkg specified by @blkg, i.e. the queue-specific
+ * blkcg information for the @bio. A reference is taken on @blkg and released
+ * when the bio is freed.
+ */
+int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
+{
+ if (unlikely(bio->bi_blkg))
+ return -EBUSY;
+ blkg_get(blkg);
+ bio->bi_blkg = blkg;
+ return 0;
+}
+
+/**
* bio_disassociate_task - undo bio_associate_current()
* @bio: target bio
*/
@@ -2078,6 +2034,10 @@ void bio_disassociate_task(struct bio *bio)
css_put(bio->bi_css);
bio->bi_css = NULL;
}
+ if (bio->bi_blkg) {
+ blkg_put(bio->bi_blkg);
+ bio->bi_blkg = NULL;
+ }
}
/**
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index eb85cb87c40f..694595b29b8f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -27,6 +27,7 @@
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
+#include <linux/tracehook.h>
#include "blk.h"
#define MAX_KEY_LEN 100
@@ -50,6 +51,8 @@ static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
+static bool blkcg_debug_stats = false;
+
static bool blkcg_policy_enabled(struct request_queue *q,
const struct blkcg_policy *pol)
{
@@ -564,6 +567,7 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
[BLKG_RWSTAT_WRITE] = "Write",
[BLKG_RWSTAT_SYNC] = "Sync",
[BLKG_RWSTAT_ASYNC] = "Async",
+ [BLKG_RWSTAT_DISCARD] = "Discard",
};
const char *dname = blkg_dev_name(pd->blkg);
u64 v;
@@ -577,7 +581,8 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
(unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));
v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
- atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
+ atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]) +
+ atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_DISCARD]);
seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
return v;
}
@@ -954,30 +959,77 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
const char *dname;
+ char *buf;
struct blkg_rwstat rwstat;
- u64 rbytes, wbytes, rios, wios;
+ u64 rbytes, wbytes, rios, wios, dbytes, dios;
+ size_t size = seq_get_buf(sf, &buf), off = 0;
+ int i;
+ bool has_stats = false;
dname = blkg_dev_name(blkg);
if (!dname)
continue;
+ /*
+ * Hooray string manipulation: scnprintf() returns the number of bytes
+ * written, NOT including the trailing '\0'. Advancing @off by that
+ * count makes the next write start on top of the terminator, so the
+ * pieces concatenate cleanly.
+ */
+ off += scnprintf(buf+off, size-off, "%s ", dname);
+
spin_lock_irq(blkg->q->queue_lock);
rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
offsetof(struct blkcg_gq, stat_bytes));
rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
+ dbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
offsetof(struct blkcg_gq, stat_ios));
rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
+ dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
spin_unlock_irq(blkg->q->queue_lock);
- if (rbytes || wbytes || rios || wios)
- seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
- dname, rbytes, wbytes, rios, wios);
+ if (rbytes || wbytes || rios || wios) {
+ has_stats = true;
+ off += scnprintf(buf+off, size-off,
+ "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
+ rbytes, wbytes, rios, wios,
+ dbytes, dios);
+ }
+
+ if (!blkcg_debug_stats)
+ goto next;
+
+ if (atomic_read(&blkg->use_delay)) {
+ has_stats = true;
+ off += scnprintf(buf+off, size-off,
+ " use_delay=%d delay_nsec=%llu",
+ atomic_read(&blkg->use_delay),
+ (unsigned long long)atomic64_read(&blkg->delay_nsec));
+ }
+
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+ size_t written;
+
+ if (!blkg->pd[i] || !pol->pd_stat_fn)
+ continue;
+
+ written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
+ if (written)
+ has_stats = true;
+ off += written;
+ }
+next:
+ if (has_stats) {
+ off += scnprintf(buf+off, size-off, "\n");
+ seq_commit(sf, off);
+ }
}
rcu_read_unlock();
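A standalone illustration of the scnprintf() accumulation pattern used in blkcg_print_stat() above; the userspace stand-in for scnprintf() and the sample values are not from the patch:

#include <stdio.h>
#include <stdarg.h>

/* Userspace stand-in for the kernel's scnprintf(): returns the bytes
 * actually written, never more than size - 1. */
static size_t scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int i;

        if (!size)
                return 0;
        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);
        return (size_t)i < size ? (size_t)i : size - 1;
}

int main(void)
{
        char buf[128];
        size_t size = sizeof(buf), off = 0;

        off += scnprintf(buf + off, size - off, "%s ", "8:0");
        off += scnprintf(buf + off, size - off,
                         "rbytes=%llu wbytes=%llu", 4096ULL, 8192ULL);
        off += scnprintf(buf + off, size - off, "\n");
        fwrite(buf, 1, off, stdout);    /* "8:0 rbytes=4096 wbytes=8192" */
        return 0;
}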
@@ -1191,6 +1243,14 @@ int blkcg_init_queue(struct request_queue *q)
if (preloaded)
radix_tree_preload_end();
+ ret = blk_iolatency_init(q);
+ if (ret) {
+ spin_lock_irq(q->queue_lock);
+ blkg_destroy_all(q);
+ spin_unlock_irq(q->queue_lock);
+ return ret;
+ }
+
ret = blk_throtl_init(q);
if (ret) {
spin_lock_irq(q->queue_lock);
@@ -1288,6 +1348,13 @@ static void blkcg_bind(struct cgroup_subsys_state *root_css)
mutex_unlock(&blkcg_pol_mutex);
}
+static void blkcg_exit(struct task_struct *tsk)
+{
+ if (tsk->throttle_queue)
+ blk_put_queue(tsk->throttle_queue);
+ tsk->throttle_queue = NULL;
+}
+
struct cgroup_subsys io_cgrp_subsys = {
.css_alloc = blkcg_css_alloc,
.css_offline = blkcg_css_offline,
@@ -1297,6 +1364,7 @@ struct cgroup_subsys io_cgrp_subsys = {
.dfl_cftypes = blkcg_files,
.legacy_cftypes = blkcg_legacy_files,
.legacy_name = "blkio",
+ .exit = blkcg_exit,
#ifdef CONFIG_MEMCG
/*
* This ensures that, if available, memcg is automatically enabled
@@ -1547,3 +1615,209 @@ out_unlock:
mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
+
+/*
+ * Scale the accumulated delay based on how long it has been since we updated
+ * the delay. We only call this when we are adding delay, in case it's been a
+ * while since we added delay, and when we are checking to see if we need to
+ * delay a task, to account for any delays that may have occurred.
+ */
+static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
+{
+ u64 old = atomic64_read(&blkg->delay_start);
+
+ /*
+ * We only want to scale down every second. The idea here is that we
+ * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
+ * time window. We only want to throttle tasks for recent delay that
+ * has occurred, in 1 second time windows since that's the maximum
+ * things can be throttled. We save the current delay window in
+ * blkg->last_delay so we know what amount is still left to be charged
+ * to the blkg from this point onward. blkg->last_use keeps track of
+ * the use_delay counter. The idea is if we're unthrottling the blkg we
+ * are ok with whatever is happening now, and we can take away more of
+ * the accumulated delay as we've already throttled enough that
+ * everybody is happy with their IO latencies.
+ */
+ if (time_before64(old + NSEC_PER_SEC, now) &&
+ atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
+ u64 cur = atomic64_read(&blkg->delay_nsec);
+ u64 sub = min_t(u64, blkg->last_delay, now - old);
+ int cur_use = atomic_read(&blkg->use_delay);
+
+ /*
+ * We've been unthrottled, subtract a larger chunk of our
+ * accumulated delay.
+ */
+ if (cur_use < blkg->last_use)
+ sub = max_t(u64, sub, blkg->last_delay >> 1);
+
+ /*
+ * This shouldn't happen, but handle it anyway. Our delay_nsec
+ * should only ever be growing except here where we subtract out
+ * min(last_delay, 1 second), but lord knows bugs happen and I'd
+ * rather not end up with negative numbers.
+ */
+ if (unlikely(cur < sub)) {
+ atomic64_set(&blkg->delay_nsec, 0);
+ blkg->last_delay = 0;
+ } else {
+ atomic64_sub(sub, &blkg->delay_nsec);
+ blkg->last_delay = cur - sub;
+ }
+ blkg->last_use = cur_use;
+ }
+}
+
+/*
+ * This is called when we want to actually walk up the hierarchy and check to
+ * see if we need to throttle, and then actually throttle if there is some
+ * accumulated delay. This should only be called upon return to user space so
+ * we're not holding some lock that would induce a priority inversion.
+ */
+static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
+{
+ u64 now = ktime_to_ns(ktime_get());
+ u64 exp;
+ u64 delay_nsec = 0;
+ int tok;
+
+ while (blkg->parent) {
+ if (atomic_read(&blkg->use_delay)) {
+ blkcg_scale_delay(blkg, now);
+ delay_nsec = max_t(u64, delay_nsec,
+ atomic64_read(&blkg->delay_nsec));
+ }
+ blkg = blkg->parent;
+ }
+
+ if (!delay_nsec)
+ return;
+
+ /*
+ * Let's not sleep for all eternity if we've amassed a huge delay.
+ * Swapping or metadata IO can accumulate tens of seconds worth of
+ * delay, and we want userspace to be able to do _something_, so cap
+ * the delay at 0.25 seconds. With tens of seconds of accumulated
+ * delay the task is then delayed for 0.25 seconds on every syscall.
+ */
+ delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
+
+ /*
+ * TODO: the use_memdelay flag is going to be for the upcoming psi stuff
+ * that hasn't landed upstream yet. Once that stuff is in place we need
+ * to do a psi_memstall_enter/leave if memdelay is set.
+ */
+
+ exp = ktime_add_ns(now, delay_nsec);
+ tok = io_schedule_prepare();
+ do {
+ __set_current_state(TASK_KILLABLE);
+ if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
+ break;
+ } while (!fatal_signal_pending(current));
+ io_schedule_finish(tok);
+}
+
+/**
+ * blkcg_maybe_throttle_current - throttle the current task if it has been marked
+ *
+ * This is only called if we've been marked with set_notify_resume(). Notify
+ * resume can be set for reasons other than blkcg throttling, so if
+ * current->throttle_queue is not set this does nothing. It should only ever
+ * be called by the resume code; it's not meant to be called willy-nilly, as
+ * it will actually do the work to throttle the task if it is set up for
+ * throttling.
+ */
+void blkcg_maybe_throttle_current(void)
+{
+ struct request_queue *q = current->throttle_queue;
+ struct cgroup_subsys_state *css;
+ struct blkcg *blkcg;
+ struct blkcg_gq *blkg;
+ bool use_memdelay = current->use_memdelay;
+
+ if (!q)
+ return;
+
+ current->throttle_queue = NULL;
+ current->use_memdelay = false;
+
+ rcu_read_lock();
+ css = kthread_blkcg();
+ if (css)
+ blkcg = css_to_blkcg(css);
+ else
+ blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
+
+ if (!blkcg)
+ goto out;
+ blkg = blkg_lookup(blkcg, q);
+ if (!blkg)
+ goto out;
+ blkg = blkg_try_get(blkg);
+ if (!blkg)
+ goto out;
+ rcu_read_unlock();
+
+ blkcg_maybe_throttle_blkg(blkg, use_memdelay);
+ blkg_put(blkg);
+ blk_put_queue(q);
+ return;
+out:
+ rcu_read_unlock();
+ blk_put_queue(q);
+}
+EXPORT_SYMBOL_GPL(blkcg_maybe_throttle_current);
+
+/**
+ * blkcg_schedule_throttle - this task needs to check for throttling
+ * @q: the request queue IO was submitted on
+ * @use_memdelay: do we charge this to memory delay for PSI
+ *
+ * This is called by the IO controller when we know there's delay accumulated
+ * for the blkg for this task. We do not pass the blkg because some callers
+ * may not have that information; the swapping code, for instance, will only
+ * have a request_queue at that point. This sets the
+ * notify_resume for the task to check and see if it requires throttling before
+ * returning to user space.
+ *
+ * We will only schedule once per syscall. You can call this over and over
+ * again and it will only do the check once upon return to user space, and only
+ * throttle once. If the task needs to be throttled again it'll need to be
+ * re-set the next time we see the task.
+ */
+void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
+{
+ if (unlikely(current->flags & PF_KTHREAD))
+ return;
+
+ if (!blk_get_queue(q))
+ return;
+
+ if (current->throttle_queue)
+ blk_put_queue(current->throttle_queue);
+ current->throttle_queue = q;
+ if (use_memdelay)
+ current->use_memdelay = use_memdelay;
+ set_notify_resume(current);
+}
+EXPORT_SYMBOL_GPL(blkcg_schedule_throttle);
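Taken together, the two halves pair up: a policy arms the throttle from IO submission context, and the task pays the delay on its way back to user space (the <linux/tracehook.h> include added above is what wires blkcg_maybe_throttle_current() into the notify-resume path). A hedged sketch of the producer side, modeled on what blk-iolatency does later in this patch; the predicate is hypothetical:

static void my_policy_throttle(struct rq_qos *rqos, struct bio *bio,
                               spinlock_t *lock)
{
        bool swap_io = (bio->bi_opf & REQ_SWAP) == REQ_SWAP;

        /* hypothetical predicate: this group has delay charged to it */
        if (my_policy_delay_pending(rqos, bio))
                blkcg_schedule_throttle(rqos->q, swap_io);
        /*
         * Nothing sleeps here; the task blocks in
         * blkcg_maybe_throttle_current() on return to user space.
         */
}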
+
+/**
+ * blkcg_add_delay - add delay to this blkg
+ * @blkg: the blkg to charge
+ * @now: the current time in nanoseconds
+ * @delta: how many nanoseconds of delay to add
+ *
+ * Charge @delta to the blkg's current delay accumulation. This is used to
+ * throttle tasks if an IO controller thinks we need more throttling.
+ */
+void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
+{
+ blkcg_scale_delay(blkg, now);
+ atomic64_add(delta, &blkg->delay_nsec);
+}
+EXPORT_SYMBOL_GPL(blkcg_add_delay);
+
+module_param(blkcg_debug_stats, bool, 0644);
+MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
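blkcg_debug_stats gates the extra use_delay/delay_nsec and per-policy fields in io.stat. Since blk-cgroup is built in, the parameter should appear under /sys/module; the exact path below is an assumption. A minimal toggle from user space:

#include <stdio.h>

int main(void)
{
        /* assumed path for the built-in blk-cgroup module parameter */
        FILE *f = fopen("/sys/module/blk_cgroup/parameters/blkcg_debug_stats",
                        "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fputs("1\n", f);        /* "0" turns the debug fields back off */
        return fclose(f) ? 1 : 0;
}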
diff --git a/block/blk-core.c b/block/blk-core.c
index ee33590f54eb..12550340418d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -42,7 +42,7 @@
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
-#include "blk-wbt.h"
+#include "blk-rq-qos.h"
#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
@@ -715,6 +715,35 @@ void blk_set_queue_dying(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
+void blk_exit_queue(struct request_queue *q)
+{
+ /*
+ * Since the I/O scheduler exit code may access cgroup information,
+ * perform I/O scheduler exit before disassociating from the block
+ * cgroup controller.
+ */
+ if (q->elevator) {
+ ioc_clear_queue(q);
+ elevator_exit(q, q->elevator);
+ q->elevator = NULL;
+ }
+
+ /*
+ * Remove all references to @q from the block cgroup controller before
+ * restoring @q->queue_lock to avoid that restoring this pointer causes
+ * e.g. blkcg_print_blkgs() to crash.
+ */
+ blkcg_exit_queue(q);
+
+ /*
+ * Since the cgroup code may dereference the @q->backing_dev_info
+ * pointer, only decrease its reference count after having removed the
+ * association with the block cgroup controller.
+ */
+ bdi_put(q->backing_dev_info);
+}
+
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
@@ -762,9 +791,13 @@ void blk_cleanup_queue(struct request_queue *q)
* make sure all in-progress dispatch are completed because
* blk_freeze_queue() can only complete all requests, and
* dispatch may still be in-progress since we dispatch requests
- * from more than one contexts
+ * from more than one context.
+ *
+ * No need to quiesce the queue if it isn't initialized yet, since
+ * blk_freeze_queue() should be enough for the passthrough request
+ * case.
*/
- if (q->mq_ops)
+ if (q->mq_ops && blk_queue_init_done(q))
blk_mq_quiesce_queue(q);
/* for synchronous bio-based driver finish in-flight integrity i/o */
@@ -780,30 +813,7 @@ void blk_cleanup_queue(struct request_queue *q)
*/
WARN_ON_ONCE(q->kobj.state_in_sysfs);
- /*
- * Since the I/O scheduler exit code may access cgroup information,
- * perform I/O scheduler exit before disassociating from the block
- * cgroup controller.
- */
- if (q->elevator) {
- ioc_clear_queue(q);
- elevator_exit(q, q->elevator);
- q->elevator = NULL;
- }
-
- /*
- * Remove all references to @q from the block cgroup controller before
- * restoring @q->queue_lock to avoid that restoring this pointer causes
- * e.g. blkcg_print_blkgs() to crash.
- */
- blkcg_exit_queue(q);
-
- /*
- * Since the cgroup code may dereference the @q->backing_dev_info
- * pointer, only decrease its reference count after having removed the
- * association with the block cgroup controller.
- */
- bdi_put(q->backing_dev_info);
+ blk_exit_queue(q);
if (q->mq_ops)
blk_mq_free_queue(q);
@@ -1180,6 +1190,7 @@ out_exit_flush_rq:
q->exit_rq_fn(q, q->fq->flush_rq);
out_free_flush_queue:
blk_free_flush_queue(q->fq);
+ q->fq = NULL;
return -ENOMEM;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1641,7 +1652,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
blk_delete_timer(rq);
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
- wbt_requeue(q->rq_wb, rq);
+ rq_qos_requeue(q, rq);
if (rq->rq_flags & RQF_QUEUED)
blk_queue_end_tag(q, rq);
@@ -1748,7 +1759,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
/* this is a bio leak */
WARN_ON(req->bio != NULL);
- wbt_done(q->rq_wb, req);
+ rq_qos_done(q, req);
/*
* Request may not have originated from ll_rw_blk. if not,
@@ -1982,7 +1993,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
int where = ELEVATOR_INSERT_SORT;
struct request *req, *free;
unsigned int request_count = 0;
- unsigned int wb_acct;
/*
* low level driver can indicate that it wants pages above a
@@ -2040,7 +2050,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
}
get_rq:
- wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);
+ rq_qos_throttle(q, bio, q->queue_lock);
/*
* Grab a free request. This is might sleep but can not fail.
@@ -2050,7 +2060,7 @@ get_rq:
req = get_request(q, bio->bi_opf, bio, 0, GFP_NOIO);
if (IS_ERR(req)) {
blk_queue_exit(q);
- __wbt_done(q->rq_wb, wb_acct);
+ rq_qos_cleanup(q, bio);
if (PTR_ERR(req) == -ENOMEM)
bio->bi_status = BLK_STS_RESOURCE;
else
@@ -2059,7 +2069,7 @@ get_rq:
goto out_unlock;
}
- wbt_track(req, wb_acct);
+ rq_qos_track(q, req, bio);
/*
* After dropping the lock and possibly sleeping here, our request
@@ -2700,13 +2710,13 @@ EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
void blk_account_io_completion(struct request *req, unsigned int bytes)
{
if (blk_do_io_stat(req)) {
- const int rw = rq_data_dir(req);
+ const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
int cpu;
cpu = part_stat_lock();
part = req->part;
- part_stat_add(cpu, part, sectors[rw], bytes >> 9);
+ part_stat_add(cpu, part, sectors[sgrp], bytes >> 9);
part_stat_unlock();
}
}
@@ -2720,7 +2730,7 @@ void blk_account_io_done(struct request *req, u64 now)
*/
if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
unsigned long duration;
- const int rw = rq_data_dir(req);
+ const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
int cpu;
@@ -2728,10 +2738,10 @@ void blk_account_io_done(struct request *req, u64 now)
cpu = part_stat_lock();
part = req->part;
- part_stat_inc(cpu, part, ios[rw]);
- part_stat_add(cpu, part, ticks[rw], duration);
+ part_stat_inc(cpu, part, ios[sgrp]);
+ part_stat_add(cpu, part, ticks[sgrp], duration);
part_round_stats(req->q, cpu, part);
- part_dec_in_flight(req->q, part, rw);
+ part_dec_in_flight(req->q, part, rq_data_dir(req));
hd_struct_put(part);
part_stat_unlock();
@@ -2751,9 +2761,9 @@ static bool blk_pm_allow_request(struct request *rq)
return rq->rq_flags & RQF_PM;
case RPM_SUSPENDED:
return false;
+ default:
+ return true;
}
-
- return true;
}
#else
static bool blk_pm_allow_request(struct request *rq)
@@ -2980,7 +2990,7 @@ void blk_start_request(struct request *req)
req->throtl_size = blk_rq_sectors(req);
#endif
req->rq_flags |= RQF_STATS;
- wbt_issue(req->q->rq_wb, req);
+ rq_qos_issue(req->q, req);
}
BUG_ON(blk_rq_is_complete(req));
@@ -3053,6 +3063,10 @@ EXPORT_SYMBOL_GPL(blk_steal_bios);
* Passing the result of blk_rq_bytes() as @nr_bytes guarantees
* %false return from this function.
*
+ * Note:
+ * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
+ * blk_rq_bytes() and in blk_update_request().
+ *
* Return:
* %false - this request doesn't have any more data
* %true - this request has more data
@@ -3200,7 +3214,7 @@ void blk_finish_request(struct request *req, blk_status_t error)
blk_account_io_done(req, now);
if (req->end_io) {
- wbt_done(req->q->rq_wb, req);
+ rq_qos_done(q, req);
req->end_io(req, error);
} else {
if (blk_bidi_rq(req))
@@ -3763,9 +3777,11 @@ EXPORT_SYMBOL(blk_finish_plug);
*/
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
- /* not support for RQF_PM and ->rpm_status in blk-mq yet */
- if (q->mq_ops)
+ /* Don't enable runtime PM for blk-mq until it is ready */
+ if (q->mq_ops) {
+ pm_runtime_disable(dev);
return;
+ }
q->dev = dev;
q->rpm_status = RPM_ACTIVE;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index f23311e4b201..01580f88fcb3 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -278,7 +278,7 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
atomic_set(&ioc->nr_tasks, 1);
atomic_set(&ioc->active_ref, 1);
spin_lock_init(&ioc->lock);
- INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
+ INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
INIT_HLIST_HEAD(&ioc->icq_list);
INIT_WORK(&ioc->release_work, ioc_release_fn);
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
new file mode 100644
index 000000000000..19923f8a029d
--- /dev/null
+++ b/block/blk-iolatency.c
@@ -0,0 +1,955 @@
+/*
+ * Block rq-qos base io controller
+ *
+ * This works similar to wbt with a few exceptions
+ *
+ * - It's bio based, so the latency covers the whole block layer in addition to
+ * the actual io.
+ * - We will throttle all IO that comes in here if we need to.
+ * - We use the mean latency over the 100ms window. This is because writes can
+ * be particularly fast, which could give us a false sense of the impact of
+ * other workloads on our protected workload.
+ * - By default there's no throttling; we set the queue_depth to UINT_MAX so
+ * that we can have as many outstanding bio's as we're allowed to. Only at
+ * throttle time do we pay attention to the actual queue depth.
+ *
+ * The hierarchy works like the cpu controller does: we track the latency at
+ * every configured node, and each configured node has its own independent
+ * queue depth. This means that we only care about our latency targets at the
+ * peer level. Some group at the bottom of the hierarchy isn't going to affect
+ * a group at the end of some other path if we're only configured at leaf level.
+ *
+ * Consider the following
+ *
+ * root blkg
+ * / \
+ * fast (target=5ms) slow (target=10ms)
+ * / \ / \
+ * a b normal(15ms) unloved
+ *
+ * "a" and "b" have no target, but their combined io under "fast" cannot exceed
+ * an average latency of 5ms. If it does then we will throttle the "slow"
+ * group. In the case of "normal", if it exceeds its 15ms target, we will
+ * throttle "unloved", but nobody else.
+ *
+ * In this example "fast", "slow", and "normal" will be the only groups actually
+ * accounting their io latencies. We have to walk up the hierarchy to the root
+ * on every submit and complete so we can do the appropriate stat recording and
+ * adjust our own queue depth if needed.
+ *
+ * There are 2 ways we throttle IO.
+ *
+ * 1) Queue depth throttling. As we throttle down we will adjust the maximum
+ * number of IO's we're allowed to have in flight. This starts at UINT_MAX down
+ * to 1. If the group is only ever submitting IO for itself then this is the
+ * only way we throttle.
+ *
+ * 2) Induced delay throttling. This is for the case that a group is generating
+ * IO that has to be issued by the root cg to avoid priority inversion. So think
+ * REQ_META or REQ_SWAP. If we are already at qd == 1 and we're getting a lot
+ * of work done for us on behalf of the root cg and are being asked to scale
+ * down further, then we induce a latency at userspace return. We accumulate the
+ * total amount of time we need to be punished by doing
+ *
+ * total_time += min_lat_nsec - actual_io_completion
+ *
+ * and then at throttle time will do
+ *
+ * throttle_time = min(total_time, NSEC_PER_SEC)
+ *
+ * This induced delay will throttle back the activity that is generating the
+ * root cg issued io's, whether that's some metadata intensive operation or the
+ * group is using so much memory that it is pushing us into swap.
+ *
+ * Copyright (C) 2018 Josef Bacik
+ */
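A worked example of the induced-delay arithmetic described above, as a standalone program; the target and completion times are invented:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        unsigned long long min_lat_nsec = 5000000ULL;   /* 5ms target */
        unsigned long long completions[] = { 1000000ULL, 2000000ULL,
                                             500000ULL };
        unsigned long long total_time = 0, throttle_time;
        unsigned int i;

        /* Each root-issued IO that beats the target charges the shortfall. */
        for (i = 0; i < 3; i++)
                total_time += min_lat_nsec - completions[i];

        throttle_time = total_time < NSEC_PER_SEC ? total_time : NSEC_PER_SEC;
        printf("accumulated=%lluns throttled_for=%lluns\n",
               total_time, throttle_time);      /* 11.5ms accumulated */
        return 0;
}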
+#include <linux/kernel.h>
+#include <linux/blk_types.h>
+#include <linux/backing-dev.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/memcontrol.h>
+#include <linux/sched/loadavg.h>
+#include <linux/sched/signal.h>
+#include <trace/events/block.h>
+#include "blk-rq-qos.h"
+#include "blk-stat.h"
+
+#define DEFAULT_SCALE_COOKIE 1000000U
+
+static struct blkcg_policy blkcg_policy_iolatency;
+struct iolatency_grp;
+
+struct blk_iolatency {
+ struct rq_qos rqos;
+ struct timer_list timer;
+ atomic_t enabled;
+};
+
+static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
+{
+ return container_of(rqos, struct blk_iolatency, rqos);
+}
+
+static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
+{
+ return atomic_read(&blkiolat->enabled) > 0;
+}
+
+struct child_latency_info {
+ spinlock_t lock;
+
+ /* Last time we adjusted the scale of everybody. */
+ u64 last_scale_event;
+
+ /* The latency that we missed. */
+ u64 scale_lat;
+
+ /* Total io's from all of our children for the last summation. */
+ u64 nr_samples;
+
+ /* The guy who actually changed the latency numbers. */
+ struct iolatency_grp *scale_grp;
+
+ /* Cookie to tell if we need to scale up or down. */
+ atomic_t scale_cookie;
+};
+
+struct iolatency_grp {
+ struct blkg_policy_data pd;
+ struct blk_rq_stat __percpu *stats;
+ struct blk_iolatency *blkiolat;
+ struct rq_depth rq_depth;
+ struct rq_wait rq_wait;
+ atomic64_t window_start;
+ atomic_t scale_cookie;
+ u64 min_lat_nsec;
+ u64 cur_win_nsec;
+
+ /* total running average of our io latency. */
+ u64 lat_avg;
+
+ /* Our current number of IO's for the last summation. */
+ u64 nr_samples;
+
+ struct child_latency_info child_lat;
+};
+
+#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
+#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
+/*
+ * These are the constants used to fake the fixed-point moving average
+ * calculation just like load average. The call to CALC_LOAD folds
+ * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling
+ * window size is bucketed to try to approximately calculate average
+ * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
+ * elapse immediately. Note, windows only elapse with IO activity. Idle
+ * periods extend the most recent window.
+ */
+#define BLKIOLATENCY_NR_EXP_FACTORS 5
+#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
+ (BLKIOLATENCY_NR_EXP_FACTORS - 1))
+static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
+ 2045, // exp(1/600) - 600 samples
+ 2039, // exp(1/240) - 240 samples
+ 2031, // exp(1/120) - 120 samples
+ 2023, // exp(1/80) - 80 samples
+ 2014, // exp(1/60) - 60 samples
+};
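The decay itself comes from CALC_LOAD in <linux/sched/loadavg.h> (FIXED_1 = 2048, FSHIFT = 11). A standalone sketch with invented samples, showing how lat_avg converges toward a steady input:

#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1ULL << FSHIFT)        /* 2048 */

static unsigned long long calc_load(unsigned long long load,
                                    unsigned long long exp,
                                    unsigned long long n)
{
        load *= exp;
        load += n * (FIXED_1 - exp);
        return load >> FSHIFT;
}

int main(void)
{
        unsigned long long lat_avg = 0;
        int window;

        /* 60 windows of a steady 2ms mean with the fastest factor (2014):
         * lat_avg reaches roughly (1 - 1/e) of the input, per the table. */
        for (window = 0; window < 60; window++)
                lat_avg = calc_load(lat_avg, 2014, 2000000ULL);
        printf("lat_avg after 60 windows: %lluns\n", lat_avg);
        return 0;
}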
+
+static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
+{
+ return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
+}
+
+static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
+{
+ return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
+}
+
+static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
+{
+ return pd_to_blkg(&iolat->pd);
+}
+
+static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
+ wait_queue_entry_t *wait,
+ bool first_block)
+{
+ struct rq_wait *rqw = &iolat->rq_wait;
+
+ if (first_block && waitqueue_active(&rqw->wait) &&
+ rqw->wait.head.next != &wait->entry)
+ return false;
+ return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
+}
+
+static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
+ struct iolatency_grp *iolat,
+ spinlock_t *lock, bool issue_as_root,
+ bool use_memdelay)
+ __releases(lock)
+ __acquires(lock)
+{
+ struct rq_wait *rqw = &iolat->rq_wait;
+ unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
+ DEFINE_WAIT(wait);
+ bool first_block = true;
+
+ if (use_delay)
+ blkcg_schedule_throttle(rqos->q, use_memdelay);
+
+ /*
+ * To avoid priority inversions we want to just take a slot if we are
+ * issuing as root. If we're being killed off there's no point in
+ * delaying things, we may have been killed by OOM so throttling may
+ * make recovery take even longer, so just let the IO's through so the
+ * task can go away.
+ */
+ if (issue_as_root || fatal_signal_pending(current)) {
+ atomic_inc(&rqw->inflight);
+ return;
+ }
+
+ if (iolatency_may_queue(iolat, &wait, first_block))
+ return;
+
+ do {
+ prepare_to_wait_exclusive(&rqw->wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ if (iolatency_may_queue(iolat, &wait, first_block))
+ break;
+ first_block = false;
+
+ if (lock) {
+ spin_unlock_irq(lock);
+ io_schedule();
+ spin_lock_irq(lock);
+ } else {
+ io_schedule();
+ }
+ } while (1);
+
+ finish_wait(&rqw->wait, &wait);
+}
+
+#define SCALE_DOWN_FACTOR 2
+#define SCALE_UP_FACTOR 4
+
+static inline unsigned long scale_amount(unsigned long qd, bool up)
+{
+ return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
+}
+
+/*
+ * We scale the qd down faster than we scale up, so we need to use this helper
+ * to adjust the scale_cookie accordingly so we don't prematurely get
+ * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
+ *
+ * Each group has their own local copy of the last scale cookie they saw, so if
+ * the global scale cookie goes up or down they know which way they need to go
+ * based on their last knowledge of it.
+ */
+static void scale_cookie_change(struct blk_iolatency *blkiolat,
+ struct child_latency_info *lat_info,
+ bool up)
+{
+ unsigned long qd = blk_queue_depth(blkiolat->rqos.q);
+ unsigned long scale = scale_amount(qd, up);
+ unsigned long old = atomic_read(&lat_info->scale_cookie);
+ unsigned long max_scale = qd << 1;
+ unsigned long diff = 0;
+
+ if (old < DEFAULT_SCALE_COOKIE)
+ diff = DEFAULT_SCALE_COOKIE - old;
+
+ if (up) {
+ if (scale + old > DEFAULT_SCALE_COOKIE)
+ atomic_set(&lat_info->scale_cookie,
+ DEFAULT_SCALE_COOKIE);
+ else if (diff > qd)
+ atomic_inc(&lat_info->scale_cookie);
+ else
+ atomic_add(scale, &lat_info->scale_cookie);
+ } else {
+ /*
+ * We don't want to dig a hole so deep that it takes us hours to
+ * dig out of it. Just enough that we don't throttle/unthrottle
+ * with jagged workloads but can still unthrottle once pressure
+ * has sufficiently dissipated.
+ */
+ if (diff > qd) {
+ if (diff < max_scale)
+ atomic_dec(&lat_info->scale_cookie);
+ } else {
+ atomic_sub(scale, &lat_info->scale_cookie);
+ }
+ }
+}
+
+/*
+ * Change the queue depth of the iolatency_grp. We add/subtract 1/16th of the
+ * queue depth at a time so we don't get wild swings and hopefully dial in to
+ * fairer distribution of the overall queue depth.
+ */
+static void scale_change(struct iolatency_grp *iolat, bool up)
+{
+ unsigned long qd = blk_queue_depth(iolat->blkiolat->rqos.q);
+ unsigned long scale = scale_amount(qd, up);
+ unsigned long old = iolat->rq_depth.max_depth;
+ bool changed = false;
+
+ if (old > qd)
+ old = qd;
+
+ if (up) {
+ if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
+ return;
+
+ if (old < qd) {
+ changed = true;
+ old += scale;
+ old = min(old, qd);
+ iolat->rq_depth.max_depth = old;
+ wake_up_all(&iolat->rq_wait.wait);
+ }
+ } else if (old > 1) {
+ old >>= 1;
+ changed = true;
+ iolat->rq_depth.max_depth = max(old, 1UL);
+ }
+}
+
+/* Check our parent and see if the scale cookie has changed. */
+static void check_scale_change(struct iolatency_grp *iolat)
+{
+ struct iolatency_grp *parent;
+ struct child_latency_info *lat_info;
+ unsigned int cur_cookie;
+ unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
+ u64 scale_lat;
+ unsigned int old;
+ int direction = 0;
+
+ if (lat_to_blkg(iolat)->parent == NULL)
+ return;
+
+ parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
+ if (!parent)
+ return;
+
+ lat_info = &parent->child_lat;
+ cur_cookie = atomic_read(&lat_info->scale_cookie);
+ scale_lat = READ_ONCE(lat_info->scale_lat);
+
+ if (cur_cookie < our_cookie)
+ direction = -1;
+ else if (cur_cookie > our_cookie)
+ direction = 1;
+ else
+ return;
+
+ old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);
+
+ /* Somebody beat us to the punch, just bail. */
+ if (old != our_cookie)
+ return;
+
+ if (direction < 0 && iolat->min_lat_nsec) {
+ u64 samples_thresh;
+
+ if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
+ return;
+
+ /*
+ * Sometimes high priority groups are their own worst enemy, so
+ * instead of taking it out on some poor other group that did 5%
+ * or less of the IO's for the last summation just skip this
+ * scale down event.
+ */
+ samples_thresh = lat_info->nr_samples * 5;
+ samples_thresh = div64_u64(samples_thresh, 100);
+ if (iolat->nr_samples <= samples_thresh)
+ return;
+ }
+
+ /* We're as low as we can go. */
+ if (iolat->rq_depth.max_depth == 1 && direction < 0) {
+ blkcg_use_delay(lat_to_blkg(iolat));
+ return;
+ }
+
+ /* We're back to the default cookie, unthrottle all the things. */
+ if (cur_cookie == DEFAULT_SCALE_COOKIE) {
+ blkcg_clear_delay(lat_to_blkg(iolat));
+ iolat->rq_depth.max_depth = UINT_MAX;
+ wake_up_all(&iolat->rq_wait.wait);
+ return;
+ }
+
+ scale_change(iolat, direction > 0);
+}
+
+static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
+ spinlock_t *lock)
+{
+ struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
+ struct blkcg *blkcg;
+ struct blkcg_gq *blkg;
+ struct request_queue *q = rqos->q;
+ bool issue_as_root = bio_issue_as_root_blkg(bio);
+
+ if (!blk_iolatency_enabled(blkiolat))
+ return;
+
+ rcu_read_lock();
+ blkcg = bio_blkcg(bio);
+ bio_associate_blkcg(bio, &blkcg->css);
+ blkg = blkg_lookup(blkcg, q);
+ if (unlikely(!blkg)) {
+ if (!lock)
+ spin_lock_irq(q->queue_lock);
+ blkg = blkg_lookup_create(blkcg, q);
+ if (IS_ERR(blkg))
+ blkg = NULL;
+ if (!lock)
+ spin_unlock_irq(q->queue_lock);
+ }
+ if (!blkg)
+ goto out;
+
+ bio_issue_init(&bio->bi_issue, bio_sectors(bio));
+ bio_associate_blkg(bio, blkg);
+out:
+ rcu_read_unlock();
+ while (blkg && blkg->parent) {
+ struct iolatency_grp *iolat = blkg_to_lat(blkg);
+ if (!iolat) {
+ blkg = blkg->parent;
+ continue;
+ }
+
+ check_scale_change(iolat);
+ __blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
+ (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
+ blkg = blkg->parent;
+ }
+ if (!timer_pending(&blkiolat->timer))
+ mod_timer(&blkiolat->timer, jiffies + HZ);
+}
+
+static void iolatency_record_time(struct iolatency_grp *iolat,
+ struct bio_issue *issue, u64 now,
+ bool issue_as_root)
+{
+ struct blk_rq_stat *rq_stat;
+ u64 start = bio_issue_time(issue);
+ u64 req_time;
+
+ /*
+ * Truncate @now to the same granularity as the stored issue time so
+ * the two timestamps are directly comparable.
+ */
+ now = __bio_issue_time(now);
+
+ if (now <= start)
+ return;
+
+ req_time = now - start;
+
+ /*
+ * We don't want to count issue_as_root bio's in the cgroups latency
+ * statistics as it could skew the numbers downwards.
+ */
+ if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
+ u64 sub = iolat->min_lat_nsec;
+ if (req_time < sub)
+ blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
+ return;
+ }
+
+ rq_stat = get_cpu_ptr(iolat->stats);
+ blk_rq_stat_add(rq_stat, req_time);
+ put_cpu_ptr(rq_stat);
+}
+
+#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
+#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5
+
+static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
+{
+ struct blkcg_gq *blkg = lat_to_blkg(iolat);
+ struct iolatency_grp *parent;
+ struct child_latency_info *lat_info;
+ struct blk_rq_stat stat;
+ unsigned long flags;
+ int cpu, exp_idx;
+
+ blk_rq_stat_init(&stat);
+ preempt_disable();
+ for_each_online_cpu(cpu) {
+ struct blk_rq_stat *s;
+ s = per_cpu_ptr(iolat->stats, cpu);
+ blk_rq_stat_sum(&stat, s);
+ blk_rq_stat_init(s);
+ }
+ preempt_enable();
+
+ parent = blkg_to_lat(blkg->parent);
+ if (!parent)
+ return;
+
+ lat_info = &parent->child_lat;
+
+ /*
+ * CALC_LOAD takes in a number stored in fixed point representation.
+ * Because we are using this for IO time in ns, the values stored
+ * are significantly larger than the FIXED_1 denominator (2048).
+ * Therefore, rounding errors in the calculation are negligible and
+ * can be ignored.
+ */
+ exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
+ div64_u64(iolat->cur_win_nsec,
+ BLKIOLATENCY_EXP_BUCKET_SIZE));
+ CALC_LOAD(iolat->lat_avg, iolatency_exp_factors[exp_idx], stat.mean);
+
+ /* Everything is ok and we don't need to adjust the scale. */
+ if (stat.mean <= iolat->min_lat_nsec &&
+ atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
+ return;
+
+ /* Update the running sample counts under the parent's lock. */
+ spin_lock_irqsave(&lat_info->lock, flags);
+ lat_info->nr_samples -= iolat->nr_samples;
+ lat_info->nr_samples += stat.nr_samples;
+ iolat->nr_samples = stat.nr_samples;
+
+ if ((lat_info->last_scale_event >= now ||
+ now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME) &&
+ lat_info->scale_lat <= iolat->min_lat_nsec)
+ goto out;
+
+ if (stat.mean <= iolat->min_lat_nsec &&
+ stat.nr_samples >= BLKIOLATENCY_MIN_GOOD_SAMPLES) {
+ if (lat_info->scale_grp == iolat) {
+ lat_info->last_scale_event = now;
+ scale_cookie_change(iolat->blkiolat, lat_info, true);
+ }
+ } else if (stat.mean > iolat->min_lat_nsec) {
+ lat_info->last_scale_event = now;
+ if (!lat_info->scale_grp ||
+ lat_info->scale_lat > iolat->min_lat_nsec) {
+ WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
+ lat_info->scale_grp = iolat;
+ }
+ scale_cookie_change(iolat->blkiolat, lat_info, false);
+ }
+out:
+ spin_unlock_irqrestore(&lat_info->lock, flags);
+}
+
+static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
+{
+ struct blkcg_gq *blkg;
+ struct rq_wait *rqw;
+ struct iolatency_grp *iolat;
+ u64 window_start;
+ u64 now = ktime_to_ns(ktime_get());
+ bool issue_as_root = bio_issue_as_root_blkg(bio);
+ bool enabled = false;
+
+ blkg = bio->bi_blkg;
+ if (!blkg)
+ return;
+
+ iolat = blkg_to_lat(bio->bi_blkg);
+ if (!iolat)
+ return;
+
+ enabled = blk_iolatency_enabled(iolat->blkiolat);
+ while (blkg && blkg->parent) {
+ iolat = blkg_to_lat(blkg);
+ if (!iolat) {
+ blkg = blkg->parent;
+ continue;
+ }
+ rqw = &iolat->rq_wait;
+
+ atomic_dec(&rqw->inflight);
+ if (!enabled || iolat->min_lat_nsec == 0)
+ goto next;
+ iolatency_record_time(iolat, &bio->bi_issue, now,
+ issue_as_root);
+ window_start = atomic64_read(&iolat->window_start);
+ if (now > window_start &&
+ (now - window_start) >= iolat->cur_win_nsec) {
+ if (atomic64_cmpxchg(&iolat->window_start,
+ window_start, now) == window_start)
+ iolatency_check_latencies(iolat, now);
+ }
+next:
+ wake_up(&rqw->wait);
+ blkg = blkg->parent;
+ }
+}
+
+static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
+{
+ struct blkcg_gq *blkg;
+
+ blkg = bio->bi_blkg;
+ while (blkg && blkg->parent) {
+ struct rq_wait *rqw;
+ struct iolatency_grp *iolat;
+
+ iolat = blkg_to_lat(blkg);
+ if (!iolat)
+ goto next;
+
+ rqw = &iolat->rq_wait;
+ atomic_dec(&rqw->inflight);
+ wake_up(&rqw->wait);
+next:
+ blkg = blkg->parent;
+ }
+}
+
+static void blkcg_iolatency_exit(struct rq_qos *rqos)
+{
+ struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
+
+ del_timer_sync(&blkiolat->timer);
+ blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
+ kfree(blkiolat);
+}
+
+static struct rq_qos_ops blkcg_iolatency_ops = {
+ .throttle = blkcg_iolatency_throttle,
+ .cleanup = blkcg_iolatency_cleanup,
+ .done_bio = blkcg_iolatency_done_bio,
+ .exit = blkcg_iolatency_exit,
+};
+
+static void blkiolatency_timer_fn(struct timer_list *t)
+{
+ struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
+ struct blkcg_gq *blkg;
+ struct cgroup_subsys_state *pos_css;
+ u64 now = ktime_to_ns(ktime_get());
+
+ rcu_read_lock();
+ blkg_for_each_descendant_pre(blkg, pos_css,
+ blkiolat->rqos.q->root_blkg) {
+ struct iolatency_grp *iolat;
+ struct child_latency_info *lat_info;
+ unsigned long flags;
+ u64 cookie;
+
+ /*
+ * We could be exiting, don't access the pd unless we have a
+ * ref on the blkg.
+ */
+ if (!blkg_try_get(blkg))
+ continue;
+
+ iolat = blkg_to_lat(blkg);
+ if (!iolat)
+ goto next;
+
+ lat_info = &iolat->child_lat;
+ cookie = atomic_read(&lat_info->scale_cookie);
+
+ if (cookie >= DEFAULT_SCALE_COOKIE)
+ goto next;
+
+ spin_lock_irqsave(&lat_info->lock, flags);
+ if (lat_info->last_scale_event >= now)
+ goto next_lock;
+
+ /*
+ * We scaled down but don't have a scale_grp, scale up and carry
+ * on.
+ */
+ if (lat_info->scale_grp == NULL) {
+ scale_cookie_change(iolat->blkiolat, lat_info, true);
+ goto next_lock;
+ }
+
+ /*
+ * It's been 5 seconds since our last scale event, clear the
+ * scale grp in case the group that needed the scale down isn't
+ * doing any IO currently.
+ */
+ if (now - lat_info->last_scale_event >=
+ ((u64)NSEC_PER_SEC * 5))
+ lat_info->scale_grp = NULL;
+next_lock:
+ spin_unlock_irqrestore(&lat_info->lock, flags);
+next:
+ blkg_put(blkg);
+ }
+ rcu_read_unlock();
+}
+
+int blk_iolatency_init(struct request_queue *q)
+{
+ struct blk_iolatency *blkiolat;
+ struct rq_qos *rqos;
+ int ret;
+
+ blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
+ if (!blkiolat)
+ return -ENOMEM;
+
+ rqos = &blkiolat->rqos;
+ rqos->id = RQ_QOS_CGROUP;
+ rqos->ops = &blkcg_iolatency_ops;
+ rqos->q = q;
+
+ rq_qos_add(q, rqos);
+
+ ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
+ if (ret) {
+ rq_qos_del(q, rqos);
+ kfree(blkiolat);
+ return ret;
+ }
+
+ timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
+
+ return 0;
+}
+
+static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+{
+ struct iolatency_grp *iolat = blkg_to_lat(blkg);
+ struct blk_iolatency *blkiolat = iolat->blkiolat;
+ u64 oldval = iolat->min_lat_nsec;
+
+ iolat->min_lat_nsec = val;
+ iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
+ iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
+ BLKIOLATENCY_MAX_WIN_SIZE);
+
+ if (!oldval && val)
+ atomic_inc(&blkiolat->enabled);
+ if (oldval && !val)
+ atomic_dec(&blkiolat->enabled);
+}
+
+static void iolatency_clear_scaling(struct blkcg_gq *blkg)
+{
+ if (blkg->parent) {
+ struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
+ struct child_latency_info *lat_info;
+ if (!iolat)
+ return;
+
+ lat_info = &iolat->child_lat;
+ spin_lock(&lat_info->lock);
+ atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
+ lat_info->last_scale_event = 0;
+ lat_info->scale_grp = NULL;
+ lat_info->scale_lat = 0;
+ spin_unlock(&lat_info->lock);
+ }
+}
+
+static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct blkcg *blkcg = css_to_blkcg(of_css(of));
+ struct blkcg_gq *blkg;
+ struct blk_iolatency *blkiolat;
+ struct blkg_conf_ctx ctx;
+ struct iolatency_grp *iolat;
+ char *p, *tok;
+ u64 lat_val = 0;
+ u64 oldval;
+ int ret;
+
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
+ if (ret)
+ return ret;
+
+ iolat = blkg_to_lat(ctx.blkg);
+ blkiolat = iolat->blkiolat;
+ p = ctx.body;
+
+ ret = -EINVAL;
+ while ((tok = strsep(&p, " "))) {
+ char key[16];
+ char val[21]; /* 18446744073709551616 */
+
+ if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
+ goto out;
+
+ if (!strcmp(key, "target")) {
+ u64 v;
+
+ if (!strcmp(val, "max"))
+ lat_val = 0;
+ else if (sscanf(val, "%llu", &v) == 1)
+ lat_val = v * NSEC_PER_USEC;
+ else
+ goto out;
+ } else {
+ goto out;
+ }
+ }
+
+ /* Walk up the tree to see if our new val is lower than it should be. */
+ blkg = ctx.blkg;
+ oldval = iolat->min_lat_nsec;
+
+ iolatency_set_min_lat_nsec(blkg, lat_val);
+ if (oldval != iolat->min_lat_nsec) {
+ iolatency_clear_scaling(blkg);
+ }
+
+ ret = 0;
+out:
+ blkg_conf_finish(&ctx);
+ return ret ?: nbytes;
+}
+
+static u64 iolatency_prfill_limit(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ struct iolatency_grp *iolat = pd_to_lat(pd);
+ const char *dname = blkg_dev_name(pd->blkg);
+
+ if (!dname || !iolat->min_lat_nsec)
+ return 0;
+ seq_printf(sf, "%s target=%llu\n",
+ dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
+ return 0;
+}
+
+static int iolatency_print_limit(struct seq_file *sf, void *v)
+{
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ iolatency_prfill_limit,
+ &blkcg_policy_iolatency, seq_cft(sf)->private, false);
+ return 0;
+}
+
+static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
+ size_t size)
+{
+ struct iolatency_grp *iolat = pd_to_lat(pd);
+ unsigned long long avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
+ unsigned long long cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
+
+ if (iolat->rq_depth.max_depth == UINT_MAX)
+ return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
+ avg_lat, cur_win);
+
+ return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
+ iolat->rq_depth.max_depth, avg_lat, cur_win);
+}
+
+
+static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
+{
+ struct iolatency_grp *iolat;
+
+ iolat = kzalloc_node(sizeof(*iolat), gfp, node);
+ if (!iolat)
+ return NULL;
+ iolat->stats = __alloc_percpu_gfp(sizeof(struct blk_rq_stat),
+ __alignof__(struct blk_rq_stat), gfp);
+ if (!iolat->stats) {
+ kfree(iolat);
+ return NULL;
+ }
+ return &iolat->pd;
+}
+
+static void iolatency_pd_init(struct blkg_policy_data *pd)
+{
+ struct iolatency_grp *iolat = pd_to_lat(pd);
+ struct blkcg_gq *blkg = lat_to_blkg(iolat);
+ struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
+ struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
+ u64 now = ktime_to_ns(ktime_get());
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct blk_rq_stat *stat;
+ stat = per_cpu_ptr(iolat->stats, cpu);
+ blk_rq_stat_init(stat);
+ }
+
+ rq_wait_init(&iolat->rq_wait);
+ spin_lock_init(&iolat->child_lat.lock);
+ iolat->rq_depth.queue_depth = blk_queue_depth(blkg->q);
+ iolat->rq_depth.max_depth = UINT_MAX;
+ iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
+ iolat->blkiolat = blkiolat;
+ iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
+ atomic64_set(&iolat->window_start, now);
+
+ /*
+ * We init things in list order, so the pd for the parent may not be
+ * init'ed yet for whatever reason.
+ */
+ if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
+ struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
+ atomic_set(&iolat->scale_cookie,
+ atomic_read(&parent->child_lat.scale_cookie));
+ } else {
+ atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
+ }
+
+ atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
+}
+
+static void iolatency_pd_offline(struct blkg_policy_data *pd)
+{
+ struct iolatency_grp *iolat = pd_to_lat(pd);
+ struct blkcg_gq *blkg = lat_to_blkg(iolat);
+
+ iolatency_set_min_lat_nsec(blkg, 0);
+ iolatency_clear_scaling(blkg);
+}
+
+static void iolatency_pd_free(struct blkg_policy_data *pd)
+{
+ struct iolatency_grp *iolat = pd_to_lat(pd);
+ free_percpu(iolat->stats);
+ kfree(iolat);
+}
+
+static struct cftype iolatency_files[] = {
+ {
+ .name = "latency",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = iolatency_print_limit,
+ .write = iolatency_set_limit,
+ },
+ {}
+};
+
+static struct blkcg_policy blkcg_policy_iolatency = {
+ .dfl_cftypes = iolatency_files,
+ .pd_alloc_fn = iolatency_pd_alloc,
+ .pd_init_fn = iolatency_pd_init,
+ .pd_offline_fn = iolatency_pd_offline,
+ .pd_free_fn = iolatency_pd_free,
+ .pd_stat_fn = iolatency_pd_stat,
+};
+
+static int __init iolatency_init(void)
+{
+ return blkcg_policy_register(&blkcg_policy_iolatency);
+}
+
+static void __exit iolatency_exit(void)
+{
+ return blkcg_policy_unregister(&blkcg_policy_iolatency);
+}
+
+module_init(iolatency_init);
+module_exit(iolatency_exit);
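The controller is configured through the io.latency cgroup-v2 file created by iolatency_files above; writes take a "MAJ:MIN target=<usec>" pair parsed by iolatency_set_limit(). A hedged usage sketch setting a 2ms target (the cgroup path and device numbers are made up):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/fs/cgroup/protected/io.latency", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fputs("8:0 target=2000\n", f);  /* target is in microseconds */
        return fclose(f) ? 1 : 0;
}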
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 8faa70f26fcd..d1b9dd03da25 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -68,6 +68,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
*/
req_sects = min_t(sector_t, nr_sects,
q->limits.max_discard_sectors);
+ if (!req_sects)
+ goto fail;
if (req_sects > UINT_MAX >> 9)
req_sects = UINT_MAX >> 9;
@@ -105,6 +107,14 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
*biop = bio;
return 0;
+
+fail:
+ if (bio) {
+ submit_bio_wait(bio);
+ bio_put(bio);
+ }
+ *biop = NULL;
+ return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
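For callers, the new failure mode matters only if discard support disappears mid-build: any bios already chained are submitted and waited on, and -EOPNOTSUPP comes back. The usual calling pattern, modeled on the blkdev_issue_discard()-style wrappers (a sketch, not the exact wrapper):

int issue_discard(struct block_device *bdev, sector_t sector,
                  sector_t nr_sects)
{
        struct bio *bio = NULL;
        int ret;

        ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0,
                                     &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(bio);
                if (ret == -EOPNOTSUPP)
                        ret = 0;        /* nothing to discard; not an error */
                bio_put(bio);
        }
        return ret;
}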
diff --git a/block/blk-mq-debugfs-zoned.c b/block/blk-mq-debugfs-zoned.c
new file mode 100644
index 000000000000..fb2c82c351e4
--- /dev/null
+++ b/block/blk-mq-debugfs-zoned.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Western Digital Corporation or its affiliates.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/blkdev.h>
+#include "blk-mq-debugfs.h"
+
+int queue_zone_wlock_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+ unsigned int i;
+
+ if (!q->seq_zones_wlock)
+ return 0;
+
+ for (i = 0; i < q->nr_zones; i++)
+ if (test_bit(i, q->seq_zones_wlock))
+ seq_printf(m, "%u\n", i);
+
+ return 0;
+}
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 1c4532e92938..cb1e6cf7ac48 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -206,21 +206,6 @@ static ssize_t queue_write_hint_store(void *data, const char __user *buf,
return count;
}
-static int queue_zone_wlock_show(void *data, struct seq_file *m)
-{
- struct request_queue *q = data;
- unsigned int i;
-
- if (!q->seq_zones_wlock)
- return 0;
-
- for (i = 0; i < blk_queue_nr_zones(q); i++)
- if (test_bit(i, q->seq_zones_wlock))
- seq_printf(m, "%u\n", i);
-
- return 0;
-}
-
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
{ "poll_stat", 0400, queue_poll_stat_show },
{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
@@ -637,6 +622,14 @@ static int hctx_active_show(void *data, struct seq_file *m)
return 0;
}
+static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
+{
+ struct blk_mq_hw_ctx *hctx = data;
+
+ seq_printf(m, "%u\n", hctx->dispatch_busy);
+ return 0;
+}
+
static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
__acquires(&ctx->lock)
{
@@ -798,6 +791,7 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
{"queued", 0600, hctx_queued_show, hctx_queued_write},
{"run", 0600, hctx_run_show, hctx_run_write},
{"active", 0400, hctx_active_show},
+ {"dispatch_busy", 0400, hctx_dispatch_busy_show},
{},
};
diff --git a/block/blk-mq-debugfs.h b/block/blk-mq-debugfs.h
index b9d366e57097..a9160be12be0 100644
--- a/block/blk-mq-debugfs.h
+++ b/block/blk-mq-debugfs.h
@@ -80,4 +80,13 @@ static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hc
}
#endif
+#ifdef CONFIG_BLK_DEBUG_FS_ZONED
+int queue_zone_wlock_show(void *data, struct seq_file *m);
+#else
+static inline int queue_zone_wlock_show(void *data, struct seq_file *m)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index e233996bb76f..db644ec624f5 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -17,6 +17,8 @@
#include <linux/pci.h>
#include <linux/module.h>
+#include "blk-mq.h"
+
/**
* blk_mq_pci_map_queues - provide a default queue mapping for PCI device
* @set: tagset to provide the mapping for
@@ -48,8 +50,7 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
fallback:
WARN_ON_ONCE(set->nr_hw_queues > 1);
- for_each_possible_cpu(cpu)
- set->mq_map[cpu] = 0;
+ blk_mq_clear_mq_map(set);
return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
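blk_mq_clear_mq_map() lives in block/blk-mq.h (hence the new include above); it presumably wraps exactly the loop it replaces here. A sketch of what it is expected to look like:

static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                set->mq_map[cpu] = 0;
}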
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 56c493c6cd90..cf9c66c6d35a 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -59,29 +59,16 @@ static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
return;
- if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
- struct request_queue *q = hctx->queue;
-
- if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- atomic_inc(&q->shared_hctx_restart);
- } else
- set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+ set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
-static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- return false;
-
- if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
- struct request_queue *q = hctx->queue;
-
- if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- atomic_dec(&q->shared_hctx_restart);
- } else
- clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+ return;
+ clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
- return blk_mq_run_hw_queue(hctx, true);
+ blk_mq_run_hw_queue(hctx, true);
}
/*
@@ -219,15 +206,8 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
}
} else if (has_sched_dispatch) {
blk_mq_do_dispatch_sched(hctx);
- } else if (q->mq_ops->get_budget) {
- /*
- * If we need to get budget before queueing a request, we dequeue
- * requests one by one from the sw queue, to avoid messing up I/O
- * merging when dispatch runs out of resources.
- *
- * TODO: get more budget, and dequeue more requests at a
- * time.
- */
+ } else if (hctx->dispatch_busy) {
+ /* dequeue requests one by one from the sw queue if the queue is busy */
blk_mq_do_dispatch_ctx(hctx);
} else {
blk_mq_flush_busy_ctxs(hctx, &rq_list);
@@ -339,7 +319,8 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
return e->type->ops.mq.bio_merge(hctx, bio);
}
- if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) {
+ if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
+ !list_empty_careful(&ctx->rq_list)) {
/* default per sw-queue merge */
spin_lock(&ctx->lock);
ret = blk_mq_attempt_merge(q, ctx, bio);
@@ -380,68 +361,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
return false;
}
-/**
- * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
- * @pos: loop cursor.
- * @skip: the list element that will not be examined. Iteration starts at
- * @skip->next.
- * @head: head of the list to examine. This list must have at least one
- * element, namely @skip.
- * @member: name of the list_head structure within typeof(*pos).
- */
-#define list_for_each_entry_rcu_rr(pos, skip, head, member) \
- for ((pos) = (skip); \
- (pos = (pos)->member.next != (head) ? list_entry_rcu( \
- (pos)->member.next, typeof(*pos), member) : \
- list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
- (pos) != (skip); )
-
-/*
- * Called after a driver tag has been freed to check whether a hctx needs to
- * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
- * queues in a round-robin fashion if the tag set of @hctx is shared with other
- * hardware queues.
- */
-void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
-{
- struct blk_mq_tags *const tags = hctx->tags;
- struct blk_mq_tag_set *const set = hctx->queue->tag_set;
- struct request_queue *const queue = hctx->queue, *q;
- struct blk_mq_hw_ctx *hctx2;
- unsigned int i, j;
-
- if (set->flags & BLK_MQ_F_TAG_SHARED) {
- /*
- * If this is 0, then we know that no hardware queues
- * have RESTART marked. We're done.
- */
- if (!atomic_read(&queue->shared_hctx_restart))
- return;
-
- rcu_read_lock();
- list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
- tag_set_list) {
- queue_for_each_hw_ctx(q, hctx2, i)
- if (hctx2->tags == tags &&
- blk_mq_sched_restart_hctx(hctx2))
- goto done;
- }
- j = hctx->queue_num + 1;
- for (i = 0; i < queue->nr_hw_queues; i++, j++) {
- if (j == queue->nr_hw_queues)
- j = 0;
- hctx2 = queue->queue_hw_ctx[j];
- if (hctx2->tags == tags &&
- blk_mq_sched_restart_hctx(hctx2))
- break;
- }
-done:
- rcu_read_unlock();
- } else {
- blk_mq_sched_restart_hctx(hctx);
- }
-}
-
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
bool run_queue, bool async)
{
@@ -486,8 +405,19 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
if (e && e->type->ops.mq.insert_requests)
e->type->ops.mq.insert_requests(hctx, list, false);
- else
+ else {
+ /*
+ * Try to issue requests directly if the hw queue isn't busy and
+ * the 'none' scheduler is in use; this may save us one extra
+ * enqueue and dequeue to the sw queue.
+ */
+ if (!hctx->dispatch_busy && !e && !run_queue_async) {
+ blk_mq_try_issue_list_directly(hctx, list);
+ if (list_empty(list))
+ return;
+ }
blk_mq_insert_requests(hctx, ctx, list);
+ }
blk_mq_run_hw_queue(hctx, run_queue_async);
}
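
The new comment in blk_mq_sched_insert_requests() boils down to a small
decision: with no elevator attached and an idle hw queue, a plugged list
can be issued straight to the driver, skipping the sw-queue round trip.
A compact, runnable sketch of that choice (plain C, illustrative only):

	#include <stdio.h>

	enum insert_path { ISSUE_DIRECTLY, STAGE_IN_SW_QUEUE };

	static enum insert_path pick_path(int has_elevator, int dispatch_busy,
					  int run_async)
	{
		if (!has_elevator && !dispatch_busy && !run_async)
			return ISSUE_DIRECTLY;	/* skip enqueue + dequeue */
		return STAGE_IN_SW_QUEUE;	/* fall back to ctx->rq_list */
	}

	int main(void)
	{
		printf("idle + none: %d\n", pick_path(0, 0, 0)); /* direct */
		printf("busy + none: %d\n", pick_path(0, 1, 0)); /* sw queue */
		return 0;
	}
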
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 3de0836163c2..816923bf874d 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -23,6 +23,9 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
/*
* If a previously inactive queue goes active, bump the active user count.
+ * We need to do this before trying to allocate a driver tag, so that even
+ * if the first allocation attempt fails, the other shared-tag users can
+ * still reserve budget for it.
*/
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
@@ -399,8 +402,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (tdepth <= tags->nr_reserved_tags)
return -EINVAL;
- tdepth -= tags->nr_reserved_tags;
-
/*
* If we are allowed to grow beyond the original size, allocate
* a new set of tags before freeing the old one.
@@ -420,7 +421,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (tdepth > 16 * BLKDEV_MAX_RQ)
return -EINVAL;
- new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
+ new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
+ tags->nr_reserved_tags);
if (!new)
return -ENOMEM;
ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
@@ -437,7 +439,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
* Don't need (or can't) update reserved tags here, they
* remain static and should never need resizing.
*/
- sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
+ sbitmap_queue_resize(&tags->bitmap_tags,
+ tdepth - tags->nr_reserved_tags);
}
return 0;
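
With the two hunks above, the requested depth keeps its reserved tags all
the way through: the rq map is allocated for the full depth and only the
normal-tag bitmap is resized to the remainder. Worked numbers (made up),
runnable as plain C:

	#include <stdio.h>

	int main(void)
	{
		unsigned int tdepth = 64, nr_reserved_tags = 4;

		/* before the fix, tdepth was shrunk to 60 up front */
		printf("rq map entries: %u\n", tdepth);			    /* 64 */
		printf("bitmap_tags   : %u\n", tdepth - nr_reserved_tags);  /* 60 */
		return 0;
	}
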
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 654b0dc7e001..72a0033ccee9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -34,8 +34,8 @@
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
-#include "blk-wbt.h"
#include "blk-mq-sched.h"
+#include "blk-rq-qos.h"
static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
@@ -285,7 +285,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->tag = -1;
rq->internal_tag = tag;
} else {
- if (blk_mq_tag_busy(data->hctx)) {
+ if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
rq_flags = RQF_MQ_INFLIGHT;
atomic_inc(&data->hctx->nr_active);
}
@@ -367,6 +367,8 @@ static struct request *blk_mq_get_request(struct request_queue *q,
if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
!(data->flags & BLK_MQ_REQ_RESERVED))
e->type->ops.mq.limit_depth(op, data);
+ } else {
+ blk_mq_tag_busy(data->hctx);
}
tag = blk_mq_get_tag(data);
@@ -504,7 +506,7 @@ void blk_mq_free_request(struct request *rq)
if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
laptop_io_completion(q->backing_dev_info);
- wbt_done(q->rq_wb, rq);
+ rq_qos_done(q, rq);
if (blk_rq_rl(rq))
blk_put_rl(blk_rq_rl(rq));
@@ -527,7 +529,7 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
blk_account_io_done(rq, now);
if (rq->end_io) {
- wbt_done(rq->q->rq_wb, rq);
+ rq_qos_done(rq->q, rq);
rq->end_io(rq, error);
} else {
if (unlikely(blk_bidi_rq(rq)))
@@ -639,7 +641,7 @@ void blk_mq_start_request(struct request *rq)
rq->throtl_size = blk_rq_sectors(rq);
#endif
rq->rq_flags |= RQF_STATS;
- wbt_issue(q->rq_wb, rq);
+ rq_qos_issue(q, rq);
}
WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
@@ -665,7 +667,7 @@ static void __blk_mq_requeue_request(struct request *rq)
blk_mq_put_driver_tag(rq);
trace_block_rq_requeue(q, rq);
- wbt_requeue(q->rq_wb, rq);
+ rq_qos_requeue(q, rq);
if (blk_mq_request_started(rq)) {
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
@@ -962,16 +964,14 @@ static inline unsigned int queued_to_index(unsigned int queued)
return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}
-bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
- bool wait)
+bool blk_mq_get_driver_tag(struct request *rq)
{
struct blk_mq_alloc_data data = {
.q = rq->q,
.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
- .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
+ .flags = BLK_MQ_REQ_NOWAIT,
};
-
- might_sleep_if(wait);
+ bool shared;
if (rq->tag != -1)
goto done;
@@ -979,9 +979,10 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
data.flags |= BLK_MQ_REQ_RESERVED;
+ shared = blk_mq_tag_busy(data.hctx);
rq->tag = blk_mq_get_tag(&data);
if (rq->tag >= 0) {
- if (blk_mq_tag_busy(data.hctx)) {
+ if (shared) {
rq->rq_flags |= RQF_MQ_INFLIGHT;
atomic_inc(&data.hctx->nr_active);
}
@@ -989,8 +990,6 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
}
done:
- if (hctx)
- *hctx = data.hctx;
return rq->tag != -1;
}
@@ -1001,7 +1000,10 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
+ spin_lock(&hctx->dispatch_wait_lock);
list_del_init(&wait->entry);
+ spin_unlock(&hctx->dispatch_wait_lock);
+
blk_mq_run_hw_queue(hctx, true);
return 1;
}
@@ -1012,17 +1014,16 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
* restart. For both cases, take care to check the condition again after
* marking us as waiting.
*/
-static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
+static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
- struct blk_mq_hw_ctx *this_hctx = *hctx;
- struct sbq_wait_state *ws;
+ struct wait_queue_head *wq;
wait_queue_entry_t *wait;
bool ret;
- if (!(this_hctx->flags & BLK_MQ_F_TAG_SHARED)) {
- if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
- set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);
+ if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
+ if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+ set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
/*
* It's possible that a tag was freed in the window between the
@@ -1032,30 +1033,35 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
* Don't clear RESTART here, someone else could have set it.
* At most this will cost an extra queue run.
*/
- return blk_mq_get_driver_tag(rq, hctx, false);
+ return blk_mq_get_driver_tag(rq);
}
- wait = &this_hctx->dispatch_wait;
+ wait = &hctx->dispatch_wait;
if (!list_empty_careful(&wait->entry))
return false;
- spin_lock(&this_hctx->lock);
+ wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
+
+ spin_lock_irq(&wq->lock);
+ spin_lock(&hctx->dispatch_wait_lock);
if (!list_empty(&wait->entry)) {
- spin_unlock(&this_hctx->lock);
+ spin_unlock(&hctx->dispatch_wait_lock);
+ spin_unlock_irq(&wq->lock);
return false;
}
- ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
- add_wait_queue(&ws->wait, wait);
+ wait->flags &= ~WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue(wq, wait);
/*
* It's possible that a tag was freed in the window between the
* allocation failure and adding the hardware queue to the wait
* queue.
*/
- ret = blk_mq_get_driver_tag(rq, hctx, false);
+ ret = blk_mq_get_driver_tag(rq);
if (!ret) {
- spin_unlock(&this_hctx->lock);
+ spin_unlock(&hctx->dispatch_wait_lock);
+ spin_unlock_irq(&wq->lock);
return false;
}
@@ -1063,14 +1069,42 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
* We got a tag, remove ourselves from the wait queue to ensure
* someone else gets the wakeup.
*/
- spin_lock_irq(&ws->wait.lock);
list_del_init(&wait->entry);
- spin_unlock_irq(&ws->wait.lock);
- spin_unlock(&this_hctx->lock);
+ spin_unlock(&hctx->dispatch_wait_lock);
+ spin_unlock_irq(&wq->lock);
return true;
}
+#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8
+#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4
+/*
+ * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
+ * - EWMA is a simple way to compute a running average value
+ * - a weight of 7/8 old and 1/8 new is applied so the value decays
+ *   exponentially
+ * - the new sample is scaled by a factor of 4 to avoid rounding the result
+ *   down to 0 too early; the exact factor doesn't matter much, since the
+ *   EWMA decays exponentially either way
+ */
+static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
+{
+ unsigned int ewma;
+
+ if (hctx->queue->elevator)
+ return;
+
+ ewma = hctx->dispatch_busy;
+
+ if (!ewma && !busy)
+ return;
+
+ ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
+ if (busy)
+ ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
+ ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
+
+ hctx->dispatch_busy = ewma;
+}
+
#define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
/*
@@ -1103,7 +1137,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
break;
- if (!blk_mq_get_driver_tag(rq, NULL, false)) {
+ if (!blk_mq_get_driver_tag(rq)) {
/*
* The initial allocation attempt failed, so we need to
* rerun the hardware queue when a tag is freed. The
@@ -1111,7 +1145,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
* before we add this entry back on the dispatch list,
* we'll re-run it below.
*/
- if (!blk_mq_mark_tag_wait(&hctx, rq)) {
+ if (!blk_mq_mark_tag_wait(hctx, rq)) {
blk_mq_put_dispatch_budget(hctx);
/*
* For non-shared tags, the RESTART check
@@ -1135,7 +1169,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
bd.last = true;
else {
nxt = list_first_entry(list, struct request, queuelist);
- bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
+ bd.last = !blk_mq_get_driver_tag(nxt);
}
ret = q->mq_ops->queue_rq(hctx, &bd);
@@ -1207,8 +1241,10 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
else if (needs_restart && (ret == BLK_STS_RESOURCE))
blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
+ blk_mq_update_dispatch_busy(hctx, true);
return false;
- }
+ } else
+ blk_mq_update_dispatch_busy(hctx, false);
/*
* If the host/device is unable to accept more work, inform the
@@ -1542,19 +1578,19 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list)
{
+ struct request *rq;
+
/*
* preemption doesn't flush plug list, so it's possible ctx->cpu is
* offline now
*/
- spin_lock(&ctx->lock);
- while (!list_empty(list)) {
- struct request *rq;
-
- rq = list_first_entry(list, struct request, queuelist);
+ list_for_each_entry(rq, list, queuelist) {
BUG_ON(rq->mq_ctx != ctx);
- list_del_init(&rq->queuelist);
- __blk_mq_insert_req_list(hctx, rq, false);
+ trace_block_rq_insert(hctx->queue, rq);
}
+
+ spin_lock(&ctx->lock);
+ list_splice_tail_init(list, &ctx->rq_list);
blk_mq_hctx_mark_pending(hctx, ctx);
spin_unlock(&ctx->lock);
}
@@ -1657,13 +1693,16 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
ret = q->mq_ops->queue_rq(hctx, &bd);
switch (ret) {
case BLK_STS_OK:
+ blk_mq_update_dispatch_busy(hctx, false);
*cookie = new_cookie;
break;
case BLK_STS_RESOURCE:
case BLK_STS_DEV_RESOURCE:
+ blk_mq_update_dispatch_busy(hctx, true);
__blk_mq_requeue_request(rq);
break;
default:
+ blk_mq_update_dispatch_busy(hctx, false);
*cookie = BLK_QC_T_NONE;
break;
}
@@ -1698,7 +1737,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
if (!blk_mq_get_dispatch_budget(hctx))
goto insert;
- if (!blk_mq_get_driver_tag(rq, NULL, false)) {
+ if (!blk_mq_get_driver_tag(rq)) {
blk_mq_put_dispatch_budget(hctx);
goto insert;
}
@@ -1746,6 +1785,27 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq)
return ret;
}
+void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+ struct list_head *list)
+{
+ while (!list_empty(list)) {
+ blk_status_t ret;
+ struct request *rq = list_first_entry(list, struct request,
+ queuelist);
+
+ list_del_init(&rq->queuelist);
+ ret = blk_mq_request_issue_directly(rq);
+ if (ret != BLK_STS_OK) {
+ if (ret == BLK_STS_RESOURCE ||
+ ret == BLK_STS_DEV_RESOURCE) {
+ list_add(&rq->queuelist, list);
+ break;
+ }
+ blk_mq_end_request(rq, ret);
+ }
+ }
+}
+
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
@@ -1756,7 +1816,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
blk_qc_t cookie;
- unsigned int wb_acct;
blk_queue_bounce(q, &bio);
@@ -1772,19 +1831,19 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
if (blk_mq_sched_bio_merge(q, bio))
return BLK_QC_T_NONE;
- wb_acct = wbt_wait(q->rq_wb, bio, NULL);
+ rq_qos_throttle(q, bio, NULL);
trace_block_getrq(q, bio, bio->bi_opf);
rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
if (unlikely(!rq)) {
- __wbt_done(q->rq_wb, wb_acct);
+ rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
return BLK_QC_T_NONE;
}
- wbt_track(rq, wb_acct);
+ rq_qos_track(q, rq, bio);
cookie = request_to_qc_t(data.hctx, rq);
@@ -1847,7 +1906,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
&cookie);
}
- } else if (q->nr_hw_queues > 1 && is_sync) {
+ } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
+ !data.hctx->dispatch_busy)) {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
blk_mq_try_issue_directly(data.hctx, rq, &cookie);
@@ -2146,6 +2206,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
hctx->nr_ctx = 0;
+ spin_lock_init(&hctx->dispatch_wait_lock);
init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
@@ -2331,15 +2392,10 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared)
int i;
queue_for_each_hw_ctx(q, hctx, i) {
- if (shared) {
- if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- atomic_inc(&q->shared_hctx_restart);
+ if (shared)
hctx->flags |= BLK_MQ_F_TAG_SHARED;
- } else {
- if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- atomic_dec(&q->shared_hctx_restart);
+ else
hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
- }
}
}
@@ -2370,7 +2426,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
blk_mq_update_tag_set_depth(set, false);
}
mutex_unlock(&set->tag_list_lock);
- synchronize_rcu();
INIT_LIST_HEAD(&q->tag_set_list);
}
@@ -2685,7 +2740,6 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
if (set->ops->map_queues) {
- int cpu;
/*
* transport .map_queues is usually done in the following
* way:
@@ -2700,8 +2754,7 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
* killing stale mapping since one CPU may not be mapped
* to any hw queue.
*/
- for_each_possible_cpu(cpu)
- set->mq_map[cpu] = 0;
+ blk_mq_clear_mq_map(set);
return set->ops->map_queues(set);
} else
@@ -2711,7 +2764,7 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
/*
* Alloc a tag set to be associated with one or more request queues.
* May fail with EINVAL for various error conditions. May adjust the
- * requested depth down, if if it too large. In that case, the set
+ * requested depth down, if it's too large. In that case, the set
* value will be stored in set->queue_depth.
*/
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
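
A runnable sketch of the dispatch_busy EWMA introduced above, using the
same weight (7/8 old, 1/8 new) and the 1 << 4 scale that keeps integer
division from flooring the average straight to zero; the sample sequence
is made up:

	#include <stdio.h>

	#define WEIGHT 8
	#define FACTOR 4

	static unsigned int ewma_update(unsigned int ewma, int busy)
	{
		if (!ewma && !busy)
			return 0;
		ewma *= WEIGHT - 1;
		if (busy)
			ewma += 1 << FACTOR;
		return ewma / WEIGHT;
	}

	int main(void)
	{
		unsigned int ewma = 0;
		int i;

		for (i = 0; i < 4; i++)		/* four busy completions */
			printf("busy -> %u\n", ewma = ewma_update(ewma, 1));
		for (i = 0; i < 8; i++)		/* then the queue drains */
			printf("idle -> %u\n", ewma = ewma_update(ewma, 0));
		return 0;	/* prints 2 3 4 5, then 4 3 2 1 0 0 0 0 */
	}
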
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 89231e439b2f..9497b47e2526 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -36,8 +36,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
-bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
- bool wait);
+bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *start);
@@ -65,6 +64,8 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);
+void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+ struct list_head *list);
/*
* CPU -> queue mappings
@@ -203,4 +204,12 @@ static inline void blk_mq_put_driver_tag(struct request *rq)
__blk_mq_put_driver_tag(hctx, rq);
}
+static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ set->mq_map[cpu] = 0;
+}
+
#endif
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
new file mode 100644
index 000000000000..0005dfd568dd
--- /dev/null
+++ b/block/blk-rq-qos.c
@@ -0,0 +1,194 @@
+#include "blk-rq-qos.h"
+
+/*
+ * Increment 'v' if 'v' is below 'below'. Returns true if we succeeded,
+ * false if 'v' + 1 would be bigger than 'below'.
+ */
+static bool atomic_inc_below(atomic_t *v, unsigned int below)
+{
+ unsigned int cur = atomic_read(v);
+
+ for (;;) {
+ unsigned int old;
+
+ if (cur >= below)
+ return false;
+ old = atomic_cmpxchg(v, cur, cur + 1);
+ if (old == cur)
+ break;
+ cur = old;
+ }
+
+ return true;
+}
+
+bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
+{
+ return atomic_inc_below(&rq_wait->inflight, limit);
+}
+
+void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
+{
+ struct rq_qos *rqos;
+
+ for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->ops->cleanup)
+ rqos->ops->cleanup(rqos, bio);
+ }
+}
+
+void rq_qos_done(struct request_queue *q, struct request *rq)
+{
+ struct rq_qos *rqos;
+
+ for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->ops->done)
+ rqos->ops->done(rqos, rq);
+ }
+}
+
+void rq_qos_issue(struct request_queue *q, struct request *rq)
+{
+ struct rq_qos *rqos;
+
+ for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->ops->issue)
+ rqos->ops->issue(rqos, rq);
+ }
+}
+
+void rq_qos_requeue(struct request_queue *q, struct request *rq)
+{
+ struct rq_qos *rqos;
+
+ for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->ops->requeue)
+ rqos->ops->requeue(rqos, rq);
+ }
+}
+
+void rq_qos_throttle(struct request_queue *q, struct bio *bio,
+ spinlock_t *lock)
+{
+ struct rq_qos *rqos;
+
+ for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->ops->throttle)
+ rqos->ops->throttle(rqos, bio, lock);
+ }
+}
+
+void rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio)
+{
+ struct rq_qos *rqos;
+
+ for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->ops->track)
+ rqos->ops->track(rqos, rq, bio);
+ }
+}
+
+void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
+{
+ struct rq_qos *rqos;
+
+ for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->ops->done_bio)
+ rqos->ops->done_bio(rqos, bio);
+ }
+}
+
+/*
+ * Return true if we can't increase the depth any further by scaling.
+ */
+bool rq_depth_calc_max_depth(struct rq_depth *rqd)
+{
+ unsigned int depth;
+ bool ret = false;
+
+ /*
+ * For QD=1 devices, this is a special case. It's important for those
+ * to have one request ready when one completes, so force a depth of
+ * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
+ * since the device can't have more than that in flight. If we're
+ * scaling down, then keep a setting of 1/1/1.
+ */
+ if (rqd->queue_depth == 1) {
+ if (rqd->scale_step > 0)
+ rqd->max_depth = 1;
+ else {
+ rqd->max_depth = 2;
+ ret = true;
+ }
+ } else {
+ /*
+ * scale_step == 0 is our default state. If we have suffered
+ * latency spikes, step will be > 0, and we shrink the
+ * allowed write depths. If step is < 0, we're only doing
+ * writes, and we allow a temporarily higher depth to
+ * increase performance.
+ */
+ depth = min_t(unsigned int, rqd->default_depth,
+ rqd->queue_depth);
+ if (rqd->scale_step > 0)
+ depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
+ else if (rqd->scale_step < 0) {
+ unsigned int maxd = 3 * rqd->queue_depth / 4;
+
+ depth = 1 + ((depth - 1) << -rqd->scale_step);
+ if (depth > maxd) {
+ depth = maxd;
+ ret = true;
+ }
+ }
+
+ rqd->max_depth = depth;
+ }
+
+ return ret;
+}
+
+void rq_depth_scale_up(struct rq_depth *rqd)
+{
+ /*
+ * Hit max in previous round, stop here
+ */
+ if (rqd->scaled_max)
+ return;
+
+ rqd->scale_step--;
+
+ rqd->scaled_max = rq_depth_calc_max_depth(rqd);
+}
+
+/*
+ * Scale the queue depth down. If 'hard_throttle' is set, do it quicker,
+ * since we had a latency violation.
+ */
+void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
+{
+ /*
+ * Stop scaling down when we've hit the limit. This also prevents
+ * ->scale_step from going to crazy values, if the device can't
+ * keep up.
+ */
+ if (rqd->max_depth == 1)
+ return;
+
+ if (rqd->scale_step < 0 && hard_throttle)
+ rqd->scale_step = 0;
+ else
+ rqd->scale_step++;
+
+ rqd->scaled_max = false;
+ rq_depth_calc_max_depth(rqd);
+}
+
+void rq_qos_exit(struct request_queue *q)
+{
+ while (q->rq_qos) {
+ struct rq_qos *rqos = q->rq_qos;
+ q->rq_qos = rqos->next;
+ rqos->ops->exit(rqos);
+ }
+}
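
The cmpxchg loop in atomic_inc_below() maps almost directly onto C11
atomics. This self-contained userspace version shows the same
retry-and-republish behaviour against a limit of 3, where the fourth
attempt fails:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool atomic_inc_below(atomic_uint *v, unsigned int below)
	{
		unsigned int cur = atomic_load(v);

		for (;;) {
			if (cur >= below)
				return false;
			/* on failure, cur is reloaded with the fresh value */
			if (atomic_compare_exchange_weak(v, &cur, cur + 1))
				return true;
		}
	}

	int main(void)
	{
		atomic_uint inflight = 0;
		int i;

		for (i = 0; i < 4; i++)
			printf("attempt %d: %s\n", i,
			       atomic_inc_below(&inflight, 3) ?
			       "ok" : "over limit");
		return 0;
	}
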
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
new file mode 100644
index 000000000000..32b02efbfa66
--- /dev/null
+++ b/block/blk-rq-qos.h
@@ -0,0 +1,109 @@
+#ifndef RQ_QOS_H
+#define RQ_QOS_H
+
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/blk_types.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+
+enum rq_qos_id {
+ RQ_QOS_WBT,
+ RQ_QOS_CGROUP,
+};
+
+struct rq_wait {
+ wait_queue_head_t wait;
+ atomic_t inflight;
+};
+
+struct rq_qos {
+ struct rq_qos_ops *ops;
+ struct request_queue *q;
+ enum rq_qos_id id;
+ struct rq_qos *next;
+};
+
+struct rq_qos_ops {
+ void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *);
+ void (*track)(struct rq_qos *, struct request *, struct bio *);
+ void (*issue)(struct rq_qos *, struct request *);
+ void (*requeue)(struct rq_qos *, struct request *);
+ void (*done)(struct rq_qos *, struct request *);
+ void (*done_bio)(struct rq_qos *, struct bio *);
+ void (*cleanup)(struct rq_qos *, struct bio *);
+ void (*exit)(struct rq_qos *);
+};
+
+struct rq_depth {
+ unsigned int max_depth;
+
+ int scale_step;
+ bool scaled_max;
+
+ unsigned int queue_depth;
+ unsigned int default_depth;
+};
+
+static inline struct rq_qos *rq_qos_id(struct request_queue *q,
+ enum rq_qos_id id)
+{
+ struct rq_qos *rqos;
+ for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ if (rqos->id == id)
+ break;
+ }
+ return rqos;
+}
+
+static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
+{
+ return rq_qos_id(q, RQ_QOS_WBT);
+}
+
+static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
+{
+ return rq_qos_id(q, RQ_QOS_CGROUP);
+}
+
+static inline void rq_wait_init(struct rq_wait *rq_wait)
+{
+ atomic_set(&rq_wait->inflight, 0);
+ init_waitqueue_head(&rq_wait->wait);
+}
+
+static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
+{
+ rqos->next = q->rq_qos;
+ q->rq_qos = rqos;
+}
+
+static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
+{
+ struct rq_qos *cur, *prev = NULL;
+ for (cur = q->rq_qos; cur; cur = cur->next) {
+ if (cur == rqos) {
+ if (prev)
+ prev->next = rqos->next;
+ else
+ q->rq_qos = rqos->next;
+ break;
+ }
+ prev = cur;
+ }
+}
+
+bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
+void rq_depth_scale_up(struct rq_depth *rqd);
+void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
+bool rq_depth_calc_max_depth(struct rq_depth *rqd);
+
+void rq_qos_cleanup(struct request_queue *, struct bio *);
+void rq_qos_done(struct request_queue *, struct request *);
+void rq_qos_issue(struct request_queue *, struct request *);
+void rq_qos_requeue(struct request_queue *, struct request *);
+void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
+void rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
+void rq_qos_track(struct request_queue *q, struct request *, struct bio *);
+void rq_qos_exit(struct request_queue *);
+#endif
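
For context, this is how a policy hangs off the chain: embed a struct
rq_qos, point it at an ops table, and link it with rq_qos_add() so the
rq_qos_*() dispatchers reach it. A hedged sketch (noop_qos and its
helpers are hypothetical; wbt below is the real in-tree user):

	struct noop_qos {
		struct rq_qos rqos;
	};

	static void noop_done(struct rq_qos *rqos, struct request *rq)
	{
		/* per-request completion accounting would go here */
	}

	static void noop_exit(struct rq_qos *rqos)
	{
		kfree(container_of(rqos, struct noop_qos, rqos));
	}

	static struct rq_qos_ops noop_ops = {
		.done	= noop_done,
		.exit	= noop_exit,
	};

	static int noop_qos_init(struct request_queue *q)
	{
		struct noop_qos *nq = kzalloc(sizeof(*nq), GFP_KERNEL);

		if (!nq)
			return -ENOMEM;
		nq->rqos.id = RQ_QOS_CGROUP;	/* illustrative id */
		nq->rqos.ops = &noop_ops;
		nq->rqos.q = q;
		rq_qos_add(q, &nq->rqos);
		return 0;
	}
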
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d1de71124656..ffd459969689 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -128,7 +128,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
/* Inherit limits from component devices */
lim->max_segments = USHRT_MAX;
- lim->max_discard_segments = 1;
+ lim->max_discard_segments = USHRT_MAX;
lim->max_hw_sectors = UINT_MAX;
lim->max_segment_size = UINT_MAX;
lim->max_sectors = UINT_MAX;
@@ -875,7 +875,7 @@ EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
q->queue_depth = depth;
- wbt_set_queue_depth(q->rq_wb, depth);
+ wbt_set_queue_depth(q, depth);
}
EXPORT_SYMBOL(blk_set_queue_depth);
@@ -900,7 +900,7 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
queue_flag_clear(QUEUE_FLAG_FUA, q);
spin_unlock_irq(q->queue_lock);
- wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+ wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 175c143ac5b9..7587b1c3caaf 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -17,7 +17,7 @@ struct blk_queue_stats {
bool enable_accounting;
};
-static void blk_stat_init(struct blk_rq_stat *stat)
+void blk_rq_stat_init(struct blk_rq_stat *stat)
{
stat->min = -1ULL;
stat->max = stat->nr_samples = stat->mean = 0;
@@ -25,7 +25,7 @@ static void blk_stat_init(struct blk_rq_stat *stat)
}
/* src is a per-cpu stat, mean isn't initialized */
-static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
+void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
if (!src->nr_samples)
return;
@@ -39,7 +39,7 @@ static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
dst->nr_samples += src->nr_samples;
}
-static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
+void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
stat->min = min(stat->min, value);
stat->max = max(stat->max, value);
@@ -69,7 +69,7 @@ void blk_stat_add(struct request *rq, u64 now)
continue;
stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
- __blk_stat_add(stat, value);
+ blk_rq_stat_add(stat, value);
put_cpu_ptr(cb->cpu_stat);
}
rcu_read_unlock();
@@ -82,15 +82,15 @@ static void blk_stat_timer_fn(struct timer_list *t)
int cpu;
for (bucket = 0; bucket < cb->buckets; bucket++)
- blk_stat_init(&cb->stat[bucket]);
+ blk_rq_stat_init(&cb->stat[bucket]);
for_each_online_cpu(cpu) {
struct blk_rq_stat *cpu_stat;
cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
for (bucket = 0; bucket < cb->buckets; bucket++) {
- blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
- blk_stat_init(&cpu_stat[bucket]);
+ blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
+ blk_rq_stat_init(&cpu_stat[bucket]);
}
}
@@ -143,7 +143,7 @@ void blk_stat_add_callback(struct request_queue *q,
cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
for (bucket = 0; bucket < cb->buckets; bucket++)
- blk_stat_init(&cpu_stat[bucket]);
+ blk_rq_stat_init(&cpu_stat[bucket]);
}
spin_lock(&q->stats->lock);
diff --git a/block/blk-stat.h b/block/blk-stat.h
index 78399cdde9c9..f4a1568e81a4 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -159,4 +159,8 @@ static inline void blk_stat_activate_msecs(struct blk_stat_callback *cb,
mod_timer(&cb->timer, jiffies + msecs_to_jiffies(msecs));
}
+void blk_rq_stat_add(struct blk_rq_stat *, u64);
+void blk_rq_stat_sum(struct blk_rq_stat *, struct blk_rq_stat *);
+void blk_rq_stat_init(struct blk_rq_stat *);
+
#endif
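
blk_rq_stat_sum() folds a per-cpu bucket into an aggregate: min/max
combine directly and the means combine as a sample-weighted average. A
self-contained approximation of that merge (rounding may differ from the
kernel's u64 arithmetic):

	#include <stdio.h>
	#include <stdint.h>

	struct stat { uint64_t min, max, mean, nr; };

	static void stat_sum(struct stat *dst, const struct stat *src)
	{
		if (!src->nr)
			return;
		dst->min = (dst->nr && dst->min < src->min) ?
			   dst->min : src->min;
		dst->max = dst->max > src->max ? dst->max : src->max;
		dst->mean = (dst->mean * dst->nr + src->mean * src->nr) /
			    (dst->nr + src->nr);
		dst->nr += src->nr;
	}

	int main(void)
	{
		struct stat a = { 10, 90, 40, 3 }, b = { 5, 120, 80, 1 };

		stat_sum(&a, &b);	/* mean: (40*3 + 80*1) / 4 = 50 */
		printf("min=%llu max=%llu mean=%llu n=%llu\n",
		       (unsigned long long)a.min, (unsigned long long)a.max,
		       (unsigned long long)a.mean, (unsigned long long)a.nr);
		return 0;
	}
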
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 94987b1f69e1..bb109bb0a055 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -422,16 +422,16 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
- if (!q->rq_wb)
+ if (!wbt_rq_qos(q))
return -EINVAL;
- return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
+ return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
size_t count)
{
- struct rq_wb *rwb;
+ struct rq_qos *rqos;
ssize_t ret;
s64 val;
@@ -441,23 +441,21 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
if (val < -1)
return -EINVAL;
- rwb = q->rq_wb;
- if (!rwb) {
+ rqos = wbt_rq_qos(q);
+ if (!rqos) {
ret = wbt_init(q);
if (ret)
return ret;
}
- rwb = q->rq_wb;
if (val == -1)
- rwb->min_lat_nsec = wbt_default_latency_nsec(q);
+ val = wbt_default_latency_nsec(q);
else if (val >= 0)
- rwb->min_lat_nsec = val * 1000ULL;
+ val *= 1000ULL;
- if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
- rwb->enable_state = WBT_STATE_ON_MANUAL;
+ wbt_set_min_lat(q, val);
- wbt_update_limits(rwb);
+ wbt_update_limits(q);
return count;
}
@@ -804,6 +802,21 @@ static void __blk_release_queue(struct work_struct *work)
blk_stat_remove_callback(q, q->poll_cb);
blk_stat_free_callback(q->poll_cb);
+ if (!blk_queue_dead(q)) {
+ /*
+ * Last reference was dropped without having called
+ * blk_cleanup_queue().
+ */
+ WARN_ONCE(blk_queue_init_done(q),
+ "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
+ q);
+ blk_exit_queue(q);
+ }
+
+ WARN(blk_queue_root_blkg(q),
+ "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
+ q);
+
blk_free_queue_stats(q->stats);
blk_exit_rl(q, &q->root_rl);
@@ -964,7 +977,7 @@ void blk_unregister_queue(struct gendisk *disk)
kobject_del(&q->kobj);
blk_trace_remove_sysfs(disk_to_dev(disk));
- wbt_exit(q);
+ rq_qos_exit(q);
mutex_lock(&q->sysfs_lock);
if (q->request_fn || (q->mq_ops && q->elevator))
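
The wb_lat attribute round-trips microseconds through min_lat_nsec:
stores multiply by 1000, shows divide by 1000, and -1 re-applies the
device default. The conversion as a trivially runnable check (values
are arbitrary):

	#include <stdio.h>
	#include <inttypes.h>

	int main(void)
	{
		int64_t store_us = 75000;	/* value written via sysfs */
		uint64_t min_lat_nsec = (uint64_t)store_us * 1000ULL;

		printf("shown as %" PRIu64 " usec\n", min_lat_nsec / 1000);
		return 0;
	}
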
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 82282e6fdcf8..a3eede00d302 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -579,8 +579,10 @@ static void blk_throtl_update_limit_valid(struct throtl_data *td)
struct throtl_grp *tg = blkg_to_tg(blkg);
if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
- tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
+ tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
low_valid = true;
+ break;
+ }
}
rcu_read_unlock();
@@ -920,12 +922,7 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
}
/* Calc approx time to dispatch */
- jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;
-
- if (jiffy_wait > jiffy_elapsed)
- jiffy_wait = jiffy_wait - jiffy_elapsed;
- else
- jiffy_wait = 1;
+ jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
if (wait)
*wait = jiffy_wait;
@@ -2132,12 +2129,8 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
{
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- if (bio->bi_css) {
- if (bio->bi_cg_private)
- blkg_put(tg_to_blkg(bio->bi_cg_private));
- bio->bi_cg_private = tg;
- blkg_get(tg_to_blkg(tg));
- }
+ if (bio->bi_css)
+ bio_associate_blkg(bio, tg_to_blkg(tg));
bio_issue_init(&bio->bi_issue, bio_sectors(bio));
#endif
}
@@ -2285,6 +2278,7 @@ void blk_throtl_stat_add(struct request *rq, u64 time_ns)
void blk_throtl_bio_endio(struct bio *bio)
{
+ struct blkcg_gq *blkg;
struct throtl_grp *tg;
u64 finish_time_ns;
unsigned long finish_time;
@@ -2292,20 +2286,18 @@ void blk_throtl_bio_endio(struct bio *bio)
unsigned long lat;
int rw = bio_data_dir(bio);
- tg = bio->bi_cg_private;
- if (!tg)
+ blkg = bio->bi_blkg;
+ if (!blkg)
return;
- bio->bi_cg_private = NULL;
+ tg = blkg_to_tg(blkg);
finish_time_ns = ktime_get_ns();
tg->last_finish_time = finish_time_ns >> 10;
start_time = bio_issue_time(&bio->bi_issue) >> 10;
finish_time = __bio_issue_time(finish_time_ns) >> 10;
- if (!start_time || finish_time <= start_time) {
- blkg_put(tg_to_blkg(tg));
+ if (!start_time || finish_time <= start_time)
return;
- }
lat = finish_time - start_time;
/* this is only for bio based driver */
@@ -2334,8 +2326,6 @@ void blk_throtl_bio_endio(struct bio *bio)
tg->bio_cnt /= 2;
tg->bad_bio_cnt /= 2;
}
-
- blkg_put(tg_to_blkg(tg));
}
#endif
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 4f89b28fa652..1d94a20374fc 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -25,6 +25,7 @@
#include <linux/swap.h>
#include "blk-wbt.h"
+#include "blk-rq-qos.h"
#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>
@@ -78,28 +79,6 @@ static inline bool rwb_enabled(struct rq_wb *rwb)
return rwb && rwb->wb_normal != 0;
}
-/*
- * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
- * false if 'v' + 1 would be bigger than 'below'.
- */
-static bool atomic_inc_below(atomic_t *v, int below)
-{
- int cur = atomic_read(v);
-
- for (;;) {
- int old;
-
- if (cur >= below)
- return false;
- old = atomic_cmpxchg(v, cur, cur + 1);
- if (old == cur)
- break;
- cur = old;
- }
-
- return true;
-}
-
static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
if (rwb_enabled(rwb)) {
@@ -116,7 +95,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
*/
static bool wb_recent_wait(struct rq_wb *rwb)
{
- struct bdi_writeback *wb = &rwb->queue->backing_dev_info->wb;
+ struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;
return time_before(jiffies, wb->dirty_sleep + HZ);
}
@@ -144,8 +123,9 @@ static void rwb_wake_all(struct rq_wb *rwb)
}
}
-void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
+static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
+ struct rq_wb *rwb = RQWB(rqos);
struct rq_wait *rqw;
int inflight, limit;
@@ -186,7 +166,7 @@ void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
int diff = limit - inflight;
if (!inflight || diff >= rwb->wb_background / 2)
- wake_up_all(&rqw->wait);
+ wake_up(&rqw->wait);
}
}
@@ -194,10 +174,9 @@ void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
* Called on completion of a request. Note that it's also called when
* a request is merged, when the request gets freed.
*/
-void wbt_done(struct rq_wb *rwb, struct request *rq)
+static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
- if (!rwb)
- return;
+ struct rq_wb *rwb = RQWB(rqos);
if (!wbt_is_tracked(rq)) {
if (rwb->sync_cookie == rq) {
@@ -209,72 +188,11 @@ void wbt_done(struct rq_wb *rwb, struct request *rq)
wb_timestamp(rwb, &rwb->last_comp);
} else {
WARN_ON_ONCE(rq == rwb->sync_cookie);
- __wbt_done(rwb, wbt_flags(rq));
+ __wbt_done(rqos, wbt_flags(rq));
}
wbt_clear_state(rq);
}
-/*
- * Return true, if we can't increase the depth further by scaling
- */
-static bool calc_wb_limits(struct rq_wb *rwb)
-{
- unsigned int depth;
- bool ret = false;
-
- if (!rwb->min_lat_nsec) {
- rwb->wb_max = rwb->wb_normal = rwb->wb_background = 0;
- return false;
- }
-
- /*
- * For QD=1 devices, this is a special case. It's important for those
- * to have one request ready when one completes, so force a depth of
- * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
- * since the device can't have more than that in flight. If we're
- * scaling down, then keep a setting of 1/1/1.
- */
- if (rwb->queue_depth == 1) {
- if (rwb->scale_step > 0)
- rwb->wb_max = rwb->wb_normal = 1;
- else {
- rwb->wb_max = rwb->wb_normal = 2;
- ret = true;
- }
- rwb->wb_background = 1;
- } else {
- /*
- * scale_step == 0 is our default state. If we have suffered
- * latency spikes, step will be > 0, and we shrink the
- * allowed write depths. If step is < 0, we're only doing
- * writes, and we allow a temporarily higher depth to
- * increase performance.
- */
- depth = min_t(unsigned int, RWB_DEF_DEPTH, rwb->queue_depth);
- if (rwb->scale_step > 0)
- depth = 1 + ((depth - 1) >> min(31, rwb->scale_step));
- else if (rwb->scale_step < 0) {
- unsigned int maxd = 3 * rwb->queue_depth / 4;
-
- depth = 1 + ((depth - 1) << -rwb->scale_step);
- if (depth > maxd) {
- depth = maxd;
- ret = true;
- }
- }
-
- /*
- * Set our max/normal/bg queue depths based on how far
- * we have scaled down (->scale_step).
- */
- rwb->wb_max = depth;
- rwb->wb_normal = (rwb->wb_max + 1) / 2;
- rwb->wb_background = (rwb->wb_max + 3) / 4;
- }
-
- return ret;
-}
-
static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
/*
@@ -307,7 +225,8 @@ enum {
static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
- struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
+ struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
+ struct rq_depth *rqd = &rwb->rq_depth;
u64 thislat;
/*
@@ -351,7 +270,7 @@ static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
return LAT_EXCEEDED;
}
- if (rwb->scale_step)
+ if (rqd->scale_step)
trace_wbt_stat(bdi, stat);
return LAT_OK;
@@ -359,58 +278,48 @@ static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
- struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
+ struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
+ struct rq_depth *rqd = &rwb->rq_depth;
- trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
- rwb->wb_background, rwb->wb_normal, rwb->wb_max);
+ trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
+ rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}
-static void scale_up(struct rq_wb *rwb)
+static void calc_wb_limits(struct rq_wb *rwb)
{
- /*
- * Hit max in previous round, stop here
- */
- if (rwb->scaled_max)
- return;
+ if (rwb->min_lat_nsec == 0) {
+ rwb->wb_normal = rwb->wb_background = 0;
+ } else if (rwb->rq_depth.max_depth <= 2) {
+ rwb->wb_normal = rwb->rq_depth.max_depth;
+ rwb->wb_background = 1;
+ } else {
+ rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
+ rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
+ }
+}
- rwb->scale_step--;
+static void scale_up(struct rq_wb *rwb)
+{
+ rq_depth_scale_up(&rwb->rq_depth);
+ calc_wb_limits(rwb);
rwb->unknown_cnt = 0;
-
- rwb->scaled_max = calc_wb_limits(rwb);
-
- rwb_wake_all(rwb);
-
- rwb_trace_step(rwb, "step up");
+ rwb_trace_step(rwb, "scale up");
}
-/*
- * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
- * had a latency violation.
- */
static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
- /*
- * Stop scaling down when we've hit the limit. This also prevents
- * ->scale_step from going to crazy values, if the device can't
- * keep up.
- */
- if (rwb->wb_max == 1)
- return;
-
- if (rwb->scale_step < 0 && hard_throttle)
- rwb->scale_step = 0;
- else
- rwb->scale_step++;
-
- rwb->scaled_max = false;
- rwb->unknown_cnt = 0;
+ rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
calc_wb_limits(rwb);
- rwb_trace_step(rwb, "step down");
+ rwb->unknown_cnt = 0;
+ rwb_wake_all(rwb);
+ rwb_trace_step(rwb, "scale down");
}
static void rwb_arm_timer(struct rq_wb *rwb)
{
- if (rwb->scale_step > 0) {
+ struct rq_depth *rqd = &rwb->rq_depth;
+
+ if (rqd->scale_step > 0) {
/*
* We should speed this up, using some variant of a fast
* integer inverse square root calculation. Since we only do
@@ -418,7 +327,7 @@ static void rwb_arm_timer(struct rq_wb *rwb)
* though.
*/
rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
- int_sqrt((rwb->scale_step + 1) << 8));
+ int_sqrt((rqd->scale_step + 1) << 8));
} else {
/*
* For step < 0, we don't want to increase/decrease the
@@ -433,12 +342,13 @@ static void rwb_arm_timer(struct rq_wb *rwb)
static void wb_timer_fn(struct blk_stat_callback *cb)
{
struct rq_wb *rwb = cb->data;
+ struct rq_depth *rqd = &rwb->rq_depth;
unsigned int inflight = wbt_inflight(rwb);
int status;
status = latency_exceeded(rwb, cb->stat);
- trace_wbt_timer(rwb->queue->backing_dev_info, status, rwb->scale_step,
+ trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
inflight);
/*
@@ -469,9 +379,9 @@ static void wb_timer_fn(struct blk_stat_callback *cb)
* currently don't have a valid read/write sample. For that
* case, slowly return to center state (step == 0).
*/
- if (rwb->scale_step > 0)
+ if (rqd->scale_step > 0)
scale_up(rwb);
- else if (rwb->scale_step < 0)
+ else if (rqd->scale_step < 0)
scale_down(rwb, false);
break;
default:
@@ -481,19 +391,50 @@ static void wb_timer_fn(struct blk_stat_callback *cb)
/*
* Re-arm timer, if we have IO in flight
*/
- if (rwb->scale_step || inflight)
+ if (rqd->scale_step || inflight)
rwb_arm_timer(rwb);
}
-void wbt_update_limits(struct rq_wb *rwb)
+static void __wbt_update_limits(struct rq_wb *rwb)
{
- rwb->scale_step = 0;
- rwb->scaled_max = false;
+ struct rq_depth *rqd = &rwb->rq_depth;
+
+ rqd->scale_step = 0;
+ rqd->scaled_max = false;
+
+ rq_depth_calc_max_depth(rqd);
calc_wb_limits(rwb);
rwb_wake_all(rwb);
}
+void wbt_update_limits(struct request_queue *q)
+{
+ struct rq_qos *rqos = wbt_rq_qos(q);
+ if (!rqos)
+ return;
+ __wbt_update_limits(RQWB(rqos));
+}
+
+u64 wbt_get_min_lat(struct request_queue *q)
+{
+ struct rq_qos *rqos = wbt_rq_qos(q);
+ if (!rqos)
+ return 0;
+ return RQWB(rqos)->min_lat_nsec;
+}
+
+void wbt_set_min_lat(struct request_queue *q, u64 val)
+{
+ struct rq_qos *rqos = wbt_rq_qos(q);
+ if (!rqos)
+ return;
+ RQWB(rqos)->min_lat_nsec = val;
+ RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
+ __wbt_update_limits(RQWB(rqos));
+}
+
static bool close_io(struct rq_wb *rwb)
{
const unsigned long now = jiffies;
@@ -520,7 +461,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
* IO for a bit.
*/
if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
- limit = rwb->wb_max;
+ limit = rwb->rq_depth.max_depth;
else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
/*
* If less than 100ms since we completed unrelated IO,
@@ -533,30 +474,6 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
return limit;
}
-static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
- wait_queue_entry_t *wait, unsigned long rw)
-{
- /*
- * inc it here even if disabled, since we'll dec it at completion.
- * this only happens if the task was sleeping in __wbt_wait(),
- * and someone turned it off at the same time.
- */
- if (!rwb_enabled(rwb)) {
- atomic_inc(&rqw->inflight);
- return true;
- }
-
- /*
- * If the waitqueue is already active and we are not the next
- * in line to be woken up, wait for our turn.
- */
- if (waitqueue_active(&rqw->wait) &&
- rqw->wait.head.next != &wait->entry)
- return false;
-
- return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));
-}
-
/*
* Block if we will exceed our limit, or if we are currently waiting for
* the timer to kick off queuing again.
@@ -567,16 +484,32 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
__acquires(lock)
{
struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
- DEFINE_WAIT(wait);
+ DECLARE_WAITQUEUE(wait, current);
- if (may_queue(rwb, rqw, &wait, rw))
+ /*
+ * inc it here even if disabled, since we'll dec it at completion.
+ * this only happens if the task was sleeping in __wbt_wait(),
+ * and someone turned it off at the same time.
+ */
+ if (!rwb_enabled(rwb)) {
+ atomic_inc(&rqw->inflight);
return;
+ }
+ if (!waitqueue_active(&rqw->wait) &&
+     rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+ return;
+
+ add_wait_queue_exclusive(&rqw->wait, &wait);
do {
- prepare_to_wait_exclusive(&rqw->wait, &wait,
- TASK_UNINTERRUPTIBLE);
+ set_current_state(TASK_UNINTERRUPTIBLE);
- if (may_queue(rwb, rqw, &wait, rw))
+ if (!rwb_enabled(rwb)) {
+ atomic_inc(&rqw->inflight);
+ break;
+ }
+
+ if (rq_wait_inc_below(rqw, get_limit(rwb, rw)))
break;
if (lock) {
@@ -587,7 +520,8 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
io_schedule();
} while (1);
- finish_wait(&rqw->wait, &wait);
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&rqw->wait, &wait);
}
static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
@@ -608,43 +542,72 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
}
}
+static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
+{
+ enum wbt_flags flags = 0;
+
+ if (bio_op(bio) == REQ_OP_READ) {
+ flags = WBT_READ;
+ } else if (wbt_should_throttle(rwb, bio)) {
+ if (current_is_kswapd())
+ flags |= WBT_KSWAPD;
+ if (bio_op(bio) == REQ_OP_DISCARD)
+ flags |= WBT_DISCARD;
+ flags |= WBT_TRACKED;
+ }
+ return flags;
+}
+
+static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
+{
+ struct rq_wb *rwb = RQWB(rqos);
+ enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
+ __wbt_done(rqos, flags);
+}
+
/*
* Returns true if the IO request should be accounted, false if not.
* May sleep, if we have exceeded the writeback limits. Caller can pass
* in an irq held spinlock, if it holds one when calling this function.
* If we do sleep, we'll release and re-grab it.
*/
-enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
+static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
{
- enum wbt_flags ret = 0;
+ struct rq_wb *rwb = RQWB(rqos);
+ enum wbt_flags flags;
if (!rwb_enabled(rwb))
- return 0;
+ return;
- if (bio_op(bio) == REQ_OP_READ)
- ret = WBT_READ;
+ flags = bio_to_wbt_flags(rwb, bio);
if (!wbt_should_throttle(rwb, bio)) {
- if (ret & WBT_READ)
+ if (flags & WBT_READ)
wb_timestamp(rwb, &rwb->last_issue);
- return ret;
+ return;
}
if (current_is_kswapd())
- ret |= WBT_KSWAPD;
+ flags |= WBT_KSWAPD;
if (bio_op(bio) == REQ_OP_DISCARD)
- ret |= WBT_DISCARD;
+ flags |= WBT_DISCARD;
- __wbt_wait(rwb, ret, bio->bi_opf, lock);
+ __wbt_wait(rwb, flags, bio->bi_opf, lock);
if (!blk_stat_is_active(rwb->cb))
rwb_arm_timer(rwb);
+}
- return ret | WBT_TRACKED;
+static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
+{
+ struct rq_wb *rwb = RQWB(rqos);
+ rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}
-void wbt_issue(struct rq_wb *rwb, struct request *rq)
+void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
+ struct rq_wb *rwb = RQWB(rqos);
+
if (!rwb_enabled(rwb))
return;
@@ -661,8 +624,9 @@ void wbt_issue(struct rq_wb *rwb, struct request *rq)
}
}
-void wbt_requeue(struct rq_wb *rwb, struct request *rq)
+void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
+ struct rq_wb *rwb = RQWB(rqos);
if (!rwb_enabled(rwb))
return;
if (rq == rwb->sync_cookie) {
@@ -671,39 +635,30 @@ void wbt_requeue(struct rq_wb *rwb, struct request *rq)
}
}
-void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
+void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
{
- if (rwb) {
- rwb->queue_depth = depth;
- wbt_update_limits(rwb);
+ struct rq_qos *rqos = wbt_rq_qos(q);
+ if (rqos) {
+ RQWB(rqos)->rq_depth.queue_depth = depth;
+ __wbt_update_limits(RQWB(rqos));
}
}
-void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on)
+void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
- if (rwb)
- rwb->wc = write_cache_on;
+ struct rq_qos *rqos = wbt_rq_qos(q);
+ if (rqos)
+ RQWB(rqos)->wc = write_cache_on;
}
/*
- * Disable wbt, if enabled by default.
- */
-void wbt_disable_default(struct request_queue *q)
-{
- struct rq_wb *rwb = q->rq_wb;
-
- if (rwb && rwb->enable_state == WBT_STATE_ON_DEFAULT)
- wbt_exit(q);
-}
-EXPORT_SYMBOL_GPL(wbt_disable_default);
-
-/*
* Enable wbt if defaults are configured that way
*/
void wbt_enable_default(struct request_queue *q)
{
+ struct rq_qos *rqos = wbt_rq_qos(q);
/* Throttling already enabled? */
- if (q->rq_wb)
+ if (rqos)
return;
/* Queue not registered? Maybe shutting down... */
@@ -741,6 +696,42 @@ static int wbt_data_dir(const struct request *rq)
return -1;
}
+static void wbt_exit(struct rq_qos *rqos)
+{
+ struct rq_wb *rwb = RQWB(rqos);
+ struct request_queue *q = rqos->q;
+
+ blk_stat_remove_callback(q, rwb->cb);
+ blk_stat_free_callback(rwb->cb);
+ kfree(rwb);
+}
+
+/*
+ * Disable wbt, if enabled by default.
+ */
+void wbt_disable_default(struct request_queue *q)
+{
+ struct rq_qos *rqos = wbt_rq_qos(q);
+ struct rq_wb *rwb;
+ if (!rqos)
+ return;
+ rwb = RQWB(rqos);
+ if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
+ rwb->wb_normal = 0;
+}
+EXPORT_SYMBOL_GPL(wbt_disable_default);
+
+static struct rq_qos_ops wbt_rqos_ops = {
+ .throttle = wbt_wait,
+ .issue = wbt_issue,
+ .track = wbt_track,
+ .requeue = wbt_requeue,
+ .done = wbt_done,
+ .cleanup = wbt_cleanup,
+ .exit = wbt_exit,
+};
+
int wbt_init(struct request_queue *q)
{
struct rq_wb *rwb;
@@ -756,39 +747,29 @@ int wbt_init(struct request_queue *q)
return -ENOMEM;
}
- for (i = 0; i < WBT_NUM_RWQ; i++) {
- atomic_set(&rwb->rq_wait[i].inflight, 0);
- init_waitqueue_head(&rwb->rq_wait[i].wait);
- }
+ for (i = 0; i < WBT_NUM_RWQ; i++)
+ rq_wait_init(&rwb->rq_wait[i]);
+ rwb->rqos.id = RQ_QOS_WBT;
+ rwb->rqos.ops = &wbt_rqos_ops;
+ rwb->rqos.q = q;
rwb->last_comp = rwb->last_issue = jiffies;
- rwb->queue = q;
rwb->win_nsec = RWB_WINDOW_NSEC;
rwb->enable_state = WBT_STATE_ON_DEFAULT;
- wbt_update_limits(rwb);
+ rwb->wc = 1;
+ rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
+ __wbt_update_limits(rwb);
/*
* Assign rwb and add the stats callback.
*/
- q->rq_wb = rwb;
+ rq_qos_add(q, &rwb->rqos);
blk_stat_add_callback(q, rwb->cb);
rwb->min_lat_nsec = wbt_default_latency_nsec(q);
- wbt_set_queue_depth(rwb, blk_queue_depth(q));
- wbt_set_write_cache(rwb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+ wbt_set_queue_depth(q, blk_queue_depth(q));
+ wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
return 0;
}
-
-void wbt_exit(struct request_queue *q)
-{
- struct rq_wb *rwb = q->rq_wb;
-
- if (rwb) {
- blk_stat_remove_callback(q, rwb->cb);
- blk_stat_free_callback(rwb->cb);
- q->rq_wb = NULL;
- kfree(rwb);
- }
-}
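
The rewritten __wbt_wait() open-codes the classic exclusive-wait loop:
the task joins the queue once, and set_current_state() runs before every
condition check, so a wake-up racing with the check merely makes
io_schedule() return immediately instead of being lost. The skeleton of
that pattern (sketch only; condition_holds() is a placeholder):

	add_wait_queue_exclusive(&rqw->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (condition_holds())	/* e.g. rq_wait_inc_below() */
			break;
		io_schedule();		/* woken waiters loop and recheck */
	} while (1);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&rqw->wait, &wait);
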
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index 300df531d0a6..f47218d5b3b2 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -9,6 +9,7 @@
#include <linux/ktime.h>
#include "blk-stat.h"
+#include "blk-rq-qos.h"
enum wbt_flags {
WBT_TRACKED = 1, /* write, tracked for throttling */
@@ -35,20 +36,12 @@ enum {
WBT_STATE_ON_MANUAL = 2,
};
-struct rq_wait {
- wait_queue_head_t wait;
- atomic_t inflight;
-};
-
struct rq_wb {
/*
* Settings that govern how we throttle
*/
unsigned int wb_background; /* background writeback */
unsigned int wb_normal; /* normal writeback */
- unsigned int wb_max; /* max throughput writeback */
- int scale_step;
- bool scaled_max;
short enable_state; /* WBT_STATE_* */
@@ -67,15 +60,20 @@ struct rq_wb {
void *sync_cookie;
unsigned int wc;
- unsigned int queue_depth;
unsigned long last_issue; /* last non-throttled issue */
unsigned long last_comp; /* last non-throttled comp */
unsigned long min_lat_nsec;
- struct request_queue *queue;
+ struct rq_qos rqos;
struct rq_wait rq_wait[WBT_NUM_RWQ];
+ struct rq_depth rq_depth;
};
+static inline struct rq_wb *RQWB(struct rq_qos *rqos)
+{
+ return container_of(rqos, struct rq_wb, rqos);
+}
+
static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
unsigned int i, ret = 0;
@@ -86,26 +84,19 @@ static inline unsigned int wbt_inflight(struct rq_wb *rwb)
return ret;
}
-#ifdef CONFIG_BLK_WBT
-static inline void wbt_track(struct request *rq, enum wbt_flags flags)
-{
- rq->wbt_flags |= flags;
-}
+#ifdef CONFIG_BLK_WBT
-void __wbt_done(struct rq_wb *, enum wbt_flags);
-void wbt_done(struct rq_wb *, struct request *);
-enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
int wbt_init(struct request_queue *);
-void wbt_exit(struct request_queue *);
-void wbt_update_limits(struct rq_wb *);
-void wbt_requeue(struct rq_wb *, struct request *);
-void wbt_issue(struct rq_wb *, struct request *);
+void wbt_update_limits(struct request_queue *);
void wbt_disable_default(struct request_queue *);
void wbt_enable_default(struct request_queue *);
-void wbt_set_queue_depth(struct rq_wb *, unsigned int);
-void wbt_set_write_cache(struct rq_wb *, bool);
+u64 wbt_get_min_lat(struct request_queue *q);
+void wbt_set_min_lat(struct request_queue *q, u64 val);
+
+void wbt_set_queue_depth(struct request_queue *, unsigned int);
+void wbt_set_write_cache(struct request_queue *, bool);
u64 wbt_default_latency_nsec(struct request_queue *);
@@ -114,43 +105,30 @@ u64 wbt_default_latency_nsec(struct request_queue *);
static inline void wbt_track(struct request *rq, enum wbt_flags flags)
{
}
-static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
-{
-}
-static inline void wbt_done(struct rq_wb *rwb, struct request *rq)
-{
-}
-static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
- spinlock_t *lock)
-{
- return 0;
-}
static inline int wbt_init(struct request_queue *q)
{
return -EINVAL;
}
-static inline void wbt_exit(struct request_queue *q)
-{
-}
-static inline void wbt_update_limits(struct rq_wb *rwb)
+static inline void wbt_update_limits(struct request_queue *q)
{
}
-static inline void wbt_requeue(struct rq_wb *rwb, struct request *rq)
+static inline void wbt_disable_default(struct request_queue *q)
{
}
-static inline void wbt_issue(struct rq_wb *rwb, struct request *rq)
+static inline void wbt_enable_default(struct request_queue *q)
{
}
-static inline void wbt_disable_default(struct request_queue *q)
+static inline void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
{
}
-static inline void wbt_enable_default(struct request_queue *q)
+static inline void wbt_set_write_cache(struct request_queue *q, bool wc)
{
}
-static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
+static inline u64 wbt_get_min_lat(struct request_queue *q)
{
+ return 0;
}
-static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
+static inline void wbt_set_min_lat(struct request_queue *q, u64 val)
{
}
static inline u64 wbt_default_latency_nsec(struct request_queue *q)
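
The new RQWB() helper above leans on the container_of() idiom: rq_wb now embeds a struct rq_qos, and any callback handed the embedded member can recover the enclosing writeback-throttling state. A minimal userspace sketch of the idiom (struct names and fields here are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for the embedded-member layout used by rq_wb/rq_qos. */
    struct qos { int id; };
    struct wb  { int min_lat; struct qos qos; };

    /* Same trick as the kernel's container_of(): subtract the member's
     * offset from the member's address to get the enclosing object. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct wb *to_wb(struct qos *q)
    {
        return container_of(q, struct wb, qos);
    }

    int main(void)
    {
        struct wb w = { .min_lat = 75000 };
        struct qos *q = &w.qos;        /* callbacks only receive this */

        printf("min_lat=%d\n", to_wb(q)->min_lat); /* min_lat=75000 */
        return 0;
    }
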
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 51000914e23f..c461cf63f1f4 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -200,7 +200,7 @@ int blkdev_report_zones(struct block_device *bdev,
/* Get header in the first page */
ofst = 0;
if (!nr_rep) {
- hdr = (struct blk_zone_report_hdr *) addr;
+ hdr = addr;
nr_rep = hdr->nr_zones;
ofst = sizeof(struct blk_zone_report_hdr);
}
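
The blk-zoned.c change above drops a cast that carried no information: in C (unlike C++), void * converts implicitly to any object pointer type. A tiny sketch of the same cleanup in isolation (the struct is borrowed from the hunk; the rest is illustrative):

    #include <stdlib.h>

    struct blk_zone_report_hdr { unsigned int nr_zones; };

    int main(void)
    {
        void *addr = malloc(sizeof(struct blk_zone_report_hdr));

        if (!addr)
            return 1;
        /* No cast needed: void * converts implicitly in C. */
        struct blk_zone_report_hdr *hdr = addr;

        hdr->nr_zones = 0;
        free(addr);
        return 0;
    }
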
diff --git a/block/blk.h b/block/blk.h
index 8d23aea96ce9..d4d67e948920 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -130,6 +130,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
int blk_init_rl(struct request_list *rl, struct request_queue *q,
gfp_t gfp_mask);
void blk_exit_rl(struct request_queue *q, struct request_list *rl);
+void blk_exit_queue(struct request_queue *q);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
@@ -412,4 +413,10 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
extern void blk_drain_queue(struct request_queue *q);
+#ifdef CONFIG_BLK_CGROUP_IOLATENCY
+extern int blk_iolatency_init(struct request_queue *q);
+#else
+static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
+#endif
+
#endif /* BLK_INTERNAL_H */
diff --git a/block/bounce.c b/block/bounce.c
index fd31347b7836..bc63b3a2d18c 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -195,6 +195,73 @@ static void bounce_end_io_read_isa(struct bio *bio)
__bounce_end_io_read(bio, &isa_page_pool);
}
+static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
+ struct bio_set *bs)
+{
+ struct bvec_iter iter;
+ struct bio_vec bv;
+ struct bio *bio;
+
+ /*
+ * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
+ * bio_src->bi_io_vec to bio->bi_io_vec.
+ *
+ * We can't do that anymore, because:
+ *
+ * - The point of cloning the biovec is to produce a bio with a biovec
+ * the caller can modify: bi_idx and bi_bvec_done should be 0.
+ *
+ * - The original bio could've had more than BIO_MAX_PAGES biovecs; if
+ * we tried to clone the whole thing bio_alloc_bioset() would fail.
+ * But the clone should succeed as long as the number of biovecs we
+ * actually need to allocate is fewer than BIO_MAX_PAGES.
+ *
+ * - Lastly, bi_vcnt should not be looked at or relied upon by code
+ * that does not own the bio - reason being drivers don't use it for
+ * iterating over the biovec anymore, so expecting it to be kept up
+ * to date (i.e. for clones that share the parent biovec) is just
+ * asking for trouble and would force extra work on
+ * __bio_clone_fast() anyway.

+ */
+
+ bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
+ if (!bio)
+ return NULL;
+ bio->bi_disk = bio_src->bi_disk;
+ bio->bi_opf = bio_src->bi_opf;
+ bio->bi_write_hint = bio_src->bi_write_hint;
+ bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
+ bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
+
+ switch (bio_op(bio)) {
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ case REQ_OP_WRITE_ZEROES:
+ break;
+ case REQ_OP_WRITE_SAME:
+ bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
+ break;
+ default:
+ bio_for_each_segment(bv, bio_src, iter)
+ bio->bi_io_vec[bio->bi_vcnt++] = bv;
+ break;
+ }
+
+ if (bio_integrity(bio_src)) {
+ int ret;
+
+ ret = bio_integrity_clone(bio, bio_src, gfp_mask);
+ if (ret < 0) {
+ bio_put(bio);
+ return NULL;
+ }
+ }
+
+ bio_clone_blkcg_association(bio, bio_src);
+
+ return bio;
+}
+
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
mempool_t *pool)
{
@@ -222,7 +289,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
generic_make_request(*bio_orig);
*bio_orig = bio;
}
- bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
+ bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
&bounce_bio_set);
bio_for_each_segment_all(to, bio, i) {
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 9419def8c017..f3501cdaf1a6 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -48,9 +48,8 @@ static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
job->request_len = hdr->request_len;
job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
- if (IS_ERR(job->request))
- return PTR_ERR(job->request);
- return 0;
+
+ return PTR_ERR_OR_ZERO(job->request);
}
static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
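
The bsg-lib.c hunk collapses an IS_ERR/PTR_ERR/return-0 triple into PTR_ERR_OR_ZERO(). That works because the kernel encodes small negative errno values into the top of the pointer range, where no valid pointer can live. A rough, simplified userspace model of the convention (definitions are illustrative re-implementations, not the kernel headers):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }
    static inline long PTR_ERR_OR_ZERO(const void *ptr)
    {
        return IS_ERR(ptr) ? (long)ptr : 0;
    }

    int main(void)
    {
        int x = 0;
        void *good = &x;
        void *bad = ERR_PTR(-12);    /* -ENOMEM */

        /* One expression replaces the explicit error-check triple. */
        printf("%ld %ld\n", PTR_ERR_OR_ZERO(good), PTR_ERR_OR_ZERO(bad));
        return 0;                    /* prints: 0 -12 */
    }
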
diff --git a/block/bsg.c b/block/bsg.c
index 3da540faf673..db588add6ba6 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -13,11 +13,9 @@
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
-#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
-#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>
@@ -38,21 +36,10 @@
struct bsg_device {
struct request_queue *queue;
spinlock_t lock;
- struct list_head busy_list;
- struct list_head done_list;
struct hlist_node dev_list;
atomic_t ref_count;
- int queued_cmds;
- int done_cmds;
- wait_queue_head_t wq_done;
- wait_queue_head_t wq_free;
char name[20];
int max_queue;
- unsigned long flags;
-};
-
-enum {
- BSG_F_BLOCK = 1,
};
#define BSG_DEFAULT_CMDS 64
@@ -67,64 +54,6 @@ static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
static struct class *bsg_class;
static int bsg_major;
-static struct kmem_cache *bsg_cmd_cachep;
-
-/*
- * our internal command type
- */
-struct bsg_command {
- struct bsg_device *bd;
- struct list_head list;
- struct request *rq;
- struct bio *bio;
- struct bio *bidi_bio;
- int err;
- struct sg_io_v4 hdr;
-};
-
-static void bsg_free_command(struct bsg_command *bc)
-{
- struct bsg_device *bd = bc->bd;
- unsigned long flags;
-
- kmem_cache_free(bsg_cmd_cachep, bc);
-
- spin_lock_irqsave(&bd->lock, flags);
- bd->queued_cmds--;
- spin_unlock_irqrestore(&bd->lock, flags);
-
- wake_up(&bd->wq_free);
-}
-
-static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
-{
- struct bsg_command *bc = ERR_PTR(-EINVAL);
-
- spin_lock_irq(&bd->lock);
-
- if (bd->queued_cmds >= bd->max_queue)
- goto out;
-
- bd->queued_cmds++;
- spin_unlock_irq(&bd->lock);
-
- bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
- if (unlikely(!bc)) {
- spin_lock_irq(&bd->lock);
- bd->queued_cmds--;
- bc = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- bc->bd = bd;
- INIT_LIST_HEAD(&bc->list);
- bsg_dbg(bd, "returning free cmd %p\n", bc);
- return bc;
-out:
- spin_unlock_irq(&bd->lock);
- return bc;
-}
-
static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
@@ -285,101 +214,6 @@ out:
return ERR_PTR(ret);
}
-/*
- * async completion call-back from the block layer, when scsi/ide/whatever
- * calls end_that_request_last() on a request
- */
-static void bsg_rq_end_io(struct request *rq, blk_status_t status)
-{
- struct bsg_command *bc = rq->end_io_data;
- struct bsg_device *bd = bc->bd;
- unsigned long flags;
-
- bsg_dbg(bd, "finished rq %p bc %p, bio %p\n",
- rq, bc, bc->bio);
-
- bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
-
- spin_lock_irqsave(&bd->lock, flags);
- list_move_tail(&bc->list, &bd->done_list);
- bd->done_cmds++;
- spin_unlock_irqrestore(&bd->lock, flags);
-
- wake_up(&bd->wq_done);
-}
-
-/*
- * do final setup of a 'bc' and submit the matching 'rq' to the block
- * layer for io
- */
-static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
- struct bsg_command *bc, struct request *rq)
-{
- int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));
-
- /*
- * add bc command to busy queue and submit rq for io
- */
- bc->rq = rq;
- bc->bio = rq->bio;
- if (rq->next_rq)
- bc->bidi_bio = rq->next_rq->bio;
- bc->hdr.duration = jiffies;
- spin_lock_irq(&bd->lock);
- list_add_tail(&bc->list, &bd->busy_list);
- spin_unlock_irq(&bd->lock);
-
- bsg_dbg(bd, "queueing rq %p, bc %p\n", rq, bc);
-
- rq->end_io_data = bc;
- blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
-}
-
-static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
-{
- struct bsg_command *bc = NULL;
-
- spin_lock_irq(&bd->lock);
- if (bd->done_cmds) {
- bc = list_first_entry(&bd->done_list, struct bsg_command, list);
- list_del(&bc->list);
- bd->done_cmds--;
- }
- spin_unlock_irq(&bd->lock);
-
- return bc;
-}
-
-/*
- * Get a finished command from the done list
- */
-static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
-{
- struct bsg_command *bc;
- int ret;
-
- do {
- bc = bsg_next_done_cmd(bd);
- if (bc)
- break;
-
- if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
- bc = ERR_PTR(-EAGAIN);
- break;
- }
-
- ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
- if (ret) {
- bc = ERR_PTR(-ERESTARTSYS);
- break;
- }
- } while (1);
-
- bsg_dbg(bd, "returning done %p\n", bc);
-
- return bc;
-}
-
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
struct bio *bio, struct bio *bidi_bio)
{
@@ -398,234 +232,6 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
return ret;
}
-static bool bsg_complete(struct bsg_device *bd)
-{
- bool ret = false;
- bool spin;
-
- do {
- spin_lock_irq(&bd->lock);
-
- BUG_ON(bd->done_cmds > bd->queued_cmds);
-
- /*
- * All commands consumed.
- */
- if (bd->done_cmds == bd->queued_cmds)
- ret = true;
-
- spin = !test_bit(BSG_F_BLOCK, &bd->flags);
-
- spin_unlock_irq(&bd->lock);
- } while (!ret && spin);
-
- return ret;
-}
-
-static int bsg_complete_all_commands(struct bsg_device *bd)
-{
- struct bsg_command *bc;
- int ret, tret;
-
- bsg_dbg(bd, "entered\n");
-
- /*
- * wait for all commands to complete
- */
- io_wait_event(bd->wq_done, bsg_complete(bd));
-
- /*
- * discard done commands
- */
- ret = 0;
- do {
- spin_lock_irq(&bd->lock);
- if (!bd->queued_cmds) {
- spin_unlock_irq(&bd->lock);
- break;
- }
- spin_unlock_irq(&bd->lock);
-
- bc = bsg_get_done_cmd(bd);
- if (IS_ERR(bc))
- break;
-
- tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
- bc->bidi_bio);
- if (!ret)
- ret = tret;
-
- bsg_free_command(bc);
- } while (1);
-
- return ret;
-}
-
-static int
-__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
- const struct iovec *iov, ssize_t *bytes_read)
-{
- struct bsg_command *bc;
- int nr_commands, ret;
-
- if (count % sizeof(struct sg_io_v4))
- return -EINVAL;
-
- ret = 0;
- nr_commands = count / sizeof(struct sg_io_v4);
- while (nr_commands) {
- bc = bsg_get_done_cmd(bd);
- if (IS_ERR(bc)) {
- ret = PTR_ERR(bc);
- break;
- }
-
- /*
- * this is the only case where we need to copy data back
- * after completing the request. so do that here,
- * bsg_complete_work() cannot do that for us
- */
- ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
- bc->bidi_bio);
-
- if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
- ret = -EFAULT;
-
- bsg_free_command(bc);
-
- if (ret)
- break;
-
- buf += sizeof(struct sg_io_v4);
- *bytes_read += sizeof(struct sg_io_v4);
- nr_commands--;
- }
-
- return ret;
-}
-
-static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
-{
- if (file->f_flags & O_NONBLOCK)
- clear_bit(BSG_F_BLOCK, &bd->flags);
- else
- set_bit(BSG_F_BLOCK, &bd->flags);
-}
-
-/*
- * Check if the error is a "real" error that we should return.
- */
-static inline int err_block_err(int ret)
-{
- if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
- return 1;
-
- return 0;
-}
-
-static ssize_t
-bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
-{
- struct bsg_device *bd = file->private_data;
- int ret;
- ssize_t bytes_read;
-
- bsg_dbg(bd, "read %zd bytes\n", count);
-
- bsg_set_block(bd, file);
-
- bytes_read = 0;
- ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
- *ppos = bytes_read;
-
- if (!bytes_read || err_block_err(ret))
- bytes_read = ret;
-
- return bytes_read;
-}
-
-static int __bsg_write(struct bsg_device *bd, const char __user *buf,
- size_t count, ssize_t *bytes_written, fmode_t mode)
-{
- struct bsg_command *bc;
- struct request *rq;
- int ret, nr_commands;
-
- if (count % sizeof(struct sg_io_v4))
- return -EINVAL;
-
- nr_commands = count / sizeof(struct sg_io_v4);
- rq = NULL;
- bc = NULL;
- ret = 0;
- while (nr_commands) {
- struct request_queue *q = bd->queue;
-
- bc = bsg_alloc_command(bd);
- if (IS_ERR(bc)) {
- ret = PTR_ERR(bc);
- bc = NULL;
- break;
- }
-
- if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
- ret = -EFAULT;
- break;
- }
-
- /*
- * get a request, fill in the blanks, and add to request queue
- */
- rq = bsg_map_hdr(bd->queue, &bc->hdr, mode);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
- rq = NULL;
- break;
- }
-
- bsg_add_command(bd, q, bc, rq);
- bc = NULL;
- rq = NULL;
- nr_commands--;
- buf += sizeof(struct sg_io_v4);
- *bytes_written += sizeof(struct sg_io_v4);
- }
-
- if (bc)
- bsg_free_command(bc);
-
- return ret;
-}
-
-static ssize_t
-bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-{
- struct bsg_device *bd = file->private_data;
- ssize_t bytes_written;
- int ret;
-
- bsg_dbg(bd, "write %zd bytes\n", count);
-
- if (unlikely(uaccess_kernel()))
- return -EINVAL;
-
- bsg_set_block(bd, file);
-
- bytes_written = 0;
- ret = __bsg_write(bd, buf, count, &bytes_written, file->f_mode);
-
- *ppos = bytes_written;
-
- /*
- * return bytes written on non-fatal errors
- */
- if (!bytes_written || err_block_err(ret))
- bytes_written = ret;
-
- bsg_dbg(bd, "returning %zd\n", bytes_written);
- return bytes_written;
-}
-
static struct bsg_device *bsg_alloc_device(void)
{
struct bsg_device *bd;
@@ -635,29 +241,20 @@ static struct bsg_device *bsg_alloc_device(void)
return NULL;
spin_lock_init(&bd->lock);
-
bd->max_queue = BSG_DEFAULT_CMDS;
-
- INIT_LIST_HEAD(&bd->busy_list);
- INIT_LIST_HEAD(&bd->done_list);
INIT_HLIST_NODE(&bd->dev_list);
-
- init_waitqueue_head(&bd->wq_free);
- init_waitqueue_head(&bd->wq_done);
return bd;
}
static int bsg_put_device(struct bsg_device *bd)
{
- int ret = 0, do_free;
struct request_queue *q = bd->queue;
mutex_lock(&bsg_mutex);
- do_free = atomic_dec_and_test(&bd->ref_count);
- if (!do_free) {
+ if (!atomic_dec_and_test(&bd->ref_count)) {
mutex_unlock(&bsg_mutex);
- goto out;
+ return 0;
}
hlist_del(&bd->dev_list);
@@ -668,20 +265,9 @@ static int bsg_put_device(struct bsg_device *bd)
/*
* close can always block
*/
- set_bit(BSG_F_BLOCK, &bd->flags);
-
- /*
- * correct error detection baddies here again. it's the responsibility
- * of the app to properly reap commands before close() if it wants
- * fool-proof error detection
- */
- ret = bsg_complete_all_commands(bd);
-
kfree(bd);
-out:
- if (do_free)
- blk_put_queue(q);
- return ret;
+ blk_put_queue(q);
+ return 0;
}
static struct bsg_device *bsg_add_device(struct inode *inode,
@@ -704,8 +290,6 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
bd->queue = rq;
- bsg_set_block(bd, file);
-
atomic_set(&bd->ref_count, 1);
hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
@@ -779,24 +363,6 @@ static int bsg_release(struct inode *inode, struct file *file)
return bsg_put_device(bd);
}
-static __poll_t bsg_poll(struct file *file, poll_table *wait)
-{
- struct bsg_device *bd = file->private_data;
- __poll_t mask = 0;
-
- poll_wait(file, &bd->wq_done, wait);
- poll_wait(file, &bd->wq_free, wait);
-
- spin_lock_irq(&bd->lock);
- if (!list_empty(&bd->done_list))
- mask |= EPOLLIN | EPOLLRDNORM;
- if (bd->queued_cmds < bd->max_queue)
- mask |= EPOLLOUT;
- spin_unlock_irq(&bd->lock);
-
- return mask;
-}
-
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct bsg_device *bd = file->private_data;
@@ -870,9 +436,6 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
static const struct file_operations bsg_fops = {
- .read = bsg_read,
- .write = bsg_write,
- .poll = bsg_poll,
.open = bsg_open,
.release = bsg_release,
.unlocked_ioctl = bsg_ioctl,
@@ -977,21 +540,12 @@ static int __init bsg_init(void)
int ret, i;
dev_t devid;
- bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
- sizeof(struct bsg_command), 0, 0, NULL);
- if (!bsg_cmd_cachep) {
- printk(KERN_ERR "bsg: failed creating slab cache\n");
- return -ENOMEM;
- }
-
for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
INIT_HLIST_HEAD(&bsg_device_list[i]);
bsg_class = class_create(THIS_MODULE, "bsg");
- if (IS_ERR(bsg_class)) {
- ret = PTR_ERR(bsg_class);
- goto destroy_kmemcache;
- }
+ if (IS_ERR(bsg_class))
+ return PTR_ERR(bsg_class);
bsg_class->devnode = bsg_devnode;
ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
@@ -1012,8 +566,6 @@ unregister_chrdev:
unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
class_destroy(bsg_class);
-destroy_kmemcache:
- kmem_cache_destroy(bsg_cmd_cachep);
return ret;
}
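
With the read/write/poll paths gone, the surviving bsg_put_device() reduces to the standard "decrement and test, last one out frees" refcount shape. A minimal sketch of that shape with C11 atomics (illustrative; the kernel uses atomic_dec_and_test() under bsg_mutex):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dev {
        atomic_int ref_count;
    };

    /* Only the caller that drops the final reference tears down. */
    static bool put_dev(struct dev *d)
    {
        if (atomic_fetch_sub(&d->ref_count, 1) != 1)
            return false;    /* others still hold references */
        free(d);             /* final ref: release everything */
        return true;
    }

    int main(void)
    {
        struct dev *d = malloc(sizeof(*d));

        if (!d)
            return 1;
        atomic_init(&d->ref_count, 2);
        printf("%d\n", put_dev(d));    /* 0: still referenced */
        printf("%d\n", put_dev(d));    /* 1: freed on this call */
        return 0;
    }
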
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 82b6c27b3245..2eb87444b157 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3666,6 +3666,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
switch (ioprio_class) {
default:
printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
+ /* fall through */
case IOPRIO_CLASS_NONE:
/*
* no prio set, inherit CPU scheduling settings
@@ -4735,12 +4736,13 @@ USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
struct cfq_data *cfqd = e->elevator_data; \
- unsigned int __data; \
+ unsigned int __data, __min = (MIN), __max = (MAX); \
+ \
cfq_var_store(&__data, (page)); \
- if (__data < (MIN)) \
- __data = (MIN); \
- else if (__data > (MAX)) \
- __data = (MAX); \
+ if (__data < __min) \
+ __data = __min; \
+ else if (__data > __max) \
+ __data = __max; \
if (__CONV) \
*(__PTR) = (u64)__data * NSEC_PER_MSEC; \
else \
@@ -4769,12 +4771,13 @@ STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX,
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
struct cfq_data *cfqd = e->elevator_data; \
- unsigned int __data; \
+ unsigned int __data, __min = (MIN), __max = (MAX); \
+ \
cfq_var_store(&__data, (page)); \
- if (__data < (MIN)) \
- __data = (MIN); \
- else if (__data > (MAX)) \
- __data = (MAX); \
+ if (__data < __min) \
+ __data = __min; \
+ else if (__data > __max) \
+ __data = __max; \
*(__PTR) = (u64)__data * NSEC_PER_USEC; \
return count; \
}
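
The STORE_FUNCTION rework binds MIN/MAX to typed locals before comparing. A likely motivation: with a literal bound such as 0 on an unsigned value, the raw `__data < (MIN)` comparison is always false and draws a compiler warning, and locals also evaluate each macro argument exactly once. The clamp expressed as a plain function (illustrative sketch, not the cfq macro):

    #include <stdio.h>

    static unsigned int clamp_uint(unsigned int val, unsigned int min,
                                   unsigned int max)
    {
        if (val < min)
            val = min;        /* never true for min == 0; no warning */
        else if (val > max)
            val = max;
        return val;
    }

    int main(void)
    {
        printf("%u\n", clamp_uint(250, 1, 100));    /* prints 100 */
        return 0;
    }
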
diff --git a/block/genhd.c b/block/genhd.c
index f1543a45e73b..8cc719a37b32 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1333,21 +1333,28 @@ static int diskstats_show(struct seq_file *seqf, void *v)
part_round_stats(gp->queue, cpu, hd);
part_stat_unlock();
part_in_flight(gp->queue, hd, inflight);
- seq_printf(seqf, "%4d %7d %s %lu %lu %lu "
- "%u %lu %lu %lu %u %u %u %u\n",
+ seq_printf(seqf, "%4d %7d %s "
+ "%lu %lu %lu %u "
+ "%lu %lu %lu %u "
+ "%u %u %u "
+ "%lu %lu %lu %u\n",
MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
disk_name(gp, hd->partno, buf),
- part_stat_read(hd, ios[READ]),
- part_stat_read(hd, merges[READ]),
- part_stat_read(hd, sectors[READ]),
- jiffies_to_msecs(part_stat_read(hd, ticks[READ])),
- part_stat_read(hd, ios[WRITE]),
- part_stat_read(hd, merges[WRITE]),
- part_stat_read(hd, sectors[WRITE]),
- jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])),
+ part_stat_read(hd, ios[STAT_READ]),
+ part_stat_read(hd, merges[STAT_READ]),
+ part_stat_read(hd, sectors[STAT_READ]),
+ jiffies_to_msecs(part_stat_read(hd, ticks[STAT_READ])),
+ part_stat_read(hd, ios[STAT_WRITE]),
+ part_stat_read(hd, merges[STAT_WRITE]),
+ part_stat_read(hd, sectors[STAT_WRITE]),
+ jiffies_to_msecs(part_stat_read(hd, ticks[STAT_WRITE])),
inflight[0],
jiffies_to_msecs(part_stat_read(hd, io_ticks)),
- jiffies_to_msecs(part_stat_read(hd, time_in_queue))
+ jiffies_to_msecs(part_stat_read(hd, time_in_queue)),
+ part_stat_read(hd, ios[STAT_DISCARD]),
+ part_stat_read(hd, merges[STAT_DISCARD]),
+ part_stat_read(hd, sectors[STAT_DISCARD]),
+ jiffies_to_msecs(part_stat_read(hd, ticks[STAT_DISCARD]))
);
}
disk_part_iter_exit(&piter);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 3dcfd4ec0e11..5a8975a1201c 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -130,19 +130,24 @@ ssize_t part_stat_show(struct device *dev,
return sprintf(buf,
"%8lu %8lu %8llu %8u "
"%8lu %8lu %8llu %8u "
- "%8u %8u %8u"
+ "%8u %8u %8u "
+ "%8lu %8lu %8llu %8u"
"\n",
- part_stat_read(p, ios[READ]),
- part_stat_read(p, merges[READ]),
- (unsigned long long)part_stat_read(p, sectors[READ]),
- jiffies_to_msecs(part_stat_read(p, ticks[READ])),
- part_stat_read(p, ios[WRITE]),
- part_stat_read(p, merges[WRITE]),
- (unsigned long long)part_stat_read(p, sectors[WRITE]),
- jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
+ part_stat_read(p, ios[STAT_READ]),
+ part_stat_read(p, merges[STAT_READ]),
+ (unsigned long long)part_stat_read(p, sectors[STAT_READ]),
+ jiffies_to_msecs(part_stat_read(p, ticks[STAT_READ])),
+ part_stat_read(p, ios[STAT_WRITE]),
+ part_stat_read(p, merges[STAT_WRITE]),
+ (unsigned long long)part_stat_read(p, sectors[STAT_WRITE]),
+ jiffies_to_msecs(part_stat_read(p, ticks[STAT_WRITE])),
inflight[0],
jiffies_to_msecs(part_stat_read(p, io_ticks)),
- jiffies_to_msecs(part_stat_read(p, time_in_queue)));
+ jiffies_to_msecs(part_stat_read(p, time_in_queue)),
+ part_stat_read(p, ios[STAT_DISCARD]),
+ part_stat_read(p, merges[STAT_DISCARD]),
+ (unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]),
+ jiffies_to_msecs(part_stat_read(p, ticks[STAT_DISCARD])));
}
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
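
After the two hunks above, both /proc/diskstats rows and the per-partition sysfs "stat" file grow from 11 to 15 fields: the read and write groups (ios, merges, sectors, ticks), then in-flight, io_ticks, time_in_queue, then the appended discard group in the same four-field layout. A sketch of a reader; the device path is a hypothetical example:

    #include <stdio.h>

    int main(void)
    {
        unsigned long r[4], w[4], d[4], infl, io_ticks, tiq;
        FILE *f = fopen("/sys/block/sda/sda1/stat", "r");

        if (!f)
            return 1;
        if (fscanf(f, "%lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu"
                      " %lu %lu %lu %lu",
                   &r[0], &r[1], &r[2], &r[3],
                   &w[0], &w[1], &w[2], &w[3],
                   &infl, &io_ticks, &tiq,
                   &d[0], &d[1], &d[2], &d[3]) == 15)
            printf("discards: %lu ios, %lu sectors\n", d[0], d[2]);
        fclose(f);
        return 0;
    }
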
diff --git a/block/partitions/aix.c b/block/partitions/aix.c
index 007f95eea0e1..903f3ed175d0 100644
--- a/block/partitions/aix.c
+++ b/block/partitions/aix.c
@@ -178,7 +178,7 @@ int aix_partition(struct parsed_partitions *state)
u32 vgda_sector = 0;
u32 vgda_len = 0;
int numlvs = 0;
- struct pvd *pvd;
+ struct pvd *pvd = NULL;
struct lv_info {
unsigned short pps_per_lv;
unsigned short pps_found;
@@ -232,10 +232,11 @@ int aix_partition(struct parsed_partitions *state)
if (lvip[i].pps_per_lv)
foundlvs += 1;
}
+ /* pvd loops depend on n[].name and lvip[].pps_per_lv */
+ pvd = alloc_pvd(state, vgda_sector + 17);
}
put_dev_sector(sect);
}
- pvd = alloc_pvd(state, vgda_sector + 17);
if (pvd) {
int numpps = be16_to_cpu(pvd->pp_count);
int psn_part1 = be32_to_cpu(pvd->psn_part1);
@@ -282,10 +283,14 @@ int aix_partition(struct parsed_partitions *state)
next_lp_ix += 1;
}
for (i = 0; i < state->limit; i += 1)
- if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
+ if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
+ char tmp[sizeof(n[i].name) + 1]; // null char
+
+ snprintf(tmp, sizeof(tmp), "%s", n[i].name);
pr_warn("partition %s (%u pp's found) is "
"not contiguous\n",
- n[i].name, lvip[i].pps_found);
+ tmp, lvip[i].pps_found);
+ }
kfree(pvd);
}
kfree(n);
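
The aix.c warning fix exists because the LV name field is a fixed-width byte array with no guaranteed NUL terminator, so handing it straight to a %s format can read past its end. A standalone sketch of the bounce-buffer pattern (struct and contents are made up; "%.*s" is used here so the copy itself is also bounded):

    #include <stdio.h>

    struct lv { char name[8]; };

    int main(void)
    {
        /* Exactly 8 bytes, deliberately not NUL-terminated. */
        struct lv n = { { 'r', 'o', 'o', 't', 'v', 'g', '0', '1' } };
        char tmp[sizeof(n.name) + 1];    /* +1 for the terminating NUL */

        snprintf(tmp, sizeof(tmp), "%.*s", (int)sizeof(n.name), n.name);
        printf("partition %s is not contiguous\n", tmp);
        return 0;
    }
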
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index 0417937dfe99..16766f267559 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -830,7 +830,6 @@ static bool ldm_parse_dgr4 (const u8 *buffer, int buflen, struct vblk *vb)
{
char buf[64];
int r_objid, r_name, r_id1, r_id2, len;
- struct vblk_dgrp *dgrp;
BUG_ON (!buffer || !vb);
@@ -853,8 +852,6 @@ static bool ldm_parse_dgr4 (const u8 *buffer, int buflen, struct vblk *vb)
if (len != get_unaligned_be32(buffer + 0x14))
return false;
- dgrp = &vb->vblk.dgrp;
-
ldm_get_vstr (buffer + 0x18 + r_objid, buf, sizeof (buf));
return true;
}
diff --git a/block/t10-pi.c b/block/t10-pi.c
index a98db384048f..62aed77d0bb9 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -184,3 +184,113 @@ const struct blk_integrity_profile t10_pi_type3_ip = {
.verify_fn = t10_pi_type3_verify_ip,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
+
+/**
+ * t10_pi_prepare - prepare PI prior to submitting the request to the device
+ * @rq: request with PI that should be prepared
+ * @protection_type: PI type (Type 1/Type 2/Type 3)
+ *
+ * For Type 1/Type 2, the virtual start sector is the one that was
+ * originally submitted by the block layer for the ref_tag usage. Due to
+ * partitioning, MD/DM cloning, etc., the actual physical start sector is
+ * likely to be different. Remap protection information to match the
+ * physical LBA.
+ *
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+void t10_pi_prepare(struct request *rq, u8 protection_type)
+{
+ const int tuple_sz = rq->q->integrity.tuple_size;
+ u32 ref_tag = t10_pi_ref_tag(rq);
+ struct bio *bio;
+
+ if (protection_type == T10_PI_TYPE3_PROTECTION)
+ return;
+
+ __rq_for_each_bio(bio, rq) {
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ u32 virt = bip_get_seed(bip) & 0xffffffff;
+ struct bio_vec iv;
+ struct bvec_iter iter;
+
+ /* Already remapped? */
+ if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
+ break;
+
+ bip_for_each_vec(iv, bip, iter) {
+ void *p, *pmap;
+ unsigned int j;
+
+ pmap = kmap_atomic(iv.bv_page);
+ p = pmap + iv.bv_offset;
+ for (j = 0; j < iv.bv_len; j += tuple_sz) {
+ struct t10_pi_tuple *pi = p;
+
+ if (be32_to_cpu(pi->ref_tag) == virt)
+ pi->ref_tag = cpu_to_be32(ref_tag);
+ virt++;
+ ref_tag++;
+ p += tuple_sz;
+ }
+
+ kunmap_atomic(pmap);
+ }
+
+ bip->bip_flags |= BIP_MAPPED_INTEGRITY;
+ }
+}
+EXPORT_SYMBOL(t10_pi_prepare);
+
+/**
+ * t10_pi_complete - prepare PI prior to returning the request to the block layer
+ * @rq: request with PI that should be prepared
+ * @protection_type: PI type (Type 1/Type 2/Type 3)
+ * @intervals: total elements to prepare
+ *
+ * For Type 1/Type 2, the virtual start sector is the one that was
+ * originally submitted by the block layer for the ref_tag usage. Due to
+ * partitioning, MD/DM cloning, etc., the actual physical start sector is
+ * likely to be different. Since the physical start sector was submitted
+ * to the device, we should remap it back to virtual values expected by the
+ * block layer.
+ *
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+void t10_pi_complete(struct request *rq, u8 protection_type,
+ unsigned int intervals)
+{
+ const int tuple_sz = rq->q->integrity.tuple_size;
+ u32 ref_tag = t10_pi_ref_tag(rq);
+ struct bio *bio;
+
+ if (protection_type == T10_PI_TYPE3_PROTECTION)
+ return;
+
+ __rq_for_each_bio(bio, rq) {
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ u32 virt = bip_get_seed(bip) & 0xffffffff;
+ struct bio_vec iv;
+ struct bvec_iter iter;
+
+ bip_for_each_vec(iv, bip, iter) {
+ void *p, *pmap;
+ unsigned int j;
+
+ pmap = kmap_atomic(iv.bv_page);
+ p = pmap + iv.bv_offset;
+ for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
+ struct t10_pi_tuple *pi = p;
+
+ if (be32_to_cpu(pi->ref_tag) == ref_tag)
+ pi->ref_tag = cpu_to_be32(virt);
+ virt++;
+ ref_tag++;
+ intervals--;
+ p += tuple_sz;
+ }
+
+ kunmap_atomic(pmap);
+ }
+ }
+}
+EXPORT_SYMBOL(t10_pi_complete);
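
Both helpers above walk the PI tuples and rewrite each 32-bit reference tag between two consecutive LBA sequences: virtual-to-physical before submission, and the inverse on completion. An illustrative model of just the remap loop (types and names are ours, not the kernel's; real tuples are big-endian and walked through bio integrity vectors):

    #include <stdint.h>
    #include <stdio.h>

    struct pi_tuple { uint32_t ref_tag; };

    static void pi_remap(struct pi_tuple *t, unsigned n,
                         uint32_t from, uint32_t to)
    {
        for (unsigned i = 0; i < n; i++, from++, to++)
            if (t[i].ref_tag == from)    /* only touch matching tags */
                t[i].ref_tag = to;
    }

    int main(void)
    {
        struct pi_tuple t[4] = { {100}, {101}, {102}, {103} };

        pi_remap(t, 4, 100, 2048);    /* virt 100.. -> phys 2048.. */
        printf("%u %u\n", t[0].ref_tag, t[3].ref_tag);  /* 2048 2051 */
        pi_remap(t, 4, 2048, 100);    /* and back on completion */
        printf("%u %u\n", t[0].ref_tag, t[3].ref_tag);  /* 100 103 */
        return 0;
    }
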
diff --git a/certs/system_keyring.c b/certs/system_keyring.c
index 6251d1b27f0c..81728717523d 100644
--- a/certs/system_keyring.c
+++ b/certs/system_keyring.c
@@ -15,6 +15,7 @@
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/verification.h>
#include <keys/asymmetric-type.h>
#include <keys/system_keyring.h>
#include <crypto/pkcs7.h>
@@ -230,7 +231,7 @@ int verify_pkcs7_signature(const void *data, size_t len,
if (!trusted_keys) {
trusted_keys = builtin_trusted_keys;
- } else if (trusted_keys == (void *)1UL) {
+ } else if (trusted_keys == VERIFY_USE_SECONDARY_KEYRING) {
#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
trusted_keys = secondary_trusted_keys;
#else
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index d880a4897159..8882e90e868e 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -71,11 +71,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
-static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
- unsigned int bsize)
+static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
+ unsigned int n)
{
- unsigned int n = bsize;
-
for (;;) {
unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
@@ -87,17 +85,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
n -= len_this_page;
scatterwalk_start(&walk->out, sg_next(walk->out.sg));
}
-
- return bsize;
}
-static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
- unsigned int n)
+static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
+ unsigned int n)
{
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
-
- return n;
}
static int ablkcipher_walk_next(struct ablkcipher_request *req,
@@ -107,39 +101,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
struct ablkcipher_walk *walk, int err)
{
struct crypto_tfm *tfm = req->base.tfm;
- unsigned int nbytes = 0;
+ unsigned int n; /* bytes processed */
+ bool more;
- if (likely(err >= 0)) {
- unsigned int n = walk->nbytes - err;
+ if (unlikely(err < 0))
+ goto finish;
- if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
- n = ablkcipher_done_fast(walk, n);
- else if (WARN_ON(err)) {
- err = -EINVAL;
- goto err;
- } else
- n = ablkcipher_done_slow(walk, n);
+ n = walk->nbytes - err;
+ walk->total -= n;
+ more = (walk->total != 0);
- nbytes = walk->total - n;
- err = 0;
+ if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
+ ablkcipher_done_fast(walk, n);
+ } else {
+ if (WARN_ON(err)) {
+ /* unexpected case; didn't process all bytes */
+ err = -EINVAL;
+ goto finish;
+ }
+ ablkcipher_done_slow(walk, n);
}
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
-
-err:
- walk->total = nbytes;
- walk->nbytes = nbytes;
+ scatterwalk_done(&walk->in, 0, more);
+ scatterwalk_done(&walk->out, 1, more);
- if (nbytes) {
+ if (more) {
crypto_yield(req->base.flags);
return ablkcipher_walk_next(req, walk);
}
-
+ err = 0;
+finish:
+ walk->nbytes = 0;
if (walk->iv != req->info)
memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
kfree(walk->iv_buffer);
-
return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
@@ -373,6 +368,7 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
@@ -447,6 +443,7 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
diff --git a/crypto/aegis128.c b/crypto/aegis128.c
index 38271303ce16..c22f4414856d 100644
--- a/crypto/aegis128.c
+++ b/crypto/aegis128.c
@@ -429,7 +429,6 @@ static struct aead_alg crypto_aegis128_alg = {
.chunksize = AEGIS_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
index 0cc1a7525c85..b6fb21ebdc3e 100644
--- a/crypto/aegis128l.c
+++ b/crypto/aegis128l.c
@@ -121,7 +121,7 @@ static void crypto_aegis128l_ad(struct aegis_state *state,
(const union aegis_chunk *)src;
while (size >= AEGIS128L_CHUNK_SIZE) {
- crypto_aegis128l_update_a(state, src_chunk);
+ crypto_aegis128l_update_a(state, src_chunk);
size -= AEGIS128L_CHUNK_SIZE;
src_chunk += 1;
@@ -493,7 +493,6 @@ static struct aead_alg crypto_aegis128l_alg = {
.chunksize = AEGIS128L_CHUNK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
index a489d741d33a..11f0f8ec9c7c 100644
--- a/crypto/aegis256.c
+++ b/crypto/aegis256.c
@@ -444,7 +444,6 @@ static struct aead_alg crypto_aegis256_alg = {
.chunksize = AEGIS_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index c166f424871c..b053179e0bc5 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1071,7 +1071,7 @@ __poll_t af_alg_poll(struct file *file, struct socket *sock,
struct af_alg_ctx *ctx = ask->private;
__poll_t mask;
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, wait);
mask = 0;
if (!ctx->more || ctx->used)
diff --git a/crypto/api.c b/crypto/api.c
index 0ee632bba064..7aca9f86c5f3 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -229,7 +229,7 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
alg = crypto_alg_lookup(name, type, mask);
- if (!alg) {
+ if (!alg && !(mask & CRYPTO_NOLOAD)) {
request_module("crypto-%s", name);
if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c
index e284d9cb9237..5b2f6a2b5585 100644
--- a/crypto/asymmetric_keys/pkcs7_key_type.c
+++ b/crypto/asymmetric_keys/pkcs7_key_type.c
@@ -63,7 +63,7 @@ static int pkcs7_preparse(struct key_preparsed_payload *prep)
return verify_pkcs7_signature(NULL, 0,
prep->data, prep->datalen,
- (void *)1UL, usage,
+ VERIFY_USE_SECONDARY_KEYRING, usage,
pkcs7_view_content, prep);
}
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 01c0d4aa2563..f93abf13b5d4 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -70,19 +70,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
-static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
- unsigned int bsize)
+static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
+ unsigned int bsize)
{
u8 *addr;
addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
addr = blkcipher_get_spot(addr, bsize);
scatterwalk_copychunks(addr, &walk->out, bsize, 1);
- return bsize;
}
-static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
- unsigned int n)
+static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
+ unsigned int n)
{
if (walk->flags & BLKCIPHER_WALK_COPY) {
blkcipher_map_dst(walk);
@@ -96,49 +95,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
-
- return n;
}
int blkcipher_walk_done(struct blkcipher_desc *desc,
struct blkcipher_walk *walk, int err)
{
- unsigned int nbytes = 0;
+ unsigned int n; /* bytes processed */
+ bool more;
- if (likely(err >= 0)) {
- unsigned int n = walk->nbytes - err;
+ if (unlikely(err < 0))
+ goto finish;
- if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
- n = blkcipher_done_fast(walk, n);
- else if (WARN_ON(err)) {
- err = -EINVAL;
- goto err;
- } else
- n = blkcipher_done_slow(walk, n);
+ n = walk->nbytes - err;
+ walk->total -= n;
+ more = (walk->total != 0);
- nbytes = walk->total - n;
- err = 0;
+ if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
+ blkcipher_done_fast(walk, n);
+ } else {
+ if (WARN_ON(err)) {
+ /* unexpected case; didn't process all bytes */
+ err = -EINVAL;
+ goto finish;
+ }
+ blkcipher_done_slow(walk, n);
}
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
+ scatterwalk_done(&walk->in, 0, more);
+ scatterwalk_done(&walk->out, 1, more);
-err:
- walk->total = nbytes;
- walk->nbytes = nbytes;
-
- if (nbytes) {
+ if (more) {
crypto_yield(desc->flags);
return blkcipher_walk_next(desc, walk);
}
-
+ err = 0;
+finish:
+ walk->nbytes = 0;
if (walk->iv != desc->info)
memcpy(desc->info, walk->iv, walk->ivsize);
if (walk->buffer != walk->page)
kfree(walk->buffer);
if (walk->page)
free_page((unsigned long)walk->page);
-
return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
@@ -512,6 +510,7 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
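
The ablkcipher and blkcipher walk_done() rewrites above share one control shape: compute the bytes actually processed, decide once whether more work remains, and route every exit through a single finish label that zeroes the walk state. A minimal sketch of that shape (types, chunk size, and the driver loop are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct walk { unsigned nbytes, total; };

    static int walk_done(struct walk *w, int err)
    {
        unsigned n;
        bool more;

        if (err < 0)
            goto finish;          /* hard error: clean up and report */

        n = w->nbytes - err;      /* err >= 0 means bytes left over */
        w->total -= n;
        more = w->total != 0;

        if (more) {
            w->nbytes = w->total > 64 ? 64 : w->total;  /* next chunk */
            return 1;
        }
        err = 0;
    finish:
        w->nbytes = 0;            /* walk is over, success or not */
        return err;
    }

    int main(void)
    {
        struct walk w = { .nbytes = 64, .total = 100 };

        printf("%d\n", walk_done(&w, 0));    /* 1: 36 bytes remain */
        printf("%d\n", walk_done(&w, 0));    /* 0: finished */
        return 0;
    }
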
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 20ff2c746e0b..0959b268966c 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -104,7 +104,6 @@ static struct shash_alg digest_null = {
.final = null_final,
.base = {
.cra_name = "digest_null",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/dh.c b/crypto/dh.c
index 5659fe7f446d..09a44de4209d 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -16,14 +16,16 @@
#include <linux/mpi.h>
struct dh_ctx {
- MPI p;
- MPI g;
- MPI xa;
+ MPI p; /* Value is guaranteed to be set. */
+ MPI q; /* Value is optional. */
+ MPI g; /* Value is guaranteed to be set. */
+ MPI xa; /* Value is guaranteed to be set. */
};
static void dh_clear_ctx(struct dh_ctx *ctx)
{
mpi_free(ctx->p);
+ mpi_free(ctx->q);
mpi_free(ctx->g);
mpi_free(ctx->xa);
memset(ctx, 0, sizeof(*ctx));
@@ -60,6 +62,12 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
if (!ctx->p)
return -EINVAL;
+ if (params->q && params->q_size) {
+ ctx->q = mpi_read_raw_data(params->q, params->q_size);
+ if (!ctx->q)
+ return -EINVAL;
+ }
+
ctx->g = mpi_read_raw_data(params->g, params->g_size);
if (!ctx->g)
return -EINVAL;
@@ -93,6 +101,55 @@ err_clear_ctx:
return -EINVAL;
}
+/*
+ * SP800-56A public key verification:
+ *
+ * * If Q is provided as part of the domain parameters, a full validation
+ * according to SP800-56A section 5.6.2.3.1 is performed.
+ *
+ * * If Q is not provided, a partial validation according to SP800-56A section
+ * 5.6.2.3.2 is performed.
+ */
+static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y)
+{
+ if (unlikely(!ctx->p))
+ return -EINVAL;
+
+ /*
+ * Step 1: Verify that 2 <= y <= p - 2.
+ *
+ * The upper limit check is actually y < p instead of y < p - 1
+ * as the mpi_sub_ui function is not yet available.
+ */
+ if (mpi_cmp_ui(y, 1) < 1 || mpi_cmp(y, ctx->p) >= 0)
+ return -EINVAL;
+
+ /* Step 2: Verify that 1 = y^q mod p */
+ if (ctx->q) {
+ MPI val = mpi_alloc(0);
+ int ret;
+
+ if (!val)
+ return -ENOMEM;
+
+ ret = mpi_powm(val, y, ctx->q, ctx->p);
+
+ if (ret) {
+ mpi_free(val);
+ return ret;
+ }
+
+ ret = mpi_cmp_ui(val, 1);
+
+ mpi_free(val);
+
+ if (ret != 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int dh_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
@@ -115,6 +172,9 @@ static int dh_compute_value(struct kpp_request *req)
ret = -EINVAL;
goto err_free_val;
}
+ ret = dh_is_pubkey_valid(ctx, base);
+ if (ret)
+ goto err_free_base;
} else {
base = ctx->g;
}
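
Stated as math, the added dh_is_pubkey_valid() enforces the SP800-56A conditions on a peer public key y (a sketch of the checks the code performs; as its comment notes, the upper bound is implemented as y < p):

    \[
    2 \le y \le p - 2,
    \qquad\text{and, when } q \text{ is supplied:}\qquad
    y^{q} \equiv 1 \pmod{p}.
    \]
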
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
index 24fdb2ecaa85..edacda5f6a4d 100644
--- a/crypto/dh_helper.c
+++ b/crypto/dh_helper.c
@@ -14,10 +14,12 @@
#include <crypto/dh.h>
#include <crypto/kpp.h>
-#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 3 * sizeof(int))
+#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 4 * sizeof(int))
-static inline u8 *dh_pack_data(void *dst, const void *src, size_t size)
+static inline u8 *dh_pack_data(u8 *dst, u8 *end, const void *src, size_t size)
{
+ if (!dst || size > end - dst)
+ return NULL;
memcpy(dst, src, size);
return dst + size;
}
@@ -30,7 +32,7 @@ static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
static inline unsigned int dh_data_size(const struct dh *p)
{
- return p->key_size + p->p_size + p->g_size;
+ return p->key_size + p->p_size + p->q_size + p->g_size;
}
unsigned int crypto_dh_key_len(const struct dh *p)
@@ -42,25 +44,27 @@ EXPORT_SYMBOL_GPL(crypto_dh_key_len);
int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
{
u8 *ptr = buf;
+ u8 * const end = ptr + len;
struct kpp_secret secret = {
.type = CRYPTO_KPP_SECRET_TYPE_DH,
.len = len
};
- if (unlikely(!buf))
+ if (unlikely(!len))
return -EINVAL;
- if (len != crypto_dh_key_len(params))
+ ptr = dh_pack_data(ptr, end, &secret, sizeof(secret));
+ ptr = dh_pack_data(ptr, end, &params->key_size,
+ sizeof(params->key_size));
+ ptr = dh_pack_data(ptr, end, &params->p_size, sizeof(params->p_size));
+ ptr = dh_pack_data(ptr, end, &params->q_size, sizeof(params->q_size));
+ ptr = dh_pack_data(ptr, end, &params->g_size, sizeof(params->g_size));
+ ptr = dh_pack_data(ptr, end, params->key, params->key_size);
+ ptr = dh_pack_data(ptr, end, params->p, params->p_size);
+ ptr = dh_pack_data(ptr, end, params->q, params->q_size);
+ ptr = dh_pack_data(ptr, end, params->g, params->g_size);
+ if (ptr != end)
return -EINVAL;
-
- ptr = dh_pack_data(ptr, &secret, sizeof(secret));
- ptr = dh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
- ptr = dh_pack_data(ptr, &params->p_size, sizeof(params->p_size));
- ptr = dh_pack_data(ptr, &params->g_size, sizeof(params->g_size));
- ptr = dh_pack_data(ptr, params->key, params->key_size);
- ptr = dh_pack_data(ptr, params->p, params->p_size);
- dh_pack_data(ptr, params->g, params->g_size);
-
return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_encode_key);
@@ -79,6 +83,7 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
ptr = dh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
ptr = dh_unpack_data(&params->p_size, ptr, sizeof(params->p_size));
+ ptr = dh_unpack_data(&params->q_size, ptr, sizeof(params->q_size));
ptr = dh_unpack_data(&params->g_size, ptr, sizeof(params->g_size));
if (secret.len != crypto_dh_key_len(params))
return -EINVAL;
@@ -88,7 +93,7 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
* some drivers assume otherwise.
*/
if (params->key_size > params->p_size ||
- params->g_size > params->p_size)
+ params->g_size > params->p_size || params->q_size > params->p_size)
return -EINVAL;
/* Don't allocate memory. Set pointers to data within
@@ -96,7 +101,9 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
*/
params->key = (void *)ptr;
params->p = (void *)(ptr + params->key_size);
- params->g = (void *)(ptr + params->key_size + params->p_size);
+ params->q = (void *)(ptr + params->key_size + params->p_size);
+ params->g = (void *)(ptr + params->key_size + params->p_size +
+ params->q_size);
/*
* Don't permit 'p' to be 0. It's not a prime number, and it's subject
@@ -106,6 +113,10 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
if (memchr_inv(params->p, 0, params->p_size) == NULL)
return -EINVAL;
+ /* It is permissible to not provide Q. */
+ if (params->q_size == 0)
+ params->q = NULL;
+
return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
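
The hardened dh_pack_data() above threads a NULL cursor through the sequence: every step checks the remaining space, a failed step poisons the cursor, and one final comparison against `end` validates the whole encode. A self-contained sketch of the pattern (helper name and buffer sizes are illustrative):

    #include <stdio.h>
    #include <string.h>

    static unsigned char *pack(unsigned char *dst, const unsigned char *end,
                               const void *src, size_t size)
    {
        /* NULL propagates; an oversized copy never happens. */
        if (!dst || size > (size_t)(end - dst))
            return NULL;
        memcpy(dst, src, size);
        return dst + size;
    }

    int main(void)
    {
        unsigned char buf[8], *p = buf, * const end = buf + sizeof(buf);
        unsigned int a = 1, b = 2;

        p = pack(p, end, &a, sizeof(a));
        p = pack(p, end, &b, sizeof(b));
        p = pack(p, end, &b, sizeof(b));   /* overflows: returns NULL */
        printf("%s\n", p == end ? "ok" : "invalid length");
        return 0;    /* prints: invalid length */
    }
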
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 466a112a4446..bc52d9562611 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -261,8 +261,7 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg);
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inbuflen,
u8 *outbuf, u32 outlen);
-#define DRBG_CTR_NULL_LEN 128
-#define DRBG_OUTSCRATCHLEN DRBG_CTR_NULL_LEN
+#define DRBG_OUTSCRATCHLEN 256
/* BCC function for CTR DRBG as defined in 10.4.3 */
static int drbg_ctr_bcc(struct drbg_state *drbg,
@@ -555,8 +554,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
}
/* 10.2.1.5.2 step 4.1 */
- ret = drbg_kcapi_sym_ctr(drbg, drbg->ctr_null_value, DRBG_CTR_NULL_LEN,
- buf, len);
+ ret = drbg_kcapi_sym_ctr(drbg, NULL, 0, buf, len);
if (ret)
return ret;
@@ -1644,9 +1642,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
skcipher_request_free(drbg->ctr_req);
drbg->ctr_req = NULL;
- kfree(drbg->ctr_null_value_buf);
- drbg->ctr_null_value = NULL;
-
kfree(drbg->outscratchpadbuf);
drbg->outscratchpadbuf = NULL;
@@ -1697,15 +1692,6 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
crypto_req_done, &drbg->ctr_wait);
alignmask = crypto_skcipher_alignmask(sk_tfm);
- drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
- GFP_KERNEL);
- if (!drbg->ctr_null_value_buf) {
- drbg_fini_sym_kernel(drbg);
- return -ENOMEM;
- }
- drbg->ctr_null_value = (u8 *)PTR_ALIGN(drbg->ctr_null_value_buf,
- alignmask + 1);
-
drbg->outscratchpadbuf = kmalloc(DRBG_OUTSCRATCHLEN + alignmask,
GFP_KERNEL);
if (!drbg->outscratchpadbuf) {
@@ -1715,6 +1701,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
drbg->outscratchpad = (u8 *)PTR_ALIGN(drbg->outscratchpadbuf,
alignmask + 1);
+ sg_init_table(&drbg->sg_in, 1);
+ sg_init_one(&drbg->sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
+
return alignmask;
}
@@ -1743,17 +1732,25 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inlen,
u8 *outbuf, u32 outlen)
{
- struct scatterlist sg_in, sg_out;
+ struct scatterlist *sg_in = &drbg->sg_in, *sg_out = &drbg->sg_out;
+ u32 scratchpad_use = min_t(u32, outlen, DRBG_OUTSCRATCHLEN);
int ret;
- sg_init_one(&sg_in, inbuf, inlen);
- sg_init_one(&sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
+ if (inbuf) {
+ /* Use caller-provided input buffer */
+ sg_set_buf(sg_in, inbuf, inlen);
+ } else {
+ /* Use scratchpad for in-place operation */
+ inlen = scratchpad_use;
+ memset(drbg->outscratchpad, 0, scratchpad_use);
+ sg_set_buf(sg_in, drbg->outscratchpad, scratchpad_use);
+ }
while (outlen) {
u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN);
/* Output buffer may not be valid for SGL, use scratchpad */
- skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
+ skcipher_request_set_crypt(drbg->ctr_req, sg_in, sg_out,
cryptlen, drbg->V);
ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
&drbg->ctr_wait);
@@ -1763,6 +1760,7 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
crypto_init_wait(&drbg->ctr_wait);
memcpy(outbuf, drbg->outscratchpad, cryptlen);
+ memzero_explicit(drbg->outscratchpad, cryptlen);
outlen -= cryptlen;
outbuf += cryptlen;
@@ -1770,7 +1768,6 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
ret = 0;
out:
- memzero_explicit(drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
return ret;
}
#endif /* CONFIG_CRYPTO_DRBG_CTR */
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 815541309a95..8facafd67802 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -1019,6 +1019,36 @@ out:
return ret;
}
+/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
+static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+ struct ecc_point *pk)
+{
+ u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];
+
+ /* Check 1: Verify key is not the zero point. */
+ if (ecc_point_is_zero(pk))
+ return -EINVAL;
+
+ /* Check 2: Verify key is in the range [1, p-1]. */
+ if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
+ return -EINVAL;
+ if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
+ return -EINVAL;
+
+ /* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
+ vli_mod_square_fast(yy, pk->y, curve->p, pk->ndigits); /* y^2 */
+ vli_mod_square_fast(xxx, pk->x, curve->p, pk->ndigits); /* x^2 */
+ vli_mod_mult_fast(xxx, xxx, pk->x, curve->p, pk->ndigits); /* x^3 */
+ vli_mod_mult_fast(w, curve->a, pk->x, curve->p, pk->ndigits); /* a·x */
+ vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */
+ vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */
+ if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */
+ return -EINVAL;
+
+ return 0;
+
+}
+
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, const u64 *public_key,
u64 *secret)
@@ -1046,16 +1076,20 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
goto out;
}
+ ecc_swap_digits(public_key, pk->x, ndigits);
+ ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
+ ret = ecc_is_pubkey_valid_partial(curve, pk);
+ if (ret)
+ goto err_alloc_product;
+
+ ecc_swap_digits(private_key, priv, ndigits);
+
product = ecc_alloc_point(ndigits);
if (!product) {
ret = -ENOMEM;
goto err_alloc_product;
}
- ecc_swap_digits(public_key, pk->x, ndigits);
- ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
- ecc_swap_digits(private_key, priv, ndigits);
-
ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
ecc_swap_digits(product->x, secret, ndigits);
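
Stated as math, ecc_is_pubkey_valid_partial() accepts a public point P = (x, y) only when (a sketch mirroring the three checks in the code):

    \[
    P \neq \mathcal{O}, \qquad
    0 \le x < p, \quad 0 \le y < p, \qquad
    y^{2} \equiv x^{3} + a\,x + b \pmod{p}.
    \]

The full SP800-56A routine would additionally verify the point's order (that nP = O); restricting use to ephemeral keys is what permits skipping that step, hence "partial" validation.
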
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
index b80f45da829c..336ab1805639 100644
--- a/crypto/ecc_curve_defs.h
+++ b/crypto/ecc_curve_defs.h
@@ -13,9 +13,11 @@ struct ecc_curve {
struct ecc_point g;
u64 *p;
u64 *n;
+ u64 *a;
+ u64 *b;
};
-/* NIST P-192 */
+/* NIST P-192: a = p - 3 */
static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
0x188DA80EB03090F6ull };
static u64 nist_p192_g_y[] = { 0x73F977A11E794811ull, 0x631011ED6B24CDD5ull,
@@ -24,6 +26,10 @@ static u64 nist_p192_p[] = { 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFEull,
0xFFFFFFFFFFFFFFFFull };
static u64 nist_p192_n[] = { 0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull,
0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p192_a[] = { 0xFFFFFFFFFFFFFFFCull, 0xFFFFFFFFFFFFFFFEull,
+ 0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p192_b[] = { 0xFEB8DEECC146B9B1ull, 0x0FA7E9AB72243049ull,
+ 0x64210519E59C80E7ull };
static struct ecc_curve nist_p192 = {
.name = "nist_192",
.g = {
@@ -32,10 +38,12 @@ static struct ecc_curve nist_p192 = {
.ndigits = 3,
},
.p = nist_p192_p,
- .n = nist_p192_n
+ .n = nist_p192_n,
+ .a = nist_p192_a,
+ .b = nist_p192_b
};
-/* NIST P-256 */
+/* NIST P-256: a = p - 3 */
static u64 nist_p256_g_x[] = { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull,
0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull };
static u64 nist_p256_g_y[] = { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull,
@@ -44,6 +52,10 @@ static u64 nist_p256_p[] = { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull,
0x0000000000000000ull, 0xFFFFFFFF00000001ull };
static u64 nist_p256_n[] = { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull,
0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull };
+static u64 nist_p256_a[] = { 0xFFFFFFFFFFFFFFFCull, 0x00000000FFFFFFFFull,
+ 0x0000000000000000ull, 0xFFFFFFFF00000001ull };
+static u64 nist_p256_b[] = { 0x3BCE3C3E27D2604Bull, 0x651D06B0CC53B0F6ull,
+ 0xB3EBBD55769886BCull, 0x5AC635D8AA3A93E7ull };
static struct ecc_curve nist_p256 = {
.name = "nist_256",
.g = {
@@ -52,7 +64,9 @@ static struct ecc_curve nist_p256 = {
.ndigits = 4,
},
.p = nist_p256_p,
- .n = nist_p256_n
+ .n = nist_p256_n,
+ .a = nist_p256_a,
+ .b = nist_p256_b
};
#endif
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 1bffb3f712dd..d9f192b953b2 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -132,7 +132,6 @@ static struct shash_alg ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-generic",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 954a7064a179..393a782679c7 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -188,7 +188,7 @@ static int post_crypt(struct skcipher_request *req)
if (rctx->dst != sg) {
rctx->dst[0] = *sg;
sg_unmark_end(rctx->dst);
- scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
+ scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
}
rctx->dst[0].length -= offset - sg->offset;
rctx->dst[0].offset = offset;
@@ -265,7 +265,7 @@ static int pre_crypt(struct skcipher_request *req)
if (rctx->src != sg) {
rctx->src[0] = *sg;
sg_unmark_end(rctx->src);
- scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
+ scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
}
rctx->src[0].length -= offset - sg->offset;
rctx->src[0].offset = offset;
diff --git a/crypto/md4.c b/crypto/md4.c
index 810fefb0a007..9965ec40d9f9 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -217,7 +217,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct md4_ctx),
.base = {
.cra_name = "md4",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD4_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/md5.c b/crypto/md5.c
index f776ef43d621..94dd78144ba3 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -229,7 +229,6 @@ static struct shash_alg alg = {
.statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 6180b2557836..d057cf5ac4a8 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -514,7 +514,6 @@ static struct aead_alg crypto_morus1280_alg = {
.chunksize = MORUS1280_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct morus1280_ctx),
.cra_alignmask = 0,
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 5eede3749e64..1ca76e54281b 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -511,7 +511,6 @@ static struct aead_alg crypto_morus640_alg = {
.chunksize = MORUS640_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct morus640_ctx),
.cra_alignmask = 0,
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index b7a3a0613a30..47d3a6b83931 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -279,7 +279,6 @@ static struct shash_alg poly1305_alg = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-generic",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 40e053b97b69..5f4472256e27 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -303,7 +303,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd128_ctx),
.base = {
.cra_name = "rmd128",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD128_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 5f3e6ea35268..737645344d1c 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -347,7 +347,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd160_ctx),
.base = {
.cra_name = "rmd160",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD160_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index f50c025cc962..0e9d30676a01 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -49,7 +49,7 @@ struct rmd256_ctx {
static void rmd256_transform(u32 *state, const __le32 *in)
{
- u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd, tmp;
+ u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
/* Initialize left lane */
aa = state[0];
@@ -100,7 +100,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
/* Swap contents of "a" registers */
- tmp = aa; aa = aaa; aaa = tmp;
+ swap(aa, aaa);
/* round 2: left lane */
ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
@@ -139,7 +139,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
/* Swap contents of "b" registers */
- tmp = bb; bb = bbb; bbb = tmp;
+ swap(bb, bbb);
/* round 3: left lane */
ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
@@ -178,7 +178,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
/* Swap contents of "c" registers */
- tmp = cc; cc = ccc; ccc = tmp;
+ swap(cc, ccc);
/* round 4: left lane */
ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
@@ -217,7 +217,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
/* Swap contents of "d" registers */
- tmp = dd; dd = ddd; ddd = tmp;
+ swap(dd, ddd);
/* combine results */
state[0] += aa;
@@ -322,7 +322,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd256_ctx),
.base = {
.cra_name = "rmd256",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
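The open-coded three-assignment exchanges in rmd256 (and rmd320 below) are replaced by the kernel's swap() helper, which also lets the tmp variable disappear. For reference, the helper in <linux/kernel.h> is essentially:

	#define swap(a, b) \
		do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)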
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index e1315e4869e8..3ae1df5bb48c 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -53,7 +53,7 @@ struct rmd320_ctx {
static void rmd320_transform(u32 *state, const __le32 *in)
{
- u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp;
+ u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
/* Initialize left lane */
aa = state[0];
@@ -106,7 +106,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
/* Swap contents of "a" registers */
- tmp = aa; aa = aaa; aaa = tmp;
+ swap(aa, aaa);
/* round 2: left lane */
ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
@@ -145,7 +145,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
/* Swap contents of "b" registers */
- tmp = bb; bb = bbb; bbb = tmp;
+ swap(bb, bbb);
/* round 3: left lane */
ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
@@ -184,7 +184,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
/* Swap contents of "c" registers */
- tmp = cc; cc = ccc; ccc = tmp;
+ swap(cc, ccc);
/* round 4: left lane */
ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
@@ -223,7 +223,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
/* Swap contents of "d" registers */
- tmp = dd; dd = ddd; ddd = tmp;
+ swap(dd, ddd);
/* round 5: left lane */
ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
@@ -262,7 +262,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
/* Swap contents of "e" registers */
- tmp = ee; ee = eee; eee = tmp;
+ swap(ee, eee);
/* combine results */
state[0] += aa;
@@ -371,7 +371,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd320_ctx),
.base = {
.cra_name = "rmd320",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD320_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index c16c94f88733..d0b92c1cd6e9 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -91,7 +91,7 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
sg_init_table(dst, 2);
sg_set_page(dst, sg_page(src), src->length - len, src->offset + len);
- scatterwalk_crypto_chain(dst, sg_next(src), 0, 2);
+ scatterwalk_crypto_chain(dst, sg_next(src), 2);
return dst;
}
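scatterwalk_crypto_chain() loses its 'chain' argument, which every caller passed as 0. A sketch of the helper as it reads after this change, assuming the current <crypto/scatterwalk.h> definition:

	static inline void scatterwalk_crypto_chain(struct scatterlist *head,
						    struct scatterlist *sg,
						    int num)
	{
		if (sg)
			sg_chain(head, num, sg);	/* append the tail */
		else
			sg_mark_end(head);		/* nothing follows */
	}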
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 6877cbb9105f..2af64ef81f40 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -76,7 +76,7 @@ static struct shash_alg alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 8f9c47e1a96e..1e5ba6649e8d 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -271,7 +271,7 @@ static struct shash_alg sha256_algs[2] = { {
.base = {
.cra_name = "sha256",
.cra_driver_name= "sha256-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -285,7 +285,7 @@ static struct shash_alg sha256_algs[2] = { {
.base = {
.cra_name = "sha224",
.cra_driver_name= "sha224-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 7f6735d9003f..7ed98367d4fb 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -250,7 +250,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-224",
.base.cra_driver_name = "sha3-224-generic",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -261,7 +260,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-256",
.base.cra_driver_name = "sha3-256-generic",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -272,7 +270,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-384",
.base.cra_driver_name = "sha3-384-generic",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -283,7 +280,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-512",
.base.cra_driver_name = "sha3-512-generic",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index eba965d18bfc..4097cd555eb6 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -23,6 +23,28 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>
+const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = {
+ 0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38,
+ 0x4c, 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a,
+ 0x21, 0xfd, 0xb7, 0x11, 0x14, 0xbe, 0x07, 0x43,
+ 0x4c, 0x0c, 0xc7, 0xbf, 0x63, 0xf6, 0xe1, 0xda,
+ 0x27, 0x4e, 0xde, 0xbf, 0xe7, 0x6f, 0x65, 0xfb,
+ 0xd5, 0x1a, 0xd2, 0xf1, 0x48, 0x98, 0xb9, 0x5b
+};
+EXPORT_SYMBOL_GPL(sha384_zero_message_hash);
+
+const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE] = {
+ 0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd,
+ 0xf1, 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07,
+ 0xd6, 0x20, 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc,
+ 0x83, 0xf4, 0xa9, 0x21, 0xd3, 0x6c, 0xe9, 0xce,
+ 0x47, 0xd0, 0xd1, 0x3c, 0x5d, 0x85, 0xf2, 0xb0,
+ 0xff, 0x83, 0x18, 0xd2, 0x87, 0x7e, 0xec, 0x2f,
+ 0x63, 0xb9, 0x31, 0xbd, 0x47, 0x41, 0x7a, 0x81,
+ 0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, 0x3e
+};
+EXPORT_SYMBOL_GPL(sha512_zero_message_hash);
+
static inline u64 Ch(u64 x, u64 y, u64 z)
{
return z ^ (x & (y ^ z));
@@ -171,7 +193,7 @@ static struct shash_alg sha512_algs[2] = { {
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -185,7 +207,7 @@ static struct shash_alg sha512_algs[2] = { {
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
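The new sha384_zero_message_hash and sha512_zero_message_hash arrays are the digests of the empty message, exported so drivers can short-circuit zero-length requests. They are easy to spot-check from user space; a minimal sketch against OpenSSL (build with -lcrypto):

	#include <stdio.h>
	#include <string.h>
	#include <openssl/sha.h>

	int main(void)
	{
		unsigned char md[SHA384_DIGEST_LENGTH];
		/* first bytes of sha384_zero_message_hash above */
		static const unsigned char expect[] = { 0x38, 0xb0, 0x60, 0xa7 };

		SHA384((const unsigned char *)"", 0, md);	/* empty input */
		if (!memcmp(md, expect, sizeof(expect)))
			puts("sha384 zero-message hash matches");
		return 0;
	}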
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 0fe2a2923ad0..0bd8c6caa498 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
u8 *addr;
@@ -103,23 +103,24 @@ static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
addr = skcipher_get_spot(addr, bsize);
scatterwalk_copychunks(addr, &walk->out, bsize,
(walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
- return 0;
}
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
- unsigned int n = walk->nbytes - err;
- unsigned int nbytes;
-
- nbytes = walk->total - n;
-
- if (unlikely(err < 0)) {
- nbytes = 0;
- n = 0;
- } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
- SKCIPHER_WALK_SLOW |
- SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF)))) {
+ unsigned int n; /* bytes processed */
+ bool more;
+
+ if (unlikely(err < 0))
+ goto finish;
+
+ n = walk->nbytes - err;
+ walk->total -= n;
+ more = (walk->total != 0);
+
+ if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
+ SKCIPHER_WALK_SLOW |
+ SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF)))) {
unmap_src:
skcipher_unmap_src(walk);
} else if (walk->flags & SKCIPHER_WALK_DIFF) {
@@ -131,28 +132,28 @@ unmap_src:
skcipher_unmap_dst(walk);
} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
if (WARN_ON(err)) {
+ /* unexpected case; didn't process all bytes */
err = -EINVAL;
- nbytes = 0;
- } else
- n = skcipher_done_slow(walk, n);
+ goto finish;
+ }
+ skcipher_done_slow(walk, n);
+ goto already_advanced;
}
- if (err > 0)
- err = 0;
-
- walk->total = nbytes;
- walk->nbytes = nbytes;
-
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
+already_advanced:
+ scatterwalk_done(&walk->in, 0, more);
+ scatterwalk_done(&walk->out, 1, more);
- if (nbytes) {
+ if (more) {
crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
CRYPTO_TFM_REQ_MAY_SLEEP : 0);
return skcipher_walk_next(walk);
}
+ err = 0;
+finish:
+ walk->nbytes = 0;
/* Short-circuit for the common/fast path. */
if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
@@ -387,7 +388,6 @@ set_phys_lowmem:
}
return err;
}
-EXPORT_SYMBOL_GPL(skcipher_walk_next);
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
@@ -399,7 +399,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
unsigned size;
u8 *iv;
- aligned_bs = ALIGN(bs, alignmask);
+ aligned_bs = ALIGN(bs, alignmask + 1);
/* Minimum size to align buffer by alignmask. */
size = alignmask & ~a;
@@ -437,7 +437,6 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
}
walk->page = NULL;
- walk->nbytes = walk->total;
return skcipher_walk_next(walk);
}
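Two separate fixes land in skcipher.c. The skcipher_walk_done() rework guarantees walk->nbytes is zeroed on completion and the scatterwalk cursors advance exactly once; the caller contract is unchanged, i.e. callers still pass the number of bytes left unprocessed. A typical caller loop, sketched from the common driver pattern rather than taken from this patch (process_blocks is hypothetical):

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes) {
		/* consume whole blocks from walk.src into walk.dst */
		unsigned int done = process_blocks(&walk);

		err = skcipher_walk_done(&walk, walk.nbytes - done);
	}

The second fix corrects the IV buffer size: ALIGN(x, a) rounds x up to a multiple of a, and cra_alignmask is (alignment - 1), so rounding to 'alignmask' rather than 'alignmask + 1' under-sized the buffer whenever the mask was nonzero.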
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index 9e823d99f095..9a5c60f08aad 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -184,7 +184,6 @@ static struct shash_alg sm3_alg = {
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d5bcdd905007..bdde95e8d369 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -415,12 +415,14 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
}
- if (secs)
+ if (secs) {
ret = test_mb_aead_jiffies(data, enc, *b_size,
secs, num_mb);
- else
+ cond_resched();
+ } else {
ret = test_mb_aead_cycles(data, enc, *b_size,
num_mb);
+ }
if (ret) {
pr_err("%s() failed return code=%d\n", e, ret);
@@ -660,11 +662,13 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
*b_size + (enc ? 0 : authsize),
iv);
- if (secs)
+ if (secs) {
ret = test_aead_jiffies(req, enc, *b_size,
secs);
- else
+ cond_resched();
+ } else {
ret = test_aead_cycles(req, enc, *b_size);
+ }
if (ret) {
pr_err("%s() failed return code=%d\n", e, ret);
@@ -876,11 +880,13 @@ static void test_mb_ahash_speed(const char *algo, unsigned int secs,
i, speed[i].blen, speed[i].plen,
speed[i].blen / speed[i].plen);
- if (secs)
+ if (secs) {
ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
num_mb);
- else
+ cond_resched();
+ } else {
ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
+ }
if (ret) {
@@ -1103,12 +1109,14 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
ahash_request_set_crypt(req, sg, output, speed[i].plen);
- if (secs)
+ if (secs) {
ret = test_ahash_jiffies(req, speed[i].blen,
speed[i].plen, output, secs);
- else
+ cond_resched();
+ } else {
ret = test_ahash_cycles(req, speed[i].blen,
speed[i].plen, output);
+ }
if (ret) {
pr_err("hashing failed ret=%d\n", ret);
@@ -1367,13 +1375,15 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
iv);
}
- if (secs)
+ if (secs) {
ret = test_mb_acipher_jiffies(data, enc,
*b_size, secs,
num_mb);
- else
+ cond_resched();
+ } else {
ret = test_mb_acipher_cycles(data, enc,
*b_size, num_mb);
+ }
if (ret) {
pr_err("%s() failed flags=%x\n", e,
@@ -1581,12 +1591,14 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
- if (secs)
+ if (secs) {
ret = test_acipher_jiffies(req, enc,
*b_size, secs);
- else
+ cond_resched();
+ } else {
ret = test_acipher_cycles(req, enc,
*b_size);
+ }
if (ret) {
pr_err("%s() failed flags=%x\n", e,
@@ -1939,7 +1951,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
break;
case 109:
- ret += tcrypt_test("vmac(aes)");
+ ret += tcrypt_test("vmac64(aes)");
break;
case 111:
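Every speed test gains a cond_resched() after its wall-clock (secs) measurement so long benchmark runs stop triggering soft-lockup and RCU stall warnings; the cycle-count path is deliberately left untouched, since rescheduling inside it would distort the measurement. Reduced to a sketch (test_jiffies and test_cycles are hypothetical stand-ins for the per-mode helpers):

	static int run_one_speed_test(int secs)
	{
		int ret;

		if (secs) {
			ret = test_jiffies(secs);	/* wall-clock run */
			cond_resched();			/* yield between long runs */
		} else {
			ret = test_cycles();	/* cycle counts stay undisturbed */
		}
		return ret;
	}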
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 11e45352fd0b..a1d42245082a 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -259,9 +259,15 @@ out_nostate:
return ret;
}
+enum hash_test {
+ HASH_TEST_DIGEST,
+ HASH_TEST_FINAL,
+ HASH_TEST_FINUP
+};
+
static int __test_hash(struct crypto_ahash *tfm,
const struct hash_testvec *template, unsigned int tcount,
- bool use_digest, const int align_offset)
+ enum hash_test test_type, const int align_offset)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
size_t digest_size = crypto_ahash_digestsize(tfm);
@@ -332,14 +338,17 @@ static int __test_hash(struct crypto_ahash *tfm,
}
ahash_request_set_crypt(req, sg, result, template[i].psize);
- if (use_digest) {
+ switch (test_type) {
+ case HASH_TEST_DIGEST:
ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
if (ret) {
pr_err("alg: hash: digest failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
- } else {
+ break;
+
+ case HASH_TEST_FINAL:
memset(result, 1, digest_size);
ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
@@ -371,6 +380,29 @@ static int __test_hash(struct crypto_ahash *tfm,
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
+ break;
+
+ case HASH_TEST_FINUP:
+ memset(result, 1, digest_size);
+ ret = crypto_wait_req(crypto_ahash_init(req), &wait);
+ if (ret) {
+ pr_err("alg: hash: init failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ ret = ahash_guard_result(result, 1, digest_size);
+ if (ret) {
+ pr_err("alg: hash: init failed on test %d "
+ "for %s: used req->result\n", j, algo);
+ goto out;
+ }
+ ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
+ if (ret) {
+ pr_err("alg: hash: final failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ break;
}
if (memcmp(result, template[i].digest,
@@ -383,6 +415,9 @@ static int __test_hash(struct crypto_ahash *tfm,
}
}
+ if (test_type)
+ goto out;
+
j = 0;
for (i = 0; i < tcount; i++) {
/* alignment tests are only done with continuous buffers */
@@ -540,24 +575,24 @@ out_nobuf:
static int test_hash(struct crypto_ahash *tfm,
const struct hash_testvec *template,
- unsigned int tcount, bool use_digest)
+ unsigned int tcount, enum hash_test test_type)
{
unsigned int alignmask;
int ret;
- ret = __test_hash(tfm, template, tcount, use_digest, 0);
+ ret = __test_hash(tfm, template, tcount, test_type, 0);
if (ret)
return ret;
/* test unaligned buffers, check with one byte offset */
- ret = __test_hash(tfm, template, tcount, use_digest, 1);
+ ret = __test_hash(tfm, template, tcount, test_type, 1);
if (ret)
return ret;
alignmask = crypto_tfm_alg_alignmask(&tfm->base);
if (alignmask) {
/* Check if alignment mask for tfm is correctly set. */
- ret = __test_hash(tfm, template, tcount, use_digest,
+ ret = __test_hash(tfm, template, tcount, test_type,
alignmask + 1);
if (ret)
return ret;
@@ -1803,9 +1838,11 @@ static int __alg_test_hash(const struct hash_testvec *template,
return PTR_ERR(tfm);
}
- err = test_hash(tfm, template, tcount, true);
+ err = test_hash(tfm, template, tcount, HASH_TEST_DIGEST);
+ if (!err)
+ err = test_hash(tfm, template, tcount, HASH_TEST_FINAL);
if (!err)
- err = test_hash(tfm, template, tcount, false);
+ err = test_hash(tfm, template, tcount, HASH_TEST_FINUP);
crypto_free_ahash(tfm);
return err;
}
@@ -3478,10 +3515,10 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(tgr192_tv_template)
}
}, {
- .alg = "vmac(aes)",
+ .alg = "vmac64(aes)",
.test = alg_test_hash,
.suite = {
- .hash = __VECS(aes_vmac128_tv_template)
+ .hash = __VECS(vmac64_aes_tv_template)
}
}, {
.alg = "wp256",
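The bool use_digest parameter becomes a three-way enum so that finup() gets test coverage alongside digest() and init/update/final. The later 'if (test_type) goto out;' guard works because HASH_TEST_DIGEST is the first enumerator (0); the chunked and misaligned sub-tests that follow it therefore run only in digest mode. A more explicit spelling of the same guard would be:

	if (test_type != HASH_TEST_DIGEST)
		goto out;	/* chunked sub-tests only run in digest mode */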
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index b950aa234e43..173111c70746 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -641,15 +641,17 @@ static const struct kpp_testvec dh_tv_template[] = {
.secret =
#ifdef __LITTLE_ENDIAN
"\x01\x00" /* type */
- "\x11\x02" /* len */
+ "\x15\x02" /* len */
"\x00\x01\x00\x00" /* key_size */
"\x00\x01\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* q_size */
"\x01\x00\x00\x00" /* g_size */
#else
"\x00\x01" /* type */
- "\x02\x11" /* len */
+ "\x02\x15" /* len */
"\x00\x00\x01\x00" /* key_size */
"\x00\x00\x01\x00" /* p_size */
+ "\x00\x00\x00\x00" /* q_size */
"\x00\x00\x00\x01" /* g_size */
#endif
/* xa */
@@ -739,7 +741,7 @@ static const struct kpp_testvec dh_tv_template[] = {
"\xd3\x34\x49\xad\x64\xa6\xb1\xc0\x59\x28\x75\x60\xa7\x8a\xb0\x11"
"\x56\x89\x42\x74\x11\xf5\xf6\x5e\x6f\x16\x54\x6a\xb1\x76\x4d\x50"
"\x8a\x68\xc1\x5b\x82\xb9\x0d\x00\x32\x50\xed\x88\x87\x48\x92\x17",
- .secret_size = 529,
+ .secret_size = 533,
.b_public_size = 256,
.expected_a_public_size = 256,
.expected_ss_size = 256,
@@ -748,15 +750,17 @@ static const struct kpp_testvec dh_tv_template[] = {
.secret =
#ifdef __LITTLE_ENDIAN
"\x01\x00" /* type */
- "\x11\x02" /* len */
+ "\x15\x02" /* len */
"\x00\x01\x00\x00" /* key_size */
"\x00\x01\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* q_size */
"\x01\x00\x00\x00" /* g_size */
#else
"\x00\x01" /* type */
- "\x02\x11" /* len */
+ "\x02\x15" /* len */
"\x00\x00\x01\x00" /* key_size */
"\x00\x00\x01\x00" /* p_size */
+ "\x00\x00\x00\x00" /* q_size */
"\x00\x00\x00\x01" /* g_size */
#endif
/* xa */
@@ -846,7 +850,7 @@ static const struct kpp_testvec dh_tv_template[] = {
"\x5e\x5a\x64\xbd\xf6\x85\x04\xe8\x28\x6a\xac\xef\xce\x19\x8e\x9a"
"\xfe\x75\xc0\x27\x69\xe3\xb3\x7b\x21\xa7\xb1\x16\xa4\x85\x23\xee"
"\xb0\x1b\x04\x6e\xbd\xab\x16\xde\xfd\x86\x6b\xa9\x95\xd7\x0b\xfd",
- .secret_size = 529,
+ .secret_size = 533,
.b_public_size = 256,
.expected_a_public_size = 256,
.expected_ss_size = 256,
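The DH secret blobs grow by four bytes because the packed header gains a q_size field, which is why len moves from 0x211 (529) to 0x215 (533): a 20-byte header plus key (256) + p (256) + g (1) bytes. A sketch of the layout the vectors imply; the kernel packs and parses this in crypto/dh_helper.c, and the struct name here is illustrative only:

	struct dh_packed_secret {
		__u16 type;	/* 0x0001 */
		__u16 len;	/* 20 + key_size + p_size + q_size + g_size */
		__u32 key_size;	/* 256 */
		__u32 p_size;	/* 256 */
		__u32 q_size;	/* 0: the new field */
		__u32 g_size;	/* 1 */
		/* followed by the key, p, q and g blobs */
	} __attribute__((packed));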
@@ -4603,105 +4607,158 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
-static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
- '\x02', '\x03', '\x02', '\x02',
- '\x02', '\x04', '\x01', '\x07',
- '\x04', '\x01', '\x04', '\x03',};
-static const char vmac_string2[128] = {'a', 'b', 'c',};
-static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- };
+static const char vmac64_string1[144] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02',
+ '\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03',
+};
-static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
- 'i', 'j', 'l', 'm',
- 'o', 'p', 'r', 's',
- 't', 'u', 'w', 'x', 'z'};
+static const char vmac64_string2[144] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'a', 'b', 'c',
+};
-static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
- 'o', 'l', 'k', ']', '%',
- '9', '2', '7', '!', 'A'};
+static const char vmac64_string3[144] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
+ 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
+ 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
+ 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
+ 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
+};
-static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
- 'i', '!', '#', 'w', '0',
- 'z', '/', '4', 'A', 'n'};
+static const char vmac64_string4[33] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'b', 'c', 'e', 'f', 'i', 'j', 'l', 'm',
+ 'o', 'p', 'r', 's', 't', 'u', 'w', 'x',
+ 'z',
+};
-static const struct hash_testvec aes_vmac128_tv_template[] = {
- {
+static const char vmac64_string5[143] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'r', 'm', 'b', 't', 'c', 'o', 'l', 'k',
+ ']', '%', '9', '2', '7', '!', 'A',
+};
+
+static const char vmac64_string6[145] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'p', 't', '*', '7', 'l', 'i', '!', '#',
+ 'w', '0', 'z', '/', '4', 'A', 'n',
+};
+
+static const struct hash_testvec vmac64_aes_tv_template[] = {
+ { /* draft-krovetz-vmac-01 test vector 1 */
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = "\0\0\0\0\0\0\0\0bcdefghi",
+ .psize = 16,
+ .digest = "\x25\x76\xbe\x1c\x56\xd8\xb8\x1b",
+ }, { /* draft-krovetz-vmac-01 test vector 2 */
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = "\0\0\0\0\0\0\0\0bcdefghiabc",
+ .psize = 19,
+ .digest = "\x2d\x37\x6c\xf5\xb1\x81\x3c\xe5",
+ }, { /* draft-krovetz-vmac-01 test vector 3 */
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
+ .psize = 64,
+ .digest = "\xe8\x42\x1f\x61\xd5\x73\xd2\x98",
+ }, { /* draft-krovetz-vmac-01 test vector 4 */
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
+ .psize = 316,
+ .digest = "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe",
+ .tap = { 1, 100, 200, 15 },
+ .np = 4,
+ }, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = NULL,
- .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54",
- .psize = 0,
.ksize = 16,
+ .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .psize = 16,
+ .digest = "\x54\x7b\xa4\x77\x35\x80\x58\x07",
}, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = vmac_string1,
- .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
- .psize = 128,
- .ksize = 16,
+ .ksize = 16,
+ .plaintext = vmac64_string1,
+ .psize = sizeof(vmac64_string1),
+ .digest = "\xa1\x8c\x68\xae\xd3\x3c\xf5\xce",
}, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = vmac_string2,
- .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
- .psize = 128,
- .ksize = 16,
+ .ksize = 16,
+ .plaintext = vmac64_string2,
+ .psize = sizeof(vmac64_string2),
+ .digest = "\x2d\x14\xbd\x81\x73\xb0\x27\xc9",
}, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = vmac_string3,
- .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
- .psize = 128,
- .ksize = 16,
+ .ksize = 16,
+ .plaintext = vmac64_string3,
+ .psize = sizeof(vmac64_string3),
+ .digest = "\x19\x0b\x47\x98\x8c\x95\x1a\x8d",
}, {
.key = "abcdefghijklmnop",
- .plaintext = NULL,
- .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
- .psize = 0,
.ksize = 16,
+ .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .psize = 16,
+ .digest = "\x84\x8f\x55\x9e\x26\xa1\x89\x3b",
}, {
- .key = "abcdefghijklmnop",
- .plaintext = vmac_string1,
- .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
- .psize = 128,
- .ksize = 16,
- }, {
- .key = "abcdefghijklmnop",
- .plaintext = vmac_string2,
- .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
- .psize = 128,
- .ksize = 16,
- }, {
- .key = "abcdefghijklmnop",
- .plaintext = vmac_string3,
- .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
- .psize = 128,
- .ksize = 16,
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .plaintext = vmac_string4,
- .digest = "\xab\xa5\x0f\xea\x42\x4e\xa1\x5f",
- .psize = sizeof(vmac_string4),
- .ksize = 16,
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .plaintext = vmac_string5,
- .digest = "\x25\x31\x98\xbc\x1d\xe8\x67\x60",
- .psize = sizeof(vmac_string5),
- .ksize = 16,
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .plaintext = vmac_string6,
- .digest = "\xc4\xae\x9b\x47\x95\x65\xeb\x41",
- .psize = sizeof(vmac_string6),
- .ksize = 16,
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = vmac64_string1,
+ .psize = sizeof(vmac64_string1),
+ .digest = "\xc2\x74\x8d\xf6\xb0\xab\x5e\xab",
+ }, {
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = vmac64_string2,
+ .psize = sizeof(vmac64_string2),
+ .digest = "\xdf\x09\x7b\x3d\x42\x68\x15\x11",
+ }, {
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = vmac64_string3,
+ .psize = sizeof(vmac64_string3),
+ .digest = "\xd4\xfa\x8f\xed\xe1\x8f\x32\x8b",
+ }, {
+ .key = "a09b5cd!f#07K\x00\x00\x00",
+ .ksize = 16,
+ .plaintext = vmac64_string4,
+ .psize = sizeof(vmac64_string4),
+ .digest = "\x5f\xa1\x4e\x42\xea\x0f\xa5\xab",
+ }, {
+ .key = "a09b5cd!f#07K\x00\x00\x00",
+ .ksize = 16,
+ .plaintext = vmac64_string5,
+ .psize = sizeof(vmac64_string5),
+ .digest = "\x60\x67\xe8\x1d\xbc\x98\x31\x25",
+ }, {
+ .key = "a09b5cd!f#07K\x00\x00\x00",
+ .ksize = 16,
+ .plaintext = vmac64_string6,
+ .psize = sizeof(vmac64_string6),
+ .digest = "\x41\xeb\x65\x95\x47\x9b\xae\xc4",
},
};
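The rewritten vectors document the two user-visible changes: vmac64 consumes the first VMAC_NONCEBYTES (16) bytes of the message as the per-message nonce, which is why every plaintext above now starts with a 16-byte block, and the tag is emitted big-endian (compare the byte-reversed digests). A usage sketch over the shash API; error handling is abbreviated and the function name is illustrative:

	static int vmac64_digest(const u8 key[16], const u8 nonce[16],
				 const u8 *msg, unsigned int len, u8 tag[8])
	{
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("vmac64(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		err = crypto_shash_setkey(tfm, key, 16);
		if (!err) {
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			err = crypto_shash_init(desc) ?:
			      crypto_shash_update(desc, nonce, 16) ?: /* nonce first */
			      crypto_shash_update(desc, msg, len) ?:
			      crypto_shash_final(desc, tag);
		}
		crypto_free_shash(tfm);
		return err;
	}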
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 321bc6ff2a9d..022d3dd76c3b 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -636,7 +636,6 @@ static struct shash_alg tgr_algs[3] = { {
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr192",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -648,7 +647,6 @@ static struct shash_alg tgr_algs[3] = { {
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr160",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -660,7 +658,6 @@ static struct shash_alg tgr_algs[3] = { {
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr128",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/vmac.c b/crypto/vmac.c
index df76a816cfb2..5f436dfdfc61 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -1,6 +1,10 @@
/*
- * Modified to interface to the Linux kernel
+ * VMAC: Message Authentication Code using Universal Hashing
+ *
+ * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
+ *
* Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2018, Google Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -16,14 +20,15 @@
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
-/* --------------------------------------------------------------------------
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Please send bug reports to the authors.
- * Last modified: 17 APR 08, 1700 PDT
- * ----------------------------------------------------------------------- */
+/*
+ * Derived from:
+ * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+ * This implementation is hereby placed in the public domain.
+ * The authors offer no warranty. Use at your own risk.
+ * Last modified: 17 APR 08, 1700 PDT
+ */
+#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
@@ -31,10 +36,42 @@
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
-#include <crypto/vmac.h>
#include <crypto/internal/hash.h>
/*
+ * User definable settings.
+ */
+#define VMAC_TAG_LEN 64
+#define VMAC_KEY_SIZE 128 /* Must be 128, 192 or 256 */
+#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
+#define VMAC_NHBYTES 128 /* Must be 2^i for some 3 < i < 13; standard = 128 */
+#define VMAC_NONCEBYTES 16
+
+/* per-transform (per-key) context */
+struct vmac_tfm_ctx {
+ struct crypto_cipher *cipher;
+ u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
+ u64 polykey[2*VMAC_TAG_LEN/64];
+ u64 l3key[2*VMAC_TAG_LEN/64];
+};
+
+/* per-request context */
+struct vmac_desc_ctx {
+ union {
+ u8 partial[VMAC_NHBYTES]; /* partial block */
+ __le64 partial_words[VMAC_NHBYTES / 8];
+ };
+ unsigned int partial_size; /* size of the partial block */
+ bool first_block_processed;
+ u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
+ union {
+ u8 bytes[VMAC_NONCEBYTES];
+ __be64 pads[VMAC_NONCEBYTES / 8];
+ } nonce;
+ unsigned int nonce_size; /* nonce bytes filled so far */
+};
+
+/*
* Constants and masks
*/
#define UINT64_C(x) x##ULL
@@ -318,13 +355,6 @@ static void poly_step_func(u64 *ahi, u64 *alo,
} while (0)
#endif
-static void vhash_abort(struct vmac_ctx *ctx)
-{
- ctx->polytmp[0] = ctx->polykey[0] ;
- ctx->polytmp[1] = ctx->polykey[1] ;
- ctx->first_block_processed = 0;
-}
-
static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
u64 rh, rl, t, z = 0;
@@ -364,280 +394,227 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
return rl;
}
-static void vhash_update(const unsigned char *m,
- unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
- struct vmac_ctx *ctx)
+/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
+static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
+ struct vmac_desc_ctx *dctx,
+ const __le64 *mptr, unsigned int blocks)
{
- u64 rh, rl, *mptr;
- const u64 *kptr = (u64 *)ctx->nhkey;
- int i;
- u64 ch, cl;
- u64 pkh = ctx->polykey[0];
- u64 pkl = ctx->polykey[1];
-
- if (!mbytes)
- return;
-
- BUG_ON(mbytes % VMAC_NHBYTES);
-
- mptr = (u64 *)m;
- i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
-
- ch = ctx->polytmp[0];
- cl = ctx->polytmp[1];
-
- if (!ctx->first_block_processed) {
- ctx->first_block_processed = 1;
+ const u64 *kptr = tctx->nhkey;
+ const u64 pkh = tctx->polykey[0];
+ const u64 pkl = tctx->polykey[1];
+ u64 ch = dctx->polytmp[0];
+ u64 cl = dctx->polytmp[1];
+ u64 rh, rl;
+
+ if (!dctx->first_block_processed) {
+ dctx->first_block_processed = true;
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
ADD128(ch, cl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
- i--;
+ blocks--;
}
- while (i--) {
+ while (blocks--) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
}
- ctx->polytmp[0] = ch;
- ctx->polytmp[1] = cl;
+ dctx->polytmp[0] = ch;
+ dctx->polytmp[1] = cl;
}
-static u64 vhash(unsigned char m[], unsigned int mbytes,
- u64 *tagl, struct vmac_ctx *ctx)
+static int vmac_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
{
- u64 rh, rl, *mptr;
- const u64 *kptr = (u64 *)ctx->nhkey;
- int i, remaining;
- u64 ch, cl;
- u64 pkh = ctx->polykey[0];
- u64 pkl = ctx->polykey[1];
-
- mptr = (u64 *)m;
- i = mbytes / VMAC_NHBYTES;
- remaining = mbytes % VMAC_NHBYTES;
-
- if (ctx->first_block_processed) {
- ch = ctx->polytmp[0];
- cl = ctx->polytmp[1];
- } else if (i) {
- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
- ch &= m62;
- ADD128(ch, cl, pkh, pkl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- i--;
- } else if (remaining) {
- nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
- ch &= m62;
- ADD128(ch, cl, pkh, pkl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- goto do_l3;
- } else {/* Empty String */
- ch = pkh; cl = pkl;
- goto do_l3;
- }
-
- while (i--) {
- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
- rh &= m62;
- poly_step(ch, cl, pkh, pkl, rh, rl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- }
- if (remaining) {
- nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
- rh &= m62;
- poly_step(ch, cl, pkh, pkl, rh, rl);
- }
-
-do_l3:
- vhash_abort(ctx);
- remaining *= 8;
- return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
-}
+ struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+ __be64 out[2];
+ u8 in[16] = { 0 };
+ unsigned int i;
+ int err;
-static u64 vmac(unsigned char m[], unsigned int mbytes,
- const unsigned char n[16], u64 *tagl,
- struct vmac_ctx_t *ctx)
-{
- u64 *in_n, *out_p;
- u64 p, h;
- int i;
-
- in_n = ctx->__vmac_ctx.cached_nonce;
- out_p = ctx->__vmac_ctx.cached_aes;
-
- i = n[15] & 1;
- if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
- in_n[0] = *(u64 *)(n);
- in_n[1] = *(u64 *)(n+8);
- ((unsigned char *)in_n)[15] &= 0xFE;
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out_p, (unsigned char *)in_n);
-
- ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
+ if (keylen != VMAC_KEY_LEN) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
}
- p = be64_to_cpup(out_p + i);
- h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
- return le64_to_cpu(p + h);
-}
-static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
-{
- u64 in[2] = {0}, out[2];
- unsigned i;
- int err = 0;
-
- err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
+ err = crypto_cipher_setkey(tctx->cipher, key, keylen);
if (err)
return err;
/* Fill nh key */
- ((unsigned char *)in)[0] = 0x80;
- for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out, (unsigned char *)in);
- ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
- ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
- ((unsigned char *)in)[15] += 1;
+ in[0] = 0x80;
+ for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
+ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+ tctx->nhkey[i] = be64_to_cpu(out[0]);
+ tctx->nhkey[i+1] = be64_to_cpu(out[1]);
+ in[15]++;
}
/* Fill poly key */
- ((unsigned char *)in)[0] = 0xC0;
- in[1] = 0;
- for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out, (unsigned char *)in);
- ctx->__vmac_ctx.polytmp[i] =
- ctx->__vmac_ctx.polykey[i] =
- be64_to_cpup(out) & mpoly;
- ctx->__vmac_ctx.polytmp[i+1] =
- ctx->__vmac_ctx.polykey[i+1] =
- be64_to_cpup(out+1) & mpoly;
- ((unsigned char *)in)[15] += 1;
+ in[0] = 0xC0;
+ in[15] = 0;
+ for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
+ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+ tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
+ tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
+ in[15]++;
}
/* Fill ip key */
- ((unsigned char *)in)[0] = 0xE0;
- in[1] = 0;
- for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
+ in[0] = 0xE0;
+ in[15] = 0;
+ for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
do {
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out, (unsigned char *)in);
- ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
- ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
- ((unsigned char *)in)[15] += 1;
- } while (ctx->__vmac_ctx.l3key[i] >= p64
- || ctx->__vmac_ctx.l3key[i+1] >= p64);
+ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+ tctx->l3key[i] = be64_to_cpu(out[0]);
+ tctx->l3key[i+1] = be64_to_cpu(out[1]);
+ in[15]++;
+ } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
}
- /* Invalidate nonce/aes cache and reset other elements */
- ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
- ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
- ctx->__vmac_ctx.first_block_processed = 0;
-
- return err;
+ return 0;
}
-static int vmac_setkey(struct crypto_shash *parent,
- const u8 *key, unsigned int keylen)
+static int vmac_init(struct shash_desc *desc)
{
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
- if (keylen != VMAC_KEY_LEN) {
- crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- return vmac_set_key((u8 *)key, ctx);
-}
-
-static int vmac_init(struct shash_desc *pdesc)
-{
+ dctx->partial_size = 0;
+ dctx->first_block_processed = false;
+ memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
+ dctx->nonce_size = 0;
return 0;
}
-static int vmac_update(struct shash_desc *pdesc, const u8 *p,
- unsigned int len)
+static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
- struct crypto_shash *parent = pdesc->tfm;
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
- int expand;
- int min;
-
- expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
- VMAC_NHBYTES - ctx->partial_size : 0;
-
- min = len < expand ? len : expand;
+ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+ unsigned int n;
+
+ /* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
+ if (dctx->nonce_size < VMAC_NONCEBYTES) {
+ n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
+ memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
+ dctx->nonce_size += n;
+ p += n;
+ len -= n;
+ }
- memcpy(ctx->partial + ctx->partial_size, p, min);
- ctx->partial_size += min;
+ if (dctx->partial_size) {
+ n = min(len, VMAC_NHBYTES - dctx->partial_size);
+ memcpy(&dctx->partial[dctx->partial_size], p, n);
+ dctx->partial_size += n;
+ p += n;
+ len -= n;
+ if (dctx->partial_size == VMAC_NHBYTES) {
+ vhash_blocks(tctx, dctx, dctx->partial_words, 1);
+ dctx->partial_size = 0;
+ }
+ }
- if (len < expand)
- return 0;
+ if (len >= VMAC_NHBYTES) {
+ n = round_down(len, VMAC_NHBYTES);
+ /* TODO: 'p' may be misaligned here */
+ vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
+ p += n;
+ len -= n;
+ }
- vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
- ctx->partial_size = 0;
+ if (len) {
+ memcpy(dctx->partial, p, len);
+ dctx->partial_size = len;
+ }
- len -= expand;
- p += expand;
+ return 0;
+}
- if (len % VMAC_NHBYTES) {
- memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
- len % VMAC_NHBYTES);
- ctx->partial_size = len % VMAC_NHBYTES;
+static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
+ struct vmac_desc_ctx *dctx)
+{
+ unsigned int partial = dctx->partial_size;
+ u64 ch = dctx->polytmp[0];
+ u64 cl = dctx->polytmp[1];
+
+ /* L1 and L2-hash the final block if needed */
+ if (partial) {
+ /* Zero-pad to next 128-bit boundary */
+ unsigned int n = round_up(partial, 16);
+ u64 rh, rl;
+
+ memset(&dctx->partial[partial], 0, n - partial);
+ nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
+ rh &= m62;
+ if (dctx->first_block_processed)
+ poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
+ rh, rl);
+ else
+ ADD128(ch, cl, rh, rl);
}
- vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
-
- return 0;
+ /* L3-hash the 128-bit output of L2-hash */
+ return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
}
-static int vmac_final(struct shash_desc *pdesc, u8 *out)
+static int vmac_final(struct shash_desc *desc, u8 *out)
{
- struct crypto_shash *parent = pdesc->tfm;
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
- vmac_t mac;
- u8 nonce[16] = {};
-
- /* vmac() ends up accessing outside the array bounds that
- * we specify. In appears to access up to the next 2-word
- * boundary. We'll just be uber cautious and zero the
- * unwritten bytes in the buffer.
+ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+ int index;
+ u64 hash, pad;
+
+ if (dctx->nonce_size != VMAC_NONCEBYTES)
+ return -EINVAL;
+
+ /*
+ * The VMAC specification requires a nonce at least 1 bit shorter than
+ * the block cipher's block length, so we actually only accept a 127-bit
+ * nonce. We define the unused bit to be the first one and require that
+ * it be 0, so the needed prepending of a 0 bit is implicit.
*/
- if (ctx->partial_size) {
- memset(ctx->partial + ctx->partial_size, 0,
- VMAC_NHBYTES - ctx->partial_size);
- }
- mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
- memcpy(out, &mac, sizeof(vmac_t));
- memzero_explicit(&mac, sizeof(vmac_t));
- memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
- ctx->partial_size = 0;
+ if (dctx->nonce.bytes[0] & 0x80)
+ return -EINVAL;
+
+ /* Finish calculating the VHASH of the message */
+ hash = vhash_final(tctx, dctx);
+
+ /* Generate pseudorandom pad by encrypting the nonce */
+ BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
+ index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
+ dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
+ crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
+ dctx->nonce.bytes);
+ pad = be64_to_cpu(dctx->nonce.pads[index]);
+
+ /* The VMAC is the sum of VHASH and the pseudorandom pad */
+ put_unaligned_be64(hash + pad, out);
return 0;
}
static int vmac_init_tfm(struct crypto_tfm *tfm)
{
- struct crypto_cipher *cipher;
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+ struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- ctx->child = cipher;
+ tctx->cipher = cipher;
return 0;
}
static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
- struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
- crypto_free_cipher(ctx->child);
+ struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_cipher(tctx->cipher);
}
static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -655,7 +632,11 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
if (IS_ERR(alg))
return PTR_ERR(alg);
- inst = shash_alloc_instance("vmac", alg);
+ err = -EINVAL;
+ if (alg->cra_blocksize != VMAC_NONCEBYTES)
+ goto out_put_alg;
+
+ inst = shash_alloc_instance(tmpl->name, alg);
err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
@@ -670,11 +651,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
- inst->alg.digestsize = sizeof(vmac_t);
- inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
+ inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
inst->alg.base.cra_init = vmac_init_tfm;
inst->alg.base.cra_exit = vmac_exit_tfm;
+ inst->alg.descsize = sizeof(struct vmac_desc_ctx);
+ inst->alg.digestsize = VMAC_TAG_LEN / 8;
inst->alg.init = vmac_init;
inst->alg.update = vmac_update;
inst->alg.final = vmac_final;
@@ -691,8 +673,8 @@ out_put_alg:
return err;
}
-static struct crypto_template vmac_tmpl = {
- .name = "vmac",
+static struct crypto_template vmac64_tmpl = {
+ .name = "vmac64",
.create = vmac_create,
.free = shash_free_instance,
.module = THIS_MODULE,
@@ -700,12 +682,12 @@ static struct crypto_template vmac_tmpl = {
static int __init vmac_module_init(void)
{
- return crypto_register_template(&vmac_tmpl);
+ return crypto_register_template(&vmac64_tmpl);
}
static void __exit vmac_module_exit(void)
{
- crypto_unregister_template(&vmac_tmpl);
+ crypto_unregister_template(&vmac64_tmpl);
}
module_init(vmac_module_init);
@@ -713,4 +695,4 @@ module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
-MODULE_ALIAS_CRYPTO("vmac");
+MODULE_ALIAS_CRYPTO("vmac64");
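The rewrite moves all key material into a per-tfm context and all per-message state into the shash descriptor, making the transform reentrant, and it retires the old code's fixed all-zeroes nonce (VMAC is only secure when every message uses a distinct nonce). The final tag then reduces to VHASH(message) plus one 64-bit half of the encrypted nonce, selected by the nonce's low bit. A sketch; aes_encrypt_block is a hypothetical stand-in for crypto_cipher_encrypt_one():

	static u64 vmac64_tag(u64 vhash, u8 nonce[16],
			      void (*aes_encrypt_block)(u8 out[16],
							const u8 in[16]))
	{
		u8 pad[16];
		int index = nonce[15] & 1;	/* low bit picks a 64-bit half */

		nonce[15] &= ~1;		/* encrypt with that bit cleared */
		aes_encrypt_block(pad, nonce);
		return vhash + get_unaligned_be64(&pad[index * 8]);
	}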
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 7ee5a043a988..149e577fb772 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1127,7 +1127,6 @@ static struct shash_alg wp_algs[3] = { {
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp512",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1139,7 +1138,6 @@ static struct shash_alg wp_algs[3] = { {
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp384",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1151,7 +1149,6 @@ static struct shash_alg wp_algs[3] = { {
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp256",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/xts.c b/crypto/xts.c
index 12284183bd20..ccf55fbb8bc2 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -138,7 +138,7 @@ static int post_crypt(struct skcipher_request *req)
if (rctx->dst != sg) {
rctx->dst[0] = *sg;
sg_unmark_end(rctx->dst);
- scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
+ scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
}
rctx->dst[0].length -= offset - sg->offset;
rctx->dst[0].offset = offset;
@@ -204,7 +204,7 @@ static int pre_crypt(struct skcipher_request *req)
if (rctx->src != sg) {
rctx->src[0] = *sg;
sg_unmark_end(rctx->src);
- scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
+ scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
}
rctx->src[0].length -= offset - sg->offset;
rctx->src[0].offset = offset;
diff --git a/drivers/Makefile b/drivers/Makefile
index 24cd47014657..a6abd7a856c6 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -76,7 +76,7 @@ obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
-obj-$(CONFIG_SCSI) += scsi/
+obj-y += scsi/
obj-y += nvme/
obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_TARGET_CORE) += target/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b533eeb6139d..4a46344bf0e3 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -5,11 +5,10 @@
menuconfig ACPI
bool "ACPI (Advanced Configuration and Power Interface) Support"
- depends on !IA64_HP_SIM
- depends on IA64 || X86 || ARM64
+ depends on ARCH_SUPPORTS_ACPI
depends on PCI
select PNP
- default y if (IA64 || X86)
+ default y if X86
help
Advanced Configuration and Power Interface (ACPI) support for
Linux requires an ACPI-compliant platform (hardware/firmware),
@@ -41,6 +40,9 @@ menuconfig ACPI
<http://www.acpi.info>
<http://www.uefi.org/acpi/specs>
+config ARCH_SUPPORTS_ACPI
+ bool
+
if ACPI
config ACPI_LEGACY_TABLES_LOOKUP
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 51c386bf230d..c5367bf5487f 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -165,7 +165,6 @@ struct acpi_namespace_node {
#define ANOBJ_EVALUATED 0x20 /* Set on first evaluation of node */
#define ANOBJ_ALLOCATED_BUFFER 0x40 /* Method AML buffer is dynamic (install_method) */
-#define IMPLICIT_EXTERNAL 0x02 /* iASL only: This object created implicitly via External */
#define ANOBJ_IS_EXTERNAL 0x08 /* iASL only: This object created via External() */
#define ANOBJ_METHOD_NO_RETVAL 0x10 /* iASL only: Method has no return value */
#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* iASL only: Method has at least one return value */
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 220a718fbce9..83a593e2155d 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -613,13 +613,6 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
/* Special handling for the last segment (num_segments == 0) */
else {
-#ifdef ACPI_ASL_COMPILER
- if (!acpi_gbl_disasm_flag
- && (this_node->flags & ANOBJ_IS_EXTERNAL)) {
- this_node->flags &= ~IMPLICIT_EXTERNAL;
- }
-#endif
-
/*
* Sanity typecheck of the target object:
*
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index e9c9a63bb6a4..f594ab75a5fe 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -381,7 +381,6 @@ acpi_ns_search_and_enter(u32 target_name,
if (flags & ACPI_NS_EXTERNAL ||
(walk_state && walk_state->opcode == AML_SCOPE_OP)) {
new_node->flags |= ANOBJ_IS_EXTERNAL;
- new_node->flags |= IMPLICIT_EXTERNAL;
}
#endif
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 7a3a541046ed..08f26db2da7e 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -947,6 +947,24 @@ static int nc_dma_get_range(struct device *dev, u64 *size)
return 0;
}
+static int rc_dma_get_range(struct device *dev, u64 *size)
+{
+ struct acpi_iort_node *node;
+ struct acpi_iort_root_complex *rc;
+
+ node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
+ iort_match_node_callback, dev);
+ if (!node || node->revision < 1)
+ return -ENODEV;
+
+ rc = (struct acpi_iort_root_complex *)node->node_data;
+
+ *size = rc->memory_address_limit >= 64 ? U64_MAX :
+ 1ULL<<rc->memory_address_limit;
+
+ return 0;
+}
+
/**
* iort_dma_setup() - Set-up device DMA parameters.
*
@@ -960,25 +978,28 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
int ret, msb;
/*
- * Set default coherent_dma_mask to 32 bit. Drivers are expected to
- * setup the correct supported mask.
+ * If @dev is expected to be DMA-capable then the bus code that created
+ * it should have initialised its dma_mask pointer by this point. For
+ * now, we'll continue the legacy behaviour of coercing it to the
+ * coherent mask if not, but we'll no longer do so quietly.
*/
- if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
-
- /*
- * Set it to coherent_dma_mask by default if the architecture
- * code has not set it.
- */
- if (!dev->dma_mask)
+ if (!dev->dma_mask) {
+ dev_warn(dev, "DMA mask not set\n");
dev->dma_mask = &dev->coherent_dma_mask;
+ }
- size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+ if (dev->coherent_dma_mask)
+ size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+ else
+ size = 1ULL << 32;
- if (dev_is_pci(dev))
+ if (dev_is_pci(dev)) {
ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
- else
+ if (ret == -ENODEV)
+ ret = rc_dma_get_range(dev, &size);
+ } else {
ret = nc_dma_get_range(dev, &size);
+ }
if (!ret) {
msb = fls64(dmaaddr + size - 1);
@@ -993,6 +1014,7 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
* Limit coherent and dma mask based on size
* retrieved from firmware.
*/
+ dev->bus_dma_mask = mask;
dev->coherent_dma_mask = mask;
*dev->dma_mask = mask;
}
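rc_dma_get_range() reads the optional memory_address_limit field from a revision-1 or later IORT root-complex node; a limit of 32, for instance, caps DMA at 4 GiB (size = 1ULL << 32). The size-to-mask step that follows, pulled out into a sketch (the function name is illustrative):

	static u64 iort_mask_for(u64 dmaaddr, u64 size)
	{
		int msb = fls64(dmaaddr + size - 1);	/* highest set bit, 1-based */

		/* equivalent to DMA_BIT_MASK(msb) */
		return msb == 64 ? U64_MAX : (1ULL << msb) - 1;
	}

The resulting mask now also lands in the new dev->bus_dma_mask, letting the DMA API clamp streaming mappings to the bus limit.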
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index d79ad844c78f..cb97b6105f52 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -23,18 +23,18 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/async.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/jiffies.h>
-#include <linux/async.h>
-#include <linux/dmi.h>
-#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/suspend.h>
+#include <linux/types.h>
+
#include <asm/unaligned.h>
#ifdef CONFIG_ACPI_PROCFS_POWER
@@ -364,6 +364,20 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
+static enum power_supply_property energy_battery_full_cap_broken_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
/* --------------------------------------------------------------------------
Battery Management
-------------------------------------------------------------------------- */
@@ -577,8 +591,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN &&
(s16)(battery->rate_now) < 0) {
battery->rate_now = abs((s16)battery->rate_now);
- printk_once(KERN_WARNING FW_BUG
- "battery: (dis)charge rate invalid.\n");
+ pr_warn_once(FW_BUG "battery: (dis)charge rate invalid.\n");
}
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
@@ -799,6 +812,11 @@ static int sysfs_add_battery(struct acpi_battery *battery)
battery->bat_desc.properties = charge_battery_props;
battery->bat_desc.num_properties =
ARRAY_SIZE(charge_battery_props);
+ } else if (battery->full_charge_capacity == 0) {
+ battery->bat_desc.properties =
+ energy_battery_full_cap_broken_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(energy_battery_full_cap_broken_props);
} else {
battery->bat_desc.properties = energy_battery_props;
battery->bat_desc.num_properties =
@@ -918,10 +936,11 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
static int acpi_battery_update(struct acpi_battery *battery, bool resume)
{
- int result, old_present = acpi_battery_present(battery);
- result = acpi_battery_get_status(battery);
+ int result = acpi_battery_get_status(battery);
+
if (result)
return result;
+
if (!acpi_battery_present(battery)) {
sysfs_remove_battery(battery);
battery->update_time = 0;
@@ -931,8 +950,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
if (resume)
return 0;
- if (!battery->update_time ||
- old_present != acpi_battery_present(battery)) {
+ if (!battery->update_time) {
result = acpi_battery_get_info(battery);
if (result)
return result;
@@ -1021,7 +1039,7 @@ static int acpi_battery_info_proc_show(struct seq_file *seq, void *offset)
acpi_battery_units(battery));
seq_printf(seq, "battery technology: %srechargeable\n",
- (!battery->technology)?"non-":"");
+ battery->technology ? "" : "non-");
if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
seq_printf(seq, "design voltage: unknown\n");
@@ -1112,11 +1130,12 @@ static int acpi_battery_alarm_proc_show(struct seq_file *seq, void *offset)
goto end;
}
seq_printf(seq, "alarm: ");
- if (!battery->alarm)
- seq_printf(seq, "unsupported\n");
- else
+ if (battery->alarm) {
seq_printf(seq, "%u %sh\n", battery->alarm,
acpi_battery_units(battery));
+ } else {
+ seq_printf(seq, "unsupported\n");
+ }
end:
if (result)
seq_printf(seq, "ERROR: Unable to read battery alarm\n");
@@ -1149,9 +1168,9 @@ static ssize_t acpi_battery_write_alarm(struct file *file,
}
result = acpi_battery_set_alarm(battery);
end:
- if (!result)
- return count;
- return result;
+ if (result)
+ return result;
+ return count;
}
static int acpi_battery_alarm_proc_open(struct inode *inode, struct file *file)
@@ -1170,8 +1189,7 @@ static const struct file_operations acpi_battery_alarm_fops = {
static int acpi_battery_add_fs(struct acpi_device *device)
{
- printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
- " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
+ pr_warning(PREFIX "Deprecated procfs I/F for battery is loaded, please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
if (!acpi_device_dir(device)) {
acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
acpi_battery_dir);
@@ -1247,7 +1265,9 @@ static int battery_notify(struct notifier_block *nb,
if (!acpi_battery_present(battery))
return 0;
- if (!battery->bat) {
+ if (battery->bat) {
+ acpi_battery_refresh(battery);
+ } else {
result = acpi_battery_get_info(battery);
if (result)
return result;
@@ -1255,8 +1275,7 @@ static int battery_notify(struct notifier_block *nb,
result = sysfs_add_battery(battery);
if (result)
return result;
- } else
- acpi_battery_refresh(battery);
+ }
acpi_battery_init_alarm(battery);
acpi_battery_get_state(battery);
@@ -1398,7 +1417,7 @@ static int acpi_battery_add(struct acpi_device *device)
}
#endif
- printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
+ pr_info(PREFIX "%s Slot [%s] (battery %s)\n",
ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
device->status.battery_present ? "present" : "absent");
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 84b4a62018eb..292088fcc624 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -66,37 +66,10 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
return 0;
}
#endif
-static int set_gbl_term_list(const struct dmi_system_id *id)
-{
- acpi_gbl_execute_tables_as_methods = 1;
- return 0;
-}
-static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
- /*
- * Touchpad on Dell XPS 9570/Precision M5530 doesn't work under I2C
- * mode.
- * https://bugzilla.kernel.org/show_bug.cgi?id=198515
- */
- {
- .callback = set_gbl_term_list,
- .ident = "Dell Precision M5530",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision M5530"),
- },
- },
- {
- .callback = set_gbl_term_list,
- .ident = "Dell XPS 15 9570",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"),
- },
- },
+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
/*
* Invoke DSDT corruption work-around on all Toshiba Satellite.
- * DSDT will be copied to memory.
* https://bugzilla.kernel.org/show_bug.cgi?id=14679
*/
{
@@ -110,7 +83,7 @@ static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
{}
};
#else
-static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
{}
};
#endif
@@ -962,7 +935,7 @@ static int acpi_device_probe(struct device *dev)
return 0;
}
-static int acpi_device_remove(struct device * dev)
+static int acpi_device_remove(struct device *dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = acpi_dev->driver;
@@ -1060,8 +1033,11 @@ void __init acpi_early_init(void)
acpi_permanent_mmap = true;
- /* Check machine-specific quirks */
- dmi_check_system(acpi_quirks_dmi_table);
+ /*
+ * If the machine falls into the DMI check table,
+ * DSDT will be copied to memory
+ */
+ dmi_check_system(dsdt_dmi_table);
status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
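
For context, dmi_check_system() scans a zero-terminated dmi_system_id table and runs each matching entry's callback; the table rename above keeps exactly this shape. A minimal sketch of the pattern, with hypothetical identifiers:

    #include <linux/dmi.h>

    static int example_apply_quirk(const struct dmi_system_id *id)
    {
            pr_info("applying DSDT quirk for %s\n", id->ident);
            return 0;
    }

    static const struct dmi_system_id example_dmi_table[] __initconst = {
            {
                    .callback = example_apply_quirk,
                    .ident = "Example Laptop",
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "Example Laptop"),
                    },
            },
            {}      /* zero-entry terminator */
    };

    /* dmi_check_system(example_dmi_table) returns the match count. */
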
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 2345a5ee2dbb..a19ff3977ac4 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -21,6 +21,7 @@
#define pr_fmt(fmt) "ACPI: button: " fmt
+#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -235,9 +236,6 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
button->last_time = ktime_get();
}
- if (state)
- acpi_pm_wakeup_event(&device->dev);
-
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
if (ret == NOTIFY_DONE)
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
@@ -252,7 +250,8 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
return ret;
}
-static int acpi_button_state_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused acpi_button_state_seq_show(struct seq_file *seq,
+ void *offset)
{
struct acpi_device *device = seq->private;
int state;
@@ -366,7 +365,8 @@ int acpi_lid_open(void)
}
EXPORT_SYMBOL(acpi_lid_open);
-static int acpi_lid_update_state(struct acpi_device *device)
+static int acpi_lid_update_state(struct acpi_device *device,
+ bool signal_wakeup)
{
int state;
@@ -374,6 +374,9 @@ static int acpi_lid_update_state(struct acpi_device *device)
if (state < 0)
return state;
+ if (state && signal_wakeup)
+ acpi_pm_wakeup_event(&device->dev);
+
return acpi_lid_notify_state(device, state);
}
@@ -384,7 +387,7 @@ static void acpi_lid_initialize_state(struct acpi_device *device)
(void)acpi_lid_notify_state(device, 1);
break;
case ACPI_BUTTON_LID_INIT_METHOD:
- (void)acpi_lid_update_state(device);
+ (void)acpi_lid_update_state(device, false);
break;
case ACPI_BUTTON_LID_INIT_IGNORE:
default:
@@ -409,7 +412,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
users = button->input->users;
mutex_unlock(&button->input->mutex);
if (users)
- acpi_lid_update_state(device);
+ acpi_lid_update_state(device, true);
} else {
int keycode;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 917f77f4cb55..d4e5610e09c5 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2045,6 +2045,20 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
},
},
+ {
+ .ident = "ThinkPad X1 Carbon 6th",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"),
+ },
+ },
+ {
+ .ident = "ThinkPad X1 Yoga 3rd",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
+ },
+ },
{ },
};
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index 8a8f43568510..b2a16ed7e81a 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -66,6 +66,14 @@ osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
* be removed if both new and old graphics cards are supported.
*/
{"Linux-Dell-Video", true},
+ /*
+ * Linux-Lenovo-NV-HDMI-Audio is used by the BIOS to power on the
+ * NVIDIA HDMI audio device, which is turned off for power saving in
+ * Windows. This behaviour has been observed on some Lenovo ThinkPad
+ * systems, which cannot output audio via HDMI without this BIOS
+ * workaround.
+ */
+ {"Linux-Lenovo-NV-HDMI-Audio", true},
};
static u32 acpi_osi_handler(acpi_string interface, u32 supported)
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 5815356ea6ad..693cf05b0cc4 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -542,6 +542,23 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
return 0;
}
+static struct fwnode_handle *
+acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+ const char *childname)
+{
+ struct fwnode_handle *child;
+
+ /*
+ * Find first matching named child node of this fwnode.
+ * For ACPI this will be a data only sub-node.
+ */
+ fwnode_for_each_child_node(fwnode, child)
+ if (acpi_data_node_match(child, childname))
+ return child;
+
+ return NULL;
+}
+
/**
* __acpi_node_get_property_reference - returns handle to the referenced object
* @fwnode: Firmware node to get the property from
@@ -579,7 +596,7 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
*/
int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *propname, size_t index, size_t num_args,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
@@ -607,7 +624,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
if (ret)
return ret == -ENODEV ? -EINVAL : ret;
- args->adev = device;
+ args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0;
return 0;
}
@@ -633,6 +650,8 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
u32 nargs, i;
if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
+ struct fwnode_handle *ref_fwnode;
+
ret = acpi_bus_get_device(element->reference.handle,
&device);
if (ret)
@@ -641,6 +660,19 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
nargs = 0;
element++;
+ /*
+ * Find the referred data extension node under the
+ * referred device node.
+ */
+ for (ref_fwnode = acpi_fwnode_handle(device);
+ element < end && element->type == ACPI_TYPE_STRING;
+ element++) {
+ ref_fwnode = acpi_fwnode_get_named_child_node(
+ ref_fwnode, element->string.pointer);
+ if (!ref_fwnode)
+ return -EINVAL;
+ }
+
/* assume following integer elements are all args */
for (i = 0; element + i < end && i < num_args; i++) {
int type = element[i].type;
@@ -653,11 +685,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
}
- if (nargs > MAX_ACPI_REFERENCE_ARGS)
+ if (nargs > NR_FWNODE_REFERENCE_ARGS)
return -EINVAL;
if (idx == index) {
- args->adev = device;
+ args->fwnode = ref_fwnode;
args->nargs = nargs;
for (i = 0; i < nargs; i++)
args->args[i] = element[i].integer.value;
@@ -995,16 +1027,36 @@ struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
return NULL;
}
+/*
+ * Return true if the node is an ACPI graph node. Called on either ports
+ * or endpoints.
+ */
+static bool is_acpi_graph_node(struct fwnode_handle *fwnode,
+ const char *str)
+{
+ unsigned int len = strlen(str);
+ const char *name;
+
+ if (!len || !is_acpi_data_node(fwnode))
+ return false;
+
+ name = to_acpi_data_node(fwnode)->name;
+
+ return (fwnode_property_present(fwnode, "reg") &&
+ !strncmp(name, str, len) && name[len] == '@') ||
+ fwnode_property_present(fwnode, str);
+}
+
/**
* acpi_graph_get_next_endpoint - Get next endpoint ACPI firmware node
* @fwnode: Pointer to the parent firmware node
* @prev: Previous endpoint node or %NULL to get the first
*
* Looks up next endpoint ACPI firmware node below a given @fwnode. Returns
- * %NULL if there is no next endpoint, ERR_PTR() in case of error. In case
- * of success the next endpoint is returned.
+ * %NULL if there is no next endpoint or in case of error. In case of success
+ * the next endpoint is returned.
*/
-struct fwnode_handle *acpi_graph_get_next_endpoint(
+static struct fwnode_handle *acpi_graph_get_next_endpoint(
const struct fwnode_handle *fwnode, struct fwnode_handle *prev)
{
struct fwnode_handle *port = NULL;
@@ -1013,8 +1065,14 @@ struct fwnode_handle *acpi_graph_get_next_endpoint(
if (!prev) {
do {
port = fwnode_get_next_child_node(fwnode, port);
- /* Ports must have port property */
- if (fwnode_property_present(port, "port"))
+ /*
+ * The names of the port nodes begin with "port@" followed by
+ * the number of the port node, and each port node also has a
+ * "reg" property holding that number. For compatibility
+ * reasons a node is also recognised as a port node from the
+ * "port" property.
+ */
+ if (is_acpi_graph_node(port, "port"))
break;
} while (port);
} else {
@@ -1029,15 +1087,19 @@ struct fwnode_handle *acpi_graph_get_next_endpoint(
port = fwnode_get_next_child_node(fwnode, port);
if (!port)
break;
- if (fwnode_property_present(port, "port"))
+ if (is_acpi_graph_node(port, "port"))
endpoint = fwnode_get_next_child_node(port, NULL);
}
- if (endpoint) {
- /* Endpoints must have "endpoint" property */
- if (!fwnode_property_present(endpoint, "endpoint"))
- return ERR_PTR(-EPROTO);
- }
+ /*
+ * The names of the endpoint nodes begin with "endpoint@" followed by
+ * the number of the endpoint node, and each endpoint node also has a
+ * "reg" property holding that number. For compatibility
+ * reasons a node is also recognised as an endpoint node from the
+ * "endpoint" property.
+ */
+ if (!is_acpi_graph_node(endpoint, "endpoint"))
+ return NULL;
return endpoint;
}
@@ -1074,65 +1136,42 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
/**
 * acpi_graph_get_remote_endpoint - Parses and returns remote end of an endpoint
* @fwnode: Endpoint firmware node pointing to a remote device
- * @parent: Firmware node of remote port parent is filled here if not %NULL
- * @port: Firmware node of remote port is filled here if not %NULL
* @endpoint: Firmware node of remote endpoint is filled here if not %NULL
*
- * Function parses remote end of ACPI firmware remote endpoint and fills in
- * fields requested by the caller. Returns %0 in case of success and
- * negative errno otherwise.
+ * Returns the remote endpoint corresponding to @__fwnode. NULL on error.
*/
-int acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode,
- struct fwnode_handle **parent,
- struct fwnode_handle **port,
- struct fwnode_handle **endpoint)
+static struct fwnode_handle *
+acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode)
{
struct fwnode_handle *fwnode;
unsigned int port_nr, endpoint_nr;
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
int ret;
memset(&args, 0, sizeof(args));
ret = acpi_node_get_property_reference(__fwnode, "remote-endpoint", 0,
&args);
if (ret)
- return ret;
+ return NULL;
+
+ /* Direct endpoint reference? */
+ if (!is_acpi_device_node(args.fwnode))
+ return args.nargs ? NULL : args.fwnode;
/*
* Always require two arguments with the reference: port and
* endpoint indices.
*/
if (args.nargs != 2)
- return -EPROTO;
+ return NULL;
- fwnode = acpi_fwnode_handle(args.adev);
+ fwnode = args.fwnode;
port_nr = args.args[0];
endpoint_nr = args.args[1];
- if (parent)
- *parent = fwnode;
-
- if (!port && !endpoint)
- return 0;
-
fwnode = acpi_graph_get_child_prop_value(fwnode, "port", port_nr);
- if (!fwnode)
- return -EPROTO;
-
- if (port)
- *port = fwnode;
-
- if (!endpoint)
- return 0;
- fwnode = acpi_graph_get_child_prop_value(fwnode, "endpoint",
- endpoint_nr);
- if (!fwnode)
- return -EPROTO;
-
- *endpoint = fwnode;
-
- return 0;
+ return acpi_graph_get_child_prop_value(fwnode, "endpoint", endpoint_nr);
}
static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode)
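
With the reworked helper wired into the fwnode ops below, generic consumers reach the graph through the fwnode layer rather than the removed ACPI-specific entry points. A hedged sketch under that assumption (hypothetical function name):

    #include <linux/property.h>

    /* Hypothetical consumer: count endpoints that have a remote peer. */
    static unsigned int example_count_connected(struct device *dev)
    {
            struct fwnode_handle *ep;
            unsigned int connected = 0;

            fwnode_graph_for_each_endpoint(dev_fwnode(dev), ep) {
                    struct fwnode_handle *remote;

                    remote = fwnode_graph_get_remote_endpoint(ep);
                    if (remote)
                            connected++;
                    fwnode_handle_put(remote);
            }

            return connected;
    }
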
@@ -1186,70 +1225,14 @@ acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
val, nval);
}
-static struct fwnode_handle *
-acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
- const char *childname)
-{
- struct fwnode_handle *child;
-
- /*
- * Find first matching named child node of this fwnode.
- * For ACPI this will be a data only sub-node.
- */
- fwnode_for_each_child_node(fwnode, child)
- if (acpi_data_node_match(child, childname))
- return child;
-
- return NULL;
-}
-
static int
acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
unsigned int args_count, unsigned int index,
struct fwnode_reference_args *args)
{
- struct acpi_reference_args acpi_args;
- unsigned int i;
- int ret;
-
- ret = __acpi_node_get_property_reference(fwnode, prop, index,
- args_count, &acpi_args);
- if (ret < 0)
- return ret;
- if (!args)
- return 0;
-
- args->nargs = acpi_args.nargs;
- args->fwnode = acpi_fwnode_handle(acpi_args.adev);
-
- for (i = 0; i < NR_FWNODE_REFERENCE_ARGS; i++)
- args->args[i] = i < acpi_args.nargs ? acpi_args.args[i] : 0;
-
- return 0;
-}
-
-static struct fwnode_handle *
-acpi_fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
- struct fwnode_handle *prev)
-{
- struct fwnode_handle *endpoint;
-
- endpoint = acpi_graph_get_next_endpoint(fwnode, prev);
- if (IS_ERR(endpoint))
- return NULL;
-
- return endpoint;
-}
-
-static struct fwnode_handle *
-acpi_fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
-{
- struct fwnode_handle *endpoint = NULL;
-
- acpi_graph_get_remote_endpoint(fwnode, NULL, NULL, &endpoint);
-
- return endpoint;
+ return __acpi_node_get_property_reference(fwnode, prop, index,
+ args_count, args);
}
static struct fwnode_handle *
@@ -1265,8 +1248,10 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
endpoint->local_fwnode = fwnode;
- fwnode_property_read_u32(port_fwnode, "port", &endpoint->port);
- fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id);
+ if (fwnode_property_read_u32(port_fwnode, "reg", &endpoint->port))
+ fwnode_property_read_u32(port_fwnode, "port", &endpoint->port);
+ if (fwnode_property_read_u32(fwnode, "reg", &endpoint->id))
+ fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id);
return 0;
}
@@ -1292,9 +1277,9 @@ acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
.get_named_child_node = acpi_fwnode_get_named_child_node, \
.get_reference_args = acpi_fwnode_get_reference_args, \
.graph_get_next_endpoint = \
- acpi_fwnode_graph_get_next_endpoint, \
+ acpi_graph_get_next_endpoint, \
.graph_get_remote_endpoint = \
- acpi_fwnode_graph_get_remote_endpoint, \
+ acpi_graph_get_remote_endpoint, \
.graph_get_port_parent = acpi_fwnode_get_parent, \
.graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint, \
}; \
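
On the consumer side, the now-unified reference parsing is reached through fwnode_property_get_reference_args(); a hedged sketch, assuming a device property named "example-ref" that carries two integer arguments:

    #include <linux/property.h>

    /* Hypothetical consumer of a two-argument reference property. */
    static int example_parse_ref(struct device *dev)
    {
            struct fwnode_reference_args args;
            int ret;

            ret = fwnode_property_get_reference_args(dev_fwnode(dev),
                                                     "example-ref", NULL,
                                                     2, 0, &args);
            if (ret)
                    return ret;

            dev_info(dev, "ref args: %llu %llu\n",
                     (unsigned long long)args.args[0],
                     (unsigned long long)args.args[1]);

            fwnode_handle_put(args.fwnode);
            return 0;
    }
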
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 970dd87d347c..e1b6231cfa1c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1528,7 +1528,7 @@ static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
static bool acpi_is_indirect_io_slave(struct acpi_device *device)
{
struct acpi_device *parent = device->parent;
- const struct acpi_device_id indirect_io_hosts[] = {
+ static const struct acpi_device_id indirect_io_hosts[] = {
{"HISI0191", 0},
{}
};
@@ -1540,6 +1540,18 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{
struct list_head resource_list;
bool is_serial_bus_slave = false;
+ /*
+ * These devices have multiple I2cSerialBus resources and an i2c-client
+ * must be instantiated for each, each with its own i2c_device_id.
+ * Normally we only instantiate an i2c-client for the first resource,
+ * using the ACPI HID as id. These special cases are handled by the
+ * drivers/platform/x86/i2c-multi-instantiate.c driver, which knows
+ * which i2c_device_id to use for each resource.
+ */
+ static const struct acpi_device_id i2c_multi_instantiate_ids[] = {
+ {"BSG1160", },
+ {}
+ };
if (acpi_is_indirect_io_slave(device))
return true;
@@ -1551,6 +1563,10 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
fwnode_property_present(&device->fwnode, "baud")))
return true;
+ /* Instantiate a pdev for the i2c-multi-instantiate drv to bind to */
+ if (!acpi_match_device_ids(device, i2c_multi_instantiate_ids))
+ return false;
+
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(device, &resource_list,
acpi_check_serial_bus_slave,
@@ -1612,7 +1628,8 @@ static int acpi_add_single_object(struct acpi_device **child,
* Note this must be done before the get power-/wakeup_dev-flags calls.
*/
if (type == ACPI_BUS_TYPE_DEVICE)
- acpi_bus_get_status(device);
+ if (acpi_bus_get_status(device) < 0)
+ acpi_set_device_status(device, 0);
acpi_bus_get_power_flags(device);
acpi_bus_get_wakeup_device_flags(device);
@@ -1690,7 +1707,7 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
* acpi_add_single_object updates this once we've an acpi_device
* so that acpi_bus_get_status' quirk handling can be used.
*/
- *sta = 0;
+ *sta = ACPI_STA_DEFAULT;
break;
case ACPI_TYPE_PROCESSOR:
*type = ACPI_BUS_TYPE_PROCESSOR;
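
The BSG1160 special-casing above relies on acpi_match_device_ids(), which returns 0 on a match and a negative errno otherwise. A minimal sketch of the pattern, with a hypothetical table and helper:

    static const struct acpi_device_id example_ids[] = {
            { "BSG1160", 0 },
            { }
    };

    /* Hypothetical check: skip devices handled by a dedicated driver. */
    static bool example_is_special(struct acpi_device *adev)
    {
            return !acpi_match_device_ids(adev, example_ids);
    }
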
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 5d0486f1cfcd..754d59f95500 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -338,6 +338,14 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
},
},
+ {
+ .callback = init_nvs_save_s3,
+ .ident = "Asus 1025C",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
+ },
+ },
/*
* https://bugzilla.kernel.org/show_bug.cgi?id=189431
* Lenovo G50-45 is a platform later than 2012, but needs nvs memory
@@ -718,9 +726,6 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_ENTRY 5
#define ACPI_LPS0_EXIT 6
-#define ACPI_LPS0_SCREEN_MASK ((1 << ACPI_LPS0_SCREEN_OFF) | (1 << ACPI_LPS0_SCREEN_ON))
-#define ACPI_LPS0_PLATFORM_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
-
static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
static char lps0_dsm_func_mask;
@@ -924,17 +929,14 @@ static int lps0_device_attach(struct acpi_device *adev,
if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
char bitmask = *(char *)out_obj->buffer.pointer;
- if ((bitmask & ACPI_LPS0_PLATFORM_MASK) == ACPI_LPS0_PLATFORM_MASK ||
- (bitmask & ACPI_LPS0_SCREEN_MASK) == ACPI_LPS0_SCREEN_MASK) {
- lps0_dsm_func_mask = bitmask;
- lps0_device_handle = adev->handle;
- /*
- * Use suspend-to-idle by default if the default
- * suspend mode was not set from the command line.
- */
- if (mem_sleep_default > PM_SUSPEND_MEM)
- mem_sleep_current = PM_SUSPEND_TO_IDLE;
- }
+ lps0_dsm_func_mask = bitmask;
+ lps0_device_handle = adev->handle;
+ /*
+ * Use suspend-to-idle by default if the default
+ * suspend mode was not set from the command line.
+ */
+ if (mem_sleep_default > PM_SUSPEND_MEM)
+ mem_sleep_current = PM_SUSPEND_TO_IDLE;
acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
bitmask);
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index ec5b0f190231..06c31ec3cc70 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -62,14 +62,20 @@ static const struct always_present_id always_present_ids[] = {
*/
ENTRY("INT0002", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}),
/*
- * On the Dell Venue 11 Pro 7130 the DSDT hides the touchscreen ACPI
- * device until a certain time after _SB.PCI0.GFX0.LCD.LCD1._ON gets
- * called has passed *and* _STA has been called at least 3 times since.
+	 * On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides
+	 * the touchscreen ACPI device until a certain time has
+	 * passed after _SB.PCI0.GFX0.LCD.LCD1._ON was called
+	 * *and* _STA has been called at least 3 times since.
*/
ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_ULT), {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
}),
+ ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_ULT), {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"),
+ }),
+
/*
* The GPD win BIOS dated 20170221 has disabled the accelerometer, the
* drivers sometimes cause crashes under Windows and this is how the
@@ -103,13 +109,9 @@ static const struct always_present_id always_present_ids[] = {
bool acpi_device_always_present(struct acpi_device *adev)
{
- u32 *status = (u32 *)&adev->status;
- u32 old_status = *status;
bool ret = false;
unsigned int i;
- /* acpi_match_device_ids checks status, so set it to default */
- *status = ACPI_STA_DEFAULT;
for (i = 0; i < ARRAY_SIZE(always_present_ids); i++) {
if (acpi_match_device_ids(adev, always_present_ids[i].hid))
continue;
@@ -125,15 +127,9 @@ bool acpi_device_always_present(struct acpi_device *adev)
!dmi_check_system(always_present_ids[i].dmi_ids))
continue;
- if (old_status != ACPI_STA_DEFAULT) /* Log only once */
- dev_info(&adev->dev,
- "Device [%s] is in always present list\n",
- adev->pnp.bus_id);
-
ret = true;
break;
}
- *status = old_status;
return ret;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index cc71c63df381..984b37647b2f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6424,6 +6424,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
host->n_tags = ATA_MAX_QUEUE;
host->dev = dev;
host->ops = ops;
+ kref_init(&host->kref);
}
void __ata_port_probe(struct ata_port *ap)
@@ -7391,3 +7392,5 @@ EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);
+EXPORT_SYMBOL_GPL(ata_host_get);
+EXPORT_SYMBOL_GPL(ata_host_put);
\ No newline at end of file
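
The kref_init() above pairs with the newly exported ata_host_get()/ata_host_put(), whose declarations are dropped from the private drivers/ata/libata.h below and presumably move to the public header elsewhere in this series. A hedged sketch of the intended get/put discipline (hypothetical caller):

    /* Hypothetical user pinning a host across an async operation. */
    static void example_use_host(struct ata_host *host)
    {
            ata_host_get(host);     /* balanced by ata_host_put() */

            /* ... work that must not outlive the reference ... */

            ata_host_put(host);     /* may free the host on last put */
    }
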
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index aad1b01447de..8e270962b2f3 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -597,8 +597,9 @@ static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev,
int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
{
int rc = 0;
+ u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
u8 scsi_cmd[MAX_COMMAND_SIZE];
- u8 args[4], *argbuf = NULL, *sensebuf = NULL;
+ u8 args[4], *argbuf = NULL;
int argsize = 0;
enum dma_data_direction data_dir;
struct scsi_sense_hdr sshdr;
@@ -610,10 +611,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
if (copy_from_user(args, arg, sizeof(args)))
return -EFAULT;
- sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
- if (!sensebuf)
- return -ENOMEM;
-
+ memset(sensebuf, 0, sizeof(sensebuf));
memset(scsi_cmd, 0, sizeof(scsi_cmd));
if (args[3]) {
@@ -685,7 +683,6 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
&& copy_to_user(arg + sizeof(args), argbuf, argsize))
rc = -EFAULT;
error:
- kfree(sensebuf);
kfree(argbuf);
return rc;
}
@@ -704,8 +701,9 @@ error:
int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
{
int rc = 0;
+ u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
u8 scsi_cmd[MAX_COMMAND_SIZE];
- u8 args[7], *sensebuf = NULL;
+ u8 args[7];
struct scsi_sense_hdr sshdr;
int cmd_result;
@@ -715,10 +713,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
if (copy_from_user(args, arg, sizeof(args)))
return -EFAULT;
- sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
- if (!sensebuf)
- return -ENOMEM;
-
+ memset(sensebuf, 0, sizeof(sensebuf));
memset(scsi_cmd, 0, sizeof(scsi_cmd));
scsi_cmd[0] = ATA_16;
scsi_cmd[1] = (3 << 1); /* Non-data */
@@ -769,7 +764,6 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
}
error:
- kfree(sensebuf);
return rc;
}
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 9e21c49cf6be..f953cb4bb1ba 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -100,8 +100,6 @@ extern int ata_port_probe(struct ata_port *ap);
extern void __ata_port_probe(struct ata_port *ap);
extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors);
-extern void ata_host_get(struct ata_host *host);
-extern void ata_host_put(struct ata_host *host);
#define to_ata_port(d) container_of(d, struct ata_port, tdev)
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index f6c46e9a4dc0..e8b6a2e464c9 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -25,7 +25,6 @@
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
-#include <linux/dma/pxa-dma.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/completion.h>
@@ -180,8 +179,6 @@ static int pxa_ata_probe(struct platform_device *pdev)
struct resource *irq_res;
struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
struct dma_slave_config config;
- dma_cap_mask_t mask;
- struct pxad_param param;
int ret = 0;
/*
@@ -278,10 +275,6 @@ static int pxa_ata_probe(struct platform_device *pdev)
ap->private_data = data;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- param.prio = PXAD_PRIO_LOWEST;
- param.drcmr = pdata->dma_dreq;
memset(&config, 0, sizeof(config));
config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
@@ -294,8 +287,7 @@ static int pxa_ata_probe(struct platform_device *pdev)
* Request the DMA channel
*/
data->dma_chan =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &param, &pdev->dev, "data");
+ dma_request_slave_channel(&pdev->dev, "data");
if (!data->dma_chan)
return -EBUSY;
ret = dmaengine_slave_config(data->dma_chan, &config);
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 2c288d1f42bb..e89146ddede6 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1385,14 +1385,12 @@ static void zatm_close(struct atm_vcc *vcc)
static int zatm_open(struct atm_vcc *vcc)
{
- struct zatm_dev *zatm_dev;
struct zatm_vcc *zatm_vcc;
short vpi = vcc->vpi;
int vci = vcc->vci;
int error;
DPRINTK(">zatm_open\n");
- zatm_dev = ZATM_DEV(vcc->dev);
if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
vcc->dev_data = NULL;
if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
diff --git a/drivers/auxdisplay/arm-charlcd.c b/drivers/auxdisplay/arm-charlcd.c
index 296fb30dfa00..dea031484cc4 100644
--- a/drivers/auxdisplay/arm-charlcd.c
+++ b/drivers/auxdisplay/arm-charlcd.c
@@ -331,8 +331,7 @@ out_no_resource:
static int charlcd_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct charlcd *lcd = platform_get_drvdata(pdev);
+ struct charlcd *lcd = dev_get_drvdata(dev);
/* Power the display off */
charlcd_4bit_command(lcd, HD_DISPCTRL);
@@ -341,8 +340,7 @@ static int charlcd_suspend(struct device *dev)
static int charlcd_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct charlcd *lcd = platform_get_drvdata(pdev);
+ struct charlcd *lcd = dev_get_drvdata(dev);
/* Turn the display back on */
charlcd_4bit_command(lcd, HD_DISPCTRL | HD_DISPCTRL_ON);
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 8673fc2b9eb8..81c22d20d9d9 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -99,10 +99,7 @@ static atomic_t charlcd_available = ATOMIC_INIT(1);
/* sleeps that many milliseconds with a reschedule */
static void long_sleep(int ms)
{
- if (in_interrupt())
- mdelay(ms);
- else
- schedule_timeout_interruptible(msecs_to_jiffies(ms));
+ schedule_timeout_interruptible(msecs_to_jiffies(ms));
}
/* turn the backlight on or off */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index df3e1a44707a..7fbd281cfd5d 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -178,10 +178,10 @@ void device_pm_move_to_tail(struct device *dev)
* of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
* ignored.
*
- * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
- * when the consumer device driver unbinds from it. The combination of both
- * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
- * to be returned.
+ * If the DL_FLAG_AUTOREMOVE_CONSUMER is set, the link will be removed
+ * automatically when the consumer device driver unbinds from it.
+ * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS
+ * set is invalid and will cause NULL to be returned.
*
* A side effect of the link creation is re-ordering of dpm_list and the
* devices_kset list by moving the consumer device and all devices depending
@@ -198,7 +198,8 @@ struct device_link *device_link_add(struct device *consumer,
struct device_link *link;
if (!consumer || !supplier ||
- ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
+ ((flags & DL_FLAG_STATELESS) &&
+ (flags & DL_FLAG_AUTOREMOVE_CONSUMER)))
return NULL;
device_links_write_lock();
@@ -372,6 +373,36 @@ void device_link_del(struct device_link *link)
}
EXPORT_SYMBOL_GPL(device_link_del);
+/**
+ * device_link_remove - remove a link between two devices.
+ * @consumer: Consumer end of the link.
+ * @supplier: Supplier end of the link.
+ *
+ * The caller must ensure proper synchronization of this function with runtime
+ * PM.
+ */
+void device_link_remove(void *consumer, struct device *supplier)
+{
+ struct device_link *link;
+
+ if (WARN_ON(consumer == supplier))
+ return;
+
+ device_links_write_lock();
+ device_pm_lock();
+
+ list_for_each_entry(link, &supplier->links.consumers, s_node) {
+ if (link->consumer == consumer) {
+ kref_put(&link->kref, __device_link_del);
+ break;
+ }
+ }
+
+ device_pm_unlock();
+ device_links_write_unlock();
+}
+EXPORT_SYMBOL_GPL(device_link_remove);
+
static void device_links_missing_supplier(struct device *dev)
{
struct device_link *link;
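
The new device_link_remove() is the by-endpoints counterpart to device_link_del(). A minimal sketch, assuming a stateless link created and torn down by the same consumer driver (hypothetical function name):

    #include <linux/device.h>

    /* Hypothetical consumer managing a stateless link to its supplier. */
    static int example_bind(struct device *consumer, struct device *supplier)
    {
            struct device_link *link;

            link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
            if (!link)
                    return -EINVAL;

            /* ... use the supplier ... */

            device_link_remove(consumer, supplier);
            return 0;
    }
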
@@ -479,7 +510,7 @@ static void __device_links_no_driver(struct device *dev)
if (link->flags & DL_FLAG_STATELESS)
continue;
- if (link->flags & DL_FLAG_AUTOREMOVE)
+ if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
kref_put(&link->kref, __device_link_del);
else if (link->status != DL_STATE_SUPPLIER_UNBIND)
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
@@ -515,8 +546,18 @@ void device_links_driver_cleanup(struct device *dev)
if (link->flags & DL_FLAG_STATELESS)
continue;
- WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
+ WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
+
+ /*
+ * autoremove the links between this @dev and its consumer
+ * devices that are not active, i.e. where the link state
+ * has moved to DL_STATE_SUPPLIER_UNBIND.
+ */
+ if (link->status == DL_STATE_SUPPLIER_UNBIND &&
+ link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ kref_put(&link->kref, __device_link_del);
+
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
@@ -866,10 +907,19 @@ static const void *device_namespace(struct kobject *kobj)
return ns;
}
+static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ if (dev->class && dev->class->get_ownership)
+ dev->class->get_ownership(dev, uid, gid);
+}
+
static struct kobj_type device_ktype = {
.release = device_release,
.sysfs_ops = &dev_sysfs_ops,
.namespace = device_namespace,
+ .get_ownership = device_get_ownership,
};
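
device_get_ownership() delegates to a get_ownership hook that this series is assumed to add to struct class, letting a class re-own its sysfs inodes, e.g. per namespace. A hedged sketch of a class providing the hook (all identifiers hypothetical):

    /* Hypothetical class mapping sysfs ownership into a user namespace. */
    static void example_get_ownership(struct device *dev,
                                      kuid_t *uid, kgid_t *gid)
    {
            /* Fill *uid / *gid, e.g. from the device's owning namespace. */
    }

    static struct class example_class = {
            .name           = "example",
            .get_ownership  = example_get_ownership,
    };
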
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 30cc9c877ebb..eb9443d5bae1 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -540,16 +540,24 @@ ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_l1tf(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
&dev_attr_spectre_v1.attr,
&dev_attr_spectre_v2.attr,
&dev_attr_spec_store_bypass.attr,
+ &dev_attr_l1tf.attr,
NULL
};
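
The __weak default above reports "Not affected"; an architecture overrides it with its own reporting function of the same signature. A hedged sketch of such an override (the x86 implementation formats its mitigation state similarly):

    /* Hypothetical arch-specific override of the weak default. */
    ssize_t cpu_show_l1tf(struct device *dev,
                          struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "Mitigation: PTE Inversion\n");
    }
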
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 7f732744f0d3..202324291542 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -651,6 +651,8 @@ static bool fw_force_sysfs_fallback(enum fw_opt opt_flags)
static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
{
+ int ret;
+
if (fw_fallback_config.ignore_sysfs_fallback) {
pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n");
return false;
@@ -659,6 +661,11 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
if ((opt_flags & FW_OPT_NOFALLBACK))
return false;
+ /* Also permit LSMs and IMA to fail firmware sysfs fallback */
+ ret = security_kernel_load_data(LOADING_FIRMWARE);
+ if (ret < 0)
+		return false;
+
return fw_force_sysfs_fallback(opt_flags);
}
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index df41b4780b3b..b413951c6abc 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -153,6 +153,23 @@ struct device *dev_pm_domain_attach_by_id(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id);
/**
+ * dev_pm_domain_attach_by_name - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @name: The name of the PM domain.
+ *
+ * For a detailed function description, see dev_pm_domain_attach_by_id().
+ */
+struct device *dev_pm_domain_attach_by_name(struct device *dev,
+ char *name)
+{
+ if (dev->pm_domain)
+ return ERR_PTR(-EEXIST);
+
+ return genpd_dev_pm_attach_by_name(dev, name);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name);
+
+/**
* dev_pm_domain_detach - Detach a device from its PM domain.
* @dev: Device to detach.
* @power_off: Used to indicate whether we should power off the device.
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 9e8484189034..79bdca70a81a 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2374,6 +2374,30 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
+/**
+ * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @name: The name of the PM domain.
+ *
+ * Parse device's OF node to find a PM domain specifier using the
+ * power-domain-names DT property. For further description see
+ * genpd_dev_pm_attach_by_id().
+ */
+struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name)
+{
+ int index;
+
+ if (!dev->of_node)
+ return NULL;
+
+ index = of_property_match_string(dev->of_node, "power-domain-names",
+ name);
+ if (index < 0)
+ return NULL;
+
+ return genpd_dev_pm_attach_by_id(dev, index);
+}
+
static const struct of_device_id idle_state_match[] = {
{ .compatible = "domain-idle-state", },
{ }
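
genpd_dev_pm_attach_by_name() resolves the index through the power-domain-names DT property before handing off to the by-id attach. A minimal consumer sketch, with a hypothetical DT fragment and domain names:

    /*
     * Assumed DT fragment for the consumer node:
     *     power-domains = <&genpd 0>, <&genpd 1>;
     *     power-domain-names = "main", "perf";
     */
    static int example_attach_perf(struct device *dev)
    {
            struct device *virt_dev;

            virt_dev = dev_pm_domain_attach_by_name(dev, "perf");
            if (IS_ERR(virt_dev))
                    return PTR_ERR(virt_dev);
            if (!virt_dev)
                    return -ENODEV; /* name not in power-domain-names */

            return 0;
    }
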
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index aff34c0c2a3e..6ad5ef48b61e 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -45,3 +45,7 @@ config REGMAP_IRQ
config REGMAP_SOUNDWIRE
tristate
depends on SOUNDWIRE_BUS
+
+config REGMAP_SCCB
+ tristate
+ depends on I2C
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 5ed0023fabda..f5b4e8851d00 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
obj-$(CONFIG_REGMAP_W1) += regmap-w1.o
obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o
+obj-$(CONFIG_REGMAP_SCCB) += regmap-sccb.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 53785e0e297a..a6bf34d6394e 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -94,10 +94,12 @@ struct regmap {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
const struct regmap_access_table *wr_table;
const struct regmap_access_table *rd_table;
const struct regmap_access_table *volatile_table;
const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *rd_noinc_table;
int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
@@ -181,6 +183,7 @@ bool regmap_writeable(struct regmap *map, unsigned int reg);
bool regmap_readable(struct regmap *map, unsigned int reg);
bool regmap_volatile(struct regmap *map, unsigned int reg);
bool regmap_precious(struct regmap *map, unsigned int reg);
+bool regmap_readable_noinc(struct regmap *map, unsigned int reg);
int _regmap_write(struct regmap *map, unsigned int reg,
unsigned int val);
diff --git a/drivers/base/regmap/regmap-sccb.c b/drivers/base/regmap/regmap-sccb.c
new file mode 100644
index 000000000000..597042e2d009
--- /dev/null
+++ b/drivers/base/regmap/regmap-sccb.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+// Register map access API - SCCB support
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "internal.h"
+
+/**
+ * sccb_is_available - Check if the adapter supports SCCB protocol
+ * @adap: I2C adapter
+ *
+ * Return true if the I2C adapter is capable of using SCCB helper functions,
+ * false otherwise.
+ */
+static bool sccb_is_available(struct i2c_adapter *adap)
+{
+ u32 needed_funcs = I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
+
+ /*
+ * If we ever want support for hardware doing SCCB natively, we will
+ * introduce a sccb_xfer() callback to struct i2c_algorithm and check
+ * for it here.
+ */
+
+ return (i2c_get_functionality(adap) & needed_funcs) == needed_funcs;
+}
+
+/**
+ * regmap_sccb_read - Read data from SCCB slave device
+ * @context: Device that will be interacted with
+ * @reg: Register to be read from
+ * @val: Pointer to store read value
+ *
+ * This executes the 2-phase write transmission cycle that is followed by a
+ * 2-phase read transmission cycle, returning negative errno else zero on
+ * success.
+ */
+static int regmap_sccb_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+ union i2c_smbus_data data;
+
+ i2c_lock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
+
+ ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+ I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE, NULL);
+ if (ret < 0)
+ goto out;
+
+ ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+ I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &data);
+ if (ret < 0)
+ goto out;
+
+ *val = data.byte;
+out:
+ i2c_unlock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
+
+ return ret;
+}
+
+/**
+ * regmap_sccb_write - Write data to SCCB slave device
+ * @context: Device that will be interacted with
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * This executes the SCCB 3-phase write transmission cycle, returning negative
+ * errno else zero on success.
+ */
+static int regmap_sccb_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ return i2c_smbus_write_byte_data(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_sccb_bus = {
+ .reg_write = regmap_sccb_write,
+ .reg_read = regmap_sccb_read,
+};
+
+static const struct regmap_bus *regmap_get_sccb_bus(struct i2c_client *i2c,
+ const struct regmap_config *config)
+{
+ if (config->val_bits == 8 && config->reg_bits == 8 &&
+ sccb_is_available(i2c->adapter))
+ return &regmap_sccb_bus;
+
+ return ERR_PTR(-ENOTSUPP);
+}
+
+struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_sccb_bus(i2c, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_sccb);
+
+struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_sccb_bus(i2c, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sccb);
+
+MODULE_LICENSE("GPL v2");
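
The series is assumed to also add regmap_init_sccb()/devm_regmap_init_sccb() wrappers in <linux/regmap.h> that funnel into the __regmap_init_sccb()/__devm_regmap_init_sccb() entry points above. A hedged probe sketch for an 8-bit SCCB sensor (all identifiers and the 0x0a chip-ID register are hypothetical):

    static const struct regmap_config example_sccb_config = {
            .reg_bits = 8,
            .val_bits = 8,
    };

    static int example_sensor_probe(struct i2c_client *client)
    {
            struct regmap *map;
            unsigned int id;
            int ret;

            map = devm_regmap_init_sccb(client, &example_sccb_config);
            if (IS_ERR(map))
                    return PTR_ERR(map);

            ret = regmap_read(map, 0x0a, &id);      /* assumed ID register */
            if (ret)
                    return ret;

            dev_info(&client->dev, "chip id: 0x%02x\n", id);
            return 0;
    }
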
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
index 91d501eda8a9..0968059f1ef5 100644
--- a/drivers/base/regmap/regmap-slimbus.c
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -7,33 +7,24 @@
#include "internal.h"
-static int regmap_slimbus_byte_reg_read(void *context, unsigned int reg,
- unsigned int *val)
+static int regmap_slimbus_write(void *context, const void *data, size_t count)
{
struct slim_device *sdev = context;
- int v;
- v = slim_readb(sdev, reg);
-
- if (v < 0)
- return v;
-
- *val = v;
-
- return 0;
+ return slim_write(sdev, *(u16 *)data, count - 2, (u8 *)data + 2);
}
-static int regmap_slimbus_byte_reg_write(void *context, unsigned int reg,
- unsigned int val)
+static int regmap_slimbus_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
{
struct slim_device *sdev = context;
- return slim_writeb(sdev, reg, val);
+ return slim_read(sdev, *(u16 *)reg, val_size, val);
}
static struct regmap_bus regmap_slimbus_bus = {
- .reg_write = regmap_slimbus_byte_reg_write,
- .reg_read = regmap_slimbus_byte_reg_read,
+ .write = regmap_slimbus_write,
+ .read = regmap_slimbus_read,
.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 3bc84885eb91..0360a90ad6b6 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -168,6 +168,17 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
return false;
}
+bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
+{
+ if (map->readable_noinc_reg)
+ return map->readable_noinc_reg(map->dev, reg);
+
+ if (map->rd_noinc_table)
+ return regmap_check_range_table(map, reg, map->rd_noinc_table);
+
+ return true;
+}
+
static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
size_t num)
{
@@ -766,10 +777,12 @@ struct regmap *__regmap_init(struct device *dev,
map->rd_table = config->rd_table;
map->volatile_table = config->volatile_table;
map->precious_table = config->precious_table;
+ map->rd_noinc_table = config->rd_noinc_table;
map->writeable_reg = config->writeable_reg;
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
+ map->readable_noinc_reg = config->readable_noinc_reg;
map->cache_type = config->cache_type;
spin_lock_init(&map->async_lock);
@@ -1285,6 +1298,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
+ map->readable_noinc_reg = config->readable_noinc_reg;
map->cache_type = config->cache_type;
regmap_debugfs_init(map, config->name);
@@ -2564,7 +2578,70 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
EXPORT_SYMBOL_GPL(regmap_raw_read);
/**
- * regmap_field_read() - Read a value to a single register field
+ * regmap_noinc_read(): Read data from a register without incrementing the
+ * register number
+ *
+ * @map: Register map to read from
+ * @reg: Register to read from
+ * @val: Pointer to data buffer
+ * @val_len: Length of output buffer in bytes.
+ *
+ * The regmap API usually assumes that bulk bus read operations will read a
+ * range of registers. Some devices have certain registers for which a read
+ * operation will read from an internal FIFO.
+ *
+ * The target register must be volatile but registers after it can be
+ * completely unrelated cacheable registers.
+ *
+ * This will attempt multiple reads as required to read val_len bytes.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len)
+{
+ size_t read_len;
+ int ret;
+
+ if (!map->bus)
+ return -EINVAL;
+ if (!map->bus->read)
+ return -ENOTSUPP;
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+ if (val_len == 0)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ while (val_len) {
+ if (map->max_raw_read && map->max_raw_read < val_len)
+ read_len = map->max_raw_read;
+ else
+ read_len = val_len;
+ ret = _regmap_raw_read(map, reg, val, read_len);
+ if (ret)
+ goto out_unlock;
+ val = ((u8 *)val) + read_len;
+ val_len -= read_len;
+ }
+
+out_unlock:
+ map->unlock(map->lock_arg);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_noinc_read);
+
+/**
+ * regmap_field_read(): Read a value from a single register field
*
* @field: Register field to read from
* @val: Pointer to store read value
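
Marking a FIFO register for regmap_noinc_read() takes two steps: the register must be volatile, and it must be readable through rd_noinc_table (or readable_noinc_reg). A minimal configuration sketch, assuming a hypothetical data port at register 0x20:

    static const struct regmap_range example_noinc_ranges[] = {
            regmap_reg_range(0x20, 0x20),   /* hypothetical FIFO data port */
    };

    static const struct regmap_access_table example_rd_noinc_table = {
            .yes_ranges = example_noinc_ranges,
            .n_yes_ranges = ARRAY_SIZE(example_noinc_ranges),
    };

    /* In the regmap_config, set .rd_noinc_table = &example_rd_noinc_table
     * and also flag 0x20 volatile, e.g. via a volatile_table covering the
     * same range.
     */

    static int example_drain_fifo(struct regmap *map, u8 *buf, size_t len)
    {
            return regmap_noinc_read(map, 0x20, buf, len);
    }
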
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index cb0f1aad20b7..b9558ff20830 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -30,6 +30,7 @@ config BCMA_HOST_PCI
config BCMA_HOST_SOC
bool "Support for BCMA in a SoC"
+ depends on HAS_IOMEM
help
 Host interface for a Broadcom AXI bus directly mapped into
the memory. This only works with the Broadcom SoCs from the
@@ -61,7 +62,7 @@ config BCMA_DRIVER_PCI_HOSTMODE
config BCMA_DRIVER_MIPS
bool "BCMA Broadcom MIPS core driver"
- depends on MIPS
+ depends on MIPS || COMPILE_TEST
help
Driver for the Broadcom MIPS core attached to Broadcom specific
Advanced Microcontroller Bus.
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index f6518067aa7d..f99e5c883368 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -21,6 +21,7 @@
#define DAC960_DriverDate "21 Aug 2007"
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
@@ -6426,7 +6427,7 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
return true;
}
-static int dac960_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused dac960_proc_show(struct seq_file *m, void *v)
{
unsigned char *StatusMessage = "OK\n";
int ControllerNumber;
@@ -6446,14 +6447,16 @@ static int dac960_proc_show(struct seq_file *m, void *v)
return 0;
}
-static int dac960_initial_status_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused dac960_initial_status_proc_show(struct seq_file *m,
+ void *v)
{
DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer);
return 0;
}
-static int dac960_current_status_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused dac960_current_status_proc_show(struct seq_file *m,
+ void *v)
{
DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private;
unsigned char *StatusMessage =
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ad9b687a236a..d4913516823f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -74,12 +74,12 @@ config AMIGA_Z2RAM
config CDROM
tristate
+ select BLK_SCSI_REQUEST
config GDROM
tristate "SEGA Dreamcast GD-ROM drive"
depends on SH_DREAMCAST
select CDROM
- select BLK_SCSI_REQUEST # only for the generic cdrom code
help
A standard SEGA Dreamcast comes with a modified CD ROM drive called a
"GD-ROM" by SEGA to signify it is capable of reading special disks
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index dc061158b403..8566b188368b 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -36,8 +36,11 @@ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
-obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
obj-$(CONFIG_ZRAM) += zram/
+obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
+null_blk-objs := null_blk_main.o
+null_blk-$(CONFIG_BLK_DEV_ZONED) += null_blk_zoned.o
+
skd-y := skd_main.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 096882e54095..136dc507d020 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1137,6 +1137,7 @@ noskb: if (buf)
break;
}
bvcpy(skb, f->buf->bio, f->iter, n);
+ /* fall through */
case ATA_CMD_PIO_WRITE:
case ATA_CMD_PIO_WRITE_EXT:
spin_lock_irq(&d->lock);
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 697f735b07a4..41060e9cedf2 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -284,8 +284,8 @@ freedev(struct aoedev *d)
e = t + d->ntargets;
for (; t < e && *t; t++)
freetgt(d, *t);
- if (d->bufpool)
- mempool_destroy(d->bufpool);
+
+ mempool_destroy(d->bufpool);
skbpoolfree(d);
minor_free(d->sysminor);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index bb976598ee43..df8103dd40ac 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -254,20 +254,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, bool is_write,
+ unsigned int len, unsigned int off, unsigned int op,
sector_t sector)
{
void *mem;
int err = 0;
- if (is_write) {
+ if (op_is_write(op)) {
err = copy_to_brd_setup(brd, sector, len);
if (err)
goto out;
}
mem = kmap_atomic(page);
- if (!is_write) {
+ if (!op_is_write(op)) {
copy_from_brd(mem + off, brd, sector, len);
flush_dcache_page(page);
} else {
@@ -296,7 +296,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
int err;
err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
- op_is_write(bio_op(bio)), sector);
+ bio_op(bio), sector);
if (err)
goto io_error;
sector += len >> SECTOR_SHIFT;
@@ -310,15 +310,15 @@ io_error:
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, bool is_write)
+ struct page *page, unsigned int op)
{
struct brd_device *brd = bdev->bd_disk->private_data;
int err;
if (PageTransHuge(page))
return -ENOTSUPP;
- err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
- page_endio(page, is_write, err);
+ err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
+ page_endio(page, op_is_write(op), err);
return err;
}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index bc4ed2ed40a2..e35a234b0a8f 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -55,12 +55,10 @@
# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x) __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
-# define __must_hold(x) __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
-# define __must_hold(x)
#endif
/* shared module parameters, defined in drbd_main.c */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index a80809bd3057..ef8212a4b73e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2103,14 +2103,10 @@ static void drbd_destroy_mempools(void)
mempool_exit(&drbd_md_io_page_pool);
mempool_exit(&drbd_ee_mempool);
mempool_exit(&drbd_request_mempool);
- if (drbd_ee_cache)
- kmem_cache_destroy(drbd_ee_cache);
- if (drbd_request_cache)
- kmem_cache_destroy(drbd_request_cache);
- if (drbd_bm_ext_cache)
- kmem_cache_destroy(drbd_bm_ext_cache);
- if (drbd_al_ext_cache)
- kmem_cache_destroy(drbd_al_ext_cache);
+ kmem_cache_destroy(drbd_ee_cache);
+ kmem_cache_destroy(drbd_request_cache);
+ kmem_cache_destroy(drbd_bm_ext_cache);
+ kmem_cache_destroy(drbd_al_ext_cache);
drbd_ee_cache = NULL;
drbd_request_cache = NULL;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index be9450f5ad1c..75f6b47169e6 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2674,8 +2674,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
if (c_min_rate == 0)
return false;
- curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
- (int)part_stat_read(&disk->part0, sectors[1]) -
+ curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
atomic_read(&device->rs_sect_ev);
if (atomic_read(&device->ap_actlog_cnt)
@@ -2790,6 +2789,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
then we would do something smarter here than reading
the block... */
peer_req->flags |= EE_RS_THIN_REQ;
+ /* fall through */
case P_RS_DATA_REQUEST:
peer_req->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
@@ -2968,6 +2968,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
/* Else fall through to one of the other strategies... */
drbd_warn(device, "Discard younger/older primary did not find a decision\n"
"Using discard-least-changes instead\n");
+ /* fall through */
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
@@ -2979,6 +2980,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
}
if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
+ /* else: fall through */
case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
rv = -1;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index d146fedc38bb..19cac36e9737 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -38,7 +38,7 @@ static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request
{
struct request_queue *q = device->rq_queue;
- generic_start_io_acct(q, bio_data_dir(req->master_bio),
+ generic_start_io_acct(q, bio_op(req->master_bio),
req->i.size >> 9, &device->vdisk->part0);
}
@@ -47,7 +47,7 @@ static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *r
{
struct request_queue *q = device->rq_queue;
- generic_end_io_acct(q, bio_data_dir(req->master_bio),
+ generic_end_io_acct(q, bio_op(req->master_bio),
&device->vdisk->part0, req->start_jif);
}
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 5e793dd7adfb..b8f77e83d456 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1690,9 +1690,7 @@ void drbd_rs_controller_reset(struct drbd_device *device)
atomic_set(&device->rs_sect_in, 0);
atomic_set(&device->rs_sect_ev, 0);
device->rs_in_flight = 0;
- device->rs_last_events =
- (int)part_stat_read(&disk->part0, sectors[0]) +
- (int)part_stat_read(&disk->part0, sectors[1]);
+ device->rs_last_events = (int)part_stat_read_accum(&disk->part0, sectors);
/* Updating the RCU protected object in place is necessary since
this function gets called from atomic context.
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 8871b5044d9e..48f622728ce6 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1461,7 +1461,6 @@ static void setup_rw_floppy(void)
int i;
int r;
int flags;
- int dflags;
unsigned long ready_date;
void (*function)(void);
@@ -1485,8 +1484,6 @@ static void setup_rw_floppy(void)
if (fd_wait_for_completion(ready_date, function))
return;
}
- dflags = DRS->flags;
-
if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
setup_DMA();
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 4cb1d1be3cfb..ea9debf59b22 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -690,7 +690,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
unsigned int arg)
{
struct file *file, *old_file;
- struct inode *inode;
int error;
error = -ENXIO;
@@ -711,7 +710,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (error)
goto out_putf;
- inode = file->f_mapping->host;
old_file = lo->lo_backing_file;
error = -EINVAL;
@@ -1611,6 +1609,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
case LOOP_GET_STATUS64:
case LOOP_SET_STATUS64:
arg = (unsigned long) compat_ptr(arg);
+ /* fall through */
case LOOP_SET_FD:
case LOOP_CHANGE_FD:
case LOOP_SET_BLOCK_SIZE:
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index c73626decb46..db253cd5b32a 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2575,8 +2575,7 @@ static int mtip_hw_debugfs_init(struct driver_data *dd)
static void mtip_hw_debugfs_exit(struct driver_data *dd)
{
- if (dd->dfs_node)
- debugfs_remove_recursive(dd->dfs_node);
+ debugfs_remove_recursive(dd->dfs_node);
}
/*
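The guard dropped here is redundant because debugfs_remove_recursive() tolerates NULL (and error) dentries itself. A paraphrased sketch of the callee's entry check (from memory of fs/debugfs/inode.c; the exact body may differ):

	void debugfs_remove_recursive(struct dentry *dentry)
	{
		if (IS_ERR_OR_NULL(dentry))
			return;
		/* ... walk the subtree and remove each entry ... */
	}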
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3fb95c8d9fd8..3863c00372bb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1633,7 +1633,7 @@ static int find_free_cb(int id, void *ptr, void *data)
}
/* Netlink interface. */
-static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
+static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
[NBD_ATTR_INDEX] = { .type = NLA_U32 },
[NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
[NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
@@ -1645,14 +1645,14 @@ static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
[NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
};
-static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
+static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
[NBD_SOCK_FD] = { .type = NLA_U32 },
};
/* We don't use this right now since we don't parse the incoming list, but we
* still want it here so userspace knows what to expect.
*/
-static struct nla_policy __attribute__((unused))
+static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
[NBD_DEVICE_INDEX] = { .type = NLA_U32 },
[NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
new file mode 100644
index 000000000000..d81781f22dba
--- /dev/null
+++ b/drivers/block/null_blk.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BLK_NULL_BLK_H
+#define __BLK_NULL_BLK_H
+
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/blk-mq.h>
+#include <linux/hrtimer.h>
+#include <linux/configfs.h>
+#include <linux/badblocks.h>
+#include <linux/fault-inject.h>
+
+struct nullb_cmd {
+ struct list_head list;
+ struct llist_node ll_list;
+ struct __call_single_data csd;
+ struct request *rq;
+ struct bio *bio;
+ unsigned int tag;
+ blk_status_t error;
+ struct nullb_queue *nq;
+ struct hrtimer timer;
+};
+
+struct nullb_queue {
+ unsigned long *tag_map;
+ wait_queue_head_t wait;
+ unsigned int queue_depth;
+ struct nullb_device *dev;
+ unsigned int requeue_selection;
+
+ struct nullb_cmd *cmds;
+};
+
+struct nullb_device {
+ struct nullb *nullb;
+ struct config_item item;
+ struct radix_tree_root data; /* data stored in the disk */
+ struct radix_tree_root cache; /* disk cache data */
+ unsigned long flags; /* device flags */
+ unsigned int curr_cache;
+ struct badblocks badblocks;
+
+ unsigned int nr_zones;
+ struct blk_zone *zones;
+ sector_t zone_size_sects;
+
+ unsigned long size; /* device size in MB */
+ unsigned long completion_nsec; /* time in ns to complete a request */
+ unsigned long cache_size; /* disk cache size in MB */
+ unsigned long zone_size; /* zone size in MB if device is zoned */
+ unsigned int submit_queues; /* number of submission queues */
+ unsigned int home_node; /* home node for the device */
+ unsigned int queue_mode; /* block interface */
+ unsigned int blocksize; /* block size */
+ unsigned int irqmode; /* IRQ completion handler */
+ unsigned int hw_queue_depth; /* queue depth */
+ unsigned int index; /* index of the disk, only valid with a disk */
+ unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
+ bool blocking; /* blocking blk-mq device */
+ bool use_per_node_hctx; /* use per-node allocation for hardware context */
+ bool power; /* power on/off the device */
+ bool memory_backed; /* if data is stored in memory */
+ bool discard; /* if support discard */
+ bool zoned; /* if device is zoned */
+};
+
+struct nullb {
+ struct nullb_device *dev;
+ struct list_head list;
+ unsigned int index;
+ struct request_queue *q;
+ struct gendisk *disk;
+ struct blk_mq_tag_set *tag_set;
+ struct blk_mq_tag_set __tag_set;
+ unsigned int queue_depth;
+ atomic_long_t cur_bytes;
+ struct hrtimer bw_timer;
+ unsigned long cache_flush_pos;
+ spinlock_t lock;
+
+ struct nullb_queue *queues;
+ unsigned int nr_queues;
+ char disk_name[DISK_NAME_LEN];
+};
+
+#ifdef CONFIG_BLK_DEV_ZONED
+int null_zone_init(struct nullb_device *dev);
+void null_zone_exit(struct nullb_device *dev);
+blk_status_t null_zone_report(struct nullb *nullb,
+ struct nullb_cmd *cmd);
+void null_zone_write(struct nullb_cmd *cmd);
+void null_zone_reset(struct nullb_cmd *cmd);
+#else
+static inline int null_zone_init(struct nullb_device *dev)
+{
+ return -EINVAL;
+}
+static inline void null_zone_exit(struct nullb_device *dev) {}
+static inline blk_status_t null_zone_report(struct nullb *nullb,
+ struct nullb_cmd *cmd)
+{
+ return BLK_STS_NOTSUPP;
+}
+static inline void null_zone_write(struct nullb_cmd *cmd) {}
+static inline void null_zone_reset(struct nullb_cmd *cmd) {}
+#endif /* CONFIG_BLK_DEV_ZONED */
+#endif /* __NULL_BLK_H */
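The #else branch above is the usual kernel stub pattern: static inline no-ops keep every caller free of #ifdef CONFIG_BLK_DEV_ZONED. A hedged caller-side sketch of what that buys:

	/* compiles unchanged whether or not CONFIG_BLK_DEV_ZONED is set;
	 * with the stubs in effect, null_zone_init() returns -EINVAL so a
	 * zoned device can never come up, and null_zone_report() would
	 * answer BLK_STS_NOTSUPP if it were ever reached */
	if (dev->zoned) {
		rv = null_zone_init(dev);
		if (rv)
			goto out_cleanup_blk_queue;
	}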
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk_main.c
index 042c778e5a4e..6127e3ff7b4b 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk_main.c
@@ -7,14 +7,8 @@
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
-#include <linux/blkdev.h>
#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/blk-mq.h>
-#include <linux/hrtimer.h>
-#include <linux/configfs.h>
-#include <linux/badblocks.h>
-#include <linux/fault-inject.h>
+#include "null_blk.h"
#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
@@ -35,28 +29,6 @@ static inline u64 mb_per_tick(int mbps)
return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}
-struct nullb_cmd {
- struct list_head list;
- struct llist_node ll_list;
- struct __call_single_data csd;
- struct request *rq;
- struct bio *bio;
- unsigned int tag;
- blk_status_t error;
- struct nullb_queue *nq;
- struct hrtimer timer;
-};
-
-struct nullb_queue {
- unsigned long *tag_map;
- wait_queue_head_t wait;
- unsigned int queue_depth;
- struct nullb_device *dev;
- unsigned int requeue_selection;
-
- struct nullb_cmd *cmds;
-};
-
/*
* Status flags for nullb_device.
*
@@ -92,52 +64,6 @@ struct nullb_page {
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)
-struct nullb_device {
- struct nullb *nullb;
- struct config_item item;
- struct radix_tree_root data; /* data stored in the disk */
- struct radix_tree_root cache; /* disk cache data */
- unsigned long flags; /* device flags */
- unsigned int curr_cache;
- struct badblocks badblocks;
-
- unsigned long size; /* device size in MB */
- unsigned long completion_nsec; /* time in ns to complete a request */
- unsigned long cache_size; /* disk cache size in MB */
- unsigned int submit_queues; /* number of submission queues */
- unsigned int home_node; /* home node for the device */
- unsigned int queue_mode; /* block interface */
- unsigned int blocksize; /* block size */
- unsigned int irqmode; /* IRQ completion handler */
- unsigned int hw_queue_depth; /* queue depth */
- unsigned int index; /* index of the disk, only valid with a disk */
- unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
- bool blocking; /* blocking blk-mq device */
- bool use_per_node_hctx; /* use per-node allocation for hardware context */
- bool power; /* power on/off the device */
- bool memory_backed; /* if data is stored in memory */
- bool discard; /* if support discard */
-};
-
-struct nullb {
- struct nullb_device *dev;
- struct list_head list;
- unsigned int index;
- struct request_queue *q;
- struct gendisk *disk;
- struct blk_mq_tag_set *tag_set;
- struct blk_mq_tag_set __tag_set;
- unsigned int queue_depth;
- atomic_long_t cur_bytes;
- struct hrtimer bw_timer;
- unsigned long cache_flush_pos;
- spinlock_t lock;
-
- struct nullb_queue *queues;
- unsigned int nr_queues;
- char disk_name[DISK_NAME_LEN];
-};
-
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
@@ -254,6 +180,14 @@ static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
+static bool g_zoned;
+module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");
+
+static unsigned long g_zone_size = 256;
+module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
MODULE_PARM_DESC(zone_size, "Zone size in MB when the block device is zoned. Must be a power of two. Default: 256");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
@@ -357,6 +291,8 @@ NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
+NULLB_DEVICE_ATTR(zoned, bool);
+NULLB_DEVICE_ATTR(zone_size, ulong);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -390,6 +326,7 @@ static ssize_t nullb_device_power_store(struct config_item *item,
null_del_dev(dev->nullb);
mutex_unlock(&lock);
clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+ clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
}
return count;
@@ -468,6 +405,8 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_mbps,
&nullb_device_attr_cache_size,
&nullb_device_attr_badblocks,
+ &nullb_device_attr_zoned,
+ &nullb_device_attr_zone_size,
NULL,
};
@@ -520,7 +459,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
- return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks\n");
+ return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -579,6 +518,8 @@ static struct nullb_device *null_alloc_dev(void)
dev->hw_queue_depth = g_hw_queue_depth;
dev->blocking = g_blocking;
dev->use_per_node_hctx = g_use_per_node_hctx;
+ dev->zoned = g_zoned;
+ dev->zone_size = g_zone_size;
return dev;
}
@@ -587,6 +528,7 @@ static void null_free_dev(struct nullb_device *dev)
if (!dev)
return;
+ null_zone_exit(dev);
badblocks_exit(&dev->badblocks);
kfree(dev);
}
@@ -862,7 +804,9 @@ static struct nullb_page *null_lookup_page(struct nullb *nullb,
}
static struct nullb_page *null_insert_page(struct nullb *nullb,
- sector_t sector, bool ignore_cache)
+ sector_t sector, bool ignore_cache)
+ __releases(&nullb->lock)
+ __acquires(&nullb->lock)
{
u64 idx;
struct nullb_page *t_page;
@@ -1219,6 +1163,11 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
struct nullb *nullb = dev->nullb;
int err = 0;
+ if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
+ cmd->error = null_zone_report(nullb, cmd);
+ goto out;
+ }
+
if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
struct request *rq = cmd->rq;
@@ -1283,6 +1232,13 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
}
}
cmd->error = errno_to_blk_status(err);
+
+ if (!cmd->error && dev->zoned) {
+ if (req_op(cmd->rq) == REQ_OP_WRITE)
+ null_zone_write(cmd);
+ else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET)
+ null_zone_reset(cmd);
+ }
out:
/* Complete IO by inline, softirq or timer */
switch (dev->irqmode) {
@@ -1810,6 +1766,15 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_flush_queueable(nullb->q, true);
}
+ if (dev->zoned) {
+ rv = null_zone_init(dev);
+ if (rv)
+ goto out_cleanup_blk_queue;
+
+ blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
+ nullb->q->limits.zoned = BLK_ZONED_HM;
+ }
+
nullb->q->queuedata = nullb;
blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
@@ -1828,13 +1793,16 @@ static int null_add_dev(struct nullb_device *dev)
rv = null_gendisk_register(nullb);
if (rv)
- goto out_cleanup_blk_queue;
+ goto out_cleanup_zone;
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
mutex_unlock(&lock);
return 0;
+out_cleanup_zone:
+ if (dev->zoned)
+ null_zone_exit(dev);
out_cleanup_blk_queue:
blk_cleanup_queue(nullb->q);
out_cleanup_tags:
@@ -1861,6 +1829,11 @@ static int __init null_init(void)
g_bs = PAGE_SIZE;
}
+ if (!is_power_of_2(g_zone_size)) {
+ pr_err("null_blk: zone_size must be power-of-two\n");
+ return -EINVAL;
+ }
+
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.\n",
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
new file mode 100644
index 000000000000..a979ca00d7be
--- /dev/null
+++ b/drivers/block/null_blk_zoned.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/vmalloc.h>
+#include "null_blk.h"
+
+/* Shift converting a zone size in MB to 512-byte sectors (1 MB = 2^11 sectors). */
+#define ZONE_SIZE_SHIFT 11
+
+static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
+{
+ return sect >> ilog2(dev->zone_size_sects);
+}
+
+int null_zone_init(struct nullb_device *dev)
+{
+ sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
+ sector_t sector = 0;
+ unsigned int i;
+
+ if (!is_power_of_2(dev->zone_size)) {
+ pr_err("null_blk: zone_size must be power-of-two\n");
+ return -EINVAL;
+ }
+
+ dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
+ dev->nr_zones = dev_size >>
+ (SECTOR_SHIFT + ilog2(dev->zone_size_sects));
+ dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!dev->zones)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->nr_zones; i++) {
+ struct blk_zone *zone = &dev->zones[i];
+
+ zone->start = zone->wp = sector;
+ zone->len = dev->zone_size_sects;
+ zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ zone->cond = BLK_ZONE_COND_EMPTY;
+
+ sector += dev->zone_size_sects;
+ }
+
+ return 0;
+}
+
+void null_zone_exit(struct nullb_device *dev)
+{
+ kvfree(dev->zones);
+}
+
+static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
+ unsigned int zno, unsigned int nr_zones)
+{
+ struct blk_zone_report_hdr *hdr = NULL;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ void *addr;
+ unsigned int zones_to_cpy;
+
+ bio_for_each_segment(bvec, rq->bio, iter) {
+ addr = kmap_atomic(bvec.bv_page);
+
+ zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
+
+ if (!hdr) {
+ hdr = (struct blk_zone_report_hdr *)addr;
+ hdr->nr_zones = nr_zones;
+ zones_to_cpy--;
+ addr += sizeof(struct blk_zone_report_hdr);
+ }
+
+ zones_to_cpy = min_t(unsigned int, zones_to_cpy, nr_zones);
+
+ memcpy(addr, &dev->zones[zno],
+ zones_to_cpy * sizeof(struct blk_zone));
+
+ kunmap_atomic(addr);
+
+ nr_zones -= zones_to_cpy;
+ zno += zones_to_cpy;
+
+ if (!nr_zones)
+ break;
+ }
+}
+
+blk_status_t null_zone_report(struct nullb *nullb,
+ struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = nullb->dev;
+ struct request *rq = cmd->rq;
+ unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+ unsigned int nr_zones = dev->nr_zones - zno;
+ unsigned int max_zones = (blk_rq_bytes(rq) /
+ sizeof(struct blk_zone)) - 1;
+
+ nr_zones = min_t(unsigned int, nr_zones, max_zones);
+
+ null_zone_fill_rq(nullb->dev, rq, zno, nr_zones);
+
+ return BLK_STS_OK;
+}
+
+void null_zone_write(struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+ struct request *rq = cmd->rq;
+ sector_t sector = blk_rq_pos(rq);
+ unsigned int rq_sectors = blk_rq_sectors(rq);
+ unsigned int zno = null_zone_no(dev, sector);
+ struct blk_zone *zone = &dev->zones[zno];
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_FULL:
+ /* Cannot write to a full zone */
+ cmd->error = BLK_STS_IOERR;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_IMP_OPEN:
+ /* Writes must be at the write pointer position */
+ if (blk_rq_pos(rq) != zone->wp) {
+ cmd->error = BLK_STS_IOERR;
+ break;
+ }
+
+ if (zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+ zone->wp += rq_sectors;
+ if (zone->wp == zone->start + zone->len)
+ zone->cond = BLK_ZONE_COND_FULL;
+ break;
+ default:
+ /* Invalid zone condition */
+ cmd->error = BLK_STS_IOERR;
+ break;
+ }
+}
+
+void null_zone_reset(struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+ struct request *rq = cmd->rq;
+ unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+ struct blk_zone *zone = &dev->zones[zno];
+
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+}
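ZONE_SIZE_SHIFT of 11 encodes the MB-to-sector conversion: 1 MB is 2^20 bytes and a sector is 2^9 bytes, so 1 MB is 2^11 sectors. A worked sketch of the geometry for a hypothetical 1024 MB device with the default 256 MB zones:

	sector_t zone_size_sects = 256 << 11;		/* 524288 sectors per zone */
	sector_t dev_size = (sector_t)1024 * 1024 * 1024;	/* device size in bytes */
	unsigned int nr_zones =
		dev_size >> (SECTOR_SHIFT + ilog2(zone_size_sects));
						/* 2^30 >> (9 + 19) = 4 zones */

The power-of-two requirement exists so null_zone_no() can map a sector to its zone with a single shift via ilog2() instead of a division.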
diff --git a/drivers/block/paride/bpck.c b/drivers/block/paride/bpck.c
index 4f27e7392e38..f5f63ca2889d 100644
--- a/drivers/block/paride/bpck.c
+++ b/drivers/block/paride/bpck.c
@@ -347,7 +347,7 @@ static int bpck_test_proto( PIA *pi, char * scratch, int verbose )
static void bpck_read_eeprom ( PIA *pi, char * buf )
-{ int i,j,k,n,p,v,f, om, od;
+{ int i, j, k, p, v, f, om, od;
bpck_force_spp(pi);
@@ -356,7 +356,6 @@ static void bpck_read_eeprom ( PIA *pi, char * buf )
bpck_connect(pi);
- n = 0;
WR(4,0);
for (i=0;i<64;i++) {
WR(6,8);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 8961b190e256..7cf947586fe4 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -426,6 +426,7 @@ static void run_fsm(void)
pd_claimed = 1;
if (!pi_schedule_claimed(pi_current, run_fsm))
return;
+ /* fall through */
case 1:
pd_claimed = 2;
pi_current->proto->connect(pi_current);
@@ -445,6 +446,7 @@ static void run_fsm(void)
spin_unlock_irqrestore(&pd_lock, saved_flags);
if (stop)
return;
+ /* fall through */
case Hold:
schedule_fsm();
return;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b3f83cd96f33..e285413d4a75 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -67,7 +67,7 @@
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
-
+#include <linux/nospec.h>
#include <linux/uaccess.h>
#define DRIVER_NAME "pktcdvd"
@@ -748,13 +748,13 @@ static const char *sense_key_string(__u8 index)
static void pkt_dump_sense(struct pktcdvd_device *pd,
struct packet_command *cgc)
{
- struct request_sense *sense = cgc->sense;
+ struct scsi_sense_hdr *sshdr = cgc->sshdr;
- if (sense)
+ if (sshdr)
pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
CDROM_PACKET_SIZE, cgc->cmd,
- sense->sense_key, sense->asc, sense->ascq,
- sense_key_string(sense->sense_key));
+ sshdr->sense_key, sshdr->asc, sshdr->ascq,
+ sense_key_string(sshdr->sense_key));
else
pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}
@@ -787,18 +787,19 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
unsigned write_speed, unsigned read_speed)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
int ret;
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.cmd[0] = GPCMD_SET_SPEED;
cgc.cmd[2] = (read_speed >> 8) & 0xff;
cgc.cmd[3] = read_speed & 0xff;
cgc.cmd[4] = (write_speed >> 8) & 0xff;
cgc.cmd[5] = write_speed & 0xff;
- if ((ret = pkt_generic_packet(pd, &cgc)))
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
pkt_dump_sense(pd, &cgc);
return ret;
@@ -1562,7 +1563,8 @@ static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
cgc.cmd[8] = cgc.buflen = 2;
cgc.quiet = 1;
- if ((ret = pkt_generic_packet(pd, &cgc)))
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
return ret;
/* not all drives have the same disc_info length, so requeue
@@ -1591,7 +1593,8 @@ static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type,
cgc.cmd[8] = 8;
cgc.quiet = 1;
- if ((ret = pkt_generic_packet(pd, &cgc)))
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
return ret;
cgc.buflen = be16_to_cpu(ti->track_information_length) +
@@ -1612,17 +1615,20 @@ static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
__u32 last_track;
int ret = -1;
- if ((ret = pkt_get_disc_info(pd, &di)))
+ ret = pkt_get_disc_info(pd, &di);
+ if (ret)
return ret;
last_track = (di.last_track_msb << 8) | di.last_track_lsb;
- if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
+ ret = pkt_get_track_info(pd, last_track, 1, &ti);
+ if (ret)
return ret;
/* if this track is blank, try the previous. */
if (ti.blank) {
last_track--;
- if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
+ ret = pkt_get_track_info(pd, last_track, 1, &ti);
+ if (ret)
return ret;
}
@@ -1645,7 +1651,7 @@ static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
write_param_page *wp;
char buffer[128];
int ret, size;
@@ -1656,8 +1662,9 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
memset(buffer, 0, sizeof(buffer));
init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
- cgc.sense = &sense;
- if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
+ cgc.sshdr = &sshdr;
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
+ if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1671,8 +1678,9 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
* now get it all
*/
init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
- cgc.sense = &sense;
- if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
+ cgc.sshdr = &sshdr;
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
+ if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1714,7 +1722,8 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
cgc.buflen = cgc.cmd[8] = size;
- if ((ret = pkt_mode_select(pd, &cgc))) {
+ ret = pkt_mode_select(pd, &cgc);
+ if (ret) {
pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1819,7 +1828,8 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
memset(&di, 0, sizeof(disc_information));
memset(&ti, 0, sizeof(track_information));
- if ((ret = pkt_get_disc_info(pd, &di))) {
+ ret = pkt_get_disc_info(pd, &di);
+ if (ret) {
pkt_err(pd, "failed get_disc\n");
return ret;
}
@@ -1830,7 +1840,8 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
- if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
+ ret = pkt_get_track_info(pd, track, 1, &ti);
+ if (ret) {
pkt_err(pd, "failed get_track\n");
return ret;
}
@@ -1905,12 +1916,12 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
int set)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
unsigned char buf[64];
int ret;
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.buflen = pd->mode_offset + 12;
/*
@@ -1918,7 +1929,8 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
*/
cgc.quiet = 1;
- if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
+ ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
+ if (ret)
return ret;
buf[pd->mode_offset + 10] |= (!!set << 2);
@@ -1950,14 +1962,14 @@ static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
unsigned *write_speed)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
unsigned char buf[256+18];
unsigned char *cap_buf;
int ret, offset;
cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
if (ret) {
@@ -2011,13 +2023,13 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
unsigned *speed)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
unsigned char buf[64];
unsigned int size, st, sp;
int ret;
init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
cgc.cmd[1] = 2;
cgc.cmd[2] = 4; /* READ ATIP */
@@ -2032,7 +2044,7 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
size = sizeof(buf);
init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
cgc.cmd[1] = 2;
cgc.cmd[2] = 4;
@@ -2083,17 +2095,18 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
struct packet_command cgc;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
int ret;
pkt_dbg(2, pd, "Performing OPC\n");
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.sense = &sense;
+ cgc.sshdr = &sshdr;
cgc.timeout = 60*HZ;
cgc.cmd[0] = GPCMD_SEND_OPC;
cgc.cmd[1] = 1;
- if ((ret = pkt_generic_packet(pd, &cgc)))
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret)
pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -2103,19 +2116,22 @@ static int pkt_open_write(struct pktcdvd_device *pd)
int ret;
unsigned int write_speed, media_write_speed, read_speed;
- if ((ret = pkt_probe_settings(pd))) {
+ ret = pkt_probe_settings(pd);
+ if (ret) {
pkt_dbg(2, pd, "failed probe\n");
return ret;
}
- if ((ret = pkt_set_write_settings(pd))) {
+ ret = pkt_set_write_settings(pd);
+ if (ret) {
pkt_dbg(1, pd, "failed saving write settings\n");
return -EIO;
}
pkt_write_caching(pd, USE_WCACHING);
- if ((ret = pkt_get_max_speed(pd, &write_speed)))
+ ret = pkt_get_max_speed(pd, &write_speed);
+ if (ret)
write_speed = 16 * 177;
switch (pd->mmc3_profile) {
case 0x13: /* DVD-RW */
@@ -2124,7 +2140,8 @@ static int pkt_open_write(struct pktcdvd_device *pd)
pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
break;
default:
- if ((ret = pkt_media_speed(pd, &media_write_speed)))
+ ret = pkt_media_speed(pd, &media_write_speed);
+ if (ret)
media_write_speed = 16;
write_speed = min(write_speed, media_write_speed * 177);
pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
@@ -2132,14 +2149,16 @@ static int pkt_open_write(struct pktcdvd_device *pd)
}
read_speed = write_speed;
- if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
+ ret = pkt_set_speed(pd, write_speed, read_speed);
+ if (ret) {
pkt_dbg(1, pd, "couldn't set write speed\n");
return -EIO;
}
pd->write_speed = write_speed;
pd->read_speed = read_speed;
- if ((ret = pkt_perform_opc(pd))) {
+ ret = pkt_perform_opc(pd);
+ if (ret) {
pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
}
@@ -2161,10 +2180,12 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
* so bdget() can't fail.
*/
bdget(pd->bdev->bd_dev);
- if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
+ ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd);
+ if (ret)
goto out;
- if ((ret = pkt_get_last_written(pd, &lba))) {
+ ret = pkt_get_last_written(pd, &lba);
+ if (ret) {
pkt_err(pd, "pkt_get_last_written failed\n");
goto out_putdev;
}
@@ -2175,7 +2196,8 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
q = bdev_get_queue(pd->bdev);
if (write) {
- if ((ret = pkt_open_write(pd)))
+ ret = pkt_open_write(pd);
+ if (ret)
goto out_putdev;
/*
* Some CDRW drives can not handle writes larger than one packet,
@@ -2190,7 +2212,8 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
clear_bit(PACKET_WRITABLE, &pd->flags);
}
- if ((ret = pkt_set_segment_merging(pd, q)))
+ ret = pkt_set_segment_merging(pd, q);
+ if (ret)
goto out_putdev;
if (write) {
@@ -2231,6 +2254,8 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
if (dev_minor >= MAX_WRITERS)
return NULL;
+
+ dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
return pkt_devs[dev_minor];
}
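The array_index_nospec() call is the standard Spectre-v1 hardening: even after the bounds check, the CPU may speculatively execute the array load with an out-of-range dev_minor, and the helper clamps the index so that speculation cannot read past pkt_devs[]. The general shape of the pattern:

	if (idx >= size)
		return NULL;			/* architectural bounds check */
	idx = array_index_nospec(idx, size);	/* stays in 0..size-1 even speculatively */
	return table[idx];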
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index fa0729c1e776..d81c653b9bf6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -61,7 +61,7 @@ static int atomic_inc_return_safe(atomic_t *v)
{
unsigned int counter;
- counter = (unsigned int)__atomic_add_unless(v, 1, 0);
+ counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
if (counter <= (unsigned int)INT_MAX)
return (int)counter;
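atomic_fetch_add_unless() is the new name for __atomic_add_unless(): it returns the old value and performs the add only when that old value differs from the final argument. A small sketch of the semantics rbd relies on (a counter pinned at 0 is never resurrected):

	atomic_t v = ATOMIC_INIT(5);
	int old;

	old = atomic_fetch_add_unless(&v, 1, 0);	/* old == 5, v becomes 6 */
	atomic_set(&v, 0);
	old = atomic_fetch_add_unless(&v, 1, 0);	/* old == 0, v stays 0 */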
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index dddb3f2490b6..1a92f9e65937 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -112,7 +112,7 @@ static const struct block_device_operations rsxx_fops = {
static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
{
- generic_start_io_acct(card->queue, bio_data_dir(bio), bio_sectors(bio),
+ generic_start_io_acct(card->queue, bio_op(bio), bio_sectors(bio),
&card->gendisk->part0);
}
@@ -120,8 +120,8 @@ static void disk_stats_complete(struct rsxx_cardinfo *card,
struct bio *bio,
unsigned long start_time)
{
- generic_end_io_acct(card->queue, bio_data_dir(bio),
- &card->gendisk->part0, start_time);
+ generic_end_io_acct(card->queue, bio_op(bio),
+ &card->gendisk->part0, start_time);
}
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index bc7aea6d7b7c..87b9e7fbf062 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -657,8 +657,8 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
if (unlikely(skdev->dbg_level > 1)) {
dev_dbg(&skdev->pdev->dev,
- "skreq=%x sksg_list=%p sksg_dma=%llx\n",
- skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
+ "skreq=%x sksg_list=%p sksg_dma=%pad\n",
+ skreq->id, skreq->sksg_list, &skreq->sksg_dma_address);
for (i = 0; i < n_sg; i++) {
struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
@@ -1190,8 +1190,8 @@ static void skd_send_fitmsg(struct skd_device *skdev,
{
u64 qcmd;
- dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
- skmsg->mb_dma_address, skd_in_flight(skdev));
+ dev_dbg(&skdev->pdev->dev, "dma address %pad, busy=%d\n",
+ &skmsg->mb_dma_address, skd_in_flight(skdev));
dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
qcmd = skmsg->mb_dma_address;
@@ -1250,9 +1250,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
}
dev_dbg(&skdev->pdev->dev,
- "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
+ "skspcl=%p id=%04x sksg_list=%p sksg_dma=%pad\n",
skspcl, skspcl->req.id, skspcl->req.sksg_list,
- skspcl->req.sksg_dma_address);
+ &skspcl->req.sksg_dma_address);
for (i = 0; i < skspcl->req.n_sg; i++) {
struct fit_sg_descriptor *sgd =
&skspcl->req.sksg_list[i];
@@ -2685,8 +2685,8 @@ static int skd_cons_skmsg(struct skd_device *skdev)
WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
(FIT_QCMD_ALIGN - 1),
- "not aligned: msg_buf %p mb_dma_address %#llx\n",
- skmsg->msg_buf, skmsg->mb_dma_address);
+ "not aligned: msg_buf %p mb_dma_address %pad\n",
+ skmsg->msg_buf, &skmsg->mb_dma_address);
memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
}
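The switch to %pad is a portability fix: dma_addr_t is 32 bits on some configurations and 64 bits on others, so passing it for %llx reads the varargs wrongly in the 32-bit case. %pad instead takes a pointer to the dma_addr_t and prints it at the right width. Sketch:

	dma_addr_t dma = 0;

	dev_dbg(dev, "dma address 0x%llx\n", dma);	/* broken when dma_addr_t is 32-bit */
	dev_dbg(dev, "dma address %pad\n", &dma);	/* correct on every configuration */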
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b5cedccb5d7d..8986adab9bf5 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -251,14 +251,9 @@ static DEFINE_SPINLOCK(minor_lock);
#define GRANTS_PER_INDIRECT_FRAME \
(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))
-#define PSEGS_PER_INDIRECT_FRAME \
- (GRANTS_INDIRECT_FRAME / GRANTS_PSEGS)
-
#define INDIRECT_GREFS(_grants) \
DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)
-#define GREFS(_psegs) ((_psegs) * GRANTS_PER_PSEG)
-
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
static int negotiate_mq(struct blkfront_info *info);
@@ -1441,7 +1436,7 @@ static bool blkif_completion(unsigned long *id,
/* Wait the second response if not yet here. */
if (s2->status == REQ_WAITING)
- return 0;
+ return false;
bret->status = blkif_get_final_status(s->status,
s2->status);
@@ -1542,7 +1537,7 @@ static bool blkif_completion(unsigned long *id,
}
}
- return 1;
+ return true;
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index a390c6d4f72d..c7acf74253a1 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1287,17 +1287,16 @@ static void zram_bio_discard(struct zram *zram, u32 index,
* Returns 1 if IO request was successfully submitted.
*/
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, bool is_write, struct bio *bio)
+ int offset, unsigned int op, struct bio *bio)
{
unsigned long start_time = jiffies;
- int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
struct request_queue *q = zram->disk->queue;
int ret;
- generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT,
+ generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT,
&zram->disk->part0);
- if (!is_write) {
+ if (!op_is_write(op)) {
atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset, bio);
flush_dcache_page(bvec->bv_page);
@@ -1306,14 +1305,14 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = zram_bvec_write(zram, bvec, index, offset, bio);
}
- generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time);
+ generic_end_io_acct(q, op, &zram->disk->part0, start_time);
zram_slot_lock(zram, index);
zram_accessed(zram, index);
zram_slot_unlock(zram, index);
if (unlikely(ret < 0)) {
- if (!is_write)
+ if (!op_is_write(op))
atomic64_inc(&zram->stats.failed_reads);
else
atomic64_inc(&zram->stats.failed_writes);
@@ -1351,7 +1350,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
unwritten);
if (zram_bvec_rw(zram, &bv, index, offset,
- op_is_write(bio_op(bio)), bio) < 0)
+ bio_op(bio), bio) < 0)
goto out;
bv.bv_offset += bv.bv_len;
@@ -1403,7 +1402,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, bool is_write)
+ struct page *page, unsigned int op)
{
int offset, ret;
u32 index;
@@ -1427,7 +1426,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
- ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL);
+ ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
out:
/*
* If I/O fails, just return error(ie, non-zero) without
@@ -1442,7 +1441,7 @@ out:
switch (ret) {
case 0:
- page_endio(page, is_write, 0);
+ page_endio(page, op_is_write(op), 0);
break;
case 1:
ret = 0;
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index f3c643a0473c..2df11cc08a46 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -159,6 +159,7 @@ config BT_HCIUART_LL
config BT_HCIUART_3WIRE
bool "Three-wire UART (H5) protocol support"
depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
help
The HCI Three-wire UART Transport Layer makes it possible to
use the Bluetooth HCI over a serial port interface. The HCI
@@ -194,6 +195,19 @@ config BT_HCIUART_BCM
Say Y here to compile support for Broadcom protocol.
+config BT_HCIUART_RTL
+ bool "Realtek protocol support"
+ depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
+ depends on GPIOLIB
+ select BT_HCIUART_3WIRE
+ select BT_RTL
+ help
+ The Realtek protocol support enables Bluetooth HCI over the
+ 3-Wire serial port interface for Realtek Bluetooth controllers.
+
+ Say Y here to compile support for the Realtek protocol.
+
config BT_HCIUART_QCA
bool "Qualcomm Atheros protocol support"
depends on BT_HCIUART
@@ -364,6 +378,17 @@ config BT_WILINK
Say Y here to compile support for Texas Instrument's WiLink7 driver
into the kernel or say M to compile it as module (btwilink).
+config BT_MTKUART
+ tristate "MediaTek HCI UART driver"
+ depends on SERIAL_DEV_BUS
+ help
+ MediaTek Bluetooth HCI UART driver.
+ This driver is required if you want to use MediaTek Bluetooth
+ with a serial interface.
+
+ Say Y here to compile support for MediaTek Bluetooth UART devices
+ into the kernel or say M to compile it as module (btmtkuart).
+
config BT_QCOMSMD
tristate "Qualcomm SMD based HCI support"
depends on RPMSG || (COMPILE_TEST && RPMSG=n)
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index ec16c55eb6e9..b7e393cfc1e3 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
obj-$(CONFIG_BT_WILINK) += btwilink.o
+obj-$(CONFIG_BT_MTKUART) += btmtkuart.o
obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
obj-$(CONFIG_BT_BCM) += btbcm.o
obj-$(CONFIG_BT_RTL) += btrtl.o
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index ab090a313a5f..0588639b899a 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -490,7 +490,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
count = skb->len;
/* Max HCI frame size seems to be 1511 + 1 */
- nskb = bt_skb_alloc(count + 32, GFP_ATOMIC);
+ nskb = bt_skb_alloc(count + 32, GFP_KERNEL);
if (!nskb) {
BT_ERR("Can't allocate memory for new packet");
return -ENOMEM;
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 82437a69f99c..cc6e56223656 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -565,7 +565,7 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
/* Ericsson baud rate command */
unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 };
- skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_KERNEL);
if (!skb) {
BT_ERR("Can't allocate mem for new packet");
return -1;
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index c6f7cc57db14..d1c2adf08576 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -289,7 +289,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb->dev = (void *) hdev;
- urb = usb_alloc_urb(0, GFP_ATOMIC);
+ urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return -ENOMEM;
@@ -298,7 +298,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
switch (hci_skb_pkt_type(skb)) {
case HCI_COMMAND_PKT:
- dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
+ dr = kmalloc(sizeof(*dr), GFP_KERNEL);
if (!dr) {
usb_free_urb(urb);
return -ENOMEM;
@@ -343,7 +343,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
usb_anchor_urb(urb, &data->tx_anchor);
- err = usb_submit_urb(urb, GFP_ATOMIC);
+ err = usb_submit_urb(urb, GFP_KERNEL);
if (err < 0) {
bt_dev_err(hdev, "urb %p submission failed", urb);
kfree(urb->setup_packet);
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 888bac49a87b..fb3d03928460 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -718,7 +718,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
}
/* Allocate buffer */
- skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC);
+ skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_KERNEL);
if (!skb) {
BT_ERR("No free skb");
ret = -ENOMEM;
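The GFP_ATOMIC to GFP_KERNEL conversions in these Bluetooth drivers apply where the allocation runs in process context and may sleep; GFP_KERNEL then lets the allocator reclaim memory instead of failing outright under pressure. The rule of thumb, sketched:

	/* process context, sleeping allowed: prefer GFP_KERNEL */
	skb = bt_skb_alloc(len, GFP_KERNEL);

	/* hard IRQ, softirq, or under a spinlock: must stay GFP_ATOMIC */
	skb = bt_skb_alloc(len, GFP_ATOMIC);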
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
new file mode 100644
index 000000000000..ed2a5c7cb77f
--- /dev/null
+++ b/drivers/bluetooth/btmtkuart.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 MediaTek Inc.
+
+/*
+ * Bluetooth support for MediaTek serial devices
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/serdev.h>
+#include <linux/skbuff.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "h4_recv.h"
+
+#define VERSION "0.1"
+
+#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin"
+
+#define MTK_STP_TLR_SIZE 2
+
+#define BTMTKUART_TX_STATE_ACTIVE 1
+#define BTMTKUART_TX_STATE_WAKEUP 2
+#define BTMTKUART_TX_WAIT_VND_EVT 3
+
+enum {
+ MTK_WMT_PATCH_DWNLD = 0x1,
+ MTK_WMT_FUNC_CTRL = 0x6,
+ MTK_WMT_RST = 0x7
+};
+
+struct mtk_stp_hdr {
+ u8 prefix;
+ __be16 dlen;
+ u8 cs;
+} __packed;
+
+struct mtk_wmt_hdr {
+ u8 dir;
+ u8 op;
+ __le16 dlen;
+ u8 flag;
+} __packed;
+
+struct mtk_hci_wmt_cmd {
+ struct mtk_wmt_hdr hdr;
+ u8 data[256];
+} __packed;
+
+struct btmtkuart_dev {
+ struct hci_dev *hdev;
+ struct serdev_device *serdev;
+ struct clk *clk;
+
+ struct work_struct tx_work;
+ unsigned long tx_state;
+ struct sk_buff_head txq;
+
+ struct sk_buff *rx_skb;
+
+ u8 stp_pad[6];
+ u8 stp_cursor;
+ u16 stp_dlen;
+};
+
+static int mtk_hci_wmt_sync(struct hci_dev *hdev, u8 op, u8 flag, u16 plen,
+ const void *param)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct mtk_hci_wmt_cmd wc;
+ struct mtk_wmt_hdr *hdr;
+ u32 hlen;
+ int err;
+
+ hlen = sizeof(*hdr) + plen;
+ if (hlen > 255)
+ return -EINVAL;
+
+ hdr = (struct mtk_wmt_hdr *)&wc;
+ hdr->dir = 1;
+ hdr->op = op;
+ hdr->dlen = cpu_to_le16(plen + 1);
+ hdr->flag = flag;
+ memcpy(wc.data, param, plen);
+
+ set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
+
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+ if (err < 0) {
+ clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
+ return err;
+ }
+
+ /* The vendor specific WMT commands are all answered by a vendor
+ * specific event and will not have the Command Status or Command
+ * Complete as with usual HCI command flow control.
+ *
+ * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT
+ * state to be cleared. The driver-specific event receive routine
+ * will clear that state and with that indicate completion of the
+ * WMT command.
+ */
+ err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT,
+ TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
+ if (err == -EINTR) {
+ bt_dev_err(hdev, "Execution of wmt command interrupted");
+ return err;
+ }
+
+ if (err) {
+ bt_dev_err(hdev, "Execution of wmt command timed out");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mtk_setup_fw(struct hci_dev *hdev)
+{
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ size_t fw_size;
+ int err = 0, dlen;
+ u8 flag;
+
+ err = request_firmware(&fw, FIRMWARE_MT7622, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
+ return err;
+ }
+
+ fw_ptr = fw->data;
+ fw_size = fw->size;
+
+ /* The firmware patch header is 30 bytes and must be skipped */
+ if (fw_size < 30) {
+ err = -EINVAL;
+ goto free_fw;
+ }
+
+ fw_size -= 30;
+ fw_ptr += 30;
+ flag = 1;
+
+ while (fw_size > 0) {
+ dlen = min_t(int, 250, fw_size);
+
+ /* Tell device the position in sequence */
+ if (fw_size - dlen <= 0)
+ flag = 3;
+ else if (fw_size < fw->size - 30)
+ flag = 2;
+
+ err = mtk_hci_wmt_sync(hdev, MTK_WMT_PATCH_DWNLD, flag, dlen,
+ fw_ptr);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ break;
+ }
+
+ fw_size -= dlen;
+ fw_ptr += dlen;
+ }
+
+free_fw:
+ release_firmware(fw);
+
+ return err;
+}
+
+static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct hci_event_hdr *hdr = (void *)skb->data;
+ int err;
+
+ /* Fix up the vendor event id to 0xff (vendor specific) instead
+ * of 0xe4 so that events sent via the monitoring socket can be
+ * parsed properly.
+ */
+ if (hdr->evt == 0xe4)
+ hdr->evt = HCI_EV_VENDOR;
+
+ err = hci_recv_frame(hdev, skb);
+
+ if (hdr->evt == HCI_EV_VENDOR) {
+ if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
+ &bdev->tx_state)) {
+ /* Barrier to sync with other CPUs */
+ smp_mb__after_atomic();
+ wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT);
+ }
+ }
+
+ return err;
+}
+
+static const struct h4_recv_pkt mtk_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = btmtkuart_recv_event },
+};
+
+static void btmtkuart_tx_work(struct work_struct *work)
+{
+ struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
+ tx_work);
+ struct serdev_device *serdev = bdev->serdev;
+ struct hci_dev *hdev = bdev->hdev;
+
+ while (1) {
+ clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);
+
+ while (1) {
+ struct sk_buff *skb = skb_dequeue(&bdev->txq);
+ int len;
+
+ if (!skb)
+ break;
+
+ len = serdev_device_write_buf(serdev, skb->data,
+ skb->len);
+ hdev->stat.byte_tx += len;
+
+ skb_pull(skb, len);
+ if (skb->len > 0) {
+ skb_queue_head(&bdev->txq, skb);
+ break;
+ }
+
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_COMMAND_PKT:
+ hdev->stat.cmd_tx++;
+ break;
+ case HCI_ACLDATA_PKT:
+ hdev->stat.acl_tx++;
+ break;
+ case HCI_SCODATA_PKT:
+ hdev->stat.sco_tx++;
+ break;
+ }
+
+ kfree_skb(skb);
+ }
+
+ if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state))
+ break;
+ }
+
+ clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state);
+}
+
+static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev)
+{
+ if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state))
+ set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);
+
+ schedule_work(&bdev->tx_work);
+}
+
+static const unsigned char *
+mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
+ int *sz_h4)
+{
+ struct mtk_stp_hdr *shdr;
+
+ /* The cursor is reset once all the STP data has been consumed */
+ if (!bdev->stp_dlen && bdev->stp_cursor >= 6)
+ bdev->stp_cursor = 0;
+
+ /* Fill the pad until the full STP header info is obtained */
+ while (bdev->stp_cursor < 6 && count > 0) {
+ bdev->stp_pad[bdev->stp_cursor] = *data;
+ bdev->stp_cursor++;
+ data++;
+ count--;
+ }
+
+ /* Retrieve the STP info and sanity-check it */
+ if (!bdev->stp_dlen && bdev->stp_cursor >= 6) {
+ shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2];
+ bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff;
+
+ /* Resync STP when unexpected data is being read */
+ if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
+ bt_dev_err(bdev->hdev, "stp format unexpected (%d, %d)",
+ shdr->prefix, bdev->stp_dlen);
+ bdev->stp_cursor = 2;
+ bdev->stp_dlen = 0;
+ }
+ }
+
+ /* Quit directly when there is no data left for H4 to process */
+ if (count <= 0)
+ return NULL;
+
+ /* Translate to the amount of data H4 can handle so far */
+ *sz_h4 = min_t(int, count, bdev->stp_dlen);
+
+ /* Update the remaining size of STP packet */
+ bdev->stp_dlen -= *sz_h4;
+
+ /* Data points to STP payload which can be handled by H4 */
+ return data;
+}
+
+static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ const unsigned char *p_left = data, *p_h4;
+ int sz_left = count, sz_h4, adv;
+ int err;
+
+ while (sz_left > 0) {
+ /* The serial data received from the MT7622 BT controller is
+ * always wrapped in an STP header and trailer.
+ *
+ * A full STP packet looks like
+ * ------------------------------------
+ * | STP header | H:4 | STP trailer |
+ * ------------------------------------
+ * but it is not guaranteed to contain a complete H:4 packet: one
+ * H:4 packet may be fragmented across several STP packets, so the
+ * length recorded in an STP header only indicates how much data
+ * the H:4 engine can consume at this point, not where an H:4
+ * frame ends.
+ */
+
+ p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4);
+ if (!p_h4)
+ break;
+
+ adv = p_h4 - p_left;
+ sz_left -= adv;
+ p_left += adv;
+
+ bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
+ sz_h4, mtk_recv_pkts,
+ ARRAY_SIZE(mtk_recv_pkts));
+ if (IS_ERR(bdev->rx_skb)) {
+ err = PTR_ERR(bdev->rx_skb);
+ bt_dev_err(bdev->hdev,
+ "Frame reassembly failed (%d)", err);
+ bdev->rx_skb = NULL;
+ return err;
+ }
+
+ sz_left -= sz_h4;
+ p_left += sz_h4;
+ }
+
+ return 0;
+}
+
+static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
+ size_t count)
+{
+ struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
+ int err;
+
+ err = btmtkuart_recv(bdev->hdev, data, count);
+ if (err < 0)
+ return err;
+
+ bdev->hdev->stat.byte_rx += count;
+
+ return count;
+}
+
+static void btmtkuart_write_wakeup(struct serdev_device *serdev)
+{
+ struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
+
+ btmtkuart_tx_wakeup(bdev);
+}
+
+static const struct serdev_device_ops btmtkuart_client_ops = {
+ .receive_buf = btmtkuart_receive_buf,
+ .write_wakeup = btmtkuart_write_wakeup,
+};
+
+static int btmtkuart_open(struct hci_dev *hdev)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct device *dev;
+ int err;
+
+ err = serdev_device_open(bdev->serdev);
+ if (err) {
+ bt_dev_err(hdev, "Unable to open UART device %s",
+ dev_name(&bdev->serdev->dev));
+ goto err_open;
+ }
+
+ bdev->stp_cursor = 2;
+ bdev->stp_dlen = 0;
+
+ dev = &bdev->serdev->dev;
+
+ /* Enable the power domain and clock the device requires */
+ pm_runtime_enable(dev);
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(dev);
+ goto err_disable_rpm;
+ }
+
+ err = clk_prepare_enable(bdev->clk);
+ if (err < 0)
+ goto err_put_rpm;
+
+ return 0;
+
+err_put_rpm:
+ pm_runtime_put_sync(dev);
+err_disable_rpm:
+ pm_runtime_disable(dev);
+err_open:
+ return err;
+}
+
+static int btmtkuart_close(struct hci_dev *hdev)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct device *dev = &bdev->serdev->dev;
+
+ /* Shutdown the clock and power domain the device requires */
+ clk_disable_unprepare(bdev->clk);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ serdev_device_close(bdev->serdev);
+
+ return 0;
+}
+
+static int btmtkuart_flush(struct hci_dev *hdev)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+
+ /* Flush any pending characters */
+ serdev_device_write_flush(bdev->serdev);
+ skb_queue_purge(&bdev->txq);
+
+ cancel_work_sync(&bdev->tx_work);
+
+ kfree_skb(bdev->rx_skb);
+ bdev->rx_skb = NULL;
+
+ bdev->stp_cursor = 2;
+ bdev->stp_dlen = 0;
+
+ return 0;
+}
+
+static int btmtkuart_setup(struct hci_dev *hdev)
+{
+ u8 param = 0x1;
+ int err = 0;
+
+ /* Set up the firmware the device requires */
+ err = mtk_setup_fw(hdev);
+ if (err < 0)
+ return err;
+
+ /* Activate the functions the firmware provides */
+ err = mtk_hci_wmt_sync(hdev, MTK_WMT_RST, 0x4, 0, NULL);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
+ return err;
+ }
+
+ /* Enable Bluetooth protocol */
+ err = mtk_hci_wmt_sync(hdev, MTK_WMT_FUNC_CTRL, 0x0, sizeof(param),
+ &param);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int btmtkuart_shutdown(struct hci_dev *hdev)
+{
+ u8 param = 0x0;
+ int err;
+
+ /* Disable the device */
+ err = mtk_hci_wmt_sync(hdev, MTK_WMT_FUNC_CTRL, 0x0, sizeof(param),
+ &param);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct mtk_stp_hdr *shdr;
+ int err, dlen, type = 0;
+
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
+
+ /* Make sure there is enough room for the STP header and trailer */
+ if (unlikely(skb_headroom(skb) < sizeof(*shdr)) ||
+ (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) {
+ err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE,
+ GFP_ATOMIC);
+ if (err < 0)
+ return err;
+ }
+
+ /* Add the STP header */
+ dlen = skb->len;
+ shdr = skb_push(skb, sizeof(*shdr));
+ shdr->prefix = 0x80;
+ shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12));
+ shdr->cs = 0; /* MT7622 doesn't care about checksum value */
+
+ /* Add the STP trailer */
+ skb_put_zero(skb, MTK_STP_TLR_SIZE);
+
+ skb_queue_tail(&bdev->txq, skb);
+
+ btmtkuart_tx_wakeup(bdev);
+ return 0;
+}
+
+static int btmtkuart_probe(struct serdev_device *serdev)
+{
+ struct btmtkuart_dev *bdev;
+ struct hci_dev *hdev;
+
+ bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL);
+ if (!bdev)
+ return -ENOMEM;
+
+ bdev->clk = devm_clk_get(&serdev->dev, "ref");
+ if (IS_ERR(bdev->clk))
+ return PTR_ERR(bdev->clk);
+
+ bdev->serdev = serdev;
+ serdev_device_set_drvdata(serdev, bdev);
+
+ serdev_device_set_client_ops(serdev, &btmtkuart_client_ops);
+
+ INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
+ skb_queue_head_init(&bdev->txq);
+
+ /* Initialize and register HCI device */
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ dev_err(&serdev->dev, "Can't allocate HCI device\n");
+ return -ENOMEM;
+ }
+
+ bdev->hdev = hdev;
+
+ hdev->bus = HCI_UART;
+ hci_set_drvdata(hdev, bdev);
+
+ hdev->open = btmtkuart_open;
+ hdev->close = btmtkuart_close;
+ hdev->flush = btmtkuart_flush;
+ hdev->setup = btmtkuart_setup;
+ hdev->shutdown = btmtkuart_shutdown;
+ hdev->send = btmtkuart_send_frame;
+ SET_HCIDEV_DEV(hdev, &serdev->dev);
+
+ hdev->manufacturer = 70;
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+
+ if (hci_register_dev(hdev) < 0) {
+ dev_err(&serdev->dev, "Can't register HCI device\n");
+ hci_free_dev(hdev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void btmtkuart_remove(struct serdev_device *serdev)
+{
+ struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
+ struct hci_dev *hdev = bdev->hdev;
+
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mtk_of_match_table[] = {
+ { .compatible = "mediatek,mt7622-bluetooth"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, mtk_of_match_table);
+#endif
+
+static struct serdev_device_driver btmtkuart_driver = {
+ .probe = btmtkuart_probe,
+ .remove = btmtkuart_remove,
+ .driver = {
+ .name = "btmtkuart",
+ .of_match_table = of_match_ptr(mtk_of_match_table),
+ },
+};
+
+module_serdev_device_driver(btmtkuart_driver);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_MT7622);
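For reference, the STP framing that btmtkuart_send_frame() builds around each H:4 packet, reconstructed from the code above: a 4-byte header (0x80 prefix, big-endian length in the low 12 bits of dlen, a checksum byte MT7622 ignores), the payload, then a 2-byte zero trailer. A worked sketch for a hypothetical 300-byte H:4 packet:

	struct mtk_stp_hdr hdr = {
		.prefix	= 0x80,
		.dlen	= cpu_to_be16(300 & 0x0fff),	/* length in the low 12 bits */
		.cs	= 0,				/* MT7622 does not check it */
	};
	/* on the wire: 4-byte header + 300 payload bytes + 2 zero trailer bytes */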
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 8219816c54a0..ec9e03a6b778 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -27,7 +27,7 @@
#define VERSION "0.1"
-static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
+int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
{
struct sk_buff *skb;
struct edl_event_hdr *edl;
@@ -35,36 +35,35 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
char cmd;
int err = 0;
- BT_DBG("%s: ROME Patch Version Request", hdev->name);
+ bt_dev_dbg(hdev, "QCA Version Request");
cmd = EDL_PATCH_VER_REQ_CMD;
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
- &cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ &cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name,
- err);
+ bt_dev_err(hdev, "Reading QCA version information failed (%d)",
+ err);
return err;
}
if (skb->len != sizeof(*edl) + sizeof(*ver)) {
- BT_ERR("%s: Version size mismatch len %d", hdev->name,
- skb->len);
+ bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len);
err = -EILSEQ;
goto out;
}
edl = (struct edl_event_hdr *)(skb->data);
if (!edl) {
- BT_ERR("%s: TLV with no header", hdev->name);
+ bt_dev_err(hdev, "QCA TLV with no header");
err = -EILSEQ;
goto out;
}
if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
edl->rtype != EDL_APP_VER_RES_EVT) {
- BT_ERR("%s: Wrong packet received %d %d", hdev->name,
- edl->cresp, edl->rtype);
+ bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
+ edl->rtype);
err = -EIO;
goto out;
}
@@ -76,30 +75,35 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
BT_DBG("%s: ROM :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
BT_DBG("%s: SOC :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
- /* ROME chipset version can be decided by patch and SoC
+ /* QCA chipset version can be decided by patch and SoC
* version, combination with upper 2 bytes from SoC
* and lower 2 bytes from patch will be used.
*/
- *rome_version = (le32_to_cpu(ver->soc_id) << 16) |
+ *soc_version = (le32_to_cpu(ver->soc_id) << 16) |
(le16_to_cpu(ver->rome_ver) & 0x0000ffff);
+ if (*soc_version == 0)
+ err = -EILSEQ;
out:
kfree_skb(skb);
+ if (err)
+ bt_dev_err(hdev, "QCA Failed to get version (%d)", err);
return err;
}
+EXPORT_SYMBOL_GPL(qca_read_soc_version);
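For illustration, the packing above keeps the low 16 bits of the SoC ID in the upper half of soc_version and the patch version in the lower half. A minimal userspace sketch with hypothetical values (not taken from any real chip):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t soc_id = 0x00000044;   /* hypothetical SoC ID */
        uint16_t rome_ver = 0x0302;     /* hypothetical patch version */

        /* upper 2 bytes from the SoC ID, lower 2 from the patch */
        uint32_t soc_version = (soc_id << 16) | (rome_ver & 0xffff);

        printf("soc_version = 0x%08x\n", soc_version); /* 0x00440302 */
        return 0;
    }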
-static int rome_reset(struct hci_dev *hdev)
+static int qca_send_reset(struct hci_dev *hdev)
{
struct sk_buff *skb;
int err;
- BT_DBG("%s: ROME HCI_RESET", hdev->name);
+ bt_dev_dbg(hdev, "QCA HCI_RESET");
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: Reset failed (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Reset failed (%d)", err);
return err;
}
@@ -108,7 +112,7 @@ static int rome_reset(struct hci_dev *hdev)
return 0;
}
-static void rome_tlv_check_data(struct rome_config *config,
+static void qca_tlv_check_data(struct rome_config *config,
const struct firmware *fw)
{
const u8 *data;
@@ -207,7 +211,7 @@ static void rome_tlv_check_data(struct rome_config *config,
}
}
-static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size,
+static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
const u8 *data, enum rome_tlv_dnld_mode mode)
{
struct sk_buff *skb;
@@ -225,22 +229,22 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size,
cmd);
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
- HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err);
return err;
}
if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
- BT_ERR("%s: TLV response size mismatch", hdev->name);
+ bt_dev_err(hdev, "QCA TLV response size mismatch");
err = -EILSEQ;
goto out;
}
edl = (struct edl_event_hdr *)(skb->data);
if (!edl) {
- BT_ERR("%s: TLV with no header", hdev->name);
+ bt_dev_err(hdev, "TLV with no header");
err = -EILSEQ;
goto out;
}
@@ -249,8 +253,8 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size,
if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) {
- BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)",
- hdev->name, edl->cresp, edl->rtype, tlv_resp->result);
+ bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x (0x%x)",
+ edl->cresp, edl->rtype, tlv_resp->result);
err = -EIO;
}
@@ -260,23 +264,23 @@ out:
return err;
}
-static int rome_download_firmware(struct hci_dev *hdev,
+static int qca_download_firmware(struct hci_dev *hdev,
struct rome_config *config)
{
const struct firmware *fw;
const u8 *segment;
int ret, remain, i = 0;
- bt_dev_info(hdev, "ROME Downloading %s", config->fwname);
+ bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
ret = request_firmware(&fw, config->fwname, &hdev->dev);
if (ret) {
- BT_ERR("%s: Failed to request file: %s (%d)", hdev->name,
- config->fwname, ret);
+ bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ config->fwname, ret);
return ret;
}
- rome_tlv_check_data(config, fw);
+ qca_tlv_check_data(config, fw);
segment = fw->data;
remain = fw->size;
@@ -290,7 +294,7 @@ static int rome_download_firmware(struct hci_dev *hdev,
if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT)
config->dnld_mode = ROME_SKIP_EVT_NONE;
- ret = rome_tlv_send_segment(hdev, segsize, segment,
+ ret = qca_tlv_send_segment(hdev, segsize, segment,
config->dnld_mode);
if (ret)
break;
@@ -314,11 +318,10 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
cmd[2] = sizeof(bdaddr_t); /* size */
memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
- HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: Change address command failed (%d)",
- hdev->name, err);
+ bt_dev_err(hdev, "QCA Change address command failed (%d)", err);
return err;
}
@@ -328,57 +331,65 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
-int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ enum qca_btsoc_type soc_type, u32 soc_ver)
{
- u32 rome_ver = 0;
struct rome_config config;
int err;
+ u8 rom_ver;
- BT_DBG("%s: ROME setup on UART", hdev->name);
+ bt_dev_dbg(hdev, "QCA setup on UART");
config.user_baud_rate = baudrate;
- /* Get ROME version information */
- err = rome_patch_ver_req(hdev, &rome_ver);
- if (err < 0 || rome_ver == 0) {
- BT_ERR("%s: Failed to get version 0x%x", hdev->name, err);
- return err;
- }
-
- bt_dev_info(hdev, "ROME controller version 0x%08x", rome_ver);
-
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
- snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin",
- rome_ver);
- err = rome_download_firmware(hdev, &config);
+ if (soc_type == QCA_WCN3990) {
+ /* Firmware files to download are based on the ROM version,
+ * which is derived from the last two bytes of soc_ver.
+ */
+ rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
+ (soc_ver & 0x0000000f);
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crbtfw%02x.tlv", rom_ver);
+ } else {
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/rampatch_%08x.bin", soc_ver);
+ }
+
+ err = qca_download_firmware(hdev, &config);
if (err < 0) {
- BT_ERR("%s: Failed to download patch (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Failed to download patch (%d)", err);
return err;
}
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
- snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin",
- rome_ver);
- err = rome_download_firmware(hdev, &config);
+ if (soc_type == QCA_WCN3990)
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crnv%02x.bin", rom_ver);
+ else
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/nvm_%08x.bin", soc_ver);
+
+ err = qca_download_firmware(hdev, &config);
if (err < 0) {
- BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err);
return err;
}
/* Perform HCI reset */
- err = rome_reset(hdev);
+ err = qca_send_reset(hdev);
if (err < 0) {
- BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err);
+ bt_dev_err(hdev, "QCA Failed to run HCI_RESET (%d)", err);
return err;
}
- bt_dev_info(hdev, "ROME setup on UART is completed");
+ bt_dev_info(hdev, "QCA setup on UART is completed");
return 0;
}
-EXPORT_SYMBOL_GPL(qca_uart_setup_rome);
+EXPORT_SYMBOL_GPL(qca_uart_setup);
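To see how the WCN3990 firmware names above come out, here is a minimal sketch of the rom_ver derivation with a made-up soc_ver (bits 11:8 and 3:0 of the version word are packed into one byte):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t soc_ver = 0x40010214;  /* hypothetical version word */
        uint8_t rom_ver;
        char fwname[64];

        /* keep bits 11:8 and 3:0 of soc_ver, packed into one byte */
        rom_ver = ((soc_ver & 0x00000f00) >> 4) | (soc_ver & 0x0000000f);

        snprintf(fwname, sizeof(fwname), "qca/crbtfw%02x.tlv", rom_ver);
        puts(fwname);                   /* qca/crbtfw24.tlv */
        return 0;
    }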
MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 13d77fd873b6..0c01f375fe83 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -37,6 +37,9 @@
#define EDL_TAG_ID_HCI (17)
#define EDL_TAG_ID_DEEP_SLEEP (27)
+#define QCA_WCN3990_POWERON_PULSE 0xFC
+#define QCA_WCN3990_POWEROFF_PULSE 0xC0
+
enum qca_bardrate {
QCA_BAUDRATE_115200 = 0,
QCA_BAUDRATE_57600,
@@ -124,10 +127,19 @@ struct tlv_type_hdr {
__u8 data[0];
} __packed;
+enum qca_btsoc_type {
+ QCA_INVALID = -1,
+ QCA_AR3002,
+ QCA_ROME,
+ QCA_WCN3990
+};
+
#if IS_ENABLED(CONFIG_BT_QCA)
int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
-int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate);
+int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ enum qca_btsoc_type soc_type, u32 soc_ver);
+int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
#else
@@ -136,7 +148,13 @@ static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdad
return -EOPNOTSUPP;
}
-static inline int qca_uart_setup_rome(struct hci_dev *hdev, int speed)
+static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ enum qca_btsoc_type soc_type, u32 soc_ver)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 437f080deaab..7f9ea8e4c1b2 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -34,9 +34,12 @@
#define RTL_ROM_LMP_8821A 0x8821
#define RTL_ROM_LMP_8761A 0x8761
#define RTL_ROM_LMP_8822B 0x8822
+#define RTL_CONFIG_MAGIC 0x8723ab55
#define IC_MATCH_FL_LMPSUBV (1 << 0)
#define IC_MATCH_FL_HCIREV (1 << 1)
+#define IC_MATCH_FL_HCIVER (1 << 2)
+#define IC_MATCH_FL_HCIBUS (1 << 3)
#define IC_INFO(lmps, hcir) \
.match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV, \
.lmp_subver = (lmps), \
@@ -46,49 +49,130 @@ struct id_table {
__u16 match_flags;
__u16 lmp_subver;
__u16 hci_rev;
+ __u8 hci_ver;
+ __u8 hci_bus;
bool config_needed;
+ bool has_rom_version;
char *fw_name;
char *cfg_name;
};
+struct btrtl_device_info {
+ const struct id_table *ic_info;
+ u8 rom_version;
+ u8 *fw_data;
+ int fw_len;
+ u8 *cfg_data;
+ int cfg_len;
+};
+
static const struct id_table ic_id_table[] = {
+ { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8723A, 0x0,
+ .config_needed = false,
+ .has_rom_version = false,
+ .fw_name = "rtl_bt/rtl8723a_fw.bin",
+ .cfg_name = NULL },
+
+ { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_3499, 0x0,
+ .config_needed = false,
+ .has_rom_version = false,
+ .fw_name = "rtl_bt/rtl8723a_fw.bin",
+ .cfg_name = NULL },
+
+ /* 8723BS */
+ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV |
+ IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS,
+ .lmp_subver = RTL_ROM_LMP_8723B,
+ .hci_rev = 0xb,
+ .hci_ver = 6,
+ .hci_bus = HCI_UART,
+ .config_needed = true,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8723bs_fw.bin",
+ .cfg_name = "rtl_bt/rtl8723bs_config" },
+
/* 8723B */
{ IC_INFO(RTL_ROM_LMP_8723B, 0xb),
.config_needed = false,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8723b_fw.bin",
- .cfg_name = "rtl_bt/rtl8723b_config.bin" },
+ .cfg_name = "rtl_bt/rtl8723b_config" },
/* 8723D */
{ IC_INFO(RTL_ROM_LMP_8723B, 0xd),
.config_needed = true,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8723d_fw.bin",
- .cfg_name = "rtl_bt/rtl8723d_config.bin" },
+ .cfg_name = "rtl_bt/rtl8723d_config" },
+
+ /* 8723DS */
+ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV |
+ IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS,
+ .lmp_subver = RTL_ROM_LMP_8723B,
+ .hci_rev = 0xd,
+ .hci_ver = 8,
+ .hci_bus = HCI_UART,
+ .config_needed = true,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8723ds_fw.bin",
+ .cfg_name = "rtl_bt/rtl8723ds_config" },
/* 8821A */
{ IC_INFO(RTL_ROM_LMP_8821A, 0xa),
.config_needed = false,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8821a_fw.bin",
- .cfg_name = "rtl_bt/rtl8821a_config.bin" },
+ .cfg_name = "rtl_bt/rtl8821a_config" },
/* 8821C */
{ IC_INFO(RTL_ROM_LMP_8821A, 0xc),
.config_needed = false,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8821c_fw.bin",
- .cfg_name = "rtl_bt/rtl8821c_config.bin" },
+ .cfg_name = "rtl_bt/rtl8821c_config" },
/* 8761A */
{ IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8761A, 0x0,
.config_needed = false,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8761a_fw.bin",
- .cfg_name = "rtl_bt/rtl8761a_config.bin" },
+ .cfg_name = "rtl_bt/rtl8761a_config" },
/* 8822B */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xb),
.config_needed = true,
+ .has_rom_version = true,
.fw_name = "rtl_bt/rtl8822b_fw.bin",
- .cfg_name = "rtl_bt/rtl8822b_config.bin" },
+ .cfg_name = "rtl_bt/rtl8822b_config" },
};
+static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
+ u8 hci_ver, u8 hci_bus)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) {
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) &&
+ (ic_id_table[i].lmp_subver != lmp_subver))
+ continue;
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) &&
+ (ic_id_table[i].hci_rev != hci_rev))
+ continue;
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIVER) &&
+ (ic_id_table[i].hci_ver != hci_ver))
+ continue;
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIBUS) &&
+ (ic_id_table[i].hci_bus != hci_bus))
+ continue;
+
+ break;
+ }
+ if (i >= ARRAY_SIZE(ic_id_table))
+ return NULL;
+
+ return &ic_id_table[i];
+}
+
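Each unset IC_MATCH_FL_* bit in the table entries acts as a wildcard: btrtl_match_ic() only compares the fields whose flags are set, and the first matching entry wins. A standalone sketch of the same idea, using toy flags and values rather than the real table:

    #include <stddef.h>
    #include <stdio.h>

    #define FL_A (1 << 0)
    #define FL_B (1 << 1)

    struct ent { unsigned flags; int a, b; const char *name; };

    static const struct ent tbl[] = {
        { FL_A | FL_B, 1, 2, "exact" },   /* constrains both fields */
        { FL_A,        1, 0, "loose" },   /* b is a wildcard */
    };

    static const struct ent *match(int a, int b)
    {
        size_t i;

        for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
            if ((tbl[i].flags & FL_A) && tbl[i].a != a)
                continue;
            if ((tbl[i].flags & FL_B) && tbl[i].b != b)
                continue;
            return &tbl[i];  /* first match wins */
        }
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", match(1, 2)->name);  /* exact */
        printf("%s\n", match(1, 9)->name);  /* loose */
        return 0;
    }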
static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
{
struct rtl_rom_version_evt *rom_version;
@@ -97,20 +181,20 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
/* Read RTL ROM version command */
skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: Read ROM version failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ rtl_dev_err(hdev, "Read ROM version failed (%ld)\n",
+ PTR_ERR(skb));
return PTR_ERR(skb);
}
if (skb->len != sizeof(*rom_version)) {
- BT_ERR("%s: RTL version event length mismatch", hdev->name);
+ rtl_dev_err(hdev, "RTL version event length mismatch\n");
kfree_skb(skb);
return -EIO;
}
rom_version = (struct rtl_rom_version_evt *)skb->data;
- bt_dev_info(hdev, "rom_version status=%x version=%x",
- rom_version->status, rom_version->version);
+ rtl_dev_info(hdev, "rom_version status=%x version=%x\n",
+ rom_version->status, rom_version->version);
*version = rom_version->version;
@@ -118,16 +202,16 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
return 0;
}
-static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
- const struct firmware *fw,
+static int rtlbt_parse_firmware(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
unsigned char **_buf)
{
const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
struct rtl_epatch_header *epatch_info;
unsigned char *buf;
- int i, ret, len;
+ int i, len;
size_t min_size;
- u8 opcode, length, data, rom_version = 0;
+ u8 opcode, length, data;
int project_id = -1;
const unsigned char *fwptr, *chip_id_base;
const unsigned char *patch_length_base, *patch_offset_base;
@@ -146,17 +230,13 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
{ RTL_ROM_LMP_8821A, 10 }, /* 8821C */
};
- ret = rtl_read_rom_version(hdev, &rom_version);
- if (ret)
- return ret;
-
min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
- if (fw->size < min_size)
+ if (btrtl_dev->fw_len < min_size)
return -EINVAL;
- fwptr = fw->data + fw->size - sizeof(extension_sig);
+ fwptr = btrtl_dev->fw_data + btrtl_dev->fw_len - sizeof(extension_sig);
if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
- BT_ERR("%s: extension section signature mismatch", hdev->name);
+ rtl_dev_err(hdev, "extension section signature mismatch\n");
return -EINVAL;
}
@@ -166,7 +246,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
* Once we have that, we double-check that the project_id is suitable
* for the hardware we are working with.
*/
- while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
+ while (fwptr >= btrtl_dev->fw_data + (sizeof(*epatch_info) + 3)) {
opcode = *--fwptr;
length = *--fwptr;
data = *--fwptr;
@@ -177,8 +257,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
break;
if (length == 0) {
- BT_ERR("%s: found instruction with length 0",
- hdev->name);
+ rtl_dev_err(hdev, "found instruction with length 0\n");
return -EINVAL;
}
@@ -191,7 +270,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
}
if (project_id < 0) {
- BT_ERR("%s: failed to find version instruction", hdev->name);
+ rtl_dev_err(hdev, "failed to find version instruction\n");
return -EINVAL;
}
@@ -202,19 +281,21 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
}
if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) {
- BT_ERR("%s: unknown project id %d", hdev->name, project_id);
+ rtl_dev_err(hdev, "unknown project id %d\n", project_id);
return -EINVAL;
}
- if (lmp_subver != project_id_to_lmp_subver[i].lmp_subver) {
- BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
- project_id_to_lmp_subver[i].lmp_subver, lmp_subver);
+ if (btrtl_dev->ic_info->lmp_subver !=
+ project_id_to_lmp_subver[i].lmp_subver) {
+ rtl_dev_err(hdev, "firmware is for %x but this is a %x\n",
+ project_id_to_lmp_subver[i].lmp_subver,
+ btrtl_dev->ic_info->lmp_subver);
return -EINVAL;
}
- epatch_info = (struct rtl_epatch_header *)fw->data;
+ epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data;
if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
- BT_ERR("%s: bad EPATCH signature", hdev->name);
+ rtl_dev_err(hdev, "bad EPATCH signature\n");
return -EINVAL;
}
@@ -229,16 +310,16 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
* Find the right patch for this chip.
*/
min_size += 8 * num_patches;
- if (fw->size < min_size)
+ if (btrtl_dev->fw_len < min_size)
return -EINVAL;
- chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
+ chip_id_base = btrtl_dev->fw_data + sizeof(struct rtl_epatch_header);
patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
for (i = 0; i < num_patches; i++) {
u16 chip_id = get_unaligned_le16(chip_id_base +
(i * sizeof(u16)));
- if (chip_id == rom_version + 1) {
+ if (chip_id == btrtl_dev->rom_version + 1) {
patch_length = get_unaligned_le16(patch_length_base +
(i * sizeof(u16)));
patch_offset = get_unaligned_le32(patch_offset_base +
@@ -248,21 +329,22 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
}
if (!patch_offset) {
- BT_ERR("%s: didn't find patch for chip id %d",
- hdev->name, rom_version);
+ rtl_dev_err(hdev, "didn't find patch for chip id %d",
+ btrtl_dev->rom_version);
return -EINVAL;
}
BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
min_size = patch_offset + patch_length;
- if (fw->size < min_size)
+ if (btrtl_dev->fw_len < min_size)
return -EINVAL;
/* Copy the firmware into a new buffer and write the version at
* the end.
*/
len = patch_length;
- buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
+ buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length,
+ GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -301,15 +383,14 @@ static int rtl_download_firmware(struct hci_dev *hdev,
skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: download fw command failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ rtl_dev_err(hdev, "download fw command failed (%ld)\n",
+ PTR_ERR(skb));
ret = -PTR_ERR(skb);
goto out;
}
if (skb->len != sizeof(struct rtl_download_response)) {
- BT_ERR("%s: download fw event length mismatch",
- hdev->name);
+ rtl_dev_err(hdev, "download fw event length mismatch\n");
kfree_skb(skb);
ret = -EIO;
goto out;
@@ -324,12 +405,12 @@ out:
return ret;
}
-static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff)
+static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
{
const struct firmware *fw;
int ret;
- bt_dev_info(hdev, "rtl: loading %s", name);
+ rtl_dev_info(hdev, "rtl: loading %s\n", name);
ret = request_firmware(&fw, name, &hdev->dev);
if (ret < 0)
return ret;
@@ -343,96 +424,37 @@ static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff)
return ret;
}
-static int btrtl_setup_rtl8723a(struct hci_dev *hdev)
+static int btrtl_setup_rtl8723a(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev)
{
- const struct firmware *fw;
- int ret;
-
- bt_dev_info(hdev, "rtl: loading rtl_bt/rtl8723a_fw.bin");
- ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &hdev->dev);
- if (ret < 0) {
- BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
- return ret;
- }
-
- if (fw->size < 8) {
- ret = -EINVAL;
- goto out;
- }
+ if (btrtl_dev->fw_len < 8)
+ return -EINVAL;
/* Check that the firmware doesn't have the epatch signature
* (which is only for RTL8723B and newer).
*/
- if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
- BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
- ret = -EINVAL;
- goto out;
+ if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8)) {
+ rtl_dev_err(hdev, "unexpected EPATCH signature!\n");
+ return -EINVAL;
}
- ret = rtl_download_firmware(hdev, fw->data, fw->size);
-
-out:
- release_firmware(fw);
- return ret;
+ return rtl_download_firmware(hdev, btrtl_dev->fw_data,
+ btrtl_dev->fw_len);
}
-static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 hci_rev,
- u16 lmp_subver)
+static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev)
{
unsigned char *fw_data = NULL;
- const struct firmware *fw;
int ret;
- int cfg_sz;
- u8 *cfg_buff = NULL;
u8 *tbuff;
- char *cfg_name = NULL;
- char *fw_name = NULL;
- int i;
- for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) {
- if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) &&
- (ic_id_table[i].lmp_subver != lmp_subver))
- continue;
- if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) &&
- (ic_id_table[i].hci_rev != hci_rev))
- continue;
-
- break;
- }
-
- if (i >= ARRAY_SIZE(ic_id_table)) {
- BT_ERR("%s: unknown IC info, lmp subver %04x, hci rev %04x",
- hdev->name, lmp_subver, hci_rev);
- return -EINVAL;
- }
-
- cfg_name = ic_id_table[i].cfg_name;
-
- if (cfg_name) {
- cfg_sz = rtl_load_config(hdev, cfg_name, &cfg_buff);
- if (cfg_sz < 0) {
- cfg_sz = 0;
- if (ic_id_table[i].config_needed)
- BT_ERR("Necessary config file %s not found\n",
- cfg_name);
- }
- } else
- cfg_sz = 0;
-
- fw_name = ic_id_table[i].fw_name;
- bt_dev_info(hdev, "rtl: loading %s", fw_name);
- ret = request_firmware(&fw, fw_name, &hdev->dev);
- if (ret < 0) {
- BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
- goto err_req_fw;
- }
-
- ret = rtlbt_parse_firmware(hdev, lmp_subver, fw, &fw_data);
+ ret = rtlbt_parse_firmware(hdev, btrtl_dev, &fw_data);
if (ret < 0)
goto out;
- if (cfg_sz) {
- tbuff = kzalloc(ret + cfg_sz, GFP_KERNEL);
+ if (btrtl_dev->cfg_len > 0) {
+ tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
if (!tbuff) {
ret = -ENOMEM;
goto out;
@@ -441,22 +463,18 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 hci_rev,
memcpy(tbuff, fw_data, ret);
kfree(fw_data);
- memcpy(tbuff + ret, cfg_buff, cfg_sz);
- ret += cfg_sz;
+ memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len);
+ ret += btrtl_dev->cfg_len;
fw_data = tbuff;
}
- bt_dev_info(hdev, "cfg_sz %d, total size %d", cfg_sz, ret);
+ rtl_dev_info(hdev, "cfg_sz %d, total sz %d\n", btrtl_dev->cfg_len, ret);
ret = rtl_download_firmware(hdev, fw_data, ret);
out:
- release_firmware(fw);
kfree(fw_data);
-err_req_fw:
- if (cfg_sz)
- kfree(cfg_buff);
return ret;
}
@@ -467,14 +485,13 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION failed (%ld)\n",
+ PTR_ERR(skb));
return skb;
}
if (skb->len != sizeof(struct hci_rp_read_local_version)) {
- BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
- hdev->name);
+ rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION event length mismatch\n");
kfree_skb(skb);
return ERR_PTR(-EIO);
}
@@ -482,49 +499,264 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
return skb;
}
-int btrtl_setup_realtek(struct hci_dev *hdev)
+void btrtl_free(struct btrtl_device_info *btrtl_dev)
{
+ kfree(btrtl_dev->fw_data);
+ kfree(btrtl_dev->cfg_data);
+ kfree(btrtl_dev);
+}
+EXPORT_SYMBOL_GPL(btrtl_free);
+
+struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ const char *postfix)
+{
+ struct btrtl_device_info *btrtl_dev;
struct sk_buff *skb;
struct hci_rp_read_local_version *resp;
+ char cfg_name[40];
u16 hci_rev, lmp_subver;
+ u8 hci_ver;
+ int ret;
+
+ btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL);
+ if (!btrtl_dev) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
skb = btrtl_read_local_version(hdev);
- if (IS_ERR(skb))
- return -PTR_ERR(skb);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ goto err_free;
+ }
resp = (struct hci_rp_read_local_version *)skb->data;
- bt_dev_info(hdev, "rtl: examining hci_ver=%02x hci_rev=%04x "
- "lmp_ver=%02x lmp_subver=%04x",
- resp->hci_ver, resp->hci_rev,
- resp->lmp_ver, resp->lmp_subver);
+ rtl_dev_info(hdev, "rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x\n",
+ resp->hci_ver, resp->hci_rev,
+ resp->lmp_ver, resp->lmp_subver);
+ hci_ver = resp->hci_ver;
hci_rev = le16_to_cpu(resp->hci_rev);
lmp_subver = le16_to_cpu(resp->lmp_subver);
kfree_skb(skb);
+ btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
+ hdev->bus);
+
+ if (!btrtl_dev->ic_info) {
+ rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
+ lmp_subver, hci_rev, hci_ver);
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ if (btrtl_dev->ic_info->has_rom_version) {
+ ret = rtl_read_rom_version(hdev, &btrtl_dev->rom_version);
+ if (ret)
+ goto err_free;
+ }
+
+ btrtl_dev->fw_len = rtl_load_file(hdev, btrtl_dev->ic_info->fw_name,
+ &btrtl_dev->fw_data);
+ if (btrtl_dev->fw_len < 0) {
+ rtl_dev_err(hdev, "firmware file %s not found\n",
+ btrtl_dev->ic_info->fw_name);
+ ret = btrtl_dev->fw_len;
+ goto err_free;
+ }
+
+ if (btrtl_dev->ic_info->cfg_name) {
+ if (postfix) {
+ snprintf(cfg_name, sizeof(cfg_name), "%s-%s.bin",
+ btrtl_dev->ic_info->cfg_name, postfix);
+ } else {
+ snprintf(cfg_name, sizeof(cfg_name), "%s.bin",
+ btrtl_dev->ic_info->cfg_name);
+ }
+ btrtl_dev->cfg_len = rtl_load_file(hdev, cfg_name,
+ &btrtl_dev->cfg_data);
+ if (btrtl_dev->ic_info->config_needed &&
+ btrtl_dev->cfg_len <= 0) {
+ rtl_dev_err(hdev, "mandatory config file %s not found\n",
+ btrtl_dev->ic_info->cfg_name);
+ ret = btrtl_dev->cfg_len ? btrtl_dev->cfg_len : -EINVAL;
+ goto err_free;
+ }
+ }
+
+ return btrtl_dev;
+
+err_free:
+ btrtl_free(btrtl_dev);
+err_alloc:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(btrtl_initialize);
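Dropping the ".bin" suffix from cfg_name lets a bus-specific postfix slot into the config filename; serdev probes pass the ACPI HID (see hci_h5.c further down), while USB callers pass NULL and keep the plain name. A minimal sketch of the name construction for a hypothetical 8723BS enumerated as OBDA8723:

    #include <stdio.h>

    int main(void)
    {
        char cfg_name[40];
        const char *base = "rtl_bt/rtl8723bs_config";
        const char *postfix = "OBDA8723";   /* hypothetical ACPI HID */

        if (postfix)
            snprintf(cfg_name, sizeof(cfg_name), "%s-%s.bin",
                     base, postfix);
        else
            snprintf(cfg_name, sizeof(cfg_name), "%s.bin", base);

        puts(cfg_name);   /* rtl_bt/rtl8723bs_config-OBDA8723.bin */
        return 0;
    }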
+
+int btrtl_download_firmware(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev)
+{
/* Match a set of subver values that correspond to stock firmware,
* which is not compatible with standard btusb.
* If matched, upload an alternative firmware that does conform to
* standard btusb. Once that firmware is uploaded, the subver changes
* to a different value.
*/
- switch (lmp_subver) {
+ switch (btrtl_dev->ic_info->lmp_subver) {
case RTL_ROM_LMP_8723A:
case RTL_ROM_LMP_3499:
- return btrtl_setup_rtl8723a(hdev);
+ return btrtl_setup_rtl8723a(hdev, btrtl_dev);
case RTL_ROM_LMP_8723B:
case RTL_ROM_LMP_8821A:
case RTL_ROM_LMP_8761A:
case RTL_ROM_LMP_8822B:
- return btrtl_setup_rtl8723b(hdev, hci_rev, lmp_subver);
+ return btrtl_setup_rtl8723b(hdev, btrtl_dev);
default:
- bt_dev_info(hdev, "rtl: assuming no firmware upload needed");
+ rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n");
return 0;
}
}
+EXPORT_SYMBOL_GPL(btrtl_download_firmware);
+
+int btrtl_setup_realtek(struct hci_dev *hdev)
+{
+ struct btrtl_device_info *btrtl_dev;
+ int ret;
+
+ btrtl_dev = btrtl_initialize(hdev, NULL);
+ if (IS_ERR(btrtl_dev))
+ return PTR_ERR(btrtl_dev);
+
+ ret = btrtl_download_firmware(hdev, btrtl_dev);
+
+ btrtl_free(btrtl_dev);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
+static unsigned int btrtl_convert_baudrate(u32 device_baudrate)
+{
+ switch (device_baudrate) {
+ case 0x0252a00a:
+ return 230400;
+
+ case 0x05f75004:
+ return 921600;
+
+ case 0x00005004:
+ return 1000000;
+
+ case 0x04928002:
+ case 0x01128002:
+ return 1500000;
+
+ case 0x00005002:
+ return 2000000;
+
+ case 0x0000b001:
+ return 2500000;
+
+ case 0x04928001:
+ return 3000000;
+
+ case 0x052a6001:
+ return 3500000;
+
+ case 0x00005001:
+ return 4000000;
+
+ case 0x0252c014:
+ default:
+ return 115200;
+ }
+}
+
+int btrtl_get_uart_settings(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
+ unsigned int *controller_baudrate,
+ u32 *device_baudrate, bool *flow_control)
+{
+ struct rtl_vendor_config *config;
+ struct rtl_vendor_config_entry *entry;
+ int i, total_data_len;
+ bool found = false;
+
+ total_data_len = btrtl_dev->cfg_len - sizeof(*config);
+ if (total_data_len <= 0) {
+ rtl_dev_warn(hdev, "no config loaded\n");
+ return -EINVAL;
+ }
+
+ config = (struct rtl_vendor_config *)btrtl_dev->cfg_data;
+ if (le32_to_cpu(config->signature) != RTL_CONFIG_MAGIC) {
+ rtl_dev_err(hdev, "invalid config magic\n");
+ return -EINVAL;
+ }
+
+ if (total_data_len < le16_to_cpu(config->total_len)) {
+ rtl_dev_err(hdev, "config is too short\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < total_data_len; ) {
+ entry = ((void *)config->entry) + i;
+
+ switch (le16_to_cpu(entry->offset)) {
+ case 0xc:
+ if (entry->len < sizeof(*device_baudrate)) {
+ rtl_dev_err(hdev, "invalid UART config entry\n");
+ return -EINVAL;
+ }
+
+ *device_baudrate = get_unaligned_le32(entry->data);
+ *controller_baudrate = btrtl_convert_baudrate(
+ *device_baudrate);
+
+ if (entry->len >= 13)
+ *flow_control = !!(entry->data[12] & BIT(2));
+ else
+ *flow_control = false;
+
+ found = true;
+ break;
+
+ default:
+ rtl_dev_dbg(hdev, "skipping config entry 0x%x (len %u)\n",
+ le16_to_cpu(entry->offset), entry->len);
+ break;
+ }
+
+ i += sizeof(*entry) + entry->len;
+ }
+
+ if (!found) {
+ rtl_dev_err(hdev, "no UART config entry found\n");
+ return -ENOENT;
+ }
+
+ rtl_dev_dbg(hdev, "device baudrate = 0x%08x\n", *device_baudrate);
+ rtl_dev_dbg(hdev, "controller baudrate = %u\n", *controller_baudrate);
+ rtl_dev_dbg(hdev, "flow control %d\n", *flow_control);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btrtl_get_uart_settings);
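The config blob parsed above is a small TLV stream: a 6-byte header (LE32 signature 0x8723ab55, LE16 total length of the entries) followed by {LE16 offset, u8 len, data} records, with the UART settings in the record at offset 0xc, the LE32 baudrate code first and flow control in bit 2 of data[12]. A hand-built example blob, all values hypothetical:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* header: signature 0x8723ab55 (LE), total_len = 16 (LE);
         * one entry: offset 0x000c (LE), len 13, LE32 baud code,
         * then data[12] bit 2 = flow control
         */
        uint8_t blob[] = {
            0x55, 0xab, 0x23, 0x87,  0x10, 0x00,
            0x0c, 0x00,  13,
            0x04, 0x50, 0xf7, 0x05,  /* 0x05f75004 -> 921600 baud */
            0, 0, 0, 0, 0, 0, 0, 0,
            0x04,                    /* BIT(2) set: flow control on */
        };
        uint32_t code;

        memcpy(&code, &blob[9], 4);  /* assumes a little-endian host */
        printf("baud code 0x%08x, flow control %d\n", (unsigned)code,
               !!(blob[9 + 12] & 0x04));
        return 0;
    }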
+
MODULE_AUTHOR("Daniel Drake <drake@endlessm.com>");
MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("rtl_bt/rtl8723a_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723b_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723b_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723bs_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723bs_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723ds_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8723ds_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761a_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761a_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8821a_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin");
diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
index 38ffe4890cd1..f5e36f3993a8 100644
--- a/drivers/bluetooth/btrtl.h
+++ b/drivers/bluetooth/btrtl.h
@@ -17,6 +17,13 @@
#define RTL_FRAG_LEN 252
+#define rtl_dev_err(dev, fmt, ...) bt_dev_err(dev, "RTL: " fmt, ##__VA_ARGS__)
+#define rtl_dev_warn(dev, fmt, ...) bt_dev_warn(dev, "RTL: " fmt, ##__VA_ARGS__)
+#define rtl_dev_info(dev, fmt, ...) bt_dev_info(dev, "RTL: " fmt, ##__VA_ARGS__)
+#define rtl_dev_dbg(dev, fmt, ...) bt_dev_dbg(dev, "RTL: " fmt, ##__VA_ARGS__)
+
+struct btrtl_device_info;
+
struct rtl_download_cmd {
__u8 index;
__u8 data[RTL_FRAG_LEN];
@@ -38,15 +45,61 @@ struct rtl_epatch_header {
__le16 num_patches;
} __packed;
+struct rtl_vendor_config_entry {
+ __le16 offset;
+ __u8 len;
+ __u8 data[0];
+} __packed;
+
+struct rtl_vendor_config {
+ __le32 signature;
+ __le16 total_len;
+ struct rtl_vendor_config_entry entry[0];
+} __packed;
+
#if IS_ENABLED(CONFIG_BT_RTL)
+struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ const char *postfix);
+void btrtl_free(struct btrtl_device_info *btrtl_dev);
+int btrtl_download_firmware(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev);
int btrtl_setup_realtek(struct hci_dev *hdev);
+int btrtl_get_uart_settings(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
+ unsigned int *controller_baudrate,
+ u32 *device_baudrate, bool *flow_control);
#else
+static inline struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ const char *postfix)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void btrtl_free(struct btrtl_device_info *btrtl_dev)
+{
+}
+
+static inline int btrtl_download_firmware(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int btrtl_setup_realtek(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
}
+static inline int btrtl_get_uart_settings(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
+ unsigned int *controller_baudrate,
+ u32 *device_baudrate,
+ bool *flow_control)
+{
+ return -ENOENT;
+}
+
#endif
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f73a27ea28cc..cd2e5cf14ea5 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -374,6 +374,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8723DE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */
@@ -509,9 +510,10 @@ static inline void btusb_free_frags(struct btusb_data *data)
static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
{
struct sk_buff *skb;
+ unsigned long flags;
int err = 0;
- spin_lock(&data->rxlock);
+ spin_lock_irqsave(&data->rxlock, flags);
skb = data->evt_skb;
while (count) {
@@ -556,7 +558,7 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
}
data->evt_skb = skb;
- spin_unlock(&data->rxlock);
+ spin_unlock_irqrestore(&data->rxlock, flags);
return err;
}
@@ -564,9 +566,10 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
{
struct sk_buff *skb;
+ unsigned long flags;
int err = 0;
- spin_lock(&data->rxlock);
+ spin_lock_irqsave(&data->rxlock, flags);
skb = data->acl_skb;
while (count) {
@@ -613,7 +616,7 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
}
data->acl_skb = skb;
- spin_unlock(&data->rxlock);
+ spin_unlock_irqrestore(&data->rxlock, flags);
return err;
}
@@ -621,9 +624,10 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count)
{
struct sk_buff *skb;
+ unsigned long flags;
int err = 0;
- spin_lock(&data->rxlock);
+ spin_lock_irqsave(&data->rxlock, flags);
skb = data->sco_skb;
while (count) {
@@ -668,7 +672,7 @@ static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count)
}
data->sco_skb = skb;
- spin_unlock(&data->rxlock);
+ spin_unlock_irqrestore(&data->rxlock, flags);
return err;
}
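The rxlock conversions above (and the txlock one below) switch to the irqsave variants because URB completion callbacks can run in hard-IRQ context; any lock shared with such a callback must disable local interrupts, and saving/restoring the flags keeps the code correct from any calling context. A minimal kernel-C sketch of the pattern, outside any driver:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(lock);
    static int shared;

    /* safe in process, softirq, or hard-irq context: the caller's
     * interrupt state is saved and restored rather than assumed
     */
    static void update_shared(int v)
    {
        unsigned long flags;

        spin_lock_irqsave(&lock, flags);
        shared = v;
        spin_unlock_irqrestore(&lock, flags);
    }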
@@ -1066,6 +1070,7 @@ static void btusb_tx_complete(struct urb *urb)
struct sk_buff *skb = urb->context;
struct hci_dev *hdev = (struct hci_dev *)skb->dev;
struct btusb_data *data = hci_get_drvdata(hdev);
+ unsigned long flags;
BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status,
urb->actual_length);
@@ -1079,9 +1084,9 @@ static void btusb_tx_complete(struct urb *urb)
hdev->stat.err_tx++;
done:
- spin_lock(&data->txlock);
+ spin_lock_irqsave(&data->txlock, flags);
data->tx_in_flight--;
- spin_unlock(&data->txlock);
+ spin_unlock_irqrestore(&data->txlock, flags);
kfree(urb->setup_packet);
@@ -1593,13 +1598,13 @@ static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
ret = request_firmware(&fw, fwname, &hdev->dev);
if (ret < 0) {
if (ret == -EINVAL) {
- BT_ERR("%s Intel firmware file request failed (%d)",
- hdev->name, ret);
+ bt_dev_err(hdev, "Intel firmware file request failed (%d)",
+ ret);
return NULL;
}
- BT_ERR("%s failed to open Intel firmware file: %s(%d)",
- hdev->name, fwname, ret);
+ bt_dev_err(hdev, "failed to open Intel firmware file: %s (%d)",
+ fwname, ret);
/* If the correct firmware patch file is not found, use the
* default firmware patch file instead
@@ -1607,8 +1612,8 @@ static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bseq",
ver->hw_platform, ver->hw_variant);
if (request_firmware(&fw, fwname, &hdev->dev) < 0) {
- BT_ERR("%s failed to open default Intel fw file: %s",
- hdev->name, fwname);
+ bt_dev_err(hdev, "failed to open default fw file: %s",
+ fwname);
return NULL;
}
}
@@ -1637,7 +1642,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* process.
*/
if (remain > HCI_COMMAND_HDR_SIZE && *fw_ptr[0] != 0x01) {
- BT_ERR("%s Intel fw corrupted: invalid cmd read", hdev->name);
+ bt_dev_err(hdev, "Intel fw corrupted: invalid cmd read");
return -EINVAL;
}
(*fw_ptr)++;
@@ -1651,7 +1656,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* of command parameter. If not, the firmware file is corrupted.
*/
if (remain < cmd->plen) {
- BT_ERR("%s Intel fw corrupted: invalid cmd len", hdev->name);
+ bt_dev_err(hdev, "Intel fw corrupted: invalid cmd len");
return -EFAULT;
}
@@ -1684,8 +1689,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
remain -= sizeof(*evt);
if (remain < evt->plen) {
- BT_ERR("%s Intel fw corrupted: invalid evt len",
- hdev->name);
+ bt_dev_err(hdev, "Intel fw corrupted: invalid evt len");
return -EFAULT;
}
@@ -1699,15 +1703,15 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* file is corrupted.
*/
if (!evt || !evt_param || remain < 0) {
- BT_ERR("%s Intel fw corrupted: invalid evt read", hdev->name);
+ bt_dev_err(hdev, "Intel fw corrupted: invalid evt read");
return -EFAULT;
}
skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cmd->opcode), cmd->plen,
cmd_param, evt->evt, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)",
- hdev->name, cmd->opcode, PTR_ERR(skb));
+ bt_dev_err(hdev, "sending Intel patch command (0x%4.4x) failed (%ld)",
+ cmd->opcode, PTR_ERR(skb));
return PTR_ERR(skb);
}
@@ -1716,15 +1720,15 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
* the contents of the event.
*/
if (skb->len != evt->plen) {
- BT_ERR("%s mismatch event length (opcode 0x%4.4x)", hdev->name,
- le16_to_cpu(cmd->opcode));
+ bt_dev_err(hdev, "mismatch event length (opcode 0x%4.4x)",
+ le16_to_cpu(cmd->opcode));
kfree_skb(skb);
return -EFAULT;
}
if (memcmp(skb->data, evt_param, evt->plen)) {
- BT_ERR("%s mismatch event parameter (opcode 0x%4.4x)",
- hdev->name, le16_to_cpu(cmd->opcode));
+ bt_dev_err(hdev, "mismatch event parameter (opcode 0x%4.4x)",
+ le16_to_cpu(cmd->opcode));
kfree_skb(skb);
return -EFAULT;
}
@@ -1753,8 +1757,8 @@ static int btusb_setup_intel(struct hci_dev *hdev)
*/
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s sending initial HCI reset command failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ bt_dev_err(hdev, "sending initial HCI reset command failed (%ld)",
+ PTR_ERR(skb));
return PTR_ERR(skb);
}
kfree_skb(skb);
@@ -1890,7 +1894,7 @@ static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
struct hci_event_hdr *hdr;
struct hci_ev_cmd_complete *evt;
- skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+ skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
if (!skb)
return -ENOMEM;
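The GFP_ATOMIC to GFP_KERNEL switches here and in hci_intel.c below assert that these allocations happen where sleeping is allowed; GFP_KERNEL may block to reclaim memory, while GFP_ATOMIC never sleeps but fails more readily. A short sketch of the distinction (toy functions, not from the driver):

    #include <linux/slab.h>

    static void *alloc_may_sleep(void)
    {
        /* process context only: may block while memory is reclaimed */
        return kmalloc(64, GFP_KERNEL);
    }

    static void *alloc_atomic(void)
    {
        /* IRQ/atomic context: never sleeps, dips into reserves */
        return kmalloc(64, GFP_ATOMIC);
    }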
@@ -2084,8 +2088,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* for now only accept this single value.
*/
if (ver.hw_platform != 0x37) {
- BT_ERR("%s: Unsupported Intel hardware platform (%u)",
- hdev->name, ver.hw_platform);
+ bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
+ ver.hw_platform);
return -EINVAL;
}
@@ -2104,8 +2108,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
case 0x14: /* QnJ, IcP */
break;
default:
- BT_ERR("%s: Unsupported Intel hardware variant (%u)",
- hdev->name, ver.hw_variant);
+ bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
+ ver.hw_variant);
return -EINVAL;
}
@@ -2134,8 +2138,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* choice is to return an error and abort the device initialization.
*/
if (ver.fw_variant != 0x06) {
- BT_ERR("%s: Unsupported Intel firmware variant (%u)",
- hdev->name, ver.fw_variant);
+ bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)",
+ ver.fw_variant);
return -ENODEV;
}
@@ -2151,8 +2155,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* that this bootloader does not send them, then abort the setup.
*/
if (params.limited_cce != 0x00) {
- BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
- hdev->name, params.limited_cce);
+ bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)",
+ params.limited_cce);
return -EINVAL;
}
@@ -2202,14 +2206,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
le16_to_cpu(ver.fw_revision));
break;
default:
- BT_ERR("%s: Unsupported Intel firmware naming", hdev->name);
+ bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL;
}
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
- BT_ERR("%s: Failed to load Intel firmware file (%d)",
- hdev->name, err);
+ bt_dev_err(hdev, "Failed to load Intel firmware file (%d)", err);
return err;
}
@@ -2235,13 +2238,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
le16_to_cpu(ver.fw_revision));
break;
default:
- BT_ERR("%s: Unsupported Intel firmware naming", hdev->name);
+ bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL;
}
if (fw->size < 644) {
- BT_ERR("%s: Invalid size of firmware file (%zu)",
- hdev->name, fw->size);
+ bt_dev_err(hdev, "Invalid size of firmware file (%zu)",
+ fw->size);
err = -EBADF;
goto done;
}
@@ -2272,18 +2275,18 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
TASK_INTERRUPTIBLE,
msecs_to_jiffies(5000));
if (err == -EINTR) {
- BT_ERR("%s: Firmware loading interrupted", hdev->name);
+ bt_dev_err(hdev, "Firmware loading interrupted");
goto done;
}
if (err) {
- BT_ERR("%s: Firmware loading timeout", hdev->name);
+ bt_dev_err(hdev, "Firmware loading timeout");
err = -ETIMEDOUT;
goto done;
}
if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) {
- BT_ERR("%s: Firmware loading failed", hdev->name);
+ bt_dev_err(hdev, "Firmware loading failed");
err = -ENOEXEC;
goto done;
}
@@ -2322,12 +2325,12 @@ done:
msecs_to_jiffies(1000));
if (err == -EINTR) {
- BT_ERR("%s: Device boot interrupted", hdev->name);
+ bt_dev_err(hdev, "Device boot interrupted");
return -EINTR;
}
if (err) {
- BT_ERR("%s: Device boot timeout", hdev->name);
+ bt_dev_err(hdev, "Device boot timeout");
return -ETIMEDOUT;
}
@@ -2364,6 +2367,22 @@ static int btusb_shutdown_intel(struct hci_dev *hdev)
struct sk_buff *skb;
long ret;
+ /* In the shutdown sequence where Bluetooth is turned off followed
+ * by WiFi being turned off, turning WiFi back on causes an issue
+ * with RF calibration.
+ *
+ * To ensure that any RF activity has stopped, issue an HCI Reset
+ * command to clear all ongoing activity, including advertising and
+ * scanning.
+ */
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ bt_dev_err(hdev, "HCI reset during shutdown failed");
+ return ret;
+ }
+ kfree_skb(skb);
+
/* Some platforms have an issue with the BT LED when the interface is
* down or the BT radio is turned off: it takes 5 seconds for the BT
* LED to go off. This command turns off the BT LED immediately.
@@ -2371,8 +2390,7 @@ static int btusb_shutdown_intel(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
- BT_ERR("%s: turning off Intel device LED failed (%ld)",
- hdev->name, ret);
+ bt_dev_err(hdev, "turning off Intel device LED failed");
return ret;
}
kfree_skb(skb);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 6a8d0d06aba7..8eede1197cd2 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -21,13 +21,18 @@
*
*/
-#include <linux/kernel.h>
+#include <linux/acpi.h>
#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/serdev.h>
#include <linux/skbuff.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include "btrtl.h"
#include "hci_uart.h"
#define HCI_3WIRE_ACK_PKT 0
@@ -65,6 +70,9 @@ enum {
};
struct h5 {
+ /* Must be the first member, hci_serdev.c expects this. */
+ struct hci_uart serdev_hu;
+
struct sk_buff_head unack; /* Unack'ed packets queue */
struct sk_buff_head rel; /* Reliable packets queue */
struct sk_buff_head unrel; /* Unreliable packets queue */
@@ -95,6 +103,19 @@ struct h5 {
H5_SLEEPING,
H5_WAKING_UP,
} sleep;
+
+ const struct h5_vnd *vnd;
+ const char *id;
+
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *device_wake_gpio;
+};
+
+struct h5_vnd {
+ int (*setup)(struct h5 *h5);
+ void (*open)(struct h5 *h5);
+ void (*close)(struct h5 *h5);
+ const struct acpi_gpio_mapping *acpi_gpio_map;
};
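Keeping serdev_hu first is what makes the drvdata sharing work: hci_serdev.c retrieves the driver data as a struct hci_uart *, and C guarantees that a pointer to a struct, suitably converted, points at its first member. A standalone sketch of the trick:

    #include <stdio.h>

    struct base { int x; };

    struct derived {
        struct base b;   /* must be first: &d == (struct base *)&d */
        int extra;
    };

    int main(void)
    {
        struct derived d = { { 1 }, 2 };
        struct base *bp = (struct base *)&d;     /* valid: b is first */
        struct derived *dp = (struct derived *)bp;

        printf("%d %d\n", bp->x, dp->extra);     /* 1 2 */
        return 0;
    }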
static void h5_reset_rx(struct h5 *h5);
@@ -193,9 +214,13 @@ static int h5_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
- h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
- if (!h5)
- return -ENOMEM;
+ if (hu->serdev) {
+ h5 = serdev_device_get_drvdata(hu->serdev);
+ } else {
+ h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
+ if (!h5)
+ return -ENOMEM;
+ }
hu->priv = h5;
h5->hu = hu;
@@ -210,6 +235,9 @@ static int h5_open(struct hci_uart *hu)
h5->tx_win = H5_TX_WIN_MAX;
+ if (h5->vnd && h5->vnd->open)
+ h5->vnd->open(h5);
+
set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);
/* Send initial sync request */
@@ -229,7 +257,21 @@ static int h5_close(struct hci_uart *hu)
skb_queue_purge(&h5->rel);
skb_queue_purge(&h5->unrel);
- kfree(h5);
+ if (h5->vnd && h5->vnd->close)
+ h5->vnd->close(h5);
+
+ if (!hu->serdev)
+ kfree(h5);
+
+ return 0;
+}
+
+static int h5_setup(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+
+ if (h5->vnd && h5->vnd->setup)
+ return h5->vnd->setup(h5);
return 0;
}
@@ -744,18 +786,172 @@ static const struct hci_uart_proto h5p = {
.name = "Three-wire (H5)",
.open = h5_open,
.close = h5_close,
+ .setup = h5_setup,
.recv = h5_recv,
.enqueue = h5_enqueue,
.dequeue = h5_dequeue,
.flush = h5_flush,
};
+static int h5_serdev_probe(struct serdev_device *serdev)
+{
+ const struct acpi_device_id *match;
+ struct device *dev = &serdev->dev;
+ struct h5 *h5;
+
+ h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
+ if (!h5)
+ return -ENOMEM;
+
+ set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);
+
+ h5->hu = &h5->serdev_hu;
+ h5->serdev_hu.serdev = serdev;
+ serdev_device_set_drvdata(serdev, h5);
+
+ if (has_acpi_companion(dev)) {
+ match = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!match)
+ return -ENODEV;
+
+ h5->vnd = (const struct h5_vnd *)match->driver_data;
+ h5->id = (char *)match->id;
+
+ if (h5->vnd->acpi_gpio_map)
+ devm_acpi_dev_add_driver_gpios(dev,
+ h5->vnd->acpi_gpio_map);
+ }
+
+ h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(h5->enable_gpio))
+ return PTR_ERR(h5->enable_gpio);
+
+ h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(h5->device_wake_gpio))
+ return PTR_ERR(h5->device_wake_gpio);
+
+ return hci_uart_register_device(&h5->serdev_hu, &h5p);
+}
+
+static void h5_serdev_remove(struct serdev_device *serdev)
+{
+ struct h5 *h5 = serdev_device_get_drvdata(serdev);
+
+ hci_uart_unregister_device(&h5->serdev_hu);
+}
+
+#ifdef CONFIG_BT_HCIUART_RTL
+static int h5_btrtl_setup(struct h5 *h5)
+{
+ struct btrtl_device_info *btrtl_dev;
+ struct sk_buff *skb;
+ __le32 baudrate_data;
+ u32 device_baudrate;
+ unsigned int controller_baudrate;
+ bool flow_control;
+ int err;
+
+ btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
+ if (IS_ERR(btrtl_dev))
+ return PTR_ERR(btrtl_dev);
+
+ err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
+ &controller_baudrate, &device_baudrate,
+ &flow_control);
+ if (err)
+ goto out_free;
+
+ baudrate_data = cpu_to_le32(device_baudrate);
+ skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
+ &baudrate_data, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n");
+ err = PTR_ERR(skb);
+ goto out_free;
+ }
+ kfree_skb(skb);
+
+ /* Give the device some time to set up the new baudrate. */
+ usleep_range(10000, 20000);
+
+ serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
+ serdev_device_set_flow_control(h5->hu->serdev, flow_control);
+
+ err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
+ /* Give the device some time before the hci-core sends it a reset */
+ usleep_range(10000, 20000);
+
+out_free:
+ btrtl_free(btrtl_dev);
+
+ return err;
+}
+
+static void h5_btrtl_open(struct h5 *h5)
+{
+ /* Devices always start with these fixed parameters */
+ serdev_device_set_flow_control(h5->hu->serdev, false);
+ serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
+ serdev_device_set_baudrate(h5->hu->serdev, 115200);
+
+ /* The controller needs up to 500 ms to wake up */
+ gpiod_set_value_cansleep(h5->enable_gpio, 1);
+ gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
+ msleep(500);
+}
+
+static void h5_btrtl_close(struct h5 *h5)
+{
+ gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
+ gpiod_set_value_cansleep(h5->enable_gpio, 0);
+}
+
+static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false };
+static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false };
+static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false };
+static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = {
+ { "device-wake-gpios", &btrtl_device_wake_gpios, 1 },
+ { "enable-gpios", &btrtl_enable_gpios, 1 },
+ { "host-wake-gpios", &btrtl_host_wake_gpios, 1 },
+ {},
+};
+
+static struct h5_vnd rtl_vnd = {
+ .setup = h5_btrtl_setup,
+ .open = h5_btrtl_open,
+ .close = h5_btrtl_close,
+ .acpi_gpio_map = acpi_btrtl_gpios,
+};
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id h5_acpi_match[] = {
+#ifdef CONFIG_BT_HCIUART_RTL
+ { "OBDA8723", (kernel_ulong_t)&rtl_vnd },
+#endif
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, h5_acpi_match);
+#endif
+
+static struct serdev_device_driver h5_serdev_driver = {
+ .probe = h5_serdev_probe,
+ .remove = h5_serdev_remove,
+ .driver = {
+ .name = "hci_uart_h5",
+ .acpi_match_table = ACPI_PTR(h5_acpi_match),
+ },
+};
+
int __init h5_init(void)
{
+ serdev_device_driver_register(&h5_serdev_driver);
return hci_uart_register_proto(&h5p);
}
int __exit h5_deinit(void)
{
+ serdev_device_driver_unregister(&h5_serdev_driver);
return hci_uart_unregister_proto(&h5p);
}
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 7c166e3b308b..46ace321bf60 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -458,7 +458,7 @@ static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
struct hci_event_hdr *hdr;
struct hci_ev_cmd_complete *evt;
- skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+ skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
if (!skb)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 51790dd02afb..e182f6019f68 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -5,7 +5,7 @@
* protocol extension to H4.
*
* Copyright (C) 2007 Texas Instruments, Inc.
- * Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_ll.c, which was...
@@ -31,9 +31,14 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <net/bluetooth/bluetooth.h>
@@ -119,12 +124,51 @@ struct qca_data {
u64 votes_off;
};
+enum qca_speed_type {
+ QCA_INIT_SPEED = 1,
+ QCA_OPER_SPEED
+};
+
+/*
+ * Voltage regulator information required for configuring the
+ * QCA Bluetooth chipset
+ */
+struct qca_vreg {
+ const char *name;
+ unsigned int min_uV;
+ unsigned int max_uV;
+ unsigned int load_uA;
+};
+
+struct qca_vreg_data {
+ enum qca_btsoc_type soc_type;
+ struct qca_vreg *vregs;
+ size_t num_vregs;
+};
+
+/*
+ * Platform data for the QCA Bluetooth power driver.
+ */
+struct qca_power {
+ struct device *dev;
+ const struct qca_vreg_data *vreg_data;
+ struct regulator_bulk_data *vreg_bulk;
+ bool vregs_on;
+};
+
struct qca_serdev {
struct hci_uart serdev_hu;
struct gpio_desc *bt_en;
struct clk *susclk;
+ enum qca_btsoc_type btsoc_type;
+ struct qca_power *bt_power;
+ u32 init_speed;
+ u32 oper_speed;
};
+static int qca_power_setup(struct hci_uart *hu, bool on);
+static void qca_power_shutdown(struct hci_dev *hdev);
+
static void __serial_clock_on(struct tty_struct *tty)
{
/* TODO: Some chipset requires to enable UART clock on client
@@ -402,10 +446,11 @@ static int qca_open(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
struct qca_data *qca;
+ int ret;
BT_DBG("hu %p qca_open", hu);
- qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
+ qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
if (!qca)
return -ENOMEM;
@@ -453,19 +498,32 @@ static int qca_open(struct hci_uart *hu)
hu->priv = qca;
- timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
- qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
-
- timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
- qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
-
if (hu->serdev) {
serdev_device_open(hu->serdev);
qcadev = serdev_device_get_drvdata(hu->serdev);
- gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ if (qcadev->btsoc_type != QCA_WCN3990) {
+ gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ } else {
+ hu->init_speed = qcadev->init_speed;
+ hu->oper_speed = qcadev->oper_speed;
+ ret = qca_power_setup(hu, true);
+ if (ret) {
+ destroy_workqueue(qca->workqueue);
+ kfree_skb(qca->rx_skb);
+ hu->priv = NULL;
+ kfree(qca);
+ return ret;
+ }
+ }
}
+ timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
+ qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
+
+ timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
+ qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
+
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
qca->tx_idle_delay, qca->wake_retrans);
@@ -549,10 +607,13 @@ static int qca_close(struct hci_uart *hu)
qca->hu = NULL;
if (hu->serdev) {
- serdev_device_close(hu->serdev);
-
qcadev = serdev_device_get_drvdata(hu->serdev);
- gpiod_set_value_cansleep(qcadev->bt_en, 0);
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ qca_power_shutdown(hu->hdev);
+ else
+ gpiod_set_value_cansleep(qcadev->bt_en, 0);
+
+ serdev_device_close(hu->serdev);
}
kfree_skb(qca->rx_skb);
@@ -872,6 +933,8 @@ static uint8_t qca_get_baudrate_value(int speed)
return QCA_BAUDRATE_2000000;
case 3000000:
return QCA_BAUDRATE_3000000;
+ case 3200000:
+ return QCA_BAUDRATE_3200000;
case 3500000:
return QCA_BAUDRATE_3500000;
default:
@@ -884,19 +947,27 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
struct sk_buff *skb;
+ struct qca_serdev *qcadev;
u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
- if (baudrate > QCA_BAUDRATE_3000000)
+ if (baudrate > QCA_BAUDRATE_3200000)
return -EINVAL;
cmd[4] = baudrate;
- skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
+ skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
if (!skb) {
bt_dev_err(hdev, "Failed to allocate baudrate packet");
return -ENOMEM;
}
+ /* Disabling hardware flow control is mandatory while
+ * sending a change-baudrate request to the wcn3990 SoC.
+ */
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ hci_uart_set_flow_control(hu, true);
+
/* Assign commands to change baudrate and packet type. */
skb_put_data(skb, cmd, sizeof(cmd));
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
@@ -912,6 +983,9 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
set_current_state(TASK_RUNNING);
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ hci_uart_set_flow_control(hu, false);
+
return 0;
}
@@ -923,50 +997,195 @@ static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
hci_uart_set_baudrate(hu, speed);
}
+static int qca_send_power_pulse(struct hci_dev *hdev, u8 cmd)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+ struct sk_buff *skb;
+
+ /* These power pulses are single-byte commands sent to the wcn3990
+ * at a specific baudrate; an external circuit on the wcn3990's Tx
+ * pin decodes a pulse from the baudrate it arrives at. For example,
+ * the wcn3990 shares an RF COEX antenna and the same power inputs
+ * between Wi-Fi and BT, so powering up the supplies does not enable
+ * BT until we send a power-on pulse at 115200 bps. This scheme helps
+ * to save power. Disabling hardware flow control is mandatory while
+ * sending power pulses to the SoC.
+ */
+ bt_dev_dbg(hdev, "sending power pulse %02x to SoC", cmd);
+
+ skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hci_uart_set_flow_control(hu, true);
+
+ skb_put_u8(skb, cmd);
+ hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
+
+ skb_queue_tail(&qca->txq, skb);
+ hci_uart_tx_wakeup(hu);
+
+ /* Wait 100 us for the SoC to settle down */
+ usleep_range(100, 200);
+ hci_uart_set_flow_control(hu, false);
+
+ return 0;
+}
+
+static unsigned int qca_get_speed(struct hci_uart *hu,
+ enum qca_speed_type speed_type)
+{
+ unsigned int speed = 0;
+
+ if (speed_type == QCA_INIT_SPEED) {
+ if (hu->init_speed)
+ speed = hu->init_speed;
+ else if (hu->proto->init_speed)
+ speed = hu->proto->init_speed;
+ } else {
+ if (hu->oper_speed)
+ speed = hu->oper_speed;
+ else if (hu->proto->oper_speed)
+ speed = hu->proto->oper_speed;
+ }
+
+ return speed;
+}
+
+static int qca_check_speeds(struct hci_uart *hu)
+{
+ struct qca_serdev *qcadev;
+
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (qcadev->btsoc_type == QCA_WCN3990) {
+ if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
+ !qca_get_speed(hu, QCA_OPER_SPEED))
+ return -EINVAL;
+ } else {
+ if (!qca_get_speed(hu, QCA_INIT_SPEED) ||
+ !qca_get_speed(hu, QCA_OPER_SPEED))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
+{
+ unsigned int speed, qca_baudrate;
+ int ret;
+
+ if (speed_type == QCA_INIT_SPEED) {
+ speed = qca_get_speed(hu, QCA_INIT_SPEED);
+ if (speed)
+ host_set_baudrate(hu, speed);
+ } else {
+ speed = qca_get_speed(hu, QCA_OPER_SPEED);
+ if (!speed)
+ return 0;
+
+ qca_baudrate = qca_get_baudrate_value(speed);
+ bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed);
+ ret = qca_set_baudrate(hu->hdev, qca_baudrate);
+ if (ret)
+ return ret;
+
+ host_set_baudrate(hu, speed);
+ }
+
+ return 0;
+}
+
+static int qca_wcn3990_init(struct hci_uart *hu)
+{
+ struct hci_dev *hdev = hu->hdev;
+ int ret;
+
+ /* Force the wcn3990 into boot mode. */
+ host_set_baudrate(hu, 2400);
+ ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
+ if (ret)
+ return ret;
+
+ qca_set_speed(hu, QCA_INIT_SPEED);
+ ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWERON_PULSE);
+ if (ret)
+ return ret;
+
+ /* Wait for 100 ms for SoC to boot */
+ msleep(100);
+
+ /* The device is now ready to communicate with the host. To sync
+ * the host with the device we need to reopen the port; without
+ * this, we will have RTS and CTS synchronization issues.
+ */
+ serdev_device_close(hu->serdev);
+ ret = serdev_device_open(hu->serdev);
+ if (ret) {
+ bt_dev_err(hu->hdev, "failed to open port");
+ return ret;
+ }
+
+ hci_uart_set_flow_control(hu, false);
+
+ return 0;
+}
+
static int qca_setup(struct hci_uart *hu)
{
struct hci_dev *hdev = hu->hdev;
struct qca_data *qca = hu->priv;
unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
+ struct qca_serdev *qcadev;
int ret;
+ int soc_ver = 0;
+
+ qcadev = serdev_device_get_drvdata(hu->serdev);
- bt_dev_info(hdev, "ROME setup");
+ ret = qca_check_speeds(hu);
+ if (ret)
+ return ret;
/* Patch downloading has to be done without IBS mode */
clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
- /* Setup initial baudrate */
- speed = 0;
- if (hu->init_speed)
- speed = hu->init_speed;
- else if (hu->proto->init_speed)
- speed = hu->proto->init_speed;
+ if (qcadev->btsoc_type == QCA_WCN3990) {
+ bt_dev_info(hdev, "setting up wcn3990");
+ ret = qca_wcn3990_init(hu);
+ if (ret)
+ return ret;
- if (speed)
- host_set_baudrate(hu, speed);
+ ret = qca_read_soc_version(hdev, &soc_ver);
+ if (ret)
+ return ret;
+ } else {
+ bt_dev_info(hdev, "ROME setup");
+ qca_set_speed(hu, QCA_INIT_SPEED);
+ }
/* Setup user speed if needed */
- speed = 0;
- if (hu->oper_speed)
- speed = hu->oper_speed;
- else if (hu->proto->oper_speed)
- speed = hu->proto->oper_speed;
-
+ speed = qca_get_speed(hu, QCA_OPER_SPEED);
if (speed) {
+ ret = qca_set_speed(hu, QCA_OPER_SPEED);
+ if (ret)
+ return ret;
+
qca_baudrate = qca_get_baudrate_value(speed);
+ }
- bt_dev_info(hdev, "Set UART speed to %d", speed);
- ret = qca_set_baudrate(hdev, qca_baudrate);
- if (ret) {
- bt_dev_err(hdev, "Failed to change the baud rate (%d)",
- ret);
+ if (qcadev->btsoc_type != QCA_WCN3990) {
+ /* Get QCA version information */
+ ret = qca_read_soc_version(hdev, &soc_ver);
+ if (ret)
return ret;
- }
- host_set_baudrate(hu, speed);
}
+ bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver);
/* Setup patch / NVM configurations */
- ret = qca_uart_setup_rome(hdev, qca_baudrate);
+ ret = qca_uart_setup(hdev, qca_baudrate, qcadev->btsoc_type, soc_ver);
if (!ret) {
set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
qca_debugfs_init(hdev);
@@ -1002,9 +1221,123 @@ static struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
+static const struct qca_vreg_data qca_soc_data = {
+ .soc_type = QCA_WCN3990,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 1800000, 1900000, 15000 },
+ { "vddxo", 1800000, 1900000, 80000 },
+ { "vddrf", 1300000, 1350000, 300000 },
+ { "vddch0", 3300000, 3400000, 450000 },
+ },
+ .num_vregs = 4,
+};
+
+static void qca_power_shutdown(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
+ host_set_baudrate(hu, 2400);
+ qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
+ qca_power_setup(hu, false);
+}
+
+static int qca_enable_regulator(struct qca_vreg vregs,
+ struct regulator *regulator)
+{
+ int ret;
+
+ ret = regulator_set_voltage(regulator, vregs.min_uV,
+ vregs.max_uV);
+ if (ret)
+ return ret;
+
+ if (vregs.load_uA)
+ ret = regulator_set_load(regulator,
+ vregs.load_uA);
+
+ if (ret)
+ return ret;
+
+ return regulator_enable(regulator);
+
+}
+
+static void qca_disable_regulator(struct qca_vreg vregs,
+ struct regulator *regulator)
+{
+ regulator_disable(regulator);
+ regulator_set_voltage(regulator, 0, vregs.max_uV);
+ if (vregs.load_uA)
+ regulator_set_load(regulator, 0);
+
+}
+
+static int qca_power_setup(struct hci_uart *hu, bool on)
+{
+ struct qca_vreg *vregs;
+ struct regulator_bulk_data *vreg_bulk;
+ struct qca_serdev *qcadev;
+ int i, num_vregs, ret = 0;
+
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (!qcadev || !qcadev->bt_power || !qcadev->bt_power->vreg_data ||
+ !qcadev->bt_power->vreg_bulk)
+ return -EINVAL;
+
+ vregs = qcadev->bt_power->vreg_data->vregs;
+ vreg_bulk = qcadev->bt_power->vreg_bulk;
+ num_vregs = qcadev->bt_power->vreg_data->num_vregs;
+ BT_DBG("on: %d", on);
+ if (on && !qcadev->bt_power->vregs_on) {
+ for (i = 0; i < num_vregs; i++) {
+ ret = qca_enable_regulator(vregs[i],
+ vreg_bulk[i].consumer);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ BT_ERR("failed to enable regulator:%s", vregs[i].name);
+ /* turn off regulators which are enabled */
+ for (i = i - 1; i >= 0; i--)
+ qca_disable_regulator(vregs[i],
+ vreg_bulk[i].consumer);
+ } else {
+ qcadev->bt_power->vregs_on = true;
+ }
+ } else if (!on && qcadev->bt_power->vregs_on) {
+ /* turn off regulators in reverse order */
+ i = qcadev->bt_power->vreg_data->num_vregs - 1;
+ for ( ; i >= 0; i--)
+ qca_disable_regulator(vregs[i], vreg_bulk[i].consumer);
+
+ qcadev->bt_power->vregs_on = false;
+ }
+
+ return ret;
+}
+
+static int qca_init_regulators(struct qca_power *qca,
+ const struct qca_vreg *vregs, size_t num_vregs)
+{
+ int i;
+
+ qca->vreg_bulk = devm_kzalloc(qca->dev, num_vregs *
+ sizeof(struct regulator_bulk_data),
+ GFP_KERNEL);
+ if (!qca->vreg_bulk)
+ return -ENOMEM;
+
+ for (i = 0; i < num_vregs; i++)
+ qca->vreg_bulk[i].supply = vregs[i].name;
+
+ return devm_regulator_bulk_get(qca->dev, num_vregs, qca->vreg_bulk);
+}
+
static int qca_serdev_probe(struct serdev_device *serdev)
{
struct qca_serdev *qcadev;
+ const struct qca_vreg_data *data;
int err;
qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
@@ -1012,47 +1345,84 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return -ENOMEM;
qcadev->serdev_hu.serdev = serdev;
+ data = of_device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
+ if (data && data->soc_type == QCA_WCN3990) {
+ qcadev->btsoc_type = QCA_WCN3990;
+ qcadev->bt_power = devm_kzalloc(&serdev->dev,
+ sizeof(struct qca_power),
+ GFP_KERNEL);
+ if (!qcadev->bt_power)
+ return -ENOMEM;
+
+ qcadev->bt_power->dev = &serdev->dev;
+ qcadev->bt_power->vreg_data = data;
+ err = qca_init_regulators(qcadev->bt_power, data->vregs,
+ data->num_vregs);
+ if (err) {
+ BT_ERR("Failed to init regulators:%d", err);
+ goto out;
+ }
- qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
- GPIOD_OUT_LOW);
- if (IS_ERR(qcadev->bt_en)) {
- dev_err(&serdev->dev, "failed to acquire enable gpio\n");
- return PTR_ERR(qcadev->bt_en);
- }
+ qcadev->bt_power->vregs_on = false;
- qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
- if (IS_ERR(qcadev->susclk)) {
- dev_err(&serdev->dev, "failed to acquire clk\n");
- return PTR_ERR(qcadev->susclk);
- }
+ device_property_read_u32(&serdev->dev, "max-speed",
+ &qcadev->oper_speed);
+ if (!qcadev->oper_speed)
+ BT_DBG("UART will pick default operating speed");
- err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
- if (err)
- return err;
+ err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
+ if (err) {
+ BT_ERR("wcn3990 serdev registration failed");
+ goto out;
+ }
+ } else {
+ qcadev->btsoc_type = QCA_ROME;
+ qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(qcadev->bt_en)) {
+ dev_err(&serdev->dev, "failed to acquire enable gpio\n");
+ return PTR_ERR(qcadev->bt_en);
+ }
- err = clk_prepare_enable(qcadev->susclk);
- if (err)
- return err;
+ qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
+ if (IS_ERR(qcadev->susclk)) {
+ dev_err(&serdev->dev, "failed to acquire clk\n");
+ return PTR_ERR(qcadev->susclk);
+ }
- err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
- if (err)
- clk_disable_unprepare(qcadev->susclk);
+ err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(qcadev->susclk);
+ if (err)
+ return err;
+
+ err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
+ if (err)
+ clk_disable_unprepare(qcadev->susclk);
+ }
+
+out:
+	return err;
- return err;
}
static void qca_serdev_remove(struct serdev_device *serdev)
{
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
- hci_uart_unregister_device(&qcadev->serdev_hu);
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ qca_power_shutdown(qcadev->serdev_hu.hdev);
+ else
+ clk_disable_unprepare(qcadev->susclk);
- clk_disable_unprepare(qcadev->susclk);
+ hci_uart_unregister_device(&qcadev->serdev_hu);
}
static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
+ { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
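For context, the probe path above selects the wcn3990 branch purely from the OF match data attached to the compatible entry. A minimal sketch of that pattern (the names my_soc_data, my_wcn3990_data and my_probe are illustrative, not from this patch):

#include <linux/of_device.h>
#include <linux/serdev.h>

struct my_soc_data {			/* hypothetical per-SoC data */
	int soc_type;
};

static const struct my_soc_data my_wcn3990_data = { .soc_type = 1 };

static const struct of_device_id my_of_match[] = {
	{ .compatible = "qcom,wcn3990-bt", .data = &my_wcn3990_data },
	{ /* sentinel */ }
};

static int my_probe(struct serdev_device *serdev)
{
	const struct my_soc_data *data;

	/* Returns NULL when the matched entry carries no .data pointer,
	 * which is how qca_serdev_probe() falls back to the ROME path.
	 */
	data = of_device_get_match_data(&serdev->dev);
	if (data && data->soc_type == 1)
		return 0;	/* take the wcn3990-style init path */

	return 0;		/* default/ROME-style path */
}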
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index a78b8e7085e9..113fc6edb2b0 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -282,6 +282,7 @@
#include <linux/blkdev.h>
#include <linux/times.h>
#include <linux/uaccess.h>
+#include <scsi/scsi_common.h>
#include <scsi/scsi_request.h>
/* used to tell the module to turn on full debugging messages */
@@ -345,10 +346,10 @@ static LIST_HEAD(cdrom_list);
int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
- if (cgc->sense) {
- cgc->sense->sense_key = 0x05;
- cgc->sense->asc = 0x20;
- cgc->sense->ascq = 0x00;
+ if (cgc->sshdr) {
+ cgc->sshdr->sense_key = 0x05;
+ cgc->sshdr->asc = 0x20;
+ cgc->sshdr->ascq = 0x00;
}
cgc->stat = -EIO;
@@ -2222,9 +2223,12 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
blk_execute_rq(q, cdi->disk, rq, 0);
if (scsi_req(rq)->result) {
- struct request_sense *s = req->sense;
+ struct scsi_sense_hdr sshdr;
+
ret = -EIO;
- cdi->last_sense = s->sense_key;
+ scsi_normalize_sense(req->sense, req->sense_len,
+ &sshdr);
+ cdi->last_sense = sshdr.sense_key;
}
if (blk_rq_unmap_user(bio))
@@ -2943,7 +2947,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
struct packet_command *cgc,
int cmd)
{
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
struct cdrom_msf msf;
int blocksize = 0, format = 0, lba;
int ret;
@@ -2971,13 +2975,13 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
if (cgc->buffer == NULL)
return -ENOMEM;
- memset(&sense, 0, sizeof(sense));
- cgc->sense = &sense;
+ memset(&sshdr, 0, sizeof(sshdr));
+ cgc->sshdr = &sshdr;
cgc->data_direction = CGC_DATA_READ;
ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize);
- if (ret && sense.sense_key == 0x05 &&
- sense.asc == 0x20 &&
- sense.ascq == 0x00) {
+ if (ret && sshdr.sense_key == 0x05 &&
+ sshdr.asc == 0x20 &&
+ sshdr.ascq == 0x00) {
/*
* SCSI-II devices are not required to support
* READ_CD, so let's try switching block size
@@ -2986,7 +2990,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
ret = cdrom_switch_blocksize(cdi, blocksize);
if (ret)
goto out;
- cgc->sense = NULL;
+ cgc->sshdr = NULL;
ret = cdrom_read_cd(cdi, cgc, lba, blocksize, 1);
ret |= cdrom_switch_blocksize(cdi, blocksize);
}
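The cdrom conversion above stops poking at a raw struct request_sense and instead normalizes whatever sense format the device returned. A minimal sketch of the helper's contract (the wrapper name decode_invalid_opcode() is illustrative):

#include <scsi/scsi_common.h>

/* scsi_normalize_sense() accepts both fixed- and descriptor-format
 * sense data and fills a struct scsi_sense_hdr; it returns false if
 * the buffer did not contain valid sense data.
 */
static bool decode_invalid_opcode(const u8 *sense, int len)
{
	struct scsi_sense_hdr sshdr;

	if (!scsi_normalize_sense(sense, len, &sshdr))
		return false;

	/* 0x05/0x20/0x00 = ILLEGAL REQUEST / INVALID COMMAND OPCODE */
	return sshdr.sense_key == 0x05 && sshdr.asc == 0x20 &&
	       sshdr.ascq == 0x00;
}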
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 212f447938ae..ce277ee0a28a 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -554,3 +554,17 @@ config ADI
endmenu
+config RANDOM_TRUST_CPU
+ bool "Trust the CPU manufacturer to initialize Linux's CRNG"
+ depends on X86 || S390 || PPC
+ default n
+ help
+ Assume that the CPU manufacturer (e.g., Intel or AMD for RDSEED or
+ RDRAND, or IBM for the S390 and PowerPC architectures) is trustworthy
+ for the purposes of initializing Linux's CRNG. Since this is not
+ something that can be independently audited, this amounts to trusting
+ that the CPU manufacturer (perhaps with the insistence or mandate
+ of a Nation State's intelligence or law enforcement agencies)
+ has not installed a hidden back door to compromise the CPU's
+ random number generation facilities.
+
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c34b257d852d..dac895dc01b9 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -307,19 +307,6 @@ config HW_RANDOM_HISI
If unsure, say Y.
-config HW_RANDOM_MSM
- tristate "Qualcomm SoCs Random Number Generator support"
- depends on HW_RANDOM && ARCH_QCOM
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on Qualcomm SoCs.
-
- To compile this driver as a module, choose M here. the
- module will be called msm-rng.
-
- If unsure, say Y.
-
config HW_RANDOM_ST
tristate "ST Microelectronics HW Random Number Generator support"
depends on HW_RANDOM && ARCH_STI
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 533e913c93d1..e35ec3ce3a20 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
-obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
deleted file mode 100644
index 841fee845ec9..000000000000
--- a/drivers/char/hw_random/msm-rng.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/hw_random.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-/* Device specific register offsets */
-#define PRNG_DATA_OUT 0x0000
-#define PRNG_STATUS 0x0004
-#define PRNG_LFSR_CFG 0x0100
-#define PRNG_CONFIG 0x0104
-
-/* Device specific register masks and config values */
-#define PRNG_LFSR_CFG_MASK 0x0000ffff
-#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd
-#define PRNG_CONFIG_HW_ENABLE BIT(1)
-#define PRNG_STATUS_DATA_AVAIL BIT(0)
-
-#define MAX_HW_FIFO_DEPTH 16
-#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4)
-#define WORD_SZ 4
-
-struct msm_rng {
- void __iomem *base;
- struct clk *clk;
- struct hwrng hwrng;
-};
-
-#define to_msm_rng(p) container_of(p, struct msm_rng, hwrng)
-
-static int msm_rng_enable(struct hwrng *hwrng, int enable)
-{
- struct msm_rng *rng = to_msm_rng(hwrng);
- u32 val;
- int ret;
-
- ret = clk_prepare_enable(rng->clk);
- if (ret)
- return ret;
-
- if (enable) {
- /* Enable PRNG only if it is not already enabled */
- val = readl_relaxed(rng->base + PRNG_CONFIG);
- if (val & PRNG_CONFIG_HW_ENABLE)
- goto already_enabled;
-
- val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
- val &= ~PRNG_LFSR_CFG_MASK;
- val |= PRNG_LFSR_CFG_CLOCKS;
- writel(val, rng->base + PRNG_LFSR_CFG);
-
- val = readl_relaxed(rng->base + PRNG_CONFIG);
- val |= PRNG_CONFIG_HW_ENABLE;
- writel(val, rng->base + PRNG_CONFIG);
- } else {
- val = readl_relaxed(rng->base + PRNG_CONFIG);
- val &= ~PRNG_CONFIG_HW_ENABLE;
- writel(val, rng->base + PRNG_CONFIG);
- }
-
-already_enabled:
- clk_disable_unprepare(rng->clk);
- return 0;
-}
-
-static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
-{
- struct msm_rng *rng = to_msm_rng(hwrng);
- size_t currsize = 0;
- u32 *retdata = data;
- size_t maxsize;
- int ret;
- u32 val;
-
- /* calculate max size bytes to transfer back to caller */
- maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
-
- ret = clk_prepare_enable(rng->clk);
- if (ret)
- return ret;
-
- /* read random data from hardware */
- do {
- val = readl_relaxed(rng->base + PRNG_STATUS);
- if (!(val & PRNG_STATUS_DATA_AVAIL))
- break;
-
- val = readl_relaxed(rng->base + PRNG_DATA_OUT);
- if (!val)
- break;
-
- *retdata++ = val;
- currsize += WORD_SZ;
-
- /* make sure we stay on 32bit boundary */
- if ((maxsize - currsize) < WORD_SZ)
- break;
- } while (currsize < maxsize);
-
- clk_disable_unprepare(rng->clk);
-
- return currsize;
-}
-
-static int msm_rng_init(struct hwrng *hwrng)
-{
- return msm_rng_enable(hwrng, 1);
-}
-
-static void msm_rng_cleanup(struct hwrng *hwrng)
-{
- msm_rng_enable(hwrng, 0);
-}
-
-static int msm_rng_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct msm_rng *rng;
- int ret;
-
- rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
- if (!rng)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, rng);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rng->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(rng->base))
- return PTR_ERR(rng->base);
-
- rng->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(rng->clk))
- return PTR_ERR(rng->clk);
-
- rng->hwrng.name = KBUILD_MODNAME,
- rng->hwrng.init = msm_rng_init,
- rng->hwrng.cleanup = msm_rng_cleanup,
- rng->hwrng.read = msm_rng_read,
-
- ret = devm_hwrng_register(&pdev->dev, &rng->hwrng);
- if (ret) {
- dev_err(&pdev->dev, "failed to register hwrng\n");
- return ret;
- }
-
- return 0;
-}
-
-static const struct of_device_id msm_rng_of_match[] = {
- { .compatible = "qcom,prng", },
- {}
-};
-MODULE_DEVICE_TABLE(of, msm_rng_of_match);
-
-static struct platform_driver msm_rng_driver = {
- .probe = msm_rng_probe,
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = of_match_ptr(msm_rng_of_match),
- }
-};
-module_platform_driver(msm_rng_driver);
-
-MODULE_ALIAS("platform:" KBUILD_MODNAME);
-MODULE_AUTHOR("The Linux Foundation");
-MODULE_DESCRIPTION("Qualcomm MSM random number generator driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/random.c b/drivers/char/random.c
index bd449ad52442..bf5f99fc36f1 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -782,6 +782,7 @@ static void invalidate_batched_entropy(void);
static void crng_initialize(struct crng_state *crng)
{
int i;
+ int arch_init = 1;
unsigned long rv;
memcpy(&crng->state[0], "expand 32-byte k", 16);
@@ -792,10 +793,18 @@ static void crng_initialize(struct crng_state *crng)
_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv))
+ !arch_get_random_long(&rv)) {
rv = random_get_entropy();
+ arch_init = 0;
+ }
crng->state[i] ^= rv;
}
+#ifdef CONFIG_RANDOM_TRUST_CPU
+ if (arch_init) {
+ crng_init = 2;
+ pr_notice("random: crng done (trusting CPU's manufacturer)\n");
+ }
+#endif
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
@@ -1122,8 +1131,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
} sample;
long delta, delta2, delta3;
- preempt_disable();
-
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
@@ -1161,8 +1168,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* and limit entropy estimate to 12 bits.
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
-
- preempt_enable();
}
void add_input_randomness(unsigned int type, unsigned int code,
@@ -1659,6 +1664,21 @@ int wait_for_random_bytes(void)
EXPORT_SYMBOL(wait_for_random_bytes);
/*
+ * Returns whether or not the urandom pool has been seeded and thus guaranteed
+ * to supply cryptographically secure random numbers. This applies to: the
+ * /dev/urandom device, the get_random_bytes function, and the
+ * get_random_{u32,u64,int,long} family of functions.
+ *
+ * Returns: true if the urandom pool has been seeded.
+ * false if the urandom pool has not been seeded.
+ */
+bool rng_is_initialized(void)
+{
+ return crng_ready();
+}
+EXPORT_SYMBOL(rng_is_initialized);
+
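A short usage sketch for the new export (my_generate_key() is a hypothetical caller, not part of this patch): code that needs guaranteed-seeded output can test rng_is_initialized() and fall back to the blocking wait.

static int my_generate_key(u8 *key, int len)
{
	if (!rng_is_initialized()) {
		int ret = wait_for_random_bytes();	/* -ERESTARTSYS on signal */

		if (ret)
			return ret;
	}
	get_random_bytes(key, len);
	return 0;
}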
+/*
* Add a callback function that will be invoked when the nonblocking
* pool is initialised.
*
@@ -1725,30 +1745,31 @@ EXPORT_SYMBOL(del_random_ready_callback);
* key known by the NSA). So it's useful if we need the speed, but
* only if we're willing to trust the hardware manufacturer not to
* have put in a back door.
+ *
+ * Return number of bytes filled in.
*/
-void get_random_bytes_arch(void *buf, int nbytes)
+int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
+ int left = nbytes;
char *p = buf;
- trace_get_random_bytes_arch(nbytes, _RET_IP_);
- while (nbytes) {
+ trace_get_random_bytes_arch(left, _RET_IP_);
+ while (left) {
unsigned long v;
- int chunk = min(nbytes, (int)sizeof(unsigned long));
+ int chunk = min_t(int, left, sizeof(unsigned long));
if (!arch_get_random_long(&v))
break;
-
+
memcpy(p, &v, chunk);
p += chunk;
- nbytes -= chunk;
+ left -= chunk;
}
- if (nbytes)
- get_random_bytes(p, nbytes);
+ return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);
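With the new __must_check contract, get_random_bytes_arch() no longer falls back to get_random_bytes() internally, so a caller has to top up any shortfall itself. A minimal sketch (my_fill_buffer() is illustrative):

static void my_fill_buffer(void *buf, int nbytes)
{
	/* Returns how many bytes the architectural RNG produced. */
	int filled = get_random_bytes_arch(buf, nbytes);

	if (filled < nbytes)	/* RDRAND & co. unavailable or ran dry */
		get_random_bytes(buf + filled, nbytes - filled);
}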
-
/*
* init_std_data - initialize pool with system data
*
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 0a62c19937b6..46caadca916a 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -81,42 +81,66 @@ void tpm_put_ops(struct tpm_chip *chip)
EXPORT_SYMBOL_GPL(tpm_put_ops);
/**
- * tpm_chip_find_get() - find and reserve a TPM chip
+ * tpm_default_chip() - find a TPM chip and get a reference to it
+ */
+struct tpm_chip *tpm_default_chip(void)
+{
+ struct tpm_chip *chip, *res = NULL;
+ int chip_num = 0;
+ int chip_prev;
+
+ mutex_lock(&idr_lock);
+
+ do {
+ chip_prev = chip_num;
+ chip = idr_get_next(&dev_nums_idr, &chip_num);
+ if (chip) {
+ get_device(&chip->dev);
+ res = chip;
+ break;
+ }
+ } while (chip_prev != chip_num);
+
+ mutex_unlock(&idr_lock);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(tpm_default_chip);
+
+/**
+ * tpm_find_get_ops() - find and reserve a TPM chip
* @chip: a &struct tpm_chip instance, %NULL for the default chip
*
* Finds a TPM chip and reserves its class device and operations. The chip must
- * be released with tpm_chip_put_ops() after use.
+ * be released with tpm_put_ops() after use.
+ * This function is for internal use only. It supports existing TPM callers
+ * by accepting NULL, but those callers should be converted to pass in a chip
+ * directly.
*
* Return:
* A reserved &struct tpm_chip instance.
* %NULL if a chip is not found.
* %NULL if the chip is not available.
*/
-struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip)
+struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip)
{
- struct tpm_chip *res = NULL;
- int chip_num = 0;
- int chip_prev;
-
- mutex_lock(&idr_lock);
+ int rc;
- if (!chip) {
- do {
- chip_prev = chip_num;
- chip = idr_get_next(&dev_nums_idr, &chip_num);
- if (chip && !tpm_try_get_ops(chip)) {
- res = chip;
- break;
- }
- } while (chip_prev != chip_num);
- } else {
+ if (chip) {
if (!tpm_try_get_ops(chip))
- res = chip;
+ return chip;
+ return NULL;
}
- mutex_unlock(&idr_lock);
-
- return res;
+ chip = tpm_default_chip();
+ if (!chip)
+ return NULL;
+ rc = tpm_try_get_ops(chip);
+ /* release additional reference we got from tpm_default_chip() */
+ put_device(&chip->dev);
+ if (rc)
+ return NULL;
+ return chip;
}
/**
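The split above leaves two lookup helpers with different release pairings: tpm_default_chip() hands back a bare device reference, while tpm_find_get_ops() also reserves the ops. A minimal sketch of a caller (my_tpm_user() is illustrative, not from this patch):

static int my_tpm_user(void)
{
	struct tpm_chip *chip;
	int rc;

	chip = tpm_find_get_ops(NULL);	/* default chip, ops reserved */
	if (!chip)
		return -ENODEV;

	/* Safe to inspect the chip while the ops are held. */
	rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ? 1 : 0;

	tpm_put_ops(chip);	/* pairs with tpm_find_get_ops() */
	return rc;
}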
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index e32f6e85dc6d..1a803b0cf980 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -29,7 +29,6 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
-#include <linux/pm_runtime.h>
#include <linux/tpm_eventlog.h>
#include "tpm.h"
@@ -369,10 +368,13 @@ err_len:
return -EINVAL;
}
-static int tpm_request_locality(struct tpm_chip *chip)
+static int tpm_request_locality(struct tpm_chip *chip, unsigned int flags)
{
int rc;
+ if (flags & TPM_TRANSMIT_NESTED)
+ return 0;
+
if (!chip->ops->request_locality)
return 0;
@@ -385,10 +387,13 @@ static int tpm_request_locality(struct tpm_chip *chip)
return 0;
}
-static void tpm_relinquish_locality(struct tpm_chip *chip)
+static void tpm_relinquish_locality(struct tpm_chip *chip, unsigned int flags)
{
int rc;
+ if (flags & TPM_TRANSMIT_NESTED)
+ return;
+
if (!chip->ops->relinquish_locality)
return;
@@ -399,6 +404,28 @@ static void tpm_relinquish_locality(struct tpm_chip *chip)
chip->locality = -1;
}
+static int tpm_cmd_ready(struct tpm_chip *chip, unsigned int flags)
+{
+ if (flags & TPM_TRANSMIT_NESTED)
+ return 0;
+
+ if (!chip->ops->cmd_ready)
+ return 0;
+
+ return chip->ops->cmd_ready(chip);
+}
+
+static int tpm_go_idle(struct tpm_chip *chip, unsigned int flags)
+{
+ if (flags & TPM_TRANSMIT_NESTED)
+ return 0;
+
+ if (!chip->ops->go_idle)
+ return 0;
+
+ return chip->ops->go_idle(chip);
+}
+
static ssize_t tpm_try_transmit(struct tpm_chip *chip,
struct tpm_space *space,
u8 *buf, size_t bufsiz,
@@ -423,7 +450,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
TSS2_RESMGR_TPM_RC_LAYER);
- return bufsiz;
+ return sizeof(*header);
}
if (bufsiz > TPM_BUFSIZE)
@@ -439,24 +466,24 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
return -E2BIG;
}
- if (!(flags & TPM_TRANSMIT_UNLOCKED))
+ if (!(flags & TPM_TRANSMIT_UNLOCKED) && !(flags & TPM_TRANSMIT_NESTED))
mutex_lock(&chip->tpm_mutex);
-
if (chip->ops->clk_enable != NULL)
chip->ops->clk_enable(chip, true);
/* Store the decision as chip->locality will be changed. */
need_locality = chip->locality == -1;
- if (!(flags & TPM_TRANSMIT_RAW) && need_locality) {
- rc = tpm_request_locality(chip);
+ if (need_locality) {
+ rc = tpm_request_locality(chip, flags);
if (rc < 0)
goto out_no_locality;
}
- if (chip->dev.parent)
- pm_runtime_get_sync(chip->dev.parent);
+ rc = tpm_cmd_ready(chip, flags);
+ if (rc)
+ goto out;
rc = tpm2_prepare_space(chip, space, ordinal, buf);
if (rc)
@@ -516,19 +543,22 @@ out_recv:
}
rc = tpm2_commit_space(chip, space, ordinal, buf, &len);
+ if (rc)
+ dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
out:
- if (chip->dev.parent)
- pm_runtime_put_sync(chip->dev.parent);
+ rc = tpm_go_idle(chip, flags);
+ if (rc)
+ goto out;
if (need_locality)
- tpm_relinquish_locality(chip);
+ tpm_relinquish_locality(chip, flags);
out_no_locality:
if (chip->ops->clk_enable != NULL)
chip->ops->clk_enable(chip, false);
- if (!(flags & TPM_TRANSMIT_UNLOCKED))
+ if (!(flags & TPM_TRANSMIT_UNLOCKED) && !(flags & TPM_TRANSMIT_NESTED))
mutex_unlock(&chip->tpm_mutex);
return rc ? rc : len;
}
@@ -930,7 +960,7 @@ int tpm_is_tpm2(struct tpm_chip *chip)
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
@@ -954,7 +984,7 @@ int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2)
@@ -1013,7 +1043,7 @@ int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash)
u32 count = 0;
int i;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
@@ -1142,7 +1172,7 @@ int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
@@ -1262,7 +1292,7 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
@@ -1324,7 +1354,7 @@ int tpm_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload,
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
@@ -1352,7 +1382,7 @@ int tpm_unseal_trusted(struct tpm_chip *chip,
{
int rc;
- chip = tpm_chip_find_get(chip);
+ chip = tpm_find_get_ops(chip);
if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 4426649e431c..f3501d05264f 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -424,23 +424,24 @@ struct tpm_buf {
u8 *data;
};
-static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
+static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
{
struct tpm_input_header *head;
+ head = (struct tpm_input_header *)buf->data;
+ head->tag = cpu_to_be16(tag);
+ head->length = cpu_to_be32(sizeof(*head));
+ head->ordinal = cpu_to_be32(ordinal);
+}
+static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
buf->data_page = alloc_page(GFP_HIGHUSER);
if (!buf->data_page)
return -ENOMEM;
buf->flags = 0;
buf->data = kmap(buf->data_page);
-
- head = (struct tpm_input_header *) buf->data;
-
- head->tag = cpu_to_be16(tag);
- head->length = cpu_to_be32(sizeof(*head));
- head->ordinal = cpu_to_be32(ordinal);
-
+ tpm_buf_reset(buf, tag, ordinal);
return 0;
}
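A minimal sketch of the tpm_buf pattern that the TPM2 conversions below adopt: initialize (or reset) the header, append parameters, transmit, and always destroy the buffer. The command choice and desc string here are illustrative:

static int my_tpm2_cmd(struct tpm_chip *chip, u16 num_bytes)
{
	struct tpm_buf buf;
	int rc;

	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
	if (rc)
		return rc;

	tpm_buf_append_u16(&buf, num_bytes);
	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
			      "sketch: get random");

	/* tpm_buf_reset() would let a retry loop reuse the same page. */
	tpm_buf_destroy(&buf);
	return rc;
}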
@@ -511,9 +512,17 @@ extern const struct file_operations tpm_fops;
extern const struct file_operations tpmrm_fops;
extern struct idr dev_nums_idr;
+/**
+ * enum tpm_transmit_flags - flags for tpm_transmit()
+ *
+ * @TPM_TRANSMIT_UNLOCKED: do not lock the chip
+ * @TPM_TRANSMIT_NESTED: skip the setup steps (power management,
+ * locality) and the locking (i.e. implies
+ * UNLOCKED)
+ */
enum tpm_transmit_flags {
TPM_TRANSMIT_UNLOCKED = BIT(0),
- TPM_TRANSMIT_RAW = BIT(1),
+ TPM_TRANSMIT_NESTED = BIT(1),
};
ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
@@ -538,7 +547,7 @@ static inline void tpm_msleep(unsigned int delay_msec)
delay_msec * 1000);
};
-struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip);
+struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
__must_check int tpm_try_get_ops(struct tpm_chip *chip);
void tpm_put_ops(struct tpm_chip *chip);
@@ -569,7 +578,7 @@ static inline u32 tpm2_rc_value(u32 rc)
int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, u32 count,
struct tpm2_digest *digests);
-int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max);
+int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max);
void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
unsigned int flags);
int tpm2_seal_trusted(struct tpm_chip *chip,
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index d31b09099216..c31b490bd41d 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -27,46 +27,6 @@ enum tpm2_session_attributes {
TPM2_SA_CONTINUE_SESSION = BIT(0),
};
-struct tpm2_startup_in {
- __be16 startup_type;
-} __packed;
-
-struct tpm2_get_tpm_pt_in {
- __be32 cap_id;
- __be32 property_id;
- __be32 property_cnt;
-} __packed;
-
-struct tpm2_get_tpm_pt_out {
- u8 more_data;
- __be32 subcap_id;
- __be32 property_cnt;
- __be32 property_id;
- __be32 value;
-} __packed;
-
-struct tpm2_get_random_in {
- __be16 size;
-} __packed;
-
-struct tpm2_get_random_out {
- __be16 size;
- u8 buffer[TPM_MAX_RNG_DATA];
-} __packed;
-
-union tpm2_cmd_params {
- struct tpm2_startup_in startup_in;
- struct tpm2_get_tpm_pt_in get_tpm_pt_in;
- struct tpm2_get_tpm_pt_out get_tpm_pt_out;
- struct tpm2_get_random_in getrandom_in;
- struct tpm2_get_random_out getrandom_out;
-};
-
-struct tpm2_cmd {
- tpm_cmd_header header;
- union tpm2_cmd_params params;
-} __packed;
-
struct tpm2_hash {
unsigned int crypto_id;
unsigned int tpm_id;
@@ -321,82 +281,72 @@ int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, u32 count,
}
-#define TPM2_GETRANDOM_IN_SIZE \
- (sizeof(struct tpm_input_header) + \
- sizeof(struct tpm2_get_random_in))
-
-static const struct tpm_input_header tpm2_getrandom_header = {
- .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
- .length = cpu_to_be32(TPM2_GETRANDOM_IN_SIZE),
- .ordinal = cpu_to_be32(TPM2_CC_GET_RANDOM)
-};
+struct tpm2_get_random_out {
+ __be16 size;
+ u8 buffer[TPM_MAX_RNG_DATA];
+} __packed;
/**
* tpm2_get_random() - get random bytes from the TPM RNG
*
- * @chip: TPM chip to use
- * @out: destination buffer for the random bytes
- * @max: the max number of bytes to write to @out
+ * @chip: a &tpm_chip instance
+ * @dest: destination buffer
+ * @max: the max number of random bytes to pull
*
* Return:
- * Size of the output buffer, or -EIO on error.
+ * size of the buffer on success,
+ * -errno otherwise
*/
-int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max)
+int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
{
- struct tpm2_cmd cmd;
- u32 recd, rlength;
- u32 num_bytes;
+ struct tpm2_get_random_out *out;
+ struct tpm_buf buf;
+ u32 recd;
+ u32 num_bytes = max;
int err;
int total = 0;
int retries = 5;
- u8 *dest = out;
+ u8 *dest_ptr = dest;
- num_bytes = min_t(u32, max, sizeof(cmd.params.getrandom_out.buffer));
-
- if (!out || !num_bytes ||
- max > sizeof(cmd.params.getrandom_out.buffer))
+ if (!num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- do {
- cmd.header.in = tpm2_getrandom_header;
- cmd.params.getrandom_in.size = cpu_to_be16(num_bytes);
+ err = tpm_buf_init(&buf, 0, 0);
+ if (err)
+ return err;
- err = tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd),
+ do {
+ tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
+ tpm_buf_append_u16(&buf, num_bytes);
+ err = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
offsetof(struct tpm2_get_random_out,
buffer),
0, "attempting get random");
if (err)
- break;
+ goto out;
- recd = min_t(u32, be16_to_cpu(cmd.params.getrandom_out.size),
- num_bytes);
- rlength = be32_to_cpu(cmd.header.out.length);
- if (rlength < offsetof(struct tpm2_get_random_out, buffer) +
- recd)
- return -EFAULT;
- memcpy(dest, cmd.params.getrandom_out.buffer, recd);
+ out = (struct tpm2_get_random_out *)
+ &buf.data[TPM_HEADER_SIZE];
+ recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
+ if (tpm_buf_length(&buf) <
+ offsetof(struct tpm2_get_random_out, buffer) + recd) {
+ err = -EFAULT;
+ goto out;
+ }
+ memcpy(dest_ptr, out->buffer, recd);
- dest += recd;
+ dest_ptr += recd;
total += recd;
num_bytes -= recd;
} while (retries-- && total < max);
+ tpm_buf_destroy(&buf);
return total ? total : -EIO;
+out:
+ tpm_buf_destroy(&buf);
+ return err;
}
-#define TPM2_GET_TPM_PT_IN_SIZE \
- (sizeof(struct tpm_input_header) + \
- sizeof(struct tpm2_get_tpm_pt_in))
-
-#define TPM2_GET_TPM_PT_OUT_BODY_SIZE \
- sizeof(struct tpm2_get_tpm_pt_out)
-
-static const struct tpm_input_header tpm2_get_tpm_pt_header = {
- .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
- .length = cpu_to_be32(TPM2_GET_TPM_PT_IN_SIZE),
- .ordinal = cpu_to_be32(TPM2_CC_GET_CAPABILITY)
-};
-
/**
* tpm2_flush_context_cmd() - execute a TPM2_FlushContext command
* @chip: TPM chip to use
@@ -471,7 +421,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
{
unsigned int blob_len;
struct tpm_buf buf;
- u32 hash, rlength;
+ u32 hash;
int i;
int rc;
@@ -546,8 +496,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
rc = -E2BIG;
goto out;
}
- rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)->header.out.length);
- if (rlength < TPM_HEADER_SIZE + 4 + blob_len) {
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 4 + blob_len) {
rc = -EFAULT;
goto out;
}
@@ -657,7 +606,6 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
u16 data_len;
u8 *data;
int rc;
- u32 rlength;
rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
if (rc)
@@ -685,9 +633,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
goto out;
}
- rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)
- ->header.out.length);
- if (rlength < TPM_HEADER_SIZE + 6 + data_len) {
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 6 + data_len) {
rc = -EFAULT;
goto out;
}
@@ -733,69 +679,71 @@ out:
return rc;
}
+struct tpm2_get_cap_out {
+ u8 more_data;
+ __be32 subcap_id;
+ __be32 property_cnt;
+ __be32 property_id;
+ __be32 value;
+} __packed;
+
/**
* tpm2_get_tpm_pt() - get value of a TPM_CAP_TPM_PROPERTIES type property
- * @chip: TPM chip to use.
+ * @chip: a &tpm_chip instance
* @property_id: property ID.
* @value: output variable.
* @desc: passed to tpm_transmit_cmd()
*
- * Return: Same as with tpm_transmit_cmd.
+ * Return:
+ * 0 on success,
+ * -errno or a TPM return code otherwise
*/
ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
const char *desc)
{
- struct tpm2_cmd cmd;
+ struct tpm2_get_cap_out *out;
+ struct tpm_buf buf;
int rc;
- cmd.header.in = tpm2_get_tpm_pt_header;
- cmd.params.get_tpm_pt_in.cap_id = cpu_to_be32(TPM2_CAP_TPM_PROPERTIES);
- cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(property_id);
- cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1);
-
- rc = tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd),
- TPM2_GET_TPM_PT_OUT_BODY_SIZE, 0, desc);
- if (!rc)
- *value = be32_to_cpu(cmd.params.get_tpm_pt_out.value);
-
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
+ return rc;
+ tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
+ tpm_buf_append_u32(&buf, property_id);
+ tpm_buf_append_u32(&buf, 1);
+ rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, NULL);
+ if (!rc) {
+ out = (struct tpm2_get_cap_out *)
+ &buf.data[TPM_HEADER_SIZE];
+ *value = be32_to_cpu(out->value);
+ }
+ tpm_buf_destroy(&buf);
return rc;
}
EXPORT_SYMBOL_GPL(tpm2_get_tpm_pt);
-#define TPM2_SHUTDOWN_IN_SIZE \
- (sizeof(struct tpm_input_header) + \
- sizeof(struct tpm2_startup_in))
-
-static const struct tpm_input_header tpm2_shutdown_header = {
- .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
- .length = cpu_to_be32(TPM2_SHUTDOWN_IN_SIZE),
- .ordinal = cpu_to_be32(TPM2_CC_SHUTDOWN)
-};
-
/**
- * tpm2_shutdown() - send shutdown command to the TPM chip
+ * tpm2_shutdown() - send a TPM shutdown command
+ *
+ * Sends a TPM shutdown command. The shutdown command is used at call
+ * sites where the system is going down. If it fails, there is not much
+ * that can be done except log an error message.
*
- * @chip: TPM chip to use.
- * @shutdown_type: shutdown type. The value is either
- * TPM_SU_CLEAR or TPM_SU_STATE.
+ * @chip: a &tpm_chip instance
+ * @shutdown_type: TPM_SU_CLEAR or TPM_SU_STATE.
*/
void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
{
- struct tpm2_cmd cmd;
+ struct tpm_buf buf;
int rc;
- cmd.header.in = tpm2_shutdown_header;
- cmd.params.startup_in.startup_type = cpu_to_be16(shutdown_type);
-
- rc = tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd), 0, 0,
- "stopping the TPM");
-
- /* In places where shutdown command is sent there's no much we can do
- * except print the error code on a system failure.
- */
- if (rc < 0 && rc != -EPIPE)
- dev_warn(&chip->dev, "transmit returned %d while stopping the TPM",
- rc);
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_SHUTDOWN);
+ if (rc)
+ return;
+ tpm_buf_append_u16(&buf, shutdown_type);
+ tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
+ "stopping the TPM");
+ tpm_buf_destroy(&buf);
}
/*
@@ -863,31 +811,37 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
}
/**
- * tpm2_probe() - probe TPM 2.0
- * @chip: TPM chip to use
+ * tpm2_probe() - probe for the TPM 2.0 protocol
+ * @chip: a &tpm_chip instance
*
- * Return: < 0 error and 0 on success.
+ * Send an idempotent TPM 2.0 command and see whether there is a TPM2 chip on
+ * the other end based on the response tag. The flag TPM_CHIP_FLAG_TPM2 is set by
+ * this function if this is the case.
*
- * Send idempotent TPM 2.0 command and see whether TPM 2.0 chip replied based on
- * the reply tag.
+ * Return:
+ * 0 on success,
+ * -errno otherwise
*/
int tpm2_probe(struct tpm_chip *chip)
{
- struct tpm2_cmd cmd;
+ struct tpm_output_header *out;
+ struct tpm_buf buf;
int rc;
- cmd.header.in = tpm2_get_tpm_pt_header;
- cmd.params.get_tpm_pt_in.cap_id = cpu_to_be32(TPM2_CAP_TPM_PROPERTIES);
- cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(0x100);
- cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1);
-
- rc = tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd), 0, 0, NULL);
- if (rc < 0)
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
return rc;
-
- if (be16_to_cpu(cmd.header.out.tag) == TPM2_ST_NO_SESSIONS)
- chip->flags |= TPM_CHIP_FLAG_TPM2;
-
+ tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
+ tpm_buf_append_u32(&buf, TPM_PT_TOTAL_COMMANDS);
+ tpm_buf_append_u32(&buf, 1);
+ rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, NULL);
+ /* We ignore TPM return codes on purpose. */
+ if (rc >= 0) {
+ out = (struct tpm_output_header *)buf.data;
+ if (be16_to_cpu(out->tag) == TPM2_ST_NO_SESSIONS)
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ }
+ tpm_buf_destroy(&buf);
return 0;
}
EXPORT_SYMBOL_GPL(tpm2_probe);
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 6122d3276f72..d2e101b32482 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -39,7 +39,7 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
if (space->session_tbl[i])
tpm2_flush_context_cmd(chip, space->session_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_NESTED);
}
}
@@ -84,7 +84,7 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
tpm_buf_append(&tbuf, &buf[*offset], body_size);
rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 4,
- TPM_TRANSMIT_UNLOCKED, NULL);
+ TPM_TRANSMIT_NESTED, NULL);
if (rc < 0) {
dev_warn(&chip->dev, "%s: failed with a system error %d\n",
__func__, rc);
@@ -133,7 +133,7 @@ static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
tpm_buf_append_u32(&tbuf, handle);
rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 0,
- TPM_TRANSMIT_UNLOCKED, NULL);
+ TPM_TRANSMIT_NESTED, NULL);
if (rc < 0) {
dev_warn(&chip->dev, "%s: failed with a system error %d\n",
__func__, rc);
@@ -170,7 +170,7 @@ static void tpm2_flush_space(struct tpm_chip *chip)
for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
if (space->context_tbl[i] && ~space->context_tbl[i])
tpm2_flush_context_cmd(chip, space->context_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_NESTED);
tpm2_flush_sessions(chip, space);
}
@@ -377,7 +377,7 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp,
return 0;
out_no_slots:
- tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_UNLOCKED);
+ tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_NESTED);
dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__,
phandle);
return -ENOMEM;
@@ -465,7 +465,7 @@ static int tpm2_save_space(struct tpm_chip *chip)
return rc;
tpm2_flush_context_cmd(chip, space->context_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_NESTED);
space->context_tbl[i] = ~0;
}
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 34fbc6cb097b..36952ef98f90 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -132,7 +132,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
}
/**
- * crb_go_idle - request tpm crb device to go the idle state
+ * __crb_go_idle - request tpm crb device to enter the idle state
*
* @dev: crb device
* @priv: crb private data
@@ -147,7 +147,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
*
* Return: 0 always
*/
-static int crb_go_idle(struct device *dev, struct crb_priv *priv)
+static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
{
if ((priv->sm == ACPI_TPM2_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
@@ -163,11 +163,20 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
dev_warn(dev, "goIdle timed out\n");
return -ETIME;
}
+
return 0;
}
+static int crb_go_idle(struct tpm_chip *chip)
+{
+ struct device *dev = &chip->dev;
+ struct crb_priv *priv = dev_get_drvdata(dev);
+
+ return __crb_go_idle(dev, priv);
+}
+
/**
- * crb_cmd_ready - request tpm crb device to enter ready state
+ * __crb_cmd_ready - request tpm crb device to enter ready state
*
* @dev: crb device
* @priv: crb private data
@@ -181,7 +190,7 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
*
* Return: 0 on success, -ETIME on timeout
*/
-static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
{
if ((priv->sm == ACPI_TPM2_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
@@ -200,6 +209,14 @@ static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
return 0;
}
+static int crb_cmd_ready(struct tpm_chip *chip)
+{
+ struct device *dev = &chip->dev;
+ struct crb_priv *priv = dev_get_drvdata(dev);
+
+ return __crb_cmd_ready(dev, priv);
+}
+
static int __crb_request_locality(struct device *dev,
struct crb_priv *priv, int loc)
{
@@ -401,6 +418,8 @@ static const struct tpm_class_ops tpm_crb = {
.send = crb_send,
.cancel = crb_cancel,
.req_canceled = crb_req_canceled,
+ .go_idle = crb_go_idle,
+ .cmd_ready = crb_cmd_ready,
.request_locality = crb_request_locality,
.relinquish_locality = crb_relinquish_locality,
.req_complete_mask = CRB_DRV_STS_COMPLETE,
@@ -520,7 +539,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* PTT HW bug w/a: wake up the device to access
* possibly not retained registers.
*/
- ret = crb_cmd_ready(dev, priv);
+ ret = __crb_cmd_ready(dev, priv);
if (ret)
goto out_relinquish_locality;
@@ -565,7 +584,7 @@ out:
if (!ret)
priv->cmd_size = cmd_size;
- crb_go_idle(dev, priv);
+ __crb_go_idle(dev, priv);
out_relinquish_locality:
@@ -628,32 +647,7 @@ static int crb_acpi_add(struct acpi_device *device)
chip->acpi_dev_handle = device->handle;
chip->flags = TPM_CHIP_FLAG_TPM2;
- rc = __crb_request_locality(dev, priv, 0);
- if (rc)
- return rc;
-
- rc = crb_cmd_ready(dev, priv);
- if (rc)
- goto out;
-
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- rc = tpm_chip_register(chip);
- if (rc) {
- crb_go_idle(dev, priv);
- pm_runtime_put_noidle(dev);
- pm_runtime_disable(dev);
- goto out;
- }
-
- pm_runtime_put_sync(dev);
-
-out:
- __crb_relinquish_locality(dev, priv, 0);
-
- return rc;
+ return tpm_chip_register(chip);
}
static int crb_acpi_remove(struct acpi_device *device)
@@ -663,52 +657,11 @@ static int crb_acpi_remove(struct acpi_device *device)
tpm_chip_unregister(chip);
- pm_runtime_disable(dev);
-
return 0;
}
-static int __maybe_unused crb_pm_runtime_suspend(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
-
- return crb_go_idle(dev, priv);
-}
-
-static int __maybe_unused crb_pm_runtime_resume(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
-
- return crb_cmd_ready(dev, priv);
-}
-
-static int __maybe_unused crb_pm_suspend(struct device *dev)
-{
- int ret;
-
- ret = tpm_pm_suspend(dev);
- if (ret)
- return ret;
-
- return crb_pm_runtime_suspend(dev);
-}
-
-static int __maybe_unused crb_pm_resume(struct device *dev)
-{
- int ret;
-
- ret = crb_pm_runtime_resume(dev);
- if (ret)
- return ret;
-
- return tpm_pm_resume(dev);
-}
-
static const struct dev_pm_ops crb_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(crb_pm_suspend, crb_pm_resume)
- SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
};
static const struct acpi_device_id crb_device_ids[] = {
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 6116cd05e228..9086edc9066b 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -117,7 +117,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
/* Lock the adapter for the duration of the whole sequence. */
if (!tpm_dev.client->adapter->algo->master_xfer)
return -EOPNOTSUPP;
- i2c_lock_adapter(tpm_dev.client->adapter);
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
if (tpm_dev.chip_type == SLB9645) {
/* use a combined read for newer chips
@@ -192,7 +192,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
}
out:
- i2c_unlock_adapter(tpm_dev.client->adapter);
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* take care of 'guard time' */
usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
@@ -224,7 +224,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
if (!tpm_dev.client->adapter->algo->master_xfer)
return -EOPNOTSUPP;
- i2c_lock_adapter(tpm_dev.client->adapter);
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* prepend the 'register address' to the buffer */
tpm_dev.buf[0] = addr;
@@ -243,7 +243,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
usleep_range(sleep_low, sleep_hi);
}
- i2c_unlock_adapter(tpm_dev.client->adapter);
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* take care of 'guard time' */
usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
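The i2c_lock_adapter() calls above are replaced with the segment-scoped variant: I2C_LOCK_SEGMENT serializes only the local bus segment (correct for a client sitting behind a mux), whereas the old helper always took the root adapter lock. A short sketch with an illustrative wrapper:

static void my_locked_xfer(struct i2c_client *client,
			   struct i2c_msg *msgs, int num)
{
	/* Lock just this segment, then use the unlocked transfer. */
	i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
	__i2c_transfer(client->adapter, msgs, num);
	i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
}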
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 8b46aaa9e049..d2345d9fd7b5 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -875,6 +875,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
chip->acpi_dev_handle = acpi_dev_handle;
#endif
+ chip->hwrng.quality = priv->rng_quality;
+
/* Maximum timeouts */
chip->timeout_a = msecs_to_jiffies(TIS_TIMEOUT_A_MAX);
chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX);
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index f6e1dbe212a7..f48125f1e6e0 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -99,6 +99,7 @@ struct tpm_tis_data {
wait_queue_head_t int_queue;
wait_queue_head_t read_queue;
const struct tpm_tis_phy_ops *phy_ops;
+ unsigned short rng_quality;
};
struct tpm_tis_phy_ops {
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 424ff2fde1f2..9914f6973463 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -199,6 +199,7 @@ static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
static int tpm_tis_spi_probe(struct spi_device *dev)
{
struct tpm_tis_spi_phy *phy;
+ int irq;
phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
GFP_KERNEL);
@@ -211,7 +212,13 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
if (!phy->iobuf)
return -ENOMEM;
- return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
+ /* If the SPI device has an IRQ then use that */
+ if (dev->irq > 0)
+ irq = dev->irq;
+ else
+ irq = -1;
+
+ return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
NULL);
}
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index e4f79f920450..87a0ce47f201 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -418,7 +418,7 @@ static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality)
proxy_dev->state |= STATE_DRIVER_COMMAND;
rc = tpm_transmit_cmd(chip, NULL, buf.data, tpm_buf_length(&buf), 0,
- TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW,
+ TPM_TRANSMIT_NESTED,
"attempting to set locality");
proxy_dev->state &= ~STATE_DRIVER_COMMAND;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 721572a8c429..292056bbb30e 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -45,6 +45,12 @@ config COMMON_CLK_MAX77686
This driver supports Maxim 77620/77686/77802 crystal oscillator
clock.
+config COMMON_CLK_MAX9485
+ tristate "Maxim 9485 Programmable Clock Generator"
+ depends on I2C
+ help
This driver supports the Maxim 9485 Programmable Audio Clock Generator.
+
config COMMON_CLK_RK808
tristate "Clock driver for RK805/RK808/RK818"
depends on MFD_RK808
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 0bb25dd009d1..a84c5573cabe 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
+obj-$(CONFIG_COMMON_CLK_MAX9485) += clk-max9485.o
obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NPCM7XX) += clk-npcm7xx.o
diff --git a/drivers/clk/actions/Kconfig b/drivers/clk/actions/Kconfig
index 8854adb37847..dc38c85a4833 100644
--- a/drivers/clk/actions/Kconfig
+++ b/drivers/clk/actions/Kconfig
@@ -1,14 +1,21 @@
config CLK_ACTIONS
bool "Clock driver for Actions Semi SoCs"
depends on ARCH_ACTIONS || COMPILE_TEST
+ select REGMAP_MMIO
default ARCH_ACTIONS
if CLK_ACTIONS
# SoC Drivers
+config CLK_OWL_S700
+ bool "Support for the Actions Semi OWL S700 clocks"
+ depends on (ARM64 && ARCH_ACTIONS) || COMPILE_TEST
+ default ARM64 && ARCH_ACTIONS
+
config CLK_OWL_S900
bool "Support for the Actions Semi OWL S900 clocks"
depends on (ARM64 && ARCH_ACTIONS) || COMPILE_TEST
default ARM64 && ARCH_ACTIONS
+
endif
diff --git a/drivers/clk/actions/Makefile b/drivers/clk/actions/Makefile
index 76e431434d10..78c17d56f991 100644
--- a/drivers/clk/actions/Makefile
+++ b/drivers/clk/actions/Makefile
@@ -9,4 +9,5 @@ clk-owl-y += owl-composite.o
clk-owl-y += owl-pll.o
# SoC support
+obj-$(CONFIG_CLK_OWL_S700) += owl-s700.o
obj-$(CONFIG_CLK_OWL_S900) += owl-s900.o
diff --git a/drivers/clk/actions/owl-s700.c b/drivers/clk/actions/owl-s700.c
new file mode 100644
index 000000000000..5e9531392ee5
--- /dev/null
+++ b/drivers/clk/actions/owl-s700.c
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Actions Semi S700 clock driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Author: Pathiban Nallathambi <pn@denx.de>
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "owl-common.h"
+#include "owl-composite.h"
+#include "owl-divider.h"
+#include "owl-factor.h"
+#include "owl-fixed-factor.h"
+#include "owl-gate.h"
+#include "owl-mux.h"
+#include "owl-pll.h"
+
+#include <dt-bindings/clock/actions,s700-cmu.h>
+
+#define CMU_COREPLL (0x0000)
+#define CMU_DEVPLL (0x0004)
+#define CMU_DDRPLL (0x0008)
+#define CMU_NANDPLL (0x000C)
+#define CMU_DISPLAYPLL (0x0010)
+#define CMU_AUDIOPLL (0x0014)
+#define CMU_TVOUTPLL (0x0018)
+#define CMU_BUSCLK (0x001C)
+#define CMU_SENSORCLK (0x0020)
+#define CMU_LCDCLK (0x0024)
+#define CMU_DSIPLLCLK (0x0028)
+#define CMU_CSICLK (0x002C)
+#define CMU_DECLK (0x0030)
+#define CMU_SICLK (0x0034)
+#define CMU_BUSCLK1 (0x0038)
+#define CMU_HDECLK (0x003C)
+#define CMU_VDECLK (0x0040)
+#define CMU_VCECLK (0x0044)
+#define CMU_NANDCCLK (0x004C)
+#define CMU_SD0CLK (0x0050)
+#define CMU_SD1CLK (0x0054)
+#define CMU_SD2CLK (0x0058)
+#define CMU_UART0CLK (0x005C)
+#define CMU_UART1CLK (0x0060)
+#define CMU_UART2CLK (0x0064)
+#define CMU_UART3CLK (0x0068)
+#define CMU_UART4CLK (0x006C)
+#define CMU_UART5CLK (0x0070)
+#define CMU_UART6CLK (0x0074)
+#define CMU_PWM0CLK (0x0078)
+#define CMU_PWM1CLK (0x007C)
+#define CMU_PWM2CLK (0x0080)
+#define CMU_PWM3CLK (0x0084)
+#define CMU_PWM4CLK (0x0088)
+#define CMU_PWM5CLK (0x008C)
+#define CMU_GPU3DCLK (0x0090)
+#define CMU_CORECTL (0x009C)
+#define CMU_DEVCLKEN0 (0x00A0)
+#define CMU_DEVCLKEN1 (0x00A4)
+#define CMU_DEVRST0 (0x00A8)
+#define CMU_DEVRST1 (0x00AC)
+#define CMU_USBPLL (0x00B0)
+#define CMU_ETHERNETPLL (0x00B4)
+#define CMU_CVBSPLL (0x00B8)
+#define CMU_SSTSCLK (0x00C0)
+
+static struct clk_pll_table clk_audio_pll_table[] = {
+ {0, 45158400}, {1, 49152000},
+ {0, 0},
+};
+
+static struct clk_pll_table clk_cvbs_pll_table[] = {
+ {27, 29 * 12000000}, {28, 30 * 12000000}, {29, 31 * 12000000},
+ {30, 32 * 12000000}, {31, 33 * 12000000}, {32, 34 * 12000000},
+ {33, 35 * 12000000}, {34, 36 * 12000000}, {35, 37 * 12000000},
+ {36, 38 * 12000000}, {37, 39 * 12000000}, {38, 40 * 12000000},
+ {39, 41 * 12000000}, {40, 42 * 12000000}, {41, 43 * 12000000},
+ {42, 44 * 12000000}, {43, 45 * 12000000}, {0, 0},
+};
+
+/* pll clocks */
+static OWL_PLL_NO_PARENT(clk_core_pll, "core_pll", CMU_COREPLL, 12000000, 9, 0, 8, 4, 174, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_dev_pll, "dev_pll", CMU_DEVPLL, 6000000, 8, 0, 8, 8, 126, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_ddr_pll, "ddr_pll", CMU_DDRPLL, 6000000, 8, 0, 8, 2, 180, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_nand_pll, "nand_pll", CMU_NANDPLL, 6000000, 8, 0, 8, 2, 86, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_display_pll, "display_pll", CMU_DISPLAYPLL, 6000000, 8, 0, 8, 2, 140, NULL, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_cvbs_pll, "cvbs_pll", CMU_CVBSPLL, 0, 8, 0, 8, 27, 43, clk_cvbs_pll_table, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_audio_pll, "audio_pll", CMU_AUDIOPLL, 0, 4, 0, 1, 0, 0, clk_audio_pll_table, CLK_IGNORE_UNUSED);
+static OWL_PLL_NO_PARENT(clk_ethernet_pll, "ethernet_pll", CMU_ETHERNETPLL, 500000000, 0, 0, 0, 0, 0, NULL, CLK_IGNORE_UNUSED);
+
+static const char *cpu_clk_mux_p[] = {"losc", "hosc", "core_pll", "noc1_clk_div"};
+static const char *dev_clk_p[] = { "hosc", "dev_pll"};
+static const char *noc_clk_mux_p[] = { "dev_clk", "display_pll", "nand_pll", "ddr_pll", "cvbs_pll"};
+
+static const char *csi_clk_mux_p[] = { "display_pll", "dev_clk"};
+static const char *de_clk_mux_p[] = { "display_pll", "dev_clk"};
+static const char *hde_clk_mux_p[] = { "dev_clk", "display_pll", "nand_pll", "ddr_pll"};
+static const char *nand_clk_mux_p[] = { "nand_pll", "display_pll", "dev_clk", "ddr_pll"};
+static const char *sd_clk_mux_p[] = { "dev_clk", "nand_pll", };
+static const char *uart_clk_mux_p[] = { "hosc", "dev_pll"};
+static const char *pwm_clk_mux_p[] = { "losc", "hosc"};
+static const char *gpu_clk_mux_p[] = { "dev_clk", "display_pll", "nand_pll", "ddr_clk", "cvbs_pll"};
+static const char *lcd_clk_mux_p[] = { "display_pll", "dev_clk" };
+static const char *i2s_clk_mux_p[] = { "audio_pll" };
+static const char *sensor_clk_mux_p[] = { "hosc", "si"};
+
+/* mux clocks */
+static OWL_MUX(clk_cpu, "cpu_clk", cpu_clk_mux_p, CMU_BUSCLK, 0, 2, CLK_SET_RATE_PARENT);
+static OWL_MUX(clk_dev, "dev_clk", dev_clk_p, CMU_DEVPLL, 12, 1, CLK_SET_RATE_PARENT);
+static OWL_MUX(clk_noc0_clk_mux, "noc0_clk_mux", noc_clk_mux_p, CMU_BUSCLK, 4, 3, CLK_SET_RATE_PARENT);
+static OWL_MUX(clk_noc1_clk_mux, "noc1_clk_mux", noc_clk_mux_p, CMU_BUSCLK1, 4, 3, CLK_SET_RATE_PARENT);
+static OWL_MUX(clk_hp_clk_mux, "hp_clk_mux", noc_clk_mux_p, CMU_BUSCLK1, 8, 3, CLK_SET_RATE_PARENT);
+
+static struct clk_factor_table sd_factor_table[] = {
+ /* bit0 ~ 4 */
+ {0, 1, 1}, {1, 1, 2}, {2, 1, 3}, {3, 1, 4},
+ {4, 1, 5}, {5, 1, 6}, {6, 1, 7}, {7, 1, 8},
+ {8, 1, 9}, {9, 1, 10}, {10, 1, 11}, {11, 1, 12},
+ {12, 1, 13}, {13, 1, 14}, {14, 1, 15}, {15, 1, 16},
+ {16, 1, 17}, {17, 1, 18}, {18, 1, 19}, {19, 1, 20},
+ {20, 1, 21}, {21, 1, 22}, {22, 1, 23}, {23, 1, 24},
+ {24, 1, 25}, {25, 1, 26},
+
+ /* bit8: /128 */
+ {256, 1, 1 * 128}, {257, 1, 2 * 128}, {258, 1, 3 * 128}, {259, 1, 4 * 128},
+ {260, 1, 5 * 128}, {261, 1, 6 * 128}, {262, 1, 7 * 128}, {263, 1, 8 * 128},
+ {264, 1, 9 * 128}, {265, 1, 10 * 128}, {266, 1, 11 * 128}, {267, 1, 12 * 128},
+ {268, 1, 13 * 128}, {269, 1, 14 * 128}, {270, 1, 15 * 128}, {271, 1, 16 * 128},
+ {272, 1, 17 * 128}, {273, 1, 18 * 128}, {274, 1, 19 * 128}, {275, 1, 20 * 128},
+ {276, 1, 21 * 128}, {277, 1, 22 * 128}, {278, 1, 23 * 128}, {279, 1, 24 * 128},
+ {280, 1, 25 * 128}, {281, 1, 26 * 128},
+
+ {0, 0},
+};
+
+static struct clk_factor_table lcd_factor_table[] = {
+ /* bit0 ~ 3 */
+ {0, 1, 1}, {1, 1, 2}, {2, 1, 3}, {3, 1, 4},
+ {4, 1, 5}, {5, 1, 6}, {6, 1, 7}, {7, 1, 8},
+ {8, 1, 9}, {9, 1, 10}, {10, 1, 11}, {11, 1, 12},
+
+ /* bit8: /7 */
+ {256, 1, 1 * 7}, {257, 1, 2 * 7}, {258, 1, 3 * 7}, {259, 1, 4 * 7},
+ {260, 1, 5 * 7}, {261, 1, 6 * 7}, {262, 1, 7 * 7}, {263, 1, 8 * 7},
+ {264, 1, 9 * 7}, {265, 1, 10 * 7}, {266, 1, 11 * 7}, {267, 1, 12 * 7},
+ {0, 0},
+};
+
+static struct clk_div_table hdmia_div_table[] = {
+ {0, 1}, {1, 2}, {2, 3}, {3, 4},
+ {4, 6}, {5, 8}, {6, 12}, {7, 16},
+ {8, 24},
+ {0, 0},
+};
+
+static struct clk_div_table rmii_div_table[] = {
+	{0, 4}, {1, 10},
+	{0, 0},
+};
+
+/* divider clocks */
+static OWL_DIVIDER(clk_noc0, "noc0_clk", "noc0_clk_mux", CMU_BUSCLK, 16, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_noc1, "noc1_clk", "noc1_clk_mux", CMU_BUSCLK1, 16, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_noc1_clk_div, "noc1_clk_div", "noc1_clk", CMU_BUSCLK1, 20, 1, NULL, 0, 0);
+static OWL_DIVIDER(clk_hp_clk_div, "hp_clk_div", "hp_clk_mux", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_ahb, "ahb_clk", "hp_clk_div", CMU_BUSCLK1, 2, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_apb, "apb_clk", "ahb_clk", CMU_BUSCLK1, 14, 2, NULL, 0, 0);
+static OWL_DIVIDER(clk_sensor0, "sensor0", "sensor_src", CMU_SENSORCLK, 0, 4, NULL, 0, 0);
+static OWL_DIVIDER(clk_sensor1, "sensor1", "sensor_src", CMU_SENSORCLK, 8, 4, NULL, 0, 0);
+static OWL_DIVIDER(clk_rmii_ref, "rmii_ref", "ethernet_pll", CMU_ETHERNETPLL, 2, 1, rmii_div_table, 0, 0);
+
+static struct clk_factor_table de_factor_table[] = {
+ {0, 1, 1}, {1, 2, 3}, {2, 1, 2}, {3, 2, 5},
+ {4, 1, 3}, {5, 1, 4}, {6, 1, 6}, {7, 1, 8},
+ {8, 1, 12}, {0, 0, 0},
+};
+
+static struct clk_factor_table hde_factor_table[] = {
+ {0, 1, 1}, {1, 2, 3}, {2, 1, 2}, {3, 2, 5},
+ {4, 1, 3}, {5, 1, 4}, {6, 1, 6}, {7, 1, 8},
+ {0, 0, 0},
+};
+
+/* gate clocks */
+static OWL_GATE(clk_gpio, "gpio", "apb_clk", CMU_DEVCLKEN1, 25, 0, 0);
+static OWL_GATE(clk_dmac, "dmac", "hp_clk_div", CMU_DEVCLKEN0, 17, 0, 0);
+static OWL_GATE(clk_timer, "timer", "hosc", CMU_DEVCLKEN1, 22, 0, 0);
+static OWL_GATE_NO_PARENT(clk_dsi, "dsi_clk", CMU_DEVCLKEN0, 2, 0, 0);
+static OWL_GATE_NO_PARENT(clk_tvout, "tvout_clk", CMU_DEVCLKEN0, 3, 0, 0);
+static OWL_GATE_NO_PARENT(clk_hdmi_dev, "hdmi_dev", CMU_DEVCLKEN0, 5, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb3_480mpll0, "usb3_480mpll0", CMU_USBPLL, 3, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb3_480mphy0, "usb3_480mphy0", CMU_USBPLL, 2, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb3_5gphy, "usb3_5gphy", CMU_USBPLL, 1, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb3_cce, "usb3_cce", CMU_DEVCLKEN0, 25, 0, 0);
+static OWL_GATE(clk_i2c0, "i2c0", "hosc", CMU_DEVCLKEN1, 0, 0, 0);
+static OWL_GATE(clk_i2c1, "i2c1", "hosc", CMU_DEVCLKEN1, 1, 0, 0);
+static OWL_GATE(clk_i2c2, "i2c2", "hosc", CMU_DEVCLKEN1, 2, 0, 0);
+static OWL_GATE(clk_i2c3, "i2c3", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
+static OWL_GATE(clk_spi0, "spi0", "ahb_clk", CMU_DEVCLKEN1, 4, 0, 0);
+static OWL_GATE(clk_spi1, "spi1", "ahb_clk", CMU_DEVCLKEN1, 5, 0, 0);
+static OWL_GATE(clk_spi2, "spi2", "ahb_clk", CMU_DEVCLKEN1, 6, 0, 0);
+static OWL_GATE(clk_spi3, "spi3", "ahb_clk", CMU_DEVCLKEN1, 7, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h0_pllen, "usbh0_pllen", CMU_USBPLL, 12, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h0_phy, "usbh0_phy", CMU_USBPLL, 10, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h0_cce, "usbh0_cce", CMU_DEVCLKEN0, 26, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h1_pllen, "usbh1_pllen", CMU_USBPLL, 13, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h1_phy, "usbh1_phy", CMU_USBPLL, 11, 0, 0);
+static OWL_GATE_NO_PARENT(clk_usb2h1_cce, "usbh1_cce", CMU_DEVCLKEN0, 27, 0, 0);
+static OWL_GATE_NO_PARENT(clk_irc_switch, "irc_switch", CMU_DEVCLKEN1, 15, 0, 0);
+
+/* composite clocks */
+
+static OWL_COMP_DIV(clk_csi, "csi", csi_clk_mux_p,
+ OWL_MUX_HW(CMU_CSICLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 13, 0),
+ OWL_DIVIDER_HW(CMU_CSICLK, 0, 4, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_si, "si", csi_clk_mux_p,
+ OWL_MUX_HW(CMU_SICLK, 4, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
+ OWL_DIVIDER_HW(CMU_SICLK, 0, 4, 0, NULL),
+ 0);
+
+static OWL_COMP_FACTOR(clk_de, "de", de_clk_mux_p,
+ OWL_MUX_HW(CMU_DECLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 0, 0),
+ OWL_FACTOR_HW(CMU_DECLK, 0, 3, 0, de_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_hde, "hde", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_HDECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 9, 0),
+ OWL_FACTOR_HW(CMU_HDECLK, 0, 3, 0, hde_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_vde, "vde", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_VDECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 10, 0),
+ OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, hde_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_vce, "vce", hde_clk_mux_p,
+ OWL_MUX_HW(CMU_VCECLK, 4, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 11, 0),
+ OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, hde_factor_table),
+ 0);
+
+static OWL_COMP_DIV(clk_nand, "nand", nand_clk_mux_p,
+ OWL_MUX_HW(CMU_NANDCCLK, 8, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 21, 0),
+ OWL_DIVIDER_HW(CMU_NANDCCLK, 0, 3, 0, NULL),
+ CLK_SET_RATE_PARENT);
+
+static OWL_COMP_FACTOR(clk_sd0, "sd0", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD0CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 22, 0),
+ OWL_FACTOR_HW(CMU_SD0CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_sd1, "sd1", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD1CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 23, 0),
+ OWL_FACTOR_HW(CMU_SD1CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_sd2, "sd2", sd_clk_mux_p,
+ OWL_MUX_HW(CMU_SD2CLK, 9, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 24, 0),
+ OWL_FACTOR_HW(CMU_SD2CLK, 0, 9, 0, sd_factor_table),
+ 0);
+
+static OWL_COMP_DIV(clk_uart0, "uart0", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART0CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 8, 0),
+ OWL_DIVIDER_HW(CMU_UART0CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart1, "uart1", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART1CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 9, 0),
+ OWL_DIVIDER_HW(CMU_UART1CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart2, "uart2", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART2CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 10, 0),
+ OWL_DIVIDER_HW(CMU_UART2CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart3, "uart3", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART3CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 11, 0),
+ OWL_DIVIDER_HW(CMU_UART3CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart4, "uart4", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART4CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 12, 0),
+ OWL_DIVIDER_HW(CMU_UART4CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart5, "uart5", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART5CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 13, 0),
+ OWL_DIVIDER_HW(CMU_UART5CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_uart6, "uart6", uart_clk_mux_p,
+ OWL_MUX_HW(CMU_UART6CLK, 16, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 14, 0),
+ OWL_DIVIDER_HW(CMU_UART6CLK, 0, 9, CLK_DIVIDER_ROUND_CLOSEST, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm0, "pwm0", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM0CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 16, 0),
+ OWL_DIVIDER_HW(CMU_PWM0CLK, 0, 10, 0, NULL),
+ CLK_IGNORE_UNUSED);
+
+static OWL_COMP_DIV(clk_pwm1, "pwm1", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM1CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 17, 0),
+ OWL_DIVIDER_HW(CMU_PWM1CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm2, "pwm2", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM2CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 18, 0),
+ OWL_DIVIDER_HW(CMU_PWM2CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm3, "pwm3", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM3CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 19, 0),
+ OWL_DIVIDER_HW(CMU_PWM3CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm4, "pwm4", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM4CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 20, 0),
+ OWL_DIVIDER_HW(CMU_PWM4CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_DIV(clk_pwm5, "pwm5", pwm_clk_mux_p,
+ OWL_MUX_HW(CMU_PWM5CLK, 12, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 21, 0),
+ OWL_DIVIDER_HW(CMU_PWM5CLK, 0, 10, 0, NULL),
+ 0);
+
+static OWL_COMP_FACTOR(clk_gpu3d, "gpu3d", gpu_clk_mux_p,
+ OWL_MUX_HW(CMU_GPU3DCLK, 4, 3),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 8, 0),
+ OWL_FACTOR_HW(CMU_GPU3DCLK, 0, 3, 0, hde_factor_table),
+ 0);
+
+static OWL_COMP_FACTOR(clk_lcd, "lcd", lcd_clk_mux_p,
+ OWL_MUX_HW(CMU_LCDCLK, 12, 2),
+ OWL_GATE_HW(CMU_DEVCLKEN0, 1, 0),
+ OWL_FACTOR_HW(CMU_LCDCLK, 0, 9, 0, lcd_factor_table),
+ 0);
+
+static OWL_COMP_DIV(clk_hdmi_audio, "hdmia", i2s_clk_mux_p,
+			OWL_MUX_HW(CMU_AUDIOPLL, 24, 1), /* CMU_AUDIOPLL bit 24, width 1: unused, single parent */
+ OWL_GATE_HW(CMU_DEVCLKEN1, 28, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 24, 4, 0, hdmia_div_table),
+ 0);
+
+static OWL_COMP_DIV(clk_i2srx, "i2srx", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 27, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 20, 4, 0, hdmia_div_table),
+ 0);
+
+static OWL_COMP_DIV(clk_i2stx, "i2stx", i2s_clk_mux_p,
+ OWL_MUX_HW(CMU_AUDIOPLL, 24, 1),
+ OWL_GATE_HW(CMU_DEVCLKEN1, 26, 0),
+ OWL_DIVIDER_HW(CMU_AUDIOPLL, 16, 4, 0, hdmia_div_table),
+ 0);
+
+/* For Bluetooth PCM communication */
+static OWL_COMP_FIXED_FACTOR(clk_pcm1, "pcm1", "audio_pll",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 31, 0),
+ 1, 2, 0);
+
+static OWL_COMP_DIV(clk_sensor_src, "sensor_src", sensor_clk_mux_p,
+ OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
+ {0},
+ OWL_DIVIDER_HW(CMU_SENSORCLK, 5, 2, 0, NULL),
+ 0);
+
+static OWL_COMP_FIXED_FACTOR(clk_ethernet, "ethernet", "ethernet_pll",
+ OWL_GATE_HW(CMU_DEVCLKEN1, 23, 0),
+ 1, 20, 0);
+
+static OWL_COMP_DIV_FIXED(clk_thermal_sensor, "thermal_sensor", "hosc",
+ OWL_GATE_HW(CMU_DEVCLKEN0, 31, 0),
+ OWL_DIVIDER_HW(CMU_SSTSCLK, 20, 10, 0, NULL),
+ 0);
+
+static struct owl_clk_common *s700_clks[] = {
+ &clk_core_pll.common,
+ &clk_dev_pll.common,
+ &clk_ddr_pll.common,
+ &clk_nand_pll.common,
+ &clk_display_pll.common,
+	&clk_cvbs_pll.common,
+ &clk_audio_pll.common,
+ &clk_ethernet_pll.common,
+ &clk_cpu.common,
+ &clk_dev.common,
+ &clk_ahb.common,
+ &clk_apb.common,
+ &clk_dmac.common,
+ &clk_noc0_clk_mux.common,
+ &clk_noc1_clk_mux.common,
+ &clk_hp_clk_mux.common,
+ &clk_hp_clk_div.common,
+ &clk_noc1_clk_div.common,
+ &clk_noc0.common,
+ &clk_noc1.common,
+ &clk_sensor_src.common,
+ &clk_gpio.common,
+ &clk_timer.common,
+ &clk_dsi.common,
+ &clk_csi.common,
+ &clk_si.common,
+ &clk_de.common,
+ &clk_hde.common,
+ &clk_vde.common,
+ &clk_vce.common,
+ &clk_nand.common,
+ &clk_sd0.common,
+ &clk_sd1.common,
+ &clk_sd2.common,
+ &clk_uart0.common,
+ &clk_uart1.common,
+ &clk_uart2.common,
+ &clk_uart3.common,
+ &clk_uart4.common,
+ &clk_uart5.common,
+ &clk_uart6.common,
+ &clk_pwm0.common,
+ &clk_pwm1.common,
+ &clk_pwm2.common,
+ &clk_pwm3.common,
+ &clk_pwm4.common,
+ &clk_pwm5.common,
+ &clk_gpu3d.common,
+ &clk_i2c0.common,
+ &clk_i2c1.common,
+ &clk_i2c2.common,
+ &clk_i2c3.common,
+ &clk_spi0.common,
+ &clk_spi1.common,
+ &clk_spi2.common,
+ &clk_spi3.common,
+ &clk_usb3_480mpll0.common,
+ &clk_usb3_480mphy0.common,
+ &clk_usb3_5gphy.common,
+ &clk_usb3_cce.common,
+ &clk_lcd.common,
+ &clk_hdmi_audio.common,
+ &clk_i2srx.common,
+ &clk_i2stx.common,
+ &clk_sensor0.common,
+ &clk_sensor1.common,
+ &clk_hdmi_dev.common,
+ &clk_ethernet.common,
+ &clk_rmii_ref.common,
+ &clk_usb2h0_pllen.common,
+ &clk_usb2h0_phy.common,
+ &clk_usb2h0_cce.common,
+ &clk_usb2h1_pllen.common,
+ &clk_usb2h1_phy.common,
+ &clk_usb2h1_cce.common,
+ &clk_tvout.common,
+ &clk_thermal_sensor.common,
+ &clk_irc_switch.common,
+ &clk_pcm1.common,
+};
+
+static struct clk_hw_onecell_data s700_hw_clks = {
+ .hws = {
+ [CLK_CORE_PLL] = &clk_core_pll.common.hw,
+ [CLK_DEV_PLL] = &clk_dev_pll.common.hw,
+ [CLK_DDR_PLL] = &clk_ddr_pll.common.hw,
+ [CLK_NAND_PLL] = &clk_nand_pll.common.hw,
+ [CLK_DISPLAY_PLL] = &clk_display_pll.common.hw,
+		[CLK_CVBS_PLL] = &clk_cvbs_pll.common.hw,
+ [CLK_AUDIO_PLL] = &clk_audio_pll.common.hw,
+ [CLK_ETHERNET_PLL] = &clk_ethernet_pll.common.hw,
+ [CLK_CPU] = &clk_cpu.common.hw,
+ [CLK_DEV] = &clk_dev.common.hw,
+ [CLK_AHB] = &clk_ahb.common.hw,
+ [CLK_APB] = &clk_apb.common.hw,
+ [CLK_DMAC] = &clk_dmac.common.hw,
+ [CLK_NOC0_CLK_MUX] = &clk_noc0_clk_mux.common.hw,
+ [CLK_NOC1_CLK_MUX] = &clk_noc1_clk_mux.common.hw,
+ [CLK_HP_CLK_MUX] = &clk_hp_clk_mux.common.hw,
+ [CLK_HP_CLK_DIV] = &clk_hp_clk_div.common.hw,
+ [CLK_NOC1_CLK_DIV] = &clk_noc1_clk_div.common.hw,
+ [CLK_NOC0] = &clk_noc0.common.hw,
+ [CLK_NOC1] = &clk_noc1.common.hw,
+ [CLK_SENOR_SRC] = &clk_sensor_src.common.hw,
+ [CLK_GPIO] = &clk_gpio.common.hw,
+ [CLK_TIMER] = &clk_timer.common.hw,
+ [CLK_DSI] = &clk_dsi.common.hw,
+ [CLK_CSI] = &clk_csi.common.hw,
+ [CLK_SI] = &clk_si.common.hw,
+ [CLK_DE] = &clk_de.common.hw,
+ [CLK_HDE] = &clk_hde.common.hw,
+ [CLK_VDE] = &clk_vde.common.hw,
+ [CLK_VCE] = &clk_vce.common.hw,
+ [CLK_NAND] = &clk_nand.common.hw,
+ [CLK_SD0] = &clk_sd0.common.hw,
+ [CLK_SD1] = &clk_sd1.common.hw,
+ [CLK_SD2] = &clk_sd2.common.hw,
+ [CLK_UART0] = &clk_uart0.common.hw,
+ [CLK_UART1] = &clk_uart1.common.hw,
+ [CLK_UART2] = &clk_uart2.common.hw,
+ [CLK_UART3] = &clk_uart3.common.hw,
+ [CLK_UART4] = &clk_uart4.common.hw,
+ [CLK_UART5] = &clk_uart5.common.hw,
+ [CLK_UART6] = &clk_uart6.common.hw,
+ [CLK_PWM0] = &clk_pwm0.common.hw,
+ [CLK_PWM1] = &clk_pwm1.common.hw,
+ [CLK_PWM2] = &clk_pwm2.common.hw,
+ [CLK_PWM3] = &clk_pwm3.common.hw,
+ [CLK_PWM4] = &clk_pwm4.common.hw,
+ [CLK_PWM5] = &clk_pwm5.common.hw,
+ [CLK_GPU3D] = &clk_gpu3d.common.hw,
+ [CLK_I2C0] = &clk_i2c0.common.hw,
+ [CLK_I2C1] = &clk_i2c1.common.hw,
+ [CLK_I2C2] = &clk_i2c2.common.hw,
+ [CLK_I2C3] = &clk_i2c3.common.hw,
+ [CLK_SPI0] = &clk_spi0.common.hw,
+ [CLK_SPI1] = &clk_spi1.common.hw,
+ [CLK_SPI2] = &clk_spi2.common.hw,
+ [CLK_SPI3] = &clk_spi3.common.hw,
+ [CLK_USB3_480MPLL0] = &clk_usb3_480mpll0.common.hw,
+ [CLK_USB3_480MPHY0] = &clk_usb3_480mphy0.common.hw,
+ [CLK_USB3_5GPHY] = &clk_usb3_5gphy.common.hw,
+ [CLK_USB3_CCE] = &clk_usb3_cce.common.hw,
+ [CLK_LCD] = &clk_lcd.common.hw,
+ [CLK_HDMI_AUDIO] = &clk_hdmi_audio.common.hw,
+ [CLK_I2SRX] = &clk_i2srx.common.hw,
+ [CLK_I2STX] = &clk_i2stx.common.hw,
+ [CLK_SENSOR0] = &clk_sensor0.common.hw,
+ [CLK_SENSOR1] = &clk_sensor1.common.hw,
+ [CLK_HDMI_DEV] = &clk_hdmi_dev.common.hw,
+ [CLK_ETHERNET] = &clk_ethernet.common.hw,
+ [CLK_RMII_REF] = &clk_rmii_ref.common.hw,
+ [CLK_USB2H0_PLLEN] = &clk_usb2h0_pllen.common.hw,
+ [CLK_USB2H0_PHY] = &clk_usb2h0_phy.common.hw,
+ [CLK_USB2H0_CCE] = &clk_usb2h0_cce.common.hw,
+ [CLK_USB2H1_PLLEN] = &clk_usb2h1_pllen.common.hw,
+ [CLK_USB2H1_PHY] = &clk_usb2h1_phy.common.hw,
+ [CLK_USB2H1_CCE] = &clk_usb2h1_cce.common.hw,
+ [CLK_TVOUT] = &clk_tvout.common.hw,
+ [CLK_THERMAL_SENSOR] = &clk_thermal_sensor.common.hw,
+ [CLK_IRC_SWITCH] = &clk_irc_switch.common.hw,
+ [CLK_PCM1] = &clk_pcm1.common.hw,
+ },
+ .num = CLK_NR_CLKS,
+};
+
+static const struct owl_clk_desc s700_clk_desc = {
+ .clks = s700_clks,
+ .num_clks = ARRAY_SIZE(s700_clks),
+
+ .hw_clks = &s700_hw_clks,
+};
+
+static int s700_clk_probe(struct platform_device *pdev)
+{
+ const struct owl_clk_desc *desc;
+
+ desc = &s700_clk_desc;
+ owl_clk_regmap_init(pdev, desc);
+
+ return owl_clk_probe(&pdev->dev, desc->hw_clks);
+}
+
+static const struct of_device_id s700_clk_of_match[] = {
+ { .compatible = "actions,s700-cmu", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver s700_clk_driver = {
+ .probe = s700_clk_probe,
+ .driver = {
+ .name = "s700-cmu",
+ .of_match_table = s700_clk_of_match
+ },
+};
+
+static int __init s700_clk_init(void)
+{
+ return platform_driver_register(&s700_clk_driver);
+}
+core_initcall(s700_clk_init);
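
For context: the CMU registered above is a one-cell clock provider, so consumers reference its outputs by the indices from dt-bindings/clock/actions,s700-cmu.h. A minimal consumer-side sketch in C (device and DT wiring hypothetical):

	#include <linux/clk.h>

	/* Assuming the consumer node carries "clocks = <&cmu CLK_UART3>;",
	 * the framework resolves this to the uart3 composite clock above.
	 */
	struct clk *uart_clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(uart_clk))
		return PTR_ERR(uart_clk);

	return clk_prepare_enable(uart_clk);
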
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 082596f37c1d..facc169ebb68 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_HAVE_AT91_USB_CLK) += clk-usb.o
obj-$(CONFIG_HAVE_AT91_SMD) += clk-smd.o
obj-$(CONFIG_HAVE_AT91_H32MX) += clk-h32mx.o
obj-$(CONFIG_HAVE_AT91_GENERATED_CLK) += clk-generated.o
+obj-$(CONFIG_HAVE_AT91_I2S_MUX_CLK) += clk-i2s-mux.o
diff --git a/drivers/clk/at91/clk-i2s-mux.c b/drivers/clk/at91/clk-i2s-mux.c
new file mode 100644
index 000000000000..f0c3c3079f04
--- /dev/null
+++ b/drivers/clk/at91/clk-i2s-mux.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Microchip Technology Inc,
+ * Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <soc/at91/atmel-sfr.h>
+
+#define I2S_BUS_NR 2
+
+struct clk_i2s_mux {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u8 bus_id;
+};
+
+#define to_clk_i2s_mux(hw) container_of(hw, struct clk_i2s_mux, hw)
+
+static u8 clk_i2s_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_i2s_mux *mux = to_clk_i2s_mux(hw);
+ u32 val;
+
+ regmap_read(mux->regmap, AT91_SFR_I2SCLKSEL, &val);
+
+ return (val & BIT(mux->bus_id)) >> mux->bus_id;
+}
+
+static int clk_i2s_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_i2s_mux *mux = to_clk_i2s_mux(hw);
+
+ return regmap_update_bits(mux->regmap, AT91_SFR_I2SCLKSEL,
+ BIT(mux->bus_id), index << mux->bus_id);
+}
+
+static const struct clk_ops clk_i2s_mux_ops = {
+ .get_parent = clk_i2s_mux_get_parent,
+ .set_parent = clk_i2s_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+static struct clk_hw * __init
+at91_clk_i2s_mux_register(struct regmap *regmap, const char *name,
+ const char * const *parent_names,
+ unsigned int num_parents, u8 bus_id)
+{
+ struct clk_init_data init = {};
+ struct clk_i2s_mux *i2s_ck;
+ int ret;
+
+ i2s_ck = kzalloc(sizeof(*i2s_ck), GFP_KERNEL);
+ if (!i2s_ck)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_i2s_mux_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ i2s_ck->hw.init = &init;
+ i2s_ck->bus_id = bus_id;
+ i2s_ck->regmap = regmap;
+
+ ret = clk_hw_register(NULL, &i2s_ck->hw);
+ if (ret) {
+ kfree(i2s_ck);
+ return ERR_PTR(ret);
+ }
+
+ return &i2s_ck->hw;
+}
+
+static void __init of_sama5d2_clk_i2s_mux_setup(struct device_node *np)
+{
+ struct regmap *regmap_sfr;
+ u8 bus_id;
+ const char *parent_names[2];
+ struct device_node *i2s_mux_np;
+ struct clk_hw *hw;
+ int ret;
+
+ regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
+ if (IS_ERR(regmap_sfr))
+ return;
+
+ for_each_child_of_node(np, i2s_mux_np) {
+ if (of_property_read_u8(i2s_mux_np, "reg", &bus_id))
+ continue;
+
+ if (bus_id > I2S_BUS_NR)
+ continue;
+
+ ret = of_clk_parent_fill(i2s_mux_np, parent_names, 2);
+ if (ret != 2)
+ continue;
+
+ hw = at91_clk_i2s_mux_register(regmap_sfr, i2s_mux_np->name,
+ parent_names, 2, bus_id);
+ if (IS_ERR(hw))
+ continue;
+
+ of_clk_add_hw_provider(i2s_mux_np, of_clk_hw_simple_get, hw);
+ }
+}
+
+CLK_OF_DECLARE(sama5d2_clk_i2s_mux, "atmel,sama5d2-clk-i2s-mux",
+ of_sama5d2_clk_i2s_mux_setup);
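
Each child node registered above becomes a single-bit mux in AT91_SFR_I2SCLKSEL, so reparenting from a consumer is one regmap_update_bits() away. A hypothetical consumer switching I2S bus 0 to its second parent:

	/* Clock names here are assumptions for illustration. Selecting
	 * parent index 1 sets BIT(0) of AT91_SFR_I2SCLKSEL through the
	 * set_parent op above.
	 */
	struct clk *muxck = devm_clk_get(dev, "muxck");
	struct clk *gclk = devm_clk_get(dev, "gclk");

	if (!IS_ERR(muxck) && !IS_ERR(gclk))
		clk_set_parent(muxck, gclk);
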
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 7b70a074095d..596136793fc4 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -109,7 +109,7 @@ static const struct aspeed_gate_data aspeed_gates[] = {
[ASPEED_CLK_GATE_RSACLK] = { 24, -1, "rsaclk-gate", NULL, 0 }, /* RSA */
[ASPEED_CLK_GATE_UART3CLK] = { 25, -1, "uart3clk-gate", "uart", 0 }, /* UART3 */
[ASPEED_CLK_GATE_UART4CLK] = { 26, -1, "uart4clk-gate", "uart", 0 }, /* UART4 */
- [ASPEED_CLK_GATE_SDCLKCLK] = { 27, 16, "sdclk-gate", NULL, 0 }, /* SDIO/SD */
+ [ASPEED_CLK_GATE_SDCLK] = { 27, 16, "sdclk-gate", NULL, 0 }, /* SDIO/SD */
[ASPEED_CLK_GATE_LHCCLK] = { 28, -1, "lhclk-gate", "lhclk", 0 }, /* LPC master/LPC+ */
};
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index a2f8c42e527a..92bc4aca0f95 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CS2000 -- CIRRUS LOGIC Fractional-N Clock Synthesizer & Clock Multiplier
*
* Copyright (C) 2015 Renesas Electronics Corporation
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index a5d402de5584..20724abd38bd 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -177,8 +177,15 @@ static struct clk *_of_fixed_factor_clk_setup(struct device_node *node)
clk = clk_register_fixed_factor(NULL, clk_name, parent_name, flags,
mult, div);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ /*
+ * If parent clock is not registered, registration would fail.
+ * Clear OF_POPULATED flag so that clock registration can be
+ * attempted again from probe function.
+ */
+ of_node_clear_flag(node, OF_POPULATED);
return clk;
+ }
ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
if (ret) {
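
Clearing OF_POPULATED means the platform bus will later create a device for this node, so registration gets a second chance from the driver's probe path. That retry is essentially (sketch, mirroring the setup helper above):

	static int of_fixed_factor_clk_probe(struct platform_device *pdev)
	{
		struct clk *clk;

		/* By probe time the parent clock is normally registered,
		 * so the setup that failed during of_clk_init() succeeds.
		 */
		clk = _of_fixed_factor_clk_setup(pdev->dev.of_node);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		return 0;
	}
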
diff --git a/drivers/clk/clk-max9485.c b/drivers/clk/clk-max9485.c
new file mode 100644
index 000000000000..5e80f3d090f3
--- /dev/null
+++ b/drivers/clk/clk-max9485.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/regulator/consumer.h>
+
+#include <dt-bindings/clock/maxim,max9485.h>
+
+#define MAX9485_NUM_CLKS 4
+
+/* This chip has a single 8-bit register. */
+
+#define MAX9485_FS_12KHZ (0 << 0)
+#define MAX9485_FS_32KHZ (1 << 0)
+#define MAX9485_FS_44_1KHZ (2 << 0)
+#define MAX9485_FS_48KHZ (3 << 0)
+
+#define MAX9485_SCALE_256 (0 << 2)
+#define MAX9485_SCALE_384 (1 << 2)
+#define MAX9485_SCALE_768 (2 << 2)
+
+#define MAX9485_DOUBLE BIT(4)
+#define MAX9485_CLKOUT1_ENABLE BIT(5)
+#define MAX9485_CLKOUT2_ENABLE BIT(6)
+#define MAX9485_MCLK_ENABLE BIT(7)
+#define MAX9485_FREQ_MASK 0x1f
+
+struct max9485_rate {
+ unsigned long out;
+ u8 reg_value;
+};
+
+/*
+ * Ordered by frequency. For frequencies the hardware can generate with
+ * multiple settings, the entry with the lowest jitter is listed first.
+ */
+static const struct max9485_rate max9485_rates[] = {
+ { 3072000, MAX9485_FS_12KHZ | MAX9485_SCALE_256 },
+ { 4608000, MAX9485_FS_12KHZ | MAX9485_SCALE_384 },
+ { 8192000, MAX9485_FS_32KHZ | MAX9485_SCALE_256 },
+	{  9216000, MAX9485_FS_12KHZ   | MAX9485_SCALE_768 },
+ { 11289600, MAX9485_FS_44_1KHZ | MAX9485_SCALE_256 },
+ { 12288000, MAX9485_FS_48KHZ | MAX9485_SCALE_256 },
+ { 12288000, MAX9485_FS_32KHZ | MAX9485_SCALE_384 },
+ { 16384000, MAX9485_FS_32KHZ | MAX9485_SCALE_256 | MAX9485_DOUBLE },
+ { 16934400, MAX9485_FS_44_1KHZ | MAX9485_SCALE_384 },
+	{ 18432000, MAX9485_FS_48KHZ   | MAX9485_SCALE_384 },
+ { 22579200, MAX9485_FS_44_1KHZ | MAX9485_SCALE_256 | MAX9485_DOUBLE },
+ { 24576000, MAX9485_FS_48KHZ | MAX9485_SCALE_256 | MAX9485_DOUBLE },
+ { 24576000, MAX9485_FS_32KHZ | MAX9485_SCALE_384 | MAX9485_DOUBLE },
+ { 24576000, MAX9485_FS_32KHZ | MAX9485_SCALE_768 },
+ { 33868800, MAX9485_FS_44_1KHZ | MAX9485_SCALE_384 | MAX9485_DOUBLE },
+ { 33868800, MAX9485_FS_44_1KHZ | MAX9485_SCALE_768 },
+ { 36864000, MAX9485_FS_48KHZ | MAX9485_SCALE_384 | MAX9485_DOUBLE },
+ { 36864000, MAX9485_FS_48KHZ | MAX9485_SCALE_768 },
+ { 49152000, MAX9485_FS_32KHZ | MAX9485_SCALE_768 | MAX9485_DOUBLE },
+ { 67737600, MAX9485_FS_44_1KHZ | MAX9485_SCALE_768 | MAX9485_DOUBLE },
+ { 73728000, MAX9485_FS_48KHZ | MAX9485_SCALE_768 | MAX9485_DOUBLE },
+ { } /* sentinel */
+};
+
+struct max9485_driver_data;
+
+struct max9485_clk_hw {
+ struct clk_hw hw;
+ struct clk_init_data init;
+ u8 enable_bit;
+ struct max9485_driver_data *drvdata;
+};
+
+struct max9485_driver_data {
+ struct clk *xclk;
+ struct i2c_client *client;
+ u8 reg_value;
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+ struct max9485_clk_hw hw[MAX9485_NUM_CLKS];
+};
+
+static inline struct max9485_clk_hw *to_max9485_clk(struct clk_hw *hw)
+{
+ return container_of(hw, struct max9485_clk_hw, hw);
+}
+
+static int max9485_update_bits(struct max9485_driver_data *drvdata,
+ u8 mask, u8 value)
+{
+ int ret;
+
+ drvdata->reg_value &= ~mask;
+ drvdata->reg_value |= value;
+
+ dev_dbg(&drvdata->client->dev,
+ "updating mask 0x%02x value 0x%02x -> 0x%02x\n",
+ mask, value, drvdata->reg_value);
+
+ ret = i2c_master_send(drvdata->client,
+ &drvdata->reg_value,
+ sizeof(drvdata->reg_value));
+
+ return ret < 0 ? ret : 0;
+}
+
+static int max9485_clk_prepare(struct clk_hw *hw)
+{
+ struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
+
+ return max9485_update_bits(clk_hw->drvdata,
+ clk_hw->enable_bit,
+ clk_hw->enable_bit);
+}
+
+static void max9485_clk_unprepare(struct clk_hw *hw)
+{
+ struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
+
+ max9485_update_bits(clk_hw->drvdata, clk_hw->enable_bit, 0);
+}
+
+/*
+ * CLKOUT - configurable clock output
+ */
+static int max9485_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
+ const struct max9485_rate *entry;
+
+ for (entry = max9485_rates; entry->out != 0; entry++)
+ if (entry->out == rate)
+ break;
+
+ if (entry->out == 0)
+ return -EINVAL;
+
+ return max9485_update_bits(clk_hw->drvdata,
+ MAX9485_FREQ_MASK,
+ entry->reg_value);
+}
+
+static unsigned long max9485_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct max9485_clk_hw *clk_hw = to_max9485_clk(hw);
+ struct max9485_driver_data *drvdata = clk_hw->drvdata;
+ u8 val = drvdata->reg_value & MAX9485_FREQ_MASK;
+ const struct max9485_rate *entry;
+
+ for (entry = max9485_rates; entry->out != 0; entry++)
+ if (val == entry->reg_value)
+ return entry->out;
+
+ return 0;
+}
+
+static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct max9485_rate *curr, *prev = NULL;
+
+ for (curr = max9485_rates; curr->out != 0; curr++) {
+ /* Exact matches */
+ if (curr->out == rate)
+ return rate;
+
+ /*
+ * Find the first entry that has a frequency higher than the
+ * requested one.
+ */
+ if (curr->out > rate) {
+ unsigned int mid;
+
+ /*
+ * If this is the first entry, clamp the value to the
+ * lowest possible frequency.
+ */
+ if (!prev)
+ return curr->out;
+
+ /*
+ * Otherwise, determine whether the previous entry or
+ * current one is closer.
+ */
+ mid = prev->out + ((curr->out - prev->out) / 2);
+
+ return (mid > rate) ? prev->out : curr->out;
+ }
+
+ prev = curr;
+ }
+
+ /* If the last entry was still too high, clamp the value */
+ return prev->out;
+}
+
+struct max9485_clk {
+ const char *name;
+ int parent_index;
+ const struct clk_ops ops;
+ u8 enable_bit;
+};
+
+static const struct max9485_clk max9485_clks[MAX9485_NUM_CLKS] = {
+ [MAX9485_MCLKOUT] = {
+ .name = "mclkout",
+ .parent_index = -1,
+ .enable_bit = MAX9485_MCLK_ENABLE,
+ .ops = {
+ .prepare = max9485_clk_prepare,
+ .unprepare = max9485_clk_unprepare,
+ },
+ },
+ [MAX9485_CLKOUT] = {
+ .name = "clkout",
+ .parent_index = -1,
+ .ops = {
+ .set_rate = max9485_clkout_set_rate,
+ .round_rate = max9485_clkout_round_rate,
+ .recalc_rate = max9485_clkout_recalc_rate,
+ },
+ },
+ [MAX9485_CLKOUT1] = {
+ .name = "clkout1",
+ .parent_index = MAX9485_CLKOUT,
+ .enable_bit = MAX9485_CLKOUT1_ENABLE,
+ .ops = {
+ .prepare = max9485_clk_prepare,
+ .unprepare = max9485_clk_unprepare,
+ },
+ },
+ [MAX9485_CLKOUT2] = {
+ .name = "clkout2",
+ .parent_index = MAX9485_CLKOUT,
+ .enable_bit = MAX9485_CLKOUT2_ENABLE,
+ .ops = {
+ .prepare = max9485_clk_prepare,
+ .unprepare = max9485_clk_unprepare,
+ },
+ },
+};
+
+static struct clk_hw *
+max9485_of_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct max9485_driver_data *drvdata = data;
+ unsigned int idx = clkspec->args[0];
+
+ return &drvdata->hw[idx].hw;
+}
+
+static int max9485_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max9485_driver_data *drvdata;
+ struct device *dev = &client->dev;
+ const char *xclk_name;
+ int i, ret;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->xclk = devm_clk_get(dev, "xclk");
+ if (IS_ERR(drvdata->xclk))
+ return PTR_ERR(drvdata->xclk);
+
+ xclk_name = __clk_get_name(drvdata->xclk);
+
+ drvdata->supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(drvdata->supply))
+ return PTR_ERR(drvdata->supply);
+
+ ret = regulator_enable(drvdata->supply);
+ if (ret < 0)
+ return ret;
+
+ drvdata->reset_gpio =
+ devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(drvdata->reset_gpio))
+ return PTR_ERR(drvdata->reset_gpio);
+
+ i2c_set_clientdata(client, drvdata);
+ drvdata->client = client;
+
+ ret = i2c_master_recv(drvdata->client, &drvdata->reg_value,
+ sizeof(drvdata->reg_value));
+ if (ret < 0) {
+ dev_warn(dev, "Unable to read device register: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < MAX9485_NUM_CLKS; i++) {
+ int parent_index = max9485_clks[i].parent_index;
+ const char *name;
+
+ if (of_property_read_string_index(dev->of_node,
+ "clock-output-names",
+ i, &name) == 0) {
+ drvdata->hw[i].init.name = name;
+ } else {
+ drvdata->hw[i].init.name = max9485_clks[i].name;
+ }
+
+ drvdata->hw[i].init.ops = &max9485_clks[i].ops;
+ drvdata->hw[i].init.num_parents = 1;
+ drvdata->hw[i].init.flags = 0;
+
+ if (parent_index > 0) {
+ drvdata->hw[i].init.parent_names =
+ &drvdata->hw[parent_index].init.name;
+ drvdata->hw[i].init.flags |= CLK_SET_RATE_PARENT;
+ } else {
+ drvdata->hw[i].init.parent_names = &xclk_name;
+ }
+
+ drvdata->hw[i].enable_bit = max9485_clks[i].enable_bit;
+ drvdata->hw[i].hw.init = &drvdata->hw[i].init;
+ drvdata->hw[i].drvdata = drvdata;
+
+ ret = devm_clk_hw_register(dev, &drvdata->hw[i].hw);
+ if (ret < 0)
+ return ret;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, max9485_of_clk_get, drvdata);
+}
+
+static int __maybe_unused max9485_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max9485_driver_data *drvdata = i2c_get_clientdata(client);
+
+ gpiod_set_value_cansleep(drvdata->reset_gpio, 0);
+
+ return 0;
+}
+
+static int __maybe_unused max9485_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max9485_driver_data *drvdata = i2c_get_clientdata(client);
+ int ret;
+
+ gpiod_set_value_cansleep(drvdata->reset_gpio, 1);
+
+ ret = i2c_master_send(client, &drvdata->reg_value,
+ sizeof(drvdata->reg_value));
+
+ return ret < 0 ? ret : 0;
+}
+
+static const struct dev_pm_ops max9485_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(max9485_suspend, max9485_resume)
+};
+
+static const struct of_device_id max9485_dt_ids[] = {
+ { .compatible = "maxim,max9485", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max9485_dt_ids);
+
+static const struct i2c_device_id max9485_i2c_ids[] = {
+ { .name = "max9485", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max9485_i2c_ids);
+
+static struct i2c_driver max9485_driver = {
+ .driver = {
+ .name = "max9485",
+ .pm = &max9485_pm_ops,
+ .of_match_table = max9485_dt_ids,
+ },
+ .probe = max9485_i2c_probe,
+ .id_table = max9485_i2c_ids,
+};
+module_i2c_driver(max9485_driver);
+
+MODULE_AUTHOR("Daniel Mack <daniel@zonque.org>");
+MODULE_DESCRIPTION("MAX9485 Programmable Audio Clock Generator");
+MODULE_LICENSE("GPL v2");
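
A quick check of the round_rate() midpoint logic above, from the consumer side: a 12 MHz request falls between 11289600 and 12288000, whose midpoint is 11788800, so the request rounds up. Sketch (device and clock handle assumed):

	struct clk *clkout = devm_clk_get(dev, "clkout");
	long rounded = clk_round_rate(clkout, 12000000);

	/* 12000000 > 11788800, the midpoint, so rounded == 12288000 */
	clk_set_rate(clkout, rounded);
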
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index bb2a6f2f5516..a985bf5e1ac6 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -38,7 +38,6 @@ static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
- int step;
u64 fmin, fmax, ftmp;
struct scmi_clk *clk = to_scmi_clk(hw);
@@ -60,9 +59,9 @@ static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
ftmp = rate - fmin;
ftmp += clk->info->range.step_size - 1; /* to round up */
- step = do_div(ftmp, clk->info->range.step_size);
+ do_div(ftmp, clk->info->range.step_size);
- return step * clk->info->range.step_size + fmin;
+ return ftmp * clk->info->range.step_size + fmin;
}
static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
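
The fix hinges on do_div() semantics: the macro divides its 64-bit dividend in place and returns the 32-bit remainder, so the old code scaled the remainder where it meant the quotient. A small illustration (values arbitrary):

	#include <asm/div64.h>

	u64 ftmp = 34999;		/* (rate - fmin), rounded up */
	u32 rem = do_div(ftmp, 25000);	/* ftmp = 1 (quotient), rem = 9999 */

	/* The return value is now ftmp * step_size + fmin, as intended. */
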
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index 09b6718956bd..153b3a2b5857 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -74,6 +74,33 @@ static int si514_enable_output(struct clk_si514 *data, bool enable)
SI514_CONTROL_OE, enable ? SI514_CONTROL_OE : 0);
}
+static int si514_prepare(struct clk_hw *hw)
+{
+ struct clk_si514 *data = to_clk_si514(hw);
+
+ return si514_enable_output(data, true);
+}
+
+static void si514_unprepare(struct clk_hw *hw)
+{
+ struct clk_si514 *data = to_clk_si514(hw);
+
+ si514_enable_output(data, false);
+}
+
+static int si514_is_prepared(struct clk_hw *hw)
+{
+ struct clk_si514 *data = to_clk_si514(hw);
+ unsigned int val;
+ int err;
+
+ err = regmap_read(data->regmap, SI514_REG_CONTROL, &val);
+ if (err < 0)
+ return err;
+
+ return !!(val & SI514_CONTROL_OE);
+}
+
/* Retrieve clock multiplier and dividers from hardware */
static int si514_get_muldiv(struct clk_si514 *data,
struct clk_si514_muldiv *settings)
@@ -235,12 +262,17 @@ static int si514_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_si514 *data = to_clk_si514(hw);
struct clk_si514_muldiv settings;
+ unsigned int old_oe_state;
int err;
err = si514_calc_muldiv(&settings, rate);
if (err)
return err;
+ err = regmap_read(data->regmap, SI514_REG_CONTROL, &old_oe_state);
+ if (err)
+ return err;
+
si514_enable_output(data, false);
err = si514_set_muldiv(data, &settings);
@@ -255,12 +287,16 @@ static int si514_set_rate(struct clk_hw *hw, unsigned long rate,
/* Applying a new frequency can take up to 10ms */
usleep_range(10000, 12000);
- si514_enable_output(data, true);
+ if (old_oe_state & SI514_CONTROL_OE)
+ si514_enable_output(data, true);
return err;
}
static const struct clk_ops si514_clk_ops = {
+ .prepare = si514_prepare,
+ .unprepare = si514_unprepare,
+ .is_prepared = si514_is_prepared,
.recalc_rate = si514_recalc_rate,
.round_rate = si514_round_rate,
.set_rate = si514_set_rate,
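
With prepare/unprepare wired up, the output-enable bit follows the standard consumer calls instead of being forced back on by every set_rate(). Consumer view (sketch):

	struct clk *clk = devm_clk_get(dev, NULL);

	clk_prepare_enable(clk);	/* sets SI514_CONTROL_OE */
	/* ... use the clock ... */
	clk_disable_unprepare(clk);	/* clears SI514_CONTROL_OE */
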
diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c
index 1e2a3b8f9454..64e607f3232a 100644
--- a/drivers/clk/clk-si544.c
+++ b/drivers/clk/clk-si544.c
@@ -86,6 +86,33 @@ static int si544_enable_output(struct clk_si544 *data, bool enable)
SI544_OE_STATE_ODC_OE, enable ? SI544_OE_STATE_ODC_OE : 0);
}
+static int si544_prepare(struct clk_hw *hw)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+
+ return si544_enable_output(data, true);
+}
+
+static void si544_unprepare(struct clk_hw *hw)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+
+ si544_enable_output(data, false);
+}
+
+static int si544_is_prepared(struct clk_hw *hw)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+ unsigned int val;
+ int err;
+
+ err = regmap_read(data->regmap, SI544_REG_OE_STATE, &val);
+ if (err < 0)
+ return err;
+
+ return !!(val & SI544_OE_STATE_ODC_OE);
+}
+
/* Retrieve clock multiplier and dividers from hardware */
static int si544_get_muldiv(struct clk_si544 *data,
struct clk_si544_muldiv *settings)
@@ -273,6 +300,7 @@ static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_si544 *data = to_clk_si544(hw);
struct clk_si544_muldiv settings;
+ unsigned int old_oe_state;
int err;
if (!is_valid_frequency(data, rate))
@@ -282,6 +310,10 @@ static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
if (err)
return err;
+ err = regmap_read(data->regmap, SI544_REG_OE_STATE, &old_oe_state);
+ if (err)
+ return err;
+
si544_enable_output(data, false);
/* Allow FCAL for this frequency update */
@@ -303,12 +335,16 @@ static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
/* Applying a new frequency can take up to 10ms */
usleep_range(10000, 12000);
- si544_enable_output(data, true);
+ if (old_oe_state & SI544_OE_STATE_ODC_OE)
+ si544_enable_output(data, true);
return err;
}
static const struct clk_ops si544_clk_ops = {
+ .prepare = si544_prepare,
+ .unprepare = si544_unprepare,
+ .is_prepared = si544_is_prepared,
.recalc_rate = si544_recalc_rate,
.round_rate = si544_round_rate,
.set_rate = si544_set_rate,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index e2ed078abd90..d31055ae6ec6 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -67,6 +67,7 @@ struct clk_core {
unsigned long max_rate;
unsigned long accuracy;
int phase;
+ struct clk_duty duty;
struct hlist_head children;
struct hlist_node child_node;
struct hlist_head clks;
@@ -690,6 +691,9 @@ static void clk_core_unprepare(struct clk_core *core)
"Unpreparing critical %s\n", core->name))
return;
+ if (core->flags & CLK_SET_RATE_GATE)
+ clk_core_rate_unprotect(core);
+
if (--core->prepare_count > 0)
return;
@@ -764,6 +768,16 @@ static int clk_core_prepare(struct clk_core *core)
core->prepare_count++;
+ /*
+ * CLK_SET_RATE_GATE is a special case of clock protection
+ * Instead of a consumer claiming exclusive rate control, it is
+ * actually the provider which prevents any consumer from making any
+ * operation which could result in a rate change or rate glitch while
+ * the clock is prepared.
+ */
+ if (core->flags & CLK_SET_RATE_GATE)
+ clk_core_rate_protect(core);
+
return 0;
unprepare:
clk_core_unprepare(core->parent);
@@ -1887,9 +1901,6 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
if (clk_core_rate_is_protected(core))
return -EBUSY;
- if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
- return -EBUSY;
-
/* calculate new rates and get the topmost changed clock */
top = clk_calc_new_rates(core, req_rate);
if (!top)
@@ -2401,6 +2412,172 @@ int clk_get_phase(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_get_phase);
+static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
+{
+ /* Assume a default value of 50% */
+ core->duty.num = 1;
+ core->duty.den = 2;
+}
+
+static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
+
+static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
+{
+ struct clk_duty *duty = &core->duty;
+ int ret = 0;
+
+ if (!core->ops->get_duty_cycle)
+ return clk_core_update_duty_cycle_parent_nolock(core);
+
+ ret = core->ops->get_duty_cycle(core->hw, duty);
+ if (ret)
+ goto reset;
+
+ /* Don't trust the clock provider too much */
+ if (duty->den == 0 || duty->num > duty->den) {
+ ret = -EINVAL;
+ goto reset;
+ }
+
+ return 0;
+
+reset:
+ clk_core_reset_duty_cycle_nolock(core);
+ return ret;
+}
+
+static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
+{
+ int ret = 0;
+
+ if (core->parent &&
+ core->flags & CLK_DUTY_CYCLE_PARENT) {
+ ret = clk_core_update_duty_cycle_nolock(core->parent);
+ memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
+ } else {
+ clk_core_reset_duty_cycle_nolock(core);
+ }
+
+ return ret;
+}
+
+static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
+ struct clk_duty *duty);
+
+static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
+ struct clk_duty *duty)
+{
+ int ret;
+
+ lockdep_assert_held(&prepare_lock);
+
+ if (clk_core_rate_is_protected(core))
+ return -EBUSY;
+
+ trace_clk_set_duty_cycle(core, duty);
+
+ if (!core->ops->set_duty_cycle)
+ return clk_core_set_duty_cycle_parent_nolock(core, duty);
+
+ ret = core->ops->set_duty_cycle(core->hw, duty);
+ if (!ret)
+ memcpy(&core->duty, duty, sizeof(*duty));
+
+ trace_clk_set_duty_cycle_complete(core, duty);
+
+ return ret;
+}
+
+static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
+ struct clk_duty *duty)
+{
+ int ret = 0;
+
+ if (core->parent &&
+ core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
+ ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
+ memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
+ }
+
+ return ret;
+}
+
+/**
+ * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @num: numerator of the duty cycle ratio to be applied
+ * @den: denominator of the duty cycle ratio to be applied
+ *
+ * Apply the duty cycle ratio if the ratio is valid and the clock can
+ * perform this operation.
+ *
+ * Returns (0) on success, a negative errno otherwise.
+ */
+int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
+{
+ int ret;
+ struct clk_duty duty;
+
+ if (!clk)
+ return 0;
+
+ /* sanity check the ratio */
+ if (den == 0 || num > den)
+ return -EINVAL;
+
+ duty.num = num;
+ duty.den = den;
+
+ clk_prepare_lock();
+
+ if (clk->exclusive_count)
+ clk_core_rate_unprotect(clk->core);
+
+ ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
+
+ if (clk->exclusive_count)
+ clk_core_rate_protect(clk->core);
+
+ clk_prepare_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
+
+static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
+ unsigned int scale)
+{
+ struct clk_duty *duty = &core->duty;
+ int ret;
+
+ clk_prepare_lock();
+
+ ret = clk_core_update_duty_cycle_nolock(core);
+ if (!ret)
+ ret = mult_frac(scale, duty->num, duty->den);
+
+ clk_prepare_unlock();
+
+ return ret;
+}
+
+/**
+ * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @scale: scaling factor to be applied to represent the ratio as an integer
+ *
+ * Returns the duty cycle ratio of a clock node multiplied by the provided
+ * scaling factor, or negative errno on error.
+ */
+int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
+{
+ if (!clk)
+ return 0;
+
+ return clk_core_get_scaled_duty_cycle(clk->core, scale);
+}
+EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
+
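
Consumer usage of the new API (sketch): request a 1/3 ratio, then read it back scaled to hundredths of a percent:

	int ret = clk_set_duty_cycle(clk, 1, 3);

	if (!ret)
		/* mult_frac(100000, 1, 3) == 33333, i.e. 33.333% */
		ret = clk_get_scaled_duty_cycle(clk, 100000);
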
/**
* clk_is_match - check if two clk's point to the same hardware clock
* @p: clk compared against q
@@ -2454,12 +2631,13 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
if (!c)
return;
- seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %-3d\n",
+ seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
level * 3 + 1, "",
30 - level * 3, c->name,
c->enable_count, c->prepare_count, c->protect_count,
clk_core_get_rate(c), clk_core_get_accuracy(c),
- clk_core_get_phase(c));
+ clk_core_get_phase(c),
+ clk_core_get_scaled_duty_cycle(c, 100000));
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -2481,9 +2659,9 @@ static int clk_summary_show(struct seq_file *s, void *data)
struct clk_core *c;
struct hlist_head **lists = (struct hlist_head **)s->private;
- seq_puts(s, " enable prepare protect \n");
- seq_puts(s, " clock count count count rate accuracy phase\n");
- seq_puts(s, "----------------------------------------------------------------------------------------\n");
+ seq_puts(s, " enable prepare protect duty\n");
+ seq_puts(s, " clock count count count rate accuracy phase cycle\n");
+ seq_puts(s, "---------------------------------------------------------------------------------------------\n");
clk_prepare_lock();
@@ -2510,6 +2688,8 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+ seq_printf(s, "\"duty_cycle\": %u",
+ clk_core_get_scaled_duty_cycle(c, 100000));
}
static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
@@ -2571,6 +2751,7 @@ static const struct {
ENTRY(CLK_SET_RATE_UNGATE),
ENTRY(CLK_IS_CRITICAL),
ENTRY(CLK_OPS_PARENT_ENABLE),
+ ENTRY(CLK_DUTY_CYCLE_PARENT),
#undef ENTRY
};
@@ -2609,6 +2790,17 @@ static int possible_parents_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(possible_parents);
+static int clk_duty_cycle_show(struct seq_file *s, void *data)
+{
+ struct clk_core *core = s->private;
+ struct clk_duty *duty = &core->duty;
+
+ seq_printf(s, "%u/%u\n", duty->num, duty->den);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
+
static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
struct dentry *root;
@@ -2627,6 +2819,8 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
+ debugfs_create_file("clk_duty_cycle", 0444, root, core,
+ &clk_duty_cycle_fops);
if (core->num_parents > 1)
debugfs_create_file("clk_possible_parents", 0444, root, core,
@@ -2845,6 +3039,11 @@ static int __clk_core_init(struct clk_core *core)
core->phase = 0;
/*
+ * Set clk's duty cycle.
+ */
+ clk_core_update_duty_cycle_nolock(core);
+
+ /*
* Set clk's rate. The preferred method is to use .recalc_rate. For
* simple clocks and lazy developers the default fallback is to use the
* parent's rate. If a clock doesn't have a parent (or is orphaned)
@@ -2933,6 +3132,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
return clk;
}
+/* keep in sync with __clk_put */
void __clk_free_clk(struct clk *clk)
{
clk_prepare_lock();
@@ -3312,6 +3512,7 @@ int __clk_get(struct clk *clk)
return 1;
}
+/* keep in sync with __clk_free_clk */
void __clk_put(struct clk *clk)
{
struct module *owner;
@@ -3345,6 +3546,7 @@ void __clk_put(struct clk *clk)
module_put(owner);
+ kfree_const(clk->con_id);
kfree(clk);
}
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 7513411140b6..9ab3db8b3988 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -35,9 +35,6 @@ static struct clk *__of_clk_get(struct device_node *np, int index,
struct clk *clk;
int rc;
- if (index < 0)
- return ERR_PTR(-EINVAL);
-
rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
&clkspec);
if (rc)
@@ -199,7 +196,7 @@ struct clk *clk_get(struct device *dev, const char *con_id)
const char *dev_id = dev ? dev_name(dev) : NULL;
struct clk *clk;
- if (dev) {
+ if (dev && dev->of_node) {
clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id);
if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
return clk;
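
With the dev->of_node check, a device not instantiated from the device tree no longer makes a pointless trip through __of_clk_get_by_name(); it falls straight through to the clkdev table. Sketch of that non-DT path (connection name hypothetical):

	/* Resolved through a clk_lookup registered with clkdev_add()
	 * or CLKDEV_INIT(), not through a "clocks" DT property.
	 */
	struct clk *clk = clk_get(&pdev->dev, "uart");
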
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c
index caa8bd40692c..fc8e782d817b 100644
--- a/drivers/clk/imx/clk-imx51-imx53.c
+++ b/drivers/clk/imx/clk-imx51-imx53.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/sizes.h>
#include <soc/imx/revision.h>
#include <dt-bindings/clock/imx5-clock.h>
@@ -175,13 +176,13 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk[IMX5_CLK_PER_ROOT] = imx_clk_mux("per_root", MXC_CCM_CBCMR, 0, 1,
per_root_sel, ARRAY_SIZE(per_root_sel));
clk[IMX5_CLK_AHB] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
- clk[IMX5_CLK_AHB_MAX] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
- clk[IMX5_CLK_AIPS_TZ1] = imx_clk_gate2("aips_tz1", "ahb", MXC_CCM_CCGR0, 24);
- clk[IMX5_CLK_AIPS_TZ2] = imx_clk_gate2("aips_tz2", "ahb", MXC_CCM_CCGR0, 26);
- clk[IMX5_CLK_TMAX1] = imx_clk_gate2("tmax1", "ahb", MXC_CCM_CCGR1, 0);
- clk[IMX5_CLK_TMAX2] = imx_clk_gate2("tmax2", "ahb", MXC_CCM_CCGR1, 2);
- clk[IMX5_CLK_TMAX3] = imx_clk_gate2("tmax3", "ahb", MXC_CCM_CCGR1, 4);
- clk[IMX5_CLK_SPBA] = imx_clk_gate2("spba", "ipg", MXC_CCM_CCGR5, 0);
+ clk[IMX5_CLK_AHB_MAX] = imx_clk_gate2_flags("ahb_max", "ahb", MXC_CCM_CCGR0, 28, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_AIPS_TZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", MXC_CCM_CCGR0, 24, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_AIPS_TZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", MXC_CCM_CCGR0, 26, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_TMAX1] = imx_clk_gate2_flags("tmax1", "ahb", MXC_CCM_CCGR1, 0, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_TMAX2] = imx_clk_gate2_flags("tmax2", "ahb", MXC_CCM_CCGR1, 2, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_TMAX3] = imx_clk_gate2_flags("tmax3", "ahb", MXC_CCM_CCGR1, 4, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_SPBA] = imx_clk_gate2_flags("spba", "ipg", MXC_CCM_CCGR5, 0, CLK_IS_CRITICAL);
clk[IMX5_CLK_IPG] = imx_clk_divider("ipg", "ahb", MXC_CCM_CBCDR, 8, 2);
clk[IMX5_CLK_AXI_A] = imx_clk_divider("axi_a", "main_bus", MXC_CCM_CBCDR, 16, 3);
clk[IMX5_CLK_AXI_B] = imx_clk_divider("axi_b", "main_bus", MXC_CCM_CBCDR, 19, 3);
@@ -252,8 +253,8 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk[IMX5_CLK_ECSPI2_PER_GATE] = imx_clk_gate2("ecspi2_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 24);
clk[IMX5_CLK_CSPI_IPG_GATE] = imx_clk_gate2("cspi_ipg_gate", "ipg", MXC_CCM_CCGR4, 26);
clk[IMX5_CLK_SDMA_GATE] = imx_clk_gate2("sdma_gate", "ipg", MXC_CCM_CCGR4, 30);
- clk[IMX5_CLK_EMI_FAST_GATE] = imx_clk_gate2("emi_fast_gate", "dummy", MXC_CCM_CCGR5, 14);
- clk[IMX5_CLK_EMI_SLOW_GATE] = imx_clk_gate2("emi_slow_gate", "emi_slow_podf", MXC_CCM_CCGR5, 16);
+ clk[IMX5_CLK_EMI_FAST_GATE] = imx_clk_gate2_flags("emi_fast_gate", "dummy", MXC_CCM_CCGR5, 14, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_EMI_SLOW_GATE] = imx_clk_gate2_flags("emi_slow_gate", "emi_slow_podf", MXC_CCM_CCGR5, 16, CLK_IS_CRITICAL);
clk[IMX5_CLK_IPU_SEL] = imx_clk_mux("ipu_sel", MXC_CCM_CBCMR, 6, 2, ipu_sel, ARRAY_SIZE(ipu_sel));
clk[IMX5_CLK_IPU_GATE] = imx_clk_gate2("ipu_gate", "ipu_sel", MXC_CCM_CCGR5, 10);
clk[IMX5_CLK_NFC_GATE] = imx_clk_gate2("nfc_gate", "nfc_podf", MXC_CCM_CCGR5, 20);
@@ -267,7 +268,7 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk[IMX5_CLK_VPU_SEL] = imx_clk_mux("vpu_sel", MXC_CCM_CBCMR, 14, 2, vpu_sel, ARRAY_SIZE(vpu_sel));
clk[IMX5_CLK_VPU_GATE] = imx_clk_gate2("vpu_gate", "vpu_sel", MXC_CCM_CCGR5, 6);
clk[IMX5_CLK_VPU_REFERENCE_GATE] = imx_clk_gate2("vpu_reference_gate", "osc", MXC_CCM_CCGR5, 8);
- clk[IMX5_CLK_GPC_DVFS] = imx_clk_gate2("gpc_dvfs", "dummy", MXC_CCM_CCGR5, 24);
+ clk[IMX5_CLK_GPC_DVFS] = imx_clk_gate2_flags("gpc_dvfs", "dummy", MXC_CCM_CCGR5, 24, CLK_IS_CRITICAL);
clk[IMX5_CLK_SSI_APM] = imx_clk_mux("ssi_apm", MXC_CCM_CSCMR1, 8, 2, ssi_apm_sels, ARRAY_SIZE(ssi_apm_sels));
clk[IMX5_CLK_SSI1_ROOT_SEL] = imx_clk_mux("ssi1_root_sel", MXC_CCM_CSCMR1, 14, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
@@ -316,21 +317,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
/* move usb phy clk to 24MHz */
clk_set_parent(clk[IMX5_CLK_USB_PHY_SEL], clk[IMX5_CLK_OSC]);
-
- clk_prepare_enable(clk[IMX5_CLK_GPC_DVFS]);
- clk_prepare_enable(clk[IMX5_CLK_AHB_MAX]); /* esdhc3 */
- clk_prepare_enable(clk[IMX5_CLK_AIPS_TZ1]);
- clk_prepare_enable(clk[IMX5_CLK_AIPS_TZ2]); /* fec */
- clk_prepare_enable(clk[IMX5_CLK_SPBA]);
- clk_prepare_enable(clk[IMX5_CLK_EMI_FAST_GATE]); /* fec */
- clk_prepare_enable(clk[IMX5_CLK_EMI_SLOW_GATE]); /* eim */
- clk_prepare_enable(clk[IMX5_CLK_MIPI_HSC1_GATE]);
- clk_prepare_enable(clk[IMX5_CLK_MIPI_HSC2_GATE]);
- clk_prepare_enable(clk[IMX5_CLK_MIPI_ESC_GATE]);
- clk_prepare_enable(clk[IMX5_CLK_MIPI_HSP_GATE]);
- clk_prepare_enable(clk[IMX5_CLK_TMAX1]);
- clk_prepare_enable(clk[IMX5_CLK_TMAX2]); /* esdhc2, fec */
- clk_prepare_enable(clk[IMX5_CLK_TMAX3]); /* esdhc1, esdhc4 */
}
static void __init mx50_clocks_init(struct device_node *np)
@@ -442,10 +428,10 @@ static void __init mx51_clocks_init(struct device_node *np)
clk[IMX5_CLK_ESDHC4_PER_GATE] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
clk[IMX5_CLK_USB_PHY_GATE] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0);
clk[IMX5_CLK_HSI2C_GATE] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22);
- clk[IMX5_CLK_MIPI_HSC1_GATE] = imx_clk_gate2("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6);
- clk[IMX5_CLK_MIPI_HSC2_GATE] = imx_clk_gate2("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8);
- clk[IMX5_CLK_MIPI_ESC_GATE] = imx_clk_gate2("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10);
- clk[IMX5_CLK_MIPI_HSP_GATE] = imx_clk_gate2("mipi_hsp_gate", "ipg", MXC_CCM_CCGR4, 12);
+ clk[IMX5_CLK_MIPI_HSC1_GATE] = imx_clk_gate2_flags("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_MIPI_HSC2_GATE] = imx_clk_gate2_flags("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_MIPI_ESC_GATE] = imx_clk_gate2_flags("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10, CLK_IS_CRITICAL);
+ clk[IMX5_CLK_MIPI_HSP_GATE] = imx_clk_gate2_flags("mipi_hsp_gate", "ipg", MXC_CCM_CCGR4, 12, CLK_IS_CRITICAL);
clk[IMX5_CLK_SPDIF_XTAL_SEL] = imx_clk_mux("spdif_xtal_sel", MXC_CCM_CSCMR1, 2, 2,
mx51_spdif_xtal_sel, ARRAY_SIZE(mx51_spdif_xtal_sel));
clk[IMX5_CLK_SPDIF1_SEL] = imx_clk_mux("spdif1_sel", MXC_CCM_CSCMR2, 2, 2,
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index b9ea7037e193..8c7c2fcb8d94 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -65,7 +65,7 @@ static const char *ipg_per_sels[] = { "ipg", "osc", };
static const char *ecspi_sels[] = { "pll3_60m", "osc", };
static const char *can_sels[] = { "pll3_60m", "osc", "pll3_80m", };
static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
- "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
+ "video_27m", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
"ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio_div", };
static const char *cko2_sels[] = {
"mmdc_ch0_axi", "mmdc_ch1_axi", "usdhc4", "usdhc1",
@@ -96,12 +96,6 @@ static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
static struct clk *clk[IMX6QDL_CLK_END];
static struct clk_onecell_data clk_data;
-static unsigned int const clks_init_on[] __initconst = {
- IMX6QDL_CLK_MMDC_CH0_AXI,
- IMX6QDL_CLK_ROM,
- IMX6QDL_CLK_ARM,
-};
-
static struct clk_div_table clk_enet_ref_table[] = {
{ .val = 0, .div = 20, },
{ .val = 1, .div = 10, },
@@ -417,7 +411,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *anatop_base, *base;
- int i;
int ret;
clk[IMX6QDL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
@@ -794,7 +787,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "mlb_podf", base + 0x74, 18);
else
clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "axi", base + 0x74, 18);
- clk[IMX6QDL_CLK_MMDC_CH0_AXI] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20);
+ clk[IMX6QDL_CLK_MMDC_CH0_AXI] = imx_clk_gate2_flags("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20, CLK_IS_CRITICAL);
clk[IMX6QDL_CLK_MMDC_CH1_AXI] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22);
clk[IMX6QDL_CLK_OCRAM] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28);
clk[IMX6QDL_CLK_OPENVG_AXI] = imx_clk_gate2("openvg_axi", "axi", base + 0x74, 30);
@@ -808,7 +801,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_GPMI_BCH] = imx_clk_gate2("gpmi_bch", "usdhc4", base + 0x78, 26);
clk[IMX6QDL_CLK_GPMI_IO] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
clk[IMX6QDL_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
- clk[IMX6QDL_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
+ clk[IMX6QDL_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL);
clk[IMX6QDL_CLK_SATA] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4);
clk[IMX6QDL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
clk[IMX6QDL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
@@ -878,9 +871,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
*/
clk_set_parent(clk[IMX6QDL_CLK_ENFC_SEL], clk[IMX6QDL_CLK_PLL2_PFD2_396M]);
- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
- clk_prepare_enable(clk[clks_init_on[i]]);
-
if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
clk_prepare_enable(clk[IMX6QDL_CLK_USBPHY1_GATE]);
clk_prepare_enable(clk[IMX6QDL_CLK_USBPHY2_GATE]);
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index 66b1dd1cfad0..eb6bcbf345a3 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -104,10 +104,6 @@ static struct clk_onecell_data clk_data;
static void __iomem *ccm_base;
static void __iomem *anatop_base;
-static const u32 clks_init_on[] __initconst = {
- IMX6SL_CLK_IPG, IMX6SL_CLK_ARM, IMX6SL_CLK_MMDC_ROOT,
-};
-
/*
* ERR005311 CCM: After exit from WAIT mode, unwanted interrupt(s) taken
* during WAIT mode entry process could cause cache memory
@@ -195,7 +191,6 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *base;
- int i;
int ret;
clks[IMX6SL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
@@ -426,13 +421,6 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
pr_warn("%s: failed to set AHB clock rate %d!\n",
__func__, ret);
- /*
- * Make sure those always on clocks are enabled to maintain the correct
- * usecount and enabling/disabling of parent PLLs.
- */
- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
- clk_prepare_enable(clks[clks_init_on[i]]);
-
if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
clk_prepare_enable(clks[IMX6SL_CLK_USBPHY1_GATE]);
clk_prepare_enable(clks[IMX6SL_CLK_USBPHY2_GATE]);
diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
index 3651c77fbabe..52379ee49aec 100644
--- a/drivers/clk/imx/clk-imx6sll.c
+++ b/drivers/clk/imx/clk-imx6sll.c
@@ -92,6 +92,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sll-anatop");
base = of_iomap(np, 0);
+ of_node_put(np);
WARN_ON(!base);
/* Do not bypass PLLs initially */
@@ -253,6 +254,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
clks[IMX6SLL_CLK_DCP] = imx_clk_gate2("dcp", "ahb", base + 0x68, 10);
clks[IMX6SLL_CLK_UART2_IPG] = imx_clk_gate2("uart2_ipg", "ipg", base + 0x68, 28);
clks[IMX6SLL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28);
+ clks[IMX6SLL_CLK_GPIO2] = imx_clk_gate2("gpio2", "ipg", base + 0x68, 30);
/* CCGR1 */
clks[IMX6SLL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
@@ -267,13 +269,17 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
clks[IMX6SLL_CLK_GPT_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22);
clks[IMX6SLL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24);
clks[IMX6SLL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serail", "uart_podf", base + 0x6c, 24);
+ clks[IMX6SLL_CLK_GPIO1] = imx_clk_gate2("gpio1", "ipg", base + 0x6c, 26);
+ clks[IMX6SLL_CLK_GPIO5] = imx_clk_gate2("gpio5", "ipg", base + 0x6c, 30);
/* CCGR2 */
+ clks[IMX6SLL_CLK_GPIO6] = imx_clk_gate2("gpio6", "ipg", base + 0x70, 0);
clks[IMX6SLL_CLK_CSI] = imx_clk_gate2("csi", "axi", base + 0x70, 2);
clks[IMX6SLL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6);
clks[IMX6SLL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8);
clks[IMX6SLL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
clks[IMX6SLL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
+ clks[IMX6SLL_CLK_GPIO3] = imx_clk_gate2("gpio3", "ipg", base + 0x70, 26);
clks[IMX6SLL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28);
clks[IMX6SLL_CLK_PXP] = imx_clk_gate2("pxp", "axi", base + 0x70, 30);
@@ -283,6 +289,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
clks[IMX6SLL_CLK_EPDC_AXI] = imx_clk_gate2("epdc_aclk", "axi", base + 0x74, 4);
clks[IMX6SLL_CLK_EPDC_PIX] = imx_clk_gate2("epdc_pix", "epdc_podf", base + 0x74, 4);
clks[IMX6SLL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10);
+ clks[IMX6SLL_CLK_GPIO4] = imx_clk_gate2("gpio4", "ipg", base + 0x74, 12);
clks[IMX6SLL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16);
clks[IMX6SLL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
clks[IMX6SLL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index 10c771b91ef6..d9f2890ffe62 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -92,14 +92,6 @@ static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
static struct clk *clks[IMX6SX_CLK_CLK_END];
static struct clk_onecell_data clk_data;
-static int const clks_init_on[] __initconst = {
- IMX6SX_CLK_AIPS_TZ1, IMX6SX_CLK_AIPS_TZ2, IMX6SX_CLK_AIPS_TZ3,
- IMX6SX_CLK_IPMUX1, IMX6SX_CLK_IPMUX2, IMX6SX_CLK_IPMUX3,
- IMX6SX_CLK_WAKEUP, IMX6SX_CLK_MMDC_P0_FAST, IMX6SX_CLK_MMDC_P0_IPG,
- IMX6SX_CLK_ROM, IMX6SX_CLK_ARM, IMX6SX_CLK_IPG, IMX6SX_CLK_OCRAM,
- IMX6SX_CLK_PER2_MAIN, IMX6SX_CLK_PERCLK, IMX6SX_CLK_TZASC1,
-};
-
static const struct clk_div_table clk_enet_ref_table[] = {
{ .val = 0, .div = 20, },
{ .val = 1, .div = 10, },
@@ -142,7 +134,6 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *base;
- int i;
clks[IMX6SX_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
@@ -332,7 +323,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_QSPI1_PODF] = imx_clk_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3);
clks[IMX6SX_CLK_EIM_SLOW_PODF] = imx_clk_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3);
clks[IMX6SX_CLK_LCDIF2_PODF] = imx_clk_divider("lcdif2_podf", "lcdif2_pred", base + 0x1c, 20, 3);
- clks[IMX6SX_CLK_PERCLK] = imx_clk_divider("perclk", "perclk_sel", base + 0x1c, 0, 6);
+ clks[IMX6SX_CLK_PERCLK] = imx_clk_divider_flags("perclk", "perclk_sel", base + 0x1c, 0, 6, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_VID_PODF] = imx_clk_divider("vid_podf", "vid_sel", base + 0x20, 24, 2);
clks[IMX6SX_CLK_CAN_PODF] = imx_clk_divider("can_podf", "can_sel", base + 0x20, 2, 6);
clks[IMX6SX_CLK_USDHC4_PODF] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3);
@@ -380,8 +371,8 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
/* name parent_name reg shift */
/* CCGR0 */
- clks[IMX6SX_CLK_AIPS_TZ1] = imx_clk_gate2("aips_tz1", "ahb", base + 0x68, 0);
- clks[IMX6SX_CLK_AIPS_TZ2] = imx_clk_gate2("aips_tz2", "ahb", base + 0x68, 2);
+ clks[IMX6SX_CLK_AIPS_TZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_AIPS_TZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_APBH_DMA] = imx_clk_gate2("apbh_dma", "usdhc3", base + 0x68, 4);
clks[IMX6SX_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
clks[IMX6SX_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
@@ -394,7 +385,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_podf", base + 0x68, 20);
clks[IMX6SX_CLK_DCIC1] = imx_clk_gate2("dcic1", "display_podf", base + 0x68, 24);
clks[IMX6SX_CLK_DCIC2] = imx_clk_gate2("dcic2", "display_podf", base + 0x68, 26);
- clks[IMX6SX_CLK_AIPS_TZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x68, 30);
+ clks[IMX6SX_CLK_AIPS_TZ3] = imx_clk_gate2_flags("aips_tz3", "ahb", base + 0x68, 30, CLK_IS_CRITICAL);
/* CCGR1 */
clks[IMX6SX_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
@@ -407,10 +398,11 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_ESAI_EXTAL] = imx_clk_gate2_shared("esai_extal", "esai_podf", base + 0x6c, 16, &share_count_esai);
clks[IMX6SX_CLK_ESAI_IPG] = imx_clk_gate2_shared("esai_ipg", "ahb", base + 0x6c, 16, &share_count_esai);
clks[IMX6SX_CLK_ESAI_MEM] = imx_clk_gate2_shared("esai_mem", "ahb", base + 0x6c, 16, &share_count_esai);
- clks[IMX6SX_CLK_WAKEUP] = imx_clk_gate2("wakeup", "ipg", base + 0x6c, 18);
+ clks[IMX6SX_CLK_WAKEUP] = imx_clk_gate2_flags("wakeup", "ipg", base + 0x6c, 18, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_GPT_BUS] = imx_clk_gate2("gpt_bus", "perclk", base + 0x6c, 20);
clks[IMX6SX_CLK_GPT_SERIAL] = imx_clk_gate2("gpt_serial", "perclk", base + 0x6c, 22);
clks[IMX6SX_CLK_GPU] = imx_clk_gate2("gpu", "gpu_core_podf", base + 0x6c, 26);
+ clks[IMX6SX_CLK_OCRAM_S] = imx_clk_gate2("ocram_s", "ahb", base + 0x6c, 28);
clks[IMX6SX_CLK_CANFD] = imx_clk_gate2("canfd", "can_podf", base + 0x6c, 30);
/* CCGR2 */
@@ -420,10 +412,10 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
clks[IMX6SX_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
clks[IMX6SX_CLK_IOMUXC] = imx_clk_gate2("iomuxc", "lcdif1_podf", base + 0x70, 14);
- clks[IMX6SX_CLK_IPMUX1] = imx_clk_gate2("ipmux1", "ahb", base + 0x70, 16);
- clks[IMX6SX_CLK_IPMUX2] = imx_clk_gate2("ipmux2", "ahb", base + 0x70, 18);
- clks[IMX6SX_CLK_IPMUX3] = imx_clk_gate2("ipmux3", "ahb", base + 0x70, 20);
- clks[IMX6SX_CLK_TZASC1] = imx_clk_gate2("tzasc1", "mmdc_podf", base + 0x70, 22);
+ clks[IMX6SX_CLK_IPMUX1] = imx_clk_gate2_flags("ipmux1", "ahb", base + 0x70, 16, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_IPMUX2] = imx_clk_gate2_flags("ipmux2", "ahb", base + 0x70, 18, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_IPMUX3] = imx_clk_gate2_flags("ipmux3", "ahb", base + 0x70, 20, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_TZASC1] = imx_clk_gate2_flags("tzasc1", "mmdc_podf", base + 0x70, 22, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "display_podf", base + 0x70, 28);
clks[IMX6SX_CLK_PXP_AXI] = imx_clk_gate2("pxp_axi", "display_podf", base + 0x70, 30);
@@ -437,15 +429,15 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_div_sel", base + 0x74, 12);
clks[IMX6SX_CLK_QSPI1] = imx_clk_gate2("qspi1", "qspi1_podf", base + 0x74, 14);
clks[IMX6SX_CLK_MLB] = imx_clk_gate2("mlb", "ahb", base + 0x74, 18);
- clks[IMX6SX_CLK_MMDC_P0_FAST] = imx_clk_gate2("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20);
- clks[IMX6SX_CLK_MMDC_P0_IPG] = imx_clk_gate2("mmdc_p0_ipg", "ipg", base + 0x74, 24);
- clks[IMX6SX_CLK_OCRAM] = imx_clk_gate2("ocram", "ocram_podf", base + 0x74, 28);
+ clks[IMX6SX_CLK_MMDC_P0_FAST] = imx_clk_gate2_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_OCRAM] = imx_clk_gate2_flags("ocram", "ocram_podf", base + 0x74, 28, CLK_IS_CRITICAL);
/* CCGR4 */
clks[IMX6SX_CLK_PCIE_AXI] = imx_clk_gate2("pcie_axi", "display_podf", base + 0x78, 0);
clks[IMX6SX_CLK_QSPI2] = imx_clk_gate2("qspi2", "qspi2_podf", base + 0x78, 10);
clks[IMX6SX_CLK_PER1_BCH] = imx_clk_gate2("per1_bch", "usdhc3", base + 0x78, 12);
- clks[IMX6SX_CLK_PER2_MAIN] = imx_clk_gate2("per2_main", "ahb", base + 0x78, 14);
+ clks[IMX6SX_CLK_PER2_MAIN] = imx_clk_gate2_flags("per2_main", "ahb", base + 0x78, 14, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16);
clks[IMX6SX_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18);
clks[IMX6SX_CLK_PWM3] = imx_clk_gate2("pwm3", "perclk", base + 0x78, 20);
@@ -456,7 +448,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
/* CCGR5 */
- clks[IMX6SX_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
+ clks[IMX6SX_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
clks[IMX6SX_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
clks[IMX6SX_CLK_AUDIO] = imx_clk_gate2_shared("audio", "audio_podf", base + 0x7c, 14, &share_count_audio);
@@ -502,9 +494,6 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clk_data.clk_num = ARRAY_SIZE(clks);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
- clk_prepare_enable(clks[clks_init_on[i]]);
-
if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
clk_prepare_enable(clks[IMX6SX_CLK_USBPHY1_GATE]);
clk_prepare_enable(clks[IMX6SX_CLK_USBPHY2_GATE]);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index ba563ba50b40..361b43f9742e 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -79,12 +79,6 @@ static const char *cko_sels[] = { "cko1", "cko2", };
static struct clk *clks[IMX6UL_CLK_END];
static struct clk_onecell_data clk_data;
-static int const clks_init_on[] __initconst = {
- IMX6UL_CLK_AIPSTZ1, IMX6UL_CLK_AIPSTZ2,
- IMX6UL_CLK_AXI, IMX6UL_CLK_ARM, IMX6UL_CLK_ROM,
- IMX6UL_CLK_MMDC_P0_FAST, IMX6UL_CLK_MMDC_P0_IPG,
-};
-
static const struct clk_div_table clk_enet_ref_table[] = {
{ .val = 0, .div = 20, },
{ .val = 1, .div = 10, },
@@ -129,7 +123,6 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *base;
- int i;
clks[IMX6UL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
@@ -142,6 +135,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-anatop");
base = of_iomap(np, 0);
+ of_node_put(np);
WARN_ON(!base);
clks[IMX6UL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
@@ -336,8 +330,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
/* CCGR0 */
- clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2("aips_tz1", "ahb", base + 0x68, 0);
- clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2("aips_tz2", "ahb", base + 0x68, 2);
+ clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL);
+ clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL);
clks[IMX6UL_CLK_APBHDMA] = imx_clk_gate2("apbh_dma", "bch_podf", base + 0x68, 4);
clks[IMX6UL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
clks[IMX6UL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
@@ -360,6 +354,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28);
if (clk_on_imx6ull())
clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x80, 18);
+ clks[IMX6UL_CLK_GPIO2] = imx_clk_gate2("gpio2", "ipg", base + 0x68, 30);
/* CCGR1 */
clks[IMX6UL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
@@ -376,6 +371,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_GPT1_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22);
clks[IMX6UL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24);
clks[IMX6UL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serial", "uart_podf", base + 0x6c, 24);
+ clks[IMX6UL_CLK_GPIO1] = imx_clk_gate2("gpio1", "ipg", base + 0x6c, 26);
+ clks[IMX6UL_CLK_GPIO5] = imx_clk_gate2("gpio5", "ipg", base + 0x6c, 30);
/* CCGR2 */
if (clk_on_imx6ull()) {
@@ -389,6 +386,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
clks[IMX6UL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
clks[IMX6UL_CLK_IOMUXC] = imx_clk_gate2("iomuxc", "lcdif_podf", base + 0x70, 14);
+ clks[IMX6UL_CLK_GPIO3] = imx_clk_gate2("gpio3", "ipg", base + 0x70, 26);
clks[IMX6UL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28);
clks[IMX6UL_CLK_PXP] = imx_clk_gate2("pxp", "axi", base + 0x70, 30);
@@ -405,11 +403,12 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_UART6_IPG] = imx_clk_gate2("uart6_ipg", "ipg", base + 0x74, 6);
clks[IMX6UL_CLK_UART6_SERIAL] = imx_clk_gate2("uart6_serial", "uart_podf", base + 0x74, 6);
clks[IMX6UL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10);
+ clks[IMX6UL_CLK_GPIO4] = imx_clk_gate2("gpio4", "ipg", base + 0x74, 12);
clks[IMX6UL_CLK_QSPI] = imx_clk_gate2("qspi1", "qspi1_podf", base + 0x74, 14);
clks[IMX6UL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16);
- clks[IMX6UL_CLK_MMDC_P0_FAST] = imx_clk_gate("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20);
- clks[IMX6UL_CLK_MMDC_P0_IPG] = imx_clk_gate2("mmdc_p0_ipg", "ipg", base + 0x74, 24);
- clks[IMX6UL_CLK_AXI] = imx_clk_gate("axi", "axi_podf", base + 0x74, 28);
+ clks[IMX6UL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
+ clks[IMX6UL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
+ clks[IMX6UL_CLK_AXI] = imx_clk_gate_flags("axi", "axi_podf", base + 0x74, 28, CLK_IS_CRITICAL);
/* CCGR4 */
clks[IMX6UL_CLK_PER_BCH] = imx_clk_gate2("per_bch", "bch_podf", base + 0x78, 12);
@@ -423,7 +422,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "bch_podf", base + 0x78, 30);
/* CCGR5 */
- clks[IMX6UL_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
+ clks[IMX6UL_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL);
clks[IMX6UL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
clks[IMX6UL_CLK_KPP] = imx_clk_gate2("kpp", "ipg", base + 0x7c, 8);
clks[IMX6UL_CLK_WDOG2] = imx_clk_gate2("wdog2", "ipg", base + 0x7c, 10);
@@ -497,10 +496,6 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_rate(clks[IMX6UL_CLK_ENET2_REF], 50000000);
clk_set_rate(clks[IMX6UL_CLK_CSI], 24000000);
- /* keep all the clks on just for bringup */
- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
- clk_prepare_enable(clks[clks_init_on[i]]);
-
if (clk_on_imx6ull())
clk_prepare_enable(clks[IMX6UL_CLK_AIPSTZ3]);
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 27217a7ea17e..881b772c4ac9 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -797,6 +797,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_gate4("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0);
clks[IMX7D_OCOTP_CLK] = imx_clk_gate4("ocotp_clk", "ipg_root_clk", base + 0x4230, 0);
clks[IMX7D_SNVS_CLK] = imx_clk_gate4("snvs_clk", "ipg_root_clk", base + 0x4250, 0);
+ clks[IMX7D_MU_ROOT_CLK] = imx_clk_gate4("mu_root_clk", "ipg_root_clk", base + 0x4270, 0);
clks[IMX7D_CAAM_CLK] = imx_clk_gate4("caam_clk", "ipg_root_clk", base + 0x4240, 0);
clks[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4690, 0);
clks[IMX7D_SDMA_CORE_CLK] = imx_clk_gate4("sdma_root_clk", "ahb_root_clk", base + 0x4480, 0);
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index 32fcc75f6f77..4479c102e899 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -134,7 +134,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
"i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4740_CLK_EXT, JZ4740_CLK_PLL_HALF, -1, -1 },
.mux = { CGU_REG_CPCCR, 31, 1 },
- .div = { CGU_REG_I2SCDR, 0, 1, 8, -1, -1, -1 },
+ .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 },
.gate = { CGU_REG_CLKGR, 6 },
},
@@ -161,7 +161,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
},
[JZ4740_CLK_UDC] = {
- "udc", CGU_CLK_MUX | CGU_CLK_DIV,
+ "udc", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4740_CLK_EXT, JZ4740_CLK_PLL_HALF, -1, -1 },
.mux = { CGU_REG_CPCCR, 29, 1 },
.div = { CGU_REG_CPCCR, 23, 1, 6, -1, -1, -1 },
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 815659eebea3..efaa70f682b4 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -1,13 +1,19 @@
config COMMON_CLK_AMLOGIC
bool
- depends on OF
depends on ARCH_MESON || COMPILE_TEST
+ select COMMON_CLK_REGMAP_MESON
+
+config COMMON_CLK_AMLOGIC_AUDIO
+ bool
+ depends on ARCH_MESON || COMPILE_TEST
+ select COMMON_CLK_AMLOGIC
config COMMON_CLK_MESON_AO
bool
depends on OF
depends on ARCH_MESON || COMPILE_TEST
select COMMON_CLK_REGMAP_MESON
+ select RESET_CONTROLLER
config COMMON_CLK_REGMAP_MESON
bool
@@ -15,9 +21,8 @@ config COMMON_CLK_REGMAP_MESON
config COMMON_CLK_MESON8B
bool
- depends on COMMON_CLK_AMLOGIC
+ select COMMON_CLK_AMLOGIC
select RESET_CONTROLLER
- select COMMON_CLK_REGMAP_MESON
help
Support for the clock controller on AmLogic S802 (Meson8),
S805 (Meson8b) and S812 (Meson8m2) devices. Say Y if you
@@ -25,10 +30,8 @@ config COMMON_CLK_MESON8B
config COMMON_CLK_GXBB
bool
- depends on COMMON_CLK_AMLOGIC
- select RESET_CONTROLLER
+ select COMMON_CLK_AMLOGIC
select COMMON_CLK_MESON_AO
- select COMMON_CLK_REGMAP_MESON
select MFD_SYSCON
help
Support for the clock controller on AmLogic S905 devices, aka gxbb.
@@ -36,11 +39,18 @@ config COMMON_CLK_GXBB
config COMMON_CLK_AXG
bool
- depends on COMMON_CLK_AMLOGIC
- select RESET_CONTROLLER
+ select COMMON_CLK_AMLOGIC
select COMMON_CLK_MESON_AO
- select COMMON_CLK_REGMAP_MESON
select MFD_SYSCON
help
Support for the clock controller on AmLogic A113D devices, aka axg.
Say Y if you want peripherals and CPU frequency scaling to work.
+
+config COMMON_CLK_AXG_AUDIO
+ tristate "Meson AXG Audio Clock Controller Driver"
+ depends on COMMON_CLK_AXG
+ select COMMON_CLK_AMLOGIC_AUDIO
+ select MFD_SYSCON
+ help
+ Support for the audio clock controller on AmLogic A113D devices,
+	  aka axg. Say Y if you want the audio subsystem to work.
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index d0d13aeb369a..72ec8c40d848 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -2,9 +2,11 @@
# Makefile for Meson specific clk
#
-obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-audio-divider.o
+obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-phase.o
+obj-$(CONFIG_COMMON_CLK_AMLOGIC_AUDIO) += clk-triphase.o sclk-div.o
obj-$(CONFIG_COMMON_CLK_MESON_AO) += meson-aoclk.o
obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-32k.o
obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
+obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
obj-$(CONFIG_COMMON_CLK_REGMAP_MESON) += clk-regmap.o
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
new file mode 100644
index 000000000000..a0ed41e73bde
--- /dev/null
+++ b/drivers/clk/meson/axg-audio.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "clkc-audio.h"
+#include "axg-audio.h"
+
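+/* Number of optional input clocks the controller may get from the DT */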
+#define AXG_MST_IN_COUNT 8
+#define AXG_SLV_SCLK_COUNT 10
+#define AXG_SLV_LRCLK_COUNT 10
+
+#define AXG_AUD_GATE(_name, _reg, _bit, _pname, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct clk_regmap_gate_data){ \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "axg_"#_name, \
+ .ops = &clk_regmap_gate_ops, \
+ .parent_names = (const char *[]){ _pname }, \
+ .num_parents = 1, \
+ .flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
+ }, \
+}
+
+#define AXG_AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pnames, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct clk_regmap_mux_data){ \
+ .offset = (_reg), \
+ .mask = (_mask), \
+ .shift = (_shift), \
+ .flags = (_dflags), \
+ }, \
+ .hw.init = &(struct clk_init_data){ \
+ .name = "axg_"#_name, \
+ .ops = &clk_regmap_mux_ops, \
+ .parent_names = (_pnames), \
+ .num_parents = ARRAY_SIZE(_pnames), \
+ .flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
+ }, \
+}
+
+#define AXG_AUD_DIV(_name, _reg, _shift, _width, _dflags, _pname, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct clk_regmap_div_data){ \
+ .offset = (_reg), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .flags = (_dflags), \
+ }, \
+ .hw.init = &(struct clk_init_data){ \
+ .name = "axg_"#_name, \
+ .ops = &clk_regmap_divider_ops, \
+ .parent_names = (const char *[]) { _pname }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
+#define AXG_PCLK_GATE(_name, _bit) \
+ AXG_AUD_GATE(_name, AUDIO_CLK_GATE_EN, _bit, "axg_audio_pclk", 0)
+
+/* Audio peripheral clocks */
+static AXG_PCLK_GATE(ddr_arb, 0);
+static AXG_PCLK_GATE(pdm, 1);
+static AXG_PCLK_GATE(tdmin_a, 2);
+static AXG_PCLK_GATE(tdmin_b, 3);
+static AXG_PCLK_GATE(tdmin_c, 4);
+static AXG_PCLK_GATE(tdmin_lb, 5);
+static AXG_PCLK_GATE(tdmout_a, 6);
+static AXG_PCLK_GATE(tdmout_b, 7);
+static AXG_PCLK_GATE(tdmout_c, 8);
+static AXG_PCLK_GATE(frddr_a, 9);
+static AXG_PCLK_GATE(frddr_b, 10);
+static AXG_PCLK_GATE(frddr_c, 11);
+static AXG_PCLK_GATE(toddr_a, 12);
+static AXG_PCLK_GATE(toddr_b, 13);
+static AXG_PCLK_GATE(toddr_c, 14);
+static AXG_PCLK_GATE(loopback, 15);
+static AXG_PCLK_GATE(spdifin, 16);
+static AXG_PCLK_GATE(spdifout, 17);
+static AXG_PCLK_GATE(resample, 18);
+static AXG_PCLK_GATE(power_detect, 19);
+
+/* Audio Master Clocks */
+static const char * const mst_mux_parent_names[] = {
+ "axg_mst_in0", "axg_mst_in1", "axg_mst_in2", "axg_mst_in3",
+ "axg_mst_in4", "axg_mst_in5", "axg_mst_in6", "axg_mst_in7",
+};
+
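+/* Each master clock path is a mux (bits 26:24), a 16-bit divider and a gate (bit 31) sharing one register */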
+#define AXG_MST_MCLK_MUX(_name, _reg) \
+ AXG_AUD_MUX(_name##_sel, _reg, 0x7, 24, CLK_MUX_ROUND_CLOSEST, \
+ mst_mux_parent_names, CLK_SET_RATE_PARENT)
+
+static AXG_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AXG_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AXG_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AXG_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AXG_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AXG_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AXG_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AXG_MST_MCLK_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AXG_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AXG_MST_MCLK_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+
+#define AXG_MST_MCLK_DIV(_name, _reg) \
+ AXG_AUD_DIV(_name##_div, _reg, 0, 16, CLK_DIVIDER_ROUND_CLOSEST, \
+ "axg_"#_name"_sel", CLK_SET_RATE_PARENT) \
+
+static AXG_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AXG_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AXG_MST_MCLK_DIV(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AXG_MST_MCLK_DIV(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AXG_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AXG_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AXG_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AXG_MST_MCLK_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AXG_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AXG_MST_MCLK_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+
+#define AXG_MST_MCLK_GATE(_name, _reg) \
+ AXG_AUD_GATE(_name, _reg, 31, "axg_"#_name"_div", \
+ CLK_SET_RATE_PARENT)
+
+static AXG_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AXG_MST_MCLK_GATE(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AXG_MST_MCLK_GATE(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AXG_MST_MCLK_GATE(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AXG_MST_MCLK_GATE(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AXG_MST_MCLK_GATE(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AXG_MST_MCLK_GATE(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AXG_MST_MCLK_GATE(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AXG_MST_MCLK_GATE(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AXG_MST_MCLK_GATE(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+
+/* Sample Clocks */
+#define AXG_MST_SCLK_PRE_EN(_name, _reg) \
+ AXG_AUD_GATE(mst_##_name##_sclk_pre_en, _reg, 31, \
+ "axg_mst_"#_name"_mclk", 0)
+
+static AXG_MST_SCLK_PRE_EN(a, AUDIO_MST_A_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(b, AUDIO_MST_B_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(c, AUDIO_MST_C_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(d, AUDIO_MST_D_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0);
+static AXG_MST_SCLK_PRE_EN(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AXG_AUD_SCLK_DIV(_name, _reg, _div_shift, _div_width, \
+ _hi_shift, _hi_width, _pname, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct meson_sclk_div_data) { \
+ .div = { \
+ .reg_off = (_reg), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ }, \
+ .hi = { \
+ .reg_off = (_reg), \
+ .shift = (_hi_shift), \
+ .width = (_hi_width), \
+ }, \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "axg_"#_name, \
+ .ops = &meson_sclk_div_ops, \
+ .parent_names = (const char *[]) { _pname }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
+#define AXG_MST_SCLK_DIV(_name, _reg) \
+ AXG_AUD_SCLK_DIV(mst_##_name##_sclk_div, _reg, 20, 10, 0, 0, \
+ "axg_mst_"#_name"_sclk_pre_en", \
+ CLK_SET_RATE_PARENT)
+
+static AXG_MST_SCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
+static AXG_MST_SCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AXG_MST_SCLK_POST_EN(_name, _reg) \
+ AXG_AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \
+ "axg_mst_"#_name"_sclk_div", CLK_SET_RATE_PARENT)
+
+static AXG_MST_SCLK_POST_EN(a, AUDIO_MST_A_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(b, AUDIO_MST_B_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(c, AUDIO_MST_C_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(d, AUDIO_MST_D_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0);
+static AXG_MST_SCLK_POST_EN(f, AUDIO_MST_F_SCLK_CTRL0);
+
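+/* Triphase clock: three phase controls packed in a single register */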
+#define AXG_AUD_TRIPHASE(_name, _reg, _width, _shift0, _shift1, _shift2, \
+ _pname, _iflags) \
+struct clk_regmap axg_##_name = { \
+ .data = &(struct meson_clk_triphase_data) { \
+ .ph0 = { \
+ .reg_off = (_reg), \
+ .shift = (_shift0), \
+ .width = (_width), \
+ }, \
+ .ph1 = { \
+ .reg_off = (_reg), \
+ .shift = (_shift1), \
+ .width = (_width), \
+ }, \
+ .ph2 = { \
+ .reg_off = (_reg), \
+ .shift = (_shift2), \
+ .width = (_width), \
+ }, \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "axg_"#_name, \
+ .ops = &meson_clk_triphase_ops, \
+ .parent_names = (const char *[]) { _pname }, \
+ .num_parents = 1, \
+ .flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
+ }, \
+}
+
+#define AXG_MST_SCLK(_name, _reg) \
+ AXG_AUD_TRIPHASE(mst_##_name##_sclk, _reg, 1, 0, 2, 4, \
+ "axg_mst_"#_name"_sclk_post_en", CLK_SET_RATE_PARENT)
+
+static AXG_MST_SCLK(a, AUDIO_MST_A_SCLK_CTRL1);
+static AXG_MST_SCLK(b, AUDIO_MST_B_SCLK_CTRL1);
+static AXG_MST_SCLK(c, AUDIO_MST_C_SCLK_CTRL1);
+static AXG_MST_SCLK(d, AUDIO_MST_D_SCLK_CTRL1);
+static AXG_MST_SCLK(e, AUDIO_MST_E_SCLK_CTRL1);
+static AXG_MST_SCLK(f, AUDIO_MST_F_SCLK_CTRL1);
+
+#define AXG_MST_LRCLK_DIV(_name, _reg) \
+ AXG_AUD_SCLK_DIV(mst_##_name##_lrclk_div, _reg, 0, 10, 10, 10, \
+ "axg_mst_"#_name"_sclk_post_en", 0) \
+
+static AXG_MST_LRCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
+static AXG_MST_LRCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AXG_MST_LRCLK(_name, _reg) \
+ AXG_AUD_TRIPHASE(mst_##_name##_lrclk, _reg, 1, 1, 3, 5, \
+ "axg_mst_"#_name"_lrclk_div", CLK_SET_RATE_PARENT)
+
+static AXG_MST_LRCLK(a, AUDIO_MST_A_SCLK_CTRL1);
+static AXG_MST_LRCLK(b, AUDIO_MST_B_SCLK_CTRL1);
+static AXG_MST_LRCLK(c, AUDIO_MST_C_SCLK_CTRL1);
+static AXG_MST_LRCLK(d, AUDIO_MST_D_SCLK_CTRL1);
+static AXG_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1);
+static AXG_MST_LRCLK(f, AUDIO_MST_F_SCLK_CTRL1);
+
+static const char * const tdm_sclk_parent_names[] = {
+ "axg_mst_a_sclk", "axg_mst_b_sclk", "axg_mst_c_sclk",
+ "axg_mst_d_sclk", "axg_mst_e_sclk", "axg_mst_f_sclk",
+ "axg_slv_sclk0", "axg_slv_sclk1", "axg_slv_sclk2",
+ "axg_slv_sclk3", "axg_slv_sclk4", "axg_slv_sclk5",
+ "axg_slv_sclk6", "axg_slv_sclk7", "axg_slv_sclk8",
+ "axg_slv_sclk9"
+};
+
+#define AXG_TDM_SCLK_MUX(_name, _reg) \
+ AXG_AUD_MUX(tdm##_name##_sclk_sel, _reg, 0xf, 24, \
+ CLK_MUX_ROUND_CLOSEST, \
+ tdm_sclk_parent_names, 0)
+
+static AXG_TDM_SCLK_MUX(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_SCLK_MUX(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_SCLK_MUX(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_SCLK_MUX(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_SCLK_MUX(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_SCLK_MUX(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_SCLK_MUX(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+#define AXG_TDM_SCLK_PRE_EN(_name, _reg) \
+ AXG_AUD_GATE(tdm##_name##_sclk_pre_en, _reg, 31, \
+ "axg_tdm"#_name"_sclk_sel", CLK_SET_RATE_PARENT)
+
+static AXG_TDM_SCLK_PRE_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_SCLK_PRE_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_SCLK_PRE_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_SCLK_PRE_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_SCLK_PRE_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_SCLK_PRE_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_SCLK_PRE_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+#define AXG_TDM_SCLK_POST_EN(_name, _reg) \
+ AXG_AUD_GATE(tdm##_name##_sclk_post_en, _reg, 30, \
+ "axg_tdm"#_name"_sclk_pre_en", CLK_SET_RATE_PARENT)
+
+static AXG_TDM_SCLK_POST_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_SCLK_POST_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_SCLK_POST_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_SCLK_POST_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_SCLK_POST_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_SCLK_POST_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
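+/* 1-bit phase control: selects between normal and inverted (180 degree) sclk */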
+#define AXG_TDM_SCLK(_name, _reg) \
+ struct clk_regmap axg_tdm##_name##_sclk = { \
+ .data = &(struct meson_clk_phase_data) { \
+ .ph = { \
+ .reg_off = (_reg), \
+ .shift = 29, \
+ .width = 1, \
+ }, \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "axg_tdm"#_name"_sclk", \
+ .ops = &meson_clk_phase_ops, \
+ .parent_names = (const char *[]) \
+ { "axg_tdm"#_name"_sclk_post_en" }, \
+ .num_parents = 1, \
+ .flags = CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT, \
+ }, \
+}
+
+static AXG_TDM_SCLK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_SCLK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_SCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_SCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+static const char * const tdm_lrclk_parent_names[] = {
+ "axg_mst_a_lrclk", "axg_mst_b_lrclk", "axg_mst_c_lrclk",
+ "axg_mst_d_lrclk", "axg_mst_e_lrclk", "axg_mst_f_lrclk",
+ "axg_slv_lrclk0", "axg_slv_lrclk1", "axg_slv_lrclk2",
+ "axg_slv_lrclk3", "axg_slv_lrclk4", "axg_slv_lrclk5",
+ "axg_slv_lrclk6", "axg_slv_lrclk7", "axg_slv_lrclk8",
+ "axg_slv_lrclk9"
+};
+
+#define AXG_TDM_LRLCK(_name, _reg) \
+ AXG_AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \
+ CLK_MUX_ROUND_CLOSEST, \
+ tdm_lrclk_parent_names, 0)
+
+static AXG_TDM_LRLCK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AXG_TDM_LRLCK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AXG_TDM_LRLCK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AXG_TDM_LRLCK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AXG_TDM_LRLCK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AXG_TDM_LRLCK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AXG_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+/*
+ * Array of all clocks provided by this controller
+ * The input clocks of the controller are populated at runtime
+ */
+static struct clk_hw_onecell_data axg_audio_hw_onecell_data = {
+ .hws = {
+ [AUD_CLKID_DDR_ARB] = &axg_ddr_arb.hw,
+ [AUD_CLKID_PDM] = &axg_pdm.hw,
+ [AUD_CLKID_TDMIN_A] = &axg_tdmin_a.hw,
+ [AUD_CLKID_TDMIN_B] = &axg_tdmin_b.hw,
+ [AUD_CLKID_TDMIN_C] = &axg_tdmin_c.hw,
+ [AUD_CLKID_TDMIN_LB] = &axg_tdmin_lb.hw,
+ [AUD_CLKID_TDMOUT_A] = &axg_tdmout_a.hw,
+ [AUD_CLKID_TDMOUT_B] = &axg_tdmout_b.hw,
+ [AUD_CLKID_TDMOUT_C] = &axg_tdmout_c.hw,
+ [AUD_CLKID_FRDDR_A] = &axg_frddr_a.hw,
+ [AUD_CLKID_FRDDR_B] = &axg_frddr_b.hw,
+ [AUD_CLKID_FRDDR_C] = &axg_frddr_c.hw,
+ [AUD_CLKID_TODDR_A] = &axg_toddr_a.hw,
+ [AUD_CLKID_TODDR_B] = &axg_toddr_b.hw,
+ [AUD_CLKID_TODDR_C] = &axg_toddr_c.hw,
+ [AUD_CLKID_LOOPBACK] = &axg_loopback.hw,
+ [AUD_CLKID_SPDIFIN] = &axg_spdifin.hw,
+ [AUD_CLKID_SPDIFOUT] = &axg_spdifout.hw,
+ [AUD_CLKID_RESAMPLE] = &axg_resample.hw,
+ [AUD_CLKID_POWER_DETECT] = &axg_power_detect.hw,
+ [AUD_CLKID_MST_A_MCLK_SEL] = &axg_mst_a_mclk_sel.hw,
+ [AUD_CLKID_MST_B_MCLK_SEL] = &axg_mst_b_mclk_sel.hw,
+ [AUD_CLKID_MST_C_MCLK_SEL] = &axg_mst_c_mclk_sel.hw,
+ [AUD_CLKID_MST_D_MCLK_SEL] = &axg_mst_d_mclk_sel.hw,
+ [AUD_CLKID_MST_E_MCLK_SEL] = &axg_mst_e_mclk_sel.hw,
+ [AUD_CLKID_MST_F_MCLK_SEL] = &axg_mst_f_mclk_sel.hw,
+ [AUD_CLKID_MST_A_MCLK_DIV] = &axg_mst_a_mclk_div.hw,
+ [AUD_CLKID_MST_B_MCLK_DIV] = &axg_mst_b_mclk_div.hw,
+ [AUD_CLKID_MST_C_MCLK_DIV] = &axg_mst_c_mclk_div.hw,
+ [AUD_CLKID_MST_D_MCLK_DIV] = &axg_mst_d_mclk_div.hw,
+ [AUD_CLKID_MST_E_MCLK_DIV] = &axg_mst_e_mclk_div.hw,
+ [AUD_CLKID_MST_F_MCLK_DIV] = &axg_mst_f_mclk_div.hw,
+ [AUD_CLKID_MST_A_MCLK] = &axg_mst_a_mclk.hw,
+ [AUD_CLKID_MST_B_MCLK] = &axg_mst_b_mclk.hw,
+ [AUD_CLKID_MST_C_MCLK] = &axg_mst_c_mclk.hw,
+ [AUD_CLKID_MST_D_MCLK] = &axg_mst_d_mclk.hw,
+ [AUD_CLKID_MST_E_MCLK] = &axg_mst_e_mclk.hw,
+ [AUD_CLKID_MST_F_MCLK] = &axg_mst_f_mclk.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_SEL] = &axg_spdifout_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_DIV] = &axg_spdifout_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_CLK] = &axg_spdifout_clk.hw,
+ [AUD_CLKID_SPDIFIN_CLK_SEL] = &axg_spdifin_clk_sel.hw,
+ [AUD_CLKID_SPDIFIN_CLK_DIV] = &axg_spdifin_clk_div.hw,
+ [AUD_CLKID_SPDIFIN_CLK] = &axg_spdifin_clk.hw,
+ [AUD_CLKID_PDM_DCLK_SEL] = &axg_pdm_dclk_sel.hw,
+ [AUD_CLKID_PDM_DCLK_DIV] = &axg_pdm_dclk_div.hw,
+ [AUD_CLKID_PDM_DCLK] = &axg_pdm_dclk.hw,
+ [AUD_CLKID_PDM_SYSCLK_SEL] = &axg_pdm_sysclk_sel.hw,
+ [AUD_CLKID_PDM_SYSCLK_DIV] = &axg_pdm_sysclk_div.hw,
+ [AUD_CLKID_PDM_SYSCLK] = &axg_pdm_sysclk.hw,
+ [AUD_CLKID_MST_A_SCLK_PRE_EN] = &axg_mst_a_sclk_pre_en.hw,
+ [AUD_CLKID_MST_B_SCLK_PRE_EN] = &axg_mst_b_sclk_pre_en.hw,
+ [AUD_CLKID_MST_C_SCLK_PRE_EN] = &axg_mst_c_sclk_pre_en.hw,
+ [AUD_CLKID_MST_D_SCLK_PRE_EN] = &axg_mst_d_sclk_pre_en.hw,
+ [AUD_CLKID_MST_E_SCLK_PRE_EN] = &axg_mst_e_sclk_pre_en.hw,
+ [AUD_CLKID_MST_F_SCLK_PRE_EN] = &axg_mst_f_sclk_pre_en.hw,
+ [AUD_CLKID_MST_A_SCLK_DIV] = &axg_mst_a_sclk_div.hw,
+ [AUD_CLKID_MST_B_SCLK_DIV] = &axg_mst_b_sclk_div.hw,
+ [AUD_CLKID_MST_C_SCLK_DIV] = &axg_mst_c_sclk_div.hw,
+ [AUD_CLKID_MST_D_SCLK_DIV] = &axg_mst_d_sclk_div.hw,
+ [AUD_CLKID_MST_E_SCLK_DIV] = &axg_mst_e_sclk_div.hw,
+ [AUD_CLKID_MST_F_SCLK_DIV] = &axg_mst_f_sclk_div.hw,
+ [AUD_CLKID_MST_A_SCLK_POST_EN] = &axg_mst_a_sclk_post_en.hw,
+ [AUD_CLKID_MST_B_SCLK_POST_EN] = &axg_mst_b_sclk_post_en.hw,
+ [AUD_CLKID_MST_C_SCLK_POST_EN] = &axg_mst_c_sclk_post_en.hw,
+ [AUD_CLKID_MST_D_SCLK_POST_EN] = &axg_mst_d_sclk_post_en.hw,
+ [AUD_CLKID_MST_E_SCLK_POST_EN] = &axg_mst_e_sclk_post_en.hw,
+ [AUD_CLKID_MST_F_SCLK_POST_EN] = &axg_mst_f_sclk_post_en.hw,
+ [AUD_CLKID_MST_A_SCLK] = &axg_mst_a_sclk.hw,
+ [AUD_CLKID_MST_B_SCLK] = &axg_mst_b_sclk.hw,
+ [AUD_CLKID_MST_C_SCLK] = &axg_mst_c_sclk.hw,
+ [AUD_CLKID_MST_D_SCLK] = &axg_mst_d_sclk.hw,
+ [AUD_CLKID_MST_E_SCLK] = &axg_mst_e_sclk.hw,
+ [AUD_CLKID_MST_F_SCLK] = &axg_mst_f_sclk.hw,
+ [AUD_CLKID_MST_A_LRCLK_DIV] = &axg_mst_a_lrclk_div.hw,
+ [AUD_CLKID_MST_B_LRCLK_DIV] = &axg_mst_b_lrclk_div.hw,
+ [AUD_CLKID_MST_C_LRCLK_DIV] = &axg_mst_c_lrclk_div.hw,
+ [AUD_CLKID_MST_D_LRCLK_DIV] = &axg_mst_d_lrclk_div.hw,
+ [AUD_CLKID_MST_E_LRCLK_DIV] = &axg_mst_e_lrclk_div.hw,
+ [AUD_CLKID_MST_F_LRCLK_DIV] = &axg_mst_f_lrclk_div.hw,
+ [AUD_CLKID_MST_A_LRCLK] = &axg_mst_a_lrclk.hw,
+ [AUD_CLKID_MST_B_LRCLK] = &axg_mst_b_lrclk.hw,
+ [AUD_CLKID_MST_C_LRCLK] = &axg_mst_c_lrclk.hw,
+ [AUD_CLKID_MST_D_LRCLK] = &axg_mst_d_lrclk.hw,
+ [AUD_CLKID_MST_E_LRCLK] = &axg_mst_e_lrclk.hw,
+ [AUD_CLKID_MST_F_LRCLK] = &axg_mst_f_lrclk.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_SEL] = &axg_tdmin_a_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_SEL] = &axg_tdmin_b_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_SEL] = &axg_tdmin_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &axg_tdmin_lb_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &axg_tdmout_a_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &axg_tdmout_b_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &axg_tdmout_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &axg_tdmin_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &axg_tdmin_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &axg_tdmin_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &axg_tdmin_lb_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &axg_tdmout_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &axg_tdmout_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &axg_tdmout_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &axg_tdmin_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &axg_tdmin_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &axg_tdmin_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &axg_tdmin_lb_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &axg_tdmout_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &axg_tdmout_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &axg_tdmout_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK] = &axg_tdmin_a_sclk.hw,
+ [AUD_CLKID_TDMIN_B_SCLK] = &axg_tdmin_b_sclk.hw,
+ [AUD_CLKID_TDMIN_C_SCLK] = &axg_tdmin_c_sclk.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK] = &axg_tdmin_lb_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &axg_tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &axg_tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &axg_tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMIN_A_LRCLK] = &axg_tdmin_a_lrclk.hw,
+ [AUD_CLKID_TDMIN_B_LRCLK] = &axg_tdmin_b_lrclk.hw,
+ [AUD_CLKID_TDMIN_C_LRCLK] = &axg_tdmin_c_lrclk.hw,
+ [AUD_CLKID_TDMIN_LB_LRCLK] = &axg_tdmin_lb_lrclk.hw,
+ [AUD_CLKID_TDMOUT_A_LRCLK] = &axg_tdmout_a_lrclk.hw,
+ [AUD_CLKID_TDMOUT_B_LRCLK] = &axg_tdmout_b_lrclk.hw,
+ [AUD_CLKID_TDMOUT_C_LRCLK] = &axg_tdmout_c_lrclk.hw,
+ [NR_CLKS] = NULL,
+ },
+ .num = NR_CLKS,
+};
+
+/* Convenience table to populate regmap in .probe() */
+static struct clk_regmap *const axg_audio_clk_regmaps[] = {
+ &axg_ddr_arb,
+ &axg_pdm,
+ &axg_tdmin_a,
+ &axg_tdmin_b,
+ &axg_tdmin_c,
+ &axg_tdmin_lb,
+ &axg_tdmout_a,
+ &axg_tdmout_b,
+ &axg_tdmout_c,
+ &axg_frddr_a,
+ &axg_frddr_b,
+ &axg_frddr_c,
+ &axg_toddr_a,
+ &axg_toddr_b,
+ &axg_toddr_c,
+ &axg_loopback,
+ &axg_spdifin,
+ &axg_spdifout,
+ &axg_resample,
+ &axg_power_detect,
+ &axg_mst_a_mclk_sel,
+ &axg_mst_b_mclk_sel,
+ &axg_mst_c_mclk_sel,
+ &axg_mst_d_mclk_sel,
+ &axg_mst_e_mclk_sel,
+ &axg_mst_f_mclk_sel,
+ &axg_mst_a_mclk_div,
+ &axg_mst_b_mclk_div,
+ &axg_mst_c_mclk_div,
+ &axg_mst_d_mclk_div,
+ &axg_mst_e_mclk_div,
+ &axg_mst_f_mclk_div,
+ &axg_mst_a_mclk,
+ &axg_mst_b_mclk,
+ &axg_mst_c_mclk,
+ &axg_mst_d_mclk,
+ &axg_mst_e_mclk,
+ &axg_mst_f_mclk,
+ &axg_spdifout_clk_sel,
+ &axg_spdifout_clk_div,
+ &axg_spdifout_clk,
+ &axg_spdifin_clk_sel,
+ &axg_spdifin_clk_div,
+ &axg_spdifin_clk,
+ &axg_pdm_dclk_sel,
+ &axg_pdm_dclk_div,
+ &axg_pdm_dclk,
+ &axg_pdm_sysclk_sel,
+ &axg_pdm_sysclk_div,
+ &axg_pdm_sysclk,
+ &axg_mst_a_sclk_pre_en,
+ &axg_mst_b_sclk_pre_en,
+ &axg_mst_c_sclk_pre_en,
+ &axg_mst_d_sclk_pre_en,
+ &axg_mst_e_sclk_pre_en,
+ &axg_mst_f_sclk_pre_en,
+ &axg_mst_a_sclk_div,
+ &axg_mst_b_sclk_div,
+ &axg_mst_c_sclk_div,
+ &axg_mst_d_sclk_div,
+ &axg_mst_e_sclk_div,
+ &axg_mst_f_sclk_div,
+ &axg_mst_a_sclk_post_en,
+ &axg_mst_b_sclk_post_en,
+ &axg_mst_c_sclk_post_en,
+ &axg_mst_d_sclk_post_en,
+ &axg_mst_e_sclk_post_en,
+ &axg_mst_f_sclk_post_en,
+ &axg_mst_a_sclk,
+ &axg_mst_b_sclk,
+ &axg_mst_c_sclk,
+ &axg_mst_d_sclk,
+ &axg_mst_e_sclk,
+ &axg_mst_f_sclk,
+ &axg_mst_a_lrclk_div,
+ &axg_mst_b_lrclk_div,
+ &axg_mst_c_lrclk_div,
+ &axg_mst_d_lrclk_div,
+ &axg_mst_e_lrclk_div,
+ &axg_mst_f_lrclk_div,
+ &axg_mst_a_lrclk,
+ &axg_mst_b_lrclk,
+ &axg_mst_c_lrclk,
+ &axg_mst_d_lrclk,
+ &axg_mst_e_lrclk,
+ &axg_mst_f_lrclk,
+ &axg_tdmin_a_sclk_sel,
+ &axg_tdmin_b_sclk_sel,
+ &axg_tdmin_c_sclk_sel,
+ &axg_tdmin_lb_sclk_sel,
+ &axg_tdmout_a_sclk_sel,
+ &axg_tdmout_b_sclk_sel,
+ &axg_tdmout_c_sclk_sel,
+ &axg_tdmin_a_sclk_pre_en,
+ &axg_tdmin_b_sclk_pre_en,
+ &axg_tdmin_c_sclk_pre_en,
+ &axg_tdmin_lb_sclk_pre_en,
+ &axg_tdmout_a_sclk_pre_en,
+ &axg_tdmout_b_sclk_pre_en,
+ &axg_tdmout_c_sclk_pre_en,
+ &axg_tdmin_a_sclk_post_en,
+ &axg_tdmin_b_sclk_post_en,
+ &axg_tdmin_c_sclk_post_en,
+ &axg_tdmin_lb_sclk_post_en,
+ &axg_tdmout_a_sclk_post_en,
+ &axg_tdmout_b_sclk_post_en,
+ &axg_tdmout_c_sclk_post_en,
+ &axg_tdmin_a_sclk,
+ &axg_tdmin_b_sclk,
+ &axg_tdmin_c_sclk,
+ &axg_tdmin_lb_sclk,
+ &axg_tdmout_a_sclk,
+ &axg_tdmout_b_sclk,
+ &axg_tdmout_c_sclk,
+ &axg_tdmin_a_lrclk,
+ &axg_tdmin_b_lrclk,
+ &axg_tdmin_c_lrclk,
+ &axg_tdmin_lb_lrclk,
+ &axg_tdmout_a_lrclk,
+ &axg_tdmout_b_lrclk,
+ &axg_tdmout_c_lrclk,
+};
+
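+/* Get, prepare and enable a clock, with a devm action to disable it on driver removal */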
+static struct clk *devm_clk_get_enable(struct device *dev, char *id)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, id);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get %s", id);
+ return clk;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "failed to enable %s", id);
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_add_action_or_reset(dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
+ if (ret) {
+ dev_err(dev, "failed to add reset action on %s", id);
+ return ERR_PTR(ret);
+ }
+
+ return clk;
+}
+
+static const struct clk_ops axg_clk_no_ops = {};
+
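+/* Register a no-op pass-through clock exposing an input clock of the controller */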
+static struct clk_hw *axg_clk_hw_register_bypass(struct device *dev,
+ const char *name,
+ const char *parent_name)
+{
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ char *clk_name;
+ int ret;
+
+ hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
+
+ clk_name = kasprintf(GFP_KERNEL, "axg_%s", name);
+ if (!clk_name)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = clk_name;
+ init.ops = &axg_clk_no_ops;
+ init.flags = 0;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ hw->init = &init;
+
+ ret = devm_clk_hw_register(dev, hw);
+ kfree(clk_name);
+
+ return ret ? ERR_PTR(ret) : hw;
+}
+
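+/* Register a single optional input clock described in the DT */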
+static int axg_register_clk_hw_input(struct device *dev,
+ const char *name,
+ unsigned int clkid)
+{
+ struct clk *parent_clk = devm_clk_get(dev, name);
+ struct clk_hw *hw = NULL;
+
+ if (IS_ERR(parent_clk)) {
+ int err = PTR_ERR(parent_clk);
+
+ /* It is ok if an input clock is missing */
+ if (err == -ENOENT) {
+ dev_dbg(dev, "%s not provided", name);
+ } else {
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to get %s clock", name);
+ return err;
+ }
+ } else {
+ hw = axg_clk_hw_register_bypass(dev, name,
+ __clk_get_name(parent_clk));
+ }
+
+ if (IS_ERR(hw)) {
+ dev_err(dev, "failed to register %s clock", name);
+ return PTR_ERR(hw);
+ }
+
+ axg_audio_hw_onecell_data.hws[clkid] = hw;
+ return 0;
+}
+
+static int axg_register_clk_hw_inputs(struct device *dev,
+ const char *basename,
+ unsigned int count,
+ unsigned int clkid)
+{
+ char *name;
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ name = kasprintf(GFP_KERNEL, "%s%d", basename, i);
+ if (!name)
+ return -ENOMEM;
+
+ ret = axg_register_clk_hw_input(dev, name, clkid + i);
+ kfree(name);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config axg_audio_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = AUDIO_CLK_PDMIN_CTRL1,
+};
+
+static int axg_audio_clkc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regmap *map;
+ struct resource *res;
+ void __iomem *regs;
+ struct clk *clk;
+ struct clk_hw *hw;
+ int ret, i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ map = devm_regmap_init_mmio(dev, regs, &axg_audio_regmap_cfg);
+ if (IS_ERR(map)) {
+ dev_err(dev, "failed to init regmap: %ld\n", PTR_ERR(map));
+ return PTR_ERR(map);
+ }
+
+ /* Get the mandatory peripheral clock */
+ clk = devm_clk_get_enable(dev, "pclk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = device_reset(dev);
+ if (ret) {
+ dev_err(dev, "failed to reset device\n");
+ return ret;
+ }
+
+ /* Register the peripheral input clock */
+ hw = axg_clk_hw_register_bypass(dev, "audio_pclk",
+ __clk_get_name(clk));
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ axg_audio_hw_onecell_data.hws[AUD_CLKID_PCLK] = hw;
+
+ /* Register optional input master clocks */
+ ret = axg_register_clk_hw_inputs(dev, "mst_in",
+ AXG_MST_IN_COUNT,
+ AUD_CLKID_MST0);
+ if (ret)
+ return ret;
+
+ /* Register optional input slave sclks */
+ ret = axg_register_clk_hw_inputs(dev, "slv_sclk",
+ AXG_SLV_SCLK_COUNT,
+ AUD_CLKID_SLV_SCLK0);
+ if (ret)
+ return ret;
+
+ /* Register optional input slave lrclks */
+ ret = axg_register_clk_hw_inputs(dev, "slv_lrclk",
+ AXG_SLV_LRCLK_COUNT,
+ AUD_CLKID_SLV_LRCLK0);
+ if (ret)
+ return ret;
+
+ /* Populate regmap for the regmap backed clocks */
+ for (i = 0; i < ARRAY_SIZE(axg_audio_clk_regmaps); i++)
+ axg_audio_clk_regmaps[i]->map = map;
+
+ /* Take care to skip the registered input clocks */
+ for (i = AUD_CLKID_DDR_ARB; i < axg_audio_hw_onecell_data.num; i++) {
+ hw = axg_audio_hw_onecell_data.hws[i];
+ /* array might be sparse */
+ if (!hw)
+ continue;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "failed to register clock %s\n",
+ hw->init->name);
+ return ret;
+ }
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &axg_audio_hw_onecell_data);
+}
+
+static const struct of_device_id clkc_match_table[] = {
+ { .compatible = "amlogic,axg-audio-clkc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, clkc_match_table);
+
+static struct platform_driver axg_audio_driver = {
+ .probe = axg_audio_clkc_probe,
+ .driver = {
+ .name = "axg-audio-clkc",
+ .of_match_table = clkc_match_table,
+ },
+};
+module_platform_driver(axg_audio_driver);
+
+MODULE_DESCRIPTION("Amlogic A113x Audio Clock driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h
new file mode 100644
index 000000000000..7191b39c9d65
--- /dev/null
+++ b/drivers/clk/meson/axg-audio.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __AXG_AUDIO_CLKC_H
+#define __AXG_AUDIO_CLKC_H
+
+/*
+ * Audio Clock register offsets
+ *
+ * Register offsets from the datasheet must be multiplied by 4 to get
+ * the offsets used here
+ */
+#define AUDIO_CLK_GATE_EN 0x000
+#define AUDIO_MCLK_A_CTRL 0x004
+#define AUDIO_MCLK_B_CTRL 0x008
+#define AUDIO_MCLK_C_CTRL 0x00C
+#define AUDIO_MCLK_D_CTRL 0x010
+#define AUDIO_MCLK_E_CTRL 0x014
+#define AUDIO_MCLK_F_CTRL 0x018
+#define AUDIO_MST_A_SCLK_CTRL0 0x040
+#define AUDIO_MST_A_SCLK_CTRL1 0x044
+#define AUDIO_MST_B_SCLK_CTRL0 0x048
+#define AUDIO_MST_B_SCLK_CTRL1 0x04C
+#define AUDIO_MST_C_SCLK_CTRL0 0x050
+#define AUDIO_MST_C_SCLK_CTRL1 0x054
+#define AUDIO_MST_D_SCLK_CTRL0 0x058
+#define AUDIO_MST_D_SCLK_CTRL1 0x05C
+#define AUDIO_MST_E_SCLK_CTRL0 0x060
+#define AUDIO_MST_E_SCLK_CTRL1 0x064
+#define AUDIO_MST_F_SCLK_CTRL0 0x068
+#define AUDIO_MST_F_SCLK_CTRL1 0x06C
+#define AUDIO_CLK_TDMIN_A_CTRL 0x080
+#define AUDIO_CLK_TDMIN_B_CTRL 0x084
+#define AUDIO_CLK_TDMIN_C_CTRL 0x088
+#define AUDIO_CLK_TDMIN_LB_CTRL 0x08C
+#define AUDIO_CLK_TDMOUT_A_CTRL 0x090
+#define AUDIO_CLK_TDMOUT_B_CTRL 0x094
+#define AUDIO_CLK_TDMOUT_C_CTRL 0x098
+#define AUDIO_CLK_SPDIFIN_CTRL 0x09C
+#define AUDIO_CLK_SPDIFOUT_CTRL 0x0A0
+#define AUDIO_CLK_RESAMPLE_CTRL 0x0A4
+#define AUDIO_CLK_LOCKER_CTRL 0x0A8
+#define AUDIO_CLK_PDMIN_CTRL0 0x0AC
+#define AUDIO_CLK_PDMIN_CTRL1 0x0B0
+
+/*
+ * CLKID index values
+ * These indices are entirely contrived and do not map onto the hardware.
+ */
+
+#define AUD_CLKID_PCLK 0
+#define AUD_CLKID_MST0 1
+#define AUD_CLKID_MST1 2
+#define AUD_CLKID_MST2 3
+#define AUD_CLKID_MST3 4
+#define AUD_CLKID_MST4 5
+#define AUD_CLKID_MST5 6
+#define AUD_CLKID_MST6 7
+#define AUD_CLKID_MST7 8
+#define AUD_CLKID_MST_A_MCLK_SEL 59
+#define AUD_CLKID_MST_B_MCLK_SEL 60
+#define AUD_CLKID_MST_C_MCLK_SEL 61
+#define AUD_CLKID_MST_D_MCLK_SEL 62
+#define AUD_CLKID_MST_E_MCLK_SEL 63
+#define AUD_CLKID_MST_F_MCLK_SEL 64
+#define AUD_CLKID_MST_A_MCLK_DIV 65
+#define AUD_CLKID_MST_B_MCLK_DIV 66
+#define AUD_CLKID_MST_C_MCLK_DIV 67
+#define AUD_CLKID_MST_D_MCLK_DIV 68
+#define AUD_CLKID_MST_E_MCLK_DIV 69
+#define AUD_CLKID_MST_F_MCLK_DIV 70
+#define AUD_CLKID_SPDIFOUT_CLK_SEL 71
+#define AUD_CLKID_SPDIFOUT_CLK_DIV 72
+#define AUD_CLKID_SPDIFIN_CLK_SEL 73
+#define AUD_CLKID_SPDIFIN_CLK_DIV 74
+#define AUD_CLKID_PDM_DCLK_SEL 75
+#define AUD_CLKID_PDM_DCLK_DIV 76
+#define AUD_CLKID_PDM_SYSCLK_SEL 77
+#define AUD_CLKID_PDM_SYSCLK_DIV 78
+#define AUD_CLKID_MST_A_SCLK_PRE_EN 92
+#define AUD_CLKID_MST_B_SCLK_PRE_EN 93
+#define AUD_CLKID_MST_C_SCLK_PRE_EN 94
+#define AUD_CLKID_MST_D_SCLK_PRE_EN 95
+#define AUD_CLKID_MST_E_SCLK_PRE_EN 96
+#define AUD_CLKID_MST_F_SCLK_PRE_EN 97
+#define AUD_CLKID_MST_A_SCLK_DIV 98
+#define AUD_CLKID_MST_B_SCLK_DIV 99
+#define AUD_CLKID_MST_C_SCLK_DIV 100
+#define AUD_CLKID_MST_D_SCLK_DIV 101
+#define AUD_CLKID_MST_E_SCLK_DIV 102
+#define AUD_CLKID_MST_F_SCLK_DIV 103
+#define AUD_CLKID_MST_A_SCLK_POST_EN 104
+#define AUD_CLKID_MST_B_SCLK_POST_EN 105
+#define AUD_CLKID_MST_C_SCLK_POST_EN 106
+#define AUD_CLKID_MST_D_SCLK_POST_EN 107
+#define AUD_CLKID_MST_E_SCLK_POST_EN 108
+#define AUD_CLKID_MST_F_SCLK_POST_EN 109
+#define AUD_CLKID_MST_A_LRCLK_DIV 110
+#define AUD_CLKID_MST_B_LRCLK_DIV 111
+#define AUD_CLKID_MST_C_LRCLK_DIV 112
+#define AUD_CLKID_MST_D_LRCLK_DIV 113
+#define AUD_CLKID_MST_E_LRCLK_DIV 114
+#define AUD_CLKID_MST_F_LRCLK_DIV 115
+#define AUD_CLKID_TDMIN_A_SCLK_PRE_EN 137
+#define AUD_CLKID_TDMIN_B_SCLK_PRE_EN 138
+#define AUD_CLKID_TDMIN_C_SCLK_PRE_EN 139
+#define AUD_CLKID_TDMIN_LB_SCLK_PRE_EN 140
+#define AUD_CLKID_TDMOUT_A_SCLK_PRE_EN 141
+#define AUD_CLKID_TDMOUT_B_SCLK_PRE_EN 142
+#define AUD_CLKID_TDMOUT_C_SCLK_PRE_EN 143
+#define AUD_CLKID_TDMIN_A_SCLK_POST_EN 144
+#define AUD_CLKID_TDMIN_B_SCLK_POST_EN 145
+#define AUD_CLKID_TDMIN_C_SCLK_POST_EN 146
+#define AUD_CLKID_TDMIN_LB_SCLK_POST_EN 147
+#define AUD_CLKID_TDMOUT_A_SCLK_POST_EN 148
+#define AUD_CLKID_TDMOUT_B_SCLK_POST_EN 149
+#define AUD_CLKID_TDMOUT_C_SCLK_POST_EN 150
+
+/* include the CLKIDs which are part of the DT bindings */
+#include <dt-bindings/clock/axg-audio-clkc.h>
+
+#define NR_CLKS 151
+
+#endif /* __AXG_AUDIO_CLKC_H */
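As a worked example of the offset note at the top of this header: a datasheet word index of 0x2b (the datasheet indices used here are assumed for illustration) gives 0x2b * 4 = 0xAC, which matches AUDIO_CLK_PDMIN_CTRL0 above.

	/* illustration only: datasheet word index -> regmap byte offset */
	#define AUD_BYTE_OFF(word_idx)	((word_idx) * 4)
	/* AUD_BYTE_OFF(0x2b) == 0xAC == AUDIO_CLK_PDMIN_CTRL0 */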
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index bd4dbc696b88..00ce62ad6416 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -12,7 +12,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
-#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
@@ -626,6 +625,137 @@ static struct clk_regmap axg_mpll3 = {
},
};
+static const struct pll_rate_table axg_pcie_pll_rate_table[] = {
+ {
+ .rate = 100000000,
+ .m = 200,
+ .n = 3,
+ .od = 1,
+ .od2 = 3,
+ },
+ { /* sentinel */ },
+};
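Assuming the usual Amlogic PLL rate law, rate = parent * m / n divided by 2^od and then by 2^od2 (the law is inferred from the field layout below, not stated in this patch), the single entry above checks out against the 24 MHz xtal:

	24000000 * 200 / 3 = 1600000000		/* m / n stage */
	1600000000 >> 1    =  800000000		/* od  = 1 */
	800000000  >> 3    =  100000000		/* od2 = 3, the 100 MHz PCIe ref */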
+
+static const struct reg_sequence axg_pcie_init_regs[] = {
+ { .reg = HHI_PCIE_PLL_CNTL, .def = 0x400106c8 },
+ { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x0084a2aa },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0xb75020be },
+ { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x0a47488e },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0xc000004d },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x00078000 },
+ { .reg = HHI_PCIE_PLL_CNTL6, .def = 0x002323c6 },
+};
+
+static struct clk_regmap axg_pcie_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .od2 = {
+ .reg_off = HHI_PCIE_PLL_CNTL6,
+ .shift = 6,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_PCIE_PLL_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = axg_pcie_pll_rate_table,
+ .init_regs = axg_pcie_init_regs,
+ .init_count = ARRAY_SIZE(axg_pcie_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll",
+ .ops = &meson_clk_pll_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap axg_pcie_mux = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_PCIE_PLL_CNTL6,
+ .mask = 0x1,
+ .shift = 2,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_mux",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "mpll3", "pcie_pll" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_pcie_ref = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_PCIE_PLL_CNTL6,
+ .mask = 0x1,
+ .shift = 1,
+ /* skip the parent 0, reserved for debug */
+ .table = (u32[]){ 1 },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_ref",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "pcie_mux" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_pcie_cml_en0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_PCIE_PLL_CNTL6,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "pcie_cml_en0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "pcie_ref" },
+ .num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+static struct clk_regmap axg_pcie_cml_en1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_PCIE_PLL_CNTL6,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "pcie_cml_en1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "pcie_ref" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
static const char * const clk81_parent_names[] = {
"xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
@@ -779,6 +909,63 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
},
};
+static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
+ 9, 10, 11, 13, 14, };
+static const char * const gen_clk_parent_names[] = {
+ "xtal", "hifi_pll", "mpll0", "mpll1", "mpll2", "mpll3",
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
+};
+
+static struct clk_regmap axg_gen_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .mask = 0xf,
+ .shift = 12,
+ .table = mux_table_gen_clk,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bits 15:12 selects from 14 possible parents:
+ * xtal, [rtc_oscin_i], [sys_cpu_div16], [ddr_dpll_pt],
+ * hifi_pll, mpll0, mpll1, mpll2, mpll3, fdiv4,
+ * fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
+ */
+ .parent_names = gen_clk_parent_names,
+ .num_parents = ARRAY_SIZE(gen_clk_parent_names),
+ },
+};
+
+static struct clk_regmap axg_gen_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .shift = 0,
+ .width = 11,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "gen_clk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_gen_clk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .bit_idx = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "gen_clk_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
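The gen_clk output is the usual three-stage composite: table-based mux, 11-bit divider, then gate, with rate requests forwarded upward through CLK_SET_RATE_PARENT. A hedged consumer sketch (device and rate are hypothetical):

	/* route 12 MHz out of the GEN_CLK pad, assuming gen_clk in the DT */
	clk = devm_clk_get(dev, NULL);
	clk_set_rate(clk, 12000000);	/* mux can stay on the 24 MHz xtal, div = 2 */
	clk_prepare_enable(clk);	/* opens the gate at bit 7 */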
+
/* Everything Else (EE) domain gates */
static MESON_GATE(axg_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(axg_audio_locker, HHI_GCLK_MPEG0, 2);
@@ -821,6 +1008,7 @@ static MESON_GATE(axg_mmc_pclk, HHI_GCLK_MPEG2, 11);
static MESON_GATE(axg_vpu_intr, HHI_GCLK_MPEG2, 25);
static MESON_GATE(axg_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
static MESON_GATE(axg_gic, HHI_GCLK_MPEG2, 30);
+static MESON_GATE(axg_mipi_enable, HHI_MIPI_CNTL0, 29);
/* Always On (AO) domain gates */
@@ -910,6 +1098,15 @@ static struct clk_hw_onecell_data axg_hw_onecell_data = {
[CLKID_FCLK_DIV4_DIV] = &axg_fclk_div4_div.hw,
[CLKID_FCLK_DIV5_DIV] = &axg_fclk_div5_div.hw,
[CLKID_FCLK_DIV7_DIV] = &axg_fclk_div7_div.hw,
+ [CLKID_PCIE_PLL] = &axg_pcie_pll.hw,
+ [CLKID_PCIE_MUX] = &axg_pcie_mux.hw,
+ [CLKID_PCIE_REF] = &axg_pcie_ref.hw,
+ [CLKID_PCIE_CML_EN0] = &axg_pcie_cml_en0.hw,
+ [CLKID_PCIE_CML_EN1] = &axg_pcie_cml_en1.hw,
+ [CLKID_MIPI_ENABLE] = &axg_mipi_enable.hw,
+ [CLKID_GEN_CLK_SEL] = &axg_gen_clk_sel.hw,
+ [CLKID_GEN_CLK_DIV] = &axg_gen_clk_div.hw,
+ [CLKID_GEN_CLK] = &axg_gen_clk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -988,6 +1185,15 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_fclk_div4,
&axg_fclk_div5,
&axg_fclk_div7,
+ &axg_pcie_pll,
+ &axg_pcie_mux,
+ &axg_pcie_ref,
+ &axg_pcie_cml_en0,
+ &axg_pcie_cml_en1,
+ &axg_mipi_enable,
+ &axg_gen_clk_sel,
+ &axg_gen_clk_div,
+ &axg_gen_clk,
};
static const struct of_device_id clkc_match_table[] = {
@@ -995,49 +1201,17 @@ static const struct of_device_id clkc_match_table[] = {
{}
};
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
static int axg_clkc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
- void __iomem *clk_base = NULL;
struct regmap *map;
int ret, i;
/* Get the hhi system controller node if available */
map = syscon_node_to_regmap(of_get_parent(dev->of_node));
if (IS_ERR(map)) {
- dev_err(dev,
- "failed to get HHI regmap - Trying obsolete regs\n");
-
- /*
- * FIXME: HHI registers should be accessed through
- * the appropriate system controller. This is required because
- * there is more than just clocks in this register space
- *
- * This fallback method is only provided temporarily until
- * all the platform DTs are properly using the syscon node
- */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
-
- clk_base = devm_ioremap(dev, res->start, resource_size(res));
- if (!clk_base) {
- dev_err(dev, "Unable to map clk base\n");
- return -ENXIO;
- }
-
- map = devm_regmap_init_mmio(dev, clk_base,
- &clkc_regmap_config);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ dev_err(dev, "failed to get HHI regmap\n");
+ return PTR_ERR(map);
}
/* Populate regmap for the regmap backed clocks */
diff --git a/drivers/clk/meson/axg.h b/drivers/clk/meson/axg.h
index b421df6a7ea0..1d04144a1b2c 100644
--- a/drivers/clk/meson/axg.h
+++ b/drivers/clk/meson/axg.h
@@ -16,6 +16,7 @@
* Register offsets from the data sheet must be multiplied by 4 before
* adding them to the base address to get the right value.
*/
+#define HHI_MIPI_CNTL0 0x00
#define HHI_GP0_PLL_CNTL 0x40
#define HHI_GP0_PLL_CNTL2 0x44
#define HHI_GP0_PLL_CNTL3 0x48
@@ -127,8 +128,13 @@
#define CLKID_FCLK_DIV4_DIV 73
#define CLKID_FCLK_DIV5_DIV 74
#define CLKID_FCLK_DIV7_DIV 75
+#define CLKID_PCIE_PLL 76
+#define CLKID_PCIE_MUX 77
+#define CLKID_PCIE_REF 78
+#define CLKID_GEN_CLK_SEL 82
+#define CLKID_GEN_CLK_DIV 83
-#define NR_CLKS 76
+#define NR_CLKS 85
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/axg-clkc.h>
diff --git a/drivers/clk/meson/clk-audio-divider.c b/drivers/clk/meson/clk-audio-divider.c
deleted file mode 100644
index e4cf96ba704e..000000000000
--- a/drivers/clk/meson/clk-audio-divider.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2017 AmLogic, Inc.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-/*
- * i2s master clock divider: The algorithm of the generic clk-divider used with
- * a very precise clock parent such as the mpll tends to select a low divider
- * factor. This gives poor results with this particular divider, especially with
- * high frequencies (> 100 MHz)
- *
- * This driver try to select the maximum possible divider with the rate the
- * upstream clock can provide.
- */
-
-#include <linux/clk-provider.h>
-#include "clkc.h"
-
-static inline struct meson_clk_audio_div_data *
-meson_clk_audio_div_data(struct clk_regmap *clk)
-{
- return (struct meson_clk_audio_div_data *)clk->data;
-}
-
-static int _div_round(unsigned long parent_rate, unsigned long rate,
- unsigned long flags)
-{
- if (flags & CLK_DIVIDER_ROUND_CLOSEST)
- return DIV_ROUND_CLOSEST_ULL((u64)parent_rate, rate);
-
- return DIV_ROUND_UP_ULL((u64)parent_rate, rate);
-}
-
-static int _get_val(unsigned long parent_rate, unsigned long rate)
-{
- return DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1;
-}
-
-static int _valid_divider(unsigned int width, int divider)
-{
- int max_divider = 1 << width;
-
- return clamp(divider, 1, max_divider);
-}
-
-static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
- unsigned long divider;
-
- divider = meson_parm_read(clk->map, &adiv->div) + 1;
-
- return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
-}
-
-static long audio_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
- unsigned long max_prate;
- int divider;
-
- if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
- divider = _div_round(*parent_rate, rate, adiv->flags);
- divider = _valid_divider(adiv->div.width, divider);
- return DIV_ROUND_UP_ULL((u64)*parent_rate, divider);
- }
-
- /* Get the maximum parent rate */
- max_prate = clk_hw_round_rate(clk_hw_get_parent(hw), ULONG_MAX);
-
- /* Get the corresponding rounded down divider */
- divider = max_prate / rate;
- divider = _valid_divider(adiv->div.width, divider);
-
- /* Get actual rate of the parent */
- *parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
- divider * rate);
-
- return DIV_ROUND_UP_ULL((u64)*parent_rate, divider);
-}
-
-static int audio_divider_set_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_regmap *clk = to_clk_regmap(hw);
- struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
- int val = _get_val(parent_rate, rate);
-
- meson_parm_write(clk->map, &adiv->div, val);
-
- return 0;
-}
-
-const struct clk_ops meson_clk_audio_divider_ro_ops = {
- .recalc_rate = audio_divider_recalc_rate,
- .round_rate = audio_divider_round_rate,
-};
-
-const struct clk_ops meson_clk_audio_divider_ops = {
- .recalc_rate = audio_divider_recalc_rate,
- .round_rate = audio_divider_round_rate,
- .set_rate = audio_divider_set_rate,
-};
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
new file mode 100644
index 000000000000..cba43748ce3d
--- /dev/null
+++ b/drivers/clk/meson/clk-phase.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include "clkc.h"
+
+#define phase_step(_width) (360 / (1 << (_width)))
+
+static inline struct meson_clk_phase_data *
+meson_clk_phase_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_phase_data *)clk->data;
+}
+
+int meson_clk_degrees_from_val(unsigned int val, unsigned int width)
+{
+ return phase_step(width) * val;
+}
+EXPORT_SYMBOL_GPL(meson_clk_degrees_from_val);
+
+unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width)
+{
+ unsigned int val = DIV_ROUND_CLOSEST(degrees, phase_step(width));
+
+ /*
+	 * The modulo handles degrees rounding up to 360, which would
+	 * otherwise yield val == (1 << width), one past the field range.
+ */
+ return val % (1 << width);
+}
+EXPORT_SYMBOL_GPL(meson_clk_degrees_to_val);
+
+static int meson_clk_phase_get_phase(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_phase_data *phase = meson_clk_phase_data(clk);
+ unsigned int val;
+
+ val = meson_parm_read(clk->map, &phase->ph);
+
+ return meson_clk_degrees_from_val(val, phase->ph.width);
+}
+
+static int meson_clk_phase_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_phase_data *phase = meson_clk_phase_data(clk);
+ unsigned int val;
+
+ val = meson_clk_degrees_to_val(degrees, phase->ph.width);
+ meson_parm_write(clk->map, &phase->ph, val);
+
+ return 0;
+}
+
+const struct clk_ops meson_clk_phase_ops = {
+ .get_phase = meson_clk_phase_get_phase,
+ .set_phase = meson_clk_phase_set_phase,
+};
+EXPORT_SYMBOL_GPL(meson_clk_phase_ops);
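A worked example of the conversion helpers above, assuming a hypothetical 3-bit phase field:

	phase_step(3) == 360 / (1 << 3) == 45	/* degrees per step */
	meson_clk_degrees_to_val(180, 3) == DIV_ROUND_CLOSEST(180, 45) == 4
	meson_clk_degrees_to_val(350, 3): DIV_ROUND_CLOSEST(350, 45) == 8,
	and 8 % (1 << 3) == 0	/* a full 360 degree turn wraps back to 0 */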
diff --git a/drivers/clk/meson/clk-triphase.c b/drivers/clk/meson/clk-triphase.c
new file mode 100644
index 000000000000..4a59936251e5
--- /dev/null
+++ b/drivers/clk/meson/clk-triphase.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include "clkc-audio.h"
+
+/*
+ * This is a special clock for the audio controller.
+ * The phase of mst_sclk clock output can be controlled independently
+ * for the outside world (ph0), the tdmout (ph1) and tdmin (ph2).
+ * Controlling these 3 phases as just one makes things simpler and
+ * gives the same clock view to all the elements on the i2s bus.
+ * If necessary, the phase can still be adjusted in the tdm block
+ * itself, which makes this independent control redundant.
+ */
+static inline struct meson_clk_triphase_data *
+meson_clk_triphase_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_triphase_data *)clk->data;
+}
+
+static void meson_clk_triphase_sync(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+ /* Get phase 0 and sync it to phase 1 and 2 */
+ val = meson_parm_read(clk->map, &tph->ph0);
+ meson_parm_write(clk->map, &tph->ph1, val);
+ meson_parm_write(clk->map, &tph->ph2, val);
+}
+
+static int meson_clk_triphase_get_phase(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+	/* Phases are in sync, reading phase 0 is enough */
+ val = meson_parm_read(clk->map, &tph->ph0);
+
+ return meson_clk_degrees_from_val(val, tph->ph0.width);
+}
+
+static int meson_clk_triphase_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
+ unsigned int val;
+
+ val = meson_clk_degrees_to_val(degrees, tph->ph0.width);
+ meson_parm_write(clk->map, &tph->ph0, val);
+ meson_parm_write(clk->map, &tph->ph1, val);
+ meson_parm_write(clk->map, &tph->ph2, val);
+
+ return 0;
+}
+
+const struct clk_ops meson_clk_triphase_ops = {
+ .init = meson_clk_triphase_sync,
+ .get_phase = meson_clk_triphase_get_phase,
+ .set_phase = meson_clk_triphase_set_phase,
+};
+EXPORT_SYMBOL_GPL(meson_clk_triphase_ops);
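For context, a clock using these ops carries three parm fields pointing at the three phase controls of one register. A hedged sketch of such an instance (the register choice, shifts, widths and parent name below are hypothetical, not taken from the datasheet):

	static struct clk_regmap aud_mst_a_sclk_example = {
		.data = &(struct meson_clk_triphase_data){
			.ph0 = { .reg_off = AUDIO_MST_A_SCLK_CTRL1, .shift = 24, .width = 3 },
			.ph1 = { .reg_off = AUDIO_MST_A_SCLK_CTRL1, .shift = 16, .width = 3 },
			.ph2 = { .reg_off = AUDIO_MST_A_SCLK_CTRL1, .shift =  8, .width = 3 },
		},
		.hw.init = &(struct clk_init_data){
			.name = "mst_a_sclk_example",
			.ops = &meson_clk_triphase_ops,
			.parent_names = (const char *[]){ "mst_a_sclk_post_en" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
		},
	};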
diff --git a/drivers/clk/meson/clkc-audio.h b/drivers/clk/meson/clkc-audio.h
new file mode 100644
index 000000000000..0a7c157ebf81
--- /dev/null
+++ b/drivers/clk/meson/clkc-audio.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __MESON_CLKC_AUDIO_H
+#define __MESON_CLKC_AUDIO_H
+
+#include "clkc.h"
+
+struct meson_clk_triphase_data {
+ struct parm ph0;
+ struct parm ph1;
+ struct parm ph2;
+};
+
+struct meson_sclk_div_data {
+ struct parm div;
+ struct parm hi;
+ unsigned int cached_div;
+ struct clk_duty cached_duty;
+};
+
+extern const struct clk_ops meson_clk_triphase_ops;
+extern const struct clk_ops meson_sclk_div_ops;
+
+#endif /* __MESON_CLKC_AUDIO_H */
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
index 2fb084330ee9..24cec16b6038 100644
--- a/drivers/clk/meson/clkc.h
+++ b/drivers/clk/meson/clkc.h
@@ -91,11 +91,13 @@ struct meson_clk_mpll_data {
#define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0)
-struct meson_clk_audio_div_data {
- struct parm div;
- u8 flags;
+struct meson_clk_phase_data {
+ struct parm ph;
};
+int meson_clk_degrees_from_val(unsigned int val, unsigned int width);
+unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width);
+
#define MESON_GATE(_name, _reg, _bit) \
struct clk_regmap _name = { \
.data = &(struct clk_regmap_gate_data){ \
@@ -117,7 +119,6 @@ extern const struct clk_ops meson_clk_pll_ops;
extern const struct clk_ops meson_clk_cpu_ops;
extern const struct clk_ops meson_clk_mpll_ro_ops;
extern const struct clk_ops meson_clk_mpll_ops;
-extern const struct clk_ops meson_clk_audio_divider_ro_ops;
-extern const struct clk_ops meson_clk_audio_divider_ops;
+extern const struct clk_ops meson_clk_phase_ops;
#endif /* __CLKC_H */
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 177fffb9ebef..86d3ae58e84c 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -7,7 +7,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
-#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
@@ -971,28 +970,26 @@ static struct clk_regmap gxbb_cts_amclk_sel = {
.mask = 0x3,
.shift = 9,
.table = (u32[]){ 1, 2, 3 },
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
.ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" },
.num_parents = 3,
- .flags = CLK_SET_RATE_PARENT,
},
};
static struct clk_regmap gxbb_cts_amclk_div = {
- .data = &(struct meson_clk_audio_div_data){
- .div = {
- .reg_off = HHI_AUD_CLK_CNTL,
- .shift = 0,
- .width = 8,
- },
+ .data = &(struct clk_regmap_div_data) {
+ .offset = HHI_AUD_CLK_CNTL,
+ .shift = 0,
+ .width = 8,
.flags = CLK_DIVIDER_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_div",
- .ops = &meson_clk_audio_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "cts_amclk_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1019,13 +1016,13 @@ static struct clk_regmap gxbb_cts_mclk_i958_sel = {
.mask = 0x3,
.shift = 25,
.table = (u32[]){ 1, 2, 3 },
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" },
.num_parents = 3,
- .flags = CLK_SET_RATE_PARENT,
},
};
@@ -1627,6 +1624,63 @@ static struct clk_regmap gxbb_vdec_hevc = {
},
};
+static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
+ 9, 10, 11, 13, 14, };
+static const char * const gen_clk_parent_names[] = {
+ "xtal", "vdec_1", "vdec_hevc", "mpll0", "mpll1", "mpll2",
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
+};
+
+static struct clk_regmap gxbb_gen_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .mask = 0xf,
+ .shift = 12,
+ .table = mux_table_gen_clk,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bits 15:12 selects from 14 possible parents:
+ * xtal, [rtc_oscin_i], [sys_cpu_div16], [ddr_dpll_pt],
+ * vid_pll, vid2_pll (hevc), mpll0, mpll1, mpll2, fdiv4,
+ * fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
+ */
+ .parent_names = gen_clk_parent_names,
+ .num_parents = ARRAY_SIZE(gen_clk_parent_names),
+ },
+};
+
+static struct clk_regmap gxbb_gen_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .shift = 0,
+ .width = 11,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "gen_clk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap gxbb_gen_clk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_GEN_CLK_CNTL,
+ .bit_idx = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gen_clk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "gen_clk_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* Everything Else (EE) domain gates */
static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
@@ -1876,6 +1930,9 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
[CLKID_VDEC_HEVC_SEL] = &gxbb_vdec_hevc_sel.hw,
[CLKID_VDEC_HEVC_DIV] = &gxbb_vdec_hevc_div.hw,
[CLKID_VDEC_HEVC] = &gxbb_vdec_hevc.hw,
+ [CLKID_GEN_CLK_SEL] = &gxbb_gen_clk_sel.hw,
+ [CLKID_GEN_CLK_DIV] = &gxbb_gen_clk_div.hw,
+ [CLKID_GEN_CLK] = &gxbb_gen_clk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -2038,6 +2095,9 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_VDEC_HEVC_SEL] = &gxbb_vdec_hevc_sel.hw,
[CLKID_VDEC_HEVC_DIV] = &gxbb_vdec_hevc_div.hw,
[CLKID_VDEC_HEVC] = &gxbb_vdec_hevc.hw,
+ [CLKID_GEN_CLK_SEL] = &gxbb_gen_clk_sel.hw,
+ [CLKID_GEN_CLK_DIV] = &gxbb_gen_clk_div.hw,
+ [CLKID_GEN_CLK] = &gxbb_gen_clk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -2202,6 +2262,9 @@ static struct clk_regmap *const gx_clk_regmaps[] = {
&gxbb_vdec_hevc_sel,
&gxbb_vdec_hevc_div,
&gxbb_vdec_hevc,
+ &gxbb_gen_clk_sel,
+ &gxbb_gen_clk_div,
+ &gxbb_gen_clk,
};
struct clkc_data {
@@ -2228,17 +2291,9 @@ static const struct of_device_id clkc_match_table[] = {
{},
};
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
static int gxbb_clkc_probe(struct platform_device *pdev)
{
const struct clkc_data *clkc_data;
- struct resource *res;
- void __iomem *clk_base;
struct regmap *map;
int ret, i;
struct device *dev = &pdev->dev;
@@ -2250,31 +2305,8 @@ static int gxbb_clkc_probe(struct platform_device *pdev)
/* Get the hhi system controller node if available */
map = syscon_node_to_regmap(of_get_parent(dev->of_node));
if (IS_ERR(map)) {
- dev_err(dev,
- "failed to get HHI regmap - Trying obsolete regs\n");
-
- /*
- * FIXME: HHI registers should be accessed through
- * the appropriate system controller. This is required because
- * there is more than just clocks in this register space
- *
- * This fallback method is only provided temporarily until
- * all the platform DTs are properly using the syscon node
- */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
- clk_base = devm_ioremap(dev, res->start, resource_size(res));
- if (!clk_base) {
- dev_err(dev, "Unable to map clk base\n");
- return -ENXIO;
- }
-
- map = devm_regmap_init_mmio(dev, clk_base,
- &clkc_regmap_config);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ dev_err(dev, "failed to get HHI regmap\n");
+ return PTR_ERR(map);
}
/* Populate regmap for the common regmap backed clocks */
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index ec1a812bf1fd..20dfb1daf5b8 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -66,7 +66,6 @@
#define HHI_USB_CLK_CNTL 0x220 /* 0x88 offset in data sheet */
#define HHI_32K_CLK_CNTL 0x224 /* 0x89 offset in data sheet */
#define HHI_GEN_CLK_CNTL 0x228 /* 0x8a offset in data sheet */
-#define HHI_GEN_CLK_CNTL 0x228 /* 0x8a offset in data sheet */
#define HHI_PCM_CLK_CNTL 0x258 /* 0x96 offset in data sheet */
#define HHI_NAND_CLK_CNTL 0x25C /* 0x97 offset in data sheet */
@@ -158,8 +157,10 @@
#define CLKID_VDEC_1_DIV 152
#define CLKID_VDEC_HEVC_SEL 154
#define CLKID_VDEC_HEVC_DIV 155
+#define CLKID_GEN_CLK_SEL 157
+#define CLKID_GEN_CLK_DIV 158
-#define NR_CLKS 157
+#define NR_CLKS 160
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
new file mode 100644
index 000000000000..bc64019b8eeb
--- /dev/null
+++ b/drivers/clk/meson/sclk-div.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ *
+ * Sample clock generator divider:
+ * This HW divider gates with value 0 but is otherwise a zero based divider:
+ *
+ * val >= 1
+ * divider = val + 1
+ *
+ * The duty cycle may also be set for the LR clock variant. The duty cycle
+ * ratio is:
+ *
+ * hi = [0 - val]
+ * duty_cycle = (1 + hi) / (1 + val)
+ */
+
+#include "clkc-audio.h"
+
+static inline struct meson_sclk_div_data *
+meson_sclk_div_data(struct clk_regmap *clk)
+{
+ return (struct meson_sclk_div_data *)clk->data;
+}
+
+static int sclk_div_maxval(struct meson_sclk_div_data *sclk)
+{
+ return (1 << sclk->div.width) - 1;
+}
+
+static int sclk_div_maxdiv(struct meson_sclk_div_data *sclk)
+{
+ return sclk_div_maxval(sclk) + 1;
+}
+
+static int sclk_div_getdiv(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate, int maxdiv)
+{
+ int div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);
+
+ return clamp(div, 2, maxdiv);
+}
+
+static int sclk_div_bestdiv(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate,
+ struct meson_sclk_div_data *sclk)
+{
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ int bestdiv = 0, i;
+ unsigned long maxdiv, now, parent_now;
+ unsigned long best = 0, best_parent = 0;
+
+ if (!rate)
+ rate = 1;
+
+ maxdiv = sclk_div_maxdiv(sclk);
+
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT))
+ return sclk_div_getdiv(hw, rate, *prate, maxdiv);
+
+ /*
+ * The maximum divider we can use without overflowing
+ * unsigned long in rate * i below
+ */
+ maxdiv = min(ULONG_MAX / rate, maxdiv);
+
+ for (i = 2; i <= maxdiv; i++) {
+ /*
+		 * It is the ideal case if the requested rate can be
+		 * divided from the parent clock without changing the
+		 * parent rate, so return the divider immediately.
+ */
+ if (rate * i == *prate)
+ return i;
+
+ parent_now = clk_hw_round_rate(parent, rate * i);
+ now = DIV_ROUND_UP_ULL((u64)parent_now, i);
+
+ if (abs(rate - now) < abs(rate - best)) {
+ bestdiv = i;
+ best = now;
+ best_parent = parent_now;
+ }
+ }
+
+ if (!bestdiv)
+ bestdiv = sclk_div_maxdiv(sclk);
+ else
+ *prate = best_parent;
+
+ return bestdiv;
+}
+
+static long sclk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+ int div;
+
+ div = sclk_div_bestdiv(hw, rate, prate, sclk);
+
+ return DIV_ROUND_UP_ULL((u64)*prate, div);
+}
+
+static void sclk_apply_ratio(struct clk_regmap *clk,
+ struct meson_sclk_div_data *sclk)
+{
+ unsigned int hi = DIV_ROUND_CLOSEST(sclk->cached_div *
+ sclk->cached_duty.num,
+ sclk->cached_duty.den);
+
+ if (hi)
+ hi -= 1;
+
+ meson_parm_write(clk->map, &sclk->hi, hi);
+}
+
+static int sclk_div_set_duty_cycle(struct clk_hw *hw,
+ struct clk_duty *duty)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ if (MESON_PARM_APPLICABLE(&sclk->hi)) {
+ memcpy(&sclk->cached_duty, duty, sizeof(*duty));
+ sclk_apply_ratio(clk, sclk);
+ }
+
+ return 0;
+}
+
+static int sclk_div_get_duty_cycle(struct clk_hw *hw,
+ struct clk_duty *duty)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+ int hi;
+
+ if (!MESON_PARM_APPLICABLE(&sclk->hi)) {
+ duty->num = 1;
+ duty->den = 2;
+ return 0;
+ }
+
+ hi = meson_parm_read(clk->map, &sclk->hi);
+ duty->num = hi + 1;
+ duty->den = sclk->cached_div;
+ return 0;
+}
+
+static void sclk_apply_divider(struct clk_regmap *clk,
+ struct meson_sclk_div_data *sclk)
+{
+ if (MESON_PARM_APPLICABLE(&sclk->hi))
+ sclk_apply_ratio(clk, sclk);
+
+ meson_parm_write(clk->map, &sclk->div, sclk->cached_div - 1);
+}
+
+static int sclk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+ unsigned long maxdiv = sclk_div_maxdiv(sclk);
+
+ sclk->cached_div = sclk_div_getdiv(hw, rate, prate, maxdiv);
+
+ if (clk_hw_is_enabled(hw))
+ sclk_apply_divider(clk, sclk);
+
+ return 0;
+}
+
+static unsigned long sclk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ return DIV_ROUND_UP_ULL((u64)prate, sclk->cached_div);
+}
+
+static int sclk_div_enable(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ sclk_apply_divider(clk, sclk);
+
+ return 0;
+}
+
+static void sclk_div_disable(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ meson_parm_write(clk->map, &sclk->div, 0);
+}
+
+static int sclk_div_is_enabled(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+
+ if (meson_parm_read(clk->map, &sclk->div))
+ return 1;
+
+ return 0;
+}
+
+static void sclk_div_init(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
+ unsigned int val;
+
+ val = meson_parm_read(clk->map, &sclk->div);
+
+ /* if the divider is initially disabled, assume max */
+ if (!val)
+ sclk->cached_div = sclk_div_maxdiv(sclk);
+ else
+ sclk->cached_div = val + 1;
+
+ sclk_div_get_duty_cycle(hw, &sclk->cached_duty);
+}
+
+const struct clk_ops meson_sclk_div_ops = {
+ .recalc_rate = sclk_div_recalc_rate,
+ .round_rate = sclk_div_round_rate,
+ .set_rate = sclk_div_set_rate,
+ .enable = sclk_div_enable,
+ .disable = sclk_div_disable,
+ .is_enabled = sclk_div_is_enabled,
+ .get_duty_cycle = sclk_div_get_duty_cycle,
+ .set_duty_cycle = sclk_div_set_duty_cycle,
+ .init = sclk_div_init,
+};
+EXPORT_SYMBOL_GPL(meson_sclk_div_ops);
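A worked example of the divider and duty-cycle encoding handled above: with val = 7 in the divider field the divider is 8; a requested 1/2 duty cycle makes sclk_apply_ratio() store hi = DIV_ROUND_CLOSEST(8 * 1, 2) - 1 = 3, and sclk_div_get_duty_cycle() then reads back num = hi + 1 = 4 over den = 8, i.e. 50%:

	div = val + 1;				/* 7 -> 8 */
	hi  = DIV_ROUND_CLOSEST(div * 1, 2) - 1;	/* duty 1/2 -> 3 */
	/* read back: (hi + 1) / div == 4 / 8 == 50% */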
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 44e4e27eddad..499f5962c8b0 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Marvell Armada 37xx SoC Peripheral clocks
*
@@ -5,10 +6,6 @@
*
* Gregory CLEMENT <gregory.clement@free-electrons.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2 or later. This program is licensed "as is"
- * without any warranty of any kind, whether express or implied.
- *
* Most of the peripheral clocks can be modelled like this:
* _____ _______ _______
* TBG-A-P --| | | | | | ______
@@ -419,7 +416,6 @@ static unsigned int armada_3700_pm_dvfs_get_cpu_parent(struct regmap *base)
static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
{
struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
- int num_parents = clk_hw_get_num_parents(hw);
u32 val;
if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) {
@@ -429,9 +425,6 @@ static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
val &= pm_cpu->mask_mux;
}
- if (val >= num_parents)
- return -EINVAL;
-
return val;
}
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
index 6416c1f8e632..e88f8e01fe3a 100644
--- a/drivers/clk/pxa/clk-pxa25x.c
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -292,8 +292,10 @@ static void __init pxa25x_register_plls(void)
{
clk_register_fixed_rate(NULL, "osc_3_6864mhz", NULL,
CLK_GET_RATE_NOCACHE, 3686400);
- clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE, 32768);
+ clkdev_pxa_register(CLK_OSC32k768, "osc_32_768khz", NULL,
+ clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+ CLK_GET_RATE_NOCACHE,
+ 32768));
clk_register_fixed_rate(NULL, "clk_dummy", NULL, 0, 0);
clk_register_fixed_factor(NULL, "ppll_95_85mhz", "osc_3_6864mhz",
0, 26, 1);
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 25a30194d27a..d40b63e7bbce 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -314,9 +314,10 @@ static void __init pxa27x_register_plls(void)
clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
CLK_GET_RATE_NOCACHE,
13 * MHz);
- clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE,
- 32768 * KHz);
+ clkdev_pxa_register(CLK_OSC32k768, "osc_32_768khz", NULL,
+ clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+ CLK_GET_RATE_NOCACHE,
+ 32768 * KHz));
clk_register_fixed_rate(NULL, "clk_dummy", NULL, 0, 0);
clk_register_fixed_factor(NULL, "ppll_312mhz", "osc_13mhz", 0, 24, 1);
}
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 2d126df2bccd..7aa120c3bd08 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -286,9 +286,10 @@ static void __init pxa3xx_register_plls(void)
clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
CLK_GET_RATE_NOCACHE,
13 * MHz);
- clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE,
- 32768);
+ clkdev_pxa_register(CLK_OSC32k768, "osc_32_768khz", NULL,
+ clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+ CLK_GET_RATE_NOCACHE,
+ 32768));
clk_register_fixed_rate(NULL, "ring_osc_120mhz", NULL,
CLK_GET_RATE_NOCACHE,
120 * MHz);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 9c3480dcc38a..064768699fe7 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -59,6 +59,15 @@ config QCOM_CLK_SMD_RPM
Say Y if you want to support the clocks exposed by the RPM on
platforms such as apq8016, apq8084, msm8974 etc.
+config QCOM_CLK_RPMH
+ tristate "RPMh Clock Driver"
+ depends on COMMON_CLK_QCOM && QCOM_RPMH
+ help
+ RPMh manages shared resources on some Qualcomm Technologies, Inc.
+ SoCs. It accepts requests from other hardware subsystems via RSC.
+ Say Y if you want to support the clocks exposed by RPMh on
+ platforms such as SDM845.
+
config APQ_GCC_8084
tristate "APQ8084 Global Clock Controller"
select QCOM_GDSC
@@ -245,6 +254,16 @@ config SDM_VIDEOCC_845
Say Y if you want to support video devices and functionality such as
video encode and decode.
+config SDM_DISPCC_845
+ tristate "SDM845 Display Clock Controller"
+ select SDM_GCC_845
+ depends on COMMON_CLK_QCOM
+ help
+	  Support for the display clock controller on Qualcomm Technologies, Inc.
+ SDM845 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SPMI_PMIC_CLKDIV
tristate "SPMI PMIC clkdiv Support"
depends on (COMMON_CLK_QCOM && SPMI) || COMPILE_TEST
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 762c01137c2f..21a45035930d 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -37,7 +37,9 @@ obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
+obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 3c49a60072f1..a91d97cecbad 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index f981b486c468..66755f0f84fc 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved. */
#ifndef __QCOM_CLK_ALPHA_PLL_H__
#define __QCOM_CLK_ALPHA_PLL_H__
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index c58c5538b1b6..bc2205c450b6 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 1702efb1c511..b3561e0a3984 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved. */
#ifndef __QCOM_CLK_BRANCH_H__
#define __QCOM_CLK_BRANCH_H__
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index b209a2fe86b9..dbd5a9e83554 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -7,6 +7,8 @@
#include <linux/clk-provider.h>
#include "clk-regmap.h"
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
struct freq_tbl {
unsigned long freq;
u8 src;
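The new F() helper packs a half-integer pre-divider: the RCG hardware field stores 2 * divider - 1, which is why the macro doubles h. With the entries used by the SDM845 display tables later in this diff, F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0) encodes a pre_div of 2 * 3 - 1 = 5, and the fractional 3.5 in F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0) encodes 2 * 3.5 - 1 = 6.

	/* F(f, s, h, m, n): pre_div = 2*h - 1, so h = 3 -> 5 and h = 3.5 -> 6 */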
diff --git a/drivers/clk/qcom/clk-regmap.c b/drivers/clk/qcom/clk-regmap.c
index 1c856d330733..ce80db27ccf2 100644
--- a/drivers/clk/qcom/clk-regmap.c
+++ b/drivers/clk/qcom/clk-regmap.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/device.h>
diff --git a/drivers/clk/qcom/clk-regmap.h b/drivers/clk/qcom/clk-regmap.h
index 90d95cd11ec6..6cfc1bccb255 100644
--- a/drivers/clk/qcom/clk-regmap.h
+++ b/drivers/clk/qcom/clk-regmap.h
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved. */
#ifndef __QCOM_CLK_REGMAP_H__
#define __QCOM_CLK_REGMAP_H__
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
new file mode 100644
index 000000000000..9f4fc7773fb2
--- /dev/null
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+
+#include <dt-bindings/clock/qcom,rpmh.h>
+
+#define CLK_RPMH_ARC_EN_OFFSET 0
+#define CLK_RPMH_VRM_EN_OFFSET 4
+
+/**
+ * struct clk_rpmh - individual rpmh clock data structure
+ * @hw: handle between common and hardware-specific interfaces
+ * @res_name: resource name for the rpmh clock
+ * @div: clock divider to compute the clock rate
+ * @res_addr: base address of the rpmh resource within the RPMh
+ * @res_on_val: rpmh clock enable value
+ * @state: rpmh clock requested state
+ * @aggr_state: rpmh clock aggregated state
+ * @last_sent_aggr_state: rpmh clock last aggr state sent to RPMh
+ * @valid_state_mask: mask to determine the state of the rpmh clock
+ * @dev: device to which it is attached
+ * @peer: pointer to the sibling rpmh clock
+ */
+struct clk_rpmh {
+ struct clk_hw hw;
+ const char *res_name;
+ u8 div;
+ u32 res_addr;
+ u32 res_on_val;
+ u32 state;
+ u32 aggr_state;
+ u32 last_sent_aggr_state;
+ u32 valid_state_mask;
+ struct device *dev;
+ struct clk_rpmh *peer;
+};
+
+struct clk_rpmh_desc {
+ struct clk_hw **clks;
+ size_t num_clks;
+};
+
+static DEFINE_MUTEX(rpmh_clk_lock);
+
+#define __DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \
+ _res_en_offset, _res_on, _div) \
+ static struct clk_rpmh _platform##_##_name_active; \
+ static struct clk_rpmh _platform##_##_name = { \
+ .res_name = _res_name, \
+ .res_addr = _res_en_offset, \
+ .res_on_val = _res_on, \
+ .div = _div, \
+ .peer = &_platform##_##_name_active, \
+ .valid_state_mask = (BIT(RPMH_WAKE_ONLY_STATE) | \
+ BIT(RPMH_ACTIVE_ONLY_STATE) | \
+ BIT(RPMH_SLEEP_STATE)), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpmh_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpmh _platform##_##_name_active = { \
+ .res_name = _res_name, \
+ .res_addr = _res_en_offset, \
+ .res_on_val = _res_on, \
+ .div = _div, \
+ .peer = &_platform##_##_name, \
+ .valid_state_mask = (BIT(RPMH_WAKE_ONLY_STATE) | \
+ BIT(RPMH_ACTIVE_ONLY_STATE)), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpmh_ops, \
+ .name = #_name_active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define DEFINE_CLK_RPMH_ARC(_platform, _name, _name_active, _res_name, \
+ _res_on, _div) \
+ __DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \
+ CLK_RPMH_ARC_EN_OFFSET, _res_on, _div)
+
+#define DEFINE_CLK_RPMH_VRM(_platform, _name, _name_active, _res_name, \
+ _div) \
+ __DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \
+ CLK_RPMH_VRM_EN_OFFSET, 1, _div)
+
+static inline struct clk_rpmh *to_clk_rpmh(struct clk_hw *_hw)
+{
+ return container_of(_hw, struct clk_rpmh, hw);
+}
+
+static inline bool has_state_changed(struct clk_rpmh *c, u32 state)
+{
+ return (c->last_sent_aggr_state & BIT(state))
+ != (c->aggr_state & BIT(state));
+}
+
+static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
+{
+ struct tcs_cmd cmd = { 0 };
+ u32 cmd_state, on_val;
+ enum rpmh_state state = RPMH_SLEEP_STATE;
+ int ret;
+
+ cmd.addr = c->res_addr;
+ cmd_state = c->aggr_state;
+ on_val = c->res_on_val;
+
+ for (; state <= RPMH_ACTIVE_ONLY_STATE; state++) {
+ if (has_state_changed(c, state)) {
+ if (cmd_state & BIT(state))
+ cmd.data = on_val;
+
+ ret = rpmh_write_async(c->dev, state, &cmd, 1);
+ if (ret) {
+ dev_err(c->dev, "set %s state of %s failed: (%d)\n",
+ !state ? "sleep" :
+ state == RPMH_WAKE_ONLY_STATE ?
+ "wake" : "active", c->res_name, ret);
+ return ret;
+ }
+ }
+ }
+
+ c->last_sent_aggr_state = c->aggr_state;
+ c->peer->last_sent_aggr_state = c->last_sent_aggr_state;
+
+ return 0;
+}
+
+/*
+ * Update state and aggregate state per the enable request, then send to RPMh.
+ */
+static int clk_rpmh_aggregate_state_send_command(struct clk_rpmh *c,
+ bool enable)
+{
+ int ret;
+
+	/* Nothing to do if the clock is already in the requested state */
+ if (enable == c->state)
+ return 0;
+
+ c->state = enable ? c->valid_state_mask : 0;
+ c->aggr_state = c->state | c->peer->state;
+ c->peer->aggr_state = c->aggr_state;
+
+ ret = clk_rpmh_send_aggregate_command(c);
+ if (!ret)
+ return 0;
+
+ if (ret && enable)
+ c->state = 0;
+ else if (ret)
+ c->state = c->valid_state_mask;
+
+ WARN(1, "clk: %s failed to %s\n", c->res_name,
+ enable ? "enable" : "disable");
+ return ret;
+}
+
+static int clk_rpmh_prepare(struct clk_hw *hw)
+{
+ struct clk_rpmh *c = to_clk_rpmh(hw);
+ int ret = 0;
+
+ mutex_lock(&rpmh_clk_lock);
+ ret = clk_rpmh_aggregate_state_send_command(c, true);
+ mutex_unlock(&rpmh_clk_lock);
+
+ return ret;
+};
+
+static void clk_rpmh_unprepare(struct clk_hw *hw)
+{
+ struct clk_rpmh *c = to_clk_rpmh(hw);
+
+ mutex_lock(&rpmh_clk_lock);
+ clk_rpmh_aggregate_state_send_command(c, false);
+ mutex_unlock(&rpmh_clk_lock);
+};
+
+static unsigned long clk_rpmh_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct clk_rpmh *r = to_clk_rpmh(hw);
+
+ /*
+	 * RPMh clocks have a fixed rate, derived from the parent rate and the static divider.
+ */
+ return prate / r->div;
+}
+
+static const struct clk_ops clk_rpmh_ops = {
+ .prepare = clk_rpmh_prepare,
+ .unprepare = clk_rpmh_unprepare,
+ .recalc_rate = clk_rpmh_recalc_rate,
+};
+
+/* Resource name must match resource id present in cmd-db. */
+DEFINE_CLK_RPMH_ARC(sdm845, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
+DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2);
+DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
+DEFINE_CLK_RPMH_VRM(sdm845, rf_clk1, rf_clk1_ao, "rfclka1", 1);
+DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1);
+DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1);
+
+static struct clk_hw *sdm845_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &sdm845_ln_bb_clk3_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdm845_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
+ .clks = sdm845_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sdm845_rpmh_clocks),
+};
+
+static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct clk_rpmh_desc *rpmh = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= rpmh->num_clks) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return rpmh->clks[idx];
+}
+
+static int clk_rpmh_probe(struct platform_device *pdev)
+{
+ struct clk_hw **hw_clks;
+ struct clk_rpmh *rpmh_clk;
+ const struct clk_rpmh_desc *desc;
+ int ret, i;
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -ENODEV;
+
+ hw_clks = desc->clks;
+
+ for (i = 0; i < desc->num_clks; i++) {
+ u32 res_addr;
+
+ rpmh_clk = to_clk_rpmh(hw_clks[i]);
+ res_addr = cmd_db_read_addr(rpmh_clk->res_name);
+ if (!res_addr) {
+ dev_err(&pdev->dev, "missing RPMh resource address for %s\n",
+ rpmh_clk->res_name);
+ return -ENODEV;
+ }
+ rpmh_clk->res_addr += res_addr;
+ rpmh_clk->dev = &pdev->dev;
+
+ ret = devm_clk_hw_register(&pdev->dev, hw_clks[i]);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register %s\n",
+ hw_clks[i]->init->name);
+ return ret;
+ }
+ }
+
+	/* cast away const to silence the compiler warning */
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_rpmh_hw_get,
+ (void *)desc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add clock provider\n");
+ return ret;
+ }
+
+ dev_dbg(&pdev->dev, "Registered RPMh clocks\n");
+
+ return 0;
+}
+
+static const struct of_device_id clk_rpmh_match_table[] = {
+ { .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
+ { }
+};
+MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
+
+static struct platform_driver clk_rpmh_driver = {
+ .probe = clk_rpmh_probe,
+ .driver = {
+ .name = "clk-rpmh",
+ .of_match_table = clk_rpmh_match_table,
+ },
+};
+
+static int __init clk_rpmh_init(void)
+{
+ return platform_driver_register(&clk_rpmh_driver);
+}
+subsys_initcall(clk_rpmh_init);
+
+static void __exit clk_rpmh_exit(void)
+{
+ platform_driver_unregister(&clk_rpmh_driver);
+}
+module_exit(clk_rpmh_exit);
+
+MODULE_DESCRIPTION("QCOM RPMh Clock Driver");
+MODULE_LICENSE("GPL v2");
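To make the peer aggregation above concrete, an assumed trace for the bi_tcxo / bi_tcxo_ao pair (the values are illustrative):

	/* enable bi_tcxo_ao, the active-only clock, while its peer is off */
	c->state      = BIT(RPMH_WAKE_ONLY_STATE) | BIT(RPMH_ACTIVE_ONLY_STATE);
	c->aggr_state = c->state | c->peer->state;	/* peer->state == 0 */
	/* clk_rpmh_send_aggregate_command() only writes the state sets whose
	 * bit changed, so the sleep set is untouched and the XO may still
	 * drop when the whole subsystem sleeps */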
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 39ce64c2783b..db9b2471ac40 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/export.h>
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 00196ee15e73..4aa33ee70bae 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -1,15 +1,6 @@
-/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved. */
+
#ifndef __QCOM_CLK_COMMON_H__
#define __QCOM_CLK_COMMON_H__
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
new file mode 100644
index 000000000000..0cc4909b5dbe
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -0,0 +1,685 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_0[] = {
+ "bi_tcxo",
+ "dsi0_phy_pll_out_byteclk",
+ "dsi1_phy_pll_out_byteclk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_2[] = {
+ "bi_tcxo",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_MAIN, 4 },
+ { P_GPLL0_OUT_MAIN_DIV, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_3[] = {
+ "bi_tcxo",
+ "disp_cc_pll0",
+ "gcc_disp_gpll0_clk_src",
+ "gcc_disp_gpll0_div_clk_src",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_4[] = {
+ "bi_tcxo",
+ "dsi0_phy_pll_out_dsiclk",
+ "dsi1_phy_pll_out_dsiclk",
+ "core_bi_pll_test_se",
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_pll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x20d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_names = disp_cc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x20ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_names = disp_cc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x2108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_names = disp_cc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x2120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_names = disp_cc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
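+/*
+ * F() encodes the pre-divider as 2 * h - 1 (see the shared definition
+ * this series consolidates), so half-integer dividers are fine: e.g.
+ * 171428571 Hz below is GPLL0 / 3.5, assuming the usual 600 MHz GPLL0
+ * on SDM845.
+ */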
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_GPLL0_OUT_MAIN, 7, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(344000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(430000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x2088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_names = disp_cc_parent_names_3,
+ .num_parents = 5,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x2058,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_names = disp_cc_parent_names_4,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x2070,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_names = disp_cc_parent_names_4,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(344000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(430000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x20a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_names = disp_cc_parent_names_3,
+ .num_parents = 5,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x20b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_names = disp_cc_parent_names_2,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x4004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_axi_clk = {
+ .halt_reg = 0x4008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x2028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x20e8,
+ .shift = 0,
+ .width = 2,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte0_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x202c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x202c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte0_div_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x2030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x2104,
+ .shift = 0,
+ .width = 2,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte1_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x2034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte1_div_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x2038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_esc0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_esc1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_mdp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_mdp_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_pclk0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Return the HW recalc rate for idle use case */
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x2008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_pclk1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x2014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_rot_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0x5004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0x5008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_vsync_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_vsync_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x3000,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *disp_cc_sdm845_clocks[] = {
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AXI_CLK] = &disp_cc_mdss_axi_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] =
+ &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] =
+ &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_sdm845_resets[] = {
+ [DISP_CC_MDSS_RSCC_BCR] = { 0x5000 },
+};
+
+static struct gdsc *disp_cc_sdm845_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+};
+
+static const struct regmap_config disp_cc_sdm845_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_sdm845_desc = {
+ .config = &disp_cc_sdm845_regmap_config,
+ .clks = disp_cc_sdm845_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sdm845_clocks),
+ .resets = disp_cc_sdm845_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sdm845_resets),
+ .gdscs = disp_cc_sdm845_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sdm845_gdscs),
+};
+
+static const struct of_device_id disp_cc_sdm845_match_table[] = {
+ { .compatible = "qcom,sdm845-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sdm845_match_table);
+
+static int disp_cc_sdm845_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ struct alpha_pll_config disp_cc_pll0_config = {};
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sdm845_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
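+	/*
+	 * Assuming Fabia's 16-bit fractional alpha field and the 19.2 MHz
+	 * bi_tcxo parent, this programs the PLL to roughly
+	 * 19.2 MHz * (0x2c + 0xcaaa / 2^16) ~= 860 MHz -- twice the
+	 * 430 MHz ceiling used in the MDP and rotator frequency tables.
+	 */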
+ disp_cc_pll0_config.l = 0x2c;
+ disp_cc_pll0_config.alpha = 0xcaaa;
+
+ clk_fabia_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+
+ /* Enable hardware clock gating for DSI and MDP clocks */
+ regmap_update_bits(regmap, 0x8000, 0x7f0, 0x7f0);
+
+ return qcom_cc_really_probe(pdev, &disp_cc_sdm845_desc, regmap);
+}
+
+static struct platform_driver disp_cc_sdm845_driver = {
+ .probe = disp_cc_sdm845_probe,
+ .driver = {
+ .name = "disp_cc-sdm845",
+ .of_match_table = disp_cc_sdm845_match_table,
+ },
+};
+
+static int __init disp_cc_sdm845_init(void)
+{
+ return platform_driver_register(&disp_cc_sdm845_driver);
+}
+subsys_initcall(disp_cc_sdm845_init);
+
+static void __exit disp_cc_sdm845_exit(void)
+{
+ platform_driver_unregister(&disp_cc_sdm845_driver);
+}
+module_exit(disp_cc_sdm845_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI DISPCC SDM845 Driver");
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index 486d9610355c..9c99a71ea71e 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -106,8 +106,6 @@ static const char * const gcc_xo_pcie_sleep[] = {
"sleep_clk_src",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll gpll0 = {
.l_reg = 0x0004,
.m_reg = 0x0008,
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index 46cb256b4aa2..8902ad42ba87 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -179,8 +179,6 @@ static const char * const gcc_xo_ddr_500_200[] = {
"ddrpllapss",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = {
F(48000000, P_XO, 1, 0, 0),
F(200000000, P_FEPLL200, 1, 0, 0),
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 28eb200d0f1e..5f61225657ab 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -1220,7 +1220,6 @@ static struct clk_rcg sdc1_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1269,7 +1268,6 @@ static struct clk_rcg sdc3_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1353,7 +1351,6 @@ static struct clk_rcg tsif_ref_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 0462f4a8c932..505c6263141d 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -32,8 +32,6 @@
#include "clk-regmap-mux.h"
#include "reset.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_XO,
P_GPLL0,
diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c
index b99dd406e907..849046fbed6d 100644
--- a/drivers/clk/qcom/gcc-mdm9615.c
+++ b/drivers/clk/qcom/gcc-mdm9615.c
@@ -947,7 +947,6 @@ static struct clk_rcg sdc1_src = {
.parent_names = gcc_cxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -996,7 +995,6 @@ static struct clk_rcg sdc2_src = {
.parent_names = gcc_cxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index c347a0d44bc8..7e930e25c79f 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -1558,7 +1558,6 @@ static struct clk_rcg sdc1_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1607,7 +1606,6 @@ static struct clk_rcg sdc2_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1656,7 +1654,6 @@ static struct clk_rcg sdc3_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1705,7 +1702,6 @@ static struct clk_rcg sdc4_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1754,7 +1750,6 @@ static struct clk_rcg sdc5_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index d6c7f50ba86a..ac2b0aa1e8b5 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -264,8 +264,6 @@ static const char * const gcc_xo_gpll1_emclk_sleep[] = {
"sleep_clk",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll gpll0 = {
.l_reg = 0x21004,
.m_reg = 0x21008,
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index eb551c75fba6..fd495e0471bb 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -1628,7 +1628,6 @@ static struct clk_rcg sdc1_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1677,7 +1676,6 @@ static struct clk_rcg sdc2_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1726,7 +1724,6 @@ static struct clk_rcg sdc3_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1775,7 +1772,6 @@ static struct clk_rcg sdc4_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
@@ -1824,7 +1820,6 @@ static struct clk_rcg sdc5_src = {
.parent_names = gcc_pxo_pll8,
.num_parents = 2,
.ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
},
}
};
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index 348e30da4f18..08e2900d172c 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -62,8 +62,6 @@ static const char * const gcc_xo_gpll0_gpll4[] = {
"gpll4_vote",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll gpll0 = {
.l_reg = 0x0004,
.m_reg = 0x0008,
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 1e38efc37180..53f0f369a33e 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -57,8 +57,6 @@ static const char * const gcc_xo_gpll0_gpll4[] = {
"gpll4",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_fixed_factor xo = {
.mult = 1,
.div = 1,
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index ff8d66fd94e6..9a3290fdd01b 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -32,8 +32,6 @@
#include "reset.h"
#include "gdsc.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_XO,
P_GPLL0,
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 78d87f5c7098..9f0ae403d5f5 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -25,8 +25,6 @@
#include "reset.h"
#include "gdsc.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_AUD_REF_CLK,
P_CORE_BI_PLL_TEST_SE,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index e78e6f5b99fc..fa1a196350f1 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -25,8 +25,6 @@
#include "gdsc.h"
#include "reset.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_BI_TCXO,
P_AUD_REF_CLK,
@@ -1103,6 +1101,7 @@ static struct clk_branch gcc_camera_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camera_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1129,6 +1128,7 @@ static struct clk_branch gcc_camera_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camera_xo_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1270,6 +1270,7 @@ static struct clk_branch gcc_disp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_disp_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1328,6 +1329,7 @@ static struct clk_branch gcc_disp_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_disp_xo_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1397,6 +1399,7 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_cfg_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -2985,6 +2988,7 @@ static struct clk_branch gcc_video_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_video_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -3011,6 +3015,7 @@ static struct clk_branch gcc_video_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_video_xo_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -3049,6 +3054,36 @@ static struct clk_branch gcc_vs_ctrl_clk = {
},
};
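+
+/*
+ * The CPUSS clocks below are marked CLK_IS_CRITICAL so the clk
+ * framework keeps them running; they were previously force-enabled
+ * with raw regmap writes from probe.
+ */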
+static struct clk_branch gcc_cpuss_dvm_bus_clk = {
+ .halt_reg = 0x48190,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48190,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_dvm_bus_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_gnoc_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc pcie_0_gdsc = {
.gdscr = 0x6b004,
.pd = {
@@ -3344,6 +3379,8 @@ static struct clk_regmap *gcc_sdm845_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
[GPLL4] = &gpll4.clkr,
+ [GCC_CPUSS_DVM_BUS_CLK] = &gcc_cpuss_dvm_bus_clk.clkr,
+ [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
};
static const struct qcom_reset_map gcc_sdm845_resets[] = {
@@ -3433,10 +3470,6 @@ static int gcc_sdm845_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x09ffc, 0x3, 0x3);
regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
- /* Enable CPUSS clocks */
- regmap_update_bits(regmap, 0x48190, BIT(0), 0x1);
- regmap_update_bits(regmap, 0x52004, BIT(22), 0x1);
-
return qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
}
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 30777f9f1a43..4ce1d7c88377 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -219,8 +219,6 @@ static const char * const mmcc_xo_mmpll0_1_4_gpll1_0_sleep[] = {
"sleep_clk_src",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll mmpll0 = {
.l_reg = 0x0004,
.m_reg = 0x0008,
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 715e7cd94125..91818516c3e0 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -184,8 +184,6 @@ static const char * const mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
"dsi1pllbyte",
};
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
static struct clk_pll mmpll0 = {
.l_reg = 0x0004,
.m_reg = 0x0008,
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 4b20d1b67a1b..7d4ee109435c 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -34,8 +34,6 @@
#include "reset.h"
#include "gdsc.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_XO,
P_MMPLL0,
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 9073b7a710ac..5d6a7724a194 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -18,8 +18,6 @@
#include "clk-pll.h"
#include "gdsc.h"
-#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-
enum {
P_BI_TCXO,
P_CORE_BI_PLL_TEST_SE,
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index f9ba71311727..9022bbe1297e 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -21,6 +21,7 @@ config CLK_RENESAS
select CLK_R8A77980 if ARCH_R8A77980
select CLK_R8A77990 if ARCH_R8A77990
select CLK_R8A77995 if ARCH_R8A77995
+ select CLK_R9A06G032 if ARCH_R9A06G032
select CLK_SH73A0 if ARCH_SH73A0
if CLK_RENESAS
@@ -125,6 +126,11 @@ config CLK_R8A77995
bool "R-Car D3 clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R9A06G032
+ bool "Renesas R9A06G032 clock driver"
+ help
+	  This is a driver for the R9A06G032 clocks.
+
config CLK_SH73A0
bool "SH-Mobile AG5 clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index fe5bac9215e5..e4aa3d6143d2 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_CLK_R8A77970) += r8a77970-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77980) += r8a77980-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77990) += r8a77990-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o
+obj-$(CONFIG_CLK_R9A06G032) += r9a06g032-clocks.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
# Family
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index 775b0ceaa337..a85dd50e8911 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -103,6 +103,7 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_SDSRC, 0x26c),
DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cr", R8A7795_CLK_CR, CLK_PLL1_DIV4, 2, 1),
DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1),
DEF_DIV6P1("canfd", R8A7795_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
@@ -132,6 +133,7 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S0D3),
DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S0D3),
DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S0D3),
+ DEF_MOD("sceg-pub", 229, R8A7795_CLK_CR),
DEF_MOD("cmt3", 300, R8A7795_CLK_R),
DEF_MOD("cmt2", 301, R8A7795_CLK_R),
DEF_MOD("cmt1", 302, R8A7795_CLK_R),
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
new file mode 100644
index 000000000000..a0b6ecdc63dd
--- /dev/null
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -0,0 +1,893 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R9A06G032 clock driver
+ *
+ * Copyright (C) 2018 Renesas Electronics Europe Limited
+ *
+ * Michel Pollet <michel.pollet@bp.renesas.com>, <buserror@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <dt-bindings/clock/r9a06g032-sysctrl.h>
+
+struct r9a06g032_gate {
+ u16 gate, reset, ready, midle,
+ scon, mirack, mistat;
+};
+
+/* This is used to describe a clock for instantiation */
+struct r9a06g032_clkdesc {
+ const char *name;
+ uint32_t type: 3;
+ uint32_t index: 8;
+ uint32_t source : 8; /* source index + 1 (0 == none) */
+	/* type-specific data, interpreted according to 'type' */
+ union {
+ struct r9a06g032_gate gate;
+ /* for dividers */
+ struct {
+ unsigned int div_min : 10, div_max : 10, reg: 10;
+ u16 div_table[4];
+ };
+ /* For fixed-factor ones */
+ struct {
+ u16 div, mul;
+ };
+ unsigned int factor;
+ unsigned int frequency;
+ /* for dual gate */
+ struct {
+ uint16_t group : 1, index: 3;
+ u16 sel, g1, r1, g2, r2;
+ } dual;
+ };
+} __packed;
+
+#define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \
+ { .gate = _clk, .reset = _rst, \
+ .ready = _rdy, .midle = _midle, \
+ .scon = _scon, .mirack = _mirack, .mistat = _mistat }
+#define D_GATE(_idx, _n, _src, ...) \
+ { .type = K_GATE, .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, .name = _n, \
+ .gate = I_GATE(__VA_ARGS__), }
+#define D_ROOT(_idx, _n, _mul, _div) \
+ { .type = K_FFC, .index = R9A06G032_##_idx, .name = _n, \
+ .div = _div, .mul = _mul }
+#define D_FFC(_idx, _n, _src, _div) \
+ { .type = K_FFC, .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, .name = _n, \
+ .div = _div, .mul = 1}
+#define D_DIV(_idx, _n, _src, _reg, _min, _max, ...) \
+ { .type = K_DIV, .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, .name = _n, \
+ .reg = _reg, .div_min = _min, .div_max = _max, \
+ .div_table = { __VA_ARGS__ } }
+#define D_UGATE(_idx, _n, _src, _g, _gi, _g1, _r1, _g2, _r2) \
+ { .type = K_DUALGATE, .index = R9A06G032_##_idx, \
+ .source = 1 + R9A06G032_##_src, .name = _n, \
+ .dual = { .group = _g, .index = _gi, \
+ .g1 = _g1, .r1 = _r1, .g2 = _g2, .r2 = _r2 }, }
+
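+/*
+ * For reference, an entry such as
+ *   D_GATE(CLK_ADC, "clk_adc", DIV_ADC, 0x1ea, 0x1eb, 0, 0, 0, 0, 0)
+ * expands to { .type = K_GATE, .index = R9A06G032_CLK_ADC,
+ * .source = 1 + R9A06G032_DIV_ADC, .name = "clk_adc",
+ * .gate = { .gate = 0x1ea, .reset = 0x1eb } }: a gate fed by div_adc,
+ * with no ready/idle/scon bits.
+ */
+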
+enum { K_GATE = 0, K_FFC, K_DIV, K_BITSEL, K_DUALGATE };
+
+/* Internal clock IDs */
+#define R9A06G032_CLKOUT 0
+#define R9A06G032_CLKOUT_D10 2
+#define R9A06G032_CLKOUT_D16 3
+#define R9A06G032_CLKOUT_D160 4
+#define R9A06G032_CLKOUT_D1OR2 5
+#define R9A06G032_CLKOUT_D20 6
+#define R9A06G032_CLKOUT_D40 7
+#define R9A06G032_CLKOUT_D5 8
+#define R9A06G032_CLKOUT_D8 9
+#define R9A06G032_DIV_ADC 10
+#define R9A06G032_DIV_I2C 11
+#define R9A06G032_DIV_NAND 12
+#define R9A06G032_DIV_P1_PG 13
+#define R9A06G032_DIV_P2_PG 14
+#define R9A06G032_DIV_P3_PG 15
+#define R9A06G032_DIV_P4_PG 16
+#define R9A06G032_DIV_P5_PG 17
+#define R9A06G032_DIV_P6_PG 18
+#define R9A06G032_DIV_QSPI0 19
+#define R9A06G032_DIV_QSPI1 20
+#define R9A06G032_DIV_REF_SYNC 21
+#define R9A06G032_DIV_SDIO0 22
+#define R9A06G032_DIV_SDIO1 23
+#define R9A06G032_DIV_SWITCH 24
+#define R9A06G032_DIV_UART 25
+#define R9A06G032_DIV_MOTOR 64
+#define R9A06G032_CLK_DDRPHY_PLLCLK_D4 78
+#define R9A06G032_CLK_ECAT100_D4 79
+#define R9A06G032_CLK_HSR100_D2 80
+#define R9A06G032_CLK_REF_SYNC_D4 81
+#define R9A06G032_CLK_REF_SYNC_D8 82
+#define R9A06G032_CLK_SERCOS100_D2 83
+#define R9A06G032_DIV_CA7 84
+
+#define R9A06G032_UART_GROUP_012 154
+#define R9A06G032_UART_GROUP_34567 155
+
+#define R9A06G032_CLOCK_COUNT (R9A06G032_UART_GROUP_34567 + 1)
+
+static const struct r9a06g032_clkdesc r9a06g032_clocks[] __initconst = {
+ D_ROOT(CLKOUT, "clkout", 25, 1),
+ D_ROOT(CLK_PLL_USB, "clk_pll_usb", 12, 10),
+ D_FFC(CLKOUT_D10, "clkout_d10", CLKOUT, 10),
+ D_FFC(CLKOUT_D16, "clkout_d16", CLKOUT, 16),
+ D_FFC(CLKOUT_D160, "clkout_d160", CLKOUT, 160),
+ D_DIV(CLKOUT_D1OR2, "clkout_d1or2", CLKOUT, 0, 1, 2),
+ D_FFC(CLKOUT_D20, "clkout_d20", CLKOUT, 20),
+ D_FFC(CLKOUT_D40, "clkout_d40", CLKOUT, 40),
+ D_FFC(CLKOUT_D5, "clkout_d5", CLKOUT, 5),
+ D_FFC(CLKOUT_D8, "clkout_d8", CLKOUT, 8),
+ D_DIV(DIV_ADC, "div_adc", CLKOUT, 77, 50, 250),
+ D_DIV(DIV_I2C, "div_i2c", CLKOUT, 78, 12, 16),
+ D_DIV(DIV_NAND, "div_nand", CLKOUT, 82, 12, 32),
+ D_DIV(DIV_P1_PG, "div_p1_pg", CLKOUT, 68, 12, 200),
+ D_DIV(DIV_P2_PG, "div_p2_pg", CLKOUT, 62, 12, 128),
+ D_DIV(DIV_P3_PG, "div_p3_pg", CLKOUT, 64, 8, 128),
+ D_DIV(DIV_P4_PG, "div_p4_pg", CLKOUT, 66, 8, 128),
+ D_DIV(DIV_P5_PG, "div_p5_pg", CLKOUT, 71, 10, 40),
+ D_DIV(DIV_P6_PG, "div_p6_pg", CLKOUT, 18, 12, 64),
+ D_DIV(DIV_QSPI0, "div_qspi0", CLKOUT, 73, 3, 7),
+ D_DIV(DIV_QSPI1, "div_qspi1", CLKOUT, 25, 3, 7),
+ D_DIV(DIV_REF_SYNC, "div_ref_sync", CLKOUT, 56, 2, 16, 2, 4, 8, 16),
+ D_DIV(DIV_SDIO0, "div_sdio0", CLKOUT, 74, 20, 128),
+ D_DIV(DIV_SDIO1, "div_sdio1", CLKOUT, 75, 20, 128),
+ D_DIV(DIV_SWITCH, "div_switch", CLKOUT, 37, 5, 40),
+ D_DIV(DIV_UART, "div_uart", CLKOUT, 79, 12, 128),
+ D_GATE(CLK_25_PG4, "clk_25_pg4", CLKOUT_D40, 0x749, 0x74a, 0x74b, 0, 0xae3, 0, 0),
+ D_GATE(CLK_25_PG5, "clk_25_pg5", CLKOUT_D40, 0x74c, 0x74d, 0x74e, 0, 0xae4, 0, 0),
+ D_GATE(CLK_25_PG6, "clk_25_pg6", CLKOUT_D40, 0x74f, 0x750, 0x751, 0, 0xae5, 0, 0),
+ D_GATE(CLK_25_PG7, "clk_25_pg7", CLKOUT_D40, 0x752, 0x753, 0x754, 0, 0xae6, 0, 0),
+ D_GATE(CLK_25_PG8, "clk_25_pg8", CLKOUT_D40, 0x755, 0x756, 0x757, 0, 0xae7, 0, 0),
+ D_GATE(CLK_ADC, "clk_adc", DIV_ADC, 0x1ea, 0x1eb, 0, 0, 0, 0, 0),
+ D_GATE(CLK_ECAT100, "clk_ecat100", CLKOUT_D10, 0x405, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_HSR100, "clk_hsr100", CLKOUT_D10, 0x483, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_I2C0, "clk_i2c0", DIV_I2C, 0x1e6, 0x1e7, 0, 0, 0, 0, 0),
+ D_GATE(CLK_I2C1, "clk_i2c1", DIV_I2C, 0x1e8, 0x1e9, 0, 0, 0, 0, 0),
+ D_GATE(CLK_MII_REF, "clk_mii_ref", CLKOUT_D40, 0x342, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_NAND, "clk_nand", DIV_NAND, 0x284, 0x285, 0, 0, 0, 0, 0),
+ D_GATE(CLK_NOUSBP2_PG6, "clk_nousbp2_pg6", DIV_P2_PG, 0x774, 0x775, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P1_PG2, "clk_p1_pg2", DIV_P1_PG, 0x862, 0x863, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P1_PG3, "clk_p1_pg3", DIV_P1_PG, 0x864, 0x865, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P1_PG4, "clk_p1_pg4", DIV_P1_PG, 0x866, 0x867, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P4_PG3, "clk_p4_pg3", DIV_P4_PG, 0x824, 0x825, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P4_PG4, "clk_p4_pg4", DIV_P4_PG, 0x826, 0x827, 0, 0, 0, 0, 0),
+ D_GATE(CLK_P6_PG1, "clk_p6_pg1", DIV_P6_PG, 0x8a0, 0x8a1, 0x8a2, 0, 0xb60, 0, 0),
+ D_GATE(CLK_P6_PG2, "clk_p6_pg2", DIV_P6_PG, 0x8a3, 0x8a4, 0x8a5, 0, 0xb61, 0, 0),
+ D_GATE(CLK_P6_PG3, "clk_p6_pg3", DIV_P6_PG, 0x8a6, 0x8a7, 0x8a8, 0, 0xb62, 0, 0),
+ D_GATE(CLK_P6_PG4, "clk_p6_pg4", DIV_P6_PG, 0x8a9, 0x8aa, 0x8ab, 0, 0xb63, 0, 0),
+ D_GATE(CLK_QSPI0, "clk_qspi0", DIV_QSPI0, 0x2a4, 0x2a5, 0, 0, 0, 0, 0),
+ D_GATE(CLK_QSPI1, "clk_qspi1", DIV_QSPI1, 0x484, 0x485, 0, 0, 0, 0, 0),
+ D_GATE(CLK_RGMII_REF, "clk_rgmii_ref", CLKOUT_D8, 0x340, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_RMII_REF, "clk_rmii_ref", CLKOUT_D20, 0x341, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SDIO0, "clk_sdio0", DIV_SDIO0, 0x64, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SDIO1, "clk_sdio1", DIV_SDIO1, 0x644, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SERCOS100, "clk_sercos100", CLKOUT_D10, 0x425, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SLCD, "clk_slcd", DIV_P1_PG, 0x860, 0x861, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI0, "clk_spi0", DIV_P3_PG, 0x7e0, 0x7e1, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI1, "clk_spi1", DIV_P3_PG, 0x7e2, 0x7e3, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI2, "clk_spi2", DIV_P3_PG, 0x7e4, 0x7e5, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI3, "clk_spi3", DIV_P3_PG, 0x7e6, 0x7e7, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI4, "clk_spi4", DIV_P4_PG, 0x820, 0x821, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SPI5, "clk_spi5", DIV_P4_PG, 0x822, 0x823, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SWITCH, "clk_switch", DIV_SWITCH, 0x982, 0x983, 0, 0, 0, 0, 0),
+ D_DIV(DIV_MOTOR, "div_motor", CLKOUT_D5, 84, 2, 8),
+ D_GATE(HCLK_ECAT125, "hclk_ecat125", CLKOUT_D8, 0x400, 0x401, 0, 0x402, 0, 0x440, 0x441),
+ D_GATE(HCLK_PINCONFIG, "hclk_pinconfig", CLKOUT_D40, 0x740, 0x741, 0x742, 0, 0xae0, 0, 0),
+ D_GATE(HCLK_SERCOS, "hclk_sercos", CLKOUT_D10, 0x420, 0x422, 0, 0x421, 0, 0x460, 0x461),
+ D_GATE(HCLK_SGPIO2, "hclk_sgpio2", DIV_P5_PG, 0x8c3, 0x8c4, 0x8c5, 0, 0xb41, 0, 0),
+ D_GATE(HCLK_SGPIO3, "hclk_sgpio3", DIV_P5_PG, 0x8c6, 0x8c7, 0x8c8, 0, 0xb42, 0, 0),
+ D_GATE(HCLK_SGPIO4, "hclk_sgpio4", DIV_P5_PG, 0x8c9, 0x8ca, 0x8cb, 0, 0xb43, 0, 0),
+ D_GATE(HCLK_TIMER0, "hclk_timer0", CLKOUT_D40, 0x743, 0x744, 0x745, 0, 0xae1, 0, 0),
+ D_GATE(HCLK_TIMER1, "hclk_timer1", CLKOUT_D40, 0x746, 0x747, 0x748, 0, 0xae2, 0, 0),
+ D_GATE(HCLK_USBF, "hclk_usbf", CLKOUT_D8, 0xe3, 0, 0, 0xe4, 0, 0x102, 0x103),
+ D_GATE(HCLK_USBH, "hclk_usbh", CLKOUT_D8, 0xe0, 0xe1, 0, 0xe2, 0, 0x100, 0x101),
+ D_GATE(HCLK_USBPM, "hclk_usbpm", CLKOUT_D8, 0xe5, 0, 0, 0, 0, 0, 0),
+ D_GATE(CLK_48_PG_F, "clk_48_pg_f", CLK_48, 0x78c, 0x78d, 0, 0x78e, 0, 0xb04, 0xb05),
+ D_GATE(CLK_48_PG4, "clk_48_pg4", CLK_48, 0x789, 0x78a, 0x78b, 0, 0xb03, 0, 0),
+ D_FFC(CLK_DDRPHY_PLLCLK_D4, "clk_ddrphy_pllclk_d4", CLK_DDRPHY_PLLCLK, 4),
+ D_FFC(CLK_ECAT100_D4, "clk_ecat100_d4", CLK_ECAT100, 4),
+ D_FFC(CLK_HSR100_D2, "clk_hsr100_d2", CLK_HSR100, 2),
+ D_FFC(CLK_REF_SYNC_D4, "clk_ref_sync_d4", CLK_REF_SYNC, 4),
+ D_FFC(CLK_REF_SYNC_D8, "clk_ref_sync_d8", CLK_REF_SYNC, 8),
+ D_FFC(CLK_SERCOS100_D2, "clk_sercos100_d2", CLK_SERCOS100, 2),
+ D_DIV(DIV_CA7, "div_ca7", CLK_REF_SYNC, 57, 1, 4, 1, 2, 4),
+ D_GATE(HCLK_CAN0, "hclk_can0", CLK_48, 0x783, 0x784, 0x785, 0, 0xb01, 0, 0),
+ D_GATE(HCLK_CAN1, "hclk_can1", CLK_48, 0x786, 0x787, 0x788, 0, 0xb02, 0, 0),
+ D_GATE(HCLK_DELTASIGMA, "hclk_deltasigma", DIV_MOTOR, 0x1ef, 0x1f0, 0x1f1, 0, 0, 0, 0),
+ D_GATE(HCLK_PWMPTO, "hclk_pwmpto", DIV_MOTOR, 0x1ec, 0x1ed, 0x1ee, 0, 0, 0, 0),
+ D_GATE(HCLK_RSV, "hclk_rsv", CLK_48, 0x780, 0x781, 0x782, 0, 0xb00, 0, 0),
+ D_GATE(HCLK_SGPIO0, "hclk_sgpio0", DIV_MOTOR, 0x1e0, 0x1e1, 0x1e2, 0, 0, 0, 0),
+ D_GATE(HCLK_SGPIO1, "hclk_sgpio1", DIV_MOTOR, 0x1e3, 0x1e4, 0x1e5, 0, 0, 0, 0),
+ D_DIV(RTOS_MDC, "rtos_mdc", CLK_REF_SYNC, 100, 80, 640, 80, 160, 320, 640),
+ D_GATE(CLK_CM3, "clk_cm3", CLK_REF_SYNC_D4, 0xba0, 0xba1, 0, 0xba2, 0, 0xbc0, 0xbc1),
+ D_GATE(CLK_DDRC, "clk_ddrc", CLK_DDRPHY_PLLCLK_D4, 0x323, 0x324, 0, 0, 0, 0, 0),
+ D_GATE(CLK_ECAT25, "clk_ecat25", CLK_ECAT100_D4, 0x403, 0x404, 0, 0, 0, 0, 0),
+ D_GATE(CLK_HSR50, "clk_hsr50", CLK_HSR100_D2, 0x484, 0x485, 0, 0, 0, 0, 0),
+ D_GATE(CLK_HW_RTOS, "clk_hw_rtos", CLK_REF_SYNC_D4, 0xc60, 0xc61, 0, 0, 0, 0, 0),
+ D_GATE(CLK_SERCOS50, "clk_sercos50", CLK_SERCOS100_D2, 0x424, 0x423, 0, 0, 0, 0, 0),
+ D_GATE(HCLK_ADC, "hclk_adc", CLK_REF_SYNC_D8, 0x1af, 0x1b0, 0x1b1, 0, 0, 0, 0),
+ D_GATE(HCLK_CM3, "hclk_cm3", CLK_REF_SYNC_D4, 0xc20, 0xc21, 0xc22, 0, 0, 0, 0),
+ D_GATE(HCLK_CRYPTO_EIP150, "hclk_crypto_eip150", CLK_REF_SYNC_D4, 0x123, 0x124, 0x125, 0, 0x142, 0, 0),
+ D_GATE(HCLK_CRYPTO_EIP93, "hclk_crypto_eip93", CLK_REF_SYNC_D4, 0x120, 0x121, 0, 0x122, 0, 0x140, 0x141),
+ D_GATE(HCLK_DDRC, "hclk_ddrc", CLK_REF_SYNC_D4, 0x320, 0x322, 0, 0x321, 0, 0x3a0, 0x3a1),
+ D_GATE(HCLK_DMA0, "hclk_dma0", CLK_REF_SYNC_D4, 0x260, 0x261, 0x262, 0x263, 0x2c0, 0x2c1, 0x2c2),
+ D_GATE(HCLK_DMA1, "hclk_dma1", CLK_REF_SYNC_D4, 0x264, 0x265, 0x266, 0x267, 0x2c3, 0x2c4, 0x2c5),
+ D_GATE(HCLK_GMAC0, "hclk_gmac0", CLK_REF_SYNC_D4, 0x360, 0x361, 0x362, 0x363, 0x3c0, 0x3c1, 0x3c2),
+ D_GATE(HCLK_GMAC1, "hclk_gmac1", CLK_REF_SYNC_D4, 0x380, 0x381, 0x382, 0x383, 0x3e0, 0x3e1, 0x3e2),
+ D_GATE(HCLK_GPIO0, "hclk_gpio0", CLK_REF_SYNC_D4, 0x212, 0x213, 0x214, 0, 0, 0, 0),
+ D_GATE(HCLK_GPIO1, "hclk_gpio1", CLK_REF_SYNC_D4, 0x215, 0x216, 0x217, 0, 0, 0, 0),
+ D_GATE(HCLK_GPIO2, "hclk_gpio2", CLK_REF_SYNC_D4, 0x229, 0x22a, 0x22b, 0, 0, 0, 0),
+ D_GATE(HCLK_HSR, "hclk_hsr", CLK_HSR100_D2, 0x480, 0x482, 0, 0x481, 0, 0x4c0, 0x4c1),
+ D_GATE(HCLK_I2C0, "hclk_i2c0", CLK_REF_SYNC_D8, 0x1a9, 0x1aa, 0x1ab, 0, 0, 0, 0),
+ D_GATE(HCLK_I2C1, "hclk_i2c1", CLK_REF_SYNC_D8, 0x1ac, 0x1ad, 0x1ae, 0, 0, 0, 0),
+ D_GATE(HCLK_LCD, "hclk_lcd", CLK_REF_SYNC_D4, 0x7a0, 0x7a1, 0x7a2, 0, 0xb20, 0, 0),
+ D_GATE(HCLK_MSEBI_M, "hclk_msebi_m", CLK_REF_SYNC_D4, 0x164, 0x165, 0x166, 0, 0x183, 0, 0),
+ D_GATE(HCLK_MSEBI_S, "hclk_msebi_s", CLK_REF_SYNC_D4, 0x160, 0x161, 0x162, 0x163, 0x180, 0x181, 0x182),
+ D_GATE(HCLK_NAND, "hclk_nand", CLK_REF_SYNC_D4, 0x280, 0x281, 0x282, 0x283, 0x2e0, 0x2e1, 0x2e2),
+ D_GATE(HCLK_PG_I, "hclk_pg_i", CLK_REF_SYNC_D4, 0x7ac, 0x7ad, 0, 0x7ae, 0, 0xb24, 0xb25),
+ D_GATE(HCLK_PG19, "hclk_pg19", CLK_REF_SYNC_D4, 0x22c, 0x22d, 0x22e, 0, 0, 0, 0),
+ D_GATE(HCLK_PG20, "hclk_pg20", CLK_REF_SYNC_D4, 0x22f, 0x230, 0x231, 0, 0, 0, 0),
+ D_GATE(HCLK_PG3, "hclk_pg3", CLK_REF_SYNC_D4, 0x7a6, 0x7a7, 0x7a8, 0, 0xb22, 0, 0),
+ D_GATE(HCLK_PG4, "hclk_pg4", CLK_REF_SYNC_D4, 0x7a9, 0x7aa, 0x7ab, 0, 0xb23, 0, 0),
+ D_GATE(HCLK_QSPI0, "hclk_qspi0", CLK_REF_SYNC_D4, 0x2a0, 0x2a1, 0x2a2, 0x2a3, 0x300, 0x301, 0x302),
+ D_GATE(HCLK_QSPI1, "hclk_qspi1", CLK_REF_SYNC_D4, 0x480, 0x481, 0x482, 0x483, 0x4c0, 0x4c1, 0x4c2),
+ D_GATE(HCLK_ROM, "hclk_rom", CLK_REF_SYNC_D4, 0xaa0, 0xaa1, 0xaa2, 0, 0xb80, 0, 0),
+ D_GATE(HCLK_RTC, "hclk_rtc", CLK_REF_SYNC_D8, 0xa00, 0, 0, 0, 0, 0, 0),
+ D_GATE(HCLK_SDIO0, "hclk_sdio0", CLK_REF_SYNC_D4, 0x60, 0x61, 0x62, 0x63, 0x80, 0x81, 0x82),
+ D_GATE(HCLK_SDIO1, "hclk_sdio1", CLK_REF_SYNC_D4, 0x640, 0x641, 0x642, 0x643, 0x660, 0x661, 0x662),
+ D_GATE(HCLK_SEMAP, "hclk_semap", CLK_REF_SYNC_D4, 0x7a3, 0x7a4, 0x7a5, 0, 0xb21, 0, 0),
+ D_GATE(HCLK_SPI0, "hclk_spi0", CLK_REF_SYNC_D4, 0x200, 0x201, 0x202, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI1, "hclk_spi1", CLK_REF_SYNC_D4, 0x203, 0x204, 0x205, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI2, "hclk_spi2", CLK_REF_SYNC_D4, 0x206, 0x207, 0x208, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI3, "hclk_spi3", CLK_REF_SYNC_D4, 0x209, 0x20a, 0x20b, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI4, "hclk_spi4", CLK_REF_SYNC_D4, 0x20c, 0x20d, 0x20e, 0, 0, 0, 0),
+ D_GATE(HCLK_SPI5, "hclk_spi5", CLK_REF_SYNC_D4, 0x20f, 0x210, 0x211, 0, 0, 0, 0),
+ D_GATE(HCLK_SWITCH, "hclk_switch", CLK_REF_SYNC_D4, 0x980, 0, 0x981, 0, 0, 0, 0),
+ D_GATE(HCLK_SWITCH_RG, "hclk_switch_rg", CLK_REF_SYNC_D4, 0xc40, 0xc41, 0xc42, 0, 0, 0, 0),
+ D_GATE(HCLK_UART0, "hclk_uart0", CLK_REF_SYNC_D8, 0x1a0, 0x1a1, 0x1a2, 0, 0, 0, 0),
+ D_GATE(HCLK_UART1, "hclk_uart1", CLK_REF_SYNC_D8, 0x1a3, 0x1a4, 0x1a5, 0, 0, 0, 0),
+ D_GATE(HCLK_UART2, "hclk_uart2", CLK_REF_SYNC_D8, 0x1a6, 0x1a7, 0x1a8, 0, 0, 0, 0),
+ D_GATE(HCLK_UART3, "hclk_uart3", CLK_REF_SYNC_D4, 0x218, 0x219, 0x21a, 0, 0, 0, 0),
+ D_GATE(HCLK_UART4, "hclk_uart4", CLK_REF_SYNC_D4, 0x21b, 0x21c, 0x21d, 0, 0, 0, 0),
+ D_GATE(HCLK_UART5, "hclk_uart5", CLK_REF_SYNC_D4, 0x220, 0x221, 0x222, 0, 0, 0, 0),
+ D_GATE(HCLK_UART6, "hclk_uart6", CLK_REF_SYNC_D4, 0x223, 0x224, 0x225, 0, 0, 0, 0),
+ D_GATE(HCLK_UART7, "hclk_uart7", CLK_REF_SYNC_D4, 0x226, 0x227, 0x228, 0, 0, 0, 0),
+ /*
+ * These are not hardware clocks, but are needed to handle the special
+ * case where we have a 'selector bit' that doesn't just change the
+ * parent for a clock, but also the gate it's supposed to use.
+ */
+ {
+ .index = R9A06G032_UART_GROUP_012,
+ .name = "uart_group_012",
+ .type = K_BITSEL,
+ .source = 1 + R9A06G032_DIV_UART,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */
+ .dual.sel = ((0xec / 4) << 5) | 24,
+ .dual.group = 0,
+ },
+ {
+ .index = R9A06G032_UART_GROUP_34567,
+ .name = "uart_group_34567",
+ .type = K_BITSEL,
+ .source = 1 + R9A06G032_DIV_P2_PG,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */
+ .dual.sel = ((0x34 / 4) << 5) | 30,
+ .dual.group = 1,
+ },
+ D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0, 0, 0x1b2, 0x1b3, 0x1b4, 0x1b5),
+ D_UGATE(CLK_UART1, "clk_uart1", UART_GROUP_012, 0, 1, 0x1b6, 0x1b7, 0x1b8, 0x1b9),
+ D_UGATE(CLK_UART2, "clk_uart2", UART_GROUP_012, 0, 2, 0x1ba, 0x1bb, 0x1bc, 0x1bd),
+ D_UGATE(CLK_UART3, "clk_uart3", UART_GROUP_34567, 1, 0, 0x760, 0x761, 0x762, 0x763),
+ D_UGATE(CLK_UART4, "clk_uart4", UART_GROUP_34567, 1, 1, 0x764, 0x765, 0x766, 0x767),
+ D_UGATE(CLK_UART5, "clk_uart5", UART_GROUP_34567, 1, 2, 0x768, 0x769, 0x76a, 0x76b),
+ D_UGATE(CLK_UART6, "clk_uart6", UART_GROUP_34567, 1, 3, 0x76c, 0x76d, 0x76e, 0x76f),
+ D_UGATE(CLK_UART7, "clk_uart7", UART_GROUP_34567, 1, 4, 0x770, 0x771, 0x772, 0x773),
+};
+
+struct r9a06g032_priv {
+ struct clk_onecell_data data;
+	spinlock_t lock; /* protects concurrent access to gates */
+ void __iomem *reg;
+};
+
+/*
+ * Register/bit pairs are encoded as a u16: bits [15:5] hold the index
+ * of the 32-bit word within the sysctrl block, bits [4:0] the bit
+ * number within that word.
+ */
+static void
+clk_rdesc_set(struct r9a06g032_priv *clocks,
+ u16 one, unsigned int on)
+{
+ u32 __iomem *reg = clocks->reg + (4 * (one >> 5));
+ u32 val = readl(reg);
+
+ val = (val & ~(1U << (one & 0x1f))) | ((!!on) << (one & 0x1f));
+ writel(val, reg);
+}
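+
+/*
+ * For example, the encoded value 0x1ea (clk_adc's gate) addresses bit
+ * 0x1ea & 0x1f = 10 of the 32-bit word at index 0x1ea >> 5 = 0xf,
+ * i.e. byte offset 0x3c from the sysctrl base.
+ */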
+
+static int
+clk_rdesc_get(struct r9a06g032_priv *clocks,
+ uint16_t one)
+{
+ u32 __iomem *reg = clocks->reg + (4 * (one >> 5));
+ u32 val = readl(reg);
+
+ return !!(val & (1U << (one & 0x1f)));
+}
+
+/*
+ * This implements the R9A06G032 clock gate 'driver'. We cannot use the
+ * generic clk gate framework, as the gates on the R9A06G032 have a special
+ * enabling sequence, so we use this little proxy instead.
+ */
+struct r9a06g032_clk_gate {
+ struct clk_hw hw;
+ struct r9a06g032_priv *clocks;
+ u16 index;
+
+ struct r9a06g032_gate gate;
+};
+
+#define to_r9a06g032_gate(_hw) container_of(_hw, struct r9a06g032_clk_gate, hw)
+
+static void
+r9a06g032_clk_gate_set(struct r9a06g032_priv *clocks,
+ struct r9a06g032_gate *g, int on)
+{
+ unsigned long flags;
+
+ WARN_ON(!g->gate);
+
+ spin_lock_irqsave(&clocks->lock, flags);
+ clk_rdesc_set(clocks, g->gate, on);
+ /* De-assert reset */
+ if (g->reset)
+ clk_rdesc_set(clocks, g->reset, 1);
+ spin_unlock_irqrestore(&clocks->lock, flags);
+
+ /* Hardware manual recommends 5us delay after enabling clock & reset */
+ udelay(5);
+
+	/*
+	 * If the peripheral is memory-mapped (i.e. an AXI slave), there is an
+	 * associated SLVRDY bit in the System Controller that needs to be set
+	 * so that the FlexWAY bus fabric passes on the read/write requests.
+	 */
+ if (g->ready || g->midle) {
+ spin_lock_irqsave(&clocks->lock, flags);
+ if (g->ready)
+ clk_rdesc_set(clocks, g->ready, on);
+ /* Clear 'Master Idle Request' bit */
+ if (g->midle)
+ clk_rdesc_set(clocks, g->midle, !on);
+ spin_unlock_irqrestore(&clocks->lock, flags);
+ }
+ /* Note: We don't wait for FlexWAY Socket Connection signal */
+}
+
+static int r9a06g032_clk_gate_enable(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_gate *g = to_r9a06g032_gate(hw);
+
+ r9a06g032_clk_gate_set(g->clocks, &g->gate, 1);
+ return 0;
+}
+
+static void r9a06g032_clk_gate_disable(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_gate *g = to_r9a06g032_gate(hw);
+
+ r9a06g032_clk_gate_set(g->clocks, &g->gate, 0);
+}
+
+static int r9a06g032_clk_gate_is_enabled(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_gate *g = to_r9a06g032_gate(hw);
+
+	/*
+	 * If the clock is held in reset, the gate bit may be set even
+	 * though the clock is not actually running.
+	 */
+ if (g->gate.reset && !clk_rdesc_get(g->clocks, g->gate.reset))
+ return 0;
+
+ return clk_rdesc_get(g->clocks, g->gate.gate);
+}
+
+static const struct clk_ops r9a06g032_clk_gate_ops = {
+ .enable = r9a06g032_clk_gate_enable,
+ .disable = r9a06g032_clk_gate_disable,
+ .is_enabled = r9a06g032_clk_gate_is_enabled,
+};
+
+static struct clk *
+r9a06g032_register_gate(struct r9a06g032_priv *clocks,
+ const char *parent_name,
+ const struct r9a06g032_clkdesc *desc)
+{
+ struct clk *clk;
+ struct r9a06g032_clk_gate *g;
+ struct clk_init_data init;
+
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return NULL;
+
+ init.name = desc->name;
+ init.ops = &r9a06g032_clk_gate_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ g->clocks = clocks;
+ g->index = desc->index;
+ g->gate = desc->gate;
+ g->hw.init = &init;
+
+ /*
+	 * Important: some clocks are already in use by the CM3 core; we
+	 * have to assume they are not Linux's to play with, so don't let
+	 * them be disabled at the end of boot.
+ */
+ if (r9a06g032_clk_gate_is_enabled(&g->hw)) {
+ init.flags |= CLK_IS_CRITICAL;
+ pr_debug("%s was enabled, making read-only\n", desc->name);
+ }
+
+ clk = clk_register(NULL, &g->hw);
+ if (IS_ERR(clk)) {
+ kfree(g);
+ return NULL;
+ }
+ return clk;
+}
+
+struct r9a06g032_clk_div {
+ struct clk_hw hw;
+ struct r9a06g032_priv *clocks;
+ u16 index;
+ u16 reg;
+ u16 min, max;
+ u8 table_size;
+ u16 table[8]; /* we know there are no more than 8 */
+};
+
+#define to_r9a06g032_div(_hw) \
+ container_of(_hw, struct r9a06g032_clk_div, hw)
+
+static unsigned long
+r9a06g032_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct r9a06g032_clk_div *clk = to_r9a06g032_div(hw);
+ u32 __iomem *reg = clk->clocks->reg + (4 * clk->reg);
+ u32 div = readl(reg);
+
+ if (div < clk->min)
+ div = clk->min;
+ else if (div > clk->max)
+ div = clk->max;
+ return DIV_ROUND_UP(parent_rate, div);
+}
+
+/*
+ * Attempt to find a divider within the [min, max] range; if a table of
+ * fixed dividers was specified for this register, pick the fixed
+ * divider that yields the rate closest to the target frequency.
+ */
+static long
+r9a06g032_div_clamp_div(struct r9a06g032_clk_div *clk,
+ unsigned long rate, unsigned long prate)
+{
+ /* + 1 to cope with rates that have the remainder dropped */
+ u32 div = DIV_ROUND_UP(prate, rate + 1);
+ int i;
+
+ if (div <= clk->min)
+ return clk->min;
+ if (div >= clk->max)
+ return clk->max;
+
+ for (i = 0; clk->table_size && i < clk->table_size - 1; i++) {
+ if (div >= clk->table[i] && div <= clk->table[i + 1]) {
+ unsigned long m = rate -
+ DIV_ROUND_UP(prate, clk->table[i]);
+ unsigned long p =
+ DIV_ROUND_UP(prate, clk->table[i + 1]) -
+ rate;
+ /*
+ * select the divider that generates
+ * the value closest to the ideal frequency
+ */
+ div = p >= m ? clk->table[i] : clk->table[i + 1];
+ return div;
+ }
+ }
+ return div;
+}
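+
+/*
+ * For example, div_ref_sync (min 2, max 16, table 2/4/8/16): a request
+ * that computes to a divider of 3 falls between the fixed values 2 and
+ * 4, and whichever of those two yields the rate closest to the request
+ * wins; anything computing below 2 or above 16 is simply clamped.
+ */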
+
+static long
+r9a06g032_div_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *prate)
+{
+ struct r9a06g032_clk_div *clk = to_r9a06g032_div(hw);
+ u32 div = DIV_ROUND_UP(*prate, rate);
+
+ pr_devel("%s %pC %ld (prate %ld) (wanted div %u)\n", __func__,
+ hw->clk, rate, *prate, div);
+ pr_devel(" min %d (%ld) max %d (%ld)\n",
+ clk->min, DIV_ROUND_UP(*prate, clk->min),
+ clk->max, DIV_ROUND_UP(*prate, clk->max));
+
+ div = r9a06g032_div_clamp_div(clk, rate, *prate);
+ /*
+	 * This is a hack. Currently the serial driver asks for a clock rate
+	 * that is 16 times the baud rate -- and that is wildly outside the
+	 * range of the UART divider; somehow there is no provision for the
+	 * case of 'leave the divider as-is if out of range'.
+	 * The serial driver *shouldn't* play with these clocks anyway: there
+	 * are several UARTs attached to this divider, and changing it
+	 * impacts them all.
+ */
+ if (clk->index == R9A06G032_DIV_UART) {
+ pr_devel("%s div uart hack!\n", __func__);
+ return clk_get_rate(hw->clk);
+ }
+ pr_devel("%s %pC %ld / %u = %ld\n", __func__, hw->clk,
+ *prate, div, DIV_ROUND_UP(*prate, div));
+ return DIV_ROUND_UP(*prate, div);
+}
+
+static int
+r9a06g032_div_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct r9a06g032_clk_div *clk = to_r9a06g032_div(hw);
+ /* + 1 to cope with rates that have the remainder dropped */
+ u32 div = DIV_ROUND_UP(parent_rate, rate + 1);
+ u32 __iomem *reg = clk->clocks->reg + (4 * clk->reg);
+
+ pr_devel("%s %pC rate %ld parent %ld div %d\n", __func__, hw->clk,
+ rate, parent_rate, div);
+
+ /*
+ * Need to write the bit 31 with the divider value to
+ * latch it. Technically we should wait until it has been
+ * cleared too.
+ * TODO: Find whether this callback is sleepable, in case
+ * the hardware /does/ require some sort of spinloop here.
+ */
+ writel(div | BIT(31), reg);
+
+ return 0;
+}
+
+static const struct clk_ops r9a06g032_clk_div_ops = {
+ .recalc_rate = r9a06g032_div_recalc_rate,
+ .round_rate = r9a06g032_div_round_rate,
+ .set_rate = r9a06g032_div_set_rate,
+};
+
+static struct clk *
+r9a06g032_register_div(struct r9a06g032_priv *clocks,
+ const char *parent_name,
+ const struct r9a06g032_clkdesc *desc)
+{
+ struct r9a06g032_clk_div *div;
+ struct clk *clk;
+ struct clk_init_data init;
+ unsigned int i;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return NULL;
+
+ init.name = desc->name;
+ init.ops = &r9a06g032_clk_div_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ div->clocks = clocks;
+ div->index = desc->index;
+ div->reg = desc->reg;
+ div->hw.init = &init;
+ div->min = desc->div_min;
+ div->max = desc->div_max;
+ /* populate (optional) divider table fixed values */
+ for (i = 0; i < ARRAY_SIZE(div->table) &&
+ i < ARRAY_SIZE(desc->div_table) && desc->div_table[i]; i++) {
+ div->table[div->table_size++] = desc->div_table[i];
+ }
+
+ clk = clk_register(NULL, &div->hw);
+ if (IS_ERR(clk)) {
+ kfree(div);
+ return NULL;
+ }
+ return clk;
+}
+
+/*
+ * This clock provider handles the case of the R9A06G032 where some
+ * peripherals have two potential clock sources and two gates, one for
+ * each source; the clock source used (for all sub-clocks) is selected
+ * by a single bit.
+ * That single bit affects all sub-clocks, so flipping it must change
+ * the active gate (turning the other one off) and force a
+ * recalculation of the rates.
+ *
+ * This is implemented as two clock providers: a 'bitselect' that
+ * handles the switch between the two parents, and a 'dualgate' that
+ * knows which gate to poke at, depending on the parent's bit position.
+ */
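+
+/*
+ * On this SoC the UART groups use this scheme: uart_group_012 and
+ * uart_group_34567 can each feed from their divider (div_uart or
+ * div_p2_pg) or from clk_pll_usb, and every clk_uartN carries one
+ * gate/reset pair per parent (the g1/r1 and g2/r2 fields of D_UGATE).
+ */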
+struct r9a06g032_clk_bitsel {
+ struct clk_hw hw;
+ struct r9a06g032_priv *clocks;
+ u16 index;
+ u16 selector; /* selector register + bit */
+};
+
+#define to_clk_bitselect(_hw) \
+ container_of(_hw, struct r9a06g032_clk_bitsel, hw)
+
+static u8 r9a06g032_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_bitsel *set = to_clk_bitselect(hw);
+
+ return clk_rdesc_get(set->clocks, set->selector);
+}
+
+static int r9a06g032_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct r9a06g032_clk_bitsel *set = to_clk_bitselect(hw);
+
+ /* a single bit in the register selects one of two parent clocks */
+ clk_rdesc_set(set->clocks, set->selector, !!index);
+
+ return 0;
+}
+
+static const struct clk_ops clk_bitselect_ops = {
+ .get_parent = r9a06g032_clk_mux_get_parent,
+ .set_parent = r9a06g032_clk_mux_set_parent,
+};
+
+static struct clk *
+r9a06g032_register_bitsel(struct r9a06g032_priv *clocks,
+ const char *parent_name,
+ const struct r9a06g032_clkdesc *desc)
+{
+ struct clk *clk;
+ struct r9a06g032_clk_bitsel *g;
+ struct clk_init_data init;
+ const char *names[2];
+
+ /* allocate the gate */
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return NULL;
+
+ names[0] = parent_name;
+ names[1] = "clk_pll_usb";
+
+ init.name = desc->name;
+ init.ops = &clk_bitselect_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = names;
+ init.num_parents = 2;
+
+ g->clocks = clocks;
+ g->index = desc->index;
+ g->selector = desc->dual.sel;
+ g->hw.init = &init;
+
+ clk = clk_register(NULL, &g->hw);
+ if (IS_ERR(clk)) {
+ kfree(g);
+ return NULL;
+ }
+ return clk;
+}
+
+struct r9a06g032_clk_dualgate {
+ struct clk_hw hw;
+ struct r9a06g032_priv *clocks;
+ u16 index;
+ u16 selector; /* selector register + bit */
+ struct r9a06g032_gate gate[2];
+};
+
+#define to_clk_dualgate(_hw) \
+ container_of(_hw, struct r9a06g032_clk_dualgate, hw)
+
+static int
+r9a06g032_clk_dualgate_setenable(struct r9a06g032_clk_dualgate *g, int enable)
+{
+ u8 sel_bit = clk_rdesc_get(g->clocks, g->selector);
+
+ /* we always turn off the 'other' gate, regardless */
+ r9a06g032_clk_gate_set(g->clocks, &g->gate[!sel_bit], 0);
+ r9a06g032_clk_gate_set(g->clocks, &g->gate[sel_bit], enable);
+
+ return 0;
+}
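+
+/*
+ * For illustration, the gate states this helper produces, with sel_bit
+ * read back from the selector register:
+ *
+ *	sel_bit	enable	gate[0]	gate[1]
+ *	  0	  1	  on	  off
+ *	  0	  0	  off	  off
+ *	  1	  1	  off	  on
+ *	  1	  0	  off	  off
+ */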
+
+static int r9a06g032_clk_dualgate_enable(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_dualgate *gate = to_clk_dualgate(hw);
+
+ r9a06g032_clk_dualgate_setenable(gate, 1);
+
+ return 0;
+}
+
+static void r9a06g032_clk_dualgate_disable(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_dualgate *gate = to_clk_dualgate(hw);
+
+ r9a06g032_clk_dualgate_setenable(gate, 0);
+}
+
+static int r9a06g032_clk_dualgate_is_enabled(struct clk_hw *hw)
+{
+ struct r9a06g032_clk_dualgate *g = to_clk_dualgate(hw);
+ u8 sel_bit = clk_rdesc_get(g->clocks, g->selector);
+
+ return clk_rdesc_get(g->clocks, g->gate[sel_bit].gate);
+}
+
+static const struct clk_ops r9a06g032_clk_dualgate_ops = {
+ .enable = r9a06g032_clk_dualgate_enable,
+ .disable = r9a06g032_clk_dualgate_disable,
+ .is_enabled = r9a06g032_clk_dualgate_is_enabled,
+};
+
+static struct clk *
+r9a06g032_register_dualgate(struct r9a06g032_priv *clocks,
+ const char *parent_name,
+ const struct r9a06g032_clkdesc *desc,
+ uint16_t sel)
+{
+ struct r9a06g032_clk_dualgate *g;
+ struct clk *clk;
+ struct clk_init_data init;
+
+	/* allocate the dual gate */
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return NULL;
+ g->clocks = clocks;
+ g->index = desc->index;
+ g->selector = sel;
+ g->gate[0].gate = desc->dual.g1;
+ g->gate[0].reset = desc->dual.r1;
+ g->gate[1].gate = desc->dual.g2;
+ g->gate[1].reset = desc->dual.r2;
+
+ init.name = desc->name;
+ init.ops = &r9a06g032_clk_dualgate_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ g->hw.init = &init;
+	/*
+	 * Important: some clocks are already in use by the CM3, so we
+	 * have to assume they are not Linux's to play with, and must not
+	 * try to disable them at the end of boot!
+	 */
+ if (r9a06g032_clk_dualgate_is_enabled(&g->hw)) {
+ init.flags |= CLK_IS_CRITICAL;
+ pr_debug("%s was enabled, making read-only\n", desc->name);
+ }
+
+ clk = clk_register(NULL, &g->hw);
+ if (IS_ERR(clk)) {
+ kfree(g);
+ return NULL;
+ }
+ return clk;
+}
+
+static void r9a06g032_clocks_del_clk_provider(void *data)
+{
+ of_clk_del_provider(data);
+}
+
+static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct r9a06g032_priv *clocks;
+ struct clk **clks;
+ struct clk *mclk;
+ unsigned int i;
+ u16 uart_group_sel[2];
+ int error;
+
+ clocks = devm_kzalloc(dev, sizeof(*clocks), GFP_KERNEL);
+ clks = devm_kcalloc(dev, R9A06G032_CLOCK_COUNT, sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!clocks || !clks)
+ return -ENOMEM;
+
+ spin_lock_init(&clocks->lock);
+
+ clocks->data.clks = clks;
+ clocks->data.clk_num = R9A06G032_CLOCK_COUNT;
+
+ mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(mclk))
+ return PTR_ERR(mclk);
+
+ clocks->reg = of_iomap(np, 0);
+ if (WARN_ON(!clocks->reg))
+ return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(r9a06g032_clocks); ++i) {
+ const struct r9a06g032_clkdesc *d = &r9a06g032_clocks[i];
+ const char *parent_name = d->source ?
+ __clk_get_name(clocks->data.clks[d->source - 1]) :
+ __clk_get_name(mclk);
+ struct clk *clk = NULL;
+
+ switch (d->type) {
+ case K_FFC:
+ clk = clk_register_fixed_factor(NULL, d->name,
+ parent_name, 0,
+ d->mul, d->div);
+ break;
+ case K_GATE:
+ clk = r9a06g032_register_gate(clocks, parent_name, d);
+ break;
+ case K_DIV:
+ clk = r9a06g032_register_div(clocks, parent_name, d);
+ break;
+ case K_BITSEL:
+ /* keep that selector register around */
+ uart_group_sel[d->dual.group] = d->dual.sel;
+ clk = r9a06g032_register_bitsel(clocks, parent_name, d);
+ break;
+ case K_DUALGATE:
+ clk = r9a06g032_register_dualgate(clocks, parent_name,
+ d,
+ uart_group_sel[d->dual.group]);
+ break;
+ }
+ clocks->data.clks[d->index] = clk;
+ }
+ error = of_clk_add_provider(np, of_clk_src_onecell_get, &clocks->data);
+ if (error)
+ return error;
+
+ return devm_add_action_or_reset(dev,
+ r9a06g032_clocks_del_clk_provider, np);
+}
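+
+/*
+ * For reference, a device-tree fragment this driver would bind against
+ * might look as follows; the unit address and clock phandle are invented
+ * for illustration, the authoritative example being the binding document:
+ *
+ *	sysctrl: system-controller@4000c000 {
+ *		compatible = "renesas,r9a06g032-sysctrl";
+ *		reg = <0x4000c000 0x1000>;
+ *		#clock-cells = <1>;
+ *		clocks = <&ext_mclk>;
+ *		clock-names = "mclk";
+ *	};
+ */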
+
+static const struct of_device_id r9a06g032_match[] = {
+ { .compatible = "renesas,r9a06g032-sysctrl" },
+ { }
+};
+
+static struct platform_driver r9a06g032_clock_driver = {
+ .driver = {
+ .name = "renesas,r9a06g032-sysctrl",
+ .of_match_table = r9a06g032_match,
+ },
+};
+
+static int __init r9a06g032_clocks_init(void)
+{
+ return platform_driver_probe(&r9a06g032_clock_driver,
+ r9a06g032_clocks_probe);
+}
+
+subsys_initcall(r9a06g032_clocks_init);
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 98e7b9429b83..ff35ab463a6f 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -6,12 +6,14 @@
obj-y += clk.o
obj-y += clk-pll.o
obj-y += clk-cpu.o
+obj-y += clk-half-divider.o
obj-y += clk-inverter.o
obj-y += clk-mmc-phase.o
obj-y += clk-muxgrf.o
obj-y += clk-ddr.o
obj-$(CONFIG_RESET_CONTROLLER) += softrst.o
+obj-y += clk-px30.o
obj-y += clk-rv1108.o
obj-y += clk-rk3036.o
obj-y += clk-rk3128.o
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
new file mode 100644
index 000000000000..b8da6e799423
--- /dev/null
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
+ */
+
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include "clk.h"
+
+#define div_mask(width) ((1 << (width)) - 1)
+
+static bool _is_best_half_div(unsigned long rate, unsigned long now,
+ unsigned long best, unsigned long flags)
+{
+ if (flags & CLK_DIVIDER_ROUND_CLOSEST)
+ return abs(rate - now) < abs(rate - best);
+
+ return now <= rate && now > best;
+}
+
+static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int val;
+
+ val = clk_readl(divider->reg) >> divider->shift;
+ val &= div_mask(divider->width);
+ val = val * 2 + 3;
+
+ return DIV_ROUND_UP_ULL(((u64)parent_rate * 2), val);
+}
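+
+/*
+ * The register value encodes "half" dividers: val == 0 divides by 1.5,
+ * val == 1 by 2.5, and so on, i.e. the divisor is (val * 2 + 3) / 2.
+ * E.g. with an illustrative 600 MHz parent and val == 1:
+ *
+ *	rate = 600000000 * 2 / (1 * 2 + 3) = 240000000	(600 MHz / 2.5)
+ */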
+
+static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate, u8 width,
+ unsigned long flags)
+{
+ unsigned int i, bestdiv = 0;
+ unsigned long parent_rate, best = 0, now, maxdiv;
+ unsigned long parent_rate_saved = *best_parent_rate;
+
+ if (!rate)
+ rate = 1;
+
+ maxdiv = div_mask(width);
+
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+ parent_rate = *best_parent_rate;
+ bestdiv = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
+ if (bestdiv < 3)
+ bestdiv = 0;
+ else
+ bestdiv = (bestdiv - 3) / 2;
+ bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
+ return bestdiv;
+ }
+
+	/*
+	 * The maximum divider we can use without overflowing
+	 * unsigned long in the rate * (i * 2 + 3) products below
+	 */
+ maxdiv = min(ULONG_MAX / rate, maxdiv);
+
+ for (i = 0; i <= maxdiv; i++) {
+ if (((u64)rate * (i * 2 + 3)) == ((u64)parent_rate_saved * 2)) {
+ /*
+ * It's the most ideal case if the requested rate can be
+ * divided from parent clock without needing to change
+ * parent rate, so return the divider immediately.
+ */
+ *best_parent_rate = parent_rate_saved;
+ return i;
+ }
+ parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
+ ((u64)rate * (i * 2 + 3)) / 2);
+ now = DIV_ROUND_UP_ULL(((u64)parent_rate * 2),
+ (i * 2 + 3));
+
+ if (_is_best_half_div(rate, now, best, flags)) {
+ bestdiv = i;
+ best = now;
+ *best_parent_rate = parent_rate;
+ }
+ }
+
+ if (!bestdiv) {
+ bestdiv = div_mask(width);
+ *best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
+ }
+
+ return bestdiv;
+}
+
+static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int div;
+
+ div = clk_half_divider_bestdiv(hw, rate, prate,
+ divider->width,
+ divider->flags);
+
+ return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3);
+}
+
+static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int value;
+ unsigned long flags = 0;
+ u32 val;
+
+ value = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
+ value = (value - 3) / 2;
+ value = min_t(unsigned int, value, div_mask(divider->width));
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+ else
+ __acquire(divider->lock);
+
+ if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
+ val = div_mask(divider->width) << (divider->shift + 16);
+ } else {
+ val = clk_readl(divider->reg);
+ val &= ~(div_mask(divider->width) << divider->shift);
+ }
+ val |= value << divider->shift;
+ clk_writel(val, divider->reg);
+
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+ else
+ __release(divider->lock);
+
+ return 0;
+}
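+
+/*
+ * Round-trip of the encoding above, with illustrative numbers: asking
+ * for 240 MHz from a 600 MHz parent gives
+ *
+ *	value = DIV_ROUND_UP(600000000 * 2, 240000000) = 5
+ *	value = (5 - 3) / 2 = 1
+ *
+ * which clk_half_divider_recalc_rate() maps back to 600 MHz / 2.5.
+ */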
+
+const struct clk_ops clk_half_divider_ops = {
+ .recalc_rate = clk_half_divider_recalc_rate,
+ .round_rate = clk_half_divider_round_rate,
+ .set_rate = clk_half_divider_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_half_divider_ops);
+
+/*
+ * Register a clock branch.
+ * Most clock branches have a form like
+ *
+ * src1 --|--\
+ * |M |--[GATE]-[DIV]-
+ * src2 --|--/
+ *
+ * sometimes without one of those components.
+ */
+struct clk *rockchip_clk_register_halfdiv(const char *name,
+ const char *const *parent_names,
+ u8 num_parents, void __iomem *base,
+ int muxdiv_offset, u8 mux_shift,
+ u8 mux_width, u8 mux_flags,
+ u8 div_shift, u8 div_width,
+ u8 div_flags, int gate_offset,
+ u8 gate_shift, u8 gate_flags,
+ unsigned long flags,
+ spinlock_t *lock)
+{
+ struct clk *clk;
+ struct clk_mux *mux = NULL;
+ struct clk_gate *gate = NULL;
+ struct clk_divider *div = NULL;
+ const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
+ *gate_ops = NULL;
+
+ if (num_parents > 1) {
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->reg = base + muxdiv_offset;
+ mux->shift = mux_shift;
+ mux->mask = BIT(mux_width) - 1;
+ mux->flags = mux_flags;
+ mux->lock = lock;
+ mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
+ : &clk_mux_ops;
+ }
+
+ if (gate_offset >= 0) {
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto err_gate;
+
+ gate->flags = gate_flags;
+ gate->reg = base + gate_offset;
+ gate->bit_idx = gate_shift;
+ gate->lock = lock;
+ gate_ops = &clk_gate_ops;
+ }
+
+ if (div_width > 0) {
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ goto err_div;
+
+ div->flags = div_flags;
+ div->reg = base + muxdiv_offset;
+ div->shift = div_shift;
+ div->width = div_width;
+ div->lock = lock;
+ div_ops = &clk_half_divider_ops;
+ }
+
+ clk = clk_register_composite(NULL, name, parent_names, num_parents,
+ mux ? &mux->hw : NULL, mux_ops,
+ div ? &div->hw : NULL, div_ops,
+ gate ? &gate->hw : NULL, gate_ops,
+ flags);
+
+ return clk;
+err_div:
+ kfree(gate);
+err_gate:
+ kfree(mux);
+ return ERR_PTR(-ENOMEM);
+}
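+
+/*
+ * A hypothetical call site, purely for illustration (offsets, shifts
+ * and the clock name are invented; real branches are declared through
+ * the COMPOSITE_*_HALFDIV macros in clk.h):
+ *
+ *	clk = rockchip_clk_register_halfdiv("clk_foo_np5", parent_names,
+ *					    2, reg_base, 0x100, 8, 2,
+ *					    CLK_MUX_HIWORD_MASK, 0, 5,
+ *					    CLK_DIVIDER_HIWORD_MASK,
+ *					    0x200, 3, CLK_GATE_HIWORD_MASK,
+ *					    0, &lock);
+ */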
diff --git a/drivers/clk/rockchip/clk-px30.c b/drivers/clk/rockchip/clk-px30.c
new file mode 100644
index 000000000000..601a77f1af78
--- /dev/null
+++ b/drivers/clk/rockchip/clk-px30.c
@@ -0,0 +1,1039 @@
+/*
+ * Copyright (c) 2018 Rockchip Electronics Co. Ltd.
+ * Author: Elaine Zhang<zhangqing@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/clock/px30-cru.h>
+#include "clk.h"
+
+#define PX30_GRF_SOC_STATUS0 0x480
+
+enum px30_plls {
+ apll, dpll, cpll, npll, apll_b_h, apll_b_l,
+};
+
+enum px30_pmu_plls {
+ gpll,
+};
+
+static struct rockchip_pll_rate_table px30_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1488000000, 1, 62, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1464000000, 1, 61, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1440000000, 1, 60, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1392000000, 1, 58, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1368000000, 1, 57, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1344000000, 1, 56, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1320000000, 1, 55, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1272000000, 1, 53, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1248000000, 1, 52, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 2, 99, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0),
+ RK3036_PLL_RATE(984000000, 1, 82, 2, 1, 1, 0),
+ RK3036_PLL_RATE(960000000, 1, 80, 2, 1, 1, 0),
+ RK3036_PLL_RATE(936000000, 1, 78, 2, 1, 1, 0),
+ RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE(900000000, 4, 300, 2, 1, 1, 0),
+ RK3036_PLL_RATE(888000000, 1, 74, 2, 1, 1, 0),
+ RK3036_PLL_RATE(864000000, 1, 72, 2, 1, 1, 0),
+ RK3036_PLL_RATE(840000000, 1, 70, 2, 1, 1, 0),
+ RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE(800000000, 6, 400, 2, 1, 1, 0),
+ RK3036_PLL_RATE(700000000, 6, 350, 2, 1, 1, 0),
+ RK3036_PLL_RATE(696000000, 1, 58, 2, 1, 1, 0),
+ RK3036_PLL_RATE(624000000, 1, 52, 2, 1, 1, 0),
+ RK3036_PLL_RATE(600000000, 1, 75, 3, 1, 1, 0),
+ RK3036_PLL_RATE(594000000, 2, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(504000000, 1, 63, 3, 1, 1, 0),
+ RK3036_PLL_RATE(500000000, 6, 250, 2, 1, 1, 0),
+ RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE(312000000, 1, 52, 2, 2, 1, 0),
+ RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE(96000000, 1, 64, 4, 4, 1, 0),
+ { /* sentinel */ },
+};
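+
+/*
+ * Sanity check of the first entry against the usual RK3036-style PLL
+ * formula, with the 24 MHz xin24m reference these PLLs are parented to:
+ *
+ *	rate = 24 MHz / refdiv * fbdiv / postdiv1 / postdiv2
+ *	     = 24000000 / 1 * 67 / 1 / 1 = 1608000000 Hz
+ */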
+
+#define PX30_DIV_ACLKM_MASK 0x7
+#define PX30_DIV_ACLKM_SHIFT 12
+#define PX30_DIV_PCLK_DBG_MASK 0xf
+#define PX30_DIV_PCLK_DBG_SHIFT 8
+
+#define PX30_CLKSEL0(_aclk_core, _pclk_dbg) \
+{ \
+ .reg = PX30_CLKSEL_CON(0), \
+ .val = HIWORD_UPDATE(_aclk_core, PX30_DIV_ACLKM_MASK, \
+ PX30_DIV_ACLKM_SHIFT) | \
+ HIWORD_UPDATE(_pclk_dbg, PX30_DIV_PCLK_DBG_MASK, \
+ PX30_DIV_PCLK_DBG_SHIFT), \
+}
+
+#define PX30_CPUCLK_RATE(_prate, _aclk_core, _pclk_dbg) \
+{ \
+ .prate = _prate, \
+ .divs = { \
+ PX30_CLKSEL0(_aclk_core, _pclk_dbg), \
+ }, \
+}
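+
+/*
+ * For illustration, PX30_CPUCLK_RATE(1200000000, 1, 5) yields a table
+ * entry that, when armclk runs at 1.2 GHz, writes raw divider values 1
+ * and 5 into the aclk_core and pclk_dbg fields of PX30_CLKSEL_CON(0)
+ * (i.e. /2 and /6 under the usual value-plus-one divider convention).
+ */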
+
+static struct rockchip_cpuclk_rate_table px30_cpuclk_rates[] __initdata = {
+ PX30_CPUCLK_RATE(1608000000, 1, 7),
+ PX30_CPUCLK_RATE(1584000000, 1, 7),
+ PX30_CPUCLK_RATE(1560000000, 1, 7),
+ PX30_CPUCLK_RATE(1536000000, 1, 7),
+ PX30_CPUCLK_RATE(1512000000, 1, 7),
+ PX30_CPUCLK_RATE(1488000000, 1, 5),
+ PX30_CPUCLK_RATE(1464000000, 1, 5),
+ PX30_CPUCLK_RATE(1440000000, 1, 5),
+ PX30_CPUCLK_RATE(1416000000, 1, 5),
+ PX30_CPUCLK_RATE(1392000000, 1, 5),
+ PX30_CPUCLK_RATE(1368000000, 1, 5),
+ PX30_CPUCLK_RATE(1344000000, 1, 5),
+ PX30_CPUCLK_RATE(1320000000, 1, 5),
+ PX30_CPUCLK_RATE(1296000000, 1, 5),
+ PX30_CPUCLK_RATE(1272000000, 1, 5),
+ PX30_CPUCLK_RATE(1248000000, 1, 5),
+ PX30_CPUCLK_RATE(1224000000, 1, 5),
+ PX30_CPUCLK_RATE(1200000000, 1, 5),
+ PX30_CPUCLK_RATE(1104000000, 1, 5),
+ PX30_CPUCLK_RATE(1008000000, 1, 5),
+ PX30_CPUCLK_RATE(912000000, 1, 5),
+ PX30_CPUCLK_RATE(816000000, 1, 3),
+ PX30_CPUCLK_RATE(696000000, 1, 3),
+ PX30_CPUCLK_RATE(600000000, 1, 3),
+ PX30_CPUCLK_RATE(408000000, 1, 1),
+ PX30_CPUCLK_RATE(312000000, 1, 1),
+ PX30_CPUCLK_RATE(216000000, 1, 1),
+ PX30_CPUCLK_RATE(96000000, 1, 1),
+};
+
+static const struct rockchip_cpuclk_reg_data px30_cpuclk_data = {
+ .core_reg = PX30_CLKSEL_CON(0),
+ .div_core_shift = 0,
+ .div_core_mask = 0xf,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
+ .mux_core_shift = 7,
+ .mux_core_mask = 0x1,
+};
+
+PNAME(mux_pll_p) = { "xin24m"};
+PNAME(mux_usb480m_p) = { "xin24m", "usb480m_phy", "clk_rtc32k_pmu" };
+PNAME(mux_armclk_p) = { "apll_core", "gpll_core" };
+PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr" };
+PNAME(mux_ddrstdby_p) = { "clk_ddrphy1x", "clk_stdby_2wrap" };
+PNAME(mux_4plls_p) = { "gpll", "dummy_cpll", "usb480m", "npll" };
+PNAME(mux_cpll_npll_p) = { "cpll", "npll" };
+PNAME(mux_npll_cpll_p) = { "npll", "cpll" };
+PNAME(mux_gpll_cpll_p) = { "gpll", "dummy_cpll" };
+PNAME(mux_gpll_npll_p) = { "gpll", "npll" };
+PNAME(mux_gpll_xin24m_p) = { "gpll", "xin24m"};
+PNAME(mux_gpll_cpll_npll_p) = { "gpll", "dummy_cpll", "npll" };
+PNAME(mux_gpll_cpll_npll_xin24m_p) = { "gpll", "dummy_cpll", "npll", "xin24m" };
+PNAME(mux_gpll_xin24m_npll_p) = { "gpll", "xin24m", "npll"};
+PNAME(mux_pdm_p) = { "clk_pdm_src", "clk_pdm_frac" };
+PNAME(mux_i2s0_tx_p) = { "clk_i2s0_tx_src", "clk_i2s0_tx_frac", "mclk_i2s0_tx_in", "xin12m"};
+PNAME(mux_i2s0_rx_p) = { "clk_i2s0_rx_src", "clk_i2s0_rx_frac", "mclk_i2s0_rx_in", "xin12m"};
+PNAME(mux_i2s1_p) = { "clk_i2s1_src", "clk_i2s1_frac", "i2s1_clkin", "xin12m"};
+PNAME(mux_i2s2_p) = { "clk_i2s2_src", "clk_i2s2_frac", "i2s2_clkin", "xin12m"};
+PNAME(mux_i2s0_tx_out_p) = { "clk_i2s0_tx", "xin12m", "clk_i2s0_rx"};
+PNAME(mux_i2s0_rx_out_p) = { "clk_i2s0_rx", "xin12m", "clk_i2s0_tx"};
+PNAME(mux_i2s1_out_p) = { "clk_i2s1", "xin12m"};
+PNAME(mux_i2s2_out_p) = { "clk_i2s2", "xin12m"};
+PNAME(mux_i2s0_tx_rx_p) = { "clk_i2s0_tx_mux", "clk_i2s0_rx_mux"};
+PNAME(mux_i2s0_rx_tx_p) = { "clk_i2s0_rx_mux", "clk_i2s0_tx_mux"};
+PNAME(mux_uart_src_p) = { "gpll", "xin24m", "usb480m", "npll" };
+PNAME(mux_uart1_p) = { "clk_uart1_src", "clk_uart1_np5", "clk_uart1_frac" };
+PNAME(mux_uart2_p) = { "clk_uart2_src", "clk_uart2_np5", "clk_uart2_frac" };
+PNAME(mux_uart3_p) = { "clk_uart3_src", "clk_uart3_np5", "clk_uart3_frac" };
+PNAME(mux_uart4_p) = { "clk_uart4_src", "clk_uart4_np5", "clk_uart4_frac" };
+PNAME(mux_uart5_p) = { "clk_uart5_src", "clk_uart5_np5", "clk_uart5_frac" };
+PNAME(mux_cif_out_p) = { "xin24m", "dummy_cpll", "npll", "usb480m" };
+PNAME(mux_dclk_vopb_p) = { "dclk_vopb_src", "dclk_vopb_frac", "xin24m" };
+PNAME(mux_dclk_vopl_p) = { "dclk_vopl_src", "dclk_vopl_frac", "xin24m" };
+PNAME(mux_gmac_p) = { "clk_gmac_src", "gmac_clkin" };
+PNAME(mux_gmac_rmii_sel_p) = { "clk_gmac_rx_tx_div20", "clk_gmac_rx_tx_div2" };
+PNAME(mux_rtc32k_pmu_p) = { "xin32k", "pmu_pvtm_32k", "clk_rtc32k_frac", };
+PNAME(mux_wifi_pmu_p) = { "xin24m", "clk_wifi_pmu_src" };
+PNAME(mux_uart0_pmu_p) = { "clk_uart0_pmu_src", "clk_uart0_np5", "clk_uart0_frac" };
+PNAME(mux_usbphy_ref_p) = { "xin24m", "clk_ref24m_pmu" };
+PNAME(mux_mipidsiphy_ref_p) = { "xin24m", "clk_ref24m_pmu" };
+PNAME(mux_gpu_p) = { "clk_gpu_div", "clk_gpu_np5" };
+
+static struct rockchip_pll_clock px30_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3328, PLL_APLL, "apll", mux_pll_p,
+ 0, PX30_PLL_CON(0),
+ PX30_MODE_CON, 0, 0, 0, px30_pll_rates),
+ [dpll] = PLL(pll_rk3328, PLL_DPLL, "dpll", mux_pll_p,
+ 0, PX30_PLL_CON(8),
+ PX30_MODE_CON, 4, 1, 0, NULL),
+ [cpll] = PLL(pll_rk3328, PLL_CPLL, "cpll", mux_pll_p,
+ 0, PX30_PLL_CON(16),
+ PX30_MODE_CON, 2, 2, 0, px30_pll_rates),
+ [npll] = PLL(pll_rk3328, PLL_NPLL, "npll", mux_pll_p,
+ 0, PX30_PLL_CON(24),
+ PX30_MODE_CON, 6, 4, 0, px30_pll_rates),
+};
+
+static struct rockchip_pll_clock px30_pmu_pll_clks[] __initdata = {
+ [gpll] = PLL(pll_rk3328, PLL_GPLL, "gpll", mux_pll_p, 0, PX30_PMU_PLL_CON(0),
+ PX30_PMU_MODE, 0, 3, 0, px30_pll_rates),
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
+static struct rockchip_clk_branch px30_pdm_fracmux __initdata =
+ MUX(0, "clk_pdm_mux", mux_pdm_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(26), 15, 1, MFLAGS);
+
+static struct rockchip_clk_branch px30_i2s0_tx_fracmux __initdata =
+ MUX(0, "clk_i2s0_tx_mux", mux_i2s0_tx_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(28), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_i2s0_rx_fracmux __initdata =
+ MUX(0, "clk_i2s0_rx_mux", mux_i2s0_rx_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(58), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_i2s1_fracmux __initdata =
+ MUX(0, "clk_i2s1_mux", mux_i2s1_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(30), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_i2s2_fracmux __initdata =
+ MUX(0, "clk_i2s2_mux", mux_i2s2_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(32), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart1_fracmux __initdata =
+ MUX(0, "clk_uart1_mux", mux_uart1_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(35), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart2_fracmux __initdata =
+ MUX(0, "clk_uart2_mux", mux_uart2_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(38), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart3_fracmux __initdata =
+ MUX(0, "clk_uart3_mux", mux_uart3_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(41), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart4_fracmux __initdata =
+ MUX(0, "clk_uart4_mux", mux_uart4_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(44), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart5_fracmux __initdata =
+ MUX(0, "clk_uart5_mux", mux_uart5_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(47), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_dclk_vopb_fracmux __initdata =
+ MUX(0, "dclk_vopb_mux", mux_dclk_vopb_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(5), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_dclk_vopl_fracmux __initdata =
+ MUX(0, "dclk_vopl_mux", mux_dclk_vopl_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(8), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_rtc32k_pmu_fracmux __initdata =
+ MUX(SCLK_RTC32K_PMU, "clk_rtc32k_pmu", mux_rtc32k_pmu_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(0), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_uart0_pmu_fracmux __initdata =
+ MUX(0, "clk_uart0_pmu_mux", mux_uart0_pmu_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(4), 14, 2, MFLAGS);
+
+static struct rockchip_clk_branch px30_clk_branches[] __initdata = {
+ /*
+ * Clock-Architecture Diagram 1
+ */
+
+ MUX(USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT,
+ PX30_MODE_CON, 8, 2, MFLAGS),
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
+ /*
+ * Clock-Architecture Diagram 3
+ */
+
+ /* PD_CORE */
+ GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 0, GFLAGS),
+ GATE(0, "gpll_core", "gpll", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 0, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclk_dbg", "armclk", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(0), 8, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ PX30_CLKGATE_CON(0), 2, GFLAGS),
+ COMPOSITE_NOMUX(0, "aclk_core", "armclk", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(0), 12, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ PX30_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(0, "aclk_core_niu", "aclk_core", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(0, "aclk_core_prf", "aclk_core", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(17), 5, GFLAGS),
+ GATE(0, "pclk_dbg_niu", "pclk_dbg", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 5, GFLAGS),
+ GATE(0, "pclk_core_dbg", "pclk_dbg", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(0, "pclk_core_grf", "pclk_dbg", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(17), 6, GFLAGS),
+
+ GATE(0, "clk_jtag", "jtag_clkin", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(SCLK_PVTM, "clk_pvtm", "xin24m", 0,
+ PX30_CLKGATE_CON(17), 4, GFLAGS),
+
+ /* PD_GPU */
+ COMPOSITE_NODIV(0, "clk_gpu_src", mux_4plls_p, 0,
+ PX30_CLKSEL_CON(1), 6, 2, MFLAGS,
+ PX30_CLKGATE_CON(0), 8, GFLAGS),
+ COMPOSITE_NOMUX(0, "clk_gpu_div", "clk_gpu_src", 0,
+ PX30_CLKSEL_CON(1), 0, 4, DFLAGS,
+ PX30_CLKGATE_CON(0), 12, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_gpu_np5", "clk_gpu_src", 0,
+ PX30_CLKSEL_CON(1), 8, 4, DFLAGS,
+ PX30_CLKGATE_CON(0), 9, GFLAGS),
+ COMPOSITE_NODIV(SCLK_GPU, "clk_gpu", mux_gpu_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(1), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE_NOMUX(0, "aclk_gpu", "clk_gpu", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(1), 13, 2, DFLAGS,
+ PX30_CLKGATE_CON(17), 10, GFLAGS),
+ GATE(0, "aclk_gpu_niu", "aclk_gpu", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 11, GFLAGS),
+ GATE(0, "aclk_gpu_prf", "aclk_gpu", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(17), 8, GFLAGS),
+ GATE(0, "pclk_gpu_grf", "aclk_gpu", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(17), 9, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 4
+ */
+
+ /* PD_DDR */
+ GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 7, GFLAGS),
+ GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 13, GFLAGS),
+ COMPOSITE_NOGATE(SCLK_DDRCLK, "sclk_ddrc", mux_ddrphy_p, CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(2), 7, 1, MFLAGS, 0, 3, DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+ COMPOSITE_NOGATE(0, "clk_ddrphy4x", mux_ddrphy_p, CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(2), 7, 1, MFLAGS, 0, 3, DFLAGS),
+ FACTOR_GATE(0, "clk_ddrphy1x", "clk_ddrphy4x", CLK_IGNORE_UNUSED, 1, 4,
+ PX30_CLKGATE_CON(0), 14, GFLAGS),
+ FACTOR_GATE(0, "clk_stdby_2wrap", "clk_ddrphy4x", CLK_IGNORE_UNUSED, 1, 4,
+ PX30_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_ddrstdby", mux_ddrstdby_p, CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(2), 4, 1, MFLAGS,
+ PX30_CLKGATE_CON(1), 13, GFLAGS),
+ GATE(0, "aclk_split", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 15, GFLAGS),
+ GATE(0, "clk_msch", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 8, GFLAGS),
+ GATE(0, "aclk_ddrc", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 5, GFLAGS),
+ GATE(0, "clk_core_ddrc", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(0, "aclk_cmd_buff", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(0, "clk_ddrmon", "clk_ddrphy1x", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 11, GFLAGS),
+
+ GATE(0, "clk_ddrmon_timer", "xin24m", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(0), 15, GFLAGS),
+
+ COMPOSITE_NOMUX(PCLK_DDR, "pclk_ddr", "gpll", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(2), 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(0, "pclk_ddrmon", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 10, GFLAGS),
+ GATE(0, "pclk_ddrc", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 7, GFLAGS),
+ GATE(0, "pclk_msch", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 9, GFLAGS),
+ GATE(0, "pclk_stdby", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 12, GFLAGS),
+ GATE(0, "pclk_ddr_grf", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 14, GFLAGS),
+ GATE(0, "pclk_cmdbuff", "pclk_ddr", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(1), 3, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 5
+ */
+
+ /* PD_VI */
+ COMPOSITE(ACLK_VI_PRE, "aclk_vi_pre", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(4), 8, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_VI_PRE, "hclk_vi_pre", "aclk_vi_pre", 0,
+ PX30_CLKSEL_CON(11), 8, 4, DFLAGS,
+ PX30_CLKGATE_CON(4), 12, GFLAGS),
+ COMPOSITE(SCLK_ISP, "clk_isp", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(12), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(4), 9, GFLAGS),
+ COMPOSITE(SCLK_CIF_OUT, "clk_cif_out", mux_cif_out_p, 0,
+ PX30_CLKSEL_CON(13), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ PX30_CLKGATE_CON(4), 11, GFLAGS),
+ GATE(PCLK_ISP, "pclkin_isp", "ext_pclkin", 0,
+ PX30_CLKGATE_CON(4), 13, GFLAGS),
+ GATE(PCLK_CIF, "pclkin_cif", "ext_pclkin", 0,
+ PX30_CLKGATE_CON(4), 14, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 6
+ */
+
+ /* PD_VO */
+ COMPOSITE(ACLK_VO_PRE, "aclk_vo_pre", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(3), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_VO_PRE, "hclk_vo_pre", "aclk_vo_pre", 0,
+ PX30_CLKSEL_CON(3), 8, 4, DFLAGS,
+ PX30_CLKGATE_CON(2), 12, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_VO_PRE, "pclk_vo_pre", "aclk_vo_pre", 0,
+ PX30_CLKSEL_CON(3), 12, 4, DFLAGS,
+ PX30_CLKGATE_CON(2), 13, GFLAGS),
+ COMPOSITE(SCLK_RGA_CORE, "clk_rga_core", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(4), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(2), 1, GFLAGS),
+
+ COMPOSITE(SCLK_VOPB_PWM, "clk_vopb_pwm", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(7), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(2), 5, GFLAGS),
+ COMPOSITE(0, "dclk_vopb_src", mux_cpll_npll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ PX30_CLKSEL_CON(5), 11, 1, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(2), 2, GFLAGS),
+ COMPOSITE_FRACMUX(0, "dclk_vopb_frac", "dclk_vopb_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(6), 0,
+ PX30_CLKGATE_CON(2), 3, GFLAGS,
+ &px30_dclk_vopb_fracmux),
+ GATE(DCLK_VOPB, "dclk_vopb", "dclk_vopb_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(2), 4, GFLAGS),
+ COMPOSITE(0, "dclk_vopl_src", mux_npll_cpll_p, 0,
+ PX30_CLKSEL_CON(8), 11, 1, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(2), 6, GFLAGS),
+ COMPOSITE_FRACMUX(0, "dclk_vopl_frac", "dclk_vopl_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(9), 0,
+ PX30_CLKGATE_CON(2), 7, GFLAGS,
+ &px30_dclk_vopl_fracmux),
+ GATE(DCLK_VOPL, "dclk_vopl", "dclk_vopl_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(2), 8, GFLAGS),
+
+ /* PD_VPU */
+ COMPOSITE(0, "aclk_vpu_pre", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(10), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(4), 0, GFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_vpu_pre", "aclk_vpu_pre", 0,
+ PX30_CLKSEL_CON(10), 8, 4, DFLAGS,
+ PX30_CLKGATE_CON(4), 2, GFLAGS),
+ COMPOSITE(SCLK_CORE_VPU, "sclk_core_vpu", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(13), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(4), 1, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 7
+ */
+
+ COMPOSITE_NODIV(ACLK_PERI_SRC, "aclk_peri_src", mux_gpll_cpll_p, 0,
+ PX30_CLKSEL_CON(14), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(5), 7, GFLAGS),
+ COMPOSITE_NOMUX(ACLK_PERI_PRE, "aclk_peri_pre", "aclk_peri_src", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(14), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(5), 8, GFLAGS),
+ DIV(HCLK_PERI_PRE, "hclk_peri_pre", "aclk_peri_src", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(14), 8, 5, DFLAGS),
+
+ /* PD_MMC_NAND */
+ GATE(HCLK_MMC_NAND, "hclk_mmc_nand", "hclk_peri_pre", 0,
+ PX30_CLKGATE_CON(6), 0, GFLAGS),
+ COMPOSITE(SCLK_NANDC, "clk_nandc", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(5), 13, GFLAGS),
+
+ COMPOSITE(SCLK_SDIO, "clk_sdio", mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(18), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 3, GFLAGS),
+
+ COMPOSITE(SCLK_EMMC, "clk_emmc", mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(20), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 6, GFLAGS),
+
+ COMPOSITE(SCLK_SFC, "clk_sfc", mux_gpll_cpll_p, 0,
+ PX30_CLKSEL_CON(22), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(6), 7, GFLAGS),
+
+ MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc",
+ PX30_SDMMC_CON0, 1),
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc",
+ PX30_SDMMC_CON1, 1),
+
+ MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio",
+ PX30_SDIO_CON0, 1),
+ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio",
+ PX30_SDIO_CON1, 1),
+
+ MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc",
+ PX30_EMMC_CON0, 1),
+ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc",
+ PX30_EMMC_CON1, 1),
+
+ /* PD_SDCARD */
+ GATE(0, "hclk_sdmmc_pre", "hclk_peri_pre", 0,
+ PX30_CLKGATE_CON(6), 12, GFLAGS),
+ COMPOSITE(SCLK_SDMMC, "clk_sdmmc", mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(16), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 15, GFLAGS),
+
+ /* PD_USB */
+ GATE(HCLK_USB, "hclk_usb", "hclk_peri_pre", 0,
+ PX30_CLKGATE_CON(7), 2, GFLAGS),
+ GATE(SCLK_OTG_ADP, "clk_otg_adp", "clk_rtc32k_pmu", 0,
+ PX30_CLKGATE_CON(7), 3, GFLAGS),
+
+ /* PD_GMAC */
+ COMPOSITE(SCLK_GMAC_SRC, "clk_gmac_src", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(22), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(7), 11, GFLAGS),
+ MUX(SCLK_GMAC, "clk_gmac", mux_gmac_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(23), 6, 1, MFLAGS),
+ GATE(SCLK_MAC_REF, "clk_mac_ref", "clk_gmac", 0,
+ PX30_CLKGATE_CON(7), 15, GFLAGS),
+ GATE(SCLK_GMAC_RX_TX, "clk_gmac_rx_tx", "clk_gmac", 0,
+ PX30_CLKGATE_CON(7), 13, GFLAGS),
+ FACTOR(0, "clk_gmac_rx_tx_div2", "clk_gmac_rx_tx", 0, 1, 2),
+ FACTOR(0, "clk_gmac_rx_tx_div20", "clk_gmac_rx_tx", 0, 1, 20),
+ MUX(SCLK_GMAC_RMII, "clk_gmac_rmii_sel", mux_gmac_rmii_sel_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(23), 7, 1, MFLAGS),
+
+ GATE(0, "aclk_gmac_pre", "aclk_peri_pre", 0,
+ PX30_CLKGATE_CON(7), 10, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclk_gmac_pre", "aclk_gmac_pre", 0,
+ PX30_CLKSEL_CON(23), 0, 4, DFLAGS,
+ PX30_CLKGATE_CON(7), 12, GFLAGS),
+
+ COMPOSITE(SCLK_MAC_OUT, "clk_mac_out", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(12), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 5, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 8
+ */
+
+ /* PD_BUS */
+ COMPOSITE_NODIV(ACLK_BUS_SRC, "aclk_bus_src", mux_gpll_cpll_p, CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(23), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(8), 6, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_BUS_PRE, "hclk_bus_pre", "aclk_bus_src", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(24), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 8, GFLAGS),
+ COMPOSITE_NOMUX(ACLK_BUS_PRE, "aclk_bus_pre", "aclk_bus_src", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(23), 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 7, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_BUS_PRE, "pclk_bus_pre", "aclk_bus_pre", CLK_IGNORE_UNUSED,
+ PX30_CLKSEL_CON(24), 8, 2, DFLAGS,
+ PX30_CLKGATE_CON(8), 9, GFLAGS),
+ GATE(0, "pclk_top_pre", "pclk_bus_pre", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(8), 10, GFLAGS),
+
+ COMPOSITE(0, "clk_pdm_src", mux_gpll_xin24m_npll_p, 0,
+ PX30_CLKSEL_CON(26), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(9), 9, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_pdm_frac", "clk_pdm_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(27), 0,
+ PX30_CLKGATE_CON(9), 10, GFLAGS,
+ &px30_pdm_fracmux),
+ GATE(SCLK_PDM, "clk_pdm", "clk_pdm_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(9), 11, GFLAGS),
+
+ COMPOSITE(0, "clk_i2s0_tx_src", mux_gpll_npll_p, 0,
+ PX30_CLKSEL_CON(28), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(9), 12, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s0_tx_frac", "clk_i2s0_tx_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(29), 0,
+ PX30_CLKGATE_CON(9), 13, GFLAGS,
+ &px30_i2s0_tx_fracmux),
+ COMPOSITE_NODIV(SCLK_I2S0_TX, "clk_i2s0_tx", mux_i2s0_tx_rx_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(28), 12, 1, MFLAGS,
+ PX30_CLKGATE_CON(9), 14, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_i2s0_tx_out_pre", mux_i2s0_tx_out_p, 0,
+ PX30_CLKSEL_CON(28), 14, 2, MFLAGS,
+ PX30_CLKGATE_CON(9), 15, GFLAGS),
+ GATE(SCLK_I2S0_TX_OUT, "clk_i2s0_tx_out", "clk_i2s0_tx_out_pre", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 8, CLK_GATE_HIWORD_MASK),
+
+ COMPOSITE(0, "clk_i2s0_rx_src", mux_gpll_npll_p, 0,
+ PX30_CLKSEL_CON(58), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(17), 0, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s0_rx_frac", "clk_i2s0_rx_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(59), 0,
+ PX30_CLKGATE_CON(17), 1, GFLAGS,
+ &px30_i2s0_rx_fracmux),
+ COMPOSITE_NODIV(SCLK_I2S0_RX, "clk_i2s0_rx", mux_i2s0_rx_tx_p, CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(58), 12, 1, MFLAGS,
+ PX30_CLKGATE_CON(17), 2, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_i2s0_rx_out_pre", mux_i2s0_rx_out_p, 0,
+ PX30_CLKSEL_CON(58), 14, 2, MFLAGS,
+ PX30_CLKGATE_CON(17), 3, GFLAGS),
+ GATE(SCLK_I2S0_RX_OUT, "clk_i2s0_rx_out", "clk_i2s0_rx_out_pre", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 11, CLK_GATE_HIWORD_MASK),
+
+ COMPOSITE(0, "clk_i2s1_src", mux_gpll_npll_p, 0,
+ PX30_CLKSEL_CON(30), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(10), 0, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s1_frac", "clk_i2s1_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(31), 0,
+ PX30_CLKGATE_CON(10), 1, GFLAGS,
+ &px30_i2s1_fracmux),
+ GATE(SCLK_I2S1, "clk_i2s1", "clk_i2s1_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 2, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_i2s1_out_pre", mux_i2s1_out_p, 0,
+ PX30_CLKSEL_CON(30), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(10), 3, GFLAGS),
+ GATE(SCLK_I2S1_OUT, "clk_i2s1_out", "clk_i2s1_out_pre", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 9, CLK_GATE_HIWORD_MASK),
+
+ COMPOSITE(0, "clk_i2s2_src", mux_gpll_npll_p, 0,
+ PX30_CLKSEL_CON(32), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(10), 4, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s2_frac", "clk_i2s2_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(33), 0,
+ PX30_CLKGATE_CON(10), 5, GFLAGS,
+ &px30_i2s2_fracmux),
+ GATE(SCLK_I2S2, "clk_i2s2", "clk_i2s2_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 6, GFLAGS),
+ COMPOSITE_NODIV(0, "clk_i2s2_out_pre", mux_i2s2_out_p, 0,
+ PX30_CLKSEL_CON(32), 15, 1, MFLAGS,
+ PX30_CLKGATE_CON(10), 7, GFLAGS),
+ GATE(SCLK_I2S2_OUT, "clk_i2s2_out", "clk_i2s2_out_pre", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 10, CLK_GATE_HIWORD_MASK),
+
+ COMPOSITE(SCLK_UART1_SRC, "clk_uart1_src", mux_uart_src_p, CLK_SET_RATE_NO_REPARENT,
+ PX30_CLKSEL_CON(34), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(10), 12, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart1_np5", "clk_uart1_src", 0,
+ PX30_CLKSEL_CON(35), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(10), 13, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart1_frac", "clk_uart1_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(36), 0,
+ PX30_CLKGATE_CON(10), 14, GFLAGS,
+ &px30_uart1_fracmux),
+ GATE(SCLK_UART1, "clk_uart1", "clk_uart1_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(10), 15, GFLAGS),
+
+ COMPOSITE(SCLK_UART2_SRC, "clk_uart2_src", mux_uart_src_p, 0,
+ PX30_CLKSEL_CON(37), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 0, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart2_np5", "clk_uart2_src", 0,
+ PX30_CLKSEL_CON(38), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 1, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart2_frac", "clk_uart2_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(39), 0,
+ PX30_CLKGATE_CON(11), 2, GFLAGS,
+ &px30_uart2_fracmux),
+ GATE(SCLK_UART2, "clk_uart2", "clk_uart2_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(11), 3, GFLAGS),
+
+ COMPOSITE(0, "clk_uart3_src", mux_uart_src_p, 0,
+ PX30_CLKSEL_CON(40), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 4, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart3_np5", "clk_uart3_src", 0,
+ PX30_CLKSEL_CON(41), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 5, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart3_frac", "clk_uart3_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(42), 0,
+ PX30_CLKGATE_CON(11), 6, GFLAGS,
+ &px30_uart3_fracmux),
+ GATE(SCLK_UART3, "clk_uart3", "clk_uart3_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(11), 7, GFLAGS),
+
+ COMPOSITE(0, "clk_uart4_src", mux_uart_src_p, 0,
+ PX30_CLKSEL_CON(43), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 8, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart4_np5", "clk_uart4_src", 0,
+ PX30_CLKSEL_CON(44), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 9, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart4_frac", "clk_uart4_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(45), 0,
+ PX30_CLKGATE_CON(11), 10, GFLAGS,
+ &px30_uart4_fracmux),
+ GATE(SCLK_UART4, "clk_uart4", "clk_uart4_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(11), 11, GFLAGS),
+
+ COMPOSITE(0, "clk_uart5_src", mux_uart_src_p, 0,
+ PX30_CLKSEL_CON(46), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 12, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart5_np5", "clk_uart5_src", 0,
+ PX30_CLKSEL_CON(47), 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(11), 13, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart5_frac", "clk_uart5_src", CLK_SET_RATE_PARENT,
+ PX30_CLKSEL_CON(48), 0,
+ PX30_CLKGATE_CON(11), 14, GFLAGS,
+ &px30_uart5_fracmux),
+ GATE(SCLK_UART5, "clk_uart5", "clk_uart5_mux", CLK_SET_RATE_PARENT,
+ PX30_CLKGATE_CON(11), 15, GFLAGS),
+
+ COMPOSITE(SCLK_I2C0, "clk_i2c0", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(49), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 0, GFLAGS),
+ COMPOSITE(SCLK_I2C1, "clk_i2c1", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(49), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 1, GFLAGS),
+ COMPOSITE(SCLK_I2C2, "clk_i2c2", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(50), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 2, GFLAGS),
+ COMPOSITE(SCLK_I2C3, "clk_i2c3", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(50), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 3, GFLAGS),
+ COMPOSITE(SCLK_PWM0, "clk_pwm0", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(52), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 5, GFLAGS),
+ COMPOSITE(SCLK_PWM1, "clk_pwm1", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(52), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 6, GFLAGS),
+ COMPOSITE(SCLK_SPI0, "clk_spi0", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(53), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 7, GFLAGS),
+ COMPOSITE(SCLK_SPI1, "clk_spi1", mux_gpll_xin24m_p, 0,
+ PX30_CLKSEL_CON(53), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ PX30_CLKGATE_CON(12), 8, GFLAGS),
+
+ GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 0, GFLAGS),
+ GATE(SCLK_TIMER1, "sclk_timer1", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(SCLK_TIMER2, "sclk_timer2", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 2, GFLAGS),
+ GATE(SCLK_TIMER3, "sclk_timer3", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 3, GFLAGS),
+ GATE(SCLK_TIMER4, "sclk_timer4", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 4, GFLAGS),
+ GATE(SCLK_TIMER5, "sclk_timer5", "xin24m", 0,
+ PX30_CLKGATE_CON(13), 5, GFLAGS),
+
+ COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "xin24m", 0,
+ PX30_CLKSEL_CON(54), 0, 11, DFLAGS,
+ PX30_CLKGATE_CON(12), 9, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_SARADC, "clk_saradc", "xin24m", 0,
+ PX30_CLKSEL_CON(55), 0, 11, DFLAGS,
+ PX30_CLKGATE_CON(12), 10, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_OTP, "clk_otp", "xin24m", 0,
+ PX30_CLKSEL_CON(56), 0, 3, DFLAGS,
+ PX30_CLKGATE_CON(12), 11, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_OTP_USR, "clk_otp_usr", "clk_otp", 0,
+ PX30_CLKSEL_CON(56), 4, 2, DFLAGS,
+ PX30_CLKGATE_CON(13), 6, GFLAGS),
+
+ GATE(0, "clk_cpu_boost", "xin24m", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(12), 12, GFLAGS),
+
+ /* PD_CRYPTO */
+ GATE(0, "aclk_crypto_pre", "aclk_bus_pre", 0,
+ PX30_CLKGATE_CON(8), 12, GFLAGS),
+ GATE(0, "hclk_crypto_pre", "hclk_bus_pre", 0,
+ PX30_CLKGATE_CON(8), 13, GFLAGS),
+ COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 14, GFLAGS),
+ COMPOSITE(SCLK_CRYPTO_APK, "clk_crypto_apk", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(25), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(8), 15, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 9
+ */
+
+ /* PD_BUS_TOP */
+ GATE(0, "pclk_top_niu", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 0, GFLAGS),
+ GATE(0, "pclk_top_cru", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 1, GFLAGS),
+ GATE(PCLK_OTP_PHY, "pclk_otp_phy", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 2, GFLAGS),
+ GATE(0, "pclk_ddrphy", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 3, GFLAGS),
+ GATE(PCLK_MIPIDSIPHY, "pclk_mipidsiphy", "pclk_top_pre", 0, PX30_CLKGATE_CON(16), 4, GFLAGS),
+ GATE(PCLK_MIPICSIPHY, "pclk_mipicsiphy", "pclk_top_pre", 0, PX30_CLKGATE_CON(16), 5, GFLAGS),
+ GATE(PCLK_USB_GRF, "pclk_usb_grf", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 6, GFLAGS),
+ GATE(0, "pclk_cpu_hoost", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 7, GFLAGS),
+
+ /* PD_VI */
+ GATE(0, "aclk_vi_niu", "aclk_vi_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(4), 15, GFLAGS),
+ GATE(ACLK_CIF, "aclk_cif", "aclk_vi_pre", 0, PX30_CLKGATE_CON(5), 1, GFLAGS),
+ GATE(ACLK_ISP, "aclk_isp", "aclk_vi_pre", 0, PX30_CLKGATE_CON(5), 3, GFLAGS),
+ GATE(0, "hclk_vi_niu", "hclk_vi_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(5), 0, GFLAGS),
+ GATE(HCLK_CIF, "hclk_cif", "hclk_vi_pre", 0, PX30_CLKGATE_CON(5), 2, GFLAGS),
+ GATE(HCLK_ISP, "hclk_isp", "hclk_vi_pre", 0, PX30_CLKGATE_CON(5), 4, GFLAGS),
+
+ /* PD_VO */
+ GATE(0, "aclk_vo_niu", "aclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 0, GFLAGS),
+ GATE(ACLK_VOPB, "aclk_vopb", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 3, GFLAGS),
+ GATE(ACLK_RGA, "aclk_rga", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 7, GFLAGS),
+ GATE(ACLK_VOPL, "aclk_vopl", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 5, GFLAGS),
+
+ GATE(0, "hclk_vo_niu", "hclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 1, GFLAGS),
+ GATE(HCLK_VOPB, "hclk_vopb", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 4, GFLAGS),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 8, GFLAGS),
+ GATE(HCLK_VOPL, "hclk_vopl", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 6, GFLAGS),
+
+ GATE(0, "pclk_vo_niu", "pclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 2, GFLAGS),
+ GATE(PCLK_MIPI_DSI, "pclk_mipi_dsi", "pclk_vo_pre", 0, PX30_CLKGATE_CON(3), 9, GFLAGS),
+
+ /* PD_BUS */
+ GATE(0, "aclk_bus_niu", "aclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 8, GFLAGS),
+ GATE(0, "aclk_intmem", "aclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 11, GFLAGS),
+ GATE(ACLK_GIC, "aclk_gic", "aclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 12, GFLAGS),
+ GATE(ACLK_DCF, "aclk_dcf", "aclk_bus_pre", 0, PX30_CLKGATE_CON(13), 15, GFLAGS),
+
+ GATE(0, "hclk_bus_niu", "hclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 9, GFLAGS),
+ GATE(0, "hclk_rom", "hclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 14, GFLAGS),
+ GATE(HCLK_PDM, "hclk_pdm", "hclk_bus_pre", 0, PX30_CLKGATE_CON(14), 1, GFLAGS),
+ GATE(HCLK_I2S0, "hclk_i2s0", "hclk_bus_pre", 0, PX30_CLKGATE_CON(14), 2, GFLAGS),
+ GATE(HCLK_I2S1, "hclk_i2s1", "hclk_bus_pre", 0, PX30_CLKGATE_CON(14), 3, GFLAGS),
+ GATE(HCLK_I2S2, "hclk_i2s2", "hclk_bus_pre", 0, PX30_CLKGATE_CON(14), 4, GFLAGS),
+
+ GATE(0, "pclk_bus_niu", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 10, GFLAGS),
+ GATE(PCLK_DCF, "pclk_dcf", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 0, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 5, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 6, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 7, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 8, GFLAGS),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 9, GFLAGS),
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 10, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 11, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 12, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 13, GFLAGS),
+ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 14, GFLAGS),
+ GATE(PCLK_PWM0, "pclk_pwm0", "pclk_bus_pre", 0, PX30_CLKGATE_CON(14), 15, GFLAGS),
+ GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 0, GFLAGS),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 1, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 2, GFLAGS),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 3, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 4, GFLAGS),
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 5, GFLAGS),
+ GATE(PCLK_OTP_NS, "pclk_otp_ns", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(15), 6, GFLAGS),
+ GATE(PCLK_WDT_NS, "pclk_wdt_ns", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(15), 7, GFLAGS),
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 8, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 9, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_bus_pre", 0, PX30_CLKGATE_CON(15), 10, GFLAGS),
+ GATE(0, "pclk_grf", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(15), 11, GFLAGS),
+ GATE(0, "pclk_sgrf", "pclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(15), 12, GFLAGS),
+
+ /* PD_VPU */
+ GATE(0, "hclk_vpu_niu", "hclk_vpu_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(4), 7, GFLAGS),
+ GATE(HCLK_VPU, "hclk_vpu", "hclk_vpu_pre", 0, PX30_CLKGATE_CON(4), 6, GFLAGS),
+ GATE(0, "aclk_vpu_niu", "aclk_vpu_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(4), 5, GFLAGS),
+ GATE(ACLK_VPU, "aclk_vpu", "aclk_vpu_pre", 0, PX30_CLKGATE_CON(4), 4, GFLAGS),
+
+ /* PD_CRYPTO */
+ GATE(0, "hclk_crypto_niu", "hclk_crypto_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(9), 3, GFLAGS),
+ GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_crypto_pre", 0, PX30_CLKGATE_CON(9), 5, GFLAGS),
+ GATE(0, "aclk_crypto_niu", "aclk_crypto_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(ACLK_CRYPTO, "aclk_crypto", "aclk_crypto_pre", 0, PX30_CLKGATE_CON(9), 4, GFLAGS),
+
+ /* PD_SDCARD */
+ GATE(0, "hclk_sdmmc_niu", "hclk_sdmmc_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(7), 0, GFLAGS),
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_sdmmc_pre", 0, PX30_CLKGATE_CON(7), 1, GFLAGS),
+
+ /* PD_PERI */
+ GATE(0, "aclk_peri_niu", "aclk_peri_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(5), 9, GFLAGS),
+
+ /* PD_MMC_NAND */
+ GATE(HCLK_NANDC, "hclk_nandc", "hclk_mmc_nand", 0, PX30_CLKGATE_CON(5), 15, GFLAGS),
+ GATE(0, "hclk_mmc_nand_niu", "hclk_mmc_nand", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(6), 8, GFLAGS),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_mmc_nand", 0, PX30_CLKGATE_CON(6), 9, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_mmc_nand", 0, PX30_CLKGATE_CON(6), 10, GFLAGS),
+ GATE(HCLK_SFC, "hclk_sfc", "hclk_mmc_nand", 0, PX30_CLKGATE_CON(6), 11, GFLAGS),
+
+ /* PD_USB */
+ GATE(0, "hclk_usb_niu", "hclk_usb", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(HCLK_OTG, "hclk_otg", "hclk_usb", 0, PX30_CLKGATE_CON(7), 5, GFLAGS),
+ GATE(HCLK_HOST, "hclk_host", "hclk_usb", 0, PX30_CLKGATE_CON(7), 6, GFLAGS),
+ GATE(HCLK_HOST_ARB, "hclk_host_arb", "hclk_usb", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(7), 8, GFLAGS),
+
+ /* PD_GMAC */
+ GATE(0, "aclk_gmac_niu", "aclk_gmac_pre", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(8), 0, GFLAGS),
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_gmac_pre", 0,
+ PX30_CLKGATE_CON(8), 2, GFLAGS),
+ GATE(0, "pclk_gmac_niu", "pclk_gmac_pre", CLK_IGNORE_UNUSED,
+ PX30_CLKGATE_CON(8), 1, GFLAGS),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_gmac_pre", 0,
+ PX30_CLKGATE_CON(8), 3, GFLAGS),
+};
+
+static struct rockchip_clk_branch px30_clk_pmu_branches[] __initdata = {
+ /*
+ * Clock-Architecture Diagram 2
+ */
+
+ COMPOSITE_FRACMUX(0, "clk_rtc32k_frac", "xin24m", CLK_IGNORE_UNUSED,
+ PX30_PMU_CLKSEL_CON(1), 0,
+ PX30_PMU_CLKGATE_CON(0), 13, GFLAGS,
+ &px30_rtc32k_pmu_fracmux),
+
+ COMPOSITE_NOMUX(XIN24M_DIV, "xin24m_div", "xin24m", CLK_IGNORE_UNUSED,
+ PX30_PMU_CLKSEL_CON(0), 8, 5, DFLAGS,
+ PX30_PMU_CLKGATE_CON(0), 12, GFLAGS),
+
+ COMPOSITE_NOMUX(0, "clk_wifi_pmu_src", "gpll", 0,
+ PX30_PMU_CLKSEL_CON(2), 8, 6, DFLAGS,
+ PX30_PMU_CLKGATE_CON(0), 14, GFLAGS),
+ COMPOSITE_NODIV(SCLK_WIFI_PMU, "clk_wifi_pmu", mux_wifi_pmu_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(2), 15, 1, MFLAGS,
+ PX30_PMU_CLKGATE_CON(0), 15, GFLAGS),
+
+ COMPOSITE(0, "clk_uart0_pmu_src", mux_uart_src_p, 0,
+ PX30_PMU_CLKSEL_CON(3), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE_NOMUX_HALFDIV(0, "clk_uart0_np5", "clk_uart0_pmu_src", 0,
+ PX30_PMU_CLKSEL_CON(4), 0, 5, DFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart0_frac", "clk_uart0_pmu_src", CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(5), 0,
+ PX30_PMU_CLKGATE_CON(1), 2, GFLAGS,
+ &px30_uart0_pmu_fracmux),
+ GATE(SCLK_UART0_PMU, "clk_uart0_pmu", "clk_uart0_pmu_mux", CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKGATE_CON(1), 3, GFLAGS),
+
+ GATE(SCLK_PVTM_PMU, "clk_pvtm_pmu", "xin24m", 0,
+ PX30_PMU_CLKGATE_CON(1), 4, GFLAGS),
+
+ COMPOSITE_NOMUX(PCLK_PMU_PRE, "pclk_pmu_pre", "gpll", 0,
+ PX30_PMU_CLKSEL_CON(0), 0, 5, DFLAGS,
+ PX30_PMU_CLKGATE_CON(0), 0, GFLAGS),
+
+ COMPOSITE_NOMUX(SCLK_REF24M_PMU, "clk_ref24m_pmu", "gpll", 0,
+ PX30_PMU_CLKSEL_CON(2), 0, 6, DFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 8, GFLAGS),
+ COMPOSITE_NODIV(SCLK_USBPHY_REF, "clk_usbphy_ref", mux_usbphy_ref_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(2), 6, 1, MFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 9, GFLAGS),
+ COMPOSITE_NODIV(SCLK_MIPIDSIPHY_REF, "clk_mipidsiphy_ref", mux_mipidsiphy_ref_p, CLK_SET_RATE_PARENT,
+ PX30_PMU_CLKSEL_CON(2), 7, 1, MFLAGS,
+ PX30_PMU_CLKGATE_CON(1), 10, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 9
+ */
+
+ /* PD_PMU */
+ GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(0, "pclk_pmu_sgrf", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(0, "pclk_pmu_grf", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(0, "pclk_pmu_mem", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 5, GFLAGS),
+ GATE(PCLK_GPIO0_PMU, "pclk_gpio0_pmu", "pclk_pmu_pre", 0, PX30_PMU_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(PCLK_UART0_PMU, "pclk_uart0_pmu", "pclk_pmu_pre", 0, PX30_PMU_CLKGATE_CON(0), 7, GFLAGS),
+ GATE(0, "pclk_cru_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 8, GFLAGS),
+};
+
+static const char *const px30_pmucru_critical_clocks[] __initconst = {
+ "aclk_bus_pre",
+ "pclk_bus_pre",
+ "hclk_bus_pre",
+ "aclk_peri_pre",
+ "hclk_peri_pre",
+ "aclk_gpu_niu",
+ "pclk_top_pre",
+ "pclk_pmu_pre",
+ "hclk_usb_niu",
+ "pll_npll",
+ "usb480m",
+ "clk_uart2",
+ "pclk_uart2",
+};
+
+static void __init px30_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+ struct clk *clk;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ /* aclk_dmac is controlled by sgrf_soc_con1[11]. */
+ clk = clk_register_fixed_factor(NULL, "aclk_dmac", "aclk_bus_pre", 0, 1, 1);
+ if (IS_ERR(clk))
+ pr_warn("%s: could not register clock aclk_dmac: %ld\n",
+ __func__, PTR_ERR(clk));
+ else
+ rockchip_clk_add_lookup(ctx, clk, ACLK_DMAC);
+
+ rockchip_clk_register_plls(ctx, px30_pll_clks,
+ ARRAY_SIZE(px30_pll_clks),
+ PX30_GRF_SOC_STATUS0);
+ rockchip_clk_register_branches(ctx, px30_clk_branches,
+ ARRAY_SIZE(px30_clk_branches));
+
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
+ mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
+ &px30_cpuclk_data, px30_cpuclk_rates,
+ ARRAY_SIZE(px30_cpuclk_rates));
+
+ rockchip_register_softrst(np, 12, reg_base + PX30_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_register_restart_notifier(ctx, PX30_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE(px30_cru, "rockchip,px30-cru", px30_clk_init);
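+
+/*
+ * Matching device-tree node, sketched with an invented address purely
+ * for illustration (see the rockchip,px30-cru binding for the real
+ * layout):
+ *
+ *	cru: clock-controller@ff2b0000 {
+ *		compatible = "rockchip,px30-cru";
+ *		reg = <0x0 0xff2b0000 0x0 0x1000>;
+ *		#clock-cells = <1>;
+ *		#reset-cells = <1>;
+ *	};
+ */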
+
+static void __init px30_pmu_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru pmu region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip pmu clk init failed\n", __func__);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, px30_pmu_pll_clks,
+ ARRAY_SIZE(px30_pmu_pll_clks), PX30_GRF_SOC_STATUS0);
+
+ rockchip_clk_register_branches(ctx, px30_clk_pmu_branches,
+ ARRAY_SIZE(px30_clk_pmu_branches));
+
+ rockchip_clk_protect_critical(px30_pmucru_critical_clocks,
+ ARRAY_SIZE(px30_pmucru_critical_clocks));
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE(px30_cru_pmu, "rockchip,px30-pmucru", px30_pmu_clk_init);
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index bca10d618f0a..5a628148f3f0 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -631,7 +631,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT,
RK3399_CLKSEL_CON(31), 0, 2, MFLAGS),
COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT,
- RK3399_CLKSEL_CON(30), 8, 2, MFLAGS,
+ RK3399_CLKSEL_CON(31), 2, 1, MFLAGS,
RK3399_CLKGATE_CON(8), 12, GFLAGS),
/* uart */
@@ -1523,6 +1523,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
"pclk_pmu_src",
"fclk_cm0s_src_pmu",
"clk_timer_src_pmu",
+ "pclk_rkpwm_pmu",
};
static void __init rk3399_clk_init(struct device_node *np)
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 326b3fa44f5d..c3ad92965823 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -492,6 +492,16 @@ void __init rockchip_clk_register_branches(
list->gate_flags, flags, list->child,
&ctx->lock);
break;
+ case branch_half_divider:
+ clk = rockchip_clk_register_halfdiv(list->name,
+ list->parent_names, list->num_parents,
+ ctx->reg_base, list->muxdiv_offset,
+ list->mux_shift, list->mux_width,
+ list->mux_flags, list->div_shift,
+ list->div_width, list->div_flags,
+ list->gate_offset, list->gate_shift,
+ list->gate_flags, flags, &ctx->lock);
+ break;
case branch_gate:
flags |= CLK_SET_RATE_PARENT;
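/*
 * Aside: a Rockchip "half divider" encodes divisors of n + 1.5. Assuming
 * the usual clk-half-divider.c implementation backing
 * rockchip_clk_register_halfdiv(), a register value n yields
 * rate = parent_rate * 2 / (2 * n + 3): n = 0 divides by 1.5, n = 1 by
 * 2.5, and so on. The recalc step, in isolation:
 */
static unsigned long half_div_recalc(unsigned long parent_rate, u32 n)
{
	/* divisor is n + 1.5; work in half-units to stay integral */
	return DIV_ROUND_UP_ULL((u64)parent_rate * 2, 2 * n + 3);
}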
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index ef601dded32c..6b53fff4cc96 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -34,7 +34,46 @@ struct clk;
#define HIWORD_UPDATE(val, mask, shift) \
((val) << (shift) | (mask) << ((shift) + 16))
-/* register positions shared by RV1108, RK2928, RK3036, RK3066, RK3188 and RK3228 */
+/* register positions shared by PX30, RV1108, RK2928, RK3036, RK3066, RK3188 and RK3228 */
+#define BOOST_PLL_H_CON(x) ((x) * 0x4)
+#define BOOST_CLK_CON 0x0008
+#define BOOST_BOOST_CON 0x000c
+#define BOOST_SWITCH_CNT 0x0010
+#define BOOST_HIGH_PERF_CNT0 0x0014
+#define BOOST_HIGH_PERF_CNT1 0x0018
+#define BOOST_STATIS_THRESHOLD 0x001c
+#define BOOST_SHORT_SWITCH_CNT 0x0020
+#define BOOST_SWITCH_THRESHOLD 0x0024
+#define BOOST_FSM_STATUS 0x0028
+#define BOOST_PLL_L_CON(x) ((x) * 0x4 + 0x2c)
+#define BOOST_RECOVERY_MASK 0x1
+#define BOOST_RECOVERY_SHIFT 1
+#define BOOST_SW_CTRL_MASK 0x1
+#define BOOST_SW_CTRL_SHIFT 2
+#define BOOST_LOW_FREQ_EN_MASK 0x1
+#define BOOST_LOW_FREQ_EN_SHIFT 3
+#define BOOST_BUSY_STATE BIT(8)
+
+#define PX30_PLL_CON(x) ((x) * 0x4)
+#define PX30_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define PX30_CLKGATE_CON(x) ((x) * 0x4 + 0x200)
+#define PX30_GLB_SRST_FST 0xb8
+#define PX30_GLB_SRST_SND 0xbc
+#define PX30_SOFTRST_CON(x) ((x) * 0x4 + 0x300)
+#define PX30_MODE_CON 0xa0
+#define PX30_MISC_CON 0xa4
+#define PX30_SDMMC_CON0 0x380
+#define PX30_SDMMC_CON1 0x384
+#define PX30_SDIO_CON0 0x388
+#define PX30_SDIO_CON1 0x38c
+#define PX30_EMMC_CON0 0x390
+#define PX30_EMMC_CON1 0x394
+
+#define PX30_PMU_PLL_CON(x) ((x) * 0x4)
+#define PX30_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x40)
+#define PX30_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x80)
+#define PX30_PMU_MODE 0x0020
+
#define RV1108_PLL_CON(x) ((x) * 0x4)
#define RV1108_CLKSEL_CON(x) ((x) * 0x4 + 0x60)
#define RV1108_CLKGATE_CON(x) ((x) * 0x4 + 0x120)
@@ -354,6 +393,7 @@ enum rockchip_clk_branch_type {
branch_inverter,
branch_factor,
branch_ddrclk,
+ branch_half_divider,
};
struct rockchip_clk_branch {
@@ -684,6 +724,79 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
+#define COMPOSITE_HALFDIV(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\
+ df, go, gs, gf) \
+ { \
+ .id = _id, \
+ .branch_type = branch_half_divider, \
+ .name = cname, \
+ .parent_names = pnames, \
+ .num_parents = ARRAY_SIZE(pnames), \
+ .flags = f, \
+ .muxdiv_offset = mo, \
+ .mux_shift = ms, \
+ .mux_width = mw, \
+ .mux_flags = mf, \
+ .div_shift = ds, \
+ .div_width = dw, \
+ .div_flags = df, \
+ .gate_offset = go, \
+ .gate_shift = gs, \
+ .gate_flags = gf, \
+ }
+
+#define COMPOSITE_NOGATE_HALFDIV(_id, cname, pnames, f, mo, ms, mw, mf, \
+ ds, dw, df) \
+ { \
+ .id = _id, \
+ .branch_type = branch_half_divider, \
+ .name = cname, \
+ .parent_names = pnames, \
+ .num_parents = ARRAY_SIZE(pnames), \
+ .flags = f, \
+ .muxdiv_offset = mo, \
+ .mux_shift = ms, \
+ .mux_width = mw, \
+ .mux_flags = mf, \
+ .div_shift = ds, \
+ .div_width = dw, \
+ .div_flags = df, \
+ .gate_offset = -1, \
+ }
+
+#define COMPOSITE_NOMUX_HALFDIV(_id, cname, pname, f, mo, ds, dw, df, \
+ go, gs, gf) \
+ { \
+ .id = _id, \
+ .branch_type = branch_half_divider, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .muxdiv_offset = mo, \
+ .div_shift = ds, \
+ .div_width = dw, \
+ .div_flags = df, \
+ .gate_offset = go, \
+ .gate_shift = gs, \
+ .gate_flags = gf, \
+ }
+
+#define DIV_HALF(_id, cname, pname, f, o, s, w, df) \
+ { \
+ .id = _id, \
+ .branch_type = branch_half_divider, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .muxdiv_offset = o, \
+ .div_shift = s, \
+ .div_width = w, \
+ .div_flags = df, \
+ .gate_offset = -1, \
+ }
+
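/*
 * Aside: a hypothetical branch-table entry using the new macro (the clock
 * ID, names, register and bit positions below are made up for
 * illustration):
 *
 *	COMPOSITE_HALFDIV(SCLK_FOO, "clk_foo", mux_foo_p, 0,
 *			PX30_CLKSEL_CON(21), 14, 2, MFLAGS, 0, 5, DFLAGS,
 *			PX30_CLKGATE_CON(2), 8, GFLAGS),
 *
 * This describes a mux (2 bits at shift 14), a half divider (5 bits at
 * shift 0) and a gate in a single branch, dispatched through the
 * branch_half_divider case added to rockchip_clk_register_branches() above.
 */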
struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
void __iomem *base, unsigned long nr_clks);
void rockchip_clk_of_add_provider(struct device_node *np,
@@ -708,6 +821,17 @@ void rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
#define ROCKCHIP_SOFTRST_HIWORD_MASK BIT(0)
+struct clk *rockchip_clk_register_halfdiv(const char *name,
+ const char *const *parent_names,
+ u8 num_parents, void __iomem *base,
+ int muxdiv_offset, u8 mux_shift,
+ u8 mux_width, u8 mux_flags,
+ u8 div_shift, u8 div_width,
+ u8 div_flags, int gate_offset,
+ u8 gate_shift, u8 gate_flags,
+ unsigned long flags,
+ spinlock_t *lock);
+
#ifdef CONFIG_RESET_CONTROLLER
void rockchip_register_softrst(struct device_node *np,
unsigned int num_regs,
diff --git a/drivers/clk/samsung/clk-exynos4412-isp.c b/drivers/clk/samsung/clk-exynos4412-isp.c
index d5f1ccb36300..cfaa057035ad 100644
--- a/drivers/clk/samsung/clk-exynos4412-isp.c
+++ b/drivers/clk/samsung/clk-exynos4412-isp.c
@@ -37,8 +37,6 @@ static const unsigned long exynos4x12_clk_isp_save[] __initconst = {
E4X12_GATE_ISP1,
};
-PNAME(mout_user_aclk400_mcuisp_p4x12) = { "fin_pll", "div_aclk400_mcuisp", };
-
static struct samsung_div_clock exynos4x12_isp_div_clks[] = {
DIV(CLK_ISP_DIV_ISP0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3),
DIV(CLK_ISP_DIV_ISP1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3),
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 72714633e39c..5b238fc314ac 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -28,7 +28,7 @@ static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
static const char * const emac_ptp_free_mux[] = {"peri_emac_ptp_clk", "boot_clk"};
static const char * const gpio_db_free_mux[] = {"peri_gpio_db_clk", "boot_clk"};
-static const char * const sdmmc_free_mux[] = {"peri_sdmmc_clk", "boot_clk"};
+static const char * const sdmmc_free_mux[] = {"main_sdmmc_clk", "boot_clk"};
static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"};
static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
@@ -37,6 +37,11 @@ static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
+static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
+ "peri_mpu_base_clk",
+ "osc1", "cb_intosc_hs_div2_clk",
+ "f2s_free_clk"};
+
/* clocks in AO (always on) controller */
static const struct stratix10_pll_clock s10_pll_clks[] = {
{ STRATIX10_BOOT_CLK, "boot_clk", boot_mux, ARRAY_SIZE(boot_mux), 0,
@@ -57,7 +62,7 @@ static const struct stratix10_perip_c_clock s10_main_perip_c_clks[] = {
};
static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
- { STRATIX10_MPU_FREE_CLK, "mpu_free_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ { STRATIX10_MPU_FREE_CLK, "mpu_free_clk", NULL, mpu_free_mux, ARRAY_SIZE(mpu_free_mux),
0, 0x48, 0, 0, 0},
{ STRATIX10_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, ARRAY_SIZE(noc_free_mux),
0, 0x4C, 0, 0, 0},
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index 468d1abaf0ee..bae5ee67a797 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -289,16 +289,13 @@ static const struct of_device_id sunxi_de2_clk_ids[] = {
.data = &sun8i_v3s_de2_clk_desc,
},
{
+ .compatible = "allwinner,sun50i-a64-de2-clk",
+ .data = &sun50i_a64_de2_clk_desc,
+ },
+ {
.compatible = "allwinner,sun50i-h5-de2-clk",
.data = &sun50i_a64_de2_clk_desc,
},
- /*
- * The Allwinner A64 SoC needs some bits to be poked in the syscon to
- * make DE2 actually work, so there is currently no A64 compatible
- * here. H5 shares the same reset line with the A64, so H5 reuses the
- * A64 clock description.
- */
{ }
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 65ba6455feb7..0f388f6944d5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -66,17 +66,18 @@ static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
CLK_SET_RATE_UNGATE);
/* TODO: The result of N/M is required to be in [8, 25] range. */
-static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video0_clk, "pll-video0",
- "osc24M", 0x0010,
- 8, 7, /* N */
- 0, 4, /* M */
- BIT(24), /* frac enable */
- BIT(25), /* frac select */
- 270000000, /* frac rate 0 */
- 297000000, /* frac rate 1 */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(pll_video0_clk, "pll-video0",
+ "osc24M", 0x0010,
+ 192000000, /* Minimum rate */
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
/* TODO: The result of N/M is required to be in [8, 25] range. */
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
@@ -152,17 +153,18 @@ static struct ccu_nk pll_periph1_clk = {
};
/* TODO: The result of N/M is required to be in [8, 25] range. */
-static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video1_clk, "pll-video1",
- "osc24M", 0x030,
- 8, 7, /* N */
- 0, 4, /* M */
- BIT(24), /* frac enable */
- BIT(25), /* frac select */
- 270000000, /* frac rate 0 */
- 297000000, /* frac rate 1 */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(pll_video1_clk, "pll-video1",
+ "osc24M", 0x030,
+ 192000000, /* Minimum rate */
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static struct ccu_nkm pll_sata_clk = {
.enable = BIT(31),
@@ -654,7 +656,8 @@ static SUNXI_CCU_GATE(dram_deinterlace_clk, "dram-deinterlace", "dram",
static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" };
static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
- 0x104, 0, 4, 24, 3, BIT(31), 0);
+ 0x104, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_MUX_GATE(mp_clk, "mp", de_parents,
0x108, 0, 4, 24, 3, BIT(31), 0);
@@ -666,9 +669,11 @@ static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd0_clk, "tcon-lcd0", tcon_parents,
static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd1_clk, "tcon-lcd1", tcon_parents,
0x114, 24, 3, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0", tcon_parents,
- 0x118, 0, 4, 24, 3, BIT(31), 0);
+ 0x118, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_tv1_clk, "tcon-tv1", tcon_parents,
- 0x11c, 0, 4, 24, 3, BIT(31), 0);
+ 0x11c, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static const char * const deinterlace_parents[] = { "pll-periph0",
"pll-periph1" };
@@ -698,7 +703,8 @@ static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
static const char * const hdmi_parents[] = { "pll-video0", "pll-video1" };
static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents,
- 0x150, 0, 4, 24, 2, BIT(31), 0);
+ 0x150, 0, 4, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(hdmi_slow_clk, "hdmi-slow", "osc24M",
0x154, BIT(31), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.h b/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
index 0db8e1e97af8..db2a1243f9ff 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
@@ -25,7 +25,9 @@
#define CLK_PLL_AUDIO_2X 4
#define CLK_PLL_AUDIO_4X 5
#define CLK_PLL_AUDIO_8X 6
-#define CLK_PLL_VIDEO0 7
+
+/* PLL_VIDEO0 is exported */
+
#define CLK_PLL_VIDEO0_2X 8
#define CLK_PLL_VE 9
#define CLK_PLL_DDR0 10
@@ -34,7 +36,9 @@
#define CLK_PLL_PERIPH0_2X 13
#define CLK_PLL_PERIPH1 14
#define CLK_PLL_PERIPH1_2X 15
-#define CLK_PLL_VIDEO1 16
+
+/* PLL_VIDEO1 is exported */
+
#define CLK_PLL_VIDEO1_2X 17
#define CLK_PLL_SATA 18
#define CLK_PLL_SATA_OUT 19
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index b71692391bd6..6507acc843c7 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -8,6 +8,7 @@ obj-y += clk-periph-fixed.o
obj-y += clk-periph-gate.o
obj-y += clk-pll.o
obj-y += clk-pll-out.o
+obj-y += clk-sdmmc-mux.o
obj-y += clk-super.o
obj-y += clk-tegra-audio.o
obj-y += clk-tegra-periph.o
@@ -24,3 +25,4 @@ obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o
obj-y += cvb.o
obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210.o
obj-$(CONFIG_CLK_TEGRA_BPMP) += clk-bpmp.o
+obj-y += clk-utils.o
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index a896692b74ec..01dada561c10 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -586,9 +586,15 @@ static struct clk_hw *tegra_bpmp_clk_of_xlate(struct of_phandle_args *clkspec,
unsigned int id = clkspec->args[0], i;
struct tegra_bpmp *bpmp = data;
- for (i = 0; i < bpmp->num_clocks; i++)
- if (bpmp->clocks[i]->id == id)
- return &bpmp->clocks[i]->hw;
+ for (i = 0; i < bpmp->num_clocks; i++) {
+ struct tegra_bpmp_clk *clk = bpmp->clocks[i];
+
+ if (!clk)
+ continue;
+
+ if (clk->id == id)
+ return &clk->hw;
+ }
return NULL;
}
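/*
 * Aside: bpmp->clocks[] can presumably be left with NULL holes for clocks
 * whose registration failed, so the added check keeps this xlate loop from
 * dereferencing a missing entry.
 */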
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 16e0aee14773..205fe8ff63f0 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -32,35 +32,15 @@
static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
unsigned long parent_rate)
{
- u64 divider_ux1 = parent_rate;
- u8 flags = divider->flags;
- int mul;
-
- if (!rate)
- return 0;
-
- mul = get_mul(divider);
-
- if (!(flags & TEGRA_DIVIDER_INT))
- divider_ux1 *= mul;
-
- if (flags & TEGRA_DIVIDER_ROUND_UP)
- divider_ux1 += rate - 1;
-
- do_div(divider_ux1, rate);
-
- if (flags & TEGRA_DIVIDER_INT)
- divider_ux1 *= mul;
+ int div;
- divider_ux1 -= mul;
+ div = div_frac_get(rate, parent_rate, divider->width,
+ divider->frac_width, divider->flags);
- if ((s64)divider_ux1 < 0)
+ if (div < 0)
return 0;
- if (divider_ux1 > get_max_div(divider))
- return get_max_div(divider);
-
- return divider_ux1;
+ return div;
}
static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
@@ -194,6 +174,7 @@ static const struct clk_div_table mc_div_table[] = {
struct clk *tegra_clk_register_mc(const char *name, const char *parent_name,
void __iomem *reg, spinlock_t *lock)
{
- return clk_register_divider_table(NULL, name, parent_name, 0, reg,
- 16, 1, 0, mc_div_table, lock);
+ return clk_register_divider_table(NULL, name, parent_name,
+ CLK_IS_CRITICAL, reg, 16, 1, 0,
+ mc_div_table, lock);
}
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 5234acd30e89..0621a3a82ea6 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -132,7 +132,7 @@ static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
timing = tegra->timings + i;
if (timing->rate > req->max_rate) {
- i = min(i, 1);
+ i = max(i, 1);
req->rate = tegra->timings[i - 1].rate;
return 0;
}
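/*
 * Aside: the min()->max() change above fixes an underflow guard. If the
 * very first timing already exceeds req->max_rate, i is 0 and
 * timings[i - 1] would index before the array; max(i, 1) clamps that to
 * timings[0], while any later match still falls back to the previous,
 * in-range timing as intended.
 */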
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index b616e33c5255..de466b4446da 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -227,13 +227,11 @@ enum clk_id {
tegra_clk_sdmmc1_9,
tegra_clk_sdmmc2,
tegra_clk_sdmmc2_8,
- tegra_clk_sdmmc2_9,
tegra_clk_sdmmc3,
tegra_clk_sdmmc3_8,
tegra_clk_sdmmc3_9,
tegra_clk_sdmmc4,
tegra_clk_sdmmc4_8,
- tegra_clk_sdmmc4_9,
tegra_clk_se,
tegra_clk_soc_therm,
tegra_clk_soc_therm_8,
diff --git a/drivers/clk/tegra/clk-sdmmc-mux.c b/drivers/clk/tegra/clk-sdmmc-mux.c
new file mode 100644
index 000000000000..473d418533cb
--- /dev/null
+++ b/drivers/clk/tegra/clk-sdmmc-mux.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 NVIDIA CORPORATION. All rights reserved.
+ *
+ * based on clk-mux.c
+ *
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+#include "clk.h"
+
+#define DIV_MASK GENMASK(7, 0)
+#define MUX_SHIFT 29
+#define MUX_MASK GENMASK(MUX_SHIFT + 2, MUX_SHIFT)
+#define SDMMC_MUL 2
+
+#define get_max_div(d) DIV_MASK
+#define get_div_field(val) ((val) & DIV_MASK)
+#define get_mux_field(val) (((val) & MUX_MASK) >> MUX_SHIFT)
+
+static const char * const mux_sdmmc_parents[] = {
+ "pll_p", "pll_c4_out2", "pll_c4_out0", "pll_c4_out1", "clk_m"
+};
+
+static const u8 mux_lj_idx[] = {
+ [0] = 0, [1] = 1, [2] = 2, [3] = 5, [4] = 6
+};
+
+static const u8 mux_non_lj_idx[] = {
+ [0] = 0, [1] = 3, [2] = 7, [3] = 4, [4] = 6
+};
+
+static u8 clk_sdmmc_mux_get_parent(struct clk_hw *hw)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ int num_parents, i;
+ u32 src, val;
+ const u8 *mux_idx;
+
+ num_parents = clk_hw_get_num_parents(hw);
+
+ val = readl_relaxed(sdmmc_mux->reg);
+ src = get_mux_field(val);
+ if (get_div_field(val))
+ mux_idx = mux_non_lj_idx;
+ else
+ mux_idx = mux_lj_idx;
+
+ for (i = 0; i < num_parents; i++) {
+ if (mux_idx[i] == src)
+ return i;
+ }
+
+ WARN(1, "Unknown parent selector %d\n", src);
+
+ return 0;
+}
+
+static int clk_sdmmc_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ u32 val;
+
+ val = readl_relaxed(sdmmc_mux->reg);
+ if (get_div_field(val))
+ index = mux_non_lj_idx[index];
+ else
+ index = mux_lj_idx[index];
+
+ val &= ~MUX_MASK;
+ val |= index << MUX_SHIFT;
+
+ writel(val, sdmmc_mux->reg);
+
+ return 0;
+}
+
+static unsigned long clk_sdmmc_mux_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ u32 val;
+ int div;
+ u64 rate = parent_rate;
+
+ val = readl_relaxed(sdmmc_mux->reg);
+ div = get_div_field(val);
+
+ div += SDMMC_MUL;
+
+ rate *= SDMMC_MUL;
+ rate += div - 1;
+ do_div(rate, div);
+
+ return rate;
+}
+
+static int clk_sdmmc_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ int div;
+ unsigned long output_rate = req->best_parent_rate;
+
+ req->rate = max(req->rate, req->min_rate);
+ req->rate = min(req->rate, req->max_rate);
+
+ if (!req->rate)
+ return output_rate;
+
+ div = div_frac_get(req->rate, output_rate, 8, 1, sdmmc_mux->div_flags);
+ if (div < 0)
+ div = 0;
+
+ if (sdmmc_mux->div_flags & TEGRA_DIVIDER_ROUND_UP)
+ req->rate = DIV_ROUND_UP(output_rate * SDMMC_MUL,
+ div + SDMMC_MUL);
+ else
+ req->rate = output_rate * SDMMC_MUL / (div + SDMMC_MUL);
+
+ return 0;
+}
+
+static int clk_sdmmc_mux_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ int div;
+ unsigned long flags = 0;
+ u32 val;
+ u8 src;
+
+ div = div_frac_get(rate, parent_rate, 8, 1, sdmmc_mux->div_flags);
+ if (div < 0)
+ return div;
+
+ if (sdmmc_mux->lock)
+ spin_lock_irqsave(sdmmc_mux->lock, flags);
+
+ src = clk_sdmmc_mux_get_parent(hw);
+ if (div)
+ src = mux_non_lj_idx[src];
+ else
+ src = mux_lj_idx[src];
+
+ val = src << MUX_SHIFT;
+ val |= div;
+ writel(val, sdmmc_mux->reg);
+ fence_udelay(2, sdmmc_mux->reg);
+
+ if (sdmmc_mux->lock)
+ spin_unlock_irqrestore(sdmmc_mux->lock, flags);
+
+ return 0;
+}
+
+static int clk_sdmmc_mux_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ const struct clk_ops *gate_ops = sdmmc_mux->gate_ops;
+ struct clk_hw *gate_hw = &sdmmc_mux->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+
+ return gate_ops->is_enabled(gate_hw);
+}
+
+static int clk_sdmmc_mux_enable(struct clk_hw *hw)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ const struct clk_ops *gate_ops = sdmmc_mux->gate_ops;
+ struct clk_hw *gate_hw = &sdmmc_mux->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+
+ return gate_ops->enable(gate_hw);
+}
+
+static void clk_sdmmc_mux_disable(struct clk_hw *hw)
+{
+ struct tegra_sdmmc_mux *sdmmc_mux = to_clk_sdmmc_mux(hw);
+ const struct clk_ops *gate_ops = sdmmc_mux->gate_ops;
+ struct clk_hw *gate_hw = &sdmmc_mux->gate.hw;
+
+ gate_ops->disable(gate_hw);
+}
+
+static const struct clk_ops tegra_clk_sdmmc_mux_ops = {
+ .get_parent = clk_sdmmc_mux_get_parent,
+ .set_parent = clk_sdmmc_mux_set_parent,
+ .determine_rate = clk_sdmmc_mux_determine_rate,
+ .recalc_rate = clk_sdmmc_mux_recalc_rate,
+ .set_rate = clk_sdmmc_mux_set_rate,
+ .is_enabled = clk_sdmmc_mux_is_enabled,
+ .enable = clk_sdmmc_mux_enable,
+ .disable = clk_sdmmc_mux_disable,
+};
+
+struct clk *tegra_clk_register_sdmmc_mux_div(const char *name,
+ void __iomem *clk_base, u32 offset, u32 clk_num, u8 div_flags,
+ unsigned long flags, void *lock)
+{
+ struct clk *clk;
+ struct clk_init_data init;
+ const struct tegra_clk_periph_regs *bank;
+ struct tegra_sdmmc_mux *sdmmc_mux;
+
+ init.ops = &tegra_clk_sdmmc_mux_ops;
+ init.name = name;
+ init.flags = flags;
+ init.parent_names = mux_sdmmc_parents;
+ init.num_parents = ARRAY_SIZE(mux_sdmmc_parents);
+
+ bank = get_reg_bank(clk_num);
+ if (!bank)
+ return ERR_PTR(-EINVAL);
+
+ sdmmc_mux = kzalloc(sizeof(*sdmmc_mux), GFP_KERNEL);
+ if (!sdmmc_mux)
+ return ERR_PTR(-ENOMEM);
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ sdmmc_mux->hw.init = &init;
+ sdmmc_mux->reg = clk_base + offset;
+ sdmmc_mux->lock = lock;
+ sdmmc_mux->gate.clk_base = clk_base;
+ sdmmc_mux->gate.regs = bank;
+ sdmmc_mux->gate.enable_refcnt = periph_clk_enb_refcnt;
+ sdmmc_mux->gate.clk_num = clk_num;
+ sdmmc_mux->gate.flags = TEGRA_PERIPH_ON_APB;
+ sdmmc_mux->div_flags = div_flags;
+ sdmmc_mux->gate_ops = &tegra_clk_periph_gate_ops;
+
+ clk = clk_register(NULL, &sdmmc_mux->hw);
+ if (IS_ERR(clk)) {
+ kfree(sdmmc_mux);
+ return clk;
+ }
+
+ sdmmc_mux->gate.hw.clk = clk;
+
+ return clk;
+}
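/*
 * Aside: the divider field is 8.1 fixed point, so a register value d
 * encodes a divisor of 1 + d/2 and recalc_rate() above computes
 * parent * 2 / (d + 2), rounded up. The same arithmetic in isolation:
 */
static unsigned long sdmmc_mux_rate(unsigned long parent_rate, u32 d)
{
	u64 rate = (u64)parent_rate * 2 + d + 1;	/* + (divisor - 1) rounds up */

	do_div(rate, d + 2);
	return rate;	/* e.g. parent = 408 MHz, d = 2: 816 MHz / 4 = 204 MHz */
}
/*
 * Note that d doubles as a path indicator: a zero divider field means the
 * "low jitter" (undivided) input is selected, which is why get_parent()
 * and set_parent() switch between mux_lj_idx[] and mux_non_lj_idx[].
 */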
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 2acba2986bc6..38c4eb28c8bf 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -451,15 +451,6 @@ static u32 mux_pllp_pllc4_out2_pllc4_out1_clkm_pllc4_out0_idx[] = {
[0] = 0, [1] = 3, [2] = 4, [3] = 6, [4] = 7,
};
-static const char *mux_pllp_clkm_pllc4_out2_out1_out0_lj[] = {
- "pll_p",
- "pll_c4_out2", "pll_c4_out0", /* LJ input */
- "pll_c4_out2", "pll_c4_out1",
- "pll_c4_out1", /* LJ input */
- "clk_m", "pll_c4_out0"
-};
-#define mux_pllp_clkm_pllc4_out2_out1_out0_lj_idx NULL
-
static const char *mux_pllp_pllc2_c_c3_clkm[] = {
"pll_p", "pll_c2", "pll_c", "pll_c3", "clk_m"
};
@@ -686,9 +677,7 @@ static struct tegra_periph_init_data periph_clks[] = {
MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3),
MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4),
MUX8("sdmmc1", mux_pllp_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1_9),
- MUX8("sdmmc2", mux_pllp_clkm_pllc4_out2_out1_out0_lj, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2_9),
MUX8("sdmmc3", mux_pllp_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3_9),
- MUX8("sdmmc4", mux_pllp_clkm_pllc4_out2_out1_out0_lj, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4_9),
MUX("la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, TEGRA_PERIPH_ON_APB, tegra_clk_la),
MUX("trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, TEGRA_PERIPH_ON_APB, tegra_clk_trace),
MUX("owr", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, TEGRA_PERIPH_ON_APB, tegra_clk_owr),
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 0c69c7970950..b6cf28ca2ed2 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1267,7 +1267,7 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
{ TEGRA124_CLK_I2S2, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S3, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
- { TEGRA124_CLK_VDE, TEGRA124_CLK_CLK_MAX, 600000000, 0 },
+ { TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_C3, 600000000, 0 },
{ TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1 },
{ TEGRA124_CLK_DSIALP, TEGRA124_CLK_PLL_P, 68000000, 0 },
{ TEGRA124_CLK_DSIBLP, TEGRA124_CLK_PLL_P, 68000000, 0 },
@@ -1290,6 +1290,7 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
{ TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1 },
{ TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1 },
{ TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0 },
+ { TEGRA124_CLK_VIC03, TEGRA124_CLK_PLL_C3, 0, 0 },
/* must be the last entry */
{ TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 5435d01c636a..9eb1cb14fce1 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -44,6 +44,8 @@
#define CLK_SOURCE_EMC 0x19c
#define CLK_SOURCE_SOR1 0x410
#define CLK_SOURCE_LA 0x1f8
+#define CLK_SOURCE_SDMMC2 0x154
+#define CLK_SOURCE_SDMMC4 0x164
#define PLLC_BASE 0x80
#define PLLC_OUT 0x84
@@ -2286,11 +2288,9 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_rtc] = { .dt_id = TEGRA210_CLK_RTC, .present = true },
[tegra_clk_timer] = { .dt_id = TEGRA210_CLK_TIMER, .present = true },
[tegra_clk_uarta_8] = { .dt_id = TEGRA210_CLK_UARTA, .present = true },
- [tegra_clk_sdmmc2_9] = { .dt_id = TEGRA210_CLK_SDMMC2, .present = true },
[tegra_clk_i2s1] = { .dt_id = TEGRA210_CLK_I2S1, .present = true },
[tegra_clk_i2c1] = { .dt_id = TEGRA210_CLK_I2C1, .present = true },
[tegra_clk_sdmmc1_9] = { .dt_id = TEGRA210_CLK_SDMMC1, .present = true },
- [tegra_clk_sdmmc4_9] = { .dt_id = TEGRA210_CLK_SDMMC4, .present = true },
[tegra_clk_pwm] = { .dt_id = TEGRA210_CLK_PWM, .present = true },
[tegra_clk_i2s2] = { .dt_id = TEGRA210_CLK_I2S2, .present = true },
[tegra_clk_usbd] = { .dt_id = TEGRA210_CLK_USBD, .present = true },
@@ -3030,6 +3030,16 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
0, NULL);
clks[TEGRA210_CLK_ACLK] = clk;
+ clk = tegra_clk_register_sdmmc_mux_div("sdmmc2", clk_base,
+ CLK_SOURCE_SDMMC2, 9,
+ TEGRA_DIVIDER_ROUND_UP, 0, NULL);
+ clks[TEGRA210_CLK_SDMMC2] = clk;
+
+ clk = tegra_clk_register_sdmmc_mux_div("sdmmc4", clk_base,
+ CLK_SOURCE_SDMMC4, 15,
+ TEGRA_DIVIDER_ROUND_UP, 0, NULL);
+ clks[TEGRA210_CLK_SDMMC4] = clk;
+
for (i = 0; i < ARRAY_SIZE(tegra210_periph); i++) {
struct tegra_periph_init_data *init = &tegra210_periph[i];
struct clk **clkp;
diff --git a/drivers/clk/tegra/clk-utils.c b/drivers/clk/tegra/clk-utils.c
new file mode 100644
index 000000000000..1a5daae4e501
--- /dev/null
+++ b/drivers/clk/tegra/clk-utils.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <asm/div64.h>
+
+#include "clk.h"
+
+#define div_mask(w) ((1 << (w)) - 1)
+
+int div_frac_get(unsigned long rate, unsigned parent_rate, u8 width,
+ u8 frac_width, u8 flags)
+{
+ u64 divider_ux1 = parent_rate;
+ int mul;
+
+ if (!rate)
+ return 0;
+
+ mul = 1 << frac_width;
+
+ if (!(flags & TEGRA_DIVIDER_INT))
+ divider_ux1 *= mul;
+
+ if (flags & TEGRA_DIVIDER_ROUND_UP)
+ divider_ux1 += rate - 1;
+
+ do_div(divider_ux1, rate);
+
+ if (flags & TEGRA_DIVIDER_INT)
+ divider_ux1 *= mul;
+
+ if (divider_ux1 < mul)
+ return 0;
+
+ divider_ux1 -= mul;
+
+ if (divider_ux1 > div_mask(width))
+ return div_mask(width);
+
+ return divider_ux1;
+}
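/*
 * Aside: a worked example with the SDMMC parameters (width = 8,
 * frac_width = 1, so mul = 2, with TEGRA_DIVIDER_ROUND_UP):
 *
 *	div_frac_get(250000000, 408000000, 8, 1, TEGRA_DIVIDER_ROUND_UP);
 *
 * divider_ux1 = 408000000 * 2 = 816000000, plus (rate - 1), divided by
 * 250000000 gives 4; subtracting mul leaves 2, i.e. a divisor of
 * (2 + 2) / 2 = 2.0 and an output of 204 MHz. Rounding the divider up
 * rounds the rate down, so the result never exceeds the requested rate.
 */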
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index e1f88463b600..d2c3a010f8e9 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/delay.h>
/**
* struct tegra_clk_sync_source - external clock source from codec
@@ -705,6 +706,32 @@ struct clk *tegra_clk_register_super_clk(const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags, void __iomem *reg, u8 clk_super_flags,
spinlock_t *lock);
+
+/**
+ * struct tegra_sdmmc_mux - switch divider with Low Jitter inputs for SDMMC
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register controlling mux and divider
+ * @div_flags: hardware-specific divider flags
+ * @lock: optional register lock
+ * @gate: gate clock
+ * @gate_ops: gate clock ops
+ */
+struct tegra_sdmmc_mux {
+ struct clk_hw hw;
+ void __iomem *reg;
+ spinlock_t *lock;
+ const struct clk_ops *gate_ops;
+ struct tegra_clk_periph_gate gate;
+ u8 div_flags;
+};
+
+#define to_clk_sdmmc_mux(_hw) container_of(_hw, struct tegra_sdmmc_mux, hw)
+
+struct clk *tegra_clk_register_sdmmc_mux_div(const char *name,
+ void __iomem *clk_base, u32 offset, u32 clk_num, u8 div_flags,
+ unsigned long flags, void *lock);
+
/**
* struct clk_init_table - clock initialization table
* @clk_id: clock id as mentioned in device tree bindings
@@ -811,6 +838,9 @@ extern tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
int tegra_pll_wait_for_lock(struct tegra_clk_pll *pll);
u16 tegra_pll_get_fixed_mdiv(struct clk_hw *hw, unsigned long input_rate);
int tegra_pll_p_div_to_hw(struct tegra_clk_pll *pll, u8 p_div);
+int div_frac_get(unsigned long rate, unsigned parent_rate, u8 width,
+ u8 frac_width, u8 flags);
+
/* Combined read fence with delay */
#define fence_udelay(delay, reg) \
diff --git a/drivers/clk/uniphier/clk-uniphier-peri.c b/drivers/clk/uniphier/clk-uniphier-peri.c
index 521c80e9a06f..89b3ac378b3f 100644
--- a/drivers/clk/uniphier/clk-uniphier-peri.c
+++ b/drivers/clk/uniphier/clk-uniphier-peri.c
@@ -27,6 +27,12 @@
#define UNIPHIER_PERI_CLK_FI2C(idx, ch) \
UNIPHIER_CLK_GATE("i2c" #ch, (idx), "i2c", 0x24, 24 + (ch))
+#define UNIPHIER_PERI_CLK_SCSSI(idx) \
+ UNIPHIER_CLK_GATE("scssi", (idx), "spi", 0x20, 17)
+
+#define UNIPHIER_PERI_CLK_MCSSI(idx) \
+ UNIPHIER_CLK_GATE("mcssi", (idx), "spi", 0x24, 14)
+
const struct uniphier_clk_data uniphier_ld4_peri_clk_data[] = {
UNIPHIER_PERI_CLK_UART(0, 0),
UNIPHIER_PERI_CLK_UART(1, 1),
@@ -38,6 +44,7 @@ const struct uniphier_clk_data uniphier_ld4_peri_clk_data[] = {
UNIPHIER_PERI_CLK_I2C(6, 2),
UNIPHIER_PERI_CLK_I2C(7, 3),
UNIPHIER_PERI_CLK_I2C(8, 4),
+ UNIPHIER_PERI_CLK_SCSSI(11),
{ /* sentinel */ }
};
@@ -53,5 +60,7 @@ const struct uniphier_clk_data uniphier_pro4_peri_clk_data[] = {
UNIPHIER_PERI_CLK_FI2C(8, 4),
UNIPHIER_PERI_CLK_FI2C(9, 5),
UNIPHIER_PERI_CLK_FI2C(10, 6),
+ UNIPHIER_PERI_CLK_SCSSI(11),
+ UNIPHIER_PERI_CLK_MCSSI(12),
{ /* sentinel */ }
};
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index 4f5ff9fa11fd..2bb5a8570adc 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -29,18 +29,20 @@
UNIPHIER_CLK_FACTOR("sd-200m", -1, "spll", 1, 10), \
UNIPHIER_CLK_FACTOR("sd-133m", -1, "spll", 1, 15)
-/* Denali driver requires clk_x rate (clk: 50MHz, clk_x & ecc_clk: 200MHz) */
#define UNIPHIER_LD4_SYS_CLK_NAND(idx) \
- UNIPHIER_CLK_FACTOR("nand-200m", -1, "spll", 1, 8), \
- UNIPHIER_CLK_GATE("nand", (idx), "nand-200m", 0x2104, 2)
+ UNIPHIER_CLK_FACTOR("nand-50m", -1, "spll", 1, 32), \
+ UNIPHIER_CLK_GATE("nand", (idx), "nand-50m", 0x2104, 2)
#define UNIPHIER_PRO5_SYS_CLK_NAND(idx) \
- UNIPHIER_CLK_FACTOR("nand-200m", -1, "spll", 1, 12), \
- UNIPHIER_CLK_GATE("nand", (idx), "nand-200m", 0x2104, 2)
+ UNIPHIER_CLK_FACTOR("nand-50m", -1, "spll", 1, 48), \
+ UNIPHIER_CLK_GATE("nand", (idx), "nand-50m", 0x2104, 2)
#define UNIPHIER_LD11_SYS_CLK_NAND(idx) \
- UNIPHIER_CLK_FACTOR("nand-200m", -1, "spll", 1, 10), \
- UNIPHIER_CLK_GATE("nand", (idx), "nand-200m", 0x210c, 0)
+ UNIPHIER_CLK_FACTOR("nand-50m", -1, "spll", 1, 40), \
+ UNIPHIER_CLK_GATE("nand", (idx), "nand-50m", 0x210c, 0)
+
+#define UNIPHIER_SYS_CLK_NAND_4X(idx) \
+ UNIPHIER_CLK_FACTOR("nand-4x", (idx), "nand", 4, 1)
#define UNIPHIER_LD11_SYS_CLK_EMMC(idx) \
UNIPHIER_CLK_GATE("emmc", (idx), NULL, 0x210c, 2)
@@ -93,7 +95,9 @@ const struct uniphier_clk_data uniphier_ld4_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 5625, 512), /* 270 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 16),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 32),
UNIPHIER_LD4_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD4_SYS_CLK_SD,
UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */
@@ -108,7 +112,9 @@ const struct uniphier_clk_data uniphier_pro4_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("gpll", -1, "ref", 10, 1), /* 250 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 8),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 32),
+ UNIPHIER_CLK_FACTOR("spi", 1, "spll", 1, 32),
UNIPHIER_LD4_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD4_SYS_CLK_SD,
UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
UNIPHIER_PRO4_SYS_CLK_ETHER(6),
@@ -118,6 +124,9 @@ const struct uniphier_clk_data uniphier_pro4_sys_clk_data[] = {
UNIPHIER_PRO4_SYS_CLK_GIO(12), /* Ether, SATA, USB3 */
UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
+ UNIPHIER_CLK_FACTOR("usb30-hsphy0", 16, "upll", 1, 12),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy0", 17, "ref", 1, 1),
+ UNIPHIER_CLK_FACTOR("usb31-ssphy0", 20, "ref", 1, 1),
UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x2104, 18),
UNIPHIER_CLK_GATE("sata1", 29, NULL, 0x2104, 19),
UNIPHIER_PRO4_SYS_CLK_AIO(40),
@@ -130,7 +139,9 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 270, 25), /* 270 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 20),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 16),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 32),
UNIPHIER_LD4_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD4_SYS_CLK_SD,
UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* Ether, HSC, MIO */
@@ -143,7 +154,9 @@ const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125), /* 2949.12 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 48),
UNIPHIER_PRO5_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_PRO5_SYS_CLK_SD,
UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* HSC */
UNIPHIER_PRO4_SYS_CLK_GIO(12), /* PCIe, USB3 */
@@ -158,7 +171,9 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("spll", -1, "ref", 96, 1), /* 2400 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 27),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 48),
UNIPHIER_PRO5_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_PRO5_SYS_CLK_SD,
UNIPHIER_PRO4_SYS_CLK_ETHER(6),
UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* HSC, RLE */
@@ -166,8 +181,11 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
/* The document mentions 0x2104 bit 18, but it is not functional */
- UNIPHIER_CLK_GATE("usb30-phy", 16, NULL, 0x2104, 19),
- UNIPHIER_CLK_GATE("usb31-phy", 20, NULL, 0x2104, 20),
+ UNIPHIER_CLK_GATE("usb30-hsphy0", 16, NULL, 0x2104, 19),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy0", 17, "ref", 1, 1),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy1", 18, "ref", 1, 1),
+ UNIPHIER_CLK_GATE("usb31-hsphy0", 20, NULL, 0x2104, 20),
+ UNIPHIER_CLK_FACTOR("usb31-ssphy0", 21, "ref", 1, 1),
UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x2104, 22),
UNIPHIER_PRO5_SYS_CLK_AIO(40),
{ /* sentinel */ }
@@ -180,7 +198,9 @@ const struct uniphier_clk_data uniphier_ld11_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vspll", -1, "ref", 80, 1), /* 2000 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 40),
UNIPHIER_LD11_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD11_SYS_CLK_EMMC(4),
/* Index 5 reserved for eMMC PHY */
UNIPHIER_LD11_SYS_CLK_ETHER(6),
@@ -213,7 +233,9 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("vppll", -1, "ref", 504, 5), /* 2520 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 40),
UNIPHIER_LD11_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD11_SYS_CLK_EMMC(4),
/* Index 5 reserved for eMMC PHY */
UNIPHIER_LD20_SYS_CLK_SD,
@@ -226,8 +248,10 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
* We do not use bit 15 here.
*/
UNIPHIER_CLK_GATE("usb30", 14, NULL, 0x210c, 14),
- UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 12),
- UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 13),
+ UNIPHIER_CLK_GATE("usb30-hsphy0", 16, NULL, 0x210c, 12),
+ UNIPHIER_CLK_GATE("usb30-hsphy1", 17, NULL, 0x210c, 13),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy0", 18, "ref", 1, 1),
+ UNIPHIER_CLK_FACTOR("usb30-ssphy1", 19, "ref", 1, 1),
UNIPHIER_CLK_GATE("pcie", 24, NULL, 0x210c, 4),
UNIPHIER_LD11_SYS_CLK_AIO(40),
UNIPHIER_LD11_SYS_CLK_EVEA(41),
@@ -254,19 +278,21 @@ const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("s2pll", -1, "ref", 88, 1), /* IPP: 2400 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 34),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 40),
+ UNIPHIER_CLK_FACTOR("spi", -1, "spll", 1, 40),
UNIPHIER_LD20_SYS_CLK_SD,
UNIPHIER_LD11_SYS_CLK_NAND(2),
+ UNIPHIER_SYS_CLK_NAND_4X(3),
UNIPHIER_LD11_SYS_CLK_EMMC(4),
UNIPHIER_CLK_GATE("ether0", 6, NULL, 0x210c, 9),
UNIPHIER_CLK_GATE("ether1", 7, NULL, 0x210c, 10),
UNIPHIER_CLK_GATE("usb30", 12, NULL, 0x210c, 4), /* =GIO0 */
UNIPHIER_CLK_GATE("usb31-0", 13, NULL, 0x210c, 5), /* =GIO1 */
UNIPHIER_CLK_GATE("usb31-1", 14, NULL, 0x210c, 6), /* =GIO1-1 */
- UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 16),
- UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 18),
- UNIPHIER_CLK_GATE("usb30-phy2", 18, NULL, 0x210c, 20),
- UNIPHIER_CLK_GATE("usb31-phy0", 20, NULL, 0x210c, 17),
- UNIPHIER_CLK_GATE("usb31-phy1", 21, NULL, 0x210c, 19),
+ UNIPHIER_CLK_GATE("usb30-hsphy0", 16, NULL, 0x210c, 16),
+ UNIPHIER_CLK_GATE("usb30-ssphy0", 17, NULL, 0x210c, 18),
+ UNIPHIER_CLK_GATE("usb30-ssphy1", 18, NULL, 0x210c, 20),
+ UNIPHIER_CLK_GATE("usb31-hsphy0", 20, NULL, 0x210c, 17),
+ UNIPHIER_CLK_GATE("usb31-ssphy0", 21, NULL, 0x210c, 19),
UNIPHIER_CLK_GATE("pcie", 24, NULL, 0x210c, 3),
UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x210c, 7),
UNIPHIER_CLK_GATE("sata1", 29, NULL, 0x210c, 8),
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 00caf37e52f9..c070cc7992e9 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o
obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o
-obj-$(CONFIG_MTK_TIMER) += mtk_timer.o
+obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
deleted file mode 100644
index f9b724fd9950..000000000000
--- a/drivers/clocksource/mtk_timer.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Mediatek SoCs General-Purpose Timer handling.
- *
- * Copyright (C) 2014 Matthias Brugger
- *
- * Matthias Brugger <matthias.bgg@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqreturn.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
-#include <linux/slab.h>
-
-#define GPT_IRQ_EN_REG 0x00
-#define GPT_IRQ_ENABLE(val) BIT((val) - 1)
-#define GPT_IRQ_ACK_REG 0x08
-#define GPT_IRQ_ACK(val) BIT((val) - 1)
-
-#define TIMER_CTRL_REG(val) (0x10 * (val))
-#define TIMER_CTRL_OP(val) (((val) & 0x3) << 4)
-#define TIMER_CTRL_OP_ONESHOT (0)
-#define TIMER_CTRL_OP_REPEAT (1)
-#define TIMER_CTRL_OP_FREERUN (3)
-#define TIMER_CTRL_CLEAR (2)
-#define TIMER_CTRL_ENABLE (1)
-#define TIMER_CTRL_DISABLE (0)
-
-#define TIMER_CLK_REG(val) (0x04 + (0x10 * (val)))
-#define TIMER_CLK_SRC(val) (((val) & 0x1) << 4)
-#define TIMER_CLK_SRC_SYS13M (0)
-#define TIMER_CLK_SRC_RTC32K (1)
-#define TIMER_CLK_DIV1 (0x0)
-#define TIMER_CLK_DIV2 (0x1)
-
-#define TIMER_CNT_REG(val) (0x08 + (0x10 * (val)))
-#define TIMER_CMP_REG(val) (0x0C + (0x10 * (val)))
-
-#define GPT_CLK_EVT 1
-#define GPT_CLK_SRC 2
-
-struct mtk_clock_event_device {
- void __iomem *gpt_base;
- u32 ticks_per_jiffy;
- struct clock_event_device dev;
-};
-
-static void __iomem *gpt_sched_reg __read_mostly;
-
-static u64 notrace mtk_read_sched_clock(void)
-{
- return readl_relaxed(gpt_sched_reg);
-}
-
-static inline struct mtk_clock_event_device *to_mtk_clk(
- struct clock_event_device *c)
-{
- return container_of(c, struct mtk_clock_event_device, dev);
-}
-
-static void mtk_clkevt_time_stop(struct mtk_clock_event_device *evt, u8 timer)
-{
- u32 val;
-
- val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
- writel(val & ~TIMER_CTRL_ENABLE, evt->gpt_base +
- TIMER_CTRL_REG(timer));
-}
-
-static void mtk_clkevt_time_setup(struct mtk_clock_event_device *evt,
- unsigned long delay, u8 timer)
-{
- writel(delay, evt->gpt_base + TIMER_CMP_REG(timer));
-}
-
-static void mtk_clkevt_time_start(struct mtk_clock_event_device *evt,
- bool periodic, u8 timer)
-{
- u32 val;
-
- /* Acknowledge interrupt */
- writel(GPT_IRQ_ACK(timer), evt->gpt_base + GPT_IRQ_ACK_REG);
-
- val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
-
- /* Clear 2 bit timer operation mode field */
- val &= ~TIMER_CTRL_OP(0x3);
-
- if (periodic)
- val |= TIMER_CTRL_OP(TIMER_CTRL_OP_REPEAT);
- else
- val |= TIMER_CTRL_OP(TIMER_CTRL_OP_ONESHOT);
-
- writel(val | TIMER_CTRL_ENABLE | TIMER_CTRL_CLEAR,
- evt->gpt_base + TIMER_CTRL_REG(timer));
-}
-
-static int mtk_clkevt_shutdown(struct clock_event_device *clk)
-{
- mtk_clkevt_time_stop(to_mtk_clk(clk), GPT_CLK_EVT);
- return 0;
-}
-
-static int mtk_clkevt_set_periodic(struct clock_event_device *clk)
-{
- struct mtk_clock_event_device *evt = to_mtk_clk(clk);
-
- mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
- mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT);
- mtk_clkevt_time_start(evt, true, GPT_CLK_EVT);
- return 0;
-}
-
-static int mtk_clkevt_next_event(unsigned long event,
- struct clock_event_device *clk)
-{
- struct mtk_clock_event_device *evt = to_mtk_clk(clk);
-
- mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
- mtk_clkevt_time_setup(evt, event, GPT_CLK_EVT);
- mtk_clkevt_time_start(evt, false, GPT_CLK_EVT);
-
- return 0;
-}
-
-static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
-{
- struct mtk_clock_event_device *evt = dev_id;
-
- /* Acknowledge timer0 irq */
- writel(GPT_IRQ_ACK(GPT_CLK_EVT), evt->gpt_base + GPT_IRQ_ACK_REG);
- evt->dev.event_handler(&evt->dev);
-
- return IRQ_HANDLED;
-}
-
-static void
-__init mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
-{
- writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE,
- evt->gpt_base + TIMER_CTRL_REG(timer));
-
- writel(TIMER_CLK_SRC(TIMER_CLK_SRC_SYS13M) | TIMER_CLK_DIV1,
- evt->gpt_base + TIMER_CLK_REG(timer));
-
- writel(0x0, evt->gpt_base + TIMER_CMP_REG(timer));
-
- writel(TIMER_CTRL_OP(option) | TIMER_CTRL_ENABLE,
- evt->gpt_base + TIMER_CTRL_REG(timer));
-}
-
-static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
-{
- u32 val;
-
- /* Disable all interrupts */
- writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
-
- /* Acknowledge all spurious pending interrupts */
- writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
-
- val = readl(evt->gpt_base + GPT_IRQ_EN_REG);
- writel(val | GPT_IRQ_ENABLE(timer),
- evt->gpt_base + GPT_IRQ_EN_REG);
-}
-
-static int __init mtk_timer_init(struct device_node *node)
-{
- struct mtk_clock_event_device *evt;
- struct resource res;
- unsigned long rate = 0;
- struct clk *clk;
-
- evt = kzalloc(sizeof(*evt), GFP_KERNEL);
- if (!evt)
- return -ENOMEM;
-
- evt->dev.name = "mtk_tick";
- evt->dev.rating = 300;
- evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
- evt->dev.set_state_shutdown = mtk_clkevt_shutdown;
- evt->dev.set_state_periodic = mtk_clkevt_set_periodic;
- evt->dev.set_state_oneshot = mtk_clkevt_shutdown;
- evt->dev.tick_resume = mtk_clkevt_shutdown;
- evt->dev.set_next_event = mtk_clkevt_next_event;
- evt->dev.cpumask = cpu_possible_mask;
-
- evt->gpt_base = of_io_request_and_map(node, 0, "mtk-timer");
- if (IS_ERR(evt->gpt_base)) {
- pr_err("Can't get resource\n");
- goto err_kzalloc;
- }
-
- evt->dev.irq = irq_of_parse_and_map(node, 0);
- if (evt->dev.irq <= 0) {
- pr_err("Can't parse IRQ\n");
- goto err_mem;
- }
-
- clk = of_clk_get(node, 0);
- if (IS_ERR(clk)) {
- pr_err("Can't get timer clock\n");
- goto err_irq;
- }
-
- if (clk_prepare_enable(clk)) {
- pr_err("Can't prepare clock\n");
- goto err_clk_put;
- }
- rate = clk_get_rate(clk);
-
- if (request_irq(evt->dev.irq, mtk_timer_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
- pr_err("failed to setup irq %d\n", evt->dev.irq);
- goto err_clk_disable;
- }
-
- evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
-
- /* Configure clock source */
- mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
- clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
- node->name, rate, 300, 32, clocksource_mmio_readl_up);
- gpt_sched_reg = evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC);
- sched_clock_register(mtk_read_sched_clock, 32, rate);
-
- /* Configure clock event */
- mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
- clockevents_config_and_register(&evt->dev, rate, 0x3,
- 0xffffffff);
-
- mtk_timer_enable_irq(evt, GPT_CLK_EVT);
-
- return 0;
-
-err_clk_disable:
- clk_disable_unprepare(clk);
-err_clk_put:
- clk_put(clk);
-err_irq:
- irq_dispose_mapping(evt->dev.irq);
-err_mem:
- iounmap(evt->gpt_base);
- of_address_to_resource(node, 0, &res);
- release_mem_region(res.start, resource_size(&res));
-err_kzalloc:
- kfree(evt);
-
- return -EINVAL;
-}
-TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index c337a8100a7b..aa624885e0e2 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -230,7 +230,7 @@ static int __init tegra20_init_timer(struct device_node *np)
return ret;
}
- tegra_clockevent.cpumask = cpu_all_mask;
+ tegra_clockevent.cpumask = cpu_possible_mask;
tegra_clockevent.irq = tegra_timer_irq.irq;
clockevents_config_and_register(&tegra_clockevent, 1000000,
0x1, 0x1fffffff);
@@ -259,6 +259,6 @@ static int __init tegra20_init_rtc(struct device_node *np)
else
clk_prepare_enable(clk);
- return register_persistent_clock(NULL, tegra_read_persistent_clock64);
+ return register_persistent_clock(tegra_read_persistent_clock64);
}
TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
diff --git a/drivers/clocksource/timer-atcpit100.c b/drivers/clocksource/timer-atcpit100.c
index 5e23d7b4a722..b4bd2f5b801d 100644
--- a/drivers/clocksource/timer-atcpit100.c
+++ b/drivers/clocksource/timer-atcpit100.c
@@ -185,7 +185,7 @@ static struct timer_of to = {
.set_state_oneshot = atcpit100_clkevt_set_oneshot,
.tick_resume = atcpit100_clkevt_shutdown,
.set_next_event = atcpit100_clkevt_next_event,
- .cpumask = cpu_all_mask,
+ .cpumask = cpu_possible_mask,
},
.of_irq = {
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index 0eee03250cfc..f5b2eda30bf3 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -211,7 +211,7 @@ static int __init keystone_timer_init(struct device_node *np)
event_dev->set_state_shutdown = keystone_shutdown;
event_dev->set_state_periodic = keystone_set_periodic;
event_dev->set_state_oneshot = keystone_shutdown;
- event_dev->cpumask = cpu_all_mask;
+ event_dev->cpumask = cpu_possible_mask;
event_dev->owner = THIS_MODULE;
event_dev->name = TIMER_NAME;
event_dev->irq = irq;
diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c
new file mode 100644
index 000000000000..eb10321f8517
--- /dev/null
+++ b/drivers/clocksource/timer-mediatek.c
@@ -0,0 +1,328 @@
+/*
+ * Mediatek SoCs General-Purpose Timer handling.
+ *
+ * Copyright (C) 2014 Matthias Brugger
+ *
+ * Matthias Brugger <matthias.bgg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include "timer-of.h"
+
+#define TIMER_CLK_EVT (1)
+#define TIMER_CLK_SRC (2)
+
+#define TIMER_SYNC_TICKS (3)
+
+/* gpt */
+#define GPT_IRQ_EN_REG 0x00
+#define GPT_IRQ_ENABLE(val) BIT((val) - 1)
+#define GPT_IRQ_ACK_REG 0x08
+#define GPT_IRQ_ACK(val) BIT((val) - 1)
+
+#define GPT_CTRL_REG(val) (0x10 * (val))
+#define GPT_CTRL_OP(val) (((val) & 0x3) << 4)
+#define GPT_CTRL_OP_ONESHOT (0)
+#define GPT_CTRL_OP_REPEAT (1)
+#define GPT_CTRL_OP_FREERUN (3)
+#define GPT_CTRL_CLEAR (2)
+#define GPT_CTRL_ENABLE (1)
+#define GPT_CTRL_DISABLE (0)
+
+#define GPT_CLK_REG(val) (0x04 + (0x10 * (val)))
+#define GPT_CLK_SRC(val) (((val) & 0x1) << 4)
+#define GPT_CLK_SRC_SYS13M (0)
+#define GPT_CLK_SRC_RTC32K (1)
+#define GPT_CLK_DIV1 (0x0)
+#define GPT_CLK_DIV2 (0x1)
+
+#define GPT_CNT_REG(val) (0x08 + (0x10 * (val)))
+#define GPT_CMP_REG(val) (0x0C + (0x10 * (val)))
+
+/* system timer */
+#define SYST_BASE (0x40)
+
+#define SYST_CON (SYST_BASE + 0x0)
+#define SYST_VAL (SYST_BASE + 0x4)
+
+#define SYST_CON_REG(to) (timer_of_base(to) + SYST_CON)
+#define SYST_VAL_REG(to) (timer_of_base(to) + SYST_VAL)
+
+/*
+ * SYST_CON_EN: Clock enable. Shall be set to:
+ * - start the timer countdown,
+ * - allow the timeout ticks to be updated,
+ * - allow the interrupt functions to be changed.
+ *
+ * SYST_CON_IRQ_EN: Set to enable the interrupt.
+ *
+ * SYST_CON_IRQ_CLR: Set to clear the interrupt.
+ */
+#define SYST_CON_EN BIT(0)
+#define SYST_CON_IRQ_EN BIT(1)
+#define SYST_CON_IRQ_CLR BIT(4)
+
+static void __iomem *gpt_sched_reg __read_mostly;
+
+static void mtk_syst_ack_irq(struct timer_of *to)
+{
+ /* Clear and disable interrupt */
+ writel(SYST_CON_IRQ_CLR | SYST_CON_EN, SYST_CON_REG(to));
+}
+
+static irqreturn_t mtk_syst_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ mtk_syst_ack_irq(to);
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_syst_clkevt_next_event(unsigned long ticks,
+ struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ /* Enable clock to allow timeout tick update later */
+ writel(SYST_CON_EN, SYST_CON_REG(to));
+
+ /*
+ * Write the new timeout ticks. The timer starts counting
+ * down once the timeout ticks are updated.
+ */
+ writel(ticks, SYST_VAL_REG(to));
+
+ /* Enable interrupt */
+ writel(SYST_CON_EN | SYST_CON_IRQ_EN, SYST_CON_REG(to));
+
+ return 0;
+}
+
+static int mtk_syst_clkevt_shutdown(struct clock_event_device *clkevt)
+{
+ /* Disable timer */
+ writel(0, SYST_CON_REG(to_timer_of(clkevt)));
+
+ return 0;
+}
+
+static int mtk_syst_clkevt_resume(struct clock_event_device *clkevt)
+{
+ return mtk_syst_clkevt_shutdown(clkevt);
+}
+
+static int mtk_syst_clkevt_oneshot(struct clock_event_device *clkevt)
+{
+ return 0;
+}
+
+static u64 notrace mtk_gpt_read_sched_clock(void)
+{
+ return readl_relaxed(gpt_sched_reg);
+}
+
+static void mtk_gpt_clkevt_time_stop(struct timer_of *to, u8 timer)
+{
+ u32 val;
+
+ val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
+ writel(val & ~GPT_CTRL_ENABLE, timer_of_base(to) +
+ GPT_CTRL_REG(timer));
+}
+
+static void mtk_gpt_clkevt_time_setup(struct timer_of *to,
+ unsigned long delay, u8 timer)
+{
+ writel(delay, timer_of_base(to) + GPT_CMP_REG(timer));
+}
+
+static void mtk_gpt_clkevt_time_start(struct timer_of *to,
+ bool periodic, u8 timer)
+{
+ u32 val;
+
+ /* Acknowledge interrupt */
+ writel(GPT_IRQ_ACK(timer), timer_of_base(to) + GPT_IRQ_ACK_REG);
+
+ val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
+
+ /* Clear the 2-bit timer operation mode field */
+ val &= ~GPT_CTRL_OP(0x3);
+
+ if (periodic)
+ val |= GPT_CTRL_OP(GPT_CTRL_OP_REPEAT);
+ else
+ val |= GPT_CTRL_OP(GPT_CTRL_OP_ONESHOT);
+
+ writel(val | GPT_CTRL_ENABLE | GPT_CTRL_CLEAR,
+ timer_of_base(to) + GPT_CTRL_REG(timer));
+}
+
+static int mtk_gpt_clkevt_shutdown(struct clock_event_device *clk)
+{
+ mtk_gpt_clkevt_time_stop(to_timer_of(clk), TIMER_CLK_EVT);
+
+ return 0;
+}
+
+static int mtk_gpt_clkevt_set_periodic(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_setup(to, to->of_clk.period, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_start(to, true, TIMER_CLK_EVT);
+
+ return 0;
+}
+
+static int mtk_gpt_clkevt_next_event(unsigned long event,
+ struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_setup(to, event, TIMER_CLK_EVT);
+ mtk_gpt_clkevt_time_start(to, false, TIMER_CLK_EVT);
+
+ return 0;
+}
+
+static irqreturn_t mtk_gpt_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ /* Acknowledge timer0 irq */
+ writel(GPT_IRQ_ACK(TIMER_CLK_EVT), timer_of_base(to) + GPT_IRQ_ACK_REG);
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static void
+__init mtk_gpt_setup(struct timer_of *to, u8 timer, u8 option)
+{
+ writel(GPT_CTRL_CLEAR | GPT_CTRL_DISABLE,
+ timer_of_base(to) + GPT_CTRL_REG(timer));
+
+ writel(GPT_CLK_SRC(GPT_CLK_SRC_SYS13M) | GPT_CLK_DIV1,
+ timer_of_base(to) + GPT_CLK_REG(timer));
+
+ writel(0x0, timer_of_base(to) + GPT_CMP_REG(timer));
+
+ writel(GPT_CTRL_OP(option) | GPT_CTRL_ENABLE,
+ timer_of_base(to) + GPT_CTRL_REG(timer));
+}
+
+static void mtk_gpt_enable_irq(struct timer_of *to, u8 timer)
+{
+ u32 val;
+
+ /* Disable all interrupts */
+ writel(0x0, timer_of_base(to) + GPT_IRQ_EN_REG);
+
+ /* Acknowledge all spurious pending interrupts */
+ writel(0x3f, timer_of_base(to) + GPT_IRQ_ACK_REG);
+
+ val = readl(timer_of_base(to) + GPT_IRQ_EN_REG);
+ writel(val | GPT_IRQ_ENABLE(timer),
+ timer_of_base(to) + GPT_IRQ_EN_REG);
+}
+
+static struct timer_of to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+ .clkevt = {
+ .name = "mtk-clkevt",
+ .rating = 300,
+ .cpumask = cpu_possible_mask,
+ },
+
+ .of_irq = {
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+};
+
+static int __init mtk_syst_init(struct device_node *node)
+{
+ int ret;
+
+ to.clkevt.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT;
+ to.clkevt.set_state_shutdown = mtk_syst_clkevt_shutdown;
+ to.clkevt.set_state_oneshot = mtk_syst_clkevt_oneshot;
+ to.clkevt.tick_resume = mtk_syst_clkevt_resume;
+ to.clkevt.set_next_event = mtk_syst_clkevt_next_event;
+ to.of_irq.handler = mtk_syst_handler;
+
+ ret = timer_of_init(node, &to);
+ if (ret)
+ goto err;
+
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ TIMER_SYNC_TICKS, 0xffffffff);
+
+ return 0;
+err:
+ timer_of_cleanup(&to);
+ return ret;
+}
+
+static int __init mtk_gpt_init(struct device_node *node)
+{
+ int ret;
+
+ to.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ to.clkevt.set_state_shutdown = mtk_gpt_clkevt_shutdown;
+ to.clkevt.set_state_periodic = mtk_gpt_clkevt_set_periodic;
+ to.clkevt.set_state_oneshot = mtk_gpt_clkevt_shutdown;
+ to.clkevt.tick_resume = mtk_gpt_clkevt_shutdown;
+ to.clkevt.set_next_event = mtk_gpt_clkevt_next_event;
+ to.of_irq.handler = mtk_gpt_interrupt;
+
+ ret = timer_of_init(node, &to);
+ if (ret)
+ goto err;
+
+ /* Configure clock source */
+ mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN);
+ clocksource_mmio_init(timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC),
+ node->name, timer_of_rate(&to), 300, 32,
+ clocksource_mmio_readl_up);
+ gpt_sched_reg = timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC);
+ sched_clock_register(mtk_gpt_read_sched_clock, 32, timer_of_rate(&to));
+
+ /* Configure clock event */
+ mtk_gpt_setup(&to, TIMER_CLK_EVT, GPT_CTRL_OP_REPEAT);
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ TIMER_SYNC_TICKS, 0xffffffff);
+
+ mtk_gpt_enable_irq(&to, TIMER_CLK_EVT);
+
+ return 0;
+err:
+ timer_of_cleanup(&to);
+ return ret;
+}
+TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
+TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
diff --git a/drivers/clocksource/timer-sprd.c b/drivers/clocksource/timer-sprd.c
index ef9ebeafb3ed..430cb99d8d79 100644
--- a/drivers/clocksource/timer-sprd.c
+++ b/drivers/clocksource/timer-sprd.c
@@ -156,4 +156,54 @@ static int __init sprd_timer_init(struct device_node *np)
return 0;
}
+static struct timer_of suspend_to = {
+ .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+};
+
+static u64 sprd_suspend_timer_read(struct clocksource *cs)
+{
+ return ~(u64)readl_relaxed(timer_of_base(&suspend_to) +
+ TIMER_VALUE_SHDW_LO) & cs->mask;
+}
+
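/*
 * Standalone sketch of the inversion used by sprd_suspend_timer_read()
 * above: bitwise-negating a down-counter yields the monotonically
 * increasing value the clocksource core expects.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t up_from_down(uint32_t down)
{
	return ~down;		/* 0xffffffff -> 0, 0xfffffffe -> 1, ... */
}

int main(void)
{
	printf("%u\n", up_from_down(0xffffffffu));	/* 0 */
	printf("%u\n", up_from_down(0xfffffffeu));	/* 1 */
	return 0;
}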
+static int sprd_suspend_timer_enable(struct clocksource *cs)
+{
+ sprd_timer_update_counter(timer_of_base(&suspend_to),
+ TIMER_VALUE_LO_MASK);
+ sprd_timer_enable(timer_of_base(&suspend_to), TIMER_CTL_PERIOD_MODE);
+
+ return 0;
+}
+
+static void sprd_suspend_timer_disable(struct clocksource *cs)
+{
+ sprd_timer_disable(timer_of_base(&suspend_to));
+}
+
+static struct clocksource suspend_clocksource = {
+ .name = "sprd_suspend_timer",
+ .rating = 200,
+ .read = sprd_suspend_timer_read,
+ .enable = sprd_suspend_timer_enable,
+ .disable = sprd_suspend_timer_disable,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+};
+
+static int __init sprd_suspend_timer_init(struct device_node *np)
+{
+ int ret;
+
+ ret = timer_of_init(np, &suspend_to);
+ if (ret)
+ return ret;
+
+ clocksource_register_hz(&suspend_clocksource,
+ timer_of_rate(&suspend_to));
+
+ return 0;
+}
+
TIMER_OF_DECLARE(sc9860_timer, "sprd,sc9860-timer", sprd_timer_init);
+TIMER_OF_DECLARE(sc9860_persistent_timer, "sprd,sc9860-suspend-timer",
+ sprd_suspend_timer_init);
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 880a861ab3c8..29e2e1a78a43 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -78,8 +78,7 @@ static struct ti_32k ti_32k_timer = {
.rating = 250,
.read = ti_32k_read_cycles,
.mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS |
- CLOCK_SOURCE_SUSPEND_NONSTOP,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
};
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index a6a0338eea77..f74689334f7c 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -162,7 +162,7 @@ static int __init zevio_timer_add(struct device_node *node)
timer->clkevt.set_state_oneshot = zevio_timer_set_oneshot;
timer->clkevt.tick_resume = zevio_timer_set_oneshot;
timer->clkevt.rating = 200;
- timer->clkevt.cpumask = cpu_all_mask;
+ timer->clkevt.cpumask = cpu_possible_mask;
timer->clkevt.features = CLOCK_EVT_FEAT_ONESHOT;
timer->clkevt.irq = irqnr;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index e718b8c69a56..eeb7d31cbda5 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -239,7 +240,7 @@ void cn_del_callback(struct cb_id *id)
}
EXPORT_SYMBOL_GPL(cn_del_callback);
-static int cn_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
{
struct cn_queue_dev *dev = cdev.cbdev;
struct cn_callback_entry *cbq;
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index 739da90ff3f6..75491fc841a6 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -51,6 +51,16 @@
#define ARMADA_37XX_DVFS_LOAD_2 2
#define ARMADA_37XX_DVFS_LOAD_3 3
+/* AVS register set */
+#define ARMADA_37XX_AVS_CTL0 0x0
+#define ARMADA_37XX_AVS_ENABLE BIT(30)
+#define ARMADA_37XX_AVS_HIGH_VDD_LIMIT 16
+#define ARMADA_37XX_AVS_LOW_VDD_LIMIT 22
+#define ARMADA_37XX_AVS_VDD_MASK 0x3F
+#define ARMADA_37XX_AVS_CTL2 0x8
+#define ARMADA_37XX_AVS_LOW_VDD_EN BIT(6)
+#define ARMADA_37XX_AVS_VSET(x) (0x1C + 4 * (x))
+
/*
* On Armada 37xx the Power management manages 4 level of CPU load,
* each level can be associated with a CPU clock source, a CPU
@@ -58,6 +68,17 @@
*/
#define LOAD_LEVEL_NR 4
+#define MIN_VOLT_MV 1000
+
+/* Voltage in mV corresponding to each AVS value */
+static int avs_map[] = {
+ 747, 758, 770, 782, 793, 805, 817, 828, 840, 852, 863, 875, 887, 898,
+ 910, 922, 933, 945, 957, 968, 980, 992, 1003, 1015, 1027, 1038, 1050,
+ 1062, 1073, 1085, 1097, 1108, 1120, 1132, 1143, 1155, 1167, 1178, 1190,
+ 1202, 1213, 1225, 1237, 1248, 1260, 1272, 1283, 1295, 1307, 1318, 1330,
+ 1342
+};
+
struct armada37xx_cpufreq_state {
struct regmap *regmap;
u32 nb_l0l1;
@@ -71,6 +92,7 @@ static struct armada37xx_cpufreq_state *armada37xx_cpufreq_state;
struct armada_37xx_dvfs {
u32 cpu_freq_max;
u8 divider[LOAD_LEVEL_NR];
+ u32 avs[LOAD_LEVEL_NR];
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
@@ -148,6 +170,128 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
clk_set_parent(clk, parent);
}
+/*
+ * Find the Armada 37xx supported AVS value whose voltage is the
+ * closest one at or above the target voltage.
+ */
+static u32 armada_37xx_avs_val_match(int target_vm)
+{
+ u32 avs;
+
+ /* Find the closest supported voltage at or above the target */
+ for (avs = 0; avs < ARRAY_SIZE(avs_map); avs++)
+ if (avs_map[avs] >= target_vm)
+ break;
+
+ /*
+ * If every supported voltage is below the target, fall
+ * back to the largest supported voltage.
+ */
+ if (avs == ARRAY_SIZE(avs_map))
+ avs = ARRAY_SIZE(avs_map) - 1;
+
+ return avs;
+}
+
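/*
 * Standalone sketch of the round-up lookup above, run against a
 * truncated copy of avs_map (values in mV; the helper name is
 * illustrative).
 */
#include <stddef.h>
#include <stdio.h>

static const int volt_map[] = { 747, 758, 770, 782 };
#define N (sizeof(volt_map) / sizeof(volt_map[0]))

static size_t match_round_up(int target_mv)
{
	size_t i;

	for (i = 0; i < N; i++)
		if (volt_map[i] >= target_mv)
			return i;

	return N - 1;	/* everything is below the target: clamp */
}

int main(void)
{
	printf("760 mV -> index %zu (%d mV)\n",
	       match_round_up(760), volt_map[match_round_up(760)]);
	printf("900 mV -> index %zu (%d mV)\n",
	       match_round_up(900), volt_map[match_round_up(900)]);
	return 0;
}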
+/*
+ * On the Armada 37xx SoC, the L0 (VSET0) VDD AVS value is set to the
+ * SVC revision value, or to a default value when SVC is not supported.
+ * - L0 can be read from the AVS_CTRL_0 register and the L0 voltage
+ *   looked up in the avs_map table.
+ * - The L1 voltage should be about 100mV below the L0 voltage.
+ * - The L2 & L3 voltages should be about 150mV below the L0 voltage.
+ * This function derives the L1, L2 and L3 AVS values from the L0
+ * voltage and fills in the whole AVS value table.
+ */
+static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
+ struct armada_37xx_dvfs *dvfs)
+{
+ unsigned int target_vm;
+ int load_level = 0;
+ u32 l0_vdd_min;
+
+ if (base == NULL)
+ return;
+
+ /* Get L0 VDD min value */
+ regmap_read(base, ARMADA_37XX_AVS_CTL0, &l0_vdd_min);
+ l0_vdd_min = (l0_vdd_min >> ARMADA_37XX_AVS_LOW_VDD_LIMIT) &
+ ARMADA_37XX_AVS_VDD_MASK;
+ if (l0_vdd_min >= ARRAY_SIZE(avs_map)) {
+ pr_err("L0 VDD MIN %d is not correct.\n", l0_vdd_min);
+ return;
+ }
+ dvfs->avs[0] = l0_vdd_min;
+
+ if (avs_map[l0_vdd_min] <= MIN_VOLT_MV) {
+ /*
+ * If the L0 voltage is at or below 1000mV, all other VDD
+ * sets are clamped to the 1000mV minimum.
+ */
+ u32 avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV);
+
+ for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
+ dvfs->avs[load_level] = avs_min;
+
+ return;
+ }
+
+ /*
+ * The L1 voltage is the L0 voltage minus 100mV, but no
+ * lower than 1000mV.
+ */
+ target_vm = avs_map[l0_vdd_min] - 100;
+ target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ dvfs->avs[1] = armada_37xx_avs_val_match(target_vm);
+
+ /*
+ * The L2 & L3 voltages are the L0 voltage minus 150mV,
+ * but no lower than 1000mV.
+ */
+ target_vm = avs_map[l0_vdd_min] - 150;
+ target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
+}
+
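/*
 * A worked example of the derivation above, assuming the fuses yield
 * avs_map[l0_vdd_min] == 1202 mV (a made-up value): L1 becomes
 * 1102 mV and L2/L3 become 1052 mV, each clamped at 1000 mV.
 */
#include <stdio.h>

#define MIN_MV 1000

static int clamp_min(int mv)
{
	return mv > MIN_MV ? mv : MIN_MV;
}

int main(void)
{
	int l0 = 1202;

	printf("L1    target: %d mV\n", clamp_min(l0 - 100));	/* 1102 */
	printf("L2/L3 target: %d mV\n", clamp_min(l0 - 150));	/* 1052 */
	return 0;
}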
+static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
+ struct armada_37xx_dvfs *dvfs)
+{
+ unsigned int avs_val = 0, freq;
+ int load_level = 0;
+
+ if (base == NULL)
+ return;
+
+ /* Disable AVS before the configuration */
+ regmap_update_bits(base, ARMADA_37XX_AVS_CTL0,
+ ARMADA_37XX_AVS_ENABLE, 0);
+
+ /* Enable low voltage mode */
+ regmap_update_bits(base, ARMADA_37XX_AVS_CTL2,
+ ARMADA_37XX_AVS_LOW_VDD_EN,
+ ARMADA_37XX_AVS_LOW_VDD_EN);
+
+ for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++) {
+ freq = dvfs->cpu_freq_max / dvfs->divider[load_level];
+
+ avs_val = dvfs->avs[load_level];
+ regmap_update_bits(base, ARMADA_37XX_AVS_VSET(load_level-1),
+ ARMADA_37XX_AVS_VDD_MASK << ARMADA_37XX_AVS_HIGH_VDD_LIMIT |
+ ARMADA_37XX_AVS_VDD_MASK << ARMADA_37XX_AVS_LOW_VDD_LIMIT,
+ avs_val << ARMADA_37XX_AVS_HIGH_VDD_LIMIT |
+ avs_val << ARMADA_37XX_AVS_LOW_VDD_LIMIT);
+ }
+
+ /* Enable AVS after the configuration */
+ regmap_update_bits(base, ARMADA_37XX_AVS_CTL0,
+ ARMADA_37XX_AVS_ENABLE,
+ ARMADA_37XX_AVS_ENABLE);
+}
+
static void armada37xx_cpufreq_disable_dvfs(struct regmap *base)
{
unsigned int reg = ARMADA_37XX_NB_DYN_MOD,
@@ -216,7 +360,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
struct platform_device *pdev;
unsigned long freq;
unsigned int cur_frequency;
- struct regmap *nb_pm_base;
+ struct regmap *nb_pm_base, *avs_base;
struct device *cpu_dev;
int load_lvl, ret;
struct clk *clk;
@@ -227,6 +371,14 @@ static int __init armada37xx_cpufreq_driver_init(void)
if (IS_ERR(nb_pm_base))
return -ENODEV;
+ avs_base =
+ syscon_regmap_lookup_by_compatible("marvell,armada-3700-avs");
+
+ /* If AVS is not present, don't use it but still try to set up DVFS */
+ if (IS_ERR(avs_base)) {
+ pr_info("Syscon failed for Adapting Voltage Scaling: skip it\n");
+ avs_base = NULL;
+ }
/* Before doing any configuration on the DVFS first, disable it */
armada37xx_cpufreq_disable_dvfs(nb_pm_base);
@@ -270,16 +422,21 @@ static int __init armada37xx_cpufreq_driver_init(void)
armada37xx_cpufreq_state->regmap = nb_pm_base;
+ armada37xx_cpufreq_avs_configure(avs_base, dvfs);
+ armada37xx_cpufreq_avs_setup(avs_base, dvfs);
+
armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
clk_put(clk);
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
load_lvl++) {
+ unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000;
freq = cur_frequency / dvfs->divider[load_lvl];
-
- ret = dev_pm_opp_add(cpu_dev, freq, 0);
+ ret = dev_pm_opp_add(cpu_dev, freq, u_volt);
if (ret)
goto remove_opp;
}
/* Now that everything is setup, enable the DVFS at hardware level */
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index a9d3eec32795..30f302149730 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -296,10 +296,62 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
return ret;
}
+static inline u64 get_delta(u64 t1, u64 t0)
+{
+ if (t1 > t0 || t0 > ~(u32)0)
+ return t1 - t0;
+
+ return (u32)t1 - (u32)t0;
+}
+
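/*
 * Standalone check of the wraparound handling in get_delta() above:
 * when a 32-bit feedback counter wraps between samples, unsigned
 * 32-bit subtraction still returns the true distance.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t get_delta(uint64_t t1, uint64_t t0)
{
	if (t1 > t0 || t0 > (uint64_t)UINT32_MAX)
		return t1 - t0;

	return (uint32_t)t1 - (uint32_t)t0;
}

int main(void)
{
	/* t0 just before the 32-bit wrap, t1 just after it */
	printf("%" PRIu64 "\n", get_delta(5, 0xfffffffbULL));	/* 10 */
	return 0;
}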
+static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu,
+ struct cppc_perf_fb_ctrs fb_ctrs_t0,
+ struct cppc_perf_fb_ctrs fb_ctrs_t1)
+{
+ u64 delta_reference, delta_delivered;
+ u64 reference_perf, delivered_perf;
+
+ reference_perf = fb_ctrs_t0.reference_perf;
+
+ delta_reference = get_delta(fb_ctrs_t1.reference,
+ fb_ctrs_t0.reference);
+ delta_delivered = get_delta(fb_ctrs_t1.delivered,
+ fb_ctrs_t0.delivered);
+
+ /* Check to avoid divide-by-zero */
+ if (delta_reference || delta_delivered)
+ delivered_perf = (reference_perf * delta_delivered) /
+ delta_reference;
+ else
+ delivered_perf = cpu->perf_ctrls.desired_perf;
+
+ return cppc_cpufreq_perf_to_khz(cpu, delivered_perf);
+}
+
+static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
+{
+ struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
+ struct cppc_cpudata *cpu = all_cpu_data[cpunum];
+ int ret;
+
+ ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0);
+ if (ret)
+ return ret;
+
+ udelay(2); /* 2usec delay between samples */
+
+ ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t1);
+ if (ret)
+ return ret;
+
+ return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1);
+}
+
static struct cpufreq_driver cppc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = cppc_verify_policy,
.target = cppc_cpufreq_set_target,
+ .get = cppc_cpufreq_get_rate,
.init = cppc_cpufreq_cpu_init,
.stop_cpu = cppc_cpufreq_stop_cpu,
.name = "cppc_cpufreq",
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b0dfd3222013..f53fb41efb7b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -923,7 +923,12 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
- cpus_read_lock();
+ /*
+ * cpus_read_trylock() is used here to work around a circular lock
+ * dependency problem with respect to cpufreq_register_driver().
+ */
+ if (!cpus_read_trylock())
+ return -EBUSY;
if (cpu_online(policy->cpu)) {
down_write(&policy->rwsem);
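/*
 * Userspace sketch of the trylock pattern introduced above: back off
 * with -EBUSY instead of blocking and completing a lock-ordering cycle
 * with driver registration. The rwlock here is a stand-in, not the
 * kernel's CPU-hotplug lock.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t cpus_lock = PTHREAD_RWLOCK_INITIALIZER;

static int store_attr(void)
{
	if (pthread_rwlock_tryrdlock(&cpus_lock))
		return -EBUSY;		/* contended: let the caller retry */

	/* ... update the attribute under the lock ... */
	pthread_rwlock_unlock(&cpus_lock);
	return 0;
}

int main(void)
{
	printf("store_attr() = %d\n", store_attr());	/* 0 when uncontended */
	return 0;
}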
@@ -2236,6 +2241,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->min = new_policy->min;
policy->max = new_policy->max;
+ trace_cpu_frequency_limits(policy);
policy->cached_target_freq = UINT_MAX;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 8b3c2a79ad6c..b2ff423ad7f8 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpu_cooling.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -50,6 +51,7 @@ static struct clk_bulk_data clks[] = {
};
static struct device *cpu_dev;
+static struct thermal_cooling_device *cdev;
static bool free_opp;
static struct cpufreq_frequency_table *freq_table;
static unsigned int max_freq;
@@ -191,6 +193,16 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
return 0;
}
+static void imx6q_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ cdev = of_cpufreq_cooling_register(policy);
+
+ if (!cdev)
+ dev_err(cpu_dev,
+ "running cpufreq without cooling device: %ld\n",
+ PTR_ERR(cdev));
+}
+
static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
@@ -202,13 +214,22 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
+static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_cooling_unregister(cdev);
+
+ return 0;
+}
+
static struct cpufreq_driver imx6q_cpufreq_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = imx6q_set_target,
.get = cpufreq_generic_get,
.init = imx6q_cpufreq_init,
+ .exit = imx6q_cpufreq_exit,
.name = "imx6q-cpufreq",
+ .ready = imx6q_cpufreq_ready,
.attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
};
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index d4ed0022b0dd..b6a1aadaff9f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -670,21 +670,18 @@ static ssize_t store_energy_performance_preference(
{
struct cpudata *cpu_data = all_cpu_data[policy->cpu];
char str_preference[21];
- int ret, i = 0;
+ int ret;
ret = sscanf(buf, "%20s", str_preference);
if (ret != 1)
return -EINVAL;
- while (energy_perf_strings[i] != NULL) {
- if (!strcmp(str_preference, energy_perf_strings[i])) {
- intel_pstate_set_energy_pref_index(cpu_data, i);
- return count;
- }
- ++i;
- }
+ ret = match_string(energy_perf_strings, -1, str_preference);
+ if (ret < 0)
+ return ret;
- return -EINVAL;
+ intel_pstate_set_energy_pref_index(cpu_data, ret);
+ return count;
}
static ssize_t show_energy_performance_preference(
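/*
 * match_string() with a length of -1, as used above, scans a
 * NULL-terminated array and returns the index of the first exact
 * match, or -EINVAL. Equivalent standalone logic:
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static const char *const prefs[] = {
	"default", "performance", "balance_performance", NULL
};

static int match_str(const char *const *arr, const char *s)
{
	int i;

	for (i = 0; arr[i]; i++)
		if (!strcmp(arr[i], s))
			return i;

	return -EINVAL;
}

int main(void)
{
	printf("%d\n", match_str(prefs, "performance"));	/* 1 */
	printf("%d\n", match_str(prefs, "bogus"));		/* -EINVAL */
	return 0;
}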
@@ -2011,7 +2008,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
struct cpudata *cpu)
{
- if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
+ if (!hwp_active &&
+ cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
policy->max < policy->cpuinfo.max_freq &&
policy->max > cpu->pstate.max_freq) {
pr_debug("policy->max > max non turbo frequency\n");
@@ -2085,6 +2083,15 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
+ if (hwp_active) {
+ unsigned int max_freq;
+
+ max_freq = global.turbo_disabled ?
+ cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+ if (max_freq < policy->cpuinfo.max_freq)
+ policy->cpuinfo.max_freq = max_freq;
+ }
+
intel_pstate_init_acpi_perf_limits(policy);
policy->fast_switch_possible = true;
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 0c56c9759672..099a849396f6 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -593,6 +593,15 @@ static int __init pcc_cpufreq_init(void)
return ret;
}
+ if (num_present_cpus() > 4) {
+ pcc_cpufreq_driver.flags |= CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING;
+ pr_err("%s: Too many CPUs, dynamic performance scaling disabled\n",
+ __func__);
+ pr_err("%s: Try to enable another scaling driver through BIOS settings\n",
+ __func__);
+ pr_err("%s: and complain to the system vendor\n", __func__);
+ }
+
ret = cpufreq_register_driver(&pcc_cpufreq_driver);
return ret;
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index efc9a7ae4857..a1830fa25fc5 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -109,8 +109,9 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
of_node_put(np);
if (IS_ERR(speedbin_nvmem)) {
- dev_err(cpu_dev, "Could not get nvmem cell: %ld\n",
- PTR_ERR(speedbin_nvmem));
+ if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
+ dev_err(cpu_dev, "Could not get nvmem cell: %ld\n",
+ PTR_ERR(speedbin_nvmem));
return PTR_ERR(speedbin_nvmem);
}
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index e07bc7ace774..073557f433eb 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -105,7 +105,8 @@ static int __init arm_idle_init_cpu(int cpu)
ret = cpuidle_register_driver(drv);
if (ret) {
- pr_err("Failed to register cpuidle driver\n");
+ if (ret != -EBUSY)
+ pr_err("Failed to register cpuidle driver\n");
goto out_kfree_drv;
}
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 43cccf6aff61..a8c4ce07fc9d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -585,6 +585,17 @@ config CRYPTO_DEV_QCE
hardware. To compile this driver as a module, choose M here. The
module will be called qcrypto.
+config CRYPTO_DEV_QCOM_RNG
+ tristate "Qualcomm Random Number Generator Driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select CRYPTO_RNG
+ help
+ This driver provides support for the Random Number
+ Generator hardware found on Qualcomm SoCs.
+
+ To compile this driver as a module, choose M here. The
+ module will be called qcom-rng. If unsure, say N.
+
config CRYPTO_DEV_VMX
bool "Support for VMX cryptographic acceleration instructions"
depends on PPC64 && VSX
@@ -689,8 +700,10 @@ config CRYPTO_DEV_SAFEXCEL
select CRYPTO_AES
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
+ select CRYPTO_DES
select CRYPTO_HASH
select CRYPTO_HMAC
+ select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
@@ -746,4 +759,6 @@ config CRYPTO_DEV_CCREE
cryptographic operations on the system REE.
If unsure say Y.
+source "drivers/crypto/hisilicon/Kconfig"
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 7ae87b4f6c8d..c23396f32c8a 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
@@ -45,3 +46,4 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
+obj-y += hisilicon/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 05981ccd9901..6eaec9ba0f68 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1132,8 +1132,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1153,8 +1152,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "cfb(aes)",
.cra_driver_name = "cfb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1174,8 +1172,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK |
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1196,8 +1193,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1217,8 +1213,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1237,8 +1232,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ofb(aes)",
.cra_driver_name = "ofb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index e66f18a0ddd0..74f083f45e97 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -186,7 +186,10 @@ static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd,
* always be the same. Use a macro for the key size to avoid unnecessary
* computations.
*/
- copied = sg_copy_to_buffer(pubkey, 1, cmd->data, ATMEL_ECC_PUBKEY_SIZE);
+ copied = sg_copy_to_buffer(pubkey,
+ sg_nents_for_len(pubkey,
+ ATMEL_ECC_PUBKEY_SIZE),
+ cmd->data, ATMEL_ECC_PUBKEY_SIZE);
if (copied != ATMEL_ECC_PUBKEY_SIZE)
return -EINVAL;
@@ -268,15 +271,17 @@ static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
struct kpp_request *req = areq;
struct atmel_ecdh_ctx *ctx = work_data->ctx;
struct atmel_ecc_cmd *cmd = &work_data->cmd;
- size_t copied;
- size_t n_sz = ctx->n_sz;
+ size_t copied, n_sz;
if (status)
goto free_work_data;
+ /* the caller may want fewer bytes than we have */
+ n_sz = min_t(size_t, ctx->n_sz, req->dst_len);
+
/* copy the shared secret */
- copied = sg_copy_from_buffer(req->dst, 1, &cmd->data[RSP_DATA_IDX],
- n_sz);
+ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
+ &cmd->data[RSP_DATA_IDX], n_sz);
if (copied != n_sz)
status = -EINVAL;
@@ -440,7 +445,7 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
- size_t copied;
+ size_t copied, nbytes;
int ret = 0;
if (ctx->do_fallback) {
@@ -448,10 +453,14 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
return crypto_kpp_generate_public_key(req);
}
+ /* the caller may want fewer bytes than we have */
+ nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len);
+
/* public key was saved at private key generation */
- copied = sg_copy_from_buffer(req->dst, 1, ctx->public_key,
- ATMEL_ECC_PUBKEY_SIZE);
- if (copied != ATMEL_ECC_PUBKEY_SIZE)
+ copied = sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, nbytes),
+ ctx->public_key, nbytes);
+ if (copied != nbytes)
ret = -EINVAL;
return ret;
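/*
 * The min_t() clamps above implement "copy no more than the caller
 * asked for": req->dst_len bounds how much key material is written
 * out. The same idea in plain C:
 */
#include <stdio.h>
#include <string.h>

static size_t copy_clamped(unsigned char *dst, size_t dst_len,
			   const unsigned char *src, size_t src_len)
{
	size_t n = dst_len < src_len ? dst_len : src_len;

	memcpy(dst, src, n);
	return n;
}

int main(void)
{
	unsigned char pubkey[64] = { 0x04 };	/* stand-in key material */
	unsigned char out[16];

	/* the caller wants only 16 of the 64 available bytes */
	printf("copied %zu bytes\n",
	       copy_clamped(out, sizeof(out), pubkey, sizeof(pubkey)));
	return 0;
}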
@@ -470,6 +479,10 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
return crypto_kpp_compute_shared_secret(req);
}
+ /* the public key must carry both coordinates of the curve point */
+ if (req->src_len != ATMEL_ECC_PUBKEY_SIZE)
+ return -EINVAL;
+
gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
GFP_ATOMIC;
@@ -554,10 +567,6 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
}
crypto_kpp_set_flags(fallback, crypto_kpp_get_flags(tfm));
-
- dev_info(&ctx->client->dev, "Using '%s' as fallback implementation.\n",
- crypto_tfm_alg_driver_name(crypto_kpp_tfm(fallback)));
-
ctx->fallback = fallback;
return 0;
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 4d43081120db..8a19df2fba6a 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2316,9 +2316,7 @@ struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode)
goto error;
}
- tfm = crypto_alloc_ahash(name,
- CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ tfm = crypto_alloc_ahash(name, 0, 0);
if (IS_ERR(tfm)) {
err = PTR_ERR(tfm);
goto error;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 0fb8bbf41a8d..7f07a5085e9b 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2704,7 +2704,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "sha1",
.cra_driver_name = "artpec-sha1",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2727,7 +2727,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "sha256",
.cra_driver_name = "artpec-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2751,7 +2751,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "artpec-hmac-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2777,7 +2777,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "sha384",
.cra_driver_name = "artpec-sha384",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2801,7 +2801,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "artpec-hmac-sha384",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2824,7 +2824,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "sha512",
.cra_driver_name = "artpec-sha512",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2848,7 +2848,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "artpec-hmac-sha512",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2867,8 +2867,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "artpec6-ecb-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2888,8 +2887,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "artpec6-ctr-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
@@ -2911,8 +2909,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "artpec6-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2933,8 +2930,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "xts(aes)",
.cra_driver_name = "artpec6-xts-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2964,7 +2960,7 @@ static struct aead_alg aead_algos[] = {
.cra_name = "gcm(aes)",
.cra_driver_name = "artpec-gcm-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 309c67c7012f..2d1f1db9f807 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -3914,8 +3914,7 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-iproc",
.cra_blocksize = MD5_BLOCK_WORDS * 4,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.cipher_info = {
@@ -4649,8 +4648,7 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg)
hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
hash->halg.base.cra_init = ahash_cra_init;
hash->halg.base.cra_exit = generic_cra_exit;
- hash->halg.base.cra_type = &crypto_ahash_type;
- hash->halg.base.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
+ hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
hash->halg.statesize = sizeof(struct spu_hash_export_s);
if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
@@ -4691,7 +4689,7 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg)
aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
INIT_LIST_HEAD(&aead->base.cra_list);
- aead->base.cra_flags |= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
/* setkey set in alg initialization */
aead->setauthsize = aead_setauthsize;
aead->encrypt = aead_encrypt;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 0beb28196e20..43975ab5f09c 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1846,8 +1846,7 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
- alg->cra_type = &crypto_ahash_type;
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
t_alg->alg_type = template->alg_type;
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index df21d996db7e..600336d169a9 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -351,7 +351,7 @@ static int cvm_enc_dec_init(struct crypto_tfm *tfm)
return 0;
}
-struct crypto_alg algs[] = { {
+static struct crypto_alg algs[] = { {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cvm_enc_ctx),
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 4fdc921ba611..ebe267379ac9 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -148,7 +148,7 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
void *vaddr;
dma_addr_t dma;
- vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_ATOMIC | __GFP_ZERO), &dma);
+ vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
if (!vaddr)
return NULL;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 26687f318de6..3c6fe57f91f8 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -399,13 +399,12 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
- base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
+ base->cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = AES_BLOCK_SIZE;
base->cra_ctxsize = sizeof(struct ccp_ctx);
base->cra_priority = CCP_CRA_PRIORITY;
- base->cra_type = &crypto_ahash_type;
base->cra_init = ccp_aes_cmac_cra_init;
base->cra_exit = ccp_aes_cmac_cra_exit;
base->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 871c9628a2ee..2ca64bb57d2e 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -497,13 +497,12 @@ static int ccp_register_sha_alg(struct list_head *head,
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
- base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
+ base->cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = def->block_size;
base->cra_ctxsize = sizeof(struct ccp_ctx);
base->cra_priority = CCP_CRA_PRIORITY;
- base->cra_type = &crypto_ahash_type;
base->cra_init = ccp_sha_cra_init;
base->cra_exit = ccp_sha_cra_exit;
base->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index ff478d826d7d..218739b961fe 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -62,14 +62,14 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
int reg;
/* Read the interrupt status: */
- status = ioread32(psp->io_regs + PSP_P2CMSG_INTSTS);
+ status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
/* Check if it is command completion: */
- if (!(status & BIT(PSP_CMD_COMPLETE_REG)))
+ if (!(status & PSP_CMD_COMPLETE))
goto done;
/* Check if it is SEV command completion: */
- reg = ioread32(psp->io_regs + PSP_CMDRESP);
+ reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
if (reg & PSP_CMDRESP_RESP) {
psp->sev_int_rcvd = 1;
wake_up(&psp->sev_int_queue);
@@ -77,17 +77,15 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
done:
/* Clear the interrupt status by writing the same value we read. */
- iowrite32(status, psp->io_regs + PSP_P2CMSG_INTSTS);
+ iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
return IRQ_HANDLED;
}
static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
{
- psp->sev_int_rcvd = 0;
-
wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
- *reg = ioread32(psp->io_regs + PSP_CMDRESP);
+ *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
}
static int sev_cmd_buffer_len(int cmd)
@@ -145,13 +143,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
sev_cmd_buffer_len(cmd), false);
- iowrite32(phys_lsb, psp->io_regs + PSP_CMDBUFF_ADDR_LO);
- iowrite32(phys_msb, psp->io_regs + PSP_CMDBUFF_ADDR_HI);
+ iowrite32(phys_lsb, psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg);
+ iowrite32(phys_msb, psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg);
+
+ psp->sev_int_rcvd = 0;
reg = cmd;
reg <<= PSP_CMDRESP_CMD_SHIFT;
reg |= PSP_CMDRESP_IOC;
- iowrite32(reg, psp->io_regs + PSP_CMDRESP);
+ iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
/* wait for command completion */
sev_wait_cmd_ioc(psp, &reg);
@@ -789,7 +789,7 @@ static int sev_misc_init(struct psp_device *psp)
static int sev_init(struct psp_device *psp)
{
/* Check if device supports SEV feature */
- if (!(ioread32(psp->io_regs + PSP_FEATURE_REG) & 1)) {
+ if (!(ioread32(psp->io_regs + psp->vdata->feature_reg) & 1)) {
dev_dbg(psp->dev, "device does not support SEV\n");
return 1;
}
@@ -817,11 +817,11 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
- psp->io_regs = sp->io_map + psp->vdata->offset;
+ psp->io_regs = sp->io_map;
/* Disable and clear interrupts until ready */
- iowrite32(0, psp->io_regs + PSP_P2CMSG_INTEN);
- iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTSTS);
+ iowrite32(0, psp->io_regs + psp->vdata->inten_reg);
+ iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg);
/* Request an irq */
ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp);
@@ -838,7 +838,9 @@ int psp_dev_init(struct sp_device *sp)
sp->set_psp_master_device(sp);
/* Enable interrupt */
- iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTEN);
+ iowrite32(-1, psp->io_regs + psp->vdata->inten_reg);
+
+ dev_notice(dev, "psp enabled\n");
return 0;
@@ -856,6 +858,9 @@ void psp_dev_destroy(struct sp_device *sp)
{
struct psp_device *psp = sp->psp_data;
+ if (!psp)
+ return;
+
if (psp->sev_misc)
kref_put(&misc_dev->refcount, sev_exit);
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index c7e9098a233c..8b53a9674ecb 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -30,24 +30,7 @@
#include "sp-dev.h"
-#define PSP_C2PMSG(_num) ((_num) << 2)
-#define PSP_CMDRESP PSP_C2PMSG(32)
-#define PSP_CMDBUFF_ADDR_LO PSP_C2PMSG(56)
-#define PSP_CMDBUFF_ADDR_HI PSP_C2PMSG(57)
-#define PSP_FEATURE_REG PSP_C2PMSG(63)
-
-#define PSP_P2CMSG(_num) ((_num) << 2)
-#define PSP_CMD_COMPLETE_REG 1
-#define PSP_CMD_COMPLETE PSP_P2CMSG(PSP_CMD_COMPLETE_REG)
-
-#define PSP_P2CMSG_INTEN 0x0110
-#define PSP_P2CMSG_INTSTS 0x0114
-
-#define PSP_C2PMSG_ATTR_0 0x0118
-#define PSP_C2PMSG_ATTR_1 0x011c
-#define PSP_C2PMSG_ATTR_2 0x0120
-#define PSP_C2PMSG_ATTR_3 0x0124
-#define PSP_P2CMSG_ATTR_0 0x0128
+#define PSP_CMD_COMPLETE BIT(1)
#define PSP_CMDRESP_CMD_SHIFT 16
#define PSP_CMDRESP_IOC BIT(0)
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index acb197b66ced..14398cad1625 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -44,7 +44,12 @@ struct ccp_vdata {
};
struct psp_vdata {
- const unsigned int offset;
+ const unsigned int cmdresp_reg;
+ const unsigned int cmdbuff_addr_lo_reg;
+ const unsigned int cmdbuff_addr_hi_reg;
+ const unsigned int feature_reg;
+ const unsigned int inten_reg;
+ const unsigned int intsts_reg;
};
/* Structure to hold SP device data */
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index f5f43c50698a..7da93e9bebed 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -269,38 +269,62 @@ static int sp_pci_resume(struct pci_dev *pdev)
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
-static const struct psp_vdata psp_entry = {
- .offset = 0x10500,
+static const struct psp_vdata pspv1 = {
+ .cmdresp_reg = 0x10580,
+ .cmdbuff_addr_lo_reg = 0x105e0,
+ .cmdbuff_addr_hi_reg = 0x105e4,
+ .feature_reg = 0x105fc,
+ .inten_reg = 0x10610,
+ .intsts_reg = 0x10614,
+};
+
+static const struct psp_vdata pspv2 = {
+ .cmdresp_reg = 0x10980,
+ .cmdbuff_addr_lo_reg = 0x109e0,
+ .cmdbuff_addr_hi_reg = 0x109e4,
+ .feature_reg = 0x109fc,
+ .inten_reg = 0x10690,
+ .intsts_reg = 0x10694,
};
#endif
static const struct sp_dev_vdata dev_vdata[] = {
- {
+ { /* 0 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
.ccp_vdata = &ccpv3,
#endif
},
- {
+ { /* 1 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
.ccp_vdata = &ccpv5a,
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
- .psp_vdata = &psp_entry
+ .psp_vdata = &pspv1,
#endif
},
- {
+ { /* 2 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
.ccp_vdata = &ccpv5b,
#endif
},
+ { /* 3 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5a,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv2,
+#endif
+ },
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
+ { PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
/* Last entry must be zero */
{ 0, }
};
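/*
 * Replacing the single BAR offset with per-version register maps lets
 * one driver serve PSP revisions whose registers moved. A trimmed-down
 * standalone illustration (two registers only; the struct and helper
 * are illustrative, not the driver's types):
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct psp_layout {
	unsigned int cmdresp;
	unsigned int feature;
};

static const struct psp_layout v1 = { .cmdresp = 0x10580, .feature = 0x105fc };
static const struct psp_layout v2 = { .cmdresp = 0x10980, .feature = 0x109fc };

static uint32_t read_reg(const uint8_t *bar, unsigned int off)
{
	uint32_t val;

	memcpy(&val, bar + off, sizeof(val));	/* stands in for ioread32() */
	return val;
}

int main(void)
{
	static uint8_t bar[0x11000];		/* fake mapped BAR */

	printf("v1 feature @ 0x%05x = %u\n", v1.feature, read_reg(bar, v1.feature));
	printf("v2 feature @ 0x%05x = %u\n", v2.feature, read_reg(bar, v2.feature));
	return 0;
}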
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 03f4b9fce556..01b82b82f8b8 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -2344,7 +2344,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2364,7 +2363,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha1),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2384,7 +2382,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha256),cbc(aes))",
.driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2404,7 +2401,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha256),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2424,7 +2420,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(xcbc(aes),cbc(aes))",
.driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2444,7 +2439,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2464,7 +2458,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2484,7 +2477,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2504,7 +2496,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "ccm(aes)",
.driver_name = "ccm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_ccm_setauthsize,
@@ -2524,7 +2515,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "rfc4309(ccm(aes))",
.driver_name = "rfc4309-ccm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_rfc4309_ccm_setkey,
.setauthsize = cc_rfc4309_ccm_setauthsize,
@@ -2544,7 +2534,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "gcm(aes)",
.driver_name = "gcm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_gcm_setauthsize,
@@ -2564,7 +2553,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "rfc4106(gcm(aes))",
.driver_name = "rfc4106-gcm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_rfc4106_gcm_setkey,
.setauthsize = cc_rfc4106_gcm_setauthsize,
@@ -2584,7 +2572,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "rfc4543(gcm(aes))",
.driver_name = "rfc4543-gcm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_rfc4543_gcm_setkey,
.setauthsize = cc_rfc4543_gcm_setauthsize,
@@ -2621,8 +2608,7 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
alg->base.cra_priority = CC_CRA_PRIO;
alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- tmpl->type;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = cc_aead_init;
alg->exit = cc_aead_exit;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index b32577477b4c..dd948e1df9e5 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -454,9 +454,7 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
&req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
- ivsize,
- req_ctx->is_giv ? DMA_BIDIRECTIONAL :
- DMA_TO_DEVICE);
+ ivsize, DMA_TO_DEVICE);
}
/* Release pool */
if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -498,9 +496,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
- ivsize,
- req_ctx->is_giv ? DMA_BIDIRECTIONAL :
- DMA_TO_DEVICE);
+ ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d2810c183b73..7623b29911af 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -19,8 +19,6 @@
#define template_skcipher template_u.skcipher
-#define CC_MIN_AES_XTS_SIZE 0x10
-#define CC_MAX_AES_XTS_SIZE 0x2000
struct cc_cipher_handle {
struct list_head alg_list;
};
@@ -98,8 +96,7 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p,
case S_DIN_to_AES:
switch (ctx_p->cipher_mode) {
case DRV_CIPHER_XTS:
- if (size >= CC_MIN_AES_XTS_SIZE &&
- size <= CC_MAX_AES_XTS_SIZE &&
+ if (size >= AES_BLOCK_SIZE &&
IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
@@ -593,34 +590,82 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
}
}
+/*
+ * Update a 128-bit CTR-AES counter
+ */
+static void cc_update_ctr(u8 *ctr, unsigned int increment)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+
+ __be64 *high_be = (__be64 *)ctr;
+ __be64 *low_be = high_be + 1;
+ u64 orig_low = __be64_to_cpu(*low_be);
+ u64 new_low = orig_low + (u64)increment;
+
+ *low_be = __cpu_to_be64(new_low);
+
+ if (new_low < orig_low)
+ *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
+ } else {
+ u8 *pos = (ctr + AES_BLOCK_SIZE);
+ u8 val;
+ unsigned int size;
+
+ for (; increment; increment--)
+ for (size = AES_BLOCK_SIZE; size; size--) {
+ val = *--pos + 1;
+ *pos = val;
+ if (val)
+ break;
+ }
+ }
+}
+
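/*
 * Standalone version of the byte-wise fallback in cc_update_ctr()
 * above: ripple a carry through a big-endian 128-bit counter.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void ctr_add(uint8_t ctr[16], unsigned int inc)
{
	while (inc--) {
		int i;

		for (i = 15; i >= 0; i--)
			if (++ctr[i])
				break;		/* no carry left: done */
	}
}

int main(void)
{
	uint8_t ctr[16];
	int i;

	memset(ctr, 0xff, sizeof(ctr));		/* worst case: full carry chain */
	ctr_add(ctr, 1);

	for (i = 0; i < 16; i++)
		printf("%02x", ctr[i]);		/* prints 32 zeros */
	printf("\n");
	return 0;
}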
static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
struct skcipher_request *req = (struct skcipher_request *)cc_req;
struct scatterlist *dst = req->dst;
struct scatterlist *src = req->src;
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+ unsigned int len;
- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
- kzfree(req_ctx->iv);
+ switch (ctx_p->cipher_mode) {
+ case DRV_CIPHER_CBC:
+ /*
+ * The crypto API expects us to set the req->iv to the last
+ * ciphertext block. For encrypt, simply copy from the result.
+ * For decrypt, we must copy from a saved buffer since this
+ * could be an in-place decryption operation and the src is
+ * lost by this point.
+ */
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ memcpy(req->iv, req_ctx->backup_info, ivsize);
+ kzfree(req_ctx->backup_info);
+ } else if (!err) {
+ len = req->cryptlen - ivsize;
+ scatterwalk_map_and_copy(req->iv, req->dst, len,
+ ivsize, 0);
+ }
+ break;
- /*
- * The crypto API expects us to set the req->iv to the last
- * ciphertext block. For encrypt, simply copy from the result.
- * For decrypt, we must copy from a saved buffer since this
- * could be an in-place decryption operation and the src is
- * lost by this point.
- */
- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
- memcpy(req->iv, req_ctx->backup_info, ivsize);
- kzfree(req_ctx->backup_info);
- } else if (!err) {
- scatterwalk_map_and_copy(req->iv, req->dst,
- (req->cryptlen - ivsize),
- ivsize, 0);
+ case DRV_CIPHER_CTR:
+ /* Advance the counter by the number of blocks processed */
+ len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
+ cc_update_ctr((u8 *)req->iv, len);
+ break;
+
+ default:
+ break;
}
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ kzfree(req_ctx->iv);
+
skcipher_request_complete(req, err);
}
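/*
 * Worked example for the CTR case above: the counter must advance by
 * the number of AES blocks the request touched, i.e.
 * ceil(cryptlen / 16).
 */
#include <stdio.h>

int main(void)
{
	unsigned int cryptlen = 100;		/* bytes in the request */
	unsigned int blocks = (cryptlen + 15) / 16;

	printf("advance the IV by %u blocks\n", blocks);	/* 7 */
	return 0;
}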
@@ -639,7 +684,7 @@ static int cc_cipher_process(struct skcipher_request *req,
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
struct cc_crypto_req cc_req = {};
- int rc, cts_restore_flag = 0;
+ int rc;
unsigned int seq_len = 0;
gfp_t flags = cc_gfp_flags(&req->base);
@@ -671,23 +716,10 @@ static int cc_cipher_process(struct skcipher_request *req,
goto exit_process;
}
- /*For CTS in case of data size aligned to 16 use CBC mode*/
- if (((nbytes % AES_BLOCK_SIZE) == 0) &&
- ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
- ctx_p->cipher_mode = DRV_CIPHER_CBC;
- cts_restore_flag = 1;
- }
-
/* Setup request structure */
cc_req.user_cb = (void *)cc_cipher_complete;
cc_req.user_arg = (void *)req;
-#ifdef ENABLE_CYCLE_COUNT
- cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
- STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
-
-#endif
-
/* Setup request context */
req_ctx->gen_ctx.op_type = direction;
@@ -708,14 +740,6 @@ static int cc_cipher_process(struct skcipher_request *req,
cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
&seq_len);
- /* do we need to generate IV? */
- if (req_ctx->is_giv) {
- cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
- cc_req.ivgen_dma_addr_len = 1;
- /* set the IV size (8/16 B long)*/
- cc_req.ivgen_size = ivsize;
- }
-
/* STAT_PHASE_3: Lock HW and push sequence */
rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
@@ -728,9 +752,6 @@ static int cc_cipher_process(struct skcipher_request *req,
}
exit_process:
- if (cts_restore_flag)
- ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
-
if (rc != -EINPROGRESS && rc != -EBUSY) {
kzfree(req_ctx->backup_info);
kzfree(req_ctx->iv);
@@ -743,8 +764,7 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
{
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
- req_ctx->is_giv = false;
- req_ctx->backup_info = NULL;
+ memset(req_ctx, 0, sizeof(*req_ctx));
return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
}
@@ -752,21 +772,28 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
static int cc_cipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
gfp_t flags = cc_gfp_flags(&req->base);
+ unsigned int len;
- /*
- * Allocate and save the last IV sized bytes of the source, which will
- * be lost in case of in-place decryption and might be needed for CTS.
- */
- req_ctx->backup_info = kmalloc(ivsize, flags);
- if (!req_ctx->backup_info)
- return -ENOMEM;
+ memset(req_ctx, 0, sizeof(*req_ctx));
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
+ /* Allocate and save the last IV-sized chunk of the source,
+ * which would otherwise be lost during in-place decryption.
+ */
+ req_ctx->backup_info = kzalloc(ivsize, flags);
+ if (!req_ctx->backup_info)
+ return -ENOMEM;
- scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
- (req->cryptlen - ivsize), ivsize, 0);
- req_ctx->is_giv = false;
+ len = req->cryptlen - ivsize;
+ scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
+ ivsize, 0);
+ }
return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
}
@@ -927,7 +954,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(paes)",
.driver_name = "ecb-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -944,7 +970,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(paes)",
.driver_name = "cbc-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -961,7 +986,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ofb(paes)",
.driver_name = "ofb-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -975,10 +999,9 @@ static const struct cc_alg_template skcipher_algs[] = {
.min_hw_rev = CC_HW_REV_712,
},
{
- .name = "cts1(cbc(paes))",
- .driver_name = "cts1-cbc-paes-ccree",
+ .name = "cts(cbc(paes))",
+ .driver_name = "cts-cbc-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -995,7 +1018,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ctr(paes)",
.driver_name = "ctr-paes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -1162,7 +1184,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(aes)",
.driver_name = "ecb-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1179,7 +1200,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(aes)",
.driver_name = "cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1196,7 +1216,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ofb(aes)",
.driver_name = "ofb-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1210,10 +1229,9 @@ static const struct cc_alg_template skcipher_algs[] = {
.min_hw_rev = CC_HW_REV_630,
},
{
- .name = "cts1(cbc(aes))",
- .driver_name = "cts1-cbc-aes-ccree",
+ .name = "cts(cbc(aes))",
+ .driver_name = "cts-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1230,7 +1248,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ctr(aes)",
.driver_name = "ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1247,7 +1264,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(des3_ede)",
.driver_name = "cbc-3des-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1264,7 +1280,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(des3_ede)",
.driver_name = "ecb-3des-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1281,7 +1296,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(des)",
.driver_name = "cbc-des-ccree",
.blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1298,7 +1312,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(des)",
.driver_name = "ecb-des-ccree",
.blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1338,8 +1351,7 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
alg->base.cra_init = cc_cipher_init;
alg->base.cra_exit = cc_cipher_exit;
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_TYPE_SKCIPHER;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
t_alg->cipher_mode = tmpl->cipher_mode;
t_alg->flow_mode = tmpl->flow_mode;
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
index 68444cfa936b..4dbc0a1e6d5c 100644
--- a/drivers/crypto/ccree/cc_cipher.h
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -22,7 +22,6 @@ struct cipher_req_ctx {
u32 out_mlli_nents;
u8 *backup_info; /*store iv for generated IV flow*/
u8 *iv;
- bool is_giv;
struct mlli_params mlli_params;
};
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index bd974fef05e4..1ff229c2aeab 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -131,8 +131,8 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
}
if (irr) {
- dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
- irr);
+ dev_dbg_ratelimited(dev, "IRR includes unknown cause bits (0x%08X)\n",
+ irr);
/* Just warning */
}
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 95f82b2d1e70..d608a4faf662 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -148,7 +148,6 @@ struct cc_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
char driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
- u32 type;
union {
struct skcipher_alg skcipher;
struct aead_alg aead;
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 96ff777474d7..b9313306c36f 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -602,66 +602,7 @@ static int cc_hash_update(struct ahash_request *req)
return rc;
}
-static int cc_hash_finup(struct ahash_request *req)
-{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- u32 digestsize = crypto_ahash_digestsize(tfm);
- struct scatterlist *src = req->src;
- unsigned int nbytes = req->nbytes;
- u8 *result = req->result;
- struct device *dev = drvdata_to_dev(ctx->drvdata);
- bool is_hmac = ctx->is_hmac;
- struct cc_crypto_req cc_req = {};
- struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
- unsigned int idx = 0;
- int rc;
- gfp_t flags = cc_gfp_flags(&req->base);
-
- dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
- nbytes);
-
- if (cc_map_req(dev, state, ctx)) {
- dev_err(dev, "map_ahash_source() failed\n");
- return -EINVAL;
- }
-
- if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
- flags)) {
- dev_err(dev, "map_ahash_request_final() failed\n");
- cc_unmap_req(dev, state, ctx);
- return -ENOMEM;
- }
- if (cc_map_result(dev, state, digestsize)) {
- dev_err(dev, "map_ahash_digest() failed\n");
- cc_unmap_hash_request(dev, state, src, true);
- cc_unmap_req(dev, state, ctx);
- return -ENOMEM;
- }
-
- /* Setup request structure */
- cc_req.user_cb = cc_hash_complete;
- cc_req.user_arg = req;
-
- idx = cc_restore_hash(desc, ctx, state, idx);
-
- if (is_hmac)
- idx = cc_fin_hmac(desc, req, idx);
-
- idx = cc_fin_result(desc, req, idx);
-
- rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
- if (rc != -EINPROGRESS && rc != -EBUSY) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- cc_unmap_hash_request(dev, state, src, true);
- cc_unmap_result(dev, state, digestsize, result);
- cc_unmap_req(dev, state, ctx);
- }
- return rc;
-}
-
-static int cc_hash_final(struct ahash_request *req)
+static int cc_do_finup(struct ahash_request *req, bool update)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -678,21 +619,20 @@ static int cc_hash_final(struct ahash_request *req)
int rc;
gfp_t flags = cc_gfp_flags(&req->base);
- dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
- nbytes);
+ dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
+ update ? "finup" : "final", nbytes);
if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -EINVAL;
}
- if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
-
if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
cc_unmap_hash_request(dev, state, src, true);
@@ -706,7 +646,7 @@ static int cc_hash_final(struct ahash_request *req)
idx = cc_restore_hash(desc, ctx, state, idx);
- /* "DO-PAD" must be enabled only when writing current length to HW */
+ /* Pad the hash */
hw_desc_init(&desc[idx]);
set_cipher_do(&desc[idx], DO_PAD);
set_cipher_mode(&desc[idx], ctx->hw_mode);
@@ -731,6 +671,17 @@ static int cc_hash_final(struct ahash_request *req)
return rc;
}
+static int cc_hash_finup(struct ahash_request *req)
+{
+ return cc_do_finup(req, true);
+}
+
+
+static int cc_hash_final(struct ahash_request *req)
+{
+ return cc_do_finup(req, false);
+}
+
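
The two wrappers above fold the old cc_hash_finup()/cc_hash_final() pair
into one path; the only behavioural difference is the "update" flag passed
through to cc_map_hash_request_final(). As a sketch of the semantics being
merged (not driver code):

	/*
	 * final(req):  digest = H(state)          - no new data consumed
	 * finup(req):  digest = H(state || src)   - hash req->src, then pad
	 */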
static int cc_hash_init(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
@@ -1813,9 +1764,7 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
alg->cra_exit = cc_cra_exit;
alg->cra_init = cc_cra_init;
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
- alg->cra_type = &crypto_ahash_type;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
t_crypto_alg->hash_mode = template->hash_mode;
t_crypto_alg->hw_mode = template->hw_mode;
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index b916c4eb608c..5c539af8ed60 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -4203,7 +4203,6 @@ static int chcr_unregister_alg(void)
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
-#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
/*
* chcr_register_alg - Register crypto algorithms with kernel framework.
@@ -4237,8 +4236,7 @@ static int chcr_register_alg(void)
break;
case CRYPTO_ALG_TYPE_AEAD:
driver_algs[i].alg.aead.base.cra_flags =
- CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK;
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
driver_algs[i].alg.aead.init = chcr_aead_cra_init;
@@ -4258,10 +4256,9 @@ static int chcr_register_alg(void)
a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
a_hash->halg.base.cra_module = THIS_MODULE;
- a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
+ a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
a_hash->halg.base.cra_alignmask = 0;
a_hash->halg.base.cra_exit = NULL;
- a_hash->halg.base.cra_type = &crypto_ahash_type;
if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
a_hash->halg.base.cra_init = chcr_hmac_cra_init;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 2bb6f0380758..0997e166ea57 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -1673,7 +1673,7 @@ static void chtls_timewait(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
tp->rcv_nxt++;
- tp->rx_opt.ts_recent_stamp = get_seconds();
+ tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
tp->srtt_us = 0;
tcp_time_wait(sk, TCP_TIME_WAIT, 0);
}
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 55d50140f9e5..490960755864 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -97,7 +97,7 @@ static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
{
return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
- val << bit_pos);
+ (u64)val << bit_pos);
}
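
The (u64) cast above is not cosmetic: "val" is an int, so "val << bit_pos"
is evaluated as a 32-bit shift and is undefined for bit positions of 32 or
more, while the TCB flag word being masked is 64 bits wide. Widening first
makes the shift well defined:

	int val = 1;
	u64 bad  = val << 40;      /* UB: 32-bit value shifted by 40 */
	u64 good = (u64)val << 40; /* 0x0000010000000000, as intended */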
static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
new file mode 100644
index 000000000000..8ca9c503bcb0
--- /dev/null
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config CRYPTO_DEV_HISI_SEC
+ tristate "Support for Hisilicon SEC crypto block cipher accelerator"
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_ALGAPI
+ select SG_SPLIT
+ depends on ARM64 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Support for Hisilicon SEC Engine in Hip06 and Hip07
+
+ To compile this as a module, choose M here: the module
+ will be called hisi_sec.
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
new file mode 100644
index 000000000000..463f46ace182
--- /dev/null
+++ b/drivers/crypto/hisilicon/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
diff --git a/drivers/crypto/hisilicon/sec/Makefile b/drivers/crypto/hisilicon/sec/Makefile
new file mode 100644
index 000000000000..a55b698e0c27
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += hisi_sec.o
+hisi_sec-y = sec_algs.o sec_drv.o
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
new file mode 100644
index 000000000000..f7d6d690116e
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -0,0 +1,1122 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+#include <crypto/skcipher.h>
+#include <crypto/xts.h>
+#include <crypto/internal/skcipher.h>
+
+#include "sec_drv.h"
+
+#define SEC_MAX_CIPHER_KEY 64
+#define SEC_REQ_LIMIT SZ_32M
+
+struct sec_c_alg_cfg {
+ unsigned c_alg : 3;
+ unsigned c_mode : 3;
+ unsigned key_len : 2;
+ unsigned c_width : 2;
+};
+
+static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
+ [SEC_C_DES_ECB_64] = {
+ .c_alg = SEC_C_ALG_DES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_DES,
+ },
+ [SEC_C_DES_CBC_64] = {
+ .c_alg = SEC_C_ALG_DES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_DES,
+ },
+ [SEC_C_3DES_ECB_192_3KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_3DES_3_KEY,
+ },
+ [SEC_C_3DES_ECB_192_2KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_3DES_2_KEY,
+ },
+ [SEC_C_3DES_CBC_192_3KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_3DES_3_KEY,
+ },
+ [SEC_C_3DES_CBC_192_2KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_3DES_2_KEY,
+ },
+ [SEC_C_AES_ECB_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_ECB_192] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_AES_192,
+ },
+ [SEC_C_AES_ECB_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_AES_CBC_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_CBC_192] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_AES_192,
+ },
+ [SEC_C_AES_CBC_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_AES_CTR_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CTR,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_CTR_192] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CTR,
+ .key_len = SEC_KEY_LEN_AES_192,
+ },
+ [SEC_C_AES_CTR_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CTR,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_AES_XTS_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_XTS,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_XTS_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_XTS,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_NULL] = {
+ },
+};
+
+/*
+ * Mutex used to ensure safe operation of reference count of
+ * alg providers
+ */
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
+ struct sec_bd_info *req,
+ enum sec_cipher_alg alg)
+{
+ const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];
+
+ memset(req, 0, sizeof(*req));
+ req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
+ req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
+ req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
+ req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;
+
+ req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
+ req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
+}
+
+static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
+ const u8 *key,
+ unsigned int keylen,
+ enum sec_cipher_alg alg)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
+ struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->cipher_alg = alg;
+ memcpy(ctx->key, key, keylen);
+ sec_alg_skcipher_init_template(ctx, &ctx->req_template,
+ ctx->cipher_alg);
+}
+
+static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
+ dma_addr_t *psec_sgl,
+ struct scatterlist *sgl,
+ int count,
+ struct sec_dev_info *info)
+{
+ struct sec_hw_sgl *sgl_current = NULL;
+ struct sec_hw_sgl *sgl_next;
+ dma_addr_t sgl_next_dma;
+ struct scatterlist *sg;
+ int ret, sge_index, i;
+
+ if (!count)
+ return -EINVAL;
+
+ for_each_sg(sgl, sg, count, i) {
+ sge_index = i % SEC_MAX_SGE_NUM;
+ if (sge_index == 0) {
+ sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
+ GFP_KERNEL, &sgl_next_dma);
+ if (!sgl_next) {
+ ret = -ENOMEM;
+ goto err_free_hw_sgls;
+ }
+
+ if (!sgl_current) { /* First one */
+ *psec_sgl = sgl_next_dma;
+ *sec_sgl = sgl_next;
+ } else { /* Chained */
+ sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
+ sgl_current->next_sgl = sgl_next_dma;
+ sgl_current->next = sgl_next;
+ }
+ sgl_current = sgl_next;
+ }
+ sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
+ sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
+ sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
+ }
+	/* a final SGL that is exactly full holds SEC_MAX_SGE_NUM entries */
+	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM ?:
+					SEC_MAX_SGE_NUM;
+ sgl_current->next_sgl = 0;
+ (*sec_sgl)->entry_sum_in_chain = count;
+
+ return 0;
+
+err_free_hw_sgls:
+ sgl_current = *sec_sgl;
+ while (sgl_current) {
+ sgl_next = sgl_current->next;
+ dma_pool_free(info->hw_sgl_pool, sgl_current,
+ sgl_current->next_sgl);
+ sgl_current = sgl_next;
+ }
+ *psec_sgl = 0;
+
+ return ret;
+}
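
A worked example of the chaining above: with a hypothetical
SEC_MAX_SGE_NUM of 64 (the real value is defined in sec_drv.h) and
count = 130 mapped entries, the walk allocates three hardware SGLs linked
through next_sgl, holding 64, 64 and 2 entries respectively, and the head
SGL additionally records entry_sum_in_chain = 130 for the whole chain.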
+
+static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
+ dma_addr_t psec_sgl, struct sec_dev_info *info)
+{
+ struct sec_hw_sgl *sgl_current, *sgl_next;
+
+ if (!hw_sgl)
+ return;
+ sgl_current = hw_sgl;
+ while (sgl_current->next) {
+ sgl_next = sgl_current->next;
+ dma_pool_free(info->hw_sgl_pool, sgl_current,
+ sgl_current->next_sgl);
+ sgl_current = sgl_next;
+ }
+ dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl);
+}
+
+static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen,
+ enum sec_cipher_alg alg)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct device *dev = ctx->queue->dev_info->dev;
+
+ mutex_lock(&ctx->lock);
+ if (ctx->key) {
+ /* rekeying */
+ memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
+ } else {
+ /* new key */
+ ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
+ &ctx->pkey, GFP_KERNEL);
+ if (!ctx->key) {
+ mutex_unlock(&ctx->lock);
+ return -ENOMEM;
+ }
+ }
+ mutex_unlock(&ctx->lock);
+ sec_alg_skcipher_init_context(tfm, key, keylen, alg);
+
+ return 0;
+}
+
+static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ alg = SEC_C_AES_ECB_128;
+ break;
+ case AES_KEYSIZE_192:
+ alg = SEC_C_AES_ECB_192;
+ break;
+ case AES_KEYSIZE_256:
+ alg = SEC_C_AES_ECB_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ alg = SEC_C_AES_CBC_128;
+ break;
+ case AES_KEYSIZE_192:
+ alg = SEC_C_AES_CBC_192;
+ break;
+ case AES_KEYSIZE_256:
+ alg = SEC_C_AES_CBC_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ alg = SEC_C_AES_CTR_128;
+ break;
+ case AES_KEYSIZE_192:
+ alg = SEC_C_AES_CTR_192;
+ break;
+ case AES_KEYSIZE_256:
+ alg = SEC_C_AES_CTR_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+ int ret;
+
+ ret = xts_verify_key(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128 * 2:
+ alg = SEC_C_AES_XTS_128;
+ break;
+ case AES_KEYSIZE_256 * 2:
+ alg = SEC_C_AES_XTS_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
+}
+
+static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
+}
+
+static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE * 3)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen,
+ SEC_C_3DES_ECB_192_3KEY);
+}
+
+static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen,
+ SEC_C_3DES_CBC_192_3KEY);
+}
+
+static void sec_alg_free_el(struct sec_request_el *el,
+ struct sec_dev_info *info)
+{
+ sec_free_hw_sgl(el->out, el->dma_out, info);
+ sec_free_hw_sgl(el->in, el->dma_in, info);
+ kfree(el->sgl_in);
+ kfree(el->sgl_out);
+ kfree(el);
+}
+
+/* queuelock must be held */
+static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
+{
+ struct sec_request_el *el, *temp;
+ int ret = 0;
+
+ mutex_lock(&sec_req->lock);
+ list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
+ /*
+		 * Add to hardware queue only under the following circumstances:
+ * 1) Software and hardware queue empty so no chain dependencies
+ * 2) No dependencies as new IV - (check software queue empty
+ * to maintain order)
+ * 3) No dependencies because the mode does no chaining.
+ *
+ * In other cases first insert onto the software queue which
+		 * is then emptied as requests complete.
+ */
+ if (!queue->havesoftqueue ||
+ (kfifo_is_empty(&queue->softqueue) &&
+ sec_queue_empty(queue))) {
+ ret = sec_queue_send(queue, &el->req, sec_req);
+ if (ret == -EAGAIN) {
+				/* Wait until we can send, then try again */
+ /* DEAD if here - should not happen */
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+ } else {
+ kfifo_put(&queue->softqueue, el);
+ }
+ }
+err_unlock:
+ mutex_unlock(&sec_req->lock);
+
+ return ret;
+}
+
+static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
+ struct crypto_async_request *req_base)
+{
+ struct skcipher_request *skreq = container_of(req_base,
+ struct skcipher_request,
+ base);
+ struct sec_request *sec_req = skcipher_request_ctx(skreq);
+ struct sec_request *backlog_req;
+ struct sec_request_el *sec_req_el, *nextrequest;
+ struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
+ struct device *dev = ctx->queue->dev_info->dev;
+ int icv_or_skey_en, ret;
+ bool done;
+
+ sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
+ head);
+ icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
+ SEC_BD_W0_ICV_OR_SKEY_EN_S;
+ if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
+ dev_err(dev, "Got an invalid answer %lu %d\n",
+ sec_resp->w1 & SEC_BD_W1_BD_INVALID,
+ icv_or_skey_en);
+ sec_req->err = -EINVAL;
+ /*
+ * We need to muddle on to avoid getting stuck with elements
+		 * on the queue. The error will be reported to the requester
+		 * so it should be able to handle it appropriately.
+ */
+ }
+
+ mutex_lock(&ctx->queue->queuelock);
+ /* Put the IV in place for chained cases */
+ switch (ctx->cipher_alg) {
+ case SEC_C_AES_CBC_128:
+ case SEC_C_AES_CBC_192:
+ case SEC_C_AES_CBC_256:
+ if (sec_req_el->req.w0 & SEC_BD_W0_DE)
+ sg_pcopy_to_buffer(sec_req_el->sgl_out,
+ sg_nents(sec_req_el->sgl_out),
+ skreq->iv,
+ crypto_skcipher_ivsize(atfm),
+ sec_req_el->el_length -
+ crypto_skcipher_ivsize(atfm));
+ else
+ sg_pcopy_to_buffer(sec_req_el->sgl_in,
+ sg_nents(sec_req_el->sgl_in),
+ skreq->iv,
+ crypto_skcipher_ivsize(atfm),
+ sec_req_el->el_length -
+ crypto_skcipher_ivsize(atfm));
+ /* No need to sync to the device as coherent DMA */
+ break;
+ case SEC_C_AES_CTR_128:
+ case SEC_C_AES_CTR_192:
+ case SEC_C_AES_CTR_256:
+ crypto_inc(skreq->iv, 16);
+ break;
+ default:
+ /* Do not update */
+ break;
+ }
+
+ if (ctx->queue->havesoftqueue &&
+ !kfifo_is_empty(&ctx->queue->softqueue) &&
+ sec_queue_empty(ctx->queue)) {
+ ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
+ if (ret <= 0)
+ dev_err(dev,
+ "Error getting next element from kfifo %d\n",
+ ret);
+ else
+ /* We know there is space so this cannot fail */
+ sec_queue_send(ctx->queue, &nextrequest->req,
+ nextrequest->sec_req);
+ } else if (!list_empty(&ctx->backlog)) {
+ /* Need to verify there is room first */
+ backlog_req = list_first_entry(&ctx->backlog,
+ typeof(*backlog_req),
+ backlog_head);
+ if (sec_queue_can_enqueue(ctx->queue,
+ backlog_req->num_elements) ||
+ (ctx->queue->havesoftqueue &&
+ kfifo_avail(&ctx->queue->softqueue) >
+ backlog_req->num_elements)) {
+ sec_send_request(backlog_req, ctx->queue);
+ backlog_req->req_base->complete(backlog_req->req_base,
+ -EINPROGRESS);
+ list_del(&backlog_req->backlog_head);
+ }
+ }
+ mutex_unlock(&ctx->queue->queuelock);
+
+ mutex_lock(&sec_req->lock);
+ list_del(&sec_req_el->head);
+ mutex_unlock(&sec_req->lock);
+ sec_alg_free_el(sec_req_el, ctx->queue->dev_info);
+
+ /*
+ * Request is done.
+	 * The lock/unlock dance below is needed because the completion
+	 * callback may free the request, and the lock along with it.
+ */
+ mutex_lock(&sec_req->lock);
+ done = list_empty(&sec_req->elements);
+ mutex_unlock(&sec_req->lock);
+ if (done) {
+ if (crypto_skcipher_ivsize(atfm)) {
+ dma_unmap_single(dev, sec_req->dma_iv,
+ crypto_skcipher_ivsize(atfm),
+ DMA_TO_DEVICE);
+ }
+ dma_unmap_sg(dev, skreq->src, sec_req->len_in,
+ DMA_BIDIRECTIONAL);
+ if (skreq->src != skreq->dst)
+ dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
+ DMA_BIDIRECTIONAL);
+ skreq->base.complete(&skreq->base, sec_req->err);
+ }
+}
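
The IV handling in this callback implements skcipher chaining semantics:
on return, req->iv must let the caller continue the stream in a new
request. For CBC that means the last ciphertext block (taken from the
destination, or from the source for in-place operation); for CTR it is an
advanced counter:

	IV_next = C_last    /* CBC: copied via sg_pcopy_to_buffer() */
	ctr     = ctr + 1   /* CTR: via crypto_inc(iv, 16) */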
+
+void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
+{
+ struct sec_request *sec_req = shadow;
+
+ sec_req->cb(resp, sec_req->req_base);
+}
+
+static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
+ int *steps)
+{
+ size_t *sizes;
+ int i;
+
+	/* Split into suitably sized blocks */
+ *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
+ sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
+ if (!sizes)
+ return -ENOMEM;
+
+ for (i = 0; i < *steps - 1; i++)
+ sizes[i] = SEC_REQ_LIMIT;
+ sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
+ *split_sizes = sizes;
+
+ return 0;
+}
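
A quick worked example: with SEC_REQ_LIMIT = SZ_32M, a 72 MiB request
gives steps = roundup(72M, 32M) / 32M = 3 (i.e. DIV_ROUND_UP), and the
loop fills split_sizes = { 32M, 32M, 8M }, the tail entry being
72M - 2 * 32M.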
+
+static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
+ int steps, struct scatterlist ***splits,
+ int **splits_nents,
+ int sgl_len_in,
+ struct device *dev)
+{
+ int ret, count;
+
+ count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+ if (!count)
+ return -EINVAL;
+
+ *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
+ if (!*splits) {
+ ret = -ENOMEM;
+ goto err_unmap_sg;
+ }
+ *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
+ if (!*splits_nents) {
+ ret = -ENOMEM;
+ goto err_free_splits;
+ }
+
+	/* split the single mapped sgl into one sgl per step */
+ ret = sg_split(sgl, count, 0, steps, split_sizes,
+ *splits, *splits_nents, GFP_KERNEL);
+ if (ret) {
+ ret = -ENOMEM;
+ goto err_free_splits_nents;
+ }
+
+ return 0;
+
+err_free_splits_nents:
+ kfree(*splits_nents);
+err_free_splits:
+ kfree(*splits);
+err_unmap_sg:
+ dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+
+ return ret;
+}
+
+/*
+ * Reverses the sec_map_and_split_sg call for messages not yet added to
+ * the queues.
+ */
+static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
+ struct scatterlist **splits, int *splits_nents,
+ int sgl_len_in, struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < steps; i++)
+ kfree(splits[i]);
+ kfree(splits_nents);
+ kfree(splits);
+
+ dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+}
+
+static struct sec_request_el
+*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
+ int el_size, bool different_dest,
+ struct scatterlist *sgl_in, int n_ents_in,
+ struct scatterlist *sgl_out, int n_ents_out,
+ struct sec_dev_info *info)
+{
+ struct sec_request_el *el;
+ struct sec_bd_info *req;
+ int ret;
+
+ el = kzalloc(sizeof(*el), GFP_KERNEL);
+ if (!el)
+ return ERR_PTR(-ENOMEM);
+ el->el_length = el_size;
+ req = &el->req;
+ memcpy(req, template, sizeof(*req));
+
+ req->w0 &= ~SEC_BD_W0_CIPHER_M;
+ if (encrypt)
+ req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
+ else
+ req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;
+
+ req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
+ req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
+ SEC_BD_W0_C_GRAN_SIZE_19_16_M;
+
+ req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
+ req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
+ SEC_BD_W0_C_GRAN_SIZE_21_20_M;
+
+ /* Writing whole u32 so no need to take care of masking */
+ req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
+ ((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
+ SEC_BD_W2_C_GRAN_SIZE_15_0_M);
+
+ req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
+ req->w1 |= SEC_BD_W1_ADDR_TYPE;
+
+ el->sgl_in = sgl_in;
+
+ ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
+ n_ents_in, info);
+ if (ret)
+ goto err_free_el;
+
+ req->data_addr_lo = lower_32_bits(el->dma_in);
+ req->data_addr_hi = upper_32_bits(el->dma_in);
+
+ if (different_dest) {
+ el->sgl_out = sgl_out;
+ ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
+ el->sgl_out,
+ n_ents_out, info);
+ if (ret)
+ goto err_free_hw_sgl_in;
+
+ req->w0 |= SEC_BD_W0_DE;
+ req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
+ req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);
+
+ } else {
+ req->w0 &= ~SEC_BD_W0_DE;
+ req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
+ req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
+ }
+
+ return el;
+
+err_free_hw_sgl_in:
+ sec_free_hw_sgl(el->in, el->dma_in, info);
+err_free_el:
+ kfree(el);
+
+ return ERR_PTR(ret);
+}
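
The granule size is too wide for a single field, so it is scattered across
the BD words; for example el_size = 0x123456 packs as:

	w2 bits 15:0     = 0x3456   (el_size & 0xffff)
	w0 "19_16" field = 0x2      (bits 19:16 of el_size)
	w0 "21_20" field = 0x1      (bits 21:20 of el_size)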
+
+static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
+ bool encrypt)
+{
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
+ struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sec_queue *queue = ctx->queue;
+ struct sec_request *sec_req = skcipher_request_ctx(skreq);
+ struct sec_dev_info *info = queue->dev_info;
+ int i, ret, steps;
+ size_t *split_sizes;
+ struct scatterlist **splits_in;
+ struct scatterlist **splits_out = NULL;
+ int *splits_in_nents;
+ int *splits_out_nents = NULL;
+ struct sec_request_el *el, *temp;
+
+ mutex_init(&sec_req->lock);
+ sec_req->req_base = &skreq->base;
+ sec_req->err = 0;
+ /* SGL mapping out here to allow us to break it up as necessary */
+ sec_req->len_in = sg_nents(skreq->src);
+
+ ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
+ &steps);
+ if (ret)
+ return ret;
+ sec_req->num_elements = steps;
+ ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
+ &splits_in_nents, sec_req->len_in,
+ info->dev);
+ if (ret)
+ goto err_free_split_sizes;
+
+ if (skreq->src != skreq->dst) {
+ sec_req->len_out = sg_nents(skreq->dst);
+ ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
+ &splits_out, &splits_out_nents,
+ sec_req->len_out, info->dev);
+ if (ret)
+ goto err_unmap_in_sg;
+ }
+	/* Shared info stored in sec_req - applies to all BDs */
+ sec_req->tfm_ctx = ctx;
+ sec_req->cb = sec_skcipher_alg_callback;
+ INIT_LIST_HEAD(&sec_req->elements);
+
+ /*
+	 * Future optimization: in the chaining case we can't use a dma
+	 * pool bounce buffer, but where we know there is no chaining we
+	 * could.
+ */
+ if (crypto_skcipher_ivsize(atfm)) {
+ sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
+ crypto_skcipher_ivsize(atfm),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
+ ret = -ENOMEM;
+ goto err_unmap_out_sg;
+ }
+ }
+
+ /* Set them all up then queue - cleaner error handling. */
+ for (i = 0; i < steps; i++) {
+ el = sec_alg_alloc_and_fill_el(&ctx->req_template,
+ encrypt ? 1 : 0,
+ split_sizes[i],
+ skreq->src != skreq->dst,
+ splits_in[i], splits_in_nents[i],
+ splits_out[i],
+ splits_out_nents[i], info);
+ if (IS_ERR(el)) {
+ ret = PTR_ERR(el);
+ goto err_free_elements;
+ }
+ el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
+ el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
+ el->sec_req = sec_req;
+ list_add_tail(&el->head, &sec_req->elements);
+ }
+
+ /*
+ * Only attempt to queue if the whole lot can fit in the queue -
+	 * we can't successfully clean up after a partial queueing so this
+ * must succeed or fail atomically.
+ *
+ * Big hammer test of both software and hardware queues - could be
+ * more refined but this is unlikely to happen so no need.
+ */
+
+	/* Cleanup - all elements in pointer arrays have been copied */
+ kfree(splits_in_nents);
+ kfree(splits_in);
+ kfree(splits_out_nents);
+ kfree(splits_out);
+ kfree(split_sizes);
+
+ /* Grab a big lock for a long time to avoid concurrency issues */
+ mutex_lock(&queue->queuelock);
+
+ /*
+ * Can go on to queue if we have space in either:
+ * 1) The hardware queue and no software queue
+ * 2) The software queue
+ * AND there is nothing in the backlog. If there is backlog we
+ * have to only queue to the backlog queue and return busy.
+ */
+ if ((!sec_queue_can_enqueue(queue, steps) &&
+ (!queue->havesoftqueue ||
+	      kfifo_avail(&queue->softqueue) < steps)) ||
+ !list_empty(&ctx->backlog)) {
+ if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ list_add_tail(&sec_req->backlog_head, &ctx->backlog);
+ mutex_unlock(&queue->queuelock);
+ return -EBUSY;
+ }
+
+ ret = -EBUSY;
+ mutex_unlock(&queue->queuelock);
+ goto err_free_elements;
+ }
+ ret = sec_send_request(sec_req, queue);
+ mutex_unlock(&queue->queuelock);
+ if (ret)
+ goto err_free_elements;
+
+ return -EINPROGRESS;
+
+err_free_elements:
+ list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
+ list_del(&el->head);
+ sec_alg_free_el(el, info);
+ }
+ if (crypto_skcipher_ivsize(atfm))
+ dma_unmap_single(info->dev, sec_req->dma_iv,
+ crypto_skcipher_ivsize(atfm),
+ DMA_BIDIRECTIONAL);
+err_unmap_out_sg:
+ if (skreq->src != skreq->dst)
+ sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
+ splits_out_nents, sec_req->len_out,
+ info->dev);
+err_unmap_in_sg:
+ sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
+ sec_req->len_in, info->dev);
+err_free_split_sizes:
+ kfree(split_sizes);
+
+ return ret;
+}
+
+static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
+{
+ return sec_alg_skcipher_crypto(req, true);
+}
+
+static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
+{
+ return sec_alg_skcipher_crypto(req, false);
+}
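
Since a successful submission returns -EINPROGRESS, a synchronous caller
pairs these entry points with the generic crypto wait helpers. A minimal
sketch, assuming src_sg/dst_sg/len/iv are already set up (error handling
elided):

	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);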
+
+static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ mutex_init(&ctx->lock);
+ INIT_LIST_HEAD(&ctx->backlog);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));
+
+ ctx->queue = sec_queue_alloc_start_safe();
+ if (IS_ERR(ctx->queue))
+ return PTR_ERR(ctx->queue);
+
+ mutex_init(&ctx->queue->queuelock);
+ ctx->queue->havesoftqueue = false;
+
+ return 0;
+}
+
+static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct device *dev = ctx->queue->dev_info->dev;
+
+ if (ctx->key) {
+ memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
+ dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
+ ctx->pkey);
+ }
+ sec_queue_stop_release(ctx->queue);
+}
+
+static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ ret = sec_alg_skcipher_init(tfm);
+ if (ret)
+ return ret;
+
+ INIT_KFIFO(ctx->queue->softqueue);
+ ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
+ if (ret) {
+ sec_alg_skcipher_exit(tfm);
+ return ret;
+ }
+ ctx->queue->havesoftqueue = true;
+
+ return 0;
+}
+
+static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ kfifo_free(&ctx->queue->softqueue);
+ sec_alg_skcipher_exit(tfm);
+}
+
+static struct skcipher_alg sec_algs[] = {
+ {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "hisi_sec_aes_ecb",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_aes_ecb,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = 0,
+ }, {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "hisi_sec_aes_cbc",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_aes_cbc,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "hisi_sec_aes_ctr",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_aes_ctr,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "hisi_sec_aes_xts",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_aes_xts,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ /* Unable to find any test vectors so untested */
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "hisi_sec_des_ecb",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_des_ecb,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
+ }, {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "hisi_sec_des_cbc",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_des_cbc,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "hisi_sec_3des_cbc",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_3des_cbc,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "hisi_sec_3des_ecb",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_3des_ecb,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ }
+};
+
+int sec_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs != 1)
+ goto unlock;
+
+ ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+ if (ret)
+ --active_devs;
+unlock:
+ mutex_unlock(&algs_lock);
+
+ return ret;
+}
+
+void sec_algs_unregister(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs != 0)
+ goto unlock;
+ crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+
+unlock:
+ mutex_unlock(&algs_lock);
+}
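
The active_devs counter refcounts registration: only the first device to
probe registers the whole sec_algs[] array and only the last one to be
removed unregisters it, so the expected pairing (probe/remove naming
assumed from sec_drv.c) is simply:

	probe()  -> sec_algs_register();
	remove() -> sec_algs_unregister();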
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
new file mode 100644
index 000000000000..c1ee4e7bf996
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -0,0 +1,1323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Hisilicon SEC units found on Hip06 and Hip07
+ *
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ */
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "sec_drv.h"
+
+#define SEC_QUEUE_AR_FROCE_ALLOC 0
+#define SEC_QUEUE_AR_FROCE_NOALLOC 1
+#define SEC_QUEUE_AR_FROCE_DIS 2
+
+#define SEC_QUEUE_AW_FROCE_ALLOC 0
+#define SEC_QUEUE_AW_FROCE_NOALLOC 1
+#define SEC_QUEUE_AW_FROCE_DIS 2
+
+/* SEC_ALGSUB registers */
+#define SEC_ALGSUB_CLK_EN_REG 0x03b8
+#define SEC_ALGSUB_CLK_DIS_REG 0x03bc
+#define SEC_ALGSUB_CLK_ST_REG 0x535c
+#define SEC_ALGSUB_RST_REQ_REG 0x0aa8
+#define SEC_ALGSUB_RST_DREQ_REG 0x0aac
+#define SEC_ALGSUB_RST_ST_REG 0x5a54
+#define SEC_ALGSUB_RST_ST_IS_RST BIT(0)
+
+#define SEC_ALGSUB_BUILD_RST_REQ_REG 0x0ab8
+#define SEC_ALGSUB_BUILD_RST_DREQ_REG 0x0abc
+#define SEC_ALGSUB_BUILD_RST_ST_REG 0x5a5c
+#define SEC_ALGSUB_BUILD_RST_ST_IS_RST BIT(0)
+
+#define SEC_SAA_BASE 0x00001000UL
+
+/* SEC_SAA registers */
+#define SEC_SAA_CTRL_REG(x) ((x) * SEC_SAA_ADDR_SIZE)
+#define SEC_SAA_CTRL_GET_QM_EN BIT(0)
+
+#define SEC_ST_INTMSK1_REG 0x0200
+#define SEC_ST_RINT1_REG 0x0400
+#define SEC_ST_INTSTS1_REG 0x0600
+#define SEC_BD_MNG_STAT_REG 0x0800
+#define SEC_PARSING_STAT_REG 0x0804
+#define SEC_LOAD_TIME_OUT_CNT_REG 0x0808
+#define SEC_CORE_WORK_TIME_OUT_CNT_REG 0x080c
+#define SEC_BACK_TIME_OUT_CNT_REG 0x0810
+#define SEC_BD1_PARSING_RD_TIME_OUT_CNT_REG 0x0814
+#define SEC_BD1_PARSING_WR_TIME_OUT_CNT_REG 0x0818
+#define SEC_BD2_PARSING_RD_TIME_OUT_CNT_REG 0x081c
+#define SEC_BD2_PARSING_WR_TIME_OUT_CNT_REG 0x0820
+#define SEC_SAA_ACC_REG 0x083c
+#define SEC_BD_NUM_CNT_IN_SEC_REG 0x0858
+#define SEC_LOAD_WORK_TIME_CNT_REG 0x0860
+#define SEC_CORE_WORK_WORK_TIME_CNT_REG 0x0864
+#define SEC_BACK_WORK_TIME_CNT_REG 0x0868
+#define SEC_SAA_IDLE_TIME_CNT_REG 0x086c
+#define SEC_SAA_CLK_CNT_REG 0x0870
+
+/* SEC_COMMON registers */
+#define SEC_CLK_EN_REG 0x0000
+#define SEC_CTRL_REG 0x0004
+
+#define SEC_COMMON_CNT_CLR_CE_REG 0x0008
+#define SEC_COMMON_CNT_CLR_CE_CLEAR BIT(0)
+#define SEC_COMMON_CNT_CLR_CE_SNAP_EN BIT(1)
+
+#define SEC_SECURE_CTRL_REG 0x000c
+#define SEC_AXI_CACHE_CFG_REG 0x0010
+#define SEC_AXI_QOS_CFG_REG 0x0014
+#define SEC_IPV4_MASK_TABLE_REG 0x0020
+#define SEC_IPV6_MASK_TABLE_X_REG(x) (0x0024 + (x) * 4)
+#define SEC_FSM_MAX_CNT_REG 0x0064
+
+#define SEC_CTRL2_REG 0x0068
+#define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M GENMASK(3, 0)
+#define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S 0
+#define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M GENMASK(6, 4)
+#define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S 4
+#define SEC_CTRL2_CLK_GATE_EN BIT(7)
+#define SEC_CTRL2_ENDIAN_BD BIT(8)
+#define SEC_CTRL2_ENDIAN_BD_TYPE BIT(9)
+
+#define SEC_CNT_PRECISION_CFG_REG 0x006c
+#define SEC_DEBUG_BD_CFG_REG 0x0070
+#define SEC_DEBUG_BD_CFG_WB_NORMAL BIT(0)
+#define SEC_DEBUG_BD_CFG_WB_EN BIT(1)
+
+#define SEC_Q_SIGHT_SEL 0x0074
+#define SEC_Q_SIGHT_HIS_CLR 0x0078
+#define SEC_Q_VMID_CFG_REG(q) (0x0100 + (q) * 4)
+#define SEC_Q_WEIGHT_CFG_REG(q) (0x200 + (q) * 4)
+#define SEC_STAT_CLR_REG 0x0a00
+#define SEC_SAA_IDLE_CNT_CLR_REG 0x0a04
+#define SEC_QM_CPL_Q_IDBUF_DFX_CFG_REG 0x0b00
+#define SEC_QM_CPL_Q_IDBUF_DFX_RESULT_REG 0x0b04
+#define SEC_QM_BD_DFX_CFG_REG 0x0b08
+#define SEC_QM_BD_DFX_RESULT_REG 0x0b0c
+#define SEC_QM_BDID_DFX_RESULT_REG 0x0b10
+#define SEC_QM_BD_DFIFO_STATUS_REG 0x0b14
+#define SEC_QM_BD_DFX_CFG2_REG 0x0b1c
+#define SEC_QM_BD_DFX_RESULT2_REG 0x0b20
+#define SEC_QM_BD_IDFIFO_STATUS_REG 0x0b18
+#define SEC_QM_BD_DFIFO_STATUS2_REG 0x0b28
+#define SEC_QM_BD_IDFIFO_STATUS2_REG 0x0b2c
+
+#define SEC_HASH_IPV4_MASK 0xfff00000
+#define SEC_MAX_SAA_NUM 0xa
+#define SEC_SAA_ADDR_SIZE 0x1000
+
+#define SEC_Q_INIT_REG 0x0
+#define SEC_Q_INIT_WO_STAT_CLEAR 0x2
+#define SEC_Q_INIT_AND_STAT_CLEAR 0x3
+
+#define SEC_Q_CFG_REG 0x8
+#define SEC_Q_CFG_REORDER BIT(0)
+
+#define SEC_Q_PROC_NUM_CFG_REG 0x10
+#define SEC_QUEUE_ENB_REG 0x18
+
+#define SEC_Q_DEPTH_CFG_REG 0x50
+#define SEC_Q_DEPTH_CFG_DEPTH_M GENMASK(11, 0)
+#define SEC_Q_DEPTH_CFG_DEPTH_S 0
+
+#define SEC_Q_BASE_HADDR_REG 0x54
+#define SEC_Q_BASE_LADDR_REG 0x58
+#define SEC_Q_WR_PTR_REG 0x5c
+#define SEC_Q_OUTORDER_BASE_HADDR_REG 0x60
+#define SEC_Q_OUTORDER_BASE_LADDR_REG 0x64
+#define SEC_Q_OUTORDER_RD_PTR_REG 0x68
+#define SEC_Q_OT_TH_REG 0x6c
+
+#define SEC_Q_ARUSER_CFG_REG 0x70
+#define SEC_Q_ARUSER_CFG_FA BIT(0)
+#define SEC_Q_ARUSER_CFG_FNA BIT(1)
+#define SEC_Q_ARUSER_CFG_RINVLD BIT(2)
+#define SEC_Q_ARUSER_CFG_PKG BIT(3)
+
+#define SEC_Q_AWUSER_CFG_REG 0x74
+#define SEC_Q_AWUSER_CFG_FA BIT(0)
+#define SEC_Q_AWUSER_CFG_FNA BIT(1)
+#define SEC_Q_AWUSER_CFG_PKG BIT(2)
+
+#define SEC_Q_ERR_BASE_HADDR_REG 0x7c
+#define SEC_Q_ERR_BASE_LADDR_REG 0x80
+#define SEC_Q_CFG_VF_NUM_REG 0x84
+#define SEC_Q_SOFT_PROC_PTR_REG 0x88
+#define SEC_Q_FAIL_INT_MSK_REG 0x300
+#define SEC_Q_FLOW_INT_MKS_REG 0x304
+#define SEC_Q_FAIL_RINT_REG 0x400
+#define SEC_Q_FLOW_RINT_REG 0x404
+#define SEC_Q_FAIL_INT_STATUS_REG 0x500
+#define SEC_Q_FLOW_INT_STATUS_REG 0x504
+#define SEC_Q_STATUS_REG 0x600
+#define SEC_Q_RD_PTR_REG 0x604
+#define SEC_Q_PRO_PTR_REG 0x608
+#define SEC_Q_OUTORDER_WR_PTR_REG 0x60c
+#define SEC_Q_OT_CNT_STATUS_REG 0x610
+#define SEC_Q_INORDER_BD_NUM_ST_REG 0x650
+#define SEC_Q_INORDER_GET_FLAG_ST_REG 0x654
+#define SEC_Q_INORDER_ADD_FLAG_ST_REG 0x658
+#define SEC_Q_INORDER_TASK_INT_NUM_LEFT_ST_REG 0x65c
+#define SEC_Q_RD_DONE_PTR_REG 0x660
+#define SEC_Q_CPL_Q_BD_NUM_ST_REG 0x700
+#define SEC_Q_CPL_Q_PTR_ST_REG 0x704
+#define SEC_Q_CPL_Q_H_ADDR_ST_REG 0x708
+#define SEC_Q_CPL_Q_L_ADDR_ST_REG 0x70c
+#define SEC_Q_CPL_TASK_INT_NUM_LEFT_ST_REG 0x710
+#define SEC_Q_WRR_ID_CHECK_REG 0x714
+#define SEC_Q_CPLQ_FULL_CHECK_REG 0x718
+#define SEC_Q_SUCCESS_BD_CNT_REG 0x800
+#define SEC_Q_FAIL_BD_CNT_REG 0x804
+#define SEC_Q_GET_BD_CNT_REG 0x808
+#define SEC_Q_IVLD_CNT_REG 0x80c
+#define SEC_Q_BD_PROC_GET_CNT_REG 0x810
+#define SEC_Q_BD_PROC_DONE_CNT_REG 0x814
+#define SEC_Q_LAT_CLR_REG 0x850
+#define SEC_Q_PKT_LAT_MAX_REG 0x854
+#define SEC_Q_PKT_LAT_AVG_REG 0x858
+#define SEC_Q_PKT_LAT_MIN_REG 0x85c
+#define SEC_Q_ID_CLR_CFG_REG 0x900
+#define SEC_Q_1ST_BD_ERR_ID_REG 0x904
+#define SEC_Q_1ST_AUTH_FAIL_ID_REG 0x908
+#define SEC_Q_1ST_RD_ERR_ID_REG 0x90c
+#define SEC_Q_1ST_ECC2_ERR_ID_REG 0x910
+#define SEC_Q_1ST_IVLD_ID_REG 0x914
+#define SEC_Q_1ST_BD_WR_ERR_ID_REG 0x918
+#define SEC_Q_1ST_ERR_BD_WR_ERR_ID_REG 0x91c
+#define SEC_Q_1ST_BD_MAC_WR_ERR_ID_REG 0x920
+
+struct sec_debug_bd_info {
+#define SEC_DEBUG_BD_INFO_SOFT_ERR_CHECK_M GENMASK(22, 0)
+ u32 soft_err_check;
+#define SEC_DEBUG_BD_INFO_HARD_ERR_CHECK_M GENMASK(9, 0)
+ u32 hard_err_check;
+ u32 icv_mac1st_word;
+#define SEC_DEBUG_BD_INFO_GET_ID_M GENMASK(19, 0)
+ u32 sec_get_id;
+ /* W4---W15 */
+ u32 reserv_left[12];
+};
+
+struct sec_out_bd_info {
+#define SEC_OUT_BD_INFO_Q_ID_M GENMASK(11, 0)
+#define SEC_OUT_BD_INFO_ECC_2BIT_ERR BIT(14)
+ u16 data;
+};
+
+#define SEC_MAX_DEVICES 8
+static struct sec_dev_info *sec_devices[SEC_MAX_DEVICES];
+static DEFINE_MUTEX(sec_id_lock);
+
+static int sec_queue_map_io(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+ struct resource *res;
+
+ res = platform_get_resource(to_platform_device(dev),
+ IORESOURCE_MEM,
+ 2 + queue->queue_id);
+ if (!res) {
+ dev_err(dev, "Failed to get queue %d memory resource\n",
+ queue->queue_id);
+ return -ENOMEM;
+ }
+ queue->regs = ioremap(res->start, resource_size(res));
+ if (!queue->regs)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sec_queue_unmap_io(struct sec_queue *queue)
+{
+ iounmap(queue->regs);
+}
+
+static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg)
+{
+ void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (ar_pkg)
+ regval |= SEC_Q_ARUSER_CFG_PKG;
+ else
+ regval &= ~SEC_Q_ARUSER_CFG_PKG;
+ writel_relaxed(regval, addr);
+
+ return 0;
+}
+
+static int sec_queue_aw_pkgattr(struct sec_queue *queue, u32 aw_pkg)
+{
+ void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval |= SEC_Q_AWUSER_CFG_PKG;
+ writel_relaxed(regval, addr);
+
+ return 0;
+}
+
+static int sec_clk_en(struct sec_dev_info *info)
+{
+ void __iomem *base = info->regs[SEC_COMMON];
+ u32 i = 0;
+
+ writel_relaxed(0x7, base + SEC_ALGSUB_CLK_EN_REG);
+ do {
+ usleep_range(1000, 10000);
+ if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0x7)
+ return 0;
+ i++;
+ } while (i < 10);
+	dev_err(info->dev, "sec clock enable failed!\n");
+
+ return -EIO;
+}
+
+static int sec_clk_dis(struct sec_dev_info *info)
+{
+ void __iomem *base = info->regs[SEC_COMMON];
+ u32 i = 0;
+
+ writel_relaxed(0x7, base + SEC_ALGSUB_CLK_DIS_REG);
+ do {
+ usleep_range(1000, 10000);
+ if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0)
+ return 0;
+ i++;
+ } while (i < 10);
+	dev_err(info->dev, "sec clock disable failed!\n");
+
+ return -EIO;
+}
+
+static int sec_reset_whole_module(struct sec_dev_info *info)
+{
+ void __iomem *base = info->regs[SEC_COMMON];
+ bool is_reset, b_is_reset;
+ u32 i = 0;
+
+ writel_relaxed(1, base + SEC_ALGSUB_RST_REQ_REG);
+ writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_REQ_REG);
+ while (1) {
+ usleep_range(1000, 10000);
+ is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
+ SEC_ALGSUB_RST_ST_IS_RST;
+ b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
+ SEC_ALGSUB_BUILD_RST_ST_IS_RST;
+ if (is_reset && b_is_reset)
+ break;
+ i++;
+ if (i > 10) {
+ dev_err(info->dev, "Reset req failed\n");
+ return -EIO;
+ }
+ }
+
+ i = 0;
+ writel_relaxed(1, base + SEC_ALGSUB_RST_DREQ_REG);
+ writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_DREQ_REG);
+ while (1) {
+ usleep_range(1000, 10000);
+ is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
+ SEC_ALGSUB_RST_ST_IS_RST;
+ b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
+ SEC_ALGSUB_BUILD_RST_ST_IS_RST;
+ if (!is_reset && !b_is_reset)
+ break;
+
+ i++;
+ if (i > 10) {
+ dev_err(info->dev, "Reset dreq failed\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static void sec_bd_endian_little(struct sec_dev_info *info)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~(SEC_CTRL2_ENDIAN_BD | SEC_CTRL2_ENDIAN_BD_TYPE);
+ writel_relaxed(regval, addr);
+}
+
+/*
+ * sec_cache_config - configure optimum cache placement
+ */
+static void sec_cache_config(struct sec_dev_info *info)
+{
+ struct iommu_domain *domain;
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL_REG;
+
+ domain = iommu_get_domain_for_dev(info->dev);
+
+ /* Check that translation is occurring */
+ if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
+ writel_relaxed(0x44cf9e, addr);
+ else
+ writel_relaxed(0x4cfd9, addr);
+}
+
+static void sec_data_axiwr_otsd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
+ regval |= (cfg << SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S) &
+ SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_data_axird_otsd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
+ regval |= (cfg << SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S) &
+ SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_clk_gate_en(struct sec_dev_info *info, bool clkgate)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (clkgate)
+ regval |= SEC_CTRL2_CLK_GATE_EN;
+ else
+ regval &= ~SEC_CTRL2_CLK_GATE_EN;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_comm_cnt_cfg(struct sec_dev_info *info, bool clr_ce)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (clr_ce)
+ regval |= SEC_COMMON_CNT_CLR_CE_CLEAR;
+ else
+ regval &= ~SEC_COMMON_CNT_CLR_CE_CLEAR;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_commsnap_en(struct sec_dev_info *info, bool snap_en)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (snap_en)
+ regval |= SEC_COMMON_CNT_CLR_CE_SNAP_EN;
+ else
+ regval &= ~SEC_COMMON_CNT_CLR_CE_SNAP_EN;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[])
+{
+ void __iomem *base = info->regs[SEC_SAA];
+ int i;
+
+ for (i = 0; i < 10; i++)
+ writel_relaxed(hash_mask[0],
+ base + SEC_IPV6_MASK_TABLE_X_REG(i));
+}
+
+static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask)
+{
+ if (hash_mask & SEC_HASH_IPV4_MASK) {
+		dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n");
+ return -EINVAL;
+ }
+
+ writel_relaxed(hash_mask,
+ info->regs[SEC_SAA] + SEC_IPV4_MASK_TABLE_REG);
+
+ return 0;
+}
+
+static void sec_set_dbg_bd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_DEBUG_BD_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ /* Always disable write back of normal bd */
+ regval &= ~SEC_DEBUG_BD_CFG_WB_NORMAL;
+
+ if (cfg)
+ regval &= ~SEC_DEBUG_BD_CFG_WB_EN;
+ else
+ regval |= SEC_DEBUG_BD_CFG_WB_EN;
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_saa_getqm_en(struct sec_dev_info *info, u32 saa_indx, u32 en)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_SAA_BASE +
+ SEC_SAA_CTRL_REG(saa_indx);
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (en)
+ regval |= SEC_SAA_CTRL_GET_QM_EN;
+ else
+ regval &= ~SEC_SAA_CTRL_GET_QM_EN;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_saa_int_mask(struct sec_dev_info *info, u32 saa_indx,
+ u32 saa_int_mask)
+{
+ writel_relaxed(saa_int_mask,
+ info->regs[SEC_SAA] + SEC_SAA_BASE + SEC_ST_INTMSK1_REG +
+ saa_indx * SEC_SAA_ADDR_SIZE);
+}
+
+static void sec_streamid(struct sec_dev_info *info, int i)
+{
+ #define SEC_SID 0x600
+ #define SEC_VMID 0
+
+ writel_relaxed((SEC_VMID | ((SEC_SID & 0xffff) << 8)),
+ info->regs[SEC_SAA] + SEC_Q_VMID_CFG_REG(i));
+}
+
+static void sec_queue_ar_alloc(struct sec_queue *queue, u32 alloc)
+{
+ void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (alloc == SEC_QUEUE_AR_FROCE_ALLOC) {
+ regval |= SEC_Q_ARUSER_CFG_FA;
+ regval &= ~SEC_Q_ARUSER_CFG_FNA;
+ } else {
+ regval &= ~SEC_Q_ARUSER_CFG_FA;
+ regval |= SEC_Q_ARUSER_CFG_FNA;
+ }
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_queue_aw_alloc(struct sec_queue *queue, u32 alloc)
+{
+ void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (alloc == SEC_QUEUE_AW_FROCE_ALLOC) {
+ regval |= SEC_Q_AWUSER_CFG_FA;
+ regval &= ~SEC_Q_AWUSER_CFG_FNA;
+ } else {
+ regval &= ~SEC_Q_AWUSER_CFG_FA;
+ regval |= SEC_Q_AWUSER_CFG_FNA;
+ }
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_queue_reorder(struct sec_queue *queue, bool reorder)
+{
+ void __iomem *base = queue->regs;
+ u32 regval;
+
+ regval = readl_relaxed(base + SEC_Q_CFG_REG);
+ if (reorder)
+ regval |= SEC_Q_CFG_REORDER;
+ else
+ regval &= ~SEC_Q_CFG_REORDER;
+ writel_relaxed(regval, base + SEC_Q_CFG_REG);
+}
+
+static void sec_queue_depth(struct sec_queue *queue, u32 depth)
+{
+ void __iomem *addr = queue->regs + SEC_Q_DEPTH_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~SEC_Q_DEPTH_CFG_DEPTH_M;
+ regval |= (depth << SEC_Q_DEPTH_CFG_DEPTH_S) & SEC_Q_DEPTH_CFG_DEPTH_M;
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_queue_cmdbase_addr(struct sec_queue *queue, u64 addr)
+{
+ writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_BASE_HADDR_REG);
+ writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_BASE_LADDR_REG);
+}
+
+static void sec_queue_outorder_addr(struct sec_queue *queue, u64 addr)
+{
+ writel_relaxed(upper_32_bits(addr),
+ queue->regs + SEC_Q_OUTORDER_BASE_HADDR_REG);
+ writel_relaxed(lower_32_bits(addr),
+ queue->regs + SEC_Q_OUTORDER_BASE_LADDR_REG);
+}
+
+static void sec_queue_errbase_addr(struct sec_queue *queue, u64 addr)
+{
+ writel_relaxed(upper_32_bits(addr),
+ queue->regs + SEC_Q_ERR_BASE_HADDR_REG);
+ writel_relaxed(lower_32_bits(addr),
+ queue->regs + SEC_Q_ERR_BASE_LADDR_REG);
+}
+
+static void sec_queue_irq_disable(struct sec_queue *queue)
+{
+ writel_relaxed((u32)~0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
+}
+
+static void sec_queue_irq_enable(struct sec_queue *queue)
+{
+ writel_relaxed(0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
+}
+
+static void sec_queue_abn_irq_disable(struct sec_queue *queue)
+{
+ writel_relaxed((u32)~0, queue->regs + SEC_Q_FAIL_INT_MSK_REG);
+}
+
+static void sec_queue_stop(struct sec_queue *queue)
+{
+ disable_irq(queue->task_irq);
+ sec_queue_irq_disable(queue);
+ writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG);
+}
+
+static void sec_queue_start(struct sec_queue *queue)
+{
+ sec_queue_irq_enable(queue);
+ enable_irq(queue->task_irq);
+ queue->expected = 0;
+ writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
+ writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG);
+}
+
+static struct sec_queue *sec_alloc_queue(struct sec_dev_info *info)
+{
+ int i;
+
+ mutex_lock(&info->dev_lock);
+
+ /* Get the first idle queue in SEC device */
+ for (i = 0; i < SEC_Q_NUM; i++)
+ if (!info->queues[i].in_use) {
+ info->queues[i].in_use = true;
+ info->queues_in_use++;
+ mutex_unlock(&info->dev_lock);
+
+ return &info->queues[i];
+ }
+ mutex_unlock(&info->dev_lock);
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int sec_queue_free(struct sec_queue *queue)
+{
+ struct sec_dev_info *info = queue->dev_info;
+
+ if (queue->queue_id >= SEC_Q_NUM) {
+ dev_err(info->dev, "No queue %d\n", queue->queue_id);
+ return -ENODEV;
+ }
+
+ if (!queue->in_use) {
+ dev_err(info->dev, "Queue %d is idle\n", queue->queue_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&info->dev_lock);
+ queue->in_use = false;
+ info->queues_in_use--;
+ mutex_unlock(&info->dev_lock);
+
+ return 0;
+}
+
+static irqreturn_t sec_isr_handle_th(int irq, void *q)
+{
+ sec_queue_irq_disable(q);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t sec_isr_handle(int irq, void *q)
+{
+ struct sec_queue *queue = q;
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+ struct sec_queue_ring_cq *cq_ring = &queue->ring_cq;
+ struct sec_out_bd_info *outorder_msg;
+ struct sec_bd_info *msg;
+ u32 ooo_read, ooo_write;
+ void __iomem *base = queue->regs;
+ int q_id;
+
+ ooo_read = readl(base + SEC_Q_OUTORDER_RD_PTR_REG);
+ ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
+ outorder_msg = cq_ring->vaddr + ooo_read;
+ q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
+ msg = msg_ring->vaddr + q_id;
+
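+ /*
+ * Elements may complete out of order; mark each finished element in
+ * the unprocessed bitmap, then run callbacks strictly from
+ * queue->expected onwards so completion happens in submission order
+ * (which iv chaining across request elements relies on).
+ */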
+ while (ooo_write != ooo_read && (msg->w0 & SEC_BD_W0_DONE)) {
+ /*
+ * Must be before callback otherwise blocks adding other chained
+ * elements
+ */
+ set_bit(q_id, queue->unprocessed);
+ if (q_id == queue->expected)
+ while (test_bit(queue->expected, queue->unprocessed)) {
+ clear_bit(queue->expected, queue->unprocessed);
+ msg = msg_ring->vaddr + queue->expected;
+ msg->w0 &= ~SEC_BD_W0_DONE;
+ msg_ring->callback(msg,
+ queue->shadow[queue->expected]);
+ queue->shadow[queue->expected] = NULL;
+ queue->expected = (queue->expected + 1) %
+ SEC_QUEUE_LEN;
+ atomic_dec(&msg_ring->used);
+ }
+
+ ooo_read = (ooo_read + 1) % SEC_QUEUE_LEN;
+ writel(ooo_read, base + SEC_Q_OUTORDER_RD_PTR_REG);
+ ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
+ outorder_msg = cq_ring->vaddr + ooo_read;
+ q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
+ msg = msg_ring->vaddr + q_id;
+ }
+
+ sec_queue_irq_enable(queue);
+
+ return IRQ_HANDLED;
+}
+
+static int sec_queue_irq_init(struct sec_queue *queue)
+{
+ struct sec_dev_info *info = queue->dev_info;
+ int irq = queue->task_irq;
+ int ret;
+
+ ret = request_threaded_irq(irq, sec_isr_handle_th, sec_isr_handle,
+ IRQF_TRIGGER_RISING, queue->name, queue);
+ if (ret) {
+ dev_err(info->dev, "request irq(%d) failed %d\n", irq, ret);
+ return ret;
+ }
+ disable_irq(irq);
+
+ return 0;
+}
+
+static int sec_queue_irq_uninit(struct sec_queue *queue)
+{
+ free_irq(queue->task_irq, queue);
+
+ return 0;
+}
+
+static struct sec_dev_info *sec_device_get(void)
+{
+ struct sec_dev_info *sec_dev = NULL;
+ struct sec_dev_info *this_sec_dev;
+ int least_busy_n = SEC_Q_NUM + 1;
+ int i;
+
+ /* Find which one is least busy and use that first */
+ for (i = 0; i < SEC_MAX_DEVICES; i++) {
+ this_sec_dev = sec_devices[i];
+ if (this_sec_dev &&
+ this_sec_dev->queues_in_use < least_busy_n) {
+ least_busy_n = this_sec_dev->queues_in_use;
+ sec_dev = this_sec_dev;
+ }
+ }
+
+ return sec_dev;
+}
+
+static struct sec_queue *sec_queue_alloc_start(struct sec_dev_info *info)
+{
+ struct sec_queue *queue;
+
+ queue = sec_alloc_queue(info);
+ if (IS_ERR(queue)) {
+ dev_err(info->dev, "alloc sec queue failed! %ld\n",
+ PTR_ERR(queue));
+ return queue;
+ }
+
+ sec_queue_start(queue);
+
+ return queue;
+}
+
+/**
+ * sec_queue_alloc_start_safe() - get a hw queue from an appropriate instance
+ *
+ * This function does extremely simplistic load balancing. It does not take
+ * into account NUMA locality of the accelerator, or which CPU has requested
+ * the queue. Future work may focus on optimizing this in order to improve
+ * full machine throughput.
+ */
+struct sec_queue *sec_queue_alloc_start_safe(void)
+{
+ struct sec_dev_info *info;
+ struct sec_queue *queue = ERR_PTR(-ENODEV);
+
+ mutex_lock(&sec_id_lock);
+ info = sec_device_get();
+ if (!info)
+ goto unlock;
+
+ queue = sec_queue_alloc_start(info);
+
+unlock:
+ mutex_unlock(&sec_id_lock);
+
+ return queue;
+}
+
+/**
+ * sec_queue_stop_release() - free up a hw queue for reuse
+ * @queue: The queue we are done with.
+ *
+ * This will stop the current queue, terminating any transactions
+ * that are in flight, and return it to the pool of available hw queues.
+ */
+int sec_queue_stop_release(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+ int ret;
+
+ sec_queue_stop(queue);
+
+ ret = sec_queue_free(queue);
+ if (ret)
+ dev_err(dev, "Releasing queue failed %d\n", ret);
+
+ return ret;
+}
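+
+/*
+ * Illustrative pairing, not code from this driver: a client such as
+ * sec_algs.c obtains a queue once and releases it when done, e.g.
+ *
+ *	queue = sec_queue_alloc_start_safe();
+ *	if (IS_ERR(queue))
+ *		return PTR_ERR(queue);
+ *	...
+ *	sec_queue_stop_release(queue);
+ */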
+
+/**
+ * sec_queue_empty() - Is this hardware queue currently empty.
+ * @queue: The queue to test.
+ *
+ * We need to know if we have an empty queue for some of the chaining modes
+ * as if it is not empty we may need to hold the message in a software queue
+ * until the hw queue is drained.
+ */
+bool sec_queue_empty(struct sec_queue *queue)
+{
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+
+ return !atomic_read(&msg_ring->used);
+}
+
+/**
+ * sec_queue_send() - queue up a single operation in the hw queue
+ * @queue: The queue in which to put the message
+ * @msg: The message
+ * @ctx: Context to be put in the shadow array and passed back to cb on result.
+ *
+ * This function will return -EAGAIN if the queue is currently full.
+ */
+int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx)
+{
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+ void __iomem *base = queue->regs;
+ u32 write, read;
+
+ mutex_lock(&msg_ring->lock);
+ read = readl(base + SEC_Q_RD_PTR_REG);
+ write = readl(base + SEC_Q_WR_PTR_REG);
+ if (write == read && atomic_read(&msg_ring->used) == SEC_QUEUE_LEN) {
+ mutex_unlock(&msg_ring->lock);
+ return -EAGAIN;
+ }
+ memcpy(msg_ring->vaddr + write, msg, sizeof(*msg));
+ queue->shadow[write] = ctx;
+ write = (write + 1) % SEC_QUEUE_LEN;
+
+ /* Ensure content updated before queue advance */
+ wmb();
+ writel(write, base + SEC_Q_WR_PTR_REG);
+
+ atomic_inc(&msg_ring->used);
+ mutex_unlock(&msg_ring->lock);
+
+ return 0;
+}
+
+bool sec_queue_can_enqueue(struct sec_queue *queue, int num)
+{
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+
+ return SEC_QUEUE_LEN - atomic_read(&msg_ring->used) >= num;
+}
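+
+/*
+ * Illustrative caller pattern (a sketch; the names el and sec_req are
+ * assumed from sec_algs.c): reserve space for the whole multi-element
+ * request before sending any part of it, and treat -EAGAIN as "park
+ * the element on a software queue".
+ *
+ *	if (sec_queue_can_enqueue(queue, sec_req->num_elements)) {
+ *		ret = sec_queue_send(queue, &el->req, sec_req);
+ *		if (ret == -EAGAIN)
+ *			... defer to the softqueue ...
+ *	}
+ */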
+
+static void sec_queue_hw_init(struct sec_queue *queue)
+{
+ sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC);
+ sec_queue_aw_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC);
+ sec_queue_ar_pkgattr(queue, 1);
+ sec_queue_aw_pkgattr(queue, 1);
+
+ /* Enable out of order queue */
+ sec_queue_reorder(queue, true);
+
+ /* Interrupt after a single complete element */
+ writel_relaxed(1, queue->regs + SEC_Q_PROC_NUM_CFG_REG);
+
+ sec_queue_depth(queue, SEC_QUEUE_LEN - 1);
+
+ sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr);
+
+ sec_queue_outorder_addr(queue, queue->ring_cq.paddr);
+
+ sec_queue_errbase_addr(queue, queue->ring_db.paddr);
+
+ writel_relaxed(0x100, queue->regs + SEC_Q_OT_TH_REG);
+
+ sec_queue_abn_irq_disable(queue);
+ sec_queue_irq_disable(queue);
+ writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
+}
+
+static int sec_hw_init(struct sec_dev_info *info)
+{
+ struct iommu_domain *domain;
+ u32 sec_ipv4_mask = 0;
+ u32 sec_ipv6_mask[10] = {};
+ u32 i;
+ int ret;
+
+ domain = iommu_get_domain_for_dev(info->dev);
+
+ /*
+ * Enable all available processing unit clocks.
+ * Only the first cluster is usable with translations.
+ */
+ if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
+ info->num_saas = 5;
+ else
+ info->num_saas = 10;
+
+ writel_relaxed(GENMASK(info->num_saas - 1, 0),
+ info->regs[SEC_SAA] + SEC_CLK_EN_REG);
+
+ /* 32 bit little endian */
+ sec_bd_endian_little(info);
+
+ sec_cache_config(info);
+
+ /* Data axi port write and read outstanding config as per datasheet */
+ sec_data_axiwr_otsd_cfg(info, 0x7);
+ sec_data_axird_otsd_cfg(info, 0x7);
+
+ /* Enable clock gating */
+ sec_clk_gate_en(info, true);
+
+ /* Set CNT_CYC register not read clear */
+ sec_comm_cnt_cfg(info, false);
+
+ /* Enable CNT_CYC */
+ sec_commsnap_en(info, false);
+
+ writel_relaxed((u32)~0, info->regs[SEC_SAA] + SEC_FSM_MAX_CNT_REG);
+
+ ret = sec_ipv4_hashmask(info, sec_ipv4_mask);
+ if (ret) {
+ dev_err(info->dev, "Failed to set ipv4 hashmask %d\n", ret);
+ return -EIO;
+ }
+
+ sec_ipv6_hashmask(info, sec_ipv6_mask);
+
+ /* do not use debug bd */
+ sec_set_dbg_bd_cfg(info, 0);
+
+ if (domain && (domain->type & __IOMMU_DOMAIN_PAGING)) {
+ for (i = 0; i < SEC_Q_NUM; i++) {
+ sec_streamid(info, i);
+ /* Same QoS for all queues */
+ writel_relaxed(0x3f,
+ info->regs[SEC_SAA] +
+ SEC_Q_WEIGHT_CFG_REG(i));
+ }
+ }
+
+ for (i = 0; i < info->num_saas; i++) {
+ sec_saa_getqm_en(info, i, 1);
+ sec_saa_int_mask(info, i, 0);
+ }
+
+ return 0;
+}
+
+static void sec_hw_exit(struct sec_dev_info *info)
+{
+ int i;
+
+ for (i = 0; i < SEC_MAX_SAA_NUM; i++) {
+ sec_saa_int_mask(info, i, (u32)~0);
+ sec_saa_getqm_en(info, i, 0);
+ }
+}
+
+static void sec_queue_base_init(struct sec_dev_info *info,
+ struct sec_queue *queue, int queue_id)
+{
+ queue->dev_info = info;
+ queue->queue_id = queue_id;
+ snprintf(queue->name, sizeof(queue->name),
+ "%s_%d", dev_name(info->dev), queue->queue_id);
+}
+
+static int sec_map_io(struct sec_dev_info *info, struct platform_device *pdev)
+{
+ struct resource *res;
+ int i;
+
+ for (i = 0; i < SEC_NUM_ADDR_REGIONS; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+
+ if (!res) {
+ dev_err(info->dev, "Memory resource %d not found\n", i);
+ return -EINVAL;
+ }
+
+ info->regs[i] = devm_ioremap(info->dev, res->start,
+ resource_size(res));
+ if (!info->regs[i]) {
+ dev_err(info->dev,
+ "Memory resource %d could not be remapped\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int sec_base_init(struct sec_dev_info *info,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ ret = sec_map_io(info, pdev);
+ if (ret)
+ return ret;
+
+ ret = sec_clk_en(info);
+ if (ret)
+ return ret;
+
+ ret = sec_reset_whole_module(info);
+ if (ret)
+ goto sec_clk_disable;
+
+ ret = sec_hw_init(info);
+ if (ret)
+ goto sec_clk_disable;
+
+ return 0;
+
+sec_clk_disable:
+ sec_clk_dis(info);
+
+ return ret;
+}
+
+static void sec_base_exit(struct sec_dev_info *info)
+{
+ sec_hw_exit(info);
+ sec_clk_dis(info);
+}
+
+#define SEC_Q_CMD_SIZE \
+ round_up(SEC_QUEUE_LEN * sizeof(struct sec_bd_info), PAGE_SIZE)
+#define SEC_Q_CQ_SIZE \
+ round_up(SEC_QUEUE_LEN * sizeof(struct sec_out_bd_info), PAGE_SIZE)
+#define SEC_Q_DB_SIZE \
+ round_up(SEC_QUEUE_LEN * sizeof(struct sec_debug_bd_info), PAGE_SIZE)
+
+static int sec_queue_res_cfg(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+ struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd;
+ struct sec_queue_ring_cq *ring_cq = &queue->ring_cq;
+ struct sec_queue_ring_db *ring_db = &queue->ring_db;
+ int ret;
+
+ ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE,
+ &ring_cmd->paddr,
+ GFP_KERNEL);
+ if (!ring_cmd->vaddr)
+ return -ENOMEM;
+
+ atomic_set(&ring_cmd->used, 0);
+ mutex_init(&ring_cmd->lock);
+ ring_cmd->callback = sec_alg_callback;
+
+ ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE,
+ &ring_cq->paddr,
+ GFP_KERNEL);
+ if (!ring_cq->vaddr) {
+ ret = -ENOMEM;
+ goto err_free_ring_cmd;
+ }
+
+ ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE,
+ &ring_db->paddr,
+ GFP_KERNEL);
+ if (!ring_db->vaddr) {
+ ret = -ENOMEM;
+ goto err_free_ring_cq;
+ }
+ queue->task_irq = platform_get_irq(to_platform_device(dev),
+ queue->queue_id * 2 + 1);
+ if (queue->task_irq <= 0) {
+ ret = -EINVAL;
+ goto err_free_ring_db;
+ }
+
+ return 0;
+
+err_free_ring_db:
+ dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
+ queue->ring_db.paddr);
+err_free_ring_cq:
+ dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
+ queue->ring_cq.paddr);
+err_free_ring_cmd:
+ dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
+ queue->ring_cmd.paddr);
+
+ return ret;
+}
+
+static void sec_queue_free_ring_pages(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+
+ dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
+ queue->ring_db.paddr);
+ dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
+ queue->ring_cq.paddr);
+ dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
+ queue->ring_cmd.paddr);
+}
+
+static int sec_queue_config(struct sec_dev_info *info, struct sec_queue *queue,
+ int queue_id)
+{
+ int ret;
+
+ sec_queue_base_init(info, queue, queue_id);
+
+ ret = sec_queue_res_cfg(queue);
+ if (ret)
+ return ret;
+
+ ret = sec_queue_map_io(queue);
+ if (ret) {
+ dev_err(info->dev, "Queue map failed %d\n", ret);
+ sec_queue_free_ring_pages(queue);
+ return ret;
+ }
+
+ sec_queue_hw_init(queue);
+
+ return 0;
+}
+
+static void sec_queue_unconfig(struct sec_dev_info *info,
+ struct sec_queue *queue)
+{
+ sec_queue_unmap_io(queue);
+ sec_queue_free_ring_pages(queue);
+}
+
+static int sec_id_alloc(struct sec_dev_info *info)
+{
+ int ret = 0;
+ int i;
+
+ mutex_lock(&sec_id_lock);
+
+ for (i = 0; i < SEC_MAX_DEVICES; i++)
+ if (!sec_devices[i])
+ break;
+ if (i == SEC_MAX_DEVICES) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ info->sec_id = i;
+ sec_devices[info->sec_id] = info;
+
+unlock:
+ mutex_unlock(&sec_id_lock);
+
+ return ret;
+}
+
+static void sec_id_free(struct sec_dev_info *info)
+{
+ mutex_lock(&sec_id_lock);
+ sec_devices[info->sec_id] = NULL;
+ mutex_unlock(&sec_id_lock);
+}
+
+static int sec_probe(struct platform_device *pdev)
+{
+ struct sec_dev_info *info;
+ struct device *dev = &pdev->dev;
+ int i, j;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(dev, "Failed to set 64 bit dma mask %d", ret);
+ return -ENODEV;
+ }
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ mutex_init(&info->dev_lock);
+
+ info->hw_sgl_pool = dmam_pool_create("sgl", dev,
+ sizeof(struct sec_hw_sgl), 64, 0);
+ if (!info->hw_sgl_pool) {
+ dev_err(dev, "Failed to create sec sgl dma pool\n");
+ return -ENOMEM;
+ }
+
+ ret = sec_base_init(info, pdev);
+ if (ret) {
+ dev_err(dev, "Base initialization fail! %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < SEC_Q_NUM; i++) {
+ ret = sec_queue_config(info, &info->queues[i], i);
+ if (ret)
+ goto queues_unconfig;
+
+ ret = sec_queue_irq_init(&info->queues[i]);
+ if (ret) {
+ sec_queue_unconfig(info, &info->queues[i]);
+ goto queues_unconfig;
+ }
+ }
+
+ ret = sec_algs_register();
+ if (ret) {
+ dev_err(dev, "Failed to register algorithms with crypto %d\n",
+ ret);
+ goto queues_unconfig;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ ret = sec_id_alloc(info);
+ if (ret)
+ goto algs_unregister;
+
+ return 0;
+
+algs_unregister:
+ sec_algs_unregister();
+queues_unconfig:
+ for (j = i - 1; j >= 0; j--) {
+ sec_queue_irq_uninit(&info->queues[j]);
+ sec_queue_unconfig(info, &info->queues[j]);
+ }
+ sec_base_exit(info);
+
+ return ret;
+}
+
+static int sec_remove(struct platform_device *pdev)
+{
+ struct sec_dev_info *info = platform_get_drvdata(pdev);
+ int i;
+
+ /* Unexpose as soon as possible, reuse during remove is fine */
+ sec_id_free(info);
+
+ sec_algs_unregister();
+
+ for (i = 0; i < SEC_Q_NUM; i++) {
+ sec_queue_irq_uninit(&info->queues[i]);
+ sec_queue_unconfig(info, &info->queues[i]);
+ }
+
+ sec_base_exit(info);
+
+ return 0;
+}
+
+static const __maybe_unused struct of_device_id sec_match[] = {
+ { .compatible = "hisilicon,hip06-sec" },
+ { .compatible = "hisilicon,hip07-sec" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sec_match);
+
+static const __maybe_unused struct acpi_device_id sec_acpi_match[] = {
+ { "HISI02C1", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, sec_acpi_match);
+
+static struct platform_driver sec_driver = {
+ .probe = sec_probe,
+ .remove = sec_remove,
+ .driver = {
+ .name = "hisi_sec_platform_driver",
+ .of_match_table = sec_match,
+ .acpi_match_table = ACPI_PTR(sec_acpi_match),
+ },
+};
+module_platform_driver(sec_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hisilicon Security Accelerators");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com");
+MODULE_AUTHOR("Jonathan Cameron <jonathan.cameron@huawei.com>");
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
new file mode 100644
index 000000000000..2d2f186674ba
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -0,0 +1,428 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef _SEC_DRV_H_
+#define _SEC_DRV_H_
+
+#include <crypto/algapi.h>
+#include <linux/kfifo.h>
+
+#define SEC_MAX_SGE_NUM 64
+#define SEC_HW_RING_NUM 3
+
+#define SEC_CMD_RING 0
+#define SEC_OUTORDER_RING 1
+#define SEC_DBG_RING 2
+
+/* A reasonable length to balance memory use against flexibility */
+#define SEC_QUEUE_LEN 512
+
+struct sec_bd_info {
+#define SEC_BD_W0_T_LEN_M GENMASK(4, 0)
+#define SEC_BD_W0_T_LEN_S 0
+
+#define SEC_BD_W0_C_WIDTH_M GENMASK(6, 5)
+#define SEC_BD_W0_C_WIDTH_S 5
+#define SEC_C_WIDTH_AES_128BIT 0
+#define SEC_C_WIDTH_AES_8BIT 1
+#define SEC_C_WIDTH_AES_1BIT 2
+#define SEC_C_WIDTH_DES_64BIT 0
+#define SEC_C_WIDTH_DES_8BIT 1
+#define SEC_C_WIDTH_DES_1BIT 2
+
+#define SEC_BD_W0_C_MODE_M GENMASK(9, 7)
+#define SEC_BD_W0_C_MODE_S 7
+#define SEC_C_MODE_ECB 0
+#define SEC_C_MODE_CBC 1
+#define SEC_C_MODE_CTR 4
+#define SEC_C_MODE_CCM 5
+#define SEC_C_MODE_GCM 6
+#define SEC_C_MODE_XTS 7
+
+#define SEC_BD_W0_SEQ BIT(10)
+#define SEC_BD_W0_DE BIT(11)
+#define SEC_BD_W0_DAT_SKIP_M GENMASK(13, 12)
+#define SEC_BD_W0_DAT_SKIP_S 12
+#define SEC_BD_W0_C_GRAN_SIZE_19_16_M GENMASK(17, 14)
+#define SEC_BD_W0_C_GRAN_SIZE_19_16_S 14
+
+#define SEC_BD_W0_CIPHER_M GENMASK(19, 18)
+#define SEC_BD_W0_CIPHER_S 18
+#define SEC_CIPHER_NULL 0
+#define SEC_CIPHER_ENCRYPT 1
+#define SEC_CIPHER_DECRYPT 2
+
+#define SEC_BD_W0_AUTH_M GENMASK(21, 20)
+#define SEC_BD_W0_AUTH_S 20
+#define SEC_AUTH_NULL 0
+#define SEC_AUTH_MAC 1
+#define SEC_AUTH_VERIF 2
+
+#define SEC_BD_W0_AI_GEN BIT(22)
+#define SEC_BD_W0_CI_GEN BIT(23)
+#define SEC_BD_W0_NO_HPAD BIT(24)
+#define SEC_BD_W0_HM_M GENMASK(26, 25)
+#define SEC_BD_W0_HM_S 25
+#define SEC_BD_W0_ICV_OR_SKEY_EN_M GENMASK(28, 27)
+#define SEC_BD_W0_ICV_OR_SKEY_EN_S 27
+
+/* Multi purpose field - gran size bits for send, flag for recv */
+#define SEC_BD_W0_FLAG_M GENMASK(30, 29)
+#define SEC_BD_W0_C_GRAN_SIZE_21_20_M GENMASK(30, 29)
+#define SEC_BD_W0_FLAG_S 29
+#define SEC_BD_W0_C_GRAN_SIZE_21_20_S 29
+
+#define SEC_BD_W0_DONE BIT(31)
+ u32 w0;
+
+#define SEC_BD_W1_AUTH_GRAN_SIZE_M GENMASK(21, 0)
+#define SEC_BD_W1_AUTH_GRAN_SIZE_S 0
+#define SEC_BD_W1_M_KEY_EN BIT(22)
+#define SEC_BD_W1_BD_INVALID BIT(23)
+#define SEC_BD_W1_ADDR_TYPE BIT(24)
+
+#define SEC_BD_W1_A_ALG_M GENMASK(28, 25)
+#define SEC_BD_W1_A_ALG_S 25
+#define SEC_A_ALG_SHA1 0
+#define SEC_A_ALG_SHA256 1
+#define SEC_A_ALG_MD5 2
+#define SEC_A_ALG_SHA224 3
+#define SEC_A_ALG_HMAC_SHA1 8
+#define SEC_A_ALG_HMAC_SHA224 10
+#define SEC_A_ALG_HMAC_SHA256 11
+#define SEC_A_ALG_HMAC_MD5 12
+#define SEC_A_ALG_AES_XCBC 13
+#define SEC_A_ALG_AES_CMAC 14
+
+#define SEC_BD_W1_C_ALG_M GENMASK(31, 29)
+#define SEC_BD_W1_C_ALG_S 29
+#define SEC_C_ALG_DES 0
+#define SEC_C_ALG_3DES 1
+#define SEC_C_ALG_AES 2
+
+ u32 w1;
+
+#define SEC_BD_W2_C_GRAN_SIZE_15_0_M GENMASK(15, 0)
+#define SEC_BD_W2_C_GRAN_SIZE_15_0_S 0
+#define SEC_BD_W2_GRAN_NUM_M GENMASK(31, 16)
+#define SEC_BD_W2_GRAN_NUM_S 16
+ u32 w2;
+
+#define SEC_BD_W3_AUTH_LEN_OFFSET_M GENMASK(9, 0)
+#define SEC_BD_W3_AUTH_LEN_OFFSET_S 0
+#define SEC_BD_W3_CIPHER_LEN_OFFSET_M GENMASK(19, 10)
+#define SEC_BD_W3_CIPHER_LEN_OFFSET_S 10
+#define SEC_BD_W3_MAC_LEN_M GENMASK(24, 20)
+#define SEC_BD_W3_MAC_LEN_S 20
+#define SEC_BD_W3_A_KEY_LEN_M GENMASK(29, 25)
+#define SEC_BD_W3_A_KEY_LEN_S 25
+#define SEC_BD_W3_C_KEY_LEN_M GENMASK(31, 30)
+#define SEC_BD_W3_C_KEY_LEN_S 30
+#define SEC_KEY_LEN_AES_128 0
+#define SEC_KEY_LEN_AES_192 1
+#define SEC_KEY_LEN_AES_256 2
+#define SEC_KEY_LEN_DES 1
+#define SEC_KEY_LEN_3DES_3_KEY 1
+#define SEC_KEY_LEN_3DES_2_KEY 3
+ u32 w3;
+
+ /* W4,5 */
+ union {
+ u32 authkey_addr_lo;
+ u32 authiv_addr_lo;
+ };
+ union {
+ u32 authkey_addr_hi;
+ u32 authiv_addr_hi;
+ };
+
+ /* W6,7 */
+ u32 cipher_key_addr_lo;
+ u32 cipher_key_addr_hi;
+
+ /* W8,9 */
+ u32 cipher_iv_addr_lo;
+ u32 cipher_iv_addr_hi;
+
+ /* W10,11 */
+ u32 data_addr_lo;
+ u32 data_addr_hi;
+
+ /* W12,13 */
+ u32 mac_addr_lo;
+ u32 mac_addr_hi;
+
+ /* W14,15 */
+ u32 cipher_destin_addr_lo;
+ u32 cipher_destin_addr_hi;
+};
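+
+/*
+ * The *_M/*_S pairs above are used with the usual mask/shift idiom.
+ * An illustrative (hypothetical) example selecting CBC mode in word 0:
+ *
+ *	bd->w0 &= ~SEC_BD_W0_C_MODE_M;
+ *	bd->w0 |= (SEC_C_MODE_CBC << SEC_BD_W0_C_MODE_S) &
+ *		  SEC_BD_W0_C_MODE_M;
+ */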
+
+enum sec_mem_region {
+ SEC_COMMON = 0,
+ SEC_SAA,
+ SEC_NUM_ADDR_REGIONS
+};
+
+#define SEC_NAME_SIZE 64
+#define SEC_Q_NUM 16
+
+/**
+ * struct sec_queue_ring_cmd - store information about a SEC HW cmd ring
+ * @used: Local counter used to cheaply establish if the ring is empty.
+ * @lock: Protect against simultaneous adjusting of the read and write pointers.
+ * @vaddr: Virtual address for the ram pages used for the ring.
+ * @paddr: Physical address of the dma mapped region of ram used for the ring.
+ * @callback: Callback function called on a ring element completing.
+ */
+struct sec_queue_ring_cmd {
+ atomic_t used;
+ struct mutex lock;
+ struct sec_bd_info *vaddr;
+ dma_addr_t paddr;
+ void (*callback)(struct sec_bd_info *resp, void *ctx);
+};
+
+struct sec_debug_bd_info;
+struct sec_queue_ring_db {
+ struct sec_debug_bd_info *vaddr;
+ dma_addr_t paddr;
+};
+
+struct sec_out_bd_info;
+struct sec_queue_ring_cq {
+ struct sec_out_bd_info *vaddr;
+ dma_addr_t paddr;
+};
+
+struct sec_dev_info;
+
+enum sec_cipher_alg {
+ SEC_C_DES_ECB_64,
+ SEC_C_DES_CBC_64,
+
+ SEC_C_3DES_ECB_192_3KEY,
+ SEC_C_3DES_ECB_192_2KEY,
+
+ SEC_C_3DES_CBC_192_3KEY,
+ SEC_C_3DES_CBC_192_2KEY,
+
+ SEC_C_AES_ECB_128,
+ SEC_C_AES_ECB_192,
+ SEC_C_AES_ECB_256,
+
+ SEC_C_AES_CBC_128,
+ SEC_C_AES_CBC_192,
+ SEC_C_AES_CBC_256,
+
+ SEC_C_AES_CTR_128,
+ SEC_C_AES_CTR_192,
+ SEC_C_AES_CTR_256,
+
+ SEC_C_AES_XTS_128,
+ SEC_C_AES_XTS_256,
+
+ SEC_C_NULL,
+};
+
+/**
+ * struct sec_alg_tfm_ctx - hardware specific transformation context
+ * @cipher_alg: Cipher algorithm in use, including the encryption mode.
+ * @key: Key storage if required.
+ * @pkey: DMA address for the key storage.
+ * @req_template: Request template to save time on setup.
+ * @queue: The hardware queue associated with this tfm context.
+ * @lock: Protects key and pkey to ensure they are consistent.
+ * @auth_buf: Current context buffer for auth operations.
+ * @backlog: The backlog queue used for cases where our buffers aren't
+ * large enough.
+ */
+struct sec_alg_tfm_ctx {
+ enum sec_cipher_alg cipher_alg;
+ u8 *key;
+ dma_addr_t pkey;
+ struct sec_bd_info req_template;
+ struct sec_queue *queue;
+ struct mutex lock;
+ u8 *auth_buf;
+ struct list_head backlog;
+};
+
+/**
+ * struct sec_request - data associated with a single crypto request
+ * @elements: List of subparts of this request (hardware size restriction)
+ * @num_elements: The number of subparts (used as an optimization)
+ * @lock: Protect elements of this structure against concurrent change.
+ * @tfm_ctx: hardware specific context.
+ * @len_in: length of in sgl from upper layers
+ * @len_out: length of out sgl from upper layers
+ * @dma_iv: initialization vector - physical address
+ * @err: store used to track errors across subelements of this request.
+ * @req_base: pointer to base element of the associated crypto context.
+ * This is needed to allow shared handling of skcipher, ahash etc.
+ * @cb: completion callback.
+ * @backlog_head: list head to allow backlog maintenance.
+ *
+ * The hardware is limited in the maximum size of data that it can
+ * process from a single BD. Typically this is fairly large (32MB)
+ * but still requires the complexity of splitting the incoming
+ * skreq up into a number of elements complete with appropriate
+ * iv chaining.
+ */
+struct sec_request {
+ struct list_head elements;
+ int num_elements;
+ struct mutex lock;
+ struct sec_alg_tfm_ctx *tfm_ctx;
+ int len_in;
+ int len_out;
+ dma_addr_t dma_iv;
+ int err;
+ struct crypto_async_request *req_base;
+ void (*cb)(struct sec_bd_info *resp, struct crypto_async_request *req);
+ struct list_head backlog_head;
+};
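+
+/*
+ * Sketch of the splitting rule described above; SEC_REQ_LIMIT is a
+ * hypothetical name used only to illustrate the per-BD size restriction:
+ *
+ *	#define SEC_REQ_LIMIT	SZ_32M
+ *
+ *	sec_req->num_elements = DIV_ROUND_UP(skreq->cryptlen,
+ *					     SEC_REQ_LIMIT);
+ */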
+
+/**
+ * struct sec_request_el - A subpart of a request.
+ * @head: allow us to attach this to the list in the sec_request
+ * @req: hardware block descriptor corresponding to this request subpart
+ * @in: hardware sgl for input - virtual address
+ * @dma_in: hardware sgl for input - physical address
+ * @sgl_in: scatterlist for this request subpart
+ * @out: hardware sgl for output - virtual address
+ * @dma_out: hardware sgl for output - physical address
+ * @sgl_out: scatterlist for this request subpart
+ * @sec_req: The request which this subpart forms a part of
+ * @el_length: Number of bytes in this subpart. Needed to locate
+ * last ivsize chunk for iv chaining.
+ */
+struct sec_request_el {
+ struct list_head head;
+ struct sec_bd_info req;
+ struct sec_hw_sgl *in;
+ dma_addr_t dma_in;
+ struct scatterlist *sgl_in;
+ struct sec_hw_sgl *out;
+ dma_addr_t dma_out;
+ struct scatterlist *sgl_out;
+ struct sec_request *sec_req;
+ size_t el_length;
+};
+
+/**
+ * struct sec_queue - All the information about a HW queue
+ * @dev_info: The parent SEC device to which this queue belongs.
+ * @task_irq: Completion interrupt for the queue.
+ * @name: Human readable queue description also used as irq name.
+ * @ring_cmd: The HW command ring associated with this queue.
+ * @ring_cq: The HW completion (out of order) ring associated with this queue.
+ * @ring_db: The HW error/debug ring associated with this queue.
+ * @regs: The iomapped device registers.
+ * @queue_id: Index of the queue used for naming and resource selection.
+ * @in_use: Flag to say if the queue is in use.
+ * @expected: The next expected element to finish assuming we were in order.
+ * @unprocessed: A bitmap to track which OoO elements are done but not handled.
+ * @softqueue: A software queue used when chaining requirements prevent direct
+ * use of the hardware queues.
+ * @havesoftqueue: A flag to say we have a soft queue - as we may need one for
+ * the current mode.
+ * @queuelock: Protects the soft queue from concurrent changes that could
+ * otherwise lose data.
+ * @shadow: Pointers back to the shadow copy of the hardware ring element,
+ * needed because we can't store any context reference in the bd element.
+ */
+struct sec_queue {
+ struct sec_dev_info *dev_info;
+ int task_irq;
+ char name[SEC_NAME_SIZE];
+ struct sec_queue_ring_cmd ring_cmd;
+ struct sec_queue_ring_cq ring_cq;
+ struct sec_queue_ring_db ring_db;
+ void __iomem *regs;
+ u32 queue_id;
+ bool in_use;
+ int expected;
+
+ DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN);
+ DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *));
+ bool havesoftqueue;
+ struct mutex queuelock;
+ void *shadow[SEC_QUEUE_LEN];
+};
+
+/**
+ * struct sec_hw_sge - One entry of the 64-element SEC HW SGL
+ * @buf: The IOV dma address for this entry.
+ * @len: Length of this IOV.
+ * @pad: Reserved space.
+ */
+struct sec_hw_sge {
+ dma_addr_t buf;
+ unsigned int len;
+ unsigned int pad;
+};
+
+/**
+ * struct sec_hw_sgl - One hardware SGL entry.
+ * @next_sgl: The next entry if we need to chain dma address. Null if last.
+ * @entry_sum_in_chain: The full count of SGEs - only matters for first SGL.
+ * @entry_sum_in_sgl: The number of SGEs in this SGL element.
+ * @flag: Unused in skciphers.
+ * @serial_num: Unused in skciphers.
+ * @cpuid: Currently unused.
+ * @data_bytes_in_sgl: Count of bytes from all SGEs in this SGL.
+ * @next: Virtual address used to stash the next sgl - useful in completion.
+ * @reserved: A reserved field not currently used.
+ * @sge_entries: The (up to) 64 Scatter Gather Entries, representing IOVs.
+ * @node: Currently unused.
+ */
+struct sec_hw_sgl {
+ dma_addr_t next_sgl;
+ u16 entry_sum_in_chain;
+ u16 entry_sum_in_sgl;
+ u32 flag;
+ u64 serial_num;
+ u32 cpuid;
+ u32 data_bytes_in_sgl;
+ struct sec_hw_sgl *next;
+ u64 reserved;
+ struct sec_hw_sge sge_entries[SEC_MAX_SGE_NUM];
+ u8 node[16];
+};
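+
+/*
+ * Illustrative chaining sketch (sgl_a, sgl_b and sgl_b_dma are
+ * hypothetical names): a follow-on SGL is linked through both the dma
+ * and virtual next pointers, and only the head of the chain carries
+ * the chain-wide SGE count.
+ *
+ *	sgl_a->next_sgl = sgl_b_dma;
+ *	sgl_a->next = sgl_b;
+ *	sgl_a->entry_sum_in_chain = sgl_a->entry_sum_in_sgl +
+ *				    sgl_b->entry_sum_in_sgl;
+ */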
+
+struct dma_pool;
+
+/**
+ * struct sec_dev_info - The full SEC unit comprising queues and processors.
+ * @sec_id: Index used to track which SEC this is when more than one is present.
+ * @num_saas: The number of backend processing engines (SAAs) enabled.
+ * @regs: iomapped register regions shared by whole SEC unit.
+ * @dev_lock: Protects concurrent queue allocation / freeing for the SEC.
+ * @queues_in_use: The number of queues currently allocated.
+ * @queues: The 16 queues that this SEC instance provides.
+ * @dev: Device pointer.
+ * @hw_sgl_pool: DMA pool used to minimise mapping for the scatter gather lists.
+ */
+struct sec_dev_info {
+ int sec_id;
+ int num_saas;
+ void __iomem *regs[SEC_NUM_ADDR_REGIONS];
+ struct mutex dev_lock;
+ int queues_in_use;
+ struct sec_queue queues[SEC_Q_NUM];
+ struct device *dev;
+ struct dma_pool *hw_sgl_pool;
+};
+
+int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx);
+bool sec_queue_can_enqueue(struct sec_queue *queue, int num);
+int sec_queue_stop_release(struct sec_queue *queue);
+struct sec_queue *sec_queue_alloc_start_safe(void);
+bool sec_queue_empty(struct sec_queue *queue);
+
+/* Algorithm specific elements from sec_algs.c */
+void sec_alg_callback(struct sec_bd_info *resp, void *ctx);
+int sec_algs_register(void);
+void sec_algs_unregister(void);
+
+#endif /* _SEC_DRV_H_ */
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 4e86f864a952..7e71043457a6 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
@@ -33,7 +30,19 @@ MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
u32 val, htable_offset;
- int i;
+ int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
+
+ if (priv->version == EIP197B) {
+ cs_rc_max = EIP197B_CS_RC_MAX;
+ cs_ht_wc = EIP197B_CS_HT_WC;
+ cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
+ cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
+ } else {
+ cs_rc_max = EIP197D_CS_RC_MAX;
+ cs_ht_wc = EIP197D_CS_HT_WC;
+ cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
+ cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
+ }
/* Enable the record cache memory access */
val = readl(priv->base + EIP197_CS_RAM_CTRL);
@@ -54,7 +63,7 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
writel(val, priv->base + EIP197_TRC_PARAMS);
/* Clear all records */
- for (i = 0; i < EIP197_CS_RC_MAX; i++) {
+ for (i = 0; i < cs_rc_max; i++) {
u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
@@ -64,14 +73,14 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
if (i == 0)
val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
- else if (i == EIP197_CS_RC_MAX - 1)
+ else if (i == cs_rc_max - 1)
val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
writel(val, priv->base + offset + sizeof(u32));
}
/* Clear the hash table entries */
- htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE;
- for (i = 0; i < 64; i++)
+ htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
+ for (i = 0; i < cs_ht_wc; i++)
writel(GENMASK(29, 0),
priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));
@@ -82,23 +91,23 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
/* Write head and tail pointers of the record free chain */
val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
- EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1);
+ EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
writel(val, priv->base + EIP197_TRC_FREECHAIN);
/* Configure the record cache #1 */
- val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) |
- EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX);
+ val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
+ EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
writel(val, priv->base + EIP197_TRC_PARAMS2);
/* Configure the record cache #2 */
- val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) |
+ val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
EIP197_TRC_PARAMS_HTABLE_SZ(2);
writel(val, priv->base + EIP197_TRC_PARAMS);
}
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
- const struct firmware *fw, u32 ctrl,
+ const struct firmware *fw, int pe, u32 ctrl,
u32 prog_en)
{
const u32 *data = (const u32 *)fw->data;
@@ -112,7 +121,7 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
EIP197_PE(priv) + ctrl);
/* Enable access to the program memory */
- writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
+ writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
/* Write the firmware */
for (i = 0; i < fw->size / sizeof(u32); i++)
@@ -120,7 +129,7 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
/* Disable access to the program memory */
- writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
+ writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
/* Release engine from reset */
val = readl(EIP197_PE(priv) + ctrl);
@@ -132,35 +141,62 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
const struct firmware *fw[FW_NB];
- int i, j, ret = 0;
+ char fw_path[31], *dir = NULL;
+ int i, j, ret = 0, pe;
u32 val;
+ switch (priv->version) {
+ case EIP197B:
+ dir = "eip197b";
+ break;
+ case EIP197D:
+ dir = "eip197d";
+ break;
+ default:
+ /* No firmware is required */
+ return 0;
+ }
+
for (i = 0; i < FW_NB; i++) {
- ret = request_firmware(&fw[i], fw_name[i], priv->dev);
+ snprintf(fw_path, sizeof(fw_path), "inside-secure/%s/%s", dir, fw_name[i]);
+ ret = request_firmware(&fw[i], fw_path, priv->dev);
if (ret) {
- dev_err(priv->dev,
- "Failed to request firmware %s (%d)\n",
- fw_name[i], ret);
- goto release_fw;
- }
- }
-
- /* Clear the scratchpad memory */
- val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
- val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
- EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
- EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
- EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
- writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
+ if (priv->version != EIP197B)
+ goto release_fw;
- memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
- EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
-
- eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
- EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
+ /* Fall back to the old firmware location for the
+ * EIP197b.
+ */
+ ret = request_firmware(&fw[i], fw_name[i], priv->dev);
+ if (ret) {
+ dev_err(priv->dev,
+ "Failed to request firmware %s (%d)\n",
+ fw_name[i], ret);
+ goto release_fw;
+ }
+ }
+ }
- eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL,
- EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
+ for (pe = 0; pe < priv->config.pes; pe++) {
+ /* Clear the scratchpad memory */
+ val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+ val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
+ EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
+ EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
+ EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
+ writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+
+ memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
+ EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
+
+ eip197_write_firmware(priv, fw[FW_IFPP], pe,
+ EIP197_PE_ICE_FPP_CTRL(pe),
+ EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
+
+ eip197_write_firmware(priv, fw[FW_IPUE], pe,
+ EIP197_PE_ICE_PUE_CTRL(pe),
+ EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
+ }
release_fw:
for (j = 0; j < i; j++)
@@ -256,7 +292,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
u32 version, val;
- int i, ret;
+ int i, ret, pe;
/* Determine endianess and configure byte swap */
version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
@@ -267,6 +303,10 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
+ /* For EIP197 set maximum number of TX commands to 2^5 = 32 */
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
+
writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
/* Configure wr/rd cache values */
@@ -282,82 +322,94 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
/* Clear any pending interrupt */
writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- /* Data Fetch Engine configuration */
-
- /* Reset all DFE threads */
- writel(EIP197_DxE_THR_CTRL_RESET_PE,
- EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
-
- if (priv->version == EIP197) {
- /* Reset HIA input interface arbiter */
- writel(EIP197_HIA_RA_PE_CTRL_RESET,
- EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
- }
-
- /* DMA transfer size to use */
- val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
- val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
- val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
- val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
- val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
- writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);
-
- /* Leave the DFE threads reset state */
- writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
-
- /* Configure the procesing engine thresholds */
- writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
- EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
- writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
- EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);
-
- if (priv->version == EIP197) {
- /* enable HIA input interface arbiter and rings */
- writel(EIP197_HIA_RA_PE_CTRL_EN |
- GENMASK(priv->config.rings - 1, 0),
- EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
- }
-
- /* Data Store Engine configuration */
-
- /* Reset all DSE threads */
- writel(EIP197_DxE_THR_CTRL_RESET_PE,
- EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
-
- /* Wait for all DSE threads to complete */
- while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
- GENMASK(15, 12)) != GENMASK(15, 12))
- ;
-
- /* DMA transfer size to use */
- val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
- val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
- val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
- val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
- /* FIXME: instability issues can occur for EIP97 but disabling it impact
- * performances.
- */
- if (priv->version == EIP197)
- val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
- writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);
+ /* Processing Engine configuration */
+ for (pe = 0; pe < priv->config.pes; pe++) {
+ /* Data Fetch Engine configuration */
- /* Leave the DSE threads reset state */
- writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
+ /* Reset all DFE threads */
+ writel(EIP197_DxE_THR_CTRL_RESET_PE,
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
- /* Configure the procesing engine thresholds */
- writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
- EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);
+ if (priv->version == EIP197B || priv->version == EIP197D) {
+ /* Reset HIA input interface arbiter */
+ writel(EIP197_HIA_RA_PE_CTRL_RESET,
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+ }
- /* Processing Engine configuration */
+ /* DMA transfer size to use */
+ val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
+ val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
+ EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
+ val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
+ EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
+ val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
+ val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
+ writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));
+
+ /* Leave the DFE threads reset state */
+ writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+ /* Configure the processing engine thresholds */
+ writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+ EIP197_PE_IN_xBUF_THRES_MAX(9),
+ EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
+ writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+ EIP197_PE_IN_xBUF_THRES_MAX(7),
+ EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
+
+ if (priv->version == EIP197B || priv->version == EIP197D) {
+ /* enable HIA input interface arbiter and rings */
+ writel(EIP197_HIA_RA_PE_CTRL_EN |
+ GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+ }
- /* H/W capabilities selection */
- val = EIP197_FUNCTION_RSVD;
- val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
- val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
- val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
- val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
- val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
- writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
+ /* Data Store Engine configuration */
+
+ /* Reset all DSE threads */
+ writel(EIP197_DxE_THR_CTRL_RESET_PE,
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+ /* Wait for all DSE threads to complete */
+ while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
+ GENMASK(15, 12)) != GENMASK(15, 12))
+ ;
+
+ /* DMA transfer size to use */
+ val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
+ val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
+ EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
+ val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
+ val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
+ /* FIXME: instability issues can occur for EIP97 but disabling it
+ * impacts performance.
+ */
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+ writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
+
+ /* Leave the DSE threads reset state */
+ writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+ /* Configure the processing engine thresholds */
+ writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
+ EIP197_PE_OUT_DBUF_THRES_MAX(8),
+ EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
+
+ /* Processing Engine configuration */
+
+ /* H/W capabilities selection */
+ val = EIP197_FUNCTION_RSVD;
+ val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
+ val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
+ val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
+ val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
+ val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
+ val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
+ val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
+ val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
+ writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
+ }
/* Command Descriptor Rings prepare */
for (i = 0; i < priv->config.rings; i++) {
@@ -408,18 +460,20 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
- /* Enable command descriptor rings */
- writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
+ for (pe = 0; pe < priv->config.pes; pe++) {
+ /* Enable command descriptor rings */
+ writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
- /* Enable result descriptor rings */
- writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
+ /* Enable result descriptor rings */
+ writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+ }
/* Clear any HIA interrupt */
writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- if (priv->version == EIP197) {
+ if (priv->version == EIP197B || priv->version == EIP197D) {
eip197_trc_cache_init(priv);
ret = eip197_load_firmwares(priv);
@@ -452,7 +506,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
struct crypto_async_request *req, *backlog;
struct safexcel_context *ctx;
- struct safexcel_request *request;
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
/* If a request wasn't properly dequeued because of a lack of resources,
@@ -476,16 +529,10 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
}
handle_req:
- request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
- if (!request)
- goto request_failed;
-
ctx = crypto_tfm_ctx(req->tfm);
- ret = ctx->send(req, ring, request, &commands, &results);
- if (ret) {
- kfree(request);
+ ret = ctx->send(req, ring, &commands, &results);
+ if (ret)
goto request_failed;
- }
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -494,14 +541,8 @@ handle_req:
* to the engine because the input data was cached, continue to
* dequeue other requests as this is valid and not an error.
*/
- if (!commands && !results) {
- kfree(request);
+ if (!commands && !results)
continue;
- }
-
- spin_lock_bh(&priv->ring[ring].egress_lock);
- list_add_tail(&request->list, &priv->ring[ring].list);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
cdesc += commands;
rdesc += results;
@@ -519,7 +560,7 @@ finalize:
if (!nreq)
return;
- spin_lock_bh(&priv->ring[ring].egress_lock);
+ spin_lock_bh(&priv->ring[ring].lock);
priv->ring[ring].requests += nreq;
@@ -528,7 +569,7 @@ finalize:
priv->ring[ring].busy = true;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ spin_unlock_bh(&priv->ring[ring].lock);
/* let the RDR know we have pending descriptors */
writel((rdesc * priv->config.rd_offset) << 2,
@@ -560,6 +601,24 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
return -EINVAL;
}
+inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc,
+ struct crypto_async_request *req)
+{
+ int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
+
+ priv->ring[ring].rdr_req[i] = req;
+}
+
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
+{
+ int i = safexcel_ring_first_rdr_index(priv, ring);
+
+ return priv->ring[ring].rdr_req[i];
+}
+
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
struct safexcel_command_desc *cdesc;
@@ -588,21 +647,16 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error)
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
- dma_addr_t ctxr_dma, int ring,
- struct safexcel_request *request)
+ dma_addr_t ctxr_dma, int ring)
{
struct safexcel_command_desc *cdesc;
struct safexcel_result_desc *rdesc;
int ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
-
/* Prepare command descriptor */
cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
- if (IS_ERR(cdesc)) {
- ret = PTR_ERR(cdesc);
- goto unlock;
- }
+ if (IS_ERR(cdesc))
+ return PTR_ERR(cdesc);
cdesc->control_data.type = EIP197_TYPE_EXTENDED;
cdesc->control_data.options = 0;
@@ -617,21 +671,20 @@ int safexcel_invalidate_cache(struct crypto_async_request *async,
goto cdesc_rollback;
}
- request->req = async;
- goto unlock;
+ safexcel_rdr_req_set(priv, ring, rdesc, async);
+
+ return ret;
cdesc_rollback:
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
-unlock:
- spin_unlock_bh(&priv->ring[ring].egress_lock);
return ret;
}
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
int ring)
{
- struct safexcel_request *sreq;
+ struct crypto_async_request *req;
struct safexcel_context *ctx;
int ret, i, nreq, ndesc, tot_descs, handled = 0;
bool should_complete;
@@ -646,28 +699,22 @@ handle_results:
goto requests_left;
for (i = 0; i < nreq; i++) {
- spin_lock_bh(&priv->ring[ring].egress_lock);
- sreq = list_first_entry(&priv->ring[ring].list,
- struct safexcel_request, list);
- list_del(&sreq->list);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
- ctx = crypto_tfm_ctx(sreq->req->tfm);
- ndesc = ctx->handle_result(priv, ring, sreq->req,
+ req = safexcel_rdr_req_get(priv, ring);
+
+ ctx = crypto_tfm_ctx(req->tfm);
+ ndesc = ctx->handle_result(priv, ring, req,
&should_complete, &ret);
if (ndesc < 0) {
- kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
goto acknowledge;
}
if (should_complete) {
local_bh_disable();
- sreq->req->complete(sreq->req, ret);
+ req->complete(req, ret);
local_bh_enable();
}
- kfree(sreq);
tot_descs += ndesc;
handled++;
}
@@ -686,7 +733,7 @@ acknowledge:
goto handle_results;
requests_left:
- spin_lock_bh(&priv->ring[ring].egress_lock);
+ spin_lock_bh(&priv->ring[ring].lock);
priv->ring[ring].requests -= handled;
safexcel_try_push_requests(priv, ring);
@@ -694,7 +741,7 @@ requests_left:
if (!priv->ring[ring].requests)
priv->ring[ring].busy = false;
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ spin_unlock_bh(&priv->ring[ring].lock);
}
static void safexcel_dequeue_work(struct work_struct *work)
@@ -785,17 +832,29 @@ static int safexcel_request_ring_irq(struct platform_device *pdev, const char *n
}
static struct safexcel_alg_template *safexcel_algs[] = {
+ &safexcel_alg_ecb_des,
+ &safexcel_alg_cbc_des,
+ &safexcel_alg_ecb_des3_ede,
+ &safexcel_alg_cbc_des3_ede,
&safexcel_alg_ecb_aes,
&safexcel_alg_cbc_aes,
+ &safexcel_alg_md5,
&safexcel_alg_sha1,
&safexcel_alg_sha224,
&safexcel_alg_sha256,
+ &safexcel_alg_sha384,
+ &safexcel_alg_sha512,
+ &safexcel_alg_hmac_md5,
&safexcel_alg_hmac_sha1,
&safexcel_alg_hmac_sha224,
&safexcel_alg_hmac_sha256,
+ &safexcel_alg_hmac_sha384,
+ &safexcel_alg_hmac_sha512,
&safexcel_alg_authenc_hmac_sha1_cbc_aes,
&safexcel_alg_authenc_hmac_sha224_cbc_aes,
&safexcel_alg_authenc_hmac_sha256_cbc_aes,
+ &safexcel_alg_authenc_hmac_sha384_cbc_aes,
+ &safexcel_alg_authenc_hmac_sha512_cbc_aes,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
@@ -805,6 +864,9 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
safexcel_algs[i]->priv = priv;
+ if (!(safexcel_algs[i]->engines & priv->version))
+ continue;
+
if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -820,6 +882,9 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
fail:
for (j = 0; j < i; j++) {
+ if (!(safexcel_algs[j]->engines & priv->version))
+ continue;
+
if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -836,6 +901,9 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
int i;
for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
+ if (!(safexcel_algs[i]->engines & priv->version))
+ continue;
+
if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -847,9 +915,21 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
- u32 val, mask;
+ u32 val, mask = 0;
val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+
+ /* Read number of PEs from the engine */
+ switch (priv->version) {
+ case EIP197B:
+ case EIP197D:
+ mask = EIP197_N_PES_MASK;
+ break;
+ default:
+ mask = EIP97_N_PES_MASK;
+ }
+ priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
+
val = (val & GENMASK(27, 25)) >> 25;
mask = BIT(val) - 1;
@@ -867,7 +947,9 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
struct safexcel_register_offsets *offsets = &priv->offsets;
- if (priv->version == EIP197) {
+ switch (priv->version) {
+ case EIP197B:
+ case EIP197D:
offsets->hia_aic = EIP197_HIA_AIC_BASE;
offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
@@ -878,7 +960,8 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
offsets->pe = EIP197_PE_BASE;
- } else {
+ break;
+ case EIP97IES:
offsets->hia_aic = EIP97_HIA_AIC_BASE;
offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
@@ -889,6 +972,7 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
offsets->pe = EIP97_PE_BASE;
+ break;
}
}
@@ -906,6 +990,9 @@ static int safexcel_probe(struct platform_device *pdev)
priv->dev = dev;
priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ priv->flags |= EIP197_TRC_CACHE;
+
safexcel_init_register_offsets(priv);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -957,6 +1044,13 @@ static int safexcel_probe(struct platform_device *pdev)
safexcel_configure(priv);
+ priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
+ GFP_KERNEL);
+ if (!priv->ring) {
+ ret = -ENOMEM;
+ goto err_reg_clk;
+ }
+
for (i = 0; i < priv->config.rings; i++) {
char irq_name[6] = {0}; /* "ringX\0" */
char wq_name[9] = {0}; /* "wq_ringX\0" */
@@ -969,6 +1063,14 @@ static int safexcel_probe(struct platform_device *pdev)
if (ret)
goto err_reg_clk;
+ priv->ring[i].rdr_req = devm_kzalloc(dev,
+ sizeof(*priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
+ GFP_KERNEL);
+ if (!priv->ring[i].rdr_req) {
+ ret = -ENOMEM;
+ goto err_reg_clk;
+ }
+
ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
if (!ring_irq) {
ret = -ENOMEM;
@@ -1004,9 +1106,7 @@ static int safexcel_probe(struct platform_device *pdev)
crypto_init_queue(&priv->ring[i].queue,
EIP197_DEFAULT_RING_SIZE);
- INIT_LIST_HEAD(&priv->ring[i].list);
spin_lock_init(&priv->ring[i].lock);
- spin_lock_init(&priv->ring[i].egress_lock);
spin_lock_init(&priv->ring[i].queue_lock);
}
@@ -1034,6 +1134,24 @@ err_core_clk:
return ret;
}
+static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->config.rings; i++) {
+ /* clear any pending interrupt */
+ writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
+ writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
+
+ /* Reset the CDR base address */
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+ /* Reset the RDR base address */
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ }
+}
static int safexcel_remove(struct platform_device *pdev)
{
@@ -1041,6 +1159,8 @@ static int safexcel_remove(struct platform_device *pdev)
int i;
safexcel_unregister_algorithms(priv);
+ safexcel_hw_reset_rings(priv);
+
clk_disable_unprepare(priv->clk);
for (i = 0; i < priv->config.rings; i++)
@@ -1051,12 +1171,26 @@ static int safexcel_remove(struct platform_device *pdev)
static const struct of_device_id safexcel_of_match_table[] = {
{
+ .compatible = "inside-secure,safexcel-eip97ies",
+ .data = (void *)EIP97IES,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197b",
+ .data = (void *)EIP197B,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197d",
+ .data = (void *)EIP197D,
+ },
+ {
+ /* Deprecated. Kept for backward compatibility. */
.compatible = "inside-secure,safexcel-eip97",
- .data = (void *)EIP97,
+ .data = (void *)EIP97IES,
},
{
+ /* Deprecated. Kept for backward compatibility. */
.compatible = "inside-secure,safexcel-eip197",
- .data = (void *)EIP197,
+ .data = (void *)EIP197B,
},
{},
};
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 8b3ee9b59f53..65624a81f0fd 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef __SAFEXCEL_H__
@@ -95,13 +92,13 @@
#define EIP197_HIA_xDR_STAT 0x003c
/* register offsets */
-#define EIP197_HIA_DFE_CFG 0x0000
-#define EIP197_HIA_DFE_THR_CTRL 0x0000
-#define EIP197_HIA_DFE_THR_STAT 0x0004
-#define EIP197_HIA_DSE_CFG 0x0000
-#define EIP197_HIA_DSE_THR_CTRL 0x0000
-#define EIP197_HIA_DSE_THR_STAT 0x0004
-#define EIP197_HIA_RA_PE_CTRL 0x0010
+#define EIP197_HIA_DFE_CFG(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_CTRL(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_STAT(n) (0x0004 + (128 * (n)))
+#define EIP197_HIA_DSE_CFG(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_CTRL(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_STAT(n) (0x0004 + (128 * (n)))
+#define EIP197_HIA_RA_PE_CTRL(n) (0x0010 + (8 * (n)))
#define EIP197_HIA_RA_PE_STAT 0x0014
#define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000)
#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0xe008 - EIP197_HIA_AIC_R_OFF(r))
@@ -114,18 +111,18 @@
#define EIP197_HIA_MST_CTRL 0xfff4
#define EIP197_HIA_OPTIONS 0xfff8
#define EIP197_HIA_VERSION 0xfffc
-#define EIP197_PE_IN_DBUF_THRES 0x0000
-#define EIP197_PE_IN_TBUF_THRES 0x0100
-#define EIP197_PE_ICE_SCRATCH_RAM 0x0800
-#define EIP197_PE_ICE_PUE_CTRL 0x0c80
-#define EIP197_PE_ICE_SCRATCH_CTRL 0x0d04
-#define EIP197_PE_ICE_FPP_CTRL 0x0d80
-#define EIP197_PE_ICE_RAM_CTRL 0x0ff0
-#define EIP197_PE_EIP96_FUNCTION_EN 0x1004
-#define EIP197_PE_EIP96_CONTEXT_CTRL 0x1008
-#define EIP197_PE_EIP96_CONTEXT_STAT 0x100c
-#define EIP197_PE_OUT_DBUF_THRES 0x1c00
-#define EIP197_PE_OUT_TBUF_THRES 0x1d00
+#define EIP197_PE_IN_DBUF_THRES(n) (0x0000 + (0x2000 * (n)))
+#define EIP197_PE_IN_TBUF_THRES(n) (0x0100 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_RAM(n) (0x0800 + (0x2000 * (n)))
+#define EIP197_PE_ICE_PUE_CTRL(n) (0x0c80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n)))
+#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n)))
+#define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n)))
+#define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n)))
#define EIP197_MST_CTRL 0xfff4
/* EIP197-specific registers, no indirection */
@@ -184,6 +181,11 @@
#define EIP197_HIA_RA_PE_CTRL_RESET BIT(31)
#define EIP197_HIA_RA_PE_CTRL_EN BIT(30)
+/* EIP197_HIA_OPTIONS */
+#define EIP197_N_PES_OFFSET 4
+#define EIP197_N_PES_MASK GENMASK(4, 0)
+#define EIP97_N_PES_MASK GENMASK(2, 0)
+
/* EIP197_HIA_AIC_R_ENABLE_CTRL */
#define EIP197_CDR_IRQ(n) BIT((n) * 2)
#define EIP197_RDR_IRQ(n) BIT((n) * 2 + 1)
@@ -217,6 +219,7 @@
#define WR_CACHE_4BITS (WR_CACHE_3BITS << 1 | BIT(0))
#define EIP197_MST_CTRL_RD_CACHE(n) (((n) & 0xf) << 0)
#define EIP197_MST_CTRL_WD_CACHE(n) (((n) & 0xf) << 4)
+#define EIP197_MST_CTRL_TX_MAX_CMD(n) (((n) & 0xf) << 20)
#define EIP197_MST_CTRL_BYTE_SWAP BIT(24)
#define EIP197_MST_CTRL_NO_BYTE_SWAP BIT(25)
@@ -287,7 +290,7 @@ struct safexcel_context_record {
u32 control0;
u32 control1;
- __le32 data[24];
+ __le32 data[40];
} __packed;
/* control0 */
@@ -305,14 +308,19 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_NO_FINISH_HASH BIT(5)
#define CONTEXT_CONTROL_SIZE(n) ((n) << 8)
#define CONTEXT_CONTROL_KEY_EN BIT(16)
+#define CONTEXT_CONTROL_CRYPTO_ALG_DES (0x0 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_3DES (0x2 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES128 (0x5 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17)
#define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21)
#define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21)
+#define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA224 (0x4 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA384 (0x6 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA512 (0x5 << 23)
#define CONTEXT_CONTROL_INV_FR (0x5 << 24)
#define CONTEXT_CONTROL_INV_TR (0x6 << 24)
@@ -327,6 +335,11 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_COUNTER_MODE BIT(10)
#define CONTEXT_CONTROL_HASH_STORE BIT(19)
+/* The hash counter given to the engine in the context has a granularity of
+ * 64 bytes.
+ */
+#define EIP197_COUNTER_BLOCK_SIZE 64
+
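A worked example of the conversion this constant enables: the two u64 halves of the processed byte count are folded into the engine's block counter with the formula the patch adds to safexcel_context_control(), including its guard that the result fits the context field. Standalone sketch:

#include <stdio.h>
#include <stdint.h>

#define EIP197_COUNTER_BLOCK_SIZE 64

int main(void)
{
	/* 128-bit byte count kept as two u64 halves, as in the driver */
	uint64_t processed[2] = { 4096, 0 };
	uint64_t count;

	count = processed[0] / EIP197_COUNTER_BLOCK_SIZE;
	count += (0xffffffffULL / EIP197_COUNTER_BLOCK_SIZE) * processed[1];

	/* the driver refuses counts that do not fit the engine's field */
	if (count & 0xffff0000)
		printf("input too big for the context counter\n");
	else
		printf("%llu bytes -> %llu blocks\n",
		       (unsigned long long)processed[0],
		       (unsigned long long)count);
	return 0;
}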
/* EIP197_CS_RAM_CTRL */
#define EIP197_TRC_ENABLE_0 BIT(4)
#define EIP197_TRC_ENABLE_1 BIT(5)
@@ -349,13 +362,19 @@ struct safexcel_context_record {
#define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18)
/* Cache helpers */
-#define EIP197_CS_RC_MAX 52
+#define EIP197B_CS_RC_MAX 52
+#define EIP197D_CS_RC_MAX 96
#define EIP197_CS_RC_SIZE (4 * sizeof(u32))
#define EIP197_CS_RC_NEXT(x) (x)
#define EIP197_CS_RC_PREV(x) ((x) << 10)
#define EIP197_RC_NULL 0x3ff
-#define EIP197_CS_TRC_REC_WC 59
-#define EIP197_CS_TRC_LG_REC_WC 73
+#define EIP197B_CS_TRC_REC_WC 59
+#define EIP197D_CS_TRC_REC_WC 64
+#define EIP197B_CS_TRC_LG_REC_WC 73
+#define EIP197D_CS_TRC_LG_REC_WC 80
+#define EIP197B_CS_HT_WC 64
+#define EIP197D_CS_HT_WC 256
+
/* Result data */
struct result_data_desc {
@@ -450,6 +469,7 @@ struct safexcel_control_data_desc {
#define EIP197_OPTION_MAGIC_VALUE BIT(0)
#define EIP197_OPTION_64BIT_CTX BIT(1)
#define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8)
+#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
#define EIP197_TYPE_EXTENDED 0x3
@@ -480,7 +500,7 @@ enum eip197_fw {
FW_NB
};
-struct safexcel_ring {
+struct safexcel_desc_ring {
void *base;
void *base_end;
dma_addr_t base_dma;
@@ -489,8 +509,7 @@ struct safexcel_ring {
void *write;
void *read;
- /* number of elements used in the ring */
- unsigned nr;
+ /* descriptor element offset */
unsigned offset;
};
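The renamed safexcel_desc_ring drops the element counter and keeps only the base/base_end bounds, the read/write pointers, and a fixed per-descriptor offset. A sketch of how a wrapping read-pointer advance could work over these fields (the helper body is an assumption, not taken from the driver):

#include <stddef.h>

/* Mirrors the safexcel_desc_ring fields used for traversal. */
struct desc_ring {
	void *base;
	void *base_end;
	void *read;
	unsigned int offset;	/* size of one descriptor slot */
};

/* Hypothetical helper: return the current descriptor and advance
 * the read pointer by one slot, wrapping at the end of the ring. */
static void *ring_next_rptr(struct desc_ring *ring)
{
	void *ptr = ring->read;
	char *next = (char *)ring->read + ring->offset;

	if (next >= (char *)ring->base_end)
		next = ring->base;
	ring->read = next;
	return ptr;
}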
@@ -500,12 +519,8 @@ enum safexcel_alg_type {
SAFEXCEL_ALG_TYPE_AHASH,
};
-struct safexcel_request {
- struct list_head list;
- struct crypto_async_request *req;
-};
-
struct safexcel_config {
+ u32 pes;
u32 rings;
u32 cd_size;
@@ -521,9 +536,40 @@ struct safexcel_work_data {
int ring;
};
+struct safexcel_ring {
+ spinlock_t lock;
+
+ struct workqueue_struct *workqueue;
+ struct safexcel_work_data work_data;
+
+ /* command/result rings */
+ struct safexcel_desc_ring cdr;
+ struct safexcel_desc_ring rdr;
+
+ /* result ring crypto API request */
+ struct crypto_async_request **rdr_req;
+
+ /* queue */
+ struct crypto_queue queue;
+ spinlock_t queue_lock;
+
+ /* Number of requests in the engine. */
+ int requests;
+
+ /* The ring is currently handling at least one request */
+ bool busy;
+
+ /* Store for current requests when bailing out of the dequeueing
+ * function when not enough resources are available.
+ */
+ struct crypto_async_request *req;
+ struct crypto_async_request *backlog;
+};
+
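rdr_req replaces the old egress list: one crypto_async_request pointer per result-descriptor slot, so the owner of a completed descriptor is found by index rather than by walking a list under a lock. A small model of the set/get pairing (the index computation is an assumption about what safexcel_ring_rdr_rdesc_index() does):

#include <stdio.h>
#include <stdint.h>

struct rdesc { uint32_t data[8]; };		/* stand-in result descriptor */

struct ring {
	struct rdesc *rdr_base;			/* RDR ring memory */
	unsigned int desc_size;			/* bytes per descriptor slot */
	void *rdr_req[16];			/* request per first rdesc */
};

/* Hypothetical model of safexcel_ring_rdr_rdesc_index(): byte offset
 * of the descriptor divided by the slot size. */
static int rdesc_index(struct ring *r, struct rdesc *d)
{
	return (int)(((char *)d - (char *)r->rdr_base) / r->desc_size);
}

int main(void)
{
	static struct rdesc slots[16];
	struct ring r = { .rdr_base = slots, .desc_size = sizeof(struct rdesc) };
	int req = 42;

	r.rdr_req[rdesc_index(&r, &slots[3])] = &req;	/* rdr_req_set */
	printf("request found: %d\n", *(int *)r.rdr_req[3]);	/* rdr_req_get */
	return 0;
}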
enum safexcel_eip_version {
- EIP97,
- EIP197,
+ EIP97IES = BIT(0),
+ EIP197B = BIT(1),
+ EIP197D = BIT(2),
};
struct safexcel_register_offsets {
@@ -539,6 +585,10 @@ struct safexcel_register_offsets {
u32 pe;
};
+enum safexcel_flags {
+ EIP197_TRC_CACHE = BIT(0),
+};
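Turning the transform record cache into a flag lets call sites test a feature instead of comparing engine models; probe derives the flag once from the matched version. Minimal sketch of the pattern:

#include <stdio.h>

#define EIP97IES		(1u << 0)
#define EIP197B			(1u << 1)
#define EIP197D			(1u << 2)
#define EIP197_TRC_CACHE	(1u << 0)	/* feature flag, separate namespace */

int main(void)
{
	unsigned int version = EIP197D;	/* from the DT match table */
	unsigned int flags = 0;

	/* probe time: derive features from the matched engine model */
	if (version == EIP197B || version == EIP197D)
		flags |= EIP197_TRC_CACHE;

	/* everywhere else the driver now tests the feature, not the model */
	if (flags & EIP197_TRC_CACHE)
		printf("transform record cache present: invalidation paths used\n");
	return 0;
}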
+
struct safexcel_crypto_priv {
void __iomem *base;
struct device *dev;
@@ -548,46 +598,19 @@ struct safexcel_crypto_priv {
enum safexcel_eip_version version;
struct safexcel_register_offsets offsets;
+ u32 flags;
/* context DMA pool */
struct dma_pool *context_pool;
atomic_t ring_used;
- struct {
- spinlock_t lock;
- spinlock_t egress_lock;
-
- struct list_head list;
- struct workqueue_struct *workqueue;
- struct safexcel_work_data work_data;
-
- /* command/result rings */
- struct safexcel_ring cdr;
- struct safexcel_ring rdr;
-
- /* queue */
- struct crypto_queue queue;
- spinlock_t queue_lock;
-
- /* Number of requests in the engine. */
- int requests;
-
- /* The ring is currently handling at least one request */
- bool busy;
-
- /* Store for current requests when bailing out of the dequeueing
- * function when no enough resources are available.
- */
- struct crypto_async_request *req;
- struct crypto_async_request *backlog;
- } ring[EIP197_MAX_RINGS];
+ struct safexcel_ring *ring;
};
struct safexcel_context {
int (*send)(struct crypto_async_request *req, int ring,
- struct safexcel_request *request, int *commands,
- int *results);
+ int *commands, int *results);
int (*handle_result)(struct safexcel_crypto_priv *priv, int ring,
struct crypto_async_request *req, bool *complete,
int *ret);
@@ -600,13 +623,13 @@ struct safexcel_context {
};
struct safexcel_ahash_export_state {
- u64 len;
- u64 processed;
+ u64 len[2];
+ u64 processed[2];
u32 digest;
- u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
- u8 cache[SHA256_BLOCK_SIZE];
+ u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u8 cache[SHA512_BLOCK_SIZE];
};
/*
@@ -617,6 +640,7 @@ struct safexcel_ahash_export_state {
struct safexcel_alg_template {
struct safexcel_crypto_priv *priv;
enum safexcel_alg_type type;
+ u32 engines;
union {
struct skcipher_alg skcipher;
struct aead_alg aead;
@@ -635,16 +659,16 @@ int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
- dma_addr_t ctxr_dma, int ring,
- struct safexcel_request *request);
+ dma_addr_t ctxr_dma, int ring);
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *cdr,
- struct safexcel_ring *rdr);
+ struct safexcel_desc_ring *cdr,
+ struct safexcel_desc_ring *rdr);
int safexcel_select_ring(struct safexcel_crypto_priv *priv);
void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring);
+ struct safexcel_desc_ring *ring);
+void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int ring);
void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring);
+ struct safexcel_desc_ring *ring);
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
int ring_id,
bool first, bool last,
@@ -655,21 +679,44 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
int ring_id,
bool first, bool last,
dma_addr_t data, u32 len);
+int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+ int ring);
+int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc);
+void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc,
+ struct crypto_async_request *req);
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
void safexcel_inv_complete(struct crypto_async_request *req, int error);
int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
void *istate, void *ostate);
/* available algorithms */
+extern struct safexcel_alg_template safexcel_alg_ecb_des;
+extern struct safexcel_alg_template safexcel_alg_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_ecb_aes;
extern struct safexcel_alg_template safexcel_alg_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_md5;
extern struct safexcel_alg_template safexcel_alg_sha1;
extern struct safexcel_alg_template safexcel_alg_sha224;
extern struct safexcel_alg_template safexcel_alg_sha256;
+extern struct safexcel_alg_template safexcel_alg_sha384;
+extern struct safexcel_alg_template safexcel_alg_sha512;
+extern struct safexcel_alg_template safexcel_alg_hmac_md5;
extern struct safexcel_alg_template safexcel_alg_hmac_sha1;
extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha384;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha512;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes;
#endif
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 6bb60fda2043..3aef1d43e435 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/device.h>
@@ -15,6 +12,7 @@
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/authenc.h>
+#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
@@ -27,21 +25,28 @@ enum safexcel_cipher_direction {
SAFEXCEL_DECRYPT,
};
+enum safexcel_cipher_alg {
+ SAFEXCEL_DES,
+ SAFEXCEL_3DES,
+ SAFEXCEL_AES,
+};
+
struct safexcel_cipher_ctx {
struct safexcel_context base;
struct safexcel_crypto_priv *priv;
u32 mode;
+ enum safexcel_cipher_alg alg;
bool aead;
__le32 key[8];
unsigned int key_len;
/* All the below is AEAD specific */
- u32 alg;
+ u32 hash_alg;
u32 state_sz;
- u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_cipher_req {
@@ -57,10 +62,24 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
unsigned offset = 0;
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
- offset = AES_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+ switch (ctx->alg) {
+ case SAFEXCEL_DES:
+ offset = DES_BLOCK_SIZE / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE);
+ cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+ break;
+ case SAFEXCEL_3DES:
+ offset = DES3_EDE_BLOCK_SIZE / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE);
+ cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+ break;
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+ case SAFEXCEL_AES:
+ offset = AES_BLOCK_SIZE / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+ break;
+ }
}
token = (struct safexcel_token *)(cdesc->control_data.token + offset);
@@ -145,7 +164,7 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
return ret;
}
- if (priv->version == EIP197 && ctx->base.ctxr_dma) {
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
ctx->base.needs_inv = true;
@@ -179,12 +198,12 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
goto badkey;
/* Encryption key */
- if (priv->version == EIP197 && ctx->base.ctxr_dma &&
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
memcmp(ctx->key, keys.enckey, keys.enckeylen))
ctx->base.needs_inv = true;
/* Auth key */
- switch (ctx->alg) {
+ switch (ctx->hash_alg) {
case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey,
keys.authkeylen, &istate, &ostate))
@@ -200,6 +219,16 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
keys.authkeylen, &istate, &ostate))
goto badkey;
break;
+ case CONTEXT_CONTROL_CRYPTO_ALG_SHA384:
+ if (safexcel_hmac_setkey("safexcel-sha384", keys.authkey,
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
+ case CONTEXT_CONTROL_CRYPTO_ALG_SHA512:
+ if (safexcel_hmac_setkey("safexcel-sha512", keys.authkey,
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
default:
dev_err(priv->dev, "aead: unsupported hash algorithm\n");
goto badkey;
@@ -208,7 +237,7 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
CRYPTO_TFM_RES_MASK);
- if (priv->version == EIP197 && ctx->base.ctxr_dma &&
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
(memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
memcmp(ctx->opad, ostate.state, ctx->state_sz)))
ctx->base.needs_inv = true;
@@ -258,22 +287,28 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
if (ctx->aead)
cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC |
- ctx->alg;
-
- switch (ctx->key_len) {
- case AES_KEYSIZE_128:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
- break;
- case AES_KEYSIZE_192:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
- break;
- case AES_KEYSIZE_256:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
- break;
- default:
- dev_err(priv->dev, "aes keysize not supported: %u\n",
- ctx->key_len);
- return -EINVAL;
+ ctx->hash_alg;
+
+ if (ctx->alg == SAFEXCEL_DES) {
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_DES;
+ } else if (ctx->alg == SAFEXCEL_3DES) {
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_3DES;
+ } else if (ctx->alg == SAFEXCEL_AES) {
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_128:
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
+ break;
+ case AES_KEYSIZE_192:
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
+ break;
+ case AES_KEYSIZE_256:
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
+ break;
+ default:
+ dev_err(priv->dev, "aes keysize not supported: %u\n",
+ ctx->key_len);
+ return -EINVAL;
+ }
}
ctrl_size = ctx->key_len / sizeof(u32);
@@ -298,7 +333,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
do {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
@@ -315,7 +349,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
} while (!rdesc->last_seg);
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (src == dst) {
dma_unmap_sg(priv->dev, src,
@@ -335,8 +368,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
return ndesc;
}
-static int safexcel_aes_send(struct crypto_async_request *base, int ring,
- struct safexcel_request *request,
+static int safexcel_send_req(struct crypto_async_request *base, int ring,
struct safexcel_cipher_req *sreq,
struct scatterlist *src, struct scatterlist *dst,
unsigned int cryptlen, unsigned int assoclen,
@@ -346,7 +378,7 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring,
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
struct safexcel_command_desc *cdesc;
- struct safexcel_result_desc *rdesc;
+ struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
struct scatterlist *sg;
unsigned int totlen = cryptlen + assoclen;
int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
@@ -386,8 +418,6 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring,
ctx->opad, ctx->state_sz);
}
- spin_lock_bh(&priv->ring[ring].egress_lock);
-
/* command descriptors */
for_each_sg(src, sg, nr_src, i) {
int len = sg_dma_len(sg);
@@ -434,12 +464,12 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring,
ret = PTR_ERR(rdesc);
goto rdesc_rollback;
}
+ if (first)
+ first_rdesc = rdesc;
n_rdesc++;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
- request->req = base;
+ safexcel_rdr_req_set(priv, ring, first_rdesc, base);
*commands = n_cdesc;
*results = n_rdesc;
@@ -452,8 +482,6 @@ cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
if (src == dst) {
dma_unmap_sg(priv->dev, src,
sg_nents_for_len(src, totlen),
@@ -481,7 +509,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
do {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
@@ -491,17 +518,13 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
break;
}
- if (rdesc->result_data.error_code) {
- dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
- rdesc->result_data.error_code);
- *ret = -EIO;
- }
+ if (likely(!*ret))
+ *ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++;
} while (!rdesc->last_seg);
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (ctx->base.exit_inv) {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -577,15 +600,13 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
}
static int safexcel_cipher_send_inv(struct crypto_async_request *base,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+ int ring, int *commands, int *results)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring,
- request);
+ ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
if (unlikely(ret))
return ret;
@@ -596,7 +617,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *base,
}
static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request,
int *commands, int *results)
{
struct skcipher_request *req = skcipher_request_cast(async);
@@ -605,21 +625,19 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+ BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
if (sreq->needs_inv)
- ret = safexcel_cipher_send_inv(async, ring, request, commands,
- results);
+ ret = safexcel_cipher_send_inv(async, ring, commands, results);
else
- ret = safexcel_aes_send(async, ring, request, sreq, req->src,
+ ret = safexcel_send_req(async, ring, sreq, req->src,
req->dst, req->cryptlen, 0, 0, req->iv,
commands, results);
return ret;
}
static int safexcel_aead_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request, int *commands,
- int *results)
+ int *commands, int *results)
{
struct aead_request *req = aead_request_cast(async);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -628,14 +646,13 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+ BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
if (sreq->needs_inv)
- ret = safexcel_cipher_send_inv(async, ring, request, commands,
- results);
+ ret = safexcel_cipher_send_inv(async, ring, commands, results);
else
- ret = safexcel_aes_send(async, ring, request, sreq, req->src,
- req->dst, req->cryptlen, req->assoclen,
+ ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
+ req->cryptlen, req->assoclen,
crypto_aead_authsize(tfm), req->iv,
commands, results);
return ret;
@@ -705,9 +722,10 @@ static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
}
-static int safexcel_aes(struct crypto_async_request *base,
+static int safexcel_queue_req(struct crypto_async_request *base,
struct safexcel_cipher_req *sreq,
- enum safexcel_cipher_direction dir, u32 mode)
+ enum safexcel_cipher_direction dir, u32 mode,
+ enum safexcel_cipher_alg alg)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
@@ -715,10 +733,11 @@ static int safexcel_aes(struct crypto_async_request *base,
sreq->needs_inv = false;
sreq->direction = dir;
+ ctx->alg = alg;
ctx->mode = mode;
if (ctx->base.ctxr) {
- if (priv->version == EIP197 && ctx->base.needs_inv) {
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
sreq->needs_inv = true;
ctx->base.needs_inv = false;
}
@@ -745,14 +764,16 @@ static int safexcel_aes(struct crypto_async_request *base,
static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_AES);
}
static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_AES);
}
static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
@@ -795,7 +816,7 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
if (safexcel_cipher_cra_exit(tfm))
return;
- if (priv->version == EIP197) {
+ if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_skcipher_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "skcipher: invalidation error %d\n",
@@ -815,7 +836,7 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
if (safexcel_cipher_cra_exit(tfm))
return;
- if (priv->version == EIP197) {
+ if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_aead_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "aead: invalidation error %d\n",
@@ -828,6 +849,7 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
struct safexcel_alg_template safexcel_alg_ecb_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.skcipher = {
.setkey = safexcel_skcipher_aes_setkey,
.encrypt = safexcel_ecb_aes_encrypt,
@@ -838,7 +860,7 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = {
.cra_name = "ecb(aes)",
.cra_driver_name = "safexcel-ecb-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -852,18 +874,21 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = {
static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_AES);
}
static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_AES);
}
struct safexcel_alg_template safexcel_alg_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.skcipher = {
.setkey = safexcel_skcipher_aes_setkey,
.encrypt = safexcel_cbc_aes_encrypt,
@@ -875,7 +900,7 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = {
.cra_name = "cbc(aes)",
.cra_driver_name = "safexcel-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -887,20 +912,234 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = {
},
};
+static int safexcel_cbc_des_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_DES);
+}
+
+static int safexcel_cbc_des_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_DES);
+}
+
+static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 tmp[DES_EXPKEY_WORDS];
+ int ret;
+
+ if (len != DES_KEY_SIZE) {
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ret = des_ekey(tmp, key);
+ if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ /* if context exists and key changed, need to invalidate it */
+ if (ctx->base.ctxr_dma)
+ if (memcmp(ctx->key, key, len))
+ ctx->base.needs_inv = true;
+
+ memcpy(ctx->key, key, len);
+ ctx->key_len = len;
+
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des_setkey,
+ .encrypt = safexcel_cbc_des_encrypt,
+ .decrypt = safexcel_cbc_des_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "safexcel-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_ecb_des_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_DES);
+}
+
+static int safexcel_ecb_des_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_DES);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des_setkey,
+ .encrypt = safexcel_ecb_des_encrypt,
+ .decrypt = safexcel_ecb_des_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "safexcel-ecb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_cbc_des3_ede_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_3DES);
+}
+
+static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_3DES);
+}
+
+static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (len != DES3_EDE_KEY_SIZE) {
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /* if context exists and key changed, need to invalidate it */
+ if (ctx->base.ctxr_dma) {
+ if (memcmp(ctx->key, key, len))
+ ctx->base.needs_inv = true;
+ }
+
+ memcpy(ctx->key, key, len);
+
+ ctx->key_len = len;
+
+ return 0;
+}
+
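Both DES setkey paths reuse the driver's existing invalidation rule: a context record already handed to the engine must be invalidated when its key changes. Standalone sketch of that decision (key bytes are placeholders):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char cached[24] = "oldkey..oldkey..oldkey..";
	unsigned char newkey[24] = "newkey..newkey..newkey..";
	int ctxr_dma = 1;	/* context record already pushed to the engine */
	int needs_inv = 0;

	/* same decision as the setkey paths above: a live context whose
	 * key changed must be invalidated before it can be reused */
	if (ctxr_dma && memcmp(cached, newkey, sizeof(cached)))
		needs_inv = 1;

	printf("needs_inv = %d\n", needs_inv);
	return 0;
}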
+struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des3_ede_setkey,
+ .encrypt = safexcel_cbc_des3_ede_encrypt,
+ .decrypt = safexcel_cbc_des3_ede_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "safexcel-cbc-des3_ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_ecb_des3_ede_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_3DES);
+}
+
+static int safexcel_ecb_des3_ede_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_3DES);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des3_ede_setkey,
+ .encrypt = safexcel_ecb_des3_ede_encrypt,
+ .decrypt = safexcel_ecb_des3_ede_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "safexcel-ecb-des3_ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_aead_encrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
- return safexcel_aes(&req->base, creq, SAFEXCEL_ENCRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT,
+ CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
}
static int safexcel_aead_decrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
- return safexcel_aes(&req->base, creq, SAFEXCEL_DECRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT,
+ CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
}
static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
@@ -926,13 +1165,14 @@ static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm)
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
ctx->state_sz = SHA1_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.aead = {
.setkey = safexcel_aead_aes_setkey,
.encrypt = safexcel_aead_encrypt,
@@ -943,7 +1183,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -960,13 +1200,14 @@ static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm)
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
ctx->state_sz = SHA256_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.aead = {
.setkey = safexcel_aead_aes_setkey,
.encrypt = safexcel_aead_encrypt,
@@ -977,7 +1218,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -994,13 +1235,14 @@ static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm)
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
ctx->state_sz = SHA256_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.aead = {
.setkey = safexcel_aead_aes_setkey,
.encrypt = safexcel_aead_encrypt,
@@ -1011,7 +1253,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1022,3 +1264,73 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
},
},
};
+
+static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+ ctx->state_sz = SHA512_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.aead = {
+ .setkey = safexcel_aead_aes_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha512_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+ ctx->state_sz = SHA512_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.aead = {
+ .setkey = safexcel_aead_aes_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha384_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index c77b0e1655a8..ac9282c1a5ec 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -1,14 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <crypto/hmac.h>
+#include <crypto/md5.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
@@ -22,8 +20,8 @@ struct safexcel_ahash_ctx {
u32 alg;
- u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_ahash_req {
@@ -38,18 +36,26 @@ struct safexcel_ahash_req {
u32 digest;
u8 state_sz; /* expected state size, only set once */
- u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
+ u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
- u64 len;
- u64 processed;
+ u64 len[2];
+ u64 processed[2];
- u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
dma_addr_t cache_dma;
unsigned int cache_sz;
- u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};
+static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
+{
+ if (req->len[1] > req->processed[1])
+ return 0xffffffff - (req->len[0] - req->processed[0]);
+
+ return req->len[0] - req->processed[0];
+}
+
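The len/processed pairs form a 128-bit byte count; updates carry into the high half when the low half wraps, and safexcel_queued_len() above subtracts across the halves. Sketch of the carry step used by safexcel_ahash_update():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t len[2] = { UINT64_MAX - 8, 0 };	/* near the 64-bit wrap */
	uint64_t nbytes = 64;

	/* 128-bit add with carry, as in safexcel_ahash_update() */
	len[0] += nbytes;
	if (len[0] < nbytes)	/* wrapped: propagate into the high half */
		len[1]++;

	printf("len = %llu:%llu\n",
	       (unsigned long long)len[1], (unsigned long long)len[0]);
	return 0;
}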
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
u32 input_length, u32 result_length)
{
@@ -72,9 +78,9 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
struct safexcel_ahash_req *req,
struct safexcel_command_desc *cdesc,
- unsigned int digestsize,
- unsigned int blocksize)
+ unsigned int digestsize)
{
+ struct safexcel_crypto_priv *priv = ctx->priv;
int i;
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
@@ -82,12 +88,17 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
cdesc->control_data.control0 |= req->digest;
if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
- if (req->processed) {
- if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+ if (req->processed[0] || req->processed[1]) {
+ if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 ||
+ ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17);
cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
} else {
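The CONTEXT_CONTROL_SIZE() arguments above read as the digest state in 32-bit words plus one word for the block counter: 4+1 for MD5, 5+1 for SHA-1, 8+1 for SHA-224/256, 16+1 for SHA-384/512. That reading is inferred from the constants, not stated in the patch; a trivial check:

#include <stdio.h>

int main(void)
{
	/* digest state words + 1 counter word (assumed reading of the
	 * CONTEXT_CONTROL_SIZE() arguments in this patch) */
	struct { const char *alg; int state_words; } t[] = {
		{ "md5",        4 },	/* -> SIZE(5)  */
		{ "sha1",       5 },	/* -> SIZE(6)  */
		{ "sha224/256", 8 },	/* -> SIZE(9)  */
		{ "sha384/512", 16 },	/* -> SIZE(17) */
	};
	for (int i = 0; i < 4; i++)
		printf("%-11s context size = %d words\n",
		       t[i].alg, t[i].state_words + 1);
	return 0;
}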
@@ -102,12 +113,28 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
* fields. Do this now as we need it to setup the first command
* descriptor.
*/
- if (req->processed) {
+ if (req->processed[0] || req->processed[1]) {
for (i = 0; i < digestsize / sizeof(u32); i++)
ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);
- if (req->finish)
- ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
+ if (req->finish) {
+ u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+ count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) *
+ req->processed[1]);
+
+ /* This is a hardware limitation, as the
+ * counter must fit into a u32. This represents
+ * a fairly big amount of input data, so we
+ * shouldn't see this.
+ */
+ if (unlikely(count & 0xffff0000)) {
+ dev_warn(priv->dev,
+ "Input data is too big\n");
+ return;
+ }
+
+ ctx->base.ctxr->data[i] = cpu_to_le32(count);
+ }
}
} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
@@ -126,11 +153,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
- int cache_len;
+ u64 cache_len;
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
@@ -141,7 +167,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
}
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (sreq->nents) {
dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
@@ -164,7 +189,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
memcpy(areq->result, sreq->state,
crypto_ahash_digestsize(ahash));
- cache_len = sreq->len - sreq->processed;
+ cache_len = safexcel_queued_len(sreq);
if (cache_len)
memcpy(sreq->cache, sreq->cache_next, cache_len);
@@ -174,7 +199,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
}
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
- struct safexcel_request *request,
int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
@@ -185,9 +209,10 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+ int i, extra, n_cdesc = 0, ret = 0;
+ u64 queued, len, cache_len;
- queued = len = req->len - req->processed;
+ queued = len = safexcel_queued_len(req);
if (queued <= crypto_ahash_blocksize(ahash))
cache_len = queued;
else
@@ -220,16 +245,12 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
}
}
- spin_lock_bh(&priv->ring[ring].egress_lock);
-
/* Add a command descriptor for the cached data, if any */
if (cache_len) {
req->cache_dma = dma_map_single(priv->dev, req->cache,
cache_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->dev, req->cache_dma)) {
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ if (dma_mapping_error(priv->dev, req->cache_dma))
return -EINVAL;
- }
req->cache_sz = cache_len;
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
@@ -260,7 +281,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
int sglen = sg_dma_len(sg);
/* Do not overflow the request */
- if (queued - sglen < 0)
+ if (queued < sglen)
sglen = queued;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
@@ -282,8 +303,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
send_command:
/* Setup the context options */
- safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
- crypto_ahash_blocksize(ahash));
+ safexcel_context_control(ctx, req, first_cdesc, req->state_sz);
/* Add the token */
safexcel_hash_token(first_cdesc, len, req->state_sz);
@@ -303,10 +323,11 @@ send_command:
goto unmap_result;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
- req->processed += len;
- request->req = &areq->base;
+ req->processed[0] += len;
+ if (req->processed[0] < len)
+ req->processed[1]++;
*commands = n_cdesc;
*results = 1;
@@ -327,7 +348,6 @@ unmap_cache:
req->cache_sz = 0;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
return ret;
}
@@ -335,16 +355,18 @@ static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
unsigned int state_w_sz = req->state_sz / sizeof(u32);
+ u64 processed;
int i;
+ processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+ processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1];
+
for (i = 0; i < state_w_sz; i++)
if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
return true;
- if (ctx->base.ctxr->data[state_w_sz] !=
- cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
+ if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed))
return true;
return false;
@@ -363,21 +385,16 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
"hash: invalidate: could not retrieve the result descriptor\n");
*ret = PTR_ERR(rdesc);
- } else if (rdesc->result_data.error_code) {
- dev_err(priv->dev,
- "hash: invalidate: result descriptor error (%d)\n",
- rdesc->result_data.error_code);
- *ret = -EINVAL;
+ } else {
+ *ret = safexcel_rdesc_check_errors(priv, rdesc);
}
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (ctx->base.exit_inv) {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -413,7 +430,7 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
int err;
- BUG_ON(priv->version == EIP97 && req->needs_inv);
+ BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);
if (req->needs_inv) {
req->needs_inv = false;
@@ -428,15 +445,14 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
}
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+ int ring, int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
ret = safexcel_invalidate_cache(async, ctx->priv,
- ctx->base.ctxr_dma, ring, request);
+ ctx->base.ctxr_dma, ring);
if (unlikely(ret))
return ret;
@@ -447,19 +463,17 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
}
static int safexcel_ahash_send(struct crypto_async_request *async,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+ int ring, int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
int ret;
if (req->needs_inv)
- ret = safexcel_ahash_send_inv(async, ring, request,
- commands, results);
+ ret = safexcel_ahash_send_inv(async, ring, commands, results);
else
- ret = safexcel_ahash_send_req(async, ring, request,
- commands, results);
+ ret = safexcel_ahash_send_req(async, ring, commands, results);
+
return ret;
}
@@ -509,17 +523,17 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
- int queued, cache_len;
+ u64 queued, cache_len;
- /* cache_len: everyting accepted by the driver but not sent yet,
- * tot sz handled by update() - last req sz - tot sz handled by send()
- */
- cache_len = req->len - areq->nbytes - req->processed;
/* queued: everything accepted by the driver which will be handled by
* the next send() calls.
* tot sz handled by update() - tot sz handled by send()
*/
- queued = req->len - req->processed;
+ queued = safexcel_queued_len(req);
+ /* cache_len: everything accepted by the driver but not sent yet,
+ * tot sz handled by update() - last req sz - tot sz handled by send()
+ */
+ cache_len = queued - areq->nbytes;
/*
* In case there isn't enough bytes to proceed (less than a
@@ -546,8 +560,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
req->needs_inv = false;
if (ctx->base.ctxr) {
- if (priv->version == EIP197 &&
- !ctx->base.needs_inv && req->processed &&
+ if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
+ (req->processed[0] || req->processed[1]) &&
req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
/* We're still setting needs_inv here, even though it is
* cleared right away, because the needs_inv flag can be
@@ -590,7 +604,9 @@ static int safexcel_ahash_update(struct ahash_request *areq)
if (!areq->nbytes)
return 0;
- req->len += areq->nbytes;
+ req->len[0] += areq->nbytes;
+ if (req->len[0] < areq->nbytes)
+ req->len[1]++;
safexcel_ahash_cache(areq);
@@ -605,7 +621,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
return safexcel_ahash_enqueue(areq);
if (!req->last_req &&
- req->len - req->processed > crypto_ahash_blocksize(ahash))
+ safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
return safexcel_ahash_enqueue(areq);
return 0;
@@ -620,8 +636,11 @@ static int safexcel_ahash_final(struct ahash_request *areq)
req->finish = true;
/* If we have an overall 0 length request */
- if (!(req->len + areq->nbytes)) {
- if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+ if (!req->len[0] && !req->len[1] && !areq->nbytes) {
+ if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+ memcpy(areq->result, md5_zero_message_hash,
+ MD5_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
memcpy(areq->result, sha1_zero_message_hash,
SHA1_DIGEST_SIZE);
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
@@ -630,6 +649,12 @@ static int safexcel_ahash_final(struct ahash_request *areq)
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
memcpy(areq->result, sha256_zero_message_hash,
SHA256_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
+ memcpy(areq->result, sha384_zero_message_hash,
+ SHA384_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+ memcpy(areq->result, sha512_zero_message_hash,
+ SHA512_DIGEST_SIZE);
return 0;
}
@@ -654,8 +679,10 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct safexcel_ahash_export_state *export = out;
- export->len = req->len;
- export->processed = req->processed;
+ export->len[0] = req->len[0];
+ export->len[1] = req->len[1];
+ export->processed[0] = req->processed[0];
+ export->processed[1] = req->processed[1];
export->digest = req->digest;
@@ -676,8 +703,10 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
if (ret)
return ret;
- req->len = export->len;
- req->processed = export->processed;
+ req->len[0] = export->len[0];
+ req->len[1] = export->len[1];
+ req->processed[0] = export->processed[0];
+ req->processed[1] = export->processed[1];
req->digest = export->digest;
@@ -743,7 +772,7 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
if (!ctx->base.ctxr)
return;
- if (priv->version == EIP197) {
+ if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_ahash_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
@@ -755,6 +784,7 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
struct safexcel_alg_template safexcel_alg_sha1 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_sha1_init,
.update = safexcel_ahash_update,
@@ -908,8 +938,7 @@ int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
u8 *ipad, *opad;
int ret;
- tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ tfm = crypto_alloc_ahash(alg, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
@@ -963,7 +992,7 @@ static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- if (priv->version == EIP197 && ctx->base.ctxr) {
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) {
for (i = 0; i < state_sz / sizeof(u32); i++) {
if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
@@ -988,6 +1017,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_hmac_sha1_init,
.update = safexcel_ahash_update,
@@ -1051,6 +1081,7 @@ static int safexcel_sha256_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_sha256 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_sha256_init,
.update = safexcel_ahash_update,
@@ -1113,6 +1144,7 @@ static int safexcel_sha224_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_sha224 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_sha224_init,
.update = safexcel_ahash_update,
@@ -1168,6 +1200,7 @@ static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_hmac_sha224_init,
.update = safexcel_ahash_update,
@@ -1224,6 +1257,7 @@ static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_hmac_sha256_init,
.update = safexcel_ahash_update,
@@ -1251,3 +1285,375 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
},
},
};
+
+static int safexcel_sha512_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ req->state[0] = lower_32_bits(SHA512_H0);
+ req->state[1] = upper_32_bits(SHA512_H0);
+ req->state[2] = lower_32_bits(SHA512_H1);
+ req->state[3] = upper_32_bits(SHA512_H1);
+ req->state[4] = lower_32_bits(SHA512_H2);
+ req->state[5] = upper_32_bits(SHA512_H2);
+ req->state[6] = lower_32_bits(SHA512_H3);
+ req->state[7] = upper_32_bits(SHA512_H3);
+ req->state[8] = lower_32_bits(SHA512_H4);
+ req->state[9] = upper_32_bits(SHA512_H4);
+ req->state[10] = lower_32_bits(SHA512_H5);
+ req->state[11] = upper_32_bits(SHA512_H5);
+ req->state[12] = lower_32_bits(SHA512_H6);
+ req->state[13] = upper_32_bits(SHA512_H6);
+ req->state[14] = lower_32_bits(SHA512_H7);
+ req->state[15] = upper_32_bits(SHA512_H7);
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA512_DIGEST_SIZE;
+
+ return 0;
+}
+
+static int safexcel_sha512_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_sha512_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha512 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_sha512_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_sha512_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "safexcel-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha384_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ req->state[0] = lower_32_bits(SHA384_H0);
+ req->state[1] = upper_32_bits(SHA384_H0);
+ req->state[2] = lower_32_bits(SHA384_H1);
+ req->state[3] = upper_32_bits(SHA384_H1);
+ req->state[4] = lower_32_bits(SHA384_H2);
+ req->state[5] = upper_32_bits(SHA384_H2);
+ req->state[6] = lower_32_bits(SHA384_H3);
+ req->state[7] = upper_32_bits(SHA384_H3);
+ req->state[8] = lower_32_bits(SHA384_H4);
+ req->state[9] = upper_32_bits(SHA384_H4);
+ req->state[10] = lower_32_bits(SHA384_H5);
+ req->state[11] = upper_32_bits(SHA384_H5);
+ req->state[12] = lower_32_bits(SHA384_H6);
+ req->state[13] = upper_32_bits(SHA384_H6);
+ req->state[14] = lower_32_bits(SHA384_H7);
+ req->state[15] = upper_32_bits(SHA384_H7);
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA512_DIGEST_SIZE;
+
+ return 0;
+}
+
+static int safexcel_sha384_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_sha384_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha384 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_sha384_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_sha384_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "safexcel-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512",
+ SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha512_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_sha512_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sha512_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha512_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sha512_digest,
+ .setkey = safexcel_hmac_sha512_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "safexcel-hmac-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384",
+ SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha384_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_sha384_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sha384_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha384_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sha384_digest,
+ .setkey = safexcel_hmac_sha384_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "safexcel-hmac-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_md5_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ req->state[0] = MD5_H0;
+ req->state[1] = MD5_H1;
+ req->state[2] = MD5_H2;
+ req->state[3] = MD5_H3;
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = MD5_DIGEST_SIZE;
+
+ return 0;
+}
+
+static int safexcel_md5_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_md5_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_md5 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_md5_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_md5_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "safexcel-md5",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_md5_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_md5_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5",
+ MD5_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_md5_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_md5_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_md5 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_hmac_md5_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_md5_digest,
+ .setkey = safexcel_hmac_md5_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "safexcel-hmac-md5",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
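
The hunks above widen the safexcel byte counters (req->len, req->processed) from a single integer to a pair of 64-bit words, so running message lengths no longer wrap silently; the update path carries overflow from len[0] into len[1] by hand, and the zero-length final path now has to test both words. A minimal, standalone C sketch of that carry idiom (names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* len[0] holds the low 64 bits of the running byte count, len[1] the
 * high 64 bits.  Unsigned overflow wraps, so the sum coming out
 * smaller than the addend is exactly the carry condition. */
static void add_len(uint64_t len[2], uint64_t nbytes)
{
	len[0] += nbytes;
	if (len[0] < nbytes)
		len[1]++;	/* low word wrapped: carry into high word */
}

int main(void)
{
	uint64_t len[2] = { UINT64_MAX - 2, 0 };

	add_len(len, 8);	/* wraps: lo becomes 5, hi becomes 1 */
	printf("lo=%llu hi=%llu\n",
	       (unsigned long long)len[0], (unsigned long long)len[1]);
	return 0;
}

This is the same test safexcel_ahash_update() performs above, bumping req->len[1] whenever req->len[0] ends up below areq->nbytes after the addition.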
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index c9d2a8716b5b..eb75fa684876 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/dma-mapping.h>
@@ -14,8 +11,8 @@
#include "safexcel.h"
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *cdr,
- struct safexcel_ring *rdr)
+ struct safexcel_desc_ring *cdr,
+ struct safexcel_desc_ring *rdr)
{
cdr->offset = sizeof(u32) * priv->config.cd_offset;
cdr->base = dmam_alloc_coherent(priv->dev,
@@ -24,7 +21,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
if (!cdr->base)
return -ENOMEM;
cdr->write = cdr->base;
- cdr->base_end = cdr->base + cdr->offset * EIP197_DEFAULT_RING_SIZE;
+ cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
cdr->read = cdr->base;
rdr->offset = sizeof(u32) * priv->config.rd_offset;
@@ -34,7 +31,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
if (!rdr->base)
return -ENOMEM;
rdr->write = rdr->base;
- rdr->base_end = rdr->base + rdr->offset * EIP197_DEFAULT_RING_SIZE;
+ rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
rdr->read = rdr->base;
return 0;
@@ -46,49 +43,73 @@ inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
}
static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring)
+ struct safexcel_desc_ring *ring)
{
void *ptr = ring->write;
- if (ring->nr == EIP197_DEFAULT_RING_SIZE - 1)
+ if ((ring->write == ring->read - ring->offset) ||
+ (ring->read == ring->base && ring->write == ring->base_end))
return ERR_PTR(-ENOMEM);
- ring->write += ring->offset;
if (ring->write == ring->base_end)
ring->write = ring->base;
+ else
+ ring->write += ring->offset;
- ring->nr++;
return ptr;
}
void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring)
+ struct safexcel_desc_ring *ring)
{
void *ptr = ring->read;
- if (!ring->nr)
+ if (ring->write == ring->read)
return ERR_PTR(-ENOENT);
- ring->read += ring->offset;
if (ring->read == ring->base_end)
ring->read = ring->base;
+ else
+ ring->read += ring->offset;
- ring->nr--;
return ptr;
}
+inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
+ int ring)
+{
+ struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+ return rdr->read;
+}
+
+inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+ int ring)
+{
+ struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+ return (rdr->read - rdr->base) / rdr->offset;
+}
+
+inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc)
+{
+ struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+ return ((void *)rdesc - rdr->base) / rdr->offset;
+}
+
void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring)
+ struct safexcel_desc_ring *ring)
{
- if (!ring->nr)
+ if (ring->write == ring->read)
return;
if (ring->write == ring->base)
- ring->write = ring->base_end - ring->offset;
+ ring->write = ring->base_end;
else
ring->write -= ring->offset;
-
- ring->nr--;
}
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
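
The safexcel_ring.c changes above drop the ring->nr element counter and derive full/empty purely from the read and write pointers: empty when they are equal, full when write sits one slot behind read, with base_end now pointing at the last slot rather than one past it so the wrap test is exact. A standalone C sketch of the same scheme, using slot indices in place of the driver's byte offsets (names are illustrative):

#include <stdio.h>

#define RING_SLOTS 4	/* one slot is sacrificed to tell full from empty */

struct ring {
	unsigned int read, write;	/* slot indices */
};

/* Full when the write index sits immediately behind the read index,
 * including the wrap case (read at slot 0, write at the last slot). */
static int ring_full(const struct ring *r)
{
	return (r->read && r->write == r->read - 1) ||
	       (r->read == 0 && r->write == RING_SLOTS - 1);
}

static int ring_empty(const struct ring *r)
{
	return r->write == r->read;
}

static int ring_push(struct ring *r)
{
	if (ring_full(r))
		return -1;	/* -ENOMEM in the driver */
	r->write = (r->write == RING_SLOTS - 1) ? 0 : r->write + 1;
	return 0;
}

static int ring_pop(struct ring *r)
{
	if (ring_empty(r))
		return -1;	/* -ENOENT in the driver */
	r->read = (r->read == RING_SLOTS - 1) ? 0 : r->read + 1;
	return 0;
}

int main(void)
{
	struct ring r = { 0, 0 };
	int pushed = 0, popped = 0;

	while (!ring_push(&r))
		pushed++;
	while (!ring_pop(&r))
		popped++;
	printf("pushed=%d popped=%d\n", pushed, popped);	/* 3 3 */
	return 0;
}

The cost of dropping the counter is one unusable slot: a ring of N slots holds at most N - 1 descriptors, which is why base_end moved back by one offset in safexcel_init_ring_descriptors().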
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index e34d80b6b7e5..99ff54cc8a15 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -1183,8 +1183,7 @@ static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
u8 *opad;
int ret;
- tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
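
Several drivers in this series (safexcel, marvell/cesa, qce) stop passing CRYPTO_ALG_TYPE_AHASH / CRYPTO_ALG_TYPE_AHASH_MASK to crypto_alloc_ahash(); the allocator already constrains the lookup to the ahash type, so a type and mask of 0 are sufficient. A kernel-context sketch of the resulting call pattern (sketch_sha256 and its surrounding error handling are illustrative, not taken from this series):

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int sketch_sha256(const u8 *buf, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);	/* was: type/mask flags */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, len);

	/* Wait synchronously for the async digest to complete. */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}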
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index ab6235b7ff22..55f34cfc43ff 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1487,8 +1487,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
base->cra_priority = N2_CRA_PRIORITY;
- base->cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = tmpl->block_size;
base->cra_ctxsize = sizeof(struct n2_hash_ctx);
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index c2f7d4befb55..ad3358e74f5c 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -386,7 +386,6 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
.cra_name = "xcbc(aes)",
.cra_driver_name = "xcbc-aes-nx",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index becb738c897b..a6764af83c6d 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -288,7 +288,6 @@ struct shash_alg nx_shash_sha256_alg = {
.cra_name = "sha256",
.cra_driver_name = "sha256-nx",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index b6e183d58d73..92956bc6e45e 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -294,7 +294,6 @@ struct shash_alg nx_shash_sha512_alg = {
.cra_name = "sha512",
.cra_driver_name = "sha512-nx",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index d1a1c74fb56a..0641185bd82f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1464,8 +1464,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "sha1",
.cra_driver_name = "omap-sha1",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1487,8 +1486,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "md5",
.cra_driver_name = "omap-md5",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1511,8 +1509,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "omap-hmac-sha1",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1536,8 +1533,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "hmac(md5)",
.cra_driver_name = "omap-hmac-md5",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1564,8 +1560,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha224",
.cra_driver_name = "omap-sha224",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1586,8 +1581,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "omap-sha256",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1609,8 +1603,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "omap-hmac-sha224",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1633,8 +1626,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "omap-hmac-sha256",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1659,8 +1651,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "sha384",
.cra_driver_name = "omap-sha384",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1681,8 +1672,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "sha512",
.cra_driver_name = "omap-sha512",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1704,8 +1694,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "omap-hmac-sha384",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1728,8 +1717,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "omap-hmac-sha512",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index d32c79328876..21e5cae0a1e0 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -247,8 +247,7 @@ static struct shash_alg sha1_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -271,8 +270,7 @@ static struct shash_alg sha256_alg = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -484,7 +482,6 @@ static struct shash_alg sha1_alg_nano = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -503,7 +500,6 @@ static struct shash_alg sha256_alg_nano = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index da8a2d3b5e9a..9225d060e18f 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -163,7 +163,7 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
return 0;
set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
- reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC);
+ reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
if (!reset_data)
return -ENOMEM;
reset_data->accel_dev = accel_dev;
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 53227d70d397..d8a5db11b7ea 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -378,8 +378,7 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
else
return -EINVAL;
- ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
if (IS_ERR(ahash_tfm))
return PTR_ERR(ahash_tfm);
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
new file mode 100644
index 000000000000..e54249ccc009
--- /dev/null
+++ b/drivers/crypto/qcom-rng.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-18 Linaro Limited
+//
+// Based on msm-rng.c and downstream driver
+
+#include <crypto/internal/rng.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT 0x0000
+#define PRNG_STATUS 0x0004
+#define PRNG_LFSR_CFG 0x0100
+#define PRNG_CONFIG 0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK 0x0000ffff
+#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd
+#define PRNG_CONFIG_HW_ENABLE BIT(1)
+#define PRNG_STATUS_DATA_AVAIL BIT(0)
+
+#define WORD_SZ 4
+
+struct qcom_rng {
+ struct mutex lock;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned int skip_init;
+};
+
+struct qcom_rng_ctx {
+ struct qcom_rng *rng;
+};
+
+static struct qcom_rng *qcom_rng_dev;
+
+static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
+{
+ unsigned int currsize = 0;
+ u32 val;
+
+ /* read random data from hardware */
+ do {
+ val = readl_relaxed(rng->base + PRNG_STATUS);
+ if (!(val & PRNG_STATUS_DATA_AVAIL))
+ break;
+
+ val = readl_relaxed(rng->base + PRNG_DATA_OUT);
+ if (!val)
+ break;
+
+ if ((max - currsize) >= WORD_SZ) {
+ memcpy(data, &val, WORD_SZ);
+ data += WORD_SZ;
+ currsize += WORD_SZ;
+ } else {
+ /* copy only remaining bytes */
+ memcpy(data, &val, max - currsize);
+ break;
+ }
+ } while (currsize < max);
+
+ return currsize;
+}
+
+static int qcom_rng_generate(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dstn, unsigned int dlen)
+{
+ struct qcom_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct qcom_rng *rng = ctx->rng;
+ int ret;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ mutex_lock(&rng->lock);
+
+ ret = qcom_rng_read(rng, dstn, dlen);
+
+ mutex_unlock(&rng->lock);
+ clk_disable_unprepare(rng->clk);
+
+ return 0;
+}
+
+static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ return 0;
+}
+
+static int qcom_rng_enable(struct qcom_rng *rng)
+{
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ /* Enable PRNG only if it is not already enabled */
+ val = readl_relaxed(rng->base + PRNG_CONFIG);
+ if (val & PRNG_CONFIG_HW_ENABLE)
+ goto already_enabled;
+
+ val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
+ val &= ~PRNG_LFSR_CFG_MASK;
+ val |= PRNG_LFSR_CFG_CLOCKS;
+ writel(val, rng->base + PRNG_LFSR_CFG);
+
+ val = readl_relaxed(rng->base + PRNG_CONFIG);
+ val |= PRNG_CONFIG_HW_ENABLE;
+ writel(val, rng->base + PRNG_CONFIG);
+
+already_enabled:
+ clk_disable_unprepare(rng->clk);
+
+ return 0;
+}
+
+static int qcom_rng_init(struct crypto_tfm *tfm)
+{
+ struct qcom_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->rng = qcom_rng_dev;
+
+ if (!ctx->rng->skip_init)
+ return qcom_rng_enable(ctx->rng);
+
+ return 0;
+}
+
+static struct rng_alg qcom_rng_alg = {
+ .generate = qcom_rng_generate,
+ .seed = qcom_rng_seed,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "qcom-rng",
+ .cra_flags = CRYPTO_ALG_TYPE_RNG,
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct qcom_rng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = qcom_rng_init,
+ }
+};
+
+static int qcom_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct qcom_rng *rng;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rng);
+ mutex_init(&rng->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rng->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rng->base))
+ return PTR_ERR(rng->base);
+
+ /* ACPI systems have clk already on, so skip clk_get */
+ if (!has_acpi_companion(&pdev->dev)) {
+ rng->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(rng->clk))
+ return PTR_ERR(rng->clk);
+ }
+
+ rng->skip_init = (unsigned long)device_get_match_data(&pdev->dev);
+
+ qcom_rng_dev = rng;
+ ret = crypto_register_rng(&qcom_rng_alg);
+ if (ret) {
+ dev_err(&pdev->dev, "Register crypto rng failed: %d\n", ret);
+ qcom_rng_dev = NULL;
+ }
+
+ return ret;
+}
+
+static int qcom_rng_remove(struct platform_device *pdev)
+{
+ crypto_unregister_rng(&qcom_rng_alg);
+
+ qcom_rng_dev = NULL;
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id qcom_rng_acpi_match[] = {
+ { .id = "QCOM8160", .driver_data = 1 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match);
+#endif
+
+static const struct of_device_id qcom_rng_of_match[] = {
+ { .compatible = "qcom,prng", .data = (void *)0},
+ { .compatible = "qcom,prng-ee", .data = (void *)1},
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_rng_of_match);
+
+static struct platform_driver qcom_rng_driver = {
+ .probe = qcom_rng_probe,
+ .remove = qcom_rng_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_match_ptr(qcom_rng_of_match),
+ .acpi_match_table = ACPI_PTR(qcom_rng_acpi_match),
+ }
+};
+module_platform_driver(qcom_rng_driver);
+
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_DESCRIPTION("Qualcomm random number generator driver");
+MODULE_LICENSE("GPL v2");
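
The new qcom-rng driver registers as a "stdrng" rng_alg at priority 300, so in-kernel consumers reach it through the generic crypto RNG API rather than the hwrng framework. A kernel-context sketch of such a consumer (sketch_get_random is an illustrative name):

#include <crypto/rng.h>

static int sketch_get_random(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	/* Resolves to the highest-priority registered "stdrng",
	 * which may be qcom-rng on a matching platform. */
	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}

Since the hardware self-seeds, the driver advertises seedsize = 0 and its seed callback is a no-op, so callers need no explicit crypto_rng_reset() before generating.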
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index bf7163042569..faa282074e5a 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1765,8 +1765,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
.cra_name = "sha1",
.cra_driver_name = "exynos-sha1",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
@@ -1791,8 +1790,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
.cra_name = "md5",
.cra_driver_name = "exynos-md5",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
@@ -1817,8 +1815,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "exynos-sha256",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0f2245e1af2b..e7540a5b8197 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1253,8 +1253,7 @@ static struct ahash_alg sha_v3_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "sahara-sha1",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sahara_ctx),
@@ -1280,8 +1279,7 @@ static struct ahash_alg sha_v4_algs[] = {
.cra_name = "sha256",
.cra_driver_name = "sahara-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sahara_ctx),
@@ -1351,7 +1349,7 @@ err_sha_v4_algs:
err_sha_v3_algs:
for (j = 0; j < k; j++)
- crypto_unregister_ahash(&sha_v4_algs[j]);
+ crypto_unregister_ahash(&sha_v3_algs[j]);
err_aes_algs:
for (j = 0; j < i; j++)
@@ -1367,7 +1365,7 @@ static void sahara_unregister_algs(struct sahara_dev *dev)
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_alg(&aes_algs[i]);
- for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
+ for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
crypto_unregister_ahash(&sha_v3_algs[i]);
if (dev->version > SAHARA_VERSION_3)
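
The two sahara hunks above fix cleanup loops that walked the wrong array: the error path taken after a v3-hash registration failure and sahara_unregister_algs() must both iterate sha_v3_algs, not sha_v4_algs (the arrays happen to be the same size, which is why the bug never crashed). A standalone C sketch of the register/unwind pattern being restored, with hypothetical names:

#include <stdio.h>

#define N_V3 2
#define N_V4 2

static int register_alg(const char *name)    { printf("reg %s\n", name); return 0; }
static void unregister_alg(const char *name) { printf("unreg %s\n", name); }

static const char *v3[N_V3] = { "sha1-v3", "md5-v3" };
static const char *v4[N_V4] = { "sha256-v4", "sha224-v4" };

static int register_all(void)
{
	int i, k, j;

	for (k = 0; k < N_V3; k++)
		if (register_alg(v3[k]))
			goto err_v3;
	for (i = 0; i < N_V4; i++)
		if (register_alg(v4[i]))
			goto err_v4;
	return 0;

err_v4:
	for (j = 0; j < i; j++)
		unregister_alg(v4[j]);	/* undo only what v4 registered */
err_v3:
	for (j = 0; j < k; j++)
		unregister_alg(v3[j]);	/* the fixed loop: walk v3, not v4 */
	return -1;
}

int main(void) { return register_all() ? 1 : 0; }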
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index c5d3efc54a4f..23b0b7bd64c7 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <crypto/aes.h>
@@ -105,6 +106,7 @@
#define GCM_CTR_INIT 2
#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
+#define CRYP_AUTOSUSPEND_DELAY 50
struct stm32_cryp_caps {
bool swap_final;
@@ -519,6 +521,8 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
int ret;
u32 cfg, hw_mode;
+ pm_runtime_get_sync(cryp->dev);
+
/* Disable interrupt */
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
@@ -638,6 +642,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
free_pages((unsigned long)buf_out, pages);
}
+ pm_runtime_mark_last_busy(cryp->dev);
+ pm_runtime_put_autosuspend(cryp->dev);
+
if (is_gcm(cryp) || is_ccm(cryp)) {
crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
cryp->areq = NULL;
@@ -1969,6 +1976,13 @@ static int stm32_cryp_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_set_autosuspend_delay(dev, CRYP_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
rst = devm_reset_control_get(dev, NULL);
if (!IS_ERR(rst)) {
reset_control_assert(rst);
@@ -2008,6 +2022,8 @@ static int stm32_cryp_probe(struct platform_device *pdev)
dev_info(dev, "Initialized\n");
+ pm_runtime_put_sync(dev);
+
return 0;
err_aead_algs:
@@ -2020,6 +2036,11 @@ err_engine1:
list_del(&cryp->list);
spin_unlock(&cryp_list.lock);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
clk_disable_unprepare(cryp->clk);
return ret;
@@ -2028,10 +2049,15 @@ err_engine1:
static int stm32_cryp_remove(struct platform_device *pdev)
{
struct stm32_cryp *cryp = platform_get_drvdata(pdev);
+ int ret;
if (!cryp)
return -ENODEV;
+ ret = pm_runtime_get_sync(cryp->dev);
+ if (ret < 0)
+ return ret;
+
crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
@@ -2041,16 +2067,52 @@ static int stm32_cryp_remove(struct platform_device *pdev)
list_del(&cryp->list);
spin_unlock(&cryp_list.lock);
+ pm_runtime_disable(cryp->dev);
+ pm_runtime_put_noidle(cryp->dev);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_cryp_runtime_suspend(struct device *dev)
+{
+ struct stm32_cryp *cryp = dev_get_drvdata(dev);
+
clk_disable_unprepare(cryp->clk);
return 0;
}
+static int stm32_cryp_runtime_resume(struct device *dev)
+{
+ struct stm32_cryp *cryp = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(cryp->clk);
+ if (ret) {
+ dev_err(cryp->dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_cryp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_cryp_runtime_suspend,
+ stm32_cryp_runtime_resume, NULL)
+};
+
static struct platform_driver stm32_cryp_driver = {
.probe = stm32_cryp_probe,
.remove = stm32_cryp_remove,
.driver = {
.name = DRIVER_NAME,
+ .pm = &stm32_cryp_pm_ops,
.of_match_table = stm32_dt_ids,
},
};
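
The stm32-cryp changes above (and the stm32-hash and stm32_crc32 ones below) all follow the same runtime-PM recipe: configure autosuspend with a 50 ms delay during probe while holding a noresume reference, take pm_runtime_get_sync() around every hardware access, and release with mark_last_busy + put_autosuspend so the clock is only gated once the device has been idle for the delay. A kernel-context sketch of that recipe in isolation (the sketch_* names and the 50 ms value mirror these drivers but are illustrative):

#include <linux/pm_runtime.h>

#define SKETCH_AUTOSUSPEND_DELAY_MS 50

/* Probe side: the clock is already on here, so mark the device
 * active before enabling runtime PM, and keep a reference until
 * registration is finished. */
static void sketch_pm_setup(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, SKETCH_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);	/* hold the device through probe */
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	/* ... register algorithms ..., then pm_runtime_put_sync(dev); */
}

/* Around each hardware access window: */
static void sketch_pm_use(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* resume (clock on) if suspended */
	/* ... touch registers ... */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev); /* suspend after the delay elapses */
}

The matching runtime_suspend/runtime_resume callbacks then reduce to clk_disable_unprepare()/clk_prepare_enable(), exactly as in the stm32_cryp_runtime_* functions above.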
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index cdc96f1bb917..590d7352837e 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <crypto/engine.h>
@@ -121,6 +122,8 @@ enum stm32_hash_data_format {
#define HASH_QUEUE_LENGTH 16
#define HASH_DMA_THRESHOLD 50
+#define HASH_AUTOSUSPEND_DELAY 50
+
struct stm32_hash_ctx {
struct crypto_engine_ctx enginectx;
struct stm32_hash_dev *hdev;
@@ -814,12 +817,17 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
rctx->flags |= HASH_FLAGS_ERRORS;
}
+ pm_runtime_mark_last_busy(hdev->dev);
+ pm_runtime_put_autosuspend(hdev->dev);
+
crypto_finalize_hash_request(hdev->engine, req, err);
}
static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
struct stm32_hash_request_ctx *rctx)
{
+ pm_runtime_get_sync(hdev->dev);
+
if (!(HASH_FLAGS_INIT & hdev->flags)) {
stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
stm32_hash_write(hdev, HASH_STR, 0);
@@ -967,6 +975,8 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
u32 *preg;
unsigned int i;
+ pm_runtime_get_sync(hdev->dev);
+
while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
cpu_relax();
@@ -982,6 +992,9 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+ pm_runtime_mark_last_busy(hdev->dev);
+ pm_runtime_put_autosuspend(hdev->dev);
+
memcpy(out, rctx, sizeof(*rctx));
return 0;
@@ -1000,6 +1013,8 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
preg = rctx->hw_context;
+ pm_runtime_get_sync(hdev->dev);
+
stm32_hash_write(hdev, HASH_IMR, *preg++);
stm32_hash_write(hdev, HASH_STR, *preg++);
stm32_hash_write(hdev, HASH_CR, *preg);
@@ -1009,6 +1024,9 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
stm32_hash_write(hdev, HASH_CSR(i), *preg++);
+ pm_runtime_mark_last_busy(hdev->dev);
+ pm_runtime_put_autosuspend(hdev->dev);
+
kfree(rctx->hw_context);
return 0;
@@ -1132,8 +1150,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "md5",
.cra_driver_name = "stm32-md5",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1159,8 +1176,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "hmac(md5)",
.cra_driver_name = "stm32-hmac-md5",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1185,8 +1201,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "sha1",
.cra_driver_name = "stm32-sha1",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1212,8 +1227,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "stm32-hmac-sha1",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1241,8 +1255,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha224",
.cra_driver_name = "stm32-sha224",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1268,8 +1281,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "stm32-hmac-sha224",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1294,8 +1306,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "stm32-sha256",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1321,8 +1332,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "stm32-hmac-sha256",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1482,6 +1492,13 @@ static int stm32_hash_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
if (!IS_ERR(hdev->rst)) {
reset_control_assert(hdev->rst);
@@ -1522,6 +1539,8 @@ static int stm32_hash_probe(struct platform_device *pdev)
dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
+ pm_runtime_put_sync(dev);
+
return 0;
err_algs:
@@ -1535,6 +1554,9 @@ err_engine:
if (hdev->dma_lch)
dma_release_channel(hdev->dma_lch);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
clk_disable_unprepare(hdev->clk);
return ret;
@@ -1543,11 +1565,16 @@ err_engine:
static int stm32_hash_remove(struct platform_device *pdev)
{
static struct stm32_hash_dev *hdev;
+ int ret;
hdev = platform_get_drvdata(pdev);
if (!hdev)
return -ENODEV;
+ ret = pm_runtime_get_sync(hdev->dev);
+ if (ret < 0)
+ return ret;
+
stm32_hash_unregister_algs(hdev);
crypto_engine_exit(hdev->engine);
@@ -1559,16 +1586,52 @@ static int stm32_hash_remove(struct platform_device *pdev)
if (hdev->dma_lch)
dma_release_channel(hdev->dma_lch);
+ pm_runtime_disable(hdev->dev);
+ pm_runtime_put_noidle(hdev->dev);
+
clk_disable_unprepare(hdev->clk);
return 0;
}
+#ifdef CONFIG_PM
+static int stm32_hash_runtime_suspend(struct device *dev)
+{
+ struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(hdev->clk);
+
+ return 0;
+}
+
+static int stm32_hash_runtime_resume(struct device *dev)
+{
+ struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(hdev->clk);
+ if (ret) {
+ dev_err(hdev->dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_hash_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
+ stm32_hash_runtime_resume, NULL)
+};
+
static struct platform_driver stm32_hash_driver = {
.probe = stm32_hash_probe,
.remove = stm32_hash_remove,
.driver = {
.name = "stm32-hash",
+ .pm = &stm32_hash_pm_ops,
.of_match_table = stm32_hash_of_match,
}
};
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index 8f09b8430893..5f3242a246fc 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -6,8 +6,10 @@
#include <linux/bitrev.h>
#include <linux/clk.h>
+#include <linux/crc32poly.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <crypto/internal/hash.h>
@@ -28,9 +30,7 @@
#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
#define CRC_INIT_DEFAULT 0xFFFFFFFF
-/* Polynomial reversed */
-#define POLY_CRC32 0xEDB88320
-#define POLY_CRC32C 0x82F63B78
+#define CRC_AUTOSUSPEND_DELAY 50
struct stm32_crc {
struct list_head list;
@@ -66,7 +66,7 @@ static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = CRC_INIT_DEFAULT;
- mctx->poly = POLY_CRC32;
+ mctx->poly = CRC32_POLY_LE;
return 0;
}
@@ -75,7 +75,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = CRC_INIT_DEFAULT;
- mctx->poly = POLY_CRC32C;
+ mctx->poly = CRC32C_POLY_LE;
return 0;
}
@@ -106,6 +106,8 @@ static int stm32_crc_init(struct shash_desc *desc)
}
spin_unlock_bh(&crc_list.lock);
+ pm_runtime_get_sync(ctx->crc->dev);
+
/* Reset, set key, poly and configure in bit reverse mode */
writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
@@ -115,6 +117,9 @@ static int stm32_crc_init(struct shash_desc *desc)
ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
ctx->crc->nb_pending_bytes = 0;
+ pm_runtime_mark_last_busy(ctx->crc->dev);
+ pm_runtime_put_autosuspend(ctx->crc->dev);
+
return 0;
}
@@ -126,6 +131,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
u32 *d32;
unsigned int i;
+ pm_runtime_get_sync(crc->dev);
+
if (unlikely(crc->nb_pending_bytes)) {
while (crc->nb_pending_bytes != sizeof(u32) && length) {
/* Fill in pending data */
@@ -149,6 +156,9 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
+ pm_runtime_mark_last_busy(crc->dev);
+ pm_runtime_put_autosuspend(crc->dev);
+
/* Check for pending data (non 32 bits) */
length &= 3;
if (likely(!length))
@@ -174,7 +184,7 @@ static int stm32_crc_final(struct shash_desc *desc, u8 *out)
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
/* Send computed CRC */
- put_unaligned_le32(mctx->poly == POLY_CRC32C ?
+ put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
~ctx->partial : ctx->partial, out);
return 0;
@@ -272,6 +282,13 @@ static int stm32_crc_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
platform_set_drvdata(pdev, crc);
spin_lock(&crc_list.lock);
@@ -287,12 +304,18 @@ static int stm32_crc_probe(struct platform_device *pdev)
dev_info(dev, "Initialized\n");
+ pm_runtime_put_sync(dev);
+
return 0;
}
static int stm32_crc_remove(struct platform_device *pdev)
{
struct stm32_crc *crc = platform_get_drvdata(pdev);
+ int ret = pm_runtime_get_sync(crc->dev);
+
+ if (ret < 0)
+ return ret;
spin_lock(&crc_list.lock);
list_del(&crc->list);
@@ -300,11 +323,46 @@ static int stm32_crc_remove(struct platform_device *pdev)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+ pm_runtime_disable(crc->dev);
+ pm_runtime_put_noidle(crc->dev);
+
+ clk_disable_unprepare(crc->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_crc_runtime_suspend(struct device *dev)
+{
+ struct stm32_crc *crc = dev_get_drvdata(dev);
+
clk_disable_unprepare(crc->clk);
return 0;
}
+static int stm32_crc_runtime_resume(struct device *dev)
+{
+ struct stm32_crc *crc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(crc->clk);
+ if (ret) {
+ dev_err(crc->dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_crc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
+ stm32_crc_runtime_resume, NULL)
+};
+
static const struct of_device_id stm32_dt_ids[] = {
{ .compatible = "st,stm32f7-crc", },
{},
@@ -316,6 +374,7 @@ static struct platform_driver stm32_crc_driver = {
.remove = stm32_crc_remove,
.driver = {
.name = DRIVER_NAME,
+ .pm = &stm32_crc_pm_ops,
.of_match_table = stm32_dt_ids,
},
};
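
The stm32_crc32 hunks above replace the driver's private POLY_CRC32/POLY_CRC32C definitions with the shared CRC32_POLY_LE/CRC32C_POLY_LE constants from <linux/crc32poly.h>; the values are the bit-reversed (LSB-first) forms of the IEEE 802.3 and Castagnoli polynomials, matching the hardware's bit-reverse mode. A standalone C sketch showing how the reversed constant drives an LSB-first CRC, checked against the standard test vector:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CRC32_POLY_LE  0xedb88320u	/* IEEE 802.3, bit-reversed */
#define CRC32C_POLY_LE 0x82f63b78u	/* Castagnoli, bit-reversed */

/* LSB-first (reflected) CRC: shift right, conditionally XOR the
 * reversed polynomial on each bit. */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len,
			 uint32_t poly)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? poly : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t msg[] = "123456789";

	/* The CRC-32/IEEE check value for "123456789" is 0xcbf43926. */
	printf("%08x\n", ~crc32_le(~0u, msg, 9, CRC32_POLY_LE));
	return 0;
}

The final-XOR difference visible in stm32_crc_final() (CRC32C inverts the result, plain CRC32 in this driver does not) is a property of the algorithm variants, not of the polynomial constants themselves.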
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index a81d89b3b7d8..89adf9e0fed2 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -45,11 +45,9 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "md5-sun4i-ss",
.cra_priority = 300,
.cra_alignmask = 3,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
- .cra_type = &crypto_ahash_type,
.cra_init = sun4i_hash_crainit
}
}
@@ -73,11 +71,9 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "sha1-sun4i-ss",
.cra_priority = 300,
.cra_alignmask = 3,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
- .cra_type = &crypto_ahash_type,
.cra_init = sun4i_hash_crainit
}
}
@@ -96,8 +92,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -118,8 +113,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -140,8 +134,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-des-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -161,8 +154,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-des-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -183,8 +175,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-des3-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -205,7 +196,6 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-des3-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index cf14f099ce4a..6988012deca4 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2822,8 +2822,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-talitos",
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2838,8 +2837,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "sha1-talitos",
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2854,8 +2852,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha224",
.cra_driver_name = "sha224-talitos",
.cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2870,8 +2867,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha256",
.cra_driver_name = "sha256-talitos",
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2886,8 +2882,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha384",
.cra_driver_name = "sha384-talitos",
.cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2902,8 +2897,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha512",
.cra_driver_name = "sha512-talitos",
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2918,8 +2912,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(md5)",
.cra_driver_name = "hmac-md5-talitos",
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2934,8 +2927,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac-sha1-talitos",
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2950,8 +2942,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "hmac-sha224-talitos",
.cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2966,8 +2957,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac-sha256-talitos",
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2982,8 +2972,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "hmac-sha384-talitos",
.cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2998,8 +2987,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "hmac-sha512-talitos",
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -3186,7 +3174,6 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg = &t_alg->algt.alg.hash.halg.base;
alg->cra_init = talitos_cra_init_ahash;
alg->cra_exit = talitos_cra_exit;
- alg->cra_type = &crypto_ahash_type;
t_alg->algt.alg.hash.init = ahash_init;
t_alg->algt.alg.hash.update = ahash_update;
t_alg->algt.alg.hash.final = ahash_final;
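
The same cleanup applies to the talitos hash entries above:
crypto_register_ahash() sets CRYPTO_ALG_TYPE_AHASH and the ahash cra_type
internally, leaving only CRYPTO_ALG_ASYNC meaningful in .cra_flags. A
hedged sketch with invented my_* callbacks:

	static struct ahash_alg my_hash = {
		.init	= my_init,
		.update	= my_update,
		.final	= my_final,
		.halg.base = {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-mydrv",
			/* no CRYPTO_ALG_TYPE_AHASH, no .cra_type */
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_module		= THIS_MODULE,
		},
	};

	/* ret = crypto_register_ahash(&my_hash); */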
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 2d0a677bcc76..daf4fed0df8c 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1524,8 +1524,7 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
.cra_init = hash_cra_init,
@@ -1548,11 +1547,9 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
- .cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
@@ -1574,11 +1571,9 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac-sha1-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
- .cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
@@ -1600,11 +1595,9 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac-sha256-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
- .cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index af6a908dfa7a..2c573d1aaa64 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -49,12 +49,18 @@ struct virtio_crypto_sym_request {
bool encrypt;
};
+struct virtio_crypto_algo {
+ uint32_t algonum;
+ uint32_t service;
+ unsigned int active_devs;
+ struct crypto_alg algo;
+};
+
/*
* The algs_lock protects the below global virtio_crypto_algs
* and crypto algorithms registration.
*/
static DEFINE_MUTEX(algs_lock);
-static unsigned int virtio_crypto_active_devs;
static void virtio_crypto_ablkcipher_finalize_req(
struct virtio_crypto_sym_request *vc_sym_req,
struct ablkcipher_request *req,
@@ -312,15 +318,21 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
unsigned int keylen)
{
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ uint32_t alg;
int ret;
+ ret = virtio_crypto_alg_validate_key(keylen, &alg);
+ if (ret)
+ return ret;
+
if (!ctx->vcrypto) {
/* New key */
int node = virtio_crypto_get_current_node();
struct virtio_crypto *vcrypto =
- virtcrypto_get_dev_node(node);
+ virtcrypto_get_dev_node(node,
+ VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
if (!vcrypto) {
- pr_err("virtio_crypto: Could not find a virtio device in the system\n");
+ pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
return -ENODEV;
}
@@ -371,12 +383,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
/* Why 3? outhdr + iv + inhdr */
sg_total = src_nents + dst_nents + 3;
- sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_ATOMIC,
+ sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
dev_to_node(&vcrypto->vdev->dev));
if (!sgs)
return -ENOMEM;
- req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
+ req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
dev_to_node(&vcrypto->vdev->dev));
if (!req_data) {
kfree(sgs);
@@ -571,57 +583,85 @@ static void virtio_crypto_ablkcipher_finalize_req(
virtcrypto_clear_request(&vc_sym_req->base);
}
-static struct crypto_alg virtio_crypto_algs[] = { {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "virtio_crypto_aes_cbc",
- .cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = virtio_crypto_ablkcipher_init,
- .cra_exit = virtio_crypto_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = virtio_crypto_ablkcipher_setkey,
- .decrypt = virtio_crypto_ablkcipher_decrypt,
- .encrypt = virtio_crypto_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+static struct virtio_crypto_algo virtio_crypto_algs[] = { {
+ .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
+ .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
+ .algo = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "virtio_crypto_aes_cbc",
+ .cra_priority = 150,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = virtio_crypto_ablkcipher_init,
+ .cra_exit = virtio_crypto_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .setkey = virtio_crypto_ablkcipher_setkey,
+ .decrypt = virtio_crypto_ablkcipher_decrypt,
+ .encrypt = virtio_crypto_ablkcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
},
},
} };
-int virtio_crypto_algs_register(void)
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
int ret = 0;
+ int i = 0;
mutex_lock(&algs_lock);
- if (++virtio_crypto_active_devs != 1)
- goto unlock;
- ret = crypto_register_algs(virtio_crypto_algs,
- ARRAY_SIZE(virtio_crypto_algs));
- if (ret)
- virtio_crypto_active_devs--;
+ for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+ uint32_t service = virtio_crypto_algs[i].service;
+ uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+ if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
+ continue;
+
+ if (virtio_crypto_algs[i].active_devs == 0) {
+ ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
+ if (ret)
+ goto unlock;
+ }
+
+ virtio_crypto_algs[i].active_devs++;
+ dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
+ virtio_crypto_algs[i].algo.cra_name);
+ }
unlock:
mutex_unlock(&algs_lock);
return ret;
}
-void virtio_crypto_algs_unregister(void)
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
+ int i = 0;
+
mutex_lock(&algs_lock);
- if (--virtio_crypto_active_devs != 0)
- goto unlock;
- crypto_unregister_algs(virtio_crypto_algs,
- ARRAY_SIZE(virtio_crypto_algs));
+ for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+ uint32_t service = virtio_crypto_algs[i].service;
+ uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+ if (virtio_crypto_algs[i].active_devs == 0 ||
+ !virtcrypto_algo_is_supported(vcrypto, service, algonum))
+ continue;
+
+ if (virtio_crypto_algs[i].active_devs == 1)
+ crypto_unregister_alg(&virtio_crypto_algs[i].algo);
+
+ virtio_crypto_algs[i].active_devs--;
+ }
-unlock:
mutex_unlock(&algs_lock);
}
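
The rework above replaces the single virtio_crypto_active_devs counter
with a per-algorithm active_devs count: an algorithm is registered with
the crypto API when the first device that supports it shows up and
unregistered when the last one goes away. The first-user/last-user
pattern, condensed into a sketch with an illustrative my_alg:

	static DEFINE_MUTEX(lock);
	static unsigned int active_devs;

	static int my_register(void)
	{
		int ret = 0;

		mutex_lock(&lock);
		if (active_devs++ == 0)		/* first device registers */
			ret = crypto_register_alg(&my_alg);
		if (ret)
			active_devs--;
		mutex_unlock(&lock);
		return ret;
	}

	static void my_unregister(void)
	{
		mutex_lock(&lock);
		if (--active_devs == 0)		/* last device unregisters */
			crypto_unregister_alg(&my_alg);
		mutex_unlock(&lock);
	}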
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 66501a5a2b7b..63ef7f7924ea 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -55,6 +55,20 @@ struct virtio_crypto {
/* Number of queue currently used by the driver */
u32 curr_queue;
+ /*
+ * Specifies the services mask which the device support,
+ * see VIRTIO_CRYPTO_SERVICE_*
+ */
+ u32 crypto_services;
+
+ /* Detailed algorithms mask */
+ u32 cipher_algo_l;
+ u32 cipher_algo_h;
+ u32 hash_algo;
+ u32 mac_algo_l;
+ u32 mac_algo_h;
+ u32 aead_algo;
+
/* Maximum length of cipher key */
u32 max_cipher_key_len;
/* Maximum length of authenticated key */
@@ -102,7 +116,12 @@ int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
-struct virtio_crypto *virtcrypto_get_dev_node(int node);
+bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
+ uint32_t service,
+ uint32_t algo);
+struct virtio_crypto *virtcrypto_get_dev_node(int node,
+ uint32_t service,
+ uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_ablkcipher_crypt_req(
@@ -122,7 +141,7 @@ static inline int virtio_crypto_get_current_node(void)
return node;
}
-int virtio_crypto_algs_register(void);
-void virtio_crypto_algs_unregister(void);
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
#endif /* _VIRTIO_CRYPTO_COMMON_H */
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 83326986c113..3c9e120287af 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -146,7 +146,7 @@ static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
if (vi->affinity_hint_set) {
for (i = 0; i < vi->max_data_queues; i++)
- virtqueue_set_affinity(vi->data_vq[i].vq, -1);
+ virtqueue_set_affinity(vi->data_vq[i].vq, NULL);
vi->affinity_hint_set = false;
}
@@ -173,7 +173,7 @@ static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
*
*/
for_each_online_cpu(cpu) {
- virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu);
+ virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
if (++i >= vcrypto->max_data_queues)
break;
}
@@ -303,6 +303,13 @@ static int virtcrypto_probe(struct virtio_device *vdev)
u32 max_data_queues = 0, max_cipher_key_len = 0;
u32 max_auth_key_len = 0;
u64 max_size = 0;
+ u32 cipher_algo_l = 0;
+ u32 cipher_algo_h = 0;
+ u32 hash_algo = 0;
+ u32 mac_algo_l = 0;
+ u32 mac_algo_h = 0;
+ u32 aead_algo = 0;
+ u32 crypto_services = 0;
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
return -ENODEV;
@@ -339,6 +346,20 @@ static int virtcrypto_probe(struct virtio_device *vdev)
max_auth_key_len, &max_auth_key_len);
virtio_cread(vdev, struct virtio_crypto_config,
max_size, &max_size);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ crypto_services, &crypto_services);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ cipher_algo_l, &cipher_algo_l);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ cipher_algo_h, &cipher_algo_h);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ hash_algo, &hash_algo);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ mac_algo_l, &mac_algo_l);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ mac_algo_h, &mac_algo_h);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ aead_algo, &aead_algo);
/* Add virtio crypto device to global table */
err = virtcrypto_devmgr_add_dev(vcrypto);
@@ -358,6 +379,14 @@ static int virtcrypto_probe(struct virtio_device *vdev)
vcrypto->max_cipher_key_len = max_cipher_key_len;
vcrypto->max_auth_key_len = max_auth_key_len;
vcrypto->max_size = max_size;
+ vcrypto->crypto_services = crypto_services;
+ vcrypto->cipher_algo_l = cipher_algo_l;
+ vcrypto->cipher_algo_h = cipher_algo_h;
+ vcrypto->mac_algo_l = mac_algo_l;
+ vcrypto->mac_algo_h = mac_algo_h;
+ vcrypto->hash_algo = hash_algo;
+ vcrypto->aead_algo = aead_algo;
+
dev_info(&vdev->dev,
"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
index a69ff71de2c4..d70de3a4f7d7 100644
--- a/drivers/crypto/virtio/virtio_crypto_mgr.c
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -181,14 +181,20 @@ int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
/*
* virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
* @node: Node id the driver works.
+ * @service: Crypto service that needs to be supported by the dev
+ * @algo: The algorithm number that needs to be supported by the dev
*
- * Function returns the virtio crypto device used fewest on the node.
+ * Function returns the least-used virtio crypto device on the node
+ * which supports the given crypto service and algorithm.
*
* To be used by virtio crypto device specific drivers.
*
* Return: pointer to vcrypto_dev or NULL if not found.
*/
-struct virtio_crypto *virtcrypto_get_dev_node(int node)
+struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
+ uint32_t algo)
{
struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
unsigned long best = ~0;
@@ -199,7 +205,8 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node)
if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
dev_to_node(&tmp_dev->vdev->dev) < 0) &&
- virtcrypto_dev_started(tmp_dev)) {
+ virtcrypto_dev_started(tmp_dev) &&
+ virtcrypto_algo_is_supported(tmp_dev, service, algo)) {
ctr = atomic_read(&tmp_dev->ref_count);
if (best > ctr) {
vcrypto_dev = tmp_dev;
@@ -214,7 +221,9 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node)
/* Get any started device */
list_for_each_entry(tmp_dev,
virtcrypto_devmgr_get_head(), list) {
- if (virtcrypto_dev_started(tmp_dev)) {
+ if (virtcrypto_dev_started(tmp_dev) &&
+ virtcrypto_algo_is_supported(tmp_dev,
+ service, algo)) {
vcrypto_dev = tmp_dev;
break;
}
@@ -240,7 +249,7 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node)
*/
int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
{
- if (virtio_crypto_algs_register()) {
+ if (virtio_crypto_algs_register(vcrypto)) {
pr_err("virtio_crypto: Failed to register crypto algs\n");
return -EFAULT;
}
@@ -260,5 +269,65 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
*/
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
{
- virtio_crypto_algs_unregister();
+ virtio_crypto_algs_unregister(vcrypto);
+}
+
+/*
+ * virtcrypto_algo_is_supported()
+ * @vcrypto: Pointer to virtio crypto device.
+ * @service: The bit number for the service to validate.
+ * See VIRTIO_CRYPTO_SERVICE_*
+ * @algo: The bit number for the algorithm to validate.
+ *
+ * Validate whether the virtio crypto device supports a service and
+ * algorithm.
+ *
+ * Return: true if the device supports the service and algorithm.
+ */
+bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
+ uint32_t service,
+ uint32_t algo)
+{
+ uint32_t service_mask = 1u << service;
+ uint32_t algo_mask = 0;
+ bool low = true;
+
+ if (algo > 31) {
+ algo -= 32;
+ low = false;
+ }
+
+ if (!(vcrypto->crypto_services & service_mask))
+ return false;
+
+ switch (service) {
+ case VIRTIO_CRYPTO_SERVICE_CIPHER:
+ if (low)
+ algo_mask = vcrypto->cipher_algo_l;
+ else
+ algo_mask = vcrypto->cipher_algo_h;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_HASH:
+ algo_mask = vcrypto->hash_algo;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_MAC:
+ if (low)
+ algo_mask = vcrypto->mac_algo_l;
+ else
+ algo_mask = vcrypto->mac_algo_h;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_AEAD:
+ algo_mask = vcrypto->aead_algo;
+ break;
+ }
+
+ if (!(algo_mask & (1u << algo)))
+ return false;
+
+ return true;
}
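
virtcrypto_algo_is_supported() treats the algorithm number as a bit index
into a 64-bit capability mask that the device exposes as two 32-bit config
words (bits 0-31 in the *_algo_l word, bits 32-63 in *_algo_h). The same
split, rendered as a tiny standalone helper for illustration:

	#include <stdbool.h>
	#include <stdint.h>

	static bool algo_in_mask(uint32_t lo, uint32_t hi, uint32_t algo)
	{
		if (algo > 31)				/* high word */
			return hi & (1u << (algo - 32));
		return lo & (1u << algo);		/* low word */
	}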
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 1c4b5b889fba..dd8b8716467a 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -215,7 +215,7 @@ struct shash_alg p8_ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "p8_ghash",
.cra_priority = 1000,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct p8_ghash_ctx),
.cra_module = THIS_MODULE,
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 0b5b3abe054e..4c49bb1330b5 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -604,28 +604,29 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_lock(&devfreq->lock);
}
- devfreq->min_freq = find_available_min_freq(devfreq);
- if (!devfreq->min_freq) {
+ devfreq->scaling_min_freq = find_available_min_freq(devfreq);
+ if (!devfreq->scaling_min_freq) {
mutex_unlock(&devfreq->lock);
err = -EINVAL;
goto err_dev;
}
- devfreq->scaling_min_freq = devfreq->min_freq;
+ devfreq->min_freq = devfreq->scaling_min_freq;
- devfreq->max_freq = find_available_max_freq(devfreq);
- if (!devfreq->max_freq) {
+ devfreq->scaling_max_freq = find_available_max_freq(devfreq);
+ if (!devfreq->scaling_max_freq) {
mutex_unlock(&devfreq->lock);
err = -EINVAL;
goto err_dev;
}
- devfreq->scaling_max_freq = devfreq->max_freq;
+ devfreq->max_freq = devfreq->scaling_max_freq;
dev_set_name(&devfreq->dev, "devfreq%d",
atomic_inc_return(&devfreq_no));
err = device_register(&devfreq->dev);
if (err) {
mutex_unlock(&devfreq->lock);
- goto err_dev;
+ put_device(&devfreq->dev);
+ goto err_out;
}
devfreq->trans_table =
@@ -672,6 +673,7 @@ err_init:
mutex_unlock(&devfreq_list_lock);
device_unregister(&devfreq->dev);
+ devfreq = NULL;
err_dev:
if (devfreq)
kfree(devfreq);
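
The devfreq error-path fix encodes a driver-core rule: once
device_register() has been called, the struct device is live even when
registration fails, so the caller must drop the reference with
put_device() (whose release callback frees the object) instead of calling
kfree() on it. The shape of the pattern, with an illustrative obj:

	err = device_register(&obj->dev);
	if (err) {
		/* dev is initialized: put_device() ends up freeing obj */
		put_device(&obj->dev);
		return err;		/* do NOT kfree(obj) here */
	}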
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 3cd6a184fe7c..a9c64f0d3284 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -627,11 +627,9 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
size = sizeof(struct devfreq_event_dev *) * info->num_events;
info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!info->edev) {
- dev_err(&pdev->dev,
- "failed to allocate memory devfreq-event devices\n");
+ if (!info->edev)
return -ENOMEM;
- }
+
edev = info->edev;
platform_set_drvdata(pdev, info);
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 5dfbfa3cc878..e795ad2b3f6b 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -68,15 +68,6 @@ struct rk3399_dmcfreq {
struct devfreq_event_dev *edev;
struct mutex lock;
struct dram_timing timing;
-
- /*
- * DDR Converser of Frequency (DCF) is used to implement DDR frequency
- * conversion without the participation of CPU, we will implement and
- * control it in arm trust firmware.
- */
- wait_queue_head_t wait_dcf_queue;
- int irq;
- int wait_dcf_flag;
struct regulator *vdd_center;
unsigned long rate, target_rate;
unsigned long volt, target_volt;
@@ -112,31 +103,22 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
target_volt);
if (err) {
- dev_err(dev, "Cannot to set voltage %lu uV\n",
+ dev_err(dev, "Cannot set voltage %lu uV\n",
target_volt);
goto out;
}
}
- dmcfreq->wait_dcf_flag = 1;
err = clk_set_rate(dmcfreq->dmc_clk, target_rate);
if (err) {
- dev_err(dev, "Cannot to set frequency %lu (%d)\n",
- target_rate, err);
+ dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
+ err);
regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
dmcfreq->volt);
goto out;
}
/*
- * Wait until bcf irq happen, it means freq scaling finish in
- * arm trust firmware, use 100ms as timeout time.
- */
- if (!wait_event_timeout(dmcfreq->wait_dcf_queue,
- !dmcfreq->wait_dcf_flag, HZ / 10))
- dev_warn(dev, "Timeout waiting for dcf interrupt\n");
-
- /*
* Check the dpll rate.
* There are only two results we can get:
* 1. DDR frequency scaling failed: we still get the old rate.
@@ -146,8 +128,8 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
/* If get the incorrect rate, set voltage to old value. */
if (dmcfreq->rate != target_rate) {
- dev_err(dev, "Get wrong ddr frequency, Request frequency %lu,\
- Current frequency %lu\n", target_rate, dmcfreq->rate);
+ dev_err(dev, "Got wrong frequency, Request %lu, Current %lu\n",
+ target_rate, dmcfreq->rate);
regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
dmcfreq->volt);
goto out;
@@ -155,7 +137,7 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
target_volt);
if (err)
- dev_err(dev, "Cannot to set vol %lu uV\n", target_volt);
+ dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
dmcfreq->rate = target_rate;
dmcfreq->volt = target_volt;
@@ -241,22 +223,6 @@ static __maybe_unused int rk3399_dmcfreq_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rk3399_dmcfreq_pm, rk3399_dmcfreq_suspend,
rk3399_dmcfreq_resume);
-static irqreturn_t rk3399_dmc_irq(int irq, void *dev_id)
-{
- struct rk3399_dmcfreq *dmcfreq = dev_id;
- struct arm_smccc_res res;
-
- dmcfreq->wait_dcf_flag = 0;
- wake_up(&dmcfreq->wait_dcf_queue);
-
- /* Clear the DCF interrupt */
- arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
- ROCKCHIP_SIP_CONFIG_DRAM_CLR_IRQ,
- 0, 0, 0, 0, &res);
-
- return IRQ_HANDLED;
-}
-
static int of_get_ddr_timings(struct dram_timing *timing,
struct device_node *np)
{
@@ -330,16 +296,10 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct rk3399_dmcfreq *data;
- int ret, irq, index, size;
+ int ret, index, size;
uint32_t *timing;
struct dev_pm_opp *opp;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev,
- "Cannot get the dmc interrupt resource: %d\n", irq);
- return irq;
- }
data = devm_kzalloc(dev, sizeof(struct rk3399_dmcfreq), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -348,27 +308,22 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
data->vdd_center = devm_regulator_get(dev, "center");
if (IS_ERR(data->vdd_center)) {
+ if (PTR_ERR(data->vdd_center) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
dev_err(dev, "Cannot get the regulator \"center\"\n");
return PTR_ERR(data->vdd_center);
}
data->dmc_clk = devm_clk_get(dev, "dmc_clk");
if (IS_ERR(data->dmc_clk)) {
+ if (PTR_ERR(data->dmc_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
dev_err(dev, "Cannot get the clk dmc_clk\n");
return PTR_ERR(data->dmc_clk);
}
- data->irq = irq;
- ret = devm_request_irq(dev, irq, rk3399_dmc_irq, 0,
- dev_name(dev), data);
- if (ret) {
- dev_err(dev, "Failed to request dmc irq: %d\n", ret);
- return ret;
- }
-
- init_waitqueue_head(&data->wait_dcf_queue);
- data->wait_dcf_flag = 0;
-
data->edev = devfreq_event_get_edev_by_phandle(dev, 0);
if (IS_ERR(data->edev))
return -EPROBE_DEFER;
@@ -420,8 +375,10 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
data->rate = clk_get_rate(data->dmc_clk);
opp = devfreq_recommended_opp(dev, &data->rate, 0);
- if (IS_ERR(opp))
- return PTR_ERR(opp);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto err_free_opp;
+ }
data->rate = dev_pm_opp_get_freq(opp);
data->volt = dev_pm_opp_get_voltage(opp);
@@ -433,14 +390,34 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
&rk3399_devfreq_dmc_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
&data->ondemand_data);
- if (IS_ERR(data->devfreq))
- return PTR_ERR(data->devfreq);
+ if (IS_ERR(data->devfreq)) {
+ ret = PTR_ERR(data->devfreq);
+ goto err_free_opp;
+ }
+
devm_devfreq_register_opp_notifier(dev, data->devfreq);
data->dev = dev;
platform_set_drvdata(pdev, data);
return 0;
+
+err_free_opp:
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ return ret;
+}
+
+static int rk3399_dmcfreq_remove(struct platform_device *pdev)
+{
+ struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev);
+
+ /*
+ * Before removing the OPP table we need to unregister the OPP notifier.
+ */
+ devm_devfreq_unregister_opp_notifier(dmcfreq->dev, dmcfreq->devfreq);
+ dev_pm_opp_of_remove_table(dmcfreq->dev);
+
+ return 0;
}
static const struct of_device_id rk3399dmc_devfreq_of_match[] = {
@@ -451,6 +428,7 @@ MODULE_DEVICE_TABLE(of, rk3399dmc_devfreq_of_match);
static struct platform_driver rk3399_dmcfreq_driver = {
.probe = rk3399_dmcfreq_probe,
+ .remove = rk3399_dmcfreq_remove,
.driver = {
.name = "rk3399-dmc-freq",
.pm = &rk3399_dmcfreq_pm,
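
The new .remove callback and the err_free_opp label restore the usual
probe/remove symmetry: what probe sets up outside of devm (here the OPP
table) is torn down both on the probe error path and at driver removal. A
hedged outline, with setup_devfreq() standing in for the rest of probe:

	static int my_probe(struct platform_device *pdev)
	{
		int ret;

		ret = dev_pm_opp_of_add_table(&pdev->dev);
		if (ret)
			return ret;

		ret = setup_devfreq(pdev);	/* illustrative helper */
		if (ret)
			goto err_free_opp;

		return 0;

	err_free_opp:
		dev_pm_opp_of_remove_table(&pdev->dev);
		return ret;
	}

	static int my_remove(struct platform_device *pdev)
	{
		dev_pm_opp_of_remove_table(&pdev->dev);
		return 0;
	}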
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index d78d5fc173dc..13884474d158 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -405,7 +405,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
|| !exp_info->ops->map_dma_buf
|| !exp_info->ops->unmap_dma_buf
|| !exp_info->ops->release
- || !exp_info->ops->map_atomic
|| !exp_info->ops->map
|| !exp_info->ops->mmap)) {
return ERR_PTR(-EINVAL);
@@ -568,7 +567,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
mutex_lock(&dmabuf->lock);
if (dmabuf->ops->attach) {
- ret = dmabuf->ops->attach(dmabuf, dev, attach);
+ ret = dmabuf->ops->attach(dmabuf, attach);
if (ret)
goto err_attach;
}
@@ -687,26 +686,14 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
* void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
* void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
*
- * There are also atomic variants of these interfaces. Like for kmap they
- * facilitate non-blocking fast-paths. Neither the importer nor the exporter
- * (in the callback) is allowed to block when using these.
- *
- * Interfaces::
- * void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
- * void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
- *
- * For importers all the restrictions of using kmap apply, like the limited
- * supply of kmap_atomic slots. Hence an importer shall only hold onto at
- * max 2 atomic dma_buf kmaps at the same time (in any given process context).
+ * Implementing these functions is optional for exporters; for importers,
+ * all the restrictions of using kmap apply.
*
* dma_buf kmap calls outside of the range specified in begin_cpu_access are
* undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
* the partial chunks at the beginning and end but may return stale or bogus
* data outside of the range (in these partial chunks).
*
- * Note that these calls need to always succeed. The exporter needs to
- * complete any preparations that might fail in begin_cpu_access.
- *
* For some cases the overhead of kmap can be too high, a vmap interface
* is introduced. This interface should be used very carefully, as vmalloc
* space is a limited resource on many architectures.
@@ -860,41 +847,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
/**
- * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
- * space. The same restrictions as for kmap_atomic and friends apply.
- * @dmabuf: [in] buffer to map page from.
- * @page_num: [in] page in PAGE_SIZE units to map.
- *
- * This call must always succeed, any necessary preparations that might fail
- * need to be done in begin_cpu_access.
- */
-void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
-{
- WARN_ON(!dmabuf);
-
- return dmabuf->ops->map_atomic(dmabuf, page_num);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
-
-/**
- * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
- * @dmabuf: [in] buffer to unmap page from.
- * @page_num: [in] page in PAGE_SIZE units to unmap.
- * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic.
- *
- * This call must always succeed.
- */
-void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
- void *vaddr)
-{
- WARN_ON(!dmabuf);
-
- if (dmabuf->ops->unmap_atomic)
- dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
-
-/**
* dma_buf_kmap - Map a page of the buffer object into kernel address space. The
* same restrictions as for kmap and friends apply.
* @dmabuf: [in] buffer to map page from.
@@ -907,6 +859,8 @@ void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
WARN_ON(!dmabuf);
+ if (!dmabuf->ops->map)
+ return NULL;
return dmabuf->ops->map(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);
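
With .map now optional, dma_buf_kmap() may return NULL, and importers have
to cope instead of assuming the call always succeeds. A defensive caller
could look like this (the error code and fallback strategy are the
caller's choice):

	void *vaddr;

	vaddr = dma_buf_kmap(dmabuf, 0);
	if (!vaddr)
		return -ENXIO;	/* exporter has no kmap; fall back or fail */

	memcpy(out, vaddr, len);
	dma_buf_kunmap(dmabuf, 0, vaddr);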
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index dd1edfb27b61..a8c254497251 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -104,7 +104,6 @@ const struct dma_fence_ops dma_fence_array_ops = {
.get_timeline_name = dma_fence_array_get_timeline_name,
.enable_signaling = dma_fence_array_enable_signaling,
.signaled = dma_fence_array_signaled,
- .wait = dma_fence_default_wait,
.release = dma_fence_array_release,
};
EXPORT_SYMBOL(dma_fence_array_ops);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 4edb9fd3cf47..1551ca7df394 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -39,11 +39,42 @@ EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
/**
+ * DOC: DMA fences overview
+ *
+ * DMA fences, represented by &struct dma_fence, are the kernel internal
+ * synchronization primitive for DMA operations like GPU rendering, video
+ * encoding/decoding, or displaying buffers on a screen.
+ *
+ * A fence is initialized using dma_fence_init() and completed using
+ * dma_fence_signal(). Fences are associated with a context, allocated through
+ * dma_fence_context_alloc(), and all fences on the same context are
+ * fully ordered.
+ *
+ * Since the purpose of fences is to facilitate cross-device and
+ * cross-application synchronization, there are multiple ways to use one:
+ *
+ * - Individual fences can be exposed as a &sync_file, accessed as a file
+ * descriptor from userspace, created by calling sync_file_create(). This is
+ * called explicit fencing, since userspace passes around explicit
+ * synchronization points.
+ *
+ * - Some subsystems also have their own explicit fencing primitives, like
+ * &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
+ * fence to be updated.
+ *
+ * - Then there's also implicit fencing, where the synchronization points are
+ * implicitly passed around as part of shared &dma_buf instances. Such
+ * implicit fences are stored in &struct reservation_object through the
+ * &dma_buf.resv pointer.
+ */
+
+/**
* dma_fence_context_alloc - allocate an array of fence contexts
- * @num: [in] amount of contexts to allocate
+ * @num: amount of contexts to allocate
*
- * This function will return the first index of the number of fences allocated.
- * The fence context is used for setting fence->context to a unique number.
+ * This function will return the first index of the newly allocated range of
+ * fence contexts. The fence context is used for setting &dma_fence.context to a
+ * unique number by passing the context to dma_fence_init().
*/
u64 dma_fence_context_alloc(unsigned num)
{
@@ -59,10 +90,14 @@ EXPORT_SYMBOL(dma_fence_context_alloc);
* Signal completion for software callbacks on a fence, this will unblock
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
- * can only go from unsignaled to signaled state, it will only be effective
- * the first time.
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
*
- * Unlike dma_fence_signal, this function must be called with fence->lock held.
+ * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
+ * held.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
*/
int dma_fence_signal_locked(struct dma_fence *fence)
{
@@ -102,8 +137,11 @@ EXPORT_SYMBOL(dma_fence_signal_locked);
* Signal completion for software callbacks on a fence, this will unblock
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
- * can only go from unsignaled to signaled state, it will only be effective
- * the first time.
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
*/
int dma_fence_signal(struct dma_fence *fence)
{
@@ -136,9 +174,9 @@ EXPORT_SYMBOL(dma_fence_signal);
/**
* dma_fence_wait_timeout - sleep until the fence gets signaled
* or until timeout elapses
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
- * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
* remaining timeout in jiffies on success. Other error values may be
@@ -148,6 +186,8 @@ EXPORT_SYMBOL(dma_fence_signal);
* directly or indirectly (buf-mgr between reservation and committing)
* holds a reference to the fence, otherwise the fence might be
* freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait() and dma_fence_wait_any_timeout().
*/
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
@@ -158,12 +198,22 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
return -EINVAL;
trace_dma_fence_wait_start(fence);
- ret = fence->ops->wait(fence, intr, timeout);
+ if (fence->ops->wait)
+ ret = fence->ops->wait(fence, intr, timeout);
+ else
+ ret = dma_fence_default_wait(fence, intr, timeout);
trace_dma_fence_wait_end(fence);
return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);
+/**
+ * dma_fence_release - default release function for fences
+ * @kref: &dma_fence.refcount
+ *
+ * This is the default release function for &dma_fence. Drivers shouldn't call
+ * this directly, but instead call dma_fence_put().
+ */
void dma_fence_release(struct kref *kref)
{
struct dma_fence *fence =
@@ -181,6 +231,13 @@ void dma_fence_release(struct kref *kref)
}
EXPORT_SYMBOL(dma_fence_release);
+/**
+ * dma_fence_free - default release function for &dma_fence.
+ * @fence: fence to release
+ *
+ * This is the default implementation for &dma_fence_ops.release. It calls
+ * kfree_rcu() on @fence.
+ */
void dma_fence_free(struct dma_fence *fence)
{
kfree_rcu(fence, rcu);
@@ -189,10 +246,11 @@ EXPORT_SYMBOL(dma_fence_free);
/**
* dma_fence_enable_sw_signaling - enable signaling on fence
- * @fence: [in] the fence to enable
+ * @fence: the fence to enable
*
- * this will request for sw signaling to be enabled, to make the fence
- * complete as soon as possible
+ * This will request sw signaling to be enabled, to make the fence
+ * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
+ * internally.
*/
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
@@ -200,7 +258,8 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+ fence->ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
spin_lock_irqsave(fence->lock, flags);
@@ -216,24 +275,24 @@ EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
/**
* dma_fence_add_callback - add a callback to be called when the fence
* is signaled
- * @fence: [in] the fence to wait on
- * @cb: [in] the callback to register
- * @func: [in] the function to call
+ * @fence: the fence to wait on
+ * @cb: the callback to register
+ * @func: the function to call
*
- * cb will be initialized by dma_fence_add_callback, no initialization
+ * @cb will be initialized by dma_fence_add_callback(), no initialization
* by the caller is required. Any number of callbacks can be registered
* to a fence, but a callback can only be registered to one fence at a time.
*
* Note that the callback can be called from an atomic context. If
* fence is already signaled, this function will return -ENOENT (and
- * *not* call the callback)
+ * *not* call the callback).
*
* Add a software callback to the fence. Same restrictions apply to
- * refcount as it does to dma_fence_wait, however the caller doesn't need to
- * keep a refcount to fence afterwards: when software access is enabled,
- * the creator of the fence is required to keep the fence alive until
- * after it signals with dma_fence_signal. The callback itself can be called
- * from irq context.
+ * refcount as it does to dma_fence_wait(); however, the caller doesn't need to
+ * keep a refcount to the fence after dma_fence_add_callback() has returned:
+ * when software access is enabled, the creator of the fence is required to keep
+ * the fence alive until after it signals with dma_fence_signal(). The callback
+ * itself can be called from irq context.
*
* Returns 0 in case of success, -ENOENT if the fence is already signaled
* and -EINVAL in case of error.
@@ -260,7 +319,7 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
ret = -ENOENT;
- else if (!was_set) {
+ else if (!was_set && fence->ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
@@ -282,7 +341,7 @@ EXPORT_SYMBOL(dma_fence_add_callback);
/**
* dma_fence_get_status - returns the status upon completion
- * @fence: [in] the dma_fence to query
+ * @fence: the dma_fence to query
*
* This wraps dma_fence_get_status_locked() to return the error status
* condition on a signaled fence. See dma_fence_get_status_locked() for more
@@ -307,8 +366,8 @@ EXPORT_SYMBOL(dma_fence_get_status);
/**
* dma_fence_remove_callback - remove a callback from the signaling list
- * @fence: [in] the fence to wait on
- * @cb: [in] the callback to remove
+ * @fence: the fence to wait on
+ * @cb: the callback to remove
*
* Remove a previously queued callback from the fence. This function returns
* true if the callback is successfully removed, or false if the fence has
@@ -319,6 +378,9 @@ EXPORT_SYMBOL(dma_fence_get_status);
* doing, since deadlocks and race conditions could occur all too easily. For
* this reason, it should only ever be done on hardware lockup recovery,
* with a reference held to the fence.
+ *
+ * Behaviour is undefined if @cb has not been added to @fence using
+ * dma_fence_add_callback() beforehand.
*/
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
@@ -355,9 +417,9 @@ dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
/**
* dma_fence_default_wait - default sleep until the fence gets signaled
* or until timeout elapses
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
- * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
* remaining timeout in jiffies on success. If timeout is zero the value one is
@@ -388,7 +450,7 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
goto out;
- if (!was_set) {
+ if (!was_set && fence->ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
@@ -450,12 +512,12 @@ dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
/**
* dma_fence_wait_any_timeout - sleep until any fence gets signaled
* or until timeout elapses
- * @fences: [in] array of fences to wait on
- * @count: [in] number of fences to wait on
- * @intr: [in] if true, do an interruptible wait
- * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
- * @idx: [out] the first signaled fence index, meaningful only on
- * positive return
+ * @fences: array of fences to wait on
+ * @count: number of fences to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @idx: used to store the first signaled fence index, meaningful only on
+ * positive return
*
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
* remaining timeout in jiffies
@@ -464,6 +526,8 @@ dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
* Synchronous waits for the first fence in the array to be signaled. The
* caller needs to hold a reference to all fences in the array, otherwise a
* fence might be freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait() and dma_fence_wait_timeout().
*/
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
@@ -496,11 +560,6 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
for (i = 0; i < count; ++i) {
struct dma_fence *fence = fences[i];
- if (fence->ops->wait != dma_fence_default_wait) {
- ret = -EINVAL;
- goto fence_rm_cb;
- }
-
cb[i].task = current;
if (dma_fence_add_callback(fence, &cb[i].base,
dma_fence_default_wait_cb)) {
@@ -541,27 +600,25 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout);
/**
* dma_fence_init - Initialize a custom fence.
- * @fence: [in] the fence to initialize
- * @ops: [in] the dma_fence_ops for operations on this fence
- * @lock: [in] the irqsafe spinlock to use for locking this fence
- * @context: [in] the execution context this fence is run on
- * @seqno: [in] a linear increasing sequence number for this context
+ * @fence: the fence to initialize
+ * @ops: the dma_fence_ops for operations on this fence
+ * @lock: the irqsafe spinlock to use for locking this fence
+ * @context: the execution context this fence is run on
+ * @seqno: a linear increasing sequence number for this context
*
* Initializes an allocated fence, the caller doesn't have to keep its
* refcount after committing with this fence, but it will need to hold a
- * refcount again if dma_fence_ops.enable_signaling gets called. This can
- * be used for other implementing other types of fence.
+ * refcount again if &dma_fence_ops.enable_signaling gets called.
*
* context and seqno are used for easy comparison between fences, allowing
- * to check which fence is later by simply using dma_fence_later.
+ * to check which fence is later by simply using dma_fence_later().
*/
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, unsigned seqno)
{
BUG_ON(!lock);
- BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
- !ops->get_driver_name || !ops->get_timeline_name);
+ BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
kref_init(&fence->refcount);
fence->ops = ops;
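
With .wait and .enable_signaling now optional (dma_fence_wait_timeout()
falls back to dma_fence_default_wait(), and the enable_signaling call
sites check for NULL first), a software-only fence needs just the two name
callbacks. A minimal sketch:

	static const char *my_driver_name(struct dma_fence *f)
	{
		return "my-driver";
	}

	static const char *my_timeline_name(struct dma_fence *f)
	{
		return "my-timeline";
	}

	static const struct dma_fence_ops my_fence_ops = {
		.get_driver_name	= my_driver_name,
		.get_timeline_name	= my_timeline_name,
		/* no .wait / .enable_signaling: core fallbacks are used */
	};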
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 314eb1071cce..6c95f61a32e7 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -46,7 +46,7 @@
* write-side updates.
*/
-DEFINE_WW_CLASS(reservation_ww_class);
+DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
struct lock_class_key reservation_seqcount_class;
@@ -141,6 +141,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
if (signaled) {
RCU_INIT_POINTER(fobj->shared[signaled_idx], fence);
} else {
+ BUG_ON(fobj->shared_count >= fobj->shared_max);
RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
fobj->shared_count++;
}
@@ -230,10 +231,9 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
old = reservation_object_get_list(obj);
obj->staged = NULL;
- if (!fobj) {
- BUG_ON(old->shared_count >= old->shared_max);
+ if (!fobj)
reservation_object_add_shared_inplace(obj, old, fence);
- } else
+ else
reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 3d78ca89a605..53c1d6d36a64 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -188,7 +188,6 @@ static const struct dma_fence_ops timeline_fence_ops = {
.get_timeline_name = timeline_fence_get_timeline_name,
.enable_signaling = timeline_fence_enable_signaling,
.signaled = timeline_fence_signaled,
- .wait = dma_fence_default_wait,
.release = timeline_fence_release,
.fence_value_str = timeline_fence_value_str,
.timeline_value_str = timeline_fence_timeline_value_str,
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index b53fb618bbf6..b31c28b67ad3 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -179,6 +179,8 @@ static unsigned int pxad_drcmr(unsigned int line)
return 0x1000 + line * 4;
}
+bool pxad_filter_fn(struct dma_chan *chan, void *param);
+
/*
* Debug fs
*/
@@ -760,6 +762,8 @@ static void pxad_free_chan_resources(struct dma_chan *dchan)
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
+ chan->drcmr = U32_MAX;
+ chan->prio = PXAD_PRIO_LOWEST;
}
static void pxad_free_desc(struct virt_dma_desc *vd)
@@ -1384,6 +1388,9 @@ static int pxad_init_dmadev(struct platform_device *op,
c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
if (!c)
return -ENOMEM;
+
+ c->drcmr = U32_MAX;
+ c->prio = PXAD_PRIO_LOWEST;
c->vc.desc_free = pxad_free_desc;
vchan_init(&c->vc, &pdev->slave);
init_waitqueue_head(&c->wq_state);
@@ -1396,9 +1403,10 @@ static int pxad_probe(struct platform_device *op)
{
struct pxad_device *pdev;
const struct of_device_id *of_id;
+ const struct dma_slave_map *slave_map = NULL;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
struct resource *iores;
- int ret, dma_channels = 0, nb_requestors = 0;
+ int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -1429,6 +1437,8 @@ static int pxad_probe(struct platform_device *op)
} else if (pdata && pdata->dma_channels) {
dma_channels = pdata->dma_channels;
nb_requestors = pdata->nb_requestors;
+ slave_map = pdata->slave_map;
+ slave_map_cnt = pdata->slave_map_cnt;
} else {
dma_channels = 32; /* default 32 channel */
}
@@ -1440,6 +1450,9 @@ static int pxad_probe(struct platform_device *op)
pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;
+ pdev->slave.filter.map = slave_map;
+ pdev->slave.filter.mapcnt = slave_map_cnt;
+ pdev->slave.filter.fn = pxad_filter_fn;
pdev->slave.copy_align = PDMA_ALIGNMENT;
pdev->slave.src_addr_widths = widths;
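
Wiring .filter.map/.mapcnt/.fn into the dma_device lets clients call
dma_request_chan(dev, "rx") instead of registering per-board filter
callbacks; the platform supplies a lookup table. A hypothetical board
table (the filter-parameter encoding is driver specific and shown only
schematically here):

	static const struct dma_slave_map my_slave_map[] = {
		/* { consumer device, channel name, filter parameter } */
		{ "my-mmc.0", "rx", (void *)21 },	/* requestor line 21 */
		{ "my-mmc.0", "tx", (void *)22 },
	};

	static struct mmp_dma_platdata my_dma_pdata = {
		.dma_channels	= 32,
		.nb_requestors	= 75,
		.slave_map	= my_slave_map,
		.slave_map_cnt	= ARRAY_SIZE(my_slave_map),
	};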
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index d0d5c4dbe097..5762c3c383f2 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -730,7 +730,8 @@ static int altr_s10_sdram_probe(struct platform_device *pdev)
S10_DDR0_IRQ_MASK)) {
edac_printk(KERN_ERR, EDAC_MC,
"Error clearing SDRAM ECC count\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err2;
}
if (regmap_update_bits(drvdata->mc_vbase, priv->ecc_irq_en_offset,
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 7481955160a4..20374b8248f0 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -1075,14 +1075,14 @@ int __init edac_mc_sysfs_init(void)
err = device_add(mci_pdev);
if (err < 0)
- goto out_dev_free;
+ goto out_put_device;
edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
return 0;
- out_dev_free:
- kfree(mci_pdev);
+ out_put_device:
+ put_device(mci_pdev);
out:
return err;
}
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 8ed4dd9c571b..8e120bf60624 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1177,15 +1177,14 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
rc = device_add(pvt->addrmatch_dev);
if (rc < 0)
- return rc;
+ goto err_put_addrmatch;
if (!pvt->is_registered) {
pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
GFP_KERNEL);
if (!pvt->chancounts_dev) {
- put_device(pvt->addrmatch_dev);
- device_del(pvt->addrmatch_dev);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_del_addrmatch;
}
pvt->chancounts_dev->type = &all_channel_counts_type;
@@ -1199,9 +1198,18 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
rc = device_add(pvt->chancounts_dev);
if (rc < 0)
- return rc;
+ goto err_put_chancounts;
}
return 0;
+
+err_put_chancounts:
+ put_device(pvt->chancounts_dev);
+err_del_addrmatch:
+ device_del(pvt->addrmatch_dev);
+err_put_addrmatch:
+ put_device(pvt->addrmatch_dev);
+
+ return rc;
}
static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
@@ -1211,11 +1219,11 @@ static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
edac_dbg(1, "\n");
if (!pvt->is_registered) {
- put_device(pvt->chancounts_dev);
device_del(pvt->chancounts_dev);
+ put_device(pvt->chancounts_dev);
}
- put_device(pvt->addrmatch_dev);
device_del(pvt->addrmatch_dev);
+ put_device(pvt->addrmatch_dev);
}
/****************************************************************************
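
The reordering in i7core_delete_sysfs_devices() follows the same
driver-core rule in the other direction: a registered device is first
unlinked with device_del() and only then has its final reference dropped,
since put_device() may free it. In short:

	device_del(dev);	/* unlink from sysfs and the bus */
	put_device(dev);	/* drop the last ref; release() frees it */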
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4a89c8093307..07726fb00321 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -352,6 +352,7 @@ struct pci_id_table {
struct sbridge_dev {
struct list_head list;
+ int seg;
u8 bus, mc;
u8 node_id, source_id;
struct pci_dev **pdev;
@@ -729,7 +730,8 @@ static inline int numcol(u32 mtr)
return 1 << cols;
}
-static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom, int multi_bus,
+static struct sbridge_dev *get_sbridge_dev(int seg, u8 bus, enum domain dom,
+ int multi_bus,
struct sbridge_dev *prev)
{
struct sbridge_dev *sbridge_dev;
@@ -747,14 +749,15 @@ static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom, int multi_bu
: sbridge_edac_list.next, struct sbridge_dev, list);
list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
- if (sbridge_dev->bus == bus && (dom == SOCK || dom == sbridge_dev->dom))
+ if ((sbridge_dev->seg == seg) && (sbridge_dev->bus == bus) &&
+ (dom == SOCK || dom == sbridge_dev->dom))
return sbridge_dev;
}
return NULL;
}
-static struct sbridge_dev *alloc_sbridge_dev(u8 bus, enum domain dom,
+static struct sbridge_dev *alloc_sbridge_dev(int seg, u8 bus, enum domain dom,
const struct pci_id_table *table)
{
struct sbridge_dev *sbridge_dev;
@@ -771,6 +774,7 @@ static struct sbridge_dev *alloc_sbridge_dev(u8 bus, enum domain dom,
return NULL;
}
+ sbridge_dev->seg = seg;
sbridge_dev->bus = bus;
sbridge_dev->dom = dom;
sbridge_dev->n_devs = table->n_devs_per_imc;
@@ -2246,6 +2250,7 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
struct sbridge_dev *sbridge_dev = NULL;
const struct pci_id_descr *dev_descr = &table->descr[devno];
struct pci_dev *pdev = NULL;
+ int seg = 0;
u8 bus = 0;
int i = 0;
@@ -2276,10 +2281,12 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
/* End of list, leave */
return -ENODEV;
}
+ seg = pci_domain_nr(pdev->bus);
bus = pdev->bus->number;
next_imc:
- sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
+ sbridge_dev = get_sbridge_dev(seg, bus, dev_descr->dom,
+ multi_bus, sbridge_dev);
if (!sbridge_dev) {
/* If the HA1 wasn't found, don't create EDAC second memory controller */
if (dev_descr->dom == IMC1 && devno != 1) {
@@ -2292,7 +2299,7 @@ next_imc:
if (dev_descr->dom == SOCK)
goto out_imc;
- sbridge_dev = alloc_sbridge_dev(bus, dev_descr->dom, table);
+ sbridge_dev = alloc_sbridge_dev(seg, bus, dev_descr->dom, table);
if (!sbridge_dev) {
pci_dev_put(pdev);
return -ENOMEM;
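
Bus numbers are only unique within a PCI segment (domain), so the
memory-controller lookup now keys on the (segment, bus) pair. Reduced to
its core, the disambiguated match is:

	int seg = pci_domain_nr(pdev->bus);	/* PCI segment/domain */
	u8 bus = pdev->bus->number;

	list_for_each_entry(d, &sbridge_edac_list, list)
		if (d->seg == seg && d->bus == bus)
			return d;	/* same controller, same segment */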
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 4803c6468bab..c009d94f40c5 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -408,26 +408,29 @@ static ssize_t thunderx_lmc_inject_ecc_write(struct file *file,
size_t count, loff_t *ppos)
{
struct thunderx_lmc *lmc = file->private_data;
-
unsigned int cline_size = cache_line_size();
-
- u8 tmp[cline_size];
+ u8 *tmp;
void __iomem *addr;
unsigned int offs, timeout = 100000;
atomic_set(&lmc->ecc_int, 0);
lmc->mem = alloc_pages_node(lmc->node, GFP_KERNEL, 0);
-
if (!lmc->mem)
return -ENOMEM;
+ tmp = kmalloc(cline_size, GFP_KERNEL);
+ if (!tmp) {
+ __free_pages(lmc->mem, 0);
+ return -ENOMEM;
+ }
+
addr = page_address(lmc->mem);
while (!atomic_read(&lmc->ecc_int) && timeout--) {
stop_machine(inject_ecc_fn, lmc, NULL);
- for (offs = 0; offs < PAGE_SIZE; offs += sizeof(tmp)) {
+ for (offs = 0; offs < PAGE_SIZE; offs += cline_size) {
/*
* Do a load from the previously rigged location
* This should generate an error interrupt.
@@ -437,6 +440,7 @@ static ssize_t thunderx_lmc_inject_ecc_write(struct file *file,
}
}
+ kfree(tmp);
__free_pages(lmc->mem, 0);
return count;
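
The old u8 tmp[cline_size] was a variable-length array, since
cache_line_size() is not a compile-time constant, and VLAs are being
removed kernel-wide; the replacement is a heap buffer that every exit path
must free:

	u8 *tmp = kmalloc(cline_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* after releasing earlier resources */

	/* ... use tmp as the read buffer ... */

	kfree(tmp);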
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 781a4a337557..d8e159feb573 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -87,6 +87,18 @@ config EFI_RUNTIME_WRAPPERS
config EFI_ARMSTUB
bool
+config EFI_ARMSTUB_DTB_LOADER
+ bool "Enable the DTB loader"
+ depends on EFI_ARMSTUB
+ help
+ Select this config option to add support for the dtb= command
+ line parameter, allowing a device tree blob to be loaded into
+ memory from the EFI System Partition by the stub.
+
+ The device tree is typically provided by the platform or by
+ the bootloader, so this option is mostly for development
+ purposes only.
+
config EFI_BOOTLOADER_CONTROL
tristate "EFI Bootloader Control"
depends on EFI_VARS
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index b5214c143fee..388a929baf95 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -259,7 +259,6 @@ void __init efi_init(void)
reserve_regions();
efi_esrt_init();
- efi_memmap_unmap();
memblock_reserve(params.mmap & PAGE_MASK,
PAGE_ALIGN(params.mmap_size +
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 5889cbea60b8..922cfb813109 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -110,11 +110,20 @@ static int __init arm_enable_runtime_services(void)
{
u64 mapsize;
- if (!efi_enabled(EFI_BOOT)) {
+ if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) {
pr_info("EFI services will not be available.\n");
return 0;
}
+ efi_memmap_unmap();
+
+ mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
+
+ if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
+ pr_err("Failed to remap EFI memory map\n");
+ return 0;
+ }
+
if (efi_runtime_disabled()) {
pr_info("EFI runtime services will be disabled.\n");
return 0;
@@ -127,13 +136,6 @@ static int __init arm_enable_runtime_services(void)
pr_info("Remapping and enabling EFI services.\n");
- mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
-
- if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
- pr_err("Failed to remap EFI memory map\n");
- return -ENOMEM;
- }
-
if (!efi_virtmap_init()) {
pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
return -ENOMEM;
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 3bf0dca378a6..a7902fccdcfa 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -48,8 +48,21 @@ u64 cper_next_record_id(void)
{
static atomic64_t seq;
- if (!atomic64_read(&seq))
- atomic64_set(&seq, ((u64)get_seconds()) << 32);
+ if (!atomic64_read(&seq)) {
+ time64_t time = ktime_get_real_seconds();
+
+ /*
+ * This code is unlikely to still be needed in year 2106,
+ * but just in case, let's use a few more bits for timestamps
+ * after y2038 to be sure they keep increasing monotonically
+ * for the next few hundred years...
+ */
+ if (time < 0x80000000)
+ atomic64_set(&seq, (ktime_get_real_seconds()) << 32);
+ else
+ atomic64_set(&seq, 0x8000000000000000ull |
+ ktime_get_real_seconds() << 24);
+ }
return atomic64_inc_return(&seq);
}
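
The resulting record ID packs a timestamp and a counter into 64 bits: before 2038, the seconds sampled at first use occupy the top 32 bits and records count up through the low 32; afterwards, bit 63 flags the new layout and the seconds move to bits 24..62, leaving 24 bits of counter. A hedged decoder for both layouts (names are illustrative):

    /* Sketch: unpack the two record-id layouts produced above. */
    static void cper_decode_record_id(u64 id, u64 *seconds, u64 *count)
    {
        if (id & 0x8000000000000000ull) {       /* post-y2038 layout */
            *seconds = (id & ~0x8000000000000000ull) >> 24;
            *count = id & 0xffffff;
        } else {                                /* original layout */
            *seconds = id >> 32;
            *count = id & 0xffffffff;
        }
    }
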
@@ -459,7 +472,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
else
goto err_section_too_small;
#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
- } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_ARM)) {
+ } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata);
printk("%ssection_type: ARM processor error\n", newpfx);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 232f4915223b..2a29dd9c986d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -82,8 +82,11 @@ struct mm_struct efi_mm = {
.mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
+ .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
};
+struct workqueue_struct *efi_rts_wq;
+
static bool disable_runtime;
static int __init setup_noefi(char *arg)
{
@@ -337,6 +340,18 @@ static int __init efisubsys_init(void)
if (!efi_enabled(EFI_BOOT))
return 0;
+ /*
+ * Since we process only one efi_runtime_service() at a time, an
+ * ordered workqueue (which creates only one execution context)
+ * should suffice for all our needs.
+ */
+ efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
+ if (!efi_rts_wq) {
+ pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return 0;
+ }
+
/* We register the efi directory at /sys/firmware/efi */
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
@@ -388,7 +403,7 @@ subsys_initcall(efisubsys_init);
* and if so, populate the supplied memory descriptor with the appropriate
* data.
*/
-int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
efi_memory_desc_t *md;
@@ -406,12 +421,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
u64 size;
u64 end;
- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
- md->type != EFI_BOOT_SERVICES_DATA &&
- md->type != EFI_RUNTIME_SERVICES_DATA) {
- continue;
- }
-
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
if (phys_addr >= md->phys_addr && phys_addr < end) {
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 1ab80e06e7c5..5d06bd247d07 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -250,7 +250,10 @@ void __init efi_esrt_init(void)
return;
rc = efi_mem_desc_lookup(efi.esrt, &md);
- if (rc < 0) {
+ if (rc < 0 ||
+ (!(md.attribute & EFI_MEMORY_RUNTIME) &&
+ md.type != EFI_BOOT_SERVICES_DATA &&
+ md.type != EFI_RUNTIME_SERVICES_DATA)) {
pr_warn("ESRT header is not in the memory map.\n");
return;
}
@@ -326,7 +329,8 @@ void __init efi_esrt_init(void)
end = esrt_data + size;
pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
- efi_mem_reserve(esrt_data, esrt_data_size);
+ if (md.type == EFI_BOOT_SERVICES_DATA)
+ efi_mem_reserve(esrt_data, esrt_data_size);
pr_debug("esrt-init: loaded.\n");
}
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index a34e9290a699..88c322d7c71e 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,10 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar
-cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
+# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
+# disable the stackleak plugin
+cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \
+ $(DISABLE_STACKLEAK_PLUGIN)
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic -mno-single-pic-base
@@ -20,7 +23,7 @@ cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
-D__NO_FORTIFY \
$(call cc-option,-ffreestanding) \
- $(call cc-option,-fno-stack-protector)
+ $(call cc-option,-fno-stack-protector) \
GCOV_PROFILE := n
KASAN_SANITIZE := n
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 01a9d78ee415..6920033de6d4 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -40,31 +40,6 @@
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
-efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
- void *__image, void **__fh)
-{
- efi_file_io_interface_t *io;
- efi_loaded_image_t *image = __image;
- efi_file_handle_t *fh;
- efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
- efi_status_t status;
- void *handle = (void *)(unsigned long)image->device_handle;
-
- status = sys_table_arg->boottime->handle_protocol(handle,
- &fs_proto, (void **)&io);
- if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
- return status;
- }
-
- status = io->open_volume(io, &fh);
- if (status != EFI_SUCCESS)
- efi_printk(sys_table_arg, "Failed to open volume\n");
-
- *__fh = fh;
- return status;
-}
-
void efi_char16_printk(efi_system_table_t *sys_table_arg,
efi_char16_t *str)
{
@@ -202,9 +177,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
* 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure
* boot is enabled if we can't determine its state.
*/
- if (secure_boot != efi_secureboot_mode_disabled &&
- strstr(cmdline_ptr, "dtb=")) {
- pr_efi(sys_table, "Ignoring DTB from command line.\n");
+ if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
+ secure_boot != efi_secureboot_mode_disabled) {
+ if (strstr(cmdline_ptr, "dtb="))
+ pr_efi(sys_table, "Ignoring DTB from command line.\n");
} else {
status = handle_cmdline_files(sys_table, image, cmdline_ptr,
"dtb=",
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 50a9cab5a834..e94975f4655b 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -413,6 +413,34 @@ static efi_status_t efi_file_close(void *handle)
return efi_call_proto(efi_file_handle, close, handle);
}
+static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
+ efi_loaded_image_t *image,
+ efi_file_handle_t **__fh)
+{
+ efi_file_io_interface_t *io;
+ efi_file_handle_t *fh;
+ efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+ efi_status_t status;
+ void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image,
+ device_handle,
+ image);
+
+ status = efi_call_early(handle_protocol, handle,
+ &fs_proto, (void **)&io);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+ return status;
+ }
+
+ status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh);
+ if (status != EFI_SUCCESS)
+ efi_printk(sys_table_arg, "Failed to open volume\n");
+ else
+ *__fh = fh;
+
+ return status;
+}
+
/*
* Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
* option, e.g. efi=nochunk.
@@ -563,8 +591,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
/* Only open the volume once. */
if (!i) {
- status = efi_open_volume(sys_table_arg, image,
- (void **)&fh);
+ status = efi_open_volume(sys_table_arg, image, &fh);
if (status != EFI_SUCCESS)
goto free_files;
}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index f59564b72ddc..32799cf039ef 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -36,9 +36,6 @@ extern int __pure is_quiet(void);
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
-efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
- void **__fh);
-
unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index ae54870b2788..aa66cbf23512 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -1,6 +1,15 @@
/*
* runtime-wrappers.c - Runtime Services function call wrappers
*
+ * Implementation summary:
+ * -----------------------
+ * 1. When user/kernel thread requests to execute efi_runtime_service(),
+ * enqueue work to efi_rts_wq.
+ * 2. Caller thread waits for completion until the work is finished
+ * because it's dependent on the return status and execution of
+ * efi_runtime_service().
+ * For instance, get_variable() and get_next_variable().
+ *
* Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* Split off from arch/x86/platform/efi/efi.c
@@ -22,6 +31,9 @@
#include <linux/mutex.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
#include <asm/efi.h>
/*
@@ -33,6 +45,76 @@
#define __efi_call_virt(f, args...) \
__efi_call_virt_pointer(efi.systab->runtime, f, args)
+/* efi_runtime_service() function identifiers */
+enum efi_rts_ids {
+ GET_TIME,
+ SET_TIME,
+ GET_WAKEUP_TIME,
+ SET_WAKEUP_TIME,
+ GET_VARIABLE,
+ GET_NEXT_VARIABLE,
+ SET_VARIABLE,
+ QUERY_VARIABLE_INFO,
+ GET_NEXT_HIGH_MONO_COUNT,
+ UPDATE_CAPSULE,
+ QUERY_CAPSULE_CAPS,
+};
+
+/*
+ * efi_runtime_work: Details of EFI Runtime Service work
+ * @arg<1-5>: EFI Runtime Service function arguments
+ * @status: Status of executing EFI Runtime Service
+ * @work: Work item queued on efi_rts_wq
+ * @efi_rts_id: EFI Runtime Service function identifier
+ * @efi_rts_comp: Struct used for handling completions
+ */
+struct efi_runtime_work {
+ void *arg1;
+ void *arg2;
+ void *arg3;
+ void *arg4;
+ void *arg5;
+ efi_status_t status;
+ struct work_struct work;
+ enum efi_rts_ids efi_rts_id;
+ struct completion efi_rts_comp;
+};
+
+/*
+ * efi_queue_work: Queue efi_runtime_service() and wait until it's done
+ * @rts: efi_runtime_service() function identifier
+ * @rts_arg<1-5>: efi_runtime_service() function arguments
+ *
+ * Accesses to efi_runtime_services() are serialized by a binary
+ * semaphore (efi_runtime_lock), and the caller waits until the work
+ * is finished, hence _only_ one work item is queued at a time.
+ */
+#define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5) \
+({ \
+ struct efi_runtime_work efi_rts_work; \
+ efi_rts_work.status = EFI_ABORTED; \
+ \
+ init_completion(&efi_rts_work.efi_rts_comp); \
+ INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts); \
+ efi_rts_work.arg1 = _arg1; \
+ efi_rts_work.arg2 = _arg2; \
+ efi_rts_work.arg3 = _arg3; \
+ efi_rts_work.arg4 = _arg4; \
+ efi_rts_work.arg5 = _arg5; \
+ efi_rts_work.efi_rts_id = _rts; \
+ \
+ /* \
+ * queue_work() returns 0 if work was already on queue, \
+ * _ideally_ this should never happen. \
+ */ \
+ if (queue_work(efi_rts_wq, &efi_rts_work.work)) \
+ wait_for_completion(&efi_rts_work.efi_rts_comp); \
+ else \
+ pr_err("Failed to queue work to efi_rts_wq.\n"); \
+ \
+ efi_rts_work.status; \
+})
+
void efi_call_virt_check_flags(unsigned long flags, const char *call)
{
unsigned long cur_flags, mismatch;
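
The macro above is an instance of a standard kernel idiom: an on-stack work item plus a completion lets the caller hand a call to a worker thread and still get a synchronous result. A generic, self-contained sketch of the idiom (all names are illustrative; the EFI code inlines this into the macro):

    #include <linux/workqueue.h>
    #include <linux/completion.h>

    struct sync_work {
        struct work_struct work;
        struct completion done;
        long result;
    };

    static void sync_work_fn(struct work_struct *work)
    {
        struct sync_work *sw = container_of(work, struct sync_work, work);

        sw->result = 42;        /* stand-in for the real service call */
        complete(&sw->done);
    }

    static long run_on_workqueue(struct workqueue_struct *wq)
    {
        struct sync_work sw = { .result = -1 };

        init_completion(&sw.done);
        INIT_WORK_ONSTACK(&sw.work, sync_work_fn);
        if (queue_work(wq, &sw.work))
            wait_for_completion(&sw.done);  /* sw lives on our stack */
        destroy_work_on_stack(&sw.work);
        return sw.result;
    }
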
@@ -90,13 +172,98 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
*/
static DEFINE_SEMAPHORE(efi_runtime_lock);
+/*
+ * Calls the requested efi_runtime_service() with the supplied
+ * arguments.
+ *
+ * Conventions used by efi_call_rts() to interpret efi_runtime_work:
+ * 1. If the argument was a pointer, recast it from a void pointer to
+ *    the original pointer type.
+ * 2. If the argument was a value, recast it from a void pointer to
+ *    the original pointer type and dereference it.
+ */
+static void efi_call_rts(struct work_struct *work)
+{
+ struct efi_runtime_work *efi_rts_work;
+ void *arg1, *arg2, *arg3, *arg4, *arg5;
+ efi_status_t status = EFI_NOT_FOUND;
+
+ efi_rts_work = container_of(work, struct efi_runtime_work, work);
+ arg1 = efi_rts_work->arg1;
+ arg2 = efi_rts_work->arg2;
+ arg3 = efi_rts_work->arg3;
+ arg4 = efi_rts_work->arg4;
+ arg5 = efi_rts_work->arg5;
+
+ switch (efi_rts_work->efi_rts_id) {
+ case GET_TIME:
+ status = efi_call_virt(get_time, (efi_time_t *)arg1,
+ (efi_time_cap_t *)arg2);
+ break;
+ case SET_TIME:
+ status = efi_call_virt(set_time, (efi_time_t *)arg1);
+ break;
+ case GET_WAKEUP_TIME:
+ status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
+ (efi_bool_t *)arg2, (efi_time_t *)arg3);
+ break;
+ case SET_WAKEUP_TIME:
+ status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
+ (efi_time_t *)arg2);
+ break;
+ case GET_VARIABLE:
+ status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
+ (efi_guid_t *)arg2, (u32 *)arg3,
+ (unsigned long *)arg4, (void *)arg5);
+ break;
+ case GET_NEXT_VARIABLE:
+ status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
+ (efi_char16_t *)arg2,
+ (efi_guid_t *)arg3);
+ break;
+ case SET_VARIABLE:
+ status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
+ (efi_guid_t *)arg2, *(u32 *)arg3,
+ *(unsigned long *)arg4, (void *)arg5);
+ break;
+ case QUERY_VARIABLE_INFO:
+ status = efi_call_virt(query_variable_info, *(u32 *)arg1,
+ (u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
+ break;
+ case GET_NEXT_HIGH_MONO_COUNT:
+ status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
+ break;
+ case UPDATE_CAPSULE:
+ status = efi_call_virt(update_capsule,
+ (efi_capsule_header_t **)arg1,
+ *(unsigned long *)arg2,
+ *(unsigned long *)arg3);
+ break;
+ case QUERY_CAPSULE_CAPS:
+ status = efi_call_virt(query_capsule_caps,
+ (efi_capsule_header_t **)arg1,
+ *(unsigned long *)arg2, (u64 *)arg3,
+ (int *)arg4);
+ break;
+ default:
+ /*
+ * Ideally, we should never reach here because a caller of this
+ * function should have put the right efi_runtime_service()
+ * function identifier into efi_rts_work->efi_rts_id.
+ */
+ pr_err("Requested executing invalid EFI Runtime Service.\n");
+ }
+ efi_rts_work->status = status;
+ complete(&efi_rts_work->efi_rts_comp);
+}
+
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
efi_status_t status;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_time, tm, tc);
+ status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL);
up(&efi_runtime_lock);
return status;
}
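
Note how by-value parameters survive the trip through the void pointers: the wrapper passes the address of its local (as virt_efi_set_wakeup_time() does with &enabled further down), and efi_call_rts() recovers the value with *(efi_bool_t *)arg1. This only works because the caller blocks on the completion, so its locals outlive the queued work. A tiny illustration of the convention (the function is hypothetical):

    /* Sketch: the enqueue side takes an address, the worker dereferences it. */
    static void marshal_by_value(struct efi_runtime_work *w)
    {
        u32 attr = 0x7;                         /* a by-value parameter */

        w->arg3 = &attr;                        /* caller side, before queueing */
        pr_info("attr=%u\n", *(u32 *)w->arg3);  /* worker side unpack */
    }
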
@@ -107,7 +274,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(set_time, tm);
+ status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -120,7 +287,8 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_wakeup_time, enabled, pending, tm);
+ status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL,
+ NULL);
up(&efi_runtime_lock);
return status;
}
@@ -131,7 +299,8 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(set_wakeup_time, enabled, tm);
+ status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
+ NULL);
up(&efi_runtime_lock);
return status;
}
@@ -146,8 +315,8 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_variable, name, vendor, attr, data_size,
- data);
+ status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
+ data);
up(&efi_runtime_lock);
return status;
}
@@ -160,7 +329,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_next_variable, name_size, name, vendor);
+ status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor,
+ NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -175,8 +345,8 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(set_variable, name, vendor, attr, data_size,
- data);
+ status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size,
+ data);
up(&efi_runtime_lock);
return status;
}
@@ -210,8 +380,8 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(query_variable_info, attr, storage_space,
- remaining_space, max_variable_size);
+ status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space,
+ remaining_space, max_variable_size, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -242,7 +412,8 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(get_next_high_mono_count, count);
+ status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
+ NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -272,7 +443,8 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(update_capsule, capsules, count, sg_list);
+ status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list,
+ NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -289,8 +461,8 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_call_virt(query_capsule_caps, capsules, count, max_size,
- reset_type);
+ status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count,
+ max_size, reset_type, NULL);
up(&efi_runtime_lock);
return status;
}
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 14fedbeca724..039e0f91dba8 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -28,6 +28,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/slab.h>
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 71c0ab46f216..f7a0f576f918 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -359,6 +359,15 @@ config GPIO_MPC8XXX
Say Y here if you're going to use hardware that connects to the
MPC512x/831x/834x/837x/8572/8610/QorIQ GPIOs.
+config GPIO_MT7621
+ bool "Mediatek MT7621 GPIO Support"
+ depends on SOC_MT7620 || SOC_MT7621 || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to support the Mediatek MT7621 SoC GPIO device.
+
config GPIO_MVEBU
def_bool y
depends on PLAT_ORION || ARCH_MVEBU
@@ -684,7 +693,8 @@ config GPIO_IT87
Say yes here to support GPIO functionality of IT87xx Super I/O chips.
This driver is tested with ITE IT8728 and IT8732 Super I/O chips, and
- supports the IT8761E, IT8620E and IT8628E Super I/O chip as well.
+ supports the IT8761E, IT8613, IT8620E, and IT8628E Super I/O chips as
+ well.
To compile this driver as a module, choose M here: the module will
be called gpio_it87
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 1324c8f966a7..fc77989371be 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
+obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index b31ae16170e7..2342e154029b 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -12,6 +12,7 @@
#include <asm/div64.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/aspeed.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -22,6 +23,15 @@
#include <linux/spinlock.h>
#include <linux/string.h>
+/*
+ * These two headers aren't meant to be used by GPIO drivers. We need
+ * them in order to access gpio_chip_hwgpio() which we need to implement
+ * the aspeed specific API which allows the coprocessor to request
+ * access to some GPIOs and to arbitrate between coprocessor and ARM.
+ */
+#include <linux/gpio/consumer.h>
+#include "gpiolib.h"
+
struct aspeed_bank_props {
unsigned int bank;
u32 input;
@@ -56,83 +66,130 @@ struct aspeed_gpio {
struct clk *clk;
u32 *dcache;
+ u8 *cf_copro_bankmap;
};
struct aspeed_gpio_bank {
- uint16_t val_regs;
+ uint16_t val_regs; /* +0: Rd: read input value, Wr: set write latch
+ * +4: Rd/Wr: Direction (0=in, 1=out)
+ */
+ uint16_t rdata_reg; /* Rd: read write latch, Wr: <none> */
uint16_t irq_regs;
uint16_t debounce_regs;
uint16_t tolerance_regs;
+ uint16_t cmdsrc_regs;
const char names[4][3];
};
+/*
+ * Note: The "value" register returns the input value sampled on the
+ * line even when the GPIO is configured as an output. Since
+ * that input goes through synchronizers, a write followed by a
+ * read may not return the written value right away.
+ *
+ * The "rdata" register returns the content of the write latch
+ * and thus can be used to read back what was last written
+ * reliably.
+ */
+
static const int debounce_timers[4] = { 0x00, 0x50, 0x54, 0x58 };
+static const struct aspeed_gpio_copro_ops *copro_ops;
+static void *copro_data;
+
static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
{
.val_regs = 0x0000,
+ .rdata_reg = 0x00c0,
.irq_regs = 0x0008,
.debounce_regs = 0x0040,
.tolerance_regs = 0x001c,
+ .cmdsrc_regs = 0x0060,
.names = { "A", "B", "C", "D" },
},
{
.val_regs = 0x0020,
+ .rdata_reg = 0x00c4,
.irq_regs = 0x0028,
.debounce_regs = 0x0048,
.tolerance_regs = 0x003c,
+ .cmdsrc_regs = 0x0068,
.names = { "E", "F", "G", "H" },
},
{
.val_regs = 0x0070,
+ .rdata_reg = 0x00c8,
.irq_regs = 0x0098,
.debounce_regs = 0x00b0,
.tolerance_regs = 0x00ac,
+ .cmdsrc_regs = 0x0090,
.names = { "I", "J", "K", "L" },
},
{
.val_regs = 0x0078,
+ .rdata_reg = 0x00cc,
.irq_regs = 0x00e8,
.debounce_regs = 0x0100,
.tolerance_regs = 0x00fc,
+ .cmdsrc_regs = 0x00e0,
.names = { "M", "N", "O", "P" },
},
{
.val_regs = 0x0080,
+ .rdata_reg = 0x00d0,
.irq_regs = 0x0118,
.debounce_regs = 0x0130,
.tolerance_regs = 0x012c,
+ .cmdsrc_regs = 0x0110,
.names = { "Q", "R", "S", "T" },
},
{
.val_regs = 0x0088,
+ .rdata_reg = 0x00d4,
.irq_regs = 0x0148,
.debounce_regs = 0x0160,
.tolerance_regs = 0x015c,
+ .cmdsrc_regs = 0x0140,
.names = { "U", "V", "W", "X" },
},
{
.val_regs = 0x01E0,
+ .rdata_reg = 0x00d8,
.irq_regs = 0x0178,
.debounce_regs = 0x0190,
.tolerance_regs = 0x018c,
+ .cmdsrc_regs = 0x0170,
.names = { "Y", "Z", "AA", "AB" },
},
{
.val_regs = 0x01e8,
+ .rdata_reg = 0x00dc,
.irq_regs = 0x01a8,
.debounce_regs = 0x01c0,
.tolerance_regs = 0x01bc,
+ .cmdsrc_regs = 0x01a0,
.names = { "AC", "", "", "" },
},
};
-#define GPIO_BANK(x) ((x) >> 5)
-#define GPIO_OFFSET(x) ((x) & 0x1f)
-#define GPIO_BIT(x) BIT(GPIO_OFFSET(x))
+enum aspeed_gpio_reg {
+ reg_val,
+ reg_rdata,
+ reg_dir,
+ reg_irq_enable,
+ reg_irq_type0,
+ reg_irq_type1,
+ reg_irq_type2,
+ reg_irq_status,
+ reg_debounce_sel1,
+ reg_debounce_sel2,
+ reg_tolerance,
+ reg_cmdsrc0,
+ reg_cmdsrc1,
+};
-#define GPIO_DATA 0x00
-#define GPIO_DIR 0x04
+#define GPIO_VAL_VALUE 0x00
+#define GPIO_VAL_DIR 0x04
#define GPIO_IRQ_ENABLE 0x00
#define GPIO_IRQ_TYPE0 0x04
@@ -143,6 +200,53 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
#define GPIO_DEBOUNCE_SEL1 0x00
#define GPIO_DEBOUNCE_SEL2 0x04
+#define GPIO_CMDSRC_0 0x00
+#define GPIO_CMDSRC_1 0x04
+#define GPIO_CMDSRC_ARM 0
+#define GPIO_CMDSRC_LPC 1
+#define GPIO_CMDSRC_COLDFIRE 2
+#define GPIO_CMDSRC_RESERVED 3
+
+/* This will be resolved at compile time */
+static inline void __iomem *bank_reg(struct aspeed_gpio *gpio,
+ const struct aspeed_gpio_bank *bank,
+ const enum aspeed_gpio_reg reg)
+{
+ switch (reg) {
+ case reg_val:
+ return gpio->base + bank->val_regs + GPIO_VAL_VALUE;
+ case reg_rdata:
+ return gpio->base + bank->rdata_reg;
+ case reg_dir:
+ return gpio->base + bank->val_regs + GPIO_VAL_DIR;
+ case reg_irq_enable:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_ENABLE;
+ case reg_irq_type0:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE0;
+ case reg_irq_type1:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE1;
+ case reg_irq_type2:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE2;
+ case reg_irq_status:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
+ case reg_debounce_sel1:
+ return gpio->base + bank->debounce_regs + GPIO_DEBOUNCE_SEL1;
+ case reg_debounce_sel2:
+ return gpio->base + bank->debounce_regs + GPIO_DEBOUNCE_SEL2;
+ case reg_tolerance:
+ return gpio->base + bank->tolerance_regs;
+ case reg_cmdsrc0:
+ return gpio->base + bank->cmdsrc_regs + GPIO_CMDSRC_0;
+ case reg_cmdsrc1:
+ return gpio->base + bank->cmdsrc_regs + GPIO_CMDSRC_1;
+ }
+ BUG();
+}
+
+#define GPIO_BANK(x) ((x) >> 5)
+#define GPIO_OFFSET(x) ((x) & 0x1f)
+#define GPIO_BIT(x) BIT(GPIO_OFFSET(x))
+
#define _GPIO_SET_DEBOUNCE(t, o, i) ((!!((t) & BIT(i))) << GPIO_OFFSET(o))
#define GPIO_SET_DEBOUNCE1(t, o) _GPIO_SET_DEBOUNCE(t, o, 1)
#define GPIO_SET_DEBOUNCE2(t, o) _GPIO_SET_DEBOUNCE(t, o, 0)
@@ -201,18 +305,80 @@ static inline bool have_output(struct aspeed_gpio *gpio, unsigned int offset)
return !props || (props->output & GPIO_BIT(offset));
}
-static void __iomem *bank_val_reg(struct aspeed_gpio *gpio,
- const struct aspeed_gpio_bank *bank,
- unsigned int reg)
+static void aspeed_gpio_change_cmd_source(struct aspeed_gpio *gpio,
+ const struct aspeed_gpio_bank *bank,
+ int bindex, int cmdsrc)
{
- return gpio->base + bank->val_regs + reg;
+ void __iomem *c0 = bank_reg(gpio, bank, reg_cmdsrc0);
+ void __iomem *c1 = bank_reg(gpio, bank, reg_cmdsrc1);
+ u32 bit, reg;
+
+ /*
+ * Each register controls 4 banks, so take the bottom 2
+ * bits of the bank index, and use them to select the
+ * right control bit (0, 8, 16 or 24).
+ */
+ bit = BIT((bindex & 3) << 3);
+
+ /* Source 1 first to avoid illegal 11 combination */
+ reg = ioread32(c1);
+ if (cmdsrc & 2)
+ reg |= bit;
+ else
+ reg &= ~bit;
+ iowrite32(reg, c1);
+
+ /* Then Source 0 */
+ reg = ioread32(c0);
+ if (cmdsrc & 1)
+ reg |= bit;
+ else
+ reg &= ~bit;
+ iowrite32(reg, c0);
}
-static void __iomem *bank_irq_reg(struct aspeed_gpio *gpio,
- const struct aspeed_gpio_bank *bank,
- unsigned int reg)
+static bool aspeed_gpio_copro_request(struct aspeed_gpio *gpio,
+ unsigned int offset)
{
- return gpio->base + bank->irq_regs + reg;
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+
+ if (!copro_ops || !gpio->cf_copro_bankmap)
+ return false;
+ if (!gpio->cf_copro_bankmap[offset >> 3])
+ return false;
+ if (!copro_ops->request_access)
+ return false;
+
+ /* Pause the coprocessor */
+ copro_ops->request_access(copro_data);
+
+ /* Change command source back to ARM */
+ aspeed_gpio_change_cmd_source(gpio, bank, offset >> 3, GPIO_CMDSRC_ARM);
+
+ /* Update cache */
+ gpio->dcache[GPIO_BANK(offset)] = ioread32(bank_reg(gpio, bank, reg_rdata));
+
+ return true;
+}
+
+static void aspeed_gpio_copro_release(struct aspeed_gpio *gpio,
+ unsigned int offset)
+{
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+
+ if (!copro_ops || !gpio->cf_copro_bankmap)
+ return;
+ if (!gpio->cf_copro_bankmap[offset >> 3])
+ return;
+ if (!copro_ops->release_access)
+ return;
+
+ /* Change command source back to ColdFire */
+ aspeed_gpio_change_cmd_source(gpio, bank, offset >> 3,
+ GPIO_CMDSRC_COLDFIRE);
+
+ /* Restart the coprocessor */
+ copro_ops->release_access(copro_data);
}
static int aspeed_gpio_get(struct gpio_chip *gc, unsigned int offset)
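
To make the bank-index arithmetic in aspeed_gpio_change_cmd_source() concrete: each 32-bit command-source register covers four banks, one byte per bank, with only bit 0 of each byte used. For bank index 5, (5 & 3) << 3 == 16, so the control bit is BIT(16). The same computation as a hedged helper:

    /* Sketch: command-source control bit for a given bank index. */
    static inline u32 cmdsrc_bit(int bindex)
    {
        return BIT((bindex & 3) << 3);  /* yields BIT(0), BIT(8), BIT(16) or BIT(24) */
    }
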
@@ -220,8 +386,7 @@ static int aspeed_gpio_get(struct gpio_chip *gc, unsigned int offset)
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
const struct aspeed_gpio_bank *bank = to_bank(offset);
- return !!(ioread32(bank_val_reg(gpio, bank, GPIO_DATA))
- & GPIO_BIT(offset));
+ return !!(ioread32(bank_reg(gpio, bank, reg_val)) & GPIO_BIT(offset));
}
static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
@@ -232,7 +397,7 @@ static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
void __iomem *addr;
u32 reg;
- addr = bank_val_reg(gpio, bank, GPIO_DATA);
+ addr = bank_reg(gpio, bank, reg_val);
reg = gpio->dcache[GPIO_BANK(offset)];
if (val)
@@ -249,11 +414,15 @@ static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
unsigned long flags;
+ bool copro;
spin_lock_irqsave(&gpio->lock, flags);
+ copro = aspeed_gpio_copro_request(gpio, offset);
__aspeed_gpio_set(gc, offset, val);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
}
@@ -261,7 +430,9 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
const struct aspeed_gpio_bank *bank = to_bank(offset);
+ void __iomem *addr = bank_reg(gpio, bank, reg_dir);
unsigned long flags;
+ bool copro;
u32 reg;
if (!have_input(gpio, offset))
@@ -269,8 +440,13 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
spin_lock_irqsave(&gpio->lock, flags);
- reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
- iowrite32(reg & ~GPIO_BIT(offset), bank_val_reg(gpio, bank, GPIO_DIR));
+ reg = ioread32(addr);
+ reg &= ~GPIO_BIT(offset);
+
+ copro = aspeed_gpio_copro_request(gpio, offset);
+ iowrite32(reg, addr);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
@@ -282,7 +458,9 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
const struct aspeed_gpio_bank *bank = to_bank(offset);
+ void __iomem *addr = bank_reg(gpio, bank, reg_dir);
unsigned long flags;
+ bool copro;
u32 reg;
if (!have_output(gpio, offset))
@@ -290,10 +468,15 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
spin_lock_irqsave(&gpio->lock, flags);
+ reg = ioread32(addr);
+ reg |= GPIO_BIT(offset);
+
+ copro = aspeed_gpio_copro_request(gpio, offset);
__aspeed_gpio_set(gc, offset, val);
- reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
- iowrite32(reg | GPIO_BIT(offset), bank_val_reg(gpio, bank, GPIO_DIR));
+ iowrite32(reg, addr);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
@@ -314,7 +497,7 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
spin_lock_irqsave(&gpio->lock, flags);
- val = ioread32(bank_val_reg(gpio, bank, GPIO_DIR)) & GPIO_BIT(offset);
+ val = ioread32(bank_reg(gpio, bank, reg_dir)) & GPIO_BIT(offset);
spin_unlock_irqrestore(&gpio->lock, flags);
@@ -323,24 +506,23 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
}
static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
- struct aspeed_gpio **gpio,
- const struct aspeed_gpio_bank **bank,
- u32 *bit)
+ struct aspeed_gpio **gpio,
+ const struct aspeed_gpio_bank **bank,
+ u32 *bit, int *offset)
{
- int offset;
struct aspeed_gpio *internal;
- offset = irqd_to_hwirq(d);
+ *offset = irqd_to_hwirq(d);
internal = irq_data_get_irq_chip_data(d);
/* This might be a bit of a questionable place to check */
- if (!have_irq(internal, offset))
+ if (!have_irq(internal, *offset))
return -ENOTSUPP;
*gpio = internal;
- *bank = to_bank(offset);
- *bit = GPIO_BIT(offset);
+ *bank = to_bank(*offset);
+ *bit = GPIO_BIT(*offset);
return 0;
}
@@ -351,17 +533,23 @@ static void aspeed_gpio_irq_ack(struct irq_data *d)
struct aspeed_gpio *gpio;
unsigned long flags;
void __iomem *status_addr;
+ int rc, offset;
+ bool copro;
u32 bit;
- int rc;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
if (rc)
return;
- status_addr = bank_irq_reg(gpio, bank, GPIO_IRQ_STATUS);
+ status_addr = bank_reg(gpio, bank, reg_irq_status);
spin_lock_irqsave(&gpio->lock, flags);
+ copro = aspeed_gpio_copro_request(gpio, offset);
+
iowrite32(bit, status_addr);
+
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
}
@@ -372,15 +560,17 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
unsigned long flags;
u32 reg, bit;
void __iomem *addr;
- int rc;
+ int rc, offset;
+ bool copro;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
if (rc)
return;
- addr = bank_irq_reg(gpio, bank, GPIO_IRQ_ENABLE);
+ addr = bank_reg(gpio, bank, reg_irq_enable);
spin_lock_irqsave(&gpio->lock, flags);
+ copro = aspeed_gpio_copro_request(gpio, offset);
reg = ioread32(addr);
if (set)
@@ -389,6 +579,8 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
reg &= ~bit;
iowrite32(reg, addr);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
}
@@ -413,9 +605,10 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
struct aspeed_gpio *gpio;
unsigned long flags;
void __iomem *addr;
- int rc;
+ int rc, offset;
+ bool copro;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
if (rc)
return -EINVAL;
@@ -441,22 +634,25 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
}
spin_lock_irqsave(&gpio->lock, flags);
+ copro = aspeed_gpio_copro_request(gpio, offset);
- addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE0);
+ addr = bank_reg(gpio, bank, reg_irq_type0);
reg = ioread32(addr);
reg = (reg & ~bit) | type0;
iowrite32(reg, addr);
- addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE1);
+ addr = bank_reg(gpio, bank, reg_irq_type1);
reg = ioread32(addr);
reg = (reg & ~bit) | type1;
iowrite32(reg, addr);
- addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE2);
+ addr = bank_reg(gpio, bank, reg_irq_type2);
reg = ioread32(addr);
reg = (reg & ~bit) | type2;
iowrite32(reg, addr);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
irq_set_handler_locked(d, handler);
@@ -477,7 +673,7 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
for (i = 0; i < ARRAY_SIZE(aspeed_gpio_banks); i++) {
const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
- reg = ioread32(bank_irq_reg(data, bank, GPIO_IRQ_STATUS));
+ reg = ioread32(bank_reg(data, bank, reg_irq_status));
for_each_set_bit(p, &reg, 32) {
girq = irq_find_mapping(gc->irq.domain, i * 32 + p);
@@ -549,21 +745,27 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
unsigned int offset, bool enable)
{
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
- const struct aspeed_gpio_bank *bank;
unsigned long flags;
+ void __iomem *treg;
+ bool copro;
u32 val;
- bank = to_bank(offset);
+ treg = bank_reg(gpio, to_bank(offset), reg_tolerance);
spin_lock_irqsave(&gpio->lock, flags);
- val = readl(gpio->base + bank->tolerance_regs);
+ copro = aspeed_gpio_copro_request(gpio, offset);
+
+ val = readl(treg);
if (enable)
val |= GPIO_BIT(offset);
else
val &= ~GPIO_BIT(offset);
- writel(val, gpio->base + bank->tolerance_regs);
+ writel(val, treg);
+
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
@@ -582,13 +784,6 @@ static void aspeed_gpio_free(struct gpio_chip *chip, unsigned int offset)
pinctrl_gpio_free(chip->base + offset);
}
-static inline void __iomem *bank_debounce_reg(struct aspeed_gpio *gpio,
- const struct aspeed_gpio_bank *bank,
- unsigned int reg)
-{
- return gpio->base + bank->debounce_regs + reg;
-}
-
static int usecs_to_cycles(struct aspeed_gpio *gpio, unsigned long usecs,
u32 *cycles)
{
@@ -666,11 +861,14 @@ static void configure_timer(struct aspeed_gpio *gpio, unsigned int offset,
void __iomem *addr;
u32 val;
- addr = bank_debounce_reg(gpio, bank, GPIO_DEBOUNCE_SEL1);
+ /* Note: Debounce timer isn't under control of the command
+ * source registers, so no need to sync with the coprocessor
+ */
+ addr = bank_reg(gpio, bank, reg_debounce_sel1);
val = ioread32(addr);
iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE1(timer, offset), addr);
- addr = bank_debounce_reg(gpio, bank, GPIO_DEBOUNCE_SEL2);
+ addr = bank_reg(gpio, bank, reg_debounce_sel2);
val = ioread32(addr);
iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE2(timer, offset), addr);
}
@@ -812,6 +1010,111 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
return -ENOTSUPP;
}
+/**
+ * aspeed_gpio_copro_set_ops - Sets the callbacks used for handshaking with
+ * the coprocessor for shared GPIO banks
+ * @ops: The callbacks
+ * @data: Pointer passed back to the callbacks
+ */
+int aspeed_gpio_copro_set_ops(const struct aspeed_gpio_copro_ops *ops, void *data)
+{
+ copro_data = data;
+ copro_ops = ops;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(aspeed_gpio_copro_set_ops);
+
+/**
+ * aspeed_gpio_copro_grab_gpio - Mark a GPIO used by the coprocessor. The entire
+ * bank gets marked and any access from the ARM will
+ * result in handshaking via callbacks.
+ * @desc: The GPIO to be marked
+ * @vreg_offset: If non-NULL, returns the value register offset in the GPIO space
+ * @dreg_offset: If non-NULL, returns the data latch register offset in the GPIO space
+ * @bit: If non-NULL, returns the bit number of the GPIO in the registers
+ */
+int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
+ u16 *vreg_offset, u16 *dreg_offset, u8 *bit)
+{
+ struct gpio_chip *chip = gpiod_to_chip(desc);
+ struct aspeed_gpio *gpio = gpiochip_get_data(chip);
+ int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ unsigned long flags;
+
+ if (!gpio->cf_copro_bankmap)
+ gpio->cf_copro_bankmap = kzalloc(gpio->config->nr_gpios >> 3, GFP_KERNEL);
+ if (!gpio->cf_copro_bankmap)
+ return -ENOMEM;
+ if (offset < 0 || offset > gpio->config->nr_gpios)
+ return -EINVAL;
+ bindex = offset >> 3;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ /* Sanity check, this shouldn't happen */
+ if (gpio->cf_copro_bankmap[bindex] == 0xff) {
+ rc = -EIO;
+ goto bail;
+ }
+ gpio->cf_copro_bankmap[bindex]++;
+
+ /* Switch command source */
+ if (gpio->cf_copro_bankmap[bindex] == 1)
+ aspeed_gpio_change_cmd_source(gpio, bank, bindex,
+ GPIO_CMDSRC_COLDFIRE);
+
+ if (vreg_offset)
+ *vreg_offset = bank->val_regs;
+ if (dreg_offset)
+ *dreg_offset = bank->rdata_reg;
+ if (bit)
+ *bit = GPIO_OFFSET(offset);
+ bail:
+ spin_unlock_irqrestore(&gpio->lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(aspeed_gpio_copro_grab_gpio);
+
+/**
+ * aspeed_gpio_copro_release_gpio - Unmark a GPIO used by the coprocessor.
+ * @desc: The GPIO to be marked
+ */
+int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
+{
+ struct gpio_chip *chip = gpiod_to_chip(desc);
+ struct aspeed_gpio *gpio = gpiochip_get_data(chip);
+ int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ unsigned long flags;
+
+ if (!gpio->cf_copro_bankmap)
+ return -ENXIO;
+
+ if (offset < 0 || offset > gpio->config->nr_gpios)
+ return -EINVAL;
+ bindex = offset >> 3;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ /* Sanity check, this shouldn't happen */
+ if (gpio->cf_copro_bankmap[bindex] == 0) {
+ rc = -EIO;
+ goto bail;
+ }
+ gpio->cf_copro_bankmap[bindex]--;
+
+ /* Switch command source */
+ if (gpio->cf_copro_bankmap[bindex] == 0)
+ aspeed_gpio_change_cmd_source(gpio, bank, bindex,
+ GPIO_CMDSRC_ARM);
+ bail:
+ spin_unlock_irqrestore(&gpio->lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(aspeed_gpio_copro_release_gpio);
+
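
A coprocessor-side driver would consume this API roughly as below; the ops layout mirrors the request_access/release_access callbacks invoked above, but the callback signatures and bodies here are assumptions for illustration:

    #include <linux/gpio/aspeed.h>

    /* Sketch: hand a GPIO bank to the ColdFire coprocessor. */
    static int my_copro_pause(void *data)  { /* stop the coprocessor */    return 0; }
    static int my_copro_resume(void *data) { /* restart the coprocessor */ return 0; }

    static const struct aspeed_gpio_copro_ops my_copro_ops = {
        .request_access = my_copro_pause,
        .release_access = my_copro_resume,
    };

    static int share_gpio_with_copro(struct gpio_desc *desc)
    {
        u16 vreg, dreg;
        u8 bit;

        aspeed_gpio_copro_set_ops(&my_copro_ops, NULL);
        return aspeed_gpio_copro_grab_gpio(desc, &vreg, &dreg, &bit);
    }
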
/*
* Any banks not specified in a struct aspeed_bank_props array are assumed to
* have the properties:
@@ -902,11 +1205,18 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
if (!gpio->dcache)
return -ENOMEM;
- /* Populate it with initial values read from the HW */
+ /*
+ * Populate it with initial values read from the HW and switch
+ * all command sources to the ARM by default
+ */
for (i = 0; i < banks; i++) {
const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
- gpio->dcache[i] = ioread32(gpio->base + bank->val_regs +
- GPIO_DATA);
+ void __iomem *addr = bank_reg(gpio, bank, reg_rdata);
+ gpio->dcache[i] = ioread32(addr);
+ aspeed_gpio_change_cmd_source(gpio, bank, 0, GPIO_CMDSRC_ARM);
+ aspeed_gpio_change_cmd_source(gpio, bank, 1, GPIO_CMDSRC_ARM);
+ aspeed_gpio_change_cmd_source(gpio, bank, 2, GPIO_CMDSRC_ARM);
+ aspeed_gpio_change_cmd_source(gpio, bank, 3, GPIO_CMDSRC_ARM);
}
rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 684e9d6d6623..0a553d676042 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -51,7 +51,7 @@ static u32 ath79_gpio_read(struct ath79_gpio_ctrl *ctrl, unsigned reg)
static void ath79_gpio_write(struct ath79_gpio_ctrl *ctrl,
unsigned reg, u32 val)
{
- return writel(val, ctrl->base + reg);
+ writel(val, ctrl->base + reg);
}
static bool ath79_gpio_update_bits(
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 00272fa7cc4f..d0707fc23afd 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -485,12 +485,14 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
static int bcm_kona_gpio_irq_reqres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
+ int ret;
- if (gpiochip_lock_as_irq(&kona_gpio->gpio_chip, d->hwirq)) {
+ ret = gpiochip_lock_as_irq(&kona_gpio->gpio_chip, d->hwirq);
+ if (ret) {
dev_err(kona_gpio->gpio_chip.parent,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
- return -EINVAL;
+ return ret;
}
return 0;
}
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 035a454eca43..a5ece8ea79bc 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -167,8 +167,8 @@ of_err:
static int davinci_gpio_probe(struct platform_device *pdev)
{
static int ctrl_num, bank_base;
- int gpio, bank, ret = 0;
- unsigned ngpio, nbank;
+ int gpio, bank, i, ret = 0;
+ unsigned int ngpio, nbank, nirq;
struct davinci_gpio_controller *chips;
struct davinci_gpio_platform_data *pdata;
struct device *dev = &pdev->dev;
@@ -197,6 +197,16 @@ static int davinci_gpio_probe(struct platform_device *pdev)
if (WARN_ON(ARCH_NR_GPIOS < ngpio))
ngpio = ARCH_NR_GPIOS;
+ /*
+ * If there are unbanked interrupts, the number of interrupts equals
+ * the number of GPIOs; otherwise all interrupts are banked, so the
+ * number of interrupts equals the number of banks (each with 16 GPIOs).
+ */
+ if (pdata->gpio_unbanked)
+ nirq = pdata->gpio_unbanked;
+ else
+ nirq = DIV_ROUND_UP(ngpio, 16);
+
nbank = DIV_ROUND_UP(ngpio, 32);
chips = devm_kcalloc(dev,
nbank, sizeof(struct davinci_gpio_controller),
@@ -209,6 +219,15 @@ static int davinci_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio_base))
return PTR_ERR(gpio_base);
+ for (i = 0; i < nirq; i++) {
+ chips->irqs[i] = platform_get_irq(pdev, i);
+ if (chips->irqs[i] < 0) {
+ dev_info(dev, "IRQ not populated, err = %d\n",
+ chips->irqs[i]);
+ return chips->irqs[i];
+ }
+ }
+
snprintf(label, MAX_LABEL_SIZE, "davinci_gpio.%d", ctrl_num++);
chips->chip.label = devm_kstrdup(dev, label, GFP_KERNEL);
if (!chips->chip.label)
@@ -377,7 +396,7 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
* can provide direct-mapped IRQs to AINTC (up to 32 GPIOs).
*/
if (offset < d->gpio_unbanked)
- return d->base_irq + offset;
+ return d->irqs[offset];
else
return -ENODEV;
}
@@ -386,11 +405,18 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
{
struct davinci_gpio_controller *d;
struct davinci_gpio_regs __iomem *g;
- u32 mask;
+ u32 mask, i;
d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data);
g = (struct davinci_gpio_regs __iomem *)d->regs[0];
- mask = __gpio_mask(data->irq - d->base_irq);
+ for (i = 0; i < MAX_INT_PER_BANK; i++)
+ if (data->irq == d->irqs[i])
+ break;
+
+ if (i == MAX_INT_PER_BANK)
+ return -EINVAL;
+
+ mask = __gpio_mask(i);
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
return -EINVAL;
@@ -459,9 +485,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
int ret;
struct clk *clk;
u32 binten = 0;
- unsigned ngpio, bank_irq;
+ unsigned ngpio;
struct device *dev = &pdev->dev;
- struct resource *res;
struct davinci_gpio_controller *chips = platform_get_drvdata(pdev);
struct davinci_gpio_platform_data *pdata = dev->platform_data;
struct davinci_gpio_regs __iomem *g;
@@ -481,24 +506,13 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
gpio_get_irq_chip = (gpio_get_irq_chip_cb_t)match->data;
ngpio = pdata->ngpio;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "Invalid IRQ resource\n");
- return -EBUSY;
- }
-
- bank_irq = res->start;
-
- if (!bank_irq) {
- dev_err(dev, "Invalid IRQ resource\n");
- return -ENODEV;
- }
clk = devm_clk_get(dev, "gpio");
if (IS_ERR(clk)) {
dev_err(dev, "Error %ld getting gpio clock\n", PTR_ERR(clk));
return PTR_ERR(clk);
}
+
ret = clk_prepare_enable(clk);
if (ret)
return ret;
@@ -538,12 +552,11 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
if (pdata->gpio_unbanked) {
/* pass "bank 0" GPIO IRQs to AINTC */
chips->chip.to_irq = gpio_to_irq_unbanked;
- chips->base_irq = bank_irq;
chips->gpio_unbanked = pdata->gpio_unbanked;
binten = GENMASK(pdata->gpio_unbanked / 16, 0);
/* AINTC handles mask/unmask; GPIO handles triggering */
- irq = bank_irq;
+ irq = chips->irqs[0];
irq_chip = gpio_get_irq_chip(irq);
irq_chip->name = "GPIO-AINTC";
irq_chip->irq_set_type = gpio_irq_type_unbanked;
@@ -554,10 +567,11 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
writel_relaxed(~0, &g->set_rising);
/* set the direct IRQs up to use that irqchip */
- for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++, irq++) {
- irq_set_chip(irq, irq_chip);
- irq_set_handler_data(irq, chips);
- irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH);
+ for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++) {
+ irq_set_chip(chips->irqs[gpio], irq_chip);
+ irq_set_handler_data(chips->irqs[gpio], chips);
+ irq_set_status_flags(chips->irqs[gpio],
+ IRQ_TYPE_EDGE_BOTH);
}
goto done;
@@ -567,7 +581,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
* Or, AINTC can handle IRQs for banks of 16 GPIO IRQs, which we
* then chain through our own handler.
*/
- for (gpio = 0, bank = 0; gpio < ngpio; bank++, bank_irq++, gpio += 16) {
+ for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 16) {
/* disabled by default, enabled only as needed
* There are register sets for 32 GPIOs. 2 banks of 16
* GPIOs are covered by each set of registers hence divide by 2
@@ -594,8 +608,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
irqdata->bank_num = bank;
irqdata->chip = chips;
- irq_set_chained_handler_and_data(bank_irq, gpio_irq_handler,
- irqdata);
+ irq_set_chained_handler_and_data(chips->irqs[bank],
+ gpio_irq_handler, irqdata);
binten |= BIT(bank);
}
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 7a2de3de6571..28da700f5f52 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -255,11 +255,13 @@ static int dwapb_irq_reqres(struct irq_data *d)
struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = igc->private;
struct gpio_chip *gc = &gpio->ports[0].gc;
+ int ret;
- if (gpiochip_lock_as_irq(gc, irqd_to_hwirq(d))) {
+ ret = gpiochip_lock_as_irq(gc, irqd_to_hwirq(d));
+ if (ret) {
dev_err(gpio->dev, "unable to lock HW IRQ %lu for IRQ\n",
irqd_to_hwirq(d));
- return -EINVAL;
+ return ret;
}
return 0;
}
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 2b466b80e70a..982e699a5b81 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -101,12 +101,14 @@ static void em_gio_irq_enable(struct irq_data *d)
static int em_gio_irq_reqres(struct irq_data *d)
{
struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
+ int ret;
- if (gpiochip_lock_as_irq(&p->gpio_chip, irqd_to_hwirq(d))) {
+ ret = gpiochip_lock_as_irq(&p->gpio_chip, irqd_to_hwirq(d));
+ if (ret) {
dev_err(p->gpio_chip.parent,
"unable to lock HW IRQ %lu for IRQ\n",
irqd_to_hwirq(d));
- return -EINVAL;
+ return ret;
}
return 0;
}
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index 7cad14d3f127..389ecd8b7d26 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -35,12 +35,15 @@
/* Chip Id numbers */
#define NO_DEV_ID 0xffff
+#define IT8613_ID 0x8613
#define IT8620_ID 0x8620
#define IT8628_ID 0x8628
+#define IT8718_ID 0x8718
#define IT8728_ID 0x8728
#define IT8732_ID 0x8732
#define IT8761_ID 0x8761
#define IT8772_ID 0x8772
+#define IT8786_ID 0x8786
/* IO Ports */
#define REG 0x2e
@@ -306,6 +309,14 @@ static int __init it87_gpio_init(void)
it87_gpio->chip = it87_template_chip;
switch (chip_type) {
+ case IT8613_ID:
+ gpio_ba_reg = 0x62;
+ it87_gpio->io_size = 8; /* it8613 only needs 6, use 8 for alignment */
+ it87_gpio->output_base = 0xc8;
+ it87_gpio->simple_base = 0xc0;
+ it87_gpio->simple_size = 6;
+ it87_gpio->chip.ngpio = 64; /* has 48, use 64 for convenient calc */
+ break;
case IT8620_ID:
case IT8628_ID:
gpio_ba_reg = 0x62;
@@ -314,9 +325,11 @@ static int __init it87_gpio_init(void)
it87_gpio->simple_size = 0;
it87_gpio->chip.ngpio = 64;
break;
+ case IT8718_ID:
case IT8728_ID:
case IT8732_ID:
case IT8772_ID:
+ case IT8786_ID:
gpio_ba_reg = 0x62;
it87_gpio->io_size = 8;
it87_gpio->output_base = 0xc8;
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 9d8bcc69f245..f03cb0ba7726 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -653,6 +653,12 @@ static int max732x_probe(struct i2c_client *client,
chip->client_group_a = client;
if (nr_port > 8) {
c = i2c_new_dummy(client->adapter, addr_b);
+ if (!c) {
+ dev_err(&client->dev,
+ "Failed to allocate I2C device\n");
+ ret = -ENODEV;
+ goto out_failed;
+ }
chip->client_group_b = chip->client_dummy = c;
}
break;
@@ -660,6 +666,12 @@ static int max732x_probe(struct i2c_client *client,
chip->client_group_b = client;
if (nr_port > 8) {
c = i2c_new_dummy(client->adapter, addr_a);
+ if (!c) {
+ dev_err(&client->dev,
+ "Failed to allocate I2C device\n");
+ ret = -ENODEV;
+ goto out_failed;
+ }
chip->client_group_a = chip->client_dummy = c;
}
break;
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index e1037582e34d..b2635326546e 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -56,9 +56,9 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
rnd = fls(debounce) - 1;
if (rnd && (debounce & BIT(rnd - 1)))
- debounce = round_up(debounce, MEN_Z127_DB_MIN_US);
+ debounce = roundup(debounce, MEN_Z127_DB_MIN_US);
else
- debounce = round_down(debounce, MEN_Z127_DB_MIN_US);
+ debounce = rounddown(debounce, MEN_Z127_DB_MIN_US);
if (debounce > MEN_Z127_DB_MAX_US)
debounce = MEN_Z127_DB_MAX_US;
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index b23d9a36be1f..51c7d1b84c2e 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -496,9 +496,10 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
return 0;
err_gpiochip_add:
+ chip = chip_save;
while (--i >= 0) {
- chip--;
gpiochip_remove(&chip->gpio);
+ chip++;
}
kfree(chip_save);
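
The reset to chip_save makes the unwind independent of where the pointer was left when the error path is entered; walking forward then removes exactly the i chips that registered successfully. The same unwind with an explicit index, for clarity (the index variable is illustrative):

    /* Sketch: equivalent unwind of the i successfully registered chips. */
    int j;

    for (j = 0; j < i; j++)
        gpiochip_remove(&chip_save[j].gpio);
    kfree(chip_save);
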
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 7b14d6280e44..935292a30c99 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -136,8 +136,20 @@ static unsigned long bgpio_line2mask(struct gpio_chip *gc, unsigned int line)
static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
{
unsigned long pinmask = bgpio_line2mask(gc, gpio);
+ bool dir = !!(gc->bgpio_dir & pinmask);
- if (gc->bgpio_dir & pinmask)
+ /*
+ * If the direction is OUT we read the value from the SET
+ * register, and if the direction is IN we read the value
+ * from the DAT register.
+ *
+ * If the direction bits are inverted, naturally this gets
+ * inverted too.
+ */
+ if (gc->bgpio_dir_inverted)
+ dir = !dir;
+
+ if (dir)
return !!(gc->read_reg(gc->reg_set) & pinmask);
else
return !!(gc->read_reg(gc->reg_dat) & pinmask);
@@ -157,8 +169,13 @@ static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
*bits &= ~*mask;
/* Exploit the fact that we know which directions are set */
- set_mask = *mask & gc->bgpio_dir;
- get_mask = *mask & ~gc->bgpio_dir;
+ if (gc->bgpio_dir_inverted) {
+ set_mask = *mask & ~gc->bgpio_dir;
+ get_mask = *mask & gc->bgpio_dir;
+ } else {
+ set_mask = *mask & gc->bgpio_dir;
+ get_mask = *mask & ~gc->bgpio_dir;
+ }
if (set_mask)
*bits |= gc->read_reg(gc->reg_set) & set_mask;
@@ -359,7 +376,10 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+ if (gc->bgpio_dir_inverted)
+ gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
+ else
+ gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
gc->write_reg(gc->reg_dir, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -370,7 +390,10 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
/* Return 0 if output, 1 if input */
- return !(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
+ if (gc->bgpio_dir_inverted)
+ return !!(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
+ else
+ return !(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
}
static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -381,37 +404,10 @@ static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
- gc->write_reg(gc->reg_dir, gc->bgpio_dir);
-
- spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
- return 0;
-}
-
-static int bgpio_dir_in_inv(struct gpio_chip *gc, unsigned int gpio)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&gc->bgpio_lock, flags);
-
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
- gc->write_reg(gc->reg_dir, gc->bgpio_dir);
-
- spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
- return 0;
-}
-
-static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- unsigned long flags;
-
- gc->set(gc, gpio, val);
-
- spin_lock_irqsave(&gc->bgpio_lock, flags);
-
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+ if (gc->bgpio_dir_inverted)
+ gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+ else
+ gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
gc->write_reg(gc->reg_dir, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -419,12 +415,6 @@ static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
return 0;
}
-static int bgpio_get_dir_inv(struct gpio_chip *gc, unsigned int gpio)
-{
- /* Return 0 if output, 1 if input */
- return !!(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
-}
-
static int bgpio_setup_accessors(struct device *dev,
struct gpio_chip *gc,
bool byte_be)
@@ -560,9 +550,10 @@ static int bgpio_setup_direction(struct gpio_chip *gc,
gc->get_direction = bgpio_get_dir;
} else if (dirin) {
gc->reg_dir = dirin;
- gc->direction_output = bgpio_dir_out_inv;
- gc->direction_input = bgpio_dir_in_inv;
- gc->get_direction = bgpio_get_dir_inv;
+ gc->direction_output = bgpio_dir_out;
+ gc->direction_input = bgpio_dir_in;
+ gc->get_direction = bgpio_get_dir;
+ gc->bgpio_dir_inverted = true;
} else {
if (flags & BGPIOF_NO_OUTPUT)
gc->direction_output = bgpio_dir_out_err;
@@ -582,6 +573,33 @@ static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
return -EINVAL;
}
+/**
+ * bgpio_init() - Initialize generic GPIO accessor functions
+ * @gc: the GPIO chip to set up
+ * @dev: the parent device of the new GPIO chip (compulsory)
+ * @sz: the size (width) of the MMIO registers in bytes, typically 1, 2 or 4
+ * @dat: MMIO address of the register used to READ the value of the GPIO
+ * lines; a 1 in the corresponding bit of this register means the line is
+ * asserted
+ * @set: MMIO address of the register used to SET the value of the GPIO
+ * lines; writing a 1 to a line's bit in this register drives that line
+ * high
+ * @clr: MMIO address of the register used to CLEAR the value of the GPIO
+ * lines; writing a 1 to a line's bit in this register drives that line
+ * low. This address may be left NULL, in which case the SET register is
+ * assumed to also clear the GPIO lines, by actively writing the line
+ * with 0
+ * @dirout: MMIO address of the register used to set a line as OUTPUT. Setting
+ * a line's bit to 1 in this register turns that line into an output;
+ * setting it to 0 turns the line into an input. Either this or @dirin
+ * can be defined, but never both
+ * @dirin: MMIO address of the register used to set a line as INPUT. Setting
+ * a line's bit to 1 in this register turns that line into an input;
+ * setting it to 0 turns the line into an output. Either this or @dirout
+ * can be defined, but never both
+ * @flags: Different flags that will affect the behaviour of the device, such
+ * as endianness etc.
+ */
int bgpio_init(struct gpio_chip *gc, struct device *dev,
unsigned long sz, void __iomem *dat, void __iomem *set,
void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
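As a hedged usage sketch of the parameters documented above: a driver with one data register, a SET/CLR pair and a "1 = output" direction register might wire them up roughly as follows. The register offsets and the probe function are hypothetical; compare the real call in gpio-mt7621.c below.

    /* Hypothetical probe fragment; assumes <linux/gpio/driver.h>,
     * <linux/io.h>, <linux/slab.h> and <linux/platform_device.h>. */
    static int demo_gpio_probe(struct platform_device *pdev)
    {
            struct gpio_chip *gc;
            struct resource *res;
            void __iomem *base;
            int ret;

            gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
            if (!gc)
                    return -ENOMEM;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            ret = bgpio_init(gc, &pdev->dev, 4,
                             base + 0x00,       /* dat */
                             base + 0x04,       /* set */
                             base + 0x08,       /* clr */
                             base + 0x0c,       /* dirout */
                             NULL,              /* dirin: never both */
                             0);
            if (ret)
                    return ret;

            return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
    }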
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
new file mode 100644
index 000000000000..d72af6f6cdbd
--- /dev/null
+++ b/drivers/gpio/gpio-mt7621.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define MTK_BANK_CNT 3
+#define MTK_BANK_WIDTH 32
+
+#define GPIO_BANK_STRIDE 0x04
+#define GPIO_REG_CTRL 0x00
+#define GPIO_REG_POL 0x10
+#define GPIO_REG_DATA 0x20
+#define GPIO_REG_DSET 0x30
+#define GPIO_REG_DCLR 0x40
+#define GPIO_REG_REDGE 0x50
+#define GPIO_REG_FEDGE 0x60
+#define GPIO_REG_HLVL 0x70
+#define GPIO_REG_LLVL 0x80
+#define GPIO_REG_STAT 0x90
+#define GPIO_REG_EDGE 0xA0
+
+struct mtk_gc {
+ struct gpio_chip chip;
+ spinlock_t lock;
+ int bank;
+ u32 rising;
+ u32 falling;
+ u32 hlevel;
+ u32 llevel;
+};
+
+/**
+ * struct mtk - state container for the data of the platform driver.
+ * It holds three separate gpio_chips, each one with its own irq_chip.
+ * @dev: device instance
+ * @base: memory base address
+ * @gpio_irq: irq number from the device tree
+ * @gc_map: array of the gpio chips
+ */
+struct mtk {
+ struct device *dev;
+ void __iomem *base;
+ int gpio_irq;
+ struct mtk_gc gc_map[MTK_BANK_CNT];
+};
+
+static inline struct mtk_gc *
+to_mediatek_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct mtk_gc, chip);
+}
+
+static inline void
+mtk_gpio_w32(struct mtk_gc *rg, u32 offset, u32 val)
+{
+ struct gpio_chip *gc = &rg->chip;
+ struct mtk *mtk = gpiochip_get_data(gc);
+
+ offset = (rg->bank * GPIO_BANK_STRIDE) + offset;
+ gc->write_reg(mtk->base + offset, val);
+}
+
+static inline u32
+mtk_gpio_r32(struct mtk_gc *rg, u32 offset)
+{
+ struct gpio_chip *gc = &rg->chip;
+ struct mtk *mtk = gpiochip_get_data(gc);
+
+ offset = (rg->bank * GPIO_BANK_STRIDE) + offset;
+ return gc->read_reg(mtk->base + offset);
+}
+
+static irqreturn_t
+mediatek_gpio_irq_handler(int irq, void *data)
+{
+ struct gpio_chip *gc = data;
+ struct mtk_gc *rg = to_mediatek_gpio(gc);
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long pending;
+ int bit;
+
+ pending = mtk_gpio_r32(rg, GPIO_REG_STAT);
+
+ for_each_set_bit(bit, &pending, MTK_BANK_WIDTH) {
+ u32 map = irq_find_mapping(gc->irq.domain, bit);
+
+ generic_handle_irq(map);
+ mtk_gpio_w32(rg, GPIO_REG_STAT, BIT(bit));
+ ret |= IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static void
+mediatek_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mtk_gc *rg = to_mediatek_gpio(gc);
+ int pin = d->hwirq;
+ unsigned long flags;
+ u32 rise, fall, high, low;
+
+ spin_lock_irqsave(&rg->lock, flags);
+ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
+ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
+ high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
+ low = mtk_gpio_r32(rg, GPIO_REG_LLVL);
+ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise | (BIT(pin) & rg->rising));
+ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall | (BIT(pin) & rg->falling));
+ mtk_gpio_w32(rg, GPIO_REG_HLVL, high | (BIT(pin) & rg->hlevel));
+ mtk_gpio_w32(rg, GPIO_REG_LLVL, low | (BIT(pin) & rg->llevel));
+ spin_unlock_irqrestore(&rg->lock, flags);
+}
+
+static void
+mediatek_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mtk_gc *rg = to_mediatek_gpio(gc);
+ int pin = d->hwirq;
+ unsigned long flags;
+ u32 rise, fall, high, low;
+
+ spin_lock_irqsave(&rg->lock, flags);
+ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
+ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
+ high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
+ low = mtk_gpio_r32(rg, GPIO_REG_LLVL);
+ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
+ spin_unlock_irqrestore(&rg->lock, flags);
+}
+
+static int
+mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mtk_gc *rg = to_mediatek_gpio(gc);
+ int pin = d->hwirq;
+ u32 mask = BIT(pin);
+
+ if (type == IRQ_TYPE_PROBE) {
+ if ((rg->rising | rg->falling |
+ rg->hlevel | rg->llevel) & mask)
+ return 0;
+
+ type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
+ }
+
+ rg->rising &= ~mask;
+ rg->falling &= ~mask;
+ rg->hlevel &= ~mask;
+ rg->llevel &= ~mask;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_BOTH:
+ rg->rising |= mask;
+ rg->falling |= mask;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ rg->rising |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ rg->falling |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ rg->hlevel |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ rg->llevel |= mask;
+ break;
+ }
+
+ return 0;
+}
+
+static struct irq_chip mediatek_gpio_irq_chip = {
+ .irq_unmask = mediatek_gpio_irq_unmask,
+ .irq_mask = mediatek_gpio_irq_mask,
+ .irq_mask_ack = mediatek_gpio_irq_mask,
+ .irq_set_type = mediatek_gpio_irq_type,
+};
+
+static int
+mediatek_gpio_xlate(struct gpio_chip *chip,
+ const struct of_phandle_args *spec, u32 *flags)
+{
+ int gpio = spec->args[0];
+ struct mtk_gc *rg = to_mediatek_gpio(chip);
+
+ if (rg->bank != gpio / MTK_BANK_WIDTH)
+ return -EINVAL;
+
+ if (flags)
+ *flags = spec->args[1];
+
+ return gpio % MTK_BANK_WIDTH;
+}
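Worked example of the translation above: with MTK_BANK_WIDTH = 32, a two-cell specifier for global line 40 resolves to offset 8 (40 % 32) on the bank-1 chip (40 / 32), while the bank-0 and bank-2 chips return -EINVAL for the same specifier; this is how the right one of the three registered chips claims the line.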
+
+static int
+mediatek_gpio_bank_probe(struct device *dev,
+ struct device_node *node, int bank)
+{
+ struct mtk *mtk = dev_get_drvdata(dev);
+ struct mtk_gc *rg;
+ void __iomem *dat, *set, *ctrl, *diro;
+ int ret;
+
+ rg = &mtk->gc_map[bank];
+ memset(rg, 0, sizeof(*rg));
+
+ spin_lock_init(&rg->lock);
+ rg->chip.of_node = node;
+ rg->bank = bank;
+
+ dat = mtk->base + GPIO_REG_DATA + (rg->bank * GPIO_BANK_STRIDE);
+ set = mtk->base + GPIO_REG_DSET + (rg->bank * GPIO_BANK_STRIDE);
+ ctrl = mtk->base + GPIO_REG_DCLR + (rg->bank * GPIO_BANK_STRIDE);
+ diro = mtk->base + GPIO_REG_CTRL + (rg->bank * GPIO_BANK_STRIDE);
+
+ ret = bgpio_init(&rg->chip, dev, 4,
+ dat, set, ctrl, diro, NULL, 0);
+ if (ret) {
+ dev_err(dev, "bgpio_init() failed\n");
+ return ret;
+ }
+
+ rg->chip.of_gpio_n_cells = 2;
+ rg->chip.of_xlate = mediatek_gpio_xlate;
+ rg->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
+ dev_name(dev), bank);
+
+ ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
+ if (ret < 0) {
+ dev_err(dev, "Could not register gpio %d, ret=%d\n",
+ rg->chip.ngpio, ret);
+ return ret;
+ }
+
+ if (mtk->gpio_irq) {
+ /*
+ * Manually request the irq here instead of passing
+ * a flow-handler to gpiochip_set_chained_irqchip,
+ * because the irq is shared.
+ */
+ ret = devm_request_irq(dev, mtk->gpio_irq,
+ mediatek_gpio_irq_handler, IRQF_SHARED,
+ rg->chip.label, &rg->chip);
+
+ if (ret) {
+ dev_err(dev, "Error requesting IRQ %d: %d\n",
+ mtk->gpio_irq, ret);
+ return ret;
+ }
+
+ ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
+ 0, handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev, "failed to add gpiochip_irqchip\n");
+ return ret;
+ }
+
+ gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
+ mtk->gpio_irq, NULL);
+ }
+
+ /* set polarity to low for all gpios */
+ mtk_gpio_w32(rg, GPIO_REG_POL, 0);
+
+ dev_info(dev, "registering %d gpios\n", rg->chip.ngpio);
+
+ return 0;
+}
+
+static int
+mediatek_gpio_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct mtk *mtk;
+ int i;
+
+ mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL);
+ if (!mtk)
+ return -ENOMEM;
+
+ mtk->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mtk->base))
+ return PTR_ERR(mtk->base);
+
+ mtk->gpio_irq = irq_of_parse_and_map(np, 0);
+ mtk->dev = dev;
+ platform_set_drvdata(pdev, mtk);
+ mediatek_gpio_irq_chip.name = dev_name(dev);
+
+ for (i = 0; i < MTK_BANK_CNT; i++)
+ mediatek_gpio_bank_probe(dev, np, i);
+
+ return 0;
+}
+
+static const struct of_device_id mediatek_gpio_match[] = {
+ { .compatible = "mediatek,mt7621-gpio" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mediatek_gpio_match);
+
+static struct platform_driver mediatek_gpio_driver = {
+ .probe = mediatek_gpio_probe,
+ .driver = {
+ .name = "mt7621_gpio",
+ .of_match_table = mediatek_gpio_match,
+ },
+};
+
+builtin_platform_driver(mediatek_gpio_driver);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 2f2829966d4c..995cf0b9e0b1 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -45,6 +45,15 @@ struct mxc_gpio_hwdata {
unsigned fall_edge;
};
+struct mxc_gpio_reg_saved {
+ u32 icr1;
+ u32 icr2;
+ u32 imr;
+ u32 gdir;
+ u32 edge_sel;
+ u32 dr;
+};
+
struct mxc_gpio_port {
struct list_head node;
void __iomem *base;
@@ -55,6 +64,8 @@ struct mxc_gpio_port {
struct gpio_chip gc;
struct device *dev;
u32 both_edges;
+ struct mxc_gpio_reg_saved gpio_saved_reg;
+ bool power_off;
};
static struct mxc_gpio_hwdata imx1_imx21_gpio_hwdata = {
@@ -143,6 +154,7 @@ static const struct of_device_id mxc_gpio_dt_ids[] = {
{ .compatible = "fsl,imx21-gpio", .data = &mxc_gpio_devtype[IMX21_GPIO], },
{ .compatible = "fsl,imx31-gpio", .data = &mxc_gpio_devtype[IMX31_GPIO], },
{ .compatible = "fsl,imx35-gpio", .data = &mxc_gpio_devtype[IMX35_GPIO], },
+ { .compatible = "fsl,imx7d-gpio", .data = &mxc_gpio_devtype[IMX35_GPIO], },
{ /* sentinel */ }
};
@@ -434,6 +446,9 @@ static int mxc_gpio_probe(struct platform_device *pdev)
return err;
}
+ if (of_device_is_compatible(np, "fsl,imx7d-gpio"))
+ port->power_off = true;
+
/* disable the interrupt and clear the status */
writel(0, port->base + GPIO_IMR);
writel(~0, port->base + GPIO_ISR);
@@ -497,6 +512,8 @@ static int mxc_gpio_probe(struct platform_device *pdev)
list_add_tail(&port->node, &mxc_gpio_ports);
+ platform_set_drvdata(pdev, port);
+
return 0;
out_irqdomain_remove:
@@ -507,11 +524,67 @@ out_bgio:
return err;
}
+static void mxc_gpio_save_regs(struct mxc_gpio_port *port)
+{
+ if (!port->power_off)
+ return;
+
+ port->gpio_saved_reg.icr1 = readl(port->base + GPIO_ICR1);
+ port->gpio_saved_reg.icr2 = readl(port->base + GPIO_ICR2);
+ port->gpio_saved_reg.imr = readl(port->base + GPIO_IMR);
+ port->gpio_saved_reg.gdir = readl(port->base + GPIO_GDIR);
+ port->gpio_saved_reg.edge_sel = readl(port->base + GPIO_EDGE_SEL);
+ port->gpio_saved_reg.dr = readl(port->base + GPIO_DR);
+}
+
+static void mxc_gpio_restore_regs(struct mxc_gpio_port *port)
+{
+ if (!port->power_off)
+ return;
+
+ writel(port->gpio_saved_reg.icr1, port->base + GPIO_ICR1);
+ writel(port->gpio_saved_reg.icr2, port->base + GPIO_ICR2);
+ writel(port->gpio_saved_reg.imr, port->base + GPIO_IMR);
+ writel(port->gpio_saved_reg.gdir, port->base + GPIO_GDIR);
+ writel(port->gpio_saved_reg.edge_sel, port->base + GPIO_EDGE_SEL);
+ writel(port->gpio_saved_reg.dr, port->base + GPIO_DR);
+}
+
+static int __maybe_unused mxc_gpio_noirq_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mxc_gpio_port *port = platform_get_drvdata(pdev);
+
+ mxc_gpio_save_regs(port);
+ clk_disable_unprepare(port->clk);
+
+ return 0;
+}
+
+static int __maybe_unused mxc_gpio_noirq_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mxc_gpio_port *port = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_prepare_enable(port->clk);
+ if (ret)
+ return ret;
+ mxc_gpio_restore_regs(port);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mxc_gpio_dev_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mxc_gpio_noirq_suspend, mxc_gpio_noirq_resume)
+};
+
static struct platform_driver mxc_gpio_driver = {
.driver = {
.name = "gpio-mxc",
.of_match_table = mxc_gpio_dt_ids,
.suppress_bind_attrs = true,
+ .pm = &mxc_gpio_dev_pm_ops,
},
.probe = mxc_gpio_probe,
.id_table = mxc_gpio_devtype,
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index e2831ee70cdc..df30490da820 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -126,8 +126,7 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
else
writel(pin_mask, pin_addr + MXS_CLR);
- writel(pin_mask,
- port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
+ writel(pin_mask, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
return 0;
}
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index d1afedf4dcbf..e81008678a38 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -77,6 +77,8 @@ struct gpio_bank {
bool workaround_enabled;
void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
+ void (*set_dataout_multiple)(struct gpio_bank *bank,
+ unsigned long *mask, unsigned long *bits);
int (*get_context_loss_count)(struct device *dev);
struct omap_gpio_reg_offs *regs;
@@ -161,6 +163,51 @@ static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset)
return (readl_relaxed(reg) & (BIT(offset))) != 0;
}
+/* set multiple data out values using dedicated set/clear register */
+static void omap_set_gpio_dataout_reg_multiple(struct gpio_bank *bank,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ void __iomem *reg = bank->base;
+ u32 l;
+
+ l = *bits & *mask;
+ writel_relaxed(l, reg + bank->regs->set_dataout);
+ bank->context.dataout |= l;
+
+ l = ~*bits & *mask;
+ writel_relaxed(l, reg + bank->regs->clr_dataout);
+ bank->context.dataout &= ~l;
+}
+
+/* set multiple data out values using mask register */
+static void omap_set_gpio_dataout_mask_multiple(struct gpio_bank *bank,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ void __iomem *reg = bank->base + bank->regs->dataout;
+ u32 l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask);
+
+ writel_relaxed(l, reg);
+ bank->context.dataout = l;
+}
+
+static unsigned long omap_get_gpio_datain_multiple(struct gpio_bank *bank,
+ unsigned long *mask)
+{
+ void __iomem *reg = bank->base + bank->regs->datain;
+
+ return readl_relaxed(reg) & *mask;
+}
+
+static unsigned long omap_get_gpio_dataout_multiple(struct gpio_bank *bank,
+ unsigned long *mask)
+{
+ void __iomem *reg = bank->base + bank->regs->dataout;
+
+ return readl_relaxed(reg) & *mask;
+}
+
static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
{
int l = readl_relaxed(base + reg);
@@ -968,6 +1015,26 @@ static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
return 0;
}
+static int omap_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_bank *bank = gpiochip_get_data(chip);
+ void __iomem *reg = bank->base + bank->regs->direction;
+ unsigned long in = readl_relaxed(reg), l;
+
+ *bits = 0;
+
+ l = in & *mask;
+ if (l)
+ *bits |= omap_get_gpio_datain_multiple(bank, &l);
+
+ l = ~in & *mask;
+ if (l)
+ *bits |= omap_get_gpio_dataout_multiple(bank, &l);
+
+ return 0;
+}
+
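The split above can be stated as one line of bit arithmetic; a standalone sketch (illustrative names, OMAP's "1 = input" direction convention assumed):

    #include <stdint.h>

    /* Lines set as input in "in" are read from DATAIN, the rest from DATAOUT. */
    static uint32_t demo_get_multiple(uint32_t in, uint32_t mask,
                                      uint32_t datain, uint32_t dataout)
    {
            return (datain & in & mask) | (dataout & ~in & mask);
    }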
static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
unsigned debounce)
{
@@ -1012,6 +1079,17 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
raw_spin_unlock_irqrestore(&bank->lock, flags);
}
+static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_bank *bank = gpiochip_get_data(chip);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+ bank->set_dataout_multiple(bank, mask, bits);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+}
+
/*---------------------------------------------------------------------*/
static void omap_gpio_show_rev(struct gpio_bank *bank)
@@ -1073,9 +1151,11 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
bank->chip.get_direction = omap_gpio_get_direction;
bank->chip.direction_input = omap_gpio_input;
bank->chip.get = omap_gpio_get;
+ bank->chip.get_multiple = omap_gpio_get_multiple;
bank->chip.direction_output = omap_gpio_output;
bank->chip.set_config = omap_gpio_set_config;
bank->chip.set = omap_gpio_set;
+ bank->chip.set_multiple = omap_gpio_set_multiple;
if (bank->is_mpuio) {
bank->chip.label = "mpuio";
if (bank->regs->wkup_en)
@@ -1209,10 +1289,14 @@ static int omap_gpio_probe(struct platform_device *pdev)
pdata->get_context_loss_count;
}
- if (bank->regs->set_dataout && bank->regs->clr_dataout)
+ if (bank->regs->set_dataout && bank->regs->clr_dataout) {
bank->set_dataout = omap_set_gpio_dataout_reg;
- else
+ bank->set_dataout_multiple = omap_set_gpio_dataout_reg_multiple;
+ } else {
bank->set_dataout = omap_set_gpio_dataout_mask;
+ bank->set_dataout_multiple =
+ omap_set_gpio_dataout_mask_multiple;
+ }
raw_spin_lock_init(&bank->lock);
raw_spin_lock_init(&bank->wa_lock);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index c55ad157e820..023a32cfac42 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -708,7 +708,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
{
struct i2c_client *client = chip->client;
- if (irq_base != -1 && (chip->driver_data & PCA_INT))
+ if (client->irq && irq_base != -1 && (chip->driver_data & PCA_INT))
dev_warn(&client->dev, "interrupt support not compiled in\n");
return 0;
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index f5545049c187..f809a5a8e9eb 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -12,6 +12,8 @@
* GNU General Public License version 2 for more details.
*/
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
@@ -90,6 +92,25 @@ static int pisosr_gpio_get(struct gpio_chip *chip, unsigned offset)
return (gpio->buffer[offset / 8] >> (offset % 8)) & 0x1;
}
+static int pisosr_gpio_get_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct pisosr_gpio *gpio = gpiochip_get_data(chip);
+ unsigned int nbytes = DIV_ROUND_UP(chip->ngpio, 8);
+ unsigned int i, j;
+
+ pisosr_gpio_refresh(gpio);
+
+ bitmap_zero(bits, chip->ngpio);
+ for (i = 0; i < nbytes; i++) {
+ j = i / sizeof(unsigned long);
+ bits[j] |= ((unsigned long) gpio->buffer[i])
+ << (8 * (i % sizeof(unsigned long)));
+ }
+
+ return 0;
+}
+
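A minimal standalone sketch of the byte-packing loop above, assuming the same layout (8 shift-register bits per buffer byte, folded into an unsigned long bitmap):

    #include <stdio.h>

    int main(void)
    {
            unsigned char buffer[2] = { 0xa5, 0x01 };   /* 16 sampled inputs */
            unsigned long bits = 0;
            unsigned int i;

            for (i = 0; i < sizeof(buffer); i++)
                    bits |= (unsigned long)buffer[i]
                            << (8 * (i % sizeof(unsigned long)));

            printf("0x%lx\n", bits);    /* prints 0x1a5 */
            return 0;
    }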
static const struct gpio_chip template_chip = {
.label = "pisosr-gpio",
.owner = THIS_MODULE,
@@ -97,6 +118,7 @@ static const struct gpio_chip template_chip = {
.direction_input = pisosr_gpio_direction_input,
.direction_output = pisosr_gpio_direction_output,
.get = pisosr_gpio_get,
+ .get_multiple = pisosr_gpio_get_multiple,
.base = -1,
.ngpio = DEFAULT_NGPIO,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 1e66f808051c..c18712dabf93 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -241,6 +241,17 @@ int pxa_irq_to_gpio(int irq)
return irq_gpio0;
}
+static bool pxa_gpio_has_pinctrl(void)
+{
+ switch (gpio_type) {
+ case PXA3XX_GPIO:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct pxa_gpio_chip *pchip = chip_to_pxachip(chip);
@@ -255,9 +266,11 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
int ret;
- ret = pinctrl_gpio_direction_input(chip->base + offset);
- if (!ret)
- return 0;
+ if (pxa_gpio_has_pinctrl()) {
+ ret = pinctrl_gpio_direction_input(chip->base + offset);
+ if (!ret)
+ return 0;
+ }
spin_lock_irqsave(&gpio_lock, flags);
@@ -282,9 +295,11 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
- ret = pinctrl_gpio_direction_output(chip->base + offset);
- if (ret)
- return ret;
+ if (pxa_gpio_has_pinctrl()) {
+ ret = pinctrl_gpio_direction_output(chip->base + offset);
+ if (ret)
+ return ret;
+ }
spin_lock_irqsave(&gpio_lock, flags);
@@ -348,8 +363,12 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
pchip->chip.set = pxa_gpio_set;
pchip->chip.to_irq = pxa_gpio_to_irq;
pchip->chip.ngpio = ngpio;
- pchip->chip.request = gpiochip_generic_request;
- pchip->chip.free = gpiochip_generic_free;
+
+ if (pxa_gpio_has_pinctrl()) {
+ pchip->chip.request = gpiochip_generic_request;
+ pchip->chip.free = gpiochip_generic_free;
+ }
+
#ifdef CONFIG_OF_GPIO
pchip->chip.of_node = np;
pchip->chip.of_xlate = pxa_gpio_of_xlate;
@@ -607,7 +626,7 @@ static int pxa_gpio_probe(struct platform_device *pdev)
struct pxa_gpio_platform_data *info;
void __iomem *gpio_reg_base;
int gpio, ret;
- int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;
+ int irq0 = 0, irq1 = 0, irq_mux;
pchip = devm_kzalloc(&pdev->dev, sizeof(*pchip), GFP_KERNEL);
if (!pchip)
@@ -646,14 +665,13 @@ static int pxa_gpio_probe(struct platform_device *pdev)
pchip->irq0 = irq0;
pchip->irq1 = irq1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
gpio_reg_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!gpio_reg_base)
return -EINVAL;
- if (irq0 > 0)
- gpio_offset = 2;
-
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Error %ld to get gpio clock\n",
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index 3b4dc1a9a68d..a499c633a6c5 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mfd/rc5t583.h>
struct rc5t583_gpio {
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 350390c0b290..55cc61086d99 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -15,7 +15,7 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -278,6 +278,13 @@ static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
pm_runtime_put(&p->pdev->dev);
}
+static int gpio_rcar_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpio_rcar_priv *p = gpiochip_get_data(chip);
+
+ return !(gpio_rcar_read(p, INOUTSEL) & BIT(offset));
+}
+
static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
{
gpio_rcar_config_general_input_output_mode(chip, offset, false);
@@ -461,6 +468,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
gpio_chip = &p->gpio_chip;
gpio_chip->request = gpio_rcar_request;
gpio_chip->free = gpio_rcar_free;
+ gpio_chip->get_direction = gpio_rcar_get_direction;
gpio_chip->direction_input = gpio_rcar_direction_input;
gpio_chip->get = gpio_rcar_get;
gpio_chip->direction_output = gpio_rcar_direction_output;
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index cbf0f9e6465b..2938217566d3 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -25,7 +25,7 @@
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mfd/rdc321x.h>
#include <linux/slab.h>
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 249f433aa62d..986eb3b231ac 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -7,7 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 545004445846..e9878f6ede67 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -26,8 +26,7 @@
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/pci_ids.h>
-
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#define GEN 0x00
#define GIO 0x04
@@ -138,6 +137,13 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
return 0;
}
+static int sch_gpio_get_direction(struct gpio_chip *gc, unsigned gpio_num)
+{
+ struct sch_gpio *sch = gpiochip_get_data(gc);
+
+ return sch_gpio_reg_get(sch, gpio_num, GIO);
+}
+
static const struct gpio_chip sch_gpio_chip = {
.label = "sch_gpio",
.owner = THIS_MODULE,
@@ -145,6 +151,7 @@ static const struct gpio_chip sch_gpio_chip = {
.get = sch_gpio_get,
.direction_output = sch_gpio_direction_out,
.set = sch_gpio_set,
+ .get_direction = sch_gpio_get_direction,
};
static int sch_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index b96990c262a1..5497f0a88cf0 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -17,16 +17,15 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/io.h>
#define DRV_NAME "gpio-sch311x"
-#define SCH311X_GPIO_CONF_OUT 0x00
-#define SCH311X_GPIO_CONF_IN 0x01
-#define SCH311X_GPIO_CONF_INVERT 0x02
-#define SCH311X_GPIO_CONF_OPEN_DRAIN 0x80
+#define SCH311X_GPIO_CONF_DIR BIT(0)
+#define SCH311X_GPIO_CONF_INVERT BIT(1)
+#define SCH311X_GPIO_CONF_OPEN_DRAIN BIT(7)
#define SIO_CONFIG_KEY_ENTER 0x55
#define SIO_CONFIG_KEY_EXIT 0xaa
@@ -163,7 +162,7 @@ static void sch311x_gpio_free(struct gpio_chip *chip, unsigned offset)
static int sch311x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct sch311x_gpio_block *block = gpiochip_get_data(chip);
- unsigned char data;
+ u8 data;
spin_lock(&block->lock);
data = inb(block->runtime_reg + block->data_reg);
@@ -175,7 +174,7 @@ static int sch311x_gpio_get(struct gpio_chip *chip, unsigned offset)
static void __sch311x_gpio_set(struct sch311x_gpio_block *block,
unsigned offset, int value)
{
- unsigned char data = inb(block->runtime_reg + block->data_reg);
+ u8 data = inb(block->runtime_reg + block->data_reg);
if (value)
data |= BIT(offset);
else
@@ -196,10 +195,12 @@ static void sch311x_gpio_set(struct gpio_chip *chip, unsigned offset,
static int sch311x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
struct sch311x_gpio_block *block = gpiochip_get_data(chip);
+ u8 data;
spin_lock(&block->lock);
- outb(SCH311X_GPIO_CONF_IN, block->runtime_reg +
- block->config_regs[offset]);
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ data |= SCH311X_GPIO_CONF_DIR;
+ outb(data, block->runtime_reg + block->config_regs[offset]);
spin_unlock(&block->lock);
return 0;
@@ -209,18 +210,59 @@ static int sch311x_gpio_direction_out(struct gpio_chip *chip, unsigned offset,
int value)
{
struct sch311x_gpio_block *block = gpiochip_get_data(chip);
+ u8 data;
spin_lock(&block->lock);
- outb(SCH311X_GPIO_CONF_OUT, block->runtime_reg +
- block->config_regs[offset]);
-
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ data &= ~SCH311X_GPIO_CONF_DIR;
+ outb(data, block->runtime_reg + block->config_regs[offset]);
__sch311x_gpio_set(block, offset, value);
spin_unlock(&block->lock);
return 0;
}
+static int sch311x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct sch311x_gpio_block *block = gpiochip_get_data(chip);
+ u8 data;
+
+ spin_lock(&block->lock);
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ spin_unlock(&block->lock);
+
+ return !!(data & SCH311X_GPIO_CONF_DIR);
+}
+
+static int sch311x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
+{
+ struct sch311x_gpio_block *block = gpiochip_get_data(chip);
+ enum pin_config_param param = pinconf_to_config_param(config);
+ u8 data;
+
+ switch (param) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ spin_lock(&block->lock);
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ data |= SCH311X_GPIO_CONF_OPEN_DRAIN;
+ outb(data, block->runtime_reg + block->config_regs[offset]);
+ spin_unlock(&block->lock);
+ return 0;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ spin_lock(&block->lock);
+ data = inb(block->runtime_reg + block->config_regs[offset]);
+ data &= ~SCH311X_GPIO_CONF_OPEN_DRAIN;
+ outb(data, block->runtime_reg + block->config_regs[offset]);
+ spin_unlock(&block->lock);
+ return 0;
+ default:
+ break;
+ }
+ return -ENOTSUPP;
+}
+
static int sch311x_gpio_probe(struct platform_device *pdev)
{
struct sch311x_pdev_data *pdata = dev_get_platdata(&pdev->dev);
@@ -253,6 +295,8 @@ static int sch311x_gpio_probe(struct platform_device *pdev)
block->chip.free = sch311x_gpio_free;
block->chip.direction_input = sch311x_gpio_direction_in;
block->chip.direction_output = sch311x_gpio_direction_out;
+ block->chip.get_direction = sch311x_gpio_get_direction;
+ block->chip.set_config = sch311x_gpio_set_config;
block->chip.get = sch311x_gpio_get;
block->chip.set = sch311x_gpio_set;
block->chip.ngpio = 8;
@@ -309,7 +353,7 @@ static int __init sch311x_detect(int sio_config_port, unsigned short *addr)
{
int err = 0, reg;
unsigned short base_addr;
- unsigned char dev_id;
+ u8 dev_id;
err = sch311x_sio_enter(sio_config_port);
if (err)
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 22267479ba68..ee3039f091f4 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -10,7 +10,7 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 407359da08f9..2283c869ad5d 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -23,7 +23,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pci.h>
@@ -58,16 +59,6 @@ struct gsta_gpio {
unsigned irq_type[GSTA_NR_GPIO];
};
-static inline struct gsta_regs __iomem *__regs(struct gsta_gpio *chip, int nr)
-{
- return chip->regs[nr / GSTA_GPIO_PER_BLOCK];
-}
-
-static inline u32 __bit(int nr)
-{
- return 1U << (nr % GSTA_GPIO_PER_BLOCK);
-}
-
/*
* gpio methods
*/
@@ -75,8 +66,8 @@ static inline u32 __bit(int nr)
static void gsta_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
{
struct gsta_gpio *chip = gpiochip_get_data(gpio);
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
if (val)
writel(bit, &regs->dats);
@@ -87,8 +78,8 @@ static void gsta_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
static int gsta_gpio_get(struct gpio_chip *gpio, unsigned nr)
{
struct gsta_gpio *chip = gpiochip_get_data(gpio);
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
return !!(readl(&regs->dat) & bit);
}
@@ -97,8 +88,8 @@ static int gsta_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
int val)
{
struct gsta_gpio *chip = gpiochip_get_data(gpio);
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
writel(bit, &regs->dirs);
/* Data register after direction, otherwise pullup/down is selected */
@@ -112,8 +103,8 @@ static int gsta_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
static int gsta_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
{
struct gsta_gpio *chip = gpiochip_get_data(gpio);
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
writel(bit, &regs->dirc);
return 0;
@@ -165,9 +156,9 @@ static void gsta_gpio_setup(struct gsta_gpio *chip) /* called from probe */
*/
static void gsta_set_config(struct gsta_gpio *chip, int nr, unsigned cfg)
{
- struct gsta_regs __iomem *regs = __regs(chip, nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
unsigned long flags;
- u32 bit = __bit(nr);
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
u32 val;
int err = 0;
@@ -234,8 +225,8 @@ static void gsta_irq_disable(struct irq_data *data)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct gsta_gpio *chip = gc->private;
int nr = data->irq - chip->irq_base;
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
u32 val;
unsigned long flags;
@@ -257,8 +248,8 @@ static void gsta_irq_enable(struct irq_data *data)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct gsta_gpio *chip = gc->private;
int nr = data->irq - chip->irq_base;
- struct gsta_regs __iomem *regs = __regs(chip, nr);
- u32 bit = __bit(nr);
+ struct gsta_regs __iomem *regs = chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+ u32 bit = BIT(nr % GSTA_GPIO_PER_BLOCK);
u32 val;
int type;
unsigned long flags;
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 8d6a5a7e612d..65a2315f1673 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -8,7 +8,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/mfd/stmpe.h>
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index c07385b71403..19972084c45b 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -13,9 +13,8 @@
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/mutex.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
-#include <linux/of_gpio.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -91,6 +90,20 @@ struct xway_stp {
};
/**
+ * xway_stp_get() - gpio_chip->get - get gpios.
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ *
+ * Gets the shadow value.
+ */
+static int xway_stp_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct xway_stp *chip = gpiochip_get_data(gc);
+
+ return (xway_stp_r32(chip->virt, XWAY_STP_CPU0) & BIT(gpio));
+}
+
+/**
* xway_stp_set() - gpio_chip->set - set gpios.
* @gc: Pointer to gpio_chip device structure.
* @gpio: GPIO signal number.
@@ -215,6 +228,7 @@ static int xway_stp_probe(struct platform_device *pdev)
chip->gc.parent = &pdev->dev;
chip->gc.label = "stp-xway";
chip->gc.direction_output = xway_stp_dir_out;
+ chip->gc.get = xway_stp_get;
chip->gc.set = xway_stp_set;
chip->gc.request = xway_stp_request;
chip->gc.base = -1;
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 8b0a69c5ba88..87c18a544513 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -10,7 +10,7 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -135,6 +135,33 @@ static const struct syscon_gpio_data clps711x_mctrl_gpio = {
.dat_bit_offset = 0x40 * 8 + 8,
};
+static void rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct syscon_gpio_priv *priv = gpiochip_get_data(chip);
+ unsigned int offs;
+ u8 bit;
+ u32 data;
+ int ret;
+
+ offs = priv->dreg_offset + priv->data->dat_bit_offset + offset;
+ bit = offs % SYSCON_REG_BITS;
+ data = (val ? BIT(bit) : 0) | BIT(bit + 16);
+ ret = regmap_write(priv->syscon,
+ (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE,
+ data);
+ if (ret < 0)
+ dev_err(chip->parent, "gpio write failed ret(%d)\n", ret);
+}
+
+static const struct syscon_gpio_data rockchip_rk3328_gpio_mute = {
+ /* RK3328 GPIO_MUTE is an output only pin at GRF_SOC_CON10[1] */
+ .flags = GPIO_SYSCON_FEAT_OUT,
+ .bit_count = 1,
+ .dat_bit_offset = 0x0428 * 8 + 1,
+ .set = rockchip_gpio_set,
+};
+
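The BIT(bit + 16) term in rockchip_gpio_set() above follows the usual Rockchip GRF convention: the upper half of each 32-bit register is a write-enable mask, so a write only changes the bits whose enable bit is also set. A hedged sketch of the value computed:

    #include <stdint.h>

    /* Value to write so that only "bit" changes; all other bits are ignored. */
    static uint32_t grf_field_write(unsigned int bit, int val)
    {
            return (val ? (1U << bit) : 0) | (1U << (bit + 16));
    }
    /* grf_field_write(1, 1) == 0x00020002; grf_field_write(1, 0) == 0x00020000 */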
#define KEYSTONE_LOCK_BIT BIT(0)
static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
@@ -175,6 +202,10 @@ static const struct of_device_id syscon_gpio_ids[] = {
.compatible = "ti,keystone-dsp-gpio",
.data = &keystone_dsp_gpio,
},
+ {
+ .compatible = "rockchip,rk3328-grf-gpio",
+ .data = &rockchip_rk3328_gpio_mute,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, syscon_gpio_ids);
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index ac6f2a9841e5..a12cd0b5c972 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -22,7 +22,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
@@ -30,7 +30,6 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/pinctrl/consumer.h>
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 94396caaca75..47dbd19751d0 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -22,7 +22,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
@@ -207,7 +207,7 @@ static int tegra_gpio_get_direction(struct gpio_chip *chip,
oe = tegra_gpio_readl(tgi, GPIO_OE(tgi, offset));
- return (oe & pin_mask) ? GPIOF_DIR_OUT : GPIOF_DIR_IN;
+ return !(oe & pin_mask);
}
static int tegra_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
@@ -323,13 +323,6 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- ret = gpiochip_lock_as_irq(&tgi->gc, gpio);
- if (ret) {
- dev_err(tgi->dev,
- "unable to lock Tegra GPIO %u as IRQ\n", gpio);
- return ret;
- }
-
spin_lock_irqsave(&bank->lvl_lock[port], flags);
val = tegra_gpio_readl(tgi, GPIO_INT_LVL(tgi, gpio));
@@ -342,6 +335,14 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, gpio), gpio, 0);
tegra_gpio_enable(tgi, gpio);
+ ret = gpiochip_lock_as_irq(&tgi->gc, gpio);
+ if (ret) {
+ dev_err(tgi->dev,
+ "unable to lock Tegra GPIO %u as IRQ\n", gpio);
+ tegra_gpio_disable(tgi, gpio);
+ return ret;
+ }
+
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
irq_set_handler_locked(d, handle_level_irq);
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
@@ -550,13 +551,6 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
};
-/*
- * This lock class tells lockdep that GPIO irqs are in a different category
- * than their parents, so it won't report false recursion.
- */
-static struct lock_class_key gpio_lock_class;
-static struct lock_class_key gpio_request_class;
-
static int tegra_gpio_probe(struct platform_device *pdev)
{
struct tegra_gpio_info *tgi;
@@ -661,8 +655,6 @@ static int tegra_gpio_probe(struct platform_device *pdev)
bank = &tgi->bank_info[GPIO_BANK(gpio)];
- irq_set_lockdep_class(irq, &gpio_lock_class,
- &gpio_request_class);
irq_set_chip_data(irq, bank);
irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
}
@@ -720,4 +712,4 @@ static int __init tegra_gpio_init(void)
{
return platform_driver_register(&tegra_gpio_driver);
}
-postcore_initcall(tegra_gpio_init);
+subsys_initcall(tegra_gpio_init);
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 7f1aa4c21e0d..9d0292c8a199 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <dt-bindings/gpio/tegra186-gpio.h>
+#include <dt-bindings/gpio/tegra194-gpio.h>
#define TEGRA186_GPIO_ENABLE_CONFIG 0x00
#define TEGRA186_GPIO_ENABLE_CONFIG_ENABLE BIT(0)
@@ -593,6 +594,73 @@ static const struct tegra_gpio_soc tegra186_aon_soc = {
.name = "tegra186-gpio-aon",
};
+#define TEGRA194_MAIN_GPIO_PORT(port, base, count, controller) \
+ [TEGRA194_MAIN_GPIO_PORT_##port] = { \
+ .name = #port, \
+ .offset = base, \
+ .pins = count, \
+ .irq = controller, \
+ }
+
+static const struct tegra_gpio_port tegra194_main_ports[] = {
+ TEGRA194_MAIN_GPIO_PORT( A, 0x1400, 8, 1),
+ TEGRA194_MAIN_GPIO_PORT( B, 0x4e00, 2, 4),
+ TEGRA194_MAIN_GPIO_PORT( C, 0x4600, 8, 4),
+ TEGRA194_MAIN_GPIO_PORT( D, 0x4800, 4, 4),
+ TEGRA194_MAIN_GPIO_PORT( E, 0x4a00, 8, 4),
+ TEGRA194_MAIN_GPIO_PORT( F, 0x4c00, 6, 4),
+ TEGRA194_MAIN_GPIO_PORT( G, 0x4000, 8, 4),
+ TEGRA194_MAIN_GPIO_PORT( H, 0x4200, 8, 4),
+ TEGRA194_MAIN_GPIO_PORT( I, 0x4400, 5, 4),
+ TEGRA194_MAIN_GPIO_PORT( J, 0x5200, 6, 5),
+ TEGRA194_MAIN_GPIO_PORT( K, 0x3000, 8, 3),
+ TEGRA194_MAIN_GPIO_PORT( L, 0x3200, 4, 3),
+ TEGRA194_MAIN_GPIO_PORT( M, 0x2600, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( N, 0x2800, 3, 2),
+ TEGRA194_MAIN_GPIO_PORT( O, 0x5000, 6, 5),
+ TEGRA194_MAIN_GPIO_PORT( P, 0x2a00, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( Q, 0x2c00, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( R, 0x2e00, 6, 2),
+ TEGRA194_MAIN_GPIO_PORT( S, 0x3600, 8, 3),
+ TEGRA194_MAIN_GPIO_PORT( T, 0x3800, 8, 3),
+ TEGRA194_MAIN_GPIO_PORT( U, 0x3a00, 1, 3),
+ TEGRA194_MAIN_GPIO_PORT( V, 0x1000, 8, 1),
+ TEGRA194_MAIN_GPIO_PORT( W, 0x1200, 2, 1),
+ TEGRA194_MAIN_GPIO_PORT( X, 0x2000, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( Y, 0x2200, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT( Z, 0x2400, 8, 2),
+ TEGRA194_MAIN_GPIO_PORT(FF, 0x3400, 2, 3),
+ TEGRA194_MAIN_GPIO_PORT(GG, 0x0000, 2, 0)
+};
+
+static const struct tegra_gpio_soc tegra194_main_soc = {
+ .num_ports = ARRAY_SIZE(tegra194_main_ports),
+ .ports = tegra194_main_ports,
+ .name = "tegra194-gpio",
+};
+
+#define TEGRA194_AON_GPIO_PORT(port, base, count, controller) \
+ [TEGRA194_AON_GPIO_PORT_##port] = { \
+ .name = #port, \
+ .offset = base, \
+ .pins = count, \
+ .irq = controller, \
+ }
+
+static const struct tegra_gpio_port tegra194_aon_ports[] = {
+ TEGRA194_AON_GPIO_PORT(AA, 0x0600, 8, 0),
+ TEGRA194_AON_GPIO_PORT(BB, 0x0800, 4, 0),
+ TEGRA194_AON_GPIO_PORT(CC, 0x0200, 8, 0),
+ TEGRA194_AON_GPIO_PORT(DD, 0x0400, 3, 0),
+ TEGRA194_AON_GPIO_PORT(EE, 0x0000, 7, 0)
+};
+
+static const struct tegra_gpio_soc tegra194_aon_soc = {
+ .num_ports = ARRAY_SIZE(tegra194_aon_ports),
+ .ports = tegra194_aon_ports,
+ .name = "tegra194-gpio-aon",
+};
+
static const struct of_device_id tegra186_gpio_of_match[] = {
{
.compatible = "nvidia,tegra186-gpio",
@@ -601,6 +669,12 @@ static const struct of_device_id tegra186_gpio_of_match[] = {
.compatible = "nvidia,tegra186-gpio-aon",
.data = &tegra186_aon_soc
}, {
+ .compatible = "nvidia,tegra194-gpio",
+ .data = &tegra194_main_soc
+ }, {
+ .compatible = "nvidia,tegra194-gpio-aon",
+ .data = &tegra194_aon_soc
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 6520a8475910..314e300d6ba3 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -22,7 +22,7 @@
*/
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/io.h>
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 58faeb1cef63..7fdac9060979 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -310,8 +310,7 @@ static int uniphier_gpio_irq_domain_activate(struct irq_domain *domain,
struct uniphier_gpio_priv *priv = domain->host_data;
struct gpio_chip *chip = &priv->chip;
- gpiochip_lock_as_irq(chip, data->hwirq + UNIPHIER_GPIO_IRQ_OFFSET);
- return 0;
+ return gpiochip_lock_as_irq(chip, data->hwirq + UNIPHIER_GPIO_IRQ_OFFSET);
}
static void uniphier_gpio_irq_domain_deactivate(struct irq_domain *domain,
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index ac8deb01f6f6..027699cec911 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -138,10 +138,16 @@ static void unmask_giuint_low(struct irq_data *d)
static unsigned int startup_giuint(struct irq_data *data)
{
- if (gpiochip_lock_as_irq(&vr41xx_gpio_chip, data->hwirq))
+ int ret;
+
+ ret = gpiochip_lock_as_irq(&vr41xx_gpio_chip, irqd_to_hwirq(data));
+ if (ret) {
dev_err(vr41xx_gpio_chip.parent,
"unable to lock HW IRQ %lu for IRQ\n",
data->hwirq);
+ return ret;
+ }
+
/* Satisfy the .enable semantics by unmasking the line */
unmask_giuint_low(data);
return 0;
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index acd59113e08b..2eb76f35aa7e 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -143,12 +143,14 @@ static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
{
struct xgene_gpio_sb *priv = d->host_data;
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
+ int ret;
- if (gpiochip_lock_as_irq(&priv->gc, gpio)) {
+ ret = gpiochip_lock_as_irq(&priv->gc, gpio);
+ if (ret) {
dev_err(priv->gc.parent,
"Unable to configure XGene GPIO standby pin %d as IRQ\n",
gpio);
- return -ENOSPC;
+ return ret;
}
xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index e8ec0e33a0a9..8f24478cc18b 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -20,7 +20,7 @@
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
/* Register Offset Definitions */
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index addd9fecc198..c48ed9d89ff5 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -389,7 +389,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dev_remove_driver_gpios);
static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
const char *name, int index,
- struct acpi_reference_args *args,
+ struct fwnode_reference_args *args,
unsigned int *quirks)
{
const struct acpi_gpio_mapping *gm;
@@ -401,7 +401,7 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
if (!strcmp(name, gm->name) && gm->data && index < gm->size) {
const struct acpi_gpio_params *par = gm->data + index;
- args->adev = adev;
+ args->fwnode = acpi_fwnode_handle(adev);
args->args[0] = par->crs_entry_index;
args->args[1] = par->line_index;
args->args[2] = par->active_low;
@@ -564,7 +564,7 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
const char *propname, int index,
struct acpi_gpio_lookup *lookup)
{
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
unsigned int quirks = 0;
int ret;
@@ -585,6 +585,8 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
* The property was found and resolved, so need to lookup the GPIO based
* on returned args.
*/
+ if (!to_acpi_device_node(args.fwnode))
+ return -EINVAL;
if (args.nargs != 3)
return -EPROTO;
@@ -592,8 +594,9 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
lookup->pin_index = args.args[1];
lookup->active_low = !!args.args[2];
- lookup->info.adev = args.adev;
+ lookup->info.adev = to_acpi_device_node(args.fwnode);
lookup->info.quirks = quirks;
+
return 0;
}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 53a14ee8ad6d..a4f1157d6aa0 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -621,9 +621,6 @@ int of_gpiochip_add(struct gpio_chip *chip)
{
int status;
- if ((!chip->of_node) && (chip->parent))
- chip->of_node = chip->parent->of_node;
-
if (!chip->of_node)
return 0;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index e11a3bb03820..e8f8a1999393 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -431,7 +431,7 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
int i;
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
- /* TODO: check if descriptors are really input */
+ /* NOTE: It's ok to read values of output lines. */
int ret = gpiod_get_array_value_complex(false,
true,
lh->numdescs,
@@ -449,7 +449,13 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
return 0;
} else if (cmd == GPIOHANDLE_SET_LINE_VALUES_IOCTL) {
- /* TODO: check if descriptors are really output */
+ /*
+ * All line descriptors were created at once with the same
+ * flags so just check if the first one is really output.
+ */
+ if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
+ return -EPERM;
+
if (copy_from_user(&ghd, ip, sizeof(ghd)))
return -EFAULT;
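The new check makes the character-device semantics explicit: values can only be written through a handle that was requested as output. A hedged userspace sketch of a request that satisfies it (error handling elided; the chip path and line offset are hypothetical):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/gpio.h>

    int demo(void)
    {
            struct gpiohandle_request req;
            struct gpiohandle_data data;
            int fd = open("/dev/gpiochip0", O_RDWR);

            memset(&req, 0, sizeof(req));
            req.lineoffsets[0] = 4;
            req.lines = 1;
            req.flags = GPIOHANDLE_REQUEST_OUTPUT;  /* makes SET legal */
            strcpy(req.consumer_label, "demo");
            ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req);

            memset(&data, 0, sizeof(data));
            data.values[0] = 1;
            return ioctl(req.fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, &data);
    }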
@@ -1256,6 +1262,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
/* If the gpiochip has an assigned OF node this takes precedence */
if (chip->of_node)
gdev->dev.of_node = chip->of_node;
+ else
+ chip->of_node = gdev->dev.of_node;
#endif
gdev->id = ida_simple_get(&gpio_ida, 0, 0, GFP_KERNEL);
@@ -1408,9 +1416,9 @@ err_free_descs:
err_free_gdev:
ida_simple_remove(&gpio_ida, gdev->id);
/* failures here can mean systems won't boot... */
- pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
+ pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
gdev->base, gdev->base + gdev->ngpio - 1,
- chip->label ? : "generic");
+ chip->label ? : "generic", status);
kfree(gdev);
return status;
}
@@ -1664,8 +1672,7 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
if (parent_handler) {
if (gpiochip->can_sleep) {
chip_err(gpiochip,
- "you cannot have chained interrupts on a "
- "chip that may sleep\n");
+ "you cannot have chained interrupts on a chip that may sleep\n");
return;
}
/*
@@ -1800,16 +1807,18 @@ static const struct irq_domain_ops gpiochip_domain_ops = {
static int gpiochip_irq_reqres(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ int ret;
if (!try_module_get(chip->gpiodev->owner))
return -ENODEV;
- if (gpiochip_lock_as_irq(chip, d->hwirq)) {
+ ret = gpiochip_lock_as_irq(chip, d->hwirq);
+ if (ret) {
chip_err(chip,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
module_put(chip->gpiodev->owner);
- return -EINVAL;
+ return ret;
}
return 0;
}
@@ -1850,8 +1859,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
return 0;
if (gpiochip->irq.parent_handler && gpiochip->can_sleep) {
- chip_err(gpiochip, "you cannot have chained interrupts on a "
- "chip that may sleep\n");
+ chip_err(gpiochip, "you cannot have chained interrupts on a chip that may sleep\n");
return -EINVAL;
}
@@ -2259,6 +2267,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
struct gpio_chip *chip = desc->gdev->chip;
int status;
unsigned long flags;
+ unsigned offset;
spin_lock_irqsave(&gpio_lock, flags);
@@ -2277,7 +2286,11 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
if (chip->request) {
/* chip->request may sleep */
spin_unlock_irqrestore(&gpio_lock, flags);
- status = chip->request(chip, gpio_chip_hwgpio(desc));
+ offset = gpio_chip_hwgpio(desc);
+ if (gpiochip_line_is_valid(chip, offset))
+ status = chip->request(chip, offset);
+ else
+ status = -EINVAL;
spin_lock_irqsave(&gpio_lock, flags);
if (status < 0) {
@@ -3194,6 +3207,19 @@ int gpiod_cansleep(const struct gpio_desc *desc)
EXPORT_SYMBOL_GPL(gpiod_cansleep);
/**
+ * gpiod_set_consumer_name() - set the consumer name for the descriptor
+ * @desc: gpio to set the consumer name on
+ * @name: the new consumer name
+ */
+void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
+{
+ VALIDATE_DESC_VOID(desc);
+ /* Just overwrite whatever the previous name was */
+ desc->label = name;
+}
+EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
+
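A hedged consumer-side sketch of the new helper: after requesting a descriptor, a driver may relabel it so that debugfs and sysfs show something more specific (the "reset" function name and the label are hypothetical):

    struct gpio_desc *desc;

    desc = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
    if (IS_ERR(desc))
            return PTR_ERR(desc);
    gpiod_set_consumer_name(desc, "mydev-reset");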
+/**
* gpiod_to_irq() - return the IRQ corresponding to a GPIO
* @desc: gpio whose IRQ will be returned (already requested)
*
@@ -3249,18 +3275,19 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
* behind our back
*/
if (!chip->can_sleep && chip->get_direction) {
- int dir = chip->get_direction(chip, offset);
+ int dir = gpiod_get_direction(desc);
- if (dir)
- clear_bit(FLAG_IS_OUT, &desc->flags);
- else
- set_bit(FLAG_IS_OUT, &desc->flags);
+ if (dir < 0) {
+ chip_err(chip, "%s: cannot get GPIO direction\n",
+ __func__);
+ return dir;
+ }
}
if (test_bit(FLAG_IS_OUT, &desc->flags)) {
chip_err(chip,
- "%s: tried to flag a GPIO set as output for IRQ\n",
- __func__);
+ "%s: tried to flag a GPIO set as output for IRQ\n",
+ __func__);
return -EIO;
}
@@ -3639,9 +3666,16 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
chip = find_chip_by_name(p->chip_label);
if (!chip) {
- dev_err(dev, "cannot find GPIO chip %s\n",
- p->chip_label);
- return ERR_PTR(-ENODEV);
+ /*
+ * As the lookup table indicates a chip with
+ * p->chip_label should exist, assume it may
+ * still appear later and let the interested
+ * consumer be probed again or let the Deferred
+ * Probe infrastructure handle the error.
+ */
+ dev_warn(dev, "cannot find GPIO chip %s, deferring\n",
+ p->chip_label);
+ return ERR_PTR(-EPROBE_DEFER);
}
if (chip->ngpio <= p->chip_hwnum) {
@@ -4215,7 +4249,7 @@ static int __init gpiolib_dev_init(void)
int ret;
/* Register GPIO sysfs bus */
- ret = bus_register(&gpio_bus_type);
+ ret = bus_register(&gpio_bus_type);
if (ret < 0) {
pr_err("gpiolib: could not register GPIO bus type\n");
return ret;
@@ -4259,9 +4293,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s",
gpio, gdesc->name ? gdesc->name : "", gdesc->label,
is_out ? "out" : "in ",
- chip->get
- ? (chip->get(chip, i) ? "hi" : "lo")
- : "? ",
+ chip->get ? (chip->get(chip, i) ? "hi" : "lo") : "? ",
is_irq ? "IRQ" : " ");
seq_printf(s, "\n");
}
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 1a8e20363861..a7e49fef73d4 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -92,7 +92,7 @@ struct acpi_gpio_info {
};
/* gpio suffixes used for ACPI and device tree lookup */
-static const char * const gpio_suffixes[] = { "gpios", "gpio" };
+static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };
#ifdef CONFIG_OF_GPIO
struct gpio_desc *of_find_gpio(struct device *dev,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2a72d2feb76d..cb88528e7b10 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -122,6 +122,16 @@ config DRM_LOAD_EDID_FIRMWARE
default case is N. Details and instructions how to build your own
EDID data are given in Documentation/EDID/HOWTO.txt.
+config DRM_DP_CEC
+ bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
+ select CEC_CORE
+ help
+ Choose this option if you want to enable HDMI CEC support for
+ DisplayPort/USB-C to HDMI adapters.
+
+ Note: not all adapters support this feature, and even those that
+ do often do not hook up the CEC pin.
+
config DRM_TTM
tristate
depends on DRM && MMU
@@ -213,6 +223,17 @@ config DRM_VGEM
as used by Mesa's software renderer for enhanced performance.
If M is selected the module will be called vgem.
+config DRM_VKMS
+ tristate "Virtual KMS (EXPERIMENTAL)"
+ depends on DRM
+ select DRM_KMS_HELPER
+ default n
+ help
+ Virtual Kernel Mode-Setting (VKMS) is used for testing or for
+ running GPUs in headless machines. Choose this option to get
+ a VKMS device.
+
+ If M is selected the module will be called vkms.
source "drivers/gpu/drm/exynos/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ef9f3dab287f..a6771cef85e2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -18,7 +18,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_encoder.o drm_mode_object.o drm_property.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
- drm_syncobj.o drm_lease.o
+ drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_DRM_VM) += drm_vm.o
@@ -41,6 +41,7 @@ drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
+drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/
@@ -69,6 +70,7 @@ obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_VGEM) += vgem/
+obj-$(CONFIG_DRM_VKMS) += vkms/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index 06192698bd96..5b393622f592 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -136,6 +136,7 @@
#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
#define GENERIC_OBJECT_ID_MXM_OPM 0x03
#define GENERIC_OBJECT_ID_STEREO_PIN 0x04 //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
+#define GENERIC_OBJECT_ID_BRACKET_LAYOUT 0x05
/****************************************************/
/* Graphics Object ENUM ID Definition */
@@ -714,6 +715,13 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
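A sketch of how such a packed graphics object ID decomposes, assuming the *_SHIFT and *_MASK definitions earlier in this header:

    /* Sketch: pull the three fields back out of a packed object ID. */
    static inline unsigned int gobj_type(uint16_t id)
    {
            return (id & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
    }

    static inline unsigned int gobj_enum(uint16_t id)
    {
            return (id & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
    }

    static inline unsigned int gobj_id(uint16_t id)
    {
            return (id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
    }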
/****************************************************/
/* Object Cap definition - Shared with BIOS */
/****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 7dcbac8af9a7..447c4c7a36d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -73,6 +73,8 @@
#include "amdgpu_virt.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
+#include "amdgpu_job.h"
+#include "amdgpu_bo_list.h"
/*
* Modules parameters.
@@ -105,11 +107,8 @@ extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_dc;
-extern int amdgpu_dc_log;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
-extern int amdgpu_no_evict;
-extern int amdgpu_direct_gma_size;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern uint amdgpu_cg_mask;
@@ -600,17 +599,6 @@ struct amdgpu_ib {
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
- struct amdgpu_job **job, struct amdgpu_vm *vm);
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- struct amdgpu_job **job);
-
-void amdgpu_job_free_resources(struct amdgpu_job *job);
-void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct drm_sched_entity *entity, void *owner,
- struct dma_fence **f);
-
/*
* Queue manager
*/
@@ -684,8 +672,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
-void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
@@ -703,37 +691,6 @@ struct amdgpu_fpriv {
};
/*
- * residency list
- */
-struct amdgpu_bo_list_entry {
- struct amdgpu_bo *robj;
- struct ttm_validate_buffer tv;
- struct amdgpu_bo_va *bo_va;
- uint32_t priority;
- struct page **user_pages;
- int user_invalidated;
-};
-
-struct amdgpu_bo_list {
- struct mutex lock;
- struct rcu_head rhead;
- struct kref refcount;
- struct amdgpu_bo *gds_obj;
- struct amdgpu_bo *gws_obj;
- struct amdgpu_bo *oa_obj;
- unsigned first_userptr;
- unsigned num_entries;
- struct amdgpu_bo_list_entry *array;
-};
-
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
- struct list_head *validated);
-void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
-
-/*
* GFX stuff
*/
#include "clearstate_defs.h"
@@ -931,6 +888,11 @@ struct amdgpu_ngg {
bool init;
};
+struct sq_work {
+ struct work_struct work;
+ unsigned ih_data;
+};
+
struct amdgpu_gfx {
struct mutex gpu_clock_mutex;
struct amdgpu_gfx_config config;
@@ -969,6 +931,10 @@ struct amdgpu_gfx {
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
struct amdgpu_irq_src priv_inst_irq;
+ struct amdgpu_irq_src cp_ecc_error_irq;
+ struct amdgpu_irq_src sq_irq;
+ struct sq_work sq_work;
+
/* gfx status */
uint32_t gfx_current_status;
/* ce ram size*/
@@ -1020,6 +986,7 @@ struct amdgpu_cs_parser {
/* scheduler job object */
struct amdgpu_job *job;
+ struct amdgpu_ring *ring;
/* buffer objects */
struct ww_acquire_ctx ticket;
@@ -1041,40 +1008,6 @@ struct amdgpu_cs_parser {
struct drm_syncobj **post_dep_syncobjs;
};
-#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) /* bit set means command submit involves a preamble IB */
-#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means preamble IB is first presented in belonging context */
-#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */
-
-struct amdgpu_job {
- struct drm_sched_job base;
- struct amdgpu_device *adev;
- struct amdgpu_vm *vm;
- struct amdgpu_ring *ring;
- struct amdgpu_sync sync;
- struct amdgpu_sync sched_sync;
- struct amdgpu_ib *ibs;
- struct dma_fence *fence; /* the hw fence */
- uint32_t preamble_status;
- uint32_t num_ibs;
- void *owner;
- uint64_t fence_ctx; /* the fence_context this job uses */
- bool vm_needs_flush;
- uint64_t vm_pd_addr;
- unsigned vmid;
- unsigned pasid;
- uint32_t gds_base, gds_size;
- uint32_t gws_base, gws_size;
- uint32_t oa_base, oa_size;
- uint32_t vram_lost_counter;
-
- /* user fence handling */
- uint64_t uf_addr;
- uint64_t uf_sequence;
-
-};
-#define to_amdgpu_job(sched_job) \
- container_of((sched_job), struct amdgpu_job, base)
-
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
uint32_t ib_idx, int idx)
{
@@ -1389,6 +1322,7 @@ enum amd_hw_ip_block_type {
PWR_HWIP,
NBIF_HWIP,
THM_HWIP,
+ CLK_HWIP,
MAX_HWIP
};
@@ -1579,9 +1513,9 @@ struct amdgpu_device {
DECLARE_HASHTABLE(mn_hash, 7);
/* tracking pinned memory */
- u64 vram_pin_size;
- u64 invisible_pin_size;
- u64 gart_pin_size;
+ atomic64_t vram_pin_size;
+ atomic64_t visible_pin_size;
+ atomic64_t gart_pin_size;
/* amdkfd interface */
struct kfd_dev *kfd;
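Making these counters atomic64_t lets pin and unpin paths adjust them without holding a lock; a hedged sketch of the accounting pattern the rest of the series adopts (the exact call sites are not shown in this hunk):

    /* Sketch: lock-free pin accounting on pin ... */
    if (domain == AMDGPU_GEM_DOMAIN_VRAM)
            atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
    else if (domain == AMDGPU_GEM_DOMAIN_GTT)
            atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);

    /* ... and the mirror-image atomic64_sub() on unpin. */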
@@ -1776,6 +1710,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
+#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
@@ -1829,8 +1764,6 @@ void amdgpu_display_update_priority(struct amdgpu_device *adev);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_device_vram_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc, u64 base);
void amdgpu_device_gart_location(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 0d8c3fc6eace..353993218f21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -364,7 +364,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
struct acpi_bus_event *event)
{
struct amdgpu_atif *atif = adev->atif;
- struct atif_sbios_requests req;
int count;
DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -379,42 +378,48 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
/* Not our event */
return NOTIFY_DONE;
- /* Check pending SBIOS requests */
- count = amdgpu_atif_get_sbios_requests(atif, &req);
+ if (atif->functions.sbios_requests) {
+ struct atif_sbios_requests req;
- if (count <= 0)
- return NOTIFY_DONE;
+ /* Check pending SBIOS requests */
+ count = amdgpu_atif_get_sbios_requests(atif, &req);
+
+ if (count <= 0)
+ return NOTIFY_DONE;
- DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+ DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
- if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
- struct amdgpu_encoder *enc = atif->encoder_for_bl;
+ /* todo: add DC handling */
+ if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
+ !amdgpu_device_has_dc_support(adev)) {
+ struct amdgpu_encoder *enc = atif->encoder_for_bl;
- if (enc) {
- struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+ if (enc) {
+ struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
- DRM_DEBUG_DRIVER("Changing brightness to %d\n",
- req.backlight_level);
+ DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+ req.backlight_level);
- amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
+ amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
- backlight_force_update(dig->bl_dev,
- BACKLIGHT_UPDATE_HOTKEY);
+ backlight_force_update(dig->bl_dev,
+ BACKLIGHT_UPDATE_HOTKEY);
#endif
+ }
}
- }
- if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
- if ((adev->flags & AMD_IS_PX) &&
- amdgpu_atpx_dgpu_req_power_for_displays()) {
- pm_runtime_get_sync(adev->ddev->dev);
- /* Just fire off a uevent and let userspace tell us what to do */
- drm_helper_hpd_irq_event(adev->ddev);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+ if ((adev->flags & AMD_IS_PX) &&
+ amdgpu_atpx_dgpu_req_power_for_displays()) {
+ pm_runtime_get_sync(adev->ddev->dev);
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(adev->ddev);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ }
}
+ /* TODO: check other events */
}
- /* TODO: check other events */
/* We've handled the event, stop the notifier chain. The ACPI interface
* overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 305143fcc1ce..f8bbbb3a9504 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -243,6 +243,33 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
return r;
}
+int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd)
+ r = kgd2kfd->pre_reset(adev->kfd);
+
+ return r;
+}
+
+int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd)
+ r = kgd2kfd->post_reset(adev->kfd);
+
+ return r;
+}
+
+void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ amdgpu_device_gpu_recover(adev, NULL, false);
+}
+
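A sketch of the intended ordering around a reset, under the assumption that amdgpu_device_gpu_recover() is the caller that brackets the actual ASIC reset:

    /* Sketch: quiesce KFD, reset the GPU, then bring KFD back. */
    amdgpu_amdkfd_pre_reset(adev);   /* kgd2kfd->pre_reset(): stop user queues */
    /* ... hardware reset of the ASIC ... */
    amdgpu_amdkfd_post_reset(adev);  /* kgd2kfd->post_reset(): resume queues */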
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr)
@@ -251,7 +278,6 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
struct amdgpu_bo *bo = NULL;
struct amdgpu_bo_param bp;
int r;
- uint64_t gpu_addr_tmp = 0;
void *cpu_ptr_tmp = NULL;
memset(&bp, 0, sizeof(bp));
@@ -275,13 +301,18 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
goto allocate_mem_reserve_bo_failed;
}
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT,
- &gpu_addr_tmp);
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (r) {
dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
goto allocate_mem_pin_bo_failed;
}
+ r = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (r) {
+ dev_err(adev->dev, "%p bind failed\n", bo);
+ goto allocate_mem_kmap_bo_failed;
+ }
+
r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
if (r) {
dev_err(adev->dev,
@@ -290,7 +321,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
}
*mem_obj = bo;
- *gpu_addr = gpu_addr_tmp;
+ *gpu_addr = amdgpu_bo_gpu_offset(bo);
*cpu_ptr = cpu_ptr_tmp;
amdgpu_bo_unreserve(bo);
@@ -457,6 +488,14 @@ err:
return ret;
}
+void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ amdgpu_dpm_switch_power_profile(adev,
+ PP_SMC_POWER_PROFILE_COMPUTE, !idle);
+}
+
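A hedged sketch of how the KFD side might drive this hook; the queue counter below is purely illustrative:

    /* Sketch: hold the COMPUTE power profile only while user queues exist. */
    if (++active_queues == 1)
            kfd2kgd->set_compute_idle(kgd, false); /* first queue: busy */

    if (--active_queues == 0)
            kfd2kgd->set_compute_idle(kgd, true);  /* last queue gone: idle */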
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
if (adev->kfd) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index a8418a3f4e9d..2f379c183ed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -119,6 +119,7 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
+void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
@@ -126,6 +127,12 @@ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
+int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
+
+int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
+
+void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
+
/* Shared API */
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
@@ -183,6 +190,9 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
struct dma_fence **ef);
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+ struct kfd_vm_fault_info *info);
+
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
index 2c14025e5e76..574c1181ae9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -173,7 +173,5 @@ static const struct dma_fence_ops amdkfd_fence_ops = {
.get_driver_name = amdkfd_fence_get_driver_name,
.get_timeline_name = amdkfd_fence_get_timeline_name,
.enable_signaling = amdkfd_fence_enable_signaling,
- .signaled = NULL,
- .wait = dma_fence_default_wait,
.release = amdkfd_fence_release,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ea79908dac4c..ea3f698aef5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -145,6 +145,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint32_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
+static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
@@ -216,6 +217,10 @@ static const struct kfd2kgd_calls kfd2kgd = {
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.submit_ib = amdgpu_amdkfd_submit_ib,
+ .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
+ .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
@@ -571,6 +576,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
unsigned long flags, end_jiffies;
int retry;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
acquire_queue(kgd, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
@@ -882,6 +890,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
int vmid;
unsigned int tmp;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
@@ -911,3 +922,19 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
RREG32(mmVM_INVALIDATE_RESPONSE);
return 0;
}
+
+/**
+ * read_vmid_from_vmfault_reg - read the vmid from the VM fault status register
+ *
+ * @kgd: kgd device pointer
+ *
+ * Reads the VMID field out of VM_CONTEXT1_PROTECTION_FAULT_STATUS (CIK).
+ */
+static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
+
+ return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 19dd665e7307..f6e53e9352bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -176,6 +176,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.submit_ib = amdgpu_amdkfd_submit_ib,
+ .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
@@ -568,6 +571,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
int retry;
struct vi_mqd *m = get_mqd(mqd);
+ if (adev->in_gpu_reset)
+ return -EIO;
+
acquire_queue(kgd, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
@@ -844,6 +850,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
int vmid;
unsigned int tmp;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 1db60aa5b7f0..8efedfcb9dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -213,6 +213,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.submit_ib = amdgpu_amdkfd_submit_ib,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
@@ -679,6 +681,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
uint32_t temp;
struct v9_mqd *m = get_mqd(mqd);
+ if (adev->in_gpu_reset)
+ return -EIO;
+
acquire_queue(kgd, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
@@ -866,6 +871,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
int vmid;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
if (ring->ready)
return invalidate_tlbs_with_kiq(adev, pasid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ff8fd75f7ca5..8a707d8bbb1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -334,7 +334,7 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
"Called with userptr BO"))
return -EINVAL;
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
@@ -622,7 +622,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
pr_err("%s: Failed to reserve BO\n", __func__);
goto release_out;
}
- amdgpu_ttm_placement_from_domain(bo, mem->domain);
+ amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
pr_err("%s: failed to validate BO\n", __func__);
@@ -1587,7 +1587,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
goto bo_reserve_failed;
}
- ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (ret) {
pr_err("Failed to pin bo. ret %d\n", ret);
goto pin_failed;
@@ -1621,6 +1621,20 @@ bo_reserve_failed:
return ret;
}
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+ struct kfd_vm_fault_info *mem)
+{
+ struct amdgpu_device *adev;
+
+ adev = (struct amdgpu_device *)kgd;
+ if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
+ *mem = *adev->gmc.vm_fault_info;
+ mb();
+ atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ }
+ return 0;
+}
+
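The mb() above is the consumer half of a publish/consume pairing: the fault interrupt handler is expected to fill gmc.vm_fault_info first and only then raise the flag. A hedged sketch of that producer side (the field assignments are illustrative):

    /* Sketch: producer in the VM fault interrupt path. */
    adev->gmc.vm_fault_info->vmid   = vmid;
    adev->gmc.vm_fault_info->status = status;
    mb(); /* make the record visible before the flag */
    atomic_set(&adev->gmc.vm_fault_info_updated, 1);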
/* Evict a userptr BO by stopping the queues if necessary
*
* Runs in MMU notifier, may be in RECLAIM_FS context. This means it
@@ -1680,7 +1694,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
if (amdgpu_bo_reserve(bo, true))
return -EAGAIN;
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (ret) {
@@ -1824,7 +1838,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
if (mem->user_pages[0]) {
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
mem->user_pages);
- amdgpu_ttm_placement_from_domain(bo, mem->domain);
+ amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret) {
pr_err("%s: failed to validate BO\n", __func__);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index ca8bf1c9a98e..a028661d9e20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -32,7 +32,7 @@ struct amdgpu_atpx_functions {
bool switch_start;
bool switch_end;
bool disp_connectors_mapping;
- bool disp_detetion_ports;
+ bool disp_detection_ports;
};
struct amdgpu_atpx {
@@ -162,7 +162,7 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
- f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
+ f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 19cfff31f2e1..3079ea8523c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -95,11 +95,17 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
r = amdgpu_bo_reserve(sobj, false);
if (unlikely(r != 0))
goto out_cleanup;
- r = amdgpu_bo_pin(sobj, sdomain, &saddr);
+ r = amdgpu_bo_pin(sobj, sdomain);
+ if (r) {
+ amdgpu_bo_unreserve(sobj);
+ goto out_cleanup;
+ }
+ r = amdgpu_ttm_alloc_gart(&sobj->tbo);
amdgpu_bo_unreserve(sobj);
if (r) {
goto out_cleanup;
}
+ saddr = amdgpu_bo_gpu_offset(sobj);
bp.domain = ddomain;
r = amdgpu_bo_create(adev, &bp, &dobj);
if (r) {
@@ -108,11 +114,17 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
r = amdgpu_bo_reserve(dobj, false);
if (unlikely(r != 0))
goto out_cleanup;
- r = amdgpu_bo_pin(dobj, ddomain, &daddr);
+ r = amdgpu_bo_pin(dobj, ddomain);
+ if (r) {
+ amdgpu_bo_unreserve(dobj);
+ goto out_cleanup;
+ }
+ r = amdgpu_ttm_alloc_gart(&dobj->tbo);
amdgpu_bo_unreserve(dobj);
if (r) {
goto out_cleanup;
}
+ daddr = amdgpu_bo_gpu_offset(dobj);
if (adev->mman.buffer_funcs) {
time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 92be7f6de197..d472a2c8399f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -35,92 +35,53 @@
#define AMDGPU_BO_LIST_MAX_PRIORITY 32u
#define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct amdgpu_bo_list *list,
- struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries);
+static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
+{
+ struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
+ rhead);
+
+ kvfree(list);
+}
-static void amdgpu_bo_list_release_rcu(struct kref *ref)
+static void amdgpu_bo_list_free(struct kref *ref)
{
- unsigned i;
struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
refcount);
+ struct amdgpu_bo_list_entry *e;
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
+ amdgpu_bo_list_for_each_entry(e, list)
+ amdgpu_bo_unref(&e->robj);
- mutex_destroy(&list->lock);
- kvfree(list->array);
- kfree_rcu(list, rhead);
+ call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
}
-static int amdgpu_bo_list_create(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries,
- int *id)
+int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
+ struct drm_amdgpu_bo_list_entry *info,
+ unsigned num_entries, struct amdgpu_bo_list **result)
{
- int r;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ unsigned last_entry = 0, first_userptr = num_entries;
+ struct amdgpu_bo_list_entry *array;
struct amdgpu_bo_list *list;
+ uint64_t total_size = 0;
+ size_t size;
+ unsigned i;
+ int r;
+
+ if (num_entries > SIZE_MAX / sizeof(struct amdgpu_bo_list_entry))
+ return -EINVAL;
- list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+ size = sizeof(struct amdgpu_bo_list);
+ size += num_entries * sizeof(struct amdgpu_bo_list_entry);
+ list = kvmalloc(size, GFP_KERNEL);
if (!list)
return -ENOMEM;
- /* initialize bo list*/
- mutex_init(&list->lock);
kref_init(&list->refcount);
- r = amdgpu_bo_list_set(adev, filp, list, info, num_entries);
- if (r) {
- kfree(list);
- return r;
- }
-
- /* idr alloc should be called only after initialization of bo list. */
- mutex_lock(&fpriv->bo_list_lock);
- r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
- mutex_unlock(&fpriv->bo_list_lock);
- if (r < 0) {
- amdgpu_bo_list_free(list);
- return r;
- }
- *id = r;
-
- return 0;
-}
-
-static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
-{
- struct amdgpu_bo_list *list;
-
- mutex_lock(&fpriv->bo_list_lock);
- list = idr_remove(&fpriv->bo_list_handles, id);
- mutex_unlock(&fpriv->bo_list_lock);
- if (list)
- kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
-}
-
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct amdgpu_bo_list *list,
- struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries)
-{
- struct amdgpu_bo_list_entry *array;
- struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
- struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
- struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
-
- unsigned last_entry = 0, first_userptr = num_entries;
- unsigned i;
- int r;
- unsigned long total_size = 0;
+ list->gds_obj = adev->gds.gds_gfx_bo;
+ list->gws_obj = adev->gds.gws_gfx_bo;
+ list->oa_obj = adev->gds.oa_gfx_bo;
- array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
- if (!array)
- return -ENOMEM;
+ array = amdgpu_bo_list_array_entry(list, 0);
memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
for (i = 0; i < num_entries; ++i) {
@@ -157,59 +118,56 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
entry->tv.shared = !entry->robj->prime_shared_count;
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
- gds_obj = entry->robj;
+ list->gds_obj = entry->robj;
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
- gws_obj = entry->robj;
+ list->gws_obj = entry->robj;
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
- oa_obj = entry->robj;
+ list->oa_obj = entry->robj;
total_size += amdgpu_bo_size(entry->robj);
trace_amdgpu_bo_list_set(list, entry->robj);
}
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
-
- kvfree(list->array);
-
- list->gds_obj = gds_obj;
- list->gws_obj = gws_obj;
- list->oa_obj = oa_obj;
list->first_userptr = first_userptr;
- list->array = array;
list->num_entries = num_entries;
trace_amdgpu_cs_bo_status(list->num_entries, total_size);
+
+ *result = list;
return 0;
error_free:
while (i--)
amdgpu_bo_unref(&array[i].robj);
- kvfree(array);
+ kvfree(list);
return r;
+
}
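The entries now live in the same allocation as the list header, which is why the SIZE_MAX guard above is needed and why no separate array pointer remains. The layout assumption, sketched as a comment:

    /*
     * Sketch of the single-allocation layout:
     *
     *   +------------------------+----------+----------+----
     *   | struct amdgpu_bo_list  | entry[0] | entry[1] | ...
     *   +------------------------+----------+----------+----
     *
     * amdgpu_bo_list_array_entry() (see the new header below) simply
     * indexes past the header: (void *)&list[1] + index * sizeof(entry).
     */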
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
+static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
- struct amdgpu_bo_list *result;
+ struct amdgpu_bo_list *list;
+
+ mutex_lock(&fpriv->bo_list_lock);
+ list = idr_remove(&fpriv->bo_list_handles, id);
+ mutex_unlock(&fpriv->bo_list_lock);
+ if (list)
+ kref_put(&list->refcount, amdgpu_bo_list_free);
+}
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ struct amdgpu_bo_list **result)
+{
rcu_read_lock();
- result = idr_find(&fpriv->bo_list_handles, id);
+ *result = idr_find(&fpriv->bo_list_handles, id);
- if (result) {
- if (kref_get_unless_zero(&result->refcount)) {
- rcu_read_unlock();
- mutex_lock(&result->lock);
- } else {
- rcu_read_unlock();
- result = NULL;
- }
- } else {
+ if (*result && kref_get_unless_zero(&(*result)->refcount)) {
rcu_read_unlock();
+ return 0;
}
- return result;
+ rcu_read_unlock();
+ return -ENOENT;
}
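This is the standard RCU lookup idiom: the IDR find runs under rcu_read_lock(), and kref_get_unless_zero() refuses objects already on their way to the RCU-deferred free above. A generic sketch of the same pattern:

    /* Sketch: RCU-safe lookup that only returns live objects. */
    rcu_read_lock();
    obj = idr_find(&handles, id);
    if (obj && !kref_get_unless_zero(&obj->refcount))
            obj = NULL; /* racing with the last kref_put(); treat as gone */
    rcu_read_unlock();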
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
@@ -220,6 +178,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
* concatenated in descending order.
*/
struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
+ struct amdgpu_bo_list_entry *e;
unsigned i;
for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
@@ -230,14 +189,13 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
* in the list, the sort mustn't change the ordering of buffers
* with the same priority, i.e. it must be stable.
*/
- for (i = 0; i < list->num_entries; i++) {
- unsigned priority = list->array[i].priority;
+ amdgpu_bo_list_for_each_entry(e, list) {
+ unsigned priority = e->priority;
- if (!list->array[i].robj->parent)
- list_add_tail(&list->array[i].tv.head,
- &bucket[priority]);
+ if (!e->robj->parent)
+ list_add_tail(&e->tv.head, &bucket[priority]);
- list->array[i].user_pages = NULL;
+ e->user_pages = NULL;
}
/* Connect the sorted buckets in the output list. */
@@ -247,71 +205,82 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
- mutex_unlock(&list->lock);
- kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
-}
-
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
-{
- unsigned i;
-
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
-
- mutex_destroy(&list->lock);
- kvfree(list->array);
- kfree(list);
+ kref_put(&list->refcount, amdgpu_bo_list_free);
}
-int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ struct drm_amdgpu_bo_list_entry **info_param)
{
+ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
-
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
- union drm_amdgpu_bo_list *args = data;
- uint32_t handle = args->in.list_handle;
- const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);
-
struct drm_amdgpu_bo_list_entry *info;
- struct amdgpu_bo_list *list;
-
int r;
- info = kvmalloc_array(args->in.bo_number,
- sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
+ info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
if (!info)
return -ENOMEM;
/* copy the handle array from userspace to a kernel buffer */
r = -EFAULT;
- if (likely(info_size == args->in.bo_info_size)) {
- unsigned long bytes = args->in.bo_number *
- args->in.bo_info_size;
+ if (likely(info_size == in->bo_info_size)) {
+ unsigned long bytes = in->bo_number *
+ in->bo_info_size;
if (copy_from_user(info, uptr, bytes))
goto error_free;
} else {
- unsigned long bytes = min(args->in.bo_info_size, info_size);
+ unsigned long bytes = min(in->bo_info_size, info_size);
unsigned i;
- memset(info, 0, args->in.bo_number * info_size);
- for (i = 0; i < args->in.bo_number; ++i) {
+ memset(info, 0, in->bo_number * info_size);
+ for (i = 0; i < in->bo_number; ++i) {
if (copy_from_user(&info[i], uptr, bytes))
goto error_free;
- uptr += args->in.bo_info_size;
+ uptr += in->bo_info_size;
}
}
+ *info_param = info;
+ return 0;
+
+error_free:
+ kvfree(info);
+ return r;
+}
+
+int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ union drm_amdgpu_bo_list *args = data;
+ uint32_t handle = args->in.list_handle;
+ struct drm_amdgpu_bo_list_entry *info = NULL;
+ struct amdgpu_bo_list *list, *old;
+ int r;
+
+ r = amdgpu_bo_create_list_entry_array(&args->in, &info);
+ if (r)
+ goto error_free;
+
switch (args->in.operation) {
case AMDGPU_BO_LIST_OP_CREATE:
r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
- &handle);
+ &list);
if (r)
goto error_free;
+
+ mutex_lock(&fpriv->bo_list_lock);
+ r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
+ mutex_unlock(&fpriv->bo_list_lock);
+ if (r < 0) {
+ amdgpu_bo_list_put(list);
+ return r;
+ }
+
+ handle = r;
break;
case AMDGPU_BO_LIST_OP_DESTROY:
@@ -320,17 +289,22 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
break;
case AMDGPU_BO_LIST_OP_UPDATE:
- r = -ENOENT;
- list = amdgpu_bo_list_get(fpriv, handle);
- if (!list)
+ r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
+ &list);
+ if (r)
goto error_free;
- r = amdgpu_bo_list_set(adev, filp, list, info,
- args->in.bo_number);
- amdgpu_bo_list_put(list);
- if (r)
+ mutex_lock(&fpriv->bo_list_lock);
+ old = idr_replace(&fpriv->bo_list_handles, list, handle);
+ mutex_unlock(&fpriv->bo_list_lock);
+
+ if (IS_ERR(old)) {
+ amdgpu_bo_list_put(list);
+ r = PTR_ERR(old);
goto error_free;
+ }
+ amdgpu_bo_list_put(old);
break;
default:
@@ -345,6 +319,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
return 0;
error_free:
- kvfree(info);
+ if (info)
+ kvfree(info);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
new file mode 100644
index 000000000000..61b089768e1c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_BO_LIST_H__
+#define __AMDGPU_BO_LIST_H__
+
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/amdgpu_drm.h>
+
+struct amdgpu_device;
+struct amdgpu_bo;
+struct amdgpu_bo_va;
+struct amdgpu_fpriv;
+
+struct amdgpu_bo_list_entry {
+ struct amdgpu_bo *robj;
+ struct ttm_validate_buffer tv;
+ struct amdgpu_bo_va *bo_va;
+ uint32_t priority;
+ struct page **user_pages;
+ int user_invalidated;
+};
+
+struct amdgpu_bo_list {
+ struct rcu_head rhead;
+ struct kref refcount;
+ struct amdgpu_bo *gds_obj;
+ struct amdgpu_bo *gws_obj;
+ struct amdgpu_bo *oa_obj;
+ unsigned first_userptr;
+ unsigned num_entries;
+};
+
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ struct amdgpu_bo_list **result);
+void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+ struct list_head *validated);
+void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ struct drm_amdgpu_bo_list_entry **info_param);
+
+int amdgpu_bo_list_create(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ struct drm_amdgpu_bo_list_entry *info,
+ unsigned num_entries,
+ struct amdgpu_bo_list **list);
+
+static inline struct amdgpu_bo_list_entry *
+amdgpu_bo_list_array_entry(struct amdgpu_bo_list *list, unsigned index)
+{
+ struct amdgpu_bo_list_entry *array = (void *)&list[1];
+
+ return &array[index];
+}
+
+#define amdgpu_bo_list_for_each_entry(e, list) \
+ for (e = amdgpu_bo_list_array_entry(list, 0); \
+ e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+ ++e)
+
+#define amdgpu_bo_list_for_each_userptr_entry(e, list) \
+ for (e = amdgpu_bo_list_array_entry(list, (list)->first_userptr); \
+ e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+ ++e)
+
+#endif
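Usage of the two new iterators, as a short sketch (the total and the reset below are illustrative):

    /* Sketch: walk all entries, then only the userptr tail of the list. */
    struct amdgpu_bo_list_entry *e;
    uint64_t total = 0;

    amdgpu_bo_list_for_each_entry(e, list)
            total += amdgpu_bo_size(e->robj);

    amdgpu_bo_list_for_each_userptr_entry(e, list)
            e->user_pages = NULL;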
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index e950730f1933..693ec5ea4950 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -314,17 +314,17 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0x81) ||
(adev->pdev->device == 0x665f)) {
info->is_kicker = true;
- strcpy(fw_name, "radeon/bonaire_k_smc.bin");
+ strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
} else {
- strcpy(fw_name, "radeon/bonaire_smc.bin");
+ strcpy(fw_name, "amdgpu/bonaire_smc.bin");
}
break;
case CHIP_HAWAII:
if (adev->pdev->revision == 0x80) {
info->is_kicker = true;
- strcpy(fw_name, "radeon/hawaii_k_smc.bin");
+ strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
} else {
- strcpy(fw_name, "radeon/hawaii_smc.bin");
+ strcpy(fw_name, "amdgpu/hawaii_smc.bin");
}
break;
case CHIP_TOPAZ:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 8e66851eb427..c770d73352a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -212,30 +212,21 @@ static void
amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
enum drm_connector_status status)
{
- struct drm_encoder *best_encoder = NULL;
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *best_encoder;
+ struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
int i;
best_encoder = connector_funcs->best_encoder(connector);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
connected = false;
amdgpu_atombios_encoder_set_bios_scratch_regs(connector, encoder, connected);
-
}
}
@@ -246,17 +237,11 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
+
return NULL;
}
@@ -349,22 +334,24 @@ static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
int ret;
if (amdgpu_connector->edid) {
- drm_mode_connector_update_edid_property(connector, amdgpu_connector->edid);
+ drm_connector_update_edid_property(connector, amdgpu_connector->edid);
ret = drm_add_edid_modes(connector, amdgpu_connector->edid);
return ret;
}
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
return 0;
}
static struct drm_encoder *
amdgpu_connector_best_single_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
+ struct drm_encoder *encoder;
+ int i;
+
+ /* pick the first one */
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@@ -985,9 +972,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- struct drm_encoder *encoder = NULL;
const struct drm_encoder_helper_funcs *encoder_funcs;
- int i, r;
+ int r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
@@ -1077,14 +1063,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (amdgpu_connector->dac_load_detect) {
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ struct drm_encoder *encoder;
+ int i;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1132,18 +1114,11 @@ exit:
static struct drm_encoder *
amdgpu_connector_dvi_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (amdgpu_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1158,8 +1133,9 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
/* then check use_digital */
/* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
+
return NULL;
}
@@ -1296,15 +1272,7 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
struct amdgpu_encoder *amdgpu_encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
switch (amdgpu_encoder->encoder_id) {
@@ -1326,14 +1294,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
int i;
bool found = false;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9c85a90be293..502b94fb116a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -31,6 +31,7 @@
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
+#include "amdgpu_gmc.h"
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct drm_amdgpu_cs_chunk_fence *data,
@@ -65,11 +66,35 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
return 0;
}
-static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_bo_list_in *data)
+{
+ int r;
+ struct drm_amdgpu_bo_list_entry *info = NULL;
+
+ r = amdgpu_bo_create_list_entry_array(data, &info);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
+ &p->bo_list);
+ if (r)
+ goto error_free;
+
+ kvfree(info);
+ return 0;
+
+error_free:
+ if (info)
+ kvfree(info);
+
+ return r;
+}
+
+static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- union drm_amdgpu_cs *cs = data;
uint64_t *chunk_array_user;
uint64_t *chunk_array;
unsigned size, num_ibs = 0;
@@ -163,6 +188,19 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
break;
+ case AMDGPU_CHUNK_ID_BO_HANDLES:
+ size = sizeof(struct drm_amdgpu_bo_list_in);
+ if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
+ ret = -EINVAL;
+ goto free_partial_kdata;
+ }
+
+ ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
+ if (ret)
+ goto free_partial_kdata;
+
+ break;
+
case AMDGPU_CHUNK_ID_DEPENDENCIES:
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
@@ -186,6 +224,10 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
if (p->uf_entry.robj)
p->job->uf_addr = uf_offset;
kfree(chunk_array);
+
+ /* Use this opportunity to fill in task info for the vm */
+ amdgpu_vm_set_task_info(vm);
+
return 0;
free_all_kdata:
@@ -257,7 +299,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
return;
}
- total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
+ total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
@@ -302,7 +344,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
/* Do the same for visible VRAM if half of it is free */
- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size) {
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
u64 total_vis_vram = adev->gmc.visible_vram_size;
u64 used_vis_vram =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
@@ -359,7 +401,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
* to move it. Don't move anything if the threshold is zero.
*/
if (p->bytes_moved < p->bytes_moved_threshold) {
- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
(bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
/* And don't move a CPU_ACCESS_REQUIRED BO to limited
* visible VRAM if we've depleted our allowance to do
@@ -377,11 +419,11 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
}
retry:
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
p->bytes_moved += ctx.bytes_moved;
- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
amdgpu_bo_in_cpu_visible_vram(bo))
p->bytes_moved_vis += ctx.bytes_moved;
@@ -434,9 +476,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
/* Good we can try to move this BO somewhere else */
update_bytes_moved_vis =
- adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
- amdgpu_bo_in_cpu_visible_vram(bo);
- amdgpu_ttm_placement_from_domain(bo, other);
+ !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ amdgpu_bo_in_cpu_visible_vram(bo);
+ amdgpu_bo_placement_from_domain(bo, other);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
p->bytes_moved += ctx.bytes_moved;
if (update_bytes_moved_vis)
@@ -490,8 +532,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
/* Check if we have user pages and nobody bound the BO already */
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
lobj->user_pages) {
- amdgpu_ttm_placement_from_domain(bo,
- AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_placement_from_domain(bo,
+ AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return r;
@@ -519,23 +561,38 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
- unsigned i, tries = 10;
struct amdgpu_bo *gds;
struct amdgpu_bo *gws;
struct amdgpu_bo *oa;
+ unsigned tries = 10;
int r;
INIT_LIST_HEAD(&p->validated);
- p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
- if (p->bo_list) {
- amdgpu_bo_list_get_list(p->bo_list, &p->validated);
- if (p->bo_list->first_userptr != p->bo_list->num_entries)
- p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
+ /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
+ if (cs->in.bo_list_handle) {
+ if (p->bo_list)
+ return -EINVAL;
+
+ r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
+ &p->bo_list);
+ if (r)
+ return r;
+ } else if (!p->bo_list) {
+ /* Create an empty bo_list when no handle is provided */
+ r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
+ &p->bo_list);
+ if (r)
+ return r;
}
+ amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+ if (p->bo_list->first_userptr != p->bo_list->num_entries)
+ p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
+
INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
@@ -544,7 +601,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
while (1) {
struct list_head need_pages;
- unsigned i;
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
&duplicates);
@@ -554,17 +610,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto error_free_pages;
}
- /* Without a BO list we don't have userptr BOs */
- if (!p->bo_list)
- break;
-
INIT_LIST_HEAD(&need_pages);
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- struct amdgpu_bo *bo;
-
- e = &p->bo_list->array[i];
- bo = e->robj;
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = e->robj;
if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
&e->user_invalidated) && e->user_pages) {
@@ -656,23 +704,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
p->bytes_moved_vis);
- if (p->bo_list) {
- struct amdgpu_vm *vm = &fpriv->vm;
- unsigned i;
+ gds = p->bo_list->gds_obj;
+ gws = p->bo_list->gws_obj;
+ oa = p->bo_list->oa_obj;
- gds = p->bo_list->gds_obj;
- gws = p->bo_list->gws_obj;
- oa = p->bo_list->oa_obj;
- for (i = 0; i < p->bo_list->num_entries; i++) {
- struct amdgpu_bo *bo = p->bo_list->array[i].robj;
-
- p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
- }
- } else {
- gds = p->adev->gds.gds_gfx_bo;
- gws = p->adev->gds.gws_gfx_bo;
- oa = p->adev->gds.oa_gfx_bo;
- }
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+ e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
if (gds) {
p->job->gds_base = amdgpu_bo_gpu_offset(gds);
@@ -700,18 +737,13 @@ error_validate:
error_free_pages:
- if (p->bo_list) {
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- e = &p->bo_list->array[i];
-
- if (!e->user_pages)
- continue;
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ if (!e->user_pages)
+ continue;
- release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages);
- kvfree(e->user_pages);
- }
+ release_pages(e->user_pages,
+ e->robj->tbo.ttm->num_pages);
+ kvfree(e->user_pages);
}
return r;
@@ -773,12 +805,13 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
{
- struct amdgpu_device *adev = p->adev;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_device *adev = p->adev;
struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_bo_list_entry *e;
struct amdgpu_bo_va *bo_va;
struct amdgpu_bo *bo;
- int i, r;
+ int r;
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
@@ -808,29 +841,26 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
return r;
}
- if (p->bo_list) {
- for (i = 0; i < p->bo_list->num_entries; i++) {
- struct dma_fence *f;
-
- /* ignore duplicates */
- bo = p->bo_list->array[i].robj;
- if (!bo)
- continue;
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ struct dma_fence *f;
- bo_va = p->bo_list->array[i].bo_va;
- if (bo_va == NULL)
- continue;
+ /* ignore duplicates */
+ bo = e->robj;
+ if (!bo)
+ continue;
- r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r)
- return r;
+ bo_va = e->bo_va;
+ if (bo_va == NULL)
+ continue;
- f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
- if (r)
- return r;
- }
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (r)
+ return r;
+ f = bo_va->last_pt_update;
+ r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+ if (r)
+ return r;
}
r = amdgpu_vm_handle_moved(adev, vm);
@@ -845,15 +875,14 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r)
return r;
- if (amdgpu_vm_debug && p->bo_list) {
+ if (amdgpu_vm_debug) {
/* Invalidate all BOs to test for userspace bugs */
- for (i = 0; i < p->bo_list->num_entries; i++) {
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
/* ignore duplicates */
- bo = p->bo_list->array[i].robj;
- if (!bo)
+ if (!e->robj)
continue;
- amdgpu_vm_bo_invalidate(adev, bo, false);
+ amdgpu_vm_bo_invalidate(adev, e->robj, false);
}
}
@@ -865,11 +894,11 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_ring *ring = p->job->ring;
+ struct amdgpu_ring *ring = p->ring;
int r;
/* Only for UVD/VCE VM emulation */
- if (p->job->ring->funcs->parse_cs) {
+ if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) {
unsigned i, j;
for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
@@ -910,12 +939,20 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
offset = m->start * AMDGPU_GPU_PAGE_SIZE;
kptr += va_start - offset;
- memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
- amdgpu_bo_kunmap(aobj);
-
- r = amdgpu_ring_parse_cs(ring, p, j);
- if (r)
- return r;
+ if (p->ring->funcs->parse_cs) {
+ memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
+ amdgpu_bo_kunmap(aobj);
+
+ r = amdgpu_ring_parse_cs(ring, p, j);
+ if (r)
+ return r;
+ } else {
+ ib->ptr = (uint32_t *)kptr;
+ r = amdgpu_ring_patch_cs_in_place(ring, p, j);
+ amdgpu_bo_kunmap(aobj);
+ if (r)
+ return r;
+ }
j++;
}
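
The branch above separates two ring hooks: parse_cs validates a copy of the user IB, while patch_cs_in_place edits the kernel mapping of the IB directly, which is why the buffer is only unmapped after patching. A minimal sketch of the dispatch wrapper, assuming it simply forwards through the ring function table:

    /* Sketch only; the actual wrapper lives with the other
     * amdgpu_ring_*() helpers and may differ. */
    static inline int
    amdgpu_ring_patch_cs_in_place(struct amdgpu_ring *ring,
                                  struct amdgpu_cs_parser *p, uint32_t ib_idx)
    {
            return ring->funcs->patch_cs_in_place(p, ib_idx);
    }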
@@ -983,10 +1020,10 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
}
}
- if (parser->job->ring && parser->job->ring != ring)
+ if (parser->ring && parser->ring != ring)
return -EINVAL;
- parser->job->ring = ring;
+ parser->ring = ring;
r = amdgpu_ib_get(adev, vm,
ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
@@ -1005,11 +1042,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
/* UVD & VCE fw doesn't support user fences */
if (parser->job->uf_addr && (
- parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
- parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
+ parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+ parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
- return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
+ return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
}
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1160,31 +1197,30 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
- struct amdgpu_ring *ring = p->job->ring;
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_ring *ring = p->ring;
struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ enum drm_sched_priority priority;
+ struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
- unsigned i;
uint64_t seq;
int r;
amdgpu_mn_lock(p->mn);
- if (p->bo_list) {
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- struct amdgpu_bo *bo = p->bo_list->array[i].robj;
-
- if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
- amdgpu_mn_unlock(p->mn);
- return -ERESTARTSYS;
- }
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = e->robj;
+
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+ amdgpu_mn_unlock(p->mn);
+ return -ERESTARTSYS;
}
}
job = p->job;
p->job = NULL;
- r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+ r = drm_sched_job_init(&job->base, entity, p->filp);
if (r) {
amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
@@ -1192,7 +1228,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
}
job->owner = p->filp;
- job->fence_ctx = entity->fence_context;
p->fence = dma_fence_get(&job->base.s_fence->finished);
r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
@@ -1210,11 +1245,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->uf_sequence = seq;
amdgpu_job_free_resources(job);
- amdgpu_ring_priority_get(job->ring, job->base.s_priority);
trace_amdgpu_cs_ioctl(job);
+ amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
+ priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
+ ring = to_amdgpu_ring(entity->rq->sched);
+ amdgpu_ring_priority_get(ring, priority);
+
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
amdgpu_mn_unlock(p->mn);
@@ -1605,7 +1644,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+ amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c5bb36275e93..df6965761046 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
if (ring == &adev->gfx.kiq.ring)
continue;
- r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
- rq, &ctx->guilty);
+ r = drm_sched_entity_init(&ctx->rings[i].entity,
+ &rq, 1, &ctx->guilty);
if (r)
goto failed;
}
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
failed:
for (j = 0; j < i; j++)
- drm_sched_entity_fini(&adev->rings[j]->sched,
- &ctx->rings[j].entity);
+ drm_sched_entity_destroy(&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
return r;
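
drm_sched_entity_init() no longer takes the scheduler; it takes an array of candidate run queues plus a count, and teardown moves from drm_sched_entity_fini(sched, entity) to drm_sched_entity_destroy(entity). A hedged usage sketch, assuming the run queue is taken from the ring's scheduler at normal priority:

    struct drm_sched_rq *rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
    atomic_t guilty;

    r = drm_sched_entity_init(&entity, &rq, 1, &guilty);
    if (r)
            return r;
    /* ... push jobs to the entity ... */
    drm_sched_entity_destroy(&entity);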
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
- drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity);
+ drm_sched_entity_destroy(&ctx->rings[i].entity);
}
amdgpu_ctx_fini(ref);
@@ -444,34 +442,36 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
idr_init(&mgr->ctx_handles);
}
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
uint32_t id, i;
+ long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
idp = &mgr->ctx_handles;
+ mutex_lock(&mgr->lock);
idr_for_each_entry(idp, ctx, id) {
- if (!ctx->adev)
+ if (!ctx->adev) {
+ mutex_unlock(&mgr->lock);
return;
+ }
for (i = 0; i < ctx->adev->num_rings; i++) {
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
- if (kref_read(&ctx->refcount) == 1)
- drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity);
- else
- DRM_ERROR("ctx %p is still alive\n", ctx);
+ max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+ max_wait);
}
}
+ mutex_unlock(&mgr->lock);
}
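
Note the shared wait budget: drm_sched_entity_flush() is assumed here to return the remaining timeout, so a process with many contexts drains them all within one MAX_WAIT_SCHED_ENTITY_Q_EMPTY allowance instead of waiting per entity. In sketch form:

    long timeout = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;

    for (i = 0; i < num_entities; i++)
            timeout = drm_sched_entity_flush(&entities[i], timeout);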
-void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
@@ -490,8 +490,7 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
continue;
if (kref_read(&ctx->refcount) == 1)
- drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity);
+ drm_sched_entity_fini(&ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
}
@@ -504,7 +503,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
struct idr *idp;
uint32_t id;
- amdgpu_ctx_mgr_entity_cleanup(mgr);
+ amdgpu_ctx_mgr_entity_fini(mgr);
idp = &mgr->ctx_handles;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2c5f093e79e3..1e66dfd0e39c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
@@ -675,17 +676,15 @@ void amdgpu_device_vram_location(struct amdgpu_device *adev,
}
/**
- * amdgpu_device_gart_location - try to find GTT location
+ * amdgpu_device_gart_location - try to find GART location
*
* @adev: amdgpu device structure holding all necessary informations
* @mc: memory controller structure holding memory informations
*
- * Function will place try to place GTT before or after VRAM.
+ * Function will try to place GART before or after VRAM.
*
- * If GTT size is bigger than space left then we ajust GTT size.
+ * If GART size is bigger than space left then we adjust GART size.
* Thus function will never fails.
- *
- * FIXME: when reducing GTT size align new size on power of 2.
*/
void amdgpu_device_gart_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc)
@@ -698,13 +697,13 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
size_bf = mc->vram_start;
if (size_bf > size_af) {
if (mc->gart_size > size_bf) {
- dev_warn(adev->dev, "limiting GTT\n");
+ dev_warn(adev->dev, "limiting GART\n");
mc->gart_size = size_bf;
}
mc->gart_start = 0;
} else {
if (mc->gart_size > size_af) {
- dev_warn(adev->dev, "limiting GTT\n");
+ dev_warn(adev->dev, "limiting GART\n");
mc->gart_size = size_af;
}
/* VCE doesn't like it when BOs cross a 4GB segment, so align
@@ -713,7 +712,7 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
}
mc->gart_end = mc->gart_start + mc->gart_size - 1;
- dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
+ dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
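
For intuition, the placement compares the free space before VRAM (size_bf) with the space after it up to the MC address mask (size_af) and puts the GART in the larger gap, shrinking it if it does not fit. A worked sketch with purely hypothetical numbers:

    /* Hypothetical 44-bit MC space with VRAM at [0, 8 GiB). */
    u64 mc_mask    = (1ULL << 44) - 1;
    u64 vram_start = 0;
    u64 vram_end   = (8ULL << 30) - 1;
    u64 size_af    = mc_mask - vram_end;    /* large: space after VRAM  */
    u64 size_bf    = vram_start;            /* zero: space before VRAM  */
    /* size_af wins, so the GART lands after VRAM, aligned to 4 GiB
     * because VCE dislikes BOs that cross a 4 GiB boundary. */
    u64 gart_start = ALIGN(vram_end + 1, 0x100000000ULL);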
@@ -1077,7 +1076,7 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
/**
* amdgpu_device_ip_set_clockgating_state - set the CG state
*
- * @adev: amdgpu_device pointer
+ * @dev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
* @state: clockgating state (gate or ungate)
*
@@ -1111,7 +1110,7 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
/**
* amdgpu_device_ip_set_powergating_state - set the PG state
*
- * @adev: amdgpu_device pointer
+ * @dev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
* @state: powergating state (gate or ungate)
*
@@ -1222,7 +1221,7 @@ bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
* amdgpu_device_ip_get_ip_block - get a hw IP pointer
*
* @adev: amdgpu_device pointer
- * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
*
* Returns a pointer to the hardware IP block structure
* if it exists for the asic, otherwise NULL.
@@ -1708,10 +1707,6 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
if (amdgpu_emu_mode == 1)
return 0;
- r = amdgpu_ib_ring_tests(adev);
- if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
-
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -1731,17 +1726,34 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
}
}
- if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) {
- /* enable gfx powergating */
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_GATE);
- /* enable gfxoff */
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- AMD_PG_STATE_GATE);
- }
+ return 0;
+}
+static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev)
+{
+ int i = 0, r;
+
+ if (amdgpu_emu_mode == 1)
+ return 0;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ /* skip PG for VCE/UVD/VCN, it's handled specially */
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->funcs->set_powergating_state) {
+ /* enable powergating to save power */
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+ AMD_PG_STATE_GATE);
+ if (r) {
+ DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+ }
+ }
return 0;
}
@@ -1775,6 +1787,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
}
}
+ amdgpu_device_ip_late_set_cg_state(adev);
+ amdgpu_device_ip_late_set_pg_state(adev);
+
queue_delayed_work(system_wq, &adev->late_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
@@ -1813,6 +1828,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
@@ -1901,11 +1918,15 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, late_init_work.work);
- amdgpu_device_ip_late_set_cg_state(adev);
+ int r;
+
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
}
/**
- * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
*
* @adev: amdgpu_device pointer
*
@@ -1915,18 +1936,60 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
* in each IP into a state suitable for suspend.
* Returns 0 on success, negative error code on failure.
*/
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
if (amdgpu_sriov_vf(adev))
amdgpu_virt_request_full_gpu(adev, false);
- /* ungate SMC block powergating */
- if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- AMD_CG_STATE_UNGATE);
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ /* displays are handled separately */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+ /* ungate blocks so that suspend can properly shut them down */
+ if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
+ }
+ /* XXX handle errors */
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
+ /* XXX handle errors */
+ if (r) {
+ DRM_ERROR("suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
+ }
+ }
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, false);
+
+ return 0;
+}
+
+/**
+ * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_request_full_gpu(adev, false);
/* ungate SMC block first */
r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
@@ -1935,9 +1998,16 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
}
+ /* call smu to disable the gfx off feature first when suspending */
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
+ /* displays are handled in phase1 */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
+ continue;
/* ungate blocks so that suspend can properly shut them down */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
@@ -1963,6 +2033,29 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
return 0;
}
+/**
+ * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_device_ip_suspend_phase1(adev);
+ if (r)
+ return r;
+ r = amdgpu_device_ip_suspend_phase2(adev);
+
+ return r;
+}
+
static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
int i, r;
@@ -1985,7 +2078,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
continue;
r = block->version->funcs->hw_init(adev);
- DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+ DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
}
@@ -2020,7 +2113,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
continue;
r = block->version->funcs->hw_init(adev);
- DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+ DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
}
@@ -2181,7 +2274,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case CHIP_RAVEN:
#endif
return amdgpu_dc != 0;
@@ -2210,7 +2303,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
* amdgpu_device_init - initialize the driver
*
* @adev: amdgpu_device pointer
- * @pdev: drm dev pointer
+ * @ddev: drm dev pointer
* @pdev: pci dev pointer
* @flags: driver flags
*
@@ -2301,6 +2394,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&adev->late_init_work,
amdgpu_device_ip_late_init_func_handler);
+ adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
+
/* Registers mapping */
/* TODO: block userspace mapping of io register */
if (adev->asic_type >= CHIP_BONAIRE) {
@@ -2581,8 +2676,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
/**
* amdgpu_device_suspend - initiate device suspend
*
- * @pdev: drm dev pointer
- * @state: suspend state
+ * @dev: drm dev pointer
+ * @suspend: suspend state
+ * @fbcon: notify the fbdev of suspend
*
* Puts the hw in the suspend state (all asics).
* Returns 0 for success or an error on failure.
@@ -2606,6 +2702,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
+ if (fbcon)
+ amdgpu_fbdev_set_suspend(adev, 1);
+
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
drm_modeset_lock_all(dev);
@@ -2613,44 +2712,46 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
drm_modeset_unlock_all(dev);
- }
-
- amdgpu_amdkfd_suspend(adev);
-
- /* unpin the front buffers and cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct amdgpu_bo *robj;
-
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, true);
- if (r == 0) {
- amdgpu_bo_unpin(aobj);
- amdgpu_bo_unreserve(aobj);
+ /* unpin the front buffers and cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct amdgpu_bo *robj;
+
+ if (amdgpu_crtc->cursor_bo) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
}
- }
- if (fb == NULL || fb->obj[0] == NULL) {
- continue;
- }
- robj = gem_to_amdgpu_bo(fb->obj[0]);
- /* don't unpin kernel fb objects */
- if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
- r = amdgpu_bo_reserve(robj, true);
- if (r == 0) {
- amdgpu_bo_unpin(robj);
- amdgpu_bo_unreserve(robj);
+ if (fb == NULL || fb->obj[0] == NULL) {
+ continue;
+ }
+ robj = gem_to_amdgpu_bo(fb->obj[0]);
+ /* don't unpin kernel fb objects */
+ if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+ r = amdgpu_bo_reserve(robj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(robj);
+ amdgpu_bo_unreserve(robj);
+ }
}
}
}
+
+ amdgpu_amdkfd_suspend(adev);
+
+ r = amdgpu_device_ip_suspend_phase1(adev);
+
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_fence_driver_suspend(adev);
- r = amdgpu_device_ip_suspend(adev);
+ r = amdgpu_device_ip_suspend_phase2(adev);
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
@@ -2669,18 +2770,15 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
DRM_ERROR("amdgpu asic reset failed\n");
}
- if (fbcon) {
- console_lock();
- amdgpu_fbdev_set_suspend(adev, 1);
- console_unlock();
- }
return 0;
}
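
Suspending the display blocks first lets scanout buffers be unpinned and evicted while the GPU core can still execute the moves; a comment-only sketch of the ordering this hunk establishes:

    /* Suspend ordering after this change (sketch):
     * 1. phase1: ungate and suspend the display (DCE) blocks
     * 2. evict VRAM buffers while the rest of the chip is still up
     * 3. phase2: ungate SMC, disable gfxoff, suspend the remaining IPs
     * 4. evict again so the now-idle GART page table leaves VRAM
     */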
/**
* amdgpu_device_resume - initiate device resume
*
- * @pdev: drm dev pointer
+ * @dev: drm dev pointer
+ * @resume: resume state
+ * @fbcon: notify the fbdev of resume
*
* Bring the hw back to operating state (all asics).
* Returns 0 for success or an error on failure.
@@ -2696,15 +2794,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (fbcon)
- console_lock();
-
if (resume) {
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
r = pci_enable_device(dev->pdev);
if (r)
- goto unlock;
+ return r;
}
/* post card */
@@ -2717,29 +2812,30 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
r = amdgpu_device_ip_resume(adev);
if (r) {
DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
- goto unlock;
+ return r;
}
amdgpu_fence_driver_resume(adev);
r = amdgpu_device_ip_late_init(adev);
if (r)
- goto unlock;
-
- /* pin cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, true);
- if (r == 0) {
- r = amdgpu_bo_pin(aobj,
- AMDGPU_GEM_DOMAIN_VRAM,
- &amdgpu_crtc->cursor_addr);
- if (r != 0)
- DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
- amdgpu_bo_unreserve(aobj);
+ return r;
+
+ if (!amdgpu_device_has_dc_support(adev)) {
+ /* pin cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->cursor_bo) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r != 0)
+ DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
}
}
}
@@ -2763,6 +2859,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
drm_modeset_unlock_all(dev);
}
+ amdgpu_fbdev_set_suspend(adev, 0);
}
drm_kms_helper_poll_enable(dev);
@@ -2786,15 +2883,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
#ifdef CONFIG_PM
dev->dev->power.disable_depth--;
#endif
-
- if (fbcon)
- amdgpu_fbdev_set_suspend(adev, 0);
-
-unlock:
- if (fbcon)
- console_unlock();
-
- return r;
+ return 0;
}
/**
@@ -3019,7 +3108,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
long tmo;
if (amdgpu_sriov_runtime(adev))
- tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
+ tmo = msecs_to_jiffies(8000);
else
tmo = msecs_to_jiffies(100);
@@ -3071,7 +3160,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
* @adev: amdgpu device pointer
*
* attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means success, otherwise failure
*/
static int amdgpu_device_reset(struct amdgpu_device *adev)
{
@@ -3146,9 +3235,10 @@ out:
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
*
* @adev: amdgpu device pointer
+ * @from_hypervisor: request from hypervisor
*
* do VF FLR and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means success, otherwise failure
*/
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
bool from_hypervisor)
@@ -3193,7 +3283,7 @@ error:
*
* @adev: amdgpu device pointer
* @job: which job trigger hang
- * @force forces reset regardless of amdgpu_gpu_recovery
+ * @force: forces reset regardless of amdgpu_gpu_recovery
*
* Attempt to reset the GPU if it has hung (all asics).
* Returns 0 for success or an error on failure.
@@ -3220,6 +3310,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
atomic_inc(&adev->gpu_reset_counter);
adev->in_gpu_reset = 1;
+ /* Block kfd */
+ amdgpu_amdkfd_pre_reset(adev);
+
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
@@ -3232,10 +3325,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
kthread_park(ring->sched.thread);
- if (job && job->ring->idx != i)
+ if (job && job->base.sched == &ring->sched)
continue;
- drm_sched_hw_job_reset(&ring->sched, &job->base);
+ drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
@@ -3256,7 +3349,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* or all rings (in the case @job is NULL)
* after above amdgpu_reset accomplished
*/
- if ((!job || job->ring->idx == i) && !r)
+ if ((!job || job->base.sched == &ring->sched) && !r)
drm_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
@@ -3273,9 +3366,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
- dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
+ dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
}
+ /* unlock kfd */
+ amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
@@ -3293,8 +3388,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
- u32 mask;
- int ret;
+ struct pci_dev *pdev;
+ enum pci_bus_speed speed_cap;
+ enum pcie_link_width link_width;
if (amdgpu_pcie_gen_cap)
adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
@@ -3312,27 +3408,61 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
}
if (adev->pm.pcie_gen_mask == 0) {
- ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
- if (!ret) {
- adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ /* asic caps */
+ pdev = adev->pdev;
+ speed_cap = pcie_get_speed_cap(pdev);
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
-
- if (mask & DRM_PCIE_SPEED_25)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
- if (mask & DRM_PCIE_SPEED_50)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
- if (mask & DRM_PCIE_SPEED_80)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
} else {
- adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+ if (speed_cap == PCIE_SPEED_16_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
+ else if (speed_cap == PCIE_SPEED_8_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
+ else if (speed_cap == PCIE_SPEED_5_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
+ else
+ adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
+ }
+ /* platform caps */
+ pdev = adev->ddev->pdev->bus->self;
+ speed_cap = pcie_get_speed_cap(pdev);
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
+ } else {
+ if (speed_cap == PCIE_SPEED_16_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
+ else if (speed_cap == PCIE_SPEED_8_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
+ else if (speed_cap == PCIE_SPEED_5_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
+ else
+ adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
+
}
}
if (adev->pm.pcie_mlw_mask == 0) {
- ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
- if (!ret) {
- switch (mask) {
- case 32:
+ pdev = adev->ddev->pdev->bus->self;
+ link_width = pcie_get_width_cap(pdev);
+ if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+ adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
+ } else {
+ switch (link_width) {
+ case PCIE_LNK_X32:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
@@ -3341,7 +3471,7 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 16:
+ case PCIE_LNK_X16:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
@@ -3349,36 +3479,34 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 12:
+ case PCIE_LNK_X12:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 8:
+ case PCIE_LNK_X8:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 4:
+ case PCIE_LNK_X4:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 2:
+ case PCIE_LNK_X2:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 1:
+ case PCIE_LNK_X1:
adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
break;
default:
break;
}
- } else {
- adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
}
}
}
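
The rewrite queries the PCI core directly instead of the removed DRM wrappers. An illustrative refactor of the cumulative speed-mask logic above (not the driver's actual helper; the CAIL_* flags come from amd_pcie.h), showing why each faster generation also implies the slower ones:

    static u32 speed_cap_to_gen_mask(enum pci_bus_speed cap)
    {
            switch (cap) {
            case PCIE_SPEED_16_0GT:
                    return CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                           CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                           CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
                           CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4;
            case PCIE_SPEED_8_0GT:
                    return CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                           CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                           CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
            case PCIE_SPEED_5_0GT:
                    return CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                           CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
            default:
                    return CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
            }
    }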
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 76ee8e04ff11..6748cd7fc129 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -157,7 +157,6 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
struct amdgpu_bo *new_abo;
unsigned long flags;
u64 tiling_flags;
- u64 base;
int i, r;
work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -189,12 +188,18 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto cleanup;
}
- r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
+ r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
if (unlikely(r != 0)) {
DRM_ERROR("failed to pin new abo buffer before flip\n");
goto unreserve;
}
+ r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("%p bind failed\n", new_abo);
+ goto unpin;
+ }
+
r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
&work->shared_count,
&work->shared);
@@ -206,7 +211,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
amdgpu_bo_unreserve(new_abo);
- work->base = base;
+ work->base = amdgpu_bo_gpu_offset(new_abo);
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
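
Throughout this series amdgpu_bo_pin() no longer returns a GPU address; callers pin, bind to GART where the domain requires it, then query the offset. The pattern in sketch form (error paths trimmed):

    r = amdgpu_bo_pin(abo, domain);          /* pin, no address out    */
    if (r)
            goto unreserve;
    r = amdgpu_ttm_alloc_gart(&abo->tbo);    /* bind to GART if needed */
    if (r)
            goto unpin;
    addr = amdgpu_bo_gpu_offset(abo);        /* query the address last */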
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 77ad59ade85c..1c4595562f8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -28,6 +28,7 @@
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
+#include "amd_pcie.h"
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
@@ -936,9 +937,11 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
case AMDGPU_PCIE_GEN3:
return AMDGPU_PCIE_GEN3;
default:
- if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
+ if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
+ (default_gen == AMDGPU_PCIE_GEN3))
return AMDGPU_PCIE_GEN3;
- else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
+ else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
+ (default_gen == AMDGPU_PCIE_GEN2))
return AMDGPU_PCIE_GEN2;
else
return AMDGPU_PCIE_GEN1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index dd6203a0a6b7..ff24e1cc5b65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -287,12 +287,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_force_performance_level(adev, l) \
((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
-#define amdgpu_dpm_powergate_uvd(adev, g) \
- ((adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
- ((adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)))
-
#define amdgpu_dpm_get_current_power_state(adev) \
((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
@@ -347,6 +341,10 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
(adev)->powerplay.pp_handle, msg_id))
+#define amdgpu_dpm_set_powergating_by_smu(adev, block_type, gate) \
+ ((adev)->powerplay.pp_funcs->set_powergating_by_smu(\
+ (adev)->powerplay.pp_handle, block_type, gate))
+
#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
((adev)->powerplay.pp_funcs->get_power_profile_mode(\
(adev)->powerplay.pp_handle, buf))
@@ -359,10 +357,6 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
(adev)->powerplay.pp_handle, type, parameter, size))
-#define amdgpu_dpm_set_mmhub_powergating_by_smu(adev) \
- ((adev)->powerplay.pp_funcs->set_mmhub_powergating_by_smu( \
- (adev)->powerplay.pp_handle))
-
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
@@ -402,7 +396,6 @@ struct amdgpu_dpm {
u32 tdp_adjustment;
u16 load_line_slope;
bool power_control;
- bool ac_power;
/* special states active */
bool thermal_active;
bool uvd_active;
@@ -439,6 +432,7 @@ struct amdgpu_pm {
struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
uint32_t smu_prv_buffer_size;
struct amdgpu_bo *smu_prv_buffer;
+ bool ac_power;
};
#define R600_SSTU_DFLT 0
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b0bf2f24da48..8843a06360fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1,10 +1,3 @@
-/**
- * \file amdgpu_drv.c
- * AMD Amdgpu driver
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
/*
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -76,9 +69,10 @@
* - 3.24.0 - Add high priority compute support for gfx9
* - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
* - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
+ * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 26
+#define KMS_DRIVER_MINOR 27
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -110,11 +104,8 @@ int amdgpu_vram_page_split = 512;
int amdgpu_vm_update_mode = -1;
int amdgpu_exp_hw_support = 0;
int amdgpu_dc = -1;
-int amdgpu_dc_log = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
-int amdgpu_no_evict = 0;
-int amdgpu_direct_gma_size = 0;
uint amdgpu_pcie_gen_cap = 0;
uint amdgpu_pcie_lane_cap = 0;
uint amdgpu_cg_mask = 0xffffffff;
@@ -122,7 +113,8 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-uint amdgpu_pp_feature_mask = 0xffff3fff; /* gfxoff (bit 15) disabled by default */
+/* OverDrive (bit 14), gfxoff (bit 15) and stutter mode (bit 17) disabled by default */
+uint amdgpu_pp_feature_mask = 0xfffd3fff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
@@ -135,163 +127,368 @@ int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode = 0;
uint amdgpu_smu_memory_pool_size = 0;
+/**
+ * DOC: vramlimit (int)
+ * Restrict the total amount of VRAM in MiB for testing. The default is 0 (Use full VRAM).
+ */
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
+/**
+ * DOC: vis_vramlimit (int)
+ * Restrict the amount of CPU visible VRAM in MiB for testing. The default is 0 (Use full CPU visible VRAM).
+ */
MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);
+/**
+ * DOC: gartsize (uint)
+ * Restrict the size of GART in MiB (32, 64, etc.) for testing. The default is -1 (the size depends on the asic).
+ */
MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)");
module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
+/**
+ * DOC: gttsize (int)
+ * Restrict the size of the GTT domain in MiB for testing. The default is -1 (VRAM size if 3 GiB < VRAM < 3/4 of
+ * system RAM, otherwise 3/4 of system RAM).
+ */
MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
+/**
+ * DOC: moverate (int)
+ * Set maximum buffer migration rate in MB/s. The default is -1 (8 MB/s).
+ */
MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
module_param_named(moverate, amdgpu_moverate, int, 0600);
+/**
+ * DOC: benchmark (int)
+ * Run benchmarks. The default is 0 (Skip benchmarks).
+ */
MODULE_PARM_DESC(benchmark, "Run benchmark");
module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
+/**
+ * DOC: test (int)
+ * Test BO GTT->VRAM and VRAM->GTT GPU copies. The default is 0 (skip tests; set 1 to run them).
+ */
MODULE_PARM_DESC(test, "Run tests");
module_param_named(test, amdgpu_testing, int, 0444);
+/**
+ * DOC: audio (int)
+ * Set HDMI/DP audio. Only affects non-DC display handling. The default is -1 (enabled); set 0 to disable it.
+ */
MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, amdgpu_audio, int, 0444);
+/**
+ * DOC: disp_priority (int)
+ * Set display priority (1 = normal, 2 = high). Only affects non-DC display handling. The default is 0 (auto).
+ */
MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);
+/**
+ * DOC: hw_i2c (int)
+ * Enable the hw i2c engine. Only affects non-DC display handling. The default is 0 (disabled).
+ */
MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);
+/**
+ * DOC: pcie_gen2 (int)
+ * Disable or enable PCIE Gen2/3 mode (0 = disable, 1 = enable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
+/**
+ * DOC: msi (int)
+ * Enable or disable Message Signaled Interrupts (MSI) (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
+/**
+ * DOC: lockup_timeout (int)
+ * Set the GPU scheduler timeout value in ms. A value of 0 is invalid and will be adjusted to 10000.
+ * Negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET). The default is 10000.
+ */
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 10000)");
module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
+/**
+ * DOC: dpm (int)
+ * Override for dynamic power management setting (1 = enable, 0 = disable). The default is -1 (auto).
+ */
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(dpm, amdgpu_dpm, int, 0444);
+/**
+ * DOC: fw_load_type (int)
+ * Set different firmware loading type for debugging (0 = direct, 1 = SMU, 2 = PSP). The default is -1 (auto).
+ */
MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)");
module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);
+/**
+ * DOC: aspm (int)
+ * To disable ASPM (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(aspm, amdgpu_aspm, int, 0444);
+/**
+ * DOC: runpm (int)
+ * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
+ * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
+ */
MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
+/**
+ * DOC: ip_block_mask (uint)
+ * Override which IP blocks are enabled on the GPU. Each GPU is a collection of IP blocks (gfx, display, video, etc.).
+ * Use this parameter to disable specific blocks. Note that the IP blocks do not have a fixed index. Some asics may not have
+ * some IPs or may include multiple instances of an IP, so the ordering varies from asic to asic. See the driver output in
+ * the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
+ */
MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
+/**
+ * DOC: bapm (int)
+ * Bidirectional Application Power Management (BAPM) is used to dynamically share TDP between CPU and GPU. Set 0 to disable it.
+ * The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(bapm, amdgpu_bapm, int, 0444);
+/**
+ * DOC: deep_color (int)
+ * Set 1 to enable Deep Color support. Only affects non-DC display handling. The default is 0 (disabled).
+ */
MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, amdgpu_deep_color, int, 0444);
+/**
+ * DOC: vm_size (int)
+ * Override the size of the GPU's per client virtual address space in GiB. The default is -1 (automatic for each asic).
+ */
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
module_param_named(vm_size, amdgpu_vm_size, int, 0444);
+/**
+ * DOC: vm_fragment_size (int)
+ * Override VM fragment size in bits (4, 5, etc. 4 = 64K, 9 = 2M). The default is -1 (automatic for each asic).
+ */
MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 4 = 64K (default), Max 9 = 2M)");
module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);
+/**
+ * DOC: vm_block_size (int)
+ * Override VM page table size in bits (default depending on vm_size and hw setup). The default is -1 (automatic for each asic).
+ */
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
+/**
+ * DOC: vm_fault_stop (int)
+ * Stop on VM fault for debugging (0 = never, 1 = print first, 2 = always). The default is 0 (No stop).
+ */
MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
+/**
+ * DOC: vm_debug (int)
+ * Debug VM handling (0 = disabled, 1 = enabled). The default is 0 (Disabled).
+ */
MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
+/**
+ * DOC: vm_update_mode (int)
+ * Override VM update mode; VM updates may be performed by the CPU (0 = never, 1 = graphics only, 2 = compute only, 3 = both). The default
+ * is -1 (compute VM tables are updated by the CPU only on large BAR (LB) systems; otherwise 0, never).
+ */
MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both");
module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
+/**
+ * DOC: vram_page_split (int)
+ * Override the number of pages after which VRAM allocations are split (-1 = disable). The default is 512.
+ */
MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 512, -1 = disable)");
module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
+/**
+ * DOC: exp_hw_support (int)
+ * Enable experimental hw support (1 = enable). The default is 0 (disabled).
+ */
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+/**
+ * DOC: dc (int)
+ * Disable/Enable Display Core driver for debugging (1 = enable, 0 = disable). The default is -1 (automatic for each asic).
+ */
MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(dc, amdgpu_dc, int, 0444);
-MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = chatty");
-module_param_named(dc_log, amdgpu_dc_log, int, 0444);
-
+/**
+ * DOC: sched_jobs (int)
+ * Override the max number of jobs supported in the sw queue. The default is 32.
+ */
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
+/**
+ * DOC: sched_hw_submission (int)
+ * Override the max number of HW submissions. The default is 2.
+ */
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
+/**
+ * DOC: ppfeaturemask (uint)
+ * Override which power features are enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable power features.
+ */
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
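
As a quick check of the new default cited above: clearing OverDrive (bit 14), gfxoff (bit 15) and stutter mode (bit 17) from all-ones indeed yields the stated value:

    u32 mask = 0xffffffff & ~(BIT(14) | BIT(15) | BIT(17));
    /* mask == 0xfffd3fff */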
-MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))");
-module_param_named(no_evict, amdgpu_no_evict, int, 0444);
-
-MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
-module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
-
+/**
+ * DOC: pcie_gen_cap (uint)
+ * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
+ * The default is 0 (automatic for each asic).
+ */
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
+/**
+ * DOC: pcie_lane_cap (uint)
+ * Override PCIE lanes capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
+ * The default is 0 (automatic for each asic).
+ */
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
+/**
+ * DOC: cg_mask (uint)
+ * Override which clockgating features are enabled on the GPU (0 = disable clock gating). See the AMD_CG_SUPPORT flags in
+ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
+ */
MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
+/**
+ * DOC: pg_mask (uint)
+ * Override which powergating features are enabled on the GPU (0 = disable power gating). See the AMD_PG_SUPPORT flags in
+ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
+ */
MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
+/**
+ * DOC: sdma_phase_quantum (uint)
+ * Override SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change). The default is 32.
+ */
MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);
+/**
+ * DOC: disable_cu (charp)
+ * Set to disable CUs (formatted as se.sh.cu,...). The default is NULL.
+ */
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
+/**
+ * DOC: virtual_display (charp)
+ * Set to enable the virtual display feature. This feature provides virtual display hardware on headless boards
+ * or in virtualized environments. It is set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x: the PCI address of
+ * the device plus the number of crtcs to expose. E.g., 0000:26:00.0,4 would enable 4 virtual crtcs on the pci
+ * device at 26:00.0. The default is NULL.
+ */
MODULE_PARM_DESC(virtual_display,
"Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
+/**
+ * DOC: ngg (int)
+ * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled).
+ */
MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))");
module_param_named(ngg, amdgpu_ngg, int, 0444);
+/**
+ * DOC: prim_buf_per_se (int)
+ * Override the size of the Primitive Buffer per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);
+/**
+ * DOC: pos_buf_per_se (int)
+ * Override the size of the Position Buffer per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);
+/**
+ * DOC: cntl_sb_buf_per_se (int)
+ * Override the size of the Control Sideband per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
+/**
+ * DOC: param_buf_per_se (int)
+ * Override the size of the Off-Chip Parameter Cache per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)");
module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
+/**
+ * DOC: job_hang_limit (int)
+ * Set how long a job is allowed to hang without being dropped. The default is 0.
+ */
MODULE_PARM_DESC(job_hang_limit, "how much time allow a job hang and not drop it (default 0)");
module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);
+/**
+ * DOC: lbpw (int)
+ * Override Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(lbpw, amdgpu_lbpw, int, 0444);
MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
+/**
+ * DOC: gpu_recovery (int)
+ * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
+ */
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+/**
+ * DOC: emu_mode (int)
+ * Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
+ */
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
+/**
+ * DOC: si_support (int)
+ * Select the driver for SI asics. This parameter only takes effect when CONFIG_DRM_AMDGPU_SI is set. When the radeon driver is
+ * enabled, set 0 to use the radeon driver and 1 to use the amdgpu driver. The default is to use the radeon driver when it is
+ * available, otherwise the amdgpu driver.
+ */
#ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -305,6 +502,12 @@ MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)")
module_param_named(si_support, amdgpu_si_support, int, 0444);
#endif
+/**
+ * DOC: cik_support (int)
+ * Select the driver for CIK asics. This parameter only takes effect when CONFIG_DRM_AMDGPU_CIK is set. When the radeon driver is
+ * enabled, set 0 to use the radeon driver and 1 to use the amdgpu driver. The default is to use the radeon driver when it is
+ * available, otherwise the amdgpu driver.
+ */
#ifdef CONFIG_DRM_AMDGPU_CIK
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -318,6 +521,11 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif
+/**
+ * DOC: smu_memory_pool_size (uint)
+ * Reserve GTT memory for SMU debug use; set 0 to disable it. The actual size is value * 256 MiB,
+ * e.g. 0x1 = 256 MiB, 0x2 = 512 MiB, 0x4 = 1 GiB, 0x8 = 2 GiB. The default is 0 (disabled).
+ */
MODULE_PARM_DESC(smu_memory_pool_size,
"reserve gtt for smu debug usage, 0 = disable,"
"0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
@@ -664,7 +872,7 @@ retry_init:
err_pci:
pci_disable_device(pdev);
err_free:
- drm_dev_unref(dev);
+ drm_dev_put(dev);
return ret;
}
@@ -674,7 +882,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
- drm_dev_unref(dev);
+ drm_dev_put(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -855,9 +1063,21 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
.runtime_idle = amdgpu_pmops_runtime_idle,
};
+static int amdgpu_flush(struct file *f, fl_owner_t id)
+{
+ struct drm_file *file_priv = f->private_data;
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+
+ amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr);
+
+ return 0;
+}
+
+
static const struct file_operations amdgpu_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
+ .flush = amdgpu_flush,
.release = drm_release,
.unlocked_ioctl = amdgpu_drm_ioctl,
.mmap = amdgpu_mmap,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index 94138abe093b..ae8fac34f7a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -46,7 +46,7 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->devices & amdgpu_connector->devices) {
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
amdgpu_atombios_encoder_init_backlight(amdgpu_encoder, connector);
adev->mode_info.bl_encoder = amdgpu_encoder;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index bc5fd8ebab5d..69c5d22f29bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED,
- true, NULL, &gobj);
+ ttm_bo_type_kernel, NULL, &gobj);
if (ret) {
pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
return -ENOMEM;
@@ -168,11 +168,19 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
}
- ret = amdgpu_bo_pin(abo, domain, NULL);
+ ret = amdgpu_bo_pin(abo, domain);
if (ret) {
amdgpu_bo_unreserve(abo);
goto out_unref;
}
+
+ ret = amdgpu_ttm_alloc_gart(&abo->tbo);
+ if (ret) {
+ amdgpu_bo_unreserve(abo);
+ dev_err(adev->dev, "%p bind failed\n", abo);
+ goto out_unref;
+ }
+
ret = amdgpu_bo_kmap(abo, NULL);
amdgpu_bo_unreserve(abo);
if (ret) {
@@ -365,8 +373,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
{
if (adev->mode_info.rfbdev)
- drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
- state);
+ drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
+ state);
}
int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index e74d620d9699..7056925eb386 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -646,7 +646,6 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
- .wait = dma_fence_default_wait,
.release = amdgpu_fence_release,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index dd11b7313ca0..a54d5655a191 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -143,14 +143,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
*/
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
{
- uint64_t gpu_addr;
int r;
r = amdgpu_bo_reserve(adev->gart.robj, false);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin(adev->gart.robj,
- AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
+ r = amdgpu_bo_pin(adev->gart.robj, AMDGPU_GEM_DOMAIN_VRAM);
if (r) {
amdgpu_bo_unreserve(adev->gart.robj);
return r;
@@ -159,7 +157,7 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
if (r)
amdgpu_bo_unpin(adev->gart.robj);
amdgpu_bo_unreserve(adev->gart.robj);
- adev->gart.table_addr = gpu_addr;
+ adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.robj);
return r;
}
@@ -234,7 +232,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
}
t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
for (i = 0; i < pages; i++, p++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
adev->gart.pages[p] = NULL;
@@ -243,7 +241,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
if (!adev->gart.ptr)
continue;
- for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+ for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
@@ -282,7 +280,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
for (i = 0; i < pages; i++) {
page_base = dma_addr[i];
- for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+ for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
}
@@ -319,7 +317,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
for (i = 0; i < pages; i++, p++)
adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 456295c00291..9f9e9dc87da1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -37,6 +37,8 @@ struct amdgpu_bo;
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
+#define AMDGPU_GPU_PAGES_IN_CPU_PAGE (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)
+
struct amdgpu_gart {
u64 table_addr;
struct amdgpu_bo *robj;
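A worked example for the AMDGPU_GPU_PAGES_IN_CPU_PAGE macro introduced
above, assuming the usual 4 KiB GPU page size:

	/* With 4 KiB CPU pages:  PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE == 1.
	 * With 64 KiB CPU pages (some arm64/powerpc configs):
	 *   65536 / 4096 == 16,
	 * so the map/unmap loops in amdgpu_gart.c write 16 consecutive
	 * GPU PTEs for every CPU page. */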
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 5fb156a01774..71792d820ae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -265,7 +265,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
(u32)(0xffffffff & args->in.domains),
- flags, false, resv, &gobj);
+ flags, ttm_bo_type_device, resv, &gobj);
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
if (!r) {
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -317,7 +317,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
- 0, 0, NULL, &gobj);
+ 0, ttm_bo_type_device, NULL, &gobj);
if (r)
return r;
@@ -344,7 +344,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (r)
goto free_pages;
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (r)
@@ -510,7 +510,6 @@ out:
* @adev: amdgpu_device pointer
* @vm: vm to update
* @bo_va: bo_va to update
- * @list: validation list
* @operation: map, unmap or clear
*
* Update the bo_va directly after setting its address. Errors are not
@@ -519,7 +518,6 @@ out:
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo_va *bo_va,
- struct list_head *list,
uint32_t operation)
{
int r;
@@ -612,7 +610,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
abo = gem_to_amdgpu_bo(gobj);
tv.bo = &abo->tbo;
- tv.shared = false;
+ tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
list_add(&tv.head, &list);
} else {
gobj = NULL;
@@ -673,7 +671,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
break;
}
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
- amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
+ amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
args->operation);
error_backoff:
@@ -768,7 +766,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
amdgpu_display_supported_domains(adev));
r = amdgpu_gem_object_create(adev, args->size, 0, domain,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- false, NULL, &gobj);
+ ttm_bo_type_device, NULL, &gobj);
if (r)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 893c2490b783..bb5a47a45790 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -105,8 +105,25 @@ struct amdgpu_gmc {
/* protects concurrent invalidation */
spinlock_t invalidate_lock;
bool translate_further;
+ struct kfd_vm_fault_info *vm_fault_info;
+ atomic_t vm_fault_info_updated;
const struct amdgpu_gmc_funcs *gmc_funcs;
};
+/**
+ * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
+ *
+ * @gmc: amdgpu_gmc structure pointer
+ *
+ * Returns:
+ * True if full VRAM is visible through the BAR
+ */
+static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
+{
+ WARN_ON(gmc->real_vram_size < gmc->visible_vram_size);
+
+ return (gmc->real_vram_size == gmc->visible_vram_size);
+}
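A hedged usage sketch for the new helper; the caller and its fallback
policy below are illustrative, not part of this patch:

	/* Prefer GTT for CPU-accessible buffers when only part of VRAM
	 * is visible through the BAR. */
	static u32 example_pick_domain(struct amdgpu_device *adev)
	{
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc))
			return AMDGPU_GEM_DOMAIN_GTT;
		return AMDGPU_GEM_DOMAIN_VRAM;
	}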
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 7aaa263ad8c7..5518e623fed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -139,7 +139,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* ring tests don't use a job */
if (job) {
vm = job->vm;
- fence_ctx = job->fence_ctx;
+ fence_ctx = job->base.s_fence->scheduled.context;
} else {
vm = NULL;
fence_ctx = 0;
@@ -353,7 +353,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
- ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
tmo = tmo_mm;
else
tmo = tmo_gfx;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index a1c78f90eadf..3a072a7a39f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -578,11 +578,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
}
}
-
- adev->vm_manager.fence_context =
- dma_fence_context_alloc(AMDGPU_MAX_RINGS);
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
- adev->vm_manager.seqno[i] = 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 3a5ca462abf0..1abf5b5bac9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -25,6 +25,23 @@
* Alex Deucher
* Jerome Glisse
*/
+
+/**
+ * DOC: Interrupt Handling
+ *
+ * Interrupts generated within GPU hardware raise interrupt requests that are
+ * passed to the amdgpu IRQ handler, which is responsible for detecting the
+ * source and type of the interrupt and dispatching the matching handlers. If
+ * handling an interrupt requires calling kernel functions that may sleep,
+ * processing is dispatched to work handlers.
+ *
+ * If MSI functionality is not disabled by the msi module parameter, MSI
+ * support will be enabled.
+ *
+ * For GPU interrupt sources that may be driven by another driver, IRQ domain
+ * support is used (with mapping between virtual and hardware IRQs).
+ */
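A minimal sketch of the defer-to-work pattern described above; the
example_dev structure and function names are illustrative:

	/* Top half: runs in IRQ context and must not sleep, so it only
	 * schedules the work item. */
	static irqreturn_t example_irq_handler(int irq, void *arg)
	{
		struct example_dev *edev = arg;

		schedule_work(&edev->hotplug_work);
		return IRQ_HANDLED;
	}

	/* Bottom half: runs in process context and may take mutexes. */
	static void example_hotplug_work(struct work_struct *work)
	{
		struct example_dev *edev =
			container_of(work, struct example_dev, hotplug_work);

		mutex_lock(&edev->mode_lock);
		/* walk connectors, then send a DRM hotplug event */
		mutex_unlock(&edev->mode_lock);
	}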
+
#include <linux/irq.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -43,19 +60,21 @@
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
/**
- * amdgpu_hotplug_work_func - display hotplug work handler
+ * amdgpu_hotplug_work_func - work handler for display hotplug event
*
- * @work: work struct
+ * @work: work struct pointer
*
- * This is the hot plug event work handler (all asics).
- * The work gets scheduled from the irq handler if there
- * was a hot plug interrupt. It walks the connector table
- * and calls the hotplug handler for each one, then sends
- * a drm hotplug event to alert userspace.
+ * This is the hotplug event work handler (all ASICs).
+ * The work gets scheduled from the IRQ handler if there
+ * was a hotplug interrupt. It walks through the connector table
+ * and calls hotplug handler for each connector. After this, it sends
+ * a DRM hotplug event to alert userspace.
+ *
+ * This design approach is required in order to defer hotplug event handling
+ * from the IRQ handler to a work handler, because the hotplug handler has to
+ * use mutexes, which cannot be locked in an IRQ handler (since &mutex_lock
+ * may sleep).
*/
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
@@ -74,13 +93,12 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
}
/**
- * amdgpu_irq_reset_work_func - execute gpu reset
+ * amdgpu_irq_reset_work_func - execute GPU reset
*
- * @work: work struct
+ * @work: work struct pointer
*
- * Execute scheduled gpu reset (cayman+).
- * This function is called when the irq handler
- * thinks we need a gpu reset.
+ * Execute scheduled GPU reset (Cayman+).
+ * This function is called when the IRQ handler thinks we need a GPU reset.
*/
static void amdgpu_irq_reset_work_func(struct work_struct *work)
{
@@ -91,7 +109,13 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
amdgpu_device_gpu_recover(adev, NULL, false);
}
-/* Disable *all* interrupts */
+/**
+ * amdgpu_irq_disable_all - disable *all* interrupts
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Disable all types of interrupts from all sources.
+ */
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
unsigned long irqflags;
@@ -123,11 +147,15 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_handler - irq handler
+ * amdgpu_irq_handler - IRQ handler
+ *
+ * @irq: IRQ number (unused)
+ * @arg: pointer to DRM device
*
- * @int irq, void *arg: args
+ * IRQ handler for the amdgpu driver (all ASICs).
*
- * This is the irq handler for the amdgpu driver (all asics).
+ * Returns:
+ * result of handling the IRQ, as defined by &irqreturn_t
*/
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
@@ -142,18 +170,18 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
}
/**
- * amdgpu_msi_ok - asic specific msi checks
+ * amdgpu_msi_ok - check whether MSI functionality is enabled
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu device pointer (unused)
+ *
+ * Checks whether MSI functionality has been disabled via module parameter
+ * (all ASICs).
*
- * Handles asic specific MSI checks to determine if
- * MSIs should be enabled on a particular chip (all asics).
- * Returns true if MSIs should be enabled, false if MSIs
- * should not be enabled.
+ * Returns:
+ * *true* if MSIs are allowed to be enabled or *false* otherwise
*/
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
- /* force MSI on */
if (amdgpu_msi == 1)
return true;
else if (amdgpu_msi == 0)
@@ -163,12 +191,15 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_init - init driver interrupt info
+ * amdgpu_irq_init - initialize interrupt handling
*
* @adev: amdgpu device pointer
*
- * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
- * Returns 0 for success, error for failure.
+ * Sets up work functions for hotplug and reset interrupts, enables MSI
+ * functionality, initializes vblank, hotplug and reset interrupt handling.
+ *
+ * Returns:
+ * 0 on success or error code on failure
*/
int amdgpu_irq_init(struct amdgpu_device *adev)
{
@@ -176,7 +207,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
spin_lock_init(&adev->irq.lock);
- /* enable msi */
+ /* Enable MSI if not disabled by module parameter */
adev->irq.msi_enabled = false;
if (amdgpu_msi_ok(adev)) {
@@ -189,7 +220,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (!amdgpu_device_has_dc_support(adev)) {
if (!adev->enable_virtual_display)
- /* Disable vblank irqs aggressively for power-saving */
+ /* Disable vblank IRQs aggressively for power-saving */
/* XXX: can this be enabled for DC? */
adev->ddev->vblank_disable_immediate = true;
@@ -197,7 +228,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (r)
return r;
- /* pre DCE11 */
+ /* Pre-DCE11 */
INIT_WORK(&adev->hotplug_work,
amdgpu_hotplug_work_func);
}
@@ -220,11 +251,13 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_fini - tear down driver interrupt info
+ * amdgpu_irq_fini - shut down interrupt handling
*
* @adev: amdgpu device pointer
*
- * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
+ * Tears down work functions for hotplug and reset interrupts, disables MSI
+ * functionality, shuts down vblank, hotplug and reset interrupt handling,
+ * turns off interrupts from all sources (all ASICs).
*/
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
@@ -264,12 +297,17 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_add_id - register irq source
+ * amdgpu_irq_add_id - register IRQ source
*
* @adev: amdgpu device pointer
- * @src_id: source id for this source
- * @source: irq source
+ * @client_id: client id
+ * @src_id: source id
+ * @source: IRQ source pointer
+ *
+ * Registers IRQ source on a client.
*
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_add_id(struct amdgpu_device *adev,
unsigned client_id, unsigned src_id,
@@ -312,12 +350,12 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
}
/**
- * amdgpu_irq_dispatch - dispatch irq to IP blocks
+ * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
*
* @adev: amdgpu device pointer
- * @entry: interrupt vector
+ * @entry: interrupt vector pointer
*
- * Dispatches the irq to the different IP blocks
+ * Dispatches IRQ to IP blocks.
*/
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
@@ -361,13 +399,13 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
}
/**
- * amdgpu_irq_update - update hw interrupt state
+ * amdgpu_irq_update - update hardware interrupt state
*
* @adev: amdgpu device pointer
- * @src: interrupt src you want to enable
- * @type: type of interrupt you want to update
+ * @src: interrupt source pointer
+ * @type: type of interrupt
*
- * Updates the interrupt state for a specific src (all asics).
+ * Updates interrupt state for the specific source (all ASICs).
*/
int amdgpu_irq_update(struct amdgpu_device *adev,
struct amdgpu_irq_src *src, unsigned type)
@@ -378,7 +416,7 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
spin_lock_irqsave(&adev->irq.lock, irqflags);
- /* we need to determine after taking the lock, otherwise
+ /* We need to determine after taking the lock, otherwise
we might disable just enabled interrupts again */
if (amdgpu_irq_enabled(adev, src, type))
state = AMDGPU_IRQ_STATE_ENABLE;
@@ -390,6 +428,14 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
return r;
}
+/**
+ * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Updates state of all types of interrupts on all sources on resume after
+ * reset.
+ */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
int i, j, k;
@@ -413,10 +459,13 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
* amdgpu_irq_get - enable interrupt
*
* @adev: amdgpu device pointer
- * @src: interrupt src you want to enable
- * @type: type of interrupt you want to enable
+ * @src: interrupt source pointer
+ * @type: type of interrupt
*
- * Enables the interrupt type for a specific src (all asics).
+ * Enables specified type of interrupt on the specified source (all ASICs).
+ *
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
@@ -440,10 +489,13 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
* amdgpu_irq_put - disable interrupt
*
* @adev: amdgpu device pointer
- * @src: interrupt src you want to disable
- * @type: type of interrupt you want to disable
+ * @src: interrupt source pointer
+ * @type: type of interrupt
+ *
+ * Disables specified type of interrupt on the specified source (all ASICs).
*
- * Disables the interrupt type for a specific src (all asics).
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
@@ -464,12 +516,17 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
}
/**
- * amdgpu_irq_enabled - test if irq is enabled or not
+ * amdgpu_irq_enabled - check whether interrupt is enabled or not
*
* @adev: amdgpu device pointer
- * @idx: interrupt src you want to test
+ * @src: interrupt source pointer
+ * @type: type of interrupt
*
- * Tests if the given interrupt source is enabled or not
+ * Checks whether the given type of interrupt is enabled on the given source.
+ *
+ * Returns:
+ * *true* if interrupt is enabled, *false* if interrupt is disabled or on
+ * invalid parameters
*/
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
@@ -486,7 +543,7 @@ bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
return !!atomic_read(&src->enabled_types[type]);
}
-/* gen irq */
+/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
/* XXX */
@@ -497,12 +554,26 @@ static void amdgpu_irq_unmask(struct irq_data *irqd)
/* XXX */
}
+/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
.name = "amdgpu-ih",
.irq_mask = amdgpu_irq_mask,
.irq_unmask = amdgpu_irq_unmask,
};
+/**
+ * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
+ *
+ * @d: amdgpu IRQ domain pointer (unused)
+ * @irq: virtual IRQ number
+ * @hwirq: hardware IRQ number
+ *
+ * The current implementation assigns a simple interrupt handler to the given
+ * virtual IRQ.
+ *
+ * Returns:
+ * 0 on success or error code otherwise
+ */
static int amdgpu_irqdomain_map(struct irq_domain *d,
unsigned int irq, irq_hw_number_t hwirq)
{
@@ -514,17 +585,21 @@ static int amdgpu_irqdomain_map(struct irq_domain *d,
return 0;
}
+/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
.map = amdgpu_irqdomain_map,
};
/**
- * amdgpu_irq_add_domain - create a linear irq domain
+ * amdgpu_irq_add_domain - create a linear IRQ domain
*
* @adev: amdgpu device pointer
*
- * Create an irq domain for GPU interrupt sources
+ * Creates an IRQ domain for GPU interrupt sources
* that may be driven by another driver (e.g., ACP).
+ *
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
@@ -539,11 +614,11 @@ int amdgpu_irq_add_domain(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_remove_domain - remove the irq domain
+ * amdgpu_irq_remove_domain - remove the IRQ domain
*
* @adev: amdgpu device pointer
*
- * Remove the irq domain for GPU interrupt sources
+ * Removes the IRQ domain for GPU interrupt sources
* that may be driven by another driver (e.g., ACP).
*/
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
@@ -555,16 +630,17 @@ void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_create_mapping - create a mapping between a domain irq and a
- * Linux irq
+ * amdgpu_irq_create_mapping - create mapping between a domain IRQ and a Linux IRQ
*
* @adev: amdgpu device pointer
* @src_id: IH source id
*
- * Create a mapping between a domain irq (GPU IH src id) and a Linux irq
+ * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ
* Use this for components that generate a GPU interrupt, but are driven
* by a different driver (e.g., ACP).
- * Returns the Linux irq.
+ *
+ * Returns:
+ * Linux IRQ
*/
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
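A hedged sketch of how a component driven by another driver (such as ACP)
might consume this mapping; the requesting device, handler and data below
are illustrative:

	unsigned int virq = amdgpu_irq_create_mapping(adev, src_id);

	if (virq)
		/* The returned Linux IRQ can be requested like any other. */
		r = devm_request_irq(dev, virq, example_acp_handler, 0,
				     "example-acp", example_data);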
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 2bd56760c744..391e2f7c03aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -30,14 +30,14 @@
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
- struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+ struct amdgpu_job *job = to_amdgpu_job(s_job);
- DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
- job->base.sched->name,
- atomic_read(&job->ring->fence_drv.last_seq),
- job->ring->fence_drv.sync_seq);
+ DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
+ job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+ ring->fence_drv.sync_seq);
- amdgpu_device_gpu_recover(job->adev, job, false);
+ amdgpu_device_gpu_recover(ring->adev, job, false);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -54,7 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
if (!*job)
return -ENOMEM;
- (*job)->adev = adev;
+ /*
+ * Initialize the scheduler to at least some ring so that we always
+ * have a pointer to adev.
+ */
+ (*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
@@ -86,6 +90,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
+ struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
struct dma_fence *f;
unsigned i;
@@ -93,14 +98,15 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_ib_free(job->adev, &job->ibs[i], f);
+ amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
- struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+ struct amdgpu_job *job = to_amdgpu_job(s_job);
- amdgpu_ring_priority_put(job->ring, s_job->s_priority);
+ amdgpu_ring_priority_put(ring, s_job->s_priority);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
@@ -117,50 +123,68 @@ void amdgpu_job_free(struct amdgpu_job *job)
kfree(job);
}
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct drm_sched_entity *entity, void *owner,
- struct dma_fence **f)
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ void *owner, struct dma_fence **f)
{
+ enum drm_sched_priority priority;
+ struct amdgpu_ring *ring;
int r;
- job->ring = ring;
if (!f)
return -EINVAL;
- r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
+ r = drm_sched_job_init(&job->base, entity, owner);
if (r)
return r;
job->owner = owner;
- job->fence_ctx = entity->fence_context;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
- amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+ priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
+ ring = to_amdgpu_ring(entity->rq->sched);
+ amdgpu_ring_priority_get(ring, priority);
+
+ return 0;
+}
+
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ struct dma_fence **fence)
+{
+ int r;
+
+ job->base.sched = &ring->sched;
+	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
+	if (r)
+		return r;
+
+	/* Take the fence reference only after scheduling succeeded. */
+	job->fence = dma_fence_get(*fence);
+
+ amdgpu_job_free(job);
return 0;
}
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity)
{
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
+ struct dma_fence *fence;
bool explicit = false;
int r;
- struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
+ fence = amdgpu_sync_get_fence(&job->sync, &explicit);
if (fence && explicit) {
if (drm_sched_dependency_optimized(fence, s_entity)) {
- r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+ r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
+ fence, false);
if (r)
- DRM_ERROR("Error adding fence to sync (%d)\n", r);
+ DRM_ERROR("Error adding fence (%d)\n", r);
}
}
while (fence == NULL && vm && !job->vmid) {
- struct amdgpu_ring *ring = job->ring;
-
r = amdgpu_vmid_grab(vm, ring, &job->sync,
&job->base.s_fence->finished,
job);
@@ -175,30 +199,25 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
+ struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
struct dma_fence *fence = NULL, *finished;
- struct amdgpu_device *adev;
struct amdgpu_job *job;
int r;
- if (!sched_job) {
- DRM_ERROR("job is null\n");
- return NULL;
- }
job = to_amdgpu_job(sched_job);
finished = &job->base.s_fence->finished;
- adev = job->adev;
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
- if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+ if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
if (finished->error < 0) {
DRM_INFO("Skip scheduling IBs!\n");
} else {
- r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
+ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
&fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
new file mode 100644
index 000000000000..57cfe78a262b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_JOB_H__
+#define __AMDGPU_JOB_H__
+
+/* bit set means command submit involves a preamble IB */
+#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0)
+/* bit set means preamble IB is first presented in belonging context */
+#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1)
+/* bit set means context switch occured */
+#define AMDGPU_HAVE_CTX_SWITCH (1 << 2)
+
+#define to_amdgpu_job(sched_job) \
+ container_of((sched_job), struct amdgpu_job, base)
+
+struct amdgpu_fence;
+
+struct amdgpu_job {
+ struct drm_sched_job base;
+ struct amdgpu_vm *vm;
+ struct amdgpu_sync sync;
+ struct amdgpu_sync sched_sync;
+ struct amdgpu_ib *ibs;
+ struct dma_fence *fence; /* the hw fence */
+ uint32_t preamble_status;
+ uint32_t num_ibs;
+ void *owner;
+ bool vm_needs_flush;
+ uint64_t vm_pd_addr;
+ unsigned vmid;
+ unsigned pasid;
+ uint32_t gds_base, gds_size;
+ uint32_t gws_base, gws_size;
+ uint32_t oa_base, oa_size;
+ uint32_t vram_lost_counter;
+
+ /* user fence handling */
+ uint64_t uf_addr;
+ uint64_t uf_sequence;
+
+};
+
+int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+ struct amdgpu_job **job, struct amdgpu_vm *vm);
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+ struct amdgpu_job **job);
+
+void amdgpu_job_free_resources(struct amdgpu_job *job);
+void amdgpu_job_free(struct amdgpu_job *job);
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ void *owner, struct dma_fence **f);
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ struct dma_fence **fence);
+#endif
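A hedged sketch of the reworked submission flow after this change; the
entity and owner are assumed to exist in the caller, and error handling
is trimmed:

	struct amdgpu_job *job;
	struct dma_fence *f;
	int r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		return r;
	/* ... fill job->ibs[0] ... */
	r = amdgpu_job_submit(job, &entity, owner, &f);

Note that no ring is passed in any more: it is derived from the scheduler
entity, or set explicitly via amdgpu_job_submit_direct() for ring tests
and similar direct submissions.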
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 91517b166a3b..bd98cc5fb97b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -328,61 +328,71 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
+ ring_mask |= adev->gfx.gfx_ring[i].ready << i;
+ ib_start_alignment = 32;
+ ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
+ ring_mask |= adev->gfx.compute_ring[i].ready << i;
+ ib_start_alignment = 32;
+ ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
- ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 1;
+ ring_mask |= adev->sdma.instance[i].ring.ready << i;
+ ib_start_alignment = 256;
+ ib_size_alignment = 4;
break;
case AMDGPU_HW_IP_UVD:
type = AMD_IP_BLOCK_TYPE_UVD;
- for (i = 0; i < adev->uvd.num_uvd_inst; i++)
- ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 16;
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
+ ring_mask |= adev->uvd.inst[i].ring.ready;
+ }
+ ib_start_alignment = 64;
+ ib_size_alignment = 64;
break;
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
- ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+ ring_mask |= adev->vce.ring[i].ready << i;
+ ib_start_alignment = 4;
ib_size_alignment = 1;
break;
case AMDGPU_HW_IP_UVD_ENC:
type = AMD_IP_BLOCK_TYPE_UVD;
- for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
for (j = 0; j < adev->uvd.num_enc_rings; j++)
- ring_mask |=
- ((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
- (j + i * adev->uvd.num_enc_rings));
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 1;
+ ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
+ }
+ ib_start_alignment = 64;
+ ib_size_alignment = 64;
break;
case AMDGPU_HW_IP_VCN_DEC:
type = AMD_IP_BLOCK_TYPE_VCN;
- ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+ ring_mask = adev->vcn.ring_dec.ready;
+ ib_start_alignment = 16;
ib_size_alignment = 16;
break;
case AMDGPU_HW_IP_VCN_ENC:
type = AMD_IP_BLOCK_TYPE_VCN;
for (i = 0; i < adev->vcn.num_enc_rings; i++)
- ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+ ring_mask |= adev->vcn.ring_enc[i].ready << i;
+ ib_start_alignment = 64;
ib_size_alignment = 1;
break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+ ring_mask = adev->vcn.ring_jpeg.ready;
+ ib_start_alignment = 16;
+ ib_size_alignment = 16;
+ break;
default:
return -EINVAL;
}
@@ -427,6 +437,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
break;
case AMDGPU_HW_IP_VCN_DEC:
case AMDGPU_HW_IP_VCN_ENC:
+ case AMDGPU_HW_IP_VCN_JPEG:
type = AMD_IP_BLOCK_TYPE_VCN;
break;
default:
@@ -494,13 +505,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_INFO_VRAM_GTT: {
struct drm_amdgpu_info_vram_gtt vram_gtt;
- vram_gtt.vram_size = adev->gmc.real_vram_size;
- vram_gtt.vram_size -= adev->vram_pin_size;
- vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
- vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
+ vram_gtt.vram_size = adev->gmc.real_vram_size -
+ atomic64_read(&adev->vram_pin_size);
+ vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size);
vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
vram_gtt.gtt_size *= PAGE_SIZE;
- vram_gtt.gtt_size -= adev->gart_pin_size;
+ vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
return copy_to_user(out, &vram_gtt,
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
}
@@ -509,17 +520,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->gmc.real_vram_size;
- mem.vram.usable_heap_size =
- adev->gmc.real_vram_size - adev->vram_pin_size;
+ mem.vram.usable_heap_size = adev->gmc.real_vram_size -
+ atomic64_read(&adev->vram_pin_size);
mem.vram.heap_usage =
amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
adev->gmc.visible_vram_size;
- mem.cpu_accessible_vram.usable_heap_size =
- adev->gmc.visible_vram_size -
- (adev->vram_pin_size - adev->invisible_pin_size);
+ mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size);
mem.cpu_accessible_vram.heap_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.cpu_accessible_vram.max_allocation =
@@ -527,8 +537,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
mem.gtt.total_heap_size *= PAGE_SIZE;
- mem.gtt.usable_heap_size = mem.gtt.total_heap_size
- - adev->gart_pin_size;
+ mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
+ atomic64_read(&adev->gart_pin_size);
mem.gtt.heap_usage =
amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
@@ -930,7 +940,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
return;
pm_runtime_get_sync(dev->dev);
- amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
if (adev->asic_type != CHIP_RAVEN) {
amdgpu_uvd_free_handles(adev, file_priv);
@@ -958,7 +967,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_bo_unref(&pd);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
- amdgpu_bo_list_free(list);
+ amdgpu_bo_list_put(list);
idr_destroy(&fpriv->bo_list_handles);
mutex_destroy(&fpriv->bo_list_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 83e344fbb50a..a365ea2383d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -28,6 +28,21 @@
* Christian König <christian.koenig@amd.com>
*/
+/**
+ * DOC: MMU Notifier
+ *
+ * For coherent userptr handling, the driver registers an MMU notifier to be
+ * informed about updates to the page tables of a process.
+ *
+ * When somebody tries to invalidate the page tables, we block the update
+ * until all operations on the pages in question are completed; those pages
+ * are then marked as accessed, and also as dirty if the access wasn't read
+ * only.
+ *
+ * New command submissions using the userptrs in question are delayed until
+ * all page table invalidations are completed and we once more see a coherent
+ * process address space.
+ */
+
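A hedged sketch of the lifecycle this implies for a userptr BO (the
functions are from this file; the surrounding flow is illustrative):

	/* At userptr creation: start watching the address range. */
	r = amdgpu_mn_register(bo, userptr_addr);
	if (r)
		return r;

	/* On teardown: stop watching before the BO goes away. */
	amdgpu_mn_unregister(bo);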
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
@@ -38,6 +53,22 @@
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
+/**
+ * struct amdgpu_mn
+ *
+ * @adev: amdgpu device pointer
+ * @mm: process address space
+ * @mn: MMU notifier structure
+ * @type: type of MMU notifier
+ * @work: destruction work item
+ * @node: hash table node to find structure by adev and mn
+ * @lock: rw semaphore protecting the notifier nodes
+ * @objects: interval tree containing amdgpu_mn_nodes
+ * @read_lock: mutex for recursive locking of @lock
+ * @recursion: depth of recursion
+ *
+ * Data for each amdgpu device and process address space.
+ */
struct amdgpu_mn {
/* constant after initialisation */
struct amdgpu_device *adev;
@@ -58,13 +89,21 @@ struct amdgpu_mn {
atomic_t recursion;
};
+/**
+ * struct amdgpu_mn_node
+ *
+ * @it: interval node defining start-last of the affected address range
+ * @bos: list of all BOs in the affected address range
+ *
+ * Manages all BOs which are affected of a certain range of address space.
+ */
struct amdgpu_mn_node {
struct interval_tree_node it;
struct list_head bos;
};
/**
- * amdgpu_mn_destroy - destroy the rmn
+ * amdgpu_mn_destroy - destroy the MMU notifier
*
* @work: previously scheduled work item
*
@@ -72,47 +111,50 @@ struct amdgpu_mn_node {
*/
static void amdgpu_mn_destroy(struct work_struct *work)
{
- struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
- struct amdgpu_device *adev = rmn->adev;
+ struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
+ struct amdgpu_device *adev = amn->adev;
struct amdgpu_mn_node *node, *next_node;
struct amdgpu_bo *bo, *next_bo;
mutex_lock(&adev->mn_lock);
- down_write(&rmn->lock);
- hash_del(&rmn->node);
+ down_write(&amn->lock);
+ hash_del(&amn->node);
rbtree_postorder_for_each_entry_safe(node, next_node,
- &rmn->objects.rb_root, it.rb) {
+ &amn->objects.rb_root, it.rb) {
list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
bo->mn = NULL;
list_del_init(&bo->mn_list);
}
kfree(node);
}
- up_write(&rmn->lock);
+ up_write(&amn->lock);
mutex_unlock(&adev->mn_lock);
- mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
- kfree(rmn);
+ mmu_notifier_unregister_no_release(&amn->mn, amn->mm);
+ kfree(amn);
}
/**
* amdgpu_mn_release - callback to notify about mm destruction
*
* @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
*
* Schedule a work item to lazily destroy our notifier.
*/
static void amdgpu_mn_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
- INIT_WORK(&rmn->work, amdgpu_mn_destroy);
- schedule_work(&rmn->work);
+ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+
+ INIT_WORK(&amn->work, amdgpu_mn_destroy);
+ schedule_work(&amn->work);
}
/**
- * amdgpu_mn_lock - take the write side lock for this mn
+ * amdgpu_mn_lock - take the write side lock for this notifier
+ *
+ * @mn: our notifier
*/
void amdgpu_mn_lock(struct amdgpu_mn *mn)
{
@@ -121,7 +163,9 @@ void amdgpu_mn_lock(struct amdgpu_mn *mn)
}
/**
- * amdgpu_mn_unlock - drop the write side lock for this mn
+ * amdgpu_mn_unlock - drop the write side lock for this notifier
+ *
+ * @mn: our notifier
*/
void amdgpu_mn_unlock(struct amdgpu_mn *mn)
{
@@ -130,40 +174,38 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
}
/**
- * amdgpu_mn_read_lock - take the rmn read lock
- *
- * @rmn: our notifier
+ * amdgpu_mn_read_lock - take the read side lock for this notifier
*
- * Take the rmn read side lock.
+ * @amn: our notifier
*/
-static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
{
- mutex_lock(&rmn->read_lock);
- if (atomic_inc_return(&rmn->recursion) == 1)
- down_read_non_owner(&rmn->lock);
- mutex_unlock(&rmn->read_lock);
+ mutex_lock(&amn->read_lock);
+ if (atomic_inc_return(&amn->recursion) == 1)
+ down_read_non_owner(&amn->lock);
+ mutex_unlock(&amn->read_lock);
}
/**
- * amdgpu_mn_read_unlock - drop the rmn read lock
+ * amdgpu_mn_read_unlock - drop the read side lock for this notifier
*
- * @rmn: our notifier
- *
- * Drop the rmn read side lock.
+ * @amn: our notifier
*/
-static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
{
- if (atomic_dec_return(&rmn->recursion) == 0)
- up_read_non_owner(&rmn->lock);
+ if (atomic_dec_return(&amn->recursion) == 0)
+ up_read_non_owner(&amn->lock);
}
/**
* amdgpu_mn_invalidate_node - unmap all BOs of a node
*
* @node: the node with the BOs to unmap
+ * @start: start of address range affected
+ * @end: end of address range affected
*
- * We block for all BOs and unmap them by move them
- * into system domain again.
+ * Block for operations on BOs to finish and mark pages as accessed and
+ * potentially dirty.
*/
static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
unsigned long start,
@@ -190,27 +232,27 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
* amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
*
* @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
* @start: start of updated range
* @end: end of updated range
*
- * We block for all BOs between start and end to be idle and
- * unmap them by move them into system domain again.
+ * Block for operations on BOs to finish and mark pages as accessed and
+ * potentially dirty.
*/
static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
/* notification is exclusive, but interval is inclusive */
end -= 1;
- amdgpu_mn_read_lock(rmn);
+ amdgpu_mn_read_lock(amn);
- it = interval_tree_iter_first(&rmn->objects, start, end);
+ it = interval_tree_iter_first(&amn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
@@ -225,7 +267,7 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
* amdgpu_mn_invalidate_range_start_hsa - callback to notify about mm change
*
* @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
* @start: start of updated range
* @end: end of updated range
*
@@ -238,15 +280,15 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
/* notification is exclusive, but interval is inclusive */
end -= 1;
- amdgpu_mn_read_lock(rmn);
+ amdgpu_mn_read_lock(amn);
- it = interval_tree_iter_first(&rmn->objects, start, end);
+ it = interval_tree_iter_first(&amn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
struct amdgpu_bo *bo;
@@ -268,7 +310,7 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
* amdgpu_mn_invalidate_range_end - callback to notify about mm change
*
* @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
* @start: start of updated range
* @end: end of updated range
*
@@ -279,9 +321,9 @@ static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
- amdgpu_mn_read_unlock(rmn);
+ amdgpu_mn_read_unlock(amn);
}
static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
@@ -315,7 +357,7 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
enum amdgpu_mn_type type)
{
struct mm_struct *mm = current->mm;
- struct amdgpu_mn *rmn;
+ struct amdgpu_mn *amn;
unsigned long key = AMDGPU_MN_KEY(mm, type);
int r;
@@ -325,41 +367,41 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
return ERR_PTR(-EINTR);
}
- hash_for_each_possible(adev->mn_hash, rmn, node, key)
- if (AMDGPU_MN_KEY(rmn->mm, rmn->type) == key)
+ hash_for_each_possible(adev->mn_hash, amn, node, key)
+ if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
goto release_locks;
- rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
- if (!rmn) {
- rmn = ERR_PTR(-ENOMEM);
+ amn = kzalloc(sizeof(*amn), GFP_KERNEL);
+ if (!amn) {
+ amn = ERR_PTR(-ENOMEM);
goto release_locks;
}
- rmn->adev = adev;
- rmn->mm = mm;
- init_rwsem(&rmn->lock);
- rmn->type = type;
- rmn->mn.ops = &amdgpu_mn_ops[type];
- rmn->objects = RB_ROOT_CACHED;
- mutex_init(&rmn->read_lock);
- atomic_set(&rmn->recursion, 0);
+ amn->adev = adev;
+ amn->mm = mm;
+ init_rwsem(&amn->lock);
+ amn->type = type;
+ amn->mn.ops = &amdgpu_mn_ops[type];
+ amn->objects = RB_ROOT_CACHED;
+ mutex_init(&amn->read_lock);
+ atomic_set(&amn->recursion, 0);
- r = __mmu_notifier_register(&rmn->mn, mm);
+ r = __mmu_notifier_register(&amn->mn, mm);
if (r)
- goto free_rmn;
+ goto free_amn;
- hash_add(adev->mn_hash, &rmn->node, AMDGPU_MN_KEY(mm, type));
+ hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));
release_locks:
up_write(&mm->mmap_sem);
mutex_unlock(&adev->mn_lock);
- return rmn;
+ return amn;
-free_rmn:
+free_amn:
up_write(&mm->mmap_sem);
mutex_unlock(&adev->mn_lock);
- kfree(rmn);
+ kfree(amn);
return ERR_PTR(r);
}
@@ -379,14 +421,14 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
enum amdgpu_mn_type type =
bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
- struct amdgpu_mn *rmn;
+ struct amdgpu_mn *amn;
struct amdgpu_mn_node *node = NULL, *new_node;
struct list_head bos;
struct interval_tree_node *it;
- rmn = amdgpu_mn_get(adev, type);
- if (IS_ERR(rmn))
- return PTR_ERR(rmn);
+ amn = amdgpu_mn_get(adev, type);
+ if (IS_ERR(amn))
+ return PTR_ERR(amn);
new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
if (!new_node)
@@ -394,12 +436,12 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
INIT_LIST_HEAD(&bos);
- down_write(&rmn->lock);
+ down_write(&amn->lock);
- while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
+ while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
kfree(node);
node = container_of(it, struct amdgpu_mn_node, it);
- interval_tree_remove(&node->it, &rmn->objects);
+ interval_tree_remove(&node->it, &amn->objects);
addr = min(it->start, addr);
end = max(it->last, end);
list_splice(&node->bos, &bos);
@@ -410,7 +452,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
else
kfree(new_node);
- bo->mn = rmn;
+ bo->mn = amn;
node->it.start = addr;
node->it.last = end;
@@ -418,9 +460,9 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
list_splice(&bos, &node->bos);
list_add(&bo->mn_list, &node->bos);
- interval_tree_insert(&node->it, &rmn->objects);
+ interval_tree_insert(&node->it, &amn->objects);
- up_write(&rmn->lock);
+ up_write(&amn->lock);
return 0;
}
@@ -435,18 +477,18 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_mn *rmn;
+ struct amdgpu_mn *amn;
struct list_head *head;
mutex_lock(&adev->mn_lock);
- rmn = bo->mn;
- if (rmn == NULL) {
+ amn = bo->mn;
+ if (amn == NULL) {
mutex_unlock(&adev->mn_lock);
return;
}
- down_write(&rmn->lock);
+ down_write(&amn->lock);
/* save the next list entry for later */
head = bo->mn_list.next;
@@ -456,12 +498,13 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
if (list_empty(head)) {
struct amdgpu_mn_node *node;
+
node = container_of(head, struct amdgpu_mn_node, bos);
- interval_tree_remove(&node->it, &rmn->objects);
+ interval_tree_remove(&node->it, &amn->objects);
kfree(node);
}
- up_write(&rmn->lock);
+ up_write(&amn->lock);
mutex_unlock(&adev->mn_lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3526efa8960e..b0e14a3d54ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -38,7 +38,20 @@
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
-static bool amdgpu_need_backup(struct amdgpu_device *adev)
+/**
+ * DOC: amdgpu_object
+ *
+ * This defines the interfaces to operate on an &amdgpu_bo buffer object which
+ * represents memory used by the driver (VRAM, system memory, etc.). The
+ * driver provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these
+ * interfaces to create/destroy/set buffer objects which are then managed by
+ * the kernel TTM memory manager.
+ * The interfaces are also used internally by kernel clients, including gfx,
+ * uvd, etc., for kernel-managed allocations used by the GPU.
+ *
+ */
+
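A hedged example of the kernel-internal allocation path documented above,
using helpers declared in this file (size and domain are illustrative):

	struct amdgpu_bo *bo = NULL;
	u64 gpu_addr;
	void *cpu_ptr;
	int r;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &bo, &gpu_addr, &cpu_ptr);
	if (r)
		return r;
	/* ... use cpu_ptr and gpu_addr ... */
	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);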
+static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
return false;
@@ -50,11 +63,35 @@ static bool amdgpu_need_backup(struct amdgpu_device *adev)
return true;
}
-static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+/**
+ * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
+ *
+ * @bo: &amdgpu_bo buffer object
+ *
+ * This function is called when a BO stops being pinned, and updates the
+ * &amdgpu_device pin_size values accordingly.
+ */
+static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
+ atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
+ &adev->visible_pin_size);
+ } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
+ }
+}
+
+static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ if (bo->pin_count > 0)
+ amdgpu_bo_subtract_pin_size(bo);
+
if (bo->kfd_bo)
amdgpu_amdkfd_unreserve_system_memory_limit(bo);
@@ -73,14 +110,32 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
+/**
+ * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
+ * @bo: buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+ * an &amdgpu_bo.
+ *
+ * Returns:
+ * true if the object belongs to &amdgpu_bo, false if not.
+ */
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
- if (bo->destroy == &amdgpu_ttm_bo_destroy)
+ if (bo->destroy == &amdgpu_bo_destroy)
return true;
return false;
}
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+/**
+ * amdgpu_bo_placement_from_domain - set buffer's placement
+ * @abo: &amdgpu_bo buffer object whose placement is to be set
+ * @domain: requested domain
+ *
+ * Sets buffer's placement according to requested domain and the buffer's
+ * flags.
+ */
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct ttm_placement *placement = &abo->placement;
@@ -161,6 +216,8 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
+ BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
+
placement->num_placement = c;
placement->placement = places;
@@ -184,7 +241,8 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
*
* Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
*
- * Returns 0 on success, negative error code otherwise.
+ * Returns:
+ * 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align,
@@ -220,22 +278,33 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
goto error_free;
}
- r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+ r = amdgpu_bo_pin(*bo_ptr, domain);
if (r) {
dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
goto error_unreserve;
}
+ r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
+ if (r) {
+ dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
+ goto error_unpin;
+ }
+
+ if (gpu_addr)
+ *gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
+
if (cpu_addr) {
r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
- goto error_unreserve;
+ goto error_unpin;
}
}
return 0;
+error_unpin:
+ amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
amdgpu_bo_unreserve(*bo_ptr);
@@ -261,7 +330,8 @@ error_free:
*
* Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
*
- * Returns 0 on success, negative error code otherwise.
+ * Returns:
+ * 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
@@ -285,6 +355,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
* amdgpu_bo_free_kernel - free BO for kernel use
*
* @bo: amdgpu BO to free
+ * @gpu_addr: pointer to where the BO's GPU memory space address was stored
+ * @cpu_addr: pointer to where the BO's CPU memory space address was stored
*
* unmaps and unpin a BO for kernel internal use.
*/
@@ -418,17 +490,17 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
#endif
bo->tbo.bdev = &adev->mman.bdev;
- amdgpu_ttm_placement_from_domain(bo, bp->domain);
+ amdgpu_bo_placement_from_domain(bo, bp->domain);
if (bp->type == ttm_bo_type_kernel)
bo->tbo.priority = 1;
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
&bo->placement, page_align, &ctx, acc_size,
- NULL, bp->resv, &amdgpu_ttm_bo_destroy);
+ NULL, bp->resv, &amdgpu_bo_destroy);
if (unlikely(r != 0))
return r;
- if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
bo->tbo.mem.mem_type == TTM_PL_VRAM &&
bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
@@ -498,6 +570,20 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
return r;
}
+/**
+ * amdgpu_bo_create - create an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @bo_ptr: pointer to the buffer object pointer
+ *
+ * Creates an &amdgpu_bo buffer object; and if requested, also creates a
+ * shadow object.
+ * The shadow object is used to back up the original buffer object, and is
+ * always placed in GTT.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_create(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr)
@@ -510,7 +596,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (r)
return r;
- if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
+ if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
if (!bp->resv)
WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
NULL));
@@ -527,6 +613,21 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
}
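
A sketch of a plain (non-shadowed) allocation through this entry point; the
field values are illustrative assumptions:

	struct amdgpu_bo_param bp = {
		.size = 4096,
		.byte_align = PAGE_SIZE,
		.domain = AMDGPU_GEM_DOMAIN_GTT,
		.flags = 0,
		.type = ttm_bo_type_device,
		.resv = NULL,
	};
	struct amdgpu_bo *bo;
	int r;

	/* no AMDGPU_GEM_CREATE_SHADOW in bp.flags, so no shadow is created */
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r)
		return r;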
+/**
+ * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @ring: amdgpu_ring for the engine handling the buffer operations
+ * @bo: &amdgpu_bo buffer to be backed up
+ * @resv: reservation object with embedded fence
+ * @fence: dma_fence associated with the operation
+ * @direct: whether to submit the job directly
+ *
+ * Copies an &amdgpu_bo buffer object to its shadow object.
+ * Not used for now.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
@@ -559,6 +660,18 @@ err:
return r;
}
+/**
+ * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
+ * @bo: pointer to the buffer object
+ *
+ * Sets placement according to the domain, and changes the placement and
+ * caching policy of the buffer object accordingly.
+ * This is used for validating shadow bos. It calls ttm_bo_validate() to
+ * make sure the buffer is resident where it needs to be.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
@@ -571,7 +684,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
domain = bo->preferred_domains;
retry:
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
@@ -581,6 +694,22 @@ retry:
return r;
}
+/**
+ * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @ring: amdgpu_ring for the engine handling the buffer operations
+ * @bo: &amdgpu_bo buffer to be restored
+ * @resv: reservation object with embedded fence
+ * @fence: dma_fence associated with the operation
+ * @direct: whether to submit the job directly
+ *
+ * Copies a buffer object's shadow content back to the object.
+ * This is used for recovering a buffer from its shadow in case of a gpu
+ * reset where vram context may be lost.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
@@ -613,6 +742,17 @@ err:
return r;
}
+/**
+ * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be mapped
+ * @ptr: kernel virtual address to be returned
+ *
+ * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
+ * amdgpu_bo_kptr() to get the kernel virtual address.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
void *kptr;
@@ -643,6 +783,15 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
+/**
+ * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
+ *
+ * Returns:
+ * the virtual address of a buffer object area.
+ */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
bool is_iomem;
@@ -650,21 +799,42 @@ void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}
+/**
+ * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be unmapped
+ *
+ * Unmaps a kernel map set up by amdgpu_bo_kmap().
+ */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
if (bo->kmap.bo)
ttm_bo_kunmap(&bo->kmap);
}
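
A sketch of the map/use/unmap cycle, assuming the caller already holds the
BO's reservation (data and size are hypothetical):

	void *ptr;
	int r;

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r)
		return r;
	memcpy(ptr, data, size);	/* CPU access via the kernel mapping */
	amdgpu_bo_kunmap(bo);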
+/**
+ * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * References the contained &ttm_buffer_object.
+ *
+ * Returns:
+ * a refcounted pointer to the &amdgpu_bo buffer object.
+ */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
if (bo == NULL)
return NULL;
- ttm_bo_reference(&bo->tbo);
+ ttm_bo_get(&bo->tbo);
return bo;
}
+/**
+ * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * Unreferences the contained &ttm_buffer_object and clears the pointer.
+ */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
struct ttm_buffer_object *tbo;
@@ -673,14 +843,34 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
return;
tbo = &((*bo)->tbo);
- ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
+ ttm_bo_put(tbo);
+ *bo = NULL;
}
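
With the switch to ttm_bo_get()/ttm_bo_put(), the reference helpers behave as
before; a sketch of handing a BO to another context:

	/* take an extra reference while the BO is shared with another user */
	struct amdgpu_bo *extra = amdgpu_bo_ref(bo);

	/* ... later, when that user is done ... */
	amdgpu_bo_unref(&extra);	/* drops the reference and NULLs extra */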
+/**
+ * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be pinned
+ * @domain: domain to be pinned to
+ * @min_offset: the start of requested address range
+ * @max_offset: the end of requested address range
+ *
+ * Pins the buffer object according to the requested domain and address range.
+ * If the memory is unbound GART memory, binds the pages into the GART table.
+ * Adjusts pin_count and pin_size accordingly.
+ *
+ * Pinning means to lock pages in memory along with keeping them at a fixed
+ * offset. It is required when a buffer cannot be moved, for example, when
+ * a display buffer is being scanned out.
+ *
+ * Compared with amdgpu_bo_pin(), this function gives more flexibility on
+ * where to pin a buffer if there are specific restrictions on where a buffer
+ * must be located.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
- u64 min_offset, u64 max_offset,
- u64 *gpu_addr)
+ u64 min_offset, u64 max_offset)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_operation_ctx ctx = { false, false };
@@ -712,8 +902,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
bo->pin_count++;
- if (gpu_addr)
- *gpu_addr = amdgpu_bo_gpu_offset(bo);
if (max_offset != 0) {
u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
@@ -728,7 +916,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
unsigned fpfn, lpfn;
@@ -749,33 +937,48 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
goto error;
}
- r = amdgpu_ttm_alloc_gart(&bo->tbo);
- if (unlikely(r)) {
- dev_err(adev->dev, "%p bind failed\n", bo);
- goto error;
- }
-
bo->pin_count = 1;
- if (gpu_addr != NULL)
- *gpu_addr = amdgpu_bo_gpu_offset(bo);
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- adev->vram_pin_size += amdgpu_bo_size(bo);
- adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
+ atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
+ atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
+ &adev->visible_pin_size);
} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- adev->gart_pin_size += amdgpu_bo_size(bo);
+ atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
}
error:
return r;
}
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
+/**
+ * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be pinned
+ * @domain: domain to be pinned to
+ *
+ * A simple wrapper around amdgpu_bo_pin_restricted().
+ * Provides a simpler API for buffers that do not have any strict restrictions
+ * on where a buffer must be located.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
- return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
+ return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
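
Since pinning no longer returns the GPU address, callers query it separately;
a sketch under the assumption that the caller handles reservation itself:

	int r = amdgpu_bo_reserve(bo, false);

	if (r)
		return r;
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (!r)
		gpu_addr = amdgpu_bo_gpu_offset(bo);	/* old pin API returned this */
	amdgpu_bo_unreserve(bo);
	/* ... later, unpin under reservation with amdgpu_bo_unpin(bo) ... */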
+/**
+ * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be unpinned
+ *
+ * Decreases the pin_count, and clears the flags if pin_count reaches 0.
+ * Changes placement and pin size accordingly.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -790,12 +993,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
if (bo->pin_count)
return 0;
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- adev->vram_pin_size -= amdgpu_bo_size(bo);
- adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
- } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
- adev->gart_pin_size -= amdgpu_bo_size(bo);
- }
+ amdgpu_bo_subtract_pin_size(bo);
for (i = 0; i < bo->placement.num_placement; i++) {
bo->placements[i].lpfn = 0;
@@ -808,6 +1006,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
return r;
}
+/**
+ * amdgpu_bo_evict_vram - evict VRAM buffers
+ * @adev: amdgpu device object
+ *
+ * Evicts all VRAM buffers on the lru list of the memory type.
+ * Mainly used for evicting VRAM at suspend time.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
@@ -830,6 +1038,15 @@ static const char *amdgpu_vram_names[] = {
"DDR4",
};
+/**
+ * amdgpu_bo_init - initialize memory manager
+ * @adev: amdgpu device object
+ *
+ * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
/* reserve PAT memory space to WC for VRAM */
@@ -847,6 +1064,16 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
return amdgpu_ttm_init(adev);
}
+/**
+ * amdgpu_bo_late_init - late init
+ * @adev: amdgpu device object
+ *
+ * Calls amdgpu_ttm_late_init() to free resources used earlier during
+ * initialization.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
amdgpu_ttm_late_init(adev);
@@ -854,6 +1081,12 @@ int amdgpu_bo_late_init(struct amdgpu_device *adev)
return 0;
}
+/**
+ * amdgpu_bo_fini - tear down memory manager
+ * @adev: amdgpu device object
+ *
+ * Reverses amdgpu_bo_init() to tear down memory manager.
+ */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
@@ -861,12 +1094,33 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}
+/**
+ * amdgpu_bo_fbdev_mmap - mmap fbdev memory
+ * @bo: &amdgpu_bo buffer object
+ * @vma: vma as input from the fbdev mmap method
+ *
+ * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
struct vm_area_struct *vma)
{
return ttm_fbdev_mmap(vma, &bo->tbo);
}
+/**
+ * amdgpu_bo_set_tiling_flags - set tiling flags
+ * @bo: &amdgpu_bo buffer object
+ * @tiling_flags: new flags
+ *
+ * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
+ * kernel driver to set the tiling flags on a buffer.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -879,6 +1133,14 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
return 0;
}
+/**
+ * amdgpu_bo_get_tiling_flags - get tiling flags
+ * @bo: &amdgpu_bo buffer object
+ * @tiling_flags: returned flags
+ *
+ * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
+ * get the tiling flags on a buffer.
+ */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
lockdep_assert_held(&bo->tbo.resv->lock.base);
@@ -887,6 +1149,19 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
*tiling_flags = bo->tiling_flags;
}
+/**
+ * amdgpu_bo_set_metadata - set metadata
+ * @bo: &amdgpu_bo buffer object
+ * @metadata: new metadata
+ * @metadata_size: size of the new metadata
+ * @flags: flags of the new metadata
+ *
+ * Sets buffer object's metadata, its size and flags.
+ * Used via GEM ioctl.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
uint32_t metadata_size, uint64_t flags)
{
@@ -916,6 +1191,21 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
return 0;
}
+/**
+ * amdgpu_bo_get_metadata - get metadata
+ * @bo: &amdgpu_bo buffer object
+ * @buffer: returned metadata
+ * @buffer_size: size of the buffer
+ * @metadata_size: size of the returned metadata
+ * @flags: flags of the returned metadata
+ *
+ * Gets buffer object's metadata, its size and flags. buffer_size shall not be
+ * less than metadata_size.
+ * Used via GEM ioctl.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
size_t buffer_size, uint32_t *metadata_size,
uint64_t *flags)
@@ -939,6 +1229,16 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
return 0;
}
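
A sketch of the metadata round trip; tiling_info and its size are hypothetical
driver-specific data:

	u32 out_size;
	u64 out_flags;
	u8 buf[64];
	int r;

	r = amdgpu_bo_set_metadata(bo, tiling_info, tiling_info_size, 0);
	if (r)
		return r;
	/* e.g. on the importing side, with buf at least out_size bytes */
	r = amdgpu_bo_get_metadata(bo, buf, sizeof(buf), &out_size, &out_flags);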
+/**
+ * amdgpu_bo_move_notify - notification about a memory move
+ * @bo: pointer to a buffer object
+ * @evict: if this move is evicting the buffer from the graphics address space
+ * @new_mem: new information of the buffer object
+ *
+ * Marks the corresponding &amdgpu_bo buffer object as invalid and also
+ * performs bookkeeping.
+ * TTM driver callback which is called when ttm moves a buffer.
+ */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_mem_reg *new_mem)
@@ -947,7 +1247,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
abo = ttm_to_amdgpu_bo(bo);
@@ -964,9 +1264,20 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
/* move_notify is called before move happens */
- trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+ trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
+/**
+ * amdgpu_bo_fault_reserve_notify - notification about a memory fault
+ * @bo: pointer to a buffer object
+ *
+ * Notifies the driver that we are taking a fault on this BO and have
+ * reserved it; also performs bookkeeping.
+ * TTM driver callback for dealing with vm faults.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
@@ -975,7 +1286,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
unsigned long offset, size;
int r;
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
return 0;
abo = ttm_to_amdgpu_bo(bo);
@@ -997,8 +1308,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah the memory is not visible ! */
atomic64_inc(&adev->num_vram_cpu_page_faults);
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT);
/* Avoid costly evictions; only set GTT as a busy placement */
abo->placement.num_busy_placement = 1;
@@ -1040,10 +1351,11 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
* amdgpu_bo_gpu_offset - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
- * Returns current GPU offset of the object.
- *
* Note: object should either be pinned or reserved when calling this
* function, it might be useful to add check for this for debugging.
+ *
+ * Returns:
+ * current GPU offset of the object.
*/
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
@@ -1059,6 +1371,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
return bo->tbo.offset;
}
+/**
+ * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
+ * @adev: amdgpu device object
+ * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
+ *
+ * Returns:
+ * Which of the allowed domains is preferred for pinning the BO for scanout.
+ */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
uint32_t domain)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 731748033878..18945dd6982d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -32,6 +32,7 @@
#include "amdgpu.h"
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
+#define AMDGPU_BO_MAX_PLACEMENTS 3
struct amdgpu_bo_param {
unsigned long size;
@@ -77,7 +78,7 @@ struct amdgpu_bo {
/* Protected by tbo.reserved */
u32 preferred_domains;
u32 allowed_domains;
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ struct ttm_place placements[AMDGPU_BO_MAX_PLACEMENTS];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
@@ -234,6 +235,9 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
+
int amdgpu_bo_create(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr);
@@ -252,10 +256,9 @@ void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
- u64 min_offset, u64 max_offset,
- u64 *gpu_addr);
+ u64 min_offset, u64 max_offset);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index fc818b4d849c..8f98629fbe59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -31,7 +31,7 @@
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-
+#include <linux/nospec.h>
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
@@ -68,11 +68,11 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
if (power_supply_is_system_supplied() > 0)
- adev->pm.dpm.ac_power = true;
+ adev->pm.ac_power = true;
else
- adev->pm.dpm.ac_power = false;
+ adev->pm.ac_power = false;
if (adev->powerplay.pp_funcs->enable_bapm)
- amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
+ amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
mutex_unlock(&adev->pm.mutex);
}
}
@@ -80,12 +80,15 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
/**
* DOC: power_dpm_state
*
- * This is a legacy interface and is only provided for backwards compatibility.
- * The amdgpu driver provides a sysfs API for adjusting certain power
- * related parameters. The file power_dpm_state is used for this.
+ * The power_dpm_state file is a legacy interface and is only provided for
+ * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
+ * certain power related parameters. The file power_dpm_state is used for this.
* It accepts the following arguments:
+ *
* - battery
+ *
* - balanced
+ *
* - performance
*
* battery
@@ -169,14 +172,21 @@ fail:
* The amdgpu driver provides a sysfs API for adjusting certain power
* related parameters. The file power_dpm_force_performance_level is
* used for this. It accepts the following arguments:
+ *
* - auto
+ *
* - low
+ *
* - high
+ *
* - manual
- * - GPU fan
+ *
* - profile_standard
+ *
* - profile_min_sclk
+ *
* - profile_min_mclk
+ *
* - profile_peak
*
* auto
@@ -393,6 +403,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
count = -EINVAL;
goto fail;
}
+ idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
amdgpu_dpm_get_pp_num_states(adev, &data);
state = data.states[idx];
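
array_index_nospec() clamps an index so that even a mispredicted bounds check
cannot steer a speculative out-of-bounds load (Spectre v1). The general
pattern, as a sketch with hypothetical names:

	#include <linux/nospec.h>

	/* idx is user-controlled; nr_elems is the table size */
	if (idx >= nr_elems)
		return -EINVAL;
	idx = array_index_nospec(idx, nr_elems);	/* clamped under speculation */
	val = table[idx];				/* no speculative OOB read */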
@@ -463,8 +474,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* this.
*
* Reading the file will display:
+ *
* - a list of engine clock levels and voltages labeled OD_SCLK
+ *
* - a list of memory clock levels and voltages labeled OD_MCLK
+ *
* - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
*
* To manually adjust these settings, first select manual using
@@ -593,40 +607,59 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
return snprintf(buf, PAGE_SIZE, "\n");
}
-static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+/*
+ * Worst case: 32 bits individually specified, in octal at 12 characters
+ * per line (+1 for \n).
+ */
+#define AMDGPU_MASK_BUF_MAX (32 * 13)
+
+static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
- uint32_t mask = 0;
char *sub_str = NULL;
char *tmp;
- char buf_cpy[count];
+ char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
const char delimiter[3] = {' ', '\n', '\0'};
+ size_t bytes;
- memcpy(buf_cpy, buf, count+1);
+ *mask = 0;
+
+ bytes = min(count, sizeof(buf_cpy) - 1);
+ memcpy(buf_cpy, buf, bytes);
+ buf_cpy[bytes] = '\0';
tmp = buf_cpy;
while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
+ sub_str = strsep(&tmp, delimiter);
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
-
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
+ if (ret)
+ return -EINVAL;
+ *mask |= 1 << level;
} else
break;
}
+
+ return 0;
+}
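
The same parsing can be exercised from userspace; a small standalone sketch
(an assumption-level re-implementation, not the kernel code itself) showing
that "0 2 3" yields mask 0xd, i.e. DPM levels 0, 2 and 3 enabled:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static uint32_t parse_mask(char *s)
	{
		uint32_t mask = 0;
		char *tok;

		/* split on spaces/newlines; base 0 accepts dec/octal/hex like kstrtol */
		for (tok = strtok(s, " \n"); tok; tok = strtok(NULL, " \n"))
			mask |= 1u << strtol(tok, NULL, 0);
		return mask;
	}

	int main(void)
	{
		char buf[] = "0 2 3";

		printf("mask = 0x%x\n", parse_mask(buf));	/* prints 0xd */
		return 0;
	}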
+
+static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ uint32_t mask = 0;
+
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
+
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
-fail:
return count;
}
@@ -651,32 +684,15 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
int ret;
- long level;
uint32_t mask = 0;
- char *sub_str = NULL;
- char *tmp;
- char buf_cpy[count];
- const char delimiter[3] = {' ', '\n', '\0'};
- memcpy(buf_cpy, buf, count+1);
- tmp = buf_cpy;
- while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
- if (strlen(sub_str)) {
- ret = kstrtol(sub_str, 0, &level);
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
- } else
- break;
- }
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
-fail:
return count;
}
@@ -701,33 +717,15 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
int ret;
- long level;
uint32_t mask = 0;
- char *sub_str = NULL;
- char *tmp;
- char buf_cpy[count];
- const char delimiter[3] = {' ', '\n', '\0'};
- memcpy(buf_cpy, buf, count+1);
- tmp = buf_cpy;
-
- while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
- if (strlen(sub_str)) {
- ret = kstrtol(sub_str, 0, &level);
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
- } else
- break;
- }
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
-fail:
return count;
}
@@ -905,6 +903,36 @@ fail:
return -EINVAL;
}
+/**
+ * DOC: busy_percent
+ *
+ * The amdgpu driver provides a sysfs API for reading how busy the GPU
+ * is as a percentage. The file gpu_busy_percent is used for this.
+ * The SMU firmware computes a percentage of load based on the
+ * aggregate activity level in the IP cores.
+ */
+static ssize_t amdgpu_get_busy_percent(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int r, value, size = sizeof(value);
+
+ /* sanity check PP is enabled */
+ if (!(adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->read_sensor))
+ return -EINVAL;
+
+ /* read the IP busy sensor */
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
+ (void *)&value, &size);
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
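
From userspace the new attribute is a plain text file; a minimal reader (the
sysfs path and card index are assumptions that vary per system):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/drm/card0/device/gpu_busy_percent", "r");
		int busy;

		if (f && fscanf(f, "%d", &busy) == 1)
			printf("GPU busy: %d%%\n", busy);
		if (f)
			fclose(f);
		return 0;
	}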
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
amdgpu_get_dpm_forced_performance_level,
@@ -938,6 +966,8 @@ static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
amdgpu_get_pp_od_clk_voltage,
amdgpu_set_pp_od_clk_voltage);
+static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
+ amdgpu_get_busy_percent, NULL);
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@@ -1156,7 +1186,7 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
int r, size = sizeof(vddnb);
/* only APUs have vddnb */
- if (adev->flags & AMD_IS_APU)
+ if (!(adev->flags & AMD_IS_APU))
return -EINVAL;
/* Can't get voltage when the card is off */
@@ -1285,35 +1315,51 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
* DOC: hwmon
*
* The amdgpu driver exposes the following sensor interfaces:
+ *
* - GPU temperature (via the on-die sensor)
+ *
* - GPU voltage
+ *
* - Northbridge voltage (APUs only)
+ *
* - GPU power
+ *
* - GPU fan
*
* hwmon interfaces for GPU temperature:
+ *
* - temp1_input: the on die GPU temperature in millidegrees Celsius
+ *
* - temp1_crit: temperature critical max value in millidegrees Celsius
+ *
* - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
*
* hwmon interfaces for GPU voltage:
+ *
* - in0_input: the voltage on the GPU in millivolts
+ *
* - in1_input: the voltage on the Northbridge in millivolts
*
* hwmon interfaces for GPU power:
+ *
* - power1_average: average power used by the GPU in microWatts
+ *
* - power1_cap_min: minimum cap supported in microWatts
+ *
* - power1_cap_max: maximum cap supported in microWatts
+ *
* - power1_cap: selected power cap in microWatts
*
* hwmon interfaces for GPU fan:
+ *
* - pwm1: pulse width modulation fan level (0-255)
- * - pwm1_enable: pulse width modulation fan control method
- * 0: no fan speed control
- * 1: manual fan speed control using pwm interface
- * 2: automatic fan speed control
+ *
+ * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
+ *
* - pwm1_min: pulse width modulation fan control minimum level (0)
+ *
* - pwm1_max: pulse width modulation fan control maximum level (255)
+ *
* - fan1_input: fan speed in RPM
*
* You can use hwmon tools like sensors to view this information on your system.
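
As a concrete example, the temperature interface can be read like any other
hwmon file; the hwmon index below is an assumption and should be discovered
via /sys/class/hwmon:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
		long mdeg;

		if (f && fscanf(f, "%ld", &mdeg) == 1)
			printf("GPU temperature: %ld.%03ld C\n",
			       mdeg / 1000, mdeg % 1000);	/* millidegrees C */
		if (f)
			fclose(f);
		return 0;
	}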
@@ -1668,10 +1714,10 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
- if (adev->powerplay.pp_funcs->powergate_uvd) {
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* enable/disable UVD */
mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_powergate_uvd(adev, !enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
mutex_unlock(&adev->pm.mutex);
} else {
if (enable) {
@@ -1690,10 +1736,10 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
- if (adev->powerplay.pp_funcs->powergate_vce) {
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* enable/disable VCE */
mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_powergate_vce(adev, !enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
mutex_unlock(&adev->pm.mutex);
} else {
if (enable) {
@@ -1825,6 +1871,13 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
"pp_od_clk_voltage\n");
return ret;
}
+ ret = device_create_file(adev->dev,
+ &dev_attr_gpu_busy_percent);
+ if (ret) {
+ DRM_ERROR("failed to create device file "
+ "gpu_busy_level\n");
+ return ret;
+ }
ret = amdgpu_debugfs_pm_init(adev);
if (ret) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -1860,6 +1913,7 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
&dev_attr_pp_power_profile_mode);
device_remove_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
+ device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -1878,6 +1932,14 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring);
}
+ mutex_lock(&adev->pm.mutex);
+ /* update battery/ac status */
+ if (power_supply_is_system_supplied() > 0)
+ adev->pm.ac_power = true;
+ else
+ adev->pm.ac_power = false;
+ mutex_unlock(&adev->pm.mutex);
+
if (adev->powerplay.pp_funcs->dispatch_tasks) {
if (!amdgpu_device_has_dc_support(adev)) {
mutex_lock(&adev->pm.mutex);
@@ -1898,14 +1960,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
} else {
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_get_active_displays(adev);
- /* update battery/ac status */
- if (power_supply_is_system_supplied() > 0)
- adev->pm.dpm.ac_power = true;
- else
- adev->pm.dpm.ac_power = false;
-
amdgpu_dpm_change_power_state_locked(adev);
-
mutex_unlock(&adev->pm.mutex);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 4683626b065f..1c5d97f4b4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -23,6 +23,14 @@
*
* Authors: Alex Deucher
*/
+
+/**
+ * DOC: PRIME Buffer Sharing
+ *
+ * The following callback implementations are used for :ref:`sharing GEM buffer
+ * objects between different devices via PRIME <prime_buffer_sharing>`.
+ */
+
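
For context, a sketch of how these callbacks are wired into the driver's
&drm_driver; the field list is abridged and the actual wiring lives in
amdgpu_drv.c:

	static struct drm_driver kms_driver = {
		/* ... other fields omitted ... */
		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
		.gem_prime_export = amdgpu_gem_prime_export,
		.gem_prime_import = amdgpu_gem_prime_import,
		.gem_prime_res_obj = amdgpu_gem_prime_res_obj,
		.gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
		.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
		.gem_prime_vmap = amdgpu_gem_prime_vmap,
		.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
		.gem_prime_mmap = amdgpu_gem_prime_mmap,
	};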
#include <drm/drmP.h>
#include "amdgpu.h"
@@ -32,6 +40,14 @@
static const struct dma_buf_ops amdgpu_dmabuf_ops;
+/**
+ * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
+ * implementation
+ * @obj: GEM buffer object
+ *
+ * Returns:
+ * A scatter/gather table for the pinned pages of the buffer object's memory.
+ */
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -40,6 +56,15 @@ struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
+/**
+ * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
+ * @obj: GEM buffer object
+ *
+ * Sets up an in-kernel virtual mapping of the buffer object's memory.
+ *
+ * Returns:
+ * The virtual address of the mapping or an error pointer.
+ */
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -53,6 +78,13 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
return bo->dma_buf_vmap.virtual;
}
+/**
+ * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
+ * @obj: GEM buffer object
+ * @vaddr: virtual address (unused)
+ *
+ * Tears down the in-kernel virtual mapping of the buffer object's memory.
+ */
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -60,6 +92,17 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
ttm_bo_kunmap(&bo->dma_buf_vmap);
}
+/**
+ * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
+ * @obj: GEM buffer object
+ * @vma: virtual memory area
+ *
+ * Sets up a userspace mapping of the buffer object's memory in the given
+ * virtual memory area.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -94,6 +137,19 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma
return ret;
}
+/**
+ * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
+ * implementation
+ * @dev: DRM device
+ * @attach: DMA-buf attachment
+ * @sg: Scatter/gather table
+ *
+ * Import shared DMA buffer memory exported by another device.
+ *
+ * Returns:
+ * A new GEM buffer object of the given DRM device, representing the memory
+ * described by the given DMA-buf attachment and scatter/gather table.
+ */
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
@@ -132,8 +188,19 @@ error:
return ERR_PTR(ret);
}
+/**
+ * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
+ * @dma_buf: shared DMA buffer
+ * @attach: DMA-buf attachment
+ *
+ * Makes sure that the shared DMA buffer can be accessed by the target device.
+ * For now, simply pins it to the GTT domain, where it should be accessible by
+ * all DMA devices.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
- struct device *target_dev,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
@@ -141,7 +208,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
long r;
- r = drm_gem_map_attach(dma_buf, target_dev, attach);
+ r = drm_gem_map_attach(dma_buf, attach);
if (r)
return r;
@@ -165,7 +232,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
}
/* pin buffer into GTT */
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (r)
goto error_unreserve;
@@ -181,6 +248,14 @@ error_detach:
return r;
}
+/**
+ * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
+ * @dma_buf: shared DMA buffer
+ * @attach: DMA-buf attachment
+ *
+ * This is called when a shared DMA buffer no longer needs to be accessible by
+ * the other device. For now, simply unpins the buffer from GTT.
+ */
static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
@@ -202,6 +277,13 @@ error:
drm_gem_map_detach(dma_buf, attach);
}
+/**
+ * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
+ * @obj: GEM buffer object
+ *
+ * Returns:
+ * The buffer object's reservation object.
+ */
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -209,6 +291,18 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
return bo->tbo.resv;
}
+/**
+ * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
+ * @dma_buf: shared DMA buffer
+ * @direction: direction of DMA transfer
+ *
+ * This is called before CPU access to the shared DMA buffer's memory. If it's
+ * a read access, the buffer is moved to the GTT domain if possible, for optimal
+ * CPU read performance.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction direction)
{
@@ -229,7 +323,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
@@ -245,14 +339,24 @@ static const struct dma_buf_ops amdgpu_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_gem_begin_cpu_access,
.map = drm_gem_dmabuf_kmap,
- .map_atomic = drm_gem_dmabuf_kmap_atomic,
.unmap = drm_gem_dmabuf_kunmap,
- .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
};
+/**
+ * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
+ * @dev: DRM device
+ * @gobj: GEM buffer object
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
+ *
+ * The main work is done by the &drm_gem_prime_export helper, which in turn
+ * uses &amdgpu_gem_prime_res_obj.
+ *
+ * Returns:
+ * Shared DMA buffer representing the GEM buffer object from the given device.
+ */
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gobj,
int flags)
@@ -273,6 +377,17 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
return buf;
}
+/**
+ * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
+ * @dev: DRM device
+ * @dma_buf: Shared DMA buffer
+ *
+ * The main work is done by the &drm_gem_prime_import helper, which in turn
+ * uses &amdgpu_gem_prime_import_sg_table.
+ *
+ * Returns:
+ * GEM buffer object representing the shared DMA buffer for the given device.
+ */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 8af16e81c7d4..a172bba32b45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -66,8 +66,6 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
u32 ring,
struct amdgpu_ring **out_ring)
{
- u32 instance;
-
switch (mapper->hw_ip) {
case AMDGPU_HW_IP_GFX:
*out_ring = &adev->gfx.gfx_ring[ring];
@@ -79,16 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
*out_ring = &adev->sdma.instance[ring].ring;
break;
case AMDGPU_HW_IP_UVD:
- instance = ring;
- *out_ring = &adev->uvd.inst[instance].ring;
+ *out_ring = &adev->uvd.inst[0].ring;
break;
case AMDGPU_HW_IP_VCE:
*out_ring = &adev->vce.ring[ring];
break;
case AMDGPU_HW_IP_UVD_ENC:
- instance = ring / adev->uvd.num_enc_rings;
- *out_ring =
- &adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
+ *out_ring = &adev->uvd.inst[0].ring_enc[ring];
break;
case AMDGPU_HW_IP_VCN_DEC:
*out_ring = &adev->vcn.ring_dec;
@@ -96,6 +91,9 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCN_ENC:
*out_ring = &adev->vcn.ring_enc[ring];
break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ *out_ring = &adev->vcn.ring_jpeg;
+ break;
default:
*out_ring = NULL;
DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
@@ -216,7 +214,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
u32 hw_ip, u32 instance, u32 ring,
struct amdgpu_ring **out_ring)
{
- int r, ip_num_rings;
+ int i, r, ip_num_rings = 0;
struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];
if (!adev || !mgr || !out_ring)
@@ -245,14 +243,21 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
ip_num_rings = adev->sdma.num_instances;
break;
case AMDGPU_HW_IP_UVD:
- ip_num_rings = adev->uvd.num_uvd_inst;
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (!(adev->uvd.harvest_config & (1 << i)))
+ ip_num_rings++;
+ }
break;
case AMDGPU_HW_IP_VCE:
ip_num_rings = adev->vce.num_rings;
break;
case AMDGPU_HW_IP_UVD_ENC:
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (!(adev->uvd.harvest_config & (1 << i)))
+ ip_num_rings++;
+ }
ip_num_rings =
- adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
+ adev->uvd.num_enc_rings * ip_num_rings;
break;
case AMDGPU_HW_IP_VCN_DEC:
ip_num_rings = 1;
@@ -260,6 +265,9 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCN_ENC:
ip_num_rings = adev->vcn.num_enc_rings;
break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ ip_num_rings = 1;
+ break;
default:
DRM_DEBUG("unknown ip type: %d\n", hw_ip);
return -EINVAL;
@@ -287,6 +295,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
case AMDGPU_HW_IP_UVD_ENC:
case AMDGPU_HW_IP_VCN_DEC:
case AMDGPU_HW_IP_VCN_ENC:
+ case AMDGPU_HW_IP_VCN_JPEG:
r = amdgpu_identity_map(adev, mapper, ring, out_ring);
break;
case AMDGPU_HW_IP_DMA:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index c6850b629d0e..93794a85f83d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -211,7 +211,8 @@ void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
if (!ring->funcs->set_priority)
return;
- atomic_inc(&ring->num_jobs[priority]);
+ if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
+ return;
mutex_lock(&ring->priority_mutex);
if (priority <= ring->priority)
@@ -304,7 +305,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
0xffffffffffffffff : ring->buf_mask;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+ r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
&ring->gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 1513124c5659..d242b9a51e90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -44,6 +44,8 @@
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
+#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
+
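
The macro recovers the ring from the embedded &drm_gpu_scheduler, which is
what the trace points below switch to now that jobs no longer carry a ring
pointer; a sketch:

	/* given a struct amdgpu_job *job */
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);

	DRM_DEBUG("job runs on ring %s\n", ring->name);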
enum amdgpu_ring_type {
AMDGPU_RING_TYPE_GFX,
AMDGPU_RING_TYPE_COMPUTE,
@@ -53,7 +55,8 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_KIQ,
AMDGPU_RING_TYPE_UVD_ENC,
AMDGPU_RING_TYPE_VCN_DEC,
- AMDGPU_RING_TYPE_VCN_ENC
+ AMDGPU_RING_TYPE_VCN_ENC,
+ AMDGPU_RING_TYPE_VCN_JPEG
};
struct amdgpu_device;
@@ -112,6 +115,7 @@ struct amdgpu_ring_funcs {
u32 nop;
bool support_64bit_ptrs;
unsigned vmhub;
+ unsigned extra_dw;
/* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring);
@@ -119,6 +123,7 @@ struct amdgpu_ring_funcs {
void (*set_wptr)(struct amdgpu_ring *ring);
/* validating and patching of IBs */
int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+ int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
/* constants to calculate how many DW are needed for an emit */
unsigned emit_frame_size;
unsigned emit_ib_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index e3878256743a..8904e62dca7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2009 VMware, Inc.
*
@@ -75,11 +76,12 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_reserve(vram_obj, false);
if (unlikely(r != 0))
goto out_unref;
- r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
+ r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM);
if (r) {
DRM_ERROR("Failed to pin VRAM object\n");
goto out_unres;
}
+ vram_addr = amdgpu_bo_gpu_offset(vram_obj);
for (i = 0; i < n; i++) {
void *gtt_map, *vram_map;
void **gart_start, **gart_end;
@@ -96,11 +98,17 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_reserve(gtt_obj[i], false);
if (unlikely(r != 0))
goto out_lclean_unref;
- r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr);
+ r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT);
if (r) {
DRM_ERROR("Failed to pin GTT object %d\n", i);
goto out_lclean_unres;
}
+ r = amdgpu_ttm_alloc_gart(&gtt_obj[i]->tbo);
+ if (r) {
+ DRM_ERROR("%p bind failed\n", gtt_obj[i]);
+ goto out_lclean_unpin;
+ }
+ gart_addr = amdgpu_bo_gpu_offset(gtt_obj[i]);
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index e96e26d3f3b0..7206a0025b17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs,
TP_fast_assign(
__entry->bo_list = p->bo_list;
- __entry->ring = p->job->ring->idx;
+ __entry->ring = p->ring->idx;
__entry->dw = p->job->ibs[i].length_dw;
__entry->fences = amdgpu_fence_count_emitted(
- p->job->ring);
+ p->ring);
),
TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
__entry->bo_list, __entry->ring, __entry->dw,
@@ -178,7 +178,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = job->ring->name;
+ __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -203,7 +203,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = job->ring->name;
+ __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -314,6 +314,11 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
TP_ARGS(mapping)
);
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
+ TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+ TP_ARGS(mapping)
+);
+
TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
uint32_t incr, uint64_t flags),
@@ -436,7 +441,7 @@ TRACE_EVENT(amdgpu_cs_bo_status,
__entry->total_bo, __entry->total_size)
);
-TRACE_EVENT(amdgpu_ttm_bo_move,
+TRACE_EVENT(amdgpu_bo_move,
TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
TP_ARGS(bo, new_placement, old_placement),
TP_STRUCT__entry(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e93a0a237dc3..fcf421263fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -92,11 +92,9 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
}
/**
- * amdgpu_ttm_global_init - Initialize global TTM memory reference
- * structures.
+ * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
*
- * @adev: AMDGPU device for which the global structures need to be
- * registered.
+ * @adev: AMDGPU device for which the global structures need to be registered.
*
* This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
* during bring up.
@@ -104,8 +102,6 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
- struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
int r;
/* ensure reference is false in case init fails */
@@ -138,21 +134,10 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
- ring = adev->mman.buffer_funcs_ring;
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
- rq, NULL);
- if (r) {
- DRM_ERROR("Failed setting up TTM BO move run queue.\n");
- goto error_entity;
- }
-
adev->mman.mem_global_referenced = true;
return 0;
-error_entity:
- drm_global_item_unref(&adev->mman.bo_global_ref.ref);
error_bo:
drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
@@ -162,8 +147,6 @@ error_mem:
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
if (adev->mman.mem_global_referenced) {
- drm_sched_entity_fini(adev->mman.entity.sched,
- &adev->mman.entity);
mutex_destroy(&adev->mman.gtt_window_lock);
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
drm_global_item_unref(&adev->mman.mem_global_ref);
@@ -177,13 +160,12 @@ static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
}
/**
- * amdgpu_init_mem_type - Initialize a memory manager for a specific
- * type of memory request.
+ * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
+ * memory request.
*
- * @bdev: The TTM BO device object (contains a reference to
- * amdgpu_device)
- * @type: The type of memory requested
- * @man:
+ * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
+ * @type: The type of memory requested
+ * @man: The memory type manager for each domain
*
* This is called by ttm_bo_init_mm() when a buffer object is being
* initialized.
@@ -263,7 +245,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
}
/* Object isn't an AMDGPU object so ignore */
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
+ if (!amdgpu_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
placement->busy_placement = &placements;
placement->num_placement = 1;
@@ -276,8 +258,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case TTM_PL_VRAM:
if (!adev->mman.buffer_funcs_enabled) {
/* Move to system memory */
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
- } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
amdgpu_bo_in_cpu_visible_vram(abo)) {
@@ -286,7 +268,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
* BO will be evicted to GTT rather than causing other
* BOs to be evicted from VRAM
*/
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT);
abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
abo->placements[0].lpfn = 0;
@@ -294,12 +276,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo->placement.num_busy_placement = 1;
} else {
/* Move to GTT memory */
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
}
break;
case TTM_PL_TT:
default:
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
}
*placement = abo->placement;
}
@@ -307,8 +289,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
/**
* amdgpu_verify_access - Verify access for a mmap call
*
- * @bo: The buffer object to map
- * @filp: The file pointer from the process performing the mmap
+ * @bo: The buffer object to map
+ * @filp: The file pointer from the process performing the mmap
*
* This is called by ttm_bo_mmap() to verify whether a process
* has the right to mmap a BO to their process space.
@@ -333,11 +315,10 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
/**
* amdgpu_move_null - Register memory for a buffer object
*
- * @bo: The bo to assign the memory to
- * @new_mem: The memory to be assigned.
+ * @bo: The bo to assign the memory to
+ * @new_mem: The memory to be assigned.
*
- * Assign the memory from new_mem to the memory of the buffer object
- * bo.
+ * Assign the memory from new_mem to the memory of the buffer object bo.
*/
static void amdgpu_move_null(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
@@ -350,8 +331,12 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT
- * buffer.
+ * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
+ *
+ * @bo: The bo to assign the memory to.
+ * @mm_node: Memory manager node for drm allocator.
+ * @mem: The region where the bo resides.
+ *
*/
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
struct drm_mm_node *mm_node,
@@ -367,10 +352,12 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_find_mm_node - Helper function finds the drm_mm_node
- * corresponding to @offset. It also modifies
- * the offset to be within the drm_mm_node
- * returned
+ * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
+ * @offset. It also modifies the offset to be within the drm_mm_node returned
+ *
+ * @mem: The region where the bo resides.
+ * @offset: The offset that drm_mm_node is used for finding.
+ *
*/
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
unsigned long *offset)
@@ -512,8 +499,8 @@ error:
/**
* amdgpu_move_blit - Copy an entire buffer to another buffer
*
- * This is a helper called by amdgpu_bo_move() and
- * amdgpu_move_vram_ram() to help move buffers to and from VRAM.
+ * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
+ * help move buffers to and from VRAM.
*/
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
@@ -595,7 +582,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
}
/* blit VRAM to GTT */
- r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
+ r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -647,7 +634,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
}
/* copy to VRAM */
- r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
+ r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -809,8 +796,8 @@ struct amdgpu_ttm_tt {
};
/**
- * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to
- * by a USERPTR pointer to memory
+ * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to by a USERPTR
+ * pointer to memory
*
* Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
* This provides a wrapper around the get_user_pages() call to provide
@@ -833,8 +820,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
down_read(&mm->mmap_sem);
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
- /* check that we only use anonymous memory
- to prevent problems with writeback */
+ /*
+ * check that we only use anonymous memory to prevent problems
+ * with writeback
+ */
unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
struct vm_area_struct *vma;
@@ -885,10 +874,9 @@ release_pages:
}
/**
- * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages
- * as necessary.
+ * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
*
- * Called by amdgpu_cs_list_validate(). This creates the page list
+ * Called by amdgpu_cs_list_validate(). This creates the page list
* that backs user memory and will ultimately be mapped into the device
* address space.
*/
@@ -930,8 +918,7 @@ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
}
/**
- * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the
- * user pages
+ * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
*
* Called by amdgpu_ttm_backend_bind()
**/
@@ -1310,8 +1297,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
/**
- * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt
- * for the current task
+ * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
+ * task
*
* @ttm: The ttm_tt object to bind this userptr object to
* @addr: The address in the current tasks VM space to use
@@ -1361,9 +1348,8 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
}
/**
- * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays
- * inside an address range for the
- * current task.
+ * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
+ * address range for the current task.
*
*/
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
@@ -1401,8 +1387,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
}
/**
- * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been
- * invalidated?
+ * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been invalidated?
*/
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated)
@@ -1415,10 +1400,8 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
}
/**
- * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this
- * ttm_tt object been invalidated
- * since the last time they've
- * been set?
+ * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this ttm_tt object
+ * been invalidated since the last time they've been set?
*/
bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
{
@@ -1474,13 +1457,12 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
}
/**
- * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict
- * a buffer object.
+ * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
+ * object.
*
- * Return true if eviction is sensible. Called by
- * ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space()
- * which tries to evict buffer objects until it can find space
- * for a new object and by ttm_bo_force_list_clean() which is
+ * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
+ * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
+ * it can find space for a new object and by ttm_bo_force_list_clean() which is
* used to clean out a memory space.
*/
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
@@ -1530,8 +1512,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_ttm_access_memory - Read or Write memory that backs a
- * buffer object.
+ * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
*
* @bo: The buffer object to read/write
* @offset: Offset into buffer object
@@ -1695,7 +1676,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
AMDGPU_GEM_DOMAIN_VRAM,
adev->fw_vram_usage.start_offset,
(adev->fw_vram_usage.start_offset +
- adev->fw_vram_usage.size), NULL);
+ adev->fw_vram_usage.size));
if (r)
goto error_pin;
r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
@@ -1719,8 +1700,8 @@ error_create:
return r;
}
/**
- * amdgpu_ttm_init - Init the memory management (ttm) as well as
- * various gtt/vram related fields.
+ * amdgpu_ttm_init - Init the memory management (ttm) as well as various
+ * gtt/vram related fields.
*
* This initializes all of the memory space pools that the TTM layer
* will need such as the GTT space (system memory mapped to the device),
@@ -1871,8 +1852,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_ttm_late_init - Handle any late initialization for
- * amdgpu_ttm
+ * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
*/
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
@@ -1921,10 +1901,30 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
uint64_t size;
+ int r;
- if (!adev->mman.initialized || adev->in_gpu_reset)
+ if (!adev->mman.initialized || adev->in_gpu_reset ||
+ adev->mman.buffer_funcs_enabled == enable)
return;
+ if (enable) {
+ struct amdgpu_ring *ring;
+ struct drm_sched_rq *rq;
+
+ ring = adev->mman.buffer_funcs_ring;
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
+ r);
+ return;
+ }
+ } else {
+ drm_sched_entity_destroy(&adev->mman.entity);
+ dma_fence_put(man->move);
+ man->move = NULL;
+ }
+
/* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
if (enable)
size = adev->gmc.real_vram_size;
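For readers tracking the enable/disable rework above: the early return makes the
toggle idempotent, so the scheduler entity is set up exactly once per off-to-on
transition and torn down once per on-to-off. A minimal, self-contained C sketch of
that pattern (setup()/teardown() are hypothetical stand-ins for the entity
init/destroy calls; this is an illustration, not the driver code):

#include <stdbool.h>

struct toggled {
	bool enabled;
};

static int set_enabled(struct toggled *t, bool enable,
		       int (*setup)(void), void (*teardown)(void))
{
	if (t->enabled == enable)
		return 0;		/* idempotent: nothing to do */

	if (enable) {
		int r = setup();
		if (r)
			return r;	/* keep the old state on failure */
	} else {
		teardown();
	}

	t->enabled = enable;
	return 0;
}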
@@ -2002,7 +2002,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
if (r)
goto error_free;
- r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
if (r)
goto error_free;
@@ -2071,24 +2071,19 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
- if (direct_submit) {
- r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
- NULL, fence);
- job->fence = dma_fence_get(*fence);
- if (r)
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
- amdgpu_job_free(job);
- } else {
- r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ if (direct_submit)
+ r = amdgpu_job_submit_direct(job, ring, fence);
+ else
+ r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, fence);
- if (r)
- goto error_free;
- }
+ if (r)
+ goto error_free;
return r;
error_free:
amdgpu_job_free(job);
+ DRM_ERROR("Error scheduling IBs (%d)\n", r);
return r;
}
@@ -2171,7 +2166,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
- r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r)
goto error_free;
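Across these hunks amdgpu_job_submit() drops its ring parameter — the ring is now
implied by the entity's run queue — and direct submissions move to the new
amdgpu_job_submit_direct() helper. A hedged sketch of the two call shapes,
mirroring the calls above (variable names are illustrative):

	/* scheduled path: the ring is derived from the entity's run queue */
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);

	/* direct path: bypass the scheduler and submit on an explicit ring */
	r = amdgpu_job_submit_direct(job, ring, &fence);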
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index e5da4654b630..8b3cc6687769 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -73,7 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
+u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 3ff08e326838..632fa5980ff4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -53,11 +53,11 @@
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
-#define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin"
-#define FIRMWARE_KABINI "radeon/kabini_uvd.bin"
-#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin"
-#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin"
-#define FIRMWARE_MULLINS "radeon/mullins_uvd.bin"
+#define FIRMWARE_BONAIRE "amdgpu/bonaire_uvd.bin"
+#define FIRMWARE_KABINI "amdgpu/kabini_uvd.bin"
+#define FIRMWARE_KAVERI "amdgpu/kaveri_uvd.bin"
+#define FIRMWARE_HAWAII "amdgpu/hawaii_uvd.bin"
+#define FIRMWARE_MULLINS "amdgpu/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
@@ -127,7 +127,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
- unsigned version_major, version_minor, family_id;
+ unsigned family_id;
int i, j, r;
INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
@@ -208,29 +208,46 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
- version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
- version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
- version_major, version_minor, family_id);
-
- /*
- * Limit the number of UVD handles depending on microcode major
- * and minor versions. The firmware version which has 40 UVD
- * instances support is 1.80. So all subsequent versions should
- * also have the same support.
- */
- if ((version_major > 0x01) ||
- ((version_major == 0x01) && (version_minor >= 0x50)))
- adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
- adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
- (family_id << 8));
+ if (adev->asic_type < CHIP_VEGA20) {
+ unsigned version_major, version_minor;
- if ((adev->asic_type == CHIP_POLARIS10 ||
- adev->asic_type == CHIP_POLARIS11) &&
- (adev->uvd.fw_version < FW_1_66_16))
- DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
- version_major, version_minor);
+ version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+ version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+ DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+ version_major, version_minor, family_id);
+
+ /*
+ * Limit the number of UVD handles depending on microcode major
+ * and minor versions. The firmware version which has 40 UVD
+ * instances support is 1.80. So all subsequent versions should
+ * also have the same support.
+ */
+ if ((version_major > 0x01) ||
+ ((version_major == 0x01) && (version_minor >= 0x50)))
+ adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
+ adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+ (family_id << 8));
+
+ if ((adev->asic_type == CHIP_POLARIS10 ||
+ adev->asic_type == CHIP_POLARIS11) &&
+ (adev->uvd.fw_version < FW_1_66_16))
+ DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
+ version_major, version_minor);
+ } else {
+ unsigned int enc_major, enc_minor, dec_minor;
+
+ dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+ enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
+ enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
+ DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n",
+ enc_major, enc_minor, dec_minor, family_id);
+
+ adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
+ adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
+ }
bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+ AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
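The Vega20 branch above packs four fields into the firmware's 32-bit
ucode_version word. Here is a standalone C sketch of the unpacking, reusing the
same shifts and masks; the field layout is inferred from the code above, not from
firmware documentation, and the example word is made up:

#include <stdint.h>
#include <stdio.h>

static void decode_uvd_fw_version(uint32_t ucode_version)
{
	unsigned int family_id = ucode_version & 0xff;          /* bits 0-7 */
	unsigned int dec_minor = (ucode_version >> 8) & 0xff;   /* bits 8-15 */
	unsigned int enc_minor = (ucode_version >> 24) & 0x3f;  /* bits 24-29 */
	unsigned int enc_major = (ucode_version >> 30) & 0x3;   /* bits 30-31 */

	printf("ENC: %u.%u DEC: .%u Family ID: %u\n",
	       enc_major, enc_minor, dec_minor, family_id);
}

int main(void)
{
	decode_uvd_fw_version(0x45002a2du);	/* arbitrary example word */
	return 0;
}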
@@ -238,7 +255,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
-
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
&adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
@@ -246,21 +264,20 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
}
+ }
- ring = &adev->uvd.inst[j].ring;
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
- rq, NULL);
- if (r != 0) {
- DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
- return r;
- }
-
- for (i = 0; i < adev->uvd.max_handles; ++i) {
- atomic_set(&adev->uvd.inst[j].handles[i], 0);
- adev->uvd.inst[j].filp[i] = NULL;
- }
+ ring = &adev->uvd.inst[0].ring;
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up UVD kernel entity.\n");
+ return r;
+ }
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+ atomic_set(&adev->uvd.handles[i], 0);
+ adev->uvd.filp[i] = NULL;
}
+
/* from uvd v5.0 HW addressing capacity increased to 64 bits */
if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
adev->uvd.address_64_bit = true;
@@ -289,11 +306,13 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int i, j;
+ drm_sched_entity_destroy(&adev->uvd.entity);
+
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
kfree(adev->uvd.inst[j].saved_bo);
- drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
-
amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
&adev->uvd.inst[j].gpu_addr,
(void **)&adev->uvd.inst[j].cpu_addr);
@@ -316,20 +335,22 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
cancel_delayed_work_sync(&adev->uvd.idle_work);
+ /* only valid for physical mode */
+ if (adev->asic_type < CHIP_POLARIS10) {
+ for (i = 0; i < adev->uvd.max_handles; ++i)
+ if (atomic_read(&adev->uvd.handles[i]))
+ break;
+
+ if (i == adev->uvd.max_handles)
+ return 0;
+ }
+
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
if (adev->uvd.inst[j].vcpu_bo == NULL)
continue;
- /* only valid for physical mode */
- if (adev->asic_type < CHIP_POLARIS10) {
- for (i = 0; i < adev->uvd.max_handles; ++i)
- if (atomic_read(&adev->uvd.inst[j].handles[i]))
- break;
-
- if (i == adev->uvd.max_handles)
- continue;
- }
-
size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
ptr = adev->uvd.inst[j].cpu_addr;
@@ -349,6 +370,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
if (adev->uvd.inst[i].vcpu_bo == NULL)
return -EINVAL;
@@ -381,30 +404,27 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
- struct amdgpu_ring *ring;
- int i, j, r;
-
- for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
- ring = &adev->uvd.inst[j].ring;
+ struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
+ int i, r;
- for (i = 0; i < adev->uvd.max_handles; ++i) {
- uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
- if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
- struct dma_fence *fence;
-
- r = amdgpu_uvd_get_destroy_msg(ring, handle,
- false, &fence);
- if (r) {
- DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
- continue;
- }
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+ uint32_t handle = atomic_read(&adev->uvd.handles[i]);
- dma_fence_wait(fence, false);
- dma_fence_put(fence);
+ if (handle != 0 && adev->uvd.filp[i] == filp) {
+ struct dma_fence *fence;
- adev->uvd.inst[j].filp[i] = NULL;
- atomic_set(&adev->uvd.inst[j].handles[i], 0);
+ r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
+ &fence);
+ if (r) {
+ DRM_ERROR("Error destroying UVD %d!\n", r);
+ continue;
}
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+ adev->uvd.filp[i] = NULL;
+ atomic_set(&adev->uvd.handles[i], 0);
}
}
}
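The handle bookkeeping consolidated above is lock-free: a slot is claimed with
atomic_cmpxchg(slot, 0, handle) and released by swapping the handle back to 0,
with no spinlock around the table. A self-contained C11 approximation of the
pattern (the table size and helper names are illustrative):

#include <stdatomic.h>
#include <stdint.h>

#define MAX_HANDLES 16

static _Atomic uint32_t handles[MAX_HANDLES];

/* Claim the first free slot for @handle; returns the slot index or -1. */
static int handle_alloc(uint32_t handle)
{
	for (int i = 0; i < MAX_HANDLES; i++) {
		uint32_t expected = 0;

		if (atomic_compare_exchange_strong(&handles[i], &expected,
						   handle))
			return i;
	}
	return -1;	/* table full, mirrors the -ENOSPC above */
}

/* Release @handle wherever it lives; a no-op if it was never allocated. */
static void handle_free(uint32_t handle)
{
	for (int i = 0; i < MAX_HANDLES; i++) {
		uint32_t expected = handle;

		atomic_compare_exchange_strong(&handles[i], &expected, 0);
	}
}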
@@ -459,7 +479,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
if (cmd == 0x0 || cmd == 0x3) {
/* yes, force it into VRAM */
uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
}
amdgpu_uvd_force_into_uvd_segment(bo);
@@ -679,16 +699,15 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
void *ptr;
long r;
int i;
- uint32_t ip_instance = ctx->parser->job->ring->me;
if (offset & 0x3F) {
- DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
+ DRM_ERROR("UVD messages must be 64 byte aligned!\n");
return -EINVAL;
}
r = amdgpu_bo_kmap(bo, &ptr);
if (r) {
- DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
+ DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
return r;
}
@@ -698,7 +717,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
handle = msg[2];
if (handle == 0) {
- DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
+ DRM_ERROR("Invalid UVD handle!\n");
return -EINVAL;
}
@@ -709,18 +728,19 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
/* try to alloc a new handle */
for (i = 0; i < adev->uvd.max_handles; ++i) {
- if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
- DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
+ if (atomic_read(&adev->uvd.handles[i]) == handle) {
+ DRM_ERROR("Handle 0x%x already in use!\n",
+ handle);
return -EINVAL;
}
- if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
- adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
+ if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
+ adev->uvd.filp[i] = ctx->parser->filp;
return 0;
}
}
- DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
+ DRM_ERROR("No more free UVD handles!\n");
return -ENOSPC;
case 1:
@@ -732,27 +752,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
/* validate the handle */
for (i = 0; i < adev->uvd.max_handles; ++i) {
- if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
- if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
- DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
+ if (atomic_read(&adev->uvd.handles[i]) == handle) {
+ if (adev->uvd.filp[i] != ctx->parser->filp) {
+ DRM_ERROR("UVD handle collision detected!\n");
return -EINVAL;
}
return 0;
}
}
- DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
+ DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
return -ENOENT;
case 2:
/* it's a destroy msg, free the handle */
for (i = 0; i < adev->uvd.max_handles; ++i)
- atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
+ atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
amdgpu_bo_kunmap(bo);
return 0;
default:
- DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
+ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
return -EINVAL;
}
BUG();
@@ -1000,7 +1020,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (!ring->adev->uvd.address_64_bit) {
struct ttm_operation_ctx ctx = { true, false };
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_uvd_force_into_uvd_segment(bo);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
@@ -1045,19 +1065,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r < 0)
goto err_free;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
-
- amdgpu_job_free(job);
} else {
r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
AMDGPU_FENCE_OWNER_UNDEFINED, false);
if (r)
goto err_free;
- r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
+ r = amdgpu_job_submit(job, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r)
goto err_free;
@@ -1149,6 +1166,8 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
unsigned fences = 0, i, j;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
@@ -1259,7 +1278,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
* necessarily linear. So we need to count
* all non-zero handles.
*/
- if (atomic_read(&adev->uvd.inst->handles[i]))
+ if (atomic_read(&adev->uvd.handles[i]))
used_handles++;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 8b23a1b00c76..33c5f806f925 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -42,26 +42,29 @@ struct amdgpu_uvd_inst {
void *cpu_addr;
uint64_t gpu_addr;
void *saved_bo;
- atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
- struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct amdgpu_ring ring;
struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
struct amdgpu_irq_src irq;
- struct drm_sched_entity entity;
- struct drm_sched_entity entity_enc;
uint32_t srbm_soft_reset;
};
+#define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
+#define AMDGPU_UVD_HARVEST_UVD1 (1 << 1)
+
struct amdgpu_uvd {
const struct firmware *fw; /* UVD firmware */
unsigned fw_version;
unsigned max_handles;
unsigned num_enc_rings;
- uint8_t num_uvd_inst;
+ uint8_t num_uvd_inst;
bool address_64_bit;
bool use_ctx_buf;
- struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
+ struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
+ struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
+ atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
+ struct drm_sched_entity entity;
struct delayed_work idle_work;
+ unsigned harvest_config;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 23d960ec1cf2..b6ab4f5350c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -40,11 +40,11 @@
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
-#define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin"
-#define FIRMWARE_KABINI "radeon/kabini_vce.bin"
-#define FIRMWARE_KAVERI "radeon/kaveri_vce.bin"
-#define FIRMWARE_HAWAII "radeon/hawaii_vce.bin"
-#define FIRMWARE_MULLINS "radeon/mullins_vce.bin"
+#define FIRMWARE_BONAIRE "amdgpu/bonaire_vce.bin"
+#define FIRMWARE_KABINI "amdgpu/kabini_vce.bin"
+#define FIRMWARE_KAVERI "amdgpu/kaveri_vce.bin"
+#define FIRMWARE_HAWAII "amdgpu/hawaii_vce.bin"
+#define FIRMWARE_MULLINS "amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA "amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin"
@@ -190,8 +190,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
ring = &adev->vce.ring[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
- rq, NULL);
+ r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCE run queue.\n");
return r;
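This is one of many hunks converting to the reworked scheduler-entity API:
drm_sched_entity_init() now takes the entity, an array of candidate run queues,
the array length, and an optional guilty pointer (the explicit scheduler argument
is gone), and teardown pairs with drm_sched_entity_destroy() instead of _fini().
A hedged sketch of the call shape as used throughout this series (ring and entity
are placeholders):

	struct drm_sched_rq *rq;
	int r;

	/* single run queue at the desired priority */
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&entity, &rq, 1, NULL);
	if (r)
		return r;

	/* later, on teardown */
	drm_sched_entity_destroy(&entity);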
@@ -222,7 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
- drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
+ drm_sched_entity_destroy(&adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
(void **)&adev->vce.cpu_addr);
@@ -470,12 +469,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -532,19 +529,13 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
- if (r)
- goto err;
-
- amdgpu_job_free(job);
- } else {
- r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+ if (direct)
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ else
+ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
- if (r)
- goto err;
- }
+ if (r)
+ goto err;
if (fence)
*fence = dma_fence_get(f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 1b4ad9b2a755..798648a19710 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -140,6 +140,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->vcn.ring_enc[i]);
+ amdgpu_ring_fini(&adev->vcn.ring_jpeg);
+
release_firmware(adev->vcn.fw);
return 0;
@@ -209,6 +211,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
}
+ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
+
if (fences == 0) {
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, false);
@@ -225,7 +229,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
- if (set_clocks && adev->pm.dpm_enabled) {
+ if (set_clocks) {
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, true);
else
@@ -304,13 +308,10 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
}
ib->length_dw = 16;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
- amdgpu_job_free(job);
-
amdgpu_bo_fence(bo, f, false);
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
@@ -495,12 +496,10 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -549,12 +548,10 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -597,3 +594,127 @@ error:
dma_fence_put(fence);
return r;
}
+
+int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
+ r = amdgpu_ring_alloc(ring, 3);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
+ ring->idx, r);
+ return r;
+ }
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < adev->usec_timeout) {
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
+ struct dma_fence **fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ const unsigned ib_size_dw = 16;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+
+ ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0);
+ ib->ptr[1] = 0xDEADBEEF;
+ for (i = 2; i < 16; i += 2) {
+ ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+ ib->ptr[i+1] = 0;
+ }
+ ib->length_dw = 16;
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ struct dma_fence *fence = NULL;
+ long r = 0;
+
+ r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
+ goto error;
+ }
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
+ goto error;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ goto error;
+ } else {
+ r = 0;
+ }
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH));
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < adev->usec_timeout) {
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ } else {
+ DRM_ERROR("ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+
+ dma_fence_put(fence);
+
+error:
+ return r;
+}
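Both JPEG tests above follow the same shape: submit a packet that writes
0xDEADBEEF into a scratch register, then poll that register with a ~1 us delay
per iteration until it matches or adev->usec_timeout lapses. A self-contained
sketch of the polling half, with the hardware write simulated (names and the
timeout are illustrative):

#include <stdint.h>
#include <stdio.h>

#define USEC_TIMEOUT 100

static uint32_t sim_reg;	/* stands in for RREG32(...) */

static int poll_for_magic(void)
{
	for (unsigned int i = 0; i < USEC_TIMEOUT; i++) {
		if (sim_reg == 0xDEADBEEF)
			return (int)i;	/* usecs elapsed */
		/* real code delays ~1 us per iteration (DRM_UDELAY(1)) */
		if (i == 10)
			sim_reg = 0xDEADBEEF;	/* simulate HW completing */
	}
	return -1;	/* timed out, mapped to -EINVAL above */
}

int main(void)
{
	int r = poll_for_magic();

	if (r >= 0)
		printf("test succeeded in %d usecs\n", r);
	else
		printf("test failed\n");
	return 0;
}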
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 773010b9ff15..0b0b8638d73f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -66,6 +66,7 @@ struct amdgpu_vcn {
const struct firmware *fw; /* VCN firmware */
struct amdgpu_ring ring_dec;
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
+ struct amdgpu_ring ring_jpeg;
struct amdgpu_irq_src irq;
unsigned num_enc_rings;
};
@@ -83,4 +84,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring);
+int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index fdcb498f6d19..ece0ac703e27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -33,9 +33,11 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_gmc.h"
-/*
- * GPUVM
+/**
+ * DOC: GPUVM
+ *
* GPUVM is similar to the legacy gart on older asics, however
* rather than there being a single global gart table
* for the entire GPU, there are multiple VM page tables active
@@ -63,37 +65,84 @@ INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
#undef START
#undef LAST
-/* Local structure. Encapsulate some VM table update parameters to reduce
+/**
+ * struct amdgpu_pte_update_params - Local structure
+ *
+ * Encapsulate some VM table update parameters to reduce
* the number of function parameters
+ *
*/
struct amdgpu_pte_update_params {
- /* amdgpu device we do this update for */
+
+ /**
+ * @adev: amdgpu device we do this update for
+ */
struct amdgpu_device *adev;
- /* optional amdgpu_vm we do this update for */
+
+ /**
+ * @vm: optional amdgpu_vm we do this update for
+ */
struct amdgpu_vm *vm;
- /* address where to copy page table entries from */
+
+ /**
+ * @src: address where to copy page table entries from
+ */
uint64_t src;
- /* indirect buffer to fill with commands */
+
+ /**
+ * @ib: indirect buffer to fill with commands
+ */
struct amdgpu_ib *ib;
- /* Function which actually does the update */
+
+ /**
+ * @func: Function which actually does the update
+ */
void (*func)(struct amdgpu_pte_update_params *params,
struct amdgpu_bo *bo, uint64_t pe,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags);
- /* The next two are used during VM update by CPU
- * DMA addresses to use for mapping
- * Kernel pointer of PD/PT BO that needs to be updated
+ /**
+ * @pages_addr:
+ *
+ * DMA addresses to use for mapping, used during VM update by CPU
*/
dma_addr_t *pages_addr;
+
+ /**
+ * @kptr:
+ *
+ * Kernel pointer of PD/PT BO that needs to be updated,
+ * used during VM update by CPU
+ */
void *kptr;
};
-/* Helper to disable partial resident texture feature from a fence callback */
+/**
+ * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
+ */
struct amdgpu_prt_cb {
+
+ /**
+ * @adev: amdgpu device
+ */
struct amdgpu_device *adev;
+
+ /**
+ * @cb: callback
+ */
struct dma_fence_cb cb;
};
+/**
+ * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
+ *
+ * @base: base structure for tracking BO usage in a VM
+ * @vm: vm to which bo is to be added
+ * @bo: amdgpu buffer object
+ *
+ * Initialize an amdgpu_vm_bo_base structure and add it to the appropriate lists.
+ */
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo)
@@ -129,8 +178,10 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
* amdgpu_vm_level_shift - return the addr shift for each level
*
* @adev: amdgpu_device pointer
+ * @level: VMPT level
*
- * Returns the number of bits the pfn needs to be right shifted for a level.
+ * Returns:
+ * The number of bits the pfn needs to be right shifted for a level.
*/
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
unsigned level)
@@ -158,8 +209,10 @@ static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
* amdgpu_vm_num_entries - return the number of entries in a PD/PT
*
* @adev: amdgpu_device pointer
+ * @level: VMPT level
*
- * Calculate the number of entries in a page directory or page table.
+ * Returns:
+ * The number of entries in a page directory or page table.
*/
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
unsigned level)
@@ -182,8 +235,10 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
* amdgpu_vm_bo_size - returns the size of the BOs in bytes
*
* @adev: amdgpu_device pointer
+ * @level: VMPT level
*
- * Calculate the size of the BO for a page directory or page table in bytes.
+ * Returns:
+ * The size of the BO for a page directory or page table in bytes.
*/
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
@@ -221,6 +276,9 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
* @param: parameter for the validation callback
*
* Validate the page table BOs on command submission if neccessary.
+ *
+ * Returns:
+ * Validation result.
*/
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*validate)(void *p, struct amdgpu_bo *bo),
@@ -276,6 +334,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* @vm: VM to check
*
* Check if all VM PDs/PTs are ready for updates
+ *
+ * Returns:
+ * True if eviction list is empty.
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
@@ -286,10 +347,15 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* amdgpu_vm_clear_bo - initially clear the PDs/PTs
*
* @adev: amdgpu_device pointer
+ * @vm: VM to clear BO from
* @bo: BO to clear
* @level: level this BO is at
+ * @pte_support_ats: whether the PTEs support ATS
*
* Root PD needs to be reserved when calling this.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_vm *vm, struct amdgpu_bo *bo,
@@ -321,7 +387,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
ats_entries = 0;
}
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
r = reservation_object_reserve_shared(bo->tbo.resv);
if (r)
@@ -359,8 +425,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error_free;
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
+ &fence);
if (r)
goto error_free;
@@ -385,10 +451,16 @@ error:
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @parent: parent PT
* @saddr: start of the address range
* @eaddr: end of the address range
+ * @level: VMPT level
+ * @ats: whether the PTEs support ATS
*
* Make sure the page directories and page tables are allocated
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -423,11 +495,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
eaddr = eaddr & ((1 << shift) - 1);
flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ if (vm->root.base.bo->shadow)
+ flags |= AMDGPU_GEM_CREATE_SHADOW;
if (vm->use_cpu_for_update)
flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
else
- flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW);
+ flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
/* walk over the address space and allocate the page tables */
for (pt_idx = from; pt_idx <= to; ++pt_idx) {
@@ -496,6 +569,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
* @size: Size from start address we need.
*
* Make sure the page tables are allocated.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -561,6 +637,15 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
}
}
+/**
+ * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
+ *
+ * @ring: ring on which the job will be submitted
+ * @job: job to submit
+ *
+ * Returns:
+ * True if sync is needed.
+ */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job)
{
@@ -588,19 +673,17 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
return vm_flush_needed || gds_switch_needed;
}
-static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
-{
- return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
-}
-
/**
* amdgpu_vm_flush - hardware flush the vm
*
* @ring: ring to use for flush
- * @vmid: vmid number to use
- * @pd_addr: address of the page directory
+ * @job: related job
+ * @need_pipe_sync: whether a pipeline sync is needed
*
* Emit a VM flush when it is necessary.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
{
@@ -708,6 +791,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
* Returns the found bo_va or NULL if none is found
*
* Object has to be reserved!
+ *
+ * Returns:
+ * Found bo_va or NULL.
*/
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo)
@@ -789,7 +875,10 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
* @addr: the unmapped addr
*
* Look up the physical address of the page that the pte resolves
- * to and return the pointer for the page table entry.
+ * to.
+ *
+ * Returns:
+ * The pointer for the page table entry.
*/
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
@@ -842,6 +931,17 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
}
}
+
+/**
+ * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: related vm
+ * @owner: fence owner
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *owner)
{
@@ -895,7 +995,10 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
/*
* amdgpu_vm_invalidate_level - mark all PD levels as invalid
*
+ * @adev: amdgpu_device pointer
+ * @vm: related vm
* @parent: parent PD
+ * @level: VMPT level
*
* Mark all PD level as invalid after an error.
*/
@@ -930,7 +1033,9 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
* @vm: requested vm
*
* Makes sure all directories are up to date.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*/
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
@@ -980,7 +1085,7 @@ restart:
struct amdgpu_vm_bo_base,
vm_status);
bo_base->moved = false;
- list_move(&bo_base->vm_status, &vm->idle);
+ list_del_init(&bo_base->vm_status);
bo = bo_base->bo->parent;
if (!bo)
@@ -1009,15 +1114,15 @@ restart:
struct amdgpu_ring *ring;
struct dma_fence *fence;
- ring = container_of(vm->entity.sched, struct amdgpu_ring,
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
sched);
amdgpu_ring_pad_ib(ring, params.ib);
amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
AMDGPU_FENCE_OWNER_VM, false);
WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
+ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
+ &fence);
if (r)
goto error;
@@ -1117,14 +1222,15 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
* @params: see amdgpu_pte_update_params definition
- * @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
* @dst: destination address to map to, the next dst inside the function
* @flags: mapping flags
*
* Update the page tables in the range @start - @end.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
uint64_t start, uint64_t end,
@@ -1178,7 +1284,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
* @end: last PTE to handle
* @dst: addr those PTEs should point to
* @flags: hw mapping flags
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
uint64_t start, uint64_t end,
@@ -1250,7 +1358,9 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* @fence: optional resulting fence
*
* Fill in the page table entries between @start and @last.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
@@ -1294,7 +1404,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
addr, flags);
}
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
nptes = last - start + 1;
@@ -1326,7 +1436,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
ndw += ncmds * 10;
/* extra commands for begin/end fragments */
- ndw += 2 * 10 * adev->vm_manager.fragment_size;
+ if (vm->root.base.bo->shadow)
+ ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
+ else
+ ndw += 2 * 10 * adev->vm_manager.fragment_size;
params.func = amdgpu_vm_do_set_ptes;
}
@@ -1373,8 +1486,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
amdgpu_ring_pad_ib(ring, params.ib);
WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &f);
+ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
if (r)
goto error_free;
@@ -1402,7 +1514,9 @@ error_free:
*
* Split the mapping into smaller chunks so that each update fits
* into a SDMA IB.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
@@ -1455,7 +1569,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (nodes) {
addr = nodes->start << PAGE_SHIFT;
max_entries = (nodes->size - pfn) *
- (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ AMDGPU_GPU_PAGES_IN_CPU_PAGE;
} else {
addr = 0;
max_entries = S64_MAX;
@@ -1466,7 +1580,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
max_entries = min(max_entries, 16ull * 1024ull);
for (count = 1;
- count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
++count) {
uint64_t idx = pfn + count;
@@ -1480,7 +1594,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
dma_addr = pages_addr;
} else {
addr = pages_addr[pfn];
- max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
} else if (flags & AMDGPU_PTE_VALID) {
@@ -1495,7 +1609,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (r)
return r;
- pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
if (nodes && nodes->size == pfn) {
pfn = 0;
++nodes;
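These hunks fold the repeated (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE) expression into
the named AMDGPU_GPU_PAGES_IN_CPU_PAGE constant. For intuition, a tiny sketch
with assumed page sizes (illustrative values; the real macros come from the
kernel and ASIC headers):

#include <stdio.h>

#define CPU_PAGE_SIZE		4096u	/* assumed 4 KiB host page */
#define AMDGPU_GPU_PAGE_SIZE	4096u	/* GPU page granule */
#define GPU_PAGES_IN_CPU_PAGE	(CPU_PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)

int main(void)
{
	/* 1 on a 4 KiB-page host; 16 on a 64 KiB-page host */
	printf("GPU pages per CPU page: %u\n", GPU_PAGES_IN_CPU_PAGE);
	return 0;
}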
@@ -1515,7 +1629,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
* @clear: if true clear the entries
*
* Fill in the page table entries for @bo_va.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
@@ -1531,18 +1647,17 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
uint64_t flags;
int r;
- if (clear || !bo_va->base.bo) {
+ if (clear || !bo) {
mem = NULL;
nodes = NULL;
exclusive = NULL;
} else {
struct ttm_dma_tt *ttm;
- mem = &bo_va->base.bo->tbo.mem;
+ mem = &bo->tbo.mem;
nodes = mem->mm_node;
if (mem->mem_type == TTM_PL_TT) {
- ttm = container_of(bo_va->base.bo->tbo.ttm,
- struct ttm_dma_tt, ttm);
+ ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
exclusive = reservation_object_get_excl(bo->tbo.resv);
@@ -1610,6 +1725,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
/**
* amdgpu_vm_update_prt_state - update the global PRT state
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
@@ -1624,6 +1741,8 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
/**
* amdgpu_vm_prt_get - add a PRT user
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
@@ -1636,6 +1755,8 @@ static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
/**
* amdgpu_vm_prt_put - drop a PRT user
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
@@ -1645,6 +1766,9 @@ static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
/**
* amdgpu_vm_prt_cb - callback for updating the PRT status
+ *
+ * @fence: fence for the callback
+ * @_cb: the callback structure (embedded in an amdgpu_prt_cb)
*/
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
@@ -1656,6 +1780,9 @@ static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
/**
* amdgpu_vm_add_prt_cb - add callback for updating the PRT status
+ *
+ * @adev: amdgpu_device pointer
+ * @fence: fence for the callback
*/
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
struct dma_fence *fence)
@@ -1747,9 +1874,11 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
* or if an error occurred)
*
* Make sure all freed BOs are cleared in the PT.
- * Returns 0 for success.
- *
* PTs have to be reserved and mutex must be locked!
+ *
+ * Returns:
+ * 0 for success.
*/
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -1794,10 +1923,11 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
- * @sync: sync object to add fences to
*
* Make sure all BOs which are moved are updated in the PTs.
- * Returns 0 for success.
+ *
+ * Returns:
+ * 0 for success.
*
* PTs have to be reserved!
*/
@@ -1852,7 +1982,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
*
* Add @bo into the requested vm.
* Add @bo to the list of bos associated with the vm
- * Returns newly added bo_va or NULL for failure
+ *
+ * Returns:
+ * Newly added bo_va or NULL for failure
*
* Object has to be reserved!
*/
@@ -1915,10 +2047,13 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
* @bo_va: bo_va to store the address
* @saddr: where to map the BO
* @offset: requested offset in the BO
+ * @size: BO size in bytes
* @flags: attributes of pages (read/write/valid/etc.)
*
* Add a mapping of the BO at the specified addr into the VM.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
@@ -1976,11 +2111,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
* @bo_va: bo_va to store the address
* @saddr: where to map the BO
* @offset: requested offset in the BO
+ * @size: BO size in bytes
* @flags: attributes of pages (read/write/valid/etc.)
*
* Add a mapping of the BO at the specified addr into the VM. Replace existing
* mappings as we do so.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
@@ -2037,7 +2175,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
* @saddr: where to the BO is mapped
*
* Remove a mapping of the BO at the specified addr from the VM.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
@@ -2091,7 +2231,9 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
* @size: size of the range
*
* Remove all mappings in a range, split them as appropriate.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*/
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -2188,8 +2330,13 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
* amdgpu_vm_bo_lookup_mapping - find mapping by address
*
* @vm: the requested VM
+ * @addr: the address
*
* Find a mapping by its address.
+ *
+ * Returns:
+ * The amdgpu_bo_va_mapping matching @addr, or NULL if none is found.
*/
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
uint64_t addr)
@@ -2198,6 +2345,35 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
}
/**
+ * amdgpu_vm_bo_trace_cs - trace all reserved mappings
+ *
+ * @vm: the requested vm
+ * @ticket: CS ticket
+ *
+ * Trace all mappings of BOs reserved during a command submission.
+ */
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+
+ if (!trace_amdgpu_vm_bo_cs_enabled())
+ return;
+
+ for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
+ mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
+ if (mapping->bo_va && mapping->bo_va->base.bo) {
+ struct amdgpu_bo *bo;
+
+ bo = mapping->bo_va->base.bo;
+ if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
+ continue;
+ }
+
+ trace_amdgpu_vm_bo_cs(mapping);
+ }
+}
+
+/**
* amdgpu_vm_bo_rmv - remove a bo to a specific vm
*
* @adev: amdgpu_device pointer
@@ -2241,8 +2417,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
* amdgpu_vm_bo_invalidate - mark the bo as invalid
*
* @adev: amdgpu_device pointer
- * @vm: requested vm
* @bo: amdgpu buffer object
+ * @evicted: whether the BO is evicted
*
* Mark @bo as invalid.
*/
@@ -2282,6 +2458,14 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
}
}
+/**
+ * amdgpu_vm_get_block_size - calculate VM page table size as power of two
+ *
+ * @vm_size: VM size
+ *
+ * Returns:
+ * VM page table size as a power of two
+ */
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
/* Total bits covered by PD + PTs */
@@ -2300,6 +2484,10 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
*
* @adev: amdgpu_device pointer
* @vm_size: the default vm size if it's set auto
+ * @fragment_size_default: Default PTE fragment size
+ * @max_level: max VMPT level
+ * @max_bits: max address space size in bits
*/
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
uint32_t fragment_size_default, unsigned max_level,
@@ -2367,8 +2555,12 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
* @adev: amdgpu_device pointer
* @vm: requested vm
* @vm_context: Indicates if it is a GFX or Compute context
+ * @pasid: Process address space identifier
*
* Init @vm fields.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context, unsigned int pasid)
@@ -2400,8 +2592,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ring_instance %= adev->vm_manager.vm_pte_num_rings;
ring = adev->vm_manager.vm_pte_rings[ring_instance];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- r = drm_sched_entity_init(&ring->sched, &vm->entity,
- rq, NULL);
+ r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
if (r)
return r;
@@ -2419,14 +2610,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
+ WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
vm->last_update = NULL;
flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
if (vm->use_cpu_for_update)
flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- else
+ else if (vm_context != AMDGPU_VM_CONTEXT_COMPUTE)
flags |= AMDGPU_GEM_CREATE_SHADOW;
size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
@@ -2481,7 +2672,7 @@ error_free_root:
vm->root.base.bo = NULL;
error_free_sched_entity:
- drm_sched_entity_fini(&ring->sched, &vm->entity);
+ drm_sched_entity_destroy(&vm->entity);
return r;
}
@@ -2489,6 +2680,9 @@ error_free_sched_entity:
/**
* amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
*
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
* This only works on GFX VMs that don't have any BOs added and no
* page tables allocated yet.
*
@@ -2498,10 +2692,10 @@ error_free_sched_entity:
* - pasid (old PASID is released, because compute manages its own PASIDs)
*
* Reinitializes the page directory to reflect the changed ATS
- * setting. May leave behind an unused shadow BO for the page
- * directory when switching from SDMA updates to CPU updates.
+ * setting.
*
- * Returns 0 for success, -errno for errors.
+ * Returns:
+ * 0 for success, -errno for errors.
*/
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
@@ -2535,7 +2729,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->pte_support_ats = pte_support_ats;
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
+ WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
if (vm->pasid) {
@@ -2548,6 +2742,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->pasid = 0;
}
+ /* Free the shadow bo for compute VM */
+ amdgpu_bo_unref(&vm->root.base.bo->shadow);
+
error:
amdgpu_bo_unreserve(vm->root.base.bo);
return r;
@@ -2614,7 +2811,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
- drm_sched_entity_fini(vm->entity.sched, &vm->entity);
+ drm_sched_entity_destroy(&vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
@@ -2656,8 +2853,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @pasid: PASID do identify the VM
*
- * This function is expected to be called in interrupt context. Returns
- * true if there was fault credit, false otherwise
+ * This function is expected to be called in interrupt context.
+ *
+ * Returns:
+ * True if there was fault credit, false otherwise
*/
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
unsigned int pasid)
@@ -2711,7 +2910,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
*/
#ifdef CONFIG_X86_64
if (amdgpu_vm_update_mode == -1) {
- if (amdgpu_vm_is_large_bar(adev))
+ if (amdgpu_gmc_vram_full_visible(&adev->gmc))
adev->vm_manager.vm_update_mode =
AMDGPU_VM_USE_CPU_FOR_COMPUTE;
else
@@ -2741,6 +2940,16 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
amdgpu_vmid_mgr_fini(adev);
}
+/**
+ * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
+ *
+ * @dev: drm device pointer
+ * @data: pointer to the union drm_amdgpu_vm arguments
+ * @filp: drm file pointer
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
union drm_amdgpu_vm *args = data;
@@ -2764,3 +2973,42 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return 0;
}
+
+/**
+ * amdgpu_vm_get_task_info - Extracts task info for a PASID.
+ *
+ * @adev: amdgpu device pointer
+ * @pasid: PASID identifier for VM
+ * @task_info: task_info to fill.
+ */
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+ struct amdgpu_task_info *task_info)
+{
+ struct amdgpu_vm *vm;
+
+ spin_lock(&adev->vm_manager.pasid_lock);
+
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm)
+ *task_info = vm->task_info;
+
+ spin_unlock(&adev->vm_manager.pasid_lock);
+}
+
+/**
+ * amdgpu_vm_set_task_info - Sets VMs task info.
+ *
+ * @vm: vm for which to set the info
+ */
+void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
+{
+ if (!vm->task_info.pid) {
+ vm->task_info.pid = current->pid;
+ get_task_comm(vm->task_info.task_name, current);
+
+ if (current->group_leader->mm == current->mm) {
+ vm->task_info.tgid = current->group_leader->pid;
+ get_task_comm(vm->task_info.process_name, current->group_leader);
+ }
+ }
+}
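
The two helpers above form a producer/consumer pair: amdgpu_vm_set_task_info() records the submitting task once per VM, and amdgpu_vm_get_task_info() looks that record up by PASID under pasid_lock. A minimal consumer sketch, modeled on the gmc_v8_0 fault path later in this patch; the function name is hypothetical, and the zero-initialized local keeps the printout harmless when no VM matches the PASID:

    /* Hypothetical consumer: attribute a GPU fault to a task. The
     * zero-initialized amdgpu_task_info leaves empty strings and pid 0
     * when no VM is registered for this PASID.
     */
    static void sketch_report_fault(struct amdgpu_device *adev,
                                    unsigned int pasid)
    {
            struct amdgpu_task_info task_info = { 0 };

            amdgpu_vm_get_task_info(adev, pasid, &task_info);
            dev_err(adev->dev,
                    "GPU fault for process %s pid %d thread %s pid %d\n",
                    task_info.process_name, task_info.tgid,
                    task_info.task_name, task_info.pid);
    }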
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 061b99a18cb8..67a15d439ac0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -164,6 +164,14 @@ struct amdgpu_vm_pt {
#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
+
+struct amdgpu_task_info {
+ char process_name[TASK_COMM_LEN];
+ char task_name[TASK_COMM_LEN];
+ pid_t pid;
+ pid_t tgid;
+};
+
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root_cached va;
@@ -215,6 +223,9 @@ struct amdgpu_vm {
/* Valid while the PD is reserved or fenced */
uint64_t pd_phys_addr;
+
+ /* Some basic info about the task */
+ struct amdgpu_task_info task_info;
};
struct amdgpu_vm_manager {
@@ -307,6 +318,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
uint64_t addr);
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
@@ -317,4 +329,9 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+ struct amdgpu_task_info *task_info);
+
+void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index b6333f92ba45..9cfa8a9ada92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -97,33 +97,29 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
}
/**
- * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
+ * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
*
* @bo: &amdgpu_bo buffer object (must be in VRAM)
*
* Returns:
- * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
+ * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
*/
-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_mem_reg *mem = &bo->tbo.mem;
struct drm_mm_node *nodes = mem->mm_node;
unsigned pages = mem->num_pages;
- u64 usage = 0;
+ u64 usage;
- if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
- return 0;
+ if (amdgpu_gmc_vram_full_visible(&adev->gmc))
+ return amdgpu_bo_size(bo);
if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
- return amdgpu_bo_size(bo);
+ return 0;
- while (nodes && pages) {
- usage += nodes->size << PAGE_SHIFT;
- usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
- pages -= nodes->size;
- ++nodes;
- }
+ for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
+ usage += amdgpu_vram_mgr_vis_size(adev, nodes);
return usage;
}
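
The rewritten loop inverts the old accounting: rather than starting from the node size and subtracting the visible share, it sums amdgpu_vram_mgr_vis_size() per drm_mm node directly, with the two early returns covering the all-visible and all-invisible cases. A hedged sketch of the per-node clamp the helper is assumed to perform (node start/size are in pages; the name is illustrative):

    /* Illustrative per-node arithmetic, assuming visibility is a plain
     * cutoff at the visible_vram_size boundary (vis_pages, in pages).
     */
    static u64 sketch_node_visible_bytes(struct drm_mm_node *node,
                                         u64 vis_pages)
    {
            u64 start = node->start;
            u64 end = node->start + node->size;

            if (start >= vis_pages)
                    return 0;                         /* fully invisible */
            if (end <= vis_pages)
                    return node->size << PAGE_SHIFT;  /* fully visible */
            return (vis_pages - start) << PAGE_SHIFT; /* straddles cutoff */
    }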
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 7fbad2f5f0bd..d2469453dca2 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -49,10 +49,10 @@
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
-MODULE_FIRMWARE("radeon/bonaire_smc.bin");
-MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
-MODULE_FIRMWARE("radeon/hawaii_smc.bin");
-MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_smc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin");
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -951,12 +951,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
else
pi->battery_state = false;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
- if (adev->pm.dpm.ac_power == false) {
+ if (adev->pm.ac_power == false) {
for (i = 0; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].mclk > max_limits->mclk)
ps->performance_levels[i].mclk = max_limits->mclk;
@@ -4078,7 +4078,7 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
const struct amdgpu_clock_and_voltage_limits *max_limits;
int i;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4127,7 +4127,7 @@ static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
const struct amdgpu_clock_and_voltage_limits *max_limits;
int i;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4160,7 +4160,7 @@ static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
const struct amdgpu_clock_and_voltage_limits *max_limits;
int i;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4191,7 +4191,7 @@ static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
const struct amdgpu_clock_and_voltage_limits *max_limits;
int i;
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -5815,7 +5815,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
if (err)
goto out;
@@ -5846,8 +5846,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
adev->pm.dpm.priv = pi;
pi->sys_pcie_mask =
- (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
- CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
+ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
@@ -6767,6 +6766,19 @@ static int ci_dpm_read_sensor(void *handle, int idx,
}
}
+static int ci_set_powergating_by_smu(void *handle,
+ uint32_t block_type, bool gate)
+{
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ ci_dpm_powergate_uvd(handle, gate);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static const struct amd_ip_funcs ci_dpm_ip_funcs = {
.name = "ci_dpm",
.early_init = ci_dpm_early_init,
@@ -6804,7 +6816,7 @@ static const struct amd_pm_funcs ci_dpm_funcs = {
.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
.force_performance_level = &ci_dpm_force_performance_level,
.vblank_too_short = &ci_dpm_vblank_too_short,
- .powergate_uvd = &ci_dpm_powergate_uvd,
+ .set_powergating_by_smu = &ci_set_powergating_by_smu,
.set_fan_control_mode = &ci_dpm_set_fan_control_mode,
.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
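
The callback swap above replaces the UVD-only .powergate_uvd hook with the generic .set_powergating_by_smu, which dispatches on an IP-block type (see ci_set_powergating_by_smu() earlier in this file's diff). A hedged sketch of the calling side, assuming the amdgpu_dpm_set_powergating_by_smu() wrapper used elsewhere in this patch forwards to that hook:

    /* Illustrative caller (hypothetical name): gate or ungate UVD
     * through the generic per-block entry point.
     */
    static void sketch_gate_uvd(struct amdgpu_device *adev, bool gate)
    {
            if (adev->powerplay.pp_funcs->set_powergating_by_smu)
                    amdgpu_dpm_set_powergating_by_smu(adev,
                                    AMD_IP_BLOCK_TYPE_UVD, gate);
    }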
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 8ff4c60d1b59..78ab939ae5d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1476,7 +1476,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
- mdelay(100);
+ msleep(100);
/* linkctl */
pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
@@ -2003,9 +2003,9 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
if (amdgpu_dpm == -1)
- amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
- else
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -2024,9 +2024,9 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
if (amdgpu_dpm == -1)
- amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
- else
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index a7576255cc30..d0fa2aac2388 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -54,16 +54,16 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
static int cik_sdma_soft_reset(void *handle);
-MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
-MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
-MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
-MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
-MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
-MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
-MODULE_FIRMWARE("radeon/kabini_sdma.bin");
-MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
-MODULE_FIRMWARE("radeon/mullins_sdma.bin");
-MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/kabini_sdma.bin");
+MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
+MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
@@ -132,9 +132,9 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
if (err)
goto out;
@@ -177,9 +177,8 @@ static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+ return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;
}
/**
@@ -192,9 +191,8 @@ static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me],
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
(lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}
@@ -248,7 +246,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
u32 ref_and_mask;
- if (ring == &ring->adev->sdma.instance[0].ring)
+ if (ring->me == 0)
ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
else
ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
@@ -1290,8 +1288,10 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ }
}
static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
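
Caching the instance index in ring->me at init time, as the loop above does, lets every hot path drop the pointer-identity test against instance[0].ring. A minimal sketch of the resulting lookup (hypothetical helper name; sdma_offsets is the per-instance MMIO offset table already used in this file):

    /* With ring->me assigned once in cik_sdma_set_ring_funcs(), a
     * per-instance register address is a plain table lookup.
     */
    static u32 sketch_sdma_wptr_reg(struct amdgpu_ring *ring)
    {
            return mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me];
    }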
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index ada241bfeee9..308f9f238bc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -41,6 +41,8 @@
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1855,15 +1857,14 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
@@ -2370,13 +2371,14 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_put_unlocked(obj);
return ret;
}
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v10_0_lock_cursor(crtc, true);
@@ -2737,14 +2739,14 @@ static int dce_v10_0_sw_init(void *handle)
return r;
}
- for (i = 8; i < 20; i += 2) {
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r)
return r;
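
The same reshaping repeats across every DCE variant below: amdgpu_bo_pin() no longer returns the GPU address through an out parameter, so both the atomic (already pinned) and non-atomic paths fall through to a single amdgpu_bo_gpu_offset() query. A hedged sketch of the new idiom, using only calls visible in these hunks:

    /* Sketch of the reserve/pin/offset idiom after the API change;
     * the offset is only meaningful while the BO stays pinned.
     */
    static int sketch_pin_fb(struct amdgpu_bo *abo, bool atomic,
                             u64 *fb_location)
    {
            int r = amdgpu_bo_reserve(abo, false);

            if (unlikely(r != 0))
                    return r;
            if (!atomic) {
                    r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
                    if (unlikely(r != 0)) {
                            amdgpu_bo_unreserve(abo);
                            return r;
                    }
            }
            *fb_location = amdgpu_bo_gpu_offset(abo);
            amdgpu_bo_unreserve(abo);
            return 0;
    }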
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index a5b96eac3033..76dfb76f7900 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -41,6 +41,8 @@
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1897,15 +1899,14 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
@@ -2449,13 +2450,14 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_put_unlocked(obj);
return ret;
}
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v11_0_lock_cursor(crtc, true);
@@ -2858,14 +2860,14 @@ static int dce_v11_0_sw_init(void *handle)
return r;
}
- for (i = 8; i < 20; i += 2) {
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 394cc1e8fe20..c9adc627305d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1811,15 +1811,14 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
@@ -2263,13 +2262,14 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_put_unlocked(obj);
return ret;
}
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v6_0_lock_cursor(crtc, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index c9b9ab8f1b05..50cd03beac7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1786,15 +1786,14 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
@@ -2274,13 +2273,14 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_put_unlocked(obj);
return ret;
}
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v8_0_lock_cursor(crtc, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index dbf2ccd0c744..15257634a53a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -36,6 +36,7 @@
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "dce_virtual.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
@@ -269,25 +270,18 @@ static int dce_virtual_early_init(void *handle)
static struct drm_encoder *
dce_virtual_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
return encoder;
}
/* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
+
return NULL;
}
@@ -378,7 +372,7 @@ static int dce_virtual_sw_init(void *handle)
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 229, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
if (r)
return r;
@@ -634,7 +628,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
drm_connector_register(connector);
/* link them */
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index cd6bf291a853..de184a886057 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -44,30 +44,30 @@ static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
-MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
-MODULE_FIRMWARE("radeon/tahiti_me.bin");
-MODULE_FIRMWARE("radeon/tahiti_ce.bin");
-MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
-
-MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
-MODULE_FIRMWARE("radeon/pitcairn_me.bin");
-MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
-MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
-
-MODULE_FIRMWARE("radeon/verde_pfp.bin");
-MODULE_FIRMWARE("radeon/verde_me.bin");
-MODULE_FIRMWARE("radeon/verde_ce.bin");
-MODULE_FIRMWARE("radeon/verde_rlc.bin");
-
-MODULE_FIRMWARE("radeon/oland_pfp.bin");
-MODULE_FIRMWARE("radeon/oland_me.bin");
-MODULE_FIRMWARE("radeon/oland_ce.bin");
-MODULE_FIRMWARE("radeon/oland_rlc.bin");
-
-MODULE_FIRMWARE("radeon/hainan_pfp.bin");
-MODULE_FIRMWARE("radeon/hainan_me.bin");
-MODULE_FIRMWARE("radeon/hainan_ce.bin");
-MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_pfp.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_me.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_ce.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/pitcairn_pfp.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_me.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_ce.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/verde_pfp.bin");
+MODULE_FIRMWARE("amdgpu/verde_me.bin");
+MODULE_FIRMWARE("amdgpu/verde_ce.bin");
+MODULE_FIRMWARE("amdgpu/verde_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/oland_pfp.bin");
+MODULE_FIRMWARE("amdgpu/oland_me.bin");
+MODULE_FIRMWARE("amdgpu/oland_ce.bin");
+MODULE_FIRMWARE("amdgpu/oland_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/hainan_pfp.bin");
+MODULE_FIRMWARE("amdgpu/hainan_me.bin");
+MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
+MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");
static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
@@ -335,7 +335,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -346,7 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -357,7 +357,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -368,7 +368,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 42b6144c1fd5..95452c5a9df6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -57,36 +57,36 @@ static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
-MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
-MODULE_FIRMWARE("radeon/bonaire_me.bin");
-MODULE_FIRMWARE("radeon/bonaire_ce.bin");
-MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
-MODULE_FIRMWARE("radeon/bonaire_mec.bin");
-
-MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
-MODULE_FIRMWARE("radeon/hawaii_me.bin");
-MODULE_FIRMWARE("radeon/hawaii_ce.bin");
-MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
-MODULE_FIRMWARE("radeon/hawaii_mec.bin");
-
-MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
-MODULE_FIRMWARE("radeon/kaveri_me.bin");
-MODULE_FIRMWARE("radeon/kaveri_ce.bin");
-MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
-MODULE_FIRMWARE("radeon/kaveri_mec.bin");
-MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
-
-MODULE_FIRMWARE("radeon/kabini_pfp.bin");
-MODULE_FIRMWARE("radeon/kabini_me.bin");
-MODULE_FIRMWARE("radeon/kabini_ce.bin");
-MODULE_FIRMWARE("radeon/kabini_rlc.bin");
-MODULE_FIRMWARE("radeon/kabini_mec.bin");
-
-MODULE_FIRMWARE("radeon/mullins_pfp.bin");
-MODULE_FIRMWARE("radeon/mullins_me.bin");
-MODULE_FIRMWARE("radeon/mullins_ce.bin");
-MODULE_FIRMWARE("radeon/mullins_rlc.bin");
-MODULE_FIRMWARE("radeon/mullins_mec.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");
+
+MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");
+
+MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");
+
+MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
+MODULE_FIRMWARE("amdgpu/kabini_me.bin");
+MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
+MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
+MODULE_FIRMWARE("amdgpu/kabini_mec.bin");
+
+MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
+MODULE_FIRMWARE("amdgpu/mullins_me.bin");
+MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
+MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
+MODULE_FIRMWARE("amdgpu/mullins_mec.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
@@ -925,7 +925,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -933,7 +933,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -941,7 +941,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -949,7 +949,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -958,7 +958,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
goto out;
if (adev->asic_type == CHIP_KAVERI) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -967,7 +967,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
goto out;
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 818874b13c99..5cd45210113f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -51,6 +51,8 @@
#include "smu/smu_7_1_3_d.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
#define GFX8_NUM_GFX_RINGS 1
#define GFX8_MEC_HPD_SIZE 2048
@@ -704,6 +706,17 @@ static const u32 stoney_mgcg_cgcg_init[] =
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
};
+
+static const char * const sq_edc_source_names[] = {
+ "SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
+ "SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
+ "SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
+ "SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
+ "SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
+ "SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
+ "SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
+};
+
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
@@ -866,26 +879,32 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
- uint32_t scratch;
- uint32_t tmp = 0;
+
+ unsigned int index;
+ uint64_t gpu_addr;
+ uint32_t tmp;
long r;
- r = amdgpu_gfx_scratch_get(adev, &scratch);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}
- WREG32(scratch, 0xCAFEDEAD);
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 16, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err1;
}
- ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
- ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
- ib.ptr[2] = 0xDEADBEEF;
- ib.length_dw = 3;
+ ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+ ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+ ib.ptr[2] = lower_32_bits(gpu_addr);
+ ib.ptr[3] = upper_32_bits(gpu_addr);
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
@@ -900,20 +919,21 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
- tmp = RREG32(scratch);
+
+ tmp = adev->wb.wb[index];
if (tmp == 0xDEADBEEF) {
DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ DRM_ERROR("ib test on ring %d failed\n", ring->idx);
r = -EINVAL;
}
+
err2:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err1:
- amdgpu_gfx_scratch_free(adev, scratch);
+ amdgpu_device_wb_free(adev, index);
return r;
}
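
The IB test above now targets a write-back (WB) slot in system memory instead of a scratch register: the IB emits one WRITE_DATA packet with write-confirm, and the CPU checks the slot after the fence signals. A condensed sketch of the five-dword packet, using the same macros as the hunk (DST_SEL 5 is assumed to select memory, matching the hunk's usage):

    /* Condensed sketch of the 5-dword WRITE_DATA test packet. */
    static void sketch_emit_wb_write(struct amdgpu_ib *ib, u64 gpu_addr)
    {
            ib->ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
            ib->ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; /* memory dst */
            ib->ptr[2] = lower_32_bits(gpu_addr);            /* WB slot */
            ib->ptr[3] = upper_32_bits(gpu_addr);
            ib->ptr[4] = 0xDEADBEEF;                         /* marker */
            ib->length_dw = 5;
    }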
@@ -1999,6 +2019,8 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
return 0;
}
+static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
+
static int gfx_v8_0_sw_init(void *handle)
{
int i, j, k, r, ring_id;
@@ -2027,27 +2049,43 @@ static int gfx_v8_0_sw_init(void *handle)
adev->gfx.mec.num_queue_per_pipe = 8;
/* KIQ event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 178, &adev->gfx.kiq.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
if (r)
return r;
/* EOP Event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;
+ /* Add CP EDC/ECC irq */
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
+ &adev->gfx.cp_ecc_error_irq);
+ if (r)
+ return r;
+
+ /* SQ interrupts. */
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
+ &adev->gfx.sq_irq);
+ if (r) {
+ DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
+ return r;
+ }
+
+ INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);
+
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
gfx_v8_0_scratch_init(adev);
@@ -5111,6 +5149,10 @@ static int gfx_v8_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
+
+ amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);
+
/* disable KCQ to avoid CPC touch memory not valid anymore */
for (i = 0; i < adev->gfx.num_compute_rings; i++)
gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
@@ -5542,9 +5584,19 @@ static int gfx_v8_0_late_init(void *handle)
if (r)
return r;
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_GATE);
+ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (r) {
+ DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
+ return r;
+ }
+
+ r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
+ if (r) {
+ DRM_ERROR(
+ "amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n",
+ r);
+ return r;
+ }
return 0;
}
@@ -5552,14 +5604,12 @@ static int gfx_v8_0_late_init(void *handle)
static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- if ((adev->asic_type == CHIP_POLARIS11) ||
+ if (((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM))
+ (adev->asic_type == CHIP_VEGAM)) &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu)
/* Send msg to SMU via Powerplay */
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- enable ?
- AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
@@ -6787,6 +6837,77 @@ static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ int enable_flag;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ enable_flag = 0;
+ break;
+
+ case AMDGPU_IRQ_STATE_ENABLE:
+ enable_flag = 1;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+ WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+ WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+ enable_flag);
+
+ return 0;
+}
+
+static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ int enable_flag;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ enable_flag = 1;
+ break;
+
+ case AMDGPU_IRQ_STATE_ENABLE:
+ enable_flag = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL,
+ enable_flag);
+
+ return 0;
+}
+
static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -6837,6 +6958,114 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("CP EDC/ECC error detected.");
+ return 0;
+}
+
+static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data)
+{
+ u32 enc, se_id, sh_id, cu_id;
+ char type[20];
+ int sq_edc_source = -1;
+
+ enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
+ se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);
+
+ switch (enc) {
+ case 0:
+ DRM_INFO("SQ general purpose intr detected:"
+ "se_id %d, immed_overflow %d, host_reg_overflow %d,"
+ "host_cmd_overflow %d, cmd_timestamp %d,"
+ "reg_timestamp %d, thread_trace_buff_full %d,"
+ "wlt %d, thread_trace %d.\n",
+ se_id,
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
+ );
+ break;
+ case 1:
+ case 2:
+
+ cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
+ sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);
+
+ /*
+ * This function can be called either directly from the ISR
+ * or from the BH worker; only in the latter case (in_task())
+ * can the SQ_EDC_INFO register instance be read safely.
+ */
+ if (in_task()) {
+ mutex_lock(&adev->grbm_idx_mutex);
+ gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);
+
+ sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
+
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (enc == 1)
+ sprintf(type, "instruction intr");
+ else
+ sprintf(type, "EDC/ECC error");
+
+ DRM_INFO(
+ "SQ %s detected: "
+ "se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d "
+ "trap %s, sq_ed_info.source %s.\n",
+ type, se_id, sh_id, cu_id,
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
+ REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
+ (sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable"
+ );
+ break;
+ default:
+ DRM_ERROR("SQ invalid encoding type\n.");
+ }
+}
+
+static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
+{
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
+ struct sq_work *sq_work = container_of(work, struct sq_work, work);
+
+ gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data);
+}
+
+static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ unsigned ih_data = entry->src_data[0];
+
+ /*
+ * Try to submit work so SQ_EDC_INFO can be accessed from
+ * the BH. If the previous work submission hasn't finished yet,
+ * just print whatever info is available directly from the ISR.
+ */
+ if (work_pending(&adev->gfx.sq_work.work)) {
+ gfx_v8_0_parse_sq_irq(adev, ih_data);
+ } else {
+ adev->gfx.sq_work.ih_data = ih_data;
+ schedule_work(&adev->gfx.sq_work.work);
+ }
+
+ return 0;
+}
+
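The handler above is a single-slot ISR-to-worker handoff: sq_work.ih_data can hold exactly one payload, so a still-pending worker makes the ISR fall back to inline parsing without the EDC source. The sq_work container itself is not part of this hunk; a plausible shape, stated purely as an assumption, is:

    /* Assumed shape of the container (defined elsewhere in this series,
     * presumably in a gfx header): one work item plus one payload slot.
     */
    struct sq_work {
            struct work_struct work;
            unsigned int ih_data;   /* single-slot payload from the ISR */
    };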
static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned int type,
@@ -7037,6 +7266,16 @@ static const struct amdgpu_irq_src_funcs gfx_v8_0_kiq_irq_funcs = {
.process = gfx_v8_0_kiq_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
+ .set = gfx_v8_0_set_cp_ecc_int_state,
+ .process = gfx_v8_0_cp_ecc_error_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
+ .set = gfx_v8_0_set_sq_int_state,
+ .process = gfx_v8_0_sq_irq,
+};
+
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
@@ -7050,6 +7289,12 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
adev->gfx.kiq.irq.funcs = &gfx_v8_0_kiq_irq_funcs;
+
+ adev->gfx.cp_ecc_error_irq.num_types = 1;
+ adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
+
+ adev->gfx.sq_irq.num_types = 1;
+ adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
}
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index a69153435ea7..ef00d14f8645 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -38,6 +38,8 @@
#include "clearstate_gfx9.h"
#include "v9_structs.h"
+#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
+
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -102,11 +104,22 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -648,7 +661,10 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- if (adev->gfx.rlc.is_rlc_v2_1) {
+ if (adev->gfx.rlc.is_rlc_v2_1 &&
+ adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
+ adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
+ adev->gfx.rlc.save_restore_list_srm_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
info->fw = adev->gfx.rlc_fw;
@@ -943,6 +959,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
dst_ptr = adev->gfx.rlc.cs_ptr;
gfx_v9_0_get_csb_buffer(adev, dst_ptr);
amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
@@ -971,6 +988,39 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
return 0;
}
+static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (unlikely(r != 0))
+ return r;
+
+ r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ if (!r)
+ adev->gfx.rlc.clear_state_gpu_addr =
+ amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
+
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+ return r;
+}
+
+static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (!adev->gfx.rlc.clear_state_obj)
+ return;
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
+ if (likely(r == 0)) {
+ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+}
+
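These helpers only make sense as a pair: the clear-state buffer must sit in pinned VRAM exactly while the RLC may fetch it. The hunks further down wire them into hw_init/hw_fini; a hedged sketch of that ordering (function names are illustrative, the calls are the ones this diff adds):

    /* Illustrative ordering: pin before the RLC consumes the CSB,
     * unpin only after the RLC has been stopped.
     */
    static int sketch_gfx_hw_init(struct amdgpu_device *adev)
    {
            int r = gfx_v9_0_csb_vram_pin(adev);

            if (r)
                    return r;
            return gfx_v9_0_rlc_resume(adev);
    }

    static void sketch_gfx_hw_fini(struct amdgpu_device *adev)
    {
            gfx_v9_0_rlc_stop(adev);
            gfx_v9_0_csb_vram_unpin(adev);
    }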
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -1451,23 +1501,23 @@ static int gfx_v9_0_sw_init(void *handle)
adev->gfx.mec.num_queue_per_pipe = 8;
/* KIQ event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT, &adev->gfx.kiq.irq);
if (r)
return r;
/* EOP Event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;
@@ -2148,8 +2198,16 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
{
- if (!adev->gfx.rlc.is_rlc_v2_1)
- return;
+ gfx_v9_0_init_csb(adev);
+
+ /*
+ * The RLC save/restore list is supported from v2_1 onward
+ * and is required by the gfxoff feature.
+ */
+ if (adev->gfx.rlc.is_rlc_v2_1) {
+ gfx_v9_1_init_rlc_save_restore_list(adev);
+ gfx_v9_0_enable_save_restore_machine(adev);
+ }
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
@@ -2157,10 +2215,6 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS)) {
- gfx_v9_0_init_csb(adev);
- gfx_v9_1_init_rlc_save_restore_list(adev);
- gfx_v9_0_enable_save_restore_machine(adev);
-
WREG32(mmRLC_JUMP_TABLE_RESTORE,
adev->gfx.rlc.cp_table_gpu_addr >> 8);
gfx_v9_0_init_gfx_power_gating(adev);
@@ -2252,9 +2306,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
/* disable CG */
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
- /* disable PG */
- WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
-
gfx_v9_0_rlc_reset(adev);
gfx_v9_0_init_pg(adev);
@@ -3116,6 +3167,10 @@ static int gfx_v9_0_hw_init(void *handle)
gfx_v9_0_gpu_init(adev);
+ r = gfx_v9_0_csb_vram_pin(adev);
+ if (r)
+ return r;
+
r = gfx_v9_0_rlc_resume(adev);
if (r)
return r;
@@ -3224,6 +3279,8 @@ static int gfx_v9_0_hw_fini(void *handle)
gfx_v9_0_cp_enable(adev, false);
gfx_v9_0_rlc_stop(adev);
+ gfx_v9_0_csb_vram_unpin(adev);
+
return 0;
}
@@ -3433,7 +3490,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
/* wait for RLC_SAFE_MODE */
for (i = 0; i < adev->usec_timeout; i++) {
- if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
@@ -3510,8 +3567,11 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
- data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
- RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
+
+ if (adev->asic_type != CHIP_VEGA12)
+ data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
+
+ data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
@@ -3541,11 +3601,15 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
} else {
/* 1 - MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
- data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
- RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
+
+ if (adev->asic_type != CHIP_VEGA12)
+ data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
+
+ data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
+
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
@@ -3581,9 +3645,11 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
/* update CGCG and CGLS override bits */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
- /* enable 3Dcgcg FSM(0x0020003f) */
+
+ /* enable 3Dcgcg FSM(0x0000363f) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
- data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+
+ data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
@@ -3630,9 +3696,10 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
- /* enable cgcg FSM(0x0020003F) */
+ /* enable cgcg FSM(0x0000363F) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
- data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+
+ data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
@@ -3714,6 +3781,15 @@ static int gfx_v9_0_set_powergating_state(void *handle,
/* update mgcg state */
gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
+
+ /* set gfx off through smu */
+ if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
+ break;
+ case CHIP_VEGA12:
+ /* set gfx off through smu */
+ if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 79f9ac29019b..75317f283c69 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -41,11 +41,11 @@ static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);
-MODULE_FIRMWARE("radeon/tahiti_mc.bin");
-MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
-MODULE_FIRMWARE("radeon/verde_mc.bin");
-MODULE_FIRMWARE("radeon/oland_mc.bin");
-MODULE_FIRMWARE("radeon/si58_mc.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
+MODULE_FIRMWARE("amdgpu/verde_mc.bin");
+MODULE_FIRMWARE("amdgpu/oland_mc.bin");
+MODULE_FIRMWARE("amdgpu/si58_mc.bin");
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -134,9 +134,9 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
is_58_fw = true;
if (is_58_fw)
- snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
else
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 7147bfe25a23..36dc367c4b45 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -28,6 +28,7 @@
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"
+#include "amdgpu_amdkfd.h"
#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"
@@ -43,12 +44,14 @@
#include "amdgpu_atombios.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);
-MODULE_FIRMWARE("radeon/bonaire_mc.bin");
-MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
static const u32 golden_settings_iceland_a11[] =
@@ -147,10 +150,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- if (adev->asic_type == CHIP_TOPAZ)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- else
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
if (err)
@@ -999,11 +999,11 @@ static int gmc_v7_0_sw_init(void *handle)
adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
@@ -1079,6 +1079,12 @@ static int gmc_v7_0_sw_init(void *handle)
adev->vm_manager.vram_base_offset = 0;
}
+ adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
+ GFP_KERNEL);
+ if (!adev->gmc.vm_fault_info)
+ return -ENOMEM;
+ atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+
return 0;
}
@@ -1088,6 +1094,7 @@ static int gmc_v7_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
+ kfree(adev->gmc.vm_fault_info);
gmc_v7_0_gart_fini(adev);
amdgpu_bo_fini(adev);
release_firmware(adev->gmc.fw);
@@ -1277,7 +1284,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- u32 addr, status, mc_client;
+ u32 addr, status, mc_client, vmid;
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
@@ -1302,6 +1309,29 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
entry->pasid);
}
+ vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ VMID);
+ if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+ && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+ u32 protections = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ PROTECTIONS);
+
+ info->vmid = vmid;
+ info->mc_id = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ MEMORY_CLIENT_ID);
+ info->status = status;
+ info->page_addr = addr;
+ info->prot_valid = protections & 0x7 ? true : false;
+ info->prot_read = protections & 0x8 ? true : false;
+ info->prot_write = protections & 0x10 ? true : false;
+ info->prot_exec = protections & 0x20 ? true : false;
+ mb();
+ atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ }
+
return 0;
}
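
Editor's note: the fault record filled in above decodes the PROTECTIONS field of VM_CONTEXT1_PROTECTION_FAULT_STATUS bit by bit. A minimal sketch of that decoding, with the masks taken from the hunk above (the helper and struct names are illustrative, not driver symbols):

#include <stdbool.h>
#include <stdint.h>

struct vm_fault_prot {
	bool valid, read, write, exec;
};

/* Same masks as the hunk above: 0x7, 0x8, 0x10, 0x20. */
static void decode_protections(uint32_t protections,
			       struct vm_fault_prot *p)
{
	p->valid = protections & 0x7;
	p->read  = protections & 0x8;
	p->write = protections & 0x10;
	p->exec  = protections & 0x20;
}
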
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1edbe6b477b5..70fc97b59b4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
+#include "amdgpu_amdkfd.h"
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
@@ -44,6 +45,7 @@
#include "amdgpu_atombios.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1101,11 +1103,11 @@ static int gmc_v8_0_sw_init(void *handle)
adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
@@ -1181,6 +1183,12 @@ static int gmc_v8_0_sw_init(void *handle)
adev->vm_manager.vram_base_offset = 0;
}
+ adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
+ GFP_KERNEL);
+ if (!adev->gmc.vm_fault_info)
+ return -ENOMEM;
+ atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+
return 0;
}
@@ -1190,6 +1198,7 @@ static int gmc_v8_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
+ kfree(adev->gmc.vm_fault_info);
gmc_v8_0_gart_fini(adev);
amdgpu_bo_fini(adev);
release_firmware(adev->gmc.fw);
@@ -1425,7 +1434,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- u32 addr, status, mc_client;
+ u32 addr, status, mc_client, vmid;
if (amdgpu_sriov_vf(adev)) {
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
@@ -1447,8 +1456,13 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
gmc_v8_0_set_fault_enable_default(adev, false);
if (printk_ratelimit()) {
- dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
- entry->src_id, entry->src_data[0]);
+ struct amdgpu_task_info task_info = { 0 };
+
+ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
+ dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
+ entry->src_id, entry->src_data[0], task_info.process_name,
+ task_info.tgid, task_info.task_name, task_info.pid);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
@@ -1457,6 +1471,29 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
entry->pasid);
}
+ vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ VMID);
+ if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+ && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+ u32 protections = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ PROTECTIONS);
+
+ info->vmid = vmid;
+ info->mc_id = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ MEMORY_CLIENT_ID);
+ info->status = status;
+ info->page_addr = addr;
+ info->prot_valid = protections & 0x7 ? true : false;
+ info->prot_read = protections & 0x8 ? true : false;
+ info->prot_write = protections & 0x10 ? true : false;
+ info->prot_exec = protections & 0x20 ? true : false;
+ mb();
+ atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ }
+
return 0;
}
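
Editor's note: the mb() before the atomic_set() is what makes this a usable single-slot mailbox: the payload stores are ordered before the flag store, and once the flag is set the interrupt handler stops overwriting the record until someone clears it. The consumer is not part of this diff; a hedged sketch of what a KFD-side reader would have to do (the function name is an assumption, the fields are the ones shown above):

static bool read_vm_fault_info(struct amdgpu_device *adev,
			       struct kfd_vm_fault_info *out)
{
	if (!atomic_read(&adev->gmc.vm_fault_info_updated))
		return false;		/* no fault captured */
	*out = *adev->gmc.vm_fault_info; /* safe: written before the flag */
	mb();				 /* finish the copy before reopening the slot */
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	return true;
}
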
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 3c0a85d4e4ab..399a5db27649 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -43,6 +43,8 @@
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
+#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
+
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
@@ -257,12 +259,17 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
}
if (printk_ratelimit()) {
+ struct amdgpu_task_info task_info = { 0 };
+
+ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
dev_err(adev->dev,
- "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
+ "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d\n)\n",
entry->vmid_src ? "mmhub" : "gfxhub",
entry->src_id, entry->ring_id, entry->vmid,
- entry->pasid);
- dev_err(adev->dev, " at page 0x%016llx from %d\n",
+ entry->pasid, task_info.process_name, task_info.tgid,
+ task_info.task_name, task_info.pid);
+ dev_err(adev->dev, " at address 0x%016llx from %d\n",
addr, entry->client_id);
if (!amdgpu_sriov_vf(adev))
dev_err(adev->dev,
@@ -872,9 +879,9 @@ static int gmc_v9_0_sw_init(void *handle)
}
/* This interrupt is the VMC page fault. */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, 0,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, 0,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 7a1e77c93bf1..3f57f6463dc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -1921,7 +1921,7 @@ static int kv_dpm_set_power_state(void *handle)
int ret;
if (pi->bapm_enable) {
- ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power);
+ ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
if (ret) {
DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
return ret;
@@ -3306,6 +3306,19 @@ static int kv_dpm_read_sensor(void *handle, int idx,
}
}
+static int kv_set_powergating_by_smu(void *handle,
+ uint32_t block_type, bool gate)
+{
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ kv_dpm_powergate_uvd(handle, gate);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static const struct amd_ip_funcs kv_dpm_ip_funcs = {
.name = "kv_dpm",
.early_init = kv_dpm_early_init,
@@ -3342,7 +3355,7 @@ static const struct amd_pm_funcs kv_dpm_funcs = {
.print_power_state = &kv_dpm_print_power_state,
.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
.force_performance_level = &kv_dpm_force_performance_level,
- .powergate_uvd = &kv_dpm_powergate_uvd,
+ .set_powergating_by_smu = kv_set_powergating_by_smu,
.enable_bapm = &kv_dpm_enable_bapm,
.get_vce_clock_state = amdgpu_get_vce_clock_state,
.check_state_equal = kv_check_state_equal,
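
Editor's note: kv_dpm stops exporting a UVD-specific powergate hook; callers now go through one set_powergating_by_smu entry point keyed on the IP block type, so new blocks become switch cases instead of new function pointers. A sketch of a caller, mirroring what the mmhub_v1_0 hunk below does for the GMC block (the wrapper amdgpu_dpm_set_powergating_by_smu appears there; the error handling here is illustrative):

static int powergate_uvd(struct amdgpu_device *adev, bool gate)
{
	if (!adev->powerplay.pp_funcs->set_powergating_by_smu)
		return -EINVAL;	/* backend has no SMU powergating */
	return amdgpu_dpm_set_powergating_by_smu(adev,
						 AMD_IP_BLOCK_TYPE_UVD, gate);
}
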
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 3d53c4413f13..e70a0d4d6db4 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -471,8 +471,8 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
RENG_EXECUTE_ON_REG_UPDATE, 1);
WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
- if (adev->powerplay.pp_funcs->set_mmhub_powergating_by_smu)
- amdgpu_dpm_set_mmhub_powergating_by_smu(adev);
+ if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
} else {
pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index c7190c39c4f5..15ae4bc9c072 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -44,6 +44,8 @@
#include "iceland_sdma_pkt_open.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -202,8 +204,7 @@ static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
- u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
+ u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
return wptr;
}
@@ -218,9 +219,8 @@ static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
}
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
@@ -273,7 +273,7 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
- if (ring == &ring->adev->sdma.instance[0].ring)
+ if (ring->me == 0)
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
else
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -898,7 +898,7 @@ static int sdma_v2_4_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
@@ -910,7 +910,7 @@ static int sdma_v2_4_sw_init(void *handle)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
@@ -1213,8 +1213,10 @@ static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ }
}
static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
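
Editor's note: rather than re-deriving the SDMA instance index by pointer comparison on every register access (which also silently assumed exactly two instances), the index is now cached in ring->me when the ring functions are installed. The same conversion is applied to sdma_v3_0, sdma_v4_0 and the VCE rings later in this series. Side by side, from the hunks above:

/* Before: recomputed at every call site, hard-coded to two instances. */
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;

/* After: assigned once in sdma_v2_4_set_ring_funcs(), read everywhere. */
u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
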
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index aa9ab299fd32..1e07ff274d73 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -44,6 +44,8 @@
#include "tonga_sdma_pkt_open.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -365,9 +367,7 @@ static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
/* XXX check if swapping is necessary on BE */
wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
} else {
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
- wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
+ wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
}
return wptr;
@@ -394,9 +394,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
} else {
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
}
}
@@ -450,7 +448,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
- if (ring == &ring->adev->sdma.instance[0].ring)
+ if (ring->me == 0)
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
else
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -1179,7 +1177,7 @@ static int sdma_v3_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
@@ -1191,7 +1189,7 @@ static int sdma_v3_0_sw_init(void *handle)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
@@ -1655,8 +1653,10 @@ static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ }
}
static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index ca53b3fba422..e7ca4623cfb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -38,6 +38,9 @@
#include "soc15.h"
#include "vega10_sdma_pkt_open.h"
+#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
+#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
+
MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
@@ -296,13 +299,12 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
u32 lowbit, highbit;
- int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
- highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
+ lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
+ highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
- me, highbit, lowbit);
+ ring->me, highbit, lowbit);
wptr = highbit;
wptr = wptr << 32;
wptr |= lowbit;
@@ -339,17 +341,15 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
ring->doorbell_index, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
- int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
DRM_DEBUG("Not using doorbell -- "
"mmSDMA%i_GFX_RB_WPTR == 0x%08x "
"mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
- me,
+ ring->me,
lower_32_bits(ring->wptr << 2),
- me,
+ ring->me,
upper_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
}
}
@@ -430,7 +430,7 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
- if (ring == &ring->adev->sdma.instance[0].ring)
+ if (ring->me == 0)
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
else
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
@@ -1228,13 +1228,13 @@ static int sdma_v4_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, 224,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
@@ -1651,8 +1651,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ }
}
static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 5c97a3671726..db327b412562 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -56,16 +56,16 @@
#define BIOS_SCRATCH_4 0x5cd
-MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
-MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
-MODULE_FIRMWARE("radeon/verde_smc.bin");
-MODULE_FIRMWARE("radeon/verde_k_smc.bin");
-MODULE_FIRMWARE("radeon/oland_smc.bin");
-MODULE_FIRMWARE("radeon/oland_k_smc.bin");
-MODULE_FIRMWARE("radeon/hainan_smc.bin");
-MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
-MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/verde_smc.bin");
+MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/oland_smc.bin");
+MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
static const struct amd_pm_funcs si_dpm_funcs;
@@ -3480,7 +3480,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
disable_sclk_switching = true;
}
- if (adev->pm.dpm.ac_power)
+ if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -3489,7 +3489,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
}
- if (adev->pm.dpm.ac_power == false) {
+ if (adev->pm.ac_power == false) {
for (i = 0; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].mclk > max_limits->mclk)
ps->performance_levels[i].mclk = max_limits->mclk;
@@ -7318,8 +7318,7 @@ static int si_dpm_init(struct amdgpu_device *adev)
pi = &eg_pi->rv7xx;
si_pi->sys_pcie_mask =
- (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
- CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
+ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
@@ -7667,7 +7666,7 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index 8dc29107228f..edfe50821cd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -53,6 +53,29 @@
#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
+#define PACKETJ_CONDITION_CHECK0 0
+#define PACKETJ_CONDITION_CHECK1 1
+#define PACKETJ_CONDITION_CHECK2 2
+#define PACKETJ_CONDITION_CHECK3 3
+#define PACKETJ_CONDITION_CHECK4 4
+#define PACKETJ_CONDITION_CHECK5 5
+#define PACKETJ_CONDITION_CHECK6 6
+#define PACKETJ_CONDITION_CHECK7 7
+
+#define PACKETJ_TYPE0 0
+#define PACKETJ_TYPE1 1
+#define PACKETJ_TYPE2 2
+#define PACKETJ_TYPE3 3
+#define PACKETJ_TYPE4 4
+#define PACKETJ_TYPE5 5
+#define PACKETJ_TYPE6 6
+#define PACKETJ_TYPE7 7
+
+#define PACKETJ(reg, r, cond, type) ((reg & 0x3FFFF) | \
+ ((r & 0x3F) << 18) | \
+ ((cond & 0xF) << 24) | \
+ ((type & 0xF) << 28))
+
/* Packet 3 types */
#define PACKET3_NOP 0x10
#define PACKET3_SET_BASE 0x11
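
Editor's note: PACKETJ packs a command header into one dword: register offset in bits 0-17, a 6-bit field (r) in bits 18-23, condition code in bits 24-27 and packet type in bits 28-31. A few worked values, checked at compile time (C11 static_assert; illustrative only, not driver code):

static_assert(PACKETJ(0, 0, 0, PACKETJ_TYPE6) == 0x60000000u,
	      "type lands in bits 28-31");
static_assert(PACKETJ(0x3fbc, 0, 0, PACKETJ_TYPE0) == 0x00003fbcu,
	      "register offset lands in bits 0-17");
static_assert(PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2)
	      == 0x23000000u, "condition lands in bits 24-27");
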
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 341ee6d55ce8..aeaa1ca46a99 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -35,6 +35,7 @@
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -104,7 +105,7 @@ static int uvd_v5_0_sw_init(void *handle)
int r;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index bfddf97dd13e..598dbeaba636 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -36,6 +36,7 @@
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
@@ -247,12 +248,10 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -311,19 +310,13 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
- if (r)
- goto err;
-
- amdgpu_job_free(job);
- } else {
- r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+ if (direct)
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ else
+ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
- if (r)
- goto err;
- }
+ if (r)
+ goto err;
if (fence)
*fence = dma_fence_get(f);
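
Editor's note: the open-coded direct submission (schedule the IB, take a fence reference, free the job) recurs in every UVD/VCE test-message helper; it is folded into amdgpu_job_submit_direct(), while the scheduled path keeps amdgpu_job_submit(), now without an explicit ring argument since the entity implies it. In outline, taken from the hunks above:

/* Before: open-coded in each caller */
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
job->fence = dma_fence_get(f);
if (r)
	goto err;
amdgpu_job_free(job);

/* After: one helper for the direct path */
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
	goto err;
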
@@ -400,14 +393,14 @@ static int uvd_v6_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
if (r)
return r;
/* UVD ENC TRAP */
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
if (r)
return r;
}
@@ -425,16 +418,6 @@ static int uvd_v6_0_sw_init(void *handle)
adev->uvd.num_enc_rings = 0;
DRM_INFO("UVD ENC is disabled\n");
- } else {
- struct drm_sched_rq *rq;
- ring = &adev->uvd.inst->ring_enc[0];
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
- rq, NULL);
- if (r) {
- DRM_ERROR("Failed setting up UVD ENC run queue.\n");
- return r;
- }
}
r = amdgpu_uvd_resume(adev);
@@ -470,8 +453,6 @@ static int uvd_v6_0_sw_fini(void *handle)
return r;
if (uvd_v6_0_enc_support(adev)) {
- drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
-
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
}
@@ -1569,7 +1550,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
- .nop = PACKET0(mmUVD_NO_OP, 0),
.support_64bit_ptrs = false,
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
@@ -1587,7 +1567,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
.test_ring = uvd_v6_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = uvd_v6_0_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 57d32f21b3a6..5fab3560a71d 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -39,6 +39,13 @@
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
+#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
+
+#define mmUVD_PG0_CC_UVD_HARVESTING 0x00c7
+#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX 1
+//UVD_PG0_CC_UVD_HARVESTING
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L
#define UVD7_MAX_HW_INSTANCES_VEGA20 2
@@ -249,12 +256,10 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -312,19 +317,13 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
- if (r)
- goto err;
-
- amdgpu_job_free(job);
- } else {
- r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+ if (direct)
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ else
+ r = amdgpu_job_submit(job, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
- if (r)
- goto err;
- }
+ if (r)
+ goto err;
if (fence)
*fence = dma_fence_get(f);
@@ -377,10 +376,25 @@ error:
static int uvd_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_VEGA20)
+
+ if (adev->asic_type == CHIP_VEGA20) {
+ u32 harvest;
+ int i;
+
adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
- else
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
+ if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
+ adev->uvd.harvest_config |= 1 << i;
+ }
+ }
+ if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
+ AMDGPU_UVD_HARVEST_UVD1))
+ /* both instances are harvested, disable the block */
+ return -ENOENT;
+ } else {
adev->uvd.num_uvd_inst = 1;
+ }
if (amdgpu_sriov_vf(adev))
adev->uvd.num_enc_rings = 1;
@@ -396,19 +410,20 @@ static int uvd_v7_0_early_init(void *handle)
static int uvd_v7_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
int i, j, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], 124, &adev->uvd.inst[j].irq);
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
if (r)
return r;
/* UVD ENC TRAP */
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + 119, &adev->uvd.inst[j].irq);
+ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
if (r)
return r;
}
@@ -428,22 +443,13 @@ static int uvd_v7_0_sw_init(void *handle)
DRM_INFO("PSP loading UVD firmware\n");
}
- for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
- ring = &adev->uvd.inst[j].ring_enc[0];
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
- rq, NULL);
- if (r) {
- DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
- return r;
- }
- }
-
r = amdgpu_uvd_resume(adev);
if (r)
return r;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
sprintf(ring->name, "uvd<%d>", j);
@@ -491,8 +497,8 @@ static int uvd_v7_0_sw_fini(void *handle)
return r;
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
- drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
-
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
}
@@ -521,6 +527,8 @@ static int uvd_v7_0_hw_init(void *handle)
goto done;
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
ring = &adev->uvd.inst[j].ring;
if (!amdgpu_sriov_vf(adev)) {
@@ -600,8 +608,11 @@ static int uvd_v7_0_hw_fini(void *handle)
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
}
- for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
+ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
adev->uvd.inst[i].ring.ready = false;
+ }
return 0;
}
@@ -644,6 +655,8 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
@@ -716,6 +729,8 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
adev->uvd.inst[i].ring_enc[0].wptr = 0;
@@ -772,6 +787,8 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
init_table += header->uvd_table_offset;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
ring = &adev->uvd.inst[i].ring;
ring->wptr = 0;
size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
@@ -911,6 +928,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
int i, j, k, r;
for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+ if (adev->uvd.harvest_config & (1 << k))
+ continue;
/* disable DPG */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
@@ -923,6 +942,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
uvd_v7_0_mc_resume(adev);
for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+ if (adev->uvd.harvest_config & (1 << k))
+ continue;
ring = &adev->uvd.inst[k].ring;
/* disable clock gating */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
@@ -1090,6 +1111,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
uint8_t i = 0;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
/* force RBC into idle state */
WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
@@ -1227,6 +1250,34 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
}
/**
+ * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
+ *
+ * @p: the CS parser with the IBs
+ * @ib_idx: which IB to patch
+ *
+ * Re-bases UVD register offsets in the IB from instance 0 to the
+ * instance the job actually runs on (a no-op on instance 0).
+ */
+static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+ uint32_t ib_idx)
+{
+ struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+ unsigned i;
+
+ /* No patching necessary for the first instance */
+ if (!p->ring->me)
+ return 0;
+
+ for (i = 0; i < ib->length_dw; i += 2) {
+ uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
+
+ reg -= p->adev->reg_offset[UVD_HWIP][0][1];
+ reg += p->adev->reg_offset[UVD_HWIP][1][1];
+
+ amdgpu_set_ib_value(p, ib_idx, i, reg);
+ }
+ return 0;
+}
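
Editor's note: with two UVD instances, userspace still encodes instance-0 register offsets in its IBs; when the job lands on instance 1 (ring->me != 0), the CS patch walks the IB's (register, value) pairs and rebases each register. The equivalent transform for a single dword (the [1] segment index matches the code above; that it is the segment holding UVD registers is an assumption):

static uint32_t rebase_uvd_reg(struct amdgpu_device *adev, uint32_t reg)
{
	/* drop the instance-0 base, apply the instance-1 base */
	return reg - adev->reg_offset[UVD_HWIP][0][1]
		   + adev->reg_offset[UVD_HWIP][1][1];
}
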
+
+/**
* uvd_v7_0_ring_emit_ib - execute indirect buffer
*
* @ring: amdgpu_ring pointer
@@ -1718,6 +1769,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.get_rptr = uvd_v7_0_ring_get_rptr,
.get_wptr = uvd_v7_0_ring_get_wptr,
.set_wptr = uvd_v7_0_ring_set_wptr,
+ .patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
.emit_frame_size =
6 + /* hdp invalidate */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
@@ -1777,6 +1829,8 @@ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
adev->uvd.inst[i].ring.me = i;
DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
@@ -1788,6 +1842,8 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
int i, j;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
adev->uvd.inst[j].ring_enc[i].me = j;
@@ -1807,6 +1863,8 @@ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 47f70827195b..d48e877b682e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -56,7 +56,7 @@ static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
return RREG32(mmVCE_RB_RPTR);
else
return RREG32(mmVCE_RB_RPTR2);
@@ -73,7 +73,7 @@ static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
return RREG32(mmVCE_RB_WPTR);
else
return RREG32(mmVCE_RB_WPTR2);
@@ -90,7 +90,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
else
WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
@@ -627,8 +627,10 @@ static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->vce.num_rings; i++)
+ for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
+ adev->vce.ring[i].me = i;
+ }
}
static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index a71b97519cc0..cc6ce6cc03f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -39,6 +39,7 @@
#include "smu/smu_7_1_2_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
@@ -86,9 +87,9 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
v = RREG32(mmVCE_RB_RPTR);
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
v = RREG32(mmVCE_RB_RPTR2);
else
v = RREG32(mmVCE_RB_RPTR3);
@@ -118,9 +119,9 @@ static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
v = RREG32(mmVCE_RB_WPTR);
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
v = RREG32(mmVCE_RB_WPTR2);
else
v = RREG32(mmVCE_RB_WPTR3);
@@ -149,9 +150,9 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
else
WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
@@ -422,7 +423,7 @@ static int vce_v3_0_sw_init(void *handle)
int r, i;
/* VCE */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
if (r)
return r;
@@ -942,12 +943,16 @@ static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
int i;
if (adev->asic_type >= CHIP_STONEY) {
- for (i = 0; i < adev->vce.num_rings; i++)
+ for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
+ adev->vce.ring[i].me = i;
+ }
DRM_INFO("VCE enabled in VM mode\n");
} else {
- for (i = 0; i < adev->vce.num_rings; i++)
+ for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
+ adev->vce.ring[i].me = i;
+ }
DRM_INFO("VCE enabled in physical mode\n");
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 8fd1b742985a..65f8860169e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -39,6 +39,8 @@
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
+#include "ivsrcid/vce/irqsrcs_vce_4_0.h"
+
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
#define VCE_V4_0_FW_SIZE (384 * 1024)
@@ -60,9 +62,9 @@ static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
else
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
@@ -82,9 +84,9 @@ static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
else
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
@@ -108,10 +110,10 @@ static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
return;
}
- if (ring == &adev->vce.ring[0])
+ if (ring->me == 0)
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
lower_32_bits(ring->wptr));
- else if (ring == &adev->vce.ring[1])
+ else if (ring->me == 1)
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
lower_32_bits(ring->wptr));
else
@@ -1088,8 +1090,10 @@ static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->vce.num_rings; i++)
+ for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
+ adev->vce.ring[i].me = i;
+ }
DRM_INFO("VCE enabled in VM mode\n");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 29684c3ea4ef..2ce91a748c40 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -35,10 +35,14 @@
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
+
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
/**
* vcn_v1_0_early_init - set function pointers
@@ -55,6 +59,7 @@ static int vcn_v1_0_early_init(void *handle)
vcn_v1_0_set_dec_ring_funcs(adev);
vcn_v1_0_set_enc_ring_funcs(adev);
+ vcn_v1_0_set_jpeg_ring_funcs(adev);
vcn_v1_0_set_irq_funcs(adev);
return 0;
@@ -74,18 +79,23 @@ static int vcn_v1_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCN DEC TRAP */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
if (r)
return r;
/* VCN ENC TRAP */
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
&adev->vcn.irq);
if (r)
return r;
}
+ /* VCN JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
+ if (r)
+ return r;
+
r = amdgpu_vcn_sw_init(adev);
if (r)
return r;
@@ -108,6 +118,12 @@ static int vcn_v1_0_sw_init(void *handle)
return r;
}
+ ring = &adev->vcn.ring_jpeg;
+ sprintf(ring->name, "vcn_jpeg");
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+ if (r)
+ return r;
+
return r;
}
@@ -162,6 +178,14 @@ static int vcn_v1_0_hw_init(void *handle)
}
}
+ ring = &adev->vcn.ring_jpeg;
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ goto done;
+ }
+
done:
if (!r)
DRM_INFO("VCN decode and encode initialized successfully.\n");
@@ -578,12 +602,12 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
/* disable byte swapping */
lmi_swap_cntl = 0;
- vcn_v1_0_mc_resume(adev);
-
vcn_1_0_disable_static_power_gating(adev);
/* disable clock gating */
vcn_v1_0_disable_clock_gating(adev);
+ vcn_v1_0_mc_resume(adev);
+
/* disable interrupt */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
~UVD_MASTINT_EN__VCPU_EN_MASK);
@@ -729,6 +753,22 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ ring = &adev->vcn.ring_jpeg;
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+
+ /* initialize wptr */
+ ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+
+ /* copy patch commands to the jpeg ring */
+ vcn_v1_0_jpeg_ring_set_patch_ring(ring,
+ (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+
return 0;
}
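
Editor's note: the JPEG ring bring-up mirrors the decode/encode rings: program the VMID, hold the ring while the base address and pointers are set, then release it. Reading the constants against the inline comments in set_patch_ring() further down, RB_CNTL bit 0 looks like a no-fetch/hold bit and bit 1 an enable bit, but that is an inference, not a documented field layout. Condensed order from the hunk above:

/* 1. UVD_LMI_JRBC_RB_VMID               = 0
 * 2. UVD_JRBC_RB_CNTL                   = 0x3  (hold while programming; assumed)
 * 3. UVD_LMI_JRBC_RB_64BIT_BAR_LOW/HIGH = ring->gpu_addr
 * 4. UVD_JRBC_RB_RPTR = UVD_JRBC_RB_WPTR = 0
 * 5. UVD_JRBC_RB_CNTL                   = 0x2  (release)
 * then read WPTR back and copy the patch commands past the
 * scheduler's normal submission window. */
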
@@ -1126,6 +1166,383 @@ static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
+
+/**
+ * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x68e04);
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x80010000);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x68e04);
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x00010000);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_emit_fence - emit a fence and trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: GPU address to write the sequence number to
+ * @seq: sequence number to signal
+ * @flags: fence flags (64-bit sequence numbers are not supported here)
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x8);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
+ amdgpu_ring_write(ring, 0xffffffff);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x3fbc);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x1);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer.
+ */
+static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+ unsigned vmid, bool ctx_switch)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, ib->length_dw);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
+ amdgpu_ring_write(ring, 0x2);
+}
+
+static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val,
+ uint32_t mask)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, val);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE3));
+ }
+ amdgpu_ring_write(ring, mask);
+}
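
Editor's note: both JRBC emit helpers share the same addressing split: byte offsets inside the 0x1e000-0x1e1ff and 0x1f800-0x21fff windows can be encoded straight into the PACKETJ header, while anything else is written indirectly through UVD_JRBC_EXTERNAL_REG_BASE. The shared range check, pulled out for clarity (the helper name is illustrative):

static bool jrbc_reg_is_direct(uint32_t reg_offset)
{
	return (reg_offset >= 0x1f800 && reg_offset <= 0x21fff) ||
	       (reg_offset >= 0x1e000 && reg_offset <= 0x1e1ff);
}
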
+
+static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+ uint32_t data0, data1, mask;
+
+ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+
+ /* wait for register write */
+ data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
+ data1 = lower_32_bits(pd_addr);
+ mask = 0xffffffff;
+ vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
+}
+
+static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring,
+ PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+ }
+ amdgpu_ring_write(ring, val);
+}
+
+static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+
+ WARN_ON(ring->wptr % 2 || count % 2);
+
+ for (i = 0; i < count / 2; i++) {
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
+ }
+}
+
+static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
+{
+ struct amdgpu_device *adev = ring->adev;
+ ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ ring->ring[(*ptr)++] = 0;
+ ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
+ } else {
+ ring->ring[(*ptr)++] = reg_offset;
+ ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
+ }
+ ring->ring[(*ptr)++] = val;
+}
+
+static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ uint32_t reg, reg_offset, val, mask, i;
+
+ // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
+ reg_offset = (reg << 2);
+ val = lower_32_bits(ring->gpu_addr);
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
+ reg_offset = (reg << 2);
+ val = upper_32_bits(ring->gpu_addr);
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 3rd to 5th: issue MEM_READ commands
+ for (i = 0; i <= 2; i++) {
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
+ ring->ring[ptr++] = 0;
+ }
+
+ // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x13;
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 7th: program mmUVD_JRBC_RB_REF_DATA
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
+ reg_offset = (reg << 2);
+ val = 0x1;
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x1;
+ mask = 0x1;
+
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
+ ring->ring[ptr++] = 0x01400200;
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
+ ring->ring[ptr++] = val;
+ ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+ if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+ ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+ ring->ring[ptr++] = 0;
+ ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
+ } else {
+ ring->ring[ptr++] = reg_offset;
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
+ }
+ ring->ring[ptr++] = mask;
+
+ // 9th to 21st: insert no-op
+ for (i = 0; i <= 12; i++) {
+ ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+ ring->ring[ptr++] = 0;
+ }
+
+ // 22nd: reset mmUVD_JRBC_RB_RPTR
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
+ reg_offset = (reg << 2);
+ val = 0;
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+ // 23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
+ reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+ reg_offset = (reg << 2);
+ val = 0x12;
+ vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+}
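
Editor's note: the patch stream written here is consumed by the JRBC engine itself, not the CPU: per the inline comments it re-points the fetch address at the ring, enables NO_FETCH plus RPTR writes (RB_CNTL = 0x13), polls RB_CNTL against REF_DATA 0x1, zeroes RPTR and then drops back to 0x12 so fetching resumes from the top of the ring, in effect a hardware-driven wrap. The bit-level reading is inferred from those comments, not from a register spec. Its placement comes from vcn_v1_0_start() above:

/* The patch stream starts one full scheduler window past wptr,
 * presumably inside the extra_dw headroom declared in the ring funcs. */
vcn_v1_0_jpeg_ring_set_patch_ring(ring,
	(ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
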
+
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -1150,6 +1567,9 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
case 120:
amdgpu_fence_process(&adev->vcn.ring_enc[1]);
break;
+ case 126:
+ amdgpu_fence_process(&adev->vcn.ring_jpeg);
+ break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@@ -1273,6 +1693,39 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
+static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .nop = PACKET0(0x81ff, 0),
+ .support_64bit_ptrs = false,
+ .vmhub = AMDGPU_MMHUB,
+ .extra_dw = 64,
+ .get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
+ .get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
+ .set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
+ .emit_frame_size =
+ 6 + 6 + /* hdp invalidate / flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
+ 14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
+ 6,
+ .emit_ib_size = 22, /* vcn_v1_0_dec_ring_emit_ib */
+ .emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
+ .emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
+ .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
+ .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
+ .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
+ .insert_nop = vcn_v1_0_jpeg_ring_nop,
+ .insert_start = vcn_v1_0_jpeg_ring_insert_start,
+ .insert_end = vcn_v1_0_jpeg_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
+ .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
+};
+
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
@@ -1289,6 +1742,12 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
DRM_INFO("VCN encode is enabled in VM mode\n");
}
+static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
+{
+ adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
+ DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
+}
+
static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
.set = vcn_v1_0_set_interrupt_state,
.process = vcn_v1_0_process_interrupt,
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index 45aafca7f315..c5c9b2bc190d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -51,6 +51,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 4ac1288ab7df..42c8ad105b05 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1363,11 +1363,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
- pp_support_state = AMD_CG_SUPPORT_MC_LS;
+ pp_support_state = PP_STATE_SUPPORT_LS;
pp_state = PP_STATE_LS;
}
if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
- pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
+ pp_support_state |= PP_STATE_SUPPORT_CG;
pp_state |= PP_STATE_CG;
}
if (state == AMD_CG_STATE_UNGATE)
@@ -1382,11 +1382,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
- pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
+ pp_support_state = PP_STATE_SUPPORT_LS;
pp_state = PP_STATE_LS;
}
if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
- pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
+ pp_support_state |= PP_STATE_SUPPORT_CG;
pp_state |= PP_STATE_CG;
}
if (state == AMD_CG_STATE_UNGATE)
@@ -1401,11 +1401,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
- pp_support_state = AMD_CG_SUPPORT_HDP_LS;
+ pp_support_state = PP_STATE_SUPPORT_LS;
pp_state = PP_STATE_LS;
}
if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
- pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
+ pp_support_state |= PP_STATE_SUPPORT_CG;
pp_state |= PP_STATE_CG;
}
if (state == AMD_CG_STATE_UNGATE)
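
The three hunks above fix the same copy/paste slip: pp_support_state must carry the PP_STATE_SUPPORT_LS/PP_STATE_SUPPORT_CG values that the powerplay/SMU interface expects, not the AMD_CG_SUPPORT_* feature bits that are merely being tested in adev->cg_flags.
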
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 49df6c791cfc..5d2475d5392c 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -25,12 +25,39 @@
#include "cik_int.h"
static bool cik_event_interrupt_isr(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry)
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
{
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
+ const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
unsigned int vmid, pasid;
+ /* This workaround is due to a HW/FW limitation on Hawaii:
+ * the VMID and PASID are not written into the ih_ring_entry.
+ */
+ if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
+ dev->device_info->asic_family == CHIP_HAWAII) {
+ struct cik_ih_ring_entry *tmp_ihre =
+ (struct cik_ih_ring_entry *)patched_ihre;
+
+ *patched_flag = true;
+ *tmp_ihre = *ihre;
+
+ vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
+ pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
+
+ tmp_ihre->ring_id &= 0x000000ff;
+ tmp_ihre->ring_id |= vmid << 8;
+ tmp_ihre->ring_id |= pasid << 16;
+
+ return (pasid != 0) &&
+ vmid >= dev->vm_info.first_vmid_kfd &&
+ vmid <= dev->vm_info.last_vmid_kfd;
+ }
+
/* Only handle interrupts from KFD VMIDs */
vmid = (ihre->ring_id & 0x0000ff00) >> 8;
if (vmid < dev->vm_info.first_vmid_kfd ||
@@ -48,18 +75,19 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
return ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
- ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE;
+ ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
+ ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT;
}
static void cik_event_interrupt_wq(struct kfd_dev *dev,
const uint32_t *ih_ring_entry)
{
- unsigned int pasid;
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
uint32_t context_id = ihre->data & 0xfffffff;
-
- pasid = (ihre->ring_id & 0xffff0000) >> 16;
+ unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;
+ unsigned int pasid = (ihre->ring_id & 0xffff0000) >> 16;
if (pasid == 0)
return;
@@ -72,6 +100,22 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
kfd_signal_event_interrupt(pasid, context_id & 0xff, 8);
else if (ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE)
kfd_signal_hw_exception_event(pasid);
+ else if (ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) {
+ struct kfd_vm_fault_info info;
+
+ kfd_process_vm_fault(dev->dqm, pasid);
+
+ memset(&info, 0, sizeof(info));
+ dev->kfd2kgd->get_vm_fault_info(dev->kgd, &info);
+ if (!info.page_addr && !info.status)
+ return;
+
+ if (info.vmid == vmid)
+ kfd_signal_vm_fault_event(dev, pasid, &info);
+ else
+ kfd_signal_vm_fault_event(dev, pasid, NULL);
+ }
}
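
The Hawaii workaround packs the recovered VMID and PASID into the same ring_id bit positions that other ASICs fill in hardware, so the worker-queue handler above can decode every entry uniformly. A sketch of that field layout (helper names are illustrative):

#include <stdint.h>

/* ring_id layout used above: bits [7:0] ring, [15:8] VMID, [31:16] PASID. */
static uint32_t pack_ring_id(uint32_t ring_id, uint32_t vmid, uint32_t pasid)
{
	ring_id &= 0x000000ff;			/* keep the ring bits */
	ring_id |= (vmid & 0xff) << 8;		/* VMID into bits 15:8 */
	ring_id |= (pasid & 0xffff) << 16;	/* PASID into bits 31:16 */
	return ring_id;
}

static void unpack_ring_id(uint32_t ring_id, uint32_t *vmid, uint32_t *pasid)
{
	*vmid  = (ring_id & 0x0000ff00) >> 8;
	*pasid = (ring_id & 0xffff0000) >> 16;
}
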
const struct kfd_event_interrupt_class event_interrupt_class_cik = {
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_int.h b/drivers/gpu/drm/amd/amdkfd/cik_int.h
index 109298b9d507..76f8677a7926 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_int.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_int.h
@@ -20,8 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef HSA_RADEON_CIK_INT_H_INCLUDED
-#define HSA_RADEON_CIK_INT_H_INCLUDED
+#ifndef CIK_INT_H_INCLUDED
+#define CIK_INT_H_INCLUDED
#include <linux/types.h>
@@ -34,9 +34,10 @@ struct cik_ih_ring_entry {
#define CIK_INTSRC_CP_END_OF_PIPE 0xB5
#define CIK_INTSRC_CP_BAD_OPCODE 0xB7
-#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
#define CIK_INTSRC_SDMA_TRAP 0xE0
#define CIK_INTSRC_SQ_INTERRUPT_MSG 0xEF
+#define CIK_INTSRC_GFX_PAGE_INV_FAULT 0x92
+#define CIK_INTSRC_GFX_MEM_PROT_FAULT 0x93
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index f68aef02fc1f..3621efbd5759 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -21,18 +21,21 @@
*/
static const uint32_t cwsr_trap_gfx8_hex[] = {
- 0xbf820001, 0xbf820125,
+ 0xbf820001, 0xbf82012b,
0xb8f4f802, 0x89748674,
0xb8f5f803, 0x8675ff75,
- 0x00000400, 0xbf850011,
+ 0x00000400, 0xbf850017,
0xc00a1e37, 0x00000000,
0xbf8c007f, 0x87777978,
- 0xbf840002, 0xb974f802,
- 0xbe801d78, 0xb8f5f803,
- 0x8675ff75, 0x000001ff,
- 0xbf850002, 0x80708470,
- 0x82718071, 0x8671ff71,
- 0x0000ffff, 0xb974f802,
+ 0xbf840005, 0x8f728374,
+ 0xb972e0c2, 0xbf800002,
+ 0xb9740002, 0xbe801d78,
+ 0xb8f5f803, 0x8675ff75,
+ 0x000001ff, 0xbf850002,
+ 0x80708470, 0x82718071,
+ 0x8671ff71, 0x0000ffff,
+ 0x8f728374, 0xb972e0c2,
+ 0xbf800002, 0xb9740002,
0xbe801f70, 0xb8f5f803,
0x8675ff75, 0x00000100,
0xbf840006, 0xbefa0080,
@@ -168,7 +171,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
0x807c847c, 0x806eff6e,
0x00000400, 0xbf0a757c,
0xbf85ffef, 0xbf9c0000,
- 0xbf8200ca, 0xbef8007e,
+ 0xbf8200cd, 0xbef8007e,
0x8679ff7f, 0x0000ffff,
0x8779ff79, 0x00040000,
0xbefa0080, 0xbefb00ff,
@@ -268,16 +271,18 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
0x8f739773, 0xb976f807,
0x8671ff71, 0x0000ffff,
0x86fe7e7e, 0x86ea6a6a,
- 0xb974f802, 0xbf8a0000,
- 0x95807370, 0xbf810000,
+ 0x8f768374, 0xb976e0c2,
+ 0xbf800002, 0xb9740002,
+ 0xbf8a0000, 0x95807370,
+ 0xbf810000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx9_hex[] = {
- 0xbf820001, 0xbf82015a,
+ 0xbf820001, 0xbf82015d,
0xb8f8f802, 0x89788678,
0xb8f1f803, 0x866eff71,
- 0x00000400, 0xbf850034,
+ 0x00000400, 0xbf850037,
0x866eff71, 0x00000800,
0xbf850003, 0x866eff71,
0x00000100, 0xbf840008,
@@ -303,258 +308,261 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x8f6e8b77, 0x866eff6e,
0x001f8000, 0xb96ef807,
0x86fe7e7e, 0x86ea6a6a,
- 0xb978f802, 0xbe801f6c,
- 0x866dff6d, 0x0000ffff,
- 0xbef00080, 0xb9700283,
- 0xb8f02407, 0x8e709c70,
- 0x876d706d, 0xb8f003c7,
- 0x8e709b70, 0x876d706d,
- 0xb8f0f807, 0x8670ff70,
- 0x00007fff, 0xb970f807,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbf900004,
- 0x87708478, 0xb970f802,
- 0xbf8e0002, 0xbf88fffe,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0xb8f11605,
- 0x80718171, 0x8e718671,
- 0x80707170, 0x80707e70,
- 0x8271807f, 0x8671ff71,
- 0x0000ffff, 0xc0471cb8,
- 0x00000040, 0xbf8cc07f,
- 0xc04b1d38, 0x00000048,
- 0xbf8cc07f, 0xc0431e78,
- 0x00000058, 0xbf8cc07f,
- 0xc0471eb8, 0x0000005c,
- 0xbf8cc07f, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x8670ff7f,
- 0x08000000, 0x8f708370,
- 0x87777077, 0x8670ff7f,
- 0x70000000, 0x8f708170,
- 0x87777077, 0xbefb007c,
- 0xbefa0080, 0xb8fa2a05,
- 0x807a817a, 0x8e7a8a7a,
- 0xb8f01605, 0x80708170,
- 0x8e708670, 0x807a707a,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xbefe007c,
- 0xbefc007a, 0xc0611efa,
- 0x0000007c, 0xbf8cc07f,
- 0x807a847a, 0xbefc007e,
+ 0x8f6e8378, 0xb96ee0c2,
+ 0xbf800002, 0xb9780002,
+ 0xbe801f6c, 0x866dff6d,
+ 0x0000ffff, 0xbef00080,
+ 0xb9700283, 0xb8f02407,
+ 0x8e709c70, 0x876d706d,
+ 0xb8f003c7, 0x8e709b70,
+ 0x876d706d, 0xb8f0f807,
+ 0x8670ff70, 0x00007fff,
+ 0xb970f807, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x87708478,
+ 0xb970f802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0xb8f11605, 0x80718171,
+ 0x8e718671, 0x80707170,
+ 0x80707e70, 0x8271807f,
+ 0x8671ff71, 0x0000ffff,
+ 0xc0471cb8, 0x00000040,
+ 0xbf8cc07f, 0xc04b1d38,
+ 0x00000048, 0xbf8cc07f,
+ 0xc0431e78, 0x00000058,
+ 0xbf8cc07f, 0xc0471eb8,
+ 0x0000005c, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x8670ff7f, 0x08000000,
+ 0x8f708370, 0x87777077,
+ 0x8670ff7f, 0x70000000,
+ 0x8f708170, 0x87777077,
+ 0xbefb007c, 0xbefa0080,
+ 0xb8fa2a05, 0x807a817a,
+ 0x8e7a8a7a, 0xb8f01605,
+ 0x80708170, 0x8e708670,
+ 0x807a707a, 0xbef60084,
+ 0xbef600ff, 0x01000000,
0xbefe007c, 0xbefc007a,
- 0xc0611b3a, 0x0000007c,
+ 0xc0611efa, 0x0000007c,
0xbf8cc07f, 0x807a847a,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611b7a,
+ 0xbefc007a, 0xc0611b3a,
0x0000007c, 0xbf8cc07f,
0x807a847a, 0xbefc007e,
0xbefe007c, 0xbefc007a,
- 0xc0611bba, 0x0000007c,
+ 0xc0611b7a, 0x0000007c,
0xbf8cc07f, 0x807a847a,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611bfa,
+ 0xbefc007a, 0xc0611bba,
0x0000007c, 0xbf8cc07f,
0x807a847a, 0xbefc007e,
0xbefe007c, 0xbefc007a,
- 0xc0611e3a, 0x0000007c,
- 0xbf8cc07f, 0x807a847a,
- 0xbefc007e, 0xb8f1f803,
- 0xbefe007c, 0xbefc007a,
- 0xc0611c7a, 0x0000007c,
+ 0xc0611bfa, 0x0000007c,
0xbf8cc07f, 0x807a847a,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611a3a,
+ 0xbefc007a, 0xc0611e3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xb8f1f803, 0xbefe007c,
+ 0xbefc007a, 0xc0611c7a,
0x0000007c, 0xbf8cc07f,
0x807a847a, 0xbefc007e,
0xbefe007c, 0xbefc007a,
- 0xc0611a7a, 0x0000007c,
- 0xbf8cc07f, 0x807a847a,
- 0xbefc007e, 0xb8fbf801,
- 0xbefe007c, 0xbefc007a,
- 0xc0611efa, 0x0000007c,
+ 0xc0611a3a, 0x0000007c,
0xbf8cc07f, 0x807a847a,
- 0xbefc007e, 0x8670ff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f70, 0xb8fa2a05,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc007a, 0xc0611a7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xb8fbf801, 0xbefe007c,
+ 0xbefc007a, 0xc0611efa,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0x8670ff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f70,
+ 0xb8fa2a05, 0x807a817a,
+ 0x8e7a8a7a, 0xb8f11605,
+ 0x80718171, 0x8e718471,
+ 0x8e768271, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747a74, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a717c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbefa0080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0xe0724000,
+ 0x7a1d0000, 0xe0724100,
+ 0x7a1d0100, 0xe0724200,
+ 0x7a1d0200, 0xe0724300,
+ 0x7a1d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8f14306,
+ 0x8671c171, 0xbf84002c,
+ 0xbf8a0000, 0x8670ff6f,
+ 0x04000000, 0xbf840028,
+ 0x8e718671, 0x8e718271,
+ 0xbef60071, 0xb8fa2a05,
0x807a817a, 0x8e7a8a7a,
- 0xb8f11605, 0x80718171,
- 0x8e718471, 0x8e768271,
+ 0xb8f01605, 0x80708170,
+ 0x8e708670, 0x807a707a,
+ 0x807aff7a, 0x00000080,
0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747a74,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a717c, 0xbf85ffe7,
- 0xbef40172, 0xbefa0080,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe800077,
+ 0x8677ff77, 0xff7fffff,
+ 0x8777ff77, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8cc07f, 0xe0765000,
+ 0x7a1d0002, 0x68040702,
+ 0xd0c9006a, 0x0000e302,
+ 0xbf87fff7, 0xbef70000,
+ 0xbefa00ff, 0x00000400,
0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
+ 0xb8f12a05, 0x80718171,
+ 0x8e718271, 0x8e768871,
0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a717c,
+ 0xbf840015, 0xbf11017c,
+ 0x8071ff71, 0x00001000,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0xe0724000, 0x7a1d0000,
0xe0724100, 0x7a1d0100,
0xe0724200, 0x7a1d0200,
0xe0724300, 0x7a1d0300,
+ 0x807c847c, 0x807aff7a,
+ 0x00000400, 0xbf0a717c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xbf8200dc, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x08000000, 0x8f6e836e,
+ 0x87776e77, 0x866eff7f,
+ 0x70000000, 0x8f6e816e,
+ 0x87776e77, 0x866eff7f,
+ 0x04000000, 0xbf84001e,
0xbefe00c1, 0xbeff00c1,
- 0xb8f14306, 0x8671c171,
- 0xbf84002c, 0xbf8a0000,
- 0x8670ff6f, 0x04000000,
- 0xbf840028, 0x8e718671,
- 0x8e718271, 0xbef60071,
- 0xb8fa2a05, 0x807a817a,
- 0x8e7a8a7a, 0xb8f01605,
- 0x80708170, 0x8e708670,
- 0x807a707a, 0x807aff7a,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf840019, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
0x00000080, 0xbef600ff,
0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
- 0xd1060002, 0x00011103,
- 0x7e0602ff, 0x00000200,
- 0xbefc00ff, 0x00010000,
- 0xbe800077, 0x8677ff77,
- 0xff7fffff, 0x8777ff77,
- 0x00058000, 0xd8ec0000,
- 0x00000002, 0xbf8cc07f,
- 0xe0765000, 0x7a1d0002,
- 0x68040702, 0xd0c9006a,
- 0x0000e302, 0xbf87fff7,
- 0xbef70000, 0xbefa00ff,
- 0x00000400, 0xbefe00c1,
- 0xbeff00c1, 0xb8f12a05,
- 0x80718171, 0x8e718271,
- 0x8e768871, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a717c, 0xbf840015,
- 0xbf11017c, 0x8071ff71,
- 0x00001000, 0x7e000300,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
+ 0xbef80080, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef2a05,
+ 0x806f816f, 0x8e6f826f,
+ 0x8e76886f, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0x806fff6f, 0x00008000,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0x7e000300,
0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x7a1d0000, 0xe0724100,
- 0x7a1d0100, 0xe0724200,
- 0x7a1d0200, 0xe0724300,
- 0x7a1d0300, 0x807c847c,
- 0x807aff7a, 0x00000400,
- 0xbf0a717c, 0xbf85ffef,
- 0xbf9c0000, 0xbf8200d9,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x08000000,
- 0x8f6e836e, 0x87776e77,
- 0x866eff7f, 0x70000000,
- 0x8f6e816e, 0x87776e77,
- 0x866eff7f, 0x04000000,
- 0xbf84001e, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf840019,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82a05,
+ 0x7e060303, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xb8f82a05,
0x80788178, 0x8e788a78,
0xb8ee1605, 0x806e816e,
0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbef80080,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8ef2a05, 0x806f816f,
- 0x8e6f826f, 0x8e76886f,
- 0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefc0084,
- 0xbf11087c, 0x806fff6f,
- 0x00008000, 0xe0524000,
- 0x781d0000, 0xe0524100,
- 0x781d0100, 0xe0524200,
- 0x781d0200, 0xe0524300,
- 0x781d0300, 0xbf8c0f70,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffee, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
0xb8f82a05, 0x80788178,
0x8e788a78, 0xb8ee1605,
0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
+ 0x80786e78, 0xbef60084,
0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xc0211bfa,
+ 0xc0211bfa, 0x00000078,
+ 0x80788478, 0xc0211b3a,
0x00000078, 0x80788478,
- 0xc0211b3a, 0x00000078,
- 0x80788478, 0xc0211b7a,
+ 0xc0211b7a, 0x00000078,
+ 0x80788478, 0xc0211eba,
0x00000078, 0x80788478,
- 0xc0211eba, 0x00000078,
- 0x80788478, 0xc0211efa,
+ 0xc0211efa, 0x00000078,
+ 0x80788478, 0xc0211c3a,
0x00000078, 0x80788478,
- 0xc0211c3a, 0x00000078,
- 0x80788478, 0xc0211c7a,
+ 0xc0211c7a, 0x00000078,
+ 0x80788478, 0xc0211a3a,
0x00000078, 0x80788478,
- 0xc0211a3a, 0x00000078,
- 0x80788478, 0xc0211a7a,
+ 0xc0211a7a, 0x00000078,
+ 0x80788478, 0xc0211cfa,
0x00000078, 0x80788478,
- 0xc0211cfa, 0x00000078,
- 0x80788478, 0xbf8cc07f,
- 0xbefc006f, 0xbefe007a,
- 0xbeff007b, 0x866f71ff,
- 0x000003ff, 0xb96f4803,
- 0x866f71ff, 0xfffff800,
- 0x8f6f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee2a05,
- 0x806e816e, 0x8e6e8a6e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc0071cb7, 0x00000040,
- 0xc00b1d37, 0x00000048,
- 0xc0031e77, 0x00000058,
- 0xc0071eb7, 0x0000005c,
- 0xbf8cc07f, 0x866fff6d,
- 0xf0000000, 0x8f6f9c6f,
- 0x8e6f906f, 0xbeee0080,
- 0x876e6f6e, 0x866fff6d,
- 0x08000000, 0x8f6f9b6f,
- 0x8e6f8f6f, 0x876e6f6e,
- 0x866fff70, 0x00800000,
- 0x8f6f976f, 0xb96ef807,
- 0x866dff6d, 0x0000ffff,
- 0x86fe7e7e, 0x86ea6a6a,
- 0xb970f802, 0xbf8a0000,
+ 0xbf8cc07f, 0xbefc006f,
+ 0xbefe007a, 0xbeff007b,
+ 0x866f71ff, 0x000003ff,
+ 0xb96f4803, 0x866f71ff,
+ 0xfffff800, 0x8f6f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee2a05, 0x806e816e,
+ 0x8e6e8a6e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc0071cb7,
+ 0x00000040, 0xc00b1d37,
+ 0x00000048, 0xc0031e77,
+ 0x00000058, 0xc0071eb7,
+ 0x0000005c, 0xbf8cc07f,
+ 0x866fff6d, 0xf0000000,
+ 0x8f6f9c6f, 0x8e6f906f,
+ 0xbeee0080, 0x876e6f6e,
+ 0x866fff6d, 0x08000000,
+ 0x8f6f9b6f, 0x8e6f8f6f,
+ 0x876e6f6e, 0x866fff70,
+ 0x00800000, 0x8f6f976f,
+ 0xb96ef807, 0x866dff6d,
+ 0x0000ffff, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e8370,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb9700002, 0xbf8a0000,
0x95806f6c, 0xbf810000,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
index a2a04bb64096..abe1a5da29fb 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
@@ -103,6 +103,10 @@ var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -251,7 +255,7 @@ if (!EMU_RUN_HACK)
s_waitcnt lgkmcnt(0)
s_or_b32 ttmp7, ttmp8, ttmp9
s_cbranch_scc0 L_NO_NEXT_TRAP //next level trap handler not been set
- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+ set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
s_setpc_b64 [ttmp8,ttmp9] //jump to next level trap handler
L_NO_NEXT_TRAP:
@@ -262,7 +266,7 @@ L_NO_NEXT_TRAP:
s_addc_u32 ttmp1, ttmp1, 0
L_EXCP_CASE:
s_and_b32 ttmp1, ttmp1, 0xFFFF
- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+ set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
s_rfe_b64 [ttmp0, ttmp1]
end
// ********* End handling of non-CWSR traps *******************
@@ -1053,7 +1057,7 @@ end
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
- s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+ set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
@@ -1134,3 +1138,11 @@ end
function get_hwreg_size_bytes
return 128 //HWREG size 128 bytes
end
+
+function set_status_without_spi_prio(status, tmp)
+ // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
+ s_lshr_b32 tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
+ s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
+end
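
set_status_without_spi_prio() restores every STATUS field except SPI_PRIO (bits 2:1, per SQ_WAVE_STATUS_SPI_PRIO_MASK above), because the scheduler may have raised the wave's priority while it was in the trap handler. The two s_setreg_b32 writes cover bit 0 (PRE: shift 0, size 1) and bits 31:3 (POST: shift 3, size 29). A C model of the net effect, assuming the same mask:

#include <stdint.h>

#define SQ_WAVE_STATUS_SPI_PRIO_MASK	0x00000006u	/* bits 2:1 */

/* Net effect of the two partial s_setreg_b32 writes: every bit of
 * 'saved' is restored except SPI_PRIO, which keeps its live value. */
static uint32_t restore_status_model(uint32_t live, uint32_t saved)
{
	return (live & SQ_WAVE_STATUS_SPI_PRIO_MASK) |
	       (saved & ~SQ_WAVE_STATUS_SPI_PRIO_MASK);
}
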
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index 998be96be736..0bb9c577b3a2 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -103,6 +103,10 @@ var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -317,7 +321,7 @@ L_EXCP_CASE:
// Restore SQ_WAVE_STATUS.
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status
+ set_status_without_spi_prio(s_save_status, ttmp2)
s_rfe_b64 [ttmp0, ttmp1]
end
@@ -1120,7 +1124,7 @@ end
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
- s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+ set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
@@ -1212,3 +1216,11 @@ function ack_sqc_store_workaround
s_waitcnt lgkmcnt(0)
end
end
+
+function set_status_without_spi_prio(status, tmp)
+ // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
+ s_lshr_b32 tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
+ s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
+end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index f64c5551cdba..297b36c26a05 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -122,6 +122,9 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (IS_ERR(process))
return PTR_ERR(process);
+ if (kfd_is_locked())
+ return -EAGAIN;
+
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
@@ -389,6 +392,61 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
return retval;
}
+static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
+ void *data)
+{
+ int retval;
+ const int max_num_cus = 1024;
+ struct kfd_ioctl_set_cu_mask_args *args = data;
+ struct queue_properties properties;
+ uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
+ size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
+
+ if ((args->num_cu_mask % 32) != 0) {
+ pr_debug("num_cu_mask 0x%x must be a multiple of 32\n",
+ args->num_cu_mask);
+ return -EINVAL;
+ }
+
+ properties.cu_mask_count = args->num_cu_mask;
+ if (properties.cu_mask_count == 0) {
+ pr_debug("CU mask cannot be 0\n");
+ return -EINVAL;
+ }
+
+ /* To prevent an unreasonably large CU mask size, cap it at an
+ * arbitrary limit of max_num_cus bits: any CU mask bits past
+ * max_num_cus are dropped and only the first max_num_cus are used.
+ */
+ if (properties.cu_mask_count > max_num_cus) {
+ pr_debug("CU mask cannot be greater than 1024 bits\n");
+ properties.cu_mask_count = max_num_cus;
+ cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
+ }
+
+ properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL);
+ if (!properties.cu_mask)
+ return -ENOMEM;
+
+ retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size);
+ if (retval) {
+ pr_debug("Could not copy CU mask from userspace\n");
+ kfree(properties.cu_mask);
+ return -EFAULT;
+ }
+
+ mutex_lock(&p->mutex);
+
+ retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);
+
+ mutex_unlock(&p->mutex);
+
+ if (retval)
+ kfree(properties.cu_mask);
+
+ return retval;
+}
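
From user space the new ioctl would be driven roughly as below; this is a hedged sketch whose args fields simply mirror the kernel-side usage above (queue_id, num_cu_mask, cu_mask_ptr), assuming the struct and ioctl number are exported via the usual uapi header:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>	/* assumed uapi header */

/* Restrict a queue to the CUs set in 'mask' (64 mask bits here). */
static int set_queue_cu_mask(int kfd_fd, uint32_t queue_id)
{
	uint32_t mask[2] = { 0xffffffff, 0x0000ffff };
	struct kfd_ioctl_set_cu_mask_args args;

	memset(&args, 0, sizeof(args));
	args.queue_id = queue_id;
	args.num_cu_mask = 64;		/* must be a multiple of 32 */
	args.cu_mask_ptr = (uintptr_t)mask;

	return ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
}
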
+
static int kfd_ioctl_set_memory_policy(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -754,7 +812,6 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
{
struct kfd_ioctl_get_clock_counters_args *args = data;
struct kfd_dev *dev;
- struct timespec64 time;
dev = kfd_device_by_id(args->gpu_id);
if (dev)
@@ -766,11 +823,8 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
args->gpu_clock_counter = 0;
/* No access to rdtsc. Using raw monotonic time */
- getrawmonotonic64(&time);
- args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);
-
- get_monotonic_boottime64(&time);
- args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);
+ args->cpu_clock_counter = ktime_get_raw_ns();
+ args->system_clock_counter = ktime_get_boot_ns();
/* Since the counter is in nano-seconds we use 1GHz frequency */
args->system_clock_freq = 1000000000;
@@ -1558,6 +1612,9 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
kfd_ioctl_unmap_memory_from_gpu, 0),
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
+ kfd_ioctl_set_cu_mask, 0),
+
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 296b3f230280..ee4996029a86 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -189,6 +189,21 @@ static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
return 0;
}
+static struct kfd_mem_properties *
+find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
+ struct kfd_topology_device *dev)
+{
+ struct kfd_mem_properties *props;
+
+ list_for_each_entry(props, &dev->mem_props, list) {
+ if (props->heap_type == heap_type
+ && props->flags == flags
+ && props->width == width)
+ return props;
+ }
+
+ return NULL;
+}
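
find_subtype_mem() lets kfd_parse_subtype_mem() below fold multiple physical banks of one kind into a single entry: for example, two 4 GiB banks with identical heap_type, flags and width would surface to user mode as one 8 GiB bank, since size_in_bytes is simply accumulated.
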
/* kfd_parse_subtype_mem - parse memory subtypes and attach it to correct
* topology device present in the device_list
*/
@@ -197,36 +212,56 @@ static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
{
struct kfd_mem_properties *props;
struct kfd_topology_device *dev;
+ uint32_t heap_type;
+ uint64_t size_in_bytes;
+ uint32_t flags = 0;
+ uint32_t width;
pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
mem->proximity_domain);
list_for_each_entry(dev, device_list, list) {
if (mem->proximity_domain == dev->proximity_domain) {
- props = kfd_alloc_struct(props);
- if (!props)
- return -ENOMEM;
-
/* We're on GPU node */
if (dev->node_props.cpu_cores_count == 0) {
/* APU */
if (mem->visibility_type == 0)
- props->heap_type =
+ heap_type =
HSA_MEM_HEAP_TYPE_FB_PRIVATE;
/* dGPU */
else
- props->heap_type = mem->visibility_type;
+ heap_type = mem->visibility_type;
} else
- props->heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
+ heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
- props->flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
+ flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
- props->flags |= HSA_MEM_FLAGS_NON_VOLATILE;
+ flags |= HSA_MEM_FLAGS_NON_VOLATILE;
- props->size_in_bytes =
+ size_in_bytes =
((uint64_t)mem->length_high << 32) +
mem->length_low;
- props->width = mem->width;
+ width = mem->width;
+
+ /* Multiple banks of the same type are aggregated into
+ * one. User mode doesn't care about multiple physical
+ * memory segments; they are managed as a single
+ * virtual heap.
+ */
+ props = find_subtype_mem(heap_type, flags, width, dev);
+ if (props) {
+ props->size_in_bytes += size_in_bytes;
+ break;
+ }
+
+ props = kfd_alloc_struct(props);
+ if (!props)
+ return -ENOMEM;
+
+ props->heap_type = heap_type;
+ props->flags = flags;
+ props->size_in_bytes = size_in_bytes;
+ props->width = width;
dev->node_props.mem_banks_count++;
list_add_tail(&props->list, &dev->mem_props);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index afb26f205d29..a3441b0e385b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -38,7 +38,6 @@
#include "kfd_dbgmgr.h"
#include "kfd_dbgdev.h"
#include "kfd_device_queue_manager.h"
-#include "../../radeon/cik_reg.h"
static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
index 03424c20920c..0619c777b47e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
@@ -60,6 +60,9 @@ enum {
SH_REG_SIZE = SH_REG_END - SH_REG_BASE
};
+/* SQ_CMD definitions */
+#define SQ_CMD 0x8DEC
+
enum SQ_IND_CMD_CMD {
SQ_IND_CMD_CMD_NULL = 0x00000000,
SQ_IND_CMD_CMD_HALT = 0x00000001,
@@ -190,4 +193,38 @@ union ULARGE_INTEGER {
void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
enum DBGDEV_TYPE type);
+union TCP_WATCH_CNTL_BITS {
+ struct {
+ uint32_t mask:24;
+ uint32_t vmid:4;
+ uint32_t atc:1;
+ uint32_t mode:2;
+ uint32_t valid:1;
+ } bitfields, bits;
+ uint32_t u32All;
+ signed int i32All;
+ float f32All;
+};
+
+enum {
+ ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
+ ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
+ ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
+ /* extend the mask to 26 bits in order to match the low address field */
+ ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
+ ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
+};
+
+enum {
+ MAX_TRAPID = 8, /* 3 bits in the bitfield. */
+ MAX_WATCH_ADDRESSES = 4
+};
+
+enum {
+ ADDRESS_WATCH_REG_ADDR_HI = 0,
+ ADDRESS_WATCH_REG_ADDR_LO,
+ ADDRESS_WATCH_REG_CNTL,
+ ADDRESS_WATCH_REG_MAX
+};
+
#endif /* KFD_DBGDEV_H_ */
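
As an illustration of the union moved into this header (the VMID and mode values are hypothetical), a watch-control word could be composed like this:

/* Illustrative only: compose a TCP watch-control word. */
static uint32_t make_watch_cntl(uint32_t vmid)
{
	union TCP_WATCH_CNTL_BITS cntl;

	cntl.u32All = 0;
	cntl.bitfields.mask  = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
	cntl.bitfields.vmid  = vmid;	/* 4-bit KFD VMID */
	cntl.bitfields.atc   = 1;	/* use ATC translation */
	cntl.bitfields.mode  = 0;	/* ASIC-defined watch mode */
	cntl.bitfields.valid = 1;
	return cntl.u32All;
}
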
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 4bd6ebfaf425..ab37d36d9cd6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -21,6 +21,8 @@
*/
#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
#include "kfd_priv.h"
static struct dentry *debugfs_root;
@@ -32,6 +34,38 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
return single_open(file, show, NULL);
}
+static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
+ const char __user *user_buf, size_t size, loff_t *ppos)
+{
+ struct kfd_dev *dev;
+ char tmp[16];
+ uint32_t gpu_id;
+ int ret = -EINVAL;
+
+ memset(tmp, 0, 16);
+ if (size >= 16) {
+ pr_err("Invalid input for gpu id.\n");
+ goto out;
+ }
+ if (copy_from_user(tmp, user_buf, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (kstrtouint(tmp, 10, &gpu_id)) {
+ pr_err("Invalid input for gpu id.\n");
+ goto out;
+ }
+ dev = kfd_device_by_id(gpu_id);
+ if (dev) {
+ kfd_debugfs_hang_hws(dev);
+ ret = size;
+ } else {
+ pr_err("Cannot find device %d.\n", gpu_id);
+ }
+
+out:
+ return ret;
+}
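
With this hook in place, writing a decimal gpu_id (as reported in the KFD topology) to the new debugfs file triggers kfd_debugfs_hang_hws() for that device, e.g. "echo 33 > /sys/kernel/debug/kfd/hang_hws", assuming the standard debugfs mount point and that 33 is a valid gpu_id on the system.
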
+
static const struct file_operations kfd_debugfs_fops = {
.owner = THIS_MODULE,
.open = kfd_debugfs_open,
@@ -40,6 +74,15 @@ static const struct file_operations kfd_debugfs_fops = {
.release = single_release,
};
+static const struct file_operations kfd_debugfs_hang_hws_fops = {
+ .owner = THIS_MODULE,
+ .open = kfd_debugfs_open,
+ .read = seq_read,
+ .write = kfd_debugfs_hang_hws_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
void kfd_debugfs_init(void)
{
struct dentry *ent;
@@ -65,6 +108,13 @@ void kfd_debugfs_init(void)
ent = debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
kfd_debugfs_rls_by_device,
&kfd_debugfs_fops);
+ if (!ent)
+ pr_warn("Failed to create rls in kfd debugfs\n");
+
+ ent = debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
+ NULL,
+ &kfd_debugfs_hang_hws_fops);
+
if (!ent)
- pr_warn("Failed to create rls in kfd debugfs\n");
+ pr_warn("Failed to create hang_hws in kfd debugfs\n");
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 7ee6cec2c060..1b048715ab8a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -30,7 +30,13 @@
#include "kfd_iommu.h"
#define MQD_SIZE_ALIGNED 768
-static atomic_t kfd_device_suspended = ATOMIC_INIT(0);
+
+/*
+ * kfd_locked is used to lock the KFD driver during suspend or reset.
+ * Once locked, the driver stops any further GPU execution, and
+ * process creation (open) returns -EAGAIN.
+ */
+static atomic_t kfd_locked = ATOMIC_INIT(0);
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
@@ -46,6 +52,7 @@ static const struct kfd_device_info kaveri_device_info = {
.supports_cwsr = false,
.needs_iommu_device = true,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info carrizo_device_info = {
@@ -61,6 +68,22 @@ static const struct kfd_device_info carrizo_device_info = {
.supports_cwsr = true,
.needs_iommu_device = true,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+};
+
+static const struct kfd_device_info raven_device_info = {
+ .asic_family = CHIP_RAVEN,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = true,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 1,
};
#endif
@@ -77,6 +100,7 @@ static const struct kfd_device_info hawaii_device_info = {
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info tonga_device_info = {
@@ -91,6 +115,7 @@ static const struct kfd_device_info tonga_device_info = {
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info tonga_vf_device_info = {
@@ -105,6 +130,7 @@ static const struct kfd_device_info tonga_vf_device_info = {
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info fiji_device_info = {
@@ -119,6 +145,7 @@ static const struct kfd_device_info fiji_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info fiji_vf_device_info = {
@@ -133,6 +160,7 @@ static const struct kfd_device_info fiji_vf_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
@@ -148,6 +176,7 @@ static const struct kfd_device_info polaris10_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info polaris10_vf_device_info = {
@@ -162,6 +191,7 @@ static const struct kfd_device_info polaris10_vf_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info polaris11_device_info = {
@@ -176,6 +206,7 @@ static const struct kfd_device_info polaris11_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info vega10_device_info = {
@@ -190,6 +221,7 @@ static const struct kfd_device_info vega10_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info vega10_vf_device_info = {
@@ -204,6 +236,7 @@ static const struct kfd_device_info vega10_vf_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
@@ -241,6 +274,7 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x9875, &carrizo_device_info }, /* Carrizo */
{ 0x9876, &carrizo_device_info }, /* Carrizo */
{ 0x9877, &carrizo_device_info }, /* Carrizo */
+ { 0x15DD, &raven_device_info }, /* Raven */
#endif
{ 0x67A0, &hawaii_device_info }, /* Hawaii */
{ 0x67A1, &hawaii_device_info }, /* Hawaii */
@@ -514,13 +548,54 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfree(kfd);
}
+int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+{
+ if (!kfd->init_complete)
+ return 0;
+ kgd2kfd_suspend(kfd);
+
+ /* hold dqm->lock to prevent further execution */
+ dqm_lock(kfd->dqm);
+
+ kfd_signal_reset_event(kfd);
+ return 0;
+}
+
+/*
+ * FIXME: KFD won't be able to resume existing processes for now.
+ * All existing processes are kept in an evicted state and we
+ * wait for them to terminate.
+ */
+
+int kgd2kfd_post_reset(struct kfd_dev *kfd)
+{
+ int ret, count;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ dqm_unlock(kfd->dqm);
+
+ ret = kfd_resume(kfd);
+ if (ret)
+ return ret;
+ count = atomic_dec_return(&kfd_locked);
+ WARN_ONCE(count != 0, "KFD reset ref. error");
+ return 0;
+}
+
+bool kfd_is_locked(void)
+{
+ return (atomic_read(&kfd_locked) > 0);
+}
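
Note the pairing: kgd2kfd_pre_reset() raises kfd_locked via kgd2kfd_suspend(), and kgd2kfd_post_reset() drops it, warning if the count does not return exactly to zero. kgd2kfd_suspend()/kgd2kfd_resume() share the same counter, so the first suspender evicts all processes and the last resumer restores them.
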
+
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
if (!kfd->init_complete)
return;
/* For first KFD device suspend all the KFD processes */
- if (atomic_inc_return(&kfd_device_suspended) == 1)
+ if (atomic_inc_return(&kfd_locked) == 1)
kfd_suspend_all_processes();
kfd->dqm->ops.stop(kfd->dqm);
@@ -539,7 +614,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
if (ret)
return ret;
- count = atomic_dec_return(&kfd_device_suspended);
+ count = atomic_dec_return(&kfd_locked);
WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
if (count == 0)
ret = kfd_resume_all_processes();
@@ -577,14 +652,24 @@ dqm_start_error:
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
+ uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
+ bool is_patched = false;
+
if (!kfd->init_complete)
return;
+ if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
+ dev_err_once(kfd_device, "IH ring entry too large for patch buffer\n");
+ return;
+ }
+
spin_lock(&kfd->interrupt_lock);
if (kfd->interrupts_active
- && interrupt_is_wanted(kfd, ih_ring_entry)
- && enqueue_ih_ring_entry(kfd, ih_ring_entry))
+ && interrupt_is_wanted(kfd, ih_ring_entry,
+ patched_ihre, &is_patched)
+ && enqueue_ih_ring_entry(kfd,
+ is_patched ? patched_ihre : ih_ring_entry))
queue_work(kfd->ih_wq, &kfd->interrupt_work);
spin_unlock(&kfd->interrupt_lock);
@@ -739,8 +824,8 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
return -ENOMEM;
- *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
- if ((*mem_obj) == NULL)
+ *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
+ if (!(*mem_obj))
return -ENOMEM;
pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
@@ -857,3 +942,26 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
kfree(mem_obj);
return 0;
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+/* This function sends a packet to the HIQ to hang the HWS,
+ * which will trigger a GPU reset and bring the HWS back to a normal state.
+ */
+int kfd_debugfs_hang_hws(struct kfd_dev *dev)
+{
+ int r = 0;
+
+ if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
+ pr_err("HWS is not enabled\n");
+ return -EINVAL;
+ }
+
+ r = pm_debugfs_hang_hws(&dev->dqm->packets);
+ if (!r)
+ r = dqm_debugfs_execute_queues(dev->dqm);
+
+ return r;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 668ad07ebe1f..ec0d62a16e53 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -61,6 +61,8 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
unsigned int sdma_queue_id);
+static void kfd_process_hw_exception(struct work_struct *work);
+
static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
@@ -99,6 +101,17 @@ unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
return dqm->dev->shared_resources.num_pipe_per_mec;
}
+static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
+{
+ return dqm->dev->device_info->num_sdma_engines;
+}
+
+unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
+{
+ return dqm->dev->device_info->num_sdma_engines
+ * KFD_SDMA_QUEUES_PER_ENGINE;
+}
+
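
The bitmap sized from get_num_sdma_queues() hands out flat sdma_id values, which create_sdma_queue_nocpsch() below splits into per-engine queue and engine indices by division and modulo. A small sketch of that mapping (values assume two engines, as on most of the ASICs above):

/* e.g. with 2 engines: sdma_id 0->(q0,e0), 1->(q0,e1), 2->(q1,e0), 3->(q1,e1) */
static void sdma_id_split(unsigned int sdma_id, unsigned int num_engines,
			  unsigned int *queue_id, unsigned int *engine_id)
{
	*queue_id  = sdma_id / num_engines;
	*engine_id = sdma_id % num_engines;
}
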
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
@@ -240,7 +253,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
print_queue(q);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
@@ -297,7 +310,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
dqm->total_queue_count);
out_unlock:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -346,10 +359,10 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (!mqd)
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ if (!mqd_mgr)
return -ENOMEM;
retval = allocate_hqd(dqm, q);
@@ -360,7 +373,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
if (retval)
goto out_deallocate_hqd;
- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
goto out_deallocate_doorbell;
@@ -374,15 +387,15 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
if (!q->properties.is_active)
return 0;
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
- q->process->mm);
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+ &q->properties, q->process->mm);
if (retval)
goto out_uninit_mqd;
return 0;
out_uninit_mqd:
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
deallocate_doorbell(qpd, q);
out_deallocate_hqd:
@@ -399,11 +412,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
struct queue *q)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd)
+ if (!mqd_mgr)
return -ENOMEM;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
@@ -420,14 +433,14 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_doorbell(qpd, q);
- retval = mqd->destroy_mqd(mqd, q->mqd,
+ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
q->pipe, q->queue);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
list_del(&q->list);
if (list_empty(&qpd->queues_list)) {
@@ -457,9 +470,9 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
{
int retval;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -467,19 +480,19 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
bool prev_active = false;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
pdd = kfd_get_process_device_data(q->device, q->process);
if (!pdd) {
retval = -ENODEV;
goto out_unlock;
}
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto out_unlock;
}
@@ -506,7 +519,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
} else if (prev_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
- retval = mqd->destroy_mqd(mqd, q->mqd,
+ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval) {
@@ -515,7 +528,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
}
}
- retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+ retval = mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
/*
* check active state vs. the previous state and modify
@@ -533,44 +546,44 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
else if (q->properties.is_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA))
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
&q->properties, q->process->mm);
out_unlock:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
static struct mqd_manager *get_mqd_manager(
struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
pr_debug("mqd type %d\n", type);
- mqd = dqm->mqds[type];
- if (!mqd) {
- mqd = mqd_manager_init(type, dqm->dev);
- if (!mqd)
+ mqd_mgr = dqm->mqd_mgrs[type];
+ if (!mqd_mgr) {
+ mqd_mgr = mqd_manager_init(type, dqm->dev);
+ if (!mqd_mgr)
pr_err("mqd manager is NULL");
- dqm->mqds[type] = mqd;
+ dqm->mqd_mgrs[type] = mqd_mgr;
}
- return mqd;
+ return mqd_mgr;
}
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
int retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (qpd->evicted++ > 0) /* already evicted, do nothing */
goto out;
@@ -582,16 +595,16 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
list_for_each_entry(q, &qpd->queues_list, list) {
if (!q->properties.is_active)
continue;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) { /* should not be here */
+ if (!mqd_mgr) { /* should not be here */
pr_err("Cannot evict queue, mqd mgr is NULL\n");
retval = -ENOMEM;
goto out;
}
q->properties.is_evicted = true;
q->properties.is_active = false;
- retval = mqd->destroy_mqd(mqd, q->mqd,
+ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval)
@@ -600,7 +613,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
}
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -611,7 +624,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
struct kfd_process_device *pdd;
int retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (qpd->evicted++ > 0) /* already evicted, do nothing */
goto out;
@@ -633,7 +646,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -641,7 +654,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
uint32_t pd_base;
int retval = 0;
@@ -650,7 +663,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
/* Retrieve PD base */
pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
goto out;
if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
@@ -677,16 +690,16 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
list_for_each_entry(q, &qpd->queues_list, list) {
if (!q->properties.is_evicted)
continue;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) { /* should not be here */
+ if (!mqd_mgr) { /* should not be here */
pr_err("Cannot restore queue, mqd mgr is NULL\n");
retval = -ENOMEM;
goto out;
}
q->properties.is_evicted = false;
q->properties.is_active = true;
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q->queue, &q->properties,
q->process->mm);
if (retval)
@@ -695,7 +708,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
}
qpd->evicted = 0;
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -711,7 +724,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
/* Retrieve PD base */
pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
goto out;
if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
@@ -739,7 +752,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
if (!retval)
qpd->evicted = 0;
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -761,7 +774,7 @@ static int register_process(struct device_queue_manager *dqm,
/* Retrieve PD base */
pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
list_add(&n->list, &dqm->queues);
/* Update PD Base in QPD */
@@ -769,9 +782,10 @@ static int register_process(struct device_queue_manager *dqm,
retval = dqm->asic_ops.update_qpd(dqm, qpd);
- dqm->processes_count++;
+ if (dqm->processes_count++ == 0)
+ dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -786,20 +800,22 @@ static int unregister_process(struct device_queue_manager *dqm,
list_empty(&qpd->queues_list) ? "empty" : "not empty");
retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
list_for_each_entry_safe(cur, next, &dqm->queues, list) {
if (qpd == cur->qpd) {
list_del(&cur->list);
kfree(cur);
- dqm->processes_count--;
+ if (--dqm->processes_count == 0)
+ dqm->dev->kfd2kgd->set_compute_idle(
+ dqm->dev->kgd, true);
goto out;
}
}
/* qpd not found in dqm list */
retval = 1;
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -838,7 +854,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
if (!dqm->allocated_queues)
return -ENOMEM;
- mutex_init(&dqm->lock);
+ mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->next_pipe_to_allocate = 0;
dqm->sdma_queue_count = 0;
@@ -853,7 +869,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
}
dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
- dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
+ dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
return 0;
}
@@ -866,8 +882,8 @@ static void uninitialize(struct device_queue_manager *dqm)
kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
- kfree(dqm->mqds[i]);
- mutex_destroy(&dqm->lock);
+ kfree(dqm->mqd_mgrs[i]);
+ mutex_destroy(&dqm->lock_hidden);
kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
@@ -901,7 +917,7 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
unsigned int sdma_queue_id)
{
- if (sdma_queue_id >= CIK_SDMA_QUEUES)
+ if (sdma_queue_id >= get_num_sdma_queues(dqm))
return;
dqm->sdma_bitmap |= (1 << sdma_queue_id);
}
@@ -910,19 +926,19 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd)
{
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
int retval;
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
- if (!mqd)
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+ if (!mqd_mgr)
return -ENOMEM;
retval = allocate_sdma_queue(dqm, &q->sdma_id);
if (retval)
return retval;
- q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
- q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
+ q->properties.sdma_queue_id = q->sdma_id / get_num_sdma_engines(dqm);
+ q->properties.sdma_engine_id = q->sdma_id % get_num_sdma_engines(dqm);
retval = allocate_doorbell(qpd, q);
if (retval)
@@ -933,19 +949,20 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
goto out_deallocate_doorbell;
- retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, 0, 0, &q->properties,
+ NULL);
if (retval)
goto out_uninit_mqd;
return 0;
out_uninit_mqd:
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
@@ -1003,12 +1020,14 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
{
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
- mutex_init(&dqm->lock);
+ mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->processes_count = 0;
dqm->sdma_queue_count = 0;
dqm->active_runlist = false;
- dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
+ dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
+
+ INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
return 0;
}
@@ -1041,9 +1060,11 @@ static int start_cpsch(struct device_queue_manager *dqm)
init_interrupts(dqm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
+ /* clear hang status when the driver tries to start the HW scheduler */
+ dqm->is_hws_hang = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return 0;
fail_allocate_vidmem:
@@ -1055,9 +1076,9 @@ fail_packet_manager_init:
static int stop_cpsch(struct device_queue_manager *dqm)
{
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets);
@@ -1069,11 +1090,11 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new kernel queue because %d queues were already created\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return -EPERM;
}
@@ -1089,7 +1110,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->queue_count++;
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return 0;
}
@@ -1098,7 +1119,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
list_del(&kq->list);
dqm->queue_count--;
qpd->is_debug = false;
@@ -1110,18 +1131,18 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->total_queue_count--;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
@@ -1135,19 +1156,19 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (retval)
goto out_unlock;
q->properties.sdma_queue_id =
- q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
+ q->sdma_id / get_num_sdma_engines(dqm);
q->properties.sdma_engine_id =
- q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
+ q->sdma_id % get_num_sdma_engines(dqm);
}
retval = allocate_doorbell(qpd, q);
if (retval)
goto out_deallocate_sdma_queue;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto out_deallocate_doorbell;
}
@@ -1164,7 +1185,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
q->properties.tba_addr = qpd->tba_addr;
q->properties.tma_addr = qpd->tma_addr;
- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
goto out_deallocate_doorbell;
@@ -1188,7 +1209,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
out_deallocate_doorbell:
@@ -1197,7 +1218,8 @@ out_deallocate_sdma_queue:
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q->sdma_id);
out_unlock:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
+
return retval;
}
@@ -1210,6 +1232,13 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
while (*fence_addr != fence_value) {
if (time_after(jiffies, end_jiffies)) {
pr_err("qcm fence wait loop timeout expired\n");
+ /* In the HWS case, this is used to halt the driver thread
+ * in order not to mess up CP states before doing
+ * scandumps for FW debugging.
+ */
+ while (halt_if_hws_hang)
+ schedule();
+
return -ETIME;
}
schedule();
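
The timeout test above is wraparound-safe because it goes through
time_after() rather than comparing jiffies directly. With the typecheck
macros omitted, the helper in include/linux/jiffies.h reduces to a signed
subtraction:

    #define time_after(a, b)   ((long)((b) - (a)) < 0)

so the check stays correct even after the jiffies counter wraps. The
halt_if_hws_hang loop added above it deliberately never returns while the
module parameter is set, parking the thread so CP state survives for
firmware scandumps.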
@@ -1254,6 +1283,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
{
int retval = 0;
+ if (dqm->is_hws_hang)
+ return -EIO;
if (!dqm->active_runlist)
return retval;
@@ -1292,9 +1323,13 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
{
int retval;
+ if (dqm->is_hws_hang)
+ return -EIO;
retval = unmap_queues_cpsch(dqm, filter, filter_param);
if (retval) {
pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
+ dqm->is_hws_hang = true;
+ schedule_work(&dqm->hw_exception_work);
return retval;
}
@@ -1306,7 +1341,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
struct queue *q)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
bool preempt_all_queues;
preempt_all_queues = false;
@@ -1314,7 +1349,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
retval = 0;
/* remove queue from list to prevent rescheduling after preemption */
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (qpd->is_debug) {
/*
@@ -1326,9 +1361,9 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
}
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto failed;
}
@@ -1350,7 +1385,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
qpd->reset_wavefronts = true;
}
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
/*
* Unconditionally decrement this counter, regardless of the queue's
@@ -1360,14 +1395,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
failed:
failed_try_destroy_debugged_queue:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1391,7 +1426,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
if (!dqm->asic_ops.set_cache_memory_policy)
return retval;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (alternate_aperture_size == 0) {
/* base > limit disables APE1 */
@@ -1437,7 +1472,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
qpd->sh_mem_ape1_limit);
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1468,7 +1503,7 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
struct device_process_node *cur, *next_dpn;
int retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
/* Clear all user mode queues */
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
@@ -1489,7 +1524,7 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
}
}
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1500,14 +1535,14 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
int retval;
struct queue *q, *next;
struct kernel_queue *kq, *kq_next;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct device_process_node *cur, *next_dpn;
enum kfd_unmap_queues_filter filter =
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
/* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
@@ -1542,7 +1577,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
}
retval = execute_queues_cpsch(dqm, filter, 0);
- if (retval || qpd->reset_wavefronts) {
+ if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
qpd->reset_wavefronts = false;
@@ -1550,19 +1585,19 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* lastly, free mqd resources */
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto out;
}
list_del(&q->list);
qpd->queue_count--;
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
}
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1683,6 +1718,30 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
kfree(dqm);
}
+int kfd_process_vm_fault(struct device_queue_manager *dqm,
+ unsigned int pasid)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ int ret = 0;
+
+ if (!p)
+ return -EINVAL;
+ pdd = kfd_get_process_device_data(dqm->dev, p);
+ if (pdd)
+ ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
+ kfd_unref_process(p);
+
+ return ret;
+}
+
+static void kfd_process_hw_exception(struct work_struct *work)
+{
+ struct device_queue_manager *dqm = container_of(work,
+ struct device_queue_manager, hw_exception_work);
+ dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
+}
+
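
kfd_process_hw_exception() is the standard deferred-work idiom: the hang
is detected under the DQM lock, so execute_queues_cpsch() queues the work
and lets GPU recovery run later in sleepable worker context. A generic
sketch of the idiom with hypothetical names:

    struct foo {
            struct work_struct work;        /* embedded work item */
    };

    static void foo_handler(struct work_struct *work)
    {
            /* container_of() recovers the enclosing object */
            struct foo *f = container_of(work, struct foo, work);

            /* heavy, sleepable recovery runs here */
    }

    INIT_WORK(&f->work, foo_handler);       /* once, at init time */
    schedule_work(&f->work);                /* from the failure path */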
#if defined(CONFIG_DEBUG_FS)
static void seq_reg_dump(struct seq_file *m,
@@ -1746,8 +1805,8 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
}
- for (pipe = 0; pipe < CIK_SDMA_ENGINE_NUM; pipe++) {
- for (queue = 0; queue < CIK_SDMA_QUEUES_PER_ENGINE; queue++) {
+ for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
+ for (queue = 0; queue < KFD_SDMA_QUEUES_PER_ENGINE; queue++) {
r = dqm->dev->kfd2kgd->hqd_sdma_dump(
dqm->dev->kgd, pipe, queue, &dump, &n_regs);
if (r)
@@ -1764,4 +1823,16 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
return r;
}
+int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
+{
+ int r = 0;
+
+ dqm_lock(dqm);
+ dqm->active_runlist = true;
+ r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ dqm_unlock(dqm);
+
+ return r;
+}
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 59a6b1956932..00da3169a004 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -26,15 +26,14 @@
#include <linux/rwsem.h>
#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/sched/mm.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#define KFD_UNMAP_LATENCY_MS (4000)
#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (2 * KFD_UNMAP_LATENCY_MS + 1000)
-
-#define CIK_SDMA_QUEUES (4)
-#define CIK_SDMA_QUEUES_PER_ENGINE (2)
-#define CIK_SDMA_ENGINE_NUM (2)
+#define KFD_SDMA_QUEUES_PER_ENGINE (2)
struct device_process_node {
struct qcm_process_device *qpd;
@@ -170,11 +169,12 @@ struct device_queue_manager {
struct device_queue_manager_ops ops;
struct device_queue_manager_asic_ops asic_ops;
- struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
+ struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
struct packet_manager packets;
struct kfd_dev *dev;
- struct mutex lock;
+ struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */
struct list_head queues;
+ unsigned int saved_flags;
unsigned int processes_count;
unsigned int queue_count;
unsigned int sdma_queue_count;
@@ -190,6 +190,10 @@ struct device_queue_manager {
struct kfd_mem_obj *fence_mem;
bool active_runlist;
int sched_policy;
+
+ /* hw exception */
+ bool is_hws_hang;
+ struct work_struct hw_exception_work;
};
void device_queue_manager_init_cik(
@@ -207,6 +211,7 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
unsigned int get_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
+unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
@@ -219,4 +224,19 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
return (pdd->lds_base >> 60) & 0x0E;
}
+/* The DQM lock can be taken in MMU notifiers. Make sure no FS reclaim
+ * can happen anywhere while this lock is held, to prevent deadlocks
+ * when an MMU notifier runs in reclaim-FS context.
+ */
+static inline void dqm_lock(struct device_queue_manager *dqm)
+{
+ mutex_lock(&dqm->lock_hidden);
+ dqm->saved_flags = memalloc_nofs_save();
+}
+static inline void dqm_unlock(struct device_queue_manager *dqm)
+{
+ memalloc_nofs_restore(dqm->saved_flags);
+ mutex_unlock(&dqm->lock_hidden);
+}
+
#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
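
The dqm_lock()/dqm_unlock() wrappers pair the mutex with the kernel's
scoped NOFS marking: while the saved flags are in effect, every
allocation in the task behaves as if it were GFP_NOFS, so direct reclaim
cannot re-enter the filesystem and, via an MMU notifier that takes this
same lock, deadlock. The underlying pattern in isolation:

    #include <linux/sched/mm.h>

    unsigned int flags;

    flags = memalloc_nofs_save();
    /* any allocation here is implicitly GFP_NOFS, even when the call
     * site passes GFP_KERNEL
     */
    p = kmalloc(size, GFP_KERNEL);
    memalloc_nofs_restore(flags);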
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 79e5bcf6367c..417515332c35 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -60,7 +60,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config =
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
- if (vega10_noretry &&
+ if (noretry &&
!dqm->dev->device_info->needs_iommu_device)
qpd->sh_mem_config |=
1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index c3744d89352c..ebe79bf00145 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -188,9 +188,9 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
*doorbell_off = kfd->doorbell_id_offset + inx;
pr_debug("Get kernel queue doorbell\n"
- " doorbell offset == 0x%08X\n"
- " kernel address == %p\n",
- *doorbell_off, (kfd->doorbell_kernel_ptr + inx));
+ " doorbell offset == 0x%08X\n"
+ " doorbell index == 0x%x\n",
+ *doorbell_off, inx);
return kfd->doorbell_kernel_ptr + inx;
}
@@ -199,7 +199,8 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
{
unsigned int inx;
- inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
+ inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr)
+ * sizeof(u32) / kfd->device_info->doorbell_size;
mutex_lock(&kfd->doorbell_mutex);
__clear_bit(inx, kfd->doorbell_available_index);
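
The reworked index calculation converts a u32-pointer difference into a
doorbell slot number by going through bytes, so it stays correct on
devices with 64-bit doorbells. A worked example with hypothetical values:

    /* db_addr - doorbell_kernel_ptr == 6  (difference in u32 slots)
     * byte offset   = 6 * sizeof(u32) == 24
     * doorbell_size = 8                   (64-bit doorbells)
     * inx           = 24 / 8 == 3         (third doorbell slot)
     */

The old code would have yielded 6 for the same pointer difference,
clearing the wrong bit in doorbell_available_index on such devices.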
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 5562e94e786a..e9f0e0a1b41c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -850,6 +850,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
ev->memory_exception_data = *ev_data;
}
+ if (type == KFD_EVENT_TYPE_MEMORY) {
+ dev_warn(kfd_device,
+ "Sending SIGSEGV to HSA Process with PID %d ",
+ p->lead_thread->pid);
+ send_sig(SIGSEGV, p->lead_thread, 0);
+ }
+
/* Send SIGTERM if no event of type "type" has been found */
if (send_signal) {
if (send_sigterm) {
@@ -904,34 +911,41 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
memory_exception_data.failure.NotPresent = 1;
memory_exception_data.failure.NoExecute = 0;
memory_exception_data.failure.ReadOnly = 0;
- if (vma) {
- if (vma->vm_start > address) {
- memory_exception_data.failure.NotPresent = 1;
- memory_exception_data.failure.NoExecute = 0;
+ if (vma && address >= vma->vm_start) {
+ memory_exception_data.failure.NotPresent = 0;
+
+ if (is_write_requested && !(vma->vm_flags & VM_WRITE))
+ memory_exception_data.failure.ReadOnly = 1;
+ else
memory_exception_data.failure.ReadOnly = 0;
- } else {
- memory_exception_data.failure.NotPresent = 0;
- if (is_write_requested && !(vma->vm_flags & VM_WRITE))
- memory_exception_data.failure.ReadOnly = 1;
- else
- memory_exception_data.failure.ReadOnly = 0;
- if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
- memory_exception_data.failure.NoExecute = 1;
- else
- memory_exception_data.failure.NoExecute = 0;
- }
+
+ if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
+ memory_exception_data.failure.NoExecute = 1;
+ else
+ memory_exception_data.failure.NoExecute = 0;
}
up_read(&mm->mmap_sem);
mmput(mm);
- mutex_lock(&p->event_mutex);
+ pr_debug("notpresent %d, noexecute %d, readonly %d\n",
+ memory_exception_data.failure.NotPresent,
+ memory_exception_data.failure.NoExecute,
+ memory_exception_data.failure.ReadOnly);
- /* Lookup events by type and signal them */
- lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
- &memory_exception_data);
+ /* Workaround on Raven to not kill the process when memory is freed
+ * before IOMMU is able to finish processing all the excessive PPRs
+ */
+ if (dev->device_info->asic_family != CHIP_RAVEN) {
+ mutex_lock(&p->event_mutex);
+
+ /* Lookup events by type and signal them */
+ lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
+ &memory_exception_data);
+
+ mutex_unlock(&p->event_mutex);
+ }
- mutex_unlock(&p->event_mutex);
kfd_unref_process(p);
}
#endif /* KFD_SUPPORT_IOMMU_V2 */
@@ -956,3 +970,67 @@ void kfd_signal_hw_exception_event(unsigned int pasid)
mutex_unlock(&p->event_mutex);
kfd_unref_process(p);
}
+
+void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+ struct kfd_vm_fault_info *info)
+{
+ struct kfd_event *ev;
+ uint32_t id;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_hsa_memory_exception_data memory_exception_data;
+
+ if (!p)
+ return; /* Presumably process exited. */
+ memset(&memory_exception_data, 0, sizeof(memory_exception_data));
+ memory_exception_data.gpu_id = dev->id;
+ memory_exception_data.failure.imprecise = 1;
+ /* Set failure reason */
+ if (info) {
+ memory_exception_data.va = (info->page_addr) << PAGE_SHIFT;
+ memory_exception_data.failure.NotPresent =
+ info->prot_valid ? 1 : 0;
+ memory_exception_data.failure.NoExecute =
+ info->prot_exec ? 1 : 0;
+ memory_exception_data.failure.ReadOnly =
+ info->prot_write ? 1 : 0;
+ memory_exception_data.failure.imprecise = 0;
+ }
+ mutex_lock(&p->event_mutex);
+
+ id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ idr_for_each_entry_continue(&p->event_idr, ev, id)
+ if (ev->type == KFD_EVENT_TYPE_MEMORY) {
+ ev->memory_exception_data = memory_exception_data;
+ set_event(ev);
+ }
+
+ mutex_unlock(&p->event_mutex);
+ kfd_unref_process(p);
+}
+
+void kfd_signal_reset_event(struct kfd_dev *dev)
+{
+ struct kfd_hsa_hw_exception_data hw_exception_data;
+ struct kfd_process *p;
+ struct kfd_event *ev;
+ unsigned int temp;
+ uint32_t id, idx;
+
+ /* Whole-GPU reset caused by a GPU hang; memory contents are lost */
+ memset(&hw_exception_data, 0, sizeof(hw_exception_data));
+ hw_exception_data.gpu_id = dev->id;
+ hw_exception_data.memory_lost = 1;
+
+ idx = srcu_read_lock(&kfd_processes_srcu);
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ mutex_lock(&p->event_mutex);
+ id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ idr_for_each_entry_continue(&p->event_idr, ev, id)
+ if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
+ ev->hw_exception_data = hw_exception_data;
+ set_event(ev);
+ }
+ mutex_unlock(&p->event_mutex);
+ }
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+}
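
kfd_signal_reset_event() walks the global process table under SRCU rather
than plain RCU because its read side must be allowed to sleep:
p->event_mutex is taken for every process inside the walk. A stripped-down
sketch of the read side:

    idx = srcu_read_lock(&kfd_processes_srcu);
    hash_for_each_rcu(kfd_processes_table, tmp, p, kfd_processes) {
            mutex_lock(&p->event_mutex);    /* sleeping is legal here */
            /* mark per-process events, as above */
            mutex_unlock(&p->event_mutex);
    }
    srcu_read_unlock(&kfd_processes_srcu, idx);

Writers removing a process can then wait out readers with
synchronize_srcu() instead of blocking every signaling path.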
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
index abca5bfebbff..c7ac6c73af86 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
@@ -66,6 +66,7 @@ struct kfd_event {
/* type specific data */
union {
struct kfd_hsa_memory_exception_data memory_exception_data;
+ struct kfd_hsa_hw_exception_data hw_exception_data;
};
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 37029baa3346..f836897bbf58 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -26,7 +26,9 @@
static bool event_interrupt_isr_v9(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry)
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
{
uint16_t source_id, client_id, pasid, vmid;
const uint32_t *data = ih_ring_entry;
@@ -57,7 +59,9 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
source_id == SOC15_INTSRC_SDMA_TRAP ||
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
- source_id == SOC15_INTSRC_CP_BAD_OPCODE;
+ source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
+ client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_UTCL2;
}
static void event_interrupt_wq_v9(struct kfd_dev *dev,
@@ -82,7 +86,19 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
kfd_signal_hw_exception_event(pasid);
else if (client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_UTCL2) {
- /* TODO */
+ struct kfd_vm_fault_info info = {0};
+ uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+
+ info.vmid = vmid;
+ info.mc_id = client_id;
+ info.page_addr = ih_ring_entry[4] |
+ (uint64_t)(ih_ring_entry[5] & 0xf) << 32;
+ info.prot_valid = ring_id & 0x08;
+ info.prot_read = ring_id & 0x10;
+ info.prot_write = ring_id & 0x20;
+
+ kfd_process_vm_fault(dev->dqm, pasid);
+ kfd_signal_vm_fault_event(dev, pasid, &info);
}
}
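
The VM-fault branch reassembles a 36-bit page frame number from two
32-bit IH ring dwords; kfd_signal_vm_fault_event() later shifts it by
PAGE_SHIFT to recover the faulting virtual address. Spelled out (dword
layout as used by the code above):

    /* dword 4:            low 32 bits of the page number
     * dword 5, bits 0..3: page number bits 32..35
     */
    uint64_t page_addr = ih_ring_entry[4] |
                         (uint64_t)(ih_ring_entry[5] & 0xf) << 32;
    uint64_t fault_va  = page_addr << PAGE_SHIFT;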
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index db6d9336b80d..c56ac47cd318 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -151,13 +151,15 @@ static void interrupt_wq(struct work_struct *work)
ih_ring_entry);
}
-bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry)
+bool interrupt_is_wanted(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre, bool *flag)
{
/* integer and bitwise OR so there is no boolean short-circuiting */
unsigned int wanted = 0;
wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
- ih_ring_entry);
+ ih_ring_entry, patched_ihre, flag);
return wanted != 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index c71817963eea..7a61f38c09e6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -190,7 +190,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
{
struct kfd_dev *dev;
- dev_warn(kfd_device,
+ dev_warn_ratelimited(kfd_device,
"Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
PCI_BUS_NUM(pdev->devfn),
PCI_SLOT(pdev->devfn),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 476951d8c91c..9f84b4d9fb88 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -59,7 +59,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
switch (type) {
case KFD_QUEUE_TYPE_DIQ:
case KFD_QUEUE_TYPE_HIQ:
- kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
+ kq->mqd_mgr = dev->dqm->ops.get_mqd_manager(dev->dqm,
KFD_MQD_TYPE_HIQ);
break;
default:
@@ -67,7 +67,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
return false;
}
- if (!kq->mqd)
+ if (!kq->mqd_mgr)
return false;
prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
@@ -123,6 +123,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
prop.eop_ring_buffer_address = kq->eop_gpu_addr;
prop.eop_ring_buffer_size = PAGE_SIZE;
+ prop.cu_mask = NULL;
if (init_queue(&kq->queue, &prop) != 0)
goto err_init_queue;
@@ -130,7 +131,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->queue->device = dev;
kq->queue->process = kfd_get_process(current);
- retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd,
+ retval = kq->mqd_mgr->init_mqd(kq->mqd_mgr, &kq->queue->mqd,
&kq->queue->mqd_mem_obj,
&kq->queue->gart_mqd_addr,
&kq->queue->properties);
@@ -142,9 +143,9 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
pr_debug("Assigning hiq to hqd\n");
kq->queue->pipe = KFD_CIK_HIQ_PIPE;
kq->queue->queue = KFD_CIK_HIQ_QUEUE;
- kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
- kq->queue->queue, &kq->queue->properties,
- NULL);
+ kq->mqd_mgr->load_mqd(kq->mqd_mgr, kq->queue->mqd,
+ kq->queue->pipe, kq->queue->queue,
+ &kq->queue->properties, NULL);
} else {
/* allocate fence for DIQ */
@@ -182,7 +183,7 @@ err_get_kernel_doorbell:
static void uninitialize(struct kernel_queue *kq)
{
if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
- kq->mqd->destroy_mqd(kq->mqd,
+ kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
kq->queue->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
@@ -191,7 +192,8 @@ static void uninitialize(struct kernel_queue *kq)
else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
- kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
+ kq->mqd_mgr->uninit_mqd(kq->mqd_mgr, kq->queue->mqd,
+ kq->queue->mqd_mem_obj);
kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index 97aff2041a5d..a7116a939029 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -70,7 +70,7 @@ struct kernel_queue {
/* data */
struct kfd_dev *dev;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct queue *queue;
uint64_t pending_wptr64;
uint32_t pending_wptr;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 76bf2dc8aec4..6e1f5c7c2d4b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -47,6 +47,8 @@ static const struct kgd2kfd_calls kgd2kfd = {
.resume_mm = kgd2kfd_resume_mm,
.schedule_evict_and_restore_process =
kgd2kfd_schedule_evict_and_restore_process,
+ .pre_reset = kgd2kfd_pre_reset,
+ .post_reset = kgd2kfd_post_reset,
};
int sched_policy = KFD_SCHED_POLICY_HWS;
@@ -61,7 +63,7 @@ MODULE_PARM_DESC(hws_max_conc_proc,
int cwsr_enable = 1;
module_param(cwsr_enable, int, 0444);
-MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
+MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = off, 1 = on (default))");
int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
module_param(max_num_of_queues_per_device, int, 0444);
@@ -83,13 +85,19 @@ module_param(ignore_crat, int, 0444);
MODULE_PARM_DESC(ignore_crat,
"Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
-int vega10_noretry;
-module_param_named(noretry, vega10_noretry, int, 0644);
+int noretry;
+module_param(noretry, int, 0644);
MODULE_PARM_DESC(noretry,
- "Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled (default), 1 = retry disabled)");
+ "Set sh_mem_config.retry_disable on GFXv9+ dGPUs (0 = retry enabled (default), 1 = retry disabled)");
+
+int halt_if_hws_hang;
+module_param(halt_if_hws_hang, int, 0644);
+MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
+
static int amdkfd_init_completed;
+
int kgd2kfd_init(unsigned int interface_version,
const struct kgd2kfd_calls **g2f)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 4b8eb506642b..3bc25ab84f34 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -21,7 +21,7 @@
*
*/
-#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
@@ -48,3 +48,42 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
return NULL;
}
+
+void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
+ const uint32_t *cu_mask, uint32_t cu_mask_count,
+ uint32_t *se_mask)
+{
+ struct kfd_cu_info cu_info;
+ uint32_t cu_per_sh[4] = {0};
+ int i, se, cu = 0;
+
+ mm->dev->kfd2kgd->get_cu_info(mm->dev->kgd, &cu_info);
+
+ if (cu_mask_count > cu_info.cu_active_number)
+ cu_mask_count = cu_info.cu_active_number;
+
+ for (se = 0; se < cu_info.num_shader_engines; se++)
+ for (i = 0; i < 4; i++)
+ cu_per_sh[se] += hweight32(cu_info.cu_bitmap[se][i]);
+
+ /* Symmetrically map cu_mask to all SEs:
+ * cu_mask[0] bit0 -> se_mask[0] bit0;
+ * cu_mask[0] bit1 -> se_mask[1] bit0;
+ * ... (if # SE is 4)
+ * cu_mask[0] bit4 -> se_mask[0] bit1;
+ * ...
+ */
+ se = 0;
+ for (i = 0; i < cu_mask_count; i++) {
+ if (cu_mask[i / 32] & (1 << (i % 32)))
+ se_mask[se] |= 1 << cu;
+
+ do {
+ se++;
+ if (se == cu_info.num_shader_engines) {
+ se = 0;
+ cu++;
+ }
+ } while (cu >= cu_per_sh[se] && cu < 32);
+ }
+}
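
When every shader engine has enough active CUs, the loop above
degenerates to pure round-robin: bit i of the flat cu_mask lands in
se_mask[i % num_se] at bit position i / num_se. A self-contained sketch
for a hypothetical 4-SE part with at least 8 active CUs per engine (so
the cu_per_sh back-pressure in the real loop never triggers):

    static void map_cu_mask_simple(uint32_t cu_mask, uint32_t se_mask[4])
    {
            int i;

            for (i = 0; i < 32; i++)
                    if (cu_mask & (1u << i))
                            se_mask[i % 4] |= 1u << (i / 4);
    }

A mask of 0xF (CUs 0-3) thus enables one CU on each of the four engines,
spreading the selection evenly as the comment describes.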
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 8972bcfbf701..4e84052d4e21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -93,4 +93,8 @@ struct mqd_manager {
struct kfd_dev *dev;
};
+void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
+ const uint32_t *cu_mask, uint32_t cu_mask_count,
+ uint32_t *se_mask);
+
#endif /* KFD_MQD_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 06eaa218eba6..47243165a082 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -41,6 +41,31 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
return (struct cik_sdma_rlc_registers *)mqd;
}
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct cik_mqd *m;
+ uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+ if (q->cu_mask_count == 0)
+ return;
+
+ mqd_symmetrically_map_cu_mask(mm,
+ q->cu_mask, q->cu_mask_count, se_mask);
+
+ m = get_mqd(mqd);
+ m->compute_static_thread_mgmt_se0 = se_mask[0];
+ m->compute_static_thread_mgmt_se1 = se_mask[1];
+ m->compute_static_thread_mgmt_se2 = se_mask[2];
+ m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+ pr_debug("Update cu mask to %#x %#x %#x %#x\n",
+ m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3);
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -196,6 +221,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
+ update_cu_mask(mm, mqd, q);
+
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0 &&
@@ -408,7 +435,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 684054ff02cd..f5fc3675f21e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -41,6 +41,31 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v9_sdma_mqd *)mqd;
}
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct v9_mqd *m;
+ uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+ if (q->cu_mask_count == 0)
+ return;
+
+ mqd_symmetrically_map_cu_mask(mm,
+ q->cu_mask, q->cu_mask_count, se_mask);
+
+ m = get_mqd(mqd);
+ m->compute_static_thread_mgmt_se0 = se_mask[0];
+ m->compute_static_thread_mgmt_se1 = se_mask[1];
+ m->compute_static_thread_mgmt_se2 = se_mask[2];
+ m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+ pr_debug("update cu mask to %#x %#x %#x %#x\n",
+ m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3);
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -55,7 +80,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
* instead of sub-allocation function.
*/
if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
- *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
+ *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!*mqd_mem_obj)
return -ENOMEM;
retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
@@ -198,6 +223,8 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control = 0;
+ update_cu_mask(mm, mqd, q);
+
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0 &&
@@ -393,7 +420,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 481307b8b4db..b81fda3754da 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -43,6 +43,31 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct vi_sdma_mqd *)mqd;
}
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct vi_mqd *m;
+ uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+ if (q->cu_mask_count == 0)
+ return;
+
+ mqd_symmetrically_map_cu_mask(mm,
+ q->cu_mask, q->cu_mask_count, se_mask);
+
+ m = get_mqd(mqd);
+ m->compute_static_thread_mgmt_se0 = se_mask[0];
+ m->compute_static_thread_mgmt_se1 = se_mask[1];
+ m->compute_static_thread_mgmt_se2 = se_mask[2];
+ m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+ pr_debug("Update cu mask to %#x %#x %#x %#x\n",
+ m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3);
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -196,6 +221,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
+ update_cu_mask(mm, mqd, q);
+
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0 &&
@@ -394,7 +421,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index c317feb43f69..1092631765cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -418,4 +418,30 @@ out:
return 0;
}
+int pm_debugfs_hang_hws(struct packet_manager *pm)
+{
+ uint32_t *buffer, size;
+ int r = 0;
+
+ size = pm->pmf->query_status_size;
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t), (unsigned int **)&buffer);
+ if (!buffer) {
+ pr_err("Failed to allocate buffer on kernel queue\n");
+ r = -ENOMEM;
+ goto out;
+ }
+ memset(buffer, 0x55, size);
+ pm->priv_queue->ops.submit_packet(pm->priv_queue);
+
+ pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
+ buffer[0], buffer[1], buffer[2], buffer[3],
+ buffer[4], buffer[5], buffer[6]);
+out:
+ mutex_unlock(&pm->lock);
+ return r;
+}
+
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 5e3990bb4c4b..f971710f1c91 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -73,7 +73,7 @@
/*
* When working with cp scheduler we should assign the HIQ manually or via
- * the radeon driver to a fixed hqd slot, here are the fixed HIQ hqd slot
+ * the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot
* definitions for Kaveri. On Kaveri only the first ME's queues participate
* in the CP scheduling; with that in mind we set the HIQ slot in the
* second ME.
@@ -142,7 +142,12 @@ extern int ignore_crat;
/*
* Set sh_mem_config.retry_disable on Vega10
*/
-extern int vega10_noretry;
+extern int noretry;
+
+/*
+ * Halt if HWS hang is detected
+ */
+extern int halt_if_hws_hang;
/**
* enum kfd_sched_policy
@@ -180,9 +185,10 @@ enum cache_policy {
struct kfd_event_interrupt_class {
bool (*interrupt_isr)(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry);
+ const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
+ bool *patched_flag);
void (*interrupt_wq)(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry);
+ const uint32_t *ih_ring_entry);
};
struct kfd_device_info {
@@ -197,6 +203,7 @@ struct kfd_device_info {
bool supports_cwsr;
bool needs_iommu_device;
bool needs_pci_atomics;
+ unsigned int num_sdma_engines;
};
struct kfd_mem_obj {
@@ -415,6 +422,9 @@ struct queue_properties {
uint32_t ctl_stack_size;
uint64_t tba_addr;
uint64_t tma_addr;
+ /* Relevant for CU */
+ uint32_t cu_mask_count; /* Must be a multiple of 32 */
+ uint32_t *cu_mask;
};
/**
@@ -806,12 +816,18 @@ int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
-bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry);
+bool interrupt_is_wanted(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre, bool *flag);
/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd);
+/* GPU reset */
+int kgd2kfd_pre_reset(struct kfd_dev *kfd);
+int kgd2kfd_post_reset(struct kfd_dev *kfd);
+
/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);
@@ -838,6 +854,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
+int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
/* Process Queue Manager */
struct process_queue_node {
@@ -858,6 +875,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
+int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
+ struct queue_properties *p);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
unsigned int qid);
@@ -964,10 +983,17 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+ struct kfd_vm_fault_info *info);
+
+void kfd_signal_reset_event(struct kfd_dev *dev);
+
void kfd_flush_tlb(struct kfd_process_device *pdd);
int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
+bool kfd_is_locked(void);
+
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)
@@ -980,6 +1006,10 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);
+int kfd_debugfs_hang_hws(struct kfd_dev *dev);
+int pm_debugfs_hang_hws(struct packet_manager *pm);
+int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);
+
#else
static inline void kfd_debugfs_init(void) {}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 1d80b4f7c681..4694386cc623 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -244,6 +244,8 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
return ERR_PTR(-EINVAL);
process = find_process(thread);
+ if (!process)
+ return ERR_PTR(-EINVAL);
return process;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index d65ce0436b31..c8cad9c078ae 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -186,8 +186,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
- if (dev->dqm->queue_count >=
- CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+ if (dev->dqm->queue_count >= get_num_sdma_queues(dev->dqm)) {
pr_err("Over-subscription is not allowed for SDMA.\n");
retval = -EPERM;
goto err_create_queue;
@@ -209,7 +208,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
- pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+ pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;
}
@@ -326,6 +325,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
if (retval != -ETIME)
goto err_destroy_queue;
}
+ kfree(pqn->q->properties.cu_mask);
+ pqn->q->properties.cu_mask = NULL;
uninit_queue(pqn->q);
}
@@ -366,6 +367,34 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
return 0;
}
+int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
+ struct queue_properties *p)
+{
+ int retval;
+ struct process_queue_node *pqn;
+
+ pqn = get_queue_by_qid(pqm, qid);
+ if (!pqn) {
+ pr_debug("No queue %d exists for update operation\n", qid);
+ return -EFAULT;
+ }
+
+ /* Free the old CU mask memory if it is already allocated, then
+ * take ownership of the caller-allocated memory for the new CU mask.
+ */
+ kfree(pqn->q->properties.cu_mask);
+
+ pqn->q->properties.cu_mask_count = p->cu_mask_count;
+ pqn->q->properties.cu_mask = p->cu_mask;
+
+ retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
+ pqn->q);
+ if (retval != 0)
+ return retval;
+
+ return 0;
+}
+
struct kernel_queue *pqm_get_kernel_queue(
struct process_queue_manager *pqm,
unsigned int qid)
@@ -387,7 +416,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
struct process_queue_node *pqn;
struct queue *q;
enum KFD_MQD_TYPE mqd_type;
- struct mqd_manager *mqd_manager;
+ struct mqd_manager *mqd_mgr;
int r = 0;
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
@@ -410,11 +439,11 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
q->properties.type, q->device->id);
continue;
}
- mqd_manager = q->device->dqm->ops.get_mqd_manager(
+ mqd_mgr = q->device->dqm->ops.get_mqd_manager(
q->device->dqm, mqd_type);
} else if (pqn->kq) {
q = pqn->kq->queue;
- mqd_manager = pqn->kq->mqd;
+ mqd_mgr = pqn->kq->mqd_mgr;
switch (q->properties.type) {
case KFD_QUEUE_TYPE_DIQ:
seq_printf(m, " DIQ on device %x\n",
@@ -434,7 +463,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
continue;
}
- r = mqd_manager->debugfs_show_mqd(m, q->mqd);
+ r = mqd_mgr->debugfs_show_mqd(m, q->mqd);
if (r != 0)
break;
}
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index d5d4586e6176..325083b0297e 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -9,23 +9,6 @@ config DRM_AMD_DC
support for AMDGPU. This adds required support for Vega and
Raven ASICs.
-config DRM_AMD_DC_FBC
- bool "AMD FBC - Enable Frame Buffer Compression"
- depends on DRM_AMD_DC
- help
- Choose this option if you want to use frame buffer compression
- support.
- This is a power optimisation feature, check its availability
- on your hardware before enabling this option.
-
-
-config DRM_AMD_DC_DCN1_0
- bool "DCN 1.0 Raven family"
- depends on DRM_AMD_DC && X86
- help
- Choose this option if you want to have
- RV family for display engine
-
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
depends on DRM_AMD_DC
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
index 357d59648401..a8a6c106e8c7 100644
--- a/drivers/gpu/drm/amd/display/TODO
+++ b/drivers/gpu/drm/amd/display/TODO
@@ -97,10 +97,10 @@ share it with drivers. But that's a very long term goal, and by far not just an
issue with DC - other drivers, especially around DP sink handling, are equally
guilty.
-19. The DC logger is still a rather sore thing, but I know that the DRM_DEBUG
-stuff just isn't up to the challenges either. We need to figure out something
-that integrates better with DRM and linux debug printing, while not being
-useless with filtering output. dynamic debug printing might be an option.
+19. DONE - The DC logger is still a rather sore thing, but I know that the
+DRM_DEBUG stuff just isn't up to the challenges either. We need to figure out
+something that integrates better with DRM and linux debug printing, while not
+being useless with filtering output. dynamic debug printing might be an option.
20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
retimer that we need to program to pass PHY compliance. Currently that's
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index af16973f2c41..94911871eb9b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -28,11 +28,11 @@
AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o
ifneq ($(CONFIG_DRM_AMD_DC),)
-AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
+AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
endif
ifneq ($(CONFIG_DEBUG_FS),)
-AMDGPUDM += amdgpu_dm_crc.o
+AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
endif
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 770c6b24be0b..34f34823bab5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -39,6 +39,9 @@
#include "dm_helpers.h"
#include "dm_services_types.h"
#include "amdgpu_dm_mst_types.h"
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -57,7 +60,7 @@
#include "modules/inc/mod_freesync.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "ivsrcid/irqsrcs_dcn_1_0.h"
#include "dcn/dcn_1_0_offset.h"
@@ -347,7 +350,6 @@ static void hotplug_notify_work_func(struct work_struct *work)
drm_kms_helper_hotplug_event(dev);
}
-#if defined(CONFIG_DRM_AMD_DC_FBC)
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
@@ -388,7 +390,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
}
}
-#endif
/* Init display KMS
@@ -902,14 +903,14 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
(struct edid *) sink->dc_edid.raw_edid;
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
aconnector->edid);
}
amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
} else {
amdgpu_dm_remove_sink_from_freesync_module(connector);
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
aconnector->num_modes = 0;
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
@@ -1040,7 +1041,7 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link->type != dc_connection_mst_branch)
mutex_lock(&aconnector->hpd_lock);
- if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
+ if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
!is_mst_root_connector) {
/* Downstream Port status changed. */
if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
@@ -1191,7 +1192,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
return 0;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
@@ -1319,7 +1320,12 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
- return bd->props.brightness;
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+ int ret = dc_link_get_backlight_level(dm->backlight_link);
+
+ if (ret == DC_ERROR_UNEXPECTED)
+ return bd->props.brightness;
+ return ret;
}
static const struct backlight_ops amdgpu_dm_backlight_ops = {
@@ -1334,6 +1340,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
struct backlight_properties props = { 0 };
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -1525,16 +1532,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case CHIP_RAVEN:
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
}
- /*
- * Temporary disable until pplib/smu interaction is implemented
- */
- dm->dc->debug.disable_stutter = true;
break;
#endif
default:
@@ -1542,6 +1545,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
+ if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+ dm->dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
+
return 0;
fail:
kfree(aencoder);
@@ -1573,18 +1579,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev)
/* TODO: implement later */
}
-static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
- u8 level)
-{
- /* TODO: translate amdgpu_encoder to display_index and call DAL */
-}
-
-static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
-{
- /* TODO: translate amdgpu_encoder to display_index and call DAL */
- return 0;
-}
-
static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -1613,10 +1607,8 @@ static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
static const struct amdgpu_display_funcs dm_display_funcs = {
.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
- .backlight_set_level =
- dm_set_backlight_level,/* called unconditionally */
- .backlight_get_level =
- dm_get_backlight_level,/* called unconditionally */
+ .backlight_set_level = NULL, /* never called for DC */
+ .backlight_get_level = NULL, /* never called for DC */
.hpd_sense = NULL,/* called unconditionally */
.hpd_set_polarity = NULL, /* called unconditionally */
.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
@@ -1724,7 +1716,7 @@ static int dm_early_init(void *handle)
adev->mode_info.num_dig = 6;
adev->mode_info.plane_type = dm_plane_type_default;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case CHIP_RAVEN:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
@@ -2123,13 +2115,8 @@ convert_color_depth_from_display_info(const struct drm_connector *connector)
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
- int32_t width = mode_in->crtc_hdisplay * 9;
- int32_t height = mode_in->crtc_vdisplay * 16;
-
- if ((width - height) < 10 && (width - height) > -10)
- return ASPECT_RATIO_16_9;
- else
- return ASPECT_RATIO_4_3;
+ /* 1-1 mapping, since both enums follow the HDMI spec. */
+ return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}
static enum dc_color_space
@@ -3093,15 +3080,25 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
else
domain = AMDGPU_GEM_DOMAIN_VRAM;
- r = amdgpu_bo_pin(rbo, domain, &afb->address);
- amdgpu_bo_unreserve(rbo);
-
+ r = amdgpu_bo_pin(rbo, domain);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+ amdgpu_bo_unreserve(rbo);
return r;
}
+ r = amdgpu_ttm_alloc_gart(&rbo->tbo);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ DRM_ERROR("%p bind failed\n", rbo);
+ return r;
+ }
+ amdgpu_bo_unreserve(rbo);
+
+ afb->address = amdgpu_bo_gpu_offset(rbo);
+
amdgpu_bo_ref(rbo);
if (dm_plane_state_new->dc_state &&
@@ -3471,12 +3468,15 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
struct edid *edid = amdgpu_dm_connector->edid;
encoder = helper->best_encoder(connector);
- amdgpu_dm_connector_ddc_get_modes(connector, edid);
- amdgpu_dm_connector_add_common_modes(encoder, connector);
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (!edid || !drm_edid_is_valid(edid)) {
+ drm_add_modes_noedid(connector, 640, 480);
+ } else {
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ }
amdgpu_dm_fbc_init(connector);
-#endif
+
return amdgpu_dm_connector->num_modes;
}
@@ -3495,7 +3495,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.stereo_allowed = false;
aconnector->base.dpms = DRM_MODE_DPMS_OFF;
aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
-
mutex_init(&aconnector->hpd_lock);
/* configure HPD hot plug support: connector->polled default value is 0
@@ -3504,9 +3503,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
switch (connector_type) {
case DRM_MODE_CONNECTOR_HDMIA:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DVID:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
@@ -3659,10 +3662,17 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
link,
link_index);
- drm_mode_connector_attach_encoder(
+ drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
drm_connector_register(&aconnector->base);
+#if defined(CONFIG_DEBUG_FS)
+ res = connector_debugfs_init(aconnector);
+ if (res) {
+ DRM_ERROR("Failed to create debugfs for connector");
+ goto out_free;
+ }
+#endif
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -3959,8 +3969,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
/* Flip */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
- /* update crtc fb */
- crtc->primary->fb = fb;
WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
WARN_ON(!acrtc_state->stream);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d5aa89ad5571..a29dc35954c9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -72,13 +72,11 @@ struct irq_list_head {
struct work_struct work;
};
-#if defined(CONFIG_DRM_AMD_DC_FBC)
struct dm_comressor_info {
void *cpu_addr;
struct amdgpu_bo *bo_ptr;
uint64_t gpu_addr;
};
-#endif
struct amdgpu_display_manager {
@@ -129,9 +127,8 @@ struct amdgpu_display_manager {
* Caches device atomic state for suspend/resume
*/
struct drm_atomic_state *cached_state;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
struct dm_comressor_info compressor;
-#endif
};
struct amdgpu_dm_connector {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index b329393307e5..326f6fb7e0bc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -231,18 +231,21 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
* preparation for hardware commit. If no lut is specified by user, we default
* to SRGB degamma.
*
- * Currently, we only support degamma bypass, or preprogrammed SRGB degamma.
- * Programmable degamma is not supported, and an attempt to do so will return
- * -EINVAL.
+ * We support degamma bypass, predefined SRGB, and custom degamma
*
* RETURNS:
- * 0 on success, -EINVAL if custom degamma curve is given.
+ * 0 on success
+ * -EINVAL if crtc_state has a degamma_lut of invalid size
+ * -ENOMEM if gamma allocation fails
*/
int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
struct dc_plane_state *dc_plane_state)
{
struct drm_property_blob *blob = crtc_state->degamma_lut;
struct drm_color_lut *lut;
+ uint32_t lut_size;
+ struct dc_gamma *gamma;
+ bool ret;
if (!blob) {
/* Default to SRGB */
@@ -258,11 +261,30 @@ int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
return 0;
}
- /* Otherwise, assume SRGB, since programmable degamma is not
- * supported.
- */
- dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
- dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
- return -EINVAL;
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ lut_size = blob->length / sizeof(struct drm_color_lut);
+ gamma->num_entries = lut_size;
+ if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
+ gamma->type = GAMMA_CUSTOM;
+ else {
+ dc_gamma_release(&gamma);
+ return -EINVAL;
+ }
+
+ __drm_lut_to_dc_gamma(lut, gamma, false);
+
+ dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+ ret = mod_color_calculate_degamma_params(dc_plane_state->in_transfer_func, gamma, true);
+ dc_gamma_release(&gamma);
+ if (!ret) {
+ dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+ DRM_ERROR("Out of memory when calculating degamma params\n");
+ return -ENOMEM;
+ }
+
+ return 0;
}
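
With custom degamma now accepted, userspace can hand the driver a full-resolution
LUT through the standard DRM DEGAMMA_LUT blob property. A minimal userspace sketch,
assuming libdrm and a 4096-entry LUT (the MAX_COLOR_LUT_ENTRIES size the function
above insists on); attaching the blob to the CRTC property via an atomic commit is
omitted:

    /* hypothetical example, not part of the patch */
    #include <stdint.h>
    #include <stdlib.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    static int create_linear_degamma_blob(int fd, uint32_t *blob_id)
    {
            const size_t lut_size = 4096;   /* must match DEGAMMA_LUT_SIZE */
            struct drm_color_lut *lut = calloc(lut_size, sizeof(*lut));
            size_t i;
            int ret;

            if (!lut)
                    return -1;

            for (i = 0; i < lut_size; i++) {
                    /* identity ramp across the full 16-bit range */
                    uint16_t v = (uint16_t)((i * 0xffffULL) / (lut_size - 1));

                    lut[i].red = lut[i].green = lut[i].blue = v;
            }

            ret = drmModeCreatePropertyBlob(fd, lut, lut_size * sizeof(*lut),
                                            blob_id);
            free(lut);
            return ret;     /* on success, *blob_id goes into DEGAMMA_LUT */
    }
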
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
new file mode 100644
index 000000000000..0d9e410ca01e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -0,0 +1,722 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/debugfs.h>
+
+#include "dc.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_debugfs.h"
+
+/* function description
+ * get/set DP configuration: lane_count, link_rate, spread_spectrum
+ *
+ * valid lane count value: 1, 2, 4
+ * valid link rate value:
+ * 06h = 1.62Gbps per lane
+ * 0Ah = 2.7Gbps per lane
+ * 0Ch = 3.24Gbps per lane
+ * 14h = 5.4Gbps per lane
+ * 1Eh = 8.1Gbps per lane
+ *
+ * debugfs is located at /sys/kernel/debug/dri/0/DP-x/link_settings
+ *
+ * --- to get the DP configuration
+ *
+ * cat link_settings
+ *
+ * It will list the current, verified, reported, and preferred DP configuration.
+ * current --- for the current video mode
+ * verified --- maximum configuration which passed link training
+ * reported --- DP RX reported caps (DPCD register offsets 0, 1, 2)
+ * preferred --- user-forced settings
+ *
+ * --- set (or force) the DP configuration
+ *
+ * echo <lane_count> <link_rate> > link_settings
+ *
+ * for example, to force 4 lanes at 2.7 Gbps per lane,
+ * echo 4 0xa > link_settings
+ *
+ * spread_spectrum cannot be changed dynamically.
+ *
+ * If an invalid lane count or link rate is forced, no HW programming will
+ * be done. Check the link settings after the force operation to see
+ * whether the HW got programmed.
+ *
+ * cat link_settings
+ *
+ * check current and preferred settings.
+ *
+ */
+static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ const uint32_t rd_buf_size = 100;
+ uint32_t result = 0;
+ uint8_t str_len = 0;
+ int r;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+ if (!rd_buf)
+ return 0;
+
+ rd_buf_ptr = rd_buf;
+
+ str_len = strlen("Current: %d %d %d ");
+ snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
+ link->cur_link_settings.lane_count,
+ link->cur_link_settings.link_rate,
+ link->cur_link_settings.link_spread);
+ rd_buf_ptr += str_len;
+
+ str_len = strlen("Verified: %d %d %d ");
+ snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
+ link->verified_link_cap.lane_count,
+ link->verified_link_cap.link_rate,
+ link->verified_link_cap.link_spread);
+ rd_buf_ptr += str_len;
+
+ str_len = strlen("Reported: %d %d %d ");
+ snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
+ link->reported_link_cap.lane_count,
+ link->reported_link_cap.link_rate,
+ link->reported_link_cap.link_spread);
+ rd_buf_ptr += str_len;
+
+ str_len = strlen("Preferred: %d %d %d ");
+ snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
+ link->preferred_link_setting.lane_count,
+ link->preferred_link_setting.link_rate,
+ link->preferred_link_setting.link_spread);
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r) {
+ kfree(rd_buf);
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ struct dc *dc = (struct dc *)link->dc;
+ struct dc_link_settings prefer_link_settings;
+ char *wr_buf = NULL;
+ char *wr_buf_ptr = NULL;
+ const uint32_t wr_buf_size = 40;
+ int r;
+ int bytes_from_user;
+ char *sub_str;
+ /* 0: lane_count; 1: link_rate */
+ uint8_t param_index = 0;
+ long param[2];
+ const char delimiter[3] = {' ', '\n', '\0'};
+ bool valid_input = false;
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return -EINVAL;
+ wr_buf_ptr = wr_buf;
+
+ r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+
+ /* r is the number of bytes not copied */
+ if (r >= wr_buf_size) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data not read\n");
+ return -EINVAL;
+ }
+
+ bytes_from_user = wr_buf_size - r;
+
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+
+ while ((*wr_buf_ptr != '\0') && (param_index < 2)) {
+
+ sub_str = strsep(&wr_buf_ptr, delimiter);
+
+ r = kstrtol(sub_str, 16, &param[param_index]);
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+ }
+
+ switch (param[0]) {
+ case LANE_COUNT_ONE:
+ case LANE_COUNT_TWO:
+ case LANE_COUNT_FOUR:
+ valid_input = true;
+ break;
+ default:
+ break;
+ }
+
+ switch (param[1]) {
+ case LINK_RATE_LOW:
+ case LINK_RATE_HIGH:
+ case LINK_RATE_RBR2:
+ case LINK_RATE_HIGH2:
+ case LINK_RATE_HIGH3:
+ valid_input = true;
+ break;
+ default:
+ break;
+ }
+
+ if (!valid_input) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n");
+ return bytes_from_user;
+ }
+
+ /* save the user-forced lane_count and link_rate to the preferred settings;
+ * spread spectrum will not be changed
+ */
+ prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+ prefer_link_settings.lane_count = param[0];
+ prefer_link_settings.link_rate = param[1];
+
+ dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
+
+ kfree(wr_buf);
+ return bytes_from_user;
+}
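+
+The comment block above documents the interface; below is a hypothetical
+userspace sketch exercising it end to end. The DP-1 path is an example (pick
+the right connector as described for phy_settings further down), and the read
+size is kept a multiple of 4 because the read handler rejects unaligned sizes
+with -EINVAL:
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+            const char *path = "/sys/kernel/debug/dri/0/DP-1/link_settings";
+            char out[101] = {0};
+            int fd = open(path, O_RDWR);
+
+            if (fd < 0)
+                    return 1;
+            /* parsed as hex by the write handler: 4 lanes, 0xa = 2.7 Gbps */
+            write(fd, "4 0xa\n", 6);
+            /* 100 bytes, 4-byte aligned, matches the handler's rd_buf_size */
+            pread(fd, out, 100, 0);
+            printf("%s\n", out);    /* Current/Verified/Reported/Preferred */
+            close(fd);
+            return 0;
+    }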
+
+/* function: get current DP PHY settings: voltage swing, pre-emphasis,
+ * post-cursor2 (defined by VESA DP specification)
+ *
+ * valid values
+ * voltage swing: 0,1,2,3
+ * pre-emphasis : 0,1,2,3
+ * post cursor2 : 0,1,2,3
+ *
+ *
+ * how to use this debugfs
+ *
+ * debugfs is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * there will be directories such as DP-1, DP-2, DP-3, etc., one per DP display
+ *
+ * To figure out which DP-x corresponds to the display under test,
+ * cd DP-x
+ * ls -ll
+ * There should be debugfs files such as link_settings and phy_settings.
+ * cat link_settings
+ * and use lane_count and link_rate to figure out which DP-x belongs to the
+ * display being worked on
+ *
+ * To get current DP PHY settings,
+ * cat phy_settings
+ *
+ * To change DP PHY settings,
+ * echo <voltage_swing> <pre-emphasis> <post_cursor2> > phy_settings
+ * for example, to change voltage swing to 2, pre-emphasis to 3, and
+ * post_cursor2 to 0,
+ * echo 2 3 0 > phy_settings
+ *
+ * To check whether the change was applied, get the current phy settings by
+ * cat phy_settings
+ *
+ * If invalid values are set by the user, like
+ * echo 1 4 0 > phy_settings
+ *
+ * HW will NOT be programmed by these settings.
+ * cat phy_settings will show the previous valid settings.
+ */
+static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ char *rd_buf = NULL;
+ const uint32_t rd_buf_size = 20;
+ uint32_t result = 0;
+ int r;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+ if (!rd_buf)
+ return -EINVAL;
+
+ snprintf(rd_buf, rd_buf_size, " %d %d %d ",
+ link->cur_lane_setting.VOLTAGE_SWING,
+ link->cur_lane_setting.PRE_EMPHASIS,
+ link->cur_lane_setting.POST_CURSOR2);
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user((*(rd_buf + result)), buf);
+ if (r) {
+ kfree(rd_buf);
+ return r; /* r = -EFAULT */
+ }
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ struct dc *dc = (struct dc *)link->dc;
+ char *wr_buf = NULL;
+ char *wr_buf_ptr = NULL;
+ uint32_t wr_buf_size = 40;
+ int r;
+ int bytes_from_user;
+ char *sub_str;
+ uint8_t param_index = 0;
+ long param[3];
+ const char delimiter[3] = {' ', '\n', '\0'};
+ bool use_prefer_link_setting;
+ struct link_training_settings link_lane_settings;
+
+ if (size == 0)
+ return 0;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return 0;
+ wr_buf_ptr = wr_buf;
+
+ r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+
+ /* r is the number of bytes not copied */
+ if (r >= wr_buf_size) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data not be read\n");
+ return 0;
+ }
+
+ bytes_from_user = wr_buf_size - r;
+
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+
+ while ((*wr_buf_ptr != '\0') && (param_index < 3)) {
+
+ sub_str = strsep(&wr_buf_ptr, delimiter);
+
+ r = kstrtol(sub_str, 16, &param[param_index]);
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ while (isspace(*wr_buf_ptr))
+ wr_buf_ptr++;
+ }
+
+ if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) ||
+ (param[1] > PRE_EMPHASIS_MAX_LEVEL) ||
+ (param[2] > POST_CURSOR2_MAX_LEVEL)) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Input No HW will be programmed\n");
+ return bytes_from_user;
+ }
+
+ /* get link settings: lane count, link rate */
+ use_prefer_link_setting =
+ ((link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) &&
+ (link->test_pattern_enabled));
+
+ memset(&link_lane_settings, 0, sizeof(link_lane_settings));
+
+ if (use_prefer_link_setting) {
+ link_lane_settings.link_settings.lane_count =
+ link->preferred_link_setting.lane_count;
+ link_lane_settings.link_settings.link_rate =
+ link->preferred_link_setting.link_rate;
+ link_lane_settings.link_settings.link_spread =
+ link->preferred_link_setting.link_spread;
+ } else {
+ link_lane_settings.link_settings.lane_count =
+ link->cur_link_settings.lane_count;
+ link_lane_settings.link_settings.link_rate =
+ link->cur_link_settings.link_rate;
+ link_lane_settings.link_settings.link_spread =
+ link->cur_link_settings.link_spread;
+ }
+
+ /* apply phy settings from user */
+ for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
+ link_lane_settings.lane_settings[r].VOLTAGE_SWING =
+ (enum dc_voltage_swing) (param[0]);
+ link_lane_settings.lane_settings[r].PRE_EMPHASIS =
+ (enum dc_pre_emphasis) (param[1]);
+ link_lane_settings.lane_settings[r].POST_CURSOR2 =
+ (enum dc_post_cursor2) (param[2]);
+ }
+
+ /* program ASIC registers and DPCD registers */
+ dc_link_set_drive_settings(dc, &link_lane_settings, link);
+
+ kfree(wr_buf);
+ return bytes_from_user;
+}
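+
+A matching hypothetical sketch for the phy_settings entry above: force voltage
+swing 2, pre-emphasis 3, post_cursor2 0 and read the result back. Values above
+the *_MAX_LEVEL bounds are rejected without touching HW, per the check in the
+write handler:
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <unistd.h>
+
+    static void set_phy_settings(const char *path)
+    {
+            char out[21] = {0};
+            int fd = open(path, O_RDWR);
+
+            if (fd < 0)
+                    return;
+            write(fd, "2 3 0\n", 6);        /* swing, pre-emphasis, post_cursor2 */
+            pread(fd, out, 20, 0);          /* 20 bytes: 4-byte aligned */
+            printf("%s\n", out);
+            close(fd);
+    }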
+
+/* function description
+ *
+ * set PHY layer or Link layer test pattern
+ * PHY test pattern is used for PHY SI check.
+ * Link layer test will not affect PHY SI.
+ *
+ * Reset Test Pattern:
+ * 0 = DP_TEST_PATTERN_VIDEO_MODE
+ *
+ * PHY test pattern supported:
+ * 1 = DP_TEST_PATTERN_D102
+ * 2 = DP_TEST_PATTERN_SYMBOL_ERROR
+ * 3 = DP_TEST_PATTERN_PRBS7
+ * 4 = DP_TEST_PATTERN_80BIT_CUSTOM
+ * 5 = DP_TEST_PATTERN_CP2520_1
+ * 6 = DP_TEST_PATTERN_CP2520_2 = DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE
+ * 7 = DP_TEST_PATTERN_CP2520_3
+ *
+ * DP PHY Link Training Patterns
+ * 8 = DP_TEST_PATTERN_TRAINING_PATTERN1
+ * 9 = DP_TEST_PATTERN_TRAINING_PATTERN2
+ * a = DP_TEST_PATTERN_TRAINING_PATTERN3
+ * b = DP_TEST_PATTERN_TRAINING_PATTERN4
+ *
+ * DP Link Layer Test pattern
+ * c = DP_TEST_PATTERN_COLOR_SQUARES
+ * d = DP_TEST_PATTERN_COLOR_SQUARES_CEA
+ * e = DP_TEST_PATTERN_VERTICAL_BARS
+ * f = DP_TEST_PATTERN_HORIZONTAL_BARS
+ * 10= DP_TEST_PATTERN_COLOR_RAMP
+ *
+ * debugfs test_pattern is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * --- set test pattern
+ * echo <test pattern #> > test_pattern
+ *
+ * If the test pattern # is not supported, NO HW programming will be done.
+ * DP_TEST_PATTERN_80BIT_CUSTOM needs an extra 10 bytes of data for the
+ * user pattern; the 10 input bytes are separated by spaces:
+ *
+ * echo 0x4 0x11 0x22 0x33 0x44 0x55 0x66 0x77 0x88 0x99 0xaa > test_pattern
+ *
+ * --- reset test pattern
+ * echo 0 > test_pattern
+ *
+ * --- HPD detection is disabled while a PHY test pattern is set
+ *
+ * When a PHY test pattern (pattern # within [1,7]) is set, the HPD pin of
+ * the HW ASIC is disabled. The user can unplug the DP display from the DP
+ * connector and attach a scope to check the test pattern PHY SI.
+ * To unplug the scope and plug the DP display back in, do the steps below:
+ * echo 0 > test_pattern
+ * unplug scope
+ * plug DP display.
+ *
+ * "echo 0 > test_pattern" re-enables the HPD pin so that the video SW
+ * driver can detect "unplug scope" and "plug DP display"
+ */
+static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ char *wr_buf = NULL;
+ char *wr_buf_ptr = NULL;
+ uint32_t wr_buf_size = 100;
+ uint32_t wr_buf_count = 0;
+ int r;
+ int bytes_from_user;
+ char *sub_str = NULL;
+ uint8_t param_index = 0;
+ uint8_t param_nums = 0;
+ long param[11] = {0x0};
+ const char delimiter[3] = {' ', '\n', '\0'};
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+ bool disable_hpd = false;
+ bool valid_test_pattern = false;
+ /* init with default 80-bit custom pattern */
+ uint8_t custom_pattern[10] = {
+ 0x1f, 0x7c, 0xf0, 0xc1, 0x07,
+ 0x1f, 0x7c, 0xf0, 0xc1, 0x07
+ };
+ struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN,
+ LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
+ struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
+ LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
+ struct link_training_settings link_training_settings;
+ int i;
+
+ if (size == 0)
+ return 0;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return 0;
+ wr_buf_ptr = wr_buf;
+
+ r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+
+ /* r is the number of bytes not copied */
+ if (r >= wr_buf_size) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data not be read\n");
+ return 0;
+ }
+
+ bytes_from_user = wr_buf_size - r;
+
+ /* check number of parameters. isspace() cannot distinguish space from \n */
+ while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
+ /* skip spaces */
+ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ if (wr_buf_count == wr_buf_size)
+ break;
+
+ /* skip non-space chars */
+ while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ param_nums++;
+
+ if (wr_buf_count == wr_buf_size)
+ break;
+ }
+
+ /* max 11 parameters */
+ if (param_nums > 11)
+ param_nums = 11;
+
+ wr_buf_ptr = wr_buf; /* reset buf pointer */
+ wr_buf_count = 0; /* number of chars already checked */
+
+ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ while (param_index < param_nums) {
+ /* after strsep, wr_buf_ptr will be moved to after space */
+ sub_str = strsep(&wr_buf_ptr, delimiter);
+
+ r = kstrtol(sub_str, 16, &param[param_index]);
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ }
+
+ test_pattern = param[0];
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ valid_test_pattern = true;
+ break;
+
+ case DP_TEST_PATTERN_D102:
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ case DP_TEST_PATTERN_PRBS7:
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
+ case DP_TEST_PATTERN_TRAINING_PATTERN4:
+ disable_hpd = true;
+ valid_test_pattern = true;
+ break;
+
+ default:
+ valid_test_pattern = false;
+ test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+ break;
+ }
+
+ if (!valid_test_pattern) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n");
+ return bytes_from_user;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
+ for (i = 0; i < 10; i++) {
+ if ((uint8_t) param[i + 1] != 0x0)
+ break;
+ }
+
+ if (i < 10) {
+ /* do not use the default value */
+ for (i = 0; i < 10; i++)
+ custom_pattern[i] = (uint8_t) param[i + 1];
+ }
+ }
+
+ /* Usage: set a DP physical test pattern using debugfs with a normal DP
+ * panel, then unplug the DP panel and connect a scope to measure.
+ * Normal video modes and test patterns generated from the CRTC are
+ * visible to the user, so do not disable HPD for them.
+ * Video Mode is also set to clear the test pattern, so enable HPD
+ * because it might have been disabled after a test pattern was set.
+ * AUX depends on HPD; this sequence is order-dependent, do not move!
+ */
+ if (!disable_hpd)
+ dc_link_enable_hpd(link);
+
+ prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
+ prefer_link_settings.link_rate = link->verified_link_cap.link_rate;
+ prefer_link_settings.link_spread = link->verified_link_cap.link_spread;
+
+ cur_link_settings.lane_count = link->cur_link_settings.lane_count;
+ cur_link_settings.link_rate = link->cur_link_settings.link_rate;
+ cur_link_settings.link_spread = link->cur_link_settings.link_spread;
+
+ link_training_settings.link_settings = cur_link_settings;
+
+
+ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+ if (prefer_link_settings.lane_count != LANE_COUNT_UNKNOWN &&
+ prefer_link_settings.link_rate != LINK_RATE_UNKNOWN &&
+ (prefer_link_settings.lane_count != cur_link_settings.lane_count ||
+ prefer_link_settings.link_rate != cur_link_settings.link_rate))
+ link_training_settings.link_settings = prefer_link_settings;
+ }
+
+ for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
+ link_training_settings.lane_settings[i] = link->cur_lane_setting;
+
+ dc_link_set_test_pattern(
+ link,
+ test_pattern,
+ &link_training_settings,
+ custom_pattern,
+ 10);
+
+ /* Usage: set a DP physical test pattern using AMDDP with a normal DP
+ * panel, then unplug the DP panel and connect a scope to measure the DP
+ * PHY signal. The interrupt must be disabled to keep the SW driver from
+ * disabling the DP output. This is done after the test pattern is set.
+ */
+ if (valid_test_pattern && disable_hpd)
+ dc_link_disable_hpd(link);
+
+ kfree(wr_buf);
+
+ return bytes_from_user;
+}
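+
+A hypothetical sketch of the 80-bit custom pattern path documented above:
+build the "0x4 <10 bytes>" command string and write it to the test_pattern
+entry. Per the check above, an all-zero byte list falls back to the built-in
+default pattern:
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <unistd.h>
+
+    static int set_80bit_custom(const char *path, const unsigned char pat[10])
+    {
+            char cmd[80];
+            int i, n, fd;
+
+            n = snprintf(cmd, sizeof(cmd), "0x4");
+            for (i = 0; i < 10; i++)
+                    n += snprintf(cmd + n, sizeof(cmd) - n, " 0x%02x", pat[i]);
+            cmd[n++] = '\n';
+
+            fd = open(path, O_WRONLY);
+            if (fd < 0)
+                    return -1;
+            write(fd, cmd, n);
+            close(fd);
+            return 0;       /* "echo 0 > test_pattern" later restores video mode */
+    }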
+
+static const struct file_operations dp_link_settings_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_link_settings_read,
+ .write = dp_link_settings_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_phy_settings_debugfs_fop = {
+ .owner = THIS_MODULE,
+ .read = dp_phy_settings_read,
+ .write = dp_phy_settings_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_phy_test_pattern_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_phy_test_pattern_debugfs_write,
+ .llseek = default_llseek
+};
+
+static const struct {
+ char *name;
+ const struct file_operations *fops;
+} dp_debugfs_entries[] = {
+ {"link_settings", &dp_link_settings_debugfs_fops},
+ {"phy_settings", &dp_phy_settings_debugfs_fop},
+ {"test_pattern", &dp_phy_test_pattern_fops}
+};
+
+int connector_debugfs_init(struct amdgpu_dm_connector *connector)
+{
+ int i;
+ struct dentry *ent, *dir = connector->base.debugfs_entry;
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
+ ent = debugfs_create_file(dp_debugfs_entries[i].name,
+ 0644,
+ dir,
+ connector,
+ dp_debugfs_entries[i].fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+ }
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
index b43315cc5d58..d9ed1b2aa811 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,18 +19,16 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: AMD
+ *
*/
-#ifndef PP_POWERSOURCE_H
-#define PP_POWERSOURCE_H
+#ifndef __AMDGPU_DM_DEBUGFS_H__
+#define __AMDGPU_DM_DEBUGFS_H__
-enum pp_power_source {
- PP_PowerSource_AC = 0,
- PP_PowerSource_DC,
- PP_PowerSource_LimitedPower,
- PP_PowerSource_LimitedPower_2,
- PP_PowerSource_Max
-};
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+int connector_debugfs_init(struct amdgpu_dm_connector *connector);
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index ec304b1a5973..8403b6a9a77b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -169,6 +169,11 @@ static void get_payload_table(
mutex_unlock(&mst_mgr->payload_lock);
}
+void dm_helpers_dp_update_branch_info(
+ struct dc_context *ctx,
+ const struct dc_link *link)
+{}
+
/*
* Writes payload allocation table in immediate downstream device.
*/
@@ -454,6 +459,22 @@ bool dm_helpers_submit_i2c(
return result;
}
+bool dm_helpers_is_dp_sink_present(struct dc_link *link)
+{
+ bool dp_sink_present;
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ BUG_ON("Failed to found connector for link!");
+ return true;
+ }
+
+ mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
+ dp_sink_present = dc_link_is_dp_sink_present(link);
+ mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
+ return dp_sink_present;
+}
+
enum dc_edid_status dm_helpers_read_local_edid(
struct dc_context *ctx,
struct dc_link *link,
@@ -497,6 +518,34 @@ enum dc_edid_status dm_helpers_read_local_edid(
DRM_ERROR("EDID err: %d, on connector: %s",
edid_status,
aconnector->base.name);
+ if (link->aux_mode) {
+ union test_request test_request = { {0} };
+ union test_response test_response = { {0} };
+
+ dm_helpers_dp_read_dpcd(ctx,
+ link,
+ DP_TEST_REQUEST,
+ &test_request.raw,
+ sizeof(union test_request));
+
+ if (!test_request.bits.EDID_READ)
+ return edid_status;
+
+ test_response.bits.EDID_CHECKSUM_WRITE = 1;
+
+ dm_helpers_dp_write_dpcd(ctx,
+ link,
+ DP_TEST_EDID_CHECKSUM,
+ &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
+ 1);
+
+ dm_helpers_dp_write_dpcd(ctx,
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+
+ }
return edid_status;
}
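
For reference, the byte forwarded to DP_TEST_EDID_CHECKSUM above is the
standard EDID block checksum: all 128 bytes of a block must sum to 0 (mod 256),
so the last byte is chosen to balance the first 127. A self-contained sketch:

    #include <stddef.h>
    #include <stdint.h>

    static uint8_t edid_block_checksum(const uint8_t block[128])
    {
            uint8_t sum = 0;
            size_t i;

            for (i = 0; i < 127; i++)
                    sum += block[i];

            return (uint8_t)(0x100 - sum);  /* equals block[127] when valid */
    }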
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index ace9ad578ca0..9a300732ba37 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -80,53 +80,72 @@ static void log_dpcd(uint8_t type,
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
- enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
- I2C_MOT_TRUE : I2C_MOT_FALSE;
- enum ddc_result res;
- ssize_t read_bytes;
+ ssize_t result = 0;
+ enum i2caux_transaction_action action;
+ enum aux_transaction_type type;
if (WARN_ON(msg->size > 16))
return -E2BIG;
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_READ:
- read_bytes = dal_ddc_service_read_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- false,
- I2C_MOT_UNDEF,
- msg->address,
- msg->buffer,
- msg->size);
- return read_bytes;
+ type = AUX_TRANSACTION_TYPE_DP;
+ action = I2CAUX_TRANSACTION_ACTION_DP_READ;
+
+ result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
+ break;
case DP_AUX_NATIVE_WRITE:
- res = dal_ddc_service_write_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- false,
- I2C_MOT_UNDEF,
- msg->address,
- msg->buffer,
- msg->size);
+ type = AUX_TRANSACTION_TYPE_DP;
+ action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+
+ dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
+ result = msg->size;
break;
case DP_AUX_I2C_READ:
- read_bytes = dal_ddc_service_read_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- true,
- mot,
- msg->address,
- msg->buffer,
- msg->size);
- return read_bytes;
+ type = AUX_TRANSACTION_TYPE_I2C;
+ if (msg->request & DP_AUX_I2C_MOT)
+ action = I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT;
+ else
+ action = I2CAUX_TRANSACTION_ACTION_I2C_READ;
+
+ result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
+ break;
case DP_AUX_I2C_WRITE:
- res = dal_ddc_service_write_dpcd_data(
- TO_DM_AUX(aux)->ddc_service,
- true,
- mot,
- msg->address,
- msg->buffer,
- msg->size);
+ type = AUX_TRANSACTION_TYPE_I2C;
+ if (msg->request & DP_AUX_I2C_MOT)
+ action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT;
+ else
+ action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+
+ dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+ msg->address,
+ &msg->reply,
+ msg->buffer,
+ msg->size,
+ type,
+ action);
+ result = msg->size;
break;
default:
- return 0;
+ return -EINVAL;
}
#ifdef TRACE_DPCD
@@ -137,7 +156,10 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
r == DDC_RESULT_SUCESSFULL);
#endif
- return msg->size;
+ if (result < 0) /* DC doesn't know about kernel error codes */
+ result = -EIO;
+
+ return result;
}
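
The switch above pairs each DRM AUX request with a DC transaction type and
action. A hypothetical helper that factors the mapping out, using only
identifiers already present in this file (a sketch for reference, not part of
the patch):

    static int map_aux_request(u8 request,
                               enum aux_transaction_type *type,
                               enum i2caux_transaction_action *action)
    {
            bool mot = request & DP_AUX_I2C_MOT;

            switch (request & ~DP_AUX_I2C_MOT) {
            case DP_AUX_NATIVE_READ:
                    *type = AUX_TRANSACTION_TYPE_DP;
                    *action = I2CAUX_TRANSACTION_ACTION_DP_READ;
                    return 0;
            case DP_AUX_NATIVE_WRITE:
                    *type = AUX_TRANSACTION_TYPE_DP;
                    *action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
                    return 0;
            case DP_AUX_I2C_READ:
                    *type = AUX_TRANSACTION_TYPE_I2C;
                    *action = mot ? I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT
                                  : I2CAUX_TRANSACTION_ACTION_I2C_READ;
                    return 0;
            case DP_AUX_I2C_WRITE:
                    *type = AUX_TRANSACTION_TYPE_I2C;
                    *action = mot ? I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT
                                  : I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
                    return 0;
            default:
                    return -EINVAL;
            }
    }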
static enum drm_connector_status
@@ -229,7 +251,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
if (!edid) {
- drm_mode_connector_update_edid_property(
+ drm_connector_update_edid_property(
&aconnector->base,
NULL);
return ret;
@@ -257,7 +279,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
connector, aconnector->edid);
}
- drm_mode_connector_update_edid_property(
+ drm_connector_update_edid_property(
&aconnector->base, aconnector->edid);
ret = drm_add_edid_modes(connector, aconnector->edid);
@@ -341,7 +363,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
aconnector, connector->base.id, aconnector->mst_port);
aconnector->port = port;
- drm_mode_connector_set_path_property(connector, pathprop);
+ drm_connector_set_path_property(connector, pathprop);
drm_connector_list_iter_end(&conn_iter);
aconnector->mst_connected = true;
@@ -389,7 +411,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
dev->mode_config.tile_property,
0);
- drm_mode_connector_set_path_property(connector, pathprop);
+ drm_connector_set_path_property(connector, pathprop);
/*
* Initialize connector state before adding the connector to drm and
@@ -437,7 +459,7 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
{
mutex_lock(&connector->dev->mode_config.mutex);
- drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+ drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
mutex_unlock(&connector->dev->mode_config.mutex);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
new file mode 100644
index 000000000000..fbe878ae1e8c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -0,0 +1,562 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+#include <linux/string.h>
+#include <linux/acpi.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "amdgpu_pm.h"
+#include "dm_pp_smu.h"
+
+
+bool dm_pp_apply_display_requirements(
+ const struct dc_context *ctx,
+ const struct dm_pp_display_configuration *pp_display_cfg)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ int i;
+
+ if (adev->pm.dpm_enabled) {
+
+ memset(&adev->pm.pm_display_cfg, 0,
+ sizeof(adev->pm.pm_display_cfg));
+
+ adev->pm.pm_display_cfg.cpu_cc6_disable =
+ pp_display_cfg->cpu_cc6_disable;
+
+ adev->pm.pm_display_cfg.cpu_pstate_disable =
+ pp_display_cfg->cpu_pstate_disable;
+
+ adev->pm.pm_display_cfg.cpu_pstate_separation_time =
+ pp_display_cfg->cpu_pstate_separation_time;
+
+ adev->pm.pm_display_cfg.nb_pstate_switch_disable =
+ pp_display_cfg->nb_pstate_switch_disable;
+
+ adev->pm.pm_display_cfg.num_display =
+ pp_display_cfg->display_count;
+ adev->pm.pm_display_cfg.num_path_including_non_display =
+ pp_display_cfg->display_count;
+
+ adev->pm.pm_display_cfg.min_core_set_clock =
+ pp_display_cfg->min_engine_clock_khz/10;
+ adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
+ pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+ adev->pm.pm_display_cfg.min_mem_set_clock =
+ pp_display_cfg->min_memory_clock_khz/10;
+
+ adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
+ pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+ adev->pm.pm_display_cfg.min_dcef_set_clk =
+ pp_display_cfg->min_dcfclock_khz/10;
+
+ adev->pm.pm_display_cfg.multi_monitor_in_sync =
+ pp_display_cfg->all_displays_in_sync;
+ adev->pm.pm_display_cfg.min_vblank_time =
+ pp_display_cfg->avail_mclk_switch_time_us;
+
+ adev->pm.pm_display_cfg.display_clk =
+ pp_display_cfg->disp_clk_khz/10;
+
+ adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
+
+ adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
+ adev->pm.pm_display_cfg.line_time_in_us =
+ pp_display_cfg->line_time_in_us;
+
+ adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
+ adev->pm.pm_display_cfg.crossfire_display_index = -1;
+ adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
+
+ for (i = 0; i < pp_display_cfg->display_count; i++) {
+ const struct dm_pp_single_disp_config *dc_cfg =
+ &pp_display_cfg->disp_configs[i];
+ adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
+ }
+
+ /* TODO: complete implementation of
+ * pp_display_configuration_change().
+ * Follow example of:
+ * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
+ * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
+ if (adev->powerplay.pp_funcs->display_configuration_change)
+ adev->powerplay.pp_funcs->display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
+
+ /* TODO: replace by a separate call to 'apply display cfg'? */
+ amdgpu_pm_compute_clocks(adev);
+ }
+
+ return true;
+}
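+
+The /10 scaling above converts DC's kHz values into the 10 kHz units that
+adev->pm.pm_display_cfg expects, the inverse of the "Translate 10 kHz to kHz"
+multiplications later in this file. A tiny illustrative sketch:
+
+    /* 300000 kHz (300 MHz) -> 30000 in pplib's 10 kHz units */
+    static inline uint32_t khz_to_10khz(uint32_t khz)
+    {
+            return khz / 10;
+    }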
+
+static void get_default_clock_levels(
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *clks)
+{
+ uint32_t disp_clks_in_khz[6] = {
+ 300000, 400000, 496560, 626090, 685720, 757900 };
+ uint32_t sclks_in_khz[6] = {
+ 300000, 360000, 423530, 514290, 626090, 720000 };
+ uint32_t mclks_in_khz[2] = { 333000, 800000 };
+
+ switch (clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ clks->num_levels = 6;
+ memmove(clks->clocks_in_khz, disp_clks_in_khz,
+ sizeof(disp_clks_in_khz));
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ clks->num_levels = 6;
+ memmove(clks->clocks_in_khz, sclks_in_khz,
+ sizeof(sclks_in_khz));
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ clks->num_levels = 2;
+ memmove(clks->clocks_in_khz, mclks_in_khz,
+ sizeof(mclks_in_khz));
+ break;
+ default:
+ clks->num_levels = 0;
+ break;
+ }
+}
+
+static enum amd_pp_clock_type dc_to_pp_clock_type(
+ enum dm_pp_clock_type dm_pp_clk_type)
+{
+ enum amd_pp_clock_type amd_pp_clk_type = 0;
+
+ switch (dm_pp_clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ amd_pp_clk_type = amd_pp_disp_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ amd_pp_clk_type = amd_pp_sys_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ amd_pp_clk_type = amd_pp_mem_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DCEFCLK:
+ amd_pp_clk_type = amd_pp_dcef_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DCFCLK:
+ amd_pp_clk_type = amd_pp_dcf_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+ amd_pp_clk_type = amd_pp_pixel_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_FCLK:
+ amd_pp_clk_type = amd_pp_f_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ amd_pp_clk_type = amd_pp_phy_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_DPPCLK:
+ amd_pp_clk_type = amd_pp_dpp_clock;
+ break;
+ default:
+ DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
+ dm_pp_clk_type);
+ break;
+ }
+
+ return amd_pp_clk_type;
+}
+
+static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
+ enum PP_DAL_POWERLEVEL max_clocks_state)
+{
+ switch (max_clocks_state) {
+ case PP_DAL_POWERLEVEL_0:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
+ case PP_DAL_POWERLEVEL_1:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
+ case PP_DAL_POWERLEVEL_2:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
+ case PP_DAL_POWERLEVEL_3:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
+ case PP_DAL_POWERLEVEL_4:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
+ case PP_DAL_POWERLEVEL_5:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
+ case PP_DAL_POWERLEVEL_6:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
+ case PP_DAL_POWERLEVEL_7:
+ return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
+ default:
+ DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
+ max_clocks_state);
+ return DM_PP_CLOCKS_STATE_INVALID;
+ }
+}
+
+static void pp_to_dc_clock_levels(
+ const struct amd_pp_clocks *pp_clks,
+ struct dm_pp_clock_levels *dc_clks,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->count,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ dc_clks->num_levels = pp_clks->count;
+
+ DRM_INFO("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
+ dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
+ }
+}
+
+static void pp_to_dc_clock_levels_with_latency(
+ const struct pp_clock_levels_with_latency *pp_clks,
+ struct dm_pp_clock_levels_with_latency *clk_level_info,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->num_levels,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ clk_level_info->num_levels = pp_clks->num_levels;
+
+ DRM_DEBUG("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < clk_level_info->num_levels; i++) {
+ DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
+ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+ clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
+ }
+}
+
+static void pp_to_dc_clock_levels_with_voltage(
+ const struct pp_clock_levels_with_voltage *pp_clks,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->num_levels,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ clk_level_info->num_levels = pp_clks->num_levels;
+
+ DRM_INFO("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < clk_level_info->num_levels; i++) {
+ DRM_INFO("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
+ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+ clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
+ }
+}
+
+bool dm_pp_get_clock_levels_by_type(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *dc_clks)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct amd_pp_clocks pp_clks = { 0 };
+ struct amd_pp_simple_clock_info validation_clks = { 0 };
+ uint32_t i;
+
+ if (adev->powerplay.pp_funcs->get_clock_by_type) {
+ if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
+ dc_to_pp_clock_type(clk_type), &pp_clks)) {
+ /* Error in pplib. Provide default values. */
+ get_default_clock_levels(clk_type, dc_clks);
+ return true;
+ }
+ }
+
+ pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
+
+ if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
+ if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
+ pp_handle, &validation_clks)) {
+ /* Error in pplib. Provide default values. */
+ DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+ validation_clks.engine_max_clock = 72000;
+ validation_clks.memory_max_clock = 80000;
+ validation_clks.level = 0;
+ }
+ }
+
+ DRM_INFO("DM_PPLIB: Validation clocks:\n");
+ DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
+ validation_clks.engine_max_clock);
+ DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
+ validation_clks.memory_max_clock);
+ DRM_INFO("DM_PPLIB: level : %d\n",
+ validation_clks.level);
+
+ /* Translate 10 kHz to kHz. */
+ validation_clks.engine_max_clock *= 10;
+ validation_clks.memory_max_clock *= 10;
+
+ /* Determine the highest non-boosted level from the Validation Clocks */
+ if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
+ /* This clock is higher than the validation clock.
+ * That means the previous one is the highest
+ * non-boosted one. */
+ DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
+ dc_clks->num_levels, i);
+ dc_clks->num_levels = i > 0 ? i : 1;
+ break;
+ }
+ }
+ } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
+ DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
+ dc_clks->num_levels, i);
+ dc_clks->num_levels = i > 0 ? i : 1;
+ break;
+ }
+ }
+ }
+
+ return true;
+}
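+
+A self-contained sketch of the "highest non-boosted level" clamp above, run on
+sample data; the clock values are illustrative only:
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+            uint32_t clocks_in_khz[] = { 300000, 600000, 720000, 800000 };
+            uint32_t num_levels = 4;
+            /* pplib reports 72000 in 10 kHz units; *10 gives 720000 kHz */
+            uint32_t engine_max_clock = 72000 * 10;
+            uint32_t i;
+
+            for (i = 0; i < num_levels; i++) {
+                    if (clocks_in_khz[i] > engine_max_clock) {
+                            /* the previous level is the highest non-boosted one */
+                            num_levels = i > 0 ? i : 1;
+                            break;
+                    }
+            }
+            printf("usable levels: %u\n", num_levels);      /* prints 3 */
+            return 0;
+    }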
+
+bool dm_pp_get_clock_levels_by_type_with_latency(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_latency *clk_level_info)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct pp_clock_levels_with_latency pp_clks = { 0 };
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
+ return false;
+
+ if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clks))
+ return false;
+
+ pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
+
+ return true;
+}
+
+bool dm_pp_get_clock_levels_by_type_with_voltage(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct pp_clock_levels_with_voltage pp_clk_info = {0};
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clk_info))
+ return false;
+
+ pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
+
+ return true;
+}
+
+bool dm_pp_notify_wm_clock_changes(
+ const struct dc_context *ctx,
+ struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_apply_power_level_change_request(
+ const struct dc_context *ctx,
+ struct dm_pp_power_level_change_request *level_change_req)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_apply_clock_for_voltage_request(
+ const struct dc_context *ctx,
+ struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct pp_display_clock_request pp_clock_request = {0};
+ int ret = 0;
+
+ pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
+ pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
+
+ if (!pp_clock_request.clock_type)
+ return false;
+
+ if (adev->powerplay.pp_funcs->display_clock_voltage_request)
+ ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
+ adev->powerplay.pp_handle,
+ &pp_clock_request);
+ if (ret)
+ return false;
+ return true;
+}
+
+bool dm_pp_get_static_clocks(
+ const struct dc_context *ctx,
+ struct dm_pp_static_clock_info *static_clk_info)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct amd_pp_clock_info pp_clk_info = {0};
+ int ret = 0;
+
+ if (adev->powerplay.pp_funcs->get_current_clocks)
+ ret = adev->powerplay.pp_funcs->get_current_clocks(
+ adev->powerplay.pp_handle,
+ &pp_clk_info);
+ if (ret)
+ return false;
+
+ static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
+ static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
+ static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
+
+ return true;
+}
+
+void pp_rv_set_display_requirement(struct pp_smu *pp,
+ struct pp_smu_display_requirement_rv *req)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->display_configuration_changed)
+ return;
+
+ amdgpu_dpm_display_configuration_changed(adev);
+}
+
+void pp_rv_set_wm_ranges(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
+ struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
+ struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
+ int32_t i;
+
+ wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
+ wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+ if (ranges->reader_wm_sets[i].wm_inst > 3)
+ wm_dce_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_dce_clocks[i].wm_set_id =
+ ranges->reader_wm_sets[i].wm_inst;
+ wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].max_drain_clk_khz;
+ wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].min_drain_clk_khz;
+ wm_dce_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].max_fill_clk_khz;
+ wm_dce_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].min_fill_clk_khz;
+ }
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+ if (ranges->writer_wm_sets[i].wm_inst > 3)
+ wm_soc_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_soc_clocks[i].wm_set_id =
+ ranges->writer_wm_sets[i].wm_inst;
+ wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].max_fill_clk_khz;
+ wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].min_fill_clk_khz;
+ wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].max_drain_clk_khz;
+ wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].min_drain_clk_khz;
+ }
+
+ pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
+}
+
+void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
+{
+ struct dc_context *ctx = pp->ctx;
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->notify_smu_enable_pwe)
+ return;
+
+ pp_funcs->notify_smu_enable_pwe(pp_handle);
+}
+
+void dm_pp_get_funcs_rv(
+ struct dc_context *ctx,
+ struct pp_smu_funcs_rv *funcs)
+{
+ funcs->pp_smu.ctx = ctx;
+ funcs->set_display_requirement = pp_rv_set_display_requirement;
+ funcs->set_wm_ranges = pp_rv_set_wm_ranges;
+ funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 5a2e952c5bea..516795342dd2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -35,13 +35,7 @@
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
-unsigned long long dm_get_timestamp(struct dc_context *ctx)
-{
- struct timespec64 time;
- getrawmonotonic64(&time);
- return timespec64_to_ns(&time);
-}
unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
unsigned long long current_time_stamp,
@@ -80,327 +74,3 @@ bool dm_read_persistent_data(struct dc_context *ctx,
/**** power component interfaces ****/
-bool dm_pp_apply_display_requirements(
- const struct dc_context *ctx,
- const struct dm_pp_display_configuration *pp_display_cfg)
-{
- struct amdgpu_device *adev = ctx->driver_context;
-
- if (adev->pm.dpm_enabled) {
-
- memset(&adev->pm.pm_display_cfg, 0,
- sizeof(adev->pm.pm_display_cfg));
-
- adev->pm.pm_display_cfg.cpu_cc6_disable =
- pp_display_cfg->cpu_cc6_disable;
-
- adev->pm.pm_display_cfg.cpu_pstate_disable =
- pp_display_cfg->cpu_pstate_disable;
-
- adev->pm.pm_display_cfg.cpu_pstate_separation_time =
- pp_display_cfg->cpu_pstate_separation_time;
-
- adev->pm.pm_display_cfg.nb_pstate_switch_disable =
- pp_display_cfg->nb_pstate_switch_disable;
-
- adev->pm.pm_display_cfg.num_display =
- pp_display_cfg->display_count;
- adev->pm.pm_display_cfg.num_path_including_non_display =
- pp_display_cfg->display_count;
-
- adev->pm.pm_display_cfg.min_core_set_clock =
- pp_display_cfg->min_engine_clock_khz/10;
- adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
- pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
- adev->pm.pm_display_cfg.min_mem_set_clock =
- pp_display_cfg->min_memory_clock_khz/10;
-
- adev->pm.pm_display_cfg.multi_monitor_in_sync =
- pp_display_cfg->all_displays_in_sync;
- adev->pm.pm_display_cfg.min_vblank_time =
- pp_display_cfg->avail_mclk_switch_time_us;
-
- adev->pm.pm_display_cfg.display_clk =
- pp_display_cfg->disp_clk_khz/10;
-
- adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
-
- adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
- adev->pm.pm_display_cfg.line_time_in_us =
- pp_display_cfg->line_time_in_us;
-
- adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
- adev->pm.pm_display_cfg.crossfire_display_index = -1;
- adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
-
- /* TODO: complete implementation of
- * pp_display_configuration_change().
- * Follow example of:
- * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
- * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
- if (adev->powerplay.pp_funcs->display_configuration_change)
- adev->powerplay.pp_funcs->display_configuration_change(
- adev->powerplay.pp_handle,
- &adev->pm.pm_display_cfg);
-
- /* TODO: replace by a separate call to 'apply display cfg'? */
- amdgpu_pm_compute_clocks(adev);
- }
-
- return true;
-}
-
-static void get_default_clock_levels(
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels *clks)
-{
- uint32_t disp_clks_in_khz[6] = {
- 300000, 400000, 496560, 626090, 685720, 757900 };
- uint32_t sclks_in_khz[6] = {
- 300000, 360000, 423530, 514290, 626090, 720000 };
- uint32_t mclks_in_khz[2] = { 333000, 800000 };
-
- switch (clk_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- clks->num_levels = 6;
- memmove(clks->clocks_in_khz, disp_clks_in_khz,
- sizeof(disp_clks_in_khz));
- break;
- case DM_PP_CLOCK_TYPE_ENGINE_CLK:
- clks->num_levels = 6;
- memmove(clks->clocks_in_khz, sclks_in_khz,
- sizeof(sclks_in_khz));
- break;
- case DM_PP_CLOCK_TYPE_MEMORY_CLK:
- clks->num_levels = 2;
- memmove(clks->clocks_in_khz, mclks_in_khz,
- sizeof(mclks_in_khz));
- break;
- default:
- clks->num_levels = 0;
- break;
- }
-}
-
-static enum amd_pp_clock_type dc_to_pp_clock_type(
- enum dm_pp_clock_type dm_pp_clk_type)
-{
- enum amd_pp_clock_type amd_pp_clk_type = 0;
-
- switch (dm_pp_clk_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- amd_pp_clk_type = amd_pp_disp_clock;
- break;
- case DM_PP_CLOCK_TYPE_ENGINE_CLK:
- amd_pp_clk_type = amd_pp_sys_clock;
- break;
- case DM_PP_CLOCK_TYPE_MEMORY_CLK:
- amd_pp_clk_type = amd_pp_mem_clock;
- break;
- default:
- DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
- dm_pp_clk_type);
- break;
- }
-
- return amd_pp_clk_type;
-}
-
-static void pp_to_dc_clock_levels(
- const struct amd_pp_clocks *pp_clks,
- struct dm_pp_clock_levels *dc_clks,
- enum dm_pp_clock_type dc_clk_type)
-{
- uint32_t i;
-
- if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
- DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
- pp_clks->count,
- DM_PP_MAX_CLOCK_LEVELS);
-
- dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
- } else
- dc_clks->num_levels = pp_clks->count;
-
- DRM_INFO("DM_PPLIB: values for %s clock\n",
- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
-
- for (i = 0; i < dc_clks->num_levels; i++) {
- DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
- /* translate 10kHz to kHz */
- dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
- }
-}
-
-static void pp_to_dc_clock_levels_with_latency(
- const struct pp_clock_levels_with_latency *pp_clks,
- struct dm_pp_clock_levels_with_latency *clk_level_info,
- enum dm_pp_clock_type dc_clk_type)
-{
- uint32_t i;
-
- if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
- DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
- pp_clks->num_levels,
- DM_PP_MAX_CLOCK_LEVELS);
-
- clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
- } else
- clk_level_info->num_levels = pp_clks->num_levels;
-
- DRM_DEBUG("DM_PPLIB: values for %s clock\n",
- DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
-
- for (i = 0; i < clk_level_info->num_levels; i++) {
- DRM_DEBUG("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz);
- /* translate 10kHz to kHz */
- clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10;
- clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
- }
-}
-
-bool dm_pp_get_clock_levels_by_type(
- const struct dc_context *ctx,
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels *dc_clks)
-{
- struct amdgpu_device *adev = ctx->driver_context;
- void *pp_handle = adev->powerplay.pp_handle;
- struct amd_pp_clocks pp_clks = { 0 };
- struct amd_pp_simple_clock_info validation_clks = { 0 };
- uint32_t i;
-
- if (adev->powerplay.pp_funcs->get_clock_by_type) {
- if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
- dc_to_pp_clock_type(clk_type), &pp_clks)) {
- /* Error in pplib. Provide default values. */
- get_default_clock_levels(clk_type, dc_clks);
- return true;
- }
- }
-
- pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
-
- if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
- if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
- pp_handle, &validation_clks)) {
- /* Error in pplib. Provide default values. */
- DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
- validation_clks.engine_max_clock = 72000;
- validation_clks.memory_max_clock = 80000;
- validation_clks.level = 0;
- }
- }
-
- DRM_INFO("DM_PPLIB: Validation clocks:\n");
- DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
- validation_clks.engine_max_clock);
- DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
- validation_clks.memory_max_clock);
- DRM_INFO("DM_PPLIB: level : %d\n",
- validation_clks.level);
-
- /* Translate 10 kHz to kHz. */
- validation_clks.engine_max_clock *= 10;
- validation_clks.memory_max_clock *= 10;
-
- /* Determine the highest non-boosted level from the Validation Clocks */
- if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
- for (i = 0; i < dc_clks->num_levels; i++) {
- if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
- /* This clock is higher than the validation clock.
- * That means the previous one is the highest
- * non-boosted one. */
- DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
- dc_clks->num_levels, i);
- dc_clks->num_levels = i > 0 ? i : 1;
- break;
- }
- }
- } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
- for (i = 0; i < dc_clks->num_levels; i++) {
- if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
- DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
- dc_clks->num_levels, i);
- dc_clks->num_levels = i > 0 ? i : 1;
- break;
- }
- }
- }
-
- return true;
-}
-
-bool dm_pp_get_clock_levels_by_type_with_latency(
- const struct dc_context *ctx,
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels_with_latency *clk_level_info)
-{
- struct amdgpu_device *adev = ctx->driver_context;
- void *pp_handle = adev->powerplay.pp_handle;
- struct pp_clock_levels_with_latency pp_clks = { 0 };
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
- return false;
-
- if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
- dc_to_pp_clock_type(clk_type),
- &pp_clks))
- return false;
-
- pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
-
- return true;
-}
-
-bool dm_pp_get_clock_levels_by_type_with_voltage(
- const struct dc_context *ctx,
- enum dm_pp_clock_type clk_type,
- struct dm_pp_clock_levels_with_voltage *clk_level_info)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_notify_wm_clock_changes(
- const struct dc_context *ctx,
- struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_apply_power_level_change_request(
- const struct dc_context *ctx,
- struct dm_pp_power_level_change_request *level_change_req)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_apply_clock_for_voltage_request(
- const struct dc_context *ctx,
- struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-bool dm_pp_get_static_clocks(
- const struct dc_context *ctx,
- struct dm_pp_static_clock_info *static_clk_info)
-{
- /* TODO: to be implemented */
- return false;
-}
-
-void dm_pp_get_funcs_rv(
- struct dc_context *ctx,
- struct pp_smu_funcs_rv *funcs)
-{}
-
-/**** end of power component interfaces ****/
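
The helpers removed above encode two conventions that recur through the rest of this series: pplib reports clock levels in 10 kHz units while DC works in kHz, and level counts are clamped to DM_PP_MAX_CLOCK_LEVELS. A minimal standalone sketch of that translation; everything except the DM_PP_MAX_CLOCK_LEVELS name is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define DM_PP_MAX_CLOCK_LEVELS 8

    /* Convert pplib levels (10 kHz units) to DC levels (kHz), clamping count. */
    static unsigned int pp_levels_to_khz(const uint32_t *pp_clocks,
                                         unsigned int pp_count,
                                         uint32_t *dc_clocks_in_khz)
    {
            unsigned int n = pp_count > DM_PP_MAX_CLOCK_LEVELS ?
                             DM_PP_MAX_CLOCK_LEVELS : pp_count;
            unsigned int i;

            for (i = 0; i < n; i++)
                    dc_clocks_in_khz[i] = pp_clocks[i] * 10; /* 10 kHz -> kHz */

            return n;
    }

    int main(void)
    {
            const uint32_t pp[3] = { 30000, 60000, 80000 }; /* 300/600/800 MHz */
            uint32_t dc[DM_PP_MAX_CLOCK_LEVELS];
            unsigned int i, n = pp_levels_to_khz(pp, 3, dc);

            for (i = 0; i < n; i++)
                    printf("level %u: %u kHz\n", i, (unsigned int)dc[i]);
            return 0;
    }
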
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index aed538a4d1ba..532a515fda9a 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -25,7 +25,7 @@
DC_LIBS = basics bios calcs dce gpio i2caux irq virtual
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
DC_LIBS += dcn10 dml
endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index b49ea96b5dae..a50a76471107 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -25,7 +25,7 @@
# subcomponents.
BASICS = conversion.o fixpt31_32.o \
- logger.o log_helpers.o vector.o
+ log_helpers.o vector.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
index 021451549ff7..26583f346c39 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
@@ -28,75 +28,12 @@
#include "include/logger_interface.h"
#include "dm_helpers.h"
-#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
-
-struct dc_signal_type_info {
- enum signal_type type;
- char name[MAX_NAME_LEN];
-};
-
-static const struct dc_signal_type_info signal_type_info_tbl[] = {
- {SIGNAL_TYPE_NONE, "NC"},
- {SIGNAL_TYPE_DVI_SINGLE_LINK, "DVI"},
- {SIGNAL_TYPE_DVI_DUAL_LINK, "DDVI"},
- {SIGNAL_TYPE_HDMI_TYPE_A, "HDMIA"},
- {SIGNAL_TYPE_LVDS, "LVDS"},
- {SIGNAL_TYPE_RGB, "VGA"},
- {SIGNAL_TYPE_DISPLAY_PORT, "DP"},
- {SIGNAL_TYPE_DISPLAY_PORT_MST, "MST"},
- {SIGNAL_TYPE_EDP, "eDP"},
- {SIGNAL_TYPE_VIRTUAL, "Virtual"}
-};
-
-void dc_conn_log(struct dc_context *ctx,
- const struct dc_link *link,
- uint8_t *hex_data,
- int hex_data_count,
- enum dc_log_type event,
- const char *msg,
- ...)
+void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count)
{
int i;
- va_list args;
- struct log_entry entry = { 0 };
- enum signal_type signal;
-
- if (link->local_sink)
- signal = link->local_sink->sink_signal;
- else
- signal = link->connector_signal;
-
- if (link->type == dc_connection_mst_branch)
- signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
-
- dm_logger_open(ctx->logger, &entry, event);
-
- for (i = 0; i < NUM_ELEMENTS(signal_type_info_tbl); i++)
- if (signal == signal_type_info_tbl[i].type)
- break;
-
- if (i == NUM_ELEMENTS(signal_type_info_tbl))
- goto fail;
-
- dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
- signal_type_info_tbl[i].name,
- link->link_index);
-
- va_start(args, msg);
- dm_logger_append_va(&entry, msg, args);
-
- if (entry.buf_offset > 0 &&
- entry.buf[entry.buf_offset - 1] == '\n')
- entry.buf_offset--;
if (hex_data)
for (i = 0; i < hex_data_count; i++)
- dm_logger_append(&entry, "%2.2X ", hex_data[i]);
-
- dm_logger_append(&entry, "^\n");
-
-fail:
- dm_logger_close(&entry);
-
- va_end(args);
+ DC_LOG_DEBUG("%2.2X ", hex_data[i]);
}
+
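
The replacement helper reduces connector logging to a raw hex dump routed through DC_LOG_DEBUG. A userspace sketch of the same byte formatting, with printf standing in for the kernel log macro and the sample bytes invented:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace analogue of dc_conn_log_hex_linux(): "XX XX XX ..." output. */
    static void log_hex(const uint8_t *data, int count)
    {
            int i;

            if (!data)
                    return;
            for (i = 0; i < count; i++)
                    printf("%2.2X ", data[i]);
            putchar('\n');
    }

    int main(void)
    {
            const uint8_t dpcd[4] = { 0x11, 0x0a, 0x84, 0x01 };

            log_hex(dpcd, 4); /* prints: 11 0A 84 01 */
            return 0;
    }
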
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
deleted file mode 100644
index 0866874ae8c6..000000000000
--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-#include "dm_services.h"
-#include "include/logger_interface.h"
-#include "logger.h"
-
-
-#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
-
-static const struct dc_log_type_info log_type_info_tbl[] = {
- {LOG_ERROR, "Error"},
- {LOG_WARNING, "Warning"},
- {LOG_DEBUG, "Debug"},
- {LOG_DC, "DC_Interface"},
- {LOG_SURFACE, "Surface"},
- {LOG_HW_HOTPLUG, "HW_Hotplug"},
- {LOG_HW_LINK_TRAINING, "HW_LKTN"},
- {LOG_HW_SET_MODE, "HW_Mode"},
- {LOG_HW_RESUME_S3, "HW_Resume"},
- {LOG_HW_AUDIO, "HW_Audio"},
- {LOG_HW_HPD_IRQ, "HW_HPDIRQ"},
- {LOG_MST, "MST"},
- {LOG_SCALER, "Scaler"},
- {LOG_BIOS, "BIOS"},
- {LOG_BANDWIDTH_CALCS, "BWCalcs"},
- {LOG_BANDWIDTH_VALIDATION, "BWValidation"},
- {LOG_I2C_AUX, "I2C_AUX"},
- {LOG_SYNC, "Sync"},
- {LOG_BACKLIGHT, "Backlight"},
- {LOG_FEATURE_OVERRIDE, "Override"},
- {LOG_DETECTION_EDID_PARSER, "Edid"},
- {LOG_DETECTION_DP_CAPS, "DP_Caps"},
- {LOG_RESOURCE, "Resource"},
- {LOG_DML, "DML"},
- {LOG_EVENT_MODE_SET, "Mode"},
- {LOG_EVENT_DETECTION, "Detect"},
- {LOG_EVENT_LINK_TRAINING, "LKTN"},
- {LOG_EVENT_LINK_LOSS, "LinkLoss"},
- {LOG_EVENT_UNDERFLOW, "Underflow"},
- {LOG_IF_TRACE, "InterfaceTrace"},
- {LOG_DTN, "DTN"},
- {LOG_DISPLAYSTATS, "DisplayStats"}
-};
-
-
-/* ----------- Object init and destruction ----------- */
-static bool construct(struct dc_context *ctx, struct dal_logger *logger,
- uint32_t log_mask)
-{
- /* malloc buffer and init offsets */
- logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
- logger->log_buffer = kcalloc(logger->log_buffer_size, sizeof(char),
- GFP_KERNEL);
- if (!logger->log_buffer)
- return false;
-
- /* Initialize both offsets to start of buffer (empty) */
- logger->buffer_read_offset = 0;
- logger->buffer_write_offset = 0;
-
- logger->open_count = 0;
-
- logger->flags.bits.ENABLE_CONSOLE = 1;
- logger->flags.bits.ENABLE_BUFFER = 0;
-
- logger->ctx = ctx;
-
- logger->mask = log_mask;
-
- return true;
-}
-
-static void destruct(struct dal_logger *logger)
-{
- if (logger->log_buffer) {
- kfree(logger->log_buffer);
- logger->log_buffer = NULL;
- }
-}
-
-struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask)
-{
- /* malloc struct */
- struct dal_logger *logger = kzalloc(sizeof(struct dal_logger),
- GFP_KERNEL);
-
- if (!logger)
- return NULL;
- if (!construct(ctx, logger, log_mask)) {
- kfree(logger);
- return NULL;
- }
-
- return logger;
-}
-
-uint32_t dal_logger_destroy(struct dal_logger **logger)
-{
- if (logger == NULL || *logger == NULL)
- return 1;
- destruct(*logger);
- kfree(*logger);
- *logger = NULL;
-
- return 0;
-}
-
-/* ------------------------------------------------------------------------ */
-
-
-static bool dal_logger_should_log(
- struct dal_logger *logger,
- enum dc_log_type log_type)
-{
- if (logger->mask & (1 << log_type))
- return true;
-
- return false;
-}
-
-static void log_to_debug_console(struct log_entry *entry)
-{
- struct dal_logger *logger = entry->logger;
-
- if (logger->flags.bits.ENABLE_CONSOLE == 0)
- return;
-
- if (entry->buf_offset) {
- switch (entry->type) {
- case LOG_ERROR:
- dm_error("%s", entry->buf);
- break;
- default:
- dm_output_to_console("%s", entry->buf);
- break;
- }
- }
-}
-
-/* Print everything unread in log_buffer to the debug console */
-void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
-{
- char *string_start = &logger->log_buffer[logger->buffer_read_offset];
-
- if (should_warn)
- dm_output_to_console(
- "---------------- FLUSHING LOG BUFFER ----------------\n");
- while (logger->buffer_read_offset < logger->buffer_write_offset) {
-
- if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
- dm_output_to_console("%s", string_start);
- string_start = logger->log_buffer + logger->buffer_read_offset + 1;
- }
- logger->buffer_read_offset++;
- }
- if (should_warn)
- dm_output_to_console(
- "-------------- END FLUSHING LOG BUFFER --------------\n\n");
-}
-
-static void log_to_internal_buffer(struct log_entry *entry)
-{
-
- uint32_t size = entry->buf_offset;
- struct dal_logger *logger = entry->logger;
-
- if (logger->flags.bits.ENABLE_BUFFER == 0)
- return;
-
- if (logger->log_buffer == NULL)
- return;
-
- if (size > 0 && size < logger->log_buffer_size) {
-
- int buffer_space = logger->log_buffer_size -
- logger->buffer_write_offset;
-
- if (logger->buffer_write_offset == logger->buffer_read_offset) {
- /* Buffer is empty, start writing at beginning */
- buffer_space = logger->log_buffer_size;
- logger->buffer_write_offset = 0;
- logger->buffer_read_offset = 0;
- }
-
- if (buffer_space > size) {
- /* No wrap around, copy 'size' bytes
- * from 'entry->buf' to 'log_buffer'
- */
- memmove(logger->log_buffer +
- logger->buffer_write_offset,
- entry->buf, size);
- logger->buffer_write_offset += size;
-
- } else {
- /* Not enough room remaining, we should flush
- * existing logs */
-
- /* Flush existing unread logs to console */
- dm_logger_flush_buffer(logger, true);
-
- /* Start writing to beginning of buffer */
- memmove(logger->log_buffer, entry->buf, size);
- logger->buffer_write_offset = size;
- logger->buffer_read_offset = 0;
- }
-
- }
-}
-
-static void log_heading(struct log_entry *entry)
-{
- int j;
-
- for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
-
- const struct dc_log_type_info *info = &log_type_info_tbl[j];
-
- if (info->type == entry->type)
- dm_logger_append(entry, "[%s]\t", info->name);
- }
-}
-
-static void append_entry(
- struct log_entry *entry,
- char *buffer,
- uint32_t buf_size)
-{
- if (!entry->buf ||
- entry->buf_offset + buf_size > entry->max_buf_bytes
- ) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- /* Todo: check if off by 1 byte due to \0 anywhere */
- memmove(entry->buf + entry->buf_offset, buffer, buf_size);
- entry->buf_offset += buf_size;
-}
-
-/* ------------------------------------------------------------------------ */
-
-/* Warning: Be careful that 'msg' is null terminated and the total size is
- * less than DAL_LOGGER_BUFFER_MAX_LOG_LINE_SIZE (256) including '\0'
- */
-void dm_logger_write(
- struct dal_logger *logger,
- enum dc_log_type log_type,
- const char *msg,
- ...)
-{
- if (logger && dal_logger_should_log(logger, log_type)) {
- uint32_t size;
- va_list args;
- char buffer[LOG_MAX_LINE_SIZE];
- struct log_entry entry;
-
- va_start(args, msg);
-
- entry.logger = logger;
-
- entry.buf = buffer;
-
- entry.buf_offset = 0;
- entry.max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
-
- entry.type = log_type;
-
- log_heading(&entry);
-
- size = dm_log_to_buffer(
- buffer, LOG_MAX_LINE_SIZE - 1, msg, args);
-
- buffer[entry.buf_offset + size] = '\0';
- entry.buf_offset += size + 1;
-
- /* --Flush log_entry buffer-- */
- /* print to kernel console */
- log_to_debug_console(&entry);
- /* log internally for dsat */
- log_to_internal_buffer(&entry);
-
- va_end(args);
- }
-}
-
-/* Same as dm_logger_write, except without open() and close(), which must
- * be done separately.
- */
-void dm_logger_append(
- struct log_entry *entry,
- const char *msg,
- ...)
-{
- va_list args;
-
- va_start(args, msg);
- dm_logger_append_va(entry, msg, args);
- va_end(args);
-}
-
-void dm_logger_append_va(
- struct log_entry *entry,
- const char *msg,
- va_list args)
-{
- struct dal_logger *logger;
-
- if (!entry) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- logger = entry->logger;
-
- if (logger && logger->open_count > 0 &&
- dal_logger_should_log(logger, entry->type)) {
-
- uint32_t size;
- char buffer[LOG_MAX_LINE_SIZE];
-
- size = dm_log_to_buffer(
- buffer, LOG_MAX_LINE_SIZE, msg, args);
-
- if (size < LOG_MAX_LINE_SIZE - 1) {
- append_entry(entry, buffer, size);
- } else {
- append_entry(entry, "LOG_ERROR, line too long\n", 27);
- }
- }
-}
-
-void dm_logger_open(
- struct dal_logger *logger,
- struct log_entry *entry, /* out */
- enum dc_log_type log_type)
-{
- if (!entry) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- entry->type = log_type;
- entry->logger = logger;
-
- entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE,
- GFP_KERNEL);
-
- entry->buf_offset = 0;
- entry->max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
-
- logger->open_count++;
-
- log_heading(entry);
-}
-
-void dm_logger_close(struct log_entry *entry)
-{
- struct dal_logger *logger = entry->logger;
-
- if (logger && logger->open_count > 0) {
- logger->open_count--;
- } else {
- BREAK_TO_DEBUGGER();
- goto cleanup;
- }
-
- /* --Flush log_entry buffer-- */
- /* print to kernel console */
- log_to_debug_console(entry);
- /* log internally for dsat */
- log_to_internal_buffer(entry);
-
- /* TODO: Write end heading */
-
-cleanup:
- if (entry->buf) {
- kfree(entry->buf);
- entry->buf = NULL;
- entry->buf_offset = 0;
- entry->max_buf_bytes = 0;
- }
-}
-
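
The deleted logger kept an internal buffer with simple overrun semantics: flushing prints everything between the read and write offsets, and when a new entry does not fit behind the write offset, the unread portion is flushed and writing restarts at offset zero. A compressed userspace sketch of that policy, with an illustrative buffer size and names:

    #include <stdio.h>
    #include <string.h>

    #define BUF_SZ 32

    static char log_buf[BUF_SZ];
    static size_t rd, wr;

    static void flush_unread(void)
    {
            /* Print everything between the read and write offsets, then reset. */
            fwrite(log_buf + rd, 1, wr - rd, stdout);
            rd = wr = 0;
    }

    static void log_append(const char *msg)
    {
            size_t len = strlen(msg);

            if (len >= BUF_SZ)
                    return;                 /* entry can never fit */
            if (wr + len > BUF_SZ)
                    flush_unread();         /* no room left: flush, restart at 0 */
            memcpy(log_buf + wr, msg, len);
            wr += len;
    }

    int main(void)
    {
            log_append("first entry\n");
            log_append("second entry\n");
            log_append("third entry, wraps\n"); /* forces a flush of 1 and 2 */
            flush_unread();                     /* prints the third entry */
            return 0;
    }
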
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index c7f0b27e457e..be8a2494355a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -3762,6 +3762,200 @@ static struct integrated_info *bios_parser_create_integrated_info(
return NULL;
}
+enum bp_result update_slot_layout_info(
+ struct dc_bios *dcb,
+ unsigned int i,
+ struct slot_layout_info *slot_layout_info,
+ unsigned int record_offset)
+{
+ unsigned int j;
+ struct bios_parser *bp;
+ ATOM_BRACKET_LAYOUT_RECORD *record;
+ ATOM_COMMON_RECORD_HEADER *record_header;
+ enum bp_result result = BP_RESULT_NORECORD;
+
+ bp = BP_FROM_DCB(dcb);
+ record = NULL;
+ record_header = NULL;
+
+ for (;;) {
+
+ record_header = (ATOM_COMMON_RECORD_HEADER *)
+ GET_IMAGE(ATOM_COMMON_RECORD_HEADER, record_offset);
+ if (record_header == NULL) {
+ result = BP_RESULT_BADBIOSTABLE;
+ break;
+ }
+
+ /* the end of the list */
+ if (record_header->ucRecordType == 0xff ||
+ record_header->ucRecordSize == 0) {
+ break;
+ }
+
+ if (record_header->ucRecordType ==
+ ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
+ sizeof(ATOM_BRACKET_LAYOUT_RECORD)
+ <= record_header->ucRecordSize) {
+ record = (ATOM_BRACKET_LAYOUT_RECORD *)
+ (record_header);
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ record_offset += record_header->ucRecordSize;
+ }
+
+ /* return if the record was not found */
+ if (result != BP_RESULT_OK)
+ return result;
+
+ /* get slot sizes */
+ slot_layout_info->length = record->ucLength;
+ slot_layout_info->width = record->ucWidth;
+
+ /* get info for each connector in the slot */
+ slot_layout_info->num_of_connectors = record->ucConnNum;
+ for (j = 0; j < slot_layout_info->num_of_connectors; ++j) {
+ slot_layout_info->connectors[j].connector_type =
+ (enum connector_layout_type)
+ (record->asConnInfo[j].ucConnectorType);
+ switch (record->asConnInfo[j].ucConnectorType) {
+ case CONNECTOR_TYPE_DVI_D:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_DVI_D;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_DVI;
+ break;
+
+ case CONNECTOR_TYPE_HDMI:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_HDMI;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_HDMI;
+ break;
+
+ case CONNECTOR_TYPE_DISPLAY_PORT:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_DP;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_DP;
+ break;
+
+ case CONNECTOR_TYPE_MINI_DISPLAY_PORT:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_MINI_DP;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_MINI_DP;
+ break;
+
+ default:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_UNKNOWN;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_UNKNOWN;
+ }
+
+ slot_layout_info->connectors[j].position =
+ record->asConnInfo[j].ucPosition;
+ slot_layout_info->connectors[j].connector_id =
+ object_id_from_bios_object_id(
+ record->asConnInfo[j].usConnectorObjectId);
+ }
+ return result;
+}
+
+
+enum bp_result get_bracket_layout_record(
+ struct dc_bios *dcb,
+ unsigned int bracket_layout_id,
+ struct slot_layout_info *slot_layout_info)
+{
+ unsigned int i;
+ unsigned int record_offset;
+ struct bios_parser *bp;
+ enum bp_result result;
+ ATOM_OBJECT *object;
+ ATOM_OBJECT_TABLE *object_table;
+ unsigned int genericTableOffset;
+
+ bp = BP_FROM_DCB(dcb);
+ object = NULL;
+ if (slot_layout_info == NULL) {
+ DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
+ return BP_RESULT_BADINPUT;
+ }
+
+
+ genericTableOffset = bp->object_info_tbl_offset +
+ bp->object_info_tbl.v1_3->usMiscObjectTableOffset;
+ object_table = (ATOM_OBJECT_TABLE *)
+ GET_IMAGE(ATOM_OBJECT_TABLE, genericTableOffset);
+ if (!object_table)
+ return BP_RESULT_FAILURE;
+
+ result = BP_RESULT_NORECORD;
+ for (i = 0; i < object_table->ucNumberOfObjects; ++i) {
+
+ if (bracket_layout_id ==
+ object_table->asObjects[i].usObjectID) {
+
+ object = &object_table->asObjects[i];
+ record_offset = object->usRecordOffset +
+ bp->object_info_tbl_offset;
+
+ result = update_slot_layout_info(dcb, i,
+ slot_layout_info, record_offset);
+ break;
+ }
+ }
+ return result;
+}
+
+static enum bp_result bios_get_board_layout_info(
+ struct dc_bios *dcb,
+ struct board_layout_info *board_layout_info)
+{
+ unsigned int i;
+ struct bios_parser *bp;
+ enum bp_result record_result;
+
+ const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
+ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
+ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2,
+ 0, 0
+ };
+
+ bp = BP_FROM_DCB(dcb);
+ if (board_layout_info == NULL) {
+ DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
+ return BP_RESULT_BADINPUT;
+ }
+
+ board_layout_info->num_of_slots = 0;
+
+ for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
+ record_result = get_bracket_layout_record(dcb,
+ slot_index_to_vbios_id[i],
+ &board_layout_info->slots[i]);
+
+ if (record_result == BP_RESULT_NORECORD && i > 0)
+ break; /* no more slots present in bios */
+ else if (record_result != BP_RESULT_OK)
+ return record_result; /* fail */
+
+ ++board_layout_info->num_of_slots;
+ }
+
+ /* all data is valid */
+ board_layout_info->is_number_of_slots_valid = 1;
+ board_layout_info->is_slots_size_valid = 1;
+ board_layout_info->is_connector_offsets_valid = 1;
+ board_layout_info->is_connector_lengths_valid = 1;
+
+ return BP_RESULT_OK;
+}
+
/******************************************************************************/
static const struct dc_vbios_funcs vbios_funcs = {
@@ -3836,6 +4030,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
.post_init = bios_parser_post_init, /* patch vbios table for mxm module by reading i2c */
.bios_parser_destroy = bios_parser_destroy,
+
+ .get_board_layout_info = bios_get_board_layout_info,
};
static bool bios_parser_construct(
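
Both the legacy parser above and the v2 parser below locate the bracket layout record the same way: walk a packed list of variable-size ATOM records, advancing by record size, until the wanted type is found or a terminator (type 0xff or size 0) ends the list. A self-contained sketch of that traversal; the struct layout and type ids are simplified stand-ins for the real BIOS image:

    #include <stdint.h>
    #include <stdio.h>

    #define RECORD_TYPE_END     0xff
    #define RECORD_TYPE_BRACKET 0x15        /* illustrative type id */

    struct record_header {
            uint8_t type;
            uint8_t size;   /* total size of this record, header included */
    };

    /* Walk records starting at 'offset' in 'image'; return the offset of the
     * wanted type, or -1 when the terminator is reached first. */
    static int find_record(const uint8_t *image, unsigned int offset,
                           uint8_t wanted)
    {
            for (;;) {
                    const struct record_header *hdr =
                            (const struct record_header *)(image + offset);

                    if (hdr->type == RECORD_TYPE_END || hdr->size == 0)
                            return -1;              /* end of the list */
                    if (hdr->type == wanted)
                            return (int)offset;
                    offset += hdr->size;            /* skip to the next record */
            }
    }

    int main(void)
    {
            /* One dummy record, the bracket record, then a terminator. */
            const uint8_t image[] = { 0x01, 4, 0, 0,
                                      RECORD_TYPE_BRACKET, 3, 0,
                                      RECORD_TYPE_END, 0 };

            printf("bracket record at offset %d\n",
                   find_record(image, 0, RECORD_TYPE_BRACKET));
            return 0;
    }
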
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index b8cef7af3c4a..eab007e1793c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -43,6 +43,29 @@
#include "bios_parser_interface.h"
#include "bios_parser_common.h"
+
+/* Temporarily add in defines until ObjectID.h patch is updated in a few days */
+#ifndef GENERIC_OBJECT_ID_BRACKET_LAYOUT
+#define GENERIC_OBJECT_ID_BRACKET_LAYOUT 0x05
+#endif /* GENERIC_OBJECT_ID_BRACKET_LAYOUT */
+
+#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 \
+ (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 */
+
+#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 \
+ (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 */
+
+#define DC_LOGGER \
+ bp->base.ctx->logger
+
#define LAST_RECORD_TYPE 0xff
#define SMU9_SYSPLL0_ID 0
@@ -86,7 +109,6 @@ static struct atom_encoder_caps_record *get_encoder_cap_record(
#define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
-
static void destruct(struct bios_parser *bp)
{
kfree(bp->base.bios_local_image);
@@ -656,7 +678,7 @@ static enum bp_result bios_parser_get_gpio_pin_info(
return BP_RESULT_BADBIOSTABLE;
if (sizeof(struct atom_common_table_header) +
- sizeof(struct atom_gpio_pin_lut_v2_1)
+ sizeof(struct atom_gpio_pin_assignment)
> le16_to_cpu(header->table_header.structuresize))
return BP_RESULT_BADBIOSTABLE;
@@ -1854,6 +1876,198 @@ static struct integrated_info *bios_parser_create_integrated_info(
return NULL;
}
+static enum bp_result update_slot_layout_info(
+ struct dc_bios *dcb,
+ unsigned int i,
+ struct slot_layout_info *slot_layout_info)
+{
+ unsigned int record_offset;
+ unsigned int j;
+ struct atom_display_object_path_v2 *object;
+ struct atom_bracket_layout_record *record;
+ struct atom_common_record_header *record_header;
+ enum bp_result result;
+ struct bios_parser *bp;
+ struct object_info_table *tbl;
+ struct display_object_info_table_v1_4 *v1_4;
+
+ record = NULL;
+ record_header = NULL;
+ result = BP_RESULT_NORECORD;
+
+ bp = BP_FROM_DCB(dcb);
+ tbl = &bp->object_info_tbl;
+ v1_4 = tbl->v1_4;
+
+ object = &v1_4->display_path[i];
+ record_offset = (unsigned int)
+ (object->disp_recordoffset) +
+ (unsigned int)(bp->object_info_tbl_offset);
+
+ for (;;) {
+
+ record_header = (struct atom_common_record_header *)
+ GET_IMAGE(struct atom_common_record_header,
+ record_offset);
+ if (record_header == NULL) {
+ result = BP_RESULT_BADBIOSTABLE;
+ break;
+ }
+
+ /* the end of the list */
+ if (record_header->record_type == 0xff ||
+ record_header->record_size == 0) {
+ break;
+ }
+
+ if (record_header->record_type ==
+ ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
+ sizeof(struct atom_bracket_layout_record)
+ <= record_header->record_size) {
+ record = (struct atom_bracket_layout_record *)
+ (record_header);
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ record_offset += record_header->record_size;
+ }
+
+ /* return if the record was not found */
+ if (result != BP_RESULT_OK)
+ return result;
+
+ /* get slot sizes */
+ slot_layout_info->length = record->bracketlen;
+ slot_layout_info->width = record->bracketwidth;
+
+ /* get info for each connector in the slot */
+ slot_layout_info->num_of_connectors = record->conn_num;
+ for (j = 0; j < slot_layout_info->num_of_connectors; ++j) {
+ slot_layout_info->connectors[j].connector_type =
+ (enum connector_layout_type)
+ (record->conn_info[j].connector_type);
+ switch (record->conn_info[j].connector_type) {
+ case CONNECTOR_TYPE_DVI_D:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_DVI_D;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_DVI;
+ break;
+
+ case CONNECTOR_TYPE_HDMI:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_HDMI;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_HDMI;
+ break;
+
+ case CONNECTOR_TYPE_DISPLAY_PORT:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_DP;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_DP;
+ break;
+
+ case CONNECTOR_TYPE_MINI_DISPLAY_PORT:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_MINI_DP;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_MINI_DP;
+ break;
+
+ default:
+ slot_layout_info->connectors[j].connector_type =
+ CONNECTOR_LAYOUT_TYPE_UNKNOWN;
+ slot_layout_info->connectors[j].length =
+ CONNECTOR_SIZE_UNKNOWN;
+ }
+
+ slot_layout_info->connectors[j].position =
+ record->conn_info[j].position;
+ slot_layout_info->connectors[j].connector_id =
+ object_id_from_bios_object_id(
+ record->conn_info[j].connectorobjid);
+ }
+ return result;
+}
+
+
+static enum bp_result get_bracket_layout_record(
+ struct dc_bios *dcb,
+ unsigned int bracket_layout_id,
+ struct slot_layout_info *slot_layout_info)
+{
+ unsigned int i;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result;
+ struct object_info_table *tbl;
+ struct display_object_info_table_v1_4 *v1_4;
+
+ if (slot_layout_info == NULL) {
+ DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
+ return BP_RESULT_BADINPUT;
+ }
+ tbl = &bp->object_info_tbl;
+ v1_4 = tbl->v1_4;
+
+ result = BP_RESULT_NORECORD;
+ for (i = 0; i < v1_4->number_of_path; ++i) {
+
+ if (bracket_layout_id ==
+ v1_4->display_path[i].display_objid) {
+ result = update_slot_layout_info(dcb, i,
+ slot_layout_info);
+ break;
+ }
+ }
+ return result;
+}
+
+static enum bp_result bios_get_board_layout_info(
+ struct dc_bios *dcb,
+ struct board_layout_info *board_layout_info)
+{
+ unsigned int i;
+ struct bios_parser *bp;
+ enum bp_result record_result;
+
+ const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
+ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
+ GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2,
+ 0, 0
+ };
+
+ bp = BP_FROM_DCB(dcb);
+ if (board_layout_info == NULL) {
+ DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
+ return BP_RESULT_BADINPUT;
+ }
+
+ board_layout_info->num_of_slots = 0;
+
+ for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
+ record_result = get_bracket_layout_record(dcb,
+ slot_index_to_vbios_id[i],
+ &board_layout_info->slots[i]);
+
+ if (record_result == BP_RESULT_NORECORD && i > 0)
+ break; /* no more slots present in bios */
+ else if (record_result != BP_RESULT_OK)
+ return record_result; /* fail */
+
+ ++board_layout_info->num_of_slots;
+ }
+
+ /* all data is valid */
+ board_layout_info->is_number_of_slots_valid = 1;
+ board_layout_info->is_slots_size_valid = 1;
+ board_layout_info->is_connector_offsets_valid = 1;
+ board_layout_info->is_connector_lengths_valid = 1;
+
+ return BP_RESULT_OK;
+}
+
static const struct dc_vbios_funcs vbios_funcs = {
.get_connectors_number = bios_parser_get_connectors_number,
@@ -1925,6 +2139,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
.bios_parser_destroy = firmware_parser_destroy,
.get_smu_clock_info = bios_parser_get_smu_clock_info,
+
+ .get_board_layout_info = bios_get_board_layout_info,
};
static bool bios_parser_construct(
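
The temporary GENERICOBJECT_BRACKET_LAYOUT defines above compose a bracket layout id by OR-ing a graph object type, an enum id, and an object id into one bit-packed value. A quick sketch of that packing and the reverse decomposition; the shift amounts and constants here are assumed from atombios ObjectID.h conventions, not taken from this patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed shift values in the style of ObjectID.h. */
    #define OBJECT_ID_SHIFT   0x00
    #define ENUM_ID_SHIFT     0x08
    #define OBJECT_TYPE_SHIFT 0x0C

    #define GRAPH_OBJECT_TYPE_GENERIC        0x07   /* illustrative */
    #define GRAPH_OBJECT_ENUM_ID1            0x01
    #define GENERIC_OBJECT_ID_BRACKET_LAYOUT 0x05

    int main(void)
    {
            uint16_t id = GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |
                          GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |
                          GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT;

            printf("packed id: 0x%04X\n", id);
            printf("type: 0x%X enum: 0x%X object: 0x%X\n",
                   (id >> OBJECT_TYPE_SHIFT) & 0xF,
                   (id >> ENUM_ID_SHIFT) & 0xF,
                   (id >> OBJECT_ID_SHIFT) & 0xFF);
            return 0;
    }
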
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 651e1fd4622f..a558bfaa0c46 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -808,6 +808,24 @@ static enum bp_result transmitter_control_v1_5(
* (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
* LVDS mode: usPixelClock = pixel clock
*/
+ if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_101010:
+ params.usSymClock =
+ cpu_to_le16((le16_to_cpu(params.usSymClock) * 30) / 24);
+ break;
+ case COLOR_DEPTH_121212:
+ params.usSymClock =
+ cpu_to_le16((le16_to_cpu(params.usSymClock) * 36) / 24);
+ break;
+ case COLOR_DEPTH_161616:
+ params.usSymClock =
+ cpu_to_le16((le16_to_cpu(params.usSymClock) * 48) / 24);
+ break;
+ default:
+ break;
+ }
+ }
if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
result = BP_RESULT_OK;
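
The new HDMI branch scales the symbol clock by the deep color pixel ratio: 30/24 for 10 bpc, 36/24 for 12 bpc, and 48/24 for 16 bpc, with 24 bpp as the 8 bpc baseline (usSymClock appears to carry 10 kHz units, per the surrounding comment). A quick standalone check of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* Scale an 8 bpc symbol clock for HDMI deep color; depth_bpp is bits/pixel. */
    static uint32_t hdmi_sym_clock(uint32_t sym_clock_8bpc, uint32_t depth_bpp)
    {
            return sym_clock_8bpc * depth_bpp / 24; /* 24 bpp = 8 bpc baseline */
    }

    int main(void)
    {
            uint32_t base = 14850; /* 148.5 MHz in 10 kHz units */

            printf("10 bpc: %u\n", (unsigned int)hdmi_sym_clock(base, 30));
            printf("12 bpc: %u\n", (unsigned int)hdmi_sym_clock(base, 36));
            printf("16 bpc: %u\n", (unsigned int)hdmi_sym_clock(base, 48));
            return 0;
    }
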
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 752b08a42d3e..2b5dc499a35e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -59,36 +59,7 @@
bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
GET_INDEX_INTO_MASTER_TABLE(command, fname))
-static void init_dig_encoder_control(struct bios_parser *bp);
-static void init_transmitter_control(struct bios_parser *bp);
-static void init_set_pixel_clock(struct bios_parser *bp);
-static void init_set_crtc_timing(struct bios_parser *bp);
-
-static void init_select_crtc_source(struct bios_parser *bp);
-static void init_enable_crtc(struct bios_parser *bp);
-
-static void init_external_encoder_control(struct bios_parser *bp);
-static void init_enable_disp_power_gating(struct bios_parser *bp);
-static void init_set_dce_clock(struct bios_parser *bp);
-static void init_get_smu_clock_info(struct bios_parser *bp);
-
-void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
-{
- init_dig_encoder_control(bp);
- init_transmitter_control(bp);
- init_set_pixel_clock(bp);
-
- init_set_crtc_timing(bp);
-
- init_select_crtc_source(bp);
- init_enable_crtc(bp);
-
- init_external_encoder_control(bp);
- init_enable_disp_power_gating(bp);
- init_set_dce_clock(bp);
- init_get_smu_clock_info(bp);
-}
static uint32_t bios_cmd_table_para_revision(void *dev,
uint32_t index)
@@ -829,3 +800,20 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
return 0;
}
+void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
+{
+ init_dig_encoder_control(bp);
+ init_transmitter_control(bp);
+ init_set_pixel_clock(bp);
+
+ init_set_crtc_timing(bp);
+
+ init_select_crtc_source(bp);
+ init_enable_crtc(bp);
+
+ init_external_encoder_control(bp);
+ init_enable_disp_power_gating(bp);
+ init_set_dce_clock(bp);
+ init_get_smu_clock_info(bp);
+
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index bbbcef566c55..770ff89ba7e1 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -55,7 +55,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCE_VERSION_11_22:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case DCN_VERSION_1_0:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 95f332ee3e7e..416500e51b8d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -38,7 +38,7 @@ CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
index fc3f98fb09ea..62435bfc274d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
+++ b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
@@ -25,10 +25,9 @@
#ifndef _CALCS_CALCS_LOGGER_H_
#define _CALCS_CALCS_LOGGER_H_
-#define DC_LOGGER \
- logger
+#define DC_LOGGER ctx->logger
-static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calcs_dceip *dceip)
+static void print_bw_calcs_dceip(struct dc_context *ctx, const struct bw_calcs_dceip *dceip)
{
DC_LOG_BANDWIDTH_CALCS("#####################################################################");
@@ -122,7 +121,7 @@ static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calc
}
-static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calcs_vbios *vbios)
+static void print_bw_calcs_vbios(struct dc_context *ctx, const struct bw_calcs_vbios *vbios)
{
DC_LOG_BANDWIDTH_CALCS("#####################################################################");
@@ -181,7 +180,7 @@ static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calc
}
-static void print_bw_calcs_data(struct dal_logger *logger, struct bw_calcs_data *data)
+static void print_bw_calcs_data(struct dc_context *ctx, struct bw_calcs_data *data)
{
int i, j, k;
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 2c4e8f0cb2dc..160d11a15eac 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -3010,9 +3010,9 @@ bool bw_calcs(struct dc_context *ctx,
struct bw_fixed low_yclk = vbios->low_yclk;
if (ctx->dc->debug.bandwidth_calcs_trace) {
- print_bw_calcs_dceip(ctx->logger, dceip);
- print_bw_calcs_vbios(ctx->logger, vbios);
- print_bw_calcs_data(ctx->logger, data);
+ print_bw_calcs_dceip(ctx, dceip);
+ print_bw_calcs_vbios(ctx, vbios);
+ print_bw_calcs_data(ctx, data);
}
calculate_bandwidth(dceip, vbios, data);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 49a4ea45466d..bd039322f697 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -31,6 +31,8 @@
#include "resource.h"
#include "dcn10/dcn10_resource.h"
+#include "dcn10/dcn10_hubbub.h"
+
#include "dcn_calc_math.h"
#define DC_LOGGER \
@@ -248,7 +250,24 @@ static void pipe_ctx_to_e2e_pipe_params (
else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state)
input->src.is_hsplit = true;
- input->src.dcc = pipe->plane_state->dcc.enable;
+ if (pipe->plane_res.dpp->ctx->dc->debug.optimized_watermark) {
+ /*
+ * this method requires us to always re-calculate the watermark when dcc
+ * changes between flips.
+ */
+ input->src.dcc = pipe->plane_state->dcc.enable ? 1 : 0;
+ } else {
+ /*
+ * allow us to disable dcc on the fly without re-calculating WM
+ *
+ * extra overhead for DCC is quite small. for 1080p WM without
+ * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us)
+ */
+ unsigned int bpe;
+
+ input->src.dcc = pipe->plane_res.dpp->ctx->dc->res_pool->hubbub->funcs->
+ dcc_support_pixel_format(pipe->plane_state->format, &bpe) ? 1 : 0;
+ }
input->src.dcc_rate = 1;
input->src.meta_pitch = pipe->plane_state->dcc.grph.meta_pitch;
input->src.source_scan = dm_horz;
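
The trade-off in the hunk above shows up twice in this file (again in dcn_validate_bandwidth below): either recompute watermarks whenever DCC toggles, or conservatively assume DCC whenever the pixel format supports it so DCC can be disabled between flips without recalculation. A distilled sketch of the decision, with a plain predicate standing in for the hubbub dcc_support_pixel_format callback:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for hubbub->funcs->dcc_support_pixel_format(). */
    static bool dcc_supports_format(int format)
    {
            return format != 0;     /* illustrative: format 0 lacks DCC support */
    }

    /* Pick the dcc input for the watermark calculation. */
    static int wm_dcc_input(bool optimized_watermark, bool dcc_enabled, int format)
    {
            if (optimized_watermark)
                    return dcc_enabled ? 1 : 0; /* exact, but WM redone per flip */
            /* conservative: assume DCC whenever the format could use it, so it
             * can be turned off on the fly without recalculating watermarks */
            return dcc_supports_format(format) ? 1 : 0;
    }

    int main(void)
    {
            printf("optimized, dcc off: %d\n", wm_dcc_input(true, false, 1));
            printf("conservative, dcc off: %d\n", wm_dcc_input(false, false, 1));
            return 0;
    }
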
@@ -423,6 +442,10 @@ static void dcn_bw_calc_rq_dlg_ttu(
int total_flip_bytes = 0;
int i;
+ memset(dlg_regs, 0, sizeof(*dlg_regs));
+ memset(ttu_regs, 0, sizeof(*ttu_regs));
+ memset(rq_regs, 0, sizeof(*rq_regs));
+
for (i = 0; i < number_of_planes; i++) {
total_active_bw += v->read_bandwidth[i];
total_prefetch_bw += v->prefetch_bandwidth[i];
@@ -501,6 +524,7 @@ static void split_stream_across_pipes(
resource_build_scaling_params(secondary_pipe);
}
+#if 0
static void calc_wm_sets_and_perf_params(
struct dc_state *context,
struct dcn_bw_internal_vars *v)
@@ -582,6 +606,7 @@ static void calc_wm_sets_and_perf_params(
if (v->voltage_level >= 3)
context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
}
+#endif
static bool dcn_bw_apply_registry_override(struct dc *dc)
{
@@ -651,7 +676,7 @@ static void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
}
static void hack_bounding_box(struct dcn_bw_internal_vars *v,
- struct dc_debug *dbg,
+ struct dc_debug_options *dbg,
struct dc_state *context)
{
if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
@@ -883,7 +908,26 @@ bool dcn_validate_bandwidth(
ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value
|| v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]);
}
- v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
+
+ if (dc->debug.optimized_watermark) {
+ /*
+ * this method requires us to always re-calculate the watermark when dcc
+ * changes between flips.
+ */
+ v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
+ } else {
+ /*
+ * allow us to disable dcc on the fly without re-calculating WM
+ *
+ * the extra overhead for DCC is quite small; for 1080p the WM without
+ * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us)
+ */
+ unsigned int bpe;
+
+ v->dcc_enable[input_idx] = dc->res_pool->hubbub->funcs->dcc_support_pixel_format(
+ pipe->plane_state->format, &bpe) ? dcn_bw_yes : dcn_bw_no;
+ }
+
v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs(
pipe->plane_state->format);
v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs(
@@ -976,43 +1020,60 @@ bool dcn_validate_bandwidth(
bw_consumed = v->fabric_and_dram_bandwidth;
display_pipe_configuration(v);
- calc_wm_sets_and_perf_params(context, v);
- context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 /
+ /*calc_wm_sets_and_perf_params(context, v);*/
+ /* Only 1 set is used by dcn since no noticeable
+ * performance improvement was measured, and due to hw bug DEGVIDCN10-254
+ */
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+ context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
+ context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
+ context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+
+ context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
(ddr4_dram_factor_single_Channel * v->number_of_channels));
if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
- context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
+ context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
}
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
- context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
+ context->bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
+ context->bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
- context->bw.dcn.calc_clk.dispclk_khz = (int)(v->dispclk * 1000);
+ context->bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
if (dc->debug.max_disp_clk == true)
- context->bw.dcn.calc_clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
+ context->bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
- if (context->bw.dcn.calc_clk.dispclk_khz <
+ if (context->bw.dcn.clk.dispclk_khz <
dc->debug.min_disp_clk_khz) {
- context->bw.dcn.calc_clk.dispclk_khz =
+ context->bw.dcn.clk.dispclk_khz =
dc->debug.min_disp_clk_khz;
}
- context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
-
+ context->bw.dcn.clk.dppclk_khz = context->bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
+ context->bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
switch (v->voltage_level) {
case 0:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
break;
case 1:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
break;
case 2:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
break;
default:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
break;
}
@@ -1225,27 +1286,27 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
- struct clocks_value *clocks)
+ struct dc_clocks *clocks)
{
unsigned vdd_level, vdd_level_temp;
unsigned dcf_clk;
/*find a common supported voltage level*/
vdd_level = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_in_khz);
+ dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_khz);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_in_khz);
+ dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_khz);
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_in_khz);
+ dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_khz);
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->dcfclock_in_khz);
+ dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->fclk_khz);
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclock_in_khz);
+ dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclk_khz);
/* find the dcfclk corresponding to that level */
vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
@@ -1331,21 +1392,14 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
{
struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
struct pp_smu_wm_range_sets ranges = {0};
- int max_fclk_khz, nom_fclk_khz, mid_fclk_khz, min_fclk_khz;
- int max_dcfclk_khz, min_dcfclk_khz;
- int socclk_khz;
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
- unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
if (!pp->set_wm_ranges)
return;
kernel_fpu_begin();
- max_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000000 / factor;
- nom_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000000 / factor;
- mid_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000000 / factor;
min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
- max_dcfclk_khz = dc->dcn_soc->dcfclkv_max0p9 * 1000;
min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
socclk_khz = dc->dcn_soc->socclk * 1000;
kernel_fpu_end();
@@ -1353,105 +1407,46 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
/* Now notify PPLib/SMU about which Watermarks sets they should select
* depending on DPM state they are in. And update BW MGR GFX Engine and
* Memory clock member variables for Watermarks calculations for each
- * Watermark Set
+ * Watermark Set. Only one watermark set for dcn1 due to hw bug DEGVIDCN10-254.
*/
/* SOCCLK does not affect anything but writeback for DCN so for now we don't
* care what the value is, hence min to overdrive level
*/
- ranges.num_reader_wm_sets = WM_COUNT;
- ranges.num_writer_wm_sets = WM_COUNT;
+ ranges.num_reader_wm_sets = WM_SET_COUNT;
+ ranges.num_writer_wm_sets = WM_SET_COUNT;
ranges.reader_wm_sets[0].wm_inst = WM_A;
ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
- ranges.reader_wm_sets[0].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[0].max_drain_clk_khz = overdrive;
ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
- ranges.reader_wm_sets[0].max_fill_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[0].max_fill_clk_khz = overdrive;
ranges.writer_wm_sets[0].wm_inst = WM_A;
ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
- ranges.writer_wm_sets[0].max_drain_clk_khz = min_fclk_khz;
-
- ranges.reader_wm_sets[1].wm_inst = WM_B;
- ranges.reader_wm_sets[1].min_drain_clk_khz = min_fclk_khz;
- ranges.reader_wm_sets[1].max_drain_clk_khz = max_dcfclk_khz;
- ranges.reader_wm_sets[1].min_fill_clk_khz = mid_fclk_khz;
- ranges.reader_wm_sets[1].max_fill_clk_khz = mid_fclk_khz;
- ranges.writer_wm_sets[1].wm_inst = WM_B;
- ranges.writer_wm_sets[1].min_fill_clk_khz = socclk_khz;
- ranges.writer_wm_sets[1].max_fill_clk_khz = overdrive;
- ranges.writer_wm_sets[1].min_drain_clk_khz = mid_fclk_khz;
- ranges.writer_wm_sets[1].max_drain_clk_khz = mid_fclk_khz;
-
-
- ranges.reader_wm_sets[2].wm_inst = WM_C;
- ranges.reader_wm_sets[2].min_drain_clk_khz = min_fclk_khz;
- ranges.reader_wm_sets[2].max_drain_clk_khz = max_dcfclk_khz;
- ranges.reader_wm_sets[2].min_fill_clk_khz = nom_fclk_khz;
- ranges.reader_wm_sets[2].max_fill_clk_khz = nom_fclk_khz;
- ranges.writer_wm_sets[2].wm_inst = WM_C;
- ranges.writer_wm_sets[2].min_fill_clk_khz = socclk_khz;
- ranges.writer_wm_sets[2].max_fill_clk_khz = overdrive;
- ranges.writer_wm_sets[2].min_drain_clk_khz = nom_fclk_khz;
- ranges.writer_wm_sets[2].max_drain_clk_khz = nom_fclk_khz;
-
- ranges.reader_wm_sets[3].wm_inst = WM_D;
- ranges.reader_wm_sets[3].min_drain_clk_khz = min_fclk_khz;
- ranges.reader_wm_sets[3].max_drain_clk_khz = max_dcfclk_khz;
- ranges.reader_wm_sets[3].min_fill_clk_khz = max_fclk_khz;
- ranges.reader_wm_sets[3].max_fill_clk_khz = max_fclk_khz;
- ranges.writer_wm_sets[3].wm_inst = WM_D;
- ranges.writer_wm_sets[3].min_fill_clk_khz = socclk_khz;
- ranges.writer_wm_sets[3].max_fill_clk_khz = overdrive;
- ranges.writer_wm_sets[3].min_drain_clk_khz = max_fclk_khz;
- ranges.writer_wm_sets[3].max_drain_clk_khz = max_fclk_khz;
+ ranges.writer_wm_sets[0].max_drain_clk_khz = overdrive;
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
ranges.reader_wm_sets[0].wm_inst = WM_A;
ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
- ranges.reader_wm_sets[0].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[0].max_drain_clk_khz = 5000000;
ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
- ranges.reader_wm_sets[0].max_fill_clk_khz = 800000;
+ ranges.reader_wm_sets[0].max_fill_clk_khz = 5000000;
ranges.writer_wm_sets[0].wm_inst = WM_A;
ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
- ranges.writer_wm_sets[0].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[0].max_fill_clk_khz = 5000000;
ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
- ranges.writer_wm_sets[0].max_drain_clk_khz = 800000;
-
- ranges.reader_wm_sets[1].wm_inst = WM_B;
- ranges.reader_wm_sets[1].min_drain_clk_khz = 300000;
- ranges.reader_wm_sets[1].max_drain_clk_khz = 654000;
- ranges.reader_wm_sets[1].min_fill_clk_khz = 933000;
- ranges.reader_wm_sets[1].max_fill_clk_khz = 933000;
- ranges.writer_wm_sets[1].wm_inst = WM_B;
- ranges.writer_wm_sets[1].min_fill_clk_khz = 200000;
- ranges.writer_wm_sets[1].max_fill_clk_khz = 757000;
- ranges.writer_wm_sets[1].min_drain_clk_khz = 933000;
- ranges.writer_wm_sets[1].max_drain_clk_khz = 933000;
-
-
- ranges.reader_wm_sets[2].wm_inst = WM_C;
- ranges.reader_wm_sets[2].min_drain_clk_khz = 300000;
- ranges.reader_wm_sets[2].max_drain_clk_khz = 654000;
- ranges.reader_wm_sets[2].min_fill_clk_khz = 1067000;
- ranges.reader_wm_sets[2].max_fill_clk_khz = 1067000;
- ranges.writer_wm_sets[2].wm_inst = WM_C;
- ranges.writer_wm_sets[2].min_fill_clk_khz = 200000;
- ranges.writer_wm_sets[2].max_fill_clk_khz = 757000;
- ranges.writer_wm_sets[2].min_drain_clk_khz = 1067000;
- ranges.writer_wm_sets[2].max_drain_clk_khz = 1067000;
-
- ranges.reader_wm_sets[3].wm_inst = WM_D;
- ranges.reader_wm_sets[3].min_drain_clk_khz = 300000;
- ranges.reader_wm_sets[3].max_drain_clk_khz = 654000;
- ranges.reader_wm_sets[3].min_fill_clk_khz = 1200000;
- ranges.reader_wm_sets[3].max_fill_clk_khz = 1200000;
- ranges.writer_wm_sets[3].wm_inst = WM_D;
- ranges.writer_wm_sets[3].min_fill_clk_khz = 200000;
- ranges.writer_wm_sets[3].max_fill_clk_khz = 757000;
- ranges.writer_wm_sets[3].min_drain_clk_khz = 1200000;
- ranges.writer_wm_sets[3].max_drain_clk_khz = 1200000;
+ ranges.writer_wm_sets[0].max_drain_clk_khz = 5000000;
}
+ ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0];
+ ranges.reader_wm_sets[1].wm_inst = WM_B;
+
+ ranges.reader_wm_sets[2] = ranges.writer_wm_sets[0];
+ ranges.reader_wm_sets[2].wm_inst = WM_C;
+
+ ranges.reader_wm_sets[3] = ranges.writer_wm_sets[0];
+ ranges.reader_wm_sets[3].wm_inst = WM_D;
+
/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
pp->set_wm_ranges(&pp->pp_smu, &ranges);
}
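
Because of hw bug DEGVIDCN10-254 only watermark set A carries meaningful limits after this change; the B/C/D reader sets become copies with only the instance id changed (seeded, as the hunk stands, from writer set 0), and the clock ranges are opened up to the 5 GHz overdrive bound. A compact sketch of that collapse, with the struct reduced to the fields the hunk touches:

    #include <stdio.h>

    struct wm_set {
            int inst;
            int min_drain_khz, max_drain_khz;
            int min_fill_khz, max_fill_khz;
    };

    enum { WM_A, WM_B, WM_C, WM_D, WM_SET_COUNT };

    int main(void)
    {
            const int overdrive = 5000000;  /* 5 GHz to cover Overdrive */
            struct wm_set sets[WM_SET_COUNT];
            int i;

            /* Only set A carries real limits; see DEGVIDCN10-254. */
            sets[WM_A] = (struct wm_set){ WM_A, 300000, overdrive,
                                          800000, overdrive };

            /* B/C/D are copies of A with only the instance id changed. */
            for (i = WM_B; i <= WM_D; i++) {
                    sets[i] = sets[WM_A];
                    sets[i].inst = i;
            }

            for (i = 0; i < WM_SET_COUNT; i++)
                    printf("set %d: drain %d..%d fill %d..%d\n", sets[i].inst,
                           sets[i].min_drain_khz, sets[i].max_drain_khz,
                           sets[i].min_fill_khz, sets[i].max_fill_khz);
            return 0;
    }
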
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 644b2187507b..733ac224e7fd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -169,6 +169,22 @@ failed_alloc:
return false;
}
+/**
+ *****************************************************************************
+ * Function: dc_stream_adjust_vmin_vmax
+ *
+ * @brief
+ * Looks up the pipe context of dc_stream_state and updates the
+ * vertical_total_min and vertical_total_max for DRR (Dynamic Refresh
+ * Rate), a power-saving feature that reduces the panel refresh rate
+ * while the screen is static
+ *
+ * @param [in] dc: dc reference
+ * @param [in] streams: Array of dc stream states to adjust
+ * @param [in] num_streams: Number of entries in streams
+ * @param [in] vmin, vmax: Updated values for vertical_total_min and
+ * vertical_total_max
+ *****************************************************************************
+ */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state **streams, int num_streams,
int vmin, int vmax)
@@ -368,6 +384,71 @@ void dc_stream_set_static_screen_events(struct dc *dc,
dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link)
+{
+
+ int i;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i] == link)
+ break;
+ }
+
+ if (i >= dc->link_count)
+ ASSERT_CRITICAL(false);
+
+ dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
+}
+
+void dc_link_perform_link_training(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ int i;
+
+ for (i = 0; i < dc->link_count; i++)
+ dc_link_dp_perform_link_training(
+ dc->links[i],
+ link_setting,
+ skip_video_pattern);
+}
+
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+{
+ link->preferred_link_setting = *link_setting;
+ dp_retrain_link_dp_test(link, link_setting, false);
+}
+
+void dc_link_enable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_enable_hpd(link);
+}
+
+void dc_link_disable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_disable_hpd(link);
+}
+
+
+void dc_link_set_test_pattern(struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ if (link != NULL)
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ p_link_settings,
+ p_custom_pattern,
+ cust_pattern_size);
+}
+
static void destruct(struct dc *dc)
{
dc_release_state(dc->current_state);
@@ -386,9 +467,6 @@ static void destruct(struct dc *dc)
if (dc->ctx->created_bios)
dal_bios_parser_destroy(&dc->ctx->dc_bios);
- if (dc->ctx->logger)
- dal_logger_destroy(&dc->ctx->logger);
-
kfree(dc->ctx);
dc->ctx = NULL;
@@ -398,7 +476,7 @@ static void destruct(struct dc *dc)
kfree(dc->bw_dceip);
dc->bw_dceip = NULL;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
kfree(dc->dcn_soc);
dc->dcn_soc = NULL;
@@ -411,11 +489,10 @@ static void destruct(struct dc *dc)
static bool construct(struct dc *dc,
const struct dc_init_data *init_params)
{
- struct dal_logger *logger;
struct dc_context *dc_ctx;
struct bw_calcs_dceip *dc_dceip;
struct bw_calcs_vbios *dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
struct dcn_soc_bounding_box *dcn_soc;
struct dcn_ip_params *dcn_ip;
#endif
@@ -437,7 +514,7 @@ static bool construct(struct dc *dc,
}
dc->bw_vbios = dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
if (!dcn_soc) {
dm_error("%s: failed to create dcn_soc\n", __func__);
@@ -465,6 +542,7 @@ static bool construct(struct dc *dc,
dc_ctx->driver_context = init_params->driver;
dc_ctx->dc = dc;
dc_ctx->asic_id = init_params->asic_id;
+ dc_ctx->dc_sink_id_count = 0;
dc->ctx = dc_ctx;
dc->current_state = dc_create_state();
@@ -475,14 +553,7 @@ static bool construct(struct dc *dc,
}
/* Create logger */
- logger = dal_logger_create(dc_ctx, init_params->log_mask);
- if (!logger) {
- /* can *not* call logger. call base driver 'print error' */
- dm_error("%s: failed to create Logger!\n", __func__);
- goto fail;
- }
- dc_ctx->logger = logger;
dc_ctx->dce_environment = init_params->dce_environment;
dc_version = resource_parse_asic_id(init_params->asic_id);
@@ -901,9 +972,7 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
for (i = 0; i < context->stream_count; i++) {
struct dc_stream_state *stream = context->streams[i];
- dc_stream_log(stream,
- dc->ctx->logger,
- LOG_DC);
+ dc_stream_log(dc, stream);
}
result = dc_commit_state_no_check(dc, context);
@@ -927,12 +996,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
dc->optimized_required = false;
- /* 3rd param should be true, temp w/a for RV*/
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
-#else
dc->hwss.set_bandwidth(dc, context, true);
-#endif
return true;
}
@@ -1548,7 +1612,7 @@ struct dc_sink *dc_link_add_remote_sink(
struct dc_sink *dc_sink;
enum dc_edid_status edid_status;
- if (len > MAX_EDID_BUFFER_SIZE) {
+ if (len > DC_MAX_EDID_BUFFER_SIZE) {
dm_error("Max EDID buffer size breached!\n");
return NULL;
}
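
The new dc_link_set_drive_settings wrapper validates its link argument by scanning dc->links for pointer identity before dispatching. The same pattern in isolation; note that the hunk as written still indexes dc->links[i] after a failed ASSERT_CRITICAL, so the sketch below returns a sentinel for the absent case instead:

    #include <stdio.h>

    struct dc_link { int id; };

    #define LINK_COUNT 3

    /* Find 'link' in the links table by pointer identity; -1 if absent. */
    static int link_index(struct dc_link *const *links, int count,
                          const struct dc_link *link)
    {
            int i;

            for (i = 0; i < count; i++)
                    if (links[i] == link)
                            return i;
            return -1;
    }

    int main(void)
    {
            struct dc_link a = {0}, b = {1}, c = {2}, stray = {9};
            struct dc_link *links[LINK_COUNT] = { &a, &b, &c };

            printf("index of b: %d\n", link_index(links, LINK_COUNT, &b));
            printf("index of stray: %d\n",
                   link_index(links, LINK_COUNT, &stray)); /* -1: bail out */
            return 0;
    }
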
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 267c76766dea..caece7c13bc6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -348,23 +348,23 @@ void context_clock_trace(
struct dc *dc,
struct dc_state *context)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.calc_clk.dispclk_khz,
- context->bw.dcn.calc_clk.dppclk_khz,
- context->bw.dcn.calc_clk.dcfclk_khz,
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.calc_clk.fclk_khz,
- context->bw.dcn.calc_clk.socclk_khz);
+ context->bw.dcn.clk.dispclk_khz,
+ context->bw.dcn.clk.dppclk_khz,
+ context->bw.dcn.clk.dcfclk_khz,
+ context->bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw.dcn.clk.fclk_khz,
+ context->bw.dcn.clk.socclk_khz);
CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.calc_clk.dispclk_khz,
- context->bw.dcn.calc_clk.dppclk_khz,
- context->bw.dcn.calc_clk.dcfclk_khz,
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.calc_clk.fclk_khz,
- context->bw.dcn.calc_clk.socclk_khz);
+ context->bw.dcn.clk.dispclk_khz,
+ context->bw.dcn.clk.dppclk_khz,
+ context->bw.dcn.clk.dcfclk_khz,
+ context->bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw.dcn.clk.fclk_khz,
+ context->bw.dcn.clk.socclk_khz);
#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 2fa521812d23..a38e7ad36a7e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -33,6 +33,7 @@
#include "dc_link_dp.h"
#include "dc_link_ddc.h"
#include "link_hwss.h"
+#include "opp.h"
#include "link_encoder.h"
#include "hw_sequencer.h"
@@ -59,7 +60,14 @@
enum {
LINK_RATE_REF_FREQ_IN_MHZ = 27,
- PEAK_FACTOR_X1000 = 1006
+ PEAK_FACTOR_X1000 = 1006,
+ /*
+ * Some receivers fail to train on the first try but succeed
+ * on subsequent tries. Two retries should be plenty; if
+ * training still hasn't succeeded by then, we don't expect
+ * it ever to.
+ */
+ LINK_TRAINING_MAX_VERIFY_RETRY = 2
};
/*******************************************************************************
@@ -312,7 +320,7 @@ static enum signal_type get_basic_signal_type(
* @brief
* Check whether there is a dongle on DP connector
*/
-static bool is_dp_sink_present(struct dc_link *link)
+bool dc_link_is_dp_sink_present(struct dc_link *link)
{
enum gpio_result gpio_result;
uint32_t clock_pin = 0;
@@ -405,7 +413,7 @@ static enum signal_type link_detect_sink(
* we assume signal is DVI; it could be corrected
* to HDMI after dongle detection
*/
- if (!is_dp_sink_present(link))
+ if (!dm_helpers_is_dp_sink_present(link))
result = SIGNAL_TYPE_DVI_SINGLE_LINK;
}
}
@@ -497,6 +505,10 @@ static bool detect_dp(
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
link->type = dc_connection_mst_branch;
+ dal_ddc_service_set_transaction_type(
+ link->ddc,
+ sink_caps->transaction_type);
+
/*
* This call will initiate MST topology discovery. Which
* will detect MST ports and add new DRM connector DRM
@@ -524,6 +536,10 @@ static bool detect_dp(
if (reason == DETECT_REASON_BOOT)
boot = true;
+ dm_helpers_dp_update_branch_info(
+ link->ctx,
+ link);
+
if (!dm_helpers_dp_mst_start_top_mgr(
link->ctx,
link, boot)) {
@@ -728,6 +744,18 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
break;
case EDID_NO_RESPONSE:
DC_LOG_ERROR("No EDID read.\n");
+
+ /*
+ * Abort detection for non-DP connectors if we have
+ * no EDID
+ *
+ * DP needs to report as connected if HPD is high
+ * even if we have no EDID in order to go to
+ * fail-safe mode
+ */
+ if (dc_is_hdmi_signal(link->connector_signal) ||
+ dc_is_dvi_signal(link->connector_signal))
+ return false;
default:
break;
}
@@ -736,30 +764,41 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
- // If both edid and dpcd are the same, then discard new sink and revert back to original sink
- if ((same_edid) && (same_dpcd)) {
- link_disconnect_remap(prev_sink, link);
- sink = prev_sink;
- prev_sink = NULL;
- } else {
- if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- sink_caps.transaction_type ==
- DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
- /*
- * TODO debug why Dell 2413 doesn't like
- * two link trainings
- */
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX &&
+ reason != DETECT_REASON_HPDRX) {
+ /*
+ * TODO debug why Dell 2413 doesn't like
+ * two link trainings
+ */
+
+ /* deal with non-mst cases */
+ for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
+ int fail_count = 0;
+
+ dp_verify_link_cap(link,
+ &link->reported_link_cap,
+ &fail_count);
- /* deal with non-mst cases */
- dp_hbr_verify_link_cap(link, &link->reported_link_cap);
+ if (fail_count == 0)
+ break;
}
- /* HDMI-DVI Dongle */
- if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
- !sink->edid_caps.edid_hdmi)
- sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ } else {
+ // If edid is the same, then discard new sink and revert back to original sink
+ if (same_edid) {
+ link_disconnect_remap(prev_sink, link);
+ sink = prev_sink;
+ prev_sink = NULL;
+
+ }
}
+ /* HDMI-DVI Dongle */
+ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+ !sink->edid_caps.edid_hdmi)
+ sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+
/* Connectivity log: detection */
for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
CONN_DATA_DETECT(link,
@@ -1000,6 +1039,9 @@ static bool construct(
link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
+ if (dc_ctx->dc_bios->integrated_info)
+ link->dp_ss_off = !!dc_ctx->dc_bios->integrated_info->dp_ss_control;
+
if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
__func__, init_params->connector_index,
@@ -1284,29 +1326,15 @@ static enum dc_status enable_link_dp(
max_link_rate = LINK_RATE_HIGH3;
if (link_settings.link_rate == max_link_rate) {
- if (state->dis_clk->funcs->set_min_clocks_state) {
- if (state->dis_clk->cur_min_clks_state < DM_PP_CLOCKS_STATE_NOMINAL)
- state->dis_clk->funcs->set_min_clocks_state(
- state->dis_clk, DM_PP_CLOCKS_STATE_NOMINAL);
- } else {
- uint32_t dp_phyclk_in_khz;
- const struct clocks_value clocks_value =
- state->dis_clk->cur_clocks_value;
-
- /* 27mhz = 27000000hz= 27000khz */
- dp_phyclk_in_khz = link_settings.link_rate * 27000;
-
- if (((clocks_value.max_non_dp_phyclk_in_khz != 0) &&
- (dp_phyclk_in_khz > clocks_value.max_non_dp_phyclk_in_khz)) ||
- (dp_phyclk_in_khz > clocks_value.max_dp_phyclk_in_khz)) {
- state->dis_clk->funcs->apply_clock_voltage_request(
- state->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
- dp_phyclk_in_khz,
- false,
- true);
- }
- }
+ struct dc_clocks clocks = state->bw.dcn.clk;
+
+ /* dce/dcn compat, do not update dispclk */
+ clocks.dispclk_khz = 0;
+ /* 27mhz = 27000000hz= 27000khz */
+ clocks.phyclk_khz = link_settings.link_rate * 27000;
+
+ state->dis_clk->funcs->update_clocks(
+ state->dis_clk, &clocks, false);
}
dp_enable_link_phy(
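A quick sanity check of the arithmetic above, using the standard DPCD link-rate encoding (LINK_BW_SET in units of 0.27 Gbps, matching LINK_RATE_REF_FREQ_IN_MHZ = 27):

	/*
	 * RBR  (0x06): 0x06 * 27000 kHz = 162000 kHz = 162 MHz phy clock
	 * HBR  (0x0a): 0x0a * 27000 kHz = 270000 kHz
	 * HBR2 (0x14): 0x14 * 27000 kHz = 540000 kHz
	 * HBR3 (0x1e): 0x1e * 27000 kHz = 810000 kHz
	 *
	 * dispclk_khz = 0 acts as a sentinel telling update_clocks() to
	 * leave the display clock alone (the dce/dcn compat note above).
	 */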
@@ -1861,28 +1889,6 @@ static enum dc_status enable_link(
break;
}
- if (pipe_ctx->stream_res.audio && status == DC_OK) {
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
- /* notify audio driver for audio modes of monitor */
- struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
- unsigned int i, num_audio = 1;
- for (i = 0; i < MAX_PIPES; i++) {
- /*current_state not updated yet*/
- if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
- num_audio++;
- }
-
- pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
-
- if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
- /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
- pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
- /* un-mute audio */
- /* TODO: audio should be per stream rather than per link */
- pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
- pipe_ctx->stream_res.stream_enc, false);
- }
-
return status;
}
@@ -2023,6 +2029,15 @@ enum dc_status dc_link_validate_mode_timing(
return DC_OK;
}
+int dc_link_get_backlight_level(const struct dc_link *link)
+{
+ struct abm *abm = link->ctx->dc->res_pool->abm;
+
+ if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL)
+ return DC_ERROR_UNEXPECTED;
+
+ return (int) abm->funcs->get_current_backlight_8_bit(abm);
+}
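A hedged caller sketch pairing the new getter with the existing setter below; a real caller would pass the active stream instead of NULL and pick a sensible frame_ramp:

	/* Hypothetical helper, not part of this patch */
	static void example_step_backlight_up(struct dc_link *link)
	{
		int level = dc_link_get_backlight_level(link);

		/* DC_ERROR_UNEXPECTED means no ABM block / no getter on this ASIC */
		if (level == DC_ERROR_UNEXPECTED || level >= 0xFF)
			return;

		dc_link_set_backlight_level(link, level + 1, 0, NULL);
	}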
bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
uint32_t frame_ramp, const struct dc_stream_state *stream)
@@ -2415,10 +2430,13 @@ void core_link_enable_stream(
}
}
+ core_dc->hwss.enable_audio_stream(pipe_ctx);
+
/* turn off otg test pattern if enable */
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- COLOR_DEPTH_UNDEFINED);
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ COLOR_DEPTH_UNDEFINED);
core_dc->hwss.enable_stream(pipe_ctx);
@@ -2453,6 +2471,22 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
core_dc->hwss.set_avmute(pipe_ctx, enable);
}
+/**
+ *****************************************************************************
+ * Function: dc_link_enable_hpd_filter
+ *
+ * @brief
+ * If enable is true, programs HPD filter on associated HPD line using
+ * delay_on_disconnect/delay_on_connect values dependent on
+ * link->connector_signal
+ *
+ * If enable is false, programs HPD filter on associated HPD line with no
+ * delays on connect or disconnect
+ *
+ * @param [in] link: pointer to the dc link
+ * @param [in] enable: boolean specifying whether to enable the HPD filter
+ *****************************************************************************
+ */
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
{
struct gpio *hpd;
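A usage sketch, under the assumption that the filter is wanted during normal operation and unwanted when raw HPD edges matter (e.g. compliance testing):

	/* Hypothetical helper, not part of this patch */
	static void example_toggle_hpd_filter(struct dc_link *link, bool want_raw_hpd)
	{
		/* enable == false programs zero connect/disconnect delays */
		dc_link_enable_hpd_filter(link, !want_raw_hpd);
	}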
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 49c2face1e7a..8def0d9fa0ff 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -33,6 +33,7 @@
#include "include/vector.h"
#include "core_types.h"
#include "dc_link_ddc.h"
+#include "aux_engine.h"
#define AUX_POWER_UP_WA_DELAY 500
#define I2C_OVER_AUX_DEFER_WA_DELAY 70
@@ -629,79 +630,61 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
-ssize_t dal_ddc_service_read_dpcd_data(
- struct ddc_service *ddc,
- bool i2c,
- enum i2c_mot_mode mot,
- uint32_t address,
- uint8_t *data,
- uint32_t len)
+int dc_link_aux_transfer(struct ddc_service *ddc,
+ unsigned int address,
+ uint8_t *reply,
+ void *buffer,
+ unsigned int size,
+ enum aux_transaction_type type,
+ enum i2caux_transaction_action action)
{
- struct aux_payload read_payload = {
- .i2c_over_aux = i2c,
- .write = false,
- .address = address,
- .length = len,
- .data = data,
- };
- struct aux_command command = {
- .payloads = &read_payload,
- .number_of_payloads = 1,
- .defer_delay = 0,
- .max_defer_write_retry = 0,
- .mot = mot
- };
-
- if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
- BREAK_TO_DEBUGGER();
- return DDC_RESULT_FAILED_INVALID_OPERATION;
- }
+ struct ddc *ddc_pin = ddc->ddc_pin;
+ struct aux_engine *aux_engine;
+ enum aux_channel_operation_result operation_result;
+ struct aux_request_transaction_data aux_req;
+ struct aux_reply_transaction_data aux_rep;
+ uint8_t returned_bytes = 0;
+ int res = -1;
+ uint32_t status;
- if (dal_i2caux_submit_aux_command(
- ddc->ctx->i2caux,
- ddc->ddc_pin,
- &command)) {
- return (ssize_t)command.payloads->length;
- }
+ memset(&aux_req, 0, sizeof(aux_req));
+ memset(&aux_rep, 0, sizeof(aux_rep));
- return DDC_RESULT_FAILED_OPERATION;
-}
+ aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+ aux_engine->funcs->acquire(aux_engine, ddc_pin);
-enum ddc_result dal_ddc_service_write_dpcd_data(
- struct ddc_service *ddc,
- bool i2c,
- enum i2c_mot_mode mot,
- uint32_t address,
- const uint8_t *data,
- uint32_t len)
-{
- struct aux_payload write_payload = {
- .i2c_over_aux = i2c,
- .write = true,
- .address = address,
- .length = len,
- .data = (uint8_t *)data,
- };
- struct aux_command command = {
- .payloads = &write_payload,
- .number_of_payloads = 1,
- .defer_delay = 0,
- .max_defer_write_retry = 0,
- .mot = mot
- };
-
- if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
- BREAK_TO_DEBUGGER();
- return DDC_RESULT_FAILED_INVALID_OPERATION;
- }
+ aux_req.type = type;
+ aux_req.action = action;
+
+ aux_req.address = address;
+ aux_req.delay = 0;
+ aux_req.length = size;
+ aux_req.data = buffer;
- if (dal_i2caux_submit_aux_command(
- ddc->ctx->i2caux,
- ddc->ddc_pin,
- &command))
- return DDC_RESULT_SUCESSFULL;
+ aux_engine->funcs->submit_channel_request(aux_engine, &aux_req);
+ operation_result = aux_engine->funcs->get_channel_status(aux_engine, &returned_bytes);
- return DDC_RESULT_FAILED_OPERATION;
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ res = returned_bytes;
+
+ if (res <= size && res >= 0)
+ res = aux_engine->funcs->read_channel_reply(aux_engine, size,
+ buffer, reply,
+ &status);
+
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ res = 0;
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ res = -1;
+ break;
+ }
+ aux_engine->funcs->release_engine(aux_engine);
+ return res;
}
/*test only function*/
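A minimal caller sketch for the new raw entry point, assuming the enums added to dc_ddc_types.h below and the standard DP_DPCD_REV (0x000) register; error handling abbreviated:

	/* Hypothetical helper: native AUX read of the receiver caps block */
	static int example_read_dpcd_caps(struct ddc_service *ddc, uint8_t caps[16])
	{
		uint8_t reply;
		int ret = dc_link_aux_transfer(ddc, DP_DPCD_REV, &reply, caps, 16,
					       AUX_TRANSACTION_TYPE_DP,
					       I2CAUX_TRANSACTION_ACTION_DP_READ);

		/* >0: bytes read, 0: HPD disconnected, -1: channel failure */
		return ret == 16 ? 0 : -1;
	}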
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index bdd121485cbc..a7553b6d59c2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -3,6 +3,7 @@
#include "dc.h"
#include "dc_link_dp.h"
#include "dm_helpers.h"
+#include "opp.h"
#include "inc/core_types.h"
#include "link_hwss.h"
@@ -38,7 +39,7 @@ static bool decide_fallback_link_setting(
struct dc_link_settings initial_link_settings,
struct dc_link_settings *current_link_setting,
enum link_training_result training_result);
-static struct dc_link_settings get_common_supported_link_settings (
+static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b);
@@ -93,8 +94,8 @@ static void dpcd_set_link_settings(
uint8_t rate = (uint8_t)
(lt_settings->link_settings.link_rate);
- union down_spread_ctrl downspread = {{0}};
- union lane_count_set lane_count_set = {{0}};
+ union down_spread_ctrl downspread = { {0} };
+ union lane_count_set lane_count_set = { {0} };
uint8_t link_set_buffer[2];
downspread.raw = (uint8_t)
@@ -164,11 +165,11 @@ static void dpcd_set_lt_pattern_and_lane_settings(
const struct link_training_settings *lt_settings,
enum hw_dp_training_pattern pattern)
{
- union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+ union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } };
const uint32_t dpcd_base_lt_offset =
DP_TRAINING_PATTERN_SET;
uint8_t dpcd_lt_buffer[5] = {0};
- union dpcd_training_pattern dpcd_pattern = {{0}};
+ union dpcd_training_pattern dpcd_pattern = { {0} };
uint32_t lane;
uint32_t size_in_bytes;
bool edp_workaround = false; /* TODO link_prop.INTERNAL */
@@ -232,7 +233,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
link,
DP_TRAINING_PATTERN_SET,
&dpcd_pattern.raw,
- sizeof(dpcd_pattern.raw) );
+ sizeof(dpcd_pattern.raw));
core_link_write_dpcd(
link,
@@ -246,7 +247,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
link,
dpcd_base_lt_offset,
dpcd_lt_buffer,
- size_in_bytes + sizeof(dpcd_pattern.raw) );
+ size_in_bytes + sizeof(dpcd_pattern.raw));
link->cur_lane_setting = lt_settings->lane_settings[0];
}
@@ -428,8 +429,8 @@ static void get_lane_status_and_drive_settings(
struct link_training_settings *req_settings)
{
uint8_t dpcd_buf[6] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}};
- struct link_training_settings request_settings = {{0}};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+ struct link_training_settings request_settings = { {0} };
uint32_t lane;
memset(req_settings, '\0', sizeof(struct link_training_settings));
@@ -651,7 +652,7 @@ static bool perform_post_lt_adj_req_sequence(
if (req_drv_setting_changed) {
update_drive_settings(
- lt_settings,req_settings);
+ lt_settings, req_settings);
dc_link_dp_set_drive_settings(link,
lt_settings);
@@ -724,8 +725,8 @@ static enum link_training_result perform_channel_equalization_sequence(
enum hw_dp_training_pattern hw_tr_pattern;
uint32_t retries_ch_eq;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_align_status_updated dpcd_lane_status_updated = {{0}};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
+ union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
hw_tr_pattern = get_supported_tp(link);
@@ -952,7 +953,10 @@ enum link_training_result dc_link_dp_perform_link_training(
* LINK_SPREAD_05_DOWNSPREAD_30KHZ :
* LINK_SPREAD_DISABLED;
*/
- lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+ if (link->dp_ss_off)
+ lt_settings.link_settings.link_spread = LINK_SPREAD_DISABLED;
+ else
+ lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
/* 1. set link rate, lane count and spread*/
dpcd_set_link_settings(link, &lt_settings);
@@ -1027,6 +1031,9 @@ enum link_training_result dc_link_dp_perform_link_training(
lt_settings.lane_settings[0].VOLTAGE_SWING,
lt_settings.lane_settings[0].PRE_EMPHASIS);
+ if (status != LINK_TRAINING_SUCCESS)
+ link->ctx->dc->debug_data.ltFailCount++;
+
return status;
}
@@ -1082,9 +1089,10 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
return max_link_cap;
}
-bool dp_hbr_verify_link_cap(
+bool dp_verify_link_cap(
struct dc_link *link,
- struct dc_link_settings *known_limit_link_setting)
+ struct dc_link_settings *known_limit_link_setting,
+ int *fail_count)
{
struct dc_link_settings max_link_cap = {0};
struct dc_link_settings cur_link_setting = {0};
@@ -1097,6 +1105,11 @@ bool dp_hbr_verify_link_cap(
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
enum link_training_result status;
+ if (link->dc->debug.skip_detection_link_training) {
+ link->verified_link_cap = *known_limit_link_setting;
+ return true;
+ }
+
success = false;
skip_link_training = false;
@@ -1151,6 +1164,8 @@ bool dp_hbr_verify_link_cap(
skip_video_pattern);
if (status == LINK_TRAINING_SUCCESS)
success = true;
+ else
+ (*fail_count)++;
}
if (success)
@@ -1182,7 +1197,7 @@ bool dp_hbr_verify_link_cap(
return success;
}
-static struct dc_link_settings get_common_supported_link_settings (
+static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b)
{
@@ -1428,6 +1443,7 @@ static uint32_t bandwidth_in_kbps_from_link_settings(
uint32_t lane_count = link_setting->lane_count;
uint32_t kbps = link_rate_in_kbps;
+
kbps *= lane_count;
kbps *= 8; /* 8 bits per byte*/
@@ -1445,9 +1461,9 @@ bool dp_validate_mode_timing(
const struct dc_link_settings *link_setting;
/*always DP fail safe mode*/
- if (timing->pix_clk_khz == (uint32_t)25175 &&
- timing->h_addressable == (uint32_t)640 &&
- timing->v_addressable == (uint32_t)480)
+ if (timing->pix_clk_khz == (uint32_t) 25175 &&
+ timing->h_addressable == (uint32_t) 640 &&
+ timing->v_addressable == (uint32_t) 480)
return true;
/* We always use verified link settings */
@@ -1647,22 +1663,26 @@ static enum dc_status read_hpd_rx_irq_data(
irq_data->raw,
sizeof(union hpd_irq_data));
else {
- /* Read 2 bytes at this location,... */
+ /* Read 14 bytes in a single read and then copy only the required fields.
+ * This is more efficient than doing it in two separate AUX reads. */
+
+ uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
+
retval = core_link_read_dpcd(
link,
DP_SINK_COUNT_ESI,
- irq_data->raw,
- 2);
+ tmp,
+ sizeof(tmp));
if (retval != DC_OK)
return retval;
- /* ... then read remaining 4 at the other location */
- retval = core_link_read_dpcd(
- link,
- DP_LANE0_1_STATUS_ESI,
- &irq_data->raw[2],
- 4);
+ irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
}
return retval;
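The single read works because the ESI registers are contiguous in DPCD space, so each field falls out by offset arithmetic into tmp[]. With the usual drm_dp_helper.h addresses (DP_SINK_COUNT_ESI 0x2002 through DP_SINK_STATUS_ESI 0x200f):

	/*
	 * sizeof(tmp) = 0x200f - 0x2002 + 1 = 14 bytes, and e.g.
	 * tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI] = tmp[0x200c - 0x2002]
	 *                                                = tmp[10]
	 */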
@@ -1989,12 +2009,16 @@ static void handle_automated_test(struct dc_link *link)
sizeof(test_response));
}
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data)
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
{
- union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
+ union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
union device_service_irq device_service_clear = { { 0 } };
- enum dc_status result = DDC_RESULT_UNKNOWN;
+ enum dc_status result;
+
bool status = false;
+
+ if (out_link_loss)
+ *out_link_loss = false;
/* For use cases related to down stream connection status change,
* PSR and device auto test, refer to function handle_sst_hpd_irq
* in DAL2.1*/
@@ -2069,6 +2093,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
true, LINK_TRAINING_ATTEMPTS);
status = false;
+ if (out_link_loss)
+ *out_link_loss = true;
}
if (link->type == dc_connection_active_dongle &&
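With the new out parameter, a caller can separate "IRQ serviced" from "link actually lost" and only rebuild the stream in the latter case (hypothetical helper names, not from this patch):

	static void example_handle_hpd_rx(struct dc_link *link)
	{
		union hpd_irq_data irq_data;
		bool link_loss = false;

		dc_link_handle_hpd_rx_irq(link, &irq_data, &link_loss);

		if (link_loss)
			dm_restart_stream(link);	/* hypothetical DM helper */
	}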
@@ -2255,6 +2281,11 @@ static void get_active_converter_info(
link->dpcd_caps.branch_hw_revision =
dp_hw_fw_revision.ieee_hw_rev;
+
+ memmove(
+ link->dpcd_caps.branch_fw_revision,
+ dp_hw_fw_revision.ieee_fw_rev,
+ sizeof(dp_hw_fw_revision.ieee_fw_rev));
}
}
@@ -2303,12 +2334,14 @@ static bool retrieve_link_cap(struct dc_link *link)
{
uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
+ struct dp_device_vendor_id sink_id;
union down_stream_port_count down_strm_port_count;
union edp_configuration_cap edp_config_cap;
union dp_downstream_port_present ds_port = { 0 };
enum dc_status status = DC_ERROR_UNEXPECTED;
uint32_t read_dpcd_retry_cnt = 3;
int i;
+ struct dp_sink_hw_fw_revision dp_hw_fw_revision;
memset(dpcd_data, '\0', sizeof(dpcd_data));
memset(&down_strm_port_count,
@@ -2389,6 +2422,36 @@ static bool retrieve_link_cap(struct dc_link *link)
&link->dpcd_caps.sink_count.raw,
sizeof(link->dpcd_caps.sink_count.raw));
+ /* read sink ieee oui */
+ core_link_read_dpcd(link,
+ DP_SINK_OUI,
+ (uint8_t *)(&sink_id),
+ sizeof(sink_id));
+
+ link->dpcd_caps.sink_dev_id =
+ (sink_id.ieee_oui[0] << 16) +
+ (sink_id.ieee_oui[1] << 8) +
+ (sink_id.ieee_oui[2]);
+
+ memmove(
+ link->dpcd_caps.sink_dev_id_str,
+ sink_id.ieee_device_id,
+ sizeof(sink_id.ieee_device_id));
+
+ core_link_read_dpcd(
+ link,
+ DP_SINK_HW_REVISION_START,
+ (uint8_t *)&dp_hw_fw_revision,
+ sizeof(dp_hw_fw_revision));
+
+ link->dpcd_caps.sink_hw_revision =
+ dp_hw_fw_revision.ieee_hw_rev;
+
+ memmove(
+ link->dpcd_caps.sink_fw_revision,
+ dp_hw_fw_revision.ieee_fw_rev,
+ sizeof(dp_hw_fw_revision.ieee_fw_rev));
+
/* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -2493,8 +2556,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
pipe_ctx->stream->bit_depth_params = params;
pipe_ctx->stream_res.opp->funcs->
opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
-
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
controller_test_pattern, color_depth);
}
break;
@@ -2506,8 +2569,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
pipe_ctx->stream->bit_depth_params = params;
pipe_ctx->stream_res.opp->funcs->
opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
-
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
color_depth);
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 751f3ac9d921..4ca41d6e3bcf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -41,7 +41,7 @@
#include "dce100/dce100_resource.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "dcn10/dcn10_resource.h"
#endif
#include "dce120/dce120_resource.h"
@@ -85,7 +85,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
case FAMILY_AI:
dc_version = DCE_VERSION_12_0;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case FAMILY_RV:
dc_version = DCN_VERSION_1_0;
break;
@@ -136,7 +136,7 @@ struct resource_pool *dc_create_resource_pool(
num_virtual_links, dc);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case DCN_VERSION_1_0:
res_pool = dcn10_create_resource_pool(
num_virtual_links, dc);
@@ -330,6 +330,9 @@ bool resource_are_streams_timing_synchronizable(
!= stream2->timing.pix_clk_khz)
return false;
+ if (stream1->clamping.c_depth != stream2->clamping.c_depth)
+ return false;
+
if (stream1->phy_pix_clk != stream2->phy_pix_clk
&& (!dc_is_dp_signal(stream1->signal)
|| !dc_is_dp_signal(stream2->signal)))
@@ -337,6 +340,20 @@ bool resource_are_streams_timing_synchronizable(
return true;
}
+static bool is_dp_and_hdmi_sharable(
+ struct dc_stream_state *stream1,
+ struct dc_stream_state *stream2)
+{
+ if (stream1->ctx->dc->caps.disable_dp_clk_share)
+ return false;
+
+ if (stream1->clamping.c_depth != COLOR_DEPTH_888 ||
+ stream2->clamping.c_depth != COLOR_DEPTH_888)
+ return false;
+
+ return true;
+
+}
static bool is_sharable_clk_src(
const struct pipe_ctx *pipe_with_clk_src,
@@ -348,7 +365,10 @@ static bool is_sharable_clk_src(
if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
return false;
- if (dc_is_dp_signal(pipe_with_clk_src->stream->signal))
+ if (dc_is_dp_signal(pipe_with_clk_src->stream->signal) ||
+ (dc_is_dp_signal(pipe->stream->signal) &&
+ !is_dp_and_hdmi_sharable(pipe_with_clk_src->stream,
+ pipe->stream)))
return false;
if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
@@ -522,13 +542,12 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
}
}
-static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
struct rect surf_src = plane_state->src_rect;
struct rect surf_clip = plane_state->clip_rect;
- int recout_full_x, recout_full_y;
bool pri_split = pipe_ctx->bottom_pipe &&
pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
bool sec_split = pipe_ctx->top_pipe &&
@@ -597,20 +616,22 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
}
}
/* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
- * * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
- * ratio)
+ * * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
+ * ratio)
*/
- recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ recout_full->x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
* stream->dst.width / stream->src.width -
surf_src.x * plane_state->dst_rect.width / surf_src.width
* stream->dst.width / stream->src.width;
- recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+ recout_full->y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
* stream->dst.height / stream->src.height -
surf_src.y * plane_state->dst_rect.height / surf_src.height
* stream->dst.height / stream->src.height;
- recout_skip->width = pipe_ctx->plane_res.scl_data.recout.x - recout_full_x;
- recout_skip->height = pipe_ctx->plane_res.scl_data.recout.y - recout_full_y;
+ recout_full->width = plane_state->dst_rect.width
+ * stream->dst.width / stream->src.width;
+ recout_full->height = plane_state->dst_rect.height
+ * stream->dst.height / stream->src.height;
}
static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -662,7 +683,7 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
}
-static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
{
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect src = pipe_ctx->plane_state->src_rect;
@@ -680,15 +701,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
flip_vert_scan_dir = true;
else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
flip_horz_scan_dir = true;
- if (pipe_ctx->plane_state->horizontal_mirror)
- flip_horz_scan_dir = !flip_horz_scan_dir;
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
rect_swap_helper(&src);
rect_swap_helper(&data->viewport_c);
rect_swap_helper(&data->viewport);
- }
+ } else if (pipe_ctx->plane_state->horizontal_mirror)
+ flip_horz_scan_dir = !flip_horz_scan_dir;
/*
* Init calculated according to formula:
@@ -708,127 +728,286 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
+ if (!flip_horz_scan_dir) {
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+ int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.h, data->ratios.horz));
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
+ }
+ if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+ int vp_clip = (src.x + src.width) / vpc_div -
+ data->viewport_c.width - data->viewport_c.x;
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
+ }
+
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.x) {
+ int int_part;
+
+ data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
+ data->ratios.horz, data->recout.x - recout_full->x));
+ int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
+ if (int_part < data->taps.h_taps) {
+ int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
+ (data->taps.h_taps - int_part) : data->viewport.x;
+ data->viewport.x -= int_adj;
+ data->viewport.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps) {
+ data->viewport.x += int_part - data->taps.h_taps;
+ data->viewport.width -= int_part - data->taps.h_taps;
+ int_part = data->taps.h_taps;
+ }
+ data->inits.h.value &= 0xffffffff;
+ data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
+ }
- /* Adjust for viewport end clip-off */
- if ((data->viewport.x + data->viewport.width) < (src.x + src.width) && !flip_horz_scan_dir) {
- int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h, data->ratios.horz));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport.y + data->viewport.height) < (src.y + src.height) && !flip_vert_scan_dir) {
- int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v, data->ratios.vert));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div && !flip_horz_scan_dir) {
- int vp_clip = (src.x + src.width) / vpc_div -
- data->viewport_c.width - data->viewport_c.x;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div && !flip_vert_scan_dir) {
- int vp_clip = (src.y + src.height) / vpc_div -
- data->viewport_c.height - data->viewport_c.y;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
- }
-
- /* Adjust for non-0 viewport offset */
- if (data->viewport.x && !flip_horz_scan_dir) {
- int int_part;
-
- data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
- data->ratios.horz, recout_skip->width));
- int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
- if (int_part < data->taps.h_taps) {
- int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
- (data->taps.h_taps - int_part) : data->viewport.x;
- data->viewport.x -= int_adj;
- data->viewport.width += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.h_taps) {
- data->viewport.x += int_part - data->taps.h_taps;
- data->viewport.width -= int_part - data->taps.h_taps;
- int_part = data->taps.h_taps;
+ if (data->viewport_c.x) {
+ int int_part;
+
+ data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
+ data->ratios.horz_c, data->recout.x - recout_full->x));
+ int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
+ if (int_part < data->taps.h_taps_c) {
+ int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
+ (data->taps.h_taps_c - int_part) : data->viewport_c.x;
+ data->viewport_c.x -= int_adj;
+ data->viewport_c.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps_c) {
+ data->viewport_c.x += int_part - data->taps.h_taps_c;
+ data->viewport_c.width -= int_part - data->taps.h_taps_c;
+ int_part = data->taps.h_taps_c;
+ }
+ data->inits.h_c.value &= 0xffffffff;
+ data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
}
- data->inits.h.value &= 0xffffffff;
- data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
- }
-
- if (data->viewport_c.x && !flip_horz_scan_dir) {
- int int_part;
-
- data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
- data->ratios.horz_c, recout_skip->width));
- int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
- if (int_part < data->taps.h_taps_c) {
- int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
- (data->taps.h_taps_c - int_part) : data->viewport_c.x;
- data->viewport_c.x -= int_adj;
- data->viewport_c.width += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.h_taps_c) {
- data->viewport_c.x += int_part - data->taps.h_taps_c;
- data->viewport_c.width -= int_part - data->taps.h_taps_c;
- int_part = data->taps.h_taps_c;
+ } else {
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.x) {
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.h, data->ratios.horz));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.width += int_part < data->viewport.x ? int_part : data->viewport.x;
+ data->viewport.x -= int_part < data->viewport.x ? int_part : data->viewport.x;
}
- data->inits.h_c.value &= 0xffffffff;
- data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
- }
-
- if (data->viewport.y && !flip_vert_scan_dir) {
- int int_part;
-
- data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
- data->ratios.vert, recout_skip->height));
- int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
- if (int_part < data->taps.v_taps) {
- int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
- (data->taps.v_taps - int_part) : data->viewport.y;
- data->viewport.y -= int_adj;
- data->viewport.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps) {
- data->viewport.y += int_part - data->taps.v_taps;
- data->viewport.height -= int_part - data->taps.v_taps;
- int_part = data->taps.v_taps;
+ if (data->viewport_c.x) {
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.width += int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
+ data->viewport_c.x -= int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
}
- data->inits.v.value &= 0xffffffff;
- data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
- }
-
- if (data->viewport_c.y && !flip_vert_scan_dir) {
- int int_part;
-
- data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
- data->ratios.vert_c, recout_skip->height));
- int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
- if (int_part < data->taps.v_taps_c) {
- int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
- (data->taps.v_taps_c - int_part) : data->viewport_c.y;
- data->viewport_c.y -= int_adj;
- data->viewport_c.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps_c) {
- data->viewport_c.y += int_part - data->taps.v_taps_c;
- data->viewport_c.height -= int_part - data->taps.v_taps_c;
- int_part = data->taps.v_taps_c;
+
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+ int int_part;
+ int end_offset = src.x + src.width
+ - data->viewport.x - data->viewport.width;
+
+ /*
+ * this is init if vp had no offset, keep in mind this is from the
+ * right side of vp due to scan direction
+ */
+ data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
+ data->ratios.horz, data->recout.x - recout_full->x));
+ /*
+ * this is the difference between first pixel of viewport available to read
+ * and init position, taking into account scan direction
+ */
+ int_part = dc_fixpt_floor(data->inits.h) - end_offset;
+ if (int_part < data->taps.h_taps) {
+ int int_adj = end_offset >= (data->taps.h_taps - int_part) ?
+ (data->taps.h_taps - int_part) : end_offset;
+ data->viewport.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps) {
+ data->viewport.width += int_part - data->taps.h_taps;
+ int_part = data->taps.h_taps;
+ }
+ data->inits.h.value &= 0xffffffff;
+ data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
+ }
+
+ if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+ int int_part;
+ int end_offset = (src.x + src.width) / vpc_div
+ - data->viewport_c.x - data->viewport_c.width;
+
+ /*
+ * this is init if vp had no offset, keep in mind this is from the
+ * right side of vp due to scan direction
+ */
+ data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
+ data->ratios.horz_c, data->recout.x - recout_full->x));
+ /*
+ * this is the difference between first pixel of viewport available to read
+ * and init position, taking into account scan direction
+ */
+ int_part = dc_fixpt_floor(data->inits.h_c) - end_offset;
+ if (int_part < data->taps.h_taps_c) {
+ int int_adj = end_offset >= (data->taps.h_taps_c - int_part) ?
+ (data->taps.h_taps_c - int_part) : end_offset;
+ data->viewport_c.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps_c) {
+ data->viewport_c.width += int_part - data->taps.h_taps_c;
+ int_part = data->taps.h_taps_c;
+ }
+ data->inits.h_c.value &= 0xffffffff;
+ data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
+ }
+
+ }
+ if (!flip_vert_scan_dir) {
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+ int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.v, data->ratios.vert));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
+ }
+ if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+ int vp_clip = (src.y + src.height) / vpc_div -
+ data->viewport_c.height - data->viewport_c.y;
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
+ }
+
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.y) {
+ int int_part;
+
+ data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
+ data->ratios.vert, data->recout.y - recout_full->y));
+ int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
+ if (int_part < data->taps.v_taps) {
+ int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
+ (data->taps.v_taps - int_part) : data->viewport.y;
+ data->viewport.y -= int_adj;
+ data->viewport.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps) {
+ data->viewport.y += int_part - data->taps.v_taps;
+ data->viewport.height -= int_part - data->taps.v_taps;
+ int_part = data->taps.v_taps;
+ }
+ data->inits.v.value &= 0xffffffff;
+ data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
+ }
+
+ if (data->viewport_c.y) {
+ int int_part;
+
+ data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
+ data->ratios.vert_c, data->recout.y - recout_full->y));
+ int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
+ if (int_part < data->taps.v_taps_c) {
+ int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
+ (data->taps.v_taps_c - int_part) : data->viewport_c.y;
+ data->viewport_c.y -= int_adj;
+ data->viewport_c.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps_c) {
+ data->viewport_c.y += int_part - data->taps.v_taps_c;
+ data->viewport_c.height -= int_part - data->taps.v_taps_c;
+ int_part = data->taps.v_taps_c;
+ }
+ data->inits.v_c.value &= 0xffffffff;
+ data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
+ }
+ } else {
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.y) {
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.v, data->ratios.vert));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.height += int_part < data->viewport.y ? int_part : data->viewport.y;
+ data->viewport.y -= int_part < data->viewport.y ? int_part : data->viewport.y;
+ }
+ if (data->viewport_c.y) {
+ int int_part = dc_fixpt_floor(
+ dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.height += int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
+ data->viewport_c.y -= int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
+ }
+
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+ int int_part;
+ int end_offset = src.y + src.height
+ - data->viewport.y - data->viewport.height;
+
+ /*
+ * this is init if vp had no offset, keep in mind this is from the
+ * bottom of vp due to scan direction
+ */
+ data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
+ data->ratios.vert, data->recout.y - recout_full->y));
+ /*
+ * this is the difference between first pixel of viewport available to read
+ * and init position, taking into account scan direction
+ */
+ int_part = dc_fixpt_floor(data->inits.v) - end_offset;
+ if (int_part < data->taps.v_taps) {
+ int int_adj = end_offset >= (data->taps.v_taps - int_part) ?
+ (data->taps.v_taps - int_part) : end_offset;
+ data->viewport.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps) {
+ data->viewport.height += int_part - data->taps.v_taps;
+ int_part = data->taps.v_taps;
+ }
+ data->inits.v.value &= 0xffffffff;
+ data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
+ }
+
+ if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+ int int_part;
+ int end_offset = (src.y + src.height) / vpc_div
+ - data->viewport_c.y - data->viewport_c.height;
+
+ /*
+ * this is init if vp had no offset, keep in mind this is from the
+ * bottom of vp due to scan direction
+ */
+ data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
+ data->ratios.vert_c, data->recout.y - recout_full->y));
+ /*
+ * this is the difference between first pixel of viewport available to read
+ * and init position, taking into account scan direction
+ */
+ int_part = dc_fixpt_floor(data->inits.v_c) - end_offset;
+ if (int_part < data->taps.v_taps_c) {
+ int int_adj = end_offset >= (data->taps.v_taps_c - int_part) ?
+ (data->taps.v_taps_c - int_part) : end_offset;
+ data->viewport_c.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps_c) {
+ data->viewport_c.height += int_part - data->taps.v_taps_c;
+ int_part = data->taps.v_taps_c;
+ }
+ data->inits.v_c.value &= 0xffffffff;
+ data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
}
- data->inits.v_c.value &= 0xffffffff;
- data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
}
/* Interlaced inits based on final vert inits */
@@ -846,7 +1025,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
- struct view recout_skip = { 0 };
+ struct rect recout_full = { 0 };
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Important: scaling ratio calculation requires pixel format,
@@ -866,7 +1045,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
return false;
- calculate_recout(pipe_ctx, &recout_skip);
+ calculate_recout(pipe_ctx, &recout_full);
/**
* Setting line buffer pixel depth to 24bpp yields banding
@@ -910,7 +1089,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
if (res)
/* May need to re-check lb size after this in some obscure scenario */
- calculate_inits_and_adj_vp(pipe_ctx, &recout_skip);
+ calculate_inits_and_adj_vp(pipe_ctx, &recout_full);
DC_LOG_SCALER(
"%s: Viewport:\nheight:%d width:%d x:%d "
@@ -1054,7 +1233,7 @@ static struct pipe_ctx *acquire_free_pipe_for_stream(
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
static int acquire_first_split_pipe(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -1125,7 +1304,7 @@ bool dc_add_plane_to_context(
free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
if (!free_pipe) {
int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
if (pipe_idx >= 0)
@@ -1546,8 +1725,8 @@ enum dc_status dc_add_stream_to_ctx(
struct dc_context *dc_ctx = dc->ctx;
enum dc_status res;
- if (new_ctx->stream_count >= dc->res_pool->pipe_count) {
- DC_ERROR("Max streams reached, can add stream %p !\n", stream);
+ if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
+ DC_ERROR("Max streams reached, can't add stream %p !\n", stream);
return DC_ERROR_UNEXPECTED;
}
@@ -1723,7 +1902,7 @@ enum dc_status resource_map_pool_resources(
/* acquire new resources */
pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
if (pipe_idx < 0)
pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
#endif
@@ -1789,7 +1968,7 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx)
{
- dst_ctx->dis_clk = dc->res_pool->display_clock;
+ dst_ctx->dis_clk = dc->res_pool->dccg;
}
enum dc_status dc_validate_global_state(
@@ -2347,7 +2526,8 @@ static void set_hdr_static_info_packet(
{
/* HDR Static Metadata info packet for HDR10 */
- if (!stream->hdr_static_metadata.valid)
+ if (!stream->hdr_static_metadata.valid ||
+ stream->use_dynamic_meta)
return;
*info_packet = stream->hdr_static_metadata;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
index 25fae38409ab..9971b515c3eb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -53,6 +53,10 @@ static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init
sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
sink->converter_disable_audio = init_params->converter_disable_audio;
sink->dc_container_id = NULL;
+ sink->sink_id = init_params->link->ctx->dc_sink_id_count;
+ // increment dc_sink_id_count so that no two distinct sinks are ever
+ // assigned the same ID
+ init_params->link->ctx->dc_sink_id_count++;
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 3732a1de9d6c..fdcc8ab19bf3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -30,6 +30,8 @@
#include "ipp.h"
#include "timing_generator.h"
+#define DC_LOGGER dc->ctx->logger
+
/*******************************************************************************
* Private functions
******************************************************************************/
@@ -212,6 +214,8 @@ bool dc_stream_set_cursor_attributes(
}
core_dc->hwss.set_cursor_attribute(pipe_ctx);
+ if (core_dc->hwss.set_cursor_sdr_white_level)
+ core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
if (pipe_to_program)
@@ -317,16 +321,10 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
return ret;
}
-
-void dc_stream_log(
- const struct dc_stream_state *stream,
- struct dal_logger *dm_logger,
- enum dc_log_type log_type)
+void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
{
-
- dm_logger_write(dm_logger,
- log_type,
- "core_stream 0x%x: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
+ DC_LOG_DC(
+ "core_stream 0x%p: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
stream,
stream->src.x,
stream->src.y,
@@ -337,21 +335,18 @@ void dc_stream_log(
stream->dst.width,
stream->dst.height,
stream->output_color_space);
- dm_logger_write(dm_logger,
- log_type,
+ DC_LOG_DC(
"\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
stream->timing.pix_clk_khz,
stream->timing.h_total,
stream->timing.v_total,
stream->timing.pixel_encoding,
stream->timing.display_color_depth);
- dm_logger_write(dm_logger,
- log_type,
+ DC_LOG_DC(
"\tsink name: %s, serial: %d\n",
stream->sink->edid_caps.display_name,
stream->sink->edid_caps.serial_number);
- dm_logger_write(dm_logger,
- log_type,
+ DC_LOG_DC(
"\tlink: %d\n",
stream->sink->link->link_index);
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 68a71adeb12e..8fb3aefd195c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -84,6 +84,17 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
return plane_state;
}
+/**
+ *****************************************************************************
+ * Function: dc_plane_get_status
+ *
+ * @brief
+ * Looks up the pipe context of plane_state and updates the pending status
+ * of the pipe context. Then returns plane_state->status
+ *
+ * @param [in] plane_state: pointer to the plane_state to get the status of
+ *****************************************************************************
+ */
const struct dc_plane_status *dc_plane_get_status(
const struct dc_plane_state *plane_state)
{
@@ -181,7 +192,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
kref_put(&tf->refcount, dc_transfer_func_free);
}
-struct dc_transfer_func *dc_create_transfer_func()
+struct dc_transfer_func *dc_create_transfer_func(void)
{
struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 53c71296f3dd..e2f033d420a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.1.44"
+#define DC_VER "3.1.59"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
@@ -68,6 +68,7 @@ struct dc_caps {
uint32_t max_planes;
uint32_t max_downscale_ratio;
uint32_t i2c_speed_in_khz;
+ uint32_t dmdata_alloc_size;
unsigned int max_cursor_size;
unsigned int max_video_width;
int linear_pitch_alignment;
@@ -77,6 +78,8 @@ struct dc_caps {
bool dual_link_dvi;
bool post_blend_color_processing;
bool force_dp_tps4_for_cp2520;
+ bool disable_dp_clk_share;
+ bool psp_setup_panel_mode;
};
struct dc_dcc_surface_param {
@@ -169,6 +172,12 @@ struct dc_config {
bool disable_disp_pll_sharing;
};
+enum visual_confirm {
+ VISUAL_CONFIRM_DISABLE = 0,
+ VISUAL_CONFIRM_SURFACE = 1,
+ VISUAL_CONFIRM_HDR = 2,
+};
+
enum dcc_option {
DCC_ENABLE = 0,
DCC_DISABLE = 1,
@@ -186,6 +195,10 @@ enum wm_report_mode {
WM_REPORT_OVERRIDE = 1,
};
+/*
+ * For any clock that may differ per pipe,
+ * only the max is stored in this structure
+ */
struct dc_clocks {
int dispclk_khz;
int max_supported_dppclk_khz;
@@ -194,10 +207,11 @@ struct dc_clocks {
int socclk_khz;
int dcfclk_deep_sleep_khz;
int fclk_khz;
+ int phyclk_khz;
};
-struct dc_debug {
- bool surface_visual_confirm;
+struct dc_debug_options {
+ enum visual_confirm visual_confirm;
bool sanity_checks;
bool max_disp_clk;
bool surface_trace;
@@ -228,6 +242,7 @@ struct dc_debug {
int urgent_latency_ns;
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
+ bool optimized_watermark;
int always_scale;
bool disable_pplib_clock_request;
bool disable_clock_gate;
@@ -243,8 +258,19 @@ struct dc_debug {
bool always_use_regamma;
bool p010_mpo_support;
bool recovery_enabled;
+ bool avoid_vbios_exec_table;
+ bool scl_reset_length10;
+ bool hdmi20_disable;
+ bool skip_detection_link_training;
+};
+struct dc_debug_data {
+ uint32_t ltFailCount;
+ uint32_t i2cErrorCount;
+ uint32_t auxErrorCount;
};
+
+
struct dc_state;
struct resource_pool;
struct dce_hwseq;
@@ -253,8 +279,7 @@ struct dc {
struct dc_caps caps;
struct dc_cap_funcs cap_funcs;
struct dc_config config;
- struct dc_debug debug;
-
+ struct dc_debug_options debug;
struct dc_context *ctx;
uint8_t link_count;
@@ -269,7 +294,7 @@ struct dc {
/* Inputs into BW and WM calculations. */
struct bw_calcs_dceip *bw_dceip;
struct bw_calcs_vbios *bw_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
struct dcn_soc_bounding_box *dcn_soc;
struct dcn_ip_params *dcn_ip;
struct display_mode_lib dml;
@@ -289,9 +314,9 @@ struct dc {
bool apply_edp_fast_boot_optimization;
/* FBC compressor */
-#if defined(CONFIG_DRM_AMD_DC_FBC)
struct compressor *fbc_compressor;
-#endif
+
+ struct dc_debug_data debug_data;
};
enum frame_buffer_mode {
@@ -359,6 +384,7 @@ enum dc_transfer_func_type {
TF_TYPE_PREDEFINED,
TF_TYPE_DISTRIBUTED_POINTS,
TF_TYPE_BYPASS,
+ TF_TYPE_HWPWL
};
struct dc_transfer_func_distributed_points {
@@ -378,16 +404,22 @@ enum dc_transfer_func_predefined {
TRANSFER_FUNCTION_PQ,
TRANSFER_FUNCTION_LINEAR,
TRANSFER_FUNCTION_UNITY,
+ TRANSFER_FUNCTION_HLG,
+ TRANSFER_FUNCTION_HLG12,
+ TRANSFER_FUNCTION_GAMMA22
};
struct dc_transfer_func {
struct kref refcount;
- struct dc_transfer_func_distributed_points tf_pts;
enum dc_transfer_func_type type;
enum dc_transfer_func_predefined tf;
/* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/
uint32_t sdr_ref_white_level;
struct dc_context *ctx;
+ union {
+ struct pwl_params pwl;
+ struct dc_transfer_func_distributed_points tf_pts;
+ };
};
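A sketch of how the new anonymous union is meant to be read: type is the tag, so exactly one of pwl / tf_pts is live and the struct pays only for the larger member (hypothetical accessor, not part of this patch):

	static const void *example_tf_payload(const struct dc_transfer_func *tf)
	{
		switch (tf->type) {
		case TF_TYPE_HWPWL:
			return &tf->pwl;	/* hardware PWL parameters */
		case TF_TYPE_DISTRIBUTED_POINTS:
			return &tf->tf_pts;	/* software curve points */
		default:
			return NULL;		/* PREDEFINED / BYPASS carry no payload */
		}
	}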
/*
@@ -617,9 +649,14 @@ struct dpcd_caps {
struct dc_dongle_caps dongle_caps;
uint32_t sink_dev_id;
+ int8_t sink_dev_id_str[6];
+ int8_t sink_hw_revision;
+ int8_t sink_fw_revision[2];
+
uint32_t branch_dev_id;
int8_t branch_dev_name[6];
int8_t branch_hw_revision;
+ int8_t branch_fw_revision[2];
bool allow_invalid_MSA_timing_param;
bool panel_mode_edp;
@@ -662,9 +699,13 @@ struct dc_sink {
struct dc_link *link;
struct dc_context *ctx;
+ uint32_t sink_id;
+
/* private to dc_sink.c */
+ // refcount must be the last member in dc_sink, since we want the
+ // sink structure to be logically cloneable up to (but not including)
+ // refcount
struct kref refcount;
-
};
void dc_sink_retain(struct dc_sink *sink);
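
Keeping refcount last makes the rest of dc_sink copyable as one block. A hedged sketch of the clone this enables; dc_sink_shallow_clone() is hypothetical and not part of this patch:

    #include <linux/kref.h>
    #include <linux/stddef.h>
    #include <linux/string.h>

    /* hypothetical: duplicate the logically cloneable prefix of a sink */
    static void dc_sink_shallow_clone(struct dc_sink *dst,
                    const struct dc_sink *src)
    {
            /* valid only while refcount stays the last member */
            memcpy(dst, src, offsetof(struct dc_sink, refcount));
            kref_init(&dst->refcount);      /* fresh reference count */
    }
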
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index d9b84ec7954c..90082bab71f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -198,6 +198,10 @@ struct dc_vbios_funcs {
void (*post_init)(struct dc_bios *bios);
void (*bios_parser_destroy)(struct dc_bios **dcb);
+
+ enum bp_result (*get_board_layout_info)(
+ struct dc_bios *dcb,
+ struct board_layout_info *board_layout_info);
};
struct bios_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index e1affeb5cc51..05c8c31d8b31 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -25,6 +25,65 @@
#ifndef DC_DDC_TYPES_H_
#define DC_DDC_TYPES_H_
+enum aux_transaction_type {
+ AUX_TRANSACTION_TYPE_DP,
+ AUX_TRANSACTION_TYPE_I2C
+};
+
+
+enum i2caux_transaction_action {
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
+ I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
+
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
+
+ I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
+ I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
+};
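
The action values mirror the AUX command encoding: bits 7:4 carry the command, so 0x40 is the MOT flag on the I2C actions and 0x80/0x90 are the native-AUX write/read commands; submit_channel_request() below folds the value straight into the first header byte. Two illustrative predicates restating that encoding (not part of the patch):

    /* illustrative only: restate the action encoding */
    static bool action_is_i2c_mot(enum i2caux_transaction_action a)
    {
            return (a & 0x40) && !(a & 0x80);  /* MOT variants of I2C actions */
    }

    static bool action_is_native_aux(enum i2caux_transaction_action a)
    {
            return (a & 0x80) != 0;  /* DP_WRITE (0x80) or DP_READ (0x90) */
    }
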
+
+enum aux_channel_operation_result {
+ AUX_CHANNEL_OPERATION_SUCCEEDED,
+ AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
+ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
+ AUX_CHANNEL_OPERATION_FAILED_TIMEOUT,
+ AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
+};
+
+
+struct aux_request_transaction_data {
+ enum aux_transaction_type type;
+ enum i2caux_transaction_action action;
+ /* 20-bit AUX channel transaction address */
+ uint32_t address;
+ /* delay, in 100-microsecond units */
+ uint8_t delay;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum aux_transaction_reply {
+ AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
+ AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
+ AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
+
+ AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
+ AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
+ AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
+
+ AUX_TRANSACTION_REPLY_HPD_DISCON = 0x40,
+
+ AUX_TRANSACTION_REPLY_INVALID = 0xFF
+};
+
+struct aux_reply_transaction_data {
+ enum aux_transaction_reply status;
+ uint32_t length;
+ uint8_t *data;
+};
+
struct i2c_payload {
bool write;
uint8_t address;
@@ -109,7 +168,7 @@ struct ddc_service {
uint32_t address;
uint32_t edid_buf_len;
- uint8_t edid_buf[MAX_EDID_BUFFER_SIZE];
+ uint8_t edid_buf[DC_MAX_EDID_BUFFER_SIZE];
};
#endif /* DC_DDC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 90bccd5ccaa2..da93ab43f2d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -430,7 +430,7 @@ union test_request {
struct {
uint8_t LINK_TRAINING :1;
uint8_t LINK_TEST_PATTRN :1;
- uint8_t EDID_REAT :1;
+ uint8_t EDID_READ :1;
uint8_t PHY_TEST_PATTERN :1;
uint8_t AUDIO_TEST_PATTERN :1;
uint8_t RESERVED :1;
@@ -443,7 +443,8 @@ union test_response {
struct {
uint8_t ACK :1;
uint8_t NO_ACK :1;
- uint8_t RESERVED :6;
+ uint8_t EDID_CHECKSUM_WRITE:1;
+ uint8_t RESERVED :5;
} bits;
uint8_t raw;
};
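
With the checksum bit split out of the reserved field, a driver can acknowledge an EDID-read test and report that the checksum was written back in one byte. A hedged sketch; writing the value to the sink's TEST_RESPONSE DPCD register is assumed to happen elsewhere:

    static uint8_t build_edid_test_response(void)
    {
            union test_response resp = { .raw = 0 };

            resp.bits.ACK = 1;                      /* test request handled */
            resp.bits.EDID_CHECKSUM_WRITE = 1;      /* checksum written back */

            return resp.raw;  /* value for the TEST_RESPONSE register */
    }
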
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index bd0fda0ceb91..e68077e65565 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -255,3 +255,54 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
return reg_val;
}
+
+void generic_write_indirect_reg(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, uint32_t data)
+{
+ dm_write_reg(ctx, addr_index, index);
+ dm_write_reg(ctx, addr_data, data);
+}
+
+uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index)
+{
+ uint32_t value = 0;
+
+ dm_write_reg(ctx, addr_index, index);
+ value = dm_read_reg(ctx, addr_data);
+
+ return value;
+}
+
+
+uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...)
+{
+ uint32_t shift, mask, field_value;
+ int i = 1;
+
+ va_list ap;
+
+ va_start(ap, field_value1);
+
+ reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
+
+ while (i < n) {
+ shift = va_arg(ap, uint32_t);
+ mask = va_arg(ap, uint32_t);
+ field_value = va_arg(ap, uint32_t);
+
+ reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
+ i++;
+ }
+
+ generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
+ va_end(ap);
+
+ return reg_val;
+}
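
Callers pass (shift, mask, value) triples, with n counting all triples including the explicit first one; the loop consumes the remaining n - 1 from the va_list. A hypothetical call with made-up register and field names:

    /* hypothetical: update two fields of an indirectly addressed register */
    reg_val = generic_indirect_reg_update_ex(ctx,
                    mmMY_INDEX, mmMY_DATA,  /* index/data register pair */
                    MY_REG_OFFSET,          /* indirect register to modify */
                    reg_val, 2,             /* current value, two field triples */
                    MY_FIELD_A__SHIFT, MY_FIELD_A_MASK, 1,
                    MY_FIELD_B__SHIFT, MY_FIELD_B_MASK, 0);
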
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index b1f70579d61b..b789cb2b354b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -192,13 +192,14 @@ enum surface_pixel_format {
/*swapped & float*/
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
/*grow graphics here if necessary */
-
+ SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb,
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
+ SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
SURFACE_PIXEL_FORMAT_INVALID
/*grow 444 video here if necessary */
@@ -403,9 +404,11 @@ struct dc_cursor_position {
struct dc_cursor_mi_param {
unsigned int pixel_clk_khz;
unsigned int ref_clk_khz;
- unsigned int viewport_x_start;
- unsigned int viewport_width;
+ struct rect viewport;
struct fixed31_32 h_scale_ratio;
+ struct fixed31_32 v_scale_ratio;
+ enum dc_rotation_angle rotation;
+ bool mirror;
};
/* IPP related types */
@@ -414,6 +417,7 @@ enum {
GAMMA_RGB_256_ENTRIES = 256,
GAMMA_RGB_FLOAT_1024_ENTRIES = 1024,
GAMMA_CS_TFM_1D_ENTRIES = 4096,
+ GAMMA_CUSTOM_ENTRIES = 4096,
GAMMA_MAX_ENTRIES = 4096
};
@@ -421,6 +425,7 @@ enum dc_gamma_type {
GAMMA_RGB_256 = 1,
GAMMA_RGB_FLOAT_1024 = 2,
GAMMA_CS_TFM_1D = 3,
+ GAMMA_CUSTOM = 4,
};
struct dc_csc_transform {
@@ -489,6 +494,7 @@ struct dc_cursor_attributes {
uint32_t height;
enum dc_cursor_color_format color_format;
+ uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
/* In case we support HW Cursor rotation in the future */
enum dc_rotation_angle rotation_angle;
@@ -496,6 +502,11 @@ struct dc_cursor_attributes {
union dc_cursor_attribute_flags attribute_flags;
};
+struct dpp_cursor_attributes {
+ int bias;
+ int scale;
+};
+
/* OPP */
enum dc_color_space {
@@ -567,25 +578,25 @@ struct scaling_taps {
};
enum dc_timing_standard {
- TIMING_STANDARD_UNDEFINED,
- TIMING_STANDARD_DMT,
- TIMING_STANDARD_GTF,
- TIMING_STANDARD_CVT,
- TIMING_STANDARD_CVT_RB,
- TIMING_STANDARD_CEA770,
- TIMING_STANDARD_CEA861,
- TIMING_STANDARD_HDMI,
- TIMING_STANDARD_TV_NTSC,
- TIMING_STANDARD_TV_NTSC_J,
- TIMING_STANDARD_TV_PAL,
- TIMING_STANDARD_TV_PAL_M,
- TIMING_STANDARD_TV_PAL_CN,
- TIMING_STANDARD_TV_SECAM,
- TIMING_STANDARD_EXPLICIT,
+ DC_TIMING_STANDARD_UNDEFINED,
+ DC_TIMING_STANDARD_DMT,
+ DC_TIMING_STANDARD_GTF,
+ DC_TIMING_STANDARD_CVT,
+ DC_TIMING_STANDARD_CVT_RB,
+ DC_TIMING_STANDARD_CEA770,
+ DC_TIMING_STANDARD_CEA861,
+ DC_TIMING_STANDARD_HDMI,
+ DC_TIMING_STANDARD_TV_NTSC,
+ DC_TIMING_STANDARD_TV_NTSC_J,
+ DC_TIMING_STANDARD_TV_PAL,
+ DC_TIMING_STANDARD_TV_PAL_M,
+ DC_TIMING_STANDARD_TV_PAL_CN,
+ DC_TIMING_STANDARD_TV_SECAM,
+ DC_TIMING_STANDARD_EXPLICIT,
/*!< For explicit timings from EDID, VBIOS, etc.*/
- TIMING_STANDARD_USER_OVERRIDE,
+ DC_TIMING_STANDARD_USER_OVERRIDE,
/*!< For mode timing override by user*/
- TIMING_STANDARD_MAX
+ DC_TIMING_STANDARD_MAX
};
enum dc_color_depth {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 8a716baa1203..d43cefbc43d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -73,6 +73,7 @@ struct dc_link {
enum dc_irq_source irq_source_hpd;
enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
bool is_hpd_filter_disabled;
+ bool dp_ss_off;
/* caps is the same as reported_link_cap. link_training uses
* reported_link_cap. Will clean up. TODO
@@ -141,6 +142,8 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
uint32_t frame_ramp, const struct dc_stream_state *stream);
+int dc_link_get_backlight_level(const struct dc_link *dc_link);
+
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
@@ -172,7 +175,7 @@ bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
* false - no change in Downstream port status. No further action required
* from DM. */
bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
- union hpd_irq_data *hpd_irq_dpcd_data);
+ union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);
struct dc_sink_init_data;
@@ -210,10 +213,29 @@ bool dc_link_dp_set_test_pattern(
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
+bool dc_link_is_dp_sink_present(struct dc_link *link);
+
/*
* DPCD access interfaces
*/
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link);
+void dc_link_perform_link_training(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link);
+void dc_link_enable_hpd(const struct dc_link *link);
+void dc_link_disable_hpd(const struct dc_link *link);
+void dc_link_set_test_pattern(struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index d7e6d53bb383..cbfe418006cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -59,6 +59,9 @@ struct dc_stream_state {
struct freesync_context freesync_ctx;
struct dc_info_packet hdr_static_metadata;
+ PHYSICAL_ADDRESS_LOC dmdata_address;
+ bool use_dynamic_meta;
+
struct dc_transfer_func *out_transfer_func;
struct colorspace_transform gamut_remap_matrix;
struct dc_csc_transform csc_color_matrix;
@@ -97,6 +100,7 @@ struct dc_stream_state {
struct dc_cursor_attributes cursor_attributes;
struct dc_cursor_position cursor_position;
+ uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
/* from stream struct */
struct kref refcount;
@@ -144,10 +148,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
/*
* Log the current stream state.
*/
-void dc_stream_log(
- const struct dc_stream_state *stream,
- struct dal_logger *dc_logger,
- enum dc_log_type log_type);
+void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream);
uint8_t dc_get_current_stream_count(struct dc *dc);
struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
@@ -255,6 +256,7 @@ bool dc_stream_set_cursor_position(
struct dc_stream_state *stream,
const struct dc_cursor_position *position);
+
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state **stream,
int num_streams,
@@ -299,9 +301,4 @@ bool dc_stream_get_crtc_position(struct dc *dc,
unsigned int *v_pos,
unsigned int *nom_v_pos);
-void dc_stream_set_static_screen_events(struct dc *dc,
- struct dc_stream_state **stream,
- int num_streams,
- const struct dc_static_screen_events *events);
-
#endif /* DC_STREAM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 76df2534c4a4..8c6eb78b0c3b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -77,8 +77,6 @@ struct dc_context {
struct dc *dc;
void *driver_context; /* e.g. amdgpu_device */
-
- struct dal_logger *logger;
void *cgs_device;
enum dce_environment dce_environment;
@@ -92,13 +90,12 @@ struct dc_context {
bool created_bios;
struct gpio_service *gpio_service;
struct i2caux *i2caux;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+ uint32_t dc_sink_id_count;
uint64_t fbc_gpu_addr;
-#endif
};
-#define MAX_EDID_BUFFER_SIZE 512
+#define DC_MAX_EDID_BUFFER_SIZE 512
#define EDID_BLOCK_SIZE 128
#define MAX_SURFACE_NUM 4
#define NUM_PIXEL_FORMATS 10
@@ -137,13 +134,13 @@ enum plane_stereo_format {
*/
enum dc_edid_connector_type {
- EDID_CONNECTOR_UNKNOWN = 0,
- EDID_CONNECTOR_ANALOG = 1,
- EDID_CONNECTOR_DIGITAL = 10,
- EDID_CONNECTOR_DVI = 11,
- EDID_CONNECTOR_HDMIA = 12,
- EDID_CONNECTOR_MDDI = 14,
- EDID_CONNECTOR_DISPLAYPORT = 15
+ DC_EDID_CONNECTOR_UNKNOWN = 0,
+ DC_EDID_CONNECTOR_ANALOG = 1,
+ DC_EDID_CONNECTOR_DIGITAL = 10,
+ DC_EDID_CONNECTOR_DVI = 11,
+ DC_EDID_CONNECTOR_HDMIA = 12,
+ DC_EDID_CONNECTOR_MDDI = 14,
+ DC_EDID_CONNECTOR_DISPLAYPORT = 15
};
enum dc_edid_status {
@@ -169,7 +166,7 @@ struct dc_cea_audio_mode {
struct dc_edid {
uint32_t length;
- uint8_t raw_edid[MAX_EDID_BUFFER_SIZE];
+ uint8_t raw_edid[DC_MAX_EDID_BUFFER_SIZE];
};
/* When speaker location data block is not available, DEFAULT_SPEAKER_LOCATION
@@ -195,6 +192,7 @@ union display_content_support {
struct dc_panel_patch {
unsigned int dppowerup_delay;
+ unsigned int extra_t12_ms;
};
struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 11401fd8e535..825537bd4545 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -28,7 +28,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o
+dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
new file mode 100644
index 000000000000..3f5b2e6f7553
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -0,0 +1,937 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dce_aux.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define CTX \
+ aux110->base.ctx
+#define REG(reg_name)\
+ (aux110->regs->reg_name)
+
+#define DC_LOGGER \
+ engine->ctx->logger
+
+#include "reg_helper.h"
+
+#define FROM_AUX_ENGINE(ptr) \
+ container_of((ptr), struct aux_engine_dce110, base)
+
+#define FROM_ENGINE(ptr) \
+ FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base))
+
+#define FROM_AUX_ENGINE_ENGINE(ptr) \
+ container_of((ptr), struct aux_engine, base)
+enum {
+ AUX_INVALID_REPLY_RETRY_COUNTER = 1,
+ AUX_TIMED_OUT_RETRY_COUNTER = 2,
+ AUX_DEFER_RETRY_COUNTER = 6
+};
+static void release_engine(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ dal_ddc_close(engine->ddc);
+
+ engine->ddc = NULL;
+
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+}
+
+#define SW_CAN_ACCESS_AUX 1
+#define DMCU_CAN_ACCESS_AUX 2
+
+static bool is_engine_available(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value = REG_READ(AUX_ARB_CONTROL);
+ uint32_t field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+
+ return (field != DMCU_CAN_ACCESS_AUX);
+}
+static bool acquire_engine(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value = REG_READ(AUX_ARB_CONTROL);
+ uint32_t field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+ if (field == DMCU_CAN_ACCESS_AUX)
+ return false;
+ /* enable AUX before request SW to access AUX */
+ value = REG_READ(AUX_CONTROL);
+ field = get_reg_field_value(value,
+ AUX_CONTROL,
+ AUX_EN);
+
+ if (field == 0) {
+ set_reg_field_value(
+ value,
+ 1,
+ AUX_CONTROL,
+ AUX_EN);
+
+ if (REG(AUX_RESET_MASK)) {
+ /* reset the DP_AUX block as part of the enable sequence */
+ set_reg_field_value(
+ value,
+ 1,
+ AUX_CONTROL,
+ AUX_RESET);
+ }
+
+ REG_WRITE(AUX_CONTROL, value);
+
+ if (REG(AUX_RESET_MASK)) {
+ /* poll HW to make sure the reset is done */
+
+ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1,
+ 1, 11);
+
+ set_reg_field_value(
+ value,
+ 0,
+ AUX_CONTROL,
+ AUX_RESET);
+
+ REG_WRITE(AUX_CONTROL, value);
+
+ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0,
+ 1, 11);
+ }
+ } /*if (field)*/
+
+ /* request SW to access AUX */
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1);
+
+ value = REG_READ(AUX_ARB_CONTROL);
+ field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+
+ return (field == SW_CAN_ACCESS_AUX);
+}
+
+#define COMPOSE_AUX_SW_DATA_16_20(command, address) \
+ ((command) | ((0xF0000 & (address)) >> 16))
+
+#define COMPOSE_AUX_SW_DATA_8_15(address) \
+ ((0xFF00 & (address)) >> 8)
+
+#define COMPOSE_AUX_SW_DATA_0_7(address) \
+ (0xFF & (address))
+
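Worked example: a DP_READ (0x90) of DPCD address 0x2016B is split by these macros into the three header bytes 0x92, 0x01 and 0x6B - command nibble plus address bits 19:16, then bits 15:8, then bits 7:0:

    b0 = COMPOSE_AUX_SW_DATA_16_20(0x90, 0x2016B);  /* 0x92 */
    b1 = COMPOSE_AUX_SW_DATA_8_15(0x2016B);         /* 0x01 */
    b2 = COMPOSE_AUX_SW_DATA_0_7(0x2016B);          /* 0x6B */
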
+static void submit_channel_request(
+ struct aux_engine *engine,
+ struct aux_request_transaction_data *request)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+ uint32_t value;
+ uint32_t length;
+
+ bool is_write =
+ ((request->type == AUX_TRANSACTION_TYPE_DP) &&
+ (request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) ||
+ ((request->type == AUX_TRANSACTION_TYPE_I2C) &&
+ ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
+ if (REG(AUXN_IMPCAL)) {
+ /* clear_aux_error */
+ REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+ 1,
+ 0);
+
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+ 1,
+ 0);
+
+ /* force_default_calibrate */
+ REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+ AUXN_IMPCAL_ENABLE, 1,
+ AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+
+ /* bug? why does AUXN update EN and OVERRIDE_EN one by one while AUXP only toggles OVERRIDE? */
+
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+ 1,
+ 0);
+ }
+ /* set the delay and the number of bytes to write */
+
+ /* The length includes
+ * the 4-bit header and the 20-bit address
+ * (that is, 3 bytes).
+ * If the requested length is non-zero,
+ * an additional byte specifying the length is required.
+ */
+
+ length = request->length ? 4 : 3;
+ if (is_write)
+ length += request->length;
+
+ REG_UPDATE_2(AUX_SW_CONTROL,
+ AUX_SW_START_DELAY, request->delay,
+ AUX_SW_WR_BYTES, length);
+
+ /* program action and address and payload data (if 'is_write') */
+ value = REG_UPDATE_4(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_DATA_RW, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address));
+
+ value = REG_SET_2(AUX_SW_DATA, value,
+ AUX_SW_AUTOINCREMENT_DISABLE, 0,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address));
+
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address));
+
+ if (request->length) {
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, request->length - 1);
+ }
+
+ if (is_write) {
+ /* Load the HW buffer with the data to be sent.
+ * This is only relevant for write operations.
+ * For reads, the received data will be
+ * processed in process_channel_reply().
+ */
+ uint32_t i = 0;
+
+ while (i < request->length) {
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, request->data[i]);
+
+ ++i;
+ }
+ }
+
+ REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+ 10, aux110->timeout_period/10);
+ REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
+}
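
Worked example of the length rule above: a 16-byte DPCD write programs AUX_SW_WR_BYTES = 4 + 16 = 20 (3 header bytes, 1 length byte, 16 payload bytes), while a 16-byte read programs only the 4 header bytes - the payload comes back in the reply and is drained by read_channel_reply().
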
+
+static int read_channel_reply(struct aux_engine *engine, uint32_t size,
+ uint8_t *buffer, uint8_t *reply_result,
+ uint32_t *sw_status)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+ uint32_t bytes_replied;
+ uint32_t reply_result_32;
+
+ *sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
+ &bytes_replied);
+
+ /* In case HPD is LOW, exit AUX transaction */
+ if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ return -1;
+
+ /* Need at least the status byte */
+ if (!bytes_replied)
+ return -1;
+
+ REG_UPDATE_1BY1_3(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA_RW, 1);
+
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
+ reply_result_32 = reply_result_32 >> 4;
+ *reply_result = (uint8_t)reply_result_32;
+
+ if (reply_result_32 == 0) { /* ACK */
+ uint32_t i = 0;
+
+ /* First byte was already used to get the command status */
+ --bytes_replied;
+
+ /* Do not overflow buffer */
+ if (bytes_replied > size)
+ return -1;
+
+ while (i < bytes_replied) {
+ uint32_t aux_sw_data_val;
+
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
+ buffer[i] = aux_sw_data_val;
+ ++i;
+ }
+
+ return i;
+ }
+
+ return 0;
+}
+
+static void process_channel_reply(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply)
+{
+ int bytes_replied;
+ uint8_t reply_result;
+ uint32_t sw_status;
+
+ bytes_replied = read_channel_reply(engine, reply->length, reply->data,
+ &reply_result, &sw_status);
+
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+ reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+ return;
+ }
+
+ if (bytes_replied < 0) {
+ /* Need to handle an error case...
+ * Ideally the upper layer won't call this function when the
+ * reply contained 0 bytes, because that indicates an error
+ * was asserted which should already have been handled; for
+ * the hot-plug case this can still happen.
+ */
+ if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ ASSERT_CRITICAL(false);
+ return;
+ }
+ } else {
+
+ switch (reply_result) {
+ case 0: /* ACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
+ break;
+ case 1: /* NACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
+ break;
+ case 2: /* DEFER */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER;
+ break;
+ case 4: /* AUX ACK / I2C NACK */
+ reply->status = AUX_TRANSACTION_REPLY_I2C_NACK;
+ break;
+ case 8: /* AUX ACK / I2C DEFER */
+ reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER;
+ break;
+ default:
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ }
+ }
+}
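
read_channel_reply() shifts the first returned byte right by 4, so the cases above are the DP reply-command nibble: bits 1:0 carry the native AUX reply (ACK/NACK/DEFER = 0/1/2) and bits 3:2 the I2C reply, which is why I2C NACK and I2C DEFER decode as 4 and 8. A compact restatement, for illustration only:

    /* illustrative: map the shifted reply nibble to driver status */
    static enum aux_transaction_reply decode_reply_nibble(uint8_t nibble)
    {
            switch (nibble) {
            case 0: return AUX_TRANSACTION_REPLY_AUX_ACK;
            case 1: return AUX_TRANSACTION_REPLY_AUX_NACK;
            case 2: return AUX_TRANSACTION_REPLY_AUX_DEFER;
            case 4: return AUX_TRANSACTION_REPLY_I2C_NACK;  /* AUX ACK, I2C NACK */
            case 8: return AUX_TRANSACTION_REPLY_I2C_DEFER; /* AUX ACK, I2C DEFER */
            default: return AUX_TRANSACTION_REPLY_INVALID;
            }
    }
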
+
+static enum aux_channel_operation_result get_channel_status(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value;
+
+ if (returned_bytes == NULL) {
+ /*caller pass NULL pointer*/
+ ASSERT_CRITICAL(false);
+ return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN;
+ }
+ *returned_bytes = 0;
+
+ /* poll to make sure that SW_DONE is asserted */
+ value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+ 10, aux110->timeout_period/10);
+
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+
+ /* Note that the following bits are set in 'status.bits'
+ * during CTS 4.2.1.2 (FW 3.3.1):
+ * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
+ * AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H.
+ *
+ * AUX_SW_RX_MIN_COUNT_VIOL is an internal,
+ * HW debugging bit and should be ignored.
+ */
+ if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) {
+ if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+
+ else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) ||
+ (value &
+ AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+
+ *returned_bytes = get_reg_field_value(value,
+ AUX_SW_STATUS,
+ AUX_SW_REPLY_BYTE_COUNT);
+
+ if (*returned_bytes == 0)
+ return
+ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+ else {
+ *returned_bytes -= 1;
+ return AUX_CHANNEL_OPERATION_SUCCEEDED;
+ }
+ } else {
+ /* Timeout: time_elapsed >= aux_engine->timeout_period,
+ * and AUX_SW_STATUS__AUX_SW_HPD_DISCON was not set
+ */
+ ASSERT_CRITICAL(false);
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+ }
+}
+static void process_read_reply(
+ struct aux_engine *engine,
+ struct read_command_context *ctx)
+{
+ engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+ switch (ctx->reply.status) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ ctx->defer_retry_aux = 0;
+ if (ctx->returned_byte > ctx->current_read_length) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else if (ctx->returned_byte < ctx->current_read_length) {
+ ctx->current_read_length -= ctx->returned_byte;
+
+ ctx->offset += ctx->returned_byte;
+
+ ++ctx->invalid_reply_retry_aux_on_ack;
+
+ if (ctx->invalid_reply_retry_aux_on_ack >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ }
+ } else {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->transaction_complete = true;
+ ctx->operation_succeeded = true;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ ctx->operation_succeeded = false;
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ ++ctx->defer_retry_aux;
+
+ if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ ctx->defer_retry_aux = 0;
+
+ ++ctx->defer_retry_i2c;
+
+ if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static void process_read_request(
+ struct aux_engine *engine,
+ struct read_command_context *ctx)
+{
+ enum aux_channel_operation_result operation_result;
+
+ engine->funcs->submit_channel_request(engine, &ctx->request);
+
+ operation_result = engine->funcs->get_channel_status(
+ engine, &ctx->returned_byte);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ if (ctx->returned_byte > ctx->current_read_length) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else {
+ ctx->timed_out_retry_aux = 0;
+ ctx->invalid_reply_retry_aux = 0;
+
+ ctx->reply.length = ctx->returned_byte;
+ ctx->reply.data = ctx->buffer;
+
+ process_read_reply(engine, ctx);
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ ++ctx->invalid_reply_retry_aux;
+
+ if (ctx->invalid_reply_retry_aux >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(400);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ ++ctx->timed_out_retry_aux;
+
+ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else {
+ /* DP 1.2a, table 2-58:
+ * "S3: AUX Request CMD PENDING:
+ * retry 3 times, with 400usec wait on each"
+ * The HW timeout is set to 550usec,
+ * so we should not wait here
+ */
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static bool read_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct read_command_context ctx;
+
+ ctx.buffer = request->payload.data;
+ ctx.current_read_length = request->payload.length;
+ ctx.offset = 0;
+ ctx.timed_out_retry_aux = 0;
+ ctx.invalid_reply_retry_aux = 0;
+ ctx.defer_retry_aux = 0;
+ ctx.defer_retry_i2c = 0;
+ ctx.invalid_reply_retry_aux_on_ack = 0;
+ ctx.transaction_complete = false;
+ ctx.operation_succeeded = true;
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
+ ctx.request.address = request->payload.address;
+ } else if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+ ctx.request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ ctx.request.address = request->payload.address >> 1;
+ } else {
+ /* in DAL2, there was no return in such case */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ctx.request.delay = 0;
+
+ do {
+ memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
+
+ ctx.request.data = ctx.buffer + ctx.offset;
+ ctx.request.length = ctx.current_read_length;
+
+ process_read_request(engine, &ctx);
+
+ request->status = ctx.status;
+
+ if (ctx.operation_succeeded && !ctx.transaction_complete)
+ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+ msleep(engine->delay);
+ } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d",
+ request->payload.address,
+ request->payload.data[0],
+ ctx.operation_succeeded);
+ }
+
+ return ctx.operation_succeeded;
+}
+
+static void process_write_reply(
+ struct aux_engine *engine,
+ struct write_command_context *ctx)
+{
+ engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+ switch (ctx->reply.status) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ ctx->operation_succeeded = true;
+
+ if (ctx->returned_byte) {
+ ctx->request.action = ctx->mot ?
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+ ctx->current_write_length = 0;
+
+ ++ctx->ack_m_retry;
+
+ if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(300);
+ } else {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->defer_retry_aux = 0;
+ ctx->ack_m_retry = 0;
+ ctx->transaction_complete = true;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ ctx->operation_succeeded = false;
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ ++ctx->defer_retry_aux;
+
+ if (ctx->defer_retry_aux > ctx->max_defer_retry) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ ctx->defer_retry_aux = 0;
+ ctx->current_write_length = 0;
+
+ ctx->request.action = ctx->mot ?
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+ ++ctx->defer_retry_i2c;
+
+ if (ctx->defer_retry_i2c > ctx->max_defer_retry) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static void process_write_request(
+ struct aux_engine *engine,
+ struct write_command_context *ctx)
+{
+ enum aux_channel_operation_result operation_result;
+
+ engine->funcs->submit_channel_request(engine, &ctx->request);
+
+ operation_result = engine->funcs->get_channel_status(
+ engine, &ctx->returned_byte);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ ctx->timed_out_retry_aux = 0;
+ ctx->invalid_reply_retry_aux = 0;
+
+ ctx->reply.length = ctx->returned_byte;
+ ctx->reply.data = ctx->reply_data;
+
+ process_write_reply(engine, ctx);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ ++ctx->invalid_reply_retry_aux;
+
+ if (ctx->invalid_reply_retry_aux >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(400);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ ++ctx->timed_out_retry_aux;
+
+ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else {
+ /* DP 1.2a, table 2-58:
+ * "S3: AUX Request CMD PENDING:
+ * retry 3 times, with 400usec wait on each"
+ * The HW timeout is set to 550usec,
+ * so we should not wait here
+ */
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static bool write_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct write_command_context ctx;
+
+ ctx.mot = middle_of_transaction;
+ ctx.buffer = request->payload.data;
+ ctx.current_write_length = request->payload.length;
+ ctx.timed_out_retry_aux = 0;
+ ctx.invalid_reply_retry_aux = 0;
+ ctx.defer_retry_aux = 0;
+ ctx.defer_retry_i2c = 0;
+ ctx.ack_m_retry = 0;
+ ctx.transaction_complete = false;
+ ctx.operation_succeeded = true;
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+ ctx.request.address = request->payload.address;
+ } else if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+ ctx.request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ ctx.request.address = request->payload.address >> 1;
+ } else {
+ /* in DAL2, there was no return in such case */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ctx.request.delay = 0;
+
+ ctx.max_defer_retry =
+ (engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ?
+ engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER;
+
+ do {
+ ctx.request.data = ctx.buffer;
+ ctx.request.length = ctx.current_write_length;
+
+ process_write_request(engine, &ctx);
+
+ request->status = ctx.status;
+
+ if (ctx.operation_succeeded && !ctx.transaction_complete)
+ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+ msleep(engine->delay);
+ } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d",
+ request->payload.address,
+ request->payload.data[0],
+ ctx.operation_succeeded);
+ }
+
+ return ctx.operation_succeeded;
+}
+static bool end_of_transaction_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request)
+{
+ struct i2caux_transaction_request dummy_request;
+ uint8_t dummy_data;
+
+ /* [tcheng] We only need to send the stop (read with MOT = 0)
+ * for I2C-over-AUX, not native AUX
+ */
+
+ if (request->payload.address_space !=
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C)
+ return false;
+
+ dummy_request.operation = request->operation;
+ dummy_request.payload.address_space = request->payload.address_space;
+ dummy_request.payload.address = request->payload.address;
+
+ /*
+ * Add a dummy byte due to some receiver quirk
+ * where one byte is sent along with MOT = 0.
+ * Ideally this should be 0.
+ */
+
+ dummy_request.payload.length = 0;
+ dummy_request.payload.data = &dummy_data;
+
+ if (request->operation == I2CAUX_TRANSACTION_READ)
+ return read_command(engine, &dummy_request, false);
+ else
+ return write_command(engine, &dummy_request, false);
+
+ /* according to Syed, DoDummyMOT is not needed now */
+}
+static bool submit_request(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+
+ bool result;
+ bool mot_used = true;
+
+ switch (request->operation) {
+ case I2CAUX_TRANSACTION_READ:
+ result = read_command(engine, request, mot_used);
+ break;
+ case I2CAUX_TRANSACTION_WRITE:
+ result = write_command(engine, request, mot_used);
+ break;
+ default:
+ result = false;
+ }
+
+ /* [tcheng]
+ * need to send a stop for the last transaction to free up the AUX;
+ * if the above command fails, this becomes the last transaction
+ */
+
+ if (!middle_of_transaction || !result)
+ end_of_transaction_command(engine, request);
+
+ /* mask AUX interrupt */
+
+ return result;
+}
+enum i2caux_engine_type get_engine_type(
+ const struct aux_engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_AUX;
+}
+
+static bool acquire(
+ struct aux_engine *engine,
+ struct ddc *ddc)
+{
+
+ enum gpio_result result;
+
+ if (engine->funcs->is_engine_available) {
+ /*check whether SW could use the engine*/
+ if (!engine->funcs->is_engine_available(engine))
+ return false;
+ }
+
+ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+ GPIO_DDC_CONFIG_TYPE_MODE_AUX);
+
+ if (result != GPIO_RESULT_OK)
+ return false;
+
+ if (!engine->funcs->acquire_engine(engine)) {
+ dal_ddc_close(ddc);
+ return false;
+ }
+
+ engine->ddc = ddc;
+
+ return true;
+}
+
+static const struct aux_engine_funcs aux_engine_funcs = {
+ .acquire_engine = acquire_engine,
+ .submit_channel_request = submit_channel_request,
+ .process_channel_reply = process_channel_reply,
+ .read_channel_reply = read_channel_reply,
+ .get_channel_status = get_channel_status,
+ .is_engine_available = is_engine_available,
+ .release_engine = release_engine,
+ .destroy_engine = dce110_engine_destroy,
+ .submit_request = submit_request,
+ .get_engine_type = get_engine_type,
+ .acquire = acquire,
+};
+
+void dce110_engine_destroy(struct aux_engine **engine)
+{
+
+ struct aux_engine_dce110 *engine110 = FROM_AUX_ENGINE(*engine);
+
+ kfree(engine110);
+ *engine = NULL;
+
+}
+struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ uint32_t timeout_period,
+ const struct dce110_aux_registers *regs)
+{
+ aux_engine110->base.ddc = NULL;
+ aux_engine110->base.ctx = ctx;
+ aux_engine110->base.delay = 0;
+ aux_engine110->base.max_defer_write_retry = 0;
+ aux_engine110->base.funcs = &aux_engine_funcs;
+ aux_engine110->base.inst = inst;
+ aux_engine110->timeout_period = timeout_period;
+ aux_engine110->regs = regs;
+
+ return &aux_engine110->base;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
new file mode 100644
index 000000000000..f7caab85dc80
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_DCE110_H__
+#define __DAL_AUX_ENGINE_DCE110_H__
+#include "aux_engine.h"
+
+#define AUX_COMMON_REG_LIST(id)\
+ SRI(AUX_CONTROL, DP_AUX, id), \
+ SRI(AUX_ARB_CONTROL, DP_AUX, id), \
+ SRI(AUX_SW_DATA, DP_AUX, id), \
+ SRI(AUX_SW_CONTROL, DP_AUX, id), \
+ SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI(AUX_SW_STATUS, DP_AUX, id), \
+ SR(AUXN_IMPCAL), \
+ SR(AUXP_IMPCAL)
+
+struct dce110_aux_registers {
+ uint32_t AUX_CONTROL;
+ uint32_t AUX_ARB_CONTROL;
+ uint32_t AUX_SW_DATA;
+ uint32_t AUX_SW_CONTROL;
+ uint32_t AUX_INTERRUPT_CONTROL;
+ uint32_t AUX_SW_STATUS;
+ uint32_t AUXN_IMPCAL;
+ uint32_t AUXP_IMPCAL;
+
+ uint32_t AUX_RESET_MASK;
+};
+
+enum { /* This is the timeout as defined in DP 1.2a,
+ * 2.3.4 "Detailed uPacket TX AUX CH State Description".
+ */
+ AUX_TIMEOUT_PERIOD = 400,
+
+ /* Ideally, the SW timeout should be just above 550usec
+ * which is programmed in HW.
+ * But the SW timeout of 600usec is not reliable,
+ * because on some systems, delay_in_microseconds()
+ * returns faster than it should.
+ * EPR #379763: by trial-and-error on different systems,
+ * 700usec is the minimum reliable SW timeout for polling
+ * the AUX_SW_STATUS.AUX_SW_DONE bit.
+ * This timeout expires *only* when there are
+ * AUX Error or AUX Timeout conditions - not during normal operation.
+ * During normal operation, AUX_SW_STATUS.AUX_SW_DONE bit is set
+ * at most within ~240usec. That means,
+ * increasing this timeout will not affect normal operation,
+ * and we'll timeout after
+ * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+ * This timeout is especially important for
+ * resume from S3 and CTS.
+ */
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+};
+struct aux_engine_dce110 {
+ struct aux_engine base;
+ const struct dce110_aux_registers *regs;
+ struct {
+ uint32_t aux_control;
+ uint32_t aux_arb_control;
+ uint32_t aux_sw_data;
+ uint32_t aux_sw_control;
+ uint32_t aux_interrupt_control;
+ uint32_t aux_sw_status;
+ } addr;
+ uint32_t timeout_period;
+};
+
+struct aux_engine_dce110_init_data {
+ uint32_t engine_id;
+ uint32_t timeout_period;
+ struct dc_context *ctx;
+ const struct dce110_aux_registers *regs;
+};
+
+struct aux_engine *dce110_aux_engine_construct(
+ struct aux_engine_dce110 *aux_engine110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ uint32_t timeout_period,
+ const struct dce110_aux_registers *regs);
+
+void dce110_engine_destroy(struct aux_engine **engine);
+
+bool dce110_aux_engine_acquire(
+ struct aux_engine *aux_engine,
+ struct ddc *ddc);
+#endif
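
A sketch of how a resource file might instantiate the engine with the timeout documented above (4 * 400 = 1600 usec); the regs table and instance number are illustrative:

    /* hypothetical caller, e.g. in a dce*_resource.c */
    static struct aux_engine_dce110 aux110;

    struct aux_engine *eng = dce110_aux_engine_construct(
                    &aux110, ctx, 0 /* inst */,
                    SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
                    &aux_engine_regs[0]);  /* assumed per-instance register list */
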
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 88b09dd758ba..439dcf3b596c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -133,7 +133,7 @@ static bool calculate_fb_and_fractional_fb_divider(
uint64_t feedback_divider;
feedback_divider =
- (uint64_t)(target_pix_clk_khz * ref_divider * post_divider);
+ (uint64_t)target_pix_clk_khz * ref_divider * post_divider;
feedback_divider *= 10;
/* additional factor, since we divide by 10 afterwards */
feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor);
@@ -145,8 +145,8 @@ static bool calculate_fb_and_fractional_fb_divider(
* of fractional feedback decimal point and the fractional FB Divider precision
* is 2 then the equation becomes (ullfeedbackDivider + 5*100) / (10*100))*/
- feedback_divider += (uint64_t)
- (5 * calc_pll_cs->fract_fb_divider_precision_factor);
+ feedback_divider += 5ULL *
+ calc_pll_cs->fract_fb_divider_precision_factor;
feedback_divider =
div_u64(feedback_divider,
calc_pll_cs->fract_fb_divider_precision_factor * 10);
@@ -203,8 +203,8 @@ static bool calc_fb_divider_checking_tolerance(
&fract_feedback_divider);
/*Actual calculated value*/
- actual_calc_clk_khz = (uint64_t)(feedback_divider *
- calc_pll_cs->fract_fb_divider_factor) +
+ actual_calc_clk_khz = (uint64_t)feedback_divider *
+ calc_pll_cs->fract_fb_divider_factor +
fract_feedback_divider;
actual_calc_clk_khz *= calc_pll_cs->ref_freq_khz;
actual_calc_clk_khz =
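
The cast changes in this file all move the 64-bit promotion ahead of the multiply: in (uint64_t)(a * b * c) the product is computed in 32 bits and can wrap before the cast, while (uint64_t)a * b * c performs the whole chain in 64 bits; 5ULL * factor follows the same pattern. A minimal illustration with made-up values:

    uint32_t a = 600000, b = 120, c = 70;   /* product is ~5.04e9 */

    uint64_t wrong = (uint64_t)(a * b * c); /* 32-bit multiply wraps first */
    uint64_t right = (uint64_t)a * b * c;   /* 5040000000, no overflow */
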
@@ -592,7 +592,7 @@ static uint32_t dce110_get_pix_clk_dividers(
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
case DCE_VERSION_12_0:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case DCN_VERSION_1_0:
#endif
@@ -909,7 +909,7 @@ static bool dce110_program_pix_clk(
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
struct bp_pixel_clock_parameters bp_pc_params = {0};
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
unsigned dp_dto_ref_kHz = 700000;
@@ -982,7 +982,7 @@ static bool dce110_program_pix_clk(
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
case DCE_VERSION_12_0:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case DCN_VERSION_1_0:
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index c45e2f76189e..801bb65707b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -55,7 +55,7 @@
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 8a581c67bf2d..684da3db7568 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -30,7 +30,7 @@
#include "bios_parser_interface.h"
#include "dc.h"
#include "dmcu.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "dcn_calcs.h"
#endif
#include "core_types.h"
@@ -38,7 +38,7 @@
#include "dal_asic_id.h"
#define TO_DCE_CLOCKS(clocks)\
- container_of(clocks, struct dce_disp_clk, base)
+ container_of(clocks, struct dce_dccg, base)
#define REG(reg) \
(clk_dce->regs->reg)
@@ -101,99 +101,78 @@ static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
/*ClocksStatePerformance*/
{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
-/* Starting point for each divider range.*/
-enum dce_divider_range_start {
- DIVIDER_RANGE_01_START = 200, /* 2.00*/
- DIVIDER_RANGE_02_START = 1600, /* 16.00*/
- DIVIDER_RANGE_03_START = 3200, /* 32.00*/
- DIVIDER_RANGE_SCALE_FACTOR = 100 /* Results are scaled up by 100.*/
+/* Starting DID for each range */
+enum dentist_base_divider_id {
+ DENTIST_BASE_DID_1 = 0x08,
+ DENTIST_BASE_DID_2 = 0x40,
+ DENTIST_BASE_DID_3 = 0x60,
+ DENTIST_MAX_DID = 0x80
};
-/* Ranges for divider identifiers (Divider ID or DID)
- mmDENTIST_DISPCLK_CNTL.DENTIST_DISPCLK_WDIVIDER*/
-enum dce_divider_id_register_setting {
- DIVIDER_RANGE_01_BASE_DIVIDER_ID = 0X08,
- DIVIDER_RANGE_02_BASE_DIVIDER_ID = 0X40,
- DIVIDER_RANGE_03_BASE_DIVIDER_ID = 0X60,
- DIVIDER_RANGE_MAX_DIVIDER_ID = 0X80
+/* Starting point and step size for each divider range.*/
+enum dentist_divider_range {
+ DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
+ DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
+ DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
+ DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
+ DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
+ DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
+ DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
};
-/* Step size between each divider within a range.
- Incrementing the DENTIST_DISPCLK_WDIVIDER by one
- will increment the divider by this much.*/
-enum dce_divider_range_step_size {
- DIVIDER_RANGE_01_STEP_SIZE = 25, /* 0.25*/
- DIVIDER_RANGE_02_STEP_SIZE = 50, /* 0.50*/
- DIVIDER_RANGE_03_STEP_SIZE = 100 /* 1.00 */
-};
-
-static bool dce_divider_range_construct(
- struct dce_divider_range *div_range,
- int range_start,
- int range_step,
- int did_min,
- int did_max)
+static int dentist_get_divider_from_did(int did)
{
- div_range->div_range_start = range_start;
- div_range->div_range_step = range_step;
- div_range->did_min = did_min;
- div_range->did_max = did_max;
-
- if (div_range->div_range_step == 0) {
- div_range->div_range_step = 1;
- /*div_range_step cannot be zero*/
- BREAK_TO_DEBUGGER();
+ if (did < DENTIST_BASE_DID_1)
+ did = DENTIST_BASE_DID_1;
+ if (did > DENTIST_MAX_DID)
+ did = DENTIST_MAX_DID;
+
+ if (did < DENTIST_BASE_DID_2) {
+ return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
+ * (did - DENTIST_BASE_DID_1);
+ } else if (did < DENTIST_BASE_DID_3) {
+ return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
+ * (did - DENTIST_BASE_DID_2);
+ } else {
+ return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
+ * (did - DENTIST_BASE_DID_3);
}
- /* Calculate this based on the other inputs.*/
- /* See DividerRange.h for explanation of */
- /* the relationship between divider id (DID) and a divider.*/
- /* Number of Divider IDs = (Maximum Divider ID - Minimum Divider ID)*/
- /* Maximum divider identified in this range =
- * (Number of Divider IDs)*Step size between dividers
- * + The start of this range.*/
- div_range->div_range_end = (did_max - did_min) * range_step
- + range_start;
- return true;
-}
-
-static int dce_divider_range_calc_divider(
- struct dce_divider_range *div_range,
- int did)
-{
- /* Is this DID within our range?*/
- if ((did < div_range->did_min) || (did >= div_range->did_max))
- return INVALID_DIVIDER;
-
- return ((did - div_range->did_min) * div_range->div_range_step)
- + div_range->div_range_start;
-
}
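
Worked example of the DID decoding: DID 0x4A lands in the second range, giving 64 + 2 * (0x4A - 0x40) = 84; with DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4 that encodes a divider of 21.0, so a 2,100,000 kHz DENTIST VCO yields 4 * 2100000 / 84 = 100000 kHz in the caller below.
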
-static int dce_divider_range_get_divider(
- struct dce_divider_range *div_range,
- int ranges_num,
- int did)
+/* SW will adjust the DP REF Clock average value for all purposes
+ * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread,
+ * in all of these cases:
+ * - SS enabled on DP Ref clock and HW de-spreading enabled with SW
+ *   calculations for DS_INCR/DS_MODULO (planned to be the default case)
+ * - SS enabled on DP Ref clock and HW de-spreading enabled with HW
+ *   calculations (not planned to be used, but the average clock should
+ *   still be valid)
+ * - SS enabled on DP Ref clock and HW de-spreading disabled
+ *   (should not be the case with CIK); then SW should program all rates
+ *   generated according to the average value (as with previous ASICs)
+ */
+static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz)
{
- int div = INVALID_DIVIDER;
- int i;
+ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+ dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
+ clk_dce->dprefclk_ss_divider), 200);
+ struct fixed31_32 adj_dp_ref_clk_khz;
- for (i = 0; i < ranges_num; i++) {
- /* Calculate divider with given divider ID*/
- div = dce_divider_range_calc_divider(&div_range[i], did);
- /* Found a valid return divider*/
- if (div != INVALID_DIVIDER)
- break;
+ ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+ adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+ dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
}
- return div;
+ return dp_ref_clk_khz;
}
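
Worked example, assuming the stored percentage is in hundredths of a percent (so the divide by 200 both rescales and halves the down-spread): dprefclk_ss_percentage = 35 and dprefclk_ss_divider = 1000 give ss = 35 / 1000 / 200 = 0.000175, so a 600000 kHz reference is reported as floor(600000 * (1 - 0.000175)) = 599895 kHz.
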
-static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
+static int dce_get_dp_ref_freq_khz(struct dccg *clk)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
int dprefclk_wdivider;
int dprefclk_src_sel;
int dp_ref_clk_khz = 600000;
- int target_div = INVALID_DIVIDER;
+ int target_div;
/* ASSERT DP Reference Clock source is from DFS*/
REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
@@ -204,80 +183,27 @@ static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
/* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
- target_div = dce_divider_range_get_divider(
- clk_dce->divider_ranges,
- DIVIDER_RANGE_MAX,
- dprefclk_wdivider);
-
- if (target_div != INVALID_DIVIDER) {
- /* Calculate the current DFS clock, in kHz.*/
- dp_ref_clk_khz = (DIVIDER_RANGE_SCALE_FACTOR
- * clk_dce->dentist_vco_freq_khz) / target_div;
- }
+ target_div = dentist_get_divider_from_did(dprefclk_wdivider);
- /* SW will adjust DP REF Clock average value for all purposes
- * (DP DTO / DP Audio DTO and DP GTC)
- if clock is spread for all cases:
- -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
- calculations for DS_INCR/DS_MODULO (this is planned to be default case)
- -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
- calculations (not planned to be used, but average clock should still
- be valid)
- -if SS enabled on DP Ref clock and HW de-spreading disabled
- (should not be case with CIK) then SW should program all rates
- generated according to average value (case as with previous ASICs)
- */
- if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
- struct fixed31_32 ss_percentage = dc_fixpt_div_int(
- dc_fixpt_from_fraction(
- clk_dce->dprefclk_ss_percentage,
- clk_dce->dprefclk_ss_divider), 200);
- struct fixed31_32 adj_dp_ref_clk_khz;
+ /* Calculate the current DFS clock, in kHz.*/
+ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_dce->dentist_vco_freq_khz) / target_div;
- ss_percentage = dc_fixpt_sub(dc_fixpt_one,
- ss_percentage);
- adj_dp_ref_clk_khz =
- dc_fixpt_mul_int(
- ss_percentage,
- dp_ref_clk_khz);
- dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
- }
-
- return dp_ref_clk_khz;
+ return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz);
}
-/* TODO: This is DCN DPREFCLK: it could be program by DENTIST by VBIOS
- * or CLK0_CLK11 by SMU. For DCE120, it is wlays 600Mhz. Will re-visit
- * clock implementation
- */
-static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
+static int dce12_get_dp_ref_freq_khz(struct dccg *clk)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
- int dp_ref_clk_khz = 600000;
-
- if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
- struct fixed31_32 ss_percentage = dc_fixpt_div_int(
- dc_fixpt_from_fraction(
- clk_dce->dprefclk_ss_percentage,
- clk_dce->dprefclk_ss_divider), 200);
- struct fixed31_32 adj_dp_ref_clk_khz;
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
- ss_percentage = dc_fixpt_sub(dc_fixpt_one,
- ss_percentage);
- adj_dp_ref_clk_khz =
- dc_fixpt_mul_int(
- ss_percentage,
- dp_ref_clk_khz);
- dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
- }
-
- return dp_ref_clk_khz;
+ return dccg_adjust_dp_ref_freq_for_ss(clk_dce, 600000);
}
+
static enum dm_pp_clocks_state dce_get_required_clocks_state(
- struct display_clock *clk,
- struct state_dependent_clocks *req_clocks)
+ struct dccg *clk,
+ struct dc_clocks *req_clocks)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
int i;
enum dm_pp_clocks_state low_req_clk;
@@ -286,53 +212,30 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
* all required clocks
*/
for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
- if (req_clocks->display_clk_khz >
+ if (req_clocks->dispclk_khz >
clk_dce->max_clks_by_state[i].display_clk_khz
- || req_clocks->pixel_clk_khz >
+ || req_clocks->phyclk_khz >
clk_dce->max_clks_by_state[i].pixel_clk_khz)
break;
low_req_clk = i + 1;
if (low_req_clk > clk->max_clks_state) {
- DC_LOG_WARNING("%s: clocks unsupported disp_clk %d pix_clk %d",
- __func__,
- req_clocks->display_clk_khz,
- req_clocks->pixel_clk_khz);
- low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+		/* clamp to max clock state when only phyclk is too high; a dispclk above the max is invalid */
+ if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz
+ < req_clocks->dispclk_khz)
+ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+ else
+ low_req_clk = clk->max_clks_state;
}
return low_req_clk;
}
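
The search above depends on the state table being ordered by increasing capacity: walk down from the top state, stop at the first one the request no longer fits, and the answer is one state above. A self-contained sketch with made-up tables:

#include <stdio.h>

static int required_state(const int max_disp[], const int max_pix[],
			  int nstates, int want_disp, int want_pix)
{
	int i;

	for (i = nstates - 1; i >= 0; i--)
		if (want_disp > max_disp[i] || want_pix > max_pix[i])
			break;
	return i + 1;	/* lowest state that still satisfies both clocks */
}

int main(void)
{
	int disp[] = { 300000, 600000, 1200000 };
	int pix[]  = { 150000, 300000, 600000 };

	/* 500 MHz dispclk / 200 MHz pixclk fits state 1 but not state 0 */
	printf("state %d\n", required_state(disp, pix, 3, 500000, 200000));
	return 0;
}
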
-static bool dce_clock_set_min_clocks_state(
- struct display_clock *clk,
- enum dm_pp_clocks_state clocks_state)
-{
- struct dm_pp_power_level_change_request level_change_req = {
- clocks_state };
-
- if (clocks_state > clk->max_clks_state) {
- /*Requested state exceeds max supported state.*/
- DC_LOG_WARNING("Requested state exceeds max supported state");
- return false;
- } else if (clocks_state == clk->cur_min_clks_state) {
- /*if we're trying to set the same state, we can just return
- * since nothing needs to be done*/
- return true;
- }
-
- /* get max clock state from PPLIB */
- if (dm_pp_apply_power_level_change_request(clk->ctx, &level_change_req))
- clk->cur_min_clks_state = clocks_state;
-
- return true;
-}
-
static int dce_set_clock(
- struct display_clock *clk,
+ struct dccg *clk,
int requested_clk_khz)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
struct dc_bios *bp = clk->ctx->dc_bios;
int actual_clock = requested_clk_khz;
@@ -364,10 +267,10 @@ static int dce_set_clock(
}
static int dce_psr_set_clock(
- struct display_clock *clk,
+ struct dccg *clk,
int requested_clk_khz)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
struct dc_context *ctx = clk_dce->base.ctx;
struct dc *core_dc = ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
@@ -380,10 +283,10 @@ static int dce_psr_set_clock(
}
static int dce112_set_clock(
- struct display_clock *clk,
+ struct dccg *clk,
int requested_clk_khz)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk->ctx->dc_bios;
struct dc *core_dc = clk->ctx->dc;
@@ -432,9 +335,9 @@ static int dce112_set_clock(
return actual_clock;
}
-static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
+static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
{
- struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
+ struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
struct integrated_info info = { { { 0 } } };
struct dc_firmware_info fw_info = { { 0 } };
@@ -488,11 +391,9 @@ static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
if (!debug->disable_dfs_bypass && bp->integrated_info)
if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
clk_dce->dfs_bypass_enabled = true;
-
- clk_dce->use_max_disp_clk = debug->max_disp_clk;
}
-static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
+static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
{
struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
int ss_info_num = bp->funcs->get_ss_entry_number(
@@ -548,139 +449,265 @@ static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
}
}
-static bool dce_apply_clock_voltage_request(
- struct display_clock *clk,
- enum dm_pp_clock_type clocks_type,
- int clocks_in_khz,
- bool pre_mode_set,
- bool update_dp_phyclk)
+static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
+{
+ return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
+}
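
should_set_clock() is the single gate for every clock write in the new update_clocks paths: an increase always passes, a decrease passes only when the caller marks it safe, and an equal value never reprograms. A tiny hypothetical harness over the same expression:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
	return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
}

int main(void)
{
	assert(should_set_clock(false, 700, 600));	/* increase: always */
	assert(!should_set_clock(false, 500, 600));	/* decrease blocked */
	assert(should_set_clock(true, 500, 600));	/* decrease allowed */
	assert(!should_set_clock(true, 600, 600));	/* equal: never */
	puts("ok");
	return 0;
}
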
+
+static void dce12_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
{
- bool send_request = false;
struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
- switch (clocks_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- case DM_PP_CLOCK_TYPE_PIXELCLK:
- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
- break;
- default:
- BREAK_TO_DEBUGGER();
- return false;
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
+ new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
}
- clock_voltage_req.clk_type = clocks_type;
- clock_voltage_req.clocks_in_khz = clocks_in_khz;
-
- /* to pplib */
- if (pre_mode_set) {
- switch (clocks_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- if (clocks_in_khz > clk->cur_clocks_value.dispclk_in_khz) {
- clk->cur_clocks_value.dispclk_notify_pplib_done = true;
- send_request = true;
- } else
- clk->cur_clocks_value.dispclk_notify_pplib_done = false;
- /* no matter incrase or decrase clock, update current clock value */
- clk->cur_clocks_value.dispclk_in_khz = clocks_in_khz;
- break;
- case DM_PP_CLOCK_TYPE_PIXELCLK:
- if (clocks_in_khz > clk->cur_clocks_value.max_pixelclk_in_khz) {
- clk->cur_clocks_value.pixelclk_notify_pplib_done = true;
- send_request = true;
- } else
- clk->cur_clocks_value.pixelclk_notify_pplib_done = false;
- /* no matter incrase or decrase clock, update current clock value */
- clk->cur_clocks_value.max_pixelclk_in_khz = clocks_in_khz;
- break;
- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
- if (clocks_in_khz > clk->cur_clocks_value.max_non_dp_phyclk_in_khz) {
- clk->cur_clocks_value.phyclk_notigy_pplib_done = true;
- send_request = true;
- } else
- clk->cur_clocks_value.phyclk_notigy_pplib_done = false;
- /* no matter incrase or decrase clock, update current clock value */
- clk->cur_clocks_value.max_non_dp_phyclk_in_khz = clocks_in_khz;
- break;
- default:
- ASSERT(0);
- break;
- }
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
+ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+}
+
+#ifdef CONFIG_X86
+static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
+{
+ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+ bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
+ int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
+ bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
+
+	/* increasing the clock: a two-step ramp is only needed when the current divider is off and the request asks for divide-by-2 */
+ if (dispclk_increase) {
+ /* already divided by 2, no need to reach target clk with 2 steps*/
+ if (cur_dpp_div)
+ return new_clocks->dispclk_khz;
+
+		/* requested dispclk is lower than the maximum supported dppclk,
+		 * no need to reach the target clock in two steps.
+		 */
+ if (new_clocks->dispclk_khz <= disp_clk_threshold)
+ return new_clocks->dispclk_khz;
+
+		/* requested dppclk is not divided by 2, still within the threshold */
+ if (!request_dpp_div)
+ return new_clocks->dispclk_khz;
} else {
- switch (clocks_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- if (!clk->cur_clocks_value.dispclk_notify_pplib_done)
- send_request = true;
- break;
- case DM_PP_CLOCK_TYPE_PIXELCLK:
- if (!clk->cur_clocks_value.pixelclk_notify_pplib_done)
- send_request = true;
- break;
- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
- if (!clk->cur_clocks_value.phyclk_notigy_pplib_done)
- send_request = true;
- break;
- default:
- ASSERT(0);
- break;
- }
+		/* decreasing the clock: a two-step ramp is only needed when the
+		 * current dppclk is divided by 2 and the request is not.
+		 */
+
+ /* current dpp clk not divided by 2, no need to ramp*/
+ if (!cur_dpp_div)
+ return new_clocks->dispclk_khz;
+
+ /* current disp clk is lower than current maximum dpp clk,
+ * no need to ramp
+ */
+ if (dccg->clks.dispclk_khz <= disp_clk_threshold)
+ return new_clocks->dispclk_khz;
+
+		/* requested dppclk needs to be divided by 2 */
+ if (request_dpp_div)
+ return new_clocks->dispclk_khz;
}
- if (send_request) {
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- if (clk->ctx->dce_version >= DCN_VERSION_1_0) {
- struct dc *core_dc = clk->ctx->dc;
- /*use dcfclk request voltage*/
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
- clock_voltage_req.clocks_in_khz =
- dcn_find_dcfclk_suits_all(core_dc, &clk->cur_clocks_value);
- }
+
+ return disp_clk_threshold;
+}
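
Worked through with made-up numbers: ramping dispclk from an undivided 600 MHz to 1.2 GHz while dppclk stays at 600 MHz (divide-by-2 requested) must first stop at max_supported_dppclk_khz, flip the DPP dividers, and only then finish the ramp. A condensed standalone model of the same decision (struct and values illustrative):

#include <stdio.h>

struct clks { int dispclk_khz, dppclk_khz, max_supported_dppclk_khz; };

/* Condensed form of dcn1_determine_dppclk_threshold(): return the first
 * dispclk step; only a divider flip forces a stop at the threshold. */
static int determine_threshold(struct clks cur, struct clks want)
{
	int want_div = want.dispclk_khz > want.dppclk_khz;
	int cur_div = cur.dispclk_khz > cur.dppclk_khz;
	int thresh = want.max_supported_dppclk_khz;

	if (want.dispclk_khz > cur.dispclk_khz) {	/* increasing */
		if (cur_div || want.dispclk_khz <= thresh || !want_div)
			return want.dispclk_khz;
	} else {					/* decreasing */
		if (!cur_div || cur.dispclk_khz <= thresh || want_div)
			return want.dispclk_khz;
	}
	return thresh;	/* two-step ramp: stop here, flip dividers, finish */
}

int main(void)
{
	struct clks cur = { 600000, 600000, 800000 };
	struct clks want = { 1200000, 600000, 800000 };

	printf("first step: %d kHz\n", determine_threshold(cur, want));
	return 0;
}
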
+
+static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
+{
+ struct dc *dc = dccg->ctx->dc;
+ int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
+ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+ int i;
+
+ /* set disp clk to dpp clk threshold */
+ dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold);
+
+	/* update the requested dppclk division option */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+ pipe_ctx->plane_res.dpp,
+ request_dpp_div,
+ true);
+ }
+
+	/* if the target clock differs from the dppclk threshold, set the target clock */
+ if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
+ dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+ dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
+ dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
+}
+
+static void dcn1_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
+{
+ struct dc *dc = dccg->ctx->dc;
+ struct pp_smu_display_requirement_rv *smu_req_cur =
+ &dc->res_pool->pp_smu_req;
+ struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+ bool send_request_to_increase = false;
+ bool send_request_to_lower = false;
+
+ if (new_clocks->phyclk_khz)
+ smu_req.display_count = 1;
+ else
+ smu_req.display_count = 0;
+
+ if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
+ || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
+ || new_clocks->fclk_khz > dccg->clks.fclk_khz
+ || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
+ send_request_to_increase = true;
+
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+ send_request_to_lower = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
+ dccg->clks.fclk_khz = new_clocks->fclk_khz;
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
+ smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ send_request_to_lower = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
+ dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
+
+ send_request_to_lower = true;
+ }
+
+ if (should_set_clock(safe_to_lower,
+ new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
+ dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
+
+ send_request_to_lower = true;
+ }
+
+	/* request dcfclk before programming dppclk so there is
+	 * enough voltage to run dppclk
+	 */
+ if (send_request_to_increase) {
+ /*use dcfclk to request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+ }
+
+ /* dcn1 dppclk is tied to dispclk */
+	/* program dispclk even when unchanged (==) as a w/a for sleep/resume clock ramping issues */
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
+ || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
+ dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+ send_request_to_lower = true;
+ }
+
+ if (!send_request_to_increase && send_request_to_lower) {
+ /*use dcfclk to request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+ }
+
+
+ *smu_req_cur = smu_req;
+}
#endif
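
The sequencing in dcn1_update_clocks() above is deliberate: when any clock rises, the DCFCLK-based voltage request is sent before the clocks are programmed; when everything only falls, it is sent after. A stub-level sketch of that ordering (helpers are illustrative, not driver API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stubs, not driver calls */
static void request_voltage(void) { puts("dcfclk voltage request"); }
static void program_clocks(void)  { puts("program clocks"); }

static void update(bool send_request_to_increase, bool send_request_to_lower)
{
	if (send_request_to_increase)
		request_voltage();	/* raise voltage first */
	program_clocks();
	if (!send_request_to_increase && send_request_to_lower)
		request_voltage();	/* lower voltage last */
}

int main(void)
{
	update(true, false);	/* going up */
	update(false, true);	/* going down */
	return 0;
}
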
- dm_pp_apply_clock_for_voltage_request(
- clk->ctx, &clock_voltage_req);
+
+static void dce_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
+{
+ struct dm_pp_power_level_change_request level_change_req;
+
+ level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
+ /* get max clock state from PPLIB */
+ if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
+ || level_change_req.power_level > dccg->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+ dccg->cur_min_clks_state = level_change_req.power_level;
}
- if (update_dp_phyclk && (clocks_in_khz >
- clk->cur_clocks_value.max_dp_phyclk_in_khz))
- clk->cur_clocks_value.max_dp_phyclk_in_khz = clocks_in_khz;
- return true;
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+ }
}
+#ifdef CONFIG_X86
+static const struct display_clock_funcs dcn1_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .set_dispclk = dce112_set_clock,
+ .update_clocks = dcn1_update_clocks
+};
+#endif
static const struct display_clock_funcs dce120_funcs = {
- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
- .apply_clock_voltage_request = dce_apply_clock_voltage_request,
- .set_clock = dce112_set_clock
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .set_dispclk = dce112_set_clock,
+ .update_clocks = dce12_update_clocks
};
static const struct display_clock_funcs dce112_funcs = {
- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
- .get_required_clocks_state = dce_get_required_clocks_state,
- .set_min_clocks_state = dce_clock_set_min_clocks_state,
- .set_clock = dce112_set_clock
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .set_dispclk = dce112_set_clock,
+ .update_clocks = dce_update_clocks
};
static const struct display_clock_funcs dce110_funcs = {
- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
- .get_required_clocks_state = dce_get_required_clocks_state,
- .set_min_clocks_state = dce_clock_set_min_clocks_state,
- .set_clock = dce_psr_set_clock
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .set_dispclk = dce_psr_set_clock,
+ .update_clocks = dce_update_clocks
};
static const struct display_clock_funcs dce_funcs = {
- .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
- .get_required_clocks_state = dce_get_required_clocks_state,
- .set_min_clocks_state = dce_clock_set_min_clocks_state,
- .set_clock = dce_set_clock
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .set_dispclk = dce_set_clock,
+ .update_clocks = dce_update_clocks
};
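
Each ASIC family now plugs the same entry points into a shared dccg object and callers dispatch through the table without knowing which family they drive. A minimal model of the ops-table pattern (all names illustrative):

#include <stdio.h>

struct dccg;

struct display_clock_funcs {
	int (*set_dispclk)(struct dccg *dccg, int khz);
};

struct dccg {
	const struct display_clock_funcs *funcs;
	int dispclk_khz;
};

static int toy_set_dispclk(struct dccg *dccg, int khz)
{
	dccg->dispclk_khz = khz;	/* a real family would hit registers */
	return khz;			/* actual clock achieved */
}

static const struct display_clock_funcs toy_funcs = {
	.set_dispclk = toy_set_dispclk,
};

int main(void)
{
	struct dccg dccg = { .funcs = &toy_funcs };

	/* callers only see the table, never the family-specific function */
	printf("%d kHz\n", dccg.funcs->set_dispclk(&dccg, 600000));
	return 0;
}
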
-static void dce_disp_clk_construct(
- struct dce_disp_clk *clk_dce,
+static void dce_dccg_construct(
+ struct dce_dccg *clk_dce,
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask)
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
{
- struct display_clock *base = &clk_dce->base;
+ struct dccg *base = &clk_dce->base;
base->ctx = ctx;
base->funcs = &dce_funcs;
@@ -700,34 +727,15 @@ static void dce_disp_clk_construct(
dce_clock_read_integrated_info(clk_dce);
dce_clock_read_ss_info(clk_dce);
-
- dce_divider_range_construct(
- &clk_dce->divider_ranges[DIVIDER_RANGE_01],
- DIVIDER_RANGE_01_START,
- DIVIDER_RANGE_01_STEP_SIZE,
- DIVIDER_RANGE_01_BASE_DIVIDER_ID,
- DIVIDER_RANGE_02_BASE_DIVIDER_ID);
- dce_divider_range_construct(
- &clk_dce->divider_ranges[DIVIDER_RANGE_02],
- DIVIDER_RANGE_02_START,
- DIVIDER_RANGE_02_STEP_SIZE,
- DIVIDER_RANGE_02_BASE_DIVIDER_ID,
- DIVIDER_RANGE_03_BASE_DIVIDER_ID);
- dce_divider_range_construct(
- &clk_dce->divider_ranges[DIVIDER_RANGE_03],
- DIVIDER_RANGE_03_START,
- DIVIDER_RANGE_03_STEP_SIZE,
- DIVIDER_RANGE_03_BASE_DIVIDER_ID,
- DIVIDER_RANGE_MAX_DIVIDER_ID);
}
-struct display_clock *dce_disp_clk_create(
+struct dccg *dce_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask)
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
{
- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
if (clk_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -738,19 +746,19 @@ struct display_clock *dce_disp_clk_create(
dce80_max_clks_by_state,
sizeof(dce80_max_clks_by_state));
- dce_disp_clk_construct(
+ dce_dccg_construct(
clk_dce, ctx, regs, clk_shift, clk_mask);
return &clk_dce->base;
}
-struct display_clock *dce110_disp_clk_create(
+struct dccg *dce110_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask)
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
{
- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
if (clk_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -761,7 +769,7 @@ struct display_clock *dce110_disp_clk_create(
dce110_max_clks_by_state,
sizeof(dce110_max_clks_by_state));
- dce_disp_clk_construct(
+ dce_dccg_construct(
clk_dce, ctx, regs, clk_shift, clk_mask);
clk_dce->base.funcs = &dce110_funcs;
@@ -769,13 +777,13 @@ struct display_clock *dce110_disp_clk_create(
return &clk_dce->base;
}
-struct display_clock *dce112_disp_clk_create(
+struct dccg *dce112_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask)
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask)
{
- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
if (clk_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -786,7 +794,7 @@ struct display_clock *dce112_disp_clk_create(
dce112_max_clks_by_state,
sizeof(dce112_max_clks_by_state));
- dce_disp_clk_construct(
+ dce_dccg_construct(
clk_dce, ctx, regs, clk_shift, clk_mask);
clk_dce->base.funcs = &dce112_funcs;
@@ -794,10 +802,9 @@ struct display_clock *dce112_disp_clk_create(
return &clk_dce->base;
}
-struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
+struct dccg *dce120_dccg_create(struct dc_context *ctx)
{
- struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
- struct dm_pp_clock_levels_with_voltage clk_level_info = {0};
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
if (clk_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -808,28 +815,59 @@ struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
dce120_max_clks_by_state,
sizeof(dce120_max_clks_by_state));
- dce_disp_clk_construct(
+ dce_dccg_construct(
clk_dce, ctx, NULL, NULL, NULL);
clk_dce->base.funcs = &dce120_funcs;
- /* new in dce120 */
- if (!ctx->dc->debug.disable_pplib_clock_request &&
- dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clk_level_info)
- && clk_level_info.num_levels)
- clk_dce->max_displ_clk_in_khz =
- clk_level_info.data[clk_level_info.num_levels - 1].clocks_in_khz;
- else
- clk_dce->max_displ_clk_in_khz = 1133000;
+ return &clk_dce->base;
+}
+
+#ifdef CONFIG_X86
+struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+{
+ struct dc_debug_options *debug = &ctx->dc->debug;
+ struct dc_bios *bp = ctx->dc_bios;
+ struct dc_firmware_info fw_info = { { 0 } };
+ struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ clk_dce->base.ctx = ctx;
+ clk_dce->base.funcs = &dcn1_funcs;
+
+ clk_dce->dfs_bypass_disp_clk = 0;
+
+ clk_dce->dprefclk_ss_percentage = 0;
+ clk_dce->dprefclk_ss_divider = 1000;
+ clk_dce->ss_on_dprefclk = false;
+
+ if (bp->integrated_info)
+ clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (clk_dce->dentist_vco_freq_khz == 0) {
+ bp->funcs->get_firmware_info(bp, &fw_info);
+ clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+ if (clk_dce->dentist_vco_freq_khz == 0)
+ clk_dce->dentist_vco_freq_khz = 3600000;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_dce->dfs_bypass_enabled = true;
+
+ dce_clock_read_ss_info(clk_dce);
return &clk_dce->base;
}
+#endif
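
dcn1_dccg_create() resolves the DENTIST VCO frequency through a three-stage fallback: integrated info, then firmware info, then a hard-coded 3.6 GHz. The same chain in isolation (helper name and values illustrative):

#include <stdio.h>

static int pick_vco_khz(int integrated_khz, int fw_khz)
{
	int vco = integrated_khz;

	if (vco == 0)
		vco = fw_khz;		/* fall back to firmware info */
	if (vco == 0)
		vco = 3600000;		/* last resort: 3.6 GHz default */
	return vco;
}

int main(void)
{
	printf("%d kHz\n", pick_vco_khz(0, 0)); /* 3600000 */
	return 0;
}
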
-void dce_disp_clk_destroy(struct display_clock **disp_clk)
+void dce_dccg_destroy(struct dccg **dccg)
{
- struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(*disp_clk);
+ struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg);
kfree(clk_dce);
- *disp_clk = NULL;
+ *dccg = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
index 0e717e0dc8f0..e5e44adc6c27 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
@@ -33,6 +33,9 @@
.DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
.DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
+#define CLK_COMMON_REG_LIST_DCN_BASE() \
+ SR(DENTIST_DISPCLK_CNTL)
+
#define CLK_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -40,58 +43,37 @@
CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
+#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+
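
The CLK_SF() entries rely on preprocessor token pasting: reg ## __ ## field ## post_fix expands to the header-generated __SHIFT and _MASK constants. A toy reconstruction with stand-in names — the real macro additionally emits a designated-initializer field name:

#include <stdio.h>

/* Stand-ins for the constants the DCE register headers would generate */
#define REG__FIELD__SHIFT 4
#define REG__FIELD_MASK   0xF0

#define CLK_SF(reg, field, post_fix) reg ## __ ## field ## post_fix

int main(void)
{
	/* CLK_SF(REG, FIELD, __SHIFT) pastes to REG__FIELD__SHIFT, etc. */
	printf("shift=%d mask=0x%x\n",
	       CLK_SF(REG, FIELD, __SHIFT),
	       CLK_SF(REG, FIELD, _MASK));
	return 0;
}
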
#define CLK_REG_FIELD_LIST(type) \
type DPREFCLK_SRC_SEL; \
- type DENTIST_DPREFCLK_WDIVIDER;
+ type DENTIST_DPREFCLK_WDIVIDER; \
+ type DENTIST_DISPCLK_WDIVIDER; \
+ type DENTIST_DISPCLK_CHG_DONE;
-struct dce_disp_clk_shift {
+struct dccg_shift {
CLK_REG_FIELD_LIST(uint8_t)
};
-struct dce_disp_clk_mask {
+struct dccg_mask {
CLK_REG_FIELD_LIST(uint32_t)
};
-struct dce_disp_clk_registers {
+struct dccg_registers {
uint32_t DPREFCLK_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
};
-/* Array identifiers and count for the divider ranges.*/
-enum dce_divider_range_count {
- DIVIDER_RANGE_01 = 0,
- DIVIDER_RANGE_02,
- DIVIDER_RANGE_03,
- DIVIDER_RANGE_MAX /* == 3*/
-};
-
-enum dce_divider_error_types {
- INVALID_DID = 0,
- INVALID_DIVIDER = 1
-};
-
-struct dce_divider_range {
- int div_range_start;
- /* The end of this range of dividers.*/
- int div_range_end;
- /* The distance between each divider in this range.*/
- int div_range_step;
- /* The divider id for the lowest divider.*/
- int did_min;
- /* The divider id for the highest divider.*/
- int did_max;
-};
-
-struct dce_disp_clk {
- struct display_clock base;
- const struct dce_disp_clk_registers *regs;
- const struct dce_disp_clk_shift *clk_shift;
- const struct dce_disp_clk_mask *clk_mask;
+struct dce_dccg {
+ struct dccg base;
+ const struct dccg_registers *regs;
+ const struct dccg_shift *clk_shift;
+ const struct dccg_mask *clk_mask;
struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
- struct dce_divider_range divider_ranges[DIVIDER_RANGE_MAX];
- bool use_max_disp_clk;
int dentist_vco_freq_khz;
/* Cache the status of DFS-bypass feature*/
@@ -106,32 +88,33 @@ struct dce_disp_clk {
int dprefclk_ss_percentage;
/* DPREFCLK SS percentage Divider (100 or 1000) */
int dprefclk_ss_divider;
-
- /* max disp_clk from PPLIB for max validation display clock*/
- int max_displ_clk_in_khz;
};
-struct display_clock *dce_disp_clk_create(
+struct dccg *dce_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask);
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask);
-struct display_clock *dce110_disp_clk_create(
+struct dccg *dce110_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask);
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask);
-struct display_clock *dce112_disp_clk_create(
+struct dccg *dce112_dccg_create(
struct dc_context *ctx,
- const struct dce_disp_clk_registers *regs,
- const struct dce_disp_clk_shift *clk_shift,
- const struct dce_disp_clk_mask *clk_mask);
+ const struct dccg_registers *regs,
+ const struct dccg_shift *clk_shift,
+ const struct dccg_mask *clk_mask);
+
+struct dccg *dce120_dccg_create(struct dc_context *ctx);
-struct display_clock *dce120_disp_clk_create(struct dc_context *ctx);
+#ifdef CONFIG_X86
+struct dccg *dcn1_dccg_create(struct dc_context *ctx);
+#endif
-void dce_disp_clk_destroy(struct display_clock **disp_clk);
+void dce_dccg_destroy(struct dccg **dccg);
#endif /* _DCE_CLOCKS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index a576b8bbb3cd..ca7989e4932b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -150,7 +150,7 @@ static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
}
}
-static void dce_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context)
{
@@ -261,6 +261,8 @@ static void dce_dmcu_setup_psr(struct dmcu *dmcu,
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ return true;
}
static bool dce_is_dmcu_initialized(struct dmcu *dmcu)
@@ -314,7 +316,7 @@ static void dce_get_psr_wait_loop(
return;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
static void dcn10_get_dmcu_state(struct dmcu *dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
@@ -545,24 +547,25 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
* least a few frames. Should never hit the max retry assert below.
*/
if (wait == true) {
- for (retryCount = 0; retryCount <= 1000; retryCount++) {
- dcn10_get_dmcu_psr_state(dmcu, &psr_state);
- if (enable) {
- if (psr_state != 0)
- break;
- } else {
- if (psr_state == 0)
- break;
+ for (retryCount = 0; retryCount <= 1000; retryCount++) {
+ dcn10_get_dmcu_psr_state(dmcu, &psr_state);
+ if (enable) {
+ if (psr_state != 0)
+ break;
+ } else {
+ if (psr_state == 0)
+ break;
+ }
+ udelay(500);
}
- udelay(500);
- }
- /* assert if max retry hit */
- ASSERT(retryCount <= 1000);
+ /* assert if max retry hit */
+ if (retryCount >= 1000)
+ ASSERT(0);
}
}
-static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context)
{
@@ -577,7 +580,7 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
/* If microcontroller is not running, do nothing */
if (dmcu->dmcu_state != DMCU_RUNNING)
- return;
+ return false;
link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
psr_context->psrExitLinkTrainingRequired);
@@ -677,6 +680,11 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ return true;
}
static void dcn10_psr_wait_loop(
@@ -735,7 +743,7 @@ static const struct dmcu_funcs dce_funcs = {
.is_dmcu_initialized = dce_is_dmcu_initialized
};
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
static const struct dmcu_funcs dcn10_funcs = {
.dmcu_init = dcn10_dmcu_init,
.load_iram = dcn10_dmcu_load_iram,
@@ -787,7 +795,7 @@ struct dmcu *dce_dmcu_create(
return &dmcu_dce->base;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
struct dmcu *dcn10_dmcu_create(
struct dc_context *ctx,
const struct dce_dmcu_registers *regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 057b8afd74bc..64dc75378541 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -147,6 +147,7 @@
SR(DCCG_GATE_DISABLE_CNTL2), \
SR(DCFCLK_CNTL),\
SR(DCFCLK_CNTL), \
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
/* todo: get these from GVM instead of reading registers ourselves */\
MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),\
MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),\
@@ -249,7 +250,6 @@ struct dce_hwseq_registers {
uint32_t DISPCLK_FREQ_CHANGE_CNTL;
uint32_t RBBMIF_TIMEOUT_DIS;
uint32_t RBBMIF_TIMEOUT_DIS_2;
- uint32_t DENTIST_DISPCLK_CNTL;
uint32_t DCHUBBUB_CRC_CTRL;
uint32_t DPP_TOP0_DPP_CRC_CTRL;
uint32_t DPP_TOP0_DPP_CRC_VAL_R_G;
@@ -276,6 +276,8 @@ struct dce_hwseq_registers {
uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
uint32_t MC_VM_SYSTEM_APERTURE_LOW_ADDR;
uint32_t MC_VM_SYSTEM_APERTURE_HIGH_ADDR;
+ uint32_t AZALIA_AUDIO_DTO;
+ uint32_t AZALIA_CONTROLLER_CLOCK_GATING;
};
/* set field name */
#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -362,7 +364,8 @@ struct dce_hwseq_registers {
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
- HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh)
+ HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh), \
+ HWS_SF(, DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh)
#define HWSEQ_DCN1_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -496,14 +499,13 @@ struct dce_hwseq_registers {
type DOMAIN7_PGFSM_PWR_STATUS; \
type DCFCLK_GATE_DIS; \
type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
- type DENTIST_DPPCLK_WDIVIDER; \
- type DENTIST_DISPCLK_WDIVIDER; \
type VGA_TEST_ENABLE; \
type VGA_TEST_RENDER_START; \
type D1VGA_MODE_ENABLE; \
type D2VGA_MODE_ENABLE; \
type D3VGA_MODE_ENABLE; \
- type D4VGA_MODE_ENABLE;
+ type D4VGA_MODE_ENABLE; \
+ type AZALIA_AUDIO_DTO_MODULE;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index dbe3b26b6d9e..752b3d62e793 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -256,6 +256,11 @@ static void setup_panel_mode(
enum dp_panel_mode panel_mode)
{
uint32_t value;
+ struct dc_context *ctx = enc110->base.ctx;
+
+	/* if PSP has set the panel mode, DAL should not program it */
+ if (ctx->dc->caps.psp_setup_panel_mode)
+ return;
ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
value = REG_READ(DP_DPHY_INTERNAL_CTRL);
@@ -646,6 +651,9 @@ static bool dce110_link_encoder_validate_hdmi_output(
if (!enc110->base.features.flags.bits.HDMI_6GB_EN &&
adjusted_pix_clk_khz >= 300000)
return false;
+ if (enc110->base.ctx->dc->debug.hdmi20_disable &&
+ crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
return true;
}
@@ -773,6 +781,9 @@ void dce110_link_encoder_construct(
__func__,
result);
}
+ if (enc110->base.ctx->dc->debug.hdmi20_disable) {
+ enc110->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
}
bool dce110_link_encoder_validate_output_with_stream(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index bae752332a9f..85686d917636 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -729,7 +729,7 @@ static bool dce_mi_program_surface_flip_and_addr(
return true;
}
-static struct mem_input_funcs dce_mi_funcs = {
+static const struct mem_input_funcs dce_mi_funcs = {
.mem_input_program_display_marks = dce_mi_program_display_marks,
.allocate_mem_input = dce_mi_allocate_dmif,
.free_mem_input = dce_mi_free_dmif,
@@ -741,7 +741,7 @@ static struct mem_input_funcs dce_mi_funcs = {
.mem_input_is_flip_pending = dce_mi_is_flip_pending
};
-static struct mem_input_funcs dce112_mi_funcs = {
+static const struct mem_input_funcs dce112_mi_funcs = {
.mem_input_program_display_marks = dce112_mi_program_display_marks,
.allocate_mem_input = dce_mi_allocate_dmif,
.free_mem_input = dce_mi_free_dmif,
@@ -753,7 +753,7 @@ static struct mem_input_funcs dce112_mi_funcs = {
.mem_input_is_flip_pending = dce_mi_is_flip_pending
};
-static struct mem_input_funcs dce120_mi_funcs = {
+static const struct mem_input_funcs dce120_mi_funcs = {
.mem_input_program_display_marks = dce120_mi_program_display_marks,
.allocate_mem_input = dce_mi_allocate_dmif,
.free_mem_input = dce_mi_free_dmif,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index c0e813c7ddd4..b139b4017820 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -135,7 +135,7 @@ static void dce110_update_generic_info_packet(
AFMT_GENERIC0_UPDATE, (packet_index == 0),
AFMT_GENERIC2_UPDATE, (packet_index == 2));
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
if (REG(AFMT_VBI_PACKET_CONTROL1)) {
switch (packet_index) {
case 0:
@@ -229,7 +229,7 @@ static void dce110_update_hdmi_info_packet(
HDMI_GENERIC1_SEND, send,
HDMI_GENERIC1_LINE, line);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case 4:
if (REG(HDMI_GENERIC_PACKET_CONTROL2))
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
@@ -274,7 +274,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
uint32_t h_active_start;
uint32_t v_active_start;
uint32_t misc0 = 0;
@@ -289,11 +289,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- if (REG(DP_DB_CNTL))
- REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
-#endif
-
/* set pixel encoding */
switch (crtc_timing->pixel_encoding) {
case PIXEL_ENCODING_YCBCR422:
@@ -322,7 +317,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
if (enc110->se_mask->DP_VID_N_MUL)
REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
#endif
@@ -333,7 +328,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
break;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
if (REG(DP_MSA_MISC))
misc1 = REG_READ(DP_MSA_MISC);
#endif
@@ -367,7 +362,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
/* set dynamic range and YCbCr range */
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
switch (crtc_timing->display_color_depth) {
case COLOR_DEPTH_666:
colorimetry_bpc = 0;
@@ -446,7 +441,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
DP_DYN_RANGE, dynamic_range_rgb,
DP_YCBCR_RANGE, dynamic_range_ycbcr);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
if (REG(DP_MSA_COLORIMETRY))
REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
@@ -481,7 +476,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
crtc_timing->v_front_porch;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
	/* start at beginning of left border */
if (REG(DP_MSA_TIMING_PARAM2))
REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
@@ -756,7 +751,7 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
if (enc110->se_mask->HDMI_DB_DISABLE) {
/* for bring up, disable dp double TODO */
if (REG(HDMI_DB_CONTROL))
@@ -794,7 +789,7 @@ static void dce110_stream_encoder_stop_hdmi_info_packets(
HDMI_GENERIC1_LINE, 0,
HDMI_GENERIC1_SEND, 0);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
/* stop generic packets 2 & 3 on HDMI */
if (REG(HDMI_GENERIC_PACKET_CONTROL2))
REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index a02e719d7794..ab63d0d0304c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -155,7 +155,7 @@ static void program_overscan(
int overscan_bottom = data->v_active
- data->recout.y - data->recout.height;
- if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+ if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
overscan_bottom += 2;
overscan_right += 2;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index 41f83ecd7469..74c05e878807 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -125,17 +125,50 @@ static void dce100_pplib_apply_display_requirements(
dc->prev_display_config = *pp_display_cfg;
}
+/* unit: kHz. Before mode set, get the pixel clock from the context; ASIC
+ * registers may not be programmed yet.
+ */
+static uint32_t get_max_pixel_clock_for_all_paths(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ uint32_t max_pix_clk = 0;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+		/* do not check underlays */
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+ max_pix_clk =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+ }
+ return max_pix_clk;
+}
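
get_max_pixel_clock_for_all_paths() is a straight scan: skip empty pipes and underlays (pipes with a top_pipe) and keep the largest requested pixel clock. A standalone sketch with illustrative types and values:

#include <stdio.h>

#define MAX_PIPES 6

struct pipe { int active; int is_underlay; int requested_pix_clk; };

static int max_pix_clk(const struct pipe *p)
{
	int max = 0;

	for (int i = 0; i < MAX_PIPES; i++) {
		if (!p[i].active || p[i].is_underlay)
			continue;	/* skip empty pipes and underlays */
		if (p[i].requested_pix_clk > max)
			max = p[i].requested_pix_clk;
	}
	return max;
}

int main(void)
{
	struct pipe pipes[MAX_PIPES] = {
		{ 1, 0, 148500 }, { 1, 1, 594000 }, { 1, 0, 297000 },
	};

	printf("%d kHz\n", max_pix_clk(pipes)); /* 297000: underlay ignored */
	return 0;
}
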
+
void dce100_set_bandwidth(
struct dc *dc,
struct dc_state *context,
bool decrease_allowed)
{
- if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dce.dispclk_khz * 115 / 100);
- dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
- }
+ struct dc_clocks req_clks;
+
+ req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+ req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+ &req_clks,
+ decrease_allowed);
+
dce100_pplib_apply_display_requirements(dc, context);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 344dd2e69e7c..3f76e6019546 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -52,6 +52,7 @@
#include "dce/dce_10_0_sh_mask.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
@@ -135,15 +136,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -279,7 +280,20 @@ static const struct dce_opp_shift opp_shift = {
static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
#define audio_regs(id)\
[id] = {\
@@ -572,6 +586,23 @@ struct output_pixel_processor *dce100_opp_create(
return &opp->base;
}
+struct aux_engine *dce100_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
struct clock_source *dce100_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -624,6 +655,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -644,8 +679,8 @@ static void destruct(struct dce110_resource_pool *pool)
dce_aud_destroy(&pool->base.audios[i]);
}
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
if (pool->base.abm != NULL)
dce_abm_destroy(&pool->base.abm);
@@ -830,11 +865,11 @@ static bool construct(
}
}
- pool->base.display_clock = dce_disp_clk_create(ctx,
+ pool->base.dccg = dce_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -864,7 +899,7 @@ static bool construct(
* max_clock_state
*/
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
struct irq_service_init_data init_data;
@@ -884,7 +919,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
-
+ dc->caps.disable_dp_clk_share = true;
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
dce100_timing_generator_create(
@@ -928,6 +963,13 @@ static bool construct(
"DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
+ pool->base.engines[i] = dce100_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
}
dc->caps.max_planes = pool->base.pipe_count;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index e2994d337044..1f7f25013217 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -143,7 +143,7 @@ static void wait_for_fbc_state_changed(
struct dce110_compressor *cp110,
bool enabled)
{
- uint8_t counter = 0;
+ uint32_t counter = 0;
uint32_t addr = mmFBC_STATUS;
uint32_t value;
@@ -158,7 +158,7 @@ static void wait_for_fbc_state_changed(
counter++;
}
- if (counter == 10) {
+ if (counter == 1000) {
DC_LOG_WARNING("%s: wait counter exceeded, changes to HW not applied",
__func__);
} else {
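
The counter widening above is not cosmetic: with the retry bound raised from 10 to 1000, a uint8_t counter would wrap at 256, so an equality test against 1000 could never fire. A short demonstration of the wrap:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t c8 = 0;

	/* 1000 increments of an 8-bit counter wrap to 1000 % 256 == 232 */
	for (int i = 0; i < 1000; i++)
		c8++;
	printf("uint8_t after 1000 increments: %u\n", c8); /* 232, never 1000 */
	return 0;
}
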
@@ -551,9 +551,7 @@ void dce110_compressor_construct(struct dce110_compressor *compressor,
compressor->base.lpt_channels_num = 0;
compressor->base.attached_inst = 0;
compressor->base.is_enabled = false;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
compressor->base.funcs = &dce110_compressor_funcs;
-#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index c29052b6da5a..1d98e3678b04 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -34,9 +34,7 @@
#include "dce/dce_hwseq.h"
#include "gpio_service_interface.h"
-#if defined(CONFIG_DRM_AMD_DC_FBC)
#include "dce110_compressor.h"
-#endif
#include "bios/bios_parser_helper.h"
#include "timing_generator.h"
@@ -667,16 +665,25 @@ static enum dc_status bios_parser_crtc_source_select(
void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
{
+ bool is_hdmi;
+ bool is_dp;
+
ASSERT(pipe_ctx->stream);
if (pipe_ctx->stream_res.stream_enc == NULL)
return; /* this is not root pipe */
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ is_hdmi = dc_is_hdmi_signal(pipe_ctx->stream->signal);
+ is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
+
+ if (!is_hdmi && !is_dp)
+ return;
+
+ if (is_hdmi)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
- else if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ else
pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
@@ -857,17 +864,22 @@ void hwss_edp_power_control(
if (power_up) {
unsigned long long current_ts = dm_get_timestamp(ctx);
unsigned long long duration_in_ms =
- dm_get_elapse_time_in_ns(
+ div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- div64_u64(link->link_trace.time_stamp.edp_poweroff, 1000000));
+ link->link_trace.time_stamp.edp_poweroff), 1000000);
unsigned long long wait_time_ms = 0;
/* max 500ms from LCDVDD off to on */
+ unsigned long long edp_poweroff_time_ms = 500;
+
+ if (link->local_sink != NULL)
+ edp_poweroff_time_ms =
+ 500 + link->local_sink->edid_caps.panel_patch.extra_t12_ms;
if (link->link_trace.time_stamp.edp_poweroff == 0)
- wait_time_ms = 500;
- else if (duration_in_ms < 500)
- wait_time_ms = 500 - duration_in_ms;
+ wait_time_ms = edp_poweroff_time_ms;
+ else if (duration_in_ms < edp_poweroff_time_ms)
+ wait_time_ms = edp_poweroff_time_ms - duration_in_ms;
if (wait_time_ms) {
msleep(wait_time_ms);
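
The div64_u64() change fixes an order-of-operations bug: the old code divided the raw edp_poweroff timestamp by 10^6 before passing it to dm_get_elapse_time_in_ns(), so the computed duration was meaningless; the new code subtracts first and only then converts the elapsed nanoseconds to milliseconds. The intended arithmetic, modeled standalone (dm_* semantics assumed):

#include <stdio.h>

/* Convert the *elapsed* nanoseconds to ms, not one of the raw timestamps */
static unsigned long long elapsed_ms(unsigned long long now_ns,
				     unsigned long long then_ns)
{
	return (now_ns - then_ns) / 1000000ULL; /* subtract, then scale */
}

int main(void)
{
	unsigned long long off = 5000000000ULL;	/* eDP poweroff at t = 5 s */
	unsigned long long now = 5400000000ULL;	/* 400 ms later */

	printf("%llu ms\n", elapsed_ms(now, off)); /* 400 */
	return 0;
}
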
@@ -972,19 +984,35 @@ void hwss_edp_backlight_control(
edp_receiver_ready_T9(link);
}
-void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->sink->link;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ /* notify audio driver for audio modes of monitor */
+ struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
+ unsigned int i, num_audio = 1;
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
- pipe_ctx->stream_res.stream_enc);
+ if (pipe_ctx->stream_res.audio) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ /*current_state not updated yet*/
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
+ num_audio++;
+ }
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
- pipe_ctx->stream_res.stream_enc);
+ pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
+
+ if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+		/* this is the first audio stream; apply the PME w/a to wake AZ from D3 */
+ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
+ /* un-mute audio */
+ /* TODO: audio should be per stream rather than per link */
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, false);
+ }
+}
+
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
@@ -1015,7 +1043,23 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
* stream->stream_engine_id);
*/
}
+}
+void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ dc->hwss.disable_audio_stream(pipe_ctx, option);
link->link_enc->funcs->connect_dig_be_to_fe(
link->link_enc,
@@ -1206,13 +1250,13 @@ static void program_scaler(const struct dc *dc,
{
struct tg_color color = {0};
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
/* TOFPGA */
if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
return;
#endif
- if (dc->debug.surface_visual_confirm)
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
get_surface_visual_confirm_color(pipe_ctx, &color);
else
color_space_to_black_color(dc,
@@ -1298,6 +1342,30 @@ static enum dc_status apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
pipe_ctx[pipe_ctx->pipe_idx];
+ if (pipe_ctx->stream_res.audio != NULL) {
+ struct audio_output audio_output;
+
+ build_audio_output(context, pipe_ctx, &audio_output);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info,
+ &audio_output.crtc_info);
+
+ pipe_ctx->stream_res.audio->funcs->az_configure(
+ pipe_ctx->stream_res.audio,
+ pipe_ctx->stream->signal,
+ &audio_output.crtc_info,
+ &pipe_ctx->stream->audio_info);
+ }
+
/* */
dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
@@ -1412,7 +1480,7 @@ static void power_down_controllers(struct dc *dc)
{
int i;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
dc->res_pool->timing_generators[i]->funcs->disable_crtc(
dc->res_pool->timing_generators[i]);
}
@@ -1441,10 +1509,8 @@ static void power_down_all_hw_blocks(struct dc *dc)
power_down_clock_sources(dc);
-#if defined(CONFIG_DRM_AMD_DC_FBC)
if (dc->fbc_compressor)
dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
-#endif
}
static void disable_vga_and_power_gate_all_controllers(
@@ -1454,12 +1520,13 @@ static void disable_vga_and_power_gate_all_controllers(
struct timing_generator *tg;
struct dc_context *ctx = dc->ctx;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
tg = dc->res_pool->timing_generators[i];
if (tg->funcs->disable_vga)
tg->funcs->disable_vga(tg);
-
+ }
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
/* Enable CLOCK gating for each pipe BEFORE controller
* powergating. */
enable_display_pipe_clock_gating(ctx,
@@ -1602,7 +1669,7 @@ static void dce110_set_displaymarks(
}
}
-static void set_safe_displaymarks(
+void dce110_set_safe_displaymarks(
struct resource_context *res_ctx,
const struct resource_pool *pool)
{
@@ -1686,9 +1753,7 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
if (events->force_trigger)
value |= 0x1;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
value |= 0x84;
-#endif
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
@@ -1696,23 +1761,15 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
}
/* unit: in_khz before mode set, get pixel clock from context. ASIC register
- * may not be programmed yet.
- * TODO: after mode set, pre_mode_set = false,
- * may read PLL register to get pixel clock
+ * may not be programmed yet
*/
static uint32_t get_max_pixel_clock_for_all_paths(
struct dc *dc,
- struct dc_state *context,
- bool pre_mode_set)
+ struct dc_state *context)
{
uint32_t max_pix_clk = 0;
int i;
- if (!pre_mode_set) {
- /* TODO: read ASIC register to get pixel clock */
- ASSERT(0);
- }
-
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -1728,97 +1785,10 @@ static uint32_t get_max_pixel_clock_for_all_paths(
pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
}
- if (max_pix_clk == 0)
- ASSERT(0);
-
return max_pix_clk;
}
/*
- * Find clock state based on clock requested. if clock value is 0, simply
- * set clock state as requested without finding clock state by clock value
- */
-
-static void apply_min_clocks(
- struct dc *dc,
- struct dc_state *context,
- enum dm_pp_clocks_state *clocks_state,
- bool pre_mode_set)
-{
- struct state_dependent_clocks req_clocks = {0};
-
- if (!pre_mode_set) {
- /* set clock_state without verification */
- if (context->dis_clk->funcs->set_min_clocks_state) {
- context->dis_clk->funcs->set_min_clocks_state(
- context->dis_clk, *clocks_state);
- return;
- }
-
- /* TODO: This is incorrect. Figure out how to fix. */
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAY_CLK,
- context->dis_clk->cur_clocks_value.dispclk_in_khz,
- pre_mode_set,
- false);
-
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_PIXELCLK,
- context->dis_clk->cur_clocks_value.max_pixelclk_in_khz,
- pre_mode_set,
- false);
-
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
- context->dis_clk->cur_clocks_value.max_non_dp_phyclk_in_khz,
- pre_mode_set,
- false);
- return;
- }
-
- /* get the required state based on state dependent clocks:
- * display clock and pixel clock
- */
- req_clocks.display_clk_khz = context->bw.dce.dispclk_khz;
-
- req_clocks.pixel_clk_khz = get_max_pixel_clock_for_all_paths(
- dc, context, true);
-
- if (context->dis_clk->funcs->get_required_clocks_state) {
- *clocks_state = context->dis_clk->funcs->get_required_clocks_state(
- context->dis_clk, &req_clocks);
- context->dis_clk->funcs->set_min_clocks_state(
- context->dis_clk, *clocks_state);
- } else {
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAY_CLK,
- req_clocks.display_clk_khz,
- pre_mode_set,
- false);
-
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_PIXELCLK,
- req_clocks.pixel_clk_khz,
- pre_mode_set,
- false);
-
- context->dis_clk->funcs->apply_clock_voltage_request(
- context->dis_clk,
- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
- req_clocks.pixel_clk_khz,
- pre_mode_set,
- false);
- }
-}
-
-#if defined(CONFIG_DRM_AMD_DC_FBC)
-
-/*
* Check if FBC can be enabled
*/
static bool should_enable_fbc(struct dc *dc,
@@ -1896,7 +1866,6 @@ static void enable_fbc(struct dc *dc,
compr->funcs->enable_fbc(compr, &params);
}
}
-#endif
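
The CONFIG_DRM_AMD_DC_FBC guards around the FBC paths are dropped in favor of a runtime check on dc->fbc_compressor, so a single build serves ASICs with and without a compressor. A tiny illustrative sketch of the runtime-gating style the patch moves to (toy names, not the driver's API):

#include <stdio.h>

struct compressor { void (*enable)(void); };

static void fbc_enable(void) { printf("FBC enabled\n"); }

int main(void)
{
	struct compressor fbc = { fbc_enable };
	struct compressor *fbc_compressor = &fbc; /* NULL on ASICs without FBC */

	/* Runtime gating: one binary handles both cases. */
	if (fbc_compressor)
		fbc_compressor->enable();
	return 0;
}
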
static void dce110_reset_hw_ctx_wrap(
struct dc *dc,
@@ -1949,97 +1918,12 @@ static void dce110_reset_hw_ctx_wrap(
}
}
-
-enum dc_status dce110_apply_ctx_to_hw(
+static void dce110_setup_audio_dto(
struct dc *dc,
struct dc_state *context)
{
- struct dc_bios *dcb = dc->ctx->dc_bios;
- enum dc_status status;
int i;
- enum dm_pp_clocks_state clocks_state = DM_PP_CLOCKS_STATE_INVALID;
-
- /* Reset old context */
- /* look up the targets that have been removed since last commit */
- dc->hwss.reset_hw_ctx_wrap(dc, context);
-
- /* Skip applying if no targets */
- if (context->stream_count <= 0)
- return DC_OK;
-
- /* Apply new context */
- dcb->funcs->set_scratch_critical_state(dcb, true);
- /* below is for real asic only */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx_old =
- &dc->current_state->res_ctx.pipe_ctx[i];
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == NULL || pipe_ctx->top_pipe)
- continue;
-
- if (pipe_ctx->stream == pipe_ctx_old->stream) {
- if (pipe_ctx_old->clock_source != pipe_ctx->clock_source)
- dce_crtc_switch_to_clk_src(dc->hwseq,
- pipe_ctx->clock_source, i);
- continue;
- }
-
- dc->hwss.enable_display_power_gating(
- dc, i, dc->ctx->dc_bios,
- PIPE_GATING_CONTROL_DISABLE);
- }
-
- set_safe_displaymarks(&context->res_ctx, dc->res_pool);
-
-#if defined(CONFIG_DRM_AMD_DC_FBC)
- if (dc->fbc_compressor)
- dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
-#endif
- /*TODO: when pplib works*/
- apply_min_clocks(dc, context, &clocks_state, true);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
- if (context->bw.dcn.calc_clk.fclk_khz
- > dc->current_state->bw.dcn.cur_clk.fclk_khz) {
- struct dm_pp_clock_for_voltage_req clock;
-
- clock.clk_type = DM_PP_CLOCK_TYPE_FCLK;
- clock.clocks_in_khz = context->bw.dcn.calc_clk.fclk_khz;
- dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
- dc->current_state->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
- context->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
- }
- if (context->bw.dcn.calc_clk.dcfclk_khz
- > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
- struct dm_pp_clock_for_voltage_req clock;
-
- clock.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
- clock.clocks_in_khz = context->bw.dcn.calc_clk.dcfclk_khz;
- dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
- dc->current_state->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
- context->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
- }
- if (context->bw.dcn.calc_clk.dispclk_khz
- > dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dcn.calc_clk.dispclk_khz);
- dc->current_state->bw.dcn.cur_clk.dispclk_khz =
- context->bw.dcn.calc_clk.dispclk_khz;
- context->bw.dcn.cur_clk.dispclk_khz =
- context->bw.dcn.calc_clk.dispclk_khz;
- }
- } else
-#endif
- if (context->bw.dce.dispclk_khz
- > dc->current_state->bw.dce.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dce.dispclk_khz * 115 / 100);
- }
/* Program the audio wall clock: use HDMI as the clock source if any
 * HDMI audio is active, otherwise use DP. Loop first to find any HDMI
 * audio; if none is found, loop again to find DP audio.
@@ -2113,6 +1997,52 @@ enum dc_status dce110_apply_ctx_to_hw(
}
}
}
+}
+
+enum dc_status dce110_apply_ctx_to_hw(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ enum dc_status status;
+ int i;
+
+ /* Reset old context */
+ /* look up the targets that have been removed since last commit */
+ dc->hwss.reset_hw_ctx_wrap(dc, context);
+
+ /* Skip applying if no targets */
+ if (context->stream_count <= 0)
+ return DC_OK;
+
+ /* Apply new context */
+ dcb->funcs->set_scratch_critical_state(dcb, true);
+
+ /* below is for real asic only */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL || pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream) {
+ if (pipe_ctx_old->clock_source != pipe_ctx->clock_source)
+ dce_crtc_switch_to_clk_src(dc->hwseq,
+ pipe_ctx->clock_source, i);
+ continue;
+ }
+
+ dc->hwss.enable_display_power_gating(
+ dc, i, dc->ctx->dc_bios,
+ PIPE_GATING_CONTROL_DISABLE);
+ }
+
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+
+ dce110_setup_audio_dto(dc, context);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx_old =
@@ -2131,31 +2061,6 @@ enum dc_status dce110_apply_ctx_to_hw(
if (pipe_ctx->top_pipe)
continue;
- if (context->res_ctx.pipe_ctx[i].stream_res.audio != NULL) {
-
- struct audio_output audio_output;
-
- build_audio_output(context, pipe_ctx, &audio_output);
-
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.audio->inst,
- &pipe_ctx->stream->audio_info);
- else
- pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.audio->inst,
- &pipe_ctx->stream->audio_info,
- &audio_output.crtc_info);
-
- pipe_ctx->stream_res.audio->funcs->az_configure(
- pipe_ctx->stream_res.audio,
- pipe_ctx->stream->signal,
- &audio_output.crtc_info,
- &pipe_ctx->stream->audio_info);
- }
-
status = apply_single_controller_ctx_to_hw(
pipe_ctx,
context,
@@ -2165,17 +2070,11 @@ enum dc_status dce110_apply_ctx_to_hw(
return status;
}
- /* to save power */
- apply_min_clocks(dc, context, &clocks_state, false);
-
dcb->funcs->set_scratch_critical_state(dcb, false);
-#if defined(CONFIG_DRM_AMD_DC_FBC)
if (dc->fbc_compressor)
enable_fbc(dc, context);
-#endif
-
return DC_OK;
}
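
After the refactor, dce110_apply_ctx_to_hw reads as a straight sequence: reset the old context, bail out early when no streams remain, power-gate changed pipes, disable FBC, program the audio DTO via the extracted helper, then apply each controller and re-enable FBC. A compact standalone model of that ordering, with stubs standing in for the real hardware-sequencer calls:

#include <stdio.h>

static void step(const char *what) { printf("%s\n", what); }

/* Model of the reordered apply flow; every step here is a stub. */
static int apply_ctx_to_hw(int stream_count)
{
	step("reset_hw_ctx_wrap");
	if (stream_count <= 0)
		return 0;            /* DC_OK: nothing to apply */
	step("set_scratch_critical_state(true)");
	step("power-gate pipes whose stream changed");
	step("disable_fbc");
	step("setup_audio_dto");     /* the extracted helper */
	step("apply_single_controller_ctx_to_hw per pipe");
	step("set_scratch_critical_state(false)");
	step("enable_fbc");
	return 0;
}

int main(void) { return apply_ctx_to_hw(2); }
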
@@ -2490,10 +2389,9 @@ static void init_hw(struct dc *dc)
abm->funcs->init_backlight(abm);
abm->funcs->abm_init(abm);
}
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
if (dc->fbc_compressor)
dc->fbc_compressor->funcs->power_up_fbc(dc->fbc_compressor);
-#endif
}
@@ -2632,7 +2530,7 @@ static void pplib_apply_display_requirements(
/* TODO: dce11.2 */
pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
- pp_display_cfg->disp_clk_khz = context->bw.dce.dispclk_khz;
+ pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
dce110_fill_display_configs(context, pp_display_cfg);
@@ -2654,20 +2552,25 @@ static void pplib_apply_display_requirements(
dc->prev_display_config = *pp_display_cfg;
}
-static void dce110_set_bandwidth(
+void dce110_set_bandwidth(
struct dc *dc,
struct dc_state *context,
bool decrease_allowed)
{
- dce110_set_displaymarks(dc, context);
+ struct dc_clocks req_clks;
- if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dce.dispclk_khz * 115 / 100);
- dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
- }
+ req_clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+
+ if (decrease_allowed)
+ dce110_set_displaymarks(dc, context);
+ else
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+ &req_clks,
+ decrease_allowed);
pplib_apply_display_requirements(dc, context);
}
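
dce110_set_bandwidth now builds a dc_clocks request and hands it to the DCCG, and decrease_allowed picks between the real and the safe watermark sets so marks are never lowered while clocks are still high. A standalone model of that guard, with simplified field names:

#include <stdio.h>
#include <stdint.h>

struct clocks { uint32_t dispclk_khz; uint32_t phyclk_khz; };

static void set_displaymarks(void)      { printf("program real watermarks\n"); }
static void set_safe_displaymarks(void) { printf("program safe watermarks\n"); }

static void update_clocks(const struct clocks *req, int safe_to_lower)
{
	printf("dccg update: disp=%u phy=%u safe_to_lower=%d\n",
	       req->dispclk_khz, req->phyclk_khz, safe_to_lower);
}

static void set_bandwidth(uint32_t dispclk, uint32_t max_pixclk,
			  int decrease_allowed)
{
	struct clocks req = { dispclk, max_pixclk };

	if (decrease_allowed)
		set_displaymarks();      /* final marks for the new state */
	else
		set_safe_displaymarks(); /* conservative marks before raising */

	update_clocks(&req, decrease_allowed);
}

int main(void)
{
	set_bandwidth(600000, 594000, 0); /* pre-mode-set: raise only */
	set_bandwidth(400000, 148500, 1); /* post-mode-set: may lower */
	return 0;
}
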
@@ -2679,9 +2582,7 @@ static void dce110_program_front_end_for_pipe(
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
-#endif
unsigned int i;
DC_LOGGER_INIT();
memset(&tbl_entry, 0, sizeof(tbl_entry));
@@ -2722,7 +2623,6 @@ static void dce110_program_front_end_for_pipe(
program_scaler(dc, pipe_ctx);
-#if defined(CONFIG_DRM_AMD_DC_FBC)
/* fbc not applicable on Underlay pipe */
if (dc->fbc_compressor && old_pipe->stream &&
pipe_ctx->pipe_idx != underlay_idx) {
@@ -2731,7 +2631,6 @@ static void dce110_program_front_end_for_pipe(
else
enable_fbc(dc, dc->current_state);
}
-#endif
mi->funcs->mem_input_program_surface_config(
mi,
@@ -2907,9 +2806,11 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
- .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
- .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
- .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+ .viewport = pipe_ctx->plane_res.scl_data.viewport,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
+ .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
+ .rotation = pipe_ctx->plane_state->rotation,
+ .mirror = pipe_ctx->plane_state->horizontal_mirror
};
if (pipe_ctx->plane_state->address.type
@@ -2968,6 +2869,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.disable_stream = dce110_disable_stream,
.unblank_stream = dce110_unblank_stream,
.blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
.enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
.enable_display_power_gating = dce110_enable_display_power_gating,
.disable_plane = dce110_power_down_fe,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 5d7e9f516827..e4c5db75c4c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -49,6 +49,10 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
void dce110_blank_stream(struct pipe_ctx *pipe_ctx);
+
+void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx);
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option);
+
void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
@@ -56,10 +60,19 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
void dce110_power_down(struct dc *dc);
+void dce110_set_safe_displaymarks(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool);
+
void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg);
+void dce110_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed);
+
uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index 0564c8e31252..9b9fc3d96c07 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -1011,7 +1011,7 @@ void dce110_free_mem_input_v(
{
}
-static struct mem_input_funcs dce110_mem_input_v_funcs = {
+static const struct mem_input_funcs dce110_mem_input_v_funcs = {
.mem_input_program_display_marks =
dce_mem_input_v_program_display_marks,
.mem_input_program_chroma_display_marks =
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index ee33786bdef6..e5e9e92521e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -49,14 +49,14 @@
#include "dce/dce_clock_source.h"
#include "dce/dce_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
+#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
#define DC_LOGGER \
dc->ctx->logger
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
#include "dce110/dce110_compressor.h"
-#endif
#include "reg_helper.h"
@@ -147,15 +147,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -307,6 +307,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -589,6 +604,23 @@ static struct output_pixel_processor *dce110_opp_create(
return &opp->base;
}
+struct aux_engine *dce110_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
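
The aux_engine_regs(id) macro relies on C designated array initializers: each expansion yields an [id] = { ... } element, so the register table stays in instance order no matter how the entries are listed. A minimal self-contained example of the pattern, with a toy register struct in place of dce110_aux_registers:

#include <stdio.h>

struct aux_regs { int base; int reset_mask; };

/* Each expansion produces a designated element for slot `id`. */
#define AUX_REGS(id) \
	[id] = { .base = 0x1000 + 0x100 * (id), .reset_mask = 0 }

static const struct aux_regs aux_engine_regs[] = {
	AUX_REGS(0), AUX_REGS(1), AUX_REGS(2),
	AUX_REGS(3), AUX_REGS(4), AUX_REGS(5),
};

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("aux%d base=0x%x\n", i, aux_engine_regs[i].base);
	return 0;
}
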
+
struct clock_source *dce110_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -652,6 +684,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -680,8 +716,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -795,43 +831,38 @@ static bool dce110_validate_bandwidth(
if (memcmp(&dc->current_state->bw.dce,
&context->bw.dce, sizeof(context->bw.dce))) {
- struct log_entry log_entry;
- dm_logger_open(
- dc->ctx->logger,
- &log_entry,
- LOG_BANDWIDTH_CALCS);
- dm_logger_append(&log_entry, "%s: finish,\n"
+
+ DC_LOG_BANDWIDTH_CALCS(
+ "%s: finish,\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n"
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d\n",
+ "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n"
+ "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+ "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
+ ,
__func__,
context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
context->bw.dce.urgent_wm_ns[0].b_mark,
context->bw.dce.urgent_wm_ns[0].a_mark,
context->bw.dce.stutter_exit_wm_ns[0].b_mark,
- context->bw.dce.stutter_exit_wm_ns[0].a_mark);
- dm_logger_append(&log_entry,
- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d\n",
+ context->bw.dce.stutter_exit_wm_ns[0].a_mark,
context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
context->bw.dce.urgent_wm_ns[1].b_mark,
context->bw.dce.urgent_wm_ns[1].a_mark,
context->bw.dce.stutter_exit_wm_ns[1].b_mark,
- context->bw.dce.stutter_exit_wm_ns[1].a_mark);
- dm_logger_append(&log_entry,
- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
+ context->bw.dce.stutter_exit_wm_ns[1].a_mark,
context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
context->bw.dce.urgent_wm_ns[2].b_mark,
context->bw.dce.urgent_wm_ns[2].a_mark,
context->bw.dce.stutter_exit_wm_ns[2].b_mark,
context->bw.dce.stutter_exit_wm_ns[2].a_mark,
- context->bw.dce.stutter_mode_enable);
- dm_logger_append(&log_entry,
- "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
- "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
+ context->bw.dce.stutter_mode_enable,
context->bw.dce.cpuc_state_change_enable,
context->bw.dce.cpup_state_change_enable,
context->bw.dce.nbp_state_change_enable,
@@ -841,7 +872,6 @@ static bool dce110_validate_bandwidth(
context->bw.dce.sclk_deep_sleep_khz,
context->bw.dce.yclk_khz,
context->bw.dce.blackout_recovery_time_us);
- dm_logger_close(&log_entry);
}
return result;
}
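
The open/append/close logger sequence collapses into one DC_LOG_BANDWIDTH_CALCS call with a concatenated format string; adjacent string literals merge at compile time, so the multi-line format costs nothing at runtime. The same idiom shown with plain printf:

#include <stdio.h>

int main(void)
{
	int b_mark = 10, a_mark = 12;

	/* Adjacent literals concatenate into a single format string. */
	printf("%s: finish,\n"
	       "nbpMark_b: %d nbpMark_a: %d\n",
	       __func__, b_mark, a_mark);
	return 0;
}
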
@@ -1180,11 +1210,11 @@ static bool construct(
}
}
- pool->base.display_clock = dce110_disp_clk_create(ctx,
+ pool->base.dccg = dce110_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1214,7 +1244,7 @@ static bool construct(
* max_clock_state
*/
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -1265,14 +1295,18 @@ static bool construct(
"DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
+
+ pool->base.engines[i] = dce110_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
}
-#if defined(CONFIG_DRM_AMD_DC_FBC)
dc->fbc_compressor = dce110_compressor_create(ctx);
-
-
-#endif
if (!underlay_create(ctx, &pool->base))
goto res_create_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
index a7dce060204f..aa8d6b10d2c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -235,7 +235,7 @@ static void program_overscan(
int overscan_right = data->h_active - data->recout.x - data->recout.width;
int overscan_bottom = data->v_active - data->recout.y - data->recout.height;
- if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+ if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
overscan_bottom += 2;
overscan_right += 2;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 00c0a1ef15eb..288129343c77 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -49,6 +49,7 @@
#include "dce112/dce112_hw_sequencer.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "reg_helper.h"
@@ -146,15 +147,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -314,6 +315,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_112(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -588,6 +604,23 @@ struct output_pixel_processor *dce112_opp_create(
return &opp->base;
}
+struct aux_engine *dce112_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
struct clock_source *dce112_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -625,6 +658,9 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.opps[i] != NULL)
dce110_opp_destroy(&pool->base.opps[i]);
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
if (pool->base.transforms[i] != NULL)
dce112_transform_destroy(&pool->base.transforms[i]);
@@ -640,6 +676,7 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -668,8 +705,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -744,43 +781,38 @@ bool dce112_validate_bandwidth(
if (memcmp(&dc->current_state->bw.dce,
&context->bw.dce, sizeof(context->bw.dce))) {
- struct log_entry log_entry;
- dm_logger_open(
- dc->ctx->logger,
- &log_entry,
- LOG_BANDWIDTH_CALCS);
- dm_logger_append(&log_entry, "%s: finish,\n"
+
+ DC_LOG_BANDWIDTH_CALCS(
+ "%s: finish,\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n"
"nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d\n",
+ "stutMark_b: %d stutMark_a: %d\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n"
+ "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+ "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
+ ,
__func__,
context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
context->bw.dce.urgent_wm_ns[0].b_mark,
context->bw.dce.urgent_wm_ns[0].a_mark,
context->bw.dce.stutter_exit_wm_ns[0].b_mark,
- context->bw.dce.stutter_exit_wm_ns[0].a_mark);
- dm_logger_append(&log_entry,
- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d\n",
+ context->bw.dce.stutter_exit_wm_ns[0].a_mark,
context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
context->bw.dce.urgent_wm_ns[1].b_mark,
context->bw.dce.urgent_wm_ns[1].a_mark,
context->bw.dce.stutter_exit_wm_ns[1].b_mark,
- context->bw.dce.stutter_exit_wm_ns[1].a_mark);
- dm_logger_append(&log_entry,
- "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
- "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
+ context->bw.dce.stutter_exit_wm_ns[1].a_mark,
context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
context->bw.dce.urgent_wm_ns[2].b_mark,
context->bw.dce.urgent_wm_ns[2].a_mark,
context->bw.dce.stutter_exit_wm_ns[2].b_mark,
context->bw.dce.stutter_exit_wm_ns[2].a_mark,
- context->bw.dce.stutter_mode_enable);
- dm_logger_append(&log_entry,
- "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
- "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
+ context->bw.dce.stutter_mode_enable,
context->bw.dce.cpuc_state_change_enable,
context->bw.dce.cpup_state_change_enable,
context->bw.dce.nbp_state_change_enable,
@@ -790,7 +822,6 @@ bool dce112_validate_bandwidth(
context->bw.dce.sclk_deep_sleep_khz,
context->bw.dce.yclk_khz,
context->bw.dce.blackout_recovery_time_us);
- dm_logger_close(&log_entry);
}
return result;
}
@@ -1000,7 +1031,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
- clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz =
mem_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -1010,7 +1041,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
- clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz =
mem_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -1020,7 +1051,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
- clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
@@ -1030,7 +1061,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
- clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
@@ -1124,11 +1155,11 @@ static bool construct(
}
}
- pool->base.display_clock = dce112_disp_clk_create(ctx,
+ pool->base.dccg = dce112_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1158,7 +1189,7 @@ static bool construct(
* max_clock_state
*/
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -1214,6 +1245,13 @@ static bool construct(
"DC:failed to create output pixel processor!\n");
goto res_create_fail;
}
+ pool->base.engines[i] = dce112_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
}
if (!resource_construct(num_virtual_links, dc, &pool->base,
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index e96ff86d2fc3..5853522a6182 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -244,7 +244,16 @@ static void dce120_update_dchub(
dh_data->dchub_info_valid = false;
}
+static void dce120_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+{
+ if (context->stream_count <= 0)
+ return;
+ dce110_set_bandwidth(dc, context, decrease_allowed);
+}
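
dce120_set_bandwidth is a thin wrapper that skips bandwidth programming entirely when the context carries no streams, then defers to the shared DCE11 path. The shape of that guard in isolation (stub functions only):

#include <stdio.h>

static void dce110_style_set_bandwidth(void)
{
	printf("program watermarks and clocks\n");
}

static void dce120_style_set_bandwidth(int stream_count)
{
	if (stream_count <= 0)
		return; /* nothing to drive; avoid touching clocks */
	dce110_style_set_bandwidth();
}

int main(void)
{
	dce120_style_set_bandwidth(0); /* no-op */
	dce120_style_set_bandwidth(1); /* delegates */
	return 0;
}
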
void dce120_hw_sequencer_construct(struct dc *dc)
{
@@ -254,5 +263,6 @@ void dce120_hw_sequencer_construct(struct dc *dc)
dce110_hw_sequencer_construct(dc);
dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
dc->hwss.update_dchub = dce120_update_dchub;
+ dc->hwss.set_bandwidth = dce120_set_bandwidth;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 2d58daccc005..d43f37d99c7d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -53,6 +53,7 @@
#include "dce/dce_hwseq.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
@@ -297,6 +298,20 @@ static const struct dce_opp_shift opp_shift = {
static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
#define audio_regs(id)\
[id] = {\
@@ -361,6 +376,22 @@ struct output_pixel_processor *dce120_opp_create(
ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
+struct aux_engine *dce120_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
static const struct bios_registers bios_regs = {
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
@@ -373,7 +404,7 @@ static const struct resource_caps res_cap = {
.num_pll = 6,
};
-static const struct dc_debug debug_defaults = {
+static const struct dc_debug_options debug_defaults = {
.disable_clock_gate = true,
};
@@ -467,6 +498,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
}
for (i = 0; i < pool->base.audio_count; i++) {
@@ -494,8 +529,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
}
static void read_dce_straps(
@@ -775,7 +810,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
- clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz =
mem_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -785,7 +820,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
- clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz =
mem_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -795,7 +830,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[0].clocks_in_khz;
clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
- clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
@@ -805,7 +840,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
/* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
- clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+ clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz =
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
/* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
@@ -848,6 +883,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
+ dc->caps.psp_setup_panel_mode = true;
dc->debug = debug_defaults;
@@ -894,11 +930,11 @@ static bool construct(
}
}
- pool->base.display_clock = dce120_disp_clk_create(ctx);
- if (pool->base.display_clock == NULL) {
+ pool->base.dccg = dce120_dccg_create(ctx);
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
- goto disp_clk_create_fail;
+ goto dccg_create_fail;
}
pool->base.dmcu = dce_dmcu_create(ctx,
@@ -984,6 +1020,13 @@ static bool construct(
dm_error(
"DC: failed to create output pixel processor!\n");
}
+ pool->base.engines[i] = dce120_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
/* check next valid pipe */
j++;
@@ -1011,7 +1054,7 @@ static bool construct(
irqs_create_fail:
controller_create_fail:
-disp_clk_create_fail:
+dccg_create_fail:
clk_src_create_fail:
res_create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 48a068964722..604c62969ead 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -54,6 +54,7 @@
#include "reg_helper.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
/* TODO remove this include */
@@ -153,15 +154,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -298,6 +299,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -448,6 +464,23 @@ static struct output_pixel_processor *dce80_opp_create(
return &opp->base;
}
+struct aux_engine *dce80_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
static struct stream_encoder *dce80_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
@@ -655,6 +688,9 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -683,8 +719,8 @@ static void destruct(struct dce110_resource_pool *pool)
}
}
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -822,11 +858,11 @@ static bool dce80_construct(
}
}
- pool->base.display_clock = dce_disp_clk_create(ctx,
+ pool->base.dccg = dce_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -852,7 +888,7 @@ static bool dce80_construct(
goto res_create_fail;
}
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -899,9 +935,18 @@ static bool dce80_construct(
dm_error("DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
+
+ pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
}
dc->caps.max_planes = pool->base.pipe_count;
+ dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
@@ -1006,11 +1051,11 @@ static bool dce81_construct(
}
}
- pool->base.display_clock = dce_disp_clk_create(ctx,
+ pool->base.dccg = dce_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1037,7 +1082,7 @@ static bool dce81_construct(
}
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -1087,6 +1132,7 @@ static bool dce81_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+ dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
@@ -1187,11 +1233,11 @@ static bool dce83_construct(
}
}
- pool->base.display_clock = dce_disp_clk_create(ctx,
+ pool->base.dccg = dce_dccg_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.display_clock == NULL) {
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1218,7 +1264,7 @@ static bool dce83_construct(
}
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.display_clock->max_clks_state =
+ pool->base.dccg->max_clks_state =
static_clk_info.max_clocks_state;
{
@@ -1268,6 +1314,7 @@ static bool dce83_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+ dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index c69fa4bfab0a..bf8b68f8db4f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -145,10 +145,10 @@ static bool dpp_get_optimal_number_of_taps(
pixel_width = scl_data->viewport.width;
/* Some ASICs do not support FP16 scaling, so we reject modes that require it */
- if (scl_data->viewport.width != scl_data->h_active &&
- scl_data->viewport.height != scl_data->v_active &&
+ if (scl_data->format == PIXEL_FORMAT_FP16 &&
dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
- scl_data->format == PIXEL_FORMAT_FP16)
+ scl_data->ratios.horz.value != dc_fixpt_one.value &&
+ scl_data->ratios.vert.value != dc_fixpt_one.value)
return false;
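
The corrected test rejects FP16 only when the DSCL is a fixed-format unit and the scaling ratios actually differ from 1:1; the old version compared viewport size against the active region, which is not the same condition once plane position is taken into account. A standalone predicate modeling the new logic, with the ratio modeled as a 64-bit value with 32 fractional bits, matching the driver's fixed31_32 type:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define FIXPT_ONE (1LL << 32) /* 1.0 in fixed31_32, as dc_fixpt_one */

enum pixel_format { PIXEL_FORMAT_ARGB8888, PIXEL_FORMAT_FP16 };

struct scl_model {
	enum pixel_format format;
	bool fixed_format_dscl; /* DSCL_DATA_PRCESSING_FIXED_FORMAT */
	int64_t ratio_h;        /* horizontal scaling ratio, fixed point */
	int64_t ratio_v;        /* vertical scaling ratio, fixed point */
};

/* Reject FP16 surfaces that would need real scaling on fixed-format DSCL. */
static bool fp16_scaling_rejected(const struct scl_model *s)
{
	return s->format == PIXEL_FORMAT_FP16 &&
	       s->fixed_format_dscl &&
	       s->ratio_h != FIXPT_ONE &&
	       s->ratio_v != FIXPT_ONE;
}

int main(void)
{
	struct scl_model s = { PIXEL_FORMAT_FP16, true,
			       FIXPT_ONE * 2, FIXPT_ONE * 2 };

	printf("rejected: %d\n", fp16_scaling_rejected(&s));
	return 0;
}
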
if (scl_data->viewport.width > scl_data->h_active &&
@@ -445,10 +445,10 @@ void dpp1_set_cursor_position(
uint32_t width)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
uint32_t cur_en = pos->enable ? 1 : 0;
- if (src_x_offset >= (int)param->viewport_width)
+ if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge */
if (src_x_offset + (int)width <= 0)
@@ -459,6 +459,18 @@ void dpp1_set_cursor_position(
}
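
Cursor clipping is now computed against the full viewport rect carried in dc_cursor_mi_param: the source X offset subtracts both the hotspot and the viewport origin, and the cursor is hidden once it falls entirely outside the viewport width. A small standalone model of that visibility test:

#include <stdio.h>

struct rect_model { int x; int width; };

/* Returns 1 if the cursor is at least partially visible. */
static int cursor_visible(int pos_x, int x_hotspot, int cursor_width,
			  const struct rect_model *viewport)
{
	int src_x_offset = pos_x - x_hotspot - viewport->x;

	if (src_x_offset >= viewport->width)
		return 0; /* fully past the right edge */
	if (src_x_offset + cursor_width <= 0)
		return 0; /* fully past the left edge */
	return 1;
}

int main(void)
{
	struct rect_model vp = { .x = 0, .width = 1920 };

	printf("%d\n", cursor_visible(100, 4, 64, &vp));  /* visible */
	printf("%d\n", cursor_visible(2000, 0, 64, &vp)); /* hidden */
	return 0;
}
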
+void dpp1_cnv_set_optional_cursor_attributes(
+ struct dpp *dpp_base,
+ struct dpp_cursor_attributes *attr)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ if (attr) {
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
+ }
+}
+
void dpp1_dppclk_control(
struct dpp *dpp_base,
bool dppclk_div,
@@ -499,6 +511,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_full_bypass = dpp1_full_bypass,
.set_cursor_attributes = dpp1_set_cursor_attributes,
.set_cursor_position = dpp1_set_cursor_position,
+ .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index e862cafa6501..e2889e61b18c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -119,6 +119,7 @@
SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
+ SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \
SRI(DPP_CONTROL, DPP_TOP, id), \
SRI(CM_HDR_MULT_COEF, CM, id)
@@ -324,6 +325,8 @@
TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \
TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh)
@@ -1076,7 +1079,9 @@
type CUR0_COLOR1; \
type DPPCLK_RATE_CONTROL; \
type DPP_CLOCK_ENABLE; \
- type CM_HDR_MULT_COEF;
+ type CM_HDR_MULT_COEF; \
+ type CUR0_FP_BIAS; \
+ type CUR0_FP_SCALE;
struct dcn_dpp_shift {
TF_REG_FIELD_LIST(uint8_t)
@@ -1329,7 +1334,8 @@ struct dcn_dpp_mask {
uint32_t CURSOR0_COLOR0; \
uint32_t CURSOR0_COLOR1; \
uint32_t DPP_CONTROL; \
- uint32_t CM_HDR_MULT_COEF;
+ uint32_t CM_HDR_MULT_COEF; \
+ uint32_t CURSOR0_FP_SCALE_BIAS;
struct dcn_dpp_registers {
DPP_COMMON_REG_VARIABLE_LIST
@@ -1370,6 +1376,10 @@ void dpp1_set_cursor_position(
const struct dc_cursor_mi_param *param,
uint32_t width);
+void dpp1_cnv_set_optional_cursor_attributes(
+ struct dpp *dpp_base,
+ struct dpp_cursor_attributes *attr);
+
bool dpp1_dscl_is_lb_conf_valid(
int ceil_vratio,
int num_partitions,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index f862fd148cca..4a863a5dab41 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -621,6 +621,10 @@ static void dpp1_dscl_set_manual_ratio_init(
static void dpp1_dscl_set_recout(
struct dcn10_dpp *dpp, const struct rect *recout)
{
+ int visual_confirm_on = 0;
+ if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
+ visual_confirm_on = 1;
+
REG_SET_2(RECOUT_START, 0,
/* First pixel of RECOUT */
RECOUT_START_X, recout->x,
@@ -632,8 +636,7 @@ static void dpp1_dscl_set_recout(
RECOUT_WIDTH, recout->width,
/* Number of RECOUT vertical lines */
RECOUT_HEIGHT, recout->height
- - dpp->base.ctx->dc->debug.surface_visual_confirm * 4 *
- (dpp->base.inst + 1));
+ - visual_confirm_on * 4 * (dpp->base.inst + 1));
}
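
With surface_visual_confirm replaced by the visual_confirm enum, the recout height reduction is derived from an explicit flag: each pipe shaves off 4 * (inst + 1) lines so the debug bands from different pipes do not overlap. The arithmetic in isolation:

#include <stdio.h>

/* Height actually programmed when visual confirm is on. */
static int recout_height(int height, int pipe_inst, int visual_confirm_on)
{
	return height - visual_confirm_on * 4 * (pipe_inst + 1);
}

int main(void)
{
	for (int inst = 0; inst < 3; inst++)
		printf("pipe %d: %d lines\n",
		       inst, recout_height(1080, inst, 1));
	return 0;
}
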
/* Main function to program scaler and line buffer in manual scaling mode */
@@ -655,6 +658,12 @@ void dpp1_dscl_set_scaler_manual_scale(
dpp->scl_data = *scl_data;
+ /* Autocal off */
+ REG_SET_3(DSCL_AUTOCAL, 0,
+ AUTOCAL_MODE, AUTOCAL_MODE_OFF,
+ AUTOCAL_NUM_PIPE, 0,
+ AUTOCAL_PIPE_ID, 0);
+
/* Recout */
dpp1_dscl_set_recout(dpp, &scl_data->recout);
@@ -678,12 +687,6 @@ void dpp1_dscl_set_scaler_manual_scale(
if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
return;
- /* Autocal off */
- REG_SET_3(DSCL_AUTOCAL, 0,
- AUTOCAL_MODE, AUTOCAL_MODE_OFF,
- AUTOCAL_NUM_PIPE, 0,
- AUTOCAL_PIPE_ID, 0);
-
/* Black offsets */
if (ycbcr)
REG_SET_2(SCL_BLACK_OFFSET, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 943143efbb82..1ea91e153d3a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -190,10 +190,17 @@ static uint32_t convert_and_clamp(
}
+void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
+{
+ REG_UPDATE_SEQ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0, 1);
+}
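
hubbub1_program_watermarks now caches the last value written for every watermark and rewrites a register only when the new value is higher, or when safe_to_lower says lowering is currently safe; hubbub1_wm_change_req_wa then pulses the change-request bit to latch the new set. A standalone model of the per-field pattern that the long function repeats for clock states A through D:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint32_t cached_urgent_ns; /* stands in for hubbub->watermarks.* */

static void reg_write(uint32_t val) { printf("write 0x%x\n", val); }

/* Write the watermark only if raising it, or if lowering is safe now. */
static void program_urgent_wm(uint32_t new_ns, bool safe_to_lower)
{
	if (safe_to_lower || new_ns > cached_urgent_ns) {
		cached_urgent_ns = new_ns;
		reg_write(new_ns);
	}
}

int main(void)
{
	program_urgent_wm(100, false); /* raise: written */
	program_urgent_wm(80, false);  /* lower, unsafe: skipped */
	program_urgent_wm(80, true);   /* lower, safe: written */
	return 0;
}
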
+
void hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
- unsigned int refclk_mhz)
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
{
uint32_t force_en = hubbub->ctx->dc->debug.disable_stutter ? 1 : 0;
/*
@@ -202,191 +209,259 @@ void hubbub1_program_watermarks(
*/
uint32_t prog_wm_value;
- REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0);
/* Repeat for water mark set A, B, C and D. */
/* clock state A */
- prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
-
- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.urgent_ns, prog_wm_value);
+ if (safe_to_lower || watermarks->a.urgent_ns > hubbub->watermarks.a.urgent_ns) {
+ hubbub->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
- prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.urgent_ns, prog_wm_value);
+ }
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
- prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+ if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub->watermarks.a.pte_meta_urgent_ns) {
+ hubbub->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+ }
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
+ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
+ > hubbub->watermarks.a.cstate_pstate.cstate_exit_ns) {
+ hubbub->watermarks.a.cstate_pstate.cstate_exit_ns =
+ watermarks->a.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+ }
+ if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
+ > hubbub->watermarks.a.cstate_pstate.pstate_change_ns) {
+ hubbub->watermarks.a.cstate_pstate.pstate_change_ns =
+ watermarks->a.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.cstate_exit_ns,
+ watermarks->a.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
}
- prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n\n",
- watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
-
-
/* clock state B */
- prog_wm_value = convert_and_clamp(
- watermarks->b.urgent_ns, refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.urgent_ns, prog_wm_value);
-
-
- prog_wm_value = convert_and_clamp(
- watermarks->b.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.pte_meta_urgent_ns, prog_wm_value);
+ if (safe_to_lower || watermarks->b.urgent_ns > hubbub->watermarks.b.urgent_ns) {
+ hubbub->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.urgent_ns, prog_wm_value);
+ }
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
- prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+ if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub->watermarks.b.pte_meta_urgent_ns) {
+ hubbub->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_B calculated =%d\n"
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ watermarks->b.pte_meta_urgent_ns, prog_wm_value);
+ }
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
+ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
+ > hubbub->watermarks.b.cstate_pstate.cstate_exit_ns) {
+ hubbub->watermarks.b.cstate_pstate.cstate_exit_ns =
+ watermarks->b.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+ }
+ if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
+ > hubbub->watermarks.b.cstate_pstate.pstate_change_ns) {
+ hubbub->watermarks.b.cstate_pstate.pstate_change_ns =
+ watermarks->b.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.cstate_exit_ns,
+ watermarks->b.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
}
- prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
- "HW register value = 0x%x\n",
- watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
-
/* clock state C */
- prog_wm_value = convert_and_clamp(
- watermarks->c.urgent_ns, refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.urgent_ns, prog_wm_value);
-
-
- prog_wm_value = convert_and_clamp(
- watermarks->c.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.pte_meta_urgent_ns, prog_wm_value);
+ if (safe_to_lower || watermarks->c.urgent_ns > hubbub->watermarks.c.urgent_ns) {
+ hubbub->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.urgent_ns, prog_wm_value);
+ }
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
- prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+ if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub->watermarks.c.pte_meta_urgent_ns) {
+ hubbub->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_C calculated =%d\n"
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ watermarks->c.pte_meta_urgent_ns, prog_wm_value);
+ }
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
+ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+
+ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
+ > hubbub->watermarks.c.cstate_pstate.cstate_exit_ns) {
+ hubbub->watermarks.c.cstate_pstate.cstate_exit_ns =
+ watermarks->c.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+ }
+ if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
+ > hubbub->watermarks.c.cstate_pstate.pstate_change_ns) {
+ hubbub->watermarks.c.cstate_pstate.pstate_change_ns =
+ watermarks->c.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.cstate_exit_ns,
+ watermarks->c.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
}
- prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
- "HW register value = 0x%x\n",
- watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
-
/* clock state D */
- prog_wm_value = convert_and_clamp(
- watermarks->d.urgent_ns, refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.urgent_ns, prog_wm_value);
-
- prog_wm_value = convert_and_clamp(
- watermarks->d.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.pte_meta_urgent_ns, prog_wm_value);
-
-
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
- prog_wm_value = convert_and_clamp(
- watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+ if (safe_to_lower || watermarks->d.urgent_ns > hubbub->watermarks.d.urgent_ns) {
+ hubbub->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.urgent_ns, prog_wm_value);
+ }
- prog_wm_value = convert_and_clamp(
- watermarks->d.cstate_pstate.cstate_exit_ns,
+ if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub->watermarks.d.pte_meta_urgent_ns) {
+ hubbub->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ watermarks->d.pte_meta_urgent_ns, prog_wm_value);
}
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
+ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
- prog_wm_value = convert_and_clamp(
- watermarks->d.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n\n",
- watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
+ > hubbub->watermarks.d.cstate_pstate.cstate_exit_ns) {
+ hubbub->watermarks.d.cstate_pstate.cstate_exit_ns =
+ watermarks->d.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+ }
- REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
+ if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
+ > hubbub->watermarks.d.cstate_pstate.pstate_change_ns) {
+ hubbub->watermarks.d.cstate_pstate.pstate_change_ns =
+ watermarks->d.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+ }
REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
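The hunk above repeats one idiom for every watermark field: program the register only when the new value raises the watermark, or when the caller has declared lowering safe, and mirror the accepted value into the new hubbub->watermarks cache so later calls compare against what the hardware actually holds. A minimal standalone sketch of that idiom (names are illustrative, not part of the patch):

        #include <stdbool.h>
        #include <stdint.h>

        /* Decide whether a watermark register should be reprogrammed.
         * Raising is always allowed (it only makes the arbiter more
         * conservative); lowering waits for safe_to_lower. The cache is
         * updated so the next call compares against the programmed value. */
        static bool wm_should_program(uint32_t new_ns, uint32_t *cached_ns,
                                      bool safe_to_lower)
        {
                if (safe_to_lower || new_ns > *cached_ns) {
                        *cached_ns = new_ns;
                        return true;
                }
                return false;
        }

The rewritten dcn10_set_bandwidth later in this patch calls hubbub1_program_watermarks() with safe_to_lower = true once the clocks have been updated.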
@@ -408,6 +483,11 @@ void hubbub1_update_dchub(
struct hubbub *hubbub,
struct dchub_init_data *dh_data)
{
+ if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
+ ASSERT(false);
+		/* should not get here */
+ return;
+ }
/* TODO: port code from dal2 */
switch (dh_data->fb_mode) {
case FRAME_BUFFER_MODE_ZFB_ONLY:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 6315a0e6b0d6..d6e596eef4c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -185,6 +185,7 @@ struct hubbub {
const struct dcn_hubbub_shift *shifts;
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
+ struct dcn_watermark_set watermarks;
};
void hubbub1_update_dchub(
@@ -194,10 +195,13 @@ void hubbub1_update_dchub(
bool hubbub1_verify_allow_pstate_change_high(
struct hubbub *hubbub);
+void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
+
void hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
- unsigned int refclk_mhz);
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
void hubbub1_toggle_watermark_change_req(
struct hubbub *hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index c28085be39ff..2138cd3c5d1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -152,21 +152,19 @@ void hubp1_program_tiling(
PIPE_ALIGNED, info->gfx9.pipe_aligned);
}
-void hubp1_program_size_and_rotation(
+void hubp1_program_size(
struct hubp *hubp,
- enum dc_rotation_angle rotation,
enum surface_pixel_format format,
const union plane_size *plane_size,
- struct dc_plane_dcc_param *dcc,
- bool horizontal_mirror)
+ struct dc_plane_dcc_param *dcc)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
- uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c, mirror;
+ uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c;
/* Program data and meta surface pitch (calculation from addrlib)
* 444 or 420 luma
*/
- if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END) {
ASSERT(plane_size->video.chroma_pitch != 0);
/* Chroma pitch zero can cause system hang! */
@@ -192,13 +190,22 @@ void hubp1_program_size_and_rotation(
if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
+}
+
+void hubp1_program_rotation(
+ struct hubp *hubp,
+ enum dc_rotation_angle rotation,
+ bool horizontal_mirror)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ uint32_t mirror;
+
if (horizontal_mirror)
mirror = 1;
else
mirror = 0;
-
/* Program rotation angle and horz mirror - no mirror */
if (rotation == ROTATION_ANGLE_0)
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
@@ -287,6 +294,10 @@ void hubp1_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 66);
break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 12);
+ break;
default:
BREAK_TO_DEBUGGER();
break;
@@ -450,9 +461,6 @@ bool hubp1_program_surface_flip_and_addr(
hubp->request_address = *address;
- if (flip_immediate)
- hubp->current_address = *address;
-
return true;
}
@@ -481,8 +489,8 @@ void hubp1_program_surface_config(
{
hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
hubp1_program_tiling(hubp, tiling_info, format);
- hubp1_program_size_and_rotation(
- hubp, rotation, format, plane_size, dcc, horizontal_mirror);
+ hubp1_program_size(hubp, format, plane_size, dcc);
+ hubp1_program_rotation(hubp, rotation, horizontal_mirror);
hubp1_program_pixel_format(hubp, format);
}
@@ -688,7 +696,6 @@ bool hubp1_is_flip_pending(struct hubp *hubp)
if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
return true;
- hubp->current_address = hubp->request_address;
return false;
}
@@ -1061,9 +1068,11 @@ void hubp1_cursor_set_position(
const struct dc_cursor_mi_param *param)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
- int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
+ int x_hotspot = pos->x_hotspot;
+ int y_hotspot = pos->y_hotspot;
+ uint32_t dst_x_offset;
uint32_t cur_en = pos->enable ? 1 : 0;
- uint32_t dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
/*
	 * Guard against cursor_set_position() from being called with invalid
@@ -1075,6 +1084,18 @@ void hubp1_cursor_set_position(
if (hubp->curs_attr.address.quad_part == 0)
return;
+ if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
+ src_x_offset = pos->y - pos->y_hotspot - param->viewport.x;
+ y_hotspot = pos->x_hotspot;
+ x_hotspot = pos->y_hotspot;
+ }
+
+ if (param->mirror) {
+ x_hotspot = param->viewport.width - x_hotspot;
+ src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
+ }
+
+ dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
dst_x_offset *= param->ref_clk_khz;
dst_x_offset /= param->pixel_clk_khz;
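For 90/270 degree rotation the cursor scan axis is swapped, so the horizontal offset is derived from the y coordinate and the hotspot components trade places; mirroring then reflects the offset across the viewport. A worked example with hypothetical values (viewport.x = 0, viewport.width = 1920, pos = (100, 50), hotspot = (8, 4), rotation 270 plus mirror):

        src_x_offset = pos.y - y_hotspot - viewport.x = 50 - 4 - 0 = 46
        (x_hotspot, y_hotspot) = (4, 8)               /* swapped by rotation */
        x_hotspot    = 1920 - 4            = 1916     /* mirrored */
        src_x_offset = 0 + 1920 - 46       = 1874     /* mirrored */

dst_x_offset then clamps the result at zero and, in the two lines above, rescales it from the pixel clock domain into the reference clock domain.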
@@ -1085,7 +1106,7 @@ void hubp1_cursor_set_position(
dc_fixpt_from_int(dst_x_offset),
param->h_scale_ratio));
- if (src_x_offset >= (int)param->viewport_width)
+ if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
if (src_x_offset + (int)hubp->curs_attr.width <= 0)
@@ -1102,8 +1123,8 @@ void hubp1_cursor_set_position(
CURSOR_Y_POSITION, pos->y);
REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, pos->x_hotspot,
- CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+ CURSOR_HOT_SPOT_X, x_hotspot,
+ CURSOR_HOT_SPOT_Y, y_hotspot);
REG_SET(CURSOR_DST_OFFSET, 0,
CURSOR_DST_X_OFFSET, dst_x_offset);
@@ -1125,7 +1146,7 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
}
-static struct hubp_funcs dcn10_hubp_funcs = {
+static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_program_surface_flip_and_addr =
hubp1_program_surface_flip_and_addr,
.hubp_program_surface_config =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index d901d5092969..f689feace82d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -268,8 +268,6 @@
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
- HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
- HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
@@ -388,6 +386,8 @@
#define HUBP_MASK_SH_LIST_DCN10(mask_sh)\
HUBP_MASK_SH_LIST_DCN(mask_sh),\
HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\
@@ -679,12 +679,15 @@ void hubp1_program_pixel_format(
struct hubp *hubp,
enum surface_pixel_format format);
-void hubp1_program_size_and_rotation(
+void hubp1_program_size(
struct hubp *hubp,
- enum dc_rotation_angle rotation,
enum surface_pixel_format format,
const union plane_size *plane_size,
- struct dc_plane_dcc_param *dcc,
+ struct dc_plane_dcc_param *dcc);
+
+void hubp1_program_rotation(
+ struct hubp *hubp,
+ enum dc_rotation_angle rotation,
bool horizontal_mirror);
void hubp1_program_tiling(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index f8e0576af6e0..cfcc54f2ce65 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -337,13 +337,13 @@ void dcn10_log_hw_state(struct dc *dc)
DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
- dc->current_state->bw.dcn.calc_clk.dcfclk_khz,
- dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.calc_clk.dispclk_khz,
- dc->current_state->bw.dcn.calc_clk.dppclk_khz,
- dc->current_state->bw.dcn.calc_clk.max_supported_dppclk_khz,
- dc->current_state->bw.dcn.calc_clk.fclk_khz,
- dc->current_state->bw.dcn.calc_clk.socclk_khz);
+ dc->current_state->bw.dcn.clk.dcfclk_khz,
+ dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw.dcn.clk.dispclk_khz,
+ dc->current_state->bw.dcn.clk.dppclk_khz,
+ dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
+ dc->current_state->bw.dcn.clk.fclk_khz,
+ dc->current_state->bw.dcn.clk.socclk_khz);
log_mpc_crc(dc);
@@ -415,6 +415,8 @@ static void dpp_pg_control(
if (hws->ctx->dc->debug.disable_dpp_power_gate)
return;
+ if (REG(DOMAIN1_PG_CONFIG) == 0)
+ return;
switch (dpp_inst) {
case 0: /* DPP0 */
@@ -465,6 +467,8 @@ static void hubp_pg_control(
if (hws->ctx->dc->debug.disable_hubp_power_gate)
return;
+ if (REG(DOMAIN0_PG_CONFIG) == 0)
+ return;
switch (hubp_inst) {
case 0: /* DCHUBP0 */
@@ -719,19 +723,7 @@ static void reset_back_end_for_pipe(
if (!pipe_ctx->stream->dpms_off)
core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
else if (pipe_ctx->stream_res.audio) {
- /*
- * if stream is already disabled outside of commit streams path,
- * audio disable was skipped. Need to do it here
- */
- pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
-
- if (dc->caps.dynamic_audio == true) {
- /*we have to dynamic arbitrate the audio endpoints*/
- pipe_ctx->stream_res.audio = NULL;
- /*we free the resource, need reset is_audio_acquired*/
- update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
- }
-
+ dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
}
}
@@ -842,7 +834,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
}
-static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
+void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
static bool should_log_hw_state; /* prevent hw state log by default */
@@ -877,7 +869,8 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
return;
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
- opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+ if (opp != NULL)
+ opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
dc->optimized_required = true;
@@ -1022,7 +1015,7 @@ static void dcn10_init_hw(struct dc *dc)
/* Reset all MPCC muxes */
dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct hubp *hubp = dc->res_pool->hubps[i];
@@ -1096,6 +1089,8 @@ static void dcn10_init_hw(struct dc *dc)
}
enable_power_gating_plane(dc->hwseq, true);
+
+ memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks));
}
static void reset_hw_ctx_wrap(
@@ -1164,12 +1159,19 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c
if (plane_state == NULL)
return;
+
addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
+
pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
pipe_ctx->plane_res.hubp,
&plane_state->address,
plane_state->flip_immediate);
+
plane_state->status.requested_address = plane_state->address;
+
+ if (plane_state->flip_immediate)
+ plane_state->status.current_address = plane_state->address;
+
if (addr_patched)
pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
@@ -1213,8 +1215,11 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
} else if (tf->type == TF_TYPE_BYPASS) {
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
} else {
- /*TF_TYPE_DISTRIBUTED_POINTS*/
- result = false;
+ cm_helper_translate_curve_to_degamma_hw_format(tf,
+ &dpp_base->degamma_params);
+ dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
+ &dpp_base->degamma_params);
+ result = true;
}
return result;
@@ -1355,10 +1360,11 @@ static void dcn10_enable_per_frame_crtc_position_reset(
DC_SYNC_INFO("Setting up\n");
for (i = 0; i < group_size; i++)
- grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
- grouped_pipes[i]->stream_res.tg,
- grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
- &grouped_pipes[i]->stream->triggered_crtc_reset);
+ if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
+ grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
+ grouped_pipes[i]->stream_res.tg,
+ grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
+ &grouped_pipes[i]->stream->triggered_crtc_reset);
DC_SYNC_INFO("Waiting for trigger\n");
@@ -1774,6 +1780,43 @@ static void dcn10_get_surface_visual_confirm_color(
}
}
+static void dcn10_get_hdr_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+ // Determine the overscan color based on the top-most (desktop) plane's context
+ struct pipe_ctx *top_pipe_ctx = pipe_ctx;
+
+ while (top_pipe_ctx->top_pipe != NULL)
+ top_pipe_ctx = top_pipe_ctx->top_pipe;
+
+ switch (top_pipe_ctx->plane_res.scl_data.format) {
+ case PIXEL_FORMAT_ARGB2101010:
+ if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_UNITY) {
+			/* HDR10, ARGB2101010 - set border color to red */
+ color->color_r_cr = color_value;
+ }
+ break;
+ case PIXEL_FORMAT_FP16:
+ if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
+			/* HDR10, FP16 - set border color to blue */
+ color->color_b_cb = color_value;
+ } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+			/* FreeSync 2 HDR - set border color to green */
+ color->color_g_y = color_value;
+ }
+ break;
+ default:
+		/* SDR - set border color to gray */
+ color->color_r_cr = color_value/2;
+ color->color_b_cb = color_value/2;
+ color->color_g_y = color_value/2;
+ break;
+ }
+}
+
static uint16_t fixed_point_to_int_frac(
struct fixed31_32 arg,
uint8_t integer_bits,
@@ -1854,11 +1897,10 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
-
-static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct mpcc_blnd_cfg blnd_cfg;
+ struct mpcc_blnd_cfg blnd_cfg = {0};
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
int mpcc_id;
struct mpcc *new_mpcc;
@@ -1869,13 +1911,17 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
/* TODO: proper fix once fpga works */
- if (dc->debug.surface_visual_confirm)
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
+ dcn10_get_hdr_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
+ } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
dcn10_get_surface_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
- else
+ } else {
color_space_to_black_color(
- dc, pipe_ctx->stream->output_color_space,
- &blnd_cfg.black_color);
+ dc, pipe_ctx->stream->output_color_space,
+ &blnd_cfg.black_color);
+ }
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
@@ -1964,18 +2010,17 @@ static void update_dchubp_dpp(
* divided by 2
*/
if (plane_state->update_flags.bits.full_update) {
- bool should_divided_by_2 = context->bw.dcn.calc_clk.dppclk_khz <=
- context->bw.dcn.cur_clk.dispclk_khz / 2;
+ bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
+ dc->res_pool->dccg->clks.dispclk_khz / 2;
dpp->funcs->dpp_dppclk_control(
dpp,
should_divided_by_2,
true);
- dc->current_state->bw.dcn.cur_clk.dppclk_khz =
- should_divided_by_2 ?
- context->bw.dcn.cur_clk.dispclk_khz / 2 :
- context->bw.dcn.cur_clk.dispclk_khz;
+ dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ?
+ dc->res_pool->dccg->clks.dispclk_khz / 2 :
+ dc->res_pool->dccg->clks.dispclk_khz;
}
/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -2001,7 +2046,7 @@ static void update_dchubp_dpp(
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.per_pixel_alpha_change)
- update_mpcc(dc, pipe_ctx);
+ dc->hwss.update_mpcc(dc, pipe_ctx);
if (plane_state->update_flags.bits.full_update ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
@@ -2063,12 +2108,13 @@ static void update_dchubp_dpp(
static void dcn10_blank_pixel_data(
struct dc *dc,
- struct stream_resource *stream_res,
- struct dc_stream_state *stream,
+ struct pipe_ctx *pipe_ctx,
bool blank)
{
enum dc_color_space color_space;
struct tg_color black_color = {0};
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+ struct dc_stream_state *stream = pipe_ctx->stream;
/* program otg blank color */
color_space = stream->output_color_space;
@@ -2110,6 +2156,33 @@ static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, hw_mult);
}
+void dcn10_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ if (pipe_ctx->plane_state->update_flags.bits.full_update)
+ dcn10_enable_plane(dc, pipe_ctx, context);
+
+ update_dchubp_dpp(dc, pipe_ctx, context);
+
+ set_hdr_multiplier(pipe_ctx);
+
+ if (pipe_ctx->plane_state->update_flags.bits.full_update ||
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change)
+ dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+
+ /* dcn10_translate_regamma_to_hw_format takes 750us to finish
+ * only do gamma programming for full update.
+ * TODO: This can be further optimized/cleaned up
+ * Always call this for now since it does memcmp inside before
+ * doing heavy calculation and programming
+ */
+ if (pipe_ctx->plane_state->update_flags.bits.full_update)
+ dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+}
+
static void program_all_pipe_in_tree(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -2127,31 +2200,12 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg);
- dc->hwss.blank_pixel_data(dc, &pipe_ctx->stream_res,
- pipe_ctx->stream, blank);
+ dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+
}
if (pipe_ctx->plane_state != NULL) {
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dcn10_enable_plane(dc, pipe_ctx, context);
-
- update_dchubp_dpp(dc, pipe_ctx, context);
-
- set_hdr_multiplier(pipe_ctx);
-
- if (pipe_ctx->plane_state->update_flags.bits.full_update ||
- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
-
- /* dcn10_translate_regamma_to_hw_format takes 750us to finish
- * only do gamma programming for full update.
- * TODO: This can be further optimized/cleaned up
- * Always call this for now since it does memcmp inside before
- * doing heavy calculation and programming
- */
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+ dcn10_program_pipe(dc, pipe_ctx, context);
}
if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) {
@@ -2165,12 +2219,12 @@ static void dcn10_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
- pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
- pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
- pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
+ pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+ pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
+ pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+ pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
@@ -2232,8 +2286,6 @@ static void dcn10_apply_ctx_for_surface(
int i;
struct timing_generator *tg;
bool removed_pipe[4] = { false };
- unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
- bool program_water_mark = false;
struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -2247,7 +2299,7 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes == 0) {
/* OTG blank before remove all front end */
- dc->hwss.blank_pixel_data(dc, &top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
+ dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
}
/* Disconnect unused mpcc */
@@ -2278,11 +2330,10 @@ static void dcn10_apply_ctx_for_surface(
old_pipe_ctx->plane_state &&
old_pipe_ctx->stream_res.tg == tg) {
- hwss1_plane_atomic_disconnect(dc, old_pipe_ctx);
+ dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
removed_pipe[i] = true;
- DC_LOG_DC(
- "Reset mpcc for pipe %d\n",
+ DC_LOG_DC("Reset mpcc for pipe %d\n",
old_pipe_ctx->pipe_idx);
}
}
@@ -2295,248 +2346,41 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == stream &&
- pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.full_update)
- program_water_mark = true;
-
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
if (removed_pipe[i])
- dcn10_disable_plane(dc, old_pipe_ctx);
- }
-
- if (program_water_mark) {
- if (dc->debug.sanity_checks) {
- /* pstate stuck check after watermark update */
- dcn10_verify_allow_pstate_change_high(dc);
- }
-
- /* watermark is for all pipes */
- hubbub1_program_watermarks(dc->res_pool->hubbub,
- &context->bw.dcn.watermarks, ref_clk_mhz);
+ dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
- if (dc->debug.sanity_checks) {
- /* pstate stuck check after watermark update */
- dcn10_verify_allow_pstate_change_high(dc);
- }
- }
-/* DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
- "\n============== Watermark parameters ==============\n"
- "a.urgent_ns: %d \n"
- "a.cstate_enter_plus_exit: %d \n"
- "a.cstate_exit: %d \n"
- "a.pstate_change: %d \n"
- "a.pte_meta_urgent: %d \n"
- "b.urgent_ns: %d \n"
- "b.cstate_enter_plus_exit: %d \n"
- "b.cstate_exit: %d \n"
- "b.pstate_change: %d \n"
- "b.pte_meta_urgent: %d \n",
- context->bw.dcn.watermarks.a.urgent_ns,
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns,
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns,
- context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns,
- context->bw.dcn.watermarks.a.pte_meta_urgent_ns,
- context->bw.dcn.watermarks.b.urgent_ns,
- context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns,
- context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns,
- context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns,
- context->bw.dcn.watermarks.b.pte_meta_urgent_ns
- );
- DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
- "\nc.urgent_ns: %d \n"
- "c.cstate_enter_plus_exit: %d \n"
- "c.cstate_exit: %d \n"
- "c.pstate_change: %d \n"
- "c.pte_meta_urgent: %d \n"
- "d.urgent_ns: %d \n"
- "d.cstate_enter_plus_exit: %d \n"
- "d.cstate_exit: %d \n"
- "d.pstate_change: %d \n"
- "d.pte_meta_urgent: %d \n"
- "========================================================\n",
- context->bw.dcn.watermarks.c.urgent_ns,
- context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns,
- context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns,
- context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns,
- context->bw.dcn.watermarks.c.pte_meta_urgent_ns,
- context->bw.dcn.watermarks.d.urgent_ns,
- context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns,
- context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns,
- context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns,
- context->bw.dcn.watermarks.d.pte_meta_urgent_ns
- );
-*/
-}
-
-static inline bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
-{
- return ((decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk);
-}
-
-static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
-{
- bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
- context->bw.dcn.calc_clk.dppclk_khz;
- bool dispclk_increase = context->bw.dcn.calc_clk.dispclk_khz >
- context->bw.dcn.cur_clk.dispclk_khz;
- int disp_clk_threshold = context->bw.dcn.calc_clk.max_supported_dppclk_khz;
- bool cur_dpp_div = context->bw.dcn.cur_clk.dispclk_khz >
- context->bw.dcn.cur_clk.dppclk_khz;
-
- /* increase clock, looking for div is 0 for current, request div is 1*/
- if (dispclk_increase) {
- /* already divided by 2, no need to reach target clk with 2 steps*/
- if (cur_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- /* request disp clk is lower than maximum supported dpp clk,
- * no need to reach target clk with two steps.
- */
- if (context->bw.dcn.calc_clk.dispclk_khz <= disp_clk_threshold)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- /* target dpp clk not request divided by 2, still within threshold */
- if (!request_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- } else {
- /* decrease clock, looking for current dppclk divided by 2,
- * request dppclk not divided by 2.
- */
-
- /* current dpp clk not divided by 2, no need to ramp*/
- if (!cur_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- /* current disp clk is lower than current maximum dpp clk,
- * no need to ramp
- */
- if (context->bw.dcn.cur_clk.dispclk_khz <= disp_clk_threshold)
- return context->bw.dcn.calc_clk.dispclk_khz;
-
- /* request dpp clk need to be divided by 2 */
- if (request_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
- }
-
- return disp_clk_threshold;
-}
-
-static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
-{
- int i;
- bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
- context->bw.dcn.calc_clk.dppclk_khz;
-
- int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
-
- /* set disp clk to dpp clk threshold */
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- dispclk_to_dpp_threshold);
-
- /* update request dpp clk division option */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (!pipe_ctx->plane_state)
- continue;
-
- pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
- pipe_ctx->plane_res.dpp,
- request_dpp_div,
- true);
- }
-
- /* If target clk not same as dppclk threshold, set to target clock */
- if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dcn.calc_clk.dispclk_khz);
- }
-
- context->bw.dcn.cur_clk.dispclk_khz =
- context->bw.dcn.calc_clk.dispclk_khz;
- context->bw.dcn.cur_clk.dppclk_khz =
- context->bw.dcn.calc_clk.dppclk_khz;
- context->bw.dcn.cur_clk.max_supported_dppclk_khz =
- context->bw.dcn.calc_clk.max_supported_dppclk_khz;
+ if (dc->hwseq->wa.DEGVIDCN10_254)
+ hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
static void dcn10_set_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool decrease_allowed)
+ bool safe_to_lower)
{
- struct pp_smu_display_requirement_rv *smu_req_cur =
- &dc->res_pool->pp_smu_req;
- struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
-
- if (dc->debug.sanity_checks) {
+ if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
- }
-
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- return;
-
- if (should_set_clock(
- decrease_allowed,
- context->bw.dcn.calc_clk.dcfclk_khz,
- dc->current_state->bw.dcn.cur_clk.dcfclk_khz)) {
- context->bw.dcn.cur_clk.dcfclk_khz =
- context->bw.dcn.calc_clk.dcfclk_khz;
- smu_req.hard_min_dcefclk_khz =
- context->bw.dcn.calc_clk.dcfclk_khz;
- }
-
- if (should_set_clock(
- decrease_allowed,
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz)) {
- context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
- }
-
- if (should_set_clock(
- decrease_allowed,
- context->bw.dcn.calc_clk.fclk_khz,
- dc->current_state->bw.dcn.cur_clk.fclk_khz)) {
- context->bw.dcn.cur_clk.fclk_khz =
- context->bw.dcn.calc_clk.fclk_khz;
- smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
- }
-
- smu_req.display_count = context->stream_count;
-
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- *smu_req_cur = smu_req;
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ if (context->stream_count == 0)
+ context->bw.dcn.clk.phyclk_khz = 0;
- /* make sure dcf clk is before dpp clk to
- * make sure we have enough voltage to run dpp clk
- */
- if (should_set_clock(
- decrease_allowed,
- context->bw.dcn.calc_clk.dispclk_khz,
- dc->current_state->bw.dcn.cur_clk.dispclk_khz)) {
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+ &context->bw.dcn.clk,
+ safe_to_lower);
- ramp_up_dispclk_with_dpp(dc, context);
+ dcn10_pplib_apply_display_requirements(dc, context);
}
- dcn10_pplib_apply_display_requirements(dc, context);
+ hubbub1_program_watermarks(dc->res_pool->hubbub,
+ &context->bw.dcn.watermarks,
+ dc->res_pool->ref_clock_inKhz / 1000,
+ true);
- if (dc->debug.sanity_checks) {
+ if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
- }
-
- /* need to fix this function. not doing the right thing here */
}
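The rewritten dcn10_set_bandwidth delegates clock ramping to dccg->funcs->update_clocks, which receives the same safe_to_lower flag that previously drove the per-clock tests, so the lowering policy now lives in one place; watermarks are then programmed with safe_to_lower = true, letting them drop once the new clock state applies. For reference, the policy the deleted helper open-coded per clock was:

        /* former should_set_clock(), kept here only as a model of the
         * policy presumably implemented inside update_clocks() */
        static bool should_set_clock(bool decrease_allowed, int calc, int cur)
        {
                return (decrease_allowed && calc < cur) || calc > cur;
        }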
static void set_drr(struct pipe_ctx **pipe_ctx,
@@ -2701,16 +2545,20 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
{
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ bool flip_pending;
if (plane_state == NULL)
return;
- plane_state->status.is_flip_pending =
- pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
+ flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
pipe_ctx->plane_res.hubp);
- plane_state->status.current_address = pipe_ctx->plane_res.hubp->current_address;
- if (pipe_ctx->plane_res.hubp->current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ plane_state->status.is_flip_pending = flip_pending;
+
+ if (!flip_pending)
+ plane_state->status.current_address = plane_state->status.requested_address;
+
+ if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
tg->funcs->is_stereo_left_eye) {
plane_state->status.is_right_eye =
!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
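Together with the earlier hubp changes (hubp1_program_surface_flip_and_addr and hubp1_is_flip_pending no longer maintain hubp->current_address), flip tracking now lives entirely in plane_state->status. The resulting handshake, written out as a C comment for reference (a reading aid, not part of the diff):

        /*
         * program a flip (dcn10_update_plane_addr):
         *         status.requested_address = new_address;
         *         if (flip_immediate)
         *                 status.current_address = new_address;
         *
         * poll (dcn10_update_pending_status):
         *         if (!hubp_is_flip_pending(hubp))
         *                 status.current_address = status.requested_address;
         */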
@@ -2719,8 +2567,14 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
- if (hws->ctx->dc->res_pool->hubbub != NULL)
- hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
+ if (hws->ctx->dc->res_pool->hubbub != NULL) {
+ struct hubp *hubp = hws->ctx->dc->res_pool->hubps[0];
+
+ if (hubp->funcs->hubp_update_dchub)
+ hubp->funcs->hubp_update_dchub(hubp, dh_data);
+ else
+ hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
+ }
}
static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
@@ -2731,9 +2585,11 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
- .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
- .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
- .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+ .viewport = pipe_ctx->plane_res.scl_data.viewport,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
+ .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
+ .rotation = pipe_ctx->plane_state->rotation,
+ .mirror = pipe_ctx->plane_state->horizontal_mirror
};
if (pipe_ctx->plane_state->address.type
@@ -2757,6 +2613,33 @@ static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, attributes->color_format);
}
+static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
+{
+ uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
+ struct fixed31_32 multiplier;
+ struct dpp_cursor_attributes opt_attr = { 0 };
+ uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
+ struct custom_float_format fmt;
+
+ if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
+ return;
+
+ fmt.exponenta_bits = 5;
+ fmt.mantissa_bits = 10;
+ fmt.sign = true;
+
+ if (sdr_white_level > 80) {
+ multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
+ convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
+ }
+
+ opt_attr.scale = hw_scale;
+ opt_attr.bias = 0;
+
+ pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
+ pipe_ctx->plane_res.dpp, &opt_attr);
+}
+
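The scale factor is the ratio of the stream's SDR white level to the 80 nit reference, encoded for the DPP as a custom half-precision float (sign, 5 exponent bits, 10 mantissa bits), with 0x3c00 (1.0 in that format) as the default. A worked example, assuming a 160 nit white level:

        multiplier = 160 / 80 = 2.0
        half(2.0)  = 0x4000        /* sign 0, biased exponent 16, mantissa 0 */

so opt_attr.scale becomes 0x4000 and cursor pixels are scaled 2x to match the surrounding SDR content; at 80 nits or below the default 0x3c00 (1.0) is kept.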
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_csc_matrix = program_csc_matrix,
@@ -2764,7 +2647,9 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
.update_plane_addr = dcn10_update_plane_addr,
+ .plane_atomic_disconnect = hwss1_plane_atomic_disconnect,
.update_dchub = dcn10_update_dchub,
+ .update_mpcc = dcn10_update_mpcc,
.update_pending_status = dcn10_update_pending_status,
.set_input_transfer_func = dcn10_set_input_transfer_func,
.set_output_transfer_func = dcn10_set_output_transfer_func,
@@ -2778,6 +2663,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.disable_stream = dce110_disable_stream,
.unblank_stream = dce110_unblank_stream,
.blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.disable_plane = dcn10_disable_plane,
.blank_pixel_data = dcn10_blank_pixel_data,
@@ -2800,7 +2687,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.edp_power_control = hwss_edp_power_control,
.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
- .set_cursor_attribute = dcn10_set_cursor_attribute
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 44f734b73f9e..7139fb73e966 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -39,4 +39,11 @@ bool is_rgb_cspace(enum dc_color_space output_color_space);
void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_verify_allow_pstate_change_high(struct dc *dc);
+
+void dcn10_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 21fa40ac0786..6f675206a136 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -65,11 +65,6 @@ enum {
DP_MST_UPDATE_MAX_RETRY = 50
};
-
-
-static void aux_initialize(struct dcn10_link_encoder *enc10);
-
-
static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
.validate_output_with_stream =
dcn10_link_encoder_validate_output_with_stream,
@@ -445,12 +440,11 @@ static uint8_t get_frontend_source(
}
}
-static void configure_encoder(
+void configure_encoder(
struct dcn10_link_encoder *enc10,
const struct dc_link_settings *link_settings)
{
/* set number of lanes */
-
REG_SET(DP_CONFIG, 0,
DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
@@ -602,6 +596,9 @@ static bool dcn10_link_encoder_validate_hdmi_output(
if (!enc10->base.features.flags.bits.HDMI_6GB_EN &&
adjusted_pix_clk_khz >= 300000)
return false;
+ if (enc10->base.ctx->dc->debug.hdmi20_disable &&
+ crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
return true;
}
@@ -734,6 +731,9 @@ void dcn10_link_encoder_construct(
__func__,
result);
}
+ if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
}
bool dcn10_link_encoder_validate_output_with_stream(
@@ -812,7 +812,7 @@ void dcn10_link_encoder_hw_init(
ASSERT(result == BP_RESULT_OK);
}
- aux_initialize(enc10);
+ dcn10_aux_initialize(enc10);
/* reinitialize HPD.
* hpd_initialize() will pass DIG_FE id to HW context.
@@ -995,6 +995,8 @@ void dcn10_link_encoder_disable_output(
if (!dcn10_is_dig_enabled(enc)) {
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
+		/* In the DP_Alt_No_Connect case the DIG is already turned off after
+		 * executing the PHY w/a sequence; do not touch the PHY any more. */
return;
}
/* Power-down RX and disable GPU PHY should be paired.
@@ -1347,8 +1349,7 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
FN(reg, f1), v1,\
FN(reg, f2), v2)
-static void aux_initialize(
- struct dcn10_link_encoder *enc10)
+void dcn10_aux_initialize(struct dcn10_link_encoder *enc10)
{
enum hpd_source_id hpd_source = enc10->base.hpd_source;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 2a97cdb2cfbb..49ead12b2532 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -42,6 +42,7 @@
#define LE_DCN_COMMON_REG_LIST(id) \
SRI(DIG_BE_CNTL, DIG, id), \
SRI(DIG_BE_EN_CNTL, DIG, id), \
+ SRI(TMDS_CTL_BITS, DIG, id), \
SRI(DP_CONFIG, DP, id), \
SRI(DP_DPHY_CNTL, DP, id), \
SRI(DP_DPHY_PRBS_CNTL, DP, id), \
@@ -64,6 +65,7 @@
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
+
#define LE_DCN10_REG_LIST(id)\
LE_DCN_COMMON_REG_LIST(id)
@@ -100,6 +102,7 @@ struct dcn10_link_enc_registers {
uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
uint32_t DP_SEC_CNTL1;
+ uint32_t TMDS_CTL_BITS;
};
#define LE_SF(reg_name, field_name, post_fix)\
@@ -110,6 +113,7 @@ struct dcn10_link_enc_registers {
LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_MODE, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\
+ LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\
LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\
LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE1, mask_sh),\
@@ -198,10 +202,11 @@ struct dcn10_link_enc_registers {
type DP_MSE_SAT_SLOT_COUNT3;\
type DP_MSE_SAT_UPDATE;\
type DP_MSE_16_MTP_KEEPOUT;\
+ type DC_HPD_EN;\
+ type TMDS_CTL0;\
type AUX_HPD_SEL;\
type AUX_LS_READ_EN;\
- type AUX_RX_RECEIVE_WINDOW;\
- type DC_HPD_EN
+ type AUX_RX_RECEIVE_WINDOW
struct dcn10_link_enc_shift {
DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
@@ -266,6 +271,10 @@ void dcn10_link_encoder_setup(
struct link_encoder *enc,
enum signal_type signal);
+void configure_encoder(
+ struct dcn10_link_encoder *enc10,
+ const struct dc_link_settings *link_settings);
+
/* enables TMDS PHY output */
/* TODO: still need depth or just pass in adjusted pixel clock? */
void dcn10_link_encoder_enable_tmds_output(
@@ -327,4 +336,6 @@ void dcn10_psr_program_secondary_packet(struct link_encoder *enc,
bool dcn10_is_dig_enabled(struct link_encoder *enc);
+void dcn10_aux_initialize(struct dcn10_link_encoder *enc10);
+
#endif /* __DC_LINK_ENCODER__DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 9ca51ae46de7..958994edf2c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -428,7 +428,7 @@ void mpc1_read_mpcc_state(
MPCC_BUSY, &s->busy);
}
-const struct mpc_funcs dcn10_mpc_funcs = {
+static const struct mpc_funcs dcn10_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 77a1a9d541a4..ab958cff3b76 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -385,7 +385,7 @@ void opp1_destroy(struct output_pixel_processor **opp)
*opp = NULL;
}
-static struct opp_funcs dcn10_opp_funcs = {
+static const struct opp_funcs dcn10_opp_funcs = {
.opp_set_dyn_expansion = opp1_set_dyn_expansion,
.opp_program_fmt = opp1_program_fmt,
.opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index f2fbce0e3fc5..411f89218e01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -1257,6 +1257,37 @@ void optc1_read_otg_state(struct optc *optc1,
OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
}
+bool optc1_get_otg_active_size(struct timing_generator *optc,
+ uint32_t *otg_active_width,
+ uint32_t *otg_active_height)
+{
+ uint32_t otg_enabled;
+ uint32_t v_blank_start;
+ uint32_t v_blank_end;
+ uint32_t h_blank_start;
+ uint32_t h_blank_end;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+
+ REG_GET(OTG_CONTROL,
+ OTG_MASTER_EN, &otg_enabled);
+
+ if (otg_enabled == 0)
+ return false;
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, &v_blank_start,
+ OTG_V_BLANK_END, &v_blank_end);
+
+ REG_GET_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, &h_blank_start,
+ OTG_H_BLANK_END, &h_blank_end);
+
+	*otg_active_width = h_blank_start - h_blank_end;
+	*otg_active_height = v_blank_start - v_blank_end;
+ return true;
+}
+
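Active size falls out of the blank window: blanking ends where the active region starts and begins where it ends, so active extent = blank_start - blank_end on each axis, pairing the H registers with width and the V registers with height as in the assignments above. A hypothetical 1920x1080 reading, assuming those register semantics:

        OTG_H_BLANK_END = 192,  OTG_H_BLANK_START = 2112  ->  width  = 1920
        OTG_V_BLANK_END = 31,   OTG_V_BLANK_START = 1111  ->  height = 1080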
void optc1_clear_optc_underflow(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -1293,6 +1324,72 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
return (underflow_occurred == 1);
}
+bool optc1_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ /* Cannot configure crc on a CRTC that is disabled */
+ if (!optc1_is_tg_enabled(optc))
+ return false;
+
+ REG_WRITE(OTG_CRC_CNTL, 0);
+
+ if (!params->enable)
+ return true;
+
+ /* Program frame boundaries */
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+ OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+ OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+ OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+ OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+	/* Set crc mode and selection, and enable. Only using CRC0 */
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+
+ return true;
+}
+
+bool optc1_get_crc(struct timing_generator *optc,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
+{
+ uint32_t field = 0;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OTG_CRC_CNTL, OTG_CRC_EN, &field);
+
+ /* Early return if CRC is not enabled for this CRTC */
+ if (!field)
+ return false;
+
+ REG_GET_2(OTG_CRC0_DATA_RG,
+ CRC0_R_CR, r_cr,
+ CRC0_G_Y, g_y);
+
+ REG_GET(OTG_CRC0_DATA_B,
+ CRC0_B_CB, b_cb);
+
+ return true;
+}
+
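optc1_configure_crc() programs both CRC windows on CRC0 (the sketch below leaves window B at zero) and requires the timing generator to be enabled; optc1_get_crc() then returns the three component CRCs once OTG_CRC_EN is set, in practice after at least one completed frame. A hedged usage sketch through the new timing_generator hooks (window extents and the selection value are illustrative):

        struct crc_params params = {
                .enable = true,
                .continuous_mode = true,
                .selection = 0,                 /* assumed: CRC0 source select */
                .windowa_x_start = 0, .windowa_x_end = 1920,
                .windowa_y_start = 0, .windowa_y_end = 1080,
        };
        uint32_t r_cr, g_y, b_cb;

        if (tg->funcs->configure_crc(tg, &params) &&
            tg->funcs->get_crc(tg, &r_cr, &g_y, &b_cb))
                DC_LOG_DC("CRC0: %08x %08x %08x\n", r_cr, g_y, b_cb);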
static const struct timing_generator_funcs dcn10_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -1305,6 +1402,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.get_position = optc1_get_position,
.get_frame_count = optc1_get_vblank_counter,
.get_scanoutpos = optc1_get_crtc_scanoutpos,
+ .get_otg_active_size = optc1_get_otg_active_size,
.set_early_control = optc1_set_early_control,
/* used by enable_timing_synchronization. Not need for FPGA */
.wait_for_state = optc1_wait_for_state,
@@ -1328,6 +1426,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.is_tg_enabled = optc1_is_tg_enabled,
.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
.clear_optc_underflow = optc1_clear_optc_underflow,
+ .get_crc = optc1_get_crc,
+ .configure_crc = optc1_configure_crc,
};
void dcn10_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index c62052f46460..c1b114209fe8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -75,7 +75,14 @@
SRI(CONTROL, VTG, inst),\
SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
SRI(OTG_MASTER_UPDATE_MODE, OTG, inst),\
- SRI(OTG_GSL_CONTROL, OTG, inst)
+ SRI(OTG_GSL_CONTROL, OTG, inst),\
+ SRI(OTG_CRC_CNTL, OTG, inst),\
+ SRI(OTG_CRC0_DATA_RG, OTG, inst),\
+ SRI(OTG_CRC0_DATA_B, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst)
#define TG_COMMON_REG_LIST_DCN1_0(inst) \
TG_COMMON_REG_LIST_DCN(inst),\
@@ -138,6 +145,13 @@ struct dcn_optc_registers {
uint32_t OTG_GSL_WINDOW_X;
uint32_t OTG_GSL_WINDOW_Y;
uint32_t OTG_VUPDATE_KEEPOUT;
+ uint32_t OTG_CRC_CNTL;
+ uint32_t OTG_CRC0_DATA_RG;
+ uint32_t OTG_CRC0_DATA_B;
+ uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
+ uint32_t OTG_CRC0_WINDOWA_Y_CONTROL;
+ uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
+ uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -232,7 +246,21 @@ struct dcn_optc_registers {
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\
- SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh)
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh)
#define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
@@ -363,7 +391,22 @@ struct dcn_optc_registers {
type OTG_MASTER_UPDATE_LOCK_GSL_EN;\
type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET;\
type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET;\
- type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;
+ type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;\
+ type OTG_CRC_CONT_EN;\
+ type OTG_CRC0_SELECT;\
+ type OTG_CRC_EN;\
+ type CRC0_R_CR;\
+ type CRC0_G_Y;\
+ type CRC0_B_CB;\
+ type OTG_CRC0_WINDOWA_X_START;\
+ type OTG_CRC0_WINDOWA_X_END;\
+ type OTG_CRC0_WINDOWA_Y_START;\
+ type OTG_CRC0_WINDOWA_Y_END;\
+ type OTG_CRC0_WINDOWB_X_START;\
+ type OTG_CRC0_WINDOWB_X_END;\
+ type OTG_CRC0_WINDOWB_Y_START;\
+ type OTG_CRC0_WINDOWB_Y_END;
+
#define TG_REG_FIELD_LIST(type) \
TG_REG_FIELD_LIST_DCN1_0(type)
@@ -507,4 +550,19 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc);
void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable);
+bool optc1_get_otg_active_size(struct timing_generator *optc,
+ uint32_t *otg_active_width,
+ uint32_t *otg_active_height);
+
+void optc1_enable_crtc_reset(
+ struct timing_generator *optc,
+ int source_tg_inst,
+ struct crtc_trigger_info *crtc_tp);
+
+bool optc1_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params);
+
+bool optc1_get_crc(struct timing_generator *optc,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
+
#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
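The two hooks exported above give the DCN1.0 OTG a CRC capture path over the new OTG_CRC0 window registers. A minimal caller sketch, assuming struct crc_params carries enable/continuous-mode fields (the field names are illustrative, not taken from this patch):

/* Hedged sketch: drive the new CRC hooks on a timing generator.
 * The crc_params member names below are assumptions.
 */
static bool capture_frame_crc(struct timing_generator *tg,
                              uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
        struct crc_params params = {0};

        params.enable = true;           /* arm CRC0, full-frame window */
        params.continuous_mode = true;  /* latch a fresh CRC every frame */

        if (!tg->funcs->configure_crc(tg, &params))
                return false;           /* CRC not supported on this TG */

        /* read back the per-component CRCs computed by the OTG */
        return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
}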
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 34dac84066a0..6b44ed3697a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -64,6 +64,69 @@
#include "reg_helper.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
+
+const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = 164,
+ .dpte_buffer_size_in_pte_reqs = 42,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .pte_enable = 1,
+ .pte_chunk_size_kbytes = 2,
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 2,
+ .line_buffer_size_bits = 589824,
+ .max_line_buffer_lines = 12,
+ .IsLineBufferBppFixed = 0,
+ .LineBufferFixedBpp = -1,
+ .writeback_luma_buffer_size_kbytes = 12,
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .max_num_dpp = 4,
+ .max_num_wb = 2,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 4,
+ .max_vscl_ratio = 4,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.10,
+ .min_vblank_lines = 14,
+ .dppclk_delay_subtotal = 90,
+ .dispclk_delay_subtotal = 42,
+ .dcfclk_cstate_latency = 10,
+ .max_inter_dcn_tile_repeaters = 8,
+ .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
+ .bug_forcing_LC_req_same_size_fixed = 0,
+};
+
+const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .urgent_latency_us = 4.0,
+ .writeback_latency_us = 12.0,
+ .ideal_dram_bw_after_urgent_percent = 80.0,
+ .max_request_size_bytes = 256,
+ .downspread_percent = 0.5,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 128,
+ .urgent_out_of_order_return_per_channel_bytes = 256,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 2,
+ .vmm_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 17.0,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+};
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
@@ -294,6 +357,21 @@ static const struct dcn10_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN10(_MASK),
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN10(id),\
@@ -417,13 +495,14 @@ static const struct dce110_clk_src_mask cs_mask = {
static const struct resource_caps res_cap = {
.num_timing_generator = 4,
+ .num_opp = 4,
.num_video_plane = 4,
.num_audio = 4,
.num_stream_encoder = 4,
.num_pll = 4,
};
-static const struct dc_debug debug_defaults_drv = {
+static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
.disable_dmcu = true,
.force_abm_enable = false,
@@ -436,7 +515,7 @@ static const struct dc_debug debug_defaults_drv = {
*/
.min_disp_clk_khz = 100000,
- .disable_pplib_clock_request = true,
+ .disable_pplib_clock_request = false,
.disable_pplib_wm_range = false,
.pplib_wm_report_mode = WM_REPORT_DEFAULT,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
@@ -451,7 +530,7 @@ static const struct dc_debug debug_defaults_drv = {
.max_downscale_src_width = 3840,
};
-static const struct dc_debug debug_defaults_diags = {
+static const struct dc_debug_options debug_defaults_diags = {
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = true,
@@ -515,6 +594,23 @@ static struct output_pixel_processor *dcn10_opp_create(
return &opp->base;
}
+struct aux_engine *dcn10_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
{
struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
@@ -680,6 +776,7 @@ static struct dce_hwseq *dcn10_hwseq_create(
hws->masks = &hwseq_mask;
hws->wa.DEGVIDCN10_253 = true;
hws->wa.false_optc_underflow = true;
+ hws->wa.DEGVIDCN10_254 = true;
}
return hws;
}
@@ -762,6 +859,9 @@ static void destruct(struct dcn10_resource_pool *pool)
kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]);
}
for (i = 0; i < pool->base.stream_enc_count; i++)
@@ -790,8 +890,8 @@ static void destruct(struct dcn10_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.display_clock != NULL)
- dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.dccg != NULL)
+ dce_dccg_destroy(&pool->base.dccg);
kfree(pool->base.pp_smu);
}
@@ -971,11 +1071,11 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
return DC_OK;
}
-static struct dc_cap_funcs cap_funcs = {
+static const struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
};
-static struct resource_funcs dcn10_res_pool_funcs = {
+static const struct resource_funcs dcn10_res_pool_funcs = {
.destroy = dcn10_destroy_resource_pool,
.link_enc_create = dcn10_link_encoder_create,
.validate_bandwidth = dcn_validate_bandwidth,
@@ -1072,8 +1172,8 @@ static bool construct(
}
}
- pool->base.display_clock = dce120_disp_clk_create(ctx);
- if (pool->base.display_clock == NULL) {
+ pool->base.dccg = dcn1_dccg_create(ctx);
+ if (pool->base.dccg == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto fail;
@@ -1193,6 +1293,14 @@ static bool construct(
goto fail;
}
+ pool->base.engines[i] = dcn10_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto fail;
+ }
+
/* check next valid pipe */
j++;
}
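The aux_engine_regs(id) macro in this file builds a per-instance register table with C99 designated array initializers, the same idiom already used for tf_regs and the other per-pipe tables. A stripped-down sketch of the pattern with hypothetical names:

/* Illustrative only: per-instance register tables via designated
 * initializers (the aux_engine_regs pattern).
 */
struct demo_regs { unsigned int base; };

#define demo_reg_entry(id) \
        [id] = { .base = 0x1000 + 0x100 * (id) }

static const struct demo_regs demo_regs[] = {
        demo_reg_entry(0),
        demo_reg_entry(1),
        demo_reg_entry(2),      /* demo_regs[2].base == 0x1200 */
};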
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index c928ee4cd382..6f9078f3c4d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -257,20 +257,18 @@ void enc1_stream_encoder_dp_set_stream_attribute(
uint8_t colorimetry_bpc;
uint8_t dynamic_range_rgb = 0; /*full range*/
uint8_t dynamic_range_ycbcr = 1; /*bt709*/
+ uint8_t dp_pixel_encoding = 0;
+ uint8_t dp_component_depth = 0;
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
-
/* set pixel encoding */
switch (crtc_timing->pixel_encoding) {
case PIXEL_ENCODING_YCBCR422:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_YCBCR422);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR422;
break;
case PIXEL_ENCODING_YCBCR444:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_YCBCR444);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR444;
if (crtc_timing->flags.Y_ONLY)
if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
@@ -278,8 +276,8 @@ void enc1_stream_encoder_dp_set_stream_attribute(
* Color depth of Y-only could be
* 8, 10, 12, 16 bits
*/
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_Y_ONLY);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_Y_ONLY;
+
/* Note: DP_MSA_MISC1 bit 7 is the indicator
* of Y-only mode.
* This bit is set in HW if register
@@ -287,48 +285,55 @@ void enc1_stream_encoder_dp_set_stream_attribute(
*/
break;
case PIXEL_ENCODING_YCBCR420:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_YCBCR420);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR420;
REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
break;
default:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_TYPE_RGB444);
+ dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_RGB444;
break;
}
misc1 = REG_READ(DP_MSA_MISC);
+ /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used.
+ * When MISC1, bit 6, is set to 1, a Source device uses a VSC SDP to indicate the
+ * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
+ * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
+ */
+ if ((crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
+ (output_color_space == COLOR_SPACE_2020_YCBCR) ||
+ (output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) ||
+ (output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE))
+ misc1 = misc1 | 0x40;
+ else
+ misc1 = misc1 & ~0x40;
/* set color depth */
-
switch (crtc_timing->display_color_depth) {
case COLOR_DEPTH_666:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- 0);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
break;
case COLOR_DEPTH_888:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_8BPC);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_8BPC;
break;
case COLOR_DEPTH_101010:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_10BPC);
-
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_10BPC;
break;
case COLOR_DEPTH_121212:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_12BPC);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_12BPC;
break;
case COLOR_DEPTH_161616:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_16BPC);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_16BPC;
break;
default:
- REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_PIXEL_DEPTH_6BPC);
+ dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
break;
}
+ /* Set DP pixel encoding and component depth */
+ REG_UPDATE_2(DP_PIXEL_FORMAT,
+ DP_PIXEL_ENCODING, dp_pixel_encoding,
+ DP_COMPONENT_DEPTH, dp_component_depth);
+
/* set dynamic range and YCbCr range */
switch (crtc_timing->display_color_depth) {
@@ -354,7 +359,6 @@ void enc1_stream_encoder_dp_set_stream_attribute(
switch (output_color_space) {
case COLOR_SPACE_SRGB:
- misc0 = misc0 | 0x0;
misc1 = misc1 & ~0x80; /* bit7 = 0*/
dynamic_range_rgb = 0; /*full range*/
break;
@@ -1087,27 +1091,6 @@ static union audio_cea_channels speakers_to_channels(
return cea_channels;
}
-static uint32_t calc_max_audio_packets_per_line(
- const struct audio_crtc_info *crtc_info)
-{
- uint32_t max_packets_per_line;
-
- max_packets_per_line =
- crtc_info->h_total - crtc_info->h_active;
-
- if (crtc_info->pixel_repetition)
- max_packets_per_line *= crtc_info->pixel_repetition;
-
- /* for other hdmi features */
- max_packets_per_line -= 58;
- /* for Control Period */
- max_packets_per_line -= 16;
- /* Number of Audio Packets per Line */
- max_packets_per_line /= 32;
-
- return max_packets_per_line;
-}
-
static void get_audio_clock_info(
enum dc_color_depth color_depth,
uint32_t crtc_pixel_clock_in_khz,
@@ -1201,16 +1184,9 @@ static void enc1_se_setup_hdmi_audio(
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
struct audio_clock_info audio_clock_info = {0};
- uint32_t max_packets_per_line;
-
- /* For now still do calculation, although this field is ignored when
- * above HDMI_PACKET_GEN_VERSION set to 1
- */
- max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
/* HDMI_AUDIO_PACKET_CONTROL */
- REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL,
- HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line,
+ REG_UPDATE(HDMI_AUDIO_PACKET_CONTROL,
HDMI_AUDIO_DELAY_EN, 1);
/* AFMT_AUDIO_PACKET_CONTROL */
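The MISC1 handling added in this file follows the DP rule its comment quotes: once colorimetry travels in a VSC SDP, the legacy MSA MISC bits become don't-care and bit 6 must be set. A sketch of the same test factored into a predicate (the enum type names are assumptions):

/* Sketch: decide whether colorimetry must be signalled via VSC SDP
 * (DP_MSA_MISC1 bit 6).  Enum type names assumed.
 */
static bool needs_vsc_sdp(enum dc_pixel_encoding enc, enum dc_color_space cs)
{
        return enc == PIXEL_ENCODING_YCBCR420 ||
               cs == COLOR_SPACE_2020_YCBCR ||
               cs == COLOR_SPACE_2020_RGB_FULLRANGE ||
               cs == COLOR_SPACE_2020_RGB_LIMITEDRANGE;
}

        misc1 = needs_vsc_sdp(crtc_timing->pixel_encoding, output_color_space)
                        ? (misc1 | 0x40)        /* colorimetry in VSC SDP */
                        : (misc1 & ~0x40);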
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 034369fbb9e2..5d4527d03045 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -40,6 +40,14 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
const struct dc_edid *edid,
struct dc_edid_caps *edid_caps);
+
+/*
+ * Update DP branch info
+ */
+void dm_helpers_dp_update_branch_info(
+ struct dc_context *ctx,
+ const struct dc_link *link);
+
/*
* Writes payload allocation table in immediate downstream device.
*/
@@ -103,6 +111,9 @@ bool dm_helpers_submit_i2c(
const struct dc_link *link,
struct i2c_command *cmd);
+bool dm_helpers_is_dp_sink_present(
+ struct dc_link *link);
+
enum dc_edid_status dm_helpers_read_local_edid(
struct dc_context *ctx,
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index eac4bfe12257..58ed2055ef9f 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -40,7 +40,7 @@ enum wm_set_id {
WM_B,
WM_C,
WM_D,
- WM_COUNT,
+ WM_SET_COUNT,
};
struct pp_smu_wm_set_range {
@@ -53,10 +53,10 @@ struct pp_smu_wm_set_range {
struct pp_smu_wm_range_sets {
uint32_t num_reader_wm_sets;
- struct pp_smu_wm_set_range reader_wm_sets[WM_COUNT];
+ struct pp_smu_wm_set_range reader_wm_sets[WM_SET_COUNT];
uint32_t num_writer_wm_sets;
- struct pp_smu_wm_set_range writer_wm_sets[WM_COUNT];
+ struct pp_smu_wm_set_range writer_wm_sets[WM_SET_COUNT];
};
struct pp_smu_display_requirement_rv {
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 4ff9b2bba178..eb5ab3978e84 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -339,7 +339,10 @@ bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);
#define dm_log_to_buffer(buffer, size, fmt, args)\
vsnprintf(buffer, size, fmt, args)
-unsigned long long dm_get_timestamp(struct dc_context *ctx);
+static inline unsigned long long dm_get_timestamp(struct dc_context *ctx)
+{
+ return ktime_get_raw_ns();
+}
unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
unsigned long long current_time_stamp,
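With dm_get_timestamp() inlined to ktime_get_raw_ns(), a timestamp is just raw monotonic nanoseconds, so elapsed-time measurement reduces to a subtraction. A small usage sketch (ctx is any valid struct dc_context *):

/* Sketch: timing a code section with the now-inline helper. */
unsigned long long start = dm_get_timestamp(ctx);       /* raw ns */

/* ... work being timed ... */

unsigned long long elapsed_ns = dm_get_timestamp(ctx) - start;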
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index ab8c77d4e6df..2b83f922ac02 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -137,7 +137,7 @@ struct dm_pp_clock_range_for_wm_set {
enum dm_pp_wm_set_id wm_set_id;
uint32_t wm_min_eng_clk_in_khz;
uint32_t wm_max_eng_clk_in_khz;
- uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_min_mem_clk_in_khz;
uint32_t wm_max_mem_clk_in_khz;
};
@@ -150,7 +150,7 @@ struct dm_pp_clock_range_for_dmif_wm_set_soc15 {
enum dm_pp_wm_set_id wm_set_id;
uint32_t wm_min_dcfclk_clk_in_khz;
uint32_t wm_max_dcfclk_clk_in_khz;
- uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_min_mem_clk_in_khz;
uint32_t wm_max_mem_clk_in_khz;
};
@@ -158,7 +158,7 @@ struct dm_pp_clock_range_for_mcif_wm_set_soc15 {
enum dm_pp_wm_set_id wm_set_id;
uint32_t wm_min_socclk_clk_in_khz;
uint32_t wm_max_socclk_clk_in_khz;
- uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_min_mem_clk_in_khz;
uint32_t wm_max_mem_clk_in_khz;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index f83a608f93e9..d97ca6528f9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -36,11 +36,10 @@ CFLAGS_display_mode_lib.o := $(dml_ccflags)
CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags)
CFLAGS_display_rq_dlg_helpers.o := $(dml_ccflags)
-CFLAGS_soc_bounding_box.o := $(dml_ccflags)
CFLAGS_dml_common_defs.o := $(dml_ccflags)
DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
- soc_bounding_box.o dml_common_defs.o
+ dml_common_defs.o
AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index fd9d97aab071..dddeb0d4db8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -26,67 +26,8 @@
#include "display_mode_lib.h"
#include "dc_features.h"
-static const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = 164,
- .dpte_buffer_size_in_pte_reqs = 42,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .pte_enable = 1,
- .pte_chunk_size_kbytes = 2,
- .meta_chunk_size_kbytes = 2,
- .writeback_chunk_size_kbytes = 2,
- .line_buffer_size_bits = 589824,
- .max_line_buffer_lines = 12,
- .IsLineBufferBppFixed = 0,
- .LineBufferFixedBpp = -1,
- .writeback_luma_buffer_size_kbytes = 12,
- .writeback_chroma_buffer_size_kbytes = 8,
- .max_num_dpp = 4,
- .max_num_wb = 2,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 4,
- .max_vscl_ratio = 4,
- .hscl_mults = 4,
- .vscl_mults = 4,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dispclk_ramp_margin_percent = 1,
- .underscan_factor = 1.10,
- .min_vblank_lines = 14,
- .dppclk_delay_subtotal = 90,
- .dispclk_delay_subtotal = 42,
- .dcfclk_cstate_latency = 10,
- .max_inter_dcn_tile_repeaters = 8,
- .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
- .bug_forcing_LC_req_same_size_fixed = 0,
-};
-
-static const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .urgent_latency_us = 4.0,
- .writeback_latency_us = 12.0,
- .ideal_dram_bw_after_urgent_percent = 80.0,
- .max_request_size_bytes = 256,
- .downspread_percent = 0.5,
- .dram_page_open_time_ns = 50.0,
- .dram_rw_turnaround_time_ns = 17.5,
- .dram_return_buffer_per_channel_bytes = 8192,
- .round_trip_ping_latency_dcfclk_cycles = 128,
- .urgent_out_of_order_return_per_channel_bytes = 256,
- .channel_interleave_bytes = 256,
- .num_banks = 8,
- .num_chans = 2,
- .vmm_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 17.0,
- .writeback_dram_clock_change_latency_us = 23.0,
- .return_bus_width_bytes = 64,
-};
+extern const struct _vcs_dpi_ip_params_st dcn1_0_ip;
+extern const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc;
static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 3c2abcb8a1b0..635206248889 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -27,7 +27,6 @@
#include "dml_common_defs.h"
-#include "soc_bounding_box.h"
#include "dml1_display_rq_dlg_calc.h"
enum dml_project {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 7fa0375939ae..cbafce649e33 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -64,10 +64,9 @@ struct _vcs_dpi_voltage_scaling_st {
double dscclk_mhz;
double dcfclk_mhz;
double socclk_mhz;
- double dram_speed_mhz;
+ double dram_speed_mts;
double fabricclk_mhz;
double dispclk_mhz;
- double dram_bw_per_chan_gbps;
double phyclk_mhz;
double dppclk_mhz;
};
@@ -112,6 +111,8 @@ struct _vcs_dpi_soc_bounding_box_st {
double xfc_bus_transport_time_us;
double xfc_xbuf_latency_tolerance_us;
int use_urgent_burst_bw;
+ double max_hscl_ratio;
+ double max_vscl_ratio;
struct _vcs_dpi_voltage_scaling_st clock_limits[7];
};
@@ -304,6 +305,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned char otg_inst;
unsigned char odm_split_cnt;
unsigned char odm_combine;
+ unsigned char use_maximum_vstartup;
};
struct _vcs_dpi_display_pipe_params_st {
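Renaming dram_speed_mhz to dram_speed_mts makes the unit honest: DDR moves data on both clock edges, so speed is quoted in megatransfers per second rather than clock MHz. Illustrative arithmetic only, not DML code:

/* 1200 MHz DDR clock -> 2 transfers per clock -> 2400 MT/s.
 * Peak bandwidth for a 64-bit (8-byte) channel:
 *   2400 MT/s * 8 B = 19200 MB/s = 19.2 GB/s
 */
static double peak_channel_bw_gbps(double dram_speed_mts, double bus_bytes)
{
        return dram_speed_mts * bus_bytes / 1000.0;
}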
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
deleted file mode 100644
index 324239c77958..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-#include "soc_bounding_box.h"
-#include "display_mode_lib.h"
-#include "dc_features.h"
-
-#include "dml_inline_defs.h"
-
-/*
- * NOTE:
- * This file is gcc-parseable HW gospel, coming straight from HW engineers.
- *
- * It doesn't adhere to Linux kernel style and sometimes will do things in odd
- * ways. Unless there is something clearly wrong with it the code should
- * remain as-is as it provides us with a guarantee from HW that it is correct.
- */
-
-void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box)
-{
- to_box->dram_clock_change_latency_us = from_box->dram_clock_change_latency_us;
- to_box->sr_exit_time_us = from_box->sr_exit_time_us;
- to_box->sr_enter_plus_exit_time_us = from_box->sr_enter_plus_exit_time_us;
- to_box->urgent_latency_us = from_box->urgent_latency_us;
- to_box->writeback_latency_us = from_box->writeback_latency_us;
-}
-
-voltage_scaling_st dml_socbb_voltage_scaling(
- const soc_bounding_box_st *soc,
- enum voltage_state voltage)
-{
- const voltage_scaling_st *voltage_state;
- const voltage_scaling_st * const voltage_end = soc->clock_limits + DC__VOLTAGE_STATES;
-
- for (voltage_state = soc->clock_limits;
- voltage_state < voltage_end && voltage_state->state != voltage;
- voltage_state++) {
- }
-
- if (voltage_state < voltage_end)
- return *voltage_state;
- return soc->clock_limits[DC__VOLTAGE_STATES - 1];
-}
-
-double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage)
-{
- double return_bw;
-
- voltage_scaling_st state = dml_socbb_voltage_scaling(box, voltage);
-
- return_bw = dml_min((double) box->return_bus_width_bytes * state.dcfclk_mhz,
- state.dram_bw_per_chan_gbps * 1000.0 * (double) box->num_chans
- * box->ideal_dram_bw_after_urgent_percent / 100.0);
-
- return_bw = dml_min((double) box->return_bus_width_bytes * state.fabricclk_mhz, return_bw);
-
- return return_bw;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
index 562ee189d780..b9d9930a4974 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -61,7 +61,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120)
###############################################################################
# DCN 1x
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o
AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10))
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
index 9c4a56c738c0..bf40725f982f 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -82,13 +82,16 @@
DDC_GPIO_I2C_REG_LIST(cd),\
.ddc_setup = 0
-#define DDC_MASK_SH_LIST(mask_sh) \
+#define DDC_MASK_SH_LIST_COMMON(mask_sh) \
SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\
SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_MODE, mask_sh),\
SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1DATA_PD_EN, mask_sh),\
SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1CLK_PD_EN, mask_sh),\
- SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh),\
+ SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh)
+
+#define DDC_MASK_SH_LIST(mask_sh) \
+ DDC_MASK_SH_LIST_COMMON(mask_sh),\
SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\
SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh)
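Splitting the list into a COMMON core means a later ASIC family can compose its own variant without the pre-DCN I2CPAD fields. A one-line illustration (the macro name is hypothetical):

/* Hypothetical DCN variant: reuse the shared core, skip I2CPAD */
#define DDC_MASK_SH_LIST_DCN(mask_sh) \
        DDC_MASK_SH_LIST_COMMON(mask_sh)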
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index ab5483c0c502..f20161c5706d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -375,6 +375,7 @@ struct gpio *dal_gpio_create_irq(
case GPIO_ID_GPIO_PAD:
break;
default:
+ id = GPIO_ID_HPD;
ASSERT_CRITICAL(false);
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index 0caee3523017..83df779984e5 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -43,7 +43,7 @@
#include "dce80/hw_factory_dce80.h"
#include "dce110/hw_factory_dce110.h"
#include "dce120/hw_factory_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "dcn10/hw_factory_dcn10.h"
#endif
@@ -81,7 +81,7 @@ bool dal_hw_factory_init(
case DCE_VERSION_12_0:
dal_hw_factory_dce120_init(factory);
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case DCN_VERSION_1_0:
dal_hw_factory_dcn10_init(factory);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 55c707488541..e7541310480b 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -43,7 +43,7 @@
#include "dce80/hw_translate_dce80.h"
#include "dce110/hw_translate_dce110.h"
#include "dce120/hw_translate_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "dcn10/hw_translate_dcn10.h"
#endif
@@ -78,7 +78,7 @@ bool dal_hw_translate_init(
case DCE_VERSION_12_0:
dal_hw_translate_dce120_init(translate);
return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case DCN_VERSION_1_0:
dal_hw_translate_dcn10_init(translate);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
index 352885cb4d07..a851d07f0190 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
@@ -71,7 +71,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE112)
###############################################################################
# DCN 1.0 family
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
I2CAUX_DCN1 = i2caux_dcn10.o
AMD_DAL_I2CAUX_DCN1 = $(addprefix $(AMDDALPATH)/dc/i2caux/dcn10/,$(I2CAUX_DCN1))
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
index bb526ad326e5..0afd2fa57bbe 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
@@ -128,8 +128,20 @@ static void process_read_reply(
ctx->status =
I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
ctx->operation_succeeded = false;
+ } else if (ctx->returned_byte < ctx->current_read_length) {
+ ctx->current_read_length -= ctx->returned_byte;
+
+ ctx->offset += ctx->returned_byte;
+
+ ++ctx->invalid_reply_retry_aux_on_ack;
+
+ if (ctx->invalid_reply_retry_aux_on_ack >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ }
} else {
- ctx->current_read_length = ctx->returned_byte;
ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
ctx->transaction_complete = true;
ctx->operation_succeeded = true;
@@ -157,6 +169,10 @@ static void process_read_reply(
ctx->operation_succeeded = false;
}
break;
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
@@ -215,6 +231,10 @@ static void process_read_request(
* so we should not wait here */
}
break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
@@ -282,7 +302,6 @@ static bool read_command(
ctx.operation_succeeded);
}
- request->payload.length = ctx.reply.length;
return ctx.operation_succeeded;
}
@@ -370,6 +389,10 @@ static void process_write_reply(
ctx->operation_succeeded = false;
}
break;
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
@@ -422,6 +445,10 @@ static void process_write_request(
* so we should not wait here */
}
break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
default:
ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
ctx->operation_succeeded = false;
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
index 8e71324ccb10..c33a2898d967 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
@@ -26,46 +26,7 @@
#ifndef __DAL_AUX_ENGINE_H__
#define __DAL_AUX_ENGINE_H__
-enum aux_transaction_type {
- AUX_TRANSACTION_TYPE_DP,
- AUX_TRANSACTION_TYPE_I2C
-};
-
-struct aux_request_transaction_data {
- enum aux_transaction_type type;
- enum i2caux_transaction_action action;
- /* 20-bit AUX channel transaction address */
- uint32_t address;
- /* delay, in 100-microsecond units */
- uint8_t delay;
- uint32_t length;
- uint8_t *data;
-};
-
-enum aux_transaction_reply {
- AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
- AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
- AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
-
- AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
- AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
- AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
-
- AUX_TRANSACTION_REPLY_INVALID = 0xFF
-};
-
-struct aux_reply_transaction_data {
- enum aux_transaction_reply status;
- uint32_t length;
- uint8_t *data;
-};
-
-enum aux_channel_operation_result {
- AUX_CHANNEL_OPERATION_SUCCEEDED,
- AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
- AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
- AUX_CHANNEL_OPERATION_FAILED_TIMEOUT
-};
+#include "dc_ddc_types.h"
struct aux_engine;
@@ -83,6 +44,12 @@ struct aux_engine_funcs {
void (*process_channel_reply)(
struct aux_engine *engine,
struct aux_reply_transaction_data *reply);
+ int (*read_channel_reply)(
+ struct aux_engine *engine,
+ uint32_t size,
+ uint8_t *buffer,
+ uint8_t *reply_result,
+ uint32_t *sw_status);
enum aux_channel_operation_result (*get_channel_status)(
struct aux_engine *engine,
uint8_t *returned_bytes);
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
index e8d3781deaed..8b704ab0471c 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
@@ -97,6 +97,7 @@ struct i2caux *dal_i2caux_dce100_create(
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dce100_aux_regs),
dce100_aux_regs,
dce100_hw_engine_regs,
&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
index 5f47f6c007ac..ae5caa97caca 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
@@ -198,27 +198,27 @@ static void submit_channel_request(
((request->type == AUX_TRANSACTION_TYPE_I2C) &&
((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
(request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
+ if (REG(AUXN_IMPCAL)) {
+ /* clear_aux_error */
+ REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+ 1,
+ 0);
- /* clear_aux_error */
- REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
- 1,
- 0);
-
- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
- 1,
- 0);
-
- /* force_default_calibrate */
- REG_UPDATE_1BY1_2(AUXN_IMPCAL,
- AUXN_IMPCAL_ENABLE, 1,
- AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+ 1,
+ 0);
- /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
+ /* force_default_calibrate */
+ REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+ AUXN_IMPCAL_ENABLE, 1,
+ AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
- 1,
- 0);
+ /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+ 1,
+ 0);
+ }
/* set the delay and the number of bytes to write */
/* The length include
@@ -275,55 +275,92 @@ static void submit_channel_request(
REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
}
-static void process_channel_reply(
- struct aux_engine *engine,
- struct aux_reply_transaction_data *reply)
+static int read_channel_reply(struct aux_engine *engine, uint32_t size,
+ uint8_t *buffer, uint8_t *reply_result,
+ uint32_t *sw_status)
{
struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+ uint32_t bytes_replied;
+ uint32_t reply_result_32;
- /* Need to do a read to get the number of bytes to process
- * Alternatively, this information can be passed -
- * but that causes coupling which isn't good either. */
+ *sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
+ &bytes_replied);
- uint32_t bytes_replied;
- uint32_t value;
+ /* In case HPD is LOW, exit AUX transaction */
+ if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ return -1;
- value = REG_GET(AUX_SW_STATUS,
- AUX_SW_REPLY_BYTE_COUNT, &bytes_replied);
+ /* Need at least the status byte */
+ if (!bytes_replied)
+ return -1;
- if (bytes_replied) {
- uint32_t reply_result;
+ REG_UPDATE_1BY1_3(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA_RW, 1);
- REG_UPDATE_1BY1_3(AUX_SW_DATA,
- AUX_SW_INDEX, 0,
- AUX_SW_AUTOINCREMENT_DISABLE, 1,
- AUX_SW_DATA_RW, 1);
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
+ reply_result_32 = reply_result_32 >> 4;
+ *reply_result = (uint8_t)reply_result_32;
- REG_GET(AUX_SW_DATA,
- AUX_SW_DATA, &reply_result);
+ if (reply_result_32 == 0) { /* ACK */
+ uint32_t i = 0;
- reply_result = reply_result >> 4;
+ /* First byte was already used to get the command status */
+ --bytes_replied;
- switch (reply_result) {
- case 0: /* ACK */ {
- uint32_t i = 0;
+ /* Do not overflow buffer */
+ if (bytes_replied > size)
+ return -1;
- /* first byte was already used
- * to get the command status */
- --bytes_replied;
+ while (i < bytes_replied) {
+ uint32_t aux_sw_data_val;
- while (i < bytes_replied) {
- uint32_t aux_sw_data_val;
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
+ buffer[i] = aux_sw_data_val;
+ ++i;
+ }
- REG_GET(AUX_SW_DATA,
- AUX_SW_DATA, &aux_sw_data_val);
+ return i;
+ }
- reply->data[i] = aux_sw_data_val;
- ++i;
- }
+ return 0;
+}
- reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
+static void process_channel_reply(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply)
+{
+ int bytes_replied;
+ uint8_t reply_result;
+ uint32_t sw_status;
+
+ bytes_replied = read_channel_reply(engine, reply->length, reply->data,
+ &reply_result, &sw_status);
+
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+ reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+ return;
+ }
+
+ if (bytes_replied < 0) {
+ /* Need to handle an error case...
+ * Hopefully, the upper-layer function won't call this function
+ * if the number of bytes in the reply was 0, because there was
+ * surely an error that was asserted and should have been
+ * handled; for the hot-plug case, this can happen.
+ */
+ if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ ASSERT_CRITICAL(false);
+ return;
}
+ } else {
+
+ switch (reply_result) {
+ case 0: /* ACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
break;
case 1: /* NACK */
reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
@@ -340,15 +377,6 @@ static void process_channel_reply(
default:
reply->status = AUX_TRANSACTION_REPLY_INVALID;
}
- } else {
- /* Need to handle an error case...
- * hopefully, upper layer function won't call this function
- * if the number of bytes in the reply was 0
- * because there was surely an error that was asserted
- * that should have been handled
- * for hot plug case, this could happens*/
- if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
- ASSERT_CRITICAL(false);
}
}
@@ -371,6 +399,10 @@ static enum aux_channel_operation_result get_channel_status(
value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
10, aux110->timeout_period/10);
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+
/* Note that the following bits are set in 'status.bits'
* during CTS 4.2.1.2 (FW 3.3.1):
* AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
@@ -402,10 +434,10 @@ static enum aux_channel_operation_result get_channel_status(
return AUX_CHANNEL_OPERATION_SUCCEEDED;
}
} else {
- /*time_elapsed >= aux_engine->timeout_period */
- if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
- ASSERT_CRITICAL(false);
-
+ /* time_elapsed >= aux_engine->timeout_period;
+ * AUX_SW_STATUS__AUX_SW_HPD_DISCON already handled at this point
+ */
+ ASSERT_CRITICAL(false);
return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
}
}
@@ -415,6 +447,7 @@ static const struct aux_engine_funcs aux_engine_funcs = {
.acquire_engine = acquire_engine,
.submit_channel_request = submit_channel_request,
.process_channel_reply = process_channel_reply,
+ .read_channel_reply = read_channel_reply,
.get_channel_status = get_channel_status,
.is_engine_available = is_engine_available,
};
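read_channel_reply() now returns the payload byte count, or -1 when HPD dropped or the reply was empty or oversized, with the raw reply nibble and SW status delivered through out-parameters. A sketch of a caller honoring that contract:

/* Sketch: consuming the new read_channel_reply() contract. */
uint8_t buf[16];
uint8_t reply_result;   /* top nibble of the first returned byte */
uint32_t sw_status;
int n;

n = engine->funcs->read_channel_reply(engine, sizeof(buf), buf,
                                      &reply_result, &sw_status);

if (sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)
        return;                 /* sink unplugged mid-transfer */
if (n < 0)
        return;                 /* empty or oversized reply */
/* reply_result == 0 is an AUX ACK; buf[0..n-1] holds the payload */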
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
index b7256f595052..9cbe1a7a6bcb 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
@@ -62,12 +62,7 @@ enum dc_i2c_arbitration {
DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
};
-enum {
- /* No timeout in HW
- * (timeout implemented in SW by querying status) */
- I2C_SETUP_TIME_LIMIT = 255,
- I2C_HW_BUFFER_SIZE = 538
-};
+
/*
* @brief
@@ -152,6 +147,11 @@ static bool setup_engine(
struct i2c_engine *i2c_engine)
{
struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+ uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
+ uint32_t reset_length = 0;
+
+ if (hw_engine->base.base.setup_limit != 0)
+ i2c_setup_limit = hw_engine->base.base.setup_limit;
/* Program pin select */
REG_UPDATE_6(
@@ -164,11 +164,15 @@ static bool setup_engine(
DC_I2C_DDC_SELECT, hw_engine->engine_id);
/* Program time limit */
- REG_UPDATE_N(
- SETUP, 2,
- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), I2C_SETUP_TIME_LIMIT,
- FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
-
+ if (hw_engine->base.base.send_reset_length == 0) {
+ /*pre-dcn*/
+ REG_UPDATE_N(
+ SETUP, 2,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
+ } else {
+ reset_length = hw_engine->base.base.send_reset_length;
+ }
/* Program HW priority
* set to High - interrupt software I2C at any time
* Enable restart of SW I2C that was interrupted by HW
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
index 5bb04085f670..fea2946906ed 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
@@ -192,6 +192,7 @@ struct i2c_hw_engine_dce110 {
/* number of pending transactions (before GO) */
uint32_t transaction_count;
uint32_t engine_keep_power_up_count;
+ uint32_t i2_setup_time_limit;
};
struct i2c_hw_engine_dce110_create_arg {
@@ -207,4 +208,11 @@ struct i2c_hw_engine_dce110_create_arg {
struct i2c_engine *dal_i2c_hw_engine_dce110_create(
const struct i2c_hw_engine_dce110_create_arg *arg);
+enum {
+ I2C_SETUP_TIME_LIMIT_DCE = 255,
+ I2C_SETUP_TIME_LIMIT_DCN = 3,
+ I2C_HW_BUFFER_SIZE = 538,
+ I2C_SEND_RESET_LENGTH_9 = 9,
+ I2C_SEND_RESET_LENGTH_10 = 10,
+};
#endif
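Moving the timeout constants into the header lets construct-time code pick per-family values: DCE keeps the legacy 255-unit setup limit while DCN 1.0 uses 3. A sketch of the selection, mirroring the switch added to dal_i2caux_dce110_construct() later in this patch:

/* Sketch: per-family I2C setup time limit chosen at construct time. */
switch (ctx->dce_version) {
case DCN_VERSION_1_0:
        engine->setup_limit = I2C_SETUP_TIME_LIMIT_DCN; /* 3 */
        break;
default:
        engine->setup_limit = I2C_SETUP_TIME_LIMIT_DCE; /* 255 */
        break;
}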
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
index 2a047f8ca0e9..1d748ac1d6d6 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
@@ -43,6 +43,9 @@
#include "i2c_sw_engine_dce110.h"
#include "i2c_hw_engine_dce110.h"
#include "aux_engine_dce110.h"
+#include "../../dc.h"
+#include "dc_types.h"
+
/*
* Post-requisites: headers required by this unit
@@ -199,6 +202,7 @@ static const struct dce110_i2c_hw_engine_mask i2c_mask = {
void dal_i2caux_dce110_construct(
struct i2caux_dce110 *i2caux_dce110,
struct dc_context *ctx,
+ unsigned int num_i2caux_inst,
const struct dce110_aux_registers aux_regs[],
const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[],
const struct dce110_i2c_hw_engine_shift *i2c_shift,
@@ -249,9 +253,22 @@ void dal_i2caux_dce110_construct(
base->i2c_hw_engines[line_id] =
dal_i2c_hw_engine_dce110_create(&hw_arg_dce110);
-
+ if (base->i2c_hw_engines[line_id] != NULL) {
+ switch (ctx->dce_version) {
+ case DCN_VERSION_1_0:
+ base->i2c_hw_engines[line_id]->setup_limit =
+ I2C_SETUP_TIME_LIMIT_DCN;
+ base->i2c_hw_engines[line_id]->send_reset_length = 0;
+ break;
+ default:
+ base->i2c_hw_engines[line_id]->setup_limit =
+ I2C_SETUP_TIME_LIMIT_DCE;
+ base->i2c_hw_engines[line_id]->send_reset_length = 0;
+ break;
+ }
+ }
++i;
- } while (i < ARRAY_SIZE(hw_ddc_lines));
+ } while (i < num_i2caux_inst);
/* Create AUX engines for all lines which has assisted HW AUX
* 'i' (loop counter) used as DDC/AUX engine_id */
@@ -272,7 +289,7 @@ void dal_i2caux_dce110_construct(
dal_aux_engine_dce110_create(&aux_init_data);
++i;
- } while (i < ARRAY_SIZE(hw_aux_lines));
+ } while (i < num_i2caux_inst);
/*TODO Generic I2C SW and HW*/
}
@@ -303,6 +320,7 @@ struct i2caux *dal_i2caux_dce110_create(
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dce110_aux_regs),
dce110_aux_regs,
i2c_hw_engine_regs,
&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
index 1b1f71c60ac9..d3d8cc58666a 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
@@ -45,6 +45,7 @@ struct i2caux *dal_i2caux_dce110_create(
void dal_i2caux_dce110_construct(
struct i2caux_dce110 *i2caux_dce110,
struct dc_context *ctx,
+ unsigned int num_i2caux_inst,
const struct dce110_aux_registers *aux_regs,
const struct dce110_i2c_hw_engine_registers *i2c_hw_engine_regs,
const struct dce110_i2c_hw_engine_shift *i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
index dafc1a727f7f..a9db04738724 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
@@ -93,6 +93,7 @@ static void construct(
{
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dce112_aux_regs),
dce112_aux_regs,
dce112_hw_engine_regs,
&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
index 0e7b18260027..6a4f344c1db4 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
@@ -111,6 +111,7 @@ struct i2caux *dal_i2caux_dce120_create(
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dce120_aux_regs),
dce120_aux_regs,
dce120_hw_engine_regs,
&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
index e44a8901f38b..a59c1f50c1e8 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
@@ -111,6 +111,7 @@ struct i2caux *dal_i2caux_dcn10_create(
dal_i2caux_dce110_construct(i2caux_dce110,
ctx,
+ ARRAY_SIZE(dcn10_aux_regs),
dcn10_aux_regs,
dcn10_hw_engine_regs,
&i2c_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
index 33de8a8834dc..b16fb1ff687d 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
@@ -26,6 +26,8 @@
#ifndef __DAL_ENGINE_H__
#define __DAL_ENGINE_H__
+#include "dc_ddc_types.h"
+
enum i2caux_transaction_operation {
I2CAUX_TRANSACTION_READ,
I2CAUX_TRANSACTION_WRITE
@@ -53,7 +55,8 @@ enum i2caux_transaction_status {
I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
- I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW
+ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+ I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
};
struct i2caux_transaction_request {
@@ -75,19 +78,6 @@ enum i2c_default_speed {
I2CAUX_DEFAULT_I2C_SW_SPEED = 50
};
-enum i2caux_transaction_action {
- I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
- I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
- I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
-
- I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
- I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
- I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
-
- I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
- I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
-};
-
struct engine;
struct engine_funcs {
@@ -106,6 +96,7 @@ struct engine_funcs {
struct engine {
const struct engine_funcs *funcs;
+ uint32_t inst;
struct ddc *ddc;
struct dc_context *ctx;
};
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
index 58fc0f25eceb..ded6ea34b714 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
@@ -86,6 +86,8 @@ struct i2c_engine {
struct engine base;
const struct i2c_engine_funcs *funcs;
uint32_t timeout_delay;
+ uint32_t setup_limit;
+ uint32_t send_reset_length;
};
void dal_i2c_engine_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
index 14dc8c94d862..f7ed355fc84f 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -59,7 +59,7 @@
#include "dce120/i2caux_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "dcn10/i2caux_dcn10.h"
#endif
@@ -91,7 +91,7 @@ struct i2caux *dal_i2caux_create(
return dal_i2caux_dce100_create(ctx);
case DCE_VERSION_12_0:
return dal_i2caux_dce120_create(ctx);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
case DCN_VERSION_1_0:
return dal_i2caux_dcn10_create(ctx);
#endif
@@ -254,7 +254,6 @@ bool dal_i2caux_submit_aux_command(
break;
}
- cmd->payloads->length = request.payload.length;
++index_of_payload;
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index a94942d4e66b..9f33306f9014 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -33,7 +33,7 @@
#include "dc_bios_types.h"
#include "mem_input.h"
#include "hubp.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "mpc.h"
#endif
@@ -138,7 +138,7 @@ struct resource_pool {
struct output_pixel_processor *opps[MAX_PIPES];
struct timing_generator *timing_generators[MAX_PIPES];
struct stream_encoder *stream_enc[MAX_PIPES * 2];
-
+ struct aux_engine *engines[MAX_PIPES];
struct hubbub *hubbub;
struct mpc *mpc;
struct pp_smu_funcs_rv *pp_smu;
@@ -162,7 +162,7 @@ struct resource_pool {
unsigned int audio_count;
struct audio_support audio_support;
- struct display_clock *display_clock;
+ struct dccg *dccg;
struct irq_service *irqs;
struct abm *abm;
@@ -221,7 +221,7 @@ struct pipe_ctx {
struct pipe_ctx *top_pipe;
struct pipe_ctx *bottom_pipe;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
struct _vcs_dpi_display_dlg_regs_st dlg_regs;
struct _vcs_dpi_display_ttu_regs_st ttu_regs;
struct _vcs_dpi_display_rq_regs_st rq_regs;
@@ -255,8 +255,7 @@ struct dce_bw_output {
};
struct dcn_bw_output {
- struct dc_clocks cur_clk;
- struct dc_clocks calc_clk;
+ struct dc_clocks clk;
struct dcn_watermark_set watermarks;
};
@@ -277,11 +276,11 @@ struct dc_state {
/* Note: these are big structures, do *not* put on stack! */
struct dm_pp_display_configuration pp_display_cfg;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
struct dcn_bw_internal_vars dcn_bw_vars;
#endif
- struct display_clock *dis_clk;
+ struct dccg *dis_clk;
struct kref refcount;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 090b7a8dd67b..538b83303b86 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -102,21 +102,13 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
-ssize_t dal_ddc_service_read_dpcd_data(
- struct ddc_service *ddc,
- bool i2c,
- enum i2c_mot_mode mot,
- uint32_t address,
- uint8_t *data,
- uint32_t len);
-
-enum ddc_result dal_ddc_service_write_dpcd_data(
- struct ddc_service *ddc,
- bool i2c,
- enum i2c_mot_mode mot,
- uint32_t address,
- const uint8_t *data,
- uint32_t len);
+int dc_link_aux_transfer(struct ddc_service *ddc,
+ unsigned int address,
+ uint8_t *reply,
+ void *buffer,
+ unsigned int size,
+ enum aux_transaction_type type,
+ enum i2caux_transaction_action action);
void dal_ddc_service_write_scdc_data(
struct ddc_service *ddc_service,
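The two direction-specific DPCD helpers collapse into a single dc_link_aux_transfer() parameterized by transaction type and action. A hedged sketch of a native-AUX DPCD read through the new entry point (register 0x600 is DP SET_POWER; the enum literals are the ones this series uses elsewhere):

/* Sketch: read one byte of DPCD (SET_POWER, 0x600) via the unified API. */
uint8_t power_state;
uint8_t reply;
int got;

got = dc_link_aux_transfer(ddc, 0x600, &reply, &power_state,
                           sizeof(power_state),
                           AUX_TRANSACTION_TYPE_DP,
                           I2CAUX_TRANSACTION_ACTION_DP_READ);
if (got == sizeof(power_state)) {
        /* power_state now holds DPCD 0x600; reply carries the AUX status */
}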
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 2f783c650084..a37255c757e0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -33,9 +33,10 @@ struct dc_link;
struct dc_stream_state;
struct dc_link_settings;
-bool dp_hbr_verify_link_cap(
+bool dp_verify_link_cap(
struct dc_link *link,
- struct dc_link_settings *known_limit_link_setting);
+ struct dc_link_settings *known_limit_link_setting,
+ int *fail_count);
bool dp_validate_mode_timing(
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 132d18d4b293..ddbb673caa08 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -625,7 +625,7 @@ bool dcn_validate_bandwidth(
unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
- struct clocks_value *clocks);
+ struct dc_clocks *clocks);
void dcn_bw_update_from_pplib(struct dc *dc);
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
new file mode 100644
index 000000000000..e79cd4e92919
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_H__
+#define __DAL_AUX_ENGINE_H__
+
+#include "dc_ddc_types.h"
+#include "include/i2caux_interface.h"
+
+enum i2caux_transaction_operation {
+ I2CAUX_TRANSACTION_READ,
+ I2CAUX_TRANSACTION_WRITE
+};
+
+enum i2caux_transaction_address_space {
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
+};
+
+struct i2caux_transaction_payload {
+ enum i2caux_transaction_address_space address_space;
+ uint32_t address;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum i2caux_transaction_status {
+ I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
+ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+ I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
+};
+
+struct i2caux_transaction_request {
+ enum i2caux_transaction_operation operation;
+ struct i2caux_transaction_payload payload;
+ enum i2caux_transaction_status status;
+};
+
+enum i2caux_engine_type {
+ I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
+ I2CAUX_ENGINE_TYPE_AUX,
+ I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
+ I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
+ I2CAUX_ENGINE_TYPE_I2C_SW
+};
+
+enum i2c_default_speed {
+ I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
+ I2CAUX_DEFAULT_I2C_SW_SPEED = 50
+};
+
+union aux_config;
+
+struct aux_engine {
+ uint32_t inst;
+ struct ddc *ddc;
+ struct dc_context *ctx;
+ const struct aux_engine_funcs *funcs;
+ /* following values are expressed in milliseconds */
+ uint32_t delay;
+ uint32_t max_defer_write_retry;
+ bool acquire_reset;
+};
+
+struct read_command_context {
+ uint8_t *buffer;
+ uint32_t current_read_length;
+ uint32_t offset;
+ enum i2caux_transaction_status status;
+
+ struct aux_request_transaction_data request;
+ struct aux_reply_transaction_data reply;
+
+ uint8_t returned_byte;
+
+ uint32_t timed_out_retry_aux;
+ uint32_t invalid_reply_retry_aux;
+ uint32_t defer_retry_aux;
+ uint32_t defer_retry_i2c;
+ uint32_t invalid_reply_retry_aux_on_ack;
+
+ bool transaction_complete;
+ bool operation_succeeded;
+};
+
+struct write_command_context {
+ bool mot;
+
+ uint8_t *buffer;
+ uint32_t current_write_length;
+ enum i2caux_transaction_status status;
+
+ struct aux_request_transaction_data request;
+ struct aux_reply_transaction_data reply;
+
+ uint8_t returned_byte;
+
+ uint32_t timed_out_retry_aux;
+ uint32_t invalid_reply_retry_aux;
+ uint32_t defer_retry_aux;
+ uint32_t defer_retry_i2c;
+ uint32_t max_defer_retry;
+ uint32_t ack_m_retry;
+
+ uint8_t reply_data[DEFAULT_AUX_MAX_DATA_SIZE];
+
+ bool transaction_complete;
+ bool operation_succeeded;
+};
+
+
+struct aux_engine_funcs {
+ void (*destroy)(
+ struct aux_engine **ptr);
+ bool (*acquire_engine)(
+ struct aux_engine *engine);
+ void (*configure)(
+ struct aux_engine *engine,
+ union aux_config cfg);
+ void (*submit_channel_request)(
+ struct aux_engine *engine,
+ struct aux_request_transaction_data *request);
+ void (*process_channel_reply)(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply);
+ int (*read_channel_reply)(
+ struct aux_engine *engine,
+ uint32_t size,
+ uint8_t *buffer,
+ uint8_t *reply_result,
+ uint32_t *sw_status);
+ enum aux_channel_operation_result (*get_channel_status)(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes);
+ bool (*is_engine_available)(struct aux_engine *engine);
+ enum i2caux_engine_type (*get_engine_type)(
+ const struct aux_engine *engine);
+ bool (*acquire)(
+ struct aux_engine *engine,
+ struct ddc *ddc);
+ bool (*submit_request)(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction);
+ void (*release_engine)(
+ struct aux_engine *engine);
+ void (*destroy_engine)(
+ struct aux_engine **engine);
+};
+#endif
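Taken together, the payload/request structs and the aux_engine_funcs vtable describe an acquire/submit/release flow. A minimal sketch of a DPCD read through this interface (the engine and ddc objects are assumed to be already constructed; the helper name and error handling are illustrative only):

    /* Sketch only: relies solely on names declared in aux_engine.h. */
    static bool example_dpcd_read(struct aux_engine *engine, struct ddc *ddc,
                                  uint32_t address, uint32_t length, uint8_t *data)
    {
            struct i2caux_transaction_request req = {
                    .operation = I2CAUX_TRANSACTION_READ,
                    .payload = {
                            .address_space = I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD,
                            .address = address,
                            .length = length,
                            .data = data,
                    },
            };
            bool submitted = false;

            if (engine->funcs->acquire(engine, ddc)) {
                    /* false: not in the middle of a larger transaction */
                    submitted = engine->funcs->submit_request(engine, &req, false);
                    engine->funcs->release_engine(engine);
            }
            return submitted &&
                   req.status == I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
    }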
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
index f5f69cd81f6f..3c7ccb68ecdb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
@@ -27,23 +27,7 @@
#define __DISPLAY_CLOCK_H__
#include "dm_services_types.h"
-
-
-struct clocks_value {
- int dispclk_in_khz;
- int max_pixelclk_in_khz;
- int max_non_dp_phyclk_in_khz;
- int max_dp_phyclk_in_khz;
- bool dispclk_notify_pplib_done;
- bool pixelclk_notify_pplib_done;
- bool phyclk_notigy_pplib_done;
- int dcfclock_in_khz;
- int dppclk_in_khz;
- int mclk_in_khz;
- int phyclk_in_khz;
- int common_vdd_level;
-};
-
+#include "dc.h"
/* Structure containing all state-dependent clocks
* (dependent on "enum clocks_state") */
@@ -52,34 +36,23 @@ struct state_dependent_clocks {
int pixel_clk_khz;
};
-struct display_clock {
+struct dccg {
struct dc_context *ctx;
const struct display_clock_funcs *funcs;
enum dm_pp_clocks_state max_clks_state;
enum dm_pp_clocks_state cur_min_clks_state;
- struct clocks_value cur_clocks_value;
+ struct dc_clocks clks;
};
struct display_clock_funcs {
- int (*set_clock)(struct display_clock *disp_clk,
+ void (*update_clocks)(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower);
+ int (*set_dispclk)(struct dccg *dccg,
int requested_clock_khz);
- enum dm_pp_clocks_state (*get_required_clocks_state)(
- struct display_clock *disp_clk,
- struct state_dependent_clocks *req_clocks);
-
- bool (*set_min_clocks_state)(struct display_clock *disp_clk,
- enum dm_pp_clocks_state dm_pp_clocks_state);
-
- int (*get_dp_ref_clk_frequency)(struct display_clock *disp_clk);
-
- bool (*apply_clock_voltage_request)(
- struct display_clock *disp_clk,
- enum dm_pp_clock_type clocks_type,
- int clocks_in_khz,
- bool pre_mode_set,
- bool update_dp_phyclk);
+ int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
};
#endif /* __DISPLAY_CLOCK_H__ */
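The safe_to_lower flag encodes the usual two-phase clock programming rule: before a state change only raising clocks is safe; once the new state is committed they may also be lowered. A hedged sketch (dccg construction and the surrounding programming sequence are assumptions):

    /* Illustrative only; mirrors the update_clocks() contract above. */
    static void example_apply_clocks(struct dccg *dccg,
                                     struct dc_clocks *new_clocks)
    {
            /* phase 1: pipes still run the old state - never lower here */
            dccg->funcs->update_clocks(dccg, new_clocks, false);

            /* ... reprogram surfaces/timings for the new state ... */

            /* phase 2: new state is active - lowering is now safe */
            dccg->funcs->update_clocks(dccg, new_clocks, true);
    }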
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index de60f940030d..4550747fb61c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -48,7 +48,7 @@ struct dmcu_funcs {
const char *src,
unsigned int bytes);
void (*set_psr_enable)(struct dmcu *dmcu, bool enable, bool wait);
- void (*setup_psr)(struct dmcu *dmcu,
+ bool (*setup_psr)(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context);
void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 582458f028f8..74ad94b0e4f0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -151,6 +151,9 @@ struct dpp_funcs {
void (*dpp_set_hdr_multiplier)(
struct dpp *dpp_base,
uint32_t multiplier);
+ void (*set_optional_cursor_attributes)(
+ struct dpp *dpp_base,
+ struct dpp_cursor_attributes *attr);
void (*dpp_dppclk_control)(
struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 97df82cddf82..4f3f9e68ccfa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -43,10 +43,9 @@ enum cursor_lines_per_chunk {
};
struct hubp {
- struct hubp_funcs *funcs;
+ const struct hubp_funcs *funcs;
struct dc_context *ctx;
struct dc_plane_address request_address;
- struct dc_plane_address current_address;
int inst;
/* run time states */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 47f1dc5a43b7..da89c2edb07c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -64,7 +64,7 @@ struct stutter_modes {
};
struct mem_input {
- struct mem_input_funcs *funcs;
+ const struct mem_input_funcs *funcs;
struct dc_context *ctx;
struct dc_plane_address request_address;
struct dc_plane_address current_address;
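Constifying the funcs pointer (here and in struct hubp above) lets every instance of a given ASIC generation share one read-only vtable. A hedged sketch of the pattern; the example names are hypothetical:

    static const struct mem_input_funcs example_mi_funcs = {
            /* hook assignments for one ASIC generation ... */
    };

    static void example_mi_construct(struct mem_input *mi,
                                     struct dc_context *ctx)
    {
            mi->funcs = &example_mi_funcs; /* pointer-to-const required */
            mi->ctx = ctx;
    }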
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 69cb0a105300..af700c7dac50 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -156,6 +156,9 @@ struct timing_generator_funcs {
uint32_t *v_blank_end,
uint32_t *h_position,
uint32_t *v_position);
+ bool (*get_otg_active_size)(struct timing_generator *optc,
+ uint32_t *otg_active_width,
+ uint32_t *otg_active_height);
void (*set_early_control)(struct timing_generator *tg,
uint32_t early_cntl);
void (*wait_for_state)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 63fc6c499789..a14ce4de80b2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -44,6 +44,7 @@ struct dce_hwseq_wa {
bool blnd_crtc_trigger;
bool DEGVIDCN10_253;
bool false_optc_underflow;
+ bool DEGVIDCN10_254;
};
struct hwseq_wa_state {
@@ -101,10 +102,18 @@ struct hw_sequencer_funcs {
const struct dc *dc,
struct pipe_ctx *pipe_ctx);
+ void (*plane_atomic_disconnect)(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
void (*update_dchub)(
struct dce_hwseq *hws,
struct dchub_init_data *dh_data);
+ void (*update_mpcc)(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
void (*update_pending_status)(
struct pipe_ctx *pipe_ctx);
@@ -154,20 +163,24 @@ struct hw_sequencer_funcs {
struct dc_link_settings *link_settings);
void (*blank_stream)(struct pipe_ctx *pipe_ctx);
+
+ void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx);
+
+ void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx, int option);
+
void (*pipe_control_lock)(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock);
void (*blank_pixel_data)(
struct dc *dc,
- struct stream_resource *stream_res,
- struct dc_stream_state *stream,
+ struct pipe_ctx *pipe_ctx,
bool blank);
void (*set_bandwidth)(
struct dc *dc,
struct dc_state *context,
- bool decrease_allowed);
+ bool safe_to_lower);
void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
int vmin, int vmax);
@@ -210,6 +223,7 @@ struct hw_sequencer_funcs {
void (*set_cursor_position)(struct pipe_ctx *pipe);
void (*set_cursor_attribute)(struct pipe_ctx *pipe);
+ void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index 3306e7b0b3e3..cf5a84b9e27c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -445,4 +445,50 @@ uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
uint8_t shift8, uint32_t mask8, uint32_t *field_value8);
+
+
+/* indirect register access */
+
+#define IX_REG_SET_N(index_reg_name, data_reg_name, index, n, initial_val, ...) \
+ generic_indirect_reg_update_ex(CTX, \
+ REG(index_reg_name), REG(data_reg_name), IND_REG(index), \
+ initial_val, \
+ n, __VA_ARGS__)
+
+#define IX_REG_SET_2(index_reg_name, data_reg_name, index, init_value, f1, v1, f2, v2) \
+ IX_REG_SET_N(index_reg_name, data_reg_name, index, 2, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+
+#define IX_REG_READ(index_reg_name, data_reg_name, index) \
+ generic_read_indirect_reg(CTX, REG(index_reg_name), REG(data_reg_name), IND_REG(index))
+
+
+
+#define IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, n, ...) \
+ generic_indirect_reg_update_ex(CTX, \
+ REG(index_reg_name), REG(data_reg_name), IND_REG(index), \
+ IX_REG_READ(index_reg_name, data_reg_name, index), \
+ n, __VA_ARGS__)
+
+#define IX_REG_UPDATE_2(index_reg_name, data_reg_name, index, f1, v1, f2, v2) \
+ IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, 2,\
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+void generic_write_indirect_reg(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, uint32_t data);
+
+uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index);
+
+uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
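These wrappers fold the classic index/data register pair into a single read-modify-write. A hedged sketch of IX_REG_UPDATE_2 usage; the CTX/REG/FN/IND_REG shorthands are per-component, and the register and field names below are placeholders, not real definitions:

    /* Placeholder names: assumes the including file defined REG(),
     * FN(), IND_REG() and a dc_context as CTX, per reg_helper.h. */
    IX_REG_UPDATE_2(EXAMPLE_INDEX, EXAMPLE_DATA, EXAMPLE_IND_REG,
                    EXAMPLE_FIELD_A, 1,
                    EXAMPLE_FIELD_B, 0);

This expands into generic_read_indirect_reg() to fetch the current value, then generic_indirect_reg_update_ex() to apply both field updates through the index/data pair.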
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 640a647f4611..e92facbd038f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -38,6 +38,7 @@ enum dce_version resource_parse_asic_id(
struct resource_caps {
int num_timing_generator;
+ int num_opp;
int num_video_plane;
int num_audio;
int num_stream_encoder;
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index 498515aad4a5..a76ee600ecee 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -60,7 +60,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12)
###############################################################################
# DCN 1x
###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
IRQ_DCN1 = irq_service_dcn10.o
AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1))
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index dcdfa0f01551..ae3fd0a235ba 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -36,7 +36,7 @@
#include "dce120/irq_service_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include "dcn10/irq_service_dcn10.h"
#endif
@@ -78,7 +78,7 @@ const struct irq_source_info *find_irq_source_info(
struct irq_service *irq_service,
enum dc_irq_source source)
{
- if (source > DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
+ if (source >= DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
return NULL;
return &irq_service->info[source];
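The tightened comparison closes an off-by-one: with info[] sized by DAL_IRQ_SOURCES_NUMBER, the valid indices are 0 through DAL_IRQ_SOURCES_NUMBER - 1, so the old `>` test let source == DAL_IRQ_SOURCES_NUMBER fall through and read one entry past the end of the table; `>=` rejects it.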
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index a407892905af..c9fce9066ad8 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -48,7 +48,7 @@
#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
#include <asm/fpu/api.h>
#endif
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 019e7a095ea1..d968956a10cd 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -40,7 +40,8 @@ enum ddc_result {
DDC_RESULT_FAILED_INCOMPLETE,
DDC_RESULT_FAILED_OPERATION,
DDC_RESULT_FAILED_INVALID_OPERATION,
- DDC_RESULT_FAILED_BUFFER_OVERFLOW
+ DDC_RESULT_FAILED_BUFFER_OVERFLOW,
+ DDC_RESULT_FAILED_HPD_DISCON
};
enum ddc_service_type {
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index d8e52e3b8e3c..1c66166d0a94 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -27,6 +27,9 @@
#define __DAL_DPCD_DEFS_H__
#include <drm/drm_dp_helper.h>
+#ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_SINK_HW_REVISION_START 0x409
+#endif
enum dpcd_revision {
DPCD_REV_10 = 0x10,
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index a981b3e99ab3..52a73332befb 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -26,6 +26,13 @@
#ifndef __DAL_FIXED31_32_H__
#define __DAL_FIXED31_32_H__
+#ifndef LLONG_MAX
+#define LLONG_MAX 9223372036854775807ll
+#endif
+#ifndef LLONG_MIN
+#define LLONG_MIN (-LLONG_MAX - 1ll)
+#endif
+
#define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
#ifndef LLONG_MIN
#define LLONG_MIN (1LL<<63)
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 36bbad594267..f312834fef50 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -395,6 +395,8 @@ struct integrated_info {
struct i2c_reg_info dp3_ext_hdmi_reg_settings[9];
unsigned char dp3_ext_hdmi_6g_reg_num;
struct i2c_reg_info dp3_ext_hdmi_6g_reg_settings[3];
+ /* V11 */
+ uint32_t dp_ss_control;
};
/**
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
index 2941b882b0b6..58bb42ed85ca 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
@@ -37,6 +37,10 @@
* ********************************************************************
*/
+#define MAX_CONNECTOR_NUMBER_PER_SLOT (16)
+#define MAX_BOARD_SLOTS (4)
+#define INVALID_CONNECTOR_INDEX ((unsigned int)(-1))
+
/* HPD unit id - HW direct translation */
enum hpd_source_id {
HPD_SOURCEID1 = 0,
@@ -136,5 +140,47 @@ enum sync_source {
SYNC_SOURCE_DUAL_GPU_PIN
};
+/* connector sizes in millimeters - from BiosParserTypes.hpp */
+#define CONNECTOR_SIZE_DVI 40
+#define CONNECTOR_SIZE_VGA 32
+#define CONNECTOR_SIZE_HDMI 16
+#define CONNECTOR_SIZE_DP 16
+#define CONNECTOR_SIZE_MINI_DP 9
+#define CONNECTOR_SIZE_UNKNOWN 30
+
+enum connector_layout_type {
+ CONNECTOR_LAYOUT_TYPE_UNKNOWN,
+ CONNECTOR_LAYOUT_TYPE_DVI_D,
+ CONNECTOR_LAYOUT_TYPE_DVI_I,
+ CONNECTOR_LAYOUT_TYPE_VGA,
+ CONNECTOR_LAYOUT_TYPE_HDMI,
+ CONNECTOR_LAYOUT_TYPE_DP,
+ CONNECTOR_LAYOUT_TYPE_MINI_DP,
+};
+struct connector_layout_info {
+ struct graphics_object_id connector_id;
+ enum connector_layout_type connector_type;
+ unsigned int length;
+ unsigned int position; /* offset in mm from right side of the board */
+};
+
+/* length and width in mm */
+struct slot_layout_info {
+ unsigned int length;
+ unsigned int width;
+ unsigned int num_of_connectors;
+ struct connector_layout_info connectors[MAX_CONNECTOR_NUMBER_PER_SLOT];
+};
+
+struct board_layout_info {
+ unsigned int num_of_slots;
+ /* indicates valid information in bracket layout structure. */
+ unsigned int is_number_of_slots_valid : 1;
+ unsigned int is_slots_size_valid : 1;
+ unsigned int is_connector_offsets_valid : 1;
+ unsigned int is_connector_lengths_valid : 1;
+
+ struct slot_layout_info slots[MAX_BOARD_SLOTS];
+};
#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index c4197432eb7c..33b3d755fe65 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -197,6 +197,11 @@ enum transmitter_color_depth {
TRANSMITTER_COLOR_DEPTH_48 /* 16 bits */
};
+enum dp_alt_mode {
+ DP_Alt_mode__Unknown = 0,
+ DP_Alt_mode__Connect,
+ DP_Alt_mode__NoConnect,
+};
/*
*****************************************************************************
* graphics_object_id struct
@@ -287,4 +292,15 @@ static inline enum engine_id dal_graphics_object_id_get_engine_id(
return (enum engine_id) id.id;
return ENGINE_ID_UNKNOWN;
}
+
+static inline bool dal_graphics_object_id_equal(
+ struct graphics_object_id id_1,
+ struct graphics_object_id id_2)
+{
+ if ((id_1.id == id_2.id) && (id_1.enum_id == id_2.enum_id) &&
+ (id_1.type == id_2.type)) {
+ return true;
+ }
+ return false;
+}
#endif
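The comparator gives board-layout code a way to match connectors by full object id. A hedged sketch of a lookup over the slot tables introduced in grph_object_defs.h above (the helper name and return convention are assumptions):

    static unsigned int example_find_connector(
            const struct slot_layout_info *slot,
            struct graphics_object_id id)
    {
            unsigned int i;

            for (i = 0; i < slot->num_of_connectors; i++)
                    if (dal_graphics_object_id_equal(
                                    slot->connectors[i].connector_id, id))
                            return i;
            return INVALID_CONNECTOR_INDEX;
    }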
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index dc98d6d4b2bd..e3c79616682d 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -40,47 +40,7 @@ struct dc_state;
*
*/
-struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask);
-
-uint32_t dal_logger_destroy(struct dal_logger **logger);
-
-void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn);
-
-void dm_logger_write(
- struct dal_logger *logger,
- enum dc_log_type log_type,
- const char *msg,
- ...);
-
-void dm_logger_append(
- struct log_entry *entry,
- const char *msg,
- ...);
-
-void dm_logger_append_va(
- struct log_entry *entry,
- const char *msg,
- va_list args);
-
-void dm_logger_open(
- struct dal_logger *logger,
- struct log_entry *entry,
- enum dc_log_type log_type);
-
-void dm_logger_close(struct log_entry *entry);
-
-void dc_conn_log(struct dc_context *ctx,
- const struct dc_link *link,
- uint8_t *hex_data,
- int hex_data_count,
- enum dc_log_type event,
- const char *msg,
- ...);
-
-void logger_write(struct dal_logger *logger,
- enum dc_log_type log_type,
- const char *msg,
- void *paralist);
+void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count);
void pre_surface_trace(
struct dc *dc,
@@ -106,28 +66,31 @@ void context_clock_trace(
* marked by this macro.
* Note that the message will be printed exactly once for every function
 * it is used in, to avoid repeating the same message. */
+
#define DAL_LOGGER_NOT_IMPL(fmt, ...) \
-{ \
- static bool print_not_impl = true; \
-\
- if (print_not_impl == true) { \
- print_not_impl = false; \
- dm_logger_write(ctx->logger, LOG_WARNING, \
- "DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
- } \
-}
+ do { \
+ static bool print_not_impl = true; \
+ if (print_not_impl == true) { \
+ print_not_impl = false; \
+ DRM_WARN("DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
+ } \
+ } while (0)
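The do { ... } while (0) wrapper makes the macro expand to exactly one statement, so it composes with un-braced if/else. A generic illustration (cond and fallback are hypothetical):

    if (cond)
            DAL_LOGGER_NOT_IMPL("unsupported path\n"); /* one statement */
    else
            fallback();

With the old bare { ... } expansion, the semicolon after the macro call would have closed the if body and left the else dangling - a compile error this form avoids.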
/******************************************************************************
* Convenience macros to save on typing.
*****************************************************************************/
#define DC_ERROR(...) \
- dm_logger_write(dc_ctx->logger, LOG_ERROR, \
- __VA_ARGS__)
+ do { \
+ (void)(dc_ctx); \
+ DC_LOG_ERROR(__VA_ARGS__); \
+ } while (0)
#define DC_SYNC_INFO(...) \
- dm_logger_write(dc_ctx->logger, LOG_SYNC, \
- __VA_ARGS__)
+ do { \
+ (void)(dc_ctx); \
+ DC_LOG_SYNC(__VA_ARGS__); \
+ } while (0)
/* Connectivity log format:
* [time stamp] [drm] [Major_minor] [connector name] message.....
@@ -137,20 +100,30 @@ void context_clock_trace(
*/
#define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
- dc_conn_log(link->ctx, link, hex_data, hex_len, \
- LOG_EVENT_DETECTION, ##__VA_ARGS__)
+ do { \
+ (void)(link); \
+ dc_conn_log_hex_linux(hex_data, hex_len); \
+ DC_LOG_EVENT_DETECTION(__VA_ARGS__); \
+ } while (0)
#define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
- dc_conn_log(link->ctx, link, hex_data, hex_len, \
- LOG_EVENT_LINK_LOSS, ##__VA_ARGS__)
+ do { \
+ (void)(link); \
+ dc_conn_log_hex_linux(hex_data, hex_len); \
+ DC_LOG_EVENT_LINK_LOSS(__VA_ARGS__); \
+ } while (0)
#define CONN_MSG_LT(link, ...) \
- dc_conn_log(link->ctx, link, NULL, 0, \
- LOG_EVENT_LINK_TRAINING, ##__VA_ARGS__)
+ do { \
+ (void)(link); \
+ DC_LOG_EVENT_LINK_TRAINING(__VA_ARGS__); \
+ } while (0)
#define CONN_MSG_MODE(link, ...) \
- dc_conn_log(link->ctx, link, NULL, 0, \
- LOG_EVENT_MODE_SET, ##__VA_ARGS__)
+ do { \
+ (void)(link); \
+ DC_LOG_EVENT_MODE_SET(__VA_ARGS__); \
+ } while (0)
/*
* Display Test Next logging
@@ -165,38 +138,21 @@ void context_clock_trace(
dm_dtn_log_end(dc_ctx)
#define PERFORMANCE_TRACE_START() \
- unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx); \
- unsigned long long perf_trc_start_log_msk = dc->ctx->logger->mask; \
- unsigned int perf_trc_start_log_flags = dc->ctx->logger->flags.value; \
- if (dc->debug.performance_trace) {\
- dm_logger_flush_buffer(dc->ctx->logger, false);\
- dc->ctx->logger->mask = 1<<LOG_PERF_TRACE;\
- dc->ctx->logger->flags.bits.ENABLE_CONSOLE = 0;\
- dc->ctx->logger->flags.bits.ENABLE_BUFFER = 1;\
- }
-
-#define PERFORMANCE_TRACE_END() do {\
- unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx);\
- if (dc->debug.performance_trace) {\
- dm_logger_write(dc->ctx->logger, \
- LOG_PERF_TRACE, \
- "%s duration: %d ticks\n", __func__,\
+ unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx)
+
+#define PERFORMANCE_TRACE_END() \
+ do { \
+ unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx); \
+ if (dc->debug.performance_trace) { \
+ DC_LOG_PERF_TRACE("%s duration: %lld ticks\n", __func__, \
perf_trc_end_stmp - perf_trc_start_stmp); \
- if (perf_trc_start_log_msk != 1<<LOG_PERF_TRACE) {\
- dc->ctx->logger->mask = perf_trc_start_log_msk;\
- dc->ctx->logger->flags.value = perf_trc_start_log_flags;\
- dm_logger_flush_buffer(dc->ctx->logger, false);\
} \
- } \
-} while (0)
+ } while (0)
-#define DISPLAY_STATS_BEGIN(entry) \
- dm_logger_open(dc->ctx->logger, &entry, LOG_DISPLAYSTATS)
+#define DISPLAY_STATS_BEGIN(entry) (void)(entry)
-#define DISPLAY_STATS(msg, ...) \
- dm_logger_append(&log_entry, msg, ##__VA_ARGS__)
+#define DISPLAY_STATS(msg, ...) DC_LOG_PERF_TRACE(msg, ##__VA_ARGS__)
-#define DISPLAY_STATS_END(entry) \
- dm_logger_close(&entry)
+#define DISPLAY_STATS_END(entry) (void)(entry)
#endif /* __DAL_LOGGER_INTERFACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 0a540b9897a6..ad3695e67b76 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -138,63 +138,4 @@ enum dc_log_type {
(1 << LOG_HW_AUDIO)| \
(1 << LOG_BANDWIDTH_CALCS)*/
-union logger_flags {
- struct {
- uint32_t ENABLE_CONSOLE:1; /* Print to console */
- uint32_t ENABLE_BUFFER:1; /* Print to buffer */
- uint32_t RESERVED:30;
- } bits;
- uint32_t value;
-};
-
-struct log_entry {
- struct dal_logger *logger;
- enum dc_log_type type;
-
- char *buf;
- uint32_t buf_offset;
- uint32_t max_buf_bytes;
-};
-
-/**
-* Structure for enumerating log types
-*/
-struct dc_log_type_info {
- enum dc_log_type type;
- char name[MAX_NAME_LEN];
-};
-
-/* Structure for keeping track of offsets, buffer, etc */
-
-#define DAL_LOGGER_BUFFER_MAX_SIZE 2048
-
-/*Connectivity log needs to output EDID, which needs at lease 256x3 bytes,
- * change log line size to 896 to meet the request.
- */
-#define LOG_MAX_LINE_SIZE 896
-
-struct dal_logger {
-
- /* How far into the circular buffer has been read by dsat
- * Read offset should never cross write offset. Write \0's to
- * read data just to be sure?
- */
- uint32_t buffer_read_offset;
-
- /* How far into the circular buffer we have written
- * Write offset should never cross read offset
- */
- uint32_t buffer_write_offset;
-
- uint32_t open_count;
-
- char *log_buffer; /* Pointer to malloc'ed buffer */
- uint32_t log_buffer_size; /* Size of circular buffer */
-
- uint32_t mask; /*array of masks for major elements*/
-
- union logger_flags flags;
- struct dc_context *ctx;
-};
-
#endif /* __DAL_LOGGER_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index eee0dfad6962..bf29733958c3 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -131,6 +131,63 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
dc_fixpt_div(dc_fixpt_one, m1));
}
+
+/* degamma: non-linear to linear */
+static void compute_hlg_oetf(struct fixed31_32 in_x, bool is_light0_12, struct fixed31_32 *out_y)
+{
+ struct fixed31_32 a;
+ struct fixed31_32 b;
+ struct fixed31_32 c;
+ struct fixed31_32 threshold;
+ struct fixed31_32 reference_white_level;
+
+ a = dc_fixpt_from_fraction(17883277, 100000000);
+ if (is_light0_12) {
+ /*light 0-12*/
+ b = dc_fixpt_from_fraction(28466892, 100000000);
+ c = dc_fixpt_from_fraction(55991073, 100000000);
+ threshold = dc_fixpt_one;
+ reference_white_level = dc_fixpt_half;
+ } else {
+ /*light 0-1*/
+ b = dc_fixpt_from_fraction(2372241, 100000000);
+ c = dc_fixpt_add(dc_fixpt_one, dc_fixpt_from_fraction(429347, 100000000));
+ threshold = dc_fixpt_from_fraction(1, 12);
+ reference_white_level = dc_fixpt_pow(dc_fixpt_from_fraction(3, 1), dc_fixpt_half);
+ }
+ if (dc_fixpt_lt(threshold, in_x))
+ *out_y = dc_fixpt_add(c, dc_fixpt_mul(a, dc_fixpt_log(dc_fixpt_sub(in_x, b))));
+ else
+ *out_y = dc_fixpt_mul(dc_fixpt_pow(in_x, dc_fixpt_half), reference_white_level);
+}
+
+/* regamma: linear to non-linear */
+static void compute_hlg_eotf(struct fixed31_32 in_x, bool is_light0_12, struct fixed31_32 *out_y)
+{
+ struct fixed31_32 a;
+ struct fixed31_32 b;
+ struct fixed31_32 c;
+ struct fixed31_32 reference_white_level;
+
+ a = dc_fixpt_from_fraction(17883277, 100000000);
+ if (is_light0_12) {
+ /*light 0-12*/
+ b = dc_fixpt_from_fraction(28466892, 100000000);
+ c = dc_fixpt_from_fraction(55991073, 100000000);
+ reference_white_level = dc_fixpt_from_fraction(4, 1);
+ } else {
+ /*light 0-1*/
+ b = dc_fixpt_from_fraction(2372241, 100000000);
+ c = dc_fixpt_add(dc_fixpt_one, dc_fixpt_from_fraction(429347, 100000000));
+ reference_white_level = dc_fixpt_from_fraction(1, 3);
+ }
+ if (dc_fixpt_lt(dc_fixpt_half, in_x))
+ *out_y = dc_fixpt_add(dc_fixpt_exp(dc_fixpt_div(dc_fixpt_sub(in_x, c), a)), b);
+ else
+ *out_y = dc_fixpt_mul(dc_fixpt_pow(in_x, dc_fixpt_from_fraction(2, 1)), reference_white_level);
+}
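For reference, the branches above encode the BT.2100 HLG pair in the 0-12 light normalization, where a = 0.17883277, b = 0.28466892, c = 0.55991073 (the 0-1 variant only rescales b, c and the reference white):

    OETF:  E = sqrt(L) / 2            for 0 <= L <= 1
           E = a * ln(L - b) + c      for 1 <  L <= 12
    EOTF:  L = 4 * E^2                for 0 <= E <= 1/2
           L = exp((E - c) / a) + b   for 1/2 <  E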
+
+
/* one-time pre-compute PQ values - only for sdr_white_level 80 */
void precompute_pq(void)
{
@@ -691,6 +748,48 @@ static void build_degamma(struct pwl_float_data_ex *curve,
}
}
+static void build_hlg_degamma(struct pwl_float_data_ex *degamma,
+ uint32_t hw_points_num,
+ const struct hw_x_point *coordinate_x, bool is_light0_12)
+{
+ uint32_t i;
+
+ struct pwl_float_data_ex *rgb = degamma;
+ const struct hw_x_point *coord_x = coordinate_x;
+
+ i = 0;
+
+ while (i != hw_points_num + 1) {
+ compute_hlg_oetf(coord_x->x, is_light0_12, &rgb->r);
+ rgb->g = rgb->r;
+ rgb->b = rgb->r;
+ ++coord_x;
+ ++rgb;
+ ++i;
+ }
+}
+
+static void build_hlg_regamma(struct pwl_float_data_ex *regamma,
+ uint32_t hw_points_num,
+ const struct hw_x_point *coordinate_x, bool is_light0_12)
+{
+ uint32_t i;
+
+ struct pwl_float_data_ex *rgb = regamma;
+ const struct hw_x_point *coord_x = coordinate_x;
+
+ i = 0;
+
+ while (i != hw_points_num + 1) {
+ compute_hlg_eotf(coord_x->x, is_light0_12, &rgb->r);
+ rgb->g = rgb->r;
+ rgb->b = rgb->r;
+ ++coord_x;
+ ++rgb;
+ ++i;
+ }
+}
+
static void scale_gamma(struct pwl_float_data *pwl_rgb,
const struct dc_gamma *ramp,
struct dividers dividers)
@@ -898,7 +997,9 @@ static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb,
 * norm_y = 4095*regamma_y, and index is norm_y truncated to an integer
* lut1 = lut1D[index], lut2 = lut1D[index+1]
*
- *adjustedY is then linearly interpolating regamma Y between lut1 and lut2
+ * adjustedY is then linearly interpolating regamma Y between lut1 and lut2
+ *
+ * Custom degamma on Linux uses the same interpolation math, so it is handled here
*/
static void apply_lut_1d(
const struct dc_gamma *ramp,
@@ -919,7 +1020,7 @@ static void apply_lut_1d(
struct fixed31_32 delta_lut;
struct fixed31_32 delta_index;
- if (ramp->type != GAMMA_CS_TFM_1D)
+ if (ramp->type != GAMMA_CS_TFM_1D && ramp->type != GAMMA_CUSTOM)
return; // this is not expected
for (i = 0; i < num_hw_points; i++) {
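A worked instance of the lookup described above, with hypothetical values: for regamma_y = 0.3, norm_y = 4095 * 0.3 = 1228.5, so index = 1228, lut1 = lut1D[1228], lut2 = lut1D[1229], and adjustedY = lut1 + 0.5 * (lut2 - lut1).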
@@ -1537,7 +1638,9 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
coordinates_x, axix_x, curve,
MAX_HW_POINTS, tf_pts,
- mapUserRamp);
+ mapUserRamp && ramp->type != GAMMA_CUSTOM);
+ if (ramp->type == GAMMA_CUSTOM)
+ apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
ret = true;
@@ -1622,6 +1725,25 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
ret = true;
kvfree(rgb_regamma);
+ } else if (trans == TRANSFER_FUNCTION_HLG ||
+ trans == TRANSFER_FUNCTION_HLG12) {
+ rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_regamma),
+ GFP_KERNEL);
+ if (!rgb_regamma)
+ goto rgb_regamma_alloc_fail;
+
+ build_hlg_regamma(rgb_regamma,
+ MAX_HW_POINTS,
+ coordinates_x,
+ trans == TRANSFER_FUNCTION_HLG12);
+ for (i = 0; i <= MAX_HW_POINTS ; i++) {
+ points->red[i] = rgb_regamma[i].r;
+ points->green[i] = rgb_regamma[i].g;
+ points->blue[i] = rgb_regamma[i].b;
+ }
+ ret = true;
+ kvfree(rgb_regamma);
}
rgb_regamma_alloc_fail:
return ret;
@@ -1682,6 +1804,25 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
ret = true;
kvfree(rgb_degamma);
+ } else if (trans == TRANSFER_FUNCTION_HLG ||
+ trans == TRANSFER_FUNCTION_HLG12) {
+ rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_degamma),
+ GFP_KERNEL);
+ if (!rgb_degamma)
+ goto rgb_degamma_alloc_fail;
+
+ build_hlg_degamma(rgb_degamma,
+ MAX_HW_POINTS,
+ coordinates_x,
+ trans == TRANSFER_FUNCTION_HLG12);
+ for (i = 0; i <= MAX_HW_POINTS ; i++) {
+ points->red[i] = rgb_degamma[i].r;
+ points->green[i] = rgb_degamma[i].g;
+ points->blue[i] = rgb_degamma[i].b;
+ }
+ ret = true;
+ kvfree(rgb_degamma);
}
points->end_exponent = 0;
points->x_point_at_y1_red = 1;
diff --git a/drivers/gpu/drm/amd/display/modules/color/luts_1d.h b/drivers/gpu/drm/amd/display/modules/color/luts_1d.h
new file mode 100644
index 000000000000..66b1fad572ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/color/luts_1d.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef LUTS_1D_H
+#define LUTS_1D_H
+
+#include "hw_shared.h"
+
+struct point_config {
+ uint32_t custom_float_x;
+ uint32_t custom_float_y;
+ uint32_t custom_float_slope;
+};
+
+struct lut_point {
+ uint32_t red;
+ uint32_t green;
+ uint32_t blue;
+ uint32_t delta_red;
+ uint32_t delta_green;
+ uint32_t delta_blue;
+};
+
+struct pwl_1dlut_parameter {
+ struct gamma_curve arr_curve_points[34];
+ struct point_config arr_points[2];
+ struct lut_point rgb_resulted[256];
+ uint32_t hw_points_num;
+};
+#endif // LUTS_1D_H
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index 710852ad03f3..3d4c1b1ab8c4 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -29,7 +29,7 @@
#include "core_types.h"
#define DAL_STATS_ENABLE_REGKEY "DalStatsEnable"
-#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000001
+#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000000
#define DAL_STATS_ENABLE_REGKEY_ENABLED 0x00000001
#define DAL_STATS_ENTRIES_REGKEY "DalStatsEntries"
@@ -238,7 +238,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
for (int i = 0; i < core_stats->entry_id; i++) {
if (event_index < core_stats->event_index &&
i == events[event_index].entry_id) {
- DISPLAY_STATS("%s\n", events[event_index].event_string);
+ DISPLAY_STATS("==Event==%s\n", events[event_index].event_string);
event_index++;
} else if (time_index < core_stats->index &&
i == time[time_index].entry_id) {
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index 5eb895fd98bf..9cb9ceb4d74d 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -27,6 +27,7 @@
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 0x00010000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00020000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00040000
+#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 0x00080000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_MASK 0xFFFF0000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT 16
@@ -34,6 +35,7 @@
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 0x00000001
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00000002
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00000004
+#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 0x00000008
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0
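A hedged decoding sketch using the new bit (the caps word and helper name are illustrative):

    /* Illustrative: checks platform Gen4 support in a CAIL caps word. */
    static bool example_platform_has_gen4(uint32_t caps)
    {
            return (caps & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) != 0;
    }

The ASIC-side flags carry the same information in the low half-word, one bit per generation starting at CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT.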
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index b178176b72ac..265621d8945c 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -128,47 +128,57 @@ enum PP_FEATURE_MASK {
PP_OVERDRIVE_MASK = 0x4000,
PP_GFXOFF_MASK = 0x8000,
PP_ACG_MASK = 0x10000,
+ PP_STUTTER_MODE = 0x20000,
};
+/**
+ * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
+ */
struct amd_ip_funcs {
- /* Name of IP block */
+ /** @name: Name of IP block */
char *name;
- /* sets up early driver state (pre sw_init), does not configure hw - Optional */
+ /**
+ * @early_init:
+ *
+ * sets up early driver state (pre sw_init),
+ * does not configure hw - Optional
+ */
int (*early_init)(void *handle);
- /* sets up late driver/hw state (post hw_init) - Optional */
+ /** @late_init: sets up late driver/hw state (post hw_init) - Optional */
int (*late_init)(void *handle);
- /* sets up driver state, does not configure hw */
+ /** @sw_init: sets up driver state, does not configure hw */
int (*sw_init)(void *handle);
- /* tears down driver state, does not configure hw */
+ /** @sw_fini: tears down driver state, does not configure hw */
int (*sw_fini)(void *handle);
- /* sets up the hw state */
+ /** @hw_init: sets up the hw state */
int (*hw_init)(void *handle);
- /* tears down the hw state */
+ /** @hw_fini: tears down the hw state */
int (*hw_fini)(void *handle);
+ /** @late_fini: final cleanup */
void (*late_fini)(void *handle);
- /* handles IP specific hw/sw changes for suspend */
+ /** @suspend: handles IP specific hw/sw changes for suspend */
int (*suspend)(void *handle);
- /* handles IP specific hw/sw changes for resume */
+ /** @resume: handles IP specific hw/sw changes for resume */
int (*resume)(void *handle);
- /* returns current IP block idle status */
+ /** @is_idle: returns current IP block idle status */
bool (*is_idle)(void *handle);
- /* poll for idle */
+ /** @wait_for_idle: poll for idle */
int (*wait_for_idle)(void *handle);
- /* check soft reset the IP block */
+ /** @check_soft_reset: check soft reset the IP block */
bool (*check_soft_reset)(void *handle);
- /* pre soft reset the IP block */
+ /** @pre_soft_reset: pre soft reset the IP block */
int (*pre_soft_reset)(void *handle);
- /* soft reset the IP block */
+ /** @soft_reset: soft reset the IP block */
int (*soft_reset)(void *handle);
- /* post soft reset the IP block */
+ /** @post_soft_reset: post soft reset the IP block */
int (*post_soft_reset)(void *handle);
- /* enable/disable cg for the IP block */
+ /** @set_clockgating_state: enable/disable cg for the IP block */
int (*set_clockgating_state)(void *handle,
enum amd_clockgating_state state);
- /* enable/disable pg for the IP block */
+ /** @set_powergating_state: enable/disable pg for the IP block */
int (*set_powergating_state)(void *handle,
enum amd_powergating_state state);
- /* get current clockgating status */
+ /** @get_clockgating_state: get current clockgating status */
void (*get_clockgating_state)(void *handle, u32 *flags);
};
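The kernel-doc conversion documents the hook contract in place. A hedged, minimal implementation sketch (the foo names are hypothetical, and only a subset of the hooks is filled in; per the comments above, several are optional):

    static int foo_sw_init(void *handle) { return 0; }
    static int foo_sw_fini(void *handle) { return 0; }
    static int foo_hw_init(void *handle) { return 0; }
    static int foo_hw_fini(void *handle) { return 0; }
    static bool foo_is_idle(void *handle) { return true; }

    static const struct amd_ip_funcs foo_ip_funcs = {
            .name    = "foo",
            .sw_init = foo_sw_init,
            .sw_fini = foo_sw_fini,
            .hw_init = foo_hw_init,
            .hw_fini = foo_hw_fini,
            .is_idle = foo_is_idle,
    };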
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
index 18a32477ed1d..fe0cbaade3c3 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
@@ -89,6 +89,8 @@
#define mmUVD_JPEG_RB_SIZE_BASE_IDX 1
#define mmUVD_JPEG_ADDR_CONFIG 0x021f
#define mmUVD_JPEG_ADDR_CONFIG_BASE_IDX 1
+#define mmUVD_JPEG_PITCH 0x0222
+#define mmUVD_JPEG_PITCH_BASE_IDX 1
#define mmUVD_JPEG_GPCOM_CMD 0x022c
#define mmUVD_JPEG_GPCOM_CMD_BASE_IDX 1
#define mmUVD_JPEG_GPCOM_DATA0 0x022d
@@ -203,6 +205,8 @@
#define mmUVD_RB_WPTR4_BASE_IDX 1
#define mmUVD_JRBC_RB_RPTR 0x0457
#define mmUVD_JRBC_RB_RPTR_BASE_IDX 1
+#define mmUVD_LMI_JPEG_VMID 0x045d
+#define mmUVD_LMI_JPEG_VMID_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x045e
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x045f
@@ -231,6 +235,8 @@
#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 1
#define mmUVD_LMI_JRBC_IB_VMID 0x0507
#define mmUVD_LMI_JRBC_IB_VMID_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_VMID 0x0508
+#define mmUVD_LMI_JRBC_RB_VMID_BASE_IDX 1
#define mmUVD_JRBC_RB_WPTR 0x0509
#define mmUVD_JRBC_RB_WPTR_BASE_IDX 1
#define mmUVD_JRBC_RB_CNTL 0x050a
@@ -239,6 +245,20 @@
#define mmUVD_JRBC_IB_SIZE_BASE_IDX 1
#define mmUVD_JRBC_LMI_SWAP_CNTL 0x050d
#define mmUVD_JRBC_LMI_SWAP_CNTL_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x050e
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x050f
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW 0x0510
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 1
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH 0x0511
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 1
+#define mmUVD_JRBC_RB_REF_DATA 0x0512
+#define mmUVD_JRBC_RB_REF_DATA_BASE_IDX 1
+#define mmUVD_JRBC_RB_COND_RD_TIMER 0x0513
+#define mmUVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 1
+#define mmUVD_JRBC_EXTERNAL_REG_BASE 0x0517
+#define mmUVD_JRBC_EXTERNAL_REG_BASE_BASE_IDX 1
#define mmUVD_JRBC_SOFT_RESET 0x0519
#define mmUVD_JRBC_SOFT_RESET_BASE_IDX 1
#define mmUVD_JRBC_STATUS 0x051a
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 33b4de4ad66e..4bc118df3bc4 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1074,7 +1074,7 @@ struct atom_integrated_system_info_v1_11
uint16_t dpphy_override; // bit vector, enum of atom_sysinfo_dpphy_override_def
uint16_t lvds_misc; // enum of atom_sys_info_lvds_misc_def
uint16_t backlight_pwm_hz; // pwm frequency in hz
- uint8_t memorytype; // enum of atom_sys_mem_type
+ uint8_t memorytype; // enum of atom_dmi_t17_mem_type_def, APU memory type indication.
uint8_t umachannelnumber; // number of memory channels
uint8_t pwr_on_digon_to_de; /* all pwr sequence numbers below are in uint of 4ms */
uint8_t pwr_on_de_to_vary_bl;
@@ -1084,18 +1084,25 @@ struct atom_integrated_system_info_v1_11
uint8_t pwr_on_vary_bl_to_blon;
uint8_t pwr_down_bloff_to_vary_bloff;
uint8_t min_allowed_bl_level;
+ uint8_t htc_hyst_limit;
+ uint8_t htc_tmp_limit;
+ uint8_t reserved1;
+ uint8_t reserved2;
struct atom_external_display_connection_info extdispconninfo;
struct atom_14nm_dpphy_dvihdmi_tuningset dvi_tuningset;
struct atom_14nm_dpphy_dvihdmi_tuningset hdmi_tuningset;
struct atom_14nm_dpphy_dvihdmi_tuningset hdmi6g_tuningset;
- struct atom_14nm_dpphy_dp_tuningset dp_tuningset;
- struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;
+ struct atom_14nm_dpphy_dp_tuningset dp_tuningset; // rbr 1.62G dp tuning set
+ struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset; // HBR3 dp tuning set
struct atom_camera_data camera_info;
struct atom_hdmi_retimer_redriver_set dp0_retimer_set; //for DP0
struct atom_hdmi_retimer_redriver_set dp1_retimer_set; //for DP1
struct atom_hdmi_retimer_redriver_set dp2_retimer_set; //for DP2
struct atom_hdmi_retimer_redriver_set dp3_retimer_set; //for DP3
- uint32_t reserved[108];
+ struct atom_14nm_dpphy_dp_tuningset dp_hbr_tuningset; //hbr 2.7G dp tuning set
+ struct atom_14nm_dpphy_dp_tuningset dp_hbr2_tuningset; //hbr2 5.4G dp tuning set
+ struct atom_14nm_dpphy_dp_tuningset edp_tuningset; //edp tuning set
+ uint32_t reserved[66];
};
diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h
index 7852952d1fde..1d93a0c574c9 100644
--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
@@ -23,6 +23,8 @@
#ifndef _DM_PP_INTERFACE_
#define _DM_PP_INTERFACE_
+#include "dm_services_types.h"
+
#define PP_MAX_CLOCK_LEVELS 16
enum amd_pp_display_config_type{
@@ -189,39 +191,4 @@ struct pp_display_clock_request {
uint32_t clock_freq_in_khz;
};
-#define PP_MAX_WM_SETS 4
-
-enum pp_wm_set_id {
- DC_WM_SET_A = 0,
- DC_WM_SET_B,
- DC_WM_SET_C,
- DC_WM_SET_D,
- DC_WM_SET_INVALID = 0xffff,
-};
-
-struct pp_wm_set_with_dmif_clock_range_soc15 {
- enum pp_wm_set_id wm_set_id;
- uint32_t wm_min_dcefclk_in_khz;
- uint32_t wm_max_dcefclk_in_khz;
- uint32_t wm_min_memclk_in_khz;
- uint32_t wm_max_memclk_in_khz;
-};
-
-struct pp_wm_set_with_mcif_clock_range_soc15 {
- enum pp_wm_set_id wm_set_id;
- uint32_t wm_min_socclk_in_khz;
- uint32_t wm_max_socclk_in_khz;
- uint32_t wm_min_memclk_in_khz;
- uint32_t wm_max_memclk_in_khz;
-};
-
-struct pp_wm_sets_with_clock_ranges_soc15 {
- uint32_t num_wm_sets_dmif;
- uint32_t num_wm_sets_mcif;
- struct pp_wm_set_with_dmif_clock_range_soc15
- wm_sets_dmif[PP_MAX_WM_SETS];
- struct pp_wm_set_with_mcif_clock_range_soc15
- wm_sets_mcif[PP_MAX_WM_SETS];
-};
-
#endif /* _DM_PP_INTERFACE_ */
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h
new file mode 100644
index 000000000000..36306c57a2b4
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_GFX_9_0_H__
+#define __IRQSRCS_GFX_9_0_H__
+
+
+#define GFX_9_0__SRCID__CP_RB_INTERRUPT_PKT 176 /* B0 CP_INTERRUPT pkt in RB */
+#define GFX_9_0__SRCID__CP_IB1_INTERRUPT_PKT 177 /* B1 CP_INTERRUPT pkt in IB1 */
+#define GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT 178 /* B2 CP_INTERRUPT pkt in IB2 */
+#define GFX_9_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR 180 /* B4 PM4 Pkt Rsvd Bits Error */
+#define GFX_9_0__SRCID__CP_EOP_INTERRUPT 181 /* B5 End-of-Pipe Interrupt */
+#define GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR 183 /* B7 Bad Opcode Error */
+#define GFX_9_0__SRCID__CP_PRIV_REG_FAULT 184 /* B8 Privileged Register Fault */
+#define GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT 185 /* B9 Privileged Instr Fault */
+#define GFX_9_0__SRCID__CP_WAIT_MEM_SEM_FAULT 186 /* BA Wait Memory Semaphore Fault (Synchronization Object Fault) */
+#define GFX_9_0__SRCID__CP_CTX_EMPTY_INTERRUPT 187 /* BB Context Empty Interrupt */
+#define GFX_9_0__SRCID__CP_CTX_BUSY_INTERRUPT 188 /* BC Context Busy Interrupt */
+#define GFX_9_0__SRCID__CP_ME_WAIT_REG_MEM_POLL_TIMEOUT 192 /* C0 CP.ME Wait_Reg_Mem Poll Timeout */
+#define GFX_9_0__SRCID__CP_SIG_INCOMPLETE 193 /* C1 "Surface Probe Fault Signal Incomplete" */
+#define GFX_9_0__SRCID__CP_PREEMPT_ACK 194 /* C2 Preemption Acknowledge */
+#define GFX_9_0__SRCID__CP_GPF 195 /* C3 General Protection Fault (GPF) */
+#define GFX_9_0__SRCID__CP_GDS_ALLOC_ERROR 196 /* C4 GDS Alloc Error */
+#define GFX_9_0__SRCID__CP_ECC_ERROR 197 /* C5 ECC Error */
+#define GFX_9_0__SRCID__CP_COMPUTE_QUERY_STATUS 199 /* C7 Compute query status */
+#define GFX_9_0__SRCID__CP_VM_DOORBELL 200 /* C8 Unattached VM Doorbell Received */
+#define GFX_9_0__SRCID__CP_FUE_ERROR 201 /* C9 ECC FUE Error */
+#define GFX_9_0__SRCID__RLC_STRM_PERF_MONITOR_INTERRUPT 202 /* CA Streaming Perf Monitor Interrupt */
+#define GFX_9_0__SRCID__GRBM_RD_TIMEOUT_ERROR 232 /* E8 Read timeout error */
+#define GFX_9_0__SRCID__GRBM_REG_GUI_IDLE 233 /* E9 Register GUI Idle */
+#define GFX_9_0__SRCID__SQ_INTERRUPT_ID 239 /* EF SQ Interrupt (ttrace wrap, errors) */
+
+#endif /* __IRQSRCS_GFX_9_0_H__ */
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
index c6b6f97de9de..aaed7f59e0e2 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
@@ -198,4 +198,102 @@
#define VISLANDS30_IV_SRCID_HPD_RX_F 42 // 0x2a
#define VISLANDS30_IV_EXTID_HPD_RX_F 11
+#define VISLANDS30_IV_SRCID_GPIO_19 0x00000053 /* 83 */
+
+#define VISLANDS30_IV_SRCID_SRBM_READ_TIMEOUT_ERR 0x00000060 /* 96 */
+#define VISLANDS30_IV_SRCID_SRBM_CTX_SWITCH 0x00000061 /* 97 */
+
+#define VISLANDS30_IV_SRBM_REG_ACCESS_ERROR 0x00000062 /* 98 */
+
+
+#define VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP 0x00000077 /* 119 */
+#define VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE 0x0000007c /* 124 */
+
+#define VISLANDS30_IV_SRCID_BIF_PF_VF_MSGBUF_VALID 0x00000087 /* 135 */
+
+#define VISLANDS30_IV_SRCID_BIF_VF_PF_MSGBUF_ACK 0x0000008a /* 138 */
+
+#define VISLANDS30_IV_SRCID_SYS_PAGE_INV_FAULT 0x0000008c /* 140 */
+#define VISLANDS30_IV_SRCID_SYS_MEM_PROT_FAULT 0x0000008d /* 141 */
+
+#define VISLANDS30_IV_SRCID_SEM_PAGE_INV_FAULT 0x00000090 /* 144 */
+#define VISLANDS30_IV_SRCID_SEM_MEM_PROT_FAULT 0x00000091 /* 145 */
+
+#define VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT 0x00000092 /* 146 */
+#define VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT 0x00000093 /* 147 */
+
+#define VISLANDS30_IV_SRCID_ACP 0x000000a2 /* 162 */
+
+#define VISLANDS30_IV_SRCID_VCE_TRAP 0x000000a7 /* 167 */
+#define VISLANDS30_IV_EXTID_VCE_TRAP_GENERAL_PURPOSE 0
+#define VISLANDS30_IV_EXTID_VCE_TRAP_LOW_LATENCY 1
+#define VISLANDS30_IV_EXTID_VCE_TRAP_REAL_TIME 2
+
+#define VISLANDS30_IV_SRCID_CP_INT_RB 0x000000b0 /* 176 */
+#define VISLANDS30_IV_SRCID_CP_INT_IB1 0x000000b1 /* 177 */
+#define VISLANDS30_IV_SRCID_CP_INT_IB2 0x000000b2 /* 178 */
+#define VISLANDS30_IV_SRCID_CP_PM4_RES_BITS_ERR 0x000000b4 /* 180 */
+#define VISLANDS30_IV_SRCID_CP_END_OF_PIPE 0x000000b5 /* 181 */
+#define VISLANDS30_IV_SRCID_CP_BAD_OPCODE 0x000000b7 /* 183 */
+#define VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT 0x000000b8 /* 184 */
+#define VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT 0x000000b9 /* 185 */
+#define VISLANDS30_IV_SRCID_CP_WAIT_MEM_SEM_FAULT 0x000000ba /* 186 */
+#define VISLANDS30_IV_SRCID_CP_GUI_IDLE 0x000000bb /* 187 */
+#define VISLANDS30_IV_SRCID_CP_GUI_BUSY 0x000000bc /* 188 */
+
+#define VISLANDS30_IV_SRCID_CP_COMPUTE_QUERY_STATUS 0x000000bf /* 191 */
+#define VISLANDS30_IV_SRCID_CP_ECC_ERROR 0x000000c5 /* 197 */
+
+#define CARRIZO_IV_SRCID_CP_COMPUTE_QUERY_STATUS 0x000000c7 /* 199 */
+
+#define VISLANDS30_IV_SRCID_CP_WAIT_REG_MEM_POLL_TIMEOUT 0x000000c0 /* 192 */
+#define VISLANDS30_IV_SRCID_CP_SEM_SIG_INCOMPL 0x000000c1 /* 193 */
+#define VISLANDS30_IV_SRCID_CP_PREEMPT_ACK 0x000000c2 /* 194 */
+#define VISLANDS30_IV_SRCID_CP_GENERAL_PROT_FAULT 0x000000c3 /* 195 */
+#define VISLANDS30_IV_SRCID_CP_GDS_ALLOC_ERROR 0x000000c4 /* 196 */
+#define VISLANDS30_IV_SRCID_CP_ECC_ERROR 0x000000c5 /* 197 */
+
+#define VISLANDS30_IV_SRCID_RLC_STRM_PERF_MONITOR 0x000000ca /* 202 */
+
+#define VISLANDS30_IV_SDMA_ATOMIC_SRC_ID 0x000000da /* 218 */
+
+#define VISLANDS30_IV_SRCID_SDMA_ECC_ERROR 0x000000dc /* 220 */
+
+#define VISLANDS30_IV_SRCID_SDMA_TRAP 0x000000e0 /* 224 */
+#define VISLANDS30_IV_SRCID_SDMA_SEM_INCOMPLETE 0x000000e1 /* 225 */
+#define VISLANDS30_IV_SRCID_SDMA_SEM_WAIT 0x000000e2 /* 226 */
+
+
+#define VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER 0x000000e5 /* 229 */
+
+#define VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH 0x000000e6 /* 230 */
+#define VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW 0x000000e7 /* 231 */
+
+#define VISLANDS30_IV_SRCID_GRBM_READ_TIMEOUT_ERR 0x000000e8 /* 232 */
+#define VISLANDS30_IV_SRCID_GRBM_REG_GUI_IDLE 0x000000e9 /* 233 */
+
+#define VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG 0x000000ef /* 239 */
+
+#define VISLANDS30_IV_SRCID_SDMA_PREEMPT 0x000000f0 /* 240 */
+#define VISLANDS30_IV_SRCID_SDMA_VM_HOLE 0x000000f2 /* 242 */
+#define VISLANDS30_IV_SRCID_SDMA_CTXEMPTY 0x000000f3 /* 243 */
+#define VISLANDS30_IV_SRCID_SDMA_DOORBELL_INVALID 0x000000f4 /* 244 */
+#define VISLANDS30_IV_SRCID_SDMA_FROZEN 0x000000f5 /* 245 */
+#define VISLANDS30_IV_SRCID_SDMA_POLL_TIMEOUT 0x000000f6 /* 246 */
+#define VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE 0x000000f7 /* 247 */
+
+#define VISLANDS30_IV_SRCID_CG_THERMAL_TRIG 0x000000f8 /* 248 */
+
+#define VISLANDS30_IV_SRCID_SMU_DISP_TIMER_TRIGGER 0x000000fd /* 253 */
+
+/* These are not "real" source ids defined by HW */
+#define VISLANDS30_IV_SRCID_VM_CONTEXT_ALL 0x00000100 /* 256 */
+#define VISLANDS30_IV_EXTID_VM_CONTEXT0_ALL 0
+#define VISLANDS30_IV_EXTID_VM_CONTEXT1_ALL 1
+
+
+/* IV Extended IDs */
+#define VISLANDS30_IV_EXTID_NONE 0x00000000
+#define VISLANDS30_IV_EXTID_INVALID 0xffffffff
+
#endif // _IVSRCID_VISLANDS30_H_
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h
new file mode 100644
index 000000000000..802413832fe8
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_SDMA0_4_0_H__
+#define __IRQSRCS_SDMA0_4_0_H__
+
+#define SDMA0_4_0__SRCID__SDMA_ATOMIC_RTN_DONE 217 /* 0xD9 SDMA atomic*_rtn ops complete */
+#define SDMA0_4_0__SRCID__SDMA_ATOMIC_TIMEOUT 218 /* 0xDA SDMA atomic CMPSWAP loop timeout */
+#define SDMA0_4_0__SRCID__SDMA_IB_PREEMPT 219 /* 0xDB sdma mid-command buffer preempt interrupt */
+#define SDMA0_4_0__SRCID__SDMA_ECC 220 /* 0xDC ECC Error */
+#define SDMA0_4_0__SRCID__SDMA_PAGE_FAULT 221 /* 0xDD Page Fault Error from UTCL2 when nack=3 */
+#define SDMA0_4_0__SRCID__SDMA_PAGE_NULL 222 /* 0xDE Page Null from UTCL2 when nack=2 */
+#define SDMA0_4_0__SRCID__SDMA_XNACK 223 /* 0xDF Page retry timeout after UTCL2 return nack=1 */
+#define SDMA0_4_0__SRCID__SDMA_TRAP 224 /* 0xE0 Trap */
+#define SDMA0_4_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 225 /* 0xE1 GPF (Sem incomplete timeout) */
+#define SDMA0_4_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 226 /* 0xE2 Semaphore wait fail timeout */
+#define SDMA0_4_0__SRCID__SDMA_SRAM_ECC 228 /* 0xE4 SRAM ECC Error */
+#define SDMA0_4_0__SRCID__SDMA_PREEMPT 240 /* 0xF0 SDMA New Run List */
+#define SDMA0_4_0__SRCID__SDMA_VM_HOLE 242 /* 0xF2 MC or SEM address in VM hole */
+#define SDMA0_4_0__SRCID__SDMA_CTXEMPTY 243 /* 0xF3 Context Empty */
+#define SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID 244 /* 0xF4 Doorbell BE invalid */
+#define SDMA0_4_0__SRCID__SDMA_FROZEN 245 /* 0xF5 SDMA Frozen */
+#define SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT 246 /* 0xF6 SRBM read poll timeout */
+#define SDMA0_4_0__SRCID__SDMA_SRBMWRITE 247 /* 0xF7 SRBM write Protection */
+
+#endif /* __IRQSRCS_SDMA0_4_0_H__ */
+
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h
new file mode 100644
index 000000000000..d12a35619f9a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_SDMA1_4_0_H__
+#define __IRQSRCS_SDMA1_4_0_H__
+
+#define SDMA1_4_0__SRCID__SDMA_ATOMIC_RTN_DONE 217 /* 0xD9 SDMA atomic*_rtn ops complete */
+#define SDMA1_4_0__SRCID__SDMA_ATOMIC_TIMEOUT 218 /* 0xDA SDMA atomic CMPSWAP loop timeout */
+#define SDMA1_4_0__SRCID__SDMA_IB_PREEMPT 219 /* 0xDB sdma mid-command buffer preempt interrupt */
+#define SDMA1_4_0__SRCID__SDMA_ECC 220 /* 0xDC ECC Error */
+#define SDMA1_4_0__SRCID__SDMA_PAGE_FAULT 221 /* 0xDD Page Fault Error from UTCL2 when nack=3 */
+#define SDMA1_4_0__SRCID__SDMA_PAGE_NULL 222 /* 0xDE Page Null from UTCL2 when nack=2 */
+#define SDMA1_4_0__SRCID__SDMA_XNACK 223 /* 0xDF Page retry timeout after UTCL2 return nack=1 */
+#define SDMA1_4_0__SRCID__SDMA_TRAP 224 /* 0xE0 Trap */
+#define SDMA1_4_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 225 /* 0xE1 GPF (Sem incomplete timeout) */
+#define SDMA1_4_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 226 /* 0xE2 Semaphore wait fail timeout */
+#define SDMA1_4_0__SRCID__SDMA_SRAM_ECC 228 /* 0xE4 SRAM ECC Error */
+#define SDMA1_4_0__SRCID__SDMA_PREEMPT 240 /* 0xF0 SDMA New Run List */
+#define SDMA1_4_0__SRCID__SDMA_VM_HOLE 242 /* 0xF2 MC or SEM address in VM hole */
+#define SDMA1_4_0__SRCID__SDMA_CTXEMPTY 243 /* 0xF3 Context Empty */
+#define SDMA1_4_0__SRCID__SDMA_DOORBELL_INVALID 244 /* 0xF4 Doorbell BE invalid */
+#define SDMA1_4_0__SRCID__SDMA_FROZEN 245 /* 0xF5 SDMA Frozen */
+#define SDMA1_4_0__SRCID__SDMA_POLL_TIMEOUT 246 /* 0xF6 SRBM read poll timeout */
+#define SDMA1_4_0__SRCID__SDMA_SRBMWRITE 247 /* 0xF7 SRBM write Protection */
+
+#endif /* __IRQSRCS_SDMA1_4_0_H__ */
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h b/drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h
index 7a65206a6d21..02bab4673cd4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h
@@ -23,13 +23,10 @@
*
*/
-#ifndef __SOC_BOUNDING_BOX_H__
-#define __SOC_BOUNDING_BOX_H__
+#ifndef __IRQSRCS_SMUIO_9_0_H__
+#define __IRQSRCS_SMUIO_9_0_H__
-#include "dml_common_defs.h"
+#define SMUIO_9_0__SRCID__SMUIO_GPIO19 83 /* GPIO19 interrupt */
-void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box);
-voltage_scaling_st dml_socbb_voltage_scaling(const soc_bounding_box_st *box, enum voltage_state voltage);
-double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage);
+#endif /* __IRQSRCS_SMUIO_9_0_H__ */
-#endif
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h b/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h
new file mode 100644
index 000000000000..5218bc53fb2d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_THM_9_0_H__
+#define __IRQSRCS_THM_9_0_H__
+
+#define THM_9_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
+#define THM_9_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
+
+#endif /* __IRQSRCS_THM_9_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h b/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h
new file mode 100644
index 000000000000..fb041aee6c66
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_UVD_7_0_H__
+#define __IRQSRCS_UVD_7_0_H__
+
+#define UVD_7_0__SRCID__UVD_ENC_GEN_PURP 119
+#define UVD_7_0__SRCID__UVD_ENC_LOW_LATENCY 120
+#define UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT 124 /* UVD system message interrupt */
+
+#endif /* __IRQSRCS_UVD_7_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h
new file mode 100644
index 000000000000..3440bab565af
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_VCE_4_0_H__
+#define __IRQSRCS_VCE_4_0_H__
+
+#define VCE_4_0__CTXID__VCE_TRAP_GENERAL_PURPOSE 0
+#define VCE_4_0__CTXID__VCE_TRAP_LOW_LATENCY 1
+#define VCE_4_0__CTXID__VCE_TRAP_REAL_TIME 2
+
+#endif /* __IRQSRCS_VCE_4_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h
new file mode 100644
index 000000000000..e5951709bfc3
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_VCN_1_0_H__
+#define __IRQSRCS_VCN_1_0_H__
+
+#define VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE 119 /* 0x77 Encoder General Purpose */
+#define VCN_1_0__SRCID__UVD_ENC_LOW_LATENCY 120 /* 0x78 Encoder Low Latency */
+#define VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT 124 /* 0x7c UVD system message interrupt */
+
+#endif /* __IRQSRCS_VCN_1_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h
new file mode 100644
index 000000000000..d130936c9989
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_VMC_1_0_H__
+#define __IRQSRCS_VMC_1_0_H__
+
+
+#define VMC_1_0__SRCID__VM_FAULT 0
+#define VMC_1_0__SRCID__VM_CONTEXT0_ALL 256
+#define VMC_1_0__SRCID__VM_CONTEXT1_ALL 257
+
+#define UTCL2_1_0__SRCID__FAULT 0 /* UTC L2 has encountered a fault or retry scenario */
+
+
+#endif /* __IRQSRCS_VMC_1_0_H__ */
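The same pattern applies to VM faults. A hedged sketch of how GMC code
might hook the VMC source (the client ID and the target irq source are
assumptions based on the SOC15 convention, not part of this patch):

    #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

    /* Route VM faults from the VMC block to the GMC fault handler. */
    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
                          VMC_1_0__SRCID__VM_FAULT,
                          &adev->gmc.vm_fault);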
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 5733fbee07f7..14391b06080c 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -47,6 +47,17 @@ enum kfd_preempt_type {
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
};
+struct kfd_vm_fault_info {
+ uint64_t page_addr;
+ uint32_t vmid;
+ uint32_t mc_id;
+ uint32_t status;
+ bool prot_valid;
+ bool prot_read;
+ bool prot_write;
+ bool prot_exec;
+};
+
struct kfd_cu_info {
uint32_t num_shader_engines;
uint32_t num_shader_arrays_per_engine;
@@ -259,6 +270,21 @@ struct tile_config {
* IB to the corresponding ring (ring type). The IB is executed with the
* specified VMID in a user mode context.
*
+ * @get_vm_fault_info: Return information about a recent VM fault on
+ * GFXv7 and v8. If multiple VM faults occurred since the last call of
+ * this function, it will return information about the first of those
+ * faults. On GFXv9 VM fault information is fully contained in the IH
+ * packet and this function is not needed.
+ *
+ * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
+ * IH ring entry. This function allows the KFD ISR to get the VMID
+ * from the fault status register as early as possible.
+ *
+ * @gpu_recover: lets kgd reset the GPU after kfd detects a CPC hang
+ *
+ * @set_compute_idle: Indicates that compute is idle on a device. This
+ * can be used to change power profiles depending on compute activity.
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -374,6 +400,14 @@ struct kfd2kgd_calls {
int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
+
+ int (*get_vm_fault_info)(struct kgd_dev *kgd,
+ struct kfd_vm_fault_info *info);
+ uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
+
+ void (*gpu_recover)(struct kgd_dev *kgd);
+
+ void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);
};
/**
@@ -399,6 +433,10 @@ struct kfd2kgd_calls {
* @schedule_evict_and_restore_process: Schedules work queue that will prepare
* for safe eviction of KFD BOs that belong to the specified process.
*
+ * @pre_reset: Notifies amdkfd that amdgpu is about to reset the GPU
+ *
+ * @post_reset: Notifies amdkfd that amdgpu successfully reset the GPU
+ *
* This structure contains function callback pointers so the kgd driver
* will notify to the amdkfd about certain status changes.
*
@@ -417,6 +455,8 @@ struct kgd2kfd_calls {
int (*resume_mm)(struct mm_struct *mm);
int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
struct dma_fence *fence);
+ int (*pre_reset)(struct kfd_dev *kfd);
+ int (*post_reset)(struct kfd_dev *kfd);
};
int kgd2kfd_init(unsigned interface_version,
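For context, a minimal sketch of how the KFD side might consume the new
get_vm_fault_info hook on GFXv7/v8, where the IH entry alone does not
carry the fault details. The kfd_dev fields used here follow the existing
kfd2kgd convention but are illustrative, not the committed implementation:

    static void kfd_report_vm_fault(struct kfd_dev *kfd)
    {
            struct kfd_vm_fault_info info = {};

            if (kfd->kfd2kgd->get_vm_fault_info &&
                !kfd->kfd2kgd->get_vm_fault_info(kfd->kgd, &info))
                    pr_err("VM fault at 0x%llx, vmid %u (r=%d w=%d x=%d)\n",
                           (unsigned long long)info.page_addr, info.vmid,
                           info.prot_read, info.prot_write, info.prot_exec);
    }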
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 06f08f34a110..6a41b81c7325 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -192,7 +192,6 @@ struct amd_pp_simple_clock_info;
struct amd_pp_display_configuration;
struct amd_pp_clock_info;
struct pp_display_clock_request;
-struct pp_wm_sets_with_clock_ranges_soc15;
struct pp_clock_levels_with_voltage;
struct pp_clock_levels_with_latency;
struct amd_pp_clocks;
@@ -232,16 +231,19 @@ struct amd_pm_funcs {
void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en);
/* export to amdgpu */
- void (*powergate_uvd)(void *handle, bool gate);
- void (*powergate_vce)(void *handle, bool gate);
struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx);
int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
enum amd_pm_state_type *user_state);
int (*load_firmware)(void *handle);
int (*wait_for_fw_loading_complete)(void *handle);
+ int (*set_powergating_by_smu)(void *handle,
+ uint32_t block_type, bool gate);
int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
int (*set_power_limit)(void *handle, uint32_t n);
int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
+ int (*get_power_profile_mode)(void *handle, char *buf);
+ int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
+ int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
/* export to DC */
u32 (*get_sclk)(void *handle, bool low);
u32 (*get_mclk)(void *handle, bool low);
@@ -261,15 +263,12 @@ struct amd_pm_funcs {
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks);
int (*set_watermarks_for_clocks_ranges)(void *handle,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ void *clock_ranges);
int (*display_clock_voltage_request)(void *handle,
struct pp_display_clock_request *clock);
int (*get_display_mode_validation_clocks)(void *handle,
struct amd_pp_simple_clock_info *clocks);
- int (*get_power_profile_mode)(void *handle, char *buf);
- int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
- int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
- int (*set_mmhub_powergating_by_smu)(void *handle);
+ int (*notify_smu_enable_pwe)(void *handle);
};
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index d567be49c31b..7a646f94b478 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -221,29 +221,7 @@ static int pp_sw_reset(void *handle)
static int pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = handle;
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- int ret;
-
- if (!hwmgr || !hwmgr->pm_en)
- return 0;
-
- if (hwmgr->hwmgr_func->gfx_off_control) {
- /* Enable/disable GFX off through SMU */
- ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr,
- state == AMD_PG_STATE_GATE);
- if (ret)
- pr_err("gfx off control failed!\n");
- }
-
- if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
- pr_debug("%s was not implemented.\n", __func__);
- return 0;
- }
-
- /* Enable/disable GFX per cu powergating through SMU */
- return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
- state == AMD_PG_STATE_GATE);
+ return 0;
}
static int pp_suspend(void *handle)
@@ -1020,7 +998,7 @@ static int pp_get_display_power_level(void *handle,
static int pp_get_current_clocks(void *handle,
struct amd_pp_clock_info *clocks)
{
- struct amd_pp_simple_clock_info simple_clocks;
+ struct amd_pp_simple_clock_info simple_clocks = { 0 };
struct pp_clock_info hw_clocks;
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
@@ -1056,7 +1034,10 @@ static int pp_get_current_clocks(void *handle,
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
- clocks->max_clocks_state = simple_clocks.level;
+ if (simple_clocks.level == 0)
+ clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
+ else
+ clocks->max_clocks_state = simple_clocks.level;
if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
@@ -1118,17 +1099,17 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
}
static int pp_set_watermarks_for_clocks_ranges(void *handle,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
+ if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
- wm_with_clock_ranges);
+ clock_ranges);
mutex_unlock(&hwmgr->smu_lock);
return ret;
@@ -1159,6 +1140,8 @@ static int pp_get_display_mode_validation_clocks(void *handle,
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
+ clocks->level = PP_DAL_POWERLEVEL_7;
+
mutex_lock(&hwmgr->smu_lock);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
@@ -1168,19 +1151,78 @@ static int pp_get_display_mode_validation_clocks(void *handle,
return ret;
}
-static int pp_set_mmhub_powergating_by_smu(void *handle)
+static int pp_dpm_powergate_mmhub(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
+ if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
+}
+
+static int pp_dpm_powergate_gfx(void *handle, bool gate)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return 0;
+
+ if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
+ return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
+}
+
+static int pp_set_powergating_by_smu(void *handle,
+ uint32_t block_type, bool gate)
+{
+ int ret = 0;
+
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ case AMD_IP_BLOCK_TYPE_VCN:
+ pp_dpm_powergate_uvd(handle, gate);
+ break;
+ case AMD_IP_BLOCK_TYPE_VCE:
+ pp_dpm_powergate_vce(handle, gate);
+ break;
+ case AMD_IP_BLOCK_TYPE_GMC:
+ pp_dpm_powergate_mmhub(handle);
+ break;
+ case AMD_IP_BLOCK_TYPE_GFX:
+ ret = pp_dpm_powergate_gfx(handle, gate);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int pp_notify_smu_enable_pwe(void *handle)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
}
static const struct amd_pm_funcs pp_dpm_funcs = {
@@ -1189,8 +1231,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.force_performance_level = pp_dpm_force_performance_level,
.get_performance_level = pp_dpm_get_performance_level,
.get_current_power_state = pp_dpm_get_current_power_state,
- .powergate_vce = pp_dpm_powergate_vce,
- .powergate_uvd = pp_dpm_powergate_uvd,
.dispatch_tasks = pp_dpm_dispatch_tasks,
.set_fan_control_mode = pp_dpm_set_fan_control_mode,
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
@@ -1210,6 +1250,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.get_vce_clock_state = pp_dpm_get_vce_clock_state,
.switch_power_profile = pp_dpm_switch_power_profile,
.set_clockgating_by_smu = pp_set_clockgating_by_smu,
+ .set_powergating_by_smu = pp_set_powergating_by_smu,
.get_power_profile_mode = pp_get_power_profile_mode,
.set_power_profile_mode = pp_set_power_profile_mode,
.odn_edit_dpm_table = pp_odn_edit_dpm_table,
@@ -1227,5 +1268,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
.display_clock_voltage_request = pp_display_clock_voltage_request,
.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
- .set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
+ .notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
};
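With powergate_uvd/powergate_vce folded into set_powergating_by_smu,
callers select the IP block by type. A sketch of the amdgpu calling side,
assuming the usual adev->powerplay.pp_funcs / pp_handle pair:

    /* Gate UVD through the unified powergating entry point. */
    if (adev->powerplay.pp_funcs->set_powergating_by_smu)
            adev->powerplay.pp_funcs->set_powergating_by_smu(
                    adev->powerplay.pp_handle,
                    AMD_IP_BLOCK_TYPE_UVD, true);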
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index a0bb921fac22..53207e76b0f3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -435,7 +435,7 @@ int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
PHM_FUNC_CHECK(hwmgr);
@@ -443,7 +443,7 @@ int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
return -EINVAL;
return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
- wm_with_clock_ranges);
+ clock_ranges);
}
int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index e63bc47dc715..8994aa5c8cf8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -81,7 +81,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
return -EINVAL;
hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
- hwmgr->power_source = PP_PowerSource_AC;
hwmgr->pp_table_version = PP_TABLE_V1;
hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
@@ -148,10 +147,10 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
smu7_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_AI:
- hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
switch (hwmgr->chip_id) {
case CHIP_VEGA10:
case CHIP_VEGA20:
+ hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega10_smu_funcs;
vega10_hwmgr_init(hwmgr);
break;
@@ -236,6 +235,11 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
ret = hwmgr->hwmgr_func->backend_init(hwmgr);
if (ret)
goto err1;
+ /* make sure dc limits are valid */
+ if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ hwmgr->dyn_state.max_clock_voltage_on_dc =
+ hwmgr->dyn_state.max_clock_voltage_on_ac;
ret = psm_init_power_state_table(hwmgr);
if (ret)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 7047e29755c3..01dc46dc9c8a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1544,14 +1544,14 @@ void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
switch (hwmgr->chip_id) {
case CHIP_TONGA:
case CHIP_FIJI:
- *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc/4);
- *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc/4);
+ *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4;
+ *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4;
return;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
- *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc/100);
- *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc/100);
+ *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100;
+ *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100;
return;
default:
break;
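The atomctrl hunks above are an endianness fix: ulMaxVddc/ulMinVddc are
little-endian VBIOS fields, so the byte swap must happen before the
arithmetic. An illustration of the difference (kernel types, values for
example only):

    __le32 raw = profile->ulMaxVddc;   /* LE encoding of, say, 1000 */
    u32 bad  = le32_to_cpu(raw / 4);   /* divides the raw LE bytes;
                                          wrong on big-endian hosts */
    u32 good = le32_to_cpu(raw) / 4;   /* swap first: 1000 / 4 == 250 */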
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 35bd9870ab10..4e1fd5393845 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -183,10 +183,10 @@ static int get_vddc_lookup_table(
ATOM_Tonga_Voltage_Lookup_Record,
entries, vddc_lookup_pp_tables, i);
record->us_calculated = 0;
- record->us_vdd = atom_record->usVdd;
- record->us_cac_low = atom_record->usCACLow;
- record->us_cac_mid = atom_record->usCACMid;
- record->us_cac_high = atom_record->usCACHigh;
+ record->us_vdd = le16_to_cpu(atom_record->usVdd);
+ record->us_cac_low = le16_to_cpu(atom_record->usCACLow);
+ record->us_cac_mid = le16_to_cpu(atom_record->usCACMid);
+ record->us_cac_high = le16_to_cpu(atom_record->usCACHigh);
}
*lookup_table = table;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index d4bc83e81389..a63e00653324 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -993,7 +993,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = latency_required ?
smu10_get_mem_latency(hwmgr,
pclk_vol_table->entries[i].clk) :
@@ -1044,7 +1044,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
clocks->num_levels++;
}
@@ -1108,9 +1108,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
}
static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
struct smu10_hwmgr *data = hwmgr->backend;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
Watermarks_t *table = &(data->water_marks_table);
int result = 0;
@@ -1126,7 +1127,7 @@ static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
}
-static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
+static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
{
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
}
@@ -1182,10 +1183,11 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.asic_setup = smu10_setup_asic_task,
.power_state_set = smu10_set_power_state_tasks,
.dynamic_state_management_disable = smu10_disable_dpm_tasks,
- .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
+ .powergate_mmhub = smu10_powergate_mmhub,
.smus_notify_pwe = smu10_smus_notify_pwe,
.gfx_off_control = smu10_gfx_off_control,
.display_clock_voltage_request = smu10_display_clock_voltage_request,
+ .powergate_gfx = smu10_gfx_off_control,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
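The recurring "* 10" in these hunks is a unit conversion: the powerplay
clock/voltage dependency tables store clocks in 10 kHz units, while the
amd_pp/DC interfaces expect kHz. A one-line helper capturing that
assumption (the helper name is illustrative, not part of the patch):

    /* Powerplay table clocks are in 10 kHz units; DC expects kHz.
     * e.g. 60000 (10 kHz units) -> 600000 kHz == 600 MHz. */
    static inline u32 pp_clk_10khz_to_khz(u32 clk_10khz)
    {
            return clk_10khz * 10;
    }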
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 6d72a5600917..683b29a99366 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -39,13 +39,6 @@ static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
PPSMC_MSG_VCEDPM_Disable);
}
-static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr, enable ?
- PPSMC_MSG_SAMUDPM_Enable :
- PPSMC_MSG_SAMUDPM_Disable);
-}
-
static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
if (!bgate)
@@ -60,13 +53,6 @@ static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
}
-static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- if (!bgate)
- smum_update_smc_table(hwmgr, SMU_SAMU_TABLE);
- return smu7_enable_disable_samu_dpm(hwmgr, !bgate);
-}
-
int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
@@ -107,35 +93,15 @@ static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SamuPowerGating))
- return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SAMPowerOFF);
- return 0;
-}
-
-static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SamuPowerGating))
- return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SAMPowerON);
- return 0;
-}
-
int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = false;
data->vce_power_gated = false;
- data->samu_power_gated = false;
smu7_powerup_uvd(hwmgr);
smu7_powerup_vce(hwmgr);
- smu7_powerup_samu(hwmgr);
return 0;
}
@@ -195,26 +161,6 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
}
}
-int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (data->samu_power_gated == bgate)
- return 0;
-
- data->samu_power_gated = bgate;
-
- if (bgate) {
- smu7_update_samu_dpm(hwmgr, true);
- smu7_powerdown_samu(hwmgr);
- } else {
- smu7_powerup_samu(hwmgr);
- smu7_update_samu_dpm(hwmgr, false);
- }
-
- return 0;
-}
-
int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id)
{
@@ -470,7 +416,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
* Powerplay will only control the static per CU Power Gating.
* Dynamic per CU Power Gating will be done in gfx.
*/
-int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
+int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable)
{
struct amdgpu_device *adev = hwmgr->adev;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index 1ddce023218a..fc8f8a6acc72 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -29,11 +29,10 @@
void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
-int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id);
-int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index f8e866ceda02..052e60dfaf9f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -48,6 +48,8 @@
#include "processpptables.h"
#include "pp_thermal.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
@@ -885,6 +887,60 @@ static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
data->odn_dpm_table.max_vddc = max_vddc;
}
+static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i;
+
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+
+ if (table_info == NULL)
+ return;
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
+ data->dpm_table.sclk_table.dpm_levels[i].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ break;
+ }
+ }
+
+ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
+ data->dpm_table.mclk_table.dpm_levels[i].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ break;
+ }
+ }
+
+ dep_table = table_info->vdd_dep_on_mclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
+
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+ return;
+ }
+ }
+
+ dep_table = table_info->vdd_dep_on_sclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+ return;
+ }
+ }
+ if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+ data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+ }
+}
+
static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -904,10 +960,13 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
/* initialize ODN table */
if (hwmgr->od_enabled) {
- smu7_setup_voltage_range_from_vbios(hwmgr);
- smu7_odn_initial_default_setting(hwmgr);
+ if (data->odn_dpm_table.max_vddc) {
+ smu7_check_dpm_table_updated(hwmgr);
+ } else {
+ smu7_setup_voltage_range_from_vbios(hwmgr);
+ smu7_odn_initial_default_setting(hwmgr);
+ }
}
-
return 0;
}
@@ -1521,7 +1580,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->current_profile_setting.sclk_up_hyst = 0;
data->current_profile_setting.sclk_down_hyst = 100;
data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
- data->current_profile_setting.bupdate_sclk = 1;
+ data->current_profile_setting.bupdate_mclk = 1;
data->current_profile_setting.mclk_up_hyst = 0;
data->current_profile_setting.mclk_down_hyst = 100;
data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
@@ -2820,7 +2879,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *request_ps,
const struct pp_power_state *current_ps)
{
-
+ struct amdgpu_device *adev = hwmgr->adev;
struct smu7_power_state *smu7_ps =
cast_phw_smu7_power_state(&request_ps->hardware);
uint32_t sclk;
@@ -2843,12 +2902,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
"VI should always have 2 performance levels",
);
- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+ max_limits = adev->pm.ac_power ?
&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
&(hwmgr->dyn_state.max_clock_voltage_on_dc);
/* Cap clock DPM tables at DC MAX if it is in DC. */
- if (PP_PowerSource_DC == hwmgr->power_source) {
+ if (!adev->pm.ac_power) {
for (i = 0; i < smu7_ps->performance_level_count; i++) {
if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
@@ -3126,7 +3185,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
state_entry->ucPCIEGenLow);
performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
- state_entry->ucPCIELaneHigh);
+ state_entry->ucPCIELaneLow);
performance_level = &(smu7_power_state->performance_levels
[smu7_power_state->performance_level_count++]);
@@ -3717,8 +3776,9 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
uint32_t i;
for (i = 0; i < dpm_table->count; i++) {
- if ((dpm_table->dpm_levels[i].value < low_limit)
- || (dpm_table->dpm_levels[i].value > high_limit))
+ /* skip the trim if OD is enabled */
+ if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
+ || dpm_table->dpm_levels[i].value > high_limit))
dpm_table->dpm_levels[i].enabled = false;
else
dpm_table->dpm_levels[i].enabled = true;
@@ -3762,10 +3822,8 @@ static int smu7_generate_dpm_level_enable_mask(
const struct smu7_power_state *smu7_ps =
cast_const_phw_smu7_power_state(states->pnew_state);
- /*skip the trim if od is enabled*/
- if (!hwmgr->od_enabled)
- result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+ result = smu7_trim_dpm_states(hwmgr, smu7_ps);
if (result)
return result;
@@ -4049,17 +4107,17 @@ static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
AMDGPU_IH_CLIENTID_LEGACY,
- 230,
+ VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
source);
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
AMDGPU_IH_CLIENTID_LEGACY,
- 231,
+ VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
source);
/* Register CTF(GPIO_19) interrupt */
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
AMDGPU_IH_CLIENTID_LEGACY,
- 83,
+ VISLANDS30_IV_SRCID_GPIO_19,
source);
return 0;
@@ -4244,7 +4302,6 @@ static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
data->uvd_power_gated = false;
data->vce_power_gated = false;
- data->samu_power_gated = false;
return 0;
}
@@ -4555,12 +4612,12 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
return -EINVAL;
dep_sclk_table = table_info->vdd_dep_on_sclk;
for (i = 0; i < dep_sclk_table->count; i++)
- clocks->clock[i] = dep_sclk_table->entries[i].clk;
+ clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
clocks->count = dep_sclk_table->count;
} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
for (i = 0; i < sclk_table->count; i++)
- clocks->clock[i] = sclk_table->entries[i].clk;
+ clocks->clock[i] = sclk_table->entries[i].clk * 10;
clocks->count = sclk_table->count;
}
@@ -4592,7 +4649,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
return -EINVAL;
dep_mclk_table = table_info->vdd_dep_on_mclk;
for (i = 0; i < dep_mclk_table->count; i++) {
- clocks->clock[i] = dep_mclk_table->entries[i].clk;
+ clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
clocks->latency[i] = smu7_get_mem_latency(hwmgr,
dep_mclk_table->entries[i].clk);
}
@@ -4600,7 +4657,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
for (i = 0; i < mclk_table->count; i++)
- clocks->clock[i] = mclk_table->entries[i].clk;
+ clocks->clock[i] = mclk_table->entries[i].clk * 10;
clocks->count = mclk_table->count;
}
return 0;
@@ -4739,60 +4796,6 @@ static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
return true;
}
-static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i;
-
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
- struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
-
- if (table_info == NULL)
- return;
-
- for (i=0; i<data->dpm_table.sclk_table.count; i++) {
- if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
- data->dpm_table.sclk_table.dpm_levels[i].value) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- break;
- }
- }
-
- for (i=0; i<data->dpm_table.mclk_table.count; i++) {
- if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
- data->dpm_table.mclk_table.dpm_levels[i].value) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
- break;
- }
- }
-
- dep_table = table_info->vdd_dep_on_mclk;
- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
-
- for (i=0; i < dep_table->count; i++) {
- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
- return;
- }
- }
-
- dep_table = table_info->vdd_dep_on_sclk;
- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
- for (i=0; i < dep_table->count; i++) {
- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
- return;
- }
- }
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
- data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
- }
-}
-
static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
@@ -5043,7 +5046,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_fan_control_mode = smu7_get_fan_control_mode,
.force_clock_level = smu7_force_clock_level,
.print_clock_levels = smu7_print_clock_levels,
- .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
+ .powergate_gfx = smu7_powergate_gfx,
.get_sclk_od = smu7_get_sclk_od,
.set_sclk_od = smu7_set_sclk_od,
.get_mclk_od = smu7_get_mclk_od,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index c91e75db6a8e..3784ce6e50ab 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -310,7 +310,6 @@ struct smu7_hwmgr {
/* ---- Power Gating States ---- */
bool uvd_power_gated;
bool vce_power_gated;
- bool samu_power_gated;
bool need_long_memory_training;
/* Application power optimization parameters */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index c952845833d7..5e19f5977eb1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
{ ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 50690c72b2ea..0adfc5392cd3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -244,6 +244,7 @@ static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
return 0;
}
+/* convert form 8bit vid to real voltage in mV*4 */
static uint32_t smu8_convert_8Bit_index_to_voltage(
struct pp_hwmgr *hwmgr, uint16_t voltage)
{
@@ -1604,17 +1605,17 @@ static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type
switch (type) {
case amd_pp_disp_clock:
for (i = 0; i < clocks->count; i++)
- clocks->clock[i] = data->sys_info.display_clock[i];
+ clocks->clock[i] = data->sys_info.display_clock[i] * 10;
break;
case amd_pp_sys_clock:
table = hwmgr->dyn_state.vddc_dependency_on_sclk;
for (i = 0; i < clocks->count; i++)
- clocks->clock[i] = table->entries[i].clk;
+ clocks->clock[i] = table->entries[i].clk * 10;
break;
case amd_pp_mem_clock:
clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
for (i = 0; i < clocks->count; i++)
- clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i];
+ clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10;
break;
default:
return -1;
@@ -1702,13 +1703,13 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
case AMDGPU_PP_SENSOR_VDDNB:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
- vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp);
+ vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
*((uint32_t *)value) = vddnb;
return 0;
case AMDGPU_PP_SENSOR_VDDGFX:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
- vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+ vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
*((uint32_t *)value) = vddgfx;
return 0;
case AMDGPU_PP_SENSOR_UVD_VCLK:
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 93a3d022ba47..2aab1b475945 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -25,6 +25,9 @@
#include "ppatomctrl.h"
#include "ppsmc.h"
#include "atom.h"
+#include "ivsrcid/thm/irqsrcs_thm_9_0.h"
+#include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
uint8_t convert_to_vid(uint16_t vddc)
{
@@ -543,17 +546,17 @@ int phm_irq_process(struct amdgpu_device *adev,
uint32_t src_id = entry->src_id;
if (client_id == AMDGPU_IH_CLIENTID_LEGACY) {
- if (src_id == 230)
+ if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
- else if (src_id == 231)
+ else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
- else if (src_id == 83)
+ else if (src_id == VISLANDS30_IV_SRCID_GPIO_19)
pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
@@ -594,17 +597,17 @@ int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_THM,
- 0,
+ THM_9_0__SRCID__THM_DIG_THERM_L2H,
source);
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_THM,
- 1,
+ THM_9_0__SRCID__THM_DIG_THERM_H2L,
source);
/* Register CTF(GPIO_19) interrupt */
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_ROM_SMUIO,
- 83,
+ SMUIO_9_0__SRCID__SMUIO_GPIO19,
source);
return 0;
@@ -652,7 +655,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
}
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
uint32_t i;
struct watermarks *table = wt_table;
@@ -660,49 +663,49 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
if (!table || !wm_with_clock_ranges)
return -EINVAL;
- if (wm_with_clock_ranges->num_wm_sets_dmif > 4 || wm_with_clock_ranges->num_wm_sets_mcif > 4)
+ if (wm_with_clock_ranges->num_wm_dmif_sets > 4 || wm_with_clock_ranges->num_wm_mcif_sets > 4)
return -EINVAL;
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
+ for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
table->WatermarkRow[1][i].MinClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
+ 1000);
table->WatermarkRow[1][i].MaxClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
100);
table->WatermarkRow[1][i].MinUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[1][i].MaxUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[1][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
+ wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
}
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
+ for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
table->WatermarkRow[0][i].MinClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].MaxClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].MinUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].MaxUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
+ wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
return 0;
}
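
The hunk above feeds DM's per-set clock ranges, given in kHz, into the SMU watermark table, whose MinClock/MaxClock/MinUclk/MaxUclk fields are little-endian 16-bit values in MHz, hence the kHz-to-MHz divides. A minimal stand-alone sketch of that conversion; to_le16() is an illustrative stand-in for the kernel's cpu_to_le16(), assuming a little-endian host:

#include <stdint.h>

/* Illustrative stand-in for the kernel's cpu_to_le16() on a
 * little-endian host. */
static inline uint16_t to_le16(uint16_t v)
{
	return v;
}

/* DM hands clocks over in kHz; the watermark rows store MHz in
 * 16 bits, so 1 GHz (1000000 kHz) becomes 1000. */
static inline uint16_t wm_clk_mhz(uint32_t clk_in_khz)
{
	return to_le16((uint16_t)(clk_in_khz / 1000));
}
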
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
index 916cc01e7652..5454289d5226 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -107,7 +107,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table);
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
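
For reference, the two PHM_FIELD_* helpers kept at the bottom of smu_helper.h rely on token pasting: PHM_FIELD_SHIFT(REG, FIELD) expands to REG__FIELD__SHIFT and PHM_FIELD_MASK(REG, FIELD) to REG__FIELD_MASK. A compile-time sketch with a made-up register field (the THM_TCON constants below are purely illustrative):

#include <stdio.h>

#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK

/* Hypothetical register field, for illustration only. */
#define THM_TCON__DIG_THERM__SHIFT 8
#define THM_TCON__DIG_THERM_MASK   0x0000ff00

int main(void)
{
	unsigned int reg = 0x00004500;
	unsigned int field = (reg & PHM_FIELD_MASK(THM_TCON, DIG_THERM))
			>> PHM_FIELD_SHIFT(THM_TCON, DIG_THERM);

	printf("DIG_THERM = 0x%x\n", field); /* prints 0x45 */
	return 0;
}
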
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 05e680d55dbb..fb86c24394ff 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -55,12 +55,6 @@
static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
-#define MEM_FREQ_LOW_LATENCY 25000
-#define MEM_FREQ_HIGH_LATENCY 80000
-#define MEM_LATENCY_HIGH 245
-#define MEM_LATENCY_LOW 35
-#define MEM_LATENCY_ERR 0xFFFF
-
#define mmDF_CS_AON0_DramBaseAddress0 0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
@@ -295,7 +289,15 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
+ struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
uint32_t i;
+ int result;
+
+ result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
+ if (!result) {
+ data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
+ data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
+ }
od_lookup_table = &odn_table->vddc_lookup_table;
vddc_lookup_table = table_info->vddc_lookup_table;
@@ -2078,9 +2080,6 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_AVFS].supported) {
result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
if (!result) {
- data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
- data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
-
pp_table->MinVoltageVid = (uint8_t)
convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
pp_table->MaxVoltageVid = (uint8_t)
@@ -2414,6 +2413,40 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
return result;
}
+static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+ struct phm_ppt_v2_information *table_info = hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+ uint32_t i;
+
+ dep_table = table_info->vdd_dep_on_mclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
+
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+ return;
+ }
+ }
+
+ dep_table = table_info->vdd_dep_on_sclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+ return;
+ }
+ }
+
+ if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+ data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+ }
+}
+
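
The helper above, moved here so vega10_init_smc_table() can reach it, folds a standalone VDDC change into a full SCLK and MCLK repopulation, since voltage is reprogrammed as part of the clock levels. The flag algebra in sketch form, with illustrative bit values (the driver's real DPMTABLE_* constants live in hwmgr.h):

#include <stdint.h>

/* Illustrative values only; see hwmgr.h for the driver's definitions. */
#define DPMTABLE_OD_UPDATE_SCLK 0x1u
#define DPMTABLE_OD_UPDATE_MCLK 0x2u
#define DPMTABLE_OD_UPDATE_VDDC 0x4u

static uint32_t fold_vddc_update(uint32_t need_update)
{
	/* A pure VDDC change cannot be applied on its own: clear it and
	 * request both clock tables to be rebuilt instead. */
	if (need_update & DPMTABLE_OD_UPDATE_VDDC) {
		need_update &= ~DPMTABLE_OD_UPDATE_VDDC;
		need_update |= DPMTABLE_OD_UPDATE_SCLK |
			       DPMTABLE_OD_UPDATE_MCLK;
	}
	return need_update;
}
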
/**
* Initializes the SMC table and uploads it
*
@@ -2430,6 +2463,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct pp_atomfwctrl_voltage_table voltage_table;
struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
+ struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
result = vega10_setup_default_dpm_tables(hwmgr);
PP_ASSERT_WITH_CODE(!result,
@@ -2437,8 +2471,14 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
return result);
/* initialize ODN table */
- if (hwmgr->od_enabled)
- vega10_odn_initial_default_setting(hwmgr);
+ if (hwmgr->od_enabled) {
+ if (odn_table->max_vddc) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+ vega10_check_dpm_table_updated(hwmgr);
+ } else {
+ vega10_odn_initial_default_setting(hwmgr);
+ }
+ }
pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
VOLTAGE_OBJ_SVID2, &voltage_table);
@@ -2861,11 +2901,6 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
vega10_enable_disable_PCC_limit_feature(hwmgr, true);
- if ((hwmgr->smu_version == 0x001c2c00) ||
- (hwmgr->smu_version == 0x001c2d00))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
-
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
@@ -3061,6 +3096,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *request_ps,
const struct pp_power_state *current_ps)
{
+ struct amdgpu_device *adev = hwmgr->adev;
struct vega10_power_state *vega10_ps =
cast_phw_vega10_power_state(&request_ps->hardware);
uint32_t sclk;
@@ -3086,12 +3122,12 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
if (vega10_ps->performance_level_count != 2)
pr_info("VI should always have 2 performance levels");
- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+ max_limits = adev->pm.ac_power ?
&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
&(hwmgr->dyn_state.max_clock_voltage_on_dc);
/* Cap clock DPM tables at DC MAX if it is in DC. */
- if (PP_PowerSource_DC == hwmgr->power_source) {
+ if (!adev->pm.ac_power) {
for (i = 0; i < vega10_ps->performance_level_count; i++) {
if (vega10_ps->performance_levels[i].mem_clock >
max_limits->mclk)
@@ -3181,7 +3217,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
/* Find the lowest MCLK frequency that is within
* the tolerable latency defined in DAL
*/
- latency = 0;
+ latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
for (i = 0; i < data->mclk_latency_table.count; i++) {
if ((data->mclk_latency_table.entries[i].latency <= latency) &&
(data->mclk_latency_table.entries[i].frequency >=
@@ -3223,10 +3259,25 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
{
int result = 0;
struct vega10_hwmgr *data = hwmgr->backend;
+ struct vega10_dpm_table *dpm_table = &data->dpm_table;
+ struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
+ struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
+ int count;
if (!data->need_update_dpm_table)
return 0;
+ if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+ for (count = 0; count < dpm_table->gfx_table.count; count++)
+ dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
+ }
+
+ odn_clk_table = &odn_table->vdd_dep_on_mclk;
+ if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+ for (count = 0; count < dpm_table->mem_table.count; count++)
+ dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
+ }
+
if (data->need_update_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
result = vega10_populate_all_graphic_levels(hwmgr);
@@ -3674,7 +3725,7 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 0 : 1);
+ has_disp ? 1 : 0);
}
int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
@@ -3749,7 +3800,9 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
uint32_t i;
struct pp_display_clock_request clock_req;
- if (hwmgr->display_config->num_display > 1)
+ if ((hwmgr->display_config->num_display > 1) &&
+ !hwmgr->display_config->multi_monitor_in_sync &&
+ !hwmgr->display_config->nb_pstate_switch_disable)
vega10_notify_smc_display_change(hwmgr, false);
else
vega10_notify_smc_display_change(hwmgr, true);
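
Together with the has_disp argument fix earlier in this file, the widened check above keeps UCLK fast switching on unless several displays are active, they are not in sync, and NB p-state switching has not already been disabled. A boolean sketch of that decision; the field names mirror display_config, the helper itself is hypothetical:

#include <stdbool.h>

/* Sketch of the condition above: only report "no display" (slow UCLK
 * switching) for multiple unsynchronized displays with p-state
 * switching still allowed. */
static bool smc_display_change_arg(int num_display,
		bool multi_monitor_in_sync, bool nb_pstate_switch_disable)
{
	return !(num_display > 1 &&
		 !multi_monitor_in_sync &&
		 !nb_pstate_switch_disable);
}
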
@@ -3765,7 +3818,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
if (i < dpm_table->count) {
clock_req.clock_type = amd_pp_dcef_clock;
- clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
+ clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
@@ -4022,28 +4075,17 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
table_info->vdd_dep_on_sclk;
uint32_t i;
+ clocks->num_levels = 0;
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].clk) {
clocks->data[clocks->num_levels].clocks_in_khz =
- dep_table->entries[i].clk;
+ dep_table->entries[i].clk * 10;
clocks->num_levels++;
}
}
}
-static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
- uint32_t clock)
-{
- if (clock >= MEM_FREQ_LOW_LATENCY &&
- clock < MEM_FREQ_HIGH_LATENCY)
- return MEM_LATENCY_HIGH;
- else if (clock >= MEM_FREQ_HIGH_LATENCY)
- return MEM_LATENCY_LOW;
- else
- return MEM_LATENCY_ERR;
-}
-
static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
struct pp_clock_levels_with_latency *clocks)
{
@@ -4052,26 +4094,22 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
table_info->vdd_dep_on_mclk;
struct vega10_hwmgr *data = hwmgr->backend;
+ uint32_t j = 0;
uint32_t i;
- clocks->num_levels = 0;
- data->mclk_latency_table.count = 0;
-
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].clk) {
- clocks->data[clocks->num_levels].clocks_in_khz =
- data->mclk_latency_table.entries
- [data->mclk_latency_table.count].frequency =
- dep_table->entries[i].clk;
- clocks->data[clocks->num_levels].latency_in_us =
- data->mclk_latency_table.entries
- [data->mclk_latency_table.count].latency =
- vega10_get_mem_latency(hwmgr,
- dep_table->entries[i].clk);
- clocks->num_levels++;
- data->mclk_latency_table.count++;
+
+ clocks->data[j].clocks_in_khz =
+ dep_table->entries[i].clk * 10;
+ data->mclk_latency_table.entries[j].frequency =
+ dep_table->entries[i].clk;
+ clocks->data[j].latency_in_us =
+ data->mclk_latency_table.entries[j].latency = 25;
+ j++;
}
}
+ clocks->num_levels = data->mclk_latency_table.count = j;
}
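
Two things change in vega10_get_memclocks(): clocks leave the driver in kHz (the dependency tables store 10 kHz units, hence the multiply by 10), and the removed MEM_FREQ_*/MEM_LATENCY_* heuristic gives way to a flat 25 us latency per level. A sketch of the unit handling with a hypothetical table entry:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Dependency tables store clocks in 10 kHz units. */
	uint32_t dep_clk = 80000;              /* 80000 * 10 kHz = 800 MHz */
	uint32_t clocks_in_khz = dep_clk * 10; /* 800000 kHz, as reported  */
	uint32_t latency_in_us = 25;           /* flat latency per level   */

	printf("%" PRIu32 " kHz, %" PRIu32 " us\n",
	       clocks_in_khz, latency_in_us);
	return 0;
}
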
static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
@@ -4084,7 +4122,7 @@ static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
uint32_t i;
for (i = 0; i < dep_table->count; i++) {
- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = 0;
clocks->num_levels++;
}
@@ -4100,7 +4138,7 @@ static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
uint32_t i;
for (i = 0; i < dep_table->count; i++) {
- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = 0;
clocks->num_levels++;
}
@@ -4160,7 +4198,7 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
for (i = 0; i < dep_table->count; i++) {
- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
entries[dep_table->entries[i].vddInd].us_vdd);
clocks->num_levels++;
@@ -4173,9 +4211,10 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_range)
{
struct vega10_hwmgr *data = hwmgr->backend;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
int result = 0;
@@ -4695,40 +4734,6 @@ static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
return true;
}
-static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
-{
- struct vega10_hwmgr *data = hwmgr->backend;
- struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
- struct phm_ppt_v2_information *table_info = hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
- struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
- uint32_t i;
-
- dep_table = table_info->vdd_dep_on_mclk;
- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
-
- for (i = 0; i < dep_table->count; i++) {
- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
- return;
- }
- }
-
- dep_table = table_info->vdd_dep_on_sclk;
- odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
- for (i = 0; i < dep_table->count; i++) {
- if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
- return;
- }
- }
-
- if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
- data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
- }
-}
-
static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index aadd6cbc7e85..339820da9e6a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -370,7 +370,6 @@ struct vega10_hwmgr {
/* ---- Power Gating States ---- */
bool uvd_power_gated;
bool vce_power_gated;
- bool samu_power_gated;
bool need_long_memory_training;
/* Internal settings to apply the application power optimization parameters */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index c98e5de777cd..0789d64246ca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -423,6 +423,11 @@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
+ if (hwmgr->feature_mask & PP_GFXOFF_MASK)
+ data->gfxoff_controlled_by_driver = true;
+ else
+ data->gfxoff_controlled_by_driver = false;
+
return result;
}
@@ -454,43 +459,36 @@ static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
*/
static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
{
- dpm_state->soft_min_level = 0xff;
- dpm_state->soft_max_level = 0xff;
- dpm_state->hard_min_level = 0xff;
- dpm_state->hard_max_level = 0xff;
+ dpm_state->soft_min_level = 0x0;
+ dpm_state->soft_max_level = 0xffff;
+ dpm_state->hard_min_level = 0x0;
+ dpm_state->hard_max_level = 0xffff;
}
-static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr,
- PPCLK_e clkID, uint32_t *num_dpm_level)
+static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
+ PPCLK_e clk_id, uint32_t *num_of_levels)
{
- int result;
- /*
- * SMU expects the Clock ID to be in the top 16 bits.
- * Lower 16 bits specify the level however 0xFF is a
- * special argument the returns the total number of levels
- */
- PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | 0xFF)) == 0,
- "[GetNumberDpmLevel] Failed to get DPM levels from SMU for CLKID!",
- return -EINVAL);
-
- result = vega12_read_arg_from_smc(hwmgr, num_dpm_level);
+ int ret = 0;
- PP_ASSERT_WITH_CODE(*num_dpm_level < MAX_REGULAR_DPM_NUMBER,
- "[GetNumberDPMLevel] Number of DPM levels is greater than limit",
- return -EINVAL);
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetDpmFreqByIndex,
+ (clk_id << 16 | 0xFF));
+ PP_ASSERT_WITH_CODE(!ret,
+ "[GetNumOfDpmLevel] failed to get dpm levels!",
+ return ret);
- PP_ASSERT_WITH_CODE(*num_dpm_level != 0,
- "[GetNumberDPMLevel] Number of CLK Levels is zero!",
- return -EINVAL);
+ *num_of_levels = smum_get_argument(hwmgr);
+ PP_ASSERT_WITH_CODE(*num_of_levels > 0,
+ "[GetNumOfDpmLevel] number of clk levels is invalid!",
+ return -EINVAL);
- return result;
+ return ret;
}
static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
PPCLK_e clkID, uint32_t index, uint32_t *clock)
{
- int result;
+ int result = 0;
/*
*SMU expects the Clock ID to be in the top 16 bits.
@@ -501,15 +499,36 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
"[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
return -EINVAL);
- result = vega12_read_arg_from_smc(hwmgr, clock);
-
- PP_ASSERT_WITH_CODE(*clock != 0,
- "[GetDPMFrequencyByIndex] Failed to get dpm frequency by index.!",
- return -EINVAL);
+ *clock = smum_get_argument(hwmgr);
return result;
}
+static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
+ struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
+{
+ int ret = 0;
+ uint32_t i, num_of_levels, clk;
+
+ ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupSingleDpmTable] failed to get clk levels!",
+ return ret);
+
+ dpm_table->count = num_of_levels;
+
+ for (i = 0; i < num_of_levels; i++) {
+ ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupSingleDpmTable] failed to get clk of specific level!",
+ return ret);
+ dpm_table->dpm_levels[i].value = clk;
+ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ return ret;
+}
+
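
vega12_setup_single_dpm_table() factors out the query loop that used to be repeated once per clock domain: ask the SMU for the level count, then fetch each level's frequency. The per-domain pattern its callers follow, sketched with toy stand-in types (not the driver's structures):

#include <stdint.h>

/* Toy stand-ins for the driver's structures, for illustration only. */
struct level { uint32_t value; int enabled; };
struct table { uint32_t count; struct level dpm_levels[16]; };

/* Pattern from vega12_setup_default_dpm_tables(): use the SMU-provided
 * levels when DPM is enabled for the domain, otherwise a single level
 * from the VBIOS boot clock (10 kHz units, so /100 yields MHz). */
static void setup_domain(struct table *tbl, int dpm_enabled,
		const uint32_t *smu_levels, uint32_t n,
		uint32_t boot_clk_10khz)
{
	uint32_t i;

	if (dpm_enabled) {
		tbl->count = n;
		for (i = 0; i < n; i++) {
			tbl->dpm_levels[i].value = smu_levels[i];
			tbl->dpm_levels[i].enabled = 1;
		}
	} else {
		tbl->count = 1;
		tbl->dpm_levels[0].value = boot_clk_10khz / 100;
	}
}
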
/*
* This function is to initialize all DPM state tables
* for SMU based on the dependency table.
@@ -520,224 +539,136 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
*/
static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
- uint32_t num_levels, i, clock;
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
-
struct vega12_single_dpm_table *dpm_table;
+ int ret = 0;
memset(&data->dpm_table, 0, sizeof(data->dpm_table));
- /* Initialize Sclk DPM and SOC DPM table based on allow Sclk values */
+ /* socclk */
dpm_table = &(data->dpm_table.soc_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_SOCCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_SOCCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get socclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* gfxclk */
dpm_table = &(data->dpm_table.gfx_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_GFXCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_GFXCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
- /* Initialize Mclk DPM table based on allow Mclk values */
- dpm_table = &(data->dpm_table.mem_table);
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_UCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_UCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ /* memclk */
+ dpm_table = &(data->dpm_table.mem_table);
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* eclk */
dpm_table = &(data->dpm_table.eclk_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_ECLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_ECLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get eclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* vclk */
dpm_table = &(data->dpm_table.vclk_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_VCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_VCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get vclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* dclk */
dpm_table = &(data->dpm_table.dclk_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_DCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_DCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get dclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
- /* Assume there is no headless Vega12 for now */
+ /* dcefclk */
dpm_table = &(data->dpm_table.dcef_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_DCEFCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_DCEFCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* pixclk */
dpm_table = &(data->dpm_table.pixel_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_PIXCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_PIXCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
- }
-
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
+ return ret);
+ } else
+ dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* dispclk */
dpm_table = &(data->dpm_table.display_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_DISPCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_DISPCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
- }
-
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
+ return ret);
+ } else
+ dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* phyclk */
dpm_table = &(data->dpm_table.phy_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_PHYCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_PHYCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
- }
-
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
+ return ret);
+ } else
+ dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
/* save a copy of the default DPM table */
@@ -848,6 +779,21 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
return 0;
}
+static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+
+ data->uvd_power_gated = true;
+ data->vce_power_gated = true;
+
+ if (data->smu_features[GNLD_DPM_UVD].enabled)
+ data->uvd_power_gated = false;
+
+ if (data->smu_features[GNLD_DPM_VCE].enabled)
+ data->vce_power_gated = false;
+}
+
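
vega12_init_powergate_state() seeds the bookkeeping from what the SMU actually brought up: a block whose DPM feature is enabled is treated as running, not gated. In sketch form:

#include <stdbool.h>

/* Power-gate state follows feature state: an enabled UVD/VCE DPM
 * feature means the block is up, so it starts out not gated. */
static bool initial_power_gated(bool dpm_feature_enabled)
{
	return !dpm_feature_enabled;
}
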
static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
@@ -866,12 +812,11 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
data->smu_features[i].enabled = enabled;
data->smu_features[i].supported = enabled;
- PP_ASSERT(
- !data->smu_features[i].allowed || enabled,
- "[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!");
}
}
+ vega12_init_powergate_state(hwmgr);
+
return 0;
}
@@ -927,6 +872,48 @@ static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
return result;
}
+static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
+ PPCLK_e clkid, struct vega12_clock_range *clock)
+{
+ /* AC Max */
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get max ac clock from SMC!",
+ return -EINVAL);
+ clock->ACMax = smum_get_argument(hwmgr);
+
+ /* AC Min */
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get min ac clock from SMC!",
+ return -EINVAL);
+ clock->ACMin = smum_get_argument(hwmgr);
+
+ /* DC Max */
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get max dc clock from SMC!",
+ return -EINVAL);
+ clock->DCMax = smum_get_argument(hwmgr);
+
+ return 0;
+}
+
+static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t i;
+
+ for (i = 0; i < PPCLK_COUNT; i++)
+ PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
+ i, &(data->clk_range[i])),
+ "Failed to get clk range from SMC!",
+ return -EINVAL);
+
+ return 0;
+}
+
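
vega12_get_all_clock_ranges() snapshots the AC min/max and DC max for every PPCLK_e once at DPM-enable time, so later queries (see vega12_get_clock_ranges() further down) become table lookups instead of SMC round-trips. The Get{Max,Min,DcModeMax}DpmFreq messages carry the clock ID in the upper 16 bits of the parameter; a sketch of the packing:

#include <stdint.h>

/* Parameter layout for the clock-range queries above: the clock ID
 * sits in bits [31:16]; the low half is unused. */
static inline uint32_t clk_range_msg_param(uint32_t clk_id)
{
	return clk_id << 16;
}
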
static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
@@ -954,6 +941,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"Failed to power control set level!",
result = tmp_result);
+ result = vega12_get_all_clock_ranges(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to get all clock ranges!",
+ return result);
+
result = vega12_odn_initialize_default_settings(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to power control set level!",
@@ -982,76 +974,172 @@ static uint32_t vega12_find_lowest_dpm_level(
break;
}
+ if (i >= table->count) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
return i;
}
static uint32_t vega12_find_highest_dpm_level(
struct vega12_single_dpm_table *table)
{
- uint32_t i = 0;
+ int32_t i = 0;
+ PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
+ "[FindHighestDPMLevel] DPM Table has too many entries!",
+ return MAX_REGULAR_DPM_NUMBER - 1);
- if (table->count <= MAX_REGULAR_DPM_NUMBER) {
- for (i = table->count; i > 0; i--) {
- if (table->dpm_levels[i - 1].enabled)
- return i - 1;
- }
- } else {
- pr_info("DPM Table Has Too Many Entries!");
- return MAX_REGULAR_DPM_NUMBER - 1;
+ for (i = table->count - 1; i >= 0; i--) {
+ if (table->dpm_levels[i].enabled)
+ break;
}
- return i;
+ if (i < 0) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
+ return (uint32_t)i;
}
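
The reworked scan walks the table from the top with a signed index and, like the lowest-level counterpart above it, falls back to enabling level 0 rather than bailing out when no level is marked enabled. A self-contained sketch of the same logic:

#include <stdint.h>

struct lvl { uint32_t value; int enabled; };

/* Mirror of the scan above: return the highest enabled level, or
 * enable and return level 0 as a safe fallback. */
static uint32_t find_highest_enabled(struct lvl *levels, uint32_t count)
{
	int32_t i;

	for (i = (int32_t)count - 1; i >= 0; i--)
		if (levels[i].enabled)
			break;

	if (i < 0) {
		i = 0;
		levels[0].enabled = 1;
	}
	return (uint32_t)i;
}
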
static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = hwmgr->backend;
- if (data->smc_state_table.gfx_boot_level !=
- data->dpm_table.gfx_table.dpm_state.soft_min_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMinByFreq,
- PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value);
- data->dpm_table.gfx_table.dpm_state.soft_min_level =
- data->smc_state_table.gfx_boot_level;
+ uint32_t min_freq;
+ int ret = 0;
+
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min gfxclk !",
+ return ret);
}
- if (data->smc_state_table.mem_boot_level !=
- data->dpm_table.mem_table.dpm_state.soft_min_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMinByFreq,
- PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value);
- data->dpm_table.mem_table.dpm_state.soft_min_level =
- data->smc_state_table.mem_boot_level;
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min memclk !",
+ return ret);
+
+ min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetHardMinByFreq,
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set hard min memclk !",
+ return ret);
}
- return 0;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min vclk!",
+ return ret);
+
+ min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min dclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min eclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min socclk!",
+ return ret);
+ }
+
+ return ret;
}
static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = hwmgr->backend;
- if (data->smc_state_table.gfx_max_level !=
- data->dpm_table.gfx_table.dpm_state.soft_max_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMaxByFreq,
- /* plus the vale by 1 to align the resolution */
- PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1));
- data->dpm_table.gfx_table.dpm_state.soft_max_level =
- data->smc_state_table.gfx_max_level;
+ uint32_t max_freq;
+ int ret = 0;
+
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max gfxclk!",
+ return ret);
}
- if (data->smc_state_table.mem_max_level !=
- data->dpm_table.mem_table.dpm_state.soft_max_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMaxByFreq,
- /* plus the vale by 1 to align the resolution */
- PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1));
- data->dpm_table.mem_table.dpm_state.soft_max_level =
- data->smc_state_table.mem_max_level;
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max memclk!",
+ return ret);
}
- return 0;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max vclk!",
+ return ret);
+
+ max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max dclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max eclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max socclk!",
+ return ret);
+ }
+
+ return ret;
}
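
Both upload paths now push the soft (and, for UCLK, hard) limits straight from dpm_state, gated per enabled feature, instead of diffing against the old smc_state_table boot/max levels. Each SetSoft{Min,Max}ByFreq parameter packs the clock ID into the top half and the frequency in MHz into the bottom half:

#include <stdint.h>

/* Parameter layout for PPSMC_MSG_SetSoftMinByFreq and friends:
 * bits [31:16] hold the PPCLK_e clock ID, [15:0] the MHz value. */
static inline uint32_t soft_limit_param(uint32_t clk_id, uint32_t freq_mhz)
{
	return (clk_id << 16) | (freq_mhz & 0xffff);
}
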
int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
@@ -1127,7 +1215,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
"Failed to get current package power!",
return -EINVAL);
- vega12_read_arg_from_smc(hwmgr, &value);
+ value = smum_get_argument(hwmgr);
/* power value is an integer */
*query = value << 8;
#endif
@@ -1140,14 +1228,11 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
*gfx_freq = 0;
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
"[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(
- vega12_read_arg_from_smc(hwmgr, &gfx_clk) == 0,
- "[GetCurrentGfxClkFreq] Attempt to read arg from SMC Failed",
- return -1);
+ return -EINVAL);
+ gfx_clk = smum_get_argument(hwmgr);
*gfx_freq = gfx_clk * 100;
@@ -1163,11 +1248,8 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
"[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(
- vega12_read_arg_from_smc(hwmgr, &mem_clk) == 0,
- "[GetCurrentMClkFreq] Attempt to read arg from SMC Failed",
- return -1);
+ return -EINVAL);
+ mem_clk = smum_get_argument(hwmgr);
*mclk_freq = mem_clk * 100;
@@ -1184,16 +1266,12 @@ static int vega12_get_current_activity_percent(
#if 0
ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
if (!ret) {
- ret = vega12_read_arg_from_smc(hwmgr, &current_activity);
- if (!ret) {
- if (current_activity > 100) {
- PP_ASSERT(false,
- "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
- current_activity = 100;
- }
- } else
+ current_activity = smum_get_argument(hwmgr);
+ if (current_activity > 100) {
PP_ASSERT(false,
- "[GetCurrentActivityPercent] Attempt To Read Average Graphics Activity from SMU Failed!");
+ "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
+ current_activity = 100;
+ }
} else
PP_ASSERT(false,
"[GetCurrentActivityPercent] Attempt To Send Get Average Graphics Activity to SMU Failed!");
@@ -1256,7 +1334,7 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
if (data->smu_features[GNLD_DPM_UCLK].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 0 : 1);
+ has_disp ? 1 : 0);
return 0;
}
@@ -1274,7 +1352,6 @@ int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
switch (clk_type) {
case amd_pp_dcef_clock:
- clk_freq = clock_req->clock_freq_in_khz / 100;
clk_select = PPCLK_DCEFCLK;
break;
case amd_pp_disp_clock:
@@ -1310,9 +1387,10 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
(struct vega12_hwmgr *)(hwmgr->backend);
struct PP_Clocks min_clocks = {0};
struct pp_display_clock_request clock_req;
- uint32_t clk_request;
- if (hwmgr->display_config->num_display > 1)
+ if ((hwmgr->display_config->num_display > 1) &&
+ !hwmgr->display_config->multi_monitor_in_sync &&
+ !hwmgr->display_config->nb_pstate_switch_disable)
vega12_notify_smc_display_change(hwmgr, false);
else
vega12_notify_smc_display_change(hwmgr, true);
@@ -1323,7 +1401,7 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
clock_req.clock_type = amd_pp_dcef_clock;
- clock_req.clock_freq_in_khz = min_clocks.dcefClock;
+ clock_req.clock_freq_in_khz = min_clocks.dcefClock/10;
if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
if (data->smu_features[GNLD_DS_DCEFCLK].supported)
PP_ASSERT_WITH_CODE(
@@ -1337,15 +1415,6 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
}
}
- if (data->smu_features[GNLD_DPM_UCLK].enabled) {
- clk_request = (PPCLK_UCLK << 16) | (min_clocks.memoryClock) / 100;
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, clk_request) == 0,
- "[PhwVega12_NotifySMCDisplayConfigAfterPowerStateAdjustment] Attempt to set UCLK HardMin Failed!",
- return -1);
- data->dpm_table.mem_table.dpm_state.hard_min_level = min_clocks.memoryClock;
- }
-
return 0;
}
@@ -1354,12 +1423,19 @@ static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
- data->smc_state_table.gfx_boot_level =
- data->smc_state_table.gfx_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.mem_boot_level =
- data->smc_state_table.mem_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+ uint32_t soft_level;
+
+ soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_level].value;
+
+ soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_level].value;
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload boot level to highest!",
@@ -1376,13 +1452,19 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t soft_level;
+
+ soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_level].value;
- data->smc_state_table.gfx_boot_level =
- data->smc_state_table.gfx_max_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.mem_boot_level =
- data->smc_state_table.mem_max_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+ soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_level].value;
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload boot level to highest!",
@@ -1398,17 +1480,6 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
- struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
- data->smc_state_table.gfx_boot_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.gfx_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.mem_boot_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
- data->smc_state_table.mem_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
-
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload DPM Bootup Levels!",
return -1);
@@ -1416,22 +1487,28 @@ static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
"Failed to upload DPM Max Levels!",
return -1);
+
return 0;
}
-#if 0
static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
{
- struct phm_ppt_v2_information *table_info =
- (struct phm_ppt_v2_information *)(hwmgr->pptable);
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
+ struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
+ struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
- if (table_info->vdd_dep_on_sclk->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
- table_info->vdd_dep_on_socclk->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL &&
- table_info->vdd_dep_on_mclk->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
+ *sclk_mask = 0;
+ *mclk_mask = 0;
+ *soc_mask = 0;
+
+ if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL &&
+ soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) {
*sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
- *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
*mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
+ *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
}
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
@@ -1439,13 +1516,13 @@ static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
*mclk_mask = 0;
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
- *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
- *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
- *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
+ *sclk_mask = gfx_dpm_table->count - 1;
+ *mclk_mask = mem_dpm_table->count - 1;
+ *soc_mask = soc_dpm_table->count - 1;
}
+
return 0;
}
-#endif
static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
@@ -1469,11 +1546,9 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
int ret = 0;
-#if 0
uint32_t sclk_mask = 0;
uint32_t mclk_mask = 0;
uint32_t soc_mask = 0;
-#endif
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1489,27 +1564,18 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-#if 0
ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
if (ret)
return ret;
- vega12_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
- vega12_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
-#endif
+ vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
+ vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
-#if 0
- if (!ret) {
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
- else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
- }
-#endif
+
return ret;
}
@@ -1543,24 +1609,14 @@ static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
PPCLK_e clock_select,
bool max)
{
- int result;
- *clock = 0;
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
- if (max) {
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16)) == 0,
- "[GetClockRanges] Failed to get max clock from SMC!",
- return -1);
- result = vega12_read_arg_from_smc(hwmgr, clock);
- } else {
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16)) == 0,
- "[GetClockRanges] Failed to get min clock from SMC!",
- return -1);
- result = vega12_read_arg_from_smc(hwmgr, clock);
- }
+ if (max)
+ *clock = data->clk_range[clock_select].ACMax;
+ else
+ *clock = data->clk_range[clock_select].ACMin;
- return result;
+ return 0;
}
static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
@@ -1575,12 +1631,12 @@ static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
return -1;
dpm_table = &(data->dpm_table.gfx_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -1607,13 +1663,12 @@ static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
return -1;
dpm_table = &(data->dpm_table.mem_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_UCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_UCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
- clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
-
+ clocks->data[i].clocks_in_khz = dpm_table->dpm_levels[i].value * 1000;
+ data->mclk_latency_table.entries[i].frequency = dpm_table->dpm_levels[i].value * 100;
clocks->data[i].latency_in_us =
data->mclk_latency_table.entries[i].latency =
vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
@@ -1637,12 +1692,12 @@ static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
dpm_table = &(data->dpm_table.dcef_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -1665,12 +1720,12 @@ static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
dpm_table = &(data->dpm_table.soc_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -1717,99 +1772,69 @@ static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
- int result = 0;
- uint32_t i;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
if (!data->registry_data.disable_water_mark &&
data->smu_features[GNLD_DPM_DCEFCLK].supported &&
data->smu_features[GNLD_DPM_SOCCLK].supported) {
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
- table->WatermarkRow[WM_DCEFCLK][i].MinClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
- }
-
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
- table->WatermarkRow[WM_SOCCLK][i].MinClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MaxClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MinUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
- }
+ smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
data->water_marks_bitmap |= WaterMarksExist;
data->water_marks_bitmap &= ~WaterMarksLoaded;
}
- return result;
+ return 0;
}
static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
- if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
- AMD_DPM_FORCED_LEVEL_LOW |
- AMD_DPM_FORCED_LEVEL_HIGH))
- return -EINVAL;
+ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
switch (type) {
case PP_SCLK:
- data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
- data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ ret = vega12_upload_dpm_min_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
- return -EINVAL);
+ return ret);
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+ ret = vega12_upload_dpm_max_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
- return -EINVAL);
+ return ret);
break;
case PP_MCLK:
- data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
- data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
+
+ ret = vega12_upload_dpm_min_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
- return -EINVAL);
+ return ret);
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+ ret = vega12_upload_dpm_max_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
- return -EINVAL);
+ return ret);
break;
@@ -1842,8 +1867,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 100,
- (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
case PP_MCLK:
@@ -1858,8 +1883,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 100,
- (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
case PP_PCIE:
@@ -1871,6 +1896,205 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
return size;
}
+static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_single_dpm_table *dpm_table;
+ bool vblank_too_short = false;
+ bool disable_mclk_switching;
+ uint32_t i, latency;
+
+ disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+ !hwmgr->display_config->multi_monitor_in_sync) ||
+ vblank_too_short;
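+	/* Note: vblank_too_short is never set here, so mclk switching is
+	 * only disabled for multiple unsynchronized displays, where a
+	 * memory clock change during scanout could cause corruption. */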
+ latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
+
+ /* gfxclk */
+ dpm_table = &(data->dpm_table.gfx_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* memclk */
+ dpm_table = &(data->dpm_table.mem_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* honour DAL's UCLK Hardmin */
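+	/* min_mem_set_clock is assumed to be reported in kHz; dividing by
+	 * 100 converts it into the 10 kHz units used by the DPM tables. */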
+ if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
+ dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
+
+ /* Hardmin is dependent on displayconfig */
+ if (disable_mclk_switching) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
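+		/* Default to the highest level, then walk up the latency
+		 * table and settle on the lowest level that still meets both
+		 * the latency tolerance and DAL's minimum memory clock. */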
+ for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
+ if (data->mclk_latency_table.entries[i].latency <= latency) {
+ if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
+ break;
+ }
+ }
+ }
+ }
+
+ if (hwmgr->display_config->nb_pstate_switch_disable)
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ /* vclk */
+ dpm_table = &(data->dpm_table.vclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* dclk */
+ dpm_table = &(data->dpm_table.dclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* socclk */
+ dpm_table = &(data->dpm_table.soc_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* eclk */
+ dpm_table = &(data->dpm_table.eclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ return 0;
+}
+
+static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
+ struct vega12_single_dpm_table *dpm_table)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ PP_ASSERT_WITH_CODE(dpm_table->count > 0,
+ "[SetUclkToHightestDpmLevel] Dpm table has no entry!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
+ "[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
+ return -EINVAL);
+
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinByFreq,
+			(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level)),
+ "[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
+ return ret);
+ }
+
+ return ret;
+}
+
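+/*
+ * Raise the UCLK hard minimum to the top DPM level before a display
+ * configuration change so memory bandwidth stays guaranteed while
+ * watermarks are reprogrammed; normal limits are restored by the next
+ * clocks-adjust pass. (Rationale inferred, not stated in the patch.)
+ */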
+static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_NumOfDisplays, 0);
+
+ ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
+ &data->dpm_table.mem_table);
+
+ return ret;
+}
+
static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
@@ -1915,6 +2139,9 @@ static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ if (data->vce_power_gated == bgate)
+ return;
+
data->vce_power_gated = bgate;
vega12_enable_disable_vce_dpm(hwmgr, !bgate);
}
@@ -1923,6 +2150,9 @@ static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ if (data->uvd_power_gated == bgate)
+ return;
+
data->uvd_power_gated = bgate;
vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
}
@@ -2090,6 +2320,38 @@ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
return 0;
}
+static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ if (data->gfxoff_controlled_by_driver)
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff);
+
+ return ret;
+}
+
+static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ if (data->gfxoff_controlled_by_driver)
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff);
+
+ return ret;
+}
+
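+/*
+ * GFXOFF is only toggled when the driver owns the feature
+ * (gfxoff_controlled_by_driver); otherwise these calls are no-ops
+ * that return 0 and leave the SMU in control.
+ */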
+static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
+{
+ if (enable)
+ return vega12_enable_gfx_off(hwmgr);
+ else
+ return vega12_disable_gfx_off(hwmgr);
+}
+
static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.backend_init = vega12_hwmgr_backend_init,
.backend_fini = vega12_hwmgr_backend_fini,
@@ -2117,6 +2379,10 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.display_clock_voltage_request = vega12_display_clock_voltage_request,
.force_clock_level = vega12_force_clock_level,
.print_clock_levels = vega12_print_clock_levels,
+ .apply_clocks_adjust_rules =
+ vega12_apply_clocks_adjust_rules,
+ .pre_display_config_changed =
+ vega12_pre_display_configuration_changed_task,
.display_config_changed = vega12_display_configuration_changed_task,
.powergate_uvd = vega12_power_gate_uvd,
.powergate_vce = vega12_power_gate_vce,
@@ -2135,6 +2401,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.get_thermal_temperature_range = vega12_get_thermal_temperature_range,
.register_irq_handlers = smu9_register_irq_handlers,
.start_thermal_controller = vega12_start_thermal_controller,
+ .powergate_gfx = vega12_gfx_off_control,
};
int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index 49b38df8c7f2..b3e424d28994 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -304,6 +304,12 @@ struct vega12_odn_fan_table {
bool force_fan_pwm;
};
+struct vega12_clock_range {
+ uint32_t ACMax;
+ uint32_t ACMin;
+ uint32_t DCMax;
+};
+
struct vega12_hwmgr {
struct vega12_dpm_table dpm_table;
struct vega12_dpm_table golden_dpm_table;
@@ -385,6 +391,11 @@ struct vega12_hwmgr {
uint32_t smu_version;
struct smu_features smu_features[GNLD_FEATURES_MAX];
struct vega12_smc_state_table smc_state_table;
+
+ struct vega12_clock_range clk_range[PPCLK_COUNT];
+
+ /* ---- Gfxoff ---- */
+ bool gfxoff_controlled_by_driver;
};
#define VEGA12_DPM2_NEAR_TDP_DEC 10
@@ -435,6 +446,8 @@ struct vega12_hwmgr {
#define VEGA12_UMD_PSTATE_GFXCLK_LEVEL 0x3
#define VEGA12_UMD_PSTATE_SOCCLK_LEVEL 0x3
#define VEGA12_UMD_PSTATE_MCLK_LEVEL 0x2
+#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL 0x3
+#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL 0x3
int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index 29914700ee82..cb3a5b1737c8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -224,11 +224,7 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
- /* 0xFFFF will disable the ACG feature */
- if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
- ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
- ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
- }
	ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
index cfd9e6ccb790..904eb2c9155b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
@@ -34,11 +34,9 @@ static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentRpm),
"Attempt to get current RPM from SMC Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!vega12_read_arg_from_smc(hwmgr,
- current_rpm),
- "Attempt to read current RPM from SMC Failed!",
- return -1);
+ return -EINVAL);
+ *current_rpm = smum_get_argument(hwmgr);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index a202247c9894..429c9c4322da 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -455,7 +455,7 @@ extern int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks);
extern int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ void *clock_ranges);
extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index b99fb8ac822c..d3d96260f440 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -26,7 +26,6 @@
#include <linux/seq_file.h>
#include "amd_powerplay.h"
#include "hardwaremanager.h"
-#include "pp_power_source.h"
#include "hwmgr_ppt.h"
#include "ppatomctrl.h"
#include "hwmgr_ppt.h"
@@ -195,7 +194,7 @@ struct pp_smumgr_func {
int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr);
int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr,
uint32_t firmware);
- int (*get_argument)(struct pp_hwmgr *hwmgr);
+ uint32_t (*get_argument)(struct pp_hwmgr *hwmgr);
int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg);
int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter);
@@ -294,8 +293,7 @@ struct pp_hwmgr_func {
int (*get_clock_by_type_with_voltage)(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks);
- int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr, void *clock_ranges);
int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock);
int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
@@ -303,7 +301,7 @@ struct pp_hwmgr_func {
int (*power_off_asic)(struct pp_hwmgr *hwmgr);
int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
- int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
+ int (*powergate_gfx)(struct pp_hwmgr *hwmgr, bool enable);
int (*get_sclk_od)(struct pp_hwmgr *hwmgr);
int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
@@ -328,7 +326,7 @@ struct pp_hwmgr_func {
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size);
int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
- int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr);
+ int (*powergate_mmhub)(struct pp_hwmgr *hwmgr);
int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
};
@@ -741,7 +739,6 @@ struct pp_hwmgr {
const struct pp_table_func *pptable_func;
struct pp_power_state *ps;
- enum pp_power_source power_source;
uint32_t num_ps;
struct pp_thermal_controller_info thermal_controller;
bool fan_ctrl_is_in_default_mode;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 6c22ed9249bf..82550a8a3a3f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -29,7 +29,6 @@
enum SMU_TABLE {
SMU_UVD_TABLE = 0,
SMU_VCE_TABLE,
- SMU_SAMU_TABLE,
SMU_BIF_TABLE,
};
@@ -47,7 +46,6 @@ enum SMU_MEMBER {
UcodeLoadStatus,
UvdBootLevel,
VceBootLevel,
- SamuBootLevel,
LowSclkInterruptThreshold,
DRAM_LOG_ADDR_H,
DRAM_LOG_ADDR_L,
@@ -82,7 +80,7 @@ enum SMU10_TABLE_ID {
SMU10_CLOCKTABLE,
};
-extern int smum_get_argument(struct pp_hwmgr *hwmgr);
+extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr);
extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index b08526fd1619..b6ffd08784e7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -412,10 +412,10 @@ typedef struct {
QuadraticInt_t ReservedEquation2;
QuadraticInt_t ReservedEquation3;
- uint16_t MinVoltageUlvGfx;
- uint16_t MinVoltageUlvSoc;
+ uint16_t MinVoltageUlvGfx;
+ uint16_t MinVoltageUlvSoc;
- uint32_t Reserved[14];
+ uint32_t Reserved[14];
@@ -483,9 +483,9 @@ typedef struct {
uint8_t padding8_4;
- uint8_t PllGfxclkSpreadEnabled;
- uint8_t PllGfxclkSpreadPercent;
- uint16_t PllGfxclkSpreadFreq;
+ uint8_t PllGfxclkSpreadEnabled;
+ uint8_t PllGfxclkSpreadPercent;
+ uint16_t PllGfxclkSpreadFreq;
uint8_t UclkSpreadEnabled;
uint8_t UclkSpreadPercent;
@@ -495,9 +495,9 @@ typedef struct {
uint8_t SocclkSpreadPercent;
uint16_t SocclkSpreadFreq;
- uint8_t AcgGfxclkSpreadEnabled;
- uint8_t AcgGfxclkSpreadPercent;
- uint16_t AcgGfxclkSpreadFreq;
+ uint8_t AcgGfxclkSpreadEnabled;
+ uint8_t AcgGfxclkSpreadPercent;
+ uint16_t AcgGfxclkSpreadFreq;
uint8_t Vr2_I2C_address;
uint8_t padding_vr2[3];
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 0a200406a1ec..8d557accaef2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -26,7 +26,7 @@
SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
polaris10_smumgr.o iceland_smumgr.o \
smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \
- vega12_smumgr.o vegam_smumgr.o
+ vega12_smumgr.o vegam_smumgr.o smu9_smumgr.o
AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 2d4ec8ac3a08..fbe3ef4ee45c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -1614,37 +1614,6 @@ static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU7_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_samu_clock_voltage_dependency_table *samu_table =
- hwmgr->dyn_state.samu_clock_voltage_dependency_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(samu_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
- table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
- table->SamuLevel[count].MinPhases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
static int ci_populate_memory_timing_parameters(
struct pp_hwmgr *hwmgr,
uint32_t engine_clock,
@@ -2026,10 +1995,6 @@ static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize ACP Level!", return result);
- result = ci_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
/* need to populate the ARB settings for the initial state. */
result = ci_program_memory_timing_parameters(hwmgr);
@@ -2881,6 +2846,89 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
return 0;
}
+static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ struct smu7_hwmgr *data = hwmgr->backend;
+ struct ci_smumgr *smu_data = hwmgr->smu_backend;
+ struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+ uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
+ hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
+ int32_t i;
+
+ if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0)
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ else
+ smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1;
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
+ UvdBootLevel, smu_data->smc_state_table.UvdBootLevel);
+
+ data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
+
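+	/* Walk the dependency table from the highest level down, enabling
+	 * every level whose voltage fits the current AC/DC limit; stop
+	 * after the highest entry when in a profile mode or when UVD DPM
+	 * is disabled. */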
+ for (i = uvd_table->count - 1; i >= 0; i--) {
+ if (uvd_table->entries[i].v <= max_vddc)
+ data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
+ if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
+ break;
+ }
+ ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.uvd_dpm_enable_mask);
+
+ return 0;
+}
+
+static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ struct smu7_hwmgr *data = hwmgr->backend;
+ struct phm_vce_clock_voltage_dependency_table *vce_table =
+ hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+ uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
+ hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
+ int32_t i;
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
+			VceBootLevel, 0); /* temporarily hard-coded to level 0; VCE can set a minimum evclk */
+
+ data->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
+
+ for (i = vce_table->count - 1; i >= 0; i--) {
+ if (vce_table->entries[i].v <= max_vddc)
+ data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
+ if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
+ break;
+ }
+ ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.vce_dpm_enable_mask);
+
+ return 0;
+}
+
+static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ ci_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ ci_update_vce_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
const struct pp_smumgr_func ci_smu_funcs = {
.smu_init = ci_smu_init,
.smu_fini = ci_smu_fini,
@@ -2903,4 +2951,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
.initialize_mc_reg_table = ci_initialize_mc_reg_table,
.is_dpm_running = ci_is_dpm_running,
.update_dpm_settings = ci_update_dpm_settings,
+ .update_smc_table = ci_update_smc_table,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 53df9405f43a..18048f8e2f13 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -1503,44 +1503,6 @@ static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
int32_t eng_clock, int32_t mem_clock,
struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -2028,10 +1990,6 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize ACP Level!", return result);
- result = fiji_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
/* Since only the initial state is completely set up at this point
* (the other states are just copies of the boot state) we only
* need to populate the ARB settings for the initial state.
@@ -2378,8 +2336,6 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
case VceBootLevel:
return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
case LowSclkInterruptThreshold:
return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
}
@@ -2478,33 +2434,6 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
-static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
{
switch (type) {
@@ -2514,9 +2443,6 @@ static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
case SMU_VCE_TABLE:
fiji_update_vce_smc_table(hwmgr);
break;
- case SMU_SAMU_TABLE:
- fiji_update_samu_smc_table(hwmgr);
- break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 415f691c3fa9..9299b93aa09a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -1578,12 +1578,6 @@ static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
-{
- return 0;
-}
-
static int iceland_populate_memory_timing_parameters(
struct pp_hwmgr *hwmgr,
uint32_t engine_clock,
@@ -1992,10 +1986,6 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize ACP Level!", return result;);
- result = iceland_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result;);
-
/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
/* need to populate the ARB settings for the initial state. */
result = iceland_program_memory_timing_parameters(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index a8c6524f07e4..1276f168ff68 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -1204,7 +1204,6 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
(struct phm_ppt_v1_information *)(hwmgr->pptable);
SMIO_Pattern vol_level;
uint32_t mvdd;
- uint16_t us_mvdd;
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
@@ -1255,16 +1254,11 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
"in Clock Dependency Table",
);
- us_mvdd = 0;
- if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!polaris10_populate_mvdd_value(hwmgr,
+ if (!((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+ (data->mclk_dpm_key_disabled)))
+ polaris10_populate_mvdd_value(hwmgr,
data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
+ &vol_level);
if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
@@ -1337,55 +1331,6 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
return result;
}
-
-static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
- table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
int32_t eng_clock, int32_t mem_clock,
SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -1566,7 +1511,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
- uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ uint8_t i, stretch_amount, volt_offset = 0;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1617,11 +1562,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
/* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
+ if (stretch_amount == 0 || stretch_amount > 5) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
PP_ASSERT_WITH_CODE(false,
@@ -1865,10 +1806,6 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize VCE Level!", return result);
- result = polaris10_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
/* Since only the initial state is completely set up at this point
* (the other states are just copies of the boot state) we only
* need to populate the ARB settings for the initial state.
@@ -2222,34 +2159,6 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
-static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
-
static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
{
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
@@ -2276,9 +2185,6 @@ static int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
case SMU_VCE_TABLE:
polaris10_update_vce_smc_table(hwmgr);
break;
- case SMU_SAMU_TABLE:
- polaris10_update_samu_smc_table(hwmgr);
- break;
case SMU_BIF_TABLE:
polaris10_update_bif_smc_table(hwmgr);
default:
@@ -2357,8 +2263,6 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
case VceBootLevel:
return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
case LowSclkInterruptThreshold:
return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 0a563f6fe9ea..bb07d43f3874 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -68,7 +68,7 @@ static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
return 0;
}
-static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
+static uint32_t smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 9f407c48d4f0..a029e47c2319 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -379,9 +379,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
uint32_t fw_to_load;
- int result = 0;
- struct SMU_DRAMData_TOC *toc;
- uint32_t num_entries = 0;
+ int r = 0;
if (!hwmgr->reload_fw) {
pr_info("skip reloading...\n");
@@ -422,49 +420,62 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+ UCODE_ID_CP_MEC_JT2_MASK;
}
- toc = (struct SMU_DRAMData_TOC *)smu_data->header;
- toc->structure_version = 1;
-
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_RLC_G, &toc->entry[num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_CP_CE, &toc->entry[num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_CP_PFP, &toc->entry[num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_CP_ME, &toc->entry[num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_CP_MEC, &toc->entry[num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_CP_MEC_JT1, &toc->entry[num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_CP_MEC_JT2, &toc->entry[num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_SDMA0, &toc->entry[num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_SDMA1, &toc->entry[num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
- if (!hwmgr->not_vf)
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_MEC_STORAGE, &toc->entry[num_entries++]),
- "Failed to Get Firmware Entry.", return -EINVAL);
+ if (!smu_data->toc) {
+ struct SMU_DRAMData_TOC *toc;
- toc->num_entries = num_entries;
+ smu_data->toc = kzalloc(sizeof(struct SMU_DRAMData_TOC), GFP_KERNEL);
+ if (!smu_data->toc)
+ return -ENOMEM;
+ toc = smu_data->toc;
+ toc->num_entries = 0;
+ toc->structure_version = 1;
+
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ if (!hwmgr->not_vf)
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+ UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+ }
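+	/* The TOC is built once and cached in smu_data->toc; each
+	 * subsequent firmware load only copies it into the shared DRAM
+	 * buffer instead of rebuilding the entries. */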
+ memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
+ sizeof(struct SMU_DRAMData_TOC));
smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
pr_err("Fail to Request SMU Load uCode");
- return result;
+ return r;
+
+failed:
+ kfree(smu_data->toc);
+ smu_data->toc = NULL;
+ return r;
}
/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
@@ -571,7 +582,6 @@ int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
int smu7_init(struct pp_hwmgr *hwmgr)
{
struct smu7_smumgr *smu_data;
- uint64_t mc_addr = 0;
int r;
/* Allocate memory for backend private data */
smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
@@ -585,15 +595,12 @@ int smu7_init(struct pp_hwmgr *hwmgr)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&smu_data->header_buffer.handle,
- &mc_addr,
+ &smu_data->header_buffer.mc_addr,
&smu_data->header_buffer.kaddr);
if (r)
return -EINVAL;
- smu_data->header = smu_data->header_buffer.kaddr;
- smu_data->header_buffer.mc_addr = mc_addr;
-
if (!hwmgr->not_vf)
return 0;
@@ -603,7 +610,7 @@ int smu7_init(struct pp_hwmgr *hwmgr)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&smu_data->smu_buffer.handle,
- &mc_addr,
+ &smu_data->smu_buffer.mc_addr,
&smu_data->smu_buffer.kaddr);
if (r) {
@@ -612,7 +619,6 @@ int smu7_init(struct pp_hwmgr *hwmgr)
&smu_data->header_buffer.kaddr);
return -EINVAL;
}
- smu_data->smu_buffer.mc_addr = mc_addr;
if (smum_is_hw_avfs_present(hwmgr))
hwmgr->avfs_supported = true;
@@ -634,6 +640,9 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
&smu_data->smu_buffer.mc_addr,
&smu_data->smu_buffer.kaddr);
+
+ kfree(smu_data->toc);
+ smu_data->toc = NULL;
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 39c9bfda0ab4..01f0538fba6b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -37,10 +37,9 @@ struct smu7_buffer_entry {
};
struct smu7_smumgr {
- uint8_t *header;
- uint8_t *mec_image;
struct smu7_buffer_entry smu_buffer;
struct smu7_buffer_entry header_buffer;
+ struct SMU_DRAMData_TOC *toc;
uint32_t soft_regs_start;
uint32_t dpm_table_start;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index c861d3023474..f7e3bc22bb93 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -52,10 +52,10 @@ static const enum smu8_scratch_entry firmware_list[] = {
SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};
-static int smu8_get_argument(struct pp_hwmgr *hwmgr)
+static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
{
if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
+ return 0;
return cgs_read_register(hwmgr->device,
mmSMU_MP1_SRBM2P_ARG_0);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
new file mode 100644
index 000000000000..079fc8e8f709
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smumgr.h"
+#include "vega10_inc.h"
+#include "soc15_common.h"
+#include "pp_debug.h"
+
+
+/* MP Apertures */
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+#define smnMP1_FIRMWARE_FLAGS 0x3010028
+
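+/*
+ * The MP1 firmware flags sit behind the PCIE index/data register pair;
+ * the INTERRUPTS_ENABLED bit is used as a proxy for the SMC being up.
+ */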
+bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t mp1_fw_flags;
+
+ WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
+ (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
+
+ mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
+
+ if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
+ return true;
+
+ return false;
+}
+
+/*
+ * Check if the SMC has responded to the previous message.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return the response value read back from MP1_SMN_C2PMSG_90
+ *         (non-zero once the SMC has responded).
+ */
+static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t reg;
+ uint32_t ret;
+
+ reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+
+ ret = phm_wait_for_register_unequal(hwmgr, reg,
+ 0, MP1_C2PMSG_90__CONTENT_MASK);
+
+ if (ret)
+ pr_err("No response from smu\n");
+
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+}
+
+/*
+ * Send a message to the SMC, and do not wait for its response.
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param msg the message to send.
+ * @return Always returns 0.
+ */
+static int smu9_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
+ uint16_t msg)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+
+ return 0;
+}
+
+/*
+ * Send a message to the SMC, and wait for its response.
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param msg the message to send.
+ * @return Always returns 0.
+ */
+int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t ret;
+
+ smu9_wait_for_response(hwmgr);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+ ret = smu9_wait_for_response(hwmgr);
+ if (ret != 1)
+ pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
+
+ return 0;
+}
+
+/*
+ * Send a message to the SMC with a parameter, and wait for its response.
+ * @param hwmgr: the address of the powerplay hardware manager.
+ * @param msg: the message to send.
+ * @param parameter: the parameter to send.
+ * @return Always returns 0.
+ */
+int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t ret;
+
+ smu9_wait_for_response(hwmgr);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
+
+ smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+ ret = smu9_wait_for_response(hwmgr);
+ if (ret != 1)
+ pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret);
+
+ return 0;
+}
+
+uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
new file mode 100644
index 000000000000..1462279ca128
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _SMU9_SMUMANAGER_H_
+#define _SMU9_SMUMANAGER_H_
+
+bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter);
+uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index c9837935f0f5..99d5e4f98f49 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -96,7 +96,7 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
return 0;
}
-int smum_get_argument(struct pp_hwmgr *hwmgr)
+uint32_t smum_get_argument(struct pp_hwmgr *hwmgr)
{
if (NULL != hwmgr->smumgr_funcs->get_argument)
return hwmgr->smumgr_funcs->get_argument(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 782b19fc2e70..7dabc6c456e1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -1443,51 +1443,6 @@ static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t) (mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].Frequency =
- pptable_info->mm_dep_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->SamuLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->SamuLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->SamuLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- }
-
- return result;
-}
-
static int tonga_populate_memory_timing_parameters(
struct pp_hwmgr *hwmgr,
uint32_t engine_clock,
@@ -2323,10 +2278,6 @@ static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize ACP Level !", return result);
- result = tonga_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize SAMU Level !", return result);
-
/* Since only the initial state is completely set up at this
* point (the other states are just copies of the boot state) we only
* need to populate the ARB settings for the initial state.
@@ -2673,8 +2624,6 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
case VceBootLevel:
return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
case LowSclkInterruptThreshold:
return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
}
@@ -2773,32 +2722,6 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
-static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
{
switch (type) {
@@ -2808,9 +2731,6 @@ static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
case SMU_VCE_TABLE:
tonga_update_vce_smc_table(hwmgr);
break;
- case SMU_SAMU_TABLE:
- tonga_update_samu_smc_table(hwmgr);
- break;
default:
break;
}
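
The hunks above also drop tonga_update_samu_smc_table(), which used the same round-down idiom as the surviving UVD/VCE updaters: the indirect SMC accessors move whole 32-bit words, so the byte offset of the boot-level field is rounded down to a word boundary and only byte 0 of that word is patched (the removed code assumed the field sits in the lowest byte, as it does for these tables). A worked example of the arithmetic, with a made-up offset:

	uint32_t offset = 0x2a6;	/* hypothetical byte offset of the field */
	uint32_t value;

	offset = (offset / 4) * 4;	/* round down to a word boundary: 0x2a4 */
	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
	value = (value & 0xFFFFFF00) | boot_level;	/* patch byte 0, as the removed code did */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, value);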
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index e84669c448a3..5d19115f410c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -28,142 +28,11 @@
#include "vega10_hwmgr.h"
#include "vega10_ppsmc.h"
#include "smu9_driver_if.h"
+#include "smu9_smumgr.h"
#include "ppatomctrl.h"
#include "pp_debug.h"
-#define AVFS_EN_MSB 1568
-#define AVFS_EN_LSB 1568
-
-/* Microcode file is stored in this buffer */
-#define BUFFER_SIZE 80000
-#define MAX_STRING_SIZE 15
-#define BUFFER_SIZETWO 131072 /* 128 *1024 */
-
-/* MP Apertures */
-#define MP0_Public 0x03800000
-#define MP0_SRAM 0x03900000
-#define MP1_Public 0x03b00000
-#define MP1_SRAM 0x03c00004
-
-#define smnMP1_FIRMWARE_FLAGS 0x3010028
-#define smnMP0_FW_INTF 0x3010104
-#define smnMP1_PUB_CTRL 0x3010b14
-
-static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t mp1_fw_flags;
-
- WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
- (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
- mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
-
- if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
- return true;
-
- return false;
-}
-
-/*
- * Check if SMC has responded to previous message.
- *
- * @param smumgr the address of the powerplay hardware manager.
- * @return TRUE SMC has responded, FALSE otherwise.
- */
-static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
- uint32_t ret;
-
- reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
-
- ret = phm_wait_for_register_unequal(hwmgr, reg,
- 0, MP1_C2PMSG_90__CONTENT_MASK);
-
- if (ret)
- pr_err("No response from smu\n");
-
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
-}
-
-/*
- * Send a message to the SMC, and do not wait for its response.
- * @param smumgr the address of the powerplay hardware manager.
- * @param msg the message to send.
- * @return Always return 0.
- */
-static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
- uint16_t msg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-
- return 0;
-}
-
-/*
- * Send a message to the SMC, and wait for its response.
- * @param hwmgr the address of the powerplay hardware manager.
- * @param msg the message to send.
- * @return Always return 0.
- */
-static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t ret;
-
- vega10_wait_for_response(hwmgr);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
-
- ret = vega10_wait_for_response(hwmgr);
- if (ret != 1)
- pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
-
- return 0;
-}
-
-/*
- * Send a message to the SMC with parameter
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param msg: the message to send.
- * @param parameter: the parameter to send
- * @return Always return 0.
- */
-static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t ret;
-
- vega10_wait_for_response(hwmgr);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
-
- vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
-
- ret = vega10_wait_for_response(hwmgr);
- if (ret != 1)
- pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret);
-
- return 0;
-}
-
-static int vega10_get_argument(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
-}
-
static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
@@ -175,13 +44,13 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id);
@@ -206,13 +75,13 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
priv->smu_tables.entry[table_id].table_id);
@@ -225,8 +94,8 @@ static int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
if (features_enabled == NULL)
return -EINVAL;
- vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
- *features_enabled = vega10_get_argument(hwmgr);
+ smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
+ *features_enabled = smu9_get_argument(hwmgr);
return 0;
}
@@ -248,10 +117,10 @@ static int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
struct vega10_smumgr *priv = hwmgr->smu_backend;
if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) {
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
- vega10_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
}
@@ -265,11 +134,11 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
uint32_t dev_id;
uint32_t rev_id;
- PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr,
+ PP_ASSERT_WITH_CODE(!smu9_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetDriverIfVersion),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- smc_driver_if_version = vega10_get_argument(hwmgr);
+ smc_driver_if_version = smu9_get_argument(hwmgr);
dev_id = adev->pdev->device;
rev_id = adev->pdev->revision;
@@ -441,7 +310,7 @@ static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
static int vega10_start_smu(struct pp_hwmgr *hwmgr)
{
- if (!vega10_is_smc_ram_running(hwmgr))
+ if (!smu9_is_smc_ram_running(hwmgr))
return -EINVAL;
PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr),
@@ -453,7 +322,8 @@ static int vega10_start_smu(struct pp_hwmgr *hwmgr)
return 0;
}
-static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
+static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
+ uint16_t table_id, bool rw)
{
int ret;
@@ -470,11 +340,11 @@ const struct pp_smumgr_func vega10_smu_funcs = {
.smu_fini = &vega10_smu_fini,
.start_smu = &vega10_start_smu,
.request_smu_load_specific_fw = NULL,
- .send_msg_to_smc = &vega10_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = &vega10_send_msg_to_smc_with_parameter,
+ .send_msg_to_smc = &smu9_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.is_dpm_running = vega10_is_dpm_running,
- .get_argument = vega10_get_argument,
+ .get_argument = smu9_get_argument,
.smc_table_manager = vega10_smc_table_manager,
};
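
The vega10_* functions deleted above are, register for register, the protocol the shared smu9_* helpers must preserve: the MP1 mailbox is three registers, with C2PMSG_90 doubling as busy flag and response code. A condensed sketch of one round trip (the removed code used phm_wait_for_register_unequal() with a timeout instead of spinning, and logged rather than returned the failure; the -EIO here is illustrative):

	static int smu9_style_round_trip(struct pp_hwmgr *hwmgr, uint16_t msg,
			uint32_t parameter)
	{
		struct amdgpu_device *adev = hwmgr->adev;
		uint32_t resp;

		/* wait out any previous exchange: C2PMSG_90 goes non-zero on completion */
		while (RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0)
			cpu_relax();
		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);		/* re-arm the response slot */
		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);	/* argument in */
		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);		/* writing the id kicks the SMC */
		while ((resp = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90)) == 0)
			cpu_relax();
		return resp == 1 ? 0 : -EIO;	/* 1 is the only success code */
	}

Any reply payload is then read back from C2PMSG_82, which is all smu9_get_argument() does.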
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 7d9b40e8b1bf..7f0e2109f40d 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -24,157 +24,14 @@
#include "smumgr.h"
#include "vega12_inc.h"
#include "soc15_common.h"
+#include "smu9_smumgr.h"
#include "vega12_smumgr.h"
#include "vega12_ppsmc.h"
#include "vega12/smu9_driver_if.h"
-
#include "ppatomctrl.h"
#include "pp_debug.h"
-/* MP Apertures */
-#define MP0_Public 0x03800000
-#define MP0_SRAM 0x03900000
-#define MP1_Public 0x03b00000
-#define MP1_SRAM 0x03c00004
-
-#define smnMP1_FIRMWARE_FLAGS 0x3010028
-#define smnMP0_FW_INTF 0x3010104
-#define smnMP1_PUB_CTRL 0x3010b14
-
-static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t mp1_fw_flags;
-
- WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
- (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
- mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
-
- if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
- MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
- return true;
-
- return false;
-}
-
-/*
- * Check if SMC has responded to previous message.
- *
- * @param smumgr the address of the powerplay hardware manager.
- * @return TRUE SMC has responded, FALSE otherwise.
- */
-static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr)
-{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
-
- reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
-
- phm_wait_for_register_unequal(hwmgr, reg,
- 0, MP1_C2PMSG_90__CONTENT_MASK);
-
- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
-}
-
-/*
- * Send a message to the SMC, and do not wait for its response.
- * @param smumgr the address of the powerplay hardware manager.
- * @param msg the message to send.
- * @return Always return 0.
- */
-int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
- uint16_t msg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-
- return 0;
-}
-
-/*
- * Send a message to the SMC, and wait for its response.
- * @param hwmgr the address of the powerplay hardware manager.
- * @param msg the message to send.
- * @return Always return 0.
- */
-int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- vega12_wait_for_response(hwmgr);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
-
- if (vega12_wait_for_response(hwmgr) != 1)
- pr_err("Failed to send message: 0x%x\n", msg);
-
- return 0;
-}
-
-/*
- * Send a message to the SMC with parameter
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param msg: the message to send.
- * @param parameter: the parameter to send
- * @return Always return 0.
- */
-int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- vega12_wait_for_response(hwmgr);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
-
- vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
-
- if (vega12_wait_for_response(hwmgr) != 1)
- pr_err("Failed to send message: 0x%x\n", msg);
-
- return 0;
-}
-
-
-/*
- * Send a message to the SMC with parameter, do not wait for response
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param msg: the message to send.
- * @param parameter: the parameter to send
- * @return The response that came from the SMC.
- */
-int vega12_send_msg_to_smc_with_parameter_without_waiting(
- struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, parameter);
-
- return vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
-}
-
-/*
- * Retrieve an argument from SMC.
- * @param hwmgr the address of the powerplay hardware manager.
- * @param arg pointer to store the argument from SMC.
- * @return Always return 0.
- */
-int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
-{
- struct amdgpu_device *adev = hwmgr->adev;
-
- *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
-
- return 0;
-}
-
/*
* Copy table from SMC into driver FB
* @param hwmgr the address of the HW manager
@@ -192,16 +49,16 @@ int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
table_id) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
@@ -234,17 +91,17 @@ int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
table_id) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
@@ -262,20 +119,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
if (enable) {
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
"[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
"[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
return -EINVAL);
} else {
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
"[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
"[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
return -EINVAL);
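
The SMC takes only 32-bit message parameters, so the 64-bit feature mask travels as two messages; the SMU_FEATURES_* macros from vega12_smumgr.h do the split, and the read path below recombines the halves the same way. A worked example with an arbitrary mask:

	uint64_t mask = 0x0000000300000001ULL;
	uint32_t low  = (uint32_t)((mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT);	/* 0x00000001 */
	uint32_t high = (uint32_t)((mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);	/* 0x00000003 */

	/* recombining, as vega12_get_enabled_smc_features() below does */
	uint64_t out = (((uint64_t)low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
		       (((uint64_t)high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK);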
@@ -292,22 +149,17 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
if (features_enabled == NULL)
return -EINVAL;
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
"[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr,
- &smc_features_low) == 0,
- "[GetEnabledSMCFeatures] Attemp to read SMU features Low argument failed!",
- return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
+ smc_features_low = smu9_get_argument(hwmgr);
+
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
"[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr,
- &smc_features_high) == 0,
- "[GetEnabledSMCFeatures] Attemp to read SMU features High argument failed!",
- return -EINVAL);
+ smc_features_high = smu9_get_argument(hwmgr);
*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -333,39 +185,16 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr)
(struct vega12_smumgr *)(hwmgr->smu_backend);
if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
- if (!vega12_send_msg_to_smc_with_parameter(hwmgr,
+ if (!smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)))
- vega12_send_msg_to_smc_with_parameter(hwmgr,
+ smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
}
return 0;
}
-#if 0 /* tentatively remove */
-static int vega12_verify_smc_interface(struct pp_hwmgr *hwmgr)
-{
- uint32_t smc_driver_if_version;
-
- PP_ASSERT_WITH_CODE(!vega12_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetDriverIfVersion),
- "Attempt to get SMC IF Version Number Failed!",
- return -EINVAL);
- vega12_read_arg_from_smc(hwmgr, &smc_driver_if_version);
-
- if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
- pr_err("Your firmware(0x%x) doesn't match \
- SMU9_DRIVER_IF_VERSION(0x%x). \
- Please update your firmware!\n",
- smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
- return -EINVAL;
- }
-
- return 0;
-}
-#endif
-
static int vega12_smu_init(struct pp_hwmgr *hwmgr)
{
struct vega12_smumgr *priv;
@@ -513,16 +342,10 @@ static int vega12_smu_fini(struct pp_hwmgr *hwmgr)
static int vega12_start_smu(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(vega12_is_smc_ram_running(hwmgr),
+ PP_ASSERT_WITH_CODE(smu9_is_smc_ram_running(hwmgr),
"SMC is not running!",
return -EINVAL);
-#if 0 /* tentatively remove */
- PP_ASSERT_WITH_CODE(!vega12_verify_smc_interface(hwmgr),
- "Failed to verify SMC interface!",
- return -EINVAL);
-#endif
-
vega12_set_tools_address(hwmgr);
return 0;
@@ -533,9 +356,10 @@ const struct pp_smumgr_func vega12_smu_funcs = {
.smu_fini = &vega12_smu_fini,
.start_smu = &vega12_start_smu,
.request_smu_load_specific_fw = NULL,
- .send_msg_to_smc = &vega12_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = &vega12_send_msg_to_smc_with_parameter,
+ .send_msg_to_smc = &smu9_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.is_dpm_running = vega12_is_dpm_running,
+ .get_argument = smu9_get_argument,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
index 2810d387b611..b285cbc04019 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
@@ -48,7 +48,6 @@ struct vega12_smumgr {
#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
#define SMU_FEATURES_HIGH_SHIFT 32
-int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 2de48959ac93..57420d7caa4e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -393,34 +393,6 @@ static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
-static int vegam_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
-
static int vegam_update_bif_smc_table(struct pp_hwmgr *hwmgr)
{
struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
@@ -447,9 +419,6 @@ static int vegam_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
case SMU_VCE_TABLE:
vegam_update_vce_smc_table(hwmgr);
break;
- case SMU_SAMU_TABLE:
- vegam_update_samu_smc_table(hwmgr);
- break;
case SMU_BIF_TABLE:
vegam_update_bif_smc_table(hwmgr);
break;
@@ -1281,54 +1250,6 @@ static int vegam_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int vegam_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU75_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
- table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
static int vegam_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
int32_t eng_clock, int32_t mem_clock,
SMU75_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -2062,10 +1983,6 @@ static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize VCE Level!", return result);
- result = vegam_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize SAMU Level!", return result);
-
/* Since only the initial state is completely set up at this point
* (the other states are just copies of the boot state) we only
* need to populate the ARB settings for the initial state.
@@ -2273,8 +2190,6 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU75_Discrete_DpmTable, UvdBootLevel);
case VceBootLevel:
return offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
case LowSclkInterruptThreshold:
return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
}
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 16903dc7fe0d..965cda48dc13 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -136,9 +136,6 @@ static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
- if (!crtc->primary->fb)
- return;
-
clk_disable_unprepare(arcpgu->clk);
arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) &
@@ -189,7 +186,7 @@ static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
static void arc_pgu_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c
index b8f6f9a5dfbe..68629e614990 100644
--- a/drivers/gpu/drm/arc/arcpgu_sim.c
+++ b/drivers/gpu/drm/arc/arcpgu_sim.c
@@ -99,7 +99,7 @@ int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np)
goto error_encoder_cleanup;
}
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) {
dev_err(drm->dev, "could not attach connector to encoder\n");
drm_connector_unregister(connector);
diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile
index bb8b158ff90d..3bf31d1a4722 100644
--- a/drivers/gpu/drm/arm/Makefile
+++ b/drivers/gpu/drm/arm/Makefile
@@ -1,4 +1,5 @@
hdlcd-y := hdlcd_drv.o hdlcd_crtc.o
obj-$(CONFIG_DRM_HDLCD) += hdlcd.o
mali-dp-y := malidp_drv.o malidp_hw.o malidp_planes.o malidp_crtc.o
+mali-dp-y += malidp_mw.o
obj-$(CONFIG_DRM_MALI_DISPLAY) += mali-dp.o
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index cf5cbd63ecdf..e4d67b70244d 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -229,6 +229,8 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ int i;
+ struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
u32 src_h = state->src_h >> 16;
@@ -238,20 +240,17 @@ static int hdlcd_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- if (!state->fb || !state->crtc)
- return 0;
-
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
- if (!crtc_state) {
- DRM_DEBUG_KMS("Invalid crtc state\n");
- return -EINVAL;
+ for_each_new_crtc_in_state(state->state, crtc, crtc_state, i) {
+ /* we cannot disable the plane while the CRTC is active */
+ if (!state->fb && crtc_state->active)
+ return -EINVAL;
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
}
- return drm_atomic_helper_check_plane_state(state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true);
+ return 0;
}
static void hdlcd_plane_atomic_update(struct drm_plane *plane,
@@ -280,16 +279,10 @@ static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
.atomic_update = hdlcd_plane_atomic_update,
};
-static void hdlcd_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_helper_disable(plane);
- drm_plane_cleanup(plane);
-}
-
static const struct drm_plane_funcs hdlcd_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = hdlcd_plane_destroy,
+ .destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -334,10 +327,8 @@ int hdlcd_setup_crtc(struct drm_device *drm)
ret = drm_crtc_init_with_planes(drm, &hdlcd->crtc, primary, NULL,
&hdlcd_crtc_funcs, NULL);
- if (ret) {
- hdlcd_plane_destroy(primary);
+ if (ret)
return ret;
- }
drm_crtc_helper_add(&hdlcd->crtc, &hdlcd_crtc_helper_funcs);
return 0;
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index feaa8bc3d7b7..0ed1cde98cf8 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -27,6 +27,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/drm_of.h>
#include "hdlcd_drv.h"
@@ -100,16 +101,9 @@ setup_fail:
return ret;
}
-static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
-{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
-
- drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
-}
-
static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = hdlcd_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -124,13 +118,6 @@ static void hdlcd_setup_mode_config(struct drm_device *drm)
drm->mode_config.funcs = &hdlcd_mode_config_funcs;
}
-static void hdlcd_lastclose(struct drm_device *drm)
-{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
-
- drm_fbdev_cma_restore_mode(hdlcd->fbdev);
-}
-
static irqreturn_t hdlcd_irq(int irq, void *arg)
{
struct drm_device *drm = arg;
@@ -246,7 +233,7 @@ static struct drm_driver hdlcd_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = hdlcd_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.irq_handler = hdlcd_irq,
.irq_preinstall = hdlcd_irq_preinstall,
.irq_postinstall = hdlcd_irq_postinstall,
@@ -321,14 +308,9 @@ static int hdlcd_drm_bind(struct device *dev)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
- hdlcd->fbdev = drm_fbdev_cma_init(drm, 32,
- drm->mode_config.num_connector);
-
- if (IS_ERR(hdlcd->fbdev)) {
- ret = PTR_ERR(hdlcd->fbdev);
- hdlcd->fbdev = NULL;
+ ret = drm_fb_cma_fbdev_init(drm, 32, 0);
+ if (ret)
goto err_fbdev;
- }
ret = drm_dev_register(drm, 0);
if (ret)
@@ -337,15 +319,13 @@ static int hdlcd_drm_bind(struct device *dev)
return 0;
err_register:
- if (hdlcd->fbdev) {
- drm_fbdev_cma_fini(hdlcd->fbdev);
- hdlcd->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
err_fbdev:
drm_kms_helper_poll_fini(drm);
err_vblank:
pm_runtime_disable(drm->dev);
err_pm_active:
+ drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
err_unload:
of_node_put(hdlcd->crtc.port);
@@ -366,23 +346,23 @@ static void hdlcd_drm_unbind(struct device *dev)
struct hdlcd_drm_private *hdlcd = drm->dev_private;
drm_dev_unregister(drm);
- if (hdlcd->fbdev) {
- drm_fbdev_cma_fini(hdlcd->fbdev);
- hdlcd->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
component_unbind_all(dev, drm);
of_node_put(hdlcd->crtc.port);
hdlcd->crtc.port = NULL;
- pm_runtime_get_sync(drm->dev);
+ pm_runtime_get_sync(dev);
+ drm_crtc_vblank_off(&hdlcd->crtc);
drm_irq_uninstall(drm);
- pm_runtime_put_sync(drm->dev);
- pm_runtime_disable(drm->dev);
- of_reserved_mem_device_release(drm->dev);
+ drm_atomic_helper_shutdown(drm);
+ pm_runtime_put(dev);
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
+ of_reserved_mem_device_release(dev);
drm_mode_config_cleanup(drm);
- drm_dev_put(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
+ drm_dev_put(drm);
}
static const struct component_master_ops hdlcd_master_ops = {
@@ -427,35 +407,15 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match);
static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
-
- if (!hdlcd)
- return 0;
- drm_kms_helper_poll_disable(drm);
- drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1);
-
- hdlcd->state = drm_atomic_helper_suspend(drm);
- if (IS_ERR(hdlcd->state)) {
- drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
- drm_kms_helper_poll_enable(drm);
- return PTR_ERR(hdlcd->state);
- }
-
- return 0;
+ return drm_mode_config_helper_suspend(drm);
}
static int __maybe_unused hdlcd_pm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
-
- if (!hdlcd)
- return 0;
- drm_atomic_helper_resume(drm, hdlcd->state);
- drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
- drm_kms_helper_poll_enable(drm);
+ drm_mode_config_helper_resume(drm);
return 0;
}
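
Both callbacks collapse into the generic mode-config helpers, which keep the suspended atomic state in drm->mode_config.suspend_state instead of the driver-private field removed from hdlcd_drv.h below. Roughly the sequence drm_mode_config_helper_suspend() performs on the driver's behalf (a sketch of the helper's behaviour, not hdlcd code):

	drm_kms_helper_poll_disable(drm);
	drm_fb_helper_set_suspend_unlocked(drm->fb_helper, 1);
	state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(state)) {			/* on failure, undo and report */
		drm_fb_helper_set_suspend_unlocked(drm->fb_helper, 0);
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(state);
	}
	drm->mode_config.suspend_state = state;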
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h
index 56f34dfff640..fd438d177b64 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.h
+++ b/drivers/gpu/drm/arm/hdlcd_drv.h
@@ -9,10 +9,8 @@
struct hdlcd_drm_private {
void __iomem *mmio;
struct clk *clk;
- struct drm_fbdev_cma *fbdev;
struct drm_crtc crtc;
struct drm_plane *plane;
- struct drm_atomic_state *state;
#ifdef CONFIG_DEBUG_FS
atomic_t buffer_underrun_count;
atomic_t bus_error_count;
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index fcc62bc60f6a..ef44202fb43f 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -411,6 +411,16 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
}
}
+ /* If only the writeback routing has changed, we don't need a modeset */
+ if (state->connectors_changed) {
+ u32 old_mask = crtc->state->connector_mask;
+ u32 new_mask = state->connector_mask;
+
+ if ((old_mask ^ new_mask) ==
+ (1 << drm_connector_index(&malidp->mw_connector.base)))
+ state->connectors_changed = false;
+ }
+
ret = malidp_crtc_atomic_check_gamma(crtc, state);
ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, state);
ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, state);
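
A worked example of the mask comparison above, with the display connector at index 0 and the writeback connector at index 1 (indices illustrative):

	u32 old_mask = 0x1;	/* display only */
	u32 new_mask = 0x3;	/* display + writeback */
	/* old_mask ^ new_mask == 1 << 1: only the writeback bit moved,
	 * so connectors_changed is cleared and no modeset is forced */

Had the display connector toggled as well, the XOR would carry extra bits and the full modeset path would run.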
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 0a788d76ed5f..08b5bb219816 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -17,6 +17,7 @@
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/pm_runtime.h>
+#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -31,6 +32,7 @@
#include <drm/drm_of.h>
#include "malidp_drv.h"
+#include "malidp_mw.h"
#include "malidp_regs.h"
#include "malidp_hw.h"
@@ -170,14 +172,15 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
struct malidp_hw_device *hwdev = malidp->dev;
int ret;
- atomic_set(&malidp->config_valid, 0);
- hwdev->hw->set_config_valid(hwdev);
+ hwdev->hw->set_config_valid(hwdev, 1);
/* don't wait for config_valid flag if we are in config mode */
- if (hwdev->hw->in_config_mode(hwdev))
+ if (hwdev->hw->in_config_mode(hwdev)) {
+ atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
return 0;
+ }
ret = wait_event_interruptible_timeout(malidp->wq,
- atomic_read(&malidp->config_valid) == 1,
+ atomic_read(&malidp->config_valid) == MALIDP_CONFIG_VALID_DONE,
msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));
return (ret > 0) ? 0 : -ETIMEDOUT;
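
Together with the MALIDP_CONFIG_* values added to malidp_drv.h later in this patch, config_valid becomes a small three-state handshake. A sketch of one cycle as commit_tail drives it (the IRQ handler that flips the state to DONE is outside this hunk):

	atomic_set(&malidp->config_valid, MALIDP_CONFIG_START);	/* registers in flux */
	hwdev->hw->set_config_valid(hwdev, 0);			/* drop the CV bit */
	/* ... program the new scene ... */
	hwdev->hw->set_config_valid(hwdev, 1);			/* latch at the next vblank */
	wait_event_interruptible_timeout(malidp->wq,
			atomic_read(&malidp->config_valid) == MALIDP_CONFIG_VALID_DONE,
			msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));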
@@ -216,12 +219,20 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *drm = state->dev;
+ struct malidp_drm *malidp = drm->dev_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i;
pm_runtime_get_sync(drm->dev);
+ /*
+ * set config_valid to a special value to let IRQ handlers
+ * know that we are updating registers
+ */
+ atomic_set(&malidp->config_valid, MALIDP_CONFIG_START);
+ malidp->dev->hw->set_config_valid(malidp->dev, 0);
+
drm_atomic_helper_commit_modeset_disables(drm, state);
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
@@ -230,7 +241,9 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
malidp_atomic_commit_se_config(crtc, old_crtc_state);
}
- drm_atomic_helper_commit_planes(drm, state, 0);
+ drm_atomic_helper_commit_planes(drm, state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+ malidp_mw_atomic_commit(drm, state);
drm_atomic_helper_commit_modeset_enables(drm, state);
@@ -268,12 +281,18 @@ static int malidp_init(struct drm_device *drm)
drm->mode_config.helper_private = &malidp_mode_config_helpers;
ret = malidp_crtc_init(drm);
- if (ret) {
- drm_mode_config_cleanup(drm);
- return ret;
- }
+ if (ret)
+ goto crtc_fail;
+
+ ret = malidp_mw_connector_init(drm);
+ if (ret)
+ goto crtc_fail;
return 0;
+
+crtc_fail:
+ drm_mode_config_cleanup(drm);
+ return ret;
}
static void malidp_fini(struct drm_device *drm)
@@ -285,6 +304,8 @@ static int malidp_irq_init(struct platform_device *pdev)
{
int irq_de, irq_se, ret = 0;
struct drm_device *drm = dev_get_drvdata(&pdev->dev);
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
/* fetch the interrupts from DT */
irq_de = platform_get_irq_byname(pdev, "DE");
@@ -304,7 +325,7 @@ static int malidp_irq_init(struct platform_device *pdev)
ret = malidp_se_irq_init(drm, irq_se);
if (ret) {
- malidp_de_irq_fini(drm);
+ malidp_de_irq_fini(hwdev);
return ret;
}
@@ -326,6 +347,106 @@ static int malidp_dumb_create(struct drm_file *file_priv,
return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
}
+#ifdef CONFIG_DEBUG_FS
+
+static void malidp_error_stats_init(struct malidp_error_stats *error_stats)
+{
+ error_stats->num_errors = 0;
+ error_stats->last_error_status = 0;
+ error_stats->last_error_vblank = -1;
+}
+
+void malidp_error(struct malidp_drm *malidp,
+ struct malidp_error_stats *error_stats, u32 status,
+ u64 vblank)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&malidp->errors_lock, irqflags);
+ error_stats->last_error_status = status;
+ error_stats->last_error_vblank = vblank;
+ error_stats->num_errors++;
+ spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
+}
+
+void malidp_error_stats_dump(const char *prefix,
+ struct malidp_error_stats error_stats,
+ struct seq_file *m)
+{
+ seq_printf(m, "[%s] num_errors : %d\n", prefix,
+ error_stats.num_errors);
+ seq_printf(m, "[%s] last_error_status : 0x%08x\n", prefix,
+ error_stats.last_error_status);
+ seq_printf(m, "[%s] last_error_vblank : %lld\n", prefix,
+ error_stats.last_error_vblank);
+}
+
+static int malidp_show_stats(struct seq_file *m, void *arg)
+{
+ struct drm_device *drm = m->private;
+ struct malidp_drm *malidp = drm->dev_private;
+ unsigned long irqflags;
+ struct malidp_error_stats de_errors, se_errors;
+
+ spin_lock_irqsave(&malidp->errors_lock, irqflags);
+ de_errors = malidp->de_errors;
+ se_errors = malidp->se_errors;
+ spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
+ malidp_error_stats_dump("DE", de_errors, m);
+ malidp_error_stats_dump("SE", se_errors, m);
+ return 0;
+}
+
+static int malidp_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, malidp_show_stats, inode->i_private);
+}
+
+static ssize_t malidp_debugfs_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_device *drm = m->private;
+ struct malidp_drm *malidp = drm->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&malidp->errors_lock, irqflags);
+ malidp_error_stats_init(&malidp->de_errors);
+ malidp_error_stats_init(&malidp->se_errors);
+ spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
+ return len;
+}
+
+static const struct file_operations malidp_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = malidp_debugfs_open,
+ .read = seq_read,
+ .write = malidp_debugfs_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int malidp_debugfs_init(struct drm_minor *minor)
+{
+ struct malidp_drm *malidp = minor->dev->dev_private;
+ struct dentry *dentry = NULL;
+
+ malidp_error_stats_init(&malidp->de_errors);
+ malidp_error_stats_init(&malidp->se_errors);
+ spin_lock_init(&malidp->errors_lock);
+ dentry = debugfs_create_file("debug",
+ S_IRUGO | S_IWUSR,
+ minor->debugfs_root, minor->dev,
+ &malidp_debugfs_fops);
+ if (!dentry) {
+ DRM_ERROR("Cannot create debug file\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
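
Once registered, the file sits under the DRM debugfs root (typically /sys/kernel/debug/dri/<minor>/debug); reading it prints the six counters via malidp_error_stats_dump(), and a write of any content resets both DE and SE stats. Illustrative output (values made up):

	[DE] num_errors : 3
	[DE] last_error_status : 0x00000001
	[DE] last_error_vblank : 12345
	[SE] num_errors : 0
	[SE] last_error_status : 0x00000000
	[SE] last_error_vblank : -1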
static struct drm_driver malidp_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
DRIVER_PRIME,
@@ -342,6 +463,9 @@ static struct drm_driver malidp_driver = {
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = malidp_debugfs_init,
+#endif
.fops = &fops,
.name = "mali-dp",
.desc = "ARM Mali Display Processor driver",
@@ -458,6 +582,8 @@ static int malidp_runtime_pm_suspend(struct device *dev)
/* we can only suspend if the hardware is in config mode */
WARN_ON(!hwdev->hw->in_config_mode(hwdev));
+ malidp_se_irq_fini(hwdev);
+ malidp_de_irq_fini(hwdev);
hwdev->pm_suspended = true;
clk_disable_unprepare(hwdev->mclk);
clk_disable_unprepare(hwdev->aclk);
@@ -476,6 +602,8 @@ static int malidp_runtime_pm_resume(struct device *dev)
clk_prepare_enable(hwdev->aclk);
clk_prepare_enable(hwdev->mclk);
hwdev->pm_suspended = false;
+ malidp_de_irq_hw_init(hwdev);
+ malidp_se_irq_hw_init(hwdev);
return 0;
}
@@ -488,6 +616,7 @@ static int malidp_bind(struct device *dev)
struct malidp_hw_device *hwdev;
struct platform_device *pdev = to_platform_device(dev);
struct of_device_id const *dev_id;
+ struct drm_encoder *encoder;
/* number of lines for the R, G and B output */
u8 output_width[MAX_OUTPUT_CHANNELS];
int ret = 0, i;
@@ -587,8 +716,9 @@ static int malidp_bind(struct device *dev)
for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
out_depth = (out_depth << 8) | (output_width[i] & 0xf);
malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
+ hwdev->output_color_depth = out_depth;
- atomic_set(&malidp->config_valid, 0);
+ atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_INIT);
init_waitqueue_head(&malidp->wq);
ret = malidp_init(drm);
@@ -608,6 +738,15 @@ static int malidp_bind(struct device *dev)
goto bind_fail;
}
+ /* We expect to have a maximum of two encoders: one for the actual
+ * display and a virtual one for the writeback connector.
+ */
+ WARN_ON(drm->mode_config.num_encoder > 2);
+ list_for_each_entry(encoder, &drm->mode_config.encoder_list, head) {
+ encoder->possible_clones =
+ (1 << drm->mode_config.num_encoder) - 1;
+ }
+
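
With the expected two encoders, num_encoder is 2 and each encoder ends up with possible_clones = (1 << 2) - 1 = 0x3; bit i set means "may be active at the same time as encoder i", so the physical display encoder and the virtual writeback encoder are allowed to run simultaneously.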
ret = malidp_irq_init(pdev);
if (ret < 0)
goto irq_init_fail;
@@ -641,8 +780,8 @@ register_fail:
fbdev_fail:
pm_runtime_get_sync(dev);
vblank_fail:
- malidp_se_irq_fini(drm);
- malidp_de_irq_fini(drm);
+ malidp_se_irq_fini(hwdev);
+ malidp_de_irq_fini(hwdev);
drm->irq_enabled = false;
irq_init_fail:
drm_atomic_helper_shutdown(drm);
@@ -672,14 +811,15 @@ static void malidp_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
drm_dev_unregister(drm);
drm_fb_cma_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
pm_runtime_get_sync(dev);
drm_crtc_vblank_off(&malidp->crtc);
- malidp_se_irq_fini(drm);
- malidp_de_irq_fini(drm);
+ malidp_se_irq_fini(hwdev);
+ malidp_de_irq_fini(hwdev);
drm->irq_enabled = false;
drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
@@ -752,8 +892,25 @@ static int __maybe_unused malidp_pm_resume(struct device *dev)
return 0;
}
+static int __maybe_unused malidp_pm_suspend_late(struct device *dev)
+{
+ if (!pm_runtime_status_suspended(dev)) {
+ malidp_runtime_pm_suspend(dev);
+ pm_runtime_set_suspended(dev);
+ }
+ return 0;
+}
+
+static int __maybe_unused malidp_pm_resume_early(struct device *dev)
+{
+ malidp_runtime_pm_resume(dev);
+ pm_runtime_set_active(dev);
+ return 0;
+}
+
static const struct dev_pm_ops malidp_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend, malidp_pm_resume) \
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend_late, malidp_pm_resume_early) \
SET_RUNTIME_PM_OPS(malidp_runtime_pm_suspend, malidp_runtime_pm_resume, NULL)
};
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index c70989b93387..e3eb0cb1f385 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -13,18 +13,38 @@
#ifndef __MALIDP_DRV_H__
#define __MALIDP_DRV_H__
+#include <drm/drm_writeback.h>
+#include <drm/drm_encoder.h>
#include <linux/mutex.h>
#include <linux/wait.h>
+#include <linux/spinlock.h>
#include <drm/drmP.h>
#include "malidp_hw.h"
+#define MALIDP_CONFIG_VALID_INIT 0
+#define MALIDP_CONFIG_VALID_DONE 1
+#define MALIDP_CONFIG_START 0xd0
+
+struct malidp_error_stats {
+ s32 num_errors;
+ u32 last_error_status;
+ s64 last_error_vblank;
+};
+
struct malidp_drm {
struct malidp_hw_device *dev;
struct drm_crtc crtc;
+ struct drm_writeback_connector mw_connector;
wait_queue_head_t wq;
struct drm_pending_vblank_event *event;
atomic_t config_valid;
u32 core_id;
+#ifdef CONFIG_DEBUG_FS
+ struct malidp_error_stats de_errors;
+ struct malidp_error_stats se_errors;
+ /* Protects errors stats */
+ spinlock_t errors_lock;
+#endif
};
#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc)
@@ -62,6 +82,12 @@ struct malidp_crtc_state {
int malidp_de_planes_init(struct drm_device *drm);
int malidp_crtc_init(struct drm_device *drm);
+#ifdef CONFIG_DEBUG_FS
+void malidp_error(struct malidp_drm *malidp,
+ struct malidp_error_stats *error_stats, u32 status,
+ u64 vblank);
+#endif
+
/* often used combination of rotational bits */
#define MALIDP_ROTATED_MASK (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270)
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 069783e715f1..c94a4422e0e9 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -21,15 +21,24 @@
#include "malidp_drv.h"
#include "malidp_hw.h"
+#include "malidp_mw.h"
+
+enum {
+ MW_NOT_ENABLED = 0, /* SE writeback not enabled */
+ MW_ONESHOT, /* SE in one-shot mode for writeback */
+ MW_START, /* SE started writeback */
+ MW_RESTART, /* SE will start another writeback after this one */
+ MW_STOP, /* SE needs to stop after this writeback */
+};
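
The enum is a small state machine driven by the memwrite enable/disable hooks added below. As the DP500 handlers implement it (DP550/650 sidestep it with the hardware one-shot mode and only ever use MW_ONESHOT; the fall back to MW_NOT_ENABLED happens in the SE IRQ handler, outside this hunk):

	MW_NOT_ENABLED -> MW_START		first enable_memwrite()
	MW_START/MW_RESTART -> MW_RESTART	enable_memwrite() again while active
	MW_START/MW_RESTART -> MW_STOP		disable_memwrite()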
static const struct malidp_format_id malidp500_de_formats[] = {
/* fourcc, layers supporting the format, internal id */
- { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 0 },
- { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 1 },
+ { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 0 },
+ { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 1 },
{ DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 2 },
{ DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 3 },
- { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 4 },
- { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 5 },
+ { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 4 },
+ { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE, 5 },
{ DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 6 },
{ DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 7 },
{ DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 8 },
@@ -38,7 +47,7 @@ static const struct malidp_format_id malidp500_de_formats[] = {
{ DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 11 },
{ DRM_FORMAT_UYVY, DE_VIDEO1, 12 },
{ DRM_FORMAT_YUYV, DE_VIDEO1, 13 },
- { DRM_FORMAT_NV12, DE_VIDEO1, 14 },
+ { DRM_FORMAT_NV12, DE_VIDEO1 | SE_MEMWRITE, 14 },
{ DRM_FORMAT_YUV420, DE_VIDEO1, 15 },
};
@@ -47,27 +56,27 @@ static const struct malidp_format_id malidp500_de_formats[] = {
#define MALIDP_COMMON_FORMATS \
/* fourcc, layers supporting the format, internal id */ \
- { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 0) }, \
- { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 1) }, \
- { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 2) }, \
- { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 3) }, \
+ { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 0) }, \
+ { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 1) }, \
+ { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 2) }, \
+ { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 3) }, \
{ DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 0) }, \
{ DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 1) }, \
{ DRM_FORMAT_RGBA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 2) }, \
{ DRM_FORMAT_BGRA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 3) }, \
- { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 0) }, \
- { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 1) }, \
- { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 2) }, \
- { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 3) }, \
- { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 0) }, \
- { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 1) }, \
+ { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 0) }, \
+ { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 1) }, \
+ { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 2) }, \
+ { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 3) }, \
+ { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(3, 0) }, \
+ { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(3, 1) }, \
{ DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 0) }, \
{ DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \
{ DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \
{ DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \
{ DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \
{ DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \
- { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \
+ { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) }, \
{ DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }
static const struct malidp_format_id malidp550_de_formats[] = {
@@ -223,15 +232,20 @@ static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
return false;
}
-static void malidp500_set_config_valid(struct malidp_hw_device *hwdev)
+static void malidp500_set_config_valid(struct malidp_hw_device *hwdev, u8 value)
{
- malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
+ if (value)
+ malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
+ else
+ malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
}
static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
{
u32 val = 0;
+ malidp_hw_write(hwdev, hwdev->output_color_depth,
+ hwdev->hw->map.out_depth_base);
malidp_hw_clearbits(hwdev, MALIDP500_DC_CLEAR_MASK, MALIDP500_DC_CONTROL);
if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
val |= MALIDP500_HSYNCPOL;
@@ -368,6 +382,55 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
return ret;
}
+static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
+ dma_addr_t *addrs, s32 *pitches,
+ int num_planes, u16 w, u16 h, u32 fmt_id)
+{
+ u32 base = MALIDP500_SE_MEMWRITE_BASE;
+ u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+ /* enable the scaling engine block */
+ malidp_hw_setbits(hwdev, MALIDP_SCALE_ENGINE_EN, de_base + MALIDP_DE_DISPLAY_FUNC);
+
+ /* restart the writeback if already enabled */
+ if (hwdev->mw_state != MW_NOT_ENABLED)
+ hwdev->mw_state = MW_RESTART;
+ else
+ hwdev->mw_state = MW_START;
+
+ malidp_hw_write(hwdev, fmt_id, base + MALIDP_MW_FORMAT);
+ switch (num_planes) {
+ case 2:
+ malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
+ malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
+ malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
+ /* fall through */
+ case 1:
+ malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
+ malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
+ malidp_hw_write(hwdev, pitches[0], base + MALIDP_MW_P1_STRIDE);
+ break;
+ default:
+ WARN(1, "Invalid number of planes");
+ }
+
+ malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
+ MALIDP500_SE_MEMWRITE_OUT_SIZE);
+ malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
+
+ return 0;
+}
+
+static void malidp500_disable_memwrite(struct malidp_hw_device *hwdev)
+{
+ u32 base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+ if (hwdev->mw_state == MW_START || hwdev->mw_state == MW_RESTART)
+ hwdev->mw_state = MW_STOP;
+ malidp_hw_clearbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
+ malidp_hw_clearbits(hwdev, MALIDP_SCALE_ENGINE_EN, base + MALIDP_DE_DISPLAY_FUNC);
+}
+
static int malidp550_query_hw(struct malidp_hw_device *hwdev)
{
u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
@@ -447,15 +510,20 @@ static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
return false;
}
-static void malidp550_set_config_valid(struct malidp_hw_device *hwdev)
+static void malidp550_set_config_valid(struct malidp_hw_device *hwdev, u8 value)
{
- malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
+ if (value)
+ malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
+ else
+ malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
}
static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
{
u32 val = MALIDP_DE_DEFAULT_PREFETCH_START;
+ malidp_hw_write(hwdev, hwdev->output_color_depth,
+ hwdev->hw->map.out_depth_base);
malidp_hw_write(hwdev, val, MALIDP550_DE_CONTROL);
/*
* Mali-DP550 and Mali-DP650 encode the background color like this:
@@ -588,6 +656,51 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
return ret;
}
+static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
+ dma_addr_t *addrs, s32 *pitches,
+ int num_planes, u16 w, u16 h, u32 fmt_id)
+{
+ u32 base = MALIDP550_SE_MEMWRITE_BASE;
+ u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+ /* enable the scaling engine block */
+ malidp_hw_setbits(hwdev, MALIDP_SCALE_ENGINE_EN, de_base + MALIDP_DE_DISPLAY_FUNC);
+
+ hwdev->mw_state = MW_ONESHOT;
+
+ malidp_hw_write(hwdev, fmt_id, base + MALIDP_MW_FORMAT);
+ switch (num_planes) {
+ case 2:
+ malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
+ malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
+ malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
+ /* fall through */
+ case 1:
+ malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
+ malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
+ malidp_hw_write(hwdev, pitches[0], base + MALIDP_MW_P1_STRIDE);
+ break;
+ default:
+ WARN(1, "Invalid number of planes");
+ }
+
+ malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
+ MALIDP550_SE_MEMWRITE_OUT_SIZE);
+ malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
+ MALIDP550_SE_CONTROL);
+
+ return 0;
+}
+
+static void malidp550_disable_memwrite(struct malidp_hw_device *hwdev)
+{
+ u32 base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+ malidp_hw_clearbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
+ MALIDP550_SE_CONTROL);
+ malidp_hw_clearbits(hwdev, MALIDP_SCALE_ENGINE_EN, base + MALIDP_DE_DISPLAY_FUNC);
+}
+
static int malidp650_query_hw(struct malidp_hw_device *hwdev)
{
u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
@@ -632,11 +745,18 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
MALIDP500_DE_IRQ_VSYNC |
MALIDP500_DE_IRQ_GLOBAL,
.vsync_irq = MALIDP500_DE_IRQ_VSYNC,
+ .err_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP500_DE_IRQ_AXI_ERR |
+ MALIDP500_DE_IRQ_SATURATION,
},
.se_irq_map = {
.irq_mask = MALIDP500_SE_IRQ_CONF_MODE |
+ MALIDP500_SE_IRQ_CONF_VALID |
MALIDP500_SE_IRQ_GLOBAL,
- .vsync_irq = 0,
+ .vsync_irq = MALIDP500_SE_IRQ_CONF_VALID,
+ .err_mask = MALIDP500_SE_IRQ_INIT_BUSY |
+ MALIDP500_SE_IRQ_AXI_ERROR |
+ MALIDP500_SE_IRQ_OVERRUN,
},
.dc_irq_map = {
.irq_mask = MALIDP500_DE_IRQ_CONF_VALID,
@@ -655,6 +775,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.rotmem_required = malidp500_rotmem_required,
.se_set_scaling_coeffs = malidp500_se_set_scaling_coeffs,
.se_calc_mclk = malidp500_se_calc_mclk,
+ .enable_memwrite = malidp500_enable_memwrite,
+ .disable_memwrite = malidp500_disable_memwrite,
.features = MALIDP_DEVICE_LV_HAS_3_STRIDES,
},
[MALIDP_550] = {
@@ -670,13 +792,20 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.irq_mask = MALIDP_DE_IRQ_UNDERRUN |
MALIDP550_DE_IRQ_VSYNC,
.vsync_irq = MALIDP550_DE_IRQ_VSYNC,
+ .err_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP550_DE_IRQ_SATURATION |
+ MALIDP550_DE_IRQ_AXI_ERR,
},
.se_irq_map = {
- .irq_mask = MALIDP550_SE_IRQ_EOW |
- MALIDP550_SE_IRQ_AXI_ERR,
+ .irq_mask = MALIDP550_SE_IRQ_EOW,
+ .vsync_irq = MALIDP550_SE_IRQ_EOW,
+ .err_mask = MALIDP550_SE_IRQ_AXI_ERR |
+ MALIDP550_SE_IRQ_OVR |
+ MALIDP550_SE_IRQ_IBSY,
},
.dc_irq_map = {
- .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
+ .irq_mask = MALIDP550_DC_IRQ_CONF_VALID |
+ MALIDP550_DC_IRQ_SE,
.vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
},
.pixel_formats = malidp550_de_formats,
@@ -692,6 +821,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.rotmem_required = malidp550_rotmem_required,
.se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
.se_calc_mclk = malidp550_se_calc_mclk,
+ .enable_memwrite = malidp550_enable_memwrite,
+ .disable_memwrite = malidp550_disable_memwrite,
.features = 0,
},
[MALIDP_650] = {
@@ -708,13 +839,25 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
MALIDP650_DE_IRQ_DRIFT |
MALIDP550_DE_IRQ_VSYNC,
.vsync_irq = MALIDP550_DE_IRQ_VSYNC,
+ .err_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP650_DE_IRQ_DRIFT |
+ MALIDP550_DE_IRQ_SATURATION |
+ MALIDP550_DE_IRQ_AXI_ERR |
+ MALIDP650_DE_IRQ_ACEV1 |
+ MALIDP650_DE_IRQ_ACEV2 |
+ MALIDP650_DE_IRQ_ACEG |
+ MALIDP650_DE_IRQ_AXIEP,
},
.se_irq_map = {
- .irq_mask = MALIDP550_SE_IRQ_EOW |
- MALIDP550_SE_IRQ_AXI_ERR,
+ .irq_mask = MALIDP550_SE_IRQ_EOW,
+ .vsync_irq = MALIDP550_SE_IRQ_EOW,
+ .err_mask = MALIDP550_SE_IRQ_AXI_ERR |
+ MALIDP550_SE_IRQ_OVR |
+ MALIDP550_SE_IRQ_IBSY,
},
.dc_irq_map = {
- .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
+ .irq_mask = MALIDP550_DC_IRQ_CONF_VALID |
+ MALIDP550_DC_IRQ_SE,
.vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
},
.pixel_formats = malidp550_de_formats,
@@ -730,6 +873,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.rotmem_required = malidp550_rotmem_required,
.se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
.se_calc_mclk = malidp550_se_calc_mclk,
+ .enable_memwrite = malidp550_enable_memwrite,
+ .disable_memwrite = malidp550_disable_memwrite,
.features = 0,
},
};
@@ -791,7 +936,7 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
malidp->event = NULL;
spin_unlock(&drm->event_lock);
}
- atomic_set(&malidp->config_valid, 1);
+ atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
ret = IRQ_WAKE_THREAD;
}
@@ -800,10 +945,17 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
return ret;
mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ);
- status &= mask;
+ /* keep the status of the enabled interrupts, plus the error bits */
+ status &= (mask | de->err_mask);
if ((status & de->vsync_irq) && malidp->crtc.enabled)
drm_crtc_handle_vblank(&malidp->crtc);
+#ifdef CONFIG_DEBUG_FS
+ if (status & de->err_mask) {
+ malidp_error(malidp, &malidp->de_errors, status,
+ drm_crtc_vblank_count(&malidp->crtc));
+ }
+#endif
malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status);
return (ret == IRQ_NONE) ? IRQ_HANDLED : ret;
@@ -819,6 +971,23 @@ static irqreturn_t malidp_de_irq_thread_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+void malidp_de_irq_hw_init(struct malidp_hw_device *hwdev)
+{
+ /* ensure interrupts are disabled */
+ malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
+ malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
+
+ /* first enable the DC block IRQs */
+ malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
+ hwdev->hw->map.dc_irq_map.irq_mask);
+
+ /* now enable the DE block IRQs */
+ malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
+ hwdev->hw->map.de_irq_map.irq_mask);
+}
+
int malidp_de_irq_init(struct drm_device *drm, int irq)
{
struct malidp_drm *malidp = drm->dev_private;
@@ -839,22 +1008,13 @@ int malidp_de_irq_init(struct drm_device *drm, int irq)
return ret;
}
- /* first enable the DC block IRQs */
- malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
- hwdev->hw->map.dc_irq_map.irq_mask);
-
- /* now enable the DE block IRQs */
- malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->hw->map.de_irq_map.irq_mask);
+ malidp_de_irq_hw_init(hwdev);
return 0;
}
-void malidp_de_irq_fini(struct drm_device *drm)
+void malidp_de_irq_fini(struct malidp_hw_device *hwdev)
{
- struct malidp_drm *malidp = drm->dev_private;
- struct malidp_hw_device *hwdev = malidp->dev;
-
malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
hwdev->hw->map.de_irq_map.irq_mask);
malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
@@ -879,19 +1039,61 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
return IRQ_NONE;
status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
- if (!(status & se->irq_mask))
+ if (!(status & (se->irq_mask | se->err_mask)))
return IRQ_NONE;
+#ifdef CONFIG_DEBUG_FS
+ if (status & se->err_mask)
+ malidp_error(malidp, &malidp->se_errors, status,
+ drm_crtc_vblank_count(&malidp->crtc));
+#endif
mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ);
- status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
status &= mask;
- /* ToDo: status decoding and firing up of VSYNC and page flip events */
+
+ if (status & se->vsync_irq) {
+ switch (hwdev->mw_state) {
+ case MW_ONESHOT:
+ drm_writeback_signal_completion(&malidp->mw_connector, 0);
+ break;
+ case MW_STOP:
+ drm_writeback_signal_completion(&malidp->mw_connector, 0);
+ /* disable writeback after stop */
+ hwdev->mw_state = MW_NOT_ENABLED;
+ break;
+ case MW_RESTART:
+ drm_writeback_signal_completion(&malidp->mw_connector, 0);
+ /* fall through to a new start */
+ case MW_START:
+ /* writeback started, need to emulate one-shot mode */
+ hw->disable_memwrite(hwdev);
+ /*
+ * only set the config_valid HW bit if there is no other update
+ * in progress, or if we raced ahead of the DE IRQ handler
+ * and the config_valid flag will not be updated until later
+ */
+ status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
+ if ((atomic_read(&malidp->config_valid) != MALIDP_CONFIG_START) ||
+ (status & hw->map.dc_irq_map.vsync_irq))
+ hw->set_config_valid(hwdev, 1);
+ break;
+ }
+ }
malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, status);
return IRQ_HANDLED;
}
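+/*
+ * malidp->config_valid is shared with the DE IRQ path; the values
+ * referenced above are defined elsewhere in this series. A minimal
+ * sketch of the handshake, assuming three states:
+ *
+ *	atomic commit:	atomic_set(&malidp->config_valid, MALIDP_CONFIG_START);
+ *			... program hardware, set config valid bit ...
+ *	DE IRQ:		atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
+ *
+ * The SE handler above only sets the config-valid hardware bit itself
+ * when no commit is mid-flight, or when the DC status register shows
+ * the config-valid interrupt has already fired and the flag simply has
+ * not been updated yet.
+ */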
+void malidp_se_irq_hw_init(struct malidp_hw_device *hwdev)
+{
+ /* ensure interrupts are disabled */
+ malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
+
+ malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
+ hwdev->hw->map.se_irq_map.irq_mask);
+}
+
static irqreturn_t malidp_se_irq_thread_handler(int irq, void *arg)
{
return IRQ_HANDLED;
@@ -915,17 +1117,14 @@ int malidp_se_irq_init(struct drm_device *drm, int irq)
return ret;
}
- malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
- hwdev->hw->map.se_irq_map.irq_mask);
+ hwdev->mw_state = MW_NOT_ENABLED;
+ malidp_se_irq_hw_init(hwdev);
return 0;
}
-void malidp_se_irq_fini(struct drm_device *drm)
+void malidp_se_irq_fini(struct malidp_hw_device *hwdev)
{
- struct malidp_drm *malidp = drm->dev_private;
- struct malidp_hw_device *hwdev = malidp->dev;
-
malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
hwdev->hw->map.se_irq_map.irq_mask);
}
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index b5dd6c73ec9f..ad2e96915d44 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -33,6 +33,7 @@ enum {
DE_GRAPHICS2 = BIT(2), /* used only in DP500 */
DE_VIDEO2 = BIT(3),
DE_SMART = BIT(4),
+ SE_MEMWRITE = BIT(5),
};
struct malidp_format_id {
@@ -52,6 +53,7 @@ struct malidp_format_id {
struct malidp_irq_map {
u32 irq_mask; /* mask of IRQs that can be enabled in the block */
u32 vsync_irq; /* IRQ bit used for signaling during VSYNC */
+ u32 err_mask; /* mask of bits that represent errors */
};
struct malidp_layer {
@@ -151,12 +153,13 @@ struct malidp_hw {
bool (*in_config_mode)(struct malidp_hw_device *hwdev);
/*
- * Set configuration valid flag for hardware parameters that can
- * be changed outside the configuration mode. Hardware will use
- * the new settings when config valid is set after the end of the
- * current buffer scanout
+ * Set or clear the configuration valid flag to the given value, for
+ * hardware parameters that can be changed outside the configuration
+ * mode. Hardware will apply the new settings at the end of the
+ * current buffer scanout once config valid is set, and will ignore
+ * any new values for those parameters while the flag is cleared
*/
- void (*set_config_valid)(struct malidp_hw_device *hwdev);
+ void (*set_config_valid)(struct malidp_hw_device *hwdev, u8 value);
/*
* Set a new mode in hardware. Requires the hardware to be in
@@ -177,6 +180,23 @@ struct malidp_hw {
long (*se_calc_mclk)(struct malidp_hw_device *hwdev,
struct malidp_se_config *se_config,
struct videomode *vm);
+ /*
+ * Enable writing the content of the next frame to memory
+ * @param hwdev - malidp_hw_device structure containing the HW description
+ * @param addrs - array of addresses for each plane
+ * @param pitches - array of pitches for each plane
+ * @param num_planes - number of planes to be written
+ * @param w - width of the output frame
+ * @param h - height of the output frame
+ * @param fmt_id - internal format ID of output buffer
+ */
+ int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
+ s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id);
+
+ /*
+ * Disable writing the next frame's content to memory.
+ */
+ void (*disable_memwrite)(struct malidp_hw_device *hwdev);
u8 features;
};
@@ -210,10 +230,14 @@ struct malidp_hw_device {
u8 min_line_size;
u16 max_line_size;
+ u32 output_color_depth;
/* track the device PM state */
bool pm_suspended;
+ /* track the SE memory writeback state */
+ u8 mw_state;
+
/* size of memory used for rotating layers, up to two banks available */
u32 rotation_memory[2];
};
@@ -279,9 +303,11 @@ static inline void malidp_hw_enable_irq(struct malidp_hw_device *hwdev,
}
int malidp_de_irq_init(struct drm_device *drm, int irq);
-void malidp_de_irq_fini(struct drm_device *drm);
+void malidp_se_irq_hw_init(struct malidp_hw_device *hwdev);
+void malidp_de_irq_hw_init(struct malidp_hw_device *hwdev);
+void malidp_de_irq_fini(struct malidp_hw_device *hwdev);
int malidp_se_irq_init(struct drm_device *drm, int irq);
-void malidp_se_irq_fini(struct drm_device *drm);
+void malidp_se_irq_fini(struct malidp_hw_device *hwdev);
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
u8 layer_id, u32 format);
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
new file mode 100644
index 000000000000..ba6ae66387c9
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * ARM Mali DP Writeback connector implementation
+ */
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drmP.h>
+#include <drm/drm_writeback.h>
+
+#include "malidp_drv.h"
+#include "malidp_hw.h"
+#include "malidp_mw.h"
+
+#define to_mw_state(_state) (struct malidp_mw_connector_state *)(_state)
+
+struct malidp_mw_connector_state {
+ struct drm_connector_state base;
+ dma_addr_t addrs[2];
+ s32 pitches[2];
+ u8 format;
+ u8 n_planes;
+};
+
+static int malidp_mw_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+ dev->mode_config.max_height);
+}
+
+static enum drm_mode_status
+malidp_mw_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ int w = mode->hdisplay, h = mode->vdisplay;
+
+ if ((w < mode_config->min_width) || (w > mode_config->max_width))
+ return MODE_BAD_HVALUE;
+
+ if ((h < mode_config->min_height) || (h > mode_config->max_height))
+ return MODE_BAD_VVALUE;
+
+ return MODE_OK;
+}
+
+const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
+ .get_modes = malidp_mw_connector_get_modes,
+ .mode_valid = malidp_mw_connector_mode_valid,
+};
+
+static void malidp_mw_connector_reset(struct drm_connector *connector)
+{
+ struct malidp_mw_connector_state *mw_state =
+ kzalloc(sizeof(*mw_state), GFP_KERNEL);
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
+ kfree(connector->state);
+ __drm_atomic_helper_connector_reset(connector, &mw_state->base);
+}
+
+static enum drm_connector_status
+malidp_mw_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void malidp_mw_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+static struct drm_connector_state *
+malidp_mw_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct malidp_mw_connector_state *mw_state;
+
+ if (WARN_ON(!connector->state))
+ return NULL;
+
+ mw_state = kzalloc(sizeof(*mw_state), GFP_KERNEL);
+ if (!mw_state)
+ return NULL;
+
+ /* No need to preserve any of our driver-local data */
+ __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);
+
+ return &mw_state->base;
+}
+
+static const struct drm_connector_funcs malidp_mw_connector_funcs = {
+ .reset = malidp_mw_connector_reset,
+ .detect = malidp_mw_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = malidp_mw_connector_destroy,
+ .atomic_duplicate_state = malidp_mw_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int
+malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct malidp_mw_connector_state *mw_state = to_mw_state(conn_state);
+ struct malidp_drm *malidp = encoder->dev->dev_private;
+ struct drm_framebuffer *fb;
+ int i, n_planes;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+ if ((fb->width != crtc_state->mode.hdisplay) ||
+ (fb->height != crtc_state->mode.vdisplay)) {
+ DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
+ fb->width, fb->height);
+ return -EINVAL;
+ }
+
+ mw_state->format =
+ malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE,
+ fb->format->format);
+ if (mw_state->format == MALIDP_INVALID_FORMAT_ID) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->format->format,
+ &format_name));
+ return -EINVAL;
+ }
+
+ n_planes = drm_format_num_planes(fb->format->format);
+ for (i = 0; i < n_planes; i++) {
+ struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, i);
+ /* memory write buffers are never rotated */
+ u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 0);
+
+ if (fb->pitches[i] & (alignment - 1)) {
+ DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
+ fb->pitches[i], i);
+ return -EINVAL;
+ }
+ mw_state->pitches[i] = fb->pitches[i];
+ mw_state->addrs[i] = obj->paddr + fb->offsets[i];
+ }
+ mw_state->n_planes = n_planes;
+
+ return 0;
+}
+
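+/*
+ * A minimal sketch of the pitch check above, assuming the value from
+ * malidp_hw_get_pitch_align() is a power of two: masking the pitch
+ * with (alignment - 1) keeps only the bits below the alignment, so a
+ * non-zero result means the stride is misaligned.
+ *
+ *	u8 alignment = 8;		// hypothetical 8-byte alignment
+ *	1920 & (alignment - 1)		// == 0, aligned
+ *	1921 & (alignment - 1)		// == 1, rejected with -EINVAL
+ */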
+static const struct drm_encoder_helper_funcs malidp_mw_encoder_helper_funcs = {
+ .atomic_check = malidp_mw_encoder_atomic_check,
+};
+
+static u32 *get_writeback_formats(struct malidp_drm *malidp, int *n_formats)
+{
+ const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
+ u32 *formats;
+ int n, i;
+
+ formats = kcalloc(map->n_pixel_formats, sizeof(*formats),
+ GFP_KERNEL);
+ if (!formats)
+ return NULL;
+
+ for (n = 0, i = 0; i < map->n_pixel_formats; i++) {
+ if (map->pixel_formats[i].layer & SE_MEMWRITE)
+ formats[n++] = map->pixel_formats[i].format;
+ }
+
+ *n_formats = n;
+
+ return formats;
+}
+
+int malidp_mw_connector_init(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ u32 *formats;
+ int ret, n_formats;
+
+ if (!malidp->dev->hw->enable_memwrite)
+ return 0;
+
+ malidp->mw_connector.encoder.possible_crtcs = 1 << drm_crtc_index(&malidp->crtc);
+ drm_connector_helper_add(&malidp->mw_connector.base,
+ &malidp_mw_connector_helper_funcs);
+
+ formats = get_writeback_formats(malidp, &n_formats);
+ if (!formats)
+ return -ENOMEM;
+
+ ret = drm_writeback_connector_init(drm, &malidp->mw_connector,
+ &malidp_mw_connector_funcs,
+ &malidp_mw_encoder_helper_funcs,
+ formats, n_formats);
+ kfree(formats);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void malidp_mw_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *old_state)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct drm_writeback_connector *mw_conn = &malidp->mw_connector;
+ struct drm_connector_state *conn_state = mw_conn->base.state;
+ struct malidp_hw_device *hwdev = malidp->dev;
+ struct malidp_mw_connector_state *mw_state;
+
+ if (!conn_state)
+ return;
+
+ mw_state = to_mw_state(conn_state);
+
+ if (conn_state->writeback_job && conn_state->writeback_job->fb) {
+ struct drm_framebuffer *fb = conn_state->writeback_job->fb;
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev,
+ "Enable memwrite %ux%u:%d %pad fmt: %u\n",
+ fb->width, fb->height,
+ mw_state->pitches[0],
+ &mw_state->addrs[0],
+ mw_state->format);
+
+ drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
+ conn_state->writeback_job = NULL;
+
+ hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
+ mw_state->pitches, mw_state->n_planes,
+ fb->width, fb->height, mw_state->format);
+ } else {
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
+ hwdev->hw->disable_memwrite(hwdev);
+ }
+}
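+
+/*
+ * Writeback job lifecycle as wired up in this series, sketched:
+ *
+ *	atomic commit				SE IRQ (EOW)
+ *	-------------				------------
+ *	drm_writeback_queue_job()		drm_writeback_signal_completion()
+ *	hw->enable_memwrite()
+ *
+ * The job is queued before the hardware is programmed, so by the time
+ * the end-of-write interrupt fires the handler always finds a job
+ * ready to be completed.
+ */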
diff --git a/drivers/gpu/drm/arm/malidp_mw.h b/drivers/gpu/drm/arm/malidp_mw.h
new file mode 100644
index 000000000000..19a007676a1d
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_mw.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ */
+
+#ifndef __MALIDP_MW_H__
+#define __MALIDP_MW_H__
+
+int malidp_mw_connector_init(struct drm_device *drm);
+void malidp_mw_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *old_state);
+#endif
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 149024fb4432..3579d36b2a71 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -53,6 +53,8 @@
#define MALIDP550_DE_IRQ_AXI_ERR (1 << 16)
#define MALIDP550_SE_IRQ_EOW (1 << 0)
#define MALIDP550_SE_IRQ_AXI_ERR (1 << 16)
+#define MALIDP550_SE_IRQ_OVR (1 << 17)
+#define MALIDP550_SE_IRQ_IBSY (1 << 18)
#define MALIDP550_DC_IRQ_CONF_VALID (1 << 0)
#define MALIDP550_DC_IRQ_CONF_MODE (1 << 4)
#define MALIDP550_DC_IRQ_CONF_ACTIVE (1 << 16)
@@ -60,12 +62,18 @@
#define MALIDP550_DC_IRQ_SE (1 << 24)
#define MALIDP650_DE_IRQ_DRIFT (1 << 4)
+#define MALIDP650_DE_IRQ_ACEV1 (1 << 17)
+#define MALIDP650_DE_IRQ_ACEV2 (1 << 18)
+#define MALIDP650_DE_IRQ_ACEG (1 << 19)
+#define MALIDP650_DE_IRQ_AXIEP (1 << 28)
/* bit masks that are common between products */
#define MALIDP_CFG_VALID (1 << 0)
#define MALIDP_DISP_FUNC_GAMMA (1 << 0)
#define MALIDP_DISP_FUNC_CADJ (1 << 4)
#define MALIDP_DISP_FUNC_ILACED (1 << 8)
+#define MALIDP_SCALE_ENGINE_EN (1 << 16)
+#define MALIDP_SE_MEMWRITE_EN (2 << 5)
/* register offsets for IRQ management */
#define MALIDP_REG_STATUS 0x00000
@@ -153,6 +161,16 @@
(((x) & MALIDP_SE_ENH_LIMIT_MASK) << 16)
#define MALIDP_SE_ENH_COEFF0 0x04
+
+/* register offsets relative to MALIDP5x0_SE_MEMWRITE_BASE */
+#define MALIDP_MW_FORMAT 0x00000
+#define MALIDP_MW_P1_STRIDE 0x00004
+#define MALIDP_MW_P2_STRIDE 0x00008
+#define MALIDP_MW_P1_PTR_LOW 0x0000c
+#define MALIDP_MW_P1_PTR_HIGH 0x00010
+#define MALIDP_MW_P2_PTR_LOW 0x0002c
+#define MALIDP_MW_P2_PTR_HIGH 0x00030
+
/* register offsets and bits specific to DP500 */
#define MALIDP500_ADDR_SPACE_SIZE 0x01000
#define MALIDP500_DC_BASE 0x00000
@@ -186,7 +204,8 @@
#define MALIDP500_DE_LG2_PTR_BASE 0x0031c
#define MALIDP500_SE_BASE 0x00c00
#define MALIDP500_SE_CONTROL 0x00c0c
-#define MALIDP500_SE_PTR_BASE 0x00e0c
+#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
+#define MALIDP500_SE_MEMWRITE_BASE 0x00e00
#define MALIDP500_DC_IRQ_BASE 0x00f00
#define MALIDP500_CONFIG_VALID 0x00f00
#define MALIDP500_CONFIG_ID 0x00fd4
@@ -217,6 +236,9 @@
#define MALIDP550_DE_PERF_BASE 0x00500
#define MALIDP550_SE_BASE 0x08000
#define MALIDP550_SE_CONTROL 0x08010
+#define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7)
+#define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030
+#define MALIDP550_SE_MEMWRITE_BASE 0x08100
#define MALIDP550_DC_BASE 0x0c000
#define MALIDP550_DC_CONTROL 0x0c010
#define MALIDP550_DC_CONFIG_REQ (1 << 16)
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index ecf25cf9f9f5..9bc3c3213724 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
- armada_gem.o armada_overlay.o armada_trace.o
+ armada_gem.o armada_overlay.o armada_plane.o armada_trace.o
armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
index 41a784f5a5e6..2f7c048c5361 100644
--- a/drivers/gpu/drm/armada/armada_510.c
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -27,6 +27,10 @@ static int armada510_crtc_init(struct armada_crtc *dcrtc, struct device *dev)
/* Lower the watermark so as to eliminate jitter at higher bandwidths */
armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+ /* Initialise SPU register */
+ writel_relaxed(ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
+ dcrtc->base + LCD_SPU_ADV_REG);
+
return 0;
}
@@ -75,9 +79,27 @@ static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
return 0;
}
+static void armada510_crtc_disable(struct armada_crtc *dcrtc)
+{
+ if (!IS_ERR(dcrtc->clk)) {
+ clk_disable_unprepare(dcrtc->clk);
+ dcrtc->clk = ERR_PTR(-EINVAL);
+ }
+}
+
+static void armada510_crtc_enable(struct armada_crtc *dcrtc,
+ const struct drm_display_mode *mode)
+{
+ if (IS_ERR(dcrtc->clk)) {
+ dcrtc->clk = dcrtc->extclk[0];
+ WARN_ON(clk_prepare_enable(dcrtc->clk));
+ }
+}
+
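+/*
+ * dcrtc->clk doubles as the enabled-state marker here: ERR_PTR(-EINVAL)
+ * means "no clock claimed", anything else is a prepared and enabled
+ * clock. A minimal sketch of the same pattern, with some_clk as a
+ * placeholder:
+ *
+ *	struct clk *clk = ERR_PTR(-EINVAL);	// initially off
+ *	if (IS_ERR(clk)) {			// enable exactly once
+ *		clk = some_clk;
+ *		WARN_ON(clk_prepare_enable(clk));
+ *	}
+ *	if (!IS_ERR(clk)) {			// disable exactly once
+ *		clk_disable_unprepare(clk);
+ *		clk = ERR_PTR(-EINVAL);
+ *	}
+ */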
const struct armada_variant armada510_ops = {
.has_spu_adv_reg = true,
- .spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
.init = armada510_crtc_init,
.compute_clock = armada510_crtc_compute_clock,
+ .disable = armada510_crtc_disable,
+ .enable = armada510_crtc_enable,
};
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 42a40daff132..da9360688b55 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -11,6 +11,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -19,33 +20,9 @@
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
+#include "armada_plane.h"
#include "armada_trace.h"
-enum csc_mode {
- CSC_AUTO = 0,
- CSC_YUV_CCIR601 = 1,
- CSC_YUV_CCIR709 = 2,
- CSC_RGB_COMPUTER = 1,
- CSC_RGB_STUDIO = 2,
-};
-
-static const uint32_t armada_primary_formats[] = {
- DRM_FORMAT_UYVY,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ABGR1555,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_BGR565,
-};
-
/*
* A note about interlacing. Let's consider HDMI 1920x1080i.
* The timing parameters we have from X are:
@@ -115,15 +92,13 @@ armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
}
}
-#define dpms_blanked(dpms) ((dpms) != DRM_MODE_DPMS_ON)
-
-static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
+static void armada_drm_crtc_update(struct armada_crtc *dcrtc, bool enable)
{
uint32_t dumb_ctrl;
dumb_ctrl = dcrtc->cfg_dumb_ctrl;
- if (!dpms_blanked(dcrtc->dpms))
+ if (enable)
dumb_ctrl |= CFG_DUMB_ENA;
/*
@@ -132,295 +107,26 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
* force LCD_D[23:0] to output blank color, overriding the GPIO or
* SPI usage. So leave it as-is unless in DUMB24_RGB888_0 mode.
*/
- if (dpms_blanked(dcrtc->dpms) &&
- (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
+ if (!enable && (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
dumb_ctrl &= ~DUMB_MASK;
dumb_ctrl |= DUMB_BLANK;
}
- /*
- * The documentation doesn't indicate what the normal state of
- * the sync signals are. Sebastian Hesselbart kindly probed
- * these signals on his board to determine their state.
- *
- * The non-inverted state of the sync signals is active high.
- * Setting these bits makes the appropriate signal active low.
- */
- if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC)
- dumb_ctrl |= CFG_INV_CSYNC;
- if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC)
- dumb_ctrl |= CFG_INV_HSYNC;
- if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC)
- dumb_ctrl |= CFG_INV_VSYNC;
-
- if (dcrtc->dumb_ctrl != dumb_ctrl) {
- dcrtc->dumb_ctrl = dumb_ctrl;
- writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL);
- }
-}
-
-void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
- int x, int y)
-{
- const struct drm_format_info *format = fb->format;
- unsigned int num_planes = format->num_planes;
- u32 addr = drm_fb_obj(fb)->dev_addr;
- int i;
-
- if (num_planes > 3)
- num_planes = 3;
-
- addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] +
- x * format->cpp[0];
-
- y /= format->vsub;
- x /= format->hsub;
-
- for (i = 1; i < num_planes; i++)
- addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
- x * format->cpp[i];
- for (; i < 3; i++)
- addrs[i] = 0;
-}
-
-static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
- int x, int y, struct armada_regs *regs, bool interlaced)
-{
- unsigned pitch = fb->pitches[0];
- u32 addrs[3], addr_odd, addr_even;
- unsigned i = 0;
-
- DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
- pitch, x, y, fb->format->cpp[0] * 8);
-
- armada_drm_plane_calc_addrs(addrs, fb, x, y);
-
- addr_odd = addr_even = addrs[0];
-
- if (interlaced) {
- addr_even += pitch;
- pitch *= 2;
- }
-
- /* write offset, base, and pitch */
- armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0);
- armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1);
- armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
-
- return i;
-}
-
-static void armada_drm_plane_work_call(struct armada_crtc *dcrtc,
- struct armada_plane_work *work,
- void (*fn)(struct armada_crtc *, struct armada_plane_work *))
-{
- struct armada_plane *dplane = drm_to_armada_plane(work->plane);
- struct drm_pending_vblank_event *event;
- struct drm_framebuffer *fb;
-
- if (fn)
- fn(dcrtc, work);
- drm_crtc_vblank_put(&dcrtc->crtc);
-
- event = work->event;
- fb = work->old_fb;
- if (event || fb) {
- struct drm_device *dev = dcrtc->crtc.dev;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- if (event)
- drm_crtc_send_vblank_event(&dcrtc->crtc, event);
- if (fb)
- __armada_drm_queue_unref_work(dev, fb);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
-
- if (work->need_kfree)
- kfree(work);
-
- wake_up(&dplane->frame_wait);
-}
-
-static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
- struct drm_plane *plane)
-{
- struct armada_plane *dplane = drm_to_armada_plane(plane);
- struct armada_plane_work *work = xchg(&dplane->work, NULL);
-
- /* Handle any pending frame work. */
- if (work)
- armada_drm_plane_work_call(dcrtc, work, work->fn);
-}
-
-int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
-{
- struct armada_plane *plane = drm_to_armada_plane(work->plane);
- int ret;
-
- ret = drm_crtc_vblank_get(&dcrtc->crtc);
- if (ret)
- return ret;
-
- ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
- if (ret)
- drm_crtc_vblank_put(&dcrtc->crtc);
-
- return ret;
-}
-
-int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout)
-{
- return wait_event_timeout(plane->frame_wait, !plane->work, timeout);
-}
-
-void armada_drm_plane_work_cancel(struct armada_crtc *dcrtc,
- struct armada_plane *dplane)
-{
- struct armada_plane_work *work = xchg(&dplane->work, NULL);
-
- if (work)
- armada_drm_plane_work_call(dcrtc, work, work->cancel);
-}
-
-static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dcrtc->irq_lock, flags);
- armada_drm_crtc_update_regs(dcrtc, work->regs);
- spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-static void armada_drm_crtc_complete_disable_work(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
-{
- unsigned long flags;
-
- if (dcrtc->plane == work->plane)
- dcrtc->plane = NULL;
-
- spin_lock_irqsave(&dcrtc->irq_lock, flags);
- armada_drm_crtc_update_regs(dcrtc, work->regs);
- spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-static struct armada_plane_work *
-armada_drm_crtc_alloc_plane_work(struct drm_plane *plane)
-{
- struct armada_plane_work *work;
- int i = 0;
-
- work = kzalloc(sizeof(*work), GFP_KERNEL);
- if (!work)
- return NULL;
-
- work->plane = plane;
- work->fn = armada_drm_crtc_complete_frame_work;
- work->need_kfree = true;
- armada_reg_queue_end(work->regs, i);
-
- return work;
-}
-
-static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
- struct drm_framebuffer *fb, bool force)
-{
- struct armada_plane_work *work;
-
- if (!fb)
- return;
-
- if (force) {
- /* Display is disabled, so just drop the old fb */
- drm_framebuffer_put(fb);
- return;
- }
-
- work = armada_drm_crtc_alloc_plane_work(dcrtc->crtc.primary);
- if (work) {
- work->old_fb = fb;
-
- if (armada_drm_plane_work_queue(dcrtc, work) == 0)
- return;
-
- kfree(work);
- }
-
- /*
- * Oops - just drop the reference immediately and hope for
- * the best. The worst that will happen is the buffer gets
- * reused before it has finished being displayed.
- */
- drm_framebuffer_put(fb);
+ armada_updatel(dumb_ctrl,
+ ~(CFG_INV_CSYNC | CFG_INV_HSYNC | CFG_INV_VSYNC),
+ dcrtc->base + LCD_SPU_DUMB_CTRL);
}
-static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
-{
- /*
- * Tell the DRM core that vblank IRQs aren't going to happen for
- * a while. This cleans up any pending vblank events for us.
- */
- drm_crtc_vblank_off(&dcrtc->crtc);
- armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
-}
-
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
-{
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-
- if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
- if (dpms_blanked(dpms))
- armada_drm_vblank_off(dcrtc);
- else if (!IS_ERR(dcrtc->clk))
- WARN_ON(clk_prepare_enable(dcrtc->clk));
- dcrtc->dpms = dpms;
- armada_drm_crtc_update(dcrtc);
- if (!dpms_blanked(dpms))
- drm_crtc_vblank_on(&dcrtc->crtc);
- else if (!IS_ERR(dcrtc->clk))
- clk_disable_unprepare(dcrtc->clk);
- } else if (dcrtc->dpms != dpms) {
- dcrtc->dpms = dpms;
- }
-}
-
-/*
- * Prepare for a mode set. Turn off overlay to ensure that we don't end
- * up with the overlay size being bigger than the active screen size.
- * We rely upon X refreshing this state after the mode set has completed.
- *
- * The mode_config.mutex will be held for this call
- */
-static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
-{
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct drm_plane *plane;
-
- /*
- * If we have an overlay plane associated with this CRTC, disable
- * it before the modeset to avoid its coordinates being outside
- * the new mode parameters.
- */
- plane = dcrtc->plane;
- if (plane) {
- drm_plane_force_disable(plane);
- WARN_ON(!armada_drm_plane_work_wait(drm_to_armada_plane(plane),
- HZ));
- }
-}
-
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_commit(struct drm_crtc *crtc)
+static void armada_drm_crtc_queue_state_event(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct drm_pending_vblank_event *event;
- if (dcrtc->dpms != DRM_MODE_DPMS_ON) {
- dcrtc->dpms = DRM_MODE_DPMS_ON;
- armada_drm_crtc_update(dcrtc);
+ /* If we have an event, we need vblank events enabled */
+ event = xchg(&crtc->state->event, NULL);
+ if (event) {
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ dcrtc->event = event;
}
}
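/*
 * The pending event moves through two xchg() handoffs: the commit path
 * claims it from crtc->state and parks it in dcrtc->event, and the IRQ
 * handler claims it from there, so it is sent exactly once. A minimal
 * sketch of the consuming side:
 *
 *	event = xchg(&dcrtc->event, NULL);
 *	if (event)
 *		drm_crtc_send_vblank_event(&dcrtc->crtc, event);
 */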
@@ -465,8 +171,8 @@ static void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
{
+ struct drm_pending_vblank_event *event;
void __iomem *base = dcrtc->base;
- struct drm_plane *ovl_plane;
if (stat & DMA_FF_UNDERFLOW)
DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
@@ -476,10 +182,6 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
if (stat & VSYNC_IRQ)
drm_crtc_handle_vblank(&dcrtc->crtc);
- ovl_plane = dcrtc->plane;
- if (ovl_plane)
- armada_drm_plane_work_run(dcrtc, ovl_plane);
-
spin_lock(&dcrtc->irq_lock);
if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
@@ -495,22 +197,35 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
writel_relaxed(val, base + LCD_SPU_ADV_REG);
}
- if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) {
- writel_relaxed(dcrtc->cursor_hw_pos,
- base + LCD_SPU_HWC_OVSA_HPXL_VLN);
- writel_relaxed(dcrtc->cursor_hw_sz,
- base + LCD_SPU_HWC_HPXL_VLN);
- armada_updatel(CFG_HWC_ENA,
- CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA,
- base + LCD_SPU_DMA_CTRL0);
- dcrtc->cursor_update = false;
+ if (stat & dcrtc->irq_ena & DUMB_FRAMEDONE) {
+ if (dcrtc->update_pending) {
+ armada_drm_crtc_update_regs(dcrtc, dcrtc->regs);
+ dcrtc->update_pending = false;
+ }
+ if (dcrtc->cursor_update) {
+ writel_relaxed(dcrtc->cursor_hw_pos,
+ base + LCD_SPU_HWC_OVSA_HPXL_VLN);
+ writel_relaxed(dcrtc->cursor_hw_sz,
+ base + LCD_SPU_HWC_HPXL_VLN);
+ armada_updatel(CFG_HWC_ENA,
+ CFG_HWC_ENA | CFG_HWC_1BITMOD |
+ CFG_HWC_1BITENA,
+ base + LCD_SPU_DMA_CTRL0);
+ dcrtc->cursor_update = false;
+ }
armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
}
-
spin_unlock(&dcrtc->irq_lock);
- if (stat & GRA_FRAME_IRQ)
- armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
+ if (stat & VSYNC_IRQ && !dcrtc->update_pending) {
+ event = xchg(&dcrtc->event, NULL);
+ if (event) {
+ spin_lock(&dcrtc->crtc.dev->event_lock);
+ drm_crtc_send_vblank_event(&dcrtc->crtc, event);
+ spin_unlock(&dcrtc->crtc.dev->event_lock);
+ drm_crtc_vblank_put(&dcrtc->crtc);
+ }
+ }
}
static irqreturn_t armada_drm_irq(int irq, void *arg)
@@ -537,107 +252,16 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
return IRQ_NONE;
}
-static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
-{
- struct drm_display_mode *adj = &dcrtc->crtc.mode;
- uint32_t val = 0;
-
- if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709)
- val |= CFG_CSC_YUV_CCIR709;
- if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO)
- val |= CFG_CSC_RGB_STUDIO;
-
- /*
- * In auto mode, set the colorimetry, based upon the HDMI spec.
- * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use
- * ITU601. It may be more appropriate to set this depending on
- * the source - but what if the graphic frame is YUV and the
- * video frame is RGB?
- */
- if ((adj->hdisplay == 1280 && adj->vdisplay == 720 &&
- !(adj->flags & DRM_MODE_FLAG_INTERLACE)) ||
- (adj->hdisplay == 1920 && adj->vdisplay == 1080)) {
- if (dcrtc->csc_yuv_mode == CSC_AUTO)
- val |= CFG_CSC_YUV_CCIR709;
- }
-
- /*
- * We assume we're connected to a TV-like device, so the YUV->RGB
- * conversion should produce a limited range. We should set this
- * depending on the connectors attached to this CRTC, and what
- * kind of device they report being connected.
- */
- if (dcrtc->csc_rgb_mode == CSC_AUTO)
- val |= CFG_CSC_RGB_STUDIO;
-
- return val;
-}
-
-static void armada_drm_gra_plane_regs(struct armada_regs *regs,
- struct drm_framebuffer *fb, struct armada_plane_state *state,
- int x, int y, bool interlaced)
-{
- unsigned int i;
- u32 ctrl0;
-
- i = armada_drm_crtc_calc_fb(fb, x, y, regs, interlaced);
- armada_reg_queue_set(regs, i, state->dst_yx, LCD_SPU_GRA_OVSA_HPXL_VLN);
- armada_reg_queue_set(regs, i, state->src_hw, LCD_SPU_GRA_HPXL_VLN);
- armada_reg_queue_set(regs, i, state->dst_hw, LCD_SPU_GZM_HPXL_VLN);
-
- ctrl0 = state->ctrl0;
- if (interlaced)
- ctrl0 |= CFG_GRA_FTOGGLE;
-
- armada_reg_queue_mod(regs, i, ctrl0, CFG_GRAFORMAT |
- CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
- CFG_SWAPYU | CFG_YUV2RGB) |
- CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
- CFG_GRA_HSMOOTH | CFG_GRA_ENA,
- LCD_SPU_DMA_CTRL0);
- armada_reg_queue_end(regs, i);
-}
-
-static void armada_drm_primary_set(struct drm_crtc *crtc,
- struct drm_plane *plane, int x, int y)
-{
- struct armada_plane_state *state = &drm_to_armada_plane(plane)->state;
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_regs regs[8];
- bool interlaced = dcrtc->interlaced;
-
- armada_drm_gra_plane_regs(regs, plane->fb, state, x, y, interlaced);
- armada_drm_crtc_update_regs(dcrtc, regs);
-}
-
/* The mode_config.mutex will be held for this call */
-static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode, struct drm_display_mode *adj,
- int x, int y, struct drm_framebuffer *old_fb)
+static void armada_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
+ struct drm_display_mode *adj = &crtc->state->adjusted_mode;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct armada_regs regs[17];
uint32_t lm, rm, tm, bm, val, sclk;
unsigned long flags;
unsigned i;
- bool interlaced;
-
- drm_framebuffer_get(crtc->primary->fb);
-
- interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
-
- val = CFG_GRA_ENA;
- val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
- val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
-
- if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
- val |= CFG_PALETTE_ENA;
-
- drm_to_armada_plane(crtc->primary)->state.ctrl0 = val;
- drm_to_armada_plane(crtc->primary)->state.src_hw =
- drm_to_armada_plane(crtc->primary)->state.dst_hw =
- adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
- drm_to_armada_plane(crtc->primary)->state.dst_yx = 0;
+ bool interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
i = 0;
rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
@@ -645,35 +269,15 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
tm = adj->crtc_vtotal - adj->crtc_vsync_end;
- DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n",
- adj->crtc_hdisplay,
- adj->crtc_hsync_start,
- adj->crtc_hsync_end,
- adj->crtc_htotal, lm, rm);
- DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n",
- adj->crtc_vdisplay,
- adj->crtc_vsync_start,
- adj->crtc_vsync_end,
- adj->crtc_vtotal, tm, bm);
-
- /* Wait for pending flips to complete */
- armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
- MAX_SCHEDULE_TIMEOUT);
-
- drm_crtc_vblank_off(crtc);
-
- val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
- if (val != dcrtc->dumb_ctrl) {
- dcrtc->dumb_ctrl = val;
- writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
- }
-
- /*
- * If we are blanked, we would have disabled the clock. Re-enable
- * it so that compute_clock() does the right thing.
- */
- if (!IS_ERR(dcrtc->clk) && dpms_blanked(dcrtc->dpms))
- WARN_ON(clk_prepare_enable(dcrtc->clk));
+ DRM_DEBUG_KMS("[CRTC:%d:%s] mode " DRM_MODE_FMT "\n",
+ crtc->base.id, crtc->name,
+ adj->base.id, adj->name, adj->vrefresh, adj->clock,
+ adj->crtc_hdisplay, adj->crtc_hsync_start,
+ adj->crtc_hsync_end, adj->crtc_htotal,
+ adj->crtc_vdisplay, adj->crtc_vsync_start,
+ adj->crtc_vsync_end, adj->crtc_vtotal,
+ adj->type, adj->flags);
+ DRM_DEBUG_KMS("lm %d rm %d tm %d bm %d\n", lm, rm, tm, bm);
/* Now compute the divider for real */
dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
@@ -690,25 +294,20 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
spin_lock_irqsave(&dcrtc->irq_lock, flags);
- /* Ensure graphic fifo is enabled */
- armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
-
/* Even interlaced/progressive frame */
dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
adj->crtc_htotal;
dcrtc->v[1].spu_v_porch = tm << 16 | bm;
val = adj->crtc_hsync_start;
- dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
- dcrtc->variant->spu_adv_reg;
+ dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN;
if (interlaced) {
/* Odd interlaced frame */
+ val -= adj->crtc_htotal / 2;
+ dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN;
dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
(1 << 16);
dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
- val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
- dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
- dcrtc->variant->spu_adv_reg;
} else {
dcrtc->v[0] = dcrtc->v[1];
}
@@ -721,77 +320,136 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
LCD_SPUT_V_H_TOTAL);
- if (dcrtc->variant->has_spu_adv_reg) {
+ if (dcrtc->variant->has_spu_adv_reg)
armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
- }
val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
- val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc);
- armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL);
+ /*
+ * The documentation doesn't indicate what the normal state of
+ * the sync signals is. Sebastian Hesselbart kindly probed
+ * these signals on his board to determine their state.
+ *
+ * The non-inverted state of the sync signals is active high.
+ * Setting these bits makes the appropriate signal active low.
+ */
+ val = 0;
+ if (adj->flags & DRM_MODE_FLAG_NCSYNC)
+ val |= CFG_INV_CSYNC;
+ if (adj->flags & DRM_MODE_FLAG_NHSYNC)
+ val |= CFG_INV_HSYNC;
+ if (adj->flags & DRM_MODE_FLAG_NVSYNC)
+ val |= CFG_INV_VSYNC;
+ armada_reg_queue_mod(regs, i, val, CFG_INV_CSYNC | CFG_INV_HSYNC |
+ CFG_INV_VSYNC, LCD_SPU_DUMB_CTRL);
armada_reg_queue_end(regs, i);
armada_drm_crtc_update_regs(dcrtc, regs);
-
- armada_drm_primary_set(crtc, crtc->primary, x, y);
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
- armada_drm_crtc_update(dcrtc);
+static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- drm_crtc_vblank_on(crtc);
- armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
- return 0;
+ dcrtc->regs_idx = 0;
+ dcrtc->regs = dcrtc->atomic_regs;
}
-/* The mode_config.mutex will be held for this call */
-static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_regs regs[4];
- unsigned i;
- i = armada_drm_crtc_calc_fb(crtc->primary->fb, crtc->x, crtc->y, regs,
- dcrtc->interlaced);
- armada_reg_queue_end(regs, i);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
- /* Wait for pending flips to complete */
- armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
- MAX_SCHEDULE_TIMEOUT);
+ armada_reg_queue_end(dcrtc->regs, dcrtc->regs_idx);
- /* Take a reference to the new fb as we're using it */
- drm_framebuffer_get(crtc->primary->fb);
+ /*
+ * If we aren't doing a full modeset, then we need to queue
+ * the event here.
+ */
+ if (!drm_atomic_crtc_needs_modeset(crtc->state)) {
+ dcrtc->update_pending = true;
+ armada_drm_crtc_queue_state_event(crtc);
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ } else {
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_crtc_update_regs(dcrtc, dcrtc->regs);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ }
+}
- /* Update the base in the CRTC */
- armada_drm_crtc_update_regs(dcrtc, regs);
+static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct drm_pending_vblank_event *event;
- /* Drop our previously held reference */
- armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
- return 0;
+ drm_crtc_vblank_off(crtc);
+ armada_drm_crtc_update(dcrtc, false);
+
+ if (!crtc->state->active) {
+ /*
+ * This modeset will be leaving the CRTC disabled, so
+ * call the backend to disable upstream clocks etc.
+ */
+ if (dcrtc->variant->disable)
+ dcrtc->variant->disable(dcrtc);
+
+ /*
+ * We will not receive any further vblank events.
+ * Send the flip_done event manually.
+ */
+ event = crtc->state->event;
+ crtc->state->event = NULL;
+ if (event) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+ }
}
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_disable(struct drm_crtc *crtc)
+static void armada_drm_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
- armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- /* Disable our primary plane when we disable the CRTC. */
- crtc->primary->funcs->disable_plane(crtc->primary, NULL);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+
+ if (!old_state->active) {
+ /*
+ * This modeset is enabling the CRTC after it has
+ * been disabled. Reverse the call to ->disable in
+ * the atomic_disable().
+ */
+ if (dcrtc->variant->enable)
+ dcrtc->variant->enable(dcrtc, &crtc->state->adjusted_mode);
+ }
+ armada_drm_crtc_update(dcrtc, true);
+ drm_crtc_vblank_on(crtc);
+
+ armada_drm_crtc_queue_state_event(crtc);
}
static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
- .dpms = armada_drm_crtc_dpms,
- .prepare = armada_drm_crtc_prepare,
- .commit = armada_drm_crtc_commit,
.mode_fixup = armada_drm_crtc_mode_fixup,
- .mode_set = armada_drm_crtc_mode_set,
- .mode_set_base = armada_drm_crtc_mode_set_base,
- .disable = armada_drm_crtc_disable,
+ .mode_set_nofb = armada_drm_crtc_mode_set_nofb,
+ .atomic_begin = armada_drm_crtc_atomic_begin,
+ .atomic_flush = armada_drm_crtc_atomic_flush,
+ .atomic_disable = armada_drm_crtc_atomic_disable,
+ .atomic_enable = armada_drm_crtc_atomic_enable,
};
static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
@@ -884,7 +542,6 @@ static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
if (!dcrtc->cursor_obj || !h || !w) {
spin_lock_irq(&dcrtc->irq_lock);
- armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
dcrtc->cursor_update = false;
armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
spin_unlock_irq(&dcrtc->irq_lock);
@@ -908,7 +565,6 @@ static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
spin_lock_irq(&dcrtc->irq_lock);
- armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
dcrtc->cursor_update = false;
armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
spin_unlock_irq(&dcrtc->irq_lock);
@@ -1016,8 +672,8 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
priv->dcrtc[dcrtc->num] = NULL;
drm_crtc_cleanup(&dcrtc->crtc);
- if (!IS_ERR(dcrtc->clk))
- clk_disable_unprepare(dcrtc->clk);
+ if (dcrtc->variant->disable)
+ dcrtc->variant->disable(dcrtc);
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA);
@@ -1026,93 +682,6 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
kfree(dcrtc);
}
-/*
- * The mode_config lock is held here, to prevent races between this
- * and a mode_set.
- */
-static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_plane_work *work;
- unsigned i;
- int ret;
-
- /* We don't support changing the pixel format */
- if (fb->format != crtc->primary->fb->format)
- return -EINVAL;
-
- work = armada_drm_crtc_alloc_plane_work(dcrtc->crtc.primary);
- if (!work)
- return -ENOMEM;
-
- work->event = event;
- work->old_fb = dcrtc->crtc.primary->fb;
-
- i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
- dcrtc->interlaced);
- armada_reg_queue_end(work->regs, i);
-
- /*
- * Ensure that we hold a reference on the new framebuffer.
- * This has to match the behaviour in mode_set.
- */
- drm_framebuffer_get(fb);
-
- ret = armada_drm_plane_work_queue(dcrtc, work);
- if (ret) {
- /* Undo our reference above */
- drm_framebuffer_put(fb);
- kfree(work);
- return ret;
- }
-
- /*
- * Don't take a reference on the new framebuffer;
- * drm_mode_page_flip_ioctl() has already grabbed a reference and
- * will _not_ drop that reference on successful return from this
- * function. Simply mark this new framebuffer as the current one.
- */
- dcrtc->crtc.primary->fb = fb;
-
- /*
- * Finally, if the display is blanked, we won't receive an
- * interrupt, so complete it now.
- */
- if (dpms_blanked(dcrtc->dpms))
- armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
-
- return 0;
-}
-
-static int
-armada_drm_crtc_set_property(struct drm_crtc *crtc,
- struct drm_property *property, uint64_t val)
-{
- struct armada_private *priv = crtc->dev->dev_private;
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- bool update_csc = false;
-
- if (property == priv->csc_yuv_prop) {
- dcrtc->csc_yuv_mode = val;
- update_csc = true;
- } else if (property == priv->csc_rgb_prop) {
- dcrtc->csc_rgb_mode = val;
- update_csc = true;
- }
-
- if (update_csc) {
- uint32_t val;
-
- val = dcrtc->spu_iopad_ctrl |
- armada_drm_crtc_calculate_csc(dcrtc);
- writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL);
- }
-
- return 0;
-}
-
/* These are called under the vbl_lock. */
static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
@@ -1136,257 +705,28 @@ static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc)
}
static const struct drm_crtc_funcs armada_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
.cursor_set = armada_drm_crtc_cursor_set,
.cursor_move = armada_drm_crtc_cursor_move,
.destroy = armada_drm_crtc_destroy,
- .set_config = drm_crtc_helper_set_config,
- .page_flip = armada_drm_crtc_page_flip,
- .set_property = armada_drm_crtc_set_property,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = armada_drm_crtc_enable_vblank,
.disable_vblank = armada_drm_crtc_disable_vblank,
};
-static void armada_drm_primary_update_state(struct drm_plane_state *state,
- struct armada_regs *regs)
-{
- struct armada_plane *dplane = drm_to_armada_plane(state->plane);
- struct armada_crtc *dcrtc = drm_to_armada_crtc(state->crtc);
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(state->fb);
- bool was_disabled;
- unsigned int idx = 0;
- u32 val;
-
- val = CFG_GRA_FMT(dfb->fmt) | CFG_GRA_MOD(dfb->mod);
- if (dfb->fmt > CFG_420)
- val |= CFG_PALETTE_ENA;
- if (state->visible)
- val |= CFG_GRA_ENA;
- if (drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst))
- val |= CFG_GRA_HSMOOTH;
-
- was_disabled = !(dplane->state.ctrl0 & CFG_GRA_ENA);
- if (was_disabled)
- armada_reg_queue_mod(regs, idx,
- 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
-
- dplane->state.ctrl0 = val;
- dplane->state.src_hw = (drm_rect_height(&state->src) & 0xffff0000) |
- drm_rect_width(&state->src) >> 16;
- dplane->state.dst_hw = drm_rect_height(&state->dst) << 16 |
- drm_rect_width(&state->dst);
- dplane->state.dst_yx = state->dst.y1 << 16 | state->dst.x1;
-
- armada_drm_gra_plane_regs(regs + idx, &dfb->fb, &dplane->state,
- state->src.x1 >> 16, state->src.y1 >> 16,
- dcrtc->interlaced);
-
- dplane->state.vsync_update = !was_disabled;
- dplane->state.changed = true;
-}
-
-static int armada_drm_primary_update(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct armada_plane *dplane = drm_to_armada_plane(plane);
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_plane_work *work;
- struct drm_plane_state state = {
- .plane = plane,
- .crtc = crtc,
- .fb = fb,
- .src_x = src_x,
- .src_y = src_y,
- .src_w = src_w,
- .src_h = src_h,
- .crtc_x = crtc_x,
- .crtc_y = crtc_y,
- .crtc_w = crtc_w,
- .crtc_h = crtc_h,
- .rotation = DRM_MODE_ROTATE_0,
- };
- struct drm_crtc_state crtc_state = {
- .crtc = crtc,
- .enable = crtc->enabled,
- .mode = crtc->mode,
- };
- int ret;
-
- ret = drm_atomic_helper_check_plane_state(&state, &crtc_state, 0,
- INT_MAX, true, false);
- if (ret)
- return ret;
-
- work = &dplane->works[dplane->next_work];
- work->fn = armada_drm_crtc_complete_frame_work;
-
- if (plane->fb != fb) {
- /*
- * Take a reference on the new framebuffer - we want to
- * hold on to it while the hardware is displaying it.
- */
- drm_framebuffer_reference(fb);
-
- work->old_fb = plane->fb;
- } else {
- work->old_fb = NULL;
- }
-
- armada_drm_primary_update_state(&state, work->regs);
-
- if (!dplane->state.changed)
- return 0;
-
- /* Wait for pending work to complete */
- if (armada_drm_plane_work_wait(dplane, HZ / 10) == 0)
- armada_drm_plane_work_cancel(dcrtc, dplane);
-
- if (!dplane->state.vsync_update) {
- work->fn(dcrtc, work);
- if (work->old_fb)
- drm_framebuffer_unreference(work->old_fb);
- return 0;
- }
-
- /* Queue it for update on the next interrupt if we are enabled */
- ret = armada_drm_plane_work_queue(dcrtc, work);
- if (ret) {
- work->fn(dcrtc, work);
- if (work->old_fb)
- drm_framebuffer_unreference(work->old_fb);
- }
-
- dplane->next_work = !dplane->next_work;
-
- return 0;
-}
-
-int armada_drm_plane_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct armada_plane *dplane = drm_to_armada_plane(plane);
- struct armada_crtc *dcrtc;
- struct armada_plane_work *work;
- unsigned int idx = 0;
- u32 sram_para1, enable_mask;
-
- if (!plane->crtc)
- return 0;
-
- /*
- * Arrange to power down most RAMs and FIFOs if this is the primary
- * plane, otherwise just the YUV FIFOs for the overlay plane.
- */
- if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
- CFG_PDWN32x32 | CFG_PDWN64x66;
- enable_mask = CFG_GRA_ENA;
- } else {
- sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
- enable_mask = CFG_DMA_ENA;
- }
-
- dplane->state.ctrl0 &= ~enable_mask;
-
- dcrtc = drm_to_armada_crtc(plane->crtc);
-
- /*
- * Try to disable the plane and drop our ref on the framebuffer
- * at the next frame update. If we fail for any reason, disable
- * the plane immediately.
- */
- work = &dplane->works[dplane->next_work];
- work->fn = armada_drm_crtc_complete_disable_work;
- work->cancel = armada_drm_crtc_complete_disable_work;
- work->old_fb = plane->fb;
-
- armada_reg_queue_mod(work->regs, idx,
- 0, enable_mask, LCD_SPU_DMA_CTRL0);
- armada_reg_queue_mod(work->regs, idx,
- sram_para1, 0, LCD_SPU_SRAM_PARA1);
- armada_reg_queue_end(work->regs, idx);
-
- /* Wait for any preceding work to complete, but don't wedge */
- if (WARN_ON(!armada_drm_plane_work_wait(dplane, HZ)))
- armada_drm_plane_work_cancel(dcrtc, dplane);
-
- if (armada_drm_plane_work_queue(dcrtc, work)) {
- work->fn(dcrtc, work);
- if (work->old_fb)
- drm_framebuffer_unreference(work->old_fb);
- }
-
- dplane->next_work = !dplane->next_work;
-
- return 0;
-}
-
-static const struct drm_plane_funcs armada_primary_plane_funcs = {
- .update_plane = armada_drm_primary_update,
- .disable_plane = armada_drm_plane_disable,
- .destroy = drm_primary_helper_destroy,
-};
-
-int armada_drm_plane_init(struct armada_plane *plane)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(plane->works); i++)
- plane->works[i].plane = &plane->base;
-
- init_waitqueue_head(&plane->frame_wait);
-
- return 0;
-}
-
-static const struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
- { CSC_AUTO, "Auto" },
- { CSC_YUV_CCIR601, "CCIR601" },
- { CSC_YUV_CCIR709, "CCIR709" },
-};
-
-static const struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
- { CSC_AUTO, "Auto" },
- { CSC_RGB_COMPUTER, "Computer system" },
- { CSC_RGB_STUDIO, "Studio" },
-};
-
-static int armada_drm_crtc_create_properties(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
-
- if (priv->csc_yuv_prop)
- return 0;
-
- priv->csc_yuv_prop = drm_property_create_enum(dev, 0,
- "CSC_YUV", armada_drm_csc_yuv_enum_list,
- ARRAY_SIZE(armada_drm_csc_yuv_enum_list));
- priv->csc_rgb_prop = drm_property_create_enum(dev, 0,
- "CSC_RGB", armada_drm_csc_rgb_enum_list,
- ARRAY_SIZE(armada_drm_csc_rgb_enum_list));
-
- if (!priv->csc_yuv_prop || !priv->csc_rgb_prop)
- return -ENOMEM;
-
- return 0;
-}
-
static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
struct resource *res, int irq, const struct armada_variant *variant,
struct device_node *port)
{
struct armada_private *priv = drm->dev_private;
struct armada_crtc *dcrtc;
- struct armada_plane *primary;
+ struct drm_plane *primary;
void __iomem *base;
int ret;
- ret = armada_drm_crtc_create_properties(drm);
- if (ret)
- return ret;
-
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -1404,8 +744,6 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
dcrtc->base = base;
dcrtc->num = drm->mode_config.num_crtc;
dcrtc->clk = ERR_PTR(-EINVAL);
- dcrtc->csc_yuv_mode = CSC_AUTO;
- dcrtc->csc_rgb_mode = CSC_AUTO;
dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
spin_lock_init(&dcrtc->irq_lock);
@@ -1449,39 +787,23 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
goto err_crtc;
}
- ret = armada_drm_plane_init(primary);
- if (ret) {
- kfree(primary);
- goto err_crtc;
- }
-
- ret = drm_universal_plane_init(drm, &primary->base, 0,
- &armada_primary_plane_funcs,
- armada_primary_formats,
- ARRAY_SIZE(armada_primary_formats),
- NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ ret = armada_drm_primary_plane_init(drm, primary);
if (ret) {
kfree(primary);
goto err_crtc;
}
- ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
+ ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, primary, NULL,
&armada_crtc_funcs, NULL);
if (ret)
goto err_crtc_init;
drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
- drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
- dcrtc->csc_yuv_mode);
- drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
- dcrtc->csc_rgb_mode);
-
return armada_overlay_plane_create(drm, 1 << dcrtc->num);
err_crtc_init:
- primary->base.funcs->destroy(&primary->base);
+ primary->funcs->destroy(primary);
err_crtc:
kfree(dcrtc);
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 445829b8877a..7ebd337b60af 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -32,49 +32,8 @@ struct armada_regs {
armada_reg_queue_mod(_r, _i, 0, 0, ~0)
struct armada_crtc;
-struct armada_plane;
struct armada_variant;
-struct armada_plane_work {
- void (*fn)(struct armada_crtc *, struct armada_plane_work *);
- void (*cancel)(struct armada_crtc *, struct armada_plane_work *);
- bool need_kfree;
- struct drm_plane *plane;
- struct drm_framebuffer *old_fb;
- struct drm_pending_vblank_event *event;
- struct armada_regs regs[14];
-};
-
-struct armada_plane_state {
- u16 src_x;
- u16 src_y;
- u32 src_hw;
- u32 dst_hw;
- u32 dst_yx;
- u32 ctrl0;
- bool changed;
- bool vsync_update;
-};
-
-struct armada_plane {
- struct drm_plane base;
- wait_queue_head_t frame_wait;
- bool next_work;
- struct armada_plane_work works[2];
- struct armada_plane_work *work;
- struct armada_plane_state state;
-};
-#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
-
-int armada_drm_plane_init(struct armada_plane *plane);
-int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
- struct armada_plane_work *work);
-int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout);
-void armada_drm_plane_work_cancel(struct armada_crtc *dcrtc,
- struct armada_plane *plane);
-void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
- int x, int y);
-
struct armada_crtc {
struct drm_crtc crtc;
const struct armada_variant *variant;
@@ -89,10 +48,6 @@ struct armada_crtc {
} v[2];
bool interlaced;
bool cursor_update;
- uint8_t csc_yuv_mode;
- uint8_t csc_rgb_mode;
-
- struct drm_plane *plane;
struct armada_gem_object *cursor_obj;
int cursor_x;
@@ -102,21 +57,22 @@ struct armada_crtc {
uint32_t cursor_w;
uint32_t cursor_h;
- int dpms;
uint32_t cfg_dumb_ctrl;
- uint32_t dumb_ctrl;
uint32_t spu_iopad_ctrl;
spinlock_t irq_lock;
uint32_t irq_ena;
+
+ bool update_pending;
+ struct drm_pending_vblank_event *event;
+ struct armada_regs atomic_regs[32];
+ struct armada_regs *regs;
+ unsigned int regs_idx;
};
#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
-int armada_drm_plane_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx);
-
extern struct platform_driver armada_lcd_platform_driver;
#endif
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index cc4c557c9f66..f09083ff15d3 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -42,11 +42,12 @@ struct armada_private;
struct armada_variant {
bool has_spu_adv_reg;
- uint32_t spu_adv_reg;
int (*init)(struct armada_crtc *, struct device *);
int (*compute_clock)(struct armada_crtc *,
const struct drm_display_mode *,
uint32_t *);
+ void (*disable)(struct armada_crtc *);
+ void (*enable)(struct armada_crtc *, const struct drm_display_mode *);
};
/* Variant ops */
@@ -54,14 +55,10 @@ extern const struct armada_variant armada510_ops;
struct armada_private {
struct drm_device drm;
- struct work_struct fb_unref_work;
- DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
struct drm_fb_helper *fbdev;
struct armada_crtc *dcrtc[2];
struct drm_mm linear; /* protected by linear_lock */
struct mutex linear_lock;
- struct drm_property *csc_yuv_prop;
- struct drm_property *csc_rgb_prop;
struct drm_property *colorkey_prop;
struct drm_property *colorkey_min_prop;
struct drm_property *colorkey_max_prop;
@@ -76,13 +73,6 @@ struct armada_private {
#endif
};
-void __armada_drm_queue_unref_work(struct drm_device *,
- struct drm_framebuffer *);
-void armada_drm_queue_unref_work(struct drm_device *,
- struct drm_framebuffer *);
-
-extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
-
int armada_fbdev_init(struct drm_device *);
void armada_fbdev_fini(struct drm_device *);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 4b11b6b52f1d..fa31589b4fc0 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -9,46 +9,18 @@
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_gem.h"
+#include "armada_fb.h"
#include "armada_hw.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
-static void armada_drm_unref_work(struct work_struct *work)
-{
- struct armada_private *priv =
- container_of(work, struct armada_private, fb_unref_work);
- struct drm_framebuffer *fb;
-
- while (kfifo_get(&priv->fb_unref, &fb))
- drm_framebuffer_put(fb);
-}
-
-/* Must be called with dev->event_lock held */
-void __armada_drm_queue_unref_work(struct drm_device *dev,
- struct drm_framebuffer *fb)
-{
- struct armada_private *priv = dev->dev_private;
-
- WARN_ON(!kfifo_put(&priv->fb_unref, fb));
- schedule_work(&priv->fb_unref_work);
-}
-
-void armada_drm_queue_unref_work(struct drm_device *dev,
- struct drm_framebuffer *fb)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- __armada_drm_queue_unref_work(dev, fb);
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
static struct drm_ioctl_desc armada_ioctls[] = {
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,0),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0),
@@ -72,11 +44,18 @@ static struct drm_driver armada_drm_driver = {
.desc = "Armada SoC DRM",
.date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME,
+ DRIVER_PRIME | DRIVER_ATOMIC,
.ioctls = armada_ioctls,
.fops = &armada_drm_fops,
};
+static const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
+ .fb_create = armada_fb_create,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
static int armada_drm_bind(struct device *dev)
{
struct armada_private *priv;
@@ -109,7 +88,7 @@ static int armada_drm_bind(struct device *dev)
/*
* The drm_device structure must be at the start of
- * armada_private for drm_dev_unref() to work correctly.
+ * armada_private for drm_dev_put() to work correctly.
*/
BUILD_BUG_ON(offsetof(struct armada_private, drm) != 0);
@@ -125,9 +104,6 @@ static int armada_drm_bind(struct device *dev)
dev_set_drvdata(dev, &priv->drm);
- INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
- INIT_KFIFO(priv->fb_unref);
-
/* Mode setting support */
drm_mode_config_init(&priv->drm);
priv->drm.mode_config.min_width = 320;
@@ -155,6 +131,8 @@ static int armada_drm_bind(struct device *dev)
priv->drm.irq_enabled = true;
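+ /* Initialise the software state of all modeset objects via their ->reset hooks */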
+ drm_mode_config_reset(&priv->drm);
+
ret = armada_fbdev_init(&priv->drm);
if (ret)
goto err_comp;
@@ -179,8 +157,7 @@ static int armada_drm_bind(struct device *dev)
err_kms:
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
- drm_dev_unref(&priv->drm);
+ drm_dev_put(&priv->drm);
return ret;
}
@@ -198,9 +175,8 @@ static void armada_drm_unbind(struct device *dev)
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
- drm_dev_unref(&priv->drm);
+ drm_dev_put(&priv->drm);
}
static int compare_of(struct device *dev, void *data)
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index ac92bce07ecd..6bd638a54579 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -7,30 +7,15 @@
*/
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
-static void armada_fb_destroy(struct drm_framebuffer *fb)
-{
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
-
- drm_framebuffer_cleanup(&dfb->fb);
- drm_gem_object_put_unlocked(&dfb->obj->obj);
- kfree(dfb);
-}
-
-static int armada_fb_create_handle(struct drm_framebuffer *fb,
- struct drm_file *dfile, unsigned int *handle)
-{
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
- return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
-}
-
static const struct drm_framebuffer_funcs armada_fb_funcs = {
- .destroy = armada_fb_destroy,
- .create_handle = armada_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
};
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
@@ -78,7 +63,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
dfb->fmt = format;
dfb->mod = config;
- dfb->obj = obj;
+ dfb->fb.obj[0] = &obj->obj;
drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode);
@@ -99,7 +84,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
return dfb;
}
-static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode)
{
struct armada_gem_object *obj;
@@ -153,8 +138,3 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
return ERR_PTR(ret);
}
-
-const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
- .fb_create = armada_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
-};
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
index 48073c4f54d8..476daad0a36a 100644
--- a/drivers/gpu/drm/armada/armada_fb.h
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -10,15 +10,15 @@
struct armada_framebuffer {
struct drm_framebuffer fb;
- struct armada_gem_object *obj;
uint8_t fmt;
uint8_t mod;
};
#define drm_fb_to_armada_fb(dfb) \
container_of(dfb, struct armada_framebuffer, fb)
-#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
+#define drm_fb_obj(fb) drm_to_armada_gem((fb)->obj[0])
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
const struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
-
+struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+ struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode);
#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 2a59db0994b2..8d23700848df 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -24,7 +24,7 @@ static /*const*/ struct fb_ops armada_fb_ops = {
.fb_imageblit = drm_fb_helper_cfb_imageblit,
};
-static int armada_fb_create(struct drm_fb_helper *fbh,
+static int armada_fbdev_create(struct drm_fb_helper *fbh,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fbh->dev;
@@ -108,7 +108,7 @@ static int armada_fb_probe(struct drm_fb_helper *fbh,
int ret = 0;
if (!fbh->fb) {
- ret = armada_fb_create(fbh, sizes);
+ ret = armada_fbdev_create(fbh, sizes);
if (ret == 0)
ret = 1;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index a97f509743a5..892c1d9304bb 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -13,25 +13,14 @@
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
-static int armada_gem_vm_fault(struct vm_fault *vmf)
+static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
struct drm_gem_object *gobj = vmf->vma->vm_private_data;
struct armada_gem_object *obj = drm_to_armada_gem(gobj);
unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
- int ret;
pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
- ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
-
- switch (ret) {
- case 0:
- case -EBUSY:
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
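+ /* vmf_insert_pfn() returns a vm_fault_t directly, so no errno translation is needed */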
+ return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
const struct vm_operations_struct armada_gem_vm_ops = {
@@ -490,8 +479,6 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
.map_dma_buf = armada_gem_prime_map_dma_buf,
.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .map_atomic = armada_gem_dmabuf_no_kmap,
- .unmap_atomic = armada_gem_dmabuf_no_kunmap,
.map = armada_gem_dmabuf_no_kmap,
.unmap = armada_gem_dmabuf_no_kunmap,
.mmap = armada_gem_dmabuf_mmap,
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
index 345dc4d0851e..277580b36758 100644
--- a/drivers/gpu/drm/armada/armada_hw.h
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -316,4 +316,19 @@ enum {
PWRDN_IRQ_LEVEL = 1 << 0,
};
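+/*
+ * Pack drm_rect sizes and positions into the hardware's "height:width"
+ * and "y:x" register layouts; the _fp variant takes 16.16 fixed-point
+ * source rectangles.
+ */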
+static inline u32 armada_rect_hw_fp(struct drm_rect *r)
+{
+ return (drm_rect_height(r) & 0xffff0000) | drm_rect_width(r) >> 16;
+}
+
+static inline u32 armada_rect_hw(struct drm_rect *r)
+{
+ return drm_rect_height(r) << 16 | (drm_rect_width(r) & 0x0000ffff);
+}
+
+static inline u32 armada_rect_yx(struct drm_rect *r)
+{
+ return (r)->y1 << 16 | ((r)->x1 & 0x0000ffff);
+}
+
#endif
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index afa7ded3ae31..eb7dfb65ef47 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -7,358 +7,468 @@
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/armada_drm.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
-#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
+#include "armada_plane.h"
#include "armada_trace.h"
-struct armada_ovl_plane_properties {
- uint32_t colorkey_yr;
- uint32_t colorkey_ug;
- uint32_t colorkey_vb;
-#define K2R(val) (((val) >> 0) & 0xff)
-#define K2G(val) (((val) >> 8) & 0xff)
-#define K2B(val) (((val) >> 16) & 0xff)
- int16_t brightness;
- uint16_t contrast;
- uint16_t saturation;
- uint32_t colorkey_mode;
- uint32_t colorkey_enable;
-};
-
-struct armada_ovl_plane {
- struct armada_plane base;
- struct armada_ovl_plane_properties prop;
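+/* Default brightness/contrast/saturation, matching the old property defaults */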
+#define DEFAULT_BRIGHTNESS 0
+#define DEFAULT_CONTRAST 0x4000
+#define DEFAULT_SATURATION 0x4000
+#define DEFAULT_ENCODING DRM_COLOR_YCBCR_BT601
+
+struct armada_overlay_state {
+ struct drm_plane_state base;
+ u32 colorkey_yr;
+ u32 colorkey_ug;
+ u32 colorkey_vb;
+ u32 colorkey_mode;
+ u32 colorkey_enable;
+ s16 brightness;
+ u16 contrast;
+ u16 saturation;
};
-#define drm_to_armada_ovl_plane(p) \
- container_of(p, struct armada_ovl_plane, base.base)
-
+#define drm_to_overlay_state(s) \
+ container_of(s, struct armada_overlay_state, base)
-static void
-armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
- struct armada_crtc *dcrtc)
+static inline u32 armada_spu_contrast(struct drm_plane_state *state)
{
- writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
- writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
- writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);
+ return drm_to_overlay_state(state)->brightness << 16 |
+ drm_to_overlay_state(state)->contrast;
+}
- writel_relaxed(prop->brightness << 16 | prop->contrast,
- dcrtc->base + LCD_SPU_CONTRAST);
+static inline u32 armada_spu_saturation(struct drm_plane_state *state)
+{
/* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
- writel_relaxed(prop->saturation << 16,
- dcrtc->base + LCD_SPU_SATURATION);
- writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
-
- spin_lock_irq(&dcrtc->irq_lock);
- armada_updatel(prop->colorkey_mode,
- CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
- dcrtc->base + LCD_SPU_DMA_CTRL1);
- if (dcrtc->variant->has_spu_adv_reg)
- armada_updatel(prop->colorkey_enable,
- ADV_GRACOLORKEY | ADV_VIDCOLORKEY,
- dcrtc->base + LCD_SPU_ADV_REG);
- spin_unlock_irq(&dcrtc->irq_lock);
+ return drm_to_overlay_state(state)->saturation << 16;
}
-/* === Plane support === */
-static void armada_ovl_plane_work(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
+static inline u32 armada_csc(struct drm_plane_state *state)
{
- unsigned long flags;
-
- trace_armada_ovl_plane_work(&dcrtc->crtc, work->plane);
-
- spin_lock_irqsave(&dcrtc->irq_lock, flags);
- armada_drm_crtc_update_regs(dcrtc, work->regs);
- spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+ /*
+ * The CFG_CSC_RGB_* settings control the output of the colour space
+ * converter, setting the range of output values it produces. Since
+ * we will be blending with the full-range graphics, we need to
+ * produce full-range RGB output from the conversion.
+ */
+ return CFG_CSC_RGB_COMPUTER |
+ (state->color_encoding == DRM_COLOR_YCBCR_BT709 ?
+ CFG_CSC_YUV_CCIR709 : CFG_CSC_YUV_CCIR601);
}
-static void armada_ovl_plane_update_state(struct drm_plane_state *state,
- struct armada_regs *regs)
+/* === Plane support === */
+static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(state->plane);
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(state->fb);
- const struct drm_format_info *format;
- unsigned int idx = 0;
- bool fb_changed;
- u32 val, ctrl0;
- u16 src_x, src_y;
+ struct drm_plane_state *state = plane->state;
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ unsigned int idx;
+ u32 cfg, cfg_mask, val;
- ctrl0 = CFG_DMA_FMT(dfb->fmt) | CFG_DMA_MOD(dfb->mod) | CFG_CBSH_ENA;
- if (state->visible)
- ctrl0 |= CFG_DMA_ENA;
- if (drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst))
- ctrl0 |= CFG_DMA_HSMOOTH;
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
- /*
- * Shifting a YUV packed format image by one pixel causes the U/V
- * planes to swap. Compensate for it by also toggling the UV swap.
- */
- format = dfb->fb.format;
- if (format->num_planes == 1 && state->src.x1 >> 16 & (format->hsub - 1))
- ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+ if (!state->fb || WARN_ON(!state->crtc))
+ return;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
+ plane->base.id, plane->name,
+ state->crtc->base.id, state->crtc->name,
+ state->fb->base.id,
+ old_state->visible, state->visible);
+
+ dcrtc = drm_to_armada_crtc(state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
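+ /* Append this plane's register updates to the CRTC's register queue */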
- if (~dplane->base.state.ctrl0 & ctrl0 & CFG_DMA_ENA) {
- /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
+ idx = 0;
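+ /* Power up the Y/U/V FIFOs on a 0->1 visibility transition */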
+ if (!old_state->visible && state->visible)
armada_reg_queue_mod(regs, idx,
0, CFG_PDWN16x66 | CFG_PDWN32x66,
LCD_SPU_SRAM_PARA1);
- }
-
- fb_changed = dplane->base.base.fb != &dfb->fb ||
- dplane->base.state.src_x != state->src.x1 >> 16 ||
- dplane->base.state.src_y != state->src.y1 >> 16;
-
- dplane->base.state.vsync_update = fb_changed;
-
+ val = armada_rect_hw_fp(&state->src);
+ if (armada_rect_hw_fp(&old_state->src) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_HPXL_VLN);
+ val = armada_rect_yx(&state->dst);
+ if (armada_rect_yx(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_OVSA_HPXL_VLN);
+ val = armada_rect_hw(&state->dst);
+ if (armada_rect_hw(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DZM_HPXL_VLN);
/* FIXME: overlay on an interlaced display */
- if (fb_changed) {
- u32 addrs[3];
-
- dplane->base.state.src_y = src_y = state->src.y1 >> 16;
- dplane->base.state.src_x = src_x = state->src.x1 >> 16;
+ if (old_state->src.x1 != state->src.x1 ||
+ old_state->src.y1 != state->src.y1 ||
+ old_state->fb != state->fb) {
+ const struct drm_format_info *format;
+ u16 src_x, pitches[3];
+ u32 addrs[2][3];
- armada_drm_plane_calc_addrs(addrs, &dfb->fb, src_x, src_y);
+ armada_drm_plane_calc(state, addrs, pitches, false);
- armada_reg_queue_set(regs, idx, addrs[0],
+ armada_reg_queue_set(regs, idx, addrs[0][0],
LCD_SPU_DMA_START_ADDR_Y0);
- armada_reg_queue_set(regs, idx, addrs[1],
+ armada_reg_queue_set(regs, idx, addrs[0][1],
LCD_SPU_DMA_START_ADDR_U0);
- armada_reg_queue_set(regs, idx, addrs[2],
+ armada_reg_queue_set(regs, idx, addrs[0][2],
LCD_SPU_DMA_START_ADDR_V0);
- armada_reg_queue_set(regs, idx, addrs[0],
+ armada_reg_queue_set(regs, idx, addrs[1][0],
LCD_SPU_DMA_START_ADDR_Y1);
- armada_reg_queue_set(regs, idx, addrs[1],
+ armada_reg_queue_set(regs, idx, addrs[1][1],
LCD_SPU_DMA_START_ADDR_U1);
- armada_reg_queue_set(regs, idx, addrs[2],
+ armada_reg_queue_set(regs, idx, addrs[1][2],
LCD_SPU_DMA_START_ADDR_V1);
- val = dfb->fb.pitches[0] << 16 | dfb->fb.pitches[0];
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_PITCH_YC);
- val = dfb->fb.pitches[1] << 16 | dfb->fb.pitches[2];
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_PITCH_UV);
- }
+ val = pitches[0] << 16 | pitches[0];
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_YC);
+ val = pitches[1] << 16 | pitches[2];
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_UV);
- val = (drm_rect_height(&state->src) & 0xffff0000) |
- drm_rect_width(&state->src) >> 16;
- if (dplane->base.state.src_hw != val) {
- dplane->base.state.src_hw = val;
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_HPXL_VLN);
- }
+ cfg = CFG_DMA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) |
+ CFG_DMA_MOD(drm_fb_to_armada_fb(state->fb)->mod) |
+ CFG_CBSH_ENA;
+ if (state->visible)
+ cfg |= CFG_DMA_ENA;
- val = drm_rect_height(&state->dst) << 16 | drm_rect_width(&state->dst);
- if (dplane->base.state.dst_hw != val) {
- dplane->base.state.dst_hw = val;
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DZM_HPXL_VLN);
+ /*
+ * Shifting a YUV packed format image by one pixel causes the
+ * U/V planes to swap. Compensate for it by also toggling
+ * the UV swap.
+ */
+ format = state->fb->format;
+ src_x = state->src.x1 >> 16;
+ if (format->num_planes == 1 && src_x & (format->hsub - 1))
+ cfg ^= CFG_DMA_MOD(CFG_SWAPUV);
+ cfg_mask = CFG_CBSH_ENA | CFG_DMAFORMAT |
+ CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_DMA_FTOGGLE | CFG_DMA_TSTMODE |
+ CFG_DMA_ENA;
+ } else if (old_state->visible != state->visible) {
+ cfg = state->visible ? CFG_DMA_ENA : 0;
+ cfg_mask = CFG_DMA_ENA;
+ } else {
+ cfg = cfg_mask = 0;
}
-
- val = state->dst.y1 << 16 | state->dst.x1;
- if (dplane->base.state.dst_yx != val) {
- dplane->base.state.dst_yx = val;
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_OVSA_HPXL_VLN);
+ if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) ||
+ drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) {
+ cfg_mask |= CFG_DMA_HSMOOTH;
+ if (drm_rect_width(&state->src) >> 16 !=
+ drm_rect_width(&state->dst))
+ cfg |= CFG_DMA_HSMOOTH;
}
- if (dplane->base.state.ctrl0 != ctrl0) {
- dplane->base.state.ctrl0 = ctrl0;
- armada_reg_queue_mod(regs, idx, ctrl0,
- CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
- CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
- CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
- CFG_YUV2RGB) | CFG_DMA_ENA,
- LCD_SPU_DMA_CTRL0);
- dplane->base.state.vsync_update = true;
- }
+ if (cfg_mask)
+ armada_reg_queue_mod(regs, idx, cfg, cfg_mask,
+ LCD_SPU_DMA_CTRL0);
+
+ val = armada_spu_contrast(state);
+ if ((!old_state->visible && state->visible) ||
+ armada_spu_contrast(old_state) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_CONTRAST);
+ val = armada_spu_saturation(state);
+ if ((!old_state->visible && state->visible) ||
+ armada_spu_saturation(old_state) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_SATURATION);
+ if (!old_state->visible && state->visible)
+ armada_reg_queue_set(regs, idx, 0x00002000, LCD_SPU_CBSH_HUE);
+ val = armada_csc(state);
+ if ((!old_state->visible && state->visible) ||
+ armada_csc(old_state) != val)
+ armada_reg_queue_mod(regs, idx, val, CFG_CSC_MASK,
+ LCD_SPU_IOPAD_CONTROL);
+ val = drm_to_overlay_state(state)->colorkey_yr;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_yr != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_Y);
+ val = drm_to_overlay_state(state)->colorkey_ug;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_ug != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_U);
+ val = drm_to_overlay_state(state)->colorkey_vb;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_vb != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_V);
+ val = drm_to_overlay_state(state)->colorkey_mode;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_mode != val)
+ armada_reg_queue_mod(regs, idx, val, CFG_CKMODE_MASK |
+ CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+ LCD_SPU_DMA_CTRL1);
+ val = drm_to_overlay_state(state)->colorkey_enable;
+ if (((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_enable != val) &&
+ dcrtc->variant->has_spu_adv_reg)
+ armada_reg_queue_mod(regs, idx, val, ADV_GRACOLORKEY |
+ ADV_VIDCOLORKEY, LCD_SPU_ADV_REG);
+
+ dcrtc->regs_idx += idx;
+}
+
+static void armada_drm_overlay_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ unsigned int idx = 0;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+ if (!old_state->crtc)
+ return;
- dplane->base.state.changed = idx != 0;
+ DRM_DEBUG_KMS("[PLANE:%d:%s] was on [CRTC:%d:%s] with [FB:%d]\n",
+ plane->base.id, plane->name,
+ old_state->crtc->base.id, old_state->crtc->name,
+ old_state->fb->base.id);
- armada_reg_queue_end(regs, idx);
+ dcrtc = drm_to_armada_crtc(old_state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
+
+ /* Disable plane and power down the YUV FIFOs */
+ armada_reg_queue_mod(regs, idx, 0, CFG_DMA_ENA, LCD_SPU_DMA_CTRL0);
+ armada_reg_queue_mod(regs, idx, CFG_PDWN16x66 | CFG_PDWN32x66, 0,
+ LCD_SPU_SRAM_PARA1);
+
+ dcrtc->regs_idx += idx;
}
+static const struct drm_plane_helper_funcs armada_overlay_plane_helper_funcs = {
+ .prepare_fb = armada_drm_plane_prepare_fb,
+ .cleanup_fb = armada_drm_plane_cleanup_fb,
+ .atomic_check = armada_drm_plane_atomic_check,
+ .atomic_update = armada_drm_overlay_plane_atomic_update,
+ .atomic_disable = armada_drm_overlay_plane_atomic_disable,
+};
+
static int
-armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+armada_overlay_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx)
{
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_plane_work *work;
- struct drm_plane_state state = {
- .plane = plane,
- .crtc = crtc,
- .fb = fb,
- .src_x = src_x,
- .src_y = src_y,
- .src_w = src_w,
- .src_h = src_h,
- .crtc_x = crtc_x,
- .crtc_y = crtc_y,
- .crtc_w = crtc_w,
- .crtc_h = crtc_h,
- .rotation = DRM_MODE_ROTATE_0,
- };
- struct drm_crtc_state crtc_state = {
- .crtc = crtc,
- .enable = crtc->enabled,
- .mode = crtc->mode,
- };
- int ret;
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret = 0;
trace_armada_ovl_plane_update(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
- ret = drm_atomic_helper_check_plane_state(&state, &crtc_state, 0,
- INT_MAX, true, false);
- if (ret)
- return ret;
-
- work = &dplane->base.works[dplane->base.next_work];
-
- if (plane->fb != fb) {
- /*
- * Take a reference on the new framebuffer - we want to
- * hold on to it while the hardware is displaying it.
- */
- drm_framebuffer_reference(fb);
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
- work->old_fb = plane->fb;
- } else {
- work->old_fb = NULL;
+ state->acquire_ctx = ctx;
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
}
- armada_ovl_plane_update_state(&state, work->regs);
-
- if (!dplane->base.state.changed)
- return 0;
-
- /* Wait for pending work to complete */
- if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
- armada_drm_plane_work_cancel(dcrtc, &dplane->base);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret != 0)
+ goto fail;
+
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_h = crtc_h;
+ plane_state->crtc_w = crtc_w;
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_h = src_h;
+ plane_state->src_w = src_w;
+
+ ret = drm_atomic_nonblocking_commit(state);
+fail:
+ drm_atomic_state_put(state);
+ return ret;
+}
- /* Just updating the position/size? */
- if (!dplane->base.state.vsync_update) {
- armada_ovl_plane_work(dcrtc, work);
- return 0;
- }
+static void armada_ovl_plane_destroy(struct drm_plane *plane)
+{
+ drm_plane_cleanup(plane);
+ kfree(plane);
+}
- if (!dcrtc->plane) {
- dcrtc->plane = plane;
- armada_ovl_update_attr(&dplane->prop, dcrtc);
+static void armada_overlay_reset(struct drm_plane *plane)
+{
+ struct armada_overlay_state *state;
+
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+ kfree(plane->state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ state->base.plane = plane;
+ state->base.color_encoding = DEFAULT_ENCODING;
+ state->base.color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+ state->base.rotation = DRM_MODE_ROTATE_0;
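+ /* Each colorkey register packs max:min:value:alpha bytes, high to low */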
+ state->colorkey_yr = 0xfefefe00;
+ state->colorkey_ug = 0x01010100;
+ state->colorkey_vb = 0x01010100;
+ state->colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
+ CFG_ALPHAM_GRA | CFG_ALPHA(0);
+ state->colorkey_enable = ADV_GRACOLORKEY;
+ state->brightness = DEFAULT_BRIGHTNESS;
+ state->contrast = DEFAULT_CONTRAST;
+ state->saturation = DEFAULT_SATURATION;
}
-
- /* Queue it for update on the next interrupt if we are enabled */
- ret = armada_drm_plane_work_queue(dcrtc, work);
- if (ret)
- DRM_ERROR("failed to queue plane work: %d\n", ret);
-
- dplane->base.next_work = !dplane->base.next_work;
-
- return 0;
+ plane->state = &state->base;
}
-static void armada_ovl_plane_destroy(struct drm_plane *plane)
+struct drm_plane_state *
+armada_overlay_duplicate_state(struct drm_plane *plane)
{
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
+ struct armada_overlay_state *state;
- drm_plane_cleanup(plane);
+ if (WARN_ON(!plane->state))
+ return NULL;
- kfree(dplane);
+ state = kmemdup(plane->state, sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+ return &state->base;
}
-static int armada_ovl_plane_set_property(struct drm_plane *plane,
- struct drm_property *property, uint64_t val)
+static int armada_overlay_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val)
{
struct armada_private *priv = plane->dev->dev_private;
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
- bool update_attr = false;
+#define K2R(val) (((val) >> 0) & 0xff)
+#define K2G(val) (((val) >> 8) & 0xff)
+#define K2B(val) (((val) >> 16) & 0xff)
if (property == priv->colorkey_prop) {
#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
- dplane->prop.colorkey_yr = CCC(K2R(val));
- dplane->prop.colorkey_ug = CCC(K2G(val));
- dplane->prop.colorkey_vb = CCC(K2B(val));
+ drm_to_overlay_state(state)->colorkey_yr = CCC(K2R(val));
+ drm_to_overlay_state(state)->colorkey_ug = CCC(K2G(val));
+ drm_to_overlay_state(state)->colorkey_vb = CCC(K2B(val));
#undef CCC
- update_attr = true;
} else if (property == priv->colorkey_min_prop) {
- dplane->prop.colorkey_yr &= ~0x00ff0000;
- dplane->prop.colorkey_yr |= K2R(val) << 16;
- dplane->prop.colorkey_ug &= ~0x00ff0000;
- dplane->prop.colorkey_ug |= K2G(val) << 16;
- dplane->prop.colorkey_vb &= ~0x00ff0000;
- dplane->prop.colorkey_vb |= K2B(val) << 16;
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0x00ff0000;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 16;
+ drm_to_overlay_state(state)->colorkey_ug &= ~0x00ff0000;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 16;
+ drm_to_overlay_state(state)->colorkey_vb &= ~0x00ff0000;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 16;
} else if (property == priv->colorkey_max_prop) {
- dplane->prop.colorkey_yr &= ~0xff000000;
- dplane->prop.colorkey_yr |= K2R(val) << 24;
- dplane->prop.colorkey_ug &= ~0xff000000;
- dplane->prop.colorkey_ug |= K2G(val) << 24;
- dplane->prop.colorkey_vb &= ~0xff000000;
- dplane->prop.colorkey_vb |= K2B(val) << 24;
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0xff000000;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 24;
+ drm_to_overlay_state(state)->colorkey_ug &= ~0xff000000;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 24;
+ drm_to_overlay_state(state)->colorkey_vb &= ~0xff000000;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 24;
} else if (property == priv->colorkey_val_prop) {
- dplane->prop.colorkey_yr &= ~0x0000ff00;
- dplane->prop.colorkey_yr |= K2R(val) << 8;
- dplane->prop.colorkey_ug &= ~0x0000ff00;
- dplane->prop.colorkey_ug |= K2G(val) << 8;
- dplane->prop.colorkey_vb &= ~0x0000ff00;
- dplane->prop.colorkey_vb |= K2B(val) << 8;
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0x0000ff00;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 8;
+ drm_to_overlay_state(state)->colorkey_ug &= ~0x0000ff00;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 8;
+ drm_to_overlay_state(state)->colorkey_vb &= ~0x0000ff00;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 8;
} else if (property == priv->colorkey_alpha_prop) {
- dplane->prop.colorkey_yr &= ~0x000000ff;
- dplane->prop.colorkey_yr |= K2R(val);
- dplane->prop.colorkey_ug &= ~0x000000ff;
- dplane->prop.colorkey_ug |= K2G(val);
- dplane->prop.colorkey_vb &= ~0x000000ff;
- dplane->prop.colorkey_vb |= K2B(val);
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0x000000ff;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val);
+ drm_to_overlay_state(state)->colorkey_ug &= ~0x000000ff;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val);
+ drm_to_overlay_state(state)->colorkey_vb &= ~0x000000ff;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val);
} else if (property == priv->colorkey_mode_prop) {
if (val == CKMODE_DISABLE) {
- dplane->prop.colorkey_mode =
+ drm_to_overlay_state(state)->colorkey_mode =
CFG_CKMODE(CKMODE_DISABLE) |
CFG_ALPHAM_CFG | CFG_ALPHA(255);
- dplane->prop.colorkey_enable = 0;
+ drm_to_overlay_state(state)->colorkey_enable = 0;
} else {
- dplane->prop.colorkey_mode =
+ drm_to_overlay_state(state)->colorkey_mode =
CFG_CKMODE(val) |
CFG_ALPHAM_GRA | CFG_ALPHA(0);
- dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
+ drm_to_overlay_state(state)->colorkey_enable =
+ ADV_GRACOLORKEY;
}
- update_attr = true;
} else if (property == priv->brightness_prop) {
- dplane->prop.brightness = val - 256;
- update_attr = true;
+ drm_to_overlay_state(state)->brightness = val - 256;
} else if (property == priv->contrast_prop) {
- dplane->prop.contrast = val;
- update_attr = true;
+ drm_to_overlay_state(state)->contrast = val;
} else if (property == priv->saturation_prop) {
- dplane->prop.saturation = val;
- update_attr = true;
+ drm_to_overlay_state(state)->saturation = val;
+ } else {
+ return -EINVAL;
}
+ return 0;
+}
- if (update_attr && dplane->base.base.crtc)
- armada_ovl_update_attr(&dplane->prop,
- drm_to_armada_crtc(dplane->base.base.crtc));
+static int armada_overlay_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state, struct drm_property *property,
+ uint64_t *val)
+{
+ struct armada_private *priv = plane->dev->dev_private;
+#define C2K(c,s) (((c) >> (s)) & 0xff)
+#define R2BGR(r,g,b,s) (C2K(r,s) << 0 | C2K(g,s) << 8 | C2K(b,s) << 16)
+ if (property == priv->colorkey_prop) {
+ /* Best-effort read-back for this property */
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 16);
+ /* If min != max, or min != val, error out */
+ if (*val != R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 24) ||
+ *val != R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 8))
+ return -EINVAL;
+ } else if (property == priv->colorkey_min_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 16);
+ } else if (property == priv->colorkey_max_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 24);
+ } else if (property == priv->colorkey_val_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 8);
+ } else if (property == priv->colorkey_alpha_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 0);
+ } else if (property == priv->colorkey_mode_prop) {
+ *val = (drm_to_overlay_state(state)->colorkey_mode &
+ CFG_CKMODE_MASK) >> ffs(CFG_CKMODE_MASK);
+ } else if (property == priv->brightness_prop) {
+ *val = drm_to_overlay_state(state)->brightness + 256;
+ } else if (property == priv->contrast_prop) {
+ *val = drm_to_overlay_state(state)->contrast;
+ } else if (property == priv->saturation_prop) {
+ *val = drm_to_overlay_state(state)->saturation;
+ } else {
+ return -EINVAL;
+ }
return 0;
}
static const struct drm_plane_funcs armada_ovl_plane_funcs = {
- .update_plane = armada_ovl_plane_update,
- .disable_plane = armada_drm_plane_disable,
+ .update_plane = armada_overlay_plane_update,
+ .disable_plane = drm_atomic_helper_disable_plane,
.destroy = armada_ovl_plane_destroy,
- .set_property = armada_ovl_plane_set_property,
+ .reset = armada_overlay_reset,
+ .atomic_duplicate_state = armada_overlay_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .atomic_set_property = armada_overlay_set_property,
+ .atomic_get_property = armada_overlay_get_property,
};
static const uint32_t armada_ovl_formats[] = {
@@ -431,48 +541,31 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
{
struct armada_private *priv = dev->dev_private;
struct drm_mode_object *mobj;
- struct armada_ovl_plane *dplane;
+ struct drm_plane *overlay;
int ret;
ret = armada_overlay_create_properties(dev);
if (ret)
return ret;
- dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
- if (!dplane)
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+ if (!overlay)
return -ENOMEM;
- ret = armada_drm_plane_init(&dplane->base);
- if (ret) {
- kfree(dplane);
- return ret;
- }
-
- dplane->base.works[0].fn = armada_ovl_plane_work;
- dplane->base.works[1].fn = armada_ovl_plane_work;
+ drm_plane_helper_add(overlay, &armada_overlay_plane_helper_funcs);
- ret = drm_universal_plane_init(dev, &dplane->base.base, crtcs,
+ ret = drm_universal_plane_init(dev, overlay, crtcs,
&armada_ovl_plane_funcs,
armada_ovl_formats,
ARRAY_SIZE(armada_ovl_formats),
NULL,
DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
- kfree(dplane);
+ kfree(overlay);
return ret;
}
- dplane->prop.colorkey_yr = 0xfefefe00;
- dplane->prop.colorkey_ug = 0x01010100;
- dplane->prop.colorkey_vb = 0x01010100;
- dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
- CFG_ALPHAM_GRA | CFG_ALPHA(0);
- dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
- dplane->prop.brightness = 0;
- dplane->prop.contrast = 0x4000;
- dplane->prop.saturation = 0x4000;
-
- mobj = &dplane->base.base.base;
+ mobj = &overlay->base;
drm_object_attach_property(mobj, priv->colorkey_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_min_prop,
@@ -485,11 +578,19 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
0x000000);
drm_object_attach_property(mobj, priv->colorkey_mode_prop,
CKMODE_RGB);
- drm_object_attach_property(mobj, priv->brightness_prop, 256);
+ drm_object_attach_property(mobj, priv->brightness_prop,
+ 256 + DEFAULT_BRIGHTNESS);
drm_object_attach_property(mobj, priv->contrast_prop,
- dplane->prop.contrast);
+ DEFAULT_CONTRAST);
drm_object_attach_property(mobj, priv->saturation_prop,
- dplane->prop.saturation);
+ DEFAULT_SATURATION);
- return 0;
+ ret = drm_plane_create_color_properties(overlay,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
+ DEFAULT_ENCODING,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+ return ret;
}
diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c
new file mode 100644
index 000000000000..9f36423dd394
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_plane.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include "armada_plane.h"
+#include "armada_trace.h"
+
+static const uint32_t armada_primary_formats[] = {
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+};
+
+void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3],
+ u16 pitches[3], bool interlaced)
+{
+ struct drm_framebuffer *fb = state->fb;
+ const struct drm_format_info *format = fb->format;
+ unsigned int num_planes = format->num_planes;
+ unsigned int x = state->src.x1 >> 16;
+ unsigned int y = state->src.y1 >> 16;
+ u32 addr = drm_fb_obj(fb)->dev_addr;
+ int i;
+
+ DRM_DEBUG_KMS("pitch %u x %d y %d bpp %d\n",
+ fb->pitches[0], x, y, format->cpp[0] * 8);
+
+ if (num_planes > 3)
+ num_planes = 3;
+
+ addrs[0][0] = addr + fb->offsets[0] + y * fb->pitches[0] +
+ x * format->cpp[0];
+ pitches[0] = fb->pitches[0];
+
+ y /= format->vsub;
+ x /= format->hsub;
+
+ for (i = 1; i < num_planes; i++) {
+ addrs[0][i] = addr + fb->offsets[i] + y * fb->pitches[i] +
+ x * format->cpp[i];
+ pitches[i] = fb->pitches[i];
+ }
+ for (; i < 3; i++) {
+ addrs[0][i] = 0;
+ pitches[i] = 0;
+ }
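+ /*
+ * For interlaced scanout the second field starts one line below the
+ * first, and each field advances two lines at a time, hence the
+ * doubled pitch.
+ */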
+ if (interlaced) {
+ for (i = 0; i < 3; i++) {
+ addrs[1][i] = addrs[0][i] + pitches[i];
+ pitches[i] *= 2;
+ }
+ } else {
+ for (i = 0; i < 3; i++)
+ addrs[1][i] = addrs[0][i];
+ }
+}
+
+static unsigned armada_drm_crtc_calc_fb(struct drm_plane_state *state,
+ struct armada_regs *regs, bool interlaced)
+{
+ u16 pitches[3];
+ u32 addrs[2][3];
+ unsigned i = 0;
+
+ armada_drm_plane_calc(state, addrs, pitches, interlaced);
+
+ /* write offset, base, and pitch */
+ armada_reg_queue_set(regs, i, addrs[0][0], LCD_CFG_GRA_START_ADDR0);
+ armada_reg_queue_set(regs, i, addrs[1][0], LCD_CFG_GRA_START_ADDR1);
+ armada_reg_queue_mod(regs, i, pitches[0], 0xffff, LCD_CFG_GRA_PITCH);
+
+ return i;
+}
+
+int armada_drm_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ DRM_DEBUG_KMS("[PLANE:%d:%s] [FB:%d]\n",
+ plane->base.id, plane->name,
+ state->fb ? state->fb->base.id : 0);
+
+ /*
+ * Take a reference on the new framebuffer - we want to
+ * hold on to it while the hardware is displaying it.
+ */
+ if (state->fb)
+ drm_framebuffer_get(state->fb);
+ return 0;
+}
+
+void armada_drm_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ DRM_DEBUG_KMS("[PLANE:%d:%s] [FB:%d]\n",
+ plane->base.id, plane->name,
+ old_state->fb ? old_state->fb->base.id : 0);
+
+ if (old_state->fb)
+ drm_framebuffer_put(old_state->fb);
+}
+
+int armada_drm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (state->fb && !WARN_ON(!state->crtc)) {
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
+
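+ /*
+ * Use the CRTC state from this atomic transaction if there is
+ * one, otherwise fall back to the CRTC's current state.
+ */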
+ if (state->state)
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ else
+ crtc_state = crtc->state;
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ 0, INT_MAX,
+ true, false);
+ } else {
+ state->visible = false;
+ }
+ return 0;
+}
+
+static void armada_drm_primary_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = plane->state;
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ u32 cfg, cfg_mask, val;
+ unsigned int idx;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+ if (!state->fb || WARN_ON(!state->crtc))
+ return;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
+ plane->base.id, plane->name,
+ state->crtc->base.id, state->crtc->name,
+ state->fb->base.id,
+ old_state->visible, state->visible);
+
+ dcrtc = drm_to_armada_crtc(state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
+
+ idx = 0;
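+ /* Power up the graphics RAMs on a 0->1 visibility transition */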
+ if (!old_state->visible && state->visible) {
+ val = CFG_PDWN64x66;
+ if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420)
+ val |= CFG_PDWN256x24;
+ armada_reg_queue_mod(regs, idx, 0, val, LCD_SPU_SRAM_PARA1);
+ }
+ val = armada_rect_hw_fp(&state->src);
+ if (armada_rect_hw_fp(&old_state->src) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_HPXL_VLN);
+ val = armada_rect_yx(&state->dst);
+ if (armada_rect_yx(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_OVSA_HPXL_VLN);
+ val = armada_rect_hw(&state->dst);
+ if (armada_rect_hw(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_GZM_HPXL_VLN);
+ if (old_state->src.x1 != state->src.x1 ||
+ old_state->src.y1 != state->src.y1 ||
+ old_state->fb != state->fb ||
+ state->crtc->state->mode_changed) {
+ idx += armada_drm_crtc_calc_fb(state, regs + idx,
+ dcrtc->interlaced);
+ }
+ if (old_state->fb != state->fb ||
+ state->crtc->state->mode_changed) {
+ cfg = CFG_GRA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) |
+ CFG_GRA_MOD(drm_fb_to_armada_fb(state->fb)->mod);
+ if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420)
+ cfg |= CFG_PALETTE_ENA;
+ if (state->visible)
+ cfg |= CFG_GRA_ENA;
+ if (dcrtc->interlaced)
+ cfg |= CFG_GRA_FTOGGLE;
+ cfg_mask = CFG_GRAFORMAT |
+ CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
+ CFG_GRA_ENA;
+ } else if (old_state->visible != state->visible) {
+ cfg = state->visible ? CFG_GRA_ENA : 0;
+ cfg_mask = CFG_GRA_ENA;
+ } else {
+ cfg = cfg_mask = 0;
+ }
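+ /* Horizontal smoothing is enabled only when source and CRTC widths differ */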
+ if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) ||
+ drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) {
+ cfg_mask |= CFG_GRA_HSMOOTH;
+ if (drm_rect_width(&state->src) >> 16 !=
+ drm_rect_width(&state->dst))
+ cfg |= CFG_GRA_HSMOOTH;
+ }
+
+ if (cfg_mask)
+ armada_reg_queue_mod(regs, idx, cfg, cfg_mask,
+ LCD_SPU_DMA_CTRL0);
+
+ dcrtc->regs_idx += idx;
+}
+
+static void armada_drm_primary_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ unsigned int idx = 0;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+ if (!old_state->crtc)
+ return;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] was on [CRTC:%d:%s] with [FB:%d]\n",
+ plane->base.id, plane->name,
+ old_state->crtc->base.id, old_state->crtc->name,
+ old_state->fb->base.id);
+
+ dcrtc = drm_to_armada_crtc(old_state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
+
+ /* Disable plane and power down most RAMs and FIFOs */
+ armada_reg_queue_mod(regs, idx, 0, CFG_GRA_ENA, LCD_SPU_DMA_CTRL0);
+ armada_reg_queue_mod(regs, idx, CFG_PDWN256x32 | CFG_PDWN256x24 |
+ CFG_PDWN256x8 | CFG_PDWN32x32 | CFG_PDWN64x66,
+ 0, LCD_SPU_SRAM_PARA1);
+
+ dcrtc->regs_idx += idx;
+}
+
+static const struct drm_plane_helper_funcs armada_primary_plane_helper_funcs = {
+ .prepare_fb = armada_drm_plane_prepare_fb,
+ .cleanup_fb = armada_drm_plane_cleanup_fb,
+ .atomic_check = armada_drm_plane_atomic_check,
+ .atomic_update = armada_drm_primary_plane_atomic_update,
+ .atomic_disable = armada_drm_primary_plane_atomic_disable,
+};
+
+static const struct drm_plane_funcs armada_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_primary_helper_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+int armada_drm_primary_plane_init(struct drm_device *drm,
+ struct drm_plane *primary)
+{
+ int ret;
+
+ drm_plane_helper_add(primary, &armada_primary_plane_helper_funcs);
+
+ ret = drm_universal_plane_init(drm, primary, 0,
+ &armada_primary_plane_funcs,
+ armada_primary_formats,
+ ARRAY_SIZE(armada_primary_formats),
+ NULL,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/armada/armada_plane.h b/drivers/gpu/drm/armada/armada_plane.h
new file mode 100644
index 000000000000..ff4281ba7fad
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_plane.h
@@ -0,0 +1,15 @@
+#ifndef ARMADA_PLANE_H
+#define ARMADA_PLANE_H
+
+void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3],
+ u16 pitches[3], bool interlaced);
+int armada_drm_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void armada_drm_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+int armada_drm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state);
+int armada_drm_primary_plane_init(struct drm_device *drm,
+ struct drm_plane *primary);
+
+#endif
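
For context, a minimal sketch of how a caller would consume this new header. Everything except the armada_* symbols declared above (the example_ wrapper, its allocation and error handling) is an assumption for illustration, not part of the patch:

    #include <linux/slab.h>
    #include <drm/drm_plane.h>
    #include "armada_plane.h"

    static int example_modeset_init(struct drm_device *drm)
    {
    	struct drm_plane *primary;
    	int ret;

    	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
    	if (!primary)
    		return -ENOMEM;

    	/* registers the helper funcs, funcs table and format list above */
    	ret = armada_drm_primary_plane_init(drm, primary);
    	if (ret)
    		kfree(primary);
    	return ret;
    }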
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 036dff8a1f33..5e77d456d9bb 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -790,12 +790,12 @@ static int ast_get_modes(struct drm_connector *connector)
if (!flags)
edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(&ast_connector->base, edid);
+ drm_connector_update_edid_property(&ast_connector->base, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
return ret;
} else
- drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
+ drm_connector_update_edid_property(&ast_connector->base, NULL);
return 0;
}
@@ -900,7 +900,7 @@ static int ast_connector_init(struct drm_device *dev)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
ast_connector->i2c = ast_i2c_create(dev);
if (!ast_connector->i2c)
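
The drm_mode_connector_* to drm_connector_* rename recurs mechanically through the rest of this series. A sketch of the resulting .get_modes shape; passing the DDC adapter as a parameter is an assumption here (real callbacks fetch it from driver state):

    #include <linux/i2c.h>
    #include <linux/slab.h>
    #include <drm/drm_edid.h>

    static int example_get_modes(struct drm_connector *connector,
    			     struct i2c_adapter *ddc)
    {
    	struct edid *edid = drm_get_edid(connector, ddc);
    	int count = 0;

    	/* update the EDID property even when edid is NULL (unplugged) */
    	drm_connector_update_edid_property(connector, edid);
    	if (edid) {
    		count = drm_add_edid_modes(connector, edid);
    		kfree(edid);
    	}
    	return count;
    }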
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index c1ea5c36b006..843cac222e60 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -681,6 +681,7 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
drm_fb_cma_fbdev_fini(dev);
flush_workqueue(dc->wq);
drm_kms_helper_poll_fini(dev);
+ drm_atomic_helper_shutdown(dev);
drm_mode_config_cleanup(dev);
pm_runtime_get_sync(dev->dev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 47e0992f3908..04440064b9b7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -412,9 +412,10 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_FORMAT_CFG, cfg);
}
-static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane)
+static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_plane_state *state)
{
- struct drm_crtc *crtc = plane->base.crtc;
+ struct drm_crtc *crtc = state->base.crtc;
struct drm_color_lut *lut;
int idx;
@@ -779,7 +780,7 @@ static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
atmel_hlcdc_plane_update_pos_and_size(plane, state);
atmel_hlcdc_plane_update_general_settings(plane, state);
atmel_hlcdc_plane_update_format(plane, state);
- atmel_hlcdc_plane_update_clut(plane);
+ atmel_hlcdc_plane_update_clut(plane, state);
atmel_hlcdc_plane_update_buffers(plane, state);
atmel_hlcdc_plane_update_disc_area(plane, state);
@@ -816,16 +817,6 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
}
-static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
-{
- struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
-
- if (plane->base.fb)
- drm_framebuffer_put(plane->base.fb);
-
- drm_plane_cleanup(p);
-}
-
static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
{
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
@@ -1002,7 +993,7 @@ static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *p,
static const struct drm_plane_funcs layer_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = atmel_hlcdc_plane_destroy,
+ .destroy = drm_plane_cleanup,
.reset = atmel_hlcdc_plane_reset,
.atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
.atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state,
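
With the legacy plane->fb reference no longer held by the driver, the custom destroy hook removed above becomes dead weight and the funcs table can point straight at the core cleanup helper. A generic sketch of the resulting shape (example_ names are placeholders):

    #include <drm/drm_atomic_helper.h>
    #include <drm/drm_plane.h>

    static const struct drm_plane_funcs example_plane_funcs = {
    	.update_plane = drm_atomic_helper_update_plane,
    	.disable_plane = drm_atomic_helper_disable_plane,
    	.destroy = drm_plane_cleanup,	/* no fb reference to drop */
    	.reset = drm_atomic_helper_plane_reset,
    	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
    	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
    };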
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 233980a78591..ca5a9afdd5cf 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -259,7 +259,7 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs_crtc_init(bochs->dev);
bochs_encoder_init(bochs->dev);
bochs_connector_init(bochs->dev);
- drm_mode_connector_attach_encoder(&bochs->connector,
+ drm_connector_attach_encoder(&bochs->connector,
&bochs->encoder);
return 0;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index fa2c7997e2fd..bf6cad6c9178 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -82,9 +82,11 @@ config DRM_PARADE_PS8622
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
- depends on OF && RC_CORE
+ depends on OF
select DRM_KMS_HELPER
imply EXTCON
+ select INPUT
+ select RC_CORE
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index dd3ff2f2cdce..85c2d407a52e 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -613,7 +613,7 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
__adv7511_power_off(adv7511);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
@@ -872,7 +872,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
}
drm_connector_helper_add(&adv->connector,
&adv7511_connector_helper_funcs);
- drm_mode_connector_attach_encoder(&adv->connector, bridge->encoder);
+ drm_connector_attach_encoder(&adv->connector, bridge->encoder);
if (adv->type == ADV7533)
ret = adv7533_attach_dsi(adv);
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index b49043866be6..f8433c93f463 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -969,8 +969,8 @@ static int anx78xx_get_modes(struct drm_connector *connector)
goto unlock;
}
- err = drm_mode_connector_update_edid_property(connector,
- anx78xx->edid);
+ err = drm_connector_update_edid_property(connector,
+ anx78xx->edid);
if (err) {
DRM_ERROR("Failed to update EDID property: %d\n", err);
goto unlock;
@@ -1048,8 +1048,8 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge)
anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
- err = drm_mode_connector_attach_encoder(&anx78xx->connector,
- bridge->encoder);
+ err = drm_connector_attach_encoder(&anx78xx->connector,
+ bridge->encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 2bcbfadb6ac5..d68986cea132 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1119,8 +1119,8 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, &dp->aux.ddc);
pm_runtime_put(dp->dev);
if (edid) {
- drm_mode_connector_update_edid_property(&dp->connector,
- edid);
+ drm_connector_update_edid_property(&dp->connector,
+ edid);
num_modes += drm_add_edid_modes(&dp->connector, edid);
kfree(edid);
}
@@ -1210,7 +1210,7 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
drm_connector_helper_add(connector,
&analogix_dp_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
}
/*
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index c255fc3e1be5..ce9496d13986 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -1152,7 +1152,7 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
np = of_node_get(dev->dev.of_node);
panel = of_drm_find_panel(np);
- if (panel) {
+ if (!IS_ERR(panel)) {
bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
} else {
bridge = of_drm_find_bridge(dev->dev.of_node);
@@ -1337,7 +1337,7 @@ static const struct mipi_dsi_host_ops cdns_dsi_ops = {
.transfer = cdns_dsi_transfer,
};
-static int cdns_dsi_resume(struct device *dev)
+static int __maybe_unused cdns_dsi_resume(struct device *dev)
{
struct cdns_dsi *dsi = dev_get_drvdata(dev);
@@ -1350,7 +1350,7 @@ static int cdns_dsi_resume(struct device *dev)
return 0;
}
-static int cdns_dsi_suspend(struct device *dev)
+static int __maybe_unused cdns_dsi_suspend(struct device *dev)
{
struct cdns_dsi *dsi = dev_get_drvdata(dev);
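
The __maybe_unused annotations matter because the SET_*_PM_OPS() macros compile their arguments away when CONFIG_PM is disabled, which would otherwise leave the callbacks unreferenced and trigger -Wunused-function. A minimal sketch of the pattern, with placeholder names:

    #include <linux/pm.h>

    static int __maybe_unused example_suspend(struct device *dev)
    {
    	/* power the hardware down */
    	return 0;
    }

    static int __maybe_unused example_resume(struct device *dev)
    {
    	/* power the hardware back up */
    	return 0;
    }

    static const struct dev_pm_ops example_pm_ops = {
    	SET_RUNTIME_PM_OPS(example_suspend, example_resume, NULL)
    };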
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 9837c8d69e69..9b706789a341 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -55,7 +55,7 @@ static int dumb_vga_get_modes(struct drm_connector *connector)
goto fallback;
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
return ret;
@@ -122,7 +122,7 @@ static int dumb_vga_attach(struct drm_bridge *bridge)
return ret;
}
- drm_mode_connector_attach_encoder(&vga->connector,
+ drm_connector_attach_encoder(&vga->connector,
bridge->encoder);
return 0;
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
index 75b0d3f6e4de..f56c92f7af7c 100644
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ b/drivers/gpu/drm/bridge/lvds-encoder.c
@@ -68,9 +68,9 @@ static int lvds_encoder_probe(struct platform_device *pdev)
panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
- if (!panel) {
+ if (IS_ERR(panel)) {
dev_dbg(&pdev->dev, "panel not found, deferring probe\n");
- return -EPROBE_DEFER;
+ return PTR_ERR(panel);
}
lvds_encoder->panel_bridge =
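
This hunk reflects an API change: of_drm_find_panel() now returns an ERR_PTR (typically -EPROBE_DEFER) instead of NULL when no panel is bound yet, so callers forward the encoded error rather than hard-coding one. A sketch of the new calling convention; the example_ wrapper is an assumption:

    #include <linux/err.h>
    #include <drm/drm_panel.h>

    static int example_bind_panel(struct device_node *np,
    			      struct drm_panel **out)
    {
    	struct drm_panel *panel = of_drm_find_panel(np);

    	if (IS_ERR(panel))
    		return PTR_ERR(panel);	/* usually -EPROBE_DEFER */

    	*out = panel;
    	return 0;
    }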
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 7ccadba7c98c..2136c97aeb8e 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -152,7 +152,7 @@ static int ge_b850v3_lvds_get_modes(struct drm_connector *connector)
ge_b850v3_lvds_ptr->edid = (struct edid *)stdp2690_get_edid(client);
if (ge_b850v3_lvds_ptr->edid) {
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
ge_b850v3_lvds_ptr->edid);
num_modes = drm_add_edid_modes(connector,
ge_b850v3_lvds_ptr->edid);
@@ -241,7 +241,7 @@ static int ge_b850v3_lvds_attach(struct drm_bridge *bridge)
return ret;
}
- ret = drm_mode_connector_attach_encoder(connector, bridge->encoder);
+ ret = drm_connector_attach_encoder(connector, bridge->encoder);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index d64a3283822a..a3e817abace1 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -222,7 +222,7 @@ static int ptn3460_get_modes(struct drm_connector *connector)
}
ptn_bridge->edid = (struct edid *)edid;
- drm_mode_connector_update_edid_property(connector, ptn_bridge->edid);
+ drm_connector_update_edid_property(connector, ptn_bridge->edid);
num_modes = drm_add_edid_modes(connector, ptn_bridge->edid);
@@ -265,7 +265,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge)
drm_connector_helper_add(&ptn_bridge->connector,
&ptn3460_connector_helper_funcs);
drm_connector_register(&ptn_bridge->connector);
- drm_mode_connector_attach_encoder(&ptn_bridge->connector,
+ drm_connector_attach_encoder(&ptn_bridge->connector,
bridge->encoder);
if (ptn_bridge->panel)
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 6d99d4a3beb3..7cbaba213ef6 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -79,7 +79,7 @@ static int panel_bridge_attach(struct drm_bridge *bridge)
return ret;
}
- drm_mode_connector_attach_encoder(&panel_bridge->connector,
+ drm_connector_attach_encoder(&panel_bridge->connector,
bridge->encoder);
ret = drm_panel_attach(panel_bridge->panel, &panel_bridge->connector);
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 81198f5e9afa..7334d1b62b71 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -503,7 +503,7 @@ static int ps8622_attach(struct drm_bridge *bridge)
drm_connector_helper_add(&ps8622->connector,
&ps8622_connector_helper_funcs);
drm_connector_register(&ps8622->connector);
- drm_mode_connector_attach_encoder(&ps8622->connector,
+ drm_connector_attach_encoder(&ps8622->connector,
bridge->encoder);
if (ps8622->panel)
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 60373d7eb220..e59a13542333 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -170,7 +170,7 @@ static int sii902x_get_modes(struct drm_connector *connector)
return ret;
edid = drm_get_edid(connector, sii902x->i2c->adapter);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
num = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -324,7 +324,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge)
else
sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
- drm_mode_connector_attach_encoder(&sii902x->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sii902x->connector, bridge->encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 3c136f2b954f..5971976284bf 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1922,7 +1922,7 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -1974,7 +1974,7 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
drm_connector_init(bridge->dev, connector, &dw_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 0fd9cf27542c..8e28e738cb52 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1140,7 +1140,7 @@ static int tc_connector_get_modes(struct drm_connector *connector)
if (!edid)
return 0;
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
return count;
@@ -1195,7 +1195,7 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
drm_display_info_set_bus_formats(&tc->connector.display_info,
&bus_format, 1);
- drm_mode_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
+ drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index acb857030951..c3e32138c6bb 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -62,7 +62,7 @@ static int tfp410_get_modes(struct drm_connector *connector)
goto fallback;
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
return drm_add_edid_modes(connector, edid);
fallback:
@@ -132,7 +132,7 @@ static int tfp410_attach(struct drm_bridge *bridge)
return ret;
}
- drm_mode_connector_attach_encoder(&dvi->connector,
+ drm_connector_attach_encoder(&dvi->connector,
bridge->encoder);
return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index be2d7e488062..ce9db7aab225 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -92,7 +92,6 @@
#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
-#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
struct cirrus_crtc {
struct drm_crtc base;
@@ -117,11 +116,6 @@ struct cirrus_connector {
struct drm_connector base;
};
-struct cirrus_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
struct cirrus_mc {
resource_size_t vram_size;
resource_size_t vram_base;
@@ -152,7 +146,7 @@ struct cirrus_device {
struct cirrus_fbdev {
struct drm_fb_helper helper;
- struct cirrus_framebuffer gfb;
+ struct drm_framebuffer gfb;
void *sysram;
int size;
int x1, y1, x2, y2; /* dirty rect */
@@ -198,7 +192,7 @@ int cirrus_dumb_create(struct drm_file *file,
struct drm_mode_create_dumb *args);
int cirrus_framebuffer_init(struct drm_device *dev,
- struct cirrus_framebuffer *gfb,
+ struct drm_framebuffer *gfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 32fbfba2c623..b643ac92801c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -22,14 +22,14 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
struct drm_gem_object *obj;
struct cirrus_bo *bo;
int src_offset, dst_offset;
- int bpp = afbdev->gfb.base.format->cpp[0];
+ int bpp = afbdev->gfb.format->cpp[0];
int ret = -EBUSY;
bool unmap = false;
bool store_for_later = false;
int x2, y2;
unsigned long flags;
- obj = afbdev->gfb.obj;
+ obj = afbdev->gfb.obj[0];
bo = gem_to_cirrus_bo(obj);
/*
@@ -82,7 +82,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
}
for (i = y; i < y + height; i++) {
/* assume equal stride for now */
- src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
+ src_offset = dst_offset = i * afbdev->gfb.pitches[0] + (x * bpp);
memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
}
@@ -204,7 +204,7 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
gfbdev->sysram = sysram;
gfbdev->size = size;
- fb = &gfbdev->gfb.base;
+ fb = &gfbdev->gfb;
if (!fb) {
DRM_INFO("fb is NULL\n");
return -EINVAL;
@@ -246,19 +246,19 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
static int cirrus_fbdev_destroy(struct drm_device *dev,
struct cirrus_fbdev *gfbdev)
{
- struct cirrus_framebuffer *gfb = &gfbdev->gfb;
+ struct drm_framebuffer *gfb = &gfbdev->gfb;
drm_fb_helper_unregister_fbi(&gfbdev->helper);
- if (gfb->obj) {
- drm_gem_object_put_unlocked(gfb->obj);
- gfb->obj = NULL;
+ if (gfb->obj[0]) {
+ drm_gem_object_put_unlocked(gfb->obj[0]);
+ gfb->obj[0] = NULL;
}
vfree(gfbdev->sysram);
drm_fb_helper_fini(&gfbdev->helper);
- drm_framebuffer_unregister_private(&gfb->base);
- drm_framebuffer_cleanup(&gfb->base);
+ drm_framebuffer_unregister_private(gfb);
+ drm_framebuffer_cleanup(gfb);
return 0;
}
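
The cirrus conversion relies on struct drm_framebuffer now carrying its backing GEM objects in an obj[] array, which is what makes the driver-private wrapper struct removable. A sketch of how per-plane parameters come straight from the core fb (the example_ helper is illustrative only):

    #include <drm/drm_framebuffer.h>
    #include <drm/drm_gem.h>

    static void example_fb_params(struct drm_framebuffer *fb)
    {
    	struct drm_gem_object *obj = fb->obj[0];	/* plane 0 BO */
    	unsigned int pitch = fb->pitches[0];		/* bytes per line */
    	int cpp = fb->format->cpp[0];			/* bytes per pixel */

    	(void)obj; (void)pitch; (void)cpp;
    }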
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 26df1e8cd490..60d54e10a34d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -10,42 +10,25 @@
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "cirrus_drv.h"
-static int cirrus_create_handle(struct drm_framebuffer *fb,
- struct drm_file* file_priv,
- unsigned int* handle)
-{
- struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
-
- return drm_gem_handle_create(file_priv, cirrus_fb->obj, handle);
-}
-
-static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
-
- drm_gem_object_put_unlocked(cirrus_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(fb);
-}
-
static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
- .create_handle = cirrus_create_handle,
- .destroy = cirrus_user_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
int cirrus_framebuffer_init(struct drm_device *dev,
- struct cirrus_framebuffer *gfb,
+ struct drm_framebuffer *gfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
- drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
- gfb->obj = obj;
- ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
+ drm_helper_mode_fill_fb_struct(dev, gfb, mode_cmd);
+ gfb->obj[0] = obj;
+ ret = drm_framebuffer_init(dev, gfb, &cirrus_fb_funcs);
if (ret) {
DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
return ret;
@@ -60,7 +43,7 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
{
struct cirrus_device *cdev = dev->dev_private;
struct drm_gem_object *obj;
- struct cirrus_framebuffer *cirrus_fb;
+ struct drm_framebuffer *fb;
u32 bpp;
int ret;
@@ -74,19 +57,19 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
if (obj == NULL)
return ERR_PTR(-ENOENT);
- cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
- if (!cirrus_fb) {
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb) {
drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
- ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
+ ret = cirrus_framebuffer_init(dev, fb, mode_cmd, obj);
if (ret) {
drm_gem_object_put_unlocked(obj);
- kfree(cirrus_fb);
+ kfree(fb);
return ERR_PTR(ret);
}
- return &cirrus_fb->base;
+ return fb;
}
static const struct drm_mode_config_funcs cirrus_mode_funcs = {
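
drm_gem_fb_destroy() drops a reference on each fb->obj[i], calls drm_framebuffer_cleanup() and frees the framebuffer, which is what the removed cirrus_user_framebuffer_destroy() open-coded. Drivers without extra validation can go one step further than cirrus (which keeps a custom fb_create for its bpp check) and use the stock helper for .fb_create as well; a sketch:

    #include <drm/drm_gem_framebuffer_helper.h>

    static const struct drm_mode_config_funcs example_mode_funcs = {
    	/* allocates the fb, fills obj[] and wires up the generic funcs */
    	.fb_create = drm_gem_fb_create,
    };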
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index c91b9b054e3f..336bfda40125 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -101,17 +101,13 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
int x, int y, int atomic)
{
struct cirrus_device *cdev = crtc->dev->dev_private;
- struct drm_gem_object *obj;
- struct cirrus_framebuffer *cirrus_fb;
struct cirrus_bo *bo;
int ret;
u64 gpu_addr;
/* push the previous fb to system ram */
if (!atomic && fb) {
- cirrus_fb = to_cirrus_framebuffer(fb);
- obj = cirrus_fb->obj;
- bo = gem_to_cirrus_bo(obj);
+ bo = gem_to_cirrus_bo(fb->obj[0]);
ret = cirrus_bo_reserve(bo, false);
if (ret)
return ret;
@@ -119,9 +115,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
cirrus_bo_unreserve(bo);
}
- cirrus_fb = to_cirrus_framebuffer(crtc->primary->fb);
- obj = cirrus_fb->obj;
- bo = gem_to_cirrus_bo(obj);
+ bo = gem_to_cirrus_bo(crtc->primary->fb->obj[0]);
ret = cirrus_bo_reserve(bo, false);
if (ret)
@@ -133,7 +127,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
return ret;
}
- if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
+ if (&cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
/* if pushing console in kmap it */
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret)
@@ -536,7 +530,7 @@ int cirrus_modeset_init(struct cirrus_device *cdev)
return -1;
}
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
ret = cirrus_fbdev_init(cdev);
if (ret) {
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 895741e9cd7d..3eb061e11e2e 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
+#include <drm/drm_writeback.h>
#include <linux/sync_file.h>
#include "drm_crtc_internal.h"
@@ -325,6 +326,35 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
return fence_ptr;
}
+static int set_out_fence_for_connector(struct drm_atomic_state *state,
+ struct drm_connector *connector,
+ s32 __user *fence_ptr)
+{
+ unsigned int index = drm_connector_index(connector);
+
+ if (!fence_ptr)
+ return 0;
+
+ if (put_user(-1, fence_ptr))
+ return -EFAULT;
+
+ state->connectors[index].out_fence_ptr = fence_ptr;
+
+ return 0;
+}
+
+static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ unsigned int index = drm_connector_index(connector);
+ s32 __user *fence_ptr;
+
+ fence_ptr = state->connectors[index].out_fence_ptr;
+ state->connectors[index].out_fence_ptr = NULL;
+
+ return fence_ptr;
+}
+
/**
* drm_atomic_set_mode_for_crtc - set mode for CRTC
* @state: the CRTC whose incoming state to update
@@ -339,6 +369,7 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
const struct drm_display_mode *mode)
{
+ struct drm_crtc *crtc = state->crtc;
struct drm_mode_modeinfo umode;
/* Early return for no change. */
@@ -359,13 +390,13 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
drm_mode_copy(&state->mode, mode);
state->enable = true;
- DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
- mode->name, state);
+ DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+ mode->name, crtc->base.id, crtc->name, state);
} else {
memset(&state->mode, 0, sizeof(state->mode));
state->enable = false;
- DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
- state);
+ DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+ crtc->base.id, crtc->name, state);
}
return 0;
@@ -388,6 +419,8 @@ EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
struct drm_property_blob *blob)
{
+ struct drm_crtc *crtc = state->crtc;
+
if (blob == state->mode_blob)
return 0;
@@ -397,19 +430,34 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
memset(&state->mode, 0, sizeof(state->mode));
if (blob) {
- if (blob->length != sizeof(struct drm_mode_modeinfo) ||
- drm_mode_convert_umode(state->crtc->dev, &state->mode,
- blob->data))
+ int ret;
+
+ if (blob->length != sizeof(struct drm_mode_modeinfo)) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
+ crtc->base.id, crtc->name,
+ blob->length);
return -EINVAL;
+ }
+
+ ret = drm_mode_convert_umode(crtc->dev,
+ &state->mode, blob->data);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
+ crtc->base.id, crtc->name,
+ ret, drm_get_mode_status_name(state->mode.status));
+ drm_mode_debug_printmodeline(&state->mode);
+ return -EINVAL;
+ }
state->mode_blob = drm_property_blob_get(blob);
state->enable = true;
- DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
- state->mode.name, state);
+ DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+ state->mode.name, crtc->base.id, crtc->name,
+ state);
} else {
state->enable = false;
- DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
- state);
+ DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+ crtc->base.id, crtc->name, state);
}
return 0;
@@ -539,10 +587,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
return -EFAULT;
set_out_fence_for_crtc(state->state, crtc, fence_ptr);
- } else if (crtc->funcs->atomic_set_property)
+ } else if (crtc->funcs->atomic_set_property) {
return crtc->funcs->atomic_set_property(crtc, state, property, val);
- else
+ } else {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
+ crtc->base.id, crtc->name,
+ property->base.id, property->name);
return -EINVAL;
+ }
return 0;
}
@@ -677,6 +729,51 @@ static void drm_atomic_crtc_print_state(struct drm_printer *p,
}
/**
+ * drm_atomic_connector_check - check connector state
+ * @connector: connector to check
+ * @state: connector state to check
+ *
+ * Provides core sanity checks for connector state.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+static int drm_atomic_connector_check(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_writeback_job *writeback_job = state->writeback_job;
+
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
+ return 0;
+
+ if (writeback_job->fb && !state->crtc) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ if (state->crtc)
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+
+ if (writeback_job->fb && !crtc_state->active) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
+ connector->base.id, connector->name,
+ state->crtc->base.id);
+ return -EINVAL;
+ }
+
+ if (writeback_job->out_fence && !writeback_job->fb) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* drm_atomic_get_plane_state - get plane state
* @state: global atomic state object
* @plane: plane to get state object for
@@ -700,6 +797,11 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
WARN_ON(!state->acquire_ctx);
+ /* the legacy pointers should never be set */
+ WARN_ON(plane->fb);
+ WARN_ON(plane->old_fb);
+ WARN_ON(plane->crtc);
+
plane_state = drm_atomic_get_existing_plane_state(state, plane);
if (plane_state)
return plane_state;
@@ -794,8 +896,11 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
} else if (property == plane->alpha_property) {
state->alpha = val;
} else if (property == plane->rotation_property) {
- if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
+ if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
+ plane->base.id, plane->name, val);
return -EINVAL;
+ }
state->rotation = val;
} else if (property == plane->zpos_property) {
state->zpos = val;
@@ -807,6 +912,9 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
return plane->funcs->atomic_set_property(plane, state,
property, val);
} else {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
+ plane->base.id, plane->name,
+ property->base.id, property->name);
return -EINVAL;
}
@@ -914,10 +1022,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* either *both* CRTC and FB must be set, or neither */
if (state->crtc && !state->fb) {
- DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
+ plane->base.id, plane->name);
return -EINVAL;
} else if (state->fb && !state->crtc) {
- DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
+ plane->base.id, plane->name);
return -EINVAL;
}
@@ -927,7 +1037,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
- DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
+ DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
+ state->crtc->base.id, state->crtc->name,
+ plane->base.id, plane->name);
return -EINVAL;
}
@@ -936,7 +1048,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->fb->modifier);
if (ret) {
struct drm_format_name_buf format_name;
- DRM_DEBUG_ATOMIC("Invalid pixel format %s, modifier 0x%llx\n",
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
+ plane->base.id, plane->name,
drm_get_format_name(state->fb->format->format,
&format_name),
state->fb->modifier);
@@ -948,7 +1061,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
state->crtc_h > INT_MAX ||
state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
- DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane->base.id, plane->name,
state->crtc_w, state->crtc_h,
state->crtc_x, state->crtc_y);
return -ERANGE;
@@ -962,8 +1076,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->src_x > fb_width - state->src_w ||
state->src_h > fb_height ||
state->src_y > fb_height - state->src_h) {
- DRM_DEBUG_ATOMIC("Invalid source coordinates "
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
+ plane->base.id, plane->name,
state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
@@ -996,6 +1111,7 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
drm_printf(p, "\trotation=%x\n", state->rotation);
+ drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
drm_printf(p, "\tcolor-encoding=%s\n",
drm_get_color_encoding_name(state->color_encoding));
drm_printf(p, "\tcolor-range=%s\n",
@@ -1120,6 +1236,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
state->private_objs[index].old_state = obj->state;
state->private_objs[index].new_state = obj_state;
state->private_objs[index].ptr = obj;
+ obj_state->state = state;
state->num_private_objs = num_objs;
@@ -1278,6 +1395,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
state->link_status = val;
} else if (property == config->aspect_ratio_property) {
state->picture_aspect_ratio = val;
+ } else if (property == config->content_type_property) {
+ state->content_type = val;
} else if (property == connector->scaling_mode_property) {
state->scaling_mode = val;
} else if (property == connector->content_protection_property) {
@@ -1286,10 +1405,24 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
return -EINVAL;
}
state->content_protection = val;
+ } else if (property == config->writeback_fb_id_property) {
+ struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
+ int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
+ if (fb)
+ drm_framebuffer_put(fb);
+ return ret;
+ } else if (property == config->writeback_out_fence_ptr_property) {
+ s32 __user *fence_ptr = u64_to_user_ptr(val);
+
+ return set_out_fence_for_connector(state->state, connector,
+ fence_ptr);
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
} else {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n",
+ connector->base.id, connector->name,
+ property->base.id, property->name);
return -EINVAL;
}
@@ -1304,6 +1437,10 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ if (state->writeback_job && state->writeback_job->fb)
+ drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);
+
if (connector->funcs->atomic_print_state)
connector->funcs->atomic_print_state(p, state);
}
@@ -1363,10 +1500,17 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = state->link_status;
} else if (property == config->aspect_ratio_property) {
*val = state->picture_aspect_ratio;
+ } else if (property == config->content_type_property) {
+ *val = state->content_type;
} else if (property == connector->scaling_mode_property) {
*val = state->scaling_mode;
} else if (property == connector->content_protection_property) {
*val = state->content_protection;
+ } else if (property == config->writeback_fb_id_property) {
+ /* Writeback framebuffer is one-shot, write and forget */
+ *val = 0;
+ } else if (property == config->writeback_out_fence_ptr_property) {
+ *val = 0;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
@@ -1442,7 +1586,7 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
if (WARN_ON(IS_ERR(crtc_state)))
return PTR_ERR(crtc_state);
- crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
+ crtc_state->plane_mask &= ~drm_plane_mask(plane);
}
plane_state->crtc = crtc;
@@ -1452,15 +1596,16 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- crtc_state->plane_mask |= (1 << drm_plane_index(plane));
+ crtc_state->plane_mask |= drm_plane_mask(plane);
}
if (crtc)
- DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
- plane_state, crtc->base.id, crtc->name);
+ DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
+ plane->base.id, plane->name, plane_state,
+ crtc->base.id, crtc->name);
else
- DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
- plane_state);
+ DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
+ plane->base.id, plane->name, plane_state);
return 0;
}
@@ -1480,12 +1625,15 @@ void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb)
{
+ struct drm_plane *plane = plane_state->plane;
+
if (fb)
- DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
- fb->base.id, plane_state);
- else
- DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
+ DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
+ fb->base.id, plane->base.id, plane->name,
plane_state);
+ else
+ DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
+ plane->base.id, plane->name, plane_state);
drm_framebuffer_assign(&plane_state->fb, fb);
}
@@ -1546,6 +1694,7 @@ int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc)
{
+ struct drm_connector *connector = conn_state->connector;
struct drm_crtc_state *crtc_state;
if (conn_state->crtc == crtc)
@@ -1556,7 +1705,7 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
conn_state->crtc);
crtc_state->connector_mask &=
- ~(1 << drm_connector_index(conn_state->connector));
+ ~drm_connector_mask(conn_state->connector);
drm_connector_put(conn_state->connector);
conn_state->crtc = NULL;
@@ -1568,15 +1717,17 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
return PTR_ERR(crtc_state);
crtc_state->connector_mask |=
- 1 << drm_connector_index(conn_state->connector);
+ drm_connector_mask(conn_state->connector);
drm_connector_get(conn_state->connector);
conn_state->crtc = crtc;
- DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
+ DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
+ connector->base.id, connector->name,
conn_state, crtc->base.id, crtc->name);
} else {
- DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
+ DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
+ connector->base.id, connector->name,
conn_state);
}
@@ -1584,6 +1735,70 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
+/*
+ * drm_atomic_get_writeback_job - return or allocate a writeback job
+ * @conn_state: Connector state to get the job for
+ *
+ * Writeback jobs have a different lifetime to the atomic state they are
+ * associated with. This convenience function takes care of allocating a job
+ * if there isn't yet one associated with the connector state, otherwise
+ * it just returns the existing job.
+ *
+ * Returns: The writeback job for the given connector state
+ */
+static struct drm_writeback_job *
+drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
+{
+ WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+
+ if (!conn_state->writeback_job)
+ conn_state->writeback_job =
+ kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
+
+ return conn_state->writeback_job;
+}
+
+/**
+ * drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer
+ * @conn_state: atomic state object for the connector
+ * @fb: fb to use for the connector
+ *
+ * This is used to set the framebuffer for a writeback connector, which outputs
+ * to a buffer instead of an actual physical connector.
+ * Changing the assigned framebuffer requires us to grab a reference to the new
+ * fb and drop the reference to the old fb, if there is one. This function
+ * takes care of all these details besides updating the pointer in the
+ * state object itself.
+ *
+ * Note: The only way conn_state can already have an fb set is if the commit
+ * sets the property more than once.
+ *
+ * See also: drm_writeback_connector_init()
+ *
+ * Returns: 0 on success
+ */
+int drm_atomic_set_writeback_fb_for_connector(
+ struct drm_connector_state *conn_state,
+ struct drm_framebuffer *fb)
+{
+ struct drm_writeback_job *job =
+ drm_atomic_get_writeback_job(conn_state);
+ if (!job)
+ return -ENOMEM;
+
+ drm_framebuffer_assign(&job->fb, fb);
+
+ if (fb)
+ DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
+ fb->base.id, conn_state);
+ else
+ DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
+ conn_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector);
+
/**
* drm_atomic_add_affected_connectors - add connectors for crtc
* @state: atomic state
@@ -1629,7 +1844,7 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
*/
drm_connector_list_iter_begin(state->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
- if (!(crtc_state->connector_mask & (1 << drm_connector_index(connector))))
+ if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
continue;
conn_state = drm_atomic_get_connector_state(state, connector);
@@ -1672,6 +1887,9 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
+ DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
+ crtc->base.id, crtc->name, state);
+
drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
@@ -1702,6 +1920,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
struct drm_plane_state *plane_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
int i, ret = 0;
DRM_DEBUG_ATOMIC("checking %p\n", state);
@@ -1724,6 +1944,15 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
+ for_each_new_connector_in_state(state, conn, conn_state, i) {
+ ret = drm_atomic_connector_check(conn, conn_state);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
+ conn->base.id, conn->name);
+ return ret;
+ }
+ }
+
if (config->funcs->atomic_check) {
ret = config->funcs->atomic_check(state->dev, state);
@@ -2048,45 +2277,6 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
/**
- * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
- *
- * @dev: drm device to check.
- * @plane_mask: plane mask for planes that were updated.
- * @ret: return value, can be -EDEADLK for a retry.
- *
- * Before doing an update &drm_plane.old_fb is set to &drm_plane.fb, but before
- * dropping the locks old_fb needs to be set to NULL and plane->fb updated. This
- * is a common operation for each atomic update, so this call is split off as a
- * helper.
- */
-void drm_atomic_clean_old_fb(struct drm_device *dev,
- unsigned plane_mask,
- int ret)
-{
- struct drm_plane *plane;
-
- /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
- * locks (ie. while it is still safe to deref plane->state). We
- * need to do this here because the driver entry points cannot
- * distinguish between legacy and atomic ioctls.
- */
- drm_for_each_plane_mask(plane, dev, plane_mask) {
- if (ret == 0) {
- struct drm_framebuffer *new_fb = plane->state->fb;
- if (new_fb)
- drm_framebuffer_get(new_fb);
- plane->fb = new_fb;
- plane->crtc = plane->state->crtc;
-
- if (plane->old_fb)
- drm_framebuffer_put(plane->old_fb);
- }
- plane->old_fb = NULL;
- }
-}
-EXPORT_SYMBOL(drm_atomic_clean_old_fb);
-
-/**
* DOC: explicit fencing properties
*
* Explicit fencing allows userspace to control the buffer synchronization
@@ -2161,7 +2351,7 @@ static int setup_out_fence(struct drm_out_fence_state *fence_state,
return 0;
}
-static int prepare_crtc_signaling(struct drm_device *dev,
+static int prepare_signaling(struct drm_device *dev,
struct drm_atomic_state *state,
struct drm_mode_atomic *arg,
struct drm_file *file_priv,
@@ -2170,6 +2360,8 @@ static int prepare_crtc_signaling(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
int i, c = 0, ret;
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
@@ -2235,6 +2427,45 @@ static int prepare_crtc_signaling(struct drm_device *dev,
c++;
}
+ for_each_new_connector_in_state(state, conn, conn_state, i) {
+ struct drm_writeback_connector *wb_conn;
+ struct drm_writeback_job *job;
+ struct drm_out_fence_state *f;
+ struct dma_fence *fence;
+ s32 __user *fence_ptr;
+
+ fence_ptr = get_out_fence_for_connector(state, conn);
+ if (!fence_ptr)
+ continue;
+
+ job = drm_atomic_get_writeback_job(conn_state);
+ if (!job)
+ return -ENOMEM;
+
+ f = krealloc(*fence_state, sizeof(**fence_state) *
+ (*num_fences + 1), GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+
+ memset(&f[*num_fences], 0, sizeof(*f));
+
+ f[*num_fences].out_fence_ptr = fence_ptr;
+ *fence_state = f;
+
+ wb_conn = drm_connector_to_writeback(conn);
+ fence = drm_writeback_get_out_fence(wb_conn);
+ if (!fence)
+ return -ENOMEM;
+
+ ret = setup_out_fence(&f[(*num_fences)++], fence);
+ if (ret) {
+ dma_fence_put(fence);
+ return ret;
+ }
+
+ job->out_fence = fence;
+ }
+
/*
* Having this flag means user mode pends on event which will never
* reach due to lack of at least one CRTC for signaling
@@ -2245,11 +2476,11 @@ static int prepare_crtc_signaling(struct drm_device *dev,
return 0;
}
-static void complete_crtc_signaling(struct drm_device *dev,
- struct drm_atomic_state *state,
- struct drm_out_fence_state *fence_state,
- unsigned int num_fences,
- bool install_fds)
+static void complete_signaling(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_out_fence_state *fence_state,
+ unsigned int num_fences,
+ bool install_fds)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@@ -2306,9 +2537,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
unsigned int copied_objs, copied_props;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
- struct drm_plane *plane;
struct drm_out_fence_state *fence_state;
- unsigned plane_mask;
int ret = 0;
unsigned int i, j, num_fences;
@@ -2348,7 +2577,6 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
retry:
- plane_mask = 0;
copied_objs = 0;
copied_props = 0;
fence_state = NULL;
@@ -2419,17 +2647,11 @@ retry:
copied_props++;
}
- if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
- !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
- plane = obj_to_plane(obj);
- plane_mask |= (1 << drm_plane_index(plane));
- plane->old_fb = plane->fb;
- }
drm_mode_object_put(obj);
}
- ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
- &num_fences);
+ ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
+ &num_fences);
if (ret)
goto out;
@@ -2445,9 +2667,7 @@ retry:
}
out:
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
- complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
+ complete_signaling(dev, state, fence_state, num_fences, !ret);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
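
To make the new writeback connector properties concrete, a userspace-side sketch (libdrm) that queues one writeback job and collects the out-fence. All ids, including the two property ids, are assumed to have been looked up beforehand by name ("WRITEBACK_FB_ID", "WRITEBACK_OUT_FENCE_PTR"):

    #include <stdint.h>
    #include <xf86drmMode.h>

    static int example_writeback_commit(int fd, uint32_t wb_conn_id,
    				    uint32_t prop_fb_id,
    				    uint32_t prop_out_fence_ptr,
    				    uint32_t fb_id)
    {
    	int32_t out_fence = -1;
    	drmModeAtomicReq *req = drmModeAtomicAlloc();
    	int ret;

    	if (!req)
    		return -1;

    	drmModeAtomicAddProperty(req, wb_conn_id, prop_fb_id, fb_id);
    	drmModeAtomicAddProperty(req, wb_conn_id, prop_out_fence_ptr,
    				 (uint64_t)(uintptr_t)&out_fence);

    	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET,
    				  NULL);
    	drmModeAtomicFree(req);

    	/* on success out_fence holds a sync_file fd that signals once the
    	 * write to fb_id completes; -1 means no fence was installed */
    	return ret ? ret : out_fence;
    }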
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 81e32199d3ef..80be74df7ba6 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -30,6 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_writeback.h>
#include <linux/dma-fence.h>
#include "drm_crtc_helper_internal.h"
@@ -120,7 +121,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
new_encoder = drm_atomic_helper_best_encoder(connector);
if (new_encoder) {
- if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
+ if (encoder_mask & drm_encoder_mask(new_encoder)) {
DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
new_encoder->base.id, new_encoder->name,
connector->base.id, connector->name);
@@ -128,7 +129,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
return -EINVAL;
}
- encoder_mask |= 1 << drm_encoder_index(new_encoder);
+ encoder_mask |= drm_encoder_mask(new_encoder);
}
}
@@ -154,7 +155,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
continue;
encoder = connector->state->best_encoder;
- if (!encoder || !(encoder_mask & (1 << drm_encoder_index(encoder))))
+ if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
continue;
if (!disable_conflicting_encoders) {
@@ -222,7 +223,7 @@ set_best_encoder(struct drm_atomic_state *state,
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
crtc_state->encoder_mask &=
- ~(1 << drm_encoder_index(conn_state->best_encoder));
+ ~drm_encoder_mask(conn_state->best_encoder);
}
}
@@ -233,7 +234,7 @@ set_best_encoder(struct drm_atomic_state *state,
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
crtc_state->encoder_mask |=
- 1 << drm_encoder_index(encoder);
+ drm_encoder_mask(encoder);
}
}
@@ -644,7 +645,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
if (ret)
return ret;
- connectors_mask += BIT(i);
+ connectors_mask |= BIT(i);
}
/*
@@ -1172,6 +1173,27 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
+static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
+
+ for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+ const struct drm_connector_helper_funcs *funcs;
+
+ funcs = connector->helper_private;
+ if (!funcs->atomic_commit)
+ continue;
+
+ if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
+ WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+ funcs->atomic_commit(connector, new_conn_state);
+ }
+ }
+}
+
/**
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
* @dev: DRM device
@@ -1251,6 +1273,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_bridge_enable(encoder->bridge);
}
+
+ drm_atomic_helper_commit_writebacks(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
@@ -1426,6 +1450,8 @@ void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ drm_atomic_helper_fake_vblank(old_state);
+
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
@@ -1455,6 +1481,8 @@ void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_planes(dev, old_state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
+ drm_atomic_helper_fake_vblank(old_state);
+
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
@@ -2032,6 +2060,45 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
/**
+ * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
+ * @old_state: atomic state object with old state structures
+ *
+ * This function walks all CRTCs and fakes VBLANK events on those with
+ * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
+ * The primary use of this function is writeback connectors working in oneshot
+ * mode and faking VBLANK events. In this case they only fake the VBLANK event
+ * when a job is queued, and any change to the pipeline that does not touch the
+ * connector would otherwise lead to timeouts when calling
+ * drm_atomic_helper_wait_for_vblanks() or
+ * drm_atomic_helper_wait_for_flip_done().
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
+{
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
+
+ for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ unsigned long flags;
+
+ if (!new_crtc_state->no_vblank)
+ continue;
+
+ spin_lock_irqsave(&old_state->dev->event_lock, flags);
+ if (new_crtc_state->event) {
+ drm_crtc_send_vblank_event(crtc,
+ new_crtc_state->event);
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
+
+/**
* drm_atomic_helper_commit_hw_done - setup possible nonblocking commit
* @old_state: atomic state object with old state structures
*
@@ -2322,11 +2389,13 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
const struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_crtc *crtc = old_crtc_state->crtc;
struct drm_atomic_state *old_state = old_crtc_state->state;
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(old_state, crtc);
struct drm_plane *plane;
unsigned plane_mask;
plane_mask = old_crtc_state->plane_mask;
- plane_mask |= crtc->state->plane_mask;
+ plane_mask |= new_crtc_state->plane_mask;
crtc_funcs = crtc->helper_private;
if (crtc_funcs && crtc_funcs->atomic_begin)
@@ -2335,6 +2404,8 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
struct drm_plane_state *old_plane_state =
drm_atomic_get_old_plane_state(old_state, plane);
+ struct drm_plane_state *new_plane_state =
+ drm_atomic_get_new_plane_state(old_state, plane);
const struct drm_plane_helper_funcs *plane_funcs;
plane_funcs = plane->helper_private;
@@ -2342,13 +2413,14 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
if (!old_plane_state || !plane_funcs)
continue;
- WARN_ON(plane->state->crtc && plane->state->crtc != crtc);
+ WARN_ON(new_plane_state->crtc &&
+ new_plane_state->crtc != crtc);
- if (drm_atomic_plane_disabling(old_plane_state, plane->state) &&
+ if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
plane_funcs->atomic_disable)
plane_funcs->atomic_disable(plane, old_plane_state);
- else if (plane->state->crtc ||
- drm_atomic_plane_disabling(old_plane_state, plane->state))
+ else if (new_plane_state->crtc ||
+ drm_atomic_plane_disabling(old_plane_state, new_plane_state))
plane_funcs->atomic_update(plane, old_plane_state);
}
@@ -2797,7 +2869,7 @@ static int update_output_state(struct drm_atomic_state *state,
* resets the "link-status" property to GOOD, to force any link
* re-training. The SETCRTC ioctl does not define whether an update does
* need a full modeset or just a plane update, hence we're allowed to do
- * that. See also drm_mode_connector_set_link_status_property().
+ * that. See also drm_connector_set_link_status_property().
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
@@ -2916,7 +2988,6 @@ static int __drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_plane *plane;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
- unsigned plane_mask = 0;
int ret, i;
state = drm_atomic_state_alloc(dev);
@@ -2959,17 +3030,10 @@ static int __drm_atomic_helper_disable_all(struct drm_device *dev,
goto free;
drm_atomic_set_fb_for_plane(plane_state, NULL);
-
- if (clean_old_fbs) {
- plane->old_fb = plane->fb;
- plane_mask |= BIT(drm_plane_index(plane));
- }
}
ret = drm_atomic_commit(state);
free:
- if (plane_mask)
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
drm_atomic_state_put(state);
return ret;
}
@@ -3131,13 +3195,8 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
state->acquire_ctx = ctx;
- for_each_new_plane_in_state(state, plane, new_plane_state, i) {
- WARN_ON(plane->crtc != new_plane_state->crtc);
- WARN_ON(plane->fb != new_plane_state->fb);
- WARN_ON(plane->old_fb);
-
+ for_each_new_plane_in_state(state, plane, new_plane_state, i)
state->planes[i].old_state = plane->state;
- }
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
state->crtcs[i].old_state = crtc->state;
@@ -3662,6 +3721,9 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
if (state->crtc)
drm_connector_get(connector);
state->commit = NULL;
+
+ /* Don't copy over a writeback job, they are used only once */
+ state->writeback_job = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
new file mode 100644
index 000000000000..baff50a4c234
--- /dev/null
+++ b/drivers/gpu/drm/drm_client.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Noralf Trønnes
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include <drm/drm_client.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_print.h>
+#include <drm/drmP.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * This library provides support for clients running in the kernel, such as fbdev and bootsplash.
+ * Currently it's only partially implemented, just enough to support fbdev.
+ *
+ * GEM drivers that provide a GEM-based dumb buffer with a virtual address are supported.
+ */
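+
+/*
+ * A minimal usage sketch (illustrative only; the my_* names are
+ * hypothetical):
+ *
+ *	static const struct drm_client_funcs my_client_funcs = {
+ *		.owner = THIS_MODULE,
+ *	};
+ *
+ *	ret = drm_client_new(dev, &my->client, "my-client", &my_client_funcs);
+ *	if (ret)
+ *		return ret;
+ *
+ *	buffer = drm_client_framebuffer_create(&my->client, 1024, 768,
+ *					       DRM_FORMAT_XRGB8888);
+ */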
+
+static int drm_client_open(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+ struct drm_file *file;
+
+ file = drm_file_alloc(dev->primary);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&dev->filelist_mutex);
+ list_add(&file->lhead, &dev->filelist_internal);
+ mutex_unlock(&dev->filelist_mutex);
+
+ client->file = file;
+
+ return 0;
+}
+
+static void drm_client_close(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+
+ mutex_lock(&dev->filelist_mutex);
+ list_del(&client->file->lhead);
+ mutex_unlock(&dev->filelist_mutex);
+
+ drm_file_free(client->file);
+}
+
+/**
+ * drm_client_new - Create a DRM client
+ * @dev: DRM device
+ * @client: DRM client
+ * @name: Client name
+ * @funcs: DRM client functions (optional)
+ *
+ * The caller needs to hold a reference on @dev before calling this function.
+ * The client is freed when the &drm_device is unregistered. See drm_client_release().
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
+ const char *name, const struct drm_client_funcs *funcs)
+{
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) ||
+ !dev->driver->dumb_create || !dev->driver->gem_prime_vmap)
+ return -ENOTSUPP;
+
+ if (funcs && !try_module_get(funcs->owner))
+ return -ENODEV;
+
+ client->dev = dev;
+ client->name = name;
+ client->funcs = funcs;
+
+ ret = drm_client_open(client);
+ if (ret)
+ goto err_put_module;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_add(&client->list, &dev->clientlist);
+ mutex_unlock(&dev->clientlist_mutex);
+
+ drm_dev_get(dev);
+
+ return 0;
+
+err_put_module:
+ if (funcs)
+ module_put(funcs->owner);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_client_new);
+
+/**
+ * drm_client_release - Release DRM client resources
+ * @client: DRM client
+ *
+ * Releases resources by closing the &drm_file that was opened by drm_client_new().
+ * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
+ *
+ * This function should only be called from the unregister callback. An exception
+ * is fbdev, which cannot free the buffer while userspace has open file descriptors.
+ *
+ * Note:
+ * Clients cannot initiate a release by themselves. This is done to keep the code simple.
+ * The driver has to be unloaded before the client can be unloaded.
+ */
+void drm_client_release(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+
+ DRM_DEV_DEBUG_KMS(dev->dev, "%s\n", client->name);
+
+ drm_client_close(client);
+ drm_dev_put(dev);
+ if (client->funcs)
+ module_put(client->funcs->owner);
+}
+EXPORT_SYMBOL(drm_client_release);
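+
+/*
+ * A sketch of the expected &drm_client_funcs.unregister callback shape
+ * (hypothetical client code; my_client is an illustrative wrapper):
+ *
+ *	static void my_client_unregister(struct drm_client_dev *client)
+ *	{
+ *		struct my_client *my = container_of(client, struct my_client, client);
+ *
+ *		drm_client_release(client);
+ *		kfree(my);
+ *	}
+ */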
+
+void drm_client_dev_unregister(struct drm_device *dev)
+{
+ struct drm_client_dev *client, *tmp;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry_safe(client, tmp, &dev->clientlist, list) {
+ list_del(&client->list);
+ if (client->funcs && client->funcs->unregister) {
+ client->funcs->unregister(client);
+ } else {
+ drm_client_release(client);
+ kfree(client);
+ }
+ }
+ mutex_unlock(&dev->clientlist_mutex);
+}
+
+/**
+ * drm_client_dev_hotplug - Send hotplug event to clients
+ * @dev: DRM device
+ *
+ * This function calls the &drm_client_funcs.hotplug callback on the attached clients.
+ *
+ * drm_kms_helper_hotplug_event() calls this function, so drivers that use it
+ * don't need to call this function themselves.
+ */
+void drm_client_dev_hotplug(struct drm_device *dev)
+{
+ struct drm_client_dev *client;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry(client, &dev->clientlist, list) {
+ if (!client->funcs || !client->funcs->hotplug)
+ continue;
+
+ ret = client->funcs->hotplug(client);
+ DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret);
+ }
+ mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_dev_hotplug);
+
+void drm_client_dev_restore(struct drm_device *dev)
+{
+ struct drm_client_dev *client;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry(client, &dev->clientlist, list) {
+ if (!client->funcs || !client->funcs->restore)
+ continue;
+
+ ret = client->funcs->restore(client);
+ DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret);
+ if (!ret) /* The first one to return zero gets the privilege to restore */
+ break;
+ }
+ mutex_unlock(&dev->clientlist_mutex);
+}
+
+static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
+{
+ struct drm_device *dev = buffer->client->dev;
+
+ if (buffer->vaddr && dev->driver->gem_prime_vunmap)
+ dev->driver->gem_prime_vunmap(buffer->gem, buffer->vaddr);
+
+ if (buffer->gem)
+ drm_gem_object_put_unlocked(buffer->gem);
+
+ if (buffer->handle)
+ drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
+
+ kfree(buffer);
+}
+
+static struct drm_client_buffer *
+drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+{
+ struct drm_mode_create_dumb dumb_args = { };
+ struct drm_device *dev = client->dev;
+ struct drm_client_buffer *buffer;
+ struct drm_gem_object *obj;
+ void *vaddr;
+ int ret;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ buffer->client = client;
+
+ dumb_args.width = width;
+ dumb_args.height = height;
+ dumb_args.bpp = drm_format_plane_cpp(format, 0) * 8;
+ ret = drm_mode_create_dumb(dev, &dumb_args, client->file);
+ if (ret)
+ goto err_delete;
+
+ buffer->handle = dumb_args.handle;
+ buffer->pitch = dumb_args.pitch;
+
+ obj = drm_gem_object_lookup(client->file, dumb_args.handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto err_delete;
+ }
+
+ buffer->gem = obj;
+
+ /*
+ * FIXME: The dependency on GEM here isn't required, we could
+ * convert the driver handle to a dma-buf and use the
+ * backend-agnostic dma-buf vmap support instead. This would
+ * require that the handle2fd prime ioctl is reworked to pull the
+ * fd_install step out of the driver backend hooks, to make that
+ * final step optional for internal users.
+ */
+ vaddr = dev->driver->gem_prime_vmap(obj);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto err_delete;
+ }
+
+ buffer->vaddr = vaddr;
+
+ return buffer;
+
+err_delete:
+ drm_client_buffer_delete(buffer);
+
+ return ERR_PTR(ret);
+}
+
+static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
+{
+ int ret;
+
+ if (!buffer->fb)
+ return;
+
+ ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file);
+ if (ret)
+ DRM_DEV_ERROR(buffer->client->dev->dev,
+ "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
+
+ buffer->fb = NULL;
+}
+
+static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
+ u32 width, u32 height, u32 format)
+{
+ struct drm_client_dev *client = buffer->client;
+ struct drm_mode_fb_cmd fb_req = { };
+ const struct drm_format_info *info;
+ int ret;
+
+ info = drm_format_info(format);
+ fb_req.bpp = info->cpp[0] * 8;
+ fb_req.depth = info->depth;
+ fb_req.width = width;
+ fb_req.height = height;
+ fb_req.handle = buffer->handle;
+ fb_req.pitch = buffer->pitch;
+
+ ret = drm_mode_addfb(client->dev, &fb_req, client->file);
+ if (ret)
+ return ret;
+
+ buffer->fb = drm_framebuffer_lookup(client->dev, client->file, fb_req.fb_id);
+ if (WARN_ON(!buffer->fb))
+ return -ENOENT;
+
+ /* drop the reference we picked up in framebuffer lookup */
+ drm_framebuffer_put(buffer->fb);
+
+ strscpy(buffer->fb->comm, client->name, TASK_COMM_LEN);
+
+ return 0;
+}
+
+/**
+ * drm_client_framebuffer_create - Create a client framebuffer
+ * @client: DRM client
+ * @width: Framebuffer width
+ * @height: Framebuffer height
+ * @format: Buffer format
+ *
+ * This function creates a &drm_client_buffer which consists of a
+ * &drm_framebuffer backed by a dumb buffer.
+ * Call drm_client_framebuffer_delete() to free the buffer.
+ *
+ * Returns:
+ * Pointer to a client buffer or an error pointer on failure.
+ */
+struct drm_client_buffer *
+drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+{
+ struct drm_client_buffer *buffer;
+ int ret;
+
+ buffer = drm_client_buffer_create(client, width, height, format);
+ if (IS_ERR(buffer))
+ return buffer;
+
+ ret = drm_client_buffer_addfb(buffer, width, height, format);
+ if (ret) {
+ drm_client_buffer_delete(buffer);
+ return ERR_PTR(ret);
+ }
+
+ return buffer;
+}
+EXPORT_SYMBOL(drm_client_framebuffer_create);
+
+/**
+ * drm_client_framebuffer_delete - Delete a client framebuffer
+ * @buffer: DRM client buffer (can be NULL)
+ */
+void drm_client_framebuffer_delete(struct drm_client_buffer *buffer)
+{
+ if (!buffer)
+ return;
+
+ drm_client_buffer_rmfb(buffer);
+ drm_client_buffer_delete(buffer);
+}
+EXPORT_SYMBOL(drm_client_framebuffer_delete);
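+
+/*
+ * Illustrative create/delete pairing (hypothetical caller):
+ *
+ *	buffer = drm_client_framebuffer_create(client, width, height, format);
+ *	if (IS_ERR(buffer))
+ *		return PTR_ERR(buffer);
+ *
+ * Draw into buffer->vaddr using buffer->pitch, then clean up with:
+ *
+ *	drm_client_framebuffer_delete(buffer);
+ */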
+
+#ifdef CONFIG_DEBUG_FS
+static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_client_dev *client;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry(client, &dev->clientlist, list)
+ drm_printf(&p, "%s\n", client->name);
+ mutex_unlock(&dev->clientlist_mutex);
+
+ return 0;
+}
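+
+/*
+ * With debugfs mounted at the usual location, this list can be read from
+ * userspace, e.g. (the minor number is device-dependent):
+ *
+ *	cat /sys/kernel/debug/dri/0/internal_clients
+ */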
+
+static const struct drm_info_list drm_client_debugfs_list[] = {
+ { "internal_clients", drm_client_debugfs_internal_clients, 0 },
+};
+
+int drm_client_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(drm_client_debugfs_list,
+ ARRAY_SIZE(drm_client_debugfs_list),
+ minor->debugfs_root, minor);
+}
+#endif
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 9b9ba5d5ec0c..6011d769d50b 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -48,7 +48,7 @@
*
* Connectors must be attached to an encoder to be used. For devices that map
* connectors to encoders 1:1, the connector should be attached at
- * initialization time with a call to drm_mode_connector_attach_encoder(). The
+ * initialization time with a call to drm_connector_attach_encoder(). The
* driver must also set the &drm_connector.encoder field to point to the
* attached encoder.
*
@@ -87,6 +87,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
{ DRM_MODE_CONNECTOR_DPI, "DPI" },
+ { DRM_MODE_CONNECTOR_WRITEBACK, "Writeback" },
};
void drm_connector_ida_init(void)
@@ -195,6 +196,10 @@ int drm_connector_init(struct drm_device *dev,
struct ida *connector_ida =
&drm_connector_enum_list[connector_type].ida;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+ (!funcs->atomic_destroy_state ||
+ !funcs->atomic_duplicate_state));
+
ret = __drm_mode_object_add(dev, &connector->base,
DRM_MODE_OBJECT_CONNECTOR,
false, drm_connector_free);
@@ -249,7 +254,8 @@ int drm_connector_init(struct drm_device *dev,
config->num_connector++;
spin_unlock_irq(&config->connector_list_lock);
- if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+ if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
+ connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
drm_object_attach_property(&connector->base,
config->edid_property,
0);
@@ -285,7 +291,7 @@ out_put:
EXPORT_SYMBOL(drm_connector_init);
/**
- * drm_mode_connector_attach_encoder - attach a connector to an encoder
+ * drm_connector_attach_encoder - attach a connector to an encoder
* @connector: connector to attach
* @encoder: encoder to attach @connector to
*
@@ -296,8 +302,8 @@ EXPORT_SYMBOL(drm_connector_init);
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
- struct drm_encoder *encoder)
+int drm_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
{
int i;
@@ -315,7 +321,7 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
if (WARN_ON(connector->encoder))
return -EINVAL;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ for (i = 0; i < ARRAY_SIZE(connector->encoder_ids); i++) {
if (connector->encoder_ids[i] == 0) {
connector->encoder_ids[i] = encoder->base.id;
return 0;
@@ -323,7 +329,30 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
}
return -ENOMEM;
}
-EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
+EXPORT_SYMBOL(drm_connector_attach_encoder);
+
+/**
+ * drm_connector_has_possible_encoder - check if the connector and encoder are associated with each other
+ * @connector: the connector
+ * @encoder: the encoder
+ *
+ * Returns:
+ * True if @encoder is one of the possible encoders for @connector.
+ */
+bool drm_connector_has_possible_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+{
+ struct drm_encoder *enc;
+ int i;
+
+ drm_connector_for_each_possible_encoder(connector, enc, i) {
+ if (enc == encoder)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_connector_has_possible_encoder);
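+
+/*
+ * An illustrative caller (hypothetical driver code): a
+ * &drm_connector_helper_funcs.best_encoder implementation can use this
+ * helper to validate a candidate encoder:
+ *
+ *	if (drm_connector_has_possible_encoder(connector, encoder))
+ *		return encoder;
+ *
+ *	return NULL;
+ */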
static void drm_mode_remove(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -577,7 +606,7 @@ __drm_connector_put_safe(struct drm_connector *conn)
/**
* drm_connector_list_iter_next - return next connector
- * @iter: connectr_list iterator
+ * @iter: connector_list iterator
*
* Returns the next connector for @iter, or NULL when the list walk has
* completed.
@@ -720,6 +749,14 @@ static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
};
+static const struct drm_prop_enum_list drm_content_type_enum_list[] = {
+ { DRM_MODE_CONTENT_TYPE_NO_DATA, "No Data" },
+ { DRM_MODE_CONTENT_TYPE_GRAPHICS, "Graphics" },
+ { DRM_MODE_CONTENT_TYPE_PHOTO, "Photo" },
+ { DRM_MODE_CONTENT_TYPE_CINEMA, "Cinema" },
+ { DRM_MODE_CONTENT_TYPE_GAME, "Game" },
+};
+
static const struct drm_prop_enum_list drm_panel_orientation_enum_list[] = {
{ DRM_MODE_PANEL_ORIENTATION_NORMAL, "Normal" },
{ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, "Upside Down" },
@@ -777,7 +814,7 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
* Blob property which contains the current EDID read from the sink. This
* is useful to parse sink identification information like vendor, model
* and serial. Drivers should update this property by calling
- * drm_mode_connector_update_edid_property(), usually after having parsed
+ * drm_connector_update_edid_property(), usually after having parsed
* the EDID using drm_add_edid_modes(). Userspace cannot change this
* property.
* DPMS:
@@ -815,7 +852,7 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
* PATH:
* Connector path property to identify how this sink is physically
* connected. Used by DP MST. This should be set by calling
- * drm_mode_connector_set_path_property(), in the case of DP MST with the
+ * drm_connector_set_path_property(), in the case of DP MST with the
* path property the MST manager created. Userspace cannot change this
* property.
* TILE:
@@ -826,14 +863,14 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
* are not gen-locked. Note that for tiled panels which are genlocked, like
* dual-link LVDS or dual-link DSI, the driver should try to not expose the
* tiling and virtualize both &drm_crtc and &drm_plane if needed. Drivers
- * should update this value using drm_mode_connector_set_tile_property().
+ * should update this value using drm_connector_set_tile_property().
* Userspace cannot change this property.
* link-status:
* Connector link-status property to indicate the status of link. The
* default value of link-status is "GOOD". If something fails during or
* after modeset, the kernel driver may set this to "BAD" and issue a
* hotplug uevent. Drivers should update this value using
- * drm_mode_connector_set_link_status_property().
+ * drm_connector_set_link_status_property().
* non_desktop:
* Indicates the output should be ignored for purposes of displaying a
* standard desktop environment or console. This is most likely because
@@ -997,6 +1034,82 @@ int drm_mode_create_dvi_i_properties(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
/**
+ * DOC: HDMI connector properties
+ *
+ * content type (HDMI specific):
+ * Indicates the content type setting to be used in HDMI infoframes, so
+ * that the external device can adjust its display settings accordingly.
+ *
+ * The value of this property can be one of the following:
+ *
+ * No Data:
+ * Content type is unknown
+ * Graphics:
+ * Content type is graphics
+ * Photo:
+ * Content type is photo
+ * Cinema:
+ * Content type is cinema
+ * Game:
+ * Content type is game
+ *
+ * Drivers can set up this property by calling
+ * drm_connector_attach_content_type_property(). Decoding to
+ * infoframe values is done through drm_hdmi_avi_infoframe_content_type().
+ */
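+
+/*
+ * A minimal sketch of the intended driver flow (illustrative only;
+ * assumes @frame was otherwise filled from the display mode):
+ *
+ *	drm_connector_attach_content_type_property(connector);
+ *	...
+ *	drm_hdmi_avi_infoframe_content_type(&frame, conn_state);
+ */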
+
+/**
+ * drm_connector_attach_content_type_property - attach content-type property
+ * @connector: connector to attach content type property on.
+ *
+ * Called by a driver the first time an HDMI connector is created.
+ */
+int drm_connector_attach_content_type_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_content_type_property(connector->dev))
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.content_type_property,
+ DRM_MODE_CONTENT_TYPE_NO_DATA);
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_content_type_property);
+
+/**
+ * drm_hdmi_avi_infoframe_content_type() - fill the HDMI AVI infoframe
+ * content type information, based on the corresponding DRM property
+ * @frame: HDMI AVI infoframe
+ * @conn_state: DRM display connector state
+ */
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ switch (conn_state->content_type) {
+ case DRM_MODE_CONTENT_TYPE_GRAPHICS:
+ frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+ break;
+ case DRM_MODE_CONTENT_TYPE_CINEMA:
+ frame->content_type = HDMI_CONTENT_TYPE_CINEMA;
+ break;
+ case DRM_MODE_CONTENT_TYPE_GAME:
+ frame->content_type = HDMI_CONTENT_TYPE_GAME;
+ break;
+ case DRM_MODE_CONTENT_TYPE_PHOTO:
+ frame->content_type = HDMI_CONTENT_TYPE_PHOTO;
+ break;
+ default:
+ /* Graphics is the default (0) */
+ frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+ }
+
+ frame->itc = conn_state->content_type != DRM_MODE_CONTENT_TYPE_NO_DATA;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type);
+
+/**
* drm_create_tv_properties - create TV specific connector properties
* @dev: DRM device
* @num_modes: number of different TV formats (modes) supported
@@ -1261,6 +1374,33 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
/**
+ * drm_mode_create_content_type_property - create content type property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it is needed. The property must then
+ * be attached to the desired connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_content_type_property(struct drm_device *dev)
+{
+ if (dev->mode_config.content_type_property)
+ return 0;
+
+ dev->mode_config.content_type_property =
+ drm_property_create_enum(dev, 0, "content type",
+ drm_content_type_enum_list,
+ ARRAY_SIZE(drm_content_type_enum_list));
+
+ if (dev->mode_config.content_type_property == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_content_type_property);
+
+/**
* drm_mode_create_suggested_offset_properties - create suggests offset properties
* @dev: DRM device
*
@@ -1285,7 +1425,7 @@ int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
/**
- * drm_mode_connector_set_path_property - set tile property on connector
+ * drm_connector_set_path_property - set path property on connector
* @connector: connector to set property on.
* @path: path to use for property; must not be NULL.
*
@@ -1297,8 +1437,8 @@ EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_connector_set_path_property(struct drm_connector *connector,
- const char *path)
+int drm_connector_set_path_property(struct drm_connector *connector,
+ const char *path)
{
struct drm_device *dev = connector->dev;
int ret;
@@ -1311,10 +1451,10 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector,
dev->mode_config.path_property);
return ret;
}
-EXPORT_SYMBOL(drm_mode_connector_set_path_property);
+EXPORT_SYMBOL(drm_connector_set_path_property);
/**
- * drm_mode_connector_set_tile_property - set tile property on connector
+ * drm_connector_set_tile_property - set tile property on connector
* @connector: connector to set property on.
*
* This looks up the tile information for a connector, and creates a
@@ -1324,7 +1464,7 @@ EXPORT_SYMBOL(drm_mode_connector_set_path_property);
* Returns:
* Zero on success, errno on failure.
*/
-int drm_mode_connector_set_tile_property(struct drm_connector *connector)
+int drm_connector_set_tile_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
char tile[256];
@@ -1354,10 +1494,10 @@ int drm_mode_connector_set_tile_property(struct drm_connector *connector)
dev->mode_config.tile_property);
return ret;
}
-EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
+EXPORT_SYMBOL(drm_connector_set_tile_property);
/**
- * drm_mode_connector_update_edid_property - update the edid property of a connector
+ * drm_connector_update_edid_property - update the edid property of a connector
* @connector: drm connector
* @edid: new value of the edid property
*
@@ -1367,8 +1507,8 @@ EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- const struct edid *edid)
+int drm_connector_update_edid_property(struct drm_connector *connector,
+ const struct edid *edid)
{
struct drm_device *dev = connector->dev;
size_t size = 0;
@@ -1406,10 +1546,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
dev->mode_config.edid_property);
return ret;
}
-EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+EXPORT_SYMBOL(drm_connector_update_edid_property);
/**
- * drm_mode_connector_set_link_status_property - Set link status property of a connector
+ * drm_connector_set_link_status_property - Set link status property of a connector
* @connector: drm connector
* @link_status: new value of link status property (0: Good, 1: Bad)
*
@@ -1427,8 +1567,8 @@ EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
* it is not limited to DP or link training. For example, if we implement
* asynchronous setcrtc, this property can be used to report any failures in that.
*/
-void drm_mode_connector_set_link_status_property(struct drm_connector *connector,
- uint64_t link_status)
+void drm_connector_set_link_status_property(struct drm_connector *connector,
+ uint64_t link_status)
{
struct drm_device *dev = connector->dev;
@@ -1436,7 +1576,7 @@ void drm_mode_connector_set_link_status_property(struct drm_connector *connector
connector->state->link_status = link_status;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
-EXPORT_SYMBOL(drm_mode_connector_set_link_status_property);
+EXPORT_SYMBOL(drm_connector_set_link_status_property);
/**
* drm_connector_init_panel_orientation_property -
@@ -1489,7 +1629,7 @@ int drm_connector_init_panel_orientation_property(
}
EXPORT_SYMBOL(drm_connector_init_panel_orientation_property);
-int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+int drm_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
@@ -1507,8 +1647,8 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
return ret;
}
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
{
struct drm_mode_connector_set_property *conn_set_prop = data;
struct drm_mode_obj_set_property obj_set_prop = {
@@ -1589,22 +1729,19 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (!connector)
return -ENOENT;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
- if (connector->encoder_ids[i] != 0)
- encoders_count++;
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ encoders_count++;
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] != 0) {
- if (put_user(connector->encoder_ids[i],
- encoder_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ if (put_user(encoder->base.id, encoder_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
}
+ copied++;
}
}
out_resp->count_encoders = encoders_count;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 98a36e6c69ad..bae43938c8f6 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -225,16 +225,9 @@ static const char *drm_crtc_fence_get_timeline_name(struct dma_fence *fence)
return crtc->timeline_name;
}
-static bool drm_crtc_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static const struct dma_fence_ops drm_crtc_fence_ops = {
.get_driver_name = drm_crtc_fence_get_driver_name,
.get_timeline_name = drm_crtc_fence_get_timeline_name,
- .enable_signaling = drm_crtc_fence_enable_signaling,
- .wait = dma_fence_default_wait,
};
struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
@@ -286,6 +279,10 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
if (WARN_ON(config->num_crtc >= 32))
return -EINVAL;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+ (!funcs->atomic_destroy_state ||
+ !funcs->atomic_duplicate_state));
+
crtc->dev = dev;
crtc->funcs = funcs;
@@ -325,9 +322,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->primary = primary;
crtc->cursor = cursor;
if (primary && !primary->possible_crtcs)
- primary->possible_crtcs = 1 << drm_crtc_index(crtc);
+ primary->possible_crtcs = drm_crtc_mask(crtc);
if (cursor && !cursor->possible_crtcs)
- cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
+ cursor->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_crtc_crc_init(crtc);
if (ret) {
@@ -464,32 +461,42 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set,
struct drm_crtc *tmp;
int ret;
+ WARN_ON(drm_drv_uses_atomic_modeset(crtc->dev));
+
/*
* NOTE: ->set_config can also disable other crtcs (if we steal all
* connectors from it), hence we need to refcount the fbs across all
* crtcs. Atomic modeset will have saner semantics ...
*/
- drm_for_each_crtc(tmp, crtc->dev)
- tmp->primary->old_fb = tmp->primary->fb;
+ drm_for_each_crtc(tmp, crtc->dev) {
+ struct drm_plane *plane = tmp->primary;
+
+ plane->old_fb = plane->fb;
+ }
fb = set->fb;
ret = crtc->funcs->set_config(set, ctx);
if (ret == 0) {
- crtc->primary->crtc = fb ? crtc : NULL;
- crtc->primary->fb = fb;
+ struct drm_plane *plane = crtc->primary;
+
+ plane->crtc = fb ? crtc : NULL;
+ plane->fb = fb;
}
drm_for_each_crtc(tmp, crtc->dev) {
- if (tmp->primary->fb)
- drm_framebuffer_get(tmp->primary->fb);
- if (tmp->primary->old_fb)
- drm_framebuffer_put(tmp->primary->old_fb);
- tmp->primary->old_fb = NULL;
+ struct drm_plane *plane = tmp->primary;
+
+ if (plane->fb)
+ drm_framebuffer_get(plane->fb);
+ if (plane->old_fb)
+ drm_framebuffer_put(plane->old_fb);
+ plane->old_fb = NULL;
}
return ret;
}
+
/**
* drm_mode_set_config_internal - helper to call &drm_mode_config_funcs.set_config
* @set: modeset config to set
@@ -640,7 +647,9 @@ retry:
ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode);
if (ret) {
- DRM_DEBUG_KMS("Invalid mode\n");
+ DRM_DEBUG_KMS("Invalid mode (ret=%d, status=%s)\n",
+ ret, drm_get_mode_status_name(mode->status));
+ drm_mode_debug_printmodeline(mode);
goto out;
}
@@ -732,7 +741,11 @@ retry:
set.connectors = connector_set;
set.num_connectors = crtc_req->count_connectors;
set.fb = fb;
- ret = __drm_mode_set_config_internal(&set, &ctx);
+
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = crtc->funcs->set_config(&set, &ctx);
+ else
+ ret = __drm_mode_set_config_internal(&set, &ctx);
out:
if (fb)
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 5d307b23a4e6..b61322763394 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -56,12 +56,21 @@ int drm_mode_setcrtc(struct drm_device *dev,
int drm_modeset_register_all(struct drm_device *dev);
void drm_modeset_unregister_all(struct drm_device *dev);
+/* drm_modes.c */
+const char *drm_get_mode_status_name(enum drm_mode_status status);
+
/* IOCTLs */
int drm_mode_getresources(struct drm_device *dev,
void *data, struct drm_file *file_priv);
/* drm_dumb_buffers.c */
+int drm_mode_create_dumb(struct drm_device *dev,
+ struct drm_mode_create_dumb *args,
+ struct drm_file *file_priv);
+int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
+ struct drm_file *file_priv);
+
/* IOCTLs */
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
@@ -139,7 +148,7 @@ void drm_connector_ida_init(void);
void drm_connector_ida_destroy(void);
void drm_connector_unregister_all(struct drm_device *dev);
int drm_connector_register_all(struct drm_device *dev);
-int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+int drm_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value);
int drm_connector_create_standard_properties(struct drm_device *dev);
@@ -147,8 +156,8 @@ const char *drm_get_connector_force_name(enum drm_connector_force force);
void drm_connector_free_work_fn(struct work_struct *work);
/* IOCTL */
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
+int drm_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_getconnector(struct drm_device *dev,
void *data, struct drm_file *file_priv);
@@ -163,14 +172,19 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
const struct drm_framebuffer *fb);
void drm_fb_release(struct drm_file *file_priv);
+int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
+ struct drm_file *file_priv);
+int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
+ struct drm_file *file_priv);
+
/* IOCTL */
-int drm_mode_addfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
+int drm_mode_addfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_addfb2(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-int drm_mode_rmfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
+int drm_mode_rmfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index b2482818fee8..6f28fe58f169 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/export.h>
+#include <drm/drm_client.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_edid.h>
#include <drm/drm_atomic.h>
@@ -164,6 +165,12 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
DRM_ERROR("Failed to create framebuffer debugfs file\n");
return ret;
}
+
+ ret = drm_client_debugfs_init(minor);
+ if (ret) {
+ DRM_ERROR("Failed to create client debugfs file\n");
+ return ret;
+ }
}
if (dev->driver->debugfs_init) {
@@ -307,13 +314,13 @@ static ssize_t edid_write(struct file *file, const char __user *ubuf,
if (len == 5 && !strncmp(buf, "reset", 5)) {
connector->override_edid = false;
- ret = drm_mode_connector_update_edid_property(connector, NULL);
+ ret = drm_connector_update_edid_property(connector, NULL);
} else if (len < EDID_LENGTH ||
EDID_LENGTH * (1 + edid->extensions) > len)
ret = -EINVAL;
else {
connector->override_edid = false;
- ret = drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_connector_update_edid_property(connector, edid);
if (!ret)
connector->override_edid = true;
}
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 9f8312137cad..99961192bf03 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -139,6 +139,7 @@ static int crtc_crc_data_count(struct drm_crtc_crc *crc)
static void crtc_crc_cleanup(struct drm_crtc_crc *crc)
{
kfree(crc->entries);
+ crc->overflow = false;
crc->entries = NULL;
crc->head = 0;
crc->tail = 0;
@@ -391,8 +392,14 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
tail = crc->tail;
if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
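+ /*
+ * Log the overflow only on the first dropped entry, to avoid flooding
+ * the log while userspace is too slow to read.
+ */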
+ bool was_overflow = crc->overflow;
+
+ crc->overflow = true;
spin_unlock(&crc->lock);
- DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
+
+ if (!was_overflow)
+ DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
+
return -ENOBUFS;
}
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
new file mode 100644
index 000000000000..988513346e9c
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DisplayPort CEC-Tunneling-over-AUX support
+ *
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <drm/drm_dp_helper.h>
+#include <media/cec.h>
+
+/*
+ * Unfortunately it turns out that we have a chicken-and-egg situation
+ * here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters
+ * have a converter chip that supports CEC-Tunneling-over-AUX (usually the
+ * Parade PS176), but they do not wire up the CEC pin, thus making CEC
+ * useless.
+ *
+ * Sadly there is no way for this driver to know this. What happens is
+ * that a /dev/cecX device is created that is isolated and unable to see
+ * any of the other CEC devices. Quite literally the CEC wire is cut
+ * (or in this case, never connected in the first place).
+ *
+ * The reason so few adapters support this is that this tunneling protocol
+ * was never supported by any OS. So there was no easy way of testing it,
+ * and no incentive to correctly wire up the CEC pin.
+ *
+ * Hopefully by creating this driver it will be easier for vendors to
+ * finally fix their adapters and test the CEC functionality.
+ *
+ * I keep a list of known working adapters here:
+ *
+ * https://hverkuil.home.xs4all.nl/cec-status.txt
+ *
+ * Please mail me (hverkuil@xs4all.nl) if you find an adapter that works
+ * and is not yet listed there.
+ *
+ * Note that the current implementation does not support CEC over an MST hub.
+ * As far as I can see there is no mechanism defined in the DisplayPort
+ * standard to transport CEC interrupts over an MST device. It might be
+ * possible to do this through polling, but I have not been able to get that
+ * to work.
+ */
+
+/**
+ * DOC: dp cec helpers
+ *
+ * These functions take care of supporting the CEC-Tunneling-over-AUX
+ * feature of DisplayPort-to-HDMI adapters.
+ */
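+
+/*
+ * A rough sketch of how a driver wires these helpers up (hypothetical
+ * call sites, error handling elided):
+ *
+ *	drm_dp_cec_register_connector(&aux, connector->name, dev->dev);
+ *
+ *	drm_dp_cec_set_edid(&aux, edid);	on EDID updates
+ *	drm_dp_cec_unset_edid(&aux);		when the HPD goes away
+ *	drm_dp_cec_irq(&aux);			from the IRQ_HPD handler
+ *
+ *	drm_dp_cec_unregister_connector(&aux);	at connector teardown
+ */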
+
+/*
+ * When the EDID is unset because the HPD went low, then the CEC DPCD registers
+ * typically can no longer be read (true for a DP-to-HDMI adapter since it is
+ * powered by the HPD). However, some displays toggle the HPD off and on for a
+ * short period for one reason or another, and that would cause the CEC adapter
+ * to be removed and added again, even though nothing else changed.
+ *
+ * This module parameter sets a delay in seconds before the CEC adapter is
+ * actually unregistered. Only if the HPD does not return within that time will
+ * the CEC adapter be unregistered.
+ *
+ * If it is set to a value >= NEVER_UNREG_DELAY, then the CEC adapter will never
+ * be unregistered for as long as the connector remains registered.
+ *
+ * If it is set to 0, then the CEC adapter will be unregistered immediately as
+ * soon as the HPD disappears.
+ *
+ * The default is one second to prevent short HPD glitches from unregistering
+ * the CEC adapter.
+ *
+ * Note that for integrated HDMI branch devices that support CEC, the DPCD
+ * registers remain available even if the HPD goes low, since the device is
+ * not powered by the HPD. In that case the CEC adapter will never be
+ * unregistered during the lifetime of the connector. At least, this is the
+ * theory, since I do not
+ * have hardware with an integrated HDMI branch device that supports CEC.
+ */
+#define NEVER_UNREG_DELAY 1000
+static unsigned int drm_dp_cec_unregister_delay = 1;
+module_param(drm_dp_cec_unregister_delay, uint, 0600);
+MODULE_PARM_DESC(drm_dp_cec_unregister_delay,
+ "CEC unregister delay in seconds, 0: no delay, >= 1000: never unregister");
+
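+/*
+ * For example, assuming drm_dp_cec is built into drm.ko, the delay can be
+ * raised to five seconds on the kernel command line with
+ * drm.drm_dp_cec_unregister_delay=5, or at runtime via
+ * /sys/module/drm/parameters/drm_dp_cec_unregister_delay.
+ */
+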
+static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
+ ssize_t err = 0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ return (enable && err < 0) ? err : 0;
+}
+
+static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ /* Bit 15 (logical address 15) should always be set */
+ u16 la_mask = 1 << CEC_LOG_ADDR_BROADCAST;
+ u8 mask[2];
+ ssize_t err;
+
+ if (addr != CEC_LOG_ADDR_INVALID)
+ la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
+ mask[0] = la_mask & 0xff;
+ mask[1] = la_mask >> 8;
+ err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
+ return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
+}
+
+static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ unsigned int retries = min(5, attempts - 1);
+ ssize_t err;
+
+ err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
+ msg->msg, msg->len);
+ if (err < 0)
+ return err;
+
+ err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
+ (msg->len - 1) | (retries << 4) |
+ DP_CEC_TX_MESSAGE_SEND);
+ return err < 0 ? err : 0;
+}
+
+static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
+ bool enable)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ ssize_t err;
+ u8 val;
+
+ if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
+ if (err >= 0) {
+ if (enable)
+ val |= DP_CEC_SNOOPING_ENABLE;
+ else
+ val &= ~DP_CEC_SNOOPING_ENABLE;
+ err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ }
+ return (enable && err < 0) ? err : 0;
+}
+
+static void drm_dp_cec_adap_status(struct cec_adapter *adap,
+ struct seq_file *file)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ struct drm_dp_desc desc;
+ struct drm_dp_dpcd_ident *id = &desc.ident;
+
+ if (drm_dp_read_desc(aux, &desc, true))
+ return;
+ seq_printf(file, "OUI: %*phD\n",
+ (int)sizeof(id->oui), id->oui);
+ seq_printf(file, "ID: %*pE\n",
+ (int)strnlen(id->device_id, sizeof(id->device_id)),
+ id->device_id);
+ seq_printf(file, "HW Rev: %d.%d\n", id->hw_rev >> 4, id->hw_rev & 0xf);
+ /*
+ * Show this both in decimal and hex: at least one vendor
+ * always reports this in hex.
+ */
+ seq_printf(file, "FW/SW Rev: %d.%d (0x%02x.0x%02x)\n",
+ id->sw_major_rev, id->sw_minor_rev,
+ id->sw_major_rev, id->sw_minor_rev);
+}
+
+static const struct cec_adap_ops drm_dp_cec_adap_ops = {
+ .adap_enable = drm_dp_cec_adap_enable,
+ .adap_log_addr = drm_dp_cec_adap_log_addr,
+ .adap_transmit = drm_dp_cec_adap_transmit,
+ .adap_monitor_all_enable = drm_dp_cec_adap_monitor_all_enable,
+ .adap_status = drm_dp_cec_adap_status,
+};
+
+static int drm_dp_cec_received(struct drm_dp_aux *aux)
+{
+ struct cec_adapter *adap = aux->cec.adap;
+ struct cec_msg msg;
+ u8 rx_msg_info;
+ ssize_t err;
+
+ err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
+ if (err < 0)
+ return err;
+
+ if (!(rx_msg_info & DP_CEC_RX_MESSAGE_ENDED))
+ return 0;
+
+ msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
+ err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
+ if (err < 0)
+ return err;
+
+ cec_received_msg(adap, &msg);
+ return 0;
+}
+
+static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
+{
+ struct cec_adapter *adap = aux->cec.adap;
+ u8 flags;
+
+ if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
+ return;
+
+ if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
+ drm_dp_cec_received(aux);
+
+ if (flags & DP_CEC_TX_MESSAGE_SENT)
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK);
+ else if (flags & DP_CEC_TX_LINE_ERROR)
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR |
+ CEC_TX_STATUS_MAX_RETRIES);
+ else if (flags &
+ (DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
+ CEC_TX_STATUS_MAX_RETRIES);
+ drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
+}
+
+/**
+ * drm_dp_cec_irq() - handle CEC interrupt, if any
+ * @aux: DisplayPort AUX channel
+ *
+ * Should be called when handling an IRQ_HPD request. If CEC-Tunneling-over-AUX
+ * is present, then it will check for a CEC_IRQ and handle it accordingly.
+ */
+void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+ u8 cec_irq;
+ int ret;
+
+ mutex_lock(&aux->cec.lock);
+ if (!aux->cec.adap)
+ goto unlock;
+
+ ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
+ &cec_irq);
+ if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
+ goto unlock;
+
+ drm_dp_cec_handle_irq(aux);
+ drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
+unlock:
+ mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_irq);
+
+static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
+{
+ u8 cap = 0;
+
+ if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
+ !(cap & DP_CEC_TUNNELING_CAPABLE))
+ return false;
+ if (cec_cap)
+ *cec_cap = cap;
+ return true;
+}
+
+/*
+ * Called if the HPD was low for more than drm_dp_cec_unregister_delay
+ * seconds. This unregisters the CEC adapter.
+ */
+static void drm_dp_cec_unregister_work(struct work_struct *work)
+{
+ struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux,
+ cec.unregister_work.work);
+
+ mutex_lock(&aux->cec.lock);
+ cec_unregister_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+ mutex_unlock(&aux->cec.lock);
+}
+
+/*
+ * A new EDID is set. If there is no CEC adapter, then create one. If
+ * there was a CEC adapter, then check if the CEC adapter properties
+ * were unchanged and just update the CEC physical address. Otherwise
+ * unregister the old CEC adapter and create a new one.
+ */
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
+{
+ u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD;
+ unsigned int num_las = 1;
+ u8 cap;
+
+#ifndef CONFIG_MEDIA_CEC_RC
+ /*
+ * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
+ * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
+ *
+ * Do this here as well to ensure the tests against cec_caps are
+ * correct.
+ */
+ cec_caps &= ~CEC_CAP_RC;
+#endif
+ cancel_delayed_work_sync(&aux->cec.unregister_work);
+
+ mutex_lock(&aux->cec.lock);
+ if (!drm_dp_cec_cap(aux, &cap)) {
+ /* CEC is not supported, unregister any existing adapter */
+ cec_unregister_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+ goto unlock;
+ }
+
+ if (cap & DP_CEC_SNOOPING_CAPABLE)
+ cec_caps |= CEC_CAP_MONITOR_ALL;
+ if (cap & DP_CEC_MULTIPLE_LA_CAPABLE)
+ num_las = CEC_MAX_LOG_ADDRS;
+
+ if (aux->cec.adap) {
+ if (aux->cec.adap->capabilities == cec_caps &&
+ aux->cec.adap->available_log_addrs == num_las) {
+ /* Unchanged, so just set the phys addr */
+ cec_s_phys_addr_from_edid(aux->cec.adap, edid);
+ goto unlock;
+ }
+ /*
+ * The capabilities changed, so unregister the old
+ * adapter first.
+ */
+ cec_unregister_adapter(aux->cec.adap);
+ }
+
+ /* Create a new adapter */
+ aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
+ aux, aux->cec.name, cec_caps,
+ num_las);
+ if (IS_ERR(aux->cec.adap)) {
+ aux->cec.adap = NULL;
+ goto unlock;
+ }
+ if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) {
+ cec_delete_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+ } else {
+ /*
+ * Update the phys addr for the new CEC adapter. When called
+ * from drm_dp_cec_register_connector() edid == NULL, so in
+ * that case the phys addr is just invalidated.
+ */
+ cec_s_phys_addr_from_edid(aux->cec.adap, edid);
+ }
+unlock:
+ mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_set_edid);
+
+/*
+ * The EDID disappeared (likely because of the HPD going down).
+ */
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+ cancel_delayed_work_sync(&aux->cec.unregister_work);
+
+ mutex_lock(&aux->cec.lock);
+ if (!aux->cec.adap)
+ goto unlock;
+
+ cec_phys_addr_invalidate(aux->cec.adap);
+ /*
+ * We're done if we want to keep the CEC device
+ * (drm_dp_cec_unregister_delay is >= NEVER_UNREG_DELAY) or if the
+ * DPCD still indicates the CEC capability (expected for an integrated
+ * HDMI branch device).
+ */
+ if (drm_dp_cec_unregister_delay < NEVER_UNREG_DELAY &&
+ !drm_dp_cec_cap(aux, NULL)) {
+ /*
+ * Unregister the CEC adapter after drm_dp_cec_unregister_delay
+ * seconds. This is to debounce short HPD off-and-on cycles from
+ * displays.
+ */
+ schedule_delayed_work(&aux->cec.unregister_work,
+ drm_dp_cec_unregister_delay * HZ);
+ }
+unlock:
+ mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_unset_edid);
+
+/**
+ * drm_dp_cec_register_connector() - register a new connector
+ * @aux: DisplayPort AUX channel
+ * @name: name of the CEC device
+ * @parent: parent device
+ *
+ * A new connector was registered with associated CEC adapter name and
+ * CEC adapter parent device. After registering the name and parent,
+ * drm_dp_cec_set_edid() is called to check whether the connector supports
+ * CEC and to register a CEC adapter if it does.
+ */
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
+ struct device *parent)
+{
+ WARN_ON(aux->cec.adap);
+ aux->cec.name = name;
+ aux->cec.parent = parent;
+ INIT_DELAYED_WORK(&aux->cec.unregister_work,
+ drm_dp_cec_unregister_work);
+
+ drm_dp_cec_set_edid(aux, NULL);
+}
+EXPORT_SYMBOL(drm_dp_cec_register_connector);
+
+/**
+ * drm_dp_cec_unregister_connector() - unregister the CEC adapter, if any
+ * @aux: DisplayPort AUX channel
+ */
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+ if (!aux->cec.adap)
+ return;
+ cancel_delayed_work_sync(&aux->cec.unregister_work);
+ cec_unregister_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+}
+EXPORT_SYMBOL(drm_dp_cec_unregister_connector);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index a7ba602a43a8..0cccbcb2d03e 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -185,6 +185,20 @@ EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
#define AUX_RETRY_INTERVAL 500 /* us */
+static inline void
+drm_dp_dump_access(const struct drm_dp_aux *aux,
+ u8 request, uint offset, void *buffer, int ret)
+{
+ const char *arrow = request == DP_AUX_NATIVE_READ ? "->" : "<-";
+
+ if (ret > 0)
+ drm_dbg(DRM_UT_DP, "%s: 0x%05x AUX %s (ret=%3d) %*ph\n",
+ aux->name, offset, arrow, ret, min(ret, 20), buffer);
+ else
+ drm_dbg(DRM_UT_DP, "%s: 0x%05x AUX %s (ret=%3d)\n",
+ aux->name, offset, arrow, ret);
+}
+
/**
* DOC: dp helpers
*
@@ -288,10 +302,14 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer,
1);
if (ret != 1)
- return ret;
+ goto out;
- return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
- size);
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
+ size);
+
+out:
+ drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret);
+ return ret;
}
EXPORT_SYMBOL(drm_dp_dpcd_read);
@@ -312,8 +330,12 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
{
- return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
- size);
+ int ret;
+
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
+ size);
+ drm_dp_dump_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, ret);
+ return ret;
}
EXPORT_SYMBOL(drm_dp_dpcd_write);
@@ -1087,6 +1109,7 @@ static void drm_dp_aux_crc_work(struct work_struct *work)
void drm_dp_aux_init(struct drm_dp_aux *aux)
{
mutex_init(&aux->hw_mutex);
+ mutex_init(&aux->cec.lock);
INIT_WORK(&aux->crc_work, drm_dp_aux_crc_work);
aux->ddc.algo = &drm_dp_i2c_algo;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 658830620ca3..7780567aa669 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1215,7 +1215,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
port->pdt == DP_PEER_DEVICE_SST_SINK) &&
port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
- drm_mode_connector_set_tile_property(port->connector);
+ drm_connector_set_tile_property(port->connector);
}
(*mstb->mgr->cbs->register_connector)(port->connector);
}
@@ -2559,7 +2559,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
edid = drm_edid_duplicate(port->cached_edid);
else {
edid = drm_get_edid(connector, &port->aux.ddc);
- drm_mode_connector_set_tile_property(connector);
+ drm_connector_set_tile_property(connector);
}
port->has_audio = drm_detect_monitor_audio(edid);
drm_dp_put_port(port);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7af748ed1c58..ea4941da9b27 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/srcu.h>
+#include <drm/drm_client.h>
#include <drm/drm_drv.h>
#include <drm/drmP.h>
@@ -53,13 +54,14 @@ MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
-"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
-"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
-"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
-"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
-"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
-"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
-"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)");
+"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
+"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
+"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
+"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
+"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
+"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
+"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
+"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
module_param_named(debug, drm_debug, int, 0600);
static DEFINE_SPINLOCK(drm_minor_lock);
@@ -505,6 +507,8 @@ int drm_dev_init(struct drm_device *dev,
dev->driver = driver;
INIT_LIST_HEAD(&dev->filelist);
+ INIT_LIST_HEAD(&dev->filelist_internal);
+ INIT_LIST_HEAD(&dev->clientlist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
@@ -514,6 +518,7 @@ int drm_dev_init(struct drm_device *dev,
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->filelist_mutex);
+ mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->ctxlist_mutex);
mutex_init(&dev->master_mutex);
@@ -569,6 +574,7 @@ err_minors:
err_free:
mutex_destroy(&dev->master_mutex);
mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
return ret;
@@ -603,6 +609,7 @@ void drm_dev_fini(struct drm_device *dev)
mutex_destroy(&dev->master_mutex);
mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
kfree(dev->unique);
@@ -858,6 +865,8 @@ void drm_dev_unregister(struct drm_device *dev)
dev->registered = false;
+ drm_client_dev_unregister(dev);
+
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_unregister_all(dev);
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 9e2ae02f31e0..81dfdd33753a 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -53,10 +53,10 @@
* a hardware-specific ioctl to allocate suitable buffer objects.
*/
-int drm_mode_create_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_mode_create_dumb(struct drm_device *dev,
+ struct drm_mode_create_dumb *args,
+ struct drm_file *file_priv)
{
- struct drm_mode_create_dumb *args = data;
u32 cpp, stride, size;
if (!dev->driver->dumb_create)
@@ -92,6 +92,12 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
return dev->driver->dumb_create(file_priv, dev, args);
}
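+
+/*
+ * Thin ioctl wrapper around drm_mode_create_dumb(); the split lets
+ * in-kernel users such as drm_client create dumb buffers without going
+ * through the ioctl entry point.
+ */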
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ return drm_mode_create_dumb(dev, data, file_priv);
+}
+
/**
* drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
* @dev: DRM device
@@ -123,17 +129,22 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
&args->offset);
}
-int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
+ struct drm_file *file_priv)
{
- struct drm_mode_destroy_dumb *args = data;
-
if (!dev->driver->dumb_create)
return -ENOSYS;
if (dev->driver->dumb_destroy)
- return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+ return dev->driver->dumb_destroy(file_priv, dev, handle);
else
- return drm_gem_dumb_destroy(file_priv, dev, args->handle);
+ return drm_gem_dumb_destroy(file_priv, dev, handle);
}
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ return drm_mode_destroy_dumb(dev, args->handle, file_priv);
+}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a5808382bdf0..5dc742b27ca0 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -163,8 +163,9 @@ static const struct edid_quirk {
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
- /* HTC Vive VR Headset */
+ /* HTC Vive and Vive Pro VR Headsets */
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
+ { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
/* Oculus Rift DK1, DK2, and CV1 VR Headsets */
{ "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
@@ -687,562 +688,562 @@ static const struct minimode extra_modes[] = {
static const struct drm_display_mode edid_cea_modes[] = {
/* 0 - dummy, VICs start at 1 */
{ },
- /* 1 - 640x480@60Hz */
+ /* 1 - 640x480@60Hz 4:3 */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 2 - 720x480@60Hz */
+ /* 2 - 720x480@60Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 3 - 720x480@60Hz */
+ /* 3 - 720x480@60Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 4 - 1280x720@60Hz */
+ /* 4 - 1280x720@60Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 5 - 1920x1080i@60Hz */
+ /* 5 - 1920x1080i@60Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 6 - 720(1440)x480i@60Hz */
+ /* 6 - 720(1440)x480i@60Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 7 - 720(1440)x480i@60Hz */
+ /* 7 - 720(1440)x480i@60Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 8 - 720(1440)x240@60Hz */
+ /* 8 - 720(1440)x240@60Hz 4:3 */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 9 - 720(1440)x240@60Hz */
+ /* 9 - 720(1440)x240@60Hz 16:9 */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 10 - 2880x480i@60Hz */
+ /* 10 - 2880x480i@60Hz 4:3 */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 11 - 2880x480i@60Hz */
+ /* 11 - 2880x480i@60Hz 16:9 */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 12 - 2880x240@60Hz */
+ /* 12 - 2880x240@60Hz 4:3 */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 13 - 2880x240@60Hz */
+ /* 13 - 2880x240@60Hz 16:9 */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 14 - 1440x480@60Hz */
+ /* 14 - 1440x480@60Hz 4:3 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 15 - 1440x480@60Hz */
+ /* 15 - 1440x480@60Hz 16:9 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 16 - 1920x1080@60Hz */
+ /* 16 - 1920x1080@60Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 17 - 720x576@50Hz */
+ /* 17 - 720x576@50Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 18 - 720x576@50Hz */
+ /* 18 - 720x576@50Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 19 - 1280x720@50Hz */
+ /* 19 - 1280x720@50Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 20 - 1920x1080i@50Hz */
+ /* 20 - 1920x1080i@50Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 21 - 720(1440)x576i@50Hz */
+ /* 21 - 720(1440)x576i@50Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 22 - 720(1440)x576i@50Hz */
+ /* 22 - 720(1440)x576i@50Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 23 - 720(1440)x288@50Hz */
+ /* 23 - 720(1440)x288@50Hz 4:3 */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 24 - 720(1440)x288@50Hz */
+ /* 24 - 720(1440)x288@50Hz 16:9 */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 25 - 2880x576i@50Hz */
+ /* 25 - 2880x576i@50Hz 4:3 */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 26 - 2880x576i@50Hz */
+ /* 26 - 2880x576i@50Hz 16:9 */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 27 - 2880x288@50Hz */
+ /* 27 - 2880x288@50Hz 4:3 */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 28 - 2880x288@50Hz */
+ /* 28 - 2880x288@50Hz 16:9 */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 29 - 1440x576@50Hz */
+ /* 29 - 1440x576@50Hz 4:3 */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 30 - 1440x576@50Hz */
+ /* 30 - 1440x576@50Hz 16:9 */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 31 - 1920x1080@50Hz */
+ /* 31 - 1920x1080@50Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 32 - 1920x1080@24Hz */
+ /* 32 - 1920x1080@24Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 33 - 1920x1080@25Hz */
+ /* 33 - 1920x1080@25Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 34 - 1920x1080@30Hz */
+ /* 34 - 1920x1080@30Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 35 - 2880x480@60Hz */
+ /* 35 - 2880x480@60Hz 4:3 */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 36 - 2880x480@60Hz */
+ /* 36 - 2880x480@60Hz 16:9 */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 37 - 2880x576@50Hz */
+ /* 37 - 2880x576@50Hz 4:3 */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 38 - 2880x576@50Hz */
+ /* 38 - 2880x576@50Hz 16:9 */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 39 - 1920x1080i@50Hz */
+ /* 39 - 1920x1080i@50Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 40 - 1920x1080i@100Hz */
+ /* 40 - 1920x1080i@100Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 41 - 1280x720@100Hz */
+ /* 41 - 1280x720@100Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 42 - 720x576@100Hz */
+ /* 42 - 720x576@100Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 43 - 720x576@100Hz */
+ /* 43 - 720x576@100Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 44 - 720(1440)x576i@100Hz */
+ /* 44 - 720(1440)x576i@100Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 45 - 720(1440)x576i@100Hz */
+ /* 45 - 720(1440)x576i@100Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 46 - 1920x1080i@120Hz */
+ /* 46 - 1920x1080i@120Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE),
+ DRM_MODE_FLAG_INTERLACE),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 47 - 1280x720@120Hz */
+ /* 47 - 1280x720@120Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 48 - 720x480@120Hz */
+ /* 48 - 720x480@120Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 49 - 720x480@120Hz */
+ /* 49 - 720x480@120Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 50 - 720(1440)x480i@120Hz */
+ /* 50 - 720(1440)x480i@120Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 51 - 720(1440)x480i@120Hz */
+ /* 51 - 720(1440)x480i@120Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 52 - 720x576@200Hz */
+ /* 52 - 720x576@200Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 53 - 720x576@200Hz */
+ /* 53 - 720x576@200Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 54 - 720(1440)x576i@200Hz */
+ /* 54 - 720(1440)x576i@200Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 55 - 720(1440)x576i@200Hz */
+ /* 55 - 720(1440)x576i@200Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 56 - 720x480@240Hz */
+ /* 56 - 720x480@240Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 57 - 720x480@240Hz */
+ /* 57 - 720x480@240Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 58 - 720(1440)x480i@240Hz */
+ /* 58 - 720(1440)x480i@240Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
- /* 59 - 720(1440)x480i@240Hz */
+ /* 59 - 720(1440)x480i@240Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 60 - 1280x720@24Hz */
+ /* 60 - 1280x720@24Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 61 - 1280x720@25Hz */
+ /* 61 - 1280x720@25Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 62 - 1280x720@30Hz */
+ /* 62 - 1280x720@30Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 63 - 1920x1080@120Hz */
+ /* 63 - 1920x1080@120Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 64 - 1920x1080@100Hz */
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 64 - 1920x1080@100Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 65 - 1280x720@24Hz */
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 65 - 1280x720@24Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 66 - 1280x720@25Hz */
+ /* 66 - 1280x720@25Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 67 - 1280x720@30Hz */
+ /* 67 - 1280x720@30Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 68 - 1280x720@50Hz */
+ /* 68 - 1280x720@50Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 69 - 1280x720@60Hz */
+ /* 69 - 1280x720@60Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 70 - 1280x720@100Hz */
+ /* 70 - 1280x720@100Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 71 - 1280x720@120Hz */
+ /* 71 - 1280x720@120Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 72 - 1920x1080@24Hz */
+ /* 72 - 1920x1080@24Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 73 - 1920x1080@25Hz */
+ /* 73 - 1920x1080@25Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 74 - 1920x1080@30Hz */
+ /* 74 - 1920x1080@30Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 75 - 1920x1080@50Hz */
+ /* 75 - 1920x1080@50Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 76 - 1920x1080@60Hz */
+ /* 76 - 1920x1080@60Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 77 - 1920x1080@100Hz */
+ /* 77 - 1920x1080@100Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 78 - 1920x1080@120Hz */
+ /* 78 - 1920x1080@120Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 79 - 1680x720@24Hz */
+ /* 79 - 1680x720@24Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 80 - 1680x720@25Hz */
+ /* 80 - 1680x720@25Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
2948, 3168, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 81 - 1680x720@30Hz */
+ /* 81 - 1680x720@30Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
2420, 2640, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 82 - 1680x720@50Hz */
+ /* 82 - 1680x720@50Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
1980, 2200, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 83 - 1680x720@60Hz */
+ /* 83 - 1680x720@60Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
1980, 2200, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 84 - 1680x720@100Hz */
+ /* 84 - 1680x720@100Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
1780, 2000, 0, 720, 725, 730, 825, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 85 - 1680x720@120Hz */
+ /* 85 - 1680x720@120Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
1780, 2000, 0, 720, 725, 730, 825, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 86 - 2560x1080@24Hz */
+ /* 86 - 2560x1080@24Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 87 - 2560x1080@25Hz */
+ /* 87 - 2560x1080@25Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 88 - 2560x1080@30Hz */
+ /* 88 - 2560x1080@30Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 89 - 2560x1080@50Hz */
+ /* 89 - 2560x1080@50Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 90 - 2560x1080@60Hz */
+ /* 90 - 2560x1080@60Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 91 - 2560x1080@100Hz */
+ /* 91 - 2560x1080@100Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 92 - 2560x1080@120Hz */
+ /* 92 - 2560x1080@120Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 93 - 3840x2160p@24Hz 16:9 */
+ /* 93 - 3840x2160@24Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 94 - 3840x2160p@25Hz 16:9 */
+ /* 94 - 3840x2160@25Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 95 - 3840x2160p@30Hz 16:9 */
+ /* 95 - 3840x2160@30Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 96 - 3840x2160p@50Hz 16:9 */
+ /* 96 - 3840x2160@50Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 97 - 3840x2160p@60Hz 16:9 */
+ /* 97 - 3840x2160@60Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
- /* 98 - 4096x2160p@24Hz 256:135 */
+ /* 98 - 4096x2160@24Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 99 - 4096x2160p@25Hz 256:135 */
+ /* 99 - 4096x2160@25Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 100 - 4096x2160p@30Hz 256:135 */
+ /* 100 - 4096x2160@30Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 101 - 4096x2160p@50Hz 256:135 */
+ /* 101 - 4096x2160@50Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 102 - 4096x2160p@60Hz 256:135 */
+ /* 102 - 4096x2160@60Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
- /* 103 - 3840x2160p@24Hz 64:27 */
+ /* 103 - 3840x2160@24Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 104 - 3840x2160p@25Hz 64:27 */
+ /* 104 - 3840x2160@25Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 105 - 3840x2160p@30Hz 64:27 */
+ /* 105 - 3840x2160@30Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 106 - 3840x2160p@50Hz 64:27 */
+ /* 106 - 3840x2160@50Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
- /* 107 - 3840x2160p@60Hz 64:27 */
+ /* 107 - 3840x2160@60Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
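The refresh rate in each comment can be cross-checked against the timings: the pixel clock (in kHz) divided by htotal * vtotal gives the vertical refresh. A rough sketch, ignoring the interlace/doublescan/vscan adjustments that drm_mode_vrefresh() applies; for VIC 16 above, 148500 kHz / (2200 * 1125) = 60 Hz:

    static int example_vrefresh(const struct drm_display_mode *mode)
    {
            unsigned int num = mode->clock * 1000;  /* mode->clock is in kHz */
            unsigned int den = mode->htotal * mode->vtotal;

            return DIV_ROUND_CLOSEST(num, den);
    }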
@@ -4874,6 +4875,14 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
/*
+ * As some drivers don't support atomic, we can't use connector state.
+ * So just initialize the frame with default values, the same way as is
+ * done with other properties here.
+ */
+ frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+ frame->itc = 0;
+
+ /*
* Populate picture aspect ratio from either
* user input (if specified) or from the CEA mode list.
*/
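With content_type and itc now given defaults, a non-atomic driver gets a valid AVI infoframe without touching connector state. A hedged sketch of building and packing one, assuming the current signature that takes an is_hdmi2_sink flag:

    static ssize_t example_fill_avi(const struct drm_display_mode *mode,
                                    u8 *buf, size_t len)
    {
            struct hdmi_avi_infoframe frame;
            int ret;

            ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
            if (ret)
                    return ret;

            /* frame.content_type is HDMI_CONTENT_TYPE_GRAPHICS and frame.itc
             * is 0, unless the driver overrides them before packing. */
            return hdmi_avi_infoframe_pack(&frame, buf, len);
    }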
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 186d00adfb5f..9da36a6271d3 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -18,6 +18,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_client.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
@@ -26,11 +27,8 @@
#include <drm/drm_print.h>
#include <linux/module.h>
-#define DEFAULT_FBDEFIO_DELAY_MS 50
-
struct drm_fbdev_cma {
struct drm_fb_helper fb_helper;
- const struct drm_framebuffer_funcs *fb_funcs;
};
/**
@@ -44,36 +42,6 @@ struct drm_fbdev_cma {
*
* An fbdev framebuffer backed by cma is also available by calling
* drm_fb_cma_fbdev_init(). drm_fb_cma_fbdev_fini() tears it down.
- * If the &drm_framebuffer_funcs.dirty callback is set, fb_deferred_io will be
- * set up automatically. &drm_framebuffer_funcs.dirty is called by
- * drm_fb_helper_deferred_io() in process context (&struct delayed_work).
- *
- * Example fbdev deferred io code::
- *
- * static int driver_fb_dirty(struct drm_framebuffer *fb,
- * struct drm_file *file_priv,
- * unsigned flags, unsigned color,
- * struct drm_clip_rect *clips,
- * unsigned num_clips)
- * {
- * struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
- * ... push changes ...
- * return 0;
- * }
- *
- * static struct drm_framebuffer_funcs driver_fb_funcs = {
- * .destroy = drm_gem_fb_destroy,
- * .create_handle = drm_gem_fb_create_handle,
- * .dirty = driver_fb_dirty,
- * };
- *
- * Initialize::
- *
- * fbdev = drm_fb_cma_fbdev_init_with_funcs(dev, 16,
- * dev->mode_config.num_crtc,
- * dev->mode_config.num_connector,
- * &driver_fb_funcs);
- *
*/
static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
@@ -131,236 +99,28 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
-static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
- return dma_mmap_writecombine(info->device, vma, info->screen_base,
- info->fix.smem_start, info->fix.smem_len);
-}
-
-static struct fb_ops drm_fbdev_cma_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_fillrect = drm_fb_helper_sys_fillrect,
- .fb_copyarea = drm_fb_helper_sys_copyarea,
- .fb_imageblit = drm_fb_helper_sys_imageblit,
- .fb_mmap = drm_fb_cma_mmap,
-};
-
-static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
-{
- fb_deferred_io_mmap(info, vma);
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- return 0;
-}
-
-static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
- struct drm_gem_cma_object *cma_obj)
-{
- struct fb_deferred_io *fbdefio;
- struct fb_ops *fbops;
-
- /*
- * Per device structures are needed because:
- * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
- * fbdefio: individual delays
- */
- fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
- fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
- if (!fbdefio || !fbops) {
- kfree(fbdefio);
- kfree(fbops);
- return -ENOMEM;
- }
-
- /* can't be offset from vaddr since dirty() uses cma_obj */
- fbi->screen_buffer = cma_obj->vaddr;
- /* fb_deferred_io_fault() needs a physical address */
- fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));
-
- *fbops = *fbi->fbops;
- fbi->fbops = fbops;
-
- fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
- fbdefio->deferred_io = drm_fb_helper_deferred_io;
- fbi->fbdefio = fbdefio;
- fb_deferred_io_init(fbi);
- fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;
-
- return 0;
-}
-
-static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
-{
- if (!fbi->fbdefio)
- return;
-
- fb_deferred_io_cleanup(fbi);
- kfree(fbi->fbdefio);
- kfree(fbi->fbops);
-}
-
-static int
-drm_fbdev_cma_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
- struct drm_device *dev = helper->dev;
- struct drm_gem_cma_object *obj;
- struct drm_framebuffer *fb;
- unsigned int bytes_per_pixel;
- unsigned long offset;
- struct fb_info *fbi;
- size_t size;
- int ret;
-
- DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
- sizes->surface_width, sizes->surface_height,
- sizes->surface_bpp);
-
- bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
- size = sizes->surface_width * sizes->surface_height * bytes_per_pixel;
- obj = drm_gem_cma_create(dev, size);
- if (IS_ERR(obj))
- return -ENOMEM;
-
- fbi = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(fbi)) {
- ret = PTR_ERR(fbi);
- goto err_gem_free_object;
- }
-
- fb = drm_gem_fbdev_fb_create(dev, sizes, 0, &obj->base,
- fbdev_cma->fb_funcs);
- if (IS_ERR(fb)) {
- dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
- ret = PTR_ERR(fb);
- goto err_fb_info_destroy;
- }
-
- helper->fb = fb;
-
- fbi->par = helper;
- fbi->flags = FBINFO_FLAG_DEFAULT;
- fbi->fbops = &drm_fbdev_cma_ops;
-
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
-
- offset = fbi->var.xoffset * bytes_per_pixel;
- offset += fbi->var.yoffset * fb->pitches[0];
-
- dev->mode_config.fb_base = (resource_size_t)obj->paddr;
- fbi->screen_base = obj->vaddr + offset;
- fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
- fbi->screen_size = size;
- fbi->fix.smem_len = size;
-
- if (fb->funcs->dirty) {
- ret = drm_fbdev_cma_defio_init(fbi, obj);
- if (ret)
- goto err_cma_destroy;
- }
-
- return 0;
-
-err_cma_destroy:
- drm_framebuffer_remove(fb);
-err_fb_info_destroy:
- drm_fb_helper_fini(helper);
-err_gem_free_object:
- drm_gem_object_put_unlocked(&obj->base);
- return ret;
-}
-
-static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
- .fb_probe = drm_fbdev_cma_create,
-};
-
/**
- * drm_fb_cma_fbdev_init_with_funcs() - Allocate and initialize fbdev emulation
+ * drm_fb_cma_fbdev_init() - Allocate and initialize fbdev emulation
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device.
* @dev->mode_config.preferred_depth is used if this is zero.
* @max_conn_count: Maximum number of connectors.
* @dev->mode_config.num_connector is used if this is zero.
- * @funcs: Framebuffer functions, in particular a custom dirty() callback.
- * Can be NULL.
*
* Returns:
* Zero on success or negative error code on failure.
*/
-int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count,
- const struct drm_framebuffer_funcs *funcs)
+int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
+ unsigned int max_conn_count)
{
struct drm_fbdev_cma *fbdev_cma;
- struct drm_fb_helper *fb_helper;
- int ret;
-
- if (!preferred_bpp)
- preferred_bpp = dev->mode_config.preferred_depth;
- if (!preferred_bpp)
- preferred_bpp = 32;
-
- if (!max_conn_count)
- max_conn_count = dev->mode_config.num_connector;
-
- fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
- if (!fbdev_cma)
- return -ENOMEM;
- fbdev_cma->fb_funcs = funcs;
- fb_helper = &fbdev_cma->fb_helper;
-
- drm_fb_helper_prepare(dev, fb_helper, &drm_fb_cma_helper_funcs);
-
- ret = drm_fb_helper_init(dev, fb_helper, max_conn_count);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "Failed to initialize fbdev helper.\n");
- goto err_free;
- }
-
- ret = drm_fb_helper_single_add_all_connectors(fb_helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "Failed to add connectors.\n");
- goto err_drm_fb_helper_fini;
- }
-
- ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "Failed to set fbdev configuration.\n");
- goto err_drm_fb_helper_fini;
- }
+ /* dev->fb_helper will indirectly point to fbdev_cma after this call */
+ fbdev_cma = drm_fbdev_cma_init(dev, preferred_bpp, max_conn_count);
+ if (IS_ERR(fbdev_cma))
+ return PTR_ERR(fbdev_cma);
return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_free:
- kfree(fbdev_cma);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init_with_funcs);
-
-/**
- * drm_fb_cma_fbdev_init() - Allocate and initialize fbdev emulation
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device.
- * @dev->mode_config.preferred_depth is used if this is zero.
- * @max_conn_count: Maximum number of connectors.
- * @dev->mode_config.num_connector is used if this is zero.
- *
- * Returns:
- * Zero on success or negative error code on failure.
- */
-int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
- unsigned int max_conn_count)
-{
- return drm_fb_cma_fbdev_init_with_funcs(dev, preferred_bpp,
- max_conn_count, NULL);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
@@ -370,104 +130,54 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
*/
void drm_fb_cma_fbdev_fini(struct drm_device *dev)
{
- struct drm_fb_helper *fb_helper = dev->fb_helper;
-
- if (!fb_helper)
- return;
-
- /* Unregister if it hasn't been done already */
- if (fb_helper->fbdev && fb_helper->fbdev->dev)
- drm_fb_helper_unregister_fbi(fb_helper);
-
- if (fb_helper->fbdev)
- drm_fbdev_cma_defio_fini(fb_helper->fbdev);
-
- if (fb_helper->fb)
- drm_framebuffer_remove(fb_helper->fb);
-
- drm_fb_helper_fini(fb_helper);
- kfree(to_fbdev_cma(fb_helper));
+ if (dev->fb_helper)
+ drm_fbdev_cma_fini(to_fbdev_cma(dev->fb_helper));
}
EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_fini);
+static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+ .fb_probe = drm_fb_helper_generic_probe,
+};
+
/**
- * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
+ * drm_fbdev_cma_init() - Allocates and initializes a drm_fbdev_cma struct
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device
* @max_conn_count: Maximum number of connectors
- * @funcs: fb helper functions, in particular a custom dirty() callback
*
* Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
*/
-struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count,
- const struct drm_framebuffer_funcs *funcs)
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+ unsigned int preferred_bpp, unsigned int max_conn_count)
{
struct drm_fbdev_cma *fbdev_cma;
- struct drm_fb_helper *helper;
+ struct drm_fb_helper *fb_helper;
int ret;
fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
- if (!fbdev_cma) {
- dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
+ if (!fbdev_cma)
return ERR_PTR(-ENOMEM);
- }
- fbdev_cma->fb_funcs = funcs;
- helper = &fbdev_cma->fb_helper;
-
- drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
+ fb_helper = &fbdev_cma->fb_helper;
- ret = drm_fb_helper_init(dev, helper, max_conn_count);
- if (ret < 0) {
- dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
+ ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
+ if (ret)
goto err_free;
- }
-
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret < 0) {
- dev_err(dev->dev, "Failed to add connectors.\n");
- goto err_drm_fb_helper_fini;
-
- }
- ret = drm_fb_helper_initial_config(helper, preferred_bpp);
- if (ret < 0) {
- dev_err(dev->dev, "Failed to set initial hw configuration.\n");
- goto err_drm_fb_helper_fini;
- }
+ ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_cma_helper_funcs,
+ preferred_bpp, max_conn_count);
+ if (ret)
+ goto err_client_put;
return fbdev_cma;
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(helper);
+err_client_put:
+ drm_client_release(&fb_helper->client);
err_free:
kfree(fbdev_cma);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
-
-static const struct drm_framebuffer_funcs drm_fb_cma_funcs = {
- .destroy = drm_gem_fb_destroy,
- .create_handle = drm_gem_fb_create_handle,
-};
-
-/**
- * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device
- * @max_conn_count: Maximum number of connectors
- *
- * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
- */
-struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count)
-{
- return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp,
- max_conn_count,
- &drm_fb_cma_funcs);
-}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
/**
@@ -477,14 +187,7 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
- if (fbdev_cma->fb_helper.fbdev)
- drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
-
- if (fbdev_cma->fb_helper.fb)
- drm_framebuffer_remove(fbdev_cma->fb_helper.fb);
-
- drm_fb_helper_fini(&fbdev_cma->fb_helper);
- kfree(fbdev_cma);
+ /* All resources have now been freed by drm_fbdev_fb_destroy() */
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
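With the deferred-io plumbing gone, a CMA driver's fbdev setup reduces to the same two calls as before, now backed by the generic emulation. A hedged sketch of a typical bind/unbind pair (the hypothetical foo_* names are illustrative):

    static int foo_drm_bind(struct device *dev)
    {
            struct drm_device *drm = dev_get_drvdata(dev);
            int ret;

            ret = drm_dev_register(drm, 0);
            if (ret)
                    return ret;

            /* 0 = take preferred depth and connector count from mode_config */
            ret = drm_fb_cma_fbdev_init(drm, 0, 0);
            if (ret)
                    DRM_DEV_ERROR(dev, "fbdev emulation unavailable: %d\n", ret);

            return 0;
    }

    static void foo_drm_unbind(struct device *dev)
    {
            struct drm_device *drm = dev_get_drvdata(dev);

            drm_fb_cma_fbdev_fini(drm);
            drm_dev_unregister(drm);
    }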
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 2ee1eaa66188..4b0dd20bccb8 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/console.h>
+#include <linux/dma-buf.h>
#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/slab.h>
@@ -66,6 +67,9 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
*
+ * Drivers that support a dumb buffer with a virtual address and mmap support
+ * should try out the generic fbdev emulation using drm_fbdev_generic_setup().
+ *
* Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it
* down by calling drm_fb_helper_fbdev_teardown().
*
@@ -368,7 +372,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
struct drm_plane *plane;
struct drm_atomic_state *state;
int i, ret;
- unsigned int plane_mask;
struct drm_modeset_acquire_ctx ctx;
drm_modeset_acquire_init(&ctx, 0);
@@ -381,7 +384,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
state->acquire_ctx = &ctx;
retry:
- plane_mask = 0;
drm_for_each_plane(plane, dev) {
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
@@ -391,9 +393,6 @@ retry:
plane_state->rotation = DRM_MODE_ROTATE_0;
- plane->old_fb = plane->fb;
- plane_mask |= 1 << drm_plane_index(plane);
-
/* disable non-primary: */
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
continue;
@@ -430,8 +429,6 @@ retry:
ret = drm_atomic_commit(state);
out_state:
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
if (ret == -EDEADLK)
goto backoff;
@@ -745,6 +742,24 @@ static void drm_fb_helper_resume_worker(struct work_struct *work)
console_unlock();
}
+static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_framebuffer *fb = fb_helper->fb;
+ unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+ size_t offset = clip->y1 * fb->pitches[0] + clip->x1 * cpp;
+ void *src = fb_helper->fbdev->screen_buffer + offset;
+ void *dst = fb_helper->buffer->vaddr + offset;
+ size_t len = (clip->x2 - clip->x1) * cpp;
+ unsigned int y;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ memcpy(dst, src, len);
+ src += fb->pitches[0];
+ dst += fb->pitches[0];
+ }
+}
+
static void drm_fb_helper_dirty_work(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
@@ -760,8 +775,12 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
spin_unlock_irqrestore(&helper->dirty_lock, flags);
/* call dirty callback only when it has been really touched */
- if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)
+ if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
+ /* Generic fbdev uses a shadow buffer */
+ if (helper->buffer)
+ drm_fb_helper_dirty_blit_real(helper, &clip_copy);
helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+ }
}
/**
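The shadow-buffer blit above copies only the dirty rows. The byte offset of the clip's first pixel follows from the pitch and bytes-per-pixel; e.g. a clip starting at (x1 = 8, y1 = 4) on an XRGB8888 framebuffer (cpp = 4) with a 4096-byte pitch lands at 4 * 4096 + 8 * 4 = 16416 bytes. A minimal sketch of that arithmetic:

    static size_t example_clip_offset(const struct drm_clip_rect *clip,
                                      unsigned int pitch, unsigned int cpp)
    {
            return (size_t)clip->y1 * pitch + (size_t)clip->x1 * cpp;
    }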
@@ -1164,7 +1183,7 @@ EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
* @info: fbdev registered by the helper
* @rect: info about rectangle to fill
*
- * A wrapper around cfb_imageblit implemented by fbdev core
+ * A wrapper around cfb_fillrect implemented by fbdev core
*/
void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
@@ -2330,6 +2349,20 @@ retry:
return true;
}
+static bool connector_has_possible_crtc(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ int i;
+
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ if (encoder->possible_crtcs & drm_crtc_mask(crtc))
+ return true;
+ }
+
+ return false;
+}
+
static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **best_crtcs,
struct drm_display_mode **modes,
@@ -2338,7 +2371,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
int c, o;
struct drm_connector *connector;
const struct drm_connector_helper_funcs *connector_funcs;
- struct drm_encoder *encoder;
int my_score, best_score, score;
struct drm_fb_helper_crtc **crtcs, *crtc;
struct drm_fb_helper_connector *fb_helper_conn;
@@ -2370,27 +2402,14 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
connector_funcs = connector->helper_private;
/*
- * If the DRM device implements atomic hooks and ->best_encoder() is
- * NULL we fallback to the default drm_atomic_helper_best_encoder()
- * helper.
- */
- if (drm_drv_uses_atomic_modeset(fb_helper->dev) &&
- !connector_funcs->best_encoder)
- encoder = drm_atomic_helper_best_encoder(connector);
- else
- encoder = connector_funcs->best_encoder(connector);
-
- if (!encoder)
- goto out;
-
- /*
* select a crtc for this connector and then attempt to configure
* remaining connectors
*/
for (c = 0; c < fb_helper->crtc_count; c++) {
crtc = &fb_helper->crtc_info[c];
- if ((encoder->possible_crtcs & (1 << c)) == 0)
+ if (!connector_has_possible_crtc(connector,
+ crtc->mode_set.crtc))
continue;
for (o = 0; o < n; o++)
@@ -2417,7 +2436,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
sizeof(struct drm_fb_helper_crtc *));
}
}
-out:
+
kfree(crtcs);
return best_score;
}
@@ -2928,6 +2947,294 @@ void drm_fb_helper_output_poll_changed(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_fb_helper_output_poll_changed);
+/* @user: 1=userspace, 0=fbcon */
+static int drm_fbdev_fb_open(struct fb_info *info, int user)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ if (!try_module_get(fb_helper->dev->driver->fops->owner))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int drm_fbdev_fb_release(struct fb_info *info, int user)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ module_put(fb_helper->dev->driver->fops->owner);
+
+ return 0;
+}
+
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
+ * unregister_framebuffer() or fb_release().
+ */
+static void drm_fbdev_fb_destroy(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct fb_info *fbi = fb_helper->fbdev;
+ struct fb_ops *fbops = NULL;
+ void *shadow = NULL;
+
+ if (fbi->fbdefio) {
+ fb_deferred_io_cleanup(fbi);
+ shadow = fbi->screen_buffer;
+ fbops = fbi->fbops;
+ }
+
+ drm_fb_helper_fini(fb_helper);
+
+ if (shadow) {
+ vfree(shadow);
+ kfree(fbops);
+ }
+
+ drm_client_framebuffer_delete(fb_helper->buffer);
+ /*
+ * FIXME:
+ * Remove conditional when all CMA drivers have been moved over to using
+ * drm_fbdev_generic_setup().
+ */
+ if (fb_helper->client.funcs) {
+ drm_client_release(&fb_helper->client);
+ kfree(fb_helper);
+ }
+}
+
+static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ if (fb_helper->dev->driver->gem_prime_mmap)
+ return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
+ else
+ return -ENODEV;
+}
+
+static struct fb_ops drm_fbdev_fb_ops = {
+ .owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_open = drm_fbdev_fb_open,
+ .fb_release = drm_fbdev_fb_release,
+ .fb_destroy = drm_fbdev_fb_destroy,
+ .fb_mmap = drm_fbdev_fb_mmap,
+ .fb_read = drm_fb_helper_sys_read,
+ .fb_write = drm_fb_helper_sys_write,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
+};
+
+static struct fb_deferred_io drm_fbdev_defio = {
+ .delay = HZ / 20,
+ .deferred_io = drm_fb_helper_deferred_io,
+};
+
+/**
+ * drm_fb_helper_generic_probe - Generic fbdev emulation probe helper
+ * @fb_helper: fbdev helper structure
+ * @sizes: describes fbdev size and scanout surface size
+ *
+ * This function uses the client API to create a framebuffer backed by a dumb buffer.
+ *
+ * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect,
+ * fb_copyarea, fb_imageblit.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_client_buffer *buffer;
+ struct drm_framebuffer *fb;
+ struct fb_info *fbi;
+ u32 format;
+ int ret;
+
+ DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+ buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ sizes->surface_height, format);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ fb_helper->buffer = buffer;
+ fb_helper->fb = buffer->fb;
+ fb = buffer->fb;
+
+ fbi = drm_fb_helper_alloc_fbi(fb_helper);
+ if (IS_ERR(fbi)) {
+ ret = PTR_ERR(fbi);
+ goto err_free_buffer;
+ }
+
+ fbi->par = fb_helper;
+ fbi->fbops = &drm_fbdev_fb_ops;
+ fbi->screen_size = fb->height * fb->pitches[0];
+ fbi->fix.smem_len = fbi->screen_size;
+ fbi->screen_buffer = buffer->vaddr;
+ strcpy(fbi->fix.id, "DRM emulated");
+
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
+ drm_fb_helper_fill_var(fbi, fb_helper, sizes->fb_width, sizes->fb_height);
+
+ if (fb->funcs->dirty) {
+ struct fb_ops *fbops;
+ void *shadow;
+
+ /*
+ * fb_deferred_io_cleanup() clears &fbops->fb_mmap, so a per-instance
+ * copy of the fb_ops is necessary.
+ */
+ fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
+ shadow = vzalloc(fbi->screen_size);
+ if (!fbops || !shadow) {
+ kfree(fbops);
+ vfree(shadow);
+ ret = -ENOMEM;
+ goto err_fb_info_destroy;
+ }
+
+ *fbops = *fbi->fbops;
+ fbi->fbops = fbops;
+ fbi->screen_buffer = shadow;
+ fbi->fbdefio = &drm_fbdev_defio;
+
+ fb_deferred_io_init(fbi);
+ }
+
+ return 0;
+
+err_fb_info_destroy:
+ drm_fb_helper_fini(fb_helper);
+err_free_buffer:
+ drm_client_framebuffer_delete(buffer);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_fb_helper_generic_probe);
+
+static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
+ .fb_probe = drm_fb_helper_generic_probe,
+};
+
+static void drm_fbdev_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ if (fb_helper->fbdev) {
+ drm_fb_helper_unregister_fbi(fb_helper);
+ /* drm_fbdev_fb_destroy() takes care of cleanup */
+ return;
+ }
+
+ /* Did drm_fb_helper_fbdev_setup() run? */
+ if (fb_helper->dev)
+ drm_fb_helper_fini(fb_helper);
+
+ drm_client_release(client);
+ kfree(fb_helper);
+}
+
+static int drm_fbdev_client_restore(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+
+ return 0;
+}
+
+static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = client->dev;
+ int ret;
+
+ /* If drm_fb_helper_fbdev_setup() failed, we only try once */
+ if (!fb_helper->dev && fb_helper->funcs)
+ return 0;
+
+ if (dev->fb_helper)
+ return drm_fb_helper_hotplug_event(dev->fb_helper);
+
+ if (!dev->mode_config.num_connector)
+ return 0;
+
+ ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs,
+ fb_helper->preferred_bpp, 0);
+ if (ret) {
+ fb_helper->dev = NULL;
+ fb_helper->fbdev = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct drm_client_funcs drm_fbdev_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = drm_fbdev_client_unregister,
+ .restore = drm_fbdev_client_restore,
+ .hotplug = drm_fbdev_client_hotplug,
+};
+
+/**
+ * drm_fbdev_generic_setup() - Setup generic fbdev emulation
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device.
+ * @dev->mode_config.preferred_depth is used if this is zero.
+ *
+ * This function sets up generic fbdev emulation for drivers that support
+ * dumb buffers with a virtual address and that can be mmap'ed.
+ *
+ * Restore, hotplug events and teardown are all taken care of. Drivers that do
+ * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
+ * Simple drivers might use drm_mode_config_helper_suspend().
+ *
+ * Drivers that set the dirty callback on their framebuffer will get a shadow
+ * fbdev buffer that is blitted onto the real buffer. This is done in order to
+ * make deferred I/O work with all kinds of buffers.
+ *
+ * This function is safe to call even when there are no connectors present.
+ * Setup will be retried on the next hotplug event.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+{
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ if (!drm_fbdev_emulation)
+ return 0;
+
+ fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+ if (!fb_helper)
+ return -ENOMEM;
+
+ ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+ if (ret) {
+ kfree(fb_helper);
+ return ret;
+ }
+
+ fb_helper->preferred_bpp = preferred_bpp;
+
+ drm_fbdev_client_hotplug(&fb_helper->client);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_fbdev_generic_setup);
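A driver opting in to the generic emulation only needs a single call after
registering the device; a minimal sketch (the foo_ naming is hypothetical,
only the two drm_* calls are from this patch):

	static int foo_probe(struct platform_device *pdev)
	{
		struct drm_device *drm;
		int ret;

		/* ... allocate and initialize drm ... */

		ret = drm_dev_register(drm, 0);
		if (ret)
			return ret;

		/* 0: derive bpp from dev->mode_config.preferred_depth */
		drm_fbdev_generic_setup(drm, 0);

		return 0;
	}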
+
/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
* but the module doesn't depend on any fb console symbols. At least
* attempt to load fbcon to avoid leaving the system without a usable console.
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 6d9b9453707c..ffa8dc35515f 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/module.h>
+#include <drm/drm_client.h>
#include <drm/drm_file.h>
#include <drm/drmP.h>
@@ -101,6 +102,166 @@ DEFINE_MUTEX(drm_global_mutex);
static int drm_open_helper(struct file *filp, struct drm_minor *minor);
+/**
+ * drm_file_alloc - allocate file context
+ * @minor: minor to allocate on
+ *
+ * This allocates a new DRM file context. It is not linked into any context and
+ * can be used by the caller freely. Note that the context keeps a pointer to
+ * @minor, so it must be freed before @minor is.
+ *
+ * RETURNS:
+ * Pointer to newly allocated context, ERR_PTR on failure.
+ */
+struct drm_file *drm_file_alloc(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct drm_file *file;
+ int ret;
+
+ file = kzalloc(sizeof(*file), GFP_KERNEL);
+ if (!file)
+ return ERR_PTR(-ENOMEM);
+
+ file->pid = get_pid(task_pid(current));
+ file->minor = minor;
+
+ /* for compatibility root is always authenticated */
+ file->authenticated = capable(CAP_SYS_ADMIN);
+ file->lock_count = 0;
+
+ INIT_LIST_HEAD(&file->lhead);
+ INIT_LIST_HEAD(&file->fbs);
+ mutex_init(&file->fbs_lock);
+ INIT_LIST_HEAD(&file->blobs);
+ INIT_LIST_HEAD(&file->pending_event_list);
+ INIT_LIST_HEAD(&file->event_list);
+ init_waitqueue_head(&file->event_wait);
+ file->event_space = 4096; /* set aside 4k for event buffer */
+
+ mutex_init(&file->event_read_lock);
+
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_open(dev, file);
+
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ drm_syncobj_open(file);
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_init_file_private(&file->prime);
+
+ if (dev->driver->open) {
+ ret = dev->driver->open(dev, file);
+ if (ret < 0)
+ goto out_prime_destroy;
+ }
+
+ return file;
+
+out_prime_destroy:
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_destroy_file_private(&file->prime);
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ drm_syncobj_release(file);
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_release(dev, file);
+ put_pid(file->pid);
+ kfree(file);
+
+ return ERR_PTR(ret);
+}
+
+static void drm_events_release(struct drm_file *file_priv)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_pending_event *e, *et;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ /* Unlink pending events */
+ list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
+ pending_link) {
+ list_del(&e->pending_link);
+ e->file_priv = NULL;
+ }
+
+ /* Remove unconsumed events */
+ list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
+ list_del(&e->link);
+ kfree(e);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/**
+ * drm_file_free - free file context
+ * @file: context to free, or NULL
+ *
+ * This destroys and deallocates a DRM file context previously allocated via
+ * drm_file_alloc(). The caller must make sure to unlink it from any contexts
+ * before calling this.
+ *
+ * If NULL is passed, this is a no-op.
+ */
+void drm_file_free(struct drm_file *file)
+{
+ struct drm_device *dev;
+
+ if (!file)
+ return;
+
+ dev = file->minor->dev;
+
+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file->minor->kdev->devt),
+ dev->open_count);
+
+ if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
+ dev->driver->preclose)
+ dev->driver->preclose(dev, file);
+
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
+ drm_legacy_lock_release(dev, file->filp);
+
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ drm_legacy_reclaim_buffers(dev, file);
+
+ drm_events_release(file);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_fb_release(file);
+ drm_property_destroy_user_blobs(dev, file);
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ drm_syncobj_release(file);
+
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_release(dev, file);
+
+ drm_legacy_ctxbitmap_flush(dev, file);
+
+ if (drm_is_primary_client(file))
+ drm_master_release(file);
+
+ if (dev->driver->postclose)
+ dev->driver->postclose(dev, file);
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_destroy_file_private(&file->prime);
+
+ WARN_ON(!list_empty(&file->event_list));
+
+ put_pid(file->pid);
+ kfree(file);
+}
+
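The two helpers are intended as a strict pair, mirroring what drm_open_helper()
is converted to below; a minimal usage sketch:

	struct drm_file *file;

	file = drm_file_alloc(minor);
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* ... link the context into device lists, use it ... */

	/* unlink from all lists first, then release */
	drm_file_free(file);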
static int drm_setup(struct drm_device * dev)
{
int ret;
@@ -207,52 +368,22 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- filp->private_data = priv;
- filp->f_mode |= FMODE_UNSIGNED_OFFSET;
- priv->filp = filp;
- priv->pid = get_pid(task_pid(current));
- priv->minor = minor;
-
- /* for compatibility root is always authenticated */
- priv->authenticated = capable(CAP_SYS_ADMIN);
- priv->lock_count = 0;
-
- INIT_LIST_HEAD(&priv->lhead);
- INIT_LIST_HEAD(&priv->fbs);
- mutex_init(&priv->fbs_lock);
- INIT_LIST_HEAD(&priv->blobs);
- INIT_LIST_HEAD(&priv->pending_event_list);
- INIT_LIST_HEAD(&priv->event_list);
- init_waitqueue_head(&priv->event_wait);
- priv->event_space = 4096; /* set aside 4k for event buffer */
-
- mutex_init(&priv->event_read_lock);
-
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_open(dev, priv);
-
- if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- drm_syncobj_open(priv);
-
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_init_file_private(&priv->prime);
-
- if (dev->driver->open) {
- ret = dev->driver->open(dev, priv);
- if (ret < 0)
- goto out_prime_destroy;
- }
+ priv = drm_file_alloc(minor);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
if (drm_is_primary_client(priv)) {
ret = drm_master_open(priv);
- if (ret)
- goto out_close;
+ if (ret) {
+ drm_file_free(priv);
+ return ret;
+ }
}
+ filp->private_data = priv;
+ filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+ priv->filp = filp;
+
mutex_lock(&dev->filelist_mutex);
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->filelist_mutex);
@@ -278,45 +409,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
#endif
return 0;
-
-out_close:
- if (dev->driver->postclose)
- dev->driver->postclose(dev, priv);
-out_prime_destroy:
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_destroy_file_private(&priv->prime);
- if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- drm_syncobj_release(priv);
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_release(dev, priv);
- put_pid(priv->pid);
- kfree(priv);
- filp->private_data = NULL;
- return ret;
-}
-
-static void drm_events_release(struct drm_file *file_priv)
-{
- struct drm_device *dev = file_priv->minor->dev;
- struct drm_pending_event *e, *et;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- /* Unlink pending events */
- list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
- pending_link) {
- list_del(&e->pending_link);
- e->file_priv = NULL;
- }
-
- /* Remove unconsumed events */
- list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
- list_del(&e->link);
- kfree(e);
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
}
static void drm_legacy_dev_reinit(struct drm_device *dev)
@@ -353,6 +445,8 @@ void drm_lastclose(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_legacy_dev_reinit(dev);
+
+ drm_client_dev_restore(dev);
}
/**
@@ -383,57 +477,7 @@ int drm_release(struct inode *inode, struct file *filp)
list_del(&file_priv->lhead);
mutex_unlock(&dev->filelist_mutex);
- if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
- dev->driver->preclose)
- dev->driver->preclose(dev, file_priv);
-
- /* ========================================================
- * Begin inline drm_release
- */
-
- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
- task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- dev->open_count);
-
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- drm_legacy_lock_release(dev, filp);
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- drm_legacy_reclaim_buffers(dev, file_priv);
-
- drm_events_release(file_priv);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_fb_release(file_priv);
- drm_property_destroy_user_blobs(dev, file_priv);
- }
-
- if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- drm_syncobj_release(file_priv);
-
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_release(dev, file_priv);
-
- drm_legacy_ctxbitmap_flush(dev, file_priv);
-
- if (drm_is_primary_client(file_priv))
- drm_master_release(file_priv);
-
- if (dev->driver->postclose)
- dev->driver->postclose(dev, file_priv);
-
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_destroy_file_private(&file_priv->prime);
-
- WARN_ON(!list_empty(&file_priv->event_list));
-
- put_pid(file_priv->pid);
- kfree(file_priv);
-
- /* ========================================================
- * End inline drm_release
- */
+ drm_file_free(file_priv);
if (!--dev->open_count) {
drm_lastclose(dev);
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 5ca6395cd4d3..35c1e2742c27 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -152,27 +152,27 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_XBGR8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
- { .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
- { .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
- { .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
- { .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
- { .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
- { .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
- { .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
- { .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
- { .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true },
+ { .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
};
unsigned int i;
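With the new flag in place, drivers can key YUV-specific handling off the
format info instead of keeping private format lists; for example:

	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);

	if (info && info->is_yuv) {
		/* program CSC / chroma subsampling for YUV scanout */
	}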
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index bfedceff87bb..781af1d42d76 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -95,21 +95,20 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
/**
* drm_mode_addfb - add an FB to the graphics configuration
* @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
+ * @or: pointer to request structure
+ * @file_priv: drm file
*
* Add a new FB to the specified CRTC, given a user request. This is the
* original addfb ioctl which only supported RGB formats.
*
- * Called by the user via ioctl.
+ * Called by the user via ioctl, or by an in-kernel client.
*
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_addfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
+ struct drm_file *file_priv)
{
- struct drm_mode_fb_cmd *or = data;
struct drm_mode_fb_cmd2 r = {};
int ret;
@@ -134,6 +133,12 @@ int drm_mode_addfb(struct drm_device *dev,
return 0;
}
+int drm_mode_addfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ return drm_mode_addfb(dev, data, file_priv);
+}
+
static int fb_plane_width(int width,
const struct drm_format_info *format, int plane)
{
@@ -367,29 +372,28 @@ static void drm_mode_rmfb_work_fn(struct work_struct *w)
/**
* drm_mode_rmfb - remove an FB from the configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
+ * @dev: drm device
+ * @fb_id: id of framebuffer to remove
+ * @file_priv: drm file
*
- * Remove the FB specified by the user.
+ * Remove the specified FB.
*
- * Called by the user via ioctl.
+ * Called by the user via ioctl, or by an in-kernel client.
*
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_rmfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
+ struct drm_file *file_priv)
{
struct drm_framebuffer *fb = NULL;
struct drm_framebuffer *fbl = NULL;
- uint32_t *id = data;
int found = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- fb = drm_framebuffer_lookup(dev, file_priv, *id);
+ fb = drm_framebuffer_lookup(dev, file_priv, fb_id);
if (!fb)
return -ENOENT;
@@ -435,6 +439,14 @@ fail_unref:
return -ENOENT;
}
+int drm_mode_rmfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ uint32_t *fb_id = data;
+
+ return drm_mode_rmfb(dev, *fb_id, file_priv);
+}
+
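With the request structure passed explicitly, in-kernel clients can create and
destroy framebuffers without going through the ioctl wrappers; a hedged sketch
(field values illustrative, handle from a prior dumb-buffer create):

	struct drm_mode_fb_cmd fb_req = {
		.width	= width,
		.height	= height,
		.bpp	= 32,
		.depth	= 24,
		.pitch	= pitch,
		.handle	= dumb_handle,
	};
	int ret;

	ret = drm_mode_addfb(dev, &fb_req, file);
	if (ret)
		return ret;
	/* fb_req.fb_id now identifies the new framebuffer */

	/* ... later, tear it down again ... */
	drm_mode_rmfb(dev, fb_req.fb_id, file);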
/**
* drm_mode_getfb - get FB info
* @dev: drm device for the ioctl
@@ -835,9 +847,7 @@ retry:
if (ret)
goto unlock;
- plane_mask |= BIT(drm_plane_index(plane));
-
- plane->old_fb = plane->fb;
+ plane_mask |= drm_plane_mask(plane);
}
/* This list is only filled when disable_crtcs is set. */
@@ -852,9 +862,6 @@ retry:
ret = drm_atomic_commit(state);
unlock:
- if (plane_mask)
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4a16d7b26c89..bf90625df3c5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1036,6 +1036,15 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return -EACCES;
}
+ if (node->readonly) {
+ if (vma->vm_flags & VM_WRITE) {
+ drm_gem_object_put_unlocked(obj);
+ return -EINVAL;
+ }
+
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
+
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
vma);
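Clearing VM_MAYWRITE in addition to rejecting VM_WRITE matters: it prevents
userspace from mapping read-only and then upgrading the mapping with
mprotect(PROT_WRITE). A driver would mark a mapping read-only via the new
vma-node field, e.g. (a sketch, field placement per this series):

	/* GEM object whose mmap must never be writable */
	obj->vma_node.readonly = true;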
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index acfbc0641a06..2810d4131411 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -253,7 +253,7 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
struct dma_buf *dma_buf;
struct dma_fence *fence;
- if (plane->state->fb == state->fb || !state->fb)
+ if (!state->fb)
return 0;
dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index b2dc21e33ae0..5799e2782dd1 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b72242e93ea4..40179c5fc6b8 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -26,6 +26,8 @@
/* drm_file.c */
extern struct mutex drm_global_mutex;
+struct drm_file *drm_file_alloc(struct drm_minor *minor);
+void drm_file_free(struct drm_file *file);
void drm_lastclose(struct drm_device *dev);
/* drm_pci.c */
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 0d4cfb232576..ea10e9a26aad 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -334,6 +334,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
file_priv->aspect_ratio_allowed = req->value;
break;
+ case DRM_CLIENT_CAP_WRITEBACK_CONNECTORS:
+ if (!file_priv->atomic)
+ return -EINVAL;
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->writeback_connectors = req->value;
+ break;
default:
return -EINVAL;
}
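From userspace the new capability is requested with the same SET_CLIENT_CAP
ioctl as the others, and only after enabling atomic; with libdrm (a sketch):

	/* writeback connectors are only exposed to atomic clients */
	if (drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1))
		return -1;
	if (drmSetClientCap(fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1))
		return -1;	/* kernel too old, or atomic not enabled */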
@@ -634,12 +641,12 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index d638c0fb3418..b54fb78a283c 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -553,7 +553,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
/* Clone the lessor file to create a new file for us */
DRM_DEBUG_LEASE("Allocating lease file\n");
- lessee_file = filp_clone_open(lessor_file);
+ lessee_file = file_clone_open(lessor_file);
if (IS_ERR(lessee_file)) {
ret = PTR_ERR(lessee_file);
goto out_lessee;
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index bc73b7f5b9fc..80b75501f5c6 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -392,6 +392,7 @@ bool mipi_dsi_packet_format_is_short(u8 type)
case MIPI_DSI_DCS_SHORT_WRITE:
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
case MIPI_DSI_DCS_READ:
+ case MIPI_DSI_DCS_COMPRESSION_MODE:
case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
return true;
}
@@ -410,6 +411,7 @@ EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
bool mipi_dsi_packet_format_is_long(u8 type)
{
switch (type) {
+ case MIPI_DSI_PPS_LONG_WRITE:
case MIPI_DSI_NULL_PACKET:
case MIPI_DSI_BLANKING_PACKET:
case MIPI_DSI_GENERIC_LONG_WRITE:
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 3166026a1874..3cc5fbd78ee2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -239,6 +239,32 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
+static u64 rb_to_hole_size(struct rb_node *rb)
+{
+ return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
+}
+
+static void insert_hole_size(struct rb_root_cached *root,
+ struct drm_mm_node *node)
+{
+ struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
+ u64 x = node->hole_size;
+ bool first = true;
+
+ while (*link) {
+ rb = *link;
+ if (x > rb_to_hole_size(rb)) {
+ link = &rb->rb_left;
+ } else {
+ link = &rb->rb_right;
+ first = false;
+ }
+ }
+
+ rb_link_node(&node->rb_hole_size, rb, link);
+ rb_insert_color_cached(&node->rb_hole_size, root, first);
+}
+
static void add_hole(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
@@ -247,7 +273,7 @@ static void add_hole(struct drm_mm_node *node)
__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
- RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
+ insert_hole_size(&mm->holes_size, node);
RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
list_add(&node->hole_stack, &mm->hole_stack);
@@ -258,7 +284,7 @@ static void rm_hole(struct drm_mm_node *node)
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
list_del(&node->hole_stack);
- rb_erase(&node->rb_hole_size, &node->mm->holes_size);
+ rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
node->hole_size = 0;
@@ -282,38 +308,39 @@ static inline u64 rb_hole_size(struct rb_node *rb)
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
- struct rb_node *best = NULL;
- struct rb_node **link = &mm->holes_size.rb_node;
+ struct rb_node *rb = mm->holes_size.rb_root.rb_node;
+ struct drm_mm_node *best = NULL;
- while (*link) {
- struct rb_node *rb = *link;
+ do {
+ struct drm_mm_node *node =
+ rb_entry(rb, struct drm_mm_node, rb_hole_size);
- if (size <= rb_hole_size(rb)) {
- link = &rb->rb_left;
- best = rb;
+ if (size <= node->hole_size) {
+ best = node;
+ rb = rb->rb_right;
} else {
- link = &rb->rb_right;
+ rb = rb->rb_left;
}
- }
+ } while (rb);
- return rb_hole_size_to_node(best);
+ return best;
}
static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
+ struct rb_node *rb = mm->holes_addr.rb_node;
struct drm_mm_node *node = NULL;
- struct rb_node **link = &mm->holes_addr.rb_node;
- while (*link) {
+ while (rb) {
u64 hole_start;
- node = rb_hole_addr_to_node(*link);
+ node = rb_hole_addr_to_node(rb);
hole_start = __drm_mm_hole_node_start(node);
if (addr < hole_start)
- link = &node->rb_hole_addr.rb_left;
+ rb = node->rb_hole_addr.rb_left;
else if (addr > hole_start + node->hole_size)
- link = &node->rb_hole_addr.rb_right;
+ rb = node->rb_hole_addr.rb_right;
else
break;
}
@@ -326,9 +353,6 @@ first_hole(struct drm_mm *mm,
u64 start, u64 end, u64 size,
enum drm_mm_insert_mode mode)
{
- if (RB_EMPTY_ROOT(&mm->holes_size))
- return NULL;
-
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
@@ -355,7 +379,7 @@ next_hole(struct drm_mm *mm,
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
- return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
+ return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
case DRM_MM_INSERT_LOW:
return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
@@ -426,6 +450,11 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
}
EXPORT_SYMBOL(drm_mm_reserve_node);
+static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
+{
+ return rb ? rb_to_hole_size(rb) : 0;
+}
+
/**
* drm_mm_insert_node_in_range - ranged search for space and insert @node
* @mm: drm_mm to allocate from
@@ -451,18 +480,26 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
{
struct drm_mm_node *hole;
u64 remainder_mask;
+ bool once;
DRM_MM_BUG_ON(range_start >= range_end);
if (unlikely(size == 0 || range_end - range_start < size))
return -ENOSPC;
+ if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
+ return -ENOSPC;
+
if (alignment <= 1)
alignment = 0;
+ once = mode & DRM_MM_INSERT_ONCE;
+ mode &= ~DRM_MM_INSERT_ONCE;
+
remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
- for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
- hole = next_hole(mm, hole, mode)) {
+ for (hole = first_hole(mm, range_start, range_end, size, mode);
+ hole;
+ hole = once ? NULL : next_hole(mm, hole, mode)) {
u64 hole_start = __drm_mm_hole_node_start(hole);
u64 hole_end = hole_start + hole->hole_size;
u64 adj_start, adj_end;
@@ -587,9 +624,9 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
if (drm_mm_hole_follows(old)) {
list_replace(&old->hole_stack, &new->hole_stack);
- rb_replace_node(&old->rb_hole_size,
- &new->rb_hole_size,
- &mm->holes_size);
+ rb_replace_node_cached(&old->rb_hole_size,
+ &new->rb_hole_size,
+ &mm->holes_size);
rb_replace_node(&old->rb_hole_addr,
&new->rb_hole_addr,
&mm->holes_addr);
@@ -885,7 +922,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
INIT_LIST_HEAD(&mm->hole_stack);
mm->interval_tree = RB_ROOT_CACHED;
- mm->holes_size = RB_ROOT;
+ mm->holes_size = RB_ROOT_CACHED;
mm->holes_addr = RB_ROOT;
/* Clever trick to avoid a special case in the free hole tracking. */
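Note the ordering: insert_hole_size() sends larger holes to the left, so the
cached leftmost node is always the biggest hole, and rb_first_cached() gives
the O(1) "can anything fit" test used by drm_mm_insert_node_in_range(). The
cached-rbtree contract itself, as a generic sketch (key_is_less() and the
node layout are hypothetical):

	struct rb_root_cached root = RB_ROOT_CACHED;
	struct rb_node **link = &root.rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*link) {
		parent = *link;
		if (key_is_less(new, parent)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;	/* went right at least once */
		}
	}
	rb_link_node(&new->node, parent, link);
	rb_insert_color_cached(&new->node, &root, leftmost);
	/* rb_first_cached(&root) is now O(1) */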
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index e5c653357024..21e353bd3948 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -145,6 +145,11 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
drm_for_each_connector_iter(connector, &conn_iter) {
+ /* only expose writeback connectors if userspace understands them */
+ if (!file_priv->writeback_connectors &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
+ continue;
+
if (drm_lease_held(file_priv, connector->base.id)) {
if (count < card_res->count_connectors &&
put_user(connector->base.id, connector_id + count)) {
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index ce4d2fb32810..fcb0ab0abb75 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -433,8 +433,7 @@ static int set_property_legacy(struct drm_mode_object *obj,
drm_modeset_lock_all(dev);
switch (obj->type) {
case DRM_MODE_OBJECT_CONNECTOR:
- ret = drm_mode_connector_set_obj_prop(obj, prop,
- prop_value);
+ ret = drm_connector_set_obj_prop(obj, prop, prop_value);
break;
case DRM_MODE_OBJECT_CRTC:
ret = drm_mode_crtc_set_obj_prop(obj, prop, prop_value);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c78ca0e84ffd..02db9ac82d7a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -659,10 +659,12 @@ EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
* drm_bus_flags_from_videomode - extract information about pixelclk and
* DE polarity from videomode and store it in a separate variable
* @vm: videomode structure to use
- * @bus_flags: information about pixelclk and DE polarity will be stored here
+ * @bus_flags: information about pixelclk, sync and DE polarity will be stored
+ * here
*
- * Sets DRM_BUS_FLAG_DE_(LOW|HIGH) and DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE
- * in @bus_flags according to DISPLAY_FLAGS found in @vm
+ * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE and
+ * DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS
+ * found in @vm
*/
void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
{
@@ -672,6 +674,11 @@ void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
*bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
+ *bus_flags |= DRM_BUS_FLAG_SYNC_POSEDGE;
+ if (vm->flags & DISPLAY_FLAGS_SYNC_NEGEDGE)
+ *bus_flags |= DRM_BUS_FLAG_SYNC_NEGEDGE;
+
if (vm->flags & DISPLAY_FLAGS_DE_LOW)
*bus_flags |= DRM_BUS_FLAG_DE_LOW;
if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
@@ -684,7 +691,7 @@ EXPORT_SYMBOL_GPL(drm_bus_flags_from_videomode);
* of_get_drm_display_mode - get a drm_display_mode from devicetree
* @np: device_node with the timing specification
* @dmode: will be set to the return value
- * @bus_flags: information about pixelclk and DE polarity
+ * @bus_flags: information about pixelclk, sync and DE polarity
* @index: index into the list of display timings in devicetree
*
* This function is expensive and should only be used if only one mode is to be
@@ -1257,7 +1264,7 @@ static const char * const drm_mode_status_names[] = {
#undef MODE_STATUS
-static const char *drm_get_mode_status_name(enum drm_mode_status status)
+const char *drm_get_mode_status_name(enum drm_mode_status status)
{
int index = status + 3;
@@ -1346,7 +1353,7 @@ void drm_mode_sort(struct list_head *mode_list)
EXPORT_SYMBOL(drm_mode_sort);
/**
- * drm_mode_connector_list_update - update the mode list for the connector
+ * drm_connector_list_update - update the mode list for the connector
* @connector: the connector to update
*
* This moves the modes from the @connector probed_modes list
@@ -1356,7 +1363,7 @@ EXPORT_SYMBOL(drm_mode_sort);
* This is just a helper function; it doesn't validate any modes itself and also
* doesn't prune any invalid modes. Callers need to do that themselves.
*/
-void drm_mode_connector_list_update(struct drm_connector *connector)
+void drm_connector_list_update(struct drm_connector *connector)
{
struct drm_display_mode *pmode, *pt;
@@ -1405,7 +1412,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
}
}
}
-EXPORT_SYMBOL(drm_mode_connector_list_update);
+EXPORT_SYMBOL(drm_connector_list_update);
/**
* drm_mode_parse_command_line_for_connector - parse command line modeline for connector
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 1fe122461298..2763a5ec845b 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -9,21 +9,28 @@
#include <drm/drm_panel.h>
#include <drm/drm_of.h>
+/**
+ * DOC: overview
+ *
+ * A set of helper functions to aid DRM drivers in parsing standard DT
+ * properties.
+ */
+
static void drm_release_of(struct device *dev, void *data)
{
of_node_put(data);
}
/**
- * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
+ * drm_of_crtc_port_mask - find the mask of a registered CRTC by port OF node
* @dev: DRM device
* @port: port OF node
*
* Given a port OF node, return the possible mask of the corresponding
* CRTC within a device's list of CRTCs. Returns zero if not found.
*/
-static uint32_t drm_crtc_port_mask(struct drm_device *dev,
- struct device_node *port)
+uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+ struct device_node *port)
{
unsigned int index = 0;
struct drm_crtc *tmp;
@@ -37,6 +44,7 @@ static uint32_t drm_crtc_port_mask(struct drm_device *dev,
return 0;
}
+EXPORT_SYMBOL(drm_of_crtc_port_mask);
/**
* drm_of_find_possible_crtcs - find the possible CRTCs for an encoder port
@@ -62,7 +70,7 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
return 0;
}
- possible_crtcs |= drm_crtc_port_mask(dev, remote_port);
+ possible_crtcs |= drm_of_crtc_port_mask(dev, remote_port);
of_node_put(remote_port);
}
@@ -93,7 +101,7 @@ EXPORT_SYMBOL_GPL(drm_of_component_match_add);
* drm_of_component_probe - Generic probe function for a component based master
* @dev: master device containing the OF node
* @compare_of: compare function used for matching components
- * @master_ops: component master ops to be used
+ * @m_ops: component master ops to be used
*
* Parse the platform device OF node and bind all the components associated
* with the master. Interface ports are added before the encoders in order to
@@ -238,10 +246,17 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
if (!remote)
return -ENODEV;
+ if (!of_device_is_available(remote)) {
+ of_node_put(remote);
+ return -ENODEV;
+ }
+
if (panel) {
*panel = of_drm_find_panel(remote);
- if (*panel)
+ if (!IS_ERR(*panel))
ret = 0;
+ else
+ *panel = NULL;
}
/* No panel found yet, check for a bridge next. */
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 308d442a531b..b902361dee6e 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/module.h>
+#include <drm/drm_device.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
@@ -94,6 +95,9 @@ EXPORT_SYMBOL(drm_panel_remove);
*
* An error is returned if the panel is already attached to another connector.
*
+ * When unloading, the driver should detach from the panel by calling
+ * drm_panel_detach().
+ *
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
@@ -101,6 +105,13 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
if (panel->connector)
return -EBUSY;
+ panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
+ if (!panel->link) {
+ dev_err(panel->dev, "failed to link panel to %s\n",
+ dev_name(connector->dev->dev));
+ return -EINVAL;
+ }
+
panel->connector = connector;
panel->drm = connector->dev;
@@ -115,10 +126,15 @@ EXPORT_SYMBOL(drm_panel_attach);
* Detaches a panel from the connector it is attached to. If a panel is not
* attached to any connector this is effectively a no-op.
*
+ * This function should not be called by the panel device itself; it is
+ * meant for the DRM device that called drm_panel_attach().
+ *
* Return: 0 on success or a negative error code on failure.
*/
int drm_panel_detach(struct drm_panel *panel)
{
+ device_link_del(panel->link);
+
panel->connector = NULL;
panel->drm = NULL;
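The device link makes the panel device a PM dependency of the DRM device. The
expected pairing in the display driver is simply:

	/* at bind time */
	ret = drm_panel_attach(panel, connector);
	if (ret)
		return ret;

	/* at unbind time, from the same DRM-device side */
	drm_panel_detach(panel);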
@@ -135,12 +151,19 @@ EXPORT_SYMBOL(drm_panel_detach);
* tree node. If a matching panel is found, return a pointer to it.
*
* Return: A pointer to the panel registered for the specified device tree
- * node or NULL if no panel matching the device tree node can be found.
+ * node or an ERR_PTR() if no panel matching the device tree node can be found.
+ * Possible error codes returned by this function:
+ * - EPROBE_DEFER: the panel device has not been probed yet, and the caller
+ * should retry later
+ * - ENODEV: the device is not available (status != "okay" or "ok")
*/
struct drm_panel *of_drm_find_panel(const struct device_node *np)
{
struct drm_panel *panel;
+ if (!of_device_is_available(np))
+ return ERR_PTR(-ENODEV);
+
mutex_lock(&panel_lock);
list_for_each_entry(panel, &panel_list, list) {
@@ -151,7 +174,7 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np)
}
mutex_unlock(&panel_lock);
- return NULL;
+ return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL(of_drm_find_panel);
#endif
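Callers of of_drm_find_panel() therefore have to switch from NULL checks to
IS_ERR(), as drm_of_find_panel_or_bridge() does above; the usual probe pattern
becomes:

	struct drm_panel *panel;

	panel = of_drm_find_panel(np);
	if (IS_ERR(panel))
		return PTR_ERR(panel);	/* -EPROBE_DEFER or -ENODEV */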
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 4db9c515b74f..896e42a34895 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -326,64 +326,6 @@ int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
}
EXPORT_SYMBOL(drm_legacy_pci_init);
-int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
-{
- struct pci_dev *root;
- u32 lnkcap, lnkcap2;
-
- *mask = 0;
- if (!dev->pdev)
- return -EINVAL;
-
- root = dev->pdev->bus->self;
-
- /* we've been informed via and serverworks don't make the cut */
- if (root->vendor == PCI_VENDOR_ID_VIA ||
- root->vendor == PCI_VENDOR_ID_SERVERWORKS)
- return -EINVAL;
-
- pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
- pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);
-
- if (lnkcap2) { /* PCIe r3.0-compliant */
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
- *mask |= DRM_PCIE_SPEED_25;
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
- *mask |= DRM_PCIE_SPEED_50;
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
- *mask |= DRM_PCIE_SPEED_80;
- } else { /* pre-r3.0 */
- if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
- *mask |= DRM_PCIE_SPEED_25;
- if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
- *mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
- }
-
- DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
- return 0;
-}
-EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
-
-int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw)
-{
- struct pci_dev *root;
- u32 lnkcap;
-
- *mlw = 0;
- if (!dev->pdev)
- return -EINVAL;
-
- root = dev->pdev->bus->self;
-
- pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
-
- *mlw = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
-
- DRM_INFO("probing mlw for device %x:%x = %x\n", root->vendor, root->device, lnkcap);
- return 0;
-}
-EXPORT_SYMBOL(drm_pcie_get_max_link_width);
-
#else
void drm_pci_agp_destroy(struct drm_device *dev) {}
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 035054455301..6153cbda239f 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -177,6 +177,10 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (WARN_ON(config->num_total_plane >= 32))
return -EINVAL;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+ (!funcs->atomic_destroy_state ||
+ !funcs->atomic_duplicate_state));
+
ret = drm_mode_object_add(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
return ret;
@@ -561,19 +565,66 @@ int drm_plane_check_pixel_format(struct drm_plane *plane,
if (i == plane->format_count)
return -EINVAL;
- if (!plane->modifier_count)
- return 0;
+ if (plane->funcs->format_mod_supported) {
+ if (!plane->funcs->format_mod_supported(plane, format, modifier))
+ return -EINVAL;
+ } else {
+ if (!plane->modifier_count)
+ return 0;
- for (i = 0; i < plane->modifier_count; i++) {
- if (modifier == plane->modifiers[i])
- break;
+ for (i = 0; i < plane->modifier_count; i++) {
+ if (modifier == plane->modifiers[i])
+ break;
+ }
+ if (i == plane->modifier_count)
+ return -EINVAL;
}
- if (i == plane->modifier_count)
- return -EINVAL;
- if (plane->funcs->format_mod_supported &&
- !plane->funcs->format_mod_supported(plane, format, modifier))
+ return 0;
+}
+
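A driver-supplied &drm_plane_funcs.format_mod_supported hook now takes full
precedence over the static modifier list; a hedged example of such a hook
(the format/modifier policy and FOO_FORMAT_MOD_TILED are invented for
illustration):

	static bool foo_format_mod_supported(struct drm_plane *plane,
					     u32 format, u64 modifier)
	{
		/* linear works for everything on this hypothetical hw */
		if (modifier == DRM_FORMAT_MOD_LINEAR)
			return true;

		/* tiling only supported for 32bpp RGB */
		return format == DRM_FORMAT_XRGB8888 &&
		       modifier == FOO_FORMAT_MOD_TILED;
	}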
+static int __setplane_check(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int32_t crtc_x, int32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ int ret;
+
+ /* Check whether this plane is usable on this CRTC */
+ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
+ DRM_DEBUG_KMS("Invalid crtc for plane\n");
return -EINVAL;
+ }
+
+ /* Check whether this plane supports the fb pixel format. */
+ ret = drm_plane_check_pixel_format(plane, fb->format->format,
+ fb->modifier);
+ if (ret) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n",
+ drm_get_format_name(fb->format->format,
+ &format_name),
+ fb->modifier);
+ return ret;
+ }
+
+ /* Give drivers some help against integer overflows */
+ if (crtc_w > INT_MAX ||
+ crtc_x > INT_MAX - (int32_t) crtc_w ||
+ crtc_h > INT_MAX ||
+ crtc_y > INT_MAX - (int32_t) crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ crtc_w, crtc_h, crtc_x, crtc_y);
+ return -ERANGE;
+ }
+
+ ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb);
+ if (ret)
+ return ret;
return 0;
}
@@ -598,6 +649,8 @@ static int __setplane_internal(struct drm_plane *plane,
{
int ret = 0;
+ WARN_ON(drm_drv_uses_atomic_modeset(plane->dev));
+
/* No fb means shut it down */
if (!fb) {
plane->old_fb = plane->fb;
@@ -611,37 +664,9 @@ static int __setplane_internal(struct drm_plane *plane,
goto out;
}
- /* Check whether this plane is usable on this CRTC */
- if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
- DRM_DEBUG_KMS("Invalid crtc for plane\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Check whether this plane supports the fb pixel format. */
- ret = drm_plane_check_pixel_format(plane, fb->format->format,
- fb->modifier);
- if (ret) {
- struct drm_format_name_buf format_name;
- DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n",
- drm_get_format_name(fb->format->format,
- &format_name),
- fb->modifier);
- goto out;
- }
-
- /* Give drivers some help against integer overflows */
- if (crtc_w > INT_MAX ||
- crtc_x > INT_MAX - (int32_t) crtc_w ||
- crtc_h > INT_MAX ||
- crtc_y > INT_MAX - (int32_t) crtc_h) {
- DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
- crtc_w, crtc_h, crtc_x, crtc_y);
- ret = -ERANGE;
- goto out;
- }
-
- ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb);
+ ret = __setplane_check(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
if (ret)
goto out;
@@ -665,6 +690,41 @@ out:
return ret;
}
+static int __setplane_atomic(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int32_t crtc_x, int32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ int ret;
+
+ WARN_ON(!drm_drv_uses_atomic_modeset(plane->dev));
+
+ /* No fb means shut it down */
+ if (!fb)
+ return plane->funcs->disable_plane(plane, ctx);
+
+ /*
+ * FIXME: This is redundant with drm_atomic_plane_check(),
+ * but the legacy cursor/"async" .update_plane() tricks
+ * don't call that so we still need this here. Should remove
+ * this when all .update_plane() implementations have been
+ * fixed to call drm_atomic_plane_check().
+ */
+ ret = __setplane_check(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+ if (ret)
+ return ret;
+
+ return plane->funcs->update_plane(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h, ctx);
+}
+
static int setplane_internal(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -682,9 +742,15 @@ retry:
ret = drm_modeset_lock_all_ctx(plane->dev, &ctx);
if (ret)
goto fail;
- ret = __setplane_internal(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h, &ctx);
+
+ if (drm_drv_uses_atomic_modeset(plane->dev))
+ ret = __setplane_atomic(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h, &ctx);
+ else
+ ret = __setplane_internal(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h, &ctx);
fail:
if (ret == -EDEADLK) {
@@ -816,9 +882,14 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
src_h = fb->height << 16;
}
- ret = __setplane_internal(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- 0, 0, src_w, src_h, ctx);
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = __setplane_atomic(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ 0, 0, src_w, src_h, ctx);
+ else
+ ret = __setplane_internal(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ 0, 0, src_w, src_h, ctx);
if (fb)
drm_framebuffer_put(fb);
@@ -1092,8 +1163,10 @@ retry:
/* Keep the old fb, don't unref it. */
plane->old_fb = NULL;
} else {
- plane->fb = fb;
- drm_framebuffer_get(fb);
+ if (!plane->state) {
+ plane->fb = fb;
+ drm_framebuffer_get(fb);
+ }
}
out:
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index f88f68161519..621f17643bb0 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -440,6 +440,7 @@ out:
* @src_y: y offset of @fb for panning
* @src_w: width of source rectangle in @fb
* @src_h: height of source rectangle in @fb
+ * @ctx: lock acquire context, not used here
*
* Provides a default plane update handler using the atomic plane update
* functions. It is fully left to the driver to check plane constraints and
@@ -455,7 +456,8 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_plane_state *plane_state;
@@ -489,6 +491,7 @@ EXPORT_SYMBOL(drm_plane_helper_update);
/**
* drm_plane_helper_disable() - Transitional helper for plane disable
* @plane: plane to disable
+ * @ctx: lock acquire context, not used here
*
* Provides a default plane disable handler using the atomic plane update
* functions. It is fully left to the driver to check plane constraints and
@@ -499,9 +502,11 @@ EXPORT_SYMBOL(drm_plane_helper_update);
* RETURNS:
* Zero on success, error code on failure
*/
-int drm_plane_helper_disable(struct drm_plane *plane)
+int drm_plane_helper_disable(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_plane_state *plane_state;
+ struct drm_framebuffer *old_fb;
/* crtc helpers love to call disable functions for already disabled hw
* functions. So cope with that. */
@@ -521,8 +526,9 @@ int drm_plane_helper_disable(struct drm_plane *plane)
plane_state->plane = plane;
plane_state->crtc = NULL;
+ old_fb = plane_state->fb;
drm_atomic_set_fb_for_plane(plane_state, NULL);
- return drm_plane_helper_commit(plane, plane_state, plane->fb);
+ return drm_plane_helper_commit(plane, plane_state, old_fb);
}
EXPORT_SYMBOL(drm_plane_helper_disable);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 397b46b33739..186db2e4c57a 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -186,7 +186,6 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
/**
* drm_gem_map_attach - dma_buf attach implementation for GEM
* @dma_buf: buffer to attach device to
- * @target_dev: not used
* @attach: buffer attachment data
*
* Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
@@ -195,7 +194,7 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
*
* Returns 0 on success, negative error code on failure.
*/
-int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_prime_attachment *prime_attach;
@@ -435,35 +434,6 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
/**
- * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
- * @dma_buf: buffer to be mapped
- * @page_num: page number within the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
- */
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- return NULL;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
-
-/**
- * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @page_num: page number within the buffer
- * @addr: virtual address of the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
- */
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
-
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
-
-/**
* drm_gem_dmabuf_kmap - map implementation for GEM
* @dma_buf: buffer to be mapped
* @page_num: page number within the buffer
@@ -520,9 +490,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.map = drm_gem_dmabuf_kmap,
- .map_atomic = drm_gem_dmabuf_kmap_atomic,
.unmap = drm_gem_dmabuf_kunmap,
- .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index b25f98f33f6c..0e7fc3e7dfb4 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -30,6 +30,100 @@
#include <drm/drmP.h>
#include <drm/drm_print.h>
+void __drm_puts_coredump(struct drm_printer *p, const char *str)
+{
+ struct drm_print_iterator *iterator = p->arg;
+ ssize_t len;
+
+ if (!iterator->remain)
+ return;
+
+ if (iterator->offset < iterator->start) {
+ ssize_t copy;
+
+ len = strlen(str);
+
+ if (iterator->offset + len <= iterator->start) {
+ iterator->offset += len;
+ return;
+ }
+
+ copy = len - (iterator->start - iterator->offset);
+
+ if (copy > iterator->remain)
+ copy = iterator->remain;
+
+ /* Copy out the bit of the string that we need */
+ memcpy(iterator->data,
+ str + (iterator->start - iterator->offset), copy);
+
+ iterator->offset = iterator->start + copy;
+ iterator->remain -= copy;
+ } else {
+ ssize_t pos = iterator->offset - iterator->start;
+
+ len = min_t(ssize_t, strlen(str), iterator->remain);
+
+ memcpy(iterator->data + pos, str, len);
+
+ iterator->offset += len;
+ iterator->remain -= len;
+ }
+}
+EXPORT_SYMBOL(__drm_puts_coredump);
+
+void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf)
+{
+ struct drm_print_iterator *iterator = p->arg;
+ size_t len;
+ char *buf;
+
+ if (!iterator->remain)
+ return;
+
+ /* Figure out how big the string will be */
+ len = snprintf(NULL, 0, "%pV", vaf);
+
+ /* Easiest path: the whole string still falls before the start offset */
+ if (iterator->offset + len <= iterator->start) {
+ iterator->offset += len;
+ return;
+ }
+
+ /* Then check if we can directly copy into the target buffer */
+ if ((iterator->offset >= iterator->start) && (len < iterator->remain)) {
+ ssize_t pos = iterator->offset - iterator->start;
+
+ snprintf(((char *) iterator->data) + pos,
+ iterator->remain, "%pV", vaf);
+
+ iterator->offset += len;
+ iterator->remain -= len;
+
+ return;
+ }
+
+ /*
+ * Finally, hit the slow path and make a temporary string to copy over
+ * using __drm_puts_coredump()
+ */
+ buf = kmalloc(len + 1, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!buf)
+ return;
+
+ snprintf(buf, len + 1, "%pV", vaf);
+ __drm_puts_coredump(p, (const char *) buf);
+
+ kfree(buf);
+}
+EXPORT_SYMBOL(__drm_printfn_coredump);
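These callbacks are meant to be driven through a &drm_print_iterator so a
device coredump can be rendered piecewise into a caller-supplied buffer; a
sketch of the read-side usage (assuming the drm_coredump_printer() constructor
added in the matching drm_print.h change):

	struct drm_print_iterator iter = {
		.data	= buffer,	/* caller's output buffer */
		.start	= offset,	/* byte offset into the full dump */
		.remain	= count,	/* space left in the buffer */
	};
	struct drm_printer p = drm_coredump_printer(&iter);

	drm_puts(&p, "---\n");
	drm_printf(&p, "time: %lld\n", (long long)ktime_get_seconds());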
+
+void __drm_puts_seq_file(struct drm_printer *p, const char *str)
+{
+ seq_puts(p->arg, str);
+}
+EXPORT_SYMBOL(__drm_puts_seq_file);
+
void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf)
{
seq_printf(p->arg, "%pV", vaf);
@@ -49,6 +143,23 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
EXPORT_SYMBOL(__drm_printfn_debug);
/**
+ * drm_puts - print a const string to a &drm_printer stream
+ * @p: the &drm printer
+ * @str: const string
+ *
+ * Use the optimized &drm_printer.puts callback when the printer provides
+ * one, and fall back to drm_printf() otherwise.
+ */
+void drm_puts(struct drm_printer *p, const char *str)
+{
+ if (p->puts)
+ p->puts(p, str);
+ else
+ drm_printf(p, "%s", str);
+}
+EXPORT_SYMBOL(drm_puts);
+
+/**
* drm_printf - print to a &drm_printer stream
* @p: the &drm_printer
* @f: format string
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 527743394150..a1bb157bfdfa 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -33,6 +33,7 @@
#include <linux/moduleparam.h>
#include <drm/drmP.h>
+#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_crtc_helper.h>
@@ -88,9 +89,9 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- uint32_t *ids = connector->encoder_ids;
enum drm_mode_status ret = MODE_OK;
- unsigned int i;
+ struct drm_encoder *encoder;
+ int i;
/* Step 1: Validate against connector */
ret = drm_connector_mode_valid(connector, mode);
@@ -98,13 +99,9 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
return ret;
/* Step 2: Validate against encoders and crtcs */
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct drm_encoder *encoder = drm_encoder_find(dev, NULL, ids[i]);
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
struct drm_crtc *crtc;
- if (!encoder)
- continue;
-
ret = drm_encoder_mode_valid(encoder, mode);
if (ret != MODE_OK) {
/* No point in continuing for crtc check as this encoder
@@ -363,7 +360,7 @@ EXPORT_SYMBOL(drm_helper_probe_detect);
* using the VESA GTF/CVT formulas.
*
* 3. Modes are moved from the probed_modes list to the modes list. Potential
- * duplicates are merged together (see drm_mode_connector_list_update()).
+ * duplicates are merged together (see drm_connector_list_update()).
* After this step the probed_modes list will be empty again.
*
* 4. Any non-stale mode on the modes list then undergoes validation
@@ -475,7 +472,7 @@ retry:
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, connector->name);
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
verbose_prune = false;
goto prune;
}
@@ -488,7 +485,7 @@ retry:
if (count == 0)
goto prune;
- drm_mode_connector_list_update(connector);
+ drm_connector_list_update(connector);
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
@@ -563,6 +560,8 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev)
drm_sysfs_hotplug_event(dev);
if (dev->mode_config.funcs->output_poll_changed)
dev->mode_config.funcs->output_poll_changed(dev);
+
+ drm_client_dev_hotplug(dev);
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
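
Above, drm_connector_for_each_possible_encoder() replaces the fixed-size
encoder_ids[] walk and its NULL checks. A minimal sketch of the iteration
pattern, here just counting the attached encoders for illustration:

    struct drm_encoder *encoder;
    int i, count = 0;

    drm_connector_for_each_possible_encoder(connector, encoder, i)
        count++;	/* encoder is non-NULL on every iteration */
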
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 7a00455ca568..51fa978f0d23 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -52,7 +52,7 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
bool has_primary = state->plane_mask &
- BIT(drm_plane_index(crtc->primary));
+ drm_plane_mask(crtc->primary);
/* We always want to have an active plane with an active CRTC */
if (has_primary != state->enable)
@@ -281,13 +281,13 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
if (ret)
return ret;
- encoder->possible_crtcs = 1 << drm_crtc_index(crtc);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
if (ret || !connector)
return ret;
- return drm_mode_connector_attach_encoder(connector, encoder);
+ return drm_connector_attach_encoder(connector, encoder);
}
EXPORT_SYMBOL(drm_simple_display_pipe_init);
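
drm_crtc_mask() and drm_plane_mask() are small inline helpers equivalent to
the open-coded shifts they replace:

    /* both return a u32 with a single bit set */
    u32 crtc_bit  = drm_crtc_mask(crtc);            /* 1 << drm_crtc_index(crtc) */
    u32 plane_bit = drm_plane_mask(crtc->primary);  /* 1 << drm_plane_index(plane) */
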
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index d4f4ce484529..adb3cb27d31e 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -207,7 +207,6 @@ static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
.get_driver_name = drm_syncobj_null_fence_get_name,
.get_timeline_name = drm_syncobj_null_fence_get_name,
.enable_signaling = drm_syncobj_null_fence_enable_signaling,
- .wait = dma_fence_default_wait,
.release = NULL,
};
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 2660543ad86a..c3301046dfaa 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -100,7 +100,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
* map, get the page, increment the use count and return it.
*/
#if IS_ENABLED(CONFIG_AGP)
-static int drm_vm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
@@ -173,7 +173,7 @@ vm_fault_error:
return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else
-static int drm_vm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
@@ -189,7 +189,7 @@ static int drm_vm_fault(struct vm_fault *vmf)
* Get the mapping, find the real physical page to map, get the page, and
* return it.
*/
-static int drm_vm_shm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
@@ -291,7 +291,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
*
* Determine the page number from the page offset and get it from drm_device_dma::pagelist.
*/
-static int drm_vm_dma_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_file *priv = vma->vm_file->private_data;
@@ -326,7 +326,7 @@ static int drm_vm_dma_fault(struct vm_fault *vmf)
*
* Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/
-static int drm_vm_sg_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_local_map *map = vma->vm_private_data;
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 23c749c05b5a..a6b2fe36b025 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* Copyright (c) 2012 David Airlie <airlied@linux.ie>
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
new file mode 100644
index 000000000000..c20e6fe00cb3
--- /dev/null
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_property.h>
+#include <drm/drm_writeback.h>
+#include <drm/drmP.h>
+#include <linux/dma-fence.h>
+
+/**
+ * DOC: overview
+ *
+ * Writeback connectors are used to expose hardware which can write the output
+ * from a CRTC to a memory buffer. They are used and act similarly to other
+ * types of connectors, with some important differences:
+ *
+ * * Writeback connectors don't provide a way to output visually to the user.
+ *
+ * * Writeback connectors are visible to userspace only when the client sets
+ * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS.
+ *
+ * * Writeback connectors don't have EDID.
+ *
+ * A framebuffer may only be attached to a writeback connector when the
+ * connector is attached to a CRTC. The WRITEBACK_FB_ID property which sets the
+ * framebuffer applies only to a single commit (see below). A framebuffer may
+ * not be attached while the CRTC is off.
+ *
+ * Unlike with planes, when a writeback framebuffer is removed by userspace DRM
+ * makes no attempt to remove it from active use by the connector. This is
+ * because no method is provided to abort a writeback operation, and in any
+ * case making a new commit whilst a writeback is ongoing is undefined (see
+ * WRITEBACK_OUT_FENCE_PTR below). As soon as the current writeback is finished,
+ * the framebuffer will automatically no longer be in active use. As it will
+ * also have already been removed from the framebuffer list, there will be no
+ * way for any userspace application to retrieve a reference to it in the
+ * intervening period.
+ *
+ * Writeback connectors have some additional properties, which userspace
+ * can use to query and control them:
+ *
+ * "WRITEBACK_FB_ID":
+ * Write-only object property storing a DRM_MODE_OBJECT_FB: it stores the
+ * framebuffer to be written by the writeback connector. This property is
+ * similar to the FB_ID property on planes, but will always read as zero
+ * and is not preserved across commits.
+ * Userspace must set this property to an output buffer every time it
+ * wishes the buffer to be filled.
+ *
+ * "WRITEBACK_PIXEL_FORMATS":
+ * Immutable blob property to store the supported pixel formats table. The
+ * data is an array of u32 DRM_FORMAT_* fourcc values.
+ * Userspace can use this blob to find out what pixel formats are supported
+ * by the connector's writeback engine.
+ *
+ * "WRITEBACK_OUT_FENCE_PTR":
+ * Userspace can use this property to provide a pointer for the kernel to
+ * fill with a sync_file file descriptor, which will signal once the
+ * writeback is finished. The value should be the address of a 32-bit
+ * signed integer, cast to a u64.
+ * Userspace should wait for this fence to signal before making another
+ * commit affecting any of the same CRTCs, Planes or Connectors.
+ * **Failure to do so will result in undefined behaviour.**
+ * For this reason it is strongly recommended that all userspace
+ * applications making use of writeback connectors *always* retrieve an
+ * out-fence for the commit and use it appropriately.
+ * From userspace, this property will always read as zero.
+ */
+
+#define fence_to_wb_connector(x) container_of(x->lock, \
+ struct drm_writeback_connector, \
+ fence_lock)
+
+static const char *drm_writeback_fence_get_driver_name(struct dma_fence *fence)
+{
+ struct drm_writeback_connector *wb_connector =
+ fence_to_wb_connector(fence);
+
+ return wb_connector->base.dev->driver->name;
+}
+
+static const char *
+drm_writeback_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct drm_writeback_connector *wb_connector =
+ fence_to_wb_connector(fence);
+
+ return wb_connector->timeline_name;
+}
+
+static bool drm_writeback_fence_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+static const struct dma_fence_ops drm_writeback_fence_ops = {
+ .get_driver_name = drm_writeback_fence_get_driver_name,
+ .get_timeline_name = drm_writeback_fence_get_timeline_name,
+ .enable_signaling = drm_writeback_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+};
+
+static int create_writeback_properties(struct drm_device *dev)
+{
+ struct drm_property *prop;
+
+ if (!dev->mode_config.writeback_fb_id_property) {
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+ "WRITEBACK_FB_ID",
+ DRM_MODE_OBJECT_FB);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.writeback_fb_id_property = prop;
+ }
+
+ if (!dev->mode_config.writeback_pixel_formats_property) {
+ prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_ATOMIC |
+ DRM_MODE_PROP_IMMUTABLE,
+ "WRITEBACK_PIXEL_FORMATS", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.writeback_pixel_formats_property = prop;
+ }
+
+ if (!dev->mode_config.writeback_out_fence_ptr_property) {
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+ "WRITEBACK_OUT_FENCE_PTR", 0,
+ U64_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.writeback_out_fence_ptr_property = prop;
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs drm_writeback_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/**
+ * drm_writeback_connector_init - Initialize a writeback connector and its properties
+ * @dev: DRM device
+ * @wb_connector: Writeback connector to initialize
+ * @con_funcs: Connector funcs vtable
+ * @enc_helper_funcs: Encoder helper funcs vtable to be used by the internal encoder
+ * @formats: Array of supported pixel formats for the writeback engine
+ * @n_formats: Length of the formats array
+ *
+ * This function creates the writeback-connector-specific properties if they
+ * have not been already created, initializes the connector as
+ * type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
+ * values. It will also create an internal encoder associated with the
+ * drm_writeback_connector and set it to use the @enc_helper_funcs vtable for
+ * the encoder helper.
+ *
+ * Drivers should always use this function instead of drm_connector_init() to
+ * set up writeback connectors.
+ *
+ * Returns: 0 on success, or a negative error code
+ */
+int drm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ const struct drm_connector_funcs *con_funcs,
+ const struct drm_encoder_helper_funcs *enc_helper_funcs,
+ const u32 *formats, int n_formats)
+{
+ struct drm_property_blob *blob;
+ struct drm_connector *connector = &wb_connector->base;
+ struct drm_mode_config *config = &dev->mode_config;
+ int ret = create_writeback_properties(dev);
+
+ if (ret != 0)
+ return ret;
+
+ blob = drm_property_create_blob(dev, n_formats * sizeof(*formats),
+ formats);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
+
+ drm_encoder_helper_add(&wb_connector->encoder, enc_helper_funcs);
+ ret = drm_encoder_init(dev, &wb_connector->encoder,
+ &drm_writeback_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret)
+ goto fail;
+
+ connector->interlace_allowed = 0;
+
+ ret = drm_connector_init(dev, connector, con_funcs,
+ DRM_MODE_CONNECTOR_WRITEBACK);
+ if (ret)
+ goto connector_fail;
+
+ ret = drm_connector_attach_encoder(connector,
+ &wb_connector->encoder);
+ if (ret)
+ goto attach_fail;
+
+ INIT_LIST_HEAD(&wb_connector->job_queue);
+ spin_lock_init(&wb_connector->job_lock);
+
+ wb_connector->fence_context = dma_fence_context_alloc(1);
+ spin_lock_init(&wb_connector->fence_lock);
+ snprintf(wb_connector->timeline_name,
+ sizeof(wb_connector->timeline_name),
+ "CONNECTOR:%d-%s", connector->base.id, connector->name);
+
+ drm_object_attach_property(&connector->base,
+ config->writeback_out_fence_ptr_property, 0);
+
+ drm_object_attach_property(&connector->base,
+ config->writeback_fb_id_property, 0);
+
+ drm_object_attach_property(&connector->base,
+ config->writeback_pixel_formats_property,
+ blob->base.id);
+ wb_connector->pixel_formats_blob_ptr = blob;
+
+ return 0;
+
+attach_fail:
+ drm_connector_cleanup(connector);
+connector_fail:
+ drm_encoder_cleanup(&wb_connector->encoder);
+fail:
+ drm_property_blob_put(blob);
+ return ret;
+}
+EXPORT_SYMBOL(drm_writeback_connector_init);
+
+/**
+ * drm_writeback_queue_job - Queue a writeback job for later signalling
+ * @wb_connector: The writeback connector to queue a job on
+ * @job: The job to queue
+ *
+ * This function adds a job to the job_queue for a writeback connector. It
+ * takes ownership of the writeback job, so any other references to the job
+ * must be cleared after calling this function.
+ *
+ * Drivers must ensure that for a given writeback connector, jobs are queued in
+ * exactly the same order as they will be completed by the hardware (and
+ * signaled via drm_writeback_signal_completion).
+ *
+ * For every call to drm_writeback_queue_job() there must be exactly one call to
+ * drm_writeback_signal_completion().
+ *
+ * See also: drm_writeback_signal_completion()
+ */
+void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
+ struct drm_writeback_job *job)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wb_connector->job_lock, flags);
+ list_add_tail(&job->list_entry, &wb_connector->job_queue);
+ spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+}
+EXPORT_SYMBOL(drm_writeback_queue_job);
+
+/*
+ * @cleanup_work: deferred cleanup of a writeback job
+ *
+ * The job cannot be cleaned up directly in drm_writeback_signal_completion,
+ * because it may be called in interrupt context. Dropping the framebuffer
+ * reference can sleep, and so the cleanup is deferred to a workqueue.
+ */
+static void cleanup_work(struct work_struct *work)
+{
+ struct drm_writeback_job *job = container_of(work,
+ struct drm_writeback_job,
+ cleanup_work);
+ drm_framebuffer_put(job->fb);
+ kfree(job);
+}
+
+
+/**
+ * drm_writeback_signal_completion - Signal the completion of a writeback job
+ * @wb_connector: The writeback connector whose job is complete
+ * @status: Status code to set in the writeback out_fence (0 for success)
+ *
+ * Drivers should call this to signal the completion of a previously queued
+ * writeback job. It should be called as soon as possible after the hardware
+ * has finished writing, and may be called from interrupt context.
+ * It is the driver's responsibility to ensure that for a given connector, the
+ * hardware completes writeback jobs in the same order as they are queued.
+ *
+ * Unless the driver is holding its own reference to the framebuffer, it must
+ * not be accessed after calling this function.
+ *
+ * See also: drm_writeback_queue_job()
+ */
+void
+drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
+ int status)
+{
+ unsigned long flags;
+ struct drm_writeback_job *job;
+
+ spin_lock_irqsave(&wb_connector->job_lock, flags);
+ job = list_first_entry_or_null(&wb_connector->job_queue,
+ struct drm_writeback_job,
+ list_entry);
+ if (job) {
+ list_del(&job->list_entry);
+ if (job->out_fence) {
+ if (status)
+ dma_fence_set_error(job->out_fence, status);
+ dma_fence_signal(job->out_fence);
+ dma_fence_put(job->out_fence);
+ }
+ }
+ spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+
+ if (WARN_ON(!job))
+ return;
+
+ INIT_WORK(&job->cleanup_work, cleanup_work);
+ queue_work(system_long_wq, &job->cleanup_work);
+}
+EXPORT_SYMBOL(drm_writeback_signal_completion);
+
+struct dma_fence *
+drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector)
+{
+ struct dma_fence *fence;
+
+ if (WARN_ON(wb_connector->base.connector_type !=
+ DRM_MODE_CONNECTOR_WRITEBACK))
+ return NULL;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ dma_fence_init(fence, &drm_writeback_fence_ops,
+ &wb_connector->fence_lock, wb_connector->fence_context,
+ ++wb_connector->fence_seqno);
+
+ return fence;
+}
+EXPORT_SYMBOL(drm_writeback_get_out_fence);
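
From userspace, the writeback properties documented in the file above are
driven through an atomic commit. A minimal libdrm sketch, assuming the client
has already set DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, attached the connector to
an active CRTC, and discovered the property IDs (prop_wb_fb_id,
prop_out_fence) via drmModeObjectGetProperties():

    #include <errno.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    int queue_writeback(int fd, uint32_t conn_id, uint32_t fb_id,
                        uint32_t prop_wb_fb_id, uint32_t prop_out_fence)
    {
        int32_t out_fence = -1;
        drmModeAtomicReq *req = drmModeAtomicAlloc();
        int ret;

        if (!req)
            return -ENOMEM;

        /* WRITEBACK_FB_ID: buffer to fill, valid for this commit only */
        drmModeAtomicAddProperty(req, conn_id, prop_wb_fb_id, fb_id);
        /* WRITEBACK_OUT_FENCE_PTR: address of an s32, cast to u64 */
        drmModeAtomicAddProperty(req, conn_id, prop_out_fence,
                                 (uint64_t)(uintptr_t)&out_fence);

        ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
        drmModeAtomicFree(req);
        if (ret)
            return ret;

        /* out_fence now holds a sync_file fd; wait on it before touching
         * fb_id or committing to the same pipeline again. */
        return out_fence;
    }
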
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 540b59fb4103..9b2720b41571 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
for (i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *gpu = priv->gpu[i];
+ struct drm_sched_rq *rq;
if (gpu) {
- drm_sched_entity_init(&gpu->sched,
- &ctx->sched_entity[i],
- &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
- NULL);
+ rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ drm_sched_entity_init(&ctx->sched_entity[i],
+ &rq, 1, NULL);
}
}
@@ -78,8 +78,7 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
gpu->lastctx = NULL;
mutex_unlock(&gpu->lock);
- drm_sched_entity_fini(&gpu->sched,
- &ctx->sched_entity[i]);
+ drm_sched_entity_destroy(&ctx->sched_entity[i]);
}
}
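
The drm_sched_entity_init() call above uses the reworked signature from this
series: the entity comes first, followed by an array of candidate run queues
and its length; the final argument is an optional shared "guilty" marker. A
sketch of the init/teardown pairing, with gpu->sched assumed to be an
initialized drm_gpu_scheduler:

    struct drm_sched_rq *rq =
        &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
    struct drm_sched_entity entity;
    int ret;

    /* one run queue, no shared guilty pointer */
    ret = drm_sched_entity_init(&entity, &rq, 1, NULL);
    if (ret)
        return ret;

    /* ... drm_sched_job_init() + drm_sched_entity_push_job() ... */

    drm_sched_entity_destroy(&entity);
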
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index d36c7bbe66db..8d02d1b7dcf5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -18,6 +18,7 @@
#include <linux/time64.h>
#include <linux/types.h>
#include <linux/sizes.h>
+#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -53,7 +54,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int etnaviv_gem_fault(struct vm_fault *vmf);
+vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 209ef1274b80..1fa74226db91 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -169,31 +169,30 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return obj->ops->mmap(obj, vma);
}
-int etnaviv_gem_fault(struct vm_fault *vmf)
+vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct page **pages, *page;
pgoff_t pgoff;
- int ret;
+ int err;
/*
* Make sure we don't parallel update on a fault, nor move or remove
- * something from beneath our feet. Note that vm_insert_page() is
+ * something from beneath our feet. Note that vmf_insert_page() is
* specifically coded to take care of this, so we don't have to.
*/
- ret = mutex_lock_interruptible(&etnaviv_obj->lock);
- if (ret)
- goto out;
-
+ err = mutex_lock_interruptible(&etnaviv_obj->lock);
+ if (err)
+ return VM_FAULT_NOPAGE;
/* make sure we have pages attached now */
pages = etnaviv_gem_get_pages(etnaviv_obj);
mutex_unlock(&etnaviv_obj->lock);
if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto out;
+ err = PTR_ERR(pages);
+ return vmf_error(err);
}
/* We don't use vmf->pgoff since that has the fake offset: */
@@ -204,25 +203,7 @@ int etnaviv_gem_fault(struct vm_fault *vmf)
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
- ret = vm_insert_page(vma, vmf->address, page);
-
-out:
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return vmf_insert_page(vma, vmf->address, page);
}
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
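
The conversion above shows the general vm_fault_t pattern: helpers such as
vmf_insert_page() and vmf_error() already return VM_FAULT_* codes, so the
errno-to-VM_FAULT_* switch disappears. A minimal sketch
(my_gem_lookup_page() is hypothetical):

    static vm_fault_t my_gem_fault(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;

        page = my_gem_lookup_page(vma, vmf->pgoff);
        if (IS_ERR(page))
            return vmf_error(PTR_ERR(page));	/* errno -> VM_FAULT_* */

        return vmf_insert_page(vma, vmf->address, page);
    }
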
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 46ecd3e66ac9..983e67f19e45 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -388,9 +388,9 @@ static void submit_cleanup(struct kref *kref)
dma_fence_put(submit->in_fence);
if (submit->out_fence) {
/* first remove from IDR, so fence can not be found anymore */
- mutex_lock(&submit->gpu->fence_idr_lock);
+ mutex_lock(&submit->gpu->fence_lock);
idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
- mutex_unlock(&submit->gpu->fence_idr_lock);
+ mutex_unlock(&submit->gpu->fence_lock);
dma_fence_put(submit->out_fence);
}
kfree(submit->pmrs);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 686f6552db48..f225fbc6edd2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -799,6 +799,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
free_buffer:
etnaviv_cmdbuf_free(&gpu->buffer);
+ gpu->buffer.suballoc = NULL;
destroy_iommu:
etnaviv_iommu_destroy(gpu->mmu);
gpu->mmu = NULL;
@@ -1027,11 +1028,6 @@ static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
return dev_name(f->gpu->dev);
}
-static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
@@ -1049,9 +1045,7 @@ static void etnaviv_fence_release(struct dma_fence *fence)
static const struct dma_fence_ops etnaviv_fence_ops = {
.get_driver_name = etnaviv_fence_get_driver_name,
.get_timeline_name = etnaviv_fence_get_timeline_name,
- .enable_signaling = etnaviv_fence_enable_signaling,
.signaled = etnaviv_fence_signaled,
- .wait = dma_fence_default_wait,
.release = etnaviv_fence_release,
};
@@ -1733,7 +1727,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
- mutex_init(&gpu->fence_idr_lock);
+ mutex_init(&gpu->fence_lock);
/* Map registers: */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 90f17ff7888e..9a75a6937268 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -118,7 +118,7 @@ struct etnaviv_gpu {
u32 idle_mask;
/* Fencing support */
- struct mutex fence_idr_lock;
+ struct mutex fence_lock;
struct idr fence_idr;
u32 next_fence;
u32 active_fence;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index 71fbc1f96cb6..f1c88d8ad5ba 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -119,8 +119,7 @@ static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
- u32 *p;
- int ret, i;
+ int ret;
/* allocate scratch page */
etnaviv_domain->base.bad_page_cpu =
@@ -131,9 +130,9 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
ret = -ENOMEM;
goto fail_mem;
}
- p = etnaviv_domain->base.bad_page_cpu;
- for (i = 0; i < SZ_4K / 4; i++)
- *p++ = 0xdead55aa;
+
+ memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
+ SZ_4K / sizeof(u32));
etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
SZ_4K, &etnaviv_domain->pta_dma,
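
memset32() fills a buffer with a repeating 32-bit pattern; note that the
count is in 32-bit words, not bytes:

    u32 *p = buf;
    size_t i;

    /* old open-coded fill */
    for (i = 0; i < SZ_4K / sizeof(u32); i++)
        *p++ = 0xdead55aa;

    /* equivalent single call */
    memset32(buf, 0xdead55aa, SZ_4K / sizeof(u32));
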
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 50d6b88cb7aa..69e9b431bf1f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -140,28 +140,38 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
struct etnaviv_gem_submit *submit)
{
- int ret;
+ int ret = 0;
+
+ /*
+ * Hold the fence lock across the whole operation to avoid jobs being
+ * pushed out of order with regard to their sched fence seqnos as
+ * allocated in drm_sched_job_init.
+ */
+ mutex_lock(&submit->gpu->fence_lock);
- ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
- sched_entity, submit->cmdbuf.ctx);
+ ret = drm_sched_job_init(&submit->sched_job, sched_entity,
+ submit->cmdbuf.ctx);
if (ret)
- return ret;
+ goto out_unlock;
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
- mutex_lock(&submit->gpu->fence_idr_lock);
submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
submit->out_fence, 0,
INT_MAX, GFP_KERNEL);
- mutex_unlock(&submit->gpu->fence_idr_lock);
- if (submit->out_fence_id < 0)
- return -ENOMEM;
+ if (submit->out_fence_id < 0) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
/* the scheduler holds on to the job now */
kref_get(&submit->refcount);
drm_sched_entity_push_job(&submit->sched_job, sched_entity);
- return 0;
+out_unlock:
+ mutex_unlock(&submit->gpu->fence_lock);
+
+ return ret;
}
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
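
The invariant called out in the comment above: drm_sched_job_init() allocates
the job's scheduler-fence seqno, so seqno order and push order must match.
Holding gpu->fence_lock across init, IDR insert and push keeps them in
lockstep. Compressed to its core (the fence get, IDR insert and kref are
omitted here):

    mutex_lock(&gpu->fence_lock);
    ret = drm_sched_job_init(&submit->sched_job, sched_entity,
                             submit->cmdbuf.ctx);	/* seqno allocated here */
    if (!ret)
        drm_sched_entity_push_job(&submit->sched_job, sched_entity);
    mutex_unlock(&gpu->fence_lock);
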
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 3b323f1e0475..2ad146bbf4f5 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -4,7 +4,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
- exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
+ exynos_drm_gem.o exynos_drm_plane.o
exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index e868773ea509..94529aa82339 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -673,6 +673,8 @@ err:
static const struct dev_pm_ops exynos5433_decon_pm_ops = {
SET_RUNTIME_PM_OPS(exynos5433_decon_suspend, exynos5433_decon_resume,
NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
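
Several drivers in this series gain the same system-sleep wiring: reuse the
runtime-PM callbacks for system suspend/resume via the stock
pm_runtime_force_suspend()/pm_runtime_force_resume() helpers. As a generic
sketch (the my_* callbacks are hypothetical):

    static const struct dev_pm_ops my_pm_ops = {
        SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
    };
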
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 3931d5e33fe0..88cbd000eb09 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -832,6 +832,8 @@ static int exynos7_decon_resume(struct device *dev)
static const struct dev_pm_ops exynos7_decon_pm_ops = {
SET_RUNTIME_PM_OPS(exynos7_decon_suspend, exynos7_decon_resume,
NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver decon_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 86330f396784..c8449ae4f4fe 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/of_graph.h>
#include <linux/component.h>
+#include <linux/pm_runtime.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
@@ -232,9 +233,11 @@ static int exynos_dp_probe(struct platform_device *pdev)
np = of_parse_phandle(dev->of_node, "panel", 0);
if (np) {
dp->plat_data.panel = of_drm_find_panel(np);
+
of_node_put(np);
- if (!dp->plat_data.panel)
- return -EPROBE_DEFER;
+ if (IS_ERR(dp->plat_data.panel))
+ return PTR_ERR(dp->plat_data.panel);
+
goto out;
}
@@ -276,6 +279,8 @@ static int exynos_dp_resume(struct device *dev)
static const struct dev_pm_ops exynos_dp_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id exynos_dp_match[] = {
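
of_drm_find_panel() now returns an ERR_PTR instead of NULL on failure, so
callers propagate its error code rather than hard-coding -EPROBE_DEFER, as in
the exynos_dp hunk above:

    panel = of_drm_find_panel(np);
    of_node_put(np);
    if (IS_ERR(panel))
        return PTR_ERR(panel);	/* e.g. -EPROBE_DEFER until the panel probes */
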
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
deleted file mode 100644
index b0c0621fcdf7..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/* exynos_drm_core.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author:
- * Inki Dae <inki.dae@samsung.com>
- * Joonyoung Shim <jy0922.shim@samsung.com>
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_crtc.h"
-
-static LIST_HEAD(exynos_drm_subdrv_list);
-
-int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
-{
- if (!subdrv)
- return -EINVAL;
-
- list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
-
- return 0;
-}
-
-int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
-{
- if (!subdrv)
- return -EINVAL;
-
- list_del(&subdrv->list);
-
- return 0;
-}
-
-int exynos_drm_device_subdrv_probe(struct drm_device *dev)
-{
- struct exynos_drm_subdrv *subdrv, *n;
- int err;
-
- if (!dev)
- return -EINVAL;
-
- list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
- if (subdrv->probe) {
- subdrv->drm_dev = dev;
-
- /*
- * this probe callback would be called by sub driver
- * after setting of all resources to this sub driver,
- * such as clock, irq and register map are done.
- */
- err = subdrv->probe(dev, subdrv->dev);
- if (err) {
- DRM_DEBUG("exynos drm subdrv probe failed.\n");
- list_del(&subdrv->list);
- continue;
- }
- }
- }
-
- return 0;
-}
-
-int exynos_drm_device_subdrv_remove(struct drm_device *dev)
-{
- struct exynos_drm_subdrv *subdrv;
-
- if (!dev) {
- WARN(1, "Unexpected drm device unregister!\n");
- return -EINVAL;
- }
-
- list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->remove)
- subdrv->remove(dev, subdrv->dev);
- }
-
- return 0;
-}
-
-int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
-{
- struct exynos_drm_subdrv *subdrv;
- int ret;
-
- list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->open) {
- ret = subdrv->open(dev, subdrv->dev, file);
- if (ret)
- goto err;
- }
- }
-
- return 0;
-
-err:
- list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->close)
- subdrv->close(dev, subdrv->dev, file);
- }
- return ret;
-}
-
-void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
-{
- struct exynos_drm_subdrv *subdrv;
-
- list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->close)
- subdrv->close(dev, subdrv->dev, file);
- }
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 66945e0dc57f..2f0babb67c51 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -113,7 +113,7 @@ static int exynos_dpi_create_connector(struct drm_encoder *encoder)
}
drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
@@ -240,8 +240,8 @@ struct drm_encoder *exynos_dpi_probe(struct device *dev)
if (ctx->panel_node) {
ctx->panel = of_drm_find_panel(ctx->panel_node);
- if (!ctx->panel)
- return ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ctx->panel))
+ return ERR_CAST(ctx->panel);
}
return &ctx->encoder;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ed3cc2989f93..b599f74692e5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -55,8 +55,7 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
return -ENOMEM;
file->driver_priv = file_priv;
-
- ret = exynos_drm_subdrv_open(dev, file);
+ ret = g2d_open(dev, file);
if (ret)
goto err_file_priv_free;
@@ -70,7 +69,7 @@ err_file_priv_free:
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
- exynos_drm_subdrv_close(dev, file);
+ g2d_close(dev, file);
kfree(file->driver_priv);
file->driver_priv = NULL;
}
@@ -147,13 +146,12 @@ static struct drm_driver exynos_drm_driver = {
.minor = DRIVER_MINOR,
};
-#ifdef CONFIG_PM_SLEEP
static int exynos_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct exynos_drm_private *private;
- if (pm_runtime_suspended(dev) || !drm_dev)
+ if (!drm_dev)
return 0;
private = drm_dev->dev_private;
@@ -170,25 +168,23 @@ static int exynos_drm_suspend(struct device *dev)
return 0;
}
-static int exynos_drm_resume(struct device *dev)
+static void exynos_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct exynos_drm_private *private;
- if (pm_runtime_suspended(dev) || !drm_dev)
- return 0;
+ if (!drm_dev)
+ return;
private = drm_dev->dev_private;
drm_atomic_helper_resume(drm_dev, private->suspend_state);
exynos_drm_fbdev_resume(drm_dev);
drm_kms_helper_poll_enable(drm_dev);
-
- return 0;
}
-#endif
static const struct dev_pm_ops exynos_drm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume)
+ .prepare = exynos_drm_suspend,
+ .complete = exynos_drm_resume,
};
/* forward declaration */
@@ -240,6 +236,7 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE
}, {
DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
DRM_COMPONENT_DRIVER | DRM_FIMC_DEVICE,
@@ -376,11 +373,6 @@ static int exynos_drm_bind(struct device *dev)
if (ret)
goto err_unbind_all;
- /* Probe non kms sub drivers and virtual display driver. */
- ret = exynos_drm_device_subdrv_probe(drm);
- if (ret)
- goto err_unbind_all;
-
drm_mode_config_reset(drm);
/*
@@ -411,7 +403,6 @@ err_cleanup_fbdev:
exynos_drm_fbdev_fini(drm);
err_cleanup_poll:
drm_kms_helper_poll_fini(drm);
- exynos_drm_device_subdrv_remove(drm);
err_unbind_all:
component_unbind_all(drm->dev, drm);
err_mode_config_cleanup:
@@ -431,8 +422,6 @@ static void exynos_drm_unbind(struct device *dev)
drm_dev_unregister(drm);
- exynos_drm_device_subdrv_remove(drm);
-
exynos_drm_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 0f6d079a55c9..c737c4bd2c19 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -179,17 +179,13 @@ static inline void exynos_drm_pipe_clk_enable(struct exynos_drm_crtc *crtc,
crtc->pipe_clk->enable(crtc->pipe_clk, enable);
}
-struct exynos_drm_g2d_private {
- struct device *dev;
+struct drm_exynos_file_private {
+ /* for g2d api */
struct list_head inuse_cmdlist;
struct list_head event_list;
struct list_head userptr_list;
};
-struct drm_exynos_file_private {
- struct exynos_drm_g2d_private *g2d_priv;
-};
-
/*
* Exynos drm private structure.
*
@@ -201,6 +197,7 @@ struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
struct drm_atomic_state *suspend_state;
+ struct device *g2d_dev;
struct device *dma_dev;
void *mapping;
@@ -217,44 +214,6 @@ static inline struct device *to_dma_dev(struct drm_device *dev)
return priv->dma_dev;
}
-/*
- * Exynos drm sub driver structure.
- *
- * @list: sub driver has its own list object to register to exynos drm driver.
- * @dev: pointer to device object for subdrv device driver.
- * @drm_dev: pointer to drm_device and this pointer would be set
- * when sub driver calls exynos_drm_subdrv_register().
- * @probe: this callback would be called by exynos drm driver after
- * subdrv is registered to it.
- * @remove: this callback is used to release resources created
- * by probe callback.
- * @open: this would be called with drm device file open.
- * @close: this would be called with drm device file close.
- */
-struct exynos_drm_subdrv {
- struct list_head list;
- struct device *dev;
- struct drm_device *drm_dev;
-
- int (*probe)(struct drm_device *drm_dev, struct device *dev);
- void (*remove)(struct drm_device *drm_dev, struct device *dev);
- int (*open)(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file);
- void (*close)(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file);
-};
-
- /* This function would be called by non kms drivers such as g2d and ipp. */
-int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv);
-
-/* this function removes subdrv list from exynos drm driver */
-int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
-
-int exynos_drm_device_subdrv_probe(struct drm_device *dev);
-int exynos_drm_device_subdrv_remove(struct drm_device *dev);
-int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
-void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
-
#ifdef CONFIG_DRM_EXYNOS_DPI
struct drm_encoder *exynos_dpi_probe(struct device *dev);
int exynos_dpi_remove(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 6d29777884f9..781b82c2c579 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1479,7 +1479,7 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
connector->status = connector_status_disconnected;
drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
@@ -1519,6 +1519,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
dsi->panel = of_drm_find_panel(device->dev.of_node);
+ if (IS_ERR(dsi->panel))
+ dsi->panel = NULL;
+
if (dsi->panel) {
drm_panel_attach(dsi->panel, &dsi->connector);
dsi->connector.status = connector_status_connected;
@@ -1860,6 +1863,8 @@ err_clk:
static const struct dev_pm_ops exynos_dsi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver dsi_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 27b7d34d776c..9f52382e19ee 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -101,7 +101,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
{
const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
- struct drm_gem_object *obj;
struct drm_framebuffer *fb;
int i;
int ret;
@@ -112,15 +111,14 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
unsigned long size = height * mode_cmd->pitches[i] +
mode_cmd->offsets[i];
- obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
- if (!obj) {
+ exynos_gem[i] = exynos_drm_gem_get(file_priv,
+ mode_cmd->handles[i]);
+ if (!exynos_gem[i]) {
DRM_ERROR("failed to lookup gem object\n");
ret = -ENOENT;
goto err;
}
- exynos_gem[i] = to_exynos_gem(obj);
-
if (size > exynos_gem[i]->size) {
i++;
ret = -EINVAL;
@@ -138,7 +136,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
err:
while (i--)
- drm_gem_object_put_unlocked(&exynos_gem[i]->base);
+ exynos_drm_gem_put(exynos_gem[i]);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 01b1570d0c3a..b7f56935a46b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1192,6 +1192,8 @@ static int exynos_fimd_resume(struct device *dev)
static const struct dev_pm_ops exynos_fimd_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_fimd_suspend, exynos_fimd_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver fimd_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f68ef1b3a28c..f2481a2014bb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
+#include <linux/component.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -190,7 +191,7 @@ struct g2d_buf_desc {
struct g2d_buf_info {
unsigned int map_nr;
enum g2d_reg_type reg_types[MAX_REG_TYPE_NR];
- unsigned long handles[MAX_REG_TYPE_NR];
+ void *obj[MAX_REG_TYPE_NR];
unsigned int types[MAX_REG_TYPE_NR];
struct g2d_buf_desc descs[MAX_REG_TYPE_NR];
};
@@ -237,7 +238,7 @@ struct g2d_data {
int irq;
struct workqueue_struct *g2d_workq;
struct work_struct runqueue_work;
- struct exynos_drm_subdrv subdrv;
+ struct drm_device *drm_dev;
unsigned long flags;
/* cmdlist */
@@ -268,14 +269,13 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node = g2d->cmdlist_node;
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int nr;
int ret;
struct g2d_buf_info *buf_info;
g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;
- g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev),
+ g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(g2d->drm_dev),
G2D_CMDLIST_POOL_SIZE,
&g2d->cmdlist_pool, GFP_KERNEL,
g2d->cmdlist_dma_attrs);
@@ -308,7 +308,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
return 0;
err:
- dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE,
+ dma_free_attrs(to_dma_dev(g2d->drm_dev), G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
return ret;
@@ -316,12 +316,10 @@ err:
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
-
kfree(g2d->cmdlist_node);
if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
- dma_free_attrs(to_dma_dev(subdrv->drm_dev),
+ dma_free_attrs(to_dma_dev(g2d->drm_dev),
G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
@@ -355,32 +353,31 @@ static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
mutex_unlock(&g2d->cmdlist_mutex);
}
-static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
+static void g2d_add_cmdlist_to_inuse(struct drm_exynos_file_private *file_priv,
struct g2d_cmdlist_node *node)
{
struct g2d_cmdlist_node *lnode;
- if (list_empty(&g2d_priv->inuse_cmdlist))
+ if (list_empty(&file_priv->inuse_cmdlist))
goto add_to_list;
/* this links to base address of new cmdlist */
- lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
+ lnode = list_entry(file_priv->inuse_cmdlist.prev,
struct g2d_cmdlist_node, list);
lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
add_to_list:
- list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
+ list_add_tail(&node->list, &file_priv->inuse_cmdlist);
if (node->event)
- list_add_tail(&node->event->base.link, &g2d_priv->event_list);
+ list_add_tail(&node->event->base.link, &file_priv->event_list);
}
-static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
- unsigned long obj,
+static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
+ void *obj,
bool force)
{
- struct g2d_cmdlist_userptr *g2d_userptr =
- (struct g2d_cmdlist_userptr *)obj;
+ struct g2d_cmdlist_userptr *g2d_userptr = obj;
struct page **pages;
if (!obj)
@@ -398,7 +395,7 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
return;
out:
- dma_unmap_sg(to_dma_dev(drm_dev), g2d_userptr->sgt->sgl,
+ dma_unmap_sg(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt->sgl,
g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL);
pages = frame_vector_pages(g2d_userptr->vec);
@@ -419,16 +416,14 @@ out:
kfree(g2d_userptr);
}
-static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
unsigned long userptr,
unsigned long size,
struct drm_file *filp,
- unsigned long *obj)
+ void **obj)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist_userptr *g2d_userptr;
- struct g2d_data *g2d;
struct sg_table *sgt;
unsigned long start, end;
unsigned int npages, offset;
@@ -439,10 +434,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
return ERR_PTR(-EINVAL);
}
- g2d = dev_get_drvdata(g2d_priv->dev);
-
/* check if userptr already exists in userptr_list. */
- list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+ list_for_each_entry(g2d_userptr, &file_priv->userptr_list, list) {
if (g2d_userptr->userptr == userptr) {
/*
* also check size because there could be same address
@@ -450,7 +443,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
*/
if (g2d_userptr->size == size) {
atomic_inc(&g2d_userptr->refcount);
- *obj = (unsigned long)g2d_userptr;
+ *obj = g2d_userptr;
return &g2d_userptr->dma_addr;
}
@@ -517,7 +510,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
g2d_userptr->sgt = sgt;
- if (!dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents,
+ if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents,
DMA_BIDIRECTIONAL)) {
DRM_ERROR("failed to map sgt with dma region.\n");
ret = -ENOMEM;
@@ -527,14 +520,14 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
g2d_userptr->userptr = userptr;
- list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+ list_add_tail(&g2d_userptr->list, &file_priv->userptr_list);
if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
g2d->current_pool += npages << PAGE_SHIFT;
g2d_userptr->in_pool = true;
}
- *obj = (unsigned long)g2d_userptr;
+ *obj = g2d_userptr;
return &g2d_userptr->dma_addr;
@@ -556,19 +549,14 @@ err_free:
return ERR_PTR(ret);
}
-static void g2d_userptr_free_all(struct drm_device *drm_dev,
- struct g2d_data *g2d,
- struct drm_file *filp)
+static void g2d_userptr_free_all(struct g2d_data *g2d, struct drm_file *filp)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist_userptr *g2d_userptr, *n;
- list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+ list_for_each_entry_safe(g2d_userptr, n, &file_priv->userptr_list, list)
if (g2d_userptr->in_pool)
- g2d_userptr_put_dma_addr(drm_dev,
- (unsigned long)g2d_userptr,
- true);
+ g2d_userptr_put_dma_addr(g2d, g2d_userptr, true);
g2d->current_pool = 0;
}
@@ -723,26 +711,23 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
buf_desc = &buf_info->descs[reg_type];
if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
- unsigned long size;
+ struct exynos_drm_gem *exynos_gem;
- size = exynos_drm_gem_get_size(drm_dev, handle, file);
- if (!size) {
+ exynos_gem = exynos_drm_gem_get(file, handle);
+ if (!exynos_gem) {
ret = -EFAULT;
goto err;
}
- if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
- size)) {
+ if (!g2d_check_buf_desc_is_valid(buf_desc,
+ reg_type, exynos_gem->size)) {
+ exynos_drm_gem_put(exynos_gem);
ret = -EFAULT;
goto err;
}
- addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
- file);
- if (IS_ERR(addr)) {
- ret = -EFAULT;
- goto err;
- }
+ addr = &exynos_gem->dma_addr;
+ buf_info->obj[reg_type] = exynos_gem;
} else {
struct drm_exynos_g2d_userptr g2d_userptr;
@@ -758,11 +743,11 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
goto err;
}
- addr = g2d_userptr_get_dma_addr(drm_dev,
+ addr = g2d_userptr_get_dma_addr(g2d,
g2d_userptr.userptr,
g2d_userptr.size,
file,
- &handle);
+ &buf_info->obj[reg_type]);
if (IS_ERR(addr)) {
ret = -EFAULT;
goto err;
@@ -771,7 +756,6 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
cmdlist->data[reg_pos + 1] = *addr;
buf_info->reg_types[i] = reg_type;
- buf_info->handles[reg_type] = handle;
}
return 0;
@@ -785,29 +769,26 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
struct drm_file *filp)
{
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
struct g2d_buf_info *buf_info = &node->buf_info;
int i;
for (i = 0; i < buf_info->map_nr; i++) {
struct g2d_buf_desc *buf_desc;
enum g2d_reg_type reg_type;
- unsigned long handle;
+ void *obj;
reg_type = buf_info->reg_types[i];
buf_desc = &buf_info->descs[reg_type];
- handle = buf_info->handles[reg_type];
+ obj = buf_info->obj[reg_type];
if (buf_info->types[reg_type] == BUF_TYPE_GEM)
- exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
- filp);
+ exynos_drm_gem_put(obj);
else
- g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
- false);
+ g2d_userptr_put_dma_addr(g2d, obj, false);
buf_info->reg_types[i] = REG_TYPE_NONE;
- buf_info->handles[reg_type] = 0;
+ buf_info->obj[reg_type] = NULL;
buf_info->types[reg_type] = 0;
memset(buf_desc, 0x00, sizeof(*buf_desc));
}
@@ -922,7 +903,7 @@ static void g2d_runqueue_worker(struct work_struct *work)
static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
{
- struct drm_device *drm_dev = g2d->subdrv.drm_dev;
+ struct drm_device *drm_dev = g2d->drm_dev;
struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
struct drm_exynos_pending_g2d_event *e;
struct timespec64 now;
@@ -1031,7 +1012,7 @@ out:
mutex_unlock(&g2d->runqueue_mutex);
}
-static int g2d_check_reg_offset(struct device *dev,
+static int g2d_check_reg_offset(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
int nr, bool for_addr)
{
@@ -1131,7 +1112,7 @@ static int g2d_check_reg_offset(struct device *dev,
return 0;
err:
- dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
+ dev_err(g2d->dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
return -EINVAL;
}
@@ -1139,23 +1120,8 @@ err:
int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev;
- struct g2d_data *g2d;
struct drm_exynos_g2d_get_ver *ver = data;
- if (!g2d_priv)
- return -ENODEV;
-
- dev = g2d_priv->dev;
- if (!dev)
- return -ENODEV;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
ver->major = G2D_HW_MAJOR_VER;
ver->minor = G2D_HW_MINOR_VER;
@@ -1166,9 +1132,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev;
- struct g2d_data *g2d;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
struct drm_exynos_g2d_set_cmdlist *req = data;
struct drm_exynos_g2d_cmd *cmd;
struct drm_exynos_pending_g2d_event *e;
@@ -1177,17 +1142,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
int size;
int ret;
- if (!g2d_priv)
- return -ENODEV;
-
- dev = g2d_priv->dev;
- if (!dev)
- return -ENODEV;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
node = g2d_get_cmdlist(g2d);
if (!node)
return -ENOMEM;
@@ -1199,7 +1153,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
*/
if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
- dev_err(dev, "number of submitted G2D commands exceeds limit\n");
+ dev_err(g2d->dev, "number of submitted G2D commands exceeds limit\n");
return -EINVAL;
}
@@ -1267,7 +1221,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
*/
size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
- dev_err(dev, "cmdlist size is too big\n");
+ dev_err(g2d->dev, "cmdlist size is too big\n");
ret = -EINVAL;
goto err_free_event;
}
@@ -1282,7 +1236,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_nr * 2;
- ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
+ ret = g2d_check_reg_offset(g2d, node, req->cmd_nr, false);
if (ret < 0)
goto err_free_event;
@@ -1301,7 +1255,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_buf_nr * 2;
- ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
+ ret = g2d_check_reg_offset(g2d, node, req->cmd_buf_nr, true);
if (ret < 0)
goto err_free_event;
@@ -1319,7 +1273,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
/* tail */
cmdlist->data[cmdlist->last] = 0;
- g2d_add_cmdlist_to_inuse(g2d_priv, node);
+ g2d_add_cmdlist_to_inuse(file_priv, node);
return 0;
@@ -1337,25 +1291,13 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev;
- struct g2d_data *g2d;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
struct drm_exynos_g2d_exec *req = data;
struct g2d_runqueue_node *runqueue_node;
struct list_head *run_cmdlist;
struct list_head *event_list;
- if (!g2d_priv)
- return -ENODEV;
-
- dev = g2d_priv->dev;
- if (!dev)
- return -ENODEV;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
if (!runqueue_node)
return -ENOMEM;
@@ -1367,11 +1309,11 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
init_completion(&runqueue_node->complete);
runqueue_node->async = req->async;
- list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
- list_splice_init(&g2d_priv->event_list, event_list);
+ list_splice_init(&file_priv->inuse_cmdlist, run_cmdlist);
+ list_splice_init(&file_priv->event_list, event_list);
if (list_empty(run_cmdlist)) {
- dev_err(dev, "there is no inuse cmdlist\n");
+ dev_err(g2d->dev, "there is no inuse cmdlist\n");
kmem_cache_free(g2d->runqueue_slab, runqueue_node);
return -EPERM;
}
@@ -1395,71 +1337,28 @@ out:
return 0;
}
-static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
-{
- struct g2d_data *g2d;
- int ret;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
- /* allocate dma-aware cmdlist buffer. */
- ret = g2d_init_cmdlist(g2d);
- if (ret < 0) {
- dev_err(dev, "cmdlist init failed\n");
- return ret;
- }
-
- ret = drm_iommu_attach_device(drm_dev, dev);
- if (ret < 0) {
- dev_err(dev, "failed to enable iommu.\n");
- g2d_fini_cmdlist(g2d);
- }
-
- return ret;
-
-}
-
-static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
-{
- drm_iommu_detach_device(drm_dev, dev);
-}
-
-static int g2d_open(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file)
+int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv;
-
- g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
- if (!g2d_priv)
- return -ENOMEM;
- g2d_priv->dev = dev;
- file_priv->g2d_priv = g2d_priv;
-
- INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
- INIT_LIST_HEAD(&g2d_priv->event_list);
- INIT_LIST_HEAD(&g2d_priv->userptr_list);
+ INIT_LIST_HEAD(&file_priv->inuse_cmdlist);
+ INIT_LIST_HEAD(&file_priv->event_list);
+ INIT_LIST_HEAD(&file_priv->userptr_list);
return 0;
}
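
With g2d_open() now only initializing list heads on the per-file driver_priv, the exec path above can simply splice those lists onto the runqueue via list_splice_init(). For readers unfamiliar with that primitive, here is a minimal, compilable userspace sketch of its move-and-reset semantics; it is an illustration with local helper names, not the kernel's linux/list.h:

    #include <stdio.h>

    /* Tiny circular doubly-linked list, just enough to show that a
     * splice moves the whole chain in O(1) and empties the source. */
    struct list_head { struct list_head *next, *prev; };

    static void init_list_head(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static int list_empty(const struct list_head *h) { return h->next == h; }

    static void list_splice_init(struct list_head *list, struct list_head *head)
    {
        if (list_empty(list))
            return;
        /* stitch list's chain in front of head's first element */
        list->next->prev = head;
        list->prev->next = head->next;
        head->next->prev = list->prev;
        head->next = list->next;
        init_list_head(list);          /* source head is now empty */
    }

    int main(void)
    {
        struct list_head inuse, runqueue, a, b;

        init_list_head(&inuse);
        init_list_head(&runqueue);
        list_add_tail(&a, &inuse);
        list_add_tail(&b, &inuse);

        list_splice_init(&inuse, &runqueue);
        printf("inuse empty: %d, runqueue empty: %d\n",
               list_empty(&inuse), list_empty(&runqueue)); /* 1, 0 */
        return 0;
    }
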
-static void g2d_close(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file)
+void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
struct g2d_data *g2d;
struct g2d_cmdlist_node *node, *n;
- if (!dev)
+ if (!priv->g2d_dev)
return;
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return;
+ g2d = dev_get_drvdata(priv->g2d_dev);
/* Remove the runqueue nodes that belong to us. */
mutex_lock(&g2d->runqueue_mutex);
@@ -1480,24 +1379,70 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
* Properly unmap these buffers here.
*/
mutex_lock(&g2d->cmdlist_mutex);
- list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+ list_for_each_entry_safe(node, n, &file_priv->inuse_cmdlist, list) {
g2d_unmap_cmdlist_gem(g2d, node, file);
list_move_tail(&node->list, &g2d->free_cmdlist);
}
mutex_unlock(&g2d->cmdlist_mutex);
/* release all g2d_userptr in pool. */
- g2d_userptr_free_all(drm_dev, g2d, file);
+ g2d_userptr_free_all(g2d, file);
+}
+
+static int g2d_bind(struct device *dev, struct device *master, void *data)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ int ret;
+
+ g2d->drm_dev = drm_dev;
- kfree(file_priv->g2d_priv);
+ /* allocate dma-aware cmdlist buffer. */
+ ret = g2d_init_cmdlist(g2d);
+ if (ret < 0) {
+ dev_err(dev, "cmdlist init failed\n");
+ return ret;
+ }
+
+ ret = drm_iommu_attach_device(drm_dev, dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable iommu.\n");
+ g2d_fini_cmdlist(g2d);
+ return ret;
+ }
+ priv->g2d_dev = dev;
+
+ dev_info(dev, "The Exynos G2D (ver %d.%d) successfully registered.\n",
+ G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
+ return 0;
+}
+
+static void g2d_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ /* Suspend operation and wait for engine idle. */
+ set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
+ g2d_wait_finish(g2d, NULL);
+ priv->g2d_dev = NULL;
+
+ cancel_work_sync(&g2d->runqueue_work);
+ drm_iommu_detach_device(g2d->drm_dev, dev);
}
+static const struct component_ops g2d_component_ops = {
+ .bind = g2d_bind,
+ .unbind = g2d_unbind,
+};
+
static int g2d_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct g2d_data *g2d;
- struct exynos_drm_subdrv *subdrv;
int ret;
g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
@@ -1564,22 +1509,12 @@ static int g2d_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, g2d);
- subdrv = &g2d->subdrv;
- subdrv->dev = dev;
- subdrv->probe = g2d_subdrv_probe;
- subdrv->remove = g2d_subdrv_remove;
- subdrv->open = g2d_open;
- subdrv->close = g2d_close;
-
- ret = exynos_drm_subdrv_register(subdrv);
+ ret = component_add(dev, &g2d_component_ops);
if (ret < 0) {
dev_err(dev, "failed to register drm g2d device\n");
goto err_put_clk;
}
- dev_info(dev, "The Exynos G2D (ver %d.%d) successfully probed.\n",
- G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
-
return 0;
err_put_clk:
@@ -1595,12 +1530,7 @@ static int g2d_remove(struct platform_device *pdev)
{
struct g2d_data *g2d = platform_get_drvdata(pdev);
- /* Suspend operation and wait for engine idle. */
- set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
- g2d_wait_finish(g2d, NULL);
-
- cancel_work_sync(&g2d->runqueue_work);
- exynos_drm_subdrv_unregister(&g2d->subdrv);
+ component_del(&pdev->dev, &g2d_component_ops);
/* There should be no locking needed here. */
g2d_remove_runqueue_nodes(g2d, NULL);
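
The probe/remove pair above now only registers and unregisters the device with the component framework, deferring all DRM-facing setup and teardown to g2d_bind()/g2d_unbind(). For reference, the general shape of that pattern is sketched below; the foo_ names are hypothetical and this is a kernel-style sketch rather than a buildable module:

    #include <linux/component.h>
    #include <linux/platform_device.h>

    /* bind/unbind run when the master device assembles its components */
    static int foo_bind(struct device *dev, struct device *master, void *data)
    {
        /* "data" is whatever the master passed to component_bind_all();
         * do the setup that needs the fully assembled master here. */
        return 0;
    }

    static void foo_unbind(struct device *dev, struct device *master, void *data)
    {
        /* quiesce the hardware and undo what foo_bind() did */
    }

    static const struct component_ops foo_component_ops = {
        .bind   = foo_bind,
        .unbind = foo_unbind,
    };

    static int foo_probe(struct platform_device *pdev)
    {
        /* claim clocks/regs that do not need the master, then register */
        return component_add(&pdev->dev, &foo_component_ops);
    }

    static int foo_remove(struct platform_device *pdev)
    {
        component_del(&pdev->dev, &foo_component_ops);
        return 0;
    }
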
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
index 1a9c7ca8c15b..287b2ed8f178 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
@@ -14,6 +14,9 @@ extern int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+
+extern int g2d_open(struct drm_device *drm_dev, struct drm_file *file);
+extern void g2d_close(struct drm_device *drm_dev, struct drm_file *file);
#else
static inline int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -33,4 +36,12 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
{
return -ENODEV;
}
+
+int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
+{
+ return 0;
+}
+
+void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
+{ }
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index bdf5a7655228..34ace85feb68 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -171,26 +171,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
kfree(exynos_gem);
}
-unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *file_priv)
-{
- struct exynos_drm_gem *exynos_gem;
- struct drm_gem_object *obj;
-
- obj = drm_gem_object_lookup(file_priv, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return 0;
- }
-
- exynos_gem = to_exynos_gem(obj);
-
- drm_gem_object_put_unlocked(obj);
-
- return exynos_gem->size;
-}
-
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
@@ -299,43 +279,15 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
&args->offset);
}
-dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp)
+struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
+ unsigned int gem_handle)
{
- struct exynos_drm_gem *exynos_gem;
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(filp, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return ERR_PTR(-EINVAL);
- }
-
- exynos_gem = to_exynos_gem(obj);
-
- return &exynos_gem->dma_addr;
-}
-
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp)
-{
- struct drm_gem_object *obj;
-
- obj = drm_gem_object_lookup(filp, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return;
- }
-
- drm_gem_object_put_unlocked(obj);
-
- /*
- * decrease obj->refcount one more time because we have already
- * increased it at exynos_drm_gem_get_dma_addr().
- */
- drm_gem_object_put_unlocked(obj);
+ if (!obj)
+ return NULL;
+ return to_exynos_gem(obj);
}
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 9057d7f1d6ed..d46a62c30812 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -77,32 +77,26 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/*
- * get dma address from gem handle and this function could be used for
+ * get exynos drm object from gem handle; this function could be used for
* other drivers such as 2d/3d acceleration drivers.
* with this function call, gem object reference count would be increased.
*/
-dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp);
+struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
+ unsigned int gem_handle);
/*
- * put dma address from gem handle and this function could be used for
- * other drivers such as 2d/3d acceleration drivers.
- * with this function call, gem object reference count would be decreased.
+ * put exynos drm object acquired from exynos_drm_gem_get();
+ * the gem object reference count would be decreased.
*/
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp);
+static inline void exynos_drm_gem_put(struct exynos_drm_gem *exynos_gem)
+{
+ drm_gem_object_put_unlocked(&exynos_gem->base);
+}
/* get buffer information to memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-/* get buffer size to gem handle. */
-unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *file_priv);
-
/* free gem object. */
void exynos_drm_gem_free_object(struct drm_gem_object *obj);
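
Replacing the get_dma_addr/put_dma_addr pair with exynos_drm_gem_get()/exynos_drm_gem_put() makes the lookup hand back the object itself while still holding exactly one reference per get. A minimal, compilable userspace sketch of that get/put discipline follows; the struct and function names are hypothetical stand-ins, not DRM API:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy object with an explicit reference count; stands in for the
     * GEM object in this illustration. */
    struct obj {
        int refcount;
        unsigned long size;
    };

    static struct obj *obj_get(struct obj *o)
    {
        o->refcount++;              /* lookup hands out a counted reference */
        return o;
    }

    static void obj_put(struct obj *o)
    {
        if (--o->refcount == 0)     /* last reference frees the object */
            free(o);
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        o->refcount = 1;            /* creation reference */
        o->size = 4096;

        struct obj *ref = obj_get(o);       /* like exynos_drm_gem_get() */
        printf("size while referenced: %lu\n", ref->size);
        obj_put(ref);                       /* like exynos_drm_gem_put() */

        obj_put(o);                 /* drop the creation reference; frees */
        return 0;
    }
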
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index b435db8fc916..23226a0212e8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -350,13 +350,13 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
unsigned int height = (i == 0) ? buf->buf.height :
DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
unsigned long size = height * buf->buf.pitch[i];
- struct drm_gem_object *obj = drm_gem_object_lookup(filp,
+ struct exynos_drm_gem *gem = exynos_drm_gem_get(filp,
buf->buf.gem_id[i]);
- if (!obj) {
+ if (!gem) {
ret = -ENOENT;
goto gem_free;
}
- buf->exynos_gem[i] = to_exynos_gem(obj);
+ buf->exynos_gem[i] = gem;
if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
i++;
@@ -370,7 +370,7 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
return 0;
gem_free:
while (i--) {
- drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
+ exynos_drm_gem_put(buf->exynos_gem[i]);
buf->exynos_gem[i] = NULL;
}
return ret;
@@ -383,7 +383,7 @@ static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
if (!buf->exynos_gem[0])
return;
for (i = 0; i < buf->format->num_planes; i++)
- drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
+ exynos_drm_gem_put(buf->exynos_gem[i]);
}
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 2174814273e2..2fd299a58297 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -367,6 +367,8 @@ static int exynos_mic_resume(struct device *dev)
static const struct dev_pm_ops exynos_mic_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_mic_suspend, exynos_mic_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static int exynos_mic_probe(struct platform_device *pdev)
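
The SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) addition (repeated for hdmi and mixer below) reuses the runtime-PM callbacks for system sleep, so the device is simply forced into its runtime-suspended state across suspend/resume. The resulting ops table follows a common shape; a sketch with hypothetical foo_ callbacks:

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int foo_runtime_suspend(struct device *dev)
    {
        /* gate clocks, power down the block */
        return 0;
    }

    static int foo_runtime_resume(struct device *dev)
    {
        /* ungate clocks, reprogram the block */
        return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
        /* system sleep just forces the runtime-PM state */
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
    };
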
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 7098c6d35266..dba29aec59b4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -263,8 +263,6 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
if (!state->crtc)
return;
- plane->crtc = state->crtc;
-
if (exynos_crtc->ops->update_plane)
exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e6b0940b1ac2..19697c1362d8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -319,7 +319,7 @@ static int vidi_get_modes(struct drm_connector *connector)
return -ENOMEM;
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
return drm_add_edid_modes(connector, edid);
}
@@ -344,7 +344,7 @@ static int vidi_create_connector(struct drm_encoder *encoder)
}
drm_connector_helper_add(connector, &vidi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index db91932550cf..2092a650df7d 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -888,7 +888,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
edid->width_cm, edid->height_cm);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid);
ret = drm_add_edid_modes(connector, edid);
@@ -951,7 +951,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
}
drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
if (hdata->bridge) {
ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
@@ -2093,6 +2093,8 @@ static int __maybe_unused exynos_hdmi_resume(struct device *dev)
static const struct dev_pm_ops exynos_hdmi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver hdmi_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 272c79f5f5bf..ffbf4a950f69 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -837,8 +837,6 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
struct drm_device *drm_dev)
{
int ret;
- struct exynos_drm_private *priv;
- priv = drm_dev->dev_private;
mixer_ctx->drm_dev = drm_dev;
@@ -1271,6 +1269,8 @@ static int __maybe_unused exynos_mixer_resume(struct device *dev)
static const struct dev_pm_ops exynos_mixer_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver mixer_driver = {
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index c54806d08dd7..2298ed2a9e1c 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -117,7 +117,7 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
if (ret < 0)
goto err_cleanup;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0)
goto err_sysfs;
@@ -148,8 +148,9 @@ int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
if (panel_node) {
fsl_dev->connector.panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
- if (!fsl_dev->connector.panel)
- return -EPROBE_DEFER;
+ if (IS_ERR(fsl_dev->connector.panel))
+ return PTR_ERR(fsl_dev->connector.panel);
+
return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
}
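
The hunk above tracks an API change: of_drm_find_panel() now returns an ERR_PTR-encoded error instead of NULL, so the caller propagates PTR_ERR() (including -EPROBE_DEFER) rather than hard-coding it. The general pattern, as a small kernel-style sketch with a hypothetical lookup function:

    #include <linux/err.h>

    /* hypothetical lookup that encodes errors in the pointer itself */
    struct panel *lookup_panel(void);

    int attach_panel(void)
    {
        struct panel *p = lookup_panel();

        if (IS_ERR(p))
            return PTR_ERR(p);   /* e.g. -EPROBE_DEFER, -ENODEV */

        /* p is a valid pointer from here on */
        return 0;
    }
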
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index c51d9259c7a7..204c8e452eb7 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -251,7 +251,7 @@ static void psbfb_copyarea_accel(struct fb_info *info,
if (!fb)
return;
- offset = psbfb->gtt->offset;
+ offset = to_gtt_range(fb->obj[0])->offset;
stride = fb->pitches[0];
switch (fb->format->depth) {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 5ea785f07ba8..90ed20083009 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1770,7 +1770,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index f0878998526a..4e4e4a66eaee 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -216,7 +216,7 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index cb0a2ae916e0..2f00a37684a2 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -33,6 +33,7 @@
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
@@ -40,14 +41,9 @@
#include "framebuffer.h"
#include "gtt.h"
-static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
-static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle);
-
static const struct drm_framebuffer_funcs psb_fb_funcs = {
- .destroy = psb_user_framebuffer_destroy,
- .create_handle = psb_user_framebuffer_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
};
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
@@ -96,17 +92,18 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
struct psb_fbdev *fbdev = info->par;
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_device *dev = psbfb->base.dev;
+ struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
/*
* We have to poke our nose in here. The core fb code assumes
* panning is part of the hardware that can be invoked before
* the actual fb is mapped. In our case that isn't quite true.
*/
- if (psbfb->gtt->npage) {
+ if (gtt->npage) {
/* GTT roll shifts in 4K pages; we need to shift the right
number of pages */
int pages = info->fix.line_length >> 12;
- psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
+ psb_gtt_roll(dev, gtt, var->yoffset * pages);
}
return 0;
}
@@ -117,13 +114,14 @@ static int psbfb_vm_fault(struct vm_fault *vmf)
struct psb_framebuffer *psbfb = vma->vm_private_data;
struct drm_device *dev = psbfb->base.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
int page_num;
int i;
unsigned long address;
int ret;
unsigned long pfn;
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
- psbfb->gtt->offset;
+ gtt->offset;
page_num = vma_pages(vma);
address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
@@ -246,7 +244,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
return -EINVAL;
drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
- fb->gtt = gt;
+ fb->base.obj[0] = &gt->gem;
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
if (ret) {
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
@@ -518,8 +516,8 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
drm_framebuffer_unregister_private(&psbfb->base);
drm_framebuffer_cleanup(&psbfb->base);
- if (psbfb->gtt)
- drm_gem_object_unreference_unlocked(&psbfb->gtt->gem);
+ if (psbfb->base.obj[0])
+ drm_gem_object_put_unlocked(psbfb->base.obj[0]);
return 0;
}
@@ -576,44 +574,6 @@ static void psb_fbdev_fini(struct drm_device *dev)
dev_priv->fbdev = NULL;
}
-/**
- * psb_user_framebuffer_create_handle - add handle to a framebuffer
- * @fb: framebuffer
- * @file_priv: our DRM file
- * @handle: returned handle
- *
- * Our framebuffer object is a GTT range which also contains a GEM
- * object. We need to turn it into a handle for userspace. GEM will do
- * the work for us
- */
-static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
- struct gtt_range *r = psbfb->gtt;
- return drm_gem_handle_create(file_priv, &r->gem, handle);
-}
-
-/**
- * psb_user_framebuffer_destroy - destruct user created fb
- * @fb: framebuffer
- *
- * User framebuffers are backed by GEM objects so all we have to do is
- * clean up a bit and drop the reference, GEM will handle the fallout
- */
-static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
- struct gtt_range *r = psbfb->gtt;
-
- /* Let DRM do its clean up */
- drm_framebuffer_cleanup(fb);
- /* We are no longer using the resource in GEM */
- drm_gem_object_unreference_unlocked(&r->gem);
- kfree(fb);
-}
-
static const struct drm_mode_config_funcs psb_mode_funcs = {
.fb_create = psb_user_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index 395f20b07aab..23dc3c5f8f0d 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -31,7 +31,6 @@ struct psb_framebuffer {
struct drm_framebuffer base;
struct address_space *addr_space;
struct fb_info *fbdev;
- struct gtt_range *gtt;
};
struct psb_fbdev {
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 131239759a75..913bf4c256fa 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -93,7 +93,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
return ret;
}
/* We have the initial and handle reference but need only one now */
- drm_gem_object_unreference_unlocked(&r->gem);
+ drm_gem_object_put_unlocked(&r->gem);
*handlep = handle;
return 0;
}
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index f3c48a2be71b..09c1161a7ac6 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -60,7 +60,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct gtt_range *gtt;
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -76,12 +76,14 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
goto gma_pipe_cleaner;
}
+ gtt = to_gtt_range(fb->obj[0]);
+
/* We are displaying this buffer, make sure it is actually loaded
into the GTT */
- ret = psb_gtt_pin(psbfb->gtt);
+ ret = psb_gtt_pin(gtt);
if (ret < 0)
goto gma_pipe_set_base_exit;
- start = psbfb->gtt->offset;
+ start = gtt->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
@@ -129,7 +131,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
gma_pipe_cleaner:
/* If there was a previous display we can now unpin it */
if (old_fb)
- psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+ psb_gtt_unpin(to_gtt_range(old_fb->obj[0]));
gma_pipe_set_base_exit:
gma_power_end(dev);
@@ -353,7 +355,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
gt = container_of(gma_crtc->cursor_obj,
struct gtt_range, gem);
psb_gtt_unpin(gt);
- drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
+ drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
gma_crtc->cursor_obj = NULL;
}
return 0;
@@ -429,7 +431,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
if (gma_crtc->cursor_obj) {
gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
psb_gtt_unpin(gt);
- drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
+ drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
}
gma_crtc->cursor_obj = obj;
@@ -437,7 +439,7 @@ unlock:
return ret;
unref_cursor:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -491,7 +493,7 @@ void gma_crtc_disable(struct drm_crtc *crtc)
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
- gt = to_psb_fb(crtc->primary->fb)->gtt;
+ gt = to_gtt_range(crtc->primary->fb->obj[0]);
psb_gtt_unpin(gt);
}
}
@@ -663,7 +665,7 @@ void gma_connector_attach_encoder(struct gma_connector *connector,
struct gma_encoder *encoder)
{
connector->encoder = encoder;
- drm_mode_connector_attach_encoder(&connector->base,
+ drm_connector_attach_encoder(&connector->base,
&encoder->base);
}
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index cdbb350c9d5d..cb0c3a2a1fd4 100644
--- a/drivers/gpu/drm/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -53,6 +53,8 @@ struct gtt_range {
int roll; /* Roll applied to the GTT entries */
};
+#define to_gtt_range(x) container_of(x, struct gtt_range, gem)
+
extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
const char *name, int backed,
u32 align);
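
The new to_gtt_range() helper is the standard container_of() pattern: given a pointer to the embedded gem member, recover the enclosing gtt_range. A compilable userspace illustration (with a local container_of definition, since linux/kernel.h is unavailable there, and simplified struct bodies):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct gem_object { int handle; };

    struct gtt_range {
        unsigned long offset;
        struct gem_object gem;   /* embedded member, as in the driver */
    };

    #define to_gtt_range(x) container_of(x, struct gtt_range, gem)

    int main(void)
    {
        struct gtt_range r = { .offset = 0x1000 };
        struct gem_object *gem = &r.gem;      /* all we were handed */

        /* recover the enclosing structure from the member pointer */
        printf("offset = 0x%lx\n", to_gtt_range(gem)->offset);
        return 0;
    }
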
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 978ae4b25e82..e0ccf1d19a4d 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -34,7 +34,7 @@ struct vbt_header {
u8 reserved0;
u32 bdb_offset; /**< from beginning of VBT */
u32 aim_offset[4]; /**< from beginning of VBT */
-} __attribute__((packed));
+} __packed;
struct bdb_header {
@@ -61,7 +61,7 @@ struct vbios_data {
u8 rsvd4; /* popup memory size */
u8 resize_pci_bios;
u8 rsvd5; /* is crt already on ddc2 */
-} __attribute__((packed));
+} __packed;
/*
* There are several types of BIOS data blocks (BDBs), each block has
@@ -133,7 +133,7 @@ struct bdb_general_features {
u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
u8 rsvd11:3; /* finish byte */
-} __attribute__((packed));
+} __packed;
/* pre-915 */
#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
@@ -213,7 +213,7 @@ struct child_device_config {
u8 dvo2_wiring;
u16 extended_type;
u8 dvo_function;
-} __attribute__((packed));
+} __packed;
struct bdb_general_definitions {
@@ -256,7 +256,7 @@ struct bdb_lvds_options {
u8 lvds_edid:1;
u8 rsvd2:1;
u8 rsvd4;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_backlight {
u8 type:2;
@@ -268,7 +268,7 @@ struct bdb_lvds_backlight {
u8 i2caddr;
u8 brightnesscmd;
/*FIXME: more...*/
-} __attribute__((packed));
+} __packed;
/* LFP pointer table contains entries to the struct below */
struct bdb_lvds_lfp_data_ptr {
@@ -278,12 +278,12 @@ struct bdb_lvds_lfp_data_ptr {
u8 dvo_table_size;
u16 panel_pnp_id_offset;
u8 pnp_table_size;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data_ptrs {
u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
struct bdb_lvds_lfp_data_ptr ptr[16];
-} __attribute__((packed));
+} __packed;
/* LFP data has 3 blocks per entry */
struct lvds_fp_timing {
@@ -300,7 +300,7 @@ struct lvds_fp_timing {
u32 pfit_reg;
u32 pfit_reg_val;
u16 terminator;
-} __attribute__((packed));
+} __packed;
struct lvds_dvo_timing {
u16 clock; /**< In 10khz */
@@ -328,7 +328,7 @@ struct lvds_dvo_timing {
u8 vsync_positive:1;
u8 hsync_positive:1;
u8 rsvd2:1;
-} __attribute__((packed));
+} __packed;
struct lvds_pnp_id {
u16 mfg_name;
@@ -336,17 +336,17 @@ struct lvds_pnp_id {
u32 serial;
u8 mfg_week;
u8 mfg_year;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data_entry {
struct lvds_fp_timing fp_timing;
struct lvds_dvo_timing dvo_timing;
struct lvds_pnp_id pnp_id;
-} __attribute__((packed));
+} __packed;
struct bdb_lvds_lfp_data {
struct bdb_lvds_lfp_data_entry data[16];
-} __attribute__((packed));
+} __packed;
struct aimdb_header {
char signature[16];
@@ -354,12 +354,12 @@ struct aimdb_header {
u16 aimdb_version;
u16 aimdb_header_size;
u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
struct aimdb_block {
u8 aimdb_id;
u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
struct vch_panel_data {
u16 fp_timing_offset;
@@ -370,12 +370,12 @@ struct vch_panel_data {
u8 text_fitting_size;
u16 graphics_fitting_offset;
u8 graphics_fitting_size;
-} __attribute__((packed));
+} __packed;
struct vch_bdb_22 {
struct aimdb_block aimdb_block;
struct vch_panel_data panels[16];
-} __attribute__((packed));
+} __packed;
struct bdb_sdvo_lvds_options {
u8 panel_backlight;
@@ -391,7 +391,7 @@ struct bdb_sdvo_lvds_options {
u8 panel_misc_bits_2;
u8 panel_misc_bits_3;
u8 panel_misc_bits_4;
-} __attribute__((packed));
+} __packed;
#define BDB_DRIVER_FEATURE_NO_LVDS 0
#define BDB_DRIVER_FEATURE_INT_LVDS 1
@@ -436,7 +436,7 @@ struct bdb_driver_features {
u8 hdmi_termination;
u8 custom_vbt_version;
-} __attribute__((packed));
+} __packed;
#define EDP_18BPP 0
#define EDP_24BPP 1
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index a05c020602bd..d0bf5a1e94e8 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -999,7 +999,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
p_funcs->encoder_helper_funcs);
/*attach to given connector*/
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
/*set possible crtcs and clones*/
if (dsi_connector->pipe) {
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 5c066448be5b..2b9fa0163dea 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -167,7 +167,6 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -196,7 +195,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (!gma_power_begin(dev, true))
return 0;
- start = psbfb->gtt->offset;
+ start = to_gtt_range(fb->obj[0])->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 0fff269d3fe6..1b7fd6a9d8a5 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -600,7 +600,6 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -617,7 +616,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
if (!gma_power_begin(dev, true))
return 0;
- start = psbfb->gtt->offset;
+ start = to_gtt_range(fb->obj[0])->offset;
offset = y * fb->pitches[0] + x * fb->format->cpp[0];
REG_WRITE(map->stride, fb->pitches[0]);
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 78566a80ad25..c6d72de1c054 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -578,7 +578,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
}
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
}
return ret;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index e6943fef0611..83babb815a5d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -376,7 +376,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
* preferred mode is the right one.
*/
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
kfree(edid);
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index e5360726d80b..fb4da3cd6681 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -66,7 +66,7 @@ int psb_intel_ddc_get_modes(struct drm_connector *connector,
edid = drm_get_edid(connector, adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index f2ee6aa10afa..dd3cec0e3190 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -429,13 +429,20 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
+#define MAX_ARG_LEN 32
+
static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
const void *args, int args_len)
{
- u8 buf[args_len*2 + 2], status;
- struct i2c_msg msgs[args_len + 3];
+ u8 buf[MAX_ARG_LEN*2 + 2], status;
+ struct i2c_msg msgs[MAX_ARG_LEN + 3];
int i, ret;
+ if (args_len > MAX_ARG_LEN) {
+ DRM_ERROR("Need to increase arg length\n");
+ return false;
+ }
+
psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
@@ -1465,7 +1472,7 @@ static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
bool connector_is_digital = !!IS_TMDS(psb_intel_sdvo_connector);
if (connector_is_digital == monitor_is_digital) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index d2f4749ebf8d..744956cea749 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -133,7 +133,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
}
drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 2269be91f3e1..bb774202a5a1 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -859,7 +859,6 @@ static int ade_plane_atomic_check(struct drm_plane *plane,
return PTR_ERR(crtc_state);
if (src_w != crtc_w || src_h != crtc_h) {
- DRM_ERROR("Scale not support!!!\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 6ebd8842dbcc..a7c39f39793f 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -69,6 +69,7 @@ struct tda998x_priv {
bool edid_delay_active;
struct drm_encoder encoder;
+ struct drm_bridge bridge;
struct drm_connector connector;
struct tda998x_audio_port audio_port[2];
@@ -79,9 +80,10 @@ struct tda998x_priv {
#define conn_to_tda998x_priv(x) \
container_of(x, struct tda998x_priv, connector)
-
#define enc_to_tda998x_priv(x) \
container_of(x, struct tda998x_priv, encoder)
+#define bridge_to_tda998x_priv(x) \
+ container_of(x, struct tda998x_priv, bridge)
/* The TDA9988 series of devices uses a paged register scheme. To simplify
* things we encode the page # in upper bits of the register #. To read/
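
For readers new to the paged-register scheme described in that comment, the encoding amounts to simple bit packing. An illustrative, compilable sketch follows; the macro names mirror the driver's, but the exact definitions here are assumptions for demonstration:

    #include <stdio.h>
    #include <stdint.h>

    /* pack the page number into the upper byte of a 16-bit register id */
    #define REG(page, addr)  ((uint16_t)(((page) << 8) | (addr)))
    #define REG2PAGE(reg)    (((reg) >> 8) & 0xff)
    #define REG2ADDR(reg)    ((reg) & 0xff)

    int main(void)
    {
        uint16_t reg = REG(0x02, 0x45);

        /* a write helper would first select REG2PAGE(reg), then
         * address REG2ADDR(reg) on the i2c bus */
        printf("page=0x%02x addr=0x%02x\n", REG2PAGE(reg), REG2ADDR(reg));
        return 0;
    }
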
@@ -589,13 +591,22 @@ out:
return ret;
}
+#define MAX_WRITE_RANGE_BUF 32
+
static void
reg_write_range(struct tda998x_priv *priv, u16 reg, u8 *p, int cnt)
{
struct i2c_client *client = priv->hdmi;
- u8 buf[cnt+1];
+ /* This is the maximum size of the buffer passed in */
+ u8 buf[MAX_WRITE_RANGE_BUF + 1];
int ret;
+ if (cnt > MAX_WRITE_RANGE_BUF) {
+ dev_err(&client->dev, "Fixed write buffer too small (%d)\n",
+ MAX_WRITE_RANGE_BUF);
+ return;
+ }
+
buf[0] = REG2ADDR(reg);
memcpy(&buf[1], p, cnt);
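
Both this hunk and the psb_intel_sdvo MAX_ARG_LEN change above replace a variable-length array with a fixed-size buffer plus an explicit bounds check, which keeps the worst-case stack usage known at build time. The shape of the fix, as a compilable userspace sketch with a hypothetical write_range() helper:

    #include <stdio.h>
    #include <string.h>

    #define MAX_WRITE_RANGE_BUF 32

    /* before: u8 buf[cnt + 1];  -- a VLA, stack use depends on the caller */
    static int write_range(const unsigned char *p, int cnt)
    {
        unsigned char buf[MAX_WRITE_RANGE_BUF + 1];  /* fixed upper bound */

        if (cnt > MAX_WRITE_RANGE_BUF)   /* reject instead of overflowing */
            return -1;

        buf[0] = 0x00;                   /* register address byte */
        memcpy(&buf[1], p, cnt);
        return cnt + 1;                  /* bytes that would go on the bus */
    }

    int main(void)
    {
        unsigned char payload[4] = { 1, 2, 3, 4 };

        printf("ok: %d\n", write_range(payload, sizeof(payload)));
        printf("too big: %d\n", write_range(payload, 64));
        return 0;
    }
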
@@ -753,7 +764,7 @@ static void tda998x_detect_work(struct work_struct *work)
{
struct tda998x_priv *priv =
container_of(work, struct tda998x_priv, detect_work);
- struct drm_device *dev = priv->encoder.dev;
+ struct drm_device *dev = priv->connector.dev;
if (dev)
drm_kms_helper_hotplug_event(dev);
@@ -805,7 +816,7 @@ static void
tda998x_write_if(struct tda998x_priv *priv, u8 bit, u16 addr,
union hdmi_infoframe *frame)
{
- u8 buf[32];
+ u8 buf[MAX_WRITE_RANGE_BUF];
ssize_t len;
len = hdmi_infoframe_pack(frame, buf, sizeof(buf));
@@ -1095,29 +1106,6 @@ static int tda998x_audio_codec_init(struct tda998x_priv *priv,
/* DRM connector functions */
-static int tda998x_connector_fill_modes(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- int ret;
-
- mutex_lock(&priv->audio_mutex);
- ret = drm_helper_probe_single_connector_modes(connector, maxX, maxY);
-
- if (connector->edid_blob_ptr) {
- struct edid *edid = (void *)connector->edid_blob_ptr->data;
-
- cec_notifier_set_phys_addr_from_edid(priv->cec_notify, edid);
-
- priv->sink_has_audio = drm_detect_monitor_audio(edid);
- } else {
- priv->sink_has_audio = false;
- }
- mutex_unlock(&priv->audio_mutex);
-
- return ret;
-}
-
static enum drm_connector_status
tda998x_connector_detect(struct drm_connector *connector, bool force)
{
@@ -1136,7 +1124,7 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs tda998x_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
- .fill_modes = tda998x_connector_fill_modes,
+ .fill_modes = drm_helper_probe_single_connector_modes,
.detect = tda998x_connector_detect,
.destroy = tda998x_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -1234,41 +1222,30 @@ static int tda998x_connector_get_modes(struct drm_connector *connector)
return 0;
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
+ cec_notifier_set_phys_addr_from_edid(priv->cec_notify, edid);
+
+ mutex_lock(&priv->audio_mutex);
n = drm_add_edid_modes(connector, edid);
+ priv->sink_has_audio = drm_detect_monitor_audio(edid);
+ mutex_unlock(&priv->audio_mutex);
kfree(edid);
return n;
}
-static enum drm_mode_status tda998x_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- /* TDA19988 dotclock can go up to 165MHz */
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
-
- if (mode->clock > ((priv->rev == TDA19988) ? 165000 : 150000))
- return MODE_CLOCK_HIGH;
- if (mode->htotal >= BIT(13))
- return MODE_BAD_HVALUE;
- if (mode->vtotal >= BIT(11))
- return MODE_BAD_VVALUE;
- return MODE_OK;
-}
-
static struct drm_encoder *
tda998x_connector_best_encoder(struct drm_connector *connector)
{
struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- return &priv->encoder;
+ return priv->bridge.encoder;
}
static
const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
.get_modes = tda998x_connector_get_modes,
- .mode_valid = tda998x_connector_mode_valid,
.best_encoder = tda998x_connector_best_encoder,
};
@@ -1292,25 +1269,48 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
if (ret)
return ret;
- drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
+ drm_connector_attach_encoder(&priv->connector,
+ priv->bridge.encoder);
return 0;
}
-/* DRM encoder functions */
+/* DRM bridge functions */
-static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+static int tda998x_bridge_attach(struct drm_bridge *bridge)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
- bool on;
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
- /* we only care about on or off: */
- on = mode == DRM_MODE_DPMS_ON;
+ return tda998x_connector_init(priv, bridge->dev);
+}
- if (on == priv->is_on)
- return;
+static void tda998x_bridge_detach(struct drm_bridge *bridge)
+{
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
- if (on) {
+ drm_connector_cleanup(&priv->connector);
+}
+
+static enum drm_mode_status tda998x_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ /* TDA19988 dotclock can go up to 165MHz */
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+ if (mode->clock > ((priv->rev == TDA19988) ? 165000 : 150000))
+ return MODE_CLOCK_HIGH;
+ if (mode->htotal >= BIT(13))
+ return MODE_BAD_HVALUE;
+ if (mode->vtotal >= BIT(11))
+ return MODE_BAD_VVALUE;
+ return MODE_OK;
+}
+
+static void tda998x_bridge_enable(struct drm_bridge *bridge)
+{
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+ if (!priv->is_on) {
/* enable video ports, audio will be enabled later */
reg_write(priv, REG_ENA_VP_0, 0xff);
reg_write(priv, REG_ENA_VP_1, 0xff);
@@ -1321,7 +1321,14 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
priv->is_on = true;
- } else {
+ }
+}
+
+static void tda998x_bridge_disable(struct drm_bridge *bridge)
+{
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+ if (priv->is_on) {
/* disable video ports */
reg_write(priv, REG_ENA_VP_0, 0x00);
reg_write(priv, REG_ENA_VP_1, 0x00);
@@ -1331,12 +1338,12 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void
-tda998x_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void tda998x_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+ unsigned long tmds_clock;
u16 ref_pix, ref_line, n_pix, n_line;
u16 hs_pix_s, hs_pix_e;
u16 vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
@@ -1407,12 +1414,19 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
(mode->vsync_end - mode->vsync_start)/2;
}
- div = 148500 / mode->clock;
- if (div != 0) {
- div--;
- if (div > 3)
- div = 3;
- }
+ tmds_clock = mode->clock;
+
+ /*
+ * The divisor is a power of 2. The TDA9983B datasheet gives
+ * this as ranges of Msample/s, which is 10x the TMDS clock:
+ * 0 - 800 to 1500 Msample/s
+ * 1 - 400 to 800 Msample/s
+ * 2 - 200 to 400 Msample/s
+ * 3 - as 2 above
+ */
+ for (div = 0; div < 3; div++)
+ if (80000 >> div <= tmds_clock)
+ break;
mutex_lock(&priv->audio_mutex);
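
The old open-coded 148500 / mode->clock computation is replaced above by a scan of the datasheet ranges; since mode->clock is in kHz, 80000 >> div marks the 800/400/200 Msample/s boundaries divided by ten. A quick compilable check of the mapping, with a hypothetical pick_div() wrapper and clock values chosen for illustration:

    #include <stdio.h>

    /* pick the power-of-2 serializer divisor for a TMDS clock in kHz */
    static int pick_div(unsigned long tmds_clock)
    {
        int div;

        for (div = 0; div < 3; div++)
            if (80000 >> div <= tmds_clock)
                break;
        return div;   /* stays at 3 for very slow clocks */
    }

    int main(void)
    {
        unsigned long clocks[] = { 148500, 74250, 27000, 13500 };
        unsigned int i;

        for (i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++)
            printf("%lu kHz -> div %d\n", clocks[i], pick_div(clocks[i]));
        /* prints div 0, 1, 2, 3 respectively */
        return 0;
    }
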
@@ -1543,26 +1557,14 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
mutex_unlock(&priv->audio_mutex);
}
-static void tda998x_destroy(struct tda998x_priv *priv)
-{
- /* disable all IRQs and free the IRQ handler */
- cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
- reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-
- if (priv->audio_pdev)
- platform_device_unregister(priv->audio_pdev);
-
- if (priv->hdmi->irq)
- free_irq(priv->hdmi->irq, priv);
-
- del_timer_sync(&priv->edid_delay_timer);
- cancel_work_sync(&priv->detect_work);
-
- i2c_unregister_device(priv->cec);
-
- if (priv->cec_notify)
- cec_notifier_put(priv->cec_notify);
-}
+static const struct drm_bridge_funcs tda998x_bridge_funcs = {
+ .attach = tda998x_bridge_attach,
+ .detach = tda998x_bridge_detach,
+ .mode_valid = tda998x_bridge_mode_valid,
+ .disable = tda998x_bridge_disable,
+ .mode_set = tda998x_bridge_mode_set,
+ .enable = tda998x_bridge_enable,
+};
/* I2C driver functions */
@@ -1608,16 +1610,69 @@ static int tda998x_get_audio_ports(struct tda998x_priv *priv,
return 0;
}
-static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
+static void tda998x_set_config(struct tda998x_priv *priv,
+ const struct tda998x_encoder_params *p)
{
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+ (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+ VIP_CNTRL_0_SWAP_B(p->swap_b) |
+ (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+ (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+ VIP_CNTRL_1_SWAP_D(p->swap_d) |
+ (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+ (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+ VIP_CNTRL_2_SWAP_F(p->swap_f) |
+ (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+
+ priv->audio_params = p->audio_params;
+}
+
+static void tda998x_destroy(struct device *dev)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+ drm_bridge_remove(&priv->bridge);
+
+ /* disable all IRQs and free the IRQ handler */
+ cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
+ reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+ if (priv->audio_pdev)
+ platform_device_unregister(priv->audio_pdev);
+
+ if (priv->hdmi->irq)
+ free_irq(priv->hdmi->irq, priv);
+
+ del_timer_sync(&priv->edid_delay_timer);
+ cancel_work_sync(&priv->detect_work);
+
+ i2c_unregister_device(priv->cec);
+
+ if (priv->cec_notify)
+ cec_notifier_put(priv->cec_notify);
+}
+
+static int tda998x_create(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
struct device_node *np = client->dev.of_node;
struct i2c_board_info cec_info;
+ struct tda998x_priv *priv;
u32 video;
int rev_lo, rev_hi, ret;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
mutex_init(&priv->mutex); /* protect the page access */
mutex_init(&priv->audio_mutex); /* protect access from audio thread */
mutex_init(&priv->edid_mutex);
+ INIT_LIST_HEAD(&priv->bridge.list);
init_waitqueue_head(&priv->edid_delay_waitq);
timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
INIT_WORK(&priv->detect_work, tda998x_detect_work);
@@ -1640,13 +1695,13 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
/* read version: */
rev_lo = reg_read(priv, REG_VERSION_LSB);
if (rev_lo < 0) {
- dev_err(&client->dev, "failed to read version: %d\n", rev_lo);
+ dev_err(dev, "failed to read version: %d\n", rev_lo);
return rev_lo;
}
rev_hi = reg_read(priv, REG_VERSION_MSB);
if (rev_hi < 0) {
- dev_err(&client->dev, "failed to read version: %d\n", rev_hi);
+ dev_err(dev, "failed to read version: %d\n", rev_hi);
return rev_hi;
}
@@ -1657,20 +1712,19 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
switch (priv->rev) {
case TDA9989N2:
- dev_info(&client->dev, "found TDA9989 n2");
+ dev_info(dev, "found TDA9989 n2");
break;
case TDA19989:
- dev_info(&client->dev, "found TDA19989");
+ dev_info(dev, "found TDA19989");
break;
case TDA19989N2:
- dev_info(&client->dev, "found TDA19989 n2");
+ dev_info(dev, "found TDA19989 n2");
break;
case TDA19988:
- dev_info(&client->dev, "found TDA19988");
+ dev_info(dev, "found TDA19988");
break;
default:
- dev_err(&client->dev, "found unsupported device: %04x\n",
- priv->rev);
+ dev_err(dev, "found unsupported device: %04x\n", priv->rev);
return -ENXIO;
}
@@ -1713,8 +1767,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
tda998x_irq_thread, irq_flags,
"tda998x", priv);
if (ret) {
- dev_err(&client->dev,
- "failed to request IRQ#%u: %d\n",
+ dev_err(dev, "failed to request IRQ#%u: %d\n",
client->irq, ret);
goto err_irq;
}
@@ -1723,13 +1776,13 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
}
- priv->cec_notify = cec_notifier_get(&client->dev);
+ priv->cec_notify = cec_notifier_get(dev);
if (!priv->cec_notify) {
ret = -ENOMEM;
goto fail;
}
- priv->cec_glue.parent = &client->dev;
+ priv->cec_glue.parent = dev;
priv->cec_glue.data = priv;
priv->cec_glue.init = tda998x_cec_hook_init;
priv->cec_glue.exit = tda998x_cec_hook_exit;
@@ -1759,61 +1812,44 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
/* enable EDID read irq: */
reg_set(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
- if (!np)
- return 0; /* non-DT */
+ if (np) {
+ /* get the device tree parameters */
+ ret = of_property_read_u32(np, "video-ports", &video);
+ if (ret == 0) {
+ priv->vip_cntrl_0 = video >> 16;
+ priv->vip_cntrl_1 = video >> 8;
+ priv->vip_cntrl_2 = video;
+ }
+
+ ret = tda998x_get_audio_ports(priv, np);
+ if (ret)
+ goto fail;
- /* get the device tree parameters */
- ret = of_property_read_u32(np, "video-ports", &video);
- if (ret == 0) {
- priv->vip_cntrl_0 = video >> 16;
- priv->vip_cntrl_1 = video >> 8;
- priv->vip_cntrl_2 = video;
+ if (priv->audio_port[0].format != AFMT_UNUSED)
+ tda998x_audio_codec_init(priv, &client->dev);
+ } else if (dev->platform_data) {
+ tda998x_set_config(priv, dev->platform_data);
}
- ret = tda998x_get_audio_ports(priv, np);
- if (ret)
- goto fail;
+ priv->bridge.funcs = &tda998x_bridge_funcs;
+#ifdef CONFIG_OF
+ priv->bridge.of_node = dev->of_node;
+#endif
- if (priv->audio_port[0].format != AFMT_UNUSED)
- tda998x_audio_codec_init(priv, &client->dev);
+ drm_bridge_add(&priv->bridge);
return 0;
fail:
- /* if encoder_init fails, the encoder slave is never registered,
- * so cleanup here:
- */
- i2c_unregister_device(priv->cec);
- if (priv->cec_notify)
- cec_notifier_put(priv->cec_notify);
- if (client->irq)
- free_irq(client->irq, priv);
+ tda998x_destroy(dev);
err_irq:
return ret;
}
-static void tda998x_encoder_prepare(struct drm_encoder *encoder)
-{
- tda998x_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void tda998x_encoder_commit(struct drm_encoder *encoder)
-{
- tda998x_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
-static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
- .dpms = tda998x_encoder_dpms,
- .prepare = tda998x_encoder_prepare,
- .commit = tda998x_encoder_commit,
- .mode_set = tda998x_encoder_mode_set,
-};
+/* DRM encoder functions */
static void tda998x_encoder_destroy(struct drm_encoder *encoder)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
-
- tda998x_destroy(priv);
drm_encoder_cleanup(encoder);
}
@@ -1821,40 +1857,12 @@ static const struct drm_encoder_funcs tda998x_encoder_funcs = {
.destroy = tda998x_encoder_destroy,
};
-static void tda998x_set_config(struct tda998x_priv *priv,
- const struct tda998x_encoder_params *p)
-{
- priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
- (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
- VIP_CNTRL_0_SWAP_B(p->swap_b) |
- (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
- priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
- (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
- VIP_CNTRL_1_SWAP_D(p->swap_d) |
- (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
- priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
- (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
- VIP_CNTRL_2_SWAP_F(p->swap_f) |
- (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
-
- priv->audio_params = p->audio_params;
-}
-
-static int tda998x_bind(struct device *dev, struct device *master, void *data)
+static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
{
- struct tda998x_encoder_params *params = dev->platform_data;
- struct i2c_client *client = to_i2c_client(dev);
- struct drm_device *drm = data;
- struct tda998x_priv *priv;
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
u32 crtcs = 0;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- dev_set_drvdata(dev, priv);
-
if (dev->of_node)
crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
@@ -1866,40 +1874,36 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
priv->encoder.possible_crtcs = crtcs;
- ret = tda998x_create(client, priv);
- if (ret)
- return ret;
-
- if (!dev->of_node && params)
- tda998x_set_config(priv, params);
-
- drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
if (ret)
goto err_encoder;
- ret = tda998x_connector_init(priv, drm);
+ ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL);
if (ret)
- goto err_connector;
+ goto err_bridge;
return 0;
-err_connector:
+err_bridge:
drm_encoder_cleanup(&priv->encoder);
err_encoder:
- tda998x_destroy(priv);
return ret;
}
+static int tda998x_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = data;
+
+ return tda998x_encoder_init(dev, drm);
+}
+
static void tda998x_unbind(struct device *dev, struct device *master,
void *data)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
- drm_connector_cleanup(&priv->connector);
drm_encoder_cleanup(&priv->encoder);
- tda998x_destroy(priv);
}
static const struct component_ops tda998x_ops = {
@@ -1910,16 +1914,27 @@ static const struct component_ops tda998x_ops = {
static int
tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
+ int ret;
+
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_warn(&client->dev, "adapter does not support I2C\n");
return -EIO;
}
- return component_add(&client->dev, &tda998x_ops);
+
+ ret = tda998x_create(&client->dev);
+ if (ret)
+ return ret;
+
+ ret = component_add(&client->dev, &tda998x_ops);
+ if (ret)
+ tda998x_destroy(&client->dev);
+ return ret;
}
static int tda998x_remove(struct i2c_client *client)
{
component_del(&client->dev, &tda998x_ops);
+ tda998x_destroy(&client->dev);
return 0;
}
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 576a417690d4..3b378936f575 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -934,7 +934,7 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
DRM_DEBUG("idx %d used %d discard %d\n",
vertex->idx, vertex->used, vertex->discard);
- if (vertex->idx < 0 || vertex->idx > dma->buf_count)
+ if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
return -EINVAL;
i810_dma_dispatch_vertex(dev,
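
The vertex ioctl fix tightens idx > dma->buf_count to idx >= dma->buf_count: with buf_count buffers the valid indices are 0 .. buf_count - 1, so the old test let idx == buf_count through and indexed one element past the array. A compilable two-check demonstration (values chosen for illustration):

    #include <stdio.h>

    int main(void)
    {
        int buf_count = 4;            /* valid indices: 0..3 */
        int idx = 4;                  /* one past the end */

        /* old check: does not reject, so buflist[4] would be read */
        printf("old check rejects: %d\n", idx < 0 || idx > buf_count);
        /* fixed check: rejects idx == buf_count */
        printf("new check rejects: %d\n", idx < 0 || idx >= buf_count);
        return 0;
    }
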
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index dfd95889f4b7..5c607f2c707b 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -23,6 +23,7 @@ config DRM_I915
select SYNC_FILE
select IOSF_MBI
select CRC32
+ select SND_HDA_I915 if SND_HDA_CORE
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 9de8b1c51a5c..459f8f88a34c 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -51,6 +51,18 @@ config DRM_I915_DEBUG_GEM
If in doubt, say "N".
+config DRM_I915_ERRLOG_GEM
+ bool "Insert extra logging (very verbose) for common GEM errors"
+ default n
+ depends on DRM_I915_DEBUG_GEM
+ help
+ Enable additional logging that may help track down the cause of
+ errors that principally originate in userspace.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_TRACE_GEM
bool "Insert extra ftrace output from the GEM internals"
depends on DRM_I915_DEBUG_GEM
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 4c6adae23e18..5794f102f9b8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -135,15 +135,14 @@ i915-y += dvo_ch7017.o \
dvo_ns2501.o \
dvo_sil164.o \
dvo_tfp410.o \
+ icl_dsi.o \
intel_crt.o \
intel_ddi.o \
intel_dp_aux_backlight.o \
intel_dp_link_training.o \
intel_dp_mst.o \
intel_dp.o \
- intel_dsi.o \
intel_dsi_dcs_backlight.o \
- intel_dsi_pll.o \
intel_dsi_vbt.o \
intel_dvo.o \
intel_hdmi.o \
@@ -152,7 +151,9 @@ i915-y += dvo_ch7017.o \
intel_lvds.o \
intel_panel.o \
intel_sdvo.o \
- intel_tv.o
+ intel_tv.o \
+ vlv_dsi.o \
+ vlv_dsi_pll.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 80b3e16cf48c..caac9942e1e3 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -159,7 +159,7 @@
#define CH7017_BANG_LIMIT_CONTROL 0x7f
struct ch7017_priv {
- uint8_t dummy;
+ u8 dummy;
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@@ -186,7 +186,7 @@ static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
{
- uint8_t buf[2] = { addr, val };
+ u8 buf[2] = { addr, val };
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -258,11 +258,11 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
- uint8_t outputs_enable, lvds_control_2, lvds_power_down;
- uint8_t horizontal_active_pixel_input;
- uint8_t horizontal_active_pixel_output, vertical_active_line_output;
- uint8_t active_input_line_output;
+ u8 lvds_pll_feedback_div, lvds_pll_vco_control;
+ u8 outputs_enable, lvds_control_2, lvds_power_down;
+ u8 horizontal_active_pixel_input;
+ u8 horizontal_active_pixel_output, vertical_active_line_output;
+ u8 active_input_line_output;
DRM_DEBUG_KMS("Registers before mode setting\n");
ch7017_dump_regs(dvo);
@@ -333,7 +333,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
/* set the CH7017 power state */
static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
{
- uint8_t val;
+ u8 val;
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
@@ -361,7 +361,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
{
- uint8_t val;
+ u8 val;
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
@@ -373,7 +373,7 @@ static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
{
- uint8_t val;
+ u8 val;
#define DUMP(reg) \
do { \
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 7aeeffd2428b..397ac5233726 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -85,7 +85,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
static struct ch7xxx_id_struct {
- uint8_t vid;
+ u8 vid;
char *name;
} ch7xxx_ids[] = {
{ CH7011_VID, "CH7011" },
@@ -96,7 +96,7 @@ static struct ch7xxx_id_struct {
};
static struct ch7xxx_did_struct {
- uint8_t did;
+ u8 did;
char *name;
} ch7xxx_dids[] = {
{ CH7xxx_DID, "CH7XXX" },
@@ -107,7 +107,7 @@ struct ch7xxx_priv {
bool quiet;
};
-static char *ch7xxx_get_id(uint8_t vid)
+static char *ch7xxx_get_id(u8 vid)
{
int i;
@@ -119,7 +119,7 @@ static char *ch7xxx_get_id(uint8_t vid)
return NULL;
}
-static char *ch7xxx_get_did(uint8_t did)
+static char *ch7xxx_get_did(u8 did)
{
int i;
@@ -132,7 +132,7 @@ static char *ch7xxx_get_did(uint8_t did)
}
/** Reads an 8 bit register */
-static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -170,11 +170,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
}
/** Writes an 8 bit register */
-static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -201,7 +201,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
{
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
- uint8_t vendor, device;
+ u8 vendor, device;
char *name, *devid;
ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
@@ -244,7 +244,7 @@ out:
static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
{
- uint8_t cdet, orig_pm, pm;
+ u8 cdet, orig_pm, pm;
ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
@@ -276,7 +276,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- uint8_t tvco, tpcp, tpd, tlpf, idf;
+ u8 tvco, tpcp, tpd, tlpf, idf;
if (mode->clock <= 65000) {
tvco = 0x23;
@@ -336,7 +336,7 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
int i;
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
- uint8_t val;
+ u8 val;
if ((i % 8) == 0)
DRM_DEBUG_KMS("\n %02X: ", i);
ch7xxx_readb(dvo, i, &val);
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index c73aff163908..24278cc49090 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -161,7 +161,7 @@
* instead. The following list contains all registers that
* require saving.
*/
-static const uint16_t backup_addresses[] = {
+static const u16 backup_addresses[] = {
0x11, 0x12,
0x18, 0x19, 0x1a, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
@@ -174,11 +174,11 @@ static const uint16_t backup_addresses[] = {
struct ivch_priv {
bool quiet;
- uint16_t width, height;
+ u16 width, height;
/* Register backup */
- uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
+ u16 reg_backup[ARRAY_SIZE(backup_addresses)];
};
@@ -188,7 +188,7 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo);
*
* Each of the 256 registers are 16 bits long.
*/
-static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -231,7 +231,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
}
/* Writes a 16-bit register on the ivch */
-static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -263,7 +263,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
struct ivch_priv *priv;
- uint16_t temp;
+ u16 temp;
int i;
priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
@@ -342,7 +342,7 @@ static void ivch_reset(struct intel_dvo_device *dvo)
static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
- uint16_t vr01, vr30, backlight;
+ u16 vr01, vr30, backlight;
ivch_reset(dvo);
@@ -379,7 +379,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
{
- uint16_t vr01;
+ u16 vr01;
ivch_reset(dvo);
@@ -398,9 +398,9 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
const struct drm_display_mode *adjusted_mode)
{
struct ivch_priv *priv = dvo->dev_priv;
- uint16_t vr40 = 0;
- uint16_t vr01 = 0;
- uint16_t vr10;
+ u16 vr40 = 0;
+ u16 vr01 = 0;
+ u16 vr10;
ivch_reset(dvo);
@@ -416,7 +416,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
if (mode->hdisplay != adjusted_mode->crtc_hdisplay ||
mode->vdisplay != adjusted_mode->crtc_vdisplay) {
- uint16_t x_ratio, y_ratio;
+ u16 x_ratio, y_ratio;
vr01 |= VR01_PANEL_FIT_ENABLE;
vr40 |= VR40_CLOCK_GATING_ENABLE;
@@ -438,7 +438,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
static void ivch_dump_regs(struct intel_dvo_device *dvo)
{
- uint16_t val;
+ u16 val;
ivch_read(dvo, VR00, &val);
DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 2379c33cfe51..c584e01dc8dc 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -191,8 +191,8 @@ enum {
};
struct ns2501_reg {
- uint8_t offset;
- uint8_t value;
+ u8 offset;
+ u8 value;
};
/*
@@ -202,23 +202,23 @@ struct ns2501_reg {
* read all this with a grain of salt.
*/
struct ns2501_configuration {
- uint8_t sync; /* configuration of the C0 register */
- uint8_t conf; /* configuration register 8 */
- uint8_t syncb; /* configuration register 41 */
- uint8_t dither; /* configuration of the dithering */
- uint8_t pll_a; /* PLL configuration, register A, 1B */
- uint16_t pll_b; /* PLL configuration, register B, 1C/1D */
- uint16_t hstart; /* horizontal start, registers C1/C2 */
- uint16_t hstop; /* horizontal total, registers C3/C4 */
- uint16_t vstart; /* vertical start, registers C5/C6 */
- uint16_t vstop; /* vertical total, registers C7/C8 */
- uint16_t vsync; /* manual vertical sync start, 80/81 */
- uint16_t vtotal; /* number of lines generated, 82/83 */
- uint16_t hpos; /* horizontal position + 256, 98/99 */
- uint16_t vpos; /* vertical position, 8e/8f */
- uint16_t voffs; /* vertical output offset, 9c/9d */
- uint16_t hscale; /* horizontal scaling factor, b8/b9 */
- uint16_t vscale; /* vertical scaling factor, 10/11 */
+ u8 sync; /* configuration of the C0 register */
+ u8 conf; /* configuration register 8 */
+ u8 syncb; /* configuration register 41 */
+ u8 dither; /* configuration of the dithering */
+ u8 pll_a; /* PLL configuration, register A, 1B */
+ u16 pll_b; /* PLL configuration, register B, 1C/1D */
+ u16 hstart; /* horizontal start, registers C1/C2 */
+ u16 hstop; /* horizontal total, registers C3/C4 */
+ u16 vstart; /* vertical start, registers C5/C6 */
+ u16 vstop; /* vertical total, registers C7/C8 */
+ u16 vsync; /* manual vertical sync start, 80/81 */
+ u16 vtotal; /* number of lines generated, 82/83 */
+ u16 hpos; /* horizontal position + 256, 98/99 */
+ u16 vpos; /* vertical position, 8e/8f */
+ u16 voffs; /* vertical output offset, 9c/9d */
+ u16 hscale; /* horizontal scaling factor, b8/b9 */
+ u16 vscale; /* vertical scaling factor, 10/11 */
};
/*
@@ -389,7 +389,7 @@ struct ns2501_priv {
** If it returns false, it might be wise to enable the
** DVO with the above function.
*/
-static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct ns2501_priv *ns = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -434,11 +434,11 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
** If it returns false, it might be wise to enable the
** DVO with the above function.
*/
-static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct ns2501_priv *ns = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 1c1a0674dbab..4ae5d8fd9ff0 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -65,7 +65,7 @@ struct sil164_priv {
#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
-static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -102,11 +102,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
return false;
}
-static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -173,7 +173,7 @@ out:
static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
{
- uint8_t reg9;
+ u8 reg9;
sil164_readb(dvo, SIL164_REG9, &reg9);
@@ -243,7 +243,7 @@ static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
static void sil164_dump_regs(struct intel_dvo_device *dvo)
{
- uint8_t val;
+ u8 val;
sil164_readb(dvo, SIL164_FREQ_LO, &val);
DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 31e181da93db..d603bc2f2506 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -90,7 +90,7 @@ struct tfp410_priv {
bool quiet;
};
-static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -127,11 +127,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
return false;
}
-static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -155,7 +155,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
{
- uint8_t ch1, ch2;
+ u8 ch1, ch2;
if (tfp410_readb(dvo, addr+0, &ch1) &&
tfp410_readb(dvo, addr+1, &ch2))
@@ -203,7 +203,7 @@ out:
static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
{
enum drm_connector_status ret = connector_status_disconnected;
- uint8_t ctl2;
+ u8 ctl2;
if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
if (ctl2 & TFP410_CTL_2_RSEN)
@@ -236,7 +236,7 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
/* set the tfp410 power state */
static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
{
- uint8_t ctl1;
+ u8 ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return;
@@ -251,7 +251,7 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
{
- uint8_t ctl1;
+ u8 ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return false;
@@ -264,7 +264,7 @@ static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
static void tfp410_dump_regs(struct intel_dvo_device *dvo)
{
- uint8_t val, val2;
+ u8 val, val2;
tfp410_readb(dvo, TFP410_REV, &val);
DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val);
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 7c9ec4f4f36c..380eeb2a0e83 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,7 +61,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
}
mutex_lock(&dev_priv->drm.struct_mutex);
- ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
+ ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 7f562410f9cf..45e89b1e0481 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -172,6 +172,7 @@ struct decode_info {
#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2)
#define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3)
#define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4)
+#define OP_MEDIA_POOL_STATE OP_3D_MEDIA(0x2, 0x0, 0x5)
#define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0)
#define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2)
@@ -1279,7 +1280,9 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
if (!info->async_flip)
return 0;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1307,7 +1310,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1331,7 +1336,9 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
if (IS_BROADWELL(dev_priv))
return gen8_decode_mi_display_flip(s, info);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv))
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1340,26 +1347,14 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
static int check_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
- if (IS_BROADWELL(dev_priv)
- || IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv))
- return gen8_check_mi_display_flip(s, info);
- return -ENODEV;
+ return gen8_check_mi_display_flip(s, info);
}
static int update_plane_mmio_from_mi_display_flip(
struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
- if (IS_BROADWELL(dev_priv)
- || IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv))
- return gen8_update_plane_mmio_from_mi_display_flip(s, info);
- return -ENODEV;
+ return gen8_update_plane_mmio_from_mi_display_flip(s, info);
}
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
@@ -1638,15 +1633,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
*/
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
- struct intel_gvt *gvt = s->vgpu->gvt;
-
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- /* BDW decides privilege based on address space */
- if (cmd_val(s, 0) & (1 << 8) &&
+ /* Decide privilege based on address space */
+ if (cmd_val(s, 0) & (1 << 8) &&
!(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
- return 0;
- }
+ return 0;
return 1;
}
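With Broxton added, every platform GVT supports now takes the same path, so the platform checks collapse: privilege is decided purely by bit 8 of the first command dword, which selects the PPGTT address space and marks the batch non-privileged. A hedged sketch of the resulting predicate (helper name and parameters are illustrative):

    static bool batch_needs_scan(u32 cmd_dw0, unsigned long scan_nonprivbb,
                                 int ring_id)
    {
            /* Non-privileged batch and scanning not forced for this ring. */
            if ((cmd_dw0 & BIT(8)) && !(scan_nonprivbb & BIT(ring_id)))
                    return false;
            return true;
    }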
@@ -2372,6 +2362,9 @@ static struct cmd_info cmd_info[] = {
{"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
0, 16, NULL},
+ {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
{"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
{"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 4b072ade8c38..3019dbc39aef 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -171,6 +171,29 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
+ if (IS_BROXTON(dev_priv)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA |
+ BXT_DE_PORT_HP_DDIB |
+ BXT_DE_PORT_HP_DDIC);
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIA;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIB;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIC;
+ }
+
+ return;
+ }
+
vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
@@ -273,8 +296,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
for_each_pipe(dev_priv, pipe) {
vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
- vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
- vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
}
vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
@@ -337,26 +360,28 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_vgpu *vgpu;
int pipe, id;
+ int found = false;
- if (WARN_ON(!mutex_is_locked(&gvt->lock)))
- return;
-
+ mutex_lock(&gvt->lock);
for_each_active_vgpu(gvt, vgpu, id) {
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
- if (pipe_is_enabled(vgpu, pipe))
- goto out;
+ if (pipe_is_enabled(vgpu, pipe)) {
+ found = true;
+ break;
+ }
}
+ if (found)
+ break;
}
/* all the pipes are disabled */
- hrtimer_cancel(&irq->vblank_timer.timer);
- return;
-
-out:
- hrtimer_start(&irq->vblank_timer.timer,
- ktime_add_ns(ktime_get(), irq->vblank_timer.period),
- HRTIMER_MODE_ABS);
-
+ if (!found)
+ hrtimer_cancel(&irq->vblank_timer.timer);
+ else
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
+ mutex_unlock(&gvt->lock);
}
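The rewrite replaces the goto-based early exit with a found flag so the function can take and drop gvt->lock itself on a single path. The resulting shape, sketched with a hypothetical any_pipe_enabled() helper standing in for the nested loops:

    mutex_lock(&gvt->lock);
    found = any_pipe_enabled(gvt);          /* hypothetical helper */
    if (found)                              /* expires = now + period, per patch */
            hrtimer_start(&irq->vblank_timer.timer, expires, HRTIMER_MODE_ABS);
    else
            hrtimer_cancel(&irq->vblank_timer.timer);
    mutex_unlock(&gvt->lock);               /* single unlock site, no goto */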
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
@@ -393,8 +418,10 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
{
int pipe;
+ mutex_lock(&vgpu->vgpu_lock);
for_each_pipe(vgpu->gvt->dev_priv, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -409,11 +436,10 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
struct intel_vgpu *vgpu;
int id;
- if (WARN_ON(!mutex_is_locked(&gvt->lock)))
- return;
-
+ mutex_lock(&gvt->lock);
for_each_active_vgpu(gvt, vgpu, id)
emulate_vblank(vgpu);
+ mutex_unlock(&gvt->lock);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 6f4f8e941fc2..6e3f56684f4e 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -164,7 +164,9 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
unsigned int tiling_mode = 0;
unsigned int stride = 0;
@@ -192,6 +194,14 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
return obj;
}
+static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
+{
+ if (c && c->x_hot <= c->width && c->y_hot <= c->height)
+ return true;
+ else
+ return false;
+}
+
static int vgpu_get_plane_info(struct drm_device *dev,
struct intel_vgpu *vgpu,
struct intel_vgpu_fb_info *info,
@@ -229,12 +239,14 @@ static int vgpu_get_plane_info(struct drm_device *dev,
info->x_pos = c.x_pos;
info->y_pos = c.y_pos;
- /* The invalid cursor hotspot value is delivered to host
- * until we find a way to get the cursor hotspot info of
- * guest OS.
- */
- info->x_hot = UINT_MAX;
- info->y_hot = UINT_MAX;
+ if (validate_hotspot(&c)) {
+ info->x_hot = c.x_hot;
+ info->y_hot = c.y_hot;
+ } else {
+ info->x_hot = UINT_MAX;
+ info->y_hot = UINT_MAX;
+ }
+
info->size = (((info->stride * c.height * c.bpp) / 8)
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else {
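The dmabuf change stops unconditionally reporting UINT_MAX: a guest hotspot lying inside the cursor image is now forwarded, and UINT_MAX remains the sentinel meaning "hotspot unknown". Condensed sketch of the policy (field names as in the patch):

    if (c.x_hot <= c.width && c.y_hot <= c.height) {
            info->x_hot = c.x_hot;          /* plausible: pass through */
            info->y_hot = c.y_hot;
    } else {
            info->x_hot = UINT_MAX;         /* sentinel: unknown hotspot */
            info->y_hot = UINT_MAX;
    }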
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index f61337632969..4b98539025c5 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -77,6 +77,20 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
return chr;
}
+static inline int bxt_get_port_from_gmbus0(u32 gmbus0)
+{
+ int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+ int port = -EINVAL;
+
+ if (port_select == 1)
+ port = PORT_B;
+ else if (port_select == 2)
+ port = PORT_C;
+ else if (port_select == 3)
+ port = PORT_D;
+ return port;
+}
+
static inline int get_port_from_gmbus0(u32 gmbus0)
{
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
@@ -105,6 +119,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int port, pin_select;
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -116,7 +131,10 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
if (pin_select == 0)
return 0;
- port = get_port_from_gmbus0(pin_select);
+ if (IS_BROXTON(dev_priv))
+ port = bxt_get_port_from_gmbus0(pin_select);
+ else
+ port = get_port_from_gmbus0(pin_select);
if (WARN_ON(port < 0))
return 0;
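Broxton's GMBUS0 pin-select values map to ports differently from the platforms served by get_port_from_gmbus0(), hence the dedicated helper chosen at write time. The mapping, as a sketch:

    switch (gmbus0 & _GMBUS_PIN_SEL_MASK) {
    case 1:  port = PORT_B; break;
    case 2:  port = PORT_C; break;
    case 3:  port = PORT_D; break;
    default: port = -EINVAL;        /* pin select 0 is filtered out earlier */
    }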
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 427e40e64d41..714d709829a2 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -146,14 +146,11 @@ struct execlist_ring_context {
u32 nop4;
u32 lri_cmd_2;
struct execlist_mmio_pair ctx_timestamp;
- struct execlist_mmio_pair pdp3_UDW;
- struct execlist_mmio_pair pdp3_LDW;
- struct execlist_mmio_pair pdp2_UDW;
- struct execlist_mmio_pair pdp2_LDW;
- struct execlist_mmio_pair pdp1_UDW;
- struct execlist_mmio_pair pdp1_LDW;
- struct execlist_mmio_pair pdp0_UDW;
- struct execlist_mmio_pair pdp0_LDW;
+ /*
+ * pdps[8]={ pdp3_UDW, pdp3_LDW, pdp2_UDW, pdp2_LDW,
+ * pdp1_UDW, pdp1_LDW, pdp0_UDW, pdp0_LDW}
+ */
+ struct execlist_mmio_pair pdps[8];
};
struct intel_vgpu_elsp_dwords {
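Folding the eight named pairs into pdps[8] keeps the MMIO layout byte-identical (highest PDP first, UDW before LDW) while allowing indexed access. Index arithmetic, sketched with an illustrative helper:

    /* pdps[0]=pdp3_UDW, pdps[1]=pdp3_LDW, ..., pdps[6]=pdp0_UDW, pdps[7]=pdp0_LDW */
    static inline struct execlist_mmio_pair *
    pdp_udw(struct execlist_ring_context *ctx, int n)       /* n = 0..3 */
    {
            return &ctx->pdps[2 * (3 - n)];                 /* LDW is at +1 */
    }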
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 1c120683e958..face664be3e8 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -36,6 +36,7 @@
#include <uapi/drm/drm_fourcc.h>
#include "i915_drv.h"
#include "gvt.h"
+#include "i915_pvinfo.h"
#define PRIMARY_FORMAT_NUM 16
struct pixel_format {
@@ -150,7 +151,9 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
switch (tiled) {
case PLANE_CTL_TILED_LINEAR:
stride = stride_reg * 64;
@@ -214,7 +217,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
if (!plane->enabled)
return -ENODEV;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
_PLANE_CTL_TILED_SHIFT;
fmt = skl_format_to_drm(
@@ -256,7 +261,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
}
plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
- (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) ?
+ (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) ?
(_PRI_PLANE_STRIDE_MASK >> 6) :
_PRI_PLANE_STRIDE_MASK, plane->bpp);
@@ -300,16 +307,16 @@ static int cursor_mode_to_drm(int mode)
int cursor_pixel_formats_index = 4;
switch (mode) {
- case CURSOR_MODE_128_ARGB_AX:
+ case MCURSOR_MODE_128_ARGB_AX:
cursor_pixel_formats_index = 0;
break;
- case CURSOR_MODE_256_ARGB_AX:
+ case MCURSOR_MODE_256_ARGB_AX:
cursor_pixel_formats_index = 1;
break;
- case CURSOR_MODE_64_ARGB_AX:
+ case MCURSOR_MODE_64_ARGB_AX:
cursor_pixel_formats_index = 2;
break;
- case CURSOR_MODE_64_32B_AX:
+ case MCURSOR_MODE_64_32B_AX:
cursor_pixel_formats_index = 3;
break;
@@ -342,8 +349,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
return -ENODEV;
val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
- mode = val & CURSOR_MODE;
- plane->enabled = (mode != CURSOR_MODE_DISABLE);
+ mode = val & MCURSOR_MODE;
+ plane->enabled = (mode != MCURSOR_MODE_DISABLE);
if (!plane->enabled)
return -ENODEV;
@@ -384,6 +391,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
+ plane->x_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot));
+ plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index a73e1d418c22..4ac18b447247 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -162,7 +162,7 @@ static int verify_firmware(struct intel_gvt *gvt,
h = (struct gvt_firmware_header *)fw->data;
- crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+ crc32_start = offsetofend(struct gvt_firmware_header, crc32);
mem = fw->data + crc32_start;
#define VERIFY(s, a, b) do { \
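offsetofend() computes the same offset as the old hand-written "+ 4" without hard-coding the width of the crc32 member. The identity being relied on, sketched (not necessarily the kernel's exact macro text):

    #define offsetofend_sketch(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
    /* For a u32 crc32 member this is offsetof(..., crc32) + 4, so the
     * checksummed region still starts right after the stored CRC. */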
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4efec8fa6c1d..00aad8164dec 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -216,16 +216,22 @@ static struct gtt_type_table_entry gtt_type_table[] = {
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ /* We treat the IPS bit as 'PSE' at the PTE level. */
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
- GTT_TYPE_INVALID),
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
- GTT_TYPE_INVALID),
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PDE_PT,
@@ -339,8 +345,14 @@ static inline int gtt_set_entry64(void *pt,
#define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30)
#define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21)
+#define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16)
#define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12)
+#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
+#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* split 64K gtt entry */
+
+#define GTT_64K_PTE_STRIDE 16
+
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
unsigned long pfn;
@@ -349,6 +361,8 @@ static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
+ else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
+ pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
else
pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
return pfn;
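Each mask keeps address bits down to the entry's natural alignment (bit 30 for 1G, 21 for 2M, 16 for 64K, 12 for 4K), and the pfn is always expressed in 4K units. A consequence worth noting, as a sketch:

    pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;  /* pfn in 4K units */
    /* 64K / 4K == 16, so a 64K entry's pfn is always a multiple of
     * GTT_64K_PTE_STRIDE. */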
@@ -362,6 +376,9 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
e->val64 &= ~ADDR_2M_MASK;
pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
+ } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
+ e->val64 &= ~ADDR_64K_MASK;
+ pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
} else {
e->val64 &= ~ADDR_4K_MASK;
pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
@@ -372,16 +389,41 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
- /* Entry doesn't have PSE bit. */
- if (get_pse_type(e->type) == GTT_TYPE_INVALID)
- return false;
+ return !!(e->val64 & _PAGE_PSE);
+}
- e->type = get_entry_type(e->type);
- if (!(e->val64 & _PAGE_PSE))
+static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
+{
+ if (gen8_gtt_test_pse(e)) {
+ switch (e->type) {
+ case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ e->val64 &= ~_PAGE_PSE;
+ e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
+ break;
+ case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
+ e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
+ e->val64 &= ~_PAGE_PSE;
+ break;
+ default:
+ WARN_ON(1);
+ }
+ }
+}
+
+static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
+{
+ if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
return false;
- e->type = get_pse_type(e->type);
- return true;
+ return !!(e->val64 & GEN8_PDE_IPS_64K);
+}
+
+static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
+{
+ if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
+ return;
+
+ e->val64 &= ~GEN8_PDE_IPS_64K;
}
static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
@@ -408,6 +450,21 @@ static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
e->val64 |= _PAGE_PRESENT;
}
+static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+ return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
+}
+
+static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+ e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
+}
+
+static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+ e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
+}
+
/*
* Per-platform GMA routines.
*/
@@ -440,6 +497,12 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
.set_present = gtt_entry_set_present,
.test_present = gen8_gtt_test_present,
.test_pse = gen8_gtt_test_pse,
+ .clear_pse = gen8_gtt_clear_pse,
+ .clear_ips = gen8_gtt_clear_ips,
+ .test_ips = gen8_gtt_test_ips,
+ .clear_64k_splited = gen8_gtt_clear_64k_splited,
+ .set_64k_splited = gen8_gtt_set_64k_splited,
+ .test_64k_splited = gen8_gtt_test_64k_splited,
.get_pfn = gen8_gtt_get_pfn,
.set_pfn = gen8_gtt_set_pfn,
};
@@ -453,6 +516,27 @@ static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
.gma_to_pml4_index = gen8_gma_to_pml4_index,
};
+/* Update the entry type according to the PSE and IPS bits. */
+static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
+ struct intel_gvt_gtt_entry *entry, bool ips)
+{
+ switch (entry->type) {
+ case GTT_TYPE_PPGTT_PDE_ENTRY:
+ case GTT_TYPE_PPGTT_PDP_ENTRY:
+ if (pte_ops->test_pse(entry))
+ entry->type = get_pse_type(entry->type);
+ break;
+ case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
+ if (ips)
+ entry->type = get_pse_type(entry->type);
+ break;
+ default:
+ GEM_BUG_ON(!gtt_type_is_entry(entry->type));
+ }
+
+ GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
+}
+
/*
* MM helpers.
*/
@@ -468,8 +552,7 @@ static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
mm->ppgtt_mm.shadow_pdps,
entry, index, false, 0, mm->vgpu);
-
- pte_ops->test_pse(entry);
+ update_entry_type_for_real(pte_ops, entry, false);
}
static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
@@ -574,7 +657,8 @@ static inline int ppgtt_spt_get_entry(
if (ret)
return ret;
- ops->test_pse(e);
+ update_entry_type_for_real(ops, e, guest ?
+ spt->guest_page.pde_ips : false);
gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
type, e->type, index, e->val64);
@@ -653,10 +737,12 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
- if (spt->guest_page.oos_page)
- detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
+ if (spt->guest_page.gfn) {
+ if (spt->guest_page.oos_page)
+ detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
- intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+ intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+ }
list_del_init(&spt->post_shadow_list);
free_spt(spt);
@@ -717,8 +803,9 @@ static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
+/* Allocate a shadow page table without an associated guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
- struct intel_vgpu *vgpu, int type, unsigned long gfn)
+ struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
@@ -753,26 +840,12 @@ retry:
spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
- /*
- * Init guest_page.
- */
- spt->guest_page.type = type;
- spt->guest_page.gfn = gfn;
-
- ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
- ppgtt_write_protection_handler, spt);
- if (ret)
- goto err_unmap_dma;
-
ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
if (ret)
- goto err_unreg_page_track;
+ goto err_unmap_dma;
- trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
return spt;
-err_unreg_page_track:
- intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
err_unmap_dma:
dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
err_free_spt:
@@ -780,6 +853,37 @@ err_free_spt:
return ERR_PTR(ret);
}
+/* Allocate a shadow page table associated with a specific gfn. */
+static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
+ struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+ unsigned long gfn, bool guest_pde_ips)
+{
+ struct intel_vgpu_ppgtt_spt *spt;
+ int ret;
+
+ spt = ppgtt_alloc_spt(vgpu, type);
+ if (IS_ERR(spt))
+ return spt;
+
+ /*
+ * Init guest_page.
+ */
+ ret = intel_vgpu_register_page_track(vgpu, gfn,
+ ppgtt_write_protection_handler, spt);
+ if (ret) {
+ ppgtt_free_spt(spt);
+ return ERR_PTR(ret);
+ }
+
+ spt->guest_page.type = type;
+ spt->guest_page.gfn = gfn;
+ spt->guest_page.pde_ips = guest_pde_ips;
+
+ trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+
+ return spt;
+}
+
#define pt_entry_size_shift(spt) \
((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
@@ -787,24 +891,38 @@ err_free_spt:
(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
#define for_each_present_guest_entry(spt, e, i) \
- for (i = 0; i < pt_entries(spt); i++) \
+ for (i = 0; i < pt_entries(spt); \
+ i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
if (!ppgtt_get_guest_entry(spt, e, i) && \
spt->vgpu->gvt->gtt.pte_ops->test_present(e))
#define for_each_present_shadow_entry(spt, e, i) \
- for (i = 0; i < pt_entries(spt); i++) \
+ for (i = 0; i < pt_entries(spt); \
+ i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
if (!ppgtt_get_shadow_entry(spt, e, i) && \
spt->vgpu->gvt->gtt.pte_ops->test_present(e))
-static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
+#define for_each_shadow_entry(spt, e, i) \
+ for (i = 0; i < pt_entries(spt); \
+ i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
+ if (!ppgtt_get_shadow_entry(spt, e, i))
+
+static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
int v = atomic_read(&spt->refcount);
trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
-
atomic_inc(&spt->refcount);
}
+static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
+{
+ int v = atomic_read(&spt->refcount);
+
+ trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
+ return atomic_dec_return(&spt->refcount);
+}
+
static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
@@ -843,7 +961,8 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
pfn = ops->get_pfn(entry);
type = spt->shadow_page.type;
- if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
+ /* Uninitialized spte or unshadowed spte. */
+ if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
return;
intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
@@ -855,14 +974,11 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
struct intel_gvt_gtt_entry e;
unsigned long index;
int ret;
- int v = atomic_read(&spt->refcount);
trace_spt_change(spt->vgpu->id, "die", spt,
spt->guest_page.gfn, spt->shadow_page.type);
- trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
-
- if (atomic_dec_return(&spt->refcount) > 0)
+ if (ppgtt_put_spt(spt) > 0)
return 0;
for_each_present_shadow_entry(spt, &e, index) {
@@ -871,9 +987,15 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
gvt_vdbg_mm("invalidate 4K entry\n");
ppgtt_invalidate_pte(spt, &e);
break;
+ case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
+ /* We don't set up 64K shadow entries so far. */
+ WARN(1, "suspicious 64K gtt entry\n");
+ continue;
case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ gvt_vdbg_mm("invalidate 2M entry\n");
+ continue;
case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
- WARN(1, "GVT doesn't support 2M/1GB page\n");
+ WARN(1, "GVT doesn't support 1GB page\n");
continue;
case GTT_TYPE_PPGTT_PML4_ENTRY:
case GTT_TYPE_PPGTT_PDP_ENTRY:
@@ -899,6 +1021,22 @@ fail:
return ret;
}
+static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
+ u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
+ GAMW_ECO_ENABLE_64K_IPS_FIELD;
+
+ return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ /* 64K paging only controlled by IPS bit in PTE now. */
+ return true;
+ } else
+ return false;
+}
+
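vgpu_ips_enabled() encodes when guest IPS (64K GTT pages) is honoured per generation. Condensed into a decision sketch (hypothetical helper; generation cut-offs as in the patch):

    static bool ips_honoured(int gen, bool guest_set_ips_for_all_engines)
    {
            if (gen == 9 || gen == 10)      /* requires the GAMW vreg write */
                    return guest_set_ips_for_all_engines;
            return gen >= 11;               /* gen11+: IPS lives purely in the PDE */
    }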
static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
@@ -906,35 +1044,54 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *spt = NULL;
+ bool ips = false;
int ret;
GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
+ if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+ ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
+
spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
- if (spt)
+ if (spt) {
ppgtt_get_spt(spt);
- else {
+
+ if (ips != spt->guest_page.pde_ips) {
+ spt->guest_page.pde_ips = ips;
+
+ gvt_dbg_mm("reshadow PDE since ips changed\n");
+ clear_page(spt->shadow_page.vaddr);
+ ret = ppgtt_populate_spt(spt);
+ if (ret) {
+ ppgtt_put_spt(spt);
+ goto err;
+ }
+ }
+ } else {
int type = get_next_pt_type(we->type);
- spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we));
+ spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
if (IS_ERR(spt)) {
ret = PTR_ERR(spt);
- goto fail;
+ goto err;
}
ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
if (ret)
- goto fail;
+ goto err_free_spt;
ret = ppgtt_populate_spt(spt);
if (ret)
- goto fail;
+ goto err_free_spt;
trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
spt->shadow_page.type);
}
return spt;
-fail:
+
+err_free_spt:
+ ppgtt_free_spt(spt);
+err:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
spt, we->val64, we->type);
return ERR_PTR(ret);
@@ -948,16 +1105,118 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
se->type = ge->type;
se->val64 = ge->val64;
+ /* We always split 64KB pages, so clear IPS in the shadow PDE. */
+ if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+ ops->clear_ips(se);
+
ops->set_pfn(se, s->shadow_page.mfn);
}
+/*
+ * Return 1 if 2MB huge gtt shadowing is possible, 0 if the
+ * preconditions are not met, or negative if an error was found.
+ */
+static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
+ struct intel_gvt_gtt_entry *entry)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ unsigned long pfn;
+
+ if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+ return 0;
+
+ pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
+ if (pfn == INTEL_GVT_INVALID_ADDR)
+ return -EINVAL;
+
+ return PageTransHuge(pfn_to_page(pfn));
+}
+
+static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
+ struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+ struct intel_gvt_gtt_entry *se)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *sub_spt;
+ struct intel_gvt_gtt_entry sub_se;
+ unsigned long start_gfn;
+ dma_addr_t dma_addr;
+ unsigned long sub_index;
+ int ret;
+
+ gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
+
+ start_gfn = ops->get_pfn(se);
+
+ sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
+ if (IS_ERR(sub_spt))
+ return PTR_ERR(sub_spt);
+
+ for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+ start_gfn + sub_index, PAGE_SIZE, &dma_addr);
+ if (ret) {
+ ppgtt_invalidate_spt(spt);
+ return ret;
+ }
+ sub_se.val64 = se->val64;
+
+ /* Copy the PAT field from PDE. */
+ sub_se.val64 &= ~_PAGE_PAT;
+ sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
+
+ ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
+ ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
+ }
+
+ /* Clear dirty field. */
+ se->val64 &= ~_PAGE_DIRTY;
+
+ ops->clear_pse(se);
+ ops->clear_ips(se);
+ ops->set_pfn(se, sub_spt->shadow_page.mfn);
+ ppgtt_set_shadow_entry(spt, se, index);
+ return 0;
+}
+
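The ">> 5" in the PAT copy above is a bit relocation, not a value computation: huge entries carry the PAT attribute at bit 12 (_PAGE_PAT_LARGE) while 4K entries carry it at bit 7 (_PAGE_PAT), and 12 - 5 = 7. Sketch:

    sub_val &= ~_PAGE_PAT;                          /* clear 4K PAT slot */
    sub_val |= (pde_val & _PAGE_PAT_LARGE) >> 5;    /* bit 12 -> bit 7   */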
+static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
+ struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+ struct intel_gvt_gtt_entry *se)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry entry = *se;
+ unsigned long start_gfn;
+ dma_addr_t dma_addr;
+ int i, ret;
+
+ gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
+
+ GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
+
+ start_gfn = ops->get_pfn(se);
+
+ entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
+ ops->set_64k_splited(&entry);
+
+ for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+ start_gfn + i, PAGE_SIZE, &dma_addr);
+ if (ret)
+ return ret;
+
+ ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
+ ppgtt_set_shadow_entry(spt, &entry, index + i);
+ }
+ return 0;
+}
+
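A guest 64K entry (always at an index that is a multiple of 16) is shadowed as 16 consecutive 4K entries covering start_gfn..start_gfn+15, each tagged with GTT_SPTE_FLAG_64K_SPLITED so teardown can recognise the group. Fan-out sketch, with map_4k() standing in for the dma-map and set_pfn steps above:

    for (i = 0; i < GTT_64K_PTE_STRIDE; i++)
            shadow_pte[index + i] = map_4k(start_gfn + i)   /* hypothetical */
                                    | GTT_SPTE_FLAG_64K_SPLITED;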
static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
struct intel_gvt_gtt_entry *ge)
{
struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry se = *ge;
- unsigned long gfn;
+ unsigned long gfn, page_size = PAGE_SIZE;
dma_addr_t dma_addr;
int ret;
@@ -970,16 +1229,33 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
gvt_vdbg_mm("shadow 4K gtt entry\n");
break;
+ case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
+ gvt_vdbg_mm("shadow 64K gtt entry\n");
+ /*
+ * The layout of a 64K page is special: the page size is
+ * controlled by the upper PDE. To keep it simple, we always
+ * split a 64K page into smaller 4K pages in the shadow PT.
+ */
+ return split_64KB_gtt_entry(vgpu, spt, index, &se);
case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ gvt_vdbg_mm("shadow 2M gtt entry\n");
+ ret = is_2MB_gtt_possible(vgpu, ge);
+ if (ret == 0)
+ return split_2MB_gtt_entry(vgpu, spt, index, &se);
+ else if (ret < 0)
+ return ret;
+ page_size = I915_GTT_PAGE_SIZE_2M;
+ break;
case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
- gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
+ gvt_vgpu_err("GVT doesn't support 1GB entry\n");
return -EINVAL;
default:
GEM_BUG_ON(1);
};
/* direct shadow */
- ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
+ &dma_addr);
if (ret)
return -ENXIO;
@@ -1062,8 +1338,12 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
ret = ppgtt_invalidate_spt(s);
if (ret)
goto fail;
- } else
+ } else {
+ /* We don't set up 64K shadow entries so far. */
+ WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
+ "suspicious 64K entry\n");
ppgtt_invalidate_pte(spt, se);
+ }
return 0;
fail:
@@ -1286,7 +1566,7 @@ static int ppgtt_handle_guest_write_page_table(
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry old_se;
int new_present;
- int ret;
+ int i, ret;
new_present = ops->test_present(we);
@@ -1308,8 +1588,27 @@ static int ppgtt_handle_guest_write_page_table(
goto fail;
if (!new_present) {
- ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
- ppgtt_set_shadow_entry(spt, &old_se, index);
+ /* For split 64KB entries, we need to clear all of them. */
+ if (ops->test_64k_splited(&old_se) &&
+ !(index % GTT_64K_PTE_STRIDE)) {
+ gvt_vdbg_mm("remove splited 64K shadow entries\n");
+ for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
+ ops->clear_64k_splited(&old_se);
+ ops->set_pfn(&old_se,
+ vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &old_se, index + i);
+ }
+ } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
+ old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
+ ops->clear_pse(&old_se);
+ ops->set_pfn(&old_se,
+ vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &old_se, index);
+ } else {
+ ops->set_pfn(&old_se,
+ vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &old_se, index);
+ }
}
return 0;
@@ -1391,7 +1690,17 @@ static int ppgtt_handle_guest_write_page_table_bytes(
ppgtt_get_guest_entry(spt, &we, index);
- ops->test_pse(&we);
+ /*
+ * For a page table that has 64K gtt entries, only PTE#0, PTE#16,
+ * PTE#32, ... PTE#496 are used. Writes to the unused PTEs should
+ * be ignored.
+ */
+ if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
+ (index % GTT_64K_PTE_STRIDE)) {
+ gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
+ index);
+ return 0;
+ }
if (bytes == info->gtt_entry_size) {
ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
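The early return above filters guest writes landing on the 15 dead slots between live 64K PTEs; only indices that are multiples of the stride carry state. Predicate sketch:

    static bool is_live_64k_slot(unsigned long index)
    {
            return (index % GTT_64K_PTE_STRIDE) == 0;   /* PTE#0, #16, ... #496 */
    }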
@@ -1939,7 +2248,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
}
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
- &dma_addr);
+ PAGE_SIZE, &dma_addr);
if (ret) {
gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* guest driver may read/write the entry when partial
@@ -2031,7 +2340,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
 * is GTT_TYPE_PPGTT_PTE_PT, fully filled with the scratch page mfn.
*/
- if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+ if (type > GTT_TYPE_PPGTT_PTE_PT) {
struct intel_gvt_gtt_entry se;
memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
@@ -2315,13 +2624,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
gvt_dbg_core("init gtt\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
- gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
- } else {
- return -ENODEV;
- }
+ gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
+ gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
page = (void *)get_zeroed_page(GFP_KERNEL);
if (!page) {
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 97e62647418a..7a9b36176efb 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -63,6 +63,12 @@ struct intel_gvt_gtt_pte_ops {
void (*clear_present)(struct intel_gvt_gtt_entry *e);
void (*set_present)(struct intel_gvt_gtt_entry *e);
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
+ void (*clear_pse)(struct intel_gvt_gtt_entry *e);
+ bool (*test_ips)(struct intel_gvt_gtt_entry *e);
+ void (*clear_ips)(struct intel_gvt_gtt_entry *e);
+ bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e);
+ void (*clear_64k_splited)(struct intel_gvt_gtt_entry *e);
+ void (*set_64k_splited)(struct intel_gvt_gtt_entry *e);
void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
};
@@ -95,6 +101,7 @@ typedef enum {
GTT_TYPE_GGTT_PTE,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY,
GTT_TYPE_PPGTT_PTE_2M_ENTRY,
GTT_TYPE_PPGTT_PTE_1G_ENTRY,
@@ -222,6 +229,7 @@ struct intel_vgpu_ppgtt_spt {
struct {
intel_gvt_gtt_type_t type;
+ bool pde_ips; /* for 64KB PTEs */
void *vaddr;
struct page *page;
unsigned long mfn;
@@ -229,6 +237,7 @@ struct intel_vgpu_ppgtt_spt {
struct {
intel_gvt_gtt_type_t type;
+ bool pde_ips; /* for 64KB PTEs */
unsigned long gfn;
unsigned long write_cnt;
struct intel_vgpu_oos_page *oos_page;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 61bd14fcb649..712f9d14e720 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -238,18 +238,15 @@ static void init_device_info(struct intel_gvt *gvt)
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- info->max_support_vgpus = 8;
- info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
- info->mmio_size = 2 * 1024 * 1024;
- info->mmio_bar = 0;
- info->gtt_start_offset = 8 * 1024 * 1024;
- info->gtt_entry_size = 8;
- info->gtt_entry_size_shift = 3;
- info->gmadr_bytes_in_cmd = 8;
- info->max_surface_size = 36 * 1024 * 1024;
- }
+ info->max_support_vgpus = 8;
+ info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
+ info->mmio_size = 2 * 1024 * 1024;
+ info->mmio_bar = 0;
+ info->gtt_start_offset = 8 * 1024 * 1024;
+ info->gtt_entry_size = 8;
+ info->gtt_entry_size_shift = 3;
+ info->gmadr_bytes_in_cmd = 8;
+ info->max_surface_size = 36 * 1024 * 1024;
info->msi_cap_offset = pdev->msi_cap;
}
@@ -271,11 +268,8 @@ static int gvt_service_thread(void *data)
continue;
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
- (void *)&gvt->service_request)) {
- mutex_lock(&gvt->lock);
+ (void *)&gvt->service_request))
intel_gvt_emulate_vblank(gvt);
- mutex_unlock(&gvt->lock);
- }
if (test_bit(INTEL_GVT_REQUEST_SCHED,
(void *)&gvt->service_request) ||
@@ -379,6 +373,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
idr_init(&gvt->vgpu_idr);
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
+ mutex_init(&gvt->sched_lock);
gvt->dev_priv = dev_priv;
init_device_info(gvt);
@@ -473,3 +468,7 @@ out_clean_idr:
kfree(gvt);
return ret;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
+MODULE_SOFTDEP("pre: kvmgt");
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 858967daf04b..9a9671522774 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -170,12 +170,18 @@ struct intel_vgpu_submission {
struct intel_vgpu {
struct intel_gvt *gvt;
+ struct mutex vgpu_lock;
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
bool active;
bool pv_notified;
bool failsafe;
unsigned int resetting_eng;
+
+ /* Both sched_data and sched_ctl can be seen as part of the global
+ * gvt scheduler structure, so the two vgpu fields below are
+ * protected by sched_lock, not vgpu_lock.
+ */
void *sched_data;
struct vgpu_sched_ctl sched_ctl;
@@ -296,7 +302,13 @@ struct intel_vgpu_type {
};
struct intel_gvt {
+ /* GVT scope lock; protects GVT itself and all resources not yet
+ * protected by more specific locks (the vgpu and scheduler locks).
+ */
struct mutex lock;
+ /* Scheduler scope lock; protects gvt and vgpu scheduling data. */
+ struct mutex sched_lock;
+
struct drm_i915_private *dev_priv;
struct idr vgpu_idr; /* vGPU IDR pool */
@@ -316,6 +328,10 @@ struct intel_gvt {
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
+
+ /* service_request is only ever manipulated with bit operations;
+ * always access it with atomic bit ops so the big gvt lock is not needed.
+ */
unsigned long service_request;
struct {
@@ -363,9 +379,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
-#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
+#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total)
#define gvt_ggtt_sz(gvt) \
- ((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
+ ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 8f1caacdc78a..7a58ca555197 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -55,6 +55,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
return D_SKL;
else if (IS_KABYLAKE(gvt->dev_priv))
return D_KBL;
+ else if (IS_BROXTON(gvt->dev_priv))
+ return D_BXT;
return 0;
}
@@ -208,6 +210,31 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
return 0;
}
+static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
+
+ if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
+ if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
+ gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
+ else if (!ips)
+ gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
+ else {
+ /* All engines must be enabled together for a vGPU,
+ * since we don't know which engine the ppgtt will
+ * bind to when shadowing.
+ */
+ gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
+ ips);
+ return -EINVAL;
+ }
+ }
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
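On gen9/10 the handler above accepts only all-or-nothing values of the IPS field: enabling it for a subset of engines is rejected because GVT cannot know which engine a shadowed PPGTT will eventually run on. Decision sketch:

    /* ips == GAMW_ECO_ENABLE_64K_IPS_FIELD -> all engines: accept
     * ips == 0                             -> disabled:    accept
     * anything in between                  -> partial:     -EINVAL */
    if (ips && ips != GAMW_ECO_ENABLE_64K_IPS_FIELD)
            return -EINVAL;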
static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
@@ -255,7 +282,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)
+ || IS_BROXTON(vgpu->gvt->dev_priv)) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -316,6 +344,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
}
}
+ /* vgpu_lock already hold by emulate mmio r/w */
intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
/* sw will wait for the device to ack the reset request */
@@ -420,7 +449,10 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
else
vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
+ /* vgpu_lock already hold by emulate mmio r/w */
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_check_vblank_emulation(vgpu->gvt);
+ mutex_lock(&vgpu->vgpu_lock);
return 0;
}
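The unlock/relock pair above exists because this series makes gvt->lock the outer lock: the MMIO path already holds vgpu_lock, so it must be dropped before calling code that takes gvt->lock, then reacquired. Ordering sketch:

    /* implied order: gvt->lock before vgpu->vgpu_lock, never the reverse */
    mutex_unlock(&vgpu->vgpu_lock);
    intel_gvt_check_vblank_emulation(vgpu->gvt);    /* takes gvt->lock inside */
    mutex_lock(&vgpu->vgpu_lock);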
@@ -857,7 +889,8 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
data = vgpu_vreg(vgpu, offset);
if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv))
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)
+ || IS_BROXTON(vgpu->gvt->dev_priv))
&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
@@ -1209,8 +1242,8 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
ret = handle_g2v_notification(vgpu, data);
break;
/* add xhot and yhot to handled list to avoid error log */
- case 0x78830:
- case 0x78834:
+ case _vgtif_reg(cursor_x_hot):
+ case _vgtif_reg(cursor_y_hot):
case _vgtif_reg(pdp[0].lo):
case _vgtif_reg(pdp[0].hi):
case _vgtif_reg(pdp[1].lo):
@@ -1369,6 +1402,16 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
*data0 = 0x1e1a1100;
else
*data0 = 0x61514b3d;
+ } else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ /*
+ * "Read memory latency" command on gen9.
+ * The memory latency values below were read
+ * from a Broxton MRB.
+ */
+ if (!*data0)
+ *data0 = 0x16080707;
+ else
+ *data0 = 0x16161616;
}
break;
case SKL_PCODE_CDCLK_CONTROL:
@@ -1426,8 +1469,11 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
{
u32 v = *(u32 *)p_data;
- v &= (1 << 31) | (1 << 29) | (1 << 9) |
- (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
+ if (IS_BROXTON(vgpu->gvt->dev_priv))
+ v &= (1 << 31) | (1 << 29);
+ else
+ v &= (1 << 31) | (1 << 29) | (1 << 9) |
+ (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
v |= (v >> 1);
return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
@@ -1447,6 +1493,109 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
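+/*
+ * No real PLL is programmed for a vGPU, so report the PLL as locked as
+ * soon as the guest sets the enable bit. The port PLL handler below
+ * follows the same pattern.
+ */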
+static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ if (v & BXT_DE_PLL_PLL_ENABLE)
+ v |= BXT_DE_PLL_LOCK;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ if (v & PORT_PLL_ENABLE)
+ v |= PORT_PLL_LOCK;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
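+/*
+ * Mirror the family-level COMMON_RESET_DIS state into the per-DDI
+ * PHY_CTL vregs as the lane-enabled status.
+ */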
+static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+ u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
+
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = vgpu_vreg(vgpu, offset);
+
+ v &= ~UNIQUE_TRANGE_EN_METHOD;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
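+/*
+ * A write to a PCS_DW12 group register also lands in the per-lane
+ * copies, which live at fixed offsets below the GRP register.
+ */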
+static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
+ vgpu_vreg(vgpu, offset - 0x600) = v;
+ vgpu_vreg(vgpu, offset - 0x800) = v;
+ } else {
+ vgpu_vreg(vgpu, offset - 0x400) = v;
+ vgpu_vreg(vgpu, offset - 0x600) = v;
+ }
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
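+/*
+ * Bits 0 and 1 of GT_DISP_PWRON power on DPIO PHY0/PHY1; acknowledge
+ * by reporting PHY_POWER_GOOD in the matching CL1CM_DW0 vreg.
+ */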
+static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ if (v & BIT(0)) {
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+ ~PHY_RESERVED;
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
+ PHY_POWER_GOOD;
+ }
+
+ if (v & BIT(1)) {
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+ ~PHY_RESERVED;
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
+ PHY_POWER_GOOD;
+ }
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
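+/* eDP PSR IMR/IIR writes are discarded; the vregs always read as 0. */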
+static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ vgpu_vreg(vgpu, offset) = 0;
+ return 0;
+}
+
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
@@ -1657,7 +1806,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
+ gamw_echo_dev_rw_ia_write);
+
MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
@@ -2670,17 +2821,17 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
- MMIO_DH(_MMIO(0x46010), D_SKL | D_KBL, NULL, skl_lcpll_write);
- MMIO_DH(_MMIO(0x46014), D_SKL | D_KBL, NULL, skl_lcpll_write);
- MMIO_D(_MMIO(0x6C040), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C048), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C050), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C044), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C04C), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C054), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6c058), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6c05c), D_SKL | D_KBL);
- MMIO_DH(_MMIO(0x6c060), D_SKL | D_KBL, dpll_status_read, NULL);
+ MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
+ MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2805,53 +2956,57 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x8f004), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x8f034), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
- MMIO_D(_MMIO(0xb11c), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
- MMIO_D(_MMIO(0x51000), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
- MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
- MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+ MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ NULL, NULL);
+ MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ NULL, NULL);
MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
MMIO_D(RC6_LOCATION, D_SKL_PLUS);
MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
/* TRTT */
- MMIO_DFH(_MMIO(0x4de0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de8), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4dec), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
- MMIO_DH(_MMIO(0x4dfc), D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
+ MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
+ NULL, gen9_trtte_write);
+ MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
- MMIO_D(_MMIO(0x45008), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
- MMIO_D(_MMIO(0x46430), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
- MMIO_D(_MMIO(0x46520), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
- MMIO_D(_MMIO(0xc403c), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
- MMIO_D(_MMIO(0x1082c0), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x4068), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x67054), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6e560), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6e554), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x2b20), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x65f00), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x65f08), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x320f0), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
@@ -2869,11 +3024,188 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
+ MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_D(_MMIO(0x4ab8), D_KBL);
- MMIO_D(_MMIO(0x2248), D_SKL_PLUS | D_KBL);
+ MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
+
+ return 0;
+}
+
+static int init_bxt_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
+
+ MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
+ MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
+ MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
+ MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
+ MMIO_D(ERROR_GEN6, D_BXT);
+ MMIO_D(DONE_REG, D_BXT);
+ MMIO_D(EIR, D_BXT);
+ MMIO_D(PGTBL_ER, D_BXT);
+ MMIO_D(_MMIO(0x4194), D_BXT);
+ MMIO_D(_MMIO(0x4294), D_BXT);
+ MMIO_D(_MMIO(0x4494), D_BXT);
+
+ MMIO_RING_D(RING_PSMI_CTL, D_BXT);
+ MMIO_RING_D(RING_DMA_FADD, D_BXT);
+ MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
+ MMIO_RING_D(RING_IPEHR, D_BXT);
+ MMIO_RING_D(RING_INSTPS, D_BXT);
+ MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
+ MMIO_RING_D(RING_BBSTATE, D_BXT);
+ MMIO_RING_D(RING_IPEIR, D_BXT);
+
+ MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
+
+ MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
+ MMIO_D(BXT_RP_STATE_CAP, D_BXT);
+ MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
+ NULL, bxt_phy_ctl_family_write);
+ MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
+ NULL, bxt_phy_ctl_family_write);
+ MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
+ MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
+ MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
+ MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
+ NULL, bxt_port_pll_enable_write);
+ MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
+ NULL, bxt_port_pll_enable_write);
+ MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
+ bxt_port_pll_enable_write);
+
+ MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
+
+ MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
+
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
+ NULL, bxt_pcs_dw12_grp_write);
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
+ bxt_port_tx_dw3_read, NULL);
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
+
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
+ NULL, bxt_pcs_dw12_grp_write);
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
+ bxt_port_tx_dw3_read, NULL);
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
+
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
+ NULL, bxt_pcs_dw12_grp_write);
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
+ bxt_port_tx_dw3_read, NULL);
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
+
+ MMIO_D(BXT_DE_PLL_CTL, D_BXT);
+ MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
+ MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
+ MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
+
+ MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
+
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
+
+ MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
+ MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
+
+ MMIO_D(RC6_CTX_BASE, D_BXT);
+
+ MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
+ MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
+ MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
+ MMIO_D(GEN6_GFXPAUSE, D_BXT);
+ MMIO_D(GEN8_L3SQCREG1, D_BXT);
+
+ MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
return 0;
}
@@ -2965,6 +3297,16 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
ret = init_skl_mmio_info(gvt);
if (ret)
goto err;
+ } else if (IS_BROXTON(dev_priv)) {
+ ret = init_broadwell_mmio_info(gvt);
+ if (ret)
+ goto err;
+ ret = init_skl_mmio_info(gvt);
+ if (ret)
+ goto err;
+ ret = init_bxt_mmio_info(gvt);
+ if (ret)
+ goto err;
}
gvt->mmio.mmio_block = mmio_blocks;
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index f6dd9f717888..5af11cf1b482 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -53,7 +53,7 @@ struct intel_gvt_mpt {
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
- dma_addr_t *dma_addr);
+ unsigned long size, dma_addr_t *dma_addr);
void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 7a041b368f68..5daa23ae566b 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -350,7 +350,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
clear_bits |= (1 << bit);
}
- WARN_ON(!up_irq_info);
+ if (WARN_ON(!up_irq_info))
+ return;
if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
@@ -580,7 +581,9 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
+ } else if (IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)
+ || IS_BROXTON(gvt->dev_priv)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -690,14 +693,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
gvt_dbg_core("init irq framework\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- irq->ops = &gen8_irq_ops;
- irq->irq_map = gen8_irq_map;
- } else {
- WARN_ON(1);
- return -ENODEV;
- }
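+ /* All platforms supported by GVT-g (BDW through BXT) are gen8+. */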
+ irq->ops = &gen8_irq_ops;
+ irq->irq_map = gen8_irq_map;
/* common event initialization */
init_events(irq);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index df4e4a07db3d..4d2f53ae9f0f 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -94,6 +94,7 @@ struct gvt_dma {
struct rb_node dma_addr_node;
gfn_t gfn;
dma_addr_t dma_addr;
+ unsigned long size;
struct kref ref;
};
@@ -106,51 +107,103 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
-static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
- dma_addr_t *dma_addr)
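+/* Unpin the gfn range [gfn, gfn + size), page by page. */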
+static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ unsigned long size)
{
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- struct page *page;
- unsigned long pfn;
+ int total_pages;
+ int npage;
int ret;
- /* Pin the page first. */
- ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
- if (ret != 1) {
- gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
- gfn, ret);
- return -EINVAL;
+ total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+
+ for (npage = 0; npage < total_pages; npage++) {
+ unsigned long cur_gfn = gfn + npage;
+
+ ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
+ WARN_ON(ret != 1);
}
+}
- if (!pfn_valid(pfn)) {
- gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
- vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
- return -EINVAL;
+/* Pin a normal or compound guest page for DMA. */
+static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ unsigned long size, struct page **page)
+{
+ unsigned long base_pfn = 0;
+ int total_pages;
+ int npage;
+ int ret;
+
+ total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+ /*
+ * We pin the pages one by one to avoid allocating a big array
+ * of pfns on the stack.
+ */
+ for (npage = 0; npage < total_pages; npage++) {
+ unsigned long cur_gfn = gfn + npage;
+ unsigned long pfn;
+
+ ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
+ if (ret != 1) {
+ gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
+ cur_gfn, ret);
+ goto err;
+ }
+
+ if (!pfn_valid(pfn)) {
+ gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
+ npage++;
+ ret = -EFAULT;
+ goto err;
+ }
+
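+ /*
+ * A multi-page pin must be physically contiguous so that the
+ * whole range can be DMA-mapped as one compound page.
+ */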
+ if (npage == 0)
+ base_pfn = pfn;
+ else if (base_pfn + npage != pfn) {
+ gvt_vgpu_err("The pages are not continuous\n");
+ ret = -EINVAL;
+ npage++;
+ goto err;
+ }
}
+ *page = pfn_to_page(base_pfn);
+ return 0;
+err:
+ gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
+ return ret;
+}
+
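+/*
+ * size may span multiple pages when the guest uses huge GTT entries;
+ * the whole run is mapped with a single dma_map_page() call.
+ */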
+static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ dma_addr_t *dma_addr, unsigned long size)
+{
+ struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct page *page = NULL;
+ int ret;
+
+ ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
+ if (ret)
+ return ret;
+
/* Setup DMA mapping. */
- page = pfn_to_page(pfn);
- *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, *dma_addr)) {
- gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn);
- vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
- return -ENOMEM;
+ *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
+ ret = dma_mapping_error(dev, *dma_addr);
+ if (ret) {
+ gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
+ page_to_pfn(page), ret);
+ gvt_unpin_guest_page(vgpu, gfn, size);
}
- return 0;
+ return ret;
}
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr, unsigned long size)
{
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- int ret;
- dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
- WARN_ON(ret != 1);
+ dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+ gvt_unpin_guest_page(vgpu, gfn, size);
}
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
@@ -191,7 +244,7 @@ static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
}
static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr, unsigned long size)
{
struct gvt_dma *new, *itr;
struct rb_node **link, *parent = NULL;
@@ -203,6 +256,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
new->vgpu = vgpu;
new->gfn = gfn;
new->dma_addr = dma_addr;
+ new->size = size;
kref_init(&new->ref);
/* gfn_cache maps gfn to struct gvt_dma. */
@@ -260,7 +314,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
break;
}
dma = rb_entry(node, struct gvt_dma, gfn_node);
- gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr);
+ gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
__gvt_cache_remove_entry(vgpu, dma);
mutex_unlock(&vgpu->vdev.cache_lock);
}
@@ -515,7 +569,8 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
if (!entry)
continue;
- gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr);
+ gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
+ entry->size);
__gvt_cache_remove_entry(vgpu, entry);
}
mutex_unlock(&vgpu->vdev.cache_lock);
@@ -1648,7 +1703,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
}
int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
- dma_addr_t *dma_addr)
+ unsigned long size, dma_addr_t *dma_addr)
{
struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
@@ -1665,11 +1720,11 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
entry = __gvt_cache_find_gfn(info->vgpu, gfn);
if (!entry) {
- ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
+ ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
if (ret)
goto err_unlock;
- ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+ ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
} else {
@@ -1681,7 +1736,7 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
return 0;
err_unmap:
- gvt_dma_unmap_page(vgpu, gfn, *dma_addr);
+ gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
mutex_unlock(&info->vgpu->vdev.cache_lock);
return ret;
@@ -1691,7 +1746,8 @@ static void __gvt_dma_release(struct kref *ref)
{
struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
- gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr);
+ gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
+ entry->size);
__gvt_cache_remove_entry(entry->vgpu, entry);
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index b31eb36fc102..994366035364 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -67,7 +67,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
return;
gvt = vgpu->gvt;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
if (reg_is_mmio(gvt, offset)) {
if (read)
@@ -85,7 +85,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
memcpy(pt, p_data, bytes);
}
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -109,7 +109,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
return 0;
}
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
@@ -156,7 +156,7 @@ err:
gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
offset, bytes);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
@@ -182,7 +182,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
return 0;
}
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
@@ -220,7 +220,7 @@ err:
gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
bytes);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index dac8c6401e26..1ffc69eba30e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -42,15 +42,16 @@ struct intel_vgpu;
#define D_BDW (1 << 0)
#define D_SKL (1 << 1)
#define D_KBL (1 << 2)
+#define D_BXT (1 << 3)
-#define D_GEN9PLUS (D_SKL | D_KBL)
-#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL)
+#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT)
+#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
-#define D_SKL_PLUS (D_SKL | D_KBL)
-#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL)
+#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT)
+#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
#define D_PRE_SKL (D_BDW)
-#define D_ALL (D_BDW | D_SKL | D_KBL)
+#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT)
typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 5ca9caf7552a..42e1e6bdcc2c 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -364,7 +364,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/
fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
+ if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(dev_priv, fw);
@@ -401,7 +402,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
+ if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
return;
if (!pre && !gen9_render_mocs.initialized)
@@ -446,9 +447,9 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
#define CTX_CONTEXT_CONTROL_VAL 0x03
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
+bool is_inhibit_context(struct intel_context *ce)
{
- u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
+ const u32 *reg_state = ce->lrc_reg_state;
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
@@ -467,7 +468,9 @@ static void switch_mmio(struct intel_vgpu *pre,
u32 old_v, new_v;
dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv))
switch_mocs(pre, next, ring_id);
for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
@@ -479,7 +482,8 @@ static void switch_mmio(struct intel_vgpu *pre,
* state image on kabylake, it's initialized by lri command and
* save or restore with context together.
*/
- if (IS_KABYLAKE(dev_priv) && mmio->in_context)
+ if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ && mmio->in_context)
continue;
// save
@@ -501,7 +505,7 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(s->shadow_ctx, ring_id))
+ !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
continue;
if (mmio->mask)
@@ -574,7 +578,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+ if (IS_SKYLAKE(gvt->dev_priv) ||
+ IS_KABYLAKE(gvt->dev_priv) ||
+ IS_BROXTON(gvt->dev_priv))
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
else
gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 0439eb8057a8..5c3b9ff9f96a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -49,7 +49,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
+bool is_inhibit_context(struct intel_context *ce);
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req);
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 32ffcd566cdd..67f19992b226 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -230,17 +230,18 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
/**
* intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
* @vgpu: a vGPU
- * @gpfn: guest pfn
+ * @gfn: guest pfn
+ * @size: size of the mapping in bytes
* @dma_addr: retrieve allocated dma addr
*
* Returns:
* 0 on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_dma_map_guest_page(
- struct intel_vgpu *vgpu, unsigned long gfn,
+ struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
dma_addr_t *dma_addr)
{
- return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn,
+ return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
dma_addr);
}
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
index 53e2bd79c97d..256d0db8bbb1 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.c
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -157,11 +157,10 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
void *data, unsigned int bytes)
{
- struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_page_track *page_track;
int ret = 0;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
if (!page_track) {
@@ -179,6 +178,6 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
}
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index d053cbe1dc94..09d7bb72b4ff 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -228,7 +228,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
ktime_t cur_time;
- mutex_lock(&gvt->lock);
+ mutex_lock(&gvt->sched_lock);
cur_time = ktime_get();
if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
@@ -244,7 +244,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
tbs_sched_func(sched_data);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
}
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
@@ -359,39 +359,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
+ int ret;
+
+ mutex_lock(&gvt->sched_lock);
gvt->scheduler.sched_ops = &tbs_schedule_ops;
+ ret = gvt->scheduler.sched_ops->init(gvt);
+ mutex_unlock(&gvt->sched_lock);
- return gvt->scheduler.sched_ops->init(gvt);
+ return ret;
}
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
+ mutex_lock(&gvt->sched_lock);
gvt->scheduler.sched_ops->clean(gvt);
+ mutex_unlock(&gvt->sched_lock);
}
+/* For the per-vgpu scheduler policy there are two pieces of per-vgpu
+ * data: sched_data and sched_ctl. Both are treated as part of the
+ * global scheduler and are protected by gvt->sched_lock. Callers
+ * decide for themselves whether vgpu_lock should be held outside.
+ */
+
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
- return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+ int ret;
+
+ mutex_lock(&vgpu->gvt->sched_lock);
+ ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+ mutex_unlock(&vgpu->gvt->sched_lock);
+
+ return ret;
}
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
+ mutex_lock(&vgpu->gvt->sched_lock);
vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+ mutex_lock(&vgpu->gvt->sched_lock);
if (!vgpu_data->active) {
gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
+ mutex_lock(&gvt->sched_lock);
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+ mutex_unlock(&gvt->sched_lock);
}
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
@@ -406,6 +432,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
+ mutex_lock(&vgpu->gvt->sched_lock);
scheduler->sched_ops->stop_schedule(vgpu);
if (scheduler->next_vgpu == vgpu)
@@ -425,4 +452,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index c2d183b91500..b0e566956b8d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -45,20 +45,16 @@ static void set_context_pdp_root_pointer(
struct execlist_ring_context *ring_context,
u32 pdp[8])
{
- struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
int i;
for (i = 0; i < 8; i++)
- pdp_pair[i].val = pdp[7 - i];
+ ring_context->pdps[i].val = pdp[7 - i];
}
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
- struct intel_vgpu *vgpu = workload->vgpu;
- int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ workload->req->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
@@ -128,9 +124,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ workload->req->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *dst;
@@ -205,7 +200,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
static inline bool is_gvt_request(struct i915_request *req)
{
- return i915_gem_context_force_single_submission(req->ctx);
+ return i915_gem_context_force_single_submission(req->gem_context);
}
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -280,10 +275,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
return NOTIFY_OK;
}
-static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static void shadow_context_descriptor_update(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
u64 desc = 0;
desc = ce->lrc_desc;
@@ -292,7 +285,7 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
* like GEN8_CTX_* cached in desc_template
*/
desc &= U64_MAX << 12;
- desc |= ctx->desc_template & ((1ULL << 12) - 1);
+ desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
ce->lrc_desc = desc;
}
@@ -300,12 +293,12 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
+ struct i915_request *req = workload->req;
void *shadow_ring_buffer_va;
u32 *cs;
- struct i915_request *req = workload->req;
- if (IS_KABYLAKE(req->i915) &&
- is_inhibit_context(req->ctx, req->engine->id))
+ if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
+ && is_inhibit_context(req->hw_context))
intel_vgpu_restore_inhibit_context(vgpu, req);
/* allocate shadow ring buffer */
@@ -353,92 +346,67 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- int ring_id = workload->ring_id;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct intel_ring *ring;
+ struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+ struct intel_context *ce;
+ struct i915_request *rq;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- if (workload->shadowed)
+ if (workload->req)
return 0;
+ /* Pin the shadow context by GVT even though it will also be pinned
+ * when i915 allocates the request: GVT updates the guest context from
+ * the shadow context once the workload completes, and by then i915
+ * may already have unpinned the shadow context, invalidating the
+ * shadow_ctx pages. So GVT pins it itself and only unpins shadow_ctx
+ * after the guest context has been updated.
+ */
+ ce = intel_context_pin(shadow_ctx, engine);
+ if (IS_ERR(ce)) {
+ gvt_vgpu_err("fail to pin shadow context\n");
+ return PTR_ERR(ce);
+ }
+
shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
- shadow_context_descriptor_update(shadow_ctx,
- dev_priv->engine[ring_id]);
+ if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
+ shadow_context_descriptor_update(ce);
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
- goto err_scan;
+ goto err_unpin;
if ((workload->ring_id == RCS) &&
(workload->wa_ctx.indirect_ctx.size != 0)) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
- goto err_scan;
+ goto err_shadow;
}
- /* pin shadow context by gvt even the shadow context will be pinned
- * when i915 alloc request. That is because gvt will update the guest
- * context from shadow context when workload is completed, and at that
- * moment, i915 may already unpined the shadow context to make the
- * shadow_ctx pages invalid. So gvt need to pin itself. After update
- * the guest context, gvt can unpin the shadow_ctx safely.
- */
- ring = intel_context_pin(shadow_ctx, engine);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- gvt_vgpu_err("fail to pin shadow context\n");
+ rq = i915_request_alloc(engine, shadow_ctx);
+ if (IS_ERR(rq)) {
+ gvt_vgpu_err("fail to allocate gem request\n");
+ ret = PTR_ERR(rq);
goto err_shadow;
}
+ workload->req = i915_request_get(rq);
ret = populate_shadow_context(workload);
if (ret)
- goto err_unpin;
- workload->shadowed = true;
- return 0;
+ goto err_req;
-err_unpin:
- intel_context_unpin(shadow_ctx, engine);
+ return 0;
+err_req:
+ rq = fetch_and_zero(&workload->req);
+ i915_request_put(rq);
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
- return ret;
-}
-
-static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
-{
- int ring_id = workload->ring_id;
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct i915_request *rq;
- struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- int ret;
-
- rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
- if (IS_ERR(rq)) {
- gvt_vgpu_err("fail to allocate gem request\n");
- ret = PTR_ERR(rq);
- goto err_unpin;
- }
-
- gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
-
- workload->req = i915_request_get(rq);
- ret = copy_workload_to_ring_buffer(workload);
- if (ret)
- goto err_unpin;
- return 0;
-
err_unpin:
- intel_context_unpin(shadow_ctx, engine);
- release_shadow_wa_ctx(&workload->wa_ctx);
+ intel_context_unpin(ce);
return ret;
}
@@ -508,7 +476,11 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
i915_gem_obj_finish_shmem_access(bb->obj);
bb->accessing = false;
- i915_vma_move_to_active(bb->vma, workload->req, 0);
+ ret = i915_vma_move_to_active(bb->vma,
+ workload->req,
+ 0);
+ if (ret)
+ goto err;
}
}
return 0;
@@ -517,21 +489,13 @@ err:
return ret;
}
-static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- struct intel_vgpu_workload *workload = container_of(wa_ctx,
- struct intel_vgpu_workload,
- wa_ctx);
- int ring_id = workload->ring_id;
- struct intel_vgpu_submission *s = &workload->vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
- struct execlist_ring_context *shadow_ring_context;
- struct page *page;
-
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap_atomic(page);
+ struct intel_vgpu_workload *workload =
+ container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
+ struct i915_request *rq = workload->req;
+ struct execlist_ring_context *shadow_ring_context =
+ (struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
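+ /* The request keeps the shadow context pinned, so lrc_reg_state is
+ * already mapped and no kmap round-trip is needed.
+ */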
shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -539,9 +503,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
shadow_ring_context->rcs_indirect_ctx.val =
(shadow_ring_context->rcs_indirect_ctx.val &
(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
-
- kunmap_atomic(shadow_ring_context);
- return 0;
}
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -633,7 +594,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
goto err_unpin_mm;
}
- ret = intel_gvt_generate_request(workload);
+ ret = copy_workload_to_ring_buffer(workload);
if (ret) {
gvt_vgpu_err("fail to generate request\n");
goto err_unpin_mm;
@@ -670,16 +631,14 @@ err_unpin_mm:
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- int ret = 0;
+ int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
ring_id, workload);
+ mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
@@ -687,10 +646,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
goto out;
ret = prepare_workload(workload);
- if (ret) {
- intel_context_unpin(shadow_ctx, engine);
- goto out;
- }
out:
if (ret)
@@ -704,6 +659,7 @@ out:
}
mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
@@ -713,7 +669,7 @@ static struct intel_vgpu_workload *pick_next_workload(
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
- mutex_lock(&gvt->lock);
+ mutex_lock(&gvt->sched_lock);
/*
* no current vgpu / will be scheduled out / no workload
@@ -759,33 +715,29 @@ static struct intel_vgpu_workload *pick_next_workload(
atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
return workload;
}
static void update_guest_context(struct intel_vgpu_workload *workload)
{
+ struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- int ring_id = workload->ring_id;
- struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
unsigned long context_gpa, context_page_num;
int i;
- gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
- workload->ctx_desc.lrca);
-
- context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+ gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
+ workload->ctx_desc.lrca);
+ context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
context_page_num = 19;
i = 2;
@@ -858,19 +810,17 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
scheduler->current_workload[ring_id];
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_request *rq = workload->req;
int event;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
+ mutex_lock(&gvt->sched_lock);
/* For the workload w/ request, needs to wait for the context
* switch to make sure request is completed.
* For the workload w/o request, directly complete the workload.
*/
- if (workload->req) {
- struct drm_i915_private *dev_priv =
- workload->vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine =
- dev_priv->engine[workload->ring_id];
+ if (rq) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
@@ -886,8 +836,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}
- i915_request_put(fetch_and_zero(&workload->req));
-
if (!workload->status && !(vgpu->resetting_eng &
ENGINE_MASK(ring_id))) {
update_guest_context(workload);
@@ -896,10 +844,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(vgpu, event);
}
- mutex_lock(&dev_priv->drm.struct_mutex);
+
/* unpin shadow ctx as the shadow_ctx update is done */
- intel_context_unpin(s->shadow_ctx, engine);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&rq->i915->drm.struct_mutex);
+ intel_context_unpin(rq->hw_context);
+ mutex_unlock(&rq->i915->drm.struct_mutex);
+
+ i915_request_put(fetch_and_zero(&workload->req));
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -939,7 +890,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
if (gvt->scheduler.need_reschedule)
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
struct workload_thread_param {
@@ -957,7 +909,8 @@ static int workload_thread(void *priv)
struct intel_vgpu *vgpu = NULL;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv);
+ || IS_KABYLAKE(gvt->dev_priv)
+ || IS_BROXTON(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
@@ -991,9 +944,7 @@ static int workload_thread(void *priv)
intel_uncore_forcewake_get(gvt->dev_priv,
FORCEWAKE_ALL);
- mutex_lock(&gvt->lock);
ret = dispatch_workload(workload);
- mutex_unlock(&gvt->lock);
if (ret) {
vgpu = workload->vgpu;
@@ -1270,7 +1221,6 @@ alloc_workload(struct intel_vgpu *vgpu)
atomic_set(&workload->shadow_ctx_active, 0);
workload->status = -EINPROGRESS;
- workload->shadowed = false;
workload->vgpu = vgpu;
return workload;
@@ -1285,7 +1235,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu,
u64 gpa;
int i;
- gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+ gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
for (i = 0; i < 8; i++)
intel_gvt_hypervisor_read_gpa(vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 6c644782193e..21eddab4a9cd 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -83,7 +83,6 @@ struct intel_vgpu_workload {
struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
- bool shadowed;
int status;
struct intel_vgpu_mm *shadow_mm;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 572a18c2bfb5..f6fa916517c3 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -46,6 +46,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
vgpu_aperture_gmadr_base(vgpu);
@@ -58,6 +59,9 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
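+ /* UINT_MAX flags the cursor hotspot as not yet set by the guest. */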
+ vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
+ vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;
+
gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
@@ -223,22 +227,20 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
- struct intel_gvt *gvt = vgpu->gvt;
-
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
vgpu->active = false;
if (atomic_read(&vgpu->submission.running_workload_num)) {
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_wait_vgpu_idle(vgpu);
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
}
intel_vgpu_stop_schedule(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -252,14 +254,11 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
WARN(vgpu->active, "vGPU is still active!\n");
intel_gvt_debugfs_remove_vgpu(vgpu);
- idr_remove(&gvt->vgpu_idr, vgpu->id);
- if (idr_is_empty(&gvt->vgpu_idr))
- intel_gvt_clean_irq(gvt);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_submission(vgpu);
intel_vgpu_clean_display(vgpu);
@@ -269,10 +268,16 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_free_resource(vgpu);
intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
- vfree(vgpu);
+ mutex_unlock(&vgpu->vgpu_lock);
+ mutex_lock(&gvt->lock);
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
+ if (idr_is_empty(&gvt->vgpu_idr))
+ intel_gvt_clean_irq(gvt);
intel_gvt_update_vgpu_types(gvt);
mutex_unlock(&gvt->lock);
+
+ vfree(vgpu);
}
#define IDLE_VGPU_IDR 0
@@ -298,6 +303,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
vgpu->id = IDLE_VGPU_IDR;
vgpu->gvt = gvt;
+ mutex_init(&vgpu->vgpu_lock);
for (i = 0; i < I915_NUM_ENGINES; i++)
INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
@@ -324,7 +330,10 @@ out_free_vgpu:
*/
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
+ mutex_lock(&vgpu->vgpu_lock);
intel_vgpu_clean_sched_policy(vgpu);
+ mutex_unlock(&vgpu->vgpu_lock);
+
vfree(vgpu);
}
@@ -342,8 +351,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (!vgpu)
return ERR_PTR(-ENOMEM);
- mutex_lock(&gvt->lock);
-
ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
GFP_KERNEL);
if (ret < 0)
@@ -353,6 +360,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->handle = param->handle;
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
+ mutex_init(&vgpu->vgpu_lock);
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init(&vgpu->object_idr);
@@ -400,8 +408,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- mutex_unlock(&gvt->lock);
-
return vgpu;
out_clean_sched_policy:
@@ -424,7 +430,6 @@ out_clean_idr:
idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
vfree(vgpu);
- mutex_unlock(&gvt->lock);
return ERR_PTR(ret);
}
@@ -456,12 +461,12 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
+ mutex_lock(&gvt->lock);
vgpu = __intel_gvt_create_vgpu(gvt, &param);
- if (IS_ERR(vgpu))
- return vgpu;
-
- /* calculate left instance change for types */
- intel_gvt_update_vgpu_types(gvt);
+ if (!IS_ERR(vgpu))
+ /* recalculate the remaining instances for each vGPU type */
+ intel_gvt_update_vgpu_types(gvt);
+ mutex_unlock(&gvt->lock);
return vgpu;
}
@@ -473,7 +478,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
* @engine_mask: engines to reset for GT reset
*
* This function is called when user wants to reset a virtual GPU through
- * device model reset or GT reset. The caller should hold the gvt lock.
+ * device model reset or GT reset. The caller should hold the vgpu lock.
*
* vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
* the whole vGPU to default state as when it is created. This vGPU function
@@ -513,9 +518,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
* scheduler when the reset is triggered by current vgpu.
*/
if (scheduler->current_vgpu == NULL) {
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_wait_vgpu_idle(vgpu);
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
}
intel_vgpu_reset_submission(vgpu, resetting_eng);
@@ -555,7 +560,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
*/
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
- mutex_lock(&vgpu->gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
intel_gvt_reset_vgpu_locked(vgpu, true, 0);
- mutex_unlock(&vgpu->gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
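
Taken together, the gvt hunks above split the old single gvt->lock in two: gvt->lock now covers only global bookkeeping (the vgpu IDR, type availability, IRQ teardown), while the new per-instance vgpu->vgpu_lock covers device state. A condensed sketch of the destroy path's resulting lock order (illustrative only, not a literal copy of the patch; the per-vGPU cleanup calls are elided):

/*
 * The two mutexes are taken sequentially, never nested, so this path
 * introduces no new lock-ordering rule.
 */
static void destroy_vgpu_sketch(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&vgpu->vgpu_lock);		/* per-vGPU state only */
	/* ... clean sched policy, submission, display, mmio ... */
	mutex_unlock(&vgpu->vgpu_lock);

	mutex_lock(&gvt->lock);			/* global bookkeeping only */
	idr_remove(&gvt->vgpu_idr, vgpu->id);
	if (idr_is_empty(&gvt->vgpu_idr))
		intel_gvt_clean_irq(gvt);
	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	vfree(vgpu);				/* after both locks are dropped */
}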
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 13e7b9e4a6e6..f9ce35da4123 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -328,7 +328,7 @@ static int per_file_stats(int id, void *ptr, void *data)
} else {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
- if (ppgtt->base.file != stats->file_priv)
+ if (ppgtt->vm.file != stats->file_priv)
continue;
}
@@ -508,7 +508,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
dpy_count, dpy_size);
seq_printf(m, "%llu [%pa] gtt total\n",
- ggtt->base.total, &ggtt->mappable_end);
+ ggtt->vm.total, &ggtt->mappable_end);
seq_printf(m, "Supported page sizes: %s\n",
stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
buf, sizeof(buf)));
@@ -542,8 +542,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
struct i915_request,
client_link);
rcu_read_lock();
- task = pid_task(request && request->ctx->pid ?
- request->ctx->pid : file->pid,
+ task = pid_task(request && request->gem_context->pid ?
+ request->gem_context->pid : file->pid,
PIDTYPE_PID);
print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
@@ -1162,19 +1162,28 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
- pm_ier = I915_READ(GEN6_PMIER);
- pm_imr = I915_READ(GEN6_PMIMR);
- pm_isr = I915_READ(GEN6_PMISR);
- pm_iir = I915_READ(GEN6_PMIIR);
- pm_mask = I915_READ(GEN6_PMINTRMSK);
- } else {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
+ /*
+ * The equivalent to the PM ISR & IIR cannot be read
+ * without affecting the current state of the system
+ */
+ pm_isr = 0;
+ pm_iir = 0;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
pm_ier = I915_READ(GEN8_GT_IER(2));
pm_imr = I915_READ(GEN8_GT_IMR(2));
pm_isr = I915_READ(GEN8_GT_ISR(2));
pm_iir = I915_READ(GEN8_GT_IIR(2));
- pm_mask = I915_READ(GEN6_PMINTRMSK);
+ } else {
+ pm_ier = I915_READ(GEN6_PMIER);
+ pm_imr = I915_READ(GEN6_PMIMR);
+ pm_isr = I915_READ(GEN6_PMISR);
+ pm_iir = I915_READ(GEN6_PMIIR);
}
+ pm_mask = I915_READ(GEN6_PMINTRMSK);
+
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "HW control enabled: %s\n",
@@ -1182,8 +1191,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "SW control enabled: %s\n",
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
GEN6_RP_MEDIA_SW_MODE));
- seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
- pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
+
+ seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+ pm_ier, pm_imr, pm_mask);
+ if (INTEL_GEN(dev_priv) <= 10)
+ seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
+ pm_isr, pm_iir);
seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
rps->pm_intrmsk_mbz);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
@@ -1205,7 +1218,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
seq_printf(m, "RP PREV UP: %d (%dus)\n",
rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
- seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
+ seq_printf(m, "Up threshold: %d%%\n",
+ rps->power.up_threshold);
seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
@@ -1213,7 +1227,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
- seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
+ seq_printf(m, "Down threshold: %d%%\n",
+ rps->power.down_threshold);
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
@@ -1346,11 +1361,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
engine->hangcheck.seqno, seqno[id],
intel_engine_last_submit(engine));
- seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
+ seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
&dev_priv->gpu_error.missed_irq_rings)),
- yesno(engine->hangcheck.stalled));
+ yesno(engine->hangcheck.stalled),
+ yesno(engine->hangcheck.wedged));
spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
@@ -1645,11 +1661,6 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
else
seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
- if (fbc->work.scheduled)
- seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
- fbc->work.scheduled_vblank,
- drm_crtc_vblank_count(&fbc->crtc->base));
-
if (intel_fbc_is_active(dev_priv)) {
u32 mask;
@@ -1895,7 +1906,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fbdev_fb->base.format->cpp[0] * 8,
fbdev_fb->base.modifier,
drm_framebuffer_read_refcount(&fbdev_fb->base));
- describe_obj(m, fbdev_fb->obj);
+ describe_obj(m, intel_fb_obj(&fbdev_fb->base));
seq_putc(m, '\n');
}
#endif
@@ -1913,7 +1924,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.format->cpp[0] * 8,
fb->base.modifier,
drm_framebuffer_read_refcount(&fb->base));
- describe_obj(m, fb->obj);
+ describe_obj(m, intel_fb_obj(&fb->base));
seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
@@ -2209,6 +2220,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
+ seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
seq_printf(m, "Frequency requested %d\n",
intel_gpu_freq(dev_priv, rps->cur_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
@@ -2252,13 +2264,13 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
- rps_power_to_str(rps->power));
+ rps_power_to_str(rps->power.mode));
seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
rpup && rpupei ? 100 * rpup / rpupei : 0,
- rps->up_threshold);
+ rps->power.up_threshold);
seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
- rps->down_threshold);
+ rps->power.down_threshold);
} else {
seq_puts(m, "\nRPS Autotuning inactive\n");
}
@@ -2523,7 +2535,7 @@ static int i915_guc_log_level_get(void *data, u64 *val)
if (!USES_GUC(dev_priv))
return -ENODEV;
- *val = intel_guc_log_level_get(&dev_priv->guc.log);
+ *val = intel_guc_log_get_level(&dev_priv->guc.log);
return 0;
}
@@ -2535,7 +2547,7 @@ static int i915_guc_log_level_set(void *data, u64 val)
if (!USES_GUC(dev_priv))
return -ENODEV;
- return intel_guc_log_level_set(&dev_priv->guc.log, val);
+ return intel_guc_log_set_level(&dev_priv->guc.log, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
@@ -2583,31 +2595,9 @@ static const struct file_operations i915_guc_log_relay_fops = {
.release = i915_guc_log_relay_release,
};
-static const char *psr2_live_status(u32 val)
-{
- static const char * const live_status[] = {
- "IDLE",
- "CAPTURE",
- "CAPTURE_FS",
- "SLEEP",
- "BUFON_FW",
- "ML_UP",
- "SU_STANDBY",
- "FAST_SLEEP",
- "DEEP_SLEEP",
- "BUF_ON",
- "TG_ON"
- };
-
- val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
- if (val < ARRAY_SIZE(live_status))
- return live_status[val];
-
- return "unknown";
-}
-
-static const char *psr_sink_status(u8 val)
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
+ u8 val;
static const char * const sink_status[] = {
"inactive",
"transition to active, capture and display",
@@ -2616,22 +2606,94 @@ static const char *psr_sink_status(u8 val)
"transition to inactive, capture and display, timing re-sync",
"reserved",
"reserved",
- "sink internal error"
+ "sink internal error",
};
+ struct drm_connector *connector = m->private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_dp *intel_dp =
+ enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ int ret;
- val &= DP_PSR_SINK_STATE_MASK;
- if (val < ARRAY_SIZE(sink_status))
- return sink_status[val];
+ if (!CAN_PSR(dev_priv)) {
+ seq_puts(m, "PSR Unsupported\n");
+ return -ENODEV;
+ }
- return "unknown";
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+
+ if (ret == 1) {
+ const char *str = "unknown";
+
+ val &= DP_PSR_SINK_STATE_MASK;
+ if (val < ARRAY_SIZE(sink_status))
+ str = sink_status[val];
+ seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+ } else {
+ return ret;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
+
+static void
+psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+{
+ u32 val, psr_status;
+
+ if (dev_priv->psr.psr2_enabled) {
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+ psr_status = I915_READ(EDP_PSR2_STATUS);
+ val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
+ EDP_PSR2_STATUS_STATE_SHIFT;
+ if (val < ARRAY_SIZE(live_status)) {
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n",
+ psr_status, live_status[val]);
+ return;
+ }
+ } else {
+ static const char * const live_status[] = {
+ "IDLE",
+ "SRDONACK",
+ "SRDENT",
+ "BUFOFF",
+ "BUFON",
+ "AUXACK",
+ "SRDOFFACK",
+ "SRDENT_ON",
+ };
+ psr_status = I915_READ(EDP_PSR_STATUS);
+ val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
+ EDP_PSR_STATUS_STATE_SHIFT;
+ if (val < ARRAY_SIZE(live_status)) {
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n",
+ psr_status, live_status[val]);
+ return;
+ }
+ }
+
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
u32 psrperf = 0;
- u32 stat[3];
- enum pipe pipe;
bool enabled = false;
bool sink_support;
@@ -2649,50 +2711,18 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
dev_priv->psr.busy_frontbuffer_bits);
- seq_printf(m, "Re-enable work scheduled: %s\n",
- yesno(work_busy(&dev_priv->psr.work.work)));
-
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_enabled)
- enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
- else
- enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
- } else {
- for_each_pipe(dev_priv, pipe) {
- enum transcoder cpu_transcoder =
- intel_pipe_to_cpu_transcoder(dev_priv, pipe);
- enum intel_display_power_domain power_domain;
- power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- if (!intel_display_power_get_if_enabled(dev_priv,
- power_domain))
- continue;
-
- stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- enabled = true;
-
- intel_display_power_put(dev_priv, power_domain);
- }
- }
+ if (dev_priv->psr.psr2_enabled)
+ enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
+ else
+ enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
seq_printf(m, "Main link in standby mode: %s\n",
yesno(dev_priv->psr.link_standby));
- seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
-
- if (!HAS_DDI(dev_priv))
- for_each_pipe(dev_priv, pipe) {
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- seq_printf(m, " pipe %c", pipe_name(pipe));
- }
- seq_puts(m, "\n");
+ seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
/*
- * VLV/CHV PSR has no kind of performance counter
* SKL+ Perf counter is reset to 0 every time DC state is entered
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2701,21 +2731,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Performance_Counter: %u\n", psrperf);
}
- if (dev_priv->psr.psr2_enabled) {
- u32 psr2 = I915_READ(EDP_PSR2_STATUS);
-
- seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
- psr2, psr2_live_status(psr2));
- }
- if (dev_priv->psr.enabled) {
- struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
- u8 val;
-
- if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
- seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
- psr_sink_status(val));
- }
+ psr_source_status(dev_priv, m);
mutex_unlock(&dev_priv->psr.lock);
if (READ_ONCE(dev_priv->psr.debug)) {
@@ -2762,86 +2779,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
i915_edp_psr_debug_get, i915_edp_psr_debug_set,
"%llu\n");
-static int i915_sink_crc(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp = NULL;
- struct drm_modeset_acquire_ctx ctx;
- int ret;
- u8 crc[6];
-
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
- drm_connector_list_iter_begin(dev, &conn_iter);
-
- for_each_intel_connector_iter(connector, &conn_iter) {
- struct drm_crtc *crtc;
- struct drm_connector_state *state;
- struct intel_crtc_state *crtc_state;
-
- if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
- continue;
-
-retry:
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
- if (ret)
- goto err;
-
- state = connector->base.state;
- if (!state->best_encoder)
- continue;
-
- crtc = state->crtc;
- ret = drm_modeset_lock(&crtc->mutex, &ctx);
- if (ret)
- goto err;
-
- crtc_state = to_intel_crtc_state(crtc->state);
- if (!crtc_state->base.active)
- continue;
-
- /*
- * We need to wait for all crtc updates to complete, to make
- * sure any pending modesets and plane updates are completed.
- */
- if (crtc_state->base.commit) {
- ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);
-
- if (ret)
- goto err;
- }
-
- intel_dp = enc_to_intel_dp(state->best_encoder);
-
- ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
- if (ret)
- goto err;
-
- seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
- crc[0], crc[1], crc[2],
- crc[3], crc[4], crc[5]);
- goto out;
-
-err:
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret)
- goto retry;
- }
- goto out;
- }
- ret = -ENODEV;
-out:
- drm_connector_list_iter_end(&conn_iter);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- return ret;
-}
-
static int i915_energy_uJ(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3398,28 +3335,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_workarounds *workarounds = &dev_priv->workarounds;
+ struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
int i;
- intel_runtime_pm_get(dev_priv);
-
- seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for (i = 0; i < workarounds->count; ++i) {
- i915_reg_t addr;
- u32 mask, value, read;
- bool ok;
-
- addr = workarounds->reg[i].addr;
- mask = workarounds->reg[i].mask;
- value = workarounds->reg[i].value;
- read = I915_READ(addr);
- ok = (value & mask) == (read & mask);
- seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
- i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
- }
-
- intel_runtime_pm_put(dev_priv);
+ seq_printf(m, "Workarounds applied: %d\n", wa->count);
+ for (i = 0; i < wa->count; ++i)
+ seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
+ wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
return 0;
}
@@ -4121,7 +4043,8 @@ fault_irq_set(struct drm_i915_private *i915,
err = i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED |
- I915_WAIT_INTERRUPTIBLE);
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unlock;
@@ -4226,7 +4149,8 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_ACTIVE)
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (val & DROP_RETIRE)
i915_retire_requests(dev_priv);
@@ -4245,8 +4169,13 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_shrink_all(dev_priv);
fs_reclaim_release(GFP_KERNEL);
- if (val & DROP_IDLE)
- drain_delayed_work(&dev_priv->gt.idle_work);
+ if (val & DROP_IDLE) {
+ do {
+ if (READ_ONCE(dev_priv->gt.active_requests))
+ flush_delayed_work(&dev_priv->gt.retire_work);
+ drain_delayed_work(&dev_priv->gt.idle_work);
+ } while (READ_ONCE(dev_priv->gt.awake));
+ }
if (val & DROP_FREED)
i915_gem_drain_freed_objects(dev_priv);
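
The new DROP_IDLE loop above is worth spelling out: flush_delayed_work() waits for one execution of the retire worker, while drain_delayed_work() (an i915-local helper, assumed here) keeps flushing the idle worker until it stops re-arming itself; the outer do/while repeats the whole sequence until the GT reports itself asleep. A minimal annotated model, with names as in the hunk:

do {
	/* Outstanding requests keep the idle worker from completing,
	 * so retire them first. */
	if (READ_ONCE(dev_priv->gt.active_requests))
		flush_delayed_work(&dev_priv->gt.retire_work);
	/* Re-flush until the idle worker no longer re-queues itself. */
	drain_delayed_work(&dev_priv->gt.idle_work);
} while (READ_ONCE(dev_priv->gt.awake));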
@@ -4795,7 +4724,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_llc", i915_llc, 0},
{"i915_edp_psr_status", i915_edp_psr_status, 0},
- {"i915_sink_crc_eDP1", i915_sink_crc, 0},
{"i915_energy_uJ", i915_energy_uJ, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
@@ -4829,7 +4757,6 @@ static const struct i915_debugfs_files {
#endif
{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
{"i915_next_seqno", &i915_next_seqno_fops},
- {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
@@ -4849,7 +4776,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
struct drm_minor *minor = dev_priv->drm.primary;
struct dentry *ent;
- int ret, i;
+ int i;
ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
minor->debugfs_root, to_i915(minor->dev),
@@ -4857,10 +4784,6 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
if (!ent)
return -ENOMEM;
- ret = intel_pipe_crc_create(minor);
- if (ret)
- return ret;
-
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
ent = debugfs_create_file(i915_debugfs_files[i].name,
S_IRUGO | S_IWUSR,
@@ -4982,9 +4905,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
debugfs_create_file("i915_dpcd", S_IRUGO, root,
connector, &i915_dpcd_fops);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
debugfs_create_file("i915_panel_timings", S_IRUGO, root,
connector, &i915_panel_fops);
+ debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
+ connector, &i915_psr_sink_status_fops);
+ }
return 0;
}
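
For reference, the per-connector i915_psr_sink_status node added above leans on DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>. Roughly (paraphrased; see the header for the authoritative definition), the macro expands to an open wrapper plus a complete file_operations, which is why the show function can pull the connector out of m->private:

/* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status). */
static int i915_psr_sink_status_open(struct inode *inode, struct file *file)
{
	/* single_open() stores inode->i_private (here: the drm_connector
	 * passed to debugfs_create_file()) as m->private. */
	return single_open(file, i915_psr_sink_status_show, inode->i_private);
}

static const struct file_operations i915_psr_sink_status_fops = {
	.owner   = THIS_MODULE,
	.open    = i915_psr_sink_status_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};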
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9c449b8d8eab..f8cfd16be534 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,11 +67,18 @@ bool __i915_inject_load_failure(const char *func, int line)
if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
i915_modparams.inject_load_failure, func, line);
+ i915_modparams.inject_load_failure = 0;
return true;
}
return false;
}
+
+bool i915_error_injected(void)
+{
+ return i915_load_fail_count && !i915_modparams.inject_load_failure;
+}
+
#endif
#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
@@ -97,8 +104,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
vaf.fmt = fmt;
vaf.va = &args;
- dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
- __builtin_return_address(0), &vaf);
+ if (is_error)
+ dev_printk(level, kdev, "%pV", &vaf);
+ else
+ dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+
+ va_end(args);
if (is_error && !shown_bug_once) {
/*
@@ -110,25 +122,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
dev_notice(kdev, "%s", FDO_BUG_MSG);
shown_bug_once = true;
}
-
- va_end(args);
-}
-
-static bool i915_error_injected(struct drm_i915_private *dev_priv)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
- return i915_modparams.inject_load_failure &&
- i915_load_fail_count == i915_modparams.inject_load_failure;
-#else
- return false;
-#endif
}
-#define i915_load_error(dev_priv, fmt, ...) \
- __i915_printk(dev_priv, \
- i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
- fmt, ##__VA_ARGS__)
-
/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
static enum intel_pch
intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
@@ -233,6 +228,8 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (IS_ICELAKE(dev_priv))
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
if (id)
DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
@@ -246,14 +243,6 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
struct pci_dev *pch = NULL;
- /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
- * (which really amounts to a PCH but no South Display).
- */
- if (INTEL_INFO(dev_priv)->num_pipes == 0) {
- dev_priv->pch_type = PCH_NOP;
- return;
- }
-
/*
* The reason to probe ISA bridge instead of Dev31:Fun0 is to
* make graphics device passthrough work easy for VMM, that only
@@ -282,18 +271,28 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
pch->subsystem_device)) {
id = intel_virt_detect_pch(dev_priv);
- if (id) {
- pch_type = intel_pch_type(dev_priv, id);
- if (WARN_ON(pch_type == PCH_NONE))
- pch_type = PCH_NOP;
- } else {
- pch_type = PCH_NOP;
- }
+ pch_type = intel_pch_type(dev_priv, id);
+
+ /* Sanity check virtual PCH id */
+ if (WARN_ON(id && pch_type == PCH_NONE))
+ id = 0;
+
dev_priv->pch_type = pch_type;
dev_priv->pch_id = id;
break;
}
}
+
+ /*
+ * Use PCH_NOP (PCH but no South Display) for PCH platforms without
+ * display.
+ */
+ if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
+ DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
+ dev_priv->pch_type = PCH_NOP;
+ dev_priv->pch_id = 0;
+ }
+
if (!pch)
DRM_DEBUG_KMS("No PCH found.\n");
@@ -634,26 +633,6 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch,
};
-static void i915_gem_fini(struct drm_i915_private *dev_priv)
-{
- /* Flush any outstanding unpin_work. */
- i915_gem_drain_workqueue(dev_priv);
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_uc_fini_hw(dev_priv);
- intel_uc_fini(dev_priv);
- i915_gem_cleanup_engines(dev_priv);
- i915_gem_contexts_fini(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- intel_uc_fini_misc(dev_priv);
- i915_gem_cleanup_userptr(dev_priv);
-
- i915_gem_drain_freed_objects(dev_priv);
-
- WARN_ON(!list_empty(&dev_priv->contexts.list));
-}
-
static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -703,7 +682,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = i915_gem_init(dev_priv);
if (ret)
- goto cleanup_irq;
+ goto cleanup_modeset;
intel_setup_overlay(dev_priv);
@@ -723,6 +702,8 @@ cleanup_gem:
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_fini(dev_priv);
+cleanup_modeset:
+ intel_modeset_cleanup(dev);
cleanup_irq:
drm_irq_uninstall(dev);
intel_teardown_gmbus(dev_priv);
@@ -919,7 +900,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
spin_lock_init(&dev_priv->uncore.lock);
mutex_init(&dev_priv->sb_lock);
- mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
@@ -1173,8 +1153,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv);
- intel_opregion_setup(dev_priv);
-
i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1189,6 +1167,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* get lost on g4x as well, and interrupt delivery seems to stay
* properly dead afterwards. So we'll just disable them for all
* pre-gen5 chipsets.
+ *
+ * dp aux and gmbus irq on gen4 seem to be able to generate legacy
+ * interrupts even when in MSI mode. This results in spurious
+ * interrupt warnings if the legacy irq no. is shared with another
+ * device. The kernel then disables that interrupt source and so
+ * prevents the other device from working properly.
*/
if (INTEL_GEN(dev_priv) >= 5) {
if (pci_enable_msi(pdev) < 0)
@@ -1197,10 +1181,16 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
ret = intel_gvt_init(dev_priv);
if (ret)
- goto err_ggtt;
+ goto err_msi;
+
+ intel_opregion_setup(dev_priv);
return 0;
+err_msi:
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ pm_qos_remove_request(&dev_priv->pm_qos);
err_ggtt:
i915_ggtt_cleanup_hw(dev_priv);
err_perf:
@@ -1433,6 +1423,7 @@ out_fini:
drm_dev_fini(&dev_priv->drm);
out_free:
kfree(dev_priv);
+ pci_set_drvdata(pdev, NULL);
return ret;
}
@@ -1553,17 +1544,30 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
return false;
}
+static int i915_drm_prepare(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ int err;
+
+ /*
+ * NB intel_display_suspend() may issue new requests after we've
+ * ostensibly marked the GPU as ready-to-sleep here. We need to
+ * split out that work and pull it forward so that after that point,
+ * the GPU is not woken again.
+ */
+ err = i915_gem_suspend(i915);
+ if (err)
+ dev_err(&i915->drm.pdev->dev,
+ "GEM idle failed, suspend/resume might fail\n");
+
+ return err;
+}
+
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
pci_power_t opregion_target_state;
- int error;
-
- /* ignore lid events during suspend */
- mutex_lock(&dev_priv->modeset_restore_lock);
- dev_priv->modeset_restore = MODESET_SUSPENDED;
- mutex_unlock(&dev_priv->modeset_restore_lock);
disable_rpm_wakeref_asserts(dev_priv);
@@ -1575,16 +1579,9 @@ static int i915_drm_suspend(struct drm_device *dev)
pci_save_state(pdev);
- error = i915_gem_suspend(dev_priv);
- if (error) {
- dev_err(&pdev->dev,
- "GEM idle failed, resume might fail\n");
- goto out;
- }
-
intel_display_suspend(dev);
- intel_dp_mst_suspend(dev);
+ intel_dp_mst_suspend(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv);
intel_hpd_cancel_work(dev_priv);
@@ -1600,7 +1597,6 @@ static int i915_drm_suspend(struct drm_device *dev)
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev_priv, opregion_target_state);
- intel_uncore_suspend(dev_priv);
intel_opregion_unregister(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -1609,10 +1605,9 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_csr_ucode_suspend(dev_priv);
-out:
enable_rpm_wakeref_asserts(dev_priv);
- return error;
+ return 0;
}
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
@@ -1623,7 +1618,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
disable_rpm_wakeref_asserts(dev_priv);
+ i915_gem_suspend_late(dev_priv);
+
intel_display_set_init_power(dev_priv, false);
+ intel_uncore_suspend(dev_priv);
/*
* In case of firmware assisted context save/restore don't manually
@@ -1710,6 +1708,8 @@ static int i915_drm_resume(struct drm_device *dev)
disable_rpm_wakeref_asserts(dev_priv);
intel_sanitize_gt_powersave(dev_priv);
+ i915_gem_sanitize(dev_priv);
+
ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
DRM_ERROR("failed to re-enable GGTT\n");
@@ -1746,7 +1746,7 @@ static int i915_drm_resume(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_dp_mst_resume(dev);
+ intel_dp_mst_resume(dev_priv);
intel_display_resume(dev);
@@ -1764,10 +1764,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
- mutex_lock(&dev_priv->modeset_restore_lock);
- dev_priv->modeset_restore = MODESET_DONE;
- mutex_unlock(&dev_priv->modeset_restore_lock);
-
intel_opregion_notify_adapter(dev_priv, PCI_D0);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1851,7 +1847,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
else
intel_display_set_init_power(dev_priv, true);
- i915_gem_sanitize(dev_priv);
+ intel_engines_sanitize(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2081,6 +2077,22 @@ out:
return ret;
}
+static int i915_pm_prepare(struct device *kdev)
+{
+ struct pci_dev *pdev = to_pci_dev(kdev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ if (!dev) {
+ dev_err(kdev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_prepare(dev);
+}
+
static int i915_pm_suspend(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
@@ -2731,6 +2743,7 @@ const struct dev_pm_ops i915_pm_ops = {
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
* PMSG_RESUME]
*/
+ .prepare = i915_pm_prepare,
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
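
To make the effect of the new .prepare hook concrete: the device PM core invokes .prepare for every device before any .suspend callback runs, and a non-zero return aborts the transition. Moving i915_gem_suspend() there guarantees GEM is idled before intel_display_suspend() (which may emit a few final requests) executes in the suspend phase proper. A minimal model of the resulting ordering, assuming the standard dev_pm_ops contract:

static const struct dev_pm_ops ordering_sketch = {
	.prepare      = i915_pm_prepare,      /* i915_drm_prepare(): idle GEM */
	.suspend      = i915_pm_suspend,      /* intel_display_suspend() etc. */
	.suspend_late = i915_pm_suspend_late, /* i915_gem_suspend_late(),
						 intel_uncore_suspend() */
};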
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 71e1aa54f774..4aca5344863d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -40,6 +40,7 @@
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
+#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
@@ -85,8 +86,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180514"
-#define DRIVER_TIMESTAMP 1526300884
+#define DRIVER_DATE "20180719"
+#define DRIVER_TIMESTAMP 1532015279
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -107,13 +108,24 @@
I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+
bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
__i915_inject_load_failure(__func__, __LINE__)
+
+bool i915_error_injected(void);
+
#else
+
#define i915_inject_load_failure() false
+#define i915_error_injected() false
+
#endif
+#define i915_load_error(i915, fmt, ...) \
+ __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
+ fmt, ##__VA_ARGS__)
+
typedef struct {
uint32_t val;
} uint_fixed_16_16_t;
@@ -287,7 +299,6 @@ struct i915_hotplug {
u32 event_bits;
struct delayed_work reenable_work;
- struct intel_digital_port *irq_port[I915_MAX_PORTS];
u32 long_port_mask;
u32 short_port_mask;
struct work_struct dig_port_work;
@@ -500,6 +511,7 @@ struct intel_fbc {
bool enabled;
bool active;
+ bool flip_pending;
bool underrun_detected;
struct work_struct underrun_work;
@@ -567,12 +579,6 @@ struct intel_fbc {
unsigned int gen9_wa_cfb_stride;
} params;
- struct intel_fbc_work {
- bool scheduled;
- u64 scheduled_vblank;
- struct work_struct work;
- } work;
-
const char *no_fbc_reason;
};
@@ -608,26 +614,17 @@ struct i915_psr {
bool sink_support;
struct intel_dp *enabled;
bool active;
- struct delayed_work work;
+ struct work_struct work;
unsigned busy_frontbuffer_bits;
bool sink_psr2_support;
bool link_standby;
bool colorimetry_support;
bool alpm;
- bool has_hw_tracking;
bool psr2_enabled;
u8 sink_sync_latency;
bool debug;
ktime_t last_entry_attempt;
ktime_t last_exit;
-
- void (*enable_source)(struct intel_dp *,
- const struct intel_crtc_state *);
- void (*disable_source)(struct intel_dp *,
- const struct intel_crtc_state *);
- void (*enable_sink)(struct intel_dp *);
- void (*activate)(struct intel_dp *);
- void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
};
enum intel_pch {
@@ -639,7 +636,7 @@ enum intel_pch {
PCH_KBP, /* Kaby Lake PCH */
PCH_CNP, /* Cannon Lake PCH */
PCH_ICP, /* Ice Lake PCH */
- PCH_NOP,
+ PCH_NOP, /* PCH without south display */
};
enum intel_sbi_destination {
@@ -782,11 +779,17 @@ struct intel_rps {
u8 rp0_freq; /* Non-overclocked max frequency. */
u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
- u8 up_threshold; /* Current %busy required to uplock */
- u8 down_threshold; /* Current %busy required to downclock */
-
int last_adj;
- enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
+
+ struct {
+ struct mutex mutex;
+
+ enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
+ unsigned int interactive;
+
+ u8 up_threshold; /* Current %busy required to upclock */
+ u8 down_threshold; /* Current %busy required to downclock */
+ } power;
bool enabled;
atomic_t num_waiters;
@@ -955,7 +958,7 @@ struct i915_gem_mm {
/**
* Small stash of WC pages
*/
- struct pagevec wc_stash;
+ struct pagestash wc_stash;
/**
* tmpfs instance used for shmem backed objects
@@ -1003,16 +1006,13 @@ struct i915_gem_mm {
#define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */
#define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */
-enum modeset_restore {
- MODESET_ON_LID_OPEN,
- MODESET_DONE,
- MODESET_SUSPENDED,
-};
+#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
#define DP_AUX_F 0x60
#define DDC_PIN_B 0x05
@@ -1057,9 +1057,9 @@ struct intel_vbt_data {
/* Feature bits */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
- unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
+ unsigned int int_lvds_support:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
unsigned int panel_type:4;
@@ -1075,7 +1075,6 @@ struct intel_vbt_data {
int vswing;
bool low_vswing;
bool initialized;
- bool support;
int bpp;
struct edp_power_seq pps;
} edp;
@@ -1086,8 +1085,8 @@ struct intel_vbt_data {
bool require_aux_wakeup;
int idle_frames;
enum psr_lines_to_wait lines_to_wait;
- int tp1_wakeup_time;
- int tp2_tp3_wakeup_time;
+ int tp1_wakeup_time_us;
+ int tp2_tp3_wakeup_time_us;
} psr;
struct {
@@ -1272,20 +1271,11 @@ enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_MAX,
};
-struct intel_pipe_crc_entry {
- uint32_t frame;
- uint32_t crc[5];
-};
-
#define INTEL_PIPE_CRC_ENTRIES_NR 128
struct intel_pipe_crc {
spinlock_t lock;
- bool opened; /* exclusive access to the result file */
- struct intel_pipe_crc_entry *entries;
- enum intel_pipe_crc_source source;
- int head, tail;
- wait_queue_head_t wq;
int skipped;
+ enum intel_pipe_crc_source source;
};
struct i915_frontbuffer_tracking {
@@ -1300,7 +1290,7 @@ struct i915_frontbuffer_tracking {
};
struct i915_wa_reg {
- i915_reg_t addr;
+ u32 addr;
u32 value;
/* bitmask representing WA bits */
u32 mask;
@@ -1740,12 +1730,9 @@ struct drm_i915_private {
unsigned long quirks;
- enum modeset_restore modeset_restore;
- struct mutex modeset_restore_lock;
struct drm_atomic_state *modeset_restore_state;
struct drm_modeset_acquire_ctx reset_ctx;
- struct list_head vm_list; /* Global list of all address spaces */
struct i915_ggtt ggtt; /* VM representing the global address space */
struct i915_gem_mm mm;
@@ -1851,6 +1838,7 @@ struct drm_i915_private {
*/
struct ida hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
} contexts;
@@ -1958,7 +1946,9 @@ struct drm_i915_private {
*/
struct i915_perf_stream *exclusive_stream;
+ struct intel_context *pinned_ctx;
u32 specific_ctx_id;
+ u32 specific_ctx_id_mask;
struct hrtimer poll_check_timer;
wait_queue_head_t poll_wq;
@@ -2311,6 +2301,7 @@ intel_info(const struct drm_i915_private *dev_priv)
}
#define INTEL_INFO(dev_priv) intel_info((dev_priv))
+#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
@@ -2563,17 +2554,10 @@ intel_info(const struct drm_i915_private *dev_priv)
(IS_CANNONLAKE(dev_priv) || \
IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
-/*
- * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
- * even when in MSI mode. This results in spurious interrupt warnings if the
- * legacy irq no. is shared with another device. The kernel then disables that
- * interrupt source and so prevents the other device from working properly.
- *
- * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX
- * interrupts.
- */
-#define HAS_AUX_IRQ(dev_priv) true
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
+#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
+ IS_GEMINILAKE(dev_priv) || \
+ IS_KABYLAKE(dev_priv))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
@@ -2748,14 +2732,14 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
int intel_engines_init(struct drm_i915_private *dev_priv);
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
+
/* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
- enum hpd_pin pin);
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
enum port port);
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
@@ -3102,9 +3086,6 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
}
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-void i915_vma_move_to_active(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -3169,12 +3150,14 @@ void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
+void i915_gem_fini(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags);
+ unsigned int flags, long timeout);
int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
-int i915_gem_fault(struct vm_fault *vmf);
+vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout,
@@ -3213,7 +3196,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
- return container_of(vm, struct i915_hw_ppgtt, base);
+ return container_of(vm, struct i915_hw_ppgtt, vm);
}
/* i915_gem_fence_reg.c */
@@ -3320,7 +3303,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_shrinker_register(struct drm_i915_private *i915);
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-
+void i915_gem_shrinker_taints_mutex(struct mutex *mutex);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -3445,6 +3428,8 @@ extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
+extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
+ bool interactive);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
@@ -3678,14 +3663,6 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
-static inline unsigned long
-timespec_to_jiffies_timeout(const struct timespec *value)
-{
- unsigned long j = timespec_to_jiffies(value);
-
- return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
/*
* If you need to wait X milliseconds between events A and B, but event B
* doesn't happen exactly after event A, you record the timestamp (jiffies) of
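
One subtlety behind the i915_error_injected()/i915_load_error() pair declared above: __i915_inject_load_failure() (see the i915_drv.c hunk earlier) now zeroes the inject_load_failure modparam once the armed failure fires, so "a failure was injected" becomes detectable as a non-zero fail count combined with a cleared modparam, and i915_load_error() demotes such errors to KERN_DEBUG. A condensed, self-contained model of that one-shot state machine (variable names follow the patch; a CONFIG_DRM_I915_DEBUG build is assumed):

static unsigned int inject_load_failure;   /* modparam: checkpoint to fail at */
static unsigned int i915_load_fail_count;  /* checkpoints passed so far */

static bool inject_load_failure_sketch(void)
{
	if (!inject_load_failure)
		return false;
	if (++i915_load_fail_count == inject_load_failure) {
		inject_load_failure = 0;   /* one-shot: disarm after firing */
		return true;
	}
	return false;
}

static bool error_injected_sketch(void)
{
	/* True only after the armed failure has actually fired. */
	return i915_load_fail_count && !inject_load_failure;
}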
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 17c5097721e8..fcc73a6ab503 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -65,7 +65,7 @@ insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
{
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
+ return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
size, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -139,6 +139,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static u32 __i915_gem_park(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(i915->gt.active_requests);
GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
@@ -181,6 +183,8 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
void i915_gem_park(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(i915->gt.active_requests);
@@ -193,6 +197,8 @@ void i915_gem_park(struct drm_i915_private *i915)
void i915_gem_unpark(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!i915->gt.active_requests);
@@ -243,17 +249,17 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct i915_vma *vma;
u64 pinned;
- pinned = ggtt->base.reserved;
+ pinned = ggtt->vm.reserved;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
- list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
- args->aper_size = ggtt->base.total;
+ args->aper_size = ggtt->vm.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@@ -796,7 +802,7 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
* that was!).
*/
- wmb();
+ i915_gem_chipset_flush(dev_priv);
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
@@ -831,6 +837,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
}
break;
+ case I915_GEM_DOMAIN_WC:
+ wmb();
+ break;
+
case I915_GEM_DOMAIN_CPU:
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
break;
@@ -1217,9 +1227,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb();
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start, I915_CACHE_NONE, 0);
wmb();
} else {
page_base += offset & PAGE_MASK;
@@ -1240,8 +1250,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
out_unpin:
if (node.allocated) {
wmb();
- ggtt->base.clear_range(&ggtt->base,
- node.start, node.size);
+ ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
@@ -1420,9 +1429,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb(); /* flush the write before we modify the GGTT */
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start, I915_CACHE_NONE, 0);
wmb(); /* flush modifications to the GGTT (insert_page) */
} else {
page_base += offset & PAGE_MASK;
@@ -1449,8 +1458,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
out_unpin:
if (node.allocated) {
wmb();
- ggtt->base.clear_range(&ggtt->base,
- node.start, node.size);
+ ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
@@ -1619,6 +1627,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto err;
}
+ /* Writes not allowed into this read-only object */
+ if (i915_gem_object_is_readonly(obj)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
ret = -ENODEV;
@@ -1991,9 +2005,9 @@ compute_partial_view(struct drm_i915_gem_object *obj,
* The current feature set supported by i915_gem_fault() and thus GTT mmaps
* is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
*/
-int i915_gem_fault(struct vm_fault *vmf)
+vm_fault_t i915_gem_fault(struct vm_fault *vmf)
{
-#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
+#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
struct vm_area_struct *area = vmf->vma;
struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
struct drm_device *dev = obj->base.dev;
@@ -2004,6 +2018,10 @@ int i915_gem_fault(struct vm_fault *vmf)
pgoff_t page_offset;
int ret;
+ /* Sanity check that we allow writing into this object */
+ if (i915_gem_object_is_readonly(obj) && write)
+ return VM_FAULT_SIGBUS;
+
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
@@ -2114,10 +2132,9 @@ err:
* fail). But any other -EIO isn't ours (e.g. swap in failure)
* and so needs to be reported.
*/
- if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
- ret = VM_FAULT_SIGBUS;
- break;
- }
+ if (!i915_terminally_wedged(&dev_priv->gpu_error))
+ return VM_FAULT_SIGBUS;
+ /* else: fall through */
case -EAGAIN:
/*
* EAGAIN means the gpu is hung and we'll wait for the error
@@ -2132,21 +2149,16 @@ err:
* EBUSY is ok: this just means that another thread
* already did the job.
*/
- ret = VM_FAULT_NOPAGE;
- break;
+ return VM_FAULT_NOPAGE;
case -ENOMEM:
- ret = VM_FAULT_OOM;
- break;
+ return VM_FAULT_OOM;
case -ENOSPC:
case -EFAULT:
- ret = VM_FAULT_SIGBUS;
- break;
+ return VM_FAULT_SIGBUS;
default:
WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
- ret = VM_FAULT_SIGBUS;
- break;
+ return VM_FAULT_SIGBUS;
}
- return ret;
}
static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
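
The i915_gem_fault() rework above changes two things at once: the return type becomes the dedicated vm_fault_t, and the errno translation returns directly from each case instead of accumulating into ret and breaking. A stripped-down sketch of the resulting mapping (the wedged-GPU check on -EIO is elided for brevity):

static vm_fault_t fault_errno_sketch(int ret)
{
	switch (ret) {
	case -EAGAIN:		/* GPU hung; the error handler will reset */
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:		/* another thread already handled the fault */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	case -ENOSPC:
	case -EFAULT:
		return VM_FAULT_SIGBUS;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		return VM_FAULT_SIGBUS;
	}
}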
@@ -2265,7 +2277,9 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
/* Attempt to reap some mmap space from dead objects */
do {
- err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+ err = i915_gem_wait_for_idle(dev_priv,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
if (err)
break;
@@ -2410,29 +2424,15 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
rcu_read_unlock();
}
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass)
+static struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages;
- if (i915_gem_object_has_pinned_pages(obj))
- return;
-
- GEM_BUG_ON(obj->bind_count);
- if (!i915_gem_object_has_pages(obj))
- return;
-
- /* May be called by shrinker from within get_pages() (on another bo) */
- mutex_lock_nested(&obj->mm.lock, subclass);
- if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
- goto unlock;
-
- /* ->put_pages might need to allocate memory for the bit17 swizzle
- * array, hence protect them from being reaped by removing them from gtt
- * lists early. */
pages = fetch_and_zero(&obj->mm.pages);
- GEM_BUG_ON(!pages);
+ if (!pages)
+ return NULL;
spin_lock(&i915->mm.obj_lock);
list_del(&obj->mm.link);
@@ -2451,12 +2451,37 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
}
__i915_gem_object_reset_page_iter(obj);
+ obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
+ return pages;
+}
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass)
+{
+ struct sg_table *pages;
+ if (i915_gem_object_has_pinned_pages(obj))
+ return;
+
+ GEM_BUG_ON(obj->bind_count);
+ if (!i915_gem_object_has_pages(obj))
+ return;
+
+ /* May be called by shrinker from within get_pages() (on another bo) */
+ mutex_lock_nested(&obj->mm.lock, subclass);
+ if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+ goto unlock;
+
+ /*
+ * ->put_pages might need to allocate memory for the bit17 swizzle
+ * array, hence protect them from being reaped by removing them from gtt
+ * lists early.
+ */
+ pages = __i915_gem_object_unset_pages(obj);
if (!IS_ERR(pages))
obj->ops->put_pages(obj, pages);
- obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
-
unlock:
mutex_unlock(&obj->mm.lock);
}
@@ -2974,16 +2999,16 @@ static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
- DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
- ctx->name, atomic_read(&ctx->guilty_count),
- score, yesno(banned && bannable));
-
/* Cool contexts don't accumulate client ban score */
if (!bannable)
return;
- if (banned)
+ if (banned) {
+ DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
+ ctx->name, atomic_read(&ctx->guilty_count),
+ score);
i915_gem_context_set_banned(ctx);
+ }
if (!IS_ERR_OR_NULL(ctx->file_priv))
i915_gem_client_mark_guilty(ctx->file_priv, ctx);
@@ -3031,7 +3056,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
- struct i915_request *request = NULL;
+ struct i915_request *request;
/*
* During the reset sequence, we must prevent the engine from
@@ -3042,52 +3067,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
*/
intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
- /*
- * Prevent the signaler thread from updating the request
- * state (by calling dma_fence_signal) as we are processing
- * the reset. The write from the GPU of the seqno is
- * asynchronous and the signaler thread may see a different
- * value to us and declare the request complete, even though
- * the reset routine have picked that request as the active
- * (incomplete) request. This conflict is not handled
- * gracefully!
- */
- kthread_park(engine->breadcrumbs.signaler);
-
- /*
- * Prevent request submission to the hardware until we have
- * completed the reset in i915_gem_reset_finish(). If a request
- * is completed by one engine, it may then queue a request
- * to a second via its execlists->tasklet *just* as we are
- * calling engine->init_hw() and also writing the ELSP.
- * Turning off the execlists->tasklet until the reset is over
- * prevents the race.
- *
- * Note that this needs to be a single atomic operation on the
- * tasklet (flush existing tasks, prevent new tasks) to prevent
- * a race between reset and set-wedged. It is not, so we do the best
- * we can atm and make sure we don't lock the machine up in the more
- * common case of recursively being called from set-wedged from inside
- * i915_reset.
- */
- if (!atomic_read(&engine->execlists.tasklet.count))
- tasklet_kill(&engine->execlists.tasklet);
- tasklet_disable(&engine->execlists.tasklet);
-
- /*
- * We're using worker to queue preemption requests from the tasklet in
- * GuC submission mode.
- * Even though tasklet was disabled, we may still have a worker queued.
- * Let's make sure that all workers scheduled before disabling the
- * tasklet are completed before continuing with the reset.
- */
- if (engine->i915->guc.preempt_wq)
- flush_workqueue(engine->i915->guc.preempt_wq);
-
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- request = i915_gem_find_active_request(engine);
+ request = engine->reset.prepare(engine);
if (request && request->fence.error == -EIO)
request = ERR_PTR(-EIO); /* Previous reset failed! */
@@ -3117,43 +3097,24 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
return err;
}
-static void skip_request(struct i915_request *request)
-{
- void *vaddr = request->ring->vaddr;
- u32 head;
-
- /* As this request likely depends on state from the lost
- * context, clear out all the user operations leaving the
- * breadcrumb at the end (so we get the fence notifications).
- */
- head = request->head;
- if (request->postfix < head) {
- memset(vaddr + head, 0, request->ring->size - head);
- head = 0;
- }
- memset(vaddr + head, 0, request->postfix - head);
-
- dma_fence_set_error(&request->fence, -EIO);
-}
-
static void engine_skip_context(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- struct i915_gem_context *hung_ctx = request->ctx;
+ struct i915_gem_context *hung_ctx = request->gem_context;
struct i915_timeline *timeline = request->timeline;
unsigned long flags;
GEM_BUG_ON(timeline == &engine->timeline);
spin_lock_irqsave(&engine->timeline.lock, flags);
- spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
+ spin_lock(&timeline->lock);
list_for_each_entry_continue(request, &engine->timeline.requests, link)
- if (request->ctx == hung_ctx)
- skip_request(request);
+ if (request->gem_context == hung_ctx)
+ i915_request_skip(request, -EIO);
list_for_each_entry(request, &timeline->requests, link)
- skip_request(request);
+ i915_request_skip(request, -EIO);
spin_unlock(&timeline->lock);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -3195,11 +3156,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
}
if (stalled) {
- i915_gem_context_mark_guilty(request->ctx);
- skip_request(request);
+ i915_gem_context_mark_guilty(request->gem_context);
+ i915_request_skip(request, -EIO);
/* If this context is now banned, skip all pending requests. */
- if (i915_gem_context_is_banned(request->ctx))
+ if (i915_gem_context_is_banned(request->gem_context))
engine_skip_context(request);
} else {
/*
@@ -3209,15 +3170,17 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
*/
request = i915_gem_find_active_request(engine);
if (request) {
- i915_gem_context_mark_innocent(request->ctx);
+ unsigned long flags;
+
+ i915_gem_context_mark_innocent(request->gem_context);
dma_fence_set_error(&request->fence, -EAGAIN);
/* Rewind the engine to replay the incomplete rq */
- spin_lock_irq(&engine->timeline.lock);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
request = list_prev_entry(request, link);
if (&request->link == &engine->timeline.requests)
request = NULL;
- spin_unlock_irq(&engine->timeline.lock);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
}
@@ -3238,13 +3201,8 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
if (request)
request = i915_gem_reset_request(engine, request, stalled);
- if (request) {
- DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
- engine->name, request->global_seqno);
- }
-
/* Setup the CS to resume from the breadcrumb of the hung request */
- engine->reset_hw(engine, request);
+ engine->reset.reset(engine, request);
}
void i915_gem_reset(struct drm_i915_private *dev_priv,
@@ -3258,14 +3216,14 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
i915_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct i915_gem_context *ctx;
+ struct intel_context *ce;
i915_gem_reset_engine(engine,
engine->hangcheck.active_request,
stalled_mask & ENGINE_MASK(id));
- ctx = fetch_and_zero(&engine->last_retired_context);
- if (ctx)
- intel_context_unpin(ctx, engine);
+ ce = fetch_and_zero(&engine->last_retired_context);
+ if (ce)
+ intel_context_unpin(ce);
/*
	 * Ostensibly, we always want a context loaded for powersaving,
@@ -3283,7 +3241,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
rq = i915_request_alloc(engine,
dev_priv->kernel_context);
if (!IS_ERR(rq))
- __i915_request_add(rq, false);
+ i915_request_add(rq);
}
}
@@ -3292,8 +3250,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
- tasklet_enable(&engine->execlists.tasklet);
- kthread_unpark(engine->breadcrumbs.signaler);
+ engine->reset.finish(engine);
intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
}
@@ -3571,6 +3528,22 @@ new_requests_since_last_retire(const struct drm_i915_private *i915)
work_pending(&i915->gt.idle_work.work));
}
+static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return;
+
+ GEM_BUG_ON(i915->gt.active_requests);
+ for_each_engine(engine, i915, id) {
+ GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
+ GEM_BUG_ON(engine->last_retired_context !=
+ to_intel_context(i915->kernel_context, engine));
+ }
+}
+
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
@@ -3582,6 +3555,24 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (!READ_ONCE(dev_priv->gt.awake))
return;
+ if (READ_ONCE(dev_priv->gt.active_requests))
+ return;
+
+ /*
+ * Flush out the last user context, leaving only the pinned
+ * kernel context resident. When we are idling on the kernel_context,
+ * no more new requests (with a context switch) are emitted and we
+ * can finally rest. A consequence is that the idle work handler is
+ * always called at least twice before idling (and if the system is
+ * idle that implies a round trip through the retire worker).
+ */
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_switch_to_kernel_context(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
+ READ_ONCE(dev_priv->gt.active_requests));
+
/*
* Wait for last execlists context complete, but bail out in case a
* new request is submitted. As we don't trust the hardware, we
@@ -3615,6 +3606,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
epoch = __i915_gem_park(dev_priv);
+ assert_kernel_context_is_current(dev_priv);
+
rearm_hangcheck = false;
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3761,9 +3754,31 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
return ret;
}
-static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
+static long wait_for_timeline(struct i915_timeline *tl,
+ unsigned int flags, long timeout)
{
- return i915_gem_active_wait(&tl->last_request, flags);
+ struct i915_request *rq;
+
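+	/*
+	 * The remaining timeout is threaded through each timeline in
+	 * turn by the caller, so an idle timeline hands it back
+	 * unconsumed and the total wait across all timelines stays
+	 * bounded.
+	 */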
+ rq = i915_gem_active_get_unlocked(&tl->last_request);
+ if (!rq)
+ return timeout;
+
+ /*
+ * "Race-to-idle".
+ *
+	 * Switching to the kernel context is often used as a synchronous
+ * step prior to idling, e.g. in suspend for flushing all
+ * current operations to memory before sleeping. These we
+ * want to complete as quickly as possible to avoid prolonged
+ * stalls, so allow the gpu to boost to maximum clocks.
+ */
+ if (flags & I915_WAIT_FOR_IDLE_BOOST)
+ gen6_rps_boost(rq, NULL);
+
+ timeout = i915_request_wait(rq, flags, timeout);
+ i915_request_put(rq);
+
+ return timeout;
}
static int wait_for_engines(struct drm_i915_private *i915)
@@ -3779,8 +3794,13 @@ static int wait_for_engines(struct drm_i915_private *i915)
return 0;
}
-int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
+int i915_gem_wait_for_idle(struct drm_i915_private *i915,
+ unsigned int flags, long timeout)
{
+ GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
+ flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
+ timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
+
/* If the device is asleep, we have no requests outstanding */
if (!READ_ONCE(i915->gt.awake))
return 0;
@@ -3792,26 +3812,31 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
lockdep_assert_held(&i915->drm.struct_mutex);
list_for_each_entry(tl, &i915->gt.timelines, link) {
- err = wait_for_timeline(tl, flags);
- if (err)
- return err;
+ timeout = wait_for_timeline(tl, flags, timeout);
+ if (timeout < 0)
+ return timeout;
}
- i915_retire_requests(i915);
- return wait_for_engines(i915);
+ err = wait_for_engines(i915);
+ if (err)
+ return err;
+
+ i915_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests);
} else {
struct intel_engine_cs *engine;
enum intel_engine_id id;
- int err;
for_each_engine(engine, i915, id) {
- err = wait_for_timeline(&engine->timeline, flags);
- if (err)
- return err;
- }
+ struct i915_timeline *tl = &engine->timeline;
- return 0;
+ timeout = wait_for_timeline(tl, flags, timeout);
+ if (timeout < 0)
+ return timeout;
+ }
}
+
+ return 0;
}
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
@@ -4385,7 +4410,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 flags)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_address_space *vm = &dev_priv->ggtt.base;
+ struct i915_address_space *vm = &dev_priv->ggtt.vm;
struct i915_vma *vma;
int ret;
@@ -4973,25 +4998,25 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
i915_gem_object_put(obj);
}
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+void i915_gem_sanitize(struct drm_i915_private *i915)
{
- struct i915_gem_context *kernel_context = i915->kernel_context;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ int err;
- for_each_engine(engine, i915, id) {
- GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
- GEM_BUG_ON(engine->last_retired_context != kernel_context);
- }
-}
+ GEM_TRACE("\n");
-void i915_gem_sanitize(struct drm_i915_private *i915)
-{
- if (i915_terminally_wedged(&i915->gpu_error)) {
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_get(i915);
+ intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+
+ /*
+ * As we have just resumed the machine and woken the device up from
+ * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+ * back to defaults, recovering from whatever wedged state we left it
+	 * in, and so it is worth trying to use the device once more.
+ */
+ if (i915_terminally_wedged(&i915->gpu_error))
i915_gem_unset_wedged(i915);
- mutex_unlock(&i915->drm.struct_mutex);
- }
/*
* If we inherit context state from the BIOS or earlier occupants
@@ -5001,60 +5026,94 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
+ err = -ENODEV;
if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
- WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
+ err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
+ if (!err)
+ intel_engines_sanitize(i915);
+
+ intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_runtime_pm_put(i915);
+
+ i915_gem_contexts_lost(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
}
-int i915_gem_suspend(struct drm_i915_private *dev_priv)
+int i915_gem_suspend(struct drm_i915_private *i915)
{
- struct drm_device *dev = &dev_priv->drm;
int ret;
- intel_runtime_pm_get(dev_priv);
- intel_suspend_gt_powersave(dev_priv);
+ GEM_TRACE("\n");
- mutex_lock(&dev->struct_mutex);
+ intel_runtime_pm_get(i915);
+ intel_suspend_gt_powersave(i915);
- /* We have to flush all the executing contexts to main memory so
+ mutex_lock(&i915->drm.struct_mutex);
+
+ /*
+ * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the last
* context image is coherent, we have to switch away from it. That
- * leaves the dev_priv->kernel_context still active when
+ * leaves the i915->kernel_context still active when
* we actually suspend, and its image in memory may not match the GPU
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
- ret = i915_gem_switch_to_kernel_context(dev_priv);
+ if (!i915_terminally_wedged(&i915->gpu_error)) {
+ ret = i915_gem_switch_to_kernel_context(i915);
if (ret)
goto err_unlock;
- ret = i915_gem_wait_for_idle(dev_priv,
+ ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED |
+ I915_WAIT_FOR_IDLE_BOOST,
+ MAX_SCHEDULE_TIMEOUT);
if (ret && ret != -EIO)
goto err_unlock;
- assert_kernel_context_is_current(dev_priv);
+ assert_kernel_context_is_current(i915);
}
- i915_gem_contexts_lost(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ i915_retire_requests(i915); /* ensure we flush after wedging */
+
+ mutex_unlock(&i915->drm.struct_mutex);
- intel_uc_suspend(dev_priv);
+ intel_uc_suspend(i915);
- cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+ cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+ cancel_delayed_work_sync(&i915->gt.retire_work);
- /* As the idle_work is rearming if it detects a race, play safe and
+ /*
+	 * As the idle_work rearms itself if it detects a race, play safe and
* repeat the flush until it is definitely idle.
*/
- drain_delayed_work(&dev_priv->gt.idle_work);
+ drain_delayed_work(&i915->gt.idle_work);
- /* Assert that we sucessfully flushed all the work and
+ /*
+ * Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
- WARN_ON(dev_priv->gt.awake);
- if (WARN_ON(!intel_engines_are_idle(dev_priv)))
- i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
+ WARN_ON(i915->gt.awake);
+ if (WARN_ON(!intel_engines_are_idle(i915)))
+ i915_gem_set_wedged(i915); /* no hope, discard everything */
+
+ intel_runtime_pm_put(i915);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ intel_runtime_pm_put(i915);
+ return ret;
+}
+
+void i915_gem_suspend_late(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ struct list_head *phases[] = {
+ &i915->mm.unbound_list,
+ &i915->mm.bound_list,
+ NULL
+ }, **phase;
/*
	 * Neither the BIOS, ourselves nor any other kernel
@@ -5075,20 +5134,22 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
- intel_uc_sanitize(dev_priv);
- i915_gem_sanitize(dev_priv);
- intel_runtime_pm_put(dev_priv);
- return 0;
+ mutex_lock(&i915->drm.struct_mutex);
+ for (phase = phases; *phase; phase++) {
+ list_for_each_entry(obj, *phase, mm.link)
+ WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
-err_unlock:
- mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_put(dev_priv);
- return ret;
+ intel_uc_sanitize(i915);
+ i915_gem_sanitize(i915);
}
void i915_gem_resume(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
WARN_ON(i915->gt.awake);
mutex_lock(&i915->drm.struct_mutex);
@@ -5262,8 +5323,18 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
/* Only when the HW is re-initialised, can we replay the requests */
ret = __i915_gem_restart_engines(dev_priv);
+ if (ret)
+ goto cleanup_uc;
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ return 0;
+
+cleanup_uc:
+ intel_uc_fini_hw(dev_priv);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
return ret;
}
@@ -5300,7 +5371,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (engine->init_context)
err = engine->init_context(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
if (err)
goto err_active;
}
@@ -5309,9 +5380,11 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (err)
goto err_active;
- err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
- if (err)
+ if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
+ i915_gem_set_wedged(i915);
+ err = -EIO; /* Caller will declare us wedged */
goto err_active;
+ }
assert_kernel_context_is_current(i915);
@@ -5374,7 +5447,9 @@ err_active:
if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
goto out_ctx;
- if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
+ if (WARN_ON(i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT)))
goto out_ctx;
i915_gem_contexts_lost(i915);
@@ -5385,12 +5460,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
- /*
- * We need to fallback to 4K pages since gvt gtt handling doesn't
- * support huge page entries - we will need to check either hypervisor
- * mm can support huge guest page or just do emulation in gvt.
- */
- if (intel_vgpu_active(dev_priv))
+	/* We need to fall back to 4K pages if the host doesn't support huge gtt. */
+ if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
mkwrite_device_info(dev_priv)->page_sizes =
I915_GTT_PAGE_SIZE_4K;
@@ -5408,13 +5479,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- ret = intel_wopcm_init(&dev_priv->wopcm);
+ ret = intel_uc_init_misc(dev_priv);
if (ret)
return ret;
- ret = intel_uc_init_misc(dev_priv);
+ ret = intel_wopcm_init(&dev_priv->wopcm);
if (ret)
- return ret;
+ goto err_uc_misc;
/* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
@@ -5490,8 +5561,14 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* driver doesn't explode during runtime.
*/
err_init_hw:
- i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
- i915_gem_contexts_lost(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ WARN_ON(i915_gem_suspend(dev_priv));
+ i915_gem_suspend_late(dev_priv);
+
+ i915_gem_drain_workqueue(dev_priv);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
err_uc_init:
intel_uc_fini(dev_priv);
@@ -5508,6 +5585,7 @@ err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
+err_uc_misc:
intel_uc_fini_misc(dev_priv);
if (ret != -EIO)
@@ -5520,7 +5598,8 @@ err_unlock:
* for all other failure, such as an allocation failure, bail.
*/
if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
- DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+ i915_load_error(dev_priv,
+ "Failed to initialize GPU, declaring it wedged!\n");
i915_gem_set_wedged(dev_priv);
}
ret = 0;
@@ -5530,6 +5609,28 @@ err_unlock:
return ret;
}
+void i915_gem_fini(struct drm_i915_private *dev_priv)
+{
+ i915_gem_suspend_late(dev_priv);
+
+ /* Flush any outstanding unpin_work. */
+ i915_gem_drain_workqueue(dev_priv);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_uc_fini_hw(dev_priv);
+ intel_uc_fini(dev_priv);
+ i915_gem_cleanup_engines(dev_priv);
+ i915_gem_contexts_fini(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ intel_uc_fini_misc(dev_priv);
+ i915_gem_cleanup_userptr(dev_priv);
+
+ i915_gem_drain_freed_objects(dev_priv);
+
+ WARN_ON(!list_empty(&dev_priv->contexts.list));
+}
+
void i915_gem_init_mmio(struct drm_i915_private *i915)
{
i915_gem_sanitize(i915);
@@ -5694,16 +5795,17 @@ int i915_gem_freeze(struct drm_i915_private *dev_priv)
return 0;
}
-int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+int i915_gem_freeze_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
struct list_head *phases[] = {
- &dev_priv->mm.unbound_list,
- &dev_priv->mm.bound_list,
+ &i915->mm.unbound_list,
+ &i915->mm.bound_list,
NULL
- }, **p;
+ }, **phase;
- /* Called just before we write the hibernation image.
+ /*
+ * Called just before we write the hibernation image.
*
* We need to update the domain tracking to reflect that the CPU
* will be accessing all the pages to create and restore from the
@@ -5717,15 +5819,15 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
* the objects as well, see i915_gem_freeze()
*/
- i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
- i915_gem_drain_freed_objects(dev_priv);
+ i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
+ i915_gem_drain_freed_objects(i915);
- spin_lock(&dev_priv->mm.obj_lock);
- for (p = phases; *p; p++) {
- list_for_each_entry(obj, *p, mm.link)
- __start_cpu_write(obj);
+ mutex_lock(&i915->drm.struct_mutex);
+ for (phase = phases; *phase; phase++) {
+ list_for_each_entry(obj, *phase, mm.link)
+ WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
}
- spin_unlock(&dev_priv->mm.obj_lock);
+ mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
@@ -6045,16 +6147,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
goto err_unlock;
}
- pages = fetch_and_zero(&obj->mm.pages);
- if (pages) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
- __i915_gem_object_reset_page_iter(obj);
-
- spin_lock(&i915->mm.obj_lock);
- list_del(&obj->mm.link);
- spin_unlock(&i915->mm.obj_lock);
- }
+ pages = __i915_gem_object_unset_pages(obj);
obj->ops = &i915_gem_phys_ops;
@@ -6072,7 +6165,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
err_xfer:
obj->ops = &i915_gem_object_ops;
- obj->mm.pages = pages;
+ if (!IS_ERR_OR_NULL(pages)) {
+ unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+ __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+ }
err_unlock:
mutex_unlock(&obj->mm.lock);
return err;
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 525920404ede..e46592956872 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -26,6 +26,7 @@
#define __I915_GEM_H__
#include <linux/bug.h>
+#include <linux/interrupt.h>
struct drm_i915_private;
@@ -62,9 +63,12 @@ struct drm_i915_private;
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
+#define GEM_TRACE_DUMP_ON(expr) \
+ do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
#else
#define GEM_TRACE(...) do { } while (0)
#define GEM_TRACE_DUMP() do { } while (0)
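+/*
+ * When GEM tracing is compiled out, BUILD_BUG_ON_INVALID() still
+ * type-checks the expression at compile time but generates no code,
+ * so the dump-on-condition hook costs nothing at runtime.
+ */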
+#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
#define I915_NUM_ENGINES 8
@@ -72,4 +76,21 @@ struct drm_i915_private;
void i915_gem_park(struct drm_i915_private *i915);
void i915_gem_unpark(struct drm_i915_private *i915);
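+/*
+ * Tasklet guards with "sync once" semantics: only the first disable
+ * waits for a concurrently running tasklet to finish (nested disables
+ * merely bump the count), and only the final enable kills off any
+ * pending reschedule. A tasklet counts as enabled iff its disable
+ * count is zero.
+ */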
+static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
+{
+ if (atomic_inc_return(&t->count) == 1)
+ tasklet_unlock_wait(t);
+}
+
+static inline void __tasklet_enable_sync_once(struct tasklet_struct *t)
+{
+ if (atomic_dec_return(&t->count) == 0)
+ tasklet_kill(t);
+}
+
+static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
+{
+ return !atomic_read(&t->count);
+}
+
#endif /* __I915_GEM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 060335d3d9e0..b10770cfccd2 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -127,14 +127,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
struct intel_context *ce = &ctx->__engine[n];
- if (!ce->state)
- continue;
-
- WARN_ON(ce->pin_count);
- if (ce->ring)
- intel_ring_free(ce->ring);
-
- __i915_gem_object_release_unless_active(ce->state->obj);
+ if (ce->ops)
+ ce->ops->destroy(ce);
}
kfree(ctx->name);
@@ -203,7 +197,7 @@ static void context_close(struct i915_gem_context *ctx)
*/
lut_close(ctx);
if (ctx->ppgtt)
- i915_ppgtt_close(&ctx->ppgtt->base);
+ i915_ppgtt_close(&ctx->ppgtt->vm);
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_put(ctx);
@@ -214,10 +208,19 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
int ret;
unsigned int max;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
max = GEN11_MAX_CONTEXT_HW_ID;
- else
- max = MAX_CONTEXT_HW_ID;
+ } else {
+ /*
+ * When using GuC in proxy submission, GuC consumes the
+ * highest bit in the context id to indicate proxy submission.
+ */
+ if (USES_GUC_SUBMISSION(dev_priv))
+ max = MAX_GUC_CONTEXT_HW_ID;
+ else
+ max = MAX_CONTEXT_HW_ID;
+ }
+
ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, max, GFP_KERNEL);
@@ -246,7 +249,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
- if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
+ if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -266,6 +269,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
struct i915_gem_context *ctx;
+ unsigned int n;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -283,6 +287,12 @@ __create_hw_context(struct drm_i915_private *dev_priv,
ctx->i915 = dev_priv;
ctx->sched.priority = I915_PRIORITY_NORMAL;
+ for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+ struct intel_context *ce = &ctx->__engine[n];
+
+ ce->gem_context = ctx;
+ }
+
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
@@ -364,7 +374,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
if (USES_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
+ ppgtt = i915_ppgtt_create(dev_priv, file_priv);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -502,8 +512,8 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
}
DRM_DEBUG_DRIVER("%s context support initialized\n",
- dev_priv->engine[RCS]->context_size ? "logical" :
- "fake");
+ DRIVER_CAPS(dev_priv)->has_logical_contexts ?
+ "logical" : "fake");
return 0;
}
@@ -514,16 +524,8 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- for_each_engine(engine, dev_priv, id) {
- engine->legacy_active_context = NULL;
- engine->legacy_active_ppgtt = NULL;
-
- if (!engine->last_retired_context)
- continue;
-
- intel_context_unpin(engine->last_retired_context, engine);
- engine->last_retired_context = NULL;
- }
+ for_each_engine(engine, dev_priv, id)
+ intel_engine_lost_context(engine);
}
void i915_gem_contexts_fini(struct drm_i915_private *i915)
@@ -583,68 +585,122 @@ last_request_on_engine(struct i915_timeline *timeline,
{
struct i915_request *rq;
- if (timeline == &engine->timeline)
- return NULL;
+ GEM_BUG_ON(timeline == &engine->timeline);
rq = i915_gem_active_raw(&timeline->last_request,
&engine->i915->drm.struct_mutex);
- if (rq && rq->engine == engine)
+ if (rq && rq->engine == engine) {
+ GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
+ timeline->name, engine->name,
+ rq->fence.context, rq->fence.seqno);
+ GEM_BUG_ON(rq->timeline != timeline);
return rq;
+ }
return NULL;
}
-static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
+static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
{
- struct i915_timeline *timeline;
+ struct drm_i915_private *i915 = engine->i915;
+ const struct intel_context * const ce =
+ to_intel_context(i915->kernel_context, engine);
+ struct i915_timeline *barrier = ce->ring->timeline;
+ struct intel_ring *ring;
+ bool any_active = false;
- list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
- if (last_request_on_engine(timeline, engine))
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
+ struct i915_request *rq;
+
+ rq = last_request_on_engine(ring->timeline, engine);
+ if (!rq)
+ continue;
+
+ any_active = true;
+
+ if (rq->hw_context == ce)
+ continue;
+
+ /*
+ * Was this request submitted after the previous
+ * switch-to-kernel-context?
+ */
+ if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
+ GEM_TRACE("%s needs barrier for %llx:%d\n",
+ ring->timeline->name,
+ rq->fence.context,
+ rq->fence.seqno);
return false;
+ }
+
+ GEM_TRACE("%s has barrier after %llx:%d\n",
+ ring->timeline->name,
+ rq->fence.context,
+ rq->fence.seqno);
}
- return intel_engine_has_kernel_context(engine);
+ /*
+ * If any other timeline was still active and behind the last barrier,
+ * then our last switch-to-kernel-context must still be queued and
+ * will run last (leaving the engine in the kernel context when it
+ * eventually idles).
+ */
+ if (any_active)
+ return true;
+
+ /* The engine is idle; check that it is idling in the kernel context. */
+ return engine->last_retired_context == ce;
}
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
- struct i915_timeline *timeline;
enum intel_engine_id id;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
- i915_retire_requests(dev_priv);
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915->kernel_context);
+
+ i915_retire_requests(i915);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, i915, id) {
+ struct intel_ring *ring;
struct i915_request *rq;
- if (engine_has_idle_kernel_context(engine))
+ GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
+ if (engine_has_kernel_context_barrier(engine))
continue;
- rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ GEM_TRACE("emit barrier on %s\n", engine->name);
+
+ rq = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
/* Queue this switch after all other activity */
- list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+ list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
struct i915_request *prev;
- prev = last_request_on_engine(timeline, engine);
- if (prev)
- i915_sw_fence_await_sw_fence_gfp(&rq->submit,
- &prev->submit,
- I915_FENCE_GFP);
+ prev = last_request_on_engine(ring->timeline, engine);
+ if (!prev)
+ continue;
+
+ if (prev->gem_context == i915->kernel_context)
+ continue;
+
+ GEM_TRACE("add barrier on %s for %llx:%d\n",
+ engine->name,
+ prev->fence.context,
+ prev->fence.seqno);
+ i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ &prev->submit,
+ I915_FENCE_GFP);
+ i915_timeline_sync_set(rq->timeline, &prev->fence);
}
- /*
- * Force a flush after the switch to ensure that all rendering
- * and operations prior to switching to the kernel context hits
- * memory. This should be guaranteed by the previous request,
- * but an extra layer of paranoia before we declare the system
- * idle (on suspend etc) is advisable!
- */
- __i915_request_add(rq, true);
+ i915_request_add(rq);
}
return 0;
@@ -664,7 +720,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct i915_gem_context *ctx;
int ret;
- if (!dev_priv->engine[RCS]->context_size)
+ if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
return -ENODEV;
if (args->pad != 0)
@@ -747,11 +803,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_CONTEXT_PARAM_GTT_SIZE:
if (ctx->ppgtt)
- args->value = ctx->ppgtt->base.total;
+ args->value = ctx->ppgtt->vm.total;
else if (to_i915(dev)->mm.aliasing_ppgtt)
- args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
+ args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
else
- args->value = to_i915(dev)->ggtt.base.total;
+ args->value = to_i915(dev)->ggtt.vm.total;
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
args->value = i915_gem_context_no_error_capture(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index ace3b129c189..b116e4942c10 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -30,6 +30,7 @@
#include <linux/radix-tree.h>
#include "i915_gem.h"
+#include "i915_scheduler.h"
struct pid;
@@ -45,6 +46,13 @@ struct intel_ring;
#define DEFAULT_CONTEXT_HANDLE 0
+struct intel_context;
+
+struct intel_context_ops {
+ void (*unpin)(struct intel_context *ce);
+ void (*destroy)(struct intel_context *ce);
+};
+
/**
* struct i915_gem_context - client state
*
@@ -144,11 +152,14 @@ struct i915_gem_context {
/** engine: per-engine logical HW state */
struct intel_context {
+ struct i915_gem_context *gem_context;
struct i915_vma *state;
struct intel_ring *ring;
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
+
+ const struct intel_context_ops *ops;
} __engine[I915_NUM_ENGINES];
/** ring_size: size for allocating the per-engine ring buffer */
@@ -263,25 +274,26 @@ to_intel_context(struct i915_gem_context *ctx,
return &ctx->__engine[engine->id];
}
-static inline struct intel_ring *
+static inline struct intel_context *
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
return engine->context_pin(engine, ctx);
}
-static inline void __intel_context_pin(struct i915_gem_context *ctx,
- const struct intel_engine_cs *engine)
+static inline void __intel_context_pin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
GEM_BUG_ON(!ce->pin_count);
ce->pin_count++;
}
-static inline void intel_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static inline void intel_context_unpin(struct intel_context *ce)
{
- engine->context_unpin(engine, ctx);
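+	/*
+	 * Drop one pin; only the final unpin forwards to the backend's
+	 * ce->ops->unpin() to release the HW state.
+	 */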
+ GEM_BUG_ON(!ce->pin_count);
+ if (--ce->pin_count)
+ return;
+
+ GEM_BUG_ON(!ce->ops);
+ ce->ops->unpin(ce);
}
/* i915_gem_context.c */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 69a7aec49e84..82e2ca17a441 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -111,15 +111,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
i915_gem_object_unpin_map(obj);
}
-static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
-}
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
@@ -225,9 +216,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.map = i915_gem_dmabuf_kmap,
- .map_atomic = i915_gem_dmabuf_kmap_atomic,
.unmap = i915_gem_dmabuf_kunmap,
- .unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 54814a196ee4..02b83a5ed96c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -69,7 +69,8 @@ static int ggtt_flush(struct drm_i915_private *i915)
err = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 22df17c8ca9b..3f0c612d42e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -66,6 +66,15 @@ enum {
#define __I915_EXEC_ILLEGAL_FLAGS \
(__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)
+/* Catch emission of unexpected errors for CI! */
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+#undef EINVAL
+#define EINVAL ({ \
+ DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
+ 22; \
+})
+#endif
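+/*
+ * With the override above, every textual use of EINVAL in this file
+ * logs the originating function and line via DRM_DEBUG_DRIVER before
+ * evaluating to the usual 22, so CI can pinpoint which sanity check
+ * rejected an execbuf.
+ */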
+
/**
* DOC: User command execution
*
@@ -534,7 +543,8 @@ eb_add_vma(struct i915_execbuffer *eb,
* paranoia do it everywhere.
*/
if (i == batch_idx) {
- if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+ if (entry->relocation_count &&
+ !(eb->flags[i] & EXEC_OBJECT_PINNED))
eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
@@ -723,7 +733,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->ctx = ctx;
- eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
+ eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;
eb->context_flags = 0;
if (ctx->flags & CONTEXT_NO_ZEROMAP)
@@ -921,7 +931,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
i915_gem_object_unpin_map(cache->rq->batch->obj);
i915_gem_chipset_flush(cache->rq->i915);
- __i915_request_add(cache->rq, true);
+ i915_request_add(cache->rq);
cache->rq = NULL;
}
@@ -948,9 +958,9 @@ static void reloc_cache_reset(struct reloc_cache *cache)
if (cache->node.allocated) {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
- ggtt->base.clear_range(&ggtt->base,
- cache->node.start,
- cache->node.size);
+ ggtt->vm.clear_range(&ggtt->vm,
+ cache->node.start,
+ cache->node.size);
drm_mm_remove_node(&cache->node);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
@@ -1021,7 +1031,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
err = drm_mm_insert_node_in_range
- (&ggtt->base.mm, &cache->node,
+ (&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -1042,9 +1052,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start;
if (cache->node.allocated) {
wmb();
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, page),
- offset, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, page),
+ offset, I915_CACHE_NONE, 0);
} else {
offset += page << PAGE_SHIFT;
}
@@ -1155,18 +1165,16 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_request;
GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
- i915_vma_move_to_active(batch, rq, 0);
- reservation_object_lock(batch->resv, NULL);
- reservation_object_add_excl_fence(batch->resv, &rq->fence);
- reservation_object_unlock(batch->resv);
- i915_vma_unpin(batch);
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+ goto skip_request;
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- reservation_object_lock(vma->resv, NULL);
- reservation_object_add_excl_fence(vma->resv, &rq->fence);
- reservation_object_unlock(vma->resv);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto skip_request;
rq->batch = batch;
+ i915_vma_unpin(batch);
cache->rq = rq;
cache->rq_cmd = cmd;
@@ -1175,6 +1183,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
/* Return with batch mapping (cmd) still pinned */
return 0;
+skip_request:
+ i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1761,25 +1771,6 @@ slow:
return eb_relocate_slow(eb);
}
-static void eb_export_fence(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags)
-{
- struct reservation_object *resv = vma->resv;
-
- /*
- * Ignore errors from failing to allocate the new fence, we can't
- * handle an error right now. Worst case should be missed
- * synchronisation leading to rendering corruption.
- */
- reservation_object_lock(resv, NULL);
- if (flags & EXEC_OBJECT_WRITE)
- reservation_object_add_excl_fence(resv, &rq->fence);
- else if (reservation_object_reserve_shared(resv) == 0)
- reservation_object_add_shared_fence(resv, &rq->fence);
- reservation_object_unlock(resv);
-}
-
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
@@ -1833,8 +1824,11 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
unsigned int flags = eb->flags[i];
struct i915_vma *vma = eb->vma[i];
- i915_vma_move_to_active(vma, eb->request, flags);
- eb_export_fence(vma, eb->request, flags);
+ err = i915_vma_move_to_active(vma, eb->request, flags);
+ if (unlikely(err)) {
+ i915_request_skip(eb->request, err);
+ return err;
+ }
__eb_unreserve_vma(vma, flags);
vma->exec_flags = NULL;
@@ -1874,45 +1868,6 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
return true;
}
-void i915_vma_move_to_active(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- const unsigned int idx = rq->engine->id;
-
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-
- /*
- * Add a reference if we're newly entering the active list.
- * The order in which we add operations to the retirement queue is
- * vital here: mark_active adds to the start of the callback list,
- * such that subsequent callbacks are called first. Therefore we
- * add the active reference first and queue for it to be dropped
- * *last*.
- */
- if (!i915_vma_is_active(vma))
- obj->active_count++;
- i915_vma_set_active(vma, idx);
- i915_gem_active_set(&vma->last_read[idx], rq);
- list_move_tail(&vma->vm_link, &vma->vm->active_list);
-
- obj->write_domain = 0;
- if (flags & EXEC_OBJECT_WRITE) {
- obj->write_domain = I915_GEM_DOMAIN_RENDER;
-
- if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
- i915_gem_active_set(&obj->frontbuffer_write, rq);
-
- obj->read_domains = 0;
- }
- obj->read_domains |= I915_GEM_GPU_DOMAINS;
-
- if (flags & EXEC_OBJECT_NEEDS_FENCE)
- i915_gem_active_set(&vma->last_fence, rq);
-}
-
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
u32 *cs;
@@ -2438,7 +2393,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
err_request:
- __i915_request_add(eb.request, err == 0);
+ i915_request_add(eb.request);
add_to_client(eb.request, file);
if (fences)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 996ab2ad6c45..f00c7fbef79e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -42,7 +42,7 @@
#include "intel_drv.h"
#include "intel_frontbuffer.h"
-#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
+#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
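+/*
+ * Page-table allocations are allowed to fail: __GFP_RETRY_MAYFAIL
+ * retries reclaim but returns NULL rather than invoking the OOM
+ * killer, and __GFP_NOWARN keeps those expected failures quiet;
+ * callers handle -ENOMEM explicitly.
+ */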
/**
* DOC: Global GTT views
@@ -195,18 +195,18 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
u32 unused)
{
u32 pte_flags;
- int ret;
+ int err;
if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
- ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
- vma->size);
- if (ret)
- return ret;
+ err = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start, vma->size);
+ if (err)
+ return err;
}
- /* Currently applicable only to VLV */
+ /* Applicable to VLV, and gen8+ */
pte_flags = 0;
- if (vma->obj->gt_ro)
+ if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
@@ -244,10 +244,13 @@ static void clear_pages(struct i915_vma *vma)
}
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
+ enum i915_cache_level level,
+ u32 flags)
{
- gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
- pte |= addr;
+ gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
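+	/*
+	 * Encode a writable PTE by default and strip the RW bit when a
+	 * read-only mapping is requested; PTE_READ_ONLY is the only
+	 * flag consumed here.
+	 */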
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~_PAGE_RW;
switch (level) {
case I915_CACHE_NONE:
@@ -375,37 +378,70 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
+static void stash_init(struct pagestash *stash)
+{
+ pagevec_init(&stash->pvec);
+ spin_lock_init(&stash->lock);
+}
+
+static struct page *stash_pop_page(struct pagestash *stash)
+{
+ struct page *page = NULL;
+
+ spin_lock(&stash->lock);
+ if (likely(stash->pvec.nr))
+ page = stash->pvec.pages[--stash->pvec.nr];
+ spin_unlock(&stash->lock);
+
+ return page;
+}
+
+static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
+{
+ int nr;
+
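+	/*
+	 * The caller may already hold a different pagestash lock of the
+	 * same lock class (e.g. pushing surplus pages from a per-vm
+	 * stash into the global WC stash), hence the nested annotation
+	 * for lockdep below.
+	 */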
+ spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
+
+ nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));
+ memcpy(stash->pvec.pages + stash->pvec.nr,
+ pvec->pages + pvec->nr - nr,
+ sizeof(pvec->pages[0]) * nr);
+ stash->pvec.nr += nr;
+
+ spin_unlock(&stash->lock);
+
+ pvec->nr -= nr;
+}
+
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
- struct pagevec *pvec = &vm->free_pages;
- struct pagevec stash;
+ struct pagevec stack;
+ struct page *page;
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
- if (likely(pvec->nr))
- return pvec->pages[--pvec->nr];
+ page = stash_pop_page(&vm->free_pages);
+ if (page)
+ return page;
if (!vm->pt_kmap_wc)
return alloc_page(gfp);
- /* A placeholder for a specific mutex to guard the WC stash */
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
-
/* Look in our global stash of WC pages... */
- pvec = &vm->i915->mm.wc_stash;
- if (likely(pvec->nr))
- return pvec->pages[--pvec->nr];
+ page = stash_pop_page(&vm->i915->mm.wc_stash);
+ if (page)
+ return page;
/*
- * Otherwise batch allocate pages to amoritize cost of set_pages_wc.
+ * Otherwise batch allocate pages to amortize cost of set_pages_wc.
*
* We have to be careful as page allocation may trigger the shrinker
* (via direct reclaim) which will fill up the WC stash underneath us.
* So we add our WB pages into a temporary pvec on the stack and merge
* them into the WC stash after all the allocations are complete.
*/
- pagevec_init(&stash);
+ pagevec_init(&stack);
do {
struct page *page;
@@ -413,59 +449,67 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!page))
break;
- stash.pages[stash.nr++] = page;
- } while (stash.nr < pagevec_space(pvec));
+ stack.pages[stack.nr++] = page;
+ } while (pagevec_space(&stack));
- if (stash.nr) {
- int nr = min_t(int, stash.nr, pagevec_space(pvec));
- struct page **pages = stash.pages + stash.nr - nr;
+ if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
+ page = stack.pages[--stack.nr];
- if (nr && !set_pages_array_wc(pages, nr)) {
- memcpy(pvec->pages + pvec->nr,
- pages, sizeof(pages[0]) * nr);
- pvec->nr += nr;
- stash.nr -= nr;
- }
+ /* Merge spare WC pages to the global stash */
+ stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
- pagevec_release(&stash);
+ /* Push any surplus WC pages onto the local VM stash */
+ if (stack.nr)
+ stash_push_pagevec(&vm->free_pages, &stack);
}
- return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
+ /* Return unwanted leftovers */
+ if (unlikely(stack.nr)) {
+ WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
+ __pagevec_release(&stack);
+ }
+
+ return page;
}
static void vm_free_pages_release(struct i915_address_space *vm,
bool immediate)
{
- struct pagevec *pvec = &vm->free_pages;
+ struct pagevec *pvec = &vm->free_pages.pvec;
+ struct pagevec stack;
+ lockdep_assert_held(&vm->free_pages.lock);
GEM_BUG_ON(!pagevec_count(pvec));
if (vm->pt_kmap_wc) {
- struct pagevec *stash = &vm->i915->mm.wc_stash;
-
- /* When we use WC, first fill up the global stash and then
+ /*
+ * When we use WC, first fill up the global stash and then
* only if full immediately free the overflow.
*/
+ stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
- if (pagevec_space(stash)) {
- do {
- stash->pages[stash->nr++] =
- pvec->pages[--pvec->nr];
- if (!pvec->nr)
- return;
- } while (pagevec_space(stash));
-
- /* As we have made some room in the VM's free_pages,
- * we can wait for it to fill again. Unless we are
- * inside i915_address_space_fini() and must
- * immediately release the pages!
- */
- if (!immediate)
- return;
- }
+ /*
+ * As we have made some room in the VM's free_pages,
+ * we can wait for it to fill again. Unless we are
+ * inside i915_address_space_fini() and must
+ * immediately release the pages!
+ */
+ if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
+ return;
+
+ /*
+ * We have to drop the lock to allow ourselves to sleep,
+ * so take a copy of the pvec and clear the stash for
+ * others to use it as we sleep.
+ */
+ stack = *pvec;
+ pagevec_reinit(pvec);
+ spin_unlock(&vm->free_pages.lock);
+ pvec = &stack;
set_pages_array_wb(pvec->pages, pvec->nr);
+
+ spin_lock(&vm->free_pages.lock);
}
__pagevec_release(pvec);
@@ -481,20 +525,60 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
* unconditional might_sleep() for everybody.
*/
might_sleep();
- if (!pagevec_add(&vm->free_pages, page))
+ spin_lock(&vm->free_pages.lock);
+ if (!pagevec_add(&vm->free_pages.pvec, page))
vm_free_pages_release(vm, false);
+ spin_unlock(&vm->free_pages.lock);
+}
+
+static void i915_address_space_init(struct i915_address_space *vm,
+ struct drm_i915_private *dev_priv)
+{
+ /*
+ * The vm->mutex must be reclaim safe (for use in the shrinker).
+ * Do a dummy acquire now under fs_reclaim so that any allocation
+ * attempt holding the lock is immediately reported by lockdep.
+ */
+ mutex_init(&vm->mutex);
+ i915_gem_shrinker_taints_mutex(&vm->mutex);
+
+ GEM_BUG_ON(!vm->total);
+ drm_mm_init(&vm->mm, 0, vm->total);
+ vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+
+ stash_init(&vm->free_pages);
+
+ INIT_LIST_HEAD(&vm->active_list);
+ INIT_LIST_HEAD(&vm->inactive_list);
+ INIT_LIST_HEAD(&vm->unbound_list);
+}
+
+static void i915_address_space_fini(struct i915_address_space *vm)
+{
+ spin_lock(&vm->free_pages.lock);
+ if (pagevec_count(&vm->free_pages.pvec))
+ vm_free_pages_release(vm, true);
+ GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
+ spin_unlock(&vm->free_pages.lock);
+
+ drm_mm_takedown(&vm->mm);
+
+ mutex_destroy(&vm->mutex);
}
static int __setup_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p,
gfp_t gfp)
{
- p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
+ p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
if (unlikely(!p->page))
return -ENOMEM;
- p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
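+	/*
+	 * Map without the implicit CPU cache sync (the driver manages
+	 * coherency for page-table pages itself) and without warning on
+	 * failure, as -ENOMEM is handled gracefully just below.
+	 */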
+ p->daddr = dma_map_page_attrs(vm->dma,
+ p->page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_WARN);
if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
vm_free_page(vm, p->page);
return -ENOMEM;
@@ -506,7 +590,7 @@ static int __setup_page_dma(struct i915_address_space *vm,
static int setup_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p)
{
- return __setup_page_dma(vm, p, I915_GFP_DMA);
+ return __setup_page_dma(vm, p, __GFP_HIGHMEM);
}
static void cleanup_page_dma(struct i915_address_space *vm,
@@ -520,8 +604,8 @@ static void cleanup_page_dma(struct i915_address_space *vm,
#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
-#define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
-#define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))
+#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
+#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
static void fill_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p,
@@ -575,8 +659,11 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!page))
goto skip;
- addr = dma_map_page(vm->dma, page, 0, size,
- PCI_DMA_BIDIRECTIONAL);
+ addr = dma_map_page_attrs(vm->dma,
+ page, 0, size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_WARN);
if (unlikely(dma_mapping_error(vm->dma, addr)))
goto free_page;
@@ -614,7 +701,7 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
struct i915_page_table *pt;
- pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
+ pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
if (unlikely(!pt))
return ERR_PTR(-ENOMEM);
@@ -637,21 +724,20 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
fill_px(vm, pt,
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}
-static void gen6_initialize_pt(struct i915_address_space *vm,
+static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
struct i915_page_table *pt)
{
- fill32_px(vm, pt,
- vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
+ fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
}
static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
struct i915_page_directory *pd;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
+ pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
@@ -685,7 +771,7 @@ static int __pdp_init(struct i915_address_space *vm,
const unsigned int pdpes = i915_pdpes_per_pdp(vm);
pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
- GFP_KERNEL | __GFP_NOWARN);
+ I915_GFP_ALLOW_FAIL);
if (unlikely(!pdp->page_directory))
return -ENOMEM;
@@ -765,53 +851,6 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
}
-/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct i915_request *rq,
- unsigned entry,
- dma_addr_t addr)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 *cs;
-
- BUG_ON(entry >= 4);
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
- *cs++ = upper_32_bits(addr);
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
- *cs++ = lower_32_bits(addr);
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- int i, ret;
-
- for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-
- ret = gen8_write_pdp(rq, i, pd_daddr);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4));
-}
-
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
* the page table structures, we mark them dirty so that
* context switching/execlist queuing code takes extra steps
@@ -819,7 +858,7 @@ static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
+ ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
}
/* Removes entries from a single page table, releasing it if it's empty.
@@ -833,7 +872,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
unsigned int pte = gen8_pte_index(start);
unsigned int pte_end = pte + num_entries;
const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
gen8_pte_t *vaddr;
GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -1005,14 +1044,15 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
struct i915_page_directory_pointer *pdp,
struct sgt_dma *iter,
struct gen8_insert_pte *idx,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level,
+ u32 flags)
{
struct i915_page_directory *pd;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
gen8_pte_t *vaddr;
bool ret;
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+ GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
pd = pdp->page_directory[idx->pdpe];
vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
do {
@@ -1043,7 +1083,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
break;
}
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+ GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
pd = pdp->page_directory[idx->pdpe];
}
@@ -1059,14 +1099,14 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level,
- u32 unused)
+ u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
- cache_level);
+ cache_level, flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
@@ -1074,9 +1114,10 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
struct i915_page_directory_pointer **pdps,
struct sgt_dma *iter,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level,
+ u32 flags)
{
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
u64 start = vma->node.start;
dma_addr_t rem = iter->sg->length;
@@ -1192,19 +1233,21 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level,
- u32 unused)
+ u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
- gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+ gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
+ flags);
} else {
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
- &iter, &idx, cache_level))
+ &iter, &idx, cache_level,
+ flags))
GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -1229,7 +1272,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
{
int ret;
- ret = setup_scratch_page(vm, I915_GFP_DMA);
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
@@ -1272,7 +1315,7 @@ free_scratch_page:
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct drm_i915_private *dev_priv = vm->i915;
enum vgt_g2v_type msg;
int i;
@@ -1333,13 +1376,13 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
int i;
for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
- if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
+ if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
continue;
- gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
}
- cleanup_px(&ppgtt->base, &ppgtt->pml4);
+ cleanup_px(&ppgtt->vm, &ppgtt->pml4);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1353,7 +1396,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (use_4lvl(vm))
gen8_ppgtt_cleanup_4lvl(ppgtt);
else
- gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
gen8_free_scratch(vm);
}
@@ -1489,7 +1532,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
gen8_pte_t scratch_pte,
struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct i915_page_directory *pd;
u32 pdpe;
@@ -1499,7 +1542,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
u64 pd_start = start;
u32 pde;
- if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
+ if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
continue;
seq_printf(m, "\tPDPE #%d\n", pdpe);
@@ -1507,7 +1550,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
u32 pte;
gen8_pte_t *pt_vaddr;
- if (pd->page_table[pde] == ppgtt->base.scratch_pt)
+ if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
continue;
pt_vaddr = kmap_atomic_px(pt);
@@ -1540,10 +1583,10 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
- u64 start = 0, length = ppgtt->base.total;
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+ u64 start = 0, length = ppgtt->vm.total;
if (use_4lvl(vm)) {
u64 pml4e;
@@ -1551,7 +1594,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
struct i915_page_directory_pointer *pdp;
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
+ if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
continue;
seq_printf(m, " PML4E #%llu\n", pml4e);
@@ -1564,10 +1607,10 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
struct i915_page_directory *pd;
- u64 start = 0, length = ppgtt->base.total;
+ u64 start = 0, length = ppgtt->vm.total;
u64 from = start;
unsigned int pdpe;
@@ -1601,211 +1644,153 @@ unwind:
* space.
*
*/
-static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
{
- struct i915_address_space *vm = &ppgtt->base;
- struct drm_i915_private *dev_priv = vm->i915;
- int ret;
+ struct i915_hw_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&ppgtt->ref);
+
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.dma = &i915->drm.pdev->dev;
- ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
+ ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ?
1ULL << 48 :
1ULL << 32;
+ /*
+ * From bdw, there is support for read-only pages in the PPGTT.
+ *
+ * XXX GVT is not honouring the lack of RW in the PTE bits.
+ */
+ ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
+
+ i915_address_space_init(&ppgtt->vm, i915);
+
/* There are only a few exceptions for gen >= 6: chv and bxt.
 * And we are not sure about the latter, so play safe for now.
 */
- if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
- ppgtt->base.pt_kmap_wc = true;
+ if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
+ ppgtt->vm.pt_kmap_wc = true;
- ret = gen8_init_scratch(&ppgtt->base);
- if (ret) {
- ppgtt->base.total = 0;
- return ret;
- }
+ err = gen8_init_scratch(&ppgtt->vm);
+ if (err)
+ goto err_free;
- if (use_4lvl(vm)) {
- ret = setup_px(&ppgtt->base, &ppgtt->pml4);
- if (ret)
- goto free_scratch;
+ if (use_4lvl(&ppgtt->vm)) {
+ err = setup_px(&ppgtt->vm, &ppgtt->pml4);
+ if (err)
+ goto err_scratch;
- gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
+ gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
- ppgtt->switch_mm = gen8_mm_switch_4lvl;
- ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
- ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
} else {
- ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
- if (ret)
- goto free_scratch;
+ err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
+ if (err)
+ goto err_scratch;
- if (intel_vgpu_active(dev_priv)) {
- ret = gen8_preallocate_top_level_pdp(ppgtt);
- if (ret) {
+ if (intel_vgpu_active(i915)) {
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err) {
__pdp_fini(&ppgtt->pdp);
- goto free_scratch;
+ goto err_scratch;
}
}
- ppgtt->switch_mm = gen8_mm_switch_3lvl;
- ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
- ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
}
- if (intel_vgpu_active(dev_priv))
+ if (intel_vgpu_active(i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
- ppgtt->base.cleanup = gen8_ppgtt_cleanup;
- ppgtt->base.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.bind_vma = ppgtt_bind_vma;
- ppgtt->base.set_pages = ppgtt_set_pages;
- ppgtt->base.clear_pages = clear_pages;
+ ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
ppgtt->debug_dump = gen8_dump_ppgtt;
- return 0;
+ ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
-free_scratch:
- gen8_free_scratch(&ppgtt->base);
- return ret;
+ return ppgtt;
+
+err_scratch:
+ gen8_free_scratch(&ppgtt->vm);
+err_free:
+ kfree(ppgtt);
+ return ERR_PTR(err);
}
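/*
 * Editor's note (not part of the patch): gen8_ppgtt_create() above is
 * the kernel's usual "constructor returns ERR_PTR(), failures unwind
 * through goto labels in reverse order of setup" idiom. A minimal,
 * self-contained sketch of that idiom, with simplified local stand-ins
 * for the <linux/err.h> helpers; all names below are hypothetical.
 */
#include <errno.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct widget { void *scratch; void *tables; };

static struct widget *widget_create(void)
{
	struct widget *w;
	int err;

	w = calloc(1, sizeof(*w));
	if (!w)
		return ERR_PTR(-ENOMEM);

	w->scratch = malloc(64);
	if (!w->scratch) {
		err = -ENOMEM;
		goto err_free;
	}

	w->tables = malloc(512);
	if (!w->tables) {
		err = -ENOMEM;
		goto err_scratch;
	}

	return w;

err_scratch:	/* unwind strictly in reverse order of setup */
	free(w->scratch);
err_free:
	free(w);
	return ERR_PTR(err);
}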
-static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
- struct i915_page_table *unused;
- gen6_pte_t scratch_pte;
- u32 pd_entry, pte, pde;
- u32 start = 0, length = ppgtt->base.total;
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+ struct i915_page_table *pt;
+ u32 pte, pde;
- scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, 0);
+ gen6_for_all_pdes(pt, &base->pd, pde) {
+ gen6_pte_t *vaddr;
+
+ if (pt == base->vm.scratch_pt)
+ continue;
+
+ if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
+ u32 expected =
+ GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
+ GEN6_PDE_VALID;
+ u32 pd_entry = readl(ppgtt->pd_addr + pde);
+
+ if (pd_entry != expected)
+ seq_printf(m,
+ "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
+ pde,
+ pd_entry,
+ expected);
+
+ seq_printf(m, "\tPDE: %x\n", pd_entry);
+ }
- gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
- u32 expected;
- gen6_pte_t *pt_vaddr;
- const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
- pd_entry = readl(ppgtt->pd_addr + pde);
- expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
-
- if (pd_entry != expected)
- seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
- pde,
- pd_entry,
- expected);
- seq_printf(m, "\tPDE: %x\n", pd_entry);
-
- pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
-
- for (pte = 0; pte < GEN6_PTES; pte+=4) {
- unsigned long va =
- (pde * PAGE_SIZE * GEN6_PTES) +
- (pte * PAGE_SIZE);
+ vaddr = kmap_atomic_px(base->pd.page_table[pde]);
+ for (pte = 0; pte < GEN6_PTES; pte += 4) {
int i;
- bool found = false;
+
for (i = 0; i < 4; i++)
- if (pt_vaddr[pte + i] != scratch_pte)
- found = true;
- if (!found)
+ if (vaddr[pte + i] != scratch_pte)
+ break;
+ if (i == 4)
continue;
- seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
+ seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
+ pde, pte,
+ (pde * GEN6_PTES + pte) * PAGE_SIZE);
for (i = 0; i < 4; i++) {
- if (pt_vaddr[pte + i] != scratch_pte)
- seq_printf(m, " %08x", pt_vaddr[pte + i]);
+ if (vaddr[pte + i] != scratch_pte)
+ seq_printf(m, " %08x", vaddr[pte + i]);
else
- seq_puts(m, " SCRATCH ");
+ seq_puts(m, " SCRATCH");
}
seq_puts(m, "\n");
}
- kunmap_atomic(pt_vaddr);
+ kunmap_atomic(vaddr);
}
}
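/*
 * Editor's note: the rewritten dump loop above drops the "bool found"
 * flag in favour of the idiomatic "break early, then test whether the
 * loop ran to completion (i == 4)" pattern for skipping all-scratch
 * groups of four PTEs. The pattern in isolation:
 */
#include <stdio.h>

int main(void)
{
	const unsigned int scratch = 0;
	const unsigned int group[4] = { 0, 0, 7, 0 };
	int i;

	for (i = 0; i < 4; i++)
		if (group[i] != scratch)
			break;
	if (i == 4)
		return 0;		/* every entry is scratch: skip */

	printf("group has live entries\n");
	return 0;
}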
/* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
+static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
const unsigned int pde,
const struct i915_page_table *pt)
{
/* Caller needs to make sure the write completes if necessary */
- writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
- ppgtt->pd_addr + pde);
-}
-
-/* Write all the page tables found in the ppgtt structure to incrementing page
- * directories. */
-static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
- u32 start, u32 length)
-{
- struct i915_page_table *pt;
- unsigned int pde;
-
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
- gen6_write_pde(ppgtt, pde, pt);
-
- mark_tlbs_dirty(ppgtt);
- wmb();
-}
-
-static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
-{
- GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
- return ppgtt->pd.base.ggtt_offset << 10;
-}
-
-static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 *cs;
-
- /* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(2);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
- *cs++ = PP_DIR_DCLV_2G;
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = get_pd_offset(ppgtt);
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 *cs;
-
- /* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(2);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
- *cs++ = PP_DIR_DCLV_2G;
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = get_pd_offset(ppgtt);
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- struct intel_engine_cs *engine = rq->engine;
- struct drm_i915_private *dev_priv = rq->i915;
-
- I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
- return 0;
+ iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
+ ppgtt->pd_addr + pde);
}
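/*
 * Editor's note: the value posted by gen6_write_pde() comes from
 * GEN6_PDE_ADDR_ENCODE(); per the header hunk later in this patch,
 * gen6..hsw fold physical address bits 39:32 into entry bits 11:4
 * (4KiB-aligned addresses leave those bits free). A standalone model
 * of that packing, with an illustrative valid bit:
 */
#include <stdint.h>

#define PDE_VALID 1u	/* illustrative; cf. GEN6_PDE_VALID */

static uint32_t gen6_addr_encode(uint64_t addr)
{
	/* bits 39:32 of addr move down into bits 11:4 of the entry */
	return (uint32_t)(addr | ((addr >> 28) & 0xff0));
}

static uint32_t pde_encode(uint64_t pt_addr)
{
	return gen6_addr_encode(pt_addr) | PDE_VALID;
}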
static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
@@ -1867,22 +1852,30 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
unsigned int first_entry = start >> PAGE_SHIFT;
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length >> PAGE_SHIFT;
- gen6_pte_t scratch_pte =
- vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+ const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
while (num_entries) {
- struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
- unsigned int end = min(pte + num_entries, GEN6_PTES);
+ struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
+ const unsigned int end = min(pte + num_entries, GEN6_PTES);
+ const unsigned int count = end - pte;
gen6_pte_t *vaddr;
- num_entries -= end - pte;
+ GEM_BUG_ON(pt == vm->scratch_pt);
+
+ num_entries -= count;
+
+ GEM_BUG_ON(count > pt->used_ptes);
+ pt->used_ptes -= count;
+ if (!pt->used_ptes)
+ ppgtt->scan_for_unused_pt = true;
- /* Note that the hw doesn't support removing PDE on the fly
+ /*
+ * Note that the hw doesn't support removing PDE on the fly
* (they are cached inside the context with no means to
* invalidate the cache), so we can only reset the PTE
* entries back to scratch.
@@ -1911,6 +1904,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sgt_dma iter = sgt_dma(vma);
gen6_pte_t *vaddr;
+ GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);
+
vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
do {
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -1939,218 +1934,280 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
static int gen6_alloc_va_range(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
struct i915_page_table *pt;
u64 from = start;
unsigned int pde;
bool flush = false;
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
+ gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
+ const unsigned int count = gen6_pte_count(start, length);
+
if (pt == vm->scratch_pt) {
pt = alloc_pt(vm);
if (IS_ERR(pt))
goto unwind_out;
- gen6_initialize_pt(vm, pt);
- ppgtt->pd.page_table[pde] = pt;
- gen6_write_pde(ppgtt, pde, pt);
- flush = true;
+ gen6_initialize_pt(ppgtt, pt);
+ ppgtt->base.pd.page_table[pde] = pt;
+
+ if (i915_vma_is_bound(ppgtt->vma,
+ I915_VMA_GLOBAL_BIND)) {
+ gen6_write_pde(ppgtt, pde, pt);
+ flush = true;
+ }
+
+ GEM_BUG_ON(pt->used_ptes);
}
+
+ pt->used_ptes += count;
}
if (flush) {
- mark_tlbs_dirty(ppgtt);
- wmb();
+ mark_tlbs_dirty(&ppgtt->base);
+ gen6_ggtt_invalidate(ppgtt->base.vm.i915);
}
return 0;
unwind_out:
- gen6_ppgtt_clear_range(vm, from, start);
+ gen6_ppgtt_clear_range(vm, from, start - from);
return -ENOMEM;
}
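/*
 * Editor's note: the va_range hooks above add per-page-table use
 * counting: gen6_alloc_va_range() does pt->used_ptes += count, and
 * gen6_ppgtt_clear_range() subtracts, only *marking* an empty table
 * for later reclaim (scan_for_unused_pt) because the hardware caches
 * PDEs inside the context and cannot drop them on the fly. The
 * accounting core, reduced to plain C with hypothetical names:
 */
struct table { unsigned int used_ptes; };

struct directory {
	struct table tables[512];
	int scan_for_unused_pt;
};

static void range_alloc(struct directory *d, unsigned int pde,
			unsigned int count)
{
	d->tables[pde].used_ptes += count;
}

static void range_clear(struct directory *d, unsigned int pde,
			unsigned int count)
{
	d->tables[pde].used_ptes -= count;
	if (!d->tables[pde].used_ptes)
		d->scan_for_unused_pt = 1;	/* reclaimed at next unbind */
}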
-static int gen6_init_scratch(struct i915_address_space *vm)
+static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
{
+ struct i915_address_space * const vm = &ppgtt->base.vm;
+ struct i915_page_table *unused;
+ u32 pde;
int ret;
- ret = setup_scratch_page(vm, I915_GFP_DMA);
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
+ ppgtt->scratch_pte =
+ vm->pte_encode(vm->scratch_page.daddr,
+ I915_CACHE_NONE, PTE_READ_ONLY);
+
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
cleanup_scratch_page(vm);
return PTR_ERR(vm->scratch_pt);
}
- gen6_initialize_pt(vm, vm->scratch_pt);
+ gen6_initialize_pt(ppgtt, vm->scratch_pt);
+ gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
+ ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
return 0;
}
-static void gen6_free_scratch(struct i915_address_space *vm)
+static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
{
free_pt(vm, vm->scratch_pt);
cleanup_scratch_page(vm);
}
-static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_directory *pd = &ppgtt->pd;
struct i915_page_table *pt;
u32 pde;
- drm_mm_remove_node(&ppgtt->node);
+ gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+ if (pt != ppgtt->base.vm.scratch_pt)
+ free_pt(&ppgtt->base.vm, pt);
+}
+
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- gen6_for_all_pdes(pt, pd, pde)
- if (pt != vm->scratch_pt)
- free_pt(vm, pt);
+ i915_vma_destroy(ppgtt->vma);
- gen6_free_scratch(vm);
+ gen6_ppgtt_free_pd(ppgtt);
+ gen6_ppgtt_free_scratch(vm);
}
-static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
+static int pd_vma_set_pages(struct i915_vma *vma)
{
- struct i915_address_space *vm = &ppgtt->base;
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int ret;
+ vma->pages = ERR_PTR(-ENODEV);
+ return 0;
+}
- /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
- * allocator works in address space sizes, so it's multiplied by page
- * size. We allocate at the top of the GTT to avoid fragmentation.
- */
- BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
+static void pd_vma_clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
- ret = gen6_init_scratch(vm);
- if (ret)
- return ret;
+ vma->pages = NULL;
+}
- ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
- GEN6_PD_SIZE, GEN6_PD_ALIGN,
- I915_COLOR_UNEVICTABLE,
- 0, ggtt->base.total,
- PIN_HIGH);
- if (ret)
- goto err_out;
+static int pd_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct gen6_hw_ppgtt *ppgtt = vma->private;
+ u32 ggtt_offset = i915_ggtt_offset(vma) / PAGE_SIZE;
+ struct i915_page_table *pt;
+ unsigned int pde;
- if (ppgtt->node.start < ggtt->mappable_end)
- DRM_DEBUG("Forced to use aperture for PDEs\n");
+ ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
- ppgtt->pd.base.ggtt_offset =
- ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
+ gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+ gen6_write_pde(ppgtt, pde, pt);
- ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
- ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
+ mark_tlbs_dirty(&ppgtt->base);
+ gen6_ggtt_invalidate(ppgtt->base.vm.i915);
return 0;
-
-err_out:
- gen6_free_scratch(vm);
- return ret;
}
-static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
+static void pd_vma_unbind(struct i915_vma *vma)
{
- return gen6_ppgtt_allocate_page_directories(ppgtt);
-}
+ struct gen6_hw_ppgtt *ppgtt = vma->private;
+ struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
+ struct i915_page_table *pt;
+ unsigned int pde;
-static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
- u64 start, u64 length)
-{
- struct i915_page_table *unused;
- u32 pde;
+ if (!ppgtt->scan_for_unused_pt)
+ return;
+
+ /* Free all page tables that are no longer in use */
+ gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
+ if (pt->used_ptes || pt == scratch_pt)
+ continue;
- gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
- ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
+ free_pt(&ppgtt->base.vm, pt);
+ ppgtt->base.pd.page_table[pde] = scratch_pt;
+ }
+
+ ppgtt->scan_for_unused_pt = false;
}
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
-{
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int ret;
+static const struct i915_vma_ops pd_vma_ops = {
+ .set_pages = pd_vma_set_pages,
+ .clear_pages = pd_vma_clear_pages,
+ .bind_vma = pd_vma_bind,
+ .unbind_vma = pd_vma_unbind,
+};
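/*
 * Editor's note: pd_vma_ops above is a const "ops table" (the plain C
 * form of a vtable) selected per VMA, which is what lets this patch
 * move bind/unbind/set_pages/clear_pages out of i915_address_space and
 * into struct i915_vma_ops. The shape of the pattern, with
 * hypothetical names:
 */
struct thing;

struct thing_ops {
	int  (*bind)(struct thing *t);
	void (*unbind)(struct thing *t);
};

struct thing {
	const struct thing_ops *ops;
};

static int thing_bind(struct thing *t)
{
	return t->ops->bind(t);	/* dispatch through the table */
}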
- ppgtt->base.pte_encode = ggtt->base.pte_encode;
- if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
- ppgtt->switch_mm = gen6_mm_switch;
- else if (IS_HASWELL(dev_priv))
- ppgtt->switch_mm = hsw_mm_switch;
- else if (IS_GEN7(dev_priv))
- ppgtt->switch_mm = gen7_mm_switch;
- else
- BUG();
+static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
+{
+ struct drm_i915_private *i915 = ppgtt->base.vm.i915;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_vma *vma;
- ret = gen6_ppgtt_alloc(ppgtt);
- if (ret)
- return ret;
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(size > ggtt->vm.total);
- ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+ vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
+ if (!vma)
+ return ERR_PTR(-ENOMEM);
- gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
- gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
+ init_request_active(&vma->last_fence, NULL);
- ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
- if (ret) {
- gen6_ppgtt_cleanup(&ppgtt->base);
- return ret;
- }
+ vma->vm = &ggtt->vm;
+ vma->ops = &pd_vma_ops;
+ vma->private = ppgtt;
- ppgtt->base.clear_range = gen6_ppgtt_clear_range;
- ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
- ppgtt->base.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.bind_vma = ppgtt_bind_vma;
- ppgtt->base.set_pages = ppgtt_set_pages;
- ppgtt->base.clear_pages = clear_pages;
- ppgtt->base.cleanup = gen6_ppgtt_cleanup;
- ppgtt->debug_dump = gen6_dump_ppgtt;
+ vma->active = RB_ROOT;
- DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
- ppgtt->node.size >> 20,
- ppgtt->node.start / PAGE_SIZE);
+ vma->size = size;
+ vma->fence_size = size;
+ vma->flags = I915_VMA_GGTT;
+ vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
- DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
- ppgtt->pd.base.ggtt_offset << 10);
+ INIT_LIST_HEAD(&vma->obj_link);
+ list_add(&vma->vm_link, &vma->vm->unbound_list);
- return 0;
+ return vma;
}
-static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_private *dev_priv)
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
{
- ppgtt->base.i915 = dev_priv;
- ppgtt->base.dma = &dev_priv->drm.pdev->dev;
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
- if (INTEL_GEN(dev_priv) < 8)
- return gen6_ppgtt_init(ppgtt);
- else
- return gen8_ppgtt_init(ppgtt);
+ /*
+ * Work around the limited maximum vma->pin_count and the aliasing_ppgtt,
+ * which will be pinned into every active context.
+ * (When vma->pin_count becomes atomic, I expect we will naturally
+ * need a larger, unpacked, type and kill this redundancy.)
+ */
+ if (ppgtt->pin_count++)
+ return 0;
+
+ /*
+ * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+ * allocator works in address space sizes, so it's multiplied by page
+ * size. We allocate at the top of the GTT to avoid fragmentation.
+ */
+ return i915_vma_pin(ppgtt->vma,
+ 0, GEN6_PD_ALIGN,
+ PIN_GLOBAL | PIN_HIGH);
}
-static void i915_address_space_init(struct i915_address_space *vm,
- struct drm_i915_private *dev_priv,
- const char *name)
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
{
- drm_mm_init(&vm->mm, 0, vm->total);
- vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
- INIT_LIST_HEAD(&vm->active_list);
- INIT_LIST_HEAD(&vm->inactive_list);
- INIT_LIST_HEAD(&vm->unbound_list);
+ GEM_BUG_ON(!ppgtt->pin_count);
+ if (--ppgtt->pin_count)
+ return;
- list_add_tail(&vm->global_link, &dev_priv->vm_list);
- pagevec_init(&vm->free_pages);
+ i915_vma_unpin(ppgtt->vma);
}
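/*
 * Editor's note: gen6_ppgtt_pin()/gen6_ppgtt_unpin() above follow the
 * counting-pin pattern: real work happens only on the 0->1 and 1->0
 * transitions. A minimal sketch (hypothetical names; no locking shown,
 * as the real code is serialised by struct_mutex at this point):
 */
#include <assert.h>

struct pinnable {
	unsigned int pin_count;
	int backed;
};

static void do_pin(struct pinnable *p)
{
	if (p->pin_count++)	/* already pinned: just count */
		return;
	p->backed = 1;		/* first user: acquire the backing */
}

static void do_unpin(struct pinnable *p)
{
	assert(p->pin_count);
	if (--p->pin_count)
		return;
	p->backed = 0;		/* last user: release the backing */
}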
-static void i915_address_space_fini(struct i915_address_space *vm)
+static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
- if (pagevec_count(&vm->free_pages))
- vm_free_pages_release(vm, true);
+ struct i915_ggtt * const ggtt = &i915->ggtt;
+ struct gen6_hw_ppgtt *ppgtt;
+ int err;
- drm_mm_takedown(&vm->mm);
- list_del(&vm->global_link);
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&ppgtt->base.ref);
+
+ ppgtt->base.vm.i915 = i915;
+ ppgtt->base.vm.dma = &i915->drm.pdev->dev;
+
+ ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+
+ i915_address_space_init(&ppgtt->base.vm, i915);
+
+ ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
+ ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
+ ppgtt->base.debug_dump = gen6_dump_ppgtt;
+
+ ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->base.vm.vma_ops.clear_pages = clear_pages;
+
+ ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
+
+ err = gen6_ppgtt_init_scratch(ppgtt);
+ if (err)
+ goto err_free;
+
+ ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
+ if (IS_ERR(ppgtt->vma)) {
+ err = PTR_ERR(ppgtt->vma);
+ goto err_scratch;
+ }
+
+ return &ppgtt->base;
+
+err_scratch:
+ gen6_ppgtt_free_scratch(&ppgtt->base.vm);
+err_free:
+ kfree(ppgtt);
+ return ERR_PTR(err);
}
static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
@@ -2212,29 +2269,28 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
return 0;
}
+static struct i915_hw_ppgtt *
+__hw_ppgtt_create(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) < 8)
+ return gen6_ppgtt_create(i915);
+ else
+ return gen8_ppgtt_create(i915);
+}
+
struct i915_hw_ppgtt *
-i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv,
- const char *name)
+i915_ppgtt_create(struct drm_i915_private *i915,
+ struct drm_i915_file_private *fpriv)
{
struct i915_hw_ppgtt *ppgtt;
- int ret;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ERR_PTR(-ENOMEM);
- ret = __hw_ppgtt_init(ppgtt, dev_priv);
- if (ret) {
- kfree(ppgtt);
- return ERR_PTR(ret);
- }
+ ppgtt = __hw_ppgtt_create(i915);
+ if (IS_ERR(ppgtt))
+ return ppgtt;
- kref_init(&ppgtt->ref);
- i915_address_space_init(&ppgtt->base, dev_priv, name);
- ppgtt->base.file = fpriv;
+ ppgtt->vm.file = fpriv;
- trace_i915_ppgtt_create(&ppgtt->base);
+ trace_i915_ppgtt_create(&ppgtt->vm);
return ppgtt;
}
@@ -2268,16 +2324,16 @@ void i915_ppgtt_release(struct kref *kref)
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
- trace_i915_ppgtt_release(&ppgtt->base);
+ trace_i915_ppgtt_release(&ppgtt->vm);
- ppgtt_destroy_vma(&ppgtt->base);
+ ppgtt_destroy_vma(&ppgtt->vm);
- GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
- GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
- GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
- ppgtt->base.cleanup(&ppgtt->base);
- i915_address_space_fini(&ppgtt->base);
+ ppgtt->vm.cleanup(&ppgtt->vm);
+ i915_address_space_fini(&ppgtt->vm);
kfree(ppgtt);
}
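/*
 * Editor's note: i915_ppgtt_release() above is a kref release
 * callback: container_of() recovers the ppgtt from its embedded
 * refcount, the GEM_BUG_ONs assert nothing is still bound, then the
 * object is freed. The minimal shape of that pattern (kref_put()
 * invokes the release function when the count hits zero):
 */
#include <stddef.h>
#include <stdlib.h>

struct ref { unsigned int count; };

struct object {
	struct ref ref;
	/* ... payload ... */
};

static void object_release(struct ref *r)
{
	struct object *obj =
		(struct object *)((char *)r - offsetof(struct object, ref));

	free(obj);	/* all users are gone */
}

static void ref_put(struct ref *r, void (*release)(struct ref *))
{
	if (!--r->count)
		release(r);
}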
@@ -2373,7 +2429,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
i915_check_and_clear_faults(dev_priv);
- ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
i915_ggtt_invalidate(dev_priv);
}
@@ -2419,7 +2475,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
- gen8_set_pte(pte, gen8_pte_encode(addr, level));
+ gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
ggtt->invalidate(vm->i915);
}
@@ -2427,14 +2483,19 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level level,
- u32 unused)
+ u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen8_pte_t __iomem *gtt_entries;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
dma_addr_t addr;
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read-only page.
+ */
+
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
gtt_entries += vma->node.start >> PAGE_SHIFT;
for_each_sgt_dma(addr, sgt_iter, vma->pages)
@@ -2500,7 +2561,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
gen8_pte_t __iomem *gtt_base =
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2561,13 +2622,14 @@ struct insert_entries {
struct i915_address_space *vm;
struct i915_vma *vma;
enum i915_cache_level level;
+ u32 flags;
};
static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
struct insert_entries *arg = _arg;
- gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
+ gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
bxt_vtd_ggtt_wa(arg->vm);
return 0;
@@ -2576,9 +2638,9 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level level,
- u32 unused)
+ u32 flags)
{
- struct insert_entries arg = { vm, vma, level };
+ struct insert_entries arg = { vm, vma, level, flags };
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
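/*
 * Editor's note: stop_machine() takes a single void *, so the new
 * flags argument has to travel inside struct insert_entries alongside
 * the others; bxt_vtd_ggtt_insert_entries__BKL() just marshals and
 * forwards. The marshalling pattern on its own:
 */
struct call_args {
	int level;
	unsigned int flags;
};

static int forwarded_cb(void *data)
{
	const struct call_args *args = data;

	return (int)(args->level + args->flags);	/* use the bundle */
}

static int run_on_one_pointer(int (*fn)(void *), void *data)
{
	return fn(data);	/* stand-in for stop_machine() */
}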
@@ -2669,9 +2731,9 @@ static int ggtt_bind_vma(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
- /* Currently applicable only to VLV */
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
- if (obj->gt_ro)
+ if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
intel_runtime_pm_get(i915);
@@ -2709,23 +2771,22 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
/* Currently applicable only to VLV */
pte_flags = 0;
- if (vma->obj->gt_ro)
+ if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
- if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
- appgtt->base.allocate_va_range) {
- ret = appgtt->base.allocate_va_range(&appgtt->base,
- vma->node.start,
- vma->size);
+ if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+ ret = appgtt->vm.allocate_va_range(&appgtt->vm,
+ vma->node.start,
+ vma->size);
if (ret)
return ret;
}
- appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
- pte_flags);
+ appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
+ pte_flags);
}
if (flags & I915_VMA_GLOBAL_BIND) {
@@ -2748,7 +2809,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
}
if (vma->flags & I915_VMA_LOCAL_BIND) {
- struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
+ struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
vm->clear_range(vm, vma->node.start, vma->size);
}
@@ -2762,7 +2823,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (unlikely(ggtt->do_idle_maps)) {
- if (i915_gem_wait_for_idle(dev_priv, 0)) {
+ if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
@@ -2811,34 +2872,32 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
struct i915_hw_ppgtt *ppgtt;
int err;
- ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
+ ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM));
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
- if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
+ if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
err = -ENODEV;
goto err_ppgtt;
}
- if (ppgtt->base.allocate_va_range) {
- /* Note we only pre-allocate as far as the end of the global
- * GTT. On 48b / 4-level page-tables, the difference is very,
- * very significant! We have to preallocate as GVT/vgpu does
- * not like the page directory disappearing.
- */
- err = ppgtt->base.allocate_va_range(&ppgtt->base,
- 0, ggtt->base.total);
- if (err)
- goto err_ppgtt;
- }
+ /*
+ * Note we only pre-allocate as far as the end of the global
+ * GTT. On 48b / 4-level page-tables, the difference is very,
+ * very significant! We have to preallocate as GVT/vgpu does
+ * not like the page directory disappearing.
+ */
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
+ if (err)
+ goto err_ppgtt;
i915->mm.aliasing_ppgtt = ppgtt;
- GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma);
- ggtt->base.bind_vma = aliasing_gtt_bind_vma;
+ GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
+ ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
- GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
- ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
+ GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
+ ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
return 0;
@@ -2858,8 +2917,8 @@ void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
i915_ppgtt_put(ppgtt);
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
}
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -2883,7 +2942,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
return ret;
/* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
+ ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -2891,16 +2950,15 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
return ret;
/* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
+ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- ggtt->base.clear_range(&ggtt->base, hole_start,
- hole_end - hole_start);
+ ggtt->vm.clear_range(&ggtt->vm, hole_start,
+ hole_end - hole_start);
}
/* And finally clear the reserved guard page */
- ggtt->base.clear_range(&ggtt->base,
- ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
+ ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
ret = i915_gem_init_aliasing_ppgtt(dev_priv);
@@ -2925,30 +2983,26 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
struct i915_vma *vma, *vn;
struct pagevec *pvec;
- ggtt->base.closed = true;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- GEM_BUG_ON(!list_empty(&ggtt->base.active_list));
- list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
- WARN_ON(i915_vma_unbind(vma));
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- i915_gem_cleanup_stolen(&dev_priv->drm);
+ ggtt->vm.closed = true;
mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_fini_aliasing_ppgtt(dev_priv);
+ GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
+ WARN_ON(i915_vma_unbind(vma));
+
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
- if (drm_mm_initialized(&ggtt->base.mm)) {
+ if (drm_mm_initialized(&ggtt->vm.mm)) {
intel_vgt_deballoon(dev_priv);
- i915_address_space_fini(&ggtt->base);
+ i915_address_space_fini(&ggtt->vm);
}
- ggtt->base.cleanup(&ggtt->base);
+ ggtt->vm.cleanup(&ggtt->vm);
- pvec = &dev_priv->mm.wc_stash;
+ pvec = &dev_priv->mm.wc_stash.pvec;
if (pvec->nr) {
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
@@ -2958,6 +3012,8 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
arch_phys_wc_del(ggtt->mtrr);
io_mapping_fini(&ggtt->iomap);
+
+ i915_gem_cleanup_stolen(&dev_priv->drm);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2996,7 +3052,7 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
phys_addr_t phys_addr;
int ret;
@@ -3020,7 +3076,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return -ENOMEM;
}
- ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
+ ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
@@ -3326,7 +3382,7 @@ static void setup_private_pat(struct drm_i915_private *dev_priv)
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
@@ -3350,29 +3406,30 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
else
size = gen8_get_total_gtt_size(snb_gmch_ctl);
- ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
- ggtt->base.cleanup = gen6_gmch_remove;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.insert_page = gen8_ggtt_insert_page;
- ggtt->base.clear_range = nop_clear_range;
+ ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+ ggtt->vm.insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.clear_range = nop_clear_range;
if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
- ggtt->base.clear_range = gen8_ggtt_clear_range;
+ ggtt->vm.clear_range = gen8_ggtt_clear_range;
- ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+ ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
- ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
- ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- if (ggtt->base.clear_range != nop_clear_range)
- ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+ ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+ if (ggtt->vm.clear_range != nop_clear_range)
+ ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
}
ggtt->invalidate = gen6_ggtt_invalidate;
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
setup_private_pat(dev_priv);
return ggtt_probe_common(ggtt, size);
@@ -3380,7 +3437,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
@@ -3407,29 +3464,30 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+ ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
- ggtt->base.clear_range = gen6_ggtt_clear_range;
- ggtt->base.insert_page = gen6_ggtt_insert_page;
- ggtt->base.insert_entries = gen6_ggtt_insert_entries;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.cleanup = gen6_gmch_remove;
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.insert_page = gen6_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
if (HAS_EDRAM(dev_priv))
- ggtt->base.pte_encode = iris_pte_encode;
+ ggtt->vm.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev_priv))
- ggtt->base.pte_encode = hsw_pte_encode;
+ ggtt->vm.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev_priv))
- ggtt->base.pte_encode = byt_pte_encode;
+ ggtt->vm.pte_encode = byt_pte_encode;
else if (INTEL_GEN(dev_priv) >= 7)
- ggtt->base.pte_encode = ivb_pte_encode;
+ ggtt->vm.pte_encode = ivb_pte_encode;
else
- ggtt->base.pte_encode = snb_pte_encode;
+ ggtt->vm.pte_encode = snb_pte_encode;
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
return ggtt_probe_common(ggtt, size);
}
@@ -3441,7 +3499,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
phys_addr_t gmadr_base;
int ret;
@@ -3451,26 +3509,25 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
return -EIO;
}
- intel_gtt_get(&ggtt->base.total,
- &gmadr_base,
- &ggtt->mappable_end);
+ intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
ggtt->gmadr =
(struct resource) DEFINE_RES_MEM(gmadr_base,
ggtt->mappable_end);
ggtt->do_idle_maps = needs_idle_maps(dev_priv);
- ggtt->base.insert_page = i915_ggtt_insert_page;
- ggtt->base.insert_entries = i915_ggtt_insert_entries;
- ggtt->base.clear_range = i915_ggtt_clear_range;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.cleanup = i915_gmch_remove;
+ ggtt->vm.insert_page = i915_ggtt_insert_page;
+ ggtt->vm.insert_entries = i915_ggtt_insert_entries;
+ ggtt->vm.clear_range = i915_ggtt_clear_range;
+ ggtt->vm.cleanup = i915_gmch_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
if (unlikely(ggtt->do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -3486,8 +3543,8 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- ggtt->base.i915 = dev_priv;
- ggtt->base.dma = &dev_priv->drm.pdev->dev;
+ ggtt->vm.i915 = dev_priv;
+ ggtt->vm.dma = &dev_priv->drm.pdev->dev;
if (INTEL_GEN(dev_priv) <= 5)
ret = i915_gmch_probe(ggtt);
@@ -3504,27 +3561,29 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
* restriction!
*/
if (USES_GUC(dev_priv)) {
- ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
- ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+ ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
+ ggtt->mappable_end =
+ min_t(u64, ggtt->mappable_end, ggtt->vm.total);
}
- if ((ggtt->base.total - 1) >> 32) {
+ if ((ggtt->vm.total - 1) >> 32) {
DRM_ERROR("We never expected a Global GTT with more than 32bits"
" of address space! Found %lldM!\n",
- ggtt->base.total >> 20);
- ggtt->base.total = 1ULL << 32;
- ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+ ggtt->vm.total >> 20);
+ ggtt->vm.total = 1ULL << 32;
+ ggtt->mappable_end =
+ min_t(u64, ggtt->mappable_end, ggtt->vm.total);
}
- if (ggtt->mappable_end > ggtt->base.total) {
+ if (ggtt->mappable_end > ggtt->vm.total) {
DRM_ERROR("mappable aperture extends past end of GGTT,"
" aperture=%pa, total=%llx\n",
- &ggtt->mappable_end, ggtt->base.total);
- ggtt->mappable_end = ggtt->base.total;
+ &ggtt->mappable_end, ggtt->vm.total);
+ ggtt->mappable_end = ggtt->vm.total;
}
/* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("DSM size = %lluM\n",
(u64)resource_size(&intel_graphics_stolen_res) >> 20);
@@ -3543,7 +3602,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- INIT_LIST_HEAD(&dev_priv->vm_list);
+ stash_init(&dev_priv->mm.wc_stash);
/* Note that we use page colouring to enforce a guard page at the
* end of the address space. This is required as the CS may prefetch
@@ -3551,9 +3610,13 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
* and beyond the end of the GTT if we do not provide a guard.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_address_space_init(&ggtt->base, dev_priv, "[global]");
+ i915_address_space_init(&ggtt->vm, dev_priv);
+
+ /* Only VLV supports read-only GGTT mappings */
+ ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
+
if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
- ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+ ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
@@ -3576,7 +3639,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
return 0;
out_gtt_cleanup:
- ggtt->base.cleanup(&ggtt->base);
+ ggtt->vm.cleanup(&ggtt->vm);
return ret;
}
@@ -3610,34 +3673,35 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915)
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct drm_i915_gem_object *obj, *on;
+ struct i915_vma *vma, *vn;
i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
- ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
- ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
+ ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
- bool ggtt_bound = false;
- struct i915_vma *vma;
+ GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
- for_each_ggtt_vma(vma, obj) {
- if (!i915_vma_unbind(vma))
- continue;
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND))
+ continue;
- WARN_ON(i915_vma_bind(vma, obj->cache_level,
- PIN_UPDATE));
- ggtt_bound = true;
- }
+ if (!i915_vma_unbind(vma))
+ continue;
- if (ggtt_bound)
+ WARN_ON(i915_vma_bind(vma,
+ obj ? obj->cache_level : 0,
+ PIN_UPDATE));
+ if (obj)
WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
}
- ggtt->base.closed = false;
+ ggtt->vm.closed = false;
+ i915_ggtt_invalidate(dev_priv);
if (INTEL_GEN(dev_priv) >= 8) {
struct intel_ppat *ppat = &dev_priv->ppat;
@@ -3646,23 +3710,6 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
dev_priv->ppat.update_hw(dev_priv);
return;
}
-
- if (USES_PPGTT(dev_priv)) {
- struct i915_address_space *vm;
-
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- struct i915_hw_ppgtt *ppgtt;
-
- if (i915_is_ggtt(vm))
- ppgtt = dev_priv->mm.aliasing_ppgtt;
- else
- ppgtt = i915_vm_to_ppgtt(vm);
-
- gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
- }
- }
-
- i915_ggtt_invalidate(dev_priv);
}
static struct scatterlist *
@@ -3880,7 +3927,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(range_overflows(offset, size, vm->total));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
node->size = size;
@@ -3977,7 +4024,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
GEM_BUG_ON(start >= end);
GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
if (unlikely(range_overflows(start, size, end)))
@@ -3988,7 +4035,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
mode = DRM_MM_INSERT_BEST;
if (flags & PIN_HIGH)
- mode = DRM_MM_INSERT_HIGH;
+ mode = DRM_MM_INSERT_HIGHEST;
if (flags & PIN_MAPPABLE)
mode = DRM_MM_INSERT_LOW;
@@ -4008,6 +4055,15 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
if (err != -ENOSPC)
return err;
+ if (mode & DRM_MM_INSERT_ONCE) {
+ err = drm_mm_insert_node_in_range(&vm->mm, node,
+ size, alignment, color,
+ start, end,
+ DRM_MM_INSERT_BEST);
+ if (err != -ENOSPC)
+ return err;
+ }
+
if (flags & PIN_NOEVICT)
return -ENOSPC;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index aec4f73574f4..2a116a91420b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -58,6 +58,7 @@
struct drm_i915_file_private;
struct drm_i915_fence_reg;
+struct i915_vma;
typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;
@@ -65,7 +66,7 @@ typedef u64 gen8_pde_t;
typedef u64 gen8_ppgtt_pdpe_t;
typedef u64 gen8_ppgtt_pml4e_t;
-#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
@@ -254,6 +255,26 @@ struct i915_pml4 {
struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};
+struct i915_vma_ops {
+ /* Map an object into an address space with the given cache flags. */
+ int (*bind_vma)(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ /*
+ * Unmap an object from an address space. This usually consists of
+ * setting the valid PTE entries to a reserved scratch page.
+ */
+ void (*unbind_vma)(struct i915_vma *vma);
+
+ int (*set_pages)(struct i915_vma *vma);
+ void (*clear_pages)(struct i915_vma *vma);
+};
+
+struct pagestash {
+ spinlock_t lock;
+ struct pagevec pvec;
+};
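/*
 * Editor's note: struct pagestash above pairs the existing pagevec
 * with its own spinlock, so the WC page stash no longer relies on an
 * outer lock. The locked-stash shape, with a pthread mutex standing in
 * for spinlock_t and hypothetical sizes:
 */
#include <pthread.h>

struct stash {
	pthread_mutex_t lock;
	void *pages[15];	/* cf. PAGEVEC_SIZE */
	unsigned int nr;
};

static void *stash_pop(struct stash *s)
{
	void *page = NULL;

	pthread_mutex_lock(&s->lock);
	if (s->nr)
		page = s->pages[--s->nr];
	pthread_mutex_unlock(&s->lock);

	return page;
}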
+
struct i915_address_space {
struct drm_mm mm;
struct drm_i915_private *i915;
@@ -267,12 +288,13 @@ struct i915_address_space {
* assign blame.
*/
struct drm_i915_file_private *file;
- struct list_head global_link;
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
u64 reserved; /* size addr space reserved */
bool closed;
+ struct mutex mutex; /* protects vma and our lists */
+
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
@@ -308,8 +330,13 @@ struct i915_address_space {
*/
struct list_head unbound_list;
- struct pagevec free_pages;
- bool pt_kmap_wc;
+ struct pagestash free_pages;
+
+ /* Some systems require uncached updates of the page directories */
+ bool pt_kmap_wc:1;
+
+ /* Some systems support read-only mappings for GGTT and/or PPGTT */
+ bool has_read_only:1;
/* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr,
@@ -331,15 +358,8 @@ struct i915_address_space {
enum i915_cache_level cache_level,
u32 flags);
void (*cleanup)(struct i915_address_space *vm);
- /** Unmap an object from an address space. This usually consists of
- * setting the valid PTE entries to a reserved scratch page. */
- void (*unbind_vma)(struct i915_vma *vma);
- /* Map an object into an address space with the given cache flags. */
- int (*bind_vma)(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
- int (*set_pages)(struct i915_vma *vma);
- void (*clear_pages)(struct i915_vma *vma);
+
+ struct i915_vma_ops vma_ops;
I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
I915_SELFTEST_DECLARE(bool scrub_64K);
@@ -367,7 +387,7 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
* the spec.
*/
struct i915_ggtt {
- struct i915_address_space base;
+ struct i915_address_space vm;
struct io_mapping iomap; /* Mapping to our CPU mappable region */
struct resource gmadr; /* GMADR resource */
@@ -385,9 +405,9 @@ struct i915_ggtt {
};
struct i915_hw_ppgtt {
- struct i915_address_space base;
+ struct i915_address_space vm;
struct kref ref;
- struct drm_mm_node node;
+
unsigned long pd_dirty_rings;
union {
struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
@@ -395,13 +415,28 @@ struct i915_hw_ppgtt {
struct i915_page_directory pd; /* GEN6-7 */
};
+ void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+};
+
+struct gen6_hw_ppgtt {
+ struct i915_hw_ppgtt base;
+
+ struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
+ gen6_pte_t scratch_pte;
- int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq);
- void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+ unsigned int pin_count;
+ bool scan_for_unused_pt;
};
+#define __to_gen6_ppgtt(base) container_of(base, struct gen6_hw_ppgtt, base)
+
+static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base)
+{
+ BUILD_BUG_ON(offsetof(struct gen6_hw_ppgtt, base));
+ return __to_gen6_ppgtt(base);
+}
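/*
 * Editor's note: to_gen6_ppgtt() above is safe as a plain downcast
 * only because the base struct is the *first* member; the
 * BUILD_BUG_ON(offsetof(...)) pins that down at compile time. The same
 * guarantee in standard C:
 */
#include <stddef.h>

struct base { int x; };

struct derived {
	struct base base;	/* must stay first */
	int extra;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

_Static_assert(offsetof(struct derived, base) == 0,
	       "base must remain the first member");

static struct derived *to_derived(struct base *b)
{
	return container_of(b, struct derived, base);
}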
+
/*
* gen6_for_each_pde() iterates over every pde from start until start+length.
* If start and start+length are not perfectly divisible, the macro will round
@@ -440,8 +475,8 @@ static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
const u64 mask = ~((1ULL << pde_shift) - 1);
u64 end;
- WARN_ON(length == 0);
- WARN_ON(offset_in_page(addr|length));
+ GEM_BUG_ON(length == 0);
+ GEM_BUG_ON(offset_in_page(addr | length));
end = addr + length;
@@ -543,7 +578,7 @@ static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
GEM_BUG_ON(!i915_is_ggtt(vm));
- return container_of(vm, struct i915_ggtt, base);
+ return container_of(vm, struct i915_ggtt, vm);
}
#define INTEL_MAX_PPAT_ENTRIES 8
@@ -591,8 +626,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv,
- const char *name);
+ struct drm_i915_file_private *fpriv);
void i915_ppgtt_close(struct i915_address_space *vm);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
@@ -605,6 +639,9 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
kref_put(&ppgtt->ref, i915_ppgtt_release);
}
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
+
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 54f00b350779..83e5e01fa9ea 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -141,7 +141,6 @@ struct drm_i915_gem_object {
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
*/
- unsigned long gt_ro:1;
unsigned int cache_level:3;
unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
@@ -268,7 +267,6 @@ struct drm_i915_gem_object {
union {
struct i915_gem_userptr {
uintptr_t ptr;
- unsigned read_only :1;
struct i915_mm_struct *mm;
struct i915_mmu_object *mmu_object;
@@ -337,26 +335,17 @@ __attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
- drm_gem_object_reference(&obj->base);
+ drm_gem_object_get(&obj->base);
return obj;
}
-__deprecated
-extern void drm_gem_object_reference(struct drm_gem_object *);
-
__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
- __drm_gem_object_unreference(&obj->base);
+ __drm_gem_object_put(&obj->base);
}
-__deprecated
-extern void drm_gem_object_unreference(struct drm_gem_object *);
-
-__deprecated
-extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
-
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
reservation_object_lock(obj->resv, NULL);
@@ -367,6 +356,18 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
reservation_object_unlock(obj->resv);
}
+static inline void
+i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
+{
+ obj->base.vma_node.readonly = true;
+}
+
+static inline bool
+i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
+{
+ return obj->base.vma_node.readonly;
+}
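/*
 * Editor's note: with the gt_ro and userptr read_only bitfields gone,
 * read-only-ness lives on the GEM object's vma_node and is consumed
 * at bind time; cf. ggtt_bind_vma() earlier in this patch:
 *
 *	pte_flags = 0;
 *	if (i915_gem_object_is_readonly(obj))
 *		pte_flags |= PTE_READ_ONLY;
 */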
+
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 1036e8686916..90baf9086d0a 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
if (IS_ERR(so.obj))
return PTR_ERR(so.obj);
- so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL);
+ so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(so.vma)) {
err = PTR_ERR(so.vma);
goto err_obj;
@@ -222,7 +222,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
goto err_unpin;
}
- i915_vma_move_to_active(so.vma, rq, 0);
+ err = i915_vma_move_to_active(so.vma, rq, 0);
err_unpin:
i915_vma_unpin(so.vma);
err_vma:
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 5757fb7c4b5a..ea90d3a0d511 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -23,6 +23,7 @@
*/
#include <linux/oom.h>
+#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -172,7 +173,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
* we will free as much as we can and hope to get a second chance.
*/
if (flags & I915_SHRINK_ACTIVE)
- i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
trace_i915_gem_shrink(i915, target, flags);
i915_retire_requests(i915);
@@ -392,7 +395,8 @@ shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
do {
- if (i915_gem_wait_for_idle(i915, 0) == 0 &&
+ if (i915_gem_wait_for_idle(i915,
+ 0, MAX_SCHEDULE_TIMEOUT) == 0 &&
shrinker_lock(i915, unlock))
break;
@@ -466,7 +470,9 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
return NOTIFY_DONE;
/* Force everything onto the inactive lists */
- ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ ret = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
@@ -480,7 +486,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
/* We also want to clear any cached iomaps as they wrap vmap */
list_for_each_entry_safe(vma, next,
- &i915->ggtt.base.inactive_list, vm_link) {
+ &i915->ggtt.vm.inactive_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
if (vma->iomap && i915_vma_unbind(vma) == 0)
freed_pages += count;
@@ -526,3 +532,14 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
unregister_shrinker(&i915->mm.shrinker);
}
+
+void i915_gem_shrinker_taints_mutex(struct mutex *mutex)
+{
+ if (!IS_ENABLED(CONFIG_LOCKDEP))
+ return;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ mutex_lock(mutex);
+ mutex_unlock(mutex);
+ fs_reclaim_release(GFP_KERNEL);
+}
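Note: i915_gem_shrinker_taints_mutex() is a lockdep priming trick: taking the
mutex once inside an fs_reclaim_acquire()/release() pair records the ordering
"reclaim, then mutex", so any later GFP_KERNEL allocation made while holding
that mutex (the inverse order, reachable through the shrinker) is reported
immediately instead of only on a rare memory-pressure event. A sketch of the
inversion it is meant to flag, with a hypothetical mutex:

    static DEFINE_MUTEX(m);

    i915_gem_shrinker_taints_mutex(&m);  /* teach lockdep: fs_reclaim -> m */

    mutex_lock(&m);
    p = kmalloc(sz, GFP_KERNEL);  /* may enter reclaim -> shrinker -> m: splat */
    mutex_unlock(&m);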
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index ad949cc30928..53440bf87650 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -254,6 +254,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
default:
MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
+ /* fall through */
case GEN7_STOLEN_RESERVED_1M:
*size = 1024 * 1024;
break;
@@ -343,6 +344,35 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
*size = stolen_top - *base;
}
+static void icl_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ resource_size_t *base,
+ resource_size_t *size)
+{
+ u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED);
+
+ DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
+
+ *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
+
+ switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
+ case GEN8_STOLEN_RESERVED_1M:
+ *size = 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_2M:
+ *size = 2 * 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_4M:
+ *size = 4 * 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_8M:
+ *size = 8 * 1024 * 1024;
+ break;
+ default:
+ *size = 8 * 1024 * 1024;
+ MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
+ }
+}
+
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
resource_size_t reserved_base, stolen_top;
@@ -399,7 +429,9 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
gen7_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
break;
- default:
+ case 8:
+ case 9:
+ case 10:
if (IS_LP(dev_priv))
chv_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
@@ -407,6 +439,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
bdw_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
break;
+ case 11:
+ default:
+ icl_get_stolen_reserved(dev_priv, &reserved_base,
+ &reserved_size);
+ break;
}
/*
@@ -642,7 +679,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (ret)
goto err;
- vma = i915_vma_instance(obj, &ggtt->base, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_pages;
@@ -653,7 +690,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
* setting up the GTT space. The actual reservation will occur
* later.
*/
- ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+ ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
size, gtt_offset, obj->cache_level,
0);
if (ret) {
@@ -666,7 +703,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+ list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
spin_lock(&dev_priv->mm.obj_lock);
list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
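Note: two small things in the stolen-memory diff. The bare /* fall through */
comment documents (and, under -Wimplicit-fallthrough, silences the warning for)
the deliberate drop from the MISSING_CASE() default into the smallest-size
case; the idiom on a hypothetical register field:

    switch (val & SIZE_MASK) {              /* SIZE_MASK is illustrative */
    default:
            MISSING_CASE(val & SIZE_MASK);
            /* fall through */              /* unknown encoding: assume 1M */
    case SIZE_1M:
            *size = SZ_1M;
            break;
    }

And icl_get_stolen_reserved() reuses the gen8 size-field encoding while
applying the wider gen11 address mask, which is why only
GEN11_STOLEN_RESERVED_ADDR_MASK is new.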
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 854bd51b9478..dcd6e230d16a 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
struct mm_struct *mm = obj->userptr.mm->mm;
unsigned int flags = 0;
- if (!obj->userptr.read_only)
+ if (!i915_gem_object_is_readonly(obj))
flags |= FOLL_WRITE;
ret = -EFAULT;
@@ -643,7 +643,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (pvec) /* defer to worker if malloc fails */
pinned = __get_user_pages_fast(obj->userptr.ptr,
num_pages,
- !obj->userptr.read_only,
+ !i915_gem_object_is_readonly(obj),
pvec);
}
@@ -789,10 +789,15 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -EFAULT;
if (args->flags & I915_USERPTR_READ_ONLY) {
- /* On almost all of the current hw, we cannot tell the GPU that a
- * page is readonly, so this is just a placeholder in the uAPI.
+ struct i915_hw_ppgtt *ppgtt;
+
+ /*
+ * On almost all of the older hw, we cannot tell the GPU that
+ * a page is readonly.
*/
- return -ENODEV;
+ ppgtt = dev_priv->kernel_context->ppgtt;
+ if (!ppgtt || !ppgtt->vm.has_read_only)
+ return -ENODEV;
}
obj = i915_gem_object_alloc(dev_priv);
@@ -806,7 +811,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
obj->userptr.ptr = args->user_ptr;
- obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
+ if (args->flags & I915_USERPTR_READ_ONLY)
+ i915_gem_object_set_readonly(obj);
/* And keep a pointer to the current->mm for resolving the user pages
* at binding. This means that we need to hook into the mmu_notifier
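Note: I915_USERPTR_READ_ONLY stops being a rejected placeholder; it is honoured
when the kernel context's ppgtt advertises has_read_only, the object is flagged
through i915_gem_object_set_readonly(), and the flag later keeps FOLL_WRITE out
of the get_user_pages flags (first two hunks of this file). What a userspace
request looks like, as a sketch with error handling elided:

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    struct drm_i915_gem_userptr arg = {
            .user_ptr = (__u64)(uintptr_t)buf,  /* buf, len: caller's memory */
            .user_size = len,
            .flags = I915_USERPTR_READ_ONLY,    /* -ENODEV without RO PPGTT */
    };
    ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg);  /* arg.handle on success */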
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index df234dc23274..f7f2aa71d8d9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -31,6 +31,7 @@
#include <linux/stop_machine.h>
#include <linux/zlib.h>
#include <drm/drm_print.h>
+#include <linux/ascii85.h>
#include "i915_gpu_error.h"
#include "i915_drv.h"
@@ -335,21 +336,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
struct drm_i915_error_buffer *err,
int count)
{
- int i;
-
err_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- err_printf(m, " %08x_%08x %8u %02x %02x [ ",
+ err_printf(m, " %08x_%08x %8u %02x %02x %02x",
upper_32_bits(err->gtt_offset),
lower_32_bits(err->gtt_offset),
err->size,
err->read_domains,
- err->write_domain);
- for (i = 0; i < I915_NUM_ENGINES; i++)
- err_printf(m, "%02x ", err->rseqno[i]);
-
- err_printf(m, "] %02x", err->wseqno);
+ err->write_domain,
+ err->wseqno);
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
err_puts(m, purgeable_flag(err->purgeable));
@@ -522,35 +518,12 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
-static int
-ascii85_encode_len(int len)
-{
- return DIV_ROUND_UP(len, 4);
-}
-
-static bool
-ascii85_encode(u32 in, char *out)
-{
- int i;
-
- if (in == 0)
- return false;
-
- out[5] = '\0';
- for (i = 5; i--; ) {
- out[i] = '!' + in % 85;
- in /= 85;
- }
-
- return true;
-}
-
static void print_error_obj(struct drm_i915_error_state_buf *m,
struct intel_engine_cs *engine,
const char *name,
struct drm_i915_error_object *obj)
{
- char out[6];
+ char out[ASCII85_BUFSZ];
int page;
if (!obj)
@@ -572,12 +545,8 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
len -= obj->unused;
len = ascii85_encode_len(len);
- for (i = 0; i < len; i++) {
- if (ascii85_encode(obj->pages[page][i], out))
- err_puts(m, out);
- else
- err_puts(m, "z");
- }
+ for (i = 0; i < len; i++)
+ err_puts(m, ascii85_encode(obj->pages[page][i], out));
}
err_puts(m, "\n");
}
@@ -973,8 +942,7 @@ i915_error_object_create(struct drm_i915_private *i915,
void __iomem *s;
int ret;
- ggtt->base.insert_page(&ggtt->base, dma, slot,
- I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
ret = compress_page(&compress, (void __force *)s, dst);
@@ -993,7 +961,7 @@ unwind:
out:
compress_fini(&compress, dst);
- ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
return dst;
}
@@ -1022,13 +990,10 @@ static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
- int i;
err->size = obj->base.size;
err->name = obj->base.name;
- for (i = 0; i < I915_NUM_ENGINES; i++)
- err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
err->engine = __active_get_engine_id(&obj->frontbuffer_write);
@@ -1051,6 +1016,9 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
int i = 0;
list_for_each_entry(vma, head, vm_link) {
+ if (!vma->obj)
+ continue;
+
if (pinned_only && !i915_vma_is_pinned(vma))
continue;
@@ -1287,9 +1255,11 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
static void record_request(struct i915_request *request,
struct drm_i915_error_request *erq)
{
- erq->context = request->ctx->hw_id;
+ struct i915_gem_context *ctx = request->gem_context;
+
+ erq->context = ctx->hw_id;
erq->sched_attr = request->sched.attr;
- erq->ban_score = atomic_read(&request->ctx->ban_score);
+ erq->ban_score = atomic_read(&ctx->ban_score);
erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->start = i915_ggtt_offset(request->ring->vma);
@@ -1297,7 +1267,7 @@ static void record_request(struct i915_request *request,
erq->tail = request->tail;
rcu_read_lock();
- erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+ erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
rcu_read_unlock();
}
@@ -1461,12 +1431,12 @@ static void gem_record_rings(struct i915_gpu_state *error)
request = i915_gem_find_active_request(engine);
if (request) {
+ struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring;
- ee->vm = request->ctx->ppgtt ?
- &request->ctx->ppgtt->base : &ggtt->base;
+ ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
- record_context(&ee->context, request->ctx);
+ record_context(&ee->context, ctx);
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
@@ -1483,11 +1453,10 @@ static void gem_record_rings(struct i915_gpu_state *error)
ee->ctx =
i915_error_object_create(i915,
- to_intel_context(request->ctx,
- engine)->state);
+ request->hw_context->state);
error->simulated |=
- i915_gem_context_no_error_capture(request->ctx);
+ i915_gem_context_no_error_capture(ctx);
ee->rq_head = request->head;
ee->rq_post = request->postfix;
@@ -1563,17 +1532,17 @@ static void capture_active_buffers(struct i915_gpu_state *error)
static void capture_pinned_buffers(struct i915_gpu_state *error)
{
- struct i915_address_space *vm = &error->i915->ggtt.base;
+ struct i915_address_space *vm = &error->i915->ggtt.vm;
struct drm_i915_error_buffer *bo;
struct i915_vma *vma;
int count_inactive, count_active;
count_inactive = 0;
- list_for_each_entry(vma, &vm->active_list, vm_link)
+ list_for_each_entry(vma, &vm->inactive_list, vm_link)
count_inactive++;
count_active = 0;
- list_for_each_entry(vma, &vm->inactive_list, vm_link)
+ list_for_each_entry(vma, &vm->active_list, vm_link)
count_active++;
bo = NULL;
@@ -1667,7 +1636,16 @@ static void capture_reg_state(struct i915_gpu_state *error)
}
/* 4: Everything else */
- if (INTEL_GEN(dev_priv) >= 8) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ error->ier = I915_READ(GEN8_DE_MISC_IER);
+ error->gtier[0] = I915_READ(GEN11_RENDER_COPY_INTR_ENABLE);
+ error->gtier[1] = I915_READ(GEN11_VCS_VECS_INTR_ENABLE);
+ error->gtier[2] = I915_READ(GEN11_GUC_SG_INTR_ENABLE);
+ error->gtier[3] = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ error->gtier[4] = I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE);
+ error->gtier[5] = I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE);
+ error->ngtier = 6;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
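Note: the driver-local base-85 helpers are dropped in favour of
<linux/ascii85.h>; the library ascii85_encode() returns the buffer it filled,
or the literal "z" shorthand for an all-zero word, which is why the call site
collapses to a single err_puts(). Its contract, in brief:

    char out[ASCII85_BUFSZ];                 /* 5 chars + NUL */
    err_puts(m, ascii85_encode(0, out));     /* emits "z" */
    err_puts(m, ascii85_encode(w, out));     /* emits 5 chars of base-85 */

The register capture above also grows to six gen11 GT enable registers;
i915_gpu_error.h below widens the gtier[] array to match.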
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index dac0f8c4c1cf..f893a4e8b783 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -58,7 +58,7 @@ struct i915_gpu_state {
u32 eir;
u32 pgtbl_er;
u32 ier;
- u32 gtier[4], ngtier;
+ u32 gtier[6], ngtier;
u32 ccid;
u32 derrmr;
u32 forcewake;
@@ -177,7 +177,7 @@ struct i915_gpu_state {
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 rseqno[I915_NUM_ENGINES], wseqno;
+ u32 wseqno;
u64 gtt_offset;
u32 read_domains;
u32 write_domain;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c16cb025755e..90628a47ae17 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -115,6 +115,22 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
+static const u32 hpd_gen11[HPD_NUM_PINS] = {
+ [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
+ [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
+ [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
+ [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
+};
+
+static const u32 hpd_icp[HPD_NUM_PINS] = {
+ [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
+ [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
+ [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
+ [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
+ [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
+ [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
+};
+
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
@@ -247,9 +263,9 @@ static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
const unsigned int bank, const unsigned int bit);
-bool gen11_reset_one_iir(struct drm_i915_private * const i915,
- const unsigned int bank,
- const unsigned int bit)
+static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+ const unsigned int bank,
+ const unsigned int bit)
{
void __iomem * const regs = i915->regs;
u32 dw;
@@ -1138,21 +1154,21 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
static void notify_ring(struct intel_engine_cs *engine)
{
+ const u32 seqno = intel_engine_get_seqno(engine);
struct i915_request *rq = NULL;
+ struct task_struct *tsk = NULL;
struct intel_wait *wait;
- if (!engine->breadcrumbs.irq_armed)
+ if (unlikely(!engine->breadcrumbs.irq_armed))
return;
- atomic_inc(&engine->irq_count);
- set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+ rcu_read_lock();
spin_lock(&engine->breadcrumbs.irq_lock);
wait = engine->breadcrumbs.irq_wait;
if (wait) {
- bool wakeup = engine->irq_seqno_barrier;
-
- /* We use a callback from the dma-fence to submit
+ /*
+ * We use a callback from the dma-fence to submit
* requests after waiting on our own requests. To
* ensure minimum delay in queuing the next request to
* hardware, signal the fence now rather than wait for
@@ -1163,19 +1179,26 @@ static void notify_ring(struct intel_engine_cs *engine)
* and to handle coalescing of multiple seqno updates
* and many waiters.
*/
- if (i915_seqno_passed(intel_engine_get_seqno(engine),
- wait->seqno)) {
+ if (i915_seqno_passed(seqno, wait->seqno)) {
struct i915_request *waiter = wait->request;
- wakeup = true;
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ if (waiter &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&waiter->fence.flags) &&
intel_wait_check_request(wait, waiter))
rq = i915_request_get(waiter);
+
+ tsk = wait->tsk;
+ } else {
+ if (engine->irq_seqno_barrier &&
+ i915_seqno_passed(seqno, wait->seqno - 1)) {
+ set_bit(ENGINE_IRQ_BREADCRUMB,
+ &engine->irq_posted);
+ tsk = wait->tsk;
+ }
}
- if (wakeup)
- wake_up_process(wait->tsk);
+ engine->breadcrumbs.irq_count++;
} else {
if (engine->breadcrumbs.irq_armed)
__intel_engine_disarm_breadcrumbs(engine);
@@ -1183,11 +1206,19 @@ static void notify_ring(struct intel_engine_cs *engine)
spin_unlock(&engine->breadcrumbs.irq_lock);
if (rq) {
- dma_fence_signal(&rq->fence);
+ spin_lock(&rq->lock);
+ dma_fence_signal_locked(&rq->fence);
GEM_BUG_ON(!i915_request_completed(rq));
+ spin_unlock(&rq->lock);
+
i915_request_put(rq);
}
+ if (tsk && tsk->state & TASK_NORMAL)
+ wake_up_process(tsk);
+
+ rcu_read_unlock();
+
trace_intel_engine_notify(engine, wait);
}
@@ -1234,9 +1265,9 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
c0 = max(render, media);
c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
- if (c0 > time * rps->up_threshold)
+ if (c0 > time * rps->power.up_threshold)
events = GEN6_PM_RP_UP_THRESHOLD;
- else if (c0 < time * rps->down_threshold)
+ else if (c0 < time * rps->power.down_threshold)
events = GEN6_PM_RP_DOWN_THRESHOLD;
}
@@ -1462,14 +1493,10 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
bool tasklet = false;
- if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
- if (READ_ONCE(engine->execlists.active))
- tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
- &engine->irq_posted);
- }
+ if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+ tasklet = true;
if (iir & GT_RENDER_USER_INTERRUPT) {
notify_ring(engine);
@@ -1477,7 +1504,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
}
if (tasklet)
- tasklet_hi_schedule(&execlists->tasklet);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
}
static void gen8_gt_irq_ack(struct drm_i915_private *i915,
@@ -1549,78 +1576,122 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
}
}
-static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
+static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_A:
+ switch (pin) {
+ case HPD_PORT_C:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
+ case HPD_PORT_D:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
+ case HPD_PORT_E:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
+ case HPD_PORT_F:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
+ default:
+ return false;
+ }
+}
+
+static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_A:
return val & PORTA_HOTPLUG_LONG_DETECT;
- case PORT_B:
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_LONG_DETECT;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
+static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_A:
+ return val & ICP_DDIA_HPD_LONG_DETECT;
+ case HPD_PORT_B:
+ return val & ICP_DDIB_HPD_LONG_DETECT;
+ default:
+ return false;
+ }
+}
+
+static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_C:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
+ case HPD_PORT_D:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
+ case HPD_PORT_E:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
+ case HPD_PORT_F:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
+ default:
+ return false;
+ }
+}
+
+static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_E:
+ switch (pin) {
+ case HPD_PORT_E:
return val & PORTE_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool spt_port_hotplug_long_detect(enum port port, u32 val)
+static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_A:
+ switch (pin) {
+ case HPD_PORT_A:
return val & PORTA_HOTPLUG_LONG_DETECT;
- case PORT_B:
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_LONG_DETECT;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_LONG_DETECT;
- case PORT_D:
+ case HPD_PORT_D:
return val & PORTD_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
+static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_A:
+ switch (pin) {
+ case HPD_PORT_A:
return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool pch_port_hotplug_long_detect(enum port port, u32 val)
+static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_B:
+ switch (pin) {
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_LONG_DETECT;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_LONG_DETECT;
- case PORT_D:
+ case HPD_PORT_D:
return val & PORTD_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
+static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_B:
+ switch (pin) {
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_INT_LONG_PULSE;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_INT_LONG_PULSE;
- case PORT_D:
+ case HPD_PORT_D:
return val & PORTD_HOTPLUG_INT_LONG_PULSE;
default:
return false;
@@ -1638,27 +1709,22 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
u32 *pin_mask, u32 *long_mask,
u32 hotplug_trigger, u32 dig_hotplug_reg,
const u32 hpd[HPD_NUM_PINS],
- bool long_pulse_detect(enum port port, u32 val))
+ bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
- enum port port;
- int i;
+ enum hpd_pin pin;
- for_each_hpd_pin(i) {
- if ((hpd[i] & hotplug_trigger) == 0)
+ for_each_hpd_pin(pin) {
+ if ((hpd[pin] & hotplug_trigger) == 0)
continue;
- *pin_mask |= BIT(i);
+ *pin_mask |= BIT(pin);
- port = intel_hpd_pin_to_port(dev_priv, i);
- if (port == PORT_NONE)
- continue;
-
- if (long_pulse_detect(port, dig_hotplug_reg))
- *long_mask |= BIT(i);
+ if (long_pulse_detect(pin, dig_hotplug_reg))
+ *long_mask |= BIT(pin);
}
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
- hotplug_trigger, dig_hotplug_reg, *pin_mask);
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
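Note: the long-pulse detectors are re-keyed from enum port to the logical
hpd_pin, which removes the intel_hpd_pin_to_port() lookup from the loop above
and suits gen11, whose TC/TBT pins do not map one-to-one onto ports. Each
platform supplies a trigger table plus a detector of this shape (all names
below are made up):

    static const u32 hpd_foo[HPD_NUM_PINS] = {
            [HPD_PORT_A] = FOO_DDIA_HOTPLUG,        /* hypothetical bits */
    };

    static bool foo_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
    {
            switch (pin) {
            case HPD_PORT_A:
                    return val & FOO_DDIA_LONG_DETECT;  /* hypothetical */
            default:
                    return false;
            }
    }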
@@ -1680,69 +1746,34 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
uint32_t crc4)
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- struct intel_pipe_crc_entry *entry;
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct drm_driver *driver = dev_priv->drm.driver;
uint32_t crcs[5];
- int head, tail;
spin_lock(&pipe_crc->lock);
- if (pipe_crc->source && !crtc->base.crc.opened) {
- if (!pipe_crc->entries) {
- spin_unlock(&pipe_crc->lock);
- DRM_DEBUG_KMS("spurious interrupt\n");
- return;
- }
-
- head = pipe_crc->head;
- tail = pipe_crc->tail;
-
- if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
- spin_unlock(&pipe_crc->lock);
- DRM_ERROR("CRC buffer overflowing\n");
- return;
- }
-
- entry = &pipe_crc->entries[head];
-
- entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
- entry->crc[0] = crc0;
- entry->crc[1] = crc1;
- entry->crc[2] = crc2;
- entry->crc[3] = crc3;
- entry->crc[4] = crc4;
-
- head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
- pipe_crc->head = head;
-
- spin_unlock(&pipe_crc->lock);
-
- wake_up_interruptible(&pipe_crc->wq);
- } else {
- /*
- * For some not yet identified reason, the first CRC is
- * bonkers. So let's just wait for the next vblank and read
- * out the buggy result.
- *
- * On GEN8+ sometimes the second CRC is bonkers as well, so
- * don't trust that one either.
- */
- if (pipe_crc->skipped <= 0 ||
- (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
- pipe_crc->skipped++;
- spin_unlock(&pipe_crc->lock);
- return;
- }
+ /*
+ * For some not yet identified reason, the first CRC is
+ * bonkers. So let's just wait for the next vblank and read
+ * out the buggy result.
+ *
+ * On GEN8+ sometimes the second CRC is bonkers as well, so
+ * don't trust that one either.
+ */
+ if (pipe_crc->skipped <= 0 ||
+ (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
+ pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
- crcs[0] = crc0;
- crcs[1] = crc1;
- crcs[2] = crc2;
- crcs[3] = crc3;
- crcs[4] = crc4;
- drm_crtc_add_crc_entry(&crtc->base, true,
- drm_crtc_accurate_vblank_count(&crtc->base),
- crcs);
+ return;
}
+ spin_unlock(&pipe_crc->lock);
+
+ crcs[0] = crc0;
+ crcs[1] = crc1;
+ crcs[2] = crc2;
+ crcs[3] = crc3;
+ crcs[4] = crc4;
+ drm_crtc_add_crc_entry(&crtc->base, true,
+ drm_crtc_accurate_vblank_count(&crtc->base),
+ crcs);
}
#else
static inline void
@@ -2136,7 +2167,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IER, ier);
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
- POSTING_READ(VLV_MASTER_IER);
if (gt_iir)
snb_gt_irq_handler(dev_priv, gt_iir);
@@ -2221,7 +2251,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IER, ier);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- POSTING_READ(GEN8_MASTER_IRQ);
gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
@@ -2390,6 +2419,43 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
cpt_serr_int_handler(dev_priv);
}
+static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+{
+ u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
+ u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
+ u32 pin_mask = 0, long_mask = 0;
+
+ if (ddi_hotplug_trigger) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
+ I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ ddi_hotplug_trigger,
+ dig_hotplug_reg, hpd_icp,
+ icp_ddi_port_hotplug_long_detect);
+ }
+
+ if (tc_hotplug_trigger) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
+ I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ tc_hotplug_trigger,
+ dig_hotplug_reg, hpd_icp,
+ icp_tc_port_hotplug_long_detect);
+ }
+
+ if (pin_mask)
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+
+ if (pch_iir & SDE_GMBUS_ICP)
+ gmbus_irq_handler(dev_priv);
+}
+
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
@@ -2553,7 +2619,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- POSTING_READ(DEIER);
/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will be stored on its back queue, and then we'll be
@@ -2563,7 +2628,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (!HAS_PCH_NOP(dev_priv)) {
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
- POSTING_READ(SDEIER);
}
/* Find, clear, then process each source of interrupt */
@@ -2598,11 +2662,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
}
I915_WRITE(DEIER, de_ier);
- POSTING_READ(DEIER);
- if (!HAS_PCH_NOP(dev_priv)) {
+ if (!HAS_PCH_NOP(dev_priv))
I915_WRITE(SDEIER, sde_ier);
- POSTING_READ(SDEIER);
- }
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
enable_rpm_wakeref_asserts(dev_priv);
@@ -2626,6 +2687,40 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
+static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+{
+ u32 pin_mask = 0, long_mask = 0;
+ u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
+ u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
+
+ if (trigger_tc) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
+ I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
+ dig_hotplug_reg, hpd_gen11,
+ gen11_port_hotplug_long_detect);
+ }
+
+ if (trigger_tbt) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+ I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
+ dig_hotplug_reg, hpd_gen11,
+ gen11_port_hotplug_long_detect);
+ }
+
+ if (pin_mask)
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ else
+ DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@@ -2661,6 +2756,17 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
}
+ if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
+ iir = I915_READ(GEN11_DE_HPD_IIR);
+ if (iir) {
+ I915_WRITE(GEN11_DE_HPD_IIR, iir);
+ ret = IRQ_HANDLED;
+ gen11_hpd_irq_handler(dev_priv, iir);
+ } else {
+ DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
+ }
+ }
+
if (master_ctl & GEN8_DE_PORT_IRQ) {
iir = I915_READ(GEN8_DE_PORT_IIR);
if (iir) {
@@ -2676,7 +2782,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (IS_CNL_WITH_PORT_F(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
+ tmp_mask |= ICL_AUX_CHANNEL_E;
+
+ if (IS_CNL_WITH_PORT_F(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 11)
tmp_mask |= CNL_AUX_CHANNEL_F;
if (iir & tmp_mask) {
@@ -2760,8 +2870,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
- HAS_PCH_CNP(dev_priv))
+ if (HAS_PCH_ICP(dev_priv))
+ icp_irq_handler(dev_priv, iir);
+ else if (HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv) ||
+ HAS_PCH_CNP(dev_priv))
spt_irq_handler(dev_priv, iir);
else
cpt_irq_handler(dev_priv, iir);
@@ -2978,11 +3091,44 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
spin_unlock(&i915->irq_lock);
}
+static void
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
+ u32 *iir)
+{
+ void __iomem * const regs = dev_priv->regs;
+
+ if (!(master_ctl & GEN11_GU_MISC_IRQ))
+ return;
+
+ *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+ if (likely(*iir))
+ raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+}
+
+static void
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
+ const u32 master_ctl, const u32 iir)
+{
+ if (!(master_ctl & GEN11_GU_MISC_IRQ))
+ return;
+
+ if (unlikely(!iir)) {
+ DRM_ERROR("GU_MISC iir blank!\n");
+ return;
+ }
+
+ if (iir & GEN11_GU_MISC_GSE)
+ intel_opregion_asle_intr(dev_priv);
+ else
+ DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
+}
+
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = to_i915(arg);
void __iomem * const regs = i915->regs;
u32 master_ctl;
+ u32 gu_misc_iir;
if (!intel_irqs_enabled(i915))
return IRQ_NONE;
@@ -3011,9 +3157,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
enable_rpm_wakeref_asserts(i915);
}
+ gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+
/* Acknowledge and enable interrupts. */
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
+ gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+
return IRQ_HANDLED;
}
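Note: GU_MISC handling is deliberately split into an ack and a handler so the
IIR is read and cleared before the master re-arm write, while the (potentially
slower) opregion work only runs after it; condensed from the handler above:

    gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);   /* clear IIR early */
    raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
    gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir); /* act after re-arm */

gu_misc_iir is only written by the ack when GEN11_GU_MISC_IRQ is set, and the
handler checks the same master_ctl bit before reading it, so the variable is
never consumed uninitialized.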
@@ -3089,7 +3239,7 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
*/
DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
I915_WRITE(EMR, I915_READ(EMR) | eir);
- I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
}
}
@@ -3500,7 +3650,12 @@ static void gen11_irq_reset(struct drm_device *dev)
GEN3_IRQ_RESET(GEN8_DE_PORT_);
GEN3_IRQ_RESET(GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(GEN11_DE_HPD_);
+ GEN3_IRQ_RESET(GEN11_GU_MISC_);
GEN3_IRQ_RESET(GEN8_PCU_);
+
+ if (HAS_PCH_ICP(dev_priv))
+ GEN3_IRQ_RESET(SDE);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
@@ -3617,6 +3772,73 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_detection_setup(dev_priv);
}
+static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug;
+
+ hotplug = I915_READ(SHOTPLUG_CTL_DDI);
+ hotplug |= ICP_DDIA_HPD_ENABLE |
+ ICP_DDIB_HPD_ENABLE;
+ I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
+
+ hotplug = I915_READ(SHOTPLUG_CTL_TC);
+ hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
+ ICP_TC_HPD_ENABLE(PORT_TC2) |
+ ICP_TC_HPD_ENABLE(PORT_TC3) |
+ ICP_TC_HPD_ENABLE(PORT_TC4);
+ I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+}
+
+static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
+
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+ icp_hpd_detection_setup(dev_priv);
+}
+
+static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug;
+
+ hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
+ hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+ I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
+
+ hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+ hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+ I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
+}
+
+static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+ u32 val;
+
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
+ hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
+
+ val = I915_READ(GEN11_DE_HPD_IMR);
+ val &= ~hotplug_irqs;
+ I915_WRITE(GEN11_DE_HPD_IMR, val);
+ POSTING_READ(GEN11_DE_HPD_IMR);
+
+ gen11_hpd_detection_setup(dev_priv);
+
+ if (HAS_PCH_ICP(dev_priv))
+ icp_hpd_irq_setup(dev_priv);
+}
+
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 val, hotplug;
@@ -3943,9 +4165,12 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
uint32_t de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables;
- u32 de_misc_masked = GEN8_DE_MISC_GSE | GEN8_DE_EDP_PSR;
+ u32 de_misc_masked = GEN8_DE_EDP_PSR;
enum pipe pipe;
+ if (INTEL_GEN(dev_priv) <= 10)
+ de_misc_masked |= GEN8_DE_MISC_GSE;
+
if (INTEL_GEN(dev_priv) >= 9) {
de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
@@ -3956,7 +4181,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}
- if (IS_CNL_WITH_PORT_F(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
+ de_port_masked |= ICL_AUX_CHANNEL_E;
+
+ if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
de_port_masked |= CNL_AUX_CHANNEL_F;
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
@@ -3984,10 +4212,18 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
- if (IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11) {
+ u32 de_hpd_masked = 0;
+ u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
+ GEN11_DE_TBT_HOTPLUG_MASK;
+
+ GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
+ gen11_hpd_detection_setup(dev_priv);
+ } else if (IS_GEN9_LP(dev_priv)) {
bxt_hpd_detection_setup(dev_priv);
- else if (IS_BROADWELL(dev_priv))
+ } else if (IS_BROADWELL(dev_priv)) {
ilk_hpd_detection_setup(dev_priv);
+ }
}
static int gen8_irq_postinstall(struct drm_device *dev)
@@ -4036,13 +4272,34 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
}
+static void icp_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 mask = SDE_GMBUS_ICP;
+
+ WARN_ON(I915_READ(SDEIER) != 0);
+ I915_WRITE(SDEIER, 0xffffffff);
+ POSTING_READ(SDEIER);
+
+ gen3_assert_iir_is_zero(dev_priv, SDEIIR);
+ I915_WRITE(SDEIMR, ~mask);
+
+ icp_hpd_detection_setup(dev_priv);
+}
+
static int gen11_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 gu_misc_masked = GEN11_GU_MISC_GSE;
+
+ if (HAS_PCH_ICP(dev_priv))
+ icp_irq_postinstall(dev);
gen11_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
+ GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
+
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
@@ -4090,11 +4347,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT);
enable_mask =
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
@@ -4109,6 +4368,81 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}
+static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
+ u16 *eir, u16 *eir_stuck)
+{
+ u16 emr;
+
+ *eir = I915_READ16(EIR);
+
+ if (*eir)
+ I915_WRITE16(EIR, *eir);
+
+ *eir_stuck = I915_READ16(EIR);
+ if (*eir_stuck == 0)
+ return;
+
+ /*
+ * Toggle all EMR bits to make sure we get an edge
+ * in the ISR master error bit if we don't clear
+ * all the EIR bits. Otherwise the edge triggered
+ * IIR on i965/g4x wouldn't notice that an interrupt
+ * is still pending. Also some EIR bits can't be
+ * cleared except by handling the underlying error
+ * (or by a GPU reset) so we mask any bit that
+ * remains set.
+ */
+ emr = I915_READ16(EMR);
+ I915_WRITE16(EMR, 0xffff);
+ I915_WRITE16(EMR, emr | *eir_stuck);
+}
+
+static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
+ u16 eir, u16 eir_stuck)
+{
+ DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
+
+ if (eir_stuck)
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
+}
+
+static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
+ u32 *eir, u32 *eir_stuck)
+{
+ u32 emr;
+
+ *eir = I915_READ(EIR);
+
+ I915_WRITE(EIR, *eir);
+
+ *eir_stuck = I915_READ(EIR);
+ if (*eir_stuck == 0)
+ return;
+
+ /*
+ * Toggle all EMR bits to make sure we get an edge
+ * in the ISR master error bit if we don't clear
+ * all the EIR bits. Otherwise the edge triggered
+ * IIR on i965/g4x wouldn't notice that an interrupt
+ * is still pending. Also some EIR bits can't be
+ * cleared except by handling the underlying error
+ * (or by a GPU reset) so we mask any bit that
+ * remains set.
+ */
+ emr = I915_READ(EMR);
+ I915_WRITE(EMR, 0xffffffff);
+ I915_WRITE(EMR, emr | *eir_stuck);
+}
+
+static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
+ u32 eir, u32 eir_stuck)
+{
+ DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
+
+ if (eir_stuck)
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
+}
+
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
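Note: the ack helpers above exploit the fact that the master-error bit in ISR
is level-triggered (EIR & ~EMR) while IIR latches edges on i965/g4x: if some
EIR bits refuse to clear, masking everything in EMR momentarily drives the
level low, and unmasking (with the stuck bits now kept masked) produces a fresh
edge for the next real error. The idiom on a hypothetical 16-bit error unit:

    static u16 ack_errors(u16 __iomem *eir, u16 __iomem *emr)
    {
            u16 stuck, saved = readw(emr);

            writew(readw(eir), eir);        /* clear what will clear */
            stuck = readw(eir);             /* whatever survived */
            writew(0xffff, emr);            /* mask all: ISR level drops */
            writew(saved | stuck, emr);     /* unmask, stuck bits stay masked */
            return stuck;
    }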
@@ -4123,6 +4457,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
+ u16 eir = 0, eir_stuck = 0;
u16 iir;
iir = I915_READ16(IIR);
@@ -4135,13 +4470,16 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
* signalled in iir */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
I915_WRITE16(IIR, iir);
if (iir & I915_USER_INTERRUPT)
notify_ring(dev_priv->engine[RCS]);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
@@ -4179,12 +4517,14 @@ static int i915_irq_postinstall(struct drm_device *dev)
dev_priv->irq_mask =
~(I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT);
enable_mask =
I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
if (I915_HAS_HOTPLUG(dev_priv)) {
@@ -4222,6 +4562,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 eir = 0, eir_stuck = 0;
u32 hotplug_status = 0;
u32 iir;
@@ -4239,13 +4580,16 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
* signalled in iir */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
I915_WRITE(IIR, iir);
if (iir & I915_USER_INTERRUPT)
notify_ring(dev_priv->engine[RCS]);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -4299,14 +4643,14 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_MASTER_ERROR_INTERRUPT);
enable_mask =
I915_ASLE_INTERRUPT |
I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
if (IS_G4X(dev_priv))
@@ -4366,6 +4710,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 eir = 0, eir_stuck = 0;
u32 hotplug_status = 0;
u32 iir;
@@ -4382,6 +4727,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
* signalled in iir */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
I915_WRITE(IIR, iir);
if (iir & I915_USER_INTERRUPT)
@@ -4390,8 +4738,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_BSD_USER_INTERRUPT)
notify_ring(dev_priv->engine[VCS]);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -4506,7 +4854,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = gen11_irq_reset;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
- dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
+ dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
} else if (INTEL_GEN(dev_priv) >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_reset;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 66ea3552c63e..295e981e4a39 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -44,10 +44,6 @@ i915_param_named(modeset, int, 0400,
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
-i915_param_named_unsafe(panel_ignore_lid, int, 0600,
- "Override lid status (0=autodetect, 1=autodetect disabled [default], "
- "-1=force lid closed, -2=force lid open)");
-
i915_param_named_unsafe(enable_dc, int, 0400,
"Enable power-saving display C-states. "
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
@@ -92,7 +88,7 @@ i915_param_named_unsafe(enable_ppgtt, int, 0400,
i915_param_named_unsafe(enable_psr, int, 0600,
"Enable PSR "
- "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
+ "(0=disabled, 1=enabled) "
"Default: -1 (use per-chip default)");
i915_param_named_unsafe(alpha_support, bool, 0400,
@@ -130,9 +126,6 @@ i915_param_named_unsafe(invert_brightness, int, 0600,
i915_param_named(disable_display, bool, 0400,
"Disable display (default: false)");
-i915_param_named_unsafe(enable_cmd_parser, bool, 0400,
- "Enable command parsing (true=enabled [default], false=disabled)");
-
i915_param_named(mmio_debug, int, 0600,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 6684025b7af8..6c4d4a21474b 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -36,7 +36,6 @@ struct drm_printer;
#define I915_PARAMS_FOR_EACH(param) \
param(char *, vbt_firmware, NULL) \
param(int, modeset, -1) \
- param(int, panel_ignore_lid, 1) \
param(int, lvds_channel_mode, 0) \
param(int, panel_use_ssc, -1) \
param(int, vbt_sdvo_panel_type, -1) \
@@ -58,7 +57,6 @@ struct drm_printer;
param(unsigned int, inject_load_failure, 0) \
/* leave bools at the end to not create holes */ \
param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
- param(bool, enable_cmd_parser, true) \
param(bool, enable_hangcheck, true) \
param(bool, fastboot, false) \
param(bool, prefault_disable, false) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 4364922e935d..6a4d1388ad2d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -340,7 +340,6 @@ static const struct intel_device_info intel_valleyview_info = {
GEN(7),
.is_lp = 1,
.num_pipes = 2,
- .has_psr = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_gmch_display = 1,
@@ -433,7 +432,6 @@ static const struct intel_device_info intel_cherryview_info = {
.is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_64bit_reloc = 1,
- .has_psr = 1,
.has_runtime_pm = 1,
.has_resource_streamer = 1,
.has_rc6 = 1,
@@ -659,12 +657,15 @@ static const struct pci_device_id pciidlist[] = {
INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
+ INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CFL_U_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
+ INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
+ INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
{0, 0, 0}
@@ -673,10 +674,16 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static void i915_pci_remove(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_device *dev;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev) /* driver load aborted, nothing to clean up */
+ return;
i915_driver_unload(dev);
drm_dev_put(dev);
+
+ pci_set_drvdata(pdev, NULL);
}
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -711,6 +718,11 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
+ if (i915_inject_load_failure()) {
+ i915_pci_remove(pdev);
+ return -ENODEV;
+ }
+
err = i915_live_selftests(pdev);
if (err) {
i915_pci_remove(pdev);
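Note: i915_pci_remove() can now be reached for a device whose load was aborted
(the new inject-load-failure path above calls it directly), so it bails out
when drvdata is NULL and clears drvdata on the way out, making a second call
harmless; the guard pattern, condensed:

    dev = pci_get_drvdata(pdev);
    if (!dev)                       /* load aborted or already removed */
            return;
    ...
    pci_set_drvdata(pdev, NULL);    /* idempotent from here on */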
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 019bd2d073ad..6bf10952c724 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -315,7 +315,7 @@ static u32 i915_oa_max_sample_rate = 100000;
* code assumes all reports have a power-of-two size and ~(size - 1) can
* be used as a mask to align the OA tail pointer.
*/
-static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A13] = { 0, 64 },
[I915_OA_FORMAT_A29] = { 1, 128 },
[I915_OA_FORMAT_A13_B8_C8] = { 2, 128 },
@@ -326,7 +326,7 @@ static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_C4_B8] = { 7, 64 },
};
-static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A12] = { 0, 64 },
[I915_OA_FORMAT_A12_B8_C8] = { 2, 128 },
[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
@@ -737,12 +737,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
continue;
}
- /*
- * XXX: Just keep the lower 21 bits for now since I'm not
- * entirely sure if the HW touches any of the higher bits in
- * this field
- */
- ctx_id = report32[2] & 0x1fffff;
+ ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;
/*
* Squash whatever is in the CTX_ID field if it's marked as
@@ -1203,6 +1198,33 @@ static int i915_oa_read(struct i915_perf_stream *stream,
return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}
+static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
+ struct i915_gem_context *ctx)
+{
+ struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_context *ce;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(&i915->drm);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * As the ID is the gtt offset of the context's vma we
+ * pin the vma to ensure the ID remains fixed.
+ *
+ * NB: implied RCS engine...
+ */
+ ce = intel_context_pin(ctx, engine);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ce))
+ return ce;
+
+ i915->perf.oa.pinned_ctx = ce;
+
+ return ce;
+}
+
/**
* oa_get_render_ctx_id - determine and hold ctx hw id
* @stream: An i915-perf stream opened for OA metrics
@@ -1215,40 +1237,76 @@ static int i915_oa_read(struct i915_perf_stream *stream,
*/
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct drm_i915_private *i915 = stream->dev_priv;
+ struct intel_context *ce;
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
- } else {
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
- struct intel_ring *ring;
- int ret;
-
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- return ret;
+ ce = oa_pin_context(i915, stream->ctx);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+ switch (INTEL_GEN(i915)) {
+ case 7: {
/*
- * As the ID is the gtt offset of the context's vma we
- * pin the vma to ensure the ID remains fixed.
- *
- * NB: implied RCS engine...
+ * On Haswell we don't do any post processing of the reports
+ * and don't need to use the mask.
*/
- ring = intel_context_pin(stream->ctx, engine);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (IS_ERR(ring))
- return PTR_ERR(ring);
+ i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
+ i915->perf.oa.specific_ctx_id_mask = 0;
+ break;
+ }
+ case 8:
+ case 9:
+ case 10:
+ if (USES_GUC_SUBMISSION(i915)) {
+ /*
+ * When using GuC, the context descriptor we write in
+ * i915 is read by GuC and rewritten before it's
+ * actually written into the hardware. The LRCA is
+ * what is put into the context id field of the
+ * context descriptor by GuC. Because it's aligned to
+ * a page, the lower 12bits are always at 0 and
+ * dropped by GuC. They won't be part of the context
+ * ID in the OA reports, so squash those lower bits.
+ */
+ i915->perf.oa.specific_ctx_id =
+ lower_32_bits(ce->lrc_desc) >> 12;
- /*
- * Explicitly track the ID (instead of calling
- * i915_ggtt_offset() on the fly) considering the difference
- * with gen8+ and execlists
- */
- dev_priv->perf.oa.specific_ctx_id =
- i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state);
+ /*
+ * GuC uses the top bit to signal proxy submission, so
+ * ignore that bit.
+ */
+ i915->perf.oa.specific_ctx_id_mask =
+ (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
+ } else {
+ i915->perf.oa.specific_ctx_id_mask =
+ (1U << GEN8_CTX_ID_WIDTH) - 1;
+ i915->perf.oa.specific_ctx_id =
+ upper_32_bits(ce->lrc_desc);
+ i915->perf.oa.specific_ctx_id &=
+ i915->perf.oa.specific_ctx_id_mask;
+ }
+ break;
+
+ case 11: {
+ i915->perf.oa.specific_ctx_id_mask =
+ ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
+ ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
+ ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
+ i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
+ i915->perf.oa.specific_ctx_id &=
+ i915->perf.oa.specific_ctx_id_mask;
+ break;
+ }
+
+ default:
+ MISSING_CASE(INTEL_GEN(i915));
}
+ DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+ i915->perf.oa.specific_ctx_id,
+ i915->perf.oa.specific_ctx_id_mask);
+
return 0;
}
@@ -1262,17 +1320,15 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_context *ce;
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- } else {
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
+ dev_priv->perf.oa.specific_ctx_id_mask = 0;
+ ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
+ if (ce) {
mutex_lock(&dev_priv->drm.struct_mutex);
-
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- intel_context_unpin(stream->ctx, engine);
-
+ intel_context_unpin(ce);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
}
@@ -1780,7 +1836,9 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
* So far the best way to work around this issue seems to be draining
* the GPU from any submitted work.
*/
- ret = i915_gem_wait_for_idle(dev_priv, wait_flags);
+ ret = i915_gem_wait_for_idle(dev_priv,
+ wait_flags,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
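Note: oa_get_render_ctx_id() now records a (value, mask) pair because the bits
the hardware places in an OA report's context-ID field differ per backend: the
GGTT offset on Haswell, the upper LRC-descriptor bits on gen8-10 execlists, the
page-shifted LRCA minus the proxy top bit under GuC, and the sw_ctx_id/engine
fields on gen11. Matching a report then reduces to the masked compare of the
kind used in gen8_append_oa_reports():

    u32 ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;
    bool ours = ctx_id == dev_priv->perf.oa.specific_ctx_id;  /* filter hit */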
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index dc87797db500..d6c8f8fdfda5 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -4,6 +4,7 @@
* Copyright © 2017-2018 Intel Corporation
*/
+#include <linux/irq.h>
#include "i915_pmu.h"
#include "intel_ringbuffer.h"
#include "i915_drv.h"
@@ -127,6 +128,7 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
i915->pmu.timer_enabled = true;
+ i915->pmu.timer_last = ktime_get();
hrtimer_start_range_ns(&i915->pmu.timer,
ns_to_ktime(PERIOD), 0,
HRTIMER_MODE_REL_PINNED);
@@ -155,12 +157,13 @@ static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
}
static void
-update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
+add_sample(struct i915_pmu_sample *sample, u32 val)
{
- sample->cur += mul_u32_u32(val, unit);
+ sample->cur += val;
}
-static void engines_sample(struct drm_i915_private *dev_priv)
+static void
+engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -182,8 +185,9 @@ static void engines_sample(struct drm_i915_private *dev_priv)
val = !i915_seqno_passed(current_seqno, last_seqno);
- update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
- PERIOD, val);
+ if (val)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
+ period_ns);
if (val && (engine->pmu.enable &
(BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
@@ -194,11 +198,13 @@ static void engines_sample(struct drm_i915_private *dev_priv)
val = 0;
}
- update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
- PERIOD, !!(val & RING_WAIT));
+ if (val & RING_WAIT)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
+ period_ns);
- update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
- PERIOD, !!(val & RING_WAIT_SEMAPHORE));
+ if (val & RING_WAIT_SEMAPHORE)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
+ period_ns);
}
if (fw)
@@ -207,7 +213,14 @@ static void engines_sample(struct drm_i915_private *dev_priv)
intel_runtime_pm_put(dev_priv);
}
-static void frequency_sample(struct drm_i915_private *dev_priv)
+static void
+add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
+{
+ sample->cur += mul_u32_u32(val, mul);
+}
+
+static void
+frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
if (dev_priv->pmu.enable &
config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
@@ -221,15 +234,17 @@ static void frequency_sample(struct drm_i915_private *dev_priv)
intel_runtime_pm_put(dev_priv);
}
- update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
- 1, intel_gpu_freq(dev_priv, val));
+ add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
+ intel_gpu_freq(dev_priv, val),
+ period_ns / 1000);
}
if (dev_priv->pmu.enable &
config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
- update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.cur_freq));
+ add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
+ intel_gpu_freq(dev_priv,
+ dev_priv->gt_pm.rps.cur_freq),
+ period_ns / 1000);
}
}
@@ -237,14 +252,27 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer);
+ unsigned int period_ns;
+ ktime_t now;
if (!READ_ONCE(i915->pmu.timer_enabled))
return HRTIMER_NORESTART;
- engines_sample(i915);
- frequency_sample(i915);
+ now = ktime_get();
+ period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
+ i915->pmu.timer_last = now;
+
+ /*
+ * Strictly speaking the passed in period may not be 100% accurate for
+ * all internal calculations, since some amount of time can be spent on
+ * grabbing the forcewake. However, the potential error from timer call-
+ * back delay greatly dominates this, so we keep it simple.
+ */
+ engines_sample(i915, period_ns);
+ frequency_sample(i915, period_ns);
+
+ hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
- hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
return HRTIMER_RESTART;
}
@@ -519,12 +547,12 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
case I915_PMU_ACTUAL_FREQUENCY:
val =
div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
- FREQUENCY);
+ USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_REQUESTED_FREQUENCY:
val =
div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
- FREQUENCY);
+ USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_INTERRUPTS:
val = count_interrupts(i915);
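The i915_pmu.c hunks above replace fixed-PERIOD accounting with measured elapsed time: each timer callback adds the actual nanoseconds since the last tick to the busy counters, weights frequency samples by elapsed microseconds, and the read-out divides by USEC_PER_SEC to report MHz. A minimal standalone sketch of that accumulation scheme, with illustrative names and made-up data rather than the driver's own:

#include <stdint.h>
#include <stdio.h>

struct sample { uint64_t cur; };

static void add_sample(struct sample *s, uint32_t val)
{
	s->cur += val;
}

static void add_sample_mult(struct sample *s, uint32_t val, uint32_t mul)
{
	s->cur += (uint64_t)val * mul;	/* mirrors mul_u32_u32() */
}

int main(void)
{
	struct sample busy = { 0 }, freq = { 0 };
	/* Three late-firing callbacks: 5 ms, 7 ms and 6 ms apart. */
	uint32_t period_ns[] = { 5000000, 7000000, 6000000 };
	uint32_t freq_mhz[]  = { 300, 450, 600 };

	for (int i = 0; i < 3; i++) {
		/* Engine assumed busy for the whole interval here. */
		add_sample(&busy, period_ns[i]);
		/* Weight frequency by elapsed microseconds: MHz * us. */
		add_sample_mult(&freq, freq_mhz[i], period_ns[i] / 1000);
	}

	printf("busy ns: %llu\n", (unsigned long long)busy.cur);
	/* Time-weighted average frequency: 458 MHz for this data. */
	printf("avg MHz: %llu\n",
	       (unsigned long long)(freq.cur / (busy.cur / 1000)));
	return 0;
}

Because the counters now carry real elapsed time, a delayed hrtimer callback no longer over- or under-counts busyness the way a hard-coded PERIOD per tick would.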
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 2ba735299f7c..7f164ca3db12 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -65,6 +65,14 @@ struct i915_pmu {
* event types.
*/
u64 enable;
+
+ /**
+ * @timer_last:
+ *
+ * Timestamp of the previous timer invocation.
+ */
+ ktime_t timer_last;
+
/**
* @enable_count: Reference counts for the enabled events.
*
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 195203f298df..eeaa3d506d95 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -54,6 +54,7 @@ enum vgt_g2v_type {
*/
#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
#define VGT_CAPS_HWSP_EMULATION BIT(3)
+#define VGT_CAPS_HUGE_GTT BIT(4)
struct vgt_if {
u64 magic; /* VGT_MAGIC */
@@ -93,7 +94,10 @@ struct vgt_if {
u32 rsv5[4];
u32 g2v_notify;
- u32 rsv6[7];
+ u32 rsv6[5];
+
+ u32 cursor_x_hot;
+ u32 cursor_y_hot;
struct {
u32 lo;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7720569f2024..91e7483228e1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -139,23 +139,40 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
}
+/*
+ * Given the first two numbers __a and __b of arbitrarily many evenly spaced
+ * numbers, pick the 0-based __index'th value.
+ *
+ * Always prefer this over _PICK() if the numbers are evenly spaced.
+ */
+#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
+
+/*
+ * Given the arbitrary numbers in varargs, pick the 0-based __index'th number.
+ *
+ * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced.
+ */
#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
-#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+/*
+ * Named helper wrappers around _PICK_EVEN() and _PICK().
+ */
+#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
-#define _PLANE(plane, a, b) _PIPE(plane, a, b)
+#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
-#define _TRANS(tran, a, b) ((a) + (tran)*((b)-(a)))
+#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
-#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _PLL(pll, a, b) ((a) + (pll)*((b)-(a)))
+#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
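How the two selectors above behave, shown outside the driver: _PICK_EVEN() is pure arithmetic on the first two offsets, while _PICK() indexes a compound-literal array and so also handles unevenly spaced registers. The macro bodies are copied from this hunk; the register offsets are invented for the demonstration.

#include <assert.h>
#include <stdint.h>

typedef uint32_t u32;

#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])

int main(void)
{
	/* Evenly spaced: 0x1000, 0x1100, 0x1200, ... */
	assert(_PICK_EVEN(0, 0x1000, 0x1100) == 0x1000);
	assert(_PICK_EVEN(2, 0x1000, 0x1100) == 0x1200);

	/* Unevenly spaced: every offset must be listed. */
	assert(_PICK(1, 0x1000, 0x1180, 0x1300) == 0x1180);
	return 0;
}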
+#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
#define _MASKED_FIELD(mask, value) ({ \
if (__builtin_constant_p(mask)) \
BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
@@ -164,7 +181,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
if (__builtin_constant_p(mask) && __builtin_constant_p(value)) \
BUILD_BUG_ON_MSG((value) & ~(mask), \
"Incorrect value for mask"); \
- (mask) << 16 | (value); })
+ __MASKED_FIELD(mask, value); })
#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
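A masked register write packs which bits to touch into the high 16 bits and their new values into the low 16, so hardware leaves unrelated bits alone. A minimal sketch of the __MASKED_FIELD() encoding factored out above, omitting the kernel's BUILD_BUG_ON compile-time checks; the sample bit is arbitrary.

#include <assert.h>
#include <stdint.h>

#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
#define _MASKED_BIT_ENABLE(a)  __MASKED_FIELD((a), (a))
#define _MASKED_BIT_DISABLE(a) __MASKED_FIELD((a), 0)

int main(void)
{
	uint32_t bit = 1u << 5;

	/* Enable: mask bit in the high half, value bit in the low half. */
	assert(_MASKED_BIT_ENABLE(bit)  == 0x00200020u);
	/* Disable: same mask, value of zero. */
	assert(_MASKED_BIT_DISABLE(bit) == 0x00200000u);
	return 0;
}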
@@ -270,19 +287,19 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
-#define ILK_GRDOM_FULL (0<<1)
-#define ILK_GRDOM_RENDER (1<<1)
-#define ILK_GRDOM_MEDIA (3<<1)
-#define ILK_GRDOM_MASK (3<<1)
-#define ILK_GRDOM_RESET_ENABLE (1<<0)
+#define ILK_GRDOM_FULL (0 << 1)
+#define ILK_GRDOM_RENDER (1 << 1)
+#define ILK_GRDOM_MEDIA (3 << 1)
+#define ILK_GRDOM_MASK (3 << 1)
+#define ILK_GRDOM_RESET_ENABLE (1 << 0)
#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
-#define GEN6_MBC_SNPCR_MASK (3<<21)
-#define GEN6_MBC_SNPCR_MAX (0<<21)
-#define GEN6_MBC_SNPCR_MED (1<<21)
-#define GEN6_MBC_SNPCR_LOW (2<<21)
-#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
+#define GEN6_MBC_SNPCR_MASK (3 << 21)
+#define GEN6_MBC_SNPCR_MAX (0 << 21)
+#define GEN6_MBC_SNPCR_MED (1 << 21)
+#define GEN6_MBC_SNPCR_LOW (2 << 21)
+#define GEN6_MBC_SNPCR_MIN (3 << 21) /* only 1/16th of the cache is shared */
#define VLV_G3DCTL _MMIO(0x9024)
#define VLV_GSCKGCTL _MMIO(0x9028)
@@ -314,13 +331,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_GRDOM_VECS (1 << 13)
#define GEN11_GRDOM_VECS2 (1 << 14)
-#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base+0x228)
-#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base+0x518)
-#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base+0x220)
+#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base + 0x228)
+#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base + 0x518)
+#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base + 0x220)
#define PP_DIR_DCLV_2G 0xffffffff
-#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8 + 4)
-#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8)
+#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4)
+#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8)
#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8)
#define GEN8_RPCS_ENABLE (1 << 31)
@@ -358,25 +375,25 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_SELECTIVE_READ_ADDRESSING_ENABLE (1 << 13)
#define GAM_ECOCHK _MMIO(0x4090)
-#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
-#define ECOCHK_SNB_BIT (1<<10)
-#define ECOCHK_DIS_TLB (1<<8)
-#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
-#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
-#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
-#define ECOCHK_PPGTT_GFDT_IVB (0x1<<4)
-#define ECOCHK_PPGTT_LLC_IVB (0x1<<3)
-#define ECOCHK_PPGTT_UC_HSW (0x1<<3)
-#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
-#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
+#define BDW_DISABLE_HDC_INVALIDATION (1 << 25)
+#define ECOCHK_SNB_BIT (1 << 10)
+#define ECOCHK_DIS_TLB (1 << 8)
+#define HSW_ECOCHK_ARB_PRIO_SOL (1 << 6)
+#define ECOCHK_PPGTT_CACHE64B (0x3 << 3)
+#define ECOCHK_PPGTT_CACHE4B (0x0 << 3)
+#define ECOCHK_PPGTT_GFDT_IVB (0x1 << 4)
+#define ECOCHK_PPGTT_LLC_IVB (0x1 << 3)
+#define ECOCHK_PPGTT_UC_HSW (0x1 << 3)
+#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
+#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
#define GAC_ECO_BITS _MMIO(0x14090)
-#define ECOBITS_SNB_BIT (1<<13)
-#define ECOBITS_PPGTT_CACHE64B (3<<8)
-#define ECOBITS_PPGTT_CACHE4B (0<<8)
+#define ECOBITS_SNB_BIT (1 << 13)
+#define ECOBITS_PPGTT_CACHE64B (3 << 8)
+#define ECOBITS_PPGTT_CACHE4B (0 << 8)
#define GAB_CTL _MMIO(0x24000)
-#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
+#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
#define GEN6_STOLEN_RESERVED _MMIO(0x1082C0)
#define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
@@ -395,6 +412,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_STOLEN_RESERVED_4M (2 << 7)
#define GEN8_STOLEN_RESERVED_8M (3 << 7)
#define GEN6_STOLEN_RESERVED_ENABLE (1 << 0)
+#define GEN11_STOLEN_RESERVED_ADDR_MASK (0xFFFFFFFFFFFULL << 20)
/* VGA stuff */
@@ -404,15 +422,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _VGA_MSR_WRITE _MMIO(0x3c2)
#define VGA_MSR_WRITE 0x3c2
#define VGA_MSR_READ 0x3cc
-#define VGA_MSR_MEM_EN (1<<1)
-#define VGA_MSR_CGA_MODE (1<<0)
+#define VGA_MSR_MEM_EN (1 << 1)
+#define VGA_MSR_CGA_MODE (1 << 0)
#define VGA_SR_INDEX 0x3c4
#define SR01 1
#define VGA_SR_DATA 0x3c5
#define VGA_AR_INDEX 0x3c0
-#define VGA_AR_VID_EN (1<<5)
+#define VGA_AR_VID_EN (1 << 5)
#define VGA_AR_DATA_WRITE 0x3c0
#define VGA_AR_DATA_READ 0x3c1
@@ -445,8 +463,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4)
#define MI_PREDICATE_RESULT_2 _MMIO(0x2214)
-#define LOWER_SLICE_ENABLED (1<<0)
-#define LOWER_SLICE_DISABLED (0<<0)
+#define LOWER_SLICE_ENABLED (1 << 0)
+#define LOWER_SLICE_DISABLED (0 << 0)
/*
* Registers used only by the command parser
@@ -504,47 +522,47 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_OACONTROL_CTX_MASK 0xFFFFF000
#define GEN7_OACONTROL_TIMER_PERIOD_MASK 0x3F
#define GEN7_OACONTROL_TIMER_PERIOD_SHIFT 6
-#define GEN7_OACONTROL_TIMER_ENABLE (1<<5)
-#define GEN7_OACONTROL_FORMAT_A13 (0<<2)
-#define GEN7_OACONTROL_FORMAT_A29 (1<<2)
-#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2<<2)
-#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3<<2)
-#define GEN7_OACONTROL_FORMAT_B4_C8 (4<<2)
-#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5<<2)
-#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6<<2)
-#define GEN7_OACONTROL_FORMAT_C4_B8 (7<<2)
+#define GEN7_OACONTROL_TIMER_ENABLE (1 << 5)
+#define GEN7_OACONTROL_FORMAT_A13 (0 << 2)
+#define GEN7_OACONTROL_FORMAT_A29 (1 << 2)
+#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2 << 2)
+#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3 << 2)
+#define GEN7_OACONTROL_FORMAT_B4_C8 (4 << 2)
+#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5 << 2)
+#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6 << 2)
+#define GEN7_OACONTROL_FORMAT_C4_B8 (7 << 2)
#define GEN7_OACONTROL_FORMAT_SHIFT 2
-#define GEN7_OACONTROL_PER_CTX_ENABLE (1<<1)
-#define GEN7_OACONTROL_ENABLE (1<<0)
+#define GEN7_OACONTROL_PER_CTX_ENABLE (1 << 1)
+#define GEN7_OACONTROL_ENABLE (1 << 0)
#define GEN8_OACTXID _MMIO(0x2364)
#define GEN8_OA_DEBUG _MMIO(0x2B04)
-#define GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1<<5)
-#define GEN9_OA_DEBUG_INCLUDE_CLK_RATIO (1<<6)
-#define GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1<<2)
-#define GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1<<1)
+#define GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5)
+#define GEN9_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6)
+#define GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2)
+#define GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1)
#define GEN8_OACONTROL _MMIO(0x2B00)
-#define GEN8_OA_REPORT_FORMAT_A12 (0<<2)
-#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2<<2)
-#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5<<2)
-#define GEN8_OA_REPORT_FORMAT_C4_B8 (7<<2)
+#define GEN8_OA_REPORT_FORMAT_A12 (0 << 2)
+#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2 << 2)
+#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5 << 2)
+#define GEN8_OA_REPORT_FORMAT_C4_B8 (7 << 2)
#define GEN8_OA_REPORT_FORMAT_SHIFT 2
-#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1<<1)
-#define GEN8_OA_COUNTER_ENABLE (1<<0)
+#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1 << 1)
+#define GEN8_OA_COUNTER_ENABLE (1 << 0)
#define GEN8_OACTXCONTROL _MMIO(0x2360)
#define GEN8_OA_TIMER_PERIOD_MASK 0x3F
#define GEN8_OA_TIMER_PERIOD_SHIFT 2
-#define GEN8_OA_TIMER_ENABLE (1<<1)
-#define GEN8_OA_COUNTER_RESUME (1<<0)
+#define GEN8_OA_TIMER_ENABLE (1 << 1)
+#define GEN8_OA_COUNTER_RESUME (1 << 0)
#define GEN7_OABUFFER _MMIO(0x23B0) /* R/W */
-#define GEN7_OABUFFER_OVERRUN_DISABLE (1<<3)
-#define GEN7_OABUFFER_EDGE_TRIGGER (1<<2)
-#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1<<1)
-#define GEN7_OABUFFER_RESUME (1<<0)
+#define GEN7_OABUFFER_OVERRUN_DISABLE (1 << 3)
+#define GEN7_OABUFFER_EDGE_TRIGGER (1 << 2)
+#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1 << 1)
+#define GEN7_OABUFFER_RESUME (1 << 0)
#define GEN8_OABUFFER_UDW _MMIO(0x23b4)
#define GEN8_OABUFFER _MMIO(0x2b14)
@@ -552,33 +570,33 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_OASTATUS1 _MMIO(0x2364)
#define GEN7_OASTATUS1_TAIL_MASK 0xffffffc0
-#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1<<2)
-#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1<<1)
-#define GEN7_OASTATUS1_REPORT_LOST (1<<0)
+#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1 << 2)
+#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1 << 1)
+#define GEN7_OASTATUS1_REPORT_LOST (1 << 0)
#define GEN7_OASTATUS2 _MMIO(0x2368)
#define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0
#define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */
#define GEN8_OASTATUS _MMIO(0x2b08)
-#define GEN8_OASTATUS_OVERRUN_STATUS (1<<3)
-#define GEN8_OASTATUS_COUNTER_OVERFLOW (1<<2)
-#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1<<1)
-#define GEN8_OASTATUS_REPORT_LOST (1<<0)
+#define GEN8_OASTATUS_OVERRUN_STATUS (1 << 3)
+#define GEN8_OASTATUS_COUNTER_OVERFLOW (1 << 2)
+#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1 << 1)
+#define GEN8_OASTATUS_REPORT_LOST (1 << 0)
#define GEN8_OAHEADPTR _MMIO(0x2B0C)
#define GEN8_OAHEADPTR_MASK 0xffffffc0
#define GEN8_OATAILPTR _MMIO(0x2B10)
#define GEN8_OATAILPTR_MASK 0xffffffc0
-#define OABUFFER_SIZE_128K (0<<3)
-#define OABUFFER_SIZE_256K (1<<3)
-#define OABUFFER_SIZE_512K (2<<3)
-#define OABUFFER_SIZE_1M (3<<3)
-#define OABUFFER_SIZE_2M (4<<3)
-#define OABUFFER_SIZE_4M (5<<3)
-#define OABUFFER_SIZE_8M (6<<3)
-#define OABUFFER_SIZE_16M (7<<3)
+#define OABUFFER_SIZE_128K (0 << 3)
+#define OABUFFER_SIZE_256K (1 << 3)
+#define OABUFFER_SIZE_512K (2 << 3)
+#define OABUFFER_SIZE_1M (3 << 3)
+#define OABUFFER_SIZE_2M (4 << 3)
+#define OABUFFER_SIZE_4M (5 << 3)
+#define OABUFFER_SIZE_8M (6 << 3)
+#define OABUFFER_SIZE_16M (7 << 3)
/*
* Flexible, Aggregate EU Counter Registers.
@@ -601,35 +619,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OASTARTTRIG1_THRESHOLD_MASK 0xffff
#define OASTARTTRIG2 _MMIO(0x2714)
-#define OASTARTTRIG2_INVERT_A_0 (1<<0)
-#define OASTARTTRIG2_INVERT_A_1 (1<<1)
-#define OASTARTTRIG2_INVERT_A_2 (1<<2)
-#define OASTARTTRIG2_INVERT_A_3 (1<<3)
-#define OASTARTTRIG2_INVERT_A_4 (1<<4)
-#define OASTARTTRIG2_INVERT_A_5 (1<<5)
-#define OASTARTTRIG2_INVERT_A_6 (1<<6)
-#define OASTARTTRIG2_INVERT_A_7 (1<<7)
-#define OASTARTTRIG2_INVERT_A_8 (1<<8)
-#define OASTARTTRIG2_INVERT_A_9 (1<<9)
-#define OASTARTTRIG2_INVERT_A_10 (1<<10)
-#define OASTARTTRIG2_INVERT_A_11 (1<<11)
-#define OASTARTTRIG2_INVERT_A_12 (1<<12)
-#define OASTARTTRIG2_INVERT_A_13 (1<<13)
-#define OASTARTTRIG2_INVERT_A_14 (1<<14)
-#define OASTARTTRIG2_INVERT_A_15 (1<<15)
-#define OASTARTTRIG2_INVERT_B_0 (1<<16)
-#define OASTARTTRIG2_INVERT_B_1 (1<<17)
-#define OASTARTTRIG2_INVERT_B_2 (1<<18)
-#define OASTARTTRIG2_INVERT_B_3 (1<<19)
-#define OASTARTTRIG2_INVERT_C_0 (1<<20)
-#define OASTARTTRIG2_INVERT_C_1 (1<<21)
-#define OASTARTTRIG2_INVERT_D_0 (1<<22)
-#define OASTARTTRIG2_THRESHOLD_ENABLE (1<<23)
-#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1<<24)
-#define OASTARTTRIG2_EVENT_SELECT_0 (1<<28)
-#define OASTARTTRIG2_EVENT_SELECT_1 (1<<29)
-#define OASTARTTRIG2_EVENT_SELECT_2 (1<<30)
-#define OASTARTTRIG2_EVENT_SELECT_3 (1<<31)
+#define OASTARTTRIG2_INVERT_A_0 (1 << 0)
+#define OASTARTTRIG2_INVERT_A_1 (1 << 1)
+#define OASTARTTRIG2_INVERT_A_2 (1 << 2)
+#define OASTARTTRIG2_INVERT_A_3 (1 << 3)
+#define OASTARTTRIG2_INVERT_A_4 (1 << 4)
+#define OASTARTTRIG2_INVERT_A_5 (1 << 5)
+#define OASTARTTRIG2_INVERT_A_6 (1 << 6)
+#define OASTARTTRIG2_INVERT_A_7 (1 << 7)
+#define OASTARTTRIG2_INVERT_A_8 (1 << 8)
+#define OASTARTTRIG2_INVERT_A_9 (1 << 9)
+#define OASTARTTRIG2_INVERT_A_10 (1 << 10)
+#define OASTARTTRIG2_INVERT_A_11 (1 << 11)
+#define OASTARTTRIG2_INVERT_A_12 (1 << 12)
+#define OASTARTTRIG2_INVERT_A_13 (1 << 13)
+#define OASTARTTRIG2_INVERT_A_14 (1 << 14)
+#define OASTARTTRIG2_INVERT_A_15 (1 << 15)
+#define OASTARTTRIG2_INVERT_B_0 (1 << 16)
+#define OASTARTTRIG2_INVERT_B_1 (1 << 17)
+#define OASTARTTRIG2_INVERT_B_2 (1 << 18)
+#define OASTARTTRIG2_INVERT_B_3 (1 << 19)
+#define OASTARTTRIG2_INVERT_C_0 (1 << 20)
+#define OASTARTTRIG2_INVERT_C_1 (1 << 21)
+#define OASTARTTRIG2_INVERT_D_0 (1 << 22)
+#define OASTARTTRIG2_THRESHOLD_ENABLE (1 << 23)
+#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1 << 24)
+#define OASTARTTRIG2_EVENT_SELECT_0 (1 << 28)
+#define OASTARTTRIG2_EVENT_SELECT_1 (1 << 29)
+#define OASTARTTRIG2_EVENT_SELECT_2 (1 << 30)
+#define OASTARTTRIG2_EVENT_SELECT_3 (1 << 31)
#define OASTARTTRIG3 _MMIO(0x2718)
#define OASTARTTRIG3_NOA_SELECT_MASK 0xf
@@ -658,35 +676,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OASTARTTRIG5_THRESHOLD_MASK 0xffff
#define OASTARTTRIG6 _MMIO(0x2724)
-#define OASTARTTRIG6_INVERT_A_0 (1<<0)
-#define OASTARTTRIG6_INVERT_A_1 (1<<1)
-#define OASTARTTRIG6_INVERT_A_2 (1<<2)
-#define OASTARTTRIG6_INVERT_A_3 (1<<3)
-#define OASTARTTRIG6_INVERT_A_4 (1<<4)
-#define OASTARTTRIG6_INVERT_A_5 (1<<5)
-#define OASTARTTRIG6_INVERT_A_6 (1<<6)
-#define OASTARTTRIG6_INVERT_A_7 (1<<7)
-#define OASTARTTRIG6_INVERT_A_8 (1<<8)
-#define OASTARTTRIG6_INVERT_A_9 (1<<9)
-#define OASTARTTRIG6_INVERT_A_10 (1<<10)
-#define OASTARTTRIG6_INVERT_A_11 (1<<11)
-#define OASTARTTRIG6_INVERT_A_12 (1<<12)
-#define OASTARTTRIG6_INVERT_A_13 (1<<13)
-#define OASTARTTRIG6_INVERT_A_14 (1<<14)
-#define OASTARTTRIG6_INVERT_A_15 (1<<15)
-#define OASTARTTRIG6_INVERT_B_0 (1<<16)
-#define OASTARTTRIG6_INVERT_B_1 (1<<17)
-#define OASTARTTRIG6_INVERT_B_2 (1<<18)
-#define OASTARTTRIG6_INVERT_B_3 (1<<19)
-#define OASTARTTRIG6_INVERT_C_0 (1<<20)
-#define OASTARTTRIG6_INVERT_C_1 (1<<21)
-#define OASTARTTRIG6_INVERT_D_0 (1<<22)
-#define OASTARTTRIG6_THRESHOLD_ENABLE (1<<23)
-#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1<<24)
-#define OASTARTTRIG6_EVENT_SELECT_4 (1<<28)
-#define OASTARTTRIG6_EVENT_SELECT_5 (1<<29)
-#define OASTARTTRIG6_EVENT_SELECT_6 (1<<30)
-#define OASTARTTRIG6_EVENT_SELECT_7 (1<<31)
+#define OASTARTTRIG6_INVERT_A_0 (1 << 0)
+#define OASTARTTRIG6_INVERT_A_1 (1 << 1)
+#define OASTARTTRIG6_INVERT_A_2 (1 << 2)
+#define OASTARTTRIG6_INVERT_A_3 (1 << 3)
+#define OASTARTTRIG6_INVERT_A_4 (1 << 4)
+#define OASTARTTRIG6_INVERT_A_5 (1 << 5)
+#define OASTARTTRIG6_INVERT_A_6 (1 << 6)
+#define OASTARTTRIG6_INVERT_A_7 (1 << 7)
+#define OASTARTTRIG6_INVERT_A_8 (1 << 8)
+#define OASTARTTRIG6_INVERT_A_9 (1 << 9)
+#define OASTARTTRIG6_INVERT_A_10 (1 << 10)
+#define OASTARTTRIG6_INVERT_A_11 (1 << 11)
+#define OASTARTTRIG6_INVERT_A_12 (1 << 12)
+#define OASTARTTRIG6_INVERT_A_13 (1 << 13)
+#define OASTARTTRIG6_INVERT_A_14 (1 << 14)
+#define OASTARTTRIG6_INVERT_A_15 (1 << 15)
+#define OASTARTTRIG6_INVERT_B_0 (1 << 16)
+#define OASTARTTRIG6_INVERT_B_1 (1 << 17)
+#define OASTARTTRIG6_INVERT_B_2 (1 << 18)
+#define OASTARTTRIG6_INVERT_B_3 (1 << 19)
+#define OASTARTTRIG6_INVERT_C_0 (1 << 20)
+#define OASTARTTRIG6_INVERT_C_1 (1 << 21)
+#define OASTARTTRIG6_INVERT_D_0 (1 << 22)
+#define OASTARTTRIG6_THRESHOLD_ENABLE (1 << 23)
+#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1 << 24)
+#define OASTARTTRIG6_EVENT_SELECT_4 (1 << 28)
+#define OASTARTTRIG6_EVENT_SELECT_5 (1 << 29)
+#define OASTARTTRIG6_EVENT_SELECT_6 (1 << 30)
+#define OASTARTTRIG6_EVENT_SELECT_7 (1 << 31)
#define OASTARTTRIG7 _MMIO(0x2728)
#define OASTARTTRIG7_NOA_SELECT_MASK 0xf
@@ -715,31 +733,31 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
#define OAREPORTTRIG2 _MMIO(0x2744)
-#define OAREPORTTRIG2_INVERT_A_0 (1<<0)
-#define OAREPORTTRIG2_INVERT_A_1 (1<<1)
-#define OAREPORTTRIG2_INVERT_A_2 (1<<2)
-#define OAREPORTTRIG2_INVERT_A_3 (1<<3)
-#define OAREPORTTRIG2_INVERT_A_4 (1<<4)
-#define OAREPORTTRIG2_INVERT_A_5 (1<<5)
-#define OAREPORTTRIG2_INVERT_A_6 (1<<6)
-#define OAREPORTTRIG2_INVERT_A_7 (1<<7)
-#define OAREPORTTRIG2_INVERT_A_8 (1<<8)
-#define OAREPORTTRIG2_INVERT_A_9 (1<<9)
-#define OAREPORTTRIG2_INVERT_A_10 (1<<10)
-#define OAREPORTTRIG2_INVERT_A_11 (1<<11)
-#define OAREPORTTRIG2_INVERT_A_12 (1<<12)
-#define OAREPORTTRIG2_INVERT_A_13 (1<<13)
-#define OAREPORTTRIG2_INVERT_A_14 (1<<14)
-#define OAREPORTTRIG2_INVERT_A_15 (1<<15)
-#define OAREPORTTRIG2_INVERT_B_0 (1<<16)
-#define OAREPORTTRIG2_INVERT_B_1 (1<<17)
-#define OAREPORTTRIG2_INVERT_B_2 (1<<18)
-#define OAREPORTTRIG2_INVERT_B_3 (1<<19)
-#define OAREPORTTRIG2_INVERT_C_0 (1<<20)
-#define OAREPORTTRIG2_INVERT_C_1 (1<<21)
-#define OAREPORTTRIG2_INVERT_D_0 (1<<22)
-#define OAREPORTTRIG2_THRESHOLD_ENABLE (1<<23)
-#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31)
+#define OAREPORTTRIG2_INVERT_A_0 (1 << 0)
+#define OAREPORTTRIG2_INVERT_A_1 (1 << 1)
+#define OAREPORTTRIG2_INVERT_A_2 (1 << 2)
+#define OAREPORTTRIG2_INVERT_A_3 (1 << 3)
+#define OAREPORTTRIG2_INVERT_A_4 (1 << 4)
+#define OAREPORTTRIG2_INVERT_A_5 (1 << 5)
+#define OAREPORTTRIG2_INVERT_A_6 (1 << 6)
+#define OAREPORTTRIG2_INVERT_A_7 (1 << 7)
+#define OAREPORTTRIG2_INVERT_A_8 (1 << 8)
+#define OAREPORTTRIG2_INVERT_A_9 (1 << 9)
+#define OAREPORTTRIG2_INVERT_A_10 (1 << 10)
+#define OAREPORTTRIG2_INVERT_A_11 (1 << 11)
+#define OAREPORTTRIG2_INVERT_A_12 (1 << 12)
+#define OAREPORTTRIG2_INVERT_A_13 (1 << 13)
+#define OAREPORTTRIG2_INVERT_A_14 (1 << 14)
+#define OAREPORTTRIG2_INVERT_A_15 (1 << 15)
+#define OAREPORTTRIG2_INVERT_B_0 (1 << 16)
+#define OAREPORTTRIG2_INVERT_B_1 (1 << 17)
+#define OAREPORTTRIG2_INVERT_B_2 (1 << 18)
+#define OAREPORTTRIG2_INVERT_B_3 (1 << 19)
+#define OAREPORTTRIG2_INVERT_C_0 (1 << 20)
+#define OAREPORTTRIG2_INVERT_C_1 (1 << 21)
+#define OAREPORTTRIG2_INVERT_D_0 (1 << 22)
+#define OAREPORTTRIG2_THRESHOLD_ENABLE (1 << 23)
+#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1 << 31)
#define OAREPORTTRIG3 _MMIO(0x2748)
#define OAREPORTTRIG3_NOA_SELECT_MASK 0xf
@@ -768,31 +786,31 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
#define OAREPORTTRIG6 _MMIO(0x2754)
-#define OAREPORTTRIG6_INVERT_A_0 (1<<0)
-#define OAREPORTTRIG6_INVERT_A_1 (1<<1)
-#define OAREPORTTRIG6_INVERT_A_2 (1<<2)
-#define OAREPORTTRIG6_INVERT_A_3 (1<<3)
-#define OAREPORTTRIG6_INVERT_A_4 (1<<4)
-#define OAREPORTTRIG6_INVERT_A_5 (1<<5)
-#define OAREPORTTRIG6_INVERT_A_6 (1<<6)
-#define OAREPORTTRIG6_INVERT_A_7 (1<<7)
-#define OAREPORTTRIG6_INVERT_A_8 (1<<8)
-#define OAREPORTTRIG6_INVERT_A_9 (1<<9)
-#define OAREPORTTRIG6_INVERT_A_10 (1<<10)
-#define OAREPORTTRIG6_INVERT_A_11 (1<<11)
-#define OAREPORTTRIG6_INVERT_A_12 (1<<12)
-#define OAREPORTTRIG6_INVERT_A_13 (1<<13)
-#define OAREPORTTRIG6_INVERT_A_14 (1<<14)
-#define OAREPORTTRIG6_INVERT_A_15 (1<<15)
-#define OAREPORTTRIG6_INVERT_B_0 (1<<16)
-#define OAREPORTTRIG6_INVERT_B_1 (1<<17)
-#define OAREPORTTRIG6_INVERT_B_2 (1<<18)
-#define OAREPORTTRIG6_INVERT_B_3 (1<<19)
-#define OAREPORTTRIG6_INVERT_C_0 (1<<20)
-#define OAREPORTTRIG6_INVERT_C_1 (1<<21)
-#define OAREPORTTRIG6_INVERT_D_0 (1<<22)
-#define OAREPORTTRIG6_THRESHOLD_ENABLE (1<<23)
-#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31)
+#define OAREPORTTRIG6_INVERT_A_0 (1 << 0)
+#define OAREPORTTRIG6_INVERT_A_1 (1 << 1)
+#define OAREPORTTRIG6_INVERT_A_2 (1 << 2)
+#define OAREPORTTRIG6_INVERT_A_3 (1 << 3)
+#define OAREPORTTRIG6_INVERT_A_4 (1 << 4)
+#define OAREPORTTRIG6_INVERT_A_5 (1 << 5)
+#define OAREPORTTRIG6_INVERT_A_6 (1 << 6)
+#define OAREPORTTRIG6_INVERT_A_7 (1 << 7)
+#define OAREPORTTRIG6_INVERT_A_8 (1 << 8)
+#define OAREPORTTRIG6_INVERT_A_9 (1 << 9)
+#define OAREPORTTRIG6_INVERT_A_10 (1 << 10)
+#define OAREPORTTRIG6_INVERT_A_11 (1 << 11)
+#define OAREPORTTRIG6_INVERT_A_12 (1 << 12)
+#define OAREPORTTRIG6_INVERT_A_13 (1 << 13)
+#define OAREPORTTRIG6_INVERT_A_14 (1 << 14)
+#define OAREPORTTRIG6_INVERT_A_15 (1 << 15)
+#define OAREPORTTRIG6_INVERT_B_0 (1 << 16)
+#define OAREPORTTRIG6_INVERT_B_1 (1 << 17)
+#define OAREPORTTRIG6_INVERT_B_2 (1 << 18)
+#define OAREPORTTRIG6_INVERT_B_3 (1 << 19)
+#define OAREPORTTRIG6_INVERT_C_0 (1 << 20)
+#define OAREPORTTRIG6_INVERT_C_1 (1 << 21)
+#define OAREPORTTRIG6_INVERT_D_0 (1 << 22)
+#define OAREPORTTRIG6_THRESHOLD_ENABLE (1 << 23)
+#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1 << 31)
#define OAREPORTTRIG7 _MMIO(0x2758)
#define OAREPORTTRIG7_NOA_SELECT_MASK 0xf
@@ -828,9 +846,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OACEC_COMPARE_VALUE_MASK 0xffff
#define OACEC_COMPARE_VALUE_SHIFT 3
-#define OACEC_SELECT_NOA (0<<19)
-#define OACEC_SELECT_PREV (1<<19)
-#define OACEC_SELECT_BOOLEAN (2<<19)
+#define OACEC_SELECT_NOA (0 << 19)
+#define OACEC_SELECT_PREV (1 << 19)
+#define OACEC_SELECT_BOOLEAN (2 << 19)
/* CECX_1 */
#define OACEC_MASK_MASK 0xffff
@@ -948,9 +966,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
* Reset registers
*/
#define DEBUG_RESET_I830 _MMIO(0x6070)
-#define DEBUG_RESET_FULL (1<<7)
-#define DEBUG_RESET_RENDER (1<<8)
-#define DEBUG_RESET_DISPLAY (1<<9)
+#define DEBUG_RESET_FULL (1 << 7)
+#define DEBUG_RESET_RENDER (1 << 8)
+#define DEBUG_RESET_DISPLAY (1 << 9)
/*
* IOSF sideband
@@ -961,7 +979,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IOSF_PORT_SHIFT 8
#define IOSF_BYTE_ENABLES_SHIFT 4
#define IOSF_BAR_SHIFT 1
-#define IOSF_SB_BUSY (1<<0)
+#define IOSF_SB_BUSY (1 << 0)
#define IOSF_PORT_BUNIT 0x03
#define IOSF_PORT_PUNIT 0x04
#define IOSF_PORT_NC 0x11
@@ -1044,13 +1062,13 @@ enum i915_power_well_id {
/*
* HSW/BDW
- * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1)
+ * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1)
*/
HSW_DISP_PW_GLOBAL = 15,
/*
* GEN9+
- * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1)
+ * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1)
*/
SKL_DISP_PW_MISC_IO = 0,
SKL_DISP_PW_DDI_A_E,
@@ -1074,17 +1092,54 @@ enum i915_power_well_id {
SKL_DISP_PW_2,
/* - custom power wells */
- SKL_DISP_PW_DC_OFF,
BXT_DPIO_CMN_A,
BXT_DPIO_CMN_BC,
- GLK_DPIO_CMN_C, /* 19 */
+ GLK_DPIO_CMN_C, /* 18 */
+
+ /*
+ * GEN11+
+ * - _HSW_PWR_WELL_CTL1-4
+ * (status bit: (id&15)*2, req bit:(id&15)*2+1)
+ */
+ ICL_DISP_PW_1 = 0,
+ ICL_DISP_PW_2,
+ ICL_DISP_PW_3,
+ ICL_DISP_PW_4,
+
+ /*
+ * - _HSW_PWR_WELL_CTL_AUX1/2/4
+ * (status bit: (id&15)*2, req bit:(id&15)*2+1)
+ */
+ ICL_DISP_PW_AUX_A = 16,
+ ICL_DISP_PW_AUX_B,
+ ICL_DISP_PW_AUX_C,
+ ICL_DISP_PW_AUX_D,
+ ICL_DISP_PW_AUX_E,
+ ICL_DISP_PW_AUX_F,
+
+ ICL_DISP_PW_AUX_TBT1 = 24,
+ ICL_DISP_PW_AUX_TBT2,
+ ICL_DISP_PW_AUX_TBT3,
+ ICL_DISP_PW_AUX_TBT4,
+
+ /*
+ * - _HSW_PWR_WELL_CTL_DDI1/2/4
+ * (status bit: (id&15)*2, req bit:(id&15)*2+1)
+ */
+ ICL_DISP_PW_DDI_A = 32,
+ ICL_DISP_PW_DDI_B,
+ ICL_DISP_PW_DDI_C,
+ ICL_DISP_PW_DDI_D,
+ ICL_DISP_PW_DDI_E,
+ ICL_DISP_PW_DDI_F, /* 37 */
/*
* Multiple platforms.
* Must start following the highest ID of any platform.
* - custom power wells
*/
- I915_DISP_PW_ALWAYS_ON = 20,
+ SKL_DISP_PW_DC_OFF = 38,
+ I915_DISP_PW_ALWAYS_ON,
};
#define PUNIT_REG_PWRGT_CTRL 0x60
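The GEN11+ comments in the enum above encode, per power well ID, where its status and request bits live in the relevant _HSW_PWR_WELL_CTL register: only the low four bits of the ID select the slot, which is why the AUX and DDI wells restart at 16 and 32. A small check of that arithmetic; the helper macro names are hypothetical, with IDs taken from the enum.

#include <assert.h>

#define PW_STATUS_BIT(id)  (((id) & 15) * 2)
#define PW_REQUEST_BIT(id) (((id) & 15) * 2 + 1)

int main(void)
{
	/* ICL_DISP_PW_2 = 1 -> status bit 2, request bit 3. */
	assert(PW_STATUS_BIT(1) == 2 && PW_REQUEST_BIT(1) == 3);
	/* ICL_DISP_PW_AUX_B = 17 -> (17 & 15) = 1 -> same slot layout. */
	assert(PW_STATUS_BIT(17) == 2 && PW_REQUEST_BIT(17) == 3);
	return 0;
}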
@@ -1098,8 +1153,8 @@ enum i915_power_well_id {
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
-#define GPLLENABLE (1<<4)
-#define GENFREQSTATUS (1<<0)
+#define GPLLENABLE (1 << 4)
+#define GENFREQSTATUS (1 << 0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_REG_CZ_TIMESTAMP 0xce
@@ -1141,11 +1196,11 @@ enum i915_power_well_id {
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
-#define VLV_TURBO_SOC_OVERRIDE 0x04
-#define VLV_OVERRIDE_EN 1
-#define VLV_SOC_TDP_EN (1 << 1)
-#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
-#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
+#define VLV_TURBO_SOC_OVERRIDE 0x04
+#define VLV_OVERRIDE_EN 1
+#define VLV_SOC_TDP_EN (1 << 1)
+#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
+#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
@@ -1194,10 +1249,10 @@ enum i915_power_well_id {
#define DPIO_DEVFN 0
#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
-#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
-#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
-#define DPIO_SFR_BYPASS (1<<1)
-#define DPIO_CMNRST (1<<0)
+#define DPIO_MODSEL1 (1 << 3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1 << 2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1 << 1)
+#define DPIO_CMNRST (1 << 0)
#define DPIO_PHY(pipe) ((pipe) >> 1)
#define DPIO_PHY_IOSF_PORT(phy) (dev_priv->dpio_phy_iosf_port[phy])
@@ -1215,7 +1270,7 @@ enum i915_power_well_id {
#define DPIO_P1_SHIFT (21) /* 3 bits */
#define DPIO_P2_SHIFT (16) /* 5 bits */
#define DPIO_N_SHIFT (12) /* 4 bits */
-#define DPIO_ENABLE_CALIBRATION (1<<11)
+#define DPIO_ENABLE_CALIBRATION (1 << 11)
#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
#define DPIO_M2DIV_MASK 0xff
#define _VLV_PLL_DW3_CH1 0x802c
@@ -1264,10 +1319,10 @@ enum i915_power_well_id {
#define _VLV_PCS_DW0_CH0 0x8200
#define _VLV_PCS_DW0_CH1 0x8400
-#define DPIO_PCS_TX_LANE2_RESET (1<<16)
-#define DPIO_PCS_TX_LANE1_RESET (1<<7)
-#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
-#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1<<3)
+#define DPIO_PCS_TX_LANE2_RESET (1 << 16)
+#define DPIO_PCS_TX_LANE1_RESET (1 << 7)
+#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1 << 4)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1 << 3)
#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
#define _VLV_PCS01_DW0_CH0 0x200
@@ -1279,11 +1334,11 @@ enum i915_power_well_id {
#define _VLV_PCS_DW1_CH0 0x8204
#define _VLV_PCS_DW1_CH1 0x8404
-#define CHV_PCS_REQ_SOFTRESET_EN (1<<23)
-#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
-#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
+#define CHV_PCS_REQ_SOFTRESET_EN (1 << 23)
+#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1 << 22)
+#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1 << 21)
#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
-#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
+#define DPIO_PCS_CLK_SOFT_RESET (1 << 5)
#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
#define _VLV_PCS01_DW1_CH0 0x204
@@ -1308,12 +1363,12 @@ enum i915_power_well_id {
#define _VLV_PCS_DW9_CH0 0x8224
#define _VLV_PCS_DW9_CH1 0x8424
-#define DPIO_PCS_TX2MARGIN_MASK (0x7<<13)
-#define DPIO_PCS_TX2MARGIN_000 (0<<13)
-#define DPIO_PCS_TX2MARGIN_101 (1<<13)
-#define DPIO_PCS_TX1MARGIN_MASK (0x7<<10)
-#define DPIO_PCS_TX1MARGIN_000 (0<<10)
-#define DPIO_PCS_TX1MARGIN_101 (1<<10)
+#define DPIO_PCS_TX2MARGIN_MASK (0x7 << 13)
+#define DPIO_PCS_TX2MARGIN_000 (0 << 13)
+#define DPIO_PCS_TX2MARGIN_101 (1 << 13)
+#define DPIO_PCS_TX1MARGIN_MASK (0x7 << 10)
+#define DPIO_PCS_TX1MARGIN_000 (0 << 10)
+#define DPIO_PCS_TX1MARGIN_101 (1 << 10)
#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
#define _VLV_PCS01_DW9_CH0 0x224
@@ -1325,14 +1380,14 @@ enum i915_power_well_id {
#define _CHV_PCS_DW10_CH0 0x8228
#define _CHV_PCS_DW10_CH1 0x8428
-#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
-#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
-#define DPIO_PCS_TX2DEEMP_MASK (0xf<<24)
-#define DPIO_PCS_TX2DEEMP_9P5 (0<<24)
-#define DPIO_PCS_TX2DEEMP_6P0 (2<<24)
-#define DPIO_PCS_TX1DEEMP_MASK (0xf<<16)
-#define DPIO_PCS_TX1DEEMP_9P5 (0<<16)
-#define DPIO_PCS_TX1DEEMP_6P0 (2<<16)
+#define DPIO_PCS_SWING_CALC_TX0_TX2 (1 << 30)
+#define DPIO_PCS_SWING_CALC_TX1_TX3 (1 << 31)
+#define DPIO_PCS_TX2DEEMP_MASK (0xf << 24)
+#define DPIO_PCS_TX2DEEMP_9P5 (0 << 24)
+#define DPIO_PCS_TX2DEEMP_6P0 (2 << 24)
+#define DPIO_PCS_TX1DEEMP_MASK (0xf << 16)
+#define DPIO_PCS_TX1DEEMP_9P5 (0 << 16)
+#define DPIO_PCS_TX1DEEMP_6P0 (2 << 16)
#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
#define _VLV_PCS01_DW10_CH0 0x0228
@@ -1344,10 +1399,10 @@ enum i915_power_well_id {
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
-#define DPIO_TX2_STAGGER_MASK(x) ((x)<<24)
-#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
-#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
-#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
+#define DPIO_TX2_STAGGER_MASK(x) ((x) << 24)
+#define DPIO_LANEDESKEW_STRAP_OVRD (1 << 3)
+#define DPIO_LEFT_TXFIFO_RST_MASTER (1 << 1)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER (1 << 0)
#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
#define _VLV_PCS01_DW11_CH0 0x022c
@@ -1366,11 +1421,11 @@ enum i915_power_well_id {
#define _VLV_PCS_DW12_CH0 0x8230
#define _VLV_PCS_DW12_CH1 0x8430
-#define DPIO_TX2_STAGGER_MULT(x) ((x)<<20)
-#define DPIO_TX1_STAGGER_MULT(x) ((x)<<16)
-#define DPIO_TX1_STAGGER_MASK(x) ((x)<<8)
-#define DPIO_LANESTAGGER_STRAP_OVRD (1<<6)
-#define DPIO_LANESTAGGER_STRAP(x) ((x)<<0)
+#define DPIO_TX2_STAGGER_MULT(x) ((x) << 20)
+#define DPIO_TX1_STAGGER_MULT(x) ((x) << 16)
+#define DPIO_TX1_STAGGER_MASK(x) ((x) << 8)
+#define DPIO_LANESTAGGER_STRAP_OVRD (1 << 6)
+#define DPIO_LANESTAGGER_STRAP(x) ((x) << 0)
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
#define _VLV_PCS_DW14_CH0 0x8238
@@ -1391,7 +1446,7 @@ enum i915_power_well_id {
#define _VLV_TX_DW3_CH0 0x828c
#define _VLV_TX_DW3_CH1 0x848c
/* The following bit for CHV phy */
-#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27)
+#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1 << 27)
#define DPIO_SWING_MARGIN101_SHIFT 16
#define DPIO_SWING_MARGIN101_MASK (0xff << DPIO_SWING_MARGIN101_SHIFT)
#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
@@ -1410,7 +1465,7 @@ enum i915_power_well_id {
#define _VLV_TX_DW5_CH0 0x8294
#define _VLV_TX_DW5_CH1 0x8494
-#define DPIO_TX_OCALINIT_EN (1<<31)
+#define DPIO_TX_OCALINIT_EN (1 << 31)
#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
#define _VLV_TX_DW11_CH0 0x82ac
@@ -1640,10 +1695,10 @@ enum i915_power_well_id {
#define PORT_PLL_LOCK_THRESHOLD_SHIFT 1
#define PORT_PLL_LOCK_THRESHOLD_MASK (0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
/* PORT_PLL_10_A */
-#define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27)
+#define PORT_PLL_DCO_AMP_OVR_EN_H (1 << 27)
#define PORT_PLL_DCO_AMP_DEFAULT 15
#define PORT_PLL_DCO_AMP_MASK 0x3c00
-#define PORT_PLL_DCO_AMP(x) ((x)<<10)
+#define PORT_PLL_DCO_AMP(x) ((x) << 10)
#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
_PORT_PLL_0_B, \
_PORT_PLL_0_C)
@@ -1666,6 +1721,26 @@ enum i915_power_well_id {
#define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
_ICL_PORT_CL_DW5_B)
+#define _CNL_PORT_CL_DW10_A 0x162028
+#define _ICL_PORT_CL_DW10_B 0x6c028
+#define ICL_PORT_CL_DW10(port) _MMIO_PORT(port, \
+ _CNL_PORT_CL_DW10_A, \
+ _ICL_PORT_CL_DW10_B)
+#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
+#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
+#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
+#define PWR_UP_ALL_LANES (0x0 << 4)
+#define PWR_DOWN_LN_3_2_1 (0xe << 4)
+#define PWR_DOWN_LN_3_2 (0xc << 4)
+#define PWR_DOWN_LN_3 (0x8 << 4)
+#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
+#define PWR_DOWN_LN_1_0 (0x3 << 4)
+#define PWR_DOWN_LN_1 (0x2 << 4)
+#define PWR_DOWN_LN_3_1 (0xa << 4)
+#define PWR_DOWN_LN_3_1_0 (0xb << 4)
+#define PWR_DOWN_LN_MASK (0xf << 4)
+#define PWR_DOWN_LN_SHIFT 4
+
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
@@ -1678,6 +1753,13 @@ enum i915_power_well_id {
#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
+#define _ICL_PORT_CL_DW12_A 0x162030
+#define _ICL_PORT_CL_DW12_B 0x6C030
+#define ICL_LANE_ENABLE_AUX (1 << 0)
+#define ICL_PORT_CL_DW12(port) _MMIO_PORT((port), \
+ _ICL_PORT_CL_DW12_A, \
+ _ICL_PORT_CL_DW12_B)
+
#define _PORT_CL1CM_DW28_A 0x162070
#define _PORT_CL1CM_DW28_BC 0x6C070
#define OCL1_POWER_DOWN_EN (1 << 23)
@@ -1715,16 +1797,22 @@ enum i915_power_well_id {
_CNL_PORT_PCS_DW1_LN0_D, \
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_F))
+
#define _ICL_PORT_PCS_DW1_GRP_A 0x162604
#define _ICL_PORT_PCS_DW1_GRP_B 0x6C604
#define _ICL_PORT_PCS_DW1_LN0_A 0x162804
#define _ICL_PORT_PCS_DW1_LN0_B 0x6C804
+#define _ICL_PORT_PCS_DW1_AUX_A 0x162304
+#define _ICL_PORT_PCS_DW1_AUX_B 0x6c304
#define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\
_ICL_PORT_PCS_DW1_GRP_A, \
_ICL_PORT_PCS_DW1_GRP_B)
#define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \
_ICL_PORT_PCS_DW1_LN0_A, \
_ICL_PORT_PCS_DW1_LN0_B)
+#define ICL_PORT_PCS_DW1_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_PCS_DW1_AUX_A, \
+ _ICL_PORT_PCS_DW1_AUX_B)
#define COMMON_KEEPER_EN (1 << 26)
/* CNL Port TX registers */
@@ -1745,7 +1833,7 @@ enum i915_power_well_id {
_CNL_PORT_TX_D_GRP_OFFSET, \
_CNL_PORT_TX_AE_GRP_OFFSET, \
_CNL_PORT_TX_F_GRP_OFFSET) + \
- 4*(dw))
+ 4 * (dw))
#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \
_CNL_PORT_TX_AE_LN0_OFFSET, \
_CNL_PORT_TX_B_LN0_OFFSET, \
@@ -1753,7 +1841,7 @@ enum i915_power_well_id {
_CNL_PORT_TX_D_LN0_OFFSET, \
_CNL_PORT_TX_AE_LN0_OFFSET, \
_CNL_PORT_TX_F_LN0_OFFSET) + \
- 4*(dw))
+ 4 * (dw))
#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
@@ -1761,16 +1849,23 @@ enum i915_power_well_id {
#define _ICL_PORT_TX_DW2_GRP_B 0x6C688
#define _ICL_PORT_TX_DW2_LN0_A 0x162888
#define _ICL_PORT_TX_DW2_LN0_B 0x6C888
+#define _ICL_PORT_TX_DW2_AUX_A 0x162388
+#define _ICL_PORT_TX_DW2_AUX_B 0x6c388
#define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW2_GRP_A, \
_ICL_PORT_TX_DW2_GRP_B)
#define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW2_LN0_A, \
_ICL_PORT_TX_DW2_LN0_B)
+#define ICL_PORT_TX_DW2_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW2_AUX_A, \
+ _ICL_PORT_TX_DW2_AUX_B)
#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK (1 << 15)
#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
#define SWING_SEL_LOWER_MASK (0x7 << 11)
+#define FRC_LATENCY_OPTIM_MASK (0x7 << 8)
+#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8)
#define RCOMP_SCALAR(x) ((x) << 0)
#define RCOMP_SCALAR_MASK (0xFF << 0)
@@ -1779,21 +1874,26 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
- (ln * (_CNL_PORT_TX_DW4_LN1_AE - \
+ ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
#define _ICL_PORT_TX_DW4_GRP_A 0x162690
#define _ICL_PORT_TX_DW4_GRP_B 0x6C690
#define _ICL_PORT_TX_DW4_LN0_A 0x162890
#define _ICL_PORT_TX_DW4_LN1_A 0x162990
#define _ICL_PORT_TX_DW4_LN0_B 0x6C890
+#define _ICL_PORT_TX_DW4_AUX_A 0x162390
+#define _ICL_PORT_TX_DW4_AUX_B 0x6c390
#define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW4_GRP_A, \
_ICL_PORT_TX_DW4_GRP_B)
#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_PORT(port, \
_ICL_PORT_TX_DW4_LN0_A, \
_ICL_PORT_TX_DW4_LN0_B) + \
- (ln * (_ICL_PORT_TX_DW4_LN1_A - \
- _ICL_PORT_TX_DW4_LN0_A)))
+ ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \
+ _ICL_PORT_TX_DW4_LN0_A)))
+#define ICL_PORT_TX_DW4_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW4_AUX_A, \
+ _ICL_PORT_TX_DW4_AUX_B)
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
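The parenthesization fixes above (ln -> (ln) in CNL_PORT_TX_DW4_LN and ICL_PORT_TX_DW4_LN) matter because an unparenthesized macro argument misexpands when the caller passes an expression. An illustrative reduction, with made-up offsets:

#include <assert.h>

#define STRIDE 0x100
#define REG_BAD(ln)  (0x1000 + ln * STRIDE)    /* unparenthesized */
#define REG_GOOD(ln) (0x1000 + (ln) * STRIDE)  /* as fixed above */

int main(void)
{
	/* With ln = 1 + 2, precedence breaks the bad form: */
	assert(REG_BAD(1 + 2)  == 0x1201);  /* 0x1000 + 1 + 2 * 0x100 */
	assert(REG_GOOD(1 + 2) == 0x1300);  /* 0x1000 + 3 * 0x100 */
	return 0;
}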
@@ -1808,12 +1908,17 @@ enum i915_power_well_id {
#define _ICL_PORT_TX_DW5_GRP_B 0x6C694
#define _ICL_PORT_TX_DW5_LN0_A 0x162894
#define _ICL_PORT_TX_DW5_LN0_B 0x6C894
+#define _ICL_PORT_TX_DW5_AUX_A 0x162394
+#define _ICL_PORT_TX_DW5_AUX_B 0x6c394
#define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW5_GRP_A, \
_ICL_PORT_TX_DW5_GRP_B)
#define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW5_LN0_A, \
_ICL_PORT_TX_DW5_LN0_B)
+#define ICL_PORT_TX_DW5_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW5_AUX_A, \
+ _ICL_PORT_TX_DW5_AUX_B)
#define TX_TRAINING_EN (1 << 31)
#define TAP2_DISABLE (1 << 30)
#define TAP3_DISABLE (1 << 29)
@@ -1990,6 +2095,11 @@ enum i915_power_well_id {
_ICL_PORT_COMP_DW10_A, \
_ICL_PORT_COMP_DW10_B)
+/* ICL PHY DFLEX registers */
+#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0)
+#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n)))
+#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n)))
+
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define _PORT_REF_DW3_BC 0x6C18C
@@ -2134,8 +2244,8 @@ enum i915_power_well_id {
/* SKL balance leg register */
#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C)
/* I_boost values */
-#define BALANCE_LEG_SHIFT(port) (8+3*(port))
-#define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
+#define BALANCE_LEG_SHIFT(port) (8 + 3 * (port))
+#define BALANCE_LEG_MASK(port) (7 << (8 + 3 * (port)))
/* Balance leg disable bits */
#define BALANCE_LEG_DISABLE_SHIFT 23
#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
@@ -2155,10 +2265,10 @@ enum i915_power_well_id {
#define I830_FENCE_TILING_Y_SHIFT 12
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
-#define I830_FENCE_REG_VALID (1<<0)
+#define I830_FENCE_REG_VALID (1 << 0)
#define I915_FENCE_MAX_PITCH_VAL 4
#define I830_FENCE_MAX_PITCH_VAL 6
-#define I830_FENCE_MAX_SIZE_VAL (1<<8)
+#define I830_FENCE_MAX_SIZE_VAL (1 << 8)
#define I915_FENCE_START_MASK 0x0ff00000
#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
@@ -2167,7 +2277,7 @@ enum i915_power_well_id {
#define FENCE_REG_965_HI(i) _MMIO(0x03000 + (i) * 8 + 4)
#define I965_FENCE_PITCH_SHIFT 2
#define I965_FENCE_TILING_Y_SHIFT 1
-#define I965_FENCE_REG_VALID (1<<0)
+#define I965_FENCE_REG_VALID (1 << 0)
#define I965_FENCE_MAX_PITCH_VAL 0x0400
#define FENCE_REG_GEN6_LO(i) _MMIO(0x100000 + (i) * 8)
@@ -2190,13 +2300,13 @@ enum i915_power_well_id {
#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
#define PGTBL_ER _MMIO(0x02024)
-#define PRB0_BASE (0x2030-0x30)
-#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */
-#define PRB2_BASE (0x2050-0x30) /* gen3 */
-#define SRB0_BASE (0x2100-0x30) /* gen2 */
-#define SRB1_BASE (0x2110-0x30) /* gen2 */
-#define SRB2_BASE (0x2120-0x30) /* 830 */
-#define SRB3_BASE (0x2130-0x30) /* 830 */
+#define PRB0_BASE (0x2030 - 0x30)
+#define PRB1_BASE (0x2040 - 0x30) /* 830,gen3 */
+#define PRB2_BASE (0x2050 - 0x30) /* gen3 */
+#define SRB0_BASE (0x2100 - 0x30) /* gen2 */
+#define SRB1_BASE (0x2110 - 0x30) /* gen2 */
+#define SRB2_BASE (0x2120 - 0x30) /* 830 */
+#define SRB3_BASE (0x2130 - 0x30) /* 830 */
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
@@ -2209,14 +2319,14 @@ enum i915_power_well_id {
#define GEN11_VEBOX_RING_BASE 0x1c8000
#define GEN11_VEBOX2_RING_BASE 0x1d8000
#define BLT_RING_BASE 0x22000
-#define RING_TAIL(base) _MMIO((base)+0x30)
-#define RING_HEAD(base) _MMIO((base)+0x34)
-#define RING_START(base) _MMIO((base)+0x38)
-#define RING_CTL(base) _MMIO((base)+0x3c)
+#define RING_TAIL(base) _MMIO((base) + 0x30)
+#define RING_HEAD(base) _MMIO((base) + 0x34)
+#define RING_START(base) _MMIO((base) + 0x38)
+#define RING_CTL(base) _MMIO((base) + 0x3c)
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
-#define RING_SYNC_0(base) _MMIO((base)+0x40)
-#define RING_SYNC_1(base) _MMIO((base)+0x44)
-#define RING_SYNC_2(base) _MMIO((base)+0x48)
+#define RING_SYNC_0(base) _MMIO((base) + 0x40)
+#define RING_SYNC_1(base) _MMIO((base) + 0x44)
+#define RING_SYNC_2(base) _MMIO((base) + 0x48)
#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE))
@@ -2230,21 +2340,22 @@ enum i915_power_well_id {
#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
#define GEN6_NOSYNC INVALID_MMIO_REG
-#define RING_PSMI_CTL(base) _MMIO((base)+0x50)
-#define RING_MAX_IDLE(base) _MMIO((base)+0x54)
-#define RING_HWS_PGA(base) _MMIO((base)+0x80)
-#define RING_HWS_PGA_GEN6(base) _MMIO((base)+0x2080)
-#define RING_RESET_CTL(base) _MMIO((base)+0xd0)
+#define RING_PSMI_CTL(base) _MMIO((base) + 0x50)
+#define RING_MAX_IDLE(base) _MMIO((base) + 0x54)
+#define RING_HWS_PGA(base) _MMIO((base) + 0x80)
+#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080)
+#define RING_RESET_CTL(base) _MMIO((base) + 0xd0)
#define RESET_CTL_REQUEST_RESET (1 << 0)
#define RESET_CTL_READY_TO_RESET (1 << 1)
+#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c)
#define HSW_GTT_CACHE_EN _MMIO(0x4024)
#define GTT_CACHE_EN_ALL 0xF0007FFF
#define GEN7_WR_WATERMARK _MMIO(0x4028)
#define GEN7_GFX_PRIO_CTRL _MMIO(0x402C)
#define ARB_MODE _MMIO(0x4030)
-#define ARB_MODE_SWIZZLE_SNB (1<<4)
-#define ARB_MODE_SWIZZLE_IVB (1<<5)
+#define ARB_MODE_SWIZZLE_SNB (1 << 4)
+#define ARB_MODE_SWIZZLE_IVB (1 << 5)
#define GEN7_GFX_PEND_TLB0 _MMIO(0x4034)
#define GEN7_GFX_PEND_TLB1 _MMIO(0x4038)
/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
@@ -2254,30 +2365,30 @@ enum i915_power_well_id {
#define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074)
#define GAMTARBMODE _MMIO(0x04a08)
-#define ARB_MODE_BWGTLB_DISABLE (1<<9)
-#define ARB_MODE_SWIZZLE_BDW (1<<1)
+#define ARB_MODE_BWGTLB_DISABLE (1 << 9)
+#define ARB_MODE_SWIZZLE_BDW (1 << 1)
#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
-#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100*(engine)->hw_id)
+#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100 * (engine)->hw_id)
#define GEN8_RING_FAULT_REG _MMIO(0x4094)
#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
-#define RING_FAULT_GTTSEL_MASK (1<<11)
+#define RING_FAULT_GTTSEL_MASK (1 << 11)
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
-#define RING_FAULT_VALID (1<<0)
+#define RING_FAULT_VALID (1 << 0)
#define DONE_REG _MMIO(0x40b0)
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
-#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index)*4)
+#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
-#define RING_ACTHD(base) _MMIO((base)+0x74)
-#define RING_ACTHD_UDW(base) _MMIO((base)+0x5c)
-#define RING_NOPID(base) _MMIO((base)+0x94)
-#define RING_IMR(base) _MMIO((base)+0xa8)
-#define RING_HWSTAM(base) _MMIO((base)+0x98)
-#define RING_TIMESTAMP(base) _MMIO((base)+0x358)
-#define RING_TIMESTAMP_UDW(base) _MMIO((base)+0x358 + 4)
+#define RING_ACTHD(base) _MMIO((base) + 0x74)
+#define RING_ACTHD_UDW(base) _MMIO((base) + 0x5c)
+#define RING_NOPID(base) _MMIO((base) + 0x94)
+#define RING_IMR(base) _MMIO((base) + 0xa8)
+#define RING_HWSTAM(base) _MMIO((base) + 0x98)
+#define RING_TIMESTAMP(base) _MMIO((base) + 0x358)
+#define RING_TIMESTAMP_UDW(base) _MMIO((base) + 0x358 + 4)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -2290,24 +2401,25 @@ enum i915_power_well_id {
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
-#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
-#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
-#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
+#define RING_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */
+#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
+#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
-#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4)
+#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
#define RING_MAX_NONPRIV_SLOTS 12
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
-#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18)
+#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1 << 18)
#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
-#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
-#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24)
+#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31)
+#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
+#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
#if 0
#define PRB0_TAIL _MMIO(0x2030)
@@ -2333,19 +2445,19 @@ enum i915_power_well_id {
#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf)
#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24)
#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7)
-#define RING_IPEIR(base) _MMIO((base)+0x64)
-#define RING_IPEHR(base) _MMIO((base)+0x68)
+#define RING_IPEIR(base) _MMIO((base) + 0x64)
+#define RING_IPEHR(base) _MMIO((base) + 0x68)
/*
* On GEN4, only the render ring INSTDONE exists and has a different
* layout than the GEN7+ version.
* The GEN2 counterpart of this register is GEN2_INSTDONE.
*/
-#define RING_INSTDONE(base) _MMIO((base)+0x6c)
-#define RING_INSTPS(base) _MMIO((base)+0x70)
-#define RING_DMA_FADD(base) _MMIO((base)+0x78)
-#define RING_DMA_FADD_UDW(base) _MMIO((base)+0x60) /* gen8+ */
-#define RING_INSTPM(base) _MMIO((base)+0xc0)
-#define RING_MI_MODE(base) _MMIO((base)+0x9c)
+#define RING_INSTDONE(base) _MMIO((base) + 0x6c)
+#define RING_INSTPS(base) _MMIO((base) + 0x70)
+#define RING_DMA_FADD(base) _MMIO((base) + 0x78)
+#define RING_DMA_FADD_UDW(base) _MMIO((base) + 0x60) /* gen8+ */
+#define RING_INSTPM(base) _MMIO((base) + 0xc0)
+#define RING_MI_MODE(base) _MMIO((base) + 0x9c)
#define INSTPS _MMIO(0x2070) /* 965+ only */
#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
#define ACTHD_I965 _MMIO(0x2074)
@@ -2353,37 +2465,37 @@ enum i915_power_well_id {
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
-#define PWRCTX_EN (1<<0)
+#define PWRCTX_EN (1 << 0)
#define IPEIR _MMIO(0x2088)
#define IPEHR _MMIO(0x208c)
#define GEN2_INSTDONE _MMIO(0x2090)
#define NOPID _MMIO(0x2094)
#define HWSTAM _MMIO(0x2098)
#define DMA_FADD_I8XX _MMIO(0x20d0)
-#define RING_BBSTATE(base) _MMIO((base)+0x110)
+#define RING_BBSTATE(base) _MMIO((base) + 0x110)
#define RING_BB_PPGTT (1 << 5)
-#define RING_SBBADDR(base) _MMIO((base)+0x114) /* hsw+ */
-#define RING_SBBSTATE(base) _MMIO((base)+0x118) /* hsw+ */
-#define RING_SBBADDR_UDW(base) _MMIO((base)+0x11c) /* gen8+ */
-#define RING_BBADDR(base) _MMIO((base)+0x140)
-#define RING_BBADDR_UDW(base) _MMIO((base)+0x168) /* gen8+ */
-#define RING_BB_PER_CTX_PTR(base) _MMIO((base)+0x1c0) /* gen8+ */
-#define RING_INDIRECT_CTX(base) _MMIO((base)+0x1c4) /* gen8+ */
-#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base)+0x1c8) /* gen8+ */
-#define RING_CTX_TIMESTAMP(base) _MMIO((base)+0x3a8) /* gen8+ */
+#define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */
+#define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */
+#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */
+#define RING_BBADDR(base) _MMIO((base) + 0x140)
+#define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */
+#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */
+#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */
+#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
+#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */
#define ERROR_GEN6 _MMIO(0x40a0)
#define GEN7_ERR_INT _MMIO(0x44040)
-#define ERR_INT_POISON (1<<31)
-#define ERR_INT_MMIO_UNCLAIMED (1<<13)
-#define ERR_INT_PIPE_CRC_DONE_C (1<<8)
-#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
-#define ERR_INT_PIPE_CRC_DONE_B (1<<5)
-#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
-#define ERR_INT_PIPE_CRC_DONE_A (1<<2)
-#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + (pipe)*3))
-#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
-#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
+#define ERR_INT_POISON (1 << 31)
+#define ERR_INT_MMIO_UNCLAIMED (1 << 13)
+#define ERR_INT_PIPE_CRC_DONE_C (1 << 8)
+#define ERR_INT_FIFO_UNDERRUN_C (1 << 6)
+#define ERR_INT_PIPE_CRC_DONE_B (1 << 5)
+#define ERR_INT_FIFO_UNDERRUN_B (1 << 3)
+#define ERR_INT_PIPE_CRC_DONE_A (1 << 2)
+#define ERR_INT_PIPE_CRC_DONE(pipe) (1 << (2 + (pipe) * 3))
+#define ERR_INT_FIFO_UNDERRUN_A (1 << 0)
+#define ERR_INT_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
@@ -2391,7 +2503,7 @@ enum i915_power_well_id {
#define FAULT_GTT_SEL (1 << 4)
#define FPGA_DBG _MMIO(0x42300)
-#define FPGA_DBG_RM_NOCLAIM (1<<31)
+#define FPGA_DBG_RM_NOCLAIM (1 << 31)
#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028)
#define CLAIM_ER_CLR (1 << 31)
@@ -2400,22 +2512,22 @@ enum i915_power_well_id {
#define DERRMR _MMIO(0x44050)
/* Note that HBLANK events are reserved on bdw+ */
-#define DERRMR_PIPEA_SCANLINE (1<<0)
-#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
-#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
-#define DERRMR_PIPEA_VBLANK (1<<3)
-#define DERRMR_PIPEA_HBLANK (1<<5)
-#define DERRMR_PIPEB_SCANLINE (1<<8)
-#define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9)
-#define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10)
-#define DERRMR_PIPEB_VBLANK (1<<11)
-#define DERRMR_PIPEB_HBLANK (1<<13)
+#define DERRMR_PIPEA_SCANLINE (1 << 0)
+#define DERRMR_PIPEA_PRI_FLIP_DONE (1 << 1)
+#define DERRMR_PIPEA_SPR_FLIP_DONE (1 << 2)
+#define DERRMR_PIPEA_VBLANK (1 << 3)
+#define DERRMR_PIPEA_HBLANK (1 << 5)
+#define DERRMR_PIPEB_SCANLINE (1 << 8)
+#define DERRMR_PIPEB_PRI_FLIP_DONE (1 << 9)
+#define DERRMR_PIPEB_SPR_FLIP_DONE (1 << 10)
+#define DERRMR_PIPEB_VBLANK (1 << 11)
+#define DERRMR_PIPEB_HBLANK (1 << 13)
/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
-#define DERRMR_PIPEC_SCANLINE (1<<14)
-#define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15)
-#define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20)
-#define DERRMR_PIPEC_VBLANK (1<<21)
-#define DERRMR_PIPEC_HBLANK (1<<22)
+#define DERRMR_PIPEC_SCANLINE (1 << 14)
+#define DERRMR_PIPEC_PRI_FLIP_DONE (1 << 15)
+#define DERRMR_PIPEC_SPR_FLIP_DONE (1 << 20)
+#define DERRMR_PIPEC_VBLANK (1 << 21)
+#define DERRMR_PIPEC_HBLANK (1 << 22)
/* GM45+ chicken bits -- debug workaround bits that may be required
@@ -2439,7 +2551,7 @@ enum i915_power_well_id {
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
-#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
+#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x) << 1) /* gen8+ */
#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
#define MI_MODE _MMIO(0x209c)
@@ -2478,22 +2590,22 @@ enum i915_power_well_id {
#define GFX_MODE _MMIO(0x2520)
#define GFX_MODE_GEN7 _MMIO(0x229c)
-#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base+0x29c)
-#define GFX_RUN_LIST_ENABLE (1<<15)
-#define GFX_INTERRUPT_STEERING (1<<14)
-#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
-#define GFX_SURFACE_FAULT_ENABLE (1<<12)
-#define GFX_REPLAY_MODE (1<<11)
-#define GFX_PSMI_GRANULARITY (1<<10)
-#define GFX_PPGTT_ENABLE (1<<9)
-#define GEN8_GFX_PPGTT_48B (1<<7)
-
-#define GFX_FORWARD_VBLANK_MASK (3<<5)
-#define GFX_FORWARD_VBLANK_NEVER (0<<5)
-#define GFX_FORWARD_VBLANK_ALWAYS (1<<5)
-#define GFX_FORWARD_VBLANK_COND (2<<5)
-
-#define GEN11_GFX_DISABLE_LEGACY_MODE (1<<3)
+#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base + 0x29c)
+#define GFX_RUN_LIST_ENABLE (1 << 15)
+#define GFX_INTERRUPT_STEERING (1 << 14)
+#define GFX_TLB_INVALIDATE_EXPLICIT (1 << 13)
+#define GFX_SURFACE_FAULT_ENABLE (1 << 12)
+#define GFX_REPLAY_MODE (1 << 11)
+#define GFX_PSMI_GRANULARITY (1 << 10)
+#define GFX_PPGTT_ENABLE (1 << 9)
+#define GEN8_GFX_PPGTT_48B (1 << 7)
+
+#define GFX_FORWARD_VBLANK_MASK (3 << 5)
+#define GFX_FORWARD_VBLANK_NEVER (0 << 5)
+#define GFX_FORWARD_VBLANK_ALWAYS (1 << 5)
+#define GFX_FORWARD_VBLANK_COND (2 << 5)
+
+#define GEN11_GFX_DISABLE_LEGACY_MODE (1 << 3)
#define VLV_DISPLAY_BASE 0x180000
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
@@ -2507,8 +2619,8 @@ enum i915_power_well_id {
#define IMR _MMIO(0x20a8)
#define ISR _MMIO(0x20ac)
#define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060)
-#define GINT_DIS (1<<22)
-#define GCFG_DIS (1<<8)
+#define GINT_DIS (1 << 22)
+#define GCFG_DIS (1 << 8)
#define VLV_GUNIT_CLOCK_GATE2 _MMIO(VLV_DISPLAY_BASE + 0x2064)
#define VLV_IIR_RW _MMIO(VLV_DISPLAY_BASE + 0x2084)
#define VLV_IER _MMIO(VLV_DISPLAY_BASE + 0x20a0)
@@ -2518,35 +2630,35 @@ enum i915_power_well_id {
#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
#define VLV_PCBR_ADDR_SHIFT 12
-#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
+#define DISPLAY_PLANE_FLIP_PENDING(plane) (1 << (11 - (plane))) /* A and B only */
#define EIR _MMIO(0x20b0)
#define EMR _MMIO(0x20b4)
#define ESR _MMIO(0x20b8)
-#define GM45_ERROR_PAGE_TABLE (1<<5)
-#define GM45_ERROR_MEM_PRIV (1<<4)
-#define I915_ERROR_PAGE_TABLE (1<<4)
-#define GM45_ERROR_CP_PRIV (1<<3)
-#define I915_ERROR_MEMORY_REFRESH (1<<1)
-#define I915_ERROR_INSTRUCTION (1<<0)
+#define GM45_ERROR_PAGE_TABLE (1 << 5)
+#define GM45_ERROR_MEM_PRIV (1 << 4)
+#define I915_ERROR_PAGE_TABLE (1 << 4)
+#define GM45_ERROR_CP_PRIV (1 << 3)
+#define I915_ERROR_MEMORY_REFRESH (1 << 1)
+#define I915_ERROR_INSTRUCTION (1 << 0)
#define INSTPM _MMIO(0x20c0)
-#define INSTPM_SELF_EN (1<<12) /* 915GM only */
-#define INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts
+#define INSTPM_SELF_EN (1 << 12) /* 915GM only */
+#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
will not assert AGPBUSY# and will only
be delivered when out of C3. */
-#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
-#define INSTPM_TLB_INVALIDATE (1<<9)
-#define INSTPM_SYNC_FLUSH (1<<5)
+#define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1 << 9)
+#define INSTPM_SYNC_FLUSH (1 << 5)
#define ACTHD _MMIO(0x20c8)
#define MEM_MODE _MMIO(0x20cc)
-#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */
-#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */
-#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */
+#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */
+#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */
+#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) /* 85x only */
#define FW_BLC _MMIO(0x20d8)
#define FW_BLC2 _MMIO(0x20dc)
#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */
-#define FW_BLC_SELF_EN_MASK (1<<31)
-#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
-#define FW_BLC_SELF_EN (1<<15) /* 945 only */
+#define FW_BLC_SELF_EN_MASK (1 << 31)
+#define FW_BLC_SELF_FIFO_MASK (1 << 16) /* 945 only */
+#define FW_BLC_SELF_EN (1 << 15) /* 945 only */
#define MM_BURST_LENGTH 0x00700000
#define MM_FIFO_WATERMARK 0x0001F000
#define LM_BURST_LENGTH 0x00000700
@@ -2645,37 +2757,37 @@ enum i915_power_well_id {
#define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */
#define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */
-#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
-#define CM0_IZ_OPT_DISABLE (1<<6)
-#define CM0_ZR_OPT_DISABLE (1<<5)
-#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
-#define CM0_DEPTH_EVICT_DISABLE (1<<4)
-#define CM0_COLOR_EVICT_DISABLE (1<<3)
-#define CM0_DEPTH_WRITE_DISABLE (1<<1)
-#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1 << 8)
+#define CM0_IZ_OPT_DISABLE (1 << 6)
+#define CM0_ZR_OPT_DISABLE (1 << 5)
+#define CM0_STC_EVICT_DISABLE_LRA_SNB (1 << 5)
+#define CM0_DEPTH_EVICT_DISABLE (1 << 4)
+#define CM0_COLOR_EVICT_DISABLE (1 << 3)
+#define CM0_DEPTH_WRITE_DISABLE (1 << 1)
+#define CM0_RC_OP_FLUSH_DISABLE (1 << 0)
#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */
#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
-#define GFX_FLSH_CNTL_EN (1<<0)
+#define GFX_FLSH_CNTL_EN (1 << 0)
#define ECOSKPD _MMIO(0x21d0)
-#define ECO_GATING_CX_ONLY (1<<3)
-#define ECO_FLIP_DONE (1<<0)
+#define ECO_GATING_CX_ONLY (1 << 3)
+#define ECO_FLIP_DONE (1 << 0)
#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */
-#define RC_OP_FLUSH_ENABLE (1<<0)
-#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
+#define RC_OP_FLUSH_ENABLE (1 << 0)
+#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2)
#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
-#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
-#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
-#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1)
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6)
+#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6)
+#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1)
#define GEN6_BLITTER_ECOSKPD _MMIO(0x221d0)
#define GEN6_BLITTER_LOCK_SHIFT 16
-#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
+#define GEN6_BLITTER_FBC_NOTIFY (1 << 3)
#define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050)
#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
-#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
+#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1 << 10)
#define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
@@ -2714,6 +2826,10 @@ enum i915_power_well_id {
#define GEN10_F2_SS_DIS_SHIFT 18
#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
+#define GEN10_MIRROR_FUSE3 _MMIO(0x9118)
+#define GEN10_L3BANK_PAIR_COUNT 4
+#define GEN10_L3BANK_MASK 0x0F
+
#define GEN8_EU_DISABLE0 _MMIO(0x9134)
#define GEN8_EU_DIS0_S0_MASK 0xffffff
#define GEN8_EU_DIS0_S1_SHIFT 24
@@ -2727,7 +2843,7 @@ enum i915_power_well_id {
#define GEN8_EU_DISABLE2 _MMIO(0x913c)
#define GEN8_EU_DIS2_S2_MASK 0xff
-#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice)*0x4)
+#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4)
#define GEN10_EU_DISABLE3 _MMIO(0x9140)
#define GEN10_EU_DIS_SS_MASK 0xff
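The per-slice stride in GEN9_EU_DISABLE above is one dword, so it overlays the fixed gen8 registers; expanding (a sketch, assuming slices count from 0):

	GEN9_EU_DISABLE(0)	/* _MMIO(0x9134), same offset as GEN8_EU_DISABLE0 */
	GEN9_EU_DISABLE(2)	/* _MMIO(0x913c), same offset as GEN8_EU_DISABLE2 */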
@@ -2784,44 +2900,43 @@ enum i915_power_well_id {
(IS_HASWELL(dev_priv) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
/* These are all the "old" interrupts */
-#define ILK_BSD_USER_INTERRUPT (1<<5)
-
-#define I915_PM_INTERRUPT (1<<31)
-#define I915_ISP_INTERRUPT (1<<22)
-#define I915_LPE_PIPE_B_INTERRUPT (1<<21)
-#define I915_LPE_PIPE_A_INTERRUPT (1<<20)
-#define I915_MIPIC_INTERRUPT (1<<19)
-#define I915_MIPIA_INTERRUPT (1<<18)
-#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
-#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
-#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1<<16)
-#define I915_MASTER_ERROR_INTERRUPT (1<<15)
-#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
-#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1<<14)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
-#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1<<13)
-#define I915_HWB_OOM_INTERRUPT (1<<13)
-#define I915_LPE_PIPE_C_INTERRUPT (1<<12)
-#define I915_SYNC_STATUS_INTERRUPT (1<<12)
-#define I915_MISC_INTERRUPT (1<<11)
-#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
-#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1<<10)
-#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
-#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1<<9)
-#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
-#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1<<8)
-#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
-#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
-#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
-#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
-#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
-#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1<<3)
-#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1<<2)
-#define I915_DEBUG_INTERRUPT (1<<2)
-#define I915_WINVALID_INTERRUPT (1<<1)
-#define I915_USER_INTERRUPT (1<<1)
-#define I915_ASLE_INTERRUPT (1<<0)
-#define I915_BSD_USER_INTERRUPT (1<<25)
+#define ILK_BSD_USER_INTERRUPT (1 << 5)
+
+#define I915_PM_INTERRUPT (1 << 31)
+#define I915_ISP_INTERRUPT (1 << 22)
+#define I915_LPE_PIPE_B_INTERRUPT (1 << 21)
+#define I915_LPE_PIPE_A_INTERRUPT (1 << 20)
+#define I915_MIPIC_INTERRUPT (1 << 19)
+#define I915_MIPIA_INTERRUPT (1 << 18)
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 18)
+#define I915_DISPLAY_PORT_INTERRUPT (1 << 17)
+#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1 << 16)
+#define I915_MASTER_ERROR_INTERRUPT (1 << 15)
+#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1 << 14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1 << 14) /* p-state */
+#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1 << 13)
+#define I915_HWB_OOM_INTERRUPT (1 << 13)
+#define I915_LPE_PIPE_C_INTERRUPT (1 << 12)
+#define I915_SYNC_STATUS_INTERRUPT (1 << 12)
+#define I915_MISC_INTERRUPT (1 << 11)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1 << 11)
+#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1 << 10)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1 << 10)
+#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1 << 9)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1 << 9)
+#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1 << 8)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1 << 8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1 << 7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1 << 6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1 << 5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1 << 4)
+#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1 << 3)
+#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1 << 2)
+#define I915_DEBUG_INTERRUPT (1 << 2)
+#define I915_WINVALID_INTERRUPT (1 << 1)
+#define I915_USER_INTERRUPT (1 << 1)
+#define I915_ASLE_INTERRUPT (1 << 0)
+#define I915_BSD_USER_INTERRUPT (1 << 25)
#define I915_HDMI_LPE_AUDIO_BASE (VLV_DISPLAY_BASE + 0x65000)
#define I915_HDMI_LPE_AUDIO_SIZE 0x1000
@@ -2844,19 +2959,19 @@ enum i915_power_well_id {
#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
#define GEN7_FF_SCHED_MASK 0x0077070
#define GEN8_FF_DS_REF_CNT_FFME (1 << 19)
-#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
-#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
-#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
-#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
+#define GEN7_FF_TS_SCHED_HS1 (0x5 << 16)
+#define GEN7_FF_TS_SCHED_HS0 (0x3 << 16)
+#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1 << 16)
+#define GEN7_FF_TS_SCHED_HW (0x0 << 16) /* Default */
#define GEN7_FF_VS_REF_CNT_FFME (1 << 15)
-#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
-#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
-#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
-#define GEN7_FF_VS_SCHED_HW (0x0<<12)
-#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
-#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
-#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
-#define GEN7_FF_DS_SCHED_HW (0x0<<4)
+#define GEN7_FF_VS_SCHED_HS1 (0x5 << 12)
+#define GEN7_FF_VS_SCHED_HS0 (0x3 << 12)
+#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1 << 12) /* Default */
+#define GEN7_FF_VS_SCHED_HW (0x0 << 12)
+#define GEN7_FF_DS_SCHED_HS1 (0x5 << 4)
+#define GEN7_FF_DS_SCHED_HS0 (0x3 << 4)
+#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1 << 4) /* Default */
+#define GEN7_FF_DS_SCHED_HW (0x0 << 4)
/*
* Framebuffer compression (915+ only)
@@ -2865,51 +2980,51 @@ enum i915_power_well_id {
#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
#define FBC_CONTROL _MMIO(0x3208)
-#define FBC_CTL_EN (1<<31)
-#define FBC_CTL_PERIODIC (1<<30)
+#define FBC_CTL_EN (1 << 31)
+#define FBC_CTL_PERIODIC (1 << 30)
#define FBC_CTL_INTERVAL_SHIFT (16)
-#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
-#define FBC_CTL_C3_IDLE (1<<13)
+#define FBC_CTL_UNCOMPRESSIBLE (1 << 14)
+#define FBC_CTL_C3_IDLE (1 << 13)
#define FBC_CTL_STRIDE_SHIFT (5)
#define FBC_CTL_FENCENO_SHIFT (0)
#define FBC_COMMAND _MMIO(0x320c)
-#define FBC_CMD_COMPRESS (1<<0)
+#define FBC_CMD_COMPRESS (1 << 0)
#define FBC_STATUS _MMIO(0x3210)
-#define FBC_STAT_COMPRESSING (1<<31)
-#define FBC_STAT_COMPRESSED (1<<30)
-#define FBC_STAT_MODIFIED (1<<29)
+#define FBC_STAT_COMPRESSING (1 << 31)
+#define FBC_STAT_COMPRESSED (1 << 30)
+#define FBC_STAT_MODIFIED (1 << 29)
#define FBC_STAT_CURRENT_LINE_SHIFT (0)
#define FBC_CONTROL2 _MMIO(0x3214)
-#define FBC_CTL_FENCE_DBL (0<<4)
-#define FBC_CTL_IDLE_IMM (0<<2)
-#define FBC_CTL_IDLE_FULL (1<<2)
-#define FBC_CTL_IDLE_LINE (2<<2)
-#define FBC_CTL_IDLE_DEBUG (3<<2)
-#define FBC_CTL_CPU_FENCE (1<<1)
-#define FBC_CTL_PLANE(plane) ((plane)<<0)
+#define FBC_CTL_FENCE_DBL (0 << 4)
+#define FBC_CTL_IDLE_IMM (0 << 2)
+#define FBC_CTL_IDLE_FULL (1 << 2)
+#define FBC_CTL_IDLE_LINE (2 << 2)
+#define FBC_CTL_IDLE_DEBUG (3 << 2)
+#define FBC_CTL_CPU_FENCE (1 << 1)
+#define FBC_CTL_PLANE(plane) ((plane) << 0)
#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
#define FBC_LL_SIZE (1536)
#define FBC_LLC_READ_CTRL _MMIO(0x9044)
-#define FBC_LLC_FULLY_OPEN (1<<30)
+#define FBC_LLC_FULLY_OPEN (1 << 30)
/* Framebuffer compression for GM45+ */
#define DPFC_CB_BASE _MMIO(0x3200)
#define DPFC_CONTROL _MMIO(0x3208)
-#define DPFC_CTL_EN (1<<31)
-#define DPFC_CTL_PLANE(plane) ((plane)<<30)
-#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29)
-#define DPFC_CTL_FENCE_EN (1<<29)
-#define IVB_DPFC_CTL_FENCE_EN (1<<28)
-#define DPFC_CTL_PERSISTENT_MODE (1<<25)
-#define DPFC_SR_EN (1<<10)
-#define DPFC_CTL_LIMIT_1X (0<<6)
-#define DPFC_CTL_LIMIT_2X (1<<6)
-#define DPFC_CTL_LIMIT_4X (2<<6)
+#define DPFC_CTL_EN (1 << 31)
+#define DPFC_CTL_PLANE(plane) ((plane) << 30)
+#define IVB_DPFC_CTL_PLANE(plane) ((plane) << 29)
+#define DPFC_CTL_FENCE_EN (1 << 29)
+#define IVB_DPFC_CTL_FENCE_EN (1 << 28)
+#define DPFC_CTL_PERSISTENT_MODE (1 << 25)
+#define DPFC_SR_EN (1 << 10)
+#define DPFC_CTL_LIMIT_1X (0 << 6)
+#define DPFC_CTL_LIMIT_2X (1 << 6)
+#define DPFC_CTL_LIMIT_4X (2 << 6)
#define DPFC_RECOMP_CTL _MMIO(0x320c)
-#define DPFC_RECOMP_STALL_EN (1<<27)
+#define DPFC_RECOMP_STALL_EN (1 << 27)
#define DPFC_RECOMP_STALL_WM_SHIFT (16)
#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
@@ -2922,12 +3037,12 @@ enum i915_power_well_id {
#define DPFC_STATUS2 _MMIO(0x3214)
#define DPFC_FENCE_YOFF _MMIO(0x3218)
#define DPFC_CHICKEN _MMIO(0x3224)
-#define DPFC_HT_MODIFY (1<<31)
+#define DPFC_HT_MODIFY (1 << 31)
/* Framebuffer compression for Ironlake */
#define ILK_DPFC_CB_BASE _MMIO(0x43200)
#define ILK_DPFC_CONTROL _MMIO(0x43208)
-#define FBC_CTL_FALSE_COLOR (1<<10)
+#define FBC_CTL_FALSE_COLOR (1 << 10)
/* Bits 28:8 are reserved */
#define DPFC_RESERVED (0x1FFFFF00)
#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c)
@@ -2938,15 +3053,15 @@ enum i915_power_well_id {
#define BDW_FBC_COMP_SEG_MASK 0xfff
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
-#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
-#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
+#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8)
+#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
-#define ILK_FBC_RT_VALID (1<<0)
-#define SNB_FBC_FRONT_BUFFER (1<<1)
+#define ILK_FBC_RT_VALID (1 << 0)
+#define SNB_FBC_FRONT_BUFFER (1 << 1)
#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
-#define ILK_FBCQ_DIS (1<<22)
-#define ILK_PABSTRETCH_DIS (1<<21)
+#define ILK_FBCQ_DIS (1 << 22)
+#define ILK_PABSTRETCH_DIS (1 << 21)
/*
@@ -2955,7 +3070,7 @@ enum i915_power_well_id {
* The following two registers are of type GTTMMADR
*/
#define SNB_DPFC_CTL_SA _MMIO(0x100100)
-#define SNB_CPU_FENCE_ENABLE (1<<29)
+#define SNB_CPU_FENCE_ENABLE (1 << 29)
#define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
/* Framebuffer compression for Ivybridge */
@@ -2965,8 +3080,8 @@ enum i915_power_well_id {
#define IPS_ENABLE (1 << 31)
#define MSG_FBC_REND_STATE _MMIO(0x50380)
-#define FBC_REND_NUKE (1<<2)
-#define FBC_REND_CACHE_CLEAN (1<<1)
+#define FBC_REND_NUKE (1 << 2)
+#define FBC_REND_CACHE_CLEAN (1 << 1)
/*
* GPIO regs
@@ -2979,6 +3094,10 @@ enum i915_power_well_id {
#define GPIOF _MMIO(0x5024)
#define GPIOG _MMIO(0x5028)
#define GPIOH _MMIO(0x502c)
+#define GPIOJ _MMIO(0x5034)
+#define GPIOK _MMIO(0x5038)
+#define GPIOL _MMIO(0x503C)
+#define GPIOM _MMIO(0x5040)
# define GPIO_CLOCK_DIR_MASK (1 << 0)
# define GPIO_CLOCK_DIR_IN (0 << 1)
# define GPIO_CLOCK_DIR_OUT (1 << 1)
@@ -2995,12 +3114,13 @@ enum i915_power_well_id {
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
-#define GMBUS_AKSV_SELECT (1<<11)
-#define GMBUS_RATE_100KHZ (0<<8)
-#define GMBUS_RATE_50KHZ (1<<8)
-#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
-#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
-#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_AKSV_SELECT (1 << 11)
+#define GMBUS_RATE_100KHZ (0 << 8)
+#define GMBUS_RATE_50KHZ (1 << 8)
+#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
#define GMBUS_PIN_DISABLED 0
#define GMBUS_PIN_SSC 1
#define GMBUS_PIN_VGADDC 2
@@ -3021,36 +3141,37 @@ enum i915_power_well_id {
#define GMBUS_NUM_PINS 13 /* including 0 */
#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
-#define GMBUS_SW_CLR_INT (1<<31)
-#define GMBUS_SW_RDY (1<<30)
-#define GMBUS_ENT (1<<29) /* enable timeout */
-#define GMBUS_CYCLE_NONE (0<<25)
-#define GMBUS_CYCLE_WAIT (1<<25)
-#define GMBUS_CYCLE_INDEX (2<<25)
-#define GMBUS_CYCLE_STOP (4<<25)
+#define GMBUS_SW_CLR_INT (1 << 31)
+#define GMBUS_SW_RDY (1 << 30)
+#define GMBUS_ENT (1 << 29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0 << 25)
+#define GMBUS_CYCLE_WAIT (1 << 25)
+#define GMBUS_CYCLE_INDEX (2 << 25)
+#define GMBUS_CYCLE_STOP (4 << 25)
#define GMBUS_BYTE_COUNT_SHIFT 16
#define GMBUS_BYTE_COUNT_MAX 256U
+#define GEN9_GMBUS_BYTE_COUNT_MAX 511U
#define GMBUS_SLAVE_INDEX_SHIFT 8
#define GMBUS_SLAVE_ADDR_SHIFT 1
-#define GMBUS_SLAVE_READ (1<<0)
-#define GMBUS_SLAVE_WRITE (0<<0)
+#define GMBUS_SLAVE_READ (1 << 0)
+#define GMBUS_SLAVE_WRITE (0 << 0)
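For reference, the GMBUS1 fields above combine into a single command word; a minimal sketch of a read command (addr and len are hypothetical locals, with len capped by GMBUS_BYTE_COUNT_MAX, or the 511-byte gen9 limit where that applies):

	u32 cmd = GMBUS_SW_RDY | GMBUS_CYCLE_WAIT |
		  (len << GMBUS_BYTE_COUNT_SHIFT) |
		  (addr << GMBUS_SLAVE_ADDR_SHIFT) |
		  GMBUS_SLAVE_READ;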
#define GMBUS2 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */
-#define GMBUS_INUSE (1<<15)
-#define GMBUS_HW_WAIT_PHASE (1<<14)
-#define GMBUS_STALL_TIMEOUT (1<<13)
-#define GMBUS_INT (1<<12)
-#define GMBUS_HW_RDY (1<<11)
-#define GMBUS_SATOER (1<<10)
-#define GMBUS_ACTIVE (1<<9)
+#define GMBUS_INUSE (1 << 15)
+#define GMBUS_HW_WAIT_PHASE (1 << 14)
+#define GMBUS_STALL_TIMEOUT (1 << 13)
+#define GMBUS_INT (1 << 12)
+#define GMBUS_HW_RDY (1 << 11)
+#define GMBUS_SATOER (1 << 10)
+#define GMBUS_ACTIVE (1 << 9)
#define GMBUS3 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
#define GMBUS4 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
-#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
-#define GMBUS_NAK_EN (1<<3)
-#define GMBUS_IDLE_EN (1<<2)
-#define GMBUS_HW_WAIT_EN (1<<1)
-#define GMBUS_HW_RDY_EN (1<<0)
+#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
+#define GMBUS_NAK_EN (1 << 3)
+#define GMBUS_IDLE_EN (1 << 2)
+#define GMBUS_HW_WAIT_EN (1 << 1)
+#define GMBUS_HW_RDY_EN (1 << 0)
#define GMBUS5 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
-#define GMBUS_2BYTE_INDEX_EN (1<<31)
+#define GMBUS_2BYTE_INDEX_EN (1 << 31)
/*
* Clock control & power management
@@ -3088,10 +3209,10 @@ enum i915_power_well_id {
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
-#define DPLL_LOCK_VLV (1<<15)
-#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
-#define DPLL_INTEGRATED_REF_CLK_VLV (1<<13)
-#define DPLL_SSC_REF_CLK_CHV (1<<13)
+#define DPLL_LOCK_VLV (1 << 15)
+#define DPLL_INTEGRATED_CRI_CLK_VLV (1 << 14)
+#define DPLL_INTEGRATED_REF_CLK_VLV (1 << 13)
+#define DPLL_SSC_REF_CLK_CHV (1 << 13)
#define DPLL_PORTC_READY_MASK (0xf << 4)
#define DPLL_PORTB_READY_MASK (0xf)
@@ -3101,20 +3222,20 @@ enum i915_power_well_id {
#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
-#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27))
+#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2 * (phy) + (ch) + 27))
#define PHY_LDO_DELAY_0NS 0x0
#define PHY_LDO_DELAY_200NS 0x1
#define PHY_LDO_DELAY_600NS 0x2
-#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2*(phy)+23))
-#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8*(phy)+4*(ch)+11))
+#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2 * (phy) + 23))
+#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8 * (phy) + 4 * (ch) + 11))
#define PHY_CH_SU_PSR 0x1
#define PHY_CH_DEEP_PSR 0x7
-#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
+#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6 * (phy) + 3 * (ch) + 2))
#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
-#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
-#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch))))
-#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline))))
+#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30))
+#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6 - (6 * (phy) + 3 * (ch))))
+#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8 - (6 * (phy) + 3 * (ch) + (spline))))
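The (phy, ch) arithmetic above is easier to audit with a worked case; for DPIO_PHY0 (0), channel 1:

	PHY_CH_POWER_DOWN_OVRD_EN(0, 1)	/* 1 << (2*0 + 1 + 27) == bit 28 */
	PHY_STATUS_CMN_LDO(0, 1)	/* 1 << (6 - (6*0 + 3*1)) == bit 3 */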
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -3135,7 +3256,7 @@ enum i915_power_well_id {
/* Ironlake */
# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
-# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x) - 1) << 9)
# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
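The multiplier field is minus-one encoded; e.g. a 2x SDVO/HDMI multiplier programs a field value of 1:

	PLL_REF_SDVO_HDMI_MULTIPLIER(2)	/* (2 - 1) << 9 == 1 << 9 */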
@@ -3224,10 +3345,10 @@ enum i915_power_well_id {
#define DPLLA_TEST_M_BYPASS (1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
#define D_STATE _MMIO(0x6104)
-#define DSTATE_GFX_RESET_I830 (1<<6)
-#define DSTATE_PLL_D3_OFF (1<<3)
-#define DSTATE_GFX_CLOCK_GATING (1<<1)
-#define DSTATE_DOT_CLOCK_GATING (1<<0)
+#define DSTATE_GFX_RESET_I830 (1 << 6)
+#define DSTATE_PLL_D3_OFF (1 << 3)
+#define DSTATE_GFX_CLOCK_GATING (1 << 1)
+#define DSTATE_DOT_CLOCK_GATING (1 << 0)
#define DSPCLK_GATE_D _MMIO(dev_priv->info.display_mmio_offset + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
@@ -3344,7 +3465,7 @@ enum i915_power_well_id {
#define DEUC _MMIO(0x6214) /* CRL only */
#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500)
-#define FW_CSPWRDWNEN (1<<15)
+#define FW_CSPWRDWNEN (1 << 15)
#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
@@ -3469,7 +3590,7 @@ enum i915_power_well_id {
#define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f)
#define TSC1 _MMIO(0x11001)
-#define TSE (1<<0)
+#define TSE (1 << 0)
#define TR1 _MMIO(0x11006)
#define TSFS _MMIO(0x11020)
#define TSFS_SLOPE_MASK 0x0000ff00
@@ -3515,23 +3636,23 @@ enum i915_power_well_id {
#define MEMCTL_CMD_CHVID 3
#define MEMCTL_CMD_VMMOFF 4
#define MEMCTL_CMD_VMMON 5
-#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears
+#define MEMCTL_CMD_STS (1 << 12) /* write 1 triggers command, clears
when command complete */
#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
#define MEMCTL_FREQ_SHIFT 8
-#define MEMCTL_SFCAVM (1<<7)
+#define MEMCTL_SFCAVM (1 << 7)
#define MEMCTL_TGT_VID_MASK 0x007f
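MEMCTL_CMD_STS is write-one-to-trigger and self-clearing, so the usual pattern is trigger-then-poll; a sketch only, assuming MEMSWCTL is the register holding these fields and I915_READ16/I915_WRITE16/wait_for() are the driver's usual helpers:

	u16 swctl = I915_READ16(MEMSWCTL);
	I915_WRITE16(MEMSWCTL, swctl | MEMCTL_CMD_STS);	/* trigger */
	if (wait_for((I915_READ16(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("memory frequency command timed out\n");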
#define MEMIHYST _MMIO(0x1117c)
#define MEMINTREN _MMIO(0x11180) /* 16 bits */
-#define MEMINT_RSEXIT_EN (1<<8)
-#define MEMINT_CX_SUPR_EN (1<<7)
-#define MEMINT_CONT_BUSY_EN (1<<6)
-#define MEMINT_AVG_BUSY_EN (1<<5)
-#define MEMINT_EVAL_CHG_EN (1<<4)
-#define MEMINT_MON_IDLE_EN (1<<3)
-#define MEMINT_UP_EVAL_EN (1<<2)
-#define MEMINT_DOWN_EVAL_EN (1<<1)
-#define MEMINT_SW_CMD_EN (1<<0)
+#define MEMINT_RSEXIT_EN (1 << 8)
+#define MEMINT_CX_SUPR_EN (1 << 7)
+#define MEMINT_CONT_BUSY_EN (1 << 6)
+#define MEMINT_AVG_BUSY_EN (1 << 5)
+#define MEMINT_EVAL_CHG_EN (1 << 4)
+#define MEMINT_MON_IDLE_EN (1 << 3)
+#define MEMINT_UP_EVAL_EN (1 << 2)
+#define MEMINT_DOWN_EVAL_EN (1 << 1)
+#define MEMINT_SW_CMD_EN (1 << 0)
#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */
#define MEM_RSEXIT_MASK 0xc000
#define MEM_RSEXIT_SHIFT 14
@@ -3553,26 +3674,26 @@ enum i915_power_well_id {
#define MEM_INT_STEER_SMI 2
#define MEM_INT_STEER_SCI 3
#define MEMINTRSTS _MMIO(0x11184)
-#define MEMINT_RSEXIT (1<<7)
-#define MEMINT_CONT_BUSY (1<<6)
-#define MEMINT_AVG_BUSY (1<<5)
-#define MEMINT_EVAL_CHG (1<<4)
-#define MEMINT_MON_IDLE (1<<3)
-#define MEMINT_UP_EVAL (1<<2)
-#define MEMINT_DOWN_EVAL (1<<1)
-#define MEMINT_SW_CMD (1<<0)
+#define MEMINT_RSEXIT (1 << 7)
+#define MEMINT_CONT_BUSY (1 << 6)
+#define MEMINT_AVG_BUSY (1 << 5)
+#define MEMINT_EVAL_CHG (1 << 4)
+#define MEMINT_MON_IDLE (1 << 3)
+#define MEMINT_UP_EVAL (1 << 2)
+#define MEMINT_DOWN_EVAL (1 << 1)
+#define MEMINT_SW_CMD (1 << 0)
#define MEMMODECTL _MMIO(0x11190)
-#define MEMMODE_BOOST_EN (1<<31)
+#define MEMMODE_BOOST_EN (1 << 31)
#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
#define MEMMODE_BOOST_FREQ_SHIFT 24
#define MEMMODE_IDLE_MODE_MASK 0x00030000
#define MEMMODE_IDLE_MODE_SHIFT 16
#define MEMMODE_IDLE_MODE_EVAL 0
#define MEMMODE_IDLE_MODE_CONT 1
-#define MEMMODE_HWIDLE_EN (1<<15)
-#define MEMMODE_SWMODE_EN (1<<14)
-#define MEMMODE_RCLK_GATE (1<<13)
-#define MEMMODE_HW_UPDATE (1<<12)
+#define MEMMODE_HWIDLE_EN (1 << 15)
+#define MEMMODE_SWMODE_EN (1 << 14)
+#define MEMMODE_RCLK_GATE (1 << 13)
+#define MEMMODE_HW_UPDATE (1 << 12)
#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
#define MEMMODE_FSTART_SHIFT 8
#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
@@ -3586,8 +3707,8 @@ enum i915_power_well_id {
#define SWMEMCMD_TARVID (3 << 13)
#define SWMEMCMD_VRM_OFF (4 << 13)
#define SWMEMCMD_VRM_ON (5 << 13)
-#define CMDSTS (1<<12)
-#define SFCAVM (1<<11)
+#define CMDSTS (1 << 12)
+#define SFCAVM (1 << 11)
#define SWFREQ_MASK 0x0380 /* P0-7 */
#define SWFREQ_SHIFT 7
#define TARVID_MASK 0x001f
@@ -3596,49 +3717,49 @@ enum i915_power_well_id {
#define RCUPEI _MMIO(0x111b0)
#define RCDNEI _MMIO(0x111b4)
#define RSTDBYCTL _MMIO(0x111b8)
-#define RS1EN (1<<31)
-#define RS2EN (1<<30)
-#define RS3EN (1<<29)
-#define D3RS3EN (1<<28) /* Display D3 imlies RS3 */
-#define SWPROMORSX (1<<27) /* RSx promotion timers ignored */
-#define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */
-#define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */
-#define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */
-#define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */
-#define RSX_STATUS_MASK (7<<20)
-#define RSX_STATUS_ON (0<<20)
-#define RSX_STATUS_RC1 (1<<20)
-#define RSX_STATUS_RC1E (2<<20)
-#define RSX_STATUS_RS1 (3<<20)
-#define RSX_STATUS_RS2 (4<<20) /* aka rc6 */
-#define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */
-#define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */
-#define RSX_STATUS_RSVD2 (7<<20)
-#define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */
-#define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */
-#define JRSC (1<<17) /* rsx coupled to cpu c-state */
-#define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */
-#define RS1CONTSAV_MASK (3<<14)
-#define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */
-#define RS1CONTSAV_RSVD (1<<14)
-#define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */
-#define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */
-#define NORMSLEXLAT_MASK (3<<12)
-#define SLOW_RS123 (0<<12)
-#define SLOW_RS23 (1<<12)
-#define SLOW_RS3 (2<<12)
-#define NORMAL_RS123 (3<<12)
-#define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */
-#define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
-#define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */
-#define STATELOCK (1<<7) /* locked to rs_cstate if 0 */
-#define RS_CSTATE_MASK (3<<4)
-#define RS_CSTATE_C367_RS1 (0<<4)
-#define RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
-#define RS_CSTATE_RSVD (2<<4)
-#define RS_CSTATE_C367_RS2 (3<<4)
-#define REDSAVES (1<<3) /* no context save if was idle during rs0 */
-#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */
+#define RS1EN (1 << 31)
+#define RS2EN (1 << 30)
+#define RS3EN (1 << 29)
+#define D3RS3EN (1 << 28) /* Display D3 imlies RS3 */
+#define SWPROMORSX (1 << 27) /* RSx promotion timers ignored */
+#define RCWAKERW (1 << 26) /* Resetwarn from PCH causes wakeup */
+#define DPRSLPVREN (1 << 25) /* Fast voltage ramp enable */
+#define GFXTGHYST (1 << 24) /* Hysteresis to allow trunk gating */
+#define RCX_SW_EXIT (1 << 23) /* Leave RSx and prevent re-entry */
+#define RSX_STATUS_MASK (7 << 20)
+#define RSX_STATUS_ON (0 << 20)
+#define RSX_STATUS_RC1 (1 << 20)
+#define RSX_STATUS_RC1E (2 << 20)
+#define RSX_STATUS_RS1 (3 << 20)
+#define RSX_STATUS_RS2 (4 << 20) /* aka rc6 */
+#define RSX_STATUS_RSVD (5 << 20) /* deep rc6 unsupported on ilk */
+#define RSX_STATUS_RS3 (6 << 20) /* rs3 unsupported on ilk */
+#define RSX_STATUS_RSVD2 (7 << 20)
+#define UWRCRSXE (1 << 19) /* wake counter limit prevents rsx */
+#define RSCRP (1 << 18) /* rs requests control on rs1/2 reqs */
+#define JRSC (1 << 17) /* rsx coupled to cpu c-state */
+#define RS2INC0 (1 << 16) /* allow rs2 in cpu c0 */
+#define RS1CONTSAV_MASK (3 << 14)
+#define RS1CONTSAV_NO_RS1 (0 << 14) /* rs1 doesn't save/restore context */
+#define RS1CONTSAV_RSVD (1 << 14)
+#define RS1CONTSAV_SAVE_RS1 (2 << 14) /* rs1 saves context */
+#define RS1CONTSAV_FULL_RS1 (3 << 14) /* rs1 saves and restores context */
+#define NORMSLEXLAT_MASK (3 << 12)
+#define SLOW_RS123 (0 << 12)
+#define SLOW_RS23 (1 << 12)
+#define SLOW_RS3 (2 << 12)
+#define NORMAL_RS123 (3 << 12)
+#define RCMODE_TIMEOUT (1 << 11) /* 0 is eval interval method */
+#define IMPROMOEN (1 << 10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define RCENTSYNC (1 << 9) /* rs coupled to cpu c-state (3/6/7) */
+#define STATELOCK (1 << 7) /* locked to rs_cstate if 0 */
+#define RS_CSTATE_MASK (3 << 4)
+#define RS_CSTATE_C367_RS1 (0 << 4)
+#define RS_CSTATE_C36_RS1_C7_RS2 (1 << 4)
+#define RS_CSTATE_RSVD (2 << 4)
+#define RS_CSTATE_C367_RS2 (3 << 4)
+#define REDSAVES (1 << 3) /* no context save if was idle during rs0 */
+#define REDRESTORES (1 << 2) /* no restore if was idle during rs0 */
#define VIDCTL _MMIO(0x111c0)
#define VIDSTS _MMIO(0x111c8)
#define VIDSTART _MMIO(0x111cc) /* 8 bits */
@@ -3647,7 +3768,7 @@ enum i915_power_well_id {
#define MEMSTAT_VID_SHIFT 8
#define MEMSTAT_PSTATE_MASK 0x00f8
#define MEMSTAT_PSTATE_SHIFT 3
-#define MEMSTAT_MON_ACTV (1<<2)
+#define MEMSTAT_MON_ACTV (1 << 2)
#define MEMSTAT_SRC_CTL_MASK 0x0003
#define MEMSTAT_SRC_CTL_CORE 0
#define MEMSTAT_SRC_CTL_TRB 1
@@ -3656,7 +3777,7 @@ enum i915_power_well_id {
#define RCPREVBSYTUPAVG _MMIO(0x113b8)
#define RCPREVBSYTDNAVG _MMIO(0x113bc)
#define PMMISC _MMIO(0x11214)
-#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
+#define MCPPCE_EN (1 << 0) /* enable PM_MSG from PCH->MPC */
#define SDEW _MMIO(0x1124c)
#define CSIEW0 _MMIO(0x11250)
#define CSIEW1 _MMIO(0x11254)
@@ -3673,8 +3794,8 @@ enum i915_power_well_id {
#define RPPREVBSYTUPAVG _MMIO(0x113b8)
#define RPPREVBSYTDNAVG _MMIO(0x113bc)
#define ECR _MMIO(0x11600)
-#define ECR_GPFE (1<<31)
-#define ECR_IMONE (1<<30)
+#define ECR_GPFE (1 << 31)
+#define ECR_IMONE (1 << 30)
#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
#define OGW0 _MMIO(0x11608)
#define OGW1 _MMIO(0x1160c)
@@ -3781,11 +3902,11 @@ enum {
FAULT_AND_CONTINUE /* Unsupported */
};
-#define GEN8_CTX_VALID (1<<0)
-#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
-#define GEN8_CTX_FORCE_RESTORE (1<<2)
-#define GEN8_CTX_L3LLC_COHERENT (1<<5)
-#define GEN8_CTX_PRIVILEGE (1<<8)
+#define GEN8_CTX_VALID (1 << 0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1 << 1)
+#define GEN8_CTX_FORCE_RESTORE (1 << 2)
+#define GEN8_CTX_L3LLC_COHERENT (1 << 5)
+#define GEN8_CTX_PRIVILEGE (1 << 8)
#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
#define GEN8_CTX_ID_SHIFT 32
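These flags occupy the low dword of the gen8 context descriptor; a sketch of how they might compose (addr_mode and ctx_id are hypothetical locals standing in for the addressing-mode value and the assigned context id):

	u64 desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE |
		   (addr_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT) |
		   ((u64)ctx_id << GEN8_CTX_ID_SHIFT);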
@@ -3807,7 +3928,7 @@ enum {
#define OVADD _MMIO(0x30000)
#define DOVSTA _MMIO(0x30008)
-#define OC_BUF (0x3<<20)
+#define OC_BUF (0x3 << 20)
#define OGAMC5 _MMIO(0x30010)
#define OGAMC4 _MMIO(0x30014)
#define OGAMC3 _MMIO(0x30018)
@@ -3975,64 +4096,65 @@ enum {
/* VLV eDP PSR registers */
#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
#define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090)
-#define VLV_EDP_PSR_ENABLE (1<<0)
-#define VLV_EDP_PSR_RESET (1<<1)
-#define VLV_EDP_PSR_MODE_MASK (7<<2)
-#define VLV_EDP_PSR_MODE_HW_TIMER (1<<3)
-#define VLV_EDP_PSR_MODE_SW_TIMER (1<<2)
-#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1<<7)
-#define VLV_EDP_PSR_ACTIVE_ENTRY (1<<8)
-#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1<<9)
-#define VLV_EDP_PSR_DBL_FRAME (1<<10)
-#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16)
+#define VLV_EDP_PSR_ENABLE (1 << 0)
+#define VLV_EDP_PSR_RESET (1 << 1)
+#define VLV_EDP_PSR_MODE_MASK (7 << 2)
+#define VLV_EDP_PSR_MODE_HW_TIMER (1 << 3)
+#define VLV_EDP_PSR_MODE_SW_TIMER (1 << 2)
+#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1 << 7)
+#define VLV_EDP_PSR_ACTIVE_ENTRY (1 << 8)
+#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1 << 9)
+#define VLV_EDP_PSR_DBL_FRAME (1 << 10)
+#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff << 16)
#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
#define VLV_PSRCTL(pipe) _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB)
#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
-#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30)
-#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31)
-#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30)
+#define VLV_EDP_PSR_SDP_FREQ_MASK (3 << 30)
+#define VLV_EDP_PSR_SDP_FREQ_ONCE (1 << 31)
+#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1 << 30)
#define VLV_VSCSDP(pipe) _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB)
#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
-#define VLV_EDP_PSR_LAST_STATE_MASK (7<<3)
+#define VLV_EDP_PSR_LAST_STATE_MASK (7 << 3)
#define VLV_EDP_PSR_CURR_STATE_MASK 7
-#define VLV_EDP_PSR_DISABLED (0<<0)
-#define VLV_EDP_PSR_INACTIVE (1<<0)
-#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2<<0)
-#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3<<0)
-#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0)
-#define VLV_EDP_PSR_EXIT (5<<0)
-#define VLV_EDP_PSR_IN_TRANS (1<<7)
+#define VLV_EDP_PSR_DISABLED (0 << 0)
+#define VLV_EDP_PSR_INACTIVE (1 << 0)
+#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2 << 0)
+#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3 << 0)
+#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4 << 0)
+#define VLV_EDP_PSR_EXIT (5 << 0)
+#define VLV_EDP_PSR_IN_TRANS (1 << 7)
#define VLV_PSRSTAT(pipe) _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB)
/* HSW+ eDP PSR registers */
#define HSW_EDP_PSR_BASE 0x64800
#define BDW_EDP_PSR_BASE 0x6f800
#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0)
-#define EDP_PSR_ENABLE (1<<31)
-#define BDW_PSR_SINGLE_FRAME (1<<30)
-#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1<<29) /* SW can't modify */
-#define EDP_PSR_LINK_STANDBY (1<<27)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25)
+#define EDP_PSR_ENABLE (1 << 31)
+#define BDW_PSR_SINGLE_FRAME (1 << 30)
+#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
+#define EDP_PSR_LINK_STANDBY (1 << 27)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3 << 25)
#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20
-#define EDP_PSR_SKIP_AUX_EXIT (1<<12)
-#define EDP_PSR_TP1_TP2_SEL (0<<11)
-#define EDP_PSR_TP1_TP3_SEL (1<<11)
-#define EDP_PSR_TP2_TP3_TIME_500us (0<<8)
-#define EDP_PSR_TP2_TP3_TIME_100us (1<<8)
-#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8)
-#define EDP_PSR_TP2_TP3_TIME_0us (3<<8)
-#define EDP_PSR_TP1_TIME_500us (0<<4)
-#define EDP_PSR_TP1_TIME_100us (1<<4)
-#define EDP_PSR_TP1_TIME_2500us (2<<4)
-#define EDP_PSR_TP1_TIME_0us (3<<4)
+#define EDP_PSR_SKIP_AUX_EXIT (1 << 12)
+#define EDP_PSR_TP1_TP2_SEL (0 << 11)
+#define EDP_PSR_TP1_TP3_SEL (1 << 11)
+#define EDP_PSR_CRC_ENABLE (1 << 10) /* BDW+ */
+#define EDP_PSR_TP2_TP3_TIME_500us (0 << 8)
+#define EDP_PSR_TP2_TP3_TIME_100us (1 << 8)
+#define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8)
+#define EDP_PSR_TP2_TP3_TIME_0us (3 << 8)
+#define EDP_PSR_TP1_TIME_500us (0 << 4)
+#define EDP_PSR_TP1_TIME_100us (1 << 4)
+#define EDP_PSR_TP1_TIME_2500us (2 << 4)
+#define EDP_PSR_TP1_TIME_0us (3 << 4)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
/* Bspec claims those aren't shifted but stay at 0x64800 */
@@ -4052,55 +4174,56 @@ enum {
#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
#define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40)
-#define EDP_PSR_STATUS_STATE_MASK (7<<29)
-#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
-#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
-#define EDP_PSR_STATUS_STATE_SRDENT (2<<29)
-#define EDP_PSR_STATUS_STATE_BUFOFF (3<<29)
-#define EDP_PSR_STATUS_STATE_BUFON (4<<29)
-#define EDP_PSR_STATUS_STATE_AUXACK (5<<29)
-#define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29)
-#define EDP_PSR_STATUS_LINK_MASK (3<<26)
-#define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26)
-#define EDP_PSR_STATUS_LINK_FULL_ON (1<<26)
-#define EDP_PSR_STATUS_LINK_STANDBY (2<<26)
+#define EDP_PSR_STATUS_STATE_MASK (7 << 29)
+#define EDP_PSR_STATUS_STATE_SHIFT 29
+#define EDP_PSR_STATUS_STATE_IDLE (0 << 29)
+#define EDP_PSR_STATUS_STATE_SRDONACK (1 << 29)
+#define EDP_PSR_STATUS_STATE_SRDENT (2 << 29)
+#define EDP_PSR_STATUS_STATE_BUFOFF (3 << 29)
+#define EDP_PSR_STATUS_STATE_BUFON (4 << 29)
+#define EDP_PSR_STATUS_STATE_AUXACK (5 << 29)
+#define EDP_PSR_STATUS_STATE_SRDOFFACK (6 << 29)
+#define EDP_PSR_STATUS_LINK_MASK (3 << 26)
+#define EDP_PSR_STATUS_LINK_FULL_OFF (0 << 26)
+#define EDP_PSR_STATUS_LINK_FULL_ON (1 << 26)
+#define EDP_PSR_STATUS_LINK_STANDBY (2 << 26)
#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f
#define EDP_PSR_STATUS_COUNT_SHIFT 16
#define EDP_PSR_STATUS_COUNT_MASK 0xf
-#define EDP_PSR_STATUS_AUX_ERROR (1<<15)
-#define EDP_PSR_STATUS_AUX_SENDING (1<<12)
-#define EDP_PSR_STATUS_SENDING_IDLE (1<<9)
-#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8)
-#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
+#define EDP_PSR_STATUS_AUX_ERROR (1 << 15)
+#define EDP_PSR_STATUS_AUX_SENDING (1 << 12)
+#define EDP_PSR_STATUS_SENDING_IDLE (1 << 9)
+#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1 << 8)
+#define EDP_PSR_STATUS_SENDING_TP1 (1 << 4)
#define EDP_PSR_STATUS_IDLE_MASK 0xf
#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
#define EDP_PSR_PERF_CNT_MASK 0xffffff
#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
-#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1<<28)
-#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
-#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
-#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
-#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1<<16)
-#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15) /* SKL+ */
+#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28)
+#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
+#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
+#define EDP_PSR_DEBUG_MASK_HPD (1 << 25)
+#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16)
+#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
#define EDP_PSR2_CTL _MMIO(0x6f900)
-#define EDP_PSR2_ENABLE (1<<31)
-#define EDP_SU_TRACK_ENABLE (1<<30)
-#define EDP_Y_COORDINATE_VALID (1<<26) /* GLK and CNL+ */
-#define EDP_Y_COORDINATE_ENABLE (1<<25) /* GLK and CNL+ */
-#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20)
-#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f<<20)
-#define EDP_PSR2_TP2_TIME_500 (0<<8)
-#define EDP_PSR2_TP2_TIME_100 (1<<8)
-#define EDP_PSR2_TP2_TIME_2500 (2<<8)
-#define EDP_PSR2_TP2_TIME_50 (3<<8)
-#define EDP_PSR2_TP2_TIME_MASK (3<<8)
+#define EDP_PSR2_ENABLE (1 << 31)
+#define EDP_SU_TRACK_ENABLE (1 << 30)
+#define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */
+#define EDP_Y_COORDINATE_ENABLE (1 << 25) /* GLK and CNL+ */
+#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20)
+#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
+#define EDP_PSR2_TP2_TIME_500us (0 << 8)
+#define EDP_PSR2_TP2_TIME_100us (1 << 8)
+#define EDP_PSR2_TP2_TIME_2500us (2 << 8)
+#define EDP_PSR2_TP2_TIME_50us (3 << 8)
+#define EDP_PSR2_TP2_TIME_MASK (3 << 8)
#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
-#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
-#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4)
+#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf << 4)
+#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4)
#define EDP_PSR2_IDLE_FRAME_MASK 0xf
#define EDP_PSR2_IDLE_FRAME_SHIFT 0
@@ -4128,7 +4251,7 @@ enum {
#define PSR_EVENT_PSR_DISABLE (1 << 0)
#define EDP_PSR2_STATUS _MMIO(0x6f940)
-#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28)
+#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28
/* VGA port control */
@@ -4136,47 +4259,48 @@ enum {
#define PCH_ADPA _MMIO(0xe1100)
#define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100)
-#define ADPA_DAC_ENABLE (1<<31)
+#define ADPA_DAC_ENABLE (1 << 31)
#define ADPA_DAC_DISABLE 0
-#define ADPA_PIPE_SELECT_MASK (1<<30)
-#define ADPA_PIPE_A_SELECT 0
-#define ADPA_PIPE_B_SELECT (1<<30)
-#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
-/* CPT uses bits 29:30 for pch transcoder select */
+#define ADPA_PIPE_SEL_SHIFT 30
+#define ADPA_PIPE_SEL_MASK (1 << 30)
+#define ADPA_PIPE_SEL(pipe) ((pipe) << 30)
+#define ADPA_PIPE_SEL_SHIFT_CPT 29
+#define ADPA_PIPE_SEL_MASK_CPT (3 << 29)
+#define ADPA_PIPE_SEL_CPT(pipe) ((pipe) << 29)
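With the shift/mask/selector triplet in place, decoding the pipe back out of a register value is symmetric; a sketch (val is a hypothetical register snapshot):

	enum pipe pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;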
#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
-#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
-#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
-#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
-#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
-#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
-#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
-#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
-#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
-#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
-#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
-#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-#define ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0 << 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3 << 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3 << 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2 << 24)
+#define ADPA_CRT_HOTPLUG_ENABLE (1 << 23)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 (0 << 22)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 (1 << 22)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0 << 21)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1 << 21)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0 << 20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1 << 20)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0 << 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1 << 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2 << 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3 << 18)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0 << 17)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1 << 17)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1 << 16)
+#define ADPA_USE_VGA_HVPOLARITY (1 << 15)
#define ADPA_SETS_HVPOLARITY 0
-#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_VSYNC_CNTL_DISABLE (1 << 10)
#define ADPA_VSYNC_CNTL_ENABLE 0
-#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_HSYNC_CNTL_DISABLE (1 << 11)
#define ADPA_HSYNC_CNTL_ENABLE 0
-#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
#define ADPA_VSYNC_ACTIVE_LOW 0
-#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
#define ADPA_HSYNC_ACTIVE_LOW 0
-#define ADPA_DPMS_MASK (~(3<<10))
-#define ADPA_DPMS_ON (0<<10)
-#define ADPA_DPMS_SUSPEND (1<<10)
-#define ADPA_DPMS_STANDBY (2<<10)
-#define ADPA_DPMS_OFF (3<<10)
+#define ADPA_DPMS_MASK (~(3 << 10))
+#define ADPA_DPMS_ON (0 << 10)
+#define ADPA_DPMS_SUSPEND (1 << 10)
+#define ADPA_DPMS_STANDBY (2 << 10)
+#define ADPA_DPMS_OFF (3 << 10)
/* Hotplug control (945+ only) */
@@ -4301,9 +4425,9 @@ enum {
/* Gen 3 SDVO bits: */
#define SDVO_ENABLE (1 << 31)
-#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
+#define SDVO_PIPE_SEL_SHIFT 30
#define SDVO_PIPE_SEL_MASK (1 << 30)
-#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
#define SDVO_STALL_SELECT (1 << 29)
#define SDVO_INTERRUPT_ENABLE (1 << 26)
/*
@@ -4343,12 +4467,14 @@ enum {
#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
/* Gen 6 (CPT) SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
+#define SDVO_PIPE_SEL_SHIFT_CPT 29
#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
+#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
/* CHV SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
+#define SDVO_PIPE_SEL_SHIFT_CHV 24
#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
+#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
/* DVO port control */
@@ -4359,7 +4485,9 @@ enum {
#define _DVOC 0x61160
#define DVOC _MMIO(_DVOC)
#define DVO_ENABLE (1 << 31)
-#define DVO_PIPE_B_SELECT (1 << 30)
+#define DVO_PIPE_SEL_SHIFT 30
+#define DVO_PIPE_SEL_MASK (1 << 30)
+#define DVO_PIPE_SEL(pipe) ((pipe) << 30)
#define DVO_PIPE_STALL_UNUSED (0 << 28)
#define DVO_PIPE_STALL (1 << 28)
#define DVO_PIPE_STALL_TV (2 << 28)
@@ -4381,7 +4509,7 @@ enum {
#define DVO_BLANK_ACTIVE_HIGH (1 << 2)
#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */
#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */
-#define DVO_PRESERVE_MASK (0x7<<24)
+#define DVO_PRESERVE_MASK (0x7 << 24)
#define DVOA_SRCDIM _MMIO(0x61124)
#define DVOB_SRCDIM _MMIO(0x61144)
#define DVOC_SRCDIM _MMIO(0x61164)
@@ -4396,9 +4524,12 @@ enum {
*/
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
-#define LVDS_PIPEB_SELECT (1 << 30)
-#define LVDS_PIPE_MASK (1 << 30)
-#define LVDS_PIPE(pipe) ((pipe) << 30)
+#define LVDS_PIPE_SEL_SHIFT 30
+#define LVDS_PIPE_SEL_MASK (1 << 30)
+#define LVDS_PIPE_SEL(pipe) ((pipe) << 30)
+#define LVDS_PIPE_SEL_SHIFT_CPT 29
+#define LVDS_PIPE_SEL_MASK_CPT (3 << 29)
+#define LVDS_PIPE_SEL_CPT(pipe) ((pipe) << 29)
/* LVDS dithering flag on 965/g4x platform */
#define LVDS_ENABLE_DITHER (1 << 25)
/* LVDS sync polarity flags. Set to invert (i.e. negative) */
@@ -4471,6 +4602,16 @@ enum {
#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
+#define DRM_DIP_ENABLE (1 << 28)
+#define PSR_VSC_BIT_7_SET (1 << 27)
+#define VSC_SELECT_MASK (0x3 << 26)
+#define VSC_SELECT_SHIFT 26
+#define VSC_DIP_HW_HEA_DATA (0 << 26)
+#define VSC_DIP_HW_HEA_SW_DATA (1 << 26)
+#define VSC_DIP_HW_DATA_SW_HEA (2 << 26)
+#define VSC_DIP_SW_HEA_DATA (3 << 26)
+#define VDIP_ENABLE_PPS (1 << 24)
+
/* Panel power sequencing */
#define PPS_BASE 0x61200
#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
@@ -4695,7 +4836,9 @@ enum {
/* Enables the TV encoder */
# define TV_ENC_ENABLE (1 << 31)
/* Sources the TV encoder input from pipe B instead of A. */
-# define TV_ENC_PIPEB_SELECT (1 << 30)
+# define TV_ENC_PIPE_SEL_SHIFT 30
+# define TV_ENC_PIPE_SEL_MASK (1 << 30)
+# define TV_ENC_PIPE_SEL(pipe) ((pipe) << 30)
/* Outputs composite video (DAC A only) */
# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
/* Outputs SVideo video (DAC B/C) */
@@ -5177,10 +5320,15 @@ enum {
#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
#define DP_PORT_EN (1 << 31)
-#define DP_PIPEB_SELECT (1 << 30)
-#define DP_PIPE_MASK (1 << 30)
-#define DP_PIPE_SELECT_CHV(pipe) ((pipe) << 16)
-#define DP_PIPE_MASK_CHV (3 << 16)
+#define DP_PIPE_SEL_SHIFT 30
+#define DP_PIPE_SEL_MASK (1 << 30)
+#define DP_PIPE_SEL(pipe) ((pipe) << 30)
+#define DP_PIPE_SEL_SHIFT_IVB 29
+#define DP_PIPE_SEL_MASK_IVB (3 << 29)
+#define DP_PIPE_SEL_IVB(pipe) ((pipe) << 29)
+#define DP_PIPE_SEL_SHIFT_CHV 16
+#define DP_PIPE_SEL_MASK_CHV (3 << 16)
+#define DP_PIPE_SEL_CHV(pipe) ((pipe) << 16)
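The same triplet pattern recurs for DP, with the field moving per platform; selecting pipe B (PIPE_B == 1) expands as:

	DP_PIPE_SEL(PIPE_B)	/* 1 << 30 */
	DP_PIPE_SEL_IVB(PIPE_B)	/* 1 << 29 */
	DP_PIPE_SEL_CHV(PIPE_B)	/* 1 << 16 */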
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
@@ -5287,6 +5435,13 @@ enum {
#define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320)
#define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324)
+#define _DPE_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64410)
+#define _DPE_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64414)
+#define _DPE_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64418)
+#define _DPE_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6441c)
+#define _DPE_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64420)
+#define _DPE_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64424)
+
#define _DPF_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64510)
#define _DPF_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64514)
#define _DPF_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64518)
@@ -5342,7 +5497,7 @@ enum {
#define _PIPEB_DATA_M_G4X 0x71050
/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
-#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
+#define TU_SIZE(x) (((x) - 1) << 25) /* default size 64 */
#define TU_SIZE_SHIFT 25
#define TU_SIZE_MASK (0x3f << 25)
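The minus-one encoding means the 0x3f default field value corresponds to a TU of 64:

	TU_SIZE(64)	/* (64 - 1) << 25 == 0x3f << 25, the hardware default */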
@@ -5384,18 +5539,18 @@ enum {
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF 0x70008
-#define PIPECONF_ENABLE (1<<31)
+#define PIPECONF_ENABLE (1 << 31)
#define PIPECONF_DISABLE 0
-#define PIPECONF_DOUBLE_WIDE (1<<30)
-#define I965_PIPECONF_ACTIVE (1<<30)
-#define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */
-#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
+#define PIPECONF_DOUBLE_WIDE (1 << 30)
+#define I965_PIPECONF_ACTIVE (1 << 30)
+#define PIPECONF_DSI_PLL_LOCKED (1 << 29) /* vlv & pipe A only */
+#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27)
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
-#define PIPECONF_PIPE_LOCKED (1<<25)
+#define PIPECONF_PIPE_LOCKED (1 << 25)
#define PIPECONF_PALETTE 0
-#define PIPECONF_GAMMA (1<<24)
-#define PIPECONF_FORCE_BORDER (1<<25)
+#define PIPECONF_GAMMA (1 << 24)
+#define PIPECONF_FORCE_BORDER (1 << 25)
#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
@@ -5414,67 +5569,67 @@ enum {
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
-#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
+#define PIPECONF_CXSR_DOWNCLOCK (1 << 16)
#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
-#define PIPECONF_8BPC (0<<5)
-#define PIPECONF_10BPC (1<<5)
-#define PIPECONF_6BPC (2<<5)
-#define PIPECONF_12BPC (3<<5)
-#define PIPECONF_DITHER_EN (1<<4)
+#define PIPECONF_8BPC (0 << 5)
+#define PIPECONF_10BPC (1 << 5)
+#define PIPECONF_6BPC (2 << 5)
+#define PIPECONF_12BPC (3 << 5)
+#define PIPECONF_DITHER_EN (1 << 4)
#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
-#define PIPECONF_DITHER_TYPE_SP (0<<2)
-#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
-#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
-#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
+#define PIPECONF_DITHER_TYPE_SP (0 << 2)
+#define PIPECONF_DITHER_TYPE_ST1 (1 << 2)
+#define PIPECONF_DITHER_TYPE_ST2 (2 << 2)
+#define PIPECONF_DITHER_TYPE_TEMP (3 << 2)
#define _PIPEASTAT 0x70024
-#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
-#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30)
-#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
-#define PIPE_CRC_DONE_ENABLE (1UL<<28)
-#define PERF_COUNTER2_INTERRUPT_EN (1UL<<27)
-#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
-#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
-#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
-#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
-#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
-#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
-#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22)
-#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
-#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
-#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
-#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19)
-#define PERF_COUNTER_INTERRUPT_EN (1UL<<19)
-#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
-#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
-#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL<<17)
-#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
-#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
-#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15)
-#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14)
-#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
-#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
-#define PERF_COUNTER2_INTERRUPT_STATUS (1UL<<11)
-#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
-#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10)
-#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
-#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
-#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
-#define PIPE_DPST_EVENT_STATUS (1UL<<7)
-#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
-#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
-#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
-#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
-#define PIPE_B_PSR_STATUS_VLV (1UL<<3)
-#define PERF_COUNTER_INTERRUPT_STATUS (1UL<<3)
-#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
-#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
-#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL<<1)
-#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
-#define PIPE_HBLANK_INT_STATUS (1UL<<0)
-#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
+#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
+#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
+#define PIPE_CRC_ERROR_ENABLE (1UL << 29)
+#define PIPE_CRC_DONE_ENABLE (1UL << 28)
+#define PERF_COUNTER2_INTERRUPT_EN (1UL << 27)
+#define PIPE_GMBUS_EVENT_ENABLE (1UL << 27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL << 26)
+#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL << 26)
+#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL << 25)
+#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL << 24)
+#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL << 22)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
+#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL << 21)
+#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL << 20)
+#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL << 19)
+#define PERF_COUNTER_INTERRUPT_EN (1UL << 19)
+#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL << 18) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL << 17)
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL << 16)
+#define PIPE_OVERLAY_UPDATED_ENABLE (1UL << 16)
+#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL << 15)
+#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL << 14)
+#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL << 13)
+#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL << 12)
+#define PERF_COUNTER2_INTERRUPT_STATUS (1UL << 11)
+#define PIPE_GMBUS_INTERRUPT_STATUS (1UL << 11)
+#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL << 10)
+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
+#define PIPE_VSYNC_INTERRUPT_STATUS (1UL << 9)
+#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL << 8)
+#define PIPE_DPST_EVENT_STATUS (1UL << 7)
+#define PIPE_A_PSR_STATUS_VLV (1UL << 6)
+#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL << 6)
+#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL << 5)
+#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL << 4)
+#define PIPE_B_PSR_STATUS_VLV (1UL << 3)
+#define PERF_COUNTER_INTERRUPT_STATUS (1UL << 3)
+#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL << 2) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_HBLANK_INT_STATUS (1UL << 0)
+#define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0)
#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
@@ -5503,67 +5658,67 @@ enum {
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
-#define PIPEMISC_YUV420_ENABLE (1<<27)
-#define PIPEMISC_YUV420_MODE_FULL_BLEND (1<<26)
-#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1<<11)
-#define PIPEMISC_DITHER_BPC_MASK (7<<5)
-#define PIPEMISC_DITHER_8_BPC (0<<5)
-#define PIPEMISC_DITHER_10_BPC (1<<5)
-#define PIPEMISC_DITHER_6_BPC (2<<5)
-#define PIPEMISC_DITHER_12_BPC (3<<5)
-#define PIPEMISC_DITHER_ENABLE (1<<4)
-#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
-#define PIPEMISC_DITHER_TYPE_SP (0<<2)
+#define PIPEMISC_YUV420_ENABLE (1 << 27)
+#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26)
+#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
+#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
+#define PIPEMISC_DITHER_8_BPC (0 << 5)
+#define PIPEMISC_DITHER_10_BPC (1 << 5)
+#define PIPEMISC_DITHER_6_BPC (2 << 5)
+#define PIPEMISC_DITHER_12_BPC (3 << 5)
+#define PIPEMISC_DITHER_ENABLE (1 << 4)
+#define PIPEMISC_DITHER_TYPE_MASK (3 << 2)
+#define PIPEMISC_DITHER_TYPE_SP (0 << 2)
#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A)
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
-#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
-#define PIPEB_HLINE_INT_EN (1<<28)
-#define PIPEB_VBLANK_INT_EN (1<<27)
-#define SPRITED_FLIP_DONE_INT_EN (1<<26)
-#define SPRITEC_FLIP_DONE_INT_EN (1<<25)
-#define PLANEB_FLIP_DONE_INT_EN (1<<24)
-#define PIPE_PSR_INT_EN (1<<22)
-#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
-#define PIPEA_HLINE_INT_EN (1<<20)
-#define PIPEA_VBLANK_INT_EN (1<<19)
-#define SPRITEB_FLIP_DONE_INT_EN (1<<18)
-#define SPRITEA_FLIP_DONE_INT_EN (1<<17)
-#define PLANEA_FLIPDONE_INT_EN (1<<16)
-#define PIPEC_LINE_COMPARE_INT_EN (1<<13)
-#define PIPEC_HLINE_INT_EN (1<<12)
-#define PIPEC_VBLANK_INT_EN (1<<11)
-#define SPRITEF_FLIPDONE_INT_EN (1<<10)
-#define SPRITEE_FLIPDONE_INT_EN (1<<9)
-#define PLANEC_FLIPDONE_INT_EN (1<<8)
+#define PIPEB_LINE_COMPARE_INT_EN (1 << 29)
+#define PIPEB_HLINE_INT_EN (1 << 28)
+#define PIPEB_VBLANK_INT_EN (1 << 27)
+#define SPRITED_FLIP_DONE_INT_EN (1 << 26)
+#define SPRITEC_FLIP_DONE_INT_EN (1 << 25)
+#define PLANEB_FLIP_DONE_INT_EN (1 << 24)
+#define PIPE_PSR_INT_EN (1 << 22)
+#define PIPEA_LINE_COMPARE_INT_EN (1 << 21)
+#define PIPEA_HLINE_INT_EN (1 << 20)
+#define PIPEA_VBLANK_INT_EN (1 << 19)
+#define SPRITEB_FLIP_DONE_INT_EN (1 << 18)
+#define SPRITEA_FLIP_DONE_INT_EN (1 << 17)
+#define PLANEA_FLIPDONE_INT_EN (1 << 16)
+#define PIPEC_LINE_COMPARE_INT_EN (1 << 13)
+#define PIPEC_HLINE_INT_EN (1 << 12)
+#define PIPEC_VBLANK_INT_EN (1 << 11)
+#define SPRITEF_FLIPDONE_INT_EN (1 << 10)
+#define SPRITEE_FLIPDONE_INT_EN (1 << 9)
+#define PLANEC_FLIPDONE_INT_EN (1 << 8)
#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
-#define SPRITEF_INVALID_GTT_INT_EN (1<<27)
-#define SPRITEE_INVALID_GTT_INT_EN (1<<26)
-#define PLANEC_INVALID_GTT_INT_EN (1<<25)
-#define CURSORC_INVALID_GTT_INT_EN (1<<24)
-#define CURSORB_INVALID_GTT_INT_EN (1<<23)
-#define CURSORA_INVALID_GTT_INT_EN (1<<22)
-#define SPRITED_INVALID_GTT_INT_EN (1<<21)
-#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
-#define PLANEB_INVALID_GTT_INT_EN (1<<19)
-#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
-#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
-#define PLANEA_INVALID_GTT_INT_EN (1<<16)
+#define SPRITEF_INVALID_GTT_INT_EN (1 << 27)
+#define SPRITEE_INVALID_GTT_INT_EN (1 << 26)
+#define PLANEC_INVALID_GTT_INT_EN (1 << 25)
+#define CURSORC_INVALID_GTT_INT_EN (1 << 24)
+#define CURSORB_INVALID_GTT_INT_EN (1 << 23)
+#define CURSORA_INVALID_GTT_INT_EN (1 << 22)
+#define SPRITED_INVALID_GTT_INT_EN (1 << 21)
+#define SPRITEC_INVALID_GTT_INT_EN (1 << 20)
+#define PLANEB_INVALID_GTT_INT_EN (1 << 19)
+#define SPRITEB_INVALID_GTT_INT_EN (1 << 18)
+#define SPRITEA_INVALID_GTT_INT_EN (1 << 17)
+#define PLANEA_INVALID_GTT_INT_EN (1 << 16)
#define DPINVGTT_EN_MASK 0xff0000
#define DPINVGTT_EN_MASK_CHV 0xfff0000
-#define SPRITEF_INVALID_GTT_STATUS (1<<11)
-#define SPRITEE_INVALID_GTT_STATUS (1<<10)
-#define PLANEC_INVALID_GTT_STATUS (1<<9)
-#define CURSORC_INVALID_GTT_STATUS (1<<8)
-#define CURSORB_INVALID_GTT_STATUS (1<<7)
-#define CURSORA_INVALID_GTT_STATUS (1<<6)
-#define SPRITED_INVALID_GTT_STATUS (1<<5)
-#define SPRITEC_INVALID_GTT_STATUS (1<<4)
-#define PLANEB_INVALID_GTT_STATUS (1<<3)
-#define SPRITEB_INVALID_GTT_STATUS (1<<2)
-#define SPRITEA_INVALID_GTT_STATUS (1<<1)
-#define PLANEA_INVALID_GTT_STATUS (1<<0)
+#define SPRITEF_INVALID_GTT_STATUS (1 << 11)
+#define SPRITEE_INVALID_GTT_STATUS (1 << 10)
+#define PLANEC_INVALID_GTT_STATUS (1 << 9)
+#define CURSORC_INVALID_GTT_STATUS (1 << 8)
+#define CURSORB_INVALID_GTT_STATUS (1 << 7)
+#define CURSORA_INVALID_GTT_STATUS (1 << 6)
+#define SPRITED_INVALID_GTT_STATUS (1 << 5)
+#define SPRITEC_INVALID_GTT_STATUS (1 << 4)
+#define PLANEB_INVALID_GTT_STATUS (1 << 3)
+#define SPRITEB_INVALID_GTT_STATUS (1 << 2)
+#define SPRITEA_INVALID_GTT_STATUS (1 << 1)
+#define PLANEA_INVALID_GTT_STATUS (1 << 0)
#define DPINVGTT_STATUS_MASK 0xff
#define DPINVGTT_STATUS_MASK_CHV 0xfff
@@ -5604,149 +5759,149 @@ enum {
/* pnv/gen4/g4x/vlv/chv */
#define DSPFW1 _MMIO(dev_priv->info.display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
-#define DSPFW_SR_MASK (0x1ff<<23)
+#define DSPFW_SR_MASK (0x1ff << 23)
#define DSPFW_CURSORB_SHIFT 16
-#define DSPFW_CURSORB_MASK (0x3f<<16)
+#define DSPFW_CURSORB_MASK (0x3f << 16)
#define DSPFW_PLANEB_SHIFT 8
-#define DSPFW_PLANEB_MASK (0x7f<<8)
-#define DSPFW_PLANEB_MASK_VLV (0xff<<8) /* vlv/chv */
+#define DSPFW_PLANEB_MASK (0x7f << 8)
+#define DSPFW_PLANEB_MASK_VLV (0xff << 8) /* vlv/chv */
#define DSPFW_PLANEA_SHIFT 0
-#define DSPFW_PLANEA_MASK (0x7f<<0)
-#define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */
+#define DSPFW_PLANEA_MASK (0x7f << 0)
+#define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */
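/*
 * Illustration (not part of the patch): the *_SHIFT/*_MASK pairs above
 * follow the usual read-modify-write pattern.  A minimal sketch,
 * assuming "val" was read from DSPFW1 and "new_sr" is the new value:
 *
 *	u32 sr = (val & DSPFW_SR_MASK) >> DSPFW_SR_SHIFT;
 *	val = (val & ~DSPFW_SR_MASK) | (new_sr << DSPFW_SR_SHIFT);
 */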
#define DSPFW2 _MMIO(dev_priv->info.display_mmio_offset + 0x70038)
-#define DSPFW_FBC_SR_EN (1<<31) /* g4x */
+#define DSPFW_FBC_SR_EN (1 << 31) /* g4x */
#define DSPFW_FBC_SR_SHIFT 28
-#define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */
+#define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */
#define DSPFW_FBC_HPLL_SR_SHIFT 24
-#define DSPFW_FBC_HPLL_SR_MASK (0xf<<24) /* g4x */
+#define DSPFW_FBC_HPLL_SR_MASK (0xf << 24) /* g4x */
#define DSPFW_SPRITEB_SHIFT (16)
-#define DSPFW_SPRITEB_MASK (0x7f<<16) /* g4x */
-#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */
+#define DSPFW_SPRITEB_MASK (0x7f << 16) /* g4x */
+#define DSPFW_SPRITEB_MASK_VLV (0xff << 16) /* vlv/chv */
#define DSPFW_CURSORA_SHIFT 8
-#define DSPFW_CURSORA_MASK (0x3f<<8)
+#define DSPFW_CURSORA_MASK (0x3f << 8)
#define DSPFW_PLANEC_OLD_SHIFT 0
-#define DSPFW_PLANEC_OLD_MASK (0x7f<<0) /* pre-gen4 sprite C */
+#define DSPFW_PLANEC_OLD_MASK (0x7f << 0) /* pre-gen4 sprite C */
#define DSPFW_SPRITEA_SHIFT 0
-#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
-#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
+#define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */
+#define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */
#define DSPFW3 _MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
-#define DSPFW_HPLL_SR_EN (1<<31)
-#define PINEVIEW_SELF_REFRESH_EN (1<<30)
+#define DSPFW_HPLL_SR_EN (1 << 31)
+#define PINEVIEW_SELF_REFRESH_EN (1 << 30)
#define DSPFW_CURSOR_SR_SHIFT 24
-#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
+#define DSPFW_CURSOR_SR_MASK (0x3f << 24)
#define DSPFW_HPLL_CURSOR_SHIFT 16
-#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
+#define DSPFW_HPLL_CURSOR_MASK (0x3f << 16)
#define DSPFW_HPLL_SR_SHIFT 0
-#define DSPFW_HPLL_SR_MASK (0x1ff<<0)
+#define DSPFW_HPLL_SR_MASK (0x1ff << 0)
/* vlv/chv */
#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070)
#define DSPFW_SPRITEB_WM1_SHIFT 16
-#define DSPFW_SPRITEB_WM1_MASK (0xff<<16)
+#define DSPFW_SPRITEB_WM1_MASK (0xff << 16)
#define DSPFW_CURSORA_WM1_SHIFT 8
-#define DSPFW_CURSORA_WM1_MASK (0x3f<<8)
+#define DSPFW_CURSORA_WM1_MASK (0x3f << 8)
#define DSPFW_SPRITEA_WM1_SHIFT 0
-#define DSPFW_SPRITEA_WM1_MASK (0xff<<0)
+#define DSPFW_SPRITEA_WM1_MASK (0xff << 0)
#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074)
#define DSPFW_PLANEB_WM1_SHIFT 24
-#define DSPFW_PLANEB_WM1_MASK (0xff<<24)
+#define DSPFW_PLANEB_WM1_MASK (0xff << 24)
#define DSPFW_PLANEA_WM1_SHIFT 16
-#define DSPFW_PLANEA_WM1_MASK (0xff<<16)
+#define DSPFW_PLANEA_WM1_MASK (0xff << 16)
#define DSPFW_CURSORB_WM1_SHIFT 8
-#define DSPFW_CURSORB_WM1_MASK (0x3f<<8)
+#define DSPFW_CURSORB_WM1_MASK (0x3f << 8)
#define DSPFW_CURSOR_SR_WM1_SHIFT 0
-#define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0)
+#define DSPFW_CURSOR_SR_WM1_MASK (0x3f << 0)
#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078)
#define DSPFW_SR_WM1_SHIFT 0
-#define DSPFW_SR_WM1_MASK (0x1ff<<0)
+#define DSPFW_SR_WM1_MASK (0x1ff << 0)
#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c)
#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
#define DSPFW_SPRITED_WM1_SHIFT 24
-#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
+#define DSPFW_SPRITED_WM1_MASK (0xff << 24)
#define DSPFW_SPRITED_SHIFT 16
-#define DSPFW_SPRITED_MASK_VLV (0xff<<16)
+#define DSPFW_SPRITED_MASK_VLV (0xff << 16)
#define DSPFW_SPRITEC_WM1_SHIFT 8
-#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
+#define DSPFW_SPRITEC_WM1_MASK (0xff << 8)
#define DSPFW_SPRITEC_SHIFT 0
-#define DSPFW_SPRITEC_MASK_VLV (0xff<<0)
+#define DSPFW_SPRITEC_MASK_VLV (0xff << 0)
#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8)
#define DSPFW_SPRITEF_WM1_SHIFT 24
-#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
+#define DSPFW_SPRITEF_WM1_MASK (0xff << 24)
#define DSPFW_SPRITEF_SHIFT 16
-#define DSPFW_SPRITEF_MASK_VLV (0xff<<16)
+#define DSPFW_SPRITEF_MASK_VLV (0xff << 16)
#define DSPFW_SPRITEE_WM1_SHIFT 8
-#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
+#define DSPFW_SPRITEE_WM1_MASK (0xff << 8)
#define DSPFW_SPRITEE_SHIFT 0
-#define DSPFW_SPRITEE_MASK_VLV (0xff<<0)
+#define DSPFW_SPRITEE_MASK_VLV (0xff << 0)
#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
#define DSPFW_PLANEC_WM1_SHIFT 24
-#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
+#define DSPFW_PLANEC_WM1_MASK (0xff << 24)
#define DSPFW_PLANEC_SHIFT 16
-#define DSPFW_PLANEC_MASK_VLV (0xff<<16)
+#define DSPFW_PLANEC_MASK_VLV (0xff << 16)
#define DSPFW_CURSORC_WM1_SHIFT 8
-#define DSPFW_CURSORC_WM1_MASK (0x3f<<16)
+#define DSPFW_CURSORC_WM1_MASK (0x3f << 16)
#define DSPFW_CURSORC_SHIFT 0
-#define DSPFW_CURSORC_MASK (0x3f<<0)
+#define DSPFW_CURSORC_MASK (0x3f << 0)
/* vlv/chv high order bits */
#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064)
#define DSPFW_SR_HI_SHIFT 24
-#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SR_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_HI_SHIFT 23
-#define DSPFW_SPRITEF_HI_MASK (1<<23)
+#define DSPFW_SPRITEF_HI_MASK (1 << 23)
#define DSPFW_SPRITEE_HI_SHIFT 22
-#define DSPFW_SPRITEE_HI_MASK (1<<22)
+#define DSPFW_SPRITEE_HI_MASK (1 << 22)
#define DSPFW_PLANEC_HI_SHIFT 21
-#define DSPFW_PLANEC_HI_MASK (1<<21)
+#define DSPFW_PLANEC_HI_MASK (1 << 21)
#define DSPFW_SPRITED_HI_SHIFT 20
-#define DSPFW_SPRITED_HI_MASK (1<<20)
+#define DSPFW_SPRITED_HI_MASK (1 << 20)
#define DSPFW_SPRITEC_HI_SHIFT 16
-#define DSPFW_SPRITEC_HI_MASK (1<<16)
+#define DSPFW_SPRITEC_HI_MASK (1 << 16)
#define DSPFW_PLANEB_HI_SHIFT 12
-#define DSPFW_PLANEB_HI_MASK (1<<12)
+#define DSPFW_PLANEB_HI_MASK (1 << 12)
#define DSPFW_SPRITEB_HI_SHIFT 8
-#define DSPFW_SPRITEB_HI_MASK (1<<8)
+#define DSPFW_SPRITEB_HI_MASK (1 << 8)
#define DSPFW_SPRITEA_HI_SHIFT 4
-#define DSPFW_SPRITEA_HI_MASK (1<<4)
+#define DSPFW_SPRITEA_HI_MASK (1 << 4)
#define DSPFW_PLANEA_HI_SHIFT 0
-#define DSPFW_PLANEA_HI_MASK (1<<0)
+#define DSPFW_PLANEA_HI_MASK (1 << 0)
#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068)
#define DSPFW_SR_WM1_HI_SHIFT 24
-#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SR_WM1_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
-#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23)
+#define DSPFW_SPRITEF_WM1_HI_MASK (1 << 23)
#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
-#define DSPFW_SPRITEE_WM1_HI_MASK (1<<22)
+#define DSPFW_SPRITEE_WM1_HI_MASK (1 << 22)
#define DSPFW_PLANEC_WM1_HI_SHIFT 21
-#define DSPFW_PLANEC_WM1_HI_MASK (1<<21)
+#define DSPFW_PLANEC_WM1_HI_MASK (1 << 21)
#define DSPFW_SPRITED_WM1_HI_SHIFT 20
-#define DSPFW_SPRITED_WM1_HI_MASK (1<<20)
+#define DSPFW_SPRITED_WM1_HI_MASK (1 << 20)
#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
-#define DSPFW_SPRITEC_WM1_HI_MASK (1<<16)
+#define DSPFW_SPRITEC_WM1_HI_MASK (1 << 16)
#define DSPFW_PLANEB_WM1_HI_SHIFT 12
-#define DSPFW_PLANEB_WM1_HI_MASK (1<<12)
+#define DSPFW_PLANEB_WM1_HI_MASK (1 << 12)
#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
-#define DSPFW_SPRITEB_WM1_HI_MASK (1<<8)
+#define DSPFW_SPRITEB_WM1_HI_MASK (1 << 8)
#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
-#define DSPFW_SPRITEA_WM1_HI_MASK (1<<4)
+#define DSPFW_SPRITEA_WM1_HI_MASK (1 << 4)
#define DSPFW_PLANEA_WM1_HI_SHIFT 0
-#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
+#define DSPFW_PLANEA_WM1_HI_MASK (1 << 0)
/* drain latency register values */
#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
#define DDL_CURSOR_SHIFT 24
-#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
+#define DDL_SPRITE_SHIFT(sprite) (8 + 8 * (sprite))
#define DDL_PLANE_SHIFT 0
-#define DDL_PRECISION_HIGH (1<<7)
-#define DDL_PRECISION_LOW (0<<7)
+#define DDL_PRECISION_HIGH (1 << 7)
+#define DDL_PRECISION_LOW (0 << 7)
#define DRAIN_LATENCY_MASK 0x7f
#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
-#define CBR_PND_DEADLINE_DISABLE (1<<31)
-#define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
+#define CBR_PND_DEADLINE_DISABLE (1 << 31)
+#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30)
#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
-#define CBR_DPLLBMD_PIPE(pipe) (1<<(7+(pipe)*11)) /* pipes B and C */
+#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */
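/*
 * Note (not part of the patch): with the usual PIPE_A = 0 numbering,
 * CBR_DPLLBMD_PIPE(PIPE_B) is bit 18 (7 + 1 * 11) and
 * CBR_DPLLBMD_PIPE(PIPE_C) is bit 29 (7 + 2 * 11), matching the
 * "pipes B and C" comment above.
 */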
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -5818,32 +5973,32 @@ enum {
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK _MMIO(0x45100)
-#define WM0_PIPE_PLANE_MASK (0xffff<<16)
+#define WM0_PIPE_PLANE_MASK (0xffff << 16)
#define WM0_PIPE_PLANE_SHIFT 16
-#define WM0_PIPE_SPRITE_MASK (0xff<<8)
+#define WM0_PIPE_SPRITE_MASK (0xff << 8)
#define WM0_PIPE_SPRITE_SHIFT 8
#define WM0_PIPE_CURSOR_MASK (0xff)
#define WM0_PIPEB_ILK _MMIO(0x45104)
#define WM0_PIPEC_IVB _MMIO(0x45200)
#define WM1_LP_ILK _MMIO(0x45108)
-#define WM1_LP_SR_EN (1<<31)
+#define WM1_LP_SR_EN (1 << 31)
#define WM1_LP_LATENCY_SHIFT 24
-#define WM1_LP_LATENCY_MASK (0x7f<<24)
-#define WM1_LP_FBC_MASK (0xf<<20)
+#define WM1_LP_LATENCY_MASK (0x7f << 24)
+#define WM1_LP_FBC_MASK (0xf << 20)
#define WM1_LP_FBC_SHIFT 20
#define WM1_LP_FBC_SHIFT_BDW 19
-#define WM1_LP_SR_MASK (0x7ff<<8)
+#define WM1_LP_SR_MASK (0x7ff << 8)
#define WM1_LP_SR_SHIFT 8
#define WM1_LP_CURSOR_MASK (0xff)
#define WM2_LP_ILK _MMIO(0x4510c)
-#define WM2_LP_EN (1<<31)
+#define WM2_LP_EN (1 << 31)
#define WM3_LP_ILK _MMIO(0x45110)
-#define WM3_LP_EN (1<<31)
+#define WM3_LP_EN (1 << 31)
#define WM1S_LP_ILK _MMIO(0x45120)
#define WM2S_LP_IVB _MMIO(0x45124)
#define WM3S_LP_IVB _MMIO(0x45128)
-#define WM1S_LP_EN (1<<31)
+#define WM1S_LP_EN (1 << 31)
#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
(WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
@@ -5900,8 +6055,7 @@ enum {
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
#define CURSOR_STRIDE_SHIFT 28
-#define CURSOR_STRIDE(x) ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
-#define CURSOR_PIPE_CSC_ENABLE (1<<24)
+#define CURSOR_STRIDE(x) ((ffs(x) - 9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
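/*
 * Note (not part of the patch): ffs() returns the 1-based index of the
 * lowest set bit, so the four legal strides encode as ffs(256) - 9 = 0,
 * ffs(512) - 9 = 1, ffs(1024) - 9 = 2 and ffs(2048) - 9 = 3 in the
 * 2-bit field at bits 29:28.
 */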
#define CURSOR_FORMAT_SHIFT 24
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
@@ -5910,18 +6064,21 @@ enum {
#define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT)
/* New style CUR*CNTR flags */
-#define CURSOR_MODE 0x27
-#define CURSOR_MODE_DISABLE 0x00
-#define CURSOR_MODE_128_32B_AX 0x02
-#define CURSOR_MODE_256_32B_AX 0x03
-#define CURSOR_MODE_64_32B_AX 0x07
-#define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
-#define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
-#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_MODE 0x27
+#define MCURSOR_MODE_DISABLE 0x00
+#define MCURSOR_MODE_128_32B_AX 0x02
+#define MCURSOR_MODE_256_32B_AX 0x03
+#define MCURSOR_MODE_64_32B_AX 0x07
+#define MCURSOR_MODE_128_ARGB_AX ((1 << 5) | MCURSOR_MODE_128_32B_AX)
+#define MCURSOR_MODE_256_ARGB_AX ((1 << 5) | MCURSOR_MODE_256_32B_AX)
+#define MCURSOR_MODE_64_ARGB_AX ((1 << 5) | MCURSOR_MODE_64_32B_AX)
+#define MCURSOR_PIPE_SELECT_MASK (0x3 << 28)
+#define MCURSOR_PIPE_SELECT_SHIFT 28
#define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define CURSOR_ROTATE_180 (1<<15)
-#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
+#define MCURSOR_PIPE_CSC_ENABLE (1 << 24)
+#define MCURSOR_ROTATE_180 (1 << 15)
+#define MCURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
#define CURSOR_POS_MASK 0x007FF
@@ -5958,41 +6115,41 @@ enum {
/* Display A control */
#define _DSPACNTR 0x70180
-#define DISPLAY_PLANE_ENABLE (1<<31)
+#define DISPLAY_PLANE_ENABLE (1 << 31)
#define DISPLAY_PLANE_DISABLE 0
-#define DISPPLANE_GAMMA_ENABLE (1<<30)
+#define DISPPLANE_GAMMA_ENABLE (1 << 30)
#define DISPPLANE_GAMMA_DISABLE 0
-#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
-#define DISPPLANE_YUV422 (0x0<<26)
-#define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_BGRA555 (0x3<<26)
-#define DISPPLANE_BGRX555 (0x4<<26)
-#define DISPPLANE_BGRX565 (0x5<<26)
-#define DISPPLANE_BGRX888 (0x6<<26)
-#define DISPPLANE_BGRA888 (0x7<<26)
-#define DISPPLANE_RGBX101010 (0x8<<26)
-#define DISPPLANE_RGBA101010 (0x9<<26)
-#define DISPPLANE_BGRX101010 (0xa<<26)
-#define DISPPLANE_RGBX161616 (0xc<<26)
-#define DISPPLANE_RGBX888 (0xe<<26)
-#define DISPPLANE_RGBA888 (0xf<<26)
-#define DISPPLANE_STEREO_ENABLE (1<<25)
+#define DISPPLANE_PIXFORMAT_MASK (0xf << 26)
+#define DISPPLANE_YUV422 (0x0 << 26)
+#define DISPPLANE_8BPP (0x2 << 26)
+#define DISPPLANE_BGRA555 (0x3 << 26)
+#define DISPPLANE_BGRX555 (0x4 << 26)
+#define DISPPLANE_BGRX565 (0x5 << 26)
+#define DISPPLANE_BGRX888 (0x6 << 26)
+#define DISPPLANE_BGRA888 (0x7 << 26)
+#define DISPPLANE_RGBX101010 (0x8 << 26)
+#define DISPPLANE_RGBA101010 (0x9 << 26)
+#define DISPPLANE_BGRX101010 (0xa << 26)
+#define DISPPLANE_RGBX161616 (0xc << 26)
+#define DISPPLANE_RGBX888 (0xe << 26)
+#define DISPPLANE_RGBA888 (0xf << 26)
+#define DISPPLANE_STEREO_ENABLE (1 << 25)
#define DISPPLANE_STEREO_DISABLE 0
-#define DISPPLANE_PIPE_CSC_ENABLE (1<<24)
+#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24)
#define DISPPLANE_SEL_PIPE_SHIFT 24
-#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
-#define DISPPLANE_SEL_PIPE(pipe) ((pipe)<<DISPPLANE_SEL_PIPE_SHIFT)
-#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
+#define DISPPLANE_SEL_PIPE_MASK (3 << DISPPLANE_SEL_PIPE_SHIFT)
+#define DISPPLANE_SEL_PIPE(pipe) ((pipe) << DISPPLANE_SEL_PIPE_SHIFT)
+#define DISPPLANE_SRC_KEY_ENABLE (1 << 22)
#define DISPPLANE_SRC_KEY_DISABLE 0
-#define DISPPLANE_LINE_DOUBLE (1<<20)
+#define DISPPLANE_LINE_DOUBLE (1 << 20)
#define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0
-#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-#define DISPPLANE_ALPHA_PREMULTIPLY (1<<16) /* CHV pipe B */
-#define DISPPLANE_ROTATE_180 (1<<15)
-#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
-#define DISPPLANE_TILED (1<<10)
-#define DISPPLANE_MIRROR (1<<8) /* CHV pipe B */
+#define DISPPLANE_STEREO_POLARITY_SECOND (1 << 18)
+#define DISPPLANE_ALPHA_PREMULTIPLY (1 << 16) /* CHV pipe B */
+#define DISPPLANE_ROTATE_180 (1 << 15)
+#define DISPPLANE_TRICKLE_FEED_DISABLE (1 << 14) /* Ironlake */
+#define DISPPLANE_TILED (1 << 10)
+#define DISPPLANE_MIRROR (1 << 8) /* CHV pipe B */
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
@@ -6015,15 +6172,15 @@ enum {
/* CHV pipe B blender and primary plane */
#define _CHV_BLEND_A 0x60a00
-#define CHV_BLEND_LEGACY (0<<30)
-#define CHV_BLEND_ANDROID (1<<30)
-#define CHV_BLEND_MPO (2<<30)
-#define CHV_BLEND_MASK (3<<30)
+#define CHV_BLEND_LEGACY (0 << 30)
+#define CHV_BLEND_ANDROID (1 << 30)
+#define CHV_BLEND_MPO (2 << 30)
+#define CHV_BLEND_MASK (3 << 30)
#define _CHV_CANVAS_A 0x60a04
#define _PRIMPOS_A 0x60a08
#define _PRIMSIZE_A 0x60a0c
#define _PRIMCNSTALPHA_A 0x60a10
-#define PRIM_CONST_ALPHA_ENABLE (1<<31)
+#define PRIM_CONST_ALPHA_ENABLE (1 << 31)
#define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A)
#define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A)
@@ -6033,8 +6190,8 @@ enum {
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
-#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
-#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
+#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK)
/*
* VBIOS flags
@@ -6064,7 +6221,7 @@ enum {
/* Display B control */
#define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180)
-#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
+#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
@@ -6079,27 +6236,27 @@ enum {
/* Sprite A control */
#define _DVSACNTR 0x72180
-#define DVS_ENABLE (1<<31)
-#define DVS_GAMMA_ENABLE (1<<30)
-#define DVS_YUV_RANGE_CORRECTION_DISABLE (1<<27)
-#define DVS_PIXFORMAT_MASK (3<<25)
-#define DVS_FORMAT_YUV422 (0<<25)
-#define DVS_FORMAT_RGBX101010 (1<<25)
-#define DVS_FORMAT_RGBX888 (2<<25)
-#define DVS_FORMAT_RGBX161616 (3<<25)
-#define DVS_PIPE_CSC_ENABLE (1<<24)
-#define DVS_SOURCE_KEY (1<<22)
-#define DVS_RGB_ORDER_XBGR (1<<20)
-#define DVS_YUV_FORMAT_BT709 (1<<18)
-#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
-#define DVS_YUV_ORDER_YUYV (0<<16)
-#define DVS_YUV_ORDER_UYVY (1<<16)
-#define DVS_YUV_ORDER_YVYU (2<<16)
-#define DVS_YUV_ORDER_VYUY (3<<16)
-#define DVS_ROTATE_180 (1<<15)
-#define DVS_DEST_KEY (1<<2)
-#define DVS_TRICKLE_FEED_DISABLE (1<<14)
-#define DVS_TILED (1<<10)
+#define DVS_ENABLE (1 << 31)
+#define DVS_GAMMA_ENABLE (1 << 30)
+#define DVS_YUV_RANGE_CORRECTION_DISABLE (1 << 27)
+#define DVS_PIXFORMAT_MASK (3 << 25)
+#define DVS_FORMAT_YUV422 (0 << 25)
+#define DVS_FORMAT_RGBX101010 (1 << 25)
+#define DVS_FORMAT_RGBX888 (2 << 25)
+#define DVS_FORMAT_RGBX161616 (3 << 25)
+#define DVS_PIPE_CSC_ENABLE (1 << 24)
+#define DVS_SOURCE_KEY (1 << 22)
+#define DVS_RGB_ORDER_XBGR (1 << 20)
+#define DVS_YUV_FORMAT_BT709 (1 << 18)
+#define DVS_YUV_BYTE_ORDER_MASK (3 << 16)
+#define DVS_YUV_ORDER_YUYV (0 << 16)
+#define DVS_YUV_ORDER_UYVY (1 << 16)
+#define DVS_YUV_ORDER_YVYU (2 << 16)
+#define DVS_YUV_ORDER_VYUY (3 << 16)
+#define DVS_ROTATE_180 (1 << 15)
+#define DVS_DEST_KEY (1 << 2)
+#define DVS_TRICKLE_FEED_DISABLE (1 << 14)
+#define DVS_TILED (1 << 10)
#define _DVSALINOFF 0x72184
#define _DVSASTRIDE 0x72188
#define _DVSAPOS 0x7218c
@@ -6111,13 +6268,13 @@ enum {
#define _DVSATILEOFF 0x721a4
#define _DVSASURFLIVE 0x721ac
#define _DVSASCALE 0x72204
-#define DVS_SCALE_ENABLE (1<<31)
-#define DVS_FILTER_MASK (3<<29)
-#define DVS_FILTER_MEDIUM (0<<29)
-#define DVS_FILTER_ENHANCING (1<<29)
-#define DVS_FILTER_SOFTENING (2<<29)
-#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
-#define DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define DVS_SCALE_ENABLE (1 << 31)
+#define DVS_FILTER_MASK (3 << 29)
+#define DVS_FILTER_MEDIUM (0 << 29)
+#define DVS_FILTER_ENHANCING (1 << 29)
+#define DVS_FILTER_SOFTENING (2 << 29)
+#define DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
+#define DVS_VERTICAL_OFFSET_ENABLE (1 << 27)
#define _DVSAGAMC 0x72300
#define _DVSBCNTR 0x73180
@@ -6148,31 +6305,31 @@ enum {
#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
#define _SPRA_CTL 0x70280
-#define SPRITE_ENABLE (1<<31)
-#define SPRITE_GAMMA_ENABLE (1<<30)
-#define SPRITE_YUV_RANGE_CORRECTION_DISABLE (1<<28)
-#define SPRITE_PIXFORMAT_MASK (7<<25)
-#define SPRITE_FORMAT_YUV422 (0<<25)
-#define SPRITE_FORMAT_RGBX101010 (1<<25)
-#define SPRITE_FORMAT_RGBX888 (2<<25)
-#define SPRITE_FORMAT_RGBX161616 (3<<25)
-#define SPRITE_FORMAT_YUV444 (4<<25)
-#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
-#define SPRITE_PIPE_CSC_ENABLE (1<<24)
-#define SPRITE_SOURCE_KEY (1<<22)
-#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
-#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
-#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
-#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
-#define SPRITE_YUV_ORDER_YUYV (0<<16)
-#define SPRITE_YUV_ORDER_UYVY (1<<16)
-#define SPRITE_YUV_ORDER_YVYU (2<<16)
-#define SPRITE_YUV_ORDER_VYUY (3<<16)
-#define SPRITE_ROTATE_180 (1<<15)
-#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
-#define SPRITE_INT_GAMMA_ENABLE (1<<13)
-#define SPRITE_TILED (1<<10)
-#define SPRITE_DEST_KEY (1<<2)
+#define SPRITE_ENABLE (1 << 31)
+#define SPRITE_GAMMA_ENABLE (1 << 30)
+#define SPRITE_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
+#define SPRITE_PIXFORMAT_MASK (7 << 25)
+#define SPRITE_FORMAT_YUV422 (0 << 25)
+#define SPRITE_FORMAT_RGBX101010 (1 << 25)
+#define SPRITE_FORMAT_RGBX888 (2 << 25)
+#define SPRITE_FORMAT_RGBX161616 (3 << 25)
+#define SPRITE_FORMAT_YUV444 (4 << 25)
+#define SPRITE_FORMAT_XR_BGR101010 (5 << 25) /* Extended range */
+#define SPRITE_PIPE_CSC_ENABLE (1 << 24)
+#define SPRITE_SOURCE_KEY (1 << 22)
+#define SPRITE_RGB_ORDER_RGBX (1 << 20) /* only for 888 and 161616 */
+#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1 << 19)
+#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) /* 0 is BT601 */
+#define SPRITE_YUV_BYTE_ORDER_MASK (3 << 16)
+#define SPRITE_YUV_ORDER_YUYV (0 << 16)
+#define SPRITE_YUV_ORDER_UYVY (1 << 16)
+#define SPRITE_YUV_ORDER_YVYU (2 << 16)
+#define SPRITE_YUV_ORDER_VYUY (3 << 16)
+#define SPRITE_ROTATE_180 (1 << 15)
+#define SPRITE_TRICKLE_FEED_DISABLE (1 << 14)
+#define SPRITE_INT_GAMMA_ENABLE (1 << 13)
+#define SPRITE_TILED (1 << 10)
+#define SPRITE_DEST_KEY (1 << 2)
#define _SPRA_LINOFF 0x70284
#define _SPRA_STRIDE 0x70288
#define _SPRA_POS 0x7028c
@@ -6185,13 +6342,13 @@ enum {
#define _SPRA_OFFSET 0x702a4
#define _SPRA_SURFLIVE 0x702ac
#define _SPRA_SCALE 0x70304
-#define SPRITE_SCALE_ENABLE (1<<31)
-#define SPRITE_FILTER_MASK (3<<29)
-#define SPRITE_FILTER_MEDIUM (0<<29)
-#define SPRITE_FILTER_ENHANCING (1<<29)
-#define SPRITE_FILTER_SOFTENING (2<<29)
-#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
-#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27)
+#define SPRITE_SCALE_ENABLE (1 << 31)
+#define SPRITE_FILTER_MASK (3 << 29)
+#define SPRITE_FILTER_MEDIUM (0 << 29)
+#define SPRITE_FILTER_ENHANCING (1 << 29)
+#define SPRITE_FILTER_SOFTENING (2 << 29)
+#define SPRITE_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
+#define SPRITE_VERTICAL_OFFSET_ENABLE (1 << 27)
#define _SPRA_GAMC 0x70400
#define _SPRB_CTL 0x71280
@@ -6225,28 +6382,28 @@ enum {
#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
-#define SP_ENABLE (1<<31)
-#define SP_GAMMA_ENABLE (1<<30)
-#define SP_PIXFORMAT_MASK (0xf<<26)
-#define SP_FORMAT_YUV422 (0<<26)
-#define SP_FORMAT_BGR565 (5<<26)
-#define SP_FORMAT_BGRX8888 (6<<26)
-#define SP_FORMAT_BGRA8888 (7<<26)
-#define SP_FORMAT_RGBX1010102 (8<<26)
-#define SP_FORMAT_RGBA1010102 (9<<26)
-#define SP_FORMAT_RGBX8888 (0xe<<26)
-#define SP_FORMAT_RGBA8888 (0xf<<26)
-#define SP_ALPHA_PREMULTIPLY (1<<23) /* CHV pipe B */
-#define SP_SOURCE_KEY (1<<22)
-#define SP_YUV_FORMAT_BT709 (1<<18)
-#define SP_YUV_BYTE_ORDER_MASK (3<<16)
-#define SP_YUV_ORDER_YUYV (0<<16)
-#define SP_YUV_ORDER_UYVY (1<<16)
-#define SP_YUV_ORDER_YVYU (2<<16)
-#define SP_YUV_ORDER_VYUY (3<<16)
-#define SP_ROTATE_180 (1<<15)
-#define SP_TILED (1<<10)
-#define SP_MIRROR (1<<8) /* CHV pipe B */
+#define SP_ENABLE (1 << 31)
+#define SP_GAMMA_ENABLE (1 << 30)
+#define SP_PIXFORMAT_MASK (0xf << 26)
+#define SP_FORMAT_YUV422 (0 << 26)
+#define SP_FORMAT_BGR565 (5 << 26)
+#define SP_FORMAT_BGRX8888 (6 << 26)
+#define SP_FORMAT_BGRA8888 (7 << 26)
+#define SP_FORMAT_RGBX1010102 (8 << 26)
+#define SP_FORMAT_RGBA1010102 (9 << 26)
+#define SP_FORMAT_RGBX8888 (0xe << 26)
+#define SP_FORMAT_RGBA8888 (0xf << 26)
+#define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */
+#define SP_SOURCE_KEY (1 << 22)
+#define SP_YUV_FORMAT_BT709 (1 << 18)
+#define SP_YUV_BYTE_ORDER_MASK (3 << 16)
+#define SP_YUV_ORDER_YUYV (0 << 16)
+#define SP_YUV_ORDER_UYVY (1 << 16)
+#define SP_YUV_ORDER_YVYU (2 << 16)
+#define SP_YUV_ORDER_VYUY (3 << 16)
+#define SP_ROTATE_180 (1 << 15)
+#define SP_TILED (1 << 10)
+#define SP_MIRROR (1 << 8) /* CHV pipe B */
#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
@@ -6257,7 +6414,7 @@ enum {
#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
-#define SP_CONST_ALPHA_ENABLE (1<<31)
+#define SP_CONST_ALPHA_ENABLE (1 << 31)
#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
#define SP_CONTRAST(x) ((x) << 18) /* u3.6 */
#define SP_BRIGHTNESS(x) ((x) & 0xff) /* s8 */
@@ -6349,40 +6506,40 @@ enum {
* correctly map to the same formats in ICL, as long as bit 23 is set to 0
*/
#define PLANE_CTL_FORMAT_MASK (0xf << 24)
-#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
-#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
-#define PLANE_CTL_FORMAT_XRGB_2101010 ( 2 << 24)
-#define PLANE_CTL_FORMAT_XRGB_8888 ( 4 << 24)
-#define PLANE_CTL_FORMAT_XRGB_16161616F ( 6 << 24)
-#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
-#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
-#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
+#define PLANE_CTL_FORMAT_YUV422 (0 << 24)
+#define PLANE_CTL_FORMAT_NV12 (1 << 24)
+#define PLANE_CTL_FORMAT_XRGB_2101010 (2 << 24)
+#define PLANE_CTL_FORMAT_XRGB_8888 (4 << 24)
+#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24)
+#define PLANE_CTL_FORMAT_AYUV (8 << 24)
+#define PLANE_CTL_FORMAT_INDEXED (12 << 24)
+#define PLANE_CTL_FORMAT_RGB_565 (14 << 24)
#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23)
#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */
#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
-#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
-#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
+#define PLANE_CTL_KEY_ENABLE_SOURCE (1 << 21)
+#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21)
#define PLANE_CTL_ORDER_BGRX (0 << 20)
#define PLANE_CTL_ORDER_RGBX (1 << 20)
#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18)
#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
-#define PLANE_CTL_YUV422_YUYV ( 0 << 16)
-#define PLANE_CTL_YUV422_UYVY ( 1 << 16)
-#define PLANE_CTL_YUV422_YVYU ( 2 << 16)
-#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
+#define PLANE_CTL_YUV422_YUYV (0 << 16)
+#define PLANE_CTL_YUV422_UYVY (1 << 16)
+#define PLANE_CTL_YUV422_YVYU (2 << 16)
+#define PLANE_CTL_YUV422_VYUY (3 << 16)
#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */
#define PLANE_CTL_TILED_MASK (0x7 << 10)
-#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
-#define PLANE_CTL_TILED_X ( 1 << 10)
-#define PLANE_CTL_TILED_Y ( 4 << 10)
-#define PLANE_CTL_TILED_YF ( 5 << 10)
-#define PLANE_CTL_FLIP_HORIZONTAL ( 1 << 8)
+#define PLANE_CTL_TILED_LINEAR (0 << 10)
+#define PLANE_CTL_TILED_X (1 << 10)
+#define PLANE_CTL_TILED_Y (4 << 10)
+#define PLANE_CTL_TILED_YF (5 << 10)
+#define PLANE_CTL_FLIP_HORIZONTAL (1 << 8)
#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */
-#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
-#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
-#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
+#define PLANE_CTL_ALPHA_DISABLE (0 << 4)
+#define PLANE_CTL_ALPHA_SW_PREMULTIPLY (2 << 4)
+#define PLANE_CTL_ALPHA_HW_PREMULTIPLY (3 << 4)
#define PLANE_CTL_ROTATE_MASK 0x3
#define PLANE_CTL_ROTATE_0 0x0
#define PLANE_CTL_ROTATE_90 0x1
@@ -6610,7 +6767,7 @@ enum {
# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
#define FDI_PLL_FREQ_CTL _MMIO(0x46030)
-#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
+#define FDI_PLL_FREQ_CHANGE_REQUEST (1 << 24)
#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
@@ -6659,14 +6816,14 @@ enum {
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
#define _PFA_CTL_1 0x68080
#define _PFB_CTL_1 0x68880
-#define PF_ENABLE (1<<31)
-#define PF_PIPE_SEL_MASK_IVB (3<<29)
-#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
-#define PF_FILTER_MASK (3<<23)
-#define PF_FILTER_PROGRAMMED (0<<23)
-#define PF_FILTER_MED_3x3 (1<<23)
-#define PF_FILTER_EDGE_ENHANCE (2<<23)
-#define PF_FILTER_EDGE_SOFTEN (3<<23)
+#define PF_ENABLE (1 << 31)
+#define PF_PIPE_SEL_MASK_IVB (3 << 29)
+#define PF_PIPE_SEL_IVB(pipe) ((pipe) << 29)
+#define PF_FILTER_MASK (3 << 23)
+#define PF_FILTER_PROGRAMMED (0 << 23)
+#define PF_FILTER_MED_3x3 (1 << 23)
+#define PF_FILTER_EDGE_ENHANCE (2 << 23)
+#define PF_FILTER_EDGE_SOFTEN (3 << 23)
#define _PFA_WIN_SZ 0x68074
#define _PFB_WIN_SZ 0x68874
#define _PFA_WIN_POS 0x68070
@@ -6684,7 +6841,7 @@ enum {
#define _PSA_CTL 0x68180
#define _PSB_CTL 0x68980
-#define PS_ENABLE (1<<31)
+#define PS_ENABLE (1 << 31)
#define _PSA_WIN_SZ 0x68174
#define _PSB_WIN_SZ 0x68974
#define _PSA_WIN_POS 0x68170
@@ -6769,6 +6926,10 @@ enum {
#define _PS_VPHASE_1B 0x68988
#define _PS_VPHASE_2B 0x68A88
#define _PS_VPHASE_1C 0x69188
+#define PS_Y_PHASE(x) ((x) << 16)
+#define PS_UV_RGB_PHASE(x) ((x) << 0)
+#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */
+#define PS_PHASE_TRIP (1 << 0)
#define _PS_HPHASE_1A 0x68194
#define _PS_HPHASE_2A 0x68294
@@ -6782,7 +6943,7 @@ enum {
#define _PS_ECC_STAT_2B 0x68AD0
#define _PS_ECC_STAT_1C 0x691D0
-#define _ID(id, a, b) ((a) + (id)*((b)-(a)))
+#define _ID(id, a, b) _PICK_EVEN(id, a, b)
#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
_ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
_ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
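/*
 * Note (not part of the patch): judging from the removed open-coded
 * version, _PICK_EVEN(id, a, b) presumably expands to the same
 * (a) + (id) * ((b) - (a)) arithmetic, so e.g.
 * _ID(1, _PS_1A_CTRL, _PS_2A_CTRL) still evaluates to _PS_2A_CTRL.
 */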
@@ -6863,37 +7024,37 @@ enum {
#define DE_PIPEB_CRC_DONE (1 << 10)
#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
#define DE_PIPEA_VBLANK (1 << 7)
-#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe)))
+#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8 * (pipe)))
#define DE_PIPEA_EVEN_FIELD (1 << 6)
#define DE_PIPEA_ODD_FIELD (1 << 5)
#define DE_PIPEA_LINE_COMPARE (1 << 4)
#define DE_PIPEA_VSYNC (1 << 3)
#define DE_PIPEA_CRC_DONE (1 << 2)
-#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe)))
+#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8 * (pipe)))
#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
-#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe)))
+#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8 * (pipe)))
/* More Ivybridge lolz */
-#define DE_ERR_INT_IVB (1<<30)
-#define DE_GSE_IVB (1<<29)
-#define DE_PCH_EVENT_IVB (1<<28)
-#define DE_DP_A_HOTPLUG_IVB (1<<27)
-#define DE_AUX_CHANNEL_A_IVB (1<<26)
-#define DE_EDP_PSR_INT_HSW (1<<19)
-#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
-#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
-#define DE_PIPEC_VBLANK_IVB (1<<10)
-#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
-#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
-#define DE_PIPEB_VBLANK_IVB (1<<5)
-#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
-#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
-#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane)))
-#define DE_PIPEA_VBLANK_IVB (1<<0)
+#define DE_ERR_INT_IVB (1 << 30)
+#define DE_GSE_IVB (1 << 29)
+#define DE_PCH_EVENT_IVB (1 << 28)
+#define DE_DP_A_HOTPLUG_IVB (1 << 27)
+#define DE_AUX_CHANNEL_A_IVB (1 << 26)
+#define DE_EDP_PSR_INT_HSW (1 << 19)
+#define DE_SPRITEC_FLIP_DONE_IVB (1 << 14)
+#define DE_PLANEC_FLIP_DONE_IVB (1 << 13)
+#define DE_PIPEC_VBLANK_IVB (1 << 10)
+#define DE_SPRITEB_FLIP_DONE_IVB (1 << 9)
+#define DE_PLANEB_FLIP_DONE_IVB (1 << 8)
+#define DE_PIPEB_VBLANK_IVB (1 << 5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1 << 4)
+#define DE_PLANEA_FLIP_DONE_IVB (1 << 3)
+#define DE_PLANE_FLIP_DONE_IVB(plane) (1 << (3 + 5 * (plane)))
+#define DE_PIPEA_VBLANK_IVB (1 << 0)
#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
#define VLV_MASTER_IER _MMIO(0x4400c) /* Gunit master IER */
-#define MASTER_INTERRUPT_ENABLE (1<<31)
+#define MASTER_INTERRUPT_ENABLE (1 << 31)
#define DEISR _MMIO(0x44000)
#define DEIMR _MMIO(0x44004)
@@ -6906,37 +7067,37 @@ enum {
#define GTIER _MMIO(0x4401c)
#define GEN8_MASTER_IRQ _MMIO(0x44200)
-#define GEN8_MASTER_IRQ_CONTROL (1<<31)
-#define GEN8_PCU_IRQ (1<<30)
-#define GEN8_DE_PCH_IRQ (1<<23)
-#define GEN8_DE_MISC_IRQ (1<<22)
-#define GEN8_DE_PORT_IRQ (1<<20)
-#define GEN8_DE_PIPE_C_IRQ (1<<18)
-#define GEN8_DE_PIPE_B_IRQ (1<<17)
-#define GEN8_DE_PIPE_A_IRQ (1<<16)
-#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+(pipe)))
-#define GEN8_GT_VECS_IRQ (1<<6)
-#define GEN8_GT_GUC_IRQ (1<<5)
-#define GEN8_GT_PM_IRQ (1<<4)
-#define GEN8_GT_VCS2_IRQ (1<<3)
-#define GEN8_GT_VCS1_IRQ (1<<2)
-#define GEN8_GT_BCS_IRQ (1<<1)
-#define GEN8_GT_RCS_IRQ (1<<0)
+#define GEN8_MASTER_IRQ_CONTROL (1 << 31)
+#define GEN8_PCU_IRQ (1 << 30)
+#define GEN8_DE_PCH_IRQ (1 << 23)
+#define GEN8_DE_MISC_IRQ (1 << 22)
+#define GEN8_DE_PORT_IRQ (1 << 20)
+#define GEN8_DE_PIPE_C_IRQ (1 << 18)
+#define GEN8_DE_PIPE_B_IRQ (1 << 17)
+#define GEN8_DE_PIPE_A_IRQ (1 << 16)
+#define GEN8_DE_PIPE_IRQ(pipe) (1 << (16 + (pipe)))
+#define GEN8_GT_VECS_IRQ (1 << 6)
+#define GEN8_GT_GUC_IRQ (1 << 5)
+#define GEN8_GT_PM_IRQ (1 << 4)
+#define GEN8_GT_VCS2_IRQ (1 << 3)
+#define GEN8_GT_VCS1_IRQ (1 << 2)
+#define GEN8_GT_BCS_IRQ (1 << 1)
+#define GEN8_GT_RCS_IRQ (1 << 0)
#define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
#define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
-#define GEN9_GUC_TO_HOST_INT_EVENT (1<<31)
-#define GEN9_GUC_EXEC_ERROR_EVENT (1<<30)
-#define GEN9_GUC_DISPLAY_EVENT (1<<29)
-#define GEN9_GUC_SEMA_SIGNAL_EVENT (1<<28)
-#define GEN9_GUC_IOMMU_MSG_EVENT (1<<27)
-#define GEN9_GUC_DB_RING_EVENT (1<<26)
-#define GEN9_GUC_DMA_DONE_EVENT (1<<25)
-#define GEN9_GUC_FATAL_ERROR_EVENT (1<<24)
-#define GEN9_GUC_NOTIFICATION_EVENT (1<<23)
+#define GEN9_GUC_TO_HOST_INT_EVENT (1 << 31)
+#define GEN9_GUC_EXEC_ERROR_EVENT (1 << 30)
+#define GEN9_GUC_DISPLAY_EVENT (1 << 29)
+#define GEN9_GUC_SEMA_SIGNAL_EVENT (1 << 28)
+#define GEN9_GUC_IOMMU_MSG_EVENT (1 << 27)
+#define GEN9_GUC_DB_RING_EVENT (1 << 26)
+#define GEN9_GUC_DMA_DONE_EVENT (1 << 25)
+#define GEN9_GUC_FATAL_ERROR_EVENT (1 << 24)
+#define GEN9_GUC_NOTIFICATION_EVENT (1 << 23)
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
@@ -6985,6 +7146,7 @@ enum {
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
#define GEN8_DE_PORT_IIR _MMIO(0x44448)
#define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define ICL_AUX_CHANNEL_E (1 << 29)
#define CNL_AUX_CHANNEL_F (1 << 28)
#define GEN9_AUX_CHANNEL_D (1 << 27)
#define GEN9_AUX_CHANNEL_C (1 << 26)
@@ -7011,9 +7173,16 @@ enum {
#define GEN8_PCU_IIR _MMIO(0x444e8)
#define GEN8_PCU_IER _MMIO(0x444ec)
+#define GEN11_GU_MISC_ISR _MMIO(0x444f0)
+#define GEN11_GU_MISC_IMR _MMIO(0x444f4)
+#define GEN11_GU_MISC_IIR _MMIO(0x444f8)
+#define GEN11_GU_MISC_IER _MMIO(0x444fc)
+#define GEN11_GU_MISC_GSE (1 << 27)
+
#define GEN11_GFX_MSTR_IRQ _MMIO(0x190010)
#define GEN11_MASTER_IRQ (1 << 31)
#define GEN11_PCU_IRQ (1 << 30)
+#define GEN11_GU_MISC_IRQ (1 << 29)
#define GEN11_DISPLAY_IRQ (1 << 16)
#define GEN11_GT_DW_IRQ(x) (1 << (x))
#define GEN11_GT_DW1_IRQ (1 << 1)
@@ -7024,11 +7193,40 @@ enum {
#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
#define GEN11_DE_PCH_IRQ (1 << 23)
#define GEN11_DE_MISC_IRQ (1 << 22)
+#define GEN11_DE_HPD_IRQ (1 << 21)
#define GEN11_DE_PORT_IRQ (1 << 20)
#define GEN11_DE_PIPE_C (1 << 18)
#define GEN11_DE_PIPE_B (1 << 17)
#define GEN11_DE_PIPE_A (1 << 16)
+#define GEN11_DE_HPD_ISR _MMIO(0x44470)
+#define GEN11_DE_HPD_IMR _MMIO(0x44474)
+#define GEN11_DE_HPD_IIR _MMIO(0x44478)
+#define GEN11_DE_HPD_IER _MMIO(0x4447c)
+#define GEN11_TC4_HOTPLUG (1 << 19)
+#define GEN11_TC3_HOTPLUG (1 << 18)
+#define GEN11_TC2_HOTPLUG (1 << 17)
+#define GEN11_TC1_HOTPLUG (1 << 16)
+#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC4_HOTPLUG | \
+ GEN11_TC3_HOTPLUG | \
+ GEN11_TC2_HOTPLUG | \
+ GEN11_TC1_HOTPLUG)
+#define GEN11_TBT4_HOTPLUG (1 << 3)
+#define GEN11_TBT3_HOTPLUG (1 << 2)
+#define GEN11_TBT2_HOTPLUG (1 << 1)
+#define GEN11_TBT1_HOTPLUG (1 << 0)
+#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT4_HOTPLUG | \
+ GEN11_TBT3_HOTPLUG | \
+ GEN11_TBT2_HOTPLUG | \
+ GEN11_TBT1_HOTPLUG)
+
+#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030)
+#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038)
+#define GEN11_HOTPLUG_CTL_ENABLE(tc_port) (8 << (tc_port) * 4)
+#define GEN11_HOTPLUG_CTL_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
+#define GEN11_HOTPLUG_CTL_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
+#define GEN11_HOTPLUG_CTL_NO_DETECT(tc_port) (0 << (tc_port) * 4)
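/*
 * Illustration (not part of the patch): each TC port owns a 4-bit
 * nibble of GEN11_TC_HOTPLUG_CTL (bit 3 = enable, bit 1 = long detect,
 * bit 0 = short detect).  A minimal decode sketch, assuming "val" was
 * read from the register:
 *
 *	if (val & GEN11_HOTPLUG_CTL_LONG_DETECT(tc_port))
 *		long_pulse = true;
 *	else if (val & GEN11_HOTPLUG_CTL_SHORT_DETECT(tc_port))
 *		short_pulse = true;
 */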
+
#define GEN11_GT_INTR_DW0 _MMIO(0x190018)
#define GEN11_CSME (31)
#define GEN11_GUNIT (28)
@@ -7043,7 +7241,7 @@ enum {
#define GEN11_VECS(x) (31 - (x))
#define GEN11_VCS(x) (x)
-#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + (x * 4))
+#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4))
#define GEN11_INTR_IDENTITY_REG0 _MMIO(0x190060)
#define GEN11_INTR_IDENTITY_REG1 _MMIO(0x190064)
@@ -7052,12 +7250,12 @@ enum {
#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff)
-#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + (x * 4))
+#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
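/*
 * Illustration (not part of the patch): a sketch of decoding an
 * identity-register value with the helpers above ("ident" being a
 * hypothetical read of GEN11_INTR_IDENTITY_REG(bank)):
 *
 *	u32 instance = GEN11_INTR_ENGINE_INSTANCE(ident);
 *	u16 intr = GEN11_INTR_ENGINE_INTR(ident);
 */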
#define GEN11_IIR_REG0_SELECTOR _MMIO(0x190070)
#define GEN11_IIR_REG1_SELECTOR _MMIO(0x190074)
-#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + (x * 4))
+#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
#define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030)
#define GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034)
@@ -7079,8 +7277,8 @@ enum {
#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT (1 << 25)
-#define ILK_DPARB_GATE (1<<22)
-#define ILK_VSDPFD_FULL (1<<21)
+#define ILK_DPARB_GATE (1 << 22)
+#define ILK_VSDPFD_FULL (1 << 21)
#define FUSE_STRAP _MMIO(0x42014)
#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
@@ -7130,31 +7328,31 @@ enum {
#define CHICKEN_TRANS_A 0x420c0
#define CHICKEN_TRANS_B 0x420c4
#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
-#define VSC_DATA_SEL_SOFTWARE_CONTROL (1<<25) /* GLK and CNL+ */
-#define DDI_TRAINING_OVERRIDE_ENABLE (1<<19)
-#define DDI_TRAINING_OVERRIDE_VALUE (1<<18)
-#define DDIE_TRAINING_OVERRIDE_ENABLE (1<<17) /* CHICKEN_TRANS_A only */
-#define DDIE_TRAINING_OVERRIDE_VALUE (1<<16) /* CHICKEN_TRANS_A only */
-#define PSR2_ADD_VERTICAL_LINE_COUNT (1<<15)
-#define PSR2_VSC_ENABLE_PROG_HEADER (1<<12)
+#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
+#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
+#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18)
+#define DDIE_TRAINING_OVERRIDE_ENABLE (1 << 17) /* CHICKEN_TRANS_A only */
+#define DDIE_TRAINING_OVERRIDE_VALUE (1 << 16) /* CHICKEN_TRANS_A only */
+#define PSR2_ADD_VERTICAL_LINE_COUNT (1 << 15)
+#define PSR2_VSC_ENABLE_PROG_HEADER (1 << 12)
#define DISP_ARB_CTL _MMIO(0x45000)
-#define DISP_FBC_MEMORY_WAKE (1<<31)
-#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
-#define DISP_FBC_WM_DIS (1<<15)
+#define DISP_FBC_MEMORY_WAKE (1 << 31)
+#define DISP_TILE_SURFACE_SWIZZLING (1 << 13)
+#define DISP_FBC_WM_DIS (1 << 15)
#define DISP_ARB_CTL2 _MMIO(0x45004)
-#define DISP_DATA_PARTITION_5_6 (1<<6)
-#define DISP_IPC_ENABLE (1<<3)
+#define DISP_DATA_PARTITION_5_6 (1 << 6)
+#define DISP_IPC_ENABLE (1 << 3)
#define DBUF_CTL _MMIO(0x45008)
#define DBUF_CTL_S1 _MMIO(0x45008)
#define DBUF_CTL_S2 _MMIO(0x44FE8)
-#define DBUF_POWER_REQUEST (1<<31)
-#define DBUF_POWER_STATE (1<<30)
+#define DBUF_POWER_REQUEST (1 << 31)
+#define DBUF_POWER_STATE (1 << 30)
#define GEN7_MSG_CTL _MMIO(0x45010)
-#define WAIT_FOR_PCH_RESET_ACK (1<<1)
-#define WAIT_FOR_PCH_FLR_ACK (1<<0)
+#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
+#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
-#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
@@ -7179,16 +7377,16 @@ enum {
#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
-#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14)
+#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1 << 14)
#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
-#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
-#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1<<10)
+#define GEN9_TSG_BARRIER_ACK_DISABLE (1 << 8)
+#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
-#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1<<0)
+#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
@@ -7197,22 +7395,27 @@ enum {
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
-# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
-# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
-#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
-# define GEN9_PBE_COMPRESSED_HASH_SELECTION (1<<13)
-# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
-# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
-# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
+ #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1 << 10) | (1 << 26))
+ #define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14)
+
+#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+ #define GEN9_PBE_COMPRESSED_HASH_SELECTION (1 << 13)
+ #define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1 << 12)
+ #define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8)
+ #define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0)
+
+#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
+ #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11)
#define HIZ_CHICKEN _MMIO(0x7018)
-# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
-# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3)
+# define CHV_HZ_8X8_MODE_IN_1X (1 << 15)
+# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1 << 3)
#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
-#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
+#define DISABLE_PIXEL_MASK_CAMMING (1 << 14)
#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
+#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
#define GEN7_L3SQCREG1 _MMIO(0xB010)
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -7230,7 +7433,7 @@ enum {
#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
-#define GEN7_L3AGDIS (1<<19)
+#define GEN7_L3AGDIS (1 << 19)
#define GEN7_L3CNTLREG2 _MMIO(0xB020)
#define GEN7_L3CNTLREG3 _MMIO(0xB024)
@@ -7240,7 +7443,7 @@ enum {
#define GEN11_I2M_WRITE_DISABLE (1 << 28)
#define GEN7_L3SQCREG4 _MMIO(0xb034)
-#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27)
#define GEN8_L3SQCREG4 _MMIO(0xb118)
#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
@@ -7251,12 +7454,12 @@ enum {
#define HDC_CHICKEN0 _MMIO(0x7300)
#define CNL_HDC_CHICKEN0 _MMIO(0xE5F0)
#define ICL_HDC_MODE _MMIO(0xE5F4)
-#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15)
-#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
-#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
-#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1<<5)
-#define HDC_FORCE_NON_COHERENT (1<<4)
-#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
+#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1 << 15)
+#define HDC_FENCE_DEST_SLM_DISABLE (1 << 14)
+#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1 << 11)
+#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1 << 5)
+#define HDC_FORCE_NON_COHERENT (1 << 4)
+#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
@@ -7269,13 +7472,21 @@ enum {
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
-#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1 << 11)
#define HSW_SCRATCH1 _MMIO(0xb038)
-#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
+#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1 << 27)
#define BDW_SCRATCH1 _MMIO(0xb11c)
-#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2)
+#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2)
+
+/* GEN11 chicken */
+#define _PIPEA_CHICKEN 0x70038
+#define _PIPEB_CHICKEN 0x71038
+#define _PIPEC_CHICKEN 0x72038
+#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
+#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
+ _PIPEB_CHICKEN)
/* PCH */
@@ -7320,7 +7531,7 @@ enum {
#define SDE_TRANSA_FIFO_UNDER (1 << 0)
#define SDE_TRANS_MASK (0x3f)
-/* south display engine interrupt: CPT/PPT */
+/* south display engine interrupt: CPT - CNP */
#define SDE_AUDIO_POWER_D_CPT (1 << 31)
#define SDE_AUDIO_POWER_C_CPT (1 << 30)
#define SDE_AUDIO_POWER_B_CPT (1 << 29)
@@ -7368,14 +7579,29 @@ enum {
SDE_FDI_RXB_CPT | \
SDE_FDI_RXA_CPT)
+/* south display engine interrupt: ICP */
+#define SDE_TC4_HOTPLUG_ICP (1 << 27)
+#define SDE_TC3_HOTPLUG_ICP (1 << 26)
+#define SDE_TC2_HOTPLUG_ICP (1 << 25)
+#define SDE_TC1_HOTPLUG_ICP (1 << 24)
+#define SDE_GMBUS_ICP (1 << 23)
+#define SDE_DDIB_HOTPLUG_ICP (1 << 17)
+#define SDE_DDIA_HOTPLUG_ICP (1 << 16)
+#define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \
+ SDE_DDIA_HOTPLUG_ICP)
+#define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \
+ SDE_TC3_HOTPLUG_ICP | \
+ SDE_TC2_HOTPLUG_ICP | \
+ SDE_TC1_HOTPLUG_ICP)
+
#define SDEISR _MMIO(0xc4000)
#define SDEIMR _MMIO(0xc4004)
#define SDEIIR _MMIO(0xc4008)
#define SDEIER _MMIO(0xc400c)
#define SERR_INT _MMIO(0xc4040)
-#define SERR_INT_POISON (1<<31)
-#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
+#define SERR_INT_POISON (1 << 31)
+#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
/* digital port hotplug */
#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
@@ -7428,6 +7654,134 @@ enum {
#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
+/* This register is a reuse of the PCH_PORT_HOTPLUG register. The
+ * functionality covered by PCH_PORT_HOTPLUG is split into
+ * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
+ */
+
+#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
+#define ICP_DDIB_HPD_ENABLE (1 << 7)
+#define ICP_DDIB_HPD_STATUS_MASK (3 << 4)
+#define ICP_DDIB_HPD_NO_DETECT (0 << 4)
+#define ICP_DDIB_HPD_SHORT_DETECT (1 << 4)
+#define ICP_DDIB_HPD_LONG_DETECT (2 << 4)
+#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4)
+#define ICP_DDIA_HPD_ENABLE (1 << 3)
+#define ICP_DDIA_HPD_STATUS_MASK (3 << 0)
+#define ICP_DDIA_HPD_NO_DETECT (0 << 0)
+#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0)
+#define ICP_DDIA_HPD_LONG_DETECT (2 << 0)
+#define ICP_DDIA_HPD_SHORT_LONG_DETECT (3 << 0)
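/*
 * Illustration (not part of the patch): a minimal sketch of decoding
 * the DDI A status field above, assuming "val" was read from
 * SHOTPLUG_CTL_DDI (the handlers are placeholders):
 *
 *	switch (val & ICP_DDIA_HPD_STATUS_MASK) {
 *	case ICP_DDIA_HPD_LONG_DETECT:
 *	case ICP_DDIA_HPD_SHORT_LONG_DETECT:
 *		handle_long_pulse();
 *		break;
 *	case ICP_DDIA_HPD_SHORT_DETECT:
 *		handle_short_pulse();
 *		break;
 *	}
 */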
+
+#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
+#define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4)
+/* Icelake DSC Rate Control Range Parameter Registers */
+#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
+#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40)
+#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define RC_BPG_OFFSET_SHIFT 10
+#define RC_MAX_QP_SHIFT 5
+#define RC_MIN_QP_SHIFT 0
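/*
 * Illustration (not part of the patch): each RC range parameter
 * presumably packs three fields with the shifts above.  A sketch, with
 * rc_min_qp/rc_max_qp/rc_bpg_offset as hypothetical inputs:
 *
 *	u32 param = (rc_bpg_offset << RC_BPG_OFFSET_SHIFT) |
 *		    (rc_max_qp << RC_MAX_QP_SHIFT) |
 *		    (rc_min_qp << RC_MIN_QP_SHIFT);
 */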
+
+#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248)
+#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48)
+#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250)
+#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50)
+#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258)
+#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58)
+#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
+
+#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
+#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
+
#define PCH_GPIOA _MMIO(0xc5010)
#define PCH_GPIOB _MMIO(0xc5014)
#define PCH_GPIOC _MMIO(0xc5018)
@@ -7444,46 +7798,46 @@ enum {
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pll) _MMIO(pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
-#define FP_CB_TUNE (0x3<<22)
+#define FP_CB_TUNE (0x3 << 22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
-#define PCH_FP0(pll) _MMIO(pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pll) _MMIO(pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST _MMIO(0xc606c)
#define PCH_DREF_CONTROL _MMIO(0xC6200)
#define DREF_CONTROL_MASK 0x7fc3
-#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
-#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
-#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13)
-#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
-#define DREF_SSC_SOURCE_DISABLE (0<<11)
-#define DREF_SSC_SOURCE_ENABLE (2<<11)
-#define DREF_SSC_SOURCE_MASK (3<<11)
-#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
-#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
-#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
-#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
-#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
-#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
-#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7)
-#define DREF_SSC4_DOWNSPREAD (0<<6)
-#define DREF_SSC4_CENTERSPREAD (1<<6)
-#define DREF_SSC1_DISABLE (0<<1)
-#define DREF_SSC1_ENABLE (1<<1)
+#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_MASK (3 << 13)
+#define DREF_SSC_SOURCE_DISABLE (0 << 11)
+#define DREF_SSC_SOURCE_ENABLE (2 << 11)
+#define DREF_SSC_SOURCE_MASK (3 << 11)
+#define DREF_NONSPREAD_SOURCE_DISABLE (0 << 9)
+#define DREF_NONSPREAD_CK505_ENABLE (1 << 9)
+#define DREF_NONSPREAD_SOURCE_ENABLE (2 << 9)
+#define DREF_NONSPREAD_SOURCE_MASK (3 << 9)
+#define DREF_SUPERSPREAD_SOURCE_DISABLE (0 << 7)
+#define DREF_SUPERSPREAD_SOURCE_ENABLE (2 << 7)
+#define DREF_SUPERSPREAD_SOURCE_MASK (3 << 7)
+#define DREF_SSC4_DOWNSPREAD (0 << 6)
+#define DREF_SSC4_CENTERSPREAD (1 << 6)
+#define DREF_SSC1_DISABLE (0 << 1)
+#define DREF_SSC1_ENABLE (1 << 1)
#define DREF_SSC4_DISABLE (0)
#define DREF_SSC4_ENABLE (1)
#define PCH_RAWCLK_FREQ _MMIO(0xc6204)
#define FDL_TP1_TIMER_SHIFT 12
-#define FDL_TP1_TIMER_MASK (3<<12)
+#define FDL_TP1_TIMER_MASK (3 << 12)
#define FDL_TP2_TIMER_SHIFT 10
-#define FDL_TP2_TIMER_MASK (3<<10)
+#define FDL_TP2_TIMER_MASK (3 << 10)
#define RAWCLK_FREQ_MASK 0x3ff
#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
#define CNP_RAWCLK_DIV(div) ((div) << 16)
@@ -7520,7 +7874,7 @@ enum {
#define TRANS_VBLANK_END_SHIFT 16
#define TRANS_VBLANK_START_SHIFT 0
#define _PCH_TRANS_VSYNC_A 0xe0014
-#define TRANS_VSYNC_END_SHIFT 16
+#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
@@ -7600,15 +7954,28 @@ enum {
#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
#define _HSW_VIDEO_DIP_GCP_B 0x61210
+/* Icelake PPS_DATA and _ECC DIP Registers.
+ * These are available for transcoders B, C and eDP.
+ * The _A variant is defined only so that _MMIO_TRANS2(),
+ * which computes offsets relative to transcoder A, resolves
+ * to the right location.
+ */
+
+#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350
+#define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350
+#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
+#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
+
#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
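+/*
+ * Illustrative sketch only (not part of this patch): a caller streaming
+ * a DSC picture parameter set dword-by-dword through these definitions
+ * would look roughly like
+ *
+ *	for (i = 0; i < num_dwords; i++)
+ *		I915_WRITE(ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i),
+ *			   pps_dword[i]);
+ *
+ * where num_dwords and pps_dword[] are hypothetical names for the
+ * payload being written.
+ */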
#define _HSW_STEREO_3D_CTL_A 0x70020
-#define S3D_ENABLE (1<<31)
+#define S3D_ENABLE (1 << 31)
#define _HSW_STEREO_3D_CTL_B 0x71020
#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A)
@@ -7651,156 +8018,156 @@ enum {
#define _PCH_TRANSBCONF 0xf1008
#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
-#define TRANS_DISABLE (0<<31)
-#define TRANS_ENABLE (1<<31)
-#define TRANS_STATE_MASK (1<<30)
-#define TRANS_STATE_DISABLE (0<<30)
-#define TRANS_STATE_ENABLE (1<<30)
-#define TRANS_FSYNC_DELAY_HB1 (0<<27)
-#define TRANS_FSYNC_DELAY_HB2 (1<<27)
-#define TRANS_FSYNC_DELAY_HB3 (2<<27)
-#define TRANS_FSYNC_DELAY_HB4 (3<<27)
-#define TRANS_INTERLACE_MASK (7<<21)
-#define TRANS_PROGRESSIVE (0<<21)
-#define TRANS_INTERLACED (3<<21)
-#define TRANS_LEGACY_INTERLACED_ILK (2<<21)
-#define TRANS_8BPC (0<<5)
-#define TRANS_10BPC (1<<5)
-#define TRANS_6BPC (2<<5)
-#define TRANS_12BPC (3<<5)
+#define TRANS_DISABLE (0 << 31)
+#define TRANS_ENABLE (1 << 31)
+#define TRANS_STATE_MASK (1 << 30)
+#define TRANS_STATE_DISABLE (0 << 30)
+#define TRANS_STATE_ENABLE (1 << 30)
+#define TRANS_FSYNC_DELAY_HB1 (0 << 27)
+#define TRANS_FSYNC_DELAY_HB2 (1 << 27)
+#define TRANS_FSYNC_DELAY_HB3 (2 << 27)
+#define TRANS_FSYNC_DELAY_HB4 (3 << 27)
+#define TRANS_INTERLACE_MASK (7 << 21)
+#define TRANS_PROGRESSIVE (0 << 21)
+#define TRANS_INTERLACED (3 << 21)
+#define TRANS_LEGACY_INTERLACED_ILK (2 << 21)
+#define TRANS_8BPC (0 << 5)
+#define TRANS_10BPC (1 << 5)
+#define TRANS_6BPC (2 << 5)
+#define TRANS_12BPC (3 << 5)
#define _TRANSA_CHICKEN1 0xf0060
#define _TRANSB_CHICKEN1 0xf1060
#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
-#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10)
-#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
+#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1 << 10)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1 << 4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
-#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)
-#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27)
-#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26)
-#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE (1 << 31)
+#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1 << 29)
+#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3 << 27)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1 << 26)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1 << 25)
#define SOUTH_CHICKEN1 _MMIO(0xc2000)
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
#define FDIA_PHASE_SYNC_SHIFT_EN 18
-#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_OVR(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8)
-#define SPT_PWM_GRANULARITY (1<<0)
+#define SPT_PWM_GRANULARITY (1 << 0)
#define SOUTH_CHICKEN2 _MMIO(0xc2004)
-#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
-#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
-#define LPT_PWM_GRANULARITY (1<<5)
-#define DPLS_EDP_PPS_FIX_DIS (1<<0)
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1 << 13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1 << 12)
+#define LPT_PWM_GRANULARITY (1 << 5)
+#define DPLS_EDP_PPS_FIX_DIS (1 << 0)
#define _FDI_RXA_CHICKEN 0xc200c
#define _FDI_RXB_CHICKEN 0xc2010
-#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
-#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
+#define FDI_RX_PHASE_SYNC_POINTER_OVR (1 << 1)
+#define FDI_RX_PHASE_SYNC_POINTER_EN (1 << 0)
#define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
-#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1<<31)
-#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
-#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
-#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
-#define CNP_PWM_CGE_GATING_DISABLE (1<<13)
-#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
+#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31)
+#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30)
+#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1 << 29)
+#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define CNP_PWM_CGE_GATING_DISABLE (1 << 13)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12)
/* CPU: FDI_TX */
#define _FDI_TXA_CTL 0x60100
#define _FDI_TXB_CTL 0x61100
#define FDI_TX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
-#define FDI_TX_DISABLE (0<<31)
-#define FDI_TX_ENABLE (1<<31)
-#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
-#define FDI_LINK_TRAIN_PATTERN_2 (1<<28)
-#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28)
-#define FDI_LINK_TRAIN_NONE (3<<28)
-#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
+#define FDI_TX_DISABLE (0 << 31)
+#define FDI_TX_ENABLE (1 << 31)
+#define FDI_LINK_TRAIN_PATTERN_1 (0 << 28)
+#define FDI_LINK_TRAIN_PATTERN_2 (1 << 28)
+#define FDI_LINK_TRAIN_PATTERN_IDLE (2 << 28)
+#define FDI_LINK_TRAIN_NONE (3 << 28)
+#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3 << 25)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3 << 22)
/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
SNB has different settings. */
/* SNB A-stepping */
-#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
-#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
-#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
-#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
/* SNB B-stepping */
-#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
-#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
-#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
-#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
-#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0 << 22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a << 22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39 << 22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38 << 22)
+#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f << 22)
#define FDI_DP_PORT_WIDTH_SHIFT 19
#define FDI_DP_PORT_WIDTH_MASK (7 << FDI_DP_PORT_WIDTH_SHIFT)
#define FDI_DP_PORT_WIDTH(width) (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
-#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
+#define FDI_TX_ENHANCE_FRAME_ENABLE (1 << 18)
/* Ironlake: hardwired to 1 */
-#define FDI_TX_PLL_ENABLE (1<<14)
+#define FDI_TX_PLL_ENABLE (1 << 14)
/* Ivybridge has different bits for lolz */
-#define FDI_LINK_TRAIN_PATTERN_1_IVB (0<<8)
-#define FDI_LINK_TRAIN_PATTERN_2_IVB (1<<8)
-#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2<<8)
-#define FDI_LINK_TRAIN_NONE_IVB (3<<8)
+#define FDI_LINK_TRAIN_PATTERN_1_IVB (0 << 8)
+#define FDI_LINK_TRAIN_PATTERN_2_IVB (1 << 8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2 << 8)
+#define FDI_LINK_TRAIN_NONE_IVB (3 << 8)
/* both Tx and Rx */
-#define FDI_COMPOSITE_SYNC (1<<11)
-#define FDI_LINK_TRAIN_AUTO (1<<10)
-#define FDI_SCRAMBLING_ENABLE (0<<7)
-#define FDI_SCRAMBLING_DISABLE (1<<7)
+#define FDI_COMPOSITE_SYNC (1 << 11)
+#define FDI_LINK_TRAIN_AUTO (1 << 10)
+#define FDI_SCRAMBLING_ENABLE (0 << 7)
+#define FDI_SCRAMBLING_DISABLE (1 << 7)
/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
#define _FDI_RXA_CTL 0xf000c
#define _FDI_RXB_CTL 0xf100c
#define FDI_RX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
-#define FDI_RX_ENABLE (1<<31)
+#define FDI_RX_ENABLE (1 << 31)
/* train, dp width same as FDI_TX */
-#define FDI_FS_ERRC_ENABLE (1<<27)
-#define FDI_FE_ERRC_ENABLE (1<<26)
-#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
-#define FDI_8BPC (0<<16)
-#define FDI_10BPC (1<<16)
-#define FDI_6BPC (2<<16)
-#define FDI_12BPC (3<<16)
-#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15)
-#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
-#define FDI_RX_PLL_ENABLE (1<<13)
-#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
-#define FDI_FE_ERR_CORRECT_ENABLE (1<<10)
-#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
-#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
-#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
-#define FDI_PCDCLK (1<<4)
+#define FDI_FS_ERRC_ENABLE (1 << 27)
+#define FDI_FE_ERRC_ENABLE (1 << 26)
+#define FDI_RX_POLARITY_REVERSED_LPT (1 << 16)
+#define FDI_8BPC (0 << 16)
+#define FDI_10BPC (1 << 16)
+#define FDI_6BPC (2 << 16)
+#define FDI_12BPC (3 << 16)
+#define FDI_RX_LINK_REVERSAL_OVERRIDE (1 << 15)
+#define FDI_DMI_LINK_REVERSE_MASK (1 << 14)
+#define FDI_RX_PLL_ENABLE (1 << 13)
+#define FDI_FS_ERR_CORRECT_ENABLE (1 << 11)
+#define FDI_FE_ERR_CORRECT_ENABLE (1 << 10)
+#define FDI_FS_ERR_REPORT_ENABLE (1 << 9)
+#define FDI_FE_ERR_REPORT_ENABLE (1 << 8)
+#define FDI_RX_ENHANCE_FRAME_ENABLE (1 << 6)
+#define FDI_PCDCLK (1 << 4)
/* CPT */
-#define FDI_AUTO_TRAINING (1<<10)
-#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
-#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8)
-#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
-#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
-#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
+#define FDI_AUTO_TRAINING (1 << 10)
+#define FDI_LINK_TRAIN_PATTERN_1_CPT (0 << 8)
+#define FDI_LINK_TRAIN_PATTERN_2_CPT (1 << 8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2 << 8)
+#define FDI_LINK_TRAIN_NORMAL_CPT (3 << 8)
+#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3 << 8)
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
-#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
-#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
-#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
-#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
-#define FDI_RX_TP1_TO_TP2_48 (2<<20)
-#define FDI_RX_TP1_TO_TP2_64 (3<<20)
-#define FDI_RX_FDI_DELAY_90 (0x90<<0)
+#define FDI_RX_PWRDN_LANE1_MASK (3 << 26)
+#define FDI_RX_PWRDN_LANE1_VAL(x) ((x) << 26)
+#define FDI_RX_PWRDN_LANE0_MASK (3 << 24)
+#define FDI_RX_PWRDN_LANE0_VAL(x) ((x) << 24)
+#define FDI_RX_TP1_TO_TP2_48 (2 << 20)
+#define FDI_RX_TP1_TO_TP2_64 (3 << 20)
+#define FDI_RX_FDI_DELAY_90 (0x90 << 0)
#define FDI_RX_MISC(pipe) _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define _FDI_RXA_TUSIZE1 0xf0030
@@ -7811,17 +8178,17 @@ enum {
#define FDI_RX_TUSIZE2(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
/* FDI_RX interrupt register format */
-#define FDI_RX_INTER_LANE_ALIGN (1<<10)
-#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */
-#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */
-#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7)
-#define FDI_RX_FS_CODE_ERR (1<<6)
-#define FDI_RX_FE_CODE_ERR (1<<5)
-#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4)
-#define FDI_RX_HDCP_LINK_FAIL (1<<3)
-#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2)
-#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
-#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
+#define FDI_RX_INTER_LANE_ALIGN (1 << 10)
+#define FDI_RX_SYMBOL_LOCK (1 << 9) /* train 2 */
+#define FDI_RX_BIT_LOCK (1 << 8) /* train 1 */
+#define FDI_RX_TRAIN_PATTERN_2_FAIL (1 << 7)
+#define FDI_RX_FS_CODE_ERR (1 << 6)
+#define FDI_RX_FE_CODE_ERR (1 << 5)
+#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1 << 4)
+#define FDI_RX_HDCP_LINK_FAIL (1 << 3)
+#define FDI_RX_PIXEL_FIFO_OVERFLOW (1 << 2)
+#define FDI_RX_CROSS_CLOCK_OVERFLOW (1 << 1)
+#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1 << 0)
#define _FDI_RXA_IIR 0xf0014
#define _FDI_RXA_IMR 0xf0018
@@ -7867,71 +8234,58 @@ enum {
#define PCH_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
/* CPT */
-#define PORT_TRANS_A_SEL_CPT 0
-#define PORT_TRANS_B_SEL_CPT (1<<29)
-#define PORT_TRANS_C_SEL_CPT (2<<29)
-#define PORT_TRANS_SEL_MASK (3<<29)
-#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
-#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
-#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
-#define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24)
-#define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16)
-
#define _TRANS_DP_CTL_A 0xe0300
#define _TRANS_DP_CTL_B 0xe1300
#define _TRANS_DP_CTL_C 0xe2300
#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
-#define TRANS_DP_OUTPUT_ENABLE (1<<31)
-#define TRANS_DP_PORT_SEL_B (0<<29)
-#define TRANS_DP_PORT_SEL_C (1<<29)
-#define TRANS_DP_PORT_SEL_D (2<<29)
-#define TRANS_DP_PORT_SEL_NONE (3<<29)
-#define TRANS_DP_PORT_SEL_MASK (3<<29)
-#define TRANS_DP_PIPE_TO_PORT(val) ((((val) & TRANS_DP_PORT_SEL_MASK) >> 29) + PORT_B)
-#define TRANS_DP_AUDIO_ONLY (1<<26)
-#define TRANS_DP_ENH_FRAMING (1<<18)
-#define TRANS_DP_8BPC (0<<9)
-#define TRANS_DP_10BPC (1<<9)
-#define TRANS_DP_6BPC (2<<9)
-#define TRANS_DP_12BPC (3<<9)
-#define TRANS_DP_BPC_MASK (3<<9)
-#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
+#define TRANS_DP_OUTPUT_ENABLE (1 << 31)
+#define TRANS_DP_PORT_SEL_MASK (3 << 29)
+#define TRANS_DP_PORT_SEL_NONE (3 << 29)
+#define TRANS_DP_PORT_SEL(port) (((port) - PORT_B) << 29)
+#define TRANS_DP_AUDIO_ONLY (1 << 26)
+#define TRANS_DP_ENH_FRAMING (1 << 18)
+#define TRANS_DP_8BPC (0 << 9)
+#define TRANS_DP_10BPC (1 << 9)
+#define TRANS_DP_6BPC (2 << 9)
+#define TRANS_DP_12BPC (3 << 9)
+#define TRANS_DP_BPC_MASK (3 << 9)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH (1 << 4)
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
-#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
+#define TRANS_DP_HSYNC_ACTIVE_HIGH (1 << 3)
#define TRANS_DP_HSYNC_ACTIVE_LOW 0
-#define TRANS_DP_SYNC_MASK (3<<3)
+#define TRANS_DP_SYNC_MASK (3 << 3)
/* SNB eDP training params */
/* SNB A-stepping */
-#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
-#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
-#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
/* SNB B-stepping */
-#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22)
-#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22)
-#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22)
-#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1 << 22)
+#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a << 22)
+#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39 << 22)
+#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38 << 22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f << 22)
/* IVB */
-#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
-#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
-#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
-#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f << 22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 << 22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e << 22)
/* legacy values */
-#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
-#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
-#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
-#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
-#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 << 22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 << 22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 << 22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 << 22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 << 22)
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f << 22)
#define VLV_PMWGICZ _MMIO(0x1300a4)
@@ -7978,7 +8332,7 @@ enum {
#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
#define FORCEWAKE_MT_ACK _MMIO(0x130040)
#define ECOBUS _MMIO(0xa180)
-#define FORCEWAKE_MT_ENABLE (1<<5)
+#define FORCEWAKE_MT_ENABLE (1 << 5)
#define VLV_SPAREG2H _MMIO(0xA194)
#define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xA2A0)
#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
@@ -7987,13 +8341,13 @@ enum {
#define GTFIFODBG _MMIO(0x120000)
#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
-#define GT_FIFO_SBDROPERR (1<<6)
-#define GT_FIFO_BLOBDROPERR (1<<5)
-#define GT_FIFO_SB_READ_ABORTERR (1<<4)
-#define GT_FIFO_DROPERR (1<<3)
-#define GT_FIFO_OVFERR (1<<2)
-#define GT_FIFO_IAWRERR (1<<1)
-#define GT_FIFO_IARDERR (1<<0)
+#define GT_FIFO_SBDROPERR (1 << 6)
+#define GT_FIFO_BLOBDROPERR (1 << 5)
+#define GT_FIFO_SB_READ_ABORTERR (1 << 4)
+#define GT_FIFO_DROPERR (1 << 3)
+#define GT_FIFO_OVFERR (1 << 2)
+#define GT_FIFO_IAWRERR (1 << 1)
+#define GT_FIFO_IARDERR (1 << 0)
#define GTFIFOCTL _MMIO(0x120008)
#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
@@ -8027,37 +8381,37 @@ enum {
# define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20)
#define GEN7_UCGCTL4 _MMIO(0x940c)
-#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
-#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14)
+#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1 << 25)
+#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1 << 14)
#define GEN6_RCGCTL1 _MMIO(0x9410)
#define GEN6_RCGCTL2 _MMIO(0x9414)
#define GEN6_RSTCTL _MMIO(0x9420)
#define GEN8_UCGCTL6 _MMIO(0x9430)
-#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24)
-#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
-#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28)
+#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1 << 24)
+#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28)
#define GEN6_GFXPAUSE _MMIO(0xA000)
#define GEN6_RPNSWREQ _MMIO(0xA008)
-#define GEN6_TURBO_DISABLE (1<<31)
-#define GEN6_FREQUENCY(x) ((x)<<25)
-#define HSW_FREQUENCY(x) ((x)<<24)
-#define GEN9_FREQUENCY(x) ((x)<<23)
-#define GEN6_OFFSET(x) ((x)<<19)
-#define GEN6_AGGRESSIVE_TURBO (0<<15)
+#define GEN6_TURBO_DISABLE (1 << 31)
+#define GEN6_FREQUENCY(x) ((x) << 25)
+#define HSW_FREQUENCY(x) ((x) << 24)
+#define GEN9_FREQUENCY(x) ((x) << 23)
+#define GEN6_OFFSET(x) ((x) << 19)
+#define GEN6_AGGRESSIVE_TURBO (0 << 15)
#define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C)
#define GEN6_RC_CONTROL _MMIO(0xA090)
-#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
-#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
-#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
-#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
-#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
-#define VLV_RC_CTL_CTX_RST_PARALLEL (1<<24)
-#define GEN7_RC_CTL_TO_MODE (1<<28)
-#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
-#define GEN6_RC_CTL_HW_ENABLE (1<<31)
+#define GEN6_RC_CTL_RC6pp_ENABLE (1 << 16)
+#define GEN6_RC_CTL_RC6p_ENABLE (1 << 17)
+#define GEN6_RC_CTL_RC6_ENABLE (1 << 18)
+#define GEN6_RC_CTL_RC1e_ENABLE (1 << 20)
+#define GEN6_RC_CTL_RC7_ENABLE (1 << 22)
+#define VLV_RC_CTL_CTX_RST_PARALLEL (1 << 24)
+#define GEN7_RC_CTL_TO_MODE (1 << 28)
+#define GEN6_RC_CTL_EI_MODE(x) ((x) << 27)
+#define GEN6_RC_CTL_HW_ENABLE (1 << 31)
#define GEN6_RP_DOWN_TIMEOUT _MMIO(0xA010)
#define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xA014)
#define GEN6_RPSTAT1 _MMIO(0xA01C)
@@ -8068,19 +8422,19 @@ enum {
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT)
#define GEN6_RP_CONTROL _MMIO(0xA024)
-#define GEN6_RP_MEDIA_TURBO (1<<11)
-#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
-#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
-#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
-#define GEN6_RP_MEDIA_HW_MODE (1<<9)
-#define GEN6_RP_MEDIA_SW_MODE (0<<9)
-#define GEN6_RP_MEDIA_IS_GFX (1<<8)
-#define GEN6_RP_ENABLE (1<<7)
-#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
-#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
-#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
-#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
-#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
+#define GEN6_RP_MEDIA_TURBO (1 << 11)
+#define GEN6_RP_MEDIA_MODE_MASK (3 << 9)
+#define GEN6_RP_MEDIA_HW_TURBO_MODE (3 << 9)
+#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2 << 9)
+#define GEN6_RP_MEDIA_HW_MODE (1 << 9)
+#define GEN6_RP_MEDIA_SW_MODE (0 << 9)
+#define GEN6_RP_MEDIA_IS_GFX (1 << 8)
+#define GEN6_RP_ENABLE (1 << 7)
+#define GEN6_RP_UP_IDLE_MIN (0x1 << 3)
+#define GEN6_RP_UP_BUSY_AVG (0x2 << 3)
+#define GEN6_RP_UP_BUSY_CONT (0x4 << 3)
+#define GEN6_RP_DOWN_IDLE_AVG (0x2 << 0)
+#define GEN6_RP_DOWN_IDLE_CONT (0x1 << 0)
#define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C)
#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030)
#define GEN6_RP_CUR_UP_EI _MMIO(0xA050)
@@ -8116,15 +8470,15 @@ enum {
#define VLV_RCEDATA _MMIO(0xA0BC)
#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
#define GEN6_PMINTRMSK _MMIO(0xA168)
-#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1<<31)
-#define ARAT_EXPIRED_INTRMSK (1<<9)
+#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1 << 31)
+#define ARAT_EXPIRED_INTRMSK (1 << 9)
#define GEN8_MISC_CTRL0 _MMIO(0xA180)
#define VLV_PWRDWNUPCTL _MMIO(0xA294)
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
#define GEN9_PG_ENABLE _MMIO(0xA210)
-#define GEN9_RENDER_PG_ENABLE (1<<0)
-#define GEN9_MEDIA_PG_ENABLE (1<<1)
+#define GEN9_RENDER_PG_ENABLE (1 << 0)
+#define GEN9_MEDIA_PG_ENABLE (1 << 1)
#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248)
#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250)
#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C)
@@ -8137,13 +8491,13 @@ enum {
#define GEN6_PMIMR _MMIO(0x44024) /* rps_lock */
#define GEN6_PMIIR _MMIO(0x44028)
#define GEN6_PMIER _MMIO(0x4402C)
-#define GEN6_PM_MBOX_EVENT (1<<25)
-#define GEN6_PM_THERMAL_EVENT (1<<24)
-#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6)
-#define GEN6_PM_RP_UP_THRESHOLD (1<<5)
-#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
-#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
-#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
+#define GEN6_PM_MBOX_EVENT (1 << 25)
+#define GEN6_PM_THERMAL_EVENT (1 << 24)
+#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6)
+#define GEN6_PM_RP_UP_THRESHOLD (1 << 5)
+#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4)
+#define GEN6_PM_RP_UP_EI_EXPIRED (1 << 2)
+#define GEN6_PM_RP_DOWN_EI_EXPIRED (1 << 1)
#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
@@ -8152,16 +8506,16 @@ enum {
#define GEN7_GT_SCRATCH_REG_NUM 8
#define VLV_GTLC_SURVIVABILITY_REG _MMIO(0x130098)
-#define VLV_GFX_CLK_STATUS_BIT (1<<3)
-#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2)
+#define VLV_GFX_CLK_STATUS_BIT (1 << 3)
+#define VLV_GFX_CLK_FORCE_ON_BIT (1 << 2)
#define GEN6_GT_GFX_RC6_LOCKED _MMIO(0x138104)
#define VLV_COUNTER_CONTROL _MMIO(0x138104)
-#define VLV_COUNT_RANGE_HIGH (1<<15)
-#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
-#define VLV_RENDER_RC0_COUNT_EN (1<<4)
-#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
-#define VLV_RENDER_RC6_COUNT_EN (1<<0)
+#define VLV_COUNT_RANGE_HIGH (1 << 15)
+#define VLV_MEDIA_RC0_COUNT_EN (1 << 5)
+#define VLV_RENDER_RC0_COUNT_EN (1 << 4)
+#define VLV_MEDIA_RC6_COUNT_EN (1 << 1)
+#define VLV_RENDER_RC6_COUNT_EN (1 << 0)
#define GEN6_GT_GFX_RC6 _MMIO(0x138108)
#define VLV_GT_RENDER_RC6 _MMIO(0x138108)
#define VLV_GT_MEDIA_RC6 _MMIO(0x13810C)
@@ -8172,7 +8526,7 @@ enum {
#define VLV_MEDIA_C0_COUNT _MMIO(0x13811C)
#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
-#define GEN6_PCODE_READY (1<<31)
+#define GEN6_PCODE_READY (1 << 31)
#define GEN6_PCODE_ERROR_MASK 0xFF
#define GEN6_PCODE_SUCCESS 0x0
#define GEN6_PCODE_ILLEGAL_CMD 0x1
@@ -8216,7 +8570,7 @@ enum {
#define GEN6_PCODE_DATA1 _MMIO(0x13812C)
#define GEN6_GT_CORE_STATUS _MMIO(0x138060)
-#define GEN6_CORE_CPD_STATE_MASK (7<<4)
+#define GEN6_CORE_CPD_STATE_MASK (7 << 4)
#define GEN6_RCn_MASK 7
#define GEN6_RC0 0
#define GEN6_RC3 2
@@ -8228,26 +8582,26 @@ enum {
#define CHV_POWER_SS0_SIG1 _MMIO(0xa720)
#define CHV_POWER_SS1_SIG1 _MMIO(0xa728)
-#define CHV_SS_PG_ENABLE (1<<1)
-#define CHV_EU08_PG_ENABLE (1<<9)
-#define CHV_EU19_PG_ENABLE (1<<17)
-#define CHV_EU210_PG_ENABLE (1<<25)
+#define CHV_SS_PG_ENABLE (1 << 1)
+#define CHV_EU08_PG_ENABLE (1 << 9)
+#define CHV_EU19_PG_ENABLE (1 << 17)
+#define CHV_EU210_PG_ENABLE (1 << 25)
#define CHV_POWER_SS0_SIG2 _MMIO(0xa724)
#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c)
-#define CHV_EU311_PG_ENABLE (1<<1)
+#define CHV_EU311_PG_ENABLE (1 << 1)
-#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice)*0x4)
+#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice) * 0x4)
#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \
((slice) % 3) * 0x4)
#define GEN9_PGCTL_SLICE_ACK (1 << 0)
-#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2))
+#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice) * 2))
#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
-#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice)*0x8)
+#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice) * 0x8)
#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
((slice) % 3) * 0x8)
-#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice)*0x8)
+#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice) * 0x8)
#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
((slice) % 3) * 0x8)
#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
@@ -8260,10 +8614,10 @@ enum {
#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
#define GEN7_MISCCPCTL _MMIO(0x9424)
-#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
-#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2)
-#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4)
-#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1<<6)
+#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0)
+#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2)
+#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4)
+#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1 << 6)
#define GEN8_GARBCNTL _MMIO(0xB004)
#define GEN9_GAPS_TSV_CREDIT_DISABLE (1 << 7)
@@ -8292,61 +8646,62 @@ enum {
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
-#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
-#define GEN7_PARITY_ERROR_VALID (1<<13)
-#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
-#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8)
+#define GEN7_L3CDERRST1_ROW_MASK (0x7ff << 14)
+#define GEN7_PARITY_ERROR_VALID (1 << 13)
+#define GEN7_L3CDERRST1_BANK_MASK (3 << 11)
+#define GEN7_L3CDERRST1_SUBBANK_MASK (7 << 8)
#define GEN7_PARITY_ERROR_ROW(reg) \
- ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14)
+ (((reg) & GEN7_L3CDERRST1_ROW_MASK) >> 14)
#define GEN7_PARITY_ERROR_BANK(reg) \
- ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11)
+ (((reg) & GEN7_L3CDERRST1_BANK_MASK) >> 11)
#define GEN7_PARITY_ERROR_SUBBANK(reg) \
- ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
-#define GEN7_L3CDERRST1_ENABLE (1<<7)
+ (((reg) & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
+#define GEN7_L3CDERRST1_ENABLE (1 << 7)
#define GEN7_L3LOG(slice, i) _MMIO(0xB070 + (slice) * 0x200 + (i) * 4)
#define GEN7_L3LOG_SIZE 0x80
#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */
#define GEN7_HALF_SLICE_CHICKEN1_GT2 _MMIO(0xf100)
-#define GEN7_MAX_PS_THREAD_DEP (8<<12)
-#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
-#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1<<4)
-#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+#define GEN7_MAX_PS_THREAD_DEP (8 << 12)
+#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1 << 10)
+#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1 << 4)
+#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1 << 3)
#define GEN9_HALF_SLICE_CHICKEN5 _MMIO(0xe188)
-#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
-#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
+#define GEN9_DG_MIRROR_FIX_ENABLE (1 << 5)
+#define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3)
#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
-#define FLOW_CONTROL_ENABLE (1<<15)
-#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
-#define STALL_DOP_GATING_DISABLE (1<<5)
-#define THROTTLE_12_5 (7<<2)
-#define DISABLE_EARLY_EOT (1<<1)
+#define FLOW_CONTROL_ENABLE (1 << 15)
+#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1 << 8)
+#define STALL_DOP_GATING_DISABLE (1 << 5)
+#define THROTTLE_12_5 (7 << 2)
+#define DISABLE_EARLY_EOT (1 << 1)
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
-#define DOP_CLOCK_GATING_DISABLE (1<<0)
-#define PUSH_CONSTANT_DEREF_DISABLE (1<<8)
+#define DOP_CLOCK_GATING_DISABLE (1 << 0)
+#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
+#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
#define HALF_SLICE_CHICKEN2 _MMIO(0xe180)
-#define GEN8_ST_PO_DISABLE (1<<13)
+#define GEN8_ST_PO_DISABLE (1 << 13)
#define HALF_SLICE_CHICKEN3 _MMIO(0xe184)
-#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
-#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
-#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5)
-#define CNL_FAST_ANISO_L1_BANKING_FIX (1<<4)
-#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
+#define HSW_SAMPLE_C_PERFORMANCE (1 << 9)
+#define GEN8_CENTROID_PIXEL_OPT_DIS (1 << 8)
+#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1 << 5)
+#define CNL_FAST_ANISO_L1_BANKING_FIX (1 << 4)
+#define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1)
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
-#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1<<8)
-#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
-#define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2)
+#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1 << 8)
+#define GEN9_ENABLE_YV12_BUGFIX (1 << 4)
+#define GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2)
/* Audio */
#define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
@@ -8478,6 +8833,14 @@ enum {
#define _HSW_PWR_WELL_CTL3 0x45408
#define _HSW_PWR_WELL_CTL4 0x4540C
+#define _ICL_PWR_WELL_CTL_AUX1 0x45440
+#define _ICL_PWR_WELL_CTL_AUX2 0x45444
+#define _ICL_PWR_WELL_CTL_AUX4 0x4544C
+
+#define _ICL_PWR_WELL_CTL_DDI1 0x45450
+#define _ICL_PWR_WELL_CTL_DDI2 0x45454
+#define _ICL_PWR_WELL_CTL_DDI4 0x4545C
+
/*
* Each power well control register contains up to 16 (request, status) HW
* flag tuples. The register index and HW flag shift is determined by the
@@ -8487,21 +8850,27 @@ enum {
*/
#define _HSW_PW_REG_IDX(pw) ((pw) >> 4)
#define _HSW_PW_SHIFT(pw) (((pw) & 0xf) * 2)
-/* TODO: Add all PWR_WELL_CTL registers below for new platforms */
#define HSW_PWR_WELL_CTL_BIOS(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
- _HSW_PWR_WELL_CTL1))
+ _HSW_PWR_WELL_CTL1, \
+ _ICL_PWR_WELL_CTL_AUX1, \
+ _ICL_PWR_WELL_CTL_DDI1))
#define HSW_PWR_WELL_CTL_DRIVER(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
- _HSW_PWR_WELL_CTL2))
+ _HSW_PWR_WELL_CTL2, \
+ _ICL_PWR_WELL_CTL_AUX2, \
+ _ICL_PWR_WELL_CTL_DDI2))
+/* KVMR doesn't have a reg for AUX or DDI power well control */
#define HSW_PWR_WELL_CTL_KVMR _MMIO(_HSW_PWR_WELL_CTL3)
#define HSW_PWR_WELL_CTL_DEBUG(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
- _HSW_PWR_WELL_CTL4))
+ _HSW_PWR_WELL_CTL4, \
+ _ICL_PWR_WELL_CTL_AUX4, \
+ _ICL_PWR_WELL_CTL_DDI4))
#define HSW_PWR_WELL_CTL_REQ(pw) (1 << (_HSW_PW_SHIFT(pw) + 1))
#define HSW_PWR_WELL_CTL_STATE(pw) (1 << _HSW_PW_SHIFT(pw))
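+/*
+ * Worked example (illustrative, derived from the two helpers above):
+ * for a power well id of 0x12, _HSW_PW_REG_IDX() is 1 - selecting the
+ * second register in the _PICK() lists - and _HSW_PW_SHIFT() is
+ * (0x2 * 2) = 4, so HSW_PWR_WELL_CTL_STATE() is bit 4 and
+ * HSW_PWR_WELL_CTL_REQ() is bit 5 of that register.
+ */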
#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
-#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
-#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
-#define HSW_PWR_WELL_FORCE_ON (1<<19)
+#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1 << 31)
+#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1 << 20)
+#define HSW_PWR_WELL_FORCE_ON (1 << 19)
#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
/* SKL Fuse Status */
@@ -8512,9 +8881,11 @@ enum skl_power_gate {
};
#define SKL_FUSE_STATUS _MMIO(0x42000)
-#define SKL_FUSE_DOWNLOAD_STATUS (1<<31)
+#define SKL_FUSE_DOWNLOAD_STATUS (1 << 31)
/* PG0 (HW control->no power well ID), PG1..PG2 (SKL_DISP_PW1..SKL_DISP_PW2) */
#define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1)
+/* PG0 (HW control->no power well ID), PG1..PG4 (ICL_DISP_PW1..ICL_DISP_PW4) */
+#define ICL_PW_TO_PG(pw) ((pw) - ICL_DISP_PW_1 + SKL_PG1)
#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
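+/*
+ * Worked example (illustrative, assuming SKL_PG1 == 1 in
+ * enum skl_power_gate): SKL_PW_TO_PG(SKL_DISP_PW_2) maps to SKL_PG2,
+ * whose distribution status is then bit (27 - 2) = 25 of
+ * SKL_FUSE_STATUS via SKL_FUSE_PG_DIST_STATUS().
+ */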
#define _CNL_AUX_REG_IDX(pw) ((pw) - 9)
@@ -8527,8 +8898,8 @@ enum skl_power_gate {
_CNL_AUX_ANAOVRD1_C, \
_CNL_AUX_ANAOVRD1_D, \
_CNL_AUX_ANAOVRD1_F))
-#define CNL_AUX_ANAOVRD1_ENABLE (1<<16)
-#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1<<23)
+#define CNL_AUX_ANAOVRD1_ENABLE (1 << 16)
+#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23)
/* HDCP Key Registers */
#define HDCP_KEY_CONF _MMIO(0x66c00)
@@ -8573,7 +8944,7 @@ enum skl_power_gate {
#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C)
#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10)
#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14)
-#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + h * 4))
+#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4))
#define HDCP_SHA_TEXT _MMIO(0x66d18)
/* HDCP Auth Registers */
@@ -8589,7 +8960,7 @@ enum skl_power_gate {
_PORTC_HDCP_AUTHENC, \
_PORTD_HDCP_AUTHENC, \
_PORTE_HDCP_AUTHENC, \
- _PORTF_HDCP_AUTHENC) + x)
+ _PORTF_HDCP_AUTHENC) + (x))
#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
#define HDCP_CONF_CAPTURE_AN BIT(0)
#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0))
@@ -8610,7 +8981,7 @@ enum skl_power_gate {
#define HDCP_STATUS_R0_READY BIT(18)
#define HDCP_STATUS_AN_READY BIT(17)
#define HDCP_STATUS_CIPHER BIT(16)
-#define HDCP_STATUS_FRAME_CNT(x) ((x >> 8) & 0xff)
+#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
@@ -8619,37 +8990,37 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
-#define TRANS_DDI_FUNC_ENABLE (1<<31)
+#define TRANS_DDI_FUNC_ENABLE (1 << 31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_PORT_MASK (7 << 28)
#define TRANS_DDI_PORT_SHIFT 28
-#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
-#define TRANS_DDI_PORT_NONE (0<<28)
-#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
-#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
-#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
-#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
-#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
-#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
-#define TRANS_DDI_BPC_MASK (7<<20)
-#define TRANS_DDI_BPC_8 (0<<20)
-#define TRANS_DDI_BPC_10 (1<<20)
-#define TRANS_DDI_BPC_6 (2<<20)
-#define TRANS_DDI_BPC_12 (3<<20)
-#define TRANS_DDI_PVSYNC (1<<17)
-#define TRANS_DDI_PHSYNC (1<<16)
-#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
-#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
-#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
-#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
-#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
-#define TRANS_DDI_HDCP_SIGNALLING (1<<9)
-#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
-#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7)
-#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6)
-#define TRANS_DDI_BFI_ENABLE (1<<4)
-#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1<<4)
-#define TRANS_DDI_HDMI_SCRAMBLING (1<<0)
+#define TRANS_DDI_SELECT_PORT(x) ((x) << 28)
+#define TRANS_DDI_PORT_NONE (0 << 28)
+#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
+#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2 << 24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3 << 24)
+#define TRANS_DDI_MODE_SELECT_FDI (4 << 24)
+#define TRANS_DDI_BPC_MASK (7 << 20)
+#define TRANS_DDI_BPC_8 (0 << 20)
+#define TRANS_DDI_BPC_10 (1 << 20)
+#define TRANS_DDI_BPC_6 (2 << 20)
+#define TRANS_DDI_BPC_12 (3 << 20)
+#define TRANS_DDI_PVSYNC (1 << 17)
+#define TRANS_DDI_PHSYNC (1 << 16)
+#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
+#define TRANS_DDI_HDCP_SIGNALLING (1 << 9)
+#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
+#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
+#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
+#define TRANS_DDI_BFI_ENABLE (1 << 4)
+#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
+#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
| TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
| TRANS_DDI_HDMI_SCRAMBLING)
@@ -8658,28 +9029,29 @@ enum skl_power_gate {
#define _DP_TP_CTL_A 0x64040
#define _DP_TP_CTL_B 0x64140
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
-#define DP_TP_CTL_ENABLE (1<<31)
-#define DP_TP_CTL_MODE_SST (0<<27)
-#define DP_TP_CTL_MODE_MST (1<<27)
-#define DP_TP_CTL_FORCE_ACT (1<<25)
-#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
-#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
-#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
-#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
-#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
-#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
-#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
-#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
-#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
+#define DP_TP_CTL_ENABLE (1 << 31)
+#define DP_TP_CTL_MODE_SST (0 << 27)
+#define DP_TP_CTL_MODE_MST (1 << 27)
+#define DP_TP_CTL_FORCE_ACT (1 << 25)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1 << 18)
+#define DP_TP_CTL_FDI_AUTOTRAIN (1 << 15)
+#define DP_TP_CTL_LINK_TRAIN_MASK (7 << 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 (0 << 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 (1 << 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 (4 << 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT4 (5 << 8)
+#define DP_TP_CTL_LINK_TRAIN_IDLE (2 << 8)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL (3 << 8)
+#define DP_TP_CTL_SCRAMBLE_DISABLE (1 << 7)
/* DisplayPort Transport Status */
#define _DP_TP_STATUS_A 0x64044
#define _DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
-#define DP_TP_STATUS_IDLE_DONE (1<<25)
-#define DP_TP_STATUS_ACT_SENT (1<<24)
-#define DP_TP_STATUS_MODE_STATUS_MST (1<<23)
-#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+#define DP_TP_STATUS_IDLE_DONE (1 << 25)
+#define DP_TP_STATUS_ACT_SENT (1 << 24)
+#define DP_TP_STATUS_MODE_STATUS_MST (1 << 23)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1 << 12)
#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8)
#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4)
#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0)
@@ -8688,16 +9060,16 @@ enum skl_power_gate {
#define _DDI_BUF_CTL_A 0x64000
#define _DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
-#define DDI_BUF_CTL_ENABLE (1<<31)
+#define DDI_BUF_CTL_ENABLE (1 << 31)
#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
-#define DDI_BUF_EMP_MASK (0xf<<24)
-#define DDI_BUF_PORT_REVERSAL (1<<16)
-#define DDI_BUF_IS_IDLE (1<<7)
-#define DDI_A_4_LANES (1<<4)
+#define DDI_BUF_EMP_MASK (0xf << 24)
+#define DDI_BUF_PORT_REVERSAL (1 << 16)
+#define DDI_BUF_IS_IDLE (1 << 7)
+#define DDI_A_4_LANES (1 << 4)
#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
#define DDI_PORT_WIDTH_MASK (7 << 1)
#define DDI_PORT_WIDTH_SHIFT 1
-#define DDI_INIT_DISPLAY_DETECTED (1<<0)
+#define DDI_INIT_DISPLAY_DETECTED (1 << 0)
/* DDI Buffer Translations */
#define _DDI_BUF_TRANS_A 0x64E00
@@ -8712,95 +9084,99 @@ enum skl_power_gate {
#define SBI_ADDR _MMIO(0xC6000)
#define SBI_DATA _MMIO(0xC6004)
#define SBI_CTL_STAT _MMIO(0xC6008)
-#define SBI_CTL_DEST_ICLK (0x0<<16)
-#define SBI_CTL_DEST_MPHY (0x1<<16)
-#define SBI_CTL_OP_IORD (0x2<<8)
-#define SBI_CTL_OP_IOWR (0x3<<8)
-#define SBI_CTL_OP_CRRD (0x6<<8)
-#define SBI_CTL_OP_CRWR (0x7<<8)
-#define SBI_RESPONSE_FAIL (0x1<<1)
-#define SBI_RESPONSE_SUCCESS (0x0<<1)
-#define SBI_BUSY (0x1<<0)
-#define SBI_READY (0x0<<0)
+#define SBI_CTL_DEST_ICLK (0x0 << 16)
+#define SBI_CTL_DEST_MPHY (0x1 << 16)
+#define SBI_CTL_OP_IORD (0x2 << 8)
+#define SBI_CTL_OP_IOWR (0x3 << 8)
+#define SBI_CTL_OP_CRRD (0x6 << 8)
+#define SBI_CTL_OP_CRWR (0x7 << 8)
+#define SBI_RESPONSE_FAIL (0x1 << 1)
+#define SBI_RESPONSE_SUCCESS (0x0 << 1)
+#define SBI_BUSY (0x1 << 0)
+#define SBI_READY (0x0 << 0)
/* SBI offsets */
#define SBI_SSCDIVINTPHASE 0x0200
#define SBI_SSCDIVINTPHASE6 0x0600
#define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1
-#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f<<1)
-#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f << 1)
+#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x) << 1)
#define SBI_SSCDIVINTPHASE_INCVAL_SHIFT 8
-#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f<<8)
-#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
-#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
-#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f << 8)
+#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x) << 8)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x) << 15)
+#define SBI_SSCDIVINTPHASE_PROPAGATE (1 << 0)
#define SBI_SSCDITHPHASE 0x0204
#define SBI_SSCCTL 0x020c
#define SBI_SSCCTL6 0x060C
-#define SBI_SSCCTL_PATHALT (1<<3)
-#define SBI_SSCCTL_DISABLE (1<<0)
+#define SBI_SSCCTL_PATHALT (1 << 3)
+#define SBI_SSCCTL_DISABLE (1 << 0)
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4
-#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1<<4)
-#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
+#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1 << 4)
+#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x) << 4)
#define SBI_DBUFF0 0x2a00
#define SBI_GEN0 0x1f00
-#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0)
+#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1 << 0)
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE _MMIO(0xC6020)
-#define PIXCLK_GATE_UNGATE (1<<0)
-#define PIXCLK_GATE_GATE (0<<0)
+#define PIXCLK_GATE_UNGATE (1 << 0)
+#define PIXCLK_GATE_GATE (0 << 0)
/* SPLL */
#define SPLL_CTL _MMIO(0x46020)
-#define SPLL_PLL_ENABLE (1<<31)
-#define SPLL_PLL_SSC (1<<28)
-#define SPLL_PLL_NON_SSC (2<<28)
-#define SPLL_PLL_LCPLL (3<<28)
-#define SPLL_PLL_REF_MASK (3<<28)
-#define SPLL_PLL_FREQ_810MHz (0<<26)
-#define SPLL_PLL_FREQ_1350MHz (1<<26)
-#define SPLL_PLL_FREQ_2700MHz (2<<26)
-#define SPLL_PLL_FREQ_MASK (3<<26)
+#define SPLL_PLL_ENABLE (1 << 31)
+#define SPLL_PLL_SSC (1 << 28)
+#define SPLL_PLL_NON_SSC (2 << 28)
+#define SPLL_PLL_LCPLL (3 << 28)
+#define SPLL_PLL_REF_MASK (3 << 28)
+#define SPLL_PLL_FREQ_810MHz (0 << 26)
+#define SPLL_PLL_FREQ_1350MHz (1 << 26)
+#define SPLL_PLL_FREQ_2700MHz (2 << 26)
+#define SPLL_PLL_FREQ_MASK (3 << 26)
/* WRPLL */
#define _WRPLL_CTL1 0x46040
#define _WRPLL_CTL2 0x46060
#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
-#define WRPLL_PLL_ENABLE (1<<31)
-#define WRPLL_PLL_SSC (1<<28)
-#define WRPLL_PLL_NON_SSC (2<<28)
-#define WRPLL_PLL_LCPLL (3<<28)
-#define WRPLL_PLL_REF_MASK (3<<28)
+#define WRPLL_PLL_ENABLE (1 << 31)
+#define WRPLL_PLL_SSC (1 << 28)
+#define WRPLL_PLL_NON_SSC (2 << 28)
+#define WRPLL_PLL_LCPLL (3 << 28)
+#define WRPLL_PLL_REF_MASK (3 << 28)
/* WRPLL divider programming */
-#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_REFERENCE(x) ((x) << 0)
#define WRPLL_DIVIDER_REF_MASK (0xff)
-#define WRPLL_DIVIDER_POST(x) ((x)<<8)
-#define WRPLL_DIVIDER_POST_MASK (0x3f<<8)
+#define WRPLL_DIVIDER_POST(x) ((x) << 8)
+#define WRPLL_DIVIDER_POST_MASK (0x3f << 8)
#define WRPLL_DIVIDER_POST_SHIFT 8
-#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x) << 16)
#define WRPLL_DIVIDER_FB_SHIFT 16
-#define WRPLL_DIVIDER_FB_MASK (0xff<<16)
+#define WRPLL_DIVIDER_FB_MASK (0xff << 16)
/* Port clock selection */
#define _PORT_CLK_SEL_A 0x46100
#define _PORT_CLK_SEL_B 0x46104
#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
-#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
-#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
-#define PORT_CLK_SEL_LCPLL_810 (2<<29)
-#define PORT_CLK_SEL_SPLL (3<<29)
-#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29)
-#define PORT_CLK_SEL_WRPLL1 (4<<29)
-#define PORT_CLK_SEL_WRPLL2 (5<<29)
-#define PORT_CLK_SEL_NONE (7<<29)
-#define PORT_CLK_SEL_MASK (7<<29)
+#define PORT_CLK_SEL_LCPLL_2700 (0 << 29)
+#define PORT_CLK_SEL_LCPLL_1350 (1 << 29)
+#define PORT_CLK_SEL_LCPLL_810 (2 << 29)
+#define PORT_CLK_SEL_SPLL (3 << 29)
+#define PORT_CLK_SEL_WRPLL(pll) (((pll) + 4) << 29)
+#define PORT_CLK_SEL_WRPLL1 (4 << 29)
+#define PORT_CLK_SEL_WRPLL2 (5 << 29)
+#define PORT_CLK_SEL_NONE (7 << 29)
+#define PORT_CLK_SEL_MASK (7 << 29)
/* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */
#define DDI_CLK_SEL(port) PORT_CLK_SEL(port)
#define DDI_CLK_SEL_NONE (0x0 << 28)
#define DDI_CLK_SEL_MG (0x8 << 28)
+#define DDI_CLK_SEL_TBT_162 (0xC << 28)
+#define DDI_CLK_SEL_TBT_270 (0xD << 28)
+#define DDI_CLK_SEL_TBT_540 (0xE << 28)
+#define DDI_CLK_SEL_TBT_810 (0xF << 28)
#define DDI_CLK_SEL_MASK (0xF << 28)
/* Transcoder clock selection */
@@ -8808,8 +9184,8 @@ enum skl_power_gate {
#define _TRANS_CLK_SEL_B 0x46144
#define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
/* For each transcoder, we need to select the corresponding port clock */
-#define TRANS_CLK_SEL_DISABLED (0x0<<29)
-#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
+#define TRANS_CLK_SEL_DISABLED (0x0 << 29)
+#define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29)
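+/*
+ * E.g. (illustrative, assuming enum port starts at PORT_A == 0):
+ * TRANS_CLK_SEL_PORT(PORT_A) programs (1 << 29), which keeps the
+ * value 0 free for TRANS_CLK_SEL_DISABLED.
+ */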
#define CDCLK_FREQ _MMIO(0x46200)
@@ -8819,28 +9195,28 @@ enum skl_power_gate {
#define _TRANS_EDP_MSA_MISC 0x6f410
#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
-#define TRANS_MSA_SYNC_CLK (1<<0)
-#define TRANS_MSA_6_BPC (0<<5)
-#define TRANS_MSA_8_BPC (1<<5)
-#define TRANS_MSA_10_BPC (2<<5)
-#define TRANS_MSA_12_BPC (3<<5)
-#define TRANS_MSA_16_BPC (4<<5)
+#define TRANS_MSA_SYNC_CLK (1 << 0)
+#define TRANS_MSA_6_BPC (0 << 5)
+#define TRANS_MSA_8_BPC (1 << 5)
+#define TRANS_MSA_10_BPC (2 << 5)
+#define TRANS_MSA_12_BPC (3 << 5)
+#define TRANS_MSA_16_BPC (4 << 5)
/* LCPLL Control */
#define LCPLL_CTL _MMIO(0x130040)
-#define LCPLL_PLL_DISABLE (1<<31)
-#define LCPLL_PLL_LOCK (1<<30)
-#define LCPLL_CLK_FREQ_MASK (3<<26)
-#define LCPLL_CLK_FREQ_450 (0<<26)
-#define LCPLL_CLK_FREQ_54O_BDW (1<<26)
-#define LCPLL_CLK_FREQ_337_5_BDW (2<<26)
-#define LCPLL_CLK_FREQ_675_BDW (3<<26)
-#define LCPLL_CD_CLOCK_DISABLE (1<<25)
-#define LCPLL_ROOT_CD_CLOCK_DISABLE (1<<24)
-#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
-#define LCPLL_POWER_DOWN_ALLOW (1<<22)
-#define LCPLL_CD_SOURCE_FCLK (1<<21)
-#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
+#define LCPLL_PLL_DISABLE (1 << 31)
+#define LCPLL_PLL_LOCK (1 << 30)
+#define LCPLL_CLK_FREQ_MASK (3 << 26)
+#define LCPLL_CLK_FREQ_450 (0 << 26)
+#define LCPLL_CLK_FREQ_54O_BDW (1 << 26)
+#define LCPLL_CLK_FREQ_337_5_BDW (2 << 26)
+#define LCPLL_CLK_FREQ_675_BDW (3 << 26)
+#define LCPLL_CD_CLOCK_DISABLE (1 << 25)
+#define LCPLL_ROOT_CD_CLOCK_DISABLE (1 << 24)
+#define LCPLL_CD2X_CLOCK_DISABLE (1 << 23)
+#define LCPLL_POWER_DOWN_ALLOW (1 << 22)
+#define LCPLL_CD_SOURCE_FCLK (1 << 21)
+#define LCPLL_CD_SOURCE_FCLK_DONE (1 << 19)
/*
* SKL Clocks
@@ -8868,16 +9244,16 @@ enum skl_power_gate {
/* LCPLL_CTL */
#define LCPLL1_CTL _MMIO(0x46010)
#define LCPLL2_CTL _MMIO(0x46014)
-#define LCPLL_PLL_ENABLE (1<<31)
+#define LCPLL_PLL_ENABLE (1 << 31)
/* DPLL control1 */
#define DPLL_CTRL1 _MMIO(0x6C058)
-#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
-#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
-#define DPLL_CTRL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
-#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id)*6+1)
-#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
-#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
+#define DPLL_CTRL1_HDMI_MODE(id) (1 << ((id) * 6 + 5))
+#define DPLL_CTRL1_SSC(id) (1 << ((id) * 6 + 4))
+#define DPLL_CTRL1_LINK_RATE_MASK(id) (7 << ((id) * 6 + 1))
+#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id) * 6 + 1)
+#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate) << ((id) * 6 + 1))
+#define DPLL_CTRL1_OVERRIDE(id) (1 << ((id) * 6))
#define DPLL_CTRL1_LINK_RATE_2700 0
#define DPLL_CTRL1_LINK_RATE_1350 1
#define DPLL_CTRL1_LINK_RATE_810 2
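+/*
+ * Worked example (illustrative): each DPLL owns a 6-bit field, so for
+ * id 1 the override flag is bit 6, the link rate occupies bits 9:7,
+ * SSC is bit 10 and HDMI mode is bit 11, per the shifts above.
+ */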
@@ -8887,43 +9263,43 @@ enum skl_power_gate {
/* DPLL control2 */
#define DPLL_CTRL2 _MMIO(0x6C05C)
-#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15))
-#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
-#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
-#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk)<<((port)*3+1))
-#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
+#define DPLL_CTRL2_DDI_CLK_OFF(port) (1 << ((port) + 15))
+#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3 << ((port) * 3 + 1))
+#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port) * 3 + 1)
+#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk) << ((port) * 3 + 1))
+#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1 << ((port) * 3))
/* DPLL Status */
#define DPLL_STATUS _MMIO(0x6C060)
-#define DPLL_LOCK(id) (1<<((id)*8))
+#define DPLL_LOCK(id) (1 << ((id) * 8))
/* DPLL cfg */
#define _DPLL1_CFGCR1 0x6C040
#define _DPLL2_CFGCR1 0x6C048
#define _DPLL3_CFGCR1 0x6C050
-#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
-#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
-#define DPLL_CFGCR1_DCO_FRACTION(x) ((x)<<9)
+#define DPLL_CFGCR1_FREQ_ENABLE (1 << 31)
+#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9)
+#define DPLL_CFGCR1_DCO_FRACTION(x) ((x) << 9)
#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
#define _DPLL1_CFGCR2 0x6C044
#define _DPLL2_CFGCR2 0x6C04C
#define _DPLL3_CFGCR2 0x6C054
-#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
-#define DPLL_CFGCR2_QDIV_RATIO(x) ((x)<<8)
-#define DPLL_CFGCR2_QDIV_MODE(x) ((x)<<7)
-#define DPLL_CFGCR2_KDIV_MASK (3<<5)
-#define DPLL_CFGCR2_KDIV(x) ((x)<<5)
-#define DPLL_CFGCR2_KDIV_5 (0<<5)
-#define DPLL_CFGCR2_KDIV_2 (1<<5)
-#define DPLL_CFGCR2_KDIV_3 (2<<5)
-#define DPLL_CFGCR2_KDIV_1 (3<<5)
-#define DPLL_CFGCR2_PDIV_MASK (7<<2)
-#define DPLL_CFGCR2_PDIV(x) ((x)<<2)
-#define DPLL_CFGCR2_PDIV_1 (0<<2)
-#define DPLL_CFGCR2_PDIV_2 (1<<2)
-#define DPLL_CFGCR2_PDIV_3 (2<<2)
-#define DPLL_CFGCR2_PDIV_7 (4<<2)
+#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff << 8)
+#define DPLL_CFGCR2_QDIV_RATIO(x) ((x) << 8)
+#define DPLL_CFGCR2_QDIV_MODE(x) ((x) << 7)
+#define DPLL_CFGCR2_KDIV_MASK (3 << 5)
+#define DPLL_CFGCR2_KDIV(x) ((x) << 5)
+#define DPLL_CFGCR2_KDIV_5 (0 << 5)
+#define DPLL_CFGCR2_KDIV_2 (1 << 5)
+#define DPLL_CFGCR2_KDIV_3 (2 << 5)
+#define DPLL_CFGCR2_KDIV_1 (3 << 5)
+#define DPLL_CFGCR2_PDIV_MASK (7 << 2)
+#define DPLL_CFGCR2_PDIV(x) ((x) << 2)
+#define DPLL_CFGCR2_PDIV_1 (0 << 2)
+#define DPLL_CFGCR2_PDIV_2 (1 << 2)
+#define DPLL_CFGCR2_PDIV_3 (2 << 2)
+#define DPLL_CFGCR2_PDIV_7 (4 << 2)
#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
@@ -8935,9 +9311,9 @@ enum skl_power_gate {
#define DPCLKA_CFGCR0 _MMIO(0x6C200)
#define DPCLKA_CFGCR0_ICL _MMIO(0x164280)
#define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) == PORT_F ? 23 : \
- (port)+10))
+ (port) + 10))
#define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \
- (port)*2)
+ (port) * 2)
#define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
#define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
@@ -8950,6 +9326,8 @@ enum skl_power_gate {
#define PLL_POWER_STATE (1 << 26)
#define CNL_DPLL_ENABLE(pll) _MMIO_PLL(pll, DPLL0_ENABLE, DPLL1_ENABLE)
+#define TBT_PLL_ENABLE _MMIO(0x46020)
+
#define _MG_PLL1_ENABLE 0x46030
#define _MG_PLL2_ENABLE 0x46034
#define _MG_PLL3_ENABLE 0x46038
@@ -8963,6 +9341,7 @@ enum skl_power_gate {
#define _MG_REFCLKIN_CTL_PORT3 0x16A92C
#define _MG_REFCLKIN_CTL_PORT4 0x16B92C
#define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8)
+#define MG_REFCLKIN_CTL_OD_2_MUX_MASK (0x7 << 8)
#define MG_REFCLKIN_CTL(port) _MMIO_PORT((port) - PORT_C, \
_MG_REFCLKIN_CTL_PORT1, \
_MG_REFCLKIN_CTL_PORT2)
@@ -8972,7 +9351,9 @@ enum skl_power_gate {
#define _MG_CLKTOP2_CORECLKCTL1_PORT3 0x16A8D8
#define _MG_CLKTOP2_CORECLKCTL1_PORT4 0x16B8D8
#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x) ((x) << 16)
+#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK (0xff << 16)
#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8)
+#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK (0xff << 8)
#define MG_CLKTOP2_CORECLKCTL1(port) _MMIO_PORT((port) - PORT_C, \
_MG_CLKTOP2_CORECLKCTL1_PORT1, \
_MG_CLKTOP2_CORECLKCTL1_PORT2)
@@ -8982,9 +9363,13 @@ enum skl_power_gate {
#define _MG_CLKTOP2_HSCLKCTL_PORT3 0x16A8D4
#define _MG_CLKTOP2_HSCLKCTL_PORT4 0x16B8D4
#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x) ((x) << 16)
+#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16)
#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14)
+#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK (0x3 << 14)
#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(x) ((x) << 12)
+#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK (0x3 << 12)
#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8)
+#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8)
#define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \
_MG_CLKTOP2_HSCLKCTL_PORT1, \
_MG_CLKTOP2_HSCLKCTL_PORT2)
@@ -9058,12 +9443,18 @@ enum skl_power_gate {
#define _MG_PLL_BIAS_PORT3 0x16AA14
#define _MG_PLL_BIAS_PORT4 0x16BA14
#define MG_PLL_BIAS_BIAS_GB_SEL(x) ((x) << 30)
+#define MG_PLL_BIAS_BIAS_GB_SEL_MASK (0x3 << 30)
#define MG_PLL_BIAS_INIT_DCOAMP(x) ((x) << 24)
+#define MG_PLL_BIAS_INIT_DCOAMP_MASK (0x3f << 24)
#define MG_PLL_BIAS_BIAS_BONUS(x) ((x) << 16)
+#define MG_PLL_BIAS_BIAS_BONUS_MASK (0xff << 16)
#define MG_PLL_BIAS_BIASCAL_EN (1 << 15)
#define MG_PLL_BIAS_CTRIM(x) ((x) << 8)
+#define MG_PLL_BIAS_CTRIM_MASK (0x1f << 8)
#define MG_PLL_BIAS_VREF_RDAC(x) ((x) << 5)
+#define MG_PLL_BIAS_VREF_RDAC_MASK (0x7 << 5)
#define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0)
+#define MG_PLL_BIAS_IREFTRIM_MASK (0x1f << 0)
#define MG_PLL_BIAS(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_BIAS_PORT1, \
_MG_PLL_BIAS_PORT2)
@@ -9105,13 +9496,16 @@ enum skl_power_gate {
#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
+#define DPLL_CFGCR1_QDIV_MODE_SHIFT (9)
#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
+#define DPLL_CFGCR1_KDIV_SHIFT (6)
#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
#define DPLL_CFGCR1_KDIV_1 (1 << 6)
#define DPLL_CFGCR1_KDIV_2 (2 << 6)
#define DPLL_CFGCR1_KDIV_4 (4 << 6)
#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
+#define DPLL_CFGCR1_PDIV_SHIFT (2)
#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
#define DPLL_CFGCR1_PDIV_2 (1 << 2)
#define DPLL_CFGCR1_PDIV_3 (2 << 2)
@@ -9145,22 +9539,22 @@ enum skl_power_gate {
/* GEN9 DC */
#define DC_STATE_EN _MMIO(0x45504)
#define DC_STATE_DISABLE 0
-#define DC_STATE_EN_UPTO_DC5 (1<<0)
-#define DC_STATE_EN_DC9 (1<<3)
-#define DC_STATE_EN_UPTO_DC6 (2<<0)
+#define DC_STATE_EN_UPTO_DC5 (1 << 0)
+#define DC_STATE_EN_DC9 (1 << 3)
+#define DC_STATE_EN_UPTO_DC6 (2 << 0)
#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
#define DC_STATE_DEBUG _MMIO(0x45520)
-#define DC_STATE_DEBUG_MASK_CORES (1<<0)
-#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
+#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
+#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_BDW _MMIO(0x138144)
-#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
-#define D_COMP_COMP_FORCE (1<<8)
-#define D_COMP_COMP_DISABLE (1<<0)
+#define D_COMP_RCOMP_IN_PROGRESS (1 << 9)
+#define D_COMP_COMP_FORCE (1 << 8)
+#define D_COMP_COMP_DISABLE (1 << 0)
/* Pipe WM_LINETIME - watermark line time */
#define _PIPE_WM_LINETIME_A 0x45270
@@ -9168,27 +9562,27 @@ enum skl_power_gate {
#define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B)
#define PIPE_WM_LINETIME_MASK (0x1ff)
#define PIPE_WM_LINETIME_TIME(x) ((x))
-#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
-#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
+#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff << 16)
+#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x) << 16)
/* SFUSE_STRAP */
#define SFUSE_STRAP _MMIO(0xc2014)
-#define SFUSE_STRAP_FUSE_LOCK (1<<13)
-#define SFUSE_STRAP_RAW_FREQUENCY (1<<8)
-#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
-#define SFUSE_STRAP_CRT_DISABLED (1<<6)
-#define SFUSE_STRAP_DDIF_DETECTED (1<<3)
-#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
-#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
-#define SFUSE_STRAP_DDID_DETECTED (1<<0)
+#define SFUSE_STRAP_FUSE_LOCK (1 << 13)
+#define SFUSE_STRAP_RAW_FREQUENCY (1 << 8)
+#define SFUSE_STRAP_DISPLAY_DISABLED (1 << 7)
+#define SFUSE_STRAP_CRT_DISABLED (1 << 6)
+#define SFUSE_STRAP_DDIF_DETECTED (1 << 3)
+#define SFUSE_STRAP_DDIB_DETECTED (1 << 2)
+#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
+#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
#define WM_MISC _MMIO(0x45260)
#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
#define WM_DBG _MMIO(0x45280)
-#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
-#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
-#define WM_DBG_DISALLOW_SPRITE (1<<2)
+#define WM_DBG_DISALLOW_MULTIPLE_LP (1 << 0)
+#define WM_DBG_DISALLOW_MAXFIFO (1 << 1)
+#define WM_DBG_DISALLOW_SPRITE (1 << 2)
/* pipe CSC */
#define _PIPE_A_CSC_COEFF_RY_GY 0x49010
@@ -9314,6 +9708,22 @@ enum skl_power_gate {
#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
#define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF
+#define _ICL_DSI_ESC_CLK_DIV0 0x6b090
+#define _ICL_DSI_ESC_CLK_DIV1 0x6b890
+#define ICL_DSI_ESC_CLK_DIV(port) _MMIO_PORT((port), \
+ _ICL_DSI_ESC_CLK_DIV0, \
+ _ICL_DSI_ESC_CLK_DIV1)
+#define _ICL_DPHY_ESC_CLK_DIV0 0x162190
+#define _ICL_DPHY_ESC_CLK_DIV1 0x6C190
+#define ICL_DPHY_ESC_CLK_DIV(port) _MMIO_PORT((port), \
+ _ICL_DPHY_ESC_CLK_DIV0, \
+ _ICL_DPHY_ESC_CLK_DIV1)
+#define ICL_BYTE_CLK_PER_ESC_CLK_MASK (0x1f << 16)
+#define ICL_BYTE_CLK_PER_ESC_CLK_SHIFT 16
+#define ICL_ESC_CLK_DIV_MASK 0x1ff
+#define ICL_ESC_CLK_DIV_SHIFT 0
+#define DSI_MAX_ESC_CLK 20000 /* in KHz */
+
/* Gen4+ Timestamp and Pipe Frame time stamp registers */
#define GEN4_TIMESTAMP _MMIO(0x2358)
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
@@ -9351,7 +9761,7 @@ enum skl_power_gate {
_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
#define BXT_MIPI_TX_ESCLK_DIVIDER(port, val) \
- ((val & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
+ (((val) & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
/* RX upper control divider to select actual RX clock output from 8x */
#define BXT_MIPI1_RX_ESCLK_UPPER_SHIFT 21
#define BXT_MIPI2_RX_ESCLK_UPPER_SHIFT 5
@@ -9364,7 +9774,7 @@ enum skl_power_gate {
_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \
BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK)
#define BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val) \
- ((val & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
+ (((val) & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
/* 8/3X divider to select the actual 8/3X clock output from 8x */
#define BXT_MIPI1_8X_BY3_SHIFT 19
#define BXT_MIPI2_8X_BY3_SHIFT 3
@@ -9377,7 +9787,7 @@ enum skl_power_gate {
_MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \
BXT_MIPI2_8X_BY3_DIVIDER_MASK)
#define BXT_MIPI_8X_BY3_DIVIDER(port, val) \
- ((val & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
+ (((val) & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
/* RX lower control divider to select actual RX clock output from 8x */
#define BXT_MIPI1_RX_ESCLK_LOWER_SHIFT 16
#define BXT_MIPI2_RX_ESCLK_LOWER_SHIFT 0
@@ -9390,7 +9800,7 @@ enum skl_power_gate {
_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \
BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK)
#define BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val) \
- ((val & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
+ (((val) & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
#define RX_DIVIDER_BIT_1_2 0x3
#define RX_DIVIDER_BIT_3_4 0xC
@@ -9448,6 +9858,14 @@ enum skl_power_gate {
#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
+/* ICL DSI MODE control */
+#define _ICL_DSI_IO_MODECTL_0 0x6B094
+#define _ICL_DSI_IO_MODECTL_1 0x6B894
+#define ICL_DSI_IO_MODECTL(port) _MMIO_PORT(port, \
+ _ICL_DSI_IO_MODECTL_0, \
+ _ICL_DSI_IO_MODECTL_1)
+#define COMBO_PHY_MODE_DSI (1 << 0)
+
#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
#define STAP_SELECT (1 << 0)
@@ -9927,4 +10345,310 @@ enum skl_power_gate {
_ICL_PHY_MISC_B)
#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
+/* Icelake Display Stream Compression Registers */
+#define DSCA_PICTURE_PARAMETER_SET_0 0x6B200
+#define DSCC_PICTURE_PARAMETER_SET_0 0x6BA00
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570
+#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
+#define DSC_VBR_ENABLE (1 << 19)
+#define DSC_422_ENABLE (1 << 18)
+#define DSC_COLOR_SPACE_CONVERSION (1 << 17)
+#define DSC_BLOCK_PREDICTION (1 << 16)
+#define DSC_LINE_BUF_DEPTH_SHIFT 12
+#define DSC_BPC_SHIFT 8
+#define DSC_VER_MIN_SHIFT 4
+#define DSC_VER_MAJ (0x1 << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_1 0x6B204
+#define DSCC_PICTURE_PARAMETER_SET_1 0x6BA04
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574
+#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC)
+#define DSC_BPP(bpp) ((bpp) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_2 0x6B208
+#define DSCC_PICTURE_PARAMETER_SET_2 0x6BA08
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578
+#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC)
+#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16)
+#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_3 0x6B20C
+#define DSCC_PICTURE_PARAMETER_SET_3 0x6BA0C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC)
+#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16)
+#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_4 0x6B210
+#define DSCC_PICTURE_PARAMETER_SET_4 0x6BA10
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580
+#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
+#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
+#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_5 0x6B214
+#define DSCC_PICTURE_PARAMETER_SET_5 0x6BA14
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584
+#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
+#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16)
+#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_6 0x6B218
+#define DSCC_PICTURE_PARAMETER_SET_6 0x6BA18
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588
+#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC)
+#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24)
+#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16)
+#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8)
+#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_7 0x6B21C
+#define DSCC_PICTURE_PARAMETER_SET_7 0x6BA1C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC)
+#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16)
+#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_8 0x6B220
+#define DSCC_PICTURE_PARAMETER_SET_8 0x6BA20
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590
+#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC)
+#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16)
+#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_9 0x6B224
+#define DSCC_PICTURE_PARAMETER_SET_9 0x6BA24
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594
+#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC)
+#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16)
+#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_10 0x6B228
+#define DSCC_PICTURE_PARAMETER_SET_10 0x6BA28
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598
+#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC)
+#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20)
+#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16)
+#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8)
+#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_11 0x6B22C
+#define DSCC_PICTURE_PARAMETER_SET_11 0x6BA2C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_12 0x6B260
+#define DSCC_PICTURE_PARAMETER_SET_12 0x6BA60
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_13 0x6B264
+#define DSCC_PICTURE_PARAMETER_SET_13 0x6BA64
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4
+#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_14 0x6B268
+#define DSCC_PICTURE_PARAMETER_SET_14 0x6BA68
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8
+#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_15 0x6B26C
+#define DSCC_PICTURE_PARAMETER_SET_15 0x6BA6C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC
+#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_16 0x6B270
+#define DSCC_PICTURE_PARAMETER_SET_16 0x6BA70
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
+#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
+#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
+
+/* Icelake Rate Control Buffer Threshold Registers */
+#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
+#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
+#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30)
+#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4)
+#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_0_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_0_PC)
+#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_0_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_0_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC)
+
+#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238)
+#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4)
+#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38)
+#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4)
+#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_1_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_1_PC)
+#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_1_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_1_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
+
#endif /* _I915_REG_H_ */
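The new *_MASK definitions above pair each value macro (e.g. MG_PLL_BIAS_CTRIM(x))
with a mask covering the same bits, so callers can update one field with a
read-modify-write instead of reconstructing the whole register. A minimal sketch of
the intended usage, assuming the driver's usual I915_READ/I915_WRITE MMIO accessors
(the function name and value are illustrative):

	static void set_mg_pll_bias_ctrim(struct drm_i915_private *dev_priv,
					  enum port port, u32 ctrim)
	{
		u32 val;

		val = I915_READ(MG_PLL_BIAS(port));
		val &= ~MG_PLL_BIAS_CTRIM_MASK;		/* clear the old field */
		val |= MG_PLL_BIAS_CTRIM(ctrim);	/* shift in the new value */
		I915_WRITE(MG_PLL_BIAS(port), val);
	}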
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8928894dd9c7..5c2c93cbab12 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -206,7 +206,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
/* Carefully retire all requests without writing to the rings */
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -320,6 +321,7 @@ static void advance_ring(struct i915_request *request)
* is just about to be. Either works, if we miss the last two
* noops - they are safe to be replayed on a reset.
*/
+ GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
tail = READ_ONCE(request->tail);
list_del(&ring->active_link);
} else {
@@ -383,8 +385,8 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
* the subsequent request.
*/
if (engine->last_retired_context)
- intel_context_unpin(engine->last_retired_context, engine);
- engine->last_retired_context = rq->ctx;
+ intel_context_unpin(engine->last_retired_context);
+ engine->last_retired_context = rq->hw_context;
}
static void __retire_engine_upto(struct intel_engine_cs *engine,
@@ -455,8 +457,8 @@ static void i915_request_retire(struct i915_request *request)
i915_request_remove_from_client(request);
/* Retirement decays the ban score as it is a sign of ctx progress */
- atomic_dec_if_positive(&request->ctx->ban_score);
- intel_context_unpin(request->ctx, request->engine);
+ atomic_dec_if_positive(&request->gem_context->ban_score);
+ intel_context_unpin(request->hw_context);
__retire_engine_upto(request->engine, request);
@@ -502,7 +504,7 @@ static void move_to_timeline(struct i915_request *request,
GEM_BUG_ON(request->timeline == &request->engine->timeline);
lockdep_assert_held(&request->engine->timeline.lock);
- spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING);
+ spin_lock(&request->timeline->lock);
list_move_tail(&request->link, &timeline->requests);
spin_unlock(&request->timeline->lock);
}
@@ -657,7 +659,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
struct drm_i915_private *i915 = engine->i915;
struct i915_request *rq;
- struct intel_ring *ring;
+ struct intel_context *ce;
int ret;
lockdep_assert_held(&i915->drm.struct_mutex);
@@ -681,22 +683,21 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- ring = intel_context_pin(ctx, engine);
- if (IS_ERR(ring))
- return ERR_CAST(ring);
- GEM_BUG_ON(!ring);
+ ce = intel_context_pin(ctx, engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
ret = reserve_gt(i915);
if (ret)
goto err_unpin;
- ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
+ ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
if (ret)
goto err_unreserve;
/* Move our oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
- if (!list_is_last(&rq->ring_link, &ring->request_list) &&
+ rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
+ if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
i915_request_completed(rq))
i915_request_retire(rq);
@@ -735,7 +736,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
/* Ratelimit ourselves to prevent oom from malicious clients */
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED |
- I915_WAIT_INTERRUPTIBLE);
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto err_unreserve;
@@ -760,9 +762,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
INIT_LIST_HEAD(&rq->active_list);
rq->i915 = i915;
rq->engine = engine;
- rq->ctx = ctx;
- rq->ring = ring;
- rq->timeline = ring->timeline;
+ rq->gem_context = ctx;
+ rq->hw_context = ce;
+ rq->ring = ce->ring;
+ rq->timeline = ce->ring->timeline;
GEM_BUG_ON(rq->timeline == &engine->timeline);
spin_lock_init(&rq->lock);
@@ -814,14 +817,16 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
goto err_unwind;
/* Keep a second pin for the dual retirement along engine and ring */
- __intel_context_pin(rq->ctx, engine);
+ __intel_context_pin(ce);
+
+ rq->infix = rq->ring->emit; /* end of header; start of user payload */
/* Check that we didn't interrupt ourselves with a new request */
GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
return rq;
err_unwind:
- rq->ring->emit = rq->head;
+ ce->ring->emit = rq->head;
/* Make sure we didn't add ourselves to external state before freeing */
GEM_BUG_ON(!list_empty(&rq->active_list));
@@ -832,7 +837,7 @@ err_unwind:
err_unreserve:
unreserve_gt(i915);
err_unpin:
- intel_context_unpin(ctx, engine);
+ intel_context_unpin(ce);
return ERR_PTR(ret);
}
@@ -1010,19 +1015,39 @@ i915_request_await_object(struct i915_request *to,
return ret;
}
+void i915_request_skip(struct i915_request *rq, int error)
+{
+ void *vaddr = rq->ring->vaddr;
+ u32 head;
+
+ GEM_BUG_ON(!IS_ERR_VALUE((long)error));
+ dma_fence_set_error(&rq->fence, error);
+
+ /*
+ * As this request likely depends on state from the lost
+ * context, clear out all the user operations leaving the
+ * breadcrumb at the end (so we get the fence notifications).
+ */
+ head = rq->infix;
+ if (rq->postfix < head) {
+ memset(vaddr + head, 0, rq->ring->size - head);
+ head = 0;
+ }
+ memset(vaddr + head, 0, rq->postfix - head);
+}
+
/*
* NB: This function is not allowed to fail. Doing so would mean the
* request is not being tracked for completion but the work itself is
* going to happen on the hardware. This would be a Bad Thing(tm).
*/
-void __i915_request_add(struct i915_request *request, bool flush_caches)
+void i915_request_add(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- struct intel_ring *ring = request->ring;
struct i915_timeline *timeline = request->timeline;
+ struct intel_ring *ring = request->ring;
struct i915_request *prev;
u32 *cs;
- int err;
GEM_TRACE("%s fence %llx:%d\n",
engine->name, request->fence.context, request->fence.seqno);
@@ -1043,20 +1068,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
* know that it is time to use that space up.
*/
request->reserved_space = 0;
-
- /*
- * Emit any outstanding flushes - execbuf can fail to emit the flush
- * after having emitted the batchbuffer command. Hence we need to fix
- * things up similar to emitting the lazy request. The difference here
- * is that the flush _must_ happen before the next request, no matter
- * what.
- */
- if (flush_caches) {
- err = engine->emit_flush(request, EMIT_FLUSH);
-
- /* Not allowed to fail! */
- WARN(err, "engine->emit_flush() failed: %d!\n", err);
- }
+ engine->emit_flush(request, EMIT_FLUSH);
/*
* Record the position of the start of the breadcrumb so that
@@ -1095,8 +1107,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
i915_gem_active_set(&timeline->last_request, request);
list_add_tail(&request->ring_link, &ring->request_list);
- if (list_is_first(&request->ring_link, &ring->request_list))
+ if (list_is_first(&request->ring_link, &ring->request_list)) {
+ GEM_TRACE("marking %s as active\n", ring->timeline->name);
list_add(&ring->active_link, &request->i915->gt.active_rings);
+ }
request->emitted_jiffies = jiffies;
/*
@@ -1113,7 +1127,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
local_bh_disable();
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule)
- engine->schedule(request, &request->ctx->sched);
+ engine->schedule(request, &request->gem_context->sched);
rcu_read_unlock();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -1205,7 +1219,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
* takes to sleep on a request, on the order of a microsecond.
*/
- irq = atomic_read(&engine->irq_count);
+ irq = READ_ONCE(engine->breadcrumbs.irq_count);
timeout_us += local_clock_us(&cpu);
do {
if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
@@ -1217,7 +1231,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
* assume we won't see one in the near future but require
* the engine->seqno_barrier() to fixup coherency.
*/
- if (atomic_read(&engine->irq_count) != irq)
+ if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
break;
if (signal_pending_state(state, current))
@@ -1294,7 +1308,7 @@ long i915_request_wait(struct i915_request *rq,
if (flags & I915_WAIT_LOCKED)
add_wait_queue(errq, &reset);
- intel_wait_init(&wait, rq);
+ intel_wait_init(&wait);
restart:
do {
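i915_request_skip() above must handle the user payload wrapping past the end of
the ring. A worked example with illustrative numbers: on a 4096-byte ring with
rq->infix == 3800 and rq->postfix == 200, postfix < head holds, so the first
memset clears bytes [3800, 4096), head is reset to 0, and the second memset
clears [0, 200). Everything between the request header and the breadcrumb is
zeroed, so the fence still signals but none of the lost context's commands run.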
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index eddbd4245cb3..e1c9365dfefb 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -93,8 +93,9 @@ struct i915_request {
* i915_request_free() will then decrement the refcount on the
* context.
*/
- struct i915_gem_context *ctx;
+ struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
+ struct intel_context *hw_context;
struct intel_ring *ring;
struct i915_timeline *timeline;
struct intel_signal_node signaling;
@@ -133,6 +134,9 @@ struct i915_request {
/** Position in the ring of the start of the request */
u32 head;
+ /** Position in the ring of the start of the user packets */
+ u32 infix;
+
/**
* Position in the ring of the start of the postfix.
* This is required to calculate the maximum available ring space
@@ -249,13 +253,13 @@ int i915_request_await_object(struct i915_request *to,
int i915_request_await_dma_fence(struct i915_request *rq,
struct dma_fence *fence);
-void __i915_request_add(struct i915_request *rq, bool flush_caches);
-#define i915_request_add(rq) \
- __i915_request_add(rq, false)
+void i915_request_add(struct i915_request *rq);
void __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);
+void i915_request_skip(struct i915_request *request, int error);
+
void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);
@@ -266,6 +270,7 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
+#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
@@ -375,6 +380,7 @@ static inline void
init_request_active(struct i915_gem_active *active,
i915_gem_retire_fn retire)
{
+ RCU_INIT_POINTER(active->request, NULL);
INIT_LIST_HEAD(&active->link);
active->retire = retire ?: i915_gem_retire_noop;
}
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 9766e806dce6..a73472dd12fd 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -99,6 +99,6 @@ __printf(2, 3)
bool __igt_timeout(unsigned long timeout, const char *fmt, ...);
#define igt_timeout(t, fmt, ...) \
- __igt_timeout((t), KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+ __igt_timeout((t), KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif /* !__I915_SELFTEST_H__ */
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index dc2a4632faa7..a2c2c3ab5fb0 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -37,6 +37,8 @@ struct i915_timeline {
u32 seqno;
spinlock_t lock;
+#define TIMELINE_CLIENT 0 /* default subclass */
+#define TIMELINE_ENGINE 1
/**
* List of breadcrumbs associated with GPU requests currently
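TIMELINE_CLIENT and TIMELINE_ENGINE give the two kinds of timeline lock distinct
lockdep subclasses, which is what allows move_to_timeline() in i915_request.c above
to drop its SINGLE_DEPTH_NESTING annotation and take a plain spin_lock(). A hedged
sketch of how such subclasses are typically assigned at init time (the actual
hook-up lives in the timeline setup code, which this diff does not show):

	spin_lock_init(&timeline->lock);
	lockdep_set_subclass(&timeline->lock,
			     is_engine_timeline ? TIMELINE_ENGINE : TIMELINE_CLIENT);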
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 8cc3a256f29d..b50c6b829715 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -591,21 +591,26 @@ TRACE_EVENT(i915_gem_ring_sync_to,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, sync_from)
- __field(u32, sync_to)
+ __field(u32, from_class)
+ __field(u32, from_instance)
+ __field(u32, to_class)
+ __field(u32, to_instance)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = from->i915->drm.primary->index;
- __entry->sync_from = from->engine->id;
- __entry->sync_to = to->engine->id;
+ __entry->from_class = from->engine->uabi_class;
+ __entry->from_instance = from->engine->instance;
+ __entry->to_class = to->engine->uabi_class;
+ __entry->to_instance = to->engine->instance;
__entry->seqno = from->global_seqno;
),
- TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
+ TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
__entry->dev,
- __entry->sync_from, __entry->sync_to,
+ __entry->from_class, __entry->from_instance,
+ __entry->to_class, __entry->to_instance,
__entry->seqno)
);
@@ -616,24 +621,27 @@ TRACE_EVENT(i915_request_queue,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, flags)
),
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->flags)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->flags)
);
DECLARE_EVENT_CLASS(i915_request,
@@ -643,24 +651,27 @@ DECLARE_EVENT_CLASS(i915_request,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global)
),
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global = rq->global_seqno;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->global)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->global)
);
DEFINE_EVENT(i915_request, i915_request_add,
@@ -686,8 +697,9 @@ TRACE_EVENT(i915_request_in,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global_seqno)
__field(u32, port)
@@ -696,8 +708,9 @@ TRACE_EVENT(i915_request_in,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global_seqno = rq->global_seqno;
@@ -705,10 +718,10 @@ TRACE_EVENT(i915_request_in,
__entry->port = port;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, prio=%u, global=%u, port=%u",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->prio, __entry->global_seqno,
- __entry->port)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->prio, __entry->global_seqno, __entry->port)
);
TRACE_EVENT(i915_request_out,
@@ -718,8 +731,9 @@ TRACE_EVENT(i915_request_out,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global_seqno)
__field(u32, completed)
@@ -727,17 +741,18 @@ TRACE_EVENT(i915_request_out,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global_seqno = rq->global_seqno;
__entry->completed = i915_request_completed(rq);
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, completed?=%u",
- __entry->dev, __entry->hw_id, __entry->ring,
- __entry->ctx, __entry->seqno,
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
__entry->global_seqno, __entry->completed)
);
@@ -771,21 +786,23 @@ TRACE_EVENT(intel_engine_notify,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, ring)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(bool, waiters)
),
TP_fast_assign(
__entry->dev = engine->i915->drm.primary->index;
- __entry->ring = engine->id;
+ __entry->class = engine->uabi_class;
+ __entry->instance = engine->instance;
__entry->seqno = intel_engine_get_seqno(engine);
__entry->waiters = waiters;
),
- TP_printk("dev=%u, ring=%u, seqno=%u, waiters=%u",
- __entry->dev, __entry->ring, __entry->seqno,
- __entry->waiters)
+ TP_printk("dev=%u, engine=%u:%u, seqno=%u, waiters=%u",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->seqno, __entry->waiters)
);
DEFINE_EVENT(i915_request, i915_request_retire,
@@ -800,8 +817,9 @@ TRACE_EVENT(i915_request_wait_begin,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, hw_id)
- __field(u32, ring)
- __field(u32, ctx)
+ __field(u64, ctx)
+ __field(u16, class)
+ __field(u16, instance)
__field(u32, seqno)
__field(u32, global)
__field(unsigned int, flags)
@@ -815,18 +833,20 @@ TRACE_EVENT(i915_request_wait_begin,
*/
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->ctx->hw_id;
- __entry->ring = rq->engine->id;
+ __entry->hw_id = rq->gem_context->hw_id;
+ __entry->class = rq->engine->uabi_class;
+ __entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->global = rq->global_seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
- __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->global,
- !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+ __entry->dev, __entry->class, __entry->instance,
+ __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
+ __entry->flags)
);
DEFINE_EVENT(i915_request, i915_request_wait_end,
@@ -936,7 +956,7 @@ DECLARE_EVENT_CLASS(i915_context,
__entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
__entry->hw_id = ctx->hw_id;
- __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
+ __entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
@@ -953,36 +973,6 @@ DEFINE_EVENT(i915_context, i915_context_free,
TP_ARGS(ctx)
);
-/**
- * DOC: switch_mm tracepoint
- *
- * This tracepoint allows tracking of the mm switch, which is an important point
- * in the lifetime of the vm in the legacy submission path. This tracepoint is
- * called only if full ppgtt is enabled.
- */
-TRACE_EVENT(switch_mm,
- TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
-
- TP_ARGS(engine, to),
-
- TP_STRUCT__entry(
- __field(u32, ring)
- __field(struct i915_gem_context *, to)
- __field(struct i915_address_space *, vm)
- __field(u32, dev)
- ),
-
- TP_fast_assign(
- __entry->ring = engine->id;
- __entry->to = to;
- __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
- __entry->dev = engine->i915->drm.primary->index;
- ),
-
- TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
- __entry->dev, __entry->ring, __entry->to, __entry->vm)
-);
-
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
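The tracepoint changes above replace the single engine->id ("ring") field with a
uabi class:instance pair, giving engines stable identities across hardware
generations, and widen ctx from u32 to u64 to match the dma-fence context id. With
illustrative values, a queue event that previously printed

	dev=0, hw_id=3, ring=2, ctx=15, seqno=7, flags=0x0

now prints

	dev=0, engine=1:0, hw_id=3, ctx=15, seqno=7, flags=0x0

where 1:0 is the engine's uabi_class and instance.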
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 5fe9f3f39467..869cf4a3b6de 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -105,7 +105,7 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
node->start + node->size,
node->size / 1024);
- ggtt->base.reserved -= node->size;
+ ggtt->vm.reserved -= node->size;
drm_mm_remove_node(node);
}
@@ -141,11 +141,11 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
- ret = i915_gem_gtt_reserve(&ggtt->base, node,
+ ret = i915_gem_gtt_reserve(&ggtt->vm, node,
size, start, I915_COLOR_UNEVICTABLE,
0);
if (!ret)
- ggtt->base.reserved += size;
+ ggtt->vm.reserved += size;
return ret;
}
@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
int intel_vgt_balloon(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned long ggtt_end = ggtt->base.total;
+ unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index bb8338450dc1..551acc390046 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -36,6 +36,12 @@ intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
}
+static inline bool
+intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
+}
+
int intel_vgt_balloon(struct drm_i915_private *dev_priv);
void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
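intel_vgpu_has_huge_gtt() follows the same caps-bit pattern as the existing
intel_vgpu_has_hwsp_emulation() helper. An illustrative caller (hypothetical,
not part of this diff) would gate huge GTT page use on it:

	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		return false;	/* host vGPU cannot shadow huge GTT entries */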
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 0531c01c3604..11d834f94220 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -21,7 +21,7 @@
* IN THE SOFTWARE.
*
*/
-
+
#include "i915_vma.h"
#include "i915_drv.h"
@@ -30,18 +30,53 @@
#include <drm/drm_gem.h>
+#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
+
+#include <linux/stackdepot.h>
+
+static void vma_print_allocator(struct i915_vma *vma, const char *reason)
+{
+ unsigned long entries[12];
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = ARRAY_SIZE(entries),
+ };
+ char buf[512];
+
+ if (!vma->node.stack) {
+ DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
+ vma->node.start, vma->node.size, reason);
+ return;
+ }
+
+ depot_fetch_stack(vma->node.stack, &trace);
+ snprint_stack_trace(buf, sizeof(buf), &trace, 0);
+ DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
+ vma->node.start, vma->node.size, reason, buf);
+}
+
+#else
+
+static void vma_print_allocator(struct i915_vma *vma, const char *reason)
+{
+}
+
+#endif
+
+struct i915_vma_active {
+ struct i915_gem_active base;
+ struct i915_vma *vma;
+ struct rb_node node;
+ u64 timeline;
+};
+
static void
-i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
+__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
{
- const unsigned int idx = rq->engine->id;
- struct i915_vma *vma =
- container_of(active, struct i915_vma, last_read[idx]);
struct drm_i915_gem_object *obj = vma->obj;
- GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
-
- i915_vma_clear_active(vma, idx);
- if (i915_vma_is_active(vma))
+ GEM_BUG_ON(!i915_vma_is_active(vma));
+ if (--vma->active_count)
return;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
@@ -75,6 +110,21 @@ i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
}
}
+static void
+i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq)
+{
+ struct i915_vma_active *active =
+ container_of(base, typeof(*active), base);
+
+ __i915_vma_retire(active->vma, rq);
+}
+
+static void
+i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq)
+{
+ __i915_vma_retire(container_of(base, struct i915_vma, last_active), rq);
+}
+
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
@@ -82,19 +132,20 @@ vma_create(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
struct rb_node *rb, **p;
- int i;
/* The aliasing_ppgtt should never be used directly! */
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
- init_request_active(&vma->last_read[i], i915_vma_retire);
+ vma->active = RB_ROOT;
+
+ init_request_active(&vma->last_active, i915_vma_last_retire);
init_request_active(&vma->last_fence, NULL);
vma->vm = vm;
+ vma->ops = &vm->vma_ops;
vma->obj = obj;
vma->resv = obj->resv;
vma->size = obj->base.size;
@@ -280,7 +331,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
GEM_BUG_ON(!vma->pages);
trace_i915_vma_bind(vma, bind_flags);
- ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+ ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
if (ret)
return ret;
@@ -345,7 +396,7 @@ void i915_vma_flush_writes(struct i915_vma *vma)
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
GEM_BUG_ON(vma->iomap == NULL);
@@ -365,6 +416,7 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma)
return;
obj = vma->obj;
+ GEM_BUG_ON(!obj);
i915_vma_unpin(vma);
i915_vma_close(vma);
@@ -459,6 +511,18 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
return true;
}
+static void assert_bind_count(const struct drm_i915_gem_object *obj)
+{
+ /*
+ * Combine the assertion that the object is bound and that we have
+ * pinned its pages. But we should never have bound the object
+ * more than we have pinned its pages. (For complete accuracy, we
+ * assume that no one else is pinning the pages, but as a rough assertion
+ * that we will not run into problems later, this will do!)
+ */
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
/**
* i915_vma_insert - finds a slot for the vma in its address space
* @vma: the vma
@@ -477,7 +541,7 @@ static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
struct drm_i915_private *dev_priv = vma->vm->i915;
- struct drm_i915_gem_object *obj = vma->obj;
+ unsigned int cache_level;
u64 start, end;
int ret;
@@ -512,20 +576,25 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
* attempt to find space.
*/
if (size > end) {
- DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
- size, obj->base.size,
- flags & PIN_MAPPABLE ? "mappable" : "total",
+ DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
+ size, flags & PIN_MAPPABLE ? "mappable" : "total",
end);
return -ENOSPC;
}
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
+ if (vma->obj) {
+ ret = i915_gem_object_pin_pages(vma->obj);
+ if (ret)
+ return ret;
+
+ cache_level = vma->obj->cache_level;
+ } else {
+ cache_level = 0;
+ }
GEM_BUG_ON(vma->pages);
- ret = vma->vm->set_pages(vma);
+ ret = vma->ops->set_pages(vma);
if (ret)
goto err_unpin;
@@ -538,7 +607,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
- size, offset, obj->cache_level,
+ size, offset, cache_level,
flags);
if (ret)
goto err_clear;
@@ -577,7 +646,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
ret = i915_gem_gtt_insert(vma->vm, &vma->node,
- size, alignment, obj->cache_level,
+ size, alignment, cache_level,
start, end, flags);
if (ret)
goto err_clear;
@@ -586,23 +655,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- spin_lock(&dev_priv->mm.obj_lock);
- list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
- obj->bind_count++;
- spin_unlock(&dev_priv->mm.obj_lock);
+ if (vma->obj) {
+ struct drm_i915_gem_object *obj = vma->obj;
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
+ obj->bind_count++;
+ spin_unlock(&dev_priv->mm.obj_lock);
+
+ assert_bind_count(obj);
+ }
return 0;
err_clear:
- vma->vm->clear_pages(vma);
+ vma->ops->clear_pages(vma);
err_unpin:
- i915_gem_object_unpin_pages(obj);
+ if (vma->obj)
+ i915_gem_object_unpin_pages(vma->obj);
return ret;
}
@@ -610,30 +684,35 @@ static void
i915_vma_remove(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
- struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
- vma->vm->clear_pages(vma);
+ vma->ops->clear_pages(vma);
drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
- /* Since the unbound list is global, only move to that list if
+ /*
+ * Since the unbound list is global, only move to that list if
* no more VMAs exist.
*/
- spin_lock(&i915->mm.obj_lock);
- if (--obj->bind_count == 0)
- list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
- spin_unlock(&i915->mm.obj_lock);
-
- /* And finally now the object is completely decoupled from this vma,
- * we can drop its hold on the backing storage and allow it to be
- * reaped by the shrinker.
- */
- i915_gem_object_unpin_pages(obj);
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+ if (vma->obj) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ spin_lock(&i915->mm.obj_lock);
+ if (--obj->bind_count == 0)
+ list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+ spin_unlock(&i915->mm.obj_lock);
+
+ /*
+ * And finally now the object is completely decoupled from this
+ * vma, we can drop its hold on the backing storage and allow
+ * it to be reaped by the shrinker.
+ */
+ i915_gem_object_unpin_pages(obj);
+ assert_bind_count(obj);
+ }
}
int __i915_vma_do_pin(struct i915_vma *vma,
@@ -658,7 +737,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+ ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
if (ret)
goto err_remove;
@@ -715,23 +794,28 @@ void i915_vma_reopen(struct i915_vma *vma)
static void __i915_vma_destroy(struct i915_vma *vma)
{
- int i;
+ struct drm_i915_private *i915 = vma->vm->i915;
+ struct i915_vma_active *iter, *n;
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(vma->fence);
- for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
- GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
list_del(&vma->obj_link);
list_del(&vma->vm_link);
- rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+ if (vma->obj)
+ rb_erase(&vma->obj_node, &vma->obj->vma_tree);
if (!i915_vma_is_ggtt(vma))
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
- kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+ rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
+ GEM_BUG_ON(i915_gem_active_isset(&iter->base));
+ kfree(iter);
+ }
+
+ kmem_cache_free(i915->vmas, vma);
}
void i915_vma_destroy(struct i915_vma *vma)
@@ -795,23 +879,173 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
list_del(&vma->obj->userfault_link);
}
-int i915_vma_unbind(struct i915_vma *vma)
+static void export_fence(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
+{
+ struct reservation_object *resv = vma->resv;
+
+ /*
+ * Ignore errors from failing to allocate the new fence, we can't
+ * handle an error right now. Worst case should be missed
+ * synchronisation leading to rendering corruption.
+ */
+ reservation_object_lock(resv, NULL);
+ if (flags & EXEC_OBJECT_WRITE)
+ reservation_object_add_excl_fence(resv, &rq->fence);
+ else if (reservation_object_reserve_shared(resv) == 0)
+ reservation_object_add_shared_fence(resv, &rq->fence);
+ reservation_object_unlock(resv);
+}
+
+static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx)
+{
+ struct i915_vma_active *active;
+ struct rb_node **p, *parent;
+ struct i915_request *old;
+
+ /*
+ * We track the most recently used timeline to skip a rbtree search
+ * for the common case, under typical loads we never need the rbtree
+ * at all. We can reuse the last_active slot if it is empty, that is
+ * after the previous activity has been retired, or if the active
+ * matches the current timeline.
+ *
+ * Note that we allow the timeline to be active simultaneously in
+ * the rbtree and the last_active cache. We do this to avoid having
+ * to search and replace the rbtree element for a new timeline, with
+ * the cost being that we must be aware that the vma may be retired
+ * twice for the same timeline (as the older rbtree element will be
+ * retired before the new request added to last_active).
+ */
+ old = i915_gem_active_raw(&vma->last_active,
+ &vma->vm->i915->drm.struct_mutex);
+ if (!old || old->fence.context == idx)
+ goto out;
+
+ /* Move the currently active fence into the rbtree */
+ idx = old->fence.context;
+
+ parent = NULL;
+ p = &vma->active.rb_node;
+ while (*p) {
+ parent = *p;
+
+ active = rb_entry(parent, struct i915_vma_active, node);
+ if (active->timeline == idx)
+ goto replace;
+
+ if (active->timeline < idx)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+
+ active = kmalloc(sizeof(*active), GFP_KERNEL);
+
+ /* kmalloc may retire the vma->last_active request (thanks shrinker)! */
+ if (unlikely(!i915_gem_active_raw(&vma->last_active,
+ &vma->vm->i915->drm.struct_mutex))) {
+ kfree(active);
+ goto out;
+ }
+
+ if (unlikely(!active))
+ return ERR_PTR(-ENOMEM);
+
+ init_request_active(&active->base, i915_vma_retire);
+ active->vma = vma;
+ active->timeline = idx;
+
+ rb_link_node(&active->node, parent, p);
+ rb_insert_color(&active->node, &vma->active);
+
+replace:
+ /*
+ * Overwrite the previous active slot in the rbtree with last_active,
+ * leaving last_active zeroed. If the previous slot is still active,
+ * we must be careful as we now only expect to receive one retire
+ * callback, not two, and so must undo the active counting for the
+ * overwritten slot.
+ */
+ if (i915_gem_active_isset(&active->base)) {
+ /* Retire ourselves from the old rq->active_list */
+ __list_del_entry(&active->base.link);
+ vma->active_count--;
+ GEM_BUG_ON(!vma->active_count);
+ }
+ GEM_BUG_ON(list_empty(&vma->last_active.link));
+ list_replace_init(&vma->last_active.link, &active->base.link);
+ active->base.request = fetch_and_zero(&vma->last_active.request);
+
+out:
+ return &vma->last_active;
+}
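
The lookup in active_instance() is a one-entry MRU cache in front of an rbtree. Stripped of the retirement bookkeeping, the pattern looks roughly like this (generic names, not the driver's):

    struct cached_node { struct rb_node rb; u64 key; };

    /* Check the cached slot first; fall back to the O(log n) tree walk
     * and refresh the cache on a hit. */
    static struct cached_node *lookup(struct rb_root *root,
                                      struct cached_node **mru, u64 key)
    {
            struct rb_node *rb = root->rb_node;

            if (*mru && (*mru)->key == key)
                    return *mru;

            while (rb) {
                    struct cached_node *n =
                            rb_entry(rb, struct cached_node, rb);

                    if (n->key == key) {
                            *mru = n;
                            return n;
                    }
                    rb = key < n->key ? rb->rb_left : rb->rb_right;
            }
            return NULL;
    }
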
+
+int i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
- unsigned long active;
+ struct i915_gem_active *active;
+
+ lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+ active = active_instance(vma, rq->fence.context);
+ if (IS_ERR(active))
+ return PTR_ERR(active);
+
+ /*
+ * Add a reference if we're newly entering the active list.
+ * The order in which we add operations to the retirement queue is
+ * vital here: mark_active adds to the start of the callback list,
+ * such that subsequent callbacks are called first. Therefore we
+ * add the active reference first and queue for it to be dropped
+ * *last*.
+ */
+ if (!i915_gem_active_isset(active) && !vma->active_count++) {
+ list_move_tail(&vma->vm_link, &vma->vm->active_list);
+ obj->active_count++;
+ }
+ i915_gem_active_set(active, rq);
+ GEM_BUG_ON(!i915_vma_is_active(vma));
+ GEM_BUG_ON(!obj->active_count);
+
+ obj->write_domain = 0;
+ if (flags & EXEC_OBJECT_WRITE) {
+ obj->write_domain = I915_GEM_DOMAIN_RENDER;
+
+ if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
+ i915_gem_active_set(&obj->frontbuffer_write, rq);
+
+ obj->read_domains = 0;
+ }
+ obj->read_domains |= I915_GEM_GPU_DOMAINS;
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE)
+ i915_gem_active_set(&vma->last_fence, rq);
+
+ export_fence(vma, rq, flags);
+ return 0;
+}
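
The ordering comment in i915_vma_move_to_active() leans on retirement callbacks running in reverse registration order; an illustrative note:

    /* Callbacks are added to the start of the request's callback list,
     * so they run LIFO: registrations A, B, C fire as C, B, A.
     * Registering the vma's active reference first therefore
     * guarantees it is dropped last, after every other callback has
     * finished looking at the vma. */
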
+
+int i915_vma_unbind(struct i915_vma *vma)
+{
int ret;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- /* First wait upon any activity as retiring the request may
+ /*
+ * First wait upon any activity as retiring the request may
* have side-effects such as unpinning or even unbinding this vma.
*/
might_sleep();
- active = i915_vma_get_active(vma);
- if (active) {
- int idx;
+ if (i915_vma_is_active(vma)) {
+ struct i915_vma_active *active, *n;
- /* When a closed VMA is retired, it is unbound - eek.
+ /*
+ * When a closed VMA is retired, it is unbound - eek.
* In order to prevent it from being recursively closed,
* take a pin on the vma so that the second unbind is
* aborted.
@@ -825,33 +1059,36 @@ int i915_vma_unbind(struct i915_vma *vma)
*/
__i915_vma_pin(vma);
- for_each_active(active, idx) {
- ret = i915_gem_active_retire(&vma->last_read[idx],
- &vma->vm->i915->drm.struct_mutex);
- if (ret)
- break;
- }
+ ret = i915_gem_active_retire(&vma->last_active,
+ &vma->vm->i915->drm.struct_mutex);
+ if (ret)
+ goto unpin;
- if (!ret) {
- ret = i915_gem_active_retire(&vma->last_fence,
+ rbtree_postorder_for_each_entry_safe(active, n,
+ &vma->active, node) {
+ ret = i915_gem_active_retire(&active->base,
&vma->vm->i915->drm.struct_mutex);
+ if (ret)
+ goto unpin;
}
+ ret = i915_gem_active_retire(&vma->last_fence,
+ &vma->vm->i915->drm.struct_mutex);
+unpin:
__i915_vma_unpin(vma);
if (ret)
return ret;
}
GEM_BUG_ON(i915_vma_is_active(vma));
- if (i915_vma_is_pinned(vma))
+ if (i915_vma_is_pinned(vma)) {
+ vma_print_allocator(vma, "is pinned");
return -EBUSY;
+ }
if (!drm_mm_node_allocated(&vma->node))
return 0;
- GEM_BUG_ON(obj->bind_count == 0);
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
if (i915_vma_is_map_and_fenceable(vma)) {
/*
* Check that we have flushed all writes through the GGTT
@@ -878,7 +1115,7 @@ int i915_vma_unbind(struct i915_vma *vma)
if (likely(!vma->vm->closed)) {
trace_i915_vma_unbind(vma);
- vma->vm->unbind_vma(vma);
+ vma->ops->unbind_vma(vma);
}
vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
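
The temporary __i915_vma_pin()/__i915_vma_unpin() bracket in i915_vma_unbind() is a re-entrancy guard rather than a real pin; in pattern form (annotation, not patch content):

    /* Guard against recursive unbind on a closed vma:
     *   unbind -> retire -> close -> unbind
     * The inner unbind sees a non-zero pin count and bails out with
     * -EBUSY; the outer caller drops the pin once every request
     * tracked in the active rbtree has been retired. */
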
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index fc4294cfaa91..f06d66377107 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -26,6 +26,7 @@
#define __I915_VMA_H__
#include <linux/io-mapping.h>
+#include <linux/rbtree.h>
#include <drm/drm_mm.h>
@@ -49,10 +50,12 @@ struct i915_vma {
struct drm_mm_node node;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ const struct i915_vma_ops *ops;
struct drm_i915_fence_reg *fence;
struct reservation_object *resv; /** Alias of obj->resv */
struct sg_table *pages;
void __iomem *iomap;
+ void *private; /* owned by creator */
u64 size;
u64 display_alignment;
struct i915_page_sizes page_sizes;
@@ -92,8 +95,9 @@ struct i915_vma {
#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
#define I915_VMA_GGTT_WRITE BIT(12)
- unsigned int active;
- struct i915_gem_active last_read[I915_NUM_ENGINES];
+ unsigned int active_count;
+ struct rb_root active;
+ struct i915_gem_active last_active;
struct i915_gem_active last_fence;
/**
@@ -136,6 +140,15 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+static inline bool i915_vma_is_active(struct i915_vma *vma)
+{
+ return vma->active_count;
+}
+
+int __must_check i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags);
+
static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
return vma->flags & I915_VMA_GGTT;
@@ -185,34 +198,6 @@ static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}
-static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
-{
- return vma->active;
-}
-
-static inline bool i915_vma_is_active(const struct i915_vma *vma)
-{
- return i915_vma_get_active(vma);
-}
-
-static inline void i915_vma_set_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active |= BIT(engine);
-}
-
-static inline void i915_vma_clear_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active &= ~BIT(engine);
-}
-
-static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
- unsigned int engine)
-{
- return vma->active & BIT(engine);
-}
-
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
@@ -339,6 +324,12 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
__i915_vma_unpin(vma);
}
+static inline bool i915_vma_is_bound(const struct i915_vma *vma,
+ unsigned int where)
+{
+ return vma->flags & where;
+}
+
/**
* i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
* @vma: VMA to iomap
@@ -407,7 +398,7 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+ /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
if (vma->fence)
__i915_vma_unpin_fence(vma);
}
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
new file mode 100644
index 000000000000..13830e43a4d1
--- /dev/null
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Madhav Chauhan <madhav.chauhan@intel.com>
+ * Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include "intel_dsi.h"
+
+static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 afe_clk_khz; /* 8X Clock */
+ u32 esc_clk_div_m;
+
+ afe_clk_khz = DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp,
+ intel_dsi->lane_count);
+
+ esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(ICL_DSI_ESC_CLK_DIV(port),
+ esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+ POSTING_READ(ICL_DSI_ESC_CLK_DIV(port));
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(ICL_DPHY_ESC_CLK_DIV(port),
+ esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+ POSTING_READ(ICL_DPHY_ESC_CLK_DIV(port));
+ }
+}
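
A rough worked example of the divider computation above, assuming a hypothetical 148500 kHz pixel clock, 24 bpp, 4 lanes, and the 20000 kHz DSI_MAX_ESC_CLK limit defined in intel_dsi.h:

    /* afe_clk_khz   = 148500 * 24 / 4             = 891000 kHz (8X clock)
     * esc_clk_div_m = DIV_ROUND_UP(891000, 20000) = 45
     * i.e. the divider is chosen so the escape clock never exceeds
     * the permitted maximum. */
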
+
+static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+ tmp |= COMBO_PHY_MODE_DSI;
+ I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_display_power_get(dev_priv, port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_A_IO :
+ POWER_DOMAIN_PORT_DDI_B_IO);
+ }
+}
+
+static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+ u32 lane_mask;
+
+ switch (intel_dsi->lane_count) {
+ case 1:
+ lane_mask = PWR_DOWN_LN_3_1_0;
+ break;
+ case 2:
+ lane_mask = PWR_DOWN_LN_3_1;
+ break;
+ case 3:
+ lane_mask = PWR_DOWN_LN_3;
+ break;
+ case 4:
+ default:
+ lane_mask = PWR_UP_ALL_LANES;
+ break;
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_CL_DW10(port));
+ tmp &= ~PWR_DOWN_LN_MASK;
+ I915_WRITE(ICL_PORT_CL_DW10(port), tmp | lane_mask);
+ }
+}
+
+static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder)
+{
+ /* step 4a: power up all lanes of the DDI used by DSI */
+ gen11_dsi_power_up_lanes(encoder);
+}
+
+static void __attribute__((unused))
+gen11_dsi_pre_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ /* step2: enable IO power */
+ gen11_dsi_enable_io_power(encoder);
+
+ /* step3: enable DSI PLL */
+ gen11_dsi_program_esc_clk_div(encoder);
+
+ /* step4: enable DSI port and DPHY */
+ gen11_dsi_enable_port_and_phy(encoder);
+}
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index d1abf4bb7c81..6ba478e57b9b 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -12,10 +12,6 @@
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
-static struct intel_dsm_priv {
- acpi_handle dhandle;
-} intel_dsm_priv;
-
static const guid_t intel_dsm_guid =
GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f,
0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);
@@ -72,12 +68,12 @@ static char *intel_dsm_mux_type(u8 type)
}
}
-static void intel_dsm_platform_mux_info(void)
+static void intel_dsm_platform_mux_info(acpi_handle dhandle)
{
int i;
union acpi_object *pkg, *connector_count;
- pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, &intel_dsm_guid,
+ pkg = acpi_evaluate_dsm_typed(dhandle, &intel_dsm_guid,
INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
NULL, ACPI_TYPE_PACKAGE);
if (!pkg) {
@@ -107,41 +103,40 @@ static void intel_dsm_platform_mux_info(void)
ACPI_FREE(pkg);
}
-static bool intel_dsm_pci_probe(struct pci_dev *pdev)
+static acpi_handle intel_dsm_pci_probe(struct pci_dev *pdev)
{
acpi_handle dhandle;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
- return false;
+ return NULL;
if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID,
1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
DRM_DEBUG_KMS("no _DSM method for intel device\n");
- return false;
+ return NULL;
}
- intel_dsm_priv.dhandle = dhandle;
- intel_dsm_platform_mux_info();
+ intel_dsm_platform_mux_info(dhandle);
- return true;
+ return dhandle;
}
static bool intel_dsm_detect(void)
{
+ acpi_handle dhandle = NULL;
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
struct pci_dev *pdev = NULL;
- bool has_dsm = false;
int vga_count = 0;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
- has_dsm |= intel_dsm_pci_probe(pdev);
+ dhandle = intel_dsm_pci_probe(pdev) ?: dhandle;
}
- if (vga_count == 2 && has_dsm) {
- acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+ if (vga_count == 2 && dhandle) {
+ acpi_get_name(dhandle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("vga_switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
return true;
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 40285d1b91b7..b04952bacf77 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -59,7 +59,8 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
else if (property == dev_priv->broadcast_rgb_property)
*val = intel_conn_state->broadcast_rgb;
else {
- DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
+ DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -95,7 +96,8 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
return 0;
}
- DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
+ DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -124,6 +126,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
if (new_conn_state->force_audio != old_conn_state->force_audio ||
new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
+ new_conn_state->base.content_type != old_conn_state->base.content_type ||
new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
crtc_state->mode_changed = true;
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 6d068786eb41..dcba645cabb8 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -120,12 +120,6 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
&crtc_state->base.adjusted_mode;
int ret;
- /*
- * Both crtc and plane->crtc could be NULL if we're updating a
- * property while the plane is disabled. We don't actually have
- * anything driver-specific we need to test in that case, so
- * just return success.
- */
if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
@@ -209,12 +203,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
const struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
- /*
- * Both crtc and plane->crtc could be NULL if we're updating a
- * property while the plane is disabled. We don't actually have
- * anything driver-specific we need to test in that case, so
- * just return success.
- */
if (!crtc)
return 0;
@@ -277,7 +265,8 @@ intel_plane_atomic_get_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t *val)
{
- DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -299,6 +288,7 @@ intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t val)
{
- DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+ DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 3ea566f99450..b725835b47ef 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -59,6 +59,7 @@
*/
/* DP N/M table */
+#define LC_810M 810000
#define LC_540M 540000
#define LC_270M 270000
#define LC_162M 162000
@@ -99,6 +100,15 @@ static const struct dp_aud_n_m dp_aud_n_m[] = {
{ 128000, LC_540M, 4096, 33750 },
{ 176400, LC_540M, 3136, 18750 },
{ 192000, LC_540M, 2048, 11250 },
+ { 32000, LC_810M, 1024, 50625 },
+ { 44100, LC_810M, 784, 28125 },
+ { 48000, LC_810M, 512, 16875 },
+ { 64000, LC_810M, 2048, 50625 },
+ { 88200, LC_810M, 1568, 28125 },
+ { 96000, LC_810M, 1024, 16875 },
+ { 128000, LC_810M, 4096, 50625 },
+ { 176400, LC_810M, 3136, 28125 },
+ { 192000, LC_810M, 2048, 16875 },
};
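
The new 810 MHz rows follow the same relation as the existing entries, Maud / Naud = 512 * fs / f_LS_Clk; checking the first new row by hand (arithmetic added here for illustration, not part of the patch):

    /* { 32000, LC_810M, 1024, 50625 }:
     *   512 * 32000 / 810000000 = 0.0202269...
     *   1024 / 50625            = 0.0202271...
     * so Maud = 1024, Naud = 50625 reproduces the spec ratio for
     * 32 kHz audio on an HBR3 (8.1 Gbps) link. */
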
static const struct dp_aud_n_m *
@@ -198,13 +208,13 @@ static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
}
static bool intel_eld_uptodate(struct drm_connector *connector,
- i915_reg_t reg_eldv, uint32_t bits_eldv,
- i915_reg_t reg_elda, uint32_t bits_elda,
+ i915_reg_t reg_eldv, u32 bits_eldv,
+ i915_reg_t reg_elda, u32 bits_elda,
i915_reg_t reg_edid)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- uint8_t *eld = connector->eld;
- uint32_t tmp;
+ const u8 *eld = connector->eld;
+ u32 tmp;
int i;
tmp = I915_READ(reg_eldv);
@@ -218,7 +228,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
I915_WRITE(reg_elda, tmp);
for (i = 0; i < drm_eld_size(eld) / 4; i++)
- if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+ if (I915_READ(reg_edid) != *((const u32 *)eld + i))
return false;
return true;
@@ -229,7 +239,7 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- uint32_t eldv, tmp;
+ u32 eldv, tmp;
DRM_DEBUG_KMS("Disable audio codec\n");
@@ -251,12 +261,12 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_connector *connector = conn_state->connector;
- uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t tmp;
+ const u8 *eld = connector->eld;
+ u32 eldv;
+ u32 tmp;
int len, i;
- DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]);
+ DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", drm_eld_size(eld));
tmp = I915_READ(G4X_AUD_VID_DID);
if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
@@ -278,7 +288,7 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
len = min(drm_eld_size(eld) / 4, len);
DRM_DEBUG_DRIVER("ELD size %d\n", len);
for (i = 0; i < len; i++)
- I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+ I915_WRITE(G4X_HDMIW_HDMIEDID, *((const u32 *)eld + i));
tmp = I915_READ(G4X_AUD_CNTL_ST);
tmp |= eldv;
@@ -393,7 +403,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
- uint32_t tmp;
+ u32 tmp;
DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
@@ -426,8 +436,8 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_connector *connector = conn_state->connector;
enum pipe pipe = crtc->pipe;
- const uint8_t *eld = connector->eld;
- uint32_t tmp;
+ const u8 *eld = connector->eld;
+ u32 tmp;
int len, i;
DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
@@ -456,7 +466,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
- I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
+ I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((const u32 *)eld + i));
/* ELD valid */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
@@ -477,7 +487,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
- uint32_t tmp, eldv;
+ u32 tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
@@ -524,8 +534,8 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
struct drm_connector *connector = conn_state->connector;
enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
- uint8_t *eld = connector->eld;
- uint32_t tmp, eldv;
+ const u8 *eld = connector->eld;
+ u32 tmp, eldv;
int len, i;
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
@@ -575,7 +585,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
- I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+ I915_WRITE(hdmiw_hdmiedid, *((const u32 *)eld + i));
/* ELD valid */
tmp = I915_READ(aud_cntrl_st2);
@@ -639,11 +649,12 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
dev_priv->av_enc_map[pipe] = encoder;
mutex_unlock(&dev_priv->av_mutex);
- if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
+ if (acomp && acomp->base.audio_ops &&
+ acomp->base.audio_ops->pin_eld_notify) {
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
pipe = -1;
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
(int) port, (int) pipe);
}
@@ -681,11 +692,12 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
dev_priv->av_enc_map[pipe] = NULL;
mutex_unlock(&dev_priv->av_mutex);
- if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
+ if (acomp && acomp->base.audio_ops &&
+ acomp->base.audio_ops->pin_eld_notify) {
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
pipe = -1;
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
(int) port, (int) pipe);
}
@@ -880,7 +892,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
return ret;
}
-static const struct i915_audio_component_ops i915_audio_component_ops = {
+static const struct drm_audio_component_ops i915_audio_component_ops = {
.owner = THIS_MODULE,
.get_power = i915_audio_component_get_power,
.put_power = i915_audio_component_put_power,
@@ -897,12 +909,12 @@ static int i915_audio_component_bind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
int i;
- if (WARN_ON(acomp->ops || acomp->dev))
+ if (WARN_ON(acomp->base.ops || acomp->base.dev))
return -EEXIST;
drm_modeset_lock_all(&dev_priv->drm);
- acomp->ops = &i915_audio_component_ops;
- acomp->dev = i915_kdev;
+ acomp->base.ops = &i915_audio_component_ops;
+ acomp->base.dev = i915_kdev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
@@ -919,8 +931,8 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
drm_modeset_lock_all(&dev_priv->drm);
- acomp->ops = NULL;
- acomp->dev = NULL;
+ acomp->base.ops = NULL;
+ acomp->base.dev = NULL;
dev_priv->audio_component = NULL;
drm_modeset_unlock_all(&dev_priv->drm);
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 54270bdde100..1faa494e2bc9 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -267,8 +267,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
if (!lvds_lfp_data_ptrs)
return;
- dev_priv->vbt.lvds_vbt = 1;
-
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
panel_type);
@@ -518,8 +516,31 @@ parse_driver_features(struct drm_i915_private *dev_priv,
if (!driver)
return;
- if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
- dev_priv->vbt.edp.support = 1;
+ if (INTEL_GEN(dev_priv) >= 5) {
+ /*
+ * Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS
+ * to mean "eDP". The VBT spec doesn't agree with that
+ * interpretation, but real world VBTs seem to.
+ */
+ if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
+ dev_priv->vbt.int_lvds_support = 0;
+ } else {
+ /*
+ * FIXME it's not clear which BDB version has the LVDS config
+ * bits defined. Revision history in the VBT spec says:
+ * "0.92 | Add two definitions for VBT value of LVDS Active
+ * Config (00b and 11b values defined) | 06/13/2005"
+ * but does not the specify the BDB version.
+ *
+ * So far version 134 (on i945gm) is the oldest VBT observed
+ * in the wild with the bits correctly populated. Version
+ * 108 (on i85x) does not have the bits correctly populated.
+ */
+ if (bdb->version >= 134 &&
+ driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
+ driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
+ dev_priv->vbt.int_lvds_support = 0;
+ }
DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
/*
@@ -542,11 +563,8 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
int panel_type = dev_priv->vbt.panel_type;
edp = find_section(bdb, BDB_EDP);
- if (!edp) {
- if (dev_priv->vbt.edp.support)
- DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
+ if (!edp)
return;
- }
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
case EDP_18BPP:
@@ -634,7 +652,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
}
if (bdb->version >= 173) {
- uint8_t vswing;
+ u8 vswing;
/* Don't read from VBT if module parameter has valid value*/
if (i915_modparams.edp_vswing) {
@@ -688,8 +706,54 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
break;
}
- dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time;
- dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
+ /*
+ * New PSR options: 0 = 500us, 1 = 100us, 2 = 2500us, 3 = 0us.
+ * The old decimal value is the wakeup time in multiples of 100 us.
+ */
+ if (bdb->version >= 205 &&
+ (IS_GEN9_BC(dev_priv) || IS_GEMINILAKE(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 10)) {
+ switch (psr_table->tp1_wakeup_time) {
+ case 0:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 500;
+ break;
+ case 1:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 100;
+ break;
+ case 3:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp1_wakeup_time);
+ /* fallthrough */
+ case 2:
+ dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
+ break;
+ }
+
+ switch (psr_table->tp2_tp3_wakeup_time) {
+ case 0:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 500;
+ break;
+ case 1:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 100;
+ break;
+ case 3:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp2_tp3_wakeup_time);
+ /* fallthrough */
+ case 2:
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
+ break;
+ }
+ } else {
+ dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
+ }
}
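
Both switches above encode the same four-entry mapping, so an equivalent formulation is a bounds-checked table lookup; a sketch of that alternative (not what the patch does):

    static const u16 psr_wakeup_us[] = { 500, 100, 2500, 0 };
    u8 idx = psr_table->tp1_wakeup_time;

    if (idx >= ARRAY_SIZE(psr_wakeup_us)) {
            DRM_DEBUG_KMS("VBT tp1 wakeup time %d out of range, using 2500us\n",
                          idx);
            idx = 2;
    }
    dev_priv->vbt.psr.tp1_wakeup_time_us = psr_wakeup_us[idx];
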
static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
@@ -902,7 +966,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
* includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
* byte.
*/
- size_of_sequence = *((const uint32_t *)(data + index));
+ size_of_sequence = *((const u32 *)(data + index));
index += 4;
seq_end = index + size_of_sequence;
@@ -1197,18 +1261,37 @@ static const u8 cnp_ddc_pin_map[] = {
[DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
};
+static const u8 icp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
+ [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
+ [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
+ [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
+};
+
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
- if (HAS_PCH_CNP(dev_priv)) {
- if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) {
- return cnp_ddc_pin_map[vbt_pin];
- } else {
- DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin);
- return 0;
- }
+ const u8 *ddc_pin_map;
+ int n_entries;
+
+ if (HAS_PCH_ICP(dev_priv)) {
+ ddc_pin_map = icp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(icp_ddc_pin_map);
+ } else if (HAS_PCH_CNP(dev_priv)) {
+ ddc_pin_map = cnp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
+ } else {
+ /* Assuming direct map */
+ return vbt_pin;
}
- return vbt_pin;
+ if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
+ return ddc_pin_map[vbt_pin];
+
+ DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
+ vbt_pin);
+ return 0;
}
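
With the new table, the lookup behaves as follows (values taken from the maps above, summarized for illustration):

    /* vbt_pin = ICL_DDC_BUS_PORT_1 on ICP    -> GMBUS_PIN_9_TC1_ICP
     * vbt_pin out of range or mapped to 0    -> rejected, returns 0
     * no per-PCH table (neither ICP nor CNP) -> vbt_pin used as-is */
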
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
@@ -1504,7 +1587,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* LFP panel data */
dev_priv->vbt.lvds_dither = 1;
- dev_priv->vbt.lvds_vbt = 0;
/* SDVO panel data */
dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
@@ -1513,6 +1595,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->vbt.int_tv_support = 1;
dev_priv->vbt.int_crt_support = 1;
+ /* driver features */
+ dev_priv->vbt.int_lvds_support = 1;
+
/* Default to using SSC */
dev_priv->vbt.lvds_use_ssc = 1;
/*
@@ -1636,7 +1721,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
- if (HAS_PCH_NOP(dev_priv)) {
+ if (INTEL_INFO(dev_priv)->num_pipes == 0) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 18e643df523e..1db6ba7d926e 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -98,12 +98,14 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t)
struct intel_engine_cs *engine =
from_timer(engine, t, breadcrumbs.hangcheck);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned int irq_count;
if (!b->irq_armed)
return;
- if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
- b->hangcheck_interrupts = atomic_read(&engine->irq_count);
+ irq_count = READ_ONCE(b->irq_count);
+ if (b->hangcheck_interrupts != irq_count) {
+ b->hangcheck_interrupts = irq_count;
mod_timer(&b->hangcheck, wait_timeout());
return;
}
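
Replacing the two atomic_read() calls with a single READ_ONCE() snapshot closes a small window in which the counter could advance between the comparison and the store (the counter also moves from the engine to the breadcrumbs struct). The general shape of the fix:

    /* Racy: the second read may observe a newer value than the one
     * that was just compared. */
    if (b->hangcheck_interrupts != atomic_read(&engine->irq_count))
            b->hangcheck_interrupts = atomic_read(&engine->irq_count);

    /* Fixed: compare and store the same snapshot. */
    irq_count = READ_ONCE(b->irq_count);
    if (b->hangcheck_interrupts != irq_count)
            b->hangcheck_interrupts = irq_count;
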
@@ -272,13 +274,14 @@ static bool use_fake_irq(const struct intel_breadcrumbs *b)
if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
return false;
- /* Only start with the heavy weight fake irq timer if we have not
+ /*
+ * Only start with the heavy weight fake irq timer if we have not
* seen any interrupts since enabling it the first time. If the
* interrupts are still arriving, it means we made a mistake in our
* engine->seqno_barrier(), a timing error that should be transient
* and unlikely to reoccur.
*/
- return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
+ return READ_ONCE(b->irq_count) == b->hangcheck_interrupts;
}
static void enable_fake_irq(struct intel_breadcrumbs *b)
@@ -846,8 +849,9 @@ static void cancel_fake_irq(struct intel_engine_cs *engine)
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned long flags;
- spin_lock_irq(&b->irq_lock);
+ spin_lock_irqsave(&b->irq_lock, flags);
/*
* Leave the fake_irq timer enabled (if it is running), but clear the
@@ -871,7 +875,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
*/
clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
- spin_unlock_irq(&b->irq_lock);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 704ddb4d3ca7..29075c763428 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -316,6 +316,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
break;
default:
DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+ /* fall through */
case GC_DISPLAY_CLOCK_133_MHZ_PNV:
cdclk_state->cdclk = 133333;
break;
@@ -991,6 +992,16 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
u32 freq_select, cdclk_ctl;
int ret;
+ /*
+ * Based on WA#1183, the 308 and 617 MHz CDCLK rates are unsupported
+ * on SKL. In theory this should never happen since only the eDP 1.4
+ * 2.16 and 4.32 Gbps rates require them, but eDP 1.4 is not
+ * supported on SKL either, see the above WA. WARN whenever trying to
+ * use the corresponding VCO freq as that always leads to using the
+ * minimum 308MHz CDCLK.
+ */
+ WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
+
mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
@@ -1787,6 +1798,7 @@ static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
switch (ref) {
default:
MISSING_CASE(ref);
+ /* fall through */
case 24000:
ranges = ranges_24;
break;
@@ -1814,6 +1826,7 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
switch (cdclk) {
default:
MISSING_CASE(cdclk);
+ /* fall through */
case 307200:
case 556800:
case 652800:
@@ -1861,11 +1874,36 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
skl_cdclk_decimal(cdclk));
mutex_lock(&dev_priv->pcu_lock);
- /* TODO: add proper DVFS support. */
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, 2);
+ sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_state->voltage_level);
mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
+
+ /*
+ * Can't read out the voltage level :(
+ * Let's just assume everything is as expected.
+ */
+ dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
+}
+
+static u8 icl_calc_voltage_level(int cdclk)
+{
+ switch (cdclk) {
+ case 50000:
+ case 307200:
+ case 312000:
+ return 0;
+ case 556800:
+ case 552000:
+ return 1;
+ default:
+ MISSING_CASE(cdclk);
+ /* fall through */
+ case 652800:
+ case 648000:
+ return 2;
+ }
}
static void icl_get_cdclk(struct drm_i915_private *dev_priv,
@@ -1879,6 +1917,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) {
default:
MISSING_CASE(val);
+ /* fall through */
case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
cdclk_state->ref = 24000;
break;
@@ -1899,7 +1938,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
*/
cdclk_state->vco = 0;
cdclk_state->cdclk = cdclk_state->bypass;
- return;
+ goto out;
}
cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
@@ -1908,6 +1947,14 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0);
cdclk_state->cdclk = cdclk_state->vco / 2;
+
+out:
+ /*
+ * Can't read this out :( Let's assume it's
+ * at least what the CDCLK frequency requires.
+ */
+ cdclk_state->voltage_level =
+ icl_calc_voltage_level(cdclk_state->cdclk);
}
/**
@@ -1950,6 +1997,8 @@ sanitize:
sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
sanitized_state.cdclk);
+ sanitized_state.voltage_level =
+ icl_calc_voltage_level(sanitized_state.cdclk);
icl_set_cdclk(dev_priv, &sanitized_state);
}
@@ -1967,6 +2016,7 @@ void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cdclk_state.bypass;
cdclk_state.vco = 0;
+ cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
icl_set_cdclk(dev_priv, &cdclk_state);
}
@@ -2470,6 +2520,9 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
+ intel_state->cdclk.logical.voltage_level =
+ max(icl_calc_voltage_level(cdclk),
+ cnl_compute_min_voltage_level(intel_state));
if (!intel_state->active_crtcs) {
cdclk = icl_calc_cdclk(0, ref);
@@ -2477,6 +2530,8 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
+ intel_state->cdclk.actual.voltage_level =
+ icl_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual = intel_state->cdclk.logical;
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 072b326d5ee0..0c6bf82bb059 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -63,33 +63,35 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
return intel_encoder_to_crt(intel_attached_encoder(connector));
}
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t adpa_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = I915_READ(adpa_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
+ else
+ *pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;
+
+ return val & ADPA_DAC_ENABLE;
+}
+
static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
-
- tmp = I915_READ(crt->adpa_reg);
+ ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
- if (!(tmp & ADPA_DAC_ENABLE))
- goto out;
-
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- ret = true;
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
@@ -168,11 +170,9 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
if (HAS_PCH_LPT(dev_priv))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev_priv))
- adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
- else if (crtc->pipe == 0)
- adpa |= ADPA_PIPE_A_SELECT;
+ adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe);
else
- adpa |= ADPA_PIPE_B_SELECT;
+ adpa |= ADPA_PIPE_SEL(crtc->pipe);
if (!HAS_PCH_SPLIT(dev_priv))
I915_WRITE(BCLRPAT(crtc->pipe), 0);
@@ -232,6 +232,8 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
lpt_disable_pch_transcoder(dev_priv);
@@ -268,6 +270,8 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
dev_priv->display.fdi_link_train(crtc, crtc_state);
+
+ intel_ddi_enable_pipe_clock(crtc_state);
}
static void hsw_enable_crt(struct intel_encoder *encoder,
@@ -333,6 +337,10 @@ intel_crt_mode_valid(struct drm_connector *connector,
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
return MODE_CLOCK_HIGH;
+ /* HSW/BDW FDI limited to 4k */
+ if (mode->hdisplay > 4096)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
@@ -375,6 +383,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ /* HSW/BDW FDI limited to 4k */
+ if (adjusted_mode->crtc_hdisplay > 4096 ||
+ adjusted_mode->crtc_hblank_start > 4096)
+ return false;
+
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
@@ -513,7 +526,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
* to get a reliable result.
*/
- if (IS_G4X(dev_priv) && !IS_GM45(dev_priv))
+ if (IS_G45(dev_priv))
tries = 2;
else
tries = 1;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index fed26d6e4e27..39d66f8493fa 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -915,7 +915,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
- if (IS_CANNONLAKE(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ if (port == PORT_A || port == PORT_B)
+ icl_get_combo_buf_trans(dev_priv, port,
+ INTEL_OUTPUT_HDMI, &n_entries);
+ else
+ n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+ default_entry = n_entries - 1;
+ } else if (IS_CANNONLAKE(dev_priv)) {
cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
default_entry = n_entries - 1;
} else if (IS_GEN9_LP(dev_priv)) {
@@ -1055,14 +1062,31 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
const struct intel_shared_dpll *pll)
{
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ int clock = crtc->config->port_clock;
const enum intel_dpll_id id = pll->info->id;
switch (id) {
default:
MISSING_CASE(id);
+ /* fall through */
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
return DDI_CLK_SEL_NONE;
+ case DPLL_ID_ICL_TBTPLL:
+ switch (clock) {
+ case 162000:
+ return DDI_CLK_SEL_TBT_162;
+ case 270000:
+ return DDI_CLK_SEL_TBT_270;
+ case 540000:
+ return DDI_CLK_SEL_TBT_540;
+ case 810000:
+ return DDI_CLK_SEL_TBT_810;
+ default:
+ MISSING_CASE(clock);
+ break;
+ }
+ /* fall through */
case DPLL_ID_ICL_MGPLL1:
case DPLL_ID_ICL_MGPLL2:
case DPLL_ID_ICL_MGPLL3:
@@ -1243,35 +1267,6 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
return ret;
}
-/* Finds the only possible encoder associated with the given CRTC. */
-struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_encoder *ret = NULL;
- struct drm_atomic_state *state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int num_encoders = 0;
- int i;
-
- state = crtc_state->base.state;
-
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->base.crtc)
- continue;
-
- ret = to_intel_encoder(connector_state->best_encoder);
- num_encoders++;
- }
-
- WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
- pipe_name(crtc->pipe));
-
- BUG_ON(ret == NULL);
- return ret;
-}
-
#define LC_FREQ 2700
static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
@@ -1374,8 +1369,13 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
uint32_t cfgcr0, cfgcr1;
uint32_t p0, p1, p2, dco_freq, ref_clock;
- cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
- cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
+ if (INTEL_GEN(dev_priv) >= 11) {
+ cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
+ cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
+ } else {
+ cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
+ cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
+ }
p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
@@ -1451,6 +1451,30 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
+static void icl_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ int link_clock = 0;
+ uint32_t pll_id;
+
+ pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ if (port == PORT_A || port == PORT_B) {
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+ link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ else
+ link_clock = icl_calc_dp_combo_pll_link(dev_priv,
+ pll_id);
+ } else {
+ /* FIXME - Add for MG PLL */
+ WARN(1, "MG PLL clock_get code not implemented yet\n");
+ }
+
+ pipe_config->port_clock = link_clock;
+ ddi_dotclock_get(pipe_config);
+}
+
static void cnl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -1644,6 +1668,8 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
bxt_ddi_clock_get(encoder, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_clock_get(encoder, pipe_config);
+ else if (IS_ICELAKE(dev_priv))
+ icl_ddi_clock_get(encoder, pipe_config);
}
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -1967,15 +1993,50 @@ out:
return ret;
}
-static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder)
+static inline enum intel_display_power_domain
+intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
+{
+ /* CNL HW requires corresponding AUX IOs to be powered up for PSR with
+ * DC states enabled at the same time, while for driver initiated AUX
+ * transfers we need the same AUX IOs to be powered but with DC states
+ * disabled. Accordingly use the AUX power domain here which leaves DC
+ * states enabled.
+ * However, for non-A AUX ports the corresponding non-EDP transcoders
+ * would have already enabled power well 2 and DC_OFF. This means we can
+ * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
+ * specific AUX_IO reference without powering up any extra wells.
+ * Note that PSR is enabled only on Port A even though this function
+ * returns the correct domain for other ports too.
+ */
+ return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
+ intel_dp->aux_power_domain;
+}
+
+static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- enum pipe pipe;
+ struct intel_digital_port *dig_port;
+ u64 domains;
- if (intel_ddi_get_hw_state(encoder, &pipe))
- return BIT_ULL(dig_port->ddi_io_power_domain);
+ /*
+ * TODO: Add support for MST encoders. At the moment, the following
+ * should never
+ * happen since fake-MST encoders don't set their get_power_domains()
+ * hook.
+ */
+ if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
+ return 0;
- return 0;
+ dig_port = enc_to_dig_port(&encoder->base);
+ domains = BIT_ULL(dig_port->ddi_io_power_domain);
+
+ /* AUX power is only needed for (e)DP mode, not for HDMI. */
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ struct intel_dp *intel_dp = &dig_port->dp;
+
+ domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp));
+ }
+
+ return domains;
}
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2124,6 +2185,26 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
DP_TRAIN_VOLTAGE_SWING_MASK;
}
+/*
+ * We assume that the full set of pre-emphasis values can be
+ * used on all DDI platforms. Should that change we need to
+ * rethink this code.
+ */
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ return DP_TRAIN_PRE_EMPH_LEVEL_2;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ return DP_TRAIN_PRE_EMPH_LEVEL_1;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+ default:
+ return DP_TRAIN_PRE_EMPH_LEVEL_0;
+ }
+}
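
Given the DP_TRAIN_* encodings in drm_dp_helper.h (swing levels in bits 1:0, pre-emphasis in bits 4:3), the switch collapses to a closed form; a sketch, assuming the levels stay within 0..3:

    u8 swing = voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK;

    /* DP keeps swing + pre-emphasis <= 3, so the strongest legal
     * pre-emphasis is simply the remainder. */
    return (3 - swing) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
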
+
static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
@@ -2595,6 +2676,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(intel_dp));
+
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
@@ -2619,6 +2703,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
+
+ intel_ddi_enable_pipe_clock(crtc_state);
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2649,6 +2735,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
+ intel_ddi_enable_pipe_clock(crtc_state);
+
intel_dig_port->set_infoframes(&encoder->base,
crtc_state->has_infoframe,
crtc_state, conn_state);
@@ -2718,6 +2806,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
/*
* Power down sink before disabling the port, otherwise we end
* up getting interrupts from the sink on detecting link loss.
@@ -2733,6 +2823,9 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
+
+ intel_display_power_put(dev_priv,
+ intel_ddi_main_link_aux_domain(intel_dp));
}
static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -2743,11 +2836,13 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
- intel_disable_ddi_buf(encoder);
-
dig_port->set_infoframes(&encoder->base, false,
old_crtc_state, old_conn_state);
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
+ intel_disable_ddi_buf(encoder);
+
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
@@ -3034,6 +3129,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
{
if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
+ else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 1;
}
void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -3542,7 +3639,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
goto err;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
- dev_priv->hotplug.irq_port[port] = intel_dig_port;
}
/* In theory we don't need the encoder->type check, but leave it just in
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 0fd13df424cf..0ef0c6448d53 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -858,6 +858,8 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
void intel_driver_caps_print(const struct intel_driver_caps *caps,
struct drm_printer *p)
{
+ drm_printf(p, "Has logical contexts? %s\n",
+ yesno(caps->has_logical_contexts));
drm_printf(p, "scheduler: %x\n", caps->scheduler);
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 933e31669557..633f9fbf72ea 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -186,6 +186,7 @@ struct intel_device_info {
struct intel_driver_caps {
unsigned int scheduler;
+ bool has_logical_contexts:1;
};
static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dec0d60921bf..ed3fa1c8a983 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1022,7 +1022,7 @@ bool intel_crtc_active(struct intel_crtc *crtc)
* We can ditch the adjusted_mode.crtc_clock check as soon
* as Haswell has gained clock readout/fastboot support.
*
- * We can ditch the crtc->primary->fb check as soon as we can
+ * We can ditch the crtc->primary->state->fb check as soon as we can
* properly reconstruct framebuffers.
*
* FIXME: The intel_crtc->active here should be switched to
@@ -1202,7 +1202,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
i915_reg_t pp_reg;
u32 val;
- enum pipe panel_pipe = PIPE_A;
+ enum pipe panel_pipe = INVALID_PIPE;
bool locked = true;
if (WARN_ON(HAS_DDI(dev_priv)))
@@ -1214,18 +1214,35 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
pp_reg = PP_CONTROL(0);
port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
- if (port_sel == PANEL_PORT_SELECT_LVDS &&
- I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
- panel_pipe = PIPE_B;
- /* XXX: else fix for eDP */
+ switch (port_sel) {
+ case PANEL_PORT_SELECT_LVDS:
+ intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPA:
+ intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPC:
+ intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPD:
+ intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
+ break;
+ default:
+ MISSING_CASE(port_sel);
+ break;
+ }
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* presumably write lock depends on pipe, not port select */
pp_reg = PP_CONTROL(pipe);
panel_pipe = pipe;
} else {
+ u32 port_sel;
+
pp_reg = PP_CONTROL(0);
- if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
- panel_pipe = PIPE_B;
+ port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+
+ WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
+ intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
}
val = I915_READ(pp_reg);
@@ -1267,7 +1284,10 @@ void assert_pipe(struct drm_i915_private *dev_priv,
static void assert_plane(struct intel_plane *plane, bool state)
{
- bool cur_state = plane->get_hw_state(plane);
+ enum pipe pipe;
+ bool cur_state;
+
+ cur_state = plane->get_hw_state(plane, &pipe);
I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
@@ -1305,125 +1325,64 @@ void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
-static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 port_sel, u32 val)
-{
- if ((val & DP_PORT_EN) == 0)
- return false;
-
- if (HAS_PCH_CPT(dev_priv)) {
- u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
- if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
- return false;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
- return false;
- } else {
- if ((val & DP_PIPE_MASK) != (pipe << 30))
- return false;
- }
- return true;
-}
-
-static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ i915_reg_t dp_reg)
{
- if ((val & SDVO_ENABLE) == 0)
- return false;
-
- if (HAS_PCH_CPT(dev_priv)) {
- if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
- return false;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
- return false;
- } else {
- if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
- return false;
- }
- return true;
-}
+ enum pipe port_pipe;
+ bool state;
-static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
-{
- if ((val & LVDS_PORT_EN) == 0)
- return false;
+ state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
- if (HAS_PCH_CPT(dev_priv)) {
- if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
- return false;
- } else {
- if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
- return false;
- }
- return true;
-}
+ I915_STATE_WARN(state && port_pipe == pipe,
+ "PCH DP %c enabled on transcoder %c, should be disabled\n",
+ port_name(port), pipe_name(pipe));
-static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 val)
-{
- if ((val & ADPA_DAC_ENABLE) == 0)
- return false;
- if (HAS_PCH_CPT(dev_priv)) {
- if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
- return false;
- } else {
- if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
- return false;
- }
- return true;
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ "IBX PCH DP %c still using transcoder B\n",
+ port_name(port));
}
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, i915_reg_t reg,
- u32 port_sel)
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ i915_reg_t hdmi_reg)
{
- u32 val = I915_READ(reg);
- I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
- "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
- i915_mmio_reg_offset(reg), pipe_name(pipe));
+ enum pipe port_pipe;
+ bool state;
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
- && (val & DP_PIPEB_SELECT),
- "IBX PCH dp port still using transcoder B\n");
-}
+ state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, i915_reg_t reg)
-{
- u32 val = I915_READ(reg);
- I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
- "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
- i915_mmio_reg_offset(reg), pipe_name(pipe));
+ I915_STATE_WARN(state && port_pipe == pipe,
+ "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
+ port_name(port), pipe_name(pipe));
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
- && (val & SDVO_PIPE_B_SELECT),
- "IBX PCH hdmi port still using transcoder B\n");
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ "IBX PCH HDMI %c still using transcoder B\n",
+ port_name(port));
}
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- u32 val;
+ enum pipe port_pipe;
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
- val = I915_READ(PCH_ADPA);
- I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
- "PCH VGA enabled on transcoder %c, should be disabled\n",
- pipe_name(pipe));
+ I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
+ port_pipe == pipe,
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
- val = I915_READ(PCH_LVDS);
- I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
- "PCH LVDS enabled on transcoder %c, should be disabled\n",
- pipe_name(pipe));
+ I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
+ port_pipe == pipe,
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
- assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
- assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
- assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
static void _vlv_enable_pll(struct intel_crtc *crtc,
@@ -2521,6 +2480,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct intel_rotation_info *rot_info = &intel_fb->rot_info;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 gtt_offset_rotated = 0;
unsigned int max_size = 0;
int i, num_planes = fb->format->num_planes;
@@ -2585,7 +2545,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
* fb layout agrees with the fence layout. We already check that the
* fb stride matches the fence stride elsewhere.
*/
- if (i == 0 && i915_gem_object_is_tiled(intel_fb->obj) &&
+ if (i == 0 && i915_gem_object_is_tiled(obj) &&
(x + width) * cpp > fb->pitches[i]) {
DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
i, fb->offsets[i]);
@@ -2670,9 +2630,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
max_size = max(max_size, offset + size);
}
- if (max_size * tile_size > intel_fb->obj->base.size) {
+ if (max_size * tile_size > obj->base.size) {
DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
- max_size * tile_size, intel_fb->obj->base.size);
+ max_size * tile_size, obj->base.size);
return -EINVAL;
}
@@ -2796,10 +2756,10 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
/* FIXME pre-g4x don't work like this */
if (visible) {
- crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base));
+ crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
crtc_state->active_planes |= BIT(plane->id);
} else {
- crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base));
+ crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
crtc_state->active_planes &= ~BIT(plane->id);
}
@@ -2922,9 +2882,8 @@ valid_fb:
if (i915_gem_object_is_tiled(obj))
dev_priv->preserve_bios_swizzle = true;
- drm_framebuffer_get(fb);
- primary->fb = primary->state->fb = fb;
- primary->crtc = primary->state->crtc = &intel_crtc->base;
+ plane_state->fb = fb;
+ plane_state->crtc = &intel_crtc->base;
intel_set_plane_visible(to_intel_crtc_state(crtc_state),
to_intel_plane_state(plane_state),
@@ -3430,24 +3389,33 @@ static void i9xx_disable_plane(struct intel_plane *plane,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static bool i9xx_plane_get_hw_state(struct intel_plane *plane)
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- enum pipe pipe = plane->pipe;
bool ret;
+ u32 val;
/*
* Not 100% correct for planes that can move between pipes,
* but that's only the case for gen2-4 which don't have any
* display power wells.
*/
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(DSPCNTR(i9xx_plane)) & DISPLAY_PLANE_ENABLE;
+ val = I915_READ(DSPCNTR(i9xx_plane));
+
+ ret = val & DISPLAY_PLANE_ENABLE;
+
+ if (INTEL_GEN(dev_priv) >= 5)
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
intel_display_power_put(dev_priv, power_domain);
@@ -3689,7 +3657,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
- if (intel_format_is_yuv(fb->format->format)) {
+ if (fb->format->is_yuv) {
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
else
@@ -4631,20 +4599,33 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
}
}
-/* Return which DP Port should be selected for Transcoder DP control */
-static enum port
-intel_trans_dp_port_sel(struct intel_crtc *crtc)
+/*
+ * Finds the encoder associated with the given CRTC. This can only be
+ * used when we know that the CRTC isn't feeding multiple encoders!
+ */
+static struct intel_encoder *
+intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *encoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_connector_state *connector_state;
+ const struct drm_connector *connector;
+ struct intel_encoder *encoder = NULL;
+ int num_encoders = 0;
+ int i;
- for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
- if (encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_EDP)
- return encoder->port;
+ for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+ if (connector_state->crtc != &crtc->base)
+ continue;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+ num_encoders++;
}
- return -1;
+ WARN(num_encoders != 1, "%d encoders for pipe %c\n",
+ num_encoders, pipe_name(crtc->pipe));
+
+ return encoder;
}
/*
@@ -4655,7 +4636,8 @@ intel_trans_dp_port_sel(struct intel_crtc *crtc)
* - DP transcoding bits
* - transcoder
*/
-static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
+static void ironlake_pch_enable(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
@@ -4714,6 +4696,8 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
&crtc_state->base.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
+ enum port port;
+
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK |
@@ -4726,19 +4710,9 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
- switch (intel_trans_dp_port_sel(crtc)) {
- case PORT_B:
- temp |= TRANS_DP_PORT_SEL_B;
- break;
- case PORT_C:
- temp |= TRANS_DP_PORT_SEL_C;
- break;
- case PORT_D:
- temp |= TRANS_DP_PORT_SEL_D;
- break;
- default:
- BUG();
- }
+ port = intel_get_crtc_new_encoder(state, crtc_state)->port;
+ WARN_ON(port < PORT_B || port > PORT_D);
+ temp |= TRANS_DP_PORT_SEL(port);
I915_WRITE(reg, temp);
}
@@ -4746,7 +4720,8 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
ironlake_enable_pch_transcoder(dev_priv, pipe);
}
-static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
+static void lpt_pch_enable(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -4776,6 +4751,39 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
}
}
+/*
+ * The hardware phase 0.0 refers to the center of the pixel.
+ * We want to start from the top/left edge which is phase
+ * -0.5. That matches how the hardware calculates the scaling
+ * factors (from top-left of the first pixel to bottom-right
+ * of the last pixel, as opposed to the pixel centers).
+ *
+ * For 4:2:0 subsampled chroma planes we obviously have to
+ * adjust that so that the chroma sample position lands in
+ * the right spot.
+ *
+ * Note that for packed YCbCr 4:2:2 formats there is no way to
+ * control chroma siting. The hardware simply replicates the
+ * chroma samples for both of the luma samples, and thus we don't
+ * actually get the expected MPEG2 chroma siting convention :(
+ * The same behaviour is observed on pre-SKL platforms as well.
+ */
+u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
+{
+ int phase = -0x8000;
+ u16 trip = 0;
+
+ if (chroma_cosited)
+ phase += (sub - 1) * 0x8000 / sub;
+
+ if (phase < 0)
+ phase = 0x10000 + phase;
+ else
+ trip = PS_PHASE_TRIP;
+
+ return ((phase >> 2) & PS_PHASE_MASK) | trip;
+}
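To see how the fixed-point math above works out, here is a self-contained
sketch of the same calculation. The PS_PHASE_MASK and PS_PHASE_TRIP values
are assumptions made purely for illustration; the real definitions live in
i915_reg.h.

	#include <stdio.h>
	#include <stdint.h>

	#define PS_PHASE_MASK (0x3fff)   /* assumed field width */
	#define PS_PHASE_TRIP (1 << 15)  /* assumed trip bit */

	static uint16_t calc_phase(int sub, int chroma_cosited)
	{
		int phase = -0x8000; /* -0.5 in .16 fixed point */
		uint16_t trip = 0;

		if (chroma_cosited)
			phase += (sub - 1) * 0x8000 / sub;

		if (phase < 0)
			phase = 0x10000 + phase; /* wrap into the register encoding */
		else
			trip = PS_PHASE_TRIP;

		return ((phase >> 2) & PS_PHASE_MASK) | trip;
	}

	int main(void)
	{
		printf("RGB/luma:         0x%04x\n", calc_phase(1, 0)); /* -0.5  -> 0x2000 */
		printf("4:2:0 cosited UV: 0x%04x\n", calc_phase(2, 1)); /* -0.25 -> 0x3000 */
		return 0;
	}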
+
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
@@ -4975,14 +4983,22 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
&crtc->config->scaler_state;
if (crtc->config->pch_pfit.enabled) {
+ u16 uv_rgb_hphase, uv_rgb_vphase;
int id;
if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
return;
+ uv_rgb_hphase = skl_scaler_calc_phase(1, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+
id = scaler_state->scaler_id;
I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+ I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
}
@@ -5501,10 +5517,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
*
* Spurious PCH underruns also occur during PCH enabling.
*/
- if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- if (intel_crtc->config->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
if (intel_crtc->config->has_pch_encoder)
intel_prepare_shared_dpll(intel_crtc);
@@ -5549,7 +5563,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_enable_pipe(pipe_config);
if (intel_crtc->config->has_pch_encoder)
- ironlake_pch_enable(pipe_config);
+ ironlake_pch_enable(old_intel_state, pipe_config);
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
@@ -5559,9 +5573,16 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
if (HAS_PCH_CPT(dev_priv))
cpt_verify_modeset(dev, intel_crtc->pipe);
- /* Must wait for vblank to avoid spurious PCH FIFO underruns */
- if (intel_crtc->config->has_pch_encoder)
+ /*
+ * Must wait for vblank to avoid spurious PCH FIFO underruns.
+ * A second vblank wait is also needed, at least on ILK with
+ * some interlaced HDMI modes, so always do the double wait in
+ * case there are more corner cases we don't know about.
+ */
+ if (intel_crtc->config->has_pch_encoder) {
intel_wait_for_vblank(dev_priv, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
+ }
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
@@ -5611,6 +5632,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
bool psl_clkgate_wa;
+ u32 pipe_chicken;
if (WARN_ON(intel_crtc->active))
return;
@@ -5623,6 +5645,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (INTEL_GEN(dev_priv) >= 11)
icl_map_plls_to_ports(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(crtc, pipe_config, old_state);
+
if (intel_crtc_has_dp_encoder(intel_crtc->config))
intel_dp_set_m_n(intel_crtc, M1_N1);
@@ -5651,11 +5675,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true;
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
-
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_enable_pipe_clock(pipe_config);
-
/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
intel_crtc->config->pch_pfit.enabled;
@@ -5673,6 +5692,17 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
*/
intel_color_load_luts(&pipe_config->base);
+ /*
+ * Display WA #1153: enable hardware to bypass the alpha math
+ * and rounding for per-pixel values 00 and 0xff
+ */
+ if (INTEL_GEN(dev_priv) >= 11) {
+ pipe_chicken = I915_READ(PIPE_CHICKEN(pipe));
+ if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN))
+ I915_WRITE_FW(PIPE_CHICKEN(pipe),
+ pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN);
+ }
+
intel_ddi_set_pipe_settings(pipe_config);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_transcoder_func(pipe_config);
@@ -5688,7 +5718,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_enable_pipe(pipe_config);
if (intel_crtc->config->has_pch_encoder)
- lpt_pch_enable(pipe_config);
+ lpt_pch_enable(old_intel_state, pipe_config);
if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(pipe_config, true);
@@ -5741,10 +5771,8 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
* pipe is already disabled, but FDI RX/TX is still enabled.
* Happens at least with VGA+HDMI cloning. Suppress them.
*/
- if (intel_crtc->config->has_pch_encoder) {
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- }
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -5794,7 +5822,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct drm_crtc *crtc = old_crtc_state->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -5805,8 +5833,8 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_disable_pipe(old_crtc_state);
- if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
- intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
+ if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
+ intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(old_crtc_state);
@@ -5816,9 +5844,6 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
else
ironlake_pfit_disable(intel_crtc, false);
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_disable_pipe_clock(intel_crtc->config);
-
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
if (INTEL_GEN(dev_priv) >= 11)
@@ -5849,6 +5874,22 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
+bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+ if (IS_ICELAKE(dev_priv))
+ return port >= PORT_C && port <= PORT_F;
+
+ return false;
+}
+
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+ if (!intel_port_is_tc(dev_priv, port))
+ return PORT_TC_NONE;
+
+ return port - PORT_C;
+}
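On Icelake the mapping is a fixed offset from PORT_C; a minimal
self-contained check, with the enum values replicated here purely for
illustration:

	#include <assert.h>

	enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_F };
	enum tc_port { PORT_TC_NONE = -1, PORT_TC1 = 0, PORT_TC2, PORT_TC3, PORT_TC4 };

	/* Mirrors intel_port_to_tc() for an Icelake-like device. */
	static enum tc_port port_to_tc(enum port port)
	{
		if (port < PORT_C || port > PORT_F)
			return PORT_TC_NONE;
		return (enum tc_port)(port - PORT_C);
	}

	int main(void)
	{
		assert(port_to_tc(PORT_C) == PORT_TC1);
		assert(port_to_tc(PORT_F) == PORT_TC4);
		assert(port_to_tc(PORT_A) == PORT_TC_NONE);
		return 0;
	}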
+
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
switch (port) {
@@ -7675,16 +7716,18 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- enum pipe pipe = crtc->pipe;
+ enum pipe pipe;
u32 val, base, offset;
int fourcc, pixel_format;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
- if (!plane->get_hw_state(plane))
+ if (!plane->get_hw_state(plane, &pipe))
return;
+ WARN_ON(pipe != crtc->pipe);
+
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -8705,16 +8748,18 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
enum plane_id plane_id = plane->id;
- enum pipe pipe = crtc->pipe;
+ enum pipe pipe;
u32 val, base, offset, stride_mult, tiling, alpha;
int fourcc, pixel_format;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
- if (!plane->get_hw_state(plane))
+ if (!plane->get_hw_state(plane, &pipe))
return;
+ WARN_ON(pipe != crtc->pipe);
+
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -9142,9 +9187,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->base.state);
+
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
struct intel_encoder *encoder =
- intel_ddi_get_crtc_new_encoder(crtc_state);
+ intel_get_crtc_new_encoder(state, crtc_state);
if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
@@ -9172,6 +9220,44 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
+static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
+ enum port port,
+ struct intel_crtc_state *pipe_config)
+{
+ enum intel_dpll_id id;
+ u32 temp;
+
+ /* TODO: TBT pll not implemented. */
+ switch (port) {
+ case PORT_A:
+ case PORT_B:
+ temp = I915_READ(DPCLKA_CFGCR0_ICL) &
+ DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
+
+ if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
+ return;
+ break;
+ case PORT_C:
+ id = DPLL_ID_ICL_MGPLL1;
+ break;
+ case PORT_D:
+ id = DPLL_ID_ICL_MGPLL2;
+ break;
+ case PORT_E:
+ id = DPLL_ID_ICL_MGPLL3;
+ break;
+ case PORT_F:
+ id = DPLL_ID_ICL_MGPLL4;
+ break;
+ default:
+ MISSING_CASE(port);
+ return;
+ }
+
+ pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+}
+
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
@@ -9273,6 +9359,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
WARN(1, "unknown pipe linked to edp transcoder\n");
+ /* fall through */
case TRANS_DDI_EDP_INPUT_A_ONOFF:
case TRANS_DDI_EDP_INPUT_A_ON:
trans_edp_pipe = PIPE_A;
@@ -9328,7 +9415,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
* registers/MIPI[BXT]. We can break out here early, since we
* need the same DSI PLL to be enabled for both DSI ports.
*/
- if (!intel_dsi_pll_is_enabled(dev_priv))
+ if (!bxt_dsi_pll_is_enabled(dev_priv))
break;
/* XXX: this works for video mode only */
@@ -9359,7 +9446,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- if (IS_CANNONLAKE(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ icelake_get_ddi_pll(dev_priv, port, pipe_config);
+ else if (IS_CANNONLAKE(dev_priv))
cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_GEN9_BC(dev_priv))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
@@ -9692,7 +9781,8 @@ static void i845_disable_cursor(struct intel_plane *plane,
i845_update_cursor(plane, NULL, NULL);
}
-static bool i845_cursor_get_hw_state(struct intel_plane *plane)
+static bool i845_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
@@ -9704,6 +9794,8 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane)
ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+ *pipe = PIPE_A;
+
intel_display_power_put(dev_priv, power_domain);
return ret;
@@ -9715,25 +9807,30 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- u32 cntl;
+ u32 cntl = 0;
- cntl = MCURSOR_GAMMA_ENABLE;
+ if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
- if (HAS_DDI(dev_priv))
- cntl |= CURSOR_PIPE_CSC_ENABLE;
+ if (INTEL_GEN(dev_priv) <= 10) {
+ cntl |= MCURSOR_GAMMA_ENABLE;
+
+ if (HAS_DDI(dev_priv))
+ cntl |= MCURSOR_PIPE_CSC_ENABLE;
+ }
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
switch (plane_state->base.crtc_w) {
case 64:
- cntl |= CURSOR_MODE_64_ARGB_AX;
+ cntl |= MCURSOR_MODE_64_ARGB_AX;
break;
case 128:
- cntl |= CURSOR_MODE_128_ARGB_AX;
+ cntl |= MCURSOR_MODE_128_ARGB_AX;
break;
case 256:
- cntl |= CURSOR_MODE_256_ARGB_AX;
+ cntl |= MCURSOR_MODE_256_ARGB_AX;
break;
default:
MISSING_CASE(plane_state->base.crtc_w);
@@ -9741,7 +9838,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
}
if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
- cntl |= CURSOR_ROTATE_180;
+ cntl |= MCURSOR_ROTATE_180;
return cntl;
}
@@ -9903,23 +10000,32 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
i9xx_update_cursor(plane, NULL, NULL);
}
-static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
- enum pipe pipe = plane->pipe;
bool ret;
+ u32 val;
/*
* Not 100% correct for planes that can move between pipes,
* but that's only the case for gen2-3 which don't have any
* display power wells.
*/
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+ val = I915_READ(CURCNTR(plane->pipe));
+
+ ret = val & MCURSOR_MODE;
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
+ MCURSOR_PIPE_SELECT_SHIFT;
intel_display_power_put(dev_priv, power_domain);
@@ -10631,7 +10737,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->base.state->crtc)
- drm_connector_unreference(&connector->base);
+ drm_connector_put(&connector->base);
if (connector->base.encoder) {
connector->base.state->best_encoder =
@@ -10639,7 +10745,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
connector->base.state->crtc =
connector->base.encoder->crtc;
- drm_connector_reference(&connector->base);
+ drm_connector_get(&connector->base);
} else {
connector->base.state->best_encoder = NULL;
connector->base.state->crtc = NULL;
@@ -10918,6 +11024,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
case INTEL_OUTPUT_DDI:
if (WARN_ON(!HAS_DDI(to_i915(dev))))
break;
+ /* else: fall through */
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
@@ -11791,7 +11898,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
struct drm_crtc_state *new_state)
{
struct intel_dpll_hw_state dpll_hw_state;
- unsigned crtc_mask;
+ unsigned int crtc_mask;
bool active;
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
@@ -11818,7 +11925,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
return;
}
- crtc_mask = 1 << drm_crtc_index(crtc);
+ crtc_mask = drm_crtc_mask(crtc);
if (new_state->active)
I915_STATE_WARN(!(pll->active_mask & crtc_mask),
@@ -11853,7 +11960,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
if (old_state->shared_dpll &&
old_state->shared_dpll != new_state->shared_dpll) {
- unsigned crtc_mask = 1 << drm_crtc_index(crtc);
+ unsigned int crtc_mask = drm_crtc_mask(crtc);
struct intel_shared_dpll *pll = old_state->shared_dpll;
I915_STATE_WARN(pll->active_mask & crtc_mask,
@@ -12449,6 +12556,19 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
+static void intel_atomic_cleanup_work(struct work_struct *work)
+{
+ struct drm_atomic_state *state =
+ container_of(work, struct drm_atomic_state, commit_work);
+ struct drm_i915_private *i915 = to_i915(state->dev);
+
+ drm_atomic_helper_cleanup_planes(&i915->drm, state);
+ drm_atomic_helper_commit_cleanup_done(state);
+ drm_atomic_state_put(state);
+
+ intel_atomic_helper_free_state(i915);
+}
+
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -12609,13 +12729,16 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
}
- drm_atomic_helper_cleanup_planes(dev, state);
-
- drm_atomic_helper_commit_cleanup_done(state);
-
- drm_atomic_state_put(state);
-
- intel_atomic_helper_free_state(dev_priv);
+ /*
+ * Defer the cleanup of the old state to a separate worker so as
+ * not to impede the current task (userspace, which executes blocking
+ * modesets inline). For out-of-line asynchronous modesets/flips,
+ * deferring to a new worker seems overkill, but we would place a
+ * schedule point (cond_resched()) here anyway to keep latencies
+ * down.
+ */
+ INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
+ schedule_work(&state->commit_work);
}
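One subtlety: state->commit_work is the same work struct that ran the
nonblocking commit itself. Reusing it is presumably safe because a work
item may be re-initialised and re-queued once its handler has started
executing; a minimal sketch of that pattern, with hypothetical names:

	/* Sketch: reusing one work_struct for two sequential phases. */
	static void phase2(struct work_struct *work) { /* cleanup */ }

	static void phase1(struct work_struct *work)
	{
		/* ... do the commit ... */
		INIT_WORK(work, phase2); /* safe: phase1 is already running */
		schedule_work(work);
	}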
static void intel_atomic_commit_work(struct work_struct *work)
@@ -12981,6 +13104,19 @@ intel_prepare_plane_fb(struct drm_plane *plane,
add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
}
+ /*
+ * We declare pageflips to be interactive and so merit a small bias
+ * towards upclocking to deliver the frame on time. By only changing
+ * the RPS thresholds to sample more regularly and aim for higher
+ * clocks we can hopefully deliver low power workloads (like Kodi)
+ * that are not quite steady state without resorting to forcing
+ * maximum clocks following a vblank miss (see do_rps_boost()).
+ */
+ if (!intel_state->rps_interactive) {
+ intel_rps_mark_interactive(dev_priv, true);
+ intel_state->rps_interactive = true;
+ }
+
return 0;
}
@@ -12997,8 +13133,15 @@ void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(old_state->state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
+ if (intel_state->rps_interactive) {
+ intel_rps_mark_interactive(dev_priv, false);
+ intel_state->rps_interactive = false;
+ }
+
/* Should only be called after a successful intel_prepare_plane_fb()! */
mutex_lock(&dev_priv->drm.struct_mutex);
intel_plane_unpin_fb(to_intel_plane_state(old_state));
@@ -13181,8 +13324,17 @@ void intel_plane_destroy(struct drm_plane *plane)
kfree(to_intel_plane(plane));
}
-static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
+static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
@@ -13195,8 +13347,17 @@ static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool i965_mod_supported(uint32_t format, uint64_t modifier)
+static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
@@ -13211,8 +13372,26 @@ static bool i965_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool skl_mod_supported(uint32_t format, uint64_t modifier)
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ break;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (!plane->has_ccs)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@@ -13244,38 +13423,36 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
+static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
-
- if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
- return false;
-
- if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
- modifier != DRM_FORMAT_MOD_LINEAR)
- return false;
-
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_mod_supported(format, modifier);
- else if (INTEL_GEN(dev_priv) >= 4)
- return i965_mod_supported(format, modifier);
- else
- return i8xx_mod_supported(format, modifier);
+ return modifier == DRM_FORMAT_MOD_LINEAR &&
+ format == DRM_FORMAT_ARGB8888;
}
-static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
-{
- if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
- return false;
+static struct drm_plane_funcs skl_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = skl_plane_format_mod_supported,
+};
- return modifier == DRM_FORMAT_MOD_LINEAR && format == DRM_FORMAT_ARGB8888;
-}
+static struct drm_plane_funcs i965_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = i965_plane_format_mod_supported,
+};
-static struct drm_plane_funcs intel_plane_funcs = {
+static struct drm_plane_funcs i8xx_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
@@ -13283,7 +13460,7 @@ static struct drm_plane_funcs intel_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_primary_plane_format_mod_supported,
+ .format_mod_supported = i8xx_plane_format_mod_supported,
};
static int
@@ -13408,7 +13585,7 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_cursor_plane_format_mod_supported,
+ .format_mod_supported = intel_cursor_format_mod_supported,
};
static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
@@ -13466,6 +13643,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *primary = NULL;
struct intel_plane_state *state = NULL;
+ const struct drm_plane_funcs *plane_funcs;
const uint32_t *intel_primary_formats;
unsigned int supported_rotations;
unsigned int num_formats;
@@ -13521,6 +13699,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->check_plane = intel_check_primary_plane;
if (INTEL_GEN(dev_priv) >= 9) {
+ primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
+ PLANE_PRIMARY);
+
if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
intel_primary_formats = skl_pri_planar_formats;
num_formats = ARRAY_SIZE(skl_pri_planar_formats);
@@ -13529,7 +13710,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(skl_primary_formats);
}
- if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY))
+ if (primary->has_ccs)
modifiers = skl_format_modifiers_ccs;
else
modifiers = skl_format_modifiers_noccs;
@@ -13537,6 +13718,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = skl_update_plane;
primary->disable_plane = skl_disable_plane;
primary->get_hw_state = skl_plane_get_hw_state;
+
+ plane_funcs = &skl_plane_funcs;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13545,6 +13728,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_plane;
primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
+
+ plane_funcs = &i965_plane_funcs;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
@@ -13553,25 +13738,27 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_plane;
primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
+
+ plane_funcs = &i8xx_plane_funcs;
}
if (INTEL_GEN(dev_priv) >= 9)
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, &intel_plane_funcs,
+ 0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane 1%c", pipe_name(pipe));
else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, &intel_plane_funcs,
+ 0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, &intel_plane_funcs,
+ 0, plane_funcs,
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
@@ -13951,7 +14138,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (intel_crt_present(dev_priv))
intel_crt_init(dev_priv);
- if (IS_GEN9_LP(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ intel_ddi_init(dev_priv, PORT_D);
+ intel_ddi_init(dev_priv, PORT_E);
+ intel_ddi_init(dev_priv, PORT_F);
+ } else if (IS_GEN9_LP(dev_priv)) {
/*
* FIXME: Broxton doesn't support port detection via the
* DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
@@ -13961,7 +14155,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
- intel_dsi_init(dev_priv);
+ vlv_dsi_init(dev_priv);
} else if (HAS_DDI(dev_priv)) {
int found;
@@ -14067,7 +14261,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
}
- intel_dsi_init(dev_priv);
+ vlv_dsi_init(dev_priv);
} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
bool found = false;
@@ -14124,14 +14318,15 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
drm_framebuffer_cleanup(fb);
- i915_gem_object_lock(intel_fb->obj);
- WARN_ON(!intel_fb->obj->framebuffer_references--);
- i915_gem_object_unlock(intel_fb->obj);
+ i915_gem_object_lock(obj);
+ WARN_ON(!obj->framebuffer_references--);
+ i915_gem_object_unlock(obj);
- i915_gem_object_put(intel_fb->obj);
+ i915_gem_object_put(obj);
kfree(intel_fb);
}
@@ -14140,8 +14335,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file,
unsigned int *handle)
{
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
if (obj->userptr.mm) {
DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
@@ -14349,11 +14543,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
break;
case DRM_FORMAT_NV12:
- if (mode_cmd->modifier[0] == I915_FORMAT_MOD_Y_TILED_CCS ||
- mode_cmd->modifier[0] == I915_FORMAT_MOD_Yf_TILED_CCS) {
- DRM_DEBUG_KMS("RC not to be enabled with NV12\n");
- goto err;
- }
if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
IS_BROXTON(dev_priv)) {
DRM_DEBUG_KMS("unsupported pixel format: %s\n",
@@ -14411,9 +14600,9 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
i, fb->pitches[i], stride_alignment);
goto err;
}
- }
- intel_fb->obj = obj;
+ fb->obj[i] = &obj->base;
+ }
ret = intel_fill_fb_info(dev_priv, fb);
if (ret)
@@ -14469,6 +14658,10 @@ static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int hdisplay_max, htotal_max;
+ int vdisplay_max, vtotal_max;
+
/*
* Can't reject DBLSCAN here because Xorg ddxen can add piles
* of DBLSCAN modes to the output's mode list when they detect
@@ -14498,6 +14691,36 @@ intel_mode_valid(struct drm_device *dev,
DRM_MODE_FLAG_CLKDIV2))
return MODE_BAD;
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
+ vdisplay_max = 4096;
+ htotal_max = 8192;
+ vtotal_max = 8192;
+ } else if (INTEL_GEN(dev_priv) >= 3) {
+ hdisplay_max = 4096;
+ vdisplay_max = 4096;
+ htotal_max = 8192;
+ vtotal_max = 8192;
+ } else {
+ hdisplay_max = 2048;
+ vdisplay_max = 2048;
+ htotal_max = 4096;
+ vtotal_max = 4096;
+ }
+
+ if (mode->hdisplay > hdisplay_max ||
+ mode->hsync_start > htotal_max ||
+ mode->hsync_end > htotal_max ||
+ mode->htotal > htotal_max)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay > vdisplay_max ||
+ mode->vsync_start > vtotal_max ||
+ mode->vsync_end > vtotal_max ||
+ mode->vtotal > vtotal_max)
+ return MODE_V_ILLEGAL;
+
return MODE_OK;
}
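For concreteness, a sketch of the per-generation limits encoded above
(values copied from the code; the 5120x2880 example is hypothetical):

	struct mode_limits { int hdisplay_max, vdisplay_max, htotal_max, vtotal_max; };

	static const struct mode_limits hsw_plus = { 8192, 4096, 8192, 8192 };
	static const struct mode_limits gen3     = { 4096, 4096, 8192, 8192 };
	static const struct mode_limits gen2     = { 2048, 2048, 4096, 4096 };

	/* e.g. a 5120x2880 mode: accepted on HSW+ (5120 <= 8192, 2880 <= 4096),
	 * rejected on gen3 with MODE_H_ILLEGAL (5120 > 4096).
	 */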
@@ -14955,6 +15178,7 @@ int intel_modeset_init(struct drm_device *dev)
}
}
+ /* maximum framebuffer dimensions */
if (IS_GEN2(dev_priv)) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
@@ -14970,11 +15194,11 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
dev->mode_config.cursor_height = 1023;
} else if (IS_GEN2(dev_priv)) {
- dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
- dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
+ dev->mode_config.cursor_width = 64;
+ dev->mode_config.cursor_height = 64;
} else {
- dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
- dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
+ dev->mode_config.cursor_width = 256;
+ dev->mode_config.cursor_height = 256;
}
dev->mode_config.fb_base = ggtt->gmadr.start;
@@ -15124,8 +15348,8 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
- WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
+ WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
+ WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
I915_WRITE(PIPECONF(pipe), 0);
POSTING_READ(PIPECONF(pipe));
@@ -15139,12 +15363,12 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
struct intel_plane *plane)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- u32 val = I915_READ(DSPCNTR(i9xx_plane));
+ enum pipe pipe;
+
+ if (!plane->get_hw_state(plane, &pipe))
+ return true;
- return (val & DISPLAY_PLANE_ENABLE) == 0 ||
- (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
+ return pipe == crtc->pipe;
}
static void
@@ -15303,6 +15527,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
+
+ /* notify opregion of the sanitized encoder state */
+ intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
}
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -15343,7 +15570,10 @@ static void readout_plane_state(struct intel_crtc *crtc)
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- bool visible = plane->get_hw_state(plane);
+ enum pipe pipe;
+ bool visible;
+
+ visible = plane->get_hw_state(plane, &pipe);
intel_set_plane_visible(crtc_state, plane_state, visible);
}
@@ -15442,9 +15672,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* rely on the connector_mask being accurate.
*/
encoder->base.crtc->state->connector_mask |=
- 1 << drm_connector_index(&connector->base);
+ drm_connector_mask(&connector->base);
encoder->base.crtc->state->encoder_mask |=
- 1 << drm_encoder_index(&encoder->base);
+ drm_encoder_mask(&encoder->base);
}
} else {
@@ -15510,11 +15740,20 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
for_each_intel_encoder(&dev_priv->drm, encoder) {
u64 get_domains;
enum intel_display_power_domain domain;
+ struct intel_crtc_state *crtc_state;
if (!encoder->get_power_domains)
continue;
- get_domains = encoder->get_power_domains(encoder);
+ /*
+ * MST-primary and inactive encoders don't have a crtc state
+ * and neither of these requires any power domain references.
+ */
+ if (!encoder->base.crtc)
+ continue;
+
+ crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
+ get_domains = encoder->get_power_domains(encoder, crtc_state);
for_each_power_domain(domain, get_domains)
intel_display_power_get(dev_priv, domain);
}
@@ -15690,6 +15929,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ flush_workqueue(dev_priv->modeset_wq);
+
flush_work(&dev_priv->atomic_helper.free_work);
WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
@@ -15733,8 +15974,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder)
{
connector->encoder = encoder;
- drm_mode_connector_attach_encoder(&connector->base,
- &encoder->base);
+ drm_connector_attach_encoder(&connector->base, &encoder->base);
}
/*
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 2ef31617614a..9292001cdd14 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -126,6 +126,17 @@ enum port {
#define port_name(p) ((p) + 'A')
+enum tc_port {
+ PORT_TC_NONE = -1,
+
+ PORT_TC1 = 0,
+ PORT_TC2,
+ PORT_TC3,
+ PORT_TC4,
+
+ I915_MAX_TC_PORTS
+};
+
enum dpio_channel {
DPIO_CH0,
DPIO_CH1
@@ -144,7 +155,7 @@ enum aux_ch {
AUX_CH_B,
AUX_CH_C,
AUX_CH_D,
- _AUX_CH_E, /* does not exist */
+ AUX_CH_E, /* ICL+ */
AUX_CH_F,
};
@@ -185,8 +196,13 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_F,
POWER_DOMAIN_AUX_IO_A,
+ POWER_DOMAIN_AUX_TBT1,
+ POWER_DOMAIN_AUX_TBT2,
+ POWER_DOMAIN_AUX_TBT3,
+ POWER_DOMAIN_AUX_TBT4,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
@@ -249,7 +265,7 @@ struct intel_link_m_n {
&(dev)->mode_config.plane_list, \
base.head) \
for_each_if((plane_mask) & \
- BIT(drm_plane_index(&intel_plane->base)))
+ drm_plane_mask(&intel_plane->base)))
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
list_for_each_entry(intel_plane, \
@@ -266,13 +282,17 @@ struct intel_link_m_n {
list_for_each_entry(intel_crtc, \
&(dev)->mode_config.crtc_list, \
base.head) \
- for_each_if((crtc_mask) & BIT(drm_crtc_index(&intel_crtc->base)))
+ for_each_if((crtc_mask) & drm_crtc_mask(&intel_crtc->base))
#define for_each_intel_encoder(dev, intel_encoder) \
list_for_each_entry(intel_encoder, \
&(dev)->mode_config.encoder_list, \
base.head)
+#define for_each_intel_dp(dev, intel_encoder) \
+ for_each_intel_encoder(dev, intel_encoder) \
+ for_each_if(intel_encoder_is_dp(intel_encoder))
+
#define for_each_intel_connector_iter(intel_connector, iter) \
while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 16faea30114a..cd0f649b57a5 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -56,7 +56,7 @@ struct dp_link_dpll {
struct dpll dpll;
};
-static const struct dp_link_dpll gen4_dpll[] = {
+static const struct dp_link_dpll g4x_dpll[] = {
{ 162000,
{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
{ 270000,
@@ -256,6 +256,17 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
return 810000;
}
+static int icl_max_source_rate(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum port port = dig_port->base.port;
+
+ if (port == PORT_B)
+ return 540000;
+
+ return 810000;
+}
+
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
@@ -285,10 +296,13 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
/* This should only be done once */
WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
- if (IS_CANNONLAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 10) {
source_rates = cnl_rates;
size = ARRAY_SIZE(cnl_rates);
- max_rate = cnl_max_source_rate(intel_dp);
+ if (INTEL_GEN(dev_priv) == 10)
+ max_rate = cnl_max_source_rate(intel_dp);
+ else
+ max_rate = icl_max_source_rate(intel_dp);
} else if (IS_GEN9_LP(dev_priv)) {
source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
@@ -516,7 +530,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
uint32_t DP;
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power seqeuncer kick due to port %c being active\n",
+ "skipping pipe %c power sequencer kick due to port %c being active\n",
pipe_name(pipe), port_name(intel_dig_port->base.port)))
return;
@@ -532,9 +546,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
DP |= DP_LINK_TRAIN_PAT_1;
if (IS_CHERRYVIEW(dev_priv))
- DP |= DP_PIPE_SELECT_CHV(pipe);
- else if (pipe == PIPE_B)
- DP |= DP_PIPEB_SELECT;
+ DP |= DP_PIPE_SEL_CHV(pipe);
+ else
+ DP |= DP_PIPE_SEL(pipe);
pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
@@ -557,7 +571,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
/*
* Similar magic as in intel_dp_enable_port().
* We _must_ do this port enable + disable trick
- * to make this power seqeuencer lock onto the port.
+ * to make this power sequencer lock onto the port.
* Otherwise even VDD force bit won't work.
*/
I915_WRITE(intel_dp->output_reg, DP);
@@ -586,14 +600,8 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
* We don't have power sequencer currently.
* Pick one that's not used by other ports.
*/
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp;
-
- if (encoder->type != INTEL_OUTPUT_DP &&
- encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- intel_dp = enc_to_intel_dp(&encoder->base);
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
if (encoder->type == INTEL_OUTPUT_EDP) {
WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
@@ -785,19 +793,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
* should use them always.
*/
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp;
-
- if (encoder->type != INTEL_OUTPUT_DP &&
- encoder->type != INTEL_OUTPUT_EDP &&
- encoder->type != INTEL_OUTPUT_DDI)
- continue;
-
- intel_dp = enc_to_intel_dp(&encoder->base);
-
- /* Skip pure DVI/HDMI DDI encoders */
- if (!i915_mmio_reg_valid(intel_dp->output_reg))
- continue;
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
@@ -939,7 +936,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
}
static uint32_t
-intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
+intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
@@ -947,14 +944,10 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
bool done;
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- if (has_aux_irq)
- done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
- msecs_to_jiffies_timeout(10));
- else
- done = wait_for(C, 10) == 0;
+ done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+ msecs_to_jiffies_timeout(10));
if (!done)
- DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
- has_aux_irq);
+ DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C
return status;
@@ -1019,7 +1012,6 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
}
static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
- bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider)
{
@@ -1040,7 +1032,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
return DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_DONE |
- (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_INTERRUPT |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
timeout |
DP_AUX_CH_CTL_RECEIVE_ERROR |
@@ -1050,13 +1042,12 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
}
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
- bool has_aux_irq,
int send_bytes,
uint32_t unused)
{
return DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_DONE |
- (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_INTERRUPT |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_TIME_OUT_MAX |
DP_AUX_CH_CTL_RECEIVE_ERROR |
@@ -1079,7 +1070,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
int i, ret, recv_bytes;
uint32_t status;
int try, clock = 0;
- bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
bool vdd;
ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
@@ -1134,7 +1124,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
- has_aux_irq,
send_bytes,
aux_clock_divider);
@@ -1151,7 +1140,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl, send_ctl);
- status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
+ status = intel_dp_aux_wait_done(intel_dp);
/* Clear done status and any errors */
I915_WRITE(ch_ctl,
@@ -1350,6 +1339,9 @@ static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
case DP_AUX_D:
aux_ch = AUX_CH_D;
break;
+ case DP_AUX_E:
+ aux_ch = AUX_CH_E;
+ break;
case DP_AUX_F:
aux_ch = AUX_CH_F;
break;
@@ -1377,6 +1369,8 @@ intel_aux_power_domain(struct intel_dp *intel_dp)
return POWER_DOMAIN_AUX_C;
case AUX_CH_D:
return POWER_DOMAIN_AUX_D;
+ case AUX_CH_E:
+ return POWER_DOMAIN_AUX_E;
case AUX_CH_F:
return POWER_DOMAIN_AUX_F;
default:
@@ -1463,6 +1457,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
case AUX_CH_B:
case AUX_CH_C:
case AUX_CH_D:
+ case AUX_CH_E:
case AUX_CH_F:
return DP_AUX_CH_CTL(aux_ch);
default:
@@ -1481,6 +1476,7 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
case AUX_CH_B:
case AUX_CH_C:
case AUX_CH_D:
+ case AUX_CH_E:
case AUX_CH_F:
return DP_AUX_CH_DATA(aux_ch, index);
default:
@@ -1544,6 +1540,13 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
return max_rate >= 540000;
}
+bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
+{
+ int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
+
+ return max_rate >= 810000;
+}
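For reference, the rate thresholds used throughout intel_dp.c are per-lane
link rates in kHz; the names below are standard DisplayPort terminology
rather than anything defined in this patch:

	/* RBR  = 162000 kHz (1.62 GHz)
	 * HBR  = 270000 kHz (2.70 GHz)
	 * HBR2 = 540000 kHz (5.40 GHz) -> intel_dp_source_supports_hbr2()
	 * HBR3 = 810000 kHz (8.10 GHz) -> intel_dp_source_supports_hbr3()
	 */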
+
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
@@ -1553,8 +1556,8 @@ intel_dp_set_clock(struct intel_encoder *encoder,
int i, count = 0;
if (IS_G4X(dev_priv)) {
- divisor = gen4_dpll;
- count = ARRAY_SIZE(gen4_dpll);
+ divisor = g4x_dpll;
+ count = ARRAY_SIZE(g4x_dpll);
} else if (HAS_PCH_SPLIT(dev_priv)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
@@ -1970,7 +1973,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Split out the IBX/CPU vs CPT settings */
- if (IS_GEN7(dev_priv) && port == PORT_A) {
+ if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1980,7 +1983,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
- intel_dp->DP |= crtc->pipe << 29;
+ intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
u32 trans_dp;
@@ -2006,9 +2009,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_ENHANCED_FRAMING;
if (IS_CHERRYVIEW(dev_priv))
- intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
- else if (crtc->pipe == PIPE_B)
- intel_dp->DP |= DP_PIPEB_SELECT;
+ intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
+ else
+ intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
}
}
@@ -2630,52 +2633,66 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
+static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
+ enum port port, enum pipe *pipe)
+{
+ enum pipe p;
+
+ for_each_pipe(dev_priv, p) {
+ u32 val = I915_READ(TRANS_DP_CTL(p));
+
+ if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
+ *pipe = p;
+ return true;
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
+
+ /* must initialize pipe to something for the asserts */
+ *pipe = PIPE_A;
+
+ return false;
+}
+
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe)
+{
+ bool ret;
+ u32 val;
+
+ val = I915_READ(dp_reg);
+
+ ret = val & DP_PORT_EN;
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+ *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
+ else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
+ ret &= cpt_dp_port_selected(dev_priv, port, pipe);
+ else if (IS_CHERRYVIEW(dev_priv))
+ *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
+ else
+ *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
+
+ return ret;
+}
+
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = encoder->port;
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
+ ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ encoder->port, pipe);
- tmp = I915_READ(intel_dp->output_reg);
-
- if (!(tmp & DP_PORT_EN))
- goto out;
-
- if (IS_GEN7(dev_priv) && port == PORT_A) {
- *pipe = PORT_TO_PIPE_CPT(tmp);
- } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- enum pipe p;
-
- for_each_pipe(dev_priv, p) {
- u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
- if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
- *pipe = p;
- ret = true;
-
- goto out;
- }
- }
-
- DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
- i915_mmio_reg_offset(intel_dp->output_reg));
- } else if (IS_CHERRYVIEW(dev_priv)) {
- *pipe = DP_PORT_TO_PIPE_CHV(tmp);
- } else {
- *pipe = PORT_TO_PIPE(tmp);
- }
-
- ret = true;
-
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
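
For context on the refactor above: intel_dp_port_enabled() now both reports whether the port is on and decodes the pipe select bits, even for a disabled port. A minimal sketch of a hypothetical caller follows; port_name() and pipe_name() are existing i915 helpers, but assert_dp_disabled() itself is illustrative and not part of this patch.

/* Sketch only: a state assert built on the new helper. The helper
 * fills *pipe even when the port is off, which is exactly what a
 * display state checker wants.
 */
static void assert_dp_disabled(struct drm_i915_private *dev_priv,
			       i915_reg_t dp_reg, enum port port)
{
	enum pipe pipe;

	WARN(intel_dp_port_enabled(dev_priv, dp_reg, port, &pipe),
	     "DP port %c enabled on pipe %c, expected disabled\n",
	     port_name(port), pipe_name(pipe));
}
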
@@ -2796,10 +2813,6 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
- intel_psr_disable(intel_dp, old_crtc_state);
-
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
@@ -2854,10 +2867,11 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
+ uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
- if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
+ if (dp_train_pat & train_pat_mask)
DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
- dp_train_pat & DP_TRAINING_PATTERN_MASK);
+ dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
@@ -2868,7 +2882,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
- switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ switch (dp_train_pat & train_pat_mask) {
case DP_TRAINING_PATTERN_DISABLE:
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
@@ -2882,10 +2896,13 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
case DP_TRAINING_PATTERN_3:
temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
break;
+ case DP_TRAINING_PATTERN_4:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
+ break;
}
I915_WRITE(DP_TP_CTL(port), temp);
- } else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
@@ -3008,10 +3025,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
intel_edp_backlight_on(pipe_config, conn_state);
- intel_psr_enable(intel_dp, pipe_config);
}
static void g4x_pre_enable_dp(struct intel_encoder *encoder,
@@ -3043,11 +3057,11 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
edp_panel_vdd_off_sync(intel_dp);
/*
- * VLV seems to get confused when multiple power seqeuencers
+ * VLV seems to get confused when multiple power sequencers
* have the same port selected (even if only one has power/vdd
* enabled). The failure manifests as vlv_wait_port_ready() failing
* CHV on the other hand doesn't seem to mind having the same port
- * selected in multiple power seqeuencers, but let's clear the
+ * selected in multiple power sequencers, but let's clear the
* port select always when logically disconnecting a power sequencer
* from a port.
*/
@@ -3066,16 +3080,9 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->pps_mutex);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp;
- enum port port;
-
- if (encoder->type != INTEL_OUTPUT_DP &&
- encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- intel_dp = enc_to_intel_dp(&encoder->base);
- port = dp_to_dig_port(intel_dp)->base.port;
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = encoder->port;
WARN(intel_dp->active_pipe == pipe,
"stealing pipe %c power sequencer from active (e)DP port %c\n",
@@ -3197,14 +3204,14 @@ uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->base.port;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum port port = encoder->port;
- if (INTEL_GEN(dev_priv) >= 9) {
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ if (HAS_DDI(dev_priv))
return intel_ddi_dp_voltage_max(encoder);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (IS_GEN7(dev_priv) && port == PORT_A)
+ else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
@@ -3216,33 +3223,11 @@ uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->base.port;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum port port = encoder->port;
- if (INTEL_GEN(dev_priv) >= 9) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_3;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_3;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
+ if (HAS_DDI(dev_priv)) {
+ return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
@@ -3255,7 +3240,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_GEN7(dev_priv) && port == PORT_A) {
+ } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
@@ -3450,7 +3435,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
}
static uint32_t
-gen4_signal_levels(uint8_t train_set)
+g4x_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
@@ -3487,9 +3472,9 @@ gen4_signal_levels(uint8_t train_set)
return signal_levels;
}
-/* Gen6's DP voltage swing and pre-emphasis control */
+/* SNB CPU eDP voltage swing and pre-emphasis control */
static uint32_t
-gen6_edp_signal_levels(uint8_t train_set)
+snb_cpu_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3515,9 +3500,9 @@ gen6_edp_signal_levels(uint8_t train_set)
}
}
-/* Gen7's DP voltage swing and pre-emphasis control */
+/* IVB CPU eDP voltage swing and pre-emphasis control */
static uint32_t
-gen7_edp_signal_levels(uint8_t train_set)
+ivb_cpu_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3564,14 +3549,14 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
signal_levels = chv_signal_levels(intel_dp);
} else if (IS_VALLEYVIEW(dev_priv)) {
signal_levels = vlv_signal_levels(intel_dp);
- } else if (IS_GEN7(dev_priv) && port == PORT_A) {
- signal_levels = gen7_edp_signal_levels(train_set);
+ } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
+ signal_levels = ivb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
} else if (IS_GEN6(dev_priv) && port == PORT_A) {
- signal_levels = gen6_edp_signal_levels(train_set);
+ signal_levels = snb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
- signal_levels = gen4_signal_levels(train_set);
+ signal_levels = g4x_signal_levels(train_set);
mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
}
@@ -3654,7 +3639,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
- if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
@@ -3683,8 +3668,9 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
/* always enable with pattern 1 (as per spec) */
- DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
- DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
+ DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
+ DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
+ DP_LINK_TRAIN_PAT_1;
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
@@ -3739,8 +3725,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
- intel_psr_init_dpcd(intel_dp);
-
/*
* Read the eDP display control registers.
*
@@ -3756,6 +3740,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
+ /*
+	 * This has to be called after intel_dp->edp_dpcd is filled, since PSR
+	 * checks for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
+ */
+ intel_psr_init_dpcd(intel_dp);
+
/* Read the eDP 1.4+ supported link rates. */
if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
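
For context on the reordering above: intel_dp->edp_dpcd caches the eDP display control registers starting at DP_EDP_DPCD_REV, so index 1 lands on DP_EDP_GENERAL_CAP_1. A minimal sketch of the capability check PSR depends on; the helper name is hypothetical, the DPCD defines are the standard ones from drm_dp_helper.h.

/* Sketch only: true if the sink advertises DP_SET_POWER support,
 * i.e. the SET_POWER_CAPABLE bit in DP_EDP_GENERAL_CAP_1, cached as
 * edp_dpcd[1]. This is why intel_psr_init_dpcd() must run after the
 * edp_dpcd read above.
 */
static bool sink_can_set_power(const u8 *edp_dpcd)
{
	return edp_dpcd[1] & DP_EDP_SET_POWER_CAP;
}
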
@@ -3884,129 +3874,6 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
intel_dp->is_mst);
}
-static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state, bool disable_wa)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- u8 buf;
- int ret = 0;
- int count = 0;
- int attempts = 10;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
- DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
- ret = -EIO;
- goto out;
- }
-
- if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- buf & ~DP_TEST_SINK_START) < 0) {
- DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
- ret = -EIO;
- goto out;
- }
-
- do {
- intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_TEST_SINK_MISC, &buf) < 0) {
- ret = -EIO;
- goto out;
- }
- count = buf & DP_TEST_COUNT_MASK;
- } while (--attempts && count);
-
- if (attempts == 0) {
- DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
- ret = -ETIMEDOUT;
- }
-
- out:
- if (disable_wa)
- hsw_enable_ips(crtc_state);
- return ret;
-}
-
-static int intel_dp_sink_crc_start(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- u8 buf;
- int ret;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
- return -EIO;
-
- if (!(buf & DP_TEST_CRC_SUPPORTED))
- return -ENOTTY;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
- return -EIO;
-
- if (buf & DP_TEST_SINK_START) {
- ret = intel_dp_sink_crc_stop(intel_dp, crtc_state, false);
- if (ret)
- return ret;
- }
-
- hsw_disable_ips(crtc_state);
-
- if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- buf | DP_TEST_SINK_START) < 0) {
- hsw_enable_ips(crtc_state);
- return -EIO;
- }
-
- intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
- return 0;
-}
-
-int intel_dp_sink_crc(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, u8 *crc)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- u8 buf;
- int count, ret;
- int attempts = 6;
-
- ret = intel_dp_sink_crc_start(intel_dp, crtc_state);
- if (ret)
- return ret;
-
- do {
- intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_TEST_SINK_MISC, &buf) < 0) {
- ret = -EIO;
- goto stop;
- }
- count = buf & DP_TEST_COUNT_MASK;
-
- } while (--attempts && count == 0);
-
- if (attempts == 0) {
- DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
- ret = -ETIMEDOUT;
- goto stop;
- }
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
- ret = -EIO;
- goto stop;
- }
-
-stop:
- intel_dp_sink_crc_stop(intel_dp, crtc_state, true);
- return ret;
-}
-
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
@@ -4466,10 +4333,15 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
+ /* Handle CEC interrupts, if any */
+ drm_dp_cec_irq(&intel_dp->aux);
+
/* defer to the hotplug work for link retraining if needed */
if (intel_dp_needs_link_retrain(intel_dp))
return false;
+ intel_psr_short_pulse(intel_dp);
+
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
/* Send a Hotplug Uevent to userspace to start modeset */
@@ -4537,14 +4409,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum drm_connector_status status;
-
- status = intel_panel_detect(dev_priv);
- if (status == connector_status_unknown)
- status = connector_status_connected;
-
- return status;
+ return connector_status_connected;
}
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
@@ -4780,6 +4645,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
intel_connector->detect_edid = edid;
intel_dp->has_audio = drm_detect_monitor_audio(edid);
+ drm_dp_cec_set_edid(&intel_dp->aux, edid);
}
static void
@@ -4787,6 +4653,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
+ drm_dp_cec_unset_edid(&intel_dp->aux);
kfree(intel_connector->detect_edid);
intel_connector->detect_edid = NULL;
@@ -4805,7 +4672,7 @@ intel_dp_long_pulse(struct intel_connector *connector)
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
- /* Can't disconnect eDP, but you can close the lid... */
+ /* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
@@ -4975,6 +4842,7 @@ static int
intel_dp_connector_register(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_device *dev = connector->dev;
int ret;
ret = intel_connector_register(connector);
@@ -4987,13 +4855,20 @@ intel_dp_connector_register(struct drm_connector *connector)
intel_dp->aux.name, connector->kdev->kobj.name);
intel_dp->aux.dev = connector->kdev;
- return drm_dp_aux_register(&intel_dp->aux);
+ ret = drm_dp_aux_register(&intel_dp->aux);
+ if (!ret)
+ drm_dp_cec_register_connector(&intel_dp->aux,
+ connector->name, dev->dev);
+ return ret;
}
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
- drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+ drm_dp_cec_unregister_connector(&intel_dp->aux);
+ drm_dp_aux_unregister(&intel_dp->aux);
intel_connector_unregister(connector);
}
@@ -5319,14 +5194,14 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum pipe pipe;
- if ((intel_dp->DP & DP_PORT_EN) == 0)
- return INVALID_PIPE;
+ if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ encoder->port, &pipe))
+ return pipe;
- if (IS_CHERRYVIEW(dev_priv))
- return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
- else
- return PORT_TO_PIPE(intel_dp->DP);
+ return INVALID_PIPE;
}
void intel_dp_encoder_reset(struct drm_encoder *encoder)
@@ -5675,7 +5550,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
/*
* On some VLV machines the BIOS can leave the VDD
- * enabled even on power seqeuencers which aren't
+ * enabled even on power sequencers which aren't
* hooked up to any port. This would mess up the
* power domain tracking the first time we pick
* one of these power sequencers for use since
@@ -5683,7 +5558,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
* already on and therefore wouldn't grab the power
* domain reference. Disable VDD first to avoid this.
* This also avoids spuriously turning the VDD on as
- * soon as the new power seqeuencer gets initialized.
+ * soon as the new power sequencer gets initialized.
*/
if (force_disable_vdd) {
u32 pp = ironlake_get_pp_control(intel_dp);
@@ -5721,10 +5596,20 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
port_sel = PANEL_PORT_SELECT_VLV(port);
} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- if (port == PORT_A)
+ switch (port) {
+ case PORT_A:
port_sel = PANEL_PORT_SELECT_DPA;
- else
+ break;
+ case PORT_C:
+ port_sel = PANEL_PORT_SELECT_DPC;
+ break;
+ case PORT_D:
port_sel = PANEL_PORT_SELECT_DPD;
+ break;
+ default:
+ MISSING_CASE(port);
+ break;
+ }
}
pp_on |= port_sel;
@@ -6179,7 +6064,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
edid);
} else {
kfree(edid);
@@ -6268,8 +6153,8 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
/* Set connector link status to BAD and send a Uevent to notify
* userspace to do a modeset.
*/
- drm_mode_connector_set_link_status_property(connector,
- DRM_MODE_LINK_STATUS_BAD);
+ drm_connector_set_link_status_property(connector,
+ DRM_MODE_LINK_STATUS_BAD);
mutex_unlock(&connector->dev->mode_config.mutex);
/* Send Hotplug uevent so userspace can reprobe */
drm_kms_helper_hotplug_event(connector->dev);
@@ -6382,7 +6267,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
+ if (IS_G45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
@@ -6462,7 +6347,6 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->port = port;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
- dev_priv->hotplug.irq_port[port] = intel_dig_port;
if (port != PORT_A)
intel_infoframe_init(intel_dig_port);
@@ -6481,37 +6365,44 @@ err_connector_alloc:
return false;
}
-void intel_dp_mst_suspend(struct drm_device *dev)
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int i;
+ struct intel_encoder *encoder;
- /* disable MST */
- for (i = 0; i < I915_MAX_PORTS; i++) {
- struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp;
- if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+ if (encoder->type != INTEL_OUTPUT_DDI)
continue;
- if (intel_dig_port->dp.is_mst)
- drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
+ intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (!intel_dp->can_mst)
+ continue;
+
+ if (intel_dp->is_mst)
+ drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
}
}
-void intel_dp_mst_resume(struct drm_device *dev)
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int i;
+ struct intel_encoder *encoder;
- for (i = 0; i < I915_MAX_PORTS; i++) {
- struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp;
int ret;
- if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+ if (encoder->type != INTEL_OUTPUT_DDI)
+ continue;
+
+ intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (!intel_dp->can_mst)
continue;
- ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
if (ret)
- intel_dp_check_mst_status(&intel_dig_port->dp);
+ intel_dp_check_mst_status(intel_dp);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index 2bb2ceb9d463..357136f17f85 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -26,7 +26,7 @@
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
- uint8_t reg_val = 0;
+ u8 reg_val = 0;
/* Early return when display use other mechanism to enable backlight. */
if (!(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP))
@@ -54,11 +54,11 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
* Read the current backlight value from DPCD register(s) based
* on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported
*/
-static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
- uint8_t read_val[2] = { 0x0 };
- uint16_t level = 0;
+ u8 read_val[2] = { 0x0 };
+ u16 level = 0;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
&read_val, sizeof(read_val)) < 0) {
@@ -82,7 +82,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
- uint8_t vals[2] = { 0x0 };
+ u8 vals[2] = { 0x0 };
vals[0] = level;
@@ -178,7 +178,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
- uint8_t dpcd_buf, new_dpcd_buf, edp_backlight_mode;
+ u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 3fcaa98b9055..4da6e33c7fa1 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -219,32 +219,47 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
}
/*
- * Pick training pattern for channel equalization. Training Pattern 3 for HBR2
+ * Pick training pattern for channel equalization. Training Pattern 4 for HBR3
+ * or 1.4 devices that support it, Training Pattern 3 for HBR2
* or 1.2 devices that support it, Training Pattern 2 otherwise.
*/
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
{
- u32 training_pattern = DP_TRAINING_PATTERN_2;
- bool source_tps3, sink_tps3;
+ bool source_tps3, sink_tps3, source_tps4, sink_tps4;
/*
+ * Intel platforms that support HBR3 also support TPS4. It is mandatory
+ * for all downstream devices that support HBR3. There are no known eDP
+ * panels that support TPS4 as of Feb 2018, per the VESA eDP_v1.4b_E1
+ * specification.
+ */
+ source_tps4 = intel_dp_source_supports_hbr3(intel_dp);
+ sink_tps4 = drm_dp_tps4_supported(intel_dp->dpcd);
+ if (source_tps4 && sink_tps4) {
+ return DP_TRAINING_PATTERN_4;
+ } else if (intel_dp->link_rate == 810000) {
+ if (!source_tps4)
+ DRM_DEBUG_KMS("8.1 Gbps link rate without source HBR3/TPS4 support\n");
+ if (!sink_tps4)
+ DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n");
+ }
+ /*
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
* also mandatory for downstream devices that support HBR2. However, not
* all sinks follow the spec.
*/
source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
-
if (source_tps3 && sink_tps3) {
- training_pattern = DP_TRAINING_PATTERN_3;
- } else if (intel_dp->link_rate == 540000) {
+ return DP_TRAINING_PATTERN_3;
+ } else if (intel_dp->link_rate >= 540000) {
if (!source_tps3)
- DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
+ DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
if (!sink_tps3)
- DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
+ DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
}
- return training_pattern;
+ return DP_TRAINING_PATTERN_2;
}
static bool
@@ -256,11 +271,13 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
bool channel_eq = false;
training_pattern = intel_dp_training_pattern(intel_dp);
+ /* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
+ if (training_pattern != DP_TRAINING_PATTERN_4)
+ training_pattern |= DP_LINK_SCRAMBLING_DISABLE;
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp,
- training_pattern |
- DP_LINK_SCRAMBLING_DISABLE)) {
+ training_pattern)) {
DRM_ERROR("failed to start channel equalization\n");
return false;
}
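
One detail worth spelling out from the hunk above: DP 1.4 keeps scrambling enabled for TPS4, whereas TPS2/TPS3 training requires it disabled. A hedged sketch of that rule as a standalone helper (hypothetical name, not the driver's API; the defines are the standard ones from drm_dp_helper.h):

/* Sketch: fold the DP 1.4 scrambling rule into the pattern value
 * passed to intel_dp_set_link_train().
 */
static u32 dp_eq_pattern_with_scrambling(u32 training_pattern)
{
	if (training_pattern != DP_TRAINING_PATTERN_4)
		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;

	return training_pattern;
}
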
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 5890500a3a8b..7e3e01607643 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -403,20 +403,10 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
-static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
- if (!intel_dp)
- return NULL;
- return &intel_dp->mst_encoders[0]->base.base;
-}
-
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
.atomic_best_encoder = intel_mst_atomic_best_encoder,
- .best_encoder = intel_mst_best_encoder,
.atomic_check = intel_dp_mst_atomic_check,
};
@@ -476,8 +466,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
struct drm_encoder *enc =
&intel_dp->mst_encoders[pipe]->base.base;
- ret = drm_mode_connector_attach_encoder(&intel_connector->base,
- enc);
+ ret = drm_connector_attach_encoder(&intel_connector->base, enc);
if (ret)
goto err;
}
@@ -485,7 +474,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
- ret = drm_mode_connector_set_path_property(connector, pathprop);
+ ret = drm_connector_set_path_property(connector, pathprop);
if (ret)
goto err;
@@ -524,7 +513,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
intel_connector->mst_port = NULL;
drm_modeset_unlock(&connector->dev->mode_config.connection_mutex);
- drm_connector_unreference(connector);
+ drm_connector_put(connector);
}
static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 383fbc15113d..b51ad2917dbe 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -163,8 +163,8 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
- unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
- unsigned old_mask;
+ unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+ unsigned int old_mask;
if (WARN_ON(pll == NULL))
return;
@@ -207,7 +207,7 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
- unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
+ unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
/* PCH only available on ILK+ */
if (INTEL_GEN(dev_priv) < 5)
@@ -2525,6 +2525,77 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
return true;
}
+int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
+ uint32_t pll_id)
+{
+ uint32_t cfgcr0, cfgcr1;
+ uint32_t pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
+ const struct skl_wrpll_params *params;
+ int index, n_entries, link_clock;
+
+ /* Read back values from DPLL CFGCR registers */
+ cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
+ cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
+
+ dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK;
+ dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ DPLL_CFGCR0_DCO_FRACTION_SHIFT;
+ pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT;
+ kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT;
+ qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >>
+ DPLL_CFGCR1_QDIV_MODE_SHIFT;
+ qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+
+ params = dev_priv->cdclk.hw.ref == 24000 ?
+ icl_dp_combo_pll_24MHz_values :
+ icl_dp_combo_pll_19_2MHz_values;
+ n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values);
+
+ for (index = 0; index < n_entries; index++) {
+ if (dco_integer == params[index].dco_integer &&
+ dco_fraction == params[index].dco_fraction &&
+ pdiv == params[index].pdiv &&
+ kdiv == params[index].kdiv &&
+ qdiv_mode == params[index].qdiv_mode &&
+ qdiv_ratio == params[index].qdiv_ratio)
+ break;
+ }
+
+ /* Map PLL Index to Link Clock */
+ switch (index) {
+ default:
+ MISSING_CASE(index);
+ /* fall through */
+ case 0:
+ link_clock = 540000;
+ break;
+ case 1:
+ link_clock = 270000;
+ break;
+ case 2:
+ link_clock = 162000;
+ break;
+ case 3:
+ link_clock = 324000;
+ break;
+ case 4:
+ link_clock = 216000;
+ break;
+ case 5:
+ link_clock = 432000;
+ break;
+ case 6:
+ link_clock = 648000;
+ break;
+ case 7:
+ link_clock = 810000;
+ break;
+ }
+
+ return link_clock;
+}
+
static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
{
return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
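
A brief unit note on the link_clock values returned above, since the mapping table is otherwise opaque: these are DP symbol clocks in kHz, and with 8b/10b coding each symbol is 10 bits on the wire. A sketch of the arithmetic (the helper is hypothetical, shown only to make the units concrete):

/* Sketch: raw per-lane bit rate from a link_clock in kHz. E.g. index 7
 * above: 810000 kHz * 10 = 8.1 Gbps raw (HBR3); usable payload is 8/10
 * of that due to 8b/10b coding.
 */
static int dp_link_clock_to_raw_kbps(int link_clock_khz)
{
	return link_clock_khz * 10;
}
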
@@ -2569,6 +2640,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
switch (div1) {
default:
MISSING_CASE(div1);
+ /* fall through */
case 2:
hsdiv = 0;
break;
@@ -2742,25 +2814,31 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
MG_PLL_SSC_FLLEN |
MG_PLL_SSC_STEPSIZE(ssc_stepsize);
- pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART;
-
- if (refclk_khz != 38400) {
- pll_state->mg_pll_tdc_coldst_bias |=
- MG_PLL_TDC_COLDST_IREFINT_EN |
- MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
- MG_PLL_TDC_COLDST_COLDSTART |
- MG_PLL_TDC_TDCOVCCORR_EN |
- MG_PLL_TDC_TDCSEL(3);
-
- pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
- MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
- MG_PLL_BIAS_BIAS_BONUS(10) |
- MG_PLL_BIAS_BIASCAL_EN |
- MG_PLL_BIAS_CTRIM(12) |
- MG_PLL_BIAS_VREF_RDAC(4) |
- MG_PLL_BIAS_IREFTRIM(iref_trim);
+ pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
+ MG_PLL_TDC_COLDST_IREFINT_EN |
+ MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
+ MG_PLL_TDC_TDCOVCCORR_EN |
+ MG_PLL_TDC_TDCSEL(3);
+
+ pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
+ MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
+ MG_PLL_BIAS_BIAS_BONUS(10) |
+ MG_PLL_BIAS_BIASCAL_EN |
+ MG_PLL_BIAS_CTRIM(12) |
+ MG_PLL_BIAS_VREF_RDAC(4) |
+ MG_PLL_BIAS_IREFTRIM(iref_trim);
+
+ if (refclk_khz == 38400) {
+ pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+ pll_state->mg_pll_bias_mask = 0;
+ } else {
+ pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ pll_state->mg_pll_bias_mask = -1U;
}
+ pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
+ pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+
return true;
}
@@ -2787,10 +2865,17 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
case PORT_D:
case PORT_E:
case PORT_F:
- min = icl_port_to_mg_pll_id(port);
- max = min;
- ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
- &pll_state);
+ if (0 /* TODO: TBT PLLs */) {
+ min = DPLL_ID_ICL_TBTPLL;
+ max = min;
+ ret = icl_calc_dpll_state(crtc_state, encoder, clock,
+ &pll_state);
+ } else {
+ min = icl_port_to_mg_pll_id(port);
+ max = min;
+ ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
+ &pll_state);
+ }
break;
default:
MISSING_CASE(port);
@@ -2820,9 +2905,12 @@ static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
switch (id) {
default:
MISSING_CASE(id);
+ /* fall through */
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
return CNL_DPLL_ENABLE(id);
+ case DPLL_ID_ICL_TBTPLL:
+ return TBT_PLL_ENABLE;
case DPLL_ID_ICL_MGPLL1:
case DPLL_ID_ICL_MGPLL2:
case DPLL_ID_ICL_MGPLL3:
@@ -2850,6 +2938,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
switch (id) {
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
+ case DPLL_ID_ICL_TBTPLL:
hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
break;
@@ -2859,18 +2948,41 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
case DPLL_ID_ICL_MGPLL4:
port = icl_mg_pll_id_to_port(id);
hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
+ hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
hw_state->mg_clktop2_coreclkctl1 =
I915_READ(MG_CLKTOP2_CORECLKCTL1(port));
+ hw_state->mg_clktop2_coreclkctl1 &=
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
hw_state->mg_clktop2_hsclkctl =
I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+ hw_state->mg_clktop2_hsclkctl &=
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(port));
hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(port));
hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(port));
hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(port));
hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(port));
+
hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(port));
hw_state->mg_pll_tdc_coldst_bias =
I915_READ(MG_PLL_TDC_COLDST_BIAS(port));
+
+ if (dev_priv->cdclk.hw.ref == 38400) {
+ hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+ hw_state->mg_pll_bias_mask = 0;
+ } else {
+ hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ hw_state->mg_pll_bias_mask = -1U;
+ }
+
+ hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
+ hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
break;
default:
MISSING_CASE(id);
@@ -2898,19 +3010,48 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
{
struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
enum port port = icl_mg_pll_id_to_port(pll->info->id);
+ u32 val;
+
+ /*
+ * Some of the following registers have reserved fields, so program
+ * these with RMW based on a mask. The mask can be fixed or generated
+ * during the calc/readout phase if the mask depends on some other HW
+ * state like refclk, see icl_calc_mg_pll_state().
+ */
+ val = I915_READ(MG_REFCLKIN_CTL(port));
+ val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+ val |= hw_state->mg_refclkin_ctl;
+ I915_WRITE(MG_REFCLKIN_CTL(port), val);
+
+ val = I915_READ(MG_CLKTOP2_CORECLKCTL1(port));
+ val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+ val |= hw_state->mg_clktop2_coreclkctl1;
+ I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port), val);
+
+ val = I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+ val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
+ val |= hw_state->mg_clktop2_hsclkctl;
+ I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), val);
- I915_WRITE(MG_REFCLKIN_CTL(port), hw_state->mg_refclkin_ctl);
- I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port),
- hw_state->mg_clktop2_coreclkctl1);
- I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), hw_state->mg_clktop2_hsclkctl);
I915_WRITE(MG_PLL_DIV0(port), hw_state->mg_pll_div0);
I915_WRITE(MG_PLL_DIV1(port), hw_state->mg_pll_div1);
I915_WRITE(MG_PLL_LF(port), hw_state->mg_pll_lf);
I915_WRITE(MG_PLL_FRAC_LOCK(port), hw_state->mg_pll_frac_lock);
I915_WRITE(MG_PLL_SSC(port), hw_state->mg_pll_ssc);
- I915_WRITE(MG_PLL_BIAS(port), hw_state->mg_pll_bias);
- I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port),
- hw_state->mg_pll_tdc_coldst_bias);
+
+ val = I915_READ(MG_PLL_BIAS(port));
+ val &= ~hw_state->mg_pll_bias_mask;
+ val |= hw_state->mg_pll_bias;
+ I915_WRITE(MG_PLL_BIAS(port), val);
+
+ val = I915_READ(MG_PLL_TDC_COLDST_BIAS(port));
+ val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
+ val |= hw_state->mg_pll_tdc_coldst_bias;
+ I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port), val);
+
POSTING_READ(MG_PLL_TDC_COLDST_BIAS(port));
}
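
The register writes above all follow the same read-modify-write shape so that reserved fields survive the update. A generic sketch of that pattern (the helper is illustrative; the driver open-codes it per register because each mask may depend on readout state):

/* Sketch: update only the bits covered by @mask, preserving whatever
 * the hardware currently reports in the remaining (reserved) fields.
 */
static void i915_rmw(struct drm_i915_private *dev_priv,
		     i915_reg_t reg, u32 mask, u32 val)
{
	u32 tmp = I915_READ(reg);

	tmp &= ~mask;
	tmp |= val & mask;

	I915_WRITE(reg, tmp);
}
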
@@ -2936,6 +3077,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
switch (id) {
case DPLL_ID_ICL_DPLL0:
case DPLL_ID_ICL_DPLL1:
+ case DPLL_ID_ICL_TBTPLL:
icl_dpll_write(dev_priv, pll);
break;
case DPLL_ID_ICL_MGPLL1:
@@ -3034,6 +3176,7 @@ static const struct intel_shared_dpll_funcs icl_pll_funcs = {
static const struct dpll_info icl_plls[] = {
{ "DPLL 0", &icl_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
{ "DPLL 1", &icl_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "TBT PLL", &icl_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
{ "MG PLL 1", &icl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
{ "MG PLL 2", &icl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
{ "MG PLL 3", &icl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index 7a0cd564a9ee..7e522cf4f13f 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -114,23 +114,27 @@ enum intel_dpll_id {
*/
DPLL_ID_ICL_DPLL1 = 1,
/**
+ * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL
+ */
+ DPLL_ID_ICL_TBTPLL = 2,
+ /**
* @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C)
*/
- DPLL_ID_ICL_MGPLL1 = 2,
+ DPLL_ID_ICL_MGPLL1 = 3,
/**
* @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D)
*/
- DPLL_ID_ICL_MGPLL2 = 3,
+ DPLL_ID_ICL_MGPLL2 = 4,
/**
* @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E)
*/
- DPLL_ID_ICL_MGPLL3 = 4,
+ DPLL_ID_ICL_MGPLL3 = 5,
/**
* @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F)
*/
- DPLL_ID_ICL_MGPLL4 = 5,
+ DPLL_ID_ICL_MGPLL4 = 6,
};
-#define I915_NUM_PLLS 6
+#define I915_NUM_PLLS 7
struct intel_dpll_hw_state {
/* i9xx, pch plls */
@@ -176,6 +180,8 @@ struct intel_dpll_hw_state {
uint32_t mg_pll_ssc;
uint32_t mg_pll_bias;
uint32_t mg_pll_tdc_coldst_bias;
+ uint32_t mg_pll_bias_mask;
+ uint32_t mg_pll_tdc_coldst_bias_mask;
};
/**
@@ -336,5 +342,7 @@ void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state);
+int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
+ uint32_t pll_id);
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b8eefbffc77d..17af06d8a43e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -158,12 +158,6 @@
#define MAX_OUTPUTS 6
/* maximum connectors per crtcs in the mode set */
-/* Maximum cursor sizes */
-#define GEN2_CURSOR_WIDTH 64
-#define GEN2_CURSOR_HEIGHT 64
-#define MAX_CURSOR_WIDTH 256
-#define MAX_CURSOR_HEIGHT 256
-
#define INTEL_I2C_BUS_DVO 1
#define INTEL_I2C_BUS_SDVO 2
@@ -194,7 +188,6 @@ enum intel_output_type {
struct intel_framebuffer {
struct drm_framebuffer base;
- struct drm_i915_gem_object *obj;
struct intel_rotation_info rot_info;
/* for each plane in the normal GTT view */
@@ -261,7 +254,8 @@ struct intel_encoder {
struct intel_crtc_state *pipe_config);
/* Returns a mask of power domains that need to be referenced as part
* of the hardware state readout code. */
- u64 (*get_power_domains)(struct intel_encoder *encoder);
+ u64 (*get_power_domains)(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
/*
* Called during system suspend after all pending requests for the
* encoder are flushed (for example for DP AUX transactions) and
@@ -310,6 +304,8 @@ struct intel_panel {
} backlight;
};
+struct intel_digital_port;
+
/*
* This structure serves as a translation layer between the generic HDCP code
* and the bus-specific code. What that means is that HDCP over HDMI differs
@@ -488,6 +484,8 @@ struct intel_atomic_state {
*/
bool skip_intermediate_wm;
+ bool rps_interactive;
+
/* Gen9+ only */
struct skl_ddb_values wm_results;
@@ -953,6 +951,7 @@ struct intel_plane {
enum pipe pipe;
bool can_scale;
bool has_fbc;
+ bool has_ccs;
int max_downscale;
uint32_t frontbuffer_bit;
@@ -971,7 +970,7 @@ struct intel_plane {
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
struct intel_crtc *crtc);
- bool (*get_hw_state)(struct intel_plane *plane);
+ bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
@@ -1004,7 +1003,7 @@ struct cxsr_latency {
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
-#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
+#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
struct intel_hdmi {
i915_reg_t hdmi_reg;
@@ -1139,7 +1138,6 @@ struct intel_dp {
* register with to kick off an AUX transaction.
*/
uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
- bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider);
@@ -1252,22 +1250,29 @@ intel_attached_encoder(struct drm_connector *connector)
return to_intel_connector(connector)->encoder;
}
-static inline struct intel_digital_port *
-enc_to_dig_port(struct drm_encoder *encoder)
+static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-
- switch (intel_encoder->type) {
+ switch (encoder->type) {
case INTEL_OUTPUT_DDI:
- WARN_ON(!HAS_DDI(to_i915(encoder->dev)));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_HDMI:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ if (intel_encoder_is_dig_port(intel_encoder))
return container_of(encoder, struct intel_digital_port,
base.base);
- default:
+ else
return NULL;
- }
}
static inline struct intel_dp_mst_encoder *
@@ -1281,6 +1286,20 @@ static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
return &enc_to_dig_port(encoder)->dp;
}
+static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
+{
+ switch (encoder->type) {
+ case INTEL_OUTPUT_DP:
+ case INTEL_OUTPUT_EDP:
+ return true;
+ case INTEL_OUTPUT_DDI:
+ /* Skip pure HDMI/DVI DDI encoders */
+ return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg);
+ default:
+ return false;
+ }
+}
+
static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
@@ -1337,9 +1356,6 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
-bool gen11_reset_one_iir(struct drm_i915_private * const i915,
- const unsigned int bank,
- const unsigned int bit);
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
@@ -1376,6 +1392,8 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
/* intel_crt.c */
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t adpa_reg, enum pipe *pipe);
void intel_crt_init(struct drm_i915_private *dev_priv);
void intel_crt_reset(struct drm_encoder *encoder);
@@ -1391,8 +1409,6 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
-struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -1406,6 +1422,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
u32 bxt_signal_levels(struct intel_dp *intel_dp);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
+ u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
void icl_map_plls_to_ports(struct drm_crtc *crtc,
@@ -1487,6 +1505,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
+bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
+ enum port port);
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
@@ -1614,6 +1635,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
+u16 skl_scaler_calc_phase(int sub, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
uint32_t pixel_format);
@@ -1643,6 +1665,9 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
void intel_csr_ucode_resume(struct drm_i915_private *);
/* intel_dp.c */
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe);
bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1660,8 +1685,6 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-int intel_dp_sink_crc(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
@@ -1675,8 +1698,8 @@ void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_dp_mst_suspend(struct drm_device *dev);
-void intel_dp_mst_resume(struct drm_device *dev);
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
@@ -1706,6 +1729,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
uint8_t *link_bw, uint8_t *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
+bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
@@ -1725,8 +1749,8 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
-/* intel_dsi.c */
-void intel_dsi_init(struct drm_i915_private *dev_priv);
+/* vlv_dsi.c */
+void vlv_dsi_init(struct drm_i915_private *dev_priv);
/* intel_dsi_dcs_backlight.c */
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1820,6 +1844,8 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
/* intel_lvds.c */
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t lvds_reg, enum pipe *pipe);
void intel_lvds_init(struct drm_i915_private *dev_priv);
struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);
@@ -1866,7 +1892,6 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
void intel_panel_destroy_backlight(struct drm_connector *connector);
-enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv);
extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_i915_private *dev_priv,
struct drm_display_mode *fixed_mode,
@@ -1910,12 +1935,12 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
void intel_psr_init(struct drm_i915_private *dev_priv);
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
+void intel_psr_short_pulse(struct intel_dp *intel_dp);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
@@ -2057,12 +2082,13 @@ void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);
/* intel_sdvo.c */
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t sdvo_reg, enum pipe *pipe);
bool intel_sdvo_init(struct drm_i915_private *dev_priv,
i915_reg_t reg, enum port port);
/* intel_sprite.c */
-bool intel_format_is_yuv(u32 format);
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
@@ -2075,10 +2101,9 @@ void skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
-bool skl_plane_get_hw_state(struct intel_plane *plane);
+bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
-bool intel_format_is_yuv(uint32_t format);
bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
@@ -2144,7 +2169,6 @@ void lspcon_resume(struct intel_lspcon *lspcon);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
/* intel_pipe_crc.c */
-int intel_pipe_crc_create(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt);
@@ -2160,5 +2184,4 @@ static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
{
}
#endif
-extern const struct file_operations i915_display_crc_ctl_fops;
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 7afeb9580f41..ad7c1cb32983 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -129,21 +129,29 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
return container_of(encoder, struct intel_dsi, base.base);
}
-/* intel_dsi.c */
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
+/* vlv_dsi.c */
+void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
-/* intel_dsi_pll.c */
-bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
-int intel_compute_dsi_pll(struct intel_encoder *encoder,
- struct intel_crtc_state *config);
-void intel_enable_dsi_pll(struct intel_encoder *encoder,
- const struct intel_crtc_state *config);
-void intel_disable_dsi_pll(struct intel_encoder *encoder);
-u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
- struct intel_crtc_state *config);
-void intel_dsi_reset_clocks(struct intel_encoder *encoder,
- enum port port);
+/* vlv_dsi_pll.c */
+int vlv_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void vlv_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config);
+void vlv_dsi_pll_disable(struct intel_encoder *encoder);
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config);
+void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
+
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+int bxt_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void bxt_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config);
+void bxt_dsi_pll_disable(struct intel_encoder *encoder);
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config);
+void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
/* intel_dsi_vbt.c */
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 4d6ffa7b3e7b..ac83d6b89ae0 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -181,7 +181,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
break;
}
- wait_for_dsi_fifo_empty(intel_dsi, port);
+ vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
data += len;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 61d908e0df0e..4e142ff49708 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -137,19 +137,15 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
- if (!(tmp & DVO_ENABLE))
- return false;
-
- *pipe = PORT_TO_PIPE(tmp);
+ *pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;
- return true;
+ return tmp & DVO_ENABLE;
}
static void intel_dvo_get_config(struct intel_encoder *encoder,
@@ -282,8 +278,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
DVO_BLANK_ACTIVE_HIGH;
- if (pipe == 1)
- dvo_val |= DVO_PIPE_B_SELECT;
+ dvo_val |= DVO_PIPE_SEL(pipe);
dvo_val |= DVO_PIPE_STALL;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
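The intel_dvo.c hunks above drop the pipe-B special case in favour of a parameterized select field, and get_hw_state() now decodes the pipe from that same field before checking the enable bit. A minimal userspace sketch of the encode/decode pattern, assuming a two-bit field; the PIPE_SEL names are illustrative stand-ins rather than the real i915 macros:

#include <assert.h>
#include <stdint.h>

#define PIPE_SEL_SHIFT 30
#define PIPE_SEL_MASK  (0x3u << PIPE_SEL_SHIFT)        /* hypothetical two-bit field */
#define PIPE_SEL(pipe) ((uint32_t)(pipe) << PIPE_SEL_SHIFT)

static uint32_t encode_pipe(uint32_t val, unsigned int pipe)
{
	val &= ~PIPE_SEL_MASK;          /* clear any stale selection */
	return val | PIPE_SEL(pipe);    /* program the requested pipe */
}

static unsigned int decode_pipe(uint32_t val)
{
	return (val & PIPE_SEL_MASK) >> PIPE_SEL_SHIFT;
}

int main(void)
{
	uint32_t reg = encode_pipe(0, 2);

	assert(decode_pipe(reg) == 2);  /* round-trips for any pipe id */
	return 0;
}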
@@ -443,7 +438,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
int gpio;
bool dvoinit;
enum pipe pipe;
- uint32_t dpll[I915_MAX_PIPES];
+ u32 dpll[I915_MAX_PIPES];
enum port port;
/*
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 1590375f31cb..2d1952849d69 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -25,7 +25,6 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
-#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
@@ -230,6 +229,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
break;
default:
MISSING_CASE(class);
+ /* fall through */
case VIDEO_DECODE_CLASS:
case VIDEO_ENHANCEMENT_CLASS:
case COPY_ENGINE_CLASS:
@@ -302,6 +302,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
+ if (engine->context_size)
+ DRIVER_CAPS(dev_priv)->has_logical_contexts = true;
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
@@ -456,28 +458,16 @@ static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
i915_gem_batch_pool_init(&engine->batch_pool, engine);
}
-static bool csb_force_mmio(struct drm_i915_private *i915)
-{
- /* Older GVT emulation depends upon intercepting CSB mmio */
- if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
- return true;
-
- return false;
-}
-
static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- execlists->csb_use_mmio = csb_force_mmio(engine->i915);
-
execlists->port_mask = 1;
BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
execlists->queue_priority = INT_MIN;
- execlists->queue = RB_ROOT;
- execlists->first = NULL;
+ execlists->queue = RB_ROOT_CACHED;
}
/**
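Switching execlists->queue from RB_ROOT to RB_ROOT_CACHED lets the scheduler drop the separate execlists->first pointer: the cached variant tracks the leftmost node itself, so the highest-priority entry is available in O(1) via rb_first_cached(). A kernel-style sketch of the insertion side, using a hypothetical prio_node in place of the i915 priolist (not runnable outside the kernel tree):

#include <linux/rbtree.h>

struct prio_node {
	struct rb_node node;
	int priority;
};

static void prio_insert(struct rb_root_cached *root, struct prio_node *pn)
{
	struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*link) {
		struct prio_node *cur;

		parent = *link;
		cur = rb_entry(parent, struct prio_node, node);
		if (pn->priority > cur->priority) {
			link = &parent->rb_left;    /* higher priority sorts leftmost */
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&pn->node, parent, link);
	rb_insert_color_cached(&pn->node, root, leftmost);
}

Consumers then peek with rb_first_cached() and remove with rb_erase_cached(), the same pattern __guc_dequeue() adopts later in this series.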
@@ -492,6 +482,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
i915_timeline_init(engine->i915, &engine->timeline, engine->name);
+ lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE);
intel_engine_init_execlist(engine);
intel_engine_init_hangcheck(engine);
@@ -499,7 +490,8 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_cmd_parser(engine);
}
-int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
+int intel_engine_create_scratch(struct intel_engine_cs *engine,
+ unsigned int size)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -515,7 +507,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
return PTR_ERR(obj);
}
- vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unref;
@@ -533,7 +525,7 @@ err_unref:
return ret;
}
-static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
+void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->scratch);
}
@@ -585,7 +577,7 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret)
goto err;
- vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -645,6 +637,12 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
return 0;
}
+static void __intel_context_unpin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ intel_context_unpin(to_intel_context(ctx, engine));
+}
+
/**
* intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -658,7 +656,8 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
*/
int intel_engine_init_common(struct intel_engine_cs *engine)
{
- struct intel_ring *ring;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_context *ce;
int ret;
engine->set_default_submission(engine);
@@ -670,18 +669,18 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* be available. To avoid this we always pin the default
* context.
*/
- ring = intel_context_pin(engine->i915->kernel_context, engine);
- if (IS_ERR(ring))
- return PTR_ERR(ring);
+ ce = intel_context_pin(i915->kernel_context, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
/*
* Similarly the preempt context must always be available so that
* we can interrupt the engine at any time.
*/
- if (engine->i915->preempt_context) {
- ring = intel_context_pin(engine->i915->preempt_context, engine);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
+ if (i915->preempt_context) {
+ ce = intel_context_pin(i915->preempt_context, engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
goto err_unpin_kernel;
}
}
@@ -690,7 +689,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (ret)
goto err_unpin_preempt;
- if (HWS_NEEDS_PHYSICAL(engine->i915))
+ if (HWS_NEEDS_PHYSICAL(i915))
ret = init_phys_status_page(engine);
else
ret = init_status_page(engine);
@@ -702,10 +701,11 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
err_breadcrumbs:
intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
- if (engine->i915->preempt_context)
- intel_context_unpin(engine->i915->preempt_context, engine);
+ if (i915->preempt_context)
+ __intel_context_unpin(i915->preempt_context, engine);
+
err_unpin_kernel:
- intel_context_unpin(engine->i915->kernel_context, engine);
+ __intel_context_unpin(i915->kernel_context, engine);
return ret;
}
@@ -718,6 +718,8 @@ err_unpin_kernel:
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+
intel_engine_cleanup_scratch(engine);
if (HWS_NEEDS_PHYSICAL(engine->i915))
@@ -732,9 +734,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
- if (engine->i915->preempt_context)
- intel_context_unpin(engine->i915->preempt_context, engine);
- intel_context_unpin(engine->i915->kernel_context, engine);
+ if (i915->preempt_context)
+ __intel_context_unpin(i915->preempt_context, engine);
+ __intel_context_unpin(i915->kernel_context, engine);
i915_timeline_fini(&engine->timeline);
}
@@ -769,6 +771,35 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
return bbaddr;
}
+int intel_engine_stop_cs(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ const u32 base = engine->mmio_base;
+ const i915_reg_t mode = RING_MI_MODE(base);
+ int err;
+
+ if (INTEL_GEN(dev_priv) < 3)
+ return -ENODEV;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
+
+ err = 0;
+ if (__intel_wait_for_register_fw(dev_priv,
+ mode, MODE_IDLE, MODE_IDLE,
+ 1000, 0,
+ NULL)) {
+ GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
+ err = -ETIMEDOUT;
+ }
+
+	/* A final mmio read to hopefully flush any pending GPU writes to memory */
+ POSTING_READ_FW(mode);
+
+ return err;
+}
+
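intel_engine_stop_cs() relies on RING_MI_MODE being a "masked" register: the high 16 bits of a write select which of the low 16 bits may change, so a blind write cannot clobber neighbouring fields. A self-contained sketch of that convention; the helper names mirror, but are not, the i915 _MASKED_BIT_* macros:

#include <assert.h>
#include <stdint.h>

#define MASKED_BIT_ENABLE(a)  ((uint32_t)(a) << 16 | (a))
#define MASKED_BIT_DISABLE(a) ((uint32_t)(a) << 16)

/* Model the hardware update rule for a masked register. */
static uint16_t masked_write(uint16_t cur, uint32_t val)
{
	uint16_t mask = val >> 16;      /* which bits the write may touch */
	uint16_t bits = val & 0xffff;   /* their new values */

	return (cur & ~mask) | (bits & mask);
}

int main(void)
{
	uint16_t mode = 0x00f0;                              /* unrelated fields set */

	mode = masked_write(mode, MASKED_BIT_ENABLE(0x100)); /* STOP_RING-like bit */
	assert(mode == 0x01f0);                              /* neighbours untouched */
	mode = masked_write(mode, MASKED_BIT_DISABLE(0x100));
	assert(mode == 0x00f0);
	return 0;
}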
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
switch (type) {
@@ -780,12 +811,32 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
}
}
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
+{
+ const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+ u32 mcr_s_ss_select;
+ u32 slice = fls(sseu->slice_mask);
+ u32 subslice = fls(sseu->subslice_mask[slice]);
+
+ if (INTEL_GEN(dev_priv) == 10)
+ mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
+ GEN8_MCR_SUBSLICE(subslice);
+ else if (INTEL_GEN(dev_priv) >= 11)
+ mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
+ GEN11_MCR_SUBSLICE(subslice);
+ else
+ mcr_s_ss_select = 0;
+
+ return mcr_s_ss_select;
+}
+
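intel_calculate_mcr_s_ss_select() picks a valid slice/subslice pair by taking the highest set bit of each fuse mask with fls(). For readers without the kernel headers, a hedged userspace equivalent of fls() (find last set, 1-based, 0 when no bit is set):

#include <assert.h>
#include <stdint.h>

static int fls_sketch(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;   /* clz is undefined for 0, so guard it */
}

int main(void)
{
	assert(fls_sketch(0x0) == 0);   /* nothing fused in */
	assert(fls_sketch(0x1) == 1);   /* only slice 0 present */
	assert(fls_sketch(0x6) == 3);   /* highest enabled slice is bit 2 */
	return 0;
}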
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg)
{
uint32_t mcr_slice_subslice_mask;
uint32_t mcr_slice_subslice_select;
+ uint32_t default_mcr_s_ss_select;
uint32_t mcr;
uint32_t ret;
enum forcewake_domains fw_domains;
@@ -802,6 +853,8 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
GEN8_MCR_SUBSLICE(subslice);
}
+ default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
+
fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ);
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -812,11 +865,10 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
- /*
- * The HW expects the slice and sublice selectors to be reset to 0
- * after reading out the registers.
- */
- WARN_ON_ONCE(mcr & mcr_slice_subslice_mask);
+
+ WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
+ default_mcr_s_ss_select);
+
mcr &= ~mcr_slice_subslice_mask;
mcr |= mcr_slice_subslice_select;
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
@@ -824,6 +876,8 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
ret = I915_READ_FW(reg);
mcr &= ~mcr_slice_subslice_mask;
+ mcr |= default_mcr_s_ss_select;
+
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
@@ -934,11 +988,24 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return true;
/* Waiting to drain ELSP? */
- if (READ_ONCE(engine->execlists.active))
- return false;
+ if (READ_ONCE(engine->execlists.active)) {
+ struct tasklet_struct *t = &engine->execlists.tasklet;
+
+ local_bh_disable();
+ if (tasklet_trylock(t)) {
+ /* Must wait for any GPU reset in progress. */
+ if (__tasklet_is_enabled(t))
+ t->func(t->data);
+ tasklet_unlock(t);
+ }
+ local_bh_enable();
- /* ELSP is empty, but there are ready requests? */
- if (READ_ONCE(engine->execlists.first))
+ if (READ_ONCE(engine->execlists.active))
+ return false;
+ }
+
+ /* ELSP is empty, but there are ready requests? E.g. after reset */
+ if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
return false;
/* Ring stopped? */
@@ -978,8 +1045,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
*/
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
- const struct i915_gem_context * const kernel_context =
- engine->i915->kernel_context;
+ const struct intel_context *kernel_context =
+ to_intel_context(engine->i915->kernel_context, engine);
struct i915_request *rq;
lockdep_assert_held(&engine->i915->drm.struct_mutex);
@@ -991,7 +1058,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
*/
rq = __i915_gem_active_peek(&engine->timeline.last_request);
if (rq)
- return rq->ctx == kernel_context;
+ return rq->hw_context == kernel_context;
else
return engine->last_retired_context == kernel_context;
}
@@ -1006,6 +1073,28 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
}
/**
+ * intel_engines_sanitize: called after the GPU has lost power
+ * @i915: the i915 device
+ *
+ * Anytime we reset the GPU, either with an explicit GPU reset or through a
+ * PCI power cycle, the GPU loses state and we must reset our state tracking
+ * to match. Note that calling intel_engines_sanitize() if the GPU has not
+ * been reset results in much confusion!
+ */
+void intel_engines_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ GEM_TRACE("\n");
+
+ for_each_engine(engine, i915, id) {
+ if (engine->reset.reset)
+ engine->reset.reset(engine, NULL);
+ }
+}
+
+/**
* intel_engines_park: called when the GT is transitioning from busy->idle
* @i915: the i915 device
*
@@ -1043,6 +1132,11 @@ void intel_engines_park(struct drm_i915_private *i915)
if (engine->park)
engine->park(engine);
+ if (engine->pinned_default_state) {
+ i915_gem_object_unpin_map(engine->default_state);
+ engine->pinned_default_state = NULL;
+ }
+
i915_gem_batch_pool_fini(&engine->batch_pool);
engine->execlists.no_priolist = false;
}
@@ -1060,6 +1154,16 @@ void intel_engines_unpark(struct drm_i915_private *i915)
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
+ void *map;
+
+ /* Pin the default state for fast resets from atomic context. */
+ map = NULL;
+ if (engine->default_state)
+ map = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (!IS_ERR_OR_NULL(map))
+ engine->pinned_default_state = map;
+
if (engine->unpark)
engine->unpark(engine);
@@ -1067,6 +1171,26 @@ void intel_engines_unpark(struct drm_i915_private *i915)
}
}
+/**
+ * intel_engine_lost_context: called when the GPU is reset into unknown state
+ * @engine: the engine
+ *
+ * We have either reset the GPU or are otherwise about to lose state tracking
+ * of the current GPU logical state (e.g. suspend). On next use, it is therefore
+ * imperative that we make no presumptions about the current state and load
+ * from scratch.
+ */
+void intel_engine_lost_context(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+
+ lockdep_assert_held(&engine->i915->drm.struct_mutex);
+
+ ce = fetch_and_zero(&engine->last_retired_context);
+ if (ce)
+ intel_context_unpin(ce);
+}
+
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
@@ -1151,7 +1275,7 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
rowsize, sizeof(u32),
line, sizeof(line),
false) >= sizeof(line));
- drm_printf(m, "%08zx %s\n", pos, line);
+ drm_printf(m, "[%04zx] %s\n", pos, line);
prev = buf + pos;
skip = false;
@@ -1166,6 +1290,8 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
&engine->execlists;
u64 addr;
+ if (engine->id == RCS && IS_GEN(dev_priv, 4, 7))
+ drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
drm_printf(m, "\tRING_START: 0x%08x\n",
I915_READ(RING_START(engine->mmio_base)));
drm_printf(m, "\tRING_HEAD: 0x%08x\n",
@@ -1232,12 +1358,10 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
read = GEN8_CSB_READ_PTR(ptr);
write = GEN8_CSB_WRITE_PTR(ptr);
- drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? %s (%s)\n",
+ drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], tasklet queued? %s (%s)\n",
read, execlists->csb_head,
write,
intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
- yesno(test_bit(ENGINE_IRQ_EXECLIST,
- &engine->irq_posted)),
yesno(test_bit(TASKLET_STATE_SCHED,
&engine->execlists.tasklet.state)),
enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
@@ -1287,6 +1411,39 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
}
}
+static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
+{
+ void *ring;
+ int size;
+
+ drm_printf(m,
+ "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
+ rq->head, rq->postfix, rq->tail,
+ rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+ rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+
+ size = rq->tail - rq->head;
+ if (rq->tail < rq->head)
+ size += rq->ring->size;
+
+ ring = kmalloc(size, GFP_ATOMIC);
+ if (ring) {
+ const void *vaddr = rq->ring->vaddr;
+ unsigned int head = rq->head;
+ unsigned int len = 0;
+
+ if (rq->tail < head) {
+ len = rq->ring->size - head;
+ memcpy(ring, vaddr + head, len);
+ head = 0;
+ }
+ memcpy(ring + len, vaddr + head, size - len);
+
+ hexdump(m, ring, size);
+ kfree(ring);
+ }
+}
+
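print_request_ring() must snapshot a request's payload even when it wraps past the end of the ring, which is why the copy above is split in two when tail < head. The same copy-with-wrap idiom in isolation, as a small self-contained sketch over a byte ring:

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Copy the span [head, tail) out of a circular buffer into a flat one. */
static size_t ring_copy(char *dst, const char *ring, size_t ring_size,
			size_t head, size_t tail)
{
	size_t size = tail - head;      /* unsigned wraparound handles tail < head */
	size_t len = 0;

	if (tail < head) {
		size += ring_size;
		len = ring_size - head; /* first chunk: head to end of ring */
		memcpy(dst, ring + head, len);
		head = 0;
	}
	memcpy(dst + len, ring + head, size - len);
	return size;
}

int main(void)
{
	char ring[8] = "ABCDEFGH", out[8];

	assert(ring_copy(out, ring, sizeof(ring), 6, 2) == 4);
	assert(memcmp(out, "GHAB", 4) == 0);    /* wrapped read stitched back together */
	return 0;
}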
void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
@@ -1296,6 +1453,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
const struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct i915_request *rq, *last;
+ unsigned long flags;
struct rb_node *rb;
int count;
@@ -1336,11 +1494,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rq = i915_gem_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
- drm_printf(m,
- "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
- rq->head, rq->postfix, rq->tail,
- rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
- rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+
drm_printf(m, "\t\tring->start: 0x%08x\n",
i915_ggtt_offset(rq->ring->vma));
drm_printf(m, "\t\tring->head: 0x%08x\n",
@@ -1351,6 +1505,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rq->ring->emit);
drm_printf(m, "\t\tring->space: 0x%08x\n",
rq->ring->space);
+
+ print_request_ring(m, rq);
}
rcu_read_unlock();
@@ -1362,7 +1518,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
- spin_lock_irq(&engine->timeline.lock);
+ local_irq_save(flags);
+ spin_lock(&engine->timeline.lock);
last = NULL;
count = 0;
@@ -1384,7 +1541,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
last = NULL;
count = 0;
drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
- for (rb = execlists->first; rb; rb = rb_next(rb)) {
+ for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p =
rb_entry(rb, typeof(*p), node);
@@ -1404,22 +1561,21 @@ void intel_engine_dump(struct intel_engine_cs *engine,
print_request(m, last, "\t\tQ ");
}
- spin_unlock_irq(&engine->timeline.lock);
+ spin_unlock(&engine->timeline.lock);
- spin_lock_irq(&b->rb_lock);
+ spin_lock(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
drm_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock_irq(&b->rb_lock);
+ spin_unlock(&b->rb_lock);
+ local_irq_restore(flags);
- drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
+ drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s)\n",
engine->irq_posted,
yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
- &engine->irq_posted)),
- yesno(test_bit(ENGINE_IRQ_EXECLIST,
&engine->irq_posted)));
drm_printf(m, "HWSP:\n");
@@ -1468,8 +1624,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
if (!intel_engine_supports_stats(engine))
return -ENODEV;
- tasklet_disable(&execlists->tasklet);
- write_seqlock_irqsave(&engine->stats.lock, flags);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ write_seqlock(&engine->stats.lock);
if (unlikely(engine->stats.enabled == ~0)) {
err = -EBUSY;
@@ -1493,8 +1649,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
}
unlock:
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
- tasklet_enable(&execlists->tasklet);
+ write_sequnlock(&engine->stats.lock);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
return err;
}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index b431b6733cc1..01d1d2088f04 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -399,89 +399,6 @@ bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
return dev_priv->fbc.active;
}
-static void intel_fbc_work_fn(struct work_struct *__work)
-{
- struct drm_i915_private *dev_priv =
- container_of(__work, struct drm_i915_private, fbc.work.work);
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_work *work = &fbc->work;
- struct intel_crtc *crtc = fbc->crtc;
- struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
-
- if (drm_crtc_vblank_get(&crtc->base)) {
- /* CRTC is now off, leave FBC deactivated */
- mutex_lock(&fbc->lock);
- work->scheduled = false;
- mutex_unlock(&fbc->lock);
- return;
- }
-
-retry:
- /* Delay the actual enabling to let pageflipping cease and the
- * display to settle before starting the compression. Note that
- * this delay also serves a second purpose: it allows for a
- * vblank to pass after disabling the FBC before we attempt
- * to modify the control registers.
- *
- * WaFbcWaitForVBlankBeforeEnable:ilk,snb
- *
- * It is also worth mentioning that since work->scheduled_vblank can be
- * updated multiple times by the other threads, hitting the timeout is
- * not an error condition. We'll just end up hitting the "goto retry"
- * case below.
- */
- wait_event_timeout(vblank->queue,
- drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
- msecs_to_jiffies(50));
-
- mutex_lock(&fbc->lock);
-
- /* Were we cancelled? */
- if (!work->scheduled)
- goto out;
-
- /* Were we delayed again while this function was sleeping? */
- if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
- mutex_unlock(&fbc->lock);
- goto retry;
- }
-
- intel_fbc_hw_activate(dev_priv);
-
- work->scheduled = false;
-
-out:
- mutex_unlock(&fbc->lock);
- drm_crtc_vblank_put(&crtc->base);
-}
-
-static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_work *work = &fbc->work;
-
- WARN_ON(!mutex_is_locked(&fbc->lock));
- if (WARN_ON(!fbc->enabled))
- return;
-
- if (drm_crtc_vblank_get(&crtc->base)) {
- DRM_ERROR("vblank not available for FBC on pipe %c\n",
- pipe_name(crtc->pipe));
- return;
- }
-
- /* It is useless to call intel_fbc_cancel_work() or cancel_work() in
- * this function since we're not releasing fbc.lock, so it won't have an
- * opportunity to grab it to discover that it was cancelled. So we just
- * update the expected jiffy count. */
- work->scheduled = true;
- work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
- drm_crtc_vblank_put(&crtc->base);
-
- schedule_work(&work->work);
-}
-
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
const char *reason)
{
@@ -489,11 +406,6 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
WARN_ON(!mutex_is_locked(&fbc->lock));
- /* Calling cancel_work() here won't help due to the fact that the work
- * function grabs fbc->lock. Just set scheduled to false so the work
- * function can know it was cancelled. */
- fbc->work.scheduled = false;
-
if (fbc->active)
intel_fbc_hw_deactivate(dev_priv);
@@ -924,13 +836,6 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
32 * fbc->threshold) * 8;
}
-static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
- struct intel_fbc_reg_params *params2)
-{
- /* We can use this since intel_fbc_get_reg_params() does a memset. */
- return memcmp(params1, params2, sizeof(*params1)) == 0;
-}
-
void intel_fbc_pre_update(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
@@ -953,6 +858,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
goto unlock;
intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
+ fbc->flip_pending = true;
deactivate:
intel_fbc_deactivate(dev_priv, reason);
@@ -988,13 +894,15 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_reg_params old_params;
WARN_ON(!mutex_is_locked(&fbc->lock));
if (!fbc->enabled || fbc->crtc != crtc)
return;
+ fbc->flip_pending = false;
+ WARN_ON(fbc->active);
+
if (!i915_modparams.enable_fbc) {
intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
__intel_fbc_disable(dev_priv);
@@ -1002,25 +910,16 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
return;
}
- if (!intel_fbc_can_activate(crtc)) {
- WARN_ON(fbc->active);
- return;
- }
-
- old_params = fbc->params;
intel_fbc_get_reg_params(crtc, &fbc->params);
- /* If the scanout has not changed, don't modify the FBC settings.
- * Note that we make the fundamental assumption that the fb->obj
- * cannot be unpinned (and have its GTT offset and fence revoked)
- * without first being decoupled from the scanout and FBC disabled.
- */
- if (fbc->active &&
- intel_fbc_reg_params_equal(&old_params, &fbc->params))
+ if (!intel_fbc_can_activate(crtc))
return;
- intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
- intel_fbc_schedule_activation(crtc);
+ if (!fbc->busy_bits) {
+ intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
+ intel_fbc_hw_activate(dev_priv);
+ } else
+ intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
void intel_fbc_post_update(struct intel_crtc *crtc)
@@ -1085,7 +984,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
(frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
if (fbc->active)
intel_fbc_recompress(dev_priv);
- else
+ else if (!fbc->flip_pending)
__intel_fbc_post_update(fbc->crtc);
}
@@ -1225,8 +1124,6 @@ void intel_fbc_disable(struct intel_crtc *crtc)
if (fbc->crtc == crtc)
__intel_fbc_disable(dev_priv);
mutex_unlock(&fbc->lock);
-
- cancel_work_sync(&fbc->work.work);
}
/**
@@ -1248,8 +1145,6 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
__intel_fbc_disable(dev_priv);
}
mutex_unlock(&fbc->lock);
-
- cancel_work_sync(&fbc->work.work);
}
static void intel_fbc_underrun_work_fn(struct work_struct *work)
@@ -1400,12 +1295,10 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
mutex_init(&fbc->lock);
fbc->enabled = false;
fbc->active = false;
- fbc->work.scheduled = false;
if (need_fbc_vtd_wa(dev_priv))
mkwrite_device_info(dev_priv)->has_fbc = false;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index e9e02b58b7be..fb2f9fce34cd 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -47,7 +47,7 @@
static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
- struct drm_i915_gem_object *obj = ifbdev->fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
unsigned int origin =
ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
@@ -193,7 +193,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
drm_framebuffer_put(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
- if (!intel_fb || WARN_ON(!intel_fb->obj)) {
+ if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
@@ -265,7 +265,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (intel_fb->obj->stolen && !prealloc)
+ if (intel_fb_obj(fb)->stolen && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -792,7 +792,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* been restored from swap. If the object is stolen however, it will be
* full of whatever garbage was left in there.
*/
- if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
+ if (state == FBINFO_STATE_RUNNING &&
+ intel_fb_obj(&ifbdev->fb->base)->stolen)
memset_io(info->screen_base, 0, info->screen_size);
drm_fb_helper_set_suspend(&ifbdev->helper, state);
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 7fff0a0eceb4..c3379bde266f 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -153,8 +153,6 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
spin_unlock(&dev_priv->fb_tracking.lock);
-
- intel_psr_single_frame_update(dev_priv, frontbuffer_bits);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 116f4ccf1bbd..560c7406ae40 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -27,6 +27,8 @@
#include "intel_guc_submission.h"
#include "i915_drv.h"
+static void guc_init_ggtt_pin_bias(struct intel_guc *guc);
+
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -73,7 +75,7 @@ void intel_guc_init_early(struct intel_guc *guc)
guc->notify = gen8_guc_raise_irq;
}
-int intel_guc_init_wq(struct intel_guc *guc)
+static int guc_init_wq(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -124,7 +126,7 @@ int intel_guc_init_wq(struct intel_guc *guc)
return 0;
}
-void intel_guc_fini_wq(struct intel_guc *guc)
+static void guc_fini_wq(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -135,6 +137,28 @@ void intel_guc_fini_wq(struct intel_guc *guc)
destroy_workqueue(guc->log.relay.flush_wq);
}
+int intel_guc_init_misc(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+ int ret;
+
+ guc_init_ggtt_pin_bias(guc);
+
+ ret = guc_init_wq(guc);
+ if (ret)
+ return ret;
+
+ intel_uc_fw_fetch(i915, &guc->fw);
+
+ return 0;
+}
+
+void intel_guc_fini_misc(struct intel_guc *guc)
+{
+ intel_uc_fw_fini(&guc->fw);
+ guc_fini_wq(guc);
+}
+
static int guc_shared_data_create(struct intel_guc *guc)
{
struct i915_vma *vma;
@@ -169,7 +193,7 @@ int intel_guc_init(struct intel_guc *guc)
ret = guc_shared_data_create(guc);
if (ret)
- return ret;
+ goto err_fetch;
GEM_BUG_ON(!guc->shared_data);
ret = intel_guc_log_create(&guc->log);
@@ -190,6 +214,8 @@ err_log:
intel_guc_log_destroy(&guc->log);
err_shared:
guc_shared_data_destroy(guc);
+err_fetch:
+ intel_uc_fw_fini(&guc->fw);
return ret;
}
@@ -201,14 +227,17 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
guc_shared_data_destroy(guc);
+ intel_uc_fw_fini(&guc->fw);
}
-static u32 get_log_control_flags(void)
+static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
- u32 level = i915_modparams.guc_log_level;
- u32 flags = 0;
+ u32 level = intel_guc_log_get_level(&guc->log);
+ u32 flags;
+ u32 ads;
- GEM_BUG_ON(level < 0);
+ ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
+ flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED;
if (!GUC_LOG_LEVEL_IS_ENABLED(level))
flags |= GUC_LOG_DEFAULT_DISABLED;
@@ -222,6 +251,78 @@ static u32 get_log_control_flags(void)
return flags;
}
+static u32 guc_ctl_feature_flags(struct intel_guc *guc)
+{
+ u32 flags = 0;
+
+ flags |= GUC_CTL_VCS2_ENABLED;
+
+ if (USES_GUC_SUBMISSION(guc_to_i915(guc)))
+ flags |= GUC_CTL_KERNEL_SUBMISSIONS;
+ else
+ flags |= GUC_CTL_DISABLE_SCHEDULER;
+
+ return flags;
+}
+
+static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
+{
+ u32 flags = 0;
+
+ if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
+ u32 ctxnum, base;
+
+ base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
+ ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;
+
+ base >>= PAGE_SHIFT;
+ flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
+ (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
+ }
+ return flags;
+}
+
+static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
+{
+ u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
+ u32 flags;
+
+ #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
+ #define UNIT SZ_1M
+ #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
+ #else
+ #define UNIT SZ_4K
+ #define FLAG 0
+ #endif
+
+ BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
+ BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
+ BUILD_BUG_ON(!DPC_BUFFER_SIZE);
+ BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
+ BUILD_BUG_ON(!ISR_BUFFER_SIZE);
+ BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));
+
+ BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
+ (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
+ BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
+ (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
+ BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
+ (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));
+
+ flags = GUC_LOG_VALID |
+ GUC_LOG_NOTIFY_ON_HALF_FULL |
+ FLAG |
+ ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
+ ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
+ ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
+ (offset << GUC_LOG_BUF_ADDR_SHIFT);
+
+ #undef UNIT
+ #undef FLAG
+
+ return flags;
+}
+
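guc_ctl_log_params_flags() packs three region sizes into a single control dword and uses BUILD_BUG_ON() so a size that overflows its bitfield fails the build rather than silently corrupting a neighbouring field. The same compile-time guard in portable C11, with a made-up one-bit field standing in for GUC_LOG_CRASH:

#include <stdint.h>

#define CRASH_SHIFT 4
#define CRASH_MASK  (0x1u << CRASH_SHIFT)       /* hypothetical one-bit field */
#define CRASH_UNITS 1u                          /* (size / UNIT) - 1 to encode */

_Static_assert(CRASH_UNITS <= (CRASH_MASK >> CRASH_SHIFT),
	       "crash buffer too large for its control-word field");

static inline uint32_t pack_crash(uint32_t units)
{
	return (units << CRASH_SHIFT) & CRASH_MASK;     /* field-safe encode */
}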
/*
* Initialise the GuC parameter block before starting the firmware
* transfer. These parameters are read by the firmware on startup
@@ -245,32 +346,13 @@ void intel_guc_init_params(struct intel_guc *guc)
params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
- params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
- GUC_CTL_VCS2_ENABLED;
-
- params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
-
- params[GUC_CTL_DEBUG] = get_log_control_flags();
+ params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
+ params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
+ params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
+ params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
- /* If GuC submission is enabled, set up additional parameters here */
- if (USES_GUC_SUBMISSION(dev_priv)) {
- u32 ads = intel_guc_ggtt_offset(guc,
- guc->ads_vma) >> PAGE_SHIFT;
- u32 pgs = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
- u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
-
- params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
- params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
-
- pgs >>= PAGE_SHIFT;
- params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
- (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
-
- params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
-
- /* Unmask this bit to enable the GuC's internal scheduler */
- params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
- }
+ for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+ DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
/*
* All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
@@ -346,10 +428,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
ret = -EIO;
if (ret) {
- DRM_DEBUG_DRIVER("INTEL_GUC_SEND: Action 0x%X failed;"
- " ret=%d status=0x%08X response=0x%08X\n",
- action[0], ret, status,
- I915_READ(SOFT_SCRATCH(15)));
+ DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
+ action[0], ret, status);
goto out;
}
@@ -386,11 +466,13 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
* could happen that GuC sets the bit for 2nd interrupt but Host
* clears out the bit on handling the 1st interrupt.
*/
+ disable_rpm_wakeref_asserts(dev_priv);
spin_lock(&guc->irq_lock);
val = I915_READ(SOFT_SCRATCH(15));
msg = val & guc->msg_enabled_mask;
I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
spin_unlock(&guc->irq_lock);
+ enable_rpm_wakeref_asserts(dev_priv);
intel_guc_to_host_process_recv_msg(guc, msg);
}
@@ -532,13 +614,13 @@ int intel_guc_resume(struct intel_guc *guc)
*/
/**
- * intel_guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value.
+ * guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value.
* @guc: intel_guc structure.
*
* This function will calculate and initialize the ggtt_pin_bias value based on
* overall WOPCM size and GuC WOPCM size.
*/
-void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc)
+static void guc_init_ggtt_pin_bias(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_i915(guc);
@@ -572,7 +654,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
if (IS_ERR(vma))
goto err;
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index f1265e122d30..4121928a495e 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -151,11 +151,10 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_init_params(struct intel_guc *guc);
-void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc);
-int intel_guc_init_wq(struct intel_guc *guc);
-void intel_guc_fini_wq(struct intel_guc *guc);
+int intel_guc_init_misc(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
+void intel_guc_fini_misc(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 0867ba76d445..1a0f2a39cef9 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -84,12 +84,12 @@
#define GUC_LOG_VALID (1 << 0)
#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
-#define GUC_LOG_CRASH_PAGES 1
#define GUC_LOG_CRASH_SHIFT 4
-#define GUC_LOG_DPC_PAGES 7
+#define GUC_LOG_CRASH_MASK (0x1 << GUC_LOG_CRASH_SHIFT)
#define GUC_LOG_DPC_SHIFT 6
-#define GUC_LOG_ISR_PAGES 7
+#define GUC_LOG_DPC_MASK (0x7 << GUC_LOG_DPC_SHIFT)
#define GUC_LOG_ISR_SHIFT 9
+#define GUC_LOG_ISR_MASK (0x7 << GUC_LOG_ISR_SHIFT)
#define GUC_LOG_BUF_ADDR_SHIFT 12
#define GUC_CTL_PAGE_FAULT_CONTROL 5
@@ -532,20 +532,6 @@ enum guc_log_buffer_type {
};
/**
- * DOC: GuC Log buffer Layout
- *
- * Page0 +-------------------------------+
- * | ISR state header (32 bytes) |
- * | DPC state header |
- * | Crash dump state header |
- * Page1 +-------------------------------+
- * | ISR logs |
- * Page9 +-------------------------------+
- * | DPC logs |
- * Page17 +-------------------------------+
- * | Crash Dump logs |
- * +-------------------------------+
- *
* The state structure below is used to coordinate retrieval of GuC firmware
* logs. Separate state is maintained for each log buffer type.
* read_ptr points to the location where i915 read last in log buffer and
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 401e1704d61e..6da61a71d28f 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -215,11 +215,11 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
switch (type) {
case GUC_ISR_LOG_BUFFER:
- return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
+ return ISR_BUFFER_SIZE;
case GUC_DPC_LOG_BUFFER:
- return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
+ return DPC_BUFFER_SIZE;
case GUC_CRASH_DUMP_LOG_BUFFER:
- return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
+ return CRASH_BUFFER_SIZE;
default:
MISSING_CASE(type);
}
@@ -397,7 +397,7 @@ static int guc_log_relay_create(struct intel_guc_log *log)
lockdep_assert_held(&log->relay.lock);
/* Keep the size of sub buffers same as shared log buffer */
- subbuf_size = GUC_LOG_SIZE;
+ subbuf_size = log->vma->size;
/*
* Store up to 8 snapshots, which is large enough to buffer sufficient
@@ -452,13 +452,34 @@ int intel_guc_log_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct i915_vma *vma;
- unsigned long offset;
- u32 flags;
+ u32 guc_log_size;
int ret;
GEM_BUG_ON(log->vma);
- vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE);
+ /*
+ * GuC Log buffer Layout
+ *
+ * +===============================+ 00B
+ * | Crash dump state header |
+ * +-------------------------------+ 32B
+ * | DPC state header |
+ * +-------------------------------+ 64B
+ * | ISR state header |
+ * +-------------------------------+ 96B
+ * | |
+ * +===============================+ PAGE_SIZE (4KB)
+ * | Crash Dump logs |
+ * +===============================+ + CRASH_SIZE
+ * | DPC logs |
+ * +===============================+ + DPC_SIZE
+ * | ISR logs |
+ * +===============================+ + ISR_SIZE
+ */
+ guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE +
+ ISR_BUFFER_SIZE;
+
+ vma = intel_guc_allocate_vma(guc, guc_log_size);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -466,20 +487,12 @@ int intel_guc_log_create(struct intel_guc_log *log)
log->vma = vma;
- /* each allocated unit is a page */
- flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
- (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
- (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
- (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
-
- offset = intel_guc_ggtt_offset(guc, vma) >> PAGE_SHIFT;
- log->flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+ log->level = i915_modparams.guc_log_level;
return 0;
err:
- /* logging will be off */
- i915_modparams.guc_log_level = 0;
+ DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret);
return ret;
}
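The new layout comment pins the vma size to one state page followed by the three log regions. Sanity-checking that arithmetic standalone with the non-debug sizes from intel_guc_log.h, and assuming 4K pages:

#include <assert.h>

#define PAGE_SZ  4096u          /* assumed PAGE_SIZE */
#define CRASH_SZ (8u * 1024)    /* CRASH_BUFFER_SIZE, !CONFIG_DRM_I915_DEBUG_GUC */
#define DPC_SZ   (32u * 1024)   /* DPC_BUFFER_SIZE */
#define ISR_SZ   (32u * 1024)   /* ISR_BUFFER_SIZE */

int main(void)
{
	unsigned int total = PAGE_SZ + CRASH_SZ + DPC_SZ + ISR_SZ;

	assert(total == 76u * 1024);    /* 4K of headers plus 72K of logs */
	return 0;
}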
@@ -488,15 +501,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
i915_vma_unpin_and_release(&log->vma);
}
-int intel_guc_log_level_get(struct intel_guc_log *log)
-{
- GEM_BUG_ON(!log->vma);
- GEM_BUG_ON(i915_modparams.guc_log_level < 0);
-
- return i915_modparams.guc_log_level;
-}
-
-int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
+int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -504,33 +509,32 @@ int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
GEM_BUG_ON(!log->vma);
- GEM_BUG_ON(i915_modparams.guc_log_level < 0);
/*
* GuC recognizes log levels starting from 0 up to max; we use 0 to
* indicate that logging should be disabled.
*/
- if (val < GUC_LOG_LEVEL_DISABLED || val > GUC_LOG_LEVEL_MAX)
+ if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
return -EINVAL;
mutex_lock(&dev_priv->drm.struct_mutex);
- if (i915_modparams.guc_log_level == val) {
+ if (log->level == level) {
ret = 0;
goto out_unlock;
}
intel_runtime_pm_get(dev_priv);
- ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(val),
- GUC_LOG_LEVEL_IS_ENABLED(val),
- GUC_LOG_LEVEL_TO_VERBOSITY(val));
+ ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level),
+ GUC_LOG_LEVEL_IS_ENABLED(level),
+ GUC_LOG_LEVEL_TO_VERBOSITY(level));
intel_runtime_pm_put(dev_priv);
if (ret) {
DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
goto out_unlock;
}
- i915_modparams.guc_log_level = val;
+ log->level = level;
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index fa80535a6f9d..7bc763f10c03 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -30,15 +30,19 @@
#include <linux/workqueue.h>
#include "intel_guc_fwif.h"
+#include "i915_gem.h"
struct intel_guc;
-/*
- * The first page is to save log buffer state. Allocate one
- * extra page for others in case for overlap
- */
-#define GUC_LOG_SIZE ((1 + GUC_LOG_DPC_PAGES + 1 + GUC_LOG_ISR_PAGES + \
- 1 + GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT)
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+#define CRASH_BUFFER_SIZE SZ_2M
+#define DPC_BUFFER_SIZE SZ_8M
+#define ISR_BUFFER_SIZE SZ_8M
+#else
+#define CRASH_BUFFER_SIZE SZ_8K
+#define DPC_BUFFER_SIZE SZ_32K
+#define ISR_BUFFER_SIZE SZ_32K
+#endif
/*
* While we're using plain log level in i915, GuC controls are much more...
@@ -58,7 +62,7 @@ struct intel_guc;
#define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX)
struct intel_guc_log {
- u32 flags;
+ u32 level;
struct i915_vma *vma;
struct {
void *buf_addr;
@@ -80,8 +84,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log);
int intel_guc_log_create(struct intel_guc_log *log);
void intel_guc_log_destroy(struct intel_guc_log *log);
-int intel_guc_log_level_get(struct intel_guc_log *log);
-int intel_guc_log_level_set(struct intel_guc_log *log, u64 control_val);
+int intel_guc_log_set_level(struct intel_guc_log *log, u32 level);
bool intel_guc_log_relay_enabled(const struct intel_guc_log *log);
int intel_guc_log_relay_open(struct intel_guc_log *log);
void intel_guc_log_relay_flush(struct intel_guc_log *log);
@@ -89,4 +92,9 @@ void intel_guc_log_relay_close(struct intel_guc_log *log);
void intel_guc_log_handle_flush_event(struct intel_guc_log *log);
+static inline u32 intel_guc_log_get_level(struct intel_guc_log *log)
+{
+ return log->level;
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 2feb65096966..4aa5e6463e7b 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -513,8 +513,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
struct intel_guc_client *client = guc->execbuf_client;
struct intel_engine_cs *engine = rq->engine;
- u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx,
- engine));
+ u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
spin_lock(&client->wq_lock);
@@ -537,7 +536,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
*/
static void flush_ggtt_writes(struct i915_vma *vma)
{
- struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev);
+ struct drm_i915_private *dev_priv = vma->vm->i915;
if (i915_vma_is_map_and_fenceable(vma))
POSTING_READ_FW(GUC_STATUS);
@@ -552,8 +551,8 @@ static void inject_preempt_context(struct work_struct *work)
preempt_work[engine->id]);
struct intel_guc_client *client = guc->preempt_client;
struct guc_stage_desc *stage_desc = __get_stage_desc(client);
- u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
- engine));
+ u32 ctx_desc = lower_32_bits(to_intel_context(client->owner,
+ engine)->lrc_desc);
u32 data[7];
/*
@@ -623,6 +622,22 @@ static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
}
+static void complete_preempt_context(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+
+ GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
+
+ if (inject_preempt_hang(execlists))
+ return;
+
+ execlists_cancel_port_requests(execlists);
+ execlists_unwind_incomplete_requests(execlists);
+
+ wait_for_guc_preempt_report(engine);
+ intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
+}
+
/**
* guc_submit() - Submit commands through GuC
* @engine: engine associated with the commands
@@ -681,9 +696,6 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
lockdep_assert_held(&engine->timeline.lock);
- rb = execlists->first;
- GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-
if (port_isset(port)) {
if (intel_engine_has_preemption(engine)) {
struct guc_preempt_work *preempt_work =
@@ -705,12 +717,12 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
}
GEM_BUG_ON(port_isset(port));
- while (rb) {
+ while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
- if (last && rq->ctx != last->ctx) {
+ if (last && rq->hw_context != last->hw_context) {
if (port == last_port) {
__list_del_many(&p->requests,
&rq->sched.link);
@@ -730,15 +742,13 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
submit = true;
}
- rb = rb_next(rb);
- rb_erase(&p->node, &execlists->queue);
+ rb_erase_cached(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
done:
execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
- execlists->first = rb;
if (submit)
port_assign(port, last);
if (last)
@@ -747,7 +757,8 @@ done:
/* We must always keep the beast fed if we have work piled up */
GEM_BUG_ON(port_isset(execlists->port) &&
!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
- GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+ GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
+ !port_isset(execlists->port));
return submit;
}
@@ -793,20 +804,44 @@ static void guc_submission_tasklet(unsigned long data)
if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
- GUC_PREEMPT_FINISHED) {
- execlists_cancel_port_requests(&engine->execlists);
- execlists_unwind_incomplete_requests(execlists);
-
- wait_for_guc_preempt_report(engine);
-
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
- intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
- }
+ GUC_PREEMPT_FINISHED)
+ complete_preempt_context(engine);
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
guc_dequeue(engine);
}
+static struct i915_request *
+guc_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its execlists->tasklet *just* as we are
+ * calling engine->init_hw() and also writing the ELSP.
+ * Turning off the execlists->tasklet until the reset is over
+ * prevents the race.
+ */
+ __tasklet_disable_sync_once(&execlists->tasklet);
+
+ /*
+	 * We're using a worker to queue preemption requests from the tasklet
+	 * in GuC submission mode. Even though the tasklet was disabled, we may
+	 * still have a worker queued.
+ * Let's make sure that all workers scheduled before disabling the
+ * tasklet are completed before continuing with the reset.
+ */
+ if (engine->i915->guc.preempt_wq)
+ flush_workqueue(engine->i915->guc.preempt_wq);
+
+ return i915_gem_find_active_request(engine);
+}
+
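guc_reset_prepare() leans on __tasklet_disable_sync_once(), which bumps the tasklet's disable count and only waits for a running invocation on the first 0 -> 1 transition, so nested reset preparation stays cheap. A tiny sketch of that count-based idiom, with a plain atomic standing in for the tasklet's count (an assumption, not the kernel implementation):

#include <assert.h>
#include <stdatomic.h>

static atomic_int disable_count;

static void disable_sync_once(void)
{
	/* Only the 0 -> 1 transition quiesces; nested calls just nest. */
	if (atomic_fetch_add(&disable_count, 1) == 0) {
		/* ... wait here for any in-flight invocation to finish ... */
	}
}

static void enable_once(void)
{
	int old = atomic_fetch_sub(&disable_count, 1);

	assert(old > 0);        /* unbalanced enable is a bug */
}

int main(void)
{
	disable_sync_once();
	disable_sync_once();    /* nested: no second quiesce */
	enable_once();
	enable_once();
	assert(atomic_load(&disable_count) == 0);
	return 0;
}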
/*
* Everything below here is concerned with setup & teardown, and is
* therefore not part of the somewhat time-critical batch-submission
@@ -876,8 +911,12 @@ static void guc_clients_doorbell_fini(struct intel_guc *guc)
__update_doorbell_desc(guc->preempt_client,
GUC_DOORBELL_INVALID);
}
- __destroy_doorbell(guc->execbuf_client);
- __update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID);
+
+ if (guc->execbuf_client) {
+ __destroy_doorbell(guc->execbuf_client);
+ __update_doorbell_desc(guc->execbuf_client,
+ GUC_DOORBELL_INVALID);
+ }
}
/**
@@ -1090,7 +1129,8 @@ static void guc_clients_destroy(struct intel_guc *guc)
guc_client_free(client);
client = fetch_and_zero(&guc->execbuf_client);
- guc_client_free(client);
+ if (client)
+ guc_client_free(client);
}
/*
@@ -1119,7 +1159,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
WARN_ON(!guc_verify_doorbells(guc));
ret = guc_clients_create(guc);
if (ret)
- return ret;
+ goto err_pool;
for_each_engine(engine, dev_priv, id) {
guc->preempt_work[id].engine = engine;
@@ -1128,6 +1168,9 @@ int intel_guc_submission_init(struct intel_guc *guc)
return 0;
+err_pool:
+ guc_stage_desc_pool_destroy(guc);
+ return ret;
}
void intel_guc_submission_fini(struct intel_guc *guc)
@@ -1142,7 +1185,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
guc_clients_destroy(guc);
WARN_ON(!guc_verify_doorbells(guc));
- guc_stage_desc_pool_destroy(guc);
+ if (guc->stage_desc_pool)
+ guc_stage_desc_pool_destroy(guc);
}
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
@@ -1225,6 +1269,31 @@ static void guc_submission_unpark(struct intel_engine_cs *engine)
intel_engine_pin_breadcrumbs_irq(engine);
}
+static void guc_set_default_submission(struct intel_engine_cs *engine)
+{
+ /*
+ * We inherit a bunch of functions from execlists that we'd like
+ * to keep using:
+ *
+ * engine->submit_request = execlists_submit_request;
+ * engine->cancel_requests = execlists_cancel_requests;
+ * engine->schedule = execlists_schedule;
+ *
+ * But we need to override the actual submission backend in order
+ * to talk to the GuC.
+ */
+ intel_execlists_set_default_submission(engine);
+
+ engine->execlists.tasklet.func = guc_submission_tasklet;
+
+ engine->park = guc_submission_park;
+ engine->unpark = guc_submission_unpark;
+
+ engine->reset.prepare = guc_reset_prepare;
+
+ engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
+}
+
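guc_set_default_submission() inherits the execlists vtable wholesale and then overrides only the hooks that must talk to the GuC, which keeps the two backends from drifting apart. The override-after-inherit idiom reduced to a hedged function-pointer sketch with invented names:

#include <assert.h>

struct engine_ops {
	int (*submit)(void);
};

static int execlists_submit(void) { return 1; }
static int guc_submit(void)       { return 2; }

static void execlists_set_default(struct engine_ops *ops)
{
	ops->submit = execlists_submit;
}

static void guc_set_default(struct engine_ops *ops)
{
	execlists_set_default(ops);     /* inherit the base vtable... */
	ops->submit = guc_submit;       /* ...then override the backend */
}

int main(void)
{
	struct engine_ops ops;

	guc_set_default(&ops);
	assert(ops.submit() == 2);
	return 0;
}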
int intel_guc_submission_enable(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -1263,14 +1332,8 @@ int intel_guc_submission_enable(struct intel_guc *guc)
guc_interrupts_capture(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct intel_engine_execlists * const execlists =
- &engine->execlists;
-
- execlists->tasklet.func = guc_submission_tasklet;
- engine->park = guc_submission_park;
- engine->unpark = guc_submission_unpark;
-
- engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
+ engine->set_default_submission = guc_set_default_submission;
+ engine->set_default_submission(engine);
}
return 0;
@@ -1284,9 +1347,6 @@ void intel_guc_submission_disable(struct intel_guc *guc)
guc_interrupts_release(dev_priv);
guc_clients_doorbell_fini(guc);
-
- /* Revert back to manual ELSP submission */
- intel_engines_reset_default_submission(dev_priv);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index a2fe7c8d4477..c22b3e18a0f5 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -47,6 +47,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return true;
if (IS_KABYLAKE(dev_priv))
return true;
+ if (IS_BROXTON(dev_priv))
+ return true;
return false;
}
@@ -90,6 +92,9 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
{
int ret;
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
if (!i915_modparams.enable_gvt) {
DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
return 0;
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index d47e346bd49e..2fc7a0dd0df9 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -294,6 +294,7 @@ static void hangcheck_store_sample(struct intel_engine_cs *engine,
engine->hangcheck.seqno = hc->seqno;
engine->hangcheck.action = hc->action;
engine->hangcheck.stalled = hc->stalled;
+ engine->hangcheck.wedged = hc->wedged;
}
static enum intel_engine_hangcheck_action
@@ -368,6 +369,9 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
hc->stalled = time_after(jiffies,
engine->hangcheck.action_timestamp + timeout);
+ hc->wedged = time_after(jiffies,
+ engine->hangcheck.action_timestamp +
+ I915_ENGINE_WEDGED_TIMEOUT);
}
static void hangcheck_declare_hang(struct drm_i915_private *i915,
@@ -409,7 +413,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
gpu_error.hangcheck_work.work);
struct intel_engine_cs *engine;
enum intel_engine_id id;
- unsigned int hung = 0, stuck = 0;
+ unsigned int hung = 0, stuck = 0, wedged = 0;
if (!i915_modparams.enable_hangcheck)
return;
@@ -440,6 +444,17 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (hc.action != ENGINE_DEAD)
stuck |= intel_engine_flag(engine);
}
+
+ if (engine->hangcheck.wedged)
+ wedged |= intel_engine_flag(engine);
+ }
+
+ if (wedged) {
+ dev_err(dev_priv->drm.dev,
+ "GPU recovery timed out,"
+ " cancelling all in-flight rendering.\n");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(dev_priv);
}
if (hung)
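The new hc->wedged test is the standard jiffies idiom: stamp the last time the engine made progress, then compare against now with a wrap-safe helper. A small sketch of why time_after() survives counter wrap-around (it reduces to a signed subtraction):

#include <stdio.h>

typedef unsigned long jiffies_t;

/* Wrap-safe "a is later than b", as the kernel's time_after() macro. */
static int time_after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	jiffies_t now = (jiffies_t)-5; /* deliberately near the wrap point */
	jiffies_t stamp = now;         /* last observed progress */
	jiffies_t timeout = 10;

	now += 20;                     /* time passes, the counter wraps */
	if (time_after(now, stamp + timeout))
		puts("engine declared wedged"); /* still detected despite wrap */
	return 0;
}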
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d8cb53ef4351..8363fbd18ee8 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -51,7 +51,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t enabled_bits;
+ u32 enabled_bits;
enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
@@ -59,6 +59,15 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
"HDMI port enabled, expecting disabled\n");
}
+static void
+assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ WARN(I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
+ TRANS_DDI_FUNC_ENABLE,
+ "HDMI transcoder function enabled, expecting disabled\n");
+}
+
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port =
@@ -144,7 +153,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 val = I915_READ(VIDEO_DIP_CTL);
@@ -199,7 +208,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -259,7 +268,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -317,7 +326,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -376,19 +385,16 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- const uint32_t *data = frame;
+ const u32 *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
- i915_reg_t data_reg;
int data_size = type == DP_SDP_VSC ?
VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
int i;
u32 val = I915_READ(ctl_reg);
- data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0);
-
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
@@ -442,7 +448,7 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe *frame)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- uint8_t buffer[VIDEO_DIP_DATA_SIZE];
+ u8 buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
/* see comment above for the reason for this offset */
@@ -461,7 +467,8 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
}
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
@@ -491,6 +498,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
intel_hdmi->rgb_quant_range_selectable,
is_hdmi2_sink);
+ drm_hdmi_avi_infoframe_content_type(&frame.avi,
+ conn_state);
+
/* TODO: handle pixel repetition for YCBCR420 outputs */
intel_write_infoframe(encoder, crtc_state, &frame);
}
@@ -586,7 +596,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -727,7 +737,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -770,7 +780,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -823,7 +833,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -834,11 +844,11 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
u32 val = I915_READ(reg);
- assert_hdmi_port_disabled(intel_hdmi);
+ assert_hdmi_transcoder_func_disabled(dev_priv,
+ crtc_state->cpu_transcoder);
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
@@ -856,7 +866,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
@@ -1161,33 +1171,16 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
+ ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
- tmp = I915_READ(intel_hdmi->hdmi_reg);
-
- if (!(tmp & SDVO_ENABLE))
- goto out;
-
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else if (IS_CHERRYVIEW(dev_priv))
- *pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- ret = true;
-
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
@@ -1421,8 +1414,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- temp &= ~SDVO_PIPE_B_SELECT;
- temp |= SDVO_ENABLE;
+ temp &= ~SDVO_PIPE_SEL_MASK;
+ temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
@@ -1577,14 +1570,23 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
/* check if we can do 8bpc */
status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi);
- /* if we can't do 8bpc we may still be able to do 12bpc */
- if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK && hdmi->has_hdmi_sink && !force_dvi)
- status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true, force_dvi);
+ if (hdmi->has_hdmi_sink && !force_dvi) {
+ /* if we can't do 8bpc we may still be able to do 12bpc */
+ if (status != MODE_OK && !HAS_GMCH_DISPLAY(dev_priv))
+ status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
+ true, force_dvi);
+
+ /* if we can't do 8,12bpc we may still be able to do 10bpc */
+ if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11)
+ status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
+ true, force_dvi);
+ }
return status;
}
-static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
+static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
+ int bpc)
{
struct drm_i915_private *dev_priv =
to_i915(crtc_state->base.crtc->dev);
@@ -1596,6 +1598,9 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
if (HAS_GMCH_DISPLAY(dev_priv))
return false;
+ if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
+ return false;
+
if (crtc_state->pipe_bpp <= 8*3)
return false;
@@ -1603,7 +1608,7 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
return false;
/*
- * HDMI 12bpc affects the clocks, so it's only possible
+ * HDMI deep color affects the clocks, so it's only possible
* when not cloning with other encoder types.
*/
if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
@@ -1618,16 +1623,24 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
if (crtc_state->ycbcr420) {
const struct drm_hdmi_info *hdmi = &info->hdmi;
- if (!(hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36))
+ if (bpc == 12 && !(hdmi->y420_dc_modes &
+ DRM_EDID_YCBCR420_DC_36))
+ return false;
+ else if (bpc == 10 && !(hdmi->y420_dc_modes &
+ DRM_EDID_YCBCR420_DC_30))
return false;
} else {
- if (!(info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36))
+ if (bpc == 12 && !(info->edid_hdmi_dc_modes &
+ DRM_EDID_HDMI_DC_36))
+ return false;
+ else if (bpc == 10 && !(info->edid_hdmi_dc_modes &
+ DRM_EDID_HDMI_DC_30))
return false;
}
}
/* Display WA #1139: glk */
- if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
+ if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
crtc_state->base.adjusted_mode.htotal > 5460)
return false;
@@ -1637,7 +1650,8 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
struct intel_crtc_state *config,
- int *clock_12bpc, int *clock_8bpc)
+ int *clock_12bpc, int *clock_10bpc,
+ int *clock_8bpc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
@@ -1649,6 +1663,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
/* YCBCR420 TMDS rate requirement is half the pixel clock */
config->port_clock /= 2;
*clock_12bpc /= 2;
+ *clock_10bpc /= 2;
*clock_8bpc /= 2;
config->ycbcr420 = true;
@@ -1676,6 +1691,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
+ int clock_10bpc = clock_8bpc * 5 / 4;
int clock_12bpc = clock_8bpc * 3 / 2;
int desired_bpp;
bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
@@ -1702,12 +1718,14 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
pipe_config->pixel_multiplier = 2;
clock_8bpc *= 2;
+ clock_10bpc *= 2;
clock_12bpc *= 2;
}
if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
if (!intel_hdmi_ycbcr420_config(connector, pipe_config,
- &clock_12bpc, &clock_8bpc)) {
+ &clock_12bpc, &clock_10bpc,
+ &clock_8bpc)) {
DRM_ERROR("Can't support YCBCR420 output\n");
return false;
}
@@ -1725,18 +1743,25 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
}
/*
- * HDMI is either 12 or 8, so if the display lets 10bpc sneak
- * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
- * outputs. We also need to check that the higher clock still fits
- * within limits.
+ * Note that g4x/vlv don't support 12bpc hdmi outputs. We also need
+ * to check that the higher clock still fits within limits.
*/
- if (hdmi_12bpc_possible(pipe_config) &&
- hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK) {
+ if (hdmi_deep_color_possible(pipe_config, 12) &&
+ hdmi_port_clock_valid(intel_hdmi, clock_12bpc,
+ true, force_dvi) == MODE_OK) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
/* Need to adjust the port link by 1.5x for 12bpc. */
pipe_config->port_clock = clock_12bpc;
+ } else if (hdmi_deep_color_possible(pipe_config, 10) &&
+ hdmi_port_clock_valid(intel_hdmi, clock_10bpc,
+ true, force_dvi) == MODE_OK) {
+ DRM_DEBUG_KMS("picking bpc to 10 for HDMI output\n");
+ desired_bpp = 10 * 3;
+
+ /* Need to adjust the port link by 1.25x for 10bpc. */
+ pipe_config->port_clock = clock_10bpc;
} else {
DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
desired_bpp = 8*3;
@@ -2071,6 +2096,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
+ drm_connector_attach_content_type_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
@@ -2257,7 +2283,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
- else if (IS_ICELAKE(dev_priv))
+ else if (HAS_PCH_ICP(dev_priv))
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
@@ -2352,7 +2378,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
+ if (IS_G45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
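The 10bpc plumbing in this file is mostly clock arithmetic: the TMDS clock scales by bpc/8, and YCbCr 4:2:0 halves the TMDS rate again. A tiny sketch of the rates the patch computes, using a hypothetical mode clock (the 5/4 and 3/2 factors match the 10bpc and 12bpc cases added above):

#include <stdio.h>

int main(void)
{
	int clock_8bpc = 148500; /* e.g. a 1080p60 pixel clock, in kHz */
	int clock_10bpc = clock_8bpc * 5 / 4; /* 10/8 link scaling */
	int clock_12bpc = clock_8bpc * 3 / 2; /* 12/8 link scaling */

	/* YCBCR420: TMDS rate is half the pixel clock, so halve them all. */
	printf("8bpc  %d kHz\n", clock_8bpc);
	printf("10bpc %d kHz (%d for 4:2:0)\n", clock_10bpc, clock_10bpc / 2);
	printf("12bpc %d kHz (%d for 4:2:0)\n", clock_12bpc, clock_12bpc / 2);
	return 0;
}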
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 43aa92beff2a..648a13c6043c 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -77,37 +77,6 @@
*/
/**
- * intel_hpd_port - return port hard associated with certain pin.
- * @dev_priv: private driver data pointer
- * @pin: the hpd pin to get associated port
- *
- * Return port that is associated with @pin and PORT_NONE if no port is
- * hard associated with that @pin.
- */
-enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
- enum hpd_pin pin)
-{
- switch (pin) {
- case HPD_PORT_A:
- return PORT_A;
- case HPD_PORT_B:
- return PORT_B;
- case HPD_PORT_C:
- return PORT_C;
- case HPD_PORT_D:
- return PORT_D;
- case HPD_PORT_E:
- if (IS_CNL_WITH_PORT_F(dev_priv))
- return PORT_F;
- return PORT_E;
- case HPD_PORT_F:
- return PORT_F;
- default:
- return PORT_NONE; /* no port for this pin */
- }
-}
-
-/**
* intel_hpd_pin_default - return default pin associated with certain port.
* @dev_priv: private driver data pointer
* @port: the hpd port to get associated pin
@@ -241,25 +210,25 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
container_of(work, typeof(*dev_priv),
hotplug.reenable_work.work);
struct drm_device *dev = &dev_priv->drm;
- int i;
+ enum hpd_pin pin;
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
- for_each_hpd_pin(i) {
+ for_each_hpd_pin(pin) {
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
+ if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
continue;
- dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+ dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct intel_connector *intel_connector = to_intel_connector(connector);
- if (intel_connector->encoder->hpd_pin == i) {
+ if (intel_connector->encoder->hpd_pin == pin) {
if (connector->polled != intel_connector->polled)
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
connector->name);
@@ -301,13 +270,18 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder,
return true;
}
+static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
+{
+ return intel_encoder_is_dig_port(encoder) &&
+ enc_to_dig_port(&encoder->base)->hpd_pulse != NULL;
+}
+
static void i915_digport_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, hotplug.dig_port_work);
u32 long_port_mask, short_port_mask;
- struct intel_digital_port *intel_dig_port;
- int i;
+ struct intel_encoder *encoder;
u32 old_bits = 0;
spin_lock_irq(&dev_priv->irq_lock);
@@ -317,27 +291,27 @@ static void i915_digport_work_func(struct work_struct *work)
dev_priv->hotplug.short_port_mask = 0;
spin_unlock_irq(&dev_priv->irq_lock);
- for (i = 0; i < I915_MAX_PORTS; i++) {
- bool valid = false;
- bool long_hpd = false;
- intel_dig_port = dev_priv->hotplug.irq_port[i];
- if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_digital_port *dig_port;
+ enum port port = encoder->port;
+ bool long_hpd, short_hpd;
+ enum irqreturn ret;
+
+ if (!intel_encoder_has_hpd_pulse(encoder))
continue;
- if (long_port_mask & (1 << i)) {
- valid = true;
- long_hpd = true;
- } else if (short_port_mask & (1 << i))
- valid = true;
+ long_hpd = long_port_mask & BIT(port);
+ short_hpd = short_port_mask & BIT(port);
- if (valid) {
- enum irqreturn ret;
+ if (!long_hpd && !short_hpd)
+ continue;
- ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
- if (ret == IRQ_NONE) {
- /* fall back to old school hpd */
- old_bits |= (1 << intel_dig_port->base.hpd_pin);
- }
+ dig_port = enc_to_dig_port(&encoder->base);
+
+ ret = dig_port->hpd_pulse(dig_port, long_hpd);
+ if (ret == IRQ_NONE) {
+ /* fall back to old school hpd */
+ old_bits |= BIT(encoder->hpd_pin);
}
}
@@ -418,26 +392,24 @@ static void i915_hotplug_work_func(struct work_struct *work)
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask)
{
- int i;
- enum port port;
+ struct intel_encoder *encoder;
bool storm_detected = false;
bool queue_dig = false, queue_hp = false;
- bool is_dig_port;
if (!pin_mask)
return;
spin_lock(&dev_priv->irq_lock);
- for_each_hpd_pin(i) {
- if (!(BIT(i) & pin_mask))
- continue;
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ enum hpd_pin pin = encoder->hpd_pin;
+ bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
- port = intel_hpd_pin_to_port(dev_priv, i);
- is_dig_port = port != PORT_NONE &&
- dev_priv->hotplug.irq_port[port];
+ if (!(BIT(pin) & pin_mask))
+ continue;
- if (is_dig_port) {
- bool long_hpd = long_mask & BIT(i);
+ if (has_hpd_pulse) {
+ bool long_hpd = long_mask & BIT(pin);
+ enum port port = encoder->port;
DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
long_hpd ? "long" : "short");
@@ -455,7 +427,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
}
}
- if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
+ if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
/*
* On GMCH platforms the interrupt mask bits only
* prevent irq generation, not the setting of the
@@ -463,20 +435,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* interrupts on saner platforms.
*/
WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
- "Received HPD interrupt on pin %d although disabled\n", i);
+ "Received HPD interrupt on pin %d although disabled\n", pin);
continue;
}
- if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
+ if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
continue;
- if (!is_dig_port) {
- dev_priv->hotplug.event_bits |= BIT(i);
+ if (!has_hpd_pulse) {
+ dev_priv->hotplug.event_bits |= BIT(pin);
queue_hp = true;
}
- if (intel_hpd_irq_storm_detect(dev_priv, i)) {
- dev_priv->hotplug.event_bits &= ~BIT(i);
+ if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
+ dev_priv->hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
}
}
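The rewritten worker and IRQ handler walk the encoder list and test per-port/per-pin bits rather than indexing hotplug.irq_port[]. A compact model of that mask-driven dispatch (the encoder table and port numbers are invented for illustration):

#include <stdio.h>

#define BIT(n) (1u << (n))

struct encoder { int port; int has_hpd_pulse; };

int main(void)
{
	struct encoder encoders[] = { {0, 1}, {1, 0}, {2, 1} };
	unsigned int long_mask = BIT(0), short_mask = BIT(2);

	for (unsigned int i = 0; i < sizeof(encoders) / sizeof(encoders[0]); i++) {
		struct encoder *e = &encoders[i];
		int long_hpd = !!(long_mask & BIT(e->port));
		int short_hpd = !!(short_mask & BIT(e->port));

		/* skip encoders without a pulse handler or pending event */
		if (!e->has_hpd_pulse || (!long_hpd && !short_hpd))
			continue;

		printf("port %d: %s pulse\n", e->port, long_hpd ? "long" : "short");
		/* on IRQ_NONE the driver then falls back to old-school hpd */
	}
	return 0;
}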
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 291285277403..ffcad5fad6a7 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -32,6 +32,14 @@ void intel_huc_init_early(struct intel_huc *huc)
intel_huc_fw_init_early(huc);
}
+int intel_huc_init_misc(struct intel_huc *huc)
+{
+ struct drm_i915_private *i915 = huc_to_i915(huc);
+
+ intel_uc_fw_fetch(i915, &huc->fw);
+ return 0;
+}
+
/**
* intel_huc_auth() - Authenticate HuC uCode
* @huc: intel_huc structure
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
index aa854907abac..7e41d870b509 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -36,9 +36,15 @@ struct intel_huc {
};
void intel_huc_init_early(struct intel_huc *huc);
+int intel_huc_init_misc(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc);
int intel_huc_check_status(struct intel_huc *huc);
+static inline void intel_huc_fini_misc(struct intel_huc *huc)
+{
+ intel_uc_fw_fini(&huc->fw);
+}
+
static inline int intel_huc_sanitize(struct intel_huc *huc)
{
intel_uc_fw_sanitize(&huc->fw);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index e6875509bcd9..bef32b7c248e 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -77,12 +77,12 @@ static const struct gmbus_pin gmbus_pins_cnp[] = {
};
static const struct gmbus_pin gmbus_pins_icp[] = {
- [GMBUS_PIN_1_BXT] = { "dpa", GPIOA },
- [GMBUS_PIN_2_BXT] = { "dpb", GPIOB },
- [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOC },
- [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOD },
- [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOE },
- [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOF },
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
+ [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
+ [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
+ [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
};
/* pin is expected to be valid */
@@ -361,15 +361,39 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
return ret;
}
+static inline
+unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
+{
+ return INTEL_GEN(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
+ GMBUS_BYTE_COUNT_MAX;
+}
+
static int
gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
unsigned short addr, u8 *buf, unsigned int len,
- u32 gmbus1_index)
+ u32 gmbus0_reg, u32 gmbus1_index)
{
+ unsigned int size = len;
+ bool burst_read = len > gmbus_max_xfer_size(dev_priv);
+ bool extra_byte_added = false;
+
+ if (burst_read) {
+ /*
+ * As per the HW spec, for 512 bytes we need to read an extra byte
+ * and ignore that extra byte once read.
+ */
+ if (len == 512) {
+ extra_byte_added = true;
+ len++;
+ }
+ size = len % 256 + 256;
+ I915_WRITE_FW(GMBUS0, gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
+ }
+
I915_WRITE_FW(GMBUS1,
gmbus1_index |
GMBUS_CYCLE_WAIT |
- (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (size << GMBUS_BYTE_COUNT_SHIFT) |
(addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
@@ -382,17 +406,34 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
val = I915_READ_FW(GMBUS3);
do {
+ if (extra_byte_added && len == 1)
+ break;
+
*buf++ = val & 0xff;
val >>= 8;
} while (--len && ++loop < 4);
+
+ if (burst_read && len == size - 4)
+ /* Reset the override bit */
+ I915_WRITE_FW(GMBUS0, gmbus0_reg);
}
return 0;
}
+/*
+ * The HW spec says that a 512-byte burst read needs special treatment,
+ * but it says nothing about other multiples of 256 bytes, and we couldn't
+ * locate an I2C slave supporting such a lengthy burst read to experiment
+ * with.
+ *
+ * So until HW support is clarified, to avoid burst read lengths that are
+ * a multiple of 256 bytes (other than 512), the max burst read length is
+ * fixed at 767 bytes.
+ */
+#define INTEL_GMBUS_BURST_READ_MAX_LEN 767U
+
static int
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
- u32 gmbus1_index)
+ u32 gmbus0_reg, u32 gmbus1_index)
{
u8 *buf = msg->buf;
unsigned int rx_size = msg->len;
@@ -400,10 +441,13 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
int ret;
do {
- len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
+ if (HAS_GMBUS_BURST_READ(dev_priv))
+ len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN);
+ else
+ len = min(rx_size, gmbus_max_xfer_size(dev_priv));
- ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
- buf, len, gmbus1_index);
+ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, buf, len,
+ gmbus0_reg, gmbus1_index);
if (ret)
return ret;
@@ -462,7 +506,7 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
int ret;
do {
- len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
+ len = min(tx_size, gmbus_max_xfer_size(dev_priv));
ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len,
gmbus1_index);
@@ -491,7 +535,8 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num)
}
static int
-gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
+ u32 gmbus0_reg)
{
u32 gmbus1_index = 0;
u32 gmbus5 = 0;
@@ -509,7 +554,8 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
I915_WRITE_FW(GMBUS5, gmbus5);
if (msgs[1].flags & I2C_M_RD)
- ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+ ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg,
+ gmbus1_index);
else
ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index);
@@ -544,10 +590,12 @@ retry:
for (; i < num; i += inc) {
inc = 1;
if (gmbus_is_index_xfer(msgs, i, num)) {
- ret = gmbus_index_xfer(dev_priv, &msgs[i]);
+ ret = gmbus_index_xfer(dev_priv, &msgs[i],
+ gmbus0_source | bus->reg0);
inc = 2; /* an index transmission is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
- ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
+ ret = gmbus_xfer_read(dev_priv, &msgs[i],
+ gmbus0_source | bus->reg0, 0);
} else {
ret = gmbus_xfer_write(dev_priv, &msgs[i], 0);
}
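To make the burst-read byte counts concrete: when a read exceeds the per-transfer maximum, gmbus_xfer_read_chunk() now programs len % 256 + 256 into GMBUS1 (with the byte-count-override bit set so the HW keeps bursting), and for exactly 512 bytes it reads and discards one extra byte. A quick sketch of just that computation, under those rules:

#include <stdio.h>

static unsigned int gmbus_burst_size(unsigned int len, int *extra_byte)
{
	*extra_byte = 0;
	if (len == 512) {       /* HW quirk: read one extra byte, discard it */
		*extra_byte = 1;
		len++;
	}
	return len % 256 + 256; /* byte count programmed into GMBUS1 */
}

int main(void)
{
	unsigned int lens[] = { 300, 512, 767 };

	for (int i = 0; i < 3; i++) {
		int extra;
		unsigned int size = gmbus_burst_size(lens[i], &extra);

		printf("len=%u -> size=%u extra_byte=%d\n", lens[i], size, extra);
	}
	return 0;
}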
@@ -771,7 +819,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
- if (HAS_PCH_NOP(dev_priv))
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 6269750e2b54..cdf19553ffac 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -62,6 +62,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
@@ -126,9 +127,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
return platdev;
}
- pm_runtime_forbid(&platdev->dev);
- pm_runtime_set_active(&platdev->dev);
- pm_runtime_enable(&platdev->dev);
+ pm_runtime_no_callbacks(&platdev->dev);
return platdev;
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7c4c8fb1dae4..174479232e94 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -137,6 +137,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_render_state.h"
+#include "i915_vgpu.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_workarounds.h"
@@ -164,7 +165,8 @@
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine,
+ struct intel_context *ce);
static void execlists_init_reg_state(u32 *reg_state,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
@@ -189,12 +191,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
!i915_request_completed(last));
}
-/**
- * intel_lr_context_descriptor_update() - calculate & cache the descriptor
- * descriptor for a pinned context
- * @ctx: Context to work on
- * @engine: Engine the descriptor will be used with
- *
+/*
* The context descriptor encodes various attributes of a context,
* including its GTT address and some flags. Because it's fairly
* expensive to calculate, we'll just do it once and cache the result,
@@ -204,7 +201,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
*
* bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
* bits 12-31: LRCA, GTT address of (the HWSP of) this context
- * bits 32-52: ctx ID, a globally unique tag
+ * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
* bits 53-54: mbz, reserved for use by hardware
* bits 55-63: group ID, currently unused and set to 0
*
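A hypothetical packing helper, purely to visualise the field boundaries described above (the field values are made up; in the driver the flags come from ctx->desc_template and the LRCA from the pinned context state):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper modelling the GEN8..GEN10 layout above. */
static uint64_t make_ctx_desc(uint64_t flags, uint64_t lrca_pfn, uint64_t ctx_id)
{
	uint64_t desc = 0;

	desc |= flags & 0xfffull;              /* bits 0-11:  GEN8_CTX_* flags */
	desc |= (lrca_pfn & 0xfffffull) << 12; /* bits 12-31: LRCA (page-aligned) */
	desc |= (ctx_id & 0x1fffffull) << 32;  /* bits 32-52: ctx ID */
	/* bits 53-54: mbz; bits 55-63: group ID, left zero */
	return desc;
}

int main(void)
{
	printf("desc=%#llx\n",
	       (unsigned long long)make_ctx_desc(0x19, 0x1234, 42));
	return 0;
}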
@@ -222,9 +219,9 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
*/
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+ struct intel_engine_cs *engine,
+ struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
u64 desc;
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -237,6 +234,11 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
/* bits 12-31 */
GEM_BUG_ON(desc & GENMASK_ULL(63, 32));
+ /*
+ * The following 32bits are copied into the OA reports (dword 2).
+ * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
+ * anything below.
+ */
if (INTEL_GEN(ctx->i915) >= 11) {
GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
@@ -271,7 +273,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
find_priolist:
/* most positive priority is scheduled first, equal priorities fifo */
rb = NULL;
- parent = &execlists->queue.rb_node;
+ parent = &execlists->queue.rb_root.rb_node;
while (*parent) {
rb = *parent;
p = to_priolist(rb);
@@ -309,10 +311,7 @@ find_priolist:
p->priority = prio;
INIT_LIST_HEAD(&p->requests);
rb_link_node(&p->node, rb, parent);
- rb_insert_color(&p->node, &execlists->queue);
-
- if (first)
- execlists->first = &p->node;
+ rb_insert_color_cached(&p->node, &execlists->queue, first);
return p;
}
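rb_insert_color_cached() above lets the scheduler drop its hand-maintained execlists->first pointer: the cached rbtree tracks the leftmost (highest-priority) node itself. A self-contained miniature of the cached-leftmost idea, with a sorted list standing in for the kernel's rbtree:

#include <stdio.h>

/* Keep the minimum alongside the container so peeking it is O(1). */
struct node { int prio; struct node *next; };

struct queue { struct node *head; struct node *leftmost; };

static void insert(struct queue *q, struct node *n)
{
	struct node **p = &q->head;

	while (*p && (*p)->prio <= n->prio)
		p = &(*p)->next;
	n->next = *p;
	*p = n;
	if (q->head == n)        /* new minimum: refresh the cache, */
		q->leftmost = n; /* as rb_insert_color_cached() does */
}

static struct node *first(struct queue *q) { return q->leftmost; /* O(1) */ }

int main(void)
{
	struct queue q = { 0 };
	struct node a = { 3 }, b = { 1 }, c = { 2 };

	insert(&q, &a); insert(&q, &b); insert(&q, &c);
	printf("first prio = %d\n", first(&q)->prio); /* prints 1 */
	return 0;
}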
@@ -418,9 +417,9 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
static u64 execlists_update_context(struct i915_request *rq)
{
- struct intel_context *ce = to_intel_context(rq->ctx, rq->engine);
+ struct intel_context *ce = rq->hw_context;
struct i915_hw_ppgtt *ppgtt =
- rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+ rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
@@ -430,7 +429,7 @@ static u64 execlists_update_context(struct i915_request *rq)
* PML4 is allocated during ppgtt init, so this is not needed
* in 48-bit mode.
*/
- if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
+ if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
execlists_update_context_pdps(ppgtt, reg_state);
return ce->lrc_desc;
@@ -454,6 +453,16 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
unsigned int n;
/*
+ * We can skip acquiring intel_runtime_pm_get() here as it was taken
+ * on our behalf by the request (see i915_gem_mark_busy()) and it will
+ * not be relinquished until the device is idle (see
+ * i915_gem_idle_work_handler()). As a precaution, we make sure
+ * that all ELSP are drained i.e. we have processed the CSB,
+ * before allowing ourselves to idle and calling intel_runtime_pm_put().
+ */
+ GEM_BUG_ON(!engine->i915->gt.awake);
+
+ /*
* ELSQ note: the submit queue is not cleared after being submitted
* to the HW so we need to make sure we always clean it up. This is
* currently ensured by the fact that we always write the same number
@@ -495,14 +504,14 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
}
-static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
+static bool ctx_single_port_submission(const struct intel_context *ce)
{
return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
- i915_gem_context_force_single_submission(ctx));
+ i915_gem_context_force_single_submission(ce->gem_context));
}
-static bool can_merge_ctx(const struct i915_gem_context *prev,
- const struct i915_gem_context *next)
+static bool can_merge_ctx(const struct intel_context *prev,
+ const struct intel_context *next)
{
if (prev != next)
return false;
@@ -552,11 +561,24 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
if (execlists->ctrl_reg)
writel(EL_CTRL_LOAD, execlists->ctrl_reg);
- execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
- execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
+ execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
+}
+
+static void complete_preempt_context(struct intel_engine_execlists *execlists)
+{
+ GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
+
+ if (inject_preempt_hang(execlists))
+ return;
+
+ execlists_cancel_port_requests(execlists);
+ __unwind_incomplete_requests(container_of(execlists,
+ struct intel_engine_cs,
+ execlists));
}
-static bool __execlists_dequeue(struct intel_engine_cs *engine)
+static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
@@ -566,9 +588,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
struct rb_node *rb;
bool submit = false;
- lockdep_assert_held(&engine->timeline.lock);
-
- /* Hardware submission is through 2 ports. Conceptually each port
+ /*
+ * Hardware submission is through 2 ports. Conceptually each port
* has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
* static for a context, and unique to each, so we only execute
* requests belonging to a single context from each ring. RING_HEAD
@@ -589,9 +610,6 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* and context switches) submission.
*/
- rb = execlists->first;
- GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-
if (last) {
/*
* Don't resubmit or switch until all outstanding
@@ -602,8 +620,6 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(!execlists_is_active(execlists,
EXECLISTS_ACTIVE_USER));
GEM_BUG_ON(!port_count(&port[0]));
- if (port_count(&port[0]) > 1)
- return false;
/*
* If we write to ELSP a second time before the HW has had
@@ -613,11 +629,11 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* the HW to indicate that it has had a chance to respond.
*/
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
- return false;
+ return;
if (need_preempt(engine, last, execlists->queue_priority)) {
inject_preempt_context(engine);
- return false;
+ return;
}
/*
@@ -642,7 +658,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* priorities of the ports haven't been switch.
*/
if (port_count(&port[1]))
- return false;
+ return;
/*
* WaIdleLiteRestore:bdw,skl
@@ -655,7 +671,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
last->tail = last->wa_tail;
}
- while (rb) {
+ while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
@@ -671,7 +687,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* second request, and so we never need to tell the
* hardware about the first.
*/
- if (last && !can_merge_ctx(rq->ctx, last->ctx)) {
+ if (last &&
+ !can_merge_ctx(rq->hw_context, last->hw_context)) {
/*
* If we are on the second port and cannot
* combine this request with the last, then we
@@ -690,14 +707,14 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* the same context (even though a different
* request) to the second port.
*/
- if (ctx_single_port_submission(last->ctx) ||
- ctx_single_port_submission(rq->ctx)) {
+ if (ctx_single_port_submission(last->hw_context) ||
+ ctx_single_port_submission(rq->hw_context)) {
__list_del_many(&p->requests,
&rq->sched.link);
goto done;
}
- GEM_BUG_ON(last->ctx == rq->ctx);
+ GEM_BUG_ON(last->hw_context == rq->hw_context);
if (submit)
port_assign(port, last);
@@ -713,8 +730,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
submit = true;
}
- rb = rb_next(rb);
- rb_erase(&p->node, &execlists->queue);
+ rb_erase_cached(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
@@ -740,35 +756,23 @@ done:
execlists->queue_priority =
port != execlists->port ? rq_prio(last) : INT_MIN;
- execlists->first = rb;
- if (submit)
+ if (submit) {
port_assign(port, last);
+ execlists_submit_ports(engine);
+ }
/* We must always keep the beast fed if we have work piled up */
- GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+ GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
+ !port_isset(execlists->port));
/* Re-evaluate the executing context setup after each preemptive kick */
if (last)
execlists_user_begin(execlists, execlists->port);
- return submit;
-}
-
-static void execlists_dequeue(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- unsigned long flags;
- bool submit;
-
- spin_lock_irqsave(&engine->timeline.lock, flags);
- submit = __execlists_dequeue(engine);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
- if (submit)
- execlists_submit_ports(engine);
-
- GEM_BUG_ON(port_isset(execlists->port) &&
- !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
+ /* If the engine is now idle, so should be the flag; and vice versa. */
+ GEM_BUG_ON(execlists_is_active(&engine->execlists,
+ EXECLISTS_ACTIVE_USER) ==
+ !port_isset(engine->execlists.port));
}
void
@@ -799,82 +803,27 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
port++;
}
- execlists_user_end(execlists);
+ execlists_clear_all_active(execlists);
}
-static void clear_gtiir(struct intel_engine_cs *engine)
+static void reset_csb_pointers(struct intel_engine_execlists *execlists)
{
- struct drm_i915_private *dev_priv = engine->i915;
- int i;
-
/*
- * Clear any pending interrupt state.
- *
- * We do it twice out of paranoia that some of the IIR are
- * double buffered, and so if we only reset it once there may
- * still be an interrupt pending.
+ * After a reset, the HW starts writing into CSB entry [0]. We
+ * therefore have to set our HEAD pointer back one entry so that
+ * the *first* entry we check is entry 0. To complicate this further,
+ * as we don't wait for the first interrupt after reset, we have to
+ * fake the HW write to point back to the last entry so that our
+ * inline comparison of our cached head position against the last HW
+ * write works even before the first interrupt.
*/
- if (INTEL_GEN(dev_priv) >= 11) {
- static const struct {
- u8 bank;
- u8 bit;
- } gen11_gtiir[] = {
- [RCS] = {0, GEN11_RCS0},
- [BCS] = {0, GEN11_BCS},
- [_VCS(0)] = {1, GEN11_VCS(0)},
- [_VCS(1)] = {1, GEN11_VCS(1)},
- [_VCS(2)] = {1, GEN11_VCS(2)},
- [_VCS(3)] = {1, GEN11_VCS(3)},
- [_VECS(0)] = {1, GEN11_VECS(0)},
- [_VECS(1)] = {1, GEN11_VECS(1)},
- };
- unsigned long irqflags;
-
- GEM_BUG_ON(engine->id >= ARRAY_SIZE(gen11_gtiir));
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- for (i = 0; i < 2; i++) {
- gen11_reset_one_iir(dev_priv,
- gen11_gtiir[engine->id].bank,
- gen11_gtiir[engine->id].bit);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- } else {
- static const u8 gtiir[] = {
- [RCS] = 0,
- [BCS] = 0,
- [VCS] = 1,
- [VCS2] = 1,
- [VECS] = 3,
- };
-
- GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
-
- for (i = 0; i < 2; i++) {
- I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
- engine->irq_keep_mask);
- POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
- }
- GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
- engine->irq_keep_mask);
- }
+ execlists->csb_head = execlists->csb_write_reset;
+ WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset);
}
-static void reset_irq(struct intel_engine_cs *engine)
+static void nop_submission_tasklet(unsigned long data)
{
- /* Mark all CS interrupts as complete */
- smp_store_mb(engine->execlists.active, 0);
- synchronize_hardirq(engine->i915->drm.irq);
-
- clear_gtiir(engine);
-
- /*
- * The port is checked prior to scheduling a tasklet, but
- * just in case we have suspended the tasklet to do the
- * wedging make sure that when it wakes, it decides there
- * is no work to do by clearing the irq_posted bit.
- */
- clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ /* The driver is wedged; don't process any more events. */
}
static void execlists_cancel_requests(struct intel_engine_cs *engine)
@@ -901,13 +850,11 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
* submission's irq state, we also wish to remind ourselves that
* it is irq state.)
*/
- local_irq_save(flags);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
/* Cancel the requests on the HW and clear the ELSP tracker. */
execlists_cancel_port_requests(execlists);
- reset_irq(engine);
-
- spin_lock(&engine->timeline.lock);
+ execlists_user_end(execlists);
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline.requests, link) {
@@ -917,8 +864,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
}
/* Flush the queued requests to the timeline list (for retiring). */
- rb = execlists->first;
- while (rb) {
+ while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
@@ -928,8 +874,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
__i915_request_submit(rq);
}
- rb = rb_next(rb);
- rb_erase(&p->node, &execlists->queue);
+ rb_erase_cached(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
@@ -938,221 +883,198 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
/* Remaining _unready_ requests will be nop'ed when submitted */
execlists->queue_priority = INT_MIN;
- execlists->queue = RB_ROOT;
- execlists->first = NULL;
+ execlists->queue = RB_ROOT_CACHED;
GEM_BUG_ON(port_isset(execlists->port));
- spin_unlock(&engine->timeline.lock);
+ GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
+ execlists->tasklet.func = nop_submission_tasklet;
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-/*
- * Check the unread Context Status Buffers and manage the submission of new
- * contexts to the ELSP accordingly.
- */
-static void execlists_submission_tasklet(unsigned long data)
+static inline bool
+reset_in_progress(const struct intel_engine_execlists *execlists)
+{
+ return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+}
+
+static void process_csb(struct intel_engine_cs *engine)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
- struct drm_i915_private *dev_priv = engine->i915;
- bool fw = false;
+ const u32 * const buf = execlists->csb_status;
+ u8 head, tail;
/*
- * We can skip acquiring intel_runtime_pm_get() here as it was taken
- * on our behalf by the request (see i915_gem_mark_busy()) and it will
- * not be relinquished until the device is idle (see
- * i915_gem_idle_work_handler()). As a precaution, we make sure
- * that all ELSP are drained i.e. we have processed the CSB,
- * before allowing ourselves to idle and calling intel_runtime_pm_put().
+ * Note that csb_write, csb_status may be either in HWSP or mmio.
+ * When reading from the csb_write mmio register, we have to be
+ * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
+ * the low 4bits. As it happens we know the next 4bits are always
+ * zero and so we can simply mask off the low u8 of the register
+ * and treat it identically to reading from the HWSP (without having
+ * to use explicit shifting and masking, and probably bifurcating
+ * the code to handle the legacy mmio read).
*/
- GEM_BUG_ON(!dev_priv->gt.awake);
+ head = execlists->csb_head;
+ tail = READ_ONCE(*execlists->csb_write);
+ GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail);
+ if (unlikely(head == tail))
+ return;
/*
- * Prefer doing test_and_clear_bit() as a two stage operation to avoid
- * imposing the cost of a locked atomic transaction when submitting a
- * new request (outside of the context-switch interrupt).
+ * Hopefully paired with a wmb() in HW!
+ *
+ * We must complete the read of the write pointer before any reads
+ * from the CSB, so that we do not see stale values. Without an rmb
+ * (lfence) the HW may speculatively perform the CSB[] reads *before*
+ * we perform the READ_ONCE(*csb_write).
*/
- while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
- /* The HWSP contains a (cacheable) mirror of the CSB */
- const u32 *buf =
- &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
- unsigned int head, tail;
+ rmb();
- if (unlikely(execlists->csb_use_mmio)) {
- buf = (u32 * __force)
- (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
- execlists->csb_head = -1; /* force mmio read of CSB ptrs */
- }
+ do {
+ struct i915_request *rq;
+ unsigned int status;
+ unsigned int count;
- /* Clear before reading to catch new interrupts */
- clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- smp_mb__after_atomic();
+ if (++head == GEN8_CSB_ENTRIES)
+ head = 0;
- if (unlikely(execlists->csb_head == -1)) { /* following a reset */
- if (!fw) {
- intel_uncore_forcewake_get(dev_priv,
- execlists->fw_domains);
- fw = true;
- }
+ /*
+ * We are flying near dragons again.
+ *
+ * We hold a reference to the request in execlist_port[]
+ * but no more than that. We are operating in softirq
+ * context and so cannot hold any mutex or sleep. That
+ * prevents us stopping the requests we are processing
+ * in port[] from being retired simultaneously (the
+ * breadcrumb will be complete before we see the
+ * context-switch). As we only hold the reference to the
+ * request, any pointer chasing underneath the request
+ * is subject to a potential use-after-free. Thus we
+ * store all of the bookkeeping within port[] as
+ * required, and avoid using unguarded pointers beneath
+ * request itself. The same applies to the atomic
+ * status notifier.
+ */
- head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
- tail = GEN8_CSB_WRITE_PTR(head);
- head = GEN8_CSB_READ_PTR(head);
- execlists->csb_head = head;
- } else {
- const int write_idx =
- intel_hws_csb_write_index(dev_priv) -
- I915_HWS_CSB_BUF0_INDEX;
+ GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
+ engine->name, head,
+ buf[2 * head + 0], buf[2 * head + 1],
+ execlists->active);
+
+ status = buf[2 * head];
+ if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
+ GEN8_CTX_STATUS_PREEMPTED))
+ execlists_set_active(execlists,
+ EXECLISTS_ACTIVE_HWACK);
+ if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
+ execlists_clear_active(execlists,
+ EXECLISTS_ACTIVE_HWACK);
+
+ if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
+ continue;
+
+ /* We should never get a COMPLETED | IDLE_ACTIVE! */
+ GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
- head = execlists->csb_head;
- tail = READ_ONCE(buf[write_idx]);
- rmb(); /* Hopefully paired with a wmb() in HW */
+ if (status & GEN8_CTX_STATUS_COMPLETE &&
+ buf[2*head + 1] == execlists->preempt_complete_status) {
+ GEM_TRACE("%s preempt-idle\n", engine->name);
+ complete_preempt_context(execlists);
+ continue;
}
- GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
- engine->name,
- head, GEN8_CSB_READ_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?",
- tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
- while (head != tail) {
- struct i915_request *rq;
- unsigned int status;
- unsigned int count;
+ if (status & GEN8_CTX_STATUS_PREEMPTED &&
+ execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_PREEMPT))
+ continue;
- if (++head == GEN8_CSB_ENTRIES)
- head = 0;
+ GEM_BUG_ON(!execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_USER));
- /* We are flying near dragons again.
- *
- * We hold a reference to the request in execlist_port[]
- * but no more than that. We are operating in softirq
- * context and so cannot hold any mutex or sleep. That
- * prevents us stopping the requests we are processing
- * in port[] from being retired simultaneously (the
- * breadcrumb will be complete before we see the
- * context-switch). As we only hold the reference to the
- * request, any pointer chasing underneath the request
- * is subject to a potential use-after-free. Thus we
- * store all of the bookkeeping within port[] as
- * required, and avoid using unguarded pointers beneath
- * request itself. The same applies to the atomic
- * status notifier.
+ rq = port_unpack(port, &count);
+ GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
+ engine->name,
+ port->context_id, count,
+ rq ? rq->global_seqno : 0,
+ rq ? rq->fence.context : 0,
+ rq ? rq->fence.seqno : 0,
+ intel_engine_get_seqno(engine),
+ rq ? rq_prio(rq) : 0);
+
+ /* Check the context/desc id for this event matches */
+ GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
+
+ GEM_BUG_ON(count == 0);
+ if (--count == 0) {
+ /*
+ * On the final event corresponding to the
+ * submission of this context, we expect either
+ * an element-switch event or a completion
+ * event (and on completion, the active-idle
+ * marker). No more preemptions, lite-restore
+ * or otherwise.
*/
+ GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
+ GEM_BUG_ON(port_isset(&port[1]) &&
+ !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
+ GEM_BUG_ON(!port_isset(&port[1]) &&
+ !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
- status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
- GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
- engine->name, head,
- status, buf[2*head + 1],
- execlists->active);
-
- if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
- GEN8_CTX_STATUS_PREEMPTED))
- execlists_set_active(execlists,
- EXECLISTS_ACTIVE_HWACK);
- if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
- execlists_clear_active(execlists,
- EXECLISTS_ACTIVE_HWACK);
-
- if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
- continue;
-
- /* We should never get a COMPLETED | IDLE_ACTIVE! */
- GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
-
- if (status & GEN8_CTX_STATUS_COMPLETE &&
- buf[2*head + 1] == execlists->preempt_complete_status) {
- GEM_TRACE("%s preempt-idle\n", engine->name);
-
- execlists_cancel_port_requests(execlists);
- execlists_unwind_incomplete_requests(execlists);
-
- GEM_BUG_ON(!execlists_is_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT));
- execlists_clear_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT);
- continue;
- }
-
- if (status & GEN8_CTX_STATUS_PREEMPTED &&
- execlists_is_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT))
- continue;
+ /*
+ * We rely on the hardware being strongly
+ * ordered, that the breadcrumb write is
+ * coherent (visible from the CPU) before the
+ * user interrupt and CSB is processed.
+ */
+ GEM_BUG_ON(!i915_request_completed(rq));
- GEM_BUG_ON(!execlists_is_active(execlists,
- EXECLISTS_ACTIVE_USER));
+ execlists_context_schedule_out(rq,
+ INTEL_CONTEXT_SCHEDULE_OUT);
+ i915_request_put(rq);
- rq = port_unpack(port, &count);
- GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
- engine->name,
- port->context_id, count,
- rq ? rq->global_seqno : 0,
- rq ? rq->fence.context : 0,
- rq ? rq->fence.seqno : 0,
- intel_engine_get_seqno(engine),
- rq ? rq_prio(rq) : 0);
+ GEM_TRACE("%s completed ctx=%d\n",
+ engine->name, port->context_id);
- /* Check the context/desc id for this event matches */
- GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
-
- GEM_BUG_ON(count == 0);
- if (--count == 0) {
- /*
- * On the final event corresponding to the
- * submission of this context, we expect either
- * an element-switch event or a completion
- * event (and on completion, the active-idle
- * marker). No more preemptions, lite-restore
- * or otherwise.
- */
- GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
- GEM_BUG_ON(port_isset(&port[1]) &&
- !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
- GEM_BUG_ON(!port_isset(&port[1]) &&
- !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
-
- /*
- * We rely on the hardware being strongly
- * ordered, that the breadcrumb write is
- * coherent (visible from the CPU) before the
- * user interrupt and CSB is processed.
- */
- GEM_BUG_ON(!i915_request_completed(rq));
-
- execlists_context_schedule_out(rq,
- INTEL_CONTEXT_SCHEDULE_OUT);
- i915_request_put(rq);
-
- GEM_TRACE("%s completed ctx=%d\n",
- engine->name, port->context_id);
-
- port = execlists_port_complete(execlists, port);
- if (port_isset(port))
- execlists_user_begin(execlists, port);
- else
- execlists_user_end(execlists);
- } else {
- port_set(port, port_pack(rq, count));
- }
+ port = execlists_port_complete(execlists, port);
+ if (port_isset(port))
+ execlists_user_begin(execlists, port);
+ else
+ execlists_user_end(execlists);
+ } else {
+ port_set(port, port_pack(rq, count));
}
+ } while (head != tail);
- if (head != execlists->csb_head) {
- execlists->csb_head = head;
- writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
- dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
- }
- }
+ execlists->csb_head = head;
+}
- if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
+static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
+{
+ lockdep_assert_held(&engine->timeline.lock);
+
+ process_csb(engine);
+ if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
execlists_dequeue(engine);
+}
+
+/*
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+static void execlists_submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ unsigned long flags;
- if (fw)
- intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
+ GEM_TRACE("%s awake?=%d, active=%x\n",
+ engine->name,
+ engine->i915->gt.awake,
+ engine->execlists.active);
- /* If the engine is now idle, so should be the flag; and vice versa. */
- GEM_BUG_ON(execlists_is_active(&engine->execlists,
- EXECLISTS_ACTIVE_USER) ==
- !port_isset(engine->execlists.port));
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ __execlists_submission_tasklet(engine);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
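process_csb() above is a classic single-producer ring consumer: read the producer's write pointer once, issue a read barrier so the entry loads cannot be speculated ahead of it, then walk head up to tail modulo the ring size. A stripped-down model of that loop (GCC/Clang atomics stand in for the kernel's READ_ONCE()/rmb()):

#include <stdio.h>

#define ENTRIES 6

static unsigned int buf[ENTRIES]; /* CSB stand-in, written by the "HW" */
static unsigned int csb_write;    /* producer's write pointer */

static void process(unsigned int *csb_head)
{
	unsigned int head = *csb_head;
	unsigned int tail = __atomic_load_n(&csb_write, __ATOMIC_RELAXED);

	if (head == tail)
		return;

	/* Order the tail read before the entry reads, like rmb(). */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);

	do {
		if (++head == ENTRIES)
			head = 0; /* wrap, as the CSB does */
		printf("event[%u] = %#x\n", head, buf[head]);
	} while (head != tail);

	*csb_head = head; /* only now record what we consumed */
}

int main(void)
{
	unsigned int head = ENTRIES - 1; /* "one before entry 0", post-reset */

	buf[0] = 0x8002; buf[1] = 0x18;  /* fake status events */
	csb_write = 1;
	process(&head);
	return 0;
}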
static void queue_request(struct intel_engine_cs *engine,
@@ -1163,16 +1085,30 @@ static void queue_request(struct intel_engine_cs *engine,
&lookup_priolist(engine, prio)->requests);
}
-static void __submit_queue(struct intel_engine_cs *engine, int prio)
+static void __update_queue(struct intel_engine_cs *engine, int prio)
{
engine->execlists.queue_priority = prio;
- tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
+static void __submit_queue_imm(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ if (reset_in_progress(execlists))
+ return; /* defer until we restart the engine following reset */
+
+ if (execlists->tasklet.func == execlists_submission_tasklet)
+ __execlists_submission_tasklet(engine);
+ else
+ tasklet_hi_schedule(&execlists->tasklet);
}
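
The dispatch choice in __submit_queue_imm() — run the submission inline while we still own the tasklet callback, otherwise hand it to the scheduled path — can be sketched with tasklet state reduced to a bare function pointer; everything below is an illustrative model, not the kernel tasklet API.

/* Hedged model: submit directly when the default callback is still
 * installed, otherwise defer to whoever overrode it (e.g. a reset
 * path or an alternate submission backend). Names are illustrative. */
#include <stdio.h>
#include <stdbool.h>

static void direct_submit(void) { printf("submitted inline\n"); }

struct tasklet_model {
        void (*func)(void);
};

static void default_func(void) { direct_submit(); }
static void other_func(void)   { printf("deferred to override\n"); }

static void submit_imm(struct tasklet_model *t, bool reset_in_progress)
{
        if (reset_in_progress)
                return; /* replay after the reset instead */

        if (t->func == default_func)
                t->func();                    /* fast path: run here */
        else
                printf("schedule tasklet\n"); /* softirq runs t->func */
}

int main(void)
{
        struct tasklet_model t = { .func = default_func };

        submit_imm(&t, false);  /* submitted inline */
        t.func = other_func;
        submit_imm(&t, false);  /* schedule tasklet */
        submit_imm(&t, true);   /* no-op during reset */
        return 0;
}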
static void submit_queue(struct intel_engine_cs *engine, int prio)
{
- if (prio > engine->execlists.queue_priority)
- __submit_queue(engine, prio);
+ if (prio > engine->execlists.queue_priority) {
+ __update_queue(engine, prio);
+ __submit_queue_imm(engine);
+ }
}
static void execlists_submit_request(struct i915_request *request)
@@ -1184,11 +1120,12 @@ static void execlists_submit_request(struct i915_request *request)
spin_lock_irqsave(&engine->timeline.lock, flags);
queue_request(engine, &request->sched, rq_prio(request));
- submit_queue(engine, rq_prio(request));
- GEM_BUG_ON(!engine->execlists.first);
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
GEM_BUG_ON(list_empty(&request->sched.link));
+ submit_queue(engine, rq_prio(request));
+
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
@@ -1315,13 +1252,40 @@ static void execlists_schedule(struct i915_request *request,
}
if (prio > engine->execlists.queue_priority &&
- i915_sw_fence_done(&sched_to_request(node)->submit))
- __submit_queue(engine, prio);
+ i915_sw_fence_done(&sched_to_request(node)->submit)) {
+ /* defer submission until after all of our updates */
+ __update_queue(engine, prio);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
}
spin_unlock_irq(&engine->timeline.lock);
}
+static void execlists_context_destroy(struct intel_context *ce)
+{
+ GEM_BUG_ON(ce->pin_count);
+
+ if (!ce->state)
+ return;
+
+ intel_ring_free(ce->ring);
+
+ GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
+ i915_gem_object_put(ce->state->obj);
+}
+
+static void execlists_context_unpin(struct intel_context *ce)
+{
+ intel_ring_unpin(ce->ring);
+
+ ce->state->obj->pin_global--;
+ i915_gem_object_unpin_map(ce->state->obj);
+ i915_vma_unpin(ce->state);
+
+ i915_gem_context_put(ce->gem_context);
+}
+
static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
{
unsigned int flags;
@@ -1345,21 +1309,15 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
}
-static struct intel_ring *
-execlists_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_context *
+__execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx,
+ struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
void *vaddr;
int ret;
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
- if (likely(ce->pin_count++))
- goto out;
- GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
- ret = execlists_context_deferred_alloc(ctx, engine);
+ ret = execlists_context_deferred_alloc(ctx, engine, ce);
if (ret)
goto err;
GEM_BUG_ON(!ce->state);
@@ -1378,17 +1336,17 @@ execlists_context_pin(struct intel_engine_cs *engine,
if (ret)
goto unpin_map;
- intel_lr_context_descriptor_update(ctx, engine);
+ intel_lr_context_descriptor_update(ctx, engine, ce);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma);
+ GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->head;
ce->state->obj->pin_global++;
i915_gem_context_get(ctx);
-out:
- return ce->ring;
+ return ce;
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
@@ -1399,33 +1357,33 @@ err:
return ERR_PTR(ret);
}
-static void execlists_context_unpin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static const struct intel_context_ops execlists_context_ops = {
+ .unpin = execlists_context_unpin,
+ .destroy = execlists_context_destroy,
+};
+
+static struct intel_context *
+execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- GEM_BUG_ON(ce->pin_count == 0);
- if (--ce->pin_count)
- return;
-
- intel_ring_unpin(ce->ring);
+ if (likely(ce->pin_count++))
+ return ce;
+ GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
- ce->state->obj->pin_global--;
- i915_gem_object_unpin_map(ce->state->obj);
- i915_vma_unpin(ce->state);
+ ce->ops = &execlists_context_ops;
- i915_gem_context_put(ctx);
+ return __execlists_context_pin(engine, ctx, ce);
}
static int execlists_request_alloc(struct i915_request *request)
{
- struct intel_context *ce =
- to_intel_context(request->ctx, request->engine);
int ret;
- GEM_BUG_ON(!ce->pin_count);
+ GEM_BUG_ON(!request->hw_context->pin_count);
/* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
@@ -1538,29 +1496,56 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
-static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
+struct lri {
+ i915_reg_t reg;
+ u32 value;
+};
+
+static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count)
{
- *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ GEM_BUG_ON(!count || count > 63);
- /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
- batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+ *batch++ = MI_LOAD_REGISTER_IMM(count);
+ do {
+ *batch++ = i915_mmio_reg_offset(lri->reg);
+ *batch++ = lri->value;
+ } while (lri++, --count);
+ *batch++ = MI_NOOP;
- *batch++ = MI_LOAD_REGISTER_IMM(3);
+ return batch;
+}
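
For reference, a hedged sketch of the dword stream emit_lri() produces: a header encoding the pair count, the (offset, value) pairs, and a trailing no-op. The opcode encodings below are invented placeholders, not the actual GPU command values.

/* Hedged sketch of the LRI batch layout: header, (reg, value) pairs,
 * padding no-op. Opcode values are placeholders for illustration. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define FAKE_MI_LRI(n)  (0x22000000u | (2 * (n) - 1)) /* placeholder */
#define FAKE_MI_NOOP    0x00000000u

struct lri { uint32_t reg; uint32_t value; };

static uint32_t *emit_lri_model(uint32_t *batch, const struct lri *lri,
                                unsigned int count)
{
        *batch++ = FAKE_MI_LRI(count);
        do {
                *batch++ = lri->reg;   /* mmio offset */
                *batch++ = lri->value; /* immediate to load */
        } while (lri++, --count);
        *batch++ = FAKE_MI_NOOP;       /* keep the stream aligned */
        return batch;
}

int main(void)
{
        static const struct lri wa[] = {
                { 0x7014, 0x00010001 }, /* illustrative offsets/values */
                { 0x2084, 0x00200020 },
        };
        uint32_t batch[16], *end;
        size_t i;

        end = emit_lri_model(batch, wa, 2);
        for (i = 0; i < (size_t)(end - batch); i++)
                printf("dword[%zu] = 0x%08x\n", i, batch[i]);
        return 0;
}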
- /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
- *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
- *batch++ = _MASKED_BIT_DISABLE(
- GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
+static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
+{
+ static const struct lri lri[] = {
+ /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
+ {
+ COMMON_SLICE_CHICKEN2,
+ __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
+ 0),
+ },
+
+ /* BSpec: 11391 */
+ {
+ FF_SLICE_CHICKEN,
+ __MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
+ FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
+ },
+
+ /* BSpec: 11299 */
+ {
+ _3D_CHICKEN3,
+ __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
+ _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
+ }
+ };
- /* BSpec: 11391 */
- *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
- *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
- /* BSpec: 11299 */
- *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
- *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
+ /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
+ batch = gen8_emit_flush_coherentl3_wa(engine, batch);
- *batch++ = MI_NOOP;
+ batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
/* WaClearSlmSpaceAtContextSwitch:kbl */
/* Actual scratch location is at 128 bytes offset */
@@ -1652,7 +1637,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1767,17 +1752,29 @@ static void enable_execlists(struct intel_engine_cs *engine)
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+ I915_WRITE(RING_MI_MODE(engine->mmio_base),
+ _MASKED_BIT_DISABLE(STOP_RING));
+
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
engine->status_page.ggtt_offset);
POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+}
- /* Following the reset, we need to reload the CSB read/write pointers */
- engine->execlists.csb_head = -1;
+static bool unexpected_starting_state(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ bool unexpected = false;
+
+ if (I915_READ(RING_MI_MODE(engine->mmio_base)) & STOP_RING) {
+ DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
+ unexpected = true;
+ }
+
+ return unexpected;
}
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
int ret;
ret = intel_mocs_init_engine(engine);
@@ -1785,13 +1782,14 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
return ret;
intel_engine_reset_breadcrumbs(engine);
- intel_engine_init_hangcheck(engine);
- enable_execlists(engine);
+ if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
+ struct drm_printer p = drm_debug_printer(__func__);
- /* After a GPU reset, we may have requests to replay */
- if (execlists->first)
- tasklet_schedule(&execlists->tasklet);
+ intel_engine_dump(engine, &p, NULL);
+ }
+
+ enable_execlists(engine);
return 0;
}
@@ -1833,8 +1831,69 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
return 0;
}
-static void reset_common_ring(struct intel_engine_cs *engine,
- struct i915_request *request)
+static struct i915_request *
+execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *request, *active;
+ unsigned long flags;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its execlists->tasklet *just* as we are
+ * calling engine->init_hw() and also writing the ELSP.
+ * Turning off the execlists->tasklet until the reset is over
+ * prevents the race.
+ */
+ __tasklet_disable_sync_once(&execlists->tasklet);
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ /*
+ * We want to flush the pending context switches; having disabled
+ * the tasklet above, we can assume exclusive access to the execlists.
+ * This allows us to catch up with an in-flight preemption event,
+ * and avoid blaming an innocent request if the stall was due to the
+ * preemption itself.
+ */
+ process_csb(engine);
+
+ /*
+ * The last active request can then be no later than the last request
+ * now in ELSP[0]. So search backwards from there, so that if the GPU
+ * has advanced beyond the last CSB update, it will be pardoned.
+ */
+ active = NULL;
+ request = port_request(execlists->port);
+ if (request) {
+ /*
+ * Prevent the breadcrumb from advancing before we decide
+ * which request is currently active.
+ */
+ intel_engine_stop_cs(engine);
+
+ list_for_each_entry_from_reverse(request,
+ &engine->timeline.requests,
+ link) {
+ if (__i915_request_completed(request,
+ request->global_seqno))
+ break;
+
+ active = request;
+ }
+ }
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+ return active;
+}
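
The backwards search in execlists_reset_prepare() — pardon everything the GPU already completed, blame the oldest incomplete request — reduces to the following standalone model, with completion collapsed to a seqno comparison.

/* Hedged model of "search backwards and pardon": walk the timeline in
 * reverse from the request last seen in ELSP[0]; anything already
 * completed is pardoned, the oldest incomplete request is the one the
 * reset will blame. Completion is reduced to a seqno compare. */
#include <stdio.h>

struct req { unsigned int seqno; };

static const struct req *find_active(const struct req *timeline, int last,
                                     unsigned int hw_seqno)
{
        const struct req *active = NULL;
        int i;

        for (i = last; i >= 0; i--) {
                if (timeline[i].seqno <= hw_seqno)
                        break;          /* completed: GPU got past it */
                active = &timeline[i];  /* still pending, keep looking */
        }
        return active;
}

int main(void)
{
        const struct req timeline[] = { {1}, {2}, {3}, {4} };

        /* hw advanced to seqno 2; requests 3 and 4 are outstanding */
        const struct req *active = find_active(timeline, 3, 2);

        if (active)
                printf("blame request with seqno %u\n", active->seqno);
        return 0;
}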
+
+static void execlists_reset(struct intel_engine_cs *engine,
+ struct i915_request *request)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned long flags;
@@ -1844,8 +1903,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
engine->name, request ? request->global_seqno : 0,
intel_engine_get_seqno(engine));
- /* See execlists_cancel_requests() for the irq/spinlock split. */
- local_irq_save(flags);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
/*
* Catch up with any missed context-switch interrupts.
@@ -1857,14 +1915,14 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* requests were completed.
*/
execlists_cancel_port_requests(execlists);
- reset_irq(engine);
/* Push back any incomplete requests for replay after the reset. */
- spin_lock(&engine->timeline.lock);
__unwind_incomplete_requests(engine);
- spin_unlock(&engine->timeline.lock);
- local_irq_restore(flags);
+ /* Following the reset, we need to reload the CSB read/write pointers */
+ reset_csb_pointers(&engine->execlists);
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
/*
* If the request was innocent, we leave the request in the ELSP
@@ -1888,35 +1946,52 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- regs = to_intel_context(request->ctx, engine)->lrc_reg_state;
- if (engine->default_state) {
- void *defaults;
-
- defaults = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
- if (!IS_ERR(defaults)) {
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- defaults + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
- i915_gem_object_unpin_map(engine->default_state);
- }
+ regs = request->hw_context->lrc_reg_state;
+ if (engine->pinned_default_state) {
+ memcpy(regs, /* skip restoring the vanilla PPHWSP */
+ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+ engine->context_size - PAGE_SIZE);
}
- execlists_init_reg_state(regs, request->ctx, engine, request->ring);
+ execlists_init_reg_state(regs,
+ request->gem_context, engine, request->ring);
/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
- regs[CTX_RING_HEAD + 1] = request->postfix;
- request->ring->head = request->postfix;
+ request->ring->head = intel_ring_wrap(request->ring, request->postfix);
+ regs[CTX_RING_HEAD + 1] = request->ring->head;
+
intel_ring_update_space(request->ring);
/* Reset WaIdleLiteRestore:bdw,skl as well */
unwind_wa_tail(request);
}
+static void execlists_reset_finish(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ /* After a GPU reset, we may have requests to replay */
+ if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
+ tasklet_schedule(&execlists->tasklet);
+
+ /*
+ * Flush the tasklet while we still hold the forcewake, to be sure
+ * that it is not allowed to sleep before we restart and reload a
+ * context.
+ *
+ * As before (with execlists_reset_prepare) we rely on the caller
+ * serialising multiple attempts to reset so that we know that we
+ * are the only one manipulating tasklet state.
+ */
+ __tasklet_enable_sync_once(&execlists->tasklet);
+
+ GEM_TRACE("%s\n", engine->name);
+}
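
The prepare/finish pairing relies on a disable count so that nested resets only stop and restart the tasklet at the outermost level; a minimal model of that counting (synchronisation elided, names illustrative):

/* Hedged model of the __tasklet_disable_sync_once() /
 * __tasklet_enable_sync_once() pairing: a disable count means nested
 * prepare/finish pairs only toggle the tasklet at the outermost
 * level. The sync and atomicity of the real helpers are elided. */
#include <stdio.h>

static int disable_count;

static void disable_once(void)
{
        if (disable_count++ == 0)
                printf("tasklet stopped\n");
}

static void enable_once(void)
{
        if (--disable_count == 0)
                printf("tasklet restarted\n");
}

int main(void)
{
        disable_once();   /* outer reset_prepare: stops the tasklet */
        disable_once();   /* nested prepare: no-op */
        enable_once();    /* nested finish: no-op */
        enable_once();    /* outer reset_finish: restarts the tasklet */
        return 0;
}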
+
static int intel_logical_ring_emit_pdps(struct i915_request *rq)
{
- struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+ struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
struct intel_engine_cs *engine = rq->engine;
const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
u32 *cs;
@@ -1955,15 +2030,15 @@ static int gen8_emit_bb_start(struct i915_request *rq,
* it is unsafe in case of lite-restore (because the ctx is
* not idle). PML4 is allocated during ppgtt init so this is
* not needed in 48-bit.*/
- if (rq->ctx->ppgtt &&
- (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
- !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
+ if (rq->gem_context->ppgtt &&
+ (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
+ !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
!intel_vgpu_active(rq->i915)) {
ret = intel_logical_ring_emit_pdps(rq);
if (ret)
return ret;
- rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
+ rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
}
cs = intel_ring_begin(rq, 6);
@@ -2217,13 +2292,15 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
kfree(engine);
}
-static void execlists_set_default_submission(struct intel_engine_cs *engine)
+void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
engine->cancel_requests = execlists_cancel_requests;
engine->schedule = execlists_schedule;
engine->execlists.tasklet.func = execlists_submission_tasklet;
+ engine->reset.prepare = execlists_reset_prepare;
+
engine->park = NULL;
engine->unpark = NULL;
@@ -2243,18 +2320,19 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overriden by each engine. */
engine->init_hw = gen8_init_common_ring;
- engine->reset_hw = reset_common_ring;
- engine->context_pin = execlists_context_pin;
- engine->context_unpin = execlists_context_unpin;
+ engine->reset.prepare = execlists_reset_prepare;
+ engine->reset.reset = execlists_reset;
+ engine->reset.finish = execlists_reset_finish;
+ engine->context_pin = execlists_context_pin;
engine->request_alloc = execlists_request_alloc;
engine->emit_flush = gen8_emit_flush;
engine->emit_breadcrumb = gen8_emit_breadcrumb;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
- engine->set_default_submission = execlists_set_default_submission;
+ engine->set_default_submission = intel_execlists_set_default_submission;
if (INTEL_GEN(engine->i915) < 11) {
engine->irq_enable = gen8_logical_ring_enable_irq;
@@ -2294,28 +2372,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
static void
logical_ring_setup(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- enum forcewake_domains fw_domains;
-
intel_engine_setup_common(engine);
/* Intentionally left blank. */
engine->buffer = NULL;
- fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
- RING_ELSP(engine),
- FW_REG_WRITE);
-
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
- RING_CONTEXT_STATUS_PTR(engine),
- FW_REG_READ | FW_REG_WRITE);
-
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
- RING_CONTEXT_STATUS_BUF_BASE(engine),
- FW_REG_READ);
-
- engine->execlists.fw_domains = fw_domains;
-
tasklet_init(&engine->execlists.tasklet,
execlists_submission_tasklet, (unsigned long)engine);
@@ -2323,33 +2384,61 @@ logical_ring_setup(struct intel_engine_cs *engine)
logical_ring_default_irqs(engine);
}
+static bool csb_force_mmio(struct drm_i915_private *i915)
+{
+ /* Older GVT emulation depends upon intercepting CSB mmio */
+ return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915);
+}
+
static int logical_ring_init(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
int ret;
ret = intel_engine_init_common(engine);
if (ret)
goto error;
- if (HAS_LOGICAL_RING_ELSQ(engine->i915)) {
- engine->execlists.submit_reg = engine->i915->regs +
+ if (HAS_LOGICAL_RING_ELSQ(i915)) {
+ execlists->submit_reg = i915->regs +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
- engine->execlists.ctrl_reg = engine->i915->regs +
+ execlists->ctrl_reg = i915->regs +
i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
} else {
- engine->execlists.submit_reg = engine->i915->regs +
+ execlists->submit_reg = i915->regs +
i915_mmio_reg_offset(RING_ELSP(engine));
}
- engine->execlists.preempt_complete_status = ~0u;
- if (engine->i915->preempt_context) {
+ execlists->preempt_complete_status = ~0u;
+ if (i915->preempt_context) {
struct intel_context *ce =
- to_intel_context(engine->i915->preempt_context, engine);
+ to_intel_context(i915->preempt_context, engine);
- engine->execlists.preempt_complete_status =
+ execlists->preempt_complete_status =
upper_32_bits(ce->lrc_desc);
}
+ execlists->csb_read =
+ i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
+ if (csb_force_mmio(i915)) {
+ execlists->csb_status = (u32 __force *)
+ (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+
+ execlists->csb_write = (u32 __force *)execlists->csb_read;
+ execlists->csb_write_reset =
+ _MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK,
+ GEN8_CSB_ENTRIES - 1);
+ } else {
+ execlists->csb_status =
+ &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+
+ execlists->csb_write =
+ &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
+ execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1;
+ }
+ reset_csb_pointers(execlists);
+
return 0;
error:
@@ -2482,7 +2571,7 @@ static void execlists_init_reg_state(u32 *regs,
struct drm_i915_private *dev_priv = engine->i915;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
u32 base = engine->mmio_base;
- bool rcs = engine->id == RCS;
+ bool rcs = engine->class == RENDER_CLASS;
/* A context is actually a big batch buffer with several
* MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
@@ -2550,7 +2639,7 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
- if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
+ if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
@@ -2629,10 +2718,10 @@ err_unpin_ctx:
}
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+ struct intel_engine_cs *engine,
+ struct intel_context *ce)
{
struct drm_i915_gem_object *ctx_obj;
- struct intel_context *ce = to_intel_context(ctx, engine);
struct i915_vma *vma;
uint32_t context_size;
struct intel_ring *ring;
@@ -2654,7 +2743,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
- vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 4ec7d8dd13c8..4dfb78e3ec7e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -104,11 +104,6 @@ struct i915_gem_context;
void intel_lr_context_resume(struct drm_i915_private *dev_priv);
-static inline uint64_t
-intel_lr_context_descriptor(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- return to_intel_context(ctx, engine)->lrc_desc;
-}
+void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 8ae8f42f430a..5dae16ccd9f1 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -116,7 +116,7 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
{
- uint8_t rev;
+ u8 rev;
if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV,
&rev) != 1) {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 48f618dc9abb..f9f3b0885ba5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -44,8 +44,6 @@
/* Private structure for the integrated LVDS support */
struct intel_lvds_connector {
struct intel_connector base;
-
- struct notifier_block lid_notifier;
};
struct intel_lvds_pps {
@@ -85,34 +83,35 @@ static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *conn
return container_of(connector, struct intel_lvds_connector, base.base);
}
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t lvds_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = I915_READ(lvds_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT;
+ else
+ *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT;
+
+ return val & LVDS_PORT_EN;
+}
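
The helper's mask/shift decode, with the pipe-select field living at a different position on PCH-CPT, can be shown with an invented register layout; the masks below are illustrative, not the real LVDS bits.

/* Hedged sketch of intel_lvds_port_enabled(): pick mask and shift per
 * platform, and report the pipe even when the enable bit is clear so
 * asserts can still use it. Register layout here is made up. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PORT_EN            (1u << 31)
#define PIPE_SEL_MASK      (1u << 30)   /* illustrative */
#define PIPE_SEL_SHIFT     30
#define PIPE_SEL_MASK_CPT  (3u << 29)   /* illustrative */
#define PIPE_SEL_SHIFT_CPT 29

static bool port_enabled(uint32_t val, bool has_pch_cpt, unsigned int *pipe)
{
        if (has_pch_cpt)
                *pipe = (val & PIPE_SEL_MASK_CPT) >> PIPE_SEL_SHIFT_CPT;
        else
                *pipe = (val & PIPE_SEL_MASK) >> PIPE_SEL_SHIFT;

        return val & PORT_EN;
}

int main(void)
{
        unsigned int pipe;
        uint32_t val = PORT_EN | (1u << 29); /* CPT: pipe 1, enabled */

        if (port_enabled(val, true, &pipe))
                printf("port enabled on pipe %u\n", pipe);
        return 0;
}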
+
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
- u32 tmp;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
- ret = false;
-
- tmp = I915_READ(lvds_encoder->reg);
-
- if (!(tmp & LVDS_PORT_EN))
- goto out;
-
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- ret = true;
+ ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
-out:
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
@@ -255,14 +254,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~PORT_TRANS_SEL_MASK;
- temp |= PORT_TRANS_SEL_CPT(pipe);
+ temp &= ~LVDS_PIPE_SEL_MASK_CPT;
+ temp |= LVDS_PIPE_SEL_CPT(pipe);
} else {
- if (pipe == 1) {
- temp |= LVDS_PIPEB_SELECT;
- } else {
- temp &= ~LVDS_PIPEB_SELECT;
- }
+ temp &= ~LVDS_PIPE_SEL_MASK;
+ temp |= LVDS_PIPE_SEL(pipe);
}
/* set the corresponding LVDS_BORDER bit */
@@ -454,26 +450,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return true;
}
-/*
- * Detect the LVDS connection.
- *
- * Since LVDS doesn't have hotlug, we use the lid as a proxy. Open means
- * connected and closed means disconnected. We also send hotplug events as
- * needed, using lid status notification from the input layer.
- */
static enum drm_connector_status
intel_lvds_detect(struct drm_connector *connector, bool force)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- enum drm_connector_status status;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
-
- status = intel_panel_detect(dev_priv);
- if (status != connector_status_unknown)
- return status;
-
return connector_status_connected;
}
@@ -498,117 +477,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return 1;
}
-static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
-{
- DRM_INFO("Skipping forced modeset for %s\n", id->ident);
- return 1;
-}
-
-/* The GPU hangs up on these systems if modeset is performed on LID open */
-static const struct dmi_system_id intel_no_modeset_on_lid[] = {
- {
- .callback = intel_no_modeset_on_lid_dmi_callback,
- .ident = "Toshiba Tecra A11",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
- },
- },
-
- { } /* terminating entry */
-};
-
-/*
- * Lid events. Note the use of 'modeset':
- * - we set it to MODESET_ON_LID_OPEN on lid close,
- * and set it to MODESET_DONE on open
- * - we use it as a "only once" bit (ie we ignore
- * duplicate events where it was already properly set)
- * - the suspend/resume paths will set it to
- * MODESET_SUSPENDED and ignore the lid open event,
- * because they restore the mode ("lid open").
- */
-static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
- void *unused)
-{
- struct intel_lvds_connector *lvds_connector =
- container_of(nb, struct intel_lvds_connector, lid_notifier);
- struct drm_connector *connector = &lvds_connector->base.base;
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
- return NOTIFY_OK;
-
- mutex_lock(&dev_priv->modeset_restore_lock);
- if (dev_priv->modeset_restore == MODESET_SUSPENDED)
- goto exit;
- /*
- * check and update the status of LVDS connector after receiving
- * the LID nofication event.
- */
- connector->status = connector->funcs->detect(connector, false);
-
- /* Don't force modeset on machines where it causes a GPU lockup */
- if (dmi_check_system(intel_no_modeset_on_lid))
- goto exit;
- if (!acpi_lid_open()) {
- /* do modeset on next lid open event */
- dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
- goto exit;
- }
-
- if (dev_priv->modeset_restore == MODESET_DONE)
- goto exit;
-
- /*
- * Some old platform's BIOS love to wreak havoc while the lid is closed.
- * We try to detect this here and undo any damage. The split for PCH
- * platforms is rather conservative and a bit arbitrary expect that on
- * those platforms VGA disabling requires actual legacy VGA I/O access,
- * and as part of the cleanup in the hw state restore we also redisable
- * the vga plane.
- */
- if (!HAS_PCH_SPLIT(dev_priv))
- intel_display_resume(dev);
-
- dev_priv->modeset_restore = MODESET_DONE;
-
-exit:
- mutex_unlock(&dev_priv->modeset_restore_lock);
- return NOTIFY_OK;
-}
-
-static int
-intel_lvds_connector_register(struct drm_connector *connector)
-{
- struct intel_lvds_connector *lvds = to_lvds_connector(connector);
- int ret;
-
- ret = intel_connector_register(connector);
- if (ret)
- return ret;
-
- lvds->lid_notifier.notifier_call = intel_lid_notify;
- if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
- DRM_DEBUG_KMS("lid notifier registration failed\n");
- lvds->lid_notifier.notifier_call = NULL;
- }
-
- return 0;
-}
-
-static void
-intel_lvds_connector_unregister(struct drm_connector *connector)
-{
- struct intel_lvds_connector *lvds = to_lvds_connector(connector);
-
- if (lvds->lid_notifier.notifier_call)
- acpi_lid_notifier_unregister(&lvds->lid_notifier);
-
- intel_connector_unregister(connector);
-}
-
/**
* intel_lvds_destroy - unregister and free LVDS structures
* @connector: connector to free
@@ -641,8 +509,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_lvds_connector_register,
- .early_unregister = intel_lvds_connector_unregister,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -948,7 +816,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* register is uninitialized.
*/
val = I915_READ(lvds_encoder->reg);
- if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+ if (HAS_PCH_CPT(dev_priv))
+ val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
+ else
+ val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
+ if (val == 0)
val = dev_priv->vbt.bios_lvds_val;
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
@@ -1003,8 +875,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
return;
/* Skip init on machines we know falsely report LVDS */
- if (dmi_check_system(intel_no_lvds))
+ if (dmi_check_system(intel_no_lvds)) {
+ WARN(!dev_priv->vbt.int_lvds_support,
+ "Useless DMI match. Internal LVDS support disabled by VBT\n");
+ return;
+ }
+
+ if (!dev_priv->vbt.int_lvds_support) {
+ DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n");
return;
+ }
if (HAS_PCH_SPLIT(dev_priv))
lvds_reg = PCH_LVDS;
@@ -1016,10 +896,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
if (HAS_PCH_SPLIT(dev_priv)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
- if (dev_priv->vbt.edp.support) {
- DRM_DEBUG_KMS("disable LVDS for eDP support\n");
- return;
- }
}
pin = GMBUS_PIN_PANEL;
@@ -1108,8 +984,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
* 2) check for VBT data
* 3) check to see if LVDS is already on
* if none of the above, no panel
- * 4) make sure lid is open
- * if closed, act like it's not there for now
*/
/*
@@ -1125,7 +999,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
intel_gmbus_get_adapter(dev_priv, pin));
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
edid);
} else {
kfree(edid);
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index b39846613e3c..ca44bf368e24 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -40,7 +40,7 @@ int intel_connector_update_modes(struct drm_connector *connector,
{
int ret;
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index c58e5f53bab0..e034b4166d32 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -608,16 +608,16 @@ void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
#define ACPI_EV_LID (1<<1)
#define ACPI_EV_DOCK (1<<2)
-static struct intel_opregion *system_opregion;
-
+/*
+ * The only video events relevant to opregion are 0x80. These indicate a
+ * docking event, a lid switch or a display switch request. In Linux, these are
+ * handled by the dock, button and video drivers.
+ */
static int intel_opregion_video_event(struct notifier_block *nb,
unsigned long val, void *data)
{
- /* The only video events relevant to opregion are 0x80. These indicate
- either a docking event, lid switch or display switch request. In
- Linux, these are handled by the dock, button and video drivers.
- */
-
+ struct intel_opregion *opregion = container_of(nb, struct intel_opregion,
+ acpi_notifier);
struct acpi_bus_event *event = data;
struct opregion_acpi *acpi;
int ret = NOTIFY_OK;
@@ -625,10 +625,7 @@ static int intel_opregion_video_event(struct notifier_block *nb,
if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
return NOTIFY_DONE;
- if (!system_opregion)
- return NOTIFY_DONE;
-
- acpi = system_opregion->acpi;
+ acpi = opregion->acpi;
if (event->type == 0x80 && ((acpi->cevt & 1) == 0))
ret = NOTIFY_BAD;
@@ -638,10 +635,6 @@ static int intel_opregion_video_event(struct notifier_block *nb,
return ret;
}
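
Embedding the notifier_block in struct intel_opregion is what lets the handler drop the old system_opregion global: container_of() recovers the owner from the member pointer. A simplified standalone sketch of that pattern (structures are stand-ins):

/* Hedged sketch of the container_of pattern: embed the callback block
 * in the owning structure and recover the owner from the member
 * pointer inside the callback, with no global state. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block_model {
        int (*notifier_call)(struct notifier_block_model *nb);
};

struct opregion_model {
        int cevt;
        struct notifier_block_model acpi_notifier;
};

static int video_event(struct notifier_block_model *nb)
{
        struct opregion_model *opregion =
                container_of(nb, struct opregion_model, acpi_notifier);

        printf("cevt = %d\n", opregion->cevt);
        return 0;
}

int main(void)
{
        struct opregion_model opregion = {
                .cevt = 1,
                .acpi_notifier = { .notifier_call = video_event },
        };

        opregion.acpi_notifier.notifier_call(&opregion.acpi_notifier);
        return 0;
}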
-static struct notifier_block intel_opregion_notifier = {
- .notifier_call = intel_opregion_video_event,
-};
-
/*
* Initialise the DIDL field in opregion. This passes a list of devices to
* the firmware. Values are defined by section B.4.2 of the ACPI specification
@@ -797,8 +790,8 @@ void intel_opregion_register(struct drm_i915_private *dev_priv)
opregion->acpi->csts = 0;
opregion->acpi->drdy = 1;
- system_opregion = opregion;
- register_acpi_notifier(&intel_opregion_notifier);
+ opregion->acpi_notifier.notifier_call = intel_opregion_video_event;
+ register_acpi_notifier(&opregion->acpi_notifier);
}
if (opregion->asle) {
@@ -822,8 +815,8 @@ void intel_opregion_unregister(struct drm_i915_private *dev_priv)
if (opregion->acpi) {
opregion->acpi->drdy = 0;
- system_opregion = NULL;
- unregister_acpi_notifier(&intel_opregion_notifier);
+ unregister_acpi_notifier(&opregion->acpi_notifier);
+ opregion->acpi_notifier.notifier_call = NULL;
}
/* just clear all opregion memory pointers now */
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
index e0e437ba9e51..e8498a8cda3d 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -49,6 +49,7 @@ struct intel_opregion {
u32 vbt_size;
u32 *lid_state;
struct work_struct asle_work;
+ struct notifier_block acpi_notifier;
};
#define OPREGION_SIZE (8 * 1024)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index b443278e569c..4a9f139e7b73 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -375,26 +375,6 @@ out:
pipe_config->gmch_pfit.lvds_border_bits = border;
}
-enum drm_connector_status
-intel_panel_detect(struct drm_i915_private *dev_priv)
-{
- /* Assume that the BIOS does not lie through the OpRegion... */
- if (!i915_modparams.panel_ignore_lid && dev_priv->opregion.lid_state) {
- return *dev_priv->opregion.lid_state & 0x1 ?
- connector_status_connected :
- connector_status_disconnected;
- }
-
- switch (i915_modparams.panel_ignore_lid) {
- case -2:
- return connector_status_connected;
- case -1:
- return connector_status_disconnected;
- default:
- return connector_status_unknown;
- }
-}
-
/**
* scale - scale values from one range to another
* @source_val: value in range [@source_min..@source_max]
@@ -406,11 +386,11 @@ intel_panel_detect(struct drm_i915_private *dev_priv)
* Return @source_val in range [@source_min..@source_max] scaled to range
* [@target_min..@target_max].
*/
-static uint32_t scale(uint32_t source_val,
- uint32_t source_min, uint32_t source_max,
- uint32_t target_min, uint32_t target_max)
+static u32 scale(u32 source_val,
+ u32 source_min, u32 source_max,
+ u32 target_min, u32 target_max)
{
- uint64_t target_val;
+ u64 target_val;
WARN_ON(source_min > source_max);
WARN_ON(target_min > target_max);
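
A hedged standalone version of the mapping the kernel-doc above describes — widen to 64 bits so the multiply cannot overflow — might look like this; it mirrors the documented behaviour, not the exact kernel body.

/* Hedged sketch of scale(): translate val from [smin..smax] to
 * [tmin..tmax] linearly, clamping the input and widening the multiply
 * to 64 bits to avoid overflow. */
#include <stdio.h>
#include <stdint.h>

static uint32_t scale_model(uint32_t val,
                            uint32_t smin, uint32_t smax,
                            uint32_t tmin, uint32_t tmax)
{
        uint64_t tval;

        val = (val < smin) ? smin : (val > smax) ? smax : val;

        /* avoid a divide by zero on a degenerate source range */
        if (smax == smin)
                return tmin;

        tval = (uint64_t)(val - smin) * (tmax - tmin);
        tval /= smax - smin;
        return tmin + (uint32_t)tval;
}

int main(void)
{
        /* e.g. a 0..255 brightness level into a 100..10000 PWM range */
        printf("%u\n", scale_model(128, 0, 255, 100, 10000));
        return 0;
}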
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 39a4e4edda07..849e1b69ba73 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -30,160 +30,6 @@
#include <linux/debugfs.h>
#include "intel_drv.h"
-struct pipe_crc_info {
- const char *name;
- struct drm_i915_private *dev_priv;
- enum pipe pipe;
-};
-
-static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
-{
- struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
-
- if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
- return -ENODEV;
-
- spin_lock_irq(&pipe_crc->lock);
-
- if (pipe_crc->opened) {
- spin_unlock_irq(&pipe_crc->lock);
- return -EBUSY; /* already open */
- }
-
- pipe_crc->opened = true;
- filep->private_data = inode->i_private;
-
- spin_unlock_irq(&pipe_crc->lock);
-
- return 0;
-}
-
-static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
-{
- struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
-
- spin_lock_irq(&pipe_crc->lock);
- pipe_crc->opened = false;
- spin_unlock_irq(&pipe_crc->lock);
-
- return 0;
-}
-
-/* (6 fields, 8 chars each, space separated (5) + '\n') */
-#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
-/* account for \'0' */
-#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
-
-static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
-{
- lockdep_assert_held(&pipe_crc->lock);
- return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
- INTEL_PIPE_CRC_ENTRIES_NR);
-}
-
-static ssize_t
-i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
- loff_t *pos)
-{
- struct pipe_crc_info *info = filep->private_data;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
- char buf[PIPE_CRC_BUFFER_LEN];
- int n_entries;
- ssize_t bytes_read;
-
- /*
- * Don't allow user space to provide buffers not big enough to hold
- * a line of data.
- */
- if (count < PIPE_CRC_LINE_LEN)
- return -EINVAL;
-
- if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
- return 0;
-
- /* nothing to read */
- spin_lock_irq(&pipe_crc->lock);
- while (pipe_crc_data_count(pipe_crc) == 0) {
- int ret;
-
- if (filep->f_flags & O_NONBLOCK) {
- spin_unlock_irq(&pipe_crc->lock);
- return -EAGAIN;
- }
-
- ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
- pipe_crc_data_count(pipe_crc), pipe_crc->lock);
- if (ret) {
- spin_unlock_irq(&pipe_crc->lock);
- return ret;
- }
- }
-
- /* We now have one or more entries to read */
- n_entries = count / PIPE_CRC_LINE_LEN;
-
- bytes_read = 0;
- while (n_entries > 0) {
- struct intel_pipe_crc_entry *entry =
- &pipe_crc->entries[pipe_crc->tail];
-
- if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
- INTEL_PIPE_CRC_ENTRIES_NR) < 1)
- break;
-
- BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
- pipe_crc->tail = (pipe_crc->tail + 1) &
- (INTEL_PIPE_CRC_ENTRIES_NR - 1);
-
- bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
- "%8u %8x %8x %8x %8x %8x\n",
- entry->frame, entry->crc[0],
- entry->crc[1], entry->crc[2],
- entry->crc[3], entry->crc[4]);
-
- spin_unlock_irq(&pipe_crc->lock);
-
- if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
- return -EFAULT;
-
- user_buf += PIPE_CRC_LINE_LEN;
- n_entries--;
-
- spin_lock_irq(&pipe_crc->lock);
- }
-
- spin_unlock_irq(&pipe_crc->lock);
-
- return bytes_read;
-}
-
-static const struct file_operations i915_pipe_crc_fops = {
- .owner = THIS_MODULE,
- .open = i915_pipe_crc_open,
- .read = i915_pipe_crc_read,
- .release = i915_pipe_crc_release,
-};
-
-static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
- {
- .name = "i915_pipe_A_crc",
- .pipe = PIPE_A,
- },
- {
- .name = "i915_pipe_B_crc",
- .pipe = PIPE_B,
- },
- {
- .name = "i915_pipe_C_crc",
- .pipe = PIPE_C,
- },
-};
-
static const char * const pipe_crc_sources[] = {
"none",
"plane1",
@@ -197,29 +43,6 @@ static const char * const pipe_crc_sources[] = {
"auto",
};
-static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
-{
- BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
- return pipe_crc_sources[source];
-}
-
-static int display_crc_ctl_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe)
- seq_printf(m, "%c %s\n", pipe_name(pipe),
- pipe_crc_source_name(dev_priv->pipe_crc[pipe].source));
-
- return 0;
-}
-
-static int display_crc_ctl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, display_crc_ctl_show, inode->i_private);
-}
-
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
uint32_t *val)
{
@@ -616,177 +439,6 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
}
-static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source source)
-{
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- enum intel_display_power_domain power_domain;
- u32 val = 0; /* shut up gcc */
- int ret;
-
- if (pipe_crc->source == source)
- return 0;
-
- /* forbid changing the source without going back to 'none' */
- if (pipe_crc->source && source)
- return -EINVAL;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
- DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
- return -EIO;
- }
-
- ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val, true);
- if (ret != 0)
- goto out;
-
- /* none -> real source transition */
- if (source) {
- struct intel_pipe_crc_entry *entries;
-
- DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
- pipe_name(pipe), pipe_crc_source_name(source));
-
- entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
- sizeof(pipe_crc->entries[0]),
- GFP_KERNEL);
- if (!entries) {
- ret = -ENOMEM;
- goto out;
- }
-
- spin_lock_irq(&pipe_crc->lock);
- kfree(pipe_crc->entries);
- pipe_crc->entries = entries;
- pipe_crc->head = 0;
- pipe_crc->tail = 0;
- spin_unlock_irq(&pipe_crc->lock);
- }
-
- pipe_crc->source = source;
-
- I915_WRITE(PIPE_CRC_CTL(pipe), val);
- POSTING_READ(PIPE_CRC_CTL(pipe));
-
- /* real source -> none transition */
- if (!source) {
- struct intel_pipe_crc_entry *entries;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
-
- DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
- pipe_name(pipe));
-
- drm_modeset_lock(&crtc->base.mutex, NULL);
- if (crtc->base.state->active)
- intel_wait_for_vblank(dev_priv, pipe);
- drm_modeset_unlock(&crtc->base.mutex);
-
- spin_lock_irq(&pipe_crc->lock);
- entries = pipe_crc->entries;
- pipe_crc->entries = NULL;
- pipe_crc->head = 0;
- pipe_crc->tail = 0;
- spin_unlock_irq(&pipe_crc->lock);
-
- kfree(entries);
-
- if (IS_G4X(dev_priv))
- g4x_undo_pipe_scramble_reset(dev_priv, pipe);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_undo_pipe_scramble_reset(dev_priv, pipe);
- else if ((IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
- hsw_pipe_A_crc_wa(dev_priv, false);
- }
-
- ret = 0;
-
-out:
- intel_display_power_put(dev_priv, power_domain);
-
- return ret;
-}
-
-/*
- * Parse pipe CRC command strings:
- * command: wsp* object wsp+ name wsp+ source wsp*
- * object: 'pipe'
- * name: (A | B | C)
- * source: (none | plane1 | plane2 | pf)
- * wsp: (#0x20 | #0x9 | #0xA)+
- *
- * eg.:
- * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
- * "pipe A none" -> Stop CRC
- */
-static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
-{
- int n_words = 0;
-
- while (*buf) {
- char *end;
-
- /* skip leading white space */
- buf = skip_spaces(buf);
- if (!*buf)
- break; /* end of buffer */
-
- /* find end of word */
- for (end = buf; *end && !isspace(*end); end++)
- ;
-
- if (n_words == max_words) {
- DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
- max_words);
- return -EINVAL; /* ran out of words[] before bytes */
- }
-
- if (*end)
- *end++ = '\0';
- words[n_words++] = buf;
- buf = end;
- }
-
- return n_words;
-}
-
-enum intel_pipe_crc_object {
- PIPE_CRC_OBJECT_PIPE,
-};
-
-static const char * const pipe_crc_objects[] = {
- "pipe",
-};
-
-static int
-display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
-{
- int i;
-
- i = match_string(pipe_crc_objects, ARRAY_SIZE(pipe_crc_objects), buf);
- if (i < 0)
- return i;
-
- *o = i;
- return 0;
-}
-
-static int display_crc_ctl_parse_pipe(struct drm_i915_private *dev_priv,
- const char *buf, enum pipe *pipe)
-{
- const char name = buf[0];
-
- if (name < 'A' || name >= pipe_name(INTEL_INFO(dev_priv)->num_pipes))
- return -EINVAL;
-
- *pipe = name - 'A';
-
- return 0;
-}
-
static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
@@ -805,81 +457,6 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
return 0;
}
-static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
- char *buf, size_t len)
-{
-#define N_WORDS 3
- int n_words;
- char *words[N_WORDS];
- enum pipe pipe;
- enum intel_pipe_crc_object object;
- enum intel_pipe_crc_source source;
-
- n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
- if (n_words != N_WORDS) {
- DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
- N_WORDS);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_object(words[0], &object) < 0) {
- DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_pipe(dev_priv, words[1], &pipe) < 0) {
- DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_source(words[2], &source) < 0) {
- DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
- return -EINVAL;
- }
-
- return pipe_crc_set_source(dev_priv, pipe, source);
-}
-
-static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- char *tmpbuf;
- int ret;
-
- if (len == 0)
- return 0;
-
- if (len > PAGE_SIZE - 1) {
- DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
- PAGE_SIZE);
- return -E2BIG;
- }
-
- tmpbuf = memdup_user_nul(ubuf, len);
- if (IS_ERR(tmpbuf))
- return PTR_ERR(tmpbuf);
-
- ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
-
- kfree(tmpbuf);
- if (ret < 0)
- return ret;
-
- *offp += len;
- return len;
-}
-
-const struct file_operations i915_display_crc_ctl_fops = {
- .owner = THIS_MODULE,
- .open = display_crc_ctl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = display_crc_ctl_write
-};
-
void intel_display_crc_init(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -887,30 +464,8 @@ void intel_display_crc_init(struct drm_i915_private *dev_priv)
for_each_pipe(dev_priv, pipe) {
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- pipe_crc->opened = false;
spin_lock_init(&pipe_crc->lock);
- init_waitqueue_head(&pipe_crc->wq);
- }
-}
-
-int intel_pipe_crc_create(struct drm_minor *minor)
-{
- struct drm_i915_private *dev_priv = to_i915(minor->dev);
- struct dentry *ent;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
- struct pipe_crc_info *info = &i915_pipe_crc_data[i];
-
- info->dev_priv = dev_priv;
- ent = debugfs_create_file(info->name, S_IRUGO,
- minor->debugfs_root, info,
- &i915_pipe_crc_fops);
- if (!ent)
- return -ENOMEM;
}
-
- return 0;
}
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 53aaaa3e6886..43ae9de12ba3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6264,42 +6264,15 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
return limits;
}
-static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- int new_power;
u32 threshold_up = 0, threshold_down = 0; /* in % */
u32 ei_up = 0, ei_down = 0;
- new_power = rps->power;
- switch (rps->power) {
- case LOW_POWER:
- if (val > rps->efficient_freq + 1 &&
- val > rps->cur_freq)
- new_power = BETWEEN;
- break;
-
- case BETWEEN:
- if (val <= rps->efficient_freq &&
- val < rps->cur_freq)
- new_power = LOW_POWER;
- else if (val >= rps->rp0_freq &&
- val > rps->cur_freq)
- new_power = HIGH_POWER;
- break;
+ lockdep_assert_held(&rps->power.mutex);
- case HIGH_POWER:
- if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
- val < rps->cur_freq)
- new_power = BETWEEN;
- break;
- }
- /* Max/min bins are special */
- if (val <= rps->min_freq_softlimit)
- new_power = LOW_POWER;
- if (val >= rps->max_freq_softlimit)
- new_power = HIGH_POWER;
- if (new_power == rps->power)
+ if (new_power == rps->power.mode)
return;
/* Note the units here are not exactly 1us, but 1280ns. */
@@ -6362,12 +6335,71 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
GEN6_RP_DOWN_IDLE_AVG);
skip_hw_write:
- rps->power = new_power;
- rps->up_threshold = threshold_up;
- rps->down_threshold = threshold_down;
+ rps->power.mode = new_power;
+ rps->power.up_threshold = threshold_up;
+ rps->power.down_threshold = threshold_down;
+}
+
+static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ int new_power;
+
+ new_power = rps->power.mode;
+ switch (rps->power.mode) {
+ case LOW_POWER:
+ if (val > rps->efficient_freq + 1 &&
+ val > rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+
+ case BETWEEN:
+ if (val <= rps->efficient_freq &&
+ val < rps->cur_freq)
+ new_power = LOW_POWER;
+ else if (val >= rps->rp0_freq &&
+ val > rps->cur_freq)
+ new_power = HIGH_POWER;
+ break;
+
+ case HIGH_POWER:
+ if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+ val < rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+ }
+ /* Max/min bins are special */
+ if (val <= rps->min_freq_softlimit)
+ new_power = LOW_POWER;
+ if (val >= rps->max_freq_softlimit)
+ new_power = HIGH_POWER;
+
+ mutex_lock(&rps->power.mutex);
+ if (rps->power.interactive)
+ new_power = HIGH_POWER;
+ rps_set_power(dev_priv, new_power);
+ mutex_unlock(&rps->power.mutex);
rps->last_adj = 0;
}
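
The mode transitions above form a small hysteresis state machine: the next power mode depends on both the requested frequency and the current mode, with the softlimit bins overriding everything. A compact model (threshold values are illustrative):

/* Hedged model of the power-mode hysteresis: the up/down thresholds
 * differ per current mode, so the machine resists oscillation, and
 * the min/max softlimit bins are special-cased last. */
#include <stdio.h>

enum mode { LOW_POWER, BETWEEN, HIGH_POWER };

struct rps_model {
        enum mode mode;
        int cur, efficient, rp0, rp1, min_soft, max_soft;
};

static enum mode next_mode(const struct rps_model *r, int val)
{
        enum mode m = r->mode;

        switch (r->mode) {
        case LOW_POWER:
                if (val > r->efficient + 1 && val > r->cur)
                        m = BETWEEN;
                break;
        case BETWEEN:
                if (val <= r->efficient && val < r->cur)
                        m = LOW_POWER;
                else if (val >= r->rp0 && val > r->cur)
                        m = HIGH_POWER;
                break;
        case HIGH_POWER:
                if (val < (r->rp1 + r->rp0) / 2 && val < r->cur)
                        m = BETWEEN;
                break;
        }

        /* max/min bins are special */
        if (val <= r->min_soft)
                m = LOW_POWER;
        if (val >= r->max_soft)
                m = HIGH_POWER;
        return m;
}

int main(void)
{
        struct rps_model r = {
                .mode = LOW_POWER, .cur = 300, .efficient = 450,
                .rp0 = 1100, .rp1 = 900, .min_soft = 300, .max_soft = 1100,
        };

        printf("%d\n", next_mode(&r, 600)); /* LOW_POWER -> BETWEEN */
        return 0;
}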
+void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
+{
+ struct intel_rps *rps = &i915->gt_pm.rps;
+
+ if (INTEL_GEN(i915) < 6)
+ return;
+
+ mutex_lock(&rps->power.mutex);
+ if (interactive) {
+ if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake))
+ rps_set_power(i915, HIGH_POWER);
+ } else {
+ GEM_BUG_ON(!rps->power.interactive);
+ rps->power.interactive--;
+ }
+ mutex_unlock(&rps->power.mutex);
+}
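
intel_rps_mark_interactive() is a refcounted boolean under a mutex: the first taker forces HIGH_POWER, later takers merely pin it, and the governor resumes on the last drop. A sketch with locking and the gt.awake check elided:

/* Hedged model of the interactive refcount: boost on the first
 * reference, unpin on the last; nested users only adjust the count. */
#include <stdio.h>

static unsigned int interactive;

static void mark_interactive(int on)
{
        if (on) {
                if (!interactive++)
                        printf("force HIGH_POWER\n");
        } else {
                interactive--;
                if (!interactive)
                        printf("resume normal governor\n");
        }
}

int main(void)
{
        mark_interactive(1); /* first flip: boost */
        mark_interactive(1); /* nested: just pins the boost */
        mark_interactive(0);
        mark_interactive(0); /* last drop: unpin */
        return 0;
}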
+
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
@@ -6780,7 +6812,7 @@ static void reset_rps(struct drm_i915_private *dev_priv,
u8 freq = rps->cur_freq;
/* force a reset */
- rps->power = -1;
+ rps->power.mode = -1;
rps->cur_freq = -1;
if (set(dev_priv, freq))
@@ -7347,11 +7379,11 @@ out:
static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
{
- if (WARN_ON(!dev_priv->vlv_pctx))
- return;
+ struct drm_i915_gem_object *pctx;
- i915_gem_object_put(dev_priv->vlv_pctx);
- dev_priv->vlv_pctx = NULL;
+ pctx = fetch_and_zero(&dev_priv->vlv_pctx);
+ if (pctx)
+ i915_gem_object_put(pctx);
}
static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
@@ -9604,6 +9636,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
mutex_init(&dev_priv->pcu_lock);
+ mutex_init(&dev_priv->gt_pm.rps.power.mutex);
atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index db27f2faa1de..4bd5768731ee 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -56,51 +56,10 @@
#include "intel_drv.h"
#include "i915_drv.h"
-static inline enum intel_display_power_domain
-psr_aux_domain(struct intel_dp *intel_dp)
-{
- /* CNL HW requires corresponding AUX IOs to be powered up for PSR.
- * However, for non-A AUX ports the corresponding non-EDP transcoders
- * would have already enabled power well 2 and DC_OFF. This means we can
- * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
- * specific AUX_IO reference without powering up any extra wells.
- * Note that PSR is enabled only on Port A even though this function
- * returns the correct domain for other ports too.
- */
- return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
- intel_dp->aux_power_domain;
-}
-
-static void psr_aux_io_power_get(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
-
- if (INTEL_GEN(dev_priv) < 10)
- return;
-
- intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
-}
-
-static void psr_aux_io_power_put(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
-
- if (INTEL_GEN(dev_priv) < 10)
- return;
-
- intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
-}
-
void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
{
u32 debug_mask, mask;
- /* No PSR interrupts on VLV/CHV */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return;
-
mask = EDP_PSR_ERROR(TRANSCODER_EDP);
debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
@@ -201,15 +160,6 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
}
}
-static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
-{
- uint8_t psr_caps = 0;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
- return false;
- return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
-}
-
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
uint8_t dprx = 0;
@@ -232,13 +182,13 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
- u8 val = 0;
+ u8 val = 8; /* assume the worst if we can't read the value */
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
else
- DRM_ERROR("Unable to get sink synchronization latency\n");
+ DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
return val;
}
@@ -250,13 +200,25 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
- if (intel_dp->psr_dpcd[0]) {
- dev_priv->psr.sink_support = true;
- DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+ if (!intel_dp->psr_dpcd[0])
+ return;
+ DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
+ intel_dp->psr_dpcd[0]);
+
+ if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
+ DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
+ return;
}
+ dev_priv->psr.sink_support = true;
+ dev_priv->psr.sink_sync_latency =
+ intel_dp_get_sink_sync_latency(intel_dp);
if (INTEL_GEN(dev_priv) >= 9 &&
(intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+ bool y_req = intel_dp->psr_dpcd[1] &
+ DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+ bool alpm = intel_dp_get_alpm_status(intel_dp);
+
/*
* All panels that support PSR version 03h (PSR2 +
* Y-coordinate) can handle Y-coordinates in VSC but we are
@@ -268,49 +230,19 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
* Y-coordinate requirement panels we would need to enable
* GTC first.
*/
- dev_priv->psr.sink_psr2_support =
- intel_dp_get_y_coord_required(intel_dp);
- DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.sink_psr2_support
- ? "supported" : "not supported");
+ dev_priv->psr.sink_psr2_support = y_req && alpm;
+ DRM_DEBUG_KMS("PSR2 %ssupported\n",
+ dev_priv->psr.sink_psr2_support ? "" : "not ");
if (dev_priv->psr.sink_psr2_support) {
dev_priv->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
- dev_priv->psr.alpm =
- intel_dp_get_alpm_status(intel_dp);
- dev_priv->psr.sink_sync_latency =
- intel_dp_get_sink_sync_latency(intel_dp);
}
}
}
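With the rework above, intel_psr_init_dpcd() reduces the PSR2 decision to two sink-reported conditions: the panel must require Y-coordinates in the VSC SDP and must support ALPM. A minimal standalone sketch of that predicate, with illustrative bit values rather than the real DPCD definitions:

#include <stdbool.h>
#include <stdint.h>

#define PSR2_WITH_Y_COORD	0x03		/* assumed PSR version code */
#define Y_COORD_REQUIRED	(1 << 1)	/* assumed caps bit */

/* PSR2 is reported only when the platform is new enough and the sink
 * both requires Y-coordinates and supports ALPM. */
static bool sink_supports_psr2(int gen, uint8_t psr_version,
			       uint8_t psr_caps, bool alpm)
{
	if (gen < 9 || psr_version != PSR2_WITH_Y_COORD)
		return false;

	return (psr_caps & Y_COORD_REQUIRED) && alpm;
}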
-static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t val;
-
- val = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
-}
-
-static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- uint32_t val;
-
- /* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
- val = I915_READ(VLV_VSCSDP(crtc->pipe));
- val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
- val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
- I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
-}
-
-static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
@@ -341,12 +273,6 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}
-static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
-{
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-}
-
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -373,7 +299,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
/* Start with bits set for DDI_AUX_CTL register */
- aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
+ aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
aux_clock_divider);
/* Select only valid bits for SRD_AUX_CTL */
@@ -381,7 +307,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}
-static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
+static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
@@ -389,95 +315,64 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
u8 dpcd_val = DP_PSR_ENABLE;
/* Enable ALPM at sink for psr2 */
- if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm)
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_RECEIVER_ALPM_CONFIG,
- DP_ALPM_ENABLE);
-
- if (dev_priv->psr.psr2_enabled)
+ if (dev_priv->psr.psr2_enabled) {
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
+ DP_ALPM_ENABLE);
dpcd_val |= DP_PSR_ENABLE_PSR2;
+ }
+
if (dev_priv->psr.link_standby)
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+ if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
+ dpcd_val |= DP_PSR_CRC_VERIFICATION;
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
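The consolidated intel_psr_enable_sink() builds the DPCD configuration value incrementally: PSR2 implies enabling ALPM first, and on gen8+ PSR1 additionally requests CRC verification. The value construction in isolation, with hypothetical bit values (the real ones live in the DP DPCD headers):

#include <stdint.h>

#define PSR_ENABLE		(1 << 0)	/* assumed bit layout */
#define PSR_ENABLE_PSR2		(1 << 1)
#define MAIN_LINK_ACTIVE	(1 << 2)
#define CRC_VERIFICATION	(1 << 3)

static uint8_t build_psr_en_cfg(int psr2, int link_standby, int gen)
{
	uint8_t val = PSR_ENABLE;

	if (psr2)			/* ALPM is written separately first */
		val |= PSR_ENABLE_PSR2;
	if (link_standby)
		val |= MAIN_LINK_ACTIVE;
	if (!psr2 && gen >= 8)		/* CRC check only applies to PSR1 */
		val |= CRC_VERIFICATION;

	return val;
}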
-static void vlv_psr_enable_source(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
- /* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
- I915_WRITE(VLV_PSRCTL(crtc->pipe),
- VLV_EDP_PSR_MODE_SW_TIMER |
- VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
- VLV_EDP_PSR_ENABLE);
-}
-
-static void vlv_psr_activate(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
- /*
- * Let's do the transition from PSR_state 1 (inactive) to
- * PSR_state 2 (transition to active - static frame transmission).
- * Then Hardware is responsible for the transition to
- * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
- */
- I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
- VLV_EDP_PSR_ACTIVE_ENTRY);
-}
-
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 max_sleep_time = 0x1f;
+ u32 val = EDP_PSR_ENABLE;
- uint32_t max_sleep_time = 0x1f;
- /*
- * Let's respect VBT in case VBT asks a higher idle_frame value.
- * Let's use 6 as the minimum to cover all known cases including
- * the off-by-one issue that HW has in some cases. Also there are
- * cases where sink should be able to train
- * with the 5 or 6 idle patterns.
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
*/
- uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- uint32_t val = EDP_PSR_ENABLE;
+ int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+ /* sink_sync_latency of 8 means the source has to wait for more than 8
+ * frames; we'll go with 9 frames for now
+ */
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+ val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
if (IS_HASWELL(dev_priv))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (dev_priv->psr.link_standby)
val |= EDP_PSR_LINK_STANDBY;
- if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
- val |= EDP_PSR_TP1_TIME_2500us;
- else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
- val |= EDP_PSR_TP1_TIME_500us;
- else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
+ if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
+ val |= EDP_PSR_TP1_TIME_0us;
+ else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
val |= EDP_PSR_TP1_TIME_100us;
+ else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
+ val |= EDP_PSR_TP1_TIME_500us;
else
- val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_2500us;
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
- val |= EDP_PSR_TP2_TP3_TIME_2500us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
- val |= EDP_PSR_TP2_TP3_TIME_500us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+ if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR_TP2_TP3_TIME_100us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ val |= EDP_PSR_TP2_TP3_TIME_500us;
else
- val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP2_TP3_TIME_2500us;
if (intel_dp_source_supports_hbr2(intel_dp) &&
drm_dp_tps3_supported(intel_dp->dpcd))
@@ -485,6 +380,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
else
val |= EDP_PSR_TP1_TP2_SEL;
+ if (INTEL_GEN(dev_priv) >= 8)
+ val |= EDP_PSR_CRC_ENABLE;
+
val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
I915_WRITE(EDP_PSR_CTL, val);
}
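The VBT wakeup times are now carried in microseconds (tp1_wakeup_time_us) and rounded up to the smallest value the TP1 register field can encode. The same mapping as a standalone function; the enum values are placeholders for the EDP_PSR_TP1_TIME_* register encodings:

#include <stdint.h>

enum { TP1_0US, TP1_100US, TP1_500US, TP1_2500US };	/* placeholder values */

/* Round the requested wakeup time up to the nearest supported step. */
static uint32_t tp1_time_from_us(unsigned int wakeup_time_us)
{
	if (wakeup_time_us == 0)
		return TP1_0US;
	else if (wakeup_time_us <= 100)
		return TP1_100US;
	else if (wakeup_time_us <= 500)
		return TP1_500US;
	else
		return TP1_2500US;
}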
@@ -494,15 +392,15 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- /*
- * Let's respect VBT in case VBT asks a higher idle_frame value.
- * Let's use 6 as the minimum to cover all known cases including
- * the off-by-one issue that HW has in some cases. Also there are
- * cases where sink should be able to train
- * with the 5 or 6 idle patterns.
+ u32 val;
+
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
*/
- uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- u32 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
+ int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
/* FIXME: selective update is probably totally broken because it doesn't
* mesh at all with our frontbuffer tracking. And the hw alone isn't
@@ -513,36 +411,19 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
- val |= EDP_PSR2_TP2_TIME_2500;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
- val |= EDP_PSR2_TP2_TIME_500;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
- val |= EDP_PSR2_TP2_TIME_100;
+ if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
+ val |= EDP_PSR2_TP2_TIME_50us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+ val |= EDP_PSR2_TP2_TIME_100us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ val |= EDP_PSR2_TP2_TIME_500us;
else
- val |= EDP_PSR2_TP2_TIME_50;
+ val |= EDP_PSR2_TP2_TIME_2500us;
I915_WRITE(EDP_PSR2_CTL, val);
}
-static void hsw_psr_activate(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- /* On HSW+ after we enable PSR on source it will activate it
- * as soon as it matches the configured idle_frame count. So
- * we just actually enable it here at activation time.
- */
-
- /* psr1 and psr2 are mutually exclusive.*/
- if (dev_priv->psr.psr2_enabled)
- hsw_activate_psr2(intel_dp);
- else
- hsw_activate_psr1(intel_dp);
-}
-
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -602,17 +483,11 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
* ones. Since by Display design transcoder EDP is tied to port A
* we can safely escape based on the port A.
*/
- if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
+ if (dig_port->base.port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return;
}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- !dev_priv->psr.link_standby) {
- DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
- return;
- }
-
if (IS_HASWELL(dev_priv) &&
I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
S3D_ENABLE) {
@@ -640,11 +515,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
- DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
- return;
- }
-
crtc_state->has_psr = true;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
@@ -656,27 +526,29 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- if (dev_priv->psr.psr2_enabled)
+ if (INTEL_GEN(dev_priv) >= 9)
WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- else
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
- dev_priv->psr.activate(intel_dp);
+ /* psr1 and psr2 are mutually exclusive. */
+ if (dev_priv->psr.psr2_enabled)
+ hsw_activate_psr2(intel_dp);
+ else
+ hsw_activate_psr1(intel_dp);
+
dev_priv->psr.active = true;
}
-static void hsw_psr_enable_source(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void intel_psr_enable_source(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- psr_aux_io_power_get(intel_dp);
-
/* Only HSW and BDW have PSR AUX registers that need to be set up. SKL+
* use hardcoded values for PSR AUX transactions
*/
@@ -712,7 +584,8 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD |
EDP_PSR_DEBUG_MASK_LPSP |
- EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
+ EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
+ EDP_PSR_DEBUG_MASK_MAX_SLEEP);
}
}
@@ -746,64 +619,19 @@ void intel_psr_enable(struct intel_dp *intel_dp,
dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
dev_priv->psr.busy_frontbuffer_bits = 0;
- dev_priv->psr.setup_vsc(intel_dp, crtc_state);
- dev_priv->psr.enable_sink(intel_dp);
- dev_priv->psr.enable_source(intel_dp, crtc_state);
+ intel_psr_setup_vsc(intel_dp, crtc_state);
+ intel_psr_enable_sink(intel_dp);
+ intel_psr_enable_source(intel_dp, crtc_state);
dev_priv->psr.enabled = intel_dp;
- if (INTEL_GEN(dev_priv) >= 9) {
- intel_psr_activate(intel_dp);
- } else {
- /*
- * FIXME: Activation should happen immediately since this
- * function is just called after pipe is fully trained and
- * enabled.
- * However on some platforms we face issues when first
- * activation follows a modeset so quickly.
- * - On VLV/CHV we get bank screen on first activation
- * - On HSW/BDW we get a recoverable frozen screen until
- * next exit-activate sequence.
- */
- schedule_delayed_work(&dev_priv->psr.work,
- msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
- }
+ intel_psr_activate(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-static void vlv_psr_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
- uint32_t val;
-
- if (dev_priv->psr.active) {
- /* Put VLV PSR back to PSR_state 0 (disabled). */
- if (intel_wait_for_register(dev_priv,
- VLV_PSRSTAT(crtc->pipe),
- VLV_EDP_PSR_IN_TRANS,
- 0,
- 1))
- WARN(1, "PSR transition took longer than expected\n");
-
- val = I915_READ(VLV_PSRCTL(crtc->pipe));
- val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
- val &= ~VLV_EDP_PSR_ENABLE;
- val &= ~VLV_EDP_PSR_MODE_MASK;
- I915_WRITE(VLV_PSRCTL(crtc->pipe), val);
-
- dev_priv->psr.active = false;
- } else {
- WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
- }
-}
-
-static void hsw_psr_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state)
+static void
+intel_psr_disable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -842,8 +670,25 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
else
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
}
+}
+
+static void intel_psr_disable_locked(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ lockdep_assert_held(&dev_priv->psr.lock);
+
+ if (!dev_priv->psr.enabled)
+ return;
+
+ intel_psr_disable_source(intel_dp);
- psr_aux_io_power_put(intel_dp);
+ /* Disable PSR on Sink */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+
+ dev_priv->psr.enabled = NULL;
}
/**
@@ -867,23 +712,49 @@ void intel_psr_disable(struct intel_dp *intel_dp,
return;
mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
+ intel_psr_disable_locked(intel_dp);
+ mutex_unlock(&dev_priv->psr.lock);
+ cancel_work_sync(&dev_priv->psr.work);
+}
- dev_priv->psr.disable_source(intel_dp, old_crtc_state);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ i915_reg_t reg;
+ u32 mask;
- /* Disable PSR on Sink */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+ if (!new_crtc_state->has_psr)
+ return 0;
- dev_priv->psr.enabled = NULL;
- mutex_unlock(&dev_priv->psr.lock);
+ /*
+ * The sole user right now is intel_pipe_update_start(),
+ * which won't race with psr_enable/disable, which is
+ * where psr2_enabled is written to. So, we don't need
+ * to acquire the psr.lock. More importantly, we want the
+ * latency inside intel_pipe_update_start() to be as low
+ * as possible, so no need to acquire psr.lock when it is
+ * not needed and will induce latencies in the atomic
+ * update path.
+ */
+ if (dev_priv->psr.psr2_enabled) {
+ reg = EDP_PSR2_STATUS;
+ mask = EDP_PSR2_STATUS_STATE_MASK;
+ } else {
+ reg = EDP_PSR_STATUS;
+ mask = EDP_PSR_STATUS_STATE_MASK;
+ }
- cancel_delayed_work_sync(&dev_priv->psr.work);
+ /*
+ * Max time for PSR to idle = Inverse of the refresh rate +
+ * 6 ms of exit training time + 1.5 ms of aux channel
+ * handshake. 50 ms is defensive enough to cover everything.
+ */
+ return intel_wait_for_register(dev_priv, reg, mask,
+ EDP_PSR_STATUS_STATE_IDLE, 50);
}
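intel_psr_wait_for_idle() is a bounded poll on the PSR status register. A sketch of such a poll loop, assuming trivial read_reg()/sleep_ms() stand-ins rather than the driver's intel_wait_for_register():

#include <stdint.h>

uint32_t read_reg(uint32_t reg);	/* stand-in for an mmio read */
void sleep_ms(unsigned int ms);		/* stand-in for msleep() */

/* Poll until the masked field reaches the expected value or the
 * timeout expires. */
static int wait_for_register(uint32_t reg, uint32_t mask,
			     uint32_t value, unsigned int timeout_ms)
{
	while (timeout_ms--) {
		if ((read_reg(reg) & mask) == value)
			return 0;
		sleep_ms(1);
	}

	return -110;	/* -ETIMEDOUT */
}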
-static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
+static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
struct intel_dp *intel_dp;
i915_reg_t reg;
@@ -894,21 +765,12 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
if (!intel_dp)
return false;
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_enabled) {
- reg = EDP_PSR2_STATUS;
- mask = EDP_PSR2_STATUS_STATE_MASK;
- } else {
- reg = EDP_PSR_STATUS;
- mask = EDP_PSR_STATUS_STATE_MASK;
- }
+ if (dev_priv->psr.psr2_enabled) {
+ reg = EDP_PSR2_STATUS;
+ mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- struct drm_crtc *crtc =
- dp_to_dig_port(intel_dp)->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
- reg = VLV_PSRSTAT(pipe);
- mask = VLV_EDP_PSR_IN_TRANS;
+ reg = EDP_PSR_STATUS;
+ mask = EDP_PSR_STATUS_STATE_MASK;
}
mutex_unlock(&dev_priv->psr.lock);
@@ -925,17 +787,20 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
static void intel_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.work.work);
+ container_of(work, typeof(*dev_priv), psr.work);
mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled)
+ goto unlock;
+
/*
* We have to make sure PSR is ready for re-enable
* otherwise it keeps disabled until next full enable/disable cycle.
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
- if (!psr_wait_for_idle(dev_priv))
+ if (!__psr_wait_for_idle_locked(dev_priv))
goto unlock;
/*
@@ -943,7 +808,7 @@ static void intel_psr_work(struct work_struct *work)
* recheck. Since psr_flush first clears this and then reschedules we
* won't ever miss a flush when bailing out here.
*/
- if (dev_priv->psr.busy_frontbuffer_bits)
+ if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
goto unlock;
intel_psr_activate(dev_priv->psr.enabled);
@@ -953,103 +818,24 @@ unlock:
static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
- struct intel_dp *intel_dp = dev_priv->psr.enabled;
- struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
u32 val;
if (!dev_priv->psr.active)
return;
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
- WARN_ON(!(val & EDP_PSR2_ENABLE));
- I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
- } else {
- val = I915_READ(EDP_PSR_CTL);
- WARN_ON(!(val & EDP_PSR_ENABLE));
- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
- }
+ if (dev_priv->psr.psr2_enabled) {
+ val = I915_READ(EDP_PSR2_CTL);
+ WARN_ON(!(val & EDP_PSR2_ENABLE));
+ I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
} else {
- val = I915_READ(VLV_PSRCTL(pipe));
-
- /*
- * Here we do the transition directly from
- * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
- * PSR_state 5 (exit).
- * PSR State 4 (active with single frame update) can be skipped.
- * On PSR_state 5 (exit) Hardware is responsible to transition
- * back to PSR_state 1 (inactive).
- * Now we are at Same state after vlv_psr_enable_source.
- */
- val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
- I915_WRITE(VLV_PSRCTL(pipe), val);
-
- /*
- * Send AUX wake up - Spec says after transitioning to PSR
- * active we have to send AUX wake up by writing 01h in DPCD
- * 600h of sink device.
- * XXX: This might slow down the transition, but without this
- * HW doesn't complete the transition to PSR_state 1 and we
- * never get the screen updated.
- */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
- DP_SET_POWER_D0);
+ val = I915_READ(EDP_PSR_CTL);
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+ I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
}
-
dev_priv->psr.active = false;
}
/**
- * intel_psr_single_frame_update - Single Frame Update
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * Some platforms support a single frame update feature that is used to
- * send and update only one frame on Remote Frame Buffer.
- * So far it is only implemented for Valleyview and Cherryview because
- * hardware requires this to be done before a page flip.
- */
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits)
-{
- struct drm_crtc *crtc;
- enum pipe pipe;
- u32 val;
-
- if (!CAN_PSR(dev_priv))
- return;
-
- /*
- * Single frame update is already supported on BDW+ but it requires
- * many W/A and it isn't really needed.
- */
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
- return;
-
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
- val = I915_READ(VLV_PSRCTL(pipe));
-
- /*
- * We need to set this bit before writing registers for a flip.
- * This bit will be self-clear when it gets to the PSR active state.
- */
- I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
- }
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-/**
* intel_psr_invalidate - Invalidate PSR
* @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
@@ -1071,7 +857,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
if (!CAN_PSR(dev_priv))
return;
- if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP)
return;
mutex_lock(&dev_priv->psr.lock);
@@ -1114,7 +900,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
if (!CAN_PSR(dev_priv))
return;
- if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP)
return;
mutex_lock(&dev_priv->psr.lock);
@@ -1131,8 +917,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
/* By definition flush = invalidate + flush */
if (frontbuffer_bits) {
- if (dev_priv->psr.psr2_enabled ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (dev_priv->psr.psr2_enabled) {
intel_psr_exit(dev_priv);
} else {
/*
@@ -1149,9 +934,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
}
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
- if (!work_busy(&dev_priv->psr.work.work))
- schedule_delayed_work(&dev_priv->psr.work,
- msecs_to_jiffies(100));
+ schedule_work(&dev_priv->psr.work);
mutex_unlock(&dev_priv->psr.lock);
}
@@ -1184,38 +967,64 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- /* On VLV and CHV only standby mode is supported. */
- dev_priv->psr.link_standby = true;
else
/* For new platforms let's respect VBT back again */
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
- /* Override link_standby x link_off defaults */
- if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
- DRM_DEBUG_KMS("PSR: Forcing link standby\n");
- dev_priv->psr.link_standby = true;
- }
- if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
- DRM_DEBUG_KMS("PSR: Forcing main link off\n");
- dev_priv->psr.link_standby = false;
+ INIT_WORK(&dev_priv->psr.work, intel_psr_work);
+ mutex_init(&dev_priv->psr.lock);
+}
+
+void intel_psr_short_pulse(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_psr *psr = &dev_priv->psr;
+ u8 val;
+ const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
+ DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
+ DP_PSR_LINK_CRC_ERROR;
+
+ if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ return;
+
+ mutex_lock(&psr->lock);
+
+ if (psr->enabled != intel_dp)
+ goto exit;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
+ DRM_ERROR("PSR_STATUS dpcd read failed\n");
+ goto exit;
}
- INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
- mutex_init(&dev_priv->psr.lock);
+ if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
+ DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+ intel_psr_disable_locked(intel_dp);
+ }
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->psr.enable_source = vlv_psr_enable_source;
- dev_priv->psr.disable_source = vlv_psr_disable;
- dev_priv->psr.enable_sink = vlv_psr_enable_sink;
- dev_priv->psr.activate = vlv_psr_activate;
- dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
- } else {
- dev_priv->psr.has_hw_tracking = true;
- dev_priv->psr.enable_source = hsw_psr_enable_source;
- dev_priv->psr.disable_source = hsw_psr_disable;
- dev_priv->psr.enable_sink = hsw_psr_enable_sink;
- dev_priv->psr.activate = hsw_psr_activate;
- dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
+ DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
+ goto exit;
}
+
+ if (val & DP_PSR_RFB_STORAGE_ERROR)
+ DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
+ if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
+ DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
+ if (val & DP_PSR_LINK_CRC_ERROR)
+ DRM_ERROR("PSR Link CRC error, disabling PSR\n");
+
+ if (val & ~errors)
+ DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
+ val & ~errors);
+ if (val & errors)
+ intel_psr_disable_locked(intel_dp);
+ /* clear status register */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
+
+ /* TODO: handle PSR2 errors */
+exit:
+ mutex_unlock(&psr->lock);
}
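The new intel_psr_short_pulse() triages the sink's PSR_ERROR_STATUS byte: any of the three known error bits disables PSR, while unknown bits are only reported. The triage in isolation, with illustrative bit positions:

#include <stdbool.h>
#include <stdint.h>

#define RFB_STORAGE_ERROR	(1 << 1)	/* assumed bit positions */
#define VSC_SDP_ERROR		(1 << 2)
#define LINK_CRC_ERROR		(1 << 3)

/* True when the sink reported an error known to require disabling PSR. */
static bool psr_error_requires_disable(uint8_t status)
{
	const uint8_t known = RFB_STORAGE_ERROR | VSC_SDP_ERROR |
			      LINK_CRC_ERROR;

	return (status & known) != 0;
}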
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8f19349a6055..33faad3197fe 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -496,6 +496,10 @@ static int init_ring_common(struct intel_engine_cs *engine)
DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
engine->name, I915_READ_HEAD(engine));
+ /* Check that the ring offsets point within the ring! */
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
+
intel_ring_update_space(ring);
I915_WRITE_HEAD(engine, ring->head);
I915_WRITE_TAIL(engine, ring->tail);
@@ -520,8 +524,6 @@ static int init_ring_common(struct intel_engine_cs *engine)
goto out;
}
- intel_engine_init_hangcheck(engine);
-
if (INTEL_GEN(dev_priv) > 2)
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
@@ -531,16 +533,33 @@ out:
return ret;
}
-static void reset_ring_common(struct intel_engine_cs *engine,
- struct i915_request *request)
+static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
{
- /*
- * RC6 must be prevented until the reset is complete and the engine
- * reinitialised. If it occurs in the middle of this sequence, the
- * state written to/loaded from the power context is ill-defined (e.g.
- * the PP_BASE_DIR may be lost).
- */
- assert_forcewakes_active(engine->i915, FORCEWAKE_ALL);
+ intel_engine_stop_cs(engine);
+
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ return i915_gem_find_active_request(engine);
+}
+
+static void skip_request(struct i915_request *rq)
+{
+ void *vaddr = rq->ring->vaddr;
+ u32 head;
+
+ head = rq->infix;
+ if (rq->postfix < head) {
+ memset32(vaddr + head, MI_NOOP,
+ (rq->ring->size - head) / sizeof(u32));
+ head = 0;
+ }
+ memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32));
+}
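skip_request() scrubs the hung request's payload with no-ops and must handle the span wrapping past the end of the ring. The wrap handling in isolation; NOOP stands in for MI_NOOP, and offsets are in bytes as in the driver:

#include <stdint.h>

#define NOOP 0u	/* stand-in for MI_NOOP */

static void scrub_ring(uint32_t *ring, uint32_t size,
		       uint32_t head, uint32_t postfix)
{
	if (postfix < head) {
		/* Span wraps: fill to the end of the ring first. */
		for (uint32_t i = head / 4; i < size / 4; i++)
			ring[i] = NOOP;
		head = 0;
	}
	for (uint32_t i = head / 4; i < postfix / 4; i++)
		ring[i] = NOOP;
}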
+
+static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
+{
+ GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0);
/*
* Try to restore the logical GPU state to match the continuation
@@ -556,47 +575,18 @@ static void reset_ring_common(struct intel_engine_cs *engine,
* If the request was innocent, we try to replay the request with
* the restored context.
*/
- if (request) {
- struct drm_i915_private *dev_priv = request->i915;
- struct intel_context *ce = to_intel_context(request->ctx,
- engine);
- struct i915_hw_ppgtt *ppgtt;
-
- if (ce->state) {
- I915_WRITE(CCID,
- i915_ggtt_offset(ce->state) |
- BIT(8) /* must be set! */ |
- CCID_EXTENDED_STATE_SAVE |
- CCID_EXTENDED_STATE_RESTORE |
- CCID_EN);
- }
-
- ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
- if (ppgtt) {
- u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
-
- I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
-
- /* Wait for the PD reload to complete */
- if (intel_wait_for_register(dev_priv,
- RING_PP_DIR_BASE(engine),
- BIT(0), 0,
- 10))
- DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
-
- ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
- }
-
+ if (rq) {
/* If the rq hung, jump to its breadcrumb and skip the batch */
- if (request->fence.error == -EIO)
- request->ring->head = request->postfix;
- } else {
- engine->legacy_active_context = NULL;
- engine->legacy_active_ppgtt = NULL;
+ rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
+ if (rq->fence.error == -EIO)
+ skip_request(rq);
}
}
+static void reset_finish(struct intel_engine_cs *engine)
+{
+}
+
static int intel_rcs_ctx_init(struct i915_request *rq)
{
int ret;
@@ -1033,6 +1023,8 @@ int intel_ring_pin(struct intel_ring *ring,
flags |= PIN_OFFSET_BIAS | offset_bias;
if (vma->obj->stolen)
flags |= PIN_MAPPABLE;
+ else
+ flags |= PIN_HIGH;
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
@@ -1066,6 +1058,8 @@ err:
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
+
ring->tail = tail;
ring->head = tail;
ring->emit = tail;
@@ -1093,6 +1087,7 @@ void intel_ring_unpin(struct intel_ring *ring)
static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{
+ struct i915_address_space *vm = &dev_priv->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -1102,10 +1097,14 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
if (IS_ERR(obj))
return ERR_CAST(obj);
- /* mark ring buffers as read-only from GPU side by default */
- obj->gt_ro = 1;
+ /*
+ * Mark ring buffers as read-only from GPU side (so no stray overwrites)
+ * if supported by the platform's GGTT.
+ */
+ if (vm->has_read_only)
+ i915_gem_object_set_readonly(obj);
- vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma))
goto err;
@@ -1169,10 +1168,46 @@ intel_ring_free(struct intel_ring *ring)
kfree(ring);
}
-static int context_pin(struct intel_context *ce)
+static void intel_ring_context_destroy(struct intel_context *ce)
{
- struct i915_vma *vma = ce->state;
- int ret;
+ GEM_BUG_ON(ce->pin_count);
+
+ if (!ce->state)
+ return;
+
+ GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
+ i915_gem_object_put(ce->state->obj);
+}
+
+static int __context_pin_ppgtt(struct i915_gem_context *ctx)
+{
+ struct i915_hw_ppgtt *ppgtt;
+ int err = 0;
+
+ ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
+ if (ppgtt)
+ err = gen6_ppgtt_pin(ppgtt);
+
+ return err;
+}
+
+static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
+{
+ struct i915_hw_ppgtt *ppgtt;
+
+ ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
+ if (ppgtt)
+ gen6_ppgtt_unpin(ppgtt);
+}
+
+static int __context_pin(struct intel_context *ce)
+{
+ struct i915_vma *vma;
+ int err;
+
+ vma = ce->state;
+ if (!vma)
+ return 0;
/*
* Clear this page out of any CPU caches for coherent swap-in/out.
@@ -1180,13 +1215,43 @@ static int context_pin(struct intel_context *ce)
* on an active context (which by nature is already on the GPU).
*/
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
- if (ret)
- return ret;
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ if (err)
+ return err;
}
- return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
- PIN_GLOBAL | PIN_HIGH);
+ err = i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
+ PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ return err;
+
+ /*
+ * And mark it as a globally pinned object to let the shrinker know
+ * it cannot reclaim the object until we release it.
+ */
+ vma->obj->pin_global++;
+
+ return 0;
+}
+
+static void __context_unpin(struct intel_context *ce)
+{
+ struct i915_vma *vma;
+
+ vma = ce->state;
+ if (!vma)
+ return;
+
+ vma->obj->pin_global--;
+ i915_vma_unpin(vma);
+}
+
+static void intel_ring_context_unpin(struct intel_context *ce)
+{
+ __context_unpin_ppgtt(ce->gem_context);
+ __context_unpin(ce);
+
+ i915_gem_context_put(ce->gem_context);
}
static struct i915_vma *
@@ -1243,7 +1308,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
}
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -1258,81 +1323,79 @@ err_obj:
return ERR_PTR(err);
}
-static struct intel_ring *
-intel_ring_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_context *
+__ring_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx,
+ struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
- int ret;
-
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
- if (likely(ce->pin_count++))
- goto out;
- GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+ int err;
if (!ce->state && engine->context_size) {
struct i915_vma *vma;
vma = alloc_context_vma(engine);
if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
+ err = PTR_ERR(vma);
goto err;
}
ce->state = vma;
}
- if (ce->state) {
- ret = context_pin(ce);
- if (ret)
- goto err;
+ err = __context_pin(ce);
+ if (err)
+ goto err;
- ce->state->obj->pin_global++;
- }
+ err = __context_pin_ppgtt(ce->gem_context);
+ if (err)
+ goto err_unpin;
i915_gem_context_get(ctx);
-out:
/* One ringbuffer to rule them all */
- return engine->buffer;
+ GEM_BUG_ON(!engine->buffer);
+ ce->ring = engine->buffer;
+
+ return ce;
+err_unpin:
+ __context_unpin(ce);
err:
ce->pin_count = 0;
- return ERR_PTR(ret);
+ return ERR_PTR(err);
}
-static void intel_ring_context_unpin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static const struct intel_context_ops ring_context_ops = {
+ .unpin = intel_ring_context_unpin,
+ .destroy = intel_ring_context_destroy,
+};
+
+static struct intel_context *
+intel_ring_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- GEM_BUG_ON(ce->pin_count == 0);
- if (--ce->pin_count)
- return;
+ if (likely(ce->pin_count++))
+ return ce;
+ GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
- if (ce->state) {
- ce->state->obj->pin_global--;
- i915_vma_unpin(ce->state);
- }
+ ce->ops = &ring_context_ops;
- i915_gem_context_put(ctx);
+ return __ring_context_pin(engine, ctx, ce);
}
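intel_ring_context_pin() now keeps only the pin-count fast path and defers the first-pin setup to __ring_context_pin(). The reference-counting pattern in isolation (the struct and setup callback are illustrative, not the driver's types):

#include <stddef.h>

struct ctx {
	unsigned int pin_count;
};

/* Only the first pin performs the expensive setup; later pins just
 * bump the count. On failure the count is rolled back so a retry
 * attempts the setup again. */
static struct ctx *ctx_pin(struct ctx *ce, int (*setup)(struct ctx *))
{
	if (ce->pin_count++)
		return ce;		/* already pinned */

	if (setup(ce)) {
		ce->pin_count = 0;	/* unwind on error */
		return NULL;
	}

	return ce;
}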
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
- struct intel_ring *ring;
struct i915_timeline *timeline;
+ struct intel_ring *ring;
+ unsigned int size;
int err;
intel_engine_setup_common(engine);
- err = intel_engine_init_common(engine);
- if (err)
- goto err;
-
timeline = i915_timeline_create(engine->i915, engine->name);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
@@ -1354,8 +1417,23 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
+ size = PAGE_SIZE;
+ if (HAS_BROKEN_CS_TLB(engine->i915))
+ size = I830_WA_SIZE;
+ err = intel_engine_create_scratch(engine, size);
+ if (err)
+ goto err_unpin;
+
+ err = intel_engine_init_common(engine);
+ if (err)
+ goto err_scratch;
+
return 0;
+err_scratch:
+ intel_engine_cleanup_scratch(engine);
+err_unpin:
+ intel_ring_unpin(ring);
err_ring:
intel_ring_free(ring);
err:
@@ -1392,6 +1470,48 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
intel_ring_reset(engine->buffer, 0);
}
+static int load_pd_dir(struct i915_request *rq,
+ const struct i915_hw_ppgtt *ppgtt)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+ *cs++ = PP_DIR_DCLV_2G;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = ppgtt->pd.base.ggtt_offset << 10;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int flush_pd_dir(struct i915_request *rq)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Stall until the page table load is complete */
+ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+ return 0;
+}
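load_pd_dir() moves the page-directory switch from CPU mmio pokes into the command stream, so it executes in-order with the rest of the request. A sketch of the emission; the LOAD_REGISTER_IMM encoding is assumed here for illustration:

#include <stdint.h>

#define LOAD_REGISTER_IMM(n)	((0x22u << 23) | (2 * (n) - 1))	/* assumed */

static uint32_t *emit_pd_load(uint32_t *cs, uint32_t dclv_reg,
			      uint32_t base_reg, uint32_t pd_offset)
{
	*cs++ = LOAD_REGISTER_IMM(1);
	*cs++ = dclv_reg;
	*cs++ = 0xffffffffu;		/* PP_DIR_DCLV_2G */

	*cs++ = LOAD_REGISTER_IMM(1);
	*cs++ = base_reg;
	*cs++ = pd_offset << 10;	/* page-directory base, as above */

	return cs;
}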
+
static inline int mi_set_context(struct i915_request *rq, u32 flags)
{
struct drm_i915_private *i915 = rq->i915;
@@ -1402,6 +1522,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
(HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
INTEL_INFO(i915)->num_rings - 1 :
0;
+ bool force_restore = false;
int len;
u32 *cs;
@@ -1415,6 +1536,12 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
len = 4;
if (IS_GEN7(i915))
len += 2 + (num_rings ? 4*num_rings + 6 : 0);
+ if (flags & MI_FORCE_RESTORE) {
+ GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
+ flags &= ~MI_FORCE_RESTORE;
+ force_restore = true;
+ len += 2;
+ }
cs = intel_ring_begin(rq, len);
if (IS_ERR(cs))
@@ -1439,9 +1566,29 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
}
}
+ if (force_restore) {
+ /*
+ * The HW doesn't handle being told to restore the current
+ * context very well. Quite often it likes to go off and
+ * sulk, especially when it is meant to be reloading PP_DIR.
+ * A very simple fix to force the reload is to simply switch
+ * away from the current context and back again.
+ *
+ * Note that the kernel_context will contain random state
+ * following the INHIBIT_RESTORE. We accept this since we
+ * never use the kernel_context state; it is merely a
+ * placeholder we use to flush other contexts.
+ */
+ *cs++ = MI_SET_CONTEXT;
+ *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
+ engine)->state) |
+ MI_MM_SPACE_GTT |
+ MI_RESTORE_INHIBIT;
+ }
+
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(to_intel_context(rq->ctx, engine)->state) | flags;
+ *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
@@ -1509,31 +1656,28 @@ static int remap_l3(struct i915_request *rq, int slice)
static int switch_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
- struct i915_gem_context *to_ctx = rq->ctx;
- struct i915_hw_ppgtt *to_mm =
- to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
- struct i915_gem_context *from_ctx = engine->legacy_active_context;
- struct i915_hw_ppgtt *from_mm = engine->legacy_active_ppgtt;
+ struct i915_gem_context *ctx = rq->gem_context;
+ struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+ unsigned int unwind_mm = 0;
u32 hw_flags = 0;
int ret, i;
lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
- if (to_mm != from_mm ||
- (to_mm && intel_engine_flag(engine) & to_mm->pd_dirty_rings)) {
- trace_switch_mm(engine, to_ctx);
- ret = to_mm->switch_mm(to_mm, rq);
+ if (ppgtt) {
+ ret = load_pd_dir(rq, ppgtt);
if (ret)
goto err;
- to_mm->pd_dirty_rings &= ~intel_engine_flag(engine);
- engine->legacy_active_ppgtt = to_mm;
- hw_flags = MI_FORCE_RESTORE;
+ if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
+ unwind_mm = intel_engine_flag(engine);
+ ppgtt->pd_dirty_rings &= ~unwind_mm;
+ hw_flags = MI_FORCE_RESTORE;
+ }
}
- if (to_intel_context(to_ctx, engine)->state &&
- (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
+ if (rq->hw_context->state) {
GEM_BUG_ON(engine->id != RCS);
/*
@@ -1543,35 +1687,38 @@ static int switch_context(struct i915_request *rq)
* as nothing actually executes using the kernel context; it
* is purely used for flushing user contexts.
*/
- if (i915_gem_context_is_kernel(to_ctx))
+ if (i915_gem_context_is_kernel(ctx))
hw_flags = MI_RESTORE_INHIBIT;
ret = mi_set_context(rq, hw_flags);
if (ret)
goto err_mm;
+ }
- engine->legacy_active_context = to_ctx;
+ if (ppgtt) {
+ ret = flush_pd_dir(rq);
+ if (ret)
+ goto err_mm;
}
- if (to_ctx->remap_slice) {
+ if (ctx->remap_slice) {
for (i = 0; i < MAX_L3_SLICES; i++) {
- if (!(to_ctx->remap_slice & BIT(i)))
+ if (!(ctx->remap_slice & BIT(i)))
continue;
ret = remap_l3(rq, i);
if (ret)
- goto err_ctx;
+ goto err_mm;
}
- to_ctx->remap_slice = 0;
+ ctx->remap_slice = 0;
}
return 0;
-err_ctx:
- engine->legacy_active_context = from_ctx;
err_mm:
- engine->legacy_active_ppgtt = from_mm;
+ if (unwind_mm)
+ ppgtt->pd_dirty_rings |= unwind_mm;
err:
return ret;
}
@@ -1580,7 +1727,7 @@ static int ring_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!to_intel_context(request->ctx, request->engine)->pin_count);
+ GEM_BUG_ON(!request->hw_context->pin_count);
/* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
@@ -2006,11 +2153,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
intel_ring_init_semaphores(dev_priv, engine);
engine->init_hw = init_ring_common;
- engine->reset_hw = reset_ring_common;
+ engine->reset.prepare = reset_prepare;
+ engine->reset.reset = reset_ring;
+ engine->reset.finish = reset_finish;
engine->context_pin = intel_ring_context_pin;
- engine->context_unpin = intel_ring_context_unpin;
-
engine->request_alloc = ring_request_alloc;
engine->emit_breadcrumb = i9xx_emit_breadcrumb;
@@ -2074,16 +2221,6 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
if (ret)
return ret;
- if (INTEL_GEN(dev_priv) >= 6) {
- ret = intel_engine_create_scratch(engine, PAGE_SIZE);
- if (ret)
- return ret;
- } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
- ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
- if (ret)
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 010750e8ee44..f5ffa6d31e82 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -122,7 +122,8 @@ struct intel_engine_hangcheck {
int deadlock;
struct intel_instdone instdone;
struct i915_request *active_request;
- bool stalled;
+ bool stalled:1;
+ bool wedged:1;
};
struct intel_ring {
@@ -192,6 +193,11 @@ struct i915_priolist {
int priority;
};
+struct st_preempt_hang {
+ struct completion completion;
+ bool inject_hang;
+};
+
/**
* struct intel_engine_execlists - execlist submission queue and port state
*
@@ -291,32 +297,49 @@ struct intel_engine_execlists {
/**
* @queue: queue of requests, in priority lists
*/
- struct rb_root queue;
+ struct rb_root_cached queue;
/**
- * @first: leftmost level in priority @queue
+ * @csb_read: control register for Context Switch buffer
+ *
+ * Note this register is always in mmio.
*/
- struct rb_node *first;
+ u32 __iomem *csb_read;
/**
- * @fw_domains: forcewake domains for irq tasklet
+ * @csb_write: control register for Context Switch buffer
+ *
+ * Note this register may be either mmio or HWSP shadow.
*/
- unsigned int fw_domains;
+ u32 *csb_write;
/**
- * @csb_head: context status buffer head
+ * @csb_status: status array for Context Switch buffer
+ *
+ * Note these registers may be either mmio or HWSP shadow.
*/
- unsigned int csb_head;
+ u32 *csb_status;
/**
- * @csb_use_mmio: access csb through mmio, instead of hwsp
+ * @preempt_complete_status: expected CSB upon completing preemption
*/
- bool csb_use_mmio;
+ u32 preempt_complete_status;
/**
- * @preempt_complete_status: expected CSB upon completing preemption
+ * @csb_write_reset: reset value for CSB write pointer
+ *
+ * As the CSB write pointer may be either in HWSP or as a field
+ * inside an mmio register, we want to reprogram it slightly
+ * differently to avoid later confusion.
*/
- u32 preempt_complete_status;
+ u32 csb_write_reset;
+
+ /**
+ * @csb_head: context status buffer head
+ */
+ u8 csb_head;
+
+ I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};
#define INTEL_ENGINE_CS_MAX_NAME 8
@@ -342,11 +365,10 @@ struct intel_engine_cs {
struct i915_timeline timeline;
struct drm_i915_gem_object *default_state;
+ void *pinned_default_state;
- atomic_t irq_count;
unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
-#define ENGINE_IRQ_EXECLIST 1
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
@@ -378,6 +400,7 @@ struct intel_engine_cs {
unsigned int hangcheck_interrupts;
unsigned int irq_enabled;
+ unsigned int irq_count;
bool irq_armed : 1;
I915_SELFTEST_DECLARE(bool mock : 1);
@@ -423,18 +446,22 @@ struct intel_engine_cs {
void (*irq_disable)(struct intel_engine_cs *engine);
int (*init_hw)(struct intel_engine_cs *engine);
- void (*reset_hw)(struct intel_engine_cs *engine,
- struct i915_request *rq);
+
+ struct {
+ struct i915_request *(*prepare)(struct intel_engine_cs *engine);
+ void (*reset)(struct intel_engine_cs *engine,
+ struct i915_request *rq);
+ void (*finish)(struct intel_engine_cs *engine);
+ } reset;
void (*park)(struct intel_engine_cs *engine);
void (*unpark)(struct intel_engine_cs *engine);
void (*set_default_submission)(struct intel_engine_cs *engine);
- struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
- void (*context_unpin)(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
+ struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
+
int (*request_alloc)(struct i915_request *rq);
int (*init_context)(struct i915_request *rq);
@@ -550,16 +577,7 @@ struct intel_engine_cs {
* to the kernel context and trash it as the save may not happen
* before the hardware is powered down.
*/
- struct i915_gem_context *last_retired_context;
-
- /* We track the current MI_SET_CONTEXT in order to eliminate
- * redundant context switches. This presumes that requests are not
- * reordered! Or when they are the tracking is updated along with
- * the emission of individual requests into the legacy command
- * stream (ring).
- */
- struct i915_gem_context *legacy_active_context;
- struct i915_hw_ppgtt *legacy_active_ppgtt;
+ struct intel_context *last_retired_context;
/* status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head context_status_notifier;
@@ -672,6 +690,12 @@ execlists_clear_active(struct intel_engine_execlists *execlists,
__clear_bit(bit, (unsigned long *)&execlists->active);
}
+static inline void
+execlists_clear_all_active(struct intel_engine_execlists *execlists)
+{
+ execlists->active = 0;
+}
+
static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
unsigned int bit)
@@ -809,6 +833,19 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
return pos & (ring->size - 1);
}
+static inline bool
+intel_ring_offset_valid(const struct intel_ring *ring,
+ unsigned int pos)
+{
+ if (pos & -ring->size) /* must be strictly within the ring */
+ return false;
+
+ if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
+ return false;
+
+ return true;
+}
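intel_ring_offset_valid() leans on the ring size being a power of two: for such a size, pos & -size is non-zero exactly when pos >= size, and the low three bits must be clear for qword alignment. The check in isolation:

#include <stdbool.h>
#include <stdint.h>

/* Assumes size is a power of two, as ring sizes are. */
static bool offset_valid(uint32_t size, uint32_t pos)
{
	return (pos & -size) == 0 &&	/* strictly within the ring */
	       (pos & 7) == 0;		/* qword aligned */
}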
+
static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
@@ -820,12 +857,7 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
- /* We could combine these into a single tail operation, but keeping
- * them as separate tests will help identify the cause should one
- * ever fire.
- */
- GEM_BUG_ON(!IS_ALIGNED(tail, 8));
- GEM_BUG_ON(tail >= ring->size);
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
/*
* "Ring Buffer Use"
@@ -865,14 +897,19 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
-int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
+int intel_engine_create_scratch(struct intel_engine_cs *engine,
+ unsigned int size);
+void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
+
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
+int intel_engine_stop_cs(struct intel_engine_cs *engine);
+
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
@@ -918,11 +955,10 @@ static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
-static inline void intel_wait_init(struct intel_wait *wait,
- struct i915_request *rq)
+static inline void intel_wait_init(struct intel_wait *wait)
{
wait->tsk = current;
- wait->request = rq;
+ wait->request = NULL;
}
static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
@@ -1042,10 +1078,13 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
return cs;
}
+void intel_engines_sanitize(struct drm_i915_private *i915);
+
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
+void intel_engine_lost_context(struct intel_engine_cs *engine);
void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);
@@ -1123,4 +1162,24 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine);
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+
+static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
+{
+ if (!execlists->preempt_hang.inject_hang)
+ return false;
+
+ complete(&execlists->preempt_hang.completion);
+ return true;
+}
+
+#else
+
+static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
+{
+ return false;
+}
+
+#endif
+
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 53a6eaa9671a..6b5aa3b074ec 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -128,10 +128,20 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "AUX_C";
case POWER_DOMAIN_AUX_D:
return "AUX_D";
+ case POWER_DOMAIN_AUX_E:
+ return "AUX_E";
case POWER_DOMAIN_AUX_F:
return "AUX_F";
case POWER_DOMAIN_AUX_IO_A:
return "AUX_IO_A";
+ case POWER_DOMAIN_AUX_TBT1:
+ return "AUX_TBT1";
+ case POWER_DOMAIN_AUX_TBT2:
+ return "AUX_TBT2";
+ case POWER_DOMAIN_AUX_TBT3:
+ return "AUX_TBT3";
+ case POWER_DOMAIN_AUX_TBT4:
+ return "AUX_TBT4";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
@@ -382,7 +392,8 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
u32 val;
if (wait_fuses) {
- pg = SKL_PW_TO_PG(id);
+ pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_TO_PG(id) :
+ SKL_PW_TO_PG(id);
/*
* For PW1 we have to wait both for the PW0/PG0 fuse state
* before enabling the power well and PW1/PG1's own fuse
@@ -428,6 +439,43 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
+#define ICL_AUX_PW_TO_PORT(pw) ((pw) - ICL_DISP_PW_AUX_A)
+
+static void
+icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = power_well->id;
+ enum port port = ICL_AUX_PW_TO_PORT(id);
+ u32 val;
+
+ val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
+ I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
+
+ val = I915_READ(ICL_PORT_CL_DW12(port));
+ I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well);
+}
+
+static void
+icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = power_well->id;
+ enum port port = ICL_AUX_PW_TO_PORT(id);
+ u32 val;
+
+ val = I915_READ(ICL_PORT_CL_DW12(port));
+ I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
+
+ val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
+ I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
+ val & ~HSW_PWR_WELL_CTL_REQ(id));
+
+ hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
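The ICL combo PHY AUX callbacks mirror each other: enable raises the power-well request bit and then the AUX lane enable, while disable clears them in the reverse order. The ordering in isolation; the register helpers and bit names are assumptions, not driver API:

#include <stdint.h>

uint32_t read_reg(uint32_t reg);	/* stand-ins for I915_READ/WRITE */
void write_reg(uint32_t reg, uint32_t val);

#define REQ	(1u << 31)	/* assumed request bit */
#define LANE_EN	(1u << 0)	/* assumed AUX lane enable bit */

static void aux_well_set(uint32_t ctl_reg, uint32_t phy_reg, int enable)
{
	if (enable) {
		write_reg(ctl_reg, read_reg(ctl_reg) | REQ);
		write_reg(phy_reg, read_reg(phy_reg) | LANE_EN);
	} else {
		write_reg(phy_reg, read_reg(phy_reg) & ~LANE_EN);
		write_reg(ctl_reg, read_reg(ctl_reg) & ~REQ);
	}
}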
+
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -1822,6 +1870,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
@@ -1894,6 +1943,105 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
+/*
+ * ICL PW_0/PG_0 domains (HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - central power except FBC
+ * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
+ * ICL PW_1/PG_1 domains (HW/DMC control):
+ * - DBUF function
+ * - PIPE_A and its planes, except VGA
+ * - transcoder EDP + PSR
+ * - transcoder DSI
+ * - DDI_A
+ * - FBC
+ */
+#define ICL_PW_4_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /* VDSC/joining */
+#define ICL_PW_3_POWER_DOMAINS ( \
+ ICL_PW_4_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /*
+ * - transcoder WD
+ * - KVMR (HW control)
+ */
+#define ICL_PW_2_POWER_DOMAINS ( \
+ ICL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /*
+ * - eDP/DSI VDSC
+ * - KVMR (HW control)
+ */
+#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ ICL_PW_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define ICL_DDI_IO_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
+#define ICL_DDI_IO_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
+#define ICL_DDI_IO_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
+#define ICL_DDI_IO_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
+#define ICL_DDI_IO_E_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
+#define ICL_DDI_IO_F_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
+
+#define ICL_AUX_A_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_A))
+#define ICL_AUX_B_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B))
+#define ICL_AUX_C_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C))
+#define ICL_AUX_D_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D))
+#define ICL_AUX_E_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E))
+#define ICL_AUX_F_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F))
+#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1))
+#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2))
+#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3))
+#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4))
+
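Each *_POWER_DOMAINS macro above is just a 64-bit OR of per-domain bits, and at runtime the power-domain code walks the well table comparing those masks against the requested domain. A toy version of that lookup under made-up domain names (the real driver takes a reference on every matching well, not only the first):

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

enum { DOM_PIPE_C, DOM_AUX_B, DOM_AUX_TBT1, DOM_INIT };

struct power_well { const char *name; uint64_t domains; };

static const struct power_well wells[] = {
	{ "AUX TBT1", BIT_ULL(DOM_AUX_TBT1) },
	{ "PW_4",     BIT_ULL(DOM_PIPE_C) | BIT_ULL(DOM_INIT) },
	{ "PW_3",     BIT_ULL(DOM_PIPE_C) | BIT_ULL(DOM_AUX_B) | BIT_ULL(DOM_INIT) },
};

/* Return the first well whose domain mask covers @domain. */
static const struct power_well *well_for_domain(int domain)
{
	unsigned int i;

	for (i = 0; i < sizeof(wells) / sizeof(wells[0]); i++)
		if (wells[i].domains & BIT_ULL(domain))
			return &wells[i];
	return NULL;
}

int main(void)
{
	printf("%s\n", well_for_domain(DOM_AUX_TBT1)->name); /* "AUX TBT1" */
	printf("%s\n", well_for_domain(DOM_AUX_B)->name);    /* "PW_3" */
	return 0;
}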
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
@@ -2451,6 +2599,157 @@ static struct i915_power_well cnl_power_wells[] = {
},
};
+static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = icl_combo_phy_aux_power_well_enable,
+ .disable = icl_combo_phy_aux_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static struct i915_power_well icl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = I915_DISP_PW_ALWAYS_ON,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_1,
+ .hsw.has_fuses = true,
+ },
+ {
+ .name = "power well 2",
+ .domains = ICL_PW_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_2,
+ .hsw.has_fuses = true,
+ },
+ {
+ .name = "DC off",
+ .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = SKL_DISP_PW_DC_OFF,
+ },
+ {
+ .name = "power well 3",
+ .domains = ICL_PW_3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_3,
+ .hsw.irq_pipe_mask = BIT(PIPE_B),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ {
+ .name = "DDI A IO",
+ .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_A,
+ },
+ {
+ .name = "DDI B IO",
+ .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_B,
+ },
+ {
+ .name = "DDI C IO",
+ .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_C,
+ },
+ {
+ .name = "DDI D IO",
+ .domains = ICL_DDI_IO_D_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_D,
+ },
+ {
+ .name = "DDI E IO",
+ .domains = ICL_DDI_IO_E_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_E,
+ },
+ {
+ .name = "DDI F IO",
+ .domains = ICL_DDI_IO_F_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_DDI_F,
+ },
+ {
+ .name = "AUX A",
+ .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = ICL_DISP_PW_AUX_A,
+ },
+ {
+ .name = "AUX B",
+ .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = ICL_DISP_PW_AUX_B,
+ },
+ {
+ .name = "AUX C",
+ .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_C,
+ },
+ {
+ .name = "AUX D",
+ .domains = ICL_AUX_D_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_D,
+ },
+ {
+ .name = "AUX E",
+ .domains = ICL_AUX_E_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_E,
+ },
+ {
+ .name = "AUX F",
+ .domains = ICL_AUX_F_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_F,
+ },
+ {
+ .name = "AUX TBT1",
+ .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_TBT1,
+ },
+ {
+ .name = "AUX TBT2",
+ .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_TBT2,
+ },
+ {
+ .name = "AUX TBT3",
+ .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_TBT3,
+ },
+ {
+ .name = "AUX TBT4",
+ .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_AUX_TBT4,
+ },
+ {
+ .name = "power well 4",
+ .domains = ICL_PW_4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_4,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_C),
+ },
+};
+
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@@ -2468,7 +2767,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
- if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) {
max_dc = 2;
mask = 0;
} else if (IS_GEN9_LP(dev_priv)) {
@@ -2556,7 +2855,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_HASWELL(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ set_power_wells(power_domains, icl_power_wells);
+ } else if (IS_HASWELL(dev_priv)) {
set_power_wells(power_domains, hsw_power_wells);
} else if (IS_BROADWELL(dev_priv)) {
set_power_wells(power_domains, bdw_power_wells);
@@ -2911,6 +3212,7 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
+ /* fall through */
case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
break;
@@ -3023,6 +3325,8 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
static void icl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
enum port port;
u32 val;
@@ -3051,8 +3355,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
I915_WRITE(ICL_PORT_CL_DW5(port), val);
}
- /* 4. Enable power well 1 (PG1) and aux IO power. */
- /* FIXME: ICL power wells code not here yet. */
+ /*
+ * 4. Enable Power Well 1 (PG1).
+ * The AUX IO power wells will be enabled on demand.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
/* 5. Enable CDCLK. */
icl_init_cdclk(dev_priv);
@@ -3070,6 +3380,8 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
enum port port;
u32 val;
@@ -3083,8 +3395,15 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 3. Disable CD clock */
icl_uninit_cdclk(dev_priv);
- /* 4. Disable Power Well 1 (PG1) and Aux IO Power */
- /* FIXME: ICL power wells code not here yet. */
+ /*
+ * 4. Disable Power Well 1 (PG1).
+ * The AUX IO power wells are toggled on demand, so they are already
+ * disabled at this point.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
/* 5. Disable Comp */
for (port = PORT_A; port <= PORT_B; port++) {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 26975df4e593..812fe7b06f87 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1340,6 +1340,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
switch (crtc_state->pixel_multiplier) {
default:
WARN(1, "unknown pixel multiplier specified\n");
+ /* fall through */
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -1400,33 +1401,40 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
- if (active_outputs & intel_sdvo_connector->output_flag)
- return true;
+ return active_outputs & intel_sdvo_connector->output_flag;
+}
+
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t sdvo_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = I915_READ(sdvo_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT;
+ else if (IS_CHERRYVIEW(dev_priv))
+ *pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV;
else
- return false;
+ *pipe = (val & SDVO_PIPE_SEL_MASK) >> SDVO_PIPE_SEL_SHIFT;
+
+ return val & SDVO_ENABLE;
}
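intel_sdvo_port_enabled() separates two concerns: the pipe-select field is decoded unconditionally, since the asserts want the pipe even for a disabled port, while the return value reflects only the enable bit. The mask-and-shift decode is the usual register-field idiom; a self-contained sketch with an illustrative, not authoritative, field layout:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PORT_ENABLE     (1u << 31)
#define PIPE_SEL_SHIFT  29
#define PIPE_SEL_MASK   (3u << PIPE_SEL_SHIFT)  /* assumed 2-bit field */

static bool port_enabled(uint32_t val, int *pipe)
{
	/* Decode the pipe even when the port is off. */
	*pipe = (val & PIPE_SEL_MASK) >> PIPE_SEL_SHIFT;
	return val & PORT_ENABLE;
}

int main(void)
{
	int pipe;
	bool on = port_enabled(PORT_ENABLE | (1u << PIPE_SEL_SHIFT), &pipe);

	printf("enabled=%d pipe=%d\n", on, pipe); /* enabled=1 pipe=1 */
	return 0;
}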
static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u16 active_outputs = 0;
- u32 tmp;
+ bool ret;
- tmp = I915_READ(intel_sdvo->sdvo_reg);
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
- if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
- return false;
+ ret = intel_sdvo_port_enabled(dev_priv, intel_sdvo->sdvo_reg, pipe);
- if (HAS_PCH_CPT(dev_priv))
- *pipe = PORT_TO_PIPE_CPT(tmp);
- else
- *pipe = PORT_TO_PIPE(tmp);
-
- return true;
+ return ret || active_outputs;
}
static void intel_sdvo_get_config(struct intel_encoder *encoder,
@@ -1553,8 +1561,8 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- temp &= ~SDVO_PIPE_B_SELECT;
- temp |= SDVO_ENABLE;
+ temp &= ~SDVO_PIPE_SEL_MASK;
+ temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
intel_sdvo_write_sdvox(intel_sdvo, temp);
temp &= ~SDVO_ENABLE;
@@ -1903,7 +1911,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
if (edid != NULL) {
if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
edid)) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
@@ -2306,14 +2314,19 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
switch (sdvo->controlled_output) {
case SDVO_OUTPUT_LVDS1:
mask |= SDVO_OUTPUT_LVDS1;
+ /* fall through */
case SDVO_OUTPUT_LVDS0:
mask |= SDVO_OUTPUT_LVDS0;
+ /* fall through */
case SDVO_OUTPUT_TMDS1:
mask |= SDVO_OUTPUT_TMDS1;
+ /* fall through */
case SDVO_OUTPUT_TMDS0:
mask |= SDVO_OUTPUT_TMDS0;
+ /* fall through */
case SDVO_OUTPUT_RGB1:
mask |= SDVO_OUTPUT_RGB1;
+ /* fall through */
case SDVO_OUTPUT_RGB0:
mask |= SDVO_OUTPUT_RGB0;
break;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index ee23613f9fd4..f7026e887fa9 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -41,20 +41,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-bool intel_format_is_yuv(u32 format)
-{
- switch (format) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_NV12:
- return true;
- default:
- return false;
- }
-}
-
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
{
@@ -107,13 +93,21 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
VBLANK_EVASION_TIME_US);
max = vblank_start - 1;
- local_irq_disable();
-
if (min <= 0 || max <= 0)
- return;
+ goto irq_disable;
if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
- return;
+ goto irq_disable;
+
+ /*
+	 * Wait for PSR to idle out after enabling the VBL interrupts.
+	 * VBL interrupts will start the PSR exit and prevent a PSR
+ * re-entry as well.
+ */
+ if (intel_psr_wait_for_idle(new_crtc_state))
+ DRM_ERROR("PSR idle timed out, atomic update may fail\n");
+
+ local_irq_disable();
crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
@@ -171,6 +165,10 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
trace_i915_pipe_update_vblank_evaded(crtc);
+ return;
+
+irq_disable:
+ local_irq_disable();
}
/**
@@ -284,13 +282,35 @@ skl_update_plane(struct intel_plane *plane,
/* program plane scaler */
if (plane_state->scaler_id >= 0) {
int scaler_id = plane_state->scaler_id;
- const struct intel_scaler *scaler;
+ const struct intel_scaler *scaler =
+ &crtc_state->scaler_state.scalers[scaler_id];
+ u16 y_hphase, uv_rgb_hphase;
+ u16 y_vphase, uv_rgb_vphase;
+
+ /* TODO: handle sub-pixel coordinates */
+ if (fb->format->format == DRM_FORMAT_NV12) {
+ y_hphase = skl_scaler_calc_phase(1, false);
+ y_vphase = skl_scaler_calc_phase(1, false);
+
+ /* MPEG2 chroma siting convention */
+ uv_rgb_hphase = skl_scaler_calc_phase(2, true);
+ uv_rgb_vphase = skl_scaler_calc_phase(2, false);
+ } else {
+ /* not used */
+ y_hphase = 0;
+ y_vphase = 0;
- scaler = &crtc_state->scaler_state.scalers[scaler_id];
+ uv_rgb_hphase = skl_scaler_calc_phase(1, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+ }
I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+ I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
((crtc_w + 1) << 16)|(crtc_h + 1));
@@ -327,19 +347,21 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
bool
-skl_plane_get_hw_state(struct intel_plane *plane)
+skl_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE;
+ ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -380,7 +402,7 @@ chv_update_csc(const struct intel_plane_state *plane_state)
const s16 *csc = csc_matrix[plane_state->base.color_encoding];
/* Seems RGB data bypasses the CSC always */
- if (!intel_format_is_yuv(fb->format->format))
+ if (!fb->format->is_yuv)
return;
I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
@@ -415,7 +437,7 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
enum plane_id plane_id = plane->id;
int contrast, brightness, sh_scale, sh_sin, sh_cos;
- if (intel_format_is_yuv(fb->format->format) &&
+ if (fb->format->is_yuv &&
plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
/*
* Expand limited range to full range:
@@ -588,19 +610,21 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
static bool
-vlv_plane_get_hw_state(struct intel_plane *plane)
+vlv_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE;
+ ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -754,18 +778,20 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
static bool
-ivb_plane_get_hw_state(struct intel_plane *plane)
+ivb_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE;
+ ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -910,18 +936,20 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
}
static bool
-g4x_plane_get_hw_state(struct intel_plane *plane)
+g4x_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
- enum pipe pipe = plane->pipe;
bool ret;
- power_domain = POWER_DOMAIN_PIPE(pipe);
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE;
+ ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
+
+ *pipe = plane->pipe;
intel_display_power_put(dev_priv, power_domain);
@@ -1010,7 +1038,7 @@ intel_check_sprite_plane(struct intel_plane *plane,
src->y1 = src_y << 16;
src->y2 = (src_y + src_h) << 16;
- if (intel_format_is_yuv(fb->format->format) &&
+ if (fb->format->is_yuv &&
fb->format->format != DRM_FORMAT_NV12 &&
(src_x % 2 || src_w % 2)) {
DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
@@ -1071,6 +1099,37 @@ intel_check_sprite_plane(struct intel_plane *plane,
return 0;
}
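The src x/w evenness check in the hunk above falls out of 4:2:2 chroma subsampling: packed YUV formats carry one chroma pair per two luma pixels, so the source window has to start on and span whole two-pixel groups. The same test as a standalone sketch:

#include <stdbool.h>
#include <stdio.h>

/* Packed 4:2:2 YUV stores one chroma pair per two luma pixels, so the
 * source window must be two-pixel aligned horizontally. */
static bool yuv422_src_ok(unsigned int src_x, unsigned int src_w)
{
	return !(src_x % 2) && !(src_w % 2);
}

int main(void)
{
	printf("%d %d\n", yuv422_src_ok(0, 640), yuv422_src_ok(1, 640)); /* 1 0 */
	return 0;
}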
+static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+{
+ return INTEL_GEN(dev_priv) >= 9;
+}
+
+static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
+ const struct drm_intel_sprite_colorkey *set)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+
+ *key = *set;
+
+ /*
+ * We want src key enabled on the
+ * sprite and not on the primary.
+ */
+ if (plane->id == PLANE_PRIMARY &&
+ set->flags & I915_SET_COLORKEY_SOURCE)
+ key->flags = 0;
+
+ /*
+ * On SKL+ we want dst key enabled on
+ * the primary and not on the sprite.
+ */
+ if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ key->flags = 0;
+}
+
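intel_plane_set_ckey() copies the user's key into each affected plane state and then clears the flags that do not belong on that plane: source keying stays off the primary, and on SKL+ destination keying stays off the sprites. The routing reduced to a standalone sketch, with stand-in values for the I915_SET_COLORKEY_* bits:

#include <stdbool.h>
#include <stdio.h>

#define KEY_SOURCE      (1u << 0)  /* stand-in for I915_SET_COLORKEY_SOURCE */
#define KEY_DESTINATION (1u << 1)  /* stand-in for I915_SET_COLORKEY_DESTINATION */

static unsigned int route_flags(unsigned int set_flags, bool is_primary,
				bool gen9_plus)
{
	unsigned int flags = set_flags;

	/* Source keying lives on the sprite, not on the primary. */
	if (is_primary && (set_flags & KEY_SOURCE))
		flags = 0;

	/* On SKL+ destination keying lives on the primary, not the sprite. */
	if (gen9_plus && !is_primary && (set_flags & KEY_DESTINATION))
		flags = 0;

	return flags;
}

int main(void)
{
	/* A gen9 destination key is kept on the primary, dropped on the sprite. */
	printf("%u %u\n", route_flags(KEY_DESTINATION, true, true),
	       route_flags(KEY_DESTINATION, false, true)); /* prints "2 0" */
	return 0;
}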
int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1100,6 +1159,16 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
return -ENOENT;
+ /*
+	 * On SKL+ only plane 2 can do destination keying against plane 1.
+ * Also multiple planes can't do destination keying on the same
+ * pipe simultaneously.
+ */
+ if (INTEL_GEN(dev_priv) >= 9 &&
+ to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(plane->dev);
@@ -1112,11 +1181,28 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
while (1) {
plane_state = drm_atomic_get_plane_state(state, plane);
ret = PTR_ERR_OR_ZERO(plane_state);
- if (!ret) {
- to_intel_plane_state(plane_state)->ckey = *set;
- ret = drm_atomic_commit(state);
+ if (!ret)
+ intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+
+ /*
+ * On some platforms we have to configure
+ * the dst colorkey on the primary plane.
+ */
+ if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+ struct intel_crtc *crtc =
+ intel_get_crtc_for_pipe(dev_priv,
+ to_intel_plane(plane)->pipe);
+
+ plane_state = drm_atomic_get_plane_state(state,
+ crtc->base.primary);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret)
+ intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
}
+ if (!ret)
+ ret = drm_atomic_commit(state);
+
if (ret != -EDEADLK)
break;
@@ -1211,8 +1297,17 @@ static const uint64_t skl_plane_format_modifiers_ccs[] = {
DRM_FORMAT_MOD_INVALID
};
-static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
+static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_YUYV:
@@ -1228,8 +1323,17 @@ static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool snb_mod_supported(uint32_t format, uint64_t modifier)
+static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@@ -1246,8 +1350,17 @@ static bool snb_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
+static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_ABGR8888:
@@ -1269,8 +1382,26 @@ static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool skl_mod_supported(uint32_t format, uint64_t modifier)
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ break;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (!plane->has_ccs)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
@@ -1302,38 +1433,48 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
}
}
-static bool intel_sprite_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
-
- if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
- return false;
+static const struct drm_plane_funcs g4x_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = g4x_sprite_format_mod_supported,
+};
- if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
- modifier != DRM_FORMAT_MOD_LINEAR)
- return false;
+static const struct drm_plane_funcs snb_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = snb_sprite_format_mod_supported,
+};
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_mod_supported(format, modifier);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_mod_supported(format, modifier);
- else if (INTEL_GEN(dev_priv) >= 6)
- return snb_mod_supported(format, modifier);
- else
- return g4x_mod_supported(format, modifier);
-}
+static const struct drm_plane_funcs vlv_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = vlv_sprite_format_mod_supported,
+};
-static const struct drm_plane_funcs intel_sprite_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_sprite_plane_format_mod_supported,
+static const struct drm_plane_funcs skl_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = skl_plane_format_mod_supported,
};
bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
@@ -1359,6 +1500,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
{
struct intel_plane *intel_plane = NULL;
struct intel_plane_state *state = NULL;
+ const struct drm_plane_funcs *plane_funcs;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
const uint64_t *modifiers;
@@ -1383,6 +1525,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->can_scale = true;
state->scaler_id = -1;
+ intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
+ PLANE_SPRITE0 + plane);
+
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
intel_plane->get_hw_state = skl_plane_get_hw_state;
@@ -1396,10 +1541,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
}
- if (skl_plane_has_ccs(dev_priv, pipe, PLANE_SPRITE0 + plane))
+ if (intel_plane->has_ccs)
modifiers = skl_plane_format_modifiers_ccs;
else
modifiers = skl_plane_format_modifiers_noccs;
+
+ plane_funcs = &skl_plane_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
@@ -1411,6 +1558,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
modifiers = i9xx_plane_format_modifiers;
+
+ plane_funcs = &vlv_sprite_funcs;
} else if (INTEL_GEN(dev_priv) >= 7) {
if (IS_IVYBRIDGE(dev_priv)) {
intel_plane->can_scale = true;
@@ -1427,6 +1576,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
modifiers = i9xx_plane_format_modifiers;
+
+ plane_funcs = &snb_sprite_funcs;
} else {
intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
@@ -1439,9 +1590,13 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
if (IS_GEN6(dev_priv)) {
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+
+ plane_funcs = &snb_sprite_funcs;
} else {
plane_formats = g4x_plane_formats;
num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
+
+ plane_funcs = &g4x_sprite_funcs;
}
}
@@ -1468,14 +1623,14 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 9)
ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, &intel_sprite_plane_funcs,
+ possible_crtcs, plane_funcs,
plane_formats, num_plane_formats,
modifiers,
DRM_PLANE_TYPE_OVERLAY,
"plane %d%c", plane + 2, pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, &intel_sprite_plane_funcs,
+ possible_crtcs, plane_funcs,
plane_formats, num_plane_formats,
modifiers,
DRM_PLANE_TYPE_OVERLAY,
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index b55b5c157e38..b5b04cb892e9 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -798,16 +798,12 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
static bool
intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp = I915_READ(TV_CTL);
- if (!(tmp & TV_ENC_ENABLE))
- return false;
+ *pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT;
- *pipe = PORT_TO_PIPE(tmp);
-
- return true;
+ return tmp & TV_ENC_ENABLE;
}
static void
@@ -1032,8 +1028,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
break;
}
- if (intel_crtc->pipe == 1)
- tv_ctl |= TV_ENC_PIPEB_SELECT;
+ tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
tv_ctl |= tv_mode->oversample;
if (tv_mode->progressive)
@@ -1157,12 +1152,9 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
/* Poll for TV detection */
- tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
+ tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
- if (intel_crtc->pipe == 1)
- tv_ctl |= TV_ENC_PIPEB_SELECT;
- else
- tv_ctl &= ~TV_ENC_PIPEB_SELECT;
+ tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
tv_dac |= (TVDAC_STATE_CHG_EN |
@@ -1355,8 +1347,7 @@ intel_tv_get_modes(struct drm_connector *connector)
mode_ptr = drm_mode_create(connector->dev);
if (!mode_ptr)
continue;
- strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
- mode_ptr->name[DRM_DISPLAY_MODE_LEN - 1] = '\0';
+ strlcpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
mode_ptr->hdisplay = hactive_s;
mode_ptr->hsync_start = hactive_s + 1;
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 1cffaf7b5dbe..7c95697e1a35 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -50,10 +50,10 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
return ret;
}
-static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
+static int __get_platform_enable_guc(struct drm_i915_private *i915)
{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ struct intel_uc_fw *guc_fw = &i915->guc.fw;
+ struct intel_uc_fw *huc_fw = &i915->huc.fw;
int enable_guc = 0;
/* Default is to enable GuC/HuC if we know their firmwares */
@@ -67,11 +67,11 @@ static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
return enable_guc;
}
-static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
+static int __get_default_guc_log_level(struct drm_i915_private *i915)
{
int guc_log_level;
- if (!HAS_GUC(dev_priv) || !intel_uc_is_using_guc())
+ if (!HAS_GUC(i915) || !intel_uc_is_using_guc())
guc_log_level = GUC_LOG_LEVEL_DISABLED;
else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -86,7 +86,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
/**
* sanitize_options_early - sanitize uC related modparam options
- * @dev_priv: device private
+ * @i915: device private
*
* In case of "enable_guc" option this function will attempt to modify
* it only if it was initially set to "auto(-1)". Default value for this
@@ -101,14 +101,14 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
* unless GuC is enabled on given platform and the driver is compiled with
* debug config when this modparam will default to "enable(1..4)".
*/
-static void sanitize_options_early(struct drm_i915_private *dev_priv)
+static void sanitize_options_early(struct drm_i915_private *i915)
{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ struct intel_uc_fw *guc_fw = &i915->guc.fw;
+ struct intel_uc_fw *huc_fw = &i915->huc.fw;
/* A negative value means "use platform default" */
if (i915_modparams.enable_guc < 0)
- i915_modparams.enable_guc = __get_platform_enable_guc(dev_priv);
+ i915_modparams.enable_guc = __get_platform_enable_guc(i915);
DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
i915_modparams.enable_guc,
@@ -119,28 +119,28 @@ static void sanitize_options_early(struct drm_i915_private *dev_priv)
if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"enable_guc", i915_modparams.enable_guc,
- !HAS_GUC(dev_priv) ? "no GuC hardware" :
- "no GuC firmware");
+ !HAS_GUC(i915) ? "no GuC hardware" :
+ "no GuC firmware");
}
/* Verify HuC firmware availability */
if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"enable_guc", i915_modparams.enable_guc,
- !HAS_HUC(dev_priv) ? "no HuC hardware" :
- "no HuC firmware");
+ !HAS_HUC(i915) ? "no HuC hardware" :
+ "no HuC firmware");
}
/* A negative value means "use platform/config default" */
if (i915_modparams.guc_log_level < 0)
i915_modparams.guc_log_level =
- __get_default_guc_log_level(dev_priv);
+ __get_default_guc_log_level(i915);
if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc()) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"guc_log_level", i915_modparams.guc_log_level,
- !HAS_GUC(dev_priv) ? "no GuC hardware" :
- "GuC not enabled");
+ !HAS_GUC(i915) ? "no GuC hardware" :
+ "GuC not enabled");
i915_modparams.guc_log_level = 0;
}
@@ -171,44 +171,30 @@ void intel_uc_init_early(struct drm_i915_private *i915)
intel_huc_init_early(huc);
sanitize_options_early(i915);
-
- if (USES_GUC(i915))
- intel_uc_fw_fetch(i915, &guc->fw);
-
- if (USES_HUC(i915))
- intel_uc_fw_fetch(i915, &huc->fw);
}
void intel_uc_cleanup_early(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
-
- if (USES_HUC(i915))
- intel_uc_fw_fini(&huc->fw);
-
- if (USES_GUC(i915))
- intel_uc_fw_fini(&guc->fw);
guc_free_load_err_log(guc);
}
/**
* intel_uc_init_mmio - setup uC MMIO access
- *
- * @dev_priv: device private
+ * @i915: device private
*
* Setup minimal state necessary for MMIO accesses later in the
* initialization sequence.
*/
-void intel_uc_init_mmio(struct drm_i915_private *dev_priv)
+void intel_uc_init_mmio(struct drm_i915_private *i915)
{
- intel_guc_init_send_regs(&dev_priv->guc);
+ intel_guc_init_send_regs(&i915->guc);
}
static void guc_capture_load_err_log(struct intel_guc *guc)
{
- if (!guc->log.vma || !i915_modparams.guc_log_level)
+ if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
return;
if (!guc->load_err_log)
@@ -225,11 +211,11 @@ static void guc_free_load_err_log(struct intel_guc *guc)
static int guc_enable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
- gen9_enable_guc_interrupts(dev_priv);
+ gen9_enable_guc_interrupts(i915);
- if (HAS_GUC_CT(dev_priv))
+ if (HAS_GUC_CT(i915))
return intel_guc_ct_enable(&guc->ct);
guc->send = intel_guc_send_mmio;
@@ -239,60 +225,73 @@ static int guc_enable_communication(struct intel_guc *guc)
static void guc_disable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
- if (HAS_GUC_CT(dev_priv))
+ if (HAS_GUC_CT(i915))
intel_guc_ct_disable(&guc->ct);
- gen9_disable_guc_interrupts(dev_priv);
+ gen9_disable_guc_interrupts(i915);
guc->send = intel_guc_send_nop;
guc->handler = intel_guc_to_host_event_handler_nop;
}
-int intel_uc_init_misc(struct drm_i915_private *dev_priv)
+int intel_uc_init_misc(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
int ret;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
- intel_guc_init_ggtt_pin_bias(guc);
-
- ret = intel_guc_init_wq(guc);
+ ret = intel_guc_init_misc(guc);
if (ret)
return ret;
+ if (USES_HUC(i915)) {
+ ret = intel_huc_init_misc(huc);
+ if (ret)
+ goto err_guc;
+ }
+
return 0;
+
+err_guc:
+ intel_guc_fini_misc(guc);
+ return ret;
}
-void intel_uc_fini_misc(struct drm_i915_private *dev_priv)
+void intel_uc_fini_misc(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return;
- intel_guc_fini_wq(guc);
+ if (USES_HUC(i915))
+ intel_huc_fini_misc(huc);
+
+ intel_guc_fini_misc(guc);
}
-int intel_uc_init(struct drm_i915_private *dev_priv)
+int intel_uc_init(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
int ret;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
- if (!HAS_GUC(dev_priv))
+ if (!HAS_GUC(i915))
return -ENODEV;
ret = intel_guc_init(guc);
if (ret)
return ret;
- if (USES_GUC_SUBMISSION(dev_priv)) {
+ if (USES_GUC_SUBMISSION(i915)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
@@ -307,16 +306,16 @@ int intel_uc_init(struct drm_i915_private *dev_priv)
return 0;
}
-void intel_uc_fini(struct drm_i915_private *dev_priv)
+void intel_uc_fini(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GUC(i915));
- if (USES_GUC_SUBMISSION(dev_priv))
+ if (USES_GUC_SUBMISSION(i915))
intel_guc_submission_fini(guc);
intel_guc_fini(guc);
@@ -340,22 +339,22 @@ void intel_uc_sanitize(struct drm_i915_private *i915)
__intel_uc_reset_hw(i915);
}
-int intel_uc_init_hw(struct drm_i915_private *dev_priv)
+int intel_uc_init_hw(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
- struct intel_huc *huc = &dev_priv->huc;
+ struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
int ret, attempts;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GUC(i915));
- gen9_reset_guc_interrupts(dev_priv);
+ gen9_reset_guc_interrupts(i915);
/* WaEnableuKernelHeaderValidFix:skl */
/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
- if (IS_GEN9(dev_priv))
+ if (IS_GEN9(i915))
attempts = 3;
else
attempts = 1;
@@ -365,11 +364,11 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
* Always reset the GuC just before (re)loading, so
* that the state and timing are fairly predictable
*/
- ret = __intel_uc_reset_hw(dev_priv);
+ ret = __intel_uc_reset_hw(i915);
if (ret)
goto err_out;
- if (USES_HUC(dev_priv)) {
+ if (USES_HUC(i915)) {
ret = intel_huc_fw_upload(huc);
if (ret)
goto err_out;
@@ -392,24 +391,24 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto err_log_capture;
- if (USES_HUC(dev_priv)) {
+ if (USES_HUC(i915)) {
ret = intel_huc_auth(huc);
if (ret)
goto err_communication;
}
- if (USES_GUC_SUBMISSION(dev_priv)) {
+ if (USES_GUC_SUBMISSION(i915)) {
ret = intel_guc_submission_enable(guc);
if (ret)
goto err_communication;
}
- dev_info(dev_priv->drm.dev, "GuC firmware version %u.%u\n",
+ dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
guc->fw.major_ver_found, guc->fw.minor_ver_found);
- dev_info(dev_priv->drm.dev, "GuC submission %s\n",
- enableddisabled(USES_GUC_SUBMISSION(dev_priv)));
- dev_info(dev_priv->drm.dev, "HuC %s\n",
- enableddisabled(USES_HUC(dev_priv)));
+ dev_info(i915->drm.dev, "GuC submission %s\n",
+ enableddisabled(USES_GUC_SUBMISSION(i915)));
+ dev_info(i915->drm.dev, "HuC %s\n",
+ enableddisabled(USES_HUC(i915)));
return 0;
@@ -428,20 +427,20 @@ err_out:
if (GEM_WARN_ON(ret == -EIO))
ret = -EINVAL;
- dev_err(dev_priv->drm.dev, "GuC initialization failed %d\n", ret);
+ dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
return ret;
}
-void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
+void intel_uc_fini_hw(struct drm_i915_private *i915)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct intel_guc *guc = &i915->guc;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GUC(i915));
- if (USES_GUC_SUBMISSION(dev_priv))
+ if (USES_GUC_SUBMISSION(i915))
intel_guc_submission_disable(guc);
guc_disable_communication(guc);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 448293eb638d..b892ca8396e8 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1702,15 +1702,9 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
const u32 base = engine->mmio_base;
- const i915_reg_t mode = RING_MI_MODE(base);
-
- I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
- if (__intel_wait_for_register_fw(dev_priv,
- mode, MODE_IDLE, MODE_IDLE,
- 500, 0,
- NULL))
- DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
- engine->name);
+
+ if (intel_engine_stop_cs(engine))
+ DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
@@ -2099,21 +2093,25 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv,
{
struct intel_engine_cs *engine;
unsigned int tmp;
+ int ret;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
- if (gen8_reset_engine_start(engine))
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ if (gen8_reset_engine_start(engine)) {
+ ret = -EIO;
goto not_ready;
+ }
+ }
if (INTEL_GEN(dev_priv) >= 11)
- return gen11_reset_engines(dev_priv, engine_mask);
+ ret = gen11_reset_engines(dev_priv, engine_mask);
else
- return gen6_reset_engines(dev_priv, engine_mask);
+ ret = gen6_reset_engines(dev_priv, engine_mask);
not_ready:
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
gen8_reset_engine_cancel(engine);
- return -EIO;
+ return ret;
}
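The reworked gen8_reset_engines() keeps the specific error code from the gen6/gen11 reset path instead of collapsing every outcome to -EIO, and both the prepare-failure and the post-reset paths fall through to the same cancel loop. The prepare/commit/cancel shape in miniature, with made-up engines and an injected failure:

#include <stdio.h>

#define EIO 5

static int prepare(int engine) { return engine == 2; } /* engine 2 fails */
static void cancel(int engine) { printf("cancel engine %d\n", engine); }
static int do_reset(void)      { return 0; }

static int reset_engines(int nengines)
{
	int ret, e;

	for (e = 0; e < nengines; e++) {
		if (prepare(e)) {
			ret = -EIO;
			goto not_ready;
		}
	}

	ret = do_reset();

not_ready:
	/* Always undo the prepare step, on success and on failure alike. */
	for (e = 0; e < nengines; e++)
		cancel(e);

	return ret;
}

int main(void)
{
	printf("ret=%d\n", reset_engines(2)); /* both engines prepared: ret=0 */
	printf("ret=%d\n", reset_engines(3)); /* engine 2 fails: ret=-5 */
	return 0;
}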
typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
@@ -2176,6 +2174,8 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
* Thus assume it is best to stop engines on all gens
* where we have a gpu reset.
*
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
* WaMediaResetMainRingCleanup:ctg,elk (presumably)
*
* FIXME: Wa for more modern gens needs to be validated
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 47478d609630..2fbe93178fb2 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -67,21 +67,21 @@ struct intel_uncore_funcs {
void (*force_wake_put)(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
- uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
- uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
- uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
- uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
+ u8 (*mmio_readb)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ u16 (*mmio_readw)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ u32 (*mmio_readl)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ u64 (*mmio_readq)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
void (*mmio_writeb)(struct drm_i915_private *dev_priv,
- i915_reg_t r, uint8_t val, bool trace);
+ i915_reg_t r, u8 val, bool trace);
void (*mmio_writew)(struct drm_i915_private *dev_priv,
- i915_reg_t r, uint16_t val, bool trace);
+ i915_reg_t r, u16 val, bool trace);
void (*mmio_writel)(struct drm_i915_private *dev_priv,
- i915_reg_t r, uint32_t val, bool trace);
+ i915_reg_t r, u32 val, bool trace);
};
struct intel_forcewake_range {
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 458468237b5f..bba98cf83cbd 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -318,6 +318,12 @@ enum vbt_gmbus_ddi {
DDC_BUS_DDI_C,
DDC_BUS_DDI_D,
DDC_BUS_DDI_F,
+ ICL_DDC_BUS_DDI_A = 0x1,
+ ICL_DDC_BUS_DDI_B,
+ ICL_DDC_BUS_PORT_1 = 0x4,
+ ICL_DDC_BUS_PORT_2,
+ ICL_DDC_BUS_PORT_3,
+ ICL_DDC_BUS_PORT_4,
};
#define VBT_DP_MAX_LINK_RATE_HBR3 0
@@ -414,7 +420,9 @@ struct child_device_config {
u16 extended_type;
u8 dvo_function;
u8 dp_usb_type_c:1; /* 195 */
- u8 flags2_reserved:7; /* 195 */
+ u8 tbt:1; /* 209 */
+ u8 flags2_reserved:2; /* 195 */
+ u8 dp_port_trace_length:4; /* 209 */
u8 dp_gpio_index; /* 195 */
u16 dp_gpio_pin_num; /* 195 */
u8 dp_iboost_level:4; /* 196 */
@@ -448,7 +456,7 @@ struct bdb_general_definitions {
* number = (block_size - sizeof(bdb_general_definitions))/
* defs->child_dev_size;
*/
- uint8_t devices[0];
+ u8 devices[0];
} __packed;
/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
@@ -635,7 +643,7 @@ struct bdb_sdvo_lvds_options {
#define BDB_DRIVER_FEATURE_NO_LVDS 0
#define BDB_DRIVER_FEATURE_INT_LVDS 1
#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
-#define BDB_DRIVER_FEATURE_EDP 3
+#define BDB_DRIVER_FEATURE_INT_SDVO_LVDS 3
struct bdb_driver_features {
u8 boot_dev_algorithm:1;
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 2df3538ceba5..4bcdeaf8d98f 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -48,29 +48,58 @@
* - Public functions to init or apply the given workaround type.
*/
-static int wa_add(struct drm_i915_private *dev_priv,
- i915_reg_t addr,
- const u32 mask, const u32 val)
+static void wa_add(struct drm_i915_private *i915,
+ i915_reg_t reg, const u32 mask, const u32 val)
{
- const unsigned int idx = dev_priv->workarounds.count;
+ struct i915_workarounds *wa = &i915->workarounds;
+ unsigned int start = 0, end = wa->count;
+ unsigned int addr = i915_mmio_reg_offset(reg);
+ struct i915_wa_reg *r;
+
+ while (start < end) {
+ unsigned int mid = start + (end - start) / 2;
+
+ if (wa->reg[mid].addr < addr) {
+ start = mid + 1;
+ } else if (wa->reg[mid].addr > addr) {
+ end = mid;
+ } else {
+ r = &wa->reg[mid];
+
+ if ((mask & ~r->mask) == 0) {
+ DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
+ addr, r->mask, r->value);
+
+ r->value &= ~mask;
+ }
+
+ r->value |= val;
+ r->mask |= mask;
+ return;
+ }
+ }
- if (WARN_ON(idx >= I915_MAX_WA_REGS))
- return -ENOSPC;
+ if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) {
+ DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n",
+ addr, mask, val);
+ return;
+ }
- dev_priv->workarounds.reg[idx].addr = addr;
- dev_priv->workarounds.reg[idx].value = val;
- dev_priv->workarounds.reg[idx].mask = mask;
+ r = &wa->reg[wa->count++];
+ r->addr = addr;
+ r->value = val;
+ r->mask = mask;
- dev_priv->workarounds.count++;
+ while (r-- > wa->reg) {
+ GEM_BUG_ON(r[0].addr == r[1].addr);
+ if (r[1].addr > r[0].addr)
+ break;
- return 0;
+ swap(r[1], r[0]);
+ }
}
-#define WA_REG(addr, mask, val) do { \
- const int r = wa_add(dev_priv, (addr), (mask), (val)); \
- if (r) \
- return r; \
- } while (0)
+#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val))
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
@@ -463,6 +492,22 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
*/
WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
+	/*
+	 * Wa_2006611047:icl (pre-prod)
+	 * Formerly known as WaDisableImprovedTdlClkGating
+ */
+ if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+ GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
+
+ /* WaEnableStateCacheRedirectToCS:icl */
+ WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN1,
+ GEN11_STATE_CACHE_REDIRECT_TO_CS);
+
+ /* Wa_2006665173:icl (pre-prod) */
+ if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
+ GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
+
return 0;
}
@@ -521,7 +566,7 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
*cs++ = MI_LOAD_REGISTER_IMM(w->count);
for (i = 0; i < w->count; i++) {
- *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+ *cs++ = w->reg[i].addr;
*cs++ = w->reg[i].value;
}
*cs++ = MI_NOOP;
@@ -647,6 +692,19 @@ static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+ /* WaKBLVECSSemaphoreWaitPoll:kbl */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+
+ for_each_engine(engine, dev_priv, tmp) {
+ if (engine->id == RCS)
+ continue;
+
+ I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
+ }
+ }
}
static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
@@ -672,8 +730,74 @@ static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
+static void wa_init_mcr(struct drm_i915_private *dev_priv)
+{
+ const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+ u32 mcr;
+ u32 mcr_slice_subslice_mask;
+
+ /*
+	 * WaProgramMgsrForL3BankSpecificMmioReads:cnl,icl
+	 * L3Banks could be fused off in a single slice scenario. If that is
+ * the case, we might need to program MCR select to a valid L3Bank
+ * by default, to make sure we correctly read certain registers
+ * later on (in the range 0xB100 - 0xB3FF).
+ * This might be incompatible with
+ * WaProgramMgsrForCorrectSliceSpecificMmioReads.
+ * Fortunately, this should not happen in production hardware, so
+ * we only assert that this is the case (instead of implementing
+ * something more complex that requires checking the range of every
+ * MMIO read).
+ */
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ is_power_of_2(sseu->slice_mask)) {
+ /*
+		 * Read FUSE3 for the enabled L3 Bank IDs; if the L3 Bank matches
+		 * an enabled subslice, there is no need to redirect the MCR packet.
+ */
+ u32 slice = fls(sseu->slice_mask);
+ u32 fuse3 = I915_READ(GEN10_MIRROR_FUSE3);
+ u8 ss_mask = sseu->subslice_mask[slice];
+
+ u8 enabled_mask = (ss_mask | ss_mask >>
+ GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
+ u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK;
+
+ /*
+		 * Production silicon should have matching L3 Bank and
+		 * subslice enablement.
+ */
+ WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
+ }
+
+ mcr = I915_READ(GEN8_MCR_SELECTOR);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
+ GEN11_MCR_SUBSLICE_MASK;
+ else
+ mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
+ GEN8_MCR_SUBSLICE_MASK;
+ /*
+ * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
+	 * Before any MMIO read into slice/subslice specific registers, the
+	 * MCR packet control register needs to be programmed to point to an
+	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
+	 * This means each subsequent MMIO read will be forwarded to a
+	 * specific s/ss combination, but this is OK since these registers
+	 * are consistent across s/ss in almost all cases. On the rare
+	 * occasions, such as INSTDONE, where this value depends on the
+	 * s/ss combo, the read should be done with read_subslice_reg.
+ */
+ mcr &= ~mcr_slice_subslice_mask;
+ mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
+ I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+}
+
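wa_init_mcr() steers multicast register reads to a known-enabled slice/subslice pair before any such read happens. intel_calculate_mcr_s_ss_select() is not shown in this hunk; a plausible reduction of it is to pick the highest enabled slice and subslice with fls() and pack them into the selector field. A sketch under invented shift positions (the real GEN8/GEN11 field layouts differ):

#include <stdint.h>
#include <stdio.h>

/* Index of the highest set bit, 1-based like the kernel's fls(); 0 if none. */
static unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

/* Illustrative field positions only. */
#define MCR_SLICE_SHIFT    27
#define MCR_SUBSLICE_SHIFT 24

static uint32_t mcr_s_ss_select(uint32_t slice_mask, uint32_t subslice_mask)
{
	uint32_t slice = fls_u32(slice_mask) - 1;       /* highest enabled slice */
	uint32_t subslice = fls_u32(subslice_mask) - 1; /* highest enabled subslice */

	return (slice << MCR_SLICE_SHIFT) | (subslice << MCR_SUBSLICE_SHIFT);
}

int main(void)
{
	/* Slice 0 only, subslices 0 and 2 enabled: steer reads to s0/ss2. */
	printf("0x%08x\n", mcr_s_ss_select(0x1, 0x5)); /* 0x02000000 */
	return 0;
}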
static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
{
+ wa_init_mcr(dev_priv);
+
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
I915_WRITE(GAMT_CHKN_BIT_REG,
@@ -692,6 +816,8 @@ static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
{
+ wa_init_mcr(dev_priv);
+
+	/* This is not a Wa. Enable for better image quality */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
@@ -772,6 +898,13 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
PMFLUSHDONE_LNICRSDROP |
PMFLUSH_GAPL3UNBLOCK |
PMFLUSHDONE_LNEBLK);
+
+	/*
+	 * Wa_1406463099:icl
+	 * Formerly known as WaGamTlbPendError
+ */
+ I915_WRITE(GAMT_CHKN_BIT_REG,
+ I915_READ(GAMT_CHKN_BIT_REG) |
+ GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 91c72911be3c..7efb326badcd 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -338,7 +338,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
static int igt_check_page_sizes(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
+ struct drm_i915_private *i915 = vma->vm->i915;
unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
int err = 0;
@@ -379,7 +379,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
static int igt_mock_exhaust_device_supported_pages(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -415,7 +415,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
goto out_put;
}
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -458,7 +458,7 @@ out_device:
static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned long supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
int bit;
@@ -500,7 +500,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
/* Force the page size for this object */
obj->mm.page_sizes.sg = page_size;
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_unpin;
@@ -570,6 +570,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
@@ -591,12 +592,13 @@ static void close_object_list(struct list_head *objects,
list_for_each_entry_safe(obj, on, objects, st_link) {
struct i915_vma *vma;
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (!IS_ERR(vma))
i915_vma_close(vma);
list_del(&obj->st_link);
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
}
@@ -604,8 +606,8 @@ static void close_object_list(struct list_head *objects,
static int igt_mock_ppgtt_huge_fill(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
- unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
+ unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
unsigned long page_num;
bool single = false;
LIST_HEAD(objects);
@@ -641,7 +643,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
break;
@@ -725,7 +727,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
static int igt_mock_ppgtt_64K(void *arg)
{
struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
struct drm_i915_gem_object *obj;
const struct object_info {
unsigned int size;
@@ -819,7 +821,7 @@ static int igt_mock_ppgtt_64K(void *arg)
*/
obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_object_unpin;
@@ -866,6 +868,7 @@ static int igt_mock_ppgtt_64K(void *arg)
i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
}
@@ -887,8 +890,8 @@ out_object_put:
static struct i915_vma *
gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
{
- struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
- const int gen = INTEL_GEN(vma->vm->i915);
+ struct drm_i915_private *i915 = vma->vm->i915;
+ const int gen = INTEL_GEN(i915);
unsigned int count = vma->size >> PAGE_SHIFT;
struct drm_i915_gem_object *obj;
struct i915_vma *batch;
@@ -919,12 +922,12 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
*cmd++ = val;
} else if (gen >= 4) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? 1 << 22 : 0);
+ (gen < 6 ? MI_USE_GGTT : 0);
*cmd++ = 0;
*cmd++ = offset;
*cmd++ = val;
} else {
- *cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cmd++ = offset;
*cmd++ = val;
}
@@ -985,7 +988,10 @@ static int gpu_write(struct i915_vma *vma,
goto err_request;
}
- i915_vma_move_to_active(batch, rq, 0);
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+ goto err_request;
+
i915_gem_object_set_active_reference(batch->obj);
i915_vma_unpin(batch);
i915_vma_close(batch);
@@ -996,14 +1002,12 @@ static int gpu_write(struct i915_vma *vma,
if (err)
goto err_request;
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-
- reservation_object_lock(vma->resv, NULL);
- reservation_object_add_excl_fence(vma->resv, &rq->fence);
- reservation_object_unlock(vma->resv);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ i915_request_skip(rq, err);
err_request:
- __i915_request_add(rq, err == 0);
+ i915_request_add(rq);
return err;
}
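The gpu_write() rework above establishes the error-handling shape reused throughout the rest of this series: when attaching a vma to the request fails, the request is skipped (its payload cancelled) but still added, so ring and timeline bookkeeping stay consistent. Condensed, with the names used above (a sketch, not a complete function):

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		i915_request_skip(rq, err);	/* cancel the payload */
	i915_request_add(rq);			/* always submit the request */
	return err;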
@@ -1047,7 +1051,8 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
u32 dword, u32 val)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
@@ -1100,7 +1105,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
static struct intel_engine_cs *engines[I915_NUM_ENGINES];
struct intel_engine_cs *engine;
I915_RND_STATE(prng);
@@ -1262,6 +1268,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
}
@@ -1323,6 +1330,7 @@ static int igt_ppgtt_internal_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
@@ -1391,6 +1399,7 @@ static int igt_ppgtt_gemfs_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
i915_gem_object_put(obj);
}
@@ -1439,7 +1448,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -1493,7 +1502,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -1531,7 +1540,8 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
@@ -1587,7 +1597,8 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
@@ -1690,20 +1701,20 @@ int i915_gem_huge_page_mock_selftests(void)
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV), "mock");
+ ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
}
- if (!i915_vm_is_48bit(&ppgtt->base)) {
+ if (!i915_vm_is_48bit(&ppgtt->vm)) {
pr_err("failed to create 48b PPGTT\n");
err = -EINVAL;
goto out_close;
}
/* If we were ever hit this then it's time to mock the 64K scratch */
- if (!i915_vm_has_scratch_64K(&ppgtt->base)) {
+ if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
pr_err("PPGTT missing 64K scratch page\n");
err = -EINVAL;
goto out_close;
@@ -1712,7 +1723,7 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt);
out_close:
- i915_ppgtt_close(&ppgtt->base);
+ i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
@@ -1720,7 +1731,7 @@ out_unlock:
i915_modparams.enable_ppgtt = saved_ppgtt;
- drm_dev_unref(&dev_priv->drm);
+ drm_dev_put(&dev_priv->drm);
return err;
}
@@ -1744,6 +1755,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return 0;
}
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return 0;
+
file = mock_file(dev_priv);
if (IS_ERR(file))
return PTR_ERR(file);
@@ -1758,7 +1772,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
}
if (ctx->ppgtt)
- ctx->ppgtt->base.scrub_64K = true;
+ ctx->ppgtt->vm.scrub_64K = true;
err = i915_subtests(tests, ctx);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index 340a98c0c804..3a095c37c120 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -42,11 +42,21 @@ static int cpu_set(struct drm_i915_gem_object *obj,
page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
- if (needs_clflush & CLFLUSH_BEFORE)
+
+ if (needs_clflush & CLFLUSH_BEFORE) {
+ mb();
clflush(map+offset_in_page(offset) / sizeof(*map));
+ mb();
+ }
+
map[offset_in_page(offset) / sizeof(*map)] = v;
- if (needs_clflush & CLFLUSH_AFTER)
+
+ if (needs_clflush & CLFLUSH_AFTER) {
+ mb();
clflush(map+offset_in_page(offset) / sizeof(*map));
+ mb();
+ }
+
kunmap_atomic(map);
i915_gem_obj_finish_shmem_access(obj);
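The mb() pairs added above make the flush ordering explicit: clflush is fenced against the accesses it serialises rather than relying on implicit ordering. The guarded write path, condensed from cpu_set() above (a sketch; needs_clflush and offset are as in that function):

	map = kmap_atomic(page);

	if (needs_clflush & CLFLUSH_BEFORE) {
		mb();	/* prior stores are visible before the flush */
		clflush(map + offset_in_page(offset) / sizeof(*map));
		mb();	/* the flush completes before our store below */
	}
	map[offset_in_page(offset) / sizeof(*map)] = v;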
@@ -68,8 +78,13 @@ static int cpu_get(struct drm_i915_gem_object *obj,
page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
- if (needs_clflush & CLFLUSH_BEFORE)
+
+ if (needs_clflush & CLFLUSH_BEFORE) {
+ mb();
clflush(map+offset_in_page(offset) / sizeof(*map));
+ mb();
+ }
+
*v = map[offset_in_page(offset) / sizeof(*map)];
kunmap_atomic(map);
@@ -199,7 +214,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
- __i915_request_add(rq, false);
+ i915_request_add(rq);
i915_vma_unpin(vma);
return PTR_ERR(cs);
}
@@ -210,28 +225,24 @@ static int gpu_set(struct drm_i915_gem_object *obj,
*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = v;
} else if (INTEL_GEN(i915) >= 4) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = i915_ggtt_offset(vma) + offset;
*cs++ = v;
} else {
- *cs++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cs++ = i915_ggtt_offset(vma) + offset;
*cs++ = v;
*cs++ = MI_NOOP;
}
intel_ring_advance(rq, cs);
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unpin(vma);
- reservation_object_lock(obj->resv, NULL);
- reservation_object_add_excl_fence(obj->resv, &rq->fence);
- reservation_object_unlock(obj->resv);
-
- __i915_request_add(rq, true);
+ i915_request_add(rq);
- return 0;
+ return err;
}
static bool always_valid(struct drm_i915_private *i915)
@@ -239,8 +250,16 @@ static bool always_valid(struct drm_i915_private *i915)
return true;
}
+static bool needs_fence_registers(struct drm_i915_private *i915)
+{
+ return !i915_terminally_wedged(&i915->gpu_error);
+}
+
static bool needs_mi_store_dword(struct drm_i915_private *i915)
{
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return false;
+
return intel_engine_can_store_dword(i915->engine[RCS]);
}
@@ -251,7 +270,7 @@ static const struct igt_coherency_mode {
bool (*valid)(struct drm_i915_private *i915);
} igt_coherency_mode[] = {
{ "cpu", cpu_set, cpu_get, always_valid },
- { "gtt", gtt_set, gtt_get, always_valid },
+ { "gtt", gtt_set, gtt_get, needs_fence_registers },
{ "wc", wc_set, wc_get, always_valid },
{ "gpu", gpu_set, NULL, needs_mi_store_dword },
{ },
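Each entry in the table above carries a valid() predicate so that modes which cannot run on the current device — "gpu" without MI_STORE_DWORD support, "gtt" on a wedged device — are skipped rather than reported as failures. A hedged sketch of how the table is walked (the loop itself is illustrative; only the table and callbacks come from above):

	const struct igt_coherency_mode *mode;

	for (mode = igt_coherency_mode; mode->name; mode++) {
		if (!mode->valid(i915))
			continue;	/* mode unusable on this device */
		/* ... pair mode->set with another entry's ->get ... */
	}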
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index ddb03f009232..1c92560d35da 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -23,9 +23,11 @@
*/
#include "../i915_selftest.h"
+#include "i915_random.h"
#include "igt_flush_test.h"
#include "mock_drm.h"
+#include "mock_gem_device.h"
#include "huge_gem_object.h"
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
@@ -62,12 +64,12 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
*cmd++ = value;
} else if (gen >= 4) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? 1 << 22 : 0);
+ (gen < 6 ? MI_USE_GGTT : 0);
*cmd++ = 0;
*cmd++ = offset;
*cmd++ = value;
} else {
- *cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cmd++ = offset;
*cmd++ = value;
}
@@ -114,7 +116,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct i915_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
@@ -169,24 +171,28 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
if (err)
goto err_request;
- i915_vma_move_to_active(batch, rq, 0);
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+ goto skip_request;
+
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto skip_request;
+
i915_gem_object_set_active_reference(batch->obj);
i915_vma_unpin(batch);
i915_vma_close(batch);
- i915_vma_move_to_active(vma, rq, 0);
i915_vma_unpin(vma);
- reservation_object_lock(obj->resv, NULL);
- reservation_object_add_excl_fence(obj->resv, &rq->fence);
- reservation_object_unlock(obj->resv);
-
- __i915_request_add(rq, true);
+ i915_request_add(rq);
return 0;
+skip_request:
+ i915_request_skip(rq, err);
err_request:
- __i915_request_add(rq, false);
+ i915_request_add(rq);
err_batch:
i915_vma_unpin(batch);
err_vma:
@@ -247,9 +253,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
}
for (; m < DW_PER_PAGE; m++) {
- if (map[m] != 0xdeadbeef) {
+ if (map[m] != STACK_MAGIC) {
pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
- n, m, map[m], 0xdeadbeef);
+ n, m, map[m], STACK_MAGIC);
err = -EINVAL;
goto out_unmap;
}
@@ -289,7 +295,7 @@ create_test_object(struct i915_gem_context *ctx,
{
struct drm_i915_gem_object *obj;
struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+ ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
u64 size;
int err;
@@ -305,7 +311,7 @@ create_test_object(struct i915_gem_context *ctx,
if (err)
return ERR_PTR(err);
- err = cpu_fill(obj, 0xdeadbeef);
+ err = cpu_fill(obj, STACK_MAGIC);
if (err) {
pr_err("Failed to fill object with cpu, err=%d\n",
err);
@@ -335,11 +341,15 @@ static int igt_ctx_exec(void *arg)
bool first_shared_gtt = true;
int err = -ENODEV;
- /* Create a few different contexts (with different mm) and write
+ /*
+ * Create a few different contexts (with different mm) and write
* through each ctx/mm using the GPU making sure those writes end
* up in the expected pages of our obj.
*/
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return 0;
+
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
@@ -366,6 +376,9 @@ static int igt_ctx_exec(void *arg)
}
for_each_engine(engine, i915, id) {
+ if (!engine->context_size)
+ continue; /* No logical context support in HW */
+
if (!intel_engine_can_store_dword(engine))
continue;
@@ -420,6 +433,237 @@ out_unlock:
return err;
}
+static int igt_ctx_readonly(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj = NULL;
+ struct drm_file *file;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(objects);
+ struct i915_gem_context *ctx;
+ struct i915_hw_ppgtt *ppgtt;
+ unsigned long ndwords, dw;
+ int err = -ENODEV;
+
+ /*
+ * Create a few read-only objects (with the occasional writable object)
+ * and try to write into these objects, checking that the GPU discards
+ * any write to a read-only object.
+ */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ctx = i915_gem_create_context(i915, file->driver_priv);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
+ if (!ppgtt || !ppgtt->vm.has_read_only) {
+ err = 0;
+ goto out_unlock;
+ }
+
+ ndwords = 0;
+ dw = 0;
+ while (!time_after(jiffies, end_time)) {
+ struct intel_engine_cs *engine;
+ unsigned int id;
+
+ for_each_engine(engine, i915, id) {
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (!obj) {
+ obj = create_test_object(ctx, file, &objects);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_unlock;
+ }
+
+ if (prandom_u32_state(&prng) & 1)
+ i915_gem_object_set_readonly(obj);
+ }
+
+ intel_runtime_pm_get(i915);
+ err = gpu_fill(obj, ctx, engine, dw);
+ intel_runtime_pm_put(i915);
+ if (err) {
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ ndwords, dw, max_dwords(obj),
+ engine->name, ctx->hw_id,
+ yesno(!!ctx->ppgtt), err);
+ goto out_unlock;
+ }
+
+ if (++dw == max_dwords(obj)) {
+ obj = NULL;
+ dw = 0;
+ }
+ ndwords++;
+ }
+ }
+ pr_info("Submitted %lu dwords (across %u engines)\n",
+ ndwords, INTEL_INFO(i915)->num_rings);
+
+ dw = 0;
+ list_for_each_entry(obj, &objects, st_link) {
+ unsigned int rem =
+ min_t(unsigned int, ndwords - dw, max_dwords(obj));
+ unsigned int num_writes;
+
+ num_writes = rem;
+ if (i915_gem_object_is_readonly(obj))
+ num_writes = 0;
+
+ err = cpu_check(obj, num_writes);
+ if (err)
+ break;
+
+ dw += rem;
+ }
+
+out_unlock:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ mock_file_free(i915, file);
+ return err;
+}
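The final loop of igt_ctx_readonly() above encodes the whole expectation: an object marked read-only must show zero GPU-written dwords, a writable one all that were submitted. As a one-function restatement (an illustrative helper, not part of the patch):

	static unsigned int expected_writes(struct drm_i915_gem_object *obj,
					    unsigned int submitted)
	{
		return i915_gem_object_is_readonly(obj) ? 0 : submitted;
	}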
+
+static __maybe_unused const char *
+__engine_name(struct drm_i915_private *i915, unsigned int engines)
+{
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+
+ if (engines == ALL_ENGINES)
+ return "all";
+
+ for_each_engine_masked(engine, i915, engines, tmp)
+ return engine->name;
+
+ return "none";
+}
+
+static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
+ struct i915_gem_context *ctx,
+ unsigned int engines)
+{
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+ int err;
+
+ GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_add(rq);
+ }
+
+ err = i915_gem_switch_to_kernel_context(i915);
+ if (err)
+ return err;
+
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ if (!engine_has_kernel_context_barrier(engine)) {
+ pr_err("kernel context not last on engine %s!\n",
+ engine->name);
+ return -EINVAL;
+ }
+ }
+
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ GEM_BUG_ON(i915->gt.active_requests);
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ if (engine->last_retired_context->gem_context != i915->kernel_context) {
+ pr_err("engine %s not idling in kernel context!\n",
+ engine->name);
+ return -EINVAL;
+ }
+ }
+
+ err = i915_gem_switch_to_kernel_context(i915);
+ if (err)
+ return err;
+
+ if (i915->gt.active_requests) {
+ pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
+ i915->gt.active_requests);
+ return -EINVAL;
+ }
+
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ if (!intel_engine_has_kernel_context(engine)) {
+ pr_err("kernel context not last on engine %s!\n",
+ engine->name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int igt_switch_to_kernel_context(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ int err;
+
+ /*
+ * A core premise of switching to the kernel context is that
+ * if an engine is already idling in the kernel context, we
+ * do not emit another request and wake it up. The other is
+ * that we do indeed end up idling in the kernel context.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx)) {
+ mutex_unlock(&i915->drm.struct_mutex);
+ return PTR_ERR(ctx);
+ }
+
+ /* First check idling each individual engine */
+ for_each_engine(engine, i915, id) {
+ err = __igt_switch_to_kernel_context(i915, ctx, BIT(id));
+ if (err)
+ goto out_unlock;
+ }
+
+ /* Now en masse */
+ err = __igt_switch_to_kernel_context(i915, ctx, ALL_ENGINES);
+ if (err)
+ goto out_unlock;
+
+out_unlock:
+ GEM_TRACE_DUMP_ON(err);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ kernel_context_close(ctx);
+ return err;
+}
+
static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
@@ -432,7 +676,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
struct i915_vma *vma;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
continue;
@@ -447,14 +691,37 @@ static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
i915_gem_fini_aliasing_ppgtt(i915);
}
+int i915_gem_context_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_switch_to_kernel_context),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+
+ drm_dev_put(&i915->drm);
+ return err;
+}
+
int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(igt_switch_to_kernel_context),
SUBTEST(igt_ctx_exec),
+ SUBTEST(igt_ctx_readonly),
};
bool fake_alias = false;
int err;
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return 0;
+
/* Install a fake aliasing gtt for exercise */
if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
mutex_lock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index 89dc25a5a53b..a7055b12e53c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -389,7 +389,7 @@ int i915_gem_dmabuf_mock_selftests(void)
err = i915_subtests(tests, i915);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index ab9d7bee0aae..128ad1cf0647 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -35,7 +35,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
u64 size;
for (size = 0;
- size + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
size += I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -57,7 +57,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
return -EINVAL;
}
- if (list_empty(&i915->ggtt.base.inactive_list)) {
+ if (list_empty(&i915->ggtt.vm.inactive_list)) {
pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL;
}
@@ -69,7 +69,7 @@ static void unpin_ggtt(struct drm_i915_private *i915)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &i915->ggtt.base.inactive_list, vm_link)
+ list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
i915_vma_unpin(vma);
}
@@ -103,7 +103,7 @@ static int igt_evict_something(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
- err = i915_gem_evict_something(&ggtt->base,
+ err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
@@ -116,7 +116,7 @@ static int igt_evict_something(void *arg)
unpin_ggtt(i915);
/* Everything is unpinned, we should be able to evict something */
- err = i915_gem_evict_something(&ggtt->base,
+ err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
@@ -181,7 +181,7 @@ static int igt_evict_for_vma(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
err);
@@ -191,7 +191,7 @@ static int igt_evict_for_vma(void *arg)
unpin_ggtt(i915);
/* Everything is unpinned, we should be able to evict the node */
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err) {
pr_err("i915_gem_evict_for_node returned err=%d\n",
err);
@@ -229,7 +229,7 @@ static int igt_evict_for_cache_color(void *arg)
* i915_gtt_color_adjust throughout our driver, so using a mock color
* adjust will work just fine for our purposes.
*/
- ggtt->base.mm.color_adjust = mock_color_adjust;
+ ggtt->vm.mm.color_adjust = mock_color_adjust;
obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
@@ -265,7 +265,7 @@ static int igt_evict_for_cache_color(void *arg)
i915_vma_unpin(vma);
/* Remove just the second vma */
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err) {
pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
goto cleanup;
@@ -276,7 +276,7 @@ static int igt_evict_for_cache_color(void *arg)
*/
target.color = I915_CACHE_L3_LLC;
- err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (!err) {
pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
err = -EINVAL;
@@ -288,7 +288,7 @@ static int igt_evict_for_cache_color(void *arg)
cleanup:
unpin_ggtt(i915);
cleanup_objects(i915);
- ggtt->base.mm.color_adjust = NULL;
+ ggtt->vm.mm.color_adjust = NULL;
return err;
}
@@ -305,7 +305,7 @@ static int igt_evict_vm(void *arg)
goto cleanup;
/* Everything is pinned, nothing should happen */
- err = i915_gem_evict_vm(&ggtt->base);
+ err = i915_gem_evict_vm(&ggtt->vm);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
@@ -314,7 +314,7 @@ static int igt_evict_vm(void *arg)
unpin_ggtt(i915);
- err = i915_gem_evict_vm(&ggtt->base);
+ err = i915_gem_evict_vm(&ggtt->vm);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
@@ -359,9 +359,9 @@ static int igt_evict_contexts(void *arg)
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
- err = i915_gem_gtt_insert(&i915->ggtt.base, &hole,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
PIN_NOEVICT);
if (err)
goto out_locked;
@@ -377,9 +377,9 @@ static int igt_evict_contexts(void *arg)
goto out_locked;
}
- if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node,
+ if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
1ul << 20, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
PIN_NOEVICT)) {
kfree(r);
break;
@@ -490,7 +490,7 @@ int i915_gem_evict_mock_selftests(void)
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
@@ -500,5 +500,8 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_evict_contexts),
};
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index f7dc926f4ef1..8e2e269db97e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -32,6 +32,20 @@
#include "mock_drm.h"
#include "mock_gem_device.h"
+static void cleanup_freed_objects(struct drm_i915_private *i915)
+{
+ /*
+ * As we may hold onto the struct_mutex for inordinate lengths of
+ * time, the khungtaskd hung-task detector may fire for the free
+ * objects worker.
+ */
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ i915_gem_drain_freed_objects(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+}
+
static void fake_free_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
@@ -134,31 +148,34 @@ static int igt_ppgtt_alloc(void *arg)
{
struct drm_i915_private *dev_priv = arg;
struct i915_hw_ppgtt *ppgtt;
- u64 size, last;
- int err;
+ u64 size, last, limit;
+ int err = 0;
/* Allocate a ppgtt and try to fill the entire range */
if (!USES_PPGTT(dev_priv))
return 0;
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return -ENOMEM;
+ ppgtt = __hw_ppgtt_create(dev_priv);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
- mutex_lock(&dev_priv->drm.struct_mutex);
- err = __hw_ppgtt_init(ppgtt, dev_priv);
- if (err)
- goto err_ppgtt;
-
- if (!ppgtt->base.allocate_va_range)
+ if (!ppgtt->vm.allocate_va_range)
goto err_ppgtt_cleanup;
+ /*
+ * Although we only allocate the page tables here, so we could
+ * address a much larger GTT than would actually fit into RAM,
+ * a practical limit is the number of physical pages in the system.
+ * Capping the range ensures that we do not run into the OOM killer
+ * during the test and wilfully take down the machine.
+ */
+ limit = totalram_pages << PAGE_SHIFT;
+ limit = min(ppgtt->vm.total, limit);
+
/* Check we can allocate the entire range */
- for (size = 4096;
- size <= ppgtt->base.total;
- size <<= 2) {
- err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
+ for (size = 4096; size <= limit; size <<= 2) {
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
if (err) {
if (err == -ENOMEM) {
pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
@@ -168,15 +185,15 @@ static int igt_ppgtt_alloc(void *arg)
goto err_ppgtt_cleanup;
}
- ppgtt->base.clear_range(&ppgtt->base, 0, size);
+ cond_resched();
+
+ ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
}
/* Check we can incrementally allocate the entire range */
- for (last = 0, size = 4096;
- size <= ppgtt->base.total;
- last = size, size <<= 2) {
- err = ppgtt->base.allocate_va_range(&ppgtt->base,
- last, size - last);
+ for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
+ last, size - last);
if (err) {
if (err == -ENOMEM) {
pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
@@ -185,13 +202,14 @@ static int igt_ppgtt_alloc(void *arg)
}
goto err_ppgtt_cleanup;
}
+
+ cond_resched();
}
err_ppgtt_cleanup:
- ppgtt->base.cleanup(&ppgtt->base);
-err_ppgtt:
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_ppgtt_put(ppgtt);
mutex_unlock(&dev_priv->drm.struct_mutex);
- kfree(ppgtt);
return err;
}
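The limit computed in igt_ppgtt_alloc() above clamps the exercised VA range to physical memory so that page-table allocation cannot drive the machine into the OOM killer. As a one-line restatement (the explicit cast is an addition here, guarding the shift against 32-bit unsigned long overflow):

	limit = min(ppgtt->vm.total, (u64)totalram_pages << PAGE_SHIFT);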
@@ -293,6 +311,8 @@ static int lowlevel_hole(struct drm_i915_private *i915,
i915_gem_object_put(obj);
kfree(order);
+
+ cleanup_freed_objects(i915);
}
return 0;
@@ -521,6 +541,7 @@ static int fill_hole(struct drm_i915_private *i915,
}
close_object_list(&objects, vm);
+ cleanup_freed_objects(i915);
}
return 0;
@@ -607,6 +628,8 @@ err_put:
i915_gem_object_put(obj);
if (err)
return err;
+
+ cleanup_freed_objects(i915);
}
return 0;
@@ -791,6 +814,8 @@ err_obj:
kfree(order);
if (err)
return err;
+
+ cleanup_freed_objects(i915);
}
return 0;
@@ -859,6 +884,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
}
close_object_list(&objects, vm);
+ cleanup_freed_objects(i915);
return err;
}
@@ -951,6 +977,7 @@ static int shrink_boom(struct drm_i915_private *i915,
i915_gem_object_put(explode);
memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
+ cleanup_freed_objects(i915);
}
return 0;
@@ -982,17 +1009,17 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
+ ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
}
- GEM_BUG_ON(offset_in_page(ppgtt->base.total));
- GEM_BUG_ON(ppgtt->base.closed);
+ GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
+ GEM_BUG_ON(ppgtt->vm.closed);
- err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
+ err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
- i915_ppgtt_close(&ppgtt->base);
+ i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1061,18 +1088,18 @@ static int exercise_ggtt(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex);
restart:
- list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
- drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
+ list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
+ drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
if (hole_start < last)
continue;
- if (ggtt->base.mm.color_adjust)
- ggtt->base.mm.color_adjust(node, 0,
- &hole_start, &hole_end);
+ if (ggtt->vm.mm.color_adjust)
+ ggtt->vm.mm.color_adjust(node, 0,
+ &hole_start, &hole_end);
if (hole_start >= hole_end)
continue;
- err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
+ err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
if (err)
break;
@@ -1134,7 +1161,7 @@ static int igt_ggtt_page(void *arg)
goto out_free;
memset(&tmp, 0, sizeof(tmp));
- err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
+ err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
count * PAGE_SIZE, 0,
I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
@@ -1147,9 +1174,9 @@ static int igt_ggtt_page(void *arg)
for (n = 0; n < count; n++) {
u64 offset = tmp.start + n * PAGE_SIZE;
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, 0),
- offset, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, 0),
+ offset, I915_CACHE_NONE, 0);
}
order = i915_random_order(count, &prng);
@@ -1188,7 +1215,7 @@ static int igt_ggtt_page(void *arg)
kfree(order);
out_remove:
- ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size);
+ ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
intel_runtime_pm_put(i915);
drm_mm_remove_node(&tmp);
out_unpin:
@@ -1217,6 +1244,7 @@ static int exercise_mock(struct drm_i915_private *i915,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
+ const u64 limit = totalram_pages << PAGE_SHIFT;
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
@@ -1229,7 +1257,7 @@ static int exercise_mock(struct drm_i915_private *i915,
ppgtt = ctx->ppgtt;
GEM_BUG_ON(!ppgtt);
- err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);
+ err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);
mock_context_close(ctx);
return err;
@@ -1270,7 +1298,7 @@ static int igt_gtt_reserve(void *arg)
/* Start by filling the GGTT */
for (total = 0;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1288,20 +1316,20 @@ static int igt_gtt_reserve(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1319,7 +1347,7 @@ static int igt_gtt_reserve(void *arg)
/* Now we start forcing evictions */
for (total = I915_GTT_PAGE_SIZE;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1337,20 +1365,20 @@ static int igt_gtt_reserve(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1371,7 +1399,7 @@ static int igt_gtt_reserve(void *arg)
struct i915_vma *vma;
u64 offset;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1383,18 +1411,18 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- offset = random_offset(0, i915->ggtt.base.total,
+ offset = random_offset(0, i915->ggtt.vm.total,
2*I915_GTT_PAGE_SIZE,
I915_GTT_MIN_ALIGNMENT);
- err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size,
offset,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1429,8 +1457,8 @@ static int igt_gtt_insert(void *arg)
u64 start, end;
} invalid_insert[] = {
{
- i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
- 0, i915->ggtt.base.total,
+ i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
+ 0, i915->ggtt.vm.total,
},
{
2*I915_GTT_PAGE_SIZE, 0,
@@ -1460,7 +1488,7 @@ static int igt_gtt_insert(void *arg)
/* Check a couple of obviously invalid requests */
for (ii = invalid_insert; ii->size; ii++) {
- err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
ii->size, ii->alignment,
I915_COLOR_UNEVICTABLE,
ii->start, ii->end,
@@ -1475,7 +1503,7 @@ static int igt_gtt_insert(void *arg)
/* Start by filling the GGTT */
for (total = 0;
- total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1493,15 +1521,15 @@ static int igt_gtt_insert(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
0);
if (err == -ENOSPC) {
/* maxed out the GGTT space */
@@ -1510,7 +1538,7 @@ static int igt_gtt_insert(void *arg)
}
if (err) {
pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1522,7 +1550,7 @@ static int igt_gtt_insert(void *arg)
list_for_each_entry(obj, &objects, st_link) {
struct i915_vma *vma;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1542,7 +1570,7 @@ static int igt_gtt_insert(void *arg)
struct i915_vma *vma;
u64 offset;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1557,13 +1585,13 @@ static int igt_gtt_insert(void *arg)
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
0);
if (err) {
pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1579,7 +1607,7 @@ static int igt_gtt_insert(void *arg)
/* And then force evictions */
for (total = 0;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
@@ -1597,19 +1625,19 @@ static int igt_gtt_insert(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.base.total,
+ 0, i915->ggtt.vm.total,
0);
if (err) {
pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.base.total, err);
+ total, i915->ggtt.vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1646,7 +1674,7 @@ int i915_gem_gtt_mock_selftests(void)
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
@@ -1669,7 +1697,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ggtt_page),
};
- GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
+ GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index fbdb2419d418..c69cbd5aed52 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -113,7 +113,7 @@ static int igt_gem_huge(void *arg)
obj = huge_gem_object(i915,
nreal * PAGE_SIZE,
- i915->ggtt.base.total + PAGE_SIZE);
+ i915->ggtt.vm.total + PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -169,9 +169,16 @@ static u64 tiled_offset(const struct tile *tile, u64 v)
v += y * tile->width;
v += div64_u64_rem(x, tile->width, &x) << tile->size;
v += x;
- } else {
+ } else if (tile->width == 128) {
const unsigned int ytile_span = 16;
- const unsigned int ytile_height = 32 * ytile_span;
+ const unsigned int ytile_height = 512;
+
+ v += y * ytile_span;
+ v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
+ v += x;
+ } else {
+ const unsigned int ytile_span = 32;
+ const unsigned int ytile_height = 256;
v += y * ytile_span;
v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
@@ -288,6 +295,8 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
kunmap(p);
if (err)
return err;
+
+ i915_vma_destroy(vma);
}
return 0;
@@ -311,7 +320,7 @@ static int igt_partial_tiling(void *arg)
obj = huge_gem_object(i915,
nreal << PAGE_SHIFT,
- (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+ (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -347,6 +356,14 @@ static int igt_partial_tiling(void *arg)
unsigned int pitch;
struct tile tile;
+ if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ /*
+ * The swizzling pattern is actually unknown as it
+ * varies with the physical address of each page.
+ * See i915_gem_detect_bit_6_swizzle().
+ */
+ break;
+
tile.tiling = tiling;
switch (tiling) {
case I915_TILING_X:
@@ -357,7 +374,8 @@ static int igt_partial_tiling(void *arg)
break;
}
- if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN ||
+ GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
+ if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
continue;
@@ -440,7 +458,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
struct i915_vma *vma;
int err;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -454,12 +472,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
return PTR_ERR(rq);
}
- i915_vma_move_to_active(vma, rq, 0);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+
i915_request_add(rq);
- i915_gem_object_set_active_reference(obj);
+ __i915_gem_object_release_unless_active(obj);
i915_vma_unpin(vma);
- return 0;
+
+ return err;
}
static bool assert_mmap_offset(struct drm_i915_private *i915,
@@ -488,6 +508,15 @@ static int igt_mmap_offset_exhaustion(void *arg)
u64 hole_start, hole_end;
int loop, err;
+ /* Disable background reaper */
+ mutex_lock(&i915->drm.struct_mutex);
+ if (!i915->gt.active_requests++)
+ i915_gem_unpark(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ cancel_delayed_work_sync(&i915->gt.retire_work);
+ cancel_delayed_work_sync(&i915->gt.idle_work);
+ GEM_BUG_ON(!i915->gt.awake);
+
/* Trim the device mmap space to only a page */
memset(&resv, 0, sizeof(resv));
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
@@ -496,7 +525,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
err = drm_mm_reserve_node(mm, &resv);
if (err) {
pr_err("Failed to trim VMA manager, err=%d\n", err);
- return err;
+ goto out_park;
}
break;
}
@@ -538,6 +567,9 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Now fill with busy dead objects that we expect to reap */
for (loop = 0; loop < 3; loop++) {
+ if (i915_terminally_wedged(&i915->gpu_error))
+ break;
+
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
@@ -554,6 +586,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto err_obj;
}
+ /* NB we rely on the _active_ reference to access obj now */
GEM_BUG_ON(!i915_gem_object_is_active(obj));
err = i915_gem_object_create_mmap_offset(obj);
if (err) {
@@ -565,6 +598,13 @@ static int igt_mmap_offset_exhaustion(void *arg)
out:
drm_mm_remove_node(&resv);
+out_park:
+ mutex_lock(&i915->drm.struct_mutex);
+ if (--i915->gt.active_requests)
+ queue_delayed_work(i915->wq, &i915->gt.retire_work, 0);
+ else
+ queue_delayed_work(i915->wq, &i915->gt.idle_work, 0);
+ mutex_unlock(&i915->drm.struct_mutex);
return err;
err_obj:
i915_gem_object_put(obj);
@@ -586,7 +626,7 @@ int i915_gem_object_mock_selftests(void)
err = i915_subtests(tests, i915);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index d16d74178e9d..1b70208eeea7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -24,3 +24,4 @@ selftest(vma, i915_vma_mock_selftests)
selftest(evict, i915_gem_evict_mock_selftests)
selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
+selftest(contexts, i915_gem_context_mock_selftests)
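The selftest() entries in this header are expanded twice by the selftest runner: once to declare the functions, once to build the dispatch table. A hedged sketch of that include-twice idiom (the table layout is simplified from the real runner in i915_selftest.c):

	#define selftest(name, func) int func(void);
	#include "i915_mock_selftests.h"
	#undef selftest

	static const struct {
		const char *name;
		int (*func)(void);
	} mock_selftests[] = {
	#define selftest(name, func) { #name, func },
	#include "i915_mock_selftests.h"
	#undef selftest
	};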
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 94bc2e1898a4..c4aac6141e04 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -262,7 +262,7 @@ int i915_request_mock_selftests(void)
return -ENOMEM;
err = i915_subtests(tests, i915);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
@@ -286,7 +286,9 @@ static int begin_live_test(struct live_test *t,
t->func = func;
t->name = name;
- err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (err) {
pr_err("%s(%s): failed to idle before, with err=%d!",
func, name, err);
@@ -342,9 +344,9 @@ static int live_nop_request(void *arg)
mutex_lock(&i915->drm.struct_mutex);
for_each_engine(engine, i915, id) {
- IGT_TIMEOUT(end_time);
- struct i915_request *request;
+ struct i915_request *request = NULL;
unsigned long n, prime;
+ IGT_TIMEOUT(end_time);
ktime_t times[2] = {};
err = begin_live_test(&t, i915, __func__, engine->name);
@@ -430,7 +432,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (err)
goto err;
- vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -466,7 +468,7 @@ empty_request(struct intel_engine_cs *engine,
goto out_request;
out_request:
- __i915_request_add(request, err == 0);
+ i915_request_add(request);
return err ? ERR_PTR(err) : request;
}
@@ -555,7 +557,8 @@ out_unlock:
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx = i915->kernel_context;
- struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
struct i915_vma *vma;
@@ -593,11 +596,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
} else if (gen >= 6) {
*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
*cmd++ = lower_32_bits(vma->node.start);
- } else if (gen >= 4) {
- *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
- *cmd++ = lower_32_bits(vma->node.start);
} else {
- *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
+ *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*cmd++ = lower_32_bits(vma->node.start);
}
*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
@@ -677,7 +677,9 @@ static int live_all_engines(void *arg)
i915_gem_object_set_active_reference(batch->obj);
}
- i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_vma_move_to_active(batch, request[id], 0);
+ GEM_BUG_ON(err);
+
i915_request_get(request[id]);
i915_request_add(request[id]);
}
@@ -787,7 +789,9 @@ static int live_sequential_engines(void *arg)
GEM_BUG_ON(err);
request[id]->batch = batch;
- i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_vma_move_to_active(batch, request[id], 0);
+ GEM_BUG_ON(err);
+
i915_gem_object_set_active_reference(batch->obj);
i915_vma_get(batch);
@@ -861,5 +865,9 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_sequential_engines),
SUBTEST(live_empty_request),
};
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index addc5a599c4a..86c54ea37f48 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -210,6 +210,8 @@ int __i915_subtests(const char *caller,
return -EINTR;
pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
+ GEM_TRACE("Running %s/%s\n", caller, st->name);
+
err = st->func(data);
if (err && err != -EINTR) {
pr_err(DRIVER_NAME "/%s: %s failed with error %d\n",
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index e90f97236e50..ffa74290e054 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -35,7 +35,7 @@ static bool assert_vma(struct i915_vma *vma,
{
bool ok = true;
- if (vma->vm != &ctx->ppgtt->base) {
+ if (vma->vm != &ctx->ppgtt->vm) {
pr_err("VMA created with wrong VM\n");
ok = false;
}
@@ -110,8 +110,7 @@ static int create_vmas(struct drm_i915_private *i915,
list_for_each_entry(obj, objects, st_link) {
for (pinned = 0; pinned <= 1; pinned++) {
list_for_each_entry(ctx, contexts, link) {
- struct i915_address_space *vm =
- &ctx->ppgtt->base;
+ struct i915_address_space *vm = &ctx->ppgtt->vm;
struct i915_vma *vma;
int err;
@@ -259,12 +258,12 @@ static int igt_vma_pin1(void *arg)
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
- VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
- VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
- INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.base.total),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
+ INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total),
INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
VALID(4096, PIN_GLOBAL),
@@ -272,12 +271,12 @@ static int igt_vma_pin1(void *arg)
VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
- VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
- VALID(i915->ggtt.base.total, PIN_GLOBAL),
- NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL),
+ VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL),
+ VALID(i915->ggtt.vm.total, PIN_GLOBAL),
+ NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL),
NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
- INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
+ INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
@@ -289,9 +288,9 @@ static int igt_vma_pin1(void *arg)
* variable start, end and size.
*/
NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
- NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total),
+ NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total),
NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
- NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+ NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
#endif
{ },
#undef NOSPACE
@@ -307,13 +306,13 @@ static int igt_vma_pin1(void *arg)
* focusing on error handling of boundary conditions.
*/
- GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm));
+ GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm));
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = checked_vma_instance(obj, &i915->ggtt.base, NULL);
+ vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma))
goto out;
@@ -405,7 +404,7 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
static int igt_vma_rotate(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct i915_address_space *vm = &i915->ggtt.base;
+ struct i915_address_space *vm = &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const struct intel_rotation_plane_info planes[] = {
{ .width = 1, .height = 1, .stride = 1 },
@@ -604,7 +603,7 @@ static bool assert_pin(struct i915_vma *vma,
static int igt_vma_partial(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct i915_address_space *vm = &i915->ggtt.base;
+ struct i915_address_space *vm = &i915->ggtt.vm;
const unsigned int npages = 1021; /* prime! */
struct drm_i915_gem_object *obj;
const struct phase {
@@ -734,7 +733,7 @@ int i915_vma_mock_selftests(void)
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 0d06f559243f..af66e3d4e23a 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -9,52 +9,8 @@
#include "../i915_selftest.h"
#include "igt_flush_test.h"
-struct wedge_me {
- struct delayed_work work;
- struct drm_i915_private *i915;
- const void *symbol;
-};
-
-static void wedge_me(struct work_struct *work)
-{
- struct wedge_me *w = container_of(work, typeof(*w), work.work);
-
- pr_err("%pS timed out, cancelling all further testing.\n", w->symbol);
-
- GEM_TRACE("%pS timed out.\n", w->symbol);
- GEM_TRACE_DUMP();
-
- i915_gem_set_wedged(w->i915);
-}
-
-static void __init_wedge(struct wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const void *symbol)
-{
- w->i915 = i915;
- w->symbol = symbol;
-
- INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
- schedule_delayed_work(&w->work, timeout);
-}
-
-static void __fini_wedge(struct wedge_me *w)
-{
- cancel_delayed_work_sync(&w->work);
- destroy_delayed_work_on_stack(&w->work);
- w->i915 = NULL;
-}
-
-#define wedge_on_timeout(W, DEV, TIMEOUT) \
- for (__init_wedge((W), (DEV), (TIMEOUT), __builtin_return_address(0)); \
- (W)->i915; \
- __fini_wedge((W)))
-
int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
{
- struct wedge_me w;
-
cond_resched();
if (flags & I915_WAIT_LOCKED &&
@@ -63,8 +19,15 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
i915_gem_set_wedged(i915);
}
- wedge_on_timeout(&w, i915, HZ)
- i915_gem_wait_for_idle(i915, flags);
+ if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
+ pr_err("%pS timed out, cancelling all further testing.\n",
+ __builtin_return_address(0));
+
+ GEM_TRACE("%pS timed out.\n", __builtin_return_address(0));
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(i915);
+ }
return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
new file mode 100644
index 000000000000..08e5ff11bbd9
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
@@ -0,0 +1,58 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef IGT_WEDGE_ME_H
+#define IGT_WEDGE_ME_H
+
+#include <linux/workqueue.h>
+
+#include "../i915_gem.h"
+
+struct drm_i915_private;
+
+struct igt_wedge_me {
+ struct delayed_work work;
+ struct drm_i915_private *i915;
+ const char *name;
+};
+
+static void __igt_wedge_me(struct work_struct *work)
+{
+ struct igt_wedge_me *w = container_of(work, typeof(*w), work.work);
+
+ pr_err("%s timed out, cancelling test.\n", w->name);
+
+ GEM_TRACE("%s timed out.\n", w->name);
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(w->i915);
+}
+
+static void __igt_init_wedge(struct igt_wedge_me *w,
+ struct drm_i915_private *i915,
+ long timeout,
+ const char *name)
+{
+ w->i915 = i915;
+ w->name = name;
+
+ INIT_DELAYED_WORK_ONSTACK(&w->work, __igt_wedge_me);
+ schedule_delayed_work(&w->work, timeout);
+}
+
+static void __igt_fini_wedge(struct igt_wedge_me *w)
+{
+ cancel_delayed_work_sync(&w->work);
+ destroy_delayed_work_on_stack(&w->work);
+ w->i915 = NULL;
+}
+
+#define igt_wedge_on_timeout(W, DEV, TIMEOUT) \
+ for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__); \
+ (W)->i915; \
+ __igt_fini_wedge((W)))
+
+#endif /* IGT_WEDGE_ME_H */
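
igt_wedge_on_timeout() relies on the for-loop guard idiom: the init expression arms the watchdog, the condition holds for exactly one pass over the body, and the increment expression disarms it. A self-contained illustration of just the control flow (guard/with_guard are invented names):

#include <stdio.h>

struct guard { int armed; };

static void guard_init(struct guard *g)
{
	g->armed = 1;
	puts("watchdog armed");
}

static void guard_fini(struct guard *g)
{
	g->armed = 0;
	puts("watchdog disarmed");
}

#define with_guard(g) \
	for (guard_init(g); (g)->armed; guard_fini(g))

int main(void)
{
	struct guard g;

	with_guard(&g)
		puts("guarded body runs exactly once");
	return 0;
}

Clearing the flag in the fini step is what terminates the loop, exactly as __igt_fini_wedge() NULLs w->i915 above.
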
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index d6926e7820e5..f03b407fdbe2 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -464,7 +464,7 @@ int intel_breadcrumbs_mock_selftests(void)
return -ENOMEM;
err = i915_subtests(tests, i915->engine[RCS]);
- drm_dev_unref(&i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index fb74e2cf8a0a..407c98fb9170 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -196,19 +196,23 @@ static int igt_guc_clients(void *args)
}
unreserve_doorbell(guc->execbuf_client);
- err = guc_clients_doorbell_init(guc);
+
+ __create_doorbell(guc->execbuf_client);
+ err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id);
if (err != -EIO) {
pr_err("unexpected (err = %d)", err);
- goto out;
+ goto out_db;
}
if (!available_dbs(guc, guc->execbuf_client->priority)) {
pr_err("doorbell not available when it should\n");
err = -EIO;
- goto out;
+ goto out_db;
}
+out_db:
/* clean after test */
+ __destroy_doorbell(guc->execbuf_client);
err = reserve_doorbell(guc->execbuf_client);
if (err) {
pr_err("failed to reserve back the doorbell back\n");
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 438e0b045a2c..65d66cdedd26 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -27,6 +27,7 @@
#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"
+#include "igt_wedge_me.h"
#include "mock_context.h"
#include "mock_drm.h"
@@ -105,7 +106,10 @@ static int emit_recurse_batch(struct hang *h,
struct i915_request *rq)
{
struct drm_i915_private *i915 = h->i915;
- struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_address_space *vm =
+ rq->gem_context->ppgtt ?
+ &rq->gem_context->ppgtt->vm :
+ &i915->ggtt.vm;
struct i915_vma *hws, *vma;
unsigned int flags;
u32 *batch;
@@ -127,13 +131,19 @@ static int emit_recurse_batch(struct hang *h,
if (err)
goto unpin_vma;
- i915_vma_move_to_active(vma, rq, 0);
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(vma->obj)) {
i915_gem_object_get(vma->obj);
i915_gem_object_set_active_reference(vma->obj);
}
- i915_vma_move_to_active(hws, rq, 0);
+ err = i915_vma_move_to_active(hws, rq, 0);
+ if (err)
+ goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(hws->obj)) {
i915_gem_object_get(hws->obj);
i915_gem_object_set_active_reference(hws->obj);
@@ -168,7 +178,7 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
} else if (INTEL_GEN(i915) >= 4) {
- *batch++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
@@ -181,7 +191,7 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
} else {
- *batch++ = MI_STORE_DWORD_IMM;
+ *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
*batch++ = MI_ARB_CHECK;
@@ -190,7 +200,7 @@ static int emit_recurse_batch(struct hang *h,
batch += 1024 / sizeof(*batch);
*batch++ = MI_ARB_CHECK;
- *batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1;
+ *batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
}
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
@@ -202,6 +212,7 @@ static int emit_recurse_batch(struct hang *h,
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
@@ -242,7 +253,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
err = emit_recurse_batch(h, rq);
if (err) {
- __i915_request_add(rq, false);
+ i915_request_add(rq);
return ERR_PTR(err);
}
@@ -315,7 +326,7 @@ static int igt_hang_sanitycheck(void *arg)
*h.batch = MI_BATCH_BUFFER_END;
i915_gem_chipset_flush(i915);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
timeout = i915_request_wait(rq,
I915_WAIT_LOCKED,
@@ -461,7 +472,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
mutex_unlock(&i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
@@ -560,6 +571,30 @@ struct active_engine {
#define TEST_SELF BIT(2)
#define TEST_PRIORITY BIT(3)
+static int active_request_put(struct i915_request *rq)
+{
+ int err = 0;
+
+ if (!rq)
+ return 0;
+
+ if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
+ GEM_TRACE("%s timed out waiting for completion of fence %llx:%d, seqno %d.\n",
+ rq->engine->name,
+ rq->fence.context,
+ rq->fence.seqno,
+ i915_request_global_seqno(rq));
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(rq->i915);
+ err = -EIO;
+ }
+
+ i915_request_put(rq);
+
+ return err;
+}
+
static int active_engine(void *data)
{
I915_RND_STATE(prng);
@@ -608,24 +643,20 @@ static int active_engine(void *data)
i915_request_add(new);
mutex_unlock(&engine->i915->drm.struct_mutex);
- if (old) {
- if (i915_request_wait(old, 0, HZ) < 0) {
- GEM_TRACE("%s timed out.\n", engine->name);
- GEM_TRACE_DUMP();
-
- i915_gem_set_wedged(engine->i915);
- i915_request_put(old);
- err = -EIO;
- break;
- }
- i915_request_put(old);
- }
+ err = active_request_put(old);
+ if (err)
+ break;
cond_resched();
}
- for (count = 0; count < ARRAY_SIZE(rq); count++)
- i915_request_put(rq[count]);
+ for (count = 0; count < ARRAY_SIZE(rq); count++) {
+ int err__ = active_request_put(rq[count]);
+
+ /* Keep the first error */
+ if (!err)
+ err = err__;
+ }
err_file:
mock_file_free(engine->i915, file);
@@ -719,7 +750,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
mutex_unlock(&i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
@@ -891,7 +922,7 @@ static u32 fake_hangcheck(struct i915_request *rq, u32 mask)
return reset_count;
}
-static int igt_wait_reset(void *arg)
+static int igt_reset_wait(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_request *rq;
@@ -919,7 +950,7 @@ static int igt_wait_reset(void *arg)
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -965,6 +996,170 @@ unlock:
return err;
}
+struct evict_vma {
+ struct completion completion;
+ struct i915_vma *vma;
+};
+
+static int evict_vma(void *data)
+{
+ struct evict_vma *arg = data;
+ struct i915_address_space *vm = arg->vma->vm;
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_mm_node evict = arg->vma->node;
+ int err;
+
+ complete(&arg->completion);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_gem_evict_for_node(vm, &evict, 0);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
+static int __igt_reset_evict_vma(struct drm_i915_private *i915,
+ struct i915_address_space *vm)
+{
+ struct drm_i915_gem_object *obj;
+ struct task_struct *tsk = NULL;
+ struct i915_request *rq;
+ struct evict_vma arg;
+ struct hang h;
+ int err;
+
+ if (!intel_engine_can_store_dword(i915->engine[RCS]))
+ return 0;
+
+ /* Check that we can recover an unbind stuck on a hanging request */
+
+ global_reset_lock(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ if (err)
+ goto unlock;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto fini;
+ }
+
+ arg.vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(arg.vma)) {
+ err = PTR_ERR(arg.vma);
+ goto out_obj;
+ }
+
+ rq = hang_create_request(&h, i915->engine[RCS]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_obj;
+ }
+
+ err = i915_vma_pin(arg.vma, 0, 0,
+ i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER);
+ if (err)
+ goto out_obj;
+
+ err = i915_vma_move_to_active(arg.vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unpin(arg.vma);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err)
+ goto out_rq;
+
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (!wait_until_running(&h, rq)) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+
+ i915_gem_set_wedged(i915);
+ goto out_reset;
+ }
+
+ init_completion(&arg.completion);
+
+ tsk = kthread_run(evict_vma, &arg, "igt/evict_vma");
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ tsk = NULL;
+ goto out_reset;
+ }
+
+ wait_for_completion(&arg.completion);
+
+ if (wait_for(waitqueue_active(&rq->execute), 10)) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("igt/evict_vma kthread did not wait\n");
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+
+ i915_gem_set_wedged(i915);
+ goto out_reset;
+ }
+
+out_reset:
+ fake_hangcheck(rq, intel_engine_flag(rq->engine));
+
+ if (tsk) {
+ struct igt_wedge_me w;
+
+ /* The reset, even indirectly, should take less than 10ms. */
+ igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
+ err = kthread_stop(tsk);
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+out_rq:
+ i915_request_put(rq);
+out_obj:
+ i915_gem_object_put(obj);
+fini:
+ hang_fini(&h);
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ global_reset_unlock(i915);
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return -EIO;
+
+ return err;
+}
+
+static int igt_reset_evict_ggtt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+
+ return __igt_reset_evict_vma(i915, &i915->ggtt.vm);
+}
+
+static int igt_reset_evict_ppgtt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx;
+ int err;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = kernel_context(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ err = 0;
+ if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */
+ err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm);
+
+ kernel_context_close(ctx);
+ return err;
+}
+
static int wait_for_others(struct drm_i915_private *i915,
struct intel_engine_cs *exclude)
{
@@ -1014,7 +1209,7 @@ static int igt_reset_queue(void *arg)
}
i915_request_get(prev);
- __i915_request_add(prev, true);
+ i915_request_add(prev);
count = 0;
do {
@@ -1028,7 +1223,7 @@ static int igt_reset_queue(void *arg)
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
/*
* XXX We don't handle resetting the kernel context
@@ -1161,7 +1356,7 @@ static int igt_handle_error(void *arg)
}
i915_request_get(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -1210,8 +1405,10 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
SUBTEST(igt_reset_engines),
- SUBTEST(igt_wait_reset),
SUBTEST(igt_reset_queue),
+ SUBTEST(igt_reset_wait),
+ SUBTEST(igt_reset_evict_ggtt),
+ SUBTEST(igt_reset_evict_ppgtt),
SUBTEST(igt_handle_error),
};
bool saved_hangcheck;
@@ -1220,6 +1417,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
if (!intel_has_gpu_reset(i915))
return 0;
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return -EIO; /* we're long past hope of a successful reset */
+
intel_runtime_pm_get(i915);
saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
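
__igt_reset_evict_vma() choreographs three parties: a hanging request pinning a VMA, a kthread that blocks in eviction against it, and the reset that unblocks the kthread. A pthread sketch of the same handshake (all names invented; the condvar wait maps to wait_for_completion(); build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t started = PTHREAD_COND_INITIALIZER;
static int running;

static void *evictor(void *arg)
{
	(void)arg;

	pthread_mutex_lock(&lock);
	running = 1;
	pthread_cond_signal(&started);	/* complete(&arg.completion) */
	pthread_mutex_unlock(&lock);

	/* the real kthread now blocks in i915_gem_evict_for_node()
	 * until the hung request is reset; the sketch just returns */
	return NULL;
}

int main(void)
{
	pthread_t tsk;

	pthread_create(&tsk, NULL, evictor, NULL);

	pthread_mutex_lock(&lock);
	while (!running)			/* wait_for_completion() */
		pthread_cond_wait(&started, &lock);
	pthread_mutex_unlock(&lock);

	/* fake_hangcheck() + kthread_stop() happen here in the selftest */
	pthread_join(tsk, NULL);
	puts("evictor joined after the reset");
	return 0;
}
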
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 1b8a07125150..582566faef09 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -83,7 +83,7 @@ static int emit_recurse_batch(struct spinner *spin,
struct i915_request *rq,
u32 arbitration_command)
{
- struct i915_address_space *vm = &rq->ctx->ppgtt->base;
+ struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
struct i915_vma *hws, *vma;
u32 *batch;
int err;
@@ -104,13 +104,19 @@ static int emit_recurse_batch(struct spinner *spin,
if (err)
goto unpin_vma;
- i915_vma_move_to_active(vma, rq, 0);
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(vma->obj)) {
i915_gem_object_get(vma->obj);
i915_gem_object_set_active_reference(vma->obj);
}
- i915_vma_move_to_active(hws, rq, 0);
+ err = i915_vma_move_to_active(hws, rq, 0);
+ if (err)
+ goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(hws->obj)) {
i915_gem_object_get(hws->obj);
i915_gem_object_set_active_reference(hws->obj);
@@ -134,6 +140,7 @@ static int emit_recurse_batch(struct spinner *spin,
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
@@ -155,7 +162,7 @@ spinner_create_request(struct spinner *spin,
err = emit_recurse_batch(spin, rq, arbitration_command);
if (err) {
- __i915_request_add(rq, false);
+ i915_request_add(rq);
return ERR_PTR(err);
}
@@ -444,16 +451,134 @@ err_wedged:
goto err_ctx_lo;
}
+static int live_preempt_hang(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ if (spinner_init(&spin_hi, i915))
+ goto err_unlock;
+
+ if (spinner_init(&spin_lo, i915))
+ goto err_spin_hi;
+
+ ctx_hi = kernel_context(i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+
+ ctx_lo = kernel_context(i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!wait_for_spinner(&spin_lo, rq)) {
+ GEM_TRACE("lo spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ init_completion(&engine->execlists.preempt_hang.completion);
+ engine->execlists.preempt_hang.inject_hang = true;
+
+ i915_request_add(rq);
+
+ if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
+ HZ / 10)) {
+ pr_err("Preemption did not occur within timeout!");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ i915_reset_engine(engine, NULL);
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+ engine->execlists.preempt_hang.inject_hang = false;
+
+ if (!wait_for_spinner(&spin_hi, rq)) {
+ GEM_TRACE("hi spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ spinner_end(&spin_hi);
+ spinner_end(&spin_lo);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ spinner_fini(&spin_lo);
+err_spin_hi:
+ spinner_fini(&spin_hi);
+err_unlock:
+ igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
+ SUBTEST(live_preempt_hang),
};
if (!HAS_EXECLISTS(i915))
return 0;
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 17444a3abbb9..0d39b3bf0c0d 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -6,6 +6,7 @@
#include "../i915_selftest.h"
+#include "igt_wedge_me.h"
#include "mock_context.h"
static struct drm_i915_gem_object *
@@ -33,7 +34,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
memset(cs, 0xc5, PAGE_SIZE);
i915_gem_object_unpin_map(result);
- vma = i915_vma_instance(result, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -49,6 +50,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
goto err_pin;
}
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_req;
+
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
if (INTEL_GEN(ctx->i915) >= 8)
srm++;
@@ -67,15 +72,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
}
intel_ring_advance(rq, cs);
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- reservation_object_lock(vma->resv, NULL);
- reservation_object_add_excl_fence(vma->resv, &rq->fence);
- reservation_object_unlock(vma->resv);
-
i915_gem_object_get(result);
i915_gem_object_set_active_reference(result);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
i915_vma_unpin(vma);
return result;
@@ -112,6 +112,7 @@ static int check_whitelist(const struct whitelist *w,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *results;
+ struct igt_wedge_me wedge;
u32 *vaddr;
int err;
int i;
@@ -120,7 +121,11 @@ static int check_whitelist(const struct whitelist *w,
if (IS_ERR(results))
return PTR_ERR(results);
- err = i915_gem_object_set_to_cpu_domain(results, false);
+ err = 0;
+ igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
+ err = i915_gem_object_set_to_cpu_domain(results, false);
+ if (i915_terminally_wedged(&ctx->i915->gpu_error))
+ err = -EIO;
if (err)
goto out_put;
@@ -283,6 +288,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
};
int err;
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return 0;
+
mutex_lock(&i915->drm.struct_mutex);
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index 501becc47c0c..8904f1ce64e3 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -30,6 +30,7 @@ mock_context(struct drm_i915_private *i915,
const char *name)
{
struct i915_gem_context *ctx;
+ unsigned int n;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -43,6 +44,12 @@ mock_context(struct drm_i915_private *i915,
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
+ for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+ struct intel_context *ce = &ctx->__engine[n];
+
+ ce->gem_context = ctx;
+ }
+
ret = ida_simple_get(&i915->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0)
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
index 302f7d103635..ca682caf1062 100644
--- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
@@ -94,18 +94,6 @@ static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
vm_unmap_ram(vaddr, mock->npages);
}
-static void *mock_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- struct mock_dmabuf *mock = to_mock(dma_buf);
-
- return kmap_atomic(mock->pages[page_num]);
-}
-
-static void mock_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
- kunmap_atomic(addr);
-}
-
static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
@@ -130,9 +118,7 @@ static const struct dma_buf_ops mock_dmabuf_ops = {
.unmap_dma_buf = mock_unmap_dma_buf,
.release = mock_dmabuf_release,
.map = mock_dmabuf_kmap,
- .map_atomic = mock_dmabuf_kmap_atomic,
.unmap = mock_dmabuf_kunmap,
- .unmap_atomic = mock_dmabuf_kunmap_atomic,
.mmap = mock_dmabuf_mmap,
.vmap = mock_dmabuf_vmap,
.vunmap = mock_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 26bf29d97007..22a73da45ad5 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -72,25 +72,34 @@ static void hw_delay_complete(struct timer_list *t)
spin_unlock(&engine->hw_lock);
}
-static struct intel_ring *
-mock_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static void mock_context_unpin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
- if (!ce->pin_count++)
- i915_gem_context_get(ctx);
+ i915_gem_context_put(ce->gem_context);
+}
- return engine->buffer;
+static void mock_context_destroy(struct intel_context *ce)
+{
+ GEM_BUG_ON(ce->pin_count);
}
-static void mock_context_unpin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static const struct intel_context_ops mock_context_ops = {
+ .unpin = mock_context_unpin,
+ .destroy = mock_context_destroy,
+};
+
+static struct intel_context *
+mock_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = to_intel_context(ctx, engine);
- if (!--ce->pin_count)
- i915_gem_context_put(ctx);
+ if (!ce->pin_count++) {
+ i915_gem_context_get(ctx);
+ ce->ring = engine->buffer;
+ ce->ops = &mock_context_ops;
+ }
+
+ return ce;
}
static int mock_request_alloc(struct i915_request *request)
@@ -185,13 +194,14 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.status_page.page_addr = (void *)(engine + 1);
engine->base.context_pin = mock_context_pin;
- engine->base.context_unpin = mock_context_unpin;
engine->base.request_alloc = mock_request_alloc;
engine->base.emit_flush = mock_emit_flush;
engine->base.emit_breadcrumb = mock_emit_breadcrumb;
engine->base.submit_request = mock_submit_request;
i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
+ lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE);
+
intel_engine_init_breadcrumbs(&engine->base);
engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
@@ -204,8 +214,13 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
if (!engine->base.buffer)
goto err_breadcrumbs;
+ if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
+ goto err_ring;
+
return &engine->base;
+err_ring:
+ mock_ring_free(engine->base.buffer);
err_breadcrumbs:
intel_engine_fini_breadcrumbs(&engine->base);
i915_timeline_fini(&engine->base.timeline);
@@ -238,11 +253,15 @@ void mock_engine_free(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
container_of(engine, typeof(*mock), base);
+ struct intel_context *ce;
GEM_BUG_ON(timer_pending(&mock->hw_delay));
- if (engine->last_retired_context)
- intel_context_unpin(engine->last_retired_context, engine);
+ ce = fetch_and_zero(&engine->last_retired_context);
+ if (ce)
+ intel_context_unpin(ce);
+
+ __intel_context_unpin(engine->i915->kernel_context, engine);
mock_ring_free(engine->buffer);
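
The mock engine now returns the pinned intel_context itself, with an ops table installed on first pin, instead of threading (engine, ctx) pairs through every call. A reduced model of that pin-installs-vtable pattern (struct ctx and friends are illustrative only):

#include <stdio.h>

struct ctx;

struct ctx_ops {
	void (*unpin)(struct ctx *c);
	void (*destroy)(struct ctx *c);
};

struct ctx {
	int pin_count;
	const struct ctx_ops *ops;
};

static void mock_unpin(struct ctx *c)
{
	c->pin_count--;
	puts("unpin");
}

static void mock_destroy(struct ctx *c)
{
	(void)c;
	puts("destroy");
}

static const struct ctx_ops mock_ops = {
	.unpin = mock_unpin,
	.destroy = mock_destroy,
};

static struct ctx *ctx_pin(struct ctx *c)
{
	if (!c->pin_count++)		/* first pin installs the vtable */
		c->ops = &mock_ops;
	return c;
}

int main(void)
{
	struct ctx c = { 0 };
	struct ctx *ce = ctx_pin(&c);

	ce->ops->unpin(ce);
	return 0;
}
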
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 94baedfa0f74..43ed8b28aeaa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -136,8 +136,6 @@ static struct dev_pm_domain pm_domain = {
struct drm_i915_private *mock_gem_device(void)
{
struct drm_i915_private *i915;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
struct pci_dev *pdev;
int err;
@@ -159,7 +157,8 @@ struct drm_i915_private *mock_gem_device(void)
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- WARN_ON(pm_runtime_get_sync(&pdev->dev));
+ if (pm_runtime_enabled(&pdev->dev))
+ WARN_ON(pm_runtime_get_sync(&pdev->dev));
i915 = (struct drm_i915_private *)(pdev + 1);
pci_set_drvdata(pdev, i915);
@@ -233,13 +232,13 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(i915);
mkwrite_device_info(i915)->ring_mask = BIT(0);
- i915->engine[RCS] = mock_engine(i915, "mock", RCS);
- if (!i915->engine[RCS])
- goto err_unlock;
-
i915->kernel_context = mock_context(i915, NULL);
if (!i915->kernel_context)
- goto err_engine;
+ goto err_unlock;
+
+ i915->engine[RCS] = mock_engine(i915, "mock", RCS);
+ if (!i915->engine[RCS])
+ goto err_context;
mutex_unlock(&i915->drm.struct_mutex);
@@ -247,9 +246,8 @@ struct drm_i915_private *mock_gem_device(void)
return i915;
-err_engine:
- for_each_engine(engine, i915, id)
- mock_engine_free(engine);
+err_context:
+ i915_gem_contexts_fini(i915);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
kmem_cache_destroy(i915->priorities);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 36c112088940..a140ea5c3a7c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -66,25 +66,21 @@ mock_ppgtt(struct drm_i915_private *i915,
return NULL;
kref_init(&ppgtt->ref);
- ppgtt->base.i915 = i915;
- ppgtt->base.total = round_down(U64_MAX, PAGE_SIZE);
- ppgtt->base.file = ERR_PTR(-ENODEV);
-
- INIT_LIST_HEAD(&ppgtt->base.active_list);
- INIT_LIST_HEAD(&ppgtt->base.inactive_list);
- INIT_LIST_HEAD(&ppgtt->base.unbound_list);
-
- INIT_LIST_HEAD(&ppgtt->base.global_link);
- drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total);
-
- ppgtt->base.clear_range = nop_clear_range;
- ppgtt->base.insert_page = mock_insert_page;
- ppgtt->base.insert_entries = mock_insert_entries;
- ppgtt->base.bind_vma = mock_bind_ppgtt;
- ppgtt->base.unbind_vma = mock_unbind_ppgtt;
- ppgtt->base.set_pages = ppgtt_set_pages;
- ppgtt->base.clear_pages = clear_pages;
- ppgtt->base.cleanup = mock_cleanup;
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
+ ppgtt->vm.file = ERR_PTR(-ENODEV);
+
+ i915_address_space_init(&ppgtt->vm, i915);
+
+ ppgtt->vm.clear_range = nop_clear_range;
+ ppgtt->vm.insert_page = mock_insert_page;
+ ppgtt->vm.insert_entries = mock_insert_entries;
+ ppgtt->vm.cleanup = mock_cleanup;
+
+ ppgtt->vm.vma_ops.bind_vma = mock_bind_ppgtt;
+ ppgtt->vm.vma_ops.unbind_vma = mock_unbind_ppgtt;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
return ppgtt;
}
@@ -105,29 +101,28 @@ void mock_init_ggtt(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = &i915->ggtt;
- INIT_LIST_HEAD(&i915->vm_list);
-
- ggtt->base.i915 = i915;
+ ggtt->vm.i915 = i915;
ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
- ggtt->base.total = 4096 * PAGE_SIZE;
-
- ggtt->base.clear_range = nop_clear_range;
- ggtt->base.insert_page = mock_insert_page;
- ggtt->base.insert_entries = mock_insert_entries;
- ggtt->base.bind_vma = mock_bind_ggtt;
- ggtt->base.unbind_vma = mock_unbind_ggtt;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.cleanup = mock_cleanup;
-
- i915_address_space_init(&ggtt->base, i915, "global");
+ ggtt->vm.total = 4096 * PAGE_SIZE;
+
+ ggtt->vm.clear_range = nop_clear_range;
+ ggtt->vm.insert_page = mock_insert_page;
+ ggtt->vm.insert_entries = mock_insert_entries;
+ ggtt->vm.cleanup = mock_cleanup;
+
+ ggtt->vm.vma_ops.bind_vma = mock_bind_ggtt;
+ ggtt->vm.vma_ops.unbind_vma = mock_unbind_ggtt;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+ i915_address_space_init(&ggtt->vm, i915);
}
void mock_fini_ggtt(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = &i915->ggtt;
- i915_address_space_fini(&ggtt->base);
+ i915_address_space_fini(&ggtt->vm);
}
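
Alongside the base→vm rename, the per-VMA callbacks move into a nested vma_ops block so GGTT and PPGTT setup fill in the same sub-structure. A compilable miniature of that layout (simplified types, not the driver's):

#include <stdio.h>

struct vma_ops {
	int  (*bind_vma)(void *vma, unsigned int flags);
	void (*unbind_vma)(void *vma);
};

struct address_space {
	unsigned long long total;
	struct vma_ops vma_ops;		/* was: flat callbacks on the vm itself */
};

static int mock_bind(void *vma, unsigned int flags)
{
	(void)vma; (void)flags;
	return 0;
}

static void mock_unbind(void *vma)
{
	(void)vma;
}

int main(void)
{
	struct address_space vm = {
		.total = 4096ull * 4096,
		.vma_ops = { .bind_vma = mock_bind, .unbind_vma = mock_unbind },
	};

	printf("vm.total = %llu, bind = %d\n",
	       vm.total, vm.vma_ops.bind_vma(NULL, 0));
	return 0;
}
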
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index f349b3920199..435a2c35ee8c 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -69,7 +69,7 @@ enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
}
}
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
+void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
@@ -342,11 +342,15 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
else
pipe_config->cpu_transcoder = TRANSCODER_DSI_A;
- }
- ret = intel_compute_dsi_pll(encoder, pipe_config);
- if (ret)
- return false;
+ ret = bxt_dsi_pll_compute(encoder, pipe_config);
+ if (ret)
+ return false;
+ } else {
+ ret = vlv_dsi_pll_compute(encoder, pipe_config);
+ if (ret)
+ return false;
+ }
pipe_config->clock_set = true;
@@ -546,12 +550,12 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_dsi_device_ready(encoder);
- else if (IS_BROXTON(dev_priv))
- bxt_dsi_device_ready(encoder);
- else if (IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
glk_dsi_device_ready(encoder);
+ else if (IS_GEN9_LP(dev_priv))
+ bxt_dsi_device_ready(encoder);
+ else
+ vlv_dsi_device_ready(encoder);
}
static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
@@ -810,8 +814,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
* The BIOS may leave the PLL in a wonky state where it doesn't
* lock. It needs to be fully powered down to fix it.
*/
- intel_disable_dsi_pll(encoder);
- intel_enable_dsi_pll(encoder, pipe_config);
+ if (IS_GEN9_LP(dev_priv)) {
+ bxt_dsi_pll_disable(encoder);
+ bxt_dsi_pll_enable(encoder, pipe_config);
+ } else {
+ vlv_dsi_pll_disable(encoder);
+ vlv_dsi_pll_enable(encoder, pipe_config);
+ }
if (IS_BROXTON(dev_priv)) {
/* Add MIPI IO reset programming for modeset */
@@ -929,11 +938,10 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
- IS_BROXTON(dev_priv))
- vlv_dsi_clear_device_ready(encoder);
- else if (IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
glk_dsi_clear_device_ready(encoder);
+ else
+ vlv_dsi_clear_device_ready(encoder);
}
static void intel_dsi_post_disable(struct intel_encoder *encoder,
@@ -949,7 +957,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
- wait_for_dsi_fifo_empty(intel_dsi, port);
+ vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
intel_dsi_port_disable(encoder);
usleep_range(2000, 5000);
@@ -979,11 +987,13 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
val & ~MIPIO_RST_CTRL);
}
- intel_disable_dsi_pll(encoder);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
+ bxt_dsi_pll_disable(encoder);
+ } else {
u32 val;
+ vlv_dsi_pll_disable(encoder);
+
val = I915_READ(DSPCLK_GATE_D);
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
@@ -1024,7 +1034,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* configuration, otherwise accessing DSI registers will hang the
* machine. See BSpec North Display Engine registers/MIPI[BXT].
*/
- if (IS_GEN9_LP(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
+ if (IS_GEN9_LP(dev_priv) && !bxt_dsi_pll_is_enabled(dev_priv))
goto out_put_power;
/* XXX: this only works for one DSI output */
@@ -1247,16 +1257,19 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEN9_LP(dev_priv)) {
bxt_dsi_get_pipe_config(encoder, pipe_config);
+ pclk = bxt_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
+ pipe_config);
+ } else {
+ pclk = vlv_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
+ pipe_config);
+ }
- pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
- pipe_config);
- if (!pclk)
- return;
-
- pipe_config->base.adjusted_mode.crtc_clock = pclk;
- pipe_config->port_clock = pclk;
+ if (pclk) {
+ pipe_config->base.adjusted_mode.crtc_clock = pclk;
+ pipe_config->port_clock = pclk;
+ }
}
static enum drm_mode_status
@@ -1585,20 +1598,24 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
enum port port;
u32 val;
- if (!IS_GEMINILAKE(dev_priv)) {
- for_each_dsi_port(port, intel_dsi->ports) {
- /* Panel commands can be sent when clock is in LP11 */
- I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
+ if (IS_GEMINILAKE(dev_priv))
+ return;
- intel_dsi_reset_clocks(encoder, port);
- I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* Panel commands can be sent when clock is in LP11 */
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
- val = I915_READ(MIPI_DSI_FUNC_PRG(port));
- val &= ~VID_MODE_FORMAT_MASK;
- I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+ if (IS_GEN9_LP(dev_priv))
+ bxt_dsi_reset_clocks(encoder, port);
+ else
+ vlv_dsi_reset_clocks(encoder, port);
+ I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
- I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
- }
+ val = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ val &= ~VID_MODE_FORMAT_MASK;
+ I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
}
}
@@ -1671,16 +1688,16 @@ static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
- enum i9xx_plane_id plane;
+ enum i9xx_plane_id i9xx_plane;
u32 val;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (connector->encoder->crtc_mask == BIT(PIPE_B))
- plane = PLANE_B;
+ i9xx_plane = PLANE_B;
else
- plane = PLANE_A;
+ i9xx_plane = PLANE_A;
- val = I915_READ(DSPCNTR(plane));
+ val = I915_READ(DSPCNTR(i9xx_plane));
if (val & DISPPLANE_ROTATE_180)
orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
}
@@ -1713,7 +1730,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
}
}
-void intel_dsi_init(struct drm_i915_private *dev_priv)
+void vlv_dsi_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct intel_dsi *intel_dsi;
@@ -1730,14 +1747,10 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
- } else if (IS_GEN9_LP(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv))
dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
- } else {
- DRM_ERROR("Unsupported Mipi device to reg base");
- return;
- }
+ else
+ dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index 2ff2ee7f3b78..a132a8037ecc 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -111,8 +111,8 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
* XXX: The muxing and gating is hard coded for now. Need to add support for
* sharing PLLs with two DSI outputs.
*/
-static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
- struct intel_crtc_state *config)
+int vlv_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -142,8 +142,8 @@ static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
return 0;
}
-static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
- const struct intel_crtc_state *config)
+void vlv_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -175,7 +175,7 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
DRM_DEBUG_KMS("DSI PLL locked\n");
}
-static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
+void vlv_dsi_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
@@ -192,7 +192,7 @@ static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
-static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
{
bool enabled;
u32 val;
@@ -229,7 +229,7 @@ static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
return enabled;
}
-static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
+void bxt_dsi_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
@@ -261,8 +261,8 @@ static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
bpp, pipe_bpp);
}
-static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
- struct intel_crtc_state *config)
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -327,8 +327,8 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
return pclk;
}
-static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
- struct intel_crtc_state *config)
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config)
{
u32 pclk;
u32 dsi_clk;
@@ -357,16 +357,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
return pclk;
}
-u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
- struct intel_crtc_state *config)
-{
- if (IS_GEN9_LP(to_i915(encoder->base.dev)))
- return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
- else
- return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
-}
-
-static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
+void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 temp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -480,8 +471,8 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
}
-static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder,
- struct intel_crtc_state *config)
+int bxt_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -528,8 +519,8 @@ static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder,
return 0;
}
-static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder,
- const struct intel_crtc_state *config)
+void bxt_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -568,52 +559,7 @@ static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder,
DRM_DEBUG_KMS("DSI PLL locked\n");
}
-bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
-{
- if (IS_GEN9_LP(dev_priv))
- return bxt_dsi_pll_is_enabled(dev_priv);
-
- MISSING_CASE(INTEL_DEVID(dev_priv));
-
- return false;
-}
-
-int intel_compute_dsi_pll(struct intel_encoder *encoder,
- struct intel_crtc_state *config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_compute_dsi_pll(encoder, config);
- else if (IS_GEN9_LP(dev_priv))
- return gen9lp_compute_dsi_pll(encoder, config);
-
- return -ENODEV;
-}
-
-void intel_enable_dsi_pll(struct intel_encoder *encoder,
- const struct intel_crtc_state *config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_enable_dsi_pll(encoder, config);
- else if (IS_GEN9_LP(dev_priv))
- gen9lp_enable_dsi_pll(encoder, config);
-}
-
-void intel_disable_dsi_pll(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_disable_dsi_pll(encoder);
- else if (IS_GEN9_LP(dev_priv))
- bxt_disable_dsi_pll(encoder);
-}
-
-static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder,
- enum port port)
+void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 tmp;
struct drm_device *dev = encoder->base.dev;
@@ -638,13 +584,3 @@ static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder,
}
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
-
-void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_GEN9_LP(dev_priv))
- gen9lp_dsi_reset_clocks(encoder, port);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_dsi_reset_clocks(encoder, port);
-}
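
With the intel_* wrappers deleted, platform dispatch happens once at the call site (IS_GEN9_LP versus the VLV/CHV default), matching the new vlv_/bxt_ prefixes. A toy of that inversion (the functions here are stubs, not the PLL code):

#include <stdbool.h>
#include <stdio.h>

static void vlv_pll_enable(void) { puts("vlv pll enable"); }
static void bxt_pll_enable(void) { puts("bxt pll enable"); }

/* the caller, not a shared wrapper, decides which variant to use */
static void enable_pll(bool is_gen9_lp)
{
	if (is_gen9_lp)
		bxt_pll_enable();
	else
		vlv_pll_enable();
}

int main(void)
{
	enable_pll(false);	/* Valleyview/Cherryview path */
	enable_pll(true);	/* Broxton/Geminilake path */
	return 0;
}
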
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 1d053bbefc02..5ea0c82f9957 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -35,12 +35,6 @@
#define MAX_CRTC 4
-struct imx_drm_device {
- struct drm_device *drm;
- unsigned int pipes;
- struct drm_atomic_state *state;
-};
-
#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
static int legacyfb_depth = 16;
module_param(legacyfb_depth, int, 0444);
@@ -219,22 +213,12 @@ static int compare_of(struct device *dev, void *data)
static int imx_drm_bind(struct device *dev)
{
struct drm_device *drm;
- struct imx_drm_device *imxdrm;
int ret;
drm = drm_dev_alloc(&imx_drm_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
- imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
- if (!imxdrm) {
- ret = -ENOMEM;
- goto err_unref;
- }
-
- imxdrm->drm = drm;
- drm->dev_private = imxdrm;
-
/*
* enable drm irq mode.
* - with irq_enabled = true, we can use the vblank feature.
@@ -306,8 +290,7 @@ err_unbind:
component_unbind_all(drm->dev, drm);
err_kms:
drm_mode_config_cleanup(drm);
-err_unref:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -327,7 +310,7 @@ static void imx_drm_unbind(struct device *dev)
component_unbind_all(drm->dev, drm);
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops imx_drm_ops = {
@@ -355,37 +338,15 @@ static int imx_drm_platform_remove(struct platform_device *pdev)
static int imx_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct imx_drm_device *imxdrm;
-
- /* The drm_dev is NULL before .load hook is called */
- if (drm_dev == NULL)
- return 0;
-
- drm_kms_helper_poll_disable(drm_dev);
- imxdrm = drm_dev->dev_private;
- imxdrm->state = drm_atomic_helper_suspend(drm_dev);
- if (IS_ERR(imxdrm->state)) {
- drm_kms_helper_poll_enable(drm_dev);
- return PTR_ERR(imxdrm->state);
- }
-
- return 0;
+ return drm_mode_config_helper_suspend(drm_dev);
}
static int imx_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct imx_drm_device *imx_drm;
- if (drm_dev == NULL)
- return 0;
-
- imx_drm = drm_dev->dev_private;
- drm_atomic_helper_resume(drm_dev, imx_drm->state);
- drm_kms_helper_poll_enable(drm_dev);
-
- return 0;
+ return drm_mode_config_helper_resume(drm_dev);
}
#endif
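
imx-drm's hand-rolled suspend/resume (private state struct, NULL guards, poll toggling) collapses into the drm_mode_config_helper pair, which parks the atomic state on the drm_device itself. A stub-level sketch of that consolidation (these stubs only mirror the call shape, not the real helpers):

#include <stdio.h>

struct drm_device { void *suspend_state; };

static int mode_config_helper_suspend(struct drm_device *drm)
{
	drm->suspend_state = "saved atomic state";	/* placeholder */
	puts("polling off, state saved on the device");
	return 0;
}

static int mode_config_helper_resume(struct drm_device *drm)
{
	printf("restoring: %s\n", (const char *)drm->suspend_state);
	drm->suspend_state = NULL;
	return 0;
}

int main(void)
{
	struct drm_device drm = { 0 };

	mode_config_helper_suspend(&drm);
	return mode_config_helper_resume(&drm);
}
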
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 15c2bec47a04..ab9c6f706eb3 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -10,7 +10,6 @@ struct drm_display_mode;
struct drm_encoder;
struct drm_framebuffer;
struct drm_plane;
-struct imx_drm_crtc;
struct platform_device;
struct imx_crtc_state {
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index dd5312b02a8d..3bd0f8a18e74 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -143,7 +143,7 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
if (imx_ldb_ch->edid) {
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
imx_ldb_ch->edid);
num_modes = drm_add_edid_modes(connector, imx_ldb_ch->edid);
}
@@ -471,8 +471,7 @@ static int imx_ldb_register(struct drm_device *drm,
drm_connector_init(drm, &imx_ldb_ch->connector,
&imx_ldb_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
- encoder);
+ drm_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
}
if (imx_ldb_ch->panel) {
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index bc27c2699464..cffd3310240e 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -235,7 +235,7 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, tve->ddc);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
@@ -493,7 +493,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
drm_connector_init(drm, &tve->connector, &imx_tve_connector_funcs,
DRM_MODE_CONNECTOR_VGA);
- drm_mode_connector_attach_encoder(&tve->connector, &tve->encoder);
+ drm_connector_attach_encoder(&tve->connector, &tve->encoder);
return 0;
}
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index e83af0f2be86..7d4b710b837a 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -35,7 +35,6 @@
struct ipu_crtc {
struct device *dev;
struct drm_crtc base;
- struct imx_drm_crtc *imx_crtc;
/* plane[0] is the full plane, plane[1] is the partial plane */
struct ipu_plane *plane[2];
@@ -213,7 +212,7 @@ static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary);
+ u32 primary_plane_mask = drm_plane_mask(crtc->primary);
if (state->active && (primary_plane_mask & state->plane_mask) == 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index aedecda9728a..aefd04e18f93 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -63,7 +63,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
}
if (imxpd->edid) {
- drm_mode_connector_update_edid_property(connector, imxpd->edid);
+ drm_connector_update_edid_property(connector, imxpd->edid);
num_modes = drm_add_edid_modes(connector, imxpd->edid);
}
@@ -197,7 +197,7 @@ static int imx_pd_register(struct drm_device *drm,
return ret;
}
} else {
- drm_mode_connector_attach_encoder(&imxpd->connector, encoder);
+ drm_connector_attach_encoder(&imxpd->connector, encoder);
}
return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 658b8dd45b83..2d6aa150a9ff 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -539,6 +539,9 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
int ret;
int i;
+ if (!path)
+ return 0;
+
for (i = 0; i < path_len; i++) {
enum mtk_ddp_comp_id comp_id = path[i];
struct device_node *node;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 8130f3dab661..87e4191c250e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -28,8 +28,12 @@
#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
+#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4
+#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8
#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
-#define DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN 0x0c8
+#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8
+#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4
+#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8
#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
@@ -41,45 +45,89 @@
#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n))
#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
#define INT_MUTEX BIT(1)
-#define MT8173_MUTEX_MOD_DISP_OVL0 BIT(11)
-#define MT8173_MUTEX_MOD_DISP_OVL1 BIT(12)
-#define MT8173_MUTEX_MOD_DISP_RDMA0 BIT(13)
-#define MT8173_MUTEX_MOD_DISP_RDMA1 BIT(14)
-#define MT8173_MUTEX_MOD_DISP_RDMA2 BIT(15)
-#define MT8173_MUTEX_MOD_DISP_WDMA0 BIT(16)
-#define MT8173_MUTEX_MOD_DISP_WDMA1 BIT(17)
-#define MT8173_MUTEX_MOD_DISP_COLOR0 BIT(18)
-#define MT8173_MUTEX_MOD_DISP_COLOR1 BIT(19)
-#define MT8173_MUTEX_MOD_DISP_AAL BIT(20)
-#define MT8173_MUTEX_MOD_DISP_GAMMA BIT(21)
-#define MT8173_MUTEX_MOD_DISP_UFOE BIT(22)
-#define MT8173_MUTEX_MOD_DISP_PWM0 BIT(23)
-#define MT8173_MUTEX_MOD_DISP_PWM1 BIT(24)
-#define MT8173_MUTEX_MOD_DISP_OD BIT(25)
-
-#define MT2701_MUTEX_MOD_DISP_OVL BIT(3)
-#define MT2701_MUTEX_MOD_DISP_WDMA BIT(6)
-#define MT2701_MUTEX_MOD_DISP_COLOR BIT(7)
-#define MT2701_MUTEX_MOD_DISP_BLS BIT(9)
-#define MT2701_MUTEX_MOD_DISP_RDMA0 BIT(10)
-#define MT2701_MUTEX_MOD_DISP_RDMA1 BIT(12)
+#define MT8173_MUTEX_MOD_DISP_OVL0 11
+#define MT8173_MUTEX_MOD_DISP_OVL1 12
+#define MT8173_MUTEX_MOD_DISP_RDMA0 13
+#define MT8173_MUTEX_MOD_DISP_RDMA1 14
+#define MT8173_MUTEX_MOD_DISP_RDMA2 15
+#define MT8173_MUTEX_MOD_DISP_WDMA0 16
+#define MT8173_MUTEX_MOD_DISP_WDMA1 17
+#define MT8173_MUTEX_MOD_DISP_COLOR0 18
+#define MT8173_MUTEX_MOD_DISP_COLOR1 19
+#define MT8173_MUTEX_MOD_DISP_AAL 20
+#define MT8173_MUTEX_MOD_DISP_GAMMA 21
+#define MT8173_MUTEX_MOD_DISP_UFOE 22
+#define MT8173_MUTEX_MOD_DISP_PWM0 23
+#define MT8173_MUTEX_MOD_DISP_PWM1 24
+#define MT8173_MUTEX_MOD_DISP_OD 25
+
+#define MT2712_MUTEX_MOD_DISP_PWM2 10
+#define MT2712_MUTEX_MOD_DISP_OVL0 11
+#define MT2712_MUTEX_MOD_DISP_OVL1 12
+#define MT2712_MUTEX_MOD_DISP_RDMA0 13
+#define MT2712_MUTEX_MOD_DISP_RDMA1 14
+#define MT2712_MUTEX_MOD_DISP_RDMA2 15
+#define MT2712_MUTEX_MOD_DISP_WDMA0 16
+#define MT2712_MUTEX_MOD_DISP_WDMA1 17
+#define MT2712_MUTEX_MOD_DISP_COLOR0 18
+#define MT2712_MUTEX_MOD_DISP_COLOR1 19
+#define MT2712_MUTEX_MOD_DISP_AAL0 20
+#define MT2712_MUTEX_MOD_DISP_UFOE 22
+#define MT2712_MUTEX_MOD_DISP_PWM0 23
+#define MT2712_MUTEX_MOD_DISP_PWM1 24
+#define MT2712_MUTEX_MOD_DISP_OD0 25
+#define MT2712_MUTEX_MOD2_DISP_AAL1 33
+#define MT2712_MUTEX_MOD2_DISP_OD1 34
+
+#define MT2701_MUTEX_MOD_DISP_OVL 3
+#define MT2701_MUTEX_MOD_DISP_WDMA 6
+#define MT2701_MUTEX_MOD_DISP_COLOR 7
+#define MT2701_MUTEX_MOD_DISP_BLS 9
+#define MT2701_MUTEX_MOD_DISP_RDMA0 10
+#define MT2701_MUTEX_MOD_DISP_RDMA1 12
#define MUTEX_SOF_SINGLE_MODE 0
#define MUTEX_SOF_DSI0 1
#define MUTEX_SOF_DSI1 2
#define MUTEX_SOF_DPI0 3
+#define MUTEX_SOF_DPI1 4
+#define MUTEX_SOF_DSI2 5
+#define MUTEX_SOF_DSI3 6
#define OVL0_MOUT_EN_COLOR0 0x1
#define OD_MOUT_EN_RDMA0 0x1
+#define OD1_MOUT_EN_RDMA1 BIT(16)
#define UFOE_MOUT_EN_DSI0 0x1
#define COLOR0_SEL_IN_OVL0 0x1
#define OVL1_MOUT_EN_COLOR1 0x1
#define GAMMA_MOUT_EN_RDMA1 0x1
-#define RDMA1_MOUT_DPI0 0x2
+#define RDMA0_SOUT_DPI0 0x2
+#define RDMA0_SOUT_DSI2 0x4
+#define RDMA0_SOUT_DSI3 0x5
+#define RDMA1_SOUT_DPI0 0x2
+#define RDMA1_SOUT_DPI1 0x3
+#define RDMA1_SOUT_DSI1 0x1
+#define RDMA1_SOUT_DSI2 0x4
+#define RDMA1_SOUT_DSI3 0x5
+#define RDMA2_SOUT_DPI0 0x2
+#define RDMA2_SOUT_DPI1 0x3
+#define RDMA2_SOUT_DSI1 0x1
+#define RDMA2_SOUT_DSI2 0x4
+#define RDMA2_SOUT_DSI3 0x5
#define DPI0_SEL_IN_RDMA1 0x1
+#define DPI0_SEL_IN_RDMA2 0x3
+#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
+#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
+#define DSI1_SEL_IN_RDMA1 0x1
+#define DSI1_SEL_IN_RDMA2 0x4
+#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI2_SEL_IN_RDMA2 (0x4 << 16)
+#define DSI3_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI3_SEL_IN_RDMA2 (0x4 << 16)
#define COLOR1_SEL_IN_OVL1 0x1
#define OVL_MOUT_EN_RDMA 0x1
@@ -108,12 +156,32 @@ static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA,
};
+static const unsigned int mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT2712_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_AAL1] = MT2712_MUTEX_MOD2_DISP_AAL1,
+ [DDP_COMPONENT_COLOR0] = MT2712_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_COLOR1] = MT2712_MUTEX_MOD_DISP_COLOR1,
+ [DDP_COMPONENT_OD0] = MT2712_MUTEX_MOD_DISP_OD0,
+ [DDP_COMPONENT_OD1] = MT2712_MUTEX_MOD2_DISP_OD1,
+ [DDP_COMPONENT_OVL0] = MT2712_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL1] = MT2712_MUTEX_MOD_DISP_OVL1,
+ [DDP_COMPONENT_PWM0] = MT2712_MUTEX_MOD_DISP_PWM0,
+ [DDP_COMPONENT_PWM1] = MT2712_MUTEX_MOD_DISP_PWM1,
+ [DDP_COMPONENT_PWM2] = MT2712_MUTEX_MOD_DISP_PWM2,
+ [DDP_COMPONENT_RDMA0] = MT2712_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT2712_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_RDMA2] = MT2712_MUTEX_MOD_DISP_RDMA2,
+ [DDP_COMPONENT_UFOE] = MT2712_MUTEX_MOD_DISP_UFOE,
+ [DDP_COMPONENT_WDMA0] = MT2712_MUTEX_MOD_DISP_WDMA0,
+ [DDP_COMPONENT_WDMA1] = MT2712_MUTEX_MOD_DISP_WDMA1,
+};
+
static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
- [DDP_COMPONENT_AAL] = MT8173_MUTEX_MOD_DISP_AAL,
+ [DDP_COMPONENT_AAL0] = MT8173_MUTEX_MOD_DISP_AAL,
[DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0,
[DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1,
[DDP_COMPONENT_GAMMA] = MT8173_MUTEX_MOD_DISP_GAMMA,
- [DDP_COMPONENT_OD] = MT8173_MUTEX_MOD_DISP_OD,
+ [DDP_COMPONENT_OD0] = MT8173_MUTEX_MOD_DISP_OD,
[DDP_COMPONENT_OVL0] = MT8173_MUTEX_MOD_DISP_OVL0,
[DDP_COMPONENT_OVL1] = MT8173_MUTEX_MOD_DISP_OVL1,
[DDP_COMPONENT_PWM0] = MT8173_MUTEX_MOD_DISP_PWM0,
@@ -138,7 +206,7 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
*addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
value = OVL_MOUT_EN_RDMA;
- } else if (cur == DDP_COMPONENT_OD && next == DDP_COMPONENT_RDMA0) {
+ } else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) {
*addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
value = OD_MOUT_EN_RDMA0;
} else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) {
@@ -150,9 +218,48 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) {
*addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN;
value = GAMMA_MOUT_EN_RDMA1;
+ } else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) {
+ *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
+ value = OD1_MOUT_EN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI3;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI3;
} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN;
- value = RDMA1_MOUT_DPI0;
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI3;
} else {
value = 0;
}
@@ -172,6 +279,33 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
*addr = DISP_REG_CONFIG_DPI_SEL_IN;
value = DPI0_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI2_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI3_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI0_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI1_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI1_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI2_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI3_SEL_IN_RDMA2;
} else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
*addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
value = COLOR1_SEL_IN_OVL1;
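The mout/sout and sel_in hunks above are two halves of the same crossbar: the source block's SOUT/MOUT register fans its output out to a sink, and the sink's SEL_IN register must select the matching input, or the link stays dark. As a minimal sketch (not from the patch; config_regs and the direct writes are assumptions, the driver goes through its own path helpers), wiring RDMA1 to DPI0 means programming both ends:

	/* hypothetical direct programming of the RDMA1 -> DPI0 link */
	writel_relaxed(RDMA1_SOUT_DPI0,
		       config_regs + DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN);
	writel_relaxed(DPI0_SEL_IN_RDMA1,
		       config_regs + DISP_REG_CONFIG_DPI_SEL_IN);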
@@ -278,6 +412,7 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
mutex[mutex->id]);
unsigned int reg;
+ unsigned int offset;
WARN_ON(&ddp->mutex[mutex->id] != mutex);
@@ -288,13 +423,30 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
case DDP_COMPONENT_DSI1:
reg = MUTEX_SOF_DSI0;
break;
+ case DDP_COMPONENT_DSI2:
+ reg = MUTEX_SOF_DSI2;
+ break;
+ case DDP_COMPONENT_DSI3:
+ reg = MUTEX_SOF_DSI3;
+ break;
case DDP_COMPONENT_DPI0:
reg = MUTEX_SOF_DPI0;
break;
+ case DDP_COMPONENT_DPI1:
+ reg = MUTEX_SOF_DPI1;
+ break;
default:
- reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
- reg |= ddp->mutex_mod[id];
- writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+ if (ddp->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(mutex->id);
+ reg = readl_relaxed(ddp->regs + offset);
+ reg |= 1 << ddp->mutex_mod[id];
+ writel_relaxed(reg, ddp->regs + offset);
+ } else {
+ offset = DISP_REG_MUTEX_MOD2(mutex->id);
+ reg = readl_relaxed(ddp->regs + offset);
+ reg |= 1 << (ddp->mutex_mod[id] - 32);
+ writel_relaxed(reg, ddp->regs + offset);
+ }
return;
}
@@ -307,20 +459,32 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
mutex[mutex->id]);
unsigned int reg;
+ unsigned int offset;
WARN_ON(&ddp->mutex[mutex->id] != mutex);
switch (id) {
case DDP_COMPONENT_DSI0:
case DDP_COMPONENT_DSI1:
+ case DDP_COMPONENT_DSI2:
+ case DDP_COMPONENT_DSI3:
case DDP_COMPONENT_DPI0:
+ case DDP_COMPONENT_DPI1:
writel_relaxed(MUTEX_SOF_SINGLE_MODE,
ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
break;
default:
- reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
- reg &= ~(ddp->mutex_mod[id]);
- writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+ if (ddp->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(mutex->id);
+ reg = readl_relaxed(ddp->regs + offset);
+ reg &= ~(1 << ddp->mutex_mod[id]);
+ writel_relaxed(reg, ddp->regs + offset);
+ } else {
+ offset = DISP_REG_MUTEX_MOD2(mutex->id);
+ reg = readl_relaxed(ddp->regs + offset);
+ reg &= ~(1 << (ddp->mutex_mod[id] - 32));
+ writel_relaxed(reg, ddp->regs + offset);
+ }
break;
}
}
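Both mutex hunks above encode the same scheme: mutex_mod[] no longer holds a ready-made mask but a bit index, so components whose enable bit lives past bit 31 (mt2712's AAL1 and OD1) can spill into a second MOD2 register. A minimal sketch of the shared logic, under the assumption that indices 0-31 map to DISP_REG_MUTEX_MOD and 32-63 to DISP_REG_MUTEX_MOD2:

	/* sketch only: set or clear one component's mutex bit */
	static void mutex_mod_update(struct mtk_ddp *ddp, unsigned int mutex_id,
				     unsigned int mod, bool enable)
	{
		unsigned int offset = (mod < 32) ? DISP_REG_MUTEX_MOD(mutex_id)
						 : DISP_REG_MUTEX_MOD2(mutex_id);
		u32 bit = 1U << (mod & 31);	/* mod & 31 == mod - 32 for mod >= 32 */
		u32 reg = readl_relaxed(ddp->regs + offset);

		if (enable)
			reg |= bit;
		else
			reg &= ~bit;
		writel_relaxed(reg, ddp->regs + offset);
	}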
@@ -407,6 +571,7 @@ static int mtk_ddp_remove(struct platform_device *pdev)
static const struct of_device_id ddp_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod},
+ { .compatible = "mediatek,mt2712-disp-mutex", .data = mt2712_mutex_mod},
{ .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod},
{},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 4672317e3ad1..ff974d82a4a6 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -218,18 +218,25 @@ struct mtk_ddp_comp_match {
};
static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
- [DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal },
+ [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal },
+ [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
[DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
[DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL },
[DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
+ [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, NULL },
[DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL },
[DDP_COMPONENT_DSI1] = { MTK_DSI, 1, NULL },
+ [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, NULL },
+ [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, NULL },
[DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma },
- [DDP_COMPONENT_OD] = { MTK_DISP_OD, 0, &ddp_od },
+ [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od },
+ [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
[DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL },
[DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL },
[DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
+ [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
+ [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
[DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, NULL },
[DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, NULL },
[DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, NULL },
@@ -271,7 +278,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
if (comp_id == DDP_COMPONENT_BLS ||
comp_id == DDP_COMPONENT_DPI0 ||
+ comp_id == DDP_COMPONENT_DPI1 ||
comp_id == DDP_COMPONENT_DSI0 ||
+ comp_id == DDP_COMPONENT_DSI1 ||
+ comp_id == DDP_COMPONENT_DSI2 ||
+ comp_id == DDP_COMPONENT_DSI3 ||
comp_id == DDP_COMPONENT_PWM0) {
comp->regs = NULL;
comp->clk = NULL;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 0828cf8bf85c..7413ffeb3c9d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -41,19 +41,25 @@ enum mtk_ddp_comp_type {
};
enum mtk_ddp_comp_id {
- DDP_COMPONENT_AAL,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_AAL1,
DDP_COMPONENT_BLS,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
DDP_COMPONENT_DPI0,
+ DDP_COMPONENT_DPI1,
DDP_COMPONENT_DSI0,
DDP_COMPONENT_DSI1,
+ DDP_COMPONENT_DSI2,
+ DDP_COMPONENT_DSI3,
DDP_COMPONENT_GAMMA,
- DDP_COMPONENT_OD,
+ DDP_COMPONENT_OD0,
+ DDP_COMPONENT_OD1,
DDP_COMPONENT_OVL0,
DDP_COMPONENT_OVL1,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
+ DDP_COMPONENT_PWM2,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_RDMA1,
DDP_COMPONENT_RDMA2,
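The enum and match-table hunks above replace the singleton AAL/OD entries with per-instance ones (AAL0/AAL1, OD0/OD1) so mt2712's duplicated blocks are addressable. Each table entry pairs a hardware type with an alias index, and a device node resolves to a component id by matching both; roughly (a sketch, the field names are assumptions based on the table layout):

	/* sketch: resolve (type, alias) to a component id */
	enum mtk_ddp_comp_id id;

	for (id = 0; id < DDP_COMPONENT_ID_MAX; id++)
		if (mtk_ddp_matches[id].type == type &&
		    mtk_ddp_matches[id].alias_id == alias)
			return id;	/* e.g. MTK_DISP_AAL + alias 1 -> DDP_COMPONENT_AAL1 */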
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index a2ca90fc403c..39721119713b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -146,11 +146,37 @@ static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = {
DDP_COMPONENT_DPI0,
};
+static const enum mtk_ddp_comp_id mt2712_mtk_ddp_main[] = {
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_OD0,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_DPI0,
+ DDP_COMPONENT_PWM0,
+};
+
+static const enum mtk_ddp_comp_id mt2712_mtk_ddp_ext[] = {
+ DDP_COMPONENT_OVL1,
+ DDP_COMPONENT_COLOR1,
+ DDP_COMPONENT_AAL1,
+ DDP_COMPONENT_OD1,
+ DDP_COMPONENT_RDMA1,
+ DDP_COMPONENT_DPI1,
+ DDP_COMPONENT_PWM1,
+};
+
+static const enum mtk_ddp_comp_id mt2712_mtk_ddp_third[] = {
+ DDP_COMPONENT_RDMA2,
+ DDP_COMPONENT_DSI3,
+ DDP_COMPONENT_PWM2,
+};
+
static const enum mtk_ddp_comp_id mt8173_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_COLOR0,
- DDP_COMPONENT_AAL,
- DDP_COMPONENT_OD,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_OD0,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_UFOE,
DDP_COMPONENT_DSI0,
@@ -173,6 +199,15 @@ static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
.shadow_register = true,
};
+static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
+ .main_path = mt2712_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt2712_mtk_ddp_main),
+ .ext_path = mt2712_mtk_ddp_ext,
+ .ext_len = ARRAY_SIZE(mt2712_mtk_ddp_ext),
+ .third_path = mt2712_mtk_ddp_third,
+ .third_len = ARRAY_SIZE(mt2712_mtk_ddp_third),
+};
+
static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
.main_path = mt8173_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt8173_mtk_ddp_main),
@@ -232,6 +267,11 @@ static int mtk_drm_kms_init(struct drm_device *drm)
if (ret < 0)
goto err_component_unbind;
+ ret = mtk_drm_crtc_create(drm, private->data->third_path,
+ private->data->third_len);
+ if (ret < 0)
+ goto err_component_unbind;
+
/* Use OVL device for all DMA memory allocations */
np = private->comp_node[private->data->main_path[0]] ?:
private->comp_node[private->data->ext_path[0]];
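With a third pipeline, mtk_drm_kms_init() now creates one CRTC per configured path. A hypothetical consolidation of the three calls, assuming mtk_drm_crtc_create() simply returns 0 for a NULL path (as it must for SoCs without an ext or third path):

	const struct mtk_mmsys_driver_data *data = private->data;
	const enum mtk_ddp_comp_id *paths[] = {
		data->main_path, data->ext_path, data->third_path,
	};
	const unsigned int lens[] = {
		data->main_len, data->ext_len, data->third_len,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(paths); i++) {
		ret = mtk_drm_crtc_create(drm, paths[i], lens[i]);
		if (ret < 0)
			goto err_component_unbind;
	}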
@@ -360,24 +400,44 @@ static const struct component_master_ops mtk_drm_ops = {
};
static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
- { .compatible = "mediatek,mt2701-disp-ovl", .data = (void *)MTK_DISP_OVL },
- { .compatible = "mediatek,mt8173-disp-ovl", .data = (void *)MTK_DISP_OVL },
- { .compatible = "mediatek,mt2701-disp-rdma", .data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8173-disp-rdma", .data = (void *)MTK_DISP_RDMA },
- { .compatible = "mediatek,mt8173-disp-wdma", .data = (void *)MTK_DISP_WDMA },
- { .compatible = "mediatek,mt2701-disp-color", .data = (void *)MTK_DISP_COLOR },
- { .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR },
- { .compatible = "mediatek,mt8173-disp-aal", .data = (void *)MTK_DISP_AAL},
- { .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, },
- { .compatible = "mediatek,mt8173-disp-ufoe", .data = (void *)MTK_DISP_UFOE },
- { .compatible = "mediatek,mt2701-dsi", .data = (void *)MTK_DSI },
- { .compatible = "mediatek,mt8173-dsi", .data = (void *)MTK_DSI },
- { .compatible = "mediatek,mt8173-dpi", .data = (void *)MTK_DPI },
- { .compatible = "mediatek,mt2701-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
- { .compatible = "mediatek,mt8173-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
- { .compatible = "mediatek,mt2701-disp-pwm", .data = (void *)MTK_DISP_BLS },
- { .compatible = "mediatek,mt8173-disp-pwm", .data = (void *)MTK_DISP_PWM },
- { .compatible = "mediatek,mt8173-disp-od", .data = (void *)MTK_DISP_OD },
+ { .compatible = "mediatek,mt2701-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8173-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt2701-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8173-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8173-disp-wdma",
+ .data = (void *)MTK_DISP_WDMA },
+ { .compatible = "mediatek,mt2701-disp-color",
+ .data = (void *)MTK_DISP_COLOR },
+ { .compatible = "mediatek,mt8173-disp-color",
+ .data = (void *)MTK_DISP_COLOR },
+ { .compatible = "mediatek,mt8173-disp-aal",
+ .data = (void *)MTK_DISP_AAL},
+ { .compatible = "mediatek,mt8173-disp-gamma",
+ .data = (void *)MTK_DISP_GAMMA, },
+ { .compatible = "mediatek,mt8173-disp-ufoe",
+ .data = (void *)MTK_DISP_UFOE },
+ { .compatible = "mediatek,mt2701-dsi",
+ .data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8173-dsi",
+ .data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8173-dpi",
+ .data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt2701-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt2712-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt8173-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt2701-disp-pwm",
+ .data = (void *)MTK_DISP_BLS },
+ { .compatible = "mediatek,mt8173-disp-pwm",
+ .data = (void *)MTK_DISP_PWM },
+ { .compatible = "mediatek,mt8173-disp-od",
+ .data = (void *)MTK_DISP_OD },
{ }
};
@@ -552,6 +612,8 @@ static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend,
static const struct of_device_id mtk_drm_of_ids[] = {
{ .compatible = "mediatek,mt2701-mmsys",
.data = &mt2701_mmsys_driver_data},
+ { .compatible = "mediatek,mt2712-mmsys",
+ .data = &mt2712_mmsys_driver_data},
{ .compatible = "mediatek,mt8173-mmsys",
.data = &mt8173_mmsys_driver_data},
{ }
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index c3378c452c0a..ecc00ca3221d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -17,7 +17,7 @@
#include <linux/io.h>
#include "mtk_drm_ddp_comp.h"
-#define MAX_CRTC 2
+#define MAX_CRTC 3
#define MAX_CONNECTOR 2
struct device;
@@ -33,6 +33,9 @@ struct mtk_mmsys_driver_data {
unsigned int main_len;
const enum mtk_ddp_comp_id *ext_path;
unsigned int ext_len;
+ const enum mtk_ddp_comp_id *third_path;
+ unsigned int third_len;
+
bool shadow_register;
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
index 0d8d506695f9..be5f6f1daf55 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -15,6 +15,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/dma-buf.h>
#include <linux/reservation.h>
@@ -22,78 +23,37 @@
#include "mtk_drm_fb.h"
#include "mtk_drm_gem.h"
-/*
- * mtk specific framebuffer structure.
- *
- * @fb: drm framebuffer object.
- * @gem_obj: array of gem objects.
- */
-struct mtk_drm_fb {
- struct drm_framebuffer base;
- /* For now we only support a single plane */
- struct drm_gem_object *gem_obj;
-};
-
-#define to_mtk_fb(x) container_of(x, struct mtk_drm_fb, base)
-
-struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb)
-{
- struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
- return mtk_fb->gem_obj;
-}
-
-static int mtk_drm_fb_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
- return drm_gem_handle_create(file_priv, mtk_fb->gem_obj, handle);
-}
-
-static void mtk_drm_fb_destroy(struct drm_framebuffer *fb)
-{
- struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
- drm_framebuffer_cleanup(fb);
-
- drm_gem_object_put_unlocked(mtk_fb->gem_obj);
-
- kfree(mtk_fb);
-}
-
static const struct drm_framebuffer_funcs mtk_drm_fb_funcs = {
- .create_handle = mtk_drm_fb_create_handle,
- .destroy = mtk_drm_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
-static struct mtk_drm_fb *mtk_drm_framebuffer_init(struct drm_device *dev,
+static struct drm_framebuffer *mtk_drm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode,
struct drm_gem_object *obj)
{
- struct mtk_drm_fb *mtk_fb;
+ struct drm_framebuffer *fb;
int ret;
if (drm_format_num_planes(mode->pixel_format) != 1)
return ERR_PTR(-EINVAL);
- mtk_fb = kzalloc(sizeof(*mtk_fb), GFP_KERNEL);
- if (!mtk_fb)
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
return ERR_PTR(-ENOMEM);
- drm_helper_mode_fill_fb_struct(dev, &mtk_fb->base, mode);
+ drm_helper_mode_fill_fb_struct(dev, fb, mode);
- mtk_fb->gem_obj = obj;
+ fb->obj[0] = obj;
- ret = drm_framebuffer_init(dev, &mtk_fb->base, &mtk_drm_fb_funcs);
+ ret = drm_framebuffer_init(dev, fb, &mtk_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
- kfree(mtk_fb);
+ kfree(fb);
return ERR_PTR(ret);
}
- return mtk_fb;
+ return fb;
}
/*
@@ -110,7 +70,7 @@ int mtk_fb_wait(struct drm_framebuffer *fb)
if (!fb)
return 0;
- gem = mtk_fb_get_gem_obj(fb);
+ gem = fb->obj[0];
if (!gem || !gem->dma_buf || !gem->dma_buf->resv)
return 0;
@@ -128,7 +88,7 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *cmd)
{
- struct mtk_drm_fb *mtk_fb;
+ struct drm_framebuffer *fb;
struct drm_gem_object *gem;
unsigned int width = cmd->width;
unsigned int height = cmd->height;
@@ -151,13 +111,13 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
goto unreference;
}
- mtk_fb = mtk_drm_framebuffer_init(dev, cmd, gem);
- if (IS_ERR(mtk_fb)) {
- ret = PTR_ERR(mtk_fb);
+ fb = mtk_drm_framebuffer_init(dev, cmd, gem);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
goto unreference;
}
- return &mtk_fb->base;
+ return fb;
unreference:
drm_gem_object_put_unlocked(gem);
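The mtk_drm_fb.c hunks drop the driver-private struct mtk_drm_fb wrapper in favour of the core drm_framebuffer::obj[] array and the drm_gem_fb_* helpers, so create_handle and destroy come for free. Callers that previously needed mtk_fb_get_gem_obj() can use the generic accessor (single plane assumed, matching the format check above):

	#include <drm/drm_gem_framebuffer_helper.h>

	/* equivalent to the fb->obj[0] dereferences in the hunks above */
	struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);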
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.h b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
index 9b2ae345a4e9..7f976b196a15 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
@@ -14,7 +14,6 @@
#ifndef MTK_DRM_FB_H
#define MTK_DRM_FB_H
-struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb);
int mtk_fb_wait(struct drm_framebuffer *fb);
struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 2f4b0ffee598..f7e6aa1b5b7d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -95,11 +95,6 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
if (!fb)
return 0;
- if (!mtk_fb_get_gem_obj(fb)) {
- DRM_DEBUG_KMS("buffer is null\n");
- return -EFAULT;
- }
-
if (!state->crtc)
return 0;
@@ -127,7 +122,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
if (!crtc || WARN_ON(!fb))
return;
- gem = mtk_fb_get_gem_obj(fb);
+ gem = fb->obj[0];
mtk_gem = to_mtk_gem_obj(gem);
addr = mtk_gem->dma_addr;
pitch = fb->pitches[0];
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index aa0943ec32b0..66df1b177959 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -782,7 +782,7 @@ static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi)
drm_connector_helper_add(&dsi->conn, &mtk_dsi_connector_helper_funcs);
dsi->conn.dpms = DRM_MODE_DPMS_OFF;
- drm_mode_connector_attach_encoder(&dsi->conn, &dsi->encoder);
+ drm_connector_attach_encoder(&dsi->conn, &dsi->encoder);
if (dsi->panel) {
ret = drm_panel_attach(dsi->panel, &dsi->conn);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 59a11026dceb..2d45d1dd9554 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1220,7 +1220,7 @@ static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
- drm_mode_connector_update_edid_property(conn, edid);
+ drm_connector_update_edid_property(conn, edid);
ret = drm_add_edid_modes(conn, edid);
kfree(edid);
@@ -1306,7 +1306,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
hdmi->conn.interlace_allowed = true;
hdmi->conn.doublescan_allowed = false;
- ret = drm_mode_connector_attach_encoder(&hdmi->conn,
+ ret = drm_connector_attach_encoder(&hdmi->conn,
bridge->encoder);
if (ret) {
dev_err(hdmi->dev,
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index c9ad45686e7a..df7247cd93f9 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -329,6 +329,12 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
vclk_freq = mode->clock;
+ if (!vic) {
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, vclk_freq,
+ vclk_freq, vclk_freq, false);
+ return;
+ }
+
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
@@ -542,10 +548,12 @@ static enum drm_mode_status
dw_hdmi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
+ struct meson_drm *priv = connector->dev->dev_private;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
int vic = drm_match_cea_mode(mode);
+ enum drm_mode_status status;
DRM_DEBUG_DRIVER("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
mode->base.id, mode->name, mode->vrefresh, mode->clock,
@@ -556,8 +564,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
/* Check against non-VIC supported modes */
if (!vic) {
- if (!meson_venc_hdmi_supported_mode(mode))
- return MODE_BAD;
+ status = meson_venc_hdmi_supported_mode(mode);
+ if (status != MODE_OK)
+ return status;
+
+ return meson_vclk_dmt_supported_freq(priv, mode->clock);
/* Check against supported VIC modes */
} else if (!meson_venc_hdmi_supported_vic(vic))
return MODE_BAD;
@@ -583,16 +594,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
vclk_freq, venc_freq, hdmi_freq);
- /* Finally filter by configurable vclk frequencies */
+ /* Finally filter by configurable vclk frequencies for VIC modes */
switch (vclk_freq) {
- case 25175:
- case 40000:
case 54000:
- case 65000:
case 74250:
- case 108000:
case 148500:
- case 162000:
case 297000:
case 594000:
return MODE_OK;
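Net effect of the meson_dw_hdmi.c hunks: non-CEA (DMT) clocks are no longer gated by a hard-coded whitelist; meson_venc_hdmi_supported_mode() checks the timing envelope and meson_vclk_dmt_supported_freq() accepts any pixel clock the PLL parameter search can actually synthesize. For example (a plausible case, not from the patch), an 85500 kHz 1366x768 panel clock that the old list would have rejected now validates if, and only if, the search finds a workable m/frac/od triple:

	status = meson_vclk_dmt_supported_freq(priv, 85500);
	/* MODE_OK when PLL params exist, MODE_CLOCK_RANGE otherwise */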
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index f0511220317f..ae5473257f72 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -320,32 +320,23 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
CTS_VDAC_EN, CTS_VDAC_EN);
}
-
+enum {
/* PLL O1 O2 O3 VP DV EN TX */
/* 4320 /4 /4 /1 /5 /1 => /2 /2 */
-#define MESON_VCLK_HDMI_ENCI_54000 1
+ MESON_VCLK_HDMI_ENCI_54000 = 1,
/* 4320 /4 /4 /1 /5 /1 => /1 /2 */
-#define MESON_VCLK_HDMI_DDR_54000 2
+ MESON_VCLK_HDMI_DDR_54000,
/* 2970 /4 /1 /1 /5 /1 => /1 /2 */
-#define MESON_VCLK_HDMI_DDR_148500 3
-/* 4028 /4 /4 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_25175 4
-/* 3200 /4 /2 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_40000 5
-/* 5200 /4 /2 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_65000 6
+ MESON_VCLK_HDMI_DDR_148500,
/* 2970 /2 /2 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_74250 7
-/* 4320 /4 /1 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_108000 8
+ MESON_VCLK_HDMI_74250,
/* 2970 /1 /2 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_148500 9
-/* 3240 /2 /1 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_162000 10
+ MESON_VCLK_HDMI_148500,
/* 2970 /1 /1 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_297000 11
+ MESON_VCLK_HDMI_297000,
/* 5940 /1 /1 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_594000 12
+ MESON_VCLK_HDMI_594000
+};
struct meson_vclk_params {
unsigned int pll_base_freq;
@@ -411,46 +402,6 @@ struct meson_vclk_params {
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
- [MESON_VCLK_HDMI_25175] = {
- .pll_base_freq = 4028000,
- .pll_od1 = 4,
- .pll_od2 = 4,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
- [MESON_VCLK_HDMI_40000] = {
- .pll_base_freq = 3200000,
- .pll_od1 = 4,
- .pll_od2 = 2,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
- [MESON_VCLK_HDMI_65000] = {
- .pll_base_freq = 5200000,
- .pll_od1 = 4,
- .pll_od2 = 2,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
- [MESON_VCLK_HDMI_108000] = {
- .pll_base_freq = 4320000,
- .pll_od1 = 4,
- .pll_od2 = 1,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
- [MESON_VCLK_HDMI_162000] = {
- .pll_base_freq = 3240000,
- .pll_od1 = 2,
- .pll_od2 = 1,
- .pll_od3 = 1,
- .vid_pll_div = VID_PLL_DIV_5,
- .vclk_div = 2,
- },
};
static inline unsigned int pll_od_to_reg(unsigned int od)
@@ -470,358 +421,217 @@ static inline unsigned int pll_od_to_reg(unsigned int od)
return 0;
}
-void meson_hdmi_pll_set(struct meson_drm *priv,
- unsigned int base,
- unsigned int od1,
- unsigned int od2,
- unsigned int od3)
+void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
+ unsigned int frac, unsigned int od1,
+ unsigned int od2, unsigned int od3)
{
unsigned int val;
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
- switch (base) {
- case 2970000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800023d);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* Enable and unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- 0x7 << 28, 0x4 << 28);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4e00);
- break;
-
- case 3200000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000242);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4aab);
- break;
-
- case 3240000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000243);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4800);
- break;
-
- case 3865000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000250);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4855);
- break;
-
- case 4028000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000253);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
-
- /* div_frac */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4eab);
- break;
-
- case 4320000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800025a);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
- break;
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000200 | m);
+ if (frac)
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0x00004000 | frac);
+ else
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
- case 5940000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800027b);
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 0xFFFF, 0x4c00);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x135c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
- break;
+ /* Enable and unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 0x7 << 28, 0x4 << 28);
- case 5200000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800026c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x135c5091);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
- /* unreset */
- regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- BIT(28), 0);
-
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
- val, (val & HDMI_PLL_LOCK), 10, 0);
- break;
- };
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
- switch (base) {
- case 2970000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb300);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 3200000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000285);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb155);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 3240000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000287);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 3865000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002a1);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb02b);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 4028000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002a7);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb355);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 4320000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002b4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 5940000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002f7);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb200);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- case 5200000:
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002d8);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb2ab);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
- regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
- break;
-
- };
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000200 | m);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000 | frac);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
/* Reset PLL */
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- HDMI_PLL_RESET, HDMI_PLL_RESET);
+ HDMI_PLL_RESET, HDMI_PLL_RESET);
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- HDMI_PLL_RESET, 0);
+ HDMI_PLL_RESET, 0);
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
- };
+ }
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 3 << 16, pll_od_to_reg(od1) << 16);
+ 3 << 16, pll_od_to_reg(od1) << 16);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
- 3 << 21, pll_od_to_reg(od1) << 21);
+ 3 << 21, pll_od_to_reg(od1) << 21);
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 3 << 22, pll_od_to_reg(od2) << 22);
+ 3 << 22, pll_od_to_reg(od2) << 22);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
- 3 << 23, pll_od_to_reg(od2) << 23);
+ 3 << 23, pll_od_to_reg(od2) << 23);
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
- 3 << 18, pll_od_to_reg(od3) << 18);
+ 3 << 18, pll_od_to_reg(od3) << 18);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
- 3 << 19, pll_od_to_reg(od3) << 19);
+ 3 << 19, pll_od_to_reg(od3) << 19);
+
}
-void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int vclk_freq, unsigned int venc_freq,
- unsigned int dac_freq, bool hdmi_use_enci)
+#define XTAL_FREQ 24000
+
+static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
+ unsigned int pll_freq)
{
- unsigned int freq;
- unsigned int hdmi_tx_div;
- unsigned int venc_div;
+ /* The GXBB PLL has a /2 pre-multiplier */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ pll_freq /= 2;
- if (target == MESON_VCLK_TARGET_CVBS) {
- meson_venci_cvbs_clock_config(priv);
- return;
+ return pll_freq / XTAL_FREQ;
+}
+
+#define HDMI_FRAC_MAX_GXBB 4096
+#define HDMI_FRAC_MAX_GXL 1024
+
+static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
+ unsigned int m,
+ unsigned int pll_freq)
+{
+ unsigned int parent_freq = XTAL_FREQ;
+ unsigned int frac_max = HDMI_FRAC_MAX_GXL;
+ unsigned int frac_m;
+ unsigned int frac;
+
+ /* The GXBB PLL has a /2 pre-multiplier and a larger FRAC width */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ frac_max = HDMI_FRAC_MAX_GXBB;
+ parent_freq *= 2;
}
- hdmi_tx_div = vclk_freq / dac_freq;
+ /* We can have a perfect match! */
+ if (pll_freq / m == parent_freq &&
+ pll_freq % m == 0)
+ return 0;
- if (hdmi_tx_div == 0) {
- pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
- dac_freq);
- return;
+ frac = div_u64((u64)pll_freq * (u64)frac_max, parent_freq);
+ frac_m = m * frac_max;
+ if (frac_m > frac)
+ return frac_max;
+ frac -= frac_m;
+
+ return min((u16)frac, (u16)(frac_max - 1));
+}
+
+static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
+ unsigned int m,
+ unsigned int frac)
+{
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ /* Empiric supported min/max dividers */
+ if (m < 53 || m > 123)
+ return false;
+ if (frac >= HDMI_FRAC_MAX_GXBB)
+ return false;
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ /* Empiric supported min/max dividers */
+ if (m < 106 || m > 247)
+ return false;
+ if (frac >= HDMI_FRAC_MAX_GXL)
+ return false;
}
- venc_div = vclk_freq / venc_freq;
+ return true;
+}
- if (venc_div == 0) {
- pr_err("Fatal Error, invalid HDMI venc freq %d\n",
- venc_freq);
- return;
+static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
+ unsigned int freq,
+ unsigned int *m,
+ unsigned int *frac,
+ unsigned int *od)
+{
+ /* Cycle from /16 to /2 */
+ for (*od = 16 ; *od > 1 ; *od >>= 1) {
+ *m = meson_hdmi_pll_get_m(priv, freq * *od);
+ if (!*m)
+ continue;
+ *frac = meson_hdmi_pll_get_frac(priv, *m, freq * *od);
+
+ DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d\n",
+ freq, *m, *frac, *od);
+
+ if (meson_hdmi_pll_validate_params(priv, *m, *frac))
+ return true;
}
- switch (vclk_freq) {
- case 54000:
- if (hdmi_use_enci)
- freq = MESON_VCLK_HDMI_ENCI_54000;
- else
- freq = MESON_VCLK_HDMI_DDR_54000;
- break;
- case 25175:
- freq = MESON_VCLK_HDMI_25175;
- break;
- case 40000:
- freq = MESON_VCLK_HDMI_40000;
- break;
- case 65000:
- freq = MESON_VCLK_HDMI_65000;
- break;
- case 74250:
- freq = MESON_VCLK_HDMI_74250;
- break;
- case 108000:
- freq = MESON_VCLK_HDMI_108000;
- break;
- case 148500:
- if (dac_freq != 148500)
- freq = MESON_VCLK_HDMI_DDR_148500;
- else
- freq = MESON_VCLK_HDMI_148500;
- break;
- case 162000:
- freq = MESON_VCLK_HDMI_162000;
- break;
- case 297000:
- freq = MESON_VCLK_HDMI_297000;
- break;
- case 594000:
- freq = MESON_VCLK_HDMI_594000;
- break;
- default:
- pr_err("Fatal Error, invalid HDMI vclk freq %d\n",
- vclk_freq);
+ return false;
+}
+
+/* freq is the pixel clock; the DMT path applies a fixed /10 after the PLL */
+enum drm_mode_status
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq)
+{
+ unsigned int od, m, frac;
+
+ /* In DMT mode, path after PLL is always /10 */
+ freq *= 10;
+
+ if (meson_hdmi_pll_find_params(priv, freq, &m, &frac, &od))
+ return MODE_OK;
+
+ return MODE_CLOCK_RANGE;
+}
+EXPORT_SYMBOL_GPL(meson_vclk_dmt_supported_freq);
+
+/* pll_freq is the frequency after the OD dividers */
+static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
+ unsigned int pll_freq)
+{
+ unsigned int od, m, frac, od1, od2, od3;
+
+ if (meson_hdmi_pll_find_params(priv, pll_freq, &m, &frac, &od)) {
+ od3 = 1;
+ if (od < 4) {
+ od1 = 2;
+ od2 = 1;
+ } else {
+ od2 = od / 4;
+ od1 = od / od2;
+ }
+
+ DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d/%d/%d\n",
+ pll_freq, m, frac, od1, od2, od3);
+
+ meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
+
return;
}
+ DRM_ERROR("Fatal, unable to find parameters for PLL freq %d\n",
+ pll_freq);
+}
+
+static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
+ unsigned int od1, unsigned int od2, unsigned int od3,
+ unsigned int vid_pll_div, unsigned int vclk_div,
+ unsigned int hdmi_tx_div, unsigned int venc_div,
+ bool hdmi_use_enci)
+{
/* Set HDMI-TX sys clock */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
CTS_HDMI_SYS_SEL_MASK, 0);
@@ -831,19 +641,49 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
CTS_HDMI_SYS_EN, CTS_HDMI_SYS_EN);
/* Set HDMI PLL rate */
- meson_hdmi_pll_set(priv, params[freq].pll_base_freq,
- params[freq].pll_od1,
- params[freq].pll_od2,
- params[freq].pll_od3);
+ if (!od1 && !od2 && !od3) {
+ meson_hdmi_pll_generic_set(priv, pll_base_freq);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ switch (pll_base_freq) {
+ case 2970000:
+ meson_hdmi_pll_set_params(priv, 0x3d, 0xe00,
+ od1, od2, od3);
+ break;
+ case 4320000:
+ meson_hdmi_pll_set_params(priv, 0x5a, 0,
+ od1, od2, od3);
+ break;
+ case 5940000:
+ meson_hdmi_pll_set_params(priv, 0x7b, 0xc00,
+ od1, od2, od3);
+ break;
+ }
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ switch (pll_base_freq) {
+ case 2970000:
+ meson_hdmi_pll_set_params(priv, 0x7b, 0x300,
+ od1, od2, od3);
+ break;
+ case 4320000:
+ meson_hdmi_pll_set_params(priv, 0xb4, 0,
+ od1, od2, od3);
+ break;
+ case 5940000:
+ meson_hdmi_pll_set_params(priv, 0xf7, 0x200,
+ od1, od2, od3);
+ break;
+ }
+ }
/* Setup vid_pll divider */
- meson_vid_pll_set(priv, params[freq].vid_pll_div);
+ meson_vid_pll_set(priv, vid_pll_div);
/* Set VCLK div */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
VCLK_SEL_MASK, 0);
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
- VCLK_DIV_MASK, params[freq].vclk_div - 1);
+ VCLK_DIV_MASK, vclk_div - 1);
/* Set HDMI-TX source */
switch (hdmi_tx_div) {
@@ -981,4 +821,80 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL, VCLK_EN, VCLK_EN);
}
+
+void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+ unsigned int vclk_freq, unsigned int venc_freq,
+ unsigned int dac_freq, bool hdmi_use_enci)
+{
+ unsigned int freq;
+ unsigned int hdmi_tx_div;
+ unsigned int venc_div;
+
+ if (target == MESON_VCLK_TARGET_CVBS) {
+ meson_venci_cvbs_clock_config(priv);
+ return;
+ } else if (target == MESON_VCLK_TARGET_DMT) {
+ /* The DMT clock path is fixed after the PLL:
+ * - automatic PLL freq + OD management
+ * - vid_pll_div = VID_PLL_DIV_5
+ * - vclk_div = 2
+ * - hdmi_tx_div = 1
+ * - venc_div = 1
+ * - encp encoder
+ */
+ meson_vclk_set(priv, vclk_freq * 10, 0, 0, 0,
+ VID_PLL_DIV_5, 2, 1, 1, false);
+ return;
+ }
+
+ hdmi_tx_div = vclk_freq / dac_freq;
+
+ if (hdmi_tx_div == 0) {
+ pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
+ dac_freq);
+ return;
+ }
+
+ venc_div = vclk_freq / venc_freq;
+
+ if (venc_div == 0) {
+ pr_err("Fatal Error, invalid HDMI venc freq %d\n",
+ venc_freq);
+ return;
+ }
+
+ switch (vclk_freq) {
+ case 54000:
+ if (hdmi_use_enci)
+ freq = MESON_VCLK_HDMI_ENCI_54000;
+ else
+ freq = MESON_VCLK_HDMI_DDR_54000;
+ break;
+ case 74250:
+ freq = MESON_VCLK_HDMI_74250;
+ break;
+ case 148500:
+ if (dac_freq != 148500)
+ freq = MESON_VCLK_HDMI_DDR_148500;
+ else
+ freq = MESON_VCLK_HDMI_148500;
+ break;
+ case 297000:
+ freq = MESON_VCLK_HDMI_297000;
+ break;
+ case 594000:
+ freq = MESON_VCLK_HDMI_594000;
+ break;
+ default:
+ pr_err("Fatal Error, invalid HDMI vclk freq %d\n",
+ vclk_freq);
+ return;
+ }
+
+ meson_vclk_set(priv, params[freq].pll_base_freq,
+ params[freq].pll_od1, params[freq].pll_od2,
+ params[freq].pll_od3, params[freq].vid_pll_div,
+ params[freq].vclk_div, hdmi_tx_div, venc_div,
+ hdmi_use_enci);
+}
EXPORT_SYMBOL_GPL(meson_vclk_setup);
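The meson_vclk.c rewrite replaces per-frequency register recipes with computed PLL parameters: meson_hdmi_pll_get_m() derives the integer multiplier from the 24 MHz crystal and meson_hdmi_pll_get_frac() the fractional part, scaled to the SoC's FRAC width. A worked example for the 1024x768@60 DMT clock (65000 kHz, so 650000 kHz asked of the PLL via the fixed /10 path), taking the od = 8 step of the search on GXL:

	/* pll_freq = 65000 * 10 * od, od = 8 -> 5200000 kHz at the VCO */
	m    = 5200000 / 24000;			/* = 216 (0xd8)   */
	frac = div_u64(5200000ULL * 1024, 24000)
	       - 216 * 1024;			/* = 682 (~0x2aa) */

which reproduces, to within one LSB of rounding, the hand-tuned 0x400002d8 / 0x800cb2ab register pair deleted from the old GXL table above.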
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index 0401b5213471..869fa3a3073e 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -24,11 +24,15 @@
enum {
MESON_VCLK_TARGET_CVBS = 0,
MESON_VCLK_TARGET_HDMI = 1,
+ MESON_VCLK_TARGET_DMT = 2,
};
/* 27MHz is the CVBS Pixel Clock */
#define MESON_VCLK_CVBS 27000
+enum drm_mode_status
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
+
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
unsigned int vclk_freq, unsigned int venc_freq,
unsigned int dac_freq, bool hdmi_use_enci);
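Usage of the new MESON_VCLK_TARGET_DMT target is visible in the dw-hdmi hunk earlier: for DMT modes every divider after the PLL is fixed, so the caller passes the pixel clock for all three frequencies and lets the PLL search do the rest:

	meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, mode->clock,
			 mode->clock, mode->clock, false);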
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 6e2701389801..514245e69b38 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -697,314 +697,6 @@ union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p60 = {
},
};
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_640x480_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x31f,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0x90,
- .havon_end = 0x30f,
- .vavon_bline = 0x23,
- .vavon_eline = 0x202,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0x60,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 2,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x20c,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_800x600_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x41f,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0xD8,
- .havon_end = 0x3f7,
- .vavon_bline = 0x1b,
- .vavon_eline = 0x272,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0x80,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 4,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x273,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1024x768_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 1343,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 296,
- .havon_end = 1319,
- .vavon_bline = 35,
- .vavon_eline = 802,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 136,
- .vso_begin = 30,
- .vso_end = 50,
- .vso_bline = 0,
- .vso_eline = 6,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 805,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1152x864_75 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x63f,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0x180,
- .havon_end = 0x5ff,
- .vavon_bline = 0x23,
- .vavon_eline = 0x382,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0x80,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 3,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x383,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1280x1024_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x697,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0x168,
- .havon_end = 0x667,
- .vavon_bline = 0x29,
- .vavon_eline = 0x428,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0x70,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 3,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x429,
- },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1600x1200_60 = {
- .encp = {
- .dvi_settings = 0x21,
- .video_mode = 0x4040,
- .video_mode_adv = 0x18,
- /* video_prog_mode */
- /* video_sync_mode */
- /* video_yc_dly */
- /* video_rgb_ctrl */
- /* video_filt_ctrl */
- /* video_ofld_voav_ofst */
- /* yfp1_htime */
- /* yfp2_htime */
- .max_pxcnt = 0x86f,
- /* hspuls_begin */
- /* hspuls_end */
- /* hspuls_switch */
- /* vspuls_begin */
- /* vspuls_end */
- /* vspuls_bline */
- /* vspuls_eline */
- .havon_begin = 0x1f0,
- .havon_end = 0x82f,
- .vavon_bline = 0x31,
- .vavon_eline = 0x4e0,
- /* eqpuls_begin */
- /* eqpuls_end */
- /* eqpuls_bline */
- /* eqpuls_eline */
- .hso_begin = 0,
- .hso_end = 0xc0,
- .vso_begin = 0x1e,
- .vso_end = 0x32,
- .vso_bline = 0,
- .vso_eline = 3,
- .vso_eline_present = true,
- /* sy_val */
- /* sy2_val */
- .max_lncnt = 0x4e1,
- },
-};
-
-struct meson_hdmi_venc_dmt_mode {
- struct drm_display_mode drm_mode;
- union meson_hdmi_venc_mode *mode;
-} meson_hdmi_venc_dmt_modes[] = {
- /* 640x480@60Hz */
- {
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- &meson_hdmi_encp_mode_640x480_60,
- },
- /* 800x600@60Hz */
- {
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- &meson_hdmi_encp_mode_800x600_60,
- },
- /* 1024x768@60Hz */
- {
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024,
- 1048, 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- &meson_hdmi_encp_mode_1024x768_60,
- },
- /* 1152x864@75Hz */
- {
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152,
- 1216, 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- &meson_hdmi_encp_mode_1152x864_75,
- },
- /* 1280x1024@60Hz */
- {
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280,
- 1328, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- &meson_hdmi_encp_mode_1280x1024_60,
- },
- /* 1600x1200@60Hz */
- {
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600,
- 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- &meson_hdmi_encp_mode_1600x1200_60,
- },
- /* 1920x1080@60Hz */
- {
- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920,
- 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- &meson_hdmi_encp_mode_1080p60
- },
- { }, /* sentinel */
-};
-
struct meson_hdmi_venc_vic_mode {
unsigned int vic;
union meson_hdmi_venc_mode *mode;
@@ -1044,17 +736,20 @@ static unsigned long modulo(unsigned long a, unsigned long b)
return a;
}
-bool meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode)
+enum drm_mode_status
+meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode)
{
- struct meson_hdmi_venc_dmt_mode *vmode = meson_hdmi_venc_dmt_modes;
+ if (mode->flags & ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))
+ return MODE_BAD;
- while (vmode->mode) {
- if (drm_mode_equal(&vmode->drm_mode, mode))
- return true;
- vmode++;
- }
+ if (mode->hdisplay < 640 || mode->hdisplay > 1920)
+ return MODE_BAD_HVALUE;
- return false;
+ if (mode->vdisplay < 480 || mode->vdisplay > 1200)
+ return MODE_BAD_VVALUE;
+
+ return MODE_OK;
}
EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_mode);
@@ -1072,18 +767,29 @@ bool meson_venc_hdmi_supported_vic(int vic)
}
EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_vic);
-static union meson_hdmi_venc_mode
-*meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode)
+void meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode,
+ union meson_hdmi_venc_mode *dmt_mode)
{
- struct meson_hdmi_venc_dmt_mode *vmode = meson_hdmi_venc_dmt_modes;
-
- while (vmode->mode) {
- if (drm_mode_equal(&vmode->drm_mode, mode))
- return vmode->mode;
- vmode++;
- }
-
- return NULL;
+ memset(dmt_mode, 0, sizeof(*dmt_mode));
+
+ dmt_mode->encp.dvi_settings = 0x21;
+ dmt_mode->encp.video_mode = 0x4040;
+ dmt_mode->encp.video_mode_adv = 0x18;
+ dmt_mode->encp.max_pxcnt = mode->htotal - 1;
+ dmt_mode->encp.havon_begin = mode->htotal - mode->hsync_start;
+ dmt_mode->encp.havon_end = dmt_mode->encp.havon_begin +
+ mode->hdisplay - 1;
+ dmt_mode->encp.vavon_bline = mode->vtotal - mode->vsync_start;
+ dmt_mode->encp.vavon_eline = dmt_mode->encp.vavon_bline +
+ mode->vdisplay - 1;
+ dmt_mode->encp.hso_begin = 0;
+ dmt_mode->encp.hso_end = mode->hsync_end - mode->hsync_start;
+ dmt_mode->encp.vso_begin = 30;
+ dmt_mode->encp.vso_end = 50;
+ dmt_mode->encp.vso_bline = 0;
+ dmt_mode->encp.vso_eline = mode->vsync_end - mode->vsync_start;
+ dmt_mode->encp.vso_eline_present = true;
+ dmt_mode->encp.max_lncnt = mode->vtotal - 1;
}
static union meson_hdmi_venc_mode *meson_venc_hdmi_get_vic_vmode(int vic)
@@ -1120,6 +826,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
struct drm_display_mode *mode)
{
union meson_hdmi_venc_mode *vmode = NULL;
+ union meson_hdmi_venc_mode vmode_dmt;
bool use_enci = false;
bool venc_repeat = false;
bool hdmi_repeat = false;
@@ -1147,14 +854,17 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
unsigned int sof_lines;
unsigned int vsync_lines;
- if (meson_venc_hdmi_supported_vic(vic))
+ if (meson_venc_hdmi_supported_vic(vic)) {
vmode = meson_venc_hdmi_get_vic_vmode(vic);
- else
- vmode = meson_venc_hdmi_get_dmt_vmode(mode);
- if (!vmode) {
- dev_err(priv->dev, "%s: Fatal Error, unsupported mode "
- DRM_MODE_FMT "\n", __func__, DRM_MODE_ARG(mode));
- return;
+ if (!vmode) {
+ dev_err(priv->dev, "%s: Fatal Error, unsupported mode "
+ DRM_MODE_FMT "\n", __func__,
+ DRM_MODE_ARG(mode));
+ return;
+ }
+ } else {
+ meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt);
+ vmode = &vmode_dmt;
}
/* Use VENCI for 480i and 576i and double HDMI pixels */
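The venc hunks trade seven static DMT timing tables for meson_venc_hdmi_get_dmt_vmode(), which derives the ENCP window directly from the drm_display_mode. Checking it against the deleted 640x480@60 table (htotal 800, hsync 656-752, vtotal 525, vsync 490-492) shows the arithmetic reproduces every removed constant:

	max_pxcnt   = 800 - 1;		/* 0x31f */
	havon_begin = 800 - 656;	/* 0x90  */
	havon_end   = 144 + 640 - 1;	/* 0x30f */
	vavon_bline = 525 - 490;	/* 0x23  */
	vavon_eline = 35 + 480 - 1;	/* 0x202 */
	hso_end     = 752 - 656;	/* 0x60  */
	vso_eline   = 492 - 490;	/* 2     */
	max_lncnt   = 525 - 1;		/* 0x20c */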
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
index 7c18a36a0dd0..97eaebbfa0c4 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -58,7 +58,8 @@ struct meson_cvbs_enci_mode {
};
/* HDMI Clock parameters */
-bool meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode);
+enum drm_mode_status
+meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode);
bool meson_venc_hdmi_supported_vic(int vic);
bool meson_venc_hdmi_venc_repeat(int vic);
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 79d95ca8a0c0..f7945bae3b4a 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -282,7 +282,7 @@ int meson_venc_cvbs_create(struct meson_drm *priv)
encoder->possible_crtcs = BIT(0);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 8918539a19aa..acf7bfe68454 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1553,7 +1553,7 @@ static int mga_vga_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, &mga_connector->i2c->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
@@ -1747,7 +1747,7 @@ int mgag200_modeset_init(struct mga_device *mdev)
return -1;
}
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
ret = mgag200_fbdev_init(mdev);
if (ret) {
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 38cbde971b48..843a9d40c05e 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -12,6 +12,7 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM
+ select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
select PM_OPP
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index cd40c050b2d7..7c773e003663 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Idrivers/gpu/drm/msm
+ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
msm-y := \
@@ -45,6 +46,33 @@ msm-y := \
disp/mdp5/mdp5_mixer.o \
disp/mdp5/mdp5_plane.o \
disp/mdp5/mdp5_smp.o \
+ disp/dpu1/dpu_core_irq.o \
+ disp/dpu1/dpu_core_perf.o \
+ disp/dpu1/dpu_crtc.o \
+ disp/dpu1/dpu_encoder.o \
+ disp/dpu1/dpu_encoder_phys_cmd.o \
+ disp/dpu1/dpu_encoder_phys_vid.o \
+ disp/dpu1/dpu_formats.o \
+ disp/dpu1/dpu_hw_blk.o \
+ disp/dpu1/dpu_hw_catalog.o \
+ disp/dpu1/dpu_hw_cdm.o \
+ disp/dpu1/dpu_hw_ctl.o \
+ disp/dpu1/dpu_hw_interrupts.o \
+ disp/dpu1/dpu_hw_intf.o \
+ disp/dpu1/dpu_hw_lm.o \
+ disp/dpu1/dpu_hw_pingpong.o \
+ disp/dpu1/dpu_hw_sspp.o \
+ disp/dpu1/dpu_hw_top.o \
+ disp/dpu1/dpu_hw_util.o \
+ disp/dpu1/dpu_hw_vbif.o \
+ disp/dpu1/dpu_io_util.o \
+ disp/dpu1/dpu_irq.o \
+ disp/dpu1/dpu_kms.o \
+ disp/dpu1/dpu_mdss.o \
+ disp/dpu1/dpu_plane.o \
+ disp/dpu1/dpu_power_handle.o \
+ disp/dpu1/dpu_rm.o \
+ disp/dpu1/dpu_vbif.o \
msm_atomic.o \
msm_debugfs.o \
msm_drv.o \
@@ -62,7 +90,8 @@ msm-y := \
msm_ringbuffer.o \
msm_submitqueue.o
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
+ disp/dpu1/dpu_dbg.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 3ebbeb3a9b68..669c2d4b070d 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -411,15 +411,6 @@ static const unsigned int a3xx_registers[] = {
~0 /* sentinel */
};
-#ifdef CONFIG_DEBUG_FS
-static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
-{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A3XX_RBBM_STATUS));
- adreno_show(gpu, m);
-}
-#endif
-
/* would be nice to not have to duplicate the _show() stuff with printk(): */
static void a3xx_dump(struct msm_gpu *gpu)
{
@@ -427,6 +418,21 @@ static void a3xx_dump(struct msm_gpu *gpu)
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
adreno_dump(gpu);
}
+
+static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
+
+ return state;
+}
+
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
@@ -450,9 +456,11 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
-#ifdef CONFIG_DEBUG_FS
- .show = a3xx_show,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
#endif
+ .gpu_state_get = a3xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 16d3d596638e..7c4e6dc1ed59 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -455,15 +455,19 @@ static const unsigned int a4xx_registers[] = {
~0 /* sentinel */
};
-#ifdef CONFIG_DEBUG_FS
-static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
+static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A4XX_RBBM_STATUS));
- adreno_show(gpu, m);
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+ state->rbbm_status = gpu_read(gpu, REG_A4XX_RBBM_STATUS);
+
+ return state;
}
-#endif
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
@@ -538,9 +542,11 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
-#ifdef CONFIG_DEBUG_FS
- .show = a4xx_show,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
#endif
+ .gpu_state_get = a4xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
},
.get_timestamp = a4xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index d39400e5bc42..bd84f71d27d8 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -19,6 +19,7 @@
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
+#include <linux/iopoll.h>
#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"
@@ -1123,8 +1124,9 @@ static const u32 a5xx_registers[] = {
0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
- 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
- 0xB9A0, 0xB9BF, ~0
+ 0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D,
+ 0xA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5,
+ 0xAC60, 0xAC60, ~0,
};
static void a5xx_dump(struct msm_gpu *gpu)
@@ -1195,19 +1197,231 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
return 0;
}
-#ifdef CONFIG_DEBUG_FS
-static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
+struct a5xx_crashdumper {
+ void *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+};
+
+struct a5xx_gpu_state {
+ struct msm_gpu_state base;
+ u32 *hlsqregs;
+};
+
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
+
+static int a5xx_crashdumper_init(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+ SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
+ &dumper->bo, &dumper->iova);
- /*
- * Temporarily disable hardware clock gating before going into
- * adreno_show to avoid issues while reading the registers
- */
+ if (IS_ERR(dumper->ptr))
+ return PTR_ERR(dumper->ptr);
+
+ return 0;
+}
+
+static void a5xx_crashdumper_free(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
+{
+ msm_gem_put_iova(dumper->bo, gpu->aspace);
+ msm_gem_put_vaddr(dumper->bo);
+
+ drm_gem_object_unreference(dumper->bo);
+}
+
+static int a5xx_crashdumper_run(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
+{
+ u32 val;
+
+ if (IS_ERR_OR_NULL(dumper->ptr))
+ return -EINVAL;
+
+ gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
+ REG_A5XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
+ return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val,
+ val & 0x04, 100, 10000);
+}
+
+/*
+ * This is a list of the registers that need to be read through the HLSQ
+ * aperture via the crashdumper. They are not nominally accessible from
+ * the CPU on a secure platform.
+ */
+static const struct {
+ u32 type;
+ u32 regoffset;
+ u32 count;
+} a5xx_hlsq_aperture_regs[] = {
+ { 0x35, 0xe00, 0x32 }, /* HLSQ non-context */
+ { 0x31, 0x2080, 0x1 }, /* HLSQ 2D context 0 */
+ { 0x33, 0x2480, 0x1 }, /* HLSQ 2D context 1 */
+ { 0x32, 0xe780, 0x62 }, /* HLSQ 3D context 0 */
+ { 0x34, 0xef80, 0x62 }, /* HLSQ 3D context 1 */
+ { 0x3f, 0x0ec0, 0x40 }, /* SP non-context */
+ { 0x3d, 0x2040, 0x1 }, /* SP 2D context 0 */
+ { 0x3b, 0x2440, 0x1 }, /* SP 2D context 1 */
+ { 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */
+ { 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */
+ { 0x3a, 0x0f00, 0x1c }, /* TP non-context */
+ { 0x38, 0x2000, 0xa }, /* TP 2D context 0 */
+ { 0x36, 0x2400, 0xa }, /* TP 2D context 1 */
+ { 0x39, 0xe700, 0x80 }, /* TP 3D context 0 */
+ { 0x37, 0xef00, 0x80 }, /* TP 3D context 1 */
+};
+
+static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
+ struct a5xx_gpu_state *a5xx_state)
+{
+ struct a5xx_crashdumper dumper = { 0 };
+ u32 offset, count = 0;
+ u64 *ptr;
+ int i;
+
+ if (a5xx_crashdumper_init(gpu, &dumper))
+ return;
+
+ /* The script will be written at offset 0 */
+ ptr = dumper.ptr;
+
+ /* Start writing the data at offset 256k */
+ offset = dumper.iova + (256 * SZ_1K);
+
+ /* Count how many additional registers to get from the HLSQ aperture */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
+ count += a5xx_hlsq_aperture_regs[i].count;
+
+ a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL);
+ if (!a5xx_state->hlsqregs)
+ return;
+
+ /* Build the crashdump script */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 type = a5xx_hlsq_aperture_regs[i].type;
+ u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+ /* Write the register to select the desired bank */
+ *ptr++ = ((u64) type << 8);
+ *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) |
+ (1 << 21) | 1;
+
+ *ptr++ = offset;
+ *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44)
+ | c;
+
+ offset += c * sizeof(u32);
+ }
+
+ /* Write two zeros to close off the script */
+ *ptr++ = 0;
+ *ptr++ = 0;
+
+ if (a5xx_crashdumper_run(gpu, &dumper)) {
+ kfree(a5xx_state->hlsqregs);
+ a5xx_crashdumper_free(gpu, &dumper);
+ return;
+ }
+
+ /* Copy the data from the crashdumper to the state */
+ memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
+ count * sizeof(u32));
+
+ a5xx_crashdumper_free(gpu, &dumper);
+}
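From the script-builder writes above, each crashdumper record is a pair of 64-bit words. The layout below is inferred from this hunk alone, not from hardware documentation:

/*
 * Inferred crashdumper script record (two u64s):
 *   u64[0]: payload - value to write (write records), or the target
 *           IOVA to dump into (read records)
 *   u64[1]: bits [63:44] register offset (dword address)
 *           bit  [21]    write flag (set: write u64[0] to the register)
 *           low bits     count of consecutive registers to process
 *
 * Each bank is captured as a write record that programs
 * HLSQ_DBG_READ_SEL, followed by a read record that drains
 * HLSQ_DBG_AHB_READ_APERTURE to memory; two zero words end the script.
 */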
+
+static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
+ GFP_KERNEL);
+
+ if (!a5xx_state)
+ return ERR_PTR(-ENOMEM);
+
+ /* Temporarily disable hardware clock gating before reading the hw */
a5xx_set_hwcg(gpu, false);
- adreno_show(gpu, m);
+
+ /* First get the generic state from the adreno core */
+ adreno_gpu_state_get(gpu, &(a5xx_state->base));
+
+ a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
+
+ /* Get the HLSQ regs with the help of the crashdumper */
+ a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);
+
a5xx_set_hwcg(gpu, true);
+
+ return &a5xx_state->base;
+}
+
+static void a5xx_gpu_state_destroy(struct kref *kref)
+{
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+ struct a5xx_gpu_state *a5xx_state = container_of(state,
+ struct a5xx_gpu_state, base);
+
+ kfree(a5xx_state->hlsqregs);
+
+ adreno_gpu_state_destroy(state);
+ kfree(a5xx_state);
+}
+
+int a5xx_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, a5xx_gpu_state_destroy);
+}
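Both put paths rely on the same kref idiom: the destructor runs only when the last reference is dropped. A minimal sketch with a hypothetical type:

/* Minimal kref release idiom; struct and names are hypothetical. */
struct foo_state {
	struct kref ref;
	/* captured data ... */
};

static void foo_state_destroy(struct kref *kref)
{
	struct foo_state *s = container_of(kref, struct foo_state, ref);

	/* free captured data first, then the container itself */
	kfree(s);
}

static int foo_state_put(struct foo_state *s)
{
	/* kref_put() returns 1 when this was the last reference */
	return kref_put(&s->ref, foo_state_destroy);
}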
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ int i, j;
+ u32 pos = 0;
+ struct a5xx_gpu_state *a5xx_state = container_of(state,
+ struct a5xx_gpu_state, base);
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ adreno_show(gpu, state, p);
+
+ /* Dump the additional a5xx HLSQ registers */
+ if (!a5xx_state->hlsqregs)
+ return;
+
+ drm_printf(p, "registers-hlsq:\n");
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 o = a5xx_hlsq_aperture_regs[i].regoffset;
+ u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+ for (j = 0; j < c; j++, pos++, o++) {
+ /*
+ * To keep the crashdump simple we pull the entire range
+ * for each register type, but not all of the registers
+ * in the range are valid. Fortunately, invalid registers
+ * stick out like a sore thumb with a value of
+ * 0xdeadbeef.
+ */
+ if (a5xx_state->hlsqregs[pos] == 0xdeadbeef)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ o << 2, a5xx_state->hlsqregs[pos]);
+ }
+ }
}
#endif
@@ -1239,11 +1453,15 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = a5xx_active_ring,
.irq = a5xx_irq,
.destroy = a5xx_destroy,
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
.show = a5xx_show,
+#endif
+#if defined(CONFIG_DEBUG_FS)
.debugfs_init = a5xx_debugfs_init,
#endif
.gpu_busy = a5xx_gpu_busy,
+ .gpu_state_get = a5xx_gpu_state_get,
+ .gpu_state_put = a5xx_gpu_state_put,
},
.get_timestamp = a5xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 0ae5ace65462..44813624a286 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -35,6 +35,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 0, 6, 0),
@@ -45,6 +46,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
@@ -55,6 +57,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 3, 0, ANY_ID),
@@ -65,6 +68,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a330_pfp.fw",
},
.gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 2, 0, ANY_ID),
@@ -75,6 +79,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = (SZ_1M + SZ_512K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 3, 0, ANY_ID),
@@ -85,6 +90,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = (SZ_1M + SZ_512K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(5, 3, 0, 2),
@@ -96,6 +102,11 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
},
.gmem = SZ_1M,
+ /*
+ * Increase the inactive period to 250ms to avoid bouncing
+ * the GDSC, which appears to make it grumpy
+ */
+ .inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_FAULT_DETECT_MASK,
.init = a5xx_gpu_init,
@@ -158,7 +169,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
ret = msm_gpu_hw_init(gpu);
mutex_unlock(&dev->struct_mutex);
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
return NULL;
@@ -316,6 +327,7 @@ static int adreno_suspend(struct device *dev)
#endif
static const struct dev_pm_ops adreno_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
};
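pm_runtime_force_suspend() and pm_runtime_force_resume() reuse the runtime-PM callbacks at system-sleep time, so an already runtime-suspended GPU is not woken just to be suspended again. The two macros expand to roughly the following (SET_SYSTEM_SLEEP_PM_OPS also wires freeze/thaw/poweroff/restore to the same pair):

static const struct dev_pm_ops adreno_pm_ops = {
	/* system sleep: reuse the runtime-PM path */
	.suspend = pm_runtime_force_suspend,
	.resume = pm_runtime_force_resume,
	/* runtime PM proper */
	.runtime_suspend = adreno_suspend,
	.runtime_resume = adreno_resume,
};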
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 17d0506d058c..38ac50b73829 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -17,6 +17,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/ascii85.h>
#include <linux/pm_opp.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
@@ -368,40 +369,185 @@ bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return false;
}
-#ifdef CONFIG_DEBUG_FS
-void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i, count = 0;
+
+ kref_init(&state->ref);
+
+ ktime_get_real_ts64(&state->time);
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ int size = 0, j;
+
+ state->ring[i].fence = gpu->rb[i]->memptrs->fence;
+ state->ring[i].iova = gpu->rb[i]->iova;
+ state->ring[i].seqno = gpu->rb[i]->seqno;
+ state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
+ state->ring[i].wptr = get_wptr(gpu->rb[i]);
+
+ /* Copy at least 'wptr' dwords of the data */
+ size = state->ring[i].wptr;
+
+ /* After wptr, find the last non-zero dword to save space */
+ for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
+ if (gpu->rb[i]->start[j])
+ size = j + 1;
+
+ if (size) {
+ state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
+ if (state->ring[i].data) {
+ memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
+ state->ring[i].data_size = size << 2;
+ }
+ }
+ }
+
+ /* Count the number of registers */
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
+ count += adreno_gpu->registers[i + 1] -
+ adreno_gpu->registers[i] + 1;
+
+ state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
+ if (state->registers) {
+ int pos = 0;
+
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ u32 start = adreno_gpu->registers[i];
+ u32 end = adreno_gpu->registers[i + 1];
+ u32 addr;
+
+ for (addr = start; addr <= end; addr++) {
+ state->registers[pos++] = addr;
+ state->registers[pos++] = gpu_read(gpu, addr);
+ }
+ }
+
+ state->nr_registers = count;
+ }
+
+ return 0;
+}
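The ring capture keeps at least wptr dwords and then extends through the last non-zero dword, so mostly idle rings stay small. The sizing rule, restated as a standalone function (plain C, outside the kernel):

#include <stddef.h>
#include <stdint.h>

/* Return the number of dwords worth saving from a ring snapshot:
 * at least 'wptr', extended to cover the last non-zero dword. */
static size_t ring_copy_dwords(const uint32_t *ring, size_t ring_dwords,
			       size_t wptr)
{
	size_t size = wptr, j;

	for (j = wptr; j < ring_dwords; j++)
		if (ring[j])
			size = j + 1;

	return size;	/* bytes to copy = size << 2 */
}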
+
+void adreno_gpu_state_destroy(struct msm_gpu_state *state)
+{
int i;
- seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
+ for (i = 0; i < ARRAY_SIZE(state->ring); i++)
+ kfree(state->ring[i].data);
+
+ for (i = 0; state->bos && i < state->nr_bos; i++)
+ kvfree(state->bos[i].data);
+
+ kfree(state->bos);
+ kfree(state->comm);
+ kfree(state->cmd);
+ kfree(state->registers);
+}
+
+static void adreno_gpu_state_kref_destroy(struct kref *kref)
+{
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+
+ adreno_gpu_state_destroy(state);
+ kfree(state);
+}
+
+int adreno_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+
+static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
+{
+ char out[ASCII85_BUFSZ];
+ long l, datalen, i;
+
+ if (!ptr || !len)
+ return;
+
+ /*
+ * Only dump the non-zero part of the buffer - rarely will any data
+ * completely fill the entire allocated size of the buffer
+ */
+ for (datalen = 0, i = 0; i < len >> 2; i++) {
+ if (ptr[i])
+ datalen = (i << 2) + 1;
+ }
+
+ /* Skip printing the object if it is empty */
+ if (datalen == 0)
+ return;
+
+ l = ascii85_encode_len(datalen);
+
+ drm_puts(p, " data: !!ascii85 |\n");
+ drm_puts(p, " ");
+
+ for (i = 0; i < l; i++)
+ drm_puts(p, ascii85_encode(ptr[i], out));
+
+ drm_puts(p, "\n");
+}
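ascii85 packs each 32-bit word into five printable characters starting at '!', with 'z' as shorthand for an all-zero group. A standalone sketch of the word encoder, mirroring include/linux/ascii85.h:

#include <stdint.h>

#define ASCII85_BUFSZ 6	/* five digits plus a NUL */

static const char *ascii85_word(uint32_t in, char out[ASCII85_BUFSZ])
{
	int i;

	if (in == 0)
		return "z";	/* shorthand for an all-zero group */

	out[5] = '\0';
	for (i = 5; i--; ) {
		out[i] = '!' + in % 85;	/* base-85 digit, offset from '!' */
		in /= 85;
	}
	return out;
}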
+
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i;
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- for (i = 0; i < gpu->nr_rings; i++) {
- struct msm_ringbuffer *ring = gpu->rb[i];
+ drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
- seq_printf(m, "rb %d: fence: %d/%d\n", i,
- ring->memptrs->fence, ring->seqno);
+ drm_puts(p, "ringbuffer:\n");
- seq_printf(m, " rptr: %d\n",
- get_rptr(adreno_gpu, ring));
- seq_printf(m, "rb wptr: %d\n", get_wptr(ring));
+ for (i = 0; i < gpu->nr_rings; i++) {
+ drm_printf(p, " - id: %d\n", i);
+ drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova);
+ drm_printf(p, " last-fence: %d\n", state->ring[i].seqno);
+ drm_printf(p, " retired-fence: %d\n", state->ring[i].fence);
+ drm_printf(p, " rptr: %d\n", state->ring[i].rptr);
+ drm_printf(p, " wptr: %d\n", state->ring[i].wptr);
+ drm_printf(p, " size: %d\n", MSM_GPU_RINGBUFFER_SZ);
+
+ adreno_show_object(p, state->ring[i].data,
+ state->ring[i].data_size);
}
- /* dump these out in a form that can be parsed by demsm: */
- seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
- for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
- uint32_t start = adreno_gpu->registers[i];
- uint32_t end = adreno_gpu->registers[i+1];
- uint32_t addr;
+ if (state->bos) {
+ drm_puts(p, "bos:\n");
- for (addr = start; addr <= end; addr++) {
- uint32_t val = gpu_read(gpu, addr);
- seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
+ for (i = 0; i < state->nr_bos; i++) {
+ drm_printf(p, " - iova: 0x%016llx\n",
+ state->bos[i].iova);
+ drm_printf(p, " size: %zd\n", state->bos[i].size);
+
+ adreno_show_object(p, state->bos[i].data,
+ state->bos[i].size);
}
}
+
+ drm_puts(p, "registers:\n");
+
+ for (i = 0; i < state->nr_registers; i++) {
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ state->registers[i * 2] << 2,
+ state->registers[(i * 2) + 1]);
+ }
}
#endif
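Taken together, the printer emits a YAML-flavored document. With hypothetical values, a captured state looks roughly like:

revision: 530 (5.3.0.2)
rbbm-status: 0x00000000
ringbuffer:
 - id: 0
   iova: 0x000000000100f000
   last-fence: 41
   retired-fence: 41
   rptr: 212
   wptr: 212
   size: 32768
   data: !!ascii85 |
     ...
registers:
 - { offset: 0x0000, value: 0x40000030 }
 - { offset: 0x0004, value: 0x00000000 }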
@@ -565,7 +711,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_get_pwrlevels(&pdev->dev, gpu);
- pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ adreno_gpu->info->inactive_period);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index d6b0e7b813f4..4406776597fd 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -84,6 +84,7 @@ struct adreno_info {
enum adreno_quirks quirks;
struct msm_gpu *(*init)(struct drm_device *dev);
const char *zapfw;
+ u32 inactive_period;
};
const struct adreno_info *adreno_info(struct adreno_rev rev);
@@ -214,8 +215,9 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
-#ifdef CONFIG_DEBUG_FS
-void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
@@ -228,6 +230,11 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
+void adreno_gpu_state_destroy(struct msm_gpu_state *state);
+
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
+int adreno_gpu_state_put(struct msm_gpu_state *state);
+
/* ringbuffer helpers (the parts that are adreno specific) */
static inline void
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
new file mode 100644
index 000000000000..879c13fe74e0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -0,0 +1,479 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_core_irq.h"
+#include "dpu_trace.h"
+
+/**
+ * dpu_core_irq_callback_handler - dispatch core interrupts
+ * @arg: private data of callback handler
+ * @irq_idx: interrupt index
+ */
+static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
+{
+ struct dpu_kms *dpu_kms = arg;
+ struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
+ struct dpu_irq_callback *cb;
+ unsigned long irq_flags;
+
+ pr_debug("irq_idx=%d\n", irq_idx);
+
+ if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
+ DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
+ atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
+ }
+
+ atomic_inc(&irq_obj->irq_counts[irq_idx]);
+
+ /*
+ * Perform registered function callback
+ */
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
+ if (cb->func)
+ cb->func(cb->arg, irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ /*
+ * Clear pending interrupt status in HW.
+ * NOTE: dpu_core_irq_callback_handler is protected by top-level
+ * spinlock, so it is safe to clear any interrupt status here.
+ */
+ dpu_kms->hw_intr->ops.clear_intr_status_nolock(
+ dpu_kms->hw_intr,
+ irq_idx);
+}
+
+int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
+ enum dpu_intr_type intr_type, u32 instance_idx)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.irq_idx_lookup)
+ return -EINVAL;
+
+ return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
+ instance_idx);
+}
+
+/**
+ * _dpu_core_irq_enable - enable core interrupt given by the index
+ * @dpu_kms: Pointer to dpu kms context
+ * @irq_idx: interrupt index
+ */
+static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ unsigned long irq_flags;
+ int ret = 0, enable_count;
+
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->irq_obj.enable_counts ||
+ !dpu_kms->irq_obj.irq_counts) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+ DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+ trace_dpu_core_irq_enable_idx(irq_idx, enable_count);
+
+ if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
+ ret = dpu_kms->hw_intr->ops.enable_irq(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ irq_idx);
+
+ DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ /* empty callback list but interrupt is enabled */
+ if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
+ DPU_ERROR("irq_idx=%d enabled with no callback\n",
+ irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ }
+
+ return ret;
+}
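The enable path only touches the hardware on the 0 to 1 transition of the per-interrupt count; the disable path mirrors it on 1 to 0. The rule, isolated as a compilable sketch (C11 atomics stand in for the kernel's atomic_t):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int enable_count;

/* Returns true when the caller must enable the interrupt in hardware. */
static bool irq_ref_get(void)
{
	return atomic_fetch_add(&enable_count, 1) == 0;
}

/* Returns true when the caller must disable the interrupt in hardware. */
static bool irq_ref_put(void)
{
	return atomic_fetch_sub(&enable_count, 1) == 1;
}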
+
+int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+ int i, ret = 0, counts;
+
+ if (!dpu_kms || !irq_idxs || !irq_count) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+ if (counts)
+ DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+ for (i = 0; (i < irq_count) && !ret; i++)
+ ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);
+
+ return ret;
+}
+
+/**
+ * _dpu_core_irq_disable - disable core interrupt given by the index
+ * @dpu_kms: Pointer to dpu kms context
+ * @irq_idx: interrupt index
+ */
+static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ int ret = 0, enable_count;
+
+ if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+ DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+ trace_dpu_core_irq_disable_idx(irq_idx, enable_count);
+
+ if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
+ ret = dpu_kms->hw_intr->ops.disable_irq(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+ irq_idx);
+ DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+ }
+
+ return ret;
+}
+
+int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+ int i, ret = 0, counts;
+
+ if (!dpu_kms || !irq_idxs || !irq_count) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+ if (counts == 2)
+ DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+ for (i = 0; (i < irq_count) && !ret; i++)
+ ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);
+
+ return ret;
+}
+
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.get_interrupt_status)
+ return 0;
+
+ if (irq_idx < 0) {
+ DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+ __builtin_return_address(0), irq_idx);
+ return 0;
+ }
+
+ return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
+ irq_idx, clear);
+}
+
+int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ struct dpu_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!register_irq_cb || !register_irq_cb->func) {
+ DPU_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ list_add_tail(&register_irq_cb->list,
+ &dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ return 0;
+}
+
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ struct dpu_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!register_irq_cb || !register_irq_cb->func) {
+ DPU_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ /* empty callback list but interrupt is still enabled */
+ if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
+ atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
+ DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ return 0;
+}
+
+static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.clear_all_irqs)
+ return;
+
+ dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
+}
+
+static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.disable_all_irqs)
+ return;
+
+ dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
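For reference, DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq) used below expands, mechanically, to roughly:

static int dpu_debugfs_core_irq_open(struct inode *inode, struct file *file)
{
	return single_open(file, dpu_debugfs_core_irq_show, inode->i_private);
}
static const struct file_operations dpu_debugfs_core_irq_fops = {
	.owner = THIS_MODULE,
	.open = dpu_debugfs_core_irq_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
};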
+
+static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+ struct dpu_irq *irq_obj = s->private;
+ struct dpu_irq_callback *cb;
+ unsigned long irq_flags;
+ int i, irq_count, enable_count, cb_count;
+
+ if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
+ DPU_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ for (i = 0; i < irq_obj->total_irqs; i++) {
+ spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
+ cb_count = 0;
+ irq_count = atomic_read(&irq_obj->irq_counts[i]);
+ enable_count = atomic_read(&irq_obj->enable_counts[i]);
+ list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
+ cb_count++;
+ spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
+
+ if (irq_count || enable_count || cb_count)
+ seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
+ i, irq_count, enable_count, cb_count);
+ }
+
+ return 0;
+}
+
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
+
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
+ parent, &dpu_kms->irq_obj,
+ &dpu_debugfs_core_irq_fops);
+
+ return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove(dpu_kms->irq_obj.debugfs_file);
+ dpu_kms->irq_obj.debugfs_file = NULL;
+}
+
+#else
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid drm device\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ dpu_clear_all_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ spin_lock_init(&dpu_kms->irq_obj.cb_lock);
+
+ /* Create irq callbacks for all possible irq_idx */
+ dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
+ dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(struct list_head), GFP_KERNEL);
+ dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(atomic_t), GFP_KERNEL);
+ dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(atomic_t), GFP_KERNEL);
+ for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
+ INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
+ atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
+ atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
+ }
+}
+
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
+{
+ return 0;
+}
+
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid drm device\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
+ if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
+ !list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
+ DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+ dpu_clear_all_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ kfree(dpu_kms->irq_obj.irq_cb_tbl);
+ kfree(dpu_kms->irq_obj.enable_counts);
+ kfree(dpu_kms->irq_obj.irq_counts);
+ dpu_kms->irq_obj.irq_cb_tbl = NULL;
+ dpu_kms->irq_obj.enable_counts = NULL;
+ dpu_kms->irq_obj.irq_counts = NULL;
+ dpu_kms->irq_obj.total_irqs = 0;
+}
+
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
+{
+ /*
+ * Read the interrupt status from all sources. Interrupt statuses are
+ * stored within hw_intr.
+ * This function also clears the interrupt status after reading.
+ * An individual interrupt status bit is only stored if the interrupt
+ * is enabled.
+ */
+ dpu_kms->hw_intr->ops.get_interrupt_statuses(dpu_kms->hw_intr);
+
+ /*
+ * Dispatch to the HW driver to look up the interrupt that fired. When
+ * a matching interrupt is located, the HW driver calls
+ * dpu_core_irq_callback_handler with the irq_idx from the lookup table.
+ * dpu_core_irq_callback_handler invokes the registered callbacks and
+ * clears the interrupt status once they have finished.
+ */
+ dpu_kms->hw_intr->ops.dispatch_irqs(
+ dpu_kms->hw_intr,
+ dpu_core_irq_callback_handler,
+ dpu_kms);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
new file mode 100644
index 000000000000..5e98bba46af5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -0,0 +1,153 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_CORE_IRQ_H__
+#define __DPU_CORE_IRQ_H__
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+
+/**
+ * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: none
+ */
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: 0 if success; error code otherwise
+ */
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_uninstall - uninstall core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: none
+ */
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq - core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: interrupt handling status
+ */
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_idx_lookup - IRQ helper function for looking up irq_idx in the
+ * HW interrupt mapping table.
+ * @dpu_kms: DPU handle
+ * @intr_type: DPU HW interrupt type for lookup
+ * @instance_idx: DPU HW block instance defined in dpu_hw_mdss.h
+ * @return: irq_idx, or -EINVAL when the lookup fails
+ */
+int dpu_core_irq_idx_lookup(
+ struct dpu_kms *dpu_kms,
+ enum dpu_intr_type intr_type,
+ uint32_t instance_idx);
+
+/**
+ * dpu_core_irq_enable - IRQ helper function for enabling one or more IRQs
+ * @dpu_kms: DPU handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 on success enabling the IRQs, otherwise failure
+ *
+ * This function increments a reference count on each enable and decrements
+ * it on each disable. The interrupt is enabled if the count is 0 before the
+ * increment.
+ */
+int dpu_core_irq_enable(
+ struct dpu_kms *dpu_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * dpu_core_irq_disable - IRQ helper function for disabling one or more IRQs
+ * @dpu_kms: DPU handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 on success disabling the IRQs, otherwise failure
+ *
+ * This function increments a reference count on each enable and decrements
+ * it on each disable. The interrupt is disabled if the count is 0 after the
+ * decrement.
+ */
+int dpu_core_irq_disable(
+ struct dpu_kms *dpu_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * dpu_core_irq_read - IRQ helper function for reading IRQ status
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @clear: True to clear the irq after read
+ * @return: non-zero if an irq was detected; zero otherwise
+ */
+u32 dpu_core_irq_read(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ bool clear);
+
+/**
+ * dpu_core_irq_register_callback - register a callback function for an IRQ
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing callback function
+ * and argument. Passing NULL for irq_cb will unregister
+ * the callback for the given irq_idx.
+ * This must exist until un-registration.
+ * @return: 0 on success registering the callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_register_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_core_irq_unregister_callback - unregister a callback function for an IRQ
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing callback function
+ * and argument. Passing NULL for irq_cb will unregister
+ * the callback for the given irq_idx.
+ * This must match with registration.
+ * @return: 0 on success unregistering the callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_unregister_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_debugfs_core_irq_init - register core irq debugfs
+ * @dpu_kms: pointer to kms
+ * @parent: debugfs directory root
+ * @return: 0 on success
+ */
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent);
+
+/**
+ * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
+ * @dpu_kms: pointer to kms
+ */
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
+
+#endif /* __DPU_CORE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
new file mode 100644
index 000000000000..41c5191f9056
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -0,0 +1,637 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_core_perf.h"
+
+#define DPU_PERF_MODE_STRING_SIZE 128
+
+/**
+ * enum dpu_perf_mode - performance tuning mode
+ * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
+ * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
+ * @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
+ */
+enum dpu_perf_mode {
+ DPU_PERF_MODE_NORMAL,
+ DPU_PERF_MODE_MINIMUM,
+ DPU_PERF_MODE_FIXED,
+ DPU_PERF_MODE_MAX
+};
+
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv;
+
+ if (!crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid device\n");
+ return NULL;
+ }
+
+ priv = crtc->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ return to_dpu_kms(priv->kms);
+}
+
+static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
+{
+ return dpu_crtc_is_enabled(crtc);
+}
+
+static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
+{
+ struct drm_crtc *tmp_crtc;
+ bool intf_connected = false;
+
+ if (!crtc)
+ goto end;
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
+ _dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+ DPU_DEBUG("video interface connected crtc:%d\n",
+ tmp_crtc->base.id);
+ intf_connected = true;
+ goto end;
+ }
+ }
+
+end:
+ return intf_connected;
+}
+
+static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct dpu_core_perf_params *perf)
+{
+ struct dpu_crtc_state *dpu_cstate;
+ int i;
+
+ if (!kms || !kms->catalog || !crtc || !state || !perf) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_cstate = to_dpu_crtc_state(state);
+ memset(perf, 0, sizeof(struct dpu_core_perf_params));
+
+ if (!dpu_cstate->bw_control) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
+ 1000ULL;
+ perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
+ }
+ perf->core_clk_rate = kms->perf.max_core_clk_rate;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = 0;
+ perf->max_per_pipe_ib[i] = 0;
+ }
+ perf->core_clk_rate = 0;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
+ perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
+ }
+ perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+ }
+
+ DPU_DEBUG(
+ "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
+ crtc->base.id, perf->core_clk_rate,
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+}
+
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ u32 bw, threshold;
+ u64 bw_sum_of_intfs = 0;
+ enum dpu_crtc_client_type curr_client_type;
+ bool is_video_mode;
+ struct dpu_crtc_state *dpu_cstate;
+ struct drm_crtc *tmp_crtc;
+ struct dpu_kms *kms;
+ int i;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ /* we only need bandwidth check on real-time clients (interfaces) */
+ if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
+ return 0;
+
+ dpu_cstate = to_dpu_crtc_state(state);
+
+ /* obtain new values */
+ _dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
+
+ for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+ i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
+ curr_client_type = dpu_crtc_get_client_type(crtc);
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ (dpu_crtc_get_client_type(tmp_crtc) ==
+ curr_client_type) &&
+ (tmp_crtc != crtc)) {
+ struct dpu_crtc_state *tmp_cstate =
+ to_dpu_crtc_state(tmp_crtc->state);
+
+ DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+ tmp_crtc->base.id,
+ tmp_cstate->new_perf.bw_ctl[i],
+ tmp_cstate->bw_control);
+ /*
+ * For the bw check, only use the bw if the
+ * atomic property has already been set
+ */
+ if (tmp_cstate->bw_control)
+ bw_sum_of_intfs +=
+ tmp_cstate->new_perf.bw_ctl[i];
+ }
+ }
+
+ /* convert bandwidth to kb */
+ bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+ DPU_DEBUG("calculated bandwidth=%uk\n", bw);
+
+ is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
+ threshold = (is_video_mode ||
+ _dpu_core_video_mode_intf_connected(crtc)) ?
+ kms->catalog->perf.max_bw_low :
+ kms->catalog->perf.max_bw_high;
+
+ DPU_DEBUG("final threshold bw limit = %d\n", threshold);
+
+ if (!dpu_cstate->bw_control) {
+ DPU_DEBUG("bypass bandwidth check\n");
+ } else if (!threshold) {
+ DPU_ERROR("no bandwidth limits specified\n");
+ return -E2BIG;
+ } else if (bw > threshold) {
+ DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+ threshold);
+ return -E2BIG;
+ }
+ }
+
+ return 0;
+}
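A worked pass through the threshold check above, with hypothetical votes:

/*
 * this crtc:                       bw_ctl[MNOC] = 1,500,000,000 (bytes/s)
 * second crtc, same client type,
 * bw_control already set:          bw_ctl[MNOC] = 1,200,000,000
 *
 *   bw_sum_of_intfs = 2,700,000,000
 *   bw = DIV_ROUND_UP_ULL(2,700,000,000, 1000) = 2,700,000 (kB/s)
 *
 * Against a video-mode threshold of max_bw_low = 6,800,000 the commit
 * passes; a sum above the threshold fails with -E2BIG.
 */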
+
+static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
+ struct drm_crtc *crtc, u32 bus_id)
+{
+ struct dpu_core_perf_params perf = { { 0 } };
+ enum dpu_crtc_client_type curr_client_type
+ = dpu_crtc_get_client_type(crtc);
+ struct drm_crtc *tmp_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ int ret = 0;
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ curr_client_type ==
+ dpu_crtc_get_client_type(tmp_crtc)) {
+ dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+ perf.max_per_pipe_ib[bus_id] =
+ max(perf.max_per_pipe_ib[bus_id],
+ dpu_cstate->new_perf.max_per_pipe_ib[bus_id]);
+
+ DPU_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
+ tmp_crtc->base.id, bus_id,
+ dpu_cstate->new_perf.bw_ctl[bus_id]);
+ }
+ }
+ return ret;
+}
+
+/**
+ * dpu_core_perf_crtc_release_bw() - request zero bandwidth
+ * @crtc: pointer to a crtc
+ *
+ * Checks a state variable for the crtc; if all pending commit requests
+ * are done, meaning no more bandwidth is needed, the bandwidth request
+ * is released.
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
+{
+ struct drm_crtc *tmp_crtc;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ struct dpu_kms *kms;
+ int i;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid kms\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+ /* only do this for command mode rt client */
+ if (dpu_crtc_get_intf_mode(crtc) != INTF_MODE_CMD)
+ return;
+
+ /*
+ * If a video interface is present, the cmd panel bandwidth cannot
+ * be released.
+ */
+ if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ dpu_crtc_get_intf_mode(tmp_crtc) ==
+ INTF_MODE_VIDEO)
+ return;
+ }
+
+ /* Release the bandwidth */
+ if (kms->perf.enable_bw_release) {
+ trace_dpu_cmd_release_bw(crtc->base.id);
+ DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ dpu_crtc->cur_perf.bw_ctl[i] = 0;
+ _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+ }
+ }
+}
+
+static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
+{
+ struct dss_clk *core_clk = kms->perf.core_clk;
+
+ if (core_clk->max_rate && (rate > core_clk->max_rate))
+ rate = core_clk->max_rate;
+
+ core_clk->rate = rate;
+ return msm_dss_clk_set_rate(core_clk, 1);
+}
+
+static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
+{
+ u64 clk_rate = kms->perf.perf_tune.min_core_clk;
+ struct drm_crtc *crtc;
+ struct dpu_crtc_state *dpu_cstate;
+
+ drm_for_each_crtc(crtc, kms->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+ clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
+ clk_rate);
+ clk_rate = clk_round_rate(kms->perf.core_clk->clk,
+ clk_rate);
+ }
+ }
+
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
+ clk_rate = kms->perf.fix_core_clk_rate;
+
+ DPU_DEBUG("clk:%llu\n", clk_rate);
+
+ return clk_rate;
+}
+
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req)
+{
+ struct dpu_core_perf_params *new, *old;
+ int update_bus = 0, update_clk = 0;
+ u64 clk_rate = 0;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ int i;
+ struct msm_drm_private *priv;
+ struct dpu_kms *kms;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+ priv = kms->dev->dev_private;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+ DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
+ crtc->base.id, stop_req, kms->perf.core_clk_rate);
+
+ old = &dpu_crtc->cur_perf;
+ new = &dpu_cstate->new_perf;
+
+ if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ /*
+ * Cases for a bus bandwidth update:
+ * 1. new bandwidth vote - the "ab or ib" vote is higher
+ * than the current vote, for an update request.
+ * 2. new bandwidth vote - the "ab or ib" vote is lower
+ * than the current vote, at the end of a commit or stop.
+ */
+ if ((params_changed && ((new->bw_ctl[i] >
+ old->bw_ctl[i]) ||
+ (new->max_per_pipe_ib[i] >
+ old->max_per_pipe_ib[i]))) ||
+ (!params_changed && ((new->bw_ctl[i] <
+ old->bw_ctl[i]) ||
+ (new->max_per_pipe_ib[i] <
+ old->max_per_pipe_ib[i])))) {
+ DPU_DEBUG(
+ "crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+ crtc->base.id, params_changed,
+ new->bw_ctl[i], old->bw_ctl[i]);
+ old->bw_ctl[i] = new->bw_ctl[i];
+ old->max_per_pipe_ib[i] =
+ new->max_per_pipe_ib[i];
+ update_bus |= BIT(i);
+ }
+ }
+
+ if ((params_changed &&
+ (new->core_clk_rate > old->core_clk_rate)) ||
+ (!params_changed &&
+ (new->core_clk_rate < old->core_clk_rate))) {
+ old->core_clk_rate = new->core_clk_rate;
+ update_clk = 1;
+ }
+ } else {
+ DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
+ memset(old, 0, sizeof(*old));
+ memset(new, 0, sizeof(*new));
+ update_bus = ~0;
+ update_clk = 1;
+ }
+ trace_dpu_perf_crtc_update(crtc->base.id,
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
+ new->core_clk_rate, stop_req,
+ update_bus, update_clk);
+
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ if (update_bus & BIT(i)) {
+ ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+ if (ret) {
+ DPU_ERROR("crtc-%d: failed to update bw vote for bus-%d\n",
+ crtc->base.id, i);
+ return ret;
+ }
+ }
+ }
+
+ /*
+ * Update the clock after bandwidth vote to ensure
+ * bandwidth is available before clock rate is increased.
+ */
+ if (update_clk) {
+ clk_rate = _dpu_core_perf_get_core_clk_rate(kms);
+
+ trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);
+
+ ret = _dpu_core_perf_set_core_clk_rate(kms, clk_rate);
+ if (ret) {
+ DPU_ERROR("failed to set %s clock rate %llu\n",
+ kms->perf.core_clk->clk_name, clk_rate);
+ return ret;
+ }
+
+ kms->perf.core_clk_rate = clk_rate;
+ DPU_DEBUG("update clk rate = %lld HZ\n", clk_rate);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t _dpu_core_perf_mode_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_core_perf *perf = file->private_data;
+ struct dpu_perf_cfg *cfg = &perf->catalog->perf;
+ u32 perf_mode = 0;
+ char buf[10];
+
+ if (!perf)
+ return -ENODEV;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtouint(buf, 0, &perf_mode))
+ return -EFAULT;
+
+ if (perf_mode >= DPU_PERF_MODE_MAX)
+ return -EFAULT;
+
+ if (perf_mode == DPU_PERF_MODE_FIXED) {
+ DRM_INFO("fix performance mode\n");
+ } else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
+ /* run the driver with max clk and BW vote */
+ perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
+ perf->perf_tune.min_bus_vote =
+ (u64) cfg->max_bw_high * 1000;
+ DRM_INFO("minimum performance mode\n");
+ } else if (perf_mode == DPU_PERF_MODE_NORMAL) {
+ /* reset the perf tune params to 0 */
+ perf->perf_tune.min_core_clk = 0;
+ perf->perf_tune.min_bus_vote = 0;
+ DRM_INFO("normal performance mode\n");
+ }
+ perf->perf_tune.mode = perf_mode;
+
+ return count;
+}
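+/*
+ * Illustrative usage from userspace (the exact path depends on the
+ * parent dentry passed to dpu_core_perf_debugfs_init(), which creates
+ * the "core_perf" directory and this "perf_mode" file):
+ *
+ *   echo <DPU_PERF_MODE_* value> > .../core_perf/perf_mode
+ *
+ * e.g. writing the DPU_PERF_MODE_MINIMUM value raises the tuning floor
+ * to the maximum core clock and the max_bw_high bus vote until
+ * DPU_PERF_MODE_NORMAL is written back.
+ */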
+
+static ssize_t _dpu_core_perf_mode_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_core_perf *perf = file->private_data;
+ int len = 0;
+ char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
+
+ if (!perf)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf),
+ "mode %d min_mdp_clk %llu min_bus_vote %llu\n",
+ perf->perf_tune.mode,
+ perf->perf_tune.min_core_clk,
+ perf->perf_tune.min_bus_vote);
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ if ((count < len) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static const struct file_operations dpu_core_perf_mode_fops = {
+ .open = simple_open,
+ .read = _dpu_core_perf_mode_read,
+ .write = _dpu_core_perf_mode_write,
+};
+
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+ debugfs_remove_recursive(perf->debugfs_root);
+ perf->debugfs_root = NULL;
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent)
+{
+ struct dpu_mdss_cfg *catalog = perf->catalog;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ priv = perf->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ perf->debugfs_root = debugfs_create_dir("core_perf", parent);
+ if (!perf->debugfs_root) {
+ DPU_ERROR("failed to create core perf debugfs\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+ &perf->max_core_clk_rate);
+ debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+ &perf->core_clk_rate);
+ debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+ (u32 *)&perf->enable_bw_release);
+ debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.max_bw_low);
+ debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.max_bw_high);
+ debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_core_ib);
+ debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_llcc_ib);
+ debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_dram_ib);
+ debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+ (u32 *)perf, &dpu_core_perf_mode_fops);
+ debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+ &perf->fix_core_clk_rate);
+ debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+ &perf->fix_core_ib_vote);
+ debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+ &perf->fix_core_ab_vote);
+
+ return 0;
+}
+#else
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent)
+{
+ return 0;
+}
+#endif
+
+void dpu_core_perf_destroy(struct dpu_core_perf *perf)
+{
+ if (!perf) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_core_perf_debugfs_destroy(perf);
+ perf->max_core_clk_rate = 0;
+ perf->core_clk = NULL;
+ perf->phandle = NULL;
+ perf->catalog = NULL;
+ perf->dev = NULL;
+}
+
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+ struct drm_device *dev,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_power_handle *phandle,
+ struct dss_clk *core_clk)
+{
+ perf->dev = dev;
+ perf->catalog = catalog;
+ perf->phandle = phandle;
+ perf->core_clk = core_clk;
+
+ perf->max_core_clk_rate = core_clk->max_rate;
+ if (!perf->max_core_clk_rate) {
+ DPU_DEBUG("optional max core clk rate, use default\n");
+ perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
new file mode 100644
index 000000000000..fbcbe0c7527a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -0,0 +1,133 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_CORE_PERF_H_
+#define _DPU_CORE_PERF_H_
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "dpu_hw_catalog.h"
+#include "dpu_power_handle.h"
+
+#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
+
+/**
+ * struct dpu_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct dpu_core_perf_params {
+ u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
+ u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+ u64 core_clk_rate;
+};
+
+/**
+ * struct dpu_core_perf_tune - definition of performance tuning control
+ * @mode: performance mode
+ * @min_core_clk: minimum core clock
+ * @min_bus_vote: minimum bus vote
+ */
+struct dpu_core_perf_tune {
+ u32 mode;
+ u64 min_core_clk;
+ u64 min_bus_vote;
+};
+
+/**
+ * struct dpu_core_perf - definition of core performance context
+ * @dev: Pointer to drm device
+ * @debugfs_root: top level debug folder
+ * @catalog: Pointer to catalog configuration
+ * @phandle: Pointer to power handler
+ * @core_clk: Pointer to core clock structure
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ */
+struct dpu_core_perf {
+ struct drm_device *dev;
+ struct dentry *debugfs_root;
+ struct dpu_mdss_cfg *catalog;
+ struct dpu_power_handle *phandle;
+ struct dss_clk *core_clk;
+ u64 core_clk_rate;
+ u64 max_core_clk_rate;
+ struct dpu_core_perf_tune perf_tune;
+ u32 enable_bw_release;
+ u64 fix_core_clk_rate;
+ u64 fix_core_ib_vote;
+ u64 fix_core_ab_vote;
+};
+
+/**
+ * dpu_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+/**
+ * dpu_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * @stop_req: true if this is a stop request
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req);
+
+/**
+ * dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
+
+/**
+ * dpu_core_perf_destroy - destroy the given core performance context
+ * @perf: Pointer to core performance context
+ */
+void dpu_core_perf_destroy(struct dpu_core_perf *perf);
+
+/**
+ * dpu_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @dev: Pointer to drm device
+ * @catalog: Pointer to catalog
+ * @phandle: Pointer to power handle
+ * @core_clk: pointer to core clock
+ */
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+ struct drm_device *dev,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_power_handle *phandle,
+ struct dss_clk *core_clk);
+
+/**
+ * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
+ * @perf: Pointer to core performance context
+ * @parent: Pointer to parent debugfs node
+ */
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent);
+
+#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
new file mode 100644
index 000000000000..80cbf75bc2ff
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -0,0 +1,2138 @@
+/*
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/sort.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_rect.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_crtc.h"
+#include "dpu_plane.h"
+#include "dpu_encoder.h"
+#include "dpu_vbif.h"
+#include "dpu_power_handle.h"
+#include "dpu_core_perf.h"
+#include "dpu_trace.h"
+
+#define DPU_DRM_BLEND_OP_NOT_DEFINED 0
+#define DPU_DRM_BLEND_OP_OPAQUE 1
+#define DPU_DRM_BLEND_OP_PREMULTIPLIED 2
+#define DPU_DRM_BLEND_OP_COVERAGE 3
+#define DPU_DRM_BLEND_OP_MAX 4
+
+/* layer mixer index on dpu_crtc */
+#define LEFT_MIXER 0
+#define RIGHT_MIXER 1
+
+#define MISR_BUFF_SIZE 256
+
+static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return NULL;
+ }
+ priv = crtc->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ return to_dpu_kms(priv->kms);
+}
+
+static inline int _dpu_crtc_power_enable(struct dpu_crtc *dpu_crtc, bool enable)
+{
+ struct drm_crtc *crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!dpu_crtc) {
+ DPU_ERROR("invalid dpu crtc\n");
+ return -EINVAL;
+ }
+
+ crtc = &dpu_crtc->base;
+ if (!crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid drm device\n");
+ return -EINVAL;
+ }
+
+ priv = crtc->dev->dev_private;
+ if (!priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ if (enable)
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ else
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+/**
+ * _dpu_crtc_rp_to_crtc - get crtc from resource pool object
+ * @rp: Pointer to resource pool
+ * return: Pointer to drm crtc if success; null otherwise
+ */
+static struct drm_crtc *_dpu_crtc_rp_to_crtc(struct dpu_crtc_respool *rp)
+{
+ if (!rp)
+ return NULL;
+
+ return container_of(rp, struct dpu_crtc_state, rp)->base.crtc;
+}
+
+/**
+ * _dpu_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
+ * @rp: Pointer to resource pool
+ * @force: True to reclaim all resources; otherwise, reclaim only unused ones
+ * return: None
+ */
+static void _dpu_crtc_rp_reclaim(struct dpu_crtc_respool *rp, bool force)
+{
+ struct dpu_crtc_res *res, *next;
+ struct drm_crtc *crtc;
+
+ crtc = _dpu_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ DPU_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
+ force ? "destroy" : "free_unused");
+
+ list_for_each_entry_safe(res, next, &rp->res_list, list) {
+ if (!force && !(res->flags & DPU_CRTC_RES_FLAG_FREE))
+ continue;
+ DPU_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, rp->sequence_id,
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ list_del(&res->list);
+ if (res->ops.put)
+ res->ops.put(res->val);
+ kfree(res);
+ }
+}
+
+/**
+ * _dpu_crtc_rp_free_unused - free unused resource in pool
+ * @rp: Pointer to resource pool
+ * return: none
+ */
+static void _dpu_crtc_rp_free_unused(struct dpu_crtc_respool *rp)
+{
+ mutex_lock(rp->rp_lock);
+ _dpu_crtc_rp_reclaim(rp, false);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_destroy - destroy resource pool
+ * @rp: Pointer to resource pool
+ * return: None
+ */
+static void _dpu_crtc_rp_destroy(struct dpu_crtc_respool *rp)
+{
+ mutex_lock(rp->rp_lock);
+ list_del_init(&rp->rp_list);
+ _dpu_crtc_rp_reclaim(rp, true);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_hw_blk_get - get callback for hardware block
+ * @val: Resource handle
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle
+ */
+static void *_dpu_crtc_hw_blk_get(void *val, u32 type, u64 tag)
+{
+ DPU_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
+ return dpu_hw_blk_get(val, type, tag);
+}
+
+/**
+ * _dpu_crtc_hw_blk_put - put callback for hardware block
+ * @val: Resource handle
+ * return: None
+ */
+static void _dpu_crtc_hw_blk_put(void *val)
+{
+ DPU_DEBUG("res://%pK\n", val);
+ dpu_hw_blk_put(val);
+}
+
+/**
+ * _dpu_crtc_rp_duplicate - duplicate resource pool and reset reference count
+ * @rp: Pointer to original resource pool
+ * @dup_rp: Pointer to duplicated resource pool
+ * return: None
+ */
+static void _dpu_crtc_rp_duplicate(struct dpu_crtc_respool *rp,
+ struct dpu_crtc_respool *dup_rp)
+{
+ struct dpu_crtc_res *res, *dup_res;
+ struct drm_crtc *crtc;
+
+ if (!rp || !dup_rp || !rp->rp_head) {
+ DPU_ERROR("invalid resource pool\n");
+ return;
+ }
+
+ crtc = _dpu_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ DPU_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);
+
+ mutex_lock(rp->rp_lock);
+ dup_rp->sequence_id = rp->sequence_id + 1;
+ INIT_LIST_HEAD(&dup_rp->res_list);
+ dup_rp->ops = rp->ops;
+ list_for_each_entry(res, &rp->res_list, list) {
+ dup_res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
+ if (!dup_res) {
+ mutex_unlock(rp->rp_lock);
+ return;
+ }
+ INIT_LIST_HEAD(&dup_res->list);
+ atomic_set(&dup_res->refcount, 0);
+ dup_res->type = res->type;
+ dup_res->tag = res->tag;
+ dup_res->val = res->val;
+ dup_res->ops = res->ops;
+ dup_res->flags = DPU_CRTC_RES_FLAG_FREE;
+ DPU_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, dup_rp->sequence_id,
+ dup_res->type, dup_res->tag, dup_res->val,
+ atomic_read(&dup_res->refcount));
+ list_add_tail(&dup_res->list, &dup_rp->res_list);
+ if (dup_res->ops.get)
+ dup_res->ops.get(dup_res->val, 0, -1);
+ }
+
+ dup_rp->rp_lock = rp->rp_lock;
+ dup_rp->rp_head = rp->rp_head;
+ INIT_LIST_HEAD(&dup_rp->rp_list);
+ list_add_tail(&dup_rp->rp_list, rp->rp_head);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_reset - reset resource pool after allocation
+ * @rp: Pointer to original resource pool
+ * @rp_lock: Pointer to serialization resource pool lock
+ * @rp_head: Pointer to crtc resource pool head
+ * return: None
+ */
+static void _dpu_crtc_rp_reset(struct dpu_crtc_respool *rp,
+ struct mutex *rp_lock, struct list_head *rp_head)
+{
+ if (!rp || !rp_lock || !rp_head) {
+ DPU_ERROR("invalid resource pool\n");
+ return;
+ }
+
+ mutex_lock(rp_lock);
+ rp->rp_lock = rp_lock;
+ rp->rp_head = rp_head;
+ INIT_LIST_HEAD(&rp->rp_list);
+ rp->sequence_id = 0;
+ INIT_LIST_HEAD(&rp->res_list);
+ rp->ops.get = _dpu_crtc_hw_blk_get;
+ rp->ops.put = _dpu_crtc_hw_blk_put;
+ list_add_tail(&rp->rp_list, rp->rp_head);
+ mutex_unlock(rp_lock);
+}
+
+static void dpu_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ DPU_DEBUG("\n");
+
+ if (!crtc)
+ return;
+
+ dpu_crtc->phandle = NULL;
+
+ drm_crtc_cleanup(crtc);
+ mutex_destroy(&dpu_crtc->crtc_lock);
+ kfree(dpu_crtc);
+}
+
+static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
+ struct dpu_plane_state *pstate)
+{
+ struct dpu_hw_mixer *lm = mixer->hw_lm;
+
+ /* default to opaque blending */
+ lm->ops.setup_blend_config(lm, pstate->stage, 0xff, 0,
+ DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_BG_CONST);
+}
+
+static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *crtc_state;
+ int lm_idx, lm_horiz_position;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ crtc_state = to_dpu_crtc_state(crtc->state);
+
+ lm_horiz_position = 0;
+ for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+ const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
+ struct dpu_hw_mixer *hw_lm = dpu_crtc->mixers[lm_idx].hw_lm;
+ struct dpu_hw_mixer_cfg cfg;
+
+ if (!lm_roi || !drm_rect_visible(lm_roi))
+ continue;
+
+ cfg.out_width = drm_rect_width(lm_roi);
+ cfg.out_height = drm_rect_height(lm_roi);
+ cfg.right_mixer = lm_horiz_position++;
+ cfg.flags = 0;
+ hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
+ }
+}
+
+static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
+ struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
+{
+ struct drm_plane *plane;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
+ struct dpu_crtc_state *cstate;
+ struct dpu_plane_state *pstate = NULL;
+ struct dpu_format *format;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+ struct dpu_hw_stage_cfg *stage_cfg;
+
+ u32 flush_mask;
+ uint32_t stage_idx, lm_idx;
+ int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
+ bool bg_alpha_enable = false;
+
+ if (!dpu_crtc || !mixer) {
+ DPU_ERROR("invalid dpu_crtc or mixer\n");
+ return;
+ }
+
+ ctl = mixer->hw_ctl;
+ lm = mixer->hw_lm;
+ stage_cfg = &dpu_crtc->stage_cfg;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ state = plane->state;
+ if (!state)
+ continue;
+
+ pstate = to_dpu_plane_state(state);
+ fb = state->fb;
+
+ dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
+
+ DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
+ crtc->base.id,
+ pstate->stage,
+ plane->base.id,
+ dpu_plane_pipe(plane) - SSPP_VIG0,
+ state->fb ? state->fb->base.id : -1);
+
+ format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
+ if (!format) {
+ DPU_ERROR("invalid format\n");
+ return;
+ }
+
+ if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
+ bg_alpha_enable = true;
+
+ stage_idx = zpos_cnt[pstate->stage]++;
+ stage_cfg->stage[pstate->stage][stage_idx] =
+ dpu_plane_pipe(plane);
+ stage_cfg->multirect_index[pstate->stage][stage_idx] =
+ pstate->multirect_index;
+
+ trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
+ state, pstate, stage_idx,
+ dpu_plane_pipe(plane) - SSPP_VIG0,
+ format->base.pixel_format,
+ fb ? fb->modifier : 0);
+
+ /* blend config update */
+ for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+ _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate);
+
+ mixer[lm_idx].flush_mask |= flush_mask;
+
+ if (bg_alpha_enable && !format->alpha_enable)
+ mixer[lm_idx].mixer_op_mode = 0;
+ else
+ mixer[lm_idx].mixer_op_mode |=
+ 1 << pstate->stage;
+ }
+ }
+
+ _dpu_crtc_program_lm_output_roi(crtc);
+}
+
+/**
+ * _dpu_crtc_blend_setup - configure crtc mixers
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_crtc_state;
+ struct dpu_crtc_mixer *mixer;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+
+ int i;
+
+ if (!crtc)
+ return;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_crtc_state = to_dpu_crtc_state(crtc->state);
+ mixer = dpu_crtc->mixers;
+
+ DPU_DEBUG("%s\n", dpu_crtc->name);
+
+ if (dpu_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+ DPU_ERROR("invalid number mixers: %d\n", dpu_crtc->num_mixers);
+ return;
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
+ DPU_ERROR("invalid lm or ctl assigned to mixer\n");
+ return;
+ }
+ mixer[i].mixer_op_mode = 0;
+ mixer[i].flush_mask = 0;
+ if (mixer[i].hw_ctl->ops.clear_all_blendstages)
+ mixer[i].hw_ctl->ops.clear_all_blendstages(
+ mixer[i].hw_ctl);
+ }
+
+ /* initialize stage cfg */
+ memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+
+ _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
+
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ ctl = mixer[i].hw_ctl;
+ lm = mixer[i].hw_lm;
+
+ lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
+
+ mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
+ mixer[i].hw_lm->idx);
+
+ /* stage config flush mask */
+ ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+
+ DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+ mixer[i].hw_lm->idx - LM_0,
+ mixer[i].mixer_op_mode,
+ ctl->idx - CTL_0,
+ mixer[i].flush_mask);
+
+ ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+ &dpu_crtc->stage_cfg);
+ }
+}
+
+/**
+ * _dpu_crtc_complete_flip - signal pending page_flip events
+ * @crtc: Pointer to drm crtc structure
+ *
+ * Any pending vblank events are added to the vblank_event_list
+ * so that the next vblank interrupt shall signal them.
+ * However, PAGE_FLIP events are not handled through the vblank_event_list.
+ * This API signals any pending PAGE_FLIP events that were requested through
+ * DRM_IOCTL_MODE_PAGE_FLIP and are cached in dpu_crtc->event.
+ */
+static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (dpu_crtc->event) {
+ DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
+ dpu_crtc->event);
+ trace_dpu_crtc_complete_flip(DRMID(crtc));
+ drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
+ dpu_crtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ if (!crtc || !crtc->dev) {
+ DPU_ERROR("invalid crtc\n");
+ return INTF_MODE_NONE;
+ }
+
+ drm_for_each_encoder(encoder, crtc->dev)
+ if (encoder->crtc == crtc)
+ return dpu_encoder_get_intf_mode(encoder);
+
+ return INTF_MODE_NONE;
+}
+
+static void dpu_crtc_vblank_cb(void *data)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *)data;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ /* keep statistics on vblank callback - with auto reset via debugfs */
+ if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
+ dpu_crtc->vblank_cb_time = ktime_get();
+ else
+ dpu_crtc->vblank_cb_count++;
+ _dpu_crtc_complete_flip(crtc);
+ drm_crtc_handle_vblank(crtc);
+ trace_dpu_crtc_vblank_cb(DRMID(crtc));
+}
+
+static void dpu_crtc_frame_event_work(struct kthread_work *work)
+{
+ struct msm_drm_private *priv;
+ struct dpu_crtc_frame_event *fevent;
+ struct drm_crtc *crtc;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_kms *dpu_kms;
+ unsigned long flags;
+ bool frame_done = false;
+
+ if (!work) {
+ DPU_ERROR("invalid work handle\n");
+ return;
+ }
+
+ fevent = container_of(work, struct dpu_crtc_frame_event, work);
+ if (!fevent->crtc || !fevent->crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ crtc = fevent->crtc;
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid kms handle\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+ DPU_ATRACE_BEGIN("crtc_frame_event");
+
+ DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+ ktime_to_ns(fevent->ts));
+
+ if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR
+ | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (atomic_read(&dpu_crtc->frame_pending) < 1) {
+ /* this should not happen */
+ DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
+ crtc->base.id,
+ fevent->event,
+ ktime_to_ns(fevent->ts),
+ atomic_read(&dpu_crtc->frame_pending));
+ } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
+ /* release bandwidth and other resources */
+ trace_dpu_crtc_frame_event_done(DRMID(crtc),
+ fevent->event);
+ dpu_core_perf_crtc_release_bw(crtc);
+ } else {
+ trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
+ fevent->event);
+ }
+
+ if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
+ dpu_core_perf_crtc_update(crtc, 0, false);
+
+ if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR))
+ frame_done = true;
+ }
+
+ if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
+ DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
+ crtc->base.id, ktime_to_ns(fevent->ts));
+
+ if (frame_done)
+ complete_all(&dpu_crtc->frame_done_comp);
+
+ spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+ list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+ DPU_ATRACE_END("crtc_frame_event");
+}
+
+/*
+ * dpu_crtc_frame_event_cb - crtc frame event callback API. The CRTC module
+ * registers this callback with the encoder for all frame events such as
+ * frame_error, frame_done and idle_timeout. The encoder may invoke it from
+ * different contexts - IRQ, user thread, commit thread, etc. Each event
+ * should be carefully reviewed and processed in the proper task context to
+ * avoid scheduling delay and to properly manage the IRQ context's bottom
+ * half processing.
+ */
+static void dpu_crtc_frame_event_cb(void *data, u32 event)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *)data;
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_crtc_frame_event *fevent;
+ unsigned long flags;
+ u32 crtc_id;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ /* Nothing to do on idle event */
+ if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
+ return;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ priv = crtc->dev->dev_private;
+ crtc_id = drm_crtc_index(crtc);
+
+ trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
+
+ spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+ fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
+ struct dpu_crtc_frame_event, list);
+ if (fevent)
+ list_del_init(&fevent->list);
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+
+ if (!fevent) {
+ DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
+ return;
+ }
+
+ fevent->event = event;
+ fevent->crtc = crtc;
+ fevent->ts = ktime_get();
+ kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
+}
+
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ if (!crtc || !crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ trace_dpu_crtc_complete_commit(DRMID(crtc));
+}
+
+static void _dpu_crtc_setup_mixer_for_encoder(
+ struct drm_crtc *crtc,
+ struct drm_encoder *enc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ struct dpu_rm *rm = &dpu_kms->rm;
+ struct dpu_crtc_mixer *mixer;
+ struct dpu_hw_ctl *last_valid_ctl = NULL;
+ int i;
+ struct dpu_rm_hw_iter lm_iter, ctl_iter;
+
+ dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
+ dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
+
+ /* Set up all the mixers and ctls reserved by this encoder */
+ for (i = dpu_crtc->num_mixers; i < ARRAY_SIZE(dpu_crtc->mixers); i++) {
+ mixer = &dpu_crtc->mixers[i];
+
+ if (!dpu_rm_get_hw(rm, &lm_iter))
+ break;
+ mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
+
+ /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
+ if (!dpu_rm_get_hw(rm, &ctl_iter)) {
+ DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
+ mixer->hw_lm->idx - LM_0);
+ mixer->hw_ctl = last_valid_ctl;
+ } else {
+ mixer->hw_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
+ last_valid_ctl = mixer->hw_ctl;
+ }
+
+ /* Shouldn't happen, mixers are always >= ctls */
+ if (!mixer->hw_ctl) {
+ DPU_ERROR("no valid ctls found for lm %d\n",
+ mixer->hw_lm->idx - LM_0);
+ return;
+ }
+
+ mixer->encoder = enc;
+
+ dpu_crtc->num_mixers++;
+ DPU_DEBUG("setup mixer %d: lm %d\n",
+ i, mixer->hw_lm->idx - LM_0);
+ DPU_DEBUG("setup mixer %d: ctl %d\n",
+ i, mixer->hw_ctl->idx - CTL_0);
+ }
+}
+
+static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_encoder *enc;
+
+ dpu_crtc->num_mixers = 0;
+ dpu_crtc->mixers_swapped = false;
+ memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ /* Check for mixers on all encoders attached to this crtc */
+ list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
+ }
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
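+/**
+ * _dpu_crtc_setup_lm_bounds - split crtc output among the layer mixers
+ * @crtc: Pointer to drm crtc structure
+ * @state: Pointer to crtc state being programmed
+ *
+ * Each mixer gets a horizontal strip of the adjusted mode: assuming
+ * dpu_crtc_get_mixer_width() returns half the mode width in a dual-mixer
+ * configuration, a 1920-wide mode yields bounds [0, 960) and [960, 1920).
+ */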
+static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+ struct drm_display_mode *adj_mode;
+ u32 crtc_split_width;
+ int i;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ adj_mode = &state->adjusted_mode;
+ crtc_split_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, adj_mode);
+
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ struct drm_rect *r = &cstate->lm_bounds[i];
+ r->x1 = crtc_split_width * i;
+ r->y1 = 0;
+ r->x2 = r->x1 + crtc_split_width;
+ r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
+
+ trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+ }
+
+ drm_mode_debug_printmodeline(adj_mode);
+}
+
+static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ unsigned long flags;
+ struct dpu_crtc_smmu_state_data *smmu_state;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dev = crtc->dev;
+ smmu_state = &dpu_crtc->smmu_state;
+
+ if (!dpu_crtc->num_mixers) {
+ _dpu_crtc_setup_mixers(crtc);
+ _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
+ }
+
+ if (dpu_crtc->event) {
+ WARN_ON(dpu_crtc->event);
+ } else {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ dpu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ /* encoder will trigger pending mask now */
+ dpu_encoder_trigger_kickoff_pending(encoder);
+ }
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ _dpu_crtc_blend_setup(crtc);
+
+ /*
+ * PP_DONE irq is only used by command mode for now.
+ * It is better to request pending before FLUSH and START trigger
+ * to make sure no pp_done irq missed.
+ * This is safe because no pp_done will happen before SW trigger
+ * in command mode.
+ */
+}
+
+static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_device *dev;
+ struct drm_plane *plane;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *event_thread;
+ unsigned long flags;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
+ dev = crtc->dev;
+ priv = dev->dev_private;
+
+ if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
+ return;
+ }
+
+ event_thread = &priv->event_thread[crtc->index];
+
+ if (dpu_crtc->event) {
+ DPU_DEBUG("already received dpu_crtc->event\n");
+ } else {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ dpu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ /*
+ * For planes without commit update, drm framework will not add
+ * those planes to current state since hardware update is not
+ * required. However, if those planes were power collapsed since
+ * last commit cycle, driver has to restore the hardware state
+ * of those planes explicitly here prior to plane flush.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc)
+ dpu_plane_restore(plane);
+
+ /* update performance setting before crtc kickoff */
+ dpu_core_perf_crtc_update(crtc, 1, false);
+
+ /*
+ * Final plane updates: Give each plane a chance to complete all
+ * required writes/flushing before crtc's "flush
+ * everything" call below.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ if (dpu_crtc->smmu_state.transition_error)
+ dpu_plane_set_error(plane, true);
+ dpu_plane_flush(plane);
+ }
+
+ /* Kickoff will be scheduled by outer layer */
+}
+
+/**
+ * dpu_crtc_destroy_state - state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ */
+static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid argument(s)\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ _dpu_crtc_rp_destroy(&cstate->rp);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(cstate);
+}
+
+static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ int ret, rc = 0;
+
+ if (!crtc) {
+ DPU_ERROR("invalid argument\n");
+ return -EINVAL;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ if (!atomic_read(&dpu_crtc->frame_pending)) {
+ DPU_DEBUG("no frames pending\n");
+ return 0;
+ }
+
+ DPU_ATRACE_BEGIN("frame done completion wait");
+ ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
+ msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
+ if (!ret) {
+ DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
+ rc = -ETIMEDOUT;
+ }
+ DPU_ATRACE_END("frame done completion wait");
+
+ return rc;
+}
+
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_crtc_state *cstate;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+ dev = crtc->dev;
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ priv = dpu_kms->dev->dev_private;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to start a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ DPU_ATRACE_BEGIN("crtc_commit");
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct dpu_encoder_kickoff_params params = { 0 };
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ /*
+ * Encoder will flush/start now, unless it has a tx pending.
+ * If so, it may delay and flush at an irq event (e.g. ppdone)
+ */
+ dpu_encoder_prepare_for_kickoff(encoder, &params);
+ }
+
+ /* wait for frame_event_done completion */
+ DPU_ATRACE_BEGIN("wait_for_frame_done_event");
+ ret = _dpu_crtc_wait_for_frame_done(crtc);
+ DPU_ATRACE_END("wait_for_frame_done_event");
+ if (ret) {
+ DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+ goto end;
+ }
+
+ if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+ /* acquire bandwidth and other resources */
+ DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+ } else {
+ DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+ }
+
+ dpu_crtc->play_count++;
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ dpu_encoder_kickoff(encoder);
+ }
+
+end:
+ reinit_completion(&dpu_crtc->frame_done_comp);
+ DPU_ATRACE_END("crtc_commit");
+}
+
+/**
+ * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
+ * @dpu_crtc: Pointer to dpu crtc structure
+ * @enable: Whether to enable/disable vblanks
+ *
+ * Return: error code
+ *
+ * Must be called with dpu_crtc->crtc_lock held; the lock is dropped and
+ * re-acquired around the power enable/disable calls.
+ */
+static int _dpu_crtc_vblank_enable_no_lock(
+ struct dpu_crtc *dpu_crtc, bool enable)
+{
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct drm_encoder *enc;
+
+ if (!dpu_crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ crtc = &dpu_crtc->base;
+ dev = crtc->dev;
+
+ if (enable) {
+ int ret;
+
+ /* drop lock since power crtc cb may try to re-acquire lock */
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ ret = _dpu_crtc_power_enable(dpu_crtc, true);
+ mutex_lock(&dpu_crtc->crtc_lock);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
+ DRMID(enc), enable,
+ dpu_crtc);
+
+ dpu_encoder_register_vblank_callback(enc,
+ dpu_crtc_vblank_cb, (void *)crtc);
+ }
+ } else {
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
+ DRMID(enc), enable,
+ dpu_crtc);
+
+ dpu_encoder_register_vblank_callback(enc, NULL, NULL);
+ }
+
+ /* drop lock since power crtc cb may try to re-acquire lock */
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+ mutex_lock(&dpu_crtc->crtc_lock);
+ }
+
+ return 0;
+}
+
+/**
+ * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
+ * @crtc: Pointer to drm crtc object
+ * @enable: true to enable suspend, false to indicate resume
+ */
+static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int ret = 0;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+ priv = crtc->dev->dev_private;
+
+ if (!priv->kms) {
+ DPU_ERROR("invalid crtc kms\n");
+ return;
+ }
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ /*
+ * If the vblank is enabled, release a power reference on suspend
+ * and take it back during resume (if it is still enabled).
+ */
+ trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
+ if (dpu_crtc->suspend == enable)
+ DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
+ crtc->base.id, enable);
+ else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+
+ dpu_crtc->suspend = enable;
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+/**
+ * dpu_crtc_duplicate_state - state duplicate hook
+ * @crtc: Pointer to drm crtc structure
+ * Return: Pointer to new drm_crtc_state structure
+ */
+static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate, *old_cstate;
+
+ if (!crtc || !crtc->state) {
+ DPU_ERROR("invalid argument(s)\n");
+ return NULL;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ old_cstate = to_dpu_crtc_state(crtc->state);
+ cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
+ if (!cstate) {
+ DPU_ERROR("failed to allocate state\n");
+ return NULL;
+ }
+
+ /* duplicate base helper */
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
+
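+ /*
+ * Give the copy its own resource pool; duplicated entries restart
+ * at refcount 0 (see _dpu_crtc_rp_duplicate()).
+ */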
+ _dpu_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
+
+ return &cstate->base;
+}
+
+/**
+ * dpu_crtc_reset - reset hook for CRTCs
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void dpu_crtc_reset(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ /* revert suspend actions, if necessary */
+ if (dpu_kms_is_suspend_state(crtc->dev))
+ _dpu_crtc_set_suspend(crtc, false);
+
+ /* remove previous state, if present */
+ if (crtc->state) {
+ dpu_crtc_destroy_state(crtc, crtc->state);
+ crtc->state = NULL;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
+ if (!cstate) {
+ DPU_ERROR("failed to allocate state\n");
+ return;
+ }
+
+ _dpu_crtc_rp_reset(&cstate->rp, &dpu_crtc->rp_lock,
+ &dpu_crtc->rp_head);
+
+ cstate->base.crtc = crtc;
+ crtc->state = &cstate->base;
+}
+
+static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
+{
+ struct drm_crtc *crtc = arg;
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct dpu_crtc_mixer *m;
+ u32 i, misr_status;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
+
+ switch (event_type) {
+ case DPU_POWER_EVENT_POST_ENABLE:
+ /* restore encoder; crtc will be programmed during commit */
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ dpu_encoder_virt_restore(encoder);
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr ||
+ !dpu_crtc->misr_enable)
+ continue;
+
+ m->hw_lm->ops.setup_misr(m->hw_lm, true,
+ dpu_crtc->misr_frame_count);
+ }
+ break;
+ case DPU_POWER_EVENT_PRE_DISABLE:
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.collect_misr ||
+ !dpu_crtc->misr_enable)
+ continue;
+
+ misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
+ dpu_crtc->misr_data[i] = misr_status ? misr_status :
+ dpu_crtc->misr_data[i];
+ }
+ break;
+ case DPU_POWER_EVENT_POST_DISABLE:
+ /*
+ * Nothing to do. All the planes on the CRTC will be
+ * programmed for every frame.
+ */
+ break;
+ default:
+ DPU_DEBUG("event:%d not handled\n", event_type);
+ break;
+ }
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+static void dpu_crtc_disable(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+ struct drm_display_mode *mode;
+ struct drm_encoder *encoder;
+ struct msm_drm_private *priv;
+ int ret;
+ unsigned long flags;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
+ mode = &cstate->base.adjusted_mode;
+ priv = crtc->dev->dev_private;
+
+ DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+
+ if (dpu_kms_is_suspend_state(crtc->dev))
+ _dpu_crtc_set_suspend(crtc, true);
+
+ /* Disable/save vblank irq handling */
+ drm_crtc_vblank_off(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ /* wait for frame_event_done completion */
+ if (_dpu_crtc_wait_for_frame_done(crtc))
+ DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+
+ trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
+ if (dpu_crtc->enabled && !dpu_crtc->suspend &&
+ dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->enabled = false;
+
+ if (atomic_read(&dpu_crtc->frame_pending)) {
+ trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
+ atomic_read(&dpu_crtc->frame_pending));
+ dpu_core_perf_crtc_release_bw(crtc);
+ atomic_set(&dpu_crtc->frame_pending, 0);
+ }
+
+ dpu_core_perf_crtc_update(crtc, 0, true);
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
+ }
+
+ if (dpu_crtc->power_event)
+ dpu_power_handle_unregister_event(dpu_crtc->phandle,
+ dpu_crtc->power_event);
+
+ memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
+ dpu_crtc->num_mixers = 0;
+ dpu_crtc->mixers_swapped = false;
+
+ /* disable clk & bw control until clk & bw properties are set */
+ cstate->bw_control = false;
+ cstate->bw_split_vote = false;
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+}
+
+static void dpu_crtc_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct msm_drm_private *priv;
+ int ret;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ priv = crtc->dev->dev_private;
+
+ DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ dpu_encoder_register_frame_event_callback(encoder,
+ dpu_crtc_frame_event_cb, (void *)crtc);
+ }
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
+ if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
+ dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->enabled = true;
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ /* Enable/restore vblank irq handling */
+ drm_crtc_vblank_on(crtc);
+
+ dpu_crtc->power_event = dpu_power_handle_register_event(
+ dpu_crtc->phandle,
+ DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_POST_DISABLE |
+ DPU_POWER_EVENT_PRE_DISABLE,
+ dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
+}
+
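+/*
+ * Scratch bookkeeping used by dpu_crtc_atomic_check() to validate the
+ * plane states gathered from the crtc state.
+ */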
+struct plane_state {
+ struct dpu_plane_state *dpu_pstate;
+ const struct drm_plane_state *drm_pstate;
+ int stage;
+ u32 pipe_id;
+};
+
+static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct plane_state *pstates;
+ struct dpu_crtc_state *cstate;
+
+ const struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+
+ int cnt = 0, rc = 0, mixer_width, i, z_pos;
+
+ struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
+ int multirect_count = 0;
+ const struct drm_plane_state *pipe_staged[SSPP_MAX];
+ int left_zpos_cnt = 0, right_zpos_cnt = 0;
+ struct drm_rect crtc_rect = { 0 };
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
+ if (!pstates)
+ return -ENOMEM;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ if (!state->enable || !state->active) {
+ DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
+ crtc->base.id, state->enable, state->active);
+ goto end;
+ }
+
+ mode = &state->adjusted_mode;
+ DPU_DEBUG("%s: check", dpu_crtc->name);
+
+ /* force a full mode set if active state changed */
+ if (state->active_changed)
+ state->mode_changed = true;
+
+ memset(pipe_staged, 0, sizeof(pipe_staged));
+
+ mixer_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+
+ _dpu_crtc_setup_lm_bounds(crtc, state);
+
+ crtc_rect.x2 = mode->hdisplay;
+ crtc_rect.y2 = mode->vdisplay;
+
+ /* get plane state for all drm planes associated with crtc state */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ struct drm_rect dst, clip = crtc_rect;
+
+ if (IS_ERR_OR_NULL(pstate)) {
+ rc = PTR_ERR(pstate);
+ DPU_ERROR("%s: failed to get plane%d state, %d\n",
+ dpu_crtc->name, plane->base.id, rc);
+ goto end;
+ }
+ if (cnt >= DPU_STAGE_MAX * 4)
+ continue;
+
+ pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
+ pstates[cnt].drm_pstate = pstate;
+ pstates[cnt].stage = pstate->normalized_zpos;
+ pstates[cnt].pipe_id = dpu_plane_pipe(plane);
+
+ if (pipe_staged[pstates[cnt].pipe_id]) {
+ multirect_plane[multirect_count].r0 =
+ pipe_staged[pstates[cnt].pipe_id];
+ multirect_plane[multirect_count].r1 = pstate;
+ multirect_count++;
+
+ pipe_staged[pstates[cnt].pipe_id] = NULL;
+ } else {
+ pipe_staged[pstates[cnt].pipe_id] = pstate;
+ }
+
+ cnt++;
+
+ dst = drm_plane_state_dest(pstate);
+ if (!drm_rect_intersect(&clip, &dst) ||
+ !drm_rect_equals(&clip, &dst)) {
+ DPU_ERROR("invalid vertical/horizontal destination\n");
+ DPU_ERROR("display: " DRM_RECT_FMT " plane: "
+ DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
+ DRM_RECT_ARG(&dst));
+ rc = -E2BIG;
+ goto end;
+ }
+ }
+
+ for (i = 1; i < SSPP_MAX; i++) {
+ if (pipe_staged[i]) {
+ dpu_plane_clear_multirect(pipe_staged[i]);
+
+ if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
+ DPU_ERROR(
+ "r1 only virt plane:%d not supported\n",
+ pipe_staged[i]->plane->base.id);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+ }
+
+ z_pos = -1;
+ for (i = 0; i < cnt; i++) {
+ /* reset counts at every new blend stage */
+ if (pstates[i].stage != z_pos) {
+ left_zpos_cnt = 0;
+ right_zpos_cnt = 0;
+ z_pos = pstates[i].stage;
+ }
+
+ /* verify z_pos setting before using it */
+ if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
+ DPU_ERROR("> %d plane stages assigned\n",
+ DPU_STAGE_MAX - DPU_STAGE_0);
+ rc = -EINVAL;
+ goto end;
+ } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
+ if (left_zpos_cnt == 2) {
+ DPU_ERROR("> 2 planes @ stage %d on left\n",
+ z_pos);
+ rc = -EINVAL;
+ goto end;
+ }
+ left_zpos_cnt++;
+
+ } else {
+ if (right_zpos_cnt == 2) {
+ DPU_ERROR("> 2 planes @ stage %d on right\n",
+ z_pos);
+ rc = -EINVAL;
+ goto end;
+ }
+ right_zpos_cnt++;
+ }
+
+ pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
+ DPU_DEBUG("%s: zpos %d", dpu_crtc->name, z_pos);
+ }
+
+ for (i = 0; i < multirect_count; i++) {
+ if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
+ DPU_ERROR(
+ "multirect validation failed for planes (%d - %d)\n",
+ multirect_plane[i].r0->plane->base.id,
+ multirect_plane[i].r1->plane->base.id);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+ rc = dpu_core_perf_crtc_check(crtc, state);
+ if (rc) {
+ DPU_ERROR("crtc%d failed performance check %d\n",
+ crtc->base.id, rc);
+ goto end;
+ }
+
+ /* validate source split:
+ * use pstates sorted by stage to check planes on same stage
+ * we assume that all pipes are in source split so its valid to compare
+ * without taking into account left/right mixer placement
+ */
+ for (i = 1; i < cnt; i++) {
+ struct plane_state *prv_pstate, *cur_pstate;
+ struct drm_rect left_rect, right_rect;
+ int32_t left_pid, right_pid;
+ int32_t stage;
+
+ prv_pstate = &pstates[i - 1];
+ cur_pstate = &pstates[i];
+ if (prv_pstate->stage != cur_pstate->stage)
+ continue;
+
+ stage = cur_pstate->stage;
+
+ left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
+ left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);
+
+ right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
+ right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);
+
+ if (right_rect.x1 < left_rect.x1) {
+ swap(left_pid, right_pid);
+ swap(left_rect, right_rect);
+ }
+
+ /*
+ * - planes are enumerated in pipe-priority order such that
+ * planes with lower drm_id must be left-most in a shared
+ * blend-stage when using source split.
+ * - planes in source split must be contiguous in width
+ * - planes in source split must have same dest yoff and height
+ */
+ if (right_pid < left_pid) {
+ DPU_ERROR(
+ "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
+ stage, left_pid, right_pid);
+ rc = -EINVAL;
+ goto end;
+ } else if (right_rect.x1 != drm_rect_width(&left_rect)) {
+ DPU_ERROR("non-contiguous coordinates for src split. "
+ "stage: %d left: " DRM_RECT_FMT " right: "
+ DRM_RECT_FMT "\n", stage,
+ DRM_RECT_ARG(&left_rect),
+ DRM_RECT_ARG(&right_rect));
+ rc = -EINVAL;
+ goto end;
+ } else if (left_rect.y1 != right_rect.y1 ||
+ drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
+ DPU_ERROR("source split at stage: %d. invalid "
+ "yoff/height: left: " DRM_RECT_FMT " right: "
+ DRM_RECT_FMT "\n", stage,
+ DRM_RECT_ARG(&left_rect),
+ DRM_RECT_ARG(&right_rect));
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+end:
+ _dpu_crtc_rp_free_unused(&cstate->rp);
+ kfree(pstates);
+ return rc;
+}
+
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
+{
+ struct dpu_crtc *dpu_crtc;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
+ if (dpu_crtc->enabled && !dpu_crtc->suspend) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->vblank_requested = en;
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_plane_state *pstate = NULL;
+ struct dpu_crtc_mixer *m;
+
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
+ struct dpu_crtc_state *cstate;
+
+ int i, out_width;
+
+ if (!s || !s->private)
+ return -EINVAL;
+
+ dpu_crtc = s->private;
+ crtc = &dpu_crtc->base;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ mode = &crtc->state->adjusted_mode;
+ out_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+
+ seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
+ mode->hdisplay, mode->vdisplay);
+
+ seq_puts(s, "\n");
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm)
+ seq_printf(s, "\tmixer[%d] has no lm\n", i);
+ else if (!m->hw_ctl)
+ seq_printf(s, "\tmixer[%d] has no ctl\n", i);
+ else
+ seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+ m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
+ out_width, mode->vdisplay);
+ }
+
+ seq_puts(s, "\n");
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = to_dpu_plane_state(plane->state);
+ state = plane->state;
+
+ if (!pstate || !state)
+ continue;
+
+ seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
+ pstate->stage);
+
+ if (plane->state->fb) {
+ fb = plane->state->fb;
+
+ seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
+ fb->base.id, (char *) &fb->format->format,
+ fb->width, fb->height);
+ for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
+ seq_printf(s, "cpp[%d]:%u ",
+ i, fb->format->cpp[i]);
+ seq_puts(s, "\n\t");
+
+ seq_printf(s, "modifier:%8llu ", fb->modifier);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t");
+ for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
+ seq_printf(s, "pitches[%d]:%8u ", i,
+ fb->pitches[i]);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t");
+ for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
+ seq_printf(s, "offsets[%d]:%8u ", i,
+ fb->offsets[i]);
+ seq_puts(s, "\n");
+ }
+
+ seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
+ state->src_x, state->src_y, state->src_w, state->src_h);
+
+ seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
+ state->crtc_x, state->crtc_y, state->crtc_w,
+ state->crtc_h);
+ seq_printf(s, "\tmultirect: mode: %d index: %d\n",
+ pstate->multirect_mode, pstate->multirect_index);
+
+ seq_puts(s, "\n");
+ }
+ if (dpu_crtc->vblank_cb_count) {
+ ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
+ s64 diff_ms = ktime_to_ms(diff);
+ s64 fps = diff_ms ? div_s64(
+ dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;
+
+ seq_printf(s,
+ "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
+ fps, dpu_crtc->vblank_cb_count,
+ ktime_to_ms(diff), dpu_crtc->play_count);
+
+ /* reset time & count for next measurement */
+ dpu_crtc->vblank_cb_count = 0;
+ dpu_crtc->vblank_cb_time = ktime_set(0, 0);
+ }
+
+ seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ return 0;
+}
+
+static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, _dpu_debugfs_status_show, inode->i_private);
+}
+
+static ssize_t _dpu_crtc_misr_setup(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_mixer *m;
+ int i = 0, rc;
+ char buf[MISR_BUFF_SIZE + 1];
+ u32 frame_count, enable;
+ size_t buff_copy;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_crtc = file->private_data;
+ buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+ if (copy_from_user(buf, user_buf, buff_copy)) {
+ DPU_ERROR("buffer copy failed\n");
+ return -EINVAL;
+ }
+
+ buf[buff_copy] = 0; /* end of string */
+
+ if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+ return -EINVAL;
+
+ rc = _dpu_crtc_power_enable(dpu_crtc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ dpu_crtc->misr_enable = enable;
+ dpu_crtc->misr_frame_count = frame_count;
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ dpu_crtc->misr_data[i] = 0;
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
+ continue;
+
+ m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
+ }
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+
+ return count;
+}
+
+static ssize_t _dpu_crtc_misr_read(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_mixer *m;
+ int i = 0, rc;
+ u32 misr_status;
+ ssize_t len = 0;
+ char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+
+ if (*ppos)
+ return 0;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_crtc = file->private_data;
+ rc = _dpu_crtc_power_enable(dpu_crtc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ if (!dpu_crtc->misr_enable) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "disabled\n");
+ goto buff_check;
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
+ continue;
+
+ misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
+ dpu_crtc->misr_data[i] = misr_status ? misr_status :
+ dpu_crtc->misr_data[i];
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n",
+ m->hw_lm->idx - LM_0);
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+ dpu_crtc->misr_data[i]);
+ }
+
+buff_check:
+ if (count <= len) {
+ len = 0;
+ goto end;
+ }
+
+ if (copy_to_user(user_buff, buf, len)) {
+ len = -EFAULT;
+ goto end;
+ }
+
+ *ppos += len; /* increase offset */
+
+end:
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+ return len;
+}
+
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
+
+static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_res *res;
+ struct dpu_crtc_respool *rp;
+ int i;
+
+ seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
+ seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
+ seq_printf(s, "core_clk_rate: %llu\n",
+ dpu_crtc->cur_perf.core_clk_rate);
+ for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+ i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ seq_printf(s, "bw_ctl[%s]: %llu\n",
+ dpu_power_handle_get_dbus_name(i),
+ dpu_crtc->cur_perf.bw_ctl[i]);
+ seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
+ dpu_power_handle_get_dbus_name(i),
+ dpu_crtc->cur_perf.max_per_pipe_ib[i]);
+ }
+
+ mutex_lock(&dpu_crtc->rp_lock);
+ list_for_each_entry(rp, &dpu_crtc->rp_head, rp_list) {
+ seq_printf(s, "rp.%d: ", rp->sequence_id);
+ list_for_each_entry(res, &rp->res_list, list)
+ seq_printf(s, "0x%x/0x%llx/%pK/%d ",
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ seq_puts(s, "\n");
+ }
+ mutex_unlock(&dpu_crtc->rp_lock);
+
+ return 0;
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
+
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_kms *dpu_kms;
+
+ static const struct file_operations debugfs_status_fops = {
+ .open = _dpu_debugfs_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+ static const struct file_operations debugfs_misr_fops = {
+ .open = simple_open,
+ .read = _dpu_crtc_misr_read,
+ .write = _dpu_crtc_misr_setup,
+ };
+
+ if (!crtc)
+ return -EINVAL;
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+ if (!dpu_kms)
+ return -EINVAL;
+
+ dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
+ crtc->dev->primary->debugfs_root);
+ if (!dpu_crtc->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_file("status", 0400,
+ dpu_crtc->debugfs_root,
+ dpu_crtc, &debugfs_status_fops);
+ debugfs_create_file("state", 0600,
+ dpu_crtc->debugfs_root,
+ &dpu_crtc->base,
+ &dpu_crtc_debugfs_state_fops);
+ debugfs_create_file("misr_data", 0600, dpu_crtc->debugfs_root,
+ dpu_crtc, &debugfs_misr_fops);
+
+ return 0;
+}
+
+static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+
+ if (!crtc)
+ return;
+ dpu_crtc = to_dpu_crtc(crtc);
+ debugfs_remove_recursive(dpu_crtc->debugfs_root);
+}
+#else
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+ return 0;
+}
+
+static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int dpu_crtc_late_register(struct drm_crtc *crtc)
+{
+ return _dpu_crtc_init_debugfs(crtc);
+}
+
+static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
+{
+ _dpu_crtc_destroy_debugfs(crtc);
+}
+
+static const struct drm_crtc_funcs dpu_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = dpu_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = dpu_crtc_reset,
+ .atomic_duplicate_state = dpu_crtc_duplicate_state,
+ .atomic_destroy_state = dpu_crtc_destroy_state,
+ .late_register = dpu_crtc_late_register,
+ .early_unregister = dpu_crtc_early_unregister,
+};
+
+static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
+ .disable = dpu_crtc_disable,
+ .atomic_enable = dpu_crtc_enable,
+ .atomic_check = dpu_crtc_atomic_check,
+ .atomic_begin = dpu_crtc_atomic_begin,
+ .atomic_flush = dpu_crtc_atomic_flush,
+};
+
+/* initialize crtc */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
+{
+ struct drm_crtc *crtc = NULL;
+ struct dpu_crtc *dpu_crtc = NULL;
+ struct msm_drm_private *priv = NULL;
+ struct dpu_kms *kms = NULL;
+ int i;
+
+ priv = dev->dev_private;
+ kms = to_dpu_kms(priv->kms);
+
+ dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
+ if (!dpu_crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = &dpu_crtc->base;
+ crtc->dev = dev;
+
+ mutex_init(&dpu_crtc->crtc_lock);
+ spin_lock_init(&dpu_crtc->spin_lock);
+ atomic_set(&dpu_crtc->frame_pending, 0);
+
+ mutex_init(&dpu_crtc->rp_lock);
+ INIT_LIST_HEAD(&dpu_crtc->rp_head);
+
+ init_completion(&dpu_crtc->frame_done_comp);
+
+ INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
+
+ for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
+ INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
+ list_add(&dpu_crtc->frame_events[i].list,
+ &dpu_crtc->frame_event_list);
+ kthread_init_work(&dpu_crtc->frame_events[i].work,
+ dpu_crtc_frame_event_work);
+ }
+
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &dpu_crtc_funcs,
+ NULL);
+
+ drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
+ plane->crtc = crtc;
+
+ /* save user-friendly CRTC name for later */
+ snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
+
+ /* initialize event handling */
+ spin_lock_init(&dpu_crtc->event_lock);
+
+ dpu_crtc->phandle = &kms->phandle;
+
+ DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
+ return crtc;
+}
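
The misr_data debugfs node wired up above takes "<enable> <frame_count>" on
write and returns one "lm idx:%d" / value pair per active mixer on read. A
minimal user-space sketch of that round trip follows; the debugfs path is an
assumption (it depends on the DRM minor and the crtc name built in
dpu_crtc_init()), and the node only exists with CONFIG_DEBUG_FS.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* hypothetical path; depends on DRM minor and crtc name */
		const char *node = "/sys/kernel/debug/dri/0/crtc0/misr_data";
		char buf[256];
		ssize_t n;
		int fd;

		/* arm the MISR: enable=1, frame_count=1, matching the
		 * sscanf("%u %u") in _dpu_crtc_misr_setup() */
		fd = open(node, O_WRONLY);
		if (fd < 0)
			return 1;
		if (write(fd, "1 1", 3) != 3) {
			close(fd);
			return 1;
		}
		close(fd);

		/* read back "lm idx:%d\n0x%x\n" per mixer */
		fd = open(node, O_RDONLY);
		if (fd < 0)
			return 1;
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			fputs(buf, stdout);
		}
		close(fd);
		return 0;
	}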
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
new file mode 100644
index 000000000000..e87109e608e9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_CRTC_H_
+#define _DPU_CRTC_H_
+
+#include <linux/kthread.h>
+#include <drm/drm_crtc.h>
+#include "dpu_kms.h"
+#include "dpu_core_perf.h"
+#include "dpu_hw_blk.h"
+
+#define DPU_CRTC_NAME_SIZE 12
+
+/* define the maximum number of in-flight frame events */
+#define DPU_CRTC_FRAME_EVENT_SIZE 4
+
+/**
+ * enum dpu_crtc_client_type: crtc client type
+ * @RT_CLIENT: RealTime client like video/cmd mode display
+ * voting through apps rsc
+ * @NRT_CLIENT: Non-RealTime client like WB display
+ * voting through apps rsc
+ */
+enum dpu_crtc_client_type {
+ RT_CLIENT,
+ NRT_CLIENT,
+};
+
+/**
+ * enum dpu_crtc_smmu_state: smmu state
+ * @ATTACHED: all the context banks are attached.
+ * @DETACHED: all the context banks are detached.
+ * @ATTACH_ALL_REQ: transient state of attaching context banks.
+ * @DETACH_ALL_REQ: transient state of detaching context banks.
+ */
+enum dpu_crtc_smmu_state {
+ ATTACHED = 0,
+ DETACHED,
+ ATTACH_ALL_REQ,
+ DETACH_ALL_REQ,
+};
+
+/**
+ * enum dpu_crtc_smmu_state_transition_type: state transition type
+ * @NONE: no pending state transitions
+ * @PRE_COMMIT: state transitions to be done before processing the commit
+ * @POST_COMMIT: state transitions to be done after processing the commit
+ */
+enum dpu_crtc_smmu_state_transition_type {
+ NONE,
+ PRE_COMMIT,
+ POST_COMMIT
+};
+
+/**
+ * struct dpu_crtc_smmu_state_data: stores the smmu state and transition type
+ * @state: current state of smmu context banks
+ * @transition_type: transition request type
+ * @transition_error: whether an error occurred while transitioning the state
+ */
+struct dpu_crtc_smmu_state_data {
+ uint32_t state;
+ uint32_t transition_type;
+ uint32_t transition_error;
+};
+
+/**
+ * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
+ * @hw_lm: LM HW Driver context
+ * @hw_ctl: CTL Path HW driver context
+ * @encoder: Encoder attached to this lm & ctl
+ * @mixer_op_mode: mixer blending operation mode
+ * @flush_mask: mixer flush mask for ctl, mixer and pipe
+ */
+struct dpu_crtc_mixer {
+ struct dpu_hw_mixer *hw_lm;
+ struct dpu_hw_ctl *hw_ctl;
+ struct drm_encoder *encoder;
+ u32 mixer_op_mode;
+ u32 flush_mask;
+};
+
+/**
+ * struct dpu_crtc_frame_event: stores crtc frame event for crtc processing
+ * @work: base work structure
+ * @crtc: Pointer to crtc handling this event
+ * @list: event list
+ * @ts: timestamp at queue entry
+ * @event: event identifier
+ */
+struct dpu_crtc_frame_event {
+ struct kthread_work work;
+ struct drm_crtc *crtc;
+ struct list_head list;
+ ktime_t ts;
+ u32 event;
+};
+
+/*
+ * Maximum number of free event structures to cache
+ */
+#define DPU_CRTC_MAX_EVENT_COUNT 16
+
+/**
+ * struct dpu_crtc - virtualized CRTC data structure
+ * @base : Base drm crtc structure
+ * @name : ASCII description of this crtc
+ * @num_ctls : Number of ctl paths in use
+ * @num_mixers : Number of mixers in use
+ * @mixers_swapped: Whether the mixers have been swapped for left/right update
+ * especially in the case of DSC Merge.
+ * @mixers : List of active mixers
+ * @event : Pointer to last received drm vblank event. If there is a
+ * pending vblank event, this will be non-null.
+ * @vsync_count : Running count of received vsync events
+ * @stage_cfg : H/w mixer stage configuration
+ * @debugfs_root : Parent of debugfs node
+ * @vblank_cb_count : count of vblank callback since last reset
+ * @play_count : frame count between crtc enable and disable
+ * @vblank_cb_time : ktime at vblank count reset
+ * @vblank_requested : whether the user has requested vblank events
+ * @suspend : whether or not a suspend operation is in progress
+ * @enabled : whether the DPU CRTC is currently enabled. Updated in the
+ * commit-thread, not at state-swap time (which is earlier), so it
+ * is safe to make decisions on during VBLANK on/off work
+ * @feature_list : list of color processing features supported on a crtc
+ * @active_list : list of color processing features that are active
+ * @dirty_list : list of color processing features that are dirty
+ * @ad_dirty: list containing ad properties that are dirty
+ * @ad_active: list containing ad properties that are active
+ * @crtc_lock : crtc lock around create, destroy and access.
+ * @frame_pending : Whether or not an update is pending
+ * @frame_events : static allocation of in-flight frame events
+ * @frame_event_list : available frame event list
+ * @spin_lock : spin lock for frame event, transaction status, etc...
+ * @frame_done_comp : for frame_event_done synchronization
+ * @event_lock : Spinlock around event handling code
+ * @misr_enable : boolean flag indicating misr enable/disable status.
+ * @misr_frame_count : misr frame count provided by client
+ * @misr_data : store misr data before turning off the clocks.
+ * @phandle: Pointer to power handler
+ * @power_event : registered power event handle
+ * @cur_perf : current performance committed to clock/bandwidth driver
+ * @rp_lock : serialization lock for resource pool
+ * @rp_head : list of active resource pool
+ * @scl3_lut_cfg : qseed3 lut config
+ * @smmu_state : smmu state, transition type and error status
+ */
+struct dpu_crtc {
+ struct drm_crtc base;
+ char name[DPU_CRTC_NAME_SIZE];
+
+ /* HW Resources reserved for the crtc */
+ u32 num_ctls;
+ u32 num_mixers;
+ bool mixers_swapped;
+ struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+ struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg;
+
+ struct drm_pending_vblank_event *event;
+ u32 vsync_count;
+
+ struct dpu_hw_stage_cfg stage_cfg;
+ struct dentry *debugfs_root;
+
+ u32 vblank_cb_count;
+ u64 play_count;
+ ktime_t vblank_cb_time;
+ bool vblank_requested;
+ bool suspend;
+ bool enabled;
+
+ struct list_head feature_list;
+ struct list_head active_list;
+ struct list_head dirty_list;
+ struct list_head ad_dirty;
+ struct list_head ad_active;
+
+ struct mutex crtc_lock;
+
+ atomic_t frame_pending;
+ struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
+ struct list_head frame_event_list;
+ spinlock_t spin_lock;
+ struct completion frame_done_comp;
+
+ /* for handling internal event thread */
+ spinlock_t event_lock;
+ bool misr_enable;
+ u32 misr_frame_count;
+ u32 misr_data[CRTC_DUAL_MIXERS];
+
+ struct dpu_power_handle *phandle;
+ struct dpu_power_event *power_event;
+
+ struct dpu_core_perf_params cur_perf;
+
+ struct mutex rp_lock;
+ struct list_head rp_head;
+
+ struct dpu_crtc_smmu_state_data smmu_state;
+};
+
+#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
+
+/**
+ * struct dpu_crtc_res_ops - common operations for crtc resources
+ * @get: get given resource
+ * @put: put given resource
+ */
+struct dpu_crtc_res_ops {
+ void *(*get)(void *val, u32 type, u64 tag);
+ void (*put)(void *val);
+};
+
+#define DPU_CRTC_RES_FLAG_FREE BIT(0)
+
+/**
+ * struct dpu_crtc_res - definition of crtc resources
+ * @list: list of crtc resource
+ * @type: crtc resource type
+ * @tag: unique identifier per type
+ * @refcount: reference/usage count
+ * @ops: callback operations
+ * @val: resource handle associated with type/tag
+ * @flags: customization flags
+ */
+struct dpu_crtc_res {
+ struct list_head list;
+ u32 type;
+ u64 tag;
+ atomic_t refcount;
+ struct dpu_crtc_res_ops ops;
+ void *val;
+ u32 flags;
+};
+
+/**
+ * struct dpu_crtc_respool - crtc resource pool
+ * @rp_lock: pointer to serialization lock
+ * @rp_head: pointer to head of active resource pools of this crtc
+ * @rp_list: list of crtc resource pool
+ * @sequence_id: sequence identifier, incremented per state duplication
+ * @res_list: list of resource managed by this resource pool
+ * @ops: resource operations for parent resource pool
+ */
+struct dpu_crtc_respool {
+ struct mutex *rp_lock;
+ struct list_head *rp_head;
+ struct list_head rp_list;
+ u32 sequence_id;
+ struct list_head res_list;
+ struct dpu_crtc_res_ops ops;
+};
+
+/**
+ * struct dpu_crtc_state - dpu container for atomic crtc state
+ * @base: Base drm crtc state structure
+ * @is_ppsplit : Whether current topology requires PPSplit special handling
+ * @bw_control : true if bw/clk controlled by core bw/clk properties
+ * @bw_split_vote : true if bw controlled by llcc/dram bw properties
+ * @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
+ * Origin top left of CRTC.
+ * @property_state: Local storage for msm_prop properties
+ * @property_values: Current crtc property values
+ * @input_fence_timeout_ns : Cached input fence timeout, in ns
+ * @new_perf: new performance state being requested
+ */
+struct dpu_crtc_state {
+ struct drm_crtc_state base;
+
+ bool bw_control;
+ bool bw_split_vote;
+
+ bool is_ppsplit;
+ struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
+
+ uint64_t input_fence_timeout_ns;
+
+ struct dpu_core_perf_params new_perf;
+ struct dpu_crtc_respool rp;
+};
+
+#define to_dpu_crtc_state(x) \
+ container_of(x, struct dpu_crtc_state, base)
+
+/**
+ * dpu_crtc_get_mixer_width - get the mixer width
+ * Mixer width will be the same as the panel width (/2 for split)
+ */
+static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc,
+ struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+ u32 mixer_width;
+
+ if (!dpu_crtc || !cstate || !mode)
+ return 0;
+
+ mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ?
+ mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
+
+ return mixer_width;
+}
+
+/**
+ * dpu_crtc_get_mixer_height - get the mixer height
+ * Mixer height will be the same as the panel height
+ */
+static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
+ struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+ if (!dpu_crtc || !cstate || !mode)
+ return 0;
+
+ return mode->vdisplay;
+}
+
+/**
+ * dpu_crtc_frame_pending - return the number of pending frames
+ * @crtc: Pointer to drm crtc object
+ */
+static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+
+ if (!crtc)
+ return -EINVAL;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ return atomic_read(&dpu_crtc->frame_pending);
+}
+
+/**
+ * dpu_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state);
+
+/**
+ * dpu_crtc_init - create a new crtc object
+ * @dev: dpu device
+ * @plane: base plane
+ * @Return: new crtc object or error
+ */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+
+/**
+ * dpu_crtc_register_custom_event - API for enabling/disabling a crtc event
+ * @kms: Pointer to dpu_kms
+ * @crtc_drm: Pointer to crtc object
+ * @event: Event that the client is interested in
+ * @en: Flag to enable/disable the event
+ */
+int dpu_crtc_register_custom_event(struct dpu_kms *kms,
+ struct drm_crtc *crtc_drm, u32 event, bool en);
+
+/**
+ * dpu_crtc_get_intf_mode - get interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_get_client_type - check the crtc type - rt, nrt, etc.
+ * @crtc: Pointer to crtc
+ */
+static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
+ struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *cstate =
+ crtc ? to_dpu_crtc_state(crtc->state) : NULL;
+
+ if (!cstate)
+ return NRT_CLIENT;
+
+ return RT_CLIENT;
+}
+
+/**
+ * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
+ * @crtc: Pointer to crtc
+ */
+static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
+{
+ return crtc ? crtc->enabled : false;
+}
+
+#endif /* _DPU_CRTC_H_ */
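
to_dpu_crtc() and to_dpu_crtc_state() above recover the DPU wrapper from the
embedded DRM base via container_of(). A self-contained sketch of the same
pattern (demo names, plain C, not part of the patch):

	#include <stddef.h>
	#include <stdio.h>

	struct drm_crtc_state_demo { int dummy; };  /* stand-in for the DRM base */

	struct dpu_crtc_state_demo {
		struct drm_crtc_state_demo base;  /* embedded, as in dpu_crtc_state */
		int is_ppsplit;
	};

	/* the arithmetic container_of() performs: subtract the member offset */
	#define to_demo_state(x) \
		((struct dpu_crtc_state_demo *)((char *)(x) - \
		 offsetof(struct dpu_crtc_state_demo, base)))

	int main(void)
	{
		struct dpu_crtc_state_demo s = { .is_ppsplit = 1 };
		struct drm_crtc_state_demo *base = &s.base; /* what DRM hands back */

		printf("is_ppsplit=%d\n", to_demo_state(base)->is_ppsplit);
		return 0;
	}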
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
new file mode 100644
index 000000000000..ae2aee7ed9e1
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
@@ -0,0 +1,2393 @@
+/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+#include <linux/pm_runtime.h>
+
+#include "dpu_dbg.h"
+#include "disp/dpu1/dpu_hw_catalog.h"
+
+
+#define DEFAULT_DBGBUS_DPU DPU_DBG_DUMP_IN_MEM
+#define DEFAULT_DBGBUS_VBIFRT DPU_DBG_DUMP_IN_MEM
+#define REG_BASE_NAME_LEN 80
+
+#define DBGBUS_FLAGS_DSPP BIT(0)
+#define DBGBUS_DSPP_STATUS 0x34C
+
+#define DBGBUS_NAME_DPU "dpu"
+#define DBGBUS_NAME_VBIF_RT "vbif_rt"
+
+/* offsets from dpu top address for the debug buses */
+#define DBGBUS_SSPP0 0x188
+#define DBGBUS_AXI_INTF 0x194
+#define DBGBUS_SSPP1 0x298
+#define DBGBUS_DSPP 0x348
+#define DBGBUS_PERIPH 0x418
+
+#define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
+
+/* following offsets are with respect to MDP VBIF base for DBG BUS access */
+#define MMSS_VBIF_CLKON 0x4
+#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
+#define MMSS_VBIF_TEST_BUS_OUT 0x230
+
+/* Vbif error info */
+#define MMSS_VBIF_PND_ERR 0x190
+#define MMSS_VBIF_SRC_ERR 0x194
+#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
+#define MMSS_VBIF_ERR_INFO 0x1a0
+#define MMSS_VBIF_ERR_INFO_1 0x1a4
+#define MMSS_VBIF_CLIENT_NUM 14
+
+/**
+ * struct dpu_dbg_reg_base - register region base.
+ * may have sub-ranges: sub-ranges are used for dumping
+ * or may not have sub-ranges: dumping is base -> max_offset
+ * @reg_base_head: head of this node
+ * @name: register base name
+ * @base: base pointer
+ * @off: cached offset of region for manual register dumping
+ * @cnt: cached range of region for manual register dumping
+ * @max_offset: length of region
+ * @buf: buffer used for manual register dumping
+ * @buf_len: buffer length used for manual register dumping
+ * @cb: callback for external dump function, null if not defined
+ * @cb_ptr: private pointer to callback function
+ */
+struct dpu_dbg_reg_base {
+ struct list_head reg_base_head;
+ char name[REG_BASE_NAME_LEN];
+ void __iomem *base;
+ size_t off;
+ size_t cnt;
+ size_t max_offset;
+ char *buf;
+ size_t buf_len;
+ void (*cb)(void *ptr);
+ void *cb_ptr;
+};
+
+struct dpu_debug_bus_entry {
+ u32 wr_addr;
+ u32 block_id;
+ u32 test_id;
+ void (*analyzer)(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val);
+};
+
+struct vbif_debug_bus_entry {
+ u32 disable_bus_addr;
+ u32 block_bus_addr;
+ u32 bit_offset;
+ u32 block_cnt;
+ u32 test_pnt_start;
+ u32 test_pnt_cnt;
+};
+
+struct dpu_dbg_debug_bus_common {
+ char *name;
+ u32 enable_mask;
+ bool include_in_deferred_work;
+ u32 flags;
+ u32 entries_size;
+ u32 *dumped_content;
+};
+
+struct dpu_dbg_dpu_debug_bus {
+ struct dpu_dbg_debug_bus_common cmn;
+ struct dpu_debug_bus_entry *entries;
+ u32 top_blk_off;
+};
+
+struct dpu_dbg_vbif_debug_bus {
+ struct dpu_dbg_debug_bus_common cmn;
+ struct vbif_debug_bus_entry *entries;
+};
+
+/**
+ * struct dpu_dbg_base - global dpu debug base structure
+ * @reg_base_list: list of register dumping regions
+ * @dev: device pointer
+ * @dump_work: work struct for deferring register dump work to separate thread
+ * @dbgbus_dpu: debug bus structure for the dpu
+ * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
+ */
+static struct dpu_dbg_base {
+ struct list_head reg_base_list;
+ struct device *dev;
+
+ struct work_struct dump_work;
+
+ struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
+ struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
+} dpu_dbg_base;
+
+static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & 0xFFF000))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & BIT(15)))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & BIT(15)))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
+
+ /* Unpack 0 sspp 0 */
+ { DBGBUS_SSPP0, 50, 2 },
+ { DBGBUS_SSPP0, 60, 2 },
+ { DBGBUS_SSPP0, 70, 2 },
+ { DBGBUS_SSPP0, 85, 2 },
+
+ /* Unpack 0 sspp 1 */
+ { DBGBUS_SSPP1, 50, 2 },
+ { DBGBUS_SSPP1, 60, 2 },
+ { DBGBUS_SSPP1, 70, 2 },
+ { DBGBUS_SSPP1, 85, 2 },
+
+ /* scheduler */
+ { DBGBUS_DSPP, 130, 0 },
+ { DBGBUS_DSPP, 130, 1 },
+ { DBGBUS_DSPP, 130, 2 },
+ { DBGBUS_DSPP, 130, 3 },
+ { DBGBUS_DSPP, 130, 4 },
+ { DBGBUS_DSPP, 130, 5 },
+
+ /* qseed */
+ { DBGBUS_SSPP0, 6, 0},
+ { DBGBUS_SSPP0, 6, 1},
+ { DBGBUS_SSPP0, 26, 0},
+ { DBGBUS_SSPP0, 26, 1},
+ { DBGBUS_SSPP1, 6, 0},
+ { DBGBUS_SSPP1, 6, 1},
+ { DBGBUS_SSPP1, 26, 0},
+ { DBGBUS_SSPP1, 26, 1},
+
+ /* scale */
+ { DBGBUS_SSPP0, 16, 0},
+ { DBGBUS_SSPP0, 16, 1},
+ { DBGBUS_SSPP0, 36, 0},
+ { DBGBUS_SSPP0, 36, 1},
+ { DBGBUS_SSPP1, 16, 0},
+ { DBGBUS_SSPP1, 16, 1},
+ { DBGBUS_SSPP1, 36, 0},
+ { DBGBUS_SSPP1, 36, 1},
+
+ /* fetch sspp0 */
+
+ /* vig 0 */
+ { DBGBUS_SSPP0, 0, 0 },
+ { DBGBUS_SSPP0, 0, 1 },
+ { DBGBUS_SSPP0, 0, 2 },
+ { DBGBUS_SSPP0, 0, 3 },
+ { DBGBUS_SSPP0, 0, 4 },
+ { DBGBUS_SSPP0, 0, 5 },
+ { DBGBUS_SSPP0, 0, 6 },
+ { DBGBUS_SSPP0, 0, 7 },
+
+ { DBGBUS_SSPP0, 1, 0 },
+ { DBGBUS_SSPP0, 1, 1 },
+ { DBGBUS_SSPP0, 1, 2 },
+ { DBGBUS_SSPP0, 1, 3 },
+ { DBGBUS_SSPP0, 1, 4 },
+ { DBGBUS_SSPP0, 1, 5 },
+ { DBGBUS_SSPP0, 1, 6 },
+ { DBGBUS_SSPP0, 1, 7 },
+
+ { DBGBUS_SSPP0, 2, 0 },
+ { DBGBUS_SSPP0, 2, 1 },
+ { DBGBUS_SSPP0, 2, 2 },
+ { DBGBUS_SSPP0, 2, 3 },
+ { DBGBUS_SSPP0, 2, 4 },
+ { DBGBUS_SSPP0, 2, 5 },
+ { DBGBUS_SSPP0, 2, 6 },
+ { DBGBUS_SSPP0, 2, 7 },
+
+ { DBGBUS_SSPP0, 4, 0 },
+ { DBGBUS_SSPP0, 4, 1 },
+ { DBGBUS_SSPP0, 4, 2 },
+ { DBGBUS_SSPP0, 4, 3 },
+ { DBGBUS_SSPP0, 4, 4 },
+ { DBGBUS_SSPP0, 4, 5 },
+ { DBGBUS_SSPP0, 4, 6 },
+ { DBGBUS_SSPP0, 4, 7 },
+
+ { DBGBUS_SSPP0, 5, 0 },
+ { DBGBUS_SSPP0, 5, 1 },
+ { DBGBUS_SSPP0, 5, 2 },
+ { DBGBUS_SSPP0, 5, 3 },
+ { DBGBUS_SSPP0, 5, 4 },
+ { DBGBUS_SSPP0, 5, 5 },
+ { DBGBUS_SSPP0, 5, 6 },
+ { DBGBUS_SSPP0, 5, 7 },
+
+ /* vig 2 */
+ { DBGBUS_SSPP0, 20, 0 },
+ { DBGBUS_SSPP0, 20, 1 },
+ { DBGBUS_SSPP0, 20, 2 },
+ { DBGBUS_SSPP0, 20, 3 },
+ { DBGBUS_SSPP0, 20, 4 },
+ { DBGBUS_SSPP0, 20, 5 },
+ { DBGBUS_SSPP0, 20, 6 },
+ { DBGBUS_SSPP0, 20, 7 },
+
+ { DBGBUS_SSPP0, 21, 0 },
+ { DBGBUS_SSPP0, 21, 1 },
+ { DBGBUS_SSPP0, 21, 2 },
+ { DBGBUS_SSPP0, 21, 3 },
+ { DBGBUS_SSPP0, 21, 4 },
+ { DBGBUS_SSPP0, 21, 5 },
+ { DBGBUS_SSPP0, 21, 6 },
+ { DBGBUS_SSPP0, 21, 7 },
+
+ { DBGBUS_SSPP0, 22, 0 },
+ { DBGBUS_SSPP0, 22, 1 },
+ { DBGBUS_SSPP0, 22, 2 },
+ { DBGBUS_SSPP0, 22, 3 },
+ { DBGBUS_SSPP0, 22, 4 },
+ { DBGBUS_SSPP0, 22, 5 },
+ { DBGBUS_SSPP0, 22, 6 },
+ { DBGBUS_SSPP0, 22, 7 },
+
+ { DBGBUS_SSPP0, 24, 0 },
+ { DBGBUS_SSPP0, 24, 1 },
+ { DBGBUS_SSPP0, 24, 2 },
+ { DBGBUS_SSPP0, 24, 3 },
+ { DBGBUS_SSPP0, 24, 4 },
+ { DBGBUS_SSPP0, 24, 5 },
+ { DBGBUS_SSPP0, 24, 6 },
+ { DBGBUS_SSPP0, 24, 7 },
+
+ { DBGBUS_SSPP0, 25, 0 },
+ { DBGBUS_SSPP0, 25, 1 },
+ { DBGBUS_SSPP0, 25, 2 },
+ { DBGBUS_SSPP0, 25, 3 },
+ { DBGBUS_SSPP0, 25, 4 },
+ { DBGBUS_SSPP0, 25, 5 },
+ { DBGBUS_SSPP0, 25, 6 },
+ { DBGBUS_SSPP0, 25, 7 },
+
+ /* dma 2 */
+ { DBGBUS_SSPP0, 30, 0 },
+ { DBGBUS_SSPP0, 30, 1 },
+ { DBGBUS_SSPP0, 30, 2 },
+ { DBGBUS_SSPP0, 30, 3 },
+ { DBGBUS_SSPP0, 30, 4 },
+ { DBGBUS_SSPP0, 30, 5 },
+ { DBGBUS_SSPP0, 30, 6 },
+ { DBGBUS_SSPP0, 30, 7 },
+
+ { DBGBUS_SSPP0, 31, 0 },
+ { DBGBUS_SSPP0, 31, 1 },
+ { DBGBUS_SSPP0, 31, 2 },
+ { DBGBUS_SSPP0, 31, 3 },
+ { DBGBUS_SSPP0, 31, 4 },
+ { DBGBUS_SSPP0, 31, 5 },
+ { DBGBUS_SSPP0, 31, 6 },
+ { DBGBUS_SSPP0, 31, 7 },
+
+ { DBGBUS_SSPP0, 32, 0 },
+ { DBGBUS_SSPP0, 32, 1 },
+ { DBGBUS_SSPP0, 32, 2 },
+ { DBGBUS_SSPP0, 32, 3 },
+ { DBGBUS_SSPP0, 32, 4 },
+ { DBGBUS_SSPP0, 32, 5 },
+ { DBGBUS_SSPP0, 32, 6 },
+ { DBGBUS_SSPP0, 32, 7 },
+
+ { DBGBUS_SSPP0, 33, 0 },
+ { DBGBUS_SSPP0, 33, 1 },
+ { DBGBUS_SSPP0, 33, 2 },
+ { DBGBUS_SSPP0, 33, 3 },
+ { DBGBUS_SSPP0, 33, 4 },
+ { DBGBUS_SSPP0, 33, 5 },
+ { DBGBUS_SSPP0, 33, 6 },
+ { DBGBUS_SSPP0, 33, 7 },
+
+ { DBGBUS_SSPP0, 34, 0 },
+ { DBGBUS_SSPP0, 34, 1 },
+ { DBGBUS_SSPP0, 34, 2 },
+ { DBGBUS_SSPP0, 34, 3 },
+ { DBGBUS_SSPP0, 34, 4 },
+ { DBGBUS_SSPP0, 34, 5 },
+ { DBGBUS_SSPP0, 34, 6 },
+ { DBGBUS_SSPP0, 34, 7 },
+
+ { DBGBUS_SSPP0, 35, 0 },
+ { DBGBUS_SSPP0, 35, 1 },
+ { DBGBUS_SSPP0, 35, 2 },
+ { DBGBUS_SSPP0, 35, 3 },
+
+ /* dma 0 */
+ { DBGBUS_SSPP0, 40, 0 },
+ { DBGBUS_SSPP0, 40, 1 },
+ { DBGBUS_SSPP0, 40, 2 },
+ { DBGBUS_SSPP0, 40, 3 },
+ { DBGBUS_SSPP0, 40, 4 },
+ { DBGBUS_SSPP0, 40, 5 },
+ { DBGBUS_SSPP0, 40, 6 },
+ { DBGBUS_SSPP0, 40, 7 },
+
+ { DBGBUS_SSPP0, 41, 0 },
+ { DBGBUS_SSPP0, 41, 1 },
+ { DBGBUS_SSPP0, 41, 2 },
+ { DBGBUS_SSPP0, 41, 3 },
+ { DBGBUS_SSPP0, 41, 4 },
+ { DBGBUS_SSPP0, 41, 5 },
+ { DBGBUS_SSPP0, 41, 6 },
+ { DBGBUS_SSPP0, 41, 7 },
+
+ { DBGBUS_SSPP0, 42, 0 },
+ { DBGBUS_SSPP0, 42, 1 },
+ { DBGBUS_SSPP0, 42, 2 },
+ { DBGBUS_SSPP0, 42, 3 },
+ { DBGBUS_SSPP0, 42, 4 },
+ { DBGBUS_SSPP0, 42, 5 },
+ { DBGBUS_SSPP0, 42, 6 },
+ { DBGBUS_SSPP0, 42, 7 },
+
+ { DBGBUS_SSPP0, 44, 0 },
+ { DBGBUS_SSPP0, 44, 1 },
+ { DBGBUS_SSPP0, 44, 2 },
+ { DBGBUS_SSPP0, 44, 3 },
+ { DBGBUS_SSPP0, 44, 4 },
+ { DBGBUS_SSPP0, 44, 5 },
+ { DBGBUS_SSPP0, 44, 6 },
+ { DBGBUS_SSPP0, 44, 7 },
+
+ { DBGBUS_SSPP0, 45, 0 },
+ { DBGBUS_SSPP0, 45, 1 },
+ { DBGBUS_SSPP0, 45, 2 },
+ { DBGBUS_SSPP0, 45, 3 },
+ { DBGBUS_SSPP0, 45, 4 },
+ { DBGBUS_SSPP0, 45, 5 },
+ { DBGBUS_SSPP0, 45, 6 },
+ { DBGBUS_SSPP0, 45, 7 },
+
+ /* fetch sspp1 */
+ /* vig 1 */
+ { DBGBUS_SSPP1, 0, 0 },
+ { DBGBUS_SSPP1, 0, 1 },
+ { DBGBUS_SSPP1, 0, 2 },
+ { DBGBUS_SSPP1, 0, 3 },
+ { DBGBUS_SSPP1, 0, 4 },
+ { DBGBUS_SSPP1, 0, 5 },
+ { DBGBUS_SSPP1, 0, 6 },
+ { DBGBUS_SSPP1, 0, 7 },
+
+ { DBGBUS_SSPP1, 1, 0 },
+ { DBGBUS_SSPP1, 1, 1 },
+ { DBGBUS_SSPP1, 1, 2 },
+ { DBGBUS_SSPP1, 1, 3 },
+ { DBGBUS_SSPP1, 1, 4 },
+ { DBGBUS_SSPP1, 1, 5 },
+ { DBGBUS_SSPP1, 1, 6 },
+ { DBGBUS_SSPP1, 1, 7 },
+
+ { DBGBUS_SSPP1, 2, 0 },
+ { DBGBUS_SSPP1, 2, 1 },
+ { DBGBUS_SSPP1, 2, 2 },
+ { DBGBUS_SSPP1, 2, 3 },
+ { DBGBUS_SSPP1, 2, 4 },
+ { DBGBUS_SSPP1, 2, 5 },
+ { DBGBUS_SSPP1, 2, 6 },
+ { DBGBUS_SSPP1, 2, 7 },
+
+ { DBGBUS_SSPP1, 4, 0 },
+ { DBGBUS_SSPP1, 4, 1 },
+ { DBGBUS_SSPP1, 4, 2 },
+ { DBGBUS_SSPP1, 4, 3 },
+ { DBGBUS_SSPP1, 4, 4 },
+ { DBGBUS_SSPP1, 4, 5 },
+ { DBGBUS_SSPP1, 4, 6 },
+ { DBGBUS_SSPP1, 4, 7 },
+
+ { DBGBUS_SSPP1, 5, 0 },
+ { DBGBUS_SSPP1, 5, 1 },
+ { DBGBUS_SSPP1, 5, 2 },
+ { DBGBUS_SSPP1, 5, 3 },
+ { DBGBUS_SSPP1, 5, 4 },
+ { DBGBUS_SSPP1, 5, 5 },
+ { DBGBUS_SSPP1, 5, 6 },
+ { DBGBUS_SSPP1, 5, 7 },
+
+ /* vig 3 */
+ { DBGBUS_SSPP1, 20, 0 },
+ { DBGBUS_SSPP1, 20, 1 },
+ { DBGBUS_SSPP1, 20, 2 },
+ { DBGBUS_SSPP1, 20, 3 },
+ { DBGBUS_SSPP1, 20, 4 },
+ { DBGBUS_SSPP1, 20, 5 },
+ { DBGBUS_SSPP1, 20, 6 },
+ { DBGBUS_SSPP1, 20, 7 },
+
+ { DBGBUS_SSPP1, 21, 0 },
+ { DBGBUS_SSPP1, 21, 1 },
+ { DBGBUS_SSPP1, 21, 2 },
+ { DBGBUS_SSPP1, 21, 3 },
+ { DBGBUS_SSPP1, 21, 4 },
+ { DBGBUS_SSPP1, 21, 5 },
+ { DBGBUS_SSPP1, 21, 6 },
+ { DBGBUS_SSPP1, 21, 7 },
+
+ { DBGBUS_SSPP1, 22, 0 },
+ { DBGBUS_SSPP1, 22, 1 },
+ { DBGBUS_SSPP1, 22, 2 },
+ { DBGBUS_SSPP1, 22, 3 },
+ { DBGBUS_SSPP1, 22, 4 },
+ { DBGBUS_SSPP1, 22, 5 },
+ { DBGBUS_SSPP1, 22, 6 },
+ { DBGBUS_SSPP1, 22, 7 },
+
+ { DBGBUS_SSPP1, 24, 0 },
+ { DBGBUS_SSPP1, 24, 1 },
+ { DBGBUS_SSPP1, 24, 2 },
+ { DBGBUS_SSPP1, 24, 3 },
+ { DBGBUS_SSPP1, 24, 4 },
+ { DBGBUS_SSPP1, 24, 5 },
+ { DBGBUS_SSPP1, 24, 6 },
+ { DBGBUS_SSPP1, 24, 7 },
+
+ { DBGBUS_SSPP1, 25, 0 },
+ { DBGBUS_SSPP1, 25, 1 },
+ { DBGBUS_SSPP1, 25, 2 },
+ { DBGBUS_SSPP1, 25, 3 },
+ { DBGBUS_SSPP1, 25, 4 },
+ { DBGBUS_SSPP1, 25, 5 },
+ { DBGBUS_SSPP1, 25, 6 },
+ { DBGBUS_SSPP1, 25, 7 },
+
+ /* dma 3 */
+ { DBGBUS_SSPP1, 30, 0 },
+ { DBGBUS_SSPP1, 30, 1 },
+ { DBGBUS_SSPP1, 30, 2 },
+ { DBGBUS_SSPP1, 30, 3 },
+ { DBGBUS_SSPP1, 30, 4 },
+ { DBGBUS_SSPP1, 30, 5 },
+ { DBGBUS_SSPP1, 30, 6 },
+ { DBGBUS_SSPP1, 30, 7 },
+
+ { DBGBUS_SSPP1, 31, 0 },
+ { DBGBUS_SSPP1, 31, 1 },
+ { DBGBUS_SSPP1, 31, 2 },
+ { DBGBUS_SSPP1, 31, 3 },
+ { DBGBUS_SSPP1, 31, 4 },
+ { DBGBUS_SSPP1, 31, 5 },
+ { DBGBUS_SSPP1, 31, 6 },
+ { DBGBUS_SSPP1, 31, 7 },
+
+ { DBGBUS_SSPP1, 32, 0 },
+ { DBGBUS_SSPP1, 32, 1 },
+ { DBGBUS_SSPP1, 32, 2 },
+ { DBGBUS_SSPP1, 32, 3 },
+ { DBGBUS_SSPP1, 32, 4 },
+ { DBGBUS_SSPP1, 32, 5 },
+ { DBGBUS_SSPP1, 32, 6 },
+ { DBGBUS_SSPP1, 32, 7 },
+
+ { DBGBUS_SSPP1, 33, 0 },
+ { DBGBUS_SSPP1, 33, 1 },
+ { DBGBUS_SSPP1, 33, 2 },
+ { DBGBUS_SSPP1, 33, 3 },
+ { DBGBUS_SSPP1, 33, 4 },
+ { DBGBUS_SSPP1, 33, 5 },
+ { DBGBUS_SSPP1, 33, 6 },
+ { DBGBUS_SSPP1, 33, 7 },
+
+ { DBGBUS_SSPP1, 34, 0 },
+ { DBGBUS_SSPP1, 34, 1 },
+ { DBGBUS_SSPP1, 34, 2 },
+ { DBGBUS_SSPP1, 34, 3 },
+ { DBGBUS_SSPP1, 34, 4 },
+ { DBGBUS_SSPP1, 34, 5 },
+ { DBGBUS_SSPP1, 34, 6 },
+ { DBGBUS_SSPP1, 34, 7 },
+
+ { DBGBUS_SSPP1, 35, 0 },
+ { DBGBUS_SSPP1, 35, 1 },
+ { DBGBUS_SSPP1, 35, 2 },
+
+ /* dma 1 */
+ { DBGBUS_SSPP1, 40, 0 },
+ { DBGBUS_SSPP1, 40, 1 },
+ { DBGBUS_SSPP1, 40, 2 },
+ { DBGBUS_SSPP1, 40, 3 },
+ { DBGBUS_SSPP1, 40, 4 },
+ { DBGBUS_SSPP1, 40, 5 },
+ { DBGBUS_SSPP1, 40, 6 },
+ { DBGBUS_SSPP1, 40, 7 },
+
+ { DBGBUS_SSPP1, 41, 0 },
+ { DBGBUS_SSPP1, 41, 1 },
+ { DBGBUS_SSPP1, 41, 2 },
+ { DBGBUS_SSPP1, 41, 3 },
+ { DBGBUS_SSPP1, 41, 4 },
+ { DBGBUS_SSPP1, 41, 5 },
+ { DBGBUS_SSPP1, 41, 6 },
+ { DBGBUS_SSPP1, 41, 7 },
+
+ { DBGBUS_SSPP1, 42, 0 },
+ { DBGBUS_SSPP1, 42, 1 },
+ { DBGBUS_SSPP1, 42, 2 },
+ { DBGBUS_SSPP1, 42, 3 },
+ { DBGBUS_SSPP1, 42, 4 },
+ { DBGBUS_SSPP1, 42, 5 },
+ { DBGBUS_SSPP1, 42, 6 },
+ { DBGBUS_SSPP1, 42, 7 },
+
+ { DBGBUS_SSPP1, 44, 0 },
+ { DBGBUS_SSPP1, 44, 1 },
+ { DBGBUS_SSPP1, 44, 2 },
+ { DBGBUS_SSPP1, 44, 3 },
+ { DBGBUS_SSPP1, 44, 4 },
+ { DBGBUS_SSPP1, 44, 5 },
+ { DBGBUS_SSPP1, 44, 6 },
+ { DBGBUS_SSPP1, 44, 7 },
+
+ { DBGBUS_SSPP1, 45, 0 },
+ { DBGBUS_SSPP1, 45, 1 },
+ { DBGBUS_SSPP1, 45, 2 },
+ { DBGBUS_SSPP1, 45, 3 },
+ { DBGBUS_SSPP1, 45, 4 },
+ { DBGBUS_SSPP1, 45, 5 },
+ { DBGBUS_SSPP1, 45, 6 },
+ { DBGBUS_SSPP1, 45, 7 },
+
+ /* cursor 1 */
+ { DBGBUS_SSPP1, 80, 0 },
+ { DBGBUS_SSPP1, 80, 1 },
+ { DBGBUS_SSPP1, 80, 2 },
+ { DBGBUS_SSPP1, 80, 3 },
+ { DBGBUS_SSPP1, 80, 4 },
+ { DBGBUS_SSPP1, 80, 5 },
+ { DBGBUS_SSPP1, 80, 6 },
+ { DBGBUS_SSPP1, 80, 7 },
+
+ { DBGBUS_SSPP1, 81, 0 },
+ { DBGBUS_SSPP1, 81, 1 },
+ { DBGBUS_SSPP1, 81, 2 },
+ { DBGBUS_SSPP1, 81, 3 },
+ { DBGBUS_SSPP1, 81, 4 },
+ { DBGBUS_SSPP1, 81, 5 },
+ { DBGBUS_SSPP1, 81, 6 },
+ { DBGBUS_SSPP1, 81, 7 },
+
+ { DBGBUS_SSPP1, 82, 0 },
+ { DBGBUS_SSPP1, 82, 1 },
+ { DBGBUS_SSPP1, 82, 2 },
+ { DBGBUS_SSPP1, 82, 3 },
+ { DBGBUS_SSPP1, 82, 4 },
+ { DBGBUS_SSPP1, 82, 5 },
+ { DBGBUS_SSPP1, 82, 6 },
+ { DBGBUS_SSPP1, 82, 7 },
+
+ { DBGBUS_SSPP1, 83, 0 },
+ { DBGBUS_SSPP1, 83, 1 },
+ { DBGBUS_SSPP1, 83, 2 },
+ { DBGBUS_SSPP1, 83, 3 },
+ { DBGBUS_SSPP1, 83, 4 },
+ { DBGBUS_SSPP1, 83, 5 },
+ { DBGBUS_SSPP1, 83, 6 },
+ { DBGBUS_SSPP1, 83, 7 },
+
+ { DBGBUS_SSPP1, 84, 0 },
+ { DBGBUS_SSPP1, 84, 1 },
+ { DBGBUS_SSPP1, 84, 2 },
+ { DBGBUS_SSPP1, 84, 3 },
+ { DBGBUS_SSPP1, 84, 4 },
+ { DBGBUS_SSPP1, 84, 5 },
+ { DBGBUS_SSPP1, 84, 6 },
+ { DBGBUS_SSPP1, 84, 7 },
+
+ /* dspp */
+ { DBGBUS_DSPP, 13, 0 },
+ { DBGBUS_DSPP, 19, 0 },
+ { DBGBUS_DSPP, 14, 0 },
+ { DBGBUS_DSPP, 14, 1 },
+ { DBGBUS_DSPP, 14, 3 },
+ { DBGBUS_DSPP, 20, 0 },
+ { DBGBUS_DSPP, 20, 1 },
+ { DBGBUS_DSPP, 20, 3 },
+
+ /* ppb_0 */
+ { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+ /* ppb_1 */
+ { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+ /* lm_lut */
+ { DBGBUS_DSPP, 109, 0 },
+ { DBGBUS_DSPP, 105, 0 },
+ { DBGBUS_DSPP, 103, 0 },
+
+ /* tear-check */
+ { DBGBUS_PERIPH, 63, 0 },
+ { DBGBUS_PERIPH, 64, 0 },
+ { DBGBUS_PERIPH, 65, 0 },
+ { DBGBUS_PERIPH, 73, 0 },
+ { DBGBUS_PERIPH, 74, 0 },
+
+ /* crossbar */
+ { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+ /* rotator */
+ { DBGBUS_DSPP, 9, 0},
+
+ /* blend */
+ /* LM0 */
+ { DBGBUS_DSPP, 63, 0},
+ { DBGBUS_DSPP, 63, 1},
+ { DBGBUS_DSPP, 63, 2},
+ { DBGBUS_DSPP, 63, 3},
+ { DBGBUS_DSPP, 63, 4},
+ { DBGBUS_DSPP, 63, 5},
+ { DBGBUS_DSPP, 63, 6},
+ { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 64, 0},
+ { DBGBUS_DSPP, 64, 1},
+ { DBGBUS_DSPP, 64, 2},
+ { DBGBUS_DSPP, 64, 3},
+ { DBGBUS_DSPP, 64, 4},
+ { DBGBUS_DSPP, 64, 5},
+ { DBGBUS_DSPP, 64, 6},
+ { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 65, 0},
+ { DBGBUS_DSPP, 65, 1},
+ { DBGBUS_DSPP, 65, 2},
+ { DBGBUS_DSPP, 65, 3},
+ { DBGBUS_DSPP, 65, 4},
+ { DBGBUS_DSPP, 65, 5},
+ { DBGBUS_DSPP, 65, 6},
+ { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 66, 0},
+ { DBGBUS_DSPP, 66, 1},
+ { DBGBUS_DSPP, 66, 2},
+ { DBGBUS_DSPP, 66, 3},
+ { DBGBUS_DSPP, 66, 4},
+ { DBGBUS_DSPP, 66, 5},
+ { DBGBUS_DSPP, 66, 6},
+ { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 67, 0},
+ { DBGBUS_DSPP, 67, 1},
+ { DBGBUS_DSPP, 67, 2},
+ { DBGBUS_DSPP, 67, 3},
+ { DBGBUS_DSPP, 67, 4},
+ { DBGBUS_DSPP, 67, 5},
+ { DBGBUS_DSPP, 67, 6},
+ { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 68, 0},
+ { DBGBUS_DSPP, 68, 1},
+ { DBGBUS_DSPP, 68, 2},
+ { DBGBUS_DSPP, 68, 3},
+ { DBGBUS_DSPP, 68, 4},
+ { DBGBUS_DSPP, 68, 5},
+ { DBGBUS_DSPP, 68, 6},
+ { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 69, 0},
+ { DBGBUS_DSPP, 69, 1},
+ { DBGBUS_DSPP, 69, 2},
+ { DBGBUS_DSPP, 69, 3},
+ { DBGBUS_DSPP, 69, 4},
+ { DBGBUS_DSPP, 69, 5},
+ { DBGBUS_DSPP, 69, 6},
+ { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM1 */
+ { DBGBUS_DSPP, 70, 0},
+ { DBGBUS_DSPP, 70, 1},
+ { DBGBUS_DSPP, 70, 2},
+ { DBGBUS_DSPP, 70, 3},
+ { DBGBUS_DSPP, 70, 4},
+ { DBGBUS_DSPP, 70, 5},
+ { DBGBUS_DSPP, 70, 6},
+ { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 71, 0},
+ { DBGBUS_DSPP, 71, 1},
+ { DBGBUS_DSPP, 71, 2},
+ { DBGBUS_DSPP, 71, 3},
+ { DBGBUS_DSPP, 71, 4},
+ { DBGBUS_DSPP, 71, 5},
+ { DBGBUS_DSPP, 71, 6},
+ { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 72, 0},
+ { DBGBUS_DSPP, 72, 1},
+ { DBGBUS_DSPP, 72, 2},
+ { DBGBUS_DSPP, 72, 3},
+ { DBGBUS_DSPP, 72, 4},
+ { DBGBUS_DSPP, 72, 5},
+ { DBGBUS_DSPP, 72, 6},
+ { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 73, 0},
+ { DBGBUS_DSPP, 73, 1},
+ { DBGBUS_DSPP, 73, 2},
+ { DBGBUS_DSPP, 73, 3},
+ { DBGBUS_DSPP, 73, 4},
+ { DBGBUS_DSPP, 73, 5},
+ { DBGBUS_DSPP, 73, 6},
+ { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 74, 0},
+ { DBGBUS_DSPP, 74, 1},
+ { DBGBUS_DSPP, 74, 2},
+ { DBGBUS_DSPP, 74, 3},
+ { DBGBUS_DSPP, 74, 4},
+ { DBGBUS_DSPP, 74, 5},
+ { DBGBUS_DSPP, 74, 6},
+ { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 75, 0},
+ { DBGBUS_DSPP, 75, 1},
+ { DBGBUS_DSPP, 75, 2},
+ { DBGBUS_DSPP, 75, 3},
+ { DBGBUS_DSPP, 75, 4},
+ { DBGBUS_DSPP, 75, 5},
+ { DBGBUS_DSPP, 75, 6},
+ { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 76, 0},
+ { DBGBUS_DSPP, 76, 1},
+ { DBGBUS_DSPP, 76, 2},
+ { DBGBUS_DSPP, 76, 3},
+ { DBGBUS_DSPP, 76, 4},
+ { DBGBUS_DSPP, 76, 5},
+ { DBGBUS_DSPP, 76, 6},
+ { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM2 */
+ { DBGBUS_DSPP, 77, 0},
+ { DBGBUS_DSPP, 77, 1},
+ { DBGBUS_DSPP, 77, 2},
+ { DBGBUS_DSPP, 77, 3},
+ { DBGBUS_DSPP, 77, 4},
+ { DBGBUS_DSPP, 77, 5},
+ { DBGBUS_DSPP, 77, 6},
+ { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 78, 0},
+ { DBGBUS_DSPP, 78, 1},
+ { DBGBUS_DSPP, 78, 2},
+ { DBGBUS_DSPP, 78, 3},
+ { DBGBUS_DSPP, 78, 4},
+ { DBGBUS_DSPP, 78, 5},
+ { DBGBUS_DSPP, 78, 6},
+ { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 79, 0},
+ { DBGBUS_DSPP, 79, 1},
+ { DBGBUS_DSPP, 79, 2},
+ { DBGBUS_DSPP, 79, 3},
+ { DBGBUS_DSPP, 79, 4},
+ { DBGBUS_DSPP, 79, 5},
+ { DBGBUS_DSPP, 79, 6},
+ { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 80, 0},
+ { DBGBUS_DSPP, 80, 1},
+ { DBGBUS_DSPP, 80, 2},
+ { DBGBUS_DSPP, 80, 3},
+ { DBGBUS_DSPP, 80, 4},
+ { DBGBUS_DSPP, 80, 5},
+ { DBGBUS_DSPP, 80, 6},
+ { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 81, 0},
+ { DBGBUS_DSPP, 81, 1},
+ { DBGBUS_DSPP, 81, 2},
+ { DBGBUS_DSPP, 81, 3},
+ { DBGBUS_DSPP, 81, 4},
+ { DBGBUS_DSPP, 81, 5},
+ { DBGBUS_DSPP, 81, 6},
+ { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 82, 0},
+ { DBGBUS_DSPP, 82, 1},
+ { DBGBUS_DSPP, 82, 2},
+ { DBGBUS_DSPP, 82, 3},
+ { DBGBUS_DSPP, 82, 4},
+ { DBGBUS_DSPP, 82, 5},
+ { DBGBUS_DSPP, 82, 6},
+ { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 83, 0},
+ { DBGBUS_DSPP, 83, 1},
+ { DBGBUS_DSPP, 83, 2},
+ { DBGBUS_DSPP, 83, 3},
+ { DBGBUS_DSPP, 83, 4},
+ { DBGBUS_DSPP, 83, 5},
+ { DBGBUS_DSPP, 83, 6},
+ { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+ /* csc */
+ { DBGBUS_SSPP0, 7, 0},
+ { DBGBUS_SSPP0, 7, 1},
+ { DBGBUS_SSPP0, 27, 0},
+ { DBGBUS_SSPP0, 27, 1},
+ { DBGBUS_SSPP1, 7, 0},
+ { DBGBUS_SSPP1, 7, 1},
+ { DBGBUS_SSPP1, 27, 0},
+ { DBGBUS_SSPP1, 27, 1},
+
+ /* pcc */
+ { DBGBUS_SSPP0, 3, 3},
+ { DBGBUS_SSPP0, 23, 3},
+ { DBGBUS_SSPP0, 33, 3},
+ { DBGBUS_SSPP0, 43, 3},
+ { DBGBUS_SSPP1, 3, 3},
+ { DBGBUS_SSPP1, 23, 3},
+ { DBGBUS_SSPP1, 33, 3},
+ { DBGBUS_SSPP1, 43, 3},
+
+ /* spa */
+ { DBGBUS_SSPP0, 8, 0},
+ { DBGBUS_SSPP0, 28, 0},
+ { DBGBUS_SSPP1, 8, 0},
+ { DBGBUS_SSPP1, 28, 0},
+ { DBGBUS_DSPP, 13, 0},
+ { DBGBUS_DSPP, 19, 0},
+
+ /* igc */
+ { DBGBUS_SSPP0, 9, 0},
+ { DBGBUS_SSPP0, 9, 1},
+ { DBGBUS_SSPP0, 9, 3},
+ { DBGBUS_SSPP0, 29, 0},
+ { DBGBUS_SSPP0, 29, 1},
+ { DBGBUS_SSPP0, 29, 3},
+ { DBGBUS_SSPP0, 17, 0},
+ { DBGBUS_SSPP0, 17, 1},
+ { DBGBUS_SSPP0, 17, 3},
+ { DBGBUS_SSPP0, 37, 0},
+ { DBGBUS_SSPP0, 37, 1},
+ { DBGBUS_SSPP0, 37, 3},
+ { DBGBUS_SSPP0, 46, 0},
+ { DBGBUS_SSPP0, 46, 1},
+ { DBGBUS_SSPP0, 46, 3},
+
+ { DBGBUS_SSPP1, 9, 0},
+ { DBGBUS_SSPP1, 9, 1},
+ { DBGBUS_SSPP1, 9, 3},
+ { DBGBUS_SSPP1, 29, 0},
+ { DBGBUS_SSPP1, 29, 1},
+ { DBGBUS_SSPP1, 29, 3},
+ { DBGBUS_SSPP1, 17, 0},
+ { DBGBUS_SSPP1, 17, 1},
+ { DBGBUS_SSPP1, 17, 3},
+ { DBGBUS_SSPP1, 37, 0},
+ { DBGBUS_SSPP1, 37, 1},
+ { DBGBUS_SSPP1, 37, 3},
+ { DBGBUS_SSPP1, 46, 0},
+ { DBGBUS_SSPP1, 46, 1},
+ { DBGBUS_SSPP1, 46, 3},
+
+ { DBGBUS_DSPP, 14, 0},
+ { DBGBUS_DSPP, 14, 1},
+ { DBGBUS_DSPP, 14, 3},
+ { DBGBUS_DSPP, 20, 0},
+ { DBGBUS_DSPP, 20, 1},
+ { DBGBUS_DSPP, 20, 3},
+
+ { DBGBUS_PERIPH, 60, 0},
+};
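
Each entry in the table above names a mux select (wr_addr programmed with
TEST_MASK(block_id, test_id)) plus an optional analyzer for the sampled
value. A minimal sketch of how one entry is consumed, assumed to mirror the
dump loop further down in dpu_dbg.c (outside this hunk):

	static void demo_sample_entry(void __iomem *mem_base,
				      struct dpu_debug_bus_entry *entry)
	{
		u32 val;

		/* select block/test point: (id << 4) | (tp << 1) | BIT(0) */
		writel_relaxed(TEST_MASK(entry->block_id, entry->test_id),
			       mem_base + entry->wr_addr);
		wmb();	/* ensure the mux select lands before sampling */

		val = readl_relaxed(mem_base + DBGBUS_DSPP_STATUS);
		if (entry->analyzer)	/* e.g. _dpu_debug_bus_lm_dump() */
			entry->analyzer(mem_base, entry, val);
	}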
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
+
+ /* Unpack 0 sspp 0 */
+ { DBGBUS_SSPP0, 50, 2 },
+ { DBGBUS_SSPP0, 60, 2 },
+ { DBGBUS_SSPP0, 70, 2 },
+
+ /* Unpack 0 sspp 1 */
+ { DBGBUS_SSPP1, 50, 2 },
+ { DBGBUS_SSPP1, 60, 2 },
+ { DBGBUS_SSPP1, 70, 2 },
+
+ /* scheduler */
+ { DBGBUS_DSPP, 130, 0 },
+ { DBGBUS_DSPP, 130, 1 },
+ { DBGBUS_DSPP, 130, 2 },
+ { DBGBUS_DSPP, 130, 3 },
+ { DBGBUS_DSPP, 130, 4 },
+ { DBGBUS_DSPP, 130, 5 },
+
+ /* qseed */
+ { DBGBUS_SSPP0, 6, 0},
+ { DBGBUS_SSPP0, 6, 1},
+ { DBGBUS_SSPP0, 26, 0},
+ { DBGBUS_SSPP0, 26, 1},
+ { DBGBUS_SSPP1, 6, 0},
+ { DBGBUS_SSPP1, 6, 1},
+ { DBGBUS_SSPP1, 26, 0},
+ { DBGBUS_SSPP1, 26, 1},
+
+ /* scale */
+ { DBGBUS_SSPP0, 16, 0},
+ { DBGBUS_SSPP0, 16, 1},
+ { DBGBUS_SSPP0, 36, 0},
+ { DBGBUS_SSPP0, 36, 1},
+ { DBGBUS_SSPP1, 16, 0},
+ { DBGBUS_SSPP1, 16, 1},
+ { DBGBUS_SSPP1, 36, 0},
+ { DBGBUS_SSPP1, 36, 1},
+
+ /* fetch sspp0 */
+
+ /* vig 0 */
+ { DBGBUS_SSPP0, 0, 0 },
+ { DBGBUS_SSPP0, 0, 1 },
+ { DBGBUS_SSPP0, 0, 2 },
+ { DBGBUS_SSPP0, 0, 3 },
+ { DBGBUS_SSPP0, 0, 4 },
+ { DBGBUS_SSPP0, 0, 5 },
+ { DBGBUS_SSPP0, 0, 6 },
+ { DBGBUS_SSPP0, 0, 7 },
+
+ { DBGBUS_SSPP0, 1, 0 },
+ { DBGBUS_SSPP0, 1, 1 },
+ { DBGBUS_SSPP0, 1, 2 },
+ { DBGBUS_SSPP0, 1, 3 },
+ { DBGBUS_SSPP0, 1, 4 },
+ { DBGBUS_SSPP0, 1, 5 },
+ { DBGBUS_SSPP0, 1, 6 },
+ { DBGBUS_SSPP0, 1, 7 },
+
+ { DBGBUS_SSPP0, 2, 0 },
+ { DBGBUS_SSPP0, 2, 1 },
+ { DBGBUS_SSPP0, 2, 2 },
+ { DBGBUS_SSPP0, 2, 3 },
+ { DBGBUS_SSPP0, 2, 4 },
+ { DBGBUS_SSPP0, 2, 5 },
+ { DBGBUS_SSPP0, 2, 6 },
+ { DBGBUS_SSPP0, 2, 7 },
+
+ { DBGBUS_SSPP0, 4, 0 },
+ { DBGBUS_SSPP0, 4, 1 },
+ { DBGBUS_SSPP0, 4, 2 },
+ { DBGBUS_SSPP0, 4, 3 },
+ { DBGBUS_SSPP0, 4, 4 },
+ { DBGBUS_SSPP0, 4, 5 },
+ { DBGBUS_SSPP0, 4, 6 },
+ { DBGBUS_SSPP0, 4, 7 },
+
+ { DBGBUS_SSPP0, 5, 0 },
+ { DBGBUS_SSPP0, 5, 1 },
+ { DBGBUS_SSPP0, 5, 2 },
+ { DBGBUS_SSPP0, 5, 3 },
+ { DBGBUS_SSPP0, 5, 4 },
+ { DBGBUS_SSPP0, 5, 5 },
+ { DBGBUS_SSPP0, 5, 6 },
+ { DBGBUS_SSPP0, 5, 7 },
+
+ /* vig 2 */
+ { DBGBUS_SSPP0, 20, 0 },
+ { DBGBUS_SSPP0, 20, 1 },
+ { DBGBUS_SSPP0, 20, 2 },
+ { DBGBUS_SSPP0, 20, 3 },
+ { DBGBUS_SSPP0, 20, 4 },
+ { DBGBUS_SSPP0, 20, 5 },
+ { DBGBUS_SSPP0, 20, 6 },
+ { DBGBUS_SSPP0, 20, 7 },
+
+ { DBGBUS_SSPP0, 21, 0 },
+ { DBGBUS_SSPP0, 21, 1 },
+ { DBGBUS_SSPP0, 21, 2 },
+ { DBGBUS_SSPP0, 21, 3 },
+ { DBGBUS_SSPP0, 21, 4 },
+ { DBGBUS_SSPP0, 21, 5 },
+ { DBGBUS_SSPP0, 21, 6 },
+ { DBGBUS_SSPP0, 21, 7 },
+
+ { DBGBUS_SSPP0, 22, 0 },
+ { DBGBUS_SSPP0, 22, 1 },
+ { DBGBUS_SSPP0, 22, 2 },
+ { DBGBUS_SSPP0, 22, 3 },
+ { DBGBUS_SSPP0, 22, 4 },
+ { DBGBUS_SSPP0, 22, 5 },
+ { DBGBUS_SSPP0, 22, 6 },
+ { DBGBUS_SSPP0, 22, 7 },
+
+ { DBGBUS_SSPP0, 24, 0 },
+ { DBGBUS_SSPP0, 24, 1 },
+ { DBGBUS_SSPP0, 24, 2 },
+ { DBGBUS_SSPP0, 24, 3 },
+ { DBGBUS_SSPP0, 24, 4 },
+ { DBGBUS_SSPP0, 24, 5 },
+ { DBGBUS_SSPP0, 24, 6 },
+ { DBGBUS_SSPP0, 24, 7 },
+
+ { DBGBUS_SSPP0, 25, 0 },
+ { DBGBUS_SSPP0, 25, 1 },
+ { DBGBUS_SSPP0, 25, 2 },
+ { DBGBUS_SSPP0, 25, 3 },
+ { DBGBUS_SSPP0, 25, 4 },
+ { DBGBUS_SSPP0, 25, 5 },
+ { DBGBUS_SSPP0, 25, 6 },
+ { DBGBUS_SSPP0, 25, 7 },
+
+ /* dma 2 */
+ { DBGBUS_SSPP0, 30, 0 },
+ { DBGBUS_SSPP0, 30, 1 },
+ { DBGBUS_SSPP0, 30, 2 },
+ { DBGBUS_SSPP0, 30, 3 },
+ { DBGBUS_SSPP0, 30, 4 },
+ { DBGBUS_SSPP0, 30, 5 },
+ { DBGBUS_SSPP0, 30, 6 },
+ { DBGBUS_SSPP0, 30, 7 },
+
+ { DBGBUS_SSPP0, 31, 0 },
+ { DBGBUS_SSPP0, 31, 1 },
+ { DBGBUS_SSPP0, 31, 2 },
+ { DBGBUS_SSPP0, 31, 3 },
+ { DBGBUS_SSPP0, 31, 4 },
+ { DBGBUS_SSPP0, 31, 5 },
+ { DBGBUS_SSPP0, 31, 6 },
+ { DBGBUS_SSPP0, 31, 7 },
+
+ { DBGBUS_SSPP0, 32, 0 },
+ { DBGBUS_SSPP0, 32, 1 },
+ { DBGBUS_SSPP0, 32, 2 },
+ { DBGBUS_SSPP0, 32, 3 },
+ { DBGBUS_SSPP0, 32, 4 },
+ { DBGBUS_SSPP0, 32, 5 },
+ { DBGBUS_SSPP0, 32, 6 },
+ { DBGBUS_SSPP0, 32, 7 },
+
+ { DBGBUS_SSPP0, 33, 0 },
+ { DBGBUS_SSPP0, 33, 1 },
+ { DBGBUS_SSPP0, 33, 2 },
+ { DBGBUS_SSPP0, 33, 3 },
+ { DBGBUS_SSPP0, 33, 4 },
+ { DBGBUS_SSPP0, 33, 5 },
+ { DBGBUS_SSPP0, 33, 6 },
+ { DBGBUS_SSPP0, 33, 7 },
+
+ { DBGBUS_SSPP0, 34, 0 },
+ { DBGBUS_SSPP0, 34, 1 },
+ { DBGBUS_SSPP0, 34, 2 },
+ { DBGBUS_SSPP0, 34, 3 },
+ { DBGBUS_SSPP0, 34, 4 },
+ { DBGBUS_SSPP0, 34, 5 },
+ { DBGBUS_SSPP0, 34, 6 },
+ { DBGBUS_SSPP0, 34, 7 },
+
+ { DBGBUS_SSPP0, 35, 0 },
+ { DBGBUS_SSPP0, 35, 1 },
+ { DBGBUS_SSPP0, 35, 2 },
+ { DBGBUS_SSPP0, 35, 3 },
+
+ /* dma 0 */
+ { DBGBUS_SSPP0, 40, 0 },
+ { DBGBUS_SSPP0, 40, 1 },
+ { DBGBUS_SSPP0, 40, 2 },
+ { DBGBUS_SSPP0, 40, 3 },
+ { DBGBUS_SSPP0, 40, 4 },
+ { DBGBUS_SSPP0, 40, 5 },
+ { DBGBUS_SSPP0, 40, 6 },
+ { DBGBUS_SSPP0, 40, 7 },
+
+ { DBGBUS_SSPP0, 41, 0 },
+ { DBGBUS_SSPP0, 41, 1 },
+ { DBGBUS_SSPP0, 41, 2 },
+ { DBGBUS_SSPP0, 41, 3 },
+ { DBGBUS_SSPP0, 41, 4 },
+ { DBGBUS_SSPP0, 41, 5 },
+ { DBGBUS_SSPP0, 41, 6 },
+ { DBGBUS_SSPP0, 41, 7 },
+
+ { DBGBUS_SSPP0, 42, 0 },
+ { DBGBUS_SSPP0, 42, 1 },
+ { DBGBUS_SSPP0, 42, 2 },
+ { DBGBUS_SSPP0, 42, 3 },
+ { DBGBUS_SSPP0, 42, 4 },
+ { DBGBUS_SSPP0, 42, 5 },
+ { DBGBUS_SSPP0, 42, 6 },
+ { DBGBUS_SSPP0, 42, 7 },
+
+ { DBGBUS_SSPP0, 44, 0 },
+ { DBGBUS_SSPP0, 44, 1 },
+ { DBGBUS_SSPP0, 44, 2 },
+ { DBGBUS_SSPP0, 44, 3 },
+ { DBGBUS_SSPP0, 44, 4 },
+ { DBGBUS_SSPP0, 44, 5 },
+ { DBGBUS_SSPP0, 44, 6 },
+ { DBGBUS_SSPP0, 44, 7 },
+
+ { DBGBUS_SSPP0, 45, 0 },
+ { DBGBUS_SSPP0, 45, 1 },
+ { DBGBUS_SSPP0, 45, 2 },
+ { DBGBUS_SSPP0, 45, 3 },
+ { DBGBUS_SSPP0, 45, 4 },
+ { DBGBUS_SSPP0, 45, 5 },
+ { DBGBUS_SSPP0, 45, 6 },
+ { DBGBUS_SSPP0, 45, 7 },
+
+ /* fetch sspp1 */
+ /* vig 1 */
+ { DBGBUS_SSPP1, 0, 0 },
+ { DBGBUS_SSPP1, 0, 1 },
+ { DBGBUS_SSPP1, 0, 2 },
+ { DBGBUS_SSPP1, 0, 3 },
+ { DBGBUS_SSPP1, 0, 4 },
+ { DBGBUS_SSPP1, 0, 5 },
+ { DBGBUS_SSPP1, 0, 6 },
+ { DBGBUS_SSPP1, 0, 7 },
+
+ { DBGBUS_SSPP1, 1, 0 },
+ { DBGBUS_SSPP1, 1, 1 },
+ { DBGBUS_SSPP1, 1, 2 },
+ { DBGBUS_SSPP1, 1, 3 },
+ { DBGBUS_SSPP1, 1, 4 },
+ { DBGBUS_SSPP1, 1, 5 },
+ { DBGBUS_SSPP1, 1, 6 },
+ { DBGBUS_SSPP1, 1, 7 },
+
+ { DBGBUS_SSPP1, 2, 0 },
+ { DBGBUS_SSPP1, 2, 1 },
+ { DBGBUS_SSPP1, 2, 2 },
+ { DBGBUS_SSPP1, 2, 3 },
+ { DBGBUS_SSPP1, 2, 4 },
+ { DBGBUS_SSPP1, 2, 5 },
+ { DBGBUS_SSPP1, 2, 6 },
+ { DBGBUS_SSPP1, 2, 7 },
+
+ { DBGBUS_SSPP1, 4, 0 },
+ { DBGBUS_SSPP1, 4, 1 },
+ { DBGBUS_SSPP1, 4, 2 },
+ { DBGBUS_SSPP1, 4, 3 },
+ { DBGBUS_SSPP1, 4, 4 },
+ { DBGBUS_SSPP1, 4, 5 },
+ { DBGBUS_SSPP1, 4, 6 },
+ { DBGBUS_SSPP1, 4, 7 },
+
+ { DBGBUS_SSPP1, 5, 0 },
+ { DBGBUS_SSPP1, 5, 1 },
+ { DBGBUS_SSPP1, 5, 2 },
+ { DBGBUS_SSPP1, 5, 3 },
+ { DBGBUS_SSPP1, 5, 4 },
+ { DBGBUS_SSPP1, 5, 5 },
+ { DBGBUS_SSPP1, 5, 6 },
+ { DBGBUS_SSPP1, 5, 7 },
+
+ /* vig 3 */
+ { DBGBUS_SSPP1, 20, 0 },
+ { DBGBUS_SSPP1, 20, 1 },
+ { DBGBUS_SSPP1, 20, 2 },
+ { DBGBUS_SSPP1, 20, 3 },
+ { DBGBUS_SSPP1, 20, 4 },
+ { DBGBUS_SSPP1, 20, 5 },
+ { DBGBUS_SSPP1, 20, 6 },
+ { DBGBUS_SSPP1, 20, 7 },
+
+ { DBGBUS_SSPP1, 21, 0 },
+ { DBGBUS_SSPP1, 21, 1 },
+ { DBGBUS_SSPP1, 21, 2 },
+ { DBGBUS_SSPP1, 21, 3 },
+ { DBGBUS_SSPP1, 21, 4 },
+ { DBGBUS_SSPP1, 21, 5 },
+ { DBGBUS_SSPP1, 21, 6 },
+ { DBGBUS_SSPP1, 21, 7 },
+
+ { DBGBUS_SSPP1, 22, 0 },
+ { DBGBUS_SSPP1, 22, 1 },
+ { DBGBUS_SSPP1, 22, 2 },
+ { DBGBUS_SSPP1, 22, 3 },
+ { DBGBUS_SSPP1, 22, 4 },
+ { DBGBUS_SSPP1, 22, 5 },
+ { DBGBUS_SSPP1, 22, 6 },
+ { DBGBUS_SSPP1, 22, 7 },
+
+ { DBGBUS_SSPP1, 24, 0 },
+ { DBGBUS_SSPP1, 24, 1 },
+ { DBGBUS_SSPP1, 24, 2 },
+ { DBGBUS_SSPP1, 24, 3 },
+ { DBGBUS_SSPP1, 24, 4 },
+ { DBGBUS_SSPP1, 24, 5 },
+ { DBGBUS_SSPP1, 24, 6 },
+ { DBGBUS_SSPP1, 24, 7 },
+
+ { DBGBUS_SSPP1, 25, 0 },
+ { DBGBUS_SSPP1, 25, 1 },
+ { DBGBUS_SSPP1, 25, 2 },
+ { DBGBUS_SSPP1, 25, 3 },
+ { DBGBUS_SSPP1, 25, 4 },
+ { DBGBUS_SSPP1, 25, 5 },
+ { DBGBUS_SSPP1, 25, 6 },
+ { DBGBUS_SSPP1, 25, 7 },
+
+ /* dma 3 */
+ { DBGBUS_SSPP1, 30, 0 },
+ { DBGBUS_SSPP1, 30, 1 },
+ { DBGBUS_SSPP1, 30, 2 },
+ { DBGBUS_SSPP1, 30, 3 },
+ { DBGBUS_SSPP1, 30, 4 },
+ { DBGBUS_SSPP1, 30, 5 },
+ { DBGBUS_SSPP1, 30, 6 },
+ { DBGBUS_SSPP1, 30, 7 },
+
+ { DBGBUS_SSPP1, 31, 0 },
+ { DBGBUS_SSPP1, 31, 1 },
+ { DBGBUS_SSPP1, 31, 2 },
+ { DBGBUS_SSPP1, 31, 3 },
+ { DBGBUS_SSPP1, 31, 4 },
+ { DBGBUS_SSPP1, 31, 5 },
+ { DBGBUS_SSPP1, 31, 6 },
+ { DBGBUS_SSPP1, 31, 7 },
+
+ { DBGBUS_SSPP1, 32, 0 },
+ { DBGBUS_SSPP1, 32, 1 },
+ { DBGBUS_SSPP1, 32, 2 },
+ { DBGBUS_SSPP1, 32, 3 },
+ { DBGBUS_SSPP1, 32, 4 },
+ { DBGBUS_SSPP1, 32, 5 },
+ { DBGBUS_SSPP1, 32, 6 },
+ { DBGBUS_SSPP1, 32, 7 },
+
+ { DBGBUS_SSPP1, 33, 0 },
+ { DBGBUS_SSPP1, 33, 1 },
+ { DBGBUS_SSPP1, 33, 2 },
+ { DBGBUS_SSPP1, 33, 3 },
+ { DBGBUS_SSPP1, 33, 4 },
+ { DBGBUS_SSPP1, 33, 5 },
+ { DBGBUS_SSPP1, 33, 6 },
+ { DBGBUS_SSPP1, 33, 7 },
+
+ { DBGBUS_SSPP1, 34, 0 },
+ { DBGBUS_SSPP1, 34, 1 },
+ { DBGBUS_SSPP1, 34, 2 },
+ { DBGBUS_SSPP1, 34, 3 },
+ { DBGBUS_SSPP1, 34, 4 },
+ { DBGBUS_SSPP1, 34, 5 },
+ { DBGBUS_SSPP1, 34, 6 },
+ { DBGBUS_SSPP1, 34, 7 },
+
+ { DBGBUS_SSPP1, 35, 0 },
+ { DBGBUS_SSPP1, 35, 1 },
+ { DBGBUS_SSPP1, 35, 2 },
+
+ /* dma 1 */
+ { DBGBUS_SSPP1, 40, 0 },
+ { DBGBUS_SSPP1, 40, 1 },
+ { DBGBUS_SSPP1, 40, 2 },
+ { DBGBUS_SSPP1, 40, 3 },
+ { DBGBUS_SSPP1, 40, 4 },
+ { DBGBUS_SSPP1, 40, 5 },
+ { DBGBUS_SSPP1, 40, 6 },
+ { DBGBUS_SSPP1, 40, 7 },
+
+ { DBGBUS_SSPP1, 41, 0 },
+ { DBGBUS_SSPP1, 41, 1 },
+ { DBGBUS_SSPP1, 41, 2 },
+ { DBGBUS_SSPP1, 41, 3 },
+ { DBGBUS_SSPP1, 41, 4 },
+ { DBGBUS_SSPP1, 41, 5 },
+ { DBGBUS_SSPP1, 41, 6 },
+ { DBGBUS_SSPP1, 41, 7 },
+
+ { DBGBUS_SSPP1, 42, 0 },
+ { DBGBUS_SSPP1, 42, 1 },
+ { DBGBUS_SSPP1, 42, 2 },
+ { DBGBUS_SSPP1, 42, 3 },
+ { DBGBUS_SSPP1, 42, 4 },
+ { DBGBUS_SSPP1, 42, 5 },
+ { DBGBUS_SSPP1, 42, 6 },
+ { DBGBUS_SSPP1, 42, 7 },
+
+ { DBGBUS_SSPP1, 44, 0 },
+ { DBGBUS_SSPP1, 44, 1 },
+ { DBGBUS_SSPP1, 44, 2 },
+ { DBGBUS_SSPP1, 44, 3 },
+ { DBGBUS_SSPP1, 44, 4 },
+ { DBGBUS_SSPP1, 44, 5 },
+ { DBGBUS_SSPP1, 44, 6 },
+ { DBGBUS_SSPP1, 44, 7 },
+
+ { DBGBUS_SSPP1, 45, 0 },
+ { DBGBUS_SSPP1, 45, 1 },
+ { DBGBUS_SSPP1, 45, 2 },
+ { DBGBUS_SSPP1, 45, 3 },
+ { DBGBUS_SSPP1, 45, 4 },
+ { DBGBUS_SSPP1, 45, 5 },
+ { DBGBUS_SSPP1, 45, 6 },
+ { DBGBUS_SSPP1, 45, 7 },
+
+ /* dspp */
+ { DBGBUS_DSPP, 13, 0 },
+ { DBGBUS_DSPP, 19, 0 },
+ { DBGBUS_DSPP, 14, 0 },
+ { DBGBUS_DSPP, 14, 1 },
+ { DBGBUS_DSPP, 14, 3 },
+ { DBGBUS_DSPP, 20, 0 },
+ { DBGBUS_DSPP, 20, 1 },
+ { DBGBUS_DSPP, 20, 3 },
+
+ /* ppb_0 */
+ { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+ /* ppb_1 */
+ { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+ /* lm_lut */
+ { DBGBUS_DSPP, 109, 0 },
+ { DBGBUS_DSPP, 105, 0 },
+ { DBGBUS_DSPP, 103, 0 },
+
+ /* crossbar */
+ { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+ /* rotator */
+ { DBGBUS_DSPP, 9, 0},
+
+ /* blend */
+ /* LM0 */
+ { DBGBUS_DSPP, 63, 1},
+ { DBGBUS_DSPP, 63, 2},
+ { DBGBUS_DSPP, 63, 3},
+ { DBGBUS_DSPP, 63, 4},
+ { DBGBUS_DSPP, 63, 5},
+ { DBGBUS_DSPP, 63, 6},
+ { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 64, 1},
+ { DBGBUS_DSPP, 64, 2},
+ { DBGBUS_DSPP, 64, 3},
+ { DBGBUS_DSPP, 64, 4},
+ { DBGBUS_DSPP, 64, 5},
+ { DBGBUS_DSPP, 64, 6},
+ { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 65, 1},
+ { DBGBUS_DSPP, 65, 2},
+ { DBGBUS_DSPP, 65, 3},
+ { DBGBUS_DSPP, 65, 4},
+ { DBGBUS_DSPP, 65, 5},
+ { DBGBUS_DSPP, 65, 6},
+ { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 66, 1},
+ { DBGBUS_DSPP, 66, 2},
+ { DBGBUS_DSPP, 66, 3},
+ { DBGBUS_DSPP, 66, 4},
+ { DBGBUS_DSPP, 66, 5},
+ { DBGBUS_DSPP, 66, 6},
+ { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 67, 1},
+ { DBGBUS_DSPP, 67, 2},
+ { DBGBUS_DSPP, 67, 3},
+ { DBGBUS_DSPP, 67, 4},
+ { DBGBUS_DSPP, 67, 5},
+ { DBGBUS_DSPP, 67, 6},
+ { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 68, 1},
+ { DBGBUS_DSPP, 68, 2},
+ { DBGBUS_DSPP, 68, 3},
+ { DBGBUS_DSPP, 68, 4},
+ { DBGBUS_DSPP, 68, 5},
+ { DBGBUS_DSPP, 68, 6},
+ { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 69, 1},
+ { DBGBUS_DSPP, 69, 2},
+ { DBGBUS_DSPP, 69, 3},
+ { DBGBUS_DSPP, 69, 4},
+ { DBGBUS_DSPP, 69, 5},
+ { DBGBUS_DSPP, 69, 6},
+ { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 84, 1},
+ { DBGBUS_DSPP, 84, 2},
+ { DBGBUS_DSPP, 84, 3},
+ { DBGBUS_DSPP, 84, 4},
+ { DBGBUS_DSPP, 84, 5},
+ { DBGBUS_DSPP, 84, 6},
+ { DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 85, 1},
+ { DBGBUS_DSPP, 85, 2},
+ { DBGBUS_DSPP, 85, 3},
+ { DBGBUS_DSPP, 85, 4},
+ { DBGBUS_DSPP, 85, 5},
+ { DBGBUS_DSPP, 85, 6},
+ { DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 86, 1},
+ { DBGBUS_DSPP, 86, 2},
+ { DBGBUS_DSPP, 86, 3},
+ { DBGBUS_DSPP, 86, 4},
+ { DBGBUS_DSPP, 86, 5},
+ { DBGBUS_DSPP, 86, 6},
+ { DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 87, 1},
+ { DBGBUS_DSPP, 87, 2},
+ { DBGBUS_DSPP, 87, 3},
+ { DBGBUS_DSPP, 87, 4},
+ { DBGBUS_DSPP, 87, 5},
+ { DBGBUS_DSPP, 87, 6},
+ { DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM1 */
+ { DBGBUS_DSPP, 70, 1},
+ { DBGBUS_DSPP, 70, 2},
+ { DBGBUS_DSPP, 70, 3},
+ { DBGBUS_DSPP, 70, 4},
+ { DBGBUS_DSPP, 70, 5},
+ { DBGBUS_DSPP, 70, 6},
+ { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 71, 1},
+ { DBGBUS_DSPP, 71, 2},
+ { DBGBUS_DSPP, 71, 3},
+ { DBGBUS_DSPP, 71, 4},
+ { DBGBUS_DSPP, 71, 5},
+ { DBGBUS_DSPP, 71, 6},
+ { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 72, 1},
+ { DBGBUS_DSPP, 72, 2},
+ { DBGBUS_DSPP, 72, 3},
+ { DBGBUS_DSPP, 72, 4},
+ { DBGBUS_DSPP, 72, 5},
+ { DBGBUS_DSPP, 72, 6},
+ { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 73, 1},
+ { DBGBUS_DSPP, 73, 2},
+ { DBGBUS_DSPP, 73, 3},
+ { DBGBUS_DSPP, 73, 4},
+ { DBGBUS_DSPP, 73, 5},
+ { DBGBUS_DSPP, 73, 6},
+ { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 74, 1},
+ { DBGBUS_DSPP, 74, 2},
+ { DBGBUS_DSPP, 74, 3},
+ { DBGBUS_DSPP, 74, 4},
+ { DBGBUS_DSPP, 74, 5},
+ { DBGBUS_DSPP, 74, 6},
+ { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 75, 1},
+ { DBGBUS_DSPP, 75, 2},
+ { DBGBUS_DSPP, 75, 3},
+ { DBGBUS_DSPP, 75, 4},
+ { DBGBUS_DSPP, 75, 5},
+ { DBGBUS_DSPP, 75, 6},
+ { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 76, 1},
+ { DBGBUS_DSPP, 76, 2},
+ { DBGBUS_DSPP, 76, 3},
+ { DBGBUS_DSPP, 76, 4},
+ { DBGBUS_DSPP, 76, 5},
+ { DBGBUS_DSPP, 76, 6},
+ { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 88, 1},
+ { DBGBUS_DSPP, 88, 2},
+ { DBGBUS_DSPP, 88, 3},
+ { DBGBUS_DSPP, 88, 4},
+ { DBGBUS_DSPP, 88, 5},
+ { DBGBUS_DSPP, 88, 6},
+ { DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 89, 1},
+ { DBGBUS_DSPP, 89, 2},
+ { DBGBUS_DSPP, 89, 3},
+ { DBGBUS_DSPP, 89, 4},
+ { DBGBUS_DSPP, 89, 5},
+ { DBGBUS_DSPP, 89, 6},
+ { DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 90, 1},
+ { DBGBUS_DSPP, 90, 2},
+ { DBGBUS_DSPP, 90, 3},
+ { DBGBUS_DSPP, 90, 4},
+ { DBGBUS_DSPP, 90, 5},
+ { DBGBUS_DSPP, 90, 6},
+ { DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 91, 1},
+ { DBGBUS_DSPP, 91, 2},
+ { DBGBUS_DSPP, 91, 3},
+ { DBGBUS_DSPP, 91, 4},
+ { DBGBUS_DSPP, 91, 5},
+ { DBGBUS_DSPP, 91, 6},
+ { DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM2 */
+ { DBGBUS_DSPP, 77, 0},
+ { DBGBUS_DSPP, 77, 1},
+ { DBGBUS_DSPP, 77, 2},
+ { DBGBUS_DSPP, 77, 3},
+ { DBGBUS_DSPP, 77, 4},
+ { DBGBUS_DSPP, 77, 5},
+ { DBGBUS_DSPP, 77, 6},
+ { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 78, 0},
+ { DBGBUS_DSPP, 78, 1},
+ { DBGBUS_DSPP, 78, 2},
+ { DBGBUS_DSPP, 78, 3},
+ { DBGBUS_DSPP, 78, 4},
+ { DBGBUS_DSPP, 78, 5},
+ { DBGBUS_DSPP, 78, 6},
+ { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 79, 0},
+ { DBGBUS_DSPP, 79, 1},
+ { DBGBUS_DSPP, 79, 2},
+ { DBGBUS_DSPP, 79, 3},
+ { DBGBUS_DSPP, 79, 4},
+ { DBGBUS_DSPP, 79, 5},
+ { DBGBUS_DSPP, 79, 6},
+ { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 80, 0},
+ { DBGBUS_DSPP, 80, 1},
+ { DBGBUS_DSPP, 80, 2},
+ { DBGBUS_DSPP, 80, 3},
+ { DBGBUS_DSPP, 80, 4},
+ { DBGBUS_DSPP, 80, 5},
+ { DBGBUS_DSPP, 80, 6},
+ { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 81, 0},
+ { DBGBUS_DSPP, 81, 1},
+ { DBGBUS_DSPP, 81, 2},
+ { DBGBUS_DSPP, 81, 3},
+ { DBGBUS_DSPP, 81, 4},
+ { DBGBUS_DSPP, 81, 5},
+ { DBGBUS_DSPP, 81, 6},
+ { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 82, 0},
+ { DBGBUS_DSPP, 82, 1},
+ { DBGBUS_DSPP, 82, 2},
+ { DBGBUS_DSPP, 82, 3},
+ { DBGBUS_DSPP, 82, 4},
+ { DBGBUS_DSPP, 82, 5},
+ { DBGBUS_DSPP, 82, 6},
+ { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 83, 0},
+ { DBGBUS_DSPP, 83, 1},
+ { DBGBUS_DSPP, 83, 2},
+ { DBGBUS_DSPP, 83, 3},
+ { DBGBUS_DSPP, 83, 4},
+ { DBGBUS_DSPP, 83, 5},
+ { DBGBUS_DSPP, 83, 6},
+ { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 92, 1},
+ { DBGBUS_DSPP, 92, 2},
+ { DBGBUS_DSPP, 92, 3},
+ { DBGBUS_DSPP, 92, 4},
+ { DBGBUS_DSPP, 92, 5},
+ { DBGBUS_DSPP, 92, 6},
+ { DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 93, 1},
+ { DBGBUS_DSPP, 93, 2},
+ { DBGBUS_DSPP, 93, 3},
+ { DBGBUS_DSPP, 93, 4},
+ { DBGBUS_DSPP, 93, 5},
+ { DBGBUS_DSPP, 93, 6},
+ { DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 94, 1},
+ { DBGBUS_DSPP, 94, 2},
+ { DBGBUS_DSPP, 94, 3},
+ { DBGBUS_DSPP, 94, 4},
+ { DBGBUS_DSPP, 94, 5},
+ { DBGBUS_DSPP, 94, 6},
+ { DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 95, 1},
+ { DBGBUS_DSPP, 95, 2},
+ { DBGBUS_DSPP, 95, 3},
+ { DBGBUS_DSPP, 95, 4},
+ { DBGBUS_DSPP, 95, 5},
+ { DBGBUS_DSPP, 95, 6},
+ { DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM5 */
+ { DBGBUS_DSPP, 110, 1},
+ { DBGBUS_DSPP, 110, 2},
+ { DBGBUS_DSPP, 110, 3},
+ { DBGBUS_DSPP, 110, 4},
+ { DBGBUS_DSPP, 110, 5},
+ { DBGBUS_DSPP, 110, 6},
+ { DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 111, 1},
+ { DBGBUS_DSPP, 111, 2},
+ { DBGBUS_DSPP, 111, 3},
+ { DBGBUS_DSPP, 111, 4},
+ { DBGBUS_DSPP, 111, 5},
+ { DBGBUS_DSPP, 111, 6},
+ { DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 112, 1},
+ { DBGBUS_DSPP, 112, 2},
+ { DBGBUS_DSPP, 112, 3},
+ { DBGBUS_DSPP, 112, 4},
+ { DBGBUS_DSPP, 112, 5},
+ { DBGBUS_DSPP, 112, 6},
+ { DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 113, 1},
+ { DBGBUS_DSPP, 113, 2},
+ { DBGBUS_DSPP, 113, 3},
+ { DBGBUS_DSPP, 113, 4},
+ { DBGBUS_DSPP, 113, 5},
+ { DBGBUS_DSPP, 113, 6},
+ { DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 114, 1},
+ { DBGBUS_DSPP, 114, 2},
+ { DBGBUS_DSPP, 114, 3},
+ { DBGBUS_DSPP, 114, 4},
+ { DBGBUS_DSPP, 114, 5},
+ { DBGBUS_DSPP, 114, 6},
+ { DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 115, 1},
+ { DBGBUS_DSPP, 115, 2},
+ { DBGBUS_DSPP, 115, 3},
+ { DBGBUS_DSPP, 115, 4},
+ { DBGBUS_DSPP, 115, 5},
+ { DBGBUS_DSPP, 115, 6},
+ { DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 116, 1},
+ { DBGBUS_DSPP, 116, 2},
+ { DBGBUS_DSPP, 116, 3},
+ { DBGBUS_DSPP, 116, 4},
+ { DBGBUS_DSPP, 116, 5},
+ { DBGBUS_DSPP, 116, 6},
+ { DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 117, 1},
+ { DBGBUS_DSPP, 117, 2},
+ { DBGBUS_DSPP, 117, 3},
+ { DBGBUS_DSPP, 117, 4},
+ { DBGBUS_DSPP, 117, 5},
+ { DBGBUS_DSPP, 117, 6},
+ { DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 118, 1},
+ { DBGBUS_DSPP, 118, 2},
+ { DBGBUS_DSPP, 118, 3},
+ { DBGBUS_DSPP, 118, 4},
+ { DBGBUS_DSPP, 118, 5},
+ { DBGBUS_DSPP, 118, 6},
+ { DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 119, 1},
+ { DBGBUS_DSPP, 119, 2},
+ { DBGBUS_DSPP, 119, 3},
+ { DBGBUS_DSPP, 119, 4},
+ { DBGBUS_DSPP, 119, 5},
+ { DBGBUS_DSPP, 119, 6},
+ { DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 120, 1},
+ { DBGBUS_DSPP, 120, 2},
+ { DBGBUS_DSPP, 120, 3},
+ { DBGBUS_DSPP, 120, 4},
+ { DBGBUS_DSPP, 120, 5},
+ { DBGBUS_DSPP, 120, 6},
+ { DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
+
+ /* csc */
+ { DBGBUS_SSPP0, 7, 0},
+ { DBGBUS_SSPP0, 7, 1},
+ { DBGBUS_SSPP0, 27, 0},
+ { DBGBUS_SSPP0, 27, 1},
+ { DBGBUS_SSPP1, 7, 0},
+ { DBGBUS_SSPP1, 7, 1},
+ { DBGBUS_SSPP1, 27, 0},
+ { DBGBUS_SSPP1, 27, 1},
+
+ /* pcc */
+ { DBGBUS_SSPP0, 3, 3},
+ { DBGBUS_SSPP0, 23, 3},
+ { DBGBUS_SSPP0, 33, 3},
+ { DBGBUS_SSPP0, 43, 3},
+ { DBGBUS_SSPP1, 3, 3},
+ { DBGBUS_SSPP1, 23, 3},
+ { DBGBUS_SSPP1, 33, 3},
+ { DBGBUS_SSPP1, 43, 3},
+
+ /* spa */
+ { DBGBUS_SSPP0, 8, 0},
+ { DBGBUS_SSPP0, 28, 0},
+ { DBGBUS_SSPP1, 8, 0},
+ { DBGBUS_SSPP1, 28, 0},
+ { DBGBUS_DSPP, 13, 0},
+ { DBGBUS_DSPP, 19, 0},
+
+ /* igc */
+ { DBGBUS_SSPP0, 17, 0},
+ { DBGBUS_SSPP0, 17, 1},
+ { DBGBUS_SSPP0, 17, 3},
+ { DBGBUS_SSPP0, 37, 0},
+ { DBGBUS_SSPP0, 37, 1},
+ { DBGBUS_SSPP0, 37, 3},
+ { DBGBUS_SSPP0, 46, 0},
+ { DBGBUS_SSPP0, 46, 1},
+ { DBGBUS_SSPP0, 46, 3},
+
+ { DBGBUS_SSPP1, 17, 0},
+ { DBGBUS_SSPP1, 17, 1},
+ { DBGBUS_SSPP1, 17, 3},
+ { DBGBUS_SSPP1, 37, 0},
+ { DBGBUS_SSPP1, 37, 1},
+ { DBGBUS_SSPP1, 37, 3},
+ { DBGBUS_SSPP1, 46, 0},
+ { DBGBUS_SSPP1, 46, 1},
+ { DBGBUS_SSPP1, 46, 3},
+
+ { DBGBUS_DSPP, 14, 0},
+ { DBGBUS_DSPP, 14, 1},
+ { DBGBUS_DSPP, 14, 3},
+ { DBGBUS_DSPP, 20, 0},
+ { DBGBUS_DSPP, 20, 1},
+ { DBGBUS_DSPP, 20, 3},
+
+ /* intf0-3 */
+ { DBGBUS_PERIPH, 0, 0},
+ { DBGBUS_PERIPH, 1, 0},
+ { DBGBUS_PERIPH, 2, 0},
+ { DBGBUS_PERIPH, 3, 0},
+
+ /* te counter wrapper */
+ { DBGBUS_PERIPH, 60, 0},
+
+ /* dsc0 */
+ { DBGBUS_PERIPH, 47, 0},
+ { DBGBUS_PERIPH, 47, 1},
+ { DBGBUS_PERIPH, 47, 2},
+ { DBGBUS_PERIPH, 47, 3},
+ { DBGBUS_PERIPH, 47, 4},
+ { DBGBUS_PERIPH, 47, 5},
+ { DBGBUS_PERIPH, 47, 6},
+ { DBGBUS_PERIPH, 47, 7},
+
+ /* dsc1 */
+ { DBGBUS_PERIPH, 48, 0},
+ { DBGBUS_PERIPH, 48, 1},
+ { DBGBUS_PERIPH, 48, 2},
+ { DBGBUS_PERIPH, 48, 3},
+ { DBGBUS_PERIPH, 48, 4},
+ { DBGBUS_PERIPH, 48, 5},
+ { DBGBUS_PERIPH, 48, 6},
+ { DBGBUS_PERIPH, 48, 7},
+
+ /* dsc2 */
+ { DBGBUS_PERIPH, 51, 0},
+ { DBGBUS_PERIPH, 51, 1},
+ { DBGBUS_PERIPH, 51, 2},
+ { DBGBUS_PERIPH, 51, 3},
+ { DBGBUS_PERIPH, 51, 4},
+ { DBGBUS_PERIPH, 51, 5},
+ { DBGBUS_PERIPH, 51, 6},
+ { DBGBUS_PERIPH, 51, 7},
+
+ /* dsc3 */
+ { DBGBUS_PERIPH, 52, 0},
+ { DBGBUS_PERIPH, 52, 1},
+ { DBGBUS_PERIPH, 52, 2},
+ { DBGBUS_PERIPH, 52, 3},
+ { DBGBUS_PERIPH, 52, 4},
+ { DBGBUS_PERIPH, 52, 5},
+ { DBGBUS_PERIPH, 52, 6},
+ { DBGBUS_PERIPH, 52, 7},
+
+ /* tear-check */
+ { DBGBUS_PERIPH, 63, 0 },
+ { DBGBUS_PERIPH, 64, 0 },
+ { DBGBUS_PERIPH, 65, 0 },
+ { DBGBUS_PERIPH, 73, 0 },
+ { DBGBUS_PERIPH, 74, 0 },
+
+ /* cdwn */
+ { DBGBUS_PERIPH, 80, 0},
+ { DBGBUS_PERIPH, 80, 1},
+ { DBGBUS_PERIPH, 80, 2},
+
+ { DBGBUS_PERIPH, 81, 0},
+ { DBGBUS_PERIPH, 81, 1},
+ { DBGBUS_PERIPH, 81, 2},
+
+ { DBGBUS_PERIPH, 82, 0},
+ { DBGBUS_PERIPH, 82, 1},
+ { DBGBUS_PERIPH, 82, 2},
+ { DBGBUS_PERIPH, 82, 3},
+ { DBGBUS_PERIPH, 82, 4},
+ { DBGBUS_PERIPH, 82, 5},
+ { DBGBUS_PERIPH, 82, 6},
+ { DBGBUS_PERIPH, 82, 7},
+
+ /* hdmi */
+ { DBGBUS_PERIPH, 68, 0},
+ { DBGBUS_PERIPH, 68, 1},
+ { DBGBUS_PERIPH, 68, 2},
+ { DBGBUS_PERIPH, 68, 3},
+ { DBGBUS_PERIPH, 68, 4},
+ { DBGBUS_PERIPH, 68, 5},
+
+ /* edp */
+ { DBGBUS_PERIPH, 69, 0},
+ { DBGBUS_PERIPH, 69, 1},
+ { DBGBUS_PERIPH, 69, 2},
+ { DBGBUS_PERIPH, 69, 3},
+ { DBGBUS_PERIPH, 69, 4},
+ { DBGBUS_PERIPH, 69, 5},
+
+ /* dsi0 */
+ { DBGBUS_PERIPH, 70, 0},
+ { DBGBUS_PERIPH, 70, 1},
+ { DBGBUS_PERIPH, 70, 2},
+ { DBGBUS_PERIPH, 70, 3},
+ { DBGBUS_PERIPH, 70, 4},
+ { DBGBUS_PERIPH, 70, 5},
+
+ /* dsi1 */
+ { DBGBUS_PERIPH, 71, 0},
+ { DBGBUS_PERIPH, 71, 1},
+ { DBGBUS_PERIPH, 71, 2},
+ { DBGBUS_PERIPH, 71, 3},
+ { DBGBUS_PERIPH, 71, 4},
+ { DBGBUS_PERIPH, 71, 5},
+};
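+
+/*
+ * Each entry above is { wr_addr, block_id, test_id[, analyzer] }: the
+ * dump loop below writes TEST_MASK(block_id, test_id) to the DBGBUS_*
+ * register at wr_addr, reads the resulting status back, and hands it to
+ * the optional analyzer callback.
+ */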
+
+static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
+ {0x214, 0x21c, 16, 2, 0x0, 0xd}, /* arb clients */
+ {0x214, 0x21c, 16, 2, 0x80, 0xc0}, /* arb clients */
+ {0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
+ {0x214, 0x21c, 0, 16, 0x0, 0xf}, /* xin blocks - axi side */
+ {0x214, 0x21c, 0, 16, 0x80, 0xa4}, /* xin blocks - axi side */
+ {0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
+ {0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
+};
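+
+/*
+ * Field order inferred from how the vbif dump code below consumes these
+ * entries (the struct definition itself lives earlier in this file):
+ * { block_bus_addr, disable_bus_addr, bit_offset, block_cnt,
+ *   test_pnt_start, test_pnt_cnt }.
+ */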
+
+/**
+ * _dpu_dbg_enable_power - use runtime PM to gate power for hw register access
+ * @enable: whether to turn power on or off
+ */
+static inline void _dpu_dbg_enable_power(int enable)
+{
+ if (enable)
+ pm_runtime_get_sync(dpu_dbg_base.dev);
+ else
+ pm_runtime_put_sync(dpu_dbg_base.dev);
+}
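+
+/*
+ * The dump routines below bracket all debug bus register access with
+ * _dpu_dbg_enable_power(true) / _dpu_dbg_enable_power(false), so the
+ * runtime PM reference keeps the hardware powered across the
+ * readl/writel sequences.
+ */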
+
+static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
+{
+ bool in_log, in_mem;
+ u32 **dump_mem = NULL;
+ u32 *dump_addr = NULL;
+ u32 status = 0;
+ struct dpu_debug_bus_entry *head;
+ phys_addr_t phys = 0;
+ int list_size;
+ int i;
+ u32 offset;
+ void __iomem *mem_base = NULL;
+ struct dpu_dbg_reg_base *reg_base;
+
+ if (!bus || !bus->cmn.entries_size)
+ return;
+
+ list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+ reg_base_head)
+ if (strlen(reg_base->name) &&
+ !strcmp(reg_base->name, bus->cmn.name))
+ mem_base = reg_base->base + bus->top_blk_off;
+
+ if (!mem_base) {
+ pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+ return;
+ }
+
+ dump_mem = &bus->cmn.dumped_content;
+
+ /* each bus entry is dumped as four u32 values, i.e. 16 bytes */
+ list_size = (bus->cmn.entries_size * 4 * 4);
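+ /* record layout: { wr_addr, block_id, test_id, status } */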
+
+ in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+ in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+ if (!in_log && !in_mem)
+ return;
+
+ dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+ bus->cmn.name);
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+ list_size, &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ dev_info(dpu_dbg_base.dev,
+ "%s: start_addr:0x%pK len:0x%x\n",
+ __func__, dump_addr, list_size);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: allocation failed\n");
+ }
+ }
+
+ _dpu_dbg_enable_power(true);
+ for (i = 0; i < bus->cmn.entries_size; i++) {
+ head = bus->entries + i;
+ writel_relaxed(TEST_MASK(head->block_id, head->test_id),
+ mem_base + head->wr_addr);
+ wmb(); /* make sure test bits were written */
+
+ if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
+ offset = DBGBUS_DSPP_STATUS;
+ /* keep DSPP test point enabled */
+ if (head->wr_addr != DBGBUS_DSPP)
+ writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
+ } else {
+ offset = head->wr_addr + 0x4;
+ }
+
+ status = readl_relaxed(mem_base + offset);
+
+ if (in_log)
+ dev_info(dpu_dbg_base.dev,
+ "waddr=0x%x blk=%d tst=%d val=0x%x\n",
+ head->wr_addr, head->block_id,
+ head->test_id, status);
+
+ if (dump_addr && in_mem) {
+ dump_addr[i*4] = head->wr_addr;
+ dump_addr[i*4 + 1] = head->block_id;
+ dump_addr[i*4 + 2] = head->test_id;
+ dump_addr[i*4 + 3] = status;
+ }
+
+ if (head->analyzer)
+ head->analyzer(mem_base, head, status);
+
+ /* Disable debug bus once we are done */
+ writel_relaxed(0, mem_base + head->wr_addr);
+ if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
+ head->wr_addr != DBGBUS_DSPP)
+ writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
+ }
+ _dpu_dbg_enable_power(false);
+
+ dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+ bus->cmn.name);
+}
+
+static void _dpu_dbg_dump_vbif_debug_bus_entry(
+ struct vbif_debug_bus_entry *head, void __iomem *mem_base,
+ u32 *dump_addr, bool in_log)
+{
+ int i, j;
+ u32 val;
+
+ if (!dump_addr && !in_log)
+ return;
+
+ for (i = 0; i < head->block_cnt; i++) {
+ writel_relaxed(1 << (i + head->bit_offset),
+ mem_base + head->block_bus_addr);
+ /* make sure the current bus block is enabled */
+ wmb();
+ for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
+ writel_relaxed(j, mem_base + head->block_bus_addr + 4);
+ /* make sure that test point is enabled */
+ wmb();
+ val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
+ if (dump_addr) {
+ *dump_addr++ = head->block_bus_addr;
+ *dump_addr++ = i;
+ *dump_addr++ = j;
+ *dump_addr++ = val;
+ }
+ if (in_log)
+ dev_info(dpu_dbg_base.dev,
+ "testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+ head->block_bus_addr, i, j, val);
+ }
+ }
+}
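+
+/*
+ * The helper above emits four u32 values per test point ({ bus addr,
+ * block index, test point, value }); callers must therefore advance
+ * their dump pointer by block_cnt * test_pnt_cnt * 4 per entry, as the
+ * rt bus dump below does.
+ */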
+
+static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
+{
+ bool in_log, in_mem;
+ u32 **dump_mem = NULL;
+ u32 *dump_addr = NULL;
+ u32 value, d0, d1;
+ unsigned long reg, reg1, reg2;
+ struct vbif_debug_bus_entry *head;
+ phys_addr_t phys = 0;
+ int i, list_size = 0;
+ void __iomem *mem_base = NULL;
+ struct vbif_debug_bus_entry *dbg_bus;
+ u32 bus_size;
+ struct dpu_dbg_reg_base *reg_base;
+
+ if (!bus || !bus->cmn.entries_size)
+ return;
+
+ list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+ reg_base_head)
+ if (strlen(reg_base->name) &&
+ !strcmp(reg_base->name, bus->cmn.name))
+ mem_base = reg_base->base;
+
+ if (!mem_base) {
+ pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+ return;
+ }
+
+ dbg_bus = bus->entries;
+ bus_size = bus->cmn.entries_size;
+ list_size = bus->cmn.entries_size;
+ dump_mem = &bus->cmn.dumped_content;
+
+ dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+ bus->cmn.name);
+
+ if (!dump_mem || !dbg_bus || !bus_size || !list_size)
+ return;
+
+ /* count test points to size the dump allocation */
+ for (i = 0; i < bus_size; i++) {
+ head = dbg_bus + i;
+ list_size += (head->block_cnt * head->test_pnt_cnt);
+ }
+
+ /* 4 u32 values of 4 bytes each for every test point */
+ list_size *= 16;
+
+ in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+ in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+ if (!in_log && !in_mem)
+ return;
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+ list_size, &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ dev_info(dpu_dbg_base.dev,
+ "%s: start_addr:0x%pK len:0x%x\n",
+ __func__, dump_addr, list_size);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: allocation failed\n");
+ }
+ }
+
+ _dpu_dbg_enable_power(true);
+
+ value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
+ writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
+
+ /* make sure that vbif core is on */
+ wmb();
+
+ /*
+ * Extract VBIF error info based on XIN halt and error status.
+ * If the XIN client is not in HALT state, or an error is detected,
+ * then retrieve the VBIF error info for it.
+ */
+ reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
+ reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
+ reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
+ dev_err(dpu_dbg_base.dev,
+ "XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
+ reg, reg1, reg2);
+ reg >>= 16;
+ reg &= ~(reg1 | reg2);
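+ /*
+ * After the shift, bit i of 'reg' is the halt ack for XIN client i;
+ * clearing the bits flagged in the pending/source error registers
+ * means a zero bit now marks a client that is either not halted or
+ * has an error, and only those clients are queried below.
+ */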
+ for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
+ if (!test_bit(0, &reg)) {
+ writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
+ /* make sure reg write goes through */
+ wmb();
+
+ d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
+ d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
+
+ dev_err(dpu_dbg_base.dev,
+ "Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
+ i, d0, d1);
+ }
+ reg >>= 1;
+ }
+
+ for (i = 0; i < bus_size; i++) {
+ head = dbg_bus + i;
+
+ writel_relaxed(0, mem_base + head->disable_bus_addr);
+ writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
+ /* make sure that other bus is off */
+ wmb();
+
+ _dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
+ in_log);
+ if (dump_addr)
+ dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
+ }
+
+ _dpu_dbg_enable_power(false);
+
+ dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+ bus->cmn.name);
+}
+
+/**
+ * _dpu_dump_array - dump array of register bases
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_dpu: whether to dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
+ */
+static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt)
+{
+ if (dump_dbgbus_dpu)
+ _dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
+
+ if (dump_dbgbus_vbif_rt)
+ _dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
+}
+
+/**
+ * _dpu_dump_work - deferred dump work function
+ * @work: work structure
+ */
+static void _dpu_dump_work(struct work_struct *work)
+{
+ _dpu_dump_array("dpudump_workitem",
+ dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
+ dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
+}
+
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt)
+{
+ if (queue_work && work_pending(&dpu_dbg_base.dump_work))
+ return;
+
+ if (!queue_work) {
+ _dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
+ return;
+ }
+
+ /* schedule work to dump later */
+ dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
+ dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
+ dump_dbgbus_vbif_rt;
+ schedule_work(&dpu_dbg_base.dump_work);
+}
+
+/**
+ * dpu_dbg_debugfs_open - debugfs open handler for debug dump
+ * @inode: debugfs inode
+ * @file: file handle
+ */
+static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
+{
+ /* non-seekable */
+ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+/**
+ * dpu_dbg_dump_write - debugfs write handler for debug dump
+ * @file: file handle
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t dpu_dbg_dump_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ _dpu_dump_array("dump_debugfs", true, true);
+ return count;
+}
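+
+/*
+ * Userspace trigger, a sketch (the path up to the dpu debugfs root
+ * depends on where the caller registered debugfs_root):
+ *
+ *	echo 1 > /sys/kernel/debug/<dpu debugfs root>/dump
+ *
+ * Any write dumps both debug buses immediately.
+ */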
+
+static const struct file_operations dpu_dbg_dump_fops = {
+ .open = dpu_dbg_debugfs_open,
+ .write = dpu_dbg_dump_write,
+};
+
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+ static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+ char debug_name[80] = "";
+
+ if (!debugfs_root)
+ return -EINVAL;
+
+ debugfs_create_file("dump", 0600, debugfs_root, NULL,
+ &dpu_dbg_dump_fops);
+
+ if (dbg->dbgbus_dpu.entries) {
+ dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
+ snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+ dbg->dbgbus_dpu.cmn.name);
+ dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
+ debugfs_create_u32(debug_name, 0600, debugfs_root,
+ &dbg->dbgbus_dpu.cmn.enable_mask);
+ }
+
+ if (dbg->dbgbus_vbif_rt.entries) {
+ dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
+ snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+ dbg->dbgbus_vbif_rt.cmn.name);
+ dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
+ debugfs_create_u32(debug_name, 0600, debugfs_root,
+ &dbg->dbgbus_vbif_rt.cmn.enable_mask);
+ }
+
+ return 0;
+}
+
+static void _dpu_dbg_debugfs_destroy(void)
+{
+}
+
+void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+ static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+
+ memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
+ memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
+
+ if (IS_MSM8998_TARGET(hwversion)) {
+ dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
+ dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
+ dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+ dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+ dbg->dbgbus_vbif_rt.cmn.entries_size =
+ ARRAY_SIZE(vbif_dbg_bus_msm8998);
+ } else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
+ dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
+ dbg->dbgbus_dpu.cmn.entries_size =
+ ARRAY_SIZE(dbg_bus_dpu_sdm845);
+ dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+ /* vbif is unchanged vs 8998 */
+ dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+ dbg->dbgbus_vbif_rt.cmn.entries_size =
+ ARRAY_SIZE(vbif_dbg_bus_msm8998);
+ } else {
+ pr_err("unsupported chipset id %X\n", hwversion);
+ }
+}
+
+int dpu_dbg_init(struct device *dev)
+{
+ if (!dev) {
+ pr_err("invalid params\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
+ dpu_dbg_base.dev = dev;
+
+ INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
+
+ return 0;
+}
+
+/**
+ * dpu_dbg_destroy - destroy dpu debug facilities
+ */
+void dpu_dbg_destroy(void)
+{
+ _dpu_dbg_debugfs_destroy();
+}
+
+void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+ dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
new file mode 100644
index 000000000000..1e6fa945f98b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DPU_DBG_H_
+#define DPU_DBG_H_
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
+enum dpu_dbg_dump_flag {
+ DPU_DBG_DUMP_IN_LOG = BIT(0),
+ DPU_DBG_DUMP_IN_MEM = BIT(1),
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
+ * @hwversion: Chipset revision
+ */
+void dpu_dbg_init_dbg_buses(u32 hwversion);
+
+/**
+ * dpu_dbg_init - initialize global dpu debug facilities: regdump
+ * @dev: device handle
+ * Returns: 0 or -ERROR
+ */
+int dpu_dbg_init(struct device *dev);
+
+/**
+ * dpu_dbg_debugfs_register - register entries at the given debugfs dir
+ * @debugfs_root: debugfs root in which to create dpu debug entries
+ * Returns: 0 or -ERROR
+ */
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
+
+/**
+ * dpu_dbg_destroy - destroy the global dpu debug facilities
+ * Returns: none
+ */
+void dpu_dbg_destroy(void);
+
+/**
+ * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
+ * @queue_work: whether to queue the dumping work to the work_struct
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_dpu: dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: dump the vbif rt debug bus
+ * Returns: none
+ */
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt);
+
+/**
+ * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
+ * address of the top registers. Used for accessing debug bus controls.
+ * @blk_off: offset from mdss base of the top block
+ */
+void dpu_dbg_set_dpu_top_offset(u32 blk_off);
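+
+/*
+ * A sketch of the expected call order, derived from the declarations
+ * above (the actual call sites live in the dpu kms code):
+ *
+ *	dpu_dbg_init(dev);
+ *	dpu_dbg_init_dbg_buses(hwversion);
+ *	dpu_dbg_set_dpu_top_offset(top_blk_off);
+ *	dpu_dbg_debugfs_register(debugfs_root);
+ *	...
+ *	dpu_dbg_dump(false, __func__, true, true); (synchronous dump)
+ *	...
+ *	dpu_dbg_destroy();
+ */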
+
+#else
+
+static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+}
+
+static inline int dpu_dbg_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+ return 0;
+}
+
+static inline void dpu_dbg_destroy(void)
+{
+}
+
+static inline void dpu_dbg_dump(bool queue_work, const char *name,
+ bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
+{
+}
+
+static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* DPU_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
new file mode 100644
index 000000000000..0bd3eda93e22
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -0,0 +1,2498 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_formats.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_crtc.h"
+#include "dpu_trace.h"
+#include "dpu_core_irq.h"
+
+#define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
+ (p) ? (p)->parent->base.id : -1, \
+ (p) ? (p)->intf_idx - INTF_0 : -1, \
+ (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+ ##__VA_ARGS__)
+
+#define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
+ (p) ? (p)->parent->base.id : -1, \
+ (p) ? (p)->intf_idx - INTF_0 : -1, \
+ (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+ ##__VA_ARGS__)
+
+/*
+ * Two to anticipate panels that can do cmd/vid dynamic switching
+ * plan is to create all possible physical encoder types, and switch between
+ * them at runtime
+ */
+#define NUM_PHYS_ENCODER_TYPES 2
+
+#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
+ (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
+
+#define MAX_CHANNELS_PER_ENC 2
+
+#define MISR_BUFF_SIZE 256
+
+#define IDLE_SHORT_TIMEOUT 1
+
+#define MAX_VDISPLAY_SPLIT 1080
+
+/**
+ * enum dpu_enc_rc_events - events for resource control state machine
+ * @DPU_ENC_RC_EVENT_KICKOFF:
+ * This event happens at NORMAL priority.
+ * Event that signals the start of the transfer. When this event is
+ * received, enable MDP/DSI core clocks. Regardless of the previous
+ * state, the resource should be in ON state at the end of this event.
+ * @DPU_ENC_RC_EVENT_FRAME_DONE:
+ * This event happens at INTERRUPT level.
+ * Event signals the end of the data transfer after the PP FRAME_DONE
+ * event. At the end of this event, a delayed work is scheduled to go to
+ * IDLE_PC state after IDLE_TIMEOUT time.
+ * @DPU_ENC_RC_EVENT_PRE_STOP:
+ * This event happens at NORMAL priority.
+ * This event, when received during the ON state, leaves the RC state
+ * in the PRE_OFF state. It should be followed by the STOP event as
+ * part of encoder disable.
+ * If received during IDLE or OFF states, it will do nothing.
+ * @DPU_ENC_RC_EVENT_STOP:
+ * This event happens at NORMAL priority.
+ * When this event is received, disable all the MDP/DSI core clocks, and
+ * disable IRQs. It should be called from the PRE_OFF or IDLE states.
+ * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
+ * PRE_OFF is expected when PRE_STOP was executed during the ON state.
+ * Resource state should be in OFF at the end of the event.
+ * @DPU_ENC_RC_EVENT_ENTER_IDLE:
+ * This event happens at NORMAL priority from a work item.
+ * Event signals that there were no frame updates for IDLE_TIMEOUT time.
+ * This would disable MDP/DSI core clocks and change the resource state
+ * to IDLE.
+ */
+enum dpu_enc_rc_events {
+ DPU_ENC_RC_EVENT_KICKOFF = 1,
+ DPU_ENC_RC_EVENT_FRAME_DONE,
+ DPU_ENC_RC_EVENT_PRE_STOP,
+ DPU_ENC_RC_EVENT_STOP,
+ DPU_ENC_RC_EVENT_ENTER_IDLE
+};
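+
+/*
+ * Sketch of the resulting state transitions, as implemented in
+ * dpu_encoder_resource_control() below:
+ *
+ *	OFF/IDLE     --KICKOFF-->     ON
+ *	ON           --FRAME_DONE-->  ON (schedules a delayed ENTER_IDLE)
+ *	ON           --PRE_STOP-->    PRE_OFF (video mode: also from IDLE)
+ *	ON           --ENTER_IDLE-->  IDLE
+ *	PRE_OFF/IDLE --STOP-->        OFF
+ */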
+
+/**
+ * enum dpu_enc_rc_states - states that the resource control maintains
+ * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
+ * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
+ * @DPU_ENC_RC_STATE_ON: Resource is in ON state
+ * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
+ */
+enum dpu_enc_rc_states {
+ DPU_ENC_RC_STATE_OFF,
+ DPU_ENC_RC_STATE_PRE_OFF,
+ DPU_ENC_RC_STATE_ON,
+ DPU_ENC_RC_STATE_IDLE
+};
+
+/**
+ * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
+ * encoders. Virtual encoder manages one "logical" display. Physical
+ * encoders manage one intf block, tied to a specific panel/sub-panel.
+ * Virtual encoder defers as much as possible to the physical encoders.
+ * Virtual encoder registers itself with the DRM Framework as the encoder.
+ * @base: drm_encoder base class for registration with DRM
+ * @enc_spin_lock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @bus_scaling_client: Client handle to the bus scaling interface
+ * @num_phys_encs: Actual number of physical encoders contained.
+ * @phys_encs: Container of physical encoders managed.
+ * @cur_master: Pointer to the current master in this mode; an
+ * optimization, only valid after enable and cleared on disable.
+ * @hw_pp: Handles to the pingpong blocks used for the display. The
+ * number of pingpong blocks can differ from num_phys_encs.
+ * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
+ * for partial update right-only cases, such as pingpong
+ * split where virtual pingpong does not generate IRQs
+ * @crtc_vblank_cb: Callback into the upper layer / CRTC for
+ * notification of the VBLANK
+ * @crtc_vblank_cb_data: Data from upper layer for VBLANK notification
+ * @crtc_kickoff_cb: Callback into CRTC that will flush & start
+ * all CTL paths
+ * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
+ * @debugfs_root: Debug file system root file node
+ * @enc_lock: Lock around physical encoder create/destroy and
+ * access.
+ * @frame_busy_mask: Bitmask tracking which phys_encs are still busy
+ * processing the current command.
+ * Bit0 = phys_encs[0] etc.
+ * @crtc_frame_event_cb: callback handler for frame event
+ * @crtc_frame_event_cb_data: callback handler private data
+ * @frame_done_timeout: frame done timeout in Hz
+ * @frame_done_timer: watchdog timer for frame done event
+ * @vsync_event_timer: vsync timer
+ * @disp_info: local copy of msm_display_info struct
+ * @misr_enable: misr enable/disable status
+ * @misr_frame_count: misr frame count before start capturing the data
+ * @idle_pc_supported: indicate if idle power collapse is supported
+ * @rc_lock: resource control mutex lock to protect
+ * virt encoder over various state changes
+ * @rc_state: resource controller state
+ * @delayed_off_work: delayed worker to schedule disabling of
+ * clks and resources after IDLE_TIMEOUT time.
+ * @vsync_event_work: worker to handle vsync event for autorefresh
+ * @topology: topology of the display
+ * @mode_set_complete: flag to indicate modeset completion
+ * @idle_timeout: idle timeout duration in milliseconds
+ */
+struct dpu_encoder_virt {
+ struct drm_encoder base;
+ spinlock_t enc_spinlock;
+ uint32_t bus_scaling_client;
+
+ uint32_t display_num_of_h_tiles;
+
+ unsigned int num_phys_encs;
+ struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
+ struct dpu_encoder_phys *cur_master;
+ struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+
+ bool intfs_swapped;
+
+ void (*crtc_vblank_cb)(void *);
+ void *crtc_vblank_cb_data;
+
+ struct dentry *debugfs_root;
+ struct mutex enc_lock;
+ DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
+ void (*crtc_frame_event_cb)(void *, u32 event);
+ void *crtc_frame_event_cb_data;
+
+ atomic_t frame_done_timeout;
+ struct timer_list frame_done_timer;
+ struct timer_list vsync_event_timer;
+
+ struct msm_display_info disp_info;
+ bool misr_enable;
+ u32 misr_frame_count;
+
+ bool idle_pc_supported;
+ struct mutex rc_lock;
+ enum dpu_enc_rc_states rc_state;
+ struct kthread_delayed_work delayed_off_work;
+ struct kthread_work vsync_event_work;
+ struct msm_display_topology topology;
+ bool mode_set_complete;
+
+ u32 idle_timeout;
+};
+
+#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
+
+static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
+ bool enable)
+{
+ struct drm_encoder *drm_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu enc\n");
+ return -EINVAL;
+ }
+
+ drm_enc = &dpu_enc->base;
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("drm device invalid\n");
+ return -EINVAL;
+ }
+
+ priv = drm_enc->dev->dev_private;
+ if (!priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ if (enable)
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ else
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
+ DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
+ phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_ERROR);
+}
+
+static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
+ int32_t hw_id, struct dpu_encoder_wait_info *info);
+
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx,
+ struct dpu_encoder_wait_info *wait_info)
+{
+ struct dpu_encoder_irq *irq;
+ u32 irq_status;
+ int ret;
+
+ if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ /* note: do master / slave checking outside */
+
+ /* return EWOULDBLOCK since we know the wait isn't necessary */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ return -EWOULDBLOCK;
+ }
+
+ if (irq->irq_idx < 0) {
+ DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->name);
+ return 0;
+ }
+
+ DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+
+ ret = dpu_encoder_helper_wait_event_timeout(
+ DRMID(phys_enc->parent),
+ irq->hw_idx,
+ wait_info);
+
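+ /*
+ * If the wait timed out but the interrupt status register shows the
+ * irq did in fact fire (the event raced with the timeout), invoke
+ * the callback by hand below and treat the wait as successful.
+ */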
+ if (ret <= 0) {
+ irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
+ irq->irq_idx, true);
+ if (irq_status) {
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
+ "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ local_irq_save(flags);
+ irq->cb.func(phys_enc, irq->irq_idx);
+ local_irq_restore(flags);
+ ret = 0;
+ } else {
+ ret = -ETIMEDOUT;
+ DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
+ "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+ } else {
+ ret = 0;
+ trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
+ intr_idx, irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+
+ return ret;
+}
+
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ struct dpu_encoder_irq *irq;
+ int ret = 0;
+
+ if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ if (irq->irq_idx >= 0) {
+ DPU_DEBUG_PHYS(phys_enc,
+ "skipping already registered irq %s type %d\n",
+ irq->name, irq->intr_type);
+ return 0;
+ }
+
+ irq->irq_idx = dpu_core_irq_idx_lookup(phys_enc->dpu_kms,
+ irq->intr_type, irq->hw_idx);
+ if (irq->irq_idx < 0) {
+ DPU_ERROR_PHYS(phys_enc,
+ "failed to lookup IRQ index for %s type:%d\n",
+ irq->name, irq->intr_type);
+ return -EINVAL;
+ }
+
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
+ &irq->cb);
+ if (ret) {
+ DPU_ERROR_PHYS(phys_enc,
+ "failed to register IRQ callback for %s\n",
+ irq->name);
+ irq->irq_idx = -EINVAL;
+ return ret;
+ }
+
+ ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
+ if (ret) {
+ DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ irq->irq_idx, &irq->cb);
+ irq->irq_idx = -EINVAL;
+ return ret;
+ }
+
+ trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx);
+
+ return ret;
+}
+
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ struct dpu_encoder_irq *irq;
+ int ret;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ /* skip irqs that were never registered, but warn on the duplicate unregister */
+ if (irq->irq_idx < 0) {
+ DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ return 0;
+ }
+
+ ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
+ if (ret) {
+ DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret);
+ }
+
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
+ &irq->cb);
+ if (ret) {
+ DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret);
+ }
+
+ trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx);
+
+ irq->irq_idx = -EINVAL;
+
+ return 0;
+}
+
+void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i = 0;
+
+ if (!hw_res || !drm_enc || !conn_state) {
+ DPU_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
+ drm_enc != 0, hw_res != 0, conn_state != 0);
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ /* Query resources used by phys encs, expected to be without overlap */
+ memset(hw_res, 0, sizeof(*hw_res));
+ hw_res->display_num_of_h_tiles = dpu_enc->display_num_of_h_tiles;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.get_hw_resources)
+ phys->ops.get_hw_resources(phys, hw_res, conn_state);
+ }
+}
+
+static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ mutex_lock(&dpu_enc->enc_lock);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.destroy) {
+ phys->ops.destroy(phys);
+ --dpu_enc->num_phys_encs;
+ dpu_enc->phys_encs[i] = NULL;
+ }
+ }
+
+ if (dpu_enc->num_phys_encs)
+ DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
+ dpu_enc->num_phys_encs);
+ dpu_enc->num_phys_encs = 0;
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ drm_encoder_cleanup(drm_enc);
+ mutex_destroy(&dpu_enc->enc_lock);
+
+ kfree(dpu_enc);
+}
+
+void dpu_encoder_helper_split_config(
+ struct dpu_encoder_phys *phys_enc,
+ enum dpu_intf interface)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct split_pipe_cfg cfg = { 0 };
+ struct dpu_hw_mdp *hw_mdptop;
+ struct msm_display_info *disp_info;
+
+ if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ hw_mdptop = phys_enc->hw_mdptop;
+ disp_info = &dpu_enc->disp_info;
+
+ if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI)
+ return;
+
+ /*
+ * disable split modes since the encoder will be operating as the only
+ * encoder, either for the entire use case (for example, single DSI) or
+ * for this frame in the case of a left/right-only partial update.
+ */
+ if (phys_enc->split_role == ENC_ROLE_SOLO) {
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ return;
+ }
+
+ cfg.en = true;
+ cfg.mode = phys_enc->intf_mode;
+ cfg.intf = interface;
+
+ if (cfg.en && phys_enc->ops.needs_single_flush &&
+ phys_enc->ops.needs_single_flush(phys_enc))
+ cfg.split_flush_en = true;
+
+ if (phys_enc->split_role == ENC_ROLE_MASTER) {
+ DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
+
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ }
+}
+
+static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
+ struct drm_display_mode *adj_mode)
+{
+ struct drm_display_mode *cur_mode;
+
+ if (!connector || !adj_mode)
+ return;
+
+ list_for_each_entry(cur_mode, &connector->modes, head) {
+ if (cur_mode->vdisplay == adj_mode->vdisplay &&
+ cur_mode->hdisplay == adj_mode->hdisplay &&
+ cur_mode->vrefresh == adj_mode->vrefresh) {
+ adj_mode->private = cur_mode->private;
+ adj_mode->private_flags |= cur_mode->private_flags;
+ }
+ }
+}
+
+static struct msm_display_topology dpu_encoder_get_topology(
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_kms *dpu_kms,
+ struct drm_display_mode *mode)
+{
+ struct msm_display_topology topology;
+ int i, intf_count = 0;
+
+ for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
+ if (dpu_enc->phys_encs[i])
+ intf_count++;
+
+ /* use split topology (2 LMs) for vdisplay > MAX_VDISPLAY_SPLIT (1080) */
+ topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1;
+ topology.num_enc = 0;
+ topology.num_intf = intf_count;
+
+ return topology;
+}
+
+static int dpu_encoder_virt_atomic_check(
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ const struct drm_display_mode *mode;
+ struct drm_display_mode *adj_mode;
+ struct msm_display_topology topology;
+ int i = 0;
+ int ret = 0;
+
+ if (!drm_enc || !crtc_state || !conn_state) {
+ DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
+ drm_enc != 0, crtc_state != 0, conn_state != 0);
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ mode = &crtc_state->mode;
+ adj_mode = &crtc_state->adjusted_mode;
+ trace_dpu_enc_atomic_check(DRMID(drm_enc));
+
+ /*
+ * display drivers may populate private fields of the drm display mode
+ * structure while registering possible modes of a connector with DRM.
+ * These private fields are not populated back while DRM invokes
+ * the mode_set callbacks. This module retrieves and populates the
+ * private fields of the given mode.
+ */
+ _dpu_encoder_adjust_mode(conn_state->connector, adj_mode);
+
+ /* perform atomic check on the first physical encoder (master) */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.atomic_check)
+ ret = phys->ops.atomic_check(phys, crtc_state,
+ conn_state);
+ else if (phys && phys->ops.mode_fixup)
+ if (!phys->ops.mode_fixup(phys, mode, adj_mode))
+ ret = -EINVAL;
+
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc,
+ "mode unsupported, phys idx %d\n", i);
+ break;
+ }
+ }
+
+ topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+ /* Reserve dynamic resources now. Indicating AtomicTest phase */
+ if (!ret) {
+ /*
+ * Avoid reserving resources when mode set is pending. Topology
+ * info may not be available to complete reservation.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state)
+ && dpu_enc->mode_set_complete) {
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
+ conn_state, topology, true);
+ dpu_enc->mode_set_complete = false;
+ }
+ }
+
+ if (!ret)
+ drm_mode_set_crtcinfo(adj_mode, 0);
+
+ trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags,
+ adj_mode->private_flags);
+
+ return ret;
+}
+
+static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
+ struct msm_display_info *disp_info)
+{
+ struct dpu_vsync_source_cfg vsync_cfg = { 0 };
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_hw_mdp *hw_mdptop;
+ struct drm_encoder *drm_enc;
+ int i;
+
+ if (!dpu_enc || !disp_info) {
+ DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
+ dpu_enc != NULL, disp_info != NULL);
+ return;
+ } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
+ DPU_ERROR("invalid num phys enc %d/%d\n",
+ dpu_enc->num_phys_encs,
+ (int) ARRAY_SIZE(dpu_enc->hw_pp));
+ return;
+ }
+
+ drm_enc = &dpu_enc->base;
+ /* these pointers are checked in virt_enable_helper */
+ priv = drm_enc->dev->dev_private;
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ }
+
+ hw_mdptop = dpu_kms->hw_mdp;
+ if (!hw_mdptop) {
+ DPU_ERROR("invalid mdptop\n");
+ return;
+ }
+
+ if (hw_mdptop->ops.setup_vsync_source &&
+ disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++)
+ vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
+
+ vsync_cfg.pp_count = dpu_enc->num_phys_encs;
+ if (disp_info->is_te_using_watchdog_timer)
+ vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
+ else
+ vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
+
+ hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
+ }
+}
+
+static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.irq_control)
+ phys->ops.irq_control(phys, enable);
+ }
+
+}
+
+static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
+ bool enable)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_encoder_virt *dpu_enc;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
+
+ if (!dpu_enc->cur_master) {
+ DPU_ERROR("encoder master not set\n");
+ return;
+ }
+
+ if (enable) {
+ /* enable DPU core clks */
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* enable all the irq */
+ _dpu_encoder_irq_control(drm_enc, true);
+
+ } else {
+ /* disable all the irq */
+ _dpu_encoder_irq_control(drm_enc, false);
+
+ /* disable DPU core clks */
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ }
+
+}
+
+static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
+ u32 sw_event)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *disp_thread;
+ bool is_vid_mode = false;
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
+ !drm_enc->crtc) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ is_vid_mode = dpu_enc->disp_info.capabilities &
+ MSM_DISPLAY_CAP_VID_MODE;
+
+ if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
+ DPU_ERROR("invalid crtc index\n");
+ return -EINVAL;
+ }
+ disp_thread = &priv->disp_thread[drm_enc->crtc->index];
+
+ /*
+ * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
+ * STOP events and return early for other events (i.e. wb display).
+ */
+ if (!dpu_enc->idle_pc_supported &&
+ (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
+ sw_event != DPU_ENC_RC_EVENT_STOP &&
+ sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
+ return 0;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
+ dpu_enc->rc_state, "begin");
+
+ switch (sw_event) {
+ case DPU_ENC_RC_EVENT_KICKOFF:
+ /* cancel delayed off work, if any */
+ if (kthread_cancel_delayed_work_sync(
+ &dpu_enc->delayed_off_work))
+ DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&dpu_enc->rc_lock);
+
+ /* return if the resource control is already in ON state */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+ DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in ON state\n",
+ DRMID(drm_enc), sw_event);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
+ dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
+ DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return -EINVAL;
+ }
+
+ if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
+ _dpu_encoder_irq_control(drm_enc, true);
+ else
+ _dpu_encoder_resource_control_helper(drm_enc, true);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "kickoff");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_FRAME_DONE:
+ /*
+ * the rc mutex is not taken here since this event arrives in
+ * interrupt context; locking is also unnecessary because the other
+ * events, such as KICKOFF and STOP, do a wait-for-idle before
+ * executing the resource control
+ */
+ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+ DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ return -EINVAL;
+ }
+
+ /*
+ * schedule off work item only when there are no
+ * frames pending
+ */
+ if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
+ DRM_DEBUG_KMS("id:%d skip schedule work\n",
+ DRMID(drm_enc));
+ return 0;
+ }
+
+ kthread_queue_delayed_work(
+ &disp_thread->worker,
+ &dpu_enc->delayed_off_work,
+ msecs_to_jiffies(dpu_enc->idle_timeout));
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "frame done");
+ break;
+
+ case DPU_ENC_RC_EVENT_PRE_STOP:
+ /* cancel delayed off work, if any */
+ if (kthread_cancel_delayed_work_sync(
+ &dpu_enc->delayed_off_work))
+ DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&dpu_enc->rc_lock);
+
+ if (is_vid_mode &&
+ dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+ _dpu_encoder_irq_control(drm_enc, true);
+ }
+ /* skip if is already OFF or IDLE, resources are off already */
+ else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
+ dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+ DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "pre stop");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_STOP:
+ mutex_lock(&dpu_enc->rc_lock);
+
+ /* return if the resource control is already in OFF state */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
+ DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
+ DRMID(drm_enc), sw_event);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+ DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return -EINVAL;
+ }
+
+ /*
+ * we expect to arrive here only from the IDLE or PRE_OFF states;
+ * in the IDLE state the resources are already disabled
+ */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
+ _dpu_encoder_resource_control_helper(drm_enc, false);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "stop");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_ENTER_IDLE:
+ mutex_lock(&dpu_enc->rc_lock);
+
+ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+ DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ /*
+ * if we are in ON but a frame was just kicked off,
+ * ignore the IDLE event, it's probably a stale timer event
+ */
+ if (dpu_enc->frame_busy_mask[0]) {
+ DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ if (is_vid_mode)
+ _dpu_encoder_irq_control(drm_enc, false);
+ else
+ _dpu_encoder_resource_control_helper(drm_enc, false);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "idle");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ default:
+ DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
+ sw_event);
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "error");
+ break;
+ }
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "end");
+ return 0;
+}
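+
+/*
+ * Summary of the resource-control state machine implemented above, derived
+ * from the event handlers (transitions not listed are either ignored or
+ * rejected with -EINVAL):
+ *
+ *   OFF or IDLE  -- KICKOFF    --> ON
+ *   ON           -- FRAME_DONE --> ON      (schedules delayed_off_work)
+ *   ON           -- ENTER_IDLE --> IDLE
+ *   ON (or IDLE in video mode) -- PRE_STOP --> PRE_OFF
+ *   PRE_OFF or IDLE -- STOP    --> OFF
+ */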
+
+static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct list_head *connector_list;
+ struct drm_connector *conn = NULL, *conn_iter;
+ struct dpu_rm_hw_iter pp_iter;
+ struct msm_display_topology topology;
+ enum dpu_rm_topology_name topology_name;
+ int i = 0, ret;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ connector_list = &dpu_kms->dev->mode_config.connector_list;
+
+ trace_dpu_enc_mode_set(DRMID(drm_enc));
+
+ list_for_each_entry(conn_iter, connector_list, head)
+ if (conn_iter->encoder == drm_enc)
+ conn = conn_iter;
+
+ if (!conn) {
+ DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n");
+ return;
+ } else if (!conn->state) {
+ DPU_ERROR_ENC(dpu_enc, "invalid connector state\n");
+ return;
+ }
+
+ topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+ /* Reserve dynamic resources now. Indicating non-AtomicTest phase */
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
+ conn->state, topology, false);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc,
+ "failed to reserve hw resources, %d\n", ret);
+ return;
+ }
+
+ dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ dpu_enc->hw_pp[i] = NULL;
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter))
+ break;
+ dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
+ }
+
+ topology_name = dpu_rm_get_topology_name(topology);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys) {
+ if (!dpu_enc->hw_pp[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "invalid pingpong block for the encoder\n");
+ return;
+ }
+ phys->hw_pp = dpu_enc->hw_pp[i];
+ phys->connector = conn->state->connector;
+ phys->topology_name = topology_name;
+ if (phys->ops.mode_set)
+ phys->ops.mode_set(phys, mode, adj_mode);
+ }
+ }
+
+ dpu_enc->mode_set_complete = true;
+}
+
+static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ if (!dpu_enc || !dpu_enc->cur_master) {
+ DPU_ERROR("invalid dpu encoder/master\n");
+ return;
+ }
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
+ dpu_enc->cur_master->hw_mdptop &&
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
+ dpu_enc->cur_master->hw_mdptop);
+
+ if (dpu_enc->cur_master->hw_mdptop &&
+ dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
+ dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
+ dpu_enc->cur_master->hw_mdptop,
+ dpu_kms->catalog);
+
+ _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
+}
+
+void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
+ phys->ops.restore(phys);
+ }
+
+ if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
+ dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
+
+ _dpu_encoder_virt_enable_helper(drm_enc);
+}
+
+static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i, ret = 0;
+ struct drm_display_mode *cur_mode = NULL;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
+
+ trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
+ cur_mode->vdisplay);
+
+ dpu_enc->cur_master = NULL;
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
+ DPU_DEBUG_ENC(dpu_enc, "master is now idx %d\n", i);
+ dpu_enc->cur_master = phys;
+ break;
+ }
+ }
+
+ if (!dpu_enc->cur_master) {
+ DPU_ERROR("virt encoder has no master! num_phys %d\n", i);
+ return;
+ }
+
+ ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
+ ret);
+ return;
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys)
+ continue;
+
+ if (phys != dpu_enc->cur_master) {
+ if (phys->ops.enable)
+ phys->ops.enable(phys);
+ }
+
+ if (dpu_enc->misr_enable && (dpu_enc->disp_info.capabilities &
+ MSM_DISPLAY_CAP_VID_MODE) && phys->ops.setup_misr)
+ phys->ops.setup_misr(phys, true,
+ dpu_enc->misr_frame_count);
+ }
+
+ if (dpu_enc->cur_master->ops.enable)
+ dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
+
+ _dpu_encoder_virt_enable_helper(drm_enc);
+}
+
+static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct drm_display_mode *mode;
+ int i = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ } else if (!drm_enc->dev) {
+ DPU_ERROR("invalid dev\n");
+ return;
+ } else if (!drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid dev_private\n");
+ return;
+ }
+
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ trace_dpu_enc_disable(DRMID(drm_enc));
+
+ /* wait for idle */
+ dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.disable)
+ phys->ops.disable(phys);
+ }
+
+ /* after phys waits for frame-done, should be no more frames pending */
+ if (atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
+ del_timer_sync(&dpu_enc->frame_done_timer);
+ }
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i])
+ dpu_enc->phys_encs[i]->connector = NULL;
+ }
+
+ dpu_enc->cur_master = NULL;
+
+ DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
+
+ dpu_rm_release(&dpu_kms->rm, drm_enc);
+}
+
+static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
+ enum dpu_intf_type type, u32 controller_id)
+{
+ int i = 0;
+
+ for (i = 0; i < catalog->intf_count; i++) {
+ if (catalog->intf[i].type == type
+ && catalog->intf[i].controller_id == controller_id) {
+ return catalog->intf[i].id;
+ }
+ }
+
+ return INTF_MAX;
+}
+
+static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ unsigned long lock_flags;
+
+ if (!drm_enc || !phy_enc)
+ return;
+
+ DPU_ATRACE_BEGIN("encoder_vblank_callback");
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ if (dpu_enc->crtc_vblank_cb)
+ dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ atomic_inc(&phy_enc->vsync_cnt);
+ DPU_ATRACE_END("encoder_vblank_callback");
+}
+
+static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc)
+{
+ if (!phy_enc)
+ return;
+
+ DPU_ATRACE_BEGIN("encoder_underrun_callback");
+ atomic_inc(&phy_enc->underrun_cnt);
+ trace_dpu_enc_underrun_cb(DRMID(drm_enc),
+ atomic_read(&phy_enc->underrun_cnt));
+ DPU_ATRACE_END("encoder_underrun_callback");
+}
+
+void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
+ void (*vbl_cb)(void *), void *vbl_data)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+ bool enable;
+ int i;
+
+ enable = vbl_cb ? true : false;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ dpu_enc->crtc_vblank_cb = vbl_cb;
+ dpu_enc->crtc_vblank_cb_data = vbl_data;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.control_vblank_irq)
+ phys->ops.control_vblank_irq(phys, enable);
+ }
+}
+
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
+ void (*frame_event_cb)(void *, u32 event),
+ void *frame_event_cb_data)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+ bool enable;
+
+ enable = frame_event_cb ? true : false;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ dpu_enc->crtc_frame_event_cb = frame_event_cb;
+ dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+static void dpu_encoder_frame_done_callback(
+ struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *ready_phys, u32 event)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned int i;
+
+ if (event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR
+ | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (!dpu_enc->frame_busy_mask[0]) {
+			/*
+			 * Suppress a frame_done event that has no waiter;
+			 * this is likely caused by autorefresh.
+			 */
+ trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
+ event, ready_phys->intf_idx);
+ return;
+ }
+
+ /* One of the physical encoders has become idle */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i] == ready_phys) {
+ clear_bit(i, dpu_enc->frame_busy_mask);
+ trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
+ dpu_enc->frame_busy_mask[0]);
+ }
+ }
+
+ if (!dpu_enc->frame_busy_mask[0]) {
+ atomic_set(&dpu_enc->frame_done_timeout, 0);
+ del_timer(&dpu_enc->frame_done_timer);
+
+ dpu_encoder_resource_control(drm_enc,
+ DPU_ENC_RC_EVENT_FRAME_DONE);
+
+ if (dpu_enc->crtc_frame_event_cb)
+ dpu_enc->crtc_frame_event_cb(
+ dpu_enc->crtc_frame_event_cb_data,
+ event);
+ }
+ } else {
+ if (dpu_enc->crtc_frame_event_cb)
+ dpu_enc->crtc_frame_event_cb(
+ dpu_enc->crtc_frame_event_cb_data, event);
+ }
+}
+
+static void dpu_encoder_off_work(struct kthread_work *work)
+{
+ struct dpu_encoder_virt *dpu_enc = container_of(work,
+ struct dpu_encoder_virt, delayed_off_work.work);
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu encoder\n");
+ return;
+ }
+
+ dpu_encoder_resource_control(&dpu_enc->base,
+ DPU_ENC_RC_EVENT_ENTER_IDLE);
+
+ dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
+ DPU_ENCODER_FRAME_EVENT_IDLE);
+}
+
+/**
+ * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
+ * @drm_enc: Pointer to drm encoder structure
+ * @phys: Pointer to physical encoder structure
+ * @extra_flush_bits: Additional bit mask to include in flush trigger
+ */
+static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
+{
+ struct dpu_hw_ctl *ctl;
+ int pending_kickoff_cnt;
+ u32 ret = UINT_MAX;
+
+ if (!drm_enc || !phys) {
+		DPU_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
+				drm_enc != NULL, phys != NULL);
+ return;
+ }
+
+ if (!phys->hw_pp) {
+ DPU_ERROR("invalid pingpong hw\n");
+ return;
+ }
+
+ ctl = phys->hw_ctl;
+ if (!ctl || !ctl->ops.trigger_flush) {
+ DPU_ERROR("missing trigger cb\n");
+ return;
+ }
+
+ pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+
+ if (extra_flush_bits && ctl->ops.update_pending_flush)
+ ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+
+ ctl->ops.trigger_flush(ctl);
+
+ if (ctl->ops.get_pending_flush)
+ ret = ctl->ops.get_pending_flush(ctl);
+
+ trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
+ pending_kickoff_cnt, ctl->idx, ret);
+}
+
+/**
+ * _dpu_encoder_trigger_start - trigger start for a physical encoder
+ * @phys: Pointer to physical encoder structure
+ */
+static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+{
+ if (!phys) {
+ DPU_ERROR("invalid argument(s)\n");
+ return;
+ }
+
+ if (!phys->hw_pp) {
+ DPU_ERROR("invalid pingpong hw\n");
+ return;
+ }
+
+ if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
+ phys->ops.trigger_start(phys);
+}
+
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ ctl = phys_enc->hw_ctl;
+ if (ctl && ctl->ops.trigger_start) {
+ ctl->ops.trigger_start(ctl);
+ trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
+ }
+}
+
+static int dpu_encoder_helper_wait_event_timeout(
+ int32_t drm_id,
+ int32_t hw_id,
+ struct dpu_encoder_wait_info *info)
+{
+ int rc = 0;
+ s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
+	s64 wait_jiffies = msecs_to_jiffies(info->timeout_ms);
+ s64 time;
+
+ do {
+		rc = wait_event_timeout(*(info->wq),
+				atomic_read(info->atomic_cnt) == 0,
+				wait_jiffies);
+ time = ktime_to_ms(ktime_get());
+
+ trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time,
+ expected_time,
+ atomic_read(info->atomic_cnt));
+		/*
+		 * If we timed out (rc == 0) but the counter is still set and
+		 * the deadline has not yet passed, wait again.
+		 */
+ } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
+ (time < expected_time));
+
+ return rc;
+}
+
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_hw_ctl *ctl;
+ int rc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ ctl = phys_enc->hw_ctl;
+
+ if (!ctl || !ctl->ops.reset)
+ return;
+
+ DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
+ ctl->idx);
+
+ rc = ctl->ops.reset(ctl);
+ if (rc) {
+ DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+/**
+ * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
+ * Iterate through the physical encoders and perform consolidated flush
+ * and/or control start triggering as needed. This is done in the virtual
+ * encoder rather than the individual physical ones in order to handle
+ * use cases that require visibility into multiple physical encoders at
+ * a time.
+ * @dpu_enc: Pointer to virtual encoder structure
+ */
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
+{
+ struct dpu_hw_ctl *ctl;
+ uint32_t i, pending_flush;
+ unsigned long lock_flags;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ pending_flush = 0x0;
+
+ /* update pending counts and trigger kickoff ctl flush atomically */
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+
+ /* don't perform flush/start operations for slave encoders */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || phys->enable_state == DPU_ENC_DISABLED)
+ continue;
+
+ ctl = phys->hw_ctl;
+ if (!ctl)
+ continue;
+
+ if (phys->split_role != ENC_ROLE_SLAVE)
+ set_bit(i, dpu_enc->frame_busy_mask);
+ if (!phys->ops.needs_single_flush ||
+ !phys->ops.needs_single_flush(phys))
+ _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
+ else if (ctl->ops.get_pending_flush)
+ pending_flush |= ctl->ops.get_pending_flush(ctl);
+ }
+
+ /* for split flush, combine pending flush masks and send to master */
+ if (pending_flush && dpu_enc->cur_master) {
+ _dpu_encoder_trigger_flush(
+ &dpu_enc->base,
+ dpu_enc->cur_master,
+ pending_flush);
+ }
+
+ _dpu_encoder_trigger_start(dpu_enc->cur_master);
+
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
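+
+/*
+ * Worked example of the split-flush path above, assuming a hypothetical
+ * dual-DSI split panel: the slave phys encoder reports
+ * needs_single_flush(), so its pending flush bits are OR-ed into
+ * pending_flush instead of being triggered directly, and the combined
+ * mask is then flushed once through the master's CTL block.
+ */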
+
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ unsigned int i;
+ struct dpu_hw_ctl *ctl;
+ struct msm_display_info *disp_info;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ disp_info = &dpu_enc->disp_info;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->hw_ctl) {
+ ctl = phys->hw_ctl;
+ if (ctl->ops.clear_pending_flush)
+ ctl->ops.clear_pending_flush(ctl);
+
+ /* update only for command mode primary ctl */
+ if ((phys == dpu_enc->cur_master) &&
+ (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+ && ctl->ops.trigger_pending)
+ ctl->ops.trigger_pending(ctl);
+ }
+ }
+}
+
+static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
+ struct drm_display_mode *mode)
+{
+ u64 pclk_rate;
+ u32 pclk_period;
+ u32 line_time;
+
+ /*
+ * For linetime calculation, only operate on master encoder.
+ */
+ if (!dpu_enc->cur_master)
+ return 0;
+
+ if (!dpu_enc->cur_master->ops.get_line_count) {
+ DPU_ERROR("get_line_count function not defined\n");
+ return 0;
+ }
+
+ pclk_rate = mode->clock; /* pixel clock in kHz */
+ if (pclk_rate == 0) {
+ DPU_ERROR("pclk is 0, cannot calculate line time\n");
+ return 0;
+ }
+
+	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate); /* in ps */
+ if (pclk_period == 0) {
+ DPU_ERROR("pclk period is 0\n");
+ return 0;
+ }
+
+ /*
+ * Line time calculation based on Pixel clock and HTOTAL.
+ * Final unit is in ns.
+ */
+ line_time = (pclk_period * mode->htotal) / 1000;
+ if (line_time == 0) {
+ DPU_ERROR("line time calculation is 0\n");
+ return 0;
+ }
+
+ DPU_DEBUG_ENC(dpu_enc,
+ "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
+ pclk_rate, pclk_period, line_time);
+
+ return line_time;
+}
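+
+/*
+ * Worked example with hypothetical 1080p60 timings (values not taken from
+ * this patch): mode->clock = 148500 kHz gives
+ * pclk_period = DIV_ROUND_UP(10^9, 148500) = 6735 ps; with htotal = 2200
+ * the line time is 6735 * 2200 / 1000 = 14817 ns, i.e. ~14.8 us per line.
+ */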
+
+static int _dpu_encoder_wakeup_time(struct drm_encoder *drm_enc,
+ ktime_t *wakeup_time)
+{
+ struct drm_display_mode *mode;
+ struct dpu_encoder_virt *dpu_enc;
+ u32 cur_line;
+ u32 line_time;
+ u32 vtotal, time_to_vsync;
+ ktime_t cur_time;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ if (!drm_enc->crtc || !drm_enc->crtc->state) {
+ DPU_ERROR("crtc/crtc state object is NULL\n");
+ return -EINVAL;
+ }
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
+ line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
+ if (!line_time)
+ return -EINVAL;
+
+ cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
+
+ vtotal = mode->vtotal;
+ if (cur_line >= vtotal)
+ time_to_vsync = line_time * vtotal;
+ else
+ time_to_vsync = line_time * (vtotal - cur_line);
+
+ if (time_to_vsync == 0) {
+ DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
+ vtotal);
+ return -EINVAL;
+ }
+
+ cur_time = ktime_get();
+ *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
+
+ DPU_DEBUG_ENC(dpu_enc,
+ "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
+ cur_line, vtotal, time_to_vsync,
+ ktime_to_ms(cur_time),
+ ktime_to_ms(*wakeup_time));
+ return 0;
+}
+
+static void dpu_encoder_vsync_event_handler(struct timer_list *t)
+{
+ struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+ vsync_event_timer);
+ struct drm_encoder *drm_enc = &dpu_enc->base;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *event_thread;
+
+ if (!drm_enc->dev || !drm_enc->dev->dev_private ||
+ !drm_enc->crtc) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ priv = drm_enc->dev->dev_private;
+
+ if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ DPU_ERROR("invalid crtc index\n");
+ return;
+ }
+ event_thread = &priv->event_thread[drm_enc->crtc->index];
+ if (!event_thread) {
+ DPU_ERROR("event_thread not found for crtc:%d\n",
+ drm_enc->crtc->index);
+ return;
+ }
+
+ del_timer(&dpu_enc->vsync_event_timer);
+}
+
+static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
+{
+ struct dpu_encoder_virt *dpu_enc = container_of(work,
+ struct dpu_encoder_virt, vsync_event_work);
+ ktime_t wakeup_time;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu encoder\n");
+ return;
+ }
+
+ if (_dpu_encoder_wakeup_time(&dpu_enc->base, &wakeup_time))
+ return;
+
+ trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
+ mod_timer(&dpu_enc->vsync_event_timer,
+ nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+}
+
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ bool needs_hw_reset = false;
+ unsigned int i;
+
+ if (!drm_enc || !params) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
+
+ /* prepare for next kickoff, may include waiting on previous kickoff */
+ DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys) {
+ if (phys->ops.prepare_for_kickoff)
+ phys->ops.prepare_for_kickoff(phys, params);
+ if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
+ needs_hw_reset = true;
+ }
+ }
+ DPU_ATRACE_END("enc_prepare_for_kickoff");
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+
+ /* if any phys needs reset, reset all phys, in-order */
+ if (needs_hw_reset) {
+ trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.hw_reset)
+ phys->ops.hw_reset(phys);
+ }
+ }
+}
+
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ ktime_t wakeup_time;
+ unsigned int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DPU_ATRACE_BEGIN("encoder_kickoff");
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ trace_dpu_enc_kickoff(DRMID(drm_enc));
+
+ atomic_set(&dpu_enc->frame_done_timeout,
+ DPU_FRAME_DONE_TIMEOUT * 1000 /
+ drm_enc->crtc->state->adjusted_mode.vrefresh);
+ mod_timer(&dpu_enc->frame_done_timer, jiffies +
+ ((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
+
+ /* All phys encs are ready to go, trigger the kickoff */
+ _dpu_encoder_kickoff_phys(dpu_enc);
+
+ /* allow phys encs to handle any post-kickoff business */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.handle_post_kickoff)
+ phys->ops.handle_post_kickoff(phys);
+ }
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
+ !_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
+ trace_dpu_enc_early_kickoff(DRMID(drm_enc),
+ ktime_to_ms(wakeup_time));
+ mod_timer(&dpu_enc->vsync_event_timer,
+ nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+ }
+
+ DPU_ATRACE_END("encoder_kickoff");
+}
+
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.prepare_commit)
+ phys->ops.prepare_commit(phys);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_encoder_status_show(struct seq_file *s, void *data)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ if (!s || !s->private)
+ return -EINVAL;
+
+ dpu_enc = s->private;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys)
+ continue;
+
+ seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
+ phys->intf_idx - INTF_0,
+ atomic_read(&phys->vsync_cnt),
+ atomic_read(&phys->underrun_cnt));
+
+ switch (phys->intf_mode) {
+ case INTF_MODE_VIDEO:
+ seq_puts(s, "mode: video\n");
+ break;
+ case INTF_MODE_CMD:
+ seq_puts(s, "mode: command\n");
+ break;
+ default:
+ seq_puts(s, "mode: ???\n");
+ break;
+ }
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ return 0;
+}
+
+static int _dpu_encoder_debugfs_status_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, _dpu_encoder_status_show, inode->i_private);
+}
+
+static ssize_t _dpu_encoder_misr_setup(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i = 0, rc;
+ char buf[MISR_BUFF_SIZE + 1];
+ size_t buff_copy;
+ u32 frame_count, enable;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_enc = file->private_data;
+
+ buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+ if (copy_from_user(buf, user_buf, buff_copy))
+ return -EINVAL;
+
+ buf[buff_copy] = 0; /* end of string */
+
+ if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+ return -EINVAL;
+
+ rc = _dpu_encoder_power_enable(dpu_enc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ dpu_enc->misr_enable = enable;
+ dpu_enc->misr_frame_count = frame_count;
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || !phys->ops.setup_misr)
+ continue;
+
+ phys->ops.setup_misr(phys, enable, frame_count);
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+ _dpu_encoder_power_enable(dpu_enc, false);
+
+ return count;
+}
+
+static ssize_t _dpu_encoder_misr_read(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i = 0, len = 0;
+ char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+ int rc;
+
+ if (*ppos)
+ return 0;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_enc = file->private_data;
+
+ rc = _dpu_encoder_power_enable(dpu_enc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ if (!dpu_enc->misr_enable) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "disabled\n");
+ goto buff_check;
+ } else if (dpu_enc->disp_info.capabilities &
+ ~MSM_DISPLAY_CAP_VID_MODE) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "unsupported\n");
+ goto buff_check;
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || !phys->ops.collect_misr)
+ continue;
+
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "Intf idx:%d\n", phys->intf_idx - INTF_0);
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+ phys->ops.collect_misr(phys));
+ }
+
+buff_check:
+ if (count <= len) {
+ len = 0;
+ goto end;
+ }
+
+ if (copy_to_user(user_buff, buf, len)) {
+ len = -EFAULT;
+ goto end;
+ }
+
+ *ppos += len; /* increase offset */
+
+end:
+ mutex_unlock(&dpu_enc->enc_lock);
+ _dpu_encoder_power_enable(dpu_enc, false);
+ return len;
+}
+
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int i;
+
+ static const struct file_operations debugfs_status_fops = {
+ .open = _dpu_encoder_debugfs_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+
+ static const struct file_operations debugfs_misr_fops = {
+ .open = simple_open,
+ .read = _dpu_encoder_misr_read,
+ .write = _dpu_encoder_misr_setup,
+ };
+
+ char name[DPU_NAME_SIZE];
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid encoder or kms\n");
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
+
+ /* create overall sub-directory for the encoder */
+ dpu_enc->debugfs_root = debugfs_create_dir(name,
+ drm_enc->dev->primary->debugfs_root);
+ if (!dpu_enc->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_file("status", 0600,
+ dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
+
+ debugfs_create_file("misr_data", 0600,
+ dpu_enc->debugfs_root, dpu_enc, &debugfs_misr_fops);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++)
+ if (dpu_enc->phys_encs[i] &&
+ dpu_enc->phys_encs[i]->ops.late_register)
+ dpu_enc->phys_encs[i]->ops.late_register(
+ dpu_enc->phys_encs[i],
+ dpu_enc->debugfs_root);
+
+ return 0;
+}
+
+static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+
+ if (!drm_enc)
+ return;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ debugfs_remove_recursive(dpu_enc->debugfs_root);
+}
+#else
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+ return 0;
+}
+
+static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+{
+}
+#endif
+
+static int dpu_encoder_late_register(struct drm_encoder *encoder)
+{
+ return _dpu_encoder_init_debugfs(encoder);
+}
+
+static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
+{
+ _dpu_encoder_destroy_debugfs(encoder);
+}
+
+static int dpu_encoder_virt_add_phys_encs(
+ u32 display_caps,
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_enc_phys_init_params *params)
+{
+ struct dpu_encoder_phys *enc = NULL;
+
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ /*
+ * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
+ * in this function, check up-front.
+ */
+ if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
+ ARRAY_SIZE(dpu_enc->phys_encs)) {
+ DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
+ dpu_enc->num_phys_encs);
+ return -EINVAL;
+ }
+
+ if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
+ enc = dpu_encoder_phys_vid_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
+ PTR_ERR(enc));
+			return enc == NULL ? -EINVAL : PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ }
+
+ if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
+ enc = dpu_encoder_phys_cmd_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
+ PTR_ERR(enc));
+			return enc == NULL ? -EINVAL : PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ }
+
+ return 0;
+}
+
+static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
+ .handle_vblank_virt = dpu_encoder_vblank_callback,
+ .handle_underrun_virt = dpu_encoder_underrun_callback,
+ .handle_frame_done = dpu_encoder_frame_done_callback,
+};
+
+static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
+ struct dpu_kms *dpu_kms,
+ struct msm_display_info *disp_info,
+ int *drm_enc_mode)
+{
+ int ret = 0;
+ int i = 0;
+ enum dpu_intf_type intf_type;
+ struct dpu_enc_phys_init_params phys_params;
+
+ if (!dpu_enc || !dpu_kms) {
+		DPU_ERROR("invalid arg(s), enc %d kms %d\n",
+				dpu_enc != NULL, dpu_kms != NULL);
+ return -EINVAL;
+ }
+
+ memset(&phys_params, 0, sizeof(phys_params));
+ phys_params.dpu_kms = dpu_kms;
+ phys_params.parent = &dpu_enc->base;
+ phys_params.parent_ops = &dpu_encoder_parent_ops;
+ phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
+
+ DPU_DEBUG("\n");
+
+ if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
+ *drm_enc_mode = DRM_MODE_ENCODER_DSI;
+ intf_type = INTF_DSI;
+ } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
+ *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+ intf_type = INTF_HDMI;
+ } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+ intf_type = INTF_DP;
+ } else {
+ DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
+ return -EINVAL;
+ }
+
+ WARN_ON(disp_info->num_of_h_tiles < 1);
+
+ dpu_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
+
+	DPU_DEBUG("disp_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
+
+ if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
+ (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
+ dpu_enc->idle_pc_supported =
+ dpu_kms->catalog->caps->has_idle_pc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
+ /*
+ * Left-most tile is at index 0, content is controller id
+ * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
+ * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
+ */
+ u32 controller_id = disp_info->h_tile_instance[i];
+
+ if (disp_info->num_of_h_tiles > 1) {
+ if (i == 0)
+ phys_params.split_role = ENC_ROLE_MASTER;
+ else
+ phys_params.split_role = ENC_ROLE_SLAVE;
+ } else {
+ phys_params.split_role = ENC_ROLE_SOLO;
+ }
+
+ DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
+ i, controller_id, phys_params.split_role);
+
+ phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
+ intf_type,
+ controller_id);
+ if (phys_params.intf_idx == INTF_MAX) {
+ DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n",
+ intf_type, controller_id);
+ ret = -EINVAL;
+ }
+
+ if (!ret) {
+ ret = dpu_encoder_virt_add_phys_encs(disp_info->capabilities,
+ dpu_enc,
+ &phys_params);
+ if (ret)
+ DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
+ }
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys) {
+ atomic_set(&phys->vsync_cnt, 0);
+ atomic_set(&phys->underrun_cnt, 0);
+ }
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ return ret;
+}
+
+static void dpu_encoder_frame_done_timeout(struct timer_list *t)
+{
+ struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+ frame_done_timer);
+ struct drm_encoder *drm_enc = &dpu_enc->base;
+ struct msm_drm_private *priv;
+ u32 event;
+
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+ priv = drm_enc->dev->dev_private;
+
+ if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
+ DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
+ DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
+ return;
+ } else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
+ return;
+ }
+
+ DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
+
+ event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
+ dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
+}
+
+static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
+ .mode_set = dpu_encoder_virt_mode_set,
+ .disable = dpu_encoder_virt_disable,
+ .enable = dpu_kms_encoder_enable,
+ .atomic_check = dpu_encoder_virt_atomic_check,
+
+ /* This is called by dpu_kms_encoder_enable */
+ .commit = dpu_encoder_virt_enable,
+};
+
+static const struct drm_encoder_funcs dpu_encoder_funcs = {
+ .destroy = dpu_encoder_destroy,
+ .late_register = dpu_encoder_late_register,
+ .early_unregister = dpu_encoder_early_unregister,
+};
+
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+ struct msm_display_info *disp_info)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_encoder *drm_enc = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int drm_enc_mode = DRM_MODE_ENCODER_NONE;
+ int ret = 0;
+
+ dpu_enc = to_dpu_encoder_virt(enc);
+
+ mutex_init(&dpu_enc->enc_lock);
+ ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info,
+ &drm_enc_mode);
+ if (ret)
+ goto fail;
+
+ dpu_enc->cur_master = NULL;
+ spin_lock_init(&dpu_enc->enc_spinlock);
+
+ atomic_set(&dpu_enc->frame_done_timeout, 0);
+ timer_setup(&dpu_enc->frame_done_timer,
+ dpu_encoder_frame_done_timeout, 0);
+
+ if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI)
+ timer_setup(&dpu_enc->vsync_event_timer,
+ dpu_encoder_vsync_event_handler,
+ 0);
+
+ mutex_init(&dpu_enc->rc_lock);
+ kthread_init_delayed_work(&dpu_enc->delayed_off_work,
+ dpu_encoder_off_work);
+ dpu_enc->idle_timeout = IDLE_TIMEOUT;
+
+ kthread_init_work(&dpu_enc->vsync_event_work,
+ dpu_encoder_vsync_event_work_handler);
+
+ memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
+
+ DPU_DEBUG_ENC(dpu_enc, "created\n");
+
+ return ret;
+
+fail:
+ DPU_ERROR("failed to create encoder\n");
+ if (drm_enc)
+ dpu_encoder_destroy(drm_enc);
+
+ return ret;
+}
+
+struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
+ int drm_enc_mode)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int rc = 0;
+
+ dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
+ if (!dpu_enc)
+		return ERR_PTR(-ENOMEM);
+
+ rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
+ drm_enc_mode, NULL);
+ if (rc) {
+ devm_kfree(dev->dev, dpu_enc);
+ return ERR_PTR(rc);
+ }
+
+ drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+
+ return &dpu_enc->base;
+}
+
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
+ enum msm_event_wait event)
+{
+ int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i, ret = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (!phys)
+			continue;
+
+ switch (event) {
+ case MSM_ENC_COMMIT_DONE:
+ fn_wait = phys->ops.wait_for_commit_done;
+ break;
+ case MSM_ENC_TX_COMPLETE:
+ fn_wait = phys->ops.wait_for_tx_complete;
+ break;
+ case MSM_ENC_VBLANK:
+ fn_wait = phys->ops.wait_for_vblank;
+ break;
+ default:
+ DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
+ event);
+ return -EINVAL;
+		}
+
+		if (fn_wait) {
+ DPU_ATRACE_BEGIN("wait_for_completion_event");
+ ret = fn_wait(phys);
+ DPU_ATRACE_END("wait_for_completion_event");
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i;
+
+ if (!encoder) {
+ DPU_ERROR("invalid encoder\n");
+ return INTF_MODE_NONE;
+ }
+ dpu_enc = to_dpu_encoder_virt(encoder);
+
+ if (dpu_enc->cur_master)
+ return dpu_enc->cur_master->intf_mode;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys)
+ return phys->intf_mode;
+ }
+
+ return INTF_MODE_NONE;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
new file mode 100644
index 000000000000..60f809fc7c13
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_ENCODER_H__
+#define __DPU_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+#include "dpu_hw_mdss.h"
+
+#define DPU_ENCODER_FRAME_EVENT_DONE BIT(0)
+#define DPU_ENCODER_FRAME_EVENT_ERROR BIT(1)
+#define DPU_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
+#define DPU_ENCODER_FRAME_EVENT_IDLE BIT(3)
+
+#define IDLE_TIMEOUT (66 - 16/2)
+
+/**
+ * struct dpu_encoder_hw_resources - hardware resources used by an encoder
+ * @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused
+ * @needs_cdm: Encoder requests a CDM based on pixel format conversion needs
+ * @display_num_of_h_tiles: Number of horizontal tiles in case of split
+ *				interface
+ */
+struct dpu_encoder_hw_resources {
+ enum dpu_intf_mode intfs[INTF_MAX];
+ bool needs_cdm;
+ u32 display_num_of_h_tiles;
+};
+
+/**
+ * dpu_encoder_kickoff_params - info encoder requires at kickoff
+ * @affected_displays: bitmask, bit set means the ROI of the commit lies within
+ * the bounds of the physical display at the bit index
+ */
+struct dpu_encoder_kickoff_params {
+ unsigned long affected_displays;
+};
+
+/**
+ * dpu_encoder_get_hw_resources - Populate table of required hardware resources
+ * @encoder: encoder pointer
+ * @hw_res: resource table to populate with encoder required resources
+ * @conn_state: report hw reqs based on this proposed connector state
+ */
+void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state);
+
+/**
+ * dpu_encoder_register_vblank_callback - provide callback to encoder that
+ * will be called on the next vblank.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister and disable IRQs
+ * @data: user data provided to callback
+ */
+void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
+ void (*cb)(void *), void *data);
+
+/**
+ * dpu_encoder_register_frame_event_callback - provide callback to encoder that
+ * will be called after the request is complete, or other events.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister
+ * @data: user data provided to callback
+ */
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+ void (*cb)(void *, u32), void *data);
+
+/**
+ * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ * path (i.e. ctl flush and start) at next appropriate time.
+ * Immediately: if no previous commit is outstanding.
+ * Delayed: Block until next trigger can be issued.
+ * @encoder: encoder pointer
+ * @params: kickoff time parameters
+ */
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
+ struct dpu_encoder_kickoff_params *params);
+
+/**
+ * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ * kickoff and trigger ctl prepare for command mode displays.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
+ * (i.e. ctl flush and start) immediately.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_wait_for_event - Waits for encoder events
+ * @encoder: encoder pointer
+ * @event: event to wait for
+ * MSM_ENC_COMMIT_DONE - Wait for hardware to have flushed the current pending
+ * frames to hardware at a vblank or ctl_start
+ * Encoders will map this differently depending on the
+ * panel type.
+ * vid mode -> vsync_irq
+ * cmd mode -> ctl_start
+ * MSM_ENC_TX_COMPLETE - Wait for the hardware to transfer all the pixels to
+ * the panel. Encoders will map this differently
+ * depending on the panel type.
+ * vid mode -> vsync_irq
+ * cmd mode -> pp_done
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
+ enum msm_event_wait event);
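+
+#if 0	/* illustration only: a minimal, hypothetical commit-path sketch */
+/*
+ * None of this is part of the API; example_commit() and its parameter
+ * values are made up to show how prepare/kickoff/wait pair together.
+ */
+static void example_commit(struct drm_encoder *enc)
+{
+	struct dpu_encoder_kickoff_params params = {
+		.affected_displays = BIT(0),	/* ROI touches display 0 only */
+	};
+
+	dpu_encoder_prepare_for_kickoff(enc, &params);	/* may wait on prior frame */
+	dpu_encoder_kickoff(enc);			/* ctl flush and start */
+	dpu_encoder_wait_for_event(enc, MSM_ENC_COMMIT_DONE);
+}
+#endif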
+
+/**
+ * dpu_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_virt_restore - restore the encoder configs
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_init - initialize virtual encoder object
+ * @dev: Pointer to drm device structure
+ * @drm_enc_mode: corresponding drm_encoder mode (DRM_MODE_ENCODER_*)
+ * Returns: Pointer to newly created drm encoder
+ */
+struct drm_encoder *dpu_encoder_init(
+ struct drm_device *dev,
+ int drm_enc_mode);
+
+/**
+ * dpu_encoder_setup - setup dpu_encoder for the display probed
+ * @dev: Pointer to drm device structure
+ * @enc: Pointer to the drm_encoder
+ * @disp_info: Pointer to the display info
+ */
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+ struct msm_display_info *disp_info);
+
+/**
+ * dpu_encoder_prepare_commit - prepare encoder at the very beginning of an
+ * atomic commit, before any registers are written
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_set_idle_timeout - set the idle timeout for video
+ * and command mode encoders.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @idle_timeout: idle timeout duration in milliseconds
+ */
+void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
+ u32 idle_timeout);
+
+#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
new file mode 100644
index 000000000000..c7df8aad6613
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_ENCODER_PHYS_H__
+#define __DPU_ENCODER_PHYS_H__
+
+#include <linux/jiffies.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_encoder.h"
+
+#define DPU_ENCODER_NAME_MAX 16
+
+/* wait for at most 2 vsyncs at the lowest refresh rate (24 Hz): 2 * 1000 / 24 ~= 83.3 ms */
+#define KICKOFF_TIMEOUT_MS 84
+#define KICKOFF_TIMEOUT_JIFFIES msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
+
+/**
+ * enum dpu_enc_split_role - Role this physical encoder will play in a
+ * split-panel configuration, where one panel is master, and others slaves.
+ * Masters have extra responsibilities, like managing the VBLANK IRQ.
+ * @ENC_ROLE_SOLO: This is the one and only panel. This encoder is master.
+ * @ENC_ROLE_MASTER: This encoder is the master of a split panel config.
+ * @ENC_ROLE_SLAVE: This encoder is not the master of a split panel config.
+ */
+enum dpu_enc_split_role {
+ ENC_ROLE_SOLO,
+ ENC_ROLE_MASTER,
+ ENC_ROLE_SLAVE,
+};
+
+/**
+ * enum dpu_enc_enable_state - current enabled state of the physical encoder
+ * @DPU_ENC_DISABLING: Encoder transitioning to disable state
+ * Events bounding transition are encoder type specific
+ * @DPU_ENC_DISABLED: Encoder is disabled
+ * @DPU_ENC_ENABLING: Encoder transitioning to enabled
+ * Events bounding transition are encoder type specific
+ * @DPU_ENC_ENABLED: Encoder is enabled
+ * @DPU_ENC_ERR_NEEDS_HW_RESET: Encoder is enabled, but requires a hw_reset
+ * to recover from a previous error
+ */
+enum dpu_enc_enable_state {
+ DPU_ENC_DISABLING,
+ DPU_ENC_DISABLED,
+ DPU_ENC_ENABLING,
+ DPU_ENC_ENABLED,
+ DPU_ENC_ERR_NEEDS_HW_RESET
+};
+
+struct dpu_encoder_phys;
+
+/**
+ * struct dpu_encoder_virt_ops - Interface the containing virtual encoder
+ * provides for the physical encoders to use to callback.
+ * @handle_vblank_virt: Notify virtual encoder of vblank IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_frame_done:	Notify the virtual encoder that this phys encoder
+ *			has completed the last requested frame.
+ */
+struct dpu_encoder_virt_ops {
+ void (*handle_vblank_virt)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys);
+ void (*handle_underrun_virt)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys);
+ void (*handle_frame_done)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys, u32 event);
+};
+
+/**
+ * struct dpu_encoder_phys_ops - Interface the physical encoders provide to
+ * the containing virtual encoder.
+ * @late_register: DRM Call. Add Userspace interfaces, debugfs.
+ * @prepare_commit: MSM Atomic Call, start of atomic commit sequence
+ * @is_master: Whether this phys_enc is the current master
+ * encoder. Can be switched at enable time. Based
+ * on split_role and current mode (CMD/VID).
+ * @mode_fixup: DRM Call. Fixup a DRM mode.
+ * @mode_set: DRM Call. Set a DRM mode.
+ * This likely caches the mode, for use at enable.
+ * @enable: DRM Call. Enable a DRM mode.
+ * @disable: DRM Call. Disable mode.
+ * @atomic_check: DRM Call. Atomic check new DRM state.
+ * @destroy: DRM Call. Destroy and release resources.
+ * @get_hw_resources: Populate the structure with the hardware
+ * resources that this phys_enc is using.
+ * Expect no overlap between phys_encs.
+ * @control_vblank_irq:	Register/Deregister for VBLANK IRQ
+ * @wait_for_commit_done: Wait for hardware to have flushed the
+ * current pending frames to hardware
+ * @wait_for_tx_complete: Wait for hardware to transfer the pixels
+ * to the panel
+ * @wait_for_vblank: Wait for VBLANK, for sub-driver internal use
+ * @prepare_for_kickoff: Do any work necessary prior to a kickoff
+ * For CMD encoder, may wait for previous tx done
+ * @handle_post_kickoff:	Do any work necessary after the kickoff
+ * @trigger_start: Process start event on physical encoder
+ * @needs_single_flush: Whether encoder slaves need to be flushed
+ * @setup_misr: Sets up MISR, enable and disables based on sysfs
+ * @collect_misr: Collects MISR data on frame update
+ * @hw_reset: Issue HW recovery such as CTL reset and clear
+ * DPU_ENC_ERR_NEEDS_HW_RESET state
+ * @irq_control: Handler to enable/disable all the encoder IRQs
+ * @prepare_idle_pc: phys encoder can update the vsync_enable status
+ * on idle power collapse prepare
+ * @restore: Restore all the encoder configs.
+ * @get_line_count: Obtain current vertical line count
+ */
+
+struct dpu_encoder_phys_ops {
+ int (*late_register)(struct dpu_encoder_phys *encoder,
+ struct dentry *debugfs_root);
+ void (*prepare_commit)(struct dpu_encoder_phys *encoder);
+ bool (*is_master)(struct dpu_encoder_phys *encoder);
+ bool (*mode_fixup)(struct dpu_encoder_phys *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*mode_set)(struct dpu_encoder_phys *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*enable)(struct dpu_encoder_phys *encoder);
+ void (*disable)(struct dpu_encoder_phys *encoder);
+ int (*atomic_check)(struct dpu_encoder_phys *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+ void (*destroy)(struct dpu_encoder_phys *encoder);
+ void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state);
+ int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
+ int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
+ void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params);
+ void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
+ void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
+ bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
+
+ void (*setup_misr)(struct dpu_encoder_phys *phys_encs,
+ bool enable, u32 frame_count);
+ u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc);
+ void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
+ void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
+ void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
+ void (*restore)(struct dpu_encoder_phys *phys);
+ int (*get_line_count)(struct dpu_encoder_phys *phys);
+};
+
+/**
+ * enum dpu_intr_idx - dpu encoder interrupt index
+ * @INTR_IDX_VSYNC: Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_CTL_START: Ctl start interrupt for cmd mode panel
+ * @INTR_IDX_RDPTR:    Readpointer done interrupt for cmd mode panel
+ */
+enum dpu_intr_idx {
+ INTR_IDX_VSYNC,
+ INTR_IDX_PINGPONG,
+ INTR_IDX_UNDERRUN,
+ INTR_IDX_CTL_START,
+ INTR_IDX_RDPTR,
+ INTR_IDX_MAX,
+};
+
+/**
+ * struct dpu_encoder_irq - tracking structure for interrupts
+ * @name: string name of interrupt
+ * @intr_type: Encoder interrupt type
+ * @intr_idx: Encoder interrupt enumeration
+ * @hw_idx: HW Block ID
+ * @irq_idx: IRQ interface lookup index from DPU IRQ framework
+ * will be -EINVAL if IRQ is not registered
+ * @cb:		interrupt callback
+ */
+struct dpu_encoder_irq {
+ const char *name;
+ enum dpu_intr_type intr_type;
+ enum dpu_intr_idx intr_idx;
+ int hw_idx;
+ int irq_idx;
+ struct dpu_irq_callback cb;
+};
+
+/**
+ * struct dpu_encoder_phys - physical encoder that drives a single INTF block
+ * tied to a specific panel / sub-panel. Abstract type, sub-classed by
+ * phys_vid or phys_cmd for video mode or command mode encs respectively.
+ * @parent: Pointer to the containing virtual encoder
+ * @connector: If a mode is set, cached pointer to the active connector
+ * @ops: Operations exposed to the virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @hw_mdptop: Hardware interface to the top registers
+ * @hw_ctl: Hardware interface to the ctl registers
+ * @hw_cdm: Hardware interface to the cdm registers
+ * @cdm_cfg: Chroma-down hardware configuration
+ * @hw_pp: Hardware interface to the ping pong registers
+ * @dpu_kms: Pointer to the dpu_kms top level
+ * @cached_mode: DRM mode cached at mode_set time, acted on in enable
+ * @enabled: Whether the encoder has enabled and running a mode
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_mode: Interface mode
+ * @intf_idx: Interface index on dpu hardware
+ * @topology_name: topology selected for the display
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enable_state: Enable state tracking
+ * @vblank_refcount: Reference count of vblank request
+ * @vsync_cnt: Vsync count for the physical encoder
+ * @underrun_cnt: Underrun count for the physical encoder
+ * @pending_kickoff_cnt: Atomic counter tracking the number of kickoffs
+ * vs. the number of done/vblank irqs. Should hover
+ * between 0-2 Incremented when a new kickoff is
+ * scheduled. Decremented in irq handler
+ * @pending_ctlstart_cnt: Atomic counter tracking the number of ctl start
+ * pending.
+ * @pending_kickoff_wq: Wait queue for blocking until kickoff completes
+ * @irq: IRQ tracking structures
+ */
+struct dpu_encoder_phys {
+ struct drm_encoder *parent;
+ struct drm_connector *connector;
+ struct dpu_encoder_phys_ops ops;
+ const struct dpu_encoder_virt_ops *parent_ops;
+ struct dpu_hw_mdp *hw_mdptop;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_cdm *hw_cdm;
+ struct dpu_hw_cdm_cfg cdm_cfg;
+ struct dpu_hw_pingpong *hw_pp;
+ struct dpu_kms *dpu_kms;
+ struct drm_display_mode cached_mode;
+ enum dpu_enc_split_role split_role;
+ enum dpu_intf_mode intf_mode;
+ enum dpu_intf intf_idx;
+ enum dpu_rm_topology_name topology_name;
+ spinlock_t *enc_spinlock;
+ enum dpu_enc_enable_state enable_state;
+ atomic_t vblank_refcount;
+ atomic_t vsync_cnt;
+ atomic_t underrun_cnt;
+ atomic_t pending_ctlstart_cnt;
+ atomic_t pending_kickoff_cnt;
+ wait_queue_head_t pending_kickoff_wq;
+ struct dpu_encoder_irq irq[INTR_IDX_MAX];
+};
+
+static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
+{
+ atomic_inc_return(&phys->pending_ctlstart_cnt);
+ return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
+
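dpu_encoder_phys_inc_pending() is one half of a counter handshake: kickoff paths increment pending_ctlstart_cnt and pending_kickoff_cnt, the ctl-start and pp-done/vblank interrupt handlers decrement them, and commit threads sleep on pending_kickoff_wq until the counts drain. A minimal userspace sketch of that handshake, with C11 atomics and a condition variable standing in for the kernel's atomic_t and waitqueue (all names hypothetical):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Userspace stand-ins for pending_kickoff_cnt / pending_kickoff_wq. */
static atomic_int pending_kickoff_cnt;
static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;

static void kickoff(void)		/* producer: schedule a frame */
{
	atomic_fetch_add(&pending_kickoff_cnt, 1);
}

static void *irq_handler(void *arg)	/* consumer: frame-done "irq" */
{
	sleep(1);			/* pretend the hardware took 1s */
	/* the driver uses atomic_add_unless(.., -1, 0) to avoid underflow */
	atomic_fetch_sub(&pending_kickoff_cnt, 1);
	pthread_mutex_lock(&wq_lock);
	pthread_cond_broadcast(&wq);	/* wake_up_all() equivalent */
	pthread_mutex_unlock(&wq_lock);
	return NULL;
}

static void wait_for_idle(void)		/* commit thread blocks here */
{
	pthread_mutex_lock(&wq_lock);
	while (atomic_load(&pending_kickoff_cnt) != 0)
		pthread_cond_wait(&wq, &wq_lock);
	pthread_mutex_unlock(&wq_lock);
}

int main(void)
{
	pthread_t t;

	kickoff();
	pthread_create(&t, NULL, irq_handler, NULL);
	wait_for_idle();
	pthread_join(t, NULL);
	printf("kickoff completed\n");
	return 0;
}
```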
+/**
+ * struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @hw_intf: Hardware interface to the intf registers
+ * @timing_params: Current timing parameter
+ */
+struct dpu_encoder_phys_vid {
+ struct dpu_encoder_phys base;
+ struct dpu_hw_intf *hw_intf;
+ struct intf_timing_params timing_params;
+};
+
+/**
+ * struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @stream_sel: Stream selection for multi-stream interfaces
+ * @serialize_wait4pp: serialize wait4pp feature waits for pp_done interrupt
+ * after ctl_start instead of before next frame kickoff
+ * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
+ * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
+ * @pending_vblank_wq: Wait queue for blocking until VBLANK received
+ */
+struct dpu_encoder_phys_cmd {
+ struct dpu_encoder_phys base;
+ int stream_sel;
+ bool serialize_wait4pp;
+ int pp_timeout_report_cnt;
+ atomic_t pending_vblank_cnt;
+ wait_queue_head_t pending_vblank_wq;
+};
+
+/**
+ * struct dpu_enc_phys_init_params - initialization parameters for phys encs
+ * @dpu_kms: Pointer to the dpu_kms top level
+ * @parent: Pointer to the containing virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_idx: Interface index this phys_enc will control
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ */
+struct dpu_enc_phys_init_params {
+ struct dpu_kms *dpu_kms;
+ struct drm_encoder *parent;
+ const struct dpu_encoder_virt_ops *parent_ops;
+ enum dpu_enc_split_role split_role;
+ enum dpu_intf intf_idx;
+ spinlock_t *enc_spinlock;
+};
+
+/**
+ * struct dpu_encoder_wait_info - container for passing arguments to irq wait
+ * functions
+ * @wq: wait queue structure
+ * @atomic_cnt: wait until atomic_cnt equals zero
+ * @timeout_ms: timeout value in milliseconds
+ */
+struct dpu_encoder_wait_info {
+ wait_queue_head_t *wq;
+ atomic_t *atomic_cnt;
+ s64 timeout_ms;
+};
+
+/**
+ * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+ struct dpu_enc_phys_init_params *p);
+
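Note that both constructors follow the kernel's error-pointer convention: failure is returned as ERR_PTR(ret), never NULL, so callers must test the result with IS_ERR() before dereferencing. A self-contained userspace rendering of that convention, with simplified err.h-style macros and a made-up fake_phys_init():

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified userspace versions of the kernel's err.h helpers. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct fake_phys { int intf_idx; };

static struct fake_phys *fake_phys_init(int intf_idx)
{
	struct fake_phys *phys;

	if (intf_idx < 0)
		return ERR_PTR(-EINVAL);	/* error encoded in pointer */

	phys = calloc(1, sizeof(*phys));
	if (!phys)
		return ERR_PTR(-ENOMEM);

	phys->intf_idx = intf_idx;
	return phys;
}

int main(void)
{
	struct fake_phys *phys = fake_phys_init(-1);

	if (IS_ERR(phys)) {			/* caller-side check */
		printf("init failed: %ld\n", PTR_ERR(phys));
		return 1;
	}
	free(phys);
	return 0;
}
```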
+/**
+ * dpu_encoder_helper_trigger_start - control start helper function
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
+
+/**
+ * dpu_encoder_helper_hw_reset - issue ctl hw reset
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl hw reset. If state is currently
+ * DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
+
+static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
+ return BLEND_3D_NONE;
+
+ if (phys_enc->split_role == ENC_ROLE_SOLO &&
+ phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
+ return BLEND_3D_H_ROW_INT;
+
+ return BLEND_3D_NONE;
+}
+
+/**
+ * dpu_encoder_helper_split_config - split display configuration helper function
+ * This helper function may be used by physical encoders to configure
+ * the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum dpu_intf setting
+ */
+void dpu_encoder_helper_split_config(
+ struct dpu_encoder_phys *phys_enc,
+ enum dpu_intf interface);
+
+/**
+ * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
+ * timed out, including reporting frame error event to crtc and debug dump
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: Failing interrupt index
+ */
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
+ * note: on timeout, the caller is expected to report the failure, e.g.
+ * via dpu_encoder_helper_report_irq_timeout
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * @wait_info: wait info struct
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx,
+ struct dpu_encoder_wait_info *wait_info);
+
+/**
+ * dpu_encoder_helper_register_irq - register and enable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_unregister_irq - unregister and disable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+#endif /* __dpu_encoder_phys_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
new file mode 100644
index 000000000000..3084675ed425
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_cmd(x) \
+ container_of(x, struct dpu_encoder_phys_cmd, base)
+
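to_dpu_encoder_phys_cmd() is the usual container_of() downcast: struct dpu_encoder_phys_cmd embeds struct dpu_encoder_phys as its first member, so a base pointer passed through the common ops table can be converted back to the sub-class. A standalone sketch of the pattern (struct names are illustrative only):

```c
#include <stddef.h>
#include <stdio.h>

/* container_of(): recover the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_enc { int intf_idx; };

struct cmd_enc {
	struct base_enc base;	/* embedded base, as in dpu_encoder_phys_cmd */
	int stream_sel;
};

static void takes_base(struct base_enc *phys)
{
	/* Downcast: valid because 'phys' is known to live inside cmd_enc. */
	struct cmd_enc *cmd = container_of(phys, struct cmd_enc, base);

	printf("intf %d stream %d\n", phys->intf_idx, cmd->stream_sel);
}

int main(void)
{
	struct cmd_enc enc = { .base = { .intf_idx = 1 }, .stream_sel = 0 };

	takes_base(&enc.base);	/* upcast is just the member's address */
	return 0;
}
```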
+#define PP_TIMEOUT_MAX_TRIALS 10
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels. In the future, we may want to allow panels to
+ * override these default values.
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
+
+#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
+
+static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
+ struct dpu_encoder_phys_cmd *cmd_enc)
+{
+ return KICKOFF_TIMEOUT_MS;
+}
+
+static inline bool dpu_encoder_phys_cmd_is_master(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static bool dpu_encoder_phys_cmd_mode_fixup(
+ struct dpu_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ if (phys_enc)
+ DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
+ return true;
+}
+
+static void _dpu_encoder_phys_cmd_update_intf_cfg(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+ if (!phys_enc)
+ return;
+
+ ctl = phys_enc->hw_ctl;
+ if (!ctl || !ctl->ops.setup_intf_cfg)
+ return;
+
+ intf_cfg.intf = phys_enc->intf_idx;
+ intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
+ intf_cfg.stream_sel = cmd_enc->stream_sel;
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+}
+
+static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ unsigned long lock_flags;
+ int new_cnt;
+ u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return;
+
+ DPU_ATRACE_BEGIN("pp_done_irq");
+ /* notify all synchronous clients first, then asynchronous clients */
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
+ phys_enc, event);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ new_cnt, event);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("pp_done_irq");
+}
+
+static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return;
+
+ DPU_ATRACE_BEGIN("rd_ptr_irq");
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
+ wake_up_all(&cmd_enc->pending_vblank_wq);
+ DPU_ATRACE_END("rd_ptr_irq");
+}
+
+static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc || !phys_enc->hw_ctl)
+ return;
+
+ DPU_ATRACE_BEGIN("ctl_start_irq");
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+ /* Signal any waiting ctl start interrupt */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("ctl_start_irq");
+}
+
+static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ if (!phys_enc)
+ return;
+
+ if (phys_enc->parent_ops->handle_underrun_virt)
+ phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+ phys_enc);
+}
+
+static void _dpu_encoder_phys_cmd_setup_irq_hw_idx(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_irq *irq;
+
+ irq = &phys_enc->irq[INTR_IDX_CTL_START];
+ irq->hw_idx = phys_enc->hw_ctl->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+ irq->hw_idx = phys_enc->hw_pp->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_RDPTR];
+ irq->hw_idx = phys_enc->hw_pp->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->hw_idx = phys_enc->intf_idx;
+ irq->irq_idx = -EINVAL;
+}
+
+static void dpu_encoder_phys_cmd_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_rm *rm;
+ struct dpu_rm_hw_iter iter;
+ int i, instance;
+
+ if (!phys_enc || !mode || !adj_mode) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+ rm = &phys_enc->dpu_kms->rm;
+ phys_enc->cached_mode = *adj_mode;
+ DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
+ drm_mode_debug_printmodeline(adj_mode);
+
+ instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+ /* Retrieve previously allocated HW Resources. Shouldn't fail */
+ dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i <= instance; i++) {
+ if (dpu_rm_get_hw(rm, &iter))
+ phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+ }
+
+ if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+ DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
+ PTR_ERR(phys_enc->hw_ctl));
+ phys_enc->hw_ctl = NULL;
+ return;
+ }
+
+ _dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
+}
+
+static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ bool do_log = false;
+
+ if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
+ return -EINVAL;
+
+ cmd_enc->pp_timeout_report_cnt++;
+ if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
+ frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
+ do_log = true;
+ } else if (cmd_enc->pp_timeout_report_cnt == 1) {
+ do_log = true;
+ }
+
+ trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt),
+ frame_event);
+
+ /* to avoid flooding, only log the first timeout and the "dead" timeout */
+ if (do_log) {
+ DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->hw_ctl->idx - CTL_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+ /* request a ctl reset before the next kickoff */
+ phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc, frame_event);
+
+ return -ETIMEDOUT;
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_idle(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
+ &wait_info);
+ if (ret == -ETIMEDOUT)
+ _dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
+ else if (!ret)
+ cmd_enc->pp_timeout_report_cnt = 0;
+
+ return ret;
+}
+
+static int dpu_encoder_phys_cmd_control_vblank_irq(
+ struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ int refcount;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ refcount = atomic_read(&phys_enc->vblank_refcount);
+
+ /* Slave encoders don't report vblank */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ goto end;
+
+ /* protect against negative */
+ if (!enable && refcount == 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ enable ? "true" : "false", refcount);
+
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_RDPTR);
+
+end:
+ if (ret) {
+ DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0, ret,
+ enable ? "true" : "false", refcount);
+ }
+
+ return ret;
+}
+
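dpu_encoder_phys_cmd_control_vblank_irq() above is a refcounted enable: only the 0 -> 1 transition registers the RDPTR interrupt, only the 1 -> 0 transition unregisters it, and an explicit guard protects the count from going negative. A compact userspace sketch of just that transition logic (register_irq()/unregister_irq() are stubs):

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int vblank_refcount;

static void register_irq(void)   { printf("irq registered\n"); }
static void unregister_irq(void) { printf("irq unregistered\n"); }

static int control_vblank_irq(int enable)
{
	if (!enable && atomic_load(&vblank_refcount) == 0)
		return -1;	/* protect against refcount underflow */

	/* fetch_add/fetch_sub return the old value; +1/-1 gives the new one,
	 * matching the kernel's atomic_inc_return()/atomic_dec_return(). */
	if (enable && atomic_fetch_add(&vblank_refcount, 1) + 1 == 1)
		register_irq();		/* only on the 0 -> 1 transition */
	else if (!enable && atomic_fetch_sub(&vblank_refcount, 1) - 1 == 0)
		unregister_irq();	/* only on the 1 -> 0 transition */

	return 0;
}

int main(void)
{
	control_vblank_irq(1);	/* registers */
	control_vblank_irq(1);	/* refcount only */
	control_vblank_irq(0);	/* refcount only */
	control_vblank_irq(0);	/* unregisters */
	return 0;
}
```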
+static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ enable, atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable) {
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ dpu_encoder_helper_register_irq(phys_enc,
+ INTR_IDX_CTL_START);
+ } else {
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_CTL_START);
+
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
+ }
+}
+
+static void dpu_encoder_phys_cmd_tearcheck_config(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_tear_check tc_cfg = { 0 };
+ struct drm_display_mode *mode;
+ bool tc_enable = true;
+ u32 vsync_hz;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ mode = &phys_enc->cached_mode;
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (!phys_enc->hw_pp->ops.setup_tearcheck ||
+ !phys_enc->hw_pp->ops.enable_tearcheck) {
+ DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+ return;
+ }
+
+ dpu_kms = phys_enc->dpu_kms;
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ /*
+ * TE default: dsi byte clock calculated based on 70 fps;
+ * around 14 ms to complete a kickoff cycle if te disabled;
+ * vclk_line based on 60 fps; write is faster than read;
+ * init == start == rdptr;
+ *
+ * vsync_count is the ratio of the MDP VSYNC clock frequency to the LCD
+ * panel refresh rate, divided by the number of rows (lines) in the panel.
+ */
+ vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
+ if (!vsync_hz) {
+ DPU_DEBUG_CMDENC(cmd_enc, "invalid - vsync_hz %u\n",
+ vsync_hz);
+ return;
+ }
+
+ tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
+
+ /* enable external TE after kickoff to avoid premature autorefresh */
+ tc_cfg.hw_vsync_mode = 0;
+
+ /*
+ * By setting sync_cfg_height to near max register value, we essentially
+ * disable dpu hw generated TE signal, since hw TE will arrive first.
+ * Only caveat is if due to error, we hit wrap-around.
+ */
+ tc_cfg.sync_cfg_height = 0xFFF0;
+ tc_cfg.vsync_init_val = mode->vdisplay;
+ tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+ tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+ tc_cfg.start_pos = mode->vdisplay;
+ tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+ mode->vtotal, mode->vrefresh);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d enable %u start_pos %u rd_ptr_irq %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
+ tc_cfg.rd_ptr_irq);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+ tc_cfg.vsync_count, tc_cfg.vsync_init_val);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
+ tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
+
+ phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
+ phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
+}
+
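The one real computation in the tearcheck setup is vsync_count = vsync_hz / (vtotal * vrefresh), the vsync clock divided by the panel's line rate. A worked example with made-up clock and panel numbers, not values from any real target:

```c
#include <stdio.h>

/* Hypothetical panel/clock numbers, for illustration only. */
#define VSYNC_HZ	19200000u	/* MDP vsync clock */
#define VTOTAL		2250u		/* total lines per frame */
#define VREFRESH	60u		/* frames per second */
#define VDISPLAY	2160u		/* active lines */

int main(void)
{
	/* line rate = VTOTAL * VREFRESH lines per second */
	unsigned int vsync_count = VSYNC_HZ / (VTOTAL * VREFRESH);

	printf("vsync_count    = %u\n", vsync_count);	/* 142 */
	printf("vsync_init_val = %u\n", VDISPLAY);
	printf("start_pos      = %u\n", VDISPLAY);
	printf("rd_ptr_irq     = %u\n", VDISPLAY + 1);	/* line after active */
	return 0;
}
```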
+static void _dpu_encoder_phys_cmd_pingpong_config(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
+ || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+ _dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
+ dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+static bool dpu_encoder_phys_cmd_needs_single_flush(
+ struct dpu_encoder_phys *phys_enc)
+{
+ /*
+ * we do a separate flush for each CTL and let
+ * CTL_START synchronize them
+ */
+ return false;
+}
+
+static void dpu_encoder_phys_cmd_enable_helper(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+ u32 flush_mask = 0;
+
+ if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ return;
+ }
+
+ dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+
+ _dpu_encoder_phys_cmd_pingpong_config(phys_enc);
+
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ goto skip_flush;
+
+ ctl = phys_enc->hw_ctl;
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+ ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+ return;
+}
+
+static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid phys encoder\n");
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (phys_enc->enable_state == DPU_ENC_ENABLED) {
+ DPU_ERROR("already enabled\n");
+ return;
+ }
+
+ dpu_encoder_phys_cmd_enable_helper(phys_enc);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+static void _dpu_encoder_phys_cmd_connect_te(
+ struct dpu_encoder_phys *phys_enc, bool enable)
+{
+ if (!phys_enc || !phys_enc->hw_pp ||
+ !phys_enc->hw_pp->ops.connect_external_te)
+ return;
+
+ trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
+ phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
+}
+
+static void dpu_encoder_phys_cmd_prepare_idle_pc(
+ struct dpu_encoder_phys *phys_enc)
+{
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+}
+
+static int dpu_encoder_phys_cmd_get_line_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_pingpong *hw_pp;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return -EINVAL;
+
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return -EINVAL;
+
+ hw_pp = phys_enc->hw_pp;
+ if (!hw_pp->ops.get_line_count)
+ return -EINVAL;
+
+ return hw_pp->ops.get_line_count(hw_pp);
+}
+
+static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->enable_state);
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
+ return;
+ }
+
+ if (phys_enc->hw_pp->ops.enable_tearcheck)
+ phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ kfree(cmd_enc);
+}
+
+static void dpu_encoder_phys_cmd_get_hw_resources(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
+ DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "\n");
+ hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+}
+
+static void dpu_encoder_phys_cmd_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ int ret;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+
+ /*
+ * Mark kickoff request as outstanding. If there is more than one
+ * outstanding, we have to wait for the previous kickoff to complete.
+ */
+ ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (ret) {
+ /* force pending_kickoff_cnt 0 to discard failed kickoff */
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
+ DRMID(phys_enc->parent), ret,
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc || !phys_enc->hw_ctl) {
+ DPU_ERROR("invalid argument(s)\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
+ &wait_info);
+ if (ret == -ETIMEDOUT) {
+ DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_tx_complete(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (rc) {
+ DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
+ DRMID(phys_enc->parent), rc,
+ phys_enc->intf_idx - INTF_0);
+ }
+
+ return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_commit_done(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc = 0;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ /* only required for master controller */
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+
+ /* required for both controllers */
+ if (!rc && cmd_enc->serialize_wait4pp)
+ dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
+
+ return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc = 0;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+ struct dpu_encoder_wait_info wait_info;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ /* only required for master controller */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return rc;
+
+ wait_info.wq = &cmd_enc->pending_vblank_wq;
+ wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
+ wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+
+ atomic_inc(&cmd_enc->pending_vblank_cnt);
+
+ rc = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
+ &wait_info);
+
+ return rc;
+}
+
+static void dpu_encoder_phys_cmd_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return;
+
+ /*
+ * re-enable external TE, either for the first time after enabling
+ * or if disabled for Autorefresh
+ */
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+}
+
+static void dpu_encoder_phys_cmd_trigger_start(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return;
+
+ dpu_encoder_helper_trigger_start(phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_init_ops(
+ struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_cmd_is_master;
+ ops->mode_set = dpu_encoder_phys_cmd_mode_set;
+ ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
+ ops->enable = dpu_encoder_phys_cmd_enable;
+ ops->disable = dpu_encoder_phys_cmd_disable;
+ ops->destroy = dpu_encoder_phys_cmd_destroy;
+ ops->get_hw_resources = dpu_encoder_phys_cmd_get_hw_resources;
+ ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
+ ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
+ ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
+ ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
+ ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
+ ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
+ ops->hw_reset = dpu_encoder_helper_hw_reset;
+ ops->irq_control = dpu_encoder_phys_cmd_irq_control;
+ ops->restore = dpu_encoder_phys_cmd_enable_helper;
+ ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
+ ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
+ ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_cmd *cmd_enc = NULL;
+ struct dpu_hw_mdp *hw_mdp;
+ struct dpu_encoder_irq *irq;
+ int i, ret = 0;
+
+ DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
+
+ cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+ if (!cmd_enc) {
+ ret = -ENOMEM;
+ DPU_ERROR("failed to allocate\n");
+ goto fail;
+ }
+ phys_enc = &cmd_enc->base;
+
+ hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+ if (IS_ERR_OR_NULL(hw_mdp)) {
+ ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL;
+ DPU_ERROR("failed to get mdptop\n");
+ goto fail_mdp_init;
+ }
+ phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->intf_idx = p->intf_idx;
+
+ dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_CMD;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+ cmd_enc->stream_sel = 0;
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+ for (i = 0; i < INTR_IDX_MAX; i++) {
+ irq = &phys_enc->irq[i];
+ INIT_LIST_HEAD(&irq->cb.list);
+ irq->irq_idx = -EINVAL;
+ irq->hw_idx = -EINVAL;
+ irq->cb.arg = phys_enc;
+ }
+
+ irq = &phys_enc->irq[INTR_IDX_CTL_START];
+ irq->name = "ctl_start";
+ irq->intr_type = DPU_IRQ_TYPE_CTL_START;
+ irq->intr_idx = INTR_IDX_CTL_START;
+ irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+ irq->name = "pp_done";
+ irq->intr_type = DPU_IRQ_TYPE_PING_PONG_COMP;
+ irq->intr_idx = INTR_IDX_PINGPONG;
+ irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_RDPTR];
+ irq->name = "pp_rd_ptr";
+ irq->intr_type = DPU_IRQ_TYPE_PING_PONG_RD_PTR;
+ irq->intr_idx = INTR_IDX_RDPTR;
+ irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->name = "underrun";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+ irq->intr_idx = INTR_IDX_UNDERRUN;
+ irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
+
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
+ atomic_set(&cmd_enc->pending_vblank_cnt, 0);
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ init_waitqueue_head(&cmd_enc->pending_vblank_wq);
+
+ DPU_DEBUG_CMDENC(cmd_enc, "created\n");
+
+ return phys_enc;
+
+fail_mdp_init:
+ kfree(cmd_enc);
+fail:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
new file mode 100644
index 000000000000..14fc7c2a6bb7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -0,0 +1,922 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_vid(x) \
+ container_of(x, struct dpu_encoder_phys_vid, base)
+
+static bool dpu_encoder_phys_vid_is_master(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static void drm_mode_to_intf_timing_params(
+ const struct dpu_encoder_phys_vid *vid_enc,
+ const struct drm_display_mode *mode,
+ struct intf_timing_params *timing)
+{
+ memset(timing, 0, sizeof(*timing));
+
+ if ((mode->htotal < mode->hsync_end)
+ || (mode->hsync_start < mode->hdisplay)
+ || (mode->vtotal < mode->vsync_end)
+ || (mode->vsync_start < mode->vdisplay)
+ || (mode->hsync_end < mode->hsync_start)
+ || (mode->vsync_end < mode->vsync_start)) {
+ DPU_ERROR(
+ "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
+ mode->hsync_start, mode->hsync_end,
+ mode->htotal, mode->hdisplay);
+ DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
+ mode->vsync_start, mode->vsync_end,
+ mode->vtotal, mode->vdisplay);
+ return;
+ }
+
+ /*
+ * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+ * Active Region Front Porch Sync Back Porch
+ * <-----------------><------------><-----><----------->
+ * <- [hv]display --->
+ * <--------- [hv]sync_start ------>
+ * <----------------- [hv]sync_end ------->
+ * <---------------------------- [hv]total ------------->
+ */
+ timing->width = mode->hdisplay; /* active width */
+ timing->height = mode->vdisplay; /* active height */
+ timing->xres = timing->width;
+ timing->yres = timing->height;
+ timing->h_back_porch = mode->htotal - mode->hsync_end;
+ timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+ timing->v_back_porch = mode->vtotal - mode->vsync_end;
+ timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+ timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+ timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+ timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+ timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+ timing->border_clr = 0;
+ timing->underflow_clr = 0xff;
+ timing->hsync_skew = mode->hskew;
+
+ /* DSI controller cannot handle active-low sync signals. */
+ if (vid_enc->hw_intf->cap->type == INTF_DSI) {
+ timing->hsync_polarity = 0;
+ timing->vsync_polarity = 0;
+ }
+
+ /*
+ * For edp only:
+ * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+ * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+ */
+ /*
+ * if (vid_enc->hw->cap->type == INTF_EDP) {
+ * display_v_start += mode->htotal - mode->hsync_start;
+ * display_v_end -= mode->hsync_start - mode->hdisplay;
+ * }
+ */
+}
+
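The porch arithmetic above falls straight out of the timing diagram. As a worked example, here are the derived values for standard 1920x1080@60 CTA timings (used purely for illustration):

```c
#include <stdio.h>

/* CTA-861 1920x1080@60 timings, used only as a worked example. */
struct mode {
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
	struct mode m = { 1920, 2008, 2052, 2200, 1080, 1084, 1089, 1125 };

	printf("h_front_porch     = %d\n", m.hsync_start - m.hdisplay);  /* 88  */
	printf("hsync_pulse_width = %d\n", m.hsync_end - m.hsync_start); /* 44  */
	printf("h_back_porch      = %d\n", m.htotal - m.hsync_end);      /* 148 */
	printf("v_front_porch     = %d\n", m.vsync_start - m.vdisplay);  /* 4   */
	printf("vsync_pulse_width = %d\n", m.vsync_end - m.vsync_start); /* 5   */
	printf("v_back_porch      = %d\n", m.vtotal - m.vsync_end);      /* 36  */
	return 0;
}
```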
+static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+{
+ u32 active = timing->xres;
+ u32 inactive =
+ timing->h_back_porch + timing->h_front_porch +
+ timing->hsync_pulse_width;
+ return active + inactive;
+}
+
+static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+{
+ u32 active = timing->yres;
+ u32 inactive =
+ timing->v_back_porch + timing->v_front_porch +
+ timing->vsync_pulse_width;
+ return active + inactive;
+}
+
+/*
+ * programmable_fetch_get_num_lines:
+ * Number of fetch lines in vertical front porch
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have a very large VFP; however, we only need a total number
+ * of lines based on the chip's worst-case latencies.
+ */
+static u32 programmable_fetch_get_num_lines(
+ struct dpu_encoder_phys_vid *vid_enc,
+ const struct intf_timing_params *timing)
+{
+ u32 worst_case_needed_lines =
+ vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+ u32 start_of_frame_lines =
+ timing->v_back_porch + timing->vsync_pulse_width;
+ u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
+ u32 actual_vfp_lines = 0;
+
+ /* Fetch must be outside active lines, otherwise undefined. */
+ if (start_of_frame_lines >= worst_case_needed_lines) {
+ DPU_DEBUG_VIDENC(vid_enc,
+ "prog fetch is not needed, large vbp+vsw\n");
+ actual_vfp_lines = 0;
+ } else if (timing->v_front_porch < needed_vfp_lines) {
+ /* Warn fetch needed, but not enough porch in panel config */
+ pr_warn_once
+ ("low vbp+vfp may lead to perf issues in some cases\n");
+ DPU_DEBUG_VIDENC(vid_enc,
+ "less vfp than fetch req, using entire vfp\n");
+ actual_vfp_lines = timing->v_front_porch;
+ } else {
+ DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
+ actual_vfp_lines = needed_vfp_lines;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+ timing->v_front_porch, timing->v_back_porch,
+ timing->vsync_pulse_width);
+ DPU_DEBUG_VIDENC(vid_enc,
+ "wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+ worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+ return actual_vfp_lines;
+}
+
+/*
+ * programmable_fetch_config: Programs HW to prefetch lines by offsetting
+ * the start of fetch into the vertical front porch for cases where the
+ * vsync pulse width and vertical back porch time are insufficient
+ *
+ * Gets the number of lines to prefetch, then calculates the VSYNC counter
+ * value. The HW layer requires the VSYNC counter of the first pixel of
+ * the target VFP line.
+ *
+ * @timing: Pointer to the intf timing information for the requested mode
+ */
+static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
+ const struct intf_timing_params *timing)
+{
+ struct dpu_encoder_phys_vid *vid_enc =
+ to_dpu_encoder_phys_vid(phys_enc);
+ struct intf_prog_fetch f = { 0 };
+ u32 vfp_fetch_lines = 0;
+ u32 horiz_total = 0;
+ u32 vert_total = 0;
+ u32 vfp_fetch_start_vsync_counter = 0;
+ unsigned long lock_flags;
+
+ if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+ return;
+
+ vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+ if (vfp_fetch_lines) {
+ vert_total = get_vertical_total(timing);
+ horiz_total = get_horizontal_total(timing);
+ vfp_fetch_start_vsync_counter =
+ (vert_total - vfp_fetch_lines) * horiz_total + 1;
+ f.enable = 1;
+ f.fetch_start = vfp_fetch_start_vsync_counter;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+ vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
+
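Tying the two helpers together: assume a hypothetical worst-case prefetch requirement of 60 lines (the real value comes from hw_intf->cap->prog_fetch_lines_worst_case). With the 1080p timings from the previous example, vbp + vsw = 41 lines are absorbed at start of frame, 19 more would be needed from the front porch, but only 4 are available, so the entire VFP is used and the fetch-start counter points at the first pixel of that line:

```c
#include <stdio.h>

int main(void)
{
	unsigned int worst_case = 60;	/* hypothetical chip requirement */
	unsigned int vbp = 36, vsw = 5, vfp = 4;	/* 1080p@60 from above */
	unsigned int htotal = 2200, vtotal = 1125;
	unsigned int sof_lines = vbp + vsw;	/* absorbed before active */
	unsigned int fetch_lines, fetch_start;

	if (sof_lines >= worst_case)
		fetch_lines = 0;			/* no prefetch needed */
	else if (vfp < worst_case - sof_lines)
		fetch_lines = vfp;			/* clamp to available VFP */
	else
		fetch_lines = worst_case - sof_lines;

	/* VSYNC counter of the first pixel of the target VFP line. */
	fetch_start = (vtotal - fetch_lines) * htotal + 1;

	printf("fetch_lines = %u\n", fetch_lines);	/* 4 */
	printf("fetch_start = %u\n", fetch_start);	/* 2466201 */
	return 0;
}
```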
+static bool dpu_encoder_phys_vid_mode_fixup(
+ struct dpu_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ if (phys_enc)
+ DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n");
+
+ /*
+ * Modifying mode has consequences when the mode comes back to us
+ */
+ return true;
+}
+
+static void dpu_encoder_phys_vid_setup_timing_engine(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct drm_display_mode mode;
+ struct intf_timing_params timing_params = { 0 };
+ const struct dpu_format *fmt = NULL;
+ u32 fmt_fourcc = DRM_FORMAT_RGB888;
+ unsigned long lock_flags;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+ if (!phys_enc || !phys_enc->hw_ctl ||
+ !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ DPU_ERROR("invalid encoder %d\n", phys_enc != NULL);
+ return;
+ }
+
+ mode = phys_enc->cached_mode;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+ DPU_ERROR("timing engine setup is not supported\n");
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+ drm_mode_debug_printmodeline(&mode);
+
+ if (phys_enc->split_role != ENC_ROLE_SOLO) {
+ mode.hdisplay >>= 1;
+ mode.htotal >>= 1;
+ mode.hsync_start >>= 1;
+ mode.hsync_end >>= 1;
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "split_role %d, halve horizontal %d %d %d %d\n",
+ phys_enc->split_role,
+ mode.hdisplay, mode.htotal,
+ mode.hsync_start, mode.hsync_end);
+ }
+
+ drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+
+ fmt = dpu_get_dpu_format(fmt_fourcc);
+ DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+
+ intf_cfg.intf = vid_enc->hw_intf->idx;
+ intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+ &timing_params, fmt);
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ programmable_fetch_config(phys_enc, &timing_params);
+
+ vid_enc->timing_params = timing_params;
+}
+
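For any split role other than ENC_ROLE_SOLO, each physical encoder drives half the panel width, so the horizontal timings are halved before drm_mode_to_intf_timing_params() runs. A trivial worked example with a hypothetical 3840-wide dual-pipe mode:

```c
#include <stdio.h>

int main(void)
{
	/* Hypothetical 3840-wide mode driven by two INTFs. */
	int hdisplay = 3840, htotal = 4400, hsync_start = 4016, hsync_end = 4104;

	/* Each phys encoder programs half the horizontal timings. */
	printf("per-intf hdisplay    = %d\n", hdisplay >> 1);    /* 1920 */
	printf("per-intf htotal      = %d\n", htotal >> 1);      /* 2200 */
	printf("per-intf hsync_start = %d\n", hsync_start >> 1); /* 2008 */
	printf("per-intf hsync_end   = %d\n", hsync_end >> 1);   /* 2052 */
	return 0;
}
```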
+static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_hw_ctl *hw_ctl;
+ unsigned long lock_flags;
+ u32 flush_register = 0;
+ int new_cnt = -1, old_cnt = -1;
+
+ if (!phys_enc)
+ return;
+
+ hw_ctl = phys_enc->hw_ctl;
+ if (!hw_ctl)
+ return;
+
+ DPU_ATRACE_BEGIN("vblank_irq");
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+ /*
+ * only decrement the pending flush count if we've actually flushed
+ * hardware. due to sw irq latency, vblank may have already happened
+ * so we need to double-check with hw that it accepted the flush bits
+ */
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ if (hw_ctl && hw_ctl->ops.get_flush_register)
+ flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+ if (flush_register == 0)
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+ -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("vblank_irq");
+}
+
+static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ if (!phys_enc)
+ return;
+
+ if (phys_enc->parent_ops->handle_underrun_virt)
+ phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+ phys_enc);
+}
+
+static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return false;
+
+ if (phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE)
+ return true;
+
+ return false;
+}
+
+static bool dpu_encoder_phys_vid_needs_single_flush(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc));
+}
+
+static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_irq *irq;
+
+ /*
+ * Initialize irq->hw_idx only when irq is not registered.
+ * Prevent invalidating irq->irq_idx as modeset may be
+ * called many times during dfps.
+ */
+
+ irq = &phys_enc->irq[INTR_IDX_VSYNC];
+ if (irq->irq_idx < 0)
+ irq->hw_idx = phys_enc->intf_idx;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ if (irq->irq_idx < 0)
+ irq->hw_idx = phys_enc->intf_idx;
+}
+
+static void dpu_encoder_phys_vid_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_rm *rm;
+ struct dpu_rm_hw_iter iter;
+ int i, instance;
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc || !phys_enc->dpu_kms) {
+ DPU_ERROR("invalid encoder/kms\n");
+ return;
+ }
+
+ rm = &phys_enc->dpu_kms->rm;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ if (adj_mode) {
+ phys_enc->cached_mode = *adj_mode;
+ drm_mode_debug_printmodeline(adj_mode);
+ DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+ }
+
+ instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+ /* Retrieve previously allocated HW Resources. Shouldn't fail */
+ dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i <= instance; i++) {
+ if (dpu_rm_get_hw(rm, &iter))
+ phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+ }
+ if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+ DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
+ PTR_ERR(phys_enc->hw_ctl));
+ phys_enc->hw_ctl = NULL;
+ return;
+ }
+
+ _dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
+}
+
+static int dpu_encoder_phys_vid_control_vblank_irq(
+ struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ struct dpu_encoder_phys_vid *vid_enc;
+ int refcount;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ refcount = atomic_read(&phys_enc->vblank_refcount);
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ /* Slave encoders don't report vblank */
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ goto end;
+
+ /* protect against negative */
+ if (!enable && refcount == 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ DRM_DEBUG_KMS("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_VSYNC);
+
+end:
+ if (ret) {
+ DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
+ DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0, ret, enable,
+ refcount);
+ }
+ return ret;
+}
+
+static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
+{
+ struct msm_drm_private *priv;
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct dpu_hw_intf *intf;
+ struct dpu_hw_ctl *ctl;
+ u32 flush_mask = 0;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+ !phys_enc->parent->dev->dev_private) {
+ DPU_ERROR("invalid encoder/device\n");
+ return;
+ }
+ priv = phys_enc->parent->dev->dev_private;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ intf = vid_enc->hw_intf;
+ ctl = phys_enc->hw_ctl;
+ if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+ DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+ vid_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ return;
+
+ dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+
+ dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
+
+ /*
+ * For single flush cases (dual-ctl or pp-split), skip setting the
+ * flush bit for the slave intf, since both intfs use same ctl
+ * and HW will only flush the master.
+ */
+ if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
+ !dpu_encoder_phys_vid_is_master(phys_enc))
+ goto skip_flush;
+
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+ ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+ DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
+ ctl->idx - CTL_0, flush_mask);
+
+ /* ctl_flush & timing engine enable will be triggered by framework */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED)
+ phys_enc->enable_state = DPU_ENC_ENABLING;
+}
+
+static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+ kfree(vid_enc);
+}
+
+static void dpu_encoder_phys_vid_get_hw_resources(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc || !hw_res) {
+ DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
+ phys_enc != NULL, hw_res != NULL, conn_state != NULL);
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf) {
+ DPU_ERROR("invalid arg(s), hw_intf\n");
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+ hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+}
+
+static int _dpu_encoder_phys_vid_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc, bool notify)
+{
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc) {
+ pr_err("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ if (!dpu_encoder_phys_vid_is_master(phys_enc)) {
+ if (notify && phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_DONE);
+ return 0;
+ }
+
+ /* Wait for kickoff to complete */
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
+ &wait_info);
+
+ if (ret == -ETIMEDOUT) {
+ dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
+ } else if (!ret && notify && phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_DONE);
+
+ return ret;
+}
+
+static int dpu_encoder_phys_vid_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, true);
+}
+
+static void dpu_encoder_phys_vid_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct dpu_hw_ctl *ctl;
+ int rc;
+
+ if (!phys_enc || !params) {
+ DPU_ERROR("invalid encoder/parameters\n");
+ return;
+ }
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ ctl = phys_enc->hw_ctl;
+ if (!ctl || !ctl->ops.wait_reset_status)
+ return;
+
+ /*
+ * hw supports hardware-initiated ctl reset, so before we kick off a new
+ * frame we need to check for, and wait on, completion of any hw-initiated
+ * ctl reset
+ */
+ rc = ctl->ops.wait_reset_status(ctl);
+ if (rc) {
+ DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
+ ctl->idx, rc);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+}
+
+static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct msm_drm_private *priv;
+ struct dpu_encoder_phys_vid *vid_enc;
+ unsigned long lock_flags;
+ int ret;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+ !phys_enc->parent->dev->dev_private) {
+ DPU_ERROR("invalid encoder/device\n");
+ return;
+ }
+ priv = phys_enc->parent->dev->dev_private;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+ DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+ vid_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ return;
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR("already disabled\n");
+ return;
+ }
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+ if (dpu_encoder_phys_vid_is_master(phys_enc))
+ dpu_encoder_phys_inc_pending(phys_enc);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /*
+ * Wait for a vsync so we know the ENABLE=0 latched before
+ * the (connector) source of the vsyncs gets disabled,
+ * otherwise we end up in a bad state if we re-enable
+ * before the disable latches, which can result in some of
+ * the settings for the new modeset (like the new
+ * scanout buffer) not latching properly.
+ */
+ if (dpu_encoder_phys_vid_is_master(phys_enc)) {
+ ret = _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, false);
+ if (ret) {
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
+ DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0, ret);
+ }
+ }
+
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_vid_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ unsigned long lock_flags;
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
+
+ /*
+ * Video mode must flush CTL before enabling timing engine
+ * Video encoders need to turn on their interfaces now
+ */
+ if (phys_enc->enable_state == DPU_ENC_ENABLING) {
+ trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0);
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+ }
+}
+
+static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ int ret;
+
+ if (!phys_enc)
+ return;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0,
+ enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable) {
+ ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+ if (ret)
+ return;
+
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+ } else {
+ dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+ }
+}
+
+static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc,
+ bool enable, u32 frame_count)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
+ vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
+ enable, frame_count);
+}
+
+static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return 0;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
+ vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
+}
+
+static int dpu_encoder_phys_vid_get_line_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ return -EINVAL;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+ return -EINVAL;
+
+ return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+}
+
+static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_vid_is_master;
+ ops->mode_set = dpu_encoder_phys_vid_mode_set;
+ ops->mode_fixup = dpu_encoder_phys_vid_mode_fixup;
+ ops->enable = dpu_encoder_phys_vid_enable;
+ ops->disable = dpu_encoder_phys_vid_disable;
+ ops->destroy = dpu_encoder_phys_vid_destroy;
+ ops->get_hw_resources = dpu_encoder_phys_vid_get_hw_resources;
+ ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->irq_control = dpu_encoder_phys_vid_irq_control;
+ ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
+ ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
+ ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
+ ops->setup_misr = dpu_encoder_phys_vid_setup_misr;
+ ops->collect_misr = dpu_encoder_phys_vid_collect_misr;
+ ops->hw_reset = dpu_encoder_helper_hw_reset;
+ ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_vid *vid_enc = NULL;
+ struct dpu_rm_hw_iter iter;
+ struct dpu_hw_mdp *hw_mdp;
+ struct dpu_encoder_irq *irq;
+ int i, ret = 0;
+
+ if (!p) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
+ if (!vid_enc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phys_enc = &vid_enc->base;
+
+ hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+ if (IS_ERR_OR_NULL(hw_mdp)) {
+ ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL;
+ DPU_ERROR("failed to get mdptop\n");
+ goto fail;
+ }
+ phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->intf_idx = p->intf_idx;
+
+ /*
+ * The hw_intf resource is permanently assigned to this encoder;
+ * other resources are allocated at atomic commit time by use case.
+ */
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF);
+ while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) {
+ struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
+
+ if (hw_intf->idx == p->intf_idx) {
+ vid_enc->hw_intf = hw_intf;
+ break;
+ }
+ }
+
+ if (!vid_enc->hw_intf) {
+ ret = -EINVAL;
+ DPU_ERROR("failed to get hw_intf\n");
+ goto fail;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_VIDEO;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+ for (i = 0; i < INTR_IDX_MAX; i++) {
+ irq = &phys_enc->irq[i];
+ INIT_LIST_HEAD(&irq->cb.list);
+ irq->irq_idx = -EINVAL;
+ irq->hw_idx = -EINVAL;
+ irq->cb.arg = phys_enc;
+ }
+
+ irq = &phys_enc->irq[INTR_IDX_VSYNC];
+ irq->name = "vsync_irq";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_VSYNC;
+ irq->intr_idx = INTR_IDX_VSYNC;
+ irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->name = "underrun";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+ irq->intr_idx = INTR_IDX_UNDERRUN;
+ irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
+
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+
+ DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+
+ return phys_enc;
+
+fail:
+ DPU_ERROR("failed to create encoder\n");
+ if (vid_enc)
+ dpu_encoder_phys_vid_destroy(phys_enc);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
new file mode 100644
index 000000000000..bfcd165e96df
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -0,0 +1,1173 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <uapi/drm/drm_fourcc.h>
+
+#include "msm_media_info.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+
+#define DPU_UBWC_META_MACRO_W_H 16
+#define DPU_UBWC_META_BLOCK_SIZE 256
+#define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096
+
+#define DPU_TILE_HEIGHT_DEFAULT 1
+#define DPU_TILE_HEIGHT_TILED 4
+#define DPU_TILE_HEIGHT_UBWC 4
+#define DPU_TILE_HEIGHT_NV12 8
+
+#define DPU_MAX_IMG_WIDTH 0x3FFF
+#define DPU_MAX_IMG_HEIGHT 0x3FFF
+
+/**
+ * DPU supported format packing, bpp, and other format
+ * information.
+ * DPU currently only supports interleaved RGB formats.
+ * UBWC support for a pixel format is indicated by the flag;
+ * such formats carry an additional metadata plane.
+ */
+
+#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \
+bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = DPU_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
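+
+/*
+ * Illustrative expansion (editor's sketch, not compiled into the driver):
+ * the ARGB8888 entry from dpu_format_map below expands roughly to the
+ * following initializer; note how the (a, r, g, b) macro arguments land in
+ * the .bits = { g, b, r, a } field (all COLOR_8BIT for this format):
+ *
+ *	{
+ *		.base.pixel_format = DRM_FORMAT_ARGB8888,
+ *		.fetch_planes = DPU_PLANE_INTERLEAVED,
+ *		.alpha_enable = true,
+ *		.element = { C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA },
+ *		.bits = { COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT },
+ *		.chroma_sample = DPU_CHROMA_RGB,
+ *		.unpack_align_msb = 0,
+ *		.unpack_tight = 1,
+ *		.unpack_count = 4,
+ *		.bpp = 4,
+ *		.fetch_mode = DPU_FETCH_LINEAR,
+ *		.flag = {(0)},
+ *		.num_planes = 1,
+ *		.tile_height = DPU_TILE_HEIGHT_DEFAULT
+ *	}
+ */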
+
+#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \
+alpha, bp, flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = DPU_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
+#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \
+alpha, chroma, count, bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3)}, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = count, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
+#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \
+flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PLANAR, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 1, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+/*
+ * struct dpu_media_color_map - maps drm format to media format
+ * @format: DRM base pixel format
+ * @color: Media API color related to DRM format
+ */
+struct dpu_media_color_map {
+ uint32_t format;
+ uint32_t color;
+};
+
+static const struct dpu_format dpu_format_map[] = {
+ INTERLEAVED_RGB_FMT(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 3, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 3, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ PSEUDO_YUV_FMT(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV16,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV61,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(VYUY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(UYVY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YUYV,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YVYU,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PLANAR_YUV_FMT(YUV420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb, C0_G_Y,
+ false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 3),
+
+ PLANAR_YUV_FMT(YVU420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr, C0_G_Y,
+ false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 3),
+};
+
+/*
+ * A5x tile format table:
+ * This table holds the tile formats supported on A5x.
+ */
+static const struct dpu_format dpu_format_map_tile[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+
+ PSEUDO_YUV_FMT_TILED(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+};
+
+/*
+ * UBWC formats table:
+ * This table holds the UBWC formats supported.
+ * If a compression ratio needs to be used for this or any other format,
+ * the data will be passed by user-space.
+ */
+static const struct dpu_format dpu_format_map_ubwc[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV |
+ DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_p010[] = {
+ PSEUDO_YUV_FMT_LOOSE(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX),
+ DPU_FETCH_LINEAR, 2),
+};
+
+static const struct dpu_format dpu_format_map_p010_ubwc[] = {
+ PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
+ DPU_FORMAT_FLAG_COMPRESSED),
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_tp10_ubwc[] = {
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
+ DPU_FORMAT_FLAG_COMPRESSED),
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+/* _dpu_get_v_h_subsample_rate - Get subsample rates for all formats we support
+ * Note: Not using the drm_format_*_subsampling helpers since we support
+ * formats they do not describe
+ */
+static void _dpu_get_v_h_subsample_rate(
+ enum dpu_chroma_samp_type chroma_sample,
+ uint32_t *v_sample,
+ uint32_t *h_sample)
+{
+ if (!v_sample || !h_sample)
+ return;
+
+ switch (chroma_sample) {
+ case DPU_CHROMA_H2V1:
+ *v_sample = 1;
+ *h_sample = 2;
+ break;
+ case DPU_CHROMA_H1V2:
+ *v_sample = 2;
+ *h_sample = 1;
+ break;
+ case DPU_CHROMA_420:
+ *v_sample = 2;
+ *h_sample = 2;
+ break;
+ default:
+ *v_sample = 1;
+ *h_sample = 1;
+ break;
+ }
+}
+
+static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
+{
+ static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
+ {DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
+ };
+ int color_fmt = -1;
+ int i;
+
+ if (fmt->base.pixel_format == DRM_FORMAT_NV12) {
+ if (DPU_FORMAT_IS_DX(fmt)) {
+ if (fmt->unpack_tight)
+ color_fmt = COLOR_FMT_NV12_BPP10_UBWC;
+ else
+ color_fmt = COLOR_FMT_P010_UBWC;
+ } else {
+ color_fmt = COLOR_FMT_NV12_UBWC;
+ }
+ return color_fmt;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_media_ubwc_map); ++i)
+ if (fmt->base.pixel_format == dpu_media_ubwc_map[i].format) {
+ color_fmt = dpu_media_ubwc_map[i].color;
+ break;
+ }
+ return color_fmt;
+}
+
+static int _dpu_format_get_plane_sizes_ubwc(
+ const struct dpu_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct dpu_hw_fmt_layout *layout)
+{
+ int i;
+ int color;
+ bool meta = DPU_FORMAT_IS_UBWC(fmt);
+
+ memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ color = _dpu_format_get_media_color_ubwc(fmt);
+ if (color < 0) {
+ DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
+ (char *)&fmt->base.pixel_format);
+ return -EINVAL;
+ }
+
+ if (DPU_FORMAT_IS_YUV(layout->format)) {
+ uint32_t y_sclines, uv_sclines;
+ uint32_t y_meta_scanlines = 0;
+ uint32_t uv_meta_scanlines = 0;
+
+ layout->num_planes = 2;
+ layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
+ y_sclines = VENUS_Y_SCANLINES(color, height);
+ layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+ y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
+ uv_sclines = VENUS_UV_SCANLINES(color, height);
+ layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
+ uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ if (!meta)
+ goto done;
+
+ layout->num_planes += 2;
+ layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
+ layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+ y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
+ layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
+ uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ } else {
+ uint32_t rgb_scanlines, rgb_meta_scanlines;
+
+ layout->num_planes = 1;
+
+ layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
+ layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+ rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ if (!meta)
+ goto done;
+ layout->num_planes += 2;
+ layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
+ layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+ rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+ }
+
+done:
+ for (i = 0; i < DPU_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
+
+static int _dpu_format_get_plane_sizes_linear(
+ const struct dpu_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct dpu_hw_fmt_layout *layout,
+ const uint32_t *pitches)
+{
+ int i;
+
+ memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ /* Due to memset above, only need to set planes of interest */
+ if (fmt->fetch_planes == DPU_PLANE_INTERLEAVED) {
+ layout->num_planes = 1;
+ layout->plane_size[0] = width * height * layout->format->bpp;
+ layout->plane_pitch[0] = width * layout->format->bpp;
+ } else {
+ uint32_t v_subsample, h_subsample;
+ uint32_t chroma_samp;
+ uint32_t bpp = 1;
+
+ chroma_samp = fmt->chroma_sample;
+ _dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample,
+ &h_subsample);
+
+ if (width % h_subsample || height % v_subsample) {
+ DRM_ERROR("mismatch in subsample vs dimensions\n");
+ return -EINVAL;
+ }
+
+ if ((fmt->base.pixel_format == DRM_FORMAT_NV12) &&
+ (DPU_FORMAT_IS_DX(fmt)))
+ bpp = 2;
+ layout->plane_pitch[0] = width * bpp;
+ layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
+ layout->plane_size[0] = layout->plane_pitch[0] * height;
+ layout->plane_size[1] = layout->plane_pitch[1] *
+ (height / v_subsample);
+
+ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ layout->num_planes = 2;
+ layout->plane_size[1] *= 2;
+ layout->plane_pitch[1] *= 2;
+ } else {
+ /* planar */
+ layout->num_planes = 3;
+ layout->plane_size[2] = layout->plane_size[1];
+ layout->plane_pitch[2] = layout->plane_pitch[1];
+ }
+ }
+
+ /*
+ * linear format: allow user allocated pitches if they are greater than
+ * the requirement.
+ * ubwc format: pitch values are computed uniformly across
+ * all the components based on ubwc specifications.
+ */
+ for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) {
+ if (pitches && layout->plane_pitch[i] < pitches[i])
+ layout->plane_pitch[i] = pitches[i];
+ }
+
+ for (i = 0; i < DPU_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
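+
+/*
+ * Worked example (editor's sketch, assuming a linear NV12 1920x1080 fb and
+ * no user-supplied pitches): NV12 is pseudo planar with 4:2:0 chroma, so
+ * h_subsample = v_subsample = 2 and bpp = 1, giving
+ *
+ *	plane_pitch[0] = 1920 * 1 = 1920
+ *	plane_pitch[1] = (1920 / 2) * 2 = 1920	(interleaved CbCr)
+ *	plane_size[0]  = 1920 * 1080 = 2073600
+ *	plane_size[1]  = 960 * (1080 / 2) * 2 = 1036800
+ *	total_size     = 3110400 = width * height * 3 / 2
+ */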
+
+static int dpu_format_get_plane_sizes(
+ const struct dpu_format *fmt,
+ const uint32_t w,
+ const uint32_t h,
+ struct dpu_hw_fmt_layout *layout,
+ const uint32_t *pitches)
+{
+ if (!layout || !fmt) {
+ DRM_ERROR("invalid pointer\n");
+ return -EINVAL;
+ }
+
+ if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ if (DPU_FORMAT_IS_UBWC(fmt) || DPU_FORMAT_IS_TILE(fmt))
+ return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout);
+
+ return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
+}
+
+static int _dpu_format_populate_addrs_ubwc(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ uint32_t base_addr = 0;
+ bool meta;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid pointers\n");
+ return -EINVAL;
+ }
+
+ if (aspace)
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
+ if (!base_addr) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+
+ meta = DPU_FORMAT_IS_UBWC(layout->format);
+
+ /* Per-format logic for verifying active planes */
+ if (DPU_FORMAT_IS_YUV(layout->format)) {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** DPU PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | Y meta | ** | Y bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Y bitstream | ** | CbCr bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | CbCr metadata | ** | Y meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | CbCr bitstream | ** | CbCr meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /************************************************/
+
+ /* configure Y bitstream plane */
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+
+ /* configure CbCr bitstream plane */
+ layout->plane_addr[1] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2] + layout->plane_size[3];
+
+ if (!meta)
+ goto done;
+
+ /* configure Y metadata plane */
+ layout->plane_addr[2] = base_addr;
+
+ /* configure CbCr metadata plane */
+ layout->plane_addr[3] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2];
+
+ } else {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** DPU PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | RGB meta | ** | RGB bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | RGB bitstream | ** | NONE | */
+ /* | data | ** | | */
+ /* ------------------- ** -------------------- */
+ /* ** | RGB meta | */
+ /* ** | plane | */
+ /* ** -------------------- */
+ /************************************************/
+
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+ layout->plane_addr[1] = 0;
+
+ if (!meta)
+ goto done;
+
+ layout->plane_addr[2] = base_addr;
+ layout->plane_addr[3] = 0;
+ }
+done:
+ return 0;
+}
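+
+/*
+ * Worked example (editor's sketch): for a UBWC NV12 buffer with base
+ * address B and plane sizes s0 (Y bitstream), s1 (CbCr bitstream),
+ * s2 (Y meta) and s3 (CbCr meta), the code above yields
+ *
+ *	plane_addr[2] (Y meta)         = B
+ *	plane_addr[0] (Y bitstream)    = B + s2
+ *	plane_addr[3] (CbCr meta)      = B + s2 + s0
+ *	plane_addr[1] (CbCr bitstream) = B + s2 + s0 + s3
+ *
+ * which is exactly the top-to-bottom buffer order in the diagram.
+ */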
+
+static int _dpu_format_populate_addrs_linear(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ unsigned int i;
+
+ /* Can now check the pitches given vs pitches expected */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (layout->plane_pitch[i] > fb->pitches[i]) {
+ DRM_ERROR("plane %u expected pitch %u, fb %u\n",
+ i, layout->plane_pitch[i], fb->pitches[i]);
+ return -EINVAL;
+ }
+ }
+
+ /* Populate addresses for simple formats here */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (aspace)
+ layout->plane_addr[i] =
+ msm_framebuffer_iova(fb, aspace, i);
+ if (!layout->plane_addr[i]) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ uint32_t plane_addr[DPU_MAX_PLANES];
+ int i, ret;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ if ((fb->width > DPU_MAX_IMG_WIDTH) ||
+ (fb->height > DPU_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ layout->format = to_dpu_format(msm_framebuffer_format(fb));
+
+ /* Populate the plane sizes etc via get_format */
+ ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height,
+ layout, fb->pitches);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DPU_MAX_PLANES; ++i)
+ plane_addr[i] = layout->plane_addr[i];
+
+ /* Populate the addresses given the fb */
+ if (DPU_FORMAT_IS_UBWC(layout->format) ||
+ DPU_FORMAT_IS_TILE(layout->format))
+ ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
+ else
+ ret = _dpu_format_populate_addrs_linear(aspace, fb, layout);
+
+ /* check if anything changed */
+ if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
+ ret = -EAGAIN;
+
+ return ret;
+}
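+
+/*
+ * Illustrative caller sketch (editor's addition; the surrounding update
+ * path is hypothetical): -EAGAIN reports that the populated addresses are
+ * unchanged, so a plane-update path may treat it as "nothing to reprogram":
+ *
+ *	ret = dpu_format_populate_layout(aspace, fb, &layout);
+ *	if (ret == -EAGAIN)
+ *		return 0;	(addresses unchanged)
+ *	if (ret)
+ *		return ret;
+ */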
+
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos)
+{
+ int ret, i, num_base_fmt_planes;
+ const struct dpu_format *fmt;
+ struct dpu_hw_fmt_layout layout;
+ uint32_t bos_total_size = 0;
+
+ if (!msm_fmt || !cmd || !bos) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ fmt = to_dpu_format(msm_fmt);
+ num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
+
+ ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height,
+ &layout, cmd->pitches);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_base_fmt_planes; i++) {
+ if (!bos[i]) {
+ DRM_ERROR("invalid handle for plane %d\n", i);
+ return -EINVAL;
+ }
+ if ((i == 0) || (bos[i] != bos[0]))
+ bos_total_size += bos[i]->size;
+ }
+
+ if (bos_total_size < layout.total_size) {
+ DRM_ERROR("buffers total size too small %u expected %u\n",
+ bos_total_size, layout.total_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier)
+{
+ uint32_t i = 0;
+ const struct dpu_format *fmt = NULL;
+ const struct dpu_format *map = NULL;
+ ssize_t map_size = 0;
+
+ /*
+ * Currently only support exactly zero or one modifier.
+ * All planes use the same modifier.
+ */
+ DPU_DEBUG("plane format modifier 0x%llX\n", modifier);
+
+ switch (modifier) {
+ case 0:
+ map = dpu_format_map;
+ map_size = ARRAY_SIZE(dpu_format_map);
+ break;
+ case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+ map = dpu_format_map_ubwc;
+ map_size = ARRAY_SIZE(dpu_format_map_ubwc);
+ DPU_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+ (char *)&format);
+ break;
+ default:
+ DPU_ERROR("unsupported format modifier %llX\n", modifier);
+ return NULL;
+ }
+
+ for (i = 0; i < map_size; i++) {
+ if (format == map[i].base.pixel_format) {
+ fmt = &map[i];
+ break;
+ }
+ }
+
+ if (fmt == NULL)
+ DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
+ (char *)&format, modifier);
+ else
+ DPU_DEBUG("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+ (char *)&format, modifier,
+ DPU_FORMAT_IS_UBWC(fmt),
+ DPU_FORMAT_IS_YUV(fmt));
+
+ return fmt;
+}
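+
+/*
+ * Illustrative lookup sketch (editor's addition): resolving the UBWC NV12
+ * entry from dpu_format_map_ubwc; a zero modifier would instead search the
+ * linear dpu_format_map:
+ *
+ *	const struct dpu_format *fmt;
+ *
+ *	fmt = dpu_get_dpu_format_ext(DRM_FORMAT_NV12,
+ *				     DRM_FORMAT_MOD_QCOM_COMPRESSED);
+ *	if (!fmt)
+ *		return -EINVAL;
+ */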
+
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers)
+{
+ const struct dpu_format *fmt = dpu_get_dpu_format_ext(format,
+ modifiers);
+ if (fmt)
+ return &fmt->base;
+ return NULL;
+}
+
+uint32_t dpu_populate_formats(
+ const struct dpu_format_extended *format_list,
+ uint32_t *pixel_formats,
+ uint64_t *pixel_modifiers,
+ uint32_t pixel_formats_max)
+{
+ uint32_t i, fourcc_format;
+
+ if (!format_list || !pixel_formats)
+ return 0;
+
+ for (i = 0, fourcc_format = 0;
+ format_list->fourcc_format && i < pixel_formats_max;
+ ++format_list) {
+ /* TODO: verify that the listed format is in dpu_format_map */
+
+ /* optionally return modified formats */
+ if (pixel_modifiers) {
+ /* assume same modifier for all fb planes */
+ pixel_formats[i] = format_list->fourcc_format;
+ pixel_modifiers[i++] = format_list->modifier;
+ } else {
+ /* assume base formats grouped together */
+ if (fourcc_format != format_list->fourcc_format) {
+ fourcc_format = format_list->fourcc_format;
+ pixel_formats[i++] = fourcc_format;
+ }
+ }
+ }
+
+ return i;
+}
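+
+/*
+ * Illustrative usage sketch (editor's addition; the caller, array sizes and
+ * variable names are hypothetical): a plane init path could gather the
+ * supported fourcc/modifier pairs like so:
+ *
+ *	uint32_t formats[64];
+ *	uint64_t modifiers[64];
+ *	uint32_t nformats;
+ *
+ *	nformats = dpu_populate_formats(sblk->format_list, formats,
+ *					modifiers, ARRAY_SIZE(formats));
+ */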
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
new file mode 100644
index 000000000000..a54451d8d011
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_FORMATS_H
+#define _DPU_FORMATS_H
+
+#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * dpu_get_dpu_format_ext() - Returns dpu format structure pointer.
+ * @format: DRM FourCC Code
+ * @modifier: format modifier from client; all planes use the same modifier
+ */
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier);
+
+#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
+
+/**
+ * dpu_get_msm_format - get the msm_format base of a dpu_format
+ * This callback function is registered with the msm_kms layer.
+ * @kms: kms driver
+ * @format: DRM FourCC Code
+ * @modifiers: data layout modifier
+ */
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers);
+
+/**
+ * dpu_populate_formats - populate the given array with fourcc codes supported
+ * @format_list: pointer to list of possible formats
+ * @pixel_formats: array to populate with fourcc codes
+ * @pixel_modifiers: array to populate with drm modifiers, can be NULL
+ * @pixel_formats_max: length of pixel formats array
+ * Return: number of elements populated
+ */
+uint32_t dpu_populate_formats(
+ const struct dpu_format_extended *format_list,
+ uint32_t *pixel_formats,
+ uint64_t *pixel_modifiers,
+ uint32_t pixel_formats_max);
+
+/**
+ * dpu_format_check_modified_format - validate format and buffers for
+ * a dpu non-standard, i.e. modified, format
+ * @kms: kms driver
+ * @msm_fmt: pointer to the msm_format base of a dpu_format
+ * @cmd: fb_cmd2 structure user request
+ * @bos: gem buffer object list
+ *
+ * Return: error code on failure, 0 on success
+ */
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
+
+/**
+ * dpu_format_populate_layout - populate the given format layout based on
+ * mmu, fb, and format found in the fb
+ * @aspace: address space pointer
+ * @fb: framebuffer pointer
+ * @fmtl: format layout structure to populate
+ *
+ * Return: error code on failure, -EAGAIN if success but the addresses
+ * are the same as before or 0 if new addresses were populated
+ */
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *fmtl);
+
+#endif /*_DPU_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
new file mode 100644
index 000000000000..58d29e43faef
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
@@ -0,0 +1,155 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_blk.h"
+
+/* Serialization lock for dpu_hw_blk_list */
+static DEFINE_MUTEX(dpu_hw_blk_lock);
+
+/* List of all hw block objects */
+static LIST_HEAD(dpu_hw_blk_list);
+
+/**
+ * dpu_hw_blk_init - initialize hw block object
+ * @hw_blk: pointer to hw block object to initialize
+ * @type: hw block type - enum dpu_hw_blk_type
+ * @id: instance id of the hw block
+ * @ops: Pointer to block operations
+ * return: 0 if success; error code otherwise
+ */
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+ struct dpu_hw_blk_ops *ops)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&hw_blk->list);
+ hw_blk->type = type;
+ hw_blk->id = id;
+ atomic_set(&hw_blk->refcount, 0);
+
+ if (ops)
+ hw_blk->ops = *ops;
+
+ mutex_lock(&dpu_hw_blk_lock);
+ list_add(&hw_blk->list, &dpu_hw_blk_list);
+ mutex_unlock(&dpu_hw_blk_lock);
+
+ return 0;
+}
+
+/**
+ * dpu_hw_blk_destroy - destroy hw block object.
+ * @hw_blk: pointer to hw block object
+ * return: none
+ */
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return;
+ }
+
+ if (atomic_read(&hw_blk->refcount))
+ pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
+ hw_blk->id);
+
+ mutex_lock(&dpu_hw_blk_lock);
+ list_del(&hw_blk->list);
+ mutex_unlock(&dpu_hw_blk_lock);
+}
+
+/**
+ * dpu_hw_blk_get - get hw_blk from free pool
+ * @hw_blk: if specified, increment reference count only
+ * @type: if hw_blk is not specified, allocate the next available of this type
+ * @id: if specified (>= 0), allocate the given instance of the above type
+ * return: pointer to hw block object
+ */
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id)
+{
+ struct dpu_hw_blk *curr;
+ int rc, refcount;
+
+ if (!hw_blk) {
+ mutex_lock(&dpu_hw_blk_lock);
+ list_for_each_entry(curr, &dpu_hw_blk_list, list) {
+ if ((curr->type != type) ||
+ (id >= 0 && curr->id != id) ||
+ (id < 0 &&
+ atomic_read(&curr->refcount)))
+ continue;
+
+ hw_blk = curr;
+ break;
+ }
+ mutex_unlock(&dpu_hw_blk_lock);
+ }
+
+ if (!hw_blk) {
+ pr_debug("no hw_blk:%d\n", type);
+ return NULL;
+ }
+
+ refcount = atomic_inc_return(&hw_blk->refcount);
+
+ if (refcount == 1 && hw_blk->ops.start) {
+ rc = hw_blk->ops.start(hw_blk);
+ if (rc) {
+ pr_err("failed to start hw_blk:%d rc:%d\n", type, rc);
+ goto error_start;
+ }
+ }
+
+ pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
+ hw_blk->id, refcount);
+ return hw_blk;
+
+error_start:
+ dpu_hw_blk_put(hw_blk);
+ return ERR_PTR(rc);
+}
+
+/**
+ * dpu_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
+ * @hw_blk: hw block to be freed
+ */
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return;
+ }
+
+ pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
+ atomic_read(&hw_blk->refcount));
+
+ if (!atomic_read(&hw_blk->refcount)) {
+ pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
+ return;
+ }
+
+ if (atomic_dec_return(&hw_blk->refcount))
+ return;
+
+ if (hw_blk->ops.stop)
+ hw_blk->ops.stop(hw_blk);
+}
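+
+/*
+ * Illustrative lifecycle sketch (editor's addition, hypothetical caller):
+ * a block is registered once with init, handed out by type via get (which
+ * calls ops.start on the first reference), and released with put (which
+ * calls ops.stop on the last reference):
+ *
+ *	static struct dpu_hw_blk blk;
+ *	struct dpu_hw_blk *hw;
+ *
+ *	dpu_hw_blk_init(&blk, DPU_HW_BLK_INTF, 0, NULL);
+ *	hw = dpu_hw_blk_get(NULL, DPU_HW_BLK_INTF, -1);
+ *	...
+ *	dpu_hw_blk_put(hw);
+ *	dpu_hw_blk_destroy(&blk);
+ */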
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
new file mode 100644
index 000000000000..0f4ca8af1ec5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_BLK_H
+#define _DPU_HW_BLK_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+
+struct dpu_hw_blk;
+
+/**
+ * struct dpu_hw_blk_ops - common hardware block operations
+ * @start: start operation on first get
+ * @stop: stop operation on last put
+ */
+struct dpu_hw_blk_ops {
+ int (*start)(struct dpu_hw_blk *);
+ void (*stop)(struct dpu_hw_blk *);
+};
+
+/**
+ * struct dpu_hw_blk - definition of hardware block object
+ * @list: list of hardware blocks
+ * @type: hardware block type
+ * @id: instance id
+ * @refcount: reference/usage count
+ */
+struct dpu_hw_blk {
+ struct list_head list;
+ u32 type;
+ int id;
+ atomic_t refcount;
+ struct dpu_hw_blk_ops ops;
+};
+
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+ struct dpu_hw_blk_ops *ops);
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
+
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id);
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk);
+#endif /*_DPU_HW_BLK_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
new file mode 100644
index 000000000000..44ee06398b1d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -0,0 +1,511 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_kms.h"
+
+#define VIG_SDM845_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\
+ BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define DMA_SDM845_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+ BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define MIXER_SDM845_MASK \
+ (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+
+#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
+
+#define PINGPONG_SDM845_SPLIT_MASK \
+ (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+
+#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
+#define DEFAULT_DPU_LINE_WIDTH 2048
+#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
+
+#define MAX_HORZ_DECIMATION 4
+#define MAX_VERT_DECIMATION 4
+
+#define MAX_UPSCALE_RATIO 20
+#define MAX_DOWNSCALE_RATIO 4
+#define SSPP_UNITY_SCALE 1
+
+#define STRCAT(X, Y) (X Y)
+
+/*************************************************************
+ * DPU sub blocks config
+ *************************************************************/
+/* DPU top level caps */
+static const struct dpu_caps sdm845_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+ .ubwc_version = DPU_HW_UBWC_VER_20,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+};
+
+static struct dpu_mdp_cfg sdm845_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x45C,
+ .features = 0,
+ .highest_bank_bit = 0x2,
+ .has_dest_scaler = true,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+ .reg_off = 0x2B4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+ .reg_off = 0x2BC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+ .reg_off = 0x2C4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2BC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2C4, .bit_off = 8},
+ },
+};
+
+/*************************************************************
+ * CTL sub blocks config
+ *************************************************************/
+static struct dpu_ctl_cfg sdm845_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0xE4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY)
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0xE4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY)
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0xE4,
+ .features = 0
+ },
+ {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0xE4,
+ .features = 0
+ },
+ {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0xE4,
+ .features = 0
+ },
+};
+
+/*************************************************************
+ * SSPP sub blocks config
+ *************************************************************/
+
+/* SSPP common configuration */
+static const struct dpu_sspp_blks_common sdm845_sspp_common = {
+ .maxlinewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .maxhdeciexp = MAX_HORZ_DECIMATION,
+ .maxvdeciexp = MAX_VERT_DECIMATION,
+};
+
+#define _VIG_SBLK(num, sdma_pri) \
+ { \
+ .common = &sdm845_sspp_common, \
+ .maxdwnscale = MAX_DOWNSCALE_RATIO, \
+ .maxupscale = MAX_UPSCALE_RATIO, \
+ .smart_dma_priority = sdma_pri, \
+ .src_blk = {.name = STRCAT("sspp_src_", num), \
+ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+ .scaler_blk = {.name = STRCAT("sspp_scaler", num), \
+ .id = DPU_SSPP_SCALER_QSEED3, \
+ .base = 0xa00, .len = 0xa0,}, \
+ .csc_blk = {.name = STRCAT("sspp_csc", num), \
+ .id = DPU_SSPP_CSC_10BIT, \
+ .base = 0x1a00, .len = 0x100,}, \
+ .format_list = plane_formats_yuv, \
+ .virt_format_list = plane_formats, \
+ }
+
+#define _DMA_SBLK(num, sdma_pri) \
+ { \
+ .common = &sdm845_sspp_common, \
+ .maxdwnscale = SSPP_UNITY_SCALE, \
+ .maxupscale = SSPP_UNITY_SCALE, \
+ .smart_dma_priority = sdma_pri, \
+ .src_blk = {.name = STRCAT("sspp_src_", num), \
+ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+ .format_list = plane_formats, \
+ .virt_format_list = plane_formats, \
+ }
+
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8);
+
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
+
+#define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1c8, \
+ .features = VIG_SDM845_MASK, \
+ .sblk = &_sblk, \
+ .xin_id = _xinid, \
+ .type = SSPP_TYPE_VIG, \
+ .clk_ctrl = _clkctrl \
+ }
+
+#define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1c8, \
+ .features = DMA_SDM845_MASK, \
+ .sblk = &_sblk, \
+ .xin_id = _xinid, \
+ .type = SSPP_TYPE_DMA, \
+ .clk_ctrl = _clkctrl \
+ }
+
+static struct dpu_sspp_cfg sdm845_sspp[] = {
+ SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000,
+ sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0),
+ SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000,
+ sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1),
+ SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000,
+ sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2),
+ SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000,
+ sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3),
+ SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000,
+ sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0),
+ SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000,
+ sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1),
+ SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000,
+ sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0),
+ SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000,
+ sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1),
+};
+
+/*************************************************************
+ * MIXER sub blocks config
+ *************************************************************/
+static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 11, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
+ 0xb0, 0xc8, 0xe0, 0xf8, 0x110
+ },
+};
+
+#define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x320, \
+ .features = MIXER_SDM845_MASK, \
+ .sblk = &sdm845_lm_sblk, \
+ .ds = _ds, \
+ .pingpong = _pp, \
+ .lm_pair_mask = (1 << _lmpair) \
+ }
+
+static struct dpu_lm_cfg sdm845_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1),
+ LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0),
+ LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5),
+ LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0),
+ LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2),
+};
+
+/*************************************************************
+ * DS sub blocks config
+ *************************************************************/
+static const struct dpu_ds_top_cfg sdm845_ds_top = {
+ .name = "ds_top_0", .id = DS_TOP,
+ .base = 0x60000, .len = 0xc,
+ .maxinputwidth = DEFAULT_DPU_LINE_WIDTH,
+ .maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxupscale = MAX_UPSCALE_RATIO,
+};
+
+#define DS_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x800, \
+ .features = DPU_SSPP_SCALER_QSEED3, \
+ .top = &sdm845_ds_top \
+ }
+
+static struct dpu_ds_cfg sdm845_ds[] = {
+ DS_BLK("ds_0", DS_0, 0x800),
+ DS_BLK("ds_1", DS_1, 0x1000),
+};
+
+/*************************************************************
+ * PINGPONG sub blocks config
+ *************************************************************/
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
+ .te2 = {.id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+ .version = 0x1},
+ .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .len = 0x20, .version = 0x10000},
+};
+
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
+ .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .len = 0x20, .version = 0x10000},
+};
+
+#define PP_BLK_TE(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0xd4, \
+ .features = PINGPONG_SDM845_SPLIT_MASK, \
+ .sblk = &sdm845_pp_sblk_te \
+ }
+#define PP_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0xd4, \
+ .features = PINGPONG_SDM845_MASK, \
+ .sblk = &sdm845_pp_sblk \
+ }
+
+static struct dpu_pingpong_cfg sdm845_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+ PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
+ PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
+};
+
+/*************************************************************
+ * INTF sub blocks config
+ *************************************************************/
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x280, \
+ .type = _type, \
+ .controller_id = _ctrl_id, \
+ .prog_fetch_lines_worst_case = 24 \
+ }
+
+static struct dpu_intf_cfg sdm845_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
+};
+
+/*************************************************************
+ * CDM sub blocks config
+ *************************************************************/
+static struct dpu_cdm_cfg sdm845_cdm[] = {
+ {
+ .name = "cdm_0", .id = CDM_0,
+ .base = 0x79200, .len = 0x224,
+ .features = 0,
+ .intf_connect = BIT(INTF_3),
+ },
+};
+
+/*************************************************************
+ * VBIF sub blocks config
+ *************************************************************/
+/* VBIF QOS remap */
+static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+
+static struct dpu_vbif_cfg sdm845_vbif[] = {
+ {
+ .name = "vbif_0", .id = VBIF_0,
+ .base = 0, .len = 0x1040,
+ .features = BIT(DPU_VBIF_QOS_REMAP),
+ .xin_halt_timeout = 0x4000,
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
+ .priority_lvl = sdm845_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+ .priority_lvl = sdm845_nrt_pri_lvl,
+ },
+ .memtype_count = 14,
+ .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+ },
+};
+
+static struct dpu_reg_dma_cfg sdm845_regdma = {
+ .base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
+};
+
+/*************************************************************
+ * PERF data config
+ *************************************************************/
+
+/* SSPP QOS LUTs */
+static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+ {.fl = 4, .lut = 0x357},
+ {.fl = 5, .lut = 0x3357},
+ {.fl = 6, .lut = 0x23357},
+ {.fl = 7, .lut = 0x223357},
+ {.fl = 8, .lut = 0x2223357},
+ {.fl = 9, .lut = 0x22223357},
+ {.fl = 10, .lut = 0x222223357},
+ {.fl = 11, .lut = 0x2222223357},
+ {.fl = 12, .lut = 0x22222223357},
+ {.fl = 13, .lut = 0x222222223357},
+ {.fl = 14, .lut = 0x1222222223357},
+ {.fl = 0, .lut = 0x11222222223357}
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+ {.fl = 10, .lut = 0x344556677},
+ {.fl = 11, .lut = 0x3344556677},
+ {.fl = 12, .lut = 0x23344556677},
+ {.fl = 13, .lut = 0x223344556677},
+ {.fl = 14, .lut = 0x1223344556677},
+ {.fl = 0, .lut = 0x112233344556677},
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+ {.fl = 0, .lut = 0x0},
+};
+
+static struct dpu_perf_cfg sdm845_perf_data = {
+ .max_bw_low = 6800000,
+ .max_bw_high = 6800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .core_ib_ff = "6.0",
+ .core_clk_ff = "1.0",
+ .comp_ratio_rt =
+ "NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23",
+ .comp_ratio_nrt =
+ "NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25",
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sdm845_qos_linear),
+ .entries = sdm845_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sdm845_qos_macrotile),
+ .entries = sdm845_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sdm845_qos_nrt),
+ .entries = sdm845_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+};
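+
+/*
+ * Note: the three entries of danger_lut_tbl and qos_lut_tbl above are
+ * indexed by enum dpu_qos_lut_usage, i.e. the linear, macrotile and
+ * non-realtime fetch cases respectively.
+ */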
+
+/*************************************************************
+ * Hardware catalog init
+ *************************************************************/
+
+/*
+ * sdm845_cfg_init(): populate sdm845 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+ *dpu_cfg = (struct dpu_mdss_cfg){
+ .caps = &sdm845_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sdm845_mdp),
+ .mdp = sdm845_mdp,
+ .ctl_count = ARRAY_SIZE(sdm845_ctl),
+ .ctl = sdm845_ctl,
+ .sspp_count = ARRAY_SIZE(sdm845_sspp),
+ .sspp = sdm845_sspp,
+ .mixer_count = ARRAY_SIZE(sdm845_lm),
+ .mixer = sdm845_lm,
+ .ds_count = ARRAY_SIZE(sdm845_ds),
+ .ds = sdm845_ds,
+ .pingpong_count = ARRAY_SIZE(sdm845_pp),
+ .pingpong = sdm845_pp,
+ .cdm_count = ARRAY_SIZE(sdm845_cdm),
+ .cdm = sdm845_cdm,
+ .intf_count = ARRAY_SIZE(sdm845_intf),
+ .intf = sdm845_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = sdm845_regdma,
+ .perf = sdm845_perf_data,
+ };
+}
+
+static struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+ { .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
+ { .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+};
+
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
+{
+ kfree(dpu_cfg);
+}
+
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
+{
+ int i;
+ struct dpu_mdss_cfg *dpu_cfg;
+
+ dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL);
+ if (!dpu_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+ if (cfg_handler[i].hw_rev == hw_rev) {
+ cfg_handler[i].cfg_init(dpu_cfg);
+ dpu_cfg->hwversion = hw_rev;
+ return dpu_cfg;
+ }
+ }
+
+ DPU_ERROR("unsupported chipset id:%X\n", hw_rev);
+ dpu_hw_catalog_deinit(dpu_cfg);
+ return ERR_PTR(-ENODEV);
+}
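+
+/*
+ * Illustrative usage (hypothetical caller, sketch only):
+ *
+ *	struct dpu_mdss_cfg *cfg = dpu_hw_catalog_init(DPU_HW_VER_400);
+ *
+ *	if (IS_ERR(cfg))
+ *		return PTR_ERR(cfg);
+ *	... use cfg->caps, cfg->sspp, cfg->sspp_count, etc. ...
+ *	dpu_hw_catalog_deinit(cfg);
+ */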
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
new file mode 100644
index 000000000000..f0cb0d4fc80e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -0,0 +1,804 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CATALOG_H
+#define _DPU_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <drm/drmP.h>
+
+/**
+ * Max hardware block count: e.g. at most 12 SSPP pipes or
+ * 5 ctl paths. In all cases there are at most 12 hardware blocks
+ * of any one type in the current design
+ */
+#define MAX_BLOCKS 12
+
+#define DPU_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28) |\
+ ((MINOR & 0xFFF) << 16) |\
+ (STEP & 0xFFFF))
+
+#define DPU_HW_MAJOR(rev) ((rev) >> 28)
+#define DPU_HW_MINOR(rev) (((rev) >> 16) & 0xFFF)
+#define DPU_HW_STEP(rev) ((rev) & 0xFFFF)
+#define DPU_HW_MAJOR_MINOR(rev) ((rev) >> 16)
+
+#define IS_DPU_MAJOR_MINOR_SAME(rev1, rev2) \
+ (DPU_HW_MAJOR_MINOR((rev1)) == DPU_HW_MAJOR_MINOR((rev2)))
+
+#define DPU_HW_VER_170 DPU_HW_VER(1, 7, 0) /* 8996 v1.0 */
+#define DPU_HW_VER_171 DPU_HW_VER(1, 7, 1) /* 8996 v2.0 */
+#define DPU_HW_VER_172 DPU_HW_VER(1, 7, 2) /* 8996 v3.0 */
+#define DPU_HW_VER_300 DPU_HW_VER(3, 0, 0) /* 8998 v1.0 */
+#define DPU_HW_VER_301 DPU_HW_VER(3, 0, 1) /* 8998 v1.1 */
+#define DPU_HW_VER_400 DPU_HW_VER(4, 0, 0) /* sdm845 v1.0 */
+#define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
+#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
+#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+
+
+#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
+#define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
+#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
+#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
+#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
+
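+/*
+ * Worked example (illustrative): DPU_HW_VER(4, 0, 1) packs to
+ * 0x40000001, so DPU_HW_MAJOR() yields 4, DPU_HW_MINOR() yields 0 and
+ * DPU_HW_STEP() yields 1. Since only MAJOR.MINOR is compared,
+ * IS_SDM845_TARGET(DPU_HW_VER_401) is true and both sdm845 revisions
+ * share one catalog entry.
+ */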
+
+#define DPU_HW_BLK_NAME_LEN 16
+
+#define MAX_IMG_WIDTH 0x3fff
+#define MAX_IMG_HEIGHT 0x3fff
+
+#define CRTC_DUAL_MIXERS 2
+
+#define MAX_XIN_COUNT 16
+
+/**
+ * Supported UBWC feature versions
+ */
+enum {
+ DPU_HW_UBWC_VER_10 = 0x100,
+ DPU_HW_UBWC_VER_20 = 0x200,
+ DPU_HW_UBWC_VER_30 = 0x300,
+};
+
+#define IS_UBWC_20_SUPPORTED(rev) ((rev) >= DPU_HW_UBWC_VER_20)
+
+/**
+ * MDP TOP BLOCK features
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
+ * @DPU_MDP_BWC, MDSS HW supports Bandwidth compression.
+ * @DPU_MDP_UBWC_1_0, This chipset supports Universal Bandwidth
+ * compression initial revision
+ * @DPU_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5
+ * @DPU_MDP_MAX Maximum value
+ */
+enum {
+ DPU_MDP_PANIC_PER_PIPE = 0x1,
+ DPU_MDP_10BIT_SUPPORT,
+ DPU_MDP_BWC,
+ DPU_MDP_UBWC_1_0,
+ DPU_MDP_UBWC_1_5,
+ DPU_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @DPU_SSPP_SRC Src and fetch part of the pipes,
+ * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
+ * @DPU_SSPP_CSC, Support of Color space conversion
+ * @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
+ * @DPU_SSPP_CURSOR, SSPP can be used as a cursor layer
+ * @DPU_SSPP_QOS, SSPP support QoS control, danger/safe/creq
+ * @DPU_SSPP_QOS_8LVL, SSPP support 8-level QoS control
+ * @DPU_SSPP_EXCL_RECT, SSPP supports exclusion rect
+ * @DPU_SSPP_SMART_DMA_V1, SmartDMA 1.0 support
+ * @DPU_SSPP_SMART_DMA_V2, SmartDMA 2.0 support
+ * @DPU_SSPP_TS_PREFILL Supports prefill with traffic shaper
+ * @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @DPU_SSPP_CDP Supports client driven prefetch
+ * @DPU_SSPP_MAX maximum value
+ */
+enum {
+ DPU_SSPP_SRC = 0x1,
+ DPU_SSPP_SCALER_QSEED2,
+ DPU_SSPP_SCALER_QSEED3,
+ DPU_SSPP_SCALER_RGB,
+ DPU_SSPP_CSC,
+ DPU_SSPP_CSC_10BIT,
+ DPU_SSPP_CURSOR,
+ DPU_SSPP_QOS,
+ DPU_SSPP_QOS_8LVL,
+ DPU_SSPP_EXCL_RECT,
+ DPU_SSPP_SMART_DMA_V1,
+ DPU_SSPP_SMART_DMA_V2,
+ DPU_SSPP_TS_PREFILL,
+ DPU_SSPP_TS_PREFILL_REC1,
+ DPU_SSPP_CDP,
+ DPU_SSPP_MAX
+};
+
+/*
+ * MIXER sub-blocks/features
+ * @DPU_MIXER_LAYER Layer mixer layer blend configuration,
+ * @DPU_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
+ * @DPU_MIXER_GC Gamma correction block
+ * @DPU_DIM_LAYER Layer mixer supports dim layer
+ * @DPU_MIXER_MAX maximum value
+ */
+enum {
+ DPU_MIXER_LAYER = 0x1,
+ DPU_MIXER_SOURCESPLIT,
+ DPU_MIXER_GC,
+ DPU_DIM_LAYER,
+ DPU_MIXER_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @DPU_PINGPONG_TE Tear check block
+ * @DPU_PINGPONG_TE2 Additional tear check block for split pipes
+ * @DPU_PINGPONG_SPLIT PP block supports split fifo
+ * @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
+ * @DPU_PINGPONG_DITHER, Dither blocks
+ * @DPU_PINGPONG_MAX
+ */
+enum {
+ DPU_PINGPONG_TE = 0x1,
+ DPU_PINGPONG_TE2,
+ DPU_PINGPONG_SPLIT,
+ DPU_PINGPONG_SLAVE,
+ DPU_PINGPONG_DITHER,
+ DPU_PINGPONG_MAX
+};
+
+/**
+ * CTL sub-blocks
+ * @DPU_CTL_SPLIT_DISPLAY CTL supports video mode split display
+ * @DPU_CTL_MAX
+ */
+enum {
+ DPU_CTL_SPLIT_DISPLAY = 0x1,
+ DPU_CTL_MAX
+};
+
+/**
+ * VBIF sub-blocks and features
+ * @DPU_VBIF_QOS_OTLIM VBIF supports OT Limit
+ * @DPU_VBIF_QOS_REMAP VBIF supports QoS priority remap
+ * @DPU_VBIF_MAX maximum value
+ */
+enum {
+ DPU_VBIF_QOS_OTLIM = 0x1,
+ DPU_VBIF_QOS_REMAP,
+ DPU_VBIF_MAX
+};
+
+/**
+ * MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this block
+ * @base: register base offset to mdss
+ * @len: length of hardware block
+ * @features bit mask identifying sub-blocks/features
+ */
+#define DPU_HW_BLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len; \
+ unsigned long features
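+
+/*
+ * Illustrative expansion (example only): a block config such as
+ *
+ *	struct dpu_ctl_cfg { DPU_HW_BLK_INFO; };
+ *
+ * thus carries name/id/base/len/features as its leading members, which
+ * lets generic code treat all block configuration types uniformly.
+ */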
+
+/**
+ * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this sub-block
+ * @base: offset of this sub-block relative to the block
+ * offset
+ * @len register block length of this sub-block
+ */
+#define DPU_HW_SUBBLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len
+
+/**
+ * struct dpu_src_blk: SSPP part of the source pipes
+ * @info: HW register and features supported by this sub-blk
+ */
+struct dpu_src_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_scaler_blk: Scaler information
+ * @info: HW register and features supported by this sub-blk
+ * @version: qseed block revision
+ */
+struct dpu_scaler_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+struct dpu_csc_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_pp_blk : Pixel processing sub-blk information
+ * @info: HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct dpu_pp_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+/**
+ * struct dpu_format_extended - define dpu specific pixel format+modifier
+ * @fourcc_format: Base FOURCC pixel format code
+ * @modifier: 64-bit drm format modifier, same modifier must be applied to all
+ * framebuffer planes
+ */
+struct dpu_format_extended {
+ uint32_t fourcc_format;
+ uint64_t modifier;
+};
+
+/**
+ * enum dpu_qos_lut_usage - define QoS LUT use cases
+ */
+enum dpu_qos_lut_usage {
+ DPU_QOS_LUT_USAGE_LINEAR,
+ DPU_QOS_LUT_USAGE_MACROTILE,
+ DPU_QOS_LUT_USAGE_NRT,
+ DPU_QOS_LUT_USAGE_MAX,
+};
+
+/**
+ * struct dpu_qos_lut_entry - define QoS LUT table entry
+ * @fl: fill level, or zero on last entry to indicate default lut
+ * @lut: lut to use when the requested fill level is at or below @fl
+ */
+struct dpu_qos_lut_entry {
+ u32 fl;
+ u64 lut;
+};
+
+/**
+ * struct dpu_qos_lut_tbl - define QoS LUT table
+ * @nentry: number of entry in this table
+ * @entries: Pointer to table entries
+ */
+struct dpu_qos_lut_tbl {
+ u32 nentry;
+ struct dpu_qos_lut_entry *entries;
+};
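+
+/*
+ * Minimal lookup sketch (illustrative only, not a helper provided by
+ * this patch): entries are scanned in ascending order and the first one
+ * whose fill level covers the request wins; the terminating .fl == 0
+ * entry acts as the default LUT:
+ *
+ *	for (i = 0; i < tbl->nentry; i++)
+ *		if (!tbl->entries[i].fl || fl <= tbl->entries[i].fl)
+ *			return tbl->entries[i].lut;
+ */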
+
+/**
+ * struct dpu_caps - define DPU capabilities
+ * @max_mixer_width max layer mixer line width support.
+ * @max_mixer_blendstages max layer mixer blend stages or
+ * supported z order
+ * @qseed_type qseed2 or qseed3 support.
+ * @smart_dma_rev Supported version of SmartDMA feature.
+ * @ubwc_version UBWC feature version (0x0 for not supported)
+ * @has_src_split source split feature status
+ * @has_dim_layer dim layer feature status
+ * @has_idle_pc indicate if idle power collapse feature is supported
+ */
+struct dpu_caps {
+ u32 max_mixer_width;
+ u32 max_mixer_blendstages;
+ u32 qseed_type;
+ u32 smart_dma_rev;
+ u32 ubwc_version;
+ bool has_src_split;
+ bool has_dim_layer;
+ bool has_idle_pc;
+};
+
+/**
+ * struct dpu_sspp_blks_common : SSPP sub-blocks common configuration
+ * @maxlinewidth: max pixel line width supported by this pipe
+ * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
+ * @maxhdeciexp: max horizontal decimation supported by this pipe
+ * (max is 2^value)
+ * @maxvdeciexp: max vertical decimation supported by this pipe
+ * (max is 2^value)
+ */
+struct dpu_sspp_blks_common {
+ u32 maxlinewidth;
+ u32 pixel_ram_size;
+ u32 maxhdeciexp;
+ u32 maxvdeciexp;
+};
+
+/**
+ * struct dpu_sspp_sub_blks : SSPP sub-blocks
+ * @common: Pointer to common configurations shared by sub blocks
+ * @creq_vblank: creq priority during vertical blanking
+ * @danger_vblank: danger priority during vertical blanking
+ * @maxdwnscale: max downscale ratio supported(without DECIMATION)
+ * @maxupscale: maxupscale ratio supported
+ * @smart_dma_priority: hw priority of rect1 of multirect pipe
+ * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @src_blk:
+ * @scaler_blk:
+ * @csc_blk:
+ * @hsic:
+ * @memcolor:
+ * @pcc_blk:
+ * @igc_blk:
+ * @format_list: Pointer to list of supported formats
+ * @virt_format_list: Pointer to list of supported formats for virtual planes
+ */
+struct dpu_sspp_sub_blks {
+ const struct dpu_sspp_blks_common *common;
+ u32 creq_vblank;
+ u32 danger_vblank;
+ u32 maxdwnscale;
+ u32 maxupscale;
+ u32 smart_dma_priority;
+ u32 max_per_pipe_bw;
+ struct dpu_src_blk src_blk;
+ struct dpu_scaler_blk scaler_blk;
+ struct dpu_pp_blk csc_blk;
+ struct dpu_pp_blk hsic_blk;
+ struct dpu_pp_blk memcolor_blk;
+ struct dpu_pp_blk pcc_blk;
+ struct dpu_pp_blk igc_blk;
+
+ const struct dpu_format_extended *format_list;
+ const struct dpu_format_extended *virt_format_list;
+};
+
+/**
+ * struct dpu_lm_sub_blks: information of mixer block
+ * @maxwidth: Max pixel width supported by this mixer
+ * @maxblendstages: Max number of blend-stages supported
+ * @blendstage_base: Blend-stage register base offset
+ * @gc: gamma correction block
+ */
+struct dpu_lm_sub_blks {
+ u32 maxwidth;
+ u32 maxblendstages;
+ u32 blendstage_base[MAX_BLOCKS];
+ struct dpu_pp_blk gc;
+};
+
+struct dpu_pingpong_sub_blks {
+ struct dpu_pp_blk te;
+ struct dpu_pp_blk te2;
+ struct dpu_pp_blk dither;
+};
+
+/**
+ * dpu_clk_ctrl_type - Defines top level clock control signals
+ */
+enum dpu_clk_ctrl_type {
+ DPU_CLK_CTRL_NONE,
+ DPU_CLK_CTRL_VIG0,
+ DPU_CLK_CTRL_VIG1,
+ DPU_CLK_CTRL_VIG2,
+ DPU_CLK_CTRL_VIG3,
+ DPU_CLK_CTRL_VIG4,
+ DPU_CLK_CTRL_RGB0,
+ DPU_CLK_CTRL_RGB1,
+ DPU_CLK_CTRL_RGB2,
+ DPU_CLK_CTRL_RGB3,
+ DPU_CLK_CTRL_DMA0,
+ DPU_CLK_CTRL_DMA1,
+ DPU_CLK_CTRL_CURSOR0,
+ DPU_CLK_CTRL_CURSOR1,
+ DPU_CLK_CTRL_INLINE_ROT0_SSPP,
+ DPU_CLK_CTRL_MAX,
+};
+
+/* struct dpu_clk_ctrl_reg : Clock control register
+ * @reg_off: register offset
+ * @bit_off: bit offset
+ */
+struct dpu_clk_ctrl_reg {
+ u32 reg_off;
+ u32 bit_off;
+};
+
+/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ * @highest_bank_bit: UBWC parameter
+ * @ubwc_static: ubwc static configuration
+ * @ubwc_swizzle: ubwc default swizzle setting
+ * @has_dest_scaler: indicates support of destination scaler
+ * @clk_ctrls clock control register definition
+ */
+struct dpu_mdp_cfg {
+ DPU_HW_BLK_INFO;
+ u32 highest_bank_bit;
+ u32 ubwc_static;
+ u32 ubwc_swizzle;
+ bool has_dest_scaler;
+ struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
+};
+
+/* struct dpu_ctl_cfg : CTL instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ */
+struct dpu_ctl_cfg {
+ DPU_HW_BLK_INFO;
+};
+
+/**
+ * struct dpu_sspp_cfg - information of source pipes
+ * @id: index identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk: SSPP sub-blocks information
+ * @xin_id: bus client identifier
+ * @clk_ctrl clock control identifier
+ * @type sspp type identifier
+ */
+struct dpu_sspp_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_sspp_sub_blks *sblk;
+ u32 xin_id;
+ enum dpu_clk_ctrl_type clk_ctrl;
+ u32 type;
+};
+
+/**
+ * struct dpu_lm_cfg - information of layer mixer blocks
+ * @id: index identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk: LM Sub-blocks information
+ * @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
+ * @ds: ID of connected DS, DS_MAX if unsupported
+ * @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
+ */
+struct dpu_lm_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_lm_sub_blks *sblk;
+ u32 pingpong;
+ u32 ds;
+ unsigned long lm_pair_mask;
+};
+
+/**
+ * struct dpu_ds_top_cfg - information of dest scaler top
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying features
+ * @version hw version of dest scaler
+ * @maxinputwidth maximum input line width
+ * @maxoutputwidth maximum output line width
+ * @maxupscale maximum upscale ratio
+ */
+struct dpu_ds_top_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ u32 maxinputwidth;
+ u32 maxoutputwidth;
+ u32 maxupscale;
+};
+
+/**
+ * struct dpu_ds_cfg - information of dest scaler blocks
+ * @id enum identifying this block
+ * @base register offset wrt DS top offset
+ * @features bit mask identifying features
+ * @version hw version of the qseed block
+ * @top DS top information
+ */
+struct dpu_ds_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ const struct dpu_ds_top_cfg *top;
+};
+
+/**
+ * struct dpu_pingpong_cfg - information of PING-PONG blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk sub-blocks information
+ */
+struct dpu_pingpong_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_cdm_cfg - information of chroma down blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @intf_connect Bitmask of INTF IDs this CDM can connect to
+ */
+struct dpu_cdm_cfg {
+ DPU_HW_BLK_INFO;
+ unsigned long intf_connect;
+};
+
+/**
+ * struct dpu_intf_cfg - information of timing engine blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @type: Interface type(DSI, DP, HDMI)
+ * @controller_id: Controller Instance ID in case of multiple instances of the same intf type
+ * @prog_fetch_lines_worst_case Worst case latency num lines needed to prefetch
+ */
+struct dpu_intf_cfg {
+ DPU_HW_BLK_INFO;
+ u32 type; /* interface type */
+ u32 controller_id;
+ u32 prog_fetch_lines_worst_case;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
+ * @pps pixel per seconds
+ * @ot_limit OT limit to use up to specified pixel per second
+ */
+struct dpu_vbif_dynamic_ot_cfg {
+ u64 pps;
+ u32 ot_limit;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_tbl - dynamic OT setting table
+ * @count length of cfg
+ * @cfg pointer to array of configuration settings with
+ * ascending requirements
+ */
+struct dpu_vbif_dynamic_ot_tbl {
+ u32 count;
+ struct dpu_vbif_dynamic_ot_cfg *cfg;
+};
+
+/**
+ * struct dpu_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl num of priority level
+ * @priority_lvl pointer to array of priority level in ascending order
+ */
+struct dpu_vbif_qos_tbl {
+ u32 npriority_lvl;
+ u32 *priority_lvl;
+};
+
+/**
+ * struct dpu_vbif_cfg - information of VBIF blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @default_ot_rd_limit default OT read limit
+ * @default_ot_wr_limit default OT write limit
+ * @xin_halt_timeout maximum time (in usec) for xin to halt
+ * @dynamic_ot_rd_tbl dynamic OT read configuration table
+ * @dynamic_ot_wr_tbl dynamic OT write configuration table
+ * @qos_rt_tbl real-time QoS priority table
+ * @qos_nrt_tbl non-real-time QoS priority table
+ * @memtype_count number of defined memtypes
+ * @memtype array of xin memtype definitions
+ */
+struct dpu_vbif_cfg {
+ DPU_HW_BLK_INFO;
+ u32 default_ot_rd_limit;
+ u32 default_ot_wr_limit;
+ u32 xin_halt_timeout;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+ struct dpu_vbif_qos_tbl qos_rt_tbl;
+ struct dpu_vbif_qos_tbl qos_nrt_tbl;
+ u32 memtype_count;
+ u32 memtype[MAX_XIN_COUNT];
+};
+
+/**
+ * struct dpu_reg_dma_cfg - information of lut dma blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @version version of lutdma hw block
+ * @trigger_sel_off offset to trigger select registers of lutdma
+ */
+struct dpu_reg_dma_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ u32 trigger_sel_off;
+};
+
+/**
+ * Define CDP use cases
+ * @DPU_PERF_CDP_USAGE_RT: real-time use cases
+ * @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
+ */
+enum {
+ DPU_PERF_CDP_USAGE_RT,
+ DPU_PERF_CDP_USAGE_NRT,
+ DPU_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct dpu_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct dpu_perf_cdp_cfg {
+ bool rd_enable;
+ bool wr_enable;
+};
+
+/**
+ * struct dpu_perf_cfg - performance control settings
+ * @max_bw_low low threshold of maximum bandwidth (kbps)
+ * @max_bw_high high threshold of maximum bandwidth (kbps)
+ * @min_core_ib minimum mnoc ib vote in kbps
+ * @min_llcc_ib minimum llcc ib vote in kbps
+ * @min_dram_ib minimum dram ib vote in kbps
+ * @core_ib_ff core instantaneous bandwidth fudge factor
+ * @core_clk_ff core clock fudge factor
+ * @comp_ratio_rt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @comp_ratio_nrt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @undersized_prefill_lines undersized prefill in lines
+ * @xtra_prefill_lines extra prefill latency in lines
+ * @dest_scale_prefill_lines destination scaler latency in lines
+ * @macrotile_prefill_lines macrotile latency in lines
+ * @yuv_nv12_prefill_lines yuv_nv12 latency in lines
+ * @linear_prefill_lines linear latency in lines
+ * @downscaling_prefill_lines downscaling latency in lines
+ * @amortizable_threshold minimum y position for traffic shaping prefill
+ * @min_prefill_lines minimum pipeline latency in lines
+ * @safe_lut_tbl: LUT tables for safe signals
+ * @danger_lut_tbl: LUT tables for danger signals
+ * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg cdp use case configurations
+ */
+struct dpu_perf_cfg {
+ u32 max_bw_low;
+ u32 max_bw_high;
+ u32 min_core_ib;
+ u32 min_llcc_ib;
+ u32 min_dram_ib;
+ const char *core_ib_ff;
+ const char *core_clk_ff;
+ const char *comp_ratio_rt;
+ const char *comp_ratio_nrt;
+ u32 undersized_prefill_lines;
+ u32 xtra_prefill_lines;
+ u32 dest_scale_prefill_lines;
+ u32 macrotile_prefill_lines;
+ u32 yuv_nv12_prefill_lines;
+ u32 linear_prefill_lines;
+ u32 downscaling_prefill_lines;
+ u32 amortizable_threshold;
+ u32 min_prefill_lines;
+ u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_perf_cdp_cfg cdp_cfg[DPU_PERF_CDP_USAGE_MAX];
+};
+
+/**
+ * struct dpu_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains number of instances,
+ * register offsets, capabilities of all the MDSS HW sub-blocks.
+ *
+ * @dma_formats Supported formats for dma pipe
+ * @cursor_formats Supported formats for cursor pipe
+ * @vig_formats Supported formats for vig pipe
+ */
+struct dpu_mdss_cfg {
+ u32 hwversion;
+
+ const struct dpu_caps *caps;
+
+ u32 mdp_count;
+ struct dpu_mdp_cfg *mdp;
+
+ u32 ctl_count;
+ struct dpu_ctl_cfg *ctl;
+
+ u32 sspp_count;
+ struct dpu_sspp_cfg *sspp;
+
+ u32 mixer_count;
+ struct dpu_lm_cfg *mixer;
+
+ u32 ds_count;
+ struct dpu_ds_cfg *ds;
+
+ u32 pingpong_count;
+ struct dpu_pingpong_cfg *pingpong;
+
+ u32 cdm_count;
+ struct dpu_cdm_cfg *cdm;
+
+ u32 intf_count;
+ struct dpu_intf_cfg *intf;
+
+ u32 vbif_count;
+ struct dpu_vbif_cfg *vbif;
+
+ u32 reg_dma_count;
+ struct dpu_reg_dma_cfg dma_cfg;
+
+ u32 ad_count;
+
+ /* Add additional block data structures here */
+
+ struct dpu_perf_cfg perf;
+ struct dpu_format_extended *dma_formats;
+ struct dpu_format_extended *cursor_formats;
+ struct dpu_format_extended *vig_formats;
+};
+
+struct dpu_mdss_hw_cfg_handler {
+ u32 hw_rev;
+ void (*cfg_init)(struct dpu_mdss_cfg *dpu_cfg);
+};
+
+/*
+ * Access Macros
+ */
+#define BLK_MDP(s) ((s)->mdp)
+#define BLK_CTL(s) ((s)->ctl)
+#define BLK_VIG(s) ((s)->vig)
+#define BLK_RGB(s) ((s)->rgb)
+#define BLK_DMA(s) ((s)->dma)
+#define BLK_CURSOR(s) ((s)->cursor)
+#define BLK_MIXER(s) ((s)->mixer)
+#define BLK_DS(s) ((s)->ds)
+#define BLK_PINGPONG(s) ((s)->pingpong)
+#define BLK_CDM(s) ((s)->cdm)
+#define BLK_INTF(s) ((s)->intf)
+#define BLK_AD(s) ((s)->ad)
+
+/**
+ * dpu_hw_catalog_init - dpu hardware catalog init API; retrieves
+ * hardcoded, target-specific catalog information in a config structure
+ * @hw_rev: caller needs to provide the hardware revision.
+ *
+ * Return: dpu config structure
+ */
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
+
+/**
+ * dpu_hw_catalog_deinit - dpu hardware catalog cleanup
+ * @dpu_cfg: pointer returned from init function
+ */
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
+
+/**
+ * dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
+ * @cfg: pointer to sspp cfg
+ */
+static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
+{
+ return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
+ test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
+}
+#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
new file mode 100644
index 000000000000..3c9f028628ef
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+
+static const struct dpu_format_extended plane_formats[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended plane_formats_yuv[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+
+ {DRM_FORMAT_NV12, 0},
+ {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_NV21, 0},
+ {DRM_FORMAT_NV16, 0},
+ {DRM_FORMAT_NV61, 0},
+ {DRM_FORMAT_VYUY, 0},
+ {DRM_FORMAT_UYVY, 0},
+ {DRM_FORMAT_YUYV, 0},
+ {DRM_FORMAT_YVYU, 0},
+ {DRM_FORMAT_YUV420, 0},
+ {DRM_FORMAT_YVU420, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended cursor_formats[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended wb2_formats[] = {
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+
+ {DRM_FORMAT_YUV420, 0},
+ {DRM_FORMAT_NV12, 0},
+ {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_NV16, 0},
+ {DRM_FORMAT_YUYV, 0},
+
+ {0, 0},
+};
+
+static const struct dpu_format_extended rgb_10bit_formats[] = {
+ {DRM_FORMAT_BGRA1010102, 0},
+ {DRM_FORMAT_BGRX1010102, 0},
+ {DRM_FORMAT_RGBA1010102, 0},
+ {DRM_FORMAT_RGBX1010102, 0},
+ {DRM_FORMAT_ABGR2101010, 0},
+ {DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_XBGR2101010, 0},
+ {DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_ARGB2101010, 0},
+ {DRM_FORMAT_XRGB2101010, 0},
+};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
new file mode 100644
index 000000000000..554874ba0c3b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -0,0 +1,323 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define CDM_CSC_10_OPMODE 0x000
+#define CDM_CSC_10_BASE 0x004
+
+#define CDM_CDWN2_OP_MODE 0x100
+#define CDM_CDWN2_CLAMP_OUT 0x104
+#define CDM_CDWN2_PARAMS_3D_0 0x108
+#define CDM_CDWN2_PARAMS_3D_1 0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0 0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1 0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2 0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
+#define CDM_CDWN2_COEFF_COSITE_V 0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C
+#define CDM_CDWN2_OUT_SIZE 0x130
+
+#define CDM_HDMI_PACK_OP_MODE 0x200
+#define CDM_CSC_10_MATRIX_COEFF_0 0x004
+
+/**
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ */
+static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/**
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/**
+ * Vertical coefficients for cosite chroma downscale
+ */
+static u32 cosite_v_coeff[] = {0x00080004};
+/**
+ * Vertical coefficients for offsite chroma downscale
+ */
+static u32 offsite_v_coeff[] = {0x00060002};
+
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
+static struct dpu_csc_cfg rgb2yuv_cfg = {
+ {
+ 0x0083, 0x0102, 0x0032,
+ 0x1fb5, 0x1f6c, 0x00e1,
+ 0x00e1, 0x1f45, 0x1fdc
+ },
+ { 0x00, 0x00, 0x00 },
+ { 0x0040, 0x0200, 0x0200 },
+ { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+ { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
+
+static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->cdm_count; i++) {
+ if (cdm == m->cdm[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->cdm[i].base;
+ b->length = m->cdm[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_CDM;
+ return &m->cdm[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx,
+ struct dpu_csc_cfg *data)
+{
+ dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
+
+ return 0;
+}
+
+static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx,
+ struct dpu_hw_cdm_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 opmode = 0;
+ u32 out_size = 0;
+
+ if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
+ opmode &= ~BIT(7);
+ else
+ opmode |= BIT(7);
+
+ /* ENABLE DWNS_H bit */
+ opmode |= BIT(1);
+
+ switch (cfg->h_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ /* CLEAR METHOD_H field */
+ opmode &= ~(0x18);
+ /* CLEAR DWNS_H bit */
+ opmode &= ~BIT(1);
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ /* Clear METHOD_H field (pixel drop is 0) */
+ opmode &= ~(0x18);
+ break;
+ case CDM_CDWN_AVG:
+ /* Clear METHOD_H field (Average is 0x1) */
+ opmode &= ~(0x18);
+ opmode |= (0x1 << 0x3);
+ break;
+ case CDM_CDWN_COSITE:
+ /* Clear METHOD_H field (Cosite is 0x2) */
+ opmode &= ~(0x18);
+ opmode |= (0x2 << 0x3);
+ /* Co-site horizontal coefficients */
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+ cosite_h_coeff[0]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+ cosite_h_coeff[1]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+ cosite_h_coeff[2]);
+ break;
+ case CDM_CDWN_OFFSITE:
+ /* Clear METHOD_H field (Offsite is 0x3) */
+ opmode &= ~(0x18);
+ opmode |= (0x3 << 0x3);
+
+ /* Off-site horizontal coefficients */
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+ offsite_h_coeff[0]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+ offsite_h_coeff[1]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+ offsite_h_coeff[2]);
+ break;
+ default:
+ pr_err("%s invalid horz down sampling type\n", __func__);
+ return -EINVAL;
+ }
+
+ /* ENABLE DWNS_V bit */
+ opmode |= BIT(2);
+
+ switch (cfg->v_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ /* CLEAR METHOD_V field */
+ opmode &= ~(0x60);
+ /* CLEAR DWNS_V bit */
+ opmode &= ~BIT(2);
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ /* Clear METHOD_V field (pixel drop is 0) */
+ opmode &= ~(0x60);
+ break;
+ case CDM_CDWN_AVG:
+ /* Clear METHOD_V field (Average is 0x1) */
+ opmode &= ~(0x60);
+ opmode |= (0x1 << 0x5);
+ break;
+ case CDM_CDWN_COSITE:
+ /* Clear METHOD_V field (Cosite is 0x2) */
+ opmode &= ~(0x60);
+ opmode |= (0x2 << 0x5);
+ /* Co-site vertical coefficients */
+ DPU_REG_WRITE(c,
+ CDM_CDWN2_COEFF_COSITE_V,
+ cosite_v_coeff[0]);
+ break;
+ case CDM_CDWN_OFFSITE:
+ /* Clear METHOD_V field (Offsite is 0x3) */
+ opmode &= ~(0x60);
+ opmode |= (0x3 << 0x5);
+
+ /* Off-site vertical coefficients */
+ DPU_REG_WRITE(c,
+ CDM_CDWN2_COEFF_OFFSITE_V,
+ offsite_v_coeff[0]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+ opmode |= BIT(0); /* EN CDWN module */
+ else
+ opmode &= ~BIT(0);
+
+ out_size = (cfg->output_width & 0xFFFF) |
+ ((cfg->output_height & 0xFFFF) << 16);
+ DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+ DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
+ DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
+ ((0x3FF << 16) | 0x0));
+
+ return 0;
+}
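+
+/*
+ * Example configuration (illustrative values only, not taken from any
+ * target): a caller downscaling chroma horizontally with cosite
+ * filtering at 1080p might program:
+ *
+ *	struct dpu_hw_cdm_cfg cfg = {
+ *		.output_width = 1920,
+ *		.output_height = 1080,
+ *		.output_bit_depth = CDM_CDWN_OUTPUT_8BIT,
+ *		.h_cdwn_type = CDM_CDWN_COSITE,
+ *		.v_cdwn_type = CDM_CDWN_DISABLE,
+ *	};
+ *
+ *	cdm->ops.setup_cdwn(cdm, &cfg);
+ */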
+
+static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx,
+ struct dpu_hw_cdm_cfg *cdm)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ const struct dpu_format *fmt = cdm->output_fmt;
+ struct cdm_output_cfg cdm_cfg = { 0 };
+ u32 opmode = 0;
+ u32 csc = 0;
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ return -EINVAL;
+
+ if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+ if (fmt->chroma_sample != DPU_CHROMA_H1V2)
+ return -EINVAL; /* unsupported format */
+ opmode = BIT(0);
+ opmode |= (fmt->chroma_sample << 1);
+ cdm_cfg.intf_en = true;
+ }
+
+ csc |= BIT(2);
+ csc &= ~BIT(1);
+ csc |= BIT(0);
+
+ if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+ ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+
+ DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+ DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+ return 0;
+}
+
+static void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx)
+{
+ struct cdm_output_cfg cdm_cfg = { 0 };
+
+ if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+ ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+}
+
+static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops,
+ unsigned long features)
+{
+ ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit;
+ ops->setup_cdwn = dpu_hw_cdm_setup_cdwn;
+ ops->enable = dpu_hw_cdm_enable;
+ ops->disable = dpu_hw_cdm_disable;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m,
+ struct dpu_hw_mdp *hw_mdp)
+{
+ struct dpu_hw_cdm *c;
+ struct dpu_cdm_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _cdm_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_cdm_ops(&c->ops, c->caps->features);
+ c->hw_mdp = hw_mdp;
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ /*
+ * Perform any default initialization for the chroma down module:
+ * set up the default csc coefficients
+ */
+ dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm)
+{
+ if (cdm)
+ dpu_hw_blk_destroy(&cdm->base);
+ kfree(cdm);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
new file mode 100644
index 000000000000..5cceb1ecb8e0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CDM_H
+#define _DPU_HW_CDM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_cdm;
+
+struct dpu_hw_cdm_cfg {
+ u32 output_width;
+ u32 output_height;
+ u32 output_bit_depth;
+ u32 h_cdwn_type;
+ u32 v_cdwn_type;
+ const struct dpu_format *output_fmt;
+ u32 output_type;
+ int flags;
+};
+
+enum dpu_hw_cdwn_type {
+ CDM_CDWN_DISABLE,
+ CDM_CDWN_PIXEL_DROP,
+ CDM_CDWN_AVG,
+ CDM_CDWN_COSITE,
+ CDM_CDWN_OFFSITE,
+};
+
+enum dpu_hw_cdwn_output_type {
+ CDM_CDWN_OUTPUT_HDMI,
+ CDM_CDWN_OUTPUT_WB,
+};
+
+enum dpu_hw_cdwn_output_bit_depth {
+ CDM_CDWN_OUTPUT_8BIT,
+ CDM_CDWN_OUTPUT_10BIT,
+};
+
+/**
+ * struct dpu_hw_cdm_ops : Interface to the chroma down Hw driver functions
+ * Assumption is these functions will be called after
+ * clocks are enabled
+ * @setup_csc: Programs the csc matrix
+ * @setup_cdwn: Sets up the chroma down sub module
+ * @enable: Enables the output to interface and programs the
+ * output packer
+ * @disable: Puts the cdm in bypass mode
+ */
+struct dpu_hw_cdm_ops {
+ /**
+ * Programs the CSC matrix for conversion from RGB space to YUV space.
+ * Calling this function is optional, as the matrix is automatically
+ * set during initialization; call it only to program a matrix
+ * different from the default.
+ * @cdm: Pointer to the chroma down context structure
+ * @data Pointer to CSC configuration data
+ * return: 0 if success; error code otherwise
+ */
+ int (*setup_csc_data)(struct dpu_hw_cdm *cdm,
+ struct dpu_csc_cfg *data);
+
+ /**
+ * Programs the Chroma downsample part.
+ * @cdm Pointer to chroma down context
+ * @cfg Pointer to chroma down configuration
+ */
+ int (*setup_cdwn)(struct dpu_hw_cdm *cdm,
+ struct dpu_hw_cdm_cfg *cfg);
+
+ /**
+ * Enable the CDM module
+ * @cdm Pointer to chroma down context
+ * @cfg Pointer to chroma down configuration
+ */
+ int (*enable)(struct dpu_hw_cdm *cdm,
+ struct dpu_hw_cdm_cfg *cfg);
+
+ /**
+ * Disable the CDM module
+ * @cdm Pointer to chroma down context
+ */
+ void (*disable)(struct dpu_hw_cdm *cdm);
+};
+
+struct dpu_hw_cdm {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* chroma down */
+ const struct dpu_cdm_cfg *caps;
+ enum dpu_cdm idx;
+
+ /* mdp top hw driver */
+ struct dpu_hw_mdp *hw_mdp;
+
+ /* ops */
+ struct dpu_hw_cdm_ops ops;
+};
+
+/**
+ * to_dpu_hw_cdm - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_cdm, base);
+}
+
+/**
+ * dpu_hw_cdm_init - initializes the cdm hw driver object.
+ * should be called once before accessing every cdm.
+ * @idx: cdm index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ * @hw_mdp: pointer to mdp top hw driver object
+ */
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m,
+ struct dpu_hw_mdp *hw_mdp);
+
+/**
+ * dpu_hw_cdm_destroy - destroys CDM driver context
+ * @cdm: pointer to CDM driver context
+ */
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm);
+
+#endif /* _DPU_HW_CDM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
new file mode 100644
index 000000000000..06be7cf7ce50
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -0,0 +1,540 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define CTL_LAYER(lm) \
+ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT(lm) \
+ (0x40 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT2(lm) \
+ (0x70 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT3(lm) \
+ (0xA0 + (((lm) - LM_0) * 0x004))
+#define CTL_TOP 0x014
+#define CTL_FLUSH 0x018
+#define CTL_START 0x01C
+#define CTL_PREPARE 0x0d0
+#define CTL_SW_RESET 0x030
+#define CTL_LAYER_EXTN_OFFSET 0x40
+
+#define CTL_MIXER_BORDER_OUT BIT(24)
+#define CTL_FLUSH_MASK_CTL BIT(17)
+
+#define DPU_REG_RESET_TIMEOUT_US 2000
+
+static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->ctl_count; i++) {
+ if (ctl == m->ctl[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->ctl[i].base;
+ b->length = m->ctl[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_CTL;
+ return &m->ctl[i];
+ }
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
+ enum dpu_lm lm)
+{
+ int i;
+ int stages = -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ if (lm == mixer[i].id) {
+ stages = mixer[i].sblk->maxblendstages;
+ break;
+ }
+ }
+
+ return stages;
+}
+
+static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
+{
+ DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
+{
+ DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
+static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ ctx->pending_flush_mask = 0x0;
+}
+
+static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
+ u32 flushbits)
+{
+ ctx->pending_flush_mask |= flushbits;
+}
+
+static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ if (!ctx)
+ return 0x0;
+
+ return ctx->pending_flush_mask;
+}
+
+static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
+{
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, CTL_FLUSH);
+}
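+
+/*
+ * Illustrative flush sequence (hypothetical caller): flush bits are
+ * accumulated in software and then committed to CTL_FLUSH in a single
+ * write, e.g.:
+ *
+ *	dpu_hw_ctl_clear_pending_flush(ctx);
+ *	dpu_hw_ctl_update_pending_flush(ctx,
+ *			dpu_hw_ctl_get_bitmask_mixer(ctx, LM_0));
+ *	dpu_hw_ctl_trigger_flush(ctx);
+ */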
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp sspp)
+{
+ uint32_t flushbits = 0;
+
+ switch (sspp) {
+ case SSPP_VIG0:
+ flushbits = BIT(0);
+ break;
+ case SSPP_VIG1:
+ flushbits = BIT(1);
+ break;
+ case SSPP_VIG2:
+ flushbits = BIT(2);
+ break;
+ case SSPP_VIG3:
+ flushbits = BIT(18);
+ break;
+ case SSPP_RGB0:
+ flushbits = BIT(3);
+ break;
+ case SSPP_RGB1:
+ flushbits = BIT(4);
+ break;
+ case SSPP_RGB2:
+ flushbits = BIT(5);
+ break;
+ case SSPP_RGB3:
+ flushbits = BIT(19);
+ break;
+ case SSPP_DMA0:
+ flushbits = BIT(11);
+ break;
+ case SSPP_DMA1:
+ flushbits = BIT(12);
+ break;
+ case SSPP_DMA2:
+ flushbits = BIT(24);
+ break;
+ case SSPP_DMA3:
+ flushbits = BIT(25);
+ break;
+ case SSPP_CURSOR0:
+ flushbits = BIT(22);
+ break;
+ case SSPP_CURSOR1:
+ flushbits = BIT(23);
+ break;
+ default:
+ break;
+ }
+
+ return flushbits;
+}
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm)
+{
+ uint32_t flushbits = 0;
+
+ switch (lm) {
+ case LM_0:
+ flushbits = BIT(6);
+ break;
+ case LM_1:
+ flushbits = BIT(7);
+ break;
+ case LM_2:
+ flushbits = BIT(8);
+ break;
+ case LM_3:
+ flushbits = BIT(9);
+ break;
+ case LM_4:
+ flushbits = BIT(10);
+ break;
+ case LM_5:
+ flushbits = BIT(20);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ flushbits |= CTL_FLUSH_MASK_CTL;
+
+ return flushbits;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ *flushbits |= BIT(31);
+ break;
+ case INTF_1:
+ *flushbits |= BIT(30);
+ break;
+ case INTF_2:
+ *flushbits |= BIT(29);
+ break;
+ case INTF_3:
+ *flushbits |= BIT(28);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_cdm cdm)
+{
+ switch (cdm) {
+ case CDM_0:
+ *flushbits |= BIT(26);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ ktime_t timeout;
+ u32 status;
+
+ timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ /*
+ * it takes around 30us for mdp to finish resetting its ctl path;
+ * poll every 20-50us so that the reset is normally observed complete
+ * on the first poll
+ */
+ do {
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x1;
+ if (status)
+ usleep_range(20, 50);
+ } while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
+
+ return status;
+}
+
+static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+ DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 status;
+
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x01;
+ if (!status)
+ return 0;
+
+ pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
+ pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int i;
+
+ for (i = 0; i < ctx->mixer_count; i++) {
+ DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
+ }
+}
+
+static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
+ u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
+ int i, j;
+ int stages;
+ int pipes_per_stage;
+
+ stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+ if (stages < 0)
+ return;
+
+ if (test_bit(DPU_MIXER_SOURCESPLIT,
+ &ctx->mixer_hw_caps->features))
+ pipes_per_stage = PIPES_PER_STAGE;
+ else
+ pipes_per_stage = 1;
+
+ mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
+
+ if (!stage_cfg)
+ goto exit;
+
+ for (i = 0; i <= stages; i++) {
+ /* overflow to ext register if 'i + 1 > 7' */
+ mix = (i + 1) & 0x7;
+ ext = i >= 7;
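+ /*
+ * e.g. i == 7 yields mix == 0 and ext == 1: the stage value no
+ * longer fits in the 3-bit base field and is carried via the
+ * EXT register bit instead.
+ */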
+
+ for (j = 0 ; j < pipes_per_stage; j++) {
+ enum dpu_sspp_multirect_index rect_index =
+ stage_cfg->multirect_index[i][j];
+
+ switch (stage_cfg->stage[i][j]) {
+ case SSPP_VIG0:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
+ } else {
+ mixercfg |= mix << 0;
+ mixercfg_ext |= ext << 0;
+ }
+ break;
+ case SSPP_VIG1:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
+ } else {
+ mixercfg |= mix << 3;
+ mixercfg_ext |= ext << 2;
+ }
+ break;
+ case SSPP_VIG2:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
+ } else {
+ mixercfg |= mix << 6;
+ mixercfg_ext |= ext << 4;
+ }
+ break;
+ case SSPP_VIG3:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
+ } else {
+ mixercfg |= mix << 26;
+ mixercfg_ext |= ext << 6;
+ }
+ break;
+ case SSPP_RGB0:
+ mixercfg |= mix << 9;
+ mixercfg_ext |= ext << 8;
+ break;
+ case SSPP_RGB1:
+ mixercfg |= mix << 12;
+ mixercfg_ext |= ext << 10;
+ break;
+ case SSPP_RGB2:
+ mixercfg |= mix << 15;
+ mixercfg_ext |= ext << 12;
+ break;
+ case SSPP_RGB3:
+ mixercfg |= mix << 29;
+ mixercfg_ext |= ext << 14;
+ break;
+ case SSPP_DMA0:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
+ } else {
+ mixercfg |= mix << 18;
+ mixercfg_ext |= ext << 16;
+ }
+ break;
+ case SSPP_DMA1:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
+ } else {
+ mixercfg |= mix << 21;
+ mixercfg_ext |= ext << 18;
+ }
+ break;
+ case SSPP_DMA2:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
+ } else {
+ mix |= (i + 1) & 0xF;
+ mixercfg_ext2 |= mix << 0;
+ }
+ break;
+ case SSPP_DMA3:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
+ } else {
+ mix |= (i + 1) & 0xF;
+ mixercfg_ext2 |= mix << 4;
+ }
+ break;
+ case SSPP_CURSOR0:
+ mixercfg_ext |= ((i + 1) & 0xF) << 20;
+ break;
+ case SSPP_CURSOR1:
+ mixercfg_ext |= ((i + 1) & 0xF) << 26;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+exit:
+ DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
+}
+
+static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_cfg = 0;
+
+ intf_cfg |= (cfg->intf & 0xF) << 4;
+
+ if (cfg->mode_3d) {
+ intf_cfg |= BIT(19);
+ intf_cfg |= (cfg->mode_3d - 0x1) << 20;
+ }
+
+ switch (cfg->intf_mode_sel) {
+ case DPU_CTL_MODE_SEL_VID:
+ intf_cfg &= ~BIT(17);
+ intf_cfg &= ~(0x3 << 15);
+ break;
+ case DPU_CTL_MODE_SEL_CMD:
+ intf_cfg |= BIT(17);
+ intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
+ break;
+ default:
+ pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
+ return;
+ }
+
+ DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
+static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
+ unsigned long cap)
+{
+ ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
+ ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
+ ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+ ops->get_flush_register = dpu_hw_ctl_get_flush_register;
+ ops->trigger_start = dpu_hw_ctl_trigger_start;
+ ops->trigger_pending = dpu_hw_ctl_trigger_pending;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+ ops->reset = dpu_hw_ctl_reset_control;
+ ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
+ ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
+ ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
+ ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
+ ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
+ ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
+ ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_ctl *c;
+ struct dpu_ctl_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _ctl_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("failed to create dpu_hw_ctl %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->caps = cfg;
+ _setup_ctl_ops(&c->ops, c->caps->features);
+ c->idx = idx;
+ c->mixer_count = m->mixer_count;
+ c->mixer_hw_caps = m->mixer;
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
+{
+ if (ctx)
+ dpu_hw_blk_destroy(&ctx->base);
+ kfree(ctx);
+}
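
For context, the init/destroy pair follows the usual DPU hw-block pattern; a minimal caller sketch, where 'mmio' and 'catalog' are assumed stand-ins for the mapped MDP registers and the parsed mdss catalog from the kms layer:

	struct dpu_hw_ctl *ctl;

	ctl = dpu_hw_ctl_init(CTL_0, mmio, catalog);
	if (IS_ERR(ctl))
		return PTR_ERR(ctl);

	ctl->ops.clear_all_blendstages(ctl);
	/* ... program mixers and interfaces, then tear down ... */
	dpu_hw_ctl_destroy(ctl);
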
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
new file mode 100644
index 000000000000..c66a71f8b839
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -0,0 +1,218 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CTL_H
+#define _DPU_HW_CTL_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_blk.h"
+
+/**
+ * dpu_ctl_mode_sel: Interface mode selection
+ * DPU_CTL_MODE_SEL_VID: Video mode interface
+ * DPU_CTL_MODE_SEL_CMD: Command mode interface
+ */
+enum dpu_ctl_mode_sel {
+ DPU_CTL_MODE_SEL_VID = 0,
+ DPU_CTL_MODE_SEL_CMD
+};
+
+struct dpu_hw_ctl;
+/**
+ * struct dpu_hw_stage_cfg - blending stage cfg
+ * @stage : SSPP_ID at each stage
+ * @multirect_index: index of the rectangle of SSPP.
+ */
+struct dpu_hw_stage_cfg {
+ enum dpu_sspp stage[DPU_STAGE_MAX][PIPES_PER_STAGE];
+ enum dpu_sspp_multirect_index multirect_index
+ [DPU_STAGE_MAX][PIPES_PER_STAGE];
+};
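
As an illustration (a sketch, assuming the DPU_SSPP_RECT_* indices from dpu_hw_sspp.h), blending VIG0 as a solo rect together with the second rect of DMA0 at stage 0 would be described as:

	struct dpu_hw_stage_cfg stage_cfg = { 0 };

	stage_cfg.stage[0][0] = SSPP_VIG0;
	stage_cfg.multirect_index[0][0] = DPU_SSPP_RECT_SOLO;
	stage_cfg.stage[0][1] = SSPP_DMA0;
	stage_cfg.multirect_index[0][1] = DPU_SSPP_RECT_1;
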
+
+/**
+ * struct dpu_hw_intf_cfg : Describes how the DPU writes data to the output interface
+ * @intf : Interface id
+ * @mode_3d: 3d mux configuration
+ * @intf_mode_sel: Interface mode, cmd / vid
+ * @stream_sel: Stream selection for multi-stream interfaces
+ */
+struct dpu_hw_intf_cfg {
+ enum dpu_intf intf;
+ enum dpu_3d_blend_mode mode_3d;
+ enum dpu_ctl_mode_sel intf_mode_sel;
+ int stream_sel;
+};
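
For example, a command-mode DSI panel on INTF_1 with no 3D mux would be configured as below (a sketch; BLEND_3D_NONE is assumed from dpu_hw_mdss.h):

	struct dpu_hw_intf_cfg intf_cfg = {
		.intf          = INTF_1,
		.mode_3d       = BLEND_3D_NONE,
		.intf_mode_sel = DPU_CTL_MODE_SEL_CMD,
		.stream_sel    = 0,
	};

	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
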
+
+/**
+ * struct dpu_hw_ctl_ops - Interface to the ctl path HW driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_ctl_ops {
+ /**
+	 * Kickoff hw operation for SW controlled interfaces:
+	 * DSI cmd mode and the WB interface are SW controlled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_start)(struct dpu_hw_ctl *ctx);
+
+ /**
+	 * Signal that kickoff preparation is in progress for SW
+	 * controlled interfaces: DSI cmd mode and the WB interface
+	 * are SW controlled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_pending)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Clear the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Query the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @flushbits : module flushmask
+ */
+ void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
+ u32 flushbits);
+
+ /**
+ * Write the value of the pending_flush_mask to hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Read the value of the flush register
+ * @ctx : ctl path ctx pointer
+	 * @return: value of the ctl flush register.
+ */
+ u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Setup ctl_path interface config
+	 * @ctx : ctl path ctx pointer
+ * @cfg : interface config structure pointer
+ */
+ void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg);
+
+ int (*reset)(struct dpu_hw_ctl *c);
+
+ /*
+ * wait_reset_status - checks ctl reset status
+ * @ctx : ctl path ctx pointer
+ *
+ * This function checks the ctl reset status bit.
+ * If the reset bit is set, it keeps polling the status till the hw
+ * reset is complete.
+ * Returns: 0 on success or -error if reset incomplete within interval
+ */
+ int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
+
+ uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp blk);
+
+ uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm blk);
+
+ int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
+ u32 *flushbits,
+ enum dpu_intf blk);
+
+ int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx,
+ u32 *flushbits,
+ enum dpu_cdm blk);
+
+ /**
+ * Set all blend stages to disabled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Configure layer mixer to pipe configuration
+ * @ctx : ctl path ctx pointer
+ * @lm : layer mixer enumeration
+ * @cfg : blend stage configuration
+ */
+ void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_ctl : CTL PATH driver object
+ * @base: hardware block base structure
+ * @hw: block register map object
+ * @idx: control path index
+ * @caps: control path capabilities
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @ops: operation list
+ */
+struct dpu_hw_ctl {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* ctl path */
+ int idx;
+ const struct dpu_ctl_cfg *caps;
+ int mixer_count;
+ const struct dpu_lm_cfg *mixer_hw_caps;
+ u32 pending_flush_mask;
+
+ /* ops */
+ struct dpu_hw_ctl_ops ops;
+};
+
+/**
+ * to_dpu_hw_ctl - convert base hw block object to the dpu_hw_ctl container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_ctl, base);
+}
+
+/**
+ * dpu_hw_ctl_init(): Initializes the ctl_path hw driver object.
+ * Should be called before accessing any ctl path registers.
+ * @idx: ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_ctl_destroy(): Destroys the ctl driver context.
+ * Should be called to free the context.
+ */
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+
+#endif /*_DPU_HW_CTL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
new file mode 100644
index 000000000000..c0b7f0049365
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -0,0 +1,1183 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. the MDP base
+ */
+#define MDP_SSPP_TOP0_OFF 0x0
+#define MDP_INTF_0_OFF 0x6A000
+#define MDP_INTF_1_OFF 0x6A800
+#define MDP_INTF_2_OFF 0x6B000
+#define MDP_INTF_3_OFF 0x6B800
+#define MDP_INTF_4_OFF 0x6C000
+#define MDP_AD4_0_OFF 0x7C000
+#define MDP_AD4_1_OFF 0x7D000
+#define MDP_AD4_INTR_EN_OFF 0x41c
+#define MDP_AD4_INTR_CLEAR_OFF 0x424
+#define MDP_AD4_INTR_STATUS_OFF 0x420
+
+/**
+ * WB interrupt status bit definitions
+ */
+#define DPU_INTR_WB_0_DONE BIT(0)
+#define DPU_INTR_WB_1_DONE BIT(1)
+#define DPU_INTR_WB_2_DONE BIT(4)
+
+/**
+ * WDOG timer interrupt status bit definitions
+ */
+#define DPU_INTR_WD_TIMER_0_DONE BIT(2)
+#define DPU_INTR_WD_TIMER_1_DONE BIT(3)
+#define DPU_INTR_WD_TIMER_2_DONE BIT(5)
+#define DPU_INTR_WD_TIMER_3_DONE BIT(6)
+#define DPU_INTR_WD_TIMER_4_DONE BIT(7)
+
+/**
+ * Pingpong interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_DONE BIT(8)
+#define DPU_INTR_PING_PONG_1_DONE BIT(9)
+#define DPU_INTR_PING_PONG_2_DONE BIT(10)
+#define DPU_INTR_PING_PONG_3_DONE BIT(11)
+#define DPU_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define DPU_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define DPU_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define DPU_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define DPU_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define DPU_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define DPU_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define DPU_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+
+/**
+ * Interface interrupt status bit definitions
+ */
+#define DPU_INTR_INTF_0_UNDERRUN BIT(24)
+#define DPU_INTR_INTF_1_UNDERRUN BIT(26)
+#define DPU_INTR_INTF_2_UNDERRUN BIT(28)
+#define DPU_INTR_INTF_3_UNDERRUN BIT(30)
+#define DPU_INTR_INTF_0_VSYNC BIT(25)
+#define DPU_INTR_INTF_1_VSYNC BIT(27)
+#define DPU_INTR_INTF_2_VSYNC BIT(29)
+#define DPU_INTR_INTF_3_VSYNC BIT(31)
+
+/**
+ * Pingpong Secondary interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
+#define DPU_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define DPU_INTR_PING_PONG_S0_RD_PTR BIT(8)
+#define DPU_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
+#define DPU_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
+
+/**
+ * Pingpong TEAR detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
+#define DPU_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
+#define DPU_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
+#define DPU_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
+
+/**
+ * Pingpong TE detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TE_DETECTED BIT(24)
+#define DPU_INTR_PING_PONG_1_TE_DETECTED BIT(25)
+#define DPU_INTR_PING_PONG_2_TE_DETECTED BIT(26)
+#define DPU_INTR_PING_PONG_3_TE_DETECTED BIT(27)
+
+/**
+ * Ctl start interrupt status bit definitions
+ */
+#define DPU_INTR_CTL_0_START BIT(9)
+#define DPU_INTR_CTL_1_START BIT(10)
+#define DPU_INTR_CTL_2_START BIT(11)
+#define DPU_INTR_CTL_3_START BIT(12)
+#define DPU_INTR_CTL_4_START BIT(13)
+
+/**
+ * Concurrent WB overflow interrupt status bit definitions
+ */
+#define DPU_INTR_CWB_2_OVERFLOW BIT(14)
+#define DPU_INTR_CWB_3_OVERFLOW BIT(15)
+
+/**
+ * Histogram VIG done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_DONE BIT(0)
+#define DPU_INTR_HIST_VIG_1_DONE BIT(4)
+#define DPU_INTR_HIST_VIG_2_DONE BIT(8)
+#define DPU_INTR_HIST_VIG_3_DONE BIT(10)
+
+/**
+ * Histogram VIG reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
+#define DPU_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
+#define DPU_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
+#define DPU_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
+
+/**
+ * Histogram DSPP done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_DONE BIT(12)
+#define DPU_INTR_HIST_DSPP_1_DONE BIT(16)
+#define DPU_INTR_HIST_DSPP_2_DONE BIT(20)
+#define DPU_INTR_HIST_DSPP_3_DONE BIT(22)
+
+/**
+ * Histogram DSPP reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
+#define DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
+#define DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
+#define DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
+
+/**
+ * INTF interrupt status bit definitions
+ */
+#define DPU_INTR_VIDEO_INTO_STATIC BIT(0)
+#define DPU_INTR_VIDEO_OUTOF_STATIC BIT(1)
+#define DPU_INTR_DSICMD_0_INTO_STATIC BIT(2)
+#define DPU_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
+#define DPU_INTR_DSICMD_1_INTO_STATIC BIT(4)
+#define DPU_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
+#define DPU_INTR_DSICMD_2_INTO_STATIC BIT(6)
+#define DPU_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
+#define DPU_INTR_PROG_LINE BIT(8)
+
+/**
+ * AD4 interrupt status bit definitions
+ */
+#define DPU_INTR_BRIGHTPR_UPDATED BIT(4)
+#define DPU_INTR_DARKENH_UPDATED BIT(3)
+#define DPU_INTR_STREN_OUTROI_UPDATED BIT(2)
+#define DPU_INTR_STREN_INROI_UPDATED BIT(1)
+#define DPU_INTR_BACKLIGHT_UPDATED BIT(0)
+/**
+ * struct dpu_intr_reg - array of DPU register sets
+ * @clr_off: offset to CLEAR reg
+ * @en_off: offset to ENABLE reg
+ * @status_off: offset to STATUS reg
+ */
+struct dpu_intr_reg {
+ u32 clr_off;
+ u32 en_off;
+ u32 status_off;
+};
+
+/**
+ * struct dpu_irq_type - maps each irq with i/f
+ * @intr_type: type of interrupt listed in dpu_intr_type
+ * @instance_idx: instance index of the associated HW block in DPU
+ * @irq_mask: corresponding bit in the interrupt status reg
+ * @reg_idx: which reg set to use
+ */
+struct dpu_irq_type {
+ u32 intr_type;
+ u32 instance_idx;
+ u32 irq_mask;
+ u32 reg_idx;
+};
+
+/**
+ * List of DPU interrupt registers
+ */
+static const struct dpu_intr_reg dpu_intr_set[] = {
+ {
+ MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR_EN,
+ MDP_SSPP_TOP0_OFF+INTR_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR2_EN,
+ MDP_SSPP_TOP0_OFF+INTR2_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+ },
+ {
+ MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_0_OFF+INTF_INTR_EN,
+ MDP_INTF_0_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_1_OFF+INTF_INTR_EN,
+ MDP_INTF_1_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF+INTF_INTR_EN,
+ MDP_INTF_2_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF+INTF_INTR_EN,
+ MDP_INTF_3_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_4_OFF+INTF_INTR_EN,
+ MDP_INTF_4_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
+ },
+ {
+ MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
+ }
+};
+
+/**
+ * IRQ mapping table - used to look up the irq_idx in this table that has
+ * a matching interrupt type and instance index.
+ */
+static const struct dpu_irq_type dpu_irq_map[] = {
+ /* BEGIN MAP_RANGE: 0-31, INTR */
+ /* irq_idx: 0-3 */
+ { DPU_IRQ_TYPE_WB_ROT_COMP, WB_0, DPU_INTR_WB_0_DONE, 0},
+ { DPU_IRQ_TYPE_WB_ROT_COMP, WB_1, DPU_INTR_WB_1_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_0, DPU_INTR_WD_TIMER_0_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_1, DPU_INTR_WD_TIMER_1_DONE, 0},
+ /* irq_idx: 4-7 */
+ { DPU_IRQ_TYPE_WB_WFD_COMP, WB_2, DPU_INTR_WB_2_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_2, DPU_INTR_WD_TIMER_2_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_3, DPU_INTR_WD_TIMER_3_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_4, DPU_INTR_WD_TIMER_4_DONE, 0},
+ /* irq_idx: 8-11 */
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_DONE, 0},
+ /* irq_idx: 12-15 */
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_RD_PTR, 0},
+ /* irq_idx: 16-19 */
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_WR_PTR, 0},
+ /* irq_idx: 20-23 */
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+ /* irq_idx: 24-27 */
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, DPU_INTR_INTF_0_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_0, DPU_INTR_INTF_0_VSYNC, 0},
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, DPU_INTR_INTF_1_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_1, DPU_INTR_INTF_1_VSYNC, 0},
+ /* irq_idx: 28-31 */
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, DPU_INTR_INTF_2_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_2, DPU_INTR_INTF_2_VSYNC, 0},
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, DPU_INTR_INTF_3_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_3, DPU_INTR_INTF_3_VSYNC, 0},
+
+	/* BEGIN MAP_RANGE: 32-63, INTR2 */
+ /* irq_idx: 32-35 */
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 36-39 */
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_WR_PTR, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 40 */
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_RD_PTR, 1},
+ /* irq_idx: 41-45 */
+ { DPU_IRQ_TYPE_CTL_START, CTL_0,
+ DPU_INTR_CTL_0_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_1,
+ DPU_INTR_CTL_1_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_2,
+ DPU_INTR_CTL_2_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_3,
+ DPU_INTR_CTL_3_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_4,
+ DPU_INTR_CTL_4_START, 1},
+ /* irq_idx: 46-47 */
+ { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_2, DPU_INTR_CWB_2_OVERFLOW, 1},
+ { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_3, DPU_INTR_CWB_3_OVERFLOW, 1},
+ /* irq_idx: 48-51 */
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_TEAR_DETECTED, 1},
+ /* irq_idx: 52-55 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 56-59 */
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_TE_DETECTED, 1},
+ /* irq_idx: 60-63 */
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+
+ /* BEGIN MAP_RANGE: 64-95 HIST */
+ /* irq_idx: 64-67 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, DPU_INTR_HIST_VIG_0_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
+ DPU_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 68-71 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, DPU_INTR_HIST_VIG_1_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
+ DPU_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 72-75 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, DPU_INTR_HIST_VIG_2_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
+ DPU_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, DPU_INTR_HIST_VIG_3_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
+ DPU_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 76-79 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, DPU_INTR_HIST_DSPP_0_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
+ DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 80-83 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, DPU_INTR_HIST_DSPP_1_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
+ DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 84-87 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, DPU_INTR_HIST_DSPP_2_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
+ DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, DPU_INTR_HIST_DSPP_3_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
+ DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 88-91 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 92-95 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+
+ /* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
+ /* irq_idx: 96-99 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+ DPU_INTR_VIDEO_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 3},
+ /* irq_idx: 100-103 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 3},
+ /* irq_idx: 104-107 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_0, DPU_INTR_PROG_LINE, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 108-111 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 112-115 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 116-119 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 120-123 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 124-127 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+
+ /* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
+ /* irq_idx: 128-131 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+ DPU_INTR_VIDEO_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 4},
+ /* irq_idx: 132-135 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 4},
+ /* irq_idx: 136-139 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_1, DPU_INTR_PROG_LINE, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 140-143 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 144-147 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 148-151 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 152-155 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 156-159 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+
+ /* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
+ /* irq_idx: 160-163 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
+ DPU_INTR_VIDEO_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 5},
+ /* irq_idx: 164-167 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 5},
+ /* irq_idx: 168-171 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_2, DPU_INTR_PROG_LINE, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 172-175 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 176-179 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 180-183 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 184-187 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 188-191 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+
+ /* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
+ /* irq_idx: 192-195 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+ DPU_INTR_VIDEO_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 6},
+ /* irq_idx: 196-199 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 6},
+ /* irq_idx: 200-203 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_3, DPU_INTR_PROG_LINE, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 204-207 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 208-211 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 212-215 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 216-219 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 220-223 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+
+ /* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
+ /* irq_idx: 224-227 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
+ DPU_INTR_VIDEO_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 7},
+ /* irq_idx: 228-231 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 7},
+ /* irq_idx: 232-235 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_4, DPU_INTR_PROG_LINE, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 236-239 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 240-243 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 244-247 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 248-251 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 252-255 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+
+ /* BEGIN MAP_RANGE: 256-287 AD4_0_INTR */
+ /* irq_idx: 256-259 */
+ { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_0, DPU_INTR_BACKLIGHT_UPDATED, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 260-263 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 264-267 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 268-271 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 272-275 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 276-279 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 280-283 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 284-287 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+
+ /* BEGIN MAP_RANGE: 288-319 AD4_1_INTR */
+ /* irq_idx: 288-291 */
+ { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_1, DPU_INTR_BACKLIGHT_UPDATED, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 292-295 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 296-299 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 300-303 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 304-307 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 308-311 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 312-315 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 316-319 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+};
+
+static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type,
+ u32 instance_idx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_irq_map); i++) {
+ if (intr_type == dpu_irq_map[i].intr_type &&
+ instance_idx == dpu_irq_map[i].instance_idx)
+ return i;
+ }
+
+ pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
+ intr_type, instance_idx);
+ return -EINVAL;
+}
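
Because each interrupt register owns a contiguous block of 32 indexes, the returned irq_idx encodes both the register set (irq_idx / 32) and the bit within it (irq_idx % 32). A caller sketch:

	/* Sketch: resolve and enable the VSYNC irq of INTF_1. */
	int irq_idx = intr->ops.irq_idx_lookup(DPU_IRQ_TYPE_INTF_VSYNC, INTF_1);

	if (irq_idx < 0)
		return irq_idx;

	intr->ops.enable_irq(intr, irq_idx);
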
+
+static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off,
+ uint32_t mask)
+{
+ if (!intr)
+ return;
+
+ DPU_REG_WRITE(&intr->hw, reg_off, mask);
+
+ /* ensure register writes go through */
+ wmb();
+}
+
+static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
+ void (*cbfunc)(void *, int),
+ void *arg)
+{
+ int reg_idx;
+ int irq_idx;
+ int start_idx;
+ int end_idx;
+ u32 irq_status;
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ /*
+ * The dispatcher will save the IRQ status before calling here.
+	 * Now go through each IRQ status and find the matching
+	 * irq lookup index.
+ */
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
+ irq_status = intr->save_irq_status[reg_idx];
+
+ /*
+ * Each Interrupt register has a range of 32 indexes, and
+ * that is static for dpu_irq_map.
+ */
+ start_idx = reg_idx * 32;
+ end_idx = start_idx + 32;
+
+ if (start_idx >= ARRAY_SIZE(dpu_irq_map) ||
+ end_idx > ARRAY_SIZE(dpu_irq_map))
+ continue;
+
+ /*
+ * Search through matching intr status from irq map.
+	 * start_idx and end_idx define the search range in
+ * the dpu_irq_map.
+ */
+ for (irq_idx = start_idx;
+ (irq_idx < end_idx) && irq_status;
+ irq_idx++)
+ if ((irq_status & dpu_irq_map[irq_idx].irq_mask) &&
+ (dpu_irq_map[irq_idx].reg_idx == reg_idx)) {
+ /*
+				 * Once the irq mask matches, perform a callback
+				 * to the given cbfunc. cbfunc will take care of
+				 * clearing the interrupt status. If cbfunc is
+				 * not provided, the interrupt is cleared here.
+ */
+ if (cbfunc)
+ cbfunc(arg, irq_idx);
+ else
+ intr->ops.clear_intr_status_nolock(
+ intr, irq_idx);
+
+ /*
+				 * When the callback finishes, clear the irq_status
+				 * with the matching mask. Once irq_status is
+				 * fully cleared, the search can be stopped.
+ */
+ irq_status &= ~dpu_irq_map[irq_idx].irq_mask;
+ }
+ }
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
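
Note that the dispatcher holds irq_lock while invoking cbfunc, so a callback that acknowledges the interrupt itself must use the nolock variant. A sketch with hypothetical names:

	/* Sketch: a dispatch callback; 'arg' is whatever was passed to
	 * dispatch_irqs (here assumed to be the dpu_hw_intr handle).
	 */
	static void example_irq_cb(void *arg, int irq_idx)
	{
		struct dpu_hw_intr *intr = arg;

		/* irq_lock is already held by the dispatcher */
		intr->ops.clear_intr_status_nolock(intr, irq_idx);
	}
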
+
+static int dpu_hw_intr_enable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ const struct dpu_intr_reg *reg;
+ const struct dpu_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &dpu_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &dpu_intr_set[reg_idx];
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if (cache_irq_mask & irq->irq_mask) {
+ dbgstr = "DPU IRQ already set:";
+ } else {
+ dbgstr = "DPU IRQ enabled:";
+
+ cache_irq_mask |= irq->irq_mask;
+		/* Clear any pending interrupt */
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+ /* Enabling interrupts with the new mask */
+ DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+ /* ensure register write goes through */
+ wmb();
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irq_nolock(struct dpu_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ const struct dpu_intr_reg *reg;
+ const struct dpu_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &dpu_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &dpu_intr_set[reg_idx];
+
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if ((cache_irq_mask & irq->irq_mask) == 0) {
+ dbgstr = "DPU IRQ is already cleared:";
+ } else {
+ dbgstr = "DPU IRQ mask disable:";
+
+ cache_irq_mask &= ~irq->irq_mask;
+ /* Disable interrupts based on the new mask */
+ DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+		/* Clear any pending interrupt */
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+
+ /* ensure register write goes through */
+ wmb();
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+ unsigned long irq_flags;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ dpu_hw_intr_disable_irq_nolock(intr, irq_idx);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return 0;
+}
+
+static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
+{
+ int i;
+
+ if (!intr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff);
+
+ /* ensure register writes go through */
+ wmb();
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
+{
+ int i;
+
+ if (!intr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000);
+
+ /* ensure register writes go through */
+ wmb();
+
+ return 0;
+}
+
+static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr,
+ uint32_t *mask)
+{
+ if (!intr || !mask)
+ return -EINVAL;
+
+ *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
+ | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+
+ return 0;
+}
+
+static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
+{
+ int i;
+ u32 enable_mask;
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ /* Read interrupt status */
+ intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
+ dpu_intr_set[i].status_off);
+
+ /* Read enable mask */
+ enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[i].en_off);
+
+ /* and clear the interrupt */
+ if (intr->save_irq_status[i])
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off,
+ intr->save_irq_status[i]);
+
+ /* Finally update IRQ status based on enable mask */
+ intr->save_irq_status[i] &= enable_mask;
+ }
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
+ int irq_idx)
+{
+ int reg_idx;
+
+ if (!intr)
+ return;
+
+ reg_idx = dpu_irq_map[irq_idx].reg_idx;
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ dpu_irq_map[irq_idx].irq_mask);
+
+ /* ensure register writes go through */
+ wmb();
+}
+
+static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr,
+ int irq_idx)
+{
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
+ int irq_idx, bool clear)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ u32 intr_status;
+
+ if (!intr)
+ return 0;
+
+ if (irq_idx >= ARRAY_SIZE(dpu_irq_map) || irq_idx < 0) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return 0;
+ }
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+
+ reg_idx = dpu_irq_map[irq_idx].reg_idx;
+ intr_status = DPU_REG_READ(&intr->hw,
+ dpu_intr_set[reg_idx].status_off) &
+ dpu_irq_map[irq_idx].irq_mask;
+ if (intr_status && clear)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ intr_status);
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return intr_status;
+}
+
+static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
+{
+ ops->set_mask = dpu_hw_intr_set_mask;
+ ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
+ ops->enable_irq = dpu_hw_intr_enable_irq;
+ ops->disable_irq = dpu_hw_intr_disable_irq;
+ ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
+ ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
+ ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
+ ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts;
+ ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses;
+ ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status;
+ ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
+ ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
+}
+
+static void __intr_offset(struct dpu_mdss_cfg *m,
+ void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
+{
+ hw->base_off = addr;
+ hw->blk_off = m->mdp[0].base;
+ hw->hwversion = m->hwversion;
+}
+
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_intr *intr;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ if (!intr)
+ return ERR_PTR(-ENOMEM);
+
+ __intr_offset(m, addr, &intr->hw);
+ __setup_intr_ops(&intr->ops);
+
+ intr->irq_idx_tbl_size = ARRAY_SIZE(dpu_irq_map);
+
+ intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->cache_irq_mask == NULL) {
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ intr->save_irq_status = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->save_irq_status == NULL) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&intr->irq_lock);
+
+ return intr;
+}
+
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
+{
+ if (intr) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr->save_irq_status);
+ kfree(intr);
+ }
+}
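
A minimal caller sketch for the pair above, again assuming 'mmio' and 'catalog' from the kms layer:

	struct dpu_hw_intr *intr;

	intr = dpu_hw_intr_init(mmio, catalog);
	if (IS_ERR(intr))
		return PTR_ERR(intr);

	intr->ops.clear_all_irqs(intr);
	intr->ops.disable_all_irqs(intr);
	/* ... request the top-level IRQ line, run, then ... */
	dpu_hw_intr_destroy(intr);
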
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
new file mode 100644
index 000000000000..61e4cba36562
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -0,0 +1,257 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTERRUPTS_H
+#define _DPU_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+#define IRQ_SOURCE_MDP BIT(0)
+#define IRQ_SOURCE_DSI0 BIT(4)
+#define IRQ_SOURCE_DSI1 BIT(5)
+#define IRQ_SOURCE_HDMI BIT(8)
+#define IRQ_SOURCE_EDP BIT(12)
+#define IRQ_SOURCE_MHL BIT(16)
+
+/**
+ * dpu_intr_type - HW Interrupt Type
+ * @DPU_IRQ_TYPE_WB_ROT_COMP: WB rotator done
+ * @DPU_IRQ_TYPE_WB_WFD_COMP: WB WFD done
+ * @DPU_IRQ_TYPE_PING_PONG_COMP: PingPong done
+ * @DPU_IRQ_TYPE_PING_PONG_RD_PTR: PingPong read pointer
+ * @DPU_IRQ_TYPE_PING_PONG_WR_PTR: PingPong write pointer
+ * @DPU_IRQ_TYPE_PING_PONG_AUTO_REF: PingPong auto refresh
+ * @DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK: PingPong Tear check
+ * @DPU_IRQ_TYPE_PING_PONG_TE_CHECK: PingPong TE detection
+ * @DPU_IRQ_TYPE_INTF_UNDER_RUN: INTF underrun
+ * @DPU_IRQ_TYPE_INTF_VSYNC: INTF VSYNC
+ * @DPU_IRQ_TYPE_CWB_OVERFLOW: Concurrent WB overflow
+ * @DPU_IRQ_TYPE_HIST_VIG_DONE: VIG Histogram done
+ * @DPU_IRQ_TYPE_HIST_VIG_RSTSEQ: VIG Histogram reset
+ * @DPU_IRQ_TYPE_HIST_DSPP_DONE: DSPP Histogram done
+ * @DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ: DSPP Histogram reset
+ * @DPU_IRQ_TYPE_WD_TIMER: Watchdog timer
+ * @DPU_IRQ_TYPE_SFI_VIDEO_IN: Video static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_VIDEO_OUT: Video static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_IN: DSI CMD0 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_OUT: DSI CMD0 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_IN: DSI CMD1 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_OUT: DSI CMD1 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_IN: DSI CMD2 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_OUT: DSI CMD2 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_PROG_LINE: Programmable Line interrupt
+ * @DPU_IRQ_TYPE_AD4_BL_DONE: AD4 backlight
+ * @DPU_IRQ_TYPE_CTL_START: Control start
+ * @DPU_IRQ_TYPE_RESERVED: Reserved for expansion
+ */
+enum dpu_intr_type {
+ DPU_IRQ_TYPE_WB_ROT_COMP,
+ DPU_IRQ_TYPE_WB_WFD_COMP,
+ DPU_IRQ_TYPE_PING_PONG_COMP,
+ DPU_IRQ_TYPE_PING_PONG_RD_PTR,
+ DPU_IRQ_TYPE_PING_PONG_WR_PTR,
+ DPU_IRQ_TYPE_PING_PONG_AUTO_REF,
+ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK,
+ DPU_IRQ_TYPE_PING_PONG_TE_CHECK,
+ DPU_IRQ_TYPE_INTF_UNDER_RUN,
+ DPU_IRQ_TYPE_INTF_VSYNC,
+ DPU_IRQ_TYPE_CWB_OVERFLOW,
+ DPU_IRQ_TYPE_HIST_VIG_DONE,
+ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ,
+ DPU_IRQ_TYPE_HIST_DSPP_DONE,
+ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ,
+ DPU_IRQ_TYPE_WD_TIMER,
+ DPU_IRQ_TYPE_SFI_VIDEO_IN,
+ DPU_IRQ_TYPE_SFI_VIDEO_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_0_IN,
+ DPU_IRQ_TYPE_SFI_CMD_0_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_1_IN,
+ DPU_IRQ_TYPE_SFI_CMD_1_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_2_IN,
+ DPU_IRQ_TYPE_SFI_CMD_2_OUT,
+ DPU_IRQ_TYPE_PROG_LINE,
+ DPU_IRQ_TYPE_AD4_BL_DONE,
+ DPU_IRQ_TYPE_CTL_START,
+ DPU_IRQ_TYPE_RESERVED,
+};
+
+struct dpu_hw_intr;
+
+/**
+ * Interrupt operations.
+ */
+struct dpu_hw_intr_ops {
+ /**
+ * set_mask - Programs the given interrupt register with the
+ * given interrupt mask. Register value will get overwritten.
+ * @intr: HW interrupt handle
+ * @reg_off: MDSS HW register offset
+ * @irqmask: IRQ mask value
+ */
+ void (*set_mask)(
+ struct dpu_hw_intr *intr,
+			uint32_t reg_off,
+ uint32_t irqmask);
+
+ /**
+	 * irq_idx_lookup - Look up the IRQ index for the given HW interrupt type.
+ * Used for all irq related ops
+ * @intr_type: Interrupt type defined in dpu_intr_type
+ * @instance_idx: HW interrupt block instance
+ * @return: irq_idx or -EINVAL for lookup fail
+ */
+ int (*irq_idx_lookup)(
+ enum dpu_intr_type intr_type,
+ u32 instance_idx);
+
+ /**
+ * enable_irq - Enable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+	 * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*enable_irq)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * disable_irq - Disable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+	 * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_irq)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
+ * any asserted IRQs). Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*clear_all_irqs)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * disable_all_irqs - Disables all the interrupts. Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_all_irqs)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * dispatch_irqs - IRQ dispatcher will call the given callback
+ * function when a matching interrupt status bit is
+ * found in the irq mapping table.
+ * @intr: HW interrupt handle
+ * @cbfunc: Callback function pointer
+ * @arg: Argument to pass back during callback
+ */
+ void (*dispatch_irqs)(
+ struct dpu_hw_intr *intr,
+ void (*cbfunc)(void *arg, int irq_idx),
+ void *arg);
+
+ /**
+	 * get_interrupt_statuses - Gets and stores the value of all interrupt
+	 * status registers that are currently fired.
+ * @intr: HW interrupt handle
+ */
+ void (*get_interrupt_statuses)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * clear_interrupt_status - Clears HW interrupt status based on given
+ * lookup IRQ index.
+ * @intr: HW interrupt handle
+	 * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ */
+ void (*clear_interrupt_status)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * clear_intr_status_nolock() - clears the HW interrupts without lock
+ * @intr: HW interrupt handle
+	 * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ */
+ void (*clear_intr_status_nolock)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+	 * get_interrupt_status - Gets the HW interrupt status and clears it if set,
+ * based on given lookup IRQ index.
+ * @intr: HW interrupt handle
+	 * @irq_idx: Lookup irq index returned from irq_idx_lookup
+ * @clear: True to clear irq after read
+ */
+ u32 (*get_interrupt_status)(
+ struct dpu_hw_intr *intr,
+ int irq_idx,
+ bool clear);
+
+ /**
+ * get_valid_interrupts - Gets a mask of all valid interrupt sources
+ * within DPU. These are actually status bits
+ * within interrupt registers that specify the
+	 * source of the interrupt. For example,
+ * valid interrupt sources can be MDP, DSI,
+ * HDMI etc.
+ * @intr: HW interrupt handle
+ * @mask: Returning the interrupt source MASK
+ * @return: 0 for success, otherwise failure
+ */
+ int (*get_valid_interrupts)(
+ struct dpu_hw_intr *intr,
+ uint32_t *mask);
+};
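
Taken together, the expected top-half flow is: latch and clear all fired statuses, then walk them against the map. A sketch, reusing a callback like the one outlined for dispatch_irqs in dpu_hw_interrupts.c above:

	/* Sketch: top-level ISR flow built from the ops above. */
	static irqreturn_t example_isr(int irq, void *data)
	{
		struct dpu_hw_intr *intr = data;

		intr->ops.get_interrupt_statuses(intr);
		intr->ops.dispatch_irqs(intr, example_irq_cb, intr);

		return IRQ_HANDLED;
	}
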
+
+/**
+ * struct dpu_hw_intr: hw interrupts handling data structure
+ * @hw: virtual address mapping
+ * @ops: function pointer mapping for IRQ handling
+ * @cache_irq_mask: array of IRQ enable masks reg storage created during init
+ * @save_irq_status: array of IRQ status reg storage created during init
+ * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
+ * @irq_lock: spinlock for accessing IRQ resources
+ */
+struct dpu_hw_intr {
+ struct dpu_hw_blk_reg_map hw;
+ struct dpu_hw_intr_ops ops;
+ u32 *cache_irq_mask;
+ u32 *save_irq_status;
+ u32 irq_idx_tbl_size;
+ spinlock_t irq_lock;
+};
+
+/**
+ * dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intr_destroy(): Cleans up the interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
new file mode 100644
index 000000000000..d280df5613c9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -0,0 +1,349 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define INTF_TIMING_ENGINE_EN 0x000
+#define INTF_CONFIG 0x004
+#define INTF_HSYNC_CTL 0x008
+#define INTF_VSYNC_PERIOD_F0 0x00C
+#define INTF_VSYNC_PERIOD_F1 0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0 0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1 0x018
+#define INTF_DISPLAY_V_START_F0 0x01C
+#define INTF_DISPLAY_V_START_F1 0x020
+#define INTF_DISPLAY_V_END_F0 0x024
+#define INTF_DISPLAY_V_END_F1 0x028
+#define INTF_ACTIVE_V_START_F0 0x02C
+#define INTF_ACTIVE_V_START_F1 0x030
+#define INTF_ACTIVE_V_END_F0 0x034
+#define INTF_ACTIVE_V_END_F1 0x038
+#define INTF_DISPLAY_HCTL 0x03C
+#define INTF_ACTIVE_HCTL 0x040
+#define INTF_BORDER_COLOR 0x044
+#define INTF_UNDERFLOW_COLOR 0x048
+#define INTF_HSYNC_SKEW 0x04C
+#define INTF_POLARITY_CTL 0x050
+#define INTF_TEST_CTL 0x054
+#define INTF_TP_COLOR0 0x058
+#define INTF_TP_COLOR1 0x05C
+#define INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define INTF_FRAME_COUNT 0x0AC
+#define INTF_LINE_COUNT 0x0B0
+
+#define INTF_DEFLICKER_CONFIG 0x0F0
+#define INTF_DEFLICKER_STRNG_COEFF 0x0F4
+#define INTF_DEFLICKER_WEAK_COEFF 0x0F8
+
+#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
+#define INTF_PANEL_FORMAT 0x090
+#define INTF_TPG_ENABLE 0x100
+#define INTF_TPG_MAIN_CONTROL 0x104
+#define INTF_TPG_VIDEO_CONFIG 0x108
+#define INTF_TPG_COMPONENT_LIMITS 0x10C
+#define INTF_TPG_RECTANGLE 0x110
+#define INTF_TPG_INITIAL_VALUE 0x114
+#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
+#define INTF_TPG_RGB_MAPPING 0x11C
+#define INTF_PROG_FETCH_START 0x170
+#define INTF_PROG_ROT_START 0x174
+
+#define INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define INTF_FRAME_COUNT 0x0AC
+#define INTF_LINE_COUNT 0x0B0
+
+#define INTF_MISR_CTRL 0x180
+#define INTF_MISR_SIGNATURE 0x184
+
+static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->intf_count; i++) {
+ if ((intf == m->intf[i].id) &&
+ (m->intf[i].type != INTF_NONE)) {
+ b->base_off = addr;
+ b->blk_off = m->intf[i].base;
+ b->length = m->intf[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_INTF;
+ return &m->intf[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+ const struct intf_timing_params *p,
+ const struct dpu_format *fmt)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 hsync_period, vsync_period;
+ u32 display_v_start, display_v_end;
+ u32 hsync_start_x, hsync_end_x;
+ u32 active_h_start, active_h_end;
+ u32 active_v_start, active_v_end;
+ u32 active_hctl, display_hctl, hsync_ctl;
+ u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
+ u32 panel_format;
+ u32 intf_cfg;
+
+ /* read interface_cfg */
+ intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
+ hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+ p->h_front_porch;
+ vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+ p->v_front_porch;
+
+ display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+ hsync_period) + p->hsync_skew;
+ display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+ p->hsync_skew - 1;
+
+ if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
+ display_v_start += p->hsync_pulse_width + p->h_back_porch;
+ display_v_end -= p->h_front_porch;
+ }
+
+ hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+ hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+ if (p->width != p->xres) {
+ active_h_start = hsync_start_x;
+ active_h_end = active_h_start + p->xres - 1;
+ } else {
+ active_h_start = 0;
+ active_h_end = 0;
+ }
+
+ if (p->height != p->yres) {
+ active_v_start = display_v_start;
+ active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+ } else {
+ active_v_start = 0;
+ active_v_end = 0;
+ }
+
+ if (active_h_end) {
+ active_hctl = (active_h_end << 16) | active_h_start;
+ intf_cfg |= BIT(29); /* ACTIVE_H_ENABLE */
+ } else {
+ active_hctl = 0;
+ }
+
+ if (active_v_end)
+ intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+
+ hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ den_polarity = 0;
+ if (ctx->cap->type == INTF_HDMI) {
+ hsync_polarity = p->yres >= 720 ? 0 : 1;
+ vsync_polarity = p->yres >= 720 ? 0 : 1;
+ } else {
+ hsync_polarity = 0;
+ vsync_polarity = 0;
+ }
+ polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
+ (vsync_polarity << 1) | /* VSYNC Polarity */
+ (hsync_polarity << 0); /* HSYNC Polarity */
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ panel_format = (fmt->bits[C0_G_Y] |
+ (fmt->bits[C1_B_Cb] << 2) |
+ (fmt->bits[C2_R_Cr] << 4) |
+ (0x21 << 8));
+ else
+ /* Interface treats all the pixel data in RGB888 format */
+ panel_format = (COLOR_8BIT |
+ (COLOR_8BIT << 2) |
+ (COLOR_8BIT << 4) |
+ (0x21 << 8));
+
+ DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+ DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
+ DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+ p->vsync_pulse_width * hsync_period);
+ DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+ DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
+ DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
+ DPU_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl);
+ DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
+ DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
+ DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+ DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
+ DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+ DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+ DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+ DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+ DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+}
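+
+/*
+ * Worked example of the timing math above, using standard 1080p60 CEA
+ * timings purely as illustrative numbers (they are not taken from this
+ * patch):
+ *
+ *	hsync_period = 44 + 148 + 1920 + 88 = 2200
+ *	vsync_period = 5 + 36 + 1080 + 4 = 1125
+ *
+ *	display_v_start = (5 + 36) * 2200 + 0 = 90200
+ *	INTF_VSYNC_PERIOD_F0 = 1125 * 2200 = 2475000
+ *
+ * i.e. every vertical quantity is scaled by hsync_period, so the
+ * vertical start/end registers are programmed in pixel-clock counts.
+ */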
+
+static void dpu_hw_intf_enable_timing_engine(
+ struct dpu_hw_intf *intf,
+ u8 enable)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ /* Note: Display interface select is handled in top block hw layer */
+ DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
+}
+
+static void dpu_hw_intf_setup_prg_fetch(
+ struct dpu_hw_intf *intf,
+ const struct intf_prog_fetch *fetch)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ int fetch_enable;
+
+ /*
+ * Fetch should always be outside the active lines. If the fetching
+ * is programmed within active region, hardware behavior is unknown.
+ */
+
+ fetch_enable = DPU_REG_READ(c, INTF_CONFIG);
+ if (fetch->enable) {
+ fetch_enable |= BIT(31);
+ DPU_REG_WRITE(c, INTF_PROG_FETCH_START,
+ fetch->fetch_start);
+ } else {
+ fetch_enable &= ~BIT(31);
+ }
+
+ DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
+static void dpu_hw_intf_get_status(
+ struct dpu_hw_intf *intf,
+ struct intf_status *s)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+ s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+ if (s->is_en) {
+ s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
+ s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
+ } else {
+ s->line_count = 0;
+ s->frame_count = 0;
+ }
+}
+
+static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf,
+ bool enable, u32 frame_count)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+ /* clear misr data */
+ wmb();
+
+ if (enable)
+ config = (frame_count & MISR_FRAME_COUNT_MASK) |
+ MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+ DPU_REG_WRITE(c, INTF_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+ return DPU_REG_READ(c, INTF_MISR_SIGNATURE);
+}
+
+static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!intf)
+ return 0;
+
+ c = &intf->hw;
+
+ return DPU_REG_READ(c, INTF_LINE_COUNT);
+}
+
+static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
+ ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
+ ops->get_status = dpu_hw_intf_get_status;
+ ops->enable_timing = dpu_hw_intf_enable_timing_engine;
+ ops->setup_misr = dpu_hw_intf_setup_misr;
+ ops->collect_misr = dpu_hw_intf_collect_misr;
+ ops->get_line_count = dpu_hw_intf_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_intf *c;
+ struct dpu_intf_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _intf_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("failed to create dpu_hw_intf %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+	/* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ c->mdss = m;
+ _setup_intf_ops(&c->ops, c->cap->features);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
+{
+ if (intf)
+ dpu_hw_blk_destroy(&intf->base);
+ kfree(intf);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
new file mode 100644
index 000000000000..a79d735da68d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTF_H
+#define _DPU_HW_INTF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_intf;
+
+/* intf timing settings */
+struct intf_timing_params {
+ u32 width; /* active width */
+ u32 height; /* active height */
+ u32 xres; /* Display panel width */
+ u32 yres; /* Display panel height */
+
+ u32 h_back_porch;
+ u32 h_front_porch;
+ u32 v_back_porch;
+ u32 v_front_porch;
+ u32 hsync_pulse_width;
+ u32 vsync_pulse_width;
+ u32 hsync_polarity;
+ u32 vsync_polarity;
+ u32 border_clr;
+ u32 underflow_clr;
+ u32 hsync_skew;
+};
+
+struct intf_prog_fetch {
+ u8 enable;
+ /* vsync counter for the front porch pixel line */
+ u32 fetch_start;
+};
+
+struct intf_status {
+ u8 is_en; /* interface timing engine is enabled or not */
+ u32 frame_count; /* frame count since timing engine enabled */
+ u32 line_count; /* current line count including blanking */
+};
+
+/**
+ * struct dpu_hw_intf_ops : Interface to the interface Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_timing_gen : programs the timing engine
+ * @setup_prg_fetch : enables/disables the programmable fetch logic
+ * @enable_timing: enable/disable timing engine
+ * @get_status: returns if timing engine is enabled or not
+ * @setup_misr: enables/disables MISR in HW register
+ * @collect_misr: reads and stores MISR data from HW register
+ * @get_line_count: reads current vertical line counter
+ */
+struct dpu_hw_intf_ops {
+ void (*setup_timing_gen)(struct dpu_hw_intf *intf,
+ const struct intf_timing_params *p,
+ const struct dpu_format *fmt);
+
+ void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
+ const struct intf_prog_fetch *fetch);
+
+ void (*enable_timing)(struct dpu_hw_intf *intf,
+ u8 enable);
+
+ void (*get_status)(struct dpu_hw_intf *intf,
+ struct intf_status *status);
+
+ void (*setup_misr)(struct dpu_hw_intf *intf,
+ bool enable, u32 frame_count);
+
+ u32 (*collect_misr)(struct dpu_hw_intf *intf);
+
+ u32 (*get_line_count)(struct dpu_hw_intf *intf);
+};
+
+struct dpu_hw_intf {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* intf */
+ enum dpu_intf idx;
+ const struct dpu_intf_cfg *cap;
+ const struct dpu_mdss_cfg *mdss;
+
+ /* ops */
+ struct dpu_hw_intf_ops ops;
+};
+
+/**
+ * to_dpu_hw_intf - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_intf, base);
+}
+
+/**
+ * dpu_hw_intf_init(): Initializes the intf driver for the passed
+ * interface idx.
+ * @idx: interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intf_destroy(): Destroys INTF driver context
+ * @intf: Pointer to INTF driver context
+ */
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
+
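+/*
+ * Example usage (an illustrative sketch only; "mmio", "catalog", "timing"
+ * and "fmt" stand in for the caller's mapped MDP registers, parsed
+ * catalog, timing parameters and pixel format):
+ *
+ *	struct dpu_hw_intf *intf;
+ *
+ *	intf = dpu_hw_intf_init(INTF_1, mmio, catalog);
+ *	if (IS_ERR(intf))
+ *		return PTR_ERR(intf);
+ *
+ *	intf->ops.setup_timing_gen(intf, &timing, fmt);
+ *	intf->ops.enable_timing(intf, 1);
+ *	...
+ *	dpu_hw_intf_destroy(intf);
+ */
+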
+#endif /*_DPU_HW_INTF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
new file mode 100644
index 000000000000..4ab72b0f07a5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -0,0 +1,261 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define LM_OP_MODE 0x00
+#define LM_OUT_SIZE 0x04
+#define LM_BORDER_COLOR_0 0x08
+#define LM_BORDER_COLOR_1 0x010
+
+/* These register are offset to mixer base + stage base */
+#define LM_BLEND0_OP 0x00
+#define LM_BLEND0_CONST_ALPHA 0x04
+#define LM_FG_COLOR_FILL_COLOR_0 0x08
+#define LM_FG_COLOR_FILL_COLOR_1 0x0C
+#define LM_FG_COLOR_FILL_SIZE 0x10
+#define LM_FG_COLOR_FILL_XY 0x14
+
+#define LM_BLEND0_FG_ALPHA 0x04
+#define LM_BLEND0_BG_ALPHA 0x08
+
+#define LM_MISR_CTRL 0x310
+#define LM_MISR_SIGNATURE 0x314
+
+static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->mixer_count; i++) {
+ if (mixer == m->mixer[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->mixer[i].base;
+ b->length = m->mixer[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_LM;
+ return &m->mixer[i];
+ }
+ }
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be setup
+ * @ctx: mixer context containing the mixer to be programmed
+ * @stage: stage index to setup
+ */
+static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
+{
+ const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
+ int rc;
+
+ if (stage == DPU_STAGE_BASE)
+ rc = -EINVAL;
+ else if (stage <= sblk->maxblendstages)
+ rc = sblk->blendstage_base[stage - DPU_STAGE_0];
+ else
+ rc = -EINVAL;
+
+ return rc;
+}
+
+static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
+ struct dpu_hw_mixer_cfg *mixer)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 outsize;
+ u32 op_mode;
+
+ op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+ outsize = mixer->out_height << 16 | mixer->out_width;
+ DPU_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+ /* SPLIT_LEFT_RIGHT */
+ if (mixer->right_mixer)
+ op_mode |= BIT(31);
+ else
+ op_mode &= ~BIT(31);
+ DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ if (border_en) {
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_0,
+ (color->color_0 & 0xFFF) |
+ ((color->color_1 & 0xFFF) << 0x10));
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_1,
+ (color->color_2 & 0xFFF) |
+ ((color->color_3 & 0xFFF) << 0x10));
+ }
+}
+
+static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+ u32 const_alpha;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+ DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
+ uint32_t mixer_op_mode)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int op_mode;
+
+ /* read the existing op_mode configuration */
+ op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+ op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
+
+ DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
+ void *cfg)
+{
+}
+
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx,
+ bool enable, u32 frame_count)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+ /* clear misr data */
+ wmb();
+
+ if (enable)
+ config = (frame_count & MISR_FRAME_COUNT_MASK) |
+ MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+ DPU_REG_WRITE(c, LM_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, LM_MISR_SIGNATURE);
+}
+
+static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
+ struct dpu_hw_lm_ops *ops,
+ unsigned long features)
+{
+ ops->setup_mixer_out = dpu_hw_lm_setup_out;
+ if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
+ else
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
+ ops->setup_alpha_out = dpu_hw_lm_setup_color3;
+ ops->setup_border_color = dpu_hw_lm_setup_border_color;
+ ops->setup_gc = dpu_hw_lm_gc;
+ ops->setup_misr = dpu_hw_lm_setup_misr;
+ ops->collect_misr = dpu_hw_lm_collect_misr;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_mixer *c;
+ struct dpu_lm_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _lm_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_mixer_ops(m, &c->ops, c->cap->features);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
+{
+ if (lm)
+ dpu_hw_blk_destroy(&lm->base);
+ kfree(lm);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
new file mode 100644
index 000000000000..e29e5dab31bf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -0,0 +1,122 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_LM_H
+#define _DPU_HW_LM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mixer;
+
+struct dpu_hw_mixer_cfg {
+ u32 out_width;
+ u32 out_height;
+ bool right_mixer;
+ int flags;
+};
+
+struct dpu_hw_color3_cfg {
+ u8 keep_fg[DPU_STAGE_MAX];
+};
+
+/**
+ * struct dpu_hw_lm_ops : Interface to the mixer Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_lm_ops {
+ /*
+ * Sets up mixer output width and height
+ * and border color if enabled
+ */
+ void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
+ struct dpu_hw_mixer_cfg *cfg);
+
+ /*
+ * Alpha blending configuration
+ * for the specified stage
+ */
+ void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
+ uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
+
+ /*
+ * Alpha color component selection from either fg or bg
+ */
+ void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
+
+ /**
+ * setup_border_color : enable/disable border color
+ */
+ void (*setup_border_color)(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en);
+ /**
+ * setup_gc : enable/disable gamma correction feature
+ */
+ void (*setup_gc)(struct dpu_hw_mixer *mixer,
+ void *cfg);
+
+ /* setup_misr: enables/disables MISR in HW register */
+ void (*setup_misr)(struct dpu_hw_mixer *ctx,
+ bool enable, u32 frame_count);
+
+ /* collect_misr: reads and stores MISR data from HW register */
+ u32 (*collect_misr)(struct dpu_hw_mixer *ctx);
+};
+
+struct dpu_hw_mixer {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* lm */
+ enum dpu_lm idx;
+ const struct dpu_lm_cfg *cap;
+ const struct dpu_mdp_cfg *mdp;
+ const struct dpu_ctl_cfg *ctl;
+
+ /* ops */
+ struct dpu_hw_lm_ops ops;
+
+ /* store mixer info specific to display */
+ struct dpu_hw_mixer_cfg cfg;
+};
+
+/**
+ * to_dpu_hw_mixer - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_mixer, base);
+}
+
+/**
+ * dpu_hw_lm_init(): Initializes the mixer hw driver object.
+ * Should be called once before accessing each mixer.
+ * @idx: mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_lm_destroy(): Destroys layer mixer driver context
+ * @lm: Pointer to LM driver context
+ */
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
+
+#endif /*_DPU_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
new file mode 100644
index 000000000000..35e6bf930924
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -0,0 +1,465 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_MDSS_H
+#define _DPU_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define DPU_DBG_NAME "dpu"
+
+#define DPU_NONE 0
+
+#ifndef DPU_CSC_MATRIX_COEFF_SIZE
+#define DPU_CSC_MATRIX_COEFF_SIZE 9
+#endif
+
+#ifndef DPU_CSC_CLAMP_SIZE
+#define DPU_CSC_CLAMP_SIZE 6
+#endif
+
+#ifndef DPU_CSC_BIAS_SIZE
+#define DPU_CSC_BIAS_SIZE 3
+#endif
+
+#ifndef DPU_MAX_PLANES
+#define DPU_MAX_PLANES 4
+#endif
+
+#define PIPES_PER_STAGE 2
+#ifndef DPU_MAX_DE_CURVES
+#define DPU_MAX_DE_CURVES 3
+#endif
+
+enum dpu_format_flags {
+ DPU_FORMAT_FLAG_YUV_BIT,
+ DPU_FORMAT_FLAG_DX_BIT,
+ DPU_FORMAT_FLAG_COMPRESSED_BIT,
+ DPU_FORMAT_FLAG_BIT_MAX,
+};
+
+#define DPU_FORMAT_FLAG_YUV BIT(DPU_FORMAT_FLAG_YUV_BIT)
+#define DPU_FORMAT_FLAG_DX BIT(DPU_FORMAT_FLAG_DX_BIT)
+#define DPU_FORMAT_FLAG_COMPRESSED BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT)
+#define DPU_FORMAT_IS_YUV(X) \
+ (test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag))
+#define DPU_FORMAT_IS_DX(X) \
+ (test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag))
+#define DPU_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == DPU_FETCH_LINEAR)
+#define DPU_FORMAT_IS_TILE(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ !test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define DPU_FORMAT_IS_UBWC(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+
+#define DPU_BLEND_FG_ALPHA_FG_CONST (0 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_CONST (1 << 0)
+#define DPU_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_PIXEL (3 << 0)
+#define DPU_BLEND_FG_INV_ALPHA (1 << 2)
+#define DPU_BLEND_FG_MOD_ALPHA (1 << 3)
+#define DPU_BLEND_FG_INV_MOD_ALPHA (1 << 4)
+#define DPU_BLEND_FG_TRANSP_EN (1 << 5)
+#define DPU_BLEND_BG_ALPHA_FG_CONST (0 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_CONST (1 << 8)
+#define DPU_BLEND_BG_ALPHA_FG_PIXEL (2 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_PIXEL (3 << 8)
+#define DPU_BLEND_BG_INV_ALPHA (1 << 10)
+#define DPU_BLEND_BG_MOD_ALPHA (1 << 11)
+#define DPU_BLEND_BG_INV_MOD_ALPHA (1 << 12)
+#define DPU_BLEND_BG_TRANSP_EN (1 << 13)
+
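+/*
+ * Example: a premultiplied-alpha blend stage would typically combine
+ * these masks roughly as follows (illustrative only):
+ *
+ *	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+ *		   DPU_BLEND_BG_ALPHA_FG_PIXEL |
+ *		   DPU_BLEND_BG_INV_ALPHA;
+ *
+ * and hand the result to the layer mixer's setup_blend_config() op
+ * together with the constant fg/bg alpha values.
+ */
+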
+#define DPU_VSYNC0_SOURCE_GPIO 0
+#define DPU_VSYNC1_SOURCE_GPIO 1
+#define DPU_VSYNC2_SOURCE_GPIO 2
+#define DPU_VSYNC_SOURCE_INTF_0 3
+#define DPU_VSYNC_SOURCE_INTF_1 4
+#define DPU_VSYNC_SOURCE_INTF_2 5
+#define DPU_VSYNC_SOURCE_INTF_3 6
+#define DPU_VSYNC_SOURCE_WD_TIMER_4 11
+#define DPU_VSYNC_SOURCE_WD_TIMER_3 12
+#define DPU_VSYNC_SOURCE_WD_TIMER_2 13
+#define DPU_VSYNC_SOURCE_WD_TIMER_1 14
+#define DPU_VSYNC_SOURCE_WD_TIMER_0 15
+
+enum dpu_hw_blk_type {
+ DPU_HW_BLK_TOP = 0,
+ DPU_HW_BLK_SSPP,
+ DPU_HW_BLK_LM,
+ DPU_HW_BLK_CTL,
+ DPU_HW_BLK_CDM,
+ DPU_HW_BLK_PINGPONG,
+ DPU_HW_BLK_INTF,
+ DPU_HW_BLK_WB,
+ DPU_HW_BLK_MAX,
+};
+
+enum dpu_mdp {
+ MDP_TOP = 0x1,
+ MDP_MAX,
+};
+
+enum dpu_sspp {
+ SSPP_NONE,
+ SSPP_VIG0,
+ SSPP_VIG1,
+ SSPP_VIG2,
+ SSPP_VIG3,
+ SSPP_RGB0,
+ SSPP_RGB1,
+ SSPP_RGB2,
+ SSPP_RGB3,
+ SSPP_DMA0,
+ SSPP_DMA1,
+ SSPP_DMA2,
+ SSPP_DMA3,
+ SSPP_CURSOR0,
+ SSPP_CURSOR1,
+ SSPP_MAX
+};
+
+enum dpu_sspp_type {
+ SSPP_TYPE_VIG,
+ SSPP_TYPE_RGB,
+ SSPP_TYPE_DMA,
+ SSPP_TYPE_CURSOR,
+ SSPP_TYPE_MAX
+};
+
+enum dpu_lm {
+ LM_0 = 1,
+ LM_1,
+ LM_2,
+ LM_3,
+ LM_4,
+ LM_5,
+ LM_6,
+ LM_MAX
+};
+
+enum dpu_stage {
+ DPU_STAGE_BASE = 0,
+ DPU_STAGE_0,
+ DPU_STAGE_1,
+ DPU_STAGE_2,
+ DPU_STAGE_3,
+ DPU_STAGE_4,
+ DPU_STAGE_5,
+ DPU_STAGE_6,
+ DPU_STAGE_7,
+ DPU_STAGE_8,
+ DPU_STAGE_9,
+ DPU_STAGE_10,
+ DPU_STAGE_MAX
+};
+enum dpu_dspp {
+ DSPP_0 = 1,
+ DSPP_1,
+ DSPP_2,
+ DSPP_3,
+ DSPP_MAX
+};
+
+enum dpu_ds {
+ DS_TOP,
+ DS_0,
+ DS_1,
+ DS_MAX
+};
+
+enum dpu_ctl {
+ CTL_0 = 1,
+ CTL_1,
+ CTL_2,
+ CTL_3,
+ CTL_4,
+ CTL_MAX
+};
+
+enum dpu_cdm {
+ CDM_0 = 1,
+ CDM_1,
+ CDM_MAX
+};
+
+enum dpu_pingpong {
+ PINGPONG_0 = 1,
+ PINGPONG_1,
+ PINGPONG_2,
+ PINGPONG_3,
+ PINGPONG_4,
+ PINGPONG_S0,
+ PINGPONG_MAX
+};
+
+enum dpu_intf {
+ INTF_0 = 1,
+ INTF_1,
+ INTF_2,
+ INTF_3,
+ INTF_4,
+ INTF_5,
+ INTF_6,
+ INTF_MAX
+};
+
+enum dpu_intf_type {
+ INTF_NONE = 0x0,
+ INTF_DSI = 0x1,
+ INTF_HDMI = 0x3,
+ INTF_LCDC = 0x5,
+ INTF_EDP = 0x9,
+ INTF_DP = 0xa,
+ INTF_TYPE_MAX,
+
+ /* virtual interfaces */
+ INTF_WB = 0x100,
+};
+
+enum dpu_intf_mode {
+ INTF_MODE_NONE = 0,
+ INTF_MODE_CMD,
+ INTF_MODE_VIDEO,
+ INTF_MODE_WB_BLOCK,
+ INTF_MODE_WB_LINE,
+ INTF_MODE_MAX
+};
+
+enum dpu_wb {
+ WB_0 = 1,
+ WB_1,
+ WB_2,
+ WB_3,
+ WB_MAX
+};
+
+enum dpu_ad {
+ AD_0 = 0x1,
+ AD_1,
+ AD_MAX
+};
+
+enum dpu_cwb {
+ CWB_0 = 0x1,
+ CWB_1,
+ CWB_2,
+ CWB_3,
+ CWB_MAX
+};
+
+enum dpu_wd_timer {
+ WD_TIMER_0 = 0x1,
+ WD_TIMER_1,
+ WD_TIMER_2,
+ WD_TIMER_3,
+ WD_TIMER_4,
+ WD_TIMER_5,
+ WD_TIMER_MAX
+};
+
+enum dpu_vbif {
+ VBIF_0,
+ VBIF_1,
+ VBIF_MAX,
+ VBIF_RT = VBIF_0,
+ VBIF_NRT = VBIF_1
+};
+
+enum dpu_iommu_domain {
+ DPU_IOMMU_DOMAIN_UNSECURE,
+ DPU_IOMMU_DOMAIN_SECURE,
+ DPU_IOMMU_DOMAIN_MAX
+};
+
+/**
+ * DPU HW component order color map
+ */
+enum {
+ C0_G_Y = 0,
+ C1_B_Cb = 1,
+ C2_R_Cr = 2,
+ C3_ALPHA = 3
+};
+
+/**
+ * enum dpu_plane_type - defines how the color components are packed in the pixel format
+ * @DPU_PLANE_INTERLEAVED : Color components in single plane
+ * @DPU_PLANE_PLANAR : Color component in separate planes
+ * @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
+ */
+enum dpu_plane_type {
+ DPU_PLANE_INTERLEAVED,
+ DPU_PLANE_PLANAR,
+ DPU_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum dpu_chroma_samp_type - chroma sub-sampling type
+ * @DPU_CHROMA_RGB : No chroma subsampling
+ * @DPU_CHROMA_H2V1 : Chroma pixels are horizontally subsampled
+ * @DPU_CHROMA_H1V2 : Chroma pixels are vertically subsampled
+ * @DPU_CHROMA_420 : 420 subsampling
+ */
+enum dpu_chroma_samp_type {
+ DPU_CHROMA_RGB,
+ DPU_CHROMA_H2V1,
+ DPU_CHROMA_H1V2,
+ DPU_CHROMA_420
+};
+
+/**
+ * enum dpu_fetch_type - defines how DPU HW fetches data
+ * @DPU_FETCH_LINEAR : fetch is line by line
+ * @DPU_FETCH_TILE : fetches data in Z order from a tile
+ * @DPU_FETCH_UBWC : fetch and decompress data
+ */
+enum dpu_fetch_type {
+ DPU_FETCH_LINEAR,
+ DPU_FETCH_TILE,
+ DPU_FETCH_UBWC
+};
+
+/**
+ * Enum values are chosen to match the number of bits
+ * expected by the HW programming.
+ */
+enum {
+ COLOR_ALPHA_1BIT = 0,
+ COLOR_ALPHA_4BIT = 1,
+ COLOR_4BIT = 0,
+ COLOR_5BIT = 1, /* No 5-bit Alpha */
+ COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
+ COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
+};
+
+/**
+ * enum dpu_3d_blend_mode
+ * Describes how the 3d data is blended
+ * @BLEND_3D_NONE : 3d blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : vertical row interleaving
+ * @BLEND_3D_COL_INT : column interleaving
+ * @BLEND_3D_MAX :
+ */
+enum dpu_3d_blend_mode {
+ BLEND_3D_NONE = 0,
+ BLEND_3D_FRAME_INT,
+ BLEND_3D_H_ROW_INT,
+ BLEND_3D_V_ROW_INT,
+ BLEND_3D_COL_INT,
+ BLEND_3D_MAX
+};
+
+/**
+ * struct dpu_format - defines the format configuration which
+ * allows DPU HW to correctly fetch and decode the format
+ * @base: base msm_format structure containing fourcc code
+ * @fetch_planes: how the color components are packed in pixel format
+ * @element: element color ordering
+ * @bits: element bit widths
+ * @chroma_sample: chroma sub-sampling type
+ * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
+ * @unpack_tight: 0 for loose, 1 for tight
+ * @unpack_count: 0 = 1 component, 1 = 2 components
+ * @bpp: bytes per pixel
+ * @alpha_enable: whether the format has an alpha channel
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @flag: usage bit flags
+ * @flag: usage bit flags
+ * @tile_width: format tile width
+ * @tile_height: format tile height
+ */
+struct dpu_format {
+ struct msm_format base;
+ enum dpu_plane_type fetch_planes;
+ u8 element[DPU_MAX_PLANES];
+ u8 bits[DPU_MAX_PLANES];
+ enum dpu_chroma_samp_type chroma_sample;
+ u8 unpack_align_msb;
+ u8 unpack_tight;
+ u8 unpack_count;
+ u8 bpp;
+ u8 alpha_enable;
+ u8 num_planes;
+ enum dpu_fetch_type fetch_mode;
+ DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX);
+ u16 tile_width;
+ u16 tile_height;
+};
+#define to_dpu_format(x) container_of(x, struct dpu_format, base)
+
+/**
+ * struct dpu_hw_fmt_layout - format information of the source pixel data
+ * @format: pixel format parameters
+ * @num_planes: number of planes (including meta data planes)
+ * @width: image width
+ * @height: image height
+ * @total_size: total size in bytes
+ * @plane_addr: address of each plane
+ * @plane_size: length of each plane
+ * @plane_pitch: pitch of each plane
+ */
+struct dpu_hw_fmt_layout {
+ const struct dpu_format *format;
+ uint32_t num_planes;
+ uint32_t width;
+ uint32_t height;
+ uint32_t total_size;
+ uint32_t plane_addr[DPU_MAX_PLANES];
+ uint32_t plane_size[DPU_MAX_PLANES];
+ uint32_t plane_pitch[DPU_MAX_PLANES];
+};
+
+struct dpu_csc_cfg {
+ /* matrix coefficients in S15.16 format */
+ uint32_t csc_mv[DPU_CSC_MATRIX_COEFF_SIZE];
+ uint32_t csc_pre_bv[DPU_CSC_BIAS_SIZE];
+ uint32_t csc_post_bv[DPU_CSC_BIAS_SIZE];
+ uint32_t csc_pre_lv[DPU_CSC_CLAMP_SIZE];
+ uint32_t csc_post_lv[DPU_CSC_CLAMP_SIZE];
+};
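+
+/*
+ * Note on the S15.16 coefficient encoding: 1.0 is 1 << 16 = 0x00010000,
+ * so an identity matrix (illustrative only) would be
+ *
+ *	csc_mv = { 0x10000, 0x0, 0x0,
+ *		   0x0, 0x10000, 0x0,
+ *		   0x0, 0x0, 0x10000 };
+ */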
+
+/**
+ * struct dpu_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct dpu_mdss_color {
+ u32 color_0;
+ u32 color_1;
+ u32 color_2;
+ u32 color_3;
+};
+
+/*
+ * Define bit masks for h/w logging.
+ */
+#define DPU_DBG_MASK_NONE (1 << 0)
+#define DPU_DBG_MASK_CDM (1 << 1)
+#define DPU_DBG_MASK_INTF (1 << 2)
+#define DPU_DBG_MASK_LM (1 << 3)
+#define DPU_DBG_MASK_CTL (1 << 4)
+#define DPU_DBG_MASK_PINGPONG (1 << 5)
+#define DPU_DBG_MASK_SSPP (1 << 6)
+#define DPU_DBG_MASK_WB (1 << 7)
+#define DPU_DBG_MASK_TOP (1 << 8)
+#define DPU_DBG_MASK_VBIF (1 << 9)
+#define DPU_DBG_MASK_ROT (1 << 10)
+
+#endif /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
new file mode 100644
index 000000000000..cc3a623903f4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -0,0 +1,250 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iopoll.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define PP_TEAR_CHECK_EN 0x000
+#define PP_SYNC_CONFIG_VSYNC 0x004
+#define PP_SYNC_CONFIG_HEIGHT 0x008
+#define PP_SYNC_WRCOUNT 0x00C
+#define PP_VSYNC_INIT_VAL 0x010
+#define PP_INT_COUNT_VAL 0x014
+#define PP_SYNC_THRESH 0x018
+#define PP_START_POS 0x01C
+#define PP_RD_PTR_IRQ 0x020
+#define PP_WR_PTR_IRQ 0x024
+#define PP_OUT_LINE_COUNT 0x028
+#define PP_LINE_COUNT 0x02C
+
+#define PP_FBC_MODE 0x034
+#define PP_FBC_BUDGET_CTL 0x038
+#define PP_FBC_LOSSY_MODE 0x03C
+
+static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->pingpong_count; i++) {
+ if (pp == m->pingpong[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->pingpong[i].base;
+ b->length = m->pingpong[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_PINGPONG;
+ return &m->pingpong[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_tear_check *te)
+{
+ struct dpu_hw_blk_reg_map *c;
+ int cfg;
+
+ if (!pp || !te)
+ return -EINVAL;
+ c = &pp->hw;
+
+	cfg = BIT(19); /* VSYNC_COUNTER_EN */
+ if (te->hw_vsync_mode)
+ cfg |= BIT(20);
+
+ cfg |= te->vsync_count;
+
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+ DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+ DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+ DPU_REG_WRITE(c, PP_START_POS, te->start_pos);
+ DPU_REG_WRITE(c, PP_SYNC_THRESH,
+ ((te->sync_threshold_continue << 16) |
+ te->sync_threshold_start));
+ DPU_REG_WRITE(c, PP_SYNC_WRCOUNT,
+ (te->start_pos + te->sync_threshold_start + 1));
+
+ return 0;
+}
+
+static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
+ u32 timeout_us)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 val;
+ int rc;
+
+ if (!pp)
+ return -EINVAL;
+
+ c = &pp->hw;
+ rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT,
+ val, (val & 0xffff) >= 1, 10, timeout_us);
+
+ return rc;
+}
+
+static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!pp)
+ return -EINVAL;
+ c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
+ return 0;
+}
+
+static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp,
+ bool enable_external_te)
+{
+	struct dpu_hw_blk_reg_map *c;
+ u32 cfg;
+ int orig;
+
+ if (!pp)
+ return -EINVAL;
+
+ c = &pp->hw;
+ cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
+ orig = (bool)(cfg & BIT(20));
+ if (enable_external_te)
+ cfg |= BIT(20);
+ else
+ cfg &= ~BIT(20);
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg);
+
+ return orig;
+}
+
+static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_pp_vsync_info *info)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 val;
+
+ if (!pp || !info)
+ return -EINVAL;
+ c = &pp->hw;
+
+ val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL);
+ info->rd_ptr_init_val = val & 0xffff;
+
+ val = DPU_REG_READ(c, PP_INT_COUNT_VAL);
+ info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
+ info->rd_ptr_line_count = val & 0xffff;
+
+ val = DPU_REG_READ(c, PP_LINE_COUNT);
+ info->wr_ptr_line_count = val & 0xffff;
+
+ return 0;
+}
+
+static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
+{
+	struct dpu_hw_blk_reg_map *c;
+ u32 height, init;
+ u32 line = 0xFFFF;
+
+ if (!pp)
+ return 0;
+ c = &pp->hw;
+
+ init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
+ height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
+
+ if (height < init)
+ goto line_count_exit;
+
+ line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
+
+ if (line < init)
+ line += (0xFFFF - init);
+ else
+ line -= init;
+
+line_count_exit:
+ return line;
+}
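+
+/*
+ * Example of the wrap handling above (illustrative numbers): with
+ * init = 0x0010 and a raw PP_INT_COUNT_VAL of 0x0008 the counter has
+ * already wrapped at 0xFFFF, so the returned line count is
+ * 0x0008 + (0xFFFF - 0x0010) = 0xFFF7 rather than a negative value.
+ */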
+
+static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
+ const struct dpu_pingpong_cfg *hw_cap)
+{
+ ops->setup_tearcheck = dpu_hw_pp_setup_te_config;
+ ops->enable_tearcheck = dpu_hw_pp_enable_te;
+ ops->connect_external_te = dpu_hw_pp_connect_external_te;
+ ops->get_vsync_info = dpu_hw_pp_get_vsync_info;
+ ops->poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
+ ops->get_line_count = dpu_hw_pp_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_pingpong *c;
+ struct dpu_pingpong_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _pingpong_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_pingpong_ops(&c->ops, c->caps);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
+{
+ if (pp)
+ dpu_hw_blk_destroy(&pp->base);
+ kfree(pp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
new file mode 100644
index 000000000000..3caccd7d6a3e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -0,0 +1,136 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_PINGPONG_H
+#define _DPU_HW_PINGPONG_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_pingpong;
+
+struct dpu_hw_tear_check {
+ /*
+ * This is ratio of MDP VSYNC clk freq(Hz) to
+ * refresh rate divided by no of lines
+ */
+ u32 vsync_count;
+ u32 sync_cfg_height;
+ u32 vsync_init_val;
+ u32 sync_threshold_start;
+ u32 sync_threshold_continue;
+ u32 start_pos;
+ u32 rd_ptr_irq;
+ u8 hw_vsync_mode;
+};
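+
+/*
+ * Example (illustrative numbers only, assuming a 19.2 MHz vsync counter
+ * clock): for a 60 Hz panel with 2200 total lines,
+ * vsync_count = 19200000 / (60 * 2200) ~= 145.
+ */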
+
+struct dpu_hw_pp_vsync_info {
+ u32 rd_ptr_init_val; /* value of rd pointer at vsync edge */
+ u32 rd_ptr_frame_count; /* num frames sent since enabling interface */
+ u32 rd_ptr_line_count; /* current line on panel (rd ptr) */
+ u32 wr_ptr_line_count; /* current line within pp fifo (wr ptr) */
+};
+
+/**
+ * struct dpu_hw_pingpong_ops : Interface to the pingpong Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_tearcheck : program tear check values
+ * @enable_tearcheck : enables tear check
+ * @connect_external_te : sets or clears listening to the external TE signal
+ * @get_vsync_info : retrieves timing info of the panel
+ * @poll_timeout_wr_ptr : polls until write pointer transmission starts
+ * @get_line_count: obtain current vertical line counter
+ */
+struct dpu_hw_pingpong_ops {
+ /**
+	 * enables vsync generation and sets up init value of
+	 * read pointer and programs the tear check configuration
+ */
+ int (*setup_tearcheck)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_tear_check *cfg);
+
+ /**
+ * enables tear check block
+ */
+ int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
+ bool enable);
+
+ /**
+ * read, modify, write to either set or clear listening to external TE
+ * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
+ */
+ int (*connect_external_te)(struct dpu_hw_pingpong *pp,
+ bool enable_external_te);
+
+ /**
+ * provides the programmed and current
+ * line_count
+ */
+ int (*get_vsync_info)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_pp_vsync_info *info);
+
+ /**
+ * poll until write pointer transmission starts
+ * @Return: 0 on success, -ETIMEDOUT on timeout
+ */
+ int (*poll_timeout_wr_ptr)(struct dpu_hw_pingpong *pp, u32 timeout_us);
+
+ /**
+ * Obtain current vertical line counter
+ */
+ u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
+};
+
+struct dpu_hw_pingpong {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* pingpong */
+ enum dpu_pingpong idx;
+ const struct dpu_pingpong_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_pingpong_ops ops;
+};
+
+/**
+ * to_dpu_hw_pingpong - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
+ * dpu_hw_pingpong_init - initializes the pingpong driver for the passed
+ * pingpong idx.
+ * @idx: Pingpong index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ * Returns: Error code or allocated dpu_hw_pingpong context
+ */
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_pingpong_destroy - destroys pingpong driver context
+ * should be called to free the context
+ * @pp: Pointer to PP driver context returned by dpu_hw_pingpong_init
+ */
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
+
+#endif /*_DPU_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
new file mode 100644
index 000000000000..c25b52a6b219
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -0,0 +1,753 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
+
+/* DPU_SSPP_SRC */
+#define SSPP_SRC_SIZE 0x00
+#define SSPP_SRC_XY 0x08
+#define SSPP_OUT_SIZE 0x0c
+#define SSPP_OUT_XY 0x10
+#define SSPP_SRC0_ADDR 0x14
+#define SSPP_SRC1_ADDR 0x18
+#define SSPP_SRC2_ADDR 0x1C
+#define SSPP_SRC3_ADDR 0x20
+#define SSPP_SRC_YSTRIDE0 0x24
+#define SSPP_SRC_YSTRIDE1 0x28
+#define SSPP_SRC_FORMAT 0x30
+#define SSPP_SRC_UNPACK_PATTERN 0x34
+#define SSPP_SRC_OP_MODE 0x38
+
+/* SSPP_MULTIRECT */
+#define SSPP_SRC_SIZE_REC1 0x16C
+#define SSPP_SRC_XY_REC1 0x168
+#define SSPP_OUT_SIZE_REC1 0x160
+#define SSPP_OUT_XY_REC1 0x164
+#define SSPP_SRC_FORMAT_REC1 0x174
+#define SSPP_SRC_UNPACK_PATTERN_REC1 0x178
+#define SSPP_SRC_OP_MODE_REC1 0x17C
+#define SSPP_MULTIRECT_OPMODE 0x170
+#define SSPP_SRC_CONSTANT_COLOR_REC1 0x180
+#define SSPP_EXCL_REC_SIZE_REC1 0x184
+#define SSPP_EXCL_REC_XY_REC1 0x188
+
+#define MDSS_MDP_OP_DEINTERLACE BIT(22)
+#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
+#define MDSS_MDP_OP_IGC_EN BIT(16)
+#define MDSS_MDP_OP_FLIP_UD BIT(14)
+#define MDSS_MDP_OP_FLIP_LR BIT(13)
+#define MDSS_MDP_OP_BWC_EN BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
+
+#define SSPP_SRC_CONSTANT_COLOR 0x3c
+#define SSPP_EXCL_REC_CTL 0x40
+#define SSPP_UBWC_STATIC_CTRL 0x44
+#define SSPP_FETCH_CONFIG 0x048
+#define SSPP_DANGER_LUT 0x60
+#define SSPP_SAFE_LUT 0x64
+#define SSPP_CREQ_LUT 0x68
+#define SSPP_QOS_CTRL 0x6C
+#define SSPP_DECIMATION_CONFIG 0xB4
+#define SSPP_SRC_ADDR_SW_STATUS 0x70
+#define SSPP_CREQ_LUT_0 0x74
+#define SSPP_CREQ_LUT_1 0x78
+#define SSPP_SW_PIX_EXT_C0_LR 0x100
+#define SSPP_SW_PIX_EXT_C0_TB 0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR 0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB 0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS 0x118
+#define SSPP_SW_PIX_EXT_C3_LR 0x120
+#define SSPP_SW_PIX_EXT_C3_TB 0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128
+#define SSPP_TRAFFIC_SHAPER 0x130
+#define SSPP_CDP_CNTL 0x134
+#define SSPP_UBWC_ERROR_STATUS 0x138
+#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150
+#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
+#define SSPP_TRAFFIC_SHAPER_REC1 0x158
+#define SSPP_EXCL_REC_SIZE 0x1B4
+#define SSPP_EXCL_REC_XY 0x1B8
+#define SSPP_VIG_OP_MODE 0x0
+#define SSPP_VIG_CSC_10_OP_MODE 0x0
+#define SSPP_TRAFFIC_SHAPER_BPC_MAX 0xFF
+
+/* SSPP_QOS_CTRL */
+#define SSPP_QOS_CTRL_VBLANK_EN BIT(16)
+#define SSPP_QOS_CTRL_DANGER_SAFE_EN BIT(0)
+#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF 4
+#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF 20
+
+/* DPU_SSPP_SCALER_QSEED2 */
+#define SCALE_CONFIG 0x04
+#define COMP0_3_PHASE_STEP_X 0x10
+#define COMP0_3_PHASE_STEP_Y 0x14
+#define COMP1_2_PHASE_STEP_X 0x18
+#define COMP1_2_PHASE_STEP_Y 0x1c
+#define COMP0_3_INIT_PHASE_X 0x20
+#define COMP0_3_INIT_PHASE_Y 0x24
+#define COMP1_2_INIT_PHASE_X 0x28
+#define COMP1_2_INIT_PHASE_Y 0x2C
+#define VIG_0_QSEED2_SHARP 0x30
+
+/*
+ * Definitions for ViG op modes
+ */
+#define VIG_OP_CSC_DST_DATAFMT BIT(19)
+#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
+#define VIG_OP_CSC_EN BIT(17)
+#define VIG_OP_MEM_PROT_CONT BIT(15)
+#define VIG_OP_MEM_PROT_VAL BIT(14)
+#define VIG_OP_MEM_PROT_SAT BIT(13)
+#define VIG_OP_MEM_PROT_HUE BIT(12)
+#define VIG_OP_HIST BIT(8)
+#define VIG_OP_SKY_COL BIT(7)
+#define VIG_OP_FOIL BIT(6)
+#define VIG_OP_SKIN_COL BIT(5)
+#define VIG_OP_PA_EN BIT(4)
+#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
+#define VIG_OP_MEM_PROT_BLEND BIT(1)
+
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN BIT(0)
+#define CSC_10BIT_OFFSET 4
+
+/* traffic shaper clock in Hz */
+#define TS_CLK 19200000
+
+static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+ int s_id,
+ u32 *idx)
+{
+	int rc = 0;
+	const struct dpu_sspp_sub_blks *sblk;
+
+	if (!ctx)
+		return -EINVAL;
+
+	sblk = ctx->cap->sblk;
+
+ switch (s_id) {
+ case DPU_SSPP_SRC:
+ *idx = sblk->src_blk.base;
+ break;
+ case DPU_SSPP_SCALER_QSEED2:
+ case DPU_SSPP_SCALER_QSEED3:
+ case DPU_SSPP_SCALER_RGB:
+ *idx = sblk->scaler_blk.base;
+ break;
+ case DPU_SSPP_CSC:
+ case DPU_SSPP_CSC_10BIT:
+ *idx = sblk->csc_blk.base;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static void dpu_hw_sspp_setup_multirect(struct dpu_hw_pipe *ctx,
+ enum dpu_sspp_multirect_index index,
+ enum dpu_sspp_multirect_mode mode)
+{
+ u32 mode_mask;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (index == DPU_SSPP_RECT_SOLO) {
+		/*
+ * if rect index is RECT_SOLO, we cannot expect a
+ * virtual plane sharing the same SSPP id. So we go
+ * and disable multirect
+ */
+ mode_mask = 0;
+ } else {
+ mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
+ mode_mask |= index;
+ if (mode == DPU_SSPP_MULTIRECT_TIME_MX)
+ mode_mask |= BIT(2);
+ else
+ mode_mask &= ~BIT(2);
+ }
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
+}
+
+static void _sspp_setup_opmode(struct dpu_hw_pipe *ctx,
+ u32 mask, u8 en)
+{
+ u32 idx;
+ u32 opmode;
+
+ if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+ _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED2, &idx) ||
+ !test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+ return;
+
+ opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
+
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
+}
+
+static void _sspp_setup_csc10_opmode(struct dpu_hw_pipe *ctx,
+ u32 mask, u8 en)
+{
+ u32 idx;
+ u32 opmode;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC_10BIT, &idx))
+ return;
+
+ opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
+}
+
+/*
+ * Setup source pixel format and flip configuration
+ */
+static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
+ const struct dpu_format *fmt, u32 flags,
+ enum dpu_sspp_multirect_index rect_mode)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 chroma_samp, unpack, src_format;
+ u32 opmode = 0;
+ u32 fast_clear = 0;
+ u32 op_mode_off, unpack_pat_off, format_off;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !fmt)
+ return;
+
+ if (rect_mode == DPU_SSPP_RECT_SOLO || rect_mode == DPU_SSPP_RECT_0) {
+ op_mode_off = SSPP_SRC_OP_MODE;
+ unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
+ format_off = SSPP_SRC_FORMAT;
+ } else {
+ op_mode_off = SSPP_SRC_OP_MODE_REC1;
+ unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
+ format_off = SSPP_SRC_FORMAT_REC1;
+ }
+
+ c = &ctx->hw;
+ opmode = DPU_REG_READ(c, op_mode_off + idx);
+ opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
+ MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
+
+ if (flags & DPU_SSPP_FLIP_LR)
+ opmode |= MDSS_MDP_OP_FLIP_LR;
+ if (flags & DPU_SSPP_FLIP_UD)
+ opmode |= MDSS_MDP_OP_FLIP_UD;
+
+ chroma_samp = fmt->chroma_sample;
+ if (flags & DPU_SSPP_SOURCE_ROTATED_90) {
+ if (chroma_samp == DPU_CHROMA_H2V1)
+ chroma_samp = DPU_CHROMA_H1V2;
+ else if (chroma_samp == DPU_CHROMA_H1V2)
+ chroma_samp = DPU_CHROMA_H2V1;
+ }
+
+ src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+ (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+ (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+ if (flags & DPU_SSPP_ROT_90)
+ src_format |= BIT(11); /* ROT90 */
+
+ if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED)
+ src_format |= BIT(8); /* SRCC3_EN */
+
+ if (flags & DPU_SSPP_SOLID_FILL)
+ src_format |= BIT(22);
+
+ unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) | (fmt->element[0] << 0);
+ src_format |= ((fmt->unpack_count - 1) << 12) |
+ (fmt->unpack_tight << 17) |
+ (fmt->unpack_align_msb << 18) |
+ ((fmt->bpp - 1) << 9);
+
+ if (fmt->fetch_mode != DPU_FETCH_LINEAR) {
+ if (DPU_FORMAT_IS_UBWC(fmt))
+ opmode |= MDSS_MDP_OP_BWC_EN;
+		src_format |= (fmt->fetch_mode & 3) << 30; /* FRAME_FORMAT */
+ DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
+ DPU_FETCH_CONFIG_RESET_VALUE |
+ ctx->mdp->highest_bank_bit << 18);
+ if (IS_UBWC_20_SUPPORTED(ctx->catalog->caps->ubwc_version)) {
+ fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ fast_clear | (ctx->mdp->ubwc_swizzle) |
+ (ctx->mdp->highest_bank_bit << 4));
+ }
+ }
+
+ opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+ /* if this is YUV pixel format, enable CSC */
+ if (DPU_FORMAT_IS_YUV(fmt))
+ src_format |= BIT(15);
+
+ if (DPU_FORMAT_IS_DX(fmt))
+ src_format |= BIT(14);
+
+ /* update scaler opmode, if appropriate */
+ if (test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+ _sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+ DPU_FORMAT_IS_YUV(fmt));
+ else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features))
+ _sspp_setup_csc10_opmode(ctx,
+ VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+ DPU_FORMAT_IS_YUV(fmt));
+
+ DPU_REG_WRITE(c, format_off + idx, src_format);
+ DPU_REG_WRITE(c, unpack_pat_off + idx, unpack);
+ DPU_REG_WRITE(c, op_mode_off + idx, opmode);
+
+ /* clear previous UBWC error */
+ DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
+}
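+
+/*
+ * Example of the src_format packing above for an interleaved 8888
+ * layout (illustrative, using the COLOR_8BIT encoding from
+ * dpu_hw_mdss.h): bits[C3..C0] = 3 each, so
+ * (3 << 6) | (3 << 4) | (3 << 2) | 3 = 0xFF in the low byte, and a
+ * 4-byte bpp adds ((4 - 1) << 9) on top of that.
+ */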
+
+static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pixel_ext *pe_ext)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u8 color;
+ u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+ const u32 bytemask = 0xff;
+ const u32 shortmask = 0xffff;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !pe_ext)
+ return;
+
+ c = &ctx->hw;
+
+	/* program SW pixel extension override for all pipes */
+ for (color = 0; color < DPU_MAX_PLANES; color++) {
+ /* color 2 has the same set of registers as color 1 */
+ if (color == 2)
+ continue;
+
+ lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+ ((pe_ext->right_rpt[color] & bytemask) << 16)|
+ ((pe_ext->left_ftch[color] & bytemask) << 8)|
+ (pe_ext->left_rpt[color] & bytemask);
+
+ tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+ ((pe_ext->btm_rpt[color] & bytemask) << 16)|
+ ((pe_ext->top_ftch[color] & bytemask) << 8)|
+ (pe_ext->top_rpt[color] & bytemask);
+
+ tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+ pe_ext->num_ext_pxls_top[color] +
+ pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+ ((pe_ext->roi_w[color] +
+ pe_ext->num_ext_pxls_left[color] +
+ pe_ext->num_ext_pxls_right[color]) & shortmask);
+ }
+
+ /* color 0 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
+ tot_req_pixels[0]);
+
+ /* color 1 and color 2 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
+ tot_req_pixels[1]);
+
+ /* color 3 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+ tot_req_pixels[3]);
+}
+
+static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *sspp,
+ struct dpu_hw_pixel_ext *pe,
+ void *scaler_cfg)
+{
+ u32 idx;
+ struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
+
+ (void)pe;
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
+ || !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
+ return;
+
+ dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
+ ctx->cap->sblk->scaler_blk.version,
+ sspp->layout.format);
+}
+
+static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_pipe *ctx)
+{
+ u32 idx;
+
+ if (!ctx || _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx))
+ return 0;
+
+ return dpu_hw_get_scaler3_ver(&ctx->hw, idx);
+}
+
+/*
+ * dpu_hw_sspp_setup_rects - programs source/destination rectangles and
+ * plane strides
+ */
+static void dpu_hw_sspp_setup_rects(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index rect_index)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+ u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !cfg)
+ return;
+
+ c = &ctx->hw;
+
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) {
+ src_size_off = SSPP_SRC_SIZE;
+ src_xy_off = SSPP_SRC_XY;
+ out_size_off = SSPP_OUT_SIZE;
+ out_xy_off = SSPP_OUT_XY;
+ } else {
+ src_size_off = SSPP_SRC_SIZE_REC1;
+ src_xy_off = SSPP_SRC_XY_REC1;
+ out_size_off = SSPP_OUT_SIZE_REC1;
+ out_xy_off = SSPP_OUT_XY_REC1;
+ }
+
+
+ /* src and dest rect programming */
+ src_xy = (cfg->src_rect.y1 << 16) | cfg->src_rect.x1;
+ src_size = (drm_rect_height(&cfg->src_rect) << 16) |
+ drm_rect_width(&cfg->src_rect);
+ dst_xy = (cfg->dst_rect.y1 << 16) | cfg->dst_rect.x1;
+ dst_size = (drm_rect_height(&cfg->dst_rect) << 16) |
+ drm_rect_width(&cfg->dst_rect);
+
+ if (rect_index == DPU_SSPP_RECT_SOLO) {
+ ystride0 = (cfg->layout.plane_pitch[0]) |
+ (cfg->layout.plane_pitch[1] << 16);
+ ystride1 = (cfg->layout.plane_pitch[2]) |
+ (cfg->layout.plane_pitch[3] << 16);
+ } else {
+ ystride0 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx);
+ ystride1 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx);
+
+ if (rect_index == DPU_SSPP_RECT_0) {
+ ystride0 = (ystride0 & 0xFFFF0000) |
+ (cfg->layout.plane_pitch[0] & 0x0000FFFF);
+ ystride1 = (ystride1 & 0xFFFF0000)|
+ (cfg->layout.plane_pitch[2] & 0x0000FFFF);
+ } else {
+ ystride0 = (ystride0 & 0x0000FFFF) |
+ ((cfg->layout.plane_pitch[0] << 16) &
+ 0xFFFF0000);
+ ystride1 = (ystride1 & 0x0000FFFF) |
+ ((cfg->layout.plane_pitch[2] << 16) &
+ 0xFFFF0000);
+ }
+ }
+
+ /* rectangle register programming */
+ DPU_REG_WRITE(c, src_size_off + idx, src_size);
+ DPU_REG_WRITE(c, src_xy_off + idx, src_xy);
+ DPU_REG_WRITE(c, out_size_off + idx, dst_size);
+ DPU_REG_WRITE(c, out_xy_off + idx, dst_xy);
+
+ DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+ DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+}
+
+static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index rect_mode)
+{
+ int i;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (rect_mode == DPU_SSPP_RECT_SOLO) {
+ for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
+ cfg->layout.plane_addr[i]);
+ } else if (rect_mode == DPU_SSPP_RECT_0) {
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx,
+ cfg->layout.plane_addr[0]);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx,
+ cfg->layout.plane_addr[2]);
+ } else {
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx,
+ cfg->layout.plane_addr[0]);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx,
+ cfg->layout.plane_addr[2]);
+ }
+}
+
+static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx,
+ struct dpu_csc_cfg *data)
+{
+ u32 idx;
+ bool csc10 = false;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC, &idx) || !data)
+ return;
+
+ if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) {
+ idx += CSC_10BIT_OFFSET;
+ csc10 = true;
+ }
+
+ dpu_hw_csc_setup(&ctx->hw, idx, data, csc10);
+}
+
+static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color,
+		enum dpu_sspp_multirect_index rect_index)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0)
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
+ else
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx,
+ color);
+}
+
+static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
+}
+
+static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
+ cfg->creq_lut >> 32);
+ } else {
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+ }
+}
+
+static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+ u32 qos_ctrl = 0;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (cfg->vblank_en) {
+ qos_ctrl |= ((cfg->creq_vblank &
+ SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
+ SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
+ qos_ctrl |= ((cfg->danger_vblank &
+ SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
+ SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
+ qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
+ }
+
+ if (cfg->danger_safe_en)
+ qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
+}
+
+static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cdp_cfg *cfg)
+{
+ u32 idx;
+ u32 cdp_cntl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (cfg->enable)
+ cdp_cntl |= BIT(0);
+ if (cfg->ubwc_meta_enable)
+ cdp_cntl |= BIT(1);
+ if (cfg->tile_amortize_enable)
+ cdp_cntl |= BIT(2);
+ if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64)
+ cdp_cntl |= BIT(3);
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+}
+
+static void _setup_layer_ops(struct dpu_hw_pipe *c,
+ unsigned long features)
+{
+ if (test_bit(DPU_SSPP_SRC, &features)) {
+ c->ops.setup_format = dpu_hw_sspp_setup_format;
+ c->ops.setup_rects = dpu_hw_sspp_setup_rects;
+ c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
+ c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
+ c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
+ }
+
+ if (test_bit(DPU_SSPP_QOS, &features)) {
+ c->ops.setup_danger_safe_lut =
+ dpu_hw_sspp_setup_danger_safe_lut;
+ c->ops.setup_creq_lut = dpu_hw_sspp_setup_creq_lut;
+ c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl;
+ }
+
+ if (test_bit(DPU_SSPP_CSC, &features) ||
+ test_bit(DPU_SSPP_CSC_10BIT, &features))
+ c->ops.setup_csc = dpu_hw_sspp_setup_csc;
+
+ if (dpu_hw_sspp_multirect_enabled(c->cap))
+ c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
+
+ if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
+ c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
+ c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
+ }
+
+ if (test_bit(DPU_SSPP_CDP, &features))
+ c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
+}
+
+static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if ((sspp < SSPP_MAX) && catalog && addr && b) {
+ for (i = 0; i < catalog->sspp_count; i++) {
+ if (sspp == catalog->sspp[i].id) {
+ b->base_off = addr;
+ b->blk_off = catalog->sspp[i].base;
+ b->length = catalog->sspp[i].len;
+ b->hwversion = catalog->hwversion;
+ b->log_mask = DPU_DBG_MASK_SSPP;
+ return &catalog->sspp[i];
+ }
+ }
+ }
+
+	return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+ void __iomem *addr, struct dpu_mdss_cfg *catalog,
+ bool is_virtual_pipe)
+{
+ struct dpu_hw_pipe *hw_pipe;
+ struct dpu_sspp_cfg *cfg;
+ int rc;
+
+ if (!addr || !catalog)
+ return ERR_PTR(-EINVAL);
+
+ hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+ if (!hw_pipe)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(hw_pipe);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ hw_pipe->catalog = catalog;
+ hw_pipe->mdp = &catalog->mdp[0];
+ hw_pipe->idx = idx;
+ hw_pipe->cap = cfg;
+ _setup_layer_ops(hw_pipe, hw_pipe->cap->features);
+
+ rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return hw_pipe;
+
+blk_init_error:
+ kzfree(hw_pipe);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
+{
+ if (ctx)
+ dpu_hw_blk_destroy(&ctx->base);
+ kfree(ctx);
+}
+
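A minimal usage sketch for the SSPP driver above, assuming an already
mapped MDP register space `mmio` and a parsed catalog `cat` (both names
hypothetical), and omitting the layout/pitch setup for brevity:

	struct dpu_hw_pipe_cfg cfg = { 0 };
	struct dpu_hw_pipe *pipe;

	pipe = dpu_hw_sspp_init(SSPP_VIG0, mmio, cat, false);
	if (IS_ERR(pipe))
		return PTR_ERR(pipe);

	/* ops are populated from catalog features, so test before calling */
	cfg.src_rect = cfg.dst_rect =
		(struct drm_rect){ .x2 = 1920, .y2 = 1080 };
	if (pipe->ops.setup_rects)
		pipe->ops.setup_rects(pipe, &cfg, DPU_SSPP_RECT_SOLO);

	dpu_hw_sspp_destroy(pipe);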
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
new file mode 100644
index 000000000000..4d81e5f5ce1b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -0,0 +1,424 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_SSPP_H
+#define _DPU_HW_SSPP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+#include "dpu_formats.h"
+
+struct dpu_hw_pipe;
+
+/**
+ * Flags
+ */
+#define DPU_SSPP_FLIP_LR BIT(0)
+#define DPU_SSPP_FLIP_UD BIT(1)
+#define DPU_SSPP_SOURCE_ROTATED_90 BIT(2)
+#define DPU_SSPP_ROT_90 BIT(3)
+#define DPU_SSPP_SOLID_FILL BIT(4)
+
+/**
+ * Define all scaler feature bits in catalog
+ */
+#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
+ (1UL << DPU_SSPP_SCALER_QSEED2) | \
+ (1UL << DPU_SSPP_SCALER_QSEED3))
+
+/**
+ * Component indices
+ */
+enum {
+ DPU_SSPP_COMP_0,
+ DPU_SSPP_COMP_1_2,
+ DPU_SSPP_COMP_2,
+ DPU_SSPP_COMP_3,
+
+ DPU_SSPP_COMP_MAX
+};
+
+/**
+ * DPU_SSPP_RECT_SOLO - multirect disabled
+ * DPU_SSPP_RECT_0 - rect0 of a multirect pipe
+ * DPU_SSPP_RECT_1 - rect1 of a multirect pipe
+ *
+ * Note: HW supports multirect with either RECT0 or
+ * RECT1. Since such configurations offer no benefit over
+ * SOLO mode, and to keep plane management simple, we don't
+ * support single-rect multirect configurations.
+ */
+enum dpu_sspp_multirect_index {
+ DPU_SSPP_RECT_SOLO = 0,
+ DPU_SSPP_RECT_0,
+ DPU_SSPP_RECT_1,
+};
+
+enum dpu_sspp_multirect_mode {
+ DPU_SSPP_MULTIRECT_NONE = 0,
+ DPU_SSPP_MULTIRECT_PARALLEL,
+ DPU_SSPP_MULTIRECT_TIME_MX,
+};
+
+enum {
+ DPU_FRAME_LINEAR,
+ DPU_FRAME_TILE_A4X,
+ DPU_FRAME_TILE_A5X,
+};
+
+enum dpu_hw_filter {
+ DPU_SCALE_FILTER_NEAREST = 0,
+ DPU_SCALE_FILTER_BIL,
+ DPU_SCALE_FILTER_PCMN,
+ DPU_SCALE_FILTER_CA,
+ DPU_SCALE_FILTER_MAX
+};
+
+enum dpu_hw_filter_alpa {
+ DPU_SCALE_ALPHA_PIXEL_REP,
+ DPU_SCALE_ALPHA_BIL
+};
+
+enum dpu_hw_filter_yuv {
+ DPU_SCALE_2D_4X4,
+ DPU_SCALE_2D_CIR,
+ DPU_SCALE_1D_SEP,
+ DPU_SCALE_BIL
+};
+
+struct dpu_hw_sharp_cfg {
+ u32 strength;
+ u32 edge_thr;
+ u32 smooth_thr;
+ u32 noise_thr;
+};
+
+struct dpu_hw_pixel_ext {
+ /* scaling factors are enabled for this input layer */
+ uint8_t enable_pxl_ext;
+
+ int init_phase_x[DPU_MAX_PLANES];
+ int phase_step_x[DPU_MAX_PLANES];
+ int init_phase_y[DPU_MAX_PLANES];
+ int phase_step_y[DPU_MAX_PLANES];
+
+ /*
+	 * Number of pixels of extension in the left, right, top and bottom
+	 * directions for all color components. For each color component this
+	 * value should be the sum of fetch + repeat pixels.
+ */
+ int num_ext_pxls_left[DPU_MAX_PLANES];
+ int num_ext_pxls_right[DPU_MAX_PLANES];
+ int num_ext_pxls_top[DPU_MAX_PLANES];
+ int num_ext_pxls_btm[DPU_MAX_PLANES];
+
+ /*
+	 * Number of pixels that need to be overfetched in the left, right,
+	 * top and bottom directions from the source image for scaling.
+ */
+ int left_ftch[DPU_MAX_PLANES];
+ int right_ftch[DPU_MAX_PLANES];
+ int top_ftch[DPU_MAX_PLANES];
+ int btm_ftch[DPU_MAX_PLANES];
+
+ /*
+	 * Number of pixels that need to be repeated in the left, right, top
+	 * and bottom directions for scaling.
+ */
+ int left_rpt[DPU_MAX_PLANES];
+ int right_rpt[DPU_MAX_PLANES];
+ int top_rpt[DPU_MAX_PLANES];
+ int btm_rpt[DPU_MAX_PLANES];
+
+ uint32_t roi_w[DPU_MAX_PLANES];
+ uint32_t roi_h[DPU_MAX_PLANES];
+
+ /*
+ * Filter type to be used for scaling in horizontal and vertical
+ * directions
+ */
+ enum dpu_hw_filter horz_filter[DPU_MAX_PLANES];
+ enum dpu_hw_filter vert_filter[DPU_MAX_PLANES];
+};
+
+/**
+ * struct dpu_hw_pipe_cfg : Pipe description
+ * @layout: format layout information for programming buffer to hardware
+ * @src_rect: source ROI; the caller accounts for operations such as
+ *	decimation and flip when programming this field
+ * @dst_rect: destination ROI
+ * @index: index of the rectangle of SSPP
+ * @mode: parallel or time multiplex multirect mode
+ */
+struct dpu_hw_pipe_cfg {
+ struct dpu_hw_fmt_layout layout;
+ struct drm_rect src_rect;
+ struct drm_rect dst_rect;
+ enum dpu_sspp_multirect_index index;
+ enum dpu_sspp_multirect_mode mode;
+};
+
+/**
+ * struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
+ * @danger_lut: LUT for generating the danger level based on fill level
+ * @safe_lut: LUT for generating the safe level based on fill level
+ * @creq_lut: LUT for generating the creq level based on fill level
+ * @creq_vblank: creq value generated to vbif during vertical blanking
+ * @danger_vblank: danger value generated during vertical blanking
+ * @vblank_en: enable creq_vblank and danger_vblank during vblank
+ * @danger_safe_en: enable danger safe generation
+ */
+struct dpu_hw_pipe_qos_cfg {
+ u32 danger_lut;
+ u32 safe_lut;
+ u64 creq_lut;
+ u32 creq_vblank;
+ u32 danger_vblank;
+ bool vblank_en;
+ bool danger_safe_en;
+};
+
+/*
+ * CDP preload ahead address sizes
+ */
+enum {
+ DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ DPU_SSPP_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct dpu_hw_pipe_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead, one of:
+ * DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ * DPU_SSPP_CDP_PRELOAD_AHEAD_64
+ */
+struct dpu_hw_pipe_cdp_cfg {
+ bool enable;
+ bool ubwc_meta_enable;
+ bool tile_amortize_enable;
+ u32 preload_ahead;
+};
+
+/**
+ * struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
+ * @size: size to prefill in bytes, or zero to disable
+ * @time: time to prefill in usec, or zero to disable
+ */
+struct dpu_hw_pipe_ts_cfg {
+ u64 size;
+ u64 time;
+};
+
+/**
+ * struct dpu_hw_sspp_ops - interface to the SSPP hardware driver functions
+ * Caller must call the init function to get the pipe context for each pipe.
+ * These functions are assumed to be called after clocks are enabled.
+ */
+struct dpu_hw_sspp_ops {
+ /**
+	 * setup_format - setup pixel format, cropping and flip
+	 * @ctx: Pointer to pipe context
+	 * @fmt: Pointer to format description
+ * @flags: Extra flags for format config
+ * @index: rectangle index in multirect
+ */
+ void (*setup_format)(struct dpu_hw_pipe *ctx,
+ const struct dpu_format *fmt, u32 flags,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_rects - setup pipe ROI rectangles
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @index: rectangle index in multirect
+ */
+ void (*setup_rects)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_pe - setup pipe pixel extension
+ * @ctx: Pointer to pipe context
+ * @pe_ext: Pointer to pixel ext settings
+ */
+ void (*setup_pe)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pixel_ext *pe_ext);
+
+ /**
+ * setup_sourceaddress - setup pipe source addresses
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @index: rectangle index in multirect
+ */
+ void (*setup_sourceaddress)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+	 * setup_csc - setup color space conversion
+ * @ctx: Pointer to pipe context
+ * @data: Pointer to config structure
+ */
+ void (*setup_csc)(struct dpu_hw_pipe *ctx, struct dpu_csc_cfg *data);
+
+ /**
+ * setup_solidfill - enable/disable colorfill
+ * @ctx: Pointer to pipe context
+	 * @color: Fill color value
+ * @index: rectangle index in multirect
+ */
+ void (*setup_solidfill)(struct dpu_hw_pipe *ctx, u32 color,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_multirect - setup multirect configuration
+ * @ctx: Pointer to pipe context
+ * @index: rectangle index in multirect
+ * @mode: parallel fetch / time multiplex multirect mode
+ */
+ void (*setup_multirect)(struct dpu_hw_pipe *ctx,
+ enum dpu_sspp_multirect_index index,
+ enum dpu_sspp_multirect_mode mode);
+
+ /**
+ * setup_sharpening - setup sharpening
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to config structure
+ */
+ void (*setup_sharpening)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_sharp_cfg *cfg);
+
+ /**
+ * setup_danger_safe_lut - setup danger safe LUTs
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_creq_lut - setup CREQ LUT
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_creq_lut)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_qos_ctrl - setup QoS control
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_qos_ctrl)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_histogram - setup histograms
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to histogram configuration
+ */
+ void (*setup_histogram)(struct dpu_hw_pipe *ctx,
+ void *cfg);
+
+ /**
+ * setup_scaler - setup scaler
+ * @ctx: Pointer to pipe context
+ * @pipe_cfg: Pointer to pipe configuration
+ * @pe_cfg: Pointer to pixel extension configuration
+ * @scaler_cfg: Pointer to scaler configuration
+ */
+ void (*setup_scaler)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *pipe_cfg,
+ struct dpu_hw_pixel_ext *pe_cfg,
+ void *scaler_cfg);
+
+ /**
+ * get_scaler_ver - get scaler h/w version
+ * @ctx: Pointer to pipe context
+ */
+ u32 (*get_scaler_ver)(struct dpu_hw_pipe *ctx);
+
+ /**
+ * setup_cdp - setup client driven prefetch
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to cdp configuration
+ */
+ void (*setup_cdp)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cdp_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_pipe - pipe description
+ * @base: hardware block base structure
+ * @hw: block hardware details
+ * @catalog: back pointer to catalog
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: pipe index
+ * @cap: pointer to layer_cfg
+ * @ops: pointer to operations possible for this pipe
+ */
+struct dpu_hw_pipe {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+ struct dpu_mdss_cfg *catalog;
+ struct dpu_mdp_cfg *mdp;
+
+ /* Pipe */
+ enum dpu_sspp idx;
+ const struct dpu_sspp_cfg *cap;
+
+ /* Ops */
+ struct dpu_hw_sspp_ops ops;
+};
+
+/**
+ * to_dpu_hw_pipe - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pipe, base);
+}
+
+/**
+ * dpu_hw_sspp_init - initializes the SSPP hw driver object;
+ * should be called once per pipe before accessing it.
+ * @idx: Pipe index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @catalog : Pointer to mdss catalog data
+ * @is_virtual_pipe: whether this is a virtual pipe
+ */
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+ void __iomem *addr, struct dpu_mdss_cfg *catalog,
+ bool is_virtual_pipe);
+
+/**
+ * dpu_hw_sspp_destroy(): Destroys the SSPP driver context;
+ * should be called during hw pipe cleanup.
+ * @ctx: Pointer to SSPP driver context returned by dpu_hw_sspp_init
+ */
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx);
+
+#endif /*_DPU_HW_SSPP_H */
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
new file mode 100644
index 000000000000..db2798e862fc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -0,0 +1,398 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_top.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define SSPP_SPARE 0x28
+#define UBWC_STATIC 0x144
+
+#define FLD_SPLIT_DISPLAY_CMD BIT(1)
+#define FLD_SMART_PANEL_FREE_RUN BIT(2)
+#define FLD_INTF_1_SW_TRG_MUX BIT(4)
+#define FLD_INTF_2_SW_TRG_MUX BIT(8)
+#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
+
+#define DANGER_STATUS 0x360
+#define SAFE_STATUS 0x364
+
+#define TE_LINE_INTERVAL 0x3F4
+
+#define TRAFFIC_SHAPER_EN BIT(31)
+#define TRAFFIC_SHAPER_RD_CLIENT(num)	(0x030 + ((num) * 4))
+#define TRAFFIC_SHAPER_WR_CLIENT(num)	(0x060 + ((num) * 4))
+#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
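+/*
+ * e.g. TRAFFIC_SHAPER_RD_CLIENT(2) yields 0x030 + 2 * 4 = 0x038, the
+ * shaper register offset for read client 2.
+ */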
+
+#define MDP_WD_TIMER_0_CTL 0x380
+#define MDP_WD_TIMER_0_CTL2 0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE 0x388
+#define MDP_WD_TIMER_1_CTL 0x390
+#define MDP_WD_TIMER_1_CTL2 0x394
+#define MDP_WD_TIMER_1_LOAD_VALUE 0x398
+#define MDP_WD_TIMER_2_CTL 0x420
+#define MDP_WD_TIMER_2_CTL2 0x424
+#define MDP_WD_TIMER_2_LOAD_VALUE 0x428
+#define MDP_WD_TIMER_3_CTL 0x430
+#define MDP_WD_TIMER_3_CTL2 0x434
+#define MDP_WD_TIMER_3_LOAD_VALUE 0x438
+#define MDP_WD_TIMER_4_CTL 0x440
+#define MDP_WD_TIMER_4_CTL2 0x444
+#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
+
+#define MDP_TICK_COUNT 16
+#define XO_CLK_RATE 19200
+#define MS_TICKS_IN_SEC 1000
+
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+	((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * (fps))))
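+/*
+ * e.g. at 60 fps the load value works out to
+ * (1000 * 19200) / (16 * 60) = 20000 ticks per frame.
+ */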
+
+#define DCE_SEL 0x450
+
+static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
+ struct split_pipe_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 upper_pipe = 0;
+ u32 lower_pipe = 0;
+
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->en) {
+ if (cfg->mode == INTF_MODE_CMD) {
+ lower_pipe = FLD_SPLIT_DISPLAY_CMD;
+ /* interface controlling sw trigger */
+ if (cfg->intf == INTF_2)
+ lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
+ else
+ lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = lower_pipe;
+ } else {
+ if (cfg->intf == INTF_2) {
+ lower_pipe = FLD_INTF_1_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_2_SW_TRG_MUX;
+ } else {
+ lower_pipe = FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_1_SW_TRG_MUX;
+ }
+ }
+ }
+
+ DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
+}
+
+static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp,
+ struct cdm_output_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 out_ctl = 0;
+
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->intf_en)
+ out_ctl |= BIT(19);
+
+ DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
+}
+
+static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
+ enum dpu_clk_ctrl_type clk_ctrl, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_off, bit_off;
+ u32 reg_val, new_val;
+ bool clk_forced_on;
+
+ if (!mdp)
+ return false;
+
+ c = &mdp->hw;
+
+ if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
+ return false;
+
+ reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
+ bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
+
+ reg_val = DPU_REG_READ(c, reg_off);
+
+ if (enable)
+ new_val = reg_val | BIT(bit_off);
+ else
+ new_val = reg_val & ~BIT(bit_off);
+
+ DPU_REG_WRITE(c, reg_off, new_val);
+
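+	/*
+	 * True when the force bit was clear before this call, i.e. the
+	 * clock is forced on by this invocation rather than a prior one.
+	 */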
+ clk_forced_on = !(reg_val & BIT(bit_off));
+
+ return clk_forced_on;
+}
+
+
+static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 value;
+
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
+ value = DPU_REG_READ(c, DANGER_STATUS);
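+	/*
+	 * Two danger-level bits per client; note that the cursor fields
+	 * sit between the DMA1 and DMA2 fields in this register.
+	 */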
+ status->mdp = (value >> 0) & 0x3;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+}
+
+static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg, wd_load_value, wd_ctl, wd_ctl2, i;
+ static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
+
+ if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
+ return;
+
+ c = &mdp->hw;
+ reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
+ for (i = 0; i < cfg->pp_count; i++) {
+ int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+
+ if (pp_idx >= ARRAY_SIZE(pp_offset))
+ continue;
+
+ reg &= ~(0xf << pp_offset[pp_idx]);
+ reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
+ }
+ DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+ if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
+ cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
+ switch (cfg->vsync_source) {
+ case DPU_VSYNC_SOURCE_WD_TIMER_4:
+ wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_4_CTL;
+ wd_ctl2 = MDP_WD_TIMER_4_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_3:
+ wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_3_CTL;
+ wd_ctl2 = MDP_WD_TIMER_3_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_2:
+ wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_2_CTL;
+ wd_ctl2 = MDP_WD_TIMER_2_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_1:
+ wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_1_CTL;
+ wd_ctl2 = MDP_WD_TIMER_1_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_0:
+ default:
+ wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_0_CTL;
+ wd_ctl2 = MDP_WD_TIMER_0_CTL2;
+ break;
+ }
+
+ DPU_REG_WRITE(c, wd_load_value,
+ CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+ DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
+ reg = DPU_REG_READ(c, wd_ctl2);
+ reg |= BIT(8); /* enable heartbeat timer */
+ reg |= BIT(0); /* enable WD timer */
+ DPU_REG_WRITE(c, wd_ctl2, reg);
+
+ /* make sure that timers are enabled/disabled for vsync state */
+ wmb();
+ }
+}
+
+static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 value;
+
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
+ value = DPU_REG_READ(c, SAFE_STATUS);
+ status->mdp = (value >> 0) & 0x1;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+}
+
+static void dpu_hw_reset_ubwc(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_blk_reg_map c;
+
+ if (!mdp || !m)
+ return;
+
+ if (!IS_UBWC_20_SUPPORTED(m->caps->ubwc_version))
+ return;
+
+ /* force blk offset to zero to access beginning of register region */
+ c = mdp->hw;
+ c.blk_off = 0x0;
+ DPU_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
+}
+
+static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!mdp)
+ return;
+
+ c = &mdp->hw;
+
+ DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
+}
+
+static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_split_pipe = dpu_hw_setup_split_pipe;
+ ops->setup_cdm_output = dpu_hw_setup_cdm_output;
+ ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
+ ops->get_danger_status = dpu_hw_get_danger_status;
+ ops->setup_vsync_source = dpu_hw_setup_vsync_source;
+ ops->get_safe_status = dpu_hw_get_safe_status;
+ ops->reset_ubwc = dpu_hw_reset_ubwc;
+ ops->intf_audio_select = dpu_hw_intf_audio_select;
+}
+
+static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if (!m || !addr || !b)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < m->mdp_count; i++) {
+ if (mdp == m->mdp[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->mdp[i].base;
+ b->length = m->mdp[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_TOP;
+ return &m->mdp[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_mdp *mdp;
+ const struct dpu_mdp_cfg *cfg;
+ int rc;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+ if (!mdp)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _top_offset(idx, m, addr, &mdp->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(mdp);
+ return ERR_PTR(-EINVAL);
+ }
+
+	/* Assign ops */
+ mdp->idx = idx;
+ mdp->caps = cfg;
+ _setup_mdp_ops(&mdp->ops, mdp->caps->features);
+
+ rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+
+ return mdp;
+
+blk_init_error:
+ kzfree(mdp);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
+{
+ if (mdp)
+ dpu_hw_blk_destroy(&mdp->base);
+ kfree(mdp);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
new file mode 100644
index 000000000000..899925aaa6d7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -0,0 +1,202 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_TOP_H
+#define _DPU_HW_TOP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of bytes per clock
+ * @bpc_numer : numerator of bytes per clock
+ */
+struct traffic_shaper_cfg {
+ bool en;
+ bool rd_client;
+ u32 client_id;
+ u32 bpc_denom;
+ u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en : Enable/disable dual pipe configuration
+ * @mode : Panel interface mode
+ * @intf : Interface id for main control path
+ * @split_flush_en: Allows both the paths to be flushed when master path is
+ * flushed
+ */
+struct split_pipe_cfg {
+ bool en;
+ enum dpu_intf_mode mode;
+ enum dpu_intf intf;
+ bool split_flush_en;
+};
+
+/**
+ * struct cdm_output_cfg: output configuration for cdm
+ * @intf_en : enable/disable interface output
+ */
+struct cdm_output_cfg {
+ bool intf_en;
+};
+
+/**
+ * struct dpu_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ */
+struct dpu_danger_safe_status {
+ u8 mdp;
+ u8 sspp[SSPP_MAX];
+};
+
+/**
+ * struct dpu_vsync_source_cfg - configure vsync source and configure the
+ * watchdog timers if required.
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: ping pong index array
+ * @vsync_source: vsync source selection
+ */
+struct dpu_vsync_source_cfg {
+ u32 pp_count;
+ u32 frame_rate;
+ u32 ppnumber[PINGPONG_MAX];
+ u32 vsync_source;
+};
+
+/**
+ * struct dpu_hw_mdp_ops - interface to the MDP TOP hw driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_cdm_output : programs cdm control
+ * @setup_traffic_shaper : programs traffic shaper control
+ */
+struct dpu_hw_mdp_ops {
+	/**
+	 * setup_split_pipe() : Registers are not double buffered; this
+	 * function should be called before timing control enable
+	 * @mdp : mdp top context driver
+	 * @cfg : upper and lower part of pipe configuration
+	 */
+	void (*setup_split_pipe)(struct dpu_hw_mdp *mdp,
+			struct split_pipe_cfg *cfg);
+
+ /**
+ * setup_cdm_output() : Setup selection control of the cdm data path
+ * @mdp : mdp top context driver
+ * @cfg : cdm output configuration
+ */
+ void (*setup_cdm_output)(struct dpu_hw_mdp *mdp,
+ struct cdm_output_cfg *cfg);
+
+ /**
+ * setup_traffic_shaper() : Setup traffic shaper control
+ * @mdp : mdp top context driver
+ * @cfg : traffic shaper configuration
+ */
+ void (*setup_traffic_shaper)(struct dpu_hw_mdp *mdp,
+ struct traffic_shaper_cfg *cfg);
+
+ /**
+ * setup_clk_force_ctrl - set clock force control
+ * @mdp: mdp top context driver
+ * @clk_ctrl: clock to be controlled
+ * @enable: force on enable
+ * @return: if the clock is forced-on by this function
+ */
+ bool (*setup_clk_force_ctrl)(struct dpu_hw_mdp *mdp,
+ enum dpu_clk_ctrl_type clk_ctrl, bool enable);
+
+ /**
+ * get_danger_status - get danger status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_danger_status)(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status);
+
+ /**
+ * setup_vsync_source - setup vsync source configuration details
+ * @mdp: mdp top context driver
+ * @cfg: vsync source selection configuration
+ */
+ void (*setup_vsync_source)(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg);
+
+ /**
+ * get_safe_status - get safe status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_safe_status)(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status);
+
+ /**
+ * reset_ubwc - reset top level UBWC configuration
+ * @mdp: mdp top context driver
+ * @m: pointer to mdss catalog data
+ */
+ void (*reset_ubwc)(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m);
+
+ /**
+ * intf_audio_select - select the external interface for audio
+ * @mdp: mdp top context driver
+ */
+ void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
+};
+
+struct dpu_hw_mdp {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* top */
+ enum dpu_mdp idx;
+ const struct dpu_mdp_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_mdp_ops ops;
+};
+
+/**
+ * to_dpu_hw_mdp - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_mdp, base);
+}
+
+/**
+ * dpu_hw_mdptop_init - initializes the top driver for the passed idx
+ * @idx: MDP TOP index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ */
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
+
+#endif /*_DPU_HW_TOP_H */
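A sketch of how the TOP ops above might be driven for a split command
mode panel; `mdp` is the handle returned by dpu_hw_mdptop_init() and the
field values are purely illustrative:

	struct split_pipe_cfg cfg = {
		.en = true,
		.mode = INTF_MODE_CMD,
		.intf = INTF_2,
		.split_flush_en = true,
	};

	if (mdp->ops.setup_split_pipe)
		mdp->ops.setup_split_pipe(mdp, &cfg);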
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
new file mode 100644
index 000000000000..4cabae480a7b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -0,0 +1,368 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+/* using a file static variable for debugfs access */
+static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
+
+/* DPU_SCALER_QSEED3 */
+#define QSEED3_HW_VERSION 0x00
+#define QSEED3_OP_MODE 0x04
+#define QSEED3_RGB2Y_COEFF 0x08
+#define QSEED3_PHASE_INIT 0x0C
+#define QSEED3_PHASE_STEP_Y_H 0x10
+#define QSEED3_PHASE_STEP_Y_V 0x14
+#define QSEED3_PHASE_STEP_UV_H 0x18
+#define QSEED3_PHASE_STEP_UV_V 0x1C
+#define QSEED3_PRELOAD 0x20
+#define QSEED3_DE_SHARPEN 0x24
+#define QSEED3_DE_SHARPEN_CTL 0x28
+#define QSEED3_DE_SHAPE_CTL 0x2C
+#define QSEED3_DE_THRESHOLD 0x30
+#define QSEED3_DE_ADJUST_DATA_0 0x34
+#define QSEED3_DE_ADJUST_DATA_1 0x38
+#define QSEED3_DE_ADJUST_DATA_2 0x3C
+#define QSEED3_SRC_SIZE_Y_RGB_A 0x40
+#define QSEED3_SRC_SIZE_UV 0x44
+#define QSEED3_DST_SIZE 0x48
+#define QSEED3_COEF_LUT_CTRL 0x4C
+#define QSEED3_COEF_LUT_SWAP_BIT 0
+#define QSEED3_COEF_LUT_DIR_BIT 1
+#define QSEED3_COEF_LUT_Y_CIR_BIT 2
+#define QSEED3_COEF_LUT_UV_CIR_BIT 3
+#define QSEED3_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3_BUFFER_CTRL 0x50
+#define QSEED3_CLK_CTRL0 0x54
+#define QSEED3_CLK_CTRL1 0x58
+#define QSEED3_CLK_STATUS 0x5C
+#define QSEED3_MISR_CTRL 0x70
+#define QSEED3_MISR_SIGNATURE_0 0x74
+#define QSEED3_MISR_SIGNATURE_1 0x78
+#define QSEED3_PHASE_INIT_Y_H 0x90
+#define QSEED3_PHASE_INIT_Y_V 0x94
+#define QSEED3_PHASE_INIT_UV_H 0x98
+#define QSEED3_PHASE_INIT_UV_V 0x9C
+#define QSEED3_COEF_LUT 0x100
+#define QSEED3_FILTERS 5
+#define QSEED3_LUT_REGIONS 4
+#define QSEED3_CIRCULAR_LUTS 9
+#define QSEED3_SEPARABLE_LUTS 10
+#define QSEED3_LUT_SIZE 60
+#define QSEED3_ENABLE 2
+#define QSEED3_DIR_LUT_SIZE (200 * sizeof(u32))
+#define QSEED3_CIR_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
+#define QSEED3_SEP_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
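+/*
+ * With a LUT region of 60 words these sizes work out to 200 * 4 = 800
+ * bytes for the directional LUT, 60 * 9 * 4 = 2160 bytes for the
+ * circular LUTs and 60 * 10 * 4 = 2400 bytes for the separable LUTs.
+ */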
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name)
+{
+ /* don't need to mutex protect this */
+ if (c->log_mask & dpu_hw_util_log_mask)
+ DPU_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
+ name, c->blk_off + reg_off, val);
+ writel_relaxed(val, c->base_off + c->blk_off + reg_off);
+}
+
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off)
+{
+ return readl_relaxed(c->base_off + c->blk_off + reg_off);
+}
+
+u32 *dpu_hw_util_get_log_mask_ptr(void)
+{
+ return &dpu_hw_util_log_mask;
+}
+
+static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+ int i, j, filter;
+ int config_lut = 0x0;
+ unsigned long lut_flags;
+ u32 lut_addr, lut_offset, lut_len;
+ u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+ static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+ {{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+ {{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+ {{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+ {{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+ {{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+ };
+
+ lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+ if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
+ (scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
+ lut[0] = scaler3_cfg->dir_lut;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[1] = scaler3_cfg->cir_lut +
+ scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[2] = scaler3_cfg->cir_lut +
+ scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[3] = scaler3_cfg->sep_lut +
+ scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[4] = scaler3_cfg->sep_lut +
+ scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+
+ if (config_lut) {
+ for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+ if (!lut[filter])
+ continue;
+ lut_offset = 0;
+ for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+ lut_addr = QSEED3_COEF_LUT + offset
+ + off_tbl[filter][i][1];
+ lut_len = off_tbl[filter][i][0] << 2;
+ for (j = 0; j < lut_len; j++) {
+ DPU_REG_WRITE(c,
+ lut_addr,
+ (lut[filter])[lut_offset++]);
+ lut_addr += 4;
+ }
+ }
+ }
+ }
+
+ if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+ DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+}
+
+static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
+{
+ u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
+ u32 adjust_a, adjust_b, adjust_c;
+
+ if (!de_cfg->enable)
+ return;
+
+ sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
+ ((de_cfg->sharpen_level2 & 0x1FF) << 16);
+
+ sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
+ ((de_cfg->prec_shift & 0x7) << 13) |
+ ((de_cfg->clip & 0x7) << 16);
+
+ shape_ctl = (de_cfg->thr_quiet & 0xFF) |
+ ((de_cfg->thr_dieout & 0x3FF) << 16);
+
+ de_thr = (de_cfg->thr_low & 0x3FF) |
+ ((de_cfg->thr_high & 0x3FF) << 16);
+
+ adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
+ ((de_cfg->adjust_a[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_a[2] & 0x3FF) << 20);
+
+ adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
+ ((de_cfg->adjust_b[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_b[2] & 0x3FF) << 20);
+
+ adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
+ ((de_cfg->adjust_c[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_c[2] & 0x3FF) << 20);
+
+ DPU_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl);
+ DPU_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl);
+ DPU_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl);
+ DPU_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c);
+}
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ u32 scaler_offset, u32 scaler_version,
+ const struct dpu_format *format)
+{
+ u32 op_mode = 0;
+ u32 phase_init, preload, src_y_rgb, src_uv, dst;
+
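+	/*
+	 * OP_MODE bits assembled below: BIT(0) scaler enable, [17:16]
+	 * y/rgb filter select, BIT(12) YUV source with [25:24] uv filter
+	 * select, BIT(31) blend select, BIT(4) direction detection,
+	 * BIT(8) detail enhancer, BIT(14) non-DX format and BIT(10) alpha
+	 * enable with the alpha filter in [30:29] (a single bit at 30 on
+	 * scaler version 0x1002).
+	 */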
+ if (!scaler3_cfg->enable)
+ goto end;
+
+ op_mode |= BIT(0);
+ op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
+
+ if (format && DPU_FORMAT_IS_YUV(format)) {
+ op_mode |= BIT(12);
+ op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
+ }
+
+ op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
+ op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
+
+ preload =
+ ((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
+ ((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
+ ((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
+ ((scaler3_cfg->preload_y[1] & 0x7F) << 24);
+
+ src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
+
+ src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
+
+ dst = (scaler3_cfg->dst_width & 0x1FFFF) |
+ ((scaler3_cfg->dst_height & 0x1FFFF) << 16);
+
+ if (scaler3_cfg->de.enable) {
+ _dpu_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset);
+ op_mode |= BIT(8);
+ }
+
+ if (scaler3_cfg->lut_flag)
+ _dpu_hw_setup_scaler3_lut(c, scaler3_cfg,
+ scaler_offset);
+
+ if (scaler_version == 0x1002) {
+ phase_init =
+ ((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
+ ((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
+ ((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
+ ((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init);
+ } else {
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset,
+ scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset,
+ scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset,
+ scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset,
+ scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
+ }
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset,
+ scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset,
+ scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset,
+ scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset,
+ scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload);
+
+ DPU_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb);
+
+ DPU_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv);
+
+ DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst);
+
+end:
+ if (format && !DPU_FORMAT_IS_DX(format))
+ op_mode |= BIT(14);
+
+ if (format && format->alpha_enable) {
+ op_mode |= BIT(10);
+ if (scaler_version == 0x1002)
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
+ else
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
+ }
+
+ DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
+}
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+ u32 scaler_offset)
+{
+ return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
+}
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ struct dpu_csc_cfg *data, bool csc10)
+{
+ static const u32 matrix_shift = 7;
+ u32 clamp_shift = csc10 ? 16 : 8;
+ u32 val;
+
+ /* matrix coeff - convert S15.16 to S4.9 */
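+	/*
+	 * e.g. a unity coefficient of 0x00010000 (1.0 in S15.16) becomes
+	 * 0x200 after the shift, which is 1.0 with 9 fractional bits.
+	 */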
+ val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off, val);
+ val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0x4, val);
+ val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0x8, val);
+ val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0xc, val);
+ val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF;
+ DPU_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+ /* Pre clamp */
+ val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1];
+ DPU_REG_WRITE(c, csc_reg_off + 0x14, val);
+ val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3];
+ DPU_REG_WRITE(c, csc_reg_off + 0x18, val);
+ val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5];
+ DPU_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+ /* Post clamp */
+ val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1];
+ DPU_REG_WRITE(c, csc_reg_off + 0x20, val);
+ val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3];
+ DPU_REG_WRITE(c, csc_reg_off + 0x24, val);
+ val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5];
+ DPU_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+ /* Pre-Bias */
+ DPU_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+ /* Post-Bias */
+ DPU_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
new file mode 100644
index 000000000000..1240f505ca53
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -0,0 +1,348 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_UTIL_H
+#define _DPU_HW_UTIL_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "dpu_hw_mdss.h"
+
+#define REG_MASK(n) ((BIT(n)) - 1)
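+/* e.g. REG_MASK(4) == 0xF, a mask covering the low four bits */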
+struct dpu_format_extended;
+
+/*
+ * This is the common struct maintained by each sub block
+ * for mapping the register offsets in this block to the
+ * absolute IO address
+ * @base_off: mdp register mapped offset
+ * @blk_off: pipe offset relative to mdss offset
+ * @length: length of the register block
+ * @xin_id: xin id
+ * @hwversion: mdss hw version number
+ * @log_mask: log mask for this block
+ */
+struct dpu_hw_blk_reg_map {
+ void __iomem *base_off;
+ u32 blk_off;
+ u32 length;
+ u32 xin_id;
+ u32 hwversion;
+ u32 log_mask;
+};
+
+/**
+ * struct dpu_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
+ * @enable: detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @clip: clip shift
+ * @limit: limit value
+ * @thr_quiet: quiet threshold
+ * @thr_dieout: dieout threshold
+ * @thr_low: low threshold
+ * @thr_high: high threshold
+ * @prec_shift: precision shift
+ * @adjust_a: A-coefficients for mapping curve
+ * @adjust_b: B-coefficients for mapping curve
+ * @adjust_c: C-coefficients for mapping curve
+ */
+struct dpu_hw_scaler3_de_cfg {
+ u32 enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[DPU_MAX_DE_CURVES];
+ int16_t adjust_b[DPU_MAX_DE_CURVES];
+ int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+
+/**
+ * struct dpu_hw_scaler3_cfg : QSEEDv3 configuration
+ * @enable: scaler enable
+ * @dir_en: direction detection block enable
+ * @init_phase_x: horizontal initial phase
+ * @phase_step_x: horizontal phase step
+ * @init_phase_y: vertical initial phase
+ * @phase_step_y: vertical phase step
+ * @preload_x: horizontal preload value
+ * @preload_y: vertical preload value
+ * @src_width: source width
+ * @src_height: source height
+ * @dst_width: destination width
+ * @dst_height: destination height
+ * @y_rgb_filter_cfg: y/rgb plane filter configuration
+ * @uv_filter_cfg: uv plane filter configuration
+ * @alpha_filter_cfg: alpha filter configuration
+ * @blend_cfg: blend coefficients configuration
+ * @lut_flag: scaler LUT update flags
+ *	0x1 swap LUT bank
+ *	0x2 update 2D filter LUT
+ *	0x4 update y circular filter LUT
+ *	0x8 update uv circular filter LUT
+ *	0x10 update y separable filter LUT
+ *	0x20 update uv separable filter LUT
+ * @dir_lut_idx: 2D filter LUT index
+ * @y_rgb_cir_lut_idx: y circular filter LUT index
+ * @uv_cir_lut_idx: uv circular filter LUT index
+ * @y_rgb_sep_lut_idx: y separable filter LUT index
+ * @uv_sep_lut_idx: uv separable filter LUT index
+ * @dir_lut: pointer to 2D LUT
+ * @cir_lut: pointer to circular filter LUT
+ * @sep_lut: pointer to separable filter LUT
+ * @de: detail enhancer configuration
+ */
+struct dpu_hw_scaler3_cfg {
+ u32 enable;
+ u32 dir_en;
+ int32_t init_phase_x[DPU_MAX_PLANES];
+ int32_t phase_step_x[DPU_MAX_PLANES];
+ int32_t init_phase_y[DPU_MAX_PLANES];
+ int32_t phase_step_y[DPU_MAX_PLANES];
+
+ u32 preload_x[DPU_MAX_PLANES];
+ u32 preload_y[DPU_MAX_PLANES];
+ u32 src_width[DPU_MAX_PLANES];
+ u32 src_height[DPU_MAX_PLANES];
+
+ u32 dst_width;
+ u32 dst_height;
+
+ u32 y_rgb_filter_cfg;
+ u32 uv_filter_cfg;
+ u32 alpha_filter_cfg;
+ u32 blend_cfg;
+
+ u32 lut_flag;
+ u32 dir_lut_idx;
+
+ u32 y_rgb_cir_lut_idx;
+ u32 uv_cir_lut_idx;
+ u32 y_rgb_sep_lut_idx;
+ u32 uv_sep_lut_idx;
+ u32 *dir_lut;
+ size_t dir_len;
+ u32 *cir_lut;
+ size_t cir_len;
+ u32 *sep_lut;
+ size_t sep_len;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct dpu_hw_scaler3_de_cfg de;
+};
+
+struct dpu_hw_scaler3_lut_cfg {
+ bool is_configured;
+ u32 *dir_lut;
+ size_t dir_len;
+ u32 *cir_lut;
+ size_t cir_len;
+ u32 *sep_lut;
+ size_t sep_len;
+};
+
+/**
+ * struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure
+ * @num_ext_pxls_lr: Number of total horizontal pixels
+ * @num_ext_pxls_tb: Number of total vertical lines
+ * @left_ftch: Number of extra pixels to overfetch from left
+ * @right_ftch: Number of extra pixels to overfetch from right
+ * @top_ftch: Number of extra lines to overfetch from top
+ * @btm_ftch: Number of extra lines to overfetch from bottom
+ * @left_rpt: Number of extra pixels to repeat from left
+ * @right_rpt: Number of extra pixels to repeat from right
+ * @top_rpt: Number of extra lines to repeat from top
+ * @btm_rpt: Number of extra lines to repeat from bottom
+ */
+struct dpu_drm_pix_ext_v1 {
+ /*
+	 * Number of pixels of extension in the left, right, top and bottom
+	 * directions for all color components.
+ */
+ int32_t num_ext_pxls_lr[DPU_MAX_PLANES];
+ int32_t num_ext_pxls_tb[DPU_MAX_PLANES];
+
+ /*
+	 * Number of pixels that need to be overfetched in the left, right,
+	 * top and bottom directions from the source image for scaling.
+ */
+ int32_t left_ftch[DPU_MAX_PLANES];
+ int32_t right_ftch[DPU_MAX_PLANES];
+ int32_t top_ftch[DPU_MAX_PLANES];
+ int32_t btm_ftch[DPU_MAX_PLANES];
+ /*
+	 * Number of pixels that need to be repeated in the left, right, top
+	 * and bottom directions for scaling.
+ */
+ int32_t left_rpt[DPU_MAX_PLANES];
+ int32_t right_rpt[DPU_MAX_PLANES];
+ int32_t top_rpt[DPU_MAX_PLANES];
+ int32_t btm_rpt[DPU_MAX_PLANES];
+};
+
+/**
+ * struct dpu_drm_de_v1 - version 1 of detail enhancer structure
+ * @enable: Enables/disables detail enhancer
+ * @sharpen_level1: Sharpening strength for noise
+ * @sharpen_level2: Sharpening strength for context
+ * @clip: Clip coefficient
+ * @limit: Detail enhancer limit factor
+ * @thr_quiet: Quiet zone threshold
+ * @thr_dieout: Die-out zone threshold
+ * @thr_low: Linear zone left threshold
+ * @thr_high: Linear zone right threshold
+ * @prec_shift: Detail enhancer precision
+ * @adjust_a: Mapping curves A coefficients
+ * @adjust_b: Mapping curves B coefficients
+ * @adjust_c: Mapping curves C coefficients
+ */
+struct dpu_drm_de_v1 {
+ uint32_t enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[DPU_MAX_DE_CURVES];
+ int16_t adjust_b[DPU_MAX_DE_CURVES];
+ int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+/**
+ * struct dpu_drm_scaler_v2 - version 2 of struct dpu_drm_scaler
+ * @enable: Scaler enable
+ * @dir_en: Direction detection enable
+ * @pe: Pixel extension settings
+ * @horz_decimate: Horizontal decimation factor
+ * @vert_decimate: Vertical decimation factor
+ * @init_phase_x: Initial scaler phase values for x
+ * @phase_step_x: Phase step values for x
+ * @init_phase_y: Initial scaler phase values for y
+ * @phase_step_y: Phase step values for y
+ * @preload_x: Horizontal preload value
+ * @preload_y: Vertical preload value
+ * @src_width: Source width
+ * @src_height: Source height
+ * @dst_width: Destination width
+ * @dst_height: Destination height
+ * @y_rgb_filter_cfg: Y/RGB plane filter configuration
+ * @uv_filter_cfg: UV plane filter configuration
+ * @alpha_filter_cfg: Alpha filter configuration
+ * @blend_cfg: Selection of blend coefficients
+ * @lut_flag: LUT configuration flags
+ * @dir_lut_idx: 2d 4x4 LUT index
+ * @y_rgb_cir_lut_idx: Y/RGB circular LUT index
+ * @uv_cir_lut_idx: UV circular LUT index
+ * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
+ * @uv_sep_lut_idx: UV separable LUT index
+ * @de: Detail enhancer settings
+ */
+struct dpu_drm_scaler_v2 {
+ /*
+ * General definitions
+ */
+ uint32_t enable;
+ uint32_t dir_en;
+
+ /*
+ * Pix ext settings
+ */
+ struct dpu_drm_pix_ext_v1 pe;
+
+ /*
+ * Decimation settings
+ */
+ uint32_t horz_decimate;
+ uint32_t vert_decimate;
+
+ /*
+ * Phase settings
+ */
+ int32_t init_phase_x[DPU_MAX_PLANES];
+ int32_t phase_step_x[DPU_MAX_PLANES];
+ int32_t init_phase_y[DPU_MAX_PLANES];
+ int32_t phase_step_y[DPU_MAX_PLANES];
+
+ uint32_t preload_x[DPU_MAX_PLANES];
+ uint32_t preload_y[DPU_MAX_PLANES];
+ uint32_t src_width[DPU_MAX_PLANES];
+ uint32_t src_height[DPU_MAX_PLANES];
+
+ uint32_t dst_width;
+ uint32_t dst_height;
+
+ uint32_t y_rgb_filter_cfg;
+ uint32_t uv_filter_cfg;
+ uint32_t alpha_filter_cfg;
+ uint32_t blend_cfg;
+
+ uint32_t lut_flag;
+ uint32_t dir_lut_idx;
+
+	/* for Y(RGB) and UV planes */
+ uint32_t y_rgb_cir_lut_idx;
+ uint32_t uv_cir_lut_idx;
+ uint32_t y_rgb_sep_lut_idx;
+ uint32_t uv_sep_lut_idx;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct dpu_drm_de_v1 de;
+};
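+
+/*
+ * Example phase computation (a sketch, assuming the 21-bit fixed-point
+ * phase format used elsewhere in this driver): downscaling 1920 source
+ * pixels to 960 destination pixels gives
+ * phase_step_x[i] = mult_frac(1 << 21, 1920, 960) = 0x400000,
+ * i.e. the scaler consumes two source pixels per destination pixel.
+ */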
+
+u32 *dpu_hw_util_get_log_mask_ptr(void);
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name);
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off);
+
+#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off)
+#define DPU_REG_READ(c, off) dpu_reg_read(c, off)
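+
+/*
+ * Example: DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val) expands to
+ * dpu_reg_write(c, VBIF_WRITE_GATHER_EN, reg_val, "VBIF_WRITE_GATHER_EN"),
+ * making the stringified offset name available for hardware logging.
+ */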
+
+#define MISR_FRAME_COUNT_MASK 0xFF
+#define MISR_CTRL_ENABLE BIT(8)
+#define MISR_CTRL_STATUS BIT(9)
+#define MISR_CTRL_STATUS_CLEAR BIT(10)
+#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)
+
+void *dpu_hw_util_get_dir(void);
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ u32 scaler_offset, u32 scaler_version,
+ const struct dpu_format *format);
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+ u32 scaler_offset);
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ struct dpu_csc_cfg *data, bool csc10);
+
+#endif /* _DPU_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
new file mode 100644
index 000000000000..d43905525f92
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -0,0 +1,275 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_dbg.h"
+
+#define VBIF_VERSION 0x0000
+#define VBIF_CLK_FORCE_CTRL0 0x0008
+#define VBIF_CLK_FORCE_CTRL1 0x000C
+#define VBIF_QOS_REMAP_00 0x0020
+#define VBIF_QOS_REMAP_01 0x0024
+#define VBIF_QOS_REMAP_10 0x0028
+#define VBIF_QOS_REMAP_11 0x002C
+#define VBIF_WRITE_GATHER_EN 0x00AC
+#define VBIF_IN_RD_LIM_CONF0 0x00B0
+#define VBIF_IN_RD_LIM_CONF1 0x00B4
+#define VBIF_IN_RD_LIM_CONF2 0x00B8
+#define VBIF_IN_WR_LIM_CONF0 0x00C0
+#define VBIF_IN_WR_LIM_CONF1 0x00C4
+#define VBIF_IN_WR_LIM_CONF2 0x00C8
+#define VBIF_OUT_RD_LIM_CONF0 0x00D0
+#define VBIF_OUT_WR_LIM_CONF0 0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164
+#define VBIF_XIN_PND_ERR 0x0190
+#define VBIF_XIN_SRC_ERR 0x0194
+#define VBIF_XIN_CLR_ERR 0x019C
+#define VBIF_XIN_HALT_CTRL0 0x0200
+#define VBIF_XIN_HALT_CTRL1 0x0204
+#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
+#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590
+
+static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
+ u32 *pnd_errors, u32 *src_errors)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 pnd, src;
+
+ if (!vbif)
+ return;
+ c = &vbif->hw;
+ pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
+ src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
+
+ if (pnd_errors)
+ *pnd_errors = pnd;
+ if (src_errors)
+ *src_errors = src;
+
+ DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
+}
+
+static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 value)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_off;
+ u32 bit_off;
+ u32 reg_val;
+
+ /*
+ * Assume each field is 4 bits wide and there are 8 fields per
+ * 32-bit register, so at most 16 fields across the two registers.
+ */
+ if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+ return;
+
+ c = &vbif->hw;
+
+ if (xin_id >= 8) {
+ xin_id -= 8;
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+ } else {
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+ }
+ bit_off = (xin_id & 0x7) * 4;
+ reg_val = DPU_REG_READ(c, reg_off);
+ reg_val &= ~(0x7 << bit_off);
+ reg_val |= (value & 0x7) << bit_off;
+ DPU_REG_WRITE(c, reg_off, reg_val);
+}
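+
+/*
+ * Worked example for the field packing above: xin_id = 10 selects the
+ * second register, so reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1 and, after
+ * xin_id -= 8, bit_off = (2 & 0x7) * 4 = 8; the low three bits of
+ * value are written to bits [10:8]. Fields are spaced four bits apart,
+ * but only three bits per field are read-modify-written here.
+ */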
+
+static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+ u32 reg_off;
+ u32 bit_off;
+
+ if (rd)
+ reg_off = VBIF_IN_RD_LIM_CONF0;
+ else
+ reg_off = VBIF_IN_WR_LIM_CONF0;
+
+ reg_off += (xin_id / 4) * 4;
+ bit_off = (xin_id % 4) * 8;
+ reg_val = DPU_REG_READ(c, reg_off);
+ reg_val &= ~(0xFF << bit_off);
+ reg_val |= (limit) << bit_off;
+ DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
+static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+ u32 reg_off;
+ u32 bit_off;
+ u32 limit;
+
+ if (rd)
+ reg_off = VBIF_IN_RD_LIM_CONF0;
+ else
+ reg_off = VBIF_IN_WR_LIM_CONF0;
+
+ reg_off += (xin_id / 4) * 4;
+ bit_off = (xin_id % 4) * 8;
+ reg_val = DPU_REG_READ(c, reg_off);
+ limit = (reg_val >> bit_off) & 0xFF;
+
+ return limit;
+}
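+
+/*
+ * Worked example of the limit packing used by the two functions above:
+ * for xin_id = 6 and rd = true, reg_off = VBIF_IN_RD_LIM_CONF0 + 4
+ * (i.e. VBIF_IN_RD_LIM_CONF1) and bit_off = 16, so the 8-bit limit for
+ * that client occupies bits [23:16] of the register.
+ */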
+
+static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+
+ reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
+
+ if (enable)
+ reg_val |= BIT(xin_id);
+ else
+ reg_val &= ~BIT(xin_id);
+
+ DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
+}
+
+static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
+ u32 xin_id)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+
+ reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
+
+ return (reg_val & BIT(xin_id)) ? true : false;
+}
+
+static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 level, u32 remap_level)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+
+ if (!vbif)
+ return;
+
+ c = &vbif->hw;
+
+ reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
+ reg_shift = (xin_id & 0x7) * 4;
+
+ reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
+ reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+
+ mask = 0x7 << reg_shift;
+
+ reg_val &= ~mask;
+ reg_val |= (remap_level << reg_shift) & mask;
+
+ reg_val_lvl &= ~mask;
+ reg_val_lvl |= (remap_level << reg_shift) & mask;
+
+ DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
+ DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+}
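+
+/*
+ * Worked example: for xin_id = 9 and level = 1, reg_high = 1 * 4 + 8 = 12
+ * and reg_shift = (9 & 0x7) * 4 = 4, so the 3-bit remap value is written
+ * to bits [6:4] of both VBIF_XINL_QOS_RP_REMAP_000 + 12 and
+ * VBIF_XINL_QOS_LVL_REMAP_000 + 12. Note that remap_level is written to
+ * both the RP and LVL remap registers; level only selects which register
+ * pair is updated.
+ */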
+
+static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_val;
+
+ if (!vbif || xin_id >= MAX_XIN_COUNT)
+ return;
+
+ c = &vbif->hw;
+
+ reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
+ reg_val |= BIT(xin_id);
+ DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
+}
+
+static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
+ unsigned long cap)
+{
+ ops->set_limit_conf = dpu_hw_set_limit_conf;
+ ops->get_limit_conf = dpu_hw_get_limit_conf;
+ ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
+ ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
+ if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
+ ops->set_qos_remap = dpu_hw_set_qos_remap;
+ ops->set_mem_type = dpu_hw_set_mem_type;
+ ops->clear_errors = dpu_hw_clear_errors;
+ ops->set_write_gather_en = dpu_hw_set_write_gather_en;
+}
+
+static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->vbif_count; i++) {
+ if (vbif == m->vbif[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->vbif[i].base;
+ b->length = m->vbif[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_VBIF;
+ return &m->vbif[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_vbif *c;
+ const struct dpu_vbif_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _top_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Assign ops
+ */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_vbif_ops(&c->ops, c->cap->features);
+
+ /* no need to register sub-range in dpu dbg, dump entire vbif io base */
+
+ return c;
+}
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
+{
+ kfree(vbif);
+}
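+
+/*
+ * Typical usage (sketch only; error handling elided, mmio/catalog/
+ * xin_id/mem_type are placeholders, and clocks are assumed to be
+ * enabled before any ops are invoked):
+ *
+ * struct dpu_hw_vbif *vbif;
+ *
+ * vbif = dpu_hw_vbif_init(VBIF_RT, mmio, catalog);
+ * if (!IS_ERR_OR_NULL(vbif) && vbif->ops.set_mem_type)
+ * vbif->ops.set_mem_type(vbif, xin_id, mem_type);
+ * ...
+ * dpu_hw_vbif_destroy(vbif);
+ */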
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
new file mode 100644
index 000000000000..471ff673c045
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_VBIF_H
+#define _DPU_HW_VBIF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_vbif;
+
+/**
+ * struct dpu_hw_vbif_ops - interface to the VBIF hardware driver functions
+ * These functions are assumed to be called only after clocks are enabled.
+ */
+struct dpu_hw_vbif_ops {
+ /**
+ * set_limit_conf - set transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @limit: outstanding transaction limit
+ */
+ void (*set_limit_conf)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit);
+
+ /**
+ * get_limit_conf - get transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @return: outstanding transaction limit
+ */
+ u32 (*get_limit_conf)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd);
+
+ /**
+ * set_halt_ctrl - set halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @enable: halt control enable
+ */
+ void (*set_halt_ctrl)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool enable);
+
+ /**
+ * get_halt_ctrl - get halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @return: halt control enable
+ */
+ bool (*get_halt_ctrl)(struct dpu_hw_vbif *vbif,
+ u32 xin_id);
+
+ /**
+ * set_qos_remap - set QoS priority remap
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @level: priority level
+ * @remap_level: remapped level
+ */
+ void (*set_qos_remap)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 level, u32 remap_level);
+
+ /**
+ * set_mem_type - set memory type
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @value: memory type value
+ */
+ void (*set_mem_type)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 value);
+
+ /**
+ * clear_errors - clear any vbif errors
+ * This function clears any detected pending/source errors
+ * on the VBIF interface, and optionally returns the detected
+ * error mask(s).
+ * @vbif: vbif context driver
+ * @pnd_errors: pointer to pending error reporting variable
+ * @src_errors: pointer to source error reporting variable
+ */
+ void (*clear_errors)(struct dpu_hw_vbif *vbif,
+ u32 *pnd_errors, u32 *src_errors);
+
+ /**
+ * set_write_gather_en - set write_gather enable
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ */
+ void (*set_write_gather_en)(struct dpu_hw_vbif *vbif, u32 xin_id);
+};
+
+struct dpu_hw_vbif {
+ /* base */
+ struct dpu_hw_blk_reg_map hw;
+
+ /* vbif */
+ enum dpu_vbif idx;
+ const struct dpu_vbif_cfg *cap;
+
+ /* ops */
+ struct dpu_hw_vbif_ops ops;
+};
+
+/**
+ * dpu_hw_vbif_init - initializes the vbif driver for the passed interface idx
+ * @idx: Interface index for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ * @m: Pointer to mdss catalog data
+ * @return: pointer to vbif context on success, error pointer on failure
+ */
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
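+/**
+ * dpu_hw_vbif_destroy - destroy vbif driver context
+ * @vbif: pointer to vbif driver context returned by dpu_hw_vbif_init
+ */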
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
+
+#endif /*_DPU_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
new file mode 100644
index 000000000000..5b2bc9b65b15
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HWIO_H
+#define _DPU_HWIO_H
+
+#include "dpu_hw_util.h"
+
+/*
+ * MDP TOP block register offsets and bit field defines
+ */
+#define DISP_INTF_SEL 0x004
+#define INTR_EN 0x010
+#define INTR_STATUS 0x014
+#define INTR_CLEAR 0x018
+#define INTR2_EN 0x008
+#define INTR2_STATUS 0x00c
+#define INTR2_CLEAR 0x02c
+#define HIST_INTR_EN 0x01c
+#define HIST_INTR_STATUS 0x020
+#define HIST_INTR_CLEAR 0x024
+#define INTF_INTR_EN 0x1C0
+#define INTF_INTR_STATUS 0x1C4
+#define INTF_INTR_CLEAR 0x1C8
+#define SPLIT_DISPLAY_EN 0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN 0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN 0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN 0x308
+#define HW_EVENTS_CTL 0x37C
+#define CLK_CTRL3 0x3A8
+#define CLK_STATUS3 0x3AC
+#define CLK_CTRL4 0x3B0
+#define CLK_STATUS4 0x3B4
+#define CLK_CTRL5 0x3B8
+#define CLK_STATUS5 0x3BC
+#define CLK_CTRL7 0x3D0
+#define CLK_STATUS7 0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL 0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL 0x3F4
+#define INTF_SW_RESET_MASK 0x3FC
+#define HDMI_DP_CORE_SELECT 0x408
+#define MDP_OUT_CTL_0 0x410
+#define MDP_VSYNC_SEL 0x414
+#define DCE_SEL 0x450
+
+#endif /*_DPU_HWIO_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
new file mode 100644
index 000000000000..790d39f816dc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -0,0 +1,203 @@
+/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include "dpu_io_util.h"
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+ int i;
+
+ for (i = num_clk - 1; i >= 0; i--) {
+ if (clk_arry[i].clk)
+ clk_put(clk_arry[i].clk);
+ clk_arry[i].clk = NULL;
+ }
+}
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+ rc = PTR_ERR_OR_ZERO(clk_arry[i].clk);
+ if (rc) {
+ DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name, rc);
+ goto error;
+ }
+ }
+
+ return rc;
+
+error:
+ for (i--; i >= 0; i--) {
+ if (clk_arry[i].clk)
+ clk_put(clk_arry[i].clk);
+ clk_arry[i].clk = NULL;
+ }
+
+ return rc;
+}
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ if (clk_arry[i].clk) {
+ if (clk_arry[i].type != DSS_CLK_AHB) {
+ DEV_DBG("%pS->%s: '%s' rate %ld\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name,
+ clk_arry[i].rate);
+ rc = clk_set_rate(clk_arry[i].clk,
+ clk_arry[i].rate);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ break;
+ }
+ }
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+ int i, rc = 0;
+
+ if (enable) {
+ for (i = 0; i < num_clk; i++) {
+ DEV_DBG("%pS->%s: enable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ if (clk_arry[i].clk) {
+ rc = clk_prepare_enable(clk_arry[i].clk);
+ if (rc)
+ DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ }
+
+ if (rc) {
+ msm_dss_enable_clk(&clk_arry[i],
+ i, false);
+ break;
+ }
+ }
+ } else {
+ for (i = num_clk - 1; i >= 0; i--) {
+ DEV_DBG("%pS->%s: disable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+
+ if (clk_arry[i].clk)
+ clk_disable_unprepare(clk_arry[i].clk);
+ else
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ }
+ }
+
+ return rc;
+}
+
+int msm_dss_parse_clock(struct platform_device *pdev,
+ struct dss_module_power *mp)
+{
+ int i, rc = 0;
+ const char *clock_name;
+ int num_clk = 0;
+
+ if (!pdev || !mp)
+ return -EINVAL;
+
+ mp->num_clk = 0;
+ num_clk = of_property_count_strings(pdev->dev.of_node, "clock-names");
+ if (num_clk <= 0) {
+ pr_debug("clocks are not defined\n");
+ return 0;
+ }
+
+ mp->clk_config = devm_kcalloc(&pdev->dev, num_clk,
+ sizeof(struct dss_clk), GFP_KERNEL);
+ if (!mp->clk_config)
+ return -ENOMEM;
+
+ for (i = 0; i < num_clk; i++) {
+ rc = of_property_read_string_index(pdev->dev.of_node,
+ "clock-names", i,
+ &clock_name);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+ i);
+ break;
+ }
+ strlcpy(mp->clk_config[i].clk_name, clock_name,
+ sizeof(mp->clk_config[i].clk_name));
+
+ mp->clk_config[i].type = DSS_CLK_AHB;
+ }
+
+ rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+ goto err;
+ }
+
+ rc = of_clk_set_defaults(pdev->dev.of_node, false);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+ goto err;
+ }
+
+ for (i = 0; i < num_clk; i++) {
+ unsigned long rate = clk_get_rate(mp->clk_config[i].clk);
+
+ if (!rate)
+ continue;
+ mp->clk_config[i].rate = rate;
+ mp->clk_config[i].type = DSS_CLK_PCLK;
+ }
+
+ mp->num_clk = num_clk;
+ return 0;
+
+err:
+ msm_dss_put_clk(mp->clk_config, num_clk);
+ return rc;
+}
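+
+/*
+ * Typical probe-time flow (sketch only; error handling elided):
+ *
+ * msm_dss_parse_clock(pdev, mp);
+ * msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+ * msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ * ...
+ * msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ * msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ */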
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
new file mode 100644
index 000000000000..bc07381d7429
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IO_UTIL_H__
+#define __DPU_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define DEV_DBG(fmt, args...) pr_debug(fmt, ##args)
+#define DEV_INFO(fmt, args...) pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...) pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...) pr_err(fmt, ##args)
+
+struct dss_gpio {
+ unsigned int gpio;
+ unsigned int value;
+ char gpio_name[32];
+};
+
+enum dss_clk_type {
+ DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+ DSS_CLK_PCLK,
+};
+
+struct dss_clk {
+ struct clk *clk; /* clk handle */
+ char clk_name[32];
+ enum dss_clk_type type;
+ unsigned long rate;
+ unsigned long max_rate;
+};
+
+struct dss_module_power {
+ unsigned int num_gpio;
+ struct dss_gpio *gpio_config;
+ unsigned int num_clk;
+ struct dss_clk *clk_config;
+};
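+
+/*
+ * Illustrative device-tree binding (the names are examples only): with
+ *
+ * clock-names = "iface", "core";
+ *
+ * in the device node, msm_dss_parse_clock() fills clk_config[0] and
+ * clk_config[1] with those names. Every entry starts as DSS_CLK_AHB
+ * and is switched to DSS_CLK_PCLK once a nonzero rate is read back.
+ */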
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+int msm_dss_parse_clock(struct platform_device *pdev,
+ struct dss_module_power *mp);
+#endif /* __DPU_IO_UTIL_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
new file mode 100644
index 000000000000..d5e6ce0140cf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_irq.h"
+#include "dpu_core_irq.h"
+
+irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ return dpu_core_irq(dpu_kms);
+}
+
+void dpu_irq_preinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ if (!dpu_kms->dev || !dpu_kms->dev->dev) {
+ pr_err("invalid device handles\n");
+ return;
+ }
+
+ dpu_core_irq_preinstall(dpu_kms);
+}
+
+int dpu_irq_postinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ int rc;
+
+ if (!kms) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ rc = dpu_core_irq_postinstall(dpu_kms);
+
+ return rc;
+}
+
+void dpu_irq_uninstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ if (!kms) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_core_irq_uninstall(dpu_kms);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
new file mode 100644
index 000000000000..3e147f7176e2
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IRQ_H__
+#define __DPU_IRQ_H__
+
+#include <linux/kernel.h>
+#include <linux/irqdomain.h>
+
+#include "msm_kms.h"
+
+/**
+ * struct dpu_irq_controller - MDSS-level interrupt controller context
+ * @enabled_mask: enable status of MDSS level interrupt
+ * @domain: interrupt domain of this controller
+ */
+struct dpu_irq_controller {
+ unsigned long enabled_mask;
+ struct irq_domain *domain;
+};
+
+/**
+ * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: none
+ */
+void dpu_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: 0 if success; error code otherwise
+ */
+int dpu_irq_postinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_uninstall - uninstall MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: none
+ */
+void dpu_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq - MDSS level IRQ handler
+ * @kms: pointer to kms context
+ * @return: interrupt handling status
+ */
+irqreturn_t dpu_irq(struct msm_kms *kms);
+
+#endif /* __DPU_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
new file mode 100644
index 000000000000..7dd6bd2d6d37
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -0,0 +1,1345 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
+#include <linux/of_irq.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+
+#include "dpu_kms.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_vbif.h"
+#include "dpu_encoder.h"
+#include "dpu_plane.h"
+#include "dpu_crtc.h"
+
+#define CREATE_TRACE_POINTS
+#include "dpu_trace.h"
+
+static const char * const iommu_ports[] = {
+ "mdp_0",
+};
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
+ *
+ * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
+ */
+#define DPU_DEBUGFS_DIR "msm_dpu"
+#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+static int dpu_kms_hw_init(struct msm_kms *kms);
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
+
+static unsigned long dpu_iomap_size(struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!res) {
+ DRM_ERROR("failed to get memory resource: %s\n", name);
+ return 0;
+ }
+
+ return resource_size(res);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_danger_signal_status(struct seq_file *s,
+ bool danger_status)
+{
+ struct dpu_kms *kms = (struct dpu_kms *)s->private;
+ struct msm_drm_private *priv;
+ struct dpu_danger_safe_status status;
+ int i;
+
+ if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+ DPU_ERROR("invalid arg(s)\n");
+ return 0;
+ }
+
+ priv = kms->dev->dev_private;
+ memset(&status, 0, sizeof(struct dpu_danger_safe_status));
+
+ pm_runtime_get_sync(&kms->pdev->dev);
+ if (danger_status) {
+ seq_puts(s, "\nDanger signal status:\n");
+ if (kms->hw_mdp->ops.get_danger_status)
+ kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+ &status);
+ } else {
+ seq_puts(s, "\nSafe signal status:\n");
+ if (kms->hw_mdp->ops.get_safe_status)
+ kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+ &status);
+ }
+ pm_runtime_put_sync(&kms->pdev->dev);
+
+ seq_printf(s, "MDP : 0x%x\n", status.mdp);
+
+ for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+ seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
+ status.sspp[i]);
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
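+
+/*
+ * For example, DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats)
+ * below generates dpu_debugfs_danger_stats_open() and
+ * dpu_debugfs_danger_stats_fops from dpu_debugfs_danger_stats_show().
+ */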
+
+static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+ return _dpu_danger_signal_status(s, true);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
+
+static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+ return _dpu_danger_signal_status(s, false);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
+
+static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove_recursive(dpu_kms->debugfs_danger);
+ dpu_kms->debugfs_danger = NULL;
+}
+
+static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ dpu_kms->debugfs_danger = debugfs_create_dir("danger",
+ parent);
+ if (!dpu_kms->debugfs_danger) {
+ DPU_ERROR("failed to create danger debugfs\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+ dpu_kms, &dpu_debugfs_danger_stats_fops);
+ debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+ dpu_kms, &dpu_debugfs_safe_stats_fops);
+
+ return 0;
+}
+
+static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
+{
+ struct dpu_debugfs_regset32 *regset;
+ struct dpu_kms *dpu_kms;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ void __iomem *base;
+ uint32_t i, addr;
+
+ if (!s || !s->private)
+ return 0;
+
+ regset = s->private;
+
+ dpu_kms = regset->dpu_kms;
+ if (!dpu_kms || !dpu_kms->mmio)
+ return 0;
+
+ dev = dpu_kms->dev;
+ if (!dev)
+ return 0;
+
+ priv = dev->dev_private;
+ if (!priv)
+ return 0;
+
+ base = dpu_kms->mmio + regset->offset;
+
+ /* insert padding spaces, if needed */
+ if (regset->offset & 0xF) {
+ seq_printf(s, "[%x]", regset->offset & ~0xF);
+ for (i = 0; i < (regset->offset & 0xF); i += 4)
+ seq_puts(s, " ");
+ }
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* main register output */
+ for (i = 0; i < regset->blk_len; i += 4) {
+ addr = regset->offset + i;
+ if ((addr & 0xF) == 0x0)
+ seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+ seq_printf(s, " %08x", readl_relaxed(base + i));
+ }
+ seq_puts(s, "\n");
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+static int dpu_debugfs_open_regset32(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
+}
+
+static const struct file_operations dpu_fops_regset32 = {
+ .open = dpu_debugfs_open_regset32,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
+{
+ if (regset) {
+ regset->offset = offset;
+ regset->blk_len = length;
+ regset->dpu_kms = dpu_kms;
+ }
+}
+
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent, struct dpu_debugfs_regset32 *regset)
+{
+ if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
+ return NULL;
+
+ /* make sure offset is a multiple of 4 */
+ regset->offset = round_down(regset->offset, 4);
+
+ return debugfs_create_file(name, mode, parent,
+ regset, &dpu_fops_regset32);
+}
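+
+/*
+ * Usage sketch (the name, offset and length are illustrative): to
+ * expose 256 bytes of registers starting at offset 0x1000,
+ *
+ * dpu_debugfs_setup_regset32(&regset, 0x1000, 0x100, dpu_kms);
+ * dpu_debugfs_create_regset32("dump", 0400, parent, &regset);
+ *
+ * reads of the new file then dump the range four words per line.
+ */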
+
+static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
+{
+ void *p;
+ int rc;
+
+ p = dpu_hw_util_get_log_mask_ptr();
+
+ if (!dpu_kms || !p)
+ return -EINVAL;
+
+ dpu_kms->debugfs_root = debugfs_create_dir("debug",
+ dpu_kms->dev->primary->debugfs_root);
+ if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
+ DRM_ERROR("debugfs create_dir failed %ld\n",
+ PTR_ERR(dpu_kms->debugfs_root));
+ return PTR_ERR(dpu_kms->debugfs_root);
+ }
+
+ rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
+ if (rc) {
+ DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
+ return rc;
+ }
+
+ /* allow root to be NULL */
+ debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
+
+ (void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
+ (void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
+ (void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
+
+ rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
+ if (rc) {
+ DPU_ERROR("failed to init perf %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+ /* don't need to NULL check debugfs_root */
+ if (dpu_kms) {
+ dpu_debugfs_vbif_destroy(dpu_kms);
+ dpu_debugfs_danger_destroy(dpu_kms);
+ dpu_debugfs_core_irq_destroy(dpu_kms);
+ debugfs_remove_recursive(dpu_kms->debugfs_root);
+ }
+}
+#else
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ return dpu_crtc_vblank(crtc, true);
+}
+
+static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ dpu_crtc_vblank(crtc, false);
+}
+
+static void dpu_kms_prepare_commit(struct msm_kms *kms,
+ struct drm_atomic_state *state)
+{
+ struct dpu_kms *dpu_kms;
+ struct msm_drm_private *priv;
+ struct drm_device *dev;
+ struct drm_encoder *encoder;
+
+ if (!kms)
+ return;
+ dpu_kms = to_dpu_kms(kms);
+ dev = dpu_kms->dev;
+
+ if (!dev || !dev->dev_private)
+ return;
+ priv = dev->dev_private;
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc != NULL)
+ dpu_encoder_prepare_commit(encoder);
+}
+
+/*
+ * Override the encoder enable since we need to setup the inline rotator and do
+ * some crtc magic before enabling any bridge that might be present.
+ */
+void dpu_kms_encoder_enable(struct drm_encoder *encoder)
+{
+ const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
+ struct drm_crtc *crtc = encoder->crtc;
+
+ /* Forward this enable call to the commit hook */
+ if (funcs && funcs->commit)
+ funcs->commit(encoder);
+
+ if (crtc && crtc->state->active) {
+ trace_dpu_kms_enc_enable(DRMID(crtc));
+ dpu_crtc_commit_kickoff(crtc);
+ }
+}
+
+static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ /* If modeset is required, kickoff is run in encoder_enable */
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ continue;
+
+ if (crtc->state->active) {
+ trace_dpu_kms_commit(DRMID(crtc));
+ dpu_crtc_commit_kickoff(crtc);
+ }
+ }
+}
+
+static void dpu_kms_complete_commit(struct msm_kms *kms,
+ struct drm_atomic_state *old_state)
+{
+ struct dpu_kms *dpu_kms;
+ struct msm_drm_private *priv;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ if (!kms || !old_state)
+ return;
+ dpu_kms = to_dpu_kms(kms);
+
+ if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
+ return;
+ priv = dpu_kms->dev->dev_private;
+
+ DPU_ATRACE_BEGIN("kms_complete_commit");
+
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
+ dpu_crtc_complete_commit(crtc, old_crtc_state);
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ DPU_ATRACE_END("kms_complete_commit");
+}
+
+static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ int ret;
+
+ if (!kms || !crtc || !crtc->state) {
+ DPU_ERROR("invalid params\n");
+ return;
+ }
+
+ dev = crtc->dev;
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
+ return;
+ }
+
+ if (!crtc->state->active) {
+ DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+ return;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+ /*
+ * Wait for post-flush if necessary to delay before
+ * plane_cleanup. For example, wait for vsync in case of video
+ * mode panels. This may be a no-op for command mode panels.
+ */
+ trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
+ ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
+ if (ret && ret != -EWOULDBLOCK) {
+ DPU_ERROR("wait for commit done returned %d\n", ret);
+ break;
+ }
+ }
+}
+
+static void _dpu_kms_initialize_dsi(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ struct drm_encoder *encoder = NULL;
+ int i, rc;
+
+ /*TODO: Support two independent DSI connectors */
+ encoder = dpu_encoder_init(dev, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR_OR_NULL(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return;
+ }
+
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ if (!priv->dsi[i]) {
+ DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
+ return;
+ }
+
+ rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+ i, rc);
+ continue;
+ }
+ }
+}
+
+/**
+ * _dpu_kms_setup_displays - create encoders, bridges and connectors
+ * for underlying displays
+ * @dev: Pointer to drm device structure
+ * @priv: Pointer to private drm device data
+ * @dpu_kms: Pointer to dpu kms structure
+ */
+static void _dpu_kms_setup_displays(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+
+ /*
+ * Extend this function to initialize other
+ * types of displays.
+ */
+}
+
+static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid dev\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid dev_private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ for (i = 0; i < priv->num_crtcs; i++)
+ priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+ priv->num_crtcs = 0;
+
+ for (i = 0; i < priv->num_planes; i++)
+ priv->planes[i]->funcs->destroy(priv->planes[i]);
+ priv->num_planes = 0;
+
+ for (i = 0; i < priv->num_connectors; i++)
+ priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+ priv->num_connectors = 0;
+
+ for (i = 0; i < priv->num_encoders; i++)
+ priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+ priv->num_encoders = 0;
+}
+
+static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
+{
+ struct drm_device *dev;
+ struct drm_plane *primary_planes[MAX_PLANES], *plane;
+ struct drm_crtc *crtc;
+
+ struct msm_drm_private *priv;
+ struct dpu_mdss_cfg *catalog;
+
+ int primary_planes_idx = 0, i, ret;
+ int max_crtc_count;
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return -EINVAL;
+ }
+
+ dev = dpu_kms->dev;
+ priv = dev->dev_private;
+ catalog = dpu_kms->catalog;
+
+ /*
+ * Create encoder and query display drivers to create
+ * bridges and connectors
+ */
+ _dpu_kms_setup_displays(dev, priv, dpu_kms);
+
+ max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
+
+ /* Create the planes */
+ for (i = 0; i < catalog->sspp_count; i++) {
+ bool primary = true;
+
+ if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)
+ || primary_planes_idx >= max_crtc_count)
+ primary = false;
+
+ plane = dpu_plane_init(dev, catalog->sspp[i].id, primary,
+ (1UL << max_crtc_count) - 1, 0);
+ if (IS_ERR(plane)) {
+ DPU_ERROR("dpu_plane_init failed\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ priv->planes[priv->num_planes++] = plane;
+
+ if (primary)
+ primary_planes[primary_planes_idx++] = plane;
+ }
+
+ max_crtc_count = min(max_crtc_count, primary_planes_idx);
+
+ /* Create one CRTC per encoder */
+ for (i = 0; i < max_crtc_count; i++) {
+ crtc = dpu_crtc_init(dev, primary_planes[i]);
+ if (IS_ERR(crtc)) {
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ }
+
+ /* All CRTCs are compatible with all encoders */
+ for (i = 0; i < priv->num_encoders; i++)
+ priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+ return 0;
+fail:
+ _dpu_kms_drm_obj_destroy(dpu_kms);
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_device *dev;
+ int rc;
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return -EINVAL;
+ }
+
+ dev = dpu_kms->dev;
+
+ rc = _dpu_debugfs_init(dpu_kms);
+ if (rc)
+ DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
+
+ return rc;
+}
+#endif
+
+static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ return rate;
+}
+
+static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
+{
+ struct drm_device *dev;
+ int i;
+
+ dev = dpu_kms->dev;
+ if (!dev)
+ return;
+
+ if (dpu_kms->hw_intr)
+ dpu_hw_intr_destroy(dpu_kms->hw_intr);
+ dpu_kms->hw_intr = NULL;
+
+ if (dpu_kms->power_event)
+ dpu_power_handle_unregister_event(
+ &dpu_kms->phandle, dpu_kms->power_event);
+
+ /* safe to call these more than once during shutdown */
+ _dpu_debugfs_destroy(dpu_kms);
+ _dpu_kms_mmu_destroy(dpu_kms);
+
+ if (dpu_kms->catalog) {
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+ if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
+ dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
+ }
+ }
+
+ if (dpu_kms->rm_init)
+ dpu_rm_destroy(&dpu_kms->rm);
+ dpu_kms->rm_init = false;
+
+ if (dpu_kms->catalog)
+ dpu_hw_catalog_deinit(dpu_kms->catalog);
+ dpu_kms->catalog = NULL;
+
+ if (dpu_kms->core_client)
+ dpu_power_client_destroy(&dpu_kms->phandle,
+ dpu_kms->core_client);
+ dpu_kms->core_client = NULL;
+
+ if (dpu_kms->vbif[VBIF_NRT])
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+
+ if (dpu_kms->vbif[VBIF_RT])
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+
+ if (dpu_kms->mmio)
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
+ dpu_kms->mmio = NULL;
+}
+
+static void dpu_kms_destroy(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+
+ dpu_dbg_destroy();
+ _dpu_kms_hw_destroy(dpu_kms);
+}
+
+static int dpu_kms_pm_suspend(struct device *dev)
+{
+ struct drm_device *ddev;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct dpu_kms *dpu_kms;
+ int ret = 0, num_crtcs = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev_to_msm_kms(ddev))
+ return -EINVAL;
+
+ dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+ /* disable hot-plug polling */
+ drm_kms_helper_poll_disable(ddev);
+
+ /* acquire modeset lock(s) */
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ DPU_ATRACE_BEGIN("kms_pm_suspend");
+
+ ret = drm_modeset_lock_all_ctx(ddev, &ctx);
+ if (ret)
+ goto unlock;
+
+ /* save current state for resume */
+ if (dpu_kms->suspend_state)
+ drm_atomic_state_put(dpu_kms->suspend_state);
+ dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
+ if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
+ DRM_ERROR("failed to back up suspend state\n");
+ dpu_kms->suspend_state = NULL;
+ goto unlock;
+ }
+
+ /* create atomic state to disable all CRTCs */
+ state = drm_atomic_state_alloc(ddev);
+ if (IS_ERR_OR_NULL(state)) {
+ DRM_ERROR("failed to allocate crtc disable state\n");
+ goto unlock;
+ }
+
+ state->acquire_ctx = &ctx;
+
+ /* check for nothing to do */
+ if (num_crtcs == 0) {
+ DRM_DEBUG("all crtcs are already in the off state\n");
+ drm_atomic_state_put(state);
+ goto suspended;
+ }
+
+ /* commit the "disable all" state */
+ ret = drm_atomic_commit(state);
+ if (ret < 0) {
+ DRM_ERROR("failed to disable crtcs, %d\n", ret);
+ drm_atomic_state_put(state);
+ goto unlock;
+ }
+
+suspended:
+ dpu_kms->suspend_block = true;
+
+unlock:
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ DPU_ATRACE_END("kms_pm_suspend");
+ return 0;
+}
+
+static int dpu_kms_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev;
+ struct dpu_kms *dpu_kms;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev_to_msm_kms(ddev))
+ return -EINVAL;
+
+ dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+ DPU_ATRACE_BEGIN("kms_pm_resume");
+
+ drm_mode_config_reset(ddev);
+
+ drm_modeset_lock_all(ddev);
+
+ dpu_kms->suspend_block = false;
+
+ if (dpu_kms->suspend_state) {
+ dpu_kms->suspend_state->acquire_ctx =
+ ddev->mode_config.acquire_ctx;
+ ret = drm_atomic_commit(dpu_kms->suspend_state);
+ if (ret < 0) {
+ DRM_ERROR("failed to restore state, %d\n", ret);
+ drm_atomic_state_put(dpu_kms->suspend_state);
+ }
+ dpu_kms->suspend_state = NULL;
+ }
+ drm_modeset_unlock_all(ddev);
+
+ /* enable hot-plug polling */
+ drm_kms_helper_poll_enable(ddev);
+
+ DPU_ATRACE_END("kms_pm_resume");
+ return 0;
+}
+
+static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ bool cmd_mode)
+{
+ struct msm_display_info info;
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ int i, rc = 0;
+
+ memset(&info, 0, sizeof(info));
+
+ info.intf_type = encoder->encoder_type;
+ info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
+ MSM_DISPLAY_CAP_VID_MODE;
+
+ /* TODO: No support for DSI swap */
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ if (priv->dsi[i]) {
+ info.h_tile_instance[info.num_of_h_tiles] = i;
+ info.num_of_h_tiles++;
+ }
+ }
+
+ rc = dpu_encoder_setup(encoder->dev, encoder, &info);
+ if (rc)
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+ .hw_init = dpu_kms_hw_init,
+ .irq_preinstall = dpu_irq_preinstall,
+ .irq_postinstall = dpu_irq_postinstall,
+ .irq_uninstall = dpu_irq_uninstall,
+ .irq = dpu_irq,
+ .prepare_commit = dpu_kms_prepare_commit,
+ .commit = dpu_kms_commit,
+ .complete_commit = dpu_kms_complete_commit,
+ .wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
+ .enable_vblank = dpu_kms_enable_vblank,
+ .disable_vblank = dpu_kms_disable_vblank,
+ .check_modified_format = dpu_format_check_modified_format,
+ .get_format = dpu_get_msm_format,
+ .round_pixclk = dpu_kms_round_pixclk,
+ .pm_suspend = dpu_kms_pm_suspend,
+ .pm_resume = dpu_kms_pm_resume,
+ .destroy = dpu_kms_destroy,
+ .set_encoder_mode = _dpu_kms_set_encoder_mode,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = dpu_kms_debugfs_init,
+#endif
+};
+
+/* the caller needs to turn on the clocks before calling this */
+static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
+{
+ dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
+}
+
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
+{
+ struct msm_mmu *mmu;
+
+ mmu = dpu_kms->base.aspace->mmu;
+
+ mmu->funcs->detach(mmu, (const char **)iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_put(dpu_kms->base.aspace);
+
+ return 0;
+}
+
+static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
+{
+ struct iommu_domain *domain;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
+ domain = iommu_domain_alloc(&platform_bus_type);
+ if (!domain)
+ return 0;
+
+ aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
+ domain, "dpu1");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
+ goto fail;
+ }
+
+ dpu_kms->base.aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret) {
+ DPU_ERROR("failed to attach iommu %d\n", ret);
+ msm_gem_address_space_put(aspace);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ _dpu_kms_mmu_destroy(dpu_kms);
+
+ return ret;
+}
+
+static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
+ char *clock_name)
+{
+ struct dss_module_power *mp = &dpu_kms->mp;
+ int i;
+
+ for (i = 0; i < mp->num_clk; i++) {
+ if (!strcmp(mp->clk_config[i].clk_name, clock_name))
+ return &mp->clk_config[i];
+ }
+
+ return NULL;
+}
+
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
+{
+ struct dss_clk *clk;
+
+ clk = _dpu_kms_get_clk(dpu_kms, clock_name);
+ if (!clk)
+ return -EINVAL;
+
+ return clk_get_rate(clk->clk);
+}
+
+static void dpu_kms_handle_power_event(u32 event_type, void *usr)
+{
+ struct dpu_kms *dpu_kms = usr;
+
+ if (!dpu_kms)
+ return;
+
+ if (event_type == DPU_POWER_EVENT_POST_ENABLE)
+ dpu_vbif_init_memtypes(dpu_kms);
+}
+
+static int dpu_kms_hw_init(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ int i, rc = -EINVAL;
+
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ goto end;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+ dev = dpu_kms->dev;
+ if (!dev) {
+ DPU_ERROR("invalid device\n");
+ goto end;
+ }
+
+ rc = dpu_dbg_init(&dpu_kms->pdev->dev);
+ if (rc) {
+ DRM_ERROR("failed to init dpu dbg: %d\n", rc);
+ goto end;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ DPU_ERROR("invalid private data\n");
+ goto dbg_destroy;
+ }
+
+ dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
+ if (IS_ERR(dpu_kms->mmio)) {
+ rc = PTR_ERR(dpu_kms->mmio);
+ DPU_ERROR("mdp register memory map failed: %d\n", rc);
+ dpu_kms->mmio = NULL;
+ goto error;
+ }
+ DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+ dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
+
+ dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
+ if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+ rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+ DPU_ERROR("vbif register memory map failed: %d\n", rc);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+ goto error;
+ }
+ dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
+ dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
+ if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+ DPU_DEBUG("VBIF NRT is not defined");
+ } else {
+ dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
+ "vbif_nrt");
+ }
+
+ dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
+ if (IS_ERR(dpu_kms->reg_dma)) {
+ dpu_kms->reg_dma = NULL;
+ DPU_DEBUG("REG_DMA is not defined");
+ } else {
+ dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
+ }
+
+ dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
+ "core");
+ if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
+ rc = PTR_ERR(dpu_kms->core_client);
+ if (!dpu_kms->core_client)
+ rc = -EINVAL;
+ DPU_ERROR("dpu power client create failed: %d\n", rc);
+ dpu_kms->core_client = NULL;
+ goto error;
+ }
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ _dpu_kms_core_hw_rev_init(dpu_kms);
+
+ pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
+
+ dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
+ if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
+ rc = PTR_ERR(dpu_kms->catalog);
+ if (!dpu_kms->catalog)
+ rc = -EINVAL;
+ DPU_ERROR("catalog init failed: %d\n", rc);
+ dpu_kms->catalog = NULL;
+ goto power_error;
+ }
+
+ dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
+
+ /*
+ * Now we need to read the HW catalog and initialize resources such as
+ * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
+ */
+ rc = _dpu_kms_mmu_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
+ dpu_kms->dev);
+ if (rc) {
+ DPU_ERROR("rm init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ dpu_kms->rm_init = true;
+
+ dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
+ rc = PTR_ERR(dpu_kms->hw_mdp);
+ if (!dpu_kms->hw_mdp)
+ rc = -EINVAL;
+ DPU_ERROR("failed to get hw_mdp: %d\n", rc);
+ dpu_kms->hw_mdp = NULL;
+ goto power_error;
+ }
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+ dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx,
+ dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
+ rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
+ if (!dpu_kms->hw_vbif[vbif_idx])
+ rc = -EINVAL;
+ DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
+ dpu_kms->hw_vbif[vbif_idx] = NULL;
+ goto power_error;
+ }
+ }
+
+ rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
+ &dpu_kms->phandle,
+ _dpu_kms_get_clk(dpu_kms, "core"));
+ if (rc) {
+ DPU_ERROR("failed to init perf %d\n", rc);
+ goto perf_err;
+ }
+
+ dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+ rc = PTR_ERR(dpu_kms->hw_intr);
+ DPU_ERROR("hw_intr init failed: %d\n", rc);
+ dpu_kms->hw_intr = NULL;
+ goto hw_intr_init_err;
+ }
+
+ /*
+ * _dpu_kms_drm_obj_init should create the DRM related objects
+ * i.e. CRTCs, planes, encoders, connectors and so forth
+ */
+ rc = _dpu_kms_drm_obj_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("modeset init failed: %d\n", rc);
+ goto drm_obj_init_err;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * max crtc width is equal to the max mixer width * 2 and max height
+ * is 4K
+ */
+ dev->mode_config.max_width =
+ dpu_kms->catalog->caps->max_mixer_width * 2;
+ dev->mode_config.max_height = 4096;
+
+ /*
+ * Support format modifiers for compression etc.
+ */
+ dev->mode_config.allow_fb_modifiers = true;
+
+ /*
+ * Handle (re)initializations during power enable
+ */
+ dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms);
+ dpu_kms->power_event = dpu_power_handle_register_event(
+ &dpu_kms->phandle,
+ DPU_POWER_EVENT_POST_ENABLE,
+ dpu_kms_handle_power_event, dpu_kms, "kms");
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+
+drm_obj_init_err:
+ dpu_core_perf_destroy(&dpu_kms->perf);
+hw_intr_init_err:
+perf_err:
+power_error:
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+error:
+ _dpu_kms_hw_destroy(dpu_kms);
+dbg_destroy:
+ dpu_dbg_destroy();
+end:
+ return rc;
+}
+
+struct msm_kms *dpu_kms_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int irq;
+
+ if (!dev || !dev->dev_private) {
+ DPU_ERROR("drm device node invalid\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ priv = dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
+ if (!irq) {
+ DPU_ERROR("failed to get irq\n");
+ return ERR_PTR(-EINVAL);
+ }
+ dpu_kms->base.irq = irq;
+
+ return &dpu_kms->base;
+}
+
+static int dpu_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *ddev = dev_get_drvdata(master);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct dpu_kms *dpu_kms;
+ struct dss_module_power *mp;
+ int ret = 0;
+
+ dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
+ if (!dpu_kms)
+ return -ENOMEM;
+
+ mp = &dpu_kms->mp;
+ ret = msm_dss_parse_clock(pdev, mp);
+ if (ret) {
+ DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+ return ret;
+ }
+
+ dpu_power_resource_init(pdev, &dpu_kms->phandle);
+
+ platform_set_drvdata(pdev, dpu_kms);
+
+ msm_kms_init(&dpu_kms->base, &kms_funcs);
+ dpu_kms->dev = ddev;
+ dpu_kms->pdev = pdev;
+
+ pm_runtime_enable(&pdev->dev);
+ dpu_kms->rpm_enabled = true;
+
+ priv->kms = &dpu_kms->base;
+ return ret;
+}
+
+static void dpu_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ devm_kfree(&pdev->dev, mp->clk_config);
+ mp->num_clk = 0;
+
+ if (dpu_kms->rpm_enabled)
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct component_ops dpu_ops = {
+ .bind = dpu_bind,
+ .unbind = dpu_unbind,
+};
+
+static int dpu_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &dpu_ops);
+}
+
+static int dpu_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dpu_ops);
+ return 0;
+}
+
+static int __maybe_unused dpu_runtime_suspend(struct device *dev)
+{
+ int rc = -1;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct drm_device *ddev;
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ ddev = dpu_kms->dev;
+ if (!ddev) {
+ DPU_ERROR("invalid drm_device\n");
+ goto exit;
+ }
+
+ rc = dpu_power_resource_enable(&dpu_kms->phandle,
+ dpu_kms->core_client, false);
+ if (rc)
+ DPU_ERROR("resource disable failed: %d\n", rc);
+
+ rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ if (rc)
+ DPU_ERROR("clock disable failed rc:%d\n", rc);
+
+exit:
+ return rc;
+}
+
+static int __maybe_unused dpu_runtime_resume(struct device *dev)
+{
+ int rc = -1;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct drm_device *ddev;
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ ddev = dpu_kms->dev;
+ if (!ddev) {
+ DPU_ERROR("invalid drm_device\n");
+ goto exit;
+ }
+
+ rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ if (rc) {
+ DPU_ERROR("clock enable failed rc:%d\n", rc);
+ goto exit;
+ }
+
+ rc = dpu_power_resource_enable(&dpu_kms->phandle,
+ dpu_kms->core_client, true);
+ if (rc)
+ DPU_ERROR("resource enable failed: %d\n", rc);
+
+exit:
+ return rc;
+}
+
+static const struct dev_pm_ops dpu_pm_ops = {
+ SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+};
+
+static const struct of_device_id dpu_dt_match[] = {
+ { .compatible = "qcom,sdm845-dpu", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpu_dt_match);
+
+static struct platform_driver dpu_driver = {
+ .probe = dpu_dev_probe,
+ .remove = dpu_dev_remove,
+ .driver = {
+ .name = "msm_dpu",
+ .of_match_table = dpu_dt_match,
+ .pm = &dpu_pm_ops,
+ },
+};
+
+void __init msm_dpu_register(void)
+{
+ platform_driver_register(&dpu_driver);
+}
+
+void __exit msm_dpu_unregister(void)
+{
+ platform_driver_unregister(&dpu_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
new file mode 100644
index 000000000000..66d466628e2b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_KMS_H__
+#define __DPU_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "dpu_dbg.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_top.h"
+#include "dpu_rm.h"
+#include "dpu_power_handle.h"
+#include "dpu_irq.h"
+#include "dpu_core_perf.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * DPU_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG(fmt, ...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_KMS)) \
+ DRM_DEBUG(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * DPU_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG_DRIVER(fmt, ...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+			DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ * This macro is similar to the standard ktime_compare() function, but
+ * attempts to also handle ktime overflows.
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+ ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
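+
+/*
+ * Illustrative only: if B == KTIME_MAX - 5 and A has wrapped around to
+ * KTIME_MIN + 5, ktime_compare() reports A < B, while
+ * ktime_compare_safe() evaluates ktime_sub(A, B) == 11 > 0 and reports
+ * A > B, the intended ordering across the overflow.
+ */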
+
+#define DPU_NAME_SIZE 12
+
+/* timeout in frames waiting for frame done */
+#define DPU_FRAME_DONE_TIMEOUT 60
+
+/**
+ * struct dpu_irq_callback - IRQ callback handlers
+ * @list: list node for the per-irq callback list
+ * @func: interrupt handler callback
+ * @arg: argument passed to the handler
+ */
+struct dpu_irq_callback {
+ struct list_head list;
+ void (*func)(void *arg, int irq_idx);
+ void *arg;
+};
+
+/**
+ * struct dpu_irq - IRQ callback registration info
+ * @total_irqs: total number of irq_idx obtained from HW interrupts mapping
+ * @irq_cb_tbl: array of IRQ callback lists, one per irq_idx
+ * @enable_counts: array of IRQ enable counts
+ * @irq_counts: array of IRQ counts
+ * @cb_lock: callback lock
+ * @debugfs_file: debugfs file for irq statistics
+ */
+struct dpu_irq {
+ u32 total_irqs;
+ struct list_head *irq_cb_tbl;
+ atomic_t *enable_counts;
+ atomic_t *irq_counts;
+ spinlock_t cb_lock;
+ struct dentry *debugfs_file;
+};
+
+struct dpu_kms {
+ struct msm_kms base;
+ struct drm_device *dev;
+ int core_rev;
+ struct dpu_mdss_cfg *catalog;
+
+ struct dpu_power_handle phandle;
+ struct dpu_power_client *core_client;
+ struct dpu_power_event *power_event;
+
+ /* directory entry for debugfs */
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_danger;
+ struct dentry *debugfs_vbif;
+
+ /* io/register spaces: */
+ void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
+ unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
+
+ struct regulator *vdd;
+ struct regulator *mmagic;
+ struct regulator *venus;
+
+ struct dpu_hw_intr *hw_intr;
+ struct dpu_irq irq_obj;
+
+ struct dpu_core_perf perf;
+
+ /* saved atomic state during system suspend */
+ struct drm_atomic_state *suspend_state;
+ bool suspend_block;
+
+ struct dpu_rm rm;
+ bool rm_init;
+
+ struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
+ struct dpu_hw_mdp *hw_mdp;
+
+ bool has_danger_ctrl;
+
+ struct platform_device *pdev;
+ bool rpm_enabled;
+ struct dss_module_power mp;
+};
+
+struct vsync_info {
+ u32 frame_count;
+ u32 line_count;
+};
+
+#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+
+/* get struct msm_kms * from drm_device * */
+#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
+ ((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
+
+/**
+ * dpu_kms_is_suspend_state - whether or not the system is pm suspended
+ * @dev: Pointer to drm device
+ * Return: Suspend status
+ */
+static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
+{
+ if (!ddev_to_msm_kms(dev))
+ return false;
+
+ return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
+}
+
+/**
+ * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
+ * suspend status
+ * @dev: Pointer to drm device
+ * Return: True if commits should be rejected due to pm suspend
+ */
+static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
+{
+ if (!dpu_kms_is_suspend_state(dev))
+ return false;
+
+ return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
+}
+
+/**
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at:
+ *
+ * Documentation/filesystems/debugfs.txt
+ *
+ * @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
+ * @dpu_debugfs_create_regset32: Create 32-bit register dump file
+ * @dpu_debugfs_get_root: Get root dentry for DPU_KMS's debugfs node
+ */
+
+/**
+ * Companion structure for dpu_debugfs_create_regset32. Do not initialize the
+ * members of this structure explicitly; use dpu_debugfs_setup_regset32 instead.
+ */
+struct dpu_debugfs_regset32 {
+ uint32_t offset;
+ uint32_t blk_len;
+ struct dpu_kms *dpu_kms;
+};
+
+/**
+ * dpu_debugfs_setup_regset32 - Initialize register block definition for debugfs
+ * This function is meant to initialize dpu_debugfs_regset32 structures for use
+ * with dpu_debugfs_create_regset32.
+ * @regset: opaque register definition structure
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets do not need to be provided. The 'read' function simply outputs
+ * sequential register values over a specified range.
+ *
+ * Similar to the related debugfs_create_regset32 API, the structure pointed to
+ * by regset needs to persist for the lifetime of the created file. The calling
+ * code is responsible for initialization/management of this structure.
+ *
+ * The structure pointed to by regset is meant to be opaque. Please use
+ * dpu_debugfs_setup_regset32 to initialize it.
+ *
+ * @name: File name within debugfs
+ * @mode: File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @regset: Pointer to persistent register block definition
+ *
+ * Return: dentry pointer for newly created file, use either debugfs_remove()
+ * or debugfs_remove_recursive() (on a parent directory) to remove the
+ * file
+ */
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent, struct dpu_debugfs_regset32 *regset);
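+
+/*
+ * Minimal usage sketch (illustrative; the 0x1000/0x100 offset and length
+ * are hypothetical). The regset must outlive the debugfs file, e.g. by
+ * being embedded in a driver-lifetime structure:
+ *
+ *	static struct dpu_debugfs_regset32 regset;
+ *
+ *	dpu_debugfs_setup_regset32(&regset, 0x1000, 0x100, dpu_kms);
+ *	dpu_debugfs_create_regset32("dump", 0400,
+ *			dpu_debugfs_get_root(dpu_kms), &regset);
+ */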
+
+/**
+ * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @dpu_kms: Pointer to DPU's KMS structure
+ *
+ * Return: dentry pointer for DPU's debugfs location
+ */
+void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
+
+/**
+ * DPU info management functions
+ * These functions/definitions allow for building up a 'dpu_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define DPU_KMS_INFO_MAX_SIZE 4096
+
+/**
+ * Vblank enable/disable functions
+ */
+int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+void dpu_kms_encoder_enable(struct drm_encoder *encoder);
+
+/**
+ * dpu_kms_get_clk_rate() - get the clock rate
+ * @dpu_kms: pointer to dpu_kms structure
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate
+ */
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
+
+#endif /* __DPU_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
new file mode 100644
index 000000000000..9e533b86682c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include "dpu_kms.h"
+
+#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
+
+#define HW_INTR_STATUS 0x0010
+
+struct dpu_mdss {
+ struct msm_mdss base;
+ void __iomem *mmio;
+ unsigned long mmio_len;
+ u32 hwversion;
+ struct dss_module_power mp;
+ struct dpu_irq_controller irq_controller;
+};
+
+static irqreturn_t dpu_mdss_irq(int irq, void *arg)
+{
+ struct dpu_mdss *dpu_mdss = arg;
+ u32 interrupts;
+
+ interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
+
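+	/*
+	 * Illustrative decode order: a status of 0x9 (bits 0 and 3 set)
+	 * dispatches hwirq 3 first (fls() - 1), then hwirq 0, with each
+	 * bit cleared once its mapped handler has run.
+	 */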
+ while (interrupts) {
+ irq_hw_number_t hwirq = fls(interrupts) - 1;
+ unsigned int mapping;
+ int rc;
+
+ mapping = irq_find_mapping(dpu_mdss->irq_controller.domain,
+ hwirq);
+ if (mapping == 0) {
+ DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
+ return IRQ_NONE;
+ }
+
+ rc = generic_handle_irq(mapping);
+ if (rc < 0) {
+ DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
+ hwirq, mapping, rc);
+ return IRQ_NONE;
+ }
+
+ interrupts &= ~(1 << hwirq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void dpu_mdss_irq_mask(struct irq_data *irqd)
+{
+ struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+	/* order the mask update against surrounding accesses */
+	smp_mb__before_atomic();
+	clear_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+	smp_mb__after_atomic();
+}
+
+static void dpu_mdss_irq_unmask(struct irq_data *irqd)
+{
+ struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+	/* order the mask update against surrounding accesses */
+	smp_mb__before_atomic();
+	set_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+	smp_mb__after_atomic();
+}
+
+static struct irq_chip dpu_mdss_irq_chip = {
+ .name = "dpu_mdss",
+ .irq_mask = dpu_mdss_irq_mask,
+ .irq_unmask = dpu_mdss_irq_unmask,
+};
+
+static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct dpu_mdss *dpu_mdss = domain->host_data;
+ int ret;
+
+ irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
+ ret = irq_set_chip_data(irq, dpu_mdss);
+
+ return ret;
+}
+
+static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
+ .map = dpu_mdss_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
+{
+ struct device *dev;
+ struct irq_domain *domain;
+
+ dev = dpu_mdss->base.dev->dev;
+
+ domain = irq_domain_add_linear(dev->of_node, 32,
+ &dpu_mdss_irqdomain_ops, dpu_mdss);
+ if (!domain) {
+ DPU_ERROR("failed to add irq_domain\n");
+ return -EINVAL;
+ }
+
+ dpu_mdss->irq_controller.enabled_mask = 0;
+ dpu_mdss->irq_controller.domain = domain;
+
+ return 0;
+}
+
+static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+{
+ if (dpu_mdss->irq_controller.domain) {
+ irq_domain_remove(dpu_mdss->irq_controller.domain);
+ dpu_mdss->irq_controller.domain = NULL;
+ }
+ return 0;
+}
+
+static int dpu_mdss_enable(struct msm_mdss *mdss)
+{
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+ int ret;
+
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ if (ret)
+ DPU_ERROR("clock enable failed, ret:%d\n", ret);
+
+ return ret;
+}
+
+static int dpu_mdss_disable(struct msm_mdss *mdss)
+{
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+ int ret;
+
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ if (ret)
+ DPU_ERROR("clock disable failed, ret:%d\n", ret);
+
+ return ret;
+}
+
+static void dpu_mdss_destroy(struct drm_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+
+ _dpu_mdss_irq_domain_fini(dpu_mdss);
+
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ devm_kfree(&pdev->dev, mp->clk_config);
+
+ if (dpu_mdss->mmio)
+ devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+ dpu_mdss->mmio = NULL;
+
+ pm_runtime_disable(dev->dev);
+ priv->mdss = NULL;
+}
+
+static const struct msm_mdss_funcs mdss_funcs = {
+ .enable = dpu_mdss_enable,
+ .disable = dpu_mdss_disable,
+ .destroy = dpu_mdss_destroy,
+};
+
+int dpu_mdss_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct msm_drm_private *priv = dev->dev_private;
+ struct resource *res;
+ struct dpu_mdss *dpu_mdss;
+ struct dss_module_power *mp;
+ int ret = 0;
+
+ dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
+ if (!dpu_mdss)
+ return -ENOMEM;
+
+ dpu_mdss->mmio = msm_ioremap(pdev, "mdss", "mdss");
+ if (IS_ERR(dpu_mdss->mmio))
+ return PTR_ERR(dpu_mdss->mmio);
+
+ DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdss");
+ if (!res) {
+ DRM_ERROR("failed to get memory resource for mdss\n");
+ return -ENOMEM;
+ }
+ dpu_mdss->mmio_len = resource_size(res);
+
+ mp = &dpu_mdss->mp;
+ ret = msm_dss_parse_clock(pdev, mp);
+ if (ret) {
+ DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+ goto clk_parse_err;
+ }
+
+ dpu_mdss->base.dev = dev;
+ dpu_mdss->base.funcs = &mdss_funcs;
+
+ ret = _dpu_mdss_irq_domain_add(dpu_mdss);
+ if (ret)
+ goto irq_domain_error;
+
+ ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+ dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
+ if (ret) {
+ DPU_ERROR("failed to init irq: %d\n", ret);
+ goto irq_error;
+ }
+
+ pm_runtime_enable(dev->dev);
+
+ pm_runtime_get_sync(dev->dev);
+ dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio);
+ pm_runtime_put_sync(dev->dev);
+
+ priv->mdss = &dpu_mdss->base;
+
+ return ret;
+
+irq_error:
+ _dpu_mdss_irq_domain_fini(dpu_mdss);
+irq_domain_error:
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+clk_parse_err:
+ devm_kfree(&pdev->dev, mp->clk_config);
+ if (dpu_mdss->mmio)
+ devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+ dpu_mdss->mmio = NULL;
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
new file mode 100644
index 000000000000..b640e39ebaca
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -0,0 +1,1963 @@
+/*
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_vbif.h"
+#include "dpu_plane.h"
+
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DPU_DEBUG("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
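+/*
+ * e.g. DECIMATED_DIMENSION(1080, 2) = (1080 + 3) >> 2 = 270, i.e. the
+ * ceiling of 1080 / 2^2
+ */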
+#define PHASE_STEP_SHIFT 21
+#define PHASE_STEP_UNIT_SCALE ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL 15
+
+#define SHARP_STRENGTH_DEFAULT 32
+#define SHARP_EDGE_THR_DEFAULT 112
+#define SHARP_SMOOTH_THR_DEFAULT 8
+#define SHARP_NOISE_THR_DEFAULT 2
+
+#define DPU_NAME_SIZE 12
+
+#define DPU_PLANE_COLOR_FILL_FLAG BIT(31)
+#define DPU_ZPOS_MAX 255
+
+/* multirect rect index */
+enum {
+ R0,
+ R1,
+ R_MAX
+};
+
+#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
+#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+
+#define DEFAULT_REFRESH_RATE 60
+
+/**
+ * enum dpu_plane_qos - Different qos configurations for each pipe
+ *
+ * @DPU_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @DPU_PLANE_QOS_VBLANK_AMORTIZE: Enables amortization within pipe.
+ *	This configuration is mutually exclusive with VBLANK_CTRL.
+ * @DPU_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum dpu_plane_qos {
+ DPU_PLANE_QOS_VBLANK_CTRL = BIT(0),
+ DPU_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
+ DPU_PLANE_QOS_PANIC_CTRL = BIT(2),
+};
+
+/*
+ * struct dpu_plane - local dpu plane structure
+ * @aspace: address space pointer
+ * @csc_ptr: Points to the dpu_csc_cfg structure currently in use
+ * @mplane_list: List of multirect planes of the same pipe
+ * @catalog: Points to dpu catalog structure
+ * @revalidate: force revalidation of all the plane properties
+ */
+struct dpu_plane {
+ struct drm_plane base;
+
+ struct mutex lock;
+
+ enum dpu_sspp pipe;
+ uint32_t features; /* capabilities from catalog */
+ uint32_t nformats;
+ uint32_t formats[64];
+
+ struct dpu_hw_pipe *pipe_hw;
+ struct dpu_hw_pipe_cfg pipe_cfg;
+ struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
+ uint32_t color_fill;
+ bool is_error;
+ bool is_rt_pipe;
+ bool is_virtual;
+ struct list_head mplane_list;
+ struct dpu_mdss_cfg *catalog;
+
+ struct dpu_csc_cfg *csc_ptr;
+
+ const struct dpu_sspp_sub_blks *pipe_sblk;
+ char pipe_name[DPU_NAME_SIZE];
+
+ /* debugfs related stuff */
+ struct dentry *debugfs_root;
+ struct dpu_debugfs_regset32 debugfs_src;
+ struct dpu_debugfs_regset32 debugfs_scaler;
+ struct dpu_debugfs_regset32 debugfs_csc;
+ bool debugfs_default_scale;
+};
+
+#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
+
+static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv;
+
+ if (!plane || !plane->dev)
+ return NULL;
+ priv = plane->dev->dev_private;
+ if (!priv)
+ return NULL;
+ return to_dpu_kms(priv->kms);
+}
+
+static bool dpu_plane_enabled(struct drm_plane_state *state)
+{
+ return state && state->fb && state->crtc;
+}
+
+static bool dpu_plane_sspp_enabled(struct drm_plane_state *state)
+{
+ return state && state->crtc;
+}
+
+/**
+ * _dpu_plane_calc_fill_level - calculate fill level of the given source format
+ * @plane: Pointer to drm plane
+ * @fmt: Pointer to source buffer format
+ * @src_width: width of source buffer
+ * Return: fill level corresponding to the source buffer/format or 0 if error
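+ *
+ * Worked example (hypothetical numbers, not taken from any catalog): an
+ * NV12 source with fmt->bpp == 1, src_width == 1024 and a pixel_ram_size
+ * of 40960 bytes gives total_fl = (40960 / 2) / ((1024 + 32) * 1) = 19.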
+ */
+static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+ const struct dpu_format *fmt, u32 src_width)
+{
+ struct dpu_plane *pdpu, *tmp;
+ struct dpu_plane_state *pstate;
+ u32 fixed_buff_size;
+ u32 total_fl;
+
+ if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
+ DPU_ERROR("invalid arguments\n");
+ return 0;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(plane->state);
+ fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
+
+ list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
+ if (!dpu_plane_enabled(tmp->base.state))
+ continue;
+ DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
+ pdpu->base.base.id, tmp->base.base.id,
+ src_width,
+ drm_rect_width(&tmp->pipe_cfg.src_rect));
+ src_width = max_t(u32, src_width,
+ drm_rect_width(&tmp->pipe_cfg.src_rect));
+ }
+
+ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ if (fmt->chroma_sample == DPU_CHROMA_420) {
+ /* NV12 */
+ total_fl = (fixed_buff_size / 2) /
+ ((src_width + 32) * fmt->bpp);
+ } else {
+ /* non NV12 */
+ total_fl = (fixed_buff_size / 2) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ }
+ } else {
+ if (pstate->multirect_mode == DPU_SSPP_MULTIRECT_PARALLEL) {
+ total_fl = (fixed_buff_size / 2) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ } else {
+ total_fl = (fixed_buff_size) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ }
+ }
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
+ plane->base.id, pdpu->pipe - SSPP_VIG0,
+ (char *)&fmt->base.pixel_format,
+ src_width, total_fl);
+
+ return total_fl;
+}
+
+/**
+ * _dpu_plane_get_qos_lut - get LUT mapping based on fill level
+ * @tbl: Pointer to LUT table
+ * @total_fl: fill level
+ * Return: LUT setting corresponding to the fill level
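+ *
+ * For example, with hypothetical entries {.fl = 10, .lut = A} and
+ * {.fl = 0, .lut = B}, a total_fl of 4 returns A, while a total_fl of 20
+ * falls through the loop and returns B, since a last entry with fl == 0
+ * acts as the default.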
+ */
+static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ u32 total_fl)
+{
+ int i;
+
+ if (!tbl || !tbl->nentry || !tbl->entries)
+ return 0;
+
+ for (i = 0; i < tbl->nentry; i++)
+ if (total_fl <= tbl->entries[i].fl)
+ return tbl->entries[i].lut;
+
+ /* if last fl is zero, use as default */
+ if (!tbl->entries[i-1].fl)
+ return tbl->entries[i-1].lut;
+
+ return 0;
+}
+
+/**
+ * _dpu_plane_set_qos_lut - set QoS LUT of the given plane
+ * @plane: Pointer to drm plane
+ * @fb: Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ const struct dpu_format *fmt = NULL;
+ u64 qos_lut;
+ u32 total_fl = 0, lut_usage;
+
+ if (!plane || !fb) {
+ DPU_ERROR("invalid arguments plane %d fb %d\n",
+ plane != 0, fb != 0);
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_creq_lut) {
+ return;
+ }
+
+ if (!pdpu->is_rt_pipe) {
+ lut_usage = DPU_QOS_LUT_USAGE_NRT;
+ } else {
+ fmt = dpu_get_dpu_format_ext(
+ fb->format->format,
+ fb->modifier);
+ total_fl = _dpu_plane_calc_fill_level(plane, fmt,
+ drm_rect_width(&pdpu->pipe_cfg.src_rect));
+
+ if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
+ lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
+ else
+ lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
+ }
+
+ qos_lut = _dpu_plane_get_qos_lut(
+ &pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
+
+ pdpu->pipe_qos_cfg.creq_lut = qos_lut;
+
+ trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
+ (fmt) ? fmt->base.pixel_format : 0,
+ pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
+ pdpu->is_rt_pipe, total_fl, qos_lut);
+
+ pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_danger_lut - set danger/safe LUT of the given plane
+ * @plane: Pointer to drm plane
+ * @fb: Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ const struct dpu_format *fmt = NULL;
+ u32 danger_lut, safe_lut;
+
+ if (!plane || !fb) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_danger_safe_lut) {
+ return;
+ }
+
+ if (!pdpu->is_rt_pipe) {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_NRT];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_NRT];
+ } else {
+ fmt = dpu_get_dpu_format_ext(
+ fb->format->format,
+ fb->modifier);
+
+ if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_LINEAR];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_LINEAR];
+ } else {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_MACROTILE];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_MACROTILE];
+ }
+ }
+
+ pdpu->pipe_qos_cfg.danger_lut = danger_lut;
+ pdpu->pipe_qos_cfg.safe_lut = safe_lut;
+
+ trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
+ (fmt) ? fmt->base.pixel_format : 0,
+ (fmt) ? fmt->fetch_mode : 0,
+ pdpu->pipe_qos_cfg.danger_lut,
+ pdpu->pipe_qos_cfg.safe_lut);
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
+ fmt ? fmt->fetch_mode : -1,
+ pdpu->pipe_qos_cfg.danger_lut,
+ pdpu->pipe_qos_cfg.safe_lut);
+
+ pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw,
+ &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_qos_ctrl - set QoS control of the given plane
+ * @plane: Pointer to drm plane
+ * @enable: true to enable QoS control
+ * @flags: QoS control mode (enum dpu_plane_qos)
+ */
+static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
+ bool enable, u32 flags)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_qos_ctrl) {
+ return;
+ }
+
+ if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
+ pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
+ pdpu->pipe_qos_cfg.danger_vblank =
+ pdpu->pipe_sblk->danger_vblank;
+ pdpu->pipe_qos_cfg.vblank_en = enable;
+ }
+
+ if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) {
+ /* this feature overrules previous VBLANK_CTRL */
+ pdpu->pipe_qos_cfg.vblank_en = false;
+ pdpu->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+ }
+
+ if (flags & DPU_PLANE_QOS_PANIC_CTRL)
+ pdpu->pipe_qos_cfg.danger_safe_en = enable;
+
+ if (!pdpu->is_rt_pipe) {
+ pdpu->pipe_qos_cfg.vblank_en = false;
+ pdpu->pipe_qos_cfg.danger_safe_en = false;
+ }
+
+ DPU_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ pdpu->pipe_qos_cfg.danger_safe_en,
+ pdpu->pipe_qos_cfg.vblank_en,
+ pdpu->pipe_qos_cfg.creq_vblank,
+ pdpu->pipe_qos_cfg.danger_vblank,
+ pdpu->is_rt_pipe);
+
+ pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw,
+ &pdpu->pipe_qos_cfg);
+}
+
+int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+ struct dpu_plane *pdpu;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->is_rt_pipe)
+ goto end;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+end:
+ return 0;
+}
+
+/**
+ * _dpu_plane_set_ot_limit - set OT limit for the given plane
+ * @plane: Pointer to drm plane
+ * @crtc: Pointer to drm crtc
+ */
+static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
+ struct drm_crtc *crtc)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_vbif_set_ot_params ot_params;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev || !crtc) {
+ DPU_ERROR("invalid arguments plane %d crtc %d\n",
+ plane != 0, crtc != 0);
+ return;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR("invalid pipe reference\n");
+ return;
+ }
+
+ memset(&ot_params, 0, sizeof(ot_params));
+ ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+ ot_params.num = pdpu->pipe_hw->idx - SSPP_NONE;
+ ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
+ ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+ ot_params.is_wfd = !pdpu->is_rt_pipe;
+ ot_params.frame_rate = crtc->mode.vrefresh;
+ ot_params.vbif_idx = VBIF_RT;
+ ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+ ot_params.rd = true;
+
+ dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+}
+
+/**
+ * _dpu_plane_set_qos_remap - set vbif QoS remap of the given plane
+ * @plane: Pointer to drm plane
+ */
+static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_vbif_set_qos_params qos_params;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR("invalid pipe reference\n");
+ return;
+ }
+
+ memset(&qos_params, 0, sizeof(qos_params));
+ qos_params.vbif_idx = VBIF_RT;
+ qos_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+ qos_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+ qos_params.num = pdpu->pipe_hw->idx - SSPP_VIG0;
+ qos_params.is_rt = pdpu->is_rt_pipe;
+
+ DPU_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
+ plane->base.id, qos_params.num,
+ qos_params.vbif_idx,
+ qos_params.xin_id, qos_params.is_rt,
+ qos_params.clk_ctrl);
+
+ dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
+}
+
+/**
+ * _dpu_plane_get_aspace - gets the address space
+ * @pdpu: Pointer to DPU plane object
+ * @pstate: Pointer to DPU plane state
+ * @aspace: Pointer to address space pointer to be filled in
+ * Return: 0 on success, negative errno on failure
+ */
+static int _dpu_plane_get_aspace(
+ struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ struct msm_gem_address_space **aspace)
+{
+ struct dpu_kms *kms;
+
+ if (!pdpu || !pstate || !aspace) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_plane_get_kms(&pdpu->base);
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ *aspace = kms->base.aspace;
+
+ return 0;
+}
+
+static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+ struct dpu_plane_state *pstate,
+ struct dpu_hw_pipe_cfg *pipe_cfg,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ struct msm_gem_address_space *aspace = NULL;
+ int ret;
+
+ if (!plane || !pstate || !pipe_cfg || !fb) {
+ DPU_ERROR(
+ "invalid arg(s), plane %d state %d cfg %d fb %d\n",
+ plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR_PLANE(pdpu, "invalid pipe_hw\n");
+ return;
+ }
+
+ ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "Failed to get aspace %d\n", ret);
+ return;
+ }
+
+ ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
+ if (ret == -EAGAIN)
+ DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n");
+ else if (ret)
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+ else if (pdpu->pipe_hw->ops.setup_sourceaddress) {
+ trace_dpu_plane_set_scanout(pdpu->pipe_hw->idx,
+ &pipe_cfg->layout,
+ pstate->multirect_index);
+ pdpu->pipe_hw->ops.setup_sourceaddress(pdpu->pipe_hw, pipe_cfg,
+ pstate->multirect_index);
+ }
+}
+
+static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
+ struct dpu_hw_scaler3_cfg *scale_cfg,
+ const struct dpu_format *fmt,
+ uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+ uint32_t i;
+
+ if (!pdpu || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h ||
+ !chroma_subsmpl_v) {
+ DPU_ERROR(
+ "pdpu %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n",
+ !!pdpu, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h,
+ chroma_subsmpl_v);
+ return;
+ }
+
+ memset(scale_cfg, 0, sizeof(*scale_cfg));
+ memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
+ mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0] =
+ mult_frac((1 << PHASE_STEP_SHIFT), src_h, dst_h);
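+	/*
+	 * Illustrative: downscaling 1920 -> 960 horizontally gives
+	 * phase_step_x = mult_frac(1 << 21, 1920, 960) = 2 << 21, i.e.
+	 * the scaler advances two source pixels per destination pixel
+	 * in Q21 fixed point.
+	 */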
+
+
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0] / chroma_subsmpl_v;
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0] / chroma_subsmpl_h;
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_2] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2];
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_2] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2];
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_3] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0];
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_3] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0];
+
+ for (i = 0; i < DPU_MAX_PLANES; i++) {
+ scale_cfg->src_width[i] = src_w;
+ scale_cfg->src_height[i] = src_h;
+ if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+ scale_cfg->src_width[i] /= chroma_subsmpl_h;
+ scale_cfg->src_height[i] /= chroma_subsmpl_v;
+ }
+ scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+ pstate->pixel_ext.num_ext_pxls_top[i] =
+ scale_cfg->src_height[i];
+ pstate->pixel_ext.num_ext_pxls_left[i] =
+ scale_cfg->src_width[i];
+ }
+ if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+ && (src_w == dst_w))
+ return;
+
+ scale_cfg->dst_width = dst_w;
+ scale_cfg->dst_height = dst_h;
+ scale_cfg->y_rgb_filter_cfg = DPU_SCALE_BIL;
+ scale_cfg->uv_filter_cfg = DPU_SCALE_BIL;
+ scale_cfg->alpha_filter_cfg = DPU_SCALE_ALPHA_BIL;
+ scale_cfg->lut_flag = 0;
+ scale_cfg->blend_cfg = 1;
+ scale_cfg->enable = 1;
+}
+
+static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+{
+ static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+ };
+ static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+ };
+
+ if (!pdpu) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->features)
+ pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc10_YUV2RGB_601L;
+ else
+ pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc_YUV2RGB_601L;
+
+ DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
+ pdpu->csc_ptr->csc_mv[0],
+ pdpu->csc_ptr->csc_mv[1],
+ pdpu->csc_ptr->csc_mv[2]);
+}
+
+static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ const struct dpu_format *fmt, bool color_fill)
+{
+ struct dpu_hw_pixel_ext *pe;
+ uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+
+ if (!pdpu || !fmt || !pstate) {
+ DPU_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
+ pdpu != 0, fmt != 0, pstate != 0);
+ return;
+ }
+
+ pe = &pstate->pixel_ext;
+
+ /* don't chroma subsample if decimating */
+ chroma_subsmpl_h =
+ drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
+ chroma_subsmpl_v =
+ drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
+
+ /* update scaler. calculate default config for QSEED3 */
+ _dpu_plane_setup_scaler3(pdpu, pstate,
+ drm_rect_width(&pdpu->pipe_cfg.src_rect),
+ drm_rect_height(&pdpu->pipe_cfg.src_rect),
+ drm_rect_width(&pdpu->pipe_cfg.dst_rect),
+ drm_rect_height(&pdpu->pipe_cfg.dst_rect),
+ &pstate->scaler3_cfg, fmt,
+ chroma_subsmpl_h, chroma_subsmpl_v);
+}
+
+/**
+ * _dpu_plane_color_fill - enables color fill on plane
+ * @pdpu: Pointer to DPU plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
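+ *	(e.g. color = 0x0000FF with alpha = 0xFF is programmed to the
+ *	hardware as 0xFF0000FF, i.e. opaque red)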
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
+ uint32_t color, uint32_t alpha)
+{
+ const struct dpu_format *fmt;
+ const struct drm_plane *plane;
+ struct dpu_plane_state *pstate;
+
+ if (!pdpu || !pdpu->base.state) {
+ DPU_ERROR("invalid plane\n");
+ return -EINVAL;
+ }
+
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR_PLANE(pdpu, "invalid plane h/w pointer\n");
+ return -EINVAL;
+ }
+
+ plane = &pdpu->base;
+ pstate = to_dpu_plane_state(plane->state);
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /*
+ * select fill format to match user property expectation,
+ * h/w only supports RGB variants
+ */
+ fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
+
+ /* update sspp */
+ if (fmt && pdpu->pipe_hw->ops.setup_solidfill) {
+ pdpu->pipe_hw->ops.setup_solidfill(pdpu->pipe_hw,
+ (color & 0xFFFFFF) | ((alpha & 0xFF) << 24),
+ pstate->multirect_index);
+
+ /* override scaler/decimation if solid fill */
+ pdpu->pipe_cfg.src_rect.x1 = 0;
+ pdpu->pipe_cfg.src_rect.y1 = 0;
+ pdpu->pipe_cfg.src_rect.x2 =
+ drm_rect_width(&pdpu->pipe_cfg.dst_rect);
+ pdpu->pipe_cfg.src_rect.y2 =
+ drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, true);
+
+ if (pdpu->pipe_hw->ops.setup_format)
+ pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw,
+ fmt, DPU_SSPP_SOLID_FILL,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_rects)
+ pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+ &pdpu->pipe_cfg,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_pe)
+ pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+ &pstate->pixel_ext);
+
+ if (pdpu->pipe_hw->ops.setup_scaler &&
+ pstate->multirect_index != DPU_SSPP_RECT_1)
+ pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+ &pdpu->pipe_cfg, &pstate->pixel_ext,
+ &pstate->scaler3_cfg);
+ }
+
+ return 0;
+}
+
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
+{
+ struct dpu_plane_state *pstate;
+
+ if (!drm_state)
+ return;
+
+ pstate = to_dpu_plane_state(drm_state);
+
+ pstate->multirect_index = DPU_SSPP_RECT_SOLO;
+ pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+}
+
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
+{
+ struct dpu_plane_state *pstate[R_MAX];
+ const struct drm_plane_state *drm_state[R_MAX];
+ struct drm_rect src[R_MAX], dst[R_MAX];
+ struct dpu_plane *dpu_plane[R_MAX];
+ const struct dpu_format *fmt[R_MAX];
+ int i, buffer_lines;
+ unsigned int max_tile_height = 1;
+ bool parallel_fetch_qualified = true;
+ bool has_tiled_rect = false;
+
+ for (i = 0; i < R_MAX; i++) {
+ const struct msm_format *msm_fmt;
+
+ drm_state[i] = i ? plane->r1 : plane->r0;
+ msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
+ fmt[i] = to_dpu_format(msm_fmt);
+
+ if (DPU_FORMAT_IS_UBWC(fmt[i])) {
+ has_tiled_rect = true;
+ if (fmt[i]->tile_height > max_tile_height)
+ max_tile_height = fmt[i]->tile_height;
+ }
+ }
+
+ for (i = 0; i < R_MAX; i++) {
+ int width_threshold;
+
+ pstate[i] = to_dpu_plane_state(drm_state[i]);
+ dpu_plane[i] = to_dpu_plane(drm_state[i]->plane);
+
+ if (pstate[i] == NULL) {
+ DPU_ERROR("DPU plane state of plane id %d is NULL\n",
+ drm_state[i]->plane->base.id);
+ return -EINVAL;
+ }
+
+ src[i].x1 = drm_state[i]->src_x >> 16;
+ src[i].y1 = drm_state[i]->src_y >> 16;
+ src[i].x2 = src[i].x1 + (drm_state[i]->src_w >> 16);
+ src[i].y2 = src[i].y1 + (drm_state[i]->src_h >> 16);
+
+ dst[i] = drm_plane_state_dest(drm_state[i]);
+
+ if (drm_rect_calc_hscale(&src[i], &dst[i], 1, 1) != 1 ||
+ drm_rect_calc_vscale(&src[i], &dst[i], 1, 1) != 1) {
+ DPU_ERROR_PLANE(dpu_plane[i],
+ "scaling is not supported in multirect mode\n");
+ return -EINVAL;
+ }
+
+ if (DPU_FORMAT_IS_YUV(fmt[i])) {
+ DPU_ERROR_PLANE(dpu_plane[i],
+ "Unsupported format for multirect mode\n");
+ return -EINVAL;
+ }
+
+		/*
+ * SSPP PD_MEM is split half - one for each RECT.
+ * Tiled formats need 5 lines of buffering while fetching
+ * whereas linear formats need only 2 lines.
+ * So we cannot support more than half of the supported SSPP
+ * width for tiled formats.
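+		 * For example, a hypothetical maxlinewidth of 2560 limits
+		 * each rect to 1280 pixels of source width when any rect
+		 * uses a tiled (UBWC) format.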
+ */
+ width_threshold = dpu_plane[i]->pipe_sblk->common->maxlinewidth;
+ if (has_tiled_rect)
+ width_threshold /= 2;
+
+ if (parallel_fetch_qualified &&
+ drm_rect_width(&src[i]) > width_threshold)
+ parallel_fetch_qualified = false;
+
+ }
+
+ /* Validate RECT's and set the mode */
+
+ /* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
+ if (parallel_fetch_qualified) {
+ pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+ pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ goto done;
+ }
+
+ /* TIME_MX Mode */
+ buffer_lines = 2 * max_tile_height;
+
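+	/*
+	 * e.g. a hypothetical max_tile_height of 4 requires the two
+	 * destination rects to be at least 8 lines apart vertically
+	 * before time multiplexed fetch can be used.
+	 */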
+ if (dst[R1].y1 >= dst[R0].y2 + buffer_lines ||
+ dst[R0].y1 >= dst[R1].y2 + buffer_lines) {
+ pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+ pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+ } else {
+ DPU_ERROR(
+ "No multirect mode possible for the planes (%d - %d)\n",
+ drm_state[R0]->plane->base.id,
+ drm_state[R1]->plane->base.id);
+ return -EINVAL;
+ }
+
+done:
+ if (dpu_plane[R0]->is_virtual) {
+ pstate[R0]->multirect_index = DPU_SSPP_RECT_1;
+ pstate[R1]->multirect_index = DPU_SSPP_RECT_0;
+ } else {
+ pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
+ pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
+	}
+
+ DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
+ pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
+ DPU_DEBUG_PLANE(dpu_plane[R1], "R1: %d - %d\n",
+ pstate[R1]->multirect_mode, pstate[R1]->multirect_index);
+ return 0;
+}
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush for the given plane
+ * @plane: Pointer to drm plane structure
+ * @ctl: Pointer to hardware control driver
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+ u32 *flush_sspp)
+{
+ struct dpu_plane_state *pstate;
+
+	if (!plane || !ctl || !flush_sspp) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ pstate = to_dpu_plane_state(plane->state);
+
+ *flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
+}
+
+static int dpu_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
+ struct dpu_hw_fmt_layout layout;
+ struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
+ struct dma_fence *fence;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
+
+ ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "Failed to get aspace\n");
+ return ret;
+ }
+
+ /* cache aspace */
+ pstate->aspace = aspace;
+
+ /*
+ * TODO: Need to sort out the msm_framebuffer_prepare() call below so
+ * we can use msm_atomic_prepare_fb() instead of doing the
+ * implicit fence and fb prepare by hand here.
+ */
+ obj = msm_framebuffer_bo(new_state->fb, 0);
+ msm_obj = to_msm_bo(obj);
+ fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ if (fence)
+ drm_atomic_set_fence_for_plane(new_state, fence);
+
+ if (pstate->aspace) {
+ ret = msm_framebuffer_prepare(new_state->fb,
+ pstate->aspace);
+ if (ret) {
+ DPU_ERROR("failed to prepare framebuffer\n");
+ return ret;
+ }
+ }
+
+ /* validate framebuffer layout before commit */
+ ret = dpu_format_populate_layout(pstate->aspace,
+ new_state->fb, &layout);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dpu_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *old_pstate;
+
+ if (!old_state || !old_state->fb)
+ return;
+
+ old_pstate = to_dpu_plane_state(old_state);
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
+
+ msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace);
+}
+
+static bool dpu_plane_validate_src(struct drm_rect *src,
+ struct drm_rect *fb_rect,
+ uint32_t min_src_size)
+{
+ /* Ensure fb size is supported */
+ if (drm_rect_width(fb_rect) > MAX_IMG_WIDTH ||
+ drm_rect_height(fb_rect) > MAX_IMG_HEIGHT)
+ return false;
+
+ /* Ensure src rect is above the minimum size */
+ if (drm_rect_width(src) < min_src_size ||
+ drm_rect_height(src) < min_src_size)
+ return false;
+
+ /* Ensure src is fully encapsulated in fb */
+ return drm_rect_intersect(fb_rect, src) &&
+ drm_rect_equals(fb_rect, src);
+}
+
+static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ int ret = 0;
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+ const struct dpu_format *fmt;
+ struct drm_rect src, dst, fb_rect = { 0 };
+ uint32_t max_upscale = 1, max_downscale = 1;
+ uint32_t min_src_size, max_linewidth;
+ int hscale = 1, vscale = 1;
+
+ if (!plane || !state) {
+ DPU_ERROR("invalid arg(s), plane %d state %d\n",
+ plane != 0, state != 0);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(state);
+
+ if (!pdpu->pipe_sblk) {
+ DPU_ERROR_PLANE(pdpu, "invalid catalog\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ src.x1 = state->src_x >> 16;
+ src.y1 = state->src_y >> 16;
+ src.x2 = src.x1 + (state->src_w >> 16);
+ src.y2 = src.y1 + (state->src_h >> 16);
+
+ dst = drm_plane_state_dest(state);
+
+ fb_rect.x2 = state->fb->width;
+ fb_rect.y2 = state->fb->height;
+
+ max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
+
+ if (pdpu->features & DPU_SSPP_SCALER) {
+ max_downscale = pdpu->pipe_sblk->maxdwnscale;
+ max_upscale = pdpu->pipe_sblk->maxupscale;
+ }
+ if (drm_rect_width(&src) < drm_rect_width(&dst))
+ hscale = drm_rect_calc_hscale(&src, &dst, 1, max_upscale);
+ else
+ hscale = drm_rect_calc_hscale(&dst, &src, 1, max_downscale);
+ if (drm_rect_height(&src) < drm_rect_height(&dst))
+ vscale = drm_rect_calc_vscale(&src, &dst, 1, max_upscale);
+ else
+ vscale = drm_rect_calc_vscale(&dst, &src, 1, max_downscale);
+
+ DPU_DEBUG_PLANE(pdpu, "check %d -> %d\n",
+ dpu_plane_enabled(plane->state), dpu_plane_enabled(state));
+
+ if (!dpu_plane_enabled(state))
+ goto exit;
+
+ fmt = to_dpu_format(msm_framebuffer_format(state->fb));
+
+ min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
+
+ if (DPU_FORMAT_IS_YUV(fmt) &&
+ (!(pdpu->features & DPU_SSPP_SCALER) ||
+ !(pdpu->features & (BIT(DPU_SSPP_CSC)
+ | BIT(DPU_SSPP_CSC_10BIT))))) {
+ DPU_ERROR_PLANE(pdpu,
+ "plane doesn't have scaler/csc for yuv\n");
+ ret = -EINVAL;
+
+ /* check src bounds */
+ } else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
+ DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src));
+ ret = -E2BIG;
+
+ /* valid yuv image */
+ } else if (DPU_FORMAT_IS_YUV(fmt) &&
+ (src.x1 & 0x1 || src.y1 & 0x1 ||
+ drm_rect_width(&src) & 0x1 ||
+ drm_rect_height(&src) & 0x1)) {
+ DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src));
+ ret = -EINVAL;
+
+ /* min dst support */
+ } else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
+ DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&dst));
+ ret = -EINVAL;
+
+ /* check decimated source width */
+ } else if (drm_rect_width(&src) > max_linewidth) {
+ DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+ DRM_RECT_ARG(&src), max_linewidth);
+ ret = -E2BIG;
+
+ /* check scaler capability */
+ } else if (hscale < 0 || vscale < 0) {
+ DPU_ERROR_PLANE(pdpu, "invalid scaling requested src="
+ DRM_RECT_FMT " dst=" DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src), DRM_RECT_ARG(&dst));
+ ret = -E2BIG;
+ }
+
+exit:
+ return ret;
+}
+
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (!state->fb)
+ return 0;
+
+ DPU_DEBUG_PLANE(to_dpu_plane(plane), "\n");
+
+ return dpu_plane_sspp_atomic_check(plane, state);
+}
+
+void dpu_plane_flush(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !plane->state) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(plane->state);
+
+ /*
+ * These updates have to be done immediately before the plane flush
+ * timing, and may not be moved to the atomic_update/mode_set functions.
+ */
+ if (pdpu->is_error)
+ /* force white frame with 100% alpha pipe output on error */
+ _dpu_plane_color_fill(pdpu, 0xFFFFFF, 0xFF);
+ else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
+ /* force 100% alpha */
+ _dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
+ else if (pdpu->pipe_hw && pdpu->csc_ptr && pdpu->pipe_hw->ops.setup_csc)
+ pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, pdpu->csc_ptr);
+
+ /* flag h/w flush complete */
+ if (plane->state)
+ pstate->pending = false;
+}
+
+/**
+ * dpu_plane_set_error - enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: error value to set
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane)
+ return;
+
+ pdpu = to_dpu_plane(plane);
+ pdpu->is_error = error;
+}
+
+static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ uint32_t nplanes, src_flags;
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+ struct dpu_plane_state *pstate;
+ struct dpu_plane_state *old_pstate;
+ const struct dpu_format *fmt;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct drm_rect src, dst;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return -EINVAL;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return -EINVAL;
+ } else if (!old_state) {
+ DPU_ERROR("invalid old state\n");
+ return -EINVAL;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ state = plane->state;
+
+ pstate = to_dpu_plane_state(state);
+
+ old_pstate = to_dpu_plane_state(old_state);
+
+ crtc = state->crtc;
+ fb = state->fb;
+ if (!crtc || !fb) {
+ DPU_ERROR_PLANE(pdpu, "invalid crtc %d or fb %d\n",
+ crtc != 0, fb != 0);
+ return -EINVAL;
+ }
+ fmt = to_dpu_format(msm_framebuffer_format(fb));
+ nplanes = fmt->num_planes;
+
+ memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
+
+ _dpu_plane_set_scanout(plane, pstate, &pdpu->pipe_cfg, fb);
+
+ pstate->pending = true;
+
+ pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
+ _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+ src.x1 = state->src_x >> 16;
+ src.y1 = state->src_y >> 16;
+ src.x2 = src.x1 + (state->src_w >> 16);
+ src.y2 = src.y1 + (state->src_h >> 16);
+
+ dst = drm_plane_state_dest(state);
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FMT "->crtc%u " DRM_RECT_FMT
+ ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_ARG(&src),
+ crtc->base.id, DRM_RECT_ARG(&dst),
+ (char *)&fmt->base.pixel_format,
+ DPU_FORMAT_IS_UBWC(fmt));
+
+ pdpu->pipe_cfg.src_rect = src;
+ pdpu->pipe_cfg.dst_rect = dst;
+
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
+
+ /* override for color fill */
+ if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
+ /* skip remaining processing on color fill */
+ return 0;
+ }
+
+ if (pdpu->pipe_hw->ops.setup_rects) {
+ pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+ &pdpu->pipe_cfg,
+ pstate->multirect_index);
+ }
+
+ if (pdpu->pipe_hw->ops.setup_pe &&
+ (pstate->multirect_index != DPU_SSPP_RECT_1))
+ pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+ &pstate->pixel_ext);
+
+	/*
+	 * When programmed in multirect mode, the scaler block is
+	 * bypassed. Still we need to update alpha and bitwidth
+	 * ONLY for RECT0.
+ */
+ if (pdpu->pipe_hw->ops.setup_scaler &&
+ pstate->multirect_index != DPU_SSPP_RECT_1)
+ pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+ &pdpu->pipe_cfg, &pstate->pixel_ext,
+ &pstate->scaler3_cfg);
+
+ if (pdpu->pipe_hw->ops.setup_multirect)
+ pdpu->pipe_hw->ops.setup_multirect(
+ pdpu->pipe_hw,
+ pstate->multirect_index,
+ pstate->multirect_mode);
+
+ if (pdpu->pipe_hw->ops.setup_format) {
+ src_flags = 0x0;
+
+ /* update format */
+ pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_cdp) {
+ struct dpu_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+
+ memset(cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
+
+ cdp_cfg->enable = pdpu->catalog->perf.cdp_cfg
+ [DPU_PERF_CDP_USAGE_RT].rd_enable;
+ cdp_cfg->ubwc_meta_enable =
+ DPU_FORMAT_IS_UBWC(fmt);
+ cdp_cfg->tile_amortize_enable =
+ DPU_FORMAT_IS_UBWC(fmt) ||
+ DPU_FORMAT_IS_TILE(fmt);
+ cdp_cfg->preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
+
+ pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, cdp_cfg);
+ }
+
+ /* update csc */
+ if (DPU_FORMAT_IS_YUV(fmt))
+ _dpu_plane_setup_csc(pdpu);
+ else
+			pdpu->csc_ptr = NULL;
+ }
+
+ _dpu_plane_set_qos_lut(plane, fb);
+ _dpu_plane_set_danger_lut(plane, fb);
+
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ _dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL);
+ _dpu_plane_set_ot_limit(plane, crtc);
+ }
+
+ _dpu_plane_set_qos_remap(plane);
+ return 0;
+}
+
+static void _dpu_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+ struct dpu_plane_state *pstate;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return;
+ } else if (!old_state) {
+ DPU_ERROR("invalid old state\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ state = plane->state;
+ pstate = to_dpu_plane_state(state);
+
+ trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
+ pstate->multirect_mode);
+
+ pstate->pending = true;
+
+ if (is_dpu_plane_virtual(plane) &&
+ pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_multirect)
+ pdpu->pipe_hw->ops.setup_multirect(pdpu->pipe_hw,
+ DPU_SSPP_RECT_SOLO, DPU_SSPP_MULTIRECT_NONE);
+}
+
+static void dpu_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pdpu->is_error = false;
+ state = plane->state;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ if (!dpu_plane_sspp_enabled(state)) {
+ _dpu_plane_atomic_disable(plane, old_state);
+ } else {
+ int ret;
+
+ ret = dpu_plane_sspp_atomic_update(plane, old_state);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+ }
+}
+
+void dpu_plane_restore(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane || !plane->state) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /* last plane state is same as current state */
+ dpu_plane_atomic_update(plane, plane->state);
+}
+
+static void dpu_plane_destroy(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ if (pdpu) {
+ _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+ mutex_destroy(&pdpu->lock);
+
+ drm_plane_helper_disable(plane, NULL);
+
+ /* this will destroy the states as well */
+ drm_plane_cleanup(plane);
+
+ if (pdpu->pipe_hw)
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
+
+ kfree(pdpu);
+ }
+}
+
+static void dpu_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !state) {
+ DPU_ERROR("invalid arg(s), plane %d state %d\n",
+ plane != NULL, state != NULL);
+ return;
+ }
+
+ pstate = to_dpu_plane_state(state);
+
+ /* remove ref count for frame buffers */
+ if (state->fb)
+ drm_framebuffer_put(state->fb);
+
+ kfree(pstate);
+}
+
+static struct drm_plane_state *
+dpu_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+ struct dpu_plane_state *old_state;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return NULL;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return NULL;
+ }
+
+ old_state = to_dpu_plane_state(plane->state);
+ pdpu = to_dpu_plane(plane);
+ pstate = kmemdup(old_state, sizeof(*old_state), GFP_KERNEL);
+ if (!pstate) {
+ DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+ return NULL;
+ }
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ pstate->pending = false;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
+
+ return &pstate->base;
+}
+
+static void dpu_plane_reset(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /* remove previous state, if present */
+ if (plane->state) {
+ dpu_plane_destroy_state(plane, plane->state);
+ plane->state = NULL;
+ }
+
+ pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+ if (!pstate) {
+ DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+ return;
+ }
+
+ pstate->base.plane = plane;
+
+ plane->state = &pstate->base;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t _dpu_plane_danger_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ struct dpu_mdss_cfg *cfg = kms->catalog;
+ int len = 0;
+ char buf[40] = {'\0'};
+
+ if (!cfg)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, kms->dev) {
+ if (plane->fb && plane->state) {
+ dpu_plane_danger_signal_ctrl(plane, enable);
+ DPU_DEBUG("plane:%d img:%dx%d ",
+ plane->base.id, plane->fb->width,
+ plane->fb->height);
+ DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->crtc_w, plane->state->crtc_h);
+ } else {
+ DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
+ }
+ }
+}
+
+static ssize_t _dpu_plane_danger_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ struct dpu_mdss_cfg *cfg = kms->catalog;
+ int disable_panic;
+ char buf[10];
+
+ if (!cfg)
+ return -EFAULT;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtoint(buf, 0, &disable_panic))
+ return -EFAULT;
+
+ if (disable_panic) {
+ /* Disable panic signal for all active pipes */
+ DPU_DEBUG("Disabling danger:\n");
+ _dpu_plane_set_danger_state(kms, false);
+ kms->has_danger_ctrl = false;
+ } else {
+ /* Enable panic signal for all active pipes */
+ DPU_DEBUG("Enabling danger:\n");
+ kms->has_danger_ctrl = true;
+ _dpu_plane_set_danger_state(kms, true);
+ }
+
+ return count;
+}
+
+static const struct file_operations dpu_plane_danger_enable = {
+ .open = simple_open,
+ .read = _dpu_plane_danger_read,
+ .write = _dpu_plane_danger_write,
+};
+
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_kms *kms;
+ struct msm_drm_private *priv;
+ const struct dpu_sspp_sub_blks *sblk = NULL;
+ const struct dpu_sspp_cfg *cfg = NULL;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+
+ if (pdpu && pdpu->pipe_hw)
+ cfg = pdpu->pipe_hw->cap;
+ if (cfg)
+ sblk = cfg->sblk;
+
+ if (!sblk)
+ return 0;
+
+ /* create overall sub-directory for the pipe */
+ pdpu->debugfs_root =
+ debugfs_create_dir(pdpu->pipe_name,
+ plane->dev->primary->debugfs_root);
+
+ if (!pdpu->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_x32("features", 0600,
+ pdpu->debugfs_root, &pdpu->features);
+
+ /* add register dump support */
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_src,
+ sblk->src_blk.base + cfg->base,
+ sblk->src_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("src_blk", 0400,
+ pdpu->debugfs_root, &pdpu->debugfs_src);
+
+ if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) {
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
+ sblk->scaler_blk.base + cfg->base,
+ sblk->scaler_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("scaler_blk", 0400,
+ pdpu->debugfs_root,
+ &pdpu->debugfs_scaler);
+ debugfs_create_bool("default_scaling",
+ 0600,
+ pdpu->debugfs_root,
+ &pdpu->debugfs_default_scale);
+ }
+
+ if (cfg->features & BIT(DPU_SSPP_CSC) ||
+ cfg->features & BIT(DPU_SSPP_CSC_10BIT)) {
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_csc,
+ sblk->csc_blk.base + cfg->base,
+ sblk->csc_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("csc_blk", 0400,
+ pdpu->debugfs_root, &pdpu->debugfs_csc);
+ }
+
+ debugfs_create_u32("xin_id",
+ 0400,
+ pdpu->debugfs_root,
+ (u32 *) &cfg->xin_id);
+ debugfs_create_u32("clk_ctrl",
+ 0400,
+ pdpu->debugfs_root,
+ (u32 *) &cfg->clk_ctrl);
+ debugfs_create_x32("creq_vblank",
+ 0600,
+ pdpu->debugfs_root,
+ (u32 *) &sblk->creq_vblank);
+ debugfs_create_x32("danger_vblank",
+ 0600,
+ pdpu->debugfs_root,
+ (u32 *) &sblk->danger_vblank);
+
+ debugfs_create_file("disable_danger",
+ 0600,
+ pdpu->debugfs_root,
+ kms, &dpu_plane_danger_enable);
+
+ return 0;
+}
+
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane)
+ return;
+ pdpu = to_dpu_plane(plane);
+
+ debugfs_remove_recursive(pdpu->debugfs_root);
+}
+#else
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+ return 0;
+}
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+}
+#endif
+
+static int dpu_plane_late_register(struct drm_plane *plane)
+{
+ return _dpu_plane_init_debugfs(plane);
+}
+
+static void dpu_plane_early_unregister(struct drm_plane *plane)
+{
+ _dpu_plane_destroy_debugfs(plane);
+}
+
+static const struct drm_plane_funcs dpu_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = dpu_plane_destroy,
+ .reset = dpu_plane_reset,
+ .atomic_duplicate_state = dpu_plane_duplicate_state,
+ .atomic_destroy_state = dpu_plane_destroy_state,
+ .late_register = dpu_plane_late_register,
+ .early_unregister = dpu_plane_early_unregister,
+};
+
+static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
+ .prepare_fb = dpu_plane_prepare_fb,
+ .cleanup_fb = dpu_plane_cleanup_fb,
+ .atomic_check = dpu_plane_atomic_check,
+ .atomic_update = dpu_plane_atomic_update,
+};
+
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane)
+{
+ return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE;
+}
+
+bool is_dpu_plane_virtual(struct drm_plane *plane)
+{
+ return plane ? to_dpu_plane(plane)->is_virtual : false;
+}
+
+/* initialize plane */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, bool primary_plane,
+ unsigned long possible_crtcs, u32 master_plane_id)
+{
+ struct drm_plane *plane = NULL, *master_plane = NULL;
+ const struct dpu_format_extended *format_list;
+ struct dpu_plane *pdpu;
+ struct msm_drm_private *priv;
+ struct dpu_kms *kms;
+ enum drm_plane_type type;
+ int zpos_max = DPU_ZPOS_MAX;
+ int ret = -EINVAL;
+
+ if (!dev) {
+ DPU_ERROR("[%u]device is NULL\n", pipe);
+ goto exit;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ DPU_ERROR("[%u]private data is NULL\n", pipe);
+ goto exit;
+ }
+
+ if (!priv->kms) {
+ DPU_ERROR("[%u]invalid KMS reference\n", pipe);
+ goto exit;
+ }
+ kms = to_dpu_kms(priv->kms);
+
+ if (!kms->catalog) {
+ DPU_ERROR("[%u]invalid catalog reference\n", pipe);
+ goto exit;
+ }
+
+ /* create and zero local structure */
+ pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
+ if (!pdpu) {
+ DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* cache local stuff for later */
+ plane = &pdpu->base;
+ pdpu->pipe = pipe;
+ pdpu->is_virtual = (master_plane_id != 0);
+ INIT_LIST_HEAD(&pdpu->mplane_list);
+ master_plane = drm_plane_find(dev, NULL, master_plane_id);
+ if (master_plane) {
+ struct dpu_plane *mpdpu = to_dpu_plane(master_plane);
+
+ list_add_tail(&pdpu->mplane_list, &mpdpu->mplane_list);
+ }
+
+ /* initialize underlying h/w driver */
+ pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog,
+ master_plane_id != 0);
+ if (IS_ERR(pdpu->pipe_hw)) {
+ DPU_ERROR("[%u]SSPP init failed\n", pipe);
+ ret = PTR_ERR(pdpu->pipe_hw);
+ goto clean_plane;
+ } else if (!pdpu->pipe_hw->cap || !pdpu->pipe_hw->cap->sblk) {
+ DPU_ERROR("[%u]SSPP init returned invalid cfg\n", pipe);
+ goto clean_sspp;
+ }
+
+ /* cache features mask for later */
+ pdpu->features = pdpu->pipe_hw->cap->features;
+ pdpu->pipe_sblk = pdpu->pipe_hw->cap->sblk;
+ if (!pdpu->pipe_sblk) {
+ DPU_ERROR("[%u]invalid sblk\n", pipe);
+ goto clean_sspp;
+ }
+
+ if (!master_plane_id)
+ format_list = pdpu->pipe_sblk->format_list;
+ else
+ format_list = pdpu->pipe_sblk->virt_format_list;
+
+ pdpu->nformats = dpu_populate_formats(format_list,
+ pdpu->formats,
+ 0,
+ ARRAY_SIZE(pdpu->formats));
+
+ if (!pdpu->nformats) {
+ DPU_ERROR("[%u]no valid formats for plane\n", pipe);
+ goto clean_sspp;
+ }
+
+ if (pdpu->features & BIT(DPU_SSPP_CURSOR))
+ type = DRM_PLANE_TYPE_CURSOR;
+ else if (primary_plane)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+ ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
+ pdpu->formats, pdpu->nformats,
+ NULL, type, NULL);
+ if (ret)
+ goto clean_sspp;
+
+ pdpu->catalog = kms->catalog;
+
+ if (kms->catalog->mixer_count &&
+ kms->catalog->mixer[0].sblk->maxblendstages) {
+ zpos_max = kms->catalog->mixer[0].sblk->maxblendstages - 1;
+ if (zpos_max > DPU_STAGE_MAX - DPU_STAGE_0 - 1)
+ zpos_max = DPU_STAGE_MAX - DPU_STAGE_0 - 1;
+ }
+
+ ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max);
+ if (ret)
+ DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+
+ /* success! finalize initialization */
+ drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
+
+ /* save user friendly pipe name for later */
+ snprintf(pdpu->pipe_name, DPU_NAME_SIZE, "plane%u", plane->base.id);
+
+ mutex_init(&pdpu->lock);
+
+ DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", pdpu->pipe_name,
+ pipe, plane->base.id, master_plane_id);
+ return plane;
+
+clean_sspp:
+ if (pdpu && pdpu->pipe_hw)
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
+clean_plane:
+ kfree(pdpu);
+exit:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
new file mode 100644
index 000000000000..f6fe6ddc7a3a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_PLANE_H_
+#define _DPU_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * struct dpu_plane_state: Define dpu extension of drm plane state object
+ * @base: base drm plane state object
+ * @aspace: pointer to address space for input/output buffers
+ * @input_fence: dereferenced input fence pointer
+ * @stage: assigned by crtc blender
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
+ * @pending: whether the current update is still pending
+ * @scaler3_cfg: configuration data for scaler3
+ * @pixel_ext: configuration data for pixel extensions
+ * @cdp_cfg: CDP configuration
+ */
+struct dpu_plane_state {
+ struct drm_plane_state base;
+ struct msm_gem_address_space *aspace;
+ void *input_fence;
+ enum dpu_stage stage;
+ uint32_t multirect_index;
+ uint32_t multirect_mode;
+ bool pending;
+
+ /* scaler configuration */
+ struct dpu_hw_scaler3_cfg scaler3_cfg;
+ struct dpu_hw_pixel_ext pixel_ext;
+
+ struct dpu_hw_pipe_cdp_cfg cdp_cfg;
+};
+
+/**
+ * struct dpu_multirect_plane_states: Defines multirect pair of drm plane states
+ * @r0: drm plane configured on rect 0
+ * @r1: drm plane configured on rect 1
+ */
+struct dpu_multirect_plane_states {
+ const struct drm_plane_state *r0;
+ const struct drm_plane_state *r1;
+};
+
+#define to_dpu_plane_state(x) \
+ container_of(x, struct dpu_plane_state, base)
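
to_dpu_plane_state() is the usual container_of() downcast for a subclassed DRM state object. A minimal sketch of how a hook recovers the DPU-private state (the function name is illustrative):

	static bool example_plane_flush_pending(struct drm_plane *plane)
	{
		/* base is embedded in dpu_plane_state, so this is a pointer fixup */
		struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);

		return pstate->pending;
	}
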
+
+/**
+ * dpu_plane_pipe - return sspp identifier for the given plane
+ * @plane: Pointer to DRM plane object
+ * Returns: sspp identifier of the given plane
+ */
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
+
+/**
+ * is_dpu_plane_virtual - check for virtual plane
+ * @plane: Pointer to DRM plane object
+ * returns: true - if the plane is virtual
+ * false - if the plane is a regular (non-virtual) plane
+ */
+bool is_dpu_plane_virtual(struct drm_plane *plane);
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush mask
+ * @plane: Pointer to DRM plane object
+ * @ctl: Pointer to control hardware
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+ u32 *flush_sspp);
+
+/**
+ * dpu_plane_restore - restore hw state if previously power collapsed
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_restore(struct drm_plane *plane);
+
+/**
+ * dpu_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_flush(struct drm_plane *plane);
+
+/**
+ * dpu_plane_kickoff - final plane operations before commit kickoff
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_kickoff(struct drm_plane *plane);
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error);
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev: Pointer to DRM device
+ * @pipe: dpu hardware pipe identifier
+ * @primary_plane: true if this pipe is primary plane for crtc
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
+ * a regular plane initialization. A non-zero primary plane
+ * id will be passed for a virtual pipe initialization.
+ *
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, bool primary_plane,
+ unsigned long possible_crtcs, u32 master_plane_id);
+
+/**
+ * dpu_plane_validate_multirect_v2 - validate the multirect planes
+ * against hw limitations
+ * @plane: drm plane states of the multirect pair
+ */
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
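
A sketch of pairing two plane states for validation; the rect0/rect1 state variables here are hypothetical:

	struct dpu_multirect_plane_states ms = {
		.r0 = rect0_state,	/* hypothetical rect 0 plane state */
		.r1 = rect1_state,	/* hypothetical rect 1 plane state */
	};

	if (dpu_plane_validate_multirect_v2(&ms)) {
		/* pair exceeds hw limits; fall back to separate pipes */
	}
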
+
+/**
+ * dpu_plane_clear_multirect - clear multirect bits for the given pipe
+ * @drm_state: Pointer to DRM plane state
+ */
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
+
+/**
+ * dpu_plane_wait_input_fence - wait for input fence object
+ * @plane: Pointer to DRM plane object
+ * @wait_ms: Wait timeout value
+ * Returns: Zero on success
+ */
+int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
+
+/**
+ * dpu_plane_color_fill - enables color fill on plane
+ * @plane: Pointer to DRM plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int dpu_plane_color_fill(struct drm_plane *plane,
+ uint32_t color, uint32_t alpha);
+
+/**
+ * dpu_plane_set_revalidate - sets revalidate flag which forces a full
+ * validation of the plane properties in the next atomic check
+ * @plane: Pointer to DRM plane object
+ * @enable: Boolean to set/unset the flag
+ */
+void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable);
+
+#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
new file mode 100644
index 000000000000..a68f1249388c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
@@ -0,0 +1,249 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d]: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+
+#include "dpu_power_handle.h"
+#include "dpu_trace.h"
+
+static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
+ [DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
+ [DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
+ [DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
+};
+
+const char *dpu_power_handle_get_dbus_name(u32 bus_id)
+{
+ if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
+ return data_bus_name[bus_id];
+
+ return NULL;
+}
+
+static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
+ u32 event_type)
+{
+ struct dpu_power_event *event;
+
+ list_for_each_entry(event, &phandle->event_list, list) {
+ if (event->event_type & event_type)
+ event->cb_fnc(event_type, event->usr);
+ }
+}
+
+struct dpu_power_client *dpu_power_client_create(
+ struct dpu_power_handle *phandle, char *client_name)
+{
+ struct dpu_power_client *client;
+ static u32 id;
+
+ if (!client_name || !phandle) {
+ pr_err("client name is null or invalid power data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&phandle->phandle_lock);
+ strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+ client->usecase_ndx = VOTE_INDEX_DISABLE;
+ client->id = id;
+ client->active = true;
+ pr_debug("client %s created:%pK id :%d\n", client_name,
+ client, id);
+ id++;
+ list_add(&client->list, &phandle->power_client_clist);
+ mutex_unlock(&phandle->phandle_lock);
+
+ return client;
+}
+
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+ struct dpu_power_client *client)
+{
+ if (!client || !phandle) {
+ pr_err("reg bus vote: invalid client handle\n");
+ } else if (!client->active) {
+ pr_err("dpu power deinit already done\n");
+ kfree(client);
+ } else {
+ pr_debug("bus vote client %s destroyed:%pK id:%u\n",
+ client->name, client, client->id);
+ mutex_lock(&phandle->phandle_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&phandle->phandle_lock);
+ kfree(client);
+ }
+}
+
+void dpu_power_resource_init(struct platform_device *pdev,
+ struct dpu_power_handle *phandle)
+{
+ phandle->dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&phandle->power_client_clist);
+ INIT_LIST_HEAD(&phandle->event_list);
+
+ mutex_init(&phandle->phandle_lock);
+}
+
+void dpu_power_resource_deinit(struct platform_device *pdev,
+ struct dpu_power_handle *phandle)
+{
+ struct dpu_power_client *curr_client, *next_client;
+ struct dpu_power_event *curr_event, *next_event;
+
+ if (!phandle || !pdev) {
+ pr_err("invalid input param\n");
+ return;
+ }
+
+ mutex_lock(&phandle->phandle_lock);
+ list_for_each_entry_safe(curr_client, next_client,
+ &phandle->power_client_clist, list) {
+ pr_err("cliend:%s-%d still registered with refcount:%d\n",
+ curr_client->name, curr_client->id,
+ curr_client->refcount);
+ curr_client->active = false;
+ list_del(&curr_client->list);
+ }
+
+ list_for_each_entry_safe(curr_event, next_event,
+ &phandle->event_list, list) {
+ pr_err("event:%d, client:%s still registered\n",
+ curr_event->event_type,
+ curr_event->client_name);
+ curr_event->active = false;
+ list_del(&curr_event->list);
+ }
+ mutex_unlock(&phandle->phandle_lock);
+}
+
+int dpu_power_resource_enable(struct dpu_power_handle *phandle,
+ struct dpu_power_client *pclient, bool enable)
+{
+ bool changed = false;
+ u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
+ struct dpu_power_client *client;
+
+ if (!phandle || !pclient) {
+ pr_err("invalid input argument\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&phandle->phandle_lock);
+ if (enable)
+ pclient->refcount++;
+ else if (pclient->refcount)
+ pclient->refcount--;
+
+ if (pclient->refcount)
+ pclient->usecase_ndx = VOTE_INDEX_LOW;
+ else
+ pclient->usecase_ndx = VOTE_INDEX_DISABLE;
+
+ list_for_each_entry(client, &phandle->power_client_clist, list) {
+ if (client->usecase_ndx < VOTE_INDEX_MAX &&
+ client->usecase_ndx > max_usecase_ndx)
+ max_usecase_ndx = client->usecase_ndx;
+ }
+
+ if (phandle->current_usecase_ndx != max_usecase_ndx) {
+ changed = true;
+ prev_usecase_ndx = phandle->current_usecase_ndx;
+ phandle->current_usecase_ndx = max_usecase_ndx;
+ }
+
+ pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
+ __builtin_return_address(0), changed, max_usecase_ndx,
+ pclient->name, pclient->id, enable, pclient->refcount);
+
+ if (!changed)
+ goto end;
+
+ if (enable) {
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_PRE_ENABLE);
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_POST_ENABLE);
+
+ } else {
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_PRE_DISABLE);
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_POST_DISABLE);
+ }
+
+end:
+ mutex_unlock(&phandle->phandle_lock);
+ return 0;
+}
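
Because the handle votes with the maximum usecase index across all clients, events fire only on the edges where that maximum changes. A worked sketch with two hypothetical clients on one handle:

	struct dpu_power_client *enc = dpu_power_client_create(phandle, "enc0");
	struct dpu_power_client *crtc = dpu_power_client_create(phandle, "crtc0");

	dpu_power_resource_enable(phandle, enc, true);   /* DISABLE -> LOW, events fire */
	dpu_power_resource_enable(phandle, crtc, true);  /* max already LOW, no events */
	dpu_power_resource_enable(phandle, enc, false);  /* crtc still votes LOW, no events */
	dpu_power_resource_enable(phandle, crtc, false); /* LOW -> DISABLE, events fire */
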
+
+struct dpu_power_event *dpu_power_handle_register_event(
+ struct dpu_power_handle *phandle,
+ u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+ void *usr, char *client_name)
+{
+ struct dpu_power_event *event;
+
+ if (!phandle) {
+ pr_err("invalid power handle\n");
+ return ERR_PTR(-EINVAL);
+ } else if (!cb_fnc || !event_type) {
+ pr_err("no callback fnc or event type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
+ if (!event)
+ return ERR_PTR(-ENOMEM);
+
+ event->event_type = event_type;
+ event->cb_fnc = cb_fnc;
+ event->usr = usr;
+ strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
+ event->active = true;
+
+ mutex_lock(&phandle->phandle_lock);
+ list_add(&event->list, &phandle->event_list);
+ mutex_unlock(&phandle->phandle_lock);
+
+ return event;
+}
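
event_type is a mask, so one registration can observe several transitions. A minimal sketch of a caller, with an illustrative callback and usr pointer:

	static void example_power_cb(u32 event_type, void *usr)
	{
		if (event_type & DPU_POWER_EVENT_POST_ENABLE) {
			/* reprogram any hw state lost across the power collapse */
		}
	}

	event = dpu_power_handle_register_event(phandle,
			DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_PRE_DISABLE,
			example_power_cb, kms, "dpu_kms");
	if (IS_ERR(event))
		return PTR_ERR(event);
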
+
+void dpu_power_handle_unregister_event(
+ struct dpu_power_handle *phandle,
+ struct dpu_power_event *event)
+{
+ if (!phandle || !event) {
+ pr_err("invalid phandle or event\n");
+ } else if (!event->active) {
+ pr_err("power handle deinit already done\n");
+ kfree(event);
+ } else {
+ mutex_lock(&phandle->phandle_lock);
+ list_del_init(&event->list);
+ mutex_unlock(&phandle->phandle_lock);
+ kfree(event);
+ }
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
new file mode 100644
index 000000000000..344f74464eca
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
@@ -0,0 +1,225 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DPU_POWER_HANDLE_H_
+#define _DPU_POWER_HANDLE_H_
+
+#define MAX_CLIENT_NAME_LEN 128
+
+#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 0
+#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
+#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000
+#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
+
+#include "dpu_io_util.h"
+
+/* event will be triggered before power handler disable */
+#define DPU_POWER_EVENT_PRE_DISABLE 0x1
+
+/* event will be triggered after power handler disable */
+#define DPU_POWER_EVENT_POST_DISABLE 0x2
+
+/* event will be triggered before power handler enable */
+#define DPU_POWER_EVENT_PRE_ENABLE 0x4
+
+/* event will be triggered after power handler enable */
+#define DPU_POWER_EVENT_POST_ENABLE 0x8
+
+/**
+ * enum mdss_bus_vote_type - register bus vote type
+ * VOTE_INDEX_DISABLE: removes the client vote
+ * VOTE_INDEX_LOW: keeps the lowest vote for register bus
+ * VOTE_INDEX_MAX: invalid
+ */
+enum mdss_bus_vote_type {
+ VOTE_INDEX_DISABLE,
+ VOTE_INDEX_LOW,
+ VOTE_INDEX_MAX,
+};
+
+/**
+ * enum dpu_power_handle_data_bus_client - type of axi bus clients
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
+ */
+enum dpu_power_handle_data_bus_client {
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
+};
+
+/**
+ * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
+ * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
+ */
+enum DPU_POWER_HANDLE_DBUS_ID {
+ DPU_POWER_HANDLE_DBUS_ID_MNOC,
+ DPU_POWER_HANDLE_DBUS_ID_LLCC,
+ DPU_POWER_HANDLE_DBUS_ID_EBI,
+ DPU_POWER_HANDLE_DBUS_ID_MAX,
+};
+
+/**
+ * struct dpu_power_client: stores the power client for dpu driver
+ * @name: name of the client
+ * @usecase_ndx: current regs bus vote type
+ * @refcount: current refcount if multiple modules are using the
+ * same client for enable/disable. The power module will
+ * aggregate the refcount and vote accordingly for this
+ * client.
+ * @id: assigned during create. helps for debugging.
+ * @list: list to attach power handle master list
+ * @ab: arbitrated bandwidth for each bus client
+ * @ib: instantaneous bandwidth for each bus client
+ * @active: indicates the state of dpu power handle
+ */
+struct dpu_power_client {
+ char name[MAX_CLIENT_NAME_LEN];
+ short usecase_ndx;
+ short refcount;
+ u32 id;
+ struct list_head list;
+ u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+ u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+ bool active;
+};
+
+/**
+ * struct dpu_power_event - local event registration structure
+ * @client_name: name of the client registering
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback event trigger
+ * @event_type: refer to DPU_POWER_EVENT_*
+ * @list: list to attach event master list
+ * @active: indicates the state of dpu power handle
+ */
+struct dpu_power_event {
+ char client_name[MAX_CLIENT_NAME_LEN];
+ void (*cb_fnc)(u32 event_type, void *usr);
+ void *usr;
+ u32 event_type;
+ struct list_head list;
+ bool active;
+};
+
+/**
+ * struct dpu_power_handle: power handle main struct
+ * @power_client_clist: master list to store all clients
+ * @phandle_lock: lock to synchronize the enable/disable
+ * @dev: pointer to device structure
+ * @current_usecase_ndx: current usecase index
+ * @event_list: current power handle event list
+ */
+struct dpu_power_handle {
+ struct list_head power_client_clist;
+ struct mutex phandle_lock;
+ struct device *dev;
+ u32 current_usecase_ndx;
+ struct list_head event_list;
+};
+
+/**
+ * dpu_power_resource_init() - initializes the dpu power handle
+ * @pdev: platform device to search the power resources
+ * @pdata: power handle to store the power resources
+ */
+void dpu_power_resource_init(struct platform_device *pdev,
+ struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_resource_deinit() - release the dpu power handle
+ * @pdev: platform device for power resources
+ * @pdata: power handle containing the resources
+ *
+ * Return: none
+ */
+void dpu_power_resource_deinit(struct platform_device *pdev,
+ struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_client_create() - create the client on power handle
+ * @pdata: power handle containing the resources
+ * @client_name: new client name for registration
+ *
+ * Return: pointer to new client, or ERR_PTR() on failure.
+ */
+struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
+ char *client_name);
+
+/**
+ * dpu_power_client_destroy() - destroy the client on power handle
+ * @phandle: power handle containing the resources
+ * @client: client pointer returned by dpu_power_client_create()
+ *
+ * Return: none
+ */
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+ struct dpu_power_client *client);
+
+/**
+ * dpu_power_resource_enable() - enable/disable the power resources
+ * @pdata: power handle containing the resources
+ * @client: client information to enable/disable its vote
+ * @enable: boolean request for enable/disable
+ *
+ * Return: error code.
+ */
+int dpu_power_resource_enable(struct dpu_power_handle *pdata,
+ struct dpu_power_client *pclient, bool enable);
+
+/**
+ * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
+ * @phandle: power handle containing the resources
+ * @client: client information to bandwidth control
+ * @enable: true to enable bandwidth for data bus
+ *
+ * Return: none
+ */
+void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
+ struct dpu_power_client *pclient, int enable);
+
+/**
+ * dpu_power_handle_register_event - register a callback function for an event.
+ * Clients can register for multiple events with a single register.
+ * Any block with access to phandle can register for the event
+ * notification.
+ * @phandle: power handle containing the resources
+ * @event_type: event type to register; refer to DPU_POWER_EVENT_*
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback on event trigger
+ * @client_name: name of the registering client, used in debug output
+ *
+ * Return: event pointer if success, or error pointer otherwise
+ */
+struct dpu_power_event *dpu_power_handle_register_event(
+ struct dpu_power_handle *phandle,
+ u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+ void *usr, char *client_name);
+/**
+ * dpu_power_handle_unregister_event - unregister callback for event(s)
+ * @phandle: power handle containing the resources
+ * @event: event pointer returned after power handle register
+ */
+void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
+ struct dpu_power_event *event);
+
+/**
+ * dpu_power_handle_get_dbus_name - get name of given data bus identifier
+ * @bus_id: data bus identifier
+ * Return: Pointer to name string if success; NULL otherwise
+ */
+const char *dpu_power_handle_get_dbus_name(u32 bus_id);
+
+#endif /* _DPU_POWER_HANDLE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
new file mode 100644
index 000000000000..13c0a36d4ef9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -0,0 +1,1079 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_intf.h"
+#include "dpu_encoder.h"
+#include "dpu_trace.h"
+
+#define RESERVED_BY_OTHER(h, r) \
+ ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
+
+#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
+#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
+#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS))
+#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
+ (t).num_comp_enc == (r).num_enc && \
+ (t).num_intf == (r).num_intf)
+
+struct dpu_rm_topology_def {
+ enum dpu_rm_topology_name top_name;
+ int num_lm;
+ int num_comp_enc;
+ int num_intf;
+ int num_ctl;
+ int needs_split_display;
+};
+
+static const struct dpu_rm_topology_def g_top_table[] = {
+ { DPU_RM_TOPOLOGY_NONE, 0, 0, 0, 0, false },
+ { DPU_RM_TOPOLOGY_SINGLEPIPE, 1, 0, 1, 1, false },
+ { DPU_RM_TOPOLOGY_DUALPIPE, 2, 0, 2, 2, true },
+ { DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, 2, 0, 1, 1, false },
+};
+
+/**
+ * struct dpu_rm_requirements - Reservation requirements parameter bundle
+ * @top_ctrl: topology control preference from kernel client
+ * @topology: selected topology for the display
+ * @hw_res: Hardware resources required as reported by the encoders
+ */
+struct dpu_rm_requirements {
+ uint64_t top_ctrl;
+ const struct dpu_rm_topology_def *topology;
+ struct dpu_encoder_hw_resources hw_res;
+};
+
+/**
+ * struct dpu_rm_rsvp - Use Case Reservation tagging structure
+ * Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
+ * By using as a tag, rather than lists of pointers to HW blocks used
+ * we can avoid some list management since we don't know how many blocks
+ * of each type a given use case may require.
+ * @list: List head for list of all reservations
+ * @seq: Global RSVP sequence number for debugging, especially for
+ * differentiating differenct allocations for same encoder.
+ * @enc_id: Reservations are tracked by Encoder DRM object ID.
+ * CRTCs may be connected to multiple Encoders.
+ * An encoder or connector id identifies the display path.
+ * @topology DRM<->HW topology use case
+ */
+struct dpu_rm_rsvp {
+ struct list_head list;
+ uint32_t seq;
+ uint32_t enc_id;
+ enum dpu_rm_topology_name topology;
+};
+
+/**
+ * struct dpu_rm_hw_blk - hardware block tracking list member
+ * @list: List head for list of all hardware blocks tracking items
+ * @rsvp: Pointer to use case reservation if reserved by a client
+ * @rsvp_nxt: Temporary pointer used during reservation to the incoming
+ * request. Will be swapped into rsvp if proposal is accepted
+ * @type: Type of hardware block this structure tracks
+ * @id: Hardware ID number, within its own space, i.e. LM_X
+ * @catalog: Pointer to the hardware catalog entry for this block
+ * @hw: Pointer to the hardware register access object for this block
+ */
+struct dpu_rm_hw_blk {
+ struct list_head list;
+ struct dpu_rm_rsvp *rsvp;
+ struct dpu_rm_rsvp *rsvp_nxt;
+ enum dpu_hw_blk_type type;
+ uint32_t id;
+ struct dpu_hw_blk *hw;
+};
+
+/**
+ * enum dpu_rm_dbg_rsvp_stage - steps in making a reservation, for event logging
+ */
+enum dpu_rm_dbg_rsvp_stage {
+ DPU_RM_STAGE_BEGIN,
+ DPU_RM_STAGE_AFTER_CLEAR,
+ DPU_RM_STAGE_AFTER_RSVPNEXT,
+ DPU_RM_STAGE_FINAL
+};
+
+static void _dpu_rm_print_rsvps(
+ struct dpu_rm *rm,
+ enum dpu_rm_dbg_rsvp_stage stage)
+{
+ struct dpu_rm_rsvp *rsvp;
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+
+ DPU_DEBUG("%d\n", stage);
+
+ list_for_each_entry(rsvp, &rm->rsvps, list) {
+ DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
+ rsvp->enc_id, rsvp->topology);
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (!blk->rsvp && !blk->rsvp_nxt)
+ continue;
+
+ DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
+ (blk->rsvp) ? blk->rsvp->seq : 0,
+ (blk->rsvp) ? blk->rsvp->enc_id : 0,
+ (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
+ (blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
+ blk->type, blk->id);
+ }
+ }
+}
+
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
+{
+ return rm->hw_mdp;
+}
+
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology)
+{
+ int i;
+
+ for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
+ if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
+ return g_top_table[i].top_name;
+
+ return DPU_RM_TOPOLOGY_NONE;
+}
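
RM_IS_TOPOLOGY_MATCH() compares only the LM, compression-encoder and interface counts, so the lookup is a linear scan of g_top_table. For example, assuming the msm_display_topology fields named in the macro:

	struct msm_display_topology t = {
		.num_lm = 2,
		.num_enc = 0,
		.num_intf = 1,
	};

	/* matches the { DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, 2, 0, 1, ... } row */
	enum dpu_rm_topology_name name = dpu_rm_get_topology_name(t);
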
+
+void dpu_rm_init_hw_iter(
+ struct dpu_rm_hw_iter *iter,
+ uint32_t enc_id,
+ enum dpu_hw_blk_type type)
+{
+ memset(iter, 0, sizeof(*iter));
+ iter->enc_id = enc_id;
+ iter->type = type;
+}
+
+static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+ struct list_head *blk_list;
+
+ if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
+ DPU_ERROR("invalid rm\n");
+ return false;
+ }
+
+ i->hw = NULL;
+ blk_list = &rm->hw_blks[i->type];
+
+ if (i->blk && (&i->blk->list == blk_list)) {
+ DPU_DEBUG("attempt resume iteration past last\n");
+ return false;
+ }
+
+ i->blk = list_prepare_entry(i->blk, blk_list, list);
+
+ list_for_each_entry_continue(i->blk, blk_list, list) {
+ struct dpu_rm_rsvp *rsvp = i->blk->rsvp;
+
+ if (i->blk->type != i->type) {
+ DPU_ERROR("found incorrect block type %d on %d list\n",
+ i->blk->type, i->type);
+ return false;
+ }
+
+ if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+ i->hw = i->blk->hw;
+ DPU_DEBUG("found type %d id %d for enc %d\n",
+ i->type, i->blk->id, i->enc_id);
+ return true;
+ }
+ }
+
+ DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
+
+ return false;
+}
+
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+ bool ret;
+
+ mutex_lock(&rm->rm_lock);
+ ret = _dpu_rm_get_hw_locked(rm, i);
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
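
The iterator walks a single block type; a non-zero enc_id restricts it to blocks reserved for that encoder, while enc_id == 0 visits every block of that type. A sketch of the expected usage (the drm_enc variable is illustrative):

	struct dpu_rm_hw_iter iter;

	dpu_rm_init_hw_iter(&iter, drm_enc->base.id, DPU_HW_BLK_LM);
	while (dpu_rm_get_hw(rm, &iter)) {
		struct dpu_hw_mixer *lm = to_dpu_hw_mixer(iter.hw);

		/* program each mixer reserved for this encoder */
	}
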
+
+static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
+{
+ switch (type) {
+ case DPU_HW_BLK_LM:
+ dpu_hw_lm_destroy(hw);
+ break;
+ case DPU_HW_BLK_CTL:
+ dpu_hw_ctl_destroy(hw);
+ break;
+ case DPU_HW_BLK_CDM:
+ dpu_hw_cdm_destroy(hw);
+ break;
+ case DPU_HW_BLK_PINGPONG:
+ dpu_hw_pingpong_destroy(hw);
+ break;
+ case DPU_HW_BLK_INTF:
+ dpu_hw_intf_destroy(hw);
+ break;
+ case DPU_HW_BLK_SSPP:
+ /* SSPPs are not managed by the resource manager */
+ case DPU_HW_BLK_TOP:
+ /* Top is a singleton, not managed in hw_blks list */
+ case DPU_HW_BLK_MAX:
+ default:
+ DPU_ERROR("unsupported block type %d\n", type);
+ break;
+ }
+}
+
+int dpu_rm_destroy(struct dpu_rm *rm)
+{
+ struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+ struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
+ enum dpu_hw_blk_type type;
+
+ if (!rm) {
+ DPU_ERROR("invalid rm\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
+ list_del(&rsvp_cur->list);
+ kfree(rsvp_cur);
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
+ list) {
+ list_del(&hw_cur->list);
+ _dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
+ kfree(hw_cur);
+ }
+ }
+
+ dpu_hw_mdp_destroy(rm->hw_mdp);
+ rm->hw_mdp = NULL;
+
+ mutex_destroy(&rm->rm_lock);
+
+ return 0;
+}
+
+static int _dpu_rm_hw_blk_create(
+ struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ enum dpu_hw_blk_type type,
+ uint32_t id,
+ void *hw_catalog_info)
+{
+ struct dpu_rm_hw_blk *blk;
+ struct dpu_hw_mdp *hw_mdp;
+ void *hw;
+
+ hw_mdp = rm->hw_mdp;
+
+ switch (type) {
+ case DPU_HW_BLK_LM:
+ hw = dpu_hw_lm_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_CTL:
+ hw = dpu_hw_ctl_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_CDM:
+ hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp);
+ break;
+ case DPU_HW_BLK_PINGPONG:
+ hw = dpu_hw_pingpong_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_INTF:
+ hw = dpu_hw_intf_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_SSPP:
+ /* SSPPs are not managed by the resource manager */
+ case DPU_HW_BLK_TOP:
+ /* Top is a singleton, not managed in hw_blks list */
+ case DPU_HW_BLK_MAX:
+ default:
+ DPU_ERROR("unsupported block type %d\n", type);
+ return -EINVAL;
+ }
+
+ if (IS_ERR_OR_NULL(hw)) {
+ DPU_ERROR("failed hw object creation: type %d, err %ld\n",
+ type, PTR_ERR(hw));
+ return -EFAULT;
+ }
+
+ blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+ if (!blk) {
+ _dpu_rm_hw_destroy(type, hw);
+ return -ENOMEM;
+ }
+
+ blk->type = type;
+ blk->id = id;
+ blk->hw = hw;
+ list_add_tail(&blk->list, &rm->hw_blks[type]);
+
+ return 0;
+}
+
+int dpu_rm_init(struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ struct drm_device *dev)
+{
+ int rc, i;
+ enum dpu_hw_blk_type type;
+
+ if (!rm || !cat || !mmio || !dev) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ /* Clear, setup lists */
+ memset(rm, 0, sizeof(*rm));
+
+ mutex_init(&rm->rm_lock);
+
+ INIT_LIST_HEAD(&rm->rsvps);
+ for (type = 0; type < DPU_HW_BLK_MAX; type++)
+ INIT_LIST_HEAD(&rm->hw_blks[type]);
+
+ rm->dev = dev;
+
+ /* Some of the sub-blocks require an mdptop to be created */
+ rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
+ if (IS_ERR_OR_NULL(rm->hw_mdp)) {
+ rc = PTR_ERR(rm->hw_mdp);
+ rm->hw_mdp = NULL;
+ DPU_ERROR("failed: mdp hw not available\n");
+ goto fail;
+ }
+
+ /* Interrogate HW catalog and create tracking items for hw blocks */
+ for (i = 0; i < cat->mixer_count; i++) {
+ struct dpu_lm_cfg *lm = &cat->mixer[i];
+
+ if (lm->pingpong == PINGPONG_MAX) {
+ DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
+ continue;
+ }
+
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
+ cat->mixer[i].id, &cat->mixer[i]);
+ if (rc) {
+ DPU_ERROR("failed: lm hw not available\n");
+ goto fail;
+ }
+
+ if (!rm->lm_max_width) {
+ rm->lm_max_width = lm->sblk->maxwidth;
+ } else if (rm->lm_max_width != lm->sblk->maxwidth) {
+ /*
+ * Don't expect to have hw where lm max widths differ.
+ * If found, take the min.
+ */
+ DPU_ERROR("unsupported: lm maxwidth differs\n");
+ if (rm->lm_max_width > lm->sblk->maxwidth)
+ rm->lm_max_width = lm->sblk->maxwidth;
+ }
+ }
+
+ for (i = 0; i < cat->pingpong_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
+ cat->pingpong[i].id, &cat->pingpong[i]);
+ if (rc) {
+ DPU_ERROR("failed: pp hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->intf_count; i++) {
+ if (cat->intf[i].type == INTF_NONE) {
+ DPU_DEBUG("skip intf %d with type none\n", i);
+ continue;
+ }
+
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
+ cat->intf[i].id, &cat->intf[i]);
+ if (rc) {
+ DPU_ERROR("failed: intf hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->ctl_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
+ cat->ctl[i].id, &cat->ctl[i]);
+ if (rc) {
+ DPU_ERROR("failed: ctl hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->cdm_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM,
+ cat->cdm[i].id, &cat->cdm[i]);
+ if (rc) {
+ DPU_ERROR("failed: cdm hw not available\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ dpu_rm_destroy(rm);
+
+ return rc;
+}
+
+/**
+ * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
+ * proposed use case requirements, incl. hardwired dependent blocks like
+ * pingpong
+ * @rm: dpu resource manager handle
+ * @rsvp: reservation currently being created
+ * @reqs: proposed use case requirements
+ * @lm: proposed layer mixer, function checks if lm, and all other hardwired
+ * blocks connected to the lm (pp) are available and appropriate
+ * @pp: output parameter, pingpong block attached to the layer mixer.
+ * NULL if pp was not available, or not matching requirements.
+ * @primary_lm: if non-null, this function checks if lm is compatible with
+ * primary_lm, as well as satisfying all other requirements
+ * Return: true if lm matches all requirements, false otherwise
+ */
+static bool _dpu_rm_check_lm_and_get_connected_blks(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs,
+ struct dpu_rm_hw_blk *lm,
+ struct dpu_rm_hw_blk **pp,
+ struct dpu_rm_hw_blk *primary_lm)
+{
+ const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
+ struct dpu_rm_hw_iter iter;
+
+ *pp = NULL;
+
+ DPU_DEBUG("check lm %d pp %d\n",
+ lm_cfg->id, lm_cfg->pingpong);
+
+ /* Check if this layer mixer is a peer of the proposed primary LM */
+ if (primary_lm) {
+ const struct dpu_lm_cfg *prim_lm_cfg =
+ to_dpu_hw_mixer(primary_lm->hw)->cap;
+
+ if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+ DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
+ prim_lm_cfg->id);
+ return false;
+ }
+ }
+
+ /* Already reserved? */
+ if (RESERVED_BY_OTHER(lm, rsvp)) {
+ DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
+ return false;
+ }
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ if (iter.blk->id == lm_cfg->pingpong) {
+ *pp = iter.blk;
+ break;
+ }
+ }
+
+ if (!*pp) {
+ DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
+ return false;
+ }
+
+ if (RESERVED_BY_OTHER(*pp, rsvp)) {
+ DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
+ (*pp)->id);
+ return false;
+ }
+
+ return true;
+}
+
+static int _dpu_rm_reserve_lms(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs)
+
+{
+ struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
+ struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
+ struct dpu_rm_hw_iter iter_i, iter_j;
+ int lm_count = 0;
+ int i, rc = 0;
+
+ if (!reqs->topology->num_lm) {
+ DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
+ return -EINVAL;
+ }
+
+ /* Find a primary mixer */
+ dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
+ while (lm_count != reqs->topology->num_lm &&
+ _dpu_rm_get_hw_locked(rm, &iter_i)) {
+ memset(&lm, 0, sizeof(lm));
+ memset(&pp, 0, sizeof(pp));
+
+ lm_count = 0;
+ lm[lm_count] = iter_i.blk;
+
+ if (!_dpu_rm_check_lm_and_get_connected_blks(
+ rm, rsvp, reqs, lm[lm_count],
+ &pp[lm_count], NULL))
+ continue;
+
+ ++lm_count;
+
+ /* Valid primary mixer found, find matching peers */
+ dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
+
+ while (lm_count != reqs->topology->num_lm &&
+ _dpu_rm_get_hw_locked(rm, &iter_j)) {
+ if (iter_i.blk == iter_j.blk)
+ continue;
+
+ if (!_dpu_rm_check_lm_and_get_connected_blks(
+ rm, rsvp, reqs, iter_j.blk,
+ &pp[lm_count], iter_i.blk))
+ continue;
+
+ lm[lm_count] = iter_j.blk;
+ ++lm_count;
+ }
+ }
+
+ if (lm_count != reqs->topology->num_lm) {
+ DPU_DEBUG("unable to find appropriate mixers\n");
+ return -ENAVAIL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lm); i++) {
+ if (!lm[i])
+ break;
+
+ lm[i]->rsvp_nxt = rsvp;
+ pp[i]->rsvp_nxt = rsvp;
+
+ trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
+ pp[i]->id);
+ }
+
+ return rc;
+}
+
+static int _dpu_rm_reserve_ctls(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ const struct dpu_rm_topology_def *top)
+{
+ struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
+ struct dpu_rm_hw_iter iter;
+ int i = 0;
+
+ memset(&ctls, 0, sizeof(ctls));
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
+ unsigned long features = ctl->caps->features;
+ bool has_split_display;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ continue;
+
+ has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
+
+ DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
+
+ if (top->needs_split_display != has_split_display)
+ continue;
+
+ ctls[i] = iter.blk;
+ DPU_DEBUG("ctl %d match\n", iter.blk->id);
+
+ if (++i == top->num_ctl)
+ break;
+ }
+
+ if (i != top->num_ctl)
+ return -ENAVAIL;
+
+ for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
+ ctls[i]->rsvp_nxt = rsvp;
+ trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
+ rsvp->enc_id);
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_cdm(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ uint32_t id,
+ enum dpu_hw_blk_type type)
+{
+ struct dpu_rm_hw_iter iter;
+
+ DRM_DEBUG_KMS("type %d id %d\n", type, id);
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw);
+ const struct dpu_cdm_cfg *caps = cdm->caps;
+ bool match = false;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ continue;
+
+ if (type == DPU_HW_BLK_INTF && id != INTF_MAX)
+ match = test_bit(id, &caps->intf_connect);
+
+ DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n",
+ iter.blk->type, iter.blk->id, rsvp->enc_id,
+ caps->intf_connect, match);
+
+ if (!match)
+ continue;
+
+ trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type,
+ rsvp->enc_id);
+ iter.blk->rsvp_nxt = rsvp;
+ break;
+ }
+
+ if (!iter.hw) {
+ DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
+ return -ENAVAIL;
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_intf(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ uint32_t id,
+ enum dpu_hw_blk_type type,
+ bool needs_cdm)
+{
+ struct dpu_rm_hw_iter iter;
+ int ret = 0;
+
+ /* Find the block entry in the rm, and note the reservation */
+ dpu_rm_init_hw_iter(&iter, 0, type);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ if (iter.blk->id != id)
+ continue;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+ DPU_ERROR("type %d id %d already reserved\n", type, id);
+ return -ENAVAIL;
+ }
+
+ iter.blk->rsvp_nxt = rsvp;
+ trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
+ rsvp->enc_id);
+ break;
+ }
+
+ /* Shouldn't happen since intfs are fixed at probe */
+ if (!iter.hw) {
+ DPU_ERROR("couldn't find type %d id %d\n", type, id);
+ return -EINVAL;
+ }
+
+ if (needs_cdm)
+ ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type);
+
+ return ret;
+}
+
+static int _dpu_rm_reserve_intf_related_hw(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_encoder_hw_resources *hw_res)
+{
+ int i, ret = 0;
+ u32 id;
+
+ for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
+ if (hw_res->intfs[i] == INTF_MODE_NONE)
+ continue;
+ id = i + INTF_0;
+ ret = _dpu_rm_reserve_intf(rm, rsvp, id,
+ DPU_HW_BLK_INTF, hw_res->needs_cdm);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int _dpu_rm_make_next_rsvp(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs)
+{
+ int ret;
+ struct dpu_rm_topology_def topology;
+
+ /* Create reservation info, tag reserved blocks with it as we go */
+ rsvp->seq = ++rm->rsvp_next_seq;
+ rsvp->enc_id = enc->base.id;
+ rsvp->topology = reqs->topology->top_name;
+ list_add_tail(&rsvp->list, &rm->rsvps);
+
+ ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
+ if (ret) {
+ DPU_ERROR("unable to find appropriate mixers\n");
+ return ret;
+ }
+
+ /*
+ * Do assignment preferring to give away low-resource CTLs first:
+ * - Check mixers without Split Display
+ * - Only then allow to grab from CTLs with split display capability
+ */
+ ret = _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
+ if (ret && !reqs->topology->needs_split_display) {
+ memcpy(&topology, reqs->topology, sizeof(topology));
+ topology.needs_split_display = true;
+ ret = _dpu_rm_reserve_ctls(rm, rsvp, &topology);
+ }
+ if (ret) {
+ DPU_ERROR("unable to find appropriate CTL\n");
+ return ret;
+ }
+
+ /* Assign INTFs and blks whose usage is tied to them: CTL & CDM */
+ ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int _dpu_rm_populate_requirements(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct dpu_rm_requirements *reqs,
+ struct msm_display_topology req_topology)
+{
+ int i;
+
+ memset(reqs, 0, sizeof(*reqs));
+
+ dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
+
+ for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
+ if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
+ req_topology)) {
+ reqs->topology = &g_top_table[i];
+ break;
+ }
+ }
+
+ if (!reqs->topology) {
+ DPU_ERROR("invalid topology for the display\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Set the requirement based on caps if not set from user space.
+ * This ensures an LM tied to the DS blocks is selected.
+ * Currently, DS blocks are tied to LM 0 and LM 1 (primary display).
+ */
+ if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
+ conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
+ reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS);
+
+ DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
+ reqs->hw_res.display_num_of_h_tiles);
+ DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
+ reqs->topology->num_lm, reqs->topology->num_ctl,
+ reqs->topology->top_name,
+ reqs->topology->needs_split_display);
+
+ return 0;
+}
+
+static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc)
+{
+ struct dpu_rm_rsvp *i;
+
+ if (!rm || !enc) {
+ DPU_ERROR("invalid params\n");
+ return NULL;
+ }
+
+ if (list_empty(&rm->rsvps))
+ return NULL;
+
+ list_for_each_entry(i, &rm->rsvps, list)
+ if (i->enc_id == enc->base.id)
+ return i;
+
+ return NULL;
+}
+
+static struct drm_connector *_dpu_rm_get_connector(
+ struct drm_encoder *enc)
+{
+ struct drm_connector *conn = NULL;
+ struct list_head *connector_list =
+ &enc->dev->mode_config.connector_list;
+
+ list_for_each_entry(conn, connector_list, head)
+ if (conn->encoder == enc)
+ return conn;
+
+ return NULL;
+}
+
+/**
+ * _dpu_rm_release_rsvp - release a reservation and the resources it holds
+ * @rm: KMS handle
+ * @rsvp: RSVP pointer to release
+ * @conn: connector the reservation is associated with
+ */
+static void _dpu_rm_release_rsvp(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct drm_connector *conn)
+{
+ struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+
+ if (!rsvp)
+ return;
+
+ DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
+
+ list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
+ if (rsvp == rsvp_c) {
+ list_del(&rsvp_c->list);
+ break;
+ }
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->rsvp == rsvp) {
+ blk->rsvp = NULL;
+ DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
+ rsvp->seq, rsvp->enc_id,
+ blk->type, blk->id);
+ }
+ if (blk->rsvp_nxt == rsvp) {
+ blk->rsvp_nxt = NULL;
+ DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
+ rsvp->seq, rsvp->enc_id,
+ blk->type, blk->id);
+ }
+ }
+ }
+
+ kfree(rsvp);
+}
+
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+{
+ struct dpu_rm_rsvp *rsvp;
+ struct drm_connector *conn;
+
+ if (!rm || !enc) {
+ DPU_ERROR("invalid params\n");
+ return;
+ }
+
+ mutex_lock(&rm->rm_lock);
+
+ rsvp = _dpu_rm_get_rsvp(rm, enc);
+ if (!rsvp) {
+ DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+ goto end;
+ }
+
+ conn = _dpu_rm_get_connector(enc);
+ if (!conn) {
+ DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
+ goto end;
+ }
+
+ _dpu_rm_release_rsvp(rm, rsvp, conn);
+end:
+ mutex_unlock(&rm->rm_lock);
+}
+
+static int _dpu_rm_commit_rsvp(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+ int ret = 0;
+
+ /* Swap next rsvp to be the active */
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->rsvp_nxt) {
+ blk->rsvp = blk->rsvp_nxt;
+ blk->rsvp_nxt = NULL;
+ }
+ }
+ }
+
+ if (!ret)
+ DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
+ rsvp->topology);
+
+ return ret;
+}
+
+int dpu_rm_reserve(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct msm_display_topology topology,
+ bool test_only)
+{
+ struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+ struct dpu_rm_requirements reqs;
+ int ret;
+
+ if (!rm || !enc || !crtc_state || !conn_state) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ /* Check if this is just a page-flip */
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return 0;
+
+ DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
+ conn_state->connector->base.id, enc->base.id,
+ crtc_state->crtc->base.id, test_only);
+
+ mutex_lock(&rm->rm_lock);
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
+
+ ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
+ conn_state, &reqs, topology);
+ if (ret) {
+ DPU_ERROR("failed to populate hw requirements\n");
+ goto end;
+ }
+
+ /*
+ * We only support one active reservation per hw block. But to
+ * implement transactional semantics for test-only, and to allow
+ * failure while modifying an existing reservation, over the course
+ * of this function we can have two reservations:
+ * Current: the existing reservation
+ * Next: the proposed reservation, which may fail, or may be
+ * discarded if in test-only mode.
+ * If the reservation succeeds and we are not in test-only mode,
+ * the current reservation is replaced with the next one.
+ */
+ rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
+ if (!rsvp_nxt) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
+
+ /*
+ * User can request that we clear out any reservation during the
+ * atomic_check phase by using this CLEAR bit
+ */
+ if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
+ DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
+ rsvp_cur->seq, rsvp_cur->enc_id);
+ _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+ rsvp_cur = NULL;
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
+ }
+
+ /* Check the proposed reservation, store it in hw's "next" field */
+ ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+ rsvp_nxt, &reqs);
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
+
+ if (ret) {
+ DPU_ERROR("failed to reserve hw resources: %d\n", ret);
+ _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ } else if (test_only && !RM_RQ_LOCK(&reqs)) {
+ /*
+ * Normally, if test_only, test the reservation and then undo
+ * However, if the user requests LOCK, then keep the reservation
+ * made during the atomic_check phase.
+ */
+ DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
+ rsvp_nxt->seq, rsvp_nxt->enc_id);
+ _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ } else {
+ if (test_only && RM_RQ_LOCK(&reqs))
+ DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
+ rsvp_nxt->seq, rsvp_nxt->enc_id);
+
+ _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+
+ ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+ }
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
+
+end:
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
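+
+/*
+ * Example caller flow (sketch; the caller-side variable names are
+ * illustrative): an encoder typically reserves with test_only=true from its
+ * atomic_check path, again with test_only=false at modeset time, and
+ * releases the blocks on teardown:
+ *
+ * ret = dpu_rm_reserve(rm, enc, crtc_state, conn_state, topology, true);
+ * if (ret)
+ * return ret;
+ * ...
+ * ret = dpu_rm_reserve(rm, enc, crtc_state, conn_state, topology, false);
+ * ...
+ * dpu_rm_release(rm, enc);
+ */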
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
new file mode 100644
index 000000000000..ffd1841a6067
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_RM_H__
+#define __DPU_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "dpu_hw_top.h"
+
+/**
+ * enum dpu_rm_topology_name - HW resource use case in use by connector
+ * @DPU_RM_TOPOLOGY_NONE: No topology in use currently
+ * @DPU_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
+ */
+enum dpu_rm_topology_name {
+ DPU_RM_TOPOLOGY_NONE = 0,
+ DPU_RM_TOPOLOGY_SINGLEPIPE,
+ DPU_RM_TOPOLOGY_DUALPIPE,
+ DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
+ DPU_RM_TOPOLOGY_MAX,
+};
+
+/**
+ * enum dpu_rm_topology_control - user-requested control bits for a reservation
+ * @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
+ * test, reserve the resources for this display.
+ * Normal behavior would not impact the reservation
+ * list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
+ * release any reservation held by this display.
+ * Normal behavior would not impact the
+ * reservation list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_DS : Require layer mixers with DS capabilities
+ */
+enum dpu_rm_topology_control {
+ DPU_RM_TOPCTL_RESERVE_LOCK,
+ DPU_RM_TOPCTL_RESERVE_CLEAR,
+ DPU_RM_TOPCTL_DS,
+};
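+
+/*
+ * These values are bit positions in a u64 top_ctrl mask; for example
+ * (sketch):
+ *
+ * top_ctrl |= BIT(DPU_RM_TOPCTL_DS);
+ */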
+
+/**
+ * struct dpu_rm - DPU dynamic hardware resource manager
+ * @dev: device handle for event logging purposes
+ * @rsvps: list of hardware reservations by each crtc->encoder->connector
+ * @hw_blks: array of lists of hardware resources present in the system, one
+ * list per type of hardware block
+ * @hw_mdp: hardware object for mdp_top
+ * @lm_max_width: cached layer mixer maximum width
+ * @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
+ */
+struct dpu_rm {
+ struct drm_device *dev;
+ struct list_head rsvps;
+ struct list_head hw_blks[DPU_HW_BLK_MAX];
+ struct dpu_hw_mdp *hw_mdp;
+ uint32_t lm_max_width;
+ uint32_t rsvp_next_seq;
+ struct mutex rm_lock;
+};
+
+/**
+ * struct dpu_rm_hw_blk - resource manager internal structure
+ * forward declaration for single iterator definition without void pointer
+ */
+struct dpu_rm_hw_blk;
+
+/**
+ * struct dpu_rm_hw_iter - iterator for use with dpu_rm
+ * @hw: dpu_hw object requested, or NULL on failure
+ * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator.
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+struct dpu_rm_hw_iter {
+ void *hw;
+ struct dpu_rm_hw_blk *blk;
+ uint32_t enc_id;
+ enum dpu_hw_blk_type type;
+};
+
+/**
+ * dpu_rm_init - Read hardware catalog and create reservation tracking objects
+ * for all HW blocks.
+ * @rm: DPU Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mmio: mapped register io address of MDP
+ * @dev: device handle for event logging purposes
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_init(struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ struct drm_device *dev);
+
+/**
+ * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
+ * @rm: DPU Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_destroy(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ * the connections in use and the user requirements, specified through
+ * related topology control properties, and reserve hardware blocks for
+ * that display chain.
+ * HW blocks can then be accessed through dpu_rm_get_* functions.
+ * HW reservations should be released via dpu_rm_release().
+ * @rm: DPU Resource Manager handle
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @conn_state: Proposed Atomic DRM Connector State handle
+ * @topology: Topology information for the display
+ * @test_only: Atomic-Test phase, discard results (unless property overrides)
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_reserve(struct dpu_rm *rm,
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct msm_display_topology topology,
+ bool test_only);
+
+/**
+ * dpu_rm_release - Given the encoder for the display chain, release any
+ * HW blocks previously reserved for that use case.
+ * @rm: DPU Resource Manager handle
+ * @enc: DRM Encoder handle
+ */
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+
+/**
+ * dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
+ * This is never reserved, and is usable by any display.
+ * @rm: DPU Resource Manager handle
+ * @Return: Pointer to hw block or NULL
+ */
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
+ * using dpu_rm_get_hw
+ * @iter: iter object to initialize
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+void dpu_rm_init_hw_iter(
+ struct dpu_rm_hw_iter *iter,
+ uint32_t enc_id,
+ enum dpu_hw_blk_type type);
+
+/**
+ * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
+ * Meant to do a single pass through the hardware list to iteratively
+ * retrieve hardware blocks of a given type for a given encoder.
+ * Initialize an iterator object.
+ * Set hw block type of interest. Set encoder id of interest, 0 for any.
+ * Function returns first hw of type for that encoder.
+ * Subsequent calls will return the next reserved hw of that type in-order.
+ * Iterator HW pointer will be null on failure to find hw.
+ * @rm: DPU Resource Manager handle
+ * @iter: iterator object
+ * @Return: true on match found, false on no match found
+ */
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
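+
+/*
+ * Example (sketch; the choice of CTL as the block type is illustrative):
+ * walk all CTL blocks reserved for a given encoder:
+ *
+ * struct dpu_rm_hw_iter iter;
+ *
+ * dpu_rm_init_hw_iter(&iter, enc->base.id, DPU_HW_BLK_CTL);
+ * while (dpu_rm_get_hw(rm, &iter)) {
+ * struct dpu_hw_ctl *ctl = iter.hw;
+ * ...
+ * }
+ */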
+
+/**
+ * dpu_rm_check_property_topctl - validate property bitmask before it is set
+ * @val: user's proposed topology control bitmask
+ * @Return: 0 on success or error
+ */
+int dpu_rm_check_property_topctl(uint64_t val);
+
+/**
+ * dpu_rm_get_topology_name - returns the name of the given topology
+ * definition
+ * @topology: topology definition
+ * @Return: name of the topology
+ */
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology);
+
+#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
new file mode 100644
index 000000000000..ae0ca5076238
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -0,0 +1,1007 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_DPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPU_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drm_rect.h>
+#include "dpu_crtc.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_plane.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpu
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpu_trace
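+
+/*
+ * All events below land under the "dpu" trace system. For example (sketch,
+ * assuming tracefs is mounted under the usual debugfs path):
+ *
+ * echo 1 > /sys/kernel/debug/tracing/events/dpu/enable
+ * cat /sys/kernel/debug/tracing/trace_pipe
+ */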
+
+TRACE_EVENT(dpu_perf_set_qos_luts,
+ TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+ u32 lut, u32 lut_usage),
+ TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(bool, rt)
+ __field(u32, fl)
+ __field(u64, lut)
+ __field(u32, lut_usage)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->rt = rt;
+ __entry->fl = fl;
+ __entry->lut = lut;
+ __entry->lut_usage = lut_usage;
+ ),
+ TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
+ __entry->pnum, __entry->fmt,
+ __entry->rt, __entry->fl,
+ __entry->lut, __entry->lut_usage)
+);
+
+TRACE_EVENT(dpu_perf_set_danger_luts,
+ TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+ u32 safe_lut),
+ TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(u32, mode)
+ __field(u32, danger_lut)
+ __field(u32, safe_lut)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->mode = mode;
+ __entry->danger_lut = danger_lut;
+ __entry->safe_lut = safe_lut;
+ ),
+ TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+ __entry->pnum, __entry->fmt,
+ __entry->mode, __entry->danger_lut,
+ __entry->safe_lut)
+);
+
+TRACE_EVENT(dpu_perf_set_ot,
+ TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+ TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, xin_id)
+ __field(u32, rd_lim)
+ __field(u32, vbif_idx)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->xin_id = xin_id;
+ __entry->rd_lim = rd_lim;
+ __entry->vbif_idx = vbif_idx;
+ ),
+ TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+ __entry->pnum, __entry->xin_id, __entry->rd_lim,
+ __entry->vbif_idx)
+);
+
+TRACE_EVENT(dpu_perf_update_bus,
+ TP_PROTO(int client, unsigned long long ab_quota,
+ unsigned long long ib_quota),
+ TP_ARGS(client, ab_quota, ib_quota),
+ TP_STRUCT__entry(
+ __field(int, client)
+ __field(u64, ab_quota)
+ __field(u64, ib_quota)
+ ),
+ TP_fast_assign(
+ __entry->client = client;
+ __entry->ab_quota = ab_quota;
+ __entry->ib_quota = ib_quota;
+ ),
+ TP_printk("Request client:%d ab=%llu ib=%llu",
+ __entry->client,
+ __entry->ab_quota,
+ __entry->ib_quota)
+);
+
+
+TRACE_EVENT(dpu_cmd_release_bw,
+ TP_PROTO(u32 crtc_id),
+ TP_ARGS(crtc_id),
+ TP_STRUCT__entry(
+ __field(u32, crtc_id)
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ ),
+ TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+TRACE_EVENT(tracing_mark_write,
+ TP_PROTO(int pid, const char *name, bool trace_begin),
+ TP_ARGS(pid, name, trace_begin),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(trace_name, name)
+ __field(bool, trace_begin)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __assign_str(trace_name, name);
+ __entry->trace_begin = trace_begin;
+ ),
+ TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+ __entry->pid, __get_str(trace_name))
+);
+
+TRACE_EVENT(dpu_trace_counter,
+ TP_PROTO(int pid, char *name, int value),
+ TP_ARGS(pid, name, value),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(counter_name, name)
+ __field(int, value)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __assign_str(counter_name, name);
+ __entry->value = value;
+ ),
+ TP_printk("%d|%s|%d", __entry->pid,
+ __get_str(counter_name), __entry->value)
+);
+
+TRACE_EVENT(dpu_perf_crtc_update,
+ TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc,
+ u64 bw_ctl_ebi, u32 core_clk_rate,
+ bool stop_req, u32 update_bus, u32 update_clk),
+ TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate,
+ stop_req, update_bus, update_clk),
+ TP_STRUCT__entry(
+ __field(u32, crtc)
+ __field(u64, bw_ctl_mnoc)
+ __field(u64, bw_ctl_llcc)
+ __field(u64, bw_ctl_ebi)
+ __field(u32, core_clk_rate)
+ __field(bool, stop_req)
+ __field(u32, update_bus)
+ __field(u32, update_clk)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->bw_ctl_mnoc = bw_ctl_mnoc;
+ __entry->bw_ctl_llcc = bw_ctl_llcc;
+ __entry->bw_ctl_ebi = bw_ctl_ebi;
+ __entry->core_clk_rate = core_clk_rate;
+ __entry->stop_req = stop_req;
+ __entry->update_bus = update_bus;
+ __entry->update_clk = update_clk;
+ ),
+ TP_printk(
+ "crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+ __entry->crtc,
+ __entry->bw_ctl_mnoc,
+ __entry->bw_ctl_llcc,
+ __entry->bw_ctl_ebi,
+ __entry->core_clk_rate,
+ __entry->stop_req,
+ __entry->update_bus,
+ __entry->update_clk)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_irq_template,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intr_idx, intr_idx )
+ __field( int, hw_idx )
+ __field( int, irq_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intr_idx = intr_idx;
+ __entry->hw_idx = hw_idx;
+ __entry->irq_idx = irq_idx;
+ ),
+ TP_printk("id=%u, intr=%d, hw=%d, irq=%d",
+ __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+ __entry->irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_register_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_unregister_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+
+TRACE_EVENT(dpu_enc_irq_wait_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx, pp_idx, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intr_idx, intr_idx )
+ __field( int, hw_idx )
+ __field( int, irq_idx )
+ __field( enum dpu_pingpong, pp_idx )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intr_idx = intr_idx;
+ __entry->hw_idx = hw_idx;
+ __entry->irq_idx = irq_idx;
+ __entry->pp_idx = pp_idx;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+ __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
+);
+
+DECLARE_EVENT_CLASS(dpu_drm_obj_template,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ ),
+ TP_printk("id=%u", __entry->drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_atomic_check,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_mode_set,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_disable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff_reset,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_flip,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_vblank_cb,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_enc_enable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+
+TRACE_EVENT(dpu_enc_enable,
+ TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
+ TP_ARGS(drm_id, hdisplay, vdisplay),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, hdisplay )
+ __field( int, vdisplay )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->hdisplay = hdisplay;
+ __entry->vdisplay = vdisplay;
+ ),
+ TP_printk("id=%u, mode=%dx%d",
+ __entry->drm_id, __entry->hdisplay, __entry->vdisplay)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_keyval_template,
+ TP_PROTO(uint32_t drm_id, int val),
+ TP_ARGS(drm_id, val),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, val )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->val = val;
+ ),
+ TP_printk("id=%u, val=%d", __entry->drm_id, __entry->val)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_underrun_cb,
+ TP_PROTO(uint32_t drm_id, int count),
+ TP_ARGS(drm_id, count)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_trigger_start,
+ TP_PROTO(uint32_t drm_id, int ctl_idx),
+ TP_ARGS(drm_id, ctl_idx)
+);
+
+TRACE_EVENT(dpu_enc_atomic_check_flags,
+ TP_PROTO(uint32_t drm_id, unsigned int flags, int private_flags),
+ TP_ARGS(drm_id, flags, private_flags),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, flags )
+ __field( int, private_flags )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->flags = flags;
+ __entry->private_flags = private_flags;
+ ),
+ TP_printk("id=%u, flags=%u, private_flags=%d",
+ __entry->drm_id, __entry->flags, __entry->private_flags)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ ),
+ TP_printk("id=%u, enable=%s",
+ __entry->drm_id, __entry->enable ? "true" : "false")
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_phys_cmd_connect_te,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+
+TRACE_EVENT(dpu_enc_rc,
+ TP_PROTO(uint32_t drm_id, u32 sw_event, bool idle_pc_supported,
+ int rc_state, const char *stage),
+ TP_ARGS(drm_id, sw_event, idle_pc_supported, rc_state, stage),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, sw_event )
+ __field( bool, idle_pc_supported )
+ __field( int, rc_state )
+ __string( stage_str, stage )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->sw_event = sw_event;
+ __entry->idle_pc_supported = idle_pc_supported;
+ __entry->rc_state = rc_state;
+ __assign_str(stage_str, stage);
+ ),
+ TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d\n",
+ __get_str(stage_str), __entry->drm_id, __entry->sw_event,
+ __entry->idle_pc_supported ? "true" : "false",
+ __entry->rc_state)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
+ TP_PROTO(uint32_t drm_id, u32 event, enum dpu_intf intf_idx),
+ TP_ARGS(drm_id, event, intf_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ __field( enum dpu_intf, intf_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ __entry->intf_idx = intf_idx;
+ ),
+ TP_printk("id=%u, event=%u, intf=%d", __entry->drm_id, __entry->event,
+ __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb,
+ TP_PROTO(uint32_t drm_id, unsigned int idx,
+ unsigned long frame_busy_mask),
+ TP_ARGS(drm_id, idx, frame_busy_mask),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, idx )
+ __field( unsigned long, frame_busy_mask )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->idx = idx;
+ __entry->frame_busy_mask = frame_busy_mask;
+ ),
+ TP_printk("id=%u, idx=%u, frame_busy_mask=%lx", __entry->drm_id,
+ __entry->idx, __entry->frame_busy_mask)
+);
+
+TRACE_EVENT(dpu_enc_trigger_flush,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
+ int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret),
+ TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
+ pending_flush_ret),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( int, pending_kickoff_cnt )
+ __field( int, ctl_idx )
+ __field( u32, pending_flush_ret )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->pending_kickoff_cnt = pending_kickoff_cnt;
+ __entry->ctl_idx = ctl_idx;
+ __entry->pending_flush_ret = pending_flush_ret;
+ ),
+ TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
+ "pending_flush_ret=%u", __entry->drm_id,
+ __entry->intf_idx, __entry->pending_kickoff_cnt,
+ __entry->ctl_idx, __entry->pending_flush_ret)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( ktime_t, time )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->time = time;
+ ),
+ TP_printk("id=%u, time=%lld", __entry->drm_id,
+ ktime_to_ms(__entry->time))
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_vsync_event_work,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time)
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_early_kickoff,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time)
+);
+
+DECLARE_EVENT_CLASS(dpu_id_event_template,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, event=%u", __entry->drm_id, __entry->event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_enc_frame_done_timeout,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+
+TRACE_EVENT(dpu_enc_wait_event_timeout,
+ TP_PROTO(uint32_t drm_id, int32_t hw_id, int rc, s64 time,
+ s64 expected_time, int atomic_cnt),
+ TP_ARGS(drm_id, hw_id, rc, time, expected_time, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int32_t, hw_id )
+ __field( int, rc )
+ __field( s64, time )
+ __field( s64, expected_time )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->hw_id = hw_id;
+ __entry->rc = rc;
+ __entry->time = time;
+ __entry->expected_time = expected_time;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, hw_id=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
+ __entry->drm_id, __entry->hw_id, __entry->rc, __entry->time,
+ __entry->expected_time, __entry->atomic_cnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_irq_ctrl,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable,
+ int refcnt),
+ TP_ARGS(drm_id, pp, enable, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( bool, enable )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->enable = enable;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, pp=%d, enable=%s, refcnt=%d", __entry->drm_id,
+ __entry->pp, __entry->enable ? "true" : "false",
+ __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pp_tx_done,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int new_count,
+ u32 event),
+ TP_ARGS(drm_id, pp, new_count, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, new_count )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->new_count = new_count;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, pp=%d, new_count=%d, event=%u", __entry->drm_id,
+ __entry->pp, __entry->new_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pdone_timeout,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int timeout_count,
+ int kickoff_count, u32 event),
+ TP_ARGS(drm_id, pp, timeout_count, kickoff_count, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, timeout_count )
+ __field( int, kickoff_count )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->timeout_count = timeout_count;
+ __entry->kickoff_count = kickoff_count;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, pp=%d, timeout_count=%d, kickoff_count=%d, event=%u",
+ __entry->drm_id, __entry->pp, __entry->timeout_count,
+ __entry->kickoff_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_post_kickoff,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx),
+ TP_ARGS(drm_id, intf_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ ),
+ TP_printk("id=%u, intf_idx=%d", __entry->drm_id, __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_irq_ctrl,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, bool enable,
+ int refcnt),
+ TP_ARGS(drm_id, intf_idx, enable, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( bool, enable )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->enable = enable;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, intf_idx=%d enable=%s refcnt=%d", __entry->drm_id,
+ __entry->intf_idx, __entry->enable ? "true" : "false",
+ __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_crtc_setup_mixer,
+ TP_PROTO(uint32_t crtc_id, uint32_t plane_id,
+ struct drm_plane_state *state, struct dpu_plane_state *pstate,
+ uint32_t stage_idx, enum dpu_sspp sspp, uint32_t pixel_format,
+ uint64_t modifier),
+ TP_ARGS(crtc_id, plane_id, state, pstate, stage_idx, sspp,
+ pixel_format, modifier),
+ TP_STRUCT__entry(
+ __field( uint32_t, crtc_id )
+ __field( uint32_t, plane_id )
+ __field( struct drm_plane_state*,state )
+ __field( struct dpu_plane_state*,pstate )
+ __field( uint32_t, stage_idx )
+ __field( enum dpu_sspp, sspp )
+ __field( uint32_t, pixel_format )
+ __field( uint64_t, modifier )
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ __entry->plane_id = plane_id;
+ __entry->state = state;
+ __entry->pstate = pstate;
+ __entry->stage_idx = stage_idx;
+ __entry->sspp = sspp;
+ __entry->pixel_format = pixel_format;
+ __entry->modifier = modifier;
+ ),
+ TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:{%ux%u+%ux%u} "
+ "dst:{%ux%u+%ux%u} stage_idx:%u stage:%d, sspp:%d "
+ "multirect_index:%d multirect_mode:%u pix_format:%u "
+ "modifier:%llu",
+ __entry->crtc_id, __entry->plane_id,
+ __entry->state->fb ? __entry->state->fb->base.id : -1,
+ __entry->state->src_w >> 16, __entry->state->src_h >> 16,
+ __entry->state->src_x >> 16, __entry->state->src_y >> 16,
+ __entry->state->crtc_w, __entry->state->crtc_h,
+ __entry->state->crtc_x, __entry->state->crtc_y,
+ __entry->stage_idx, __entry->pstate->stage, __entry->sspp,
+ __entry->pstate->multirect_index,
+ __entry->pstate->multirect_mode, __entry->pixel_format,
+ __entry->modifier)
+);
+
+TRACE_EVENT(dpu_crtc_setup_lm_bounds,
+ TP_PROTO(uint32_t drm_id, int mixer, struct drm_rect *bounds),
+ TP_ARGS(drm_id, mixer, bounds),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, mixer )
+ __field( struct drm_rect *, bounds )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->mixer = mixer;
+ __entry->bounds = bounds;
+ ),
+ TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
+ __entry->mixer, DRM_RECT_ARG(__entry->bounds))
+);
+
+TRACE_EVENT(dpu_crtc_vblank_enable,
+ TP_PROTO(uint32_t drm_id, uint32_t enc_id, bool enable,
+ struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enc_id, enable, crtc),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( uint32_t, enc_id )
+ __field( bool, enable )
+ __field( struct dpu_crtc *, crtc )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enc_id = enc_id;
+ __entry->enable = enable;
+ __entry->crtc = crtc;
+ ),
+ TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
+ "vblank_req:%s}",
+ __entry->drm_id, __entry->enc_id,
+ __entry->enable ? "true" : "false",
+ __entry->crtc->enabled ? "true" : "false",
+ __entry->crtc->suspend ? "true" : "false",
+ __entry->crtc->vblank_requested ? "true" : "false")
+);
+
+DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ __field( struct dpu_crtc *, crtc )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ __entry->crtc = crtc;
+ ),
+ TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+ __entry->drm_id, __entry->enable ? "true" : "false",
+ __entry->crtc->enabled ? "true" : "false",
+ __entry->crtc->suspend ? "true" : "false",
+ __entry->crtc->vblank_requested ? "true" : "false")
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_disable,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_vblank,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+
+TRACE_EVENT(dpu_crtc_disable_frame_pending,
+ TP_PROTO(uint32_t drm_id, int frame_pending),
+ TP_ARGS(drm_id, frame_pending),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, frame_pending )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->frame_pending = frame_pending;
+ ),
+ TP_printk("id:%u frame_pending:%d", __entry->drm_id,
+ __entry->frame_pending)
+);
+
+TRACE_EVENT(dpu_plane_set_scanout,
+ TP_PROTO(enum dpu_sspp index, struct dpu_hw_fmt_layout *layout,
+ enum dpu_sspp_multirect_index multirect_index),
+ TP_ARGS(index, layout, multirect_index),
+ TP_STRUCT__entry(
+ __field( enum dpu_sspp, index )
+ __field( struct dpu_hw_fmt_layout*, layout )
+ __field( enum dpu_sspp_multirect_index, multirect_index)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->layout = layout;
+ __entry->multirect_index = multirect_index;
+ ),
+ TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
+ "multirect_index:%d", __entry->index, __entry->layout->width,
+ __entry->layout->height, __entry->layout->plane_addr[0],
+ __entry->layout->plane_size[0],
+ __entry->layout->plane_addr[1],
+ __entry->layout->plane_size[1],
+ __entry->layout->plane_addr[2],
+ __entry->layout->plane_size[2],
+ __entry->layout->plane_addr[3],
+ __entry->layout->plane_size[3], __entry->multirect_index)
+);
+
+TRACE_EVENT(dpu_plane_disable,
+ TP_PROTO(uint32_t drm_id, bool is_virtual, uint32_t multirect_mode),
+ TP_ARGS(drm_id, is_virtual, multirect_mode),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, is_virtual )
+ __field( uint32_t, multirect_mode )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->is_virtual = is_virtual;
+ __entry->multirect_mode = multirect_mode;
+ ),
+ TP_printk("id:%u is_virtual:%s multirect_mode:%u", __entry->drm_id,
+ __entry->is_virtual ? "true" : "false",
+ __entry->multirect_mode)
+);
+
+DECLARE_EVENT_CLASS(dpu_rm_iter_template,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, id )
+ __field( enum dpu_hw_blk_type, type )
+ __field( uint32_t, enc_id )
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->type = type;
+ __entry->enc_id = enc_id;
+ ),
+ TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
+ __entry->enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_cdm,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+
+TRACE_EVENT(dpu_rm_reserve_lms,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id,
+ uint32_t pp_id),
+ TP_ARGS(id, type, enc_id, pp_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, id )
+ __field( enum dpu_hw_blk_type, type )
+ __field( uint32_t, enc_id )
+ __field( uint32_t, pp_id )
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->type = type;
+ __entry->enc_id = enc_id;
+ __entry->pp_id = pp_id;
+ ),
+ TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id,
+ __entry->type, __entry->enc_id, __entry->pp_id)
+);
+
+TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
+ TP_PROTO(enum dpu_vbif index, u32 xin_id),
+ TP_ARGS(index, xin_id),
+ TP_STRUCT__entry(
+ __field( enum dpu_vbif, index )
+ __field( u32, xin_id )
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->xin_id = xin_id;
+ ),
+ TP_printk("index:%d xin_id:%u", __entry->index, __entry->xin_id)
+);
+
+TRACE_EVENT(dpu_pp_connect_ext_te,
+ TP_PROTO(enum dpu_pingpong pp, u32 cfg),
+ TP_ARGS(pp, cfg),
+ TP_STRUCT__entry(
+ __field( enum dpu_pingpong, pp )
+ __field( u32, cfg )
+ ),
+ TP_fast_assign(
+ __entry->pp = pp;
+ __entry->cfg = cfg;
+ ),
+ TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_idx_cnt_template,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ __field( int, enable_count )
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ __entry->enable_count = enable_count;
+ ),
+ TP_printk("irq_idx:%d enable_count:%u", __entry->irq_idx,
+ __entry->enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_enable_idx,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_idx,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ __field( struct dpu_irq_callback *, callback)
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ __entry->callback = callback;
+ ),
+ TP_printk("irq_idx:%d callback:%pK", __entry->irq_idx,
+ __entry->callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_register_callback,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_unregister_callback,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback)
+);
+
+TRACE_EVENT(dpu_core_perf_update_clk,
+ TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
+ TP_ARGS(dev, stop_req, clk_rate),
+ TP_STRUCT__entry(
+ __field( struct drm_device *, dev )
+ __field( bool, stop_req )
+ __field( u64, clk_rate )
+ ),
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->stop_req = stop_req;
+ __entry->clk_rate = clk_rate;
+ ),
+ TP_printk("dev:%s stop_req:%s clk_rate:%llu", __entry->dev->unique,
+ __entry->stop_req ? "true" : "false", __entry->clk_rate)
+);
+
+#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
+#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
+
+#define DPU_ATRACE_INT(name, value) \
+ trace_dpu_trace_counter(current->tgid, name, value)
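+
+/*
+ * Example (sketch; the marker and counter names are illustrative): emit
+ * systrace-style begin/end markers around a section and log a counter:
+ *
+ * DPU_ATRACE_BEGIN("frame_done");
+ * ... work ...
+ * DPU_ATRACE_END("frame_done");
+ * DPU_ATRACE_INT("underrun_cnt", cnt);
+ */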
+
+#endif /* _DPU_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
new file mode 100644
index 000000000000..295528292296
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -0,0 +1,384 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include "dpu_vbif.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_trace.h"
+
+/**
+ * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif: Pointer to hardware vbif driver
+ * @xin_id: Client interface identifier
+ * @return: 0 if success; error code otherwise
+ */
+static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+ ktime_t timeout;
+ bool status;
+ int rc;
+
+ if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+ return -EINVAL;
+ }
+
+ timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+ for (;;) {
+ status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+ if (status)
+ break;
+ if (ktime_compare_safe(ktime_get(), timeout) > 0) {
+ status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+ break;
+ }
+ usleep_range(501, 1000);
+ }
+
+ if (!status) {
+ rc = -ETIMEDOUT;
+ DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
+ vbif->idx - VBIF_0, xin_id);
+ } else {
+ rc = 0;
+ DPU_DEBUG("VBIF %d client %d is halted\n",
+ vbif->idx - VBIF_0, xin_id);
+ }
+
+ return rc;
+}
+
+/**
+ * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @ot_lim: Pointer to OT limit to be modified
+ * @params: Pointer to usecase parameters
+ */
+static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
+ u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
+{
+ u64 pps;
+ const struct dpu_vbif_dynamic_ot_tbl *tbl;
+ u32 i;
+
+ if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
+ return;
+
+ /* Dynamic OT setting done only for WFD */
+ if (!params->is_wfd)
+ return;
+
+ pps = params->frame_rate;
+ pps *= params->width;
+ pps *= params->height;
+
+ tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+ &vbif->cap->dynamic_ot_wr_tbl;
+
+ for (i = 0; i < tbl->count; i++) {
+ if (pps <= tbl->cfg[i].pps) {
+ *ot_lim = tbl->cfg[i].ot_limit;
+ break;
+ }
+ }
+
+ DPU_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+ vbif->idx - VBIF_0, params->xin_id,
+ params->width, params->height, params->frame_rate,
+ pps, *ot_lim);
+}
+
+/**
+ * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @params: Pointer to usecase parameters
+ * @return: OT limit
+ */
+static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
+ struct dpu_vbif_set_ot_params *params)
+{
+ u32 ot_lim = 0;
+ u32 val;
+
+ if (!vbif || !vbif->cap) {
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
+ /* the OT limit is unsigned; 0 means "do not configure" */
+ return 0;
+ }
+
+ if (vbif->cap->default_ot_wr_limit && !params->rd)
+ ot_lim = vbif->cap->default_ot_wr_limit;
+ else if (vbif->cap->default_ot_rd_limit && params->rd)
+ ot_lim = vbif->cap->default_ot_rd_limit;
+
+ /*
+ * If default ot is not set from dt/catalog,
+ * then do not configure it.
+ */
+ if (ot_lim == 0)
+ goto exit;
+
+ /* Modify the limits if the target and the use case requires it */
+ _dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+ if (vbif->ops.get_limit_conf) {
+ val = vbif->ops.get_limit_conf(vbif,
+ params->xin_id, params->rd);
+ if (val == ot_lim)
+ ot_lim = 0;
+ }
+
+exit:
+ DPU_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+ vbif->idx - VBIF_0, params->xin_id, ot_lim);
+ return ot_lim;
+}
+
+/**
+ * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @dpu_kms: DPU handler
+ * @params: Pointer to usecase parameters
+ *
+ * Note this function may block while waiting for bus halt.
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_ot_params *params)
+{
+ struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_mdp *mdp;
+ bool forced_on = false;
+ u32 ot_lim;
+ int ret, i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+ mdp = dpu_kms->hw_mdp;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i] &&
+ dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
+ vbif = dpu_kms->hw_vbif[i];
+ }
+
+ if (!vbif || !mdp) {
+ DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
+ vbif != NULL, mdp != NULL);
+ return;
+ }
+
+ if (!mdp->ops.setup_clk_force_ctrl ||
+ !vbif->ops.set_limit_conf ||
+ !vbif->ops.set_halt_ctrl)
+ return;
+
+ /* set write_gather_en for all write clients */
+ if (vbif->ops.set_write_gather_en && !params->rd)
+ vbif->ops.set_write_gather_en(vbif, params->xin_id);
+
+ ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+ if (ot_lim == 0)
+ goto exit;
+
+ trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
+ params->vbif_idx);
+
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+ vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+ ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
+ if (ret)
+ trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+ if (forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+exit:
+ return;
+}
+
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_qos_params *params)
+{
+ struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_mdp *mdp;
+ bool forced_on = false;
+ const struct dpu_vbif_qos_tbl *qos_tbl;
+ int i;
+
+ if (!dpu_kms || !params || !dpu_kms->hw_mdp) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+ mdp = dpu_kms->hw_mdp;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i] &&
+ dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
+ vbif = dpu_kms->hw_vbif[i];
+ break;
+ }
+ }
+
+ if (!vbif || !vbif->cap) {
+ DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
+ return;
+ }
+
+ if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+ DPU_DEBUG("qos remap not supported\n");
+ return;
+ }
+
+ qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
+ &vbif->cap->qos_nrt_tbl;
+
+ if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
+ DPU_DEBUG("qos tbl not defined\n");
+ return;
+ }
+
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+ for (i = 0; i < qos_tbl->npriority_lvl; i++) {
+ DPU_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+ params->vbif_idx, params->xin_id, i,
+ qos_tbl->priority_lvl[i]);
+ vbif->ops.set_qos_remap(vbif, params->xin_id, i,
+ qos_tbl->priority_lvl[i]);
+ }
+
+ if (forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
+
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_vbif *vbif;
+ u32 i, pnd, src;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ vbif = dpu_kms->hw_vbif[i];
+ if (vbif && vbif->ops.clear_errors) {
+ vbif->ops.clear_errors(vbif, &pnd, &src);
+ if (pnd || src) {
+ DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
+ vbif->idx - VBIF_0, pnd, src);
+ }
+ }
+ }
+}
+
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_vbif *vbif;
+ int i, j;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ vbif = dpu_kms->hw_vbif[i];
+ if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+ for (j = 0; j < vbif->cap->memtype_count; j++)
+ vbif->ops.set_mem_type(
+ vbif, j, vbif->cap->memtype[j]);
+ }
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove_recursive(dpu_kms->debugfs_vbif);
+ dpu_kms->debugfs_vbif = NULL;
+}
+
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+{
+ char vbif_name[32];
+ struct dentry *debugfs_vbif;
+ int i, j;
+
+ dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
+ if (!dpu_kms->debugfs_vbif) {
+ DPU_ERROR("failed to create vbif debugfs\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+ snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+ debugfs_vbif = debugfs_create_dir(vbif_name,
+ dpu_kms->debugfs_vbif);
+
+ debugfs_create_u32("features", 0600, debugfs_vbif,
+ (u32 *)&vbif->features);
+
+ debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
+ (u32 *)&vbif->xin_halt_timeout);
+
+ debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
+ (u32 *)&vbif->default_ot_rd_limit);
+
+ debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
+ (u32 *)&vbif->default_ot_wr_limit);
+
+ for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+ struct dpu_vbif_dynamic_ot_cfg *cfg =
+ &vbif->dynamic_ot_rd_tbl.cfg[j];
+
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_rd_%d_pps", j);
+ debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+ (u64 *)&cfg->pps);
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_rd_%d_ot_limit", j);
+ debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+ (u32 *)&cfg->ot_limit);
+ }
+
+ for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+ struct dpu_vbif_dynamic_ot_cfg *cfg =
+ &vbif->dynamic_ot_wr_tbl.cfg[j];
+
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_wr_%d_pps", j);
+ debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+ (u64 *)&cfg->pps);
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_wr_%d_ot_limit", j);
+ debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+ (u32 *)&cfg->ot_limit);
+ }
+ }
+
+ return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
new file mode 100644
index 000000000000..f17af52dbbd5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -0,0 +1,94 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_VBIF_H__
+#define __DPU_VBIF_H__
+
+#include "dpu_kms.h"
+
+struct dpu_vbif_set_ot_params {
+ u32 xin_id;
+ u32 num;
+ u32 width;
+ u32 height;
+ u32 frame_rate;
+ bool rd;
+ bool is_wfd;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+};
+
+struct dpu_vbif_set_memtype_params {
+ u32 xin_id;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+ bool is_cacheable;
+};
+
+/**
+ * struct dpu_vbif_set_qos_params - QoS remapper parameter
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @num: pipe identifier (debug only)
+ * @is_rt: true if pipe is used in real-time use case
+ */
+struct dpu_vbif_set_qos_params {
+ u32 vbif_idx;
+ u32 xin_id;
+ u32 clk_ctrl;
+ u32 num;
+ bool is_rt;
+};
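+
+/*
+ * Example (sketch; the field values are illustrative): remap QoS priority
+ * levels for a real-time client on the RT VBIF:
+ *
+ * struct dpu_vbif_set_qos_params qos = {
+ * .vbif_idx = VBIF_RT,
+ * .xin_id = xin_id,
+ * .clk_ctrl = clk_ctrl,
+ * .num = pipe_num,
+ * .is_rt = true,
+ * };
+ *
+ * dpu_vbif_set_qos_remap(dpu_kms, &qos);
+ */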
+
+/**
+ * dpu_vbif_set_ot_limit - set OT limit for vbif client
+ * @dpu_kms: DPU handler
+ * @params: Pointer to OT configuration parameters
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_ot_params *params);
+
+/**
+ * dpu_vbif_set_qos_remap - set QoS priority level remap
+ * @dpu_kms: DPU handler
+ * @params: Pointer to QoS configuration parameters
+ */
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_qos_params *params);
+
+/**
+ * dpu_vbif_clear_errors - clear any vbif errors
+ * @dpu_kms: DPU handler
+ */
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_vbif_init_memtypes - initialize xin memory types for vbif
+ * @dpu_kms: DPU handler
+ */
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
+
+#ifdef CONFIG_DEBUG_FS
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
+#else
+static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
+ struct dentry *debugfs_root)
+{
+ return 0;
+}
+static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+#endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
new file mode 100644
index 000000000000..4f12e5c534c8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
@@ -0,0 +1,1376 @@
+#ifndef __MEDIA_INFO_H__
+#define __MEDIA_INFO_H__
+
+#ifndef MSM_MEDIA_ALIGN
+#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
+ ((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
+ (((__sz) + (__align) - 1) & (~((__align) - 1))))
+#endif
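+
+/*
+ * For example, MSM_MEDIA_ALIGN(1080, 32) == 1088 (power-of-two alignment
+ * takes the mask path), while MSM_MEDIA_ALIGN(100, 24) == 120
+ * (non-power-of-two alignment takes the divide/multiply path).
+ */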
+
+#ifndef MSM_MEDIA_ROUNDUP
+#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))
+#endif
+
+#ifndef MSM_MEDIA_MAX
+#define MSM_MEDIA_MAX(__a, __b) ((__a) > (__b)?(__a):(__b))
+#endif
+
+enum color_fmts {
+ /* Venus NV12:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines
+ * + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_NV12,
+
+ /* Venus NV21:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved V/U plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * V U V U V U V U V U V U . . . . ^
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Padding & Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines
+ * + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_NV21,
+ /* Venus NV12_MVTB:
+ * Two YUV 4:2:0 images/views one after the other
+ * in a top-bottom layout, same as NV12
+ * with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | |
+ * . . . . . . . . . . . . . . . . | View_1
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V |
+ * U V U V U V U V U V U V . . . . ^ |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . UV_Scanlines |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V V
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | |
+ * . . . . . . . . . . . . . . . . | View_2
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V |
+ * U V U V U V U V U V U V . . . . ^ |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . UV_Scanlines |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * View_1 begin at: 0 (zero)
+ * View_2 begin at: Y_Stride * Y_Scanlines + UV_Stride * UV_Scanlines
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align((2*(Y_Stride * Y_Scanlines)
+ * + 2*(UV_Stride * UV_Scanlines) + Extradata), 4096)
+ */
+ COLOR_FMT_NV12_MVTB,
+ /*
+ * The buffer can be of 2 types:
+ * (1) Venus NV12 UBWC Progressive
+ * (2) Venus NV12 UBWC Interlaced
+ *
+ * (1) Venus NV12 UBWC Progressive Buffer Format:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Y_Stride = align(Width, 128)
+ * UV_Stride = align(Width, 128)
+ * Y_Scanlines = align(Height, 32)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ *
+ *
+ * (2) Venus NV12 UBWC Interlaced Buffer Format:
+ * Compressed Macro-tile format for NV12 interlaced.
+ * Contains 8 planes in the following order -
+ * (A) Y_Meta_Top_Field_Plane
+ * (B) Y_UBWC_Top_Field_Plane
+ * (C) UV_Meta_Top_Field_Plane
+ * (D) UV_UBWC_Top_Field_Plane
+ * (E) Y_Meta_Bottom_Field_Plane
+ * (F) Y_UBWC_Bottom_Field_Plane
+ * (G) UV_Meta_Bottom_Field_Plane
+ * (H) UV_UBWC_Bottom_Field_Plane
+ * Y_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Top_Field_Plane.
+ * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together
+ * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit Y samples for top field of an interlaced frame.
+ *
+ * UV_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Top_Field_Plane.
+ * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Top_Field_Plane data together
+ * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit subsampled color difference samples for top field of an
+ * interlaced frame.
+ *
+ * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * Y_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Bottom_Field_Plane.
+ * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile
+ * format for bottom field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data
+ * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit Y samples for bottom field of an interlaced frame.
+ *
+ * UV_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Bottom_Field_Plane.
+ * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed
+ * macro-tile format for bottom field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together
+ * with UV_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit subsampled color difference samples for bottom
+ * field of an interlaced frame.
+ *
+ * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * <-----Y_TF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_TF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_TF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_TF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_TF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_TF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_TF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_TF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-----Y_BF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_BF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_BF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_BF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_BF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_BF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_BF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_BF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Half_height = (Height+1)>>1
+ * Y_TF_Stride = align(Width, 128)
+ * UV_TF_Stride = align(Width, 128)
+ * Y_TF_Scanlines = align(Half_height, 32)
+ * UV_TF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096)
+ * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096)
+ * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_TF_Meta_Plane_size =
+ * align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096)
+ * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_TF_Meta_Plane_size =
+ * align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096)
+ * Y_BF_Stride = align(Width, 128)
+ * UV_BF_Stride = align(Width, 128)
+ * Y_BF_Scanlines = align(Half_height, 32)
+ * UV_BF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096)
+ * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096)
+ * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_BF_Meta_Plane_size =
+ * align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096)
+ * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_BF_Meta_Plane_size =
+ * align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size +
+ * Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size +
+ * Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size +
+ * Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size
+ * + max(Extradata, Y_TF_Stride * 48), 4096)
+ */
+ COLOR_FMT_NV12_UBWC,
+ /* Venus NV12 10-bit UBWC:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(Width * 4/3, 128)
+ * UV_Stride = align(Width * 4/3, 128)
+ * Y_Scanlines = align(Height, 32)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ */
+ COLOR_FMT_NV12_BPP10_UBWC,
+ /* Venus RGBA8888 format:
+ * Contains 1 plane in the following order -
+ * (A) RGBA plane
+ *
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 128)
+ * RGB_Scanlines = align(Height, 32)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Plane_size + Extradata, 4096)
+ */
+ COLOR_FMT_RGBA8888,
+ /* Venus RGBA8888 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 128)
+ * RGB_Scanlines = align(Height, 32)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGBA8888_UBWC,
+ /* Venus RGBA1010102 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 256)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGBA1010102_UBWC,
+ /* Venus RGB565 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGB plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 2, 128)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGB565_UBWC,
+ /* P010 UBWC:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(Width * 2, 256)
+ * UV_Stride = align(Width * 2, 256)
+ * Y_Scanlines = align(Height, 16)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ */
+ COLOR_FMT_P010_UBWC,
+ /* Venus P010:
+ * YUV 4:2:0 image with a plane of 10 bit Y samples followed
+ * by an interleaved U/V plane containing 10 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width * 2 aligned to 128
+ * UV_Stride : Width * 2 aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines
+ * + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_P010,
+};
+
+#define COLOR_FMT_RGBA1010102_UBWC COLOR_FMT_RGBA1010102_UBWC
+#define COLOR_FMT_RGB565_UBWC COLOR_FMT_RGB565_UBWC
+#define COLOR_FMT_P010_UBWC COLOR_FMT_P010_UBWC
+#define COLOR_FMT_P010 COLOR_FMT_P010
+
+static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
+{
+ (void)height;
+ (void)width;
+
+ /*
+ * In the future, calculate the size from width/height; for now it
+ * is hardcoded since 16K satisfies all current use cases.
+ */
+ return 16 * 1024;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment, stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width, alignment);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ case COLOR_FMT_P010:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return stride;
+}
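+
+/*
+ * Illustrative values (not part of the original header):
+ * VENUS_Y_STRIDE(COLOR_FMT_NV12, 1280) == 1280 (already 128-aligned);
+ * VENUS_Y_STRIDE(COLOR_FMT_P010, 1280) == 2560 (two bytes per luma
+ * sample); VENUS_Y_STRIDE(COLOR_FMT_NV12_BPP10_UBWC, 1280) == 1792
+ * (1280 aligned to 192 gives 1344, expanded by 4/3 to 1792, already
+ * 256-aligned).
+ */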
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment, stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width, alignment);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ case COLOR_FMT_P010:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment, sclines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010:
+ alignment = 32;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ alignment = 16;
+ break;
+ default:
+ goto invalid_input;
+ }
+ sclines = MSM_MEDIA_ALIGN(height, alignment);
+invalid_input:
+ return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment, sclines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ case COLOR_FMT_P010:
+ alignment = 16;
+ break;
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 32;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
+
+invalid_input:
+ return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+{
+ int y_tile_width = 0, y_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_width = 32;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ y_tile_width = 48;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
+ y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
+
+invalid_input:
+ return y_meta_stride;
+}
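+
+/*
+ * Illustrative value (not part of the original header):
+ * VENUS_Y_META_STRIDE(COLOR_FMT_NV12_UBWC, 1920) rounds 1920 up to
+ * 60 tiles of width 32, then aligns 60 to 64, returning 64.
+ */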
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+{
+ int y_tile_height = 0, y_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ y_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
+ y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
+
+invalid_input:
+ return y_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+{
+ int uv_tile_width = 0, uv_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_width = 16;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ uv_tile_width = 24;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
+ uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
+
+invalid_input:
+ return uv_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+{
+ int uv_tile_height = 0, uv_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ uv_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
+ uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
+
+invalid_input:
+ return uv_meta_scanlines;
+}
+
+static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment = 0, stride = 0, bpp = 4;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 128;
+ break;
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 256;
+ bpp = 2;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ alignment = 256;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
+
+invalid_input:
+ return stride;
+}
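+
+/*
+ * Illustrative values (not part of the original header):
+ * VENUS_RGB_STRIDE(COLOR_FMT_RGBA8888, 1024) == 4096 (1024 * 4,
+ * already 128-aligned), while COLOR_FMT_RGB565_UBWC uses bpp = 2 and
+ * a 256-byte alignment, so VENUS_RGB_STRIDE(COLOR_FMT_RGB565_UBWC,
+ * 1024) == 2048.
+ */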
+
+static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment = 0, scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 32;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 16;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ scanlines = MSM_MEDIA_ALIGN(height, alignment);
+
+invalid_input:
+ return scanlines;
+}
+
+static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+{
+ int rgb_tile_width = 0, rgb_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_tile_width = 16;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
+ rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
+
+invalid_input:
+ return rgb_meta_stride;
+}
+
+static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+{
+ int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
+ rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
+
+invalid_input:
+ return rgb_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ * @height
+ * Progressive: height
+ * Interlaced: height
+ */
+static inline unsigned int VENUS_BUFFER_SIZE(
+ int color_fmt, int width, int height)
+{
+ const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
+ unsigned int uv_alignment = 0, size = 0;
+ unsigned int y_plane, uv_plane, y_stride,
+ uv_stride, y_sclines, uv_sclines;
+ unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
+ unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
+ unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
+ unsigned int y_meta_plane = 0, uv_meta_plane = 0;
+ unsigned int rgb_stride = 0, rgb_scanlines = 0;
+ unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
+ unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
+
+ if (!width || !height)
+ goto invalid_input;
+
+ y_stride = VENUS_Y_STRIDE(color_fmt, width);
+ uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+ rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_P010:
+ uv_alignment = 4096;
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + uv_alignment;
+ size = y_plane + uv_plane +
+ MSM_MEDIA_MAX(extra_size, 8 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_MVTB:
+ uv_alignment = 4096;
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + uv_alignment;
+ size = y_plane + uv_plane;
+ size = 2 * size + extra_size;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_UBWC:
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines =
+ VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines =
+ VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane)*2 +
+ MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane +
+ MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_RGBA8888:
+ rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
+ size = rgb_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
+ 4096);
+ rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
+ height);
+ rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
+ rgb_meta_scanlines, 4096);
+ size = rgb_ubwc_plane + rgb_meta_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return size;
+}
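+
+/*
+ * Worked example (illustrative; not part of the original header):
+ * VENUS_BUFFER_SIZE(COLOR_FMT_NV12, 1920, 1080) computes
+ * y_stride = uv_stride = 1920, y_sclines = 1088, uv_sclines = 544,
+ * so y_plane = 2088960 and uv_plane = 1044480 + 4096; the total is
+ * MSM_MEDIA_ALIGN(3137536 + max(16384, 8 * 1920), 4096) == 3153920.
+ */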
+
+static inline unsigned int VENUS_VIEW2_OFFSET(
+ int color_fmt, int width, int height)
+{
+ unsigned int offset = 0;
+ unsigned int y_plane, uv_plane, y_stride,
+ uv_stride, y_sclines, uv_sclines;
+ if (!width || !height)
+ goto invalid_input;
+
+ y_stride = VENUS_Y_STRIDE(color_fmt, width);
+ uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_MVTB:
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines;
+ offset = y_plane + uv_plane;
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return offset;
+}
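+
+/*
+ * Illustrative value (not part of the original header):
+ * VENUS_VIEW2_OFFSET(COLOR_FMT_NV12_MVTB, 1920, 1080) ==
+ * 1920 * 1088 + 1920 * 544 == 3133440, matching the View_2 start
+ * described in the COLOR_FMT_NV12_MVTB layout comment.
+ */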
+
+#endif
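
A minimal usage sketch (illustrative only; nv12_alloc_size is a
hypothetical caller, not part of this patch), showing how a driver
would combine the helpers above to size an NV12 allocation:

	#include "msm_media_info.h"

	/* Size a contiguous NV12 buffer for a width x height frame,
	 * using the stride/scanline-aware helper from the header.
	 */
	static unsigned int nv12_alloc_size(int width, int height)
	{
		return VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
	}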
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index b001699297c4..457c29dba4a1 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -201,7 +201,7 @@ static void blend_setup(struct drm_crtc *crtc)
int idx = idxs[pipe_id];
if (idx > 0) {
const struct mdp_format *format =
- to_mdp_format(msm_framebuffer_format(plane->fb));
+ to_mdp_format(msm_framebuffer_format(plane->state->fb));
alpha[idx-1] = format->alpha_enable;
}
}
@@ -665,7 +665,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
NULL);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
- plane->crtc = crtc;
return crtc;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 4b646bf9c214..44d1cda56974 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -125,6 +125,8 @@ static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ drm_atomic_helper_wait_for_vblanks(mdp4_kms->dev, state);
+
/* see 119ecb7fd */
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_put(crtc);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 4a645926edb7..2bfb39082f54 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -341,7 +341,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (panel) {
+ if (!IS_ERR(panel)) {
drm_panel_disable(panel);
drm_panel_unprepare(panel);
}
@@ -410,7 +410,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (panel) {
+ if (!IS_ERR(panel)) {
drm_panel_prepare(panel);
drm_panel_enable(panel);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index e3b1c86b7aae..5368e621999c 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -34,9 +34,12 @@ static enum drm_connector_status mdp4_lvds_connector_detect(
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
- if (!mdp4_lvds_connector->panel)
+ if (!mdp4_lvds_connector->panel) {
mdp4_lvds_connector->panel =
of_drm_find_panel(mdp4_lvds_connector->panel_node);
+ if (IS_ERR(mdp4_lvds_connector->panel))
+ mdp4_lvds_connector->panel = NULL;
+ }
return mdp4_lvds_connector->panel ?
connector_status_connected :
@@ -129,7 +132,7 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return connector;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 20e956e14c21..79ff653d8081 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -68,7 +68,7 @@ static void mdp4_plane_destroy(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
kfree(mdp4_plane);
@@ -167,8 +167,6 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
msm_framebuffer_iova(fb, kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
msm_framebuffer_iova(fb, kms->aspace, 3));
-
- plane->fb = fb;
}
static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 10271359789e..b1da9ce54379 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -65,7 +65,7 @@ struct mdp5_crtc {
struct drm_gem_object *scanout_bo;
uint64_t iova;
uint32_t width, height;
- uint32_t x, y;
+ int x, y;
} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
@@ -760,20 +760,31 @@ static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
* Cursor Region Of Interest (ROI) is a plane read from cursor
* buffer to render. The ROI region is determined by the visibility of
* the cursor point. In the default Cursor image the cursor point will
- * be at the top left of the cursor image, unless it is specified
- * otherwise using hotspot feature.
+ * be at the top left of the cursor image.
*
+ * Without rotation:
* If the cursor point reaches the right (xres - x < cursor.width) or
* bottom (yres - y < cursor.height) boundary of the screen, then ROI
* width and ROI height need to be evaluated to crop the cursor image
* accordingly.
* (xres-x) will be new cursor width when x > (xres - cursor.width)
* (yres-y) will be new cursor height when y > (yres - cursor.height)
+ *
+ * With rotation:
+ * We get negative x and/or y coordinates.
+ * (cursor.width - abs(x)) will be new cursor width when x < 0
+ * (cursor.height - abs(y)) will be new cursor height when y < 0
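+ *
+ * Example (illustrative): a 64x64 cursor at x = -16 gives
+ * roi_w = 64 - 16 = 48, i.e. only the right-hand 48 columns of
+ * the cursor buffer are scanned out.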
*/
- *roi_w = min(mdp5_crtc->cursor.width, xres -
+ if (mdp5_crtc->cursor.x >= 0)
+ *roi_w = min(mdp5_crtc->cursor.width, xres -
mdp5_crtc->cursor.x);
- *roi_h = min(mdp5_crtc->cursor.height, yres -
+ else
+ *roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
+ if (mdp5_crtc->cursor.y >= 0)
+ *roi_h = min(mdp5_crtc->cursor.height, yres -
mdp5_crtc->cursor.y);
+ else
+ *roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
}
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
@@ -783,7 +794,7 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
struct mdp5_kms *mdp5_kms = get_kms(crtc);
const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t blendcfg, stride;
- uint32_t x, y, width, height;
+ uint32_t x, y, src_x, src_y, width, height;
uint32_t roi_w, roi_h;
int lm;
@@ -800,6 +811,26 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
get_roi(crtc, &roi_w, &roi_h);
+ /* If the cursor buffer overlaps the upper or left screen
+ * border due to rotation, the ROI's pixel offset inside the
+ * cursor buffer is the positive overlap distance, e.g. a
+ * cursor at x = -16 is read starting at src_x = 16 and placed
+ * at x = 0.
+ */
+ if (mdp5_crtc->cursor.x < 0) {
+ src_x = abs(mdp5_crtc->cursor.x);
+ x = 0;
+ } else {
+ src_x = 0;
+ }
+ if (mdp5_crtc->cursor.y < 0) {
+ src_y = abs(mdp5_crtc->cursor.y);
+ y = 0;
+ } else {
+ src_y = 0;
+ }
+ DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
+ crtc->name, x, y, roi_w, roi_h, src_x, src_y);
+
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -812,6 +843,9 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
MDP5_LM_CURSOR_START_XY_Y_START(y) |
MDP5_LM_CURSOR_START_XY_X_START(x));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
+ MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
+ MDP5_LM_CURSOR_XY_SRC_X(src_x));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
mdp5_crtc->cursor.iova);
@@ -932,8 +966,9 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
if (unlikely(!crtc->state->enable))
return 0;
- mdp5_crtc->cursor.x = x = max(x, 0);
- mdp5_crtc->cursor.y = y = max(y, 0);
+ /* accept negative x/y coordinates up to maximum cursor overlap */
+ mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
+ mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);
get_roi(crtc, &roi_w, &roi_h);
@@ -1207,7 +1242,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
"unref cursor", unref_cursor_worker);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
- plane->crtc = crtc;
return crtc;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
index 9af94e35f678..fcd44d1d1068 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -319,7 +319,17 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
mdp5_cstate->ctl = ctl;
mdp5_cstate->pipeline.intf = intf;
- mdp5_cstate->defer_start = true;
+
+ /*
+ * This is a bit awkward, but we want to flush the CTL and hit the
+ * START bit at most once for an atomic update. In the non-full-
+ * modeset case, this is done from crtc->atomic_flush(), but that
+ * is too early in the case of full modeset, in which case we
+ * defer to encoder->enable(). But we need to *know* whether
+ * encoder->enable() will be called to do this:
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ mdp5_cstate->defer_start = true;
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 6e12e275deba..bddd625ab91b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -170,6 +170,8 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
struct device *dev = &mdp5_kms->pdev->dev;
struct mdp5_global_state *global_state;
+ drm_atomic_helper_wait_for_vblanks(mdp5_kms->dev, state);
+
global_state = mdp5_get_existing_global_state(mdp5_kms);
if (mdp5_kms->smp)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index f2a0db7a8a03..1cc4e57f0226 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -20,12 +20,10 @@
#include "msm_drv.h"
#include "mdp5_kms.h"
-/*
- * If needed, this can become more specific: something like struct mdp5_mdss,
- * which contains a 'struct msm_mdss base' member.
- */
-struct msm_mdss {
- struct drm_device *dev;
+#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)
+
+struct mdp5_mdss {
+ struct msm_mdss base;
void __iomem *mmio, *vbif;
@@ -41,22 +39,22 @@ struct msm_mdss {
} irqcontroller;
};
-static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data)
+static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
{
- msm_writel(data, mdss->mmio + reg);
+ msm_writel(data, mdp5_mdss->mmio + reg);
}
-static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg)
+static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
{
- return msm_readl(mdss->mmio + reg);
+ return msm_readl(mdp5_mdss->mmio + reg);
}
static irqreturn_t mdss_irq(int irq, void *arg)
{
- struct msm_mdss *mdss = arg;
+ struct mdp5_mdss *mdp5_mdss = arg;
u32 intr;
- intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);
+ intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);
VERB("intr=%08x", intr);
@@ -64,7 +62,7 @@ static irqreturn_t mdss_irq(int irq, void *arg)
irq_hw_number_t hwirq = fls(intr) - 1;
generic_handle_irq(irq_find_mapping(
- mdss->irqcontroller.domain, hwirq));
+ mdp5_mdss->irqcontroller.domain, hwirq));
intr &= ~(1 << hwirq);
}
@@ -84,19 +82,19 @@ static irqreturn_t mdss_irq(int irq, void *arg)
static void mdss_hw_mask_irq(struct irq_data *irqd)
{
- struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+ struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
static void mdss_hw_unmask_irq(struct irq_data *irqd)
{
- struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+ struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
- set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
@@ -109,13 +107,13 @@ static struct irq_chip mdss_hw_irq_chip = {
static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
- struct msm_mdss *mdss = d->host_data;
+ struct mdp5_mdss *mdp5_mdss = d->host_data;
if (!(VALID_IRQS & (1 << hwirq)))
return -EPERM;
irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, mdss);
+ irq_set_chip_data(irq, mdp5_mdss);
return 0;
}
@@ -126,90 +124,99 @@ static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
};
-static int mdss_irq_domain_init(struct msm_mdss *mdss)
+static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
{
- struct device *dev = mdss->dev->dev;
+ struct device *dev = mdp5_mdss->base.dev->dev;
struct irq_domain *d;
d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
- mdss);
+ mdp5_mdss);
if (!d) {
dev_err(dev, "mdss irq domain add failed\n");
return -ENXIO;
}
- mdss->irqcontroller.enabled_mask = 0;
- mdss->irqcontroller.domain = d;
+ mdp5_mdss->irqcontroller.enabled_mask = 0;
+ mdp5_mdss->irqcontroller.domain = d;
return 0;
}
-int msm_mdss_enable(struct msm_mdss *mdss)
+static int mdp5_mdss_enable(struct msm_mdss *mdss)
{
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
DBG("");
- clk_prepare_enable(mdss->ahb_clk);
- if (mdss->axi_clk)
- clk_prepare_enable(mdss->axi_clk);
- if (mdss->vsync_clk)
- clk_prepare_enable(mdss->vsync_clk);
+ clk_prepare_enable(mdp5_mdss->ahb_clk);
+ if (mdp5_mdss->axi_clk)
+ clk_prepare_enable(mdp5_mdss->axi_clk);
+ if (mdp5_mdss->vsync_clk)
+ clk_prepare_enable(mdp5_mdss->vsync_clk);
return 0;
}
-int msm_mdss_disable(struct msm_mdss *mdss)
+static int mdp5_mdss_disable(struct msm_mdss *mdss)
{
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
DBG("");
- if (mdss->vsync_clk)
- clk_disable_unprepare(mdss->vsync_clk);
- if (mdss->axi_clk)
- clk_disable_unprepare(mdss->axi_clk);
- clk_disable_unprepare(mdss->ahb_clk);
+ if (mdp5_mdss->vsync_clk)
+ clk_disable_unprepare(mdp5_mdss->vsync_clk);
+ if (mdp5_mdss->axi_clk)
+ clk_disable_unprepare(mdp5_mdss->axi_clk);
+ clk_disable_unprepare(mdp5_mdss->ahb_clk);
return 0;
}
-static int msm_mdss_get_clocks(struct msm_mdss *mdss)
+static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
{
- struct platform_device *pdev = to_platform_device(mdss->dev->dev);
+ struct platform_device *pdev =
+ to_platform_device(mdp5_mdss->base.dev->dev);
- mdss->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(mdss->ahb_clk))
- mdss->ahb_clk = NULL;
+ mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
+ if (IS_ERR(mdp5_mdss->ahb_clk))
+ mdp5_mdss->ahb_clk = NULL;
- mdss->axi_clk = msm_clk_get(pdev, "bus");
- if (IS_ERR(mdss->axi_clk))
- mdss->axi_clk = NULL;
+ mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
+ if (IS_ERR(mdp5_mdss->axi_clk))
+ mdp5_mdss->axi_clk = NULL;
- mdss->vsync_clk = msm_clk_get(pdev, "vsync");
- if (IS_ERR(mdss->vsync_clk))
- mdss->vsync_clk = NULL;
+ mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
+ if (IS_ERR(mdp5_mdss->vsync_clk))
+ mdp5_mdss->vsync_clk = NULL;
return 0;
}
-void msm_mdss_destroy(struct drm_device *dev)
+static void mdp5_mdss_destroy(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_mdss *mdss = priv->mdss;
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);
- if (!mdss)
+ if (!mdp5_mdss)
return;
- irq_domain_remove(mdss->irqcontroller.domain);
- mdss->irqcontroller.domain = NULL;
+ irq_domain_remove(mdp5_mdss->irqcontroller.domain);
+ mdp5_mdss->irqcontroller.domain = NULL;
- regulator_disable(mdss->vdd);
+ regulator_disable(mdp5_mdss->vdd);
pm_runtime_disable(dev->dev);
}
-int msm_mdss_init(struct drm_device *dev)
+static const struct msm_mdss_funcs mdss_funcs = {
+ .enable = mdp5_mdss_enable,
+ .disable = mdp5_mdss_disable,
+ .destroy = mdp5_mdss_destroy,
+};
+
+int mdp5_mdss_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct msm_drm_private *priv = dev->dev_private;
- struct msm_mdss *mdss;
+ struct mdp5_mdss *mdp5_mdss;
int ret;
DBG("");
@@ -217,40 +224,40 @@ int msm_mdss_init(struct drm_device *dev)
if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
return 0;
- mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
- if (!mdss) {
+ mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
+ if (!mdp5_mdss) {
ret = -ENOMEM;
goto fail;
}
- mdss->dev = dev;
+ mdp5_mdss->base.dev = dev;
- mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
- if (IS_ERR(mdss->mmio)) {
- ret = PTR_ERR(mdss->mmio);
+ mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
+ if (IS_ERR(mdp5_mdss->mmio)) {
+ ret = PTR_ERR(mdp5_mdss->mmio);
goto fail;
}
- mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
- if (IS_ERR(mdss->vbif)) {
- ret = PTR_ERR(mdss->vbif);
+ mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+ if (IS_ERR(mdp5_mdss->vbif)) {
+ ret = PTR_ERR(mdp5_mdss->vbif);
goto fail;
}
- ret = msm_mdss_get_clocks(mdss);
+ ret = msm_mdss_get_clocks(mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to get clocks: %d\n", ret);
goto fail;
}
/* Regulator to enable GDSCs in downstream kernels */
- mdss->vdd = devm_regulator_get(dev->dev, "vdd");
- if (IS_ERR(mdss->vdd)) {
- ret = PTR_ERR(mdss->vdd);
+ mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
+ if (IS_ERR(mdp5_mdss->vdd)) {
+ ret = PTR_ERR(mdp5_mdss->vdd);
goto fail;
}
- ret = regulator_enable(mdss->vdd);
+ ret = regulator_enable(mdp5_mdss->vdd);
if (ret) {
dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
ret);
@@ -258,25 +265,26 @@ int msm_mdss_init(struct drm_device *dev)
}
ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
- mdss_irq, 0, "mdss_isr", mdss);
+ mdss_irq, 0, "mdss_isr", mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to init irq: %d\n", ret);
goto fail_irq;
}
- ret = mdss_irq_domain_init(mdss);
+ ret = mdss_irq_domain_init(mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
goto fail_irq;
}
- priv->mdss = mdss;
+ mdp5_mdss->base.funcs = &mdss_funcs;
+ priv->mdss = &mdp5_mdss->base;
pm_runtime_enable(dev->dev);
return 0;
fail_irq:
- regulator_disable(mdss->vdd);
+ regulator_disable(mdp5_mdss->vdd);
fail:
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index e09bc53a0e65..7d306c5acd09 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -46,7 +46,7 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
kfree(mdp5_plane);
@@ -512,7 +512,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
if (plane_enabled(new_state)) {
struct mdp5_ctl *ctl;
struct mdp5_pipeline *pipeline =
- mdp5_crtc_get_pipeline(plane->crtc);
+ mdp5_crtc_get_pipeline(new_state->crtc);
int ret;
ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
@@ -1029,8 +1029,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
src_img_w, src_img_h,
src_x + src_w, src_y, src_w, src_h);
- plane->fb = fb;
-
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index b744bcc7d8ad..ff8164cc6738 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -208,6 +208,9 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
+ if (!msm_dsi_manager_validate_current_config(msm_dsi->id))
+ goto fail;
+
msm_dsi->encoder = encoder;
msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 70d9a9a47acd..08f3fc6771b7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -100,6 +100,7 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
+bool msm_dsi_manager_validate_current_config(u8 id);
/* msm dsi */
static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
@@ -149,6 +150,7 @@ static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
#endif
/* dsi host */
+struct msm_dsi_host;
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg);
void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
@@ -162,7 +164,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
- struct msm_dsi_phy_shared_timings *phy_shared_timings);
+ struct msm_dsi_phy_shared_timings *phy_shared_timings,
+ bool is_dual_dsi);
int msm_dsi_host_power_off(struct mipi_dsi_host *host);
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode);
@@ -175,13 +178,29 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll);
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
- struct msm_dsi_phy_clk_request *clk_req);
+ struct msm_dsi_phy_clk_request *clk_req,
+ bool is_dual_dsi);
void msm_dsi_host_destroy(struct mipi_dsi_host *host);
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev);
int msm_dsi_host_init(struct msm_dsi *msm_dsi);
int msm_dsi_runtime_suspend(struct device *dev);
int msm_dsi_runtime_resume(struct device *dev);
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host);
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size);
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi);
/* dsi phy */
struct msm_dsi_phy;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 0327bb54b01b..dcdfb1bb54f9 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -136,20 +136,58 @@ static const struct msm_dsi_config sdm845_dsi_cfg = {
.num_dsi = 2,
};
+static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_v2,
+ .link_clk_disable = dsi_link_clk_disable_v2,
+ .clk_init_ver = dsi_clk_init_v2,
+ .tx_buf_alloc = dsi_tx_buf_alloc_v2,
+ .tx_buf_get = dsi_tx_buf_get_v2,
+ .tx_buf_put = NULL,
+ .dma_base_get = dsi_dma_base_get_v2,
+ .calc_clk_rate = dsi_calc_clk_rate_v2,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = NULL,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = dsi_clk_init_6g_v2,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
- {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064,
+ &apq8064_dsi_cfg, &msm_dsi_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
- &msm8974_apq8084_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, &sdm845_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3,
+ &msm8994_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1,
+ &msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
+ &msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
+ &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 9cfdcf1c95d5..16c507911110 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -40,10 +40,22 @@ struct msm_dsi_config {
const int num_dsi;
};
+struct msm_dsi_host_cfg_ops {
+ int (*link_clk_enable)(struct msm_dsi_host *msm_host);
+ void (*link_clk_disable)(struct msm_dsi_host *msm_host);
+ int (*clk_init_ver)(struct msm_dsi_host *msm_host);
+ int (*tx_buf_alloc)(struct msm_dsi_host *msm_host, int size);
+ void* (*tx_buf_get)(struct msm_dsi_host *msm_host);
+ void (*tx_buf_put)(struct msm_dsi_host *msm_host);
+ int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+};
+
struct msm_dsi_cfg_handler {
u32 major;
u32 minor;
const struct msm_dsi_config *cfg;
+ const struct msm_dsi_host_cfg_ops *ops;
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
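The ops member added to msm_dsi_cfg_handler replaces scattered major/minor version checks with a per-variant vtable bound once at probe time. A hedged sketch of that dispatch shape in plain C (the names are illustrative, not the driver's):

#include <stdio.h>

struct host;

struct host_ops {
	int (*link_clk_enable)(struct host *h);
};

struct host {
	const struct host_ops *ops;
};

static int clk_enable_v2(struct host *h) { (void)h; puts("v2 path"); return 0; }
static int clk_enable_6g(struct host *h) { (void)h; puts("6g path"); return 0; }

static const struct host_ops v2_ops = { .link_clk_enable = clk_enable_v2 };
static const struct host_ops g6_ops = { .link_clk_enable = clk_enable_6g };

int main(void)
{
	int major = 6;	/* pretend this came from probing the hardware */
	struct host h = { .ops = (major == 6) ? &g6_ops : &v2_ops };

	return h.ops->link_clk_enable(&h);	/* call sites stay version-free */
}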
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 2f1a2780658a..96fb5f635314 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -118,6 +118,7 @@ struct msm_dsi_host {
struct clk *byte_intf_clk;
u32 byte_clk_rate;
+ u32 pixel_clk_rate;
u32 esc_clk_rate;
/* DSI v2 specific clocks */
@@ -332,6 +333,54 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
return 0;
}
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ int ret = 0;
+
+ msm_host->src_clk = msm_clk_get(pdev, "src");
+
+ if (IS_ERR(msm_host->src_clk)) {
+ ret = PTR_ERR(msm_host->src_clk);
+ pr_err("%s: can't find src clock. ret=%d\n",
+ __func__, ret);
+ msm_host->src_clk = NULL;
+ return ret;
+ }
+
+ msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
+ if (!msm_host->esc_clk_src) {
+ ret = -ENODEV;
+ pr_err("%s: can't get esc clock parent. ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
+ if (!msm_host->dsi_clk_src) {
+ ret = -ENODEV;
+ pr_err("%s: can't get src clock parent. ret=%d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ int ret = 0;
+
+ msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
+ if (IS_ERR(msm_host->byte_intf_clk)) {
+ ret = PTR_ERR(msm_host->byte_intf_clk);
+ pr_err("%s: can't find byte_intf clock. ret=%d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
@@ -379,19 +428,6 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
goto exit;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G &&
- cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_2_1) {
- msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
- if (IS_ERR(msm_host->byte_intf_clk)) {
- ret = PTR_ERR(msm_host->byte_intf_clk);
- pr_err("%s: can't find byte_intf clock. ret=%d\n",
- __func__, ret);
- goto exit;
- }
- } else {
- msm_host->byte_intf_clk = NULL;
- }
-
msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
if (!msm_host->byte_clk_src) {
ret = -ENODEV;
@@ -406,31 +442,8 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
goto exit;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
- msm_host->src_clk = msm_clk_get(pdev, "src");
- if (IS_ERR(msm_host->src_clk)) {
- ret = PTR_ERR(msm_host->src_clk);
- pr_err("%s: can't find src clock. ret=%d\n",
- __func__, ret);
- msm_host->src_clk = NULL;
- goto exit;
- }
-
- msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
- if (!msm_host->esc_clk_src) {
- ret = -ENODEV;
- pr_err("%s: can't get esc clock parent. ret=%d\n",
- __func__, ret);
- goto exit;
- }
-
- msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
- if (!msm_host->dsi_clk_src) {
- ret = -ENODEV;
- pr_err("%s: can't get src clock parent. ret=%d\n",
- __func__, ret);
- }
- }
+ if (cfg_hnd->ops->clk_init_ver)
+ ret = cfg_hnd->ops->clk_init_ver(msm_host);
exit:
return ret;
}
@@ -498,7 +511,7 @@ int msm_dsi_runtime_resume(struct device *dev)
return dsi_bus_clk_enable(msm_host);
}
-static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -511,7 +524,7 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
goto error;
}
- ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
goto error;
@@ -566,7 +579,7 @@ error:
return ret;
}
-static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
int ret;
@@ -592,7 +605,7 @@ static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
goto error;
}
- ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
goto error;
@@ -634,98 +647,121 @@ error:
return ret;
}
-static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->pixel_clk);
+ if (msm_host->byte_intf_clk)
+ clk_disable_unprepare(msm_host->byte_intf_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
+}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
- return dsi_link_clk_enable_6g(msm_host);
- else
- return dsi_link_clk_enable_v2(msm_host);
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
+{
+ clk_disable_unprepare(msm_host->pixel_clk);
+ clk_disable_unprepare(msm_host->src_clk);
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
}
-static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
+static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ struct drm_display_mode *mode = msm_host->mode;
+ u32 pclk_rate;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- clk_disable_unprepare(msm_host->esc_clk);
- clk_disable_unprepare(msm_host->pixel_clk);
- if (msm_host->byte_intf_clk)
- clk_disable_unprepare(msm_host->byte_intf_clk);
- clk_disable_unprepare(msm_host->byte_clk);
- } else {
- clk_disable_unprepare(msm_host->pixel_clk);
- clk_disable_unprepare(msm_host->src_clk);
- clk_disable_unprepare(msm_host->esc_clk);
- clk_disable_unprepare(msm_host->byte_clk);
- }
+ pclk_rate = mode->clock * 1000;
+
+ /*
+ * For dual DSI mode, the current DRM mode has the complete width of the
+ * panel. Since the complete panel is driven by two DSI controllers,
+ * the clock rates have to be split between the two DSI controllers.
+ * Adjust the byte and pixel clock rates for each DSI host accordingly.
+ */
+ if (is_dual_dsi)
+ pclk_rate /= 2;
+
+ return pclk_rate;
}
-static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
+static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
- struct drm_display_mode *mode = msm_host->mode;
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
u8 lanes = msm_host->lanes;
u32 bpp = dsi_get_bpp(msm_host->format);
- u32 pclk_rate;
+ u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi);
+ u64 pclk_bpp = (u64)pclk_rate * bpp;
- if (!mode) {
- pr_err("%s: mode not set\n", __func__);
- return -EINVAL;
- }
-
- pclk_rate = mode->clock * 1000;
- if (lanes > 0) {
- msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
- } else {
+ if (lanes == 0) {
pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
- msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
+ lanes = 1;
}
- DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
+ do_div(pclk_bpp, (8 * lanes));
- msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+ msm_host->pixel_clk_rate = pclk_rate;
+ msm_host->byte_clk_rate = pclk_bpp;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
- unsigned int esc_mhz, esc_div;
- unsigned long byte_mhz;
+ DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate,
+ msm_host->byte_clk_rate);
- msm_host->src_clk_rate = (pclk_rate * bpp) / 8;
+}
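A worked instance of dsi_calc_pclk()'s arithmetic, assuming a hypothetical 1080p panel (mode->clock = 148500 kHz), RGB888 (bpp = 24) and 4 lanes, driven as two tiles in dual DSI:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mode_clock_khz = 148500;	/* assumed mode, not from the patch */
	uint32_t bpp = 24, lanes = 4;
	int is_dual_dsi = 1;

	uint32_t pclk = mode_clock_khz * 1000;
	if (is_dual_dsi)
		pclk /= 2;	/* each host drives half the panel width */

	uint64_t byte_clk = (uint64_t)pclk * bpp / (8 * lanes);

	printf("pclk=%u bclk=%llu\n", pclk, (unsigned long long)byte_clk);
	return 0;	/* prints pclk=74250000 bclk=55687500 */
}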
- /*
- * esc clock is byte clock followed by a 4 bit divider,
- * we need to find an escape clock frequency within the
- * mipi DSI spec range within the maximum divider limit
- * We iterate here between an escape clock frequencey
- * between 20 Mhz to 5 Mhz and pick up the first one
- * that can be supported by our divider
- */
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+ if (!msm_host->mode) {
+ pr_err("%s: mode not set\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_calc_pclk(msm_host, is_dual_dsi);
+ msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+ return 0;
+}
- byte_mhz = msm_host->byte_clk_rate / 1000000;
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+ u32 bpp = dsi_get_bpp(msm_host->format);
+ u64 pclk_bpp;
+ unsigned int esc_mhz, esc_div;
+ unsigned long byte_mhz;
- for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
- esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
+ dsi_calc_pclk(msm_host, is_dual_dsi);
- /*
- * TODO: Ideally, we shouldn't know what sort of divider
- * is available in mmss_cc, we're just assuming that
- * it'll always be a 4 bit divider. Need to come up with
- * a better way here.
- */
- if (esc_div >= 1 && esc_div <= 16)
- break;
- }
+ pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp;
+ do_div(pclk_bpp, 8);
+ msm_host->src_clk_rate = pclk_bpp;
- if (esc_mhz < 5)
- return -EINVAL;
+ /*
+ * The esc clock is the byte clock followed by a 4 bit divider;
+ * we need to find an escape clock frequency within the
+ * MIPI DSI spec range and within the maximum divider limit.
+ * We iterate over escape clock frequencies between
+ * 20 MHz and 5 MHz and pick the first one that our
+ * divider can support.
+ */
+
+ byte_mhz = msm_host->byte_clk_rate / 1000000;
- msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+ for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
+ esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
- DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
- msm_host->src_clk_rate);
+ /*
+ * TODO: Ideally, we shouldn't know what sort of divider
+ * is available in mmss_cc, we're just assuming that
+ * it'll always be a 4 bit divider. Need to come up with
+ * a better way here.
+ */
+ if (esc_div >= 1 && esc_div <= 16)
+ break;
}
+ if (esc_mhz < 5)
+ return -EINVAL;
+
+ msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+
+ DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
+ msm_host->src_clk_rate);
+
return 0;
}
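Continuing the example above through the V2 escape-clock search: with a byte clock of ~55.7 MHz, byte_mhz truncates to 55 and the very first candidate already fits the 4-bit divider (these inputs are assumed, carried over from the earlier sketch, not from the patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long byte_clk_rate = 55687500;	/* from the previous sketch */
	unsigned long byte_mhz = byte_clk_rate / 1000000;	/* 55 */
	unsigned int esc_mhz, esc_div = 0;

	for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
		esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);	/* 55/20 -> 3 */
		if (esc_div >= 1 && esc_div <= 16)
			break;
	}
	if (esc_mhz < 5)
		return 1;	/* no divider fits the spec range */

	printf("esc_div=%u esc=%lu\n", esc_div, byte_clk_rate / esc_div);
	return 0;	/* esc_div=3, esc ~= 18.56 MHz (within 5..20 MHz) */
}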
@@ -885,7 +921,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
dsi_write(msm_host, REG_DSI_CTRL, data);
}
-static void dsi_timing_setup(struct msm_dsi_host *msm_host)
+static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
struct drm_display_mode *mode = msm_host->mode;
u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
@@ -897,10 +933,26 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
u32 ha_end = ha_start + mode->hdisplay;
u32 va_start = v_total - mode->vsync_start;
u32 va_end = va_start + mode->vdisplay;
+ u32 hdisplay = mode->hdisplay;
u32 wc;
DBG("");
+ /*
+ * For dual DSI mode, the current DRM mode has
+ * the complete width of the panel. Since the complete
+ * panel is driven by two DSI controllers, the horizontal
+ * timings have to be split between the two DSI controllers.
+ * Adjust the DSI host timing values accordingly.
+ */
+ if (is_dual_dsi) {
+ h_total /= 2;
+ hs_end /= 2;
+ ha_start /= 2;
+ ha_end /= 2;
+ hdisplay /= 2;
+ }
+
if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
dsi_write(msm_host, REG_DSI_ACTIVE_H,
DSI_ACTIVE_H_START(ha_start) |
@@ -921,7 +973,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
} else { /* command mode */
/* image data and 1 byte write_memory_start cmd */
- wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+ wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
@@ -931,7 +983,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
MIPI_DSI_DCS_LONG_WRITE));
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
- DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
+ DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(hdisplay) |
DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
}
}
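The dual-DSI halving applies only to the horizontal axis, since the two tiles sit side by side. With the assumed 1080p timings from the earlier sketches, each host ends up programmed with:

#include <stdio.h>

int main(void)
{
	/* assumed full-panel timings; vertical values are left untouched */
	unsigned int h_total = 2200, ha_start = 192, ha_end = 2112;
	unsigned int hdisplay = 1920;

	h_total /= 2;
	ha_start /= 2;
	ha_end /= 2;
	hdisplay /= 2;

	printf("h_total=%u active=%u..%u hdisplay=%u\n",
	       h_total, ha_start, ha_end, hdisplay);
	return 0;	/* h_total=1100 active=96..1056 hdisplay=960 */
}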
@@ -1015,50 +1067,37 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
}
}
-/* dsi_cmd */
-static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
struct msm_drm_private *priv = dev->dev_private;
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
- int ret;
uint64_t iova;
+ u8 *data;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
- if (IS_ERR(msm_host->tx_gem_obj)) {
- ret = PTR_ERR(msm_host->tx_gem_obj);
- pr_err("%s: failed to allocate gem, %d\n",
- __func__, ret);
- msm_host->tx_gem_obj = NULL;
- return ret;
- }
+ data = msm_gem_kernel_new(dev, size, MSM_BO_UNCACHED,
+ priv->kms->aspace,
+ &msm_host->tx_gem_obj, &iova);
- ret = msm_gem_get_iova(msm_host->tx_gem_obj,
- priv->kms->aspace, &iova);
- if (ret) {
- pr_err("%s: failed to get iova, %d\n", __func__, ret);
- return ret;
- }
+ if (IS_ERR(data)) {
+ msm_host->tx_gem_obj = NULL;
+ return PTR_ERR(data);
+ }
- if (iova & 0x07) {
- pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
- return -EINVAL;
- }
+ msm_host->tx_size = msm_host->tx_gem_obj->size;
- msm_host->tx_size = msm_host->tx_gem_obj->size;
- } else {
- msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
+ return 0;
+}
+
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
+{
+ struct drm_device *dev = msm_host->dev;
+
+ msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
&msm_host->tx_buf_paddr, GFP_KERNEL);
- if (!msm_host->tx_buf) {
- ret = -ENOMEM;
- pr_err("%s: failed to allocate tx buf, %d\n",
- __func__, ret);
- return ret;
- }
+ if (!msm_host->tx_buf)
+ return -ENOMEM;
- msm_host->tx_size = size;
- }
+ msm_host->tx_size = size;
return 0;
}
@@ -1089,6 +1128,21 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
msm_host->tx_buf_paddr);
}
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host)
+{
+ return msm_gem_get_vaddr(msm_host->tx_gem_obj);
+}
+
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host)
+{
+ return msm_host->tx_buf;
+}
+
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host)
+{
+ msm_gem_put_vaddr(msm_host->tx_gem_obj);
+}
+
/*
* prepare cmd buffer to be txed
*/
@@ -1113,15 +1167,11 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
return -EINVAL;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- pr_err("%s: get vaddr failed, %d\n", __func__, ret);
- return ret;
- }
- } else {
- data = msm_host->tx_buf;
+ data = cfg_hnd->ops->tx_buf_get(msm_host);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ pr_err("%s: get vaddr failed, %d\n", __func__, ret);
+ return ret;
}
/* MSM specific command format in memory */
@@ -1142,8 +1192,8 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
if (packet.size < len)
memset(data + packet.size, 0xff, len - packet.size);
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
- msm_gem_put_vaddr(msm_host->tx_gem_obj);
+ if (cfg_hnd->ops->tx_buf_put)
+ cfg_hnd->ops->tx_buf_put(msm_host);
return len;
}
@@ -1190,24 +1240,38 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
return msg->rx_len;
}
-static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct drm_device *dev = msm_host->dev;
struct msm_drm_private *priv = dev->dev_private;
+
+ if (!dma_base)
+ return -EINVAL;
+
+ return msm_gem_get_iova(msm_host->tx_gem_obj,
+ priv->kms->aspace, dma_base);
+}
+
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
+{
+ if (!dma_base)
+ return -EINVAL;
+
+ *dma_base = msm_host->tx_buf_paddr;
+ return 0;
+}
+
+static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+{
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
uint64_t dma_base;
bool triggered;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- ret = msm_gem_get_iova(msm_host->tx_gem_obj,
- priv->kms->aspace, &dma_base);
- if (ret) {
- pr_err("%s: failed to get iova: %d\n", __func__, ret);
- return ret;
- }
- } else {
- dma_base = msm_host->tx_buf_paddr;
+ ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
+ if (ret) {
+ pr_err("%s: failed to get iova: %d\n", __func__, ret);
+ return ret;
}
reinit_completion(&msm_host->dma_comp);
@@ -1845,6 +1909,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct platform_device *pdev = msm_host->pdev;
int ret;
@@ -1865,7 +1930,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
}
msm_host->dev = dev;
- ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
+ ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
if (ret) {
pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
return ret;
@@ -1898,7 +1963,7 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
* output
*/
if (check_defer && msm_host->device_node) {
- if (!of_drm_find_panel(msm_host->device_node))
+ if (IS_ERR(of_drm_find_panel(msm_host->device_node)))
if (!of_drm_find_bridge(msm_host->device_node))
return -EPROBE_DEFER;
}
@@ -1923,6 +1988,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
/* TODO: make sure dsi_cmd_mdp is idle.
* Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
@@ -1935,7 +2001,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
* mdp clock need to be enabled to receive dsi interrupt
*/
pm_runtime_get_sync(&msm_host->pdev->dev);
- dsi_link_clk_enable(msm_host);
+ cfg_hnd->ops->link_clk_enable(msm_host);
/* TODO: vote for bus bandwidth */
@@ -1956,6 +2022,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
@@ -1965,7 +2032,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
/* TODO: unvote for bus bandwidth */
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
}
@@ -2129,7 +2196,6 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct clk *byte_clk_provider, *pixel_clk_provider;
int ret;
@@ -2155,14 +2221,16 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
goto exit;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
+ if (msm_host->dsi_clk_src) {
ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
if (ret) {
pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
__func__, ret);
goto exit;
}
+ }
+ if (msm_host->esc_clk_src) {
ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
if (ret) {
pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
@@ -2189,12 +2257,14 @@ void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
}
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
- struct msm_dsi_phy_clk_request *clk_req)
+ struct msm_dsi_phy_clk_request *clk_req,
+ bool is_dual_dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
- ret = dsi_calc_clk_rate(msm_host);
+ ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi);
if (ret) {
pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
return;
@@ -2256,9 +2326,11 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
}
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
- struct msm_dsi_phy_shared_timings *phy_shared_timings)
+ struct msm_dsi_phy_shared_timings *phy_shared_timings,
+ bool is_dual_dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret = 0;
mutex_lock(&msm_host->dev_mutex);
@@ -2277,7 +2349,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
}
pm_runtime_get_sync(&msm_host->pdev->dev);
- ret = dsi_link_clk_enable(msm_host);
+ ret = cfg_hnd->ops->link_clk_enable(msm_host);
if (ret) {
pr_err("%s: failed to enable link clocks. ret=%d\n",
__func__, ret);
@@ -2291,7 +2363,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
goto fail_disable_clk;
}
- dsi_timing_setup(msm_host);
+ dsi_timing_setup(msm_host, is_dual_dsi);
dsi_sw_reset(msm_host);
dsi_ctrl_config(msm_host, true, phy_shared_timings);
@@ -2304,7 +2376,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
return 0;
fail_disable_clk:
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
fail_disable_reg:
dsi_host_regulator_disable(msm_host);
@@ -2316,6 +2388,7 @@ unlock_ret:
int msm_dsi_host_power_off(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
mutex_lock(&msm_host->dev_mutex);
if (!msm_host->power_on) {
@@ -2330,7 +2403,7 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
dsi_host_regulator_disable(msm_host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 4cb1cb68878b..5224010d90e4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -134,8 +134,9 @@ static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id,
{
struct msm_dsi_phy_clk_request clk_req;
int ret;
+ bool is_dual_dsi = IS_DUAL_DSI();
- msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req);
+ msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_dual_dsi);
ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req);
msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings);
@@ -305,102 +306,25 @@ static void dsi_mgr_connector_destroy(struct drm_connector *connector)
kfree(dsi_connector);
}
-static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
-{
- struct drm_display_mode *mode, *m;
-
- /* Only support left-right mode */
- list_for_each_entry_safe(mode, m, &connector->probed_modes, head) {
- mode->clock >>= 1;
- mode->hdisplay >>= 1;
- mode->hsync_start >>= 1;
- mode->hsync_end >>= 1;
- mode->htotal >>= 1;
- drm_mode_set_name(mode);
- }
-}
-
-static int dsi_dual_connector_tile_init(
- struct drm_connector *connector, int id)
-{
- struct drm_display_mode *mode;
- /* Fake topology id */
- char topo_id[8] = {'M', 'S', 'M', 'D', 'U', 'D', 'S', 'I'};
-
- if (connector->tile_group) {
- DBG("Tile property has been initialized");
- return 0;
- }
-
- /* Use the first mode only for now */
- mode = list_first_entry(&connector->probed_modes,
- struct drm_display_mode,
- head);
- if (!mode)
- return -EINVAL;
-
- connector->tile_group = drm_mode_get_tile_group(
- connector->dev, topo_id);
- if (!connector->tile_group)
- connector->tile_group = drm_mode_create_tile_group(
- connector->dev, topo_id);
- if (!connector->tile_group) {
- pr_err("%s: failed to create tile group\n", __func__);
- return -ENOMEM;
- }
-
- connector->has_tile = true;
- connector->tile_is_single_monitor = true;
-
- /* mode has been fixed */
- connector->tile_h_size = mode->hdisplay;
- connector->tile_v_size = mode->vdisplay;
-
- /* Only support left-right mode */
- connector->num_h_tile = 2;
- connector->num_v_tile = 1;
-
- connector->tile_v_loc = 0;
- connector->tile_h_loc = (id == DSI_RIGHT) ? 1 : 0;
-
- return 0;
-}
-
static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
{
int id = dsi_mgr_connector_get_id(connector);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_panel *panel = msm_dsi->panel;
- int ret, num;
+ int num;
if (!panel)
return 0;
- /* Since we have 2 connectors, but only 1 drm_panel in dual DSI mode,
- * panel should not attach to any connector.
- * Only temporarily attach panel to the current connector here,
- * to let panel set mode to this connector.
+ /*
+ * In dual DSI mode, we have one connector that can be
+ * attached to the drm_panel.
*/
drm_panel_attach(panel, connector);
num = drm_panel_get_modes(panel);
- drm_panel_detach(panel);
if (!num)
return 0;
- if (IS_DUAL_DSI()) {
- /* report half resolution to user */
- dsi_dual_connector_fix_modes(connector);
- ret = dsi_dual_connector_tile_init(connector, id);
- if (ret)
- return ret;
- ret = drm_mode_connector_set_tile_property(connector);
- if (ret) {
- pr_err("%s: set tile property failed, %d\n",
- __func__, ret);
- return ret;
- }
- }
-
return num;
}
@@ -454,11 +378,11 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (ret)
goto phy_en_fail;
- /* Do nothing with the host if it is DSI 1 in case of dual DSI */
- if (is_dual_dsi && (DSI_1 == id))
+ /* Do nothing with the host if it is slave-DSI in case of dual DSI */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
- ret = msm_dsi_host_power_on(host, &phy_shared_timings[id]);
+ ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_dual_dsi);
if (ret) {
pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
goto host_on_fail;
@@ -466,7 +390,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (is_dual_dsi && msm_dsi1) {
ret = msm_dsi_host_power_on(msm_dsi1->host,
- &phy_shared_timings[DSI_1]);
+ &phy_shared_timings[DSI_1], is_dual_dsi);
if (ret) {
pr_err("%s: power on host1 failed, %d\n",
__func__, ret);
@@ -556,11 +480,11 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
return;
/*
- * Do nothing with the host if it is DSI 1 in case of dual DSI.
+ * Do nothing with the host if it is slave-DSI in case of dual DSI.
* It is safe to call dsi_mgr_phy_disable() here because a single PHY
 * won't be disabled until both PHYs request disable.
*/
- if (is_dual_dsi && (DSI_1 == id))
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
goto disable_phy;
if (panel) {
@@ -621,7 +545,7 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- if (is_dual_dsi && (DSI_1 == id))
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
msm_dsi_host_set_display_mode(host, adjusted_mode);
@@ -684,11 +608,28 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_mode_connector_attach_encoder(connector, msm_dsi->encoder);
+ drm_connector_attach_encoder(connector, msm_dsi->encoder);
return connector;
}
+bool msm_dsi_manager_validate_current_config(u8 id)
+{
+ bool is_dual_dsi = IS_DUAL_DSI();
+
+ /*
+ * For dual DSI, we only have one drm panel. For this
+ * use case, we register only one bridge/connector.
+ * Skip bridge/connector initialisation if this is the
+ * slave DSI in a dual DSI configuration.
+ */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) {
+ DBG("Skip bridge registration for slave DSI->id: %d\n", id);
+ return false;
+ }
+ return true;
+}
+
/* initialize bridge */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
{
@@ -751,12 +692,8 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
connector_list = &dev->mode_config.connector_list;
list_for_each_entry(connector, connector_list, head) {
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == encoder->base.id)
- return connector;
- }
+ if (drm_connector_has_possible_encoder(connector, encoder))
+ return connector;
}
return ERR_PTR(-ENODEV);
@@ -836,6 +773,7 @@ void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
struct msm_drm_private *priv;
struct msm_kms *kms;
struct drm_encoder *encoder;
+ bool cmd_mode;
/*
* drm_device pointer is assigned to msm_dsi only in the modeset_init
@@ -850,10 +788,11 @@ void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
priv = dev->dev_private;
kms = priv->kms;
encoder = msm_dsi_get_encoder(msm_dsi);
+ cmd_mode = !(device_flags & MIPI_DSI_MODE_VIDEO);
if (encoder && kms->funcs->set_encoder_mode)
- if (!(device_flags & MIPI_DSI_MODE_VIDEO))
- kms->funcs->set_encoder_mode(kms, encoder, true);
+ kms->funcs->set_encoder_mode(kms, encoder, cmd_mode);
}
int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index c4c37a7df637..4c03f0b7343e 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -798,6 +798,8 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
return ERR_PTR(-ENOMEM);
}
+ spin_lock_init(&pll_10nm->postdiv_lock);
+
pll = &pll_10nm->base;
pll->min_rate = 1000000000UL;
pll->max_rate = 3500000000UL;
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index 6f3fc6b0f0a3..058ff92a0207 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -56,7 +56,7 @@ static int edp_connector_get_modes(struct drm_connector *connector)
if (ret)
return ret;
- drm_mode_connector_update_edid_property(connector, drm_edid);
+ drm_connector_update_edid_property(connector, drm_edid);
if (drm_edid)
ret = drm_add_edid_modes(connector, drm_edid);
@@ -134,7 +134,7 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_mode_connector_attach_encoder(connector, edp->encoder);
+ drm_connector_attach_encoder(connector, edp->encoder);
return connector;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index c0848dfedd50..e9c9a0af508e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -392,7 +392,7 @@ static int msm_hdmi_connector_get_modes(struct drm_connector *connector)
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
ret = drm_add_edid_modes(connector, edid);
@@ -477,7 +477,7 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
return ERR_PTR(ret);
}
- drm_mode_connector_attach_encoder(connector, hdmi->encoder);
+ drm_connector_attach_encoder(connector, hdmi->encoder);
return connector;
}
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f0635c3da7f4..c1f1779c980f 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -71,12 +71,15 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_enables(dev, state);
+ if (kms->funcs->commit) {
+ DRM_DEBUG_ATOMIC("triggering commit\n");
+ kms->funcs->commit(kms, state);
+ }
+
msm_atomic_wait_for_commit_done(dev, state);
kms->funcs->complete_commit(kms, state);
- drm_atomic_helper_wait_for_vblanks(dev, state);
-
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_cleanup_planes(dev, state);
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 1ff3fda245d1..f0da0d3c8a80 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -16,26 +16,101 @@
*/
#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_debugfs.h"
-static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
+struct msm_gpu_show_priv {
+ struct msm_gpu_state *state;
+ struct drm_device *dev;
+};
+
+static int msm_gpu_show(struct seq_file *m, void *arg)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct msm_gpu_show_priv *show_priv = m->private;
+ struct msm_drm_private *priv = show_priv->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret;
+
+ ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ drm_printf(&p, "%s Status:\n", gpu->name);
+ gpu->funcs->show(gpu, show_priv->state, &p);
+
+ mutex_unlock(&show_priv->dev->struct_mutex);
+
+ return 0;
+}
+
+static int msm_gpu_release(struct inode *inode, struct file *file)
{
+ struct seq_file *m = file->private_data;
+ struct msm_gpu_show_priv *show_priv = m->private;
+ struct msm_drm_private *priv = show_priv->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret;
+
+ ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ gpu->funcs->gpu_state_put(show_priv->state);
+ mutex_unlock(&show_priv->dev->struct_mutex);
+
+ kfree(show_priv);
+
+ return single_release(inode, file);
+}
+
+static int msm_gpu_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
+ struct msm_gpu_show_priv *show_priv;
+ int ret;
- if (gpu) {
- seq_printf(m, "%s Status:\n", gpu->name);
- pm_runtime_get_sync(&gpu->pdev->dev);
- gpu->funcs->show(gpu, m);
- pm_runtime_put_sync(&gpu->pdev->dev);
+ if (!gpu)
+ return -ENODEV;
+
+ show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
+ if (!show_priv)
+ return -ENOMEM;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ show_priv->state = gpu->funcs->gpu_state_get(gpu);
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (IS_ERR(show_priv->state)) {
+ ret = PTR_ERR(show_priv->state);
+ kfree(show_priv);
+ return ret;
}
- return 0;
+ show_priv->dev = dev;
+
+ return single_open(file, msm_gpu_show, show_priv);
}
+static const struct file_operations msm_gpu_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_gpu_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = msm_gpu_release,
+};
+
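The new gpu node follows the stock debugfs + seq_file idiom: open() snapshots whatever needs dumping and hands it to single_open(), show() only formats, and release() drops the snapshot before single_release(). A stripped-down sketch of that shape for a hypothetical "foo" device (struct foo_state, foo_snapshot() and foo_snapshot_put() are stand-ins, not msm code):

struct foo_state {
	int value;
};

static int foo_show(struct seq_file *m, void *arg)
{
	struct foo_state *state = m->private;	/* stashed by single_open() */

	seq_printf(m, "value: %d\n", state->value);
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	struct foo_state *state = foo_snapshot(inode->i_private);

	if (IS_ERR(state))
		return PTR_ERR(state);

	return single_open(file, foo_show, state);
}

static int foo_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	foo_snapshot_put(m->private);
	return single_release(inode, file);
}

static const struct file_operations foo_fops = {
	.owner   = THIS_MODULE,
	.open    = foo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = foo_release,
};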
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -105,7 +180,6 @@ static int show_locked(struct seq_file *m, void *arg)
}
static struct drm_info_list msm_debugfs_list[] = {
- {"gpu", show_locked, 0, msm_gpu_show},
{"gem", show_locked, 0, msm_gem_show},
{ "mm", show_locked, 0, msm_mm_show },
{ "fb", show_locked, 0, msm_fb_show },
@@ -158,6 +232,9 @@ int msm_debugfs_init(struct drm_minor *minor)
return ret;
}
+ debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
+ dev, &msm_gpu_fops);
+
if (priv->kms->funcs->debugfs_init) {
ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
if (ret)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 021a0b6f9a59..46876bc8b707 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -15,6 +16,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/kthread.h>
+#include <uapi/linux/sched/types.h>
#include <drm/drm_of.h>
#include "msm_drv.h"
@@ -149,7 +152,7 @@ struct vblank_event {
bool enable;
};
-static void vblank_ctrl_worker(struct work_struct *work)
+static void vblank_ctrl_worker(struct kthread_work *work)
{
struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
struct msm_vblank_ctrl, work);
@@ -197,7 +200,8 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
- queue_work(priv->wq, &vbl_ctrl->work);
+ kthread_queue_work(&priv->disp_thread[crtc_id].worker,
+ &vbl_ctrl->work);
return 0;
}
@@ -208,19 +212,36 @@ static int msm_drm_uninit(struct device *dev)
struct drm_device *ddev = platform_get_drvdata(pdev);
struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
+ struct msm_mdss *mdss = priv->mdss;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev, *tmp;
+ int i;
/* We must cancel and cleanup any pending vblank enable/disable
* work before drm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
- cancel_work_sync(&vbl_ctrl->work);
+ kthread_flush_work(&vbl_ctrl->work);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
kfree(vbl_ev);
}
+ /* clean up display commit/event worker threads */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ if (priv->disp_thread[i].thread) {
+ kthread_flush_worker(&priv->disp_thread[i].worker);
+ kthread_stop(priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+
+ if (priv->event_thread[i].thread) {
+ kthread_flush_worker(&priv->event_thread[i].worker);
+ kthread_stop(priv->event_thread[i].thread);
+ priv->event_thread[i].thread = NULL;
+ }
+ }
+
msm_gem_shrinker_cleanup(ddev);
drm_kms_helper_poll_fini(ddev);
@@ -243,9 +264,6 @@ static int msm_drm_uninit(struct device *dev)
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
- flush_workqueue(priv->atomic_wq);
- destroy_workqueue(priv->atomic_wq);
-
if (kms && kms->funcs)
kms->funcs->destroy(kms);
@@ -258,7 +276,8 @@ static int msm_drm_uninit(struct device *dev)
component_unbind_all(dev, ddev);
- msm_mdss_destroy(ddev);
+ if (mdss && mdss->funcs)
+ mdss->funcs->destroy(ddev);
ddev->dev_private = NULL;
drm_dev_unref(ddev);
@@ -268,6 +287,10 @@ static int msm_drm_uninit(struct device *dev)
return 0;
}
+#define KMS_MDP4 4
+#define KMS_MDP5 5
+#define KMS_DPU 3
+
static int get_mdp_ver(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -357,7 +380,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
struct drm_device *ddev;
struct msm_drm_private *priv;
struct msm_kms *kms;
- int ret;
+ struct msm_mdss *mdss;
+ int ret, i;
+ struct sched_param param;
ddev = drm_dev_alloc(drv, dev);
if (IS_ERR(ddev)) {
@@ -369,53 +394,61 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
- drm_dev_unref(ddev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_unref_drm_dev;
}
ddev->dev_private = priv;
priv->dev = ddev;
- ret = msm_mdss_init(ddev);
- if (ret) {
- kfree(priv);
- drm_dev_unref(ddev);
- return ret;
+ switch (get_mdp_ver(pdev)) {
+ case KMS_MDP5:
+ ret = mdp5_mdss_init(ddev);
+ break;
+ case KMS_DPU:
+ ret = dpu_mdss_init(ddev);
+ break;
+ default:
+ ret = 0;
+ break;
}
+ if (ret)
+ goto err_free_priv;
+
+ mdss = priv->mdss;
priv->wq = alloc_ordered_workqueue("msm", 0);
- priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
- INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+ kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
spin_lock_init(&priv->vblank_ctrl.lock);
drm_mode_config_init(ddev);
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
- if (ret) {
- msm_mdss_destroy(ddev);
- kfree(priv);
- drm_dev_unref(ddev);
- return ret;
- }
+ if (ret)
+ goto err_destroy_mdss;
ret = msm_init_vram(ddev);
if (ret)
- goto fail;
+ goto err_msm_uninit;
msm_gem_shrinker_init(ddev);
switch (get_mdp_ver(pdev)) {
- case 4:
+ case KMS_MDP4:
kms = mdp4_kms_init(ddev);
priv->kms = kms;
break;
- case 5:
+ case KMS_MDP5:
kms = mdp5_kms_init(ddev);
break;
+ case KMS_DPU:
+ kms = dpu_kms_init(ddev);
+ priv->kms = kms;
+ break;
default:
kms = ERR_PTR(-ENODEV);
break;
@@ -430,24 +463,100 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
*/
dev_err(dev, "failed to load kms\n");
ret = PTR_ERR(kms);
- goto fail;
+ goto err_msm_uninit;
}
+ /* Enable normalization of plane zpos */
+ ddev->mode_config.normalize_zpos = true;
+
if (kms) {
ret = kms->funcs->hw_init(kms);
if (ret) {
dev_err(dev, "kms hw init failed: %d\n", ret);
- goto fail;
+ goto err_msm_uninit;
}
}
ddev->mode_config.funcs = &mode_config_funcs;
ddev->mode_config.helper_private = &mode_config_helper_funcs;
+ /*
+ * This priority was found during empirical testing to give
+ * appropriate realtime scheduling for processing display updates
+ * while interacting with other realtime and normal priority tasks.
+ */
+ param.sched_priority = 16;
+ for (i = 0; i < priv->num_crtcs; i++) {
+
+ /* initialize display thread */
+ priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
+ kthread_init_worker(&priv->disp_thread[i].worker);
+ priv->disp_thread[i].dev = ddev;
+ priv->disp_thread[i].thread =
+ kthread_run(kthread_worker_fn,
+ &priv->disp_thread[i].worker,
+ "crtc_commit:%d", priv->disp_thread[i].crtc_id);
+ if (IS_ERR(priv->disp_thread[i].thread)) {
+ dev_err(dev, "failed to create crtc_commit kthread\n");
+ priv->disp_thread[i].thread = NULL;
+ } else {
+ ret = sched_setscheduler(priv->disp_thread[i].thread,
+ SCHED_FIFO, &param);
+ if (ret)
+ pr_warn("display thread priority update failed: %d\n",
+ ret);
+ }
+
+ /* initialize event thread */
+ priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
+ kthread_init_worker(&priv->event_thread[i].worker);
+ priv->event_thread[i].dev = ddev;
+ priv->event_thread[i].thread =
+ kthread_run(kthread_worker_fn,
+ &priv->event_thread[i].worker,
+ "crtc_event:%d", priv->event_thread[i].crtc_id);
+ /*
+ * The event thread should also run at the same priority as
+ * disp_thread because it handles frame_done events. A lower
+ * priority event thread and a higher priority disp_thread can
+ * cause the frame_pending counter to go beyond 2, which can
+ * lead to a commit failure at the crtc commit level.
+ */
+ if (IS_ERR(priv->event_thread[i].thread)) {
+ dev_err(dev, "failed to create crtc_event kthread\n");
+ priv->event_thread[i].thread = NULL;
+ } else {
+ ret = sched_setscheduler(priv->event_thread[i].thread,
+ SCHED_FIFO, &param);
+ if (ret)
+ pr_warn("display event thread priority update failed: %d\n",
+ ret);
+ }
+
+ if ((!priv->disp_thread[i].thread) ||
+ !priv->event_thread[i].thread) {
+ /* clean up previously created threads if any */
+ for ( ; i >= 0; i--) {
+ if (priv->disp_thread[i].thread) {
+ kthread_stop(
+ priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+
+ if (priv->event_thread[i].thread) {
+ kthread_stop(
+ priv->event_thread[i].thread);
+ priv->event_thread[i].thread = NULL;
+ }
+ }
+ goto err_msm_uninit;
+ }
+ }
+
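The per-CRTC threads created above reduce to the standard kthread_worker API plus an RT priority bump. A hedged sketch of one worker's lifecycle (work_fn and id are placeholders; error handling trimmed):

struct kthread_worker worker;
struct kthread_work work;
struct task_struct *thread;
struct sched_param param = { .sched_priority = 16 };

kthread_init_worker(&worker);
kthread_init_work(&work, work_fn);	/* work_fn runs in thread context */

thread = kthread_run(kthread_worker_fn, &worker, "crtc_commit:%d", id);
if (!IS_ERR(thread))
	sched_setscheduler(thread, SCHED_FIFO, &param);

kthread_queue_work(&worker, &work);	/* hand a job to the worker */

kthread_flush_worker(&worker);	/* teardown: drain queued work... */
kthread_stop(thread);		/* ...then stop the thread */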
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
dev_err(dev, "failed to initialize vblank\n");
- goto fail;
+ goto err_msm_uninit;
}
if (kms) {
@@ -456,13 +565,13 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
pm_runtime_put_sync(dev);
if (ret < 0) {
dev_err(dev, "failed to install IRQ handler\n");
- goto fail;
+ goto err_msm_uninit;
}
}
ret = drm_dev_register(ddev, 0);
if (ret)
- goto fail;
+ goto err_msm_uninit;
drm_mode_config_reset(ddev);
@@ -473,15 +582,23 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
ret = msm_debugfs_late_init(ddev);
if (ret)
- goto fail;
+ goto err_msm_uninit;
drm_kms_helper_poll_init(ddev);
return 0;
-fail:
+err_msm_uninit:
msm_drm_uninit(dev);
return ret;
+err_destroy_mdss:
+ if (mdss && mdss->funcs)
+ mdss->funcs->destroy(ddev);
+err_free_priv:
+ kfree(priv);
+err_unref_drm_dev:
+ drm_dev_unref(ddev);
+ return ret;
}
/*
@@ -894,16 +1011,35 @@ static struct drm_driver msm_driver = {
static int msm_pm_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ /* TODO: Use atomic helper suspend/resume */
+ if (kms && kms->funcs && kms->funcs->pm_suspend)
+ return kms->funcs->pm_suspend(dev);
drm_kms_helper_poll_disable(ddev);
+ priv->pm_state = drm_atomic_helper_suspend(ddev);
+ if (IS_ERR(priv->pm_state)) {
+ drm_kms_helper_poll_enable(ddev);
+ return PTR_ERR(priv->pm_state);
+ }
+
return 0;
}
static int msm_pm_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ /* TODO: Use atomic helper suspend/resume */
+ if (kms && kms->funcs && kms->funcs->pm_resume)
+ return kms->funcs->pm_resume(dev);
+ drm_atomic_helper_resume(ddev, priv->pm_state);
drm_kms_helper_poll_enable(ddev);
return 0;
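The hooks above lean on the generic atomic helpers: drm_atomic_helper_suspend() returns a snapshot of display state (or an ERR_PTR) that is stashed in priv->pm_state and later replayed by drm_atomic_helper_resume(). The same pattern in isolation, as a hedged sketch:

#include <linux/err.h>
#include <drm/drm_atomic_helper.h>

static int demo_suspend(struct drm_device *ddev,
			struct drm_atomic_state **saved)
{
	*saved = drm_atomic_helper_suspend(ddev);
	return IS_ERR(*saved) ? PTR_ERR(*saved) : 0;
}

static int demo_resume(struct drm_device *ddev,
		       struct drm_atomic_state *saved)
{
	/* consumes the state returned by the matching suspend */
	return drm_atomic_helper_resume(ddev, saved);
}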
@@ -915,11 +1051,12 @@ static int msm_runtime_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_mdss *mdss = priv->mdss;
DBG("");
- if (priv->mdss)
- return msm_mdss_disable(priv->mdss);
+ if (mdss && mdss->funcs)
+ return mdss->funcs->disable(mdss);
return 0;
}
@@ -928,11 +1065,12 @@ static int msm_runtime_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_mdss *mdss = priv->mdss;
DBG("");
- if (priv->mdss)
- return msm_mdss_enable(priv->mdss);
+ if (mdss && mdss->funcs)
+ return mdss->funcs->enable(mdss);
return 0;
}
@@ -1031,12 +1169,13 @@ static int add_display_components(struct device *dev,
int ret;
/*
- * MDP5 based devices don't have a flat hierarchy. There is a top level
- * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
- * children devices, find the MDP5 node, and then add the interfaces
- * to our components list.
+ * MDP5/DPU based devices don't have a flat hierarchy. There is a top
+ * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
+ * Populate the children devices, find the MDP5/DPU node, and then add
+ * the interfaces to our components list.
*/
- if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
+ if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
+ of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate children devices\n");
@@ -1146,8 +1285,9 @@ static int msm_pdev_remove(struct platform_device *pdev)
}
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdp4", .data = (void *)4 }, /* MDP4 */
- { .compatible = "qcom,mdss", .data = (void *)5 }, /* MDP5 MDSS */
+ { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
+ { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
+ { .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
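The .data cookies in dt_match are typically read back in probe via of_device_get_match_data(); a sketch, assuming the KMS_MDP4/KMS_MDP5/KMS_DPU enumerators added elsewhere in this series:

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	/* recovers the KMS_* value matched from the device tree */
	unsigned long kms_type =
		(unsigned long)of_device_get_match_data(&pdev->dev);

	dev_info(&pdev->dev, "kms type %lu\n", kms_type);
	return 0;
}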
@@ -1169,6 +1309,7 @@ static int __init msm_drm_register(void)
DBG("init");
msm_mdp_register();
+ msm_dpu_register();
msm_dsi_register();
msm_edp_register();
msm_hdmi_register();
@@ -1185,6 +1326,7 @@ static void __exit msm_drm_unregister(void)
msm_edp_unregister();
msm_dsi_unregister();
msm_mdp_unregister();
+ msm_dpu_unregister();
}
module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b2da1fbf81e0..0cba86ed3f54 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -33,6 +34,7 @@
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <asm/sizes.h>
+#include <linux/kthread.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -54,6 +56,12 @@ struct msm_fence_context;
struct msm_gem_address_space;
struct msm_gem_vma;
+#define MAX_CRTCS 8
+#define MAX_PLANES 20
+#define MAX_ENCODERS 8
+#define MAX_BRIDGES 8
+#define MAX_CONNECTORS 8
+
struct msm_file_private {
rwlock_t queuelock;
struct list_head submitqueues;
@@ -68,12 +76,77 @@ enum msm_mdp_plane_property {
};
struct msm_vblank_ctrl {
- struct work_struct work;
+ struct kthread_work work;
struct list_head event_list;
spinlock_t lock;
};
#define MSM_GPU_MAX_RINGS 4
+#define MAX_H_TILES_PER_DISPLAY 2
+
+/**
+ * enum msm_display_caps - features/capabilities supported by displays
+ * @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported
+ * @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
+ * @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported
+ * @MSM_DISPLAY_CAP_EDID: EDID supported
+ */
+enum msm_display_caps {
+ MSM_DISPLAY_CAP_VID_MODE = BIT(0),
+ MSM_DISPLAY_CAP_CMD_MODE = BIT(1),
+ MSM_DISPLAY_CAP_HOT_PLUG = BIT(2),
+ MSM_DISPLAY_CAP_EDID = BIT(3),
+};
+
+/**
+ * enum msm_event_wait - type of HW events to wait for
+ * @MSM_ENC_COMMIT_DONE: wait for the driver to flush the registers to HW
+ * @MSM_ENC_TX_COMPLETE: wait for the HW to transfer the frame to panel
+ * @MSM_ENC_VBLANK: wait for the HW VBLANK event (for driver-internal waiters)

+ */
+enum msm_event_wait {
+ MSM_ENC_COMMIT_DONE = 0,
+ MSM_ENC_TX_COMPLETE,
+ MSM_ENC_VBLANK,
+};
+
+/**
+ * struct msm_display_topology - defines a display topology pipeline
+ * @num_lm: number of layer mixers used
+ * @num_enc: number of compression encoder blocks used
+ * @num_intf: number of interfaces the panel is mounted on
+ */
+struct msm_display_topology {
+ u32 num_lm;
+ u32 num_enc;
+ u32 num_intf;
+};
+
+/**
+ * struct msm_display_info - defines display properties
+ * @intf_type: DRM_MODE_CONNECTOR_ display type
+ * @capabilities: Bitmask of display flags
+ * @num_of_h_tiles: Number of horizontal tiles in case of split interface
+ * @h_tile_instance: Controller instance used per tile. Number of elements is
+ * based on num_of_h_tiles
+ * @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
+ * used instead of panel TE in cmd mode panels
+ */
+struct msm_display_info {
+ int intf_type;
+ uint32_t capabilities;
+ uint32_t num_of_h_tiles;
+ uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
+ bool is_te_using_watchdog_timer;
+};
+
+/* Commit/Event thread specific structure */
+struct msm_drm_thread {
+ struct drm_device *dev;
+ struct task_struct *thread;
+ unsigned int crtc_id;
+ struct kthread_worker worker;
+};
struct msm_drm_private {
@@ -84,7 +157,7 @@ struct msm_drm_private {
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
- /* top level MDSS wrapper device (for MDP5 only) */
+ /* top level MDSS wrapper device (for MDP5/DPU only) */
struct msm_mdss *mdss;
/* possibly this should be in the kms component, but it is
@@ -115,22 +188,24 @@ struct msm_drm_private {
struct list_head inactive_list;
struct workqueue_struct *wq;
- struct workqueue_struct *atomic_wq;
unsigned int num_planes;
- struct drm_plane *planes[16];
+ struct drm_plane *planes[MAX_PLANES];
unsigned int num_crtcs;
- struct drm_crtc *crtcs[8];
+ struct drm_crtc *crtcs[MAX_CRTCS];
+
+ struct msm_drm_thread disp_thread[MAX_CRTCS];
+ struct msm_drm_thread event_thread[MAX_CRTCS];
unsigned int num_encoders;
- struct drm_encoder *encoders[8];
+ struct drm_encoder *encoders[MAX_ENCODERS];
unsigned int num_bridges;
- struct drm_bridge *bridges[8];
+ struct drm_bridge *bridges[MAX_BRIDGES];
unsigned int num_connectors;
- struct drm_connector *connectors[8];
+ struct drm_connector *connectors[MAX_CONNECTORS];
/* Properties */
struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
@@ -150,6 +225,7 @@ struct msm_drm_private {
struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl;
+ struct drm_atomic_state *pm_state;
};
struct msm_format {
@@ -174,6 +250,9 @@ struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -285,6 +364,8 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
void __init msm_mdp_register(void);
void __exit msm_mdp_unregister(void);
+void __init msm_dpu_register(void);
+void __exit msm_dpu_unregister(void);
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 7a16242bf8bf..2a7348aeb38d 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "msm_drv.h"
#include "msm_kms.h"
@@ -25,49 +26,20 @@
struct msm_framebuffer {
struct drm_framebuffer base;
const struct msm_format *format;
- struct drm_gem_object *planes[MAX_PLANE];
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
-static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- return drm_gem_handle_create(file_priv,
- msm_fb->planes[0], handle);
-}
-
-static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = fb->format->num_planes;
-
- DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
-
- drm_framebuffer_cleanup(fb);
-
- for (i = 0; i < n; i++) {
- struct drm_gem_object *bo = msm_fb->planes[i];
-
- drm_gem_object_put_unlocked(bo);
- }
-
- kfree(msm_fb);
-}
-
static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
- .create_handle = msm_framebuffer_create_handle,
- .destroy = msm_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
@@ -77,7 +49,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
for (i = 0; i < n; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
- msm_gem_describe(msm_fb->planes[i], m);
+ msm_gem_describe(fb->obj[i], m);
}
}
#endif
@@ -90,12 +62,11 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = fb->format->num_planes;
uint64_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
+ ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
@@ -107,26 +78,23 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
for (i = 0; i < n; i++)
- msm_gem_put_iova(msm_fb->planes[i], aspace);
+ msm_gem_put_iova(fb->obj[i], aspace);
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- if (!msm_fb->planes[plane])
+ if (!fb->obj[plane])
return 0;
- return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
+ return msm_gem_iova(fb->obj[plane], aspace) + fb->offsets[plane];
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- return msm_fb->planes[plane];
+ return drm_gem_fb_get_obj(fb, plane);
}
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
@@ -202,7 +170,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->format = format;
- if (n > ARRAY_SIZE(msm_fb->planes)) {
+ if (n > ARRAY_SIZE(fb->obj)) {
ret = -EINVAL;
goto fail;
}
@@ -221,7 +189,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
goto fail;
}
- msm_fb->planes[i] = bos[i];
+ msm_fb->base.obj[i] = bos[i];
}
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
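With the private planes[] array gone, per-plane buffer objects live in drm_framebuffer's own obj[] array, and the shared drm_gem_fb_* helpers cover handle creation, destruction, and lookup. The lookup side, sketched:

#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>

static struct drm_gem_object *demo_fb_bo(struct drm_framebuffer *fb,
					 unsigned int plane)
{
	/* returns fb->obj[plane], or NULL for an out-of-range plane */
	return drm_gem_fb_get_obj(fb, plane);
}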
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 1c09acfb4028..f388944c93e2 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -20,10 +20,11 @@
#include "msm_mmu.h"
#include "msm_fence.h"
+#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
-
+#include <linux/devcoredump.h>
/*
* Power Management:
@@ -273,6 +274,123 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
return ret;
}
+#ifdef CONFIG_DEV_COREDUMP
+static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
+ size_t count, void *data, size_t datalen)
+{
+ struct msm_gpu *gpu = data;
+ struct drm_print_iterator iter;
+ struct drm_printer p;
+ struct msm_gpu_state *state;
+
+ state = msm_gpu_crashstate_get(gpu);
+ if (!state)
+ return 0;
+
+ iter.data = buffer;
+ iter.offset = 0;
+ iter.start = offset;
+ iter.remain = count;
+
+ p = drm_coredump_printer(&iter);
+
+ drm_printf(&p, "---\n");
+ drm_printf(&p, "kernel: " UTS_RELEASE "\n");
+ drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+ drm_printf(&p, "time: %lld.%09ld\n",
+ state->time.tv_sec, state->time.tv_nsec);
+ if (state->comm)
+ drm_printf(&p, "comm: %s\n", state->comm);
+ if (state->cmd)
+ drm_printf(&p, "cmdline: %s\n", state->cmd);
+
+ gpu->funcs->show(gpu, state, &p);
+
+ msm_gpu_crashstate_put(gpu);
+
+ return count - iter.remain;
+}
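devcoredump calls the read callback repeatedly with sliding (offset, count) windows, which is why the drm_print_iterator is seeded with start/remain before printing; drm_coredump_printer() drops anything outside the window. Registration pairs the read callback with a free callback, roughly as follows ('data' here stands in for the gpu pointer used above):

#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/module.h>

static void demo_register_coredump(struct device *dev, void *data)
{
	/*
	 * The core takes over 'data'; the free callback runs once the
	 * dump is read by userspace or its timeout expires.
	 */
	dev_coredumpm(dev, THIS_MODULE, data, 0, GFP_KERNEL,
		      msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}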
+
+static void msm_gpu_devcoredump_free(void *data)
+{
+ struct msm_gpu *gpu = data;
+
+ msm_gpu_crashstate_put(gpu);
+}
+
+static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
+ struct msm_gem_object *obj, u64 iova, u32 flags)
+{
+ struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
+
+ /* Always record iova/size; contents are copied below only for readable BOs */
+
+ state_bo->size = obj->base.size;
+ state_bo->iova = iova;
+
+ /* Only store the data for buffer objects marked for read */
+ if ((flags & MSM_SUBMIT_BO_READ)) {
+ void *ptr;
+
+ state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
+ if (!state_bo->data)
+ return;
+
+ ptr = msm_gem_get_vaddr_active(&obj->base);
+ if (IS_ERR(ptr)) {
+ kvfree(state_bo->data);
+ return;
+ }
+
+ memcpy(state_bo->data, ptr, obj->base.size);
+ msm_gem_put_vaddr(&obj->base);
+ }
+
+ state->nr_bos++;
+}
+
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, char *comm, char *cmd)
+{
+ struct msm_gpu_state *state;
+
+ /* Only save one crash state at a time */
+ if (gpu->crashstate)
+ return;
+
+ state = gpu->funcs->gpu_state_get(gpu);
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ /* Fill in the additional crash state information */
+ state->comm = kstrdup(comm, GFP_KERNEL);
+ state->cmd = kstrdup(cmd, GFP_KERNEL);
+
+ if (submit) {
+ int i;
+
+ state->bos = kcalloc(submit->nr_bos,
+ sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ for (i = 0; state->bos && i < submit->nr_bos; i++)
+ msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
+ submit->bos[i].iova, submit->bos[i].flags);
+ }
+
+ /* Set the active crash state to be dumped on failure */
+ gpu->crashstate = state;
+
+ /* FIXME: Release the crashstate if this errors out? */
+ dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
+ msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
+}
+#else
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, char *comm, char *cmd)
+{
+}
+#endif
+
/*
* Hangcheck detection for locked gpu:
*/
@@ -314,6 +432,7 @@ static void recover_worker(struct work_struct *work)
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
+ char *comm = NULL, *cmd = NULL;
int i;
mutex_lock(&dev->struct_mutex);
@@ -327,7 +446,7 @@ static void recover_worker(struct work_struct *work)
rcu_read_lock();
task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
- char *cmd;
+ comm = kstrdup(task->comm, GFP_ATOMIC);
/*
* So slightly annoying, in other paths like
@@ -340,22 +459,28 @@ static void recover_worker(struct work_struct *work)
* about the submit going away.
*/
mutex_unlock(&dev->struct_mutex);
- cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+ cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
mutex_lock(&dev->struct_mutex);
+ }
+ rcu_read_unlock();
+ if (comm && cmd) {
dev_err(dev->dev, "%s: offending task: %s (%s)\n",
- gpu->name, task->comm, cmd);
+ gpu->name, comm, cmd);
msm_rd_dump_submit(priv->hangrd, submit,
- "offending task: %s (%s)", task->comm, cmd);
-
- kfree(cmd);
- } else {
+ "offending task: %s (%s)", comm, cmd);
+ } else
msm_rd_dump_submit(priv->hangrd, submit, NULL);
- }
- rcu_read_unlock();
}
+ /* Record the crash state */
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ kfree(cmd);
+ kfree(comm);
/*
* Update all the rings with the latest and greatest fence.. this
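The rework above copies task->comm and the cmdline to heap strings with GFP_ATOMIC while still inside rcu_read_lock(), then logs and dumps from the copies after unlocking, so the offending task may exit at any point without invalidating the strings. The idiom, condensed:

#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

static char *demo_copy_comm(struct pid *pid)
{
	struct task_struct *task;
	char *comm = NULL;

	rcu_read_lock();
	task = pid_task(pid, PIDTYPE_PID);
	if (task)	/* no sleeping allocations inside the RCU section */
		comm = kstrdup(task->comm, GFP_ATOMIC);
	rcu_read_unlock();

	return comm;	/* caller kfree()s; NULL if the task exited or OOM */
}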
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index b8241179175a..1c6105bc55c7 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -27,6 +27,7 @@
struct msm_gem_submit;
struct msm_gpu_perfcntr;
+struct msm_gpu_state;
struct msm_gpu_config {
const char *ioname;
@@ -64,11 +65,14 @@ struct msm_gpu_funcs {
void (*destroy)(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
/* show GPU status in debugfs: */
- void (*show)(struct msm_gpu *gpu, struct seq_file *m);
+ void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
/* for generation specific debugfs: */
int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
+ struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
+ int (*gpu_state_put)(struct msm_gpu_state *state);
};
struct msm_gpu {
@@ -129,6 +133,8 @@ struct msm_gpu {
u64 busy_cycles;
ktime_t time;
} devfreq;
+
+ struct msm_gpu_state *crashstate;
};
/* It turns out that all targets use the same ringbuffer size */
@@ -175,6 +181,38 @@ struct msm_gpu_submitqueue {
struct kref ref;
};
+struct msm_gpu_state_bo {
+ u64 iova;
+ size_t size;
+ void *data;
+};
+
+struct msm_gpu_state {
+ struct kref ref;
+ struct timespec64 time;
+
+ struct {
+ u64 iova;
+ u32 fence;
+ u32 seqno;
+ u32 rptr;
+ u32 wptr;
+ void *data;
+ int data_size;
+ } ring[MSM_GPU_MAX_RINGS];
+
+ int nr_registers;
+ u32 *registers;
+
+ u32 rbbm_status;
+
+ char *comm;
+ char *cmd;
+
+ int nr_bos;
+ struct msm_gpu_state_bo *bos;
+};
+
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
msm_writel(data, gpu->mmio + (reg << 2));
@@ -254,4 +292,32 @@ static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
kref_put(&queue->ref, msm_submitqueue_destroy);
}
+static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = NULL;
+
+ mutex_lock(&gpu->dev->struct_mutex);
+
+ if (gpu->crashstate) {
+ kref_get(&gpu->crashstate->ref);
+ state = gpu->crashstate;
+ }
+
+ mutex_unlock(&gpu->dev->struct_mutex);
+
+ return state;
+}
+
+static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
+{
+ mutex_lock(&gpu->dev->struct_mutex);
+
+ if (gpu->crashstate) {
+ if (gpu->funcs->gpu_state_put(gpu->crashstate))
+ gpu->crashstate = NULL;
+ }
+
+ mutex_unlock(&gpu->dev->struct_mutex);
+}
+
#endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index dfd92947de2c..fd88cebb6adb 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -42,6 +43,7 @@ struct msm_kms_funcs {
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
/* modeset, bracketing atomic_commit(): */
void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+ void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);
void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
/* functions to wait for atomic commit completed on each CRTC */
void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
@@ -50,6 +52,11 @@ struct msm_kms_funcs {
const struct msm_format *(*get_format)(struct msm_kms *kms,
const uint32_t format,
const uint64_t modifiers);
+ /* do format checking on format modified through fb_cmd2 modifiers */
+ int (*check_modified_format)(const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
/* misc: */
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
@@ -60,6 +67,9 @@ struct msm_kms_funcs {
void (*set_encoder_mode)(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode);
+ /* pm suspend/resume hooks */
+ int (*pm_suspend)(struct device *dev);
+ int (*pm_resume)(struct device *dev);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
#ifdef CONFIG_DEBUG_FS
@@ -86,9 +96,20 @@ static inline void msm_kms_init(struct msm_kms *kms,
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
-int msm_mdss_init(struct drm_device *dev);
-void msm_mdss_destroy(struct drm_device *dev);
-int msm_mdss_enable(struct msm_mdss *mdss);
-int msm_mdss_disable(struct msm_mdss *mdss);
+struct msm_kms *dpu_kms_init(struct drm_device *dev);
+
+struct msm_mdss_funcs {
+ int (*enable)(struct msm_mdss *mdss);
+ int (*disable)(struct msm_mdss *mdss);
+ void (*destroy)(struct drm_device *dev);
+};
+
+struct msm_mdss {
+ struct drm_device *dev;
+ const struct msm_mdss_funcs *funcs;
+};
+
+int mdp5_mdss_init(struct drm_device *dev);
+int dpu_mdss_init(struct drm_device *dev);
#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6aa6ee16dcbd..2c569e264df3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1017,7 +1017,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
nv_crtc->cursor.show(nv_crtc, true);
out:
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index 4feab0a5419d..e7af95d37ddb 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -556,6 +556,6 @@ nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry)
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 9805d2cdc1a1..73d41abbb510 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -716,6 +716,6 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry)
entry->location != DCB_LOC_ON_CHIP)
nv04_tmds_slave_init(encoder);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 01664357d3e1..de4490b4ed30 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -244,7 +244,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
/* Attach it to the specified connector. */
get_slave_funcs(encoder)->create_resources(encoder, connector);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 6d99f11fee4e..6a4ca139cf5d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -821,6 +821,6 @@ nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry)
encoder->possible_clones = 0;
nv17_tv_create_resources(encoder, connector);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 9bae4db84cfb..8412119bd940 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -136,12 +136,24 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
{
struct nouveau_cli *cli = (void *)device->object.client;
struct nv50_disp_core_channel_dma_v0 *args = data;
+ u8 type = NVIF_MEM_COHERENT;
int ret;
mutex_init(&dmac->lock);
- ret = nvif_mem_init_map(&cli->mmu, NVIF_MEM_COHERENT, 0x1000,
- &dmac->push);
+ /* Pascal added support for 47-bit physical addresses, but some
+ * parts of EVO still only accept 40-bit PAs.
+ *
+ * To avoid issues on systems with large amounts of RAM, and on
+ * systems where an IOMMU maps pages at a high address, we need
+ * to allocate push buffers in VRAM instead.
+ *
+ * This appears to match NVIDIA's behaviour on Pascal.
+ */
+ if (device->info.family == NV_DEVICE_INFO_V0_PASCAL)
+ type |= NVIF_MEM_VRAM;
+
+ ret = nvif_mem_init_map(&cli->mmu, type, 0x1000, &dmac->push);
if (ret)
return ret;
@@ -216,6 +228,19 @@ void
evo_kick(u32 *push, struct nv50_dmac *evoc)
{
struct nv50_dmac *dmac = evoc;
+
+ /* Push buffer fetches are not coherent with BAR1, we need to ensure
+ * writes have been flushed right through to VRAM before writing PUT.
+ */
+ if (dmac->push.type & NVIF_MEM_VRAM) {
+ struct nvif_device *device = dmac->base.device;
+ nvif_wr32(&device->object, 0x070000, 0x00000001);
+ nvif_msec(device, 2000,
+ if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+ break;
+ );
+ }
+
nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
mutex_unlock(&dmac->lock);
}
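The flush in evo_kick() is the usual MMIO trigger-then-poll idiom: write the flush register, wait (bounded) for a busy bit to clear, and only then publish PUT. A generic equivalent with readl_poll_timeout(), using illustrative offsets rather than real EVO registers:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_FLUSH	0x070000	/* hypothetical register */
#define DEMO_BUSY	BIT(1)		/* hypothetical busy bit */

static int demo_flush_and_wait(void __iomem *mmio)
{
	u32 val;

	writel(1, mmio + DEMO_FLUSH);
	/* poll every 1us until busy clears, give up after 2ms */
	return readl_poll_timeout(mmio + DEMO_FLUSH, val,
				  !(val & DEMO_BUSY), 1, 2000);
}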
@@ -424,7 +449,7 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
"dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_dac_help);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
@@ -850,7 +875,7 @@ nv50_mstc_get_modes(struct drm_connector *connector)
int ret = 0;
mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
- drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
+ drm_connector_update_edid_property(&mstc->connector, mstc->edid);
if (mstc->edid)
ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
@@ -927,11 +952,11 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
nouveau_conn_attach_properties(&mstc->connector);
for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
- drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
+ drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
- drm_mode_connector_set_path_property(&mstc->connector, path);
+ drm_connector_set_path_property(&mstc->connector, path);
return 0;
}
@@ -1007,7 +1032,7 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
mstc->port = NULL;
drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
- drm_connector_unreference(&mstc->connector);
+ drm_connector_put(&mstc->connector);
}
static void
@@ -1418,7 +1443,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
"sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_sor_help);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
struct nv50_disp *disp = nv50_disp(encoder->dev);
@@ -1576,7 +1601,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
"pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_pior_help);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
@@ -2232,6 +2257,9 @@ nv50_display_create(struct drm_device *dev)
connector->funcs->destroy(connector);
}
+ /* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
+ dev->vblank_disable_immediate = true;
+
out:
if (ret)
nv50_display_destroy(dev);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index c5a9bc1af5af..2187922e8dc2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -586,7 +586,6 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
wndw->id = index;
wndw->interlock.type = interlock_type;
wndw->interlock.data = interlock_data;
- wndw->ctxdma.parent = &wndw->wndw.base.user;
wndw->ctxdma.parent = &wndw->wndw.base.user;
INIT_LIST_HEAD(&wndw->ctxdma.list);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 20754d9e6883..8407651f6ac6 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -78,7 +78,7 @@ struct nvif_mclass {
#define nvif_mclass(o,m) ({ \
struct nvif_object *object = (o); \
struct nvif_sclass *sclass; \
- const typeof(m[0]) *mclass = (m); \
+ typeof(m[0]) *mclass = (m); \
int ret = -ENODEV; \
int cnt, i, j; \
\
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index e2211bb2cf79..e67a471331b5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -139,7 +139,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
if (chan->ntfy) {
nouveau_vma_del(&chan->ntfy_vma);
nouveau_bo_unpin(chan->ntfy);
- drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
+ drm_gem_object_put_unlocked(&chan->ntfy->gem);
}
if (chan->heap.block_size)
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index af68eae4c626..51932c72334e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -363,19 +363,11 @@ module_param_named(hdmimhz, nouveau_hdmimhz, int, 0400);
struct nouveau_encoder *
find_encoder(struct drm_connector *connector, int type)
{
- struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *enc;
- int i, id;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- id = connector->encoder_ids[i];
- if (!id)
- break;
+ int i;
- enc = drm_encoder_find(dev, NULL, id);
- if (!enc)
- continue;
+ drm_connector_for_each_possible_encoder(connector, enc, i) {
nv_encoder = nouveau_encoder(enc);
if (type == DCB_OUTPUT_ANY ||
@@ -420,7 +412,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
- struct nouveau_encoder *nv_encoder;
+ struct nouveau_encoder *nv_encoder = NULL;
struct drm_encoder *encoder;
int i, panel = -ENODEV;
@@ -436,14 +428,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
}
}
- for (i = 0; nv_encoder = NULL, i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- int id = connector->encoder_ids[i];
- if (id == 0)
- break;
-
- encoder = drm_encoder_find(dev, NULL, id);
- if (!encoder)
- continue;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
nv_encoder = nouveau_encoder(encoder);
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
@@ -565,7 +550,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
/* Cleanup the previous EDID block. */
if (nv_connector->edid) {
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
kfree(nv_connector->edid);
nv_connector->edid = NULL;
}
@@ -590,7 +575,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
else
nv_connector->edid = drm_get_edid(connector, i2c);
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
nv_connector->edid);
if (!nv_connector->edid) {
NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
@@ -672,7 +657,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
/* Cleanup the previous EDID block. */
if (nv_connector->edid) {
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
kfree(nv_connector->edid);
nv_connector->edid = NULL;
}
@@ -736,7 +721,7 @@ out:
status = connector_status_unknown;
#endif
- drm_mode_connector_update_edid_property(connector, nv_connector->edid);
+ drm_connector_update_edid_property(connector, nv_connector->edid);
nouveau_connector_set_encoder(connector, nv_encoder);
return status;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 963a4dba8213..9109b69cd052 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -160,7 +160,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
args.ustate = value;
}
+ ret = pm_runtime_get_sync(drm->dev);
+ if (IS_ERR_VALUE(ret) && ret != -EACCES)
+ return ret;
ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
+ pm_runtime_put_autosuspend(drm->dev);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index ec7861457b84..139368b31916 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -205,7 +205,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
if (fb->nvbo)
- drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
+ drm_gem_object_put_unlocked(&fb->nvbo->gem);
drm_framebuffer_cleanup(drm_fb);
kfree(fb);
@@ -287,7 +287,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
if (ret == 0)
return &fb->base;
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return ERR_PTR(ret);
}
@@ -945,7 +945,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
return ret;
ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
- drm_gem_object_unreference_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(&bo->gem);
return ret;
}
@@ -960,7 +960,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index f5d3158f0378..c7ec86d6c3c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -908,8 +908,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
get_task_comm(tmpname, current);
snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
- if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL)))
- return ret;
+ if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
ret = nouveau_cli_init(drm, name, cli);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 85c1f10bc2b6..844498c4267c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -429,7 +429,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
nouveau_vma_del(&nouveau_fb->vma);
nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo);
- drm_framebuffer_unreference(&nouveau_fb->base);
+ drm_framebuffer_put(&nouveau_fb->base);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index e6ccafcb9c41..b56524d343c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -274,7 +274,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(&nvbo->gem);
+ drm_gem_object_put_unlocked(&nvbo->gem);
return ret;
}
@@ -354,7 +354,7 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
- drm_gem_object_unreference_unlocked(&nvbo->gem);
+ drm_gem_object_put_unlocked(&nvbo->gem);
}
}
@@ -400,14 +400,14 @@ retry:
nvbo = nouveau_gem_object(gem);
if (nvbo == res_bo) {
res_bo = NULL;
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
continue;
}
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
NV_PRINTK(err, cli, "multiple instances of buffer %d on "
"validation list\n", b->handle);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
ret = -EINVAL;
break;
}
@@ -894,7 +894,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
ret = lret;
nouveau_bo_sync_for_cpu(nvbo);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return ret;
}
@@ -913,7 +913,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
nvbo = nouveau_gem_object(gem);
nouveau_bo_sync_for_device(nvbo);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return 0;
}
@@ -930,7 +930,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
return -ENOENT;
ret = nouveau_gem_info(file_priv, gem, req);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 44178b4c3599..08a1ab6b150d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -69,8 +69,8 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
- if (kstrtol(buf, 10, &value) == -EINVAL)
- return count;
+ if (kstrtol(buf, 10, &value))
+ return -EINVAL;
therm->attr_set(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST,
value / 1000);
@@ -102,8 +102,8 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
- if (kstrtol(buf, 10, &value) == -EINVAL)
- return count;
+ if (kstrtol(buf, 10, &value))
+ return -EINVAL;
therm->attr_set(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST,
value / 1000);
@@ -156,7 +156,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
long value;
int ret;
- if (kstrtol(buf, 10, &value) == -EINVAL)
+ if (kstrtol(buf, 10, &value))
return -EINVAL;
ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY, value);
@@ -179,7 +179,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
long value;
int ret;
- if (kstrtol(buf, 10, &value) == -EINVAL)
+ if (kstrtol(buf, 10, &value))
return -EINVAL;
ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY, value);
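These hwmon fixes correct a classic kstrtol misuse: it returns 0 on success and a negative errno (-EINVAL or -ERANGE) on failure, so testing the result against -EINVAL alone missed -ERANGE and, in two cases above, silently returned count for bad input. The fixed pattern, sketched as a store handler:

#include <linux/kernel.h>

static ssize_t demo_store(const char *buf, size_t count, long *out)
{
	long value;
	int ret;

	ret = kstrtol(buf, 10, &value);
	if (ret)
		return ret;	/* propagates -EINVAL or -ERANGE */

	*out = value;
	return count;
}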
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 1ada186fab77..039e23548e08 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -36,7 +36,7 @@ static int nouveau_platform_probe(struct platform_device *pdev)
ret = drm_dev_register(drm, 0);
if (ret < 0) {
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 8c093ca4222e..8edb9f2a4269 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
- * All Rights Reserved.
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
- * All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index d0322ce85172..1a47c40e171b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -87,11 +87,12 @@ nvkm_engine_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data)
{
struct nvkm_engine *engine = nvkm_engine(subdev);
if (engine->func->info) {
- if ((engine = nvkm_engine_ref(engine))) {
+ if (!IS_ERR((engine = nvkm_engine_ref(engine)))) {
int ret = engine->func->info(engine, mthd, data);
nvkm_engine_unref(&engine);
return ret;
}
+ return PTR_ERR(engine);
}
return -ENOSYS;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 78597da6313a..0e372a190d3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -23,6 +23,10 @@
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
@@ -105,6 +109,15 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
unsigned long pgsize_bitmap;
int ret;
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(mapping);
+ }
+#endif
+
if (!tdev->func->iommu_bit)
return;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
index 29e6dd58ac48..525f95d06429 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
@@ -52,7 +52,7 @@ void
gf119_disp_chan_intr(struct nv50_disp_chan *chan, bool en)
{
struct nvkm_device *device = chan->disp->base.engine.subdev.device;
- const u64 mask = 0x00000001 << chan->chid.user;
+ const u32 mask = 0x00000001 << chan->chid.user;
if (!en) {
nvkm_mask(device, 0x610090, mask, 0x00000000);
nvkm_mask(device, 0x6100a0, mask, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 57719f675eec..bcf32d92ee5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -166,8 +166,8 @@ void
nv50_disp_chan_intr(struct nv50_disp_chan *chan, bool en)
{
struct nvkm_device *device = chan->disp->base.engine.subdev.device;
- const u64 mask = 0x00010001 << chan->chid.user;
- const u64 data = en ? 0x00010000 : 0x00000000;
+ const u32 mask = 0x00010001 << chan->chid.user;
+ const u32 data = en ? 0x00010000 << chan->chid.user : 0x00000000;
nvkm_mask(device, 0x610028, mask, data);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
index 19173ea19096..3b3327789ae7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
@@ -25,24 +25,31 @@
#include <nvif/class.h>
static void
-gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
+gv100_gr_trap_sm(struct gf100_gr *gr, int gpc, int tpc, int sm)
{
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x730));
- u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x734));
+ u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x730 + (sm * 0x80)));
+ u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x734 + (sm * 0x80)));
const struct nvkm_enum *warp;
char glob[128];
nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);
- nvkm_error(subdev, "GPC%i/TPC%i/MP trap: "
+ nvkm_error(subdev, "GPC%i/TPC%i/SM%d trap: "
"global %08x [%s] warp %04x [%s]\n",
- gpc, tpc, gerr, glob, werr, warp ? warp->name : "");
+ gpc, tpc, sm, gerr, glob, werr, warp ? warp->name : "");
+
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x730 + sm * 0x80), 0x00000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734 + sm * 0x80), gerr);
+}
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x730), 0x00000000);
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734), gerr);
+static void
+gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
+{
+ gv100_gr_trap_sm(gr, gpc, tpc, 0);
+ gv100_gr_trap_sm(gr, gpc, tpc, 1);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
index 20b6fc8243e0..71524548de32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
@@ -58,8 +58,14 @@ nvbios_vpstate_parse(struct nvkm_bios *b, struct nvbios_vpstate_header *h)
h->ecount = nvbios_rd08(b, h->offset + 0x5);
h->base_id = nvbios_rd08(b, h->offset + 0x0f);
- h->boost_id = nvbios_rd08(b, h->offset + 0x10);
- h->tdp_id = nvbios_rd08(b, h->offset + 0x11);
+ if (h->hlen > 0x10)
+ h->boost_id = nvbios_rd08(b, h->offset + 0x10);
+ else
+ h->boost_id = 0xff;
+ if (h->hlen > 0x11)
+ h->tdp_id = nvbios_rd08(b, h->offset + 0x11);
+ else
+ h->tdp_id = 0xff;
return 0;
default:
return -EINVAL;
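The vpstate change guards optional table fields by header length: offsets 0x10/0x11 are read only when hlen covers them, otherwise the 0xff "invalid" sentinel is used instead of reading past the header. The guard, reduced to its shape (demo_* names are illustrative):

#include <linux/types.h>

struct demo_header {
	u32 offset;
	u8 hlen;
	u8 boost_id;
	u8 tdp_id;
};

static void demo_parse(struct demo_header *h, u8 (*rd08)(u32 addr))
{
	/* fields beyond hlen do not exist in older tables */
	h->boost_id = (h->hlen > 0x10) ? rd08(h->offset + 0x10) : 0xff;
	h->tdp_id = (h->hlen > 0x11) ? rd08(h->offset + 0x11) : 0xff;
}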
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index 007bf4af33b9..16ad91c91a7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -133,8 +133,14 @@ nvkm_fault_oneinit(struct nvkm_subdev *subdev)
}
}
- return nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
- &fault->event);
+ ret = nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
+ &fault->event);
+ if (ret)
+ return ret;
+
+ if (fault->func->oneinit)
+ ret = fault->func->oneinit(fault);
+ return ret;
}
static void *
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 73c7728b5969..3cd610d7deb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -176,8 +176,17 @@ gv100_fault_init(struct nvkm_fault *fault)
nvkm_notify_get(&fault->nrpfb);
}
+static int
+gv100_fault_oneinit(struct nvkm_fault *fault)
+{
+ return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
+ gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
+ &fault->nrpfb);
+}
+
static const struct nvkm_fault_func
gv100_fault = {
+ .oneinit = gv100_fault_oneinit,
.init = gv100_fault_init,
.fini = gv100_fault_fini,
.intr = gv100_fault_intr,
@@ -192,15 +201,5 @@ int
gv100_fault_new(struct nvkm_device *device, int index,
struct nvkm_fault **pfault)
{
- struct nvkm_fault *fault;
- int ret;
-
- ret = nvkm_fault_new_(&gv100_fault, device, index, &fault);
- *pfault = fault;
- if (ret)
- return ret;
-
- return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
- gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
- &fault->nrpfb);
+ return nvkm_fault_new_(&gv100_fault, device, index, pfault);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index 44843ecf12b0..e4d2f5234fd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -20,6 +20,7 @@ int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *,
int index, struct nvkm_fault **);
struct nvkm_fault_func {
+ int (*oneinit)(struct nvkm_fault *);
void (*init)(struct nvkm_fault *);
void (*fini)(struct nvkm_fault *);
void (*intr)(struct nvkm_fault *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b. b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.
+++ /dev/null
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index a721354249ce..d02e183717dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -414,6 +414,20 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
{
struct ls_ucode_img *_img;
u32 pos = 0;
+ u32 max_desc_size = 0;
+ u8 *gdesc;
+
+ /* Figure out how large we need gdesc to be. */
+ list_for_each_entry(_img, imgs, node) {
+ const struct acr_r352_ls_func *ls_func =
+ acr->func->ls_func[_img->falcon_id];
+
+ max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
+ }
+
+ gdesc = kmalloc(max_desc_size, GFP_KERNEL);
+ if (!gdesc)
+ return -ENOMEM;
nvkm_kmap(wpr_blob);
@@ -421,7 +435,6 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
const struct acr_r352_ls_func *ls_func =
acr->func->ls_func[_img->falcon_id];
- u8 gdesc[ls_func->bl_desc_size];
nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
sizeof(img->wpr_header));
@@ -447,6 +460,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
nvkm_done(wpr_blob);
+ kfree(gdesc);
+
return 0;
}
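Both acr_r352 and acr_r367 replace a per-iteration variable-length array with one heap buffer sized by a pre-pass over the list: find the largest bl_desc_size, allocate once, reuse the buffer for every image, and free it after the loop. The shape of that transform, sketched on a toy list:

#include <linux/kernel.h>
#include <linux/slab.h>

struct demo_img {
	u32 bl_desc_size;
	struct demo_img *next;
};

static int demo_write_all(struct demo_img *imgs)
{
	struct demo_img *img;
	u32 max_size = 0;
	u8 *buf;

	for (img = imgs; img; img = img->next)	/* pass 1: size it */
		max_size = max(max_size, img->bl_desc_size);

	buf = kmalloc(max_size, GFP_KERNEL);	/* one allocation */
	if (!buf)
		return -ENOMEM;

	for (img = imgs; img; img = img->next) {
		/* build at most img->bl_desc_size bytes in buf, then use it */
	}

	kfree(buf);
	return 0;
}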
@@ -771,7 +786,11 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
struct fw_bl_desc *hsbl_desc;
void *bl, *blob_data, *hsbl_code, *hsbl_data;
u32 code_size;
- u8 bl_desc[bl_desc_size];
+ u8 *bl_desc;
+
+ bl_desc = kzalloc(bl_desc_size, GFP_KERNEL);
+ if (!bl_desc)
+ return -ENOMEM;
/* Find the bootloader descriptor for our blob and copy it */
if (blob == acr->load_blob) {
@@ -802,7 +821,6 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
code_size, hsbl_desc->start_tag, 0, false);
/* Generate the BL header */
- memset(bl_desc, 0, bl_desc_size);
acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);
/*
@@ -811,6 +829,7 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
bl_desc_size, 0);
+ kfree(bl_desc);
return hsbl_desc->start_tag << 8;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
index 866877b88797..978ad0790367 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
@@ -265,6 +265,19 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
{
struct ls_ucode_img *_img;
u32 pos = 0;
+ u32 max_desc_size = 0;
+ u8 *gdesc;
+
+ list_for_each_entry(_img, imgs, node) {
+ const struct acr_r352_ls_func *ls_func =
+ acr->func->ls_func[_img->falcon_id];
+
+ max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
+ }
+
+ gdesc = kmalloc(max_desc_size, GFP_KERNEL);
+ if (!gdesc)
+ return -ENOMEM;
nvkm_kmap(wpr_blob);
@@ -272,7 +285,6 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
const struct acr_r352_ls_func *ls_func =
acr->func->ls_func[_img->falcon_id];
- u8 gdesc[ls_func->bl_desc_size];
nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
sizeof(img->wpr_header));
@@ -298,6 +310,8 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
nvkm_done(wpr_blob);
+ kfree(gdesc);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
index 30491d132d59..df8b919dcf09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
@@ -129,6 +129,7 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
return 0;
}
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
@@ -144,3 +145,4 @@ MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
index 632e9545e292..28ca29d0eeee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
@@ -74,6 +74,7 @@ gp10b_secboot_new(struct nvkm_device *device, int index,
return 0;
}
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
@@ -91,3 +92,4 @@ MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
+#endif
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 92fe125ce22e..f34c06bb5bd7 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -4,7 +4,7 @@
* Copyright (C) 2010 Nokia Corporation
*
* Original Driver Author: Imre Deak <imre.deak@nokia.com>
- * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@ti.com>
* Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index b5d8a00df811..a1f1dc18407a 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -2,7 +2,7 @@
* Toppoly TD028TTEC1 panel support
*
* Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Neo 1973 code (jbt6k74.c):
* Copyright (C) 2006-2007 by OpenMoko, Inc.
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index acef7ece5783..07d00a186f15 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
@@ -82,7 +82,7 @@ static void __exit omap_dss_exit(void)
module_init(omap_dss_init);
module_exit(omap_dss_exit);
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_DESCRIPTION("OMAP2/3 Display Subsystem");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 7f3ac6b13b56..84f274c4a4cb 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 424143128cd4..9e7fcbd57e52 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 3d662e6805eb..9fcc50217133 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index d4a680629825..74467b308721 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 0b908e9de792..cb80ddaa19d2 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 847c78ade024..38302631b64b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
@@ -180,6 +180,9 @@ struct dss_pll_hw {
/* DRA7 errata i886: use high N & M to avoid jitter */
bool errata_i886;
+
+ /* DRA7 errata i932: retry pll lock on failure */
+ bool errata_i932;
};
struct dss_pll {
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index 078b0e8216c3..ff362b38bf0d 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -16,6 +16,7 @@
#define DSS_SUBSYS_NAME "PLL"
+#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -381,6 +382,22 @@ static int dss_wait_hsdiv_ack(struct dss_pll *pll, u32 hsdiv_ack_mask)
return -ETIMEDOUT;
}
+static bool pll_is_locked(u32 stat)
+{
+ /*
+ * Required value for each bitfield listed below
+ *
+ * PLL_STATUS[6] = 0 PLL_BYPASS
+ * PLL_STATUS[5] = 0 PLL_HIGHJITTER
+ * (PLL_STATUS[4] is excluded from the 0x6f mask below)
+ * PLL_STATUS[3] = 0 PLL_LOSSREF
+ * PLL_STATUS[2] = 0 PLL_RECAL
+ * PLL_STATUS[1] = 1 PLL_LOCK
+ * PLL_STATUS[0] = 1 PLL_CTRL_RESET_DONE
+ */
+ return ((stat & 0x6f) == 0x3);
+}
+
int dss_pll_write_config_type_a(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo)
{
@@ -436,18 +453,54 @@ int dss_pll_write_config_type_a(struct dss_pll *pll,
l = FLD_MOD(l, 0, 25, 25); /* M7_CLOCK_EN */
writel_relaxed(l, base + PLL_CONFIGURATION2);
- writel_relaxed(1, base + PLL_GO); /* PLL_GO */
+ if (hw->errata_i932) {
+ int cnt = 0;
+ u32 sleep_time;
+ const u32 max_lock_retries = 20;
- if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
- DSSERR("DSS DPLL GO bit not going down.\n");
- r = -EIO;
- goto err;
- }
+ /*
+ * Calculate the wait time for PLL lock:
+ * 1000 REFCLK cycles, in microseconds.
+ */
+ sleep_time = DIV_ROUND_UP(1000 * 1000 * 1000, cinfo->fint);
- if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
- DSSERR("cannot lock DSS DPLL\n");
- r = -EIO;
- goto err;
+ for (cnt = 0; cnt < max_lock_retries; cnt++) {
+ writel_relaxed(1, base + PLL_GO); /* PLL_GO */
+
+ /*
+ * Read the register back to ensure the write is
+ * flushed.
+ */
+ readl_relaxed(base + PLL_GO);
+
+ usleep_range(sleep_time, sleep_time + 5);
+ l = readl_relaxed(base + PLL_STATUS);
+
+ if (pll_is_locked(l) &&
+ !(readl_relaxed(base + PLL_GO) & 0x1))
+ break;
+ }
+
+ if (cnt == max_lock_retries) {
+ DSSERR("cannot lock PLL\n");
+ r = -EIO;
+ goto err;
+ }
+ } else {
+ writel_relaxed(1, base + PLL_GO); /* PLL_GO */
+
+ if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
+ DSSERR("DSS DPLL GO bit not going down.\n");
+ r = -EIO;
+ goto err;
+ }
+
+ if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
+ DSSERR("cannot lock DSS DPLL\n");
+ r = -EIO;
+ goto err;
+ }
}
l = readl_relaxed(base + PLL_CONFIGURATION2);
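
To make the i932 workaround above easier to follow, here is the retry loop in isolation as a hedged sketch: the helper name is invented, and the retry count and sleep calculation mirror the hunk (1000 REFCLK cycles at cinfo->fint Hz is 10^9 / fint microseconds):

static int pll_lock_with_retries(void __iomem *base, unsigned long fint)
{
	u32 sleep_us = DIV_ROUND_UP(1000 * 1000 * 1000, fint);
	int i;

	for (i = 0; i < 20; i++) {
		writel_relaxed(1, base + PLL_GO);
		/* read back to ensure the write reached the device */
		readl_relaxed(base + PLL_GO);
		usleep_range(sleep_us, sleep_us + 5);

		if (pll_is_locked(readl_relaxed(base + PLL_STATUS)) &&
		    !(readl_relaxed(base + PLL_GO) & 0x1))
			return 0;
	}

	return -EIO;
}

The point of errata i932 is that a single PLL_GO pulse may fail to lock, so the driver re-issues it up to 20 times instead of treating the first timeout as fatal.
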
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 1e2c931f6acf..69c3b7a3d5c7 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 24d1ced210bd..ac01907dcc34 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* VENC settings from TI's DSS driver
*
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index 585ed94ccf17..cb46311f92c9 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -134,6 +134,7 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
.has_refsel = true,
.errata_i886 = true,
+ .errata_i932 = true,
};
struct dss_pll *dss_video_pll_init(struct dss_device *dss,
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 5cde26ac937b..2ddb856666c4 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -126,14 +126,14 @@ static int omap_connector_get_modes(struct drm_connector *connector)
if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
drm_edid_is_valid(edid)) {
- drm_mode_connector_update_edid_property(
+ drm_connector_update_edid_property(
connector, edid);
n = drm_add_edid_modes(connector, edid);
omap_connector->hdmi_mode =
drm_detect_hdmi_monitor(edid);
} else {
- drm_mode_connector_update_edid_property(
+ drm_connector_update_edid_property(
connector, NULL);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index b42e286616b0..91cf043f2b6b 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -30,16 +30,11 @@ static int gem_show(struct seq_file *m, void *arg)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct omap_drm_private *priv = dev->dev_private;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(m, "All Objects:\n");
+ mutex_lock(&priv->list_lock);
omap_gem_describe_objects(&priv->obj_list, m);
-
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&priv->list_lock);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index ef3b0e3571ec..1b6601e9b107 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -274,7 +274,7 @@ static int omap_modeset_init(struct drm_device *dev)
if (IS_ERR(crtc))
return PTR_ERR(crtc);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << crtc_idx);
priv->crtcs[priv->num_crtcs++] = crtc;
@@ -493,7 +493,7 @@ static struct drm_driver omap_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = omap_gem_prime_export,
.gem_prime_import = omap_gem_prime_import,
- .gem_free_object = omap_gem_free_object,
+ .gem_free_object_unlocked = omap_gem_free_object,
.gem_vm_ops = &omap_gem_vm_ops,
.dumb_create = omap_gem_dumb_create,
.dumb_map_offset = omap_gem_dumb_map_offset,
@@ -540,7 +540,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
priv->omaprev = soc ? (unsigned int)soc->data : 0;
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
- spin_lock_init(&priv->list_lock);
+ mutex_init(&priv->list_lock);
INIT_LIST_HEAD(&priv->obj_list);
/* Allocate and initialize the DRM device. */
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 6eaee4df4559..f27c8e216adf 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -71,7 +71,7 @@ struct omap_drm_private {
struct workqueue_struct *wq;
/* lock for obj_list below */
- spinlock_t list_lock;
+ struct mutex list_lock;
/* list of GEM objects: */
struct list_head obj_list;
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 5fd22ca73913..9f1e3d8f8488 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
@@ -51,9 +52,6 @@ static const u32 formats[] = {
/* per-plane info for the fb: */
struct plane {
- struct drm_gem_object *bo;
- u32 pitch;
- u32 offset;
dma_addr_t dma_addr;
};
@@ -68,56 +66,28 @@ struct omap_framebuffer {
struct mutex lock;
};
-static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- return drm_gem_handle_create(file_priv,
- omap_fb->planes[0].bo, handle);
-}
-
-static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int i, n = fb->format->num_planes;
-
- DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
-
- drm_framebuffer_cleanup(fb);
-
- for (i = 0; i < n; i++) {
- struct plane *plane = &omap_fb->planes[i];
-
- drm_gem_object_unreference_unlocked(plane->bo);
- }
-
- kfree(omap_fb);
-}
-
static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
- .create_handle = omap_framebuffer_create_handle,
- .destroy = omap_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
-static u32 get_linear_addr(struct plane *plane,
+static u32 get_linear_addr(struct drm_framebuffer *fb,
const struct drm_format_info *format, int n, int x, int y)
{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ struct plane *plane = &omap_fb->planes[n];
u32 offset;
- offset = plane->offset
+ offset = fb->offsets[n]
+ (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
- + (y * plane->pitch / (n == 0 ? 1 : format->vsub));
+ + (y * fb->pitches[n] / (n == 0 ? 1 : format->vsub));
return plane->dma_addr + offset;
}
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- struct plane *plane = &omap_fb->planes[0];
-
- return omap_gem_flags(plane->bo) & OMAP_BO_TILED;
+ return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED;
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
@@ -176,7 +146,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x = state->src_x >> 16;
y = state->src_y >> 16;
- if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
+ if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) {
u32 w = state->src_w >> 16;
u32 h = state->src_h >> 16;
@@ -201,12 +171,12 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x += w - 1;
/* Note: x and y are in TILER units, not pixels */
- omap_gem_rotated_dma_addr(plane->bo, orient, x, y,
+ omap_gem_rotated_dma_addr(fb->obj[0], orient, x, y,
&info->paddr);
info->rotation_type = OMAP_DSS_ROT_TILER;
info->rotation = state->rotation ?: DRM_MODE_ROTATE_0;
/* Note: stride in TILER units, not pixels */
- info->screen_width = omap_gem_tiled_stride(plane->bo, orient);
+ info->screen_width = omap_gem_tiled_stride(fb->obj[0], orient);
} else {
switch (state->rotation & DRM_MODE_ROTATE_MASK) {
case 0:
@@ -221,10 +191,10 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
break;
}
- info->paddr = get_linear_addr(plane, format, 0, x, y);
+ info->paddr = get_linear_addr(fb, format, 0, x, y);
info->rotation_type = OMAP_DSS_ROT_NONE;
info->rotation = DRM_MODE_ROTATE_0;
- info->screen_width = plane->pitch;
+ info->screen_width = fb->pitches[0];
}
/* convert to pixels: */
@@ -234,11 +204,11 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
plane = &omap_fb->planes[1];
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
- WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED));
- omap_gem_rotated_dma_addr(plane->bo, orient, x/2, y/2,
+ WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED));
+ omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
&info->p_uv_addr);
} else {
- info->p_uv_addr = get_linear_addr(plane, format, 1, x, y);
+ info->p_uv_addr = get_linear_addr(fb, format, 1, x, y);
}
} else {
info->p_uv_addr = 0;
@@ -261,10 +231,10 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- ret = omap_gem_pin(plane->bo, &plane->dma_addr);
+ ret = omap_gem_pin(fb->obj[i], &plane->dma_addr);
if (ret)
goto fail;
- omap_gem_dma_sync_buffer(plane->bo, DMA_TO_DEVICE);
+ omap_gem_dma_sync_buffer(fb->obj[i], DMA_TO_DEVICE);
}
omap_fb->pin_count++;
@@ -276,7 +246,7 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
fail:
for (i--; i >= 0; i--) {
struct plane *plane = &omap_fb->planes[i];
- omap_gem_unpin(plane->bo);
+ omap_gem_unpin(fb->obj[i]);
plane->dma_addr = 0;
}
@@ -302,54 +272,25 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- omap_gem_unpin(plane->bo);
+ omap_gem_unpin(fb->obj[i]);
plane->dma_addr = 0;
}
mutex_unlock(&omap_fb->lock);
}
-/* iterate thru all the connectors, returning ones that are attached
- * to the same fb..
- */
-struct drm_connector *omap_framebuffer_get_next_connector(
- struct drm_framebuffer *fb, struct drm_connector *from)
-{
- struct drm_device *dev = fb->dev;
- struct list_head *connector_list = &dev->mode_config.connector_list;
- struct drm_connector *connector = from;
-
- if (!from)
- return list_first_entry_or_null(connector_list, typeof(*from),
- head);
-
- list_for_each_entry_from(connector, connector_list, head) {
- if (connector != from) {
- struct drm_encoder *encoder = connector->encoder;
- struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
- if (crtc && crtc->primary->fb == fb)
- return connector;
-
- }
- }
-
- return NULL;
-}
-
#ifdef CONFIG_DEBUG_FS
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
(char *)&fb->format->format);
for (i = 0; i < n; i++) {
- struct plane *plane = &omap_fb->planes[i];
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
- i, plane->offset, plane->pitch);
- omap_gem_describe(plane->bo, m);
+ i, fb->offsets[i], fb->pitches[i]);
+ omap_gem_describe(fb->obj[i], m);
}
}
#endif
@@ -454,9 +395,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
goto fail;
}
- plane->bo = bos[i];
- plane->offset = mode_cmd->offsets[i];
- plane->pitch = pitch;
+ fb->obj[i] = bos[i];
plane->dma_addr = 0;
}
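
Since the plane bookkeeping above now delegates offsets and pitches to the core drm_framebuffer, the address math in get_linear_addr() is worth spelling out. A sketch under the same assumptions as the hunk (the helper name is illustrative): for plane n of a subsampled YUV format, x and y are divided by hsub/vsub, while plane 0 is never scaled:

static u32 linear_addr_sketch(struct drm_framebuffer *fb,
			      const struct drm_format_info *format,
			      dma_addr_t dma_addr, int n, int x, int y)
{
	u32 offset = fb->offsets[n]
		   + (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
		   + (y * fb->pitches[n] / (n == 0 ? 1 : format->vsub));

	return dma_addr + offset;
}
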
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h
index 94ad5f9e4404..c20cb4bc714d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.h
+++ b/drivers/gpu/drm/omapdrm/omap_fb.h
@@ -38,8 +38,6 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct drm_plane_state *state, struct omap_overlay_info *info);
-struct drm_connector *omap_framebuffer_get_next_connector(
- struct drm_framebuffer *fb, struct drm_connector *from);
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 0f66c74a54b0..d958cc813a94 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -170,13 +170,11 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
goto fail;
}
- mutex_lock(&dev->struct_mutex);
-
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
dev_err(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
- goto fail_unlock;
+ goto fail;
}
DBG("fbi=%p, dev=%p", fbi, dev);
@@ -212,12 +210,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
- mutex_unlock(&dev->struct_mutex);
-
return 0;
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
fail:
if (ret) {
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 17a53d207978..4ba5d035c590 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -47,6 +47,9 @@ struct omap_gem_object {
/** roll applied when mapping to DMM */
u32 roll;
+ /** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */
+ struct mutex lock;
+
/**
* dma_addr contains the buffer DMA address. It is valid for
*
@@ -137,14 +140,12 @@ struct omap_drm_usergart {
*/
/** get mmap offset */
-static u64 mmap_offset(struct drm_gem_object *obj)
+u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
int ret;
size_t size;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
/* Make it mmapable */
size = omap_gem_mmap_size(obj);
ret = drm_gem_create_mmap_offset_size(obj, size);
@@ -156,7 +157,7 @@ static u64 mmap_offset(struct drm_gem_object *obj)
return drm_vma_node_offset_addr(&obj->vma_node);
}
-static bool is_contiguous(struct omap_gem_object *omap_obj)
+static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
return true;
@@ -171,14 +172,14 @@ static bool is_contiguous(struct omap_gem_object *omap_obj)
* Eviction
*/
-static void evict_entry(struct drm_gem_object *obj,
+static void omap_gem_evict_entry(struct drm_gem_object *obj,
enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
int n = priv->usergart[fmt].height;
size_t size = PAGE_SIZE * n;
- loff_t off = mmap_offset(obj) +
+ loff_t off = omap_gem_mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
@@ -199,7 +200,7 @@ static void evict_entry(struct drm_gem_object *obj,
}
/* Evict a buffer from usergart, if it is mapped there */
-static void evict(struct drm_gem_object *obj)
+static void omap_gem_evict(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
@@ -213,7 +214,7 @@ static void evict(struct drm_gem_object *obj)
&priv->usergart[fmt].entry[i];
if (entry->obj == obj)
- evict_entry(obj, fmt, entry);
+ omap_gem_evict_entry(obj, fmt, entry);
}
}
}
@@ -222,7 +223,10 @@ static void evict(struct drm_gem_object *obj)
* Page Management
*/
-/** ensure backing pages are allocated */
+/*
+ * Ensure backing pages are allocated. Must be called with the omap_obj.lock
+ * held.
+ */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
@@ -232,7 +236,14 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
int i, ret;
dma_addr_t *addrs;
- WARN_ON(omap_obj->pages);
+ lockdep_assert_held(&omap_obj->lock);
+
+ /*
+ * If not using shmem (in which case backing pages don't need to be
+ * allocated) or if pages are already allocated we're done.
+ */
+ if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
+ return 0;
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
@@ -288,35 +299,15 @@ free_pages:
return ret;
}
-/* acquire pages when needed (for example, for DMA where physically
- * contiguous buffer is not required
- */
-static int get_pages(struct drm_gem_object *obj, struct page ***pages)
-{
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret = 0;
-
- if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
- ret = omap_gem_attach_pages(obj);
- if (ret) {
- dev_err(obj->dev->dev, "could not attach pages\n");
- return ret;
- }
- }
-
- /* TODO: even phys-contig.. we should have a list of pages? */
- *pages = omap_obj->pages;
-
- return 0;
-}
-
-/** release backing pages */
+/* Release backing pages. Must be called with the omap_obj.lock held. */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
unsigned int npages = obj->size >> PAGE_SHIFT;
unsigned int i;
+ lockdep_assert_held(&omap_obj->lock);
+
for (i = 0; i < npages; i++) {
if (omap_obj->dma_addrs[i])
dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
@@ -336,16 +327,6 @@ u32 omap_gem_flags(struct drm_gem_object *obj)
return to_omap_bo(obj)->flags;
}
-u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
-{
- u64 offset;
-
- mutex_lock(&obj->dev->struct_mutex);
- offset = mmap_offset(obj);
- mutex_unlock(&obj->dev->struct_mutex);
- return offset;
-}
-
/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
@@ -371,7 +352,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
*/
/* Normal handling for the case of faulting in non-tiled buffers */
-static int fault_1d(struct drm_gem_object *obj,
+static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -385,18 +366,19 @@ static int fault_1d(struct drm_gem_object *obj,
omap_gem_cpu_sync_page(obj, pgoff);
pfn = page_to_pfn(omap_obj->pages[pgoff]);
} else {
- BUG_ON(!is_contiguous(omap_obj));
+ BUG_ON(!omap_gem_is_contiguous(omap_obj));
pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
}
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
+ return vmf_insert_mixed(vma, vmf->address,
+ __pfn_to_pfn_t(pfn, PFN_DEV));
}
/* Special handling for the case of faulting in 2d tiled buffers */
-static int fault_2d(struct drm_gem_object *obj,
+static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -407,7 +389,8 @@ static int fault_2d(struct drm_gem_object *obj,
unsigned long pfn;
pgoff_t pgoff, base_pgoff;
unsigned long vaddr;
- int i, ret, slots;
+ int i, err, slots;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
/*
* Note the height of the slot is also equal to the number of pages
@@ -443,7 +426,7 @@ static int fault_2d(struct drm_gem_object *obj,
/* evict previous buffer using this usergart entry, if any: */
if (entry->obj)
- evict_entry(entry->obj, fmt, entry);
+ omap_gem_evict_entry(entry->obj, fmt, entry);
entry->obj = obj;
entry->obj_pgoff = base_pgoff;
@@ -473,9 +456,10 @@ static int fault_2d(struct drm_gem_object *obj,
memset(pages + slots, 0,
sizeof(struct page *) * (n - slots));
- ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
- if (ret) {
- dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
+ err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
+ if (err) {
+ ret = vmf_error(err);
+ dev_err(obj->dev->dev, "failed to pin: %d\n", err);
return ret;
}
@@ -485,7 +469,10 @@ static int fault_2d(struct drm_gem_object *obj,
pfn, pfn << PAGE_SHIFT);
for (i = n; i > 0; i--) {
- vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vmf_insert_mixed(vma,
+ vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+ if (ret & VM_FAULT_ERROR)
+ break;
pfn += priv->usergart[fmt].stride_pfn;
vaddr += PAGE_SIZE * m;
}
@@ -494,7 +481,7 @@ static int fault_2d(struct drm_gem_object *obj,
priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
% NUM_USERGART_ENTRIES;
- return 0;
+ return ret;
}
/**
@@ -509,24 +496,25 @@ static int fault_2d(struct drm_gem_object *obj,
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*/
-int omap_gem_fault(struct vm_fault *vmf)
+vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- struct drm_device *dev = obj->dev;
- struct page **pages;
- int ret;
+ int err;
+ vm_fault_t ret;
/* Make sure we don't parallel update on a fault, nor move or remove
* something from beneath our feet
*/
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
/* if a shmem backed object, make sure we have pages attached now */
- ret = get_pages(obj, &pages);
- if (ret)
+ err = omap_gem_attach_pages(obj);
+ if (err) {
+ ret = vmf_error(err);
goto fail;
+ }
/* where should we do corresponding put_pages().. we are mapping
* the original page, rather than thru a GART, so we can't rely
@@ -535,28 +523,14 @@ int omap_gem_fault(struct vm_fault *vmf)
*/
if (omap_obj->flags & OMAP_BO_TILED)
- ret = fault_2d(obj, vma, vmf);
+ ret = omap_gem_fault_2d(obj, vma, vmf);
else
- ret = fault_1d(obj, vma, vmf);
+ ret = omap_gem_fault_1d(obj, vma, vmf);
fail:
- mutex_unlock(&dev->struct_mutex);
- switch (ret) {
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ mutex_unlock(&omap_obj->lock);
+ return ret;
}
/** We override mainly to fix up some of the vm mapping flags.. */
@@ -689,21 +663,22 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
omap_obj->roll = roll;
- mutex_lock(&obj->dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
/* if we aren't mapped yet, we don't need to do anything */
if (omap_obj->block) {
- struct page **pages;
- ret = get_pages(obj, &pages);
+ ret = omap_gem_attach_pages(obj);
if (ret)
goto fail;
- ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
+
+ ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
+ roll, true);
if (ret)
dev_err(obj->dev->dev, "could not repin: %d\n", ret);
}
fail:
- mutex_unlock(&obj->dev->struct_mutex);
+ mutex_unlock(&omap_obj->lock);
return ret;
}
@@ -722,7 +697,7 @@ fail:
* the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
* unmapped from the CPU.
*/
-static inline bool is_cached_coherent(struct drm_gem_object *obj)
+static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -738,7 +713,7 @@ void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
struct drm_device *dev = obj->dev;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- if (is_cached_coherent(obj))
+ if (omap_gem_is_cached_coherent(obj))
return;
if (omap_obj->dma_addrs[pgoff]) {
@@ -758,7 +733,7 @@ void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
struct page **pages = omap_obj->pages;
bool dirty = false;
- if (is_cached_coherent(obj))
+ if (omap_gem_is_cached_coherent(obj))
return;
for (i = 0; i < npages; i++) {
@@ -804,18 +779,17 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = 0;
- mutex_lock(&obj->dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
- if (!is_contiguous(omap_obj) && priv->has_dmm) {
+ if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
if (omap_obj->dma_addr_cnt == 0) {
- struct page **pages;
u32 npages = obj->size >> PAGE_SHIFT;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct tiler_block *block;
BUG_ON(omap_obj->block);
- ret = get_pages(obj, &pages);
+ ret = omap_gem_attach_pages(obj);
if (ret)
goto fail;
@@ -835,7 +809,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
}
/* TODO: enable async refill.. */
- ret = tiler_pin(block, pages, npages,
+ ret = tiler_pin(block, omap_obj->pages, npages,
omap_obj->roll, true);
if (ret) {
tiler_release(block);
@@ -853,7 +827,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
omap_obj->dma_addr_cnt++;
*dma_addr = omap_obj->dma_addr;
- } else if (is_contiguous(omap_obj)) {
+ } else if (omap_gem_is_contiguous(omap_obj)) {
*dma_addr = omap_obj->dma_addr;
} else {
ret = -EINVAL;
@@ -861,7 +835,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
}
fail:
- mutex_unlock(&obj->dev->struct_mutex);
+ mutex_unlock(&omap_obj->lock);
return ret;
}
@@ -879,7 +853,8 @@ void omap_gem_unpin(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret;
- mutex_lock(&obj->dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
+
if (omap_obj->dma_addr_cnt > 0) {
omap_obj->dma_addr_cnt--;
if (omap_obj->dma_addr_cnt == 0) {
@@ -898,7 +873,7 @@ void omap_gem_unpin(struct drm_gem_object *obj)
}
}
- mutex_unlock(&obj->dev->struct_mutex);
+ mutex_unlock(&omap_obj->lock);
}
/* Get rotated scanout address (only valid if already pinned), at the
@@ -911,13 +886,16 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
- mutex_lock(&obj->dev->struct_mutex);
+ mutex_lock(&omap_obj->lock);
+
if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
(omap_obj->flags & OMAP_BO_TILED)) {
*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
ret = 0;
}
- mutex_unlock(&obj->dev->struct_mutex);
+
+ mutex_unlock(&omap_obj->lock);
+
return ret;
}
@@ -944,17 +922,27 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
bool remap)
{
- int ret;
- if (!remap) {
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- if (!omap_obj->pages)
- return -ENOMEM;
- *pages = omap_obj->pages;
- return 0;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&omap_obj->lock);
+
+ if (remap) {
+ ret = omap_gem_attach_pages(obj);
+ if (ret)
+ goto unlock;
}
- mutex_lock(&obj->dev->struct_mutex);
- ret = get_pages(obj, pages);
- mutex_unlock(&obj->dev->struct_mutex);
+
+ if (!omap_obj->pages) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ *pages = omap_obj->pages;
+
+unlock:
+ mutex_unlock(&omap_obj->lock);
+
return ret;
}
@@ -969,23 +957,34 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
-/* Get kernel virtual address for CPU access.. this more or less only
- * exists for omap_fbdev. This should be called with struct_mutex
- * held.
+/*
+ * Get kernel virtual address for CPU access.. this more or less only
+ * exists for omap_fbdev.
*/
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ void *vaddr;
+ int ret;
+
+ mutex_lock(&omap_obj->lock);
+
if (!omap_obj->vaddr) {
- struct page **pages;
- int ret = get_pages(obj, &pages);
- if (ret)
- return ERR_PTR(ret);
- omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+ ret = omap_gem_attach_pages(obj);
+ if (ret) {
+ vaddr = ERR_PTR(ret);
+ goto unlock;
+ }
+
+ omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}
- return omap_obj->vaddr;
+
+ vaddr = omap_obj->vaddr;
+
+unlock:
+ mutex_unlock(&omap_obj->lock);
+ return vaddr;
}
#endif
@@ -1001,6 +1000,7 @@ int omap_gem_resume(struct drm_device *dev)
struct omap_gem_object *omap_obj;
int ret = 0;
+ mutex_lock(&priv->list_lock);
list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
if (omap_obj->block) {
struct drm_gem_object *obj = &omap_obj->base;
@@ -1012,12 +1012,14 @@ int omap_gem_resume(struct drm_device *dev)
omap_obj->roll, true);
if (ret) {
dev_err(dev->dev, "could not repin: %d\n", ret);
- return ret;
+ goto done;
}
}
}
- return 0;
+done:
+ mutex_unlock(&priv->list_lock);
+ return ret;
}
#endif
@@ -1033,6 +1035,8 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
off = drm_vma_node_start(&obj->vma_node);
+ mutex_lock(&omap_obj->lock);
+
seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, kref_read(&obj->refcount),
off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
@@ -1050,6 +1054,8 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_printf(m, " %zu", obj->size);
}
+ mutex_unlock(&omap_obj->lock);
+
seq_printf(m, "\n");
}
@@ -1081,17 +1087,21 @@ void omap_gem_free_object(struct drm_gem_object *obj)
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- evict(obj);
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ omap_gem_evict(obj);
- spin_lock(&priv->list_lock);
+ mutex_lock(&priv->list_lock);
list_del(&omap_obj->mm_list);
- spin_unlock(&priv->list_lock);
+ mutex_unlock(&priv->list_lock);
- /* this means the object is still pinned.. which really should
- * not happen. I think..
+ /*
+ * We own the sole reference to the object at this point, but to keep
+ * lockdep happy, we must still take omap_obj->lock to call
+ * omap_gem_detach_pages(). This should hardly make any difference as
+ * there can't be any lock contention.
*/
+ mutex_lock(&omap_obj->lock);
+
+ /* The object should not be pinned. */
WARN_ON(omap_obj->dma_addr_cnt > 0);
if (omap_obj->pages) {
@@ -1110,8 +1120,12 @@ void omap_gem_free_object(struct drm_gem_object *obj)
drm_prime_gem_destroy(obj, omap_obj->sgt);
}
+ mutex_unlock(&omap_obj->lock);
+
drm_gem_object_release(obj);
+ mutex_destroy(&omap_obj->lock);
+
kfree(omap_obj);
}
@@ -1167,6 +1181,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
obj = &omap_obj->base;
omap_obj->flags = flags;
+ mutex_init(&omap_obj->lock);
if (flags & OMAP_BO_TILED) {
/*
@@ -1206,9 +1221,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
goto err_release;
}
- spin_lock(&priv->list_lock);
+ mutex_lock(&priv->list_lock);
list_add(&omap_obj->mm_list, &priv->obj_list);
- spin_unlock(&priv->list_lock);
+ mutex_unlock(&priv->list_lock);
return obj;
@@ -1231,16 +1246,15 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
if (sgt->orig_nents != 1 && !priv->has_dmm)
return ERR_PTR(-EINVAL);
- mutex_lock(&dev->struct_mutex);
-
gsize.bytes = PAGE_ALIGN(size);
obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
- if (!obj) {
- obj = ERR_PTR(-ENOMEM);
- goto done;
- }
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
omap_obj = to_omap_bo(obj);
+
+ mutex_lock(&omap_obj->lock);
+
omap_obj->sgt = sgt;
if (sgt->orig_nents == 1) {
@@ -1276,7 +1290,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
}
done:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&omap_obj->lock);
return obj;
}
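
The omap_gem.c rework above replaces the global dev->struct_mutex with a per-object omap_obj->lock. The resulting convention, sketched with illustrative function names: exported entry points take the object mutex, while internal helpers such as omap_gem_attach_pages() only assert that it is held:

static int helper_touching_pages(struct omap_gem_object *omap_obj)
{
	lockdep_assert_held(&omap_obj->lock);
	/* safe to touch omap_obj->pages, block and vaddr here */
	return 0;
}

static int exported_entry_point(struct omap_gem_object *omap_obj)
{
	int ret;

	mutex_lock(&omap_obj->lock);
	ret = helper_touching_pages(omap_obj);
	mutex_unlock(&omap_obj->lock);

	return ret;
}

This is also why priv->list_lock becomes a mutex in omap_drv.c: the object list is now walked while taking each object's sleeping lock, which a spinlock would not allow.
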
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h
index a78bde05193a..c1c45fbde155 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.h
+++ b/drivers/gpu/drm/omapdrm/omap_gem.h
@@ -21,6 +21,7 @@
#define __OMAPDRM_GEM_H__
#include <linux/types.h>
+#include <linux/mm_types.h>
enum dma_data_direction;
@@ -80,7 +81,7 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
struct dma_buf *buffer);
-int omap_gem_fault(struct vm_fault *vmf);
+vm_fault_t omap_gem_fault(struct vm_fault *vmf);
int omap_gem_roll(struct drm_gem_object *obj, u32 roll);
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 8e41d649e248..ec04a69ade46 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -93,23 +93,6 @@ static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
return 0;
}
-
-static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
- unsigned long page_num)
-{
- struct drm_gem_object *obj = buffer->priv;
- struct page **pages;
- omap_gem_get_pages(obj, &pages, false);
- omap_gem_cpu_sync_page(obj, page_num);
- return kmap_atomic(pages[page_num]);
-}
-
-static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
- unsigned long page_num, void *addr)
-{
- kunmap_atomic(addr);
-}
-
static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
unsigned long page_num)
{
@@ -148,8 +131,6 @@ static const struct dma_buf_ops omap_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
- .map_atomic = omap_gem_dmabuf_kmap_atomic,
- .unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
.map = omap_gem_dmabuf_kmap,
.unmap = omap_gem_dmabuf_kunmap,
.mmap = omap_gem_dmabuf_mmap,
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 25682ff3449a..6020c30a33b3 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -46,6 +46,15 @@ config DRM_PANEL_ILITEK_IL9322
Say Y here if you want to enable support for Ilitek IL9322
QVGA (320x240) RGB, YUV and ITU-T BT.656 panels.
+config DRM_PANEL_ILITEK_ILI9881C
+ tristate "Ilitek ILI9881C-based panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for panels based on the
+ Ilitek ILI9881C controller.
+
config DRM_PANEL_INNOLUX_P079ZCA
tristate "Innolux P079ZCA panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index f26efc11d746..5ccaaa9d13af 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
+obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
new file mode 100644
index 000000000000..3ad4a46c4e94
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2018, Bootlin
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct ili9881c {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct backlight_device *backlight;
+ struct regulator *power;
+ struct gpio_desc *reset;
+};
+
+enum ili9881c_op {
+ ILI9881C_SWITCH_PAGE,
+ ILI9881C_COMMAND,
+};
+
+struct ili9881c_instr {
+ enum ili9881c_op op;
+
+ union arg {
+ struct cmd {
+ u8 cmd;
+ u8 data;
+ } cmd;
+ u8 page;
+ } arg;
+};
+
+#define ILI9881C_SWITCH_PAGE_INSTR(_page) \
+ { \
+ .op = ILI9881C_SWITCH_PAGE, \
+ .arg = { \
+ .page = (_page), \
+ }, \
+ }
+
+#define ILI9881C_COMMAND_INSTR(_cmd, _data) \
+ { \
+ .op = ILI9881C_COMMAND, \
+ .arg = { \
+ .cmd = { \
+ .cmd = (_cmd), \
+ .data = (_data), \
+ }, \
+ }, \
+ }
+
+static const struct ili9881c_instr ili9881c_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x73),
+ ILI9881C_COMMAND_INSTR(0x04, 0x03),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x06),
+ ILI9881C_COMMAND_INSTR(0x07, 0x06),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x18),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x04),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x03),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x25),
+ ILI9881C_COMMAND_INSTR(0x10, 0x25),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0xC0),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x04),
+ ILI9881C_COMMAND_INSTR(0x21, 0x01),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x04),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x00),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x01),
+ ILI9881C_COMMAND_INSTR(0x51, 0x23),
+ ILI9881C_COMMAND_INSTR(0x52, 0x45),
+ ILI9881C_COMMAND_INSTR(0x53, 0x67),
+ ILI9881C_COMMAND_INSTR(0x54, 0x89),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x56, 0x01),
+ ILI9881C_COMMAND_INSTR(0x57, 0x23),
+ ILI9881C_COMMAND_INSTR(0x58, 0x45),
+ ILI9881C_COMMAND_INSTR(0x59, 0x67),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x11),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x60, 0x02),
+ ILI9881C_COMMAND_INSTR(0x61, 0x02),
+ ILI9881C_COMMAND_INSTR(0x62, 0x02),
+ ILI9881C_COMMAND_INSTR(0x63, 0x02),
+ ILI9881C_COMMAND_INSTR(0x64, 0x02),
+ ILI9881C_COMMAND_INSTR(0x65, 0x02),
+ ILI9881C_COMMAND_INSTR(0x66, 0x02),
+ ILI9881C_COMMAND_INSTR(0x67, 0x02),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x06),
+ ILI9881C_COMMAND_INSTR(0x70, 0x07),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x02),
+ ILI9881C_COMMAND_INSTR(0x76, 0x02),
+ ILI9881C_COMMAND_INSTR(0x77, 0x02),
+ ILI9881C_COMMAND_INSTR(0x78, 0x02),
+ ILI9881C_COMMAND_INSTR(0x79, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x80, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x83, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x84, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x85, 0x06),
+ ILI9881C_COMMAND_INSTR(0x86, 0x07),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6C, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6E, 0x22),
+ ILI9881C_COMMAND_INSTR(0x6F, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3A, 0xA4),
+ ILI9881C_COMMAND_INSTR(0x8D, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x53, 0xDC),
+ ILI9881C_COMMAND_INSTR(0x55, 0xA7),
+ ILI9881C_COMMAND_INSTR(0x50, 0x78),
+ ILI9881C_COMMAND_INSTR(0x51, 0x78),
+ ILI9881C_COMMAND_INSTR(0x31, 0x02),
+ ILI9881C_COMMAND_INSTR(0x60, 0x14),
+ ILI9881C_COMMAND_INSTR(0xA0, 0x2A),
+ ILI9881C_COMMAND_INSTR(0xA1, 0x39),
+ ILI9881C_COMMAND_INSTR(0xA2, 0x46),
+ ILI9881C_COMMAND_INSTR(0xA3, 0x0e),
+ ILI9881C_COMMAND_INSTR(0xA4, 0x12),
+ ILI9881C_COMMAND_INSTR(0xA5, 0x25),
+ ILI9881C_COMMAND_INSTR(0xA6, 0x19),
+ ILI9881C_COMMAND_INSTR(0xA7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xA8, 0xa6),
+ ILI9881C_COMMAND_INSTR(0xA9, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xAA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xAB, 0x85),
+ ILI9881C_COMMAND_INSTR(0xAC, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xAD, 0x1B),
+ ILI9881C_COMMAND_INSTR(0xAE, 0x51),
+ ILI9881C_COMMAND_INSTR(0xAF, 0x22),
+ ILI9881C_COMMAND_INSTR(0xB0, 0x2d),
+ ILI9881C_COMMAND_INSTR(0xB1, 0x4f),
+ ILI9881C_COMMAND_INSTR(0xB2, 0x59),
+ ILI9881C_COMMAND_INSTR(0xB3, 0x3F),
+ ILI9881C_COMMAND_INSTR(0xC0, 0x2A),
+ ILI9881C_COMMAND_INSTR(0xC1, 0x3a),
+ ILI9881C_COMMAND_INSTR(0xC2, 0x45),
+ ILI9881C_COMMAND_INSTR(0xC3, 0x0e),
+ ILI9881C_COMMAND_INSTR(0xC4, 0x11),
+ ILI9881C_COMMAND_INSTR(0xC5, 0x24),
+ ILI9881C_COMMAND_INSTR(0xC6, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xC7, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xC8, 0xaa),
+ ILI9881C_COMMAND_INSTR(0xC9, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xCA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xCB, 0x96),
+ ILI9881C_COMMAND_INSTR(0xCC, 0x1C),
+ ILI9881C_COMMAND_INSTR(0xCD, 0x1B),
+ ILI9881C_COMMAND_INSTR(0xCE, 0x51),
+ ILI9881C_COMMAND_INSTR(0xCF, 0x22),
+ ILI9881C_COMMAND_INSTR(0xD0, 0x2b),
+ ILI9881C_COMMAND_INSTR(0xD1, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xD2, 0x59),
+ ILI9881C_COMMAND_INSTR(0xD3, 0x3F),
+};
+
+static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
+{
+ return container_of(panel, struct ili9881c, panel);
+}
+
+/*
+ * The panel seems to accept some private DCS commands that map
+ * directly to registers.
+ *
+ * It is organised by page, with each page having its own set of
+ * registers, and the first page looks like it's holding the standard
+ * DCS commands.
+ *
+ * So before any attempt at sending a command or data, we have to
+ * make sure we're in the right page.
+ */
+static int ili9881c_switch_page(struct ili9881c *ctx, u8 page)
+{
+ u8 buf[4] = { 0xff, 0x98, 0x81, page };
+ int ret;
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int ili9881c_send_cmd_data(struct ili9881c *ctx, u8 cmd, u8 data)
+{
+ u8 buf[2] = { cmd, data };
+ int ret;
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int ili9881c_prepare(struct drm_panel *panel)
+{
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+ unsigned int i;
+ int ret;
+
+ /* Power the panel */
+ ret = regulator_enable(ctx->power);
+ if (ret)
+ return ret;
+ msleep(5);
+
+ /* And reset it */
+ gpiod_set_value(ctx->reset, 1);
+ msleep(20);
+
+ gpiod_set_value(ctx->reset, 0);
+ msleep(20);
+
+ for (i = 0; i < ARRAY_SIZE(ili9881c_init); i++) {
+ const struct ili9881c_instr *instr = &ili9881c_init[i];
+
+ if (instr->op == ILI9881C_SWITCH_PAGE)
+ ret = ili9881c_switch_page(ctx, instr->arg.page);
+ else if (instr->op == ILI9881C_COMMAND)
+ ret = ili9881c_send_cmd_data(ctx, instr->arg.cmd.cmd,
+ instr->arg.cmd.data);
+
+ if (ret)
+ return ret;
+ }
+
+ ret = ili9881c_switch_page(ctx, 0);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_set_tear_on(ctx->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ili9881c_enable(struct drm_panel *panel)
+{
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+ msleep(120);
+
+ mipi_dsi_dcs_set_display_on(ctx->dsi);
+ backlight_enable(ctx->backlight);
+
+ return 0;
+}
+
+static int ili9881c_disable(struct drm_panel *panel)
+{
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+ backlight_disable(ctx->backlight);
+ return mipi_dsi_dcs_set_display_off(ctx->dsi);
+}
+
+static int ili9881c_unprepare(struct drm_panel *panel)
+{
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+ mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ regulator_disable(ctx->power);
+ gpiod_set_value(ctx->reset, 1);
+
+ return 0;
+}
+
+static const struct drm_display_mode bananapi_default_mode = {
+ .clock = 62000,
+ .vrefresh = 60,
+
+ .hdisplay = 720,
+ .hsync_start = 720 + 10,
+ .hsync_end = 720 + 10 + 20,
+ .htotal = 720 + 10 + 20 + 30,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 10,
+ .vsync_end = 1280 + 10 + 10,
+ .vtotal = 1280 + 10 + 10 + 20,
+};
+
+static int ili9881c_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct ili9881c *ctx = panel_to_ili9881c(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &bananapi_default_mode);
+ if (!mode) {
+ dev_err(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n",
+ bananapi_default_mode.hdisplay,
+ bananapi_default_mode.vdisplay,
+ bananapi_default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ panel->connector->display_info.width_mm = 62;
+ panel->connector->display_info.height_mm = 110;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs ili9881c_funcs = {
+ .prepare = ili9881c_prepare,
+ .unprepare = ili9881c_unprepare,
+ .enable = ili9881c_enable,
+ .disable = ili9881c_disable,
+ .get_modes = ili9881c_get_modes,
+};
+
+static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct device_node *np;
+ struct ili9881c *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = &dsi->dev;
+ ctx->panel.funcs = &ili9881c_funcs;
+
+ ctx->power = devm_regulator_get(&dsi->dev, "power");
+ if (IS_ERR(ctx->power)) {
+ dev_err(&dsi->dev, "Couldn't get our power regulator\n");
+ return PTR_ERR(ctx->power);
+ }
+
+ ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset)) {
+ dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->reset);
+ }
+
+ np = of_parse_phandle(dsi->dev.of_node, "backlight", 0);
+ if (np) {
+ ctx->backlight = of_find_backlight_by_node(np);
+ of_node_put(np);
+
+ if (!ctx->backlight)
+ return -EPROBE_DEFER;
+ }
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int ili9881c_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct ili9881c *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ if (ctx->backlight)
+ put_device(&ctx->backlight->dev);
+
+ return 0;
+}
+
+static const struct of_device_id ili9881c_of_match[] = {
+ { .compatible = "bananapi,lhr050h41" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ili9881c_of_match);
+
+static struct mipi_dsi_driver ili9881c_dsi_driver = {
+ .probe = ili9881c_dsi_probe,
+ .remove = ili9881c_dsi_remove,
+ .driver = {
+ .name = "ili9881c-dsi",
+ .of_match_table = ili9881c_of_match,
+ },
+};
+module_mipi_dsi_driver(ili9881c_dsi_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Ilitek ILI9881C Controller Driver");
+MODULE_LICENSE("GPL v2");
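
As a usage note for the new panel driver: the drm_panel_funcs registered above are not called by the driver itself but by the DRM pipeline that consumes the panel. A hedged sketch of the expected call order (the drm_panel_* helpers are the real drm_panel API; the two wrapper functions are illustrative):

static void panel_power_on(struct drm_panel *panel)
{
	drm_panel_prepare(panel);	/* -> ili9881c_prepare() */
	drm_panel_enable(panel);	/* -> ili9881c_enable() */
}

static void panel_power_off(struct drm_panel *panel)
{
	drm_panel_disable(panel);	/* -> ili9881c_disable() */
	drm_panel_unprepare(panel);	/* -> ili9881c_unprepare() */
}

prepare/unprepare bracket power and the init sequence, while enable/disable bracket actual scanout, which is why ili9881c_enable() waits 120 ms after the exit-sleep command before turning the display on.
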
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index 57df39b5c589..72edb334d997 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -11,6 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <drm/drmP.h>
@@ -20,12 +21,41 @@
#include <video/mipi_display.h>
+struct panel_init_cmd {
+ size_t len;
+ const char *data;
+};
+
+#define _INIT_CMD(...) { \
+ .len = sizeof((char[]){__VA_ARGS__}), \
+ .data = (char[]){__VA_ARGS__} }
+
+struct panel_desc {
+ const struct drm_display_mode *mode;
+ unsigned int bpc;
+ struct {
+ unsigned int width;
+ unsigned int height;
+ } size;
+
+ unsigned long flags;
+ enum mipi_dsi_pixel_format format;
+ const struct panel_init_cmd *init_cmds;
+ unsigned int lanes;
+ const char * const *supply_names;
+ unsigned int num_supplies;
+ unsigned int sleep_mode_delay;
+ unsigned int power_down_delay;
+};
+
struct innolux_panel {
struct drm_panel base;
struct mipi_dsi_device *link;
+ const struct panel_desc *desc;
struct backlight_device *backlight;
- struct regulator *supply;
+ struct regulator_bulk_data *supplies;
+ unsigned int num_supplies;
struct gpio_desc *enable_gpio;
bool prepared;
@@ -72,12 +102,16 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
return err;
}
+ if (innolux->desc->sleep_mode_delay)
+ msleep(innolux->desc->sleep_mode_delay);
+
gpiod_set_value_cansleep(innolux->enable_gpio, 0);
- /* T8: 80ms - 1000ms */
- msleep(80);
+ if (innolux->desc->power_down_delay)
+ msleep(innolux->desc->power_down_delay);
- err = regulator_disable(innolux->supply);
+ err = regulator_bulk_disable(innolux->desc->num_supplies,
+ innolux->supplies);
if (err < 0)
return err;
@@ -89,24 +123,55 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
static int innolux_panel_prepare(struct drm_panel *panel)
{
struct innolux_panel *innolux = to_innolux_panel(panel);
- int err, regulator_err;
+ int err;
if (innolux->prepared)
return 0;
gpiod_set_value_cansleep(innolux->enable_gpio, 0);
- err = regulator_enable(innolux->supply);
+ err = regulator_bulk_enable(innolux->desc->num_supplies,
+ innolux->supplies);
if (err < 0)
return err;
- /* T2: 15ms - 1000ms */
- usleep_range(15000, 16000);
+ /* p079zca: t2 (20ms), p097pfg: t4 (15ms) */
+ usleep_range(20000, 21000);
gpiod_set_value_cansleep(innolux->enable_gpio, 1);
- /* T4: 15ms - 1000ms */
- usleep_range(15000, 16000);
+ /* p079zca: t4, p097pfg: t5 */
+ usleep_range(20000, 21000);
+
+ if (innolux->desc->init_cmds) {
+ const struct panel_init_cmd *cmds =
+ innolux->desc->init_cmds;
+ unsigned int i;
+
+ for (i = 0; cmds[i].len != 0; i++) {
+ const struct panel_init_cmd *cmd = &cmds[i];
+
+ err = mipi_dsi_generic_write(innolux->link, cmd->data,
+ cmd->len);
+ if (err < 0) {
+ dev_err(panel->dev,
+ "failed to write command %u\n", i);
+ goto poweroff;
+ }
+
+ /*
+ * Included by random guessing, because without this
+ * (or at least, some delay), the panel sometimes
+ * didn't appear to pick up the command sequence.
+ */
+ err = mipi_dsi_dcs_nop(innolux->link);
+ if (err < 0) {
+ dev_err(panel->dev,
+ "failed to send DCS nop: %d\n", err);
+ goto poweroff;
+ }
+ }
+ }
err = mipi_dsi_dcs_exit_sleep_mode(innolux->link);
if (err < 0) {
@@ -133,12 +198,9 @@ static int innolux_panel_prepare(struct drm_panel *panel)
return 0;
poweroff:
- regulator_err = regulator_disable(innolux->supply);
- if (regulator_err)
- DRM_DEV_ERROR(panel->dev, "failed to disable regulator: %d\n",
- regulator_err);
-
gpiod_set_value_cansleep(innolux->enable_gpio, 0);
+ regulator_bulk_disable(innolux->desc->num_supplies, innolux->supplies);
+
return err;
}
@@ -162,7 +224,11 @@ static int innolux_panel_enable(struct drm_panel *panel)
return 0;
}
-static const struct drm_display_mode default_mode = {
+static const char * const innolux_p079zca_supply_names[] = {
+ "power",
+};
+
+static const struct drm_display_mode innolux_p079zca_mode = {
.clock = 56900,
.hdisplay = 768,
.hsync_start = 768 + 40,
@@ -175,15 +241,181 @@ static const struct drm_display_mode default_mode = {
.vrefresh = 60,
};
+static const struct panel_desc innolux_p079zca_panel_desc = {
+ .mode = &innolux_p079zca_mode,
+ .bpc = 8,
+ .size = {
+ .width = 120,
+ .height = 160,
+ },
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+ .supply_names = innolux_p079zca_supply_names,
+ .num_supplies = ARRAY_SIZE(innolux_p079zca_supply_names),
+ .power_down_delay = 80, /* T8: 80ms - 1000ms */
+};
+
+static const char * const innolux_p097pfg_supply_names[] = {
+ "avdd",
+ "avee",
+};
+
+static const struct drm_display_mode innolux_p097pfg_mode = {
+ .clock = 229000,
+ .hdisplay = 1536,
+ .hsync_start = 1536 + 100,
+ .hsync_end = 1536 + 100 + 24,
+ .htotal = 1536 + 100 + 24 + 100,
+ .vdisplay = 2048,
+ .vsync_start = 2048 + 100,
+ .vsync_end = 2048 + 100 + 2,
+ .vtotal = 2048 + 100 + 2 + 18,
+ .vrefresh = 60,
+};
+
+/*
+ * Display manufacturer failed to provide init sequencing according to
+ * https://chromium-review.googlesource.com/c/chromiumos/third_party/coreboot/+/892065/
+ * so the init sequence stems from a register dump of a working panel.
+ */
+static const struct panel_init_cmd innolux_p097pfg_init_cmds[] = {
+ /* page 0 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x00),
+ _INIT_CMD(0xB1, 0xE8, 0x11),
+ _INIT_CMD(0xB2, 0x25, 0x02),
+ _INIT_CMD(0xB5, 0x08, 0x00),
+ _INIT_CMD(0xBC, 0x0F, 0x00),
+ _INIT_CMD(0xB8, 0x03, 0x06, 0x00, 0x00),
+ _INIT_CMD(0xBD, 0x01, 0x90, 0x14, 0x14),
+ _INIT_CMD(0x6F, 0x01),
+ _INIT_CMD(0xC0, 0x03),
+ _INIT_CMD(0x6F, 0x02),
+ _INIT_CMD(0xC1, 0x0D),
+ _INIT_CMD(0xD9, 0x01, 0x09, 0x70),
+ _INIT_CMD(0xC5, 0x12, 0x21, 0x00),
+ _INIT_CMD(0xBB, 0x93, 0x93),
+
+ /* page 1 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x01),
+ _INIT_CMD(0xB3, 0x3C, 0x3C),
+ _INIT_CMD(0xB4, 0x0F, 0x0F),
+ _INIT_CMD(0xB9, 0x45, 0x45),
+ _INIT_CMD(0xBA, 0x14, 0x14),
+ _INIT_CMD(0xCA, 0x02),
+ _INIT_CMD(0xCE, 0x04),
+ _INIT_CMD(0xC3, 0x9B, 0x9B),
+ _INIT_CMD(0xD8, 0xC0, 0x03),
+ _INIT_CMD(0xBC, 0x82, 0x01),
+ _INIT_CMD(0xBD, 0x9E, 0x01),
+
+ /* page 2 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x02),
+ _INIT_CMD(0xB0, 0x82),
+ _INIT_CMD(0xD1, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x82, 0x00, 0xA5,
+ 0x00, 0xC1, 0x00, 0xEA, 0x01, 0x0D, 0x01, 0x40),
+ _INIT_CMD(0xD2, 0x01, 0x6A, 0x01, 0xA8, 0x01, 0xDC, 0x02, 0x29,
+ 0x02, 0x67, 0x02, 0x68, 0x02, 0xA8, 0x02, 0xF0),
+ _INIT_CMD(0xD3, 0x03, 0x19, 0x03, 0x49, 0x03, 0x67, 0x03, 0x8C,
+ 0x03, 0xA6, 0x03, 0xC7, 0x03, 0xDE, 0x03, 0xEC),
+ _INIT_CMD(0xD4, 0x03, 0xFF, 0x03, 0xFF),
+ _INIT_CMD(0xE0, 0x00, 0x00, 0x00, 0x86, 0x00, 0xC5, 0x00, 0xE5,
+ 0x00, 0xFF, 0x01, 0x26, 0x01, 0x45, 0x01, 0x75),
+ _INIT_CMD(0xE1, 0x01, 0x9C, 0x01, 0xD5, 0x02, 0x05, 0x02, 0x4D,
+ 0x02, 0x86, 0x02, 0x87, 0x02, 0xC3, 0x03, 0x03),
+ _INIT_CMD(0xE2, 0x03, 0x2A, 0x03, 0x56, 0x03, 0x72, 0x03, 0x94,
+ 0x03, 0xAC, 0x03, 0xCB, 0x03, 0xE0, 0x03, 0xED),
+ _INIT_CMD(0xE3, 0x03, 0xFF, 0x03, 0xFF),
+
+ /* page 3 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x03),
+ _INIT_CMD(0xB0, 0x00, 0x00, 0x00, 0x00),
+ _INIT_CMD(0xB1, 0x00, 0x00, 0x00, 0x00),
+ _INIT_CMD(0xB2, 0x00, 0x00, 0x06, 0x04, 0x01, 0x40, 0x85),
+ _INIT_CMD(0xB3, 0x10, 0x07, 0xFC, 0x04, 0x01, 0x40, 0x80),
+ _INIT_CMD(0xB6, 0xF0, 0x08, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01,
+ 0x40, 0x80),
+ _INIT_CMD(0xBA, 0xC5, 0x07, 0x00, 0x04, 0x11, 0x25, 0x8C),
+ _INIT_CMD(0xBB, 0xC5, 0x07, 0x00, 0x03, 0x11, 0x25, 0x8C),
+ _INIT_CMD(0xC0, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x80, 0x80),
+ _INIT_CMD(0xC1, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x80, 0x80),
+ _INIT_CMD(0xC4, 0x00, 0x00),
+ _INIT_CMD(0xEF, 0x41),
+
+ /* page 4 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x04),
+ _INIT_CMD(0xEC, 0x4C),
+
+ /* page 5 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x05),
+ _INIT_CMD(0xB0, 0x13, 0x03, 0x03, 0x01),
+ _INIT_CMD(0xB1, 0x30, 0x00),
+ _INIT_CMD(0xB2, 0x02, 0x02, 0x00),
+ _INIT_CMD(0xB3, 0x82, 0x23, 0x82, 0x9D),
+ _INIT_CMD(0xB4, 0xC5, 0x75, 0x24, 0x57),
+ _INIT_CMD(0xB5, 0x00, 0xD4, 0x72, 0x11, 0x11, 0xAB, 0x0A),
+ _INIT_CMD(0xB6, 0x00, 0x00, 0xD5, 0x72, 0x24, 0x56),
+ _INIT_CMD(0xB7, 0x5C, 0xDC, 0x5C, 0x5C),
+ _INIT_CMD(0xB9, 0x0C, 0x00, 0x00, 0x01, 0x00),
+ _INIT_CMD(0xC0, 0x75, 0x11, 0x11, 0x54, 0x05),
+ _INIT_CMD(0xC6, 0x00, 0x00, 0x00, 0x00),
+ _INIT_CMD(0xD0, 0x00, 0x48, 0x08, 0x00, 0x00),
+ _INIT_CMD(0xD1, 0x00, 0x48, 0x09, 0x00, 0x00),
+
+ /* page 6 */
+ _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x06),
+ _INIT_CMD(0xB0, 0x02, 0x32, 0x32, 0x08, 0x2F),
+ _INIT_CMD(0xB1, 0x2E, 0x15, 0x14, 0x13, 0x12),
+ _INIT_CMD(0xB2, 0x11, 0x10, 0x00, 0x3D, 0x3D),
+ _INIT_CMD(0xB3, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+ _INIT_CMD(0xB4, 0x3D, 0x32),
+ _INIT_CMD(0xB5, 0x03, 0x32, 0x32, 0x09, 0x2F),
+ _INIT_CMD(0xB6, 0x2E, 0x1B, 0x1A, 0x19, 0x18),
+ _INIT_CMD(0xB7, 0x17, 0x16, 0x01, 0x3D, 0x3D),
+ _INIT_CMD(0xB8, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+ _INIT_CMD(0xB9, 0x3D, 0x32),
+ _INIT_CMD(0xC0, 0x01, 0x32, 0x32, 0x09, 0x2F),
+ _INIT_CMD(0xC1, 0x2E, 0x1A, 0x1B, 0x16, 0x17),
+ _INIT_CMD(0xC2, 0x18, 0x19, 0x03, 0x3D, 0x3D),
+ _INIT_CMD(0xC3, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+ _INIT_CMD(0xC4, 0x3D, 0x32),
+ _INIT_CMD(0xC5, 0x00, 0x32, 0x32, 0x08, 0x2F),
+ _INIT_CMD(0xC6, 0x2E, 0x14, 0x15, 0x10, 0x11),
+ _INIT_CMD(0xC7, 0x12, 0x13, 0x02, 0x3D, 0x3D),
+ _INIT_CMD(0xC8, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+ _INIT_CMD(0xC9, 0x3D, 0x32),
+
+ {},
+};
+
+static const struct panel_desc innolux_p097pfg_panel_desc = {
+ .mode = &innolux_p097pfg_mode,
+ .bpc = 8,
+ .size = {
+ .width = 147,
+ .height = 196,
+ },
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_cmds = innolux_p097pfg_init_cmds,
+ .lanes = 4,
+ .supply_names = innolux_p097pfg_supply_names,
+ .num_supplies = ARRAY_SIZE(innolux_p097pfg_supply_names),
+ .sleep_mode_delay = 100, /* T15 */
+};
+
static int innolux_panel_get_modes(struct drm_panel *panel)
{
+ struct innolux_panel *innolux = to_innolux_panel(panel);
+ const struct drm_display_mode *m = innolux->desc->mode;
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(panel->drm, m);
if (!mode) {
DRM_DEV_ERROR(panel->drm->dev, "failed to add mode %ux%u@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ m->hdisplay, m->vdisplay, m->vrefresh);
return -ENOMEM;
}
@@ -191,9 +423,11 @@ static int innolux_panel_get_modes(struct drm_panel *panel)
drm_mode_probed_add(panel->connector, mode);
- panel->connector->display_info.width_mm = 120;
- panel->connector->display_info.height_mm = 160;
- panel->connector->display_info.bpc = 8;
+ panel->connector->display_info.width_mm =
+ innolux->desc->size.width;
+ panel->connector->display_info.height_mm =
+ innolux->desc->size.height;
+ panel->connector->display_info.bpc = innolux->desc->bpc;
return 1;
}
@@ -207,19 +441,42 @@ static const struct drm_panel_funcs innolux_panel_funcs = {
};
static const struct of_device_id innolux_of_match[] = {
- { .compatible = "innolux,p079zca", },
+ { .compatible = "innolux,p079zca",
+ .data = &innolux_p079zca_panel_desc
+ },
+ { .compatible = "innolux,p097pfg",
+ .data = &innolux_p097pfg_panel_desc
+ },
{ }
};
MODULE_DEVICE_TABLE(of, innolux_of_match);
-static int innolux_panel_add(struct innolux_panel *innolux)
+static int innolux_panel_add(struct mipi_dsi_device *dsi,
+ const struct panel_desc *desc)
{
- struct device *dev = &innolux->link->dev;
- int err;
+ struct innolux_panel *innolux;
+ struct device *dev = &dsi->dev;
+ int err, i;
+
+ innolux = devm_kzalloc(dev, sizeof(*innolux), GFP_KERNEL);
+ if (!innolux)
+ return -ENOMEM;
+
+ innolux->desc = desc;
+
+ innolux->supplies = devm_kcalloc(dev, desc->num_supplies,
+ sizeof(*innolux->supplies),
+ GFP_KERNEL);
+ if (!innolux->supplies)
+ return -ENOMEM;
+
+ for (i = 0; i < desc->num_supplies; i++)
+ innolux->supplies[i].supply = desc->supply_names[i];
- innolux->supply = devm_regulator_get(dev, "power");
- if (IS_ERR(innolux->supply))
- return PTR_ERR(innolux->supply);
+ err = devm_regulator_bulk_get(dev, desc->num_supplies,
+ innolux->supplies);
+ if (err < 0)
+ return err;
innolux->enable_gpio = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_HIGH);
@@ -230,15 +487,21 @@ static int innolux_panel_add(struct innolux_panel *innolux)
}
innolux->backlight = devm_of_find_backlight(dev);
-
if (IS_ERR(innolux->backlight))
return PTR_ERR(innolux->backlight);
drm_panel_init(&innolux->base);
innolux->base.funcs = &innolux_panel_funcs;
- innolux->base.dev = &innolux->link->dev;
+ innolux->base.dev = dev;
+
+ err = drm_panel_add(&innolux->base);
+ if (err < 0)
+ return err;
+
+ mipi_dsi_set_drvdata(dsi, innolux);
+ innolux->link = dsi;
- return drm_panel_add(&innolux->base);
+ return 0;
}
static void innolux_panel_del(struct innolux_panel *innolux)
@@ -249,28 +512,19 @@ static void innolux_panel_del(struct innolux_panel *innolux)
static int innolux_panel_probe(struct mipi_dsi_device *dsi)
{
- struct innolux_panel *innolux;
+ const struct panel_desc *desc;
int err;
- dsi->lanes = 4;
- dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
- MIPI_DSI_MODE_LPM;
-
- innolux = devm_kzalloc(&dsi->dev, sizeof(*innolux), GFP_KERNEL);
- if (!innolux)
- return -ENOMEM;
-
- mipi_dsi_set_drvdata(dsi, innolux);
+ desc = of_device_get_match_data(&dsi->dev);
+ dsi->mode_flags = desc->flags;
+ dsi->format = desc->format;
+ dsi->lanes = desc->lanes;
- innolux->link = dsi;
-
- err = innolux_panel_add(innolux);
+ err = innolux_panel_add(dsi, desc);
if (err < 0)
return err;
- err = mipi_dsi_attach(dsi);
- return err;
+ return mipi_dsi_attach(dsi);
}
static int innolux_panel_remove(struct mipi_dsi_device *dsi)
@@ -292,7 +546,6 @@ static int innolux_panel_remove(struct mipi_dsi_device *dsi)
DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
err);
- drm_panel_detach(&innolux->base);
innolux_panel_del(innolux);
return 0;
@@ -318,5 +571,6 @@ static struct mipi_dsi_driver innolux_panel_driver = {
module_mipi_dsi_driver(innolux_panel_driver);
MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
+MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>");
MODULE_DESCRIPTION("Innolux P079ZCA panel driver");
MODULE_LICENSE("GPL v2");
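
A minimal standalone sketch of the _INIT_CMD() macro used above: the compound literal supplies both the payload bytes and, via sizeof, their count, and the empty sentinel entry {} at the end of innolux_p097pfg_init_cmds[] is what terminates the walk in innolux_panel_prepare(). The table contents here are hypothetical demo data, not driver values:

    #include <stdio.h>
    #include <stddef.h>

    struct panel_init_cmd {
        size_t len;
        const char *data;
    };

    #define _INIT_CMD(...) { \
        .len = sizeof((char[]){__VA_ARGS__}), \
        .data = (char[]){__VA_ARGS__} }

    /* hypothetical two-command table, sentinel-terminated like the driver's */
    static const struct panel_init_cmd demo_cmds[] = {
        _INIT_CMD(0xF0, 0x55, 0xAA),    /* .len == 3 */
        _INIT_CMD(0xB1, 0xE8),          /* .len == 2 */
        {},                             /* .len == 0 ends the walk */
    };

    int main(void)
    {
        for (unsigned int i = 0; demo_cmds[i].len != 0; i++)
            printf("cmd %u: reg 0x%02x, %zu bytes\n", i,
                   (unsigned char)demo_cmds[i].data[0], demo_cmds[i].len);
        return 0;
    }
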
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 0a94ab79a6c0..99caa7835e7b 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -500,7 +500,6 @@ static int jdi_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
ret);
- drm_panel_detach(&jdi->base);
jdi_panel_del(jdi);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 5185819c5b79..8a1687887ae9 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -282,7 +282,6 @@ static int panel_lvds_remove(struct platform_device *pdev)
{
struct panel_lvds *lvds = dev_get_drvdata(&pdev->dev);
- drm_panel_detach(&lvds->panel);
drm_panel_remove(&lvds->panel);
panel_lvds_disable(&lvds->panel);
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 90f1ae4af93c..87fa316e1d7b 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -14,8 +14,6 @@
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
-#define DRV_NAME "orisetech_otm8009a"
-
#define OTM8009A_BACKLIGHT_DEFAULT 240
#define OTM8009A_BACKLIGHT_MAX 255
@@ -98,6 +96,20 @@ static void otm8009a_dcs_write_buf(struct otm8009a *ctx, const void *data,
DRM_WARN("mipi dsi dcs write buffer failed\n");
}
+static void otm8009a_dcs_write_buf_hs(struct otm8009a *ctx, const void *data,
+ size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ /* data will be sent in DSI HS mode (i.e. no LPM) */
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ otm8009a_dcs_write_buf(ctx, data, len);
+
+ /* restore the DSI LPM mode */
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+}
+
#define dcs_write_seq(ctx, seq...) \
({ \
static const u8 d[] = { seq }; \
@@ -248,11 +260,7 @@ static int otm8009a_disable(struct drm_panel *panel)
if (!ctx->enabled)
return 0; /* This is not an issue so we return 0 here */
- /* Power off the backlight. Note: end-user still controls brightness */
- ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
- ret = backlight_update_status(ctx->bl_dev);
- if (ret)
- return ret;
+ backlight_disable(ctx->bl_dev);
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret)
@@ -316,13 +324,6 @@ static int otm8009a_prepare(struct drm_panel *panel)
ctx->prepared = true;
- /*
- * Power on the backlight. Note: end-user still controls brightness
- * Note: ctx->prepared must be true before updating the backlight.
- */
- ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(ctx->bl_dev);
-
return 0;
}
@@ -330,6 +331,11 @@ static int otm8009a_enable(struct drm_panel *panel)
{
struct otm8009a *ctx = panel_to_otm8009a(panel);
+ if (ctx->enabled)
+ return 0;
+
+ backlight_enable(ctx->bl_dev);
+
ctx->enabled = true;
return 0;
@@ -387,7 +393,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
*/
data[0] = MIPI_DCS_SET_DISPLAY_BRIGHTNESS;
data[1] = bd->props.brightness;
- otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+ otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
/* set Brightness Control & Backlight on */
data[1] = 0x24;
@@ -399,7 +405,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
/* Update Brightness Control & Backlight */
data[0] = MIPI_DCS_WRITE_CONTROL_DISPLAY;
- otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+ otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
return 0;
}
@@ -444,11 +450,14 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
ctx->panel.dev = dev;
ctx->panel.funcs = &otm8009a_drm_funcs;
- ctx->bl_dev = backlight_device_register(DRV_NAME "_backlight", dev, ctx,
- &otm8009a_backlight_ops, NULL);
+ ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
+ dsi->host->dev, ctx,
+ &otm8009a_backlight_ops,
+ NULL);
if (IS_ERR(ctx->bl_dev)) {
- dev_err(dev, "failed to register backlight device\n");
- return PTR_ERR(ctx->bl_dev);
+ ret = PTR_ERR(ctx->bl_dev);
+ dev_err(dev, "failed to register backlight: %d\n", ret);
+ return ret;
}
ctx->bl_dev->props.max_brightness = OTM8009A_BACKLIGHT_MAX;
@@ -466,11 +475,6 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
return ret;
}
- DRM_INFO(DRV_NAME "_panel %ux%u@%u %ubpp dsi %udl - ready\n",
- default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh,
- mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
-
return 0;
}
@@ -481,8 +485,6 @@ static int otm8009a_remove(struct mipi_dsi_device *dsi)
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
- backlight_device_unregister(ctx->bl_dev);
-
return 0;
}
@@ -496,7 +498,7 @@ static struct mipi_dsi_driver orisetech_otm8009a_driver = {
.probe = otm8009a_probe,
.remove = otm8009a_remove,
.driver = {
- .name = DRV_NAME "_panel",
+ .name = "panel-orisetech-otm8009a",
.of_match_table = orisetech_otm8009a_of_match,
},
};
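
The _hs wrapper above works because mipi_dsi_dcs_write_buffer() consults dsi->mode_flags on every transfer (it sets MIPI_DSI_MSG_USE_LPM only when MIPI_DSI_MODE_LPM is present), so clearing the flag for the duration of one write pushes just that write onto the high-speed lanes. A minimal sketch of the same pattern (hypothetical helper name; error handling omitted):

    #include <drm/drm_mipi_dsi.h>

    /* send one DCS write in high-speed mode, then restore low-power mode */
    static void my_panel_dcs_write_hs(struct mipi_dsi_device *dsi,
                                      const void *data, size_t len)
    {
        unsigned long saved = dsi->mode_flags;

        dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;  /* HS for this transfer */
        mipi_dsi_dcs_write_buffer(dsi, data, len);
        dsi->mode_flags = saved;                /* LPM again, if it was set */
    }
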
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 74a806121f80..cb4dfb98be0f 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -299,7 +299,6 @@ static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
if (ret < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
- drm_panel_detach(&wuxga_nt->base);
wuxga_nt_panel_del(wuxga_nt);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index a188a3959f1a..6ad827b93ae1 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -823,7 +823,7 @@ static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx)
int ret, i;
ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id));
- if (ret < ARRAY_SIZE(id) || id[0] == 0x00) {
+ if (ret < 0 || ret < ARRAY_SIZE(id) || id[0] == 0x00) {
dev_err(ctx->dev, "read id failed\n");
ctx->error = -EIO;
return;
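
The added `ret < 0` test is needed because ARRAY_SIZE() yields a size_t: in `ret < ARRAY_SIZE(id)` the signed return value is converted to unsigned, so a negative error code compares as a huge number and sails past the short-read check. A standalone illustration of the pitfall:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        unsigned char id[3];
        int ret = -5;   /* pretend the DCS read returned an error code */

        /* ret is converted to the unsigned type of sizeof() here, so
         * -5 compares as a huge value and the test wrongly fails: */
        if (ret < sizeof(id))
            printf("short read caught\n");
        else
            printf("error slipped through the unsigned compare\n");

        /* the fixed form catches both cases: */
        if (ret < 0 || (size_t)ret < sizeof(id))
            printf("fixed check: read failed or was short\n");
        return 0;
    }
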
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 71c09ed436ae..75f925390551 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -292,7 +292,6 @@ static int seiko_panel_remove(struct platform_device *pdev)
{
struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
- drm_panel_detach(&panel->base);
drm_panel_remove(&panel->base);
seiko_panel_disable(&panel->base);
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index 6bf8730f1a21..02fc0f5423d4 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -418,7 +418,6 @@ static int sharp_panel_remove(struct mipi_dsi_device *dsi)
if (err < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
- drm_panel_detach(&sharp->base);
sharp_panel_del(sharp);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index 494aa9b1628a..e5cae0050f52 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -327,7 +327,6 @@ static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
if (ret < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
- drm_panel_detach(&sharp_nt->base);
sharp_nt_panel_del(sharp_nt);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index cbf1ab404ee7..5b5d0a24e713 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -252,7 +252,7 @@ static int panel_simple_get_modes(struct drm_panel *panel)
/* probe EDID if a DDC bus is available */
if (p->ddc) {
struct edid *edid = drm_get_edid(panel->connector, p->ddc);
- drm_mode_connector_update_edid_property(panel->connector, edid);
+ drm_connector_update_edid_property(panel->connector, edid);
if (edid) {
num += drm_add_edid_modes(panel->connector, edid);
kfree(edid);
@@ -364,7 +364,6 @@ static int panel_simple_remove(struct device *dev)
{
struct panel_simple *panel = dev_get_drvdata(dev);
- drm_panel_detach(&panel->base);
drm_panel_remove(&panel->base);
panel_simple_disable(&panel->base);
@@ -581,6 +580,34 @@ static const struct panel_desc auo_b133htn01 = {
},
};
+static const struct display_timing auo_g070vvn01_timings = {
+ .pixelclock = { 33300000, 34209000, 45000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 20, 40, 200 },
+ .hback_porch = { 87, 40, 1 },
+ .hsync_len = { 1, 48, 87 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 5, 13, 200 },
+ .vback_porch = { 31, 31, 29 },
+ .vsync_len = { 1, 1, 3 },
+};
+
+static const struct panel_desc auo_g070vvn01 = {
+ .timings = &auo_g070vvn01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .delay = {
+ .prepare = 200,
+ .enable = 50,
+ .disable = 50,
+ .unprepare = 1000,
+ },
+};
+
static const struct drm_display_mode auo_g104sn02_mode = {
.clock = 40000,
.hdisplay = 800,
@@ -687,7 +714,7 @@ static const struct panel_desc auo_p320hvn03 = {
.enable = 450,
.unprepare = 500,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
static const struct drm_display_mode auo_t215hvn01_mode = {
@@ -745,6 +772,28 @@ static const struct panel_desc avic_tm070ddh03 = {
},
};
+static const struct drm_display_mode boe_hv070wsa_mode = {
+ .clock = 40800,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 90,
+ .hsync_end = 1024 + 90 + 90,
+ .htotal = 1024 + 90 + 90 + 90,
+ .vdisplay = 600,
+ .vsync_start = 600 + 3,
+ .vsync_end = 600 + 3 + 4,
+ .vtotal = 600 + 3 + 4 + 3,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc boe_hv070wsa = {
+ .modes = &boe_hv070wsa_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 154,
+ .height = 90,
+ },
+};
+
static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
{
.clock = 71900,
@@ -857,6 +906,61 @@ static const struct panel_desc chunghwa_claa101wb01 = {
},
};
+static const struct drm_display_mode dataimage_scf0700c48ggu18_mode = {
+ .clock = 33260,
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 128,
+ .htotal = 800 + 40 + 128 + 88,
+ .vdisplay = 480,
+ .vsync_start = 480 + 10,
+ .vsync_end = 480 + 10 + 2,
+ .vtotal = 480 + 10 + 2 + 33,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc dataimage_scf0700c48ggu18 = {
+ .modes = &dataimage_scf0700c48ggu18_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
+static const struct display_timing dlc_dlc0700yzg_1_timing = {
+ .pixelclock = { 45000000, 51200000, 57000000 },
+ .hactive = { 1024, 1024, 1024 },
+ .hfront_porch = { 100, 106, 113 },
+ .hback_porch = { 100, 106, 113 },
+ .hsync_len = { 100, 108, 114 },
+ .vactive = { 600, 600, 600 },
+ .vfront_porch = { 8, 11, 15 },
+ .vback_porch = { 8, 11, 15 },
+ .vsync_len = { 9, 13, 15 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc dlc_dlc0700yzg_1 = {
+ .timings = &dlc_dlc0700yzg_1_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .delay = {
+ .prepare = 30,
+ .enable = 200,
+ .disable = 200,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+};
+
static const struct drm_display_mode edt_et057090dhu_mode = {
.clock = 25175,
.hdisplay = 640,
@@ -909,6 +1013,18 @@ static const struct panel_desc edt_etm0700g0dh6 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
};
+static const struct panel_desc edt_etm0700g0bdh6 = {
+ .modes = &edt_etm0700g0dh6_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
.clock = 32260,
.hdisplay = 800,
@@ -1086,6 +1202,36 @@ static const struct panel_desc innolux_at070tn92 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct display_timing innolux_g070y2_l01_timing = {
+ .pixelclock = { 28000000, 29500000, 32000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 61, 91, 141 },
+ .hback_porch = { 60, 90, 140 },
+ .hsync_len = { 12, 12, 12 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 4, 9, 30 },
+ .vback_porch = { 4, 8, 28 },
+ .vsync_len = { 2, 2, 2 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc innolux_g070y2_l01 = {
+ .timings = &innolux_g070y2_l01_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .delay = {
+ .prepare = 10,
+ .enable = 100,
+ .disable = 100,
+ .unprepare = 800,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
static const struct display_timing innolux_g101ice_l01_timing = {
.pixelclock = { 60400000, 71100000, 74700000 },
.hactive = { 1280, 1280, 1280 },
@@ -1217,6 +1363,30 @@ static const struct panel_desc innolux_n156bge_l21 = {
},
};
+static const struct drm_display_mode innolux_tv123wam_mode = {
+ .clock = 206016,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 80,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 10,
+ .vtotal = 1440 + 3 + 10 + 27,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc innolux_tv123wam = {
+ .modes = &innolux_tv123wam_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 259,
+ .height = 173,
+ },
+};
+
static const struct drm_display_mode innolux_zj070na_01p_mode = {
.clock = 51501,
.hdisplay = 1024,
@@ -1247,8 +1417,8 @@ static const struct display_timing koe_tx31d200vm0baa_timing = {
.hback_porch = { 16, 36, 56 },
.hsync_len = { 8, 8, 8 },
.vactive = { 480, 480, 480 },
- .vfront_porch = { 6, 21, 33.5 },
- .vback_porch = { 6, 21, 33.5 },
+ .vfront_porch = { 6, 21, 33 },
+ .vback_porch = { 6, 21, 33 },
.vsync_len = { 8, 8, 8 },
.flags = DISPLAY_FLAGS_DE_HIGH,
};
@@ -1511,6 +1681,33 @@ static const struct panel_desc netron_dy_e231732 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 41,
+ .htotal = 480 + 2 + 41 + 2,
+ .vdisplay = 272,
+ .vsync_start = 272 + 2,
+ .vsync_end = 272 + 2 + 10,
+ .vtotal = 272 + 2 + 10 + 2,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
+ .modes = &newhaven_nhd_43_480272ef_atxl_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 95,
+ .height = 54,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE |
+ DRM_BUS_FLAG_SYNC_POSEDGE,
+};
+
static const struct display_timing nlt_nl192108ac18_02d_timing = {
.pixelclock = { 130000000, 148350000, 163000000 },
.hactive = { 1920, 1920, 1920 },
@@ -1696,6 +1893,36 @@ static const struct panel_desc qd43003c0_40 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct display_timing rocktech_rk070er9427_timing = {
+ .pixelclock = { 26400000, 33300000, 46800000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 16, 210, 354 },
+ .hback_porch = { 46, 46, 46 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 7, 22, 147 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 1, 1, 1 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc rocktech_rk070er9427 = {
+ .timings = &rocktech_rk070er9427_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .delay = {
+ .prepare = 41,
+ .enable = 50,
+ .unprepare = 41,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
.clock = 271560,
.hdisplay = 2560,
@@ -1764,6 +1991,30 @@ static const struct panel_desc samsung_ltn140at29_301 = {
},
};
+static const struct drm_display_mode sharp_lq035q7db03_mode = {
+ .clock = 5500,
+ .hdisplay = 240,
+ .hsync_start = 240 + 16,
+ .hsync_end = 240 + 16 + 7,
+ .htotal = 240 + 16 + 7 + 5,
+ .vdisplay = 320,
+ .vsync_start = 320 + 9,
+ .vsync_end = 320 + 9 + 1,
+ .vtotal = 320 + 9 + 1 + 7,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc sharp_lq035q7db03 = {
+ .modes = &sharp_lq035q7db03_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 54,
+ .height = 72,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct display_timing sharp_lq101k1ly04_timing = {
.pixelclock = { 60000000, 65000000, 80000000 },
.hactive = { 1280, 1280, 1280 },
@@ -2095,6 +2346,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b133xtn01",
.data = &auo_b133xtn01,
}, {
+ .compatible = "auo,g070vvn01",
+ .data = &auo_g070vvn01,
+ }, {
.compatible = "auo,g104sn02",
.data = &auo_g104sn02,
}, {
@@ -2113,6 +2367,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "avic,tm070ddh03",
.data = &avic_tm070ddh03,
}, {
+ .compatible = "boe,hv070wsa-100",
+ .data = &boe_hv070wsa
+ }, {
.compatible = "boe,nv101wxmn51",
.data = &boe_nv101wxmn51,
}, {
@@ -2125,6 +2382,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "chunghwa,claa101wb01",
.data = &chunghwa_claa101wb01
}, {
+ .compatible = "dataimage,scf0700c48ggu18",
+ .data = &dataimage_scf0700c48ggu18,
+ }, {
+ .compatible = "dlc,dlc0700yzg-1",
+ .data = &dlc_dlc0700yzg_1,
+ }, {
.compatible = "edt,et057090dhu",
.data = &edt_et057090dhu,
}, {
@@ -2134,6 +2397,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,etm0700g0dh6",
.data = &edt_etm0700g0dh6,
}, {
+ .compatible = "edt,etm0700g0bdh6",
+ .data = &edt_etm0700g0bdh6,
+ }, {
+ .compatible = "edt,etm0700g0edh6",
+ .data = &edt_etm0700g0bdh6,
+ }, {
.compatible = "foxlink,fl500wvr00-a0t",
.data = &foxlink_fl500wvr00_a0t,
}, {
@@ -2155,10 +2424,13 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,at070tn92",
.data = &innolux_at070tn92,
}, {
- .compatible ="innolux,g101ice-l01",
+ .compatible = "innolux,g070y2-l01",
+ .data = &innolux_g070y2_l01,
+ }, {
+ .compatible = "innolux,g101ice-l01",
.data = &innolux_g101ice_l01
}, {
- .compatible ="innolux,g121i1-l01",
+ .compatible = "innolux,g121i1-l01",
.data = &innolux_g121i1_l01
}, {
.compatible = "innolux,g121x1-l03",
@@ -2170,6 +2442,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
+ .compatible = "innolux,tv123wam",
+ .data = &innolux_tv123wam,
+ }, {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
@@ -2206,6 +2481,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "netron-dy,e231732",
.data = &netron_dy_e231732,
}, {
+ .compatible = "newhaven,nhd-4.3-480272ef-atxl",
+ .data = &newhaven_nhd_43_480272ef_atxl,
+ }, {
.compatible = "nlt,nl192108ac18-02d",
.data = &nlt_nl192108ac18_02d,
}, {
@@ -2227,6 +2505,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "qiaodian,qd43003c0-40",
.data = &qd43003c0_40,
}, {
+ .compatible = "rocktech,rk070er9427",
+ .data = &rocktech_rk070er9427,
+ }, {
.compatible = "samsung,lsn122dl01-c01",
.data = &samsung_lsn122dl01_c01,
}, {
@@ -2236,6 +2517,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "samsung,ltn140at29-301",
.data = &samsung_ltn140at29_301,
}, {
+ .compatible = "sharp,lq035q7db03",
+ .data = &sharp_lq035q7db03,
+ }, {
.compatible = "sharp,lq101k1ly04",
.data = &sharp_lq101k1ly04,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 358c64ef1922..74284e5afc5d 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -419,7 +419,6 @@ static int st7789v_remove(struct spi_device *spi)
{
struct st7789v *ctx = spi_get_drvdata(spi);
- drm_panel_detach(&ctx->panel);
drm_panel_remove(&ctx->panel);
if (ctx->backlight)
diff --git a/drivers/gpu/drm/pl111/Makefile b/drivers/gpu/drm/pl111/Makefile
index 19a8189dc54f..0c70f0e91d21 100644
--- a/drivers/gpu/drm/pl111/Makefile
+++ b/drivers/gpu/drm/pl111/Makefile
@@ -4,6 +4,7 @@ pl111_drm-y += pl111_display.o \
pl111_drv.o
pl111_drm-$(CONFIG_ARCH_VEXPRESS) += pl111_vexpress.o
+pl111_drm-$(CONFIG_ARCH_NOMADIK) += pl111_nomadik.o
pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o
obj-$(CONFIG_DRM_PL111) += pl111_drm.o
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 19b0d006a54a..754f6b25f265 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -63,7 +63,7 @@ pl111_mode_valid(struct drm_crtc *crtc,
* We use the pixelclock to also account for interlaced modes, the
* resulting bandwidth is in bytes per second.
*/
- bw = mode->clock * 1000; /* In Hz */
+ bw = mode->clock * 1000ULL; /* In Hz */
bw = bw * mode->hdisplay * mode->vdisplay * cpp;
bw = div_u64(bw, mode->htotal * mode->vtotal);
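
The 1000ULL suffix matters because mode->clock holds kHz in an int: without it the multiply runs in 32-bit arithmetic and only the already-truncated product is widened into bw. A tiny illustration (the 3 GHz clock is a deliberately out-of-range hypothetical; signed overflow is formally undefined and is shown here only to make the truncation visible):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int clock = 3000000;                 /* hypothetical 3 GHz, in kHz */
        uint64_t bad  = clock * 1000;        /* 32-bit multiply overflows */
        uint64_t good = clock * 1000ULL;     /* widened before multiplying */

        printf("bad  = %llu\n", (unsigned long long)bad);
        printf("good = %llu\n", (unsigned long long)good);
        return 0;
    }
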
@@ -223,48 +223,84 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
/* Hard-code TFT panel */
cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1);
+ /* On the ST Micro variant, assume all 24 bits are connected */
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_CDWID_24;
- /* Note that the the hardware's format reader takes 'r' from
+ /*
+ * Note that the ARM hardware's format reader takes 'r' from
* the low bit, while DRM formats list channels from high bit
- * to low bit as you read left to right.
+ * to low bit as you read left to right. The ST Micro version of
+ * the PL110 (LCDC) however uses the standard DRM format.
*/
switch (fb->format->format) {
+ case DRM_FORMAT_BGR888:
+ /* Only supported on the ST Micro variant */
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_LCDBPP24_PACKED | CNTL_BGR;
+ break;
+ case DRM_FORMAT_RGB888:
+ /* Only supported on the ST Micro variant */
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_LCDBPP24_PACKED;
+ break;
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XBGR8888:
- cntl |= CNTL_LCDBPP24;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_LCDBPP24 | CNTL_BGR;
+ else
+ cntl |= CNTL_LCDBPP24;
break;
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB8888:
- cntl |= CNTL_LCDBPP24 | CNTL_BGR;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_LCDBPP24;
+ else
+ cntl |= CNTL_LCDBPP24 | CNTL_BGR;
break;
case DRM_FORMAT_BGR565:
if (priv->variant->is_pl110)
cntl |= CNTL_LCDBPP16;
+ else if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_LCDBPP16 | CNTL_ST_1XBPP_565 | CNTL_BGR;
else
cntl |= CNTL_LCDBPP16_565;
break;
case DRM_FORMAT_RGB565:
if (priv->variant->is_pl110)
- cntl |= CNTL_LCDBPP16;
+ cntl |= CNTL_LCDBPP16 | CNTL_BGR;
+ else if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_LCDBPP16 | CNTL_ST_1XBPP_565;
else
- cntl |= CNTL_LCDBPP16_565;
- cntl |= CNTL_BGR;
+ cntl |= CNTL_LCDBPP16_565 | CNTL_BGR;
break;
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_XBGR1555:
cntl |= CNTL_LCDBPP16;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_1XBPP_5551 | CNTL_BGR;
break;
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
- cntl |= CNTL_LCDBPP16 | CNTL_BGR;
+ cntl |= CNTL_LCDBPP16;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_1XBPP_5551;
+ else
+ cntl |= CNTL_BGR;
break;
case DRM_FORMAT_ABGR4444:
case DRM_FORMAT_XBGR4444:
cntl |= CNTL_LCDBPP16_444;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_1XBPP_444 | CNTL_BGR;
break;
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_XRGB4444:
- cntl |= CNTL_LCDBPP16_444 | CNTL_BGR;
+ cntl |= CNTL_LCDBPP16_444;
+ if (priv->variant->st_bitmux_control)
+ cntl |= CNTL_ST_1XBPP_444;
+ else
+ cntl |= CNTL_BGR;
break;
default:
WARN_ONCE(true, "Unknown FB format 0x%08x\n",
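
Background for the CNTL_BGR juggling above: DRM fourcc formats are defined little-endian, so DRM_FORMAT_XRGB8888 stores bytes B, G, R, X in ascending memory order. The classic ARM reader takes red from the low bits, which matches XBGR8888 natively, while the ST variant follows DRM's ordering; that is why each case sets CNTL_BGR on opposite branches. A quick demonstration of the byte order:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* XRGB8888: X in bits 31:24, R 23:16, G 15:8, B 7:0 */
        uint32_t xrgb = (0x00u << 24) | (0x11u << 16) | (0x22u << 8) | 0x33u;
        const uint8_t *p = (const uint8_t *)&xrgb;

        /* prints "33 22 11 00" (B G R X) on a little-endian machine */
        printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
        return 0;
    }
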
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index ce4501d0ab48..1aa015ccacef 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -36,11 +36,14 @@ struct drm_minor;
* struct pl111_variant_data - encodes IP differences
* @name: the name of this variant
* @is_pl110: this is the early PL110 variant
+ * @is_lcdc: this is the ST Microelectronics Nomadik LCDC variant
* @external_bgr: this is the Versatile Pl110 variant with external
* BGR/RGB routing
* @broken_clockdivider: the clock divider is broken and we need to
* use the supplied clock directly
* @broken_vblank: the vblank IRQ is broken on this variant
+ * @st_bitmux_control: this variant is using the ST Micro bitmux
+ * extensions to the control register
* @formats: array of supported pixel formats on this variant
* @nformats: the length of the array of supported pixel formats
* @fb_bpp: desired bits per pixel on the default framebuffer
@@ -48,9 +51,11 @@ struct drm_minor;
struct pl111_variant_data {
const char *name;
bool is_pl110;
+ bool is_lcdc;
bool external_bgr;
bool broken_clockdivider;
bool broken_vblank;
+ bool st_bitmux_control;
const u32 *formats;
unsigned int nformats;
unsigned int fb_bpp;
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 454ff0804642..47fe30223444 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -75,6 +75,7 @@
#include "pl111_drm.h"
#include "pl111_versatile.h"
+#include "pl111_nomadik.h"
#define DRIVER_DESC "DRM module for PL111"
@@ -249,6 +250,8 @@ static struct drm_driver pl111_drm_driver = {
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = pl111_debugfs_init,
@@ -288,8 +291,8 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
priv->memory_bw = 0;
}
- /* The two variants swap this register */
- if (variant->is_pl110) {
+ /* The two main variants swap this register */
+ if (variant->is_pl110 || variant->is_lcdc) {
priv->ienb = CLCD_PL110_IENB;
priv->ctrl = CLCD_PL110_CNTL;
} else {
@@ -301,13 +304,15 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
if (IS_ERR(priv->regs)) {
dev_err(dev, "%s failed mmio\n", __func__);
ret = PTR_ERR(priv->regs);
- goto dev_unref;
+ goto dev_put;
}
/* This may override some variant settings */
ret = pl111_versatile_init(dev, priv);
if (ret)
- goto dev_unref;
+ goto dev_put;
+
+ pl111_nomadik_init(dev);
/* turn off interrupts before requesting the irq */
writel(0, priv->regs + priv->ienb);
@@ -321,16 +326,16 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
ret = pl111_modeset_init(drm);
if (ret != 0)
- goto dev_unref;
+ goto dev_put;
ret = drm_dev_register(drm, 0);
if (ret < 0)
- goto dev_unref;
+ goto dev_put;
return 0;
-dev_unref:
- drm_dev_unref(drm);
+dev_put:
+ drm_dev_put(drm);
of_reserved_mem_device_release(dev);
return ret;
@@ -347,7 +352,7 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
if (priv->panel)
drm_panel_bridge_remove(priv->bridge);
drm_mode_config_cleanup(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
of_reserved_mem_device_release(dev);
return 0;
@@ -400,16 +405,50 @@ static const struct pl111_variant_data pl111_variant = {
.fb_bpp = 32,
};
+static const u32 pl110_nomadik_pixel_formats[] = {
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+};
+
+static const struct pl111_variant_data pl110_nomadik_variant = {
+ .name = "LCDC (PL110 Nomadik)",
+ .formats = pl110_nomadik_pixel_formats,
+ .nformats = ARRAY_SIZE(pl110_nomadik_pixel_formats),
+ .is_lcdc = true,
+ .st_bitmux_control = true,
+ .broken_vblank = true,
+ .fb_bpp = 16,
+};
+
static const struct amba_id pl111_id_table[] = {
{
.id = 0x00041110,
.mask = 0x000fffff,
- .data = (void*)&pl110_variant,
+ .data = (void *)&pl110_variant,
+ },
+ {
+ .id = 0x00180110,
+ .mask = 0x00fffffe,
+ .data = (void *)&pl110_nomadik_variant,
},
{
.id = 0x00041111,
.mask = 0x000fffff,
- .data = (void*)&pl111_variant,
+ .data = (void *)&pl111_variant,
},
{0, 0},
};
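
For reference, the AMBA bus matches a device when (periphid & mask) == id, so the 0x00fffffe mask in the new Nomadik entry deliberately ignores bit 0 of the part number and accepts two adjacent IDs. A standalone sketch of that test (demo types, not the bus code):

    #include <stdio.h>
    #include <stdint.h>

    struct demo_amba_id { uint32_t id, mask; };

    static int demo_amba_match(uint32_t periphid, const struct demo_amba_id *e)
    {
        return (periphid & e->mask) == e->id;
    }

    int main(void)
    {
        const struct demo_amba_id nomadik = { 0x00180110, 0x00fffffe };

        printf("%d %d %d\n",
               demo_amba_match(0x00180110, &nomadik),   /* 1 */
               demo_amba_match(0x00180111, &nomadik),   /* 1: bit 0 ignored */
               demo_amba_match(0x00041110, &nomadik));  /* 0 */
        return 0;
    }
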
diff --git a/drivers/gpu/drm/pl111/pl111_nomadik.c b/drivers/gpu/drm/pl111/pl111_nomadik.c
new file mode 100644
index 000000000000..6f385e59be22
--- /dev/null
+++ b/drivers/gpu/drm/pl111/pl111_nomadik.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include "pl111_nomadik.h"
+
+#define PMU_CTRL_OFFSET 0x0000
+#define PMU_CTRL_LCDNDIF BIT(26)
+
+void pl111_nomadik_init(struct device *dev)
+{
+ struct regmap *pmu_regmap;
+
+ /*
+ * Just bail out if this is not found; we could be running
+ * multiplatform on something other than Nomadik.
+ */
+ pmu_regmap =
+ syscon_regmap_lookup_by_compatible("stericsson,nomadik-pmu");
+ if (IS_ERR(pmu_regmap))
+ return;
+
+ /*
+ * This bit in the PMU controller multiplexes the two graphics
+ * blocks found in the Nomadik STn8815. The other one is called
+ * MDIF (Master Display Interface) and gets muxed out here.
+ */
+ regmap_update_bits(pmu_regmap,
+ PMU_CTRL_OFFSET,
+ PMU_CTRL_LCDNDIF,
+ 0);
+ dev_info(dev, "set Nomadik PMU mux to CLCD mode\n");
+}
+EXPORT_SYMBOL_GPL(pl111_nomadik_init);
diff --git a/drivers/gpu/drm/pl111/pl111_nomadik.h b/drivers/gpu/drm/pl111/pl111_nomadik.h
new file mode 100644
index 000000000000..19d663d46353
--- /dev/null
+++ b/drivers/gpu/drm/pl111/pl111_nomadik.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#include <linux/device.h>
+
+#ifndef PL111_NOMADIK_H
+#define PL111_NOMADIK_H
+
+#ifdef CONFIG_ARCH_NOMADIK
+
+void pl111_nomadik_init(struct device *dev);
+
+#else
+
+static inline void pl111_nomadik_init(struct device *dev)
+{
+}
+
+#endif
+
+#endif /* PL111_NOMADIK_H */
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 768207fbbae3..0570c6826bff 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -1086,7 +1086,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
/* we get HPD via client monitors config */
connector->polled = DRM_CONNECTOR_POLL_HPD;
encoder->possible_crtcs = 1 << num_output;
- drm_mode_connector_attach_encoder(&qxl_output->base,
+ drm_connector_attach_encoder(&qxl_output->base,
&qxl_output->enc);
drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 7cb214577275..e37f0097f744 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -50,12 +50,6 @@ static const char *qxl_get_timeline_name(struct dma_fence *fence)
return "release";
}
-static bool qxl_nop_signaling(struct dma_fence *fence)
-{
- /* fences are always automatically signaled, so just pretend we did this.. */
- return true;
-}
-
static long qxl_fence_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
@@ -119,7 +113,6 @@ signaled:
static const struct dma_fence_ops qxl_fence_ops = {
.get_driver_name = qxl_get_driver_name,
.get_timeline_name = qxl_get_timeline_name,
- .enable_signaling = qxl_nop_signaling,
.wait = qxl_fence_wait,
};
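
Deleting qxl_nop_signaling() relies on the dma-fence core of this release treating a NULL .enable_signaling callback as "nothing to enable", which is exactly what the no-op claimed. Assuming that core, a minimal ops table only needs the two name callbacks; .wait is likewise optional and falls back to dma_fence_default_wait(). A sketch:

    #include <linux/dma-fence.h>

    static const char *demo_get_driver_name(struct dma_fence *fence)
    {
        return "demo";
    }

    static const char *demo_get_timeline_name(struct dma_fence *fence)
    {
        return "demo-timeline";
    }

    /* .enable_signaling and .wait may both stay NULL */
    static const struct dma_fence_ops demo_fence_ops = {
        .get_driver_name = demo_get_driver_name,
        .get_timeline_name = demo_get_timeline_name,
    };
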
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index b9302c918271..d587779a80b4 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5676,19 +5676,29 @@ int ci_dpm_init(struct radeon_device *rdev)
u16 data_offset, size;
u8 frev, crev;
struct ci_power_info *pi;
+ enum pci_bus_speed speed_cap;
+ struct pci_dev *root = rdev->pdev->bus->self;
int ret;
- u32 mask;
pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
if (pi == NULL)
return -ENOMEM;
rdev->pm.dpm.priv = pi;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret)
+ speed_cap = pcie_get_speed_cap(root);
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
pi->sys_pcie_mask = 0;
- else
- pi->sys_pcie_mask = mask;
+ } else {
+ if (speed_cap == PCIE_SPEED_8_0GT)
+ pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+ RADEON_PCIE_SPEED_50 |
+ RADEON_PCIE_SPEED_80;
+ else if (speed_cap == PCIE_SPEED_5_0GT)
+ pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+ RADEON_PCIE_SPEED_50;
+ else
+ pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
+ }
pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
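
The new mapping is cumulative, because a gen3-capable root port also runs the gen1/gen2 rates. If the same translation is needed again it could be factored into a helper along these lines (hypothetical function, using the RADEON_PCIE_SPEED_* masks added to radeon.h below):

    static u32 radeon_speed_cap_to_mask(enum pci_bus_speed speed_cap)
    {
        switch (speed_cap) {
        case PCIE_SPEED_8_0GT:
            return RADEON_PCIE_SPEED_25 | RADEON_PCIE_SPEED_50 |
                   RADEON_PCIE_SPEED_80;
        case PCIE_SPEED_5_0GT:
            return RADEON_PCIE_SPEED_25 | RADEON_PCIE_SPEED_50;
        case PCI_SPEED_UNKNOWN:
            return 0;
        default:
            return RADEON_PCIE_SPEED_25;    /* gen1 only */
        }
    }
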
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 7c73bc7e2f85..ebce4601a305 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -9499,9 +9499,10 @@ int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
+ enum pci_bus_speed speed_cap;
int bridge_pos, gpu_pos;
- u32 speed_cntl, mask, current_data_rate;
- int ret, i;
+ u32 speed_cntl, current_data_rate;
+ int i;
u16 tmp16;
if (pci_is_root_bus(rdev->pdev->bus))
@@ -9516,23 +9517,24 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
if (!(rdev->flags & RADEON_IS_PCIE))
return;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret != 0)
+ speed_cap = pcie_get_speed_cap(root);
+ if (speed_cap == PCI_SPEED_UNKNOWN)
return;
- if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+ if ((speed_cap != PCIE_SPEED_8_0GT) &&
+ (speed_cap != PCIE_SPEED_5_0GT))
return;
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
LC_CURRENT_DATA_RATE_SHIFT;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (speed_cap == PCIE_SPEED_8_0GT) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
- } else if (mask & DRM_PCIE_SPEED_50) {
+ } else if (speed_cap == PCIE_SPEED_5_0GT) {
if (current_data_rate == 1) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -9548,7 +9550,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
if (!gpu_pos)
return;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (speed_cap == PCIE_SPEED_8_0GT) {
/* re-try equalization if gen3 is not already enabled */
if (current_data_rate != 2) {
u16 bridge_cfg, gpu_cfg;
@@ -9636,9 +9638,9 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~0xf;
- if (mask & DRM_PCIE_SPEED_80)
+ if (speed_cap == PCIE_SPEED_8_0GT)
tmp16 |= 3; /* gen3 */
- else if (mask & DRM_PCIE_SPEED_50)
+ else if (speed_cap == PCIE_SPEED_5_0GT)
tmp16 |= 2; /* gen2 */
else
tmp16 |= 1; /* gen1 */
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 73d4c5348116..5e044c98fca2 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -1327,9 +1327,9 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
case RADEON_PCIE_GEN3:
return RADEON_PCIE_GEN3;
default:
- if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
+ if ((sys_mask & RADEON_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
return RADEON_PCIE_GEN3;
- else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
+ else if ((sys_mask & RADEON_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
return RADEON_PCIE_GEN2;
else
return RADEON_PCIE_GEN1;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 4a2eb409aacc..1a6f6edb3515 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1653,6 +1653,10 @@ struct radeon_pm {
struct radeon_dpm dpm;
};
+#define RADEON_PCIE_SPEED_25 1
+#define RADEON_PCIE_SPEED_50 2
+#define RADEON_PCIE_SPEED_80 4
+
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2aea2bdff99b..414642e5b7a3 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -244,23 +244,15 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *best_encoder = NULL;
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *best_encoder;
+ struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
int i;
best_encoder = connector_funcs->best_encoder(connector);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -270,7 +262,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
radeon_atombios_connected_scratch_regs(connector, encoder, connected);
else
radeon_combios_connected_scratch_regs(connector, encoder, connected);
-
}
}
@@ -279,17 +270,11 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector,
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
+
return NULL;
}
@@ -383,20 +368,23 @@ static int radeon_ddc_get_modes(struct drm_connector *connector)
int ret;
if (radeon_connector->edid) {
- drm_mode_connector_update_edid_property(connector, radeon_connector->edid);
+ drm_connector_update_edid_property(connector, radeon_connector->edid);
ret = drm_add_edid_modes(connector, radeon_connector->edid);
return ret;
}
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
return 0;
}
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
+ struct drm_encoder *encoder;
+ int i;
+
+ /* pick the first one */
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
+
return NULL;
}
@@ -436,19 +424,19 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct drm_connector *conflict;
struct radeon_connector *radeon_conflict;
- int i;
list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
+ struct drm_encoder *enc;
+ int i;
+
if (conflict == connector)
continue;
radeon_conflict = to_radeon_connector(conflict);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (conflict->encoder_ids[i] == 0)
- break;
+ drm_connector_for_each_possible_encoder(conflict, enc, i) {
/* if the IDs match */
- if (conflict->encoder_ids[i] == encoder->base.id) {
+ if (enc == encoder) {
if (conflict->status != connector_status_connected)
continue;
@@ -1256,7 +1244,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
const struct drm_encoder_helper_funcs *encoder_funcs;
- int i, r;
+ int r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
@@ -1374,15 +1362,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (radeon_connector->dac_load_detect) {
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ int i;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1458,18 +1440,11 @@ exit:
/* okay need to be smart in here about which encoder to pick */
static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
if (radeon_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1484,8 +1459,9 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
+ drm_connector_for_each_possible_encoder(connector, encoder, i)
+ return encoder;
+
return NULL;
}
@@ -1628,14 +1604,7 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
struct radeon_encoder *radeon_encoder;
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
@@ -1657,14 +1626,7 @@ static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
int i;
bool found = false;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
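
All of the removed encoder_ids[] walks follow the same shape, which the new iterator hides along with the drm_encoder_find() lookups. A minimal sketch of a caller (hypothetical helper):

    #include <drm/drm_connector.h>
    #include <drm/drm_encoder.h>

    /* return the first TMDS encoder wired to this connector, if any */
    static struct drm_encoder *first_tmds_encoder(struct drm_connector *connector)
    {
        struct drm_encoder *encoder;
        int i;

        drm_connector_for_each_possible_encoder(connector, encoder, i) {
            if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
                return encoder;
        }

        return NULL;
    }
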
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index cd8a3ee16649..f920be236cc9 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -195,11 +195,11 @@ static int radeon_dp_mst_get_ddc_modes(struct drm_connector *connector)
radeon_connector->edid = edid;
DRM_DEBUG_KMS("edid retrieved %p\n", edid);
if (radeon_connector->edid) {
- drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ drm_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
return ret;
}
- drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+ drm_connector_update_edid_property(&radeon_connector->base, NULL);
return ret;
}
@@ -290,7 +290,7 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
- drm_mode_connector_set_path_property(connector, pathprop);
+ drm_connector_set_path_property(connector, pathprop);
return connector;
}
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index c6ee80216cf4..c341fb2a5b56 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -211,7 +211,7 @@ radeon_link_encoder_connector(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->devices & radeon_connector->devices) {
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
radeon_encoder_add_backlight(radeon_encoder, connector);
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index edbb4cd519fd..ba2fd295697f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -307,7 +307,7 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
if (bo == NULL)
return NULL;
- ttm_bo_reference(&bo->tbo);
+ ttm_bo_get(&bo->tbo);
return bo;
}
@@ -320,9 +320,8 @@ void radeon_bo_unref(struct radeon_bo **bo)
return;
rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
- ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
+ ttm_bo_put(tbo);
+ *bo = NULL;
}
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
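
The explicit `*bo = NULL` above compensates for an interface difference: ttm_bo_unref() took a double pointer and cleared it for the caller, while ttm_bo_put() only drops the reference. An equivalent pointer-clearing wrapper would look like this (hypothetical sketch):

    #include <drm/ttm/ttm_bo_api.h>

    static void demo_bo_unref(struct ttm_buffer_object **bo)
    {
        if (*bo == NULL)
            return;

        ttm_bo_put(*bo);    /* drop the reference... */
        *bo = NULL;         /* ...and clear the caller's pointer */
    }
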
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 48f4b273e316..0c7f228db6e3 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2009 VMware, Inc.
*
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 8689fcca051c..cbb67e9ffb3a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -947,11 +947,11 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;
-static int radeon_ttm_fault(struct vm_fault *vmf)
+static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo;
struct radeon_device *rdev;
- int r;
+ vm_fault_t ret;
bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
if (bo == NULL) {
@@ -959,9 +959,9 @@ static int radeon_ttm_fault(struct vm_fault *vmf)
}
rdev = radeon_get_rdev(bo->bdev);
down_read(&rdev->pm.mclk_lock);
- r = ttm_vm_ops->fault(vmf);
+ ret = ttm_vm_ops->fault(vmf);
up_read(&rdev->pm.mclk_lock);
- return r;
+ return ret;
}
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 1907c950d76f..85c604d29235 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -7082,9 +7082,10 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
static void si_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
+ enum pci_bus_speed speed_cap;
int bridge_pos, gpu_pos;
- u32 speed_cntl, mask, current_data_rate;
- int ret, i;
+ u32 speed_cntl, current_data_rate;
+ int i;
u16 tmp16;
if (pci_is_root_bus(rdev->pdev->bus))
@@ -7099,23 +7100,24 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
if (!(rdev->flags & RADEON_IS_PCIE))
return;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret != 0)
+ speed_cap = pcie_get_speed_cap(root);
+ if (speed_cap == PCI_SPEED_UNKNOWN)
return;
- if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+ if ((speed_cap != PCIE_SPEED_8_0GT) &&
+ (speed_cap != PCIE_SPEED_5_0GT))
return;
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
LC_CURRENT_DATA_RATE_SHIFT;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (speed_cap == PCIE_SPEED_8_0GT) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
- } else if (mask & DRM_PCIE_SPEED_50) {
+ } else if (speed_cap == PCIE_SPEED_5_0GT) {
if (current_data_rate == 1) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -7131,7 +7133,7 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
if (!gpu_pos)
return;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (speed_cap == PCIE_SPEED_8_0GT) {
/* re-try equalization if gen3 is not already enabled */
if (current_data_rate != 2) {
u16 bridge_cfg, gpu_cfg;
@@ -7219,9 +7221,9 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~0xf;
- if (mask & DRM_PCIE_SPEED_80)
+ if (speed_cap == PCIE_SPEED_8_0GT)
tmp16 |= 3; /* gen3 */
- else if (mask & DRM_PCIE_SPEED_50)
+ else if (speed_cap == PCIE_SPEED_5_0GT)
tmp16 |= 2; /* gen2 */
else
tmp16 |= 1; /* gen1 */
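The conversion above is worth isolating, since it recurs in si_dpm.c below: drm_pcie_get_speed_cap_mask() returned a bitmask of every supported speed, whereas pcie_get_speed_cap() returns only the single highest supported enum pci_bus_speed, so the old "mask & FLAG" tests become equality comparisons. A hedged sketch, with max_supported_gen() as a hypothetical helper:

	#include <linux/pci.h>

	/* Map the PCI core's speed cap onto a PCIe "gen" number the way
	 * this patch does, treating an unknown cap as gen1.
	 */
	static int max_supported_gen(struct pci_dev *root)
	{
		enum pci_bus_speed speed_cap = pcie_get_speed_cap(root);

		if (speed_cap == PCIE_SPEED_8_0GT)
			return 3;
		if (speed_cap == PCIE_SPEED_5_0GT)
			return 2;
		return 1; /* PCIE_SPEED_2_5GT or PCI_SPEED_UNKNOWN */
	}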
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index fea88078cf8e..8fb60b3af015 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6899,8 +6899,9 @@ int si_dpm_init(struct radeon_device *rdev)
struct ni_power_info *ni_pi;
struct si_power_info *si_pi;
struct atom_clock_dividers dividers;
+ enum pci_bus_speed speed_cap;
+ struct pci_dev *root = rdev->pdev->bus->self;
int ret;
- u32 mask;
si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
if (si_pi == NULL)
@@ -6910,11 +6911,20 @@ int si_dpm_init(struct radeon_device *rdev)
eg_pi = &ni_pi->eg;
pi = &eg_pi->rv7xx;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret)
+ speed_cap = pcie_get_speed_cap(root);
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
si_pi->sys_pcie_mask = 0;
- else
- si_pi->sys_pcie_mask = mask;
+ } else {
+ if (speed_cap == PCIE_SPEED_8_0GT)
+ si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+ RADEON_PCIE_SPEED_50 |
+ RADEON_PCIE_SPEED_80;
+ else if (speed_cap == PCIE_SPEED_5_0GT)
+ si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+ RADEON_PCIE_SPEED_50;
+ else
+ si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
+ }
si_pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
si_pi->boot_pcie_gen = si_get_current_pcie_speed(rdev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 155ad840f3c5..4c39de3f4f0f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -353,7 +353,7 @@ static int rcar_lvds_attach(struct drm_bridge *bridge)
drm_connector_helper_add(connector, &rcar_lvds_conn_helper_funcs);
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0)
return ret;
@@ -434,8 +434,8 @@ static int rcar_lvds_parse_dt(struct rcar_lvds *lvds)
ret = -EPROBE_DEFER;
} else {
lvds->panel = of_drm_find_panel(remote);
- if (!lvds->panel)
- ret = -EPROBE_DEFER;
+ if (IS_ERR(lvds->panel))
+ ret = PTR_ERR(lvds->panel);
}
done:
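This hunk reflects an API contract change that recurs throughout this series (see also dw-mipi-dsi and sti_dvo below): of_drm_find_panel() now returns an ERR_PTR(), typically -EPROBE_DEFER when the panel is not yet bound, instead of NULL on failure, so callers must switch from NULL checks to IS_ERR()/PTR_ERR(). A minimal sketch of the new calling convention; lookup_my_panel() is a hypothetical wrapper:

	#include <linux/err.h>
	#include <drm/drm_panel.h>

	static int lookup_my_panel(struct device_node *remote,
				   struct drm_panel **out)
	{
		struct drm_panel *panel = of_drm_find_panel(remote);

		/* No more NULL on failure: propagate the encoded error. */
		if (IS_ERR(panel))
			return PTR_ERR(panel); /* often -EPROBE_DEFER */

		*out = panel;
		return 0;
	}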
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index c6fbdcd87c16..8ad0d773dc33 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -275,7 +275,7 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
dp->sink_has_audio = drm_detect_monitor_audio(edid);
ret = drm_add_edid_modes(connector, edid);
if (ret)
- drm_mode_connector_update_edid_property(connector,
+ drm_connector_update_edid_property(connector,
edid);
}
mutex_unlock(&dp->lock);
@@ -1062,7 +1062,7 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret) {
DRM_ERROR("failed to attach connector and encoder\n");
goto err_free_connector;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index eb3042c6d1b2..3105965fc260 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -792,7 +792,6 @@ err_config_video:
int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
{
- u32 val;
int ret;
ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, 0);
@@ -801,11 +800,7 @@ int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
return ret;
}
- val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
- val |= SPDIF_FIFO_MID_RANGE(0xe0);
- val |= SPDIF_JITTER_THRSH(0xe0);
- val |= SPDIF_JITTER_AVG_WIN(7);
- writel(val, dp->regs + SPDIF_CTRL_ADDR);
+ writel(0, dp->regs + SPDIF_CTRL_ADDR);
/* clean the audio config and reset */
writel(0, dp->regs + AUDIO_SRC_CNTL);
@@ -929,12 +924,6 @@ static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
{
u32 val;
- val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
- val |= SPDIF_FIFO_MID_RANGE(0xe0);
- val |= SPDIF_JITTER_THRSH(0xe0);
- val |= SPDIF_JITTER_AVG_WIN(7);
- writel(val, dp->regs + SPDIF_CTRL_ADDR);
-
writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);
val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4);
@@ -942,9 +931,6 @@ static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
- val |= SPDIF_FIFO_MID_RANGE(0xe0);
- val |= SPDIF_JITTER_THRSH(0xe0);
- val |= SPDIF_JITTER_AVG_WIN(7);
writel(val, dp->regs + SPDIF_CTRL_ADDR);
clk_prepare_enable(dp->spdif_clk);
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index d53d5a09547f..662b6cb5d3f0 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -595,7 +595,7 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (dsi->panel)
+ if (!IS_ERR(dsi->panel))
return drm_panel_attach(dsi->panel, &dsi->connector);
return -EINVAL;
@@ -1129,7 +1129,7 @@ static int dw_mipi_dsi_register(struct drm_device *drm,
&dw_mipi_dsi_atomic_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 88d0774c97bd..1c02b3e61299 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -565,7 +565,7 @@ static int inno_hdmi_connector_get_modes(struct drm_connector *connector)
if (edid) {
hdmi->hdmi_data.sink_is_hdmi = drm_detect_hdmi_monitor(edid);
hdmi->hdmi_data.sink_has_audio = drm_detect_monitor_audio(edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
@@ -634,7 +634,7 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
drm_connector_init(drm, &hdmi->connector, &inno_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
- drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+ drm_connector_attach_encoder(&hdmi->connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index d4f4118b482d..ea18cb2a76c0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -18,52 +18,13 @@
#include <drm/drm_atomic.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_psr.h"
-#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
-
-struct rockchip_drm_fb {
- struct drm_framebuffer fb;
- struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER];
-};
-
-struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
- unsigned int plane)
-{
- struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb);
-
- if (plane >= ROCKCHIP_MAX_FB_BUFFER)
- return NULL;
-
- return rk_fb->obj[plane];
-}
-
-static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
-{
- struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
- int i;
-
- for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
- drm_gem_object_put_unlocked(rockchip_fb->obj[i]);
-
- drm_framebuffer_cleanup(fb);
- kfree(rockchip_fb);
-}
-
-static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
-
- return drm_gem_handle_create(file_priv,
- rockchip_fb->obj[0], handle);
-}
-
static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
struct drm_file *file,
unsigned int flags, unsigned int color,
@@ -75,46 +36,45 @@ static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
}
static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
- .destroy = rockchip_drm_fb_destroy,
- .create_handle = rockchip_drm_fb_create_handle,
- .dirty = rockchip_drm_fb_dirty,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .dirty = rockchip_drm_fb_dirty,
};
-static struct rockchip_drm_fb *
+static struct drm_framebuffer *
rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **obj, unsigned int num_planes)
{
- struct rockchip_drm_fb *rockchip_fb;
+ struct drm_framebuffer *fb;
int ret;
int i;
- rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL);
- if (!rockchip_fb)
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
return ERR_PTR(-ENOMEM);
- drm_helper_mode_fill_fb_struct(dev, &rockchip_fb->fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
for (i = 0; i < num_planes; i++)
- rockchip_fb->obj[i] = obj[i];
+ fb->obj[i] = obj[i];
- ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
- &rockchip_drm_fb_funcs);
+ ret = drm_framebuffer_init(dev, fb, &rockchip_drm_fb_funcs);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"Failed to initialize framebuffer: %d\n",
ret);
- kfree(rockchip_fb);
+ kfree(fb);
return ERR_PTR(ret);
}
- return rockchip_fb;
+ return fb;
}
static struct drm_framebuffer *
rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct rockchip_drm_fb *rockchip_fb;
+ struct drm_framebuffer *fb;
struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
struct drm_gem_object *obj;
unsigned int hsub;
@@ -153,13 +113,13 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
objs[i] = obj;
}
- rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
- if (IS_ERR(rockchip_fb)) {
- ret = PTR_ERR(rockchip_fb);
+ fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
goto err_gem_object_unreference;
}
- return &rockchip_fb->fb;
+ return fb;
err_gem_object_unreference:
for (i--; i >= 0; i--)
@@ -242,13 +202,13 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
- struct rockchip_drm_fb *rockchip_fb;
+ struct drm_framebuffer *fb;
- rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
- if (IS_ERR(rockchip_fb))
- return ERR_CAST(rockchip_fb);
+ fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
+ if (IS_ERR(fb))
+ return ERR_CAST(fb);
- return &rockchip_fb->fb;
+ return fb;
}
void rockchip_drm_mode_config_init(struct drm_device *dev)
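The deletion above works because of two core additions: struct drm_framebuffer now carries its own obj[] array, and the GEM framebuffer helpers provide generic destroy/create_handle implementations, making per-driver wrapper structs like rockchip_drm_fb redundant. A condensed sketch of the resulting pattern, with my_fb_dirty standing in for any driver-specific callback:

	#include <drm/drm_framebuffer.h>
	#include <drm/drm_gem_framebuffer_helper.h>

	static int my_fb_dirty(struct drm_framebuffer *fb, struct drm_file *file,
			       unsigned int flags, unsigned int color,
			       struct drm_clip_rect *clips, unsigned int num_clips)
	{
		return 0; /* hypothetical driver-specific work goes here */
	}

	static const struct drm_framebuffer_funcs my_fb_funcs = {
		.destroy	= drm_gem_fb_destroy,	   /* puts fb->obj[], frees fb */
		.create_handle	= drm_gem_fb_create_handle, /* handle for fb->obj[0] */
		.dirty		= my_fb_dirty,
	};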
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
index 2fe47f1ee98f..f1265cb1aee8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
@@ -22,7 +22,4 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
void rockchip_drm_mode_config_init(struct drm_device *dev);
-
-struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
- unsigned int plane);
#endif /* _ROCKCHIP_DRM_FB_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 2121345a61af..1359e5c773e4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -243,18 +243,6 @@ static enum vop_data_format vop_convert_format(uint32_t format)
}
}
-static bool is_yuv_support(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV24:
- return true;
- default:
- return false;
- }
-}
-
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
uint32_t dst, bool is_horizontal,
int vsu_mode, int *vskiplines)
@@ -298,7 +286,8 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
uint16_t cbcr_ver_scl_mode = SCALE_NONE;
int hsub = drm_format_horz_chroma_subsampling(pixel_format);
int vsub = drm_format_vert_chroma_subsampling(pixel_format);
- bool is_yuv = is_yuv_support(pixel_format);
+ const struct drm_format_info *info;
+ bool is_yuv = false;
uint16_t cbcr_src_w = src_w / hsub;
uint16_t cbcr_src_h = src_h / vsub;
uint16_t vsu_mode;
@@ -306,6 +295,11 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
uint32_t val;
int vskiplines;
+ info = drm_format_info(pixel_format);
+
+ if (info->is_yuv)
+ is_yuv = true;
+
if (dst_w > 3840) {
DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
return;
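Another recurring conversion in this series: the DRM core's format descriptor gained an is_yuv flag, so per-driver switch statements over DRM_FORMAT_NV12 and friends (such as the is_yuv_support() helper deleted above) reduce to a table lookup. A sketch using only core helpers:

	#include <drm/drm_fourcc.h>

	/* The core's format info now answers the YUV question directly. */
	static bool format_is_yuv(u32 fourcc)
	{
		const struct drm_format_info *info = drm_format_info(fourcc);

		return info && info->is_yuv;
	}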
@@ -486,6 +480,31 @@ static void vop_line_flag_irq_disable(struct vop *vop)
spin_unlock_irqrestore(&vop->irq_lock, flags);
}
+static int vop_core_clks_enable(struct vop *vop)
+{
+ int ret;
+
+ ret = clk_enable(vop->hclk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(vop->aclk);
+ if (ret < 0)
+ goto err_disable_hclk;
+
+ return 0;
+
+err_disable_hclk:
+ clk_disable(vop->hclk);
+ return ret;
+}
+
+static void vop_core_clks_disable(struct vop *vop)
+{
+ clk_disable(vop->aclk);
+ clk_disable(vop->hclk);
+}
+
static int vop_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
@@ -497,17 +516,13 @@ static int vop_enable(struct drm_crtc *crtc)
return ret;
}
- ret = clk_enable(vop->hclk);
+ ret = vop_core_clks_enable(vop);
if (WARN_ON(ret < 0))
goto err_put_pm_runtime;
ret = clk_enable(vop->dclk);
if (WARN_ON(ret < 0))
- goto err_disable_hclk;
-
- ret = clk_enable(vop->aclk);
- if (WARN_ON(ret < 0))
- goto err_disable_dclk;
+ goto err_disable_core;
/*
* Slave iommu shares power, irq and clock with vop. It was associated
@@ -519,7 +534,7 @@ static int vop_enable(struct drm_crtc *crtc)
if (ret) {
DRM_DEV_ERROR(vop->dev,
"failed to attach dma mapping, %d\n", ret);
- goto err_disable_aclk;
+ goto err_disable_dclk;
}
spin_lock(&vop->reg_lock);
@@ -552,18 +567,14 @@ static int vop_enable(struct drm_crtc *crtc)
spin_unlock(&vop->reg_lock);
- enable_irq(vop->irq);
-
drm_crtc_vblank_on(crtc);
return 0;
-err_disable_aclk:
- clk_disable(vop->aclk);
err_disable_dclk:
clk_disable(vop->dclk);
-err_disable_hclk:
- clk_disable(vop->hclk);
+err_disable_core:
+ vop_core_clks_disable(vop);
err_put_pm_runtime:
pm_runtime_put_sync(vop->dev);
return ret;
@@ -599,8 +610,6 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
vop_dsp_hold_valid_irq_disable(vop);
- disable_irq(vop->irq);
-
vop->is_enabled = false;
/*
@@ -609,8 +618,7 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
clk_disable(vop->dclk);
- clk_disable(vop->aclk);
- clk_disable(vop->hclk);
+ vop_core_clks_disable(vop);
pm_runtime_put(vop->dev);
mutex_unlock(&vop->vop_lock);
@@ -666,7 +674,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
* Src.x1 can be odd after clipping, but the yuv plane start point
* needs to be aligned to 2 pixels.
*/
- if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+ if (fb->format->is_yuv && ((state->src.x1 >> 16) % 2)) {
DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
return -EINVAL;
}
@@ -728,7 +736,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
return;
}
- obj = rockchip_fb_get_gem_obj(fb, 0);
+ obj = fb->obj[0];
rk_obj = to_rockchip_obj(obj);
actual_w = drm_rect_width(src) >> 16;
@@ -753,12 +761,12 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
VOP_WIN_SET(vop, win, format, format);
VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
- if (is_yuv_support(fb->format->format)) {
+ if (fb->format->is_yuv) {
int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
int bpp = fb->format->cpp[1];
- uv_obj = rockchip_fb_get_gem_obj(fb, 1);
+ uv_obj = fb->obj[1];
rk_uv_obj = to_rockchip_obj(uv_obj);
offset = (src->x1 >> 16) * bpp / hsub;
@@ -1178,6 +1186,18 @@ static irqreturn_t vop_isr(int irq, void *data)
int ret = IRQ_NONE;
/*
+ * The irq is shared with the iommu. If the runtime-pm state of the
+ * vop device is disabled, the irq has to be targeted at the iommu.
+ */
+ if (!pm_runtime_get_if_in_use(vop->dev))
+ return IRQ_NONE;
+
+ if (vop_core_clks_enable(vop)) {
+ DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n");
+ goto out;
+ }
+
+ /*
* interrupt register has interrupt status, enable and clear bits, we
* must hold irq_lock to avoid a race with enable/disable_vblank().
*/
@@ -1192,7 +1212,7 @@ static irqreturn_t vop_isr(int irq, void *data)
/* This is expected for vop iommu irqs, since the irq is shared */
if (!active_irqs)
- return IRQ_NONE;
+ goto out_disable;
if (active_irqs & DSP_HOLD_VALID_INTR) {
complete(&vop->dsp_hold_completion);
@@ -1218,6 +1238,10 @@ static irqreturn_t vop_isr(int irq, void *data)
DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
active_irqs);
+out_disable:
+ vop_core_clks_disable(vop);
+out:
+ pm_runtime_put(vop->dev);
return ret;
}
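The reworked vop_isr() illustrates a general pattern for handlers on an IRQ line shared with another device (here the IOMMU): return IRQ_NONE unless pm_runtime_get_if_in_use() confirms the device is active, enable the clocks needed to touch registers, and undo both on every exit path. Reduced to its skeleton; the my_dev structure and helpers are hypothetical:

	#include <linux/interrupt.h>
	#include <linux/pm_runtime.h>

	struct my_dev {
		struct device *dev;
	};

	/* Hypothetical stand-ins for the driver's clock and IRQ logic. */
	static int my_dev_clks_enable(struct my_dev *mydev) { return 0; }
	static void my_dev_clks_disable(struct my_dev *mydev) { }
	static irqreturn_t my_dev_handle_irq(struct my_dev *mydev)
	{
		return IRQ_HANDLED;
	}

	static irqreturn_t my_shared_isr(int irq, void *data)
	{
		struct my_dev *mydev = data;
		irqreturn_t ret = IRQ_NONE;

		/* Runtime-suspended: the IRQ belongs to the other user. */
		if (!pm_runtime_get_if_in_use(mydev->dev))
			return IRQ_NONE;

		if (my_dev_clks_enable(mydev))
			goto out;

		ret = my_dev_handle_irq(mydev);

		my_dev_clks_disable(mydev);
	out:
		pm_runtime_put(mydev->dev);
		return ret;
	}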
@@ -1278,7 +1302,7 @@ static int vop_create_crtc(struct vop *vop)
for (i = 0; i < vop_data->win_size; i++) {
struct vop_win *vop_win = &vop->win[i];
const struct vop_win_data *win_data = vop_win->data;
- unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);
+ unsigned long possible_crtcs = drm_crtc_mask(crtc);
if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
continue;
@@ -1596,9 +1620,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_disable_pm_runtime;
- /* IRQ is initially disabled; it gets enabled in power_on */
- disable_irq(vop->irq);
-
return 0;
err_disable_pm_runtime:
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 084acdd0019a..fcb91041a666 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -331,16 +331,19 @@ static inline int scl_vop_cal_lb_mode(int width, bool is_yuv)
{
int lb_mode;
- if (width > 2560)
- lb_mode = LB_RGB_3840X2;
- else if (width > 1920)
- lb_mode = LB_RGB_2560X4;
- else if (!is_yuv)
- lb_mode = LB_RGB_1920X5;
- else if (width > 1280)
- lb_mode = LB_YUV_3840X5;
- else
- lb_mode = LB_YUV_2560X8;
+ if (is_yuv) {
+ if (width > 1280)
+ lb_mode = LB_YUV_3840X5;
+ else
+ lb_mode = LB_YUV_2560X8;
+ } else {
+ if (width > 2560)
+ lb_mode = LB_RGB_3840X2;
+ else if (width > 1920)
+ lb_mode = LB_RGB_2560X4;
+ else
+ lb_mode = LB_RGB_1920X5;
+ }
return lb_mode;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index e67f4ea28c0e..456bd9f13bae 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -363,8 +363,10 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
of_property_read_u32(endpoint, "reg", &endpoint_id);
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, endpoint_id,
&lvds->panel, &lvds->bridge);
- if (!ret)
+ if (!ret) {
+ of_node_put(endpoint);
break;
+ }
}
if (!child_count) {
DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
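The added of_node_put() fixes a reference leak with a well-known shape: for_each_child_of_node() drops the previous child's reference on each iteration, but breaking out of the loop leaves the current child's reference held, so every early exit must put it explicitly. A sketch of the correct form, with my_child_matches() as a hypothetical predicate:

	#include <linux/of.h>

	static bool my_child_matches(struct device_node *np)
	{
		return of_device_is_available(np); /* stand-in predicate */
	}

	static int pick_first_good_child(struct device_node *port)
	{
		struct device_node *child;
		int ret = -ENODEV;

		for_each_child_of_node(port, child) {
			if (my_child_matches(child)) {
				ret = 0;
				/* break would otherwise leak this reference */
				of_node_put(child);
				break;
			}
		}
		return ret;
	}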
@@ -432,7 +434,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
drm_connector_helper_add(connector,
&rockchip_lvds_connector_helper_funcs);
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach encoder: %d\n", ret);
@@ -446,14 +448,12 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
goto err_free_connector;
}
} else {
- lvds->bridge->encoder = encoder;
ret = drm_bridge_attach(encoder, lvds->bridge, NULL);
if (ret) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach bridge: %d\n", ret);
goto err_free_encoder;
}
- encoder->bridge = lvds->bridge;
}
pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 2db89bed52e8..7559a820bd43 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -971,7 +971,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dma && dma->buflist) {
- if (cmdbuf->dma_idx > dma->buf_count) {
+ if (cmdbuf->dma_idx >= dma->buf_count) {
DRM_ERROR
("vertex buffer index %u out of range (0-%u)\n",
cmdbuf->dma_idx, dma->buf_count - 1);
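The one-character savage fix above is a classic off-by-one: dma->buflist holds buf_count entries, so valid indices run from 0 to buf_count - 1, and the old "idx > buf_count" test let idx == buf_count through, one slot past the end of the array. In sketch form:

	/* Valid indices into an array of buf_count entries. */
	static bool dma_idx_valid(unsigned int idx, unsigned int buf_count)
	{
		return idx < buf_count;
	}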
diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
index bd0377c0d2ee..7665883f81d4 100644
--- a/drivers/gpu/drm/scheduler/Makefile
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -20,7 +20,6 @@
# OTHER DEALINGS IN THE SOFTWARE.
#
#
-ccflags-y := -Iinclude/drm
gpu-sched-y := gpu_scheduler.o sched_fence.o
obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 44d480768dfe..4fc211e19d6e 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -21,6 +21,29 @@
*
*/
+/**
+ * DOC: Overview
+ *
+ * The GPU scheduler provides entities which allow userspace to push jobs
+ * into software queues which are then scheduled on a hardware run queue.
+ * The software queues have priorities among them; the scheduler selects
+ * entities from a run queue in FIFO order. The scheduler also provides
+ * dependency handling between jobs. The driver is expected to provide
+ * callback functions for backend operations, such as submitting a job to
+ * the hardware run queue or returning the dependencies of a job.
+ *
+ * The scheduler is organised as follows:
+ *
+ * 1. Each hw run queue has one scheduler
+ * 2. Each scheduler has multiple run queues with different priorities
+ *    (e.g. HIGH_HW, HIGH_SW, KERNEL, NORMAL)
+ * 3. Each scheduler run queue has a queue of entities to schedule
+ * 4. Entities themselves maintain a queue of jobs that will be scheduled on
+ *    the hardware.
+ *
+ * The jobs in an entity are always scheduled in the order in which they
+ * were pushed.
+ */
+
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
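To make the overview concrete, here is a hedged sketch of how a driver drives this API after the patch; my_ring, my_ops and the callback bodies are hypothetical, while the functions and constants are the ones declared in gpu_scheduler.h. Note that neither drm_sched_entity_init() nor drm_sched_job_init() takes a scheduler pointer any more: both derive it from the entity's run queue.

	#include <linux/jiffies.h>
	#include <drm/gpu_scheduler.h>

	/* Real drivers must fill in run_job, timedout_job, free_job etc. */
	static const struct drm_sched_backend_ops my_ops;

	struct my_ring {
		struct drm_gpu_scheduler sched;
	};

	static int my_ring_init(struct my_ring *ring,
				struct drm_sched_entity *entity)
	{
		struct drm_sched_rq *rq;
		int ret;

		ret = drm_sched_init(&ring->sched, &my_ops,
				     16,			/* hw_submission */
				     0,				/* hang_limit */
				     msecs_to_jiffies(1000),	/* timeout */
				     "my-ring");
		if (ret)
			return ret;

		/* Entities now take a list of candidate run queues. */
		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
		return drm_sched_entity_init(entity, &rq, 1, NULL);
	}

	static int my_submit(struct drm_sched_job *job,
			     struct drm_sched_entity *entity, void *owner)
	{
		/* The scheduler comes from entity->rq->sched internally. */
		int ret = drm_sched_job_init(job, entity, owner);

		if (ret)
			return ret;
		drm_sched_entity_push_job(job, entity);
		return 0;
	}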
@@ -39,14 +62,30 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
-/* Initialize a given run queue struct */
-static void drm_sched_rq_init(struct drm_sched_rq *rq)
+/**
+ * drm_sched_rq_init - initialize a given run queue struct
+ *
+ * @rq: scheduler run queue
+ *
+ * Initializes a scheduler runqueue.
+ */
+static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
+ struct drm_sched_rq *rq)
{
spin_lock_init(&rq->lock);
INIT_LIST_HEAD(&rq->entities);
rq->current_entity = NULL;
+ rq->sched = sched;
}
+/**
+ * drm_sched_rq_add_entity - add an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * Adds a scheduler entity to the run queue.
+ */
static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity)
{
@@ -57,6 +96,14 @@ static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
spin_unlock(&rq->lock);
}
+/**
+ * drm_sched_rq_remove_entity - remove an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * Removes a scheduler entity from the run queue.
+ */
static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity)
{
@@ -70,9 +117,9 @@ static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
}
/**
- * Select an entity which could provide a job to run
+ * drm_sched_rq_select_entity - Select an entity which could provide a job to run
*
- * @rq The run queue to check.
+ * @rq: scheduler run queue to check.
*
* Try to find a ready entity, returns NULL if none found.
*/
@@ -112,30 +159,33 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
}
/**
- * Init a context entity used by scheduler when submit to HW ring.
+ * drm_sched_entity_init - Init a context entity used by scheduler when
+ * submit to HW ring.
+ *
+ * @entity: scheduler entity to init
+ * @rq_list: the list of run queue on which jobs from this
+ * entity can be submitted
+ * @num_rq_list: number of run queue in rq_list
+ * @guilty: atomic_t set to 1 when a job on this queue
+ * is found to be guilty causing a timeout
*
- * @sched The pointer to the scheduler
- * @entity The pointer to a valid drm_sched_entity
- * @rq The run queue this entity belongs
- * @guilty atomic_t set to 1 when a job on this queue
- * is found to be guilty causing a timeout
+ * Note: rq_list should have at least one element so the entity
+ * can be scheduled
*
- * return 0 if succeed. negative error code on failure
+ * Returns 0 on success or a negative error code on failure.
*/
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity,
- struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+ struct drm_sched_rq **rq_list,
+ unsigned int num_rq_list,
atomic_t *guilty)
{
- if (!(sched && entity && rq))
+ if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
return -EINVAL;
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
- entity->rq = rq;
- entity->sched = sched;
+ entity->rq = rq_list[0];
entity->guilty = guilty;
- entity->fini_status = 0;
entity->last_scheduled = NULL;
spin_lock_init(&entity->rq_lock);
@@ -149,40 +199,27 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
EXPORT_SYMBOL(drm_sched_entity_init);
/**
- * Query if entity is initialized
+ * drm_sched_entity_is_idle - Check if entity is idle
*
- * @sched Pointer to scheduler instance
- * @entity The pointer to a valid scheduler entity
- *
- * return true if entity is initialized, false otherwise
-*/
-static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
-{
- return entity->sched == sched &&
- entity->rq != NULL;
-}
-
-/**
- * Check if entity is idle
+ * @entity: scheduler entity
*
- * @entity The pointer to a valid scheduler entity
- *
- * Return true if entity don't has any unscheduled jobs.
+ * Returns true if the entity does not have any unscheduled jobs.
*/
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
rmb();
- if (spsc_queue_peek(&entity->job_queue) == NULL)
+
+ if (list_empty(&entity->list) ||
+ spsc_queue_peek(&entity->job_queue) == NULL)
return true;
return false;
}
/**
- * Check if entity is ready
+ * drm_sched_entity_is_ready - Check if entity is ready
*
- * @entity The pointer to a valid scheduler entity
+ * @entity: scheduler entity
*
* Return true if entity could provide a job.
*/
@@ -210,44 +247,67 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
/**
- * Destroy a context entity
+ * drm_sched_entity_flush - Flush a context entity
*
- * @sched Pointer to scheduler instance
- * @entity The pointer to a valid scheduler entity
+ * @entity: scheduler entity
+ * @timeout: time to wait, in jiffies, for the queue to become empty
*
- * Splitting drm_sched_entity_fini() into two functions, The first one is does the waiting,
+ * Splitting drm_sched_entity_fini() into two functions: the first one does the waiting,
* removes the entity from the runqueue and returns an error when the process was killed.
+ *
+ * Returns the remaining time in jiffies left from the input timeout
*/
-void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
- if (!drm_sched_entity_is_initialized(sched, entity))
- return;
+ struct drm_gpu_scheduler *sched;
+ struct task_struct *last_user;
+ long ret = timeout;
+
+ sched = entity->rq->sched;
/**
* The client will not queue more IBs during this fini, consume existing
* queued IBs or discard them on SIGKILL
*/
- if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
- entity->fini_status = -ERESTARTSYS;
- else
- entity->fini_status = wait_event_killable(sched->job_scheduled,
- drm_sched_entity_is_idle(entity));
- drm_sched_entity_set_rq(entity, NULL);
+ if (current->flags & PF_EXITING) {
+ if (timeout)
+ ret = wait_event_timeout(
+ sched->job_scheduled,
+ drm_sched_entity_is_idle(entity),
+ timeout);
+ } else
+ wait_event_killable(sched->job_scheduled, drm_sched_entity_is_idle(entity));
+
+
+ /* For a killed process, disable any further IB enqueueing right now */
+ last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
+ if ((!last_user || last_user == current->group_leader) &&
+ (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+ drm_sched_rq_remove_entity(entity->rq, entity);
+
+ return ret;
}
-EXPORT_SYMBOL(drm_sched_entity_do_release);
+EXPORT_SYMBOL(drm_sched_entity_flush);
/**
- * Destroy a context entity
+ * drm_sched_entity_cleanup - Destroy a context entity
+ *
+ * @entity: scheduler entity
*
- * @sched Pointer to scheduler instance
- * @entity The pointer to a valid scheduler entity
+ * This should be called after drm_sched_entity_flush(). It goes over the
+ * entity and signals all jobs with an error code if the process was killed.
*
- * The second one then goes over the entity and signals all jobs with an error code.
*/
-void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
- if (entity->fini_status) {
+ struct drm_gpu_scheduler *sched;
+
+ sched = entity->rq->sched;
+ drm_sched_rq_remove_entity(entity->rq, entity);
+
+ /* Consumption of existing IBs wasn't completed. Forcefully
+ * remove them here.
+ */
+ if (spsc_queue_peek(&entity->job_queue)) {
struct drm_sched_job *job;
int r;
@@ -267,27 +327,43 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
struct drm_sched_fence *s_fence = job->s_fence;
drm_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);
- r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
- drm_sched_entity_kill_jobs_cb);
- if (r == -ENOENT)
+
+ /*
+ * When the pipe is hung by an older entity, the new entity might
+ * not even have had a chance to submit its first job to the HW,
+ * and so entity->last_scheduled will remain NULL.
+ */
+ if (!entity->last_scheduled) {
drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
- else if (r)
- DRM_ERROR("fence add callback failed (%d)\n", r);
+ } else {
+ r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb);
+ if (r == -ENOENT)
+ drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+ else if (r)
+ DRM_ERROR("fence add callback failed (%d)\n", r);
+ }
}
}
dma_fence_put(entity->last_scheduled);
entity->last_scheduled = NULL;
}
-EXPORT_SYMBOL(drm_sched_entity_cleanup);
+EXPORT_SYMBOL(drm_sched_entity_fini);
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+/**
+ * drm_sched_entity_destroy - Destroy a context entity
+ *
+ * @entity: scheduler entity
+ *
+ * Calls drm_sched_entity_flush() with the default timeout and then
+ * drm_sched_entity_fini().
+ */
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
- drm_sched_entity_do_release(sched, entity);
- drm_sched_entity_cleanup(sched, entity);
+ drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+ drm_sched_entity_fini(entity);
}
-EXPORT_SYMBOL(drm_sched_entity_fini);
+EXPORT_SYMBOL(drm_sched_entity_destroy);
static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
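The renames above settle the entity teardown API into three calls: drm_sched_entity_flush() waits, bounded by a timeout, for queued jobs to drain; drm_sched_entity_fini() forcefully signals whatever remains with -ESRCH and releases entity state; and drm_sched_entity_destroy() combines the two using MAX_WAIT_SCHED_ENTITY_Q_EMPTY. A sketch of both forms, with the timeout value purely illustrative:

	/* Split teardown with a driver-chosen drain timeout. */
	static void my_entity_teardown(struct drm_sched_entity *entity)
	{
		/* Give in-flight jobs up to 2 seconds to drain... */
		drm_sched_entity_flush(entity, msecs_to_jiffies(2000));

		/* ...then kill anything still queued and clean up. */
		drm_sched_entity_fini(entity);
	}

	/* Or the one-shot convenience wrapper with the default timeout: */
	static void my_entity_teardown_simple(struct drm_sched_entity *entity)
	{
		drm_sched_entity_destroy(entity);
	}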
@@ -295,7 +371,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb
container_of(cb, struct drm_sched_entity, cb);
entity->dependency = NULL;
dma_fence_put(f);
- drm_sched_wakeup(entity->sched);
+ drm_sched_wakeup(entity->rq->sched);
}
static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
@@ -306,29 +382,43 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
dma_fence_put(f);
}
+/**
+ * drm_sched_entity_set_rq - Sets the run queue for an entity
+ *
+ * @entity: scheduler entity
+ * @rq: scheduler run queue
+ *
+ * Sets the run queue for an entity and removes the entity from the previous
+ * run queue in which was present.
+ */
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
struct drm_sched_rq *rq)
{
if (entity->rq == rq)
return;
- spin_lock(&entity->rq_lock);
-
- if (entity->rq)
- drm_sched_rq_remove_entity(entity->rq, entity);
+ BUG_ON(!rq);
+ spin_lock(&entity->rq_lock);
+ drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
- if (rq)
- drm_sched_rq_add_entity(rq, entity);
-
+ drm_sched_rq_add_entity(rq, entity);
spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_rq);
+/**
+ * drm_sched_dependency_optimized
+ *
+ * @fence: the dependency fence
+ * @entity: the entity which depends on the above fence
+ *
+ * Returns true if the dependency can be optimized and false otherwise
+ */
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_fence *s_fence;
if (!fence || dma_fence_is_signaled(fence))
@@ -345,7 +435,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct dma_fence * fence = entity->dependency;
struct drm_sched_fence *s_fence;
@@ -390,7 +480,7 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
static struct drm_sched_job *
drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_job *sched_job = to_drm_sched_job(
spsc_queue_peek(&entity->job_queue));
@@ -413,9 +503,10 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
}
/**
- * Submit a job to the job queue
+ * drm_sched_entity_push_job - Submit a job to the entity's job queue
*
- * @sched_job The pointer to job required to submit
+ * @sched_job: job to submit
+ * @entity: scheduler entity
*
* Note: To guarantee that the order of insertion to queue matches
* the job's fence sequence number this function should be
@@ -431,12 +522,18 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
trace_drm_sched_job(sched_job, entity);
+ WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
/* first job wakes up scheduler */
if (first) {
/* Add the entity to the run queue */
spin_lock(&entity->rq_lock);
+ if (!entity->rq) {
+ DRM_ERROR("Trying to push to a killed entity\n");
+ spin_unlock(&entity->rq_lock);
+ return;
+ }
drm_sched_rq_add_entity(entity->rq, entity);
spin_unlock(&entity->rq_lock);
drm_sched_wakeup(sched);
@@ -452,24 +549,28 @@ static void drm_sched_job_finish(struct work_struct *work)
finish_work);
struct drm_gpu_scheduler *sched = s_job->sched;
- /* remove job from ring_mirror_list */
- spin_lock(&sched->job_list_lock);
- list_del_init(&s_job->node);
- if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- struct drm_sched_job *next;
-
- spin_unlock(&sched->job_list_lock);
- cancel_delayed_work_sync(&s_job->work_tdr);
- spin_lock(&sched->job_list_lock);
+ /*
+ * Canceling the timeout without removing our job from the ring mirror
+ * list is safe, as we will only end up in this worker if our job's
+ * finished fence has been signaled. So even if another worker
+ * manages to find this job as the next job in the list, the fence
+ * signaled check below will prevent the timeout from being restarted.
+ */
+ cancel_delayed_work_sync(&s_job->work_tdr);
- /* queue TDR for next job */
- next = list_first_entry_or_null(&sched->ring_mirror_list,
- struct drm_sched_job, node);
+ spin_lock(&sched->job_list_lock);
+ /* queue TDR for next job */
+ if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+ !list_is_last(&s_job->node, &sched->ring_mirror_list)) {
+ struct drm_sched_job *next = list_next_entry(s_job, node);
- if (next)
+ if (!dma_fence_is_signaled(&next->s_fence->finished))
schedule_delayed_work(&next->work_tdr, sched->timeout);
}
+ /* remove job from ring_mirror_list */
+ list_del(&s_job->node);
spin_unlock(&sched->job_list_lock);
+
dma_fence_put(&s_job->s_fence->finished);
sched->ops->free_job(s_job);
}
@@ -506,6 +607,13 @@ static void drm_sched_job_timedout(struct work_struct *work)
job->sched->ops->timedout_job(job);
}
+/**
+ * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
+ *
+ * @sched: scheduler instance
+ * @bad: bad scheduler job
+ *
+ */
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
struct drm_sched_job *s_job;
@@ -550,6 +658,12 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
}
EXPORT_SYMBOL(drm_sched_hw_job_reset);
+/**
+ * drm_sched_job_recovery - recover jobs after a reset
+ *
+ * @sched: scheduler instance
+ *
+ */
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *s_job, *tmp;
@@ -599,16 +713,23 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
EXPORT_SYMBOL(drm_sched_job_recovery);
/**
- * Init a sched_job with basic field
+ * drm_sched_job_init - init a scheduler job
+ *
+ * @job: scheduler job to init
+ * @entity: scheduler entity to use
+ * @owner: job owner for debugging
*
- * Note: Refer to drm_sched_entity_push_job documentation
+ * Refer to drm_sched_entity_push_job() documentation
* for locking considerations.
+ *
+ * Returns 0 for success, negative error code otherwise.
*/
int drm_sched_job_init(struct drm_sched_job *job,
- struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
void *owner)
{
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
+
job->sched = sched;
job->entity = entity;
job->s_priority = entity->rq - sched->sched_rq;
@@ -626,7 +747,11 @@ int drm_sched_job_init(struct drm_sched_job *job,
EXPORT_SYMBOL(drm_sched_job_init);
/**
- * Return ture if we can push more jobs to the hw.
+ * drm_sched_ready - is the scheduler ready
+ *
+ * @sched: scheduler instance
+ *
+ * Return true if we can push more jobs to the hw, otherwise false.
*/
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
@@ -635,7 +760,10 @@ static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
}
/**
- * Wake up the scheduler when it is ready
+ * drm_sched_wakeup - Wake up the scheduler when it is ready
+ *
+ * @sched: scheduler instance
+ *
*/
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
@@ -644,8 +772,12 @@ static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
}
/**
- * Select next entity to process
-*/
+ * drm_sched_select_entity - Select next entity to process
+ *
+ * @sched: scheduler instance
+ *
+ * Returns the entity to process or NULL if none are found.
+ */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
@@ -665,6 +797,14 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
return entity;
}
+/**
+ * drm_sched_process_job - process a job
+ *
+ * @f: fence
+ * @cb: fence callbacks
+ *
+ * Called after job has finished execution.
+ */
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct drm_sched_fence *s_fence =
@@ -680,6 +820,13 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
wake_up_interruptible(&sched->wake_up_worker);
}
+/**
+ * drm_sched_blocked - check if the scheduler is blocked
+ *
+ * @sched: scheduler instance
+ *
+ * Returns true if blocked, otherwise false.
+ */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
if (kthread_should_park()) {
@@ -690,6 +837,13 @@ static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
return false;
}
+/**
+ * drm_sched_main - main scheduler thread
+ *
+ * @param: scheduler instance
+ *
+ * Returns 0.
+ */
static int drm_sched_main(void *param)
{
struct sched_param sparam = {.sched_priority = 1};
@@ -744,15 +898,17 @@ static int drm_sched_main(void *param)
}
/**
- * Init a gpu scheduler instance
+ * drm_sched_init - Init a gpu scheduler instance
*
- * @sched The pointer to the scheduler
- * @ops The backend operations for this scheduler.
- * @hw_submissions Number of hw submissions to do.
- * @name Name used for debugging
+ * @sched: scheduler instance
+ * @ops: backend operations for this scheduler
+ * @hw_submission: number of hw submissions that can be in flight
+ * @hang_limit: number of times to allow a job to hang before dropping it
+ * @timeout: timeout value in jiffies for the scheduler
+ * @name: name used for debugging
*
* Return 0 on success, otherwise error code.
-*/
+ */
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
unsigned hw_submission,
@@ -767,7 +923,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->timeout = timeout;
sched->hang_limit = hang_limit;
for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
- drm_sched_rq_init(&sched->sched_rq[i]);
+ drm_sched_rq_init(sched, &sched->sched_rq[i]);
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
@@ -788,9 +944,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
EXPORT_SYMBOL(drm_sched_init);
/**
- * Destroy a gpu scheduler
+ * drm_sched_fini - Destroy a gpu scheduler
+ *
+ * @sched: scheduler instance
*
- * @sched The pointer to the scheduler
+ * Tears down and cleans up the scheduler.
*/
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index df4461648e3f..d8d2dff9ea2f 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -81,11 +81,6 @@ static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
return (const char *)fence->sched->name;
}
-static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
-{
- return true;
-}
-
/**
* drm_sched_fence_free - free up the fence memory
*
@@ -134,18 +129,12 @@ static void drm_sched_fence_release_finished(struct dma_fence *f)
const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
- .enable_signaling = drm_sched_fence_enable_signaling,
- .signaled = NULL,
- .wait = dma_fence_default_wait,
.release = drm_sched_fence_release_scheduled,
};
const struct dma_fence_ops drm_sched_fence_ops_finished = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
- .enable_signaling = drm_sched_fence_enable_signaling,
- .signaled = NULL,
- .wait = dma_fence_default_wait,
.release = drm_sched_fence_release_finished,
};
@@ -172,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
return NULL;
fence->owner = owner;
- fence->sched = entity->sched;
+ fence->sched = entity->rq->sched;
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&entity->fence_seq);
diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
index 54acc117550c..6b943ea1c57d 100644
--- a/drivers/gpu/drm/selftests/drm_mm_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_mm_selftests.h
@@ -19,7 +19,9 @@ selftest(align64, igt_align64)
selftest(evict, igt_evict)
selftest(evict_range, igt_evict_range)
selftest(bottomup, igt_bottomup)
+selftest(lowest, igt_lowest)
selftest(topdown, igt_topdown)
+selftest(highest, igt_highest)
selftest(color, igt_color)
selftest(color_evict, igt_color_evict)
selftest(color_evict_range, igt_color_evict_range)
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 933af1c25387..fbed2c90fd51 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -1825,6 +1825,77 @@ err:
return ret;
}
+static int __igt_once(unsigned int mode)
+{
+ struct drm_mm mm;
+ struct drm_mm_node rsvd_lo, rsvd_hi, node;
+ int err;
+
+ drm_mm_init(&mm, 0, 7);
+
+ memset(&rsvd_lo, 0, sizeof(rsvd_lo));
+ rsvd_lo.start = 1;
+ rsvd_lo.size = 1;
+ err = drm_mm_reserve_node(&mm, &rsvd_lo);
+ if (err) {
+ pr_err("Could not reserve low node\n");
+ goto err;
+ }
+
+ memset(&rsvd_hi, 0, sizeof(rsvd_hi));
+ rsvd_hi.start = 5;
+ rsvd_hi.size = 1;
+ err = drm_mm_reserve_node(&mm, &rsvd_hi);
+ if (err) {
+ pr_err("Could not reserve low node\n");
+ goto err_lo;
+ }
+
+ if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
+ pr_err("Expected a hole after lo and high nodes!\n");
+ err = -EINVAL;
+ goto err_hi;
+ }
+
+ memset(&node, 0, sizeof(node));
+ err = drm_mm_insert_node_generic(&mm, &node,
+ 2, 0, 0,
+ mode | DRM_MM_INSERT_ONCE);
+ if (!err) {
+ pr_err("Unexpectedly inserted the node into the wrong hole: node.start=%llx\n",
+ node.start);
+ err = -EINVAL;
+ goto err_node;
+ }
+
+ err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
+ if (err) {
+ pr_err("Could not insert the node into the available hole!\n");
+ err = -EINVAL;
+ goto err_hi;
+ }
+
+err_node:
+ drm_mm_remove_node(&node);
+err_hi:
+ drm_mm_remove_node(&rsvd_hi);
+err_lo:
+ drm_mm_remove_node(&rsvd_lo);
+err:
+ drm_mm_takedown(&mm);
+ return err;
+}
+
+static int igt_lowest(void *ignored)
+{
+ return __igt_once(DRM_MM_INSERT_LOW);
+}
+
+static int igt_highest(void *ignored)
+{
+ return __igt_once(DRM_MM_INSERT_HIGH);
+}
+
static void separate_adjacent_colors(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 40df8887fc17..fc66167b0641 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -675,7 +675,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
if (ret < 0)
goto err_cleanup;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0)
goto err_backlight;
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index df0a282b9615..57b870e1e696 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -332,7 +332,7 @@ static void sti_cursor_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane);
+ drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 90c46b49c931..832fc43960ee 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -224,7 +224,7 @@ static int sti_bind(struct device *dev)
ret = sti_init(ddev);
if (ret)
- goto err_drm_dev_unref;
+ goto err_drm_dev_put;
ret = component_bind_all(ddev->dev, ddev);
if (ret)
@@ -248,8 +248,8 @@ err_register:
drm_mode_config_cleanup(ddev);
err_cleanup:
sti_cleanup(ddev);
-err_drm_dev_unref:
- drm_dev_unref(ddev);
+err_drm_dev_put:
+ drm_dev_put(ddev);
return ret;
}
@@ -259,7 +259,7 @@ static void sti_unbind(struct device *dev)
drm_dev_unregister(ddev);
sti_cleanup(ddev);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
}
static const struct component_master_ops sti_ops = {
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index a5979cd25cc7..b08376b7611b 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -387,7 +387,9 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force)
if (!dvo->panel) {
dvo->panel = of_drm_find_panel(dvo->panel_node);
- if (dvo->panel)
+ if (IS_ERR(dvo->panel))
+ dvo->panel = NULL;
+ else
drm_panel_attach(dvo->panel, connector);
}
@@ -484,7 +486,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(drm_connector,
&sti_dvo_connector_helper_funcs);
- err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ err = drm_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 9b2c47051b51..c32de6cbf061 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -211,7 +211,11 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
struct drm_info_node *node = s->private;
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
struct drm_plane *drm_plane = &gdp->plane.drm_plane;
- struct drm_crtc *crtc = drm_plane->crtc;
+ struct drm_crtc *crtc;
+
+ drm_modeset_lock(&drm_plane->mutex, NULL);
+ crtc = drm_plane->state->crtc;
+ drm_modeset_unlock(&drm_plane->mutex);
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&gdp->plane), gdp->regs);
@@ -879,7 +883,7 @@ static void sti_gdp_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane);
+ drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 67bbdb49fffc..49438337f70d 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -709,7 +709,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(drm_connector,
&sti_hda_connector_helper_funcs);
- err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ err = drm_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 58f431102512..34cdc4644435 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -977,7 +977,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
cec_notifier_set_phys_addr_from_edid(hdmi->notifier, edid);
count = drm_add_edid_modes(connector, edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
kfree(edid);
return count;
@@ -1290,7 +1290,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi->drm_connector = drm_connector;
- err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ err = drm_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 106be8c4e58b..03ac3b4a4469 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1260,7 +1260,7 @@ static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane);
+ drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 8698e08313e1..f2021b23554d 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -148,16 +148,16 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
ret = drv_load(ddev);
if (ret)
- goto err_unref;
+ goto err_put;
ret = drm_dev_register(ddev, 0);
if (ret)
- goto err_unref;
+ goto err_put;
return 0;
-err_unref:
- drm_dev_unref(ddev);
+err_put:
+ drm_dev_put(ddev);
return ret;
}
@@ -170,7 +170,7 @@ static int stm_drm_platform_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
drv_unload(ddev);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return 0;
}
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index d997a6014d6c..808d9fb627e9 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -457,6 +457,14 @@ ltdc_crtc_mode_valid(struct drm_crtc *crtc,
int target_max = target + CLK_TOLERANCE_HZ;
int result;
+ result = clk_round_rate(ldev->pixel_clk, target);
+
+ DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result);
+
+ /* Filter modes according to the max frequency supported by the pads */
+ if (result > ldev->caps.pad_max_freq_hz)
+ return MODE_CLOCK_HIGH;
+
/*
* Accept all "preferred" modes:
* - this is important for panels because panel clock tolerances are
@@ -468,10 +476,6 @@ ltdc_crtc_mode_valid(struct drm_crtc *crtc,
if (mode->type & DRM_MODE_TYPE_PREFERRED)
return MODE_OK;
- result = clk_round_rate(ldev->pixel_clk, target);
-
- DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result);
-
/*
* Filter modes according to the clock value, particularly useful for
* hdmi modes that require precise pixel clocks.
@@ -991,11 +995,15 @@ static int ltdc_get_caps(struct drm_device *ddev)
* does not work on 2nd layer.
*/
ldev->caps.non_alpha_only_l1 = true;
+ ldev->caps.pad_max_freq_hz = 90000000;
+ if (ldev->caps.hw_version == HWVER_10200)
+ ldev->caps.pad_max_freq_hz = 65000000;
break;
case HWVER_20101:
ldev->caps.reg_ofs = REG_OFS_4;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a1;
ldev->caps.non_alpha_only_l1 = false;
+ ldev->caps.pad_max_freq_hz = 150000000;
break;
default:
return -ENODEV;
@@ -1074,8 +1082,11 @@ int ltdc_load(struct drm_device *ddev)
}
}
- if (!IS_ERR(rstc))
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ usleep_range(10, 20);
reset_control_deassert(rstc);
+ }
/* Disable interrupts */
reg_clear(ldev->regs, LTDC_IER,
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index 1e16d6afb0d2..d5afb8960867 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -18,6 +18,7 @@ struct ltdc_caps {
u32 bus_width; /* bus width (32 or 64 bits) */
const u32 *pix_fmt_hw; /* supported pixel formats */
bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */
+ int pad_max_freq_hz; /* max frequency supported by pad */
};
#define LTDC_MAX_LAYER 4
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 156a865c3e6d..c2c042287c19 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -68,4 +68,11 @@ config DRM_SUN8I_MIXER
graphics mixture and feed graphics to TCON. If M is
selected the module will be called sun8i-mixer.
+config DRM_SUN8I_TCON_TOP
+ tristate
+ default DRM_SUN4I if DRM_SUN8I_MIXER!=n
+ help
+ TCON TOP is responsible for configuring the display pipeline for
+ HDMI, TVE and LCD.
+
endif
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 9c81301d0eed..0eb38ac8e86e 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -40,3 +40,4 @@ obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o
obj-$(CONFIG_DRM_SUN6I_DSI) += sun6i-dsi.o
obj-$(CONFIG_DRM_SUN8I_DW_HDMI) += sun8i-drm-hdmi.o
obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o
+obj-$(CONFIG_DRM_SUN8I_TCON_TOP) += sun8i_tcon_top.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index de0a76dfa1a2..d7950b52a1fd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -86,12 +86,6 @@ static inline bool sun4i_backend_format_is_packed_yuv422(uint32_t format)
}
}
-static inline bool sun4i_backend_format_is_yuv(uint32_t format)
-{
- return sun4i_backend_format_is_planar_yuv(format) ||
- sun4i_backend_format_is_packed_yuv422(format);
-}
-
static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
int i;
@@ -304,7 +298,7 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
val);
- if (sun4i_backend_format_is_yuv(fb->format->format))
+ if (fb->format->is_yuv)
return sun4i_backend_update_yuv_format(backend, layer, plane);
ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
@@ -384,7 +378,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
*/
paddr -= PHYS_OFFSET;
- if (sun4i_backend_format_is_yuv(fb->format->format))
+ if (fb->format->is_yuv)
return sun4i_backend_update_yuv_buffer(backend, fb, paddr);
/* Write the 32 lower bits of the address (in bits) */
@@ -502,7 +496,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
num_alpha_planes++;
- if (sun4i_backend_format_is_yuv(fb->format->format)) {
+ if (fb->format->is_yuv) {
DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
num_yuv_planes++;
}
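
The backend hunks drop the driver-local YUV test in favour of the is_yuv flag that the DRM core precomputes for every struct drm_format_info. A minimal sketch of the core-side lookup, assuming a standalone helper:

#include <drm/drm_fourcc.h>

static bool format_is_yuv(u32 fourcc)
{
	const struct drm_format_info *info = drm_format_info(fourcc);

	/* drm_format_info() returns NULL for unknown fourcc codes */
	return info && info->is_yuv;	/* true for e.g. DRM_FORMAT_NV12 */
}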
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 2d7c57406715..3eedf335a935 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -242,7 +242,7 @@ struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm,
/* Set possible_crtcs to this crtc for overlay planes */
for (i = 0; planes[i]; i++) {
- uint32_t possible_crtcs = BIT(drm_crtc_index(&scrtc->crtc));
+ uint32_t possible_crtcs = drm_crtc_mask(&scrtc->crtc);
struct drm_plane *plane = planes[i];
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
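
drm_crtc_mask() is the named form of the BIT(drm_crtc_index()) idiom replaced here and in the LVDS/RGB hunks further down; a sketch of the equivalence:

static u32 overlay_possible_crtcs(struct drm_crtc *crtc)
{
	/* identical to BIT(drm_crtc_index(crtc)) */
	return drm_crtc_mask(crtc);
}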
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 50d19605c38f..dd19d674055c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -26,6 +26,7 @@
#include "sun4i_frontend.h"
#include "sun4i_framebuffer.h"
#include "sun4i_tcon.h"
+#include "sun8i_tcon_top.h"
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
@@ -143,7 +144,7 @@ cleanup_mode_config:
drm_mode_config_cleanup(drm);
of_reserved_mem_device_release(dev);
free_drm:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -156,7 +157,7 @@ static void sun4i_drv_unbind(struct device *dev)
sun4i_framebuffer_free(drm);
drm_mode_config_cleanup(drm);
of_reserved_mem_device_release(dev);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops sun4i_drv_master_ops = {
@@ -197,6 +198,28 @@ static bool sun4i_drv_node_is_tcon(struct device_node *node)
return !!of_match_node(sun4i_tcon_of_table, node);
}
+static bool sun4i_drv_node_is_tcon_with_ch0(struct device_node *node)
+{
+ const struct of_device_id *match;
+
+ match = of_match_node(sun4i_tcon_of_table, node);
+ if (match) {
+ struct sun4i_tcon_quirks *quirks;
+
+ quirks = (struct sun4i_tcon_quirks *)match->data;
+
+ return quirks->has_channel_0;
+ }
+
+ return false;
+}
+
+static bool sun4i_drv_node_is_tcon_top(struct device_node *node)
+{
+ return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
+ !!of_match_node(sun8i_tcon_top_of_table, node);
+}
+
static int compare_of(struct device *dev, void *data)
{
DRM_DEBUG_DRIVER("Comparing of node %pOF with %pOF\n",
@@ -231,12 +254,69 @@ struct endpoint_list {
DECLARE_KFIFO(fifo, struct device_node *, 16);
};
+static void sun4i_drv_traverse_endpoints(struct endpoint_list *list,
+ struct device_node *node,
+ int port_id)
+{
+ struct device_node *ep, *remote, *port;
+
+ port = of_graph_get_port_by_id(node, port_id);
+ if (!port) {
+ DRM_DEBUG_DRIVER("No output to bind on port %d\n", port_id);
+ return;
+ }
+
+ for_each_available_child_of_node(port, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ DRM_DEBUG_DRIVER("Error retrieving the output node\n");
+ continue;
+ }
+
+ if (sun4i_drv_node_is_tcon(node)) {
+ /*
+ * TCON TOP is always probed before TCON. However, TCON
+ * points back to TCON TOP when it is the source for HDMI.
+ * We have to skip it here to prevent infinite looping
+ * between TCON TOP and TCON.
+ */
+ if (sun4i_drv_node_is_tcon_top(remote)) {
+ DRM_DEBUG_DRIVER("TCON output endpoint is TCON TOP... skipping\n");
+ of_node_put(remote);
+ continue;
+ }
+
+ /*
+ * If the node is our TCON with channel 0, the first
+ * port is used for a panel or bridge, and will not be
+ * part of the component framework.
+ */
+ if (sun4i_drv_node_is_tcon_with_ch0(node)) {
+ struct of_endpoint endpoint;
+
+ if (of_graph_parse_endpoint(ep, &endpoint)) {
+ DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
+ of_node_put(remote);
+ continue;
+ }
+
+ if (!endpoint.id) {
+ DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
+ of_node_put(remote);
+ continue;
+ }
+ }
+ }
+
+ kfifo_put(&list->fifo, remote);
+ }
+}
+
static int sun4i_drv_add_endpoints(struct device *dev,
struct endpoint_list *list,
struct component_match **match,
struct device_node *node)
{
- struct device_node *port, *ep, *remote;
int count = 0;
/*
@@ -272,41 +352,13 @@ static int sun4i_drv_add_endpoints(struct device *dev,
count++;
}
- /* Inputs are listed first, then outputs */
- port = of_graph_get_port_by_id(node, 1);
- if (!port) {
- DRM_DEBUG_DRIVER("No output to bind\n");
- return count;
- }
+ /* each node has at least one output */
+ sun4i_drv_traverse_endpoints(list, node, 1);
- for_each_available_child_of_node(port, ep) {
- remote = of_graph_get_remote_port_parent(ep);
- if (!remote) {
- DRM_DEBUG_DRIVER("Error retrieving the output node\n");
- of_node_put(remote);
- continue;
- }
-
- /*
- * If the node is our TCON, the first port is used for
- * panel or bridges, and will not be part of the
- * component framework.
- */
- if (sun4i_drv_node_is_tcon(node)) {
- struct of_endpoint endpoint;
-
- if (of_graph_parse_endpoint(ep, &endpoint)) {
- DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
- continue;
- }
-
- if (!endpoint.id) {
- DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
- continue;
- }
- }
-
- kfifo_put(&list->fifo, remote);
+ /* TCON TOP has a second and a third output */
+ if (sun4i_drv_node_is_tcon_top(node)) {
+ sun4i_drv_traverse_endpoints(list, node, 3);
+ sun4i_drv_traverse_endpoints(list, node, 5);
}
return count;
@@ -366,6 +418,7 @@ static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun8i-a33-display-engine" },
{ .compatible = "allwinner,sun8i-a83t-display-engine" },
{ .compatible = "allwinner,sun8i-h3-display-engine" },
+ { .compatible = "allwinner,sun8i-r40-display-engine" },
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ .compatible = "allwinner,sun9i-a80-display-engine" },
{ }
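
The traversal refactor funnels every remote output node into the kfifo and lets the existing caller pop and recurse, so TCON TOP's extra outputs on ports 3 and 5 are handled by two more calls rather than new loop logic. A condensed sketch of the per-port walk; would_loop_back() is a hypothetical stand-in for the TCON/TCON TOP and panel-endpoint guards above:

static void traverse_port(struct endpoint_list *list,
			  struct device_node *node, int port_id)
{
	struct device_node *port, *ep, *remote;

	port = of_graph_get_port_by_id(node, port_id);
	if (!port)
		return;

	for_each_available_child_of_node(port, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote)
			continue;

		if (would_loop_back(node, remote)) {	/* hypothetical */
			of_node_put(remote);
			continue;
		}

		kfifo_put(&list->fifo, remote);
	}
}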
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index fa4bcd092eaf..061d2e0d9011 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -220,7 +220,7 @@ static int sun4i_hdmi_get_modes(struct drm_connector *connector)
DRM_DEBUG_DRIVER("Monitor is %s monitor\n",
hdmi->hdmi_monitor ? "an HDMI" : "a DVI");
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
cec_s_phys_addr_from_edid(hdmi->cec_adap, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -623,7 +623,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
ret = cec_register_adapter(hdmi->cec_adap, dev);
if (ret < 0)
goto err_cleanup_connector;
- drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
+ drm_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index be3f14d7746d..af7dcb6da351 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -136,7 +136,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
}
/* The LVDS encoder can only work with the TCON channel 0 */
- lvds->encoder.possible_crtcs = BIT(drm_crtc_index(&tcon->crtc->crtc));
+ lvds->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
if (tcon->panel) {
drm_connector_helper_add(&lvds->connector,
@@ -149,7 +149,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
goto err_cleanup_connector;
}
- drm_mode_connector_attach_encoder(&lvds->connector,
+ drm_connector_attach_encoder(&lvds->connector,
&lvds->encoder);
ret = drm_panel_attach(tcon->panel, &lvds->connector);
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index f2fa1f210509..bf068da6b12e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -202,7 +202,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
}
/* The RGB encoder can only work with the TCON channel 0 */
- rgb->encoder.possible_crtcs = BIT(drm_crtc_index(&tcon->crtc->crtc));
+ rgb->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
if (tcon->panel) {
drm_connector_helper_add(&rgb->connector,
@@ -215,7 +215,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
goto err_cleanup_connector;
}
- drm_mode_connector_attach_encoder(&rgb->connector,
+ drm_connector_attach_encoder(&rgb->connector,
&rgb->encoder);
ret = drm_panel_attach(tcon->panel, &rgb->connector);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 8232b39e16ca..3fb084f802e2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -766,12 +766,14 @@ static int sun4i_tcon_init_regmap(struct device *dev,
*/
static struct sunxi_engine *
sun4i_tcon_find_engine_traverse(struct sun4i_drv *drv,
- struct device_node *node)
+ struct device_node *node,
+ u32 port_id)
{
struct device_node *port, *ep, *remote;
struct sunxi_engine *engine = ERR_PTR(-EINVAL);
+ u32 reg = 0;
- port = of_graph_get_port_by_id(node, 0);
+ port = of_graph_get_port_by_id(node, port_id);
if (!port)
return ERR_PTR(-EINVAL);
@@ -801,8 +803,21 @@ sun4i_tcon_find_engine_traverse(struct sun4i_drv *drv,
if (remote == engine->node)
goto out_put_remote;
+ /*
+ * According to the device tree binding, input ports have even id
+ * numbers and output ports have odd ids. Since a component with
+ * more than one input and one output (TCON TOP) exists, the
+ * correct remote input id has to be calculated by subtracting 1
+ * from the remote output id. If this for some reason can't be
+ * done, 0 is used as the input port id.
+ */
+ of_node_put(port);
+ port = of_graph_get_remote_port(ep);
+ if (!of_property_read_u32(port, "reg", &reg) && reg > 0)
+ reg -= 1;
+
/* keep looking through upstream ports */
- engine = sun4i_tcon_find_engine_traverse(drv, remote);
+ engine = sun4i_tcon_find_engine_traverse(drv, remote, reg);
out_put_remote:
of_node_put(remote);
@@ -925,7 +940,7 @@ static struct sunxi_engine *sun4i_tcon_find_engine(struct sun4i_drv *drv,
/* Fallback to old method by traversing input endpoints */
of_node_put(port);
- return sun4i_tcon_find_engine_traverse(drv, node);
+ return sun4i_tcon_find_engine_traverse(drv, node, 0);
}
static int sun4i_tcon_bind(struct device *dev, struct device *master,
@@ -1067,23 +1082,25 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
goto err_free_dotclock;
}
- /*
- * If we have an LVDS panel connected to the TCON, we should
- * just probe the LVDS connector. Otherwise, just probe RGB as
- * we used to.
- */
- remote = of_graph_get_remote_node(dev->of_node, 1, 0);
- if (of_device_is_compatible(remote, "panel-lvds"))
- if (can_lvds)
- ret = sun4i_lvds_init(drm, tcon);
+ if (tcon->quirks->has_channel_0) {
+ /*
+ * If we have an LVDS panel connected to the TCON, we should
+ * just probe the LVDS connector. Otherwise, just probe RGB as
+ * we used to.
+ */
+ remote = of_graph_get_remote_node(dev->of_node, 1, 0);
+ if (of_device_is_compatible(remote, "panel-lvds"))
+ if (can_lvds)
+ ret = sun4i_lvds_init(drm, tcon);
+ else
+ ret = -EINVAL;
else
- ret = -EINVAL;
- else
- ret = sun4i_rgb_init(drm, tcon);
- of_node_put(remote);
+ ret = sun4i_rgb_init(drm, tcon);
+ of_node_put(remote);
- if (ret < 0)
- goto err_free_dotclock;
+ if (ret < 0)
+ goto err_free_dotclock;
+ }
if (tcon->quirks->needs_de_be_mux) {
/*
@@ -1137,13 +1154,19 @@ static const struct component_ops sun4i_tcon_ops = {
static int sun4i_tcon_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
+ const struct sun4i_tcon_quirks *quirks;
struct drm_bridge *bridge;
struct drm_panel *panel;
int ret;
- ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
- if (ret == -EPROBE_DEFER)
- return ret;
+ quirks = of_device_get_match_data(&pdev->dev);
+
+ /* panels and bridges are present only on TCONs with channel 0 */
+ if (quirks->has_channel_0) {
+ ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ }
return component_add(&pdev->dev, &sun4i_tcon_ops);
}
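
The parity rule in the traversal comment (even port ids are inputs, odd ids are outputs) reduces to "input id = output id - 1". A worked sketch of the fallback logic:

	/*
	 * A remote output port with reg = 3 (e.g. a TCON TOP output)
	 * pairs with input port reg = 3 - 1 = 2; when reg can't be
	 * read, input port 0 is assumed.
	 */
	u32 reg = 0;

	if (!of_property_read_u32(port, "reg", &reg) && reg > 0)
		reg -= 1;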
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index b070d522ed8d..1a838d208211 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -623,7 +623,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
}
tv->connector.interlace_allowed = true;
- drm_mode_connector_attach_encoder(&tv->connector, &tv->encoder);
+ drm_connector_attach_encoder(&tv->connector, &tv->encoder);
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index bfbf761f0c1d..e3b34a345546 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -13,6 +13,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/slab.h>
#include <linux/phy/phy.h>
@@ -247,10 +248,8 @@ static u16 sun6i_dsi_crc_compute(u8 const *buffer, size_t len)
return crc_ccitt(0xffff, buffer, len);
}
-static u16 sun6i_dsi_crc_repeat_compute(u8 pd, size_t len)
+static u16 sun6i_dsi_crc_repeat(u8 pd, u8 *buffer, size_t len)
{
- u8 buffer[len];
-
memset(buffer, pd, len);
return sun6i_dsi_crc_compute(buffer, len);
@@ -274,11 +273,11 @@ static u32 sun6i_dsi_build_blk0_pkt(u8 vc, u16 wc)
wc & 0xff, wc >> 8);
}
-static u32 sun6i_dsi_build_blk1_pkt(u16 pd, size_t len)
+static u32 sun6i_dsi_build_blk1_pkt(u16 pd, u8 *buffer, size_t len)
{
u32 val = SUN6I_DSI_BLK_PD(pd);
- return val | SUN6I_DSI_BLK_PF(sun6i_dsi_crc_repeat_compute(pd, len));
+ return val | SUN6I_DSI_BLK_PF(sun6i_dsi_crc_repeat(pd, buffer, len));
}
static void sun6i_dsi_inst_abort(struct sun6i_dsi *dsi)
@@ -452,6 +451,54 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
struct mipi_dsi_device *device = dsi->device;
unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
u16 hbp, hfp, hsa, hblk, vblk;
+ size_t bytes;
+ u8 *buffer;
+
+ /* Do all timing calculations up front to allocate buffer space */
+
+ /*
+ * A sync period is composed of a blanking packet (4 bytes +
+ * payload + 2 bytes) and a sync event packet (4 bytes). Its
+ * minimal size is therefore 10 bytes
+ */
+#define HSA_PACKET_OVERHEAD 10
+ hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+ (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+
+ /*
+ * The backporch is set using a blanking packet (4 bytes +
+ * payload + 2 bytes). Its minimal size is therefore 6 bytes
+ */
+#define HBP_PACKET_OVERHEAD 6
+ hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+ (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD);
+
+ /*
+ * The frontporch is set using a blanking packet (4 bytes +
+ * payload + 2 bytes). Its minimal size is therefore 6 bytes
+ */
+#define HFP_PACKET_OVERHEAD 6
+ hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+ (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD);
+
+ /*
+ * hblk seems to be the line + porches length.
+ */
+ hblk = mode->htotal * Bpp - hsa;
+
+ /*
+ * And I'm not entirely sure what vblk is about. The driver in
+ * Allwinner BSP is using a rather convoluted calculation
+ * there only for 4 lanes. However, using 0 (the !4 lanes
+ * case) even with a 4 lanes screen seems to work...
+ */
+ vblk = 0;
+
+ /* How many bytes do we need to send all payloads? */
+ bytes = max_t(size_t, max(max(hfp, hblk), max(hsa, hbp)), vblk);
+ buffer = kmalloc(bytes, GFP_KERNEL);
+ if (WARN_ON(!buffer))
+ return;
regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, 0);
@@ -485,63 +532,37 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) |
SUN6I_DSI_BASIC_SIZE1_VT(mode->vtotal));
- /*
- * A sync period is composed of a blanking packet (4 bytes +
- * payload + 2 bytes) and a sync event packet (4 bytes). Its
- * minimal size is therefore 10 bytes
- */
-#define HSA_PACKET_OVERHEAD 10
- hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
- (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+ /* sync */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hsa));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA1_REG,
- sun6i_dsi_build_blk1_pkt(0, hsa));
+ sun6i_dsi_build_blk1_pkt(0, buffer, hsa));
- /*
- * The backporch is set using a blanking packet (4 bytes +
- * payload + 2 bytes). Its minimal size is therefore 6 bytes
- */
-#define HBP_PACKET_OVERHEAD 6
- hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
- (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD);
+ /* backporch */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hbp));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP1_REG,
- sun6i_dsi_build_blk1_pkt(0, hbp));
+ sun6i_dsi_build_blk1_pkt(0, buffer, hbp));
- /*
- * The frontporch is set using a blanking packet (4 bytes +
- * payload + 2 bytes). Its minimal size is therefore 6 bytes
- */
-#define HFP_PACKET_OVERHEAD 6
- hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
- (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD);
+ /* frontporch */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hfp));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP1_REG,
- sun6i_dsi_build_blk1_pkt(0, hfp));
+ sun6i_dsi_build_blk1_pkt(0, buffer, hfp));
- /*
- * hblk seems to be the line + porches length.
- */
- hblk = mode->htotal * Bpp - hsa;
+ /* hblk */
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, hblk));
regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK1_REG,
- sun6i_dsi_build_blk1_pkt(0, hblk));
+ sun6i_dsi_build_blk1_pkt(0, buffer, hblk));
- /*
- * And I'm not entirely sure what vblk is about. The driver in
- * Allwinner BSP is using a rather convoluted calculation
- * there only for 4 lanes. However, using 0 (the !4 lanes
- * case) even with a 4 lanes screen seems to work...
- */
- vblk = 0;
+ /* vblk */
regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK0_REG,
sun6i_dsi_build_blk0_pkt(device->channel, vblk));
regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK1_REG,
- sun6i_dsi_build_blk1_pkt(0, vblk));
+ sun6i_dsi_build_blk1_pkt(0, buffer, vblk));
+
+ kfree(buffer);
}
static int sun6i_dsi_start(struct sun6i_dsi *dsi,
@@ -812,8 +833,8 @@ static int sun6i_dsi_attach(struct mipi_dsi_host *host,
dsi->device = device;
dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (!dsi->panel)
- return -EINVAL;
+ if (IS_ERR(dsi->panel))
+ return PTR_ERR(dsi->panel);
dev_info(host->dev, "Attached device %s\n", device->name);
@@ -920,7 +941,7 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
goto err_cleanup_connector;
}
- drm_mode_connector_attach_encoder(&dsi->connector, &dsi->encoder);
+ drm_connector_attach_encoder(&dsi->connector, &dsi->encoder);
drm_panel_attach(dsi->panel, &dsi->connector);
return 0;
@@ -1040,7 +1061,7 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
return 0;
}
-static int sun6i_dsi_runtime_resume(struct device *dev)
+static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
@@ -1069,7 +1090,7 @@ static int sun6i_dsi_runtime_resume(struct device *dev)
return 0;
}
-static int sun6i_dsi_runtime_suspend(struct device *dev)
+static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
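
The DSI rework removes a variable-length array by computing every blanking-payload size up front and making one worst-case allocation that all the blk1 packets share. As a worked example of the hsa formula, a 1080p60 mode at 24 bpp (Bpp = 3) with a 44-pixel sync pulse gives 44 * 3 - 10 = 122 bytes, well above the 10-byte floor. A sketch of the allocation pattern, reusing the hunk's variable names:

	size_t bytes = max3((size_t)vblk, (size_t)hblk,
			    max3((size_t)hsa, (size_t)hbp, (size_t)hfp));
	u8 *buffer = kmalloc(bytes, GFP_KERNEL);

	if (WARN_ON(!buffer))
		return;

	/* ... hand buffer to each sun6i_dsi_build_blk1_pkt() call ... */

	kfree(buffer);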
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 9f40a44b456b..31875b636434 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -12,6 +12,7 @@
#include <drm/drm_crtc_helper.h>
#include "sun8i_dw_hdmi.h"
+#include "sun8i_tcon_top.h"
static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -41,6 +42,44 @@ sun8i_dw_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static bool sun8i_dw_hdmi_node_is_tcon_top(struct device_node *node)
+{
+ return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
+ !!of_match_node(sun8i_tcon_top_of_table, node);
+}
+
+static u32 sun8i_dw_hdmi_find_possible_crtcs(struct drm_device *drm,
+ struct device_node *node)
+{
+ struct device_node *port, *ep, *remote, *remote_port;
+ u32 crtcs = 0;
+
+ remote = of_graph_get_remote_node(node, 0, -1);
+ if (!remote)
+ return 0;
+
+ if (sun8i_dw_hdmi_node_is_tcon_top(remote)) {
+ port = of_graph_get_port_by_id(remote, 4);
+ if (!port)
+ goto crtcs_exit;
+
+ for_each_child_of_node(port, ep) {
+ remote_port = of_graph_get_remote_port(ep);
+ if (remote_port) {
+ crtcs |= drm_of_crtc_port_mask(drm, remote_port);
+ of_node_put(remote_port);
+ }
+ }
+ } else {
+ crtcs = drm_of_find_possible_crtcs(drm, node);
+ }
+
+crtcs_exit:
+ of_node_put(remote);
+
+ return crtcs;
+}
+
static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -63,7 +102,8 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = &pdev->dev;
encoder = &hdmi->encoder;
- encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
+ encoder->possible_crtcs =
+ sun8i_dw_hdmi_find_possible_crtcs(drm, dev->of_node);
/*
* If we failed to find the CRTC(s) which this encoder is
* supposed to be connected to, it's because the CRTC has
@@ -181,7 +221,7 @@ static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, sun8i_dw_hdmi_dt_ids);
-struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
+static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
.probe = sun8i_dw_hdmi_probe,
.remove = sun8i_dw_hdmi_remove,
.driver = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index 79154f0f674a..aadbe0a10b0c 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -98,7 +98,8 @@
#define SUN8I_HDMI_PHY_PLL_CFG1_LDO2_EN BIT(29)
#define SUN8I_HDMI_PHY_PLL_CFG1_LDO1_EN BIT(28)
#define SUN8I_HDMI_PHY_PLL_CFG1_HV_IS_33 BIT(27)
-#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL BIT(26)
+#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK BIT(26)
+#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT 26
#define SUN8I_HDMI_PHY_PLL_CFG1_PLLEN BIT(25)
#define SUN8I_HDMI_PHY_PLL_CFG1_LDO_VSET(x) ((x) << 22)
#define SUN8I_HDMI_PHY_PLL_CFG1_UNKNOWN(x) ((x) << 20)
@@ -147,6 +148,7 @@ struct sun8i_hdmi_phy;
struct sun8i_hdmi_phy_variant {
bool has_phy_clk;
+ bool has_second_pll;
void (*phy_init)(struct sun8i_hdmi_phy *phy);
void (*phy_disable)(struct dw_hdmi *hdmi,
struct sun8i_hdmi_phy *phy);
@@ -160,6 +162,7 @@ struct sun8i_hdmi_phy {
struct clk *clk_mod;
struct clk *clk_phy;
struct clk *clk_pll0;
+ struct clk *clk_pll1;
unsigned int rcal;
struct regmap *regs;
struct reset_control *rst_phy;
@@ -188,6 +191,7 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
const struct dw_hdmi_phy_ops *sun8i_hdmi_phy_get_ops(void);
-int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev);
+int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev,
+ bool second_parent);
#endif /* _SUN8I_DW_HDMI_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 5a52fc489a9d..82502b351aec 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -183,7 +183,13 @@ static int sun8i_hdmi_phy_config_h3(struct dw_hdmi *hdmi,
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_TXEN_MASK, 0);
- regmap_write(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, pll_cfg1_init);
+ /*
+ * NOTE: We have to be careful not to overwrite PHY parent
+ * clock selection bit and clock divider.
+ */
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+ (u32)~SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
+ pll_cfg1_init);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG2_REG,
(u32)~SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_MSK,
pll_cfg2_init);
@@ -352,6 +358,10 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
SUN8I_HDMI_PHY_ANA_CFG3_SCLEN |
SUN8I_HDMI_PHY_ANA_CFG3_SDAEN);
+ /* reset PHY PLL clock parent */
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+ SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK, 0);
+
/* set HW control of CEC pins */
regmap_write(phy->regs, SUN8I_HDMI_PHY_CEC_REG, 0);
@@ -386,6 +396,14 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
.name = "phy"
};
+static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
+ .has_phy_clk = true,
+ .has_second_pll = true,
+ .phy_init = &sun8i_hdmi_phy_init_h3,
+ .phy_disable = &sun8i_hdmi_phy_disable_h3,
+ .phy_config = &sun8i_hdmi_phy_config_h3,
+};
+
static const struct sun8i_hdmi_phy_variant sun8i_a83t_hdmi_phy = {
.phy_init = &sun8i_hdmi_phy_init_a83t,
.phy_disable = &sun8i_hdmi_phy_disable_a83t,
@@ -401,6 +419,10 @@ static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
{
+ .compatible = "allwinner,sun50i-a64-hdmi-phy",
+ .data = &sun50i_a64_hdmi_phy,
+ },
+ {
.compatible = "allwinner,sun8i-a83t-hdmi-phy",
.data = &sun8i_a83t_hdmi_phy,
},
@@ -472,18 +494,30 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
goto err_put_clk_mod;
}
- ret = sun8i_phy_clk_create(phy, dev);
+ if (phy->variant->has_second_pll) {
+ phy->clk_pll1 = of_clk_get_by_name(node, "pll-1");
+ if (IS_ERR(phy->clk_pll1)) {
+ dev_err(dev, "Could not get pll-1 clock\n");
+ ret = PTR_ERR(phy->clk_pll1);
+ goto err_put_clk_pll0;
+ }
+ }
+
+ ret = sun8i_phy_clk_create(phy, dev,
+ phy->variant->has_second_pll);
if (ret) {
dev_err(dev, "Couldn't create the PHY clock\n");
- goto err_put_clk_pll0;
+ goto err_put_clk_pll1;
}
+
+ clk_prepare_enable(phy->clk_phy);
}
phy->rst_phy = of_reset_control_get_shared(node, "phy");
if (IS_ERR(phy->rst_phy)) {
dev_err(dev, "Could not get phy reset control\n");
ret = PTR_ERR(phy->rst_phy);
- goto err_put_clk_pll0;
+ goto err_disable_clk_phy;
}
ret = reset_control_deassert(phy->rst_phy);
@@ -514,9 +548,12 @@ err_deassert_rst_phy:
reset_control_assert(phy->rst_phy);
err_put_rst_phy:
reset_control_put(phy->rst_phy);
+err_disable_clk_phy:
+ clk_disable_unprepare(phy->clk_phy);
+err_put_clk_pll1:
+ clk_put(phy->clk_pll1);
err_put_clk_pll0:
- if (phy->variant->has_phy_clk)
- clk_put(phy->clk_pll0);
+ clk_put(phy->clk_pll0);
err_put_clk_mod:
clk_put(phy->clk_mod);
err_put_clk_bus:
@@ -531,13 +568,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
clk_disable_unprepare(phy->clk_mod);
clk_disable_unprepare(phy->clk_bus);
+ clk_disable_unprepare(phy->clk_phy);
reset_control_assert(phy->rst_phy);
reset_control_put(phy->rst_phy);
- if (phy->variant->has_phy_clk)
- clk_put(phy->clk_pll0);
+ clk_put(phy->clk_pll0);
+ clk_put(phy->clk_pll1);
clk_put(phy->clk_mod);
clk_put(phy->clk_bus);
}
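
The config-time write becomes regmap_update_bits() so that the mux setting made by the clock code survives: update_bits is a read-modify-write, new = (old & ~mask) | (val & mask), and here the mask is the complement of the CKIN_SEL bit. With the header values, CKIN_SEL is bit 26, so the mask ~BIT(26) = 0xfbffffff rewrites every bit of pll_cfg1_init except the parent select:

	/* bit 26 keeps whatever value the clock driver last set */
	regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
			   (u32)~SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
			   pll_cfg1_init);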
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c
index faea449812f8..a4d31fe3abff 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c
@@ -22,35 +22,45 @@ static int sun8i_phy_clk_determine_rate(struct clk_hw *hw,
{
unsigned long rate = req->rate;
unsigned long best_rate = 0;
+ struct clk_hw *best_parent = NULL;
struct clk_hw *parent;
int best_div = 1;
- int i;
+ int i, p;
- parent = clk_hw_get_parent(hw);
-
- for (i = 1; i <= 16; i++) {
- unsigned long ideal = rate * i;
- unsigned long rounded;
-
- rounded = clk_hw_round_rate(parent, ideal);
+ for (p = 0; p < clk_hw_get_num_parents(hw); p++) {
+ parent = clk_hw_get_parent_by_index(hw, p);
+ if (!parent)
+ continue;
- if (rounded == ideal) {
- best_rate = rounded;
- best_div = i;
- break;
+ for (i = 1; i <= 16; i++) {
+ unsigned long ideal = rate * i;
+ unsigned long rounded;
+
+ rounded = clk_hw_round_rate(parent, ideal);
+
+ if (rounded == ideal) {
+ best_rate = rounded;
+ best_div = i;
+ best_parent = parent;
+ break;
+ }
+
+ if (!best_rate ||
+ abs(rate - rounded / i) <
+ abs(rate - best_rate / best_div)) {
+ best_rate = rounded;
+ best_div = i;
+ best_parent = parent;
+ }
}
- if (!best_rate ||
- abs(rate - rounded / i) <
- abs(rate - best_rate / best_div)) {
- best_rate = rounded;
- best_div = i;
- }
+ if (best_rate / best_div == rate)
+ break;
}
req->rate = best_rate / best_div;
req->best_parent_rate = best_rate;
- req->best_parent_hw = parent;
+ req->best_parent_hw = best_parent;
return 0;
}
@@ -95,22 +105,58 @@ static int sun8i_phy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static u8 sun8i_phy_clk_get_parent(struct clk_hw *hw)
+{
+ struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
+ u32 reg;
+
+ regmap_read(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, &reg);
+ reg = (reg & SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK) >>
+ SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT;
+
+ return reg;
+}
+
+static int sun8i_phy_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
+
+ if (index > 1)
+ return -EINVAL;
+
+ regmap_update_bits(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+ SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
+ index << SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT);
+
+ return 0;
+}
+
static const struct clk_ops sun8i_phy_clk_ops = {
.determine_rate = sun8i_phy_clk_determine_rate,
.recalc_rate = sun8i_phy_clk_recalc_rate,
.set_rate = sun8i_phy_clk_set_rate,
+
+ .get_parent = sun8i_phy_clk_get_parent,
+ .set_parent = sun8i_phy_clk_set_parent,
};
-int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev)
+int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev,
+ bool second_parent)
{
struct clk_init_data init;
struct sun8i_phy_clk *priv;
- const char *parents[1];
+ const char *parents[2];
parents[0] = __clk_get_name(phy->clk_pll0);
if (!parents[0])
return -ENODEV;
+ if (second_parent) {
+ parents[1] = __clk_get_name(phy->clk_pll1);
+ if (!parents[1])
+ return -ENODEV;
+ }
+
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -118,7 +164,7 @@ int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev)
init.name = "hdmi-phy-clk";
init.ops = &sun8i_phy_clk_ops;
init.parent_names = parents;
- init.num_parents = 1;
+ init.num_parents = second_parent ? 2 : 1;
init.flags = CLK_SET_RATE_PARENT;
priv->phy = phy;
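
determine_rate now scores every (parent, divider) pair with dividers 1..16 and keeps the closest result, breaking out early on an exact match. Worked example for a requested 148.5 MHz: a parent that can round 297 MHz exactly yields ideal = 148500000 * 2 and rounded == ideal, so best_div = 2 and the search stops; a parent limited to 270 MHz would be kept only as a fallback, with a |148.5 - 135| = 13.5 MHz error at div = 2. The scoring step in isolation (sketch):

	unsigned long ideal = rate * i;		/* e.g. 148500000 * 2 */
	unsigned long rounded = clk_hw_round_rate(parent, ideal);

	if (rounded == ideal) {			/* exact hit: stop searching */
		best_rate = rounded;
		best_div = i;
		best_parent = parent;
	}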
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 126899d6f0d3..fc3713608f78 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -21,8 +21,9 @@
#include <linux/component.h>
#include <linux/dma-mapping.h>
-#include <linux/reset.h>
#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/reset.h>
#include "sun4i_drv.h"
#include "sun8i_mixer.h"
@@ -322,6 +323,42 @@ static struct regmap_config sun8i_mixer_regmap_config = {
.max_register = 0xbfffc, /* guessed */
};
+static int sun8i_mixer_of_get_id(struct device_node *node)
+{
+ struct device_node *port, *ep;
+ int ret = -EINVAL;
+
+ /* output is port 1 */
+ port = of_graph_get_port_by_id(node, 1);
+ if (!port)
+ return -EINVAL;
+
+ /* try to find downstream endpoint */
+ for_each_available_child_of_node(port, ep) {
+ struct device_node *remote;
+ u32 reg;
+
+ remote = of_graph_get_remote_endpoint(ep);
+ if (!remote)
+ continue;
+
+ ret = of_property_read_u32(remote, "reg", &reg);
+ if (!ret) {
+ of_node_put(remote);
+ of_node_put(ep);
+ of_node_put(port);
+
+ return reg;
+ }
+
+ of_node_put(remote);
+ }
+
+ of_node_put(port);
+
+ return ret;
+}
+
static int sun8i_mixer_bind(struct device *dev, struct device *master,
void *data)
{
@@ -353,8 +390,16 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
dev_set_drvdata(dev, mixer);
mixer->engine.ops = &sun8i_engine_ops;
mixer->engine.node = dev->of_node;
- /* The ID of the mixer currently doesn't matter */
- mixer->engine.id = -1;
+
+ /*
+ * While this function can fail, we shouldn't do anything
+ * if this happens. Some early DE2 DT entries don't provide
+ * a mixer id but work nevertheless because matching between
+ * TCON and mixer is done by comparing node pointers (the old
+ * way) instead of comparing ids. If this function fails and
+ * an id is needed, it will fail during id matching anyway.
+ */
+ mixer->engine.id = sun8i_mixer_of_get_id(dev->of_node);
mixer->cfg = of_device_get_match_data(dev);
if (!mixer->cfg)
@@ -432,14 +477,14 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(0),
SUN8I_MIXER_BLEND_COLOR_BLACK);
- /* Fixed zpos for now */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ROUTE, 0x43210);
-
plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
for (i = 0; i < plane_cnt; i++)
regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_MODE(i),
SUN8I_MIXER_BLEND_MODE_DEF);
+ regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
+
return 0;
err_disable_bus_clk:
@@ -500,6 +545,22 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
.vi_num = 1,
};
+static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
+ .ccsc = 0,
+ .mod_rate = 297000000,
+ .scaler_mask = 0xf,
+ .ui_num = 3,
+ .vi_num = 1,
+};
+
+static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
+ .ccsc = 1,
+ .mod_rate = 297000000,
+ .scaler_mask = 0x3,
+ .ui_num = 1,
+ .vi_num = 1,
+};
+
static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
.vi_num = 2,
.ui_num = 1,
@@ -522,6 +583,14 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
.data = &sun8i_h3_mixer0_cfg,
},
{
+ .compatible = "allwinner,sun8i-r40-de2-mixer-0",
+ .data = &sun8i_r40_mixer0_cfg,
+ },
+ {
+ .compatible = "allwinner,sun8i-r40-de2-mixer-1",
+ .data = &sun8i_r40_mixer1_cfg,
+ },
+ {
.compatible = "allwinner,sun8i-v3s-de2-mixer",
.data = &sun8i_v3s_mixer_cfg,
},
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index f34e70c42adf..406c42e752d7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -44,6 +44,7 @@
#define SUN8I_MIXER_BLEND_CK_MIN(x) (0x10e0 + 0x04 * (x))
#define SUN8I_MIXER_BLEND_OUTCTL 0x10fc
+#define SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK GENMASK(12, 8)
#define SUN8I_MIXER_BLEND_PIPE_CTL_EN(pipe) BIT(8 + pipe)
#define SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(pipe) BIT(pipe)
/* colors are always in AARRGGBB format */
@@ -51,6 +52,9 @@
/* The following numbers are some still unknown magic numbers */
#define SUN8I_MIXER_BLEND_MODE_DEF 0x03010301
+#define SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(n) (0xf << ((n) << 2))
+#define SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(n) ((n) << 2)
+
#define SUN8I_MIXER_BLEND_OUTCTL_INTERLACED BIT(1)
#define SUN8I_MIXER_FBFMT_ARGB8888 0
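
The new route macros pack one 4-bit channel selector per pipe, so PIPE_MSK(n) is 0xf shifted left by n * 4. Worked example for pipe 2: the mask is 0xf << 8 = 0xf00 and routing channel 1 there writes 1 << 8 = 0x100. A sketch of the register update, with regs standing in for mixer->engine.regs:

	regmap_update_bits(regs, SUN8I_MIXER_BLEND_ROUTE,
			   SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(2),	       /* 0xf00 */
			   1 << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(2)); /* 0x100 */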
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
new file mode 100644
index 000000000000..55fe398d8290
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#include <drm/drmP.h>
+
+#include <dt-bindings/clock/sun8i-tcon-top.h>
+
+#include <linux/bitfield.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include "sun8i_tcon_top.h"
+
+static bool sun8i_tcon_top_node_is_tcon_top(struct device_node *node)
+{
+ return !!of_match_node(sun8i_tcon_top_of_table, node);
+}
+
+int sun8i_tcon_top_set_hdmi_src(struct device *dev, int tcon)
+{
+ struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
+ unsigned long flags;
+ u32 val;
+
+ if (!sun8i_tcon_top_node_is_tcon_top(dev->of_node)) {
+ dev_err(dev, "Device is not TCON TOP!\n");
+ return -EINVAL;
+ }
+
+ if (tcon < 2 || tcon > 3) {
+ dev_err(dev, "TCON index must be 2 or 3!\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tcon_top->reg_lock, flags);
+
+ val = readl(tcon_top->regs + TCON_TOP_GATE_SRC_REG);
+ val &= ~TCON_TOP_HDMI_SRC_MSK;
+ val |= FIELD_PREP(TCON_TOP_HDMI_SRC_MSK, tcon - 1);
+ writel(val, tcon_top->regs + TCON_TOP_GATE_SRC_REG);
+
+ spin_unlock_irqrestore(&tcon_top->reg_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(sun8i_tcon_top_set_hdmi_src);
+
+int sun8i_tcon_top_de_config(struct device *dev, int mixer, int tcon)
+{
+ struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
+ unsigned long flags;
+ u32 reg;
+
+ if (!sun8i_tcon_top_node_is_tcon_top(dev->of_node)) {
+ dev_err(dev, "Device is not TCON TOP!\n");
+ return -EINVAL;
+ }
+
+ if (mixer > 1) {
+ dev_err(dev, "Mixer index is too high!\n");
+ return -EINVAL;
+ }
+
+ if (tcon > 3) {
+ dev_err(dev, "TCON index is too high!\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tcon_top->reg_lock, flags);
+
+ reg = readl(tcon_top->regs + TCON_TOP_PORT_SEL_REG);
+ if (mixer == 0) {
+ reg &= ~TCON_TOP_PORT_DE0_MSK;
+ reg |= FIELD_PREP(TCON_TOP_PORT_DE0_MSK, tcon);
+ } else {
+ reg &= ~TCON_TOP_PORT_DE1_MSK;
+ reg |= FIELD_PREP(TCON_TOP_PORT_DE1_MSK, tcon);
+ }
+ writel(reg, tcon_top->regs + TCON_TOP_PORT_SEL_REG);
+
+ spin_unlock_irqrestore(&tcon_top->reg_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(sun8i_tcon_top_de_config);
+
+static struct clk_hw *sun8i_tcon_top_register_gate(struct device *dev,
+ const char *parent,
+ void __iomem *regs,
+ spinlock_t *lock,
+ u8 bit, int name_index)
+{
+ const char *clk_name, *parent_name;
+ int ret, index;
+
+ index = of_property_match_string(dev->of_node, "clock-names", parent);
+ if (index < 0)
+ return ERR_PTR(index);
+
+ parent_name = of_clk_get_parent_name(dev->of_node, index);
+
+ ret = of_property_read_string_index(dev->of_node,
+ "clock-output-names", name_index,
+ &clk_name);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk_hw_register_gate(dev, clk_name, parent_name,
+ CLK_SET_RATE_PARENT,
+ regs + TCON_TOP_GATE_SRC_REG,
+ bit, 0, lock);
+}
+
+static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct clk_hw_onecell_data *clk_data;
+ struct sun8i_tcon_top *tcon_top;
+ struct resource *res;
+ void __iomem *regs;
+ int ret, i;
+
+ tcon_top = devm_kzalloc(dev, sizeof(*tcon_top), GFP_KERNEL);
+ if (!tcon_top)
+ return -ENOMEM;
+
+ clk_data = devm_kzalloc(dev, sizeof(*clk_data) +
+ sizeof(*clk_data->hws) * CLK_NUM,
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+ tcon_top->clk_data = clk_data;
+
+ spin_lock_init(&tcon_top->reg_lock);
+
+ tcon_top->rst = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(tcon_top->rst)) {
+ dev_err(dev, "Couldn't get our reset line\n");
+ return PTR_ERR(tcon_top->rst);
+ }
+
+ tcon_top->bus = devm_clk_get(dev, "bus");
+ if (IS_ERR(tcon_top->bus)) {
+ dev_err(dev, "Couldn't get the bus clock\n");
+ return PTR_ERR(tcon_top->bus);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ tcon_top->regs = regs;
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ ret = reset_control_deassert(tcon_top->rst);
+ if (ret) {
+ dev_err(dev, "Could not deassert ctrl reset control\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(tcon_top->bus);
+ if (ret) {
+ dev_err(dev, "Could not enable bus clock\n");
+ goto err_assert_reset;
+ }
+
+ /*
+ * TCON TOP has two muxes, which select the parent clock for each TCON
+ * TV channel clock. The parent could be either the TCON TV or the TVE
+ * clock. For now we leave this fixed to TCON TV, since the TVE driver
+ * for R40 is not yet implemented. Once it is, the graph needs to be
+ * traversed to determine whether TVE is active on each TCON TV. If it
+ * is, the mux should be switched to the TVE clock parent.
+ */
+ clk_data->hws[CLK_TCON_TOP_TV0] =
+ sun8i_tcon_top_register_gate(dev, "tcon-tv0", regs,
+ &tcon_top->reg_lock,
+ TCON_TOP_TCON_TV0_GATE, 0);
+
+ clk_data->hws[CLK_TCON_TOP_TV1] =
+ sun8i_tcon_top_register_gate(dev, "tcon-tv1", regs,
+ &tcon_top->reg_lock,
+ TCON_TOP_TCON_TV1_GATE, 1);
+
+ clk_data->hws[CLK_TCON_TOP_DSI] =
+ sun8i_tcon_top_register_gate(dev, "dsi", regs,
+ &tcon_top->reg_lock,
+ TCON_TOP_TCON_DSI_GATE, 2);
+
+ for (i = 0; i < CLK_NUM; i++)
+ if (IS_ERR(clk_data->hws[i])) {
+ ret = PTR_ERR(clk_data->hws[i]);
+ goto err_unregister_gates;
+ }
+
+ clk_data->num = CLK_NUM;
+
+ ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ clk_data);
+ if (ret)
+ goto err_unregister_gates;
+
+ dev_set_drvdata(dev, tcon_top);
+
+ return 0;
+
+err_unregister_gates:
+ for (i = 0; i < CLK_NUM; i++)
+ if (clk_data->hws[i])
+ clk_hw_unregister_gate(clk_data->hws[i]);
+ clk_disable_unprepare(tcon_top->bus);
+err_assert_reset:
+ reset_control_assert(tcon_top->rst);
+
+ return ret;
+}
+
+static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
+ struct clk_hw_onecell_data *clk_data = tcon_top->clk_data;
+ int i;
+
+ of_clk_del_provider(dev->of_node);
+ for (i = 0; i < CLK_NUM; i++)
+ clk_hw_unregister_gate(clk_data->hws[i]);
+
+ clk_disable_unprepare(tcon_top->bus);
+ reset_control_assert(tcon_top->rst);
+}
+
+static const struct component_ops sun8i_tcon_top_ops = {
+ .bind = sun8i_tcon_top_bind,
+ .unbind = sun8i_tcon_top_unbind,
+};
+
+static int sun8i_tcon_top_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &sun8i_tcon_top_ops);
+}
+
+static int sun8i_tcon_top_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sun8i_tcon_top_ops);
+
+ return 0;
+}
+
+/* sun4i_drv uses this list to check if a device node is a TCON TOP */
+const struct of_device_id sun8i_tcon_top_of_table[] = {
+ { .compatible = "allwinner,sun8i-r40-tcon-top" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
+EXPORT_SYMBOL(sun8i_tcon_top_of_table);
+
+static struct platform_driver sun8i_tcon_top_platform_driver = {
+ .probe = sun8i_tcon_top_probe,
+ .remove = sun8i_tcon_top_remove,
+ .driver = {
+ .name = "sun8i-tcon-top",
+ .of_match_table = sun8i_tcon_top_of_table,
+ },
+};
+module_platform_driver(sun8i_tcon_top_platform_driver);
+
+MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
+MODULE_DESCRIPTION("Allwinner R40 TCON TOP driver");
+MODULE_LICENSE("GPL");
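
The two exported helpers are the interface other components use once they hold the TCON TOP's struct device. A usage sketch, assuming (as the HDMI-source index check above implies) that TCON TV0 and TV1 are TCON indices 2 and 3, and with tcon_top_dev as a hypothetical handle:

	/* route mixer 0 to TCON index 2, then feed HDMI from that TCON */
	err = sun8i_tcon_top_de_config(tcon_top_dev, 0, 2);
	if (!err)
		err = sun8i_tcon_top_set_hdmi_src(tcon_top_dev, 2);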
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.h b/drivers/gpu/drm/sun4i/sun8i_tcon_top.h
new file mode 100644
index 000000000000..0390584a330e
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#ifndef _SUN8I_TCON_TOP_H_
+#define _SUN8I_TCON_TOP_H_
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define TCON_TOP_TCON_TV_SETUP_REG 0x00
+
+#define TCON_TOP_PORT_SEL_REG 0x1C
+#define TCON_TOP_PORT_DE0_MSK GENMASK(1, 0)
+#define TCON_TOP_PORT_DE1_MSK GENMASK(5, 4)
+
+#define TCON_TOP_GATE_SRC_REG 0x20
+#define TCON_TOP_HDMI_SRC_MSK GENMASK(29, 28)
+#define TCON_TOP_TCON_TV1_GATE 24
+#define TCON_TOP_TCON_TV0_GATE 20
+#define TCON_TOP_TCON_DSI_GATE 16
+
+#define CLK_NUM 3
+
+struct sun8i_tcon_top {
+ struct clk *bus;
+ struct clk_hw_onecell_data *clk_data;
+ void __iomem *regs;
+ struct reset_control *rst;
+
+ /*
+ * The spinlock is used to synchronize access to the same
+ * register where multiple clock gates can be set.
+ */
+ spinlock_t reg_lock;
+};
+
+extern const struct of_device_id sun8i_tcon_top_of_table[];
+
+int sun8i_tcon_top_set_hdmi_src(struct device *dev, int tcon);
+int sun8i_tcon_top_de_config(struct device *dev, int mixer, int tcon);
+
+#endif /* _SUN8I_TCON_TOP_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index 9a540330cb79..28c15c6ef1ef 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -27,7 +27,8 @@
#include "sun8i_ui_scaler.h"
static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
- int overlay, bool enable)
+ int overlay, bool enable, unsigned int zpos,
+ unsigned int old_zpos)
{
u32 val;
@@ -43,18 +44,36 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
SUN8I_MIXER_CHAN_UI_LAYER_ATTR(channel, overlay),
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN, val);
- if (enable)
- val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel);
- else
- val = 0;
+ if (!enable || zpos != old_zpos) {
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
+ 0);
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_PIPE_CTL,
- SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel), val);
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
+ 0);
+ }
+
+ if (enable) {
+ val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_PIPE_CTL, val, val);
+
+ val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
+ val);
+ }
}
static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
- int overlay, struct drm_plane *plane)
+ int overlay, struct drm_plane *plane,
+ unsigned int zpos)
{
struct drm_plane_state *state = plane->state;
u32 src_w, src_h, dst_w, dst_h;
@@ -137,10 +156,10 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_COORD(channel),
+ SUN8I_MIXER_BLEND_ATTR_COORD(zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_INSIZE(channel),
+ SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos),
outsize);
return 0;
@@ -236,30 +255,35 @@ static void sun8i_ui_layer_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
+ unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
- sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, false);
+ sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, false, 0,
+ old_zpos);
}
static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
+ unsigned int zpos = plane->state->normalized_zpos;
+ unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
if (!plane->state->visible) {
sun8i_ui_layer_enable(mixer, layer->channel,
- layer->overlay, false);
+ layer->overlay, false, 0, old_zpos);
return;
}
sun8i_ui_layer_update_coord(mixer, layer->channel,
- layer->overlay, plane);
+ layer->overlay, plane, zpos);
sun8i_ui_layer_update_formats(mixer, layer->channel,
layer->overlay, plane);
sun8i_ui_layer_update_buffer(mixer, layer->channel,
layer->overlay, plane);
- sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, true);
+ sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay,
+ true, zpos, old_zpos);
}
static struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = {
@@ -307,6 +331,7 @@ struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
int channel = mixer->cfg->vi_num + index;
struct sun8i_ui_layer *layer;
+ unsigned int plane_cnt;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
@@ -327,8 +352,10 @@ struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
return ERR_PTR(ret);
}
- /* fixed zpos for now */
- ret = drm_plane_create_zpos_immutable_property(&layer->plane, channel);
+ plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
+
+ ret = drm_plane_create_zpos_property(&layer->plane, channel,
+ 0, plane_cnt - 1);
if (ret) {
dev_err(drm->dev, "Couldn't add zpos property\n");
return ERR_PTR(ret);
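
With the blend pipe keyed on normalized zpos instead of the channel, a plane's pipe follows it as userspace reshuffles the stacking order: enabling channel 1 at zpos 2 sets PIPE_CTL bit BIT(8 + 2) and writes 1 into route nibble 2, and a zpos change first clears the old pipe's enable and route bits. The property setup the hunk switches to, sketched for a mixer with four planes total:

	/* zpos is now mutable over [0, plane_cnt - 1]; initial value = channel */
	ret = drm_plane_create_zpos_property(&layer->plane, channel, 0, 3);
	if (ret)
		return ERR_PTR(ret);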
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 5877f8ef5895..f4fe97813f94 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -21,7 +21,8 @@
#include "sun8i_vi_scaler.h"
static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
- int overlay, bool enable)
+ int overlay, bool enable, unsigned int zpos,
+ unsigned int old_zpos)
{
u32 val;
@@ -37,18 +38,36 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN, val);
- if (enable)
- val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel);
- else
- val = 0;
+ if (!enable || zpos != old_zpos) {
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
+ 0);
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_PIPE_CTL,
- SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel), val);
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
+ 0);
+ }
+
+ if (enable) {
+ val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_PIPE_CTL, val, val);
+
+ val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
+ val);
+ }
}
static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
- int overlay, struct drm_plane *plane)
+ int overlay, struct drm_plane *plane,
+ unsigned int zpos)
{
struct drm_plane_state *state = plane->state;
const struct drm_format_info *format = state->fb->format;
@@ -130,10 +149,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_COORD(channel),
+ SUN8I_MIXER_BLEND_ATTR_COORD(zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_INSIZE(channel),
+ SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos),
outsize);
return 0;
@@ -264,30 +283,35 @@ static void sun8i_vi_layer_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
+ unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
- sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, false);
+ sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, false, 0,
+ old_zpos);
}
static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
+ unsigned int zpos = plane->state->normalized_zpos;
+ unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
if (!plane->state->visible) {
sun8i_vi_layer_enable(mixer, layer->channel,
- layer->overlay, false);
+ layer->overlay, false, 0, old_zpos);
return;
}
sun8i_vi_layer_update_coord(mixer, layer->channel,
- layer->overlay, plane);
+ layer->overlay, plane, zpos);
sun8i_vi_layer_update_formats(mixer, layer->channel,
layer->overlay, plane);
sun8i_vi_layer_update_buffer(mixer, layer->channel,
layer->overlay, plane);
- sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, true);
+ sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay,
+ true, zpos, old_zpos);
}
static struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = {
@@ -351,6 +375,7 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
int index)
{
struct sun8i_vi_layer *layer;
+ unsigned int plane_cnt;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
@@ -368,8 +393,10 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
return ERR_PTR(ret);
}
- /* fixed zpos for now */
- ret = drm_plane_create_zpos_immutable_property(&layer->plane, index);
+ plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
+
+ ret = drm_plane_create_zpos_property(&layer->plane, index,
+ 0, plane_cnt - 1);
if (ret) {
dev_err(drm->dev, "Couldn't add zpos property\n");
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index c3afe7b2237e..965088afcfad 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2312,7 +2312,7 @@ static int tegra_dc_couple(struct tegra_dc *dc)
* POWER_CONTROL registers during CRTC enabling.
*/
if (dc->soc->coupled_pm && dc->pipe == 1) {
- u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE;
+ u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
struct device_link *link;
struct device *partner;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 87c5d89bc9ba..ee6ca8fa1c65 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1052,7 +1052,7 @@ static int tegra_dsi_init(struct host1x_client *client)
drm_encoder_helper_add(&dsi->output.encoder,
&tegra_dsi_encoder_helper_funcs);
- drm_mode_connector_attach_encoder(&dsi->output.connector,
+ drm_connector_attach_encoder(&dsi->output.connector,
&dsi->output.encoder);
drm_connector_register(&dsi->output.connector);
@@ -1411,6 +1411,9 @@ static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
struct tegra_output *output = &dsi->output;
output->panel = of_drm_find_panel(device->dev.of_node);
+ if (IS_ERR(output->panel))
+ output->panel = NULL;
+
if (output->panel && output->connector.dev) {
drm_panel_attach(output->panel, &output->connector);
drm_helper_hpd_irq_event(output->connector.dev);
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 00a5c9f32254..4f80100ff5f3 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -582,18 +582,6 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
return 0;
}
-static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
- unsigned long page)
-{
- return NULL;
-}
-
-static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
- unsigned long page,
- void *addr)
-{
-}
-
static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
return NULL;
@@ -634,8 +622,6 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
.release = tegra_gem_prime_release,
.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
.end_cpu_access = tegra_gem_prime_end_cpu_access,
- .map_atomic = tegra_gem_prime_kmap_atomic,
- .unmap_atomic = tegra_gem_prime_kunmap_atomic,
.map = tegra_gem_prime_kmap,
.unmap = tegra_gem_prime_kunmap,
.mmap = tegra_gem_prime_mmap,
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 784739a9f497..0082468f703c 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1488,7 +1488,7 @@ static int tegra_hdmi_init(struct host1x_client *client)
drm_encoder_helper_add(&hdmi->output.encoder,
&tegra_hdmi_encoder_helper_funcs);
- drm_mode_connector_attach_encoder(&hdmi->output.connector,
+ drm_connector_attach_encoder(&hdmi->output.connector,
&hdmi->output.encoder);
drm_connector_register(&hdmi->output.connector);
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index ffe34bd0bb9d..c662efc7e413 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -37,7 +37,7 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, output->ddc);
cec_notifier_set_phys_addr_from_edid(output->notifier, edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
err = drm_add_edid_modes(connector, edid);
@@ -110,8 +110,8 @@ int tegra_output_probe(struct tegra_output *output)
panel = of_parse_phandle(output->of_node, "nvidia,panel", 0);
if (panel) {
output->panel = of_drm_find_panel(panel);
- if (!output->panel)
- return -EPROBE_DEFER;
+ if (IS_ERR(output->panel))
+ return PTR_ERR(output->panel);
of_node_put(panel);
}
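
[Note: both hunks above track the of_drm_find_panel() interface change: it now returns an ERR_PTR() instead of NULL, so callers either propagate the code, as tegra_output_probe() does, or treat any error as "no panel", as the DSI host-attach path does. A sketch of the two idioms, with illustrative example_* names:]

#include <linux/err.h>
#include <drm/drm_panel.h>

static int example_strict(struct device_node *np, struct drm_panel **out)
{
	struct drm_panel *panel = of_drm_find_panel(np);

	if (IS_ERR(panel))
		return PTR_ERR(panel);	/* e.g. -EPROBE_DEFER, -ENODEV */

	*out = panel;
	return 0;
}

static void example_lenient(struct device_node *np, struct drm_panel **out)
{
	struct drm_panel *panel = of_drm_find_panel(np);

	/* optional panel: any error just means "none attached" */
	*out = IS_ERR(panel) ? NULL : panel;
}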
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 78ec5193741d..28a78d3120bc 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -289,7 +289,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
drm_encoder_helper_add(&output->encoder,
&tegra_rgb_encoder_helper_funcs);
- drm_mode_connector_attach_encoder(&output->connector,
+ drm_connector_attach_encoder(&output->connector,
&output->encoder);
drm_connector_register(&output->connector);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7d2a955fc515..d7fe9f15def1 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2622,7 +2622,7 @@ static int tegra_sor_init(struct host1x_client *client)
encoder, NULL);
drm_encoder_helper_add(&sor->output.encoder, helpers);
- drm_mode_connector_attach_encoder(&sor->output.connector,
+ drm_connector_attach_encoder(&sor->output.connector,
&sor->output.encoder);
drm_connector_register(&sor->output.connector);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index b8a5e4ed22e6..0fb300d41a09 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -378,7 +378,7 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
if (!priv->external_connector &&
((priv->num_encoders == 0) || (priv->num_connectors == 0))) {
dev_err(dev, "no encoders/connectors found\n");
- ret = -ENXIO;
+ ret = -EPROBE_DEFER;
goto init_failed;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index d651bdd6597e..b4eaf9bc87f8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -103,12 +103,11 @@ struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
struct drm_encoder *encoder)
{
struct drm_connector *connector;
- int i;
- list_for_each_entry(connector, &ddev->mode_config.connector_list, head)
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
- if (connector->encoder_ids[i] == encoder->base.id)
- return connector;
+ list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+ if (drm_connector_has_possible_encoder(connector, encoder))
+ return connector;
+ }
dev_err(ddev->dev, "No connector found for %s encoder (id %d)\n",
encoder->name, encoder->base.id);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index d616d64a6725..a1acab39d87f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -223,7 +223,7 @@ static struct drm_connector *panel_connector_create(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index c45cabb38db0..daebf1aa6b0a 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -173,7 +173,7 @@ static int tfp410_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, tfp410_connector->mod->i2c);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
ret = drm_add_edid_modes(connector, edid);
@@ -240,7 +240,7 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
index 4592a5e3f20b..16f4b5c91f1b 100644
--- a/drivers/gpu/drm/tinydrm/Kconfig
+++ b/drivers/gpu/drm/tinydrm/Kconfig
@@ -20,6 +20,17 @@ config TINYDRM_ILI9225
If M is selected the module will be called ili9225.
+config TINYDRM_ILI9341
+ tristate "DRM support for ILI9341 display panels"
+ depends on DRM_TINYDRM && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select TINYDRM_MIPI_DBI
+ help
+ DRM driver for the following Ilitek ILI9341 panels:
+ * YX240QV29-T 2.4" 240x320 TFT (Adafruit 2.4")
+
+ If M is selected the module will be called ili9341.
+
config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
depends on DRM_TINYDRM && SPI
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile
index 49a111929724..14d99080665a 100644
--- a/drivers/gpu/drm/tinydrm/Makefile
+++ b/drivers/gpu/drm/tinydrm/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o
# Displays
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
+obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 24a33bf862fa..19c7f70adfa5 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -204,7 +204,7 @@ static int tinydrm_register(struct tinydrm_device *tdev)
if (ret)
return ret;
- ret = drm_fb_cma_fbdev_init_with_funcs(drm, 0, 0, tdev->fb_funcs);
+ ret = drm_fbdev_generic_setup(drm, 0);
if (ret)
DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
@@ -214,7 +214,6 @@ static int tinydrm_register(struct tinydrm_device *tdev)
static void tinydrm_unregister(struct tinydrm_device *tdev)
{
drm_atomic_helper_shutdown(tdev->drm);
- drm_fb_cma_fbdev_fini(tdev->drm);
drm_dev_unregister(tdev->drm);
}
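
[Note: tinydrm moves here from the per-driver CMA fbdev paths, drm_fb_cma_fbdev_init_with_funcs()/_fini() plus the .lastclose hooks removed in the hunks that follow, to the generic fbdev emulation, which handles its own teardown and lastclose. A minimal sketch of the new registration flow; the function name is illustrative:]

static int example_register(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* 0 = use the device's preferred bpp */
	ret = drm_fbdev_generic_setup(drm, 0);
	if (ret)
		DRM_ERROR("Failed to initialize fbdev: %d\n", ret);

	/* no drm_fb_cma_fbdev_fini() needed on the unregister path */
	return 0;
}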
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 841c69aba059..455fefe012f5 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -368,7 +368,6 @@ static struct drm_driver ili9225_driver = {
DRIVER_ATOMIC,
.fops = &ili9225_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = drm_fb_helper_lastclose,
.name = "ili9225",
.desc = "Ilitek ILI9225",
.date = "20171106",
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c
new file mode 100644
index 000000000000..6701037749a7
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/ili9341.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for Ilitek ILI9341 panels
+ *
+ * Copyright 2018 David Lechner <david@lechnology.com>
+ *
+ * Based on mi0283qt.c:
+ * Copyright 2016 Noralf Trønnes
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <video/mipi_display.h>
+
+#define ILI9341_FRMCTR1 0xb1
+#define ILI9341_DISCTRL 0xb6
+#define ILI9341_ETMOD 0xb7
+
+#define ILI9341_PWCTRL1 0xc0
+#define ILI9341_PWCTRL2 0xc1
+#define ILI9341_VMCTRL1 0xc5
+#define ILI9341_VMCTRL2 0xc7
+#define ILI9341_PWCTRLA 0xcb
+#define ILI9341_PWCTRLB 0xcf
+
+#define ILI9341_PGAMCTRL 0xe0
+#define ILI9341_NGAMCTRL 0xe1
+#define ILI9341_DTCTRLA 0xe8
+#define ILI9341_DTCTRLB 0xea
+#define ILI9341_PWRSEQ 0xed
+
+#define ILI9341_EN3GAM 0xf2
+#define ILI9341_PUMPCTRL 0xf7
+
+#define ILI9341_MADCTL_BGR BIT(3)
+#define ILI9341_MADCTL_MV BIT(5)
+#define ILI9341_MADCTL_MX BIT(6)
+#define ILI9341_MADCTL_MY BIT(7)
+
+static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ u8 addr_mode;
+ int ret;
+
+ DRM_DEBUG_KMS("\n");
+
+ ret = mipi_dbi_poweron_conditional_reset(mipi);
+ if (ret < 0)
+ return;
+ if (ret == 1)
+ goto out_enable;
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+
+ mipi_dbi_command(mipi, ILI9341_PWCTRLB, 0x00, 0xc1, 0x30);
+ mipi_dbi_command(mipi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
+ mipi_dbi_command(mipi, ILI9341_DTCTRLA, 0x85, 0x00, 0x78);
+ mipi_dbi_command(mipi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
+ mipi_dbi_command(mipi, ILI9341_PUMPCTRL, 0x20);
+ mipi_dbi_command(mipi, ILI9341_DTCTRLB, 0x00, 0x00);
+
+ /* Power Control */
+ mipi_dbi_command(mipi, ILI9341_PWCTRL1, 0x23);
+ mipi_dbi_command(mipi, ILI9341_PWCTRL2, 0x10);
+ /* VCOM */
+ mipi_dbi_command(mipi, ILI9341_VMCTRL1, 0x3e, 0x28);
+ mipi_dbi_command(mipi, ILI9341_VMCTRL2, 0x86);
+
+ /* Memory Access Control */
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+
+ /* Frame Rate */
+ mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b);
+
+ /* Gamma */
+ mipi_dbi_command(mipi, ILI9341_EN3GAM, 0x00);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
+ mipi_dbi_command(mipi, ILI9341_PGAMCTRL,
+ 0x0f, 0x31, 0x2b, 0x0c, 0x0e, 0x08, 0x4e, 0xf1,
+ 0x37, 0x07, 0x10, 0x03, 0x0e, 0x09, 0x00);
+ mipi_dbi_command(mipi, ILI9341_NGAMCTRL,
+ 0x00, 0x0e, 0x14, 0x03, 0x11, 0x07, 0x31, 0xc1,
+ 0x48, 0x08, 0x0f, 0x0c, 0x31, 0x36, 0x0f);
+
+ /* DDRAM */
+ mipi_dbi_command(mipi, ILI9341_ETMOD, 0x07);
+
+ /* Display */
+ mipi_dbi_command(mipi, ILI9341_DISCTRL, 0x08, 0x82, 0x27, 0x00);
+ mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(100);
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ msleep(100);
+
+out_enable:
+ switch (mipi->rotation) {
+ default:
+ addr_mode = ILI9341_MADCTL_MX;
+ break;
+ case 90:
+ addr_mode = ILI9341_MADCTL_MV;
+ break;
+ case 180:
+ addr_mode = ILI9341_MADCTL_MY;
+ break;
+ case 270:
+ addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
+ ILI9341_MADCTL_MX;
+ break;
+ }
+ addr_mode |= ILI9341_MADCTL_BGR;
+ mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+}
+
+static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
+ .enable = yx240qv29_enable,
+ .disable = mipi_dbi_pipe_disable,
+ .update = tinydrm_display_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode yx240qv29_mode = {
+ TINYDRM_MODE(240, 320, 37, 49),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
+
+static struct drm_driver ili9341_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+ .fops = &ili9341_fops,
+ TINYDRM_GEM_DRIVER_OPS,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "ili9341",
+ .desc = "Ilitek ILI9341",
+ .date = "20180514",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id ili9341_of_match[] = {
+ { .compatible = "adafruit,yx240qv29" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ili9341_of_match);
+
+static const struct spi_device_id ili9341_id[] = {
+ { "yx240qv29", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ili9341_id);
+
+static int ili9341_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dbi *mipi;
+ struct gpio_desc *dc;
+ u32 rotation = 0;
+ int ret;
+
+ mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ if (!mipi)
+ return -ENOMEM;
+
+ mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(mipi->reset)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
+ return PTR_ERR(mipi->reset);
+ }
+
+ dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+ return PTR_ERR(dc);
+ }
+
+ mipi->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(mipi->backlight))
+ return PTR_ERR(mipi->backlight);
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, mipi, dc);
+ if (ret)
+ return ret;
+
+ ret = mipi_dbi_init(&spi->dev, mipi, &ili9341_pipe_funcs,
+ &ili9341_driver, &yx240qv29_mode, rotation);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, mipi);
+
+ return devm_tinydrm_register(&mipi->tinydrm);
+}
+
+static void ili9341_shutdown(struct spi_device *spi)
+{
+ struct mipi_dbi *mipi = spi_get_drvdata(spi);
+
+ tinydrm_shutdown(&mipi->tinydrm);
+}
+
+static struct spi_driver ili9341_spi_driver = {
+ .driver = {
+ .name = "ili9341",
+ .of_match_table = ili9341_of_match,
+ },
+ .id_table = ili9341_id,
+ .probe = ili9341_probe,
+ .shutdown = ili9341_shutdown,
+};
+module_spi_driver(ili9341_spi_driver);
+
+MODULE_DESCRIPTION("Ilitek ILI9341 DRM driver");
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 015d03f2acba..d7bb4c5e6657 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -154,7 +154,6 @@ static struct drm_driver mi0283qt_driver = {
DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = drm_fb_helper_lastclose,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index 4d1fb31a781f..cb3441e51d5f 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -260,6 +260,8 @@ static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
/**
* mipi_dbi_enable_flush - MIPI DBI enable helper
* @mipi: MIPI DBI structure
+ * @crtc_state: CRTC state
+ * @plane_state: Plane state
*
* This function sets &mipi_dbi->enabled, flushes the whole framebuffer and
* enables the backlight. Drivers can use this in their
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 5c29e3803ecb..2fcbc3067d71 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -304,7 +304,6 @@ static struct drm_driver st7586_driver = {
DRIVER_ATOMIC,
.fops = &st7586_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = drm_fb_helper_lastclose,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 6c7b15c9da4f..3081bc57c116 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -120,7 +120,6 @@ static struct drm_driver st7735r_driver = {
DRIVER_ATOMIC,
.fops = &st7735r_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = drm_fb_helper_lastclose,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
.desc = "Sitronix ST7735R",
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5d8688e522d1..7c484729f9b2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -287,12 +287,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (ret) {
if (bdev->driver->move_notify) {
- struct ttm_mem_reg tmp_mem = *mem;
- *mem = bo->mem;
- bo->mem = tmp_mem;
+ swap(*mem, bo->mem);
bdev->driver->move_notify(bo, false, mem);
- bo->mem = *mem;
- *mem = tmp_mem;
+ swap(*mem, bo->mem);
}
goto out_err;
@@ -590,12 +587,18 @@ static void ttm_bo_release(struct kref *kref)
kref_put(&bo->list_kref, ttm_bo_release_list);
}
+void ttm_bo_put(struct ttm_buffer_object *bo)
+{
+ kref_put(&bo->kref, ttm_bo_release);
+}
+EXPORT_SYMBOL(ttm_bo_put);
+
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo = *p_bo;
*p_bo = NULL;
- kref_put(&bo->kref, ttm_bo_release);
+ ttm_bo_put(bo);
}
EXPORT_SYMBOL(ttm_bo_unref);
@@ -1201,7 +1204,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
if (!resv)
ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
return ret;
}
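
[Note: ttm_bo_put() is introduced as the plain counterpart to ttm_bo_get(), replacing the pointer-clearing ttm_bo_unref() throughout the later hunks. A sketch of the pairing; the function name is illustrative:]

static void example_borrow(struct ttm_buffer_object *bo)
{
	ttm_bo_get(bo);		/* kref_get() on an object known to be live */

	/* ... safely use bo across a sleep or unlock window ... */

	ttm_bo_put(bo);		/* drop it; may free bo via ttm_bo_release() */
}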
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index f2c167702eef..046a6dda690a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -463,7 +463,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
struct ttm_transfer_obj *fbo;
fbo = container_of(bo, struct ttm_transfer_obj, base);
- ttm_bo_unref(&fbo->bo);
+ ttm_bo_put(fbo->bo);
kfree(fbo);
}
@@ -492,8 +492,9 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (!fbo)
return -ENOMEM;
+ ttm_bo_get(bo);
fbo->base = *bo;
- fbo->bo = ttm_bo_reference(bo);
+ fbo->bo = bo;
/**
* Fix up members that we shouldn't copy directly:
@@ -730,7 +731,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
bo->ttm = NULL;
ttm_bo_unreserve(ghost_obj);
- ttm_bo_unref(&ghost_obj);
+ ttm_bo_put(ghost_obj);
}
*old_mem = *new_mem;
@@ -786,7 +787,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
bo->ttm = NULL;
ttm_bo_unreserve(ghost_obj);
- ttm_bo_unref(&ghost_obj);
+ ttm_bo_put(ghost_obj);
} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
@@ -851,7 +852,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
bo->ttm = NULL;
ttm_bo_unreserve(ghost);
- ttm_bo_unref(&ghost);
+ ttm_bo_put(ghost);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index c7ece7613a6a..6fe91c1b692d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -44,10 +44,11 @@
#define TTM_BO_VM_NUM_PREFAULT 16
-static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
- int ret = 0;
+ vm_fault_t ret = 0;
+ int err = 0;
if (likely(!bo->moving))
goto out_unlock;
@@ -67,20 +68,20 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out_unlock;
- ttm_bo_reference(bo);
+ ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
(void) dma_fence_wait(bo->moving, true);
ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
goto out_unlock;
}
/*
* Ordinary wait.
*/
- ret = dma_fence_wait(bo->moving, true);
- if (unlikely(ret != 0)) {
- ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+ err = dma_fence_wait(bo->moving, true);
+ if (unlikely(err != 0)) {
+ ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
goto out_unlock;
}
@@ -105,7 +106,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
+ page_offset;
}
-static int ttm_bo_vm_fault(struct vm_fault *vmf)
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -116,8 +117,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
unsigned long pfn;
struct ttm_tt *ttm = NULL;
struct page *page;
- int ret;
+ int err;
int i;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
unsigned long address = vmf->address;
struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type];
@@ -129,17 +131,17 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
* for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved.
*/
- ret = ttm_bo_reserve(bo, true, true, NULL);
- if (unlikely(ret != 0)) {
- if (ret != -EBUSY)
+ err = ttm_bo_reserve(bo, true, true, NULL);
+ if (unlikely(err != 0)) {
+ if (err != -EBUSY)
return VM_FAULT_NOPAGE;
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- ttm_bo_reference(bo);
+ ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
(void) ttm_bo_wait_unreserved(bo);
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
}
return VM_FAULT_RETRY;
@@ -163,8 +165,8 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
}
if (bdev->driver->fault_reserve_notify) {
- ret = bdev->driver->fault_reserve_notify(bo);
- switch (ret) {
+ err = bdev->driver->fault_reserve_notify(bo);
+ switch (err) {
case 0:
break;
case -EBUSY:
@@ -192,13 +194,13 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
goto out_unlock;
}
- ret = ttm_mem_io_lock(man, true);
- if (unlikely(ret != 0)) {
+ err = ttm_mem_io_lock(man, true);
+ if (unlikely(err != 0)) {
ret = VM_FAULT_NOPAGE;
goto out_unlock;
}
- ret = ttm_mem_io_reserve_vm(bo);
- if (unlikely(ret != 0)) {
+ err = ttm_mem_io_reserve_vm(bo);
+ if (unlikely(err != 0)) {
ret = VM_FAULT_SIGBUS;
goto out_io_unlock;
}
@@ -266,23 +268,20 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
}
if (vma->vm_flags & VM_MIXEDMAP)
- ret = vm_insert_mixed(&cvma, address,
+ ret = vmf_insert_mixed(&cvma, address,
__pfn_to_pfn_t(pfn, PFN_DEV));
else
- ret = vm_insert_pfn(&cvma, address, pfn);
+ ret = vmf_insert_pfn(&cvma, address, pfn);
/*
* Somebody beat us to this PTE or prefaulting to
* an already populated PTE, or prefaulting error.
*/
- if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+ if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
break;
- else if (unlikely(ret != 0)) {
- ret =
- (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+ else if (unlikely(ret & VM_FAULT_ERROR))
goto out_io_unlock;
- }
address += PAGE_SIZE;
if (unlikely(++page_offset >= page_last))
@@ -303,14 +302,14 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
- (void)ttm_bo_reference(bo);
+ ttm_bo_get(bo);
}
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
vma->vm_private_data = NULL;
}
@@ -462,7 +461,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
out_unref:
- ttm_bo_unref(&bo);
+ ttm_bo_put(bo);
return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
@@ -472,8 +471,10 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
if (vma->vm_pgoff != 0)
return -EACCES;
+ ttm_bo_get(bo);
+
vma->vm_ops = &ttm_bo_vm_ops;
- vma->vm_private_data = ttm_bo_reference(bo);
+ vma->vm_private_data = bo;
vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND;
return 0;
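
[Note: the whole file above illustrates the tree-wide vm_fault_t conversion: integer kernel error codes (err) stay separate from fault status bits (ret), and vm_insert_*() calls become vmf_insert_*() variants that already return vm_fault_t. A minimal sketch of the pattern; hypothetical_setup() is a made-up stand-in for any errno-returning helper:]

#include <linux/mm.h>
#include <linux/mm_types.h>

/* stand-in for any errno-returning helper (hypothetical) */
static int hypothetical_setup(struct vm_fault *vmf)
{
	return 0;
}

static vm_fault_t example_fault(struct vm_fault *vmf, unsigned long pfn)
{
	int err;

	err = hypothetical_setup(vmf);	/* returns 0 or -errno */
	if (err)
		return (err == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;

	/* already returns VM_FAULT_* codes, no translation needed */
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}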
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 6e2d1300b457..f841accc2c00 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -47,13 +47,7 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 16
@@ -222,52 +216,6 @@ static struct kobj_type ttm_pool_kobj_type = {
static struct ttm_pool_manager *_manager;
-#ifndef CONFIG_X86
-static int set_pages_wb(struct page *page, int numpages)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < numpages; i++)
- unmap_page_from_agp(page++);
-#endif
- return 0;
-}
-
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-#endif
-
/**
* Select the right pool or requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
@@ -302,13 +250,13 @@ static void ttm_pages_put(struct page *pages[], unsigned npages,
unsigned int i, pages_nr = (1 << order);
if (order == 0) {
- if (set_pages_array_wb(pages, npages))
+ if (ttm_set_pages_array_wb(pages, npages))
pr_err("Failed to set %d pages to wb!\n", npages);
}
for (i = 0; i < npages; ++i) {
if (order > 0) {
- if (set_pages_wb(pages[i], pages_nr))
+ if (ttm_set_pages_wb(pages[i], pages_nr))
pr_err("Failed to set %d pages to wb!\n", pages_nr);
}
__free_pages(pages[i], order);
@@ -498,12 +446,12 @@ static int ttm_set_pages_caching(struct page **pages,
/* Set page caching */
switch (cstate) {
case tt_uncached:
- r = set_pages_array_uc(pages, cpages);
+ r = ttm_set_pages_array_uc(pages, cpages);
if (r)
pr_err("Failed to set %d pages to uc!\n", cpages);
break;
case tt_wc:
- r = set_pages_array_wc(pages, cpages);
+ r = ttm_set_pages_array_wc(pages, cpages);
if (r)
pr_err("Failed to set %d pages to wc!\n", cpages);
break;
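
[Note: these hunks, and the matching ones in ttm_page_alloc_dma.c and ttm_tt.c below, hoist the duplicated !CONFIG_X86 fallbacks into a shared <drm/ttm/ttm_set_memory.h>. Roughly, the header looks like this hedged reconstruction, based on the code being deleted, with analogous _wc/_uc and single-page variants:]

#ifdef CONFIG_X86
#include <asm/set_memory.h>

static inline int ttm_set_pages_array_wb(struct page **pages, int n)
{
	return set_pages_array_wb(pages, n);
}
#else /* !CONFIG_X86 */
#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif

static inline int ttm_set_pages_array_wb(struct page **pages, int n)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < n; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}
#endif /* CONFIG_X86 */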
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 3f14c1cc0789..507be7ac1165 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -50,12 +50,7 @@
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 4
@@ -268,54 +263,19 @@ static struct kobj_type ttm_pool_kobj_type = {
.default_attrs = ttm_pool_attrs,
};
-#ifndef CONFIG_X86
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-#endif /* for !CONFIG_X86 */
-
static int ttm_set_pages_caching(struct dma_pool *pool,
struct page **pages, unsigned cpages)
{
int r = 0;
/* Set page caching */
if (pool->type & IS_UC) {
- r = set_pages_array_uc(pages, cpages);
+ r = ttm_set_pages_array_uc(pages, cpages);
if (r)
pr_err("%s: Failed to set %d pages to uc!\n",
pool->dev_name, cpages);
}
if (pool->type & IS_WC) {
- r = set_pages_array_wc(pages, cpages);
+ r = ttm_set_pages_array_wc(pages, cpages);
if (r)
pr_err("%s: Failed to set %d pages to wc!\n",
pool->dev_name, cpages);
@@ -389,17 +349,14 @@ static void ttm_pool_update_free_locked(struct dma_pool *pool,
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
struct page *page = d_page->p;
- unsigned i, num_pages;
+ unsigned num_pages;
/* Don't set WB on WB page pool. */
if (!(pool->type & IS_CACHED)) {
num_pages = pool->size / PAGE_SIZE;
- for (i = 0; i < num_pages; ++i, ++page) {
- if (set_pages_array_wb(&page, 1)) {
- pr_err("%s: Failed to set %d pages to wb!\n",
- pool->dev_name, 1);
- }
- }
+ if (ttm_set_pages_wb(page, num_pages))
+ pr_err("%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, num_pages);
}
list_del(&d_page->page_list);
@@ -420,7 +377,7 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
/* Don't set WB on WB page pool. */
if (npages && !(pool->type & IS_CACHED) &&
- set_pages_array_wb(pages, npages))
+ ttm_set_pages_array_wb(pages, npages))
pr_err("%s: Failed to set %d pages to wb!\n",
pool->dev_name, npages);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a1e543972ca7..e3a0691582ff 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -38,9 +38,7 @@
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
/**
* Allocates a ttm structure for the given BO.
@@ -115,10 +113,9 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
return 0;
}
-#ifdef CONFIG_X86
-static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_old,
- enum ttm_caching_state c_new)
+static int ttm_tt_set_page_caching(struct page *p,
+ enum ttm_caching_state c_old,
+ enum ttm_caching_state c_new)
{
int ret = 0;
@@ -129,26 +126,18 @@ static inline int ttm_tt_set_page_caching(struct page *p,
/* p isn't in the default caching state, set it to
* writeback first to free its current memtype. */
- ret = set_pages_wb(p, 1);
+ ret = ttm_set_pages_wb(p, 1);
if (ret)
return ret;
}
if (c_new == tt_wc)
- ret = set_memory_wc((unsigned long) page_address(p), 1);
+ ret = ttm_set_pages_wc(p, 1);
else if (c_new == tt_uncached)
- ret = set_pages_uc(p, 1);
+ ret = ttm_set_pages_uc(p, 1);
return ret;
}
-#else /* CONFIG_X86 */
-static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_old,
- enum ttm_caching_state c_new)
-{
- return 0;
-}
-#endif /* CONFIG_X86 */
/*
* Change caching policy for the linear kernel map
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 09dc585aa46f..68e88bed77ca 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -99,7 +99,7 @@ static int udl_get_modes(struct drm_connector *connector)
struct udl_drm_connector,
connector);
- drm_mode_connector_update_edid_property(connector, udl_connector->edid);
+ drm_connector_update_edid_property(connector, udl_connector->edid);
if (udl_connector->edid)
return drm_add_edid_modes(connector, udl_connector->edid);
return 0;
@@ -200,7 +200,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
drm_connector_helper_add(connector, &udl_connector_helper_funcs);
drm_connector_register(connector);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
connector->polled = DRM_CONNECTOR_POLL_HPD |
DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index 0a20695eb120..556f62662aa9 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -29,7 +29,6 @@ struct udl_drm_dmabuf_attachment {
};
static int udl_attach_dma_buf(struct dma_buf *dmabuf,
- struct device *dev,
struct dma_buf_attachment *attach)
{
struct udl_drm_dmabuf_attachment *udl_attach;
@@ -158,27 +157,12 @@ static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
return NULL;
}
-static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- /* TODO */
-
- return NULL;
-}
-
static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
unsigned long page_num, void *addr)
{
/* TODO */
}
-static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num,
- void *addr)
-{
- /* TODO */
-}
-
static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
struct vm_area_struct *vma)
{
@@ -193,9 +177,7 @@ static const struct dma_buf_ops udl_dmabuf_ops = {
.map_dma_buf = udl_map_dma_buf,
.unmap_dma_buf = udl_unmap_dma_buf,
.map = udl_dmabuf_kmap,
- .map_atomic = udl_dmabuf_kmap_atomic,
.unmap = udl_dmabuf_kunmap,
- .unmap_atomic = udl_dmabuf_kunmap_atomic,
.mmap = udl_dmabuf_mmap,
.release = drm_gem_dmabuf_release,
};
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 55c0cc309198..e9e9b1ff678e 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -16,6 +16,7 @@
#include <linux/usb.h>
#include <drm/drm_gem.h>
+#include <linux/mm_types.h>
#define DRIVER_NAME "udl"
#define DRIVER_DESC "DisplayLink"
@@ -112,7 +113,7 @@ udl_fb_user_fb_create(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd);
-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
+int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset, u32 byte_width,
int *ident_ptr, int *sent_ptr);
@@ -136,7 +137,7 @@ void udl_gem_put_pages(struct udl_gem_object *obj);
int udl_gem_vmap(struct udl_gem_object *obj);
void udl_gem_vunmap(struct udl_gem_object *obj);
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int udl_gem_fault(struct vm_fault *vmf);
+vm_fault_t udl_gem_fault(struct vm_fault *vmf);
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int width, int height);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index d5583190f3e4..dbb62f6eb48a 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -90,7 +90,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int bytes_identical = 0;
struct urb *urb;
int aligned_x;
- int bpp = fb->base.format->cpp[0];
+ int log_bpp;
+
+ BUG_ON(!is_power_of_2(fb->base.format->cpp[0]));
+ log_bpp = __ffs(fb->base.format->cpp[0]);
if (!fb->active_16)
return 0;
@@ -125,12 +128,12 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
for (i = y; i < y + height ; i++) {
const int line_offset = fb->base.pitches[0] * i;
- const int byte_offset = line_offset + (x * bpp);
- const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
- if (udl_render_hline(dev, bpp, &urb,
+ const int byte_offset = line_offset + (x << log_bpp);
+ const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
+ if (udl_render_hline(dev, log_bpp, &urb,
(char *) fb->obj->vmapping,
&cmd, byte_offset, dev_byte_offset,
- width * bpp,
+ width << log_bpp,
&bytes_identical, &bytes_sent))
goto error;
}
@@ -149,7 +152,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
error:
atomic_add(bytes_sent, &udl->bytes_sent);
atomic_add(bytes_identical, &udl->bytes_identical);
- atomic_add(width*height*bpp, &udl->bytes_rendered);
+ atomic_add((width * height) << log_bpp, &udl->bytes_rendered);
end_cycles = get_cycles();
atomic_add(((unsigned int) ((end_cycles - start_cycles)
>> 10)), /* Kcycles */
@@ -175,7 +178,7 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
pos = (unsigned long)info->fix.smem_start + offset;
- pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
+ pr_debug("mmap() framebuffer addr:%lu size:%lu\n",
pos, size);
/* We don't want the framebuffer to be mapped encrypted */
@@ -221,7 +224,7 @@ static int udl_fb_open(struct fb_info *info, int user)
struct fb_deferred_io *fbdefio;
- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
if (fbdefio) {
fbdefio->delay = DL_DEFIO_WRITE_DELAY;
@@ -233,7 +236,7 @@ static int udl_fb_open(struct fb_info *info, int user)
}
#endif
- pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
+ pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d\n",
info->node, user, info, ufbdev->fb_count);
return 0;
@@ -258,7 +261,7 @@ static int udl_fb_release(struct fb_info *info, int user)
}
#endif
- pr_warn("released /dev/fb%d user=%d count=%d\n",
+ pr_debug("released /dev/fb%d user=%d count=%d\n",
info->node, user, ufbdev->fb_count);
return 0;
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 9a15cce22cce..d5a23295dd80 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -100,13 +100,12 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
-int udl_gem_fault(struct vm_fault *vmf)
+vm_fault_t udl_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
struct page *page;
unsigned int page_offset;
- int ret = 0;
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
@@ -114,17 +113,7 @@ int udl_gem_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
page = obj->pages[page_offset];
- ret = vm_insert_page(vma, vmf->address, page);
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return vmf_insert_page(vma, vmf->address, page);
}
int udl_gem_get_pages(struct udl_gem_object *obj)
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index d518de8f496b..f455f095a146 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -170,25 +170,19 @@ static void udl_free_urb_list(struct drm_device *dev)
struct list_head *node;
struct urb_node *unode;
struct urb *urb;
- int ret;
- unsigned long flags;
DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
/* keep waiting and freeing, until we've got 'em all */
while (count--) {
+ down(&udl->urbs.limit_sem);
- /* Getting interrupted means a leak, but ok at shutdown*/
- ret = down_interruptible(&udl->urbs.limit_sem);
- if (ret)
- break;
-
- spin_lock_irqsave(&udl->urbs.lock, flags);
+ spin_lock_irq(&udl->urbs.lock);
node = udl->urbs.list.next; /* have reserved one with sem */
list_del_init(node);
- spin_unlock_irqrestore(&udl->urbs.lock, flags);
+ spin_unlock_irq(&udl->urbs.lock);
unode = list_entry(node, struct urb_node, entry);
urb = unode->urb;
@@ -205,17 +199,22 @@ static void udl_free_urb_list(struct drm_device *dev)
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
struct udl_device *udl = dev->dev_private;
- int i = 0;
struct urb *urb;
struct urb_node *unode;
char *buf;
+ size_t wanted_size = count * size;
spin_lock_init(&udl->urbs.lock);
+retry:
udl->urbs.size = size;
INIT_LIST_HEAD(&udl->urbs.list);
- while (i < count) {
+ sema_init(&udl->urbs.limit_sem, 0);
+ udl->urbs.count = 0;
+ udl->urbs.available = 0;
+
+ while (udl->urbs.count * size < wanted_size) {
unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
if (!unode)
break;
@@ -231,11 +230,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
}
unode->urb = urb;
- buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
+ buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
kfree(unode);
usb_free_urb(urb);
+ if (size > PAGE_SIZE) {
+ size /= 2;
+ udl_free_urb_list(dev);
+ goto retry;
+ }
break;
}
@@ -246,16 +250,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
list_add_tail(&unode->entry, &udl->urbs.list);
- i++;
+ up(&udl->urbs.limit_sem);
+ udl->urbs.count++;
+ udl->urbs.available++;
}
- sema_init(&udl->urbs.limit_sem, i);
- udl->urbs.count = i;
- udl->urbs.available = i;
-
- DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
+ DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);
- return i;
+ return udl->urbs.count;
}
struct urb *udl_get_urb(struct drm_device *dev)
@@ -265,7 +267,6 @@ struct urb *udl_get_urb(struct drm_device *dev)
struct list_head *entry;
struct urb_node *unode;
struct urb *urb = NULL;
- unsigned long flags;
/* Wait for an in-flight buffer to complete and get re-queued */
ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
@@ -276,14 +277,14 @@ struct urb *udl_get_urb(struct drm_device *dev)
goto error;
}
- spin_lock_irqsave(&udl->urbs.lock, flags);
+ spin_lock_irq(&udl->urbs.lock);
BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
entry = udl->urbs.list.next;
list_del_init(entry);
udl->urbs.available--;
- spin_unlock_irqrestore(&udl->urbs.lock, flags);
+ spin_unlock_irq(&udl->urbs.lock);
unode = list_entry(entry, struct urb_node, entry);
urb = unode->urb;
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 5bcae7649795..7e37765cf5ac 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -243,7 +243,7 @@ static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
memcpy(buf, udl->mode_buf, udl->mode_buf_len);
retval = udl_submit_urb(dev, urb, udl->mode_buf_len);
- DRM_INFO("write mode info %d\n", udl->mode_buf_len);
+ DRM_DEBUG("write mode info %d\n", udl->mode_buf_len);
return retval;
}
@@ -366,7 +366,6 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
{
struct udl_framebuffer *ufb = to_udl_fb(fb);
struct drm_device *dev = crtc->dev;
- unsigned long flags;
struct drm_framebuffer *old_fb = crtc->primary->fb;
if (old_fb) {
@@ -377,10 +376,10 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
if (event)
drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
crtc->primary->fb = fb;
return 0;
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index b992644c17e6..ce87661e544f 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
-#include <linux/prefetch.h>
#include <asm/unaligned.h>
#include <drm/drmP.h>
@@ -51,9 +50,6 @@ static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
int start = width;
int end = width;
- prefetch((void *) front);
- prefetch((void *) back);
-
for (j = 0; j < width; j++) {
if (back[j] != front[j]) {
start = j;
@@ -83,12 +79,12 @@ static inline u16 pixel32_to_be16(const uint32_t pixel)
((pixel >> 8) & 0xf800));
}
-static inline u16 get_pixel_val16(const uint8_t *pixel, int bpp)
+static inline u16 get_pixel_val16(const uint8_t *pixel, int log_bpp)
{
- u16 pixel_val16 = 0;
- if (bpp == 2)
+ u16 pixel_val16;
+ if (log_bpp == 1)
pixel_val16 = *(const uint16_t *)pixel;
- else if (bpp == 4)
+ else
pixel_val16 = pixel32_to_be16(*(const uint32_t *)pixel);
return pixel_val16;
}
@@ -125,8 +121,9 @@ static void udl_compress_hline16(
const u8 *const pixel_end,
uint32_t *device_address_ptr,
uint8_t **command_buffer_ptr,
- const uint8_t *const cmd_buffer_end, int bpp)
+ const uint8_t *const cmd_buffer_end, int log_bpp)
{
+ const int bpp = 1 << log_bpp;
const u8 *pixel = *pixel_start_ptr;
uint32_t dev_addr = *device_address_ptr;
uint8_t *cmd = *command_buffer_ptr;
@@ -139,8 +136,6 @@ static void udl_compress_hline16(
const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
uint16_t pixel_val16;
- prefetchw((void *) cmd); /* pull in one cache line at least */
-
*cmd++ = 0xaf;
*cmd++ = 0x6b;
*cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
@@ -153,12 +148,11 @@ static void udl_compress_hline16(
raw_pixels_count_byte = cmd++; /* we'll know this later */
raw_pixel_start = pixel;
- cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL,
- (unsigned long)(pixel_end - pixel) / bpp,
- (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp;
+ cmd_pixel_end = pixel + (min3(MAX_CMD_PIXELS + 1UL,
+ (unsigned long)(pixel_end - pixel) >> log_bpp,
+ (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) << log_bpp);
- prefetch_range((void *) pixel, cmd_pixel_end - pixel);
- pixel_val16 = get_pixel_val16(pixel, bpp);
+ pixel_val16 = get_pixel_val16(pixel, log_bpp);
while (pixel < cmd_pixel_end) {
const u8 *const start = pixel;
@@ -170,7 +164,7 @@ static void udl_compress_hline16(
pixel += bpp;
while (pixel < cmd_pixel_end) {
- pixel_val16 = get_pixel_val16(pixel, bpp);
+ pixel_val16 = get_pixel_val16(pixel, log_bpp);
if (pixel_val16 != repeating_pixel_val16)
break;
pixel += bpp;
@@ -179,10 +173,10 @@ static void udl_compress_hline16(
if (unlikely(pixel > start + bpp)) {
/* go back and fill in raw pixel count */
*raw_pixels_count_byte = (((start -
- raw_pixel_start) / bpp) + 1) & 0xFF;
+ raw_pixel_start) >> log_bpp) + 1) & 0xFF;
/* immediately after raw data is repeat byte */
- *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
+ *cmd++ = (((pixel - start) >> log_bpp) - 1) & 0xFF;
/* Then start another raw pixel span */
raw_pixel_start = pixel;
@@ -192,14 +186,14 @@ static void udl_compress_hline16(
if (pixel > raw_pixel_start) {
/* finalize last RAW span */
- *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
+ *raw_pixels_count_byte = ((pixel - raw_pixel_start) >> log_bpp) & 0xFF;
} else {
/* undo unused byte */
cmd--;
}
- *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
- dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2;
+ *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) >> log_bpp) & 0xFF;
+ dev_addr += ((pixel - cmd_pixel_start) >> log_bpp) * 2;
}
if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
@@ -222,19 +216,19 @@ static void udl_compress_hline16(
* (that we can only write to, slowly, and can never read), and (optionally)
* our shadow copy that tracks what's been sent to that hardware buffer.
*/
-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
+int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset,
u32 byte_width,
int *ident_ptr, int *sent_ptr)
{
const u8 *line_start, *line_end, *next_pixel;
- u32 base16 = 0 + (device_byte_offset / bpp) * 2;
+ u32 base16 = 0 + (device_byte_offset >> log_bpp) * 2;
struct urb *urb = *urb_ptr;
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
- BUG_ON(!(bpp == 2 || bpp == 4));
+ BUG_ON(!(log_bpp == 1 || log_bpp == 2));
line_start = (u8 *) (front + byte_offset);
next_pixel = line_start;
@@ -244,7 +238,7 @@ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
udl_compress_hline16(&next_pixel,
line_end, &base16,
- (u8 **) &cmd, (u8 *) cmd_end, bpp);
+ (u8 **) &cmd, (u8 *) cmd_end, log_bpp);
if (cmd >= cmd_end) {
int len = cmd - (u8 *) urb->transfer_buffer;
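
[Note: all of the udl changes above rest on one invariant: cpp is 2 or 4, so log_bpp = __ffs(cpp) is 1 or 2, and every multiply or divide by bpp becomes a shift. A standalone userspace C illustration; __builtin_ctz() matches the kernel's __ffs() for powers of two:]

#include <assert.h>

int main(void)
{
	unsigned int bpp = 4;
	unsigned int log_bpp = __builtin_ctz(bpp);	/* 2 */
	unsigned int x = 123;

	assert((x * bpp) == (x << log_bpp));
	assert((x / bpp) == (x >> log_bpp));
	return 0;
}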
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 7b1e2a549a71..54d96518a131 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -227,37 +227,19 @@ v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
}
-int v3d_gem_fault(struct vm_fault *vmf)
+vm_fault_t v3d_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct v3d_bo *bo = to_v3d_bo(obj);
- unsigned long pfn;
+ pfn_t pfn;
pgoff_t pgoff;
- int ret;
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- pfn = page_to_pfn(bo->pages[pgoff]);
-
- ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
-
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
+
+ return vmf_insert_mixed(vma, vmf->address, pfn);
}
int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index cdb582043b4f..2a85fa68ffea 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -123,6 +123,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv;
+ struct drm_sched_rq *rq;
int i;
v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -132,10 +133,8 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
v3d_priv->v3d = v3d;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
- drm_sched_entity_init(&v3d->queue[i].sched,
- &v3d_priv->sched_entity[i],
- &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
- NULL);
+ rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
}
file->driver_priv = v3d_priv;
@@ -146,13 +145,11 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
static void
v3d_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file->driver_priv;
enum v3d_queue q;
for (q = 0; q < V3D_MAX_QUEUES; q++) {
- drm_sched_entity_fini(&v3d->queue[q].sched,
- &v3d_priv->sched_entity[q]);
+ drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
}
kfree(v3d_priv);
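
[Note: the v3d conversion above tracks the GPU-scheduler API rework: drm_sched_entity_init() now takes an array of runqueues instead of a scheduler pointer, and drm_sched_entity_fini() is replaced by drm_sched_entity_destroy(). A sketch with illustrative example_* names:]

#include <drm/gpu_scheduler.h>

static int example_entity_setup(struct drm_gpu_scheduler *sched,
				struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq =
		&sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	/* one runqueue, no shared guilty counter */
	return drm_sched_entity_init(entity, &rq, 1, NULL);
}

static void example_entity_teardown(struct drm_sched_entity *entity)
{
	drm_sched_entity_destroy(entity);
}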
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index a043ac3aae98..e6fed696ad86 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -2,6 +2,7 @@
/* Copyright (C) 2015-2018 Broadcom */
#include <linux/reservation.h>
+#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
@@ -25,7 +26,6 @@ struct v3d_queue_state {
u64 fence_context;
u64 emit_seqno;
- u64 finished_seqno;
};
struct v3d_dev {
@@ -85,6 +85,11 @@ struct v3d_dev {
*/
struct mutex reset_lock;
+ /* Lock taken when creating and pushing the GPU scheduler
+ * jobs, to keep the sched-fence seqnos in order.
+ */
+ struct mutex sched_lock;
+
struct {
u32 num_allocated;
u32 pages_allocated;
@@ -179,6 +184,8 @@ struct v3d_job {
/* GPU virtual addresses of the start/end of the CL job. */
u32 start, end;
+
+ u32 timedout_ctca, timedout_ctra;
};
struct v3d_exec_info {
@@ -248,7 +255,7 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int v3d_gem_fault(struct vm_fault *vmf);
+vm_fault_t v3d_gem_fault(struct vm_fault *vmf);
int v3d_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj);
int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c
index 087d49c8cb12..50bfcf9a8a1a 100644
--- a/drivers/gpu/drm/v3d/v3d_fence.c
+++ b/drivers/gpu/drm/v3d/v3d_fence.c
@@ -35,24 +35,7 @@ static const char *v3d_fence_get_timeline_name(struct dma_fence *fence)
return "v3d-render";
}
-static bool v3d_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
-static bool v3d_fence_signaled(struct dma_fence *fence)
-{
- struct v3d_fence *f = to_v3d_fence(fence);
- struct v3d_dev *v3d = to_v3d_dev(f->dev);
-
- return v3d->queue[f->queue].finished_seqno >= f->seqno;
-}
-
const struct dma_fence_ops v3d_fence_ops = {
.get_driver_name = v3d_fence_get_driver_name,
.get_timeline_name = v3d_fence_get_timeline_name,
- .enable_signaling = v3d_fence_enable_signaling,
- .signaled = v3d_fence_signaled,
- .wait = dma_fence_default_wait,
- .release = dma_fence_free,
};
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b513f9189caf..5ce24098a5fd 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -550,9 +550,9 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (ret)
goto fail;
+ mutex_lock(&v3d->sched_lock);
if (exec->bin.start != exec->bin.end) {
ret = drm_sched_job_init(&exec->bin.base,
- &v3d->queue[V3D_BIN].sched,
&v3d_priv->sched_entity[V3D_BIN],
v3d_priv);
if (ret)
@@ -567,7 +567,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
}
ret = drm_sched_job_init(&exec->render.base,
- &v3d->queue[V3D_RENDER].sched,
&v3d_priv->sched_entity[V3D_RENDER],
v3d_priv);
if (ret)
@@ -576,6 +575,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
kref_get(&exec->refcount); /* put by scheduler job completion */
drm_sched_entity_push_job(&exec->render.base,
&v3d_priv->sched_entity[V3D_RENDER]);
+ mutex_unlock(&v3d->sched_lock);
v3d_attach_object_fences(exec);
@@ -594,6 +594,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
return 0;
fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
fail:
v3d_exec_put(exec);
@@ -615,6 +616,7 @@ v3d_gem_init(struct drm_device *dev)
spin_lock_init(&v3d->job_lock);
mutex_init(&v3d->bo_lock);
mutex_init(&v3d->reset_lock);
+ mutex_init(&v3d->sched_lock);
/* Note: We don't allocate address 0. Various bits of HW
* treat 0 as special, such as the occlusion query counters
@@ -650,17 +652,14 @@ void
v3d_gem_destroy(struct drm_device *dev)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
- enum v3d_queue q;
v3d_sched_fini(v3d);
/* Waiting for exec to finish would need to be done before
* unregistering V3D.
*/
- for (q = 0; q < V3D_MAX_QUEUES; q++) {
- WARN_ON(v3d->queue[q].emit_seqno !=
- v3d->queue[q].finished_seqno);
- }
+ WARN_ON(v3d->bin_job);
+ WARN_ON(v3d->render_job);
drm_mm_takedown(&v3d->mm);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 77e1fa046c10..e07514eb11b5 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -87,15 +87,12 @@ v3d_irq(int irq, void *arg)
}
if (intsts & V3D_INT_FLDONE) {
- v3d->queue[V3D_BIN].finished_seqno++;
dma_fence_signal(v3d->bin_job->bin.done_fence);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_FRDONE) {
- v3d->queue[V3D_RENDER].finished_seqno++;
dma_fence_signal(v3d->render_job->render.done_fence);
-
status = IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index fc13282dfc2f..854046565989 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -222,6 +222,7 @@
#define V3D_CLE_CTNCA(n) (V3D_CLE_CT0CA + 4 * n)
#define V3D_CLE_CT0RA 0x00118
#define V3D_CLE_CT1RA 0x0011c
+#define V3D_CLE_CTNRA(n) (V3D_CLE_CT0RA + 4 * n)
#define V3D_CLE_CT0LC 0x00120
#define V3D_CLE_CT1LC 0x00124
#define V3D_CLE_CT0PC 0x00128
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index b07bece9417d..a5501581d96b 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -14,8 +14,8 @@
* to the HW only when it has completed the last one, instead of
* filling up the CT[01]Q FIFOs with jobs. Similarly, we use
* v3d_job_dependency() to manage the dependency between bin and
- * render, instead of having the clients submit jobs with using the
- * HW's semaphores to interlock between them.
+ * render, instead of having the clients submit jobs using the HW's
+ * semaphores to interlock between them.
*/
#include <linux/kthread.h>
@@ -114,8 +114,8 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
v3d_invalidate_caches(v3d);
fence = v3d_fence_create(v3d, q);
- if (!fence)
- return fence;
+ if (IS_ERR(fence))
+ return NULL;
if (job->done_fence)
dma_fence_put(job->done_fence);
@@ -153,7 +153,25 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
struct v3d_job *job = to_v3d_job(sched_job);
struct v3d_exec_info *exec = job->exec;
struct v3d_dev *v3d = exec->v3d;
+ enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
enum v3d_queue q;
+ u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
+ u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
+
+ /* If the current address or return address have changed, then
+ * the GPU has probably made progress and we should delay the
+ * reset. This could fail if the GPU got in an infinite loop
+ * in the CL, but that is pretty unlikely outside of an i-g-t
+ * testcase.
+ */
+ if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
+ job->timedout_ctca = ctca;
+ job->timedout_ctra = ctra;
+
+ schedule_delayed_work(&job->base.work_tdr,
+ job->base.sched->timeout);
+ return;
+ }
mutex_lock(&v3d->reset_lock);
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index 4a3a868235f8..b303703bc7f3 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -19,6 +19,7 @@ vc4-y := \
vc4_plane.o \
vc4_render_cl.o \
vc4_trace_points.o \
+ vc4_txp.o \
vc4_v3d.o \
vc4_validate.o \
vc4_validate_shaders.o
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index add9cc97a3b6..8dcce7182bb7 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -721,7 +721,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
return dmabuf;
}
-int vc4_fault(struct vm_fault *vmf)
+vm_fault_t vc4_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index c8650bbcbcb3..0e6a121858d1 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -46,6 +46,8 @@ struct vc4_crtc_state {
struct drm_crtc_state base;
/* Dlist area for this CRTC configuration. */
struct drm_mm_node mm;
+ bool feed_txp;
+ bool txp_armed;
};
static inline struct vc4_crtc_state *
@@ -324,10 +326,8 @@ static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc)
return NULL;
}
-static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static void vc4_crtc_config_pv(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
@@ -338,12 +338,6 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
vc4_encoder->type == VC4_ENCODER_TYPE_DSI1);
u32 format = is_dsi ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
- bool debug_dump_regs = false;
-
- if (debug_dump_regs) {
- DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
- vc4_crtc_dump_regs(vc4_crtc);
- }
/* Reset the PV fifo. */
CRTC_WRITE(PV_CONTROL, 0);
@@ -419,6 +413,49 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
PV_CONTROL_CLK_SELECT) |
PV_CONTROL_FIFO_CLR |
PV_CONTROL_EN);
+}
+
+static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ bool debug_dump_regs = false;
+
+ if (debug_dump_regs) {
+ DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
+ vc4_crtc_dump_regs(vc4_crtc);
+ }
+
+ if (vc4_crtc->channel == 2) {
+ u32 dispctrl;
+ u32 dsp3_mux;
+
+ /*
+ * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
+ * FIFO X'.
+ * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
+ *
+ * DSP3 is connected to FIFO2 unless the transposer is
+ * enabled. In this case, FIFO 2 is directly accessed by the
+ * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
+ * route.
+ */
+ if (vc4_state->feed_txp)
+ dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
+ else
+ dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
+
+ dispctrl = HVS_READ(SCALER_DISPCTRL) &
+ ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+ HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
+ }
+
+ if (!vc4_state->feed_txp)
+ vc4_crtc_config_pv(crtc);
HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel),
SCALER_DISPBKGND_AUTOHS |
@@ -499,6 +536,13 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
}
}
+void vc4_crtc_txp_armed(struct drm_crtc_state *state)
+{
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+
+ vc4_state->txp_armed = true;
+}
+
static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -514,8 +558,11 @@ static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
spin_lock_irqsave(&dev->event_lock, flags);
- vc4_crtc->event = crtc->state->event;
- crtc->state->event = NULL;
+
+ if (!vc4_state->feed_txp || vc4_state->txp_armed) {
+ vc4_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ }
HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
vc4_state->mm.start);
@@ -533,8 +580,8 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
- struct drm_crtc_state *state = crtc->state;
- struct drm_display_mode *mode = &state->adjusted_mode;
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
require_hvs_enabled(dev);
@@ -546,15 +593,21 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
/* Turn on the scaler, which will wait for vstart to start
* compositing.
+ * When feeding the transposer, we should operate in oneshot
+ * mode.
*/
HVS_WRITE(SCALER_DISPCTRLX(vc4_crtc->channel),
VC4_SET_FIELD(mode->hdisplay, SCALER_DISPCTRLX_WIDTH) |
VC4_SET_FIELD(mode->vdisplay, SCALER_DISPCTRLX_HEIGHT) |
- SCALER_DISPCTRLX_ENABLE);
+ SCALER_DISPCTRLX_ENABLE |
+ (vc4_state->feed_txp ? SCALER_DISPCTRLX_ONESHOT : 0));
- /* Turn on the pixel valve, which will emit the vstart signal. */
- CRTC_WRITE(PV_V_CONTROL,
- CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
+ /* When feeding the transposer block, the pixelvalve is unneeded and
+ * should not be enabled.
+ */
+ if (!vc4_state->feed_txp)
+ CRTC_WRITE(PV_V_CONTROL,
+ CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
}
static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
@@ -579,8 +632,10 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_plane *plane;
unsigned long flags;
const struct drm_plane_state *plane_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
u32 dlist_count = 0;
- int ret;
+ int ret, i;
/* The pixelvalve can only feed one encoder (and encoders are
* 1:1 with connectors.)
@@ -600,6 +655,24 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
if (ret)
return ret;
+ for_each_new_connector_in_state(state->state, conn, conn_state, i) {
+ if (conn_state->crtc != crtc)
+ continue;
+
+ /* The writeback connector is implemented using the transposer
+ * block, which takes its data directly from the HVS FIFO.
+ */
+ if (conn->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) {
+ state->no_vblank = true;
+ vc4_state->feed_txp = true;
+ } else {
+ state->no_vblank = false;
+ vc4_state->feed_txp = false;
+ }
+
+ break;
+ }
+
return 0;
}
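For reference, a writeback connector like the one wired up here is driven through the regular atomic API: userspace binds the connector to the CRTC and attaches the output buffer via the standard WRITEBACK_FB_ID property. A rough libdrm sketch, assuming the property IDs have already been looked up and with error handling omitted:

	drmModeAtomicReq *req = drmModeAtomicAlloc();

	drmModeAtomicAddProperty(req, wb_conn_id, crtc_id_prop, crtc_id);
	drmModeAtomicAddProperty(req, wb_conn_id, writeback_fb_id_prop, fb_id);
	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);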
@@ -713,7 +786,8 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
spin_lock_irqsave(&dev->event_lock, flags);
if (vc4_crtc->event &&
- (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
+ (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)) ||
+ vc4_state->feed_txp)) {
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
drm_crtc_vblank_put(crtc);
@@ -721,6 +795,13 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+void vc4_crtc_handle_vblank(struct vc4_crtc *crtc)
+{
+ crtc->t_vblank = ktime_get();
+ drm_crtc_handle_vblank(&crtc->base);
+ vc4_crtc_handle_page_flip(crtc);
+}
+
static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
{
struct vc4_crtc *vc4_crtc = data;
@@ -728,10 +809,8 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
irqreturn_t ret = IRQ_NONE;
if (stat & PV_INT_VFP_START) {
- vc4_crtc->t_vblank = ktime_get();
CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
- drm_crtc_handle_vblank(&vc4_crtc->base);
- vc4_crtc_handle_page_flip(vc4_crtc);
+ vc4_crtc_handle_vblank(vc4_crtc);
ret = IRQ_HANDLED;
}
@@ -862,7 +941,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
* is released.
*/
drm_atomic_set_fb_for_plane(plane->state, fb);
- plane->fb = fb;
vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
vc4_async_page_flip_complete);
@@ -885,12 +963,15 @@ static int vc4_page_flip(struct drm_crtc *crtc,
static struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
{
- struct vc4_crtc_state *vc4_state;
+ struct vc4_crtc_state *vc4_state, *old_vc4_state;
vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
if (!vc4_state)
return NULL;
+ old_vc4_state = to_vc4_crtc_state(crtc->state);
+ vc4_state->feed_txp = old_vc4_state->feed_txp;
+
__drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base);
return &vc4_state->base;
}
@@ -988,9 +1069,17 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, drm) {
- struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+ struct vc4_encoder *vc4_encoder;
int i;
+ /* HVS FIFO2 can feed the TXP IP. */
+ if (crtc_data->hvs_channel == 2 &&
+ encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
+ encoder->possible_crtcs |= drm_crtc_mask(crtc);
+ continue;
+ }
+
+ vc4_encoder = to_vc4_encoder(encoder);
for (i = 0; i < ARRAY_SIZE(crtc_data->encoder_types); i++) {
if (vc4_encoder->type == encoder_types[i]) {
vc4_encoder->clock_select = i;
@@ -1057,7 +1146,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
&vc4_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
- primary_plane->crtc = crtc;
vc4_crtc->channel = vc4_crtc->data->hvs_channel;
drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
@@ -1083,7 +1171,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(plane))
continue;
- plane->possible_crtcs = 1 << drm_crtc_index(crtc);
+ plane->possible_crtcs = drm_crtc_mask(crtc);
}
/* Set up the legacy cursor after overlay initialization,
@@ -1092,8 +1180,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
*/
cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
if (!IS_ERR(cursor_plane)) {
- cursor_plane->possible_crtcs = 1 << drm_crtc_index(crtc);
- cursor_plane->crtc = crtc;
+ cursor_plane->possible_crtcs = drm_crtc_mask(crtc);
crtc->cursor = cursor_plane;
}
@@ -1121,7 +1208,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
err_destroy_planes:
list_for_each_entry_safe(destroy_plane, temp,
&drm->mode_config.plane_list, head) {
- if (destroy_plane->possible_crtcs == 1 << drm_crtc_index(crtc))
+ if (destroy_plane->possible_crtcs == drm_crtc_mask(crtc))
destroy_plane->funcs->destroy(destroy_plane);
}
err:
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 5db06bdb5f27..7a0003de71ab 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -21,6 +21,7 @@ static const struct drm_info_list vc4_debugfs_list[] = {
{"dsi1_regs", vc4_dsi_debugfs_regs, 0, (void *)(uintptr_t)1},
{"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
{"vec_regs", vc4_vec_debugfs_regs, 0},
+ {"txp_regs", vc4_txp_debugfs_regs, 0},
{"hvs_regs", vc4_hvs_debugfs_regs, 0},
{"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
{"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 466d0a27b415..04270a14fcaa 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -288,7 +288,7 @@ static int vc4_drm_bind(struct device *dev)
ret = vc4_bo_cache_init(drm);
if (ret)
- goto dev_unref;
+ goto dev_put;
drm_mode_config_init(drm);
@@ -313,8 +313,8 @@ unbind_all:
gem_destroy:
vc4_gem_destroy(drm);
vc4_bo_cache_destroy(drm);
-dev_unref:
- drm_dev_unref(drm);
+dev_put:
+ drm_dev_put(drm);
return ret;
}
@@ -331,7 +331,7 @@ static void vc4_drm_unbind(struct device *dev)
drm_atomic_private_obj_fini(&vc4->ctm_manager);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops vc4_drm_ops = {
@@ -344,6 +344,7 @@ static struct platform_driver *const component_drivers[] = {
&vc4_vec_driver,
&vc4_dpi_driver,
&vc4_dsi_driver,
+ &vc4_txp_driver,
&vc4_hvs_driver,
&vc4_crtc_driver,
&vc4_v3d_driver,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 554a4e810d5b..bd6ef1f31822 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -6,6 +6,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/mm_types.h>
#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
@@ -72,6 +73,7 @@ struct vc4_dev {
struct vc4_dpi *dpi;
struct vc4_dsi *dsi1;
struct vc4_vec *vec;
+ struct vc4_txp *txp;
struct vc4_hang_state *hang_state;
@@ -674,7 +676,7 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int vc4_fault(struct vm_fault *vmf);
+vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
@@ -697,6 +699,8 @@ bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
+void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
+void vc4_crtc_txp_armed(struct drm_crtc_state *state);
/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
@@ -744,6 +748,10 @@ int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);
+/* vc4_txp.c */
+extern struct platform_driver vc4_txp_driver;
+int vc4_txp_debugfs_regs(struct seq_file *m, void *unused);
+
/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 8aa897835118..0c607eb33d7e 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -814,7 +814,9 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
struct vc4_dsi *dsi = vc4_encoder->dsi;
struct device *dev = &dsi->pdev->dev;
+ drm_bridge_disable(dsi->bridge);
vc4_dsi_ulps(dsi, true);
+ drm_bridge_post_disable(dsi->bridge);
clk_disable_unprepare(dsi->pll_phy_clock);
clk_disable_unprepare(dsi->escape_clock);
@@ -1089,21 +1091,6 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
/* Display reset sequence timeout */
DSI_PORT_WRITE(PR_TO_CNT, 100000);
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- DSI_PORT_WRITE(DISP0_CTRL,
- VC4_SET_FIELD(dsi->divider,
- DSI_DISP0_PIX_CLK_DIV) |
- VC4_SET_FIELD(dsi->format, DSI_DISP0_PFORMAT) |
- VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME,
- DSI_DISP0_LP_STOP_CTRL) |
- DSI_DISP0_ST_END |
- DSI_DISP0_ENABLE);
- } else {
- DSI_PORT_WRITE(DISP0_CTRL,
- DSI_DISP0_COMMAND_MODE |
- DSI_DISP0_ENABLE);
- }
-
/* Set up DISP1 for transferring long command payloads through
* the pixfifo.
*/
@@ -1128,6 +1115,25 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
vc4_dsi_ulps(dsi, false);
+ drm_bridge_pre_enable(dsi->bridge);
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ DSI_PORT_WRITE(DISP0_CTRL,
+ VC4_SET_FIELD(dsi->divider,
+ DSI_DISP0_PIX_CLK_DIV) |
+ VC4_SET_FIELD(dsi->format, DSI_DISP0_PFORMAT) |
+ VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME,
+ DSI_DISP0_LP_STOP_CTRL) |
+ DSI_DISP0_ST_END |
+ DSI_DISP0_ENABLE);
+ } else {
+ DSI_PORT_WRITE(DISP0_CTRL,
+ DSI_DISP0_COMMAND_MODE |
+ DSI_DISP0_ENABLE);
+ }
+
+ drm_bridge_enable(dsi->bridge);
+
if (debug_dump_regs) {
DRM_INFO("DSI regs after:\n");
vc4_dsi_dump_regs(dsi);
@@ -1606,8 +1612,18 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
&panel, &dsi->bridge);
- if (ret)
+ if (ret) {
+ /* If the bridge or panel pointed to by dev->of_node is not
+ * enabled, just return 0 here so that we don't prevent the DRM
+ * dev from being registered. Of course that means the DSI
+ * encoder won't be exposed, but that's not a problem since
+ * nothing is connected to it.
+ */
+ if (ret == -ENODEV)
+ return 0;
+
return ret;
+ }
if (panel) {
dsi->bridge = devm_drm_panel_bridge_add(dev, panel,
@@ -1639,6 +1655,12 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
dev_err(dev, "bridge attach failed: %d\n", ret);
return ret;
}
+ /* Disable the atomic helper calls into the bridge. We
+ * manually call the bridge pre_enable / enable / etc. calls
+ * from our driver, since we need to sequence them within the
+ * encoder's enable/disable paths.
+ */
+ dsi->encoder->bridge = NULL;
pm_runtime_enable(dev);
@@ -1652,7 +1674,8 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
- pm_runtime_disable(dev);
+ if (dsi->bridge)
+ pm_runtime_disable(dev);
vc4_dsi_encoder_destroy(dsi->encoder);
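The point of clearing encoder->bridge is that the atomic helpers then skip the bridge entirely, and the encoder hooks above sequence it by hand so DISP0 can be brought up in between. In outline, this is the flow the hunks establish (not additional driver code):

	drm_bridge_pre_enable(dsi->bridge);	/* bridge out of reset, link idle */
	/* ... program and enable DISP0 video/command mode ... */
	drm_bridge_enable(dsi->bridge);		/* bridge starts consuming video */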
diff --git a/drivers/gpu/drm/vc4/vc4_fence.c b/drivers/gpu/drm/vc4/vc4_fence.c
index dbf5a5a5d5f5..580214e2158c 100644
--- a/drivers/gpu/drm/vc4/vc4_fence.c
+++ b/drivers/gpu/drm/vc4/vc4_fence.c
@@ -33,11 +33,6 @@ static const char *vc4_fence_get_timeline_name(struct dma_fence *fence)
return "vc4-v3d";
}
-static bool vc4_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static bool vc4_fence_signaled(struct dma_fence *fence)
{
struct vc4_fence *f = to_vc4_fence(fence);
@@ -49,8 +44,5 @@ static bool vc4_fence_signaled(struct dma_fence *fence)
const struct dma_fence_ops vc4_fence_ops = {
.get_driver_name = vc4_fence_get_driver_name,
.get_timeline_name = vc4_fence_get_timeline_name,
- .enable_signaling = vc4_fence_enable_signaling,
.signaled = vc4_fence_signaled,
- .wait = dma_fence_default_wait,
- .release = dma_fence_free,
};
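These callbacks can go away because the dma-fence core now treats .enable_signaling, .wait and .release as optional and supplies the default behavior when they are NULL. A minimal ops table only needs the two name callbacks; a sketch for a hypothetical "foo" driver:

	static const struct dma_fence_ops foo_fence_ops = {
		.get_driver_name = foo_fence_get_driver_name,
		.get_timeline_name = foo_fence_get_timeline_name,
		/* .signaled is optional too: leave it out when the fence
		 * can only complete through dma_fence_signal().
		 */
	};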
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index b8d50533e2bb..fd5522fd179e 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -285,7 +285,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
drm_rgb_quant_range_selectable(edid);
}
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -329,7 +329,7 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return connector;
}
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 8a411e5f8776..ca5aa7fba769 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -153,18 +153,11 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_enables(dev, state);
- /* Make sure that drm_atomic_helper_wait_for_vblanks()
- * actually waits for vblank. If we're doing a full atomic
- * modeset (as opposed to a vc4_update_plane() short circuit),
- * then we need to wait for scanout to be done with our
- * display lists before we free it and potentially reallocate
- * and overwrite the dlist memory with a new modeset.
- */
- state->legacy_cursor_update = false;
+ drm_atomic_helper_fake_vblank(state);
drm_atomic_helper_commit_hw_done(state);
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_wait_for_flip_done(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
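This brings vc4's commit tail in line with the generic helper sequence, where drm_atomic_helper_fake_vblank() sends completion events for CRTCs that set state->no_vblank (the writeback case) and the flip-done wait replaces the vblank wait. The full ordering, for reference:

	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);
	drm_atomic_helper_fake_vblank(state);
	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_wait_for_flip_done(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);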
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index a951ec75d01f..cfb50fedfa2b 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -470,12 +470,14 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
struct drm_framebuffer *fb = state->fb;
u32 ctl0_offset = vc4_state->dlist_count;
const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
+ u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
int num_planes = drm_format_num_planes(format->drm);
bool mix_plane_alpha;
bool covers_screen;
u32 scl0, scl1, pitch0;
u32 lbm_size, tiling;
unsigned long irqflags;
+ u32 hvs_format = format->hvs;
int ret, i;
ret = vc4_plane_setup_clipping_and_scaling(state);
@@ -515,7 +517,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
scl1 = vc4_get_scl_field(state, 0);
}
- switch (fb->modifier) {
+ switch (base_format_mod) {
case DRM_FORMAT_MOD_LINEAR:
tiling = SCALER_CTL0_TILING_LINEAR;
pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
@@ -538,6 +540,49 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
break;
}
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ case DRM_FORMAT_MOD_BROADCOM_SAND256: {
+ uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
+
+ /* Column-based NV12 or RGBA. */
+ if (fb->format->num_planes > 1) {
+ if (hvs_format != HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE) {
+ DRM_DEBUG_KMS("SAND format only valid for NV12/21");
+ return -EINVAL;
+ }
+ hvs_format = HVS_PIXEL_FORMAT_H264;
+ } else {
+ if (base_format_mod == DRM_FORMAT_MOD_BROADCOM_SAND256) {
+ DRM_DEBUG_KMS("SAND256 format only valid for H.264");
+ return -EINVAL;
+ }
+ }
+
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ tiling = SCALER_CTL0_TILING_64B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ tiling = SCALER_CTL0_TILING_128B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ tiling = SCALER_CTL0_TILING_256B_OR_T;
+ break;
+ default:
+ break;
+ }
+
+ if (param > SCALER_TILE_HEIGHT_MASK) {
+ DRM_DEBUG_KMS("SAND height too large (%d)\n", param);
+ return -EINVAL;
+ }
+
+ pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
+ break;
+ }
+
default:
DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
(long long)fb->modifier);
@@ -547,8 +592,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Control word */
vc4_dlist_write(vc4_state,
SCALER_CTL0_VALID |
+ VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
(format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
- (format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
+ (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
(vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
@@ -610,8 +656,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Pitch word 1/2 */
for (i = 1; i < num_planes; i++) {
- vc4_dlist_write(vc4_state,
- VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH));
+ if (hvs_format != HVS_PIXEL_FORMAT_H264) {
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(fb->pitches[i],
+ SCALER_SRC_PITCH));
+ } else {
+ vc4_dlist_write(vc4_state, pitch0);
+ }
}
/* Colorspace conversion words */
@@ -813,18 +864,21 @@ static int vc4_prepare_fb(struct drm_plane *plane,
struct dma_fence *fence;
int ret;
- if ((plane->state->fb == state->fb) || !state->fb)
+ if (!state->fb)
return 0;
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+ fence = reservation_object_get_excl_rcu(bo->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
+
+ if (plane->state->fb == state->fb)
+ return 0;
+
ret = vc4_bo_inc_usecnt(bo);
if (ret)
return ret;
- fence = reservation_object_get_excl_rcu(bo->resv);
- drm_atomic_set_fence_for_plane(state, fence);
-
return 0;
}
@@ -851,7 +905,7 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
static void vc4_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
@@ -869,13 +923,32 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
case DRM_FORMAT_BGR565:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
- return true;
+ switch (fourcc_mod_broadcom_mod(modifier)) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ return true;
+ default:
+ return false;
+ }
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ switch (fourcc_mod_broadcom_mod(modifier)) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case DRM_FORMAT_MOD_BROADCOM_SAND64:
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ return true;
+ default:
+ return false;
+ }
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
- case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
default:
return (modifier == DRM_FORMAT_MOD_LINEAR);
}
@@ -903,6 +976,9 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
unsigned i;
static const uint64_t modifiers[] = {
DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
+ DRM_FORMAT_MOD_BROADCOM_SAND128,
+ DRM_FORMAT_MOD_BROADCOM_SAND64,
+ DRM_FORMAT_MOD_BROADCOM_SAND256,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
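The SAND modifiers carry the column height as an embedded parameter, which is why the code above strips it with fourcc_mod_broadcom_mod() before switching. Userspace builds such a modifier with the drm_fourcc.h helpers; a sketch using an assumed 96-line column height:

	/* NV12 in SAND128 layout, 96 lines per column (example value). */
	uint64_t modifier = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(96);

	/* The driver decodes the two halves with:
	 *   fourcc_mod_broadcom_mod(modifier)   -> DRM_FORMAT_MOD_BROADCOM_SAND128
	 *   fourcc_mod_broadcom_param(modifier) -> 96
	 */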
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index d1fb6fec46eb..d6864fa4bd14 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -1031,6 +1031,12 @@ enum hvs_pixel_format {
#define SCALER_SRC_PITCH_MASK VC4_MASK(15, 0)
#define SCALER_SRC_PITCH_SHIFT 0
+/* PITCH0/1/2 fields for tiled (SAND). */
+#define SCALER_TILE_SKIP_0_MASK VC4_MASK(18, 16)
+#define SCALER_TILE_SKIP_0_SHIFT 16
+#define SCALER_TILE_HEIGHT_MASK VC4_MASK(15, 0)
+#define SCALER_TILE_HEIGHT_SHIFT 0
+
/* PITCH0 fields for T-tiled. */
#define SCALER_PITCH0_TILE_WIDTH_L_MASK VC4_MASK(22, 16)
#define SCALER_PITCH0_TILE_WIDTH_L_SHIFT 16
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
new file mode 100644
index 000000000000..6e23c50168f9
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2018 Broadcom
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_writeback.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+/* Base address of the output. Raster formats must be 4-byte aligned,
+ * T and LT must be 16-byte aligned or maybe utile-aligned (docs are
+ * inconsistent, but probably utile).
+ */
+#define TXP_DST_PTR 0x00
+
+/* Pitch in bytes for raster images, 16-byte aligned. For tiled, it's
+ * the width in tiles.
+ */
+#define TXP_DST_PITCH 0x04
+/* For T-tiled images, DST_PITCH should be the number of tiles wide,
+ * shifted up.
+ */
+# define TXP_T_TILE_WIDTH_SHIFT 7
+/* For LT-tiled images, DST_PITCH should be the number of utiles wide,
+ * shifted up.
+ */
+# define TXP_LT_TILE_WIDTH_SHIFT 4
+
+/* Pre-rotation width/height of the image. Must match HVS config.
+ *
+ * If TFORMAT, the limit is 1920 for 32-bit and 3840 for 16-bit,
+ * and width/height must be tile or utile-aligned as appropriate. If
+ * transposing (rotating), width is limited to 1920.
+ *
+ * Height is limited to various numbers between 4088 and 4095. I'd
+ * just use 4088 to be safe.
+ */
+#define TXP_DIM 0x08
+# define TXP_HEIGHT_SHIFT 16
+# define TXP_HEIGHT_MASK GENMASK(31, 16)
+# define TXP_WIDTH_SHIFT 0
+# define TXP_WIDTH_MASK GENMASK(15, 0)
+
+#define TXP_DST_CTRL 0x0c
+/* These bits are set to 0x54 */
+# define TXP_PILOT_SHIFT 24
+# define TXP_PILOT_MASK GENMASK(31, 24)
+/* Bits 22-23 are set to 0x01 */
+# define TXP_VERSION_SHIFT 22
+# define TXP_VERSION_MASK GENMASK(23, 22)
+
+/* Powers down the internal memory. */
+# define TXP_POWERDOWN BIT(21)
+
+/* Enables storing the alpha component in 8888/4444, instead of
+ * filling with ~ALPHA_INVERT.
+ */
+# define TXP_ALPHA_ENABLE BIT(20)
+
+/* 4 bits, each enables stores for a channel in each set of 4 bytes.
+ * Set to 0xf for normal operation.
+ */
+# define TXP_BYTE_ENABLE_SHIFT 16
+# define TXP_BYTE_ENABLE_MASK GENMASK(19, 16)
+
+/* Debug: Generate VSTART again at EOF. */
+# define TXP_VSTART_AT_EOF BIT(15)
+
+/* Debug: Terminate the current frame immediately. Stops AXI
+ * writes.
+ */
+# define TXP_ABORT BIT(14)
+
+# define TXP_DITHER BIT(13)
+
+/* Inverts alpha if TXP_ALPHA_ENABLE, chooses fill value for
+ * !TXP_ALPHA_ENABLE.
+ */
+# define TXP_ALPHA_INVERT BIT(12)
+
+/* Note: I've listed the channels here in high bit (in byte 3/2/1) to
+ * low bit (in byte 0) order.
+ */
+# define TXP_FORMAT_SHIFT 8
+# define TXP_FORMAT_MASK GENMASK(11, 8)
+# define TXP_FORMAT_ABGR4444 0
+# define TXP_FORMAT_ARGB4444 1
+# define TXP_FORMAT_BGRA4444 2
+# define TXP_FORMAT_RGBA4444 3
+# define TXP_FORMAT_BGR565 6
+# define TXP_FORMAT_RGB565 7
+/* 888s are non-rotated, raster-only */
+# define TXP_FORMAT_BGR888 8
+# define TXP_FORMAT_RGB888 9
+# define TXP_FORMAT_ABGR8888 12
+# define TXP_FORMAT_ARGB8888 13
+# define TXP_FORMAT_BGRA8888 14
+# define TXP_FORMAT_RGBA8888 15
+
+/* If TFORMAT is set, generates LT instead of T format. */
+# define TXP_LINEAR_UTILE BIT(7)
+
+/* Rotate output by 90 degrees. */
+# define TXP_TRANSPOSE BIT(6)
+
+/* Generate a tiled format for V3D. */
+# define TXP_TFORMAT BIT(5)
+
+/* Generates some undefined test mode output. */
+# define TXP_TEST_MODE BIT(4)
+
+/* Request odd field from HVS. */
+# define TXP_FIELD BIT(3)
+
+/* Raise interrupt when idle. */
+# define TXP_EI BIT(2)
+
+/* Set when generating a frame, clears when idle. */
+# define TXP_BUSY BIT(1)
+
+/* Starts a frame. Self-clearing. */
+# define TXP_GO BIT(0)
+
+/* Number of lines received and committed to memory. */
+#define TXP_PROGRESS 0x10
+
+#define TXP_READ(offset) readl(txp->regs + (offset))
+#define TXP_WRITE(offset, val) writel(val, txp->regs + (offset))
+
+struct vc4_txp {
+ struct platform_device *pdev;
+
+ struct drm_writeback_connector connector;
+
+ void __iomem *regs;
+};
+
+static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct vc4_txp, connector.encoder);
+}
+
+static inline struct vc4_txp *connector_to_vc4_txp(struct drm_connector *conn)
+{
+ return container_of(conn, struct vc4_txp, connector.base);
+}
+
+#define TXP_REG(reg) { reg, #reg }
+static const struct {
+ u32 reg;
+ const char *name;
+} txp_regs[] = {
+ TXP_REG(TXP_DST_PTR),
+ TXP_REG(TXP_DST_PITCH),
+ TXP_REG(TXP_DIM),
+ TXP_REG(TXP_DST_CTRL),
+ TXP_REG(TXP_PROGRESS),
+};
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_txp_debugfs_regs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_txp *txp = vc4->txp;
+ int i;
+
+ if (!txp)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(txp_regs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ txp_regs[i].name, txp_regs[i].reg,
+ TXP_READ(txp_regs[i].reg));
+ }
+
+ return 0;
+}
+#endif
+
+static int vc4_txp_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+ dev->mode_config.max_height);
+}
+
+static enum drm_mode_status
+vc4_txp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ int w = mode->hdisplay, h = mode->vdisplay;
+
+ if (w < mode_config->min_width || w > mode_config->max_width)
+ return MODE_BAD_HVALUE;
+
+ if (h < mode_config->min_height || h > mode_config->max_height)
+ return MODE_BAD_VVALUE;
+
+ return MODE_OK;
+}
+
+static const u32 drm_fmts[] = {
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+};
+
+static const u32 txp_fmts[] = {
+ TXP_FORMAT_RGB888,
+ TXP_FORMAT_BGR888,
+ TXP_FORMAT_ARGB8888,
+ TXP_FORMAT_ABGR8888,
+ TXP_FORMAT_ARGB8888,
+ TXP_FORMAT_ABGR8888,
+ TXP_FORMAT_RGBA8888,
+ TXP_FORMAT_BGRA8888,
+ TXP_FORMAT_RGBA8888,
+ TXP_FORMAT_BGRA8888,
+};
+
+static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_gem_cma_object *gem;
+ struct drm_framebuffer *fb;
+ int i;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
+ conn_state->crtc);
+
+ fb = conn_state->writeback_job->fb;
+ if (fb->width != crtc_state->mode.hdisplay ||
+ fb->height != crtc_state->mode.vdisplay) {
+ DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
+ fb->width, fb->height);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(drm_fmts); i++) {
+ if (fb->format->format == drm_fmts[i])
+ break;
+ }
+
+ if (i == ARRAY_SIZE(drm_fmts))
+ return -EINVAL;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+ /* Pitch must be aligned on 16 bytes. */
+ if (fb->pitches[0] & GENMASK(3, 0))
+ return -EINVAL;
+
+ vc4_crtc_txp_armed(crtc_state);
+
+ return 0;
+}
+
+static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
+ struct drm_connector_state *conn_state)
+{
+ struct vc4_txp *txp = connector_to_vc4_txp(conn);
+ struct drm_gem_cma_object *gem;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *fb;
+ u32 ctrl;
+ int i;
+
+ if (WARN_ON(!conn_state->writeback_job ||
+ !conn_state->writeback_job->fb))
+ return;
+
+ mode = &conn_state->crtc->state->adjusted_mode;
+ fb = conn_state->writeback_job->fb;
+
+ for (i = 0; i < ARRAY_SIZE(drm_fmts); i++) {
+ if (fb->format->format == drm_fmts[i])
+ break;
+ }
+
+ if (WARN_ON(i == ARRAY_SIZE(drm_fmts)))
+ return;
+
+ ctrl = TXP_GO | TXP_VSTART_AT_EOF | TXP_EI |
+ VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
+ VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);
+
+ if (fb->format->has_alpha)
+ ctrl |= TXP_ALPHA_ENABLE;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]);
+ TXP_WRITE(TXP_DST_PITCH, fb->pitches[0]);
+ TXP_WRITE(TXP_DIM,
+ VC4_SET_FIELD(mode->hdisplay, TXP_WIDTH) |
+ VC4_SET_FIELD(mode->vdisplay, TXP_HEIGHT));
+
+ TXP_WRITE(TXP_DST_CTRL, ctrl);
+
+ drm_writeback_queue_job(&txp->connector, conn_state->writeback_job);
+}
+
+static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = {
+ .get_modes = vc4_txp_connector_get_modes,
+ .mode_valid = vc4_txp_connector_mode_valid,
+ .atomic_check = vc4_txp_connector_atomic_check,
+ .atomic_commit = vc4_txp_connector_atomic_commit,
+};
+
+static enum drm_connector_status
+vc4_txp_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void vc4_txp_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs vc4_txp_connector_funcs = {
+ .detect = vc4_txp_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = vc4_txp_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
+{
+ struct vc4_txp *txp = encoder_to_vc4_txp(encoder);
+
+ if (TXP_READ(TXP_DST_CTRL) & TXP_BUSY) {
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ TXP_WRITE(TXP_DST_CTRL, TXP_ABORT);
+
+ while (TXP_READ(TXP_DST_CTRL) & TXP_BUSY &&
+ time_before(jiffies, timeout))
+ ;
+
+ WARN_ON(TXP_READ(TXP_DST_CTRL) & TXP_BUSY);
+ }
+
+ TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
+}
+
+static const struct drm_encoder_helper_funcs vc4_txp_encoder_helper_funcs = {
+ .disable = vc4_txp_encoder_disable,
+};
+
+static irqreturn_t vc4_txp_interrupt(int irq, void *data)
+{
+ struct vc4_txp *txp = data;
+
+ TXP_WRITE(TXP_DST_CTRL, TXP_READ(TXP_DST_CTRL) & ~TXP_EI);
+ vc4_crtc_handle_vblank(to_vc4_crtc(txp->connector.base.state->crtc));
+ drm_writeback_signal_completion(&txp->connector, 0);
+
+ return IRQ_HANDLED;
+}
+
+static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_txp *txp;
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ txp = devm_kzalloc(dev, sizeof(*txp), GFP_KERNEL);
+ if (!txp)
+ return -ENOMEM;
+
+ txp->pdev = pdev;
+
+ txp->regs = vc4_ioremap_regs(pdev, 0);
+ if (IS_ERR(txp->regs))
+ return PTR_ERR(txp->regs);
+
+ drm_connector_helper_add(&txp->connector.base,
+ &vc4_txp_connector_helper_funcs);
+ ret = drm_writeback_connector_init(drm, &txp->connector,
+ &vc4_txp_connector_funcs,
+ &vc4_txp_encoder_helper_funcs,
+ drm_fmts, ARRAY_SIZE(drm_fmts));
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, irq, vc4_txp_interrupt, 0,
+ dev_name(dev), txp);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, txp);
+ vc4->txp = txp;
+
+ return 0;
+}
+
+static void vc4_txp_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_txp *txp = dev_get_drvdata(dev);
+
+ vc4_txp_connector_destroy(&txp->connector.base);
+
+ vc4->txp = NULL;
+}
+
+static const struct component_ops vc4_txp_ops = {
+ .bind = vc4_txp_bind,
+ .unbind = vc4_txp_unbind,
+};
+
+static int vc4_txp_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vc4_txp_ops);
+}
+
+static int vc4_txp_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vc4_txp_ops);
+ return 0;
+}
+
+static const struct of_device_id vc4_txp_dt_match[] = {
+ { .compatible = "brcm,bcm2835-txp" },
+ { /* sentinel */ },
+};
+
+struct platform_driver vc4_txp_driver = {
+ .probe = vc4_txp_probe,
+ .remove = vc4_txp_remove,
+ .driver = {
+ .name = "vc4_txp",
+ .of_match_table = vc4_txp_dt_match,
+ },
+};
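Taking the register comments above together, TXP_DST_PITCH encodes three different quantities depending on the output layout. A hypothetical helper, purely illustrative and not part of the driver:

	static u32 txp_dst_pitch(bool tformat, bool linear_utile,
				 u32 raster_stride, u32 tiles_wide,
				 u32 utiles_wide)
	{
		if (!tformat)
			return raster_stride;	/* bytes, 16-byte aligned */

		if (linear_utile)
			return utiles_wide << TXP_LT_TILE_WIDTH_SHIFT;

		return tiles_wide << TXP_T_TILE_WIDTH_SHIFT;
	}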
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 3a9a302247a2..8e7facb6514e 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -404,7 +404,7 @@ static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev,
VC4_VEC_TV_MODE_NTSC);
vec->tv_mode = &vc4_vec_tv_modes[VC4_VEC_TV_MODE_NTSC];
- drm_mode_connector_attach_encoder(connector, vec->encoder);
+ drm_connector_attach_encoder(connector, vec->encoder);
return connector;
}
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 2524ff116f00..0e5620f76ee0 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -61,23 +61,22 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
kfree(vgem_obj);
}
-static int vgem_gem_fault(struct vm_fault *vmf)
+static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_vgem_gem_object *obj = vma->vm_private_data;
/* We don't use vmf->pgoff since that has the fake offset */
unsigned long vaddr = vmf->address;
- int ret;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
loff_t num_pages;
pgoff_t page_offset;
page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
- if (page_offset > num_pages)
+ if (page_offset >= num_pages)
return VM_FAULT_SIGBUS;
- ret = -ENOENT;
mutex_lock(&obj->pages_lock);
if (obj->pages) {
get_page(obj->pages[page_offset]);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index a5edd86603d9..25503b933599 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -28,6 +28,7 @@
#include "virtgpu_drv.h"
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#define XRES_MIN 32
#define YRES_MIN 32
@@ -48,16 +49,6 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
-static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct virtio_gpu_framebuffer *virtio_gpu_fb
- = to_virtio_gpu_framebuffer(fb);
-
- drm_gem_object_put_unlocked(virtio_gpu_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(virtio_gpu_fb);
-}
-
static int
virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv,
@@ -71,20 +62,9 @@ virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips);
}
-static int
-virtio_gpu_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct virtio_gpu_framebuffer *virtio_gpu_fb =
- to_virtio_gpu_framebuffer(fb);
-
- return drm_gem_handle_create(file_priv, virtio_gpu_fb->obj, handle);
-}
-
static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
- .create_handle = virtio_gpu_framebuffer_create_handle,
- .destroy = virtio_gpu_user_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
.dirty = virtio_gpu_framebuffer_surface_dirty,
};
@@ -97,7 +77,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
int ret;
struct virtio_gpu_object *bo;
- vgfb->obj = obj;
+ vgfb->base.obj[0] = obj;
bo = gem_to_virtio_gpu_obj(obj);
@@ -105,7 +85,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
if (ret) {
- vgfb->obj = NULL;
+ vgfb->base.obj[0] = NULL;
return ret;
}
@@ -302,8 +282,6 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
drm_crtc_init_with_planes(dev, crtc, primary, cursor,
&virtio_gpu_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
- primary->crtc = crtc;
- cursor->crtc = crtc;
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -314,7 +292,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
encoder->possible_crtcs = 1 << index;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
drm_connector_register(connector);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d25c8ca224aa..65605e207bbe 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -124,7 +124,6 @@ struct virtio_gpu_output {
struct virtio_gpu_framebuffer {
struct drm_framebuffer base;
- struct drm_gem_object *obj;
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
uint32_t hw_res_handle;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 8af69ab58b89..a121b1c79522 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -46,7 +46,7 @@ static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
int bpp = fb->base.format->cpp[0];
int x2, y2;
unsigned long flags;
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj);
+ struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
if ((width <= 0) ||
(x + width > fb->base.width) ||
@@ -121,7 +121,7 @@ int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
unsigned int num_clips)
{
struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->obj);
+ struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
struct drm_clip_rect norect;
struct drm_clip_rect *clips_ptr;
int left, right, top, bottom;
@@ -305,8 +305,8 @@ static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_unregister_fbi(&vgfbdev->helper);
- if (vgfb->obj)
- vgfb->obj = NULL;
+ if (vgfb->base.obj[0])
+ vgfb->base.obj[0] = NULL;
drm_fb_helper_fini(&vgfbdev->helper);
drm_framebuffer_cleanup(&vgfb->base);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 23353521f903..00c742a441bf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -36,11 +36,6 @@ static const char *virtio_get_timeline_name(struct dma_fence *f)
return "controlq";
}
-static bool virtio_enable_signaling(struct dma_fence *f)
-{
- return true;
-}
-
static bool virtio_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
@@ -67,9 +62,7 @@ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
static const struct dma_fence_ops virtio_fence_ops = {
.get_driver_name = virtio_get_driver_name,
.get_timeline_name = virtio_get_timeline_name,
- .enable_signaling = virtio_enable_signaling,
.signaled = virtio_signaled,
- .wait = dma_fence_default_wait,
.fence_value_str = virtio_fence_value_str,
.timeline_value_str = virtio_timeline_value_str,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 71ba455af915..dc5b5b2b7aab 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -154,7 +154,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
- bo = gem_to_virtio_gpu_obj(vgfb->obj);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
if (bo->dumb) {
virtio_gpu_cmd_transfer_to_host_2d
@@ -208,7 +208,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
- bo = gem_to_virtio_gpu_obj(vgfb->obj);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
} else {
handle = 0;
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
new file mode 100644
index 000000000000..986297da51bf
--- /dev/null
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -0,0 +1,3 @@
+vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o
+
+obj-$(CONFIG_DRM_VKMS) += vkms.o
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
new file mode 100644
index 000000000000..875fca662ac0
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "vkms_drv.h"
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+{
+ struct vkms_output *output = container_of(timer, struct vkms_output,
+ vblank_hrtimer);
+ struct drm_crtc *crtc = &output->crtc;
+ int ret_overrun;
+ bool ret;
+
+ ret = drm_crtc_handle_vblank(crtc);
+ if (!ret)
+ DRM_ERROR("vkms failure on handling vblank");
+
+ ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
+ output->period_ns);
+
+ return HRTIMER_RESTART;
+}
+
+static int vkms_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+
+ drm_calc_timestamping_constants(crtc, &crtc->mode);
+
+ hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ out->vblank_hrtimer.function = &vkms_vblank_simulate;
+ out->period_ns = ktime_set(0, vblank->framedur_ns);
+ hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
+
+ return 0;
+}
+
+static void vkms_disable_vblank(struct drm_crtc *crtc)
+{
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+
+ hrtimer_cancel(&out->vblank_hrtimer);
+}
+
+bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
+ int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
+ struct vkms_output *output = &vkmsdev->output;
+
+ *vblank_time = output->vblank_hrtimer.node.expires;
+
+ return true;
+}
+
+static const struct drm_crtc_funcs vkms_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = vkms_enable_vblank,
+ .disable_vblank = vkms_disable_vblank,
+};
+
+static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ drm_crtc_vblank_on(crtc);
+}
+
+static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ drm_crtc_vblank_off(crtc);
+}
+
+static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ unsigned long flags;
+
+ if (crtc->state->event) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ else
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ crtc->state->event = NULL;
+ }
+}
+
+static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
+ .atomic_flush = vkms_crtc_atomic_flush,
+ .atomic_enable = vkms_crtc_atomic_enable,
+ .atomic_disable = vkms_crtc_atomic_disable,
+};
+
+int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary, struct drm_plane *cursor)
+{
+ int ret;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
+ &vkms_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to init CRTC\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
+
+ return ret;
+}
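The simulated vblank period above is derived from the mode by drm_calc_timestamping_constants(), which stores the frame duration in vblank->framedur_ns. Roughly, with crtc_clock in kHz:

	framedur_ns = div_u64((u64)mode->crtc_htotal * mode->crtc_vtotal *
			      1000000, mode->crtc_clock);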
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
new file mode 100644
index 000000000000..6e728b825259
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -0,0 +1,156 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_helper.h>
+#include "vkms_drv.h"
+
+#define DRIVER_NAME "vkms"
+#define DRIVER_DESC "Virtual Kernel Mode Setting"
+#define DRIVER_DATE "20180514"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static struct vkms_device *vkms_device;
+
+static const struct file_operations vkms_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = drm_gem_mmap,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .release = drm_release,
+};
+
+static const struct vm_operations_struct vkms_gem_vm_ops = {
+ .fault = vkms_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static void vkms_release(struct drm_device *dev)
+{
+ struct vkms_device *vkms = container_of(dev, struct vkms_device, drm);
+
+ platform_device_unregister(vkms->platform);
+ drm_atomic_helper_shutdown(&vkms->drm);
+ drm_mode_config_cleanup(&vkms->drm);
+ drm_dev_fini(&vkms->drm);
+}
+
+static struct drm_driver vkms_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
+ .release = vkms_release,
+ .fops = &vkms_driver_fops,
+ .dumb_create = vkms_dumb_create,
+ .dumb_map_offset = vkms_dumb_map,
+ .gem_vm_ops = &vkms_gem_vm_ops,
+ .gem_free_object_unlocked = vkms_gem_free_object,
+ .get_vblank_timestamp = vkms_get_vblank_timestamp,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static const struct drm_mode_config_funcs vkms_mode_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int vkms_modeset_init(struct vkms_device *vkmsdev)
+{
+ struct drm_device *dev = &vkmsdev->drm;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.funcs = &vkms_mode_funcs;
+ dev->mode_config.min_width = XRES_MIN;
+ dev->mode_config.min_height = YRES_MIN;
+ dev->mode_config.max_width = XRES_MAX;
+ dev->mode_config.max_height = YRES_MAX;
+
+ return vkms_output_init(vkmsdev);
+}
+
+static int __init vkms_init(void)
+{
+ int ret;
+
+ vkms_device = kzalloc(sizeof(*vkms_device), GFP_KERNEL);
+ if (!vkms_device)
+ return -ENOMEM;
+
+ ret = drm_dev_init(&vkms_device->drm, &vkms_driver, NULL);
+ if (ret)
+ goto out_free;
+
+ vkms_device->platform =
+ platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
+ if (IS_ERR(vkms_device->platform)) {
+ ret = PTR_ERR(vkms_device->platform);
+ goto out_fini;
+ }
+
+ vkms_device->drm.irq_enabled = true;
+
+ ret = drm_vblank_init(&vkms_device->drm, 1);
+ if (ret) {
+ DRM_ERROR("Failed to vblank\n");
+ goto out_fini;
+ }
+
+ ret = vkms_modeset_init(vkms_device);
+ if (ret)
+ goto out_unregister;
+
+ ret = drm_dev_register(&vkms_device->drm, 0);
+ if (ret)
+ goto out_unregister;
+
+ return 0;
+
+out_unregister:
+ platform_device_unregister(vkms_device->platform);
+
+out_fini:
+ drm_dev_fini(&vkms_device->drm);
+
+out_free:
+ kfree(vkms_device);
+ return ret;
+}
+
+static void __exit vkms_exit(void)
+{
+ if (!vkms_device) {
+ DRM_INFO("vkms_device is NULL.\n");
+ return;
+ }
+
+ drm_dev_unregister(&vkms_device->drm);
+ drm_dev_put(&vkms_device->drm);
+
+ kfree(vkms_device);
+}
+
+module_init(vkms_init);
+module_exit(vkms_exit);
+
+MODULE_AUTHOR("Haneen Mohammed <hamohammed.sa@gmail.com>");
+MODULE_AUTHOR("Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
new file mode 100644
index 000000000000..07be29f2dc44
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -0,0 +1,78 @@
+#ifndef _VKMS_DRV_H_
+#define _VKMS_DRV_H_
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_encoder.h>
+#include <linux/hrtimer.h>
+
+#define XRES_MIN 32
+#define YRES_MIN 32
+
+#define XRES_DEF 1024
+#define YRES_DEF 768
+
+#define XRES_MAX 8192
+#define YRES_MAX 8192
+
+static const u32 vkms_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+struct vkms_output {
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct hrtimer vblank_hrtimer;
+ ktime_t period_ns;
+ struct drm_pending_vblank_event *event;
+};
+
+struct vkms_device {
+ struct drm_device drm;
+ struct platform_device *platform;
+ struct vkms_output output;
+};
+
+struct vkms_gem_object {
+ struct drm_gem_object gem;
+ struct mutex pages_lock; /* Page lock used in page fault handler */
+ struct page **pages;
+};
+
+#define drm_crtc_to_vkms_output(target) \
+ container_of(target, struct vkms_output, crtc)
+
+#define drm_device_to_vkms_device(target) \
+ container_of(target, struct vkms_device, drm)
+
+/* CRTC */
+int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary, struct drm_plane *cursor);
+
+bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
+ int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq);
+
+int vkms_output_init(struct vkms_device *vkmsdev);
+
+struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev);
+
+/* Gem stuff */
+struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+ struct drm_file *file,
+ u32 *handle,
+ u64 size);
+
+int vkms_gem_fault(struct vm_fault *vmf);
+
+int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset);
+
+void vkms_gem_free_object(struct drm_gem_object *obj);
+
+#endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
new file mode 100644
index 000000000000..c7e38368602b
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/shmem_fs.h>
+
+#include "vkms_drv.h"
+
+static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
+ u64 size)
+{
+ struct vkms_gem_object *obj;
+ int ret;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ size = roundup(size, PAGE_SIZE);
+ ret = drm_gem_object_init(dev, &obj->gem, size);
+ if (ret) {
+ kfree(obj);
+ return ERR_PTR(ret);
+ }
+
+ mutex_init(&obj->pages_lock);
+
+ return obj;
+}
+
+void vkms_gem_free_object(struct drm_gem_object *obj)
+{
+ struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
+ gem);
+
+ kvfree(gem->pages);
+ mutex_destroy(&gem->pages_lock);
+ drm_gem_object_release(obj);
+ kfree(gem);
+}
+
+int vkms_gem_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct vkms_gem_object *obj = vma->vm_private_data;
+ unsigned long vaddr = vmf->address;
+ pgoff_t page_offset;
+ loff_t num_pages;
+ int ret;
+
+ page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
+ num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
+
+ if (page_offset >= num_pages)
+ return VM_FAULT_SIGBUS;
+
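+ /* Fast path: reuse a page already pinned in obj->pages. */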
+ ret = -ENOENT;
+ mutex_lock(&obj->pages_lock);
+ if (obj->pages) {
+ get_page(obj->pages[page_offset]);
+ vmf->page = obj->pages[page_offset];
+ ret = 0;
+ }
+ mutex_unlock(&obj->pages_lock);
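+ /* Slow path: page in from shmem and hand the page to the fault. */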
+ if (ret) {
+ struct page *page;
+ struct address_space *mapping;
+
+ mapping = file_inode(obj->gem.filp)->i_mapping;
+ page = shmem_read_mapping_page(mapping, page_offset);
+
+ if (!IS_ERR(page)) {
+ vmf->page = page;
+ ret = 0;
+ } else {
+ switch (PTR_ERR(page)) {
+ case -ENOSPC:
+ case -ENOMEM:
+ ret = VM_FAULT_OOM;
+ break;
+ case -EBUSY:
+ ret = VM_FAULT_RETRY;
+ break;
+ case -EFAULT:
+ case -EINVAL:
+ ret = VM_FAULT_SIGBUS;
+ break;
+ default:
+ WARN_ON(PTR_ERR(page));
+ ret = VM_FAULT_SIGBUS;
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+ struct drm_file *file,
+ u32 *handle,
+ u64 size)
+{
+ struct vkms_gem_object *obj;
+ int ret;
+
+ if (!file || !dev || !handle)
+ return ERR_PTR(-EINVAL);
+
+ obj = __vkms_gem_create(dev, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ ret = drm_gem_handle_create(file, &obj->gem, handle);
+ drm_gem_object_put_unlocked(&obj->gem);
+ if (ret) {
+ drm_gem_object_release(&obj->gem);
+ kfree(obj);
+ return ERR_PTR(ret);
+ }
+
+ return &obj->gem;
+}
+
+int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_object *gem_obj;
+ u64 pitch, size;
+
+ if (!args || !dev || !file)
+ return -EINVAL;
+
+ pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+ size = pitch * args->height;
+
+ if (!size)
+ return -EINVAL;
+
+ gem_obj = vkms_gem_create(dev, file, &args->handle, size);
+ if (IS_ERR(gem_obj))
+ return PTR_ERR(gem_obj);
+
+ args->size = gem_obj->size;
+ args->pitch = pitch;
+
+ DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
+
+ return 0;
+}
+
+int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (!obj->filp) {
+ ret = -EINVAL;
+ goto unref;
+ }
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto unref;
+
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+unref:
+ drm_gem_object_put_unlocked(obj);
+
+ return ret;
+}
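
(Editor's aside: vkms_dumb_create() and vkms_dumb_map() back the generic dumb-buffer ioctls, so userspace needs no driver-specific calls. A minimal sketch, assuming an already-open vkms fd and libdrm's include directory on the include path; error handling is elided:)

    /* Hedged sketch: allocate and map a dumb buffer via the generic ioctls. */
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/drm.h>
    #include <drm/drm_mode.h>

    static void *map_dumb_buffer(int fd, uint32_t w, uint32_t h)
    {
            struct drm_mode_create_dumb create;
            struct drm_mode_map_dumb map;

            memset(&create, 0, sizeof(create));
            create.width = w;
            create.height = h;
            create.bpp = 32; /* matches DRM_FORMAT_XRGB8888 */
            if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                    return NULL;

            memset(&map, 0, sizeof(map));
            map.handle = create.handle;
            if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
                    return NULL;

            /* The fault handler above populates pages as they are touched. */
            return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, fd, map.offset);
    }
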
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
new file mode 100644
index 000000000000..901012cb1af1
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "vkms_drv.h"
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+
+static void vkms_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs vkms_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = vkms_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_encoder_funcs vkms_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int vkms_conn_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+ drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+
+ return count;
+}
+
+static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
+ .get_modes = vkms_conn_get_modes,
+};
+
+int vkms_output_init(struct vkms_device *vkmsdev)
+{
+ struct vkms_output *output = &vkmsdev->output;
+ struct drm_device *dev = &vkmsdev->drm;
+ struct drm_connector *connector = &output->connector;
+ struct drm_encoder *encoder = &output->encoder;
+ struct drm_crtc *crtc = &output->crtc;
+ struct drm_plane *primary;
+ int ret;
+
+ primary = vkms_plane_init(vkmsdev);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ ret = vkms_crtc_init(dev, crtc, primary, NULL);
+ if (ret)
+ goto err_crtc;
+
+ ret = drm_connector_init(dev, connector, &vkms_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ if (ret) {
+ DRM_ERROR("Failed to init connector\n");
+ goto err_connector;
+ }
+
+ drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
+
+ ret = drm_connector_register(connector);
+ if (ret) {
+ DRM_ERROR("Failed to register connector\n");
+ goto err_connector_register;
+ }
+
+ ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to init encoder\n");
+ goto err_encoder;
+ }
+ encoder->possible_crtcs = 1;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ DRM_ERROR("Failed to attach connector to encoder\n");
+ goto err_attach;
+ }
+
+ drm_mode_config_reset(dev);
+
+ return 0;
+
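+ /*
+ * Unwind in the reverse order of the setup calls above; each label
+ * intentionally falls through to the cleanups below it.
+ */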
+err_attach:
+ drm_encoder_cleanup(encoder);
+
+err_encoder:
+ drm_connector_unregister(connector);
+
+err_connector_register:
+ drm_connector_cleanup(connector);
+
+err_connector:
+ drm_crtc_cleanup(crtc);
+
+err_crtc:
+ drm_plane_cleanup(primary);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
new file mode 100644
index 000000000000..9f75b1e2c1c4
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "vkms_drv.h"
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
+
+static const struct drm_plane_funcs vkms_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static void vkms_primary_plane_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+}
+
+static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
+ .atomic_update = vkms_primary_plane_update,
+};
+
+struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev)
+{
+ struct drm_device *dev = &vkmsdev->drm;
+ struct drm_plane *plane;
+ const u32 *formats;
+ int ret, nformats;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ formats = vkms_formats;
+ nformats = ARRAY_SIZE(vkms_formats);
+
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &vkms_plane_funcs,
+ formats, nformats,
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ kfree(plane);
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(plane, &vkms_primary_helper_funcs);
+
+ return plane;
+}
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 8c308dac99c5..6b28a326f8bb 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
depends on DRM && PCI && X86 && MMU
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 794cc9d5c9b0..09b2aa08363e 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
- vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+ vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
- vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+ vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
index 9ce2466a5d00..69c4253fbfbb 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
index 2dfd57c5f463..9cbba0e8ce6a 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -46,10 +47,10 @@
* the SVGA3D protocol and remain reserved; they should not be used in the
* future.
*
- * IDs between 1040 and 1999 (inclusive) are available for use by the
+ * IDs between 1040 and 2999 (inclusive) are available for use by the
* current SVGA3D protocol.
*
- * FIFO clients other than SVGA3D should stay below 1000, or at 2000
+ * FIFO clients other than SVGA3D should stay below 1000, or at 3000
* and up.
*/
@@ -89,19 +90,19 @@ typedef enum {
SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN = 1069,
SVGA_3D_CMD_SURFACE_DEFINE_V2 = 1070,
SVGA_3D_CMD_GENERATE_MIPMAPS = 1071,
- SVGA_3D_CMD_VIDEO_CREATE_DECODER = 1072,
- SVGA_3D_CMD_VIDEO_DESTROY_DECODER = 1073,
- SVGA_3D_CMD_VIDEO_CREATE_PROCESSOR = 1074,
- SVGA_3D_CMD_VIDEO_DESTROY_PROCESSOR = 1075,
- SVGA_3D_CMD_VIDEO_DECODE_START_FRAME = 1076,
- SVGA_3D_CMD_VIDEO_DECODE_RENDER = 1077,
- SVGA_3D_CMD_VIDEO_DECODE_END_FRAME = 1078,
- SVGA_3D_CMD_VIDEO_PROCESS_FRAME = 1079,
+ SVGA_3D_CMD_DEAD4 = 1072,
+ SVGA_3D_CMD_DEAD5 = 1073,
+ SVGA_3D_CMD_DEAD6 = 1074,
+ SVGA_3D_CMD_DEAD7 = 1075,
+ SVGA_3D_CMD_DEAD8 = 1076,
+ SVGA_3D_CMD_DEAD9 = 1077,
+ SVGA_3D_CMD_DEAD10 = 1078,
+ SVGA_3D_CMD_DEAD11 = 1079,
SVGA_3D_CMD_ACTIVATE_SURFACE = 1080,
SVGA_3D_CMD_DEACTIVATE_SURFACE = 1081,
SVGA_3D_CMD_SCREEN_DMA = 1082,
- SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE = 1083,
- SVGA_3D_CMD_OPEN_CONTEXT_SURFACE = 1084,
+ SVGA_3D_CMD_DEAD1 = 1083,
+ SVGA_3D_CMD_DEAD2 = 1084,
SVGA_3D_CMD_LOGICOPS_BITBLT = 1085,
SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1086,
@@ -217,7 +218,7 @@ typedef enum {
SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW = 1177,
SVGA_3D_CMD_DX_PRED_COPY_REGION = 1178,
SVGA_3D_CMD_DX_PRED_COPY = 1179,
- SVGA_3D_CMD_DX_STRETCHBLT = 1180,
+ SVGA_3D_CMD_DX_PRESENTBLT = 1180,
SVGA_3D_CMD_DX_GENMIPS = 1181,
SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE = 1182,
SVGA_3D_CMD_DX_READBACK_SUBRESOURCE = 1183,
@@ -254,7 +255,7 @@ typedef enum {
SVGA_3D_CMD_DX_READBACK_ALL_QUERY = 1214,
SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER = 1215,
SVGA_3D_CMD_DX_MOB_FENCE_64 = 1216,
- SVGA_3D_CMD_DX_BIND_SHADER_ON_CONTEXT = 1217,
+ SVGA_3D_CMD_DX_BIND_ALL_SHADER = 1217,
SVGA_3D_CMD_DX_HINT = 1218,
SVGA_3D_CMD_DX_BUFFER_UPDATE = 1219,
SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET = 1220,
@@ -262,17 +263,47 @@ typedef enum {
SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET = 1222,
/*
- * Reserve some IDs to be used for the DX11 shader types.
+ * Reserve some IDs to be used for the SM5 shader types.
*/
SVGA_3D_CMD_DX_RESERVED1 = 1223,
SVGA_3D_CMD_DX_RESERVED2 = 1224,
SVGA_3D_CMD_DX_RESERVED3 = 1225,
- SVGA_3D_CMD_DX_MAX = 1226,
- SVGA_3D_CMD_MAX = 1226,
+ SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER = 1226,
+ SVGA_3D_CMD_DX_MAX = 1227,
+
+ SVGA_3D_CMD_SCREEN_COPY = 1227,
+
+ /*
+ * Reserve some IDs to be used for video.
+ */
+ SVGA_3D_CMD_VIDEO_RESERVED1 = 1228,
+ SVGA_3D_CMD_VIDEO_RESERVED2 = 1229,
+ SVGA_3D_CMD_VIDEO_RESERVED3 = 1230,
+ SVGA_3D_CMD_VIDEO_RESERVED4 = 1231,
+ SVGA_3D_CMD_VIDEO_RESERVED5 = 1232,
+ SVGA_3D_CMD_VIDEO_RESERVED6 = 1233,
+ SVGA_3D_CMD_VIDEO_RESERVED7 = 1234,
+ SVGA_3D_CMD_VIDEO_RESERVED8 = 1235,
+
+ SVGA_3D_CMD_GROW_OTABLE = 1236,
+ SVGA_3D_CMD_DX_GROW_COTABLE = 1237,
+ SVGA_3D_CMD_INTRA_SURFACE_COPY = 1238,
+
+ SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 = 1239,
+
+ SVGA_3D_CMD_DX_RESOLVE_COPY = 1240,
+ SVGA_3D_CMD_DX_PRED_RESOLVE_COPY = 1241,
+ SVGA_3D_CMD_DX_PRED_CONVERT_REGION = 1242,
+ SVGA_3D_CMD_DX_PRED_CONVERT = 1243,
+ SVGA_3D_CMD_WHOLE_SURFACE_COPY = 1244,
+
+ SVGA_3D_CMD_MAX = 1245,
SVGA_3D_CMD_FUTURE_MAX = 3000
} SVGAFifo3dCmdId;
+#define SVGA_NUM_3D_CMD (SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)
+
/*
* FIFO command format definitions:
*/
@@ -301,7 +332,7 @@ typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurface1Flags surfaceFlags;
SVGA3dSurfaceFormat format;
/*
* If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
@@ -327,7 +358,7 @@ typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurface1Flags surfaceFlags;
SVGA3dSurfaceFormat format;
/*
* If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
@@ -459,6 +490,28 @@ struct {
#include "vmware_pack_end.h"
SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
+/*
+ * Perform a surface copy within the same image.
+ * The src/dest boxes are allowed to overlap.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceImageId surface;
+ SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdIntraSurfaceCopy; /* SVGA_3D_CMD_INTRA_SURFACE_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 srcSid;
+ uint32 destSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWholeSurfaceCopy; /* SVGA_3D_CMD_WHOLE_SURFACE_COPY */
+
typedef
#include "vmware_pack_begin.h"
struct {
@@ -772,6 +825,17 @@ struct {
#include "vmware_pack_end.h"
SVGA3dVertexElement;
+/*
+ * Should the vertex element respect the stream value? Setting the high bit
+ * of the stream indicates that the stream should be respected. If the high
+ * bit is not set, the stream is ignored and replaced by the index of the
+ * vertex element currently being considered.
+ *
+ * All guests should set this bit and correctly specify the stream going
+ * forward.
+ */
+#define SVGA3D_VERTEX_ELEMENT_RESPECT_STREAM (1 << 7)
+
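
(Editor's aside: because the flag doubles as a validity marker for the whole field, a guest that really means "stream N" must OR it in. A one-line sketch; `elem` and `stream_index` are illustrative names, not from this header:)

    /* Hedged sketch: mark the stream index as meaningful. */
    elem.stream = stream_index | SVGA3D_VERTEX_ELEMENT_RESPECT_STREAM;
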
typedef
#include "vmware_pack_begin.h"
struct {
@@ -1102,8 +1166,6 @@ struct {
#include "vmware_pack_end.h"
SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
-
-
typedef
#include "vmware_pack_begin.h"
struct {
@@ -1147,38 +1209,6 @@ struct SVGA3dCmdScreenDMA {
SVGA3dCmdScreenDMA; /* SVGA_3D_CMD_SCREEN_DMA */
/*
- * Set Unity Surface Cookie
- *
- * Associates the supplied cookie with the surface id for use with
- * Unity. This cookie is a hint from guest to host, there is no way
- * for the guest to readback the cookie and the host is free to drop
- * the cookie association at will. The default value for the cookie
- * on all surfaces is 0.
- */
-
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdSetUnitySurfaceCookie {
- uint32 sid;
- uint64 cookie;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdSetUnitySurfaceCookie; /* SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE */
-
-/*
- * Open a context-specific surface in a non-context-specific manner.
- */
-
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdOpenContextSurface {
- uint32 sid;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdOpenContextSurface; /* SVGA_3D_CMD_OPEN_CONTEXT_SURFACE */
-
-
-/*
* Logic ops
*/
@@ -1324,7 +1354,7 @@ typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dSurfaceFormat format;
- SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurface1Flags surface1Flags;
uint32 numMipLevels;
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
@@ -1332,7 +1362,11 @@ struct {
SVGAMobId mobid;
uint32 arraySize;
uint32 mobPitch;
- uint32 pad[5];
+ SVGA3dSurface2Flags surface2Flags;
+ uint8 multisamplePattern;
+ uint8 qualityLevel;
+ uint8 pad0[2];
+ uint32 pad1[3];
}
#include "vmware_pack_end.h"
SVGAOTableSurfaceEntry;
@@ -1360,7 +1394,8 @@ struct {
SVGAOTableShaderEntry;
#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry))
-#define SVGA_STFLAG_PRIMARY (1 << 0)
+#define SVGA_STFLAG_PRIMARY (1 << 0)
+#define SVGA_STFLAG_RESERVED (1 << 1) /* Added with cap SVGA_CAP_HP_CMD_QUEUE */
typedef uint32 SVGAScreenTargetFlags;
typedef
@@ -1528,6 +1563,25 @@ struct {
#include "vmware_pack_end.h"
SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
+/*
+ * Guests using SVGA_3D_CMD_GROW_OTABLE are promising that
+ * the new OTable contains the same contents as the old one, except possibly
+ * for some new invalid entries at the end.
+ *
+ * (Otherwise, guests should use one of the SetOTableBase commands.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAOTableType type;
+ PPN64 baseAddress;
+ uint32 sizeInBytes;
+ uint32 validSizeInBytes;
+ SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGrowOTable; /* SVGA_3D_CMD_GROW_OTABLE */
+
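
(Editor's aside: under that promise, the guest grows a table by handing the device a larger backing allocation while reporting how much of it is already valid. A minimal sketch of filling the command; the FIFO-reservation plumbing is omitted, and the table type, PPN variable, and page-table depth are assumptions:)

    /* Hedged sketch: grow an object table in place. */
    SVGA3dCmdGrowOTable cmd;

    cmd.type = SVGA_OTABLE_SURFACE;        /* table being grown (assumed) */
    cmd.baseAddress = new_base_ppn;        /* PPN64 of the larger copy */
    cmd.sizeInBytes = new_size;            /* full size of the new table */
    cmd.validSizeInBytes = old_size;       /* bytes carried over intact */
    cmd.ptDepth = SVGA3D_MOBFMT_PTDEPTH_1; /* page-table depth (assumed) */
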
typedef
#include "vmware_pack_begin.h"
struct {
@@ -1615,7 +1669,7 @@ typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDefineGBSurface {
uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurface1Flags surfaceFlags;
SVGA3dSurfaceFormat format;
uint32 numMipLevels;
uint32 multisampleCount;
@@ -1626,6 +1680,45 @@ struct SVGA3dCmdDefineGBSurface {
SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
/*
+ * Defines a guest-backed surface, adding the arraySize field.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v2 {
+ uint32 sid;
+ SVGA3dSurface1Flags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+ uint32 arraySize;
+ uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v2; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
+
+/*
+ * Defines a guest-backed surface, adding the larger flags.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v3 {
+ uint32 sid;
+ SVGA3dSurfaceAllFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dMSPattern multisamplePattern;
+ SVGA3dMSQualityLevel qualityLevel;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+ uint32 arraySize;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v3; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 */
+
+/*
* Destroy a guest-backed surface.
*/
@@ -1672,7 +1765,7 @@ SVGA3dCmdBindGBSurfaceWithPitch; /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */
typedef
#include "vmware_pack_begin.h"
-struct{
+struct SVGA3dCmdCondBindGBSurface {
uint32 sid;
SVGAMobId testMobid;
SVGAMobId mobid;
@@ -2066,6 +2159,26 @@ struct {
uint32 mobOffset;
}
#include "vmware_pack_end.h"
-SVGA3dCmdGBMobFence; /* SVGA_3D_CMD_GB_MOB_FENCE*/
+SVGA3dCmdGBMobFence; /* SVGA_3D_CMD_GB_MOB_FENCE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 stid;
+ SVGA3dSurfaceImageId dest;
+
+ uint32 statusMobId;
+ uint32 statusMobOffset;
+
+ /* Reserved fields */
+ uint32 mustBeInvalidId;
+ uint32 mustBeZero;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdScreenCopy; /* SVGA_3D_CMD_SCREEN_COPY */
+
+#define SVGA_SCREEN_COPY_STATUS_FAILURE 0x00
+#define SVGA_SCREEN_COPY_STATUS_SUCCESS 0x01
+#define SVGA_SCREEN_COPY_STATUS_INVALID 0xFFFFFFFF
#endif /* _SVGA3D_CMD_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
index c18b663f360f..f256560049bf 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -229,9 +230,9 @@ typedef enum {
SVGA3D_DEVCAP_DEAD2 = 94,
/*
- * Does the device support the DX commands?
+ * Does the device support DXContexts?
*/
- SVGA3D_DEVCAP_DX = 95,
+ SVGA3D_DEVCAP_DXCONTEXT = 95,
/*
* What is the maximum size of a texture array?
@@ -241,21 +242,47 @@ typedef enum {
SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE = 96,
/*
- * What is the maximum number of vertex buffers that can
- * be used in the DXContext inputAssembly?
+ * What is the maximum number of vertex buffers or vertex input registers
+ * that can be expected to work correctly with a DXContext?
+ *
+ * The guest is allowed to set up to SVGA3D_DX_MAX_VERTEXBUFFERS, but
+ * anything in excess of this cap is not guaranteed to render correctly.
+ *
+ * Similarly, the guest can set up to SVGA3D_DX_MAX_VERTEXINPUTREGISTERS
+ * input registers without the SVGA3D_DEVCAP_SM4_1 cap, or
+ * SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS with the SVGA3D_DEVCAP_SM4_1,
+ * but only the registers up to this cap value are guaranteed to render
+ * correctly.
+ *
+ * If guest drivers are able to expose a lower limit, it is recommended
+ * that they clamp to this value. Otherwise, the host will make a best
+ * effort on a case-by-case basis when guests exceed it (see the
+ * clamping sketch below).
*/
SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS = 97,
/*
- * What is the maximum number of constant buffers
- * that can be expected to work correctly with a
- * DX context?
+ * What is the maximum number of constant buffers that can be expected to
+ * work correctly with a DX context?
+ *
+ * The guest is allowed to set up to SVGA3D_DX_MAX_CONSTBUFFERS, but
+ * anything in excess of this cap is not guaranteed to render correctly.
+ *
+ * If guest drivers are able to expose a lower limit, it is recommended
+ * that they clamp to this value. Otherwise, the host will make a best
+ * effort on a case-by-case basis when guests exceed it.
*/
SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS = 98,
/*
* Does the device support provoking vertex control?
- * If zero, the first vertex will always be the provoking vertex.
+ *
+ * If this cap is present, the provokingVertexLast field in the
+ * rasterizer state is enabled. (Guests can then set it to FALSE,
+ * meaning that the first vertex is the provoking vertex, or TRUE,
+ * meaning that the last vertex is the provoking vertex.)
+ *
+ * If this cap is FALSE, guests should set provokingVertexLast to
+ * FALSE; otherwise rendering behavior is undefined.
*/
SVGA3D_DEVCAP_DX_PROVOKING_VERTEX = 99,
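
(Editor's aside: the clamping guidance in the devcap comments above reduces to a min() against the queried cap value. A minimal sketch; `query_devcap()` is a hypothetical helper returning the uint32 value for a cap index:)

    /* Hedged sketch: clamp the advertised vertex-buffer limit to the cap. */
    uint32 cap = query_devcap(SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS); /* hypothetical */
    uint32 max_vbs = min_t(uint32, SVGA3D_DX_MAX_VERTEXBUFFERS, cap);
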
@@ -281,7 +308,7 @@ typedef enum {
SVGA3D_DEVCAP_DXFMT_BUMPU8V8 = 119,
SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 = 120,
SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 = 121,
- SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8 = 122,
+ SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD1 = 122,
SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 = 123,
SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 = 124,
SVGA3D_DEVCAP_DXFMT_A2R10G10B10 = 125,
@@ -320,8 +347,8 @@ typedef enum {
SVGA3D_DEVCAP_DXFMT_R32G32_SINT = 158,
SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS = 159,
SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT = 160,
- SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS = 161,
- SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT = 162,
+ SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24 = 161,
+ SVGA3D_DEVCAP_DXFMT_X32_G8X24_UINT = 162,
SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS = 163,
SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT = 164,
SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT = 165,
@@ -339,8 +366,8 @@ typedef enum {
SVGA3D_DEVCAP_DXFMT_R32_SINT = 177,
SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS = 178,
SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT = 179,
- SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS = 180,
- SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT = 181,
+ SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8 = 180,
+ SVGA3D_DEVCAP_DXFMT_X24_G8_UINT = 181,
SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS = 182,
SVGA3D_DEVCAP_DXFMT_R8G8_UNORM = 183,
SVGA3D_DEVCAP_DXFMT_R8G8_UINT = 184,
@@ -404,6 +431,17 @@ typedef enum {
SVGA3D_DEVCAP_DXFMT_BC4_UNORM = 242,
SVGA3D_DEVCAP_DXFMT_BC5_UNORM = 243,
+ /*
+ * Advertises shaderModel 4.1 support, independent blend-states,
+ * cube-map arrays, and a higher vertex input registers limit.
+ *
+ * (See documentation on SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS.)
+ */
+ SVGA3D_DEVCAP_SM41 = 244,
+
+ SVGA3D_DEVCAP_MULTISAMPLE_2X = 245,
+ SVGA3D_DEVCAP_MULTISAMPLE_4X = 246,
+
SVGA3D_DEVCAP_MAX /* This must be the last index. */
} SVGA3dDevCapIndex;
@@ -419,9 +457,7 @@ typedef enum {
* MIPS: Does the format support mip levels?
* ARRAY: Does the format support texture arrays?
* VOLUME: Does the format support having volume?
- * MULTISAMPLE_2: Does the format support 2x multisample?
- * MULTISAMPLE_4: Does the format support 4x multisample?
- * MULTISAMPLE_8: Does the format support 8x multisample?
+ * MULTISAMPLE: Does the format support multisample?
*/
#define SVGA3D_DXFMT_SUPPORTED (1 << 0)
#define SVGA3D_DXFMT_SHADER_SAMPLE (1 << 1)
@@ -432,20 +468,8 @@ typedef enum {
#define SVGA3D_DXFMT_ARRAY (1 << 6)
#define SVGA3D_DXFMT_VOLUME (1 << 7)
#define SVGA3D_DXFMT_DX_VERTEX_BUFFER (1 << 8)
-#define SVGADX_DXFMT_MULTISAMPLE_2 (1 << 9)
-#define SVGADX_DXFMT_MULTISAMPLE_4 (1 << 10)
-#define SVGADX_DXFMT_MULTISAMPLE_8 (1 << 11)
-#define SVGADX_DXFMT_MAX (1 << 12)
-
-/*
- * Convenience mask for any multisample capability.
- *
- * The multisample bits imply both load and render capability.
- */
-#define SVGA3D_DXFMT_MULTISAMPLE ( \
- SVGADX_DXFMT_MULTISAMPLE_2 | \
- SVGADX_DXFMT_MULTISAMPLE_4 | \
- SVGADX_DXFMT_MULTISAMPLE_8 )
+#define SVGA3D_DXFMT_MULTISAMPLE (1 << 9)
+#define SVGA3D_DXFMT_MAX (1 << 10)
typedef union {
Bool b;
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
index 8c5ae608cfb4..7a49c94df221 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2012-2015 VMware, Inc. All rights reserved.
+ * Copyright 2012-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -56,6 +57,16 @@ typedef uint32 SVGA3dInputClassification;
#define SVGA3D_RESOURCE_TYPE_MAX 7
typedef uint32 SVGA3dResourceType;
+#define SVGA3D_COLOR_WRITE_ENABLE_RED (1 << 0)
+#define SVGA3D_COLOR_WRITE_ENABLE_GREEN (1 << 1)
+#define SVGA3D_COLOR_WRITE_ENABLE_BLUE (1 << 2)
+#define SVGA3D_COLOR_WRITE_ENABLE_ALPHA (1 << 3)
+#define SVGA3D_COLOR_WRITE_ENABLE_ALL (SVGA3D_COLOR_WRITE_ENABLE_RED | \
+ SVGA3D_COLOR_WRITE_ENABLE_GREEN | \
+ SVGA3D_COLOR_WRITE_ENABLE_BLUE | \
+ SVGA3D_COLOR_WRITE_ENABLE_ALPHA)
+typedef uint8 SVGA3dColorWriteEnable;
+
#define SVGA3D_DEPTH_WRITE_MASK_ZERO 0
#define SVGA3D_DEPTH_WRITE_MASK_ALL 1
typedef uint8 SVGA3dDepthWriteMask;
@@ -88,17 +99,28 @@ typedef uint8 SVGA3dCullMode;
#define SVGA3D_COMPARISON_MAX 9
typedef uint8 SVGA3dComparisonFunc;
+/*
+ * SVGA3D_MULTISAMPLE_RAST_DISABLE disables MSAA for all primitives.
+ * SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE, which is supported in SM41,
+ * disables MSAA for lines only.
+ */
+#define SVGA3D_MULTISAMPLE_RAST_DISABLE 0
+#define SVGA3D_MULTISAMPLE_RAST_ENABLE 1
+#define SVGA3D_MULTISAMPLE_RAST_DX_MAX 1
+#define SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE 2
+#define SVGA3D_MULTISAMPLE_RAST_MAX 2
+typedef uint8 SVGA3dMultisampleRastEnable;
+
#define SVGA3D_DX_MAX_VERTEXBUFFERS 32
+#define SVGA3D_DX_MAX_VERTEXINPUTREGISTERS 16
+#define SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS 32
#define SVGA3D_DX_MAX_SOTARGETS 4
#define SVGA3D_DX_MAX_SRVIEWS 128
#define SVGA3D_DX_MAX_CONSTBUFFERS 16
#define SVGA3D_DX_MAX_SAMPLERS 16
-/* Id limits */
-static const uint32 SVGA3dBlendObjectCountPerContext = 4096;
-static const uint32 SVGA3dDepthStencilObjectCountPerContext = 4096;
+#define SVGA3D_DX_MAX_CONSTBUF_BINDING_SIZE (4096 * 4 * (uint32)sizeof(uint32))
-typedef uint32 SVGA3dSurfaceId;
typedef uint32 SVGA3dShaderResourceViewId;
typedef uint32 SVGA3dRenderTargetViewId;
typedef uint32 SVGA3dDepthStencilViewId;
@@ -194,20 +216,6 @@ SVGA3dCmdDXInvalidateContext; /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */
typedef
#include "vmware_pack_begin.h"
-struct SVGA3dReplyFormatData {
- uint32 formatSupport;
- uint32 msaa2xQualityLevels:5;
- uint32 msaa4xQualityLevels:5;
- uint32 msaa8xQualityLevels:5;
- uint32 msaa16xQualityLevels:5;
- uint32 msaa32xQualityLevels:5;
- uint32 pad:7;
-}
-#include "vmware_pack_end.h"
-SVGA3dReplyFormatData;
-
-typedef
-#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetSingleConstantBuffer {
uint32 slot;
SVGA3dShaderType type;
@@ -624,6 +632,28 @@ SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */
typedef
#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredConvertRegion {
+ SVGA3dSurfaceId dstSid;
+ uint32 dstSubResource;
+ SVGA3dBox destBox;
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dBox srcBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredConvertRegion; /* SVGA_3D_CMD_DX_PRED_CONVERT_REGION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredConvert {
+ SVGA3dSurfaceId dstSid;
+ SVGA3dSurfaceId srcSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredConvert; /* SVGA_3D_CMD_DX_PRED_CONVERT */
+
+typedef
+#include "vmware_pack_begin.h"
struct SVGA3dCmdDXBufferCopy {
SVGA3dSurfaceId dest;
SVGA3dSurfaceId src;
@@ -635,23 +665,57 @@ struct SVGA3dCmdDXBufferCopy {
SVGA3dCmdDXBufferCopy;
/* SVGA_3D_CMD_DX_BUFFER_COPY */
-typedef uint32 SVGA3dDXStretchBltMode;
-#define SVGADX_STRETCHBLT_LINEAR (1 << 0)
-#define SVGADX_STRETCHBLT_FORCE_SRC_SRGB (1 << 1)
+/*
+ * Perform a surface copy between a multisampled and a non-multisampled
+ * surface.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceId dstSid;
+ uint32 dstSubResource;
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dSurfaceFormat copyFormat;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXResolveCopy; /* SVGA_3D_CMD_DX_RESOLVE_COPY */
+
+/*
+ * Perform a predicated surface copy between a multisampled and a
+ * non-multisampled surface.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceId dstSid;
+ uint32 dstSubResource;
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dSurfaceFormat copyFormat;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredResolveCopy; /* SVGA_3D_CMD_DX_PRED_RESOLVE_COPY */
+
+typedef uint32 SVGA3dDXPresentBltMode;
+#define SVGADX_PRESENTBLT_LINEAR (1 << 0)
+#define SVGADX_PRESENTBLT_FORCE_SRC_SRGB (1 << 1)
+#define SVGADX_PRESENTBLT_FORCE_SRC_XRBIAS (1 << 2)
+#define SVGADX_PRESENTBLT_MODE_MAX (1 << 3)
typedef
#include "vmware_pack_begin.h"
-struct SVGA3dCmdDXStretchBlt {
+struct SVGA3dCmdDXPresentBlt {
SVGA3dSurfaceId srcSid;
uint32 srcSubResource;
SVGA3dSurfaceId dstSid;
uint32 destSubResource;
SVGA3dBox boxSrc;
SVGA3dBox boxDest;
- SVGA3dDXStretchBltMode mode;
+ SVGA3dDXPresentBltMode mode;
}
#include "vmware_pack_end.h"
-SVGA3dCmdDXStretchBlt; /* SVGA_3D_CMD_DX_STRETCHBLT */
+SVGA3dCmdDXPresentBlt; /* SVGA_3D_CMD_DX_PRESENTBLT */
typedef
#include "vmware_pack_begin.h"
@@ -662,26 +726,6 @@ struct SVGA3dCmdDXGenMips {
SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */
/*
- * Defines a resource/DX surface. Resources share the surfaceId namespace.
- *
- */
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdDefineGBSurface_v2 {
- uint32 sid;
- SVGA3dSurfaceFlags surfaceFlags;
- SVGA3dSurfaceFormat format;
- uint32 numMipLevels;
- uint32 multisampleCount;
- SVGA3dTextureFilter autogenFilter;
- SVGA3dSize size;
- uint32 arraySize;
- uint32 pad;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdDefineGBSurface_v2; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
-
-/*
* Update a sub-resource in a guest-backed resource.
* (Inform the device that the guest-contents have been updated.)
*/
@@ -724,7 +768,8 @@ SVGA3dCmdDXInvalidateSubResource; /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */
/*
* Raw byte wise transfer from a buffer surface into another surface
- * of the requested box.
+ * of the requested box. Supported if 3d is enabled and SVGA_CAP_DX
+ * is set. This command does not take a context.
*/
typedef
#include "vmware_pack_begin.h"
@@ -773,6 +818,93 @@ struct SVGA3dCmdDXSurfaceCopyAndReadback {
SVGA3dCmdDXSurfaceCopyAndReadback;
/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */
+/*
+ * SVGA_DX_HINT_NONE: Does nothing.
+ *
+ * SVGA_DX_HINT_PREFETCH_OBJECT:
+ * SVGA_DX_HINT_PREEVICT_OBJECT:
+ * Consumes a SVGAObjectRef, and hints that the host should consider
+ * fetching/evicting the specified object.
+ *
+ * An id of SVGA3D_INVALID_ID can be used if the guest isn't sure
+ * what object was affected. (For instance, if the guest knows that
+ * it is about to evict a DXShader, but doesn't know precisely which one,
+ * the device can still use this to help limit its search, or track
+ * how many page-outs have happened.)
+ *
+ * SVGA_DX_HINT_PREFETCH_COBJECT:
+ * SVGA_DX_HINT_PREEVICT_COBJECT:
+ * Same as the above, except they consume an SVGACObjectRef.
+ */
+typedef uint32 SVGADXHintId;
+#define SVGA_DX_HINT_NONE 0
+#define SVGA_DX_HINT_PREFETCH_OBJECT 1
+#define SVGA_DX_HINT_PREEVICT_OBJECT 2
+#define SVGA_DX_HINT_PREFETCH_COBJECT 3
+#define SVGA_DX_HINT_PREEVICT_COBJECT 4
+#define SVGA_DX_HINT_MAX 5
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAObjectRef {
+ SVGAOTableType type;
+ uint32 id;
+}
+#include "vmware_pack_end.h"
+SVGAObjectRef;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACObjectRef {
+ SVGACOTableType type;
+ uint32 cid;
+ uint32 id;
+}
+#include "vmware_pack_end.h"
+SVGACObjectRef;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXHint {
+ SVGADXHintId hintId;
+
+ /*
+ * Followed by variable sized data depending on the hintId.
+ */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXHint;
+/* SVGA_3D_CMD_DX_HINT */
+
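
(Editor's aside: since the payload after hintId depends on the hint, a guest composes the command as the fixed header followed by the matching ref struct. A minimal sketch for the prefetch-object case; the stack composition, table type, and field values are illustrative, and real code would reserve FIFO space instead:)

    /* Hedged sketch: a prefetch-object hint is the header plus one ref. */
    struct {
            SVGA3dCmdDXHint hdr;
            SVGAObjectRef ref;
    } hint;

    hint.hdr.hintId = SVGA_DX_HINT_PREFETCH_OBJECT;
    hint.ref.type = SVGA_OTABLE_DXCONTEXT; /* table type is an assumption */
    hint.ref.id = SVGA3D_INVALID_ID;       /* the "not sure which" case */
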
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBufferUpdate {
+ SVGA3dSurfaceId sid;
+ uint32 x;
+ uint32 width;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBufferUpdate;
+/* SVGA_3D_CMD_DX_BUFFER_UPDATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetConstantBufferOffset {
+ uint32 slot;
+ uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetConstantBufferOffset;
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetVSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetPSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetGSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET */
+
typedef
#include "vmware_pack_begin.h"
@@ -789,7 +921,7 @@ struct {
uint32 firstArraySlice;
uint32 mipLevels;
uint32 arraySize;
- } tex;
+ } tex; /* 1d, 2d, 3d, cube */
struct {
uint32 firstElement;
uint32 numElements;
@@ -844,6 +976,7 @@ struct SVGA3dRenderTargetViewDesc {
struct {
uint32 firstElement;
uint32 numElements;
+ uint32 padding0;
} buffer;
struct {
uint32 mipSlice;
@@ -964,9 +1097,6 @@ SVGA3dInputElementDesc;
typedef
#include "vmware_pack_begin.h"
struct {
- /*
- * XXX: How many of these can there be?
- */
uint32 elid;
uint32 numDescs;
SVGA3dInputElementDesc desc[32];
@@ -1007,7 +1137,7 @@ struct SVGA3dDXBlendStatePerRT {
uint8 srcBlendAlpha;
uint8 destBlendAlpha;
uint8 blendOpAlpha;
- uint8 renderTargetWriteMask;
+ SVGA3dColorWriteEnable renderTargetWriteMask;
uint8 logicOpEnable;
uint8 logicOp;
uint16 pad0;
@@ -1125,7 +1255,7 @@ struct {
float slopeScaledDepthBias;
uint8 depthClipEnable;
uint8 scissorEnable;
- uint8 multisampleEnable;
+ SVGA3dMultisampleRastEnable multisampleEnable;
uint8 antialiasedLineEnable;
float lineWidth;
uint8 lineStippleEnable;
@@ -1152,7 +1282,7 @@ struct SVGA3dCmdDXDefineRasterizerState {
float slopeScaledDepthBias;
uint8 depthClipEnable;
uint8 scissorEnable;
- uint8 multisampleEnable;
+ SVGA3dMultisampleRastEnable multisampleEnable;
uint8 antialiasedLineEnable;
float lineWidth;
uint8 lineStippleEnable;
@@ -1222,21 +1352,6 @@ struct SVGA3dCmdDXDestroySamplerState {
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
-/*
- */
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dSignatureEntry {
- uint8 systemValue;
- uint8 reg; /* register is a reserved word */
- uint16 mask;
- uint8 registerComponentType;
- uint8 minPrecision;
- uint16 pad0;
-}
-#include "vmware_pack_end.h"
-SVGA3dSignatureEntry;
-
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineShader {
@@ -1254,12 +1369,7 @@ struct SVGACOTableDXShaderEntry {
uint32 sizeInBytes;
uint32 offsetInBytes;
SVGAMobId mobid;
- uint32 numInputSignatureEntries;
- uint32 numOutputSignatureEntries;
-
- uint32 numPatchConstantSignatureEntries;
-
- uint32 pad;
+ uint32 pad[4];
}
#include "vmware_pack_end.h"
SVGACOTableDXShaderEntry;
@@ -1283,6 +1393,25 @@ struct SVGA3dCmdDXBindShader {
#include "vmware_pack_end.h"
SVGA3dCmdDXBindShader; /* SVGA_3D_CMD_DX_BIND_SHADER */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindAllShader {
+ uint32 cid;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindAllShader; /* SVGA_3D_CMD_DX_BIND_ALL_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXCondBindAllShader {
+ uint32 cid;
+ SVGAMobId testMobid;
+ SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXCondBindAllShader; /* SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER */
+
/*
* The maximum number of streamout decl's in each streamout entry.
*/
@@ -1356,7 +1485,6 @@ SVGA3dCmdDXMobFence64; /* SVGA_3D_CMD_DX_MOB_FENCE_64 */
*
* This command allows the guest to bind a mob to a context-object table.
*/
-
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetCOTable {
@@ -1368,6 +1496,26 @@ struct SVGA3dCmdDXSetCOTable {
#include "vmware_pack_end.h"
SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */
+/*
+ * Guests using SVGA_3D_CMD_DX_GROW_COTABLE are promising that
+ * the new COTable contains the same contents as the old one, except possibly
+ * for some new invalid entries at the end.
+ *
+ * If an old cotable mob is bound, it must also still be valid.
+ *
+ * (Otherwise, guests should use the DXSetCOTableBase command.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXGrowCOTable {
+ uint32 cid;
+ uint32 mobid;
+ SVGACOTableType type;
+ uint32 validSizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXGrowCOTable; /* SVGA_3D_CMD_DX_GROW_COTABLE */
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXReadbackCOTable {
@@ -1471,7 +1619,7 @@ struct SVGADXContextMobFormat {
SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
- uint32 pad7[381];
+ uint32 pad7[380];
}
#include "vmware_pack_end.h"
SVGADXContextMobFormat;
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
index a1c36877ad55..b22a67f15660 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -62,7 +63,9 @@
* Maximum size in dwords of shader text the SVGA device will allow.
* Currently 8 MB.
*/
-#define SVGA3D_MAX_SHADER_MEMORY (8 * 1024 * 1024 / sizeof(uint32))
+#define SVGA3D_MAX_SHADER_MEMORY_BYTES (8 * 1024 * 1024)
+#define SVGA3D_MAX_SHADER_MEMORY (SVGA3D_MAX_SHADER_MEMORY_BYTES / \
+ sizeof(uint32))
#define SVGA3D_MAX_CLIP_PLANES 6
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
index b44ce648f592..bdfc404c91e3 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index babe7cb84fc2..f2bfd3d80598 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2008-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2008-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,189 +25,355 @@
*
**************************************************************************/
-#include <linux/kernel.h>
-
-#ifdef __KERNEL__
-
-#include <drm/vmwgfx_drm.h>
-#define surf_size_struct struct drm_vmw_size
-
-#else /* __KERNEL__ */
+/*
+ * svga3d_surfacedefs.h --
+ *
+ * Surface definitions and inlineable utilities for SVGA3d.
+ */
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
-#endif /* ARRAY_SIZE */
+#ifndef _SVGA3D_SURFACEDEFS_H_
+#define _SVGA3D_SURFACEDEFS_H_
-#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
-#define surf_size_struct SVGA3dSize
-#define u32 uint32
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_MODULE
+#include "includeCheck.h"
-#endif /* __KERNEL__ */
+#include <linux/kernel.h>
+#include <drm/vmwgfx_drm.h>
#include "svga3d_reg.h"
+#define surf_size_struct struct drm_vmw_size
+
/*
- * enum svga3d_block_desc describes the active data channels in a block.
- *
- * There can be at-most four active channels in a block:
- * 1. Red, bump W, luminance and depth are stored in the first channel.
- * 2. Green, bump V and stencil are stored in the second channel.
- * 3. Blue and bump U are stored in the third channel.
- * 4. Alpha and bump Q are stored in the fourth channel.
- *
- * Block channels can be used to store compressed and buffer data:
- * 1. For compressed formats, only the data channel is used and its size
- * is equal to that of a singular block in the compression scheme.
- * 2. For buffer formats, only the data channel is used and its size is
- * exactly one byte in length.
- * 3. In each case the bit depth represent the size of a singular block.
- *
- * Note: Compressed and IEEE formats do not use the bitMask structure.
+ * enum svga3d_block_desc - describes generic properties of formats.
*/
-
enum svga3d_block_desc {
- SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
- SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with red channel
- data */
- SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
- data */
- SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
- U and V */
- SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
- data */
- SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
- data */
- SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
- channel */
- SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with blue channel
- data */
- SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
- data */
- SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
- data */
- SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
- data */
- SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
- SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
- channel */
- SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
- data */
- SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
- data */
- SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
- data depending on the
- compression method used */
- SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
- floating point
- representation in
- all channels */
- SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
- data. */
- SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
- SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
- SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
- SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
- SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
- e.g., NV12. */
- SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
- Y, U, V, e.g., YV12. */
-
- SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
- SVGA3DBLOCKDESC_GREEN,
- SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
- SVGA3DBLOCKDESC_BLUE,
- SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_SRGB,
- SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_ALPHA,
- SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
- SVGA3DBLOCKDESC_SRGB,
+ /* Nothing special can be said about this format. */
+ SVGA3DBLOCKDESC_NONE = 0,
+
+ /* Format contains Blue/U data */
+ SVGA3DBLOCKDESC_BLUE = 1 << 0,
+ SVGA3DBLOCKDESC_W = 1 << 0,
+ SVGA3DBLOCKDESC_BUMP_L = 1 << 0,
+
+ /* Format contains Green/V data */
+ SVGA3DBLOCKDESC_GREEN = 1 << 1,
+ SVGA3DBLOCKDESC_V = 1 << 1,
+
+ /* Format contains Red/W/Luminance data */
+ SVGA3DBLOCKDESC_RED = 1 << 2,
+ SVGA3DBLOCKDESC_U = 1 << 2,
+ SVGA3DBLOCKDESC_LUMINANCE = 1 << 2,
+
+ /* Format contains Alpha/Q data */
+ SVGA3DBLOCKDESC_ALPHA = 1 << 3,
+ SVGA3DBLOCKDESC_Q = 1 << 3,
+
+ /* Format is a buffer */
+ SVGA3DBLOCKDESC_BUFFER = 1 << 4,
+
+ /* Format is compressed */
+ SVGA3DBLOCKDESC_COMPRESSED = 1 << 5,
+
+ /* Format uses IEEE floating point */
+ SVGA3DBLOCKDESC_FP = 1 << 6,
+
+ /* Three separate blocks store data. */
+ SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 7,
+
+ /* 2 planes of Y, UV, e.g., NV12. */
+ SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 8,
+
+ /* 3 planes of separate Y, U, V, e.g., YV12. */
+ SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 9,
+
+ /* Block with a stencil channel */
+ SVGA3DBLOCKDESC_STENCIL = 1 << 11,
+
+ /* Typeless format */
+ SVGA3DBLOCKDESC_TYPELESS = 1 << 12,
+
+ /* Channels are signed integers */
+ SVGA3DBLOCKDESC_SINT = 1 << 13,
+
+ /* Channels are unsigned integers */
+ SVGA3DBLOCKDESC_UINT = 1 << 14,
+
+ /* Channels are normalized (when sampling) */
+ SVGA3DBLOCKDESC_NORM = 1 << 15,
+
+ /* Channels are in SRGB */
+ SVGA3DBLOCKDESC_SRGB = 1 << 16,
+
+ /* Shared exponent */
+ SVGA3DBLOCKDESC_EXP = 1 << 17,
+
+ /* Format contains color data. */
+ SVGA3DBLOCKDESC_COLOR = 1 << 18,
+ /* Format contains depth data. */
+ SVGA3DBLOCKDESC_DEPTH = 1 << 19,
+ /* Format contains bump data. */
+ SVGA3DBLOCKDESC_BUMP = 1 << 20,
+
+ /* Format contains YUV video data. */
+ SVGA3DBLOCKDESC_YUV_VIDEO = 1 << 21,
+
+ /* For mixed unsigned/signed formats. */
+ SVGA3DBLOCKDESC_MIXED = 1 << 22,
+
+ /* For distinguishing CxV8U8. */
+ SVGA3DBLOCKDESC_CX = 1 << 23,
+
+ /* Different compressed format groups. */
+ SVGA3DBLOCKDESC_BC1 = 1 << 24,
+ SVGA3DBLOCKDESC_BC2 = 1 << 25,
+ SVGA3DBLOCKDESC_BC3 = 1 << 26,
+ SVGA3DBLOCKDESC_BC4 = 1 << 27,
+ SVGA3DBLOCKDESC_BC5 = 1 << 28,
+
+ SVGA3DBLOCKDESC_A_UINT = SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_A_UNORM = SVGA3DBLOCKDESC_A_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_R_UINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_R_UNORM = SVGA3DBLOCKDESC_R_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_R_SINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_R_SNORM = SVGA3DBLOCKDESC_R_SINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_G_UINT = SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RG_UINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RG_UNORM = SVGA3DBLOCKDESC_RG_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RG_SINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RG_SNORM = SVGA3DBLOCKDESC_RG_SINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RGB_UINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGB_SINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGB_UNORM = SVGA3DBLOCKDESC_RGB_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RGB_UNORM_SRGB = SVGA3DBLOCKDESC_RGB_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_RGBA_UINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGBA_UNORM = SVGA3DBLOCKDESC_RGBA_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RGBA_UNORM_SRGB = SVGA3DBLOCKDESC_RGBA_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_RGBA_SINT = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGBA_SNORM = SVGA3DBLOCKDESC_RGBA_SINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_FP |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
- SVGA3DBLOCKDESC_V,
+ SVGA3DBLOCKDESC_V |
+ SVGA3DBLOCKDESC_BUMP,
SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
- SVGA3DBLOCKDESC_LUMINANCE,
+ SVGA3DBLOCKDESC_BUMP_L |
+ SVGA3DBLOCKDESC_MIXED |
+ SVGA3DBLOCKDESC_BUMP,
SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
- SVGA3DBLOCKDESC_W,
+ SVGA3DBLOCKDESC_W |
+ SVGA3DBLOCKDESC_BUMP,
SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
- SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_MIXED |
+ SVGA3DBLOCKDESC_BUMP,
SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
- SVGA3DBLOCKDESC_V |
- SVGA3DBLOCKDESC_W |
- SVGA3DBLOCKDESC_Q,
- SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
- SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_V |
+ SVGA3DBLOCKDESC_W |
+ SVGA3DBLOCKDESC_Q |
+ SVGA3DBLOCKDESC_BUMP,
+ SVGA3DBLOCKDESC_L_UNORM = SVGA3DBLOCKDESC_LUMINANCE |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_NORM |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_LA_UNORM = SVGA3DBLOCKDESC_LUMINANCE |
+ SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_NORM |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
- SVGA3DBLOCKDESC_IEEE_FP,
+ SVGA3DBLOCKDESC_FP |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
- SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
- SVGA3DBLOCKDESC_BLUE,
- SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
- SVGA3DBLOCKDESC_ALPHA,
- SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
- SVGA3DBLOCKDESC_STENCIL,
- SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
- SVGA3DBLOCKDESC_Y,
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_YUV_VIDEO |
+ SVGA3DBLOCKDESC_COLOR,
SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
- SVGA3DBLOCKDESC_Y |
- SVGA3DBLOCKDESC_U_VIDEO |
- SVGA3DBLOCKDESC_V_VIDEO,
- SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
- SVGA3DBLOCKDESC_EXP,
- SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
- SVGA3DBLOCKDESC_SRGB,
- SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
- SVGA3DBLOCKDESC_2PLANAR_YUV,
- SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
- SVGA3DBLOCKDESC_3PLANAR_YUV,
+ SVGA3DBLOCKDESC_YUV_VIDEO |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_RGB_EXP = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN |
+ SVGA3DBLOCKDESC_BLUE |
+ SVGA3DBLOCKDESC_EXP |
+ SVGA3DBLOCKDESC_COLOR,
+
+ SVGA3DBLOCKDESC_COMP_TYPELESS = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_TYPELESS,
+ SVGA3DBLOCKDESC_COMP_UNORM = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_UINT |
+ SVGA3DBLOCKDESC_NORM |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_COMP_SNORM = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_NORM |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_COMP_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_BC1_COMP_TYPELESS = SVGA3DBLOCKDESC_BC1 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC1_COMP_UNORM = SVGA3DBLOCKDESC_BC1 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC1_COMP_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_BC2_COMP_TYPELESS = SVGA3DBLOCKDESC_BC2 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC2_COMP_UNORM = SVGA3DBLOCKDESC_BC2 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC2_COMP_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_BC3_COMP_TYPELESS = SVGA3DBLOCKDESC_BC3 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC3_COMP_UNORM = SVGA3DBLOCKDESC_BC3 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC3_COMP_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_BC4_COMP_TYPELESS = SVGA3DBLOCKDESC_BC4 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC4_COMP_UNORM = SVGA3DBLOCKDESC_BC4 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC4_COMP_SNORM = SVGA3DBLOCKDESC_BC4 |
+ SVGA3DBLOCKDESC_COMP_SNORM,
+ SVGA3DBLOCKDESC_BC5_COMP_TYPELESS = SVGA3DBLOCKDESC_BC5 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC5_COMP_UNORM = SVGA3DBLOCKDESC_BC5 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC5_COMP_SNORM = SVGA3DBLOCKDESC_BC5 |
+ SVGA3DBLOCKDESC_COMP_SNORM,
+
+ SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_YUV_VIDEO |
+ SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_2PLANAR_YUV |
+ SVGA3DBLOCKDESC_COLOR,
+ SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_YUV_VIDEO |
+ SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_3PLANAR_YUV |
+ SVGA3DBLOCKDESC_COLOR,
+
+ SVGA3DBLOCKDESC_DEPTH_UINT = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_UINT,
+ SVGA3DBLOCKDESC_DEPTH_UNORM = SVGA3DBLOCKDESC_DEPTH_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_STENCIL,
+ SVGA3DBLOCKDESC_DS_UINT = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_STENCIL |
+ SVGA3DBLOCKDESC_UINT,
+ SVGA3DBLOCKDESC_DS_UNORM = SVGA3DBLOCKDESC_DS_UINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_DEPTH_FP = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_FP,
+
+ SVGA3DBLOCKDESC_UV_UINT = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_UINT,
+ SVGA3DBLOCKDESC_UV_SNORM = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_NORM,
+ SVGA3DBLOCKDESC_UVCX_SNORM = SVGA3DBLOCKDESC_UV_SNORM |
+ SVGA3DBLOCKDESC_CX,
+ SVGA3DBLOCKDESC_UVWQ_SNORM = SVGA3DBLOCKDESC_UVWQ |
+ SVGA3DBLOCKDESC_SINT |
+ SVGA3DBLOCKDESC_NORM,
};
-/*
- * SVGA3dSurfaceDesc describes the actual pixel data.
- *
- * This structure provides the following information:
- * 1. Block description.
- * 2. Dimensions of a block in the surface.
- * 3. Size of block in bytes.
- * 4. Bit depth of the pixel data.
- * 5. Channel bit depths and masks (if applicable).
- */
struct svga3d_channel_def {
union {
u8 blue;
- u8 u;
+ u8 w_bump;
+ u8 l_bump;
u8 uv_video;
u8 u_video;
};
union {
u8 green;
- u8 v;
u8 stencil;
+ u8 v_bump;
u8 v_video;
};
union {
u8 red;
- u8 w;
+ u8 u_bump;
u8 luminance;
- u8 y;
+ u8 y_video;
u8 depth;
u8 data;
};
union {
u8 alpha;
- u8 q;
+ u8 q_bump;
u8 exp;
};
};
+/*
+ * struct svga3d_surface_desc - describes the actual pixel data.
+ *
+ * @format: Format
+ * @block_desc: Block description
+ * @block_size: Dimensions in pixels of a block
+ * @bytes_per_block: Size of block in bytes
+ * @pitch_bytes_per_block: Size of a block in bytes for purposes of pitch
+ * @bit_depth: Channel bit depths
+ * @bit_offset: Channel bit offsets (in bits from the start of the pixel)
+ */
struct svga3d_surface_desc {
SVGA3dSurfaceFormat format;
enum svga3d_block_desc block_desc;
+
surf_size_struct block_size;
u32 bytes_per_block;
u32 pitch_bytes_per_block;
- u32 total_bit_depth;
struct svga3d_channel_def bit_depth;
struct svga3d_channel_def bit_offset;
};
@@ -215,729 +381,728 @@ struct svga3d_surface_desc {
static const struct svga3d_surface_desc svga3d_surface_descs[] = {
{SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE,
{1, 1, 1}, 0, 0,
- 0, {{0}, {0}, {0}, {0}},
+ {{0}, {0}, {0}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{5}, {6}, {5}, {0}},
+ {{5}, {6}, {5}, {0}},
{{0}, {5}, {11}, {0}}},
- {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 2, 2,
- 15, {{5}, {5}, {5}, {0}},
+ {{5}, {5}, {5}, {0}},
{{0}, {5}, {10}, {0}}},
- {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{5}, {5}, {5}, {1}},
+ {{5}, {5}, {5}, {1}},
{{0}, {5}, {10}, {15}}},
- {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{4}, {4}, {4}, {4}},
+ {{4}, {4}, {4}, {4}},
{{0}, {4}, {8}, {12}}},
- {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS,
+ {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
- {{0}, {24}, {0}, {0}}},
+ {{0}, {8}, {24}, {0}},
+ {{0}, {0}, {8}, {0}}},
- {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS,
+ {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {1}, {15}, {0}},
- {{0}, {15}, {0}, {0}}},
+ {{0}, {1}, {15}, {0}},
+ {{0}, {0}, {1}, {0}}},
- {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_LUMINANCE,
+ {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_L_UNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA,
- {1 , 1, 1}, 1, 1,
- 8, {{0}, {0}, {4}, {4}},
+ {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA_UNORM,
+ {1, 1, 1}, 1, 1,
+ {{0}, {0}, {4}, {4}},
{{0}, {0}, {0}, {4}}},
- {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_LUMINANCE,
+ {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_L_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA,
+ {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {8}, {8}},
+ {{0}, {0}, {8}, {8}},
{{0}, {0}, {0}, {8}}},
- {SVGA3D_DXT1, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT1, SVGA3DBLOCKDESC_BC1_COMP_UNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_DXT2, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT2, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_DXT3, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT3, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_DXT4, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT4, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_DXT5, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_DXT5, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {8}, {8}},
- {{0}, {0}, {0}, {8}}},
+ {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
{SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 2, 2,
- 16, {{5}, {5}, {6}, {0}},
- {{11}, {6}, {0}, {0}}},
+ {{6}, {5}, {5}, {0}},
+ {{10}, {5}, {0}, {0}}},
{SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
- {SVGA3D_BUMPL8V8U8, SVGA3DBLOCKDESC_UVL,
+ {SVGA3D_FORMAT_DEAD1, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 3, 3,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
{SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
{SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
- {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
+ {{10}, {10}, {10}, {2}},
{{0}, {10}, {20}, {30}}},
- {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{8}, {8}, {0}, {0}},
- {{8}, {0}, {0}, {0}}},
+ {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
- {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ,
+ {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ_SNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
- {{24}, {16}, {8}, {0}}},
+ {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
- {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UVCX_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{8}, {8}, {0}, {0}},
- {{8}, {0}, {0}, {0}}},
+ {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
{SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
{SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
- {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_ALPHA,
+ {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_A_UNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {0}, {8}},
+ {{0}, {0}, {0}, {8}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
{SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
{SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {24}, {0}},
- {{0}, {24}, {0}, {0}}},
+ {{0}, {0}, {24}, {0}},
+ {{0}, {0}, {8}, {0}}},
- {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV_SNORM,
{1, 1, 1}, 4, 4,
- 32, {{16}, {16}, {0}, {0}},
- {{16}, {0}, {0}, {0}}},
+ {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
- {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
- {{0}, {0}, {16}, {0}}},
+ {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
- {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
{SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV,
- {1, 1, 1}, 2, 2,
- 16, {{8}, {0}, {8}, {0}},
+ {2, 1, 1}, 4, 4,
+ {{8}, {0}, {8}, {0}},
{{0}, {0}, {8}, {0}}},
{SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV,
- {1, 1, 1}, 2, 2,
- 16, {{8}, {0}, {8}, {0}},
+ {2, 1, 1}, 4, 4,
+ {{8}, {0}, {8}, {0}},
{{8}, {0}, {0}, {0}}},
{SVGA3D_NV12, SVGA3DBLOCKDESC_NV12,
{2, 2, 1}, 6, 2,
- 48, {{0}, {0}, {48}, {0}},
+ {{0}, {0}, {48}, {0}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
- {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
- {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_UVWQ,
+ {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
- {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 12, 12,
- 96, {{32}, {32}, {32}, {0}},
+ {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
{SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
{1, 1, 1}, 12, 12,
- 96, {{32}, {32}, {32}, {0}},
+ {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
- {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB_UINT,
{1, 1, 1}, 12, 12,
- 96, {{32}, {32}, {32}, {0}},
+ {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
- {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_UVW,
+ {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_RGB_SINT,
{1, 1, 1}, 12, 12,
- 96, {{32}, {32}, {32}, {0}},
+ {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
- {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_UVWQ,
+ {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_RGBA_SNORM,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_UVWQ,
+ {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG_UINT,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_RG_SINT,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 8, 8,
- 64, {{0}, {8}, {32}, {0}},
+ {{0}, {8}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
{SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS,
{1, 1, 1}, 8, 8,
- 64, {{0}, {8}, {32}, {0}},
+ {{0}, {8}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R32_FLOAT_X8X24_TYPELESS, SVGA3DBLOCKDESC_R_FP,
+ {SVGA3D_R32_FLOAT_X8X24, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 8, 8,
- 64, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_X32_TYPELESS_G8X24_UINT, SVGA3DBLOCKDESC_GREEN,
+ {SVGA3D_X32_G8X24_UINT, SVGA3DBLOCKDESC_G_UINT,
{1, 1, 1}, 8, 8,
- 64, {{0}, {8}, {0}, {0}},
+ {{0}, {8}, {0}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
- {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
{SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
{1, 1, 1}, 4, 4,
- 32, {{10}, {11}, {11}, {0}},
- {{0}, {10}, {21}, {0}}},
+ {{10}, {11}, {11}, {0}},
+ {{22}, {11}, {0}, {0}}},
- {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+ {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_UNORM_SRGB,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
- {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
- {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_FP,
+ {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_UINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
- {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_RG_SINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
- {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_R_UINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_R_SINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
+ {{0}, {8}, {24}, {0}},
{{0}, {24}, {0}, {0}}},
- {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS,
+ {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
+ {{0}, {8}, {24}, {0}},
{{0}, {24}, {0}, {0}}},
- {SVGA3D_R24_UNORM_X8_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R24_UNORM_X8, SVGA3DBLOCKDESC_R_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {24}, {0}},
+ {{0}, {0}, {24}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_X24_TYPELESS_G8_UINT, SVGA3DBLOCKDESC_GREEN,
+ {SVGA3D_X24_G8_UINT, SVGA3DBLOCKDESC_G_UINT,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {0}, {0}},
+ {{0}, {8}, {0}, {0}},
{{0}, {24}, {0}, {0}}},
- {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG_UINT,
{1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_UV,
+ {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_RG_SINT,
{1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_R_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_R_UINT,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_U,
+ {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_R_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_U,
+ {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_R_SINT,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_R_UNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_R_UINT,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_U,
+ {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_R_SNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_U,
+ {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_R_SINT,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_P8, SVGA3DBLOCKDESC_RED,
+ {SVGA3D_P8, SVGA3DBLOCKDESC_NONE,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {8}, {0}},
+ {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGBE,
+ {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGB_EXP,
{1, 1, 1}, 4, 4,
- 32, {{9}, {9}, {9}, {5}},
+ {{9}, {9}, {9}, {5}},
{{18}, {9}, {0}, {27}}},
- {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
- {{0}, {8}, {0}, {0}}},
+ {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_NONE,
+ {2, 1, 1}, 4, 4,
+ {{0}, {8}, {8}, {0}},
+ {{0}, {0}, {8}, {0}}},
- {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_RG,
- {1, 1, 1}, 2, 2,
- 16, {{0}, {8}, {8}, {0}},
+ {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_NONE,
+ {2, 1, 1}, 4, 4,
+ {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
- {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_BC1_COMP_TYPELESS,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_BC2_COMP_TYPELESS,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_BC3_COMP_TYPELESS,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_BC4_COMP_TYPELESS,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_ATI1, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_ATI1, SVGA3DBLOCKDESC_BC4_COMP_UNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_BC4_COMP_SNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_BC5_COMP_TYPELESS,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_ATI2, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_ATI2, SVGA3DBLOCKDESC_BC5_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_BC5_COMP_SNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
- {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+ {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_UNORM_SRGB,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_SRGB,
+ {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_UNORM_SRGB,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
- {{0}, {24}, {0}, {0}}},
+ {{0}, {0}, {24}, {0}},
+ {{0}, {0}, {8}, {0}}},
- {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS,
+ {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {8}, {24}, {0}},
- {{0}, {24}, {0}, {0}}},
+ {{0}, {8}, {24}, {0}},
+ {{0}, {0}, {8}, {0}}},
{SVGA3D_YV12, SVGA3DBLOCKDESC_YV12,
{2, 2, 1}, 6, 2,
- 48, {{0}, {0}, {48}, {0}},
+ {{0}, {0}, {48}, {0}},
{{0}, {0}, {0}, {0}}},
{SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 16, 16,
- 128, {{32}, {32}, {32}, {32}},
+ {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
{SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
- {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 8, 8,
- 64, {{16}, {16}, {16}, {16}},
+ {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
{SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 8, 8,
- 64, {{0}, {32}, {32}, {0}},
+ {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
- {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{10}, {10}, {10}, {2}},
- {{0}, {10}, {20}, {30}}},
+ {{10}, {10}, {10}, {2}},
+ {{20}, {10}, {0}, {30}}},
- {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA_SNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
- {{24}, {16}, {8}, {0}}},
+ {{8}, {8}, {8}, {8}},
+ {{16}, {8}, {0}, {24}}},
{SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
- {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{0}, {16}, {16}, {0}},
- {{0}, {0}, {16}, {0}}},
+ {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
- {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG_SNORM,
{1, 1, 1}, 4, 4,
- 32, {{16}, {16}, {0}, {0}},
- {{16}, {0}, {0}, {0}}},
+ {{0}, {16}, {16}, {0}},
+ {{0}, {16}, {0}, {0}}},
{SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 4, 4,
- 32, {{0}, {0}, {32}, {0}},
+ {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG,
+ {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG_SNORM,
{1, 1, 1}, 2, 2,
- 16, {{8}, {8}, {0}, {0}},
- {{8}, {0}, {0}, {0}}},
+ {{0}, {8}, {8}, {0}},
+ {{0}, {8}, {0}, {0}}},
{SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH,
+ {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{0}, {0}, {16}, {0}},
+ {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_ALPHA,
+ {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_A_UNORM,
{1, 1, 1}, 1, 1,
- 8, {{0}, {0}, {0}, {8}},
+ {{0}, {0}, {0}, {8}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_BC1_COMP_UNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{5}, {6}, {5}, {0}},
+ {{5}, {6}, {5}, {0}},
{{0}, {5}, {11}, {0}}},
- {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 2, 2,
- 16, {{5}, {5}, {5}, {1}},
+ {{5}, {5}, {5}, {1}},
{{0}, {5}, {10}, {15}}},
- {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+ {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
{1, 1, 1}, 4, 4,
- 32, {{8}, {8}, {8}, {8}},
+ {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB,
+ {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB_UNORM,
{1, 1, 1}, 4, 4,
- 24, {{8}, {8}, {8}, {0}},
+ {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
- {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_BC4_COMP_UNORM,
{4, 4, 1}, 8, 8,
- 64, {{0}, {0}, {64}, {0}},
+ {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+ {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_BC5_COMP_UNORM,
{4, 4, 1}, 16, 16,
- 128, {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
-
};
static inline u32 clamped_umul32(u32 a, u32 b)
@@ -946,6 +1111,10 @@ static inline u32 clamped_umul32(u32 a, u32 b)
return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
}
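
A two-line illustration of the saturating behaviour (values are examples only):

	/* 65536 * 65536 overflows u32; the clamped multiply returns
	 * 0xffffffff instead of wrapping to 0. */
	u32 n = clamped_umul32(65536u, 65536u);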
+/**
+ * svga3dsurface_get_desc - Look up the appropriate SVGA3dSurfaceDesc for the
+ * given format.
+ */
static inline const struct svga3d_surface_desc *
svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
{
@@ -955,23 +1124,10 @@ svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
}
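
A usage sketch; everything here other than the lookup itself is illustrative:

	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(SVGA3D_R8G8B8A8_UNORM);
	/* Per the table above: 1x1x1 block, 4 bytes per block. */
	u32 bytes_per_pixel = desc->bytes_per_block;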
-/*
- *----------------------------------------------------------------------
- *
- * svga3dsurface_get_mip_size --
- *
- * Given a base level size and the mip level, compute the size of
- * the mip level.
- *
- * Results:
- * See above.
- *
- * Side effects:
- * None.
- *
- *----------------------------------------------------------------------
+/**
+ * svga3dsurface_get_mip_size - Given a base level size and the mip level,
+ * compute the size of the mip level.
*/
-
static inline surf_size_struct
svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
{
@@ -1018,28 +1174,17 @@ svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
return pitch;
}
-/*
- *-----------------------------------------------------------------------------
- *
- * svga3dsurface_get_image_buffer_size --
- *
- * Return the number of bytes of buffer space required to store
- * one image of a surface, optionally using the specified pitch.
- *
- * If pitch is zero, it is assumed that rows are tightly packed.
+/**
+ * svga3dsurface_get_image_buffer_size - Calculates image buffer size.
*
- * This function is overflow-safe. If the result would have
- * overflowed, instead we return MAX_UINT32.
+ * Return the number of bytes of buffer space required to store one image of a
+ * surface, optionally using the specified pitch.
*
- * Results:
- * Byte count.
+ * If pitch is zero, it is assumed that rows are tightly packed.
*
- * Side effects:
- * None.
- *
- *-----------------------------------------------------------------------------
+ * This function is overflow-safe. If the result would have overflowed, instead
+ * we return MAX_UINT32.
*/
-
static inline u32
svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
const surf_size_struct *size,
@@ -1067,6 +1212,9 @@ svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
return total_size;
}
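
A hedged usage sketch, assuming surf_size_struct exposes width/height/depth
and that the third argument is the optional pitch:

	surf_size_struct size = { .width = 800, .height = 600, .depth = 1 };
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(SVGA3D_X8R8G8B8);
	/* pitch == 0 means tightly packed rows: 800 * 600 * 4 bytes here. */
	u32 image_bytes = svga3dsurface_get_image_buffer_size(desc, &size, 0);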
+/**
+ * svga3dsurface_get_serialized_size - Get the serialized size for the image.
+ */
static inline u32
svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
surf_size_struct base_level_size,
@@ -1087,6 +1235,26 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
return total_size * num_layers;
}
+/**
+ * svga3dsurface_get_serialized_size_extended - Returns the number of bytes
+ * required for a surface with the given parameters, including sample count.
+ */
+static inline u32
+svga3dsurface_get_serialized_size_extended(SVGA3dSurfaceFormat format,
+ surf_size_struct base_level_size,
+ u32 num_mip_levels,
+ u32 num_layers,
+ u32 num_samples)
+{
+ uint64_t total_size =
+ svga3dsurface_get_serialized_size(format,
+ base_level_size,
+ num_mip_levels,
+ num_layers);
+ total_size *= max_t(u32, 1, num_samples);
+
+ return min_t(uint64_t, total_size, (uint64_t)U32_MAX);
+}
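
A short example of the multisample scaling (format and sizes are arbitrary):

	surf_size_struct base = { .width = 1024, .height = 768, .depth = 1 };
	/* Four samples: four times the single-sample serialized size,
	 * clamped to U32_MAX by the min_t() above. */
	u32 total = svga3dsurface_get_serialized_size_extended(SVGA3D_A8R8G8B8,
							       base, 1, 1, 4);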
/**
* svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
@@ -1206,3 +1374,5 @@ svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
}
return svga3dsurface_is_dx_screen_target_format(format);
}
+
+#endif /* _SVGA3D_SURFACEDEFS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
index 27b33ba88430..308370665a8e 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2012-2015 VMware, Inc. All rights reserved.
+ * Copyright 2012-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -44,9 +45,21 @@
#define SVGA3D_INVALID_ID ((uint32)-1)
+typedef uint8 SVGABool8; /* 8-bit Bool definition */
typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
typedef uint32 SVGA3dColor; /* a, r, g, b */
+typedef uint32 SVGA3dSurfaceId;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 numerator;
+ uint32 denominator;
+}
+#include "vmware_pack_end.h"
+SVGA3dFraction64;
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCopyRect {
@@ -145,7 +158,7 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_BUMPU8V8 = 20,
SVGA3D_BUMPL6V5U5 = 21,
SVGA3D_BUMPX8L8V8U8 = 22,
- SVGA3D_BUMPL8V8U8 = 23,
+ SVGA3D_FORMAT_DEAD1 = 23,
SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
@@ -204,8 +217,8 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_R32G32_SINT = 59,
SVGA3D_R32G8X24_TYPELESS = 60,
SVGA3D_D32_FLOAT_S8X24_UINT = 61,
- SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
- SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
+ SVGA3D_R32_FLOAT_X8X24 = 62,
+ SVGA3D_X32_G8X24_UINT = 63,
SVGA3D_R10G10B10A2_TYPELESS = 64,
SVGA3D_R10G10B10A2_UINT = 65,
SVGA3D_R11G11B10_FLOAT = 66,
@@ -223,8 +236,8 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_R32_SINT = 78,
SVGA3D_R24G8_TYPELESS = 79,
SVGA3D_D24_UNORM_S8_UINT = 80,
- SVGA3D_R24_UNORM_X8_TYPELESS = 81,
- SVGA3D_X24_TYPELESS_G8_UINT = 82,
+ SVGA3D_R24_UNORM_X8 = 81,
+ SVGA3D_X24_G8_UINT = 82,
SVGA3D_R8G8_TYPELESS = 83,
SVGA3D_R8G8_UNORM = 84,
SVGA3D_R8G8_UINT = 85,
@@ -296,92 +309,114 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_FORMAT_MAX
} SVGA3dSurfaceFormat;
-typedef enum SVGA3dSurfaceFlags {
- SVGA3D_SURFACE_CUBEMAP = (1 << 0),
+/*
+ * SVGA3d Surface Flags --
+ */
+#define SVGA3D_SURFACE_CUBEMAP (1 << 0)
- /*
- * HINT flags are not enforced by the device but are useful for
- * performance.
- */
- SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
- SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
- SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
- SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
- SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
- SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
- SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
- SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
- SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
- SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
- SVGA3D_SURFACE_DECODE_RENDERTARGET = (1 << 11),
+/*
+ * HINT flags are not enforced by the device but are useful for
+ * performance.
+ */
+#define SVGA3D_SURFACE_HINT_STATIC (CONST64U(1) << 1)
+#define SVGA3D_SURFACE_HINT_DYNAMIC (CONST64U(1) << 2)
+#define SVGA3D_SURFACE_HINT_INDEXBUFFER (CONST64U(1) << 3)
+#define SVGA3D_SURFACE_HINT_VERTEXBUFFER (CONST64U(1) << 4)
+#define SVGA3D_SURFACE_HINT_TEXTURE (CONST64U(1) << 5)
+#define SVGA3D_SURFACE_HINT_RENDERTARGET (CONST64U(1) << 6)
+#define SVGA3D_SURFACE_HINT_DEPTHSTENCIL (CONST64U(1) << 7)
+#define SVGA3D_SURFACE_HINT_WRITEONLY (CONST64U(1) << 8)
+#define SVGA3D_SURFACE_MASKABLE_ANTIALIAS (CONST64U(1) << 9)
+#define SVGA3D_SURFACE_AUTOGENMIPMAPS (CONST64U(1) << 10)
+
+#define SVGA3D_SURFACE_DECODE_RENDERTARGET (CONST64U(1) << 11)
- /*
- * Is this surface using a base-level pitch for it's mob backing?
- *
- * This flag is not intended to be set by guest-drivers, but is instead
- * set by the device when the surface is bound to a mob with a specified
- * pitch.
- */
- SVGA3D_SURFACE_MOB_PITCH = (1 << 12),
+/*
+ * Is this surface using a base-level pitch for its mob backing?
+ *
+ * This flag is not intended to be set by guest-drivers, but is instead
+ * set by the device when the surface is bound to a mob with a specified
+ * pitch.
+ */
+#define SVGA3D_SURFACE_MOB_PITCH (CONST64U(1) << 12)
- SVGA3D_SURFACE_INACTIVE = (1 << 13),
- SVGA3D_SURFACE_HINT_RT_LOCKABLE = (1 << 14),
- SVGA3D_SURFACE_VOLUME = (1 << 15),
+#define SVGA3D_SURFACE_INACTIVE (CONST64U(1) << 13)
+#define SVGA3D_SURFACE_HINT_RT_LOCKABLE (CONST64U(1) << 14)
+#define SVGA3D_SURFACE_VOLUME (CONST64U(1) << 15)
- /*
- * Required to be set on a surface to bind it to a screen target.
- */
- SVGA3D_SURFACE_SCREENTARGET = (1 << 16),
+/*
+ * Required to be set on a surface to bind it to a screen target.
+ */
+#define SVGA3D_SURFACE_SCREENTARGET (CONST64U(1) << 16)
- /*
- * Align images in the guest-backing mob to 16-bytes.
- */
- SVGA3D_SURFACE_ALIGN16 = (1 << 17),
+/*
+ * Align images in the guest-backing mob to 16-bytes.
+ */
+#define SVGA3D_SURFACE_ALIGN16 (CONST64U(1) << 17)
- SVGA3D_SURFACE_1D = (1 << 18),
- SVGA3D_SURFACE_ARRAY = (1 << 19),
+#define SVGA3D_SURFACE_1D (CONST64U(1) << 18)
+#define SVGA3D_SURFACE_ARRAY (CONST64U(1) << 19)
- /*
- * Bind flags.
- * These are enforced for any surface defined with DefineGBSurface_v2.
- */
- SVGA3D_SURFACE_BIND_VERTEX_BUFFER = (1 << 20),
- SVGA3D_SURFACE_BIND_INDEX_BUFFER = (1 << 21),
- SVGA3D_SURFACE_BIND_CONSTANT_BUFFER = (1 << 22),
- SVGA3D_SURFACE_BIND_SHADER_RESOURCE = (1 << 23),
- SVGA3D_SURFACE_BIND_RENDER_TARGET = (1 << 24),
- SVGA3D_SURFACE_BIND_DEPTH_STENCIL = (1 << 25),
- SVGA3D_SURFACE_BIND_STREAM_OUTPUT = (1 << 26),
+/*
+ * Bind flags.
+ * These are enforced for any surface defined with DefineGBSurface_v2.
+ */
+#define SVGA3D_SURFACE_BIND_VERTEX_BUFFER (CONST64U(1) << 20)
+#define SVGA3D_SURFACE_BIND_INDEX_BUFFER (CONST64U(1) << 21)
+#define SVGA3D_SURFACE_BIND_CONSTANT_BUFFER (CONST64U(1) << 22)
+#define SVGA3D_SURFACE_BIND_SHADER_RESOURCE (CONST64U(1) << 23)
+#define SVGA3D_SURFACE_BIND_RENDER_TARGET (CONST64U(1) << 24)
+#define SVGA3D_SURFACE_BIND_DEPTH_STENCIL (CONST64U(1) << 25)
+#define SVGA3D_SURFACE_BIND_STREAM_OUTPUT (CONST64U(1) << 26)
- /*
- * A note on staging flags:
- *
- * The STAGING flags notes that the surface will not be used directly by the
- * drawing pipeline, i.e. that it will not be bound to any bind point.
- * Staging surfaces may be used by copy operations to move data in and out
- * of other surfaces.
- *
- * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
- * updates indirectly, i.e. the surface will not be updated directly, but
- * will receive copies from staging surfaces.
- */
- SVGA3D_SURFACE_STAGING_UPLOAD = (1 << 27),
- SVGA3D_SURFACE_STAGING_DOWNLOAD = (1 << 28),
- SVGA3D_SURFACE_HINT_INDIRECT_UPDATE = (1 << 29),
+/*
+ * The STAGING flags note that the surface will not be used directly by the
+ * drawing pipeline, i.e. that it will not be bound to any bind point.
+ * Staging surfaces may be used by copy operations to move data in and out
+ * of other surfaces. No bind flags may be set on surfaces with this flag.
+ *
+ * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
+ * updates indirectly, i.e. the surface will not be updated directly, but
+ * will receive copies from staging surfaces.
+ */
+#define SVGA3D_SURFACE_STAGING_UPLOAD (CONST64U(1) << 27)
+#define SVGA3D_SURFACE_STAGING_DOWNLOAD (CONST64U(1) << 28)
+#define SVGA3D_SURFACE_HINT_INDIRECT_UPDATE (CONST64U(1) << 29)
- /*
- * Setting this flag allow this surface to be used with the
- * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command. It is only valid for
- * buffer surfaces, an no bind flags are allowed to be set on surfaces
- * with this flag.
- */
- SVGA3D_SURFACE_TRANSFER_FROM_BUFFER = (1 << 30),
+/*
+ * Setting this flag allows this surface to be used with the
+ * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command. It is only valid for
+ * buffer surfaces, and no bind flags are allowed to be set on surfaces
+ * with this flag.
+ */
+#define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER (CONST64U(1) << 30)
- /*
- * Marker for the last defined bit.
- */
- SVGA3D_SURFACE_FLAG_MAX = (1 << 31),
-} SVGA3dSurfaceFlags;
+/*
+ * Reserved for video operations.
+ */
+#define SVGA3D_SURFACE_RESERVED1 (CONST64U(1) << 31)
+
+/*
+ * Specifies that a surface is multisample, and therefore requires the full
+ * mob-backing to store all the samples.
+ */
+#define SVGA3D_SURFACE_MULTISAMPLE (CONST64U(1) << 32)
+
+#define SVGA3D_SURFACE_FLAG_MAX (CONST64U(1) << 33)
+
+/*
+ * Surface flags types:
+ *
+ * SVGA3dSurface1Flags: Lower 32-bits of flags.
+ * SVGA3dSurface2Flags: Upper 32-bits of flags.
+ * SVGA3dSurfaceAllFlags: Full 64-bits of flags.
+ */
+typedef uint32 SVGA3dSurface1Flags;
+typedef uint32 SVGA3dSurface2Flags;
+typedef uint64 SVGA3dSurfaceAllFlags;
+
+#define SVGA3D_SURFACE_FLAGS1_MASK ((uint64_t)MAX_UINT32)
+#define SVGA3D_SURFACE_FLAGS2_MASK (MAX_UINT64 & ~SVGA3D_SURFACE_FLAGS1_MASK)
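
A minimal sketch of splitting the 64-bit flag set into the two 32-bit words
(the variable names are illustrative):

	SVGA3dSurfaceAllFlags all_flags = SVGA3D_SURFACE_SCREENTARGET |
					  SVGA3D_SURFACE_MULTISAMPLE;
	SVGA3dSurface1Flags flags1 = (SVGA3dSurface1Flags)
		(all_flags & SVGA3D_SURFACE_FLAGS1_MASK);
	SVGA3dSurface2Flags flags2 = (SVGA3dSurface2Flags)(all_flags >> 32);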
#define SVGA3D_SURFACE_HB_DISALLOWED_MASK \
( SVGA3D_SURFACE_MOB_PITCH | \
@@ -392,29 +427,41 @@ typedef enum SVGA3dSurfaceFlags {
SVGA3D_SURFACE_STAGING_UPLOAD | \
SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
- SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_MULTISAMPLE \
+ )
+
+#define SVGA3D_SURFACE_HB_PRESENT_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_MULTISAMPLE \
)
#define SVGA3D_SURFACE_2D_DISALLOWED_MASK \
( SVGA3D_SURFACE_CUBEMAP | \
SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
SVGA3D_SURFACE_AUTOGENMIPMAPS | \
- SVGA3D_SURFACE_DECODE_RENDERTARGET | \
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
- SVGA3D_SURFACE_ARRAY | \
SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
- SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_MULTISAMPLE \
+ )
+
+#define SVGA3D_SURFACE_BASICOPS_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_MULTISAMPLE \
)
#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
( SVGA3D_SURFACE_CUBEMAP | \
SVGA3D_SURFACE_AUTOGENMIPMAPS | \
- SVGA3D_SURFACE_DECODE_RENDERTARGET | \
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
@@ -426,12 +473,36 @@ typedef enum SVGA3dSurfaceFlags {
SVGA3D_SURFACE_STAGING_UPLOAD | \
SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
- SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_MULTISAMPLE \
+ )
+
+#define SVGA3D_SURFACE_BUFFER_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
+ SVGA3D_SURFACE_ARRAY | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_MOB_PITCH \
+ )
+
+#define SVGA3D_SURFACE_MULTISAMPLE_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_SCREENTARGET | \
+ SVGA3D_SURFACE_MOB_PITCH \
)
#define SVGA3D_SURFACE_DX_ONLY_MASK \
( SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+ )
#define SVGA3D_SURFACE_STAGING_MASK \
( SVGA3D_SURFACE_STAGING_UPLOAD | \
@@ -487,7 +558,7 @@ typedef enum {
/*
* Indicates that this format can be converted to any RGB format for which
- * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified.
*/
SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
@@ -498,22 +569,22 @@ typedef enum {
/*
* Indicated that this format can be read as an SRGB texture (meaning that the
- * sampler will linearize the looked up data)
+ * sampler will linearize the looked up data).
*/
SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
/*
- * Indicates that this format can be used in the bumpmap instructions
+ * Indicates that this format can be used in the bumpmap instructions.
*/
SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
/*
- * Indicates that this format can be sampled by the displacement map sampler
+ * Indicates that this format can be sampled by the displacement map sampler.
*/
SVGA3DFORMAT_OP_DMAP = 0x00020000,
/*
- * Indicates that this format cannot be used with texture filtering
+ * Indicates that this format cannot be used with texture filtering.
*/
SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
@@ -530,18 +601,18 @@ typedef enum {
SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
/*
- * Indicates that this format cannot be used with alpha blending
+ * Indicates that this format cannot be used with alpha blending.
*/
SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
/*
 * Indicates that the device can auto-generate sublevels for resources
- * of this format
+ * of this format.
*/
SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
/*
- * Indicates that this format can be used by vertex texture sampler
+ * Indicates that this format can be used by vertex texture sampler.
*/
SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
@@ -1501,7 +1572,6 @@ union SVGADXQueryResultUnion {
#include "vmware_pack_end.h"
SVGADXQueryResultUnion;
-
typedef enum {
SVGA3D_QUERYSTATE_PENDING = 0, /* Query is not finished yet */
SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully */
@@ -1533,9 +1603,9 @@ typedef
struct {
union {
struct {
- uint16 function; /* SVGA3dFogFunction */
- uint8 type; /* SVGA3dFogType */
- uint8 base; /* SVGA3dFogBase */
+ uint16 function; /* SVGA3dFogFunction */
+ uint8 type; /* SVGA3dFogType */
+ uint8 base; /* SVGA3dFogBase */
};
uint32 uintValue;
};
@@ -1547,19 +1617,27 @@ SVGA3dFogMode;
* Uniquely identify one image (a 1D/2D/3D array) from a surface. This
* is a surface ID as well as face/mipmap indices.
*/
-
typedef
#include "vmware_pack_begin.h"
struct SVGA3dSurfaceImageId {
- uint32 sid;
- uint32 face;
- uint32 mipmap;
+ uint32 sid;
+ uint32 face;
+ uint32 mipmap;
}
#include "vmware_pack_end.h"
SVGA3dSurfaceImageId;
typedef
#include "vmware_pack_begin.h"
+struct SVGA3dSubSurfaceId {
+ uint32 sid;
+ uint32 subResourceId;
+}
+#include "vmware_pack_end.h"
+SVGA3dSubSurfaceId;
+
+typedef
+#include "vmware_pack_begin.h"
struct {
uint32 width;
uint32 height;
@@ -1582,13 +1660,18 @@ typedef enum {
SVGA_OTABLE_DX9_MAX = 5,
SVGA_OTABLE_DXCONTEXT = 5,
- SVGA_OTABLE_MAX = 6
-} SVGAOTableType;
+ SVGA_OTABLE_DX_MAX = 6,
-/*
- * Deprecated.
- */
-#define SVGA_OTABLE_COUNT 4
+ SVGA_OTABLE_RESERVED1 = 6,
+ SVGA_OTABLE_RESERVED2 = 7,
+
+ /*
+ * Additions to this table need to be tied to HW-version features and
+ * checkpointed accordingly.
+ */
+ SVGA_OTABLE_DEVEL_MAX = 8,
+ SVGA_OTABLE_MAX = 8
+} SVGAOTableType;
typedef enum {
SVGA_COTABLE_MIN = 0,
@@ -1605,7 +1688,7 @@ typedef enum {
SVGA_COTABLE_DXSHADER = 10,
SVGA_COTABLE_DX10_MAX = 11,
SVGA_COTABLE_UAVIEW = 11,
- SVGA_COTABLE_MAX
+ SVGA_COTABLE_MAX = 12,
} SVGACOTableType;
/*
@@ -1626,8 +1709,37 @@ typedef enum SVGAMobFormat {
SVGA3D_MOBFMT_PREDX_MAX = 7,
SVGA3D_MOBFMT_EMPTY = 7,
SVGA3D_MOBFMT_MAX,
+
+ /*
+ * This isn't actually used by the guest, but is a mob-format used
+ * internally by the SVGA device (and is therefore not binary compatible).
+ */
+ SVGA3D_MOBFMT_HB,
} SVGAMobFormat;
#define SVGA3D_MOB_EMPTY_BASE 1
+/*
+ * Multisample pattern types.
+ */
+
+typedef enum SVGA3dMSPattern {
+ SVGA3D_MS_PATTERN_NONE = 0,
+ SVGA3D_MS_PATTERN_MIN = 0,
+ SVGA3D_MS_PATTERN_STANDARD = 1,
+ SVGA3D_MS_PATTERN_CENTER = 2,
+ SVGA3D_MS_PATTERN_MAX = 3,
+} SVGA3dMSPattern;
+
+/*
+ * Precision settings for each sample.
+ */
+
+typedef enum SVGA3dMSQualityLevel {
+ SVGA3D_MS_QUALITY_NONE = 0,
+ SVGA3D_MS_QUALITY_MIN = 0,
+ SVGA3D_MS_QUALITY_FULL = 1,
+ SVGA3D_MS_QUALITY_MAX = 2,
+} SVGA3dMSQualityLevel;
+
#endif /* _SVGA3D_TYPES_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
index 884b1d1fb85f..acb41e28e46f 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
index faf6d9b2b891..e5385146e7fc 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc. All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index 88e72bf9a534..056f54b35d73 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc. All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -63,16 +64,26 @@ typedef uint32 SVGAMobId;
#define SVGA_MAX_BITS_PER_PIXEL 32
#define SVGA_MAX_DEPTH 24
#define SVGA_MAX_DISPLAYS 10
+#define SVGA_MAX_SCREEN_SIZE 8192
+#define SVGA_SCREEN_ROOT_LIMIT (SVGA_MAX_SCREEN_SIZE * SVGA_MAX_DISPLAYS)
+
/*
* Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
* cursor bypass mode. This is still supported, but no new guest
* drivers should use it.
*/
-#define SVGA_CURSOR_ON_HIDE 0x0 /* Must be 0 to maintain backward compatibility */
-#define SVGA_CURSOR_ON_SHOW 0x1 /* Must be 1 to maintain backward compatibility */
-#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2 /* Remove the cursor from the framebuffer because we need to see what's under it */
-#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
+#define SVGA_CURSOR_ON_HIDE 0x0
+#define SVGA_CURSOR_ON_SHOW 0x1
+
+/*
+ * Remove the cursor from the framebuffer
+ * because we need to see what's under it
+ */
+#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2
+
+/* Put the cursor back in the framebuffer so the user can see it */
+#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3
/*
 * The maximum framebuffer size that can be traced for guests unless the
@@ -101,7 +112,10 @@ typedef uint32 SVGAMobId;
#define SVGA_VERSION_0 0
#define SVGA_ID_0 SVGA_MAKE_ID(SVGA_VERSION_0)
-/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
+/*
+ * "Invalid" value for all SVGA IDs.
+ * (Version ID, screen object ID, surface ID...)
+ */
#define SVGA_ID_INVALID 0xFFFFFFFF
/* Port offsets, relative to BAR0 */
@@ -154,7 +168,7 @@ enum {
SVGA_REG_CONFIG_DONE = 20, /* Set when memory area configured */
SVGA_REG_SYNC = 21, /* See "FIFO Synchronization Registers" */
SVGA_REG_BUSY = 22, /* See "FIFO Synchronization Registers" */
- SVGA_REG_GUEST_ID = 23, /* Set guest OS identifier */
+ SVGA_REG_GUEST_ID = 23, /* (Deprecated) */
SVGA_REG_CURSOR_ID = 24, /* (Deprecated) */
SVGA_REG_CURSOR_X = 25, /* (Deprecated) */
SVGA_REG_CURSOR_Y = 26, /* (Deprecated) */
@@ -186,7 +200,14 @@ enum {
SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */
SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */
- SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
+
+ /*
+ * Max primary memory.
+ * See SVGA_CAP_NO_BB_RESTRICTION.
+ */
+ SVGA_REG_MAX_PRIMARY_MEM = 50,
+ SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,
+
   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
SVGA_REG_CMD_PREPEND_LOW = 53,
@@ -194,7 +215,10 @@ enum {
SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
SVGA_REG_MOB_MAX_SIZE = 57,
- SVGA_REG_TOP = 58, /* Must be 1 more than the last register */
+ SVGA_REG_BLANK_SCREEN_TARGETS = 58,
+ SVGA_REG_CAP2 = 59,
+ SVGA_REG_DEVEL_CAP = 60,
+ SVGA_REG_TOP = 61, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
@@ -392,6 +416,7 @@ typedef enum {
SVGA_CB_CONTEXT_0 = 0x0,
SVGA_CB_CONTEXT_1 = 0x1, /* Supported with SVGA_CAP_HP_CMD_QUEUE */
SVGA_CB_CONTEXT_MAX = 0x2,
+ SVGA_CB_CONTEXT_HP_MAX = 0x2,
} SVGACBContext;
@@ -448,6 +473,18 @@ typedef enum {
* due to an error. No IRQ is raised.
*/
SVGA_CB_STATUS_SUBMISSION_ERROR = 6,
+
+ /*
+    * Written by the host when the host has finished a
+ * SVGA_DC_CMD_ASYNC_STOP_QUEUE request for this command buffer
+ * queue. The offset of the first byte not processed is stored in
+ * the errorOffset field of the command buffer header. All guest
+ * visible side effects of commands till that point are guaranteed
+ * to be finished before this is written. The
+ * SVGA_IRQFLAG_COMMAND_BUFFER IRQ is raised as long as the
+ * SVGA_CB_FLAG_NO_IRQ is not set.
+ */
+ SVGA_CB_STATUS_PARTIAL_COMPLETE = 7,
} SVGACBStatus;
typedef enum {
@@ -460,8 +497,8 @@ typedef enum {
typedef
#include "vmware_pack_begin.h"
struct {
- volatile SVGACBStatus status;
- volatile uint32 errorOffset;
+ volatile SVGACBStatus status; /* Modified by device. */
+ volatile uint32 errorOffset; /* Modified by device. */
uint64 id;
SVGACBFlags flags;
uint32 length;
@@ -472,7 +509,9 @@ struct {
uint32 mobOffset;
} mob;
} ptr;
- uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise */
+ uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise,
+ * modified by device.
+ */
uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */
uint32 mustBeZero[6];
}
@@ -483,20 +522,26 @@ typedef enum {
SVGA_DC_CMD_NOP = 0,
SVGA_DC_CMD_START_STOP_CONTEXT = 1,
SVGA_DC_CMD_PREEMPT = 2,
- SVGA_DC_CMD_MAX = 3,
- SVGA_DC_CMD_FORCE_UINT = MAX_UINT32,
+ SVGA_DC_CMD_START_QUEUE = 3, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+ SVGA_DC_CMD_ASYNC_STOP_QUEUE = 4, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+ SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE = 5, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+ SVGA_DC_CMD_MAX = 6,
} SVGADeviceContextCmdId;
-typedef struct {
+/*
+ * Starts or stops both SVGA_CB_CONTEXT_0 and SVGA_CB_CONTEXT_1.
+ */
+
+typedef struct SVGADCCmdStartStop {
uint32 enable;
- SVGACBContext context;
+ SVGACBContext context; /* Must be zero */
} SVGADCCmdStartStop;
/*
* SVGADCCmdPreempt --
*
* This command allows the guest to request that all command buffers
- * on the specified context be preempted that can be. After execution
+ * on SVGA_CB_CONTEXT_0 be preempted where possible. After execution
* of this command all command buffers that were preempted will
* already have SVGA_CB_STATUS_PREEMPTED written into the status
* field. The device might still be processing a command buffer,
@@ -506,12 +551,69 @@ typedef struct {
* command buffer header set to zero.
*/
-typedef struct {
- SVGACBContext context;
+typedef struct SVGADCCmdPreempt {
+ SVGACBContext context; /* Must be zero */
uint32 ignoreIDZero;
} SVGADCCmdPreempt;
/*
+ * Starts the requested command buffer processing queue. Valid only
+ * if the SVGA_CAP_HP_CMD_QUEUE cap is set.
+ *
+ * For a command queue to be considered runnable it must be enabled
+ * and any corresponding higher priority queues must also be enabled.
+ * For example in order for command buffers to be processed on
+ * SVGA_CB_CONTEXT_0 both SVGA_CB_CONTEXT_0 and SVGA_CB_CONTEXT_1 must
+ * be enabled. But for commands to be runnable on SVGA_CB_CONTEXT_1
+ * only that queue must be enabled.
+ */
+
+typedef struct SVGADCCmdStartQueue {
+ SVGACBContext context;
+} SVGADCCmdStartQueue;
+
+/*
+ * Requests the SVGA device to stop processing the requested command
+ * buffer queue as soon as possible. The guest knows the stop has
+ * completed when one of the following happens.
+ *
+ * 1) A command buffer status of SVGA_CB_STATUS_PARTIAL_COMPLETE is returned
+ * 2) A command buffer error is encountered which would stop the queue
+ * regardless of the async stop request.
+ * 3) All command buffers that have been submitted complete successfully.
+ * 4) The stop completes synchronously if no command buffers are
+ * active on the queue when it is issued.
+ *
+ * If the command queue is not in a runnable state there is no
+ * guarantee this async stop will finish. For instance, if the high
+ * priority queue is not enabled and a stop is requested on the low
+ * priority queue, the high priority queue must be re-enabled to
+ * guarantee that the async stop will finish.
+ *
+ * This command, along with SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE, can be used
+ * to implement mid-command-buffer preemption (see the sketch below).
+ *
+ * Valid only if the SVGA_CAP_HP_CMD_QUEUE cap is set.
+ */
+
+typedef struct SVGADCCmdAsyncStopQueue {
+ SVGACBContext context;
+} SVGADCCmdAsyncStopQueue;
+
+/*
+ * Requests the SVGA device to throw away any full command buffers on
+ * the requested command queue that have not been started. For a
+ * driver to know which command buffers were thrown away, it should
+ * only issue this command while the queue is stopped, for whatever
+ * reason.
+ */
+
+typedef struct SVGADCCmdEmptyQueue {
+ SVGACBContext context;
+} SVGADCCmdEmptyQueue;
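Putting the three commands together, one plausible mid-command-buffer preemption sequence; both helpers are hypothetical and error handling is omitted:

/* Sketch only: preempt work on the low priority queue. */
static void sketch_preempt_low_priority_queue(void)
{
	SVGADCCmdAsyncStopQueue stop  = { .context = SVGA_CB_CONTEXT_0 };
	SVGADCCmdEmptyQueue     empty = { .context = SVGA_CB_CONTEXT_0 };
	SVGADCCmdStartQueue     start = { .context = SVGA_CB_CONTEXT_0 };

	/* Ask the device to stop the queue as soon as possible. */
	sketch_submit_device_control(SVGA_DC_CMD_ASYNC_STOP_QUEUE,
				     &stop, sizeof(stop));

	/* Wait for one of the four completion conditions listed above. */
	sketch_wait_for_queue_stop(SVGA_CB_CONTEXT_0);

	/* The queue is now stopped, so the set of buffers discarded
	 * here is well defined. */
	sketch_submit_device_control(SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE,
				     &empty, sizeof(empty));

	/* Resume processing. */
	sketch_submit_device_control(SVGA_DC_CMD_START_QUEUE,
				     &start, sizeof(start));
}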
+
+
+/*
* SVGAGMRImageFormat --
*
* This is a packed representation of the source 2D image format
@@ -536,7 +638,7 @@ typedef struct SVGAGMRImageFormat {
struct {
uint32 bitsPerPixel : 8;
uint32 colorDepth : 8;
- uint32 reserved : 16; /* Must be zero */
+ uint32 reserved : 16; /* Must be zero */
};
uint32 value;
@@ -672,8 +774,36 @@ SVGASignedPoint;
* SVGA_CAP_GBOBJECTS --
* Enable guest-backed objects and surfaces.
*
- * SVGA_CAP_CMD_BUFFERS_3 --
- * Enable support for command buffers in a mob.
+ * SVGA_CAP_DX --
+ * Enable support for DX commands, and command buffers in a mob.
+ *
+ * SVGA_CAP_HP_CMD_QUEUE --
+ * Enable support for the high priority command queue, and the
+ * ScreenCopy command.
+ *
+ * SVGA_CAP_NO_BB_RESTRICTION --
+ * Allow ScreenTargets to be defined without regard to the 32-bpp
+ * bounding-box memory restrictions, i.e.:
+ *
+ * The summed memory usage of all screens (assuming they were defined as
+ * 32-bpp) must always be less than the value of the
+ * SVGA_REG_MAX_PRIMARY_MEM register.
+ *
+ * If this cap is not present, the 32-bpp bounding box around all screens
+ * must additionally be under the value of the SVGA_REG_MAX_PRIMARY_MEM
+ * register.
+ *
+ * If the cap is present, the bounding box restriction is lifted (and only
+ * the screen-sum limit applies).
+ *
+ * (Note that this is a slight lie... there is still a sanity limit on any
+ * dimension of the topology to be less than SVGA_SCREEN_ROOT_LIMIT, even
+ * when SVGA_CAP_NO_BB_RESTRICTION is present, but that should be
+ * large enough to express any possible topology without holes between
+ * monitors.)
+ *
+ * SVGA_CAP_CAP2_REGISTER --
+ * If this cap is present, the SVGA_REG_CAP2 register is supported.
*/
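As a worked example of the screen-sum rule: two 2560x1440 screens, counted as 32-bpp, sum to 2 * 2560 * 1440 * 4 = 29491200 bytes, and that total must stay below the value of SVGA_REG_MAX_PRIMARY_MEM whether or not the cap is set; the cap only waives the additional bounding-box check.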
#define SVGA_CAP_NONE 0x00000000
@@ -699,8 +829,30 @@ SVGASignedPoint;
#define SVGA_CAP_GBOBJECTS 0x08000000
#define SVGA_CAP_DX 0x10000000
#define SVGA_CAP_HP_CMD_QUEUE 0x20000000
+#define SVGA_CAP_NO_BB_RESTRICTION 0x40000000
+#define SVGA_CAP_CAP2_REGISTER 0x80000000
-#define SVGA_CAP_CMD_RESERVED 0x80000000
+/*
+ * The SVGA_REG_CAP2 register is an additional set of SVGA capability bits.
+ *
+ * SVGA_CAP2_GROW_OTABLE --
+ * Allow the GrowOTable/DXGrowCOTable commands.
+ *
+ * SVGA_CAP2_INTRA_SURFACE_COPY --
+ * Allow the IntraSurfaceCopy command.
+ *
+ * SVGA_CAP2_DX2 --
+ *   Allow the DefineGBSurface_v3 and WholeSurfaceCopy commands.
+ *
+ * SVGA_CAP2_RESERVED --
+ * Reserve the last bit for extending the SVGA capabilities to some
+ * future mechanisms.
+ */
+#define SVGA_CAP2_NONE 0x00000000
+#define SVGA_CAP2_GROW_OTABLE 0x00000001
+#define SVGA_CAP2_INTRA_SURFACE_COPY 0x00000002
+#define SVGA_CAP2_DX2 0x00000004
+#define SVGA_CAP2_RESERVED 0x80000000
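A short sketch of gating a feature on a CAP2 bit, under the assumption that the capability registers are read with vmw_read() as elsewhere in vmwgfx; treat the exact register plumbing as illustrative:

/* Sketch only: CAP2 bits are valid only behind SVGA_CAP_CAP2_REGISTER. */
static bool sketch_has_intra_surface_copy(struct vmw_private *dev_priv)
{
	uint32 caps = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (!(caps & SVGA_CAP_CAP2_REGISTER))
		return false;	/* SVGA_REG_CAP2 may not exist. */

	return (vmw_read(dev_priv, SVGA_REG_CAP2) &
		SVGA_CAP2_INTRA_SURFACE_COPY) != 0;
}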
/*
@@ -722,7 +874,8 @@ typedef enum {
SVGABackdoorCapDeviceCaps = 0,
SVGABackdoorCapFifoCaps = 1,
SVGABackdoorCap3dHWVersion = 2,
- SVGABackdoorCapMax = 3,
+ SVGABackdoorCapDeviceCaps2 = 3,
+ SVGABackdoorCapMax = 4,
} SVGABackdoorCapType;
@@ -1914,16 +2067,6 @@ SVGAFifoCmdRemapGMR2;
#define SVGA_VRAM_SIZE_W2K (64 * 1024 * 1024) /* 64 MB */
-/*
- * To simplify autoDetect display configuration, support a minimum of
- * two 1920x1200 monitors, 32bpp, side-by-side, optionally rotated:
- * numDisplays = 2
- * maxWidth = numDisplay * 1920 = 3840
- * maxHeight = rotated width of single monitor = 1920
- * vramSize = maxWidth * maxHeight * 4 = 29491200
- */
-#define SVGA_VRAM_SIZE_AUTODETECT (32 * 1024 * 1024)
-
#if defined(VMX86_SERVER)
#define SVGA_VRAM_SIZE (4 * 1024 * 1024)
#define SVGA_VRAM_SIZE_3D (64 * 1024 * 1024)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
index 2e8ba4df8de9..350bbc6fab02 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2015 VMware, Inc. All rights reserved.
+ * Copyright 2015 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -40,7 +41,10 @@ typedef uint64 PPN64;
typedef bool Bool;
+#define MAX_UINT64 U64_MAX
#define MAX_UINT32 U32_MAX
#define MAX_UINT16 U16_MAX
+#define CONST64U(x) x##ULL
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
index 7e7b0ce34aa2..75308bd0d970 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
@@ -1,25 +1,2 @@
-/**********************************************************
- * Copyright 2015 VMware, Inc. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/compiler.h>
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
index e2e440ed3d44..e93d6f28b68c 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
@@ -1,25 +1,2 @@
-/**********************************************************
- * Copyright 2015 VMware, Inc. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
__packed
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 55d32ae43aa4..0b9ee7fb45d6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index bf2e77ad5a20..6a2a9d69043b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index e8c94b19db7b..fc6673cde289 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2017 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2017 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
new file mode 100644
index 000000000000..2dda03345761
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -0,0 +1,1123 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
+#include "drm/ttm/ttm_object.h"
+
+
+/**
+ * struct vmw_user_buffer_object - User-space-visible buffer object
+ *
+ * @prime: The prime object providing user visibility.
+ * @vbo: The struct vmw_buffer_object
+ */
+struct vmw_user_buffer_object {
+ struct ttm_prime_object prime;
+ struct vmw_buffer_object vbo;
+};
+
+
+/**
+ * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the
+ * TTM buffer object.
+ */
+static struct vmw_buffer_object *
+vmw_buffer_object(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vmw_buffer_object, base);
+}
+
+
+/**
+ * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_user_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer
+ * object.
+ */
+static struct vmw_user_buffer_object *
+vmw_user_buffer_object(struct ttm_buffer_object *bo)
+{
+ struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+ return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
+}
+
+
+/**
+ * vmw_bo_pin_in_placement - Validate a buffer to placement.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @placement: The placement to pin it.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ struct ttm_placement *placement,
+ bool interruptible)
+{
+ struct ttm_operation_ctx ctx = {interruptible, false };
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+ uint32_t new_flags;
+
+ ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_execbuf_release_pinned_bo(dev_priv);
+
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+ if (unlikely(ret != 0))
+ goto err;
+
+ if (buf->pin_count > 0)
+ ret = ttm_bo_mem_compat(placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ else
+ ret = ttm_bo_validate(bo, placement, &ctx);
+
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ ttm_write_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
+
+
+/**
+ * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible)
+{
+ struct ttm_operation_ctx ctx = {interruptible, false };
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+ uint32_t new_flags;
+
+ ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_execbuf_release_pinned_bo(dev_priv);
+
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+ if (unlikely(ret != 0))
+ goto err;
+
+ if (buf->pin_count > 0) {
+ ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ goto out_unreserve;
+ }
+
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+ if (likely(ret == 0) || ret == -ERESTARTSYS)
+ goto out_unreserve;
+
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+
+out_unreserve:
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
+
+ ttm_bo_unreserve(bo);
+err:
+ ttm_write_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
+
+
+/**
+ * vmw_bo_pin_in_vram - Move a buffer to vram.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible)
+{
+ return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+ interruptible);
+}
+
+
+/**
+ * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to pin.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible)
+{
+ struct ttm_operation_ctx ctx = {interruptible, false };
+ struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_placement placement;
+ struct ttm_place place;
+ int ret = 0;
+ uint32_t new_flags;
+
+ place = vmw_vram_placement.placement[0];
+ place.lpfn = bo->num_pages;
+ placement.num_placement = 1;
+ placement.placement = &place;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &place;
+
+ ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_execbuf_release_pinned_bo(dev_priv);
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+ if (unlikely(ret != 0))
+ goto err_unlock;
+
+ /*
+ * Is this buffer already in vram but not at the start of it?
+ * In that case, evict it first because TTM isn't good at handling
+ * that situation.
+ */
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ bo->mem.start < bo->num_pages &&
+ bo->mem.start > 0 &&
+ buf->pin_count == 0) {
+ ctx.interruptible = false;
+ (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+ }
+
+ if (buf->pin_count > 0)
+ ret = ttm_bo_mem_compat(&placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ else
+ ret = ttm_bo_validate(bo, &placement, &ctx);
+
+ /* For some reason we didn't end up at the start of vram */
+ WARN_ON(ret == 0 && bo->offset != 0);
+ if (!ret)
+ vmw_bo_pin_reserved(buf, true);
+
+ ttm_bo_unreserve(bo);
+err_unlock:
+ ttm_write_unlock(&dev_priv->reservation_sem);
+
+ return ret;
+}
+
+
+/**
+ * vmw_bo_unpin - Unpin the given buffer; does not move the buffer.
+ *
+ * This function takes the reservation_sem in write mode.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to unpin.
+ * @interruptible: Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_unpin(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible)
+{
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+ if (unlikely(ret != 0))
+ goto err;
+
+ vmw_bo_pin_reserved(buf, false);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
+
+/**
+ * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
+ * of a buffer.
+ *
+ * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
+ * @ptr: SVGAGuestPtr returning the result.
+ */
+void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
+ SVGAGuestPtr *ptr)
+{
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
+ ptr->offset = bo->offset;
+ } else {
+ ptr->gmrId = bo->mem.start;
+ ptr->offset = 0;
+ }
+}
+
+
+/**
+ * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
+ *
+ * @vbo: The buffer object. Must be reserved.
+ * @pin: Whether to pin or unpin.
+ *
+ */
+void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
+{
+ struct ttm_operation_ctx ctx = { false, true };
+ struct ttm_place pl;
+ struct ttm_placement placement;
+ struct ttm_buffer_object *bo = &vbo->base;
+ uint32_t old_mem_type = bo->mem.mem_type;
+ int ret;
+
+ lockdep_assert_held(&bo->resv->lock.base);
+
+ if (pin) {
+ if (vbo->pin_count++ > 0)
+ return;
+ } else {
+ WARN_ON(vbo->pin_count <= 0);
+ if (--vbo->pin_count > 0)
+ return;
+ }
+
+ pl.fpfn = 0;
+ pl.lpfn = 0;
+ pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+ | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+ if (pin)
+ pl.flags |= TTM_PL_FLAG_NO_EVICT;
+
+ memset(&placement, 0, sizeof(placement));
+ placement.num_placement = 1;
+ placement.placement = &pl;
+
+ ret = ttm_bo_validate(bo, &placement, &ctx);
+
+ BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+}
+
+
+/**
+ * vmw_bo_map_and_cache - Map a buffer object and cache the map
+ *
+ * @vbo: The buffer object to map
+ * Return: A kernel virtual address or NULL if mapping failed.
+ *
+ * This function maps a buffer object into the kernel address space, or
+ * returns the virtual kernel address of an already existing map. The virtual
+ * address remains valid as long as the buffer object is pinned or reserved.
+ * The cached map is torn down on either
+ * 1) Buffer object move
+ * 2) Buffer object swapout
+ * 3) Buffer object destruction
+ *
+ */
+void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
+{
+ struct ttm_buffer_object *bo = &vbo->base;
+ bool not_used;
+ void *virtual;
+ int ret;
+
+ virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
+ if (virtual)
+ return virtual;
+
+ ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+ if (ret)
+ DRM_ERROR("Buffer object map failed: %d.\n", ret);
+
+ return ttm_kmap_obj_virtual(&vbo->map, &not_used);
+}
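A usage sketch of the cached map under the validity rules above; sketch_poke_buffer() is illustrative only and relies on the pin helpers defined earlier in this file:

/* Sketch only: use the cached map while the object is pinned. */
static int sketch_poke_buffer(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *vbo)
{
	void *virtual;
	int ret;

	ret = vmw_bo_pin_in_vram(dev_priv, vbo, true);
	if (ret)
		return ret;

	virtual = vmw_bo_map_and_cache(vbo);
	if (virtual)
		memset(virtual, 0, PAGE_SIZE);

	/* The map stays cached; it is torn down on move, swapout or
	 * destruction, not by unpinning. */
	return vmw_bo_unpin(dev_priv, vbo, true);
}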
+
+
+/**
+ * vmw_bo_unmap - Tear down a cached buffer object map.
+ *
+ * @vbo: The buffer object whose map we are tearing down.
+ *
+ * This function tears down a cached map set up using
+ * vmw_buffer_object_map_and_cache().
+ */
+void vmw_bo_unmap(struct vmw_buffer_object *vbo)
+{
+ if (vbo->map.bo == NULL)
+ return;
+
+ ttm_bo_kunmap(&vbo->map);
+}
+
+
+/**
+ * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+ */
+static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
+ bool user)
+{
+ static size_t struct_size, user_struct_size;
+ size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+ if (unlikely(struct_size == 0)) {
+ size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+ struct_size = backend_size +
+ ttm_round_pot(sizeof(struct vmw_buffer_object));
+ user_struct_size = backend_size +
+ ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+ }
+
+ if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+ page_array_size +=
+ ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+ return ((user) ? user_struct_size : struct_size) +
+ page_array_size;
+}
+
+
+/**
+ * vmw_bo_bo_free - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+void vmw_bo_bo_free(struct ttm_buffer_object *bo)
+{
+ struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+ vmw_bo_unmap(vmw_bo);
+ kfree(vmw_bo);
+}
+
+
+/**
+ * vmw_user_bo_destroy - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
+{
+ struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
+
+ vmw_bo_unmap(&vmw_user_bo->vbo);
+ ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
+
+/**
+ * vmw_bo_init - Initialize a vmw buffer object
+ *
+ * @dev_priv: Pointer to the device private struct
+ * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
+ * @size: Buffer object size in bytes.
+ * @placement: Initial placement.
+ * @interruptible: Whether waits should be performed interruptible.
+ * @bo_free: The buffer object destructor.
+ * Returns: Zero on success, negative error code on error.
+ *
+ * Note that on error, the code will free the buffer object.
+ */
+int vmw_bo_init(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *vmw_bo,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible,
+ void (*bo_free)(struct ttm_buffer_object *bo))
+{
+ struct ttm_bo_device *bdev = &dev_priv->bdev;
+ size_t acc_size;
+ int ret;
+ bool user = (bo_free == &vmw_user_bo_destroy);
+
+	WARN_ON_ONCE(!bo_free || (!user && bo_free != vmw_bo_bo_free));
+
+ acc_size = vmw_bo_acc_size(dev_priv, size, user);
+ memset(vmw_bo, 0, sizeof(*vmw_bo));
+
+ INIT_LIST_HEAD(&vmw_bo->res_list);
+
+ ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+ ttm_bo_type_device, placement,
+ 0, interruptible, acc_size,
+ NULL, NULL, bo_free);
+ return ret;
+}
+
+
+/**
+ * vmw_user_bo_release - TTM reference base object release callback for
+ * vmw user buffer objects
+ *
+ * @p_base: The TTM base object pointer about to be unreferenced.
+ *
+ * Clears the TTM base object pointer and drops the reference the
+ * base object has on the underlying struct vmw_buffer_object.
+ */
+static void vmw_user_bo_release(struct ttm_base_object **p_base)
+{
+ struct vmw_user_buffer_object *vmw_user_bo;
+ struct ttm_base_object *base = *p_base;
+ struct ttm_buffer_object *bo;
+
+ *p_base = NULL;
+
+ if (unlikely(base == NULL))
+ return;
+
+ vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+ prime.base);
+ bo = &vmw_user_bo->vbo.base;
+ ttm_bo_unref(&bo);
+}
+
+
+/**
+ * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
+ * for vmw user buffer objects
+ *
+ * @base: Pointer to the TTM base object
+ * @ref_type: Reference type of the reference reaching zero.
+ *
+ * Called when user-space drops its last synccpu reference on the buffer
+ * object, either explicitly or as part of a cleanup file close.
+ */
+static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
+ enum ttm_ref_type ref_type)
+{
+ struct vmw_user_buffer_object *user_bo;
+
+ user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
+
+ switch (ref_type) {
+ case TTM_REF_SYNCCPU_WRITE:
+ ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+ break;
+ default:
+ WARN_ONCE(true, "Undefined buffer object reference release.\n");
+ }
+}
+
+
+/**
+ * vmw_user_bo_alloc - Allocate a user buffer object
+ *
+ * @dev_priv: Pointer to a struct device private.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the buffer object.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
+ * should be assigned.
+ * Return: Zero on success, negative error code on error.
+ */
+int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t size,
+ bool shareable,
+ uint32_t *handle,
+ struct vmw_buffer_object **p_vbo,
+ struct ttm_base_object **p_base)
+{
+ struct vmw_user_buffer_object *user_bo;
+ struct ttm_buffer_object *tmp;
+ int ret;
+
+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+ if (unlikely(!user_bo)) {
+ DRM_ERROR("Failed to allocate a buffer.\n");
+ return -ENOMEM;
+ }
+
+ ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
+ (dev_priv->has_mob) ?
+ &vmw_sys_placement :
+ &vmw_vram_sys_placement, true,
+ &vmw_user_bo_destroy);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = ttm_bo_reference(&user_bo->vbo.base);
+ ret = ttm_prime_object_init(tfile,
+ size,
+ &user_bo->prime,
+ shareable,
+ ttm_buffer_type,
+ &vmw_user_bo_release,
+ &vmw_user_bo_ref_obj_release);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unref(&tmp);
+ goto out_no_base_object;
+ }
+
+ *p_vbo = &user_bo->vbo;
+ if (p_base) {
+ *p_base = &user_bo->prime.base;
+ kref_get(&(*p_base)->refcount);
+ }
+ *handle = user_bo->prime.base.hash.key;
+
+out_no_base_object:
+ return ret;
+}
+
+
+/**
+ * vmw_user_bo_verify_access - verify access permissions on this
+ * buffer object.
+ *
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
+ */
+int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile)
+{
+ struct vmw_user_buffer_object *vmw_user_bo;
+
+ if (unlikely(bo->destroy != vmw_user_bo_destroy))
+ return -EPERM;
+
+ vmw_user_bo = vmw_user_buffer_object(bo);
+
+ /* Check that the caller has opened the object. */
+ if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
+ return 0;
+
+ DRM_ERROR("Could not grant buffer access.\n");
+ return -EPERM;
+}
+
+
+/**
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ * Return: Zero on success, Negative error code on error. In particular,
+ * -EBUSY will be returned if a dontblock operation is requested and the
+ * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
+ * interrupted by a signal.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
+{
+ struct ttm_buffer_object *bo = &user_bo->vbo.base;
+ bool existed;
+ int ret;
+
+ if (flags & drm_vmw_synccpu_allow_cs) {
+ bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
+ long lret;
+
+ lret = reservation_object_wait_timeout_rcu
+ (bo->resv, true, true,
+ nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+ if (!lret)
+ return -EBUSY;
+ else if (lret < 0)
+ return lret;
+ return 0;
+ }
+
+ ret = ttm_bo_synccpu_write_grab
+ (bo, !!(flags & drm_vmw_synccpu_dontblock));
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_SYNCCPU_WRITE, &existed, false);
+ if (ret != 0 || existed)
+ ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+
+ return ret;
+}
+
+/**
+ * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_bo_synccpu_release(uint32_t handle,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
+{
+ if (!(flags & drm_vmw_synccpu_allow_cs))
+ return ttm_ref_object_base_unref(tfile, handle,
+ TTM_REF_SYNCCPU_WRITE);
+
+ return 0;
+}
+
+
+/**
+ * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_synccpu_arg *arg =
+ (struct drm_vmw_synccpu_arg *) data;
+ struct vmw_buffer_object *vbo;
+ struct vmw_user_buffer_object *user_bo;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct ttm_base_object *buffer_base;
+ int ret;
+
+ if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+ || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+ drm_vmw_synccpu_dontblock |
+ drm_vmw_synccpu_allow_cs)) != 0) {
+ DRM_ERROR("Illegal synccpu flags.\n");
+ return -EINVAL;
+ }
+
+ switch (arg->op) {
+ case drm_vmw_synccpu_grab:
+ ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
+ &buffer_base);
+ if (unlikely(ret != 0))
+ return ret;
+
+ user_bo = container_of(vbo, struct vmw_user_buffer_object,
+ vbo);
+ ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+ vmw_bo_unreference(&vbo);
+ ttm_base_object_unref(&buffer_base);
+ if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+ ret != -EBUSY)) {
+ DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ case drm_vmw_synccpu_release:
+ ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+ arg->flags);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ default:
+ DRM_ERROR("Invalid synccpu operation.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
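For completeness, the user-space half of this handshake might look as follows; a hedged sketch assuming the DRM_IOCTL_VMW_SYNCCPU request and the struct drm_vmw_synccpu_arg layout from the vmwgfx UAPI header:

#include <sys/ioctl.h>

/* Sketch only: grab, access, then release CPU access to a buffer. */
static int sketch_cpu_access(int drm_fd, uint32_t handle)
{
	struct drm_vmw_synccpu_arg arg = {
		.op = drm_vmw_synccpu_grab,
		.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
		.handle = handle,
	};

	if (ioctl(drm_fd, DRM_IOCTL_VMW_SYNCCPU, &arg))
		return -1;

	/* CPU reads and writes on the mapped buffer go here. */

	arg.op = drm_vmw_synccpu_release;
	return ioctl(drm_fd, DRM_IOCTL_VMW_SYNCCPU, &arg);
}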
+
+
+/**
+ * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
+ * allocation functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and allocates a
+ * struct vmw_user_buffer_object bo.
+ */
+int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ union drm_vmw_alloc_dmabuf_arg *arg =
+ (union drm_vmw_alloc_dmabuf_arg *)data;
+ struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+ struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+ struct vmw_buffer_object *vbo;
+ uint32_t handle;
+ int ret;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ req->size, false, &handle, &vbo,
+ NULL);
+ if (unlikely(ret != 0))
+ goto out_no_bo;
+
+ rep->handle = handle;
+ rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
+ rep->cur_gmr_id = handle;
+ rep->cur_gmr_offset = 0;
+
+ vmw_bo_unreference(&vbo);
+
+out_no_bo:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+
+ return ret;
+}
+
+
+/**
+ * vmw_bo_unref_ioctl - Generic handle close ioctl.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and closes a
+ * handle to a TTM base object, optionally freeing the object.
+ */
+int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_unref_dmabuf_arg *arg =
+ (struct drm_vmw_unref_dmabuf_arg *)data;
+
+ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ arg->handle,
+ TTM_REF_USAGE);
+}
+
+
+/**
+ * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
+ *
+ * @tfile: The TTM object file the handle is registered with.
+ * @handle: The user buffer object handle
+ * @out: Pointer to where a pointer to the embedded
+ * struct vmw_buffer_object should be placed.
+ * @p_base: Pointer to where a pointer to the TTM base object should be
+ * placed, or NULL if no such pointer is required.
+ * Return: Zero on success, Negative error code on error.
+ *
+ * Both the output base object pointer and the vmw buffer object pointer
+ * will be refcounted.
+ */
+int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+ uint32_t handle, struct vmw_buffer_object **out,
+ struct ttm_base_object **p_base)
+{
+ struct vmw_user_buffer_object *vmw_user_bo;
+ struct ttm_base_object *base;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return -ESRCH;
+ }
+
+ if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
+ ttm_base_object_unref(&base);
+ DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return -EINVAL;
+ }
+
+ vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+ prime.base);
+ (void)ttm_bo_reference(&vmw_user_bo->vbo.base);
+ if (p_base)
+ *p_base = base;
+ else
+ ttm_base_object_unref(&base);
+ *out = &vmw_user_bo->vbo;
+
+ return 0;
+}
+
+
+/**
+ * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
+ *
+ * @tfile: The TTM object file to register the handle with.
+ * @vbo: The embedded vmw buffer object.
+ * @handle: Pointer to where the new handle should be placed.
+ * Return: Zero on success, Negative error code on error.
+ */
+int vmw_user_bo_reference(struct ttm_object_file *tfile,
+ struct vmw_buffer_object *vbo,
+ uint32_t *handle)
+{
+ struct vmw_user_buffer_object *user_bo;
+
+ if (vbo->base.destroy != vmw_user_bo_destroy)
+ return -EINVAL;
+
+ user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
+
+ *handle = user_bo->prime.base.hash.key;
+ return ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_USAGE, NULL, false);
+}
+
+
+/**
+ * vmw_bo_fence_single - Utility function to fence a single TTM buffer
+ * object without unreserving it.
+ *
+ * @bo: Pointer to the struct ttm_buffer_object to fence.
+ * @fence: Pointer to the fence. If NULL, this function will
+ * insert a fence into the command stream.
+ *
+ * Contrary to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ struct vmw_private *dev_priv =
+ container_of(bdev, struct vmw_private, bdev);
+
+ if (fence == NULL) {
+ vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ reservation_object_add_excl_fence(bo->resv, &fence->base);
+ dma_fence_put(&fence->base);
+	} else {
+		reservation_object_add_excl_fence(bo->resv, &fence->base);
+	}
+}
+
+
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_bo_alloc ioctl, except
+ * that the arguments have a different format.
+ */
+int vmw_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_buffer_object *vbo;
+ int ret;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ args->size, false, &args->handle,
+ &vbo, NULL);
+ if (unlikely(ret != 0))
+ goto out_no_bo;
+
+ vmw_bo_unreference(&vbo);
+out_no_bo:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
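As a quick check of the pitch arithmetic above: for width 1024 at 32 bpp, pitch = 1024 * ((32 + 7) / 8) = 4096 bytes, and with height 768 the resulting size is 4096 * 768 = 3145728 bytes (3 MiB); the (bpp + 7) / 8 term rounds sub-byte depths up to whole bytes per pixel.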
+
+
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_buffer_object *out_buf;
+ int ret;
+
+ ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
+ if (ret != 0)
+ return -EINVAL;
+
+ *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
+ vmw_bo_unreference(&out_buf);
+ return 0;
+}
+
+
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
+int vmw_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ handle, TTM_REF_USAGE);
+}
+
+
+/**
+ * vmw_bo_swap_notify - swapout notify callback.
+ *
+ * @bo: The buffer object to be swapped out.
+ */
+void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
+{
+ /* Is @bo embedded in a struct vmw_buffer_object? */
+ if (bo->destroy != vmw_bo_bo_free &&
+ bo->destroy != vmw_user_bo_destroy)
+ return;
+
+ /* Kill any cached kernel maps before swapout */
+ vmw_bo_unmap(vmw_buffer_object(bo));
+}
+
+
+/**
+ * vmw_bo_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
+ *
+ * Detaches cached maps and device bindings that require that the
+ * buffer doesn't move.
+ */
+void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
+{
+ struct vmw_buffer_object *vbo;
+
+ if (mem == NULL)
+ return;
+
+	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
+ if (bo->destroy != vmw_bo_bo_free &&
+ bo->destroy != vmw_user_bo_destroy)
+ return;
+
+ vbo = container_of(bo, struct vmw_buffer_object, base);
+
+ /*
+ * Kill any cached kernel maps before move to or from VRAM.
+ * With other types of moves, the underlying pages stay the same,
+ * and the map can be kept.
+ */
+ if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
+ vmw_bo_unmap(vbo);
+
+ /*
+ * If we're moving a backup MOB out of MOB placement, then make sure we
+ * read back all resource content first, and unbind the MOB from
+ * the resource.
+ */
+ if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
+ vmw_resource_unbind_list(vbo);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 9f45d5004cae..e7e4655d3f36 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 36c7b6c839c0..3b75af9bf85f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 3767ac335aca..7c3cb8efd11a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -38,7 +38,7 @@ struct vmw_user_context {
struct vmw_cmdbuf_res_manager *man;
struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
spinlock_t cotable_lock;
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
};
static void vmw_user_context_free(struct vmw_resource *res);
@@ -424,7 +424,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(bo, fence);
+ vmw_bo_fence_single(bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -648,7 +648,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(bo, fence);
+ vmw_bo_fence_single(bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -900,7 +900,7 @@ vmw_context_binding_state(struct vmw_resource *ctx)
* specified in the parameter. 0 otherwise.
*/
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
- struct vmw_dma_buffer *mob)
+ struct vmw_buffer_object *mob)
{
struct vmw_user_context *uctx =
container_of(ctx_res, struct vmw_user_context, res);
@@ -908,7 +908,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
if (mob == NULL) {
if (uctx->dx_query_mob) {
uctx->dx_query_mob->dx_query_ctx = NULL;
- vmw_dmabuf_unreference(&uctx->dx_query_mob);
+ vmw_bo_unreference(&uctx->dx_query_mob);
uctx->dx_query_mob = NULL;
}
@@ -922,7 +922,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
mob->dx_query_ctx = ctx_res;
if (!uctx->dx_query_mob)
- uctx->dx_query_mob = vmw_dmabuf_reference(mob);
+ uctx->dx_query_mob = vmw_bo_reference(mob);
return 0;
}
@@ -932,7 +932,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
*
* @ctx_res: The context resource
*/
-struct vmw_dma_buffer *
+struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
struct vmw_user_context *uctx =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index cbf54ea7b4c0..1d45714e1d5a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -324,7 +324,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
mutex_unlock(&dev_priv->binding_mutex);
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- vmw_fence_single_bo(bo, fence);
+ vmw_bo_fence_single(bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -367,7 +367,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
}
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- vmw_fence_single_bo(&res->backup->base, fence);
+ vmw_bo_fence_single(&res->backup->base, fence);
vmw_fence_obj_unreference(&fence);
return 0;
@@ -390,7 +390,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
struct ttm_operation_ctx ctx = { false, false };
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_cotable *vcotbl = vmw_cotable(res);
- struct vmw_dma_buffer *buf, *old_buf = res->backup;
+ struct vmw_buffer_object *buf, *old_buf = res->backup;
struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
size_t old_size = res->backup_size;
size_t old_size_read_back = vcotbl->size_read_back;
@@ -415,8 +415,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
if (!buf)
return -ENOMEM;
- ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
- true, vmw_dmabuf_bo_free);
+ ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+ true, vmw_bo_bo_free);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
return ret;
@@ -482,7 +482,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
/* Let go of the old mob. */
list_del(&res->mob_head);
list_add_tail(&res->mob_head, &buf->res_list);
- vmw_dmabuf_unreference(&old_buf);
+ vmw_bo_unreference(&old_buf);
res->id = vcotbl->type;
return 0;
@@ -491,7 +491,7 @@ out_map_new:
ttm_bo_kunmap(&old_map);
out_wait:
ttm_bo_unreserve(bo);
- vmw_dmabuf_unreference(&buf);
+ vmw_bo_unreference(&buf);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
deleted file mode 100644
index d59d9dd16ebc..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ /dev/null
@@ -1,376 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include <drm/ttm/ttm_placement.h>
-
-#include <drm/drmP.h>
-#include "vmwgfx_drv.h"
-
-
-/**
- * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @placement: The placement to pin it.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- struct ttm_placement *placement,
- bool interruptible)
-{
- struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
- int ret;
- uint32_t new_flags;
-
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_execbuf_release_pinned_bo(dev_priv);
-
- ret = ttm_bo_reserve(bo, interruptible, false, NULL);
- if (unlikely(ret != 0))
- goto err;
-
- if (buf->pin_count > 0)
- ret = ttm_bo_mem_compat(placement, &bo->mem,
- &new_flags) == true ? 0 : -EINVAL;
- else
- ret = ttm_bo_validate(bo, placement, &ctx);
-
- if (!ret)
- vmw_bo_pin_reserved(buf, true);
-
- ttm_bo_unreserve(bo);
-
-err:
- ttm_write_unlock(&dev_priv->reservation_sem);
- return ret;
-}
-
-/**
- * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @pin: Pin buffer if true.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible)
-{
- struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
- int ret;
- uint32_t new_flags;
-
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_execbuf_release_pinned_bo(dev_priv);
-
- ret = ttm_bo_reserve(bo, interruptible, false, NULL);
- if (unlikely(ret != 0))
- goto err;
-
- if (buf->pin_count > 0) {
- ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
- &new_flags) == true ? 0 : -EINVAL;
- goto out_unreserve;
- }
-
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
- if (likely(ret == 0) || ret == -ERESTARTSYS)
- goto out_unreserve;
-
- ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
-
-out_unreserve:
- if (!ret)
- vmw_bo_pin_reserved(buf, true);
-
- ttm_bo_unreserve(bo);
-err:
- ttm_write_unlock(&dev_priv->reservation_sem);
- return ret;
-}
-
-/**
- * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to move.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible)
-{
- return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
- interruptible);
-}
-
-/**
- * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to pin.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible)
-{
- struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
- struct ttm_placement placement;
- struct ttm_place place;
- int ret = 0;
- uint32_t new_flags;
-
- place = vmw_vram_placement.placement[0];
- place.lpfn = bo->num_pages;
- placement.num_placement = 1;
- placement.placement = &place;
- placement.num_busy_placement = 1;
- placement.busy_placement = &place;
-
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, NULL);
- if (unlikely(ret != 0))
- goto err_unlock;
-
- /*
- * Is this buffer already in vram but not at the start of it?
- * In that case, evict it first because TTM isn't good at handling
- * that situation.
- */
- if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.start < bo->num_pages &&
- bo->mem.start > 0 &&
- buf->pin_count == 0) {
- ctx.interruptible = false;
- (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
- }
-
- if (buf->pin_count > 0)
- ret = ttm_bo_mem_compat(&placement, &bo->mem,
- &new_flags) == true ? 0 : -EINVAL;
- else
- ret = ttm_bo_validate(bo, &placement, &ctx);
-
- /* For some reason we didn't end up at the start of vram */
- WARN_ON(ret == 0 && bo->offset != 0);
- if (!ret)
- vmw_bo_pin_reserved(buf, true);
-
- ttm_bo_unreserve(bo);
-err_unlock:
- ttm_write_unlock(&dev_priv->reservation_sem);
-
- return ret;
-}
-
-/**
- * vmw_dmabuf_unpin - Unpin the buffer given buffer, does not move the buffer.
- *
- * This function takes the reservation_sem in write mode.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to unpin.
- * @interruptible: Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible)
-{
- struct ttm_buffer_object *bo = &buf->base;
- int ret;
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_reserve(bo, interruptible, false, NULL);
- if (unlikely(ret != 0))
- goto err;
-
- vmw_bo_pin_reserved(buf, false);
-
- ttm_bo_unreserve(bo);
-
-err:
- ttm_read_unlock(&dev_priv->reservation_sem);
- return ret;
-}
-
-/**
- * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
- * of a buffer.
- *
- * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
- * @ptr: SVGAGuestPtr returning the result.
- */
-void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
- SVGAGuestPtr *ptr)
-{
- if (bo->mem.mem_type == TTM_PL_VRAM) {
- ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
- ptr->offset = bo->offset;
- } else {
- ptr->gmrId = bo->mem.start;
- ptr->offset = 0;
- }
-}
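
Put concretely, the helper above yields one of two encodings. A short illustration (hypothetical usage, not part of this patch):

	SVGAGuestPtr ptr;

	vmw_bo_get_guest_ptr(bo, &ptr);	/* bo pinned or reserved */
	/* VRAM-backed: ptr.gmrId == SVGA_GMR_FRAMEBUFFER and ptr.offset is the
	 * byte offset into the framebuffer (bo->offset).
	 * GMR-backed:  ptr.gmrId is the GMR id (bo->mem.start) and ptr.offset
	 * is 0, since the id itself names the memory region. */
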
-
-
-/**
- * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
- *
- * @vbo: The buffer object. Must be reserved.
- * @pin: Whether to pin or unpin.
- *
- */
-void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
-{
- struct ttm_operation_ctx ctx = { false, true };
- struct ttm_place pl;
- struct ttm_placement placement;
- struct ttm_buffer_object *bo = &vbo->base;
- uint32_t old_mem_type = bo->mem.mem_type;
- int ret;
-
- lockdep_assert_held(&bo->resv->lock.base);
-
- if (pin) {
- if (vbo->pin_count++ > 0)
- return;
- } else {
- WARN_ON(vbo->pin_count <= 0);
- if (--vbo->pin_count > 0)
- return;
- }
-
- pl.fpfn = 0;
- pl.lpfn = 0;
- pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
- | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
- if (pin)
- pl.flags |= TTM_PL_FLAG_NO_EVICT;
-
- memset(&placement, 0, sizeof(placement));
- placement.num_placement = 1;
- placement.placement = &pl;
-
- ret = ttm_bo_validate(bo, &placement, &ctx);
-
- BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
-}
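
Because the pin count is reference counted, only the 0->1 and 1->0 transitions reach ttm_bo_validate(); the placement flags cover every memory pool, so the validate merely toggles TTM_PL_FLAG_NO_EVICT without moving the buffer. A hedged usage sketch (hypothetical caller, pre-rename names):

	static int example_pin(struct vmw_dma_buffer *vbo)
	{
		int ret = ttm_bo_reserve(&vbo->base, true, false, NULL);

		if (ret)
			return ret;
		vmw_bo_pin_reserved(vbo, true);	/* first pin sets NO_EVICT */
		ttm_bo_unreserve(&vbo->base);
		return 0;
	}
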
-
-
-/*
- * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
- *
- * @vbo: The buffer object whose map we are tearing down.
- *
- * This function tears down a cached map set up using
- * vmw_dma_buffer_map_and_cache().
- */
-void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
-{
- if (vbo->map.bo == NULL)
- return;
-
- ttm_bo_kunmap(&vbo->map);
-}
-
-
-/*
- * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
- *
- * @vbo: The buffer object to map
- * Return: A kernel virtual address or NULL if mapping failed.
- *
- * This function maps a buffer object into the kernel address space, or
- * returns the virtual kernel address of an already existing map. The virtual
- * address remains valid as long as the buffer object is pinned or reserved.
- * The cached map is torn down on either
- * 1) Buffer object move
- * 2) Buffer object swapout
- * 3) Buffer object destruction
- *
- */
-void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
-{
- struct ttm_buffer_object *bo = &vbo->base;
- bool not_used;
- void *virtual;
- int ret;
-
- virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
- if (virtual)
- return virtual;
-
- ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
- if (ret)
- DRM_ERROR("Buffer object map failed: %d.\n", ret);
-
- return ttm_kmap_obj_virtual(&vbo->map, &not_used);
-}
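
A hedged usage sketch of the cached map (hypothetical caller, pre-rename names); the returned pointer may be reused across calls and is only torn down by the move, swapout and destruction paths listed above:

	static int example_clear_first_bytes(struct vmw_dma_buffer *vbo)
	{
		void *virtual;
		int ret = ttm_bo_reserve(&vbo->base, false, false, NULL);

		if (ret)
			return ret;
		virtual = vmw_dma_buffer_map_and_cache(vbo);
		if (virtual)
			memset(virtual, 0, 16);	/* valid while pinned or reserved */
		ttm_bo_unreserve(&vbo->base);
		return virtual ? 0 : -ENOMEM;
	}
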
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 09cc721160c4..bb6dbbe18835 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -137,6 +137,12 @@
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \
+ union drm_vmw_gb_surface_create_ext_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
+ union drm_vmw_gb_surface_reference_ext_arg)
/**
* The core DRM version of this macro doesn't account for
@@ -153,9 +159,9 @@
static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
- VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+ VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
- VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl,
@@ -219,11 +225,17 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
vmw_gb_surface_reference_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_SYNCCPU,
- vmw_user_dmabuf_synccpu_ioctl,
+ vmw_user_bo_synccpu_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
vmw_extended_context_define_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
+ vmw_gb_surface_define_ext_ioctl,
+ DRM_AUTH | DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
+ vmw_gb_surface_reference_ext_ioctl,
+ DRM_AUTH | DRM_RENDER_ALLOW),
};
static const struct pci_device_id vmw_pci_id_list[] = {
@@ -258,6 +270,15 @@ MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
+static void vmw_print_capabilities2(uint32_t capabilities2)
+{
+ DRM_INFO("Capabilities2:\n");
+ if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
+ DRM_INFO(" Grow oTable.\n");
+ if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
+ DRM_INFO(" IntraSurface copy.\n");
+}
+
static void vmw_print_capabilities(uint32_t capabilities)
{
DRM_INFO("Capabilities:\n");
@@ -321,7 +342,7 @@ static void vmw_print_capabilities(uint32_t capabilities)
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
int ret;
- struct vmw_dma_buffer *vbo;
+ struct vmw_buffer_object *vbo;
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
@@ -335,9 +356,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (!vbo)
return -ENOMEM;
- ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
- &vmw_sys_ne_placement, false,
- &vmw_dmabuf_bo_free);
+ ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
+ &vmw_sys_ne_placement, false,
+ &vmw_bo_bo_free);
if (unlikely(ret != 0))
return ret;
@@ -358,7 +379,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0)) {
DRM_ERROR("Dummy query buffer map failed.\n");
- vmw_dmabuf_unreference(&vbo);
+ vmw_bo_unreference(&vbo);
} else
dev_priv->dummy_query_bo = vbo;
@@ -460,7 +481,7 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
BUG_ON(dev_priv->pinned_bo != NULL);
- vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+ vmw_bo_unreference(&dev_priv->dummy_query_bo);
if (dev_priv->cman)
vmw_cmdbuf_remove_pool(dev_priv->cman);
@@ -644,6 +665,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
+ mutex_init(&dev_priv->requested_layout_mutex);
mutex_init(&dev_priv->global_kms_state_mutex);
rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
@@ -683,6 +705,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+
+ if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
+ dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
+
ret = vmw_dma_select_mode(dev_priv);
if (unlikely(ret != 0)) {
DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
@@ -751,6 +779,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
vmw_print_capabilities(dev_priv->capabilities);
+ if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
+ vmw_print_capabilities2(dev_priv->capabilities2);
ret = vmw_dma_masks(dev_priv);
if (unlikely(ret != 0))
@@ -883,7 +913,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv->has_mob) {
spin_lock(&dev_priv->cap_lock);
- vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
spin_unlock(&dev_priv->cap_lock);
}
@@ -898,9 +928,23 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret)
goto out_no_fifo;
+ if (dev_priv->has_dx) {
+ /*
+ * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
+ * support
+ */
+ if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+ SVGA3D_DEVCAP_SM41);
+ dev_priv->has_sm4_1 = vmw_read(dev_priv,
+ SVGA_REG_DEV_CAP);
+ }
+ }
+
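The SM4_1 probe follows the same devcap handshake used for the DX probe earlier in this function: write the capability index to SVGA_REG_DEV_CAP, then read the register back. A minimal sketch of the pattern (hypothetical helper mirroring the cap_lock use of the DX probe; not part of this patch):

	static bool example_has_devcap(struct vmw_private *dev_priv, u32 cap)
	{
		bool result;

		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, cap);
		result = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
		return result;
	}
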
DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
- DRM_INFO("Atomic: %s\n",
- (dev->driver->driver_features & DRIVER_ATOMIC) ? "yes" : "no");
+ DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
+ ? "yes." : "no.");
+ DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");
snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
VMWGFX_REPO, VMWGFX_GIT_VERSION);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5fcbe1620d50..1abe21758b0d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -43,10 +43,10 @@
#include <linux/sync_file.h>
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20180322"
+#define VMWGFX_DRIVER_DATE "20180704"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 14
-#define VMWGFX_DRIVER_PATCHLEVEL 1
+#define VMWGFX_DRIVER_MINOR 15
+#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -83,10 +83,10 @@
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
- bool gb_aware;
+ bool gb_aware; /* user-space is guest-backed aware */
};
-struct vmw_dma_buffer {
+struct vmw_buffer_object {
struct ttm_buffer_object base;
struct list_head res_list;
s32 pin_count;
@@ -120,7 +120,7 @@ struct vmw_resource {
unsigned long backup_size;
bool res_dirty; /* Protected by backup buffer reserved */
bool backup_dirty; /* Protected by backup buffer reserved */
- struct vmw_dma_buffer *backup;
+ struct vmw_buffer_object *backup;
unsigned long backup_offset;
unsigned long pin_count; /* Protected by resource reserved */
const struct vmw_res_func *func;
@@ -166,7 +166,7 @@ struct vmw_surface_offset;
struct vmw_surface {
struct vmw_resource res;
- uint32_t flags;
+ SVGA3dSurfaceAllFlags flags;
uint32_t format;
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
struct drm_vmw_size base_size;
@@ -180,6 +180,8 @@ struct vmw_surface {
SVGA3dTextureFilter autogen_filter;
uint32_t multisample_count;
struct list_head view_list;
+ SVGA3dMSPattern multisample_pattern;
+ SVGA3dMSQualityLevel quality_level;
};
struct vmw_marker_queue {
@@ -304,7 +306,7 @@ struct vmw_sw_context{
uint32_t cmd_bounce_size;
struct list_head resource_list;
struct list_head ctx_resource_list; /* For contexts and cotables */
- struct vmw_dma_buffer *cur_query_bo;
+ struct vmw_buffer_object *cur_query_bo;
struct list_head res_relocations;
uint32_t *buf_start;
struct vmw_res_cache_entry res_cache[vmw_res_max];
@@ -315,7 +317,7 @@ struct vmw_sw_context{
bool staged_bindings_inuse;
struct list_head staged_cmd_res;
struct vmw_resource_val_node *dx_ctx_node;
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
struct vmw_resource *dx_query_ctx;
struct vmw_cmdbuf_res_manager *man;
};
@@ -386,6 +388,7 @@ struct vmw_private {
uint32_t initial_height;
u32 *mmio_virt;
uint32_t capabilities;
+ uint32_t capabilities2;
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
uint32_t max_mob_pages;
@@ -397,6 +400,7 @@ struct vmw_private {
spinlock_t cap_lock;
bool has_dx;
bool assume_16bpp;
+ bool has_sm4_1;
/*
* VGA registers.
@@ -412,6 +416,15 @@ struct vmw_private {
uint32_t num_displays;
/*
+ * Currently requested_layout_mutex is used to protect the gui
+ * positioning state in the display unit. With that use case, this
+ * mutex is currently only taken during the layout ioctl and atomic
+ * check_modeset. Other display unit state could also be protected by
+ * this mutex, but that needs careful consideration.
+ */
+ struct mutex requested_layout_mutex;
+
+ /*
* Framebuffer info.
*/
@@ -513,8 +526,8 @@ struct vmw_private {
* are protected by the cmdbuf mutex.
*/
- struct vmw_dma_buffer *dummy_query_bo;
- struct vmw_dma_buffer *pinned_bo;
+ struct vmw_buffer_object *dummy_query_bo;
+ struct vmw_buffer_object *pinned_bo;
uint32_t query_cid;
uint32_t query_cid_valid;
bool dummy_query_bo_pinned;
@@ -623,43 +636,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
struct vmw_surface **out_surf,
- struct vmw_dma_buffer **out_buf);
+ struct vmw_buffer_object **out_buf);
extern int vmw_user_resource_lookup_handle(
struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
const struct vmw_user_resource_conv *converter,
struct vmw_resource **p_res);
-extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
-extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *vmw_bo,
- size_t size, struct ttm_placement *placement,
- bool interuptable,
- void (*bo_free) (struct ttm_buffer_object *bo));
-extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
- struct ttm_object_file *tfile);
-extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t size,
- bool shareable,
- uint32_t *handle,
- struct vmw_dma_buffer **p_dma_buf,
- struct ttm_base_object **p_base);
-extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
- struct vmw_dma_buffer *dma_buf,
- uint32_t *handle);
-extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
- uint32_t cur_validate_node);
-extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
-extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
- uint32_t id, struct vmw_dma_buffer **out,
- struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -670,43 +653,70 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
bool switch_backup,
- struct vmw_dma_buffer *new_backup,
+ struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset);
-extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo);
-extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
-extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
- struct vmw_fence_obj *fence);
+extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
-
+extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
/**
- * DMA buffer helper routines - vmwgfx_dmabuf.c
+ * Buffer object helper functions - vmwgfx_bo.c
*/
-extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo,
- struct ttm_placement *placement,
+extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
+ struct vmw_buffer_object *bo,
+ struct ttm_placement *placement,
+ bool interruptible);
+extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible);
+extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *buf,
+ bool interruptible);
+extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_buffer_object *bo,
bool interruptible);
-extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible);
-extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool interruptible);
-extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo,
- bool interruptible);
-extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo,
- bool interruptible);
+extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
+ struct vmw_buffer_object *bo,
+ bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
-extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
-extern void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo);
-extern void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo);
+extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
+extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_bo_init(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *vmw_bo,
+ size_t size, struct ttm_placement *placement,
+ bool interuptable,
+ void (*bo_free)(struct ttm_buffer_object *bo));
+extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile);
+extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t size,
+ bool shareable,
+ uint32_t *handle,
+ struct vmw_buffer_object **p_dma_buf,
+ struct ttm_base_object **p_base);
+extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
+ struct vmw_buffer_object *dma_buf,
+ uint32_t *handle);
+extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+ uint32_t id, struct vmw_buffer_object **out,
+ struct ttm_base_object **base);
+extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence);
+extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
+extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
+extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -758,7 +768,7 @@ extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
/**
- * TTM buffer object driver - vmwgfx_buffer.c
+ * TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
extern const size_t vmw_tt_size;
@@ -1041,8 +1051,8 @@ vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
- struct vmw_dma_buffer *mob);
-extern struct vmw_dma_buffer *
+ struct vmw_buffer_object *mob);
+extern struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
@@ -1070,14 +1080,22 @@ extern int vmw_surface_validate(struct vmw_private *dev_priv,
struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
uint32_t user_accounting_size,
- uint32_t svga3d_flags,
+ SVGA3dSurfaceAllFlags svga3d_flags,
SVGA3dSurfaceFormat format,
bool for_scanout,
uint32_t num_mip_levels,
uint32_t multisample_count,
uint32_t array_size,
struct drm_vmw_size size,
+ SVGA3dMSPattern multisample_pattern,
+ SVGA3dMSQualityLevel quality_level,
struct vmw_surface **srf_out);
+extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv);
+extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv);
/*
* Shader management - vmwgfx_shader.c
@@ -1224,6 +1242,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
u32 w, u32 h,
struct vmw_diff_cpy *diff);
+/* Host messaging -vmwgfx_msg.c: */
+int vmw_host_get_guestinfo(const char *guest_info_param,
+ char *buffer, size_t *length);
+int vmw_host_log(const char *log);
+
/**
* Inline helper functions
*/
@@ -1243,9 +1266,9 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
return srf;
}
-static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
+static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
{
- struct vmw_dma_buffer *tmp_buf = *buf;
+ struct vmw_buffer_object *tmp_buf = *buf;
*buf = NULL;
if (tmp_buf != NULL) {
@@ -1255,7 +1278,8 @@ static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
}
}
-static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
+static inline struct vmw_buffer_object *
+vmw_bo_reference(struct vmw_buffer_object *buf)
{
if (ttm_bo_reference(&buf->base))
return buf;
@@ -1302,10 +1326,4 @@ static inline void vmw_mmio_write(u32 value, u32 *addr)
{
WRITE_ONCE(*addr, value);
}
-
-/**
- * Add vmw_msg module function
- */
-extern int vmw_host_log(const char *log);
-
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c9d5cc237124..1f134570b759 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -92,7 +92,7 @@ struct vmw_resource_val_node {
struct list_head head;
struct drm_hash_item hash;
struct vmw_resource *res;
- struct vmw_dma_buffer *new_backup;
+ struct vmw_buffer_object *new_backup;
struct vmw_ctx_binding_state *staged_bindings;
unsigned long new_backup_offset;
u32 first_usage : 1;
@@ -126,9 +126,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
- struct vmw_dma_buffer **vmw_bo_p);
+ struct vmw_buffer_object **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_dma_buffer *vbo,
+ struct vmw_buffer_object *vbo,
bool validate_as_mob,
uint32_t *p_val_node);
/**
@@ -185,7 +185,7 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
}
vmw_resource_unreserve(res, switch_backup, val->new_backup,
val->new_backup_offset);
- vmw_dmabuf_unreference(&val->new_backup);
+ vmw_bo_unreference(&val->new_backup);
}
}
@@ -423,7 +423,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
}
if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
if (dx_query_mob)
@@ -544,7 +544,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
* submission is reached.
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_dma_buffer *vbo,
+ struct vmw_buffer_object *vbo,
bool validate_as_mob,
uint32_t *p_val_node)
{
@@ -616,7 +616,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
return ret;
if (res->backup) {
- struct vmw_dma_buffer *vbo = res->backup;
+ struct vmw_buffer_object *vbo = res->backup;
ret = vmw_bo_to_validate_list
(sw_context, vbo,
@@ -628,7 +628,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
}
if (sw_context->dx_query_mob) {
- struct vmw_dma_buffer *expected_dx_query_mob;
+ struct vmw_buffer_object *expected_dx_query_mob;
expected_dx_query_mob =
vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
@@ -657,7 +657,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
- struct vmw_dma_buffer *backup = res->backup;
+ struct vmw_buffer_object *backup = res->backup;
ret = vmw_resource_validate(res);
if (unlikely(ret != 0)) {
@@ -668,7 +668,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
/* Check if the resource switched backup buffer */
if (backup && res->backup && (backup != res->backup)) {
- struct vmw_dma_buffer *vbo = res->backup;
+ struct vmw_buffer_object *vbo = res->backup;
ret = vmw_bo_to_validate_list
(sw_context, vbo,
@@ -821,7 +821,7 @@ out_no_reloc:
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
struct vmw_private *dev_priv = ctx_res->dev_priv;
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXBindAllQuery body;
@@ -1152,7 +1152,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *new_query_bo,
+ struct vmw_buffer_object *new_query_bo,
struct vmw_sw_context *sw_context)
{
struct vmw_res_cache_entry *ctx_entry =
@@ -1234,7 +1234,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
if (dev_priv->pinned_bo) {
vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
- vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+ vmw_bo_unreference(&dev_priv->pinned_bo);
}
if (!sw_context->needs_post_query_barrier) {
@@ -1256,7 +1256,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
dev_priv->query_cid = sw_context->last_query_ctx->id;
dev_priv->query_cid_valid = true;
dev_priv->pinned_bo =
- vmw_dmabuf_reference(sw_context->cur_query_bo);
+ vmw_bo_reference(sw_context->cur_query_bo);
}
}
}
@@ -1282,15 +1282,14 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
- struct vmw_dma_buffer **vmw_bo_p)
+ struct vmw_buffer_object **vmw_bo_p)
{
- struct vmw_dma_buffer *vmw_bo = NULL;
+ struct vmw_buffer_object *vmw_bo = NULL;
uint32_t handle = *id;
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
- NULL);
+ ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
ret = -EINVAL;
@@ -1316,7 +1315,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
return 0;
out_no_reloc:
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
*vmw_bo_p = NULL;
return ret;
}
@@ -1343,15 +1342,14 @@ out_no_reloc:
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
- struct vmw_dma_buffer **vmw_bo_p)
+ struct vmw_buffer_object **vmw_bo_p)
{
- struct vmw_dma_buffer *vmw_bo = NULL;
+ struct vmw_buffer_object *vmw_bo = NULL;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
- NULL);
+ ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
ret = -EINVAL;
@@ -1376,7 +1374,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
return 0;
out_no_reloc:
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
*vmw_bo_p = NULL;
return ret;
}
@@ -1447,7 +1445,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
SVGA3dCmdDXBindQuery q;
} *cmd;
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
int ret;
@@ -1466,7 +1464,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
sw_context->dx_query_mob = vmw_bo;
sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1549,7 +1547,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdEndGBQuery q;
@@ -1569,7 +1567,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1584,7 +1582,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdEndQuery q;
@@ -1623,7 +1621,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1638,7 +1636,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery q;
@@ -1656,7 +1654,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return 0;
}
@@ -1671,7 +1669,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery q;
@@ -1708,7 +1706,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return 0;
}
@@ -1716,7 +1714,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dma_buffer *vmw_bo = NULL;
+ struct vmw_buffer_object *vmw_bo = NULL;
struct vmw_surface *srf = NULL;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
@@ -1768,7 +1766,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
header);
out_no_surface:
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1887,7 +1885,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
int ret;
struct {
@@ -1901,7 +1899,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1928,7 +1926,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
uint32_t *buf_id,
unsigned long backup_offset)
{
- struct vmw_dma_buffer *dma_buf;
+ struct vmw_buffer_object *dma_buf;
int ret;
ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
@@ -1939,7 +1937,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
if (val_node->first_usage)
val_node->no_buffer_needed = true;
- vmw_dmabuf_unreference(&val_node->new_backup);
+ vmw_bo_unreference(&val_node->new_backup);
val_node->new_backup = dma_buf;
val_node->new_backup_offset = backup_offset;
@@ -3118,6 +3116,32 @@ static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
&cmd->body.destSid, NULL);
}
+/**
+ * vmw_cmd_intra_surface_copy - Validate an SVGA_3D_CMD_INTRA_SURFACE_COPY command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdIntraSurfaceCopy body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
+ return -EINVAL;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.surface.sid, NULL);
+}
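
The container_of() in the new checker is the idiom vmwgfx uses throughout this file to recover the full command packet from the header the dispatcher passes in: the header is the first member of an anonymous struct type that also includes the body, so subtracting the member offset yields a pointer to the whole packet. Spelled out generically (illustrative only):

	struct {
		SVGA3dCmdHeader header;	/* what the dispatcher hands us */
		SVGA3dCmdIntraSurfaceCopy body;	/* follows it in the stream */
	} *cmd = container_of(header, typeof(*cmd), header);
	/* cmd->body is now addressable without any extra copy. */
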
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -3232,9 +3256,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
false, false, false),
@@ -3473,6 +3497,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
&vmw_cmd_dx_transfer_from_buffer,
true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
+ true, false, true),
};
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
@@ -3701,8 +3727,8 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
bool interruptible,
bool validate_as_mob)
{
- struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
- base);
+ struct vmw_buffer_object *vbo =
+ container_of(bo, struct vmw_buffer_object, base);
struct ttm_operation_ctx ctx = { interruptible, true };
int ret;
@@ -4423,7 +4449,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
- vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+ vmw_bo_unreference(&dev_priv->pinned_bo);
out_unlock:
return;
@@ -4432,7 +4458,7 @@ out_no_emit:
out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
- vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+ vmw_bo_unreference(&dev_priv->pinned_bo);
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 54e300365a5c..b913a56f3426 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -42,7 +42,7 @@ struct vmw_fb_par {
void *vmalloc;
struct mutex bo_mutex;
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
unsigned bo_size;
struct drm_framebuffer *set_fb;
struct drm_display_mode *set_mode;
@@ -184,7 +184,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
struct drm_clip_rect clip;
struct drm_framebuffer *cur_fb;
u8 *src_ptr, *dst_ptr;
- struct vmw_dma_buffer *vbo = par->vmw_bo;
+ struct vmw_buffer_object *vbo = par->vmw_bo;
void *virtual;
if (!READ_ONCE(par->dirty.active))
@@ -197,7 +197,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
- virtual = vmw_dma_buffer_map_and_cache(vbo);
+ virtual = vmw_bo_map_and_cache(vbo);
if (!virtual)
goto out_unreserve;
@@ -391,9 +391,9 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
*/
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
- size_t size, struct vmw_dma_buffer **out)
+ size_t size, struct vmw_buffer_object **out)
{
- struct vmw_dma_buffer *vmw_bo;
+ struct vmw_buffer_object *vmw_bo;
int ret;
(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
@@ -404,10 +404,10 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
goto err_unlock;
}
- ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
+ ret = vmw_bo_init(vmw_priv, vmw_bo, size,
&vmw_sys_placement,
false,
- &vmw_dmabuf_bo_free);
+ &vmw_bo_bo_free);
if (unlikely(ret != 0))
goto err_unlock; /* init frees the buffer on failure */
@@ -439,38 +439,13 @@ static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
struct drm_crtc *crtc = set->crtc;
- struct drm_framebuffer *fb;
- struct drm_crtc *tmp;
- struct drm_device *dev = set->crtc->dev;
struct drm_modeset_acquire_ctx ctx;
int ret;
drm_modeset_acquire_init(&ctx, 0);
restart:
- /*
- * NOTE: ->set_config can also disable other crtcs (if we steal all
- * connectors from it), hence we need to refcount the fbs across all
- * crtcs. Atomic modeset will have saner semantics ...
- */
- drm_for_each_crtc(tmp, dev)
- tmp->primary->old_fb = tmp->primary->fb;
-
- fb = set->fb;
-
ret = crtc->funcs->set_config(set, &ctx);
- if (ret == 0) {
- crtc->primary->crtc = crtc;
- crtc->primary->fb = fb;
- }
-
- drm_for_each_crtc(tmp, dev) {
- if (tmp->primary->fb)
- drm_framebuffer_get(tmp->primary->fb);
- if (tmp->primary->old_fb)
- drm_framebuffer_put(tmp->primary->old_fb);
- tmp->primary->old_fb = NULL;
- }
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
@@ -516,7 +491,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
}
if (par->vmw_bo && detach_bo && unref_bo)
- vmw_dmabuf_unreference(&par->vmw_bo);
+ vmw_bo_unreference(&par->vmw_bo);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 9ed544f8958f..3d546d409334 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -175,7 +175,6 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
struct vmw_private *dev_priv = fman->dev_priv;
struct vmwgfx_wait_cb cb;
long ret = timeout;
- unsigned long irq_flags;
if (likely(vmw_fence_obj_signaled(fence)))
return timeout;
@@ -183,7 +182,7 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
vmw_seqno_waiter_add(dev_priv);
- spin_lock_irqsave(f->lock, irq_flags);
+ spin_lock(f->lock);
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
@@ -194,30 +193,45 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
cb.task = current;
list_add(&cb.base.node, &f->cb_list);
- while (ret > 0) {
+ for (;;) {
__vmw_fences_update(fman);
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
- break;
+ /*
+ * We can use the barrier-free __set_current_state() since
+ * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
+ * fence spinlock.
+ */
if (intr)
__set_current_state(TASK_INTERRUPTIBLE);
else
__set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock_irqrestore(f->lock, irq_flags);
- ret = schedule_timeout(ret);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
+ if (ret == 0 && timeout > 0)
+ ret = 1;
+ break;
+ }
- spin_lock_irqsave(f->lock, irq_flags);
- if (ret > 0 && intr && signal_pending(current))
+ if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
- }
+ break;
+ }
+ if (ret == 0)
+ break;
+
+ spin_unlock(f->lock);
+
+ ret = schedule_timeout(ret);
+
+ spin_lock(f->lock);
+ }
+ __set_current_state(TASK_RUNNING);
if (!list_empty(&cb.base.node))
list_del(&cb.base.node);
- __set_current_state(TASK_RUNNING);
out:
- spin_unlock_irqrestore(f->lock, irq_flags);
+ spin_unlock(f->lock);
vmw_seqno_waiter_remove(dev_priv);
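
The rewritten loop is the canonical set-state-then-test sleep pattern: the task state is set before the signaled bit is checked, so a wakeup racing with the check simply turns the subsequent schedule_timeout() into a no-op instead of being lost. Condensed to its shape (generic sketch; condition and lock are placeholders shared with the waker):

	for (;;) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		if (condition)		/* tested after setting the state */
			break;
		if (timeout == 0)
			break;		/* out of time */
		spin_unlock(lock);
		timeout = schedule_timeout(timeout);
		spin_lock(lock);
	}
	__set_current_state(TASK_RUNNING);
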
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 20224dba9d8e..c9382933c2b9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2011-2012 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index a1c68e6a689e..d0fd147ef75f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 66ffa1d4759c..007a0cc7f232 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index f2f9d88131f2..ddb1e9365a3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index c5e8eae0dbe2..172a6ba6539c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -56,6 +56,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_HW_CAPS:
param->value = dev_priv->capabilities;
break;
+ case DRM_VMW_PARAM_HW_CAPS2:
+ param->value = dev_priv->capabilities2;
+ break;
case DRM_VMW_PARAM_FIFO_CAPS:
param->value = dev_priv->fifo.capabilities;
break;
@@ -113,6 +116,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_DX:
param->value = dev_priv->has_dx;
break;
+ case DRM_VMW_PARAM_SM4_1:
+ param->value = dev_priv->has_sm4_1;
+ break;
default:
return -EINVAL;
}
@@ -122,15 +128,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
{
- /* If the header is updated, update the format test as well! */
- BUILD_BUG_ON(SVGA3D_DEVCAP_DXFMT_BC5_UNORM + 1 != SVGA3D_DEVCAP_MAX);
-
- if (cap >= SVGA3D_DEVCAP_DXFMT_X8R8G8B8 &&
- cap <= SVGA3D_DEVCAP_DXFMT_BC5_UNORM)
- fmt_value &= ~(SVGADX_DXFMT_MULTISAMPLE_2 |
- SVGADX_DXFMT_MULTISAMPLE_4 |
- SVGADX_DXFMT_MULTISAMPLE_8);
- else if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
+ /*
+ * A version of user-space exists which uses MULTISAMPLE_MASKABLESAMPLES
+ * to check the sample count supported by the virtual device. Since there
+ * never was support for a multisample count on the backing MOB, return 0.
+ */
+ if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
return 0;
return fmt_value;
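
vmw_mask_multisample() is applied as the devcap values are copied out to user-space, so a legacy client probing MULTISAMPLE_MASKABLESAMPLES now reads a firm 0. Roughly the call shape (illustrative only; the real copy loop lives elsewhere in this file):

	vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
	value = vmw_mask_multisample(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
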
@@ -377,8 +380,8 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
}
vfb = vmw_framebuffer_to_vfb(fb);
- if (!vfb->dmabuf) {
- DRM_ERROR("Framebuffer not dmabuf backed.\n");
+ if (!vfb->bo) {
+ DRM_ERROR("Framebuffer not buffer backed.\n");
ret = -EINVAL;
goto out_no_ttm_lock;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index b9239ba067c4..c3ad4478266b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 01f2dc9e6f52..23beff5d8e3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -85,10 +85,10 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
return 0;
}
-static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
- u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
+static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *bo,
+ u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
{
struct ttm_bo_kmap_obj map;
unsigned long kmap_offset;
@@ -100,13 +100,13 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
kmap_offset = 0;
kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
+ ret = ttm_bo_reserve(&bo->base, true, false, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return -EINVAL;
}
- ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+ ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
if (unlikely(ret != 0))
goto err_unreserve;
@@ -116,7 +116,7 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
ttm_bo_kunmap(&map);
err_unreserve:
- ttm_bo_unreserve(&dmabuf->base);
+ ttm_bo_unreserve(&bo->base);
return ret;
}
@@ -352,13 +352,13 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
if (vps->surf)
vmw_surface_unreference(&vps->surf);
- if (vps->dmabuf)
- vmw_dmabuf_unreference(&vps->dmabuf);
+ if (vps->bo)
+ vmw_bo_unreference(&vps->bo);
if (fb) {
- if (vmw_framebuffer_to_vfb(fb)->dmabuf) {
- vps->dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
- vmw_dmabuf_reference(vps->dmabuf);
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
+ vmw_bo_reference(vps->bo);
} else {
vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
vmw_surface_reference(vps->surf);
@@ -390,7 +390,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
}
du->cursor_surface = vps->surf;
- du->cursor_dmabuf = vps->dmabuf;
+ du->cursor_bo = vps->bo;
if (vps->surf) {
du->cursor_age = du->cursor_surface->snooper.age;
@@ -399,11 +399,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
vps->surf->snooper.image,
64, 64, hotspot_x,
hotspot_y);
- } else if (vps->dmabuf) {
- ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf,
- plane->state->crtc_w,
- plane->state->crtc_h,
- hotspot_x, hotspot_y);
+ } else if (vps->bo) {
+ ret = vmw_cursor_update_bo(dev_priv, vps->bo,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ hotspot_x, hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
return;
@@ -519,7 +519,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
ret = -EINVAL;
}
- if (!vmw_framebuffer_to_vfb(fb)->dmabuf)
+ if (!vmw_framebuffer_to_vfb(fb)->bo)
surface = vmw_framebuffer_to_vfbs(fb)->surface;
if (surface && !surface->snooper.image) {
@@ -535,9 +535,9 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *new_state)
{
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
- int connector_mask = 1 << drm_connector_index(&du->connector);
+ int connector_mask = drm_connector_mask(&du->connector);
bool has_primary = new_state->plane_mask &
- BIT(drm_plane_index(crtc->primary));
+ drm_plane_mask(crtc->primary);
/* We always want to have an active plane with an active CRTC */
if (has_primary != new_state->enable)
@@ -687,8 +687,8 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
if (vps->surf)
(void) vmw_surface_reference(vps->surf);
- if (vps->dmabuf)
- (void) vmw_dmabuf_reference(vps->dmabuf);
+ if (vps->bo)
+ (void) vmw_bo_reference(vps->bo);
state = &vps->base;
@@ -745,8 +745,8 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
if (vps->surf)
vmw_surface_unreference(&vps->surf);
- if (vps->dmabuf)
- vmw_dmabuf_unreference(&vps->dmabuf);
+ if (vps->bo)
+ vmw_bo_unreference(&vps->bo);
drm_atomic_helper_plane_destroy_state(plane, state);
}
@@ -902,12 +902,12 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
/**
* vmw_kms_readback - Perform a readback from the screen system to
- * a dma-buffer backed framebuffer.
+ * a buffer-object backed framebuffer.
*
* @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm_file identifying the caller.
* Must be set to NULL if @user_fence_rep is NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
* @user_fence_rep: User-space provided structure for fence information.
* Must be set to non-NULL if @file_priv is non-NULL.
* @vclips: Array of clip rects.
@@ -951,7 +951,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd2
*mode_cmd,
- bool is_dmabuf_proxy)
+ bool is_bo_proxy)
{
struct drm_device *dev = dev_priv->dev;
@@ -1019,7 +1019,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
vfbs->surface = vmw_surface_reference(surface);
vfbs->base.user_handle = mode_cmd->handles[0];
- vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
+ vfbs->is_bo_proxy = is_bo_proxy;
*out = &vfbs->base;
@@ -1038,30 +1038,30 @@ out_err1:
}
/*
- * Dmabuf framebuffer code
+ * Buffer-object framebuffer code
*/
-static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
- struct vmw_framebuffer_dmabuf *vfbd =
+ struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
drm_framebuffer_cleanup(framebuffer);
- vmw_dmabuf_unreference(&vfbd->buffer);
+ vmw_bo_unreference(&vfbd->buffer);
if (vfbd->base.user_obj)
ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd);
}
-static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
- struct drm_file *file_priv,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
+static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
- struct vmw_framebuffer_dmabuf *vfbd =
+ struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
struct drm_clip_rect norect;
int ret, increment = 1;
@@ -1092,13 +1092,13 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
true, true, NULL);
break;
case vmw_du_screen_object:
- ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
- clips, NULL, num_clips,
- increment, true, NULL, NULL);
+ ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base,
+ clips, NULL, num_clips,
+ increment, true, NULL, NULL);
break;
case vmw_du_legacy:
- ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
- clips, num_clips, increment);
+ ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
+ clips, num_clips, increment);
break;
default:
ret = -EINVAL;
@@ -1114,23 +1114,23 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
return ret;
}
-static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
- .destroy = vmw_framebuffer_dmabuf_destroy,
- .dirty = vmw_framebuffer_dmabuf_dirty,
+static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
+ .destroy = vmw_framebuffer_bo_destroy,
+ .dirty = vmw_framebuffer_bo_dirty,
};
/**
- * Pin the dmabuffer in a location suitable for access by the
+ * Pin the buffer object in a location suitable for access by the
* display system.
*/
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
struct ttm_placement *placement;
int ret;
- buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
if (!buf)
@@ -1139,12 +1139,12 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
switch (dev_priv->active_display_unit) {
case vmw_du_legacy:
vmw_overlay_pause_all(dev_priv);
- ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
+ ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
vmw_overlay_resume_all(dev_priv);
break;
case vmw_du_screen_object:
case vmw_du_screen_target:
- if (vfb->dmabuf) {
+ if (vfb->bo) {
if (dev_priv->capabilities & SVGA_CAP_3D) {
/*
* Use surface DMA to get content to
@@ -1160,8 +1160,7 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
placement = &vmw_mob_placement;
}
- return vmw_dmabuf_pin_in_placement(dev_priv, buf, placement,
- false);
+ return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
default:
return -EINVAL;
}
@@ -1172,36 +1171,36 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
- buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
if (WARN_ON(!buf))
return 0;
- return vmw_dmabuf_unpin(dev_priv, buf, false);
+ return vmw_bo_unpin(dev_priv, buf, false);
}
/**
- * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
+ * vmw_create_bo_proxy - create a proxy surface for the buffer object
*
* @dev: DRM device
* @mode_cmd: parameters for the new surface
- * @dmabuf_mob: MOB backing the DMA buf
+ * @bo_mob: MOB backing the buffer object
* @srf_out: newly created surface
*
- * When the content FB is a DMA buf, we create a surface as a proxy to the
+ * When the content FB is a buffer object, we create a surface as a proxy to the
* same buffer. This way we can do a surface copy rather than a surface DMA.
* This is a more efficient approach
*
* RETURNS:
* 0 on success, error code otherwise
*/
-static int vmw_create_dmabuf_proxy(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct vmw_dma_buffer *dmabuf_mob,
- struct vmw_surface **srf_out)
+static int vmw_create_bo_proxy(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct vmw_buffer_object *bo_mob,
+ struct vmw_surface **srf_out)
{
uint32_t format;
struct drm_vmw_size content_base_size = {0};
@@ -1239,15 +1238,17 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
content_base_size.depth = 1;
ret = vmw_surface_gb_priv_define(dev,
- 0, /* kernel visible only */
- 0, /* flags */
- format,
- true, /* can be a scanout buffer */
- 1, /* num of mip levels */
- 0,
- 0,
- content_base_size,
- srf_out);
+ 0, /* kernel visible only */
+ 0, /* flags */
+ format,
+ true, /* can be a scanout buffer */
+ 1, /* num of mip levels */
+ 0,
+ 0,
+ content_base_size,
+ SVGA3D_MS_PATTERN_NONE,
+ SVGA3D_MS_QUALITY_NONE,
+ srf_out);
if (ret) {
DRM_ERROR("Failed to allocate proxy content buffer\n");
return ret;
@@ -1258,8 +1259,8 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
/* Reserve and switch the backing mob. */
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void) vmw_resource_reserve(res, false, true);
- vmw_dmabuf_unreference(&res->backup);
- res->backup = vmw_dmabuf_reference(dmabuf_mob);
+ vmw_bo_unreference(&res->backup);
+ res->backup = vmw_bo_reference(bo_mob);
res->backup_offset = 0;
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
@@ -1269,21 +1270,21 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
-static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
- struct vmw_framebuffer **out,
- const struct drm_mode_fb_cmd2
- *mode_cmd)
+static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
+ struct vmw_buffer_object *bo,
+ struct vmw_framebuffer **out,
+ const struct drm_mode_fb_cmd2
+ *mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
- struct vmw_framebuffer_dmabuf *vfbd;
+ struct vmw_framebuffer_bo *vfbd;
unsigned int requested_size;
struct drm_format_name_buf format_name;
int ret;
requested_size = mode_cmd->height * mode_cmd->pitches[0];
- if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
+ if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
return -EINVAL;
@@ -1312,20 +1313,20 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
}
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
- vfbd->base.dmabuf = true;
- vfbd->buffer = vmw_dmabuf_reference(dmabuf);
+ vfbd->base.bo = true;
+ vfbd->buffer = vmw_bo_reference(bo);
vfbd->base.user_handle = mode_cmd->handles[0];
*out = &vfbd->base;
ret = drm_framebuffer_init(dev, &vfbd->base.base,
- &vmw_framebuffer_dmabuf_funcs);
+ &vmw_framebuffer_bo_funcs);
if (ret)
goto out_err2;
return 0;
out_err2:
- vmw_dmabuf_unreference(&dmabuf);
+ vmw_bo_unreference(&bo);
kfree(vfbd);
out_err1:
return ret;
@@ -1354,57 +1355,57 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
* vmw_kms_new_framebuffer - Create a new framebuffer.
*
* @dev_priv: Pointer to device private struct.
- * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
- * Either @dmabuf or @surface must be NULL.
+ * @bo: Pointer to buffer object to wrap the kms framebuffer around.
+ * Either @bo or @surface must be NULL.
* @surface: Pointer to a surface to wrap the kms framebuffer around.
- * Either @dmabuf or @surface must be NULL.
- * @only_2d: No presents will occur to this dma buffer based framebuffer. This
- * Helps the code to do some important optimizations.
+ * Either @bo or @surface must be NULL.
+ * @only_2d: No presents will occur to this buffer object based framebuffer.
+ * This helps the code to do some important optimizations.
* @mode_cmd: Frame-buffer metadata.
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
+ struct vmw_buffer_object *bo,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
- bool is_dmabuf_proxy = false;
+ bool is_bo_proxy = false;
int ret;
/*
 * We cannot use the SurfaceDMA command in a non-accelerated VM,
- * therefore, wrap the DMA buf in a surface so we can use the
+ * therefore, wrap the buffer object in a surface so we can use the
* SurfaceCopy command.
*/
if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
- dmabuf && only_2d &&
+ bo && only_2d &&
mode_cmd->width > 64 && /* Don't create a proxy for cursor */
dev_priv->active_display_unit == vmw_du_screen_target) {
- ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
- dmabuf, &surface);
+ ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
+ bo, &surface);
if (ret)
return ERR_PTR(ret);
- is_dmabuf_proxy = true;
+ is_bo_proxy = true;
}
 /* Create the new framebuffer depending on what we have */
if (surface) {
ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
mode_cmd,
- is_dmabuf_proxy);
+ is_bo_proxy);
/*
- * vmw_create_dmabuf_proxy() adds a reference that is no longer
+ * vmw_create_bo_proxy() adds a reference that is no longer
* needed
*/
- if (is_dmabuf_proxy)
+ if (is_bo_proxy)
vmw_surface_unreference(&surface);
- } else if (dmabuf) {
- ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
- mode_cmd);
+ } else if (bo) {
+ ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
+ mode_cmd);
} else {
BUG();
}
@@ -1430,23 +1431,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
- struct vmw_dma_buffer *bo = NULL;
+ struct vmw_buffer_object *bo = NULL;
struct ttm_base_object *user_obj;
int ret;
- /**
- * This code should be conditioned on Screen Objects not being used.
- * If screen objects are used, we can allocate a GMR to hold the
- * requested framebuffer.
- */
-
- if (!vmw_kms_validate_mode_vram(dev_priv,
- mode_cmd->pitches[0],
- mode_cmd->height)) {
- DRM_ERROR("Requested mode exceed bounding box limit.\n");
- return ERR_PTR(-ENOMEM);
- }
-
/*
* Take a reference on the user object of the resource
* backing the kms fb. This ensures that user-space handle
@@ -1466,7 +1454,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* End conditioned code.
*/
- /* returns either a dmabuf or surface */
+ /* returns either a bo or surface */
ret = vmw_user_lookup_handle(dev_priv, tfile,
mode_cmd->handles[0],
&surface, &bo);
@@ -1494,7 +1482,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo)
- vmw_dmabuf_unreference(&bo);
+ vmw_bo_unreference(&bo);
if (surface)
vmw_surface_unreference(&surface);
@@ -1508,7 +1496,168 @@ err_out:
return &vfb->base;
}
+/**
+ * vmw_kms_check_display_memory - Validates display memory required for a
+ * topology
+ * @dev: DRM device
+ * @num_rects: number of drm_rect in rects
+ * @rects: array of drm_rect representing the topology to validate indexed by
+ * crtc index.
+ *
+ * Returns:
+ * 0 on success otherwise negative error code
+ */
+static int vmw_kms_check_display_memory(struct drm_device *dev,
+ uint32_t num_rects,
+ struct drm_rect *rects)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_rect bounding_box = {0};
+ u64 total_pixels = 0, pixel_mem, bb_mem;
+ int i;
+
+ for (i = 0; i < num_rects; i++) {
+ /*
+ * Currently this check limits the topology to the maximum
+ * texture/screentarget size. This should change in the future when
+ * user-space supports multiple framebuffers with a topology.
+ */
+ if (rects[i].x1 < 0 || rects[i].y1 < 0 ||
+ rects[i].x2 > mode_config->max_width ||
+ rects[i].y2 > mode_config->max_height) {
+ DRM_ERROR("Invalid GUI layout.\n");
+ return -EINVAL;
+ }
+ /* Bounding box upper left is at (0,0). */
+ if (rects[i].x2 > bounding_box.x2)
+ bounding_box.x2 = rects[i].x2;
+
+ if (rects[i].y2 > bounding_box.y2)
+ bounding_box.y2 = rects[i].y2;
+
+ total_pixels += (u64) drm_rect_width(&rects[i]) *
+ (u64) drm_rect_height(&rects[i]);
+ }
+
+ /* Virtual svga device primary limits are always in 32-bpp. */
+ pixel_mem = total_pixels * 4;
+
+ /*
+ * For HV10 and below, prim_bb_mem is the VRAM size. When
+ * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM size
+ * is the limit on the primary bounding box.
+ */
+ if (pixel_mem > dev_priv->prim_bb_mem) {
+ DRM_ERROR("Combined output size too large.\n");
+ return -EINVAL;
+ }
+
+ /* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
+ if (dev_priv->active_display_unit != vmw_du_screen_target ||
+ !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
+ bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
+
+ if (bb_mem > dev_priv->prim_bb_mem) {
+ DRM_ERROR("Topology is beyond supported limits.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
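For illustration, a minimal stand-alone sketch of the same 32-bpp accounting for a hypothetical pair of 1920x1080 outputs placed side by side (not driver code; all names here are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Two 1920x1080 outputs side by side: bounding box is 3840x1080. */
		uint64_t total_pixels = (uint64_t)1920 * 1080 * 2;
		uint64_t pixel_mem = total_pixels * 4;       /* primaries are costed at 32 bpp */
		uint64_t bb_mem = (uint64_t)3840 * 1080 * 4; /* bounding-box memory */

		/*
		 * vmw_kms_check_display_memory() accepts the topology only if
		 * both values stay below dev_priv->prim_bb_mem.
		 */
		printf("pixel_mem=%llu bb_mem=%llu\n",
		       (unsigned long long)pixel_mem, (unsigned long long)bb_mem);
		return 0;
	}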
+
+/**
+ * vmw_kms_check_topology - Validates topology in drm_atomic_state
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Returns:
+ * 0 on success otherwise negative error code
+ */
+static int vmw_kms_check_topology(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_rect *rects;
+ struct drm_crtc *crtc;
+ uint32_t i;
+ int ret = 0;
+
+ rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
+ GFP_KERNEL);
+ if (!rects)
+ return -ENOMEM;
+
+ mutex_lock(&dev_priv->requested_layout_mutex);
+
+ drm_for_each_crtc(crtc, dev) {
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct drm_crtc_state *crtc_state = crtc->state;
+
+ i = drm_crtc_index(crtc);
+
+ if (crtc_state && crtc_state->enable) {
+ rects[i].x1 = du->gui_x;
+ rects[i].y1 = du->gui_y;
+ rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
+ rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
+ }
+ }
+
+ /* Determine change to topology due to new atomic state */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct vmw_connector_state *vmw_conn_state;
+
+ if (!new_crtc_state->enable && old_crtc_state->enable) {
+ rects[i].x1 = 0;
+ rects[i].y1 = 0;
+ rects[i].x2 = 0;
+ rects[i].y2 = 0;
+ continue;
+ }
+
+ if (!du->pref_active) {
+ ret = -EINVAL;
+ goto clean;
+ }
+
+ /*
+ * For vmwgfx each crtc has only one connector attached and it
+ * does not change, so there is no real need to check
+ * crtc->connector_mask and iterate over it.
+ */
+ connector = &du->connector;
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
+ goto clean;
+ }
+
+ vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+ vmw_conn_state->gui_x = du->gui_x;
+ vmw_conn_state->gui_y = du->gui_y;
+
+ rects[i].x1 = du->gui_x;
+ rects[i].y1 = du->gui_y;
+ rects[i].x2 = du->gui_x + new_crtc_state->mode.hdisplay;
+ rects[i].y2 = du->gui_y + new_crtc_state->mode.vdisplay;
+ }
+
+ ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
+ rects);
+
+clean:
+ mutex_unlock(&dev_priv->requested_layout_mutex);
+ kfree(rects);
+ return ret;
+}
/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
@@ -1520,36 +1669,39 @@ err_out:
* us to assign a value to mode->crtc_clock so that
* drm_calc_timestamping_constants() won't throw an error message
*
- * RETURNS
+ * Returns:
* Zero for success or -errno
*/
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
- struct vmw_private *dev_priv = vmw_priv(dev);
- int i;
-
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- unsigned long requested_bb_mem = 0;
+ struct drm_crtc_state *crtc_state;
+ bool need_modeset = false;
+ int i, ret;
- if (dev_priv->active_display_unit == vmw_du_screen_target) {
- if (crtc->primary->fb) {
- int cpp = crtc->primary->fb->pitches[0] /
- crtc->primary->fb->width;
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
- requested_bb_mem += crtc->mode.hdisplay * cpp *
- crtc->mode.vdisplay;
- }
+ if (!state->allow_modeset)
+ return ret;
- if (requested_bb_mem > dev_priv->prim_bb_mem)
- return -EINVAL;
- }
+ /*
+ * The legacy path (e.g. @drm_atomic_helper_update_plane) does not set
+ * allow_modeset properly, which would result in unnecessary calls to
+ * vmw_kms_check_topology. Hence the extra set of checks below.
+ */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ need_modeset = true;
}
- return drm_atomic_helper_check(dev, state);
+ if (need_modeset)
+ return vmw_kms_check_topology(dev, state);
+
+ return ret;
}
static const struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1841,40 +1993,49 @@ void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
}
-
-/*
- * Small shared kms functions.
+/**
+ * vmw_du_update_layout - Update the display unit with topology from resolution
+ * plugin and generate DRM uevent
+ * @dev_priv: device private
+ * @num_rects: number of drm_rect in rects
+ * @rects: topology to update
*/
-
-static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
- struct drm_vmw_rect *rects)
+static int vmw_du_update_layout(struct vmw_private *dev_priv,
+ unsigned int num_rects, struct drm_rect *rects)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_display_unit *du;
struct drm_connector *con;
+ struct drm_connector_list_iter conn_iter;
- mutex_lock(&dev->mode_config.mutex);
-
-#if 0
- {
- unsigned int i;
-
- DRM_INFO("%s: new layout ", __func__);
- for (i = 0; i < num; i++)
- DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
- rects[i].w, rects[i].h);
- DRM_INFO("\n");
+ /*
+ * Currently only gui_x/y are protected by requested_layout_mutex.
+ */
+ mutex_lock(&dev_priv->requested_layout_mutex);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(con, &conn_iter) {
+ du = vmw_connector_to_du(con);
+ if (num_rects > du->unit) {
+ du->pref_width = drm_rect_width(&rects[du->unit]);
+ du->pref_height = drm_rect_height(&rects[du->unit]);
+ du->pref_active = true;
+ du->gui_x = rects[du->unit].x1;
+ du->gui_y = rects[du->unit].y1;
+ } else {
+ du->pref_width = 800;
+ du->pref_height = 600;
+ du->pref_active = false;
+ du->gui_x = 0;
+ du->gui_y = 0;
+ }
}
-#endif
+ drm_connector_list_iter_end(&conn_iter);
+ mutex_unlock(&dev_priv->requested_layout_mutex);
+ mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(con, &dev->mode_config.connector_list, head) {
du = vmw_connector_to_du(con);
- if (num > du->unit) {
- du->pref_width = rects[du->unit].w;
- du->pref_height = rects[du->unit].h;
- du->pref_active = true;
- du->gui_x = rects[du->unit].x;
- du->gui_y = rects[du->unit].y;
+ if (num_rects > du->unit) {
drm_object_property_set_value
(&con->base, dev->mode_config.suggested_x_property,
du->gui_x);
@@ -1882,9 +2043,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
(&con->base, dev->mode_config.suggested_y_property,
du->gui_y);
} else {
- du->pref_width = 800;
- du->pref_height = 600;
- du->pref_active = false;
drm_object_property_set_value
(&con->base, dev->mode_config.suggested_x_property,
0);
@@ -1894,8 +2052,8 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
}
con->status = vmw_du_connector_detect(con, true);
}
-
mutex_unlock(&dev->mode_config.mutex);
+
drm_sysfs_hotplug_event(dev);
return 0;
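The drm_connector_list_iter API used above lets the gui_x/gui_y update walk the connector list under the iterator's own internal locking rather than mode_config.mutex; the general pattern, as a minimal sketch (the function name is illustrative):

	static void sample_walk_connectors(struct drm_device *dev)
	{
		struct drm_connector_list_iter conn_iter;
		struct drm_connector *con;

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(con, &conn_iter) {
			/* Runs safely against concurrent connector hotplug. */
		}
		drm_connector_list_iter_end(&conn_iter);
	}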
@@ -2110,7 +2268,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
drm_mode_probed_add(connector, mode);
}
- drm_mode_connector_list_update(connector);
+ drm_connector_list_update(connector);
 /* Move the preferred mode first, to help apps pick the right mode. */
drm_mode_sort(&connector->modes);
@@ -2195,7 +2353,25 @@ vmw_du_connector_atomic_get_property(struct drm_connector *connector,
return 0;
}
-
+/**
+ * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Update the preferred topology of the display units as per the ioctl
+ * request. The topology is expressed as an array of drm_vmw_rect.
+ * e.g.
+ * [0 0 640 480] [640 0 800 600] [0 480 640 480]
+ *
+ * NOTE:
+ * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
+ * Besides the device limit on topology, x + w and y + h (lower right)
+ * cannot be greater than INT_MAX, so a topology beyond these limits is
+ * rejected with an error.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -2204,15 +2380,12 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_update_layout_arg *)data;
void __user *user_rects;
struct drm_vmw_rect *rects;
+ struct drm_rect *drm_rects;
unsigned rects_size;
- int ret;
- int i;
- u64 total_pixels = 0;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_vmw_rect bounding_box = {0};
+ int ret, i;
if (!arg->num_outputs) {
- struct drm_vmw_rect def_rect = {0, 0, 800, 600};
+ struct drm_rect def_rect = {0, 0, 800, 600};
vmw_du_update_layout(dev_priv, 1, &def_rect);
return 0;
}
@@ -2231,52 +2404,29 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
goto out_free;
}
- for (i = 0; i < arg->num_outputs; ++i) {
- if (rects[i].x < 0 ||
- rects[i].y < 0 ||
- rects[i].x + rects[i].w > mode_config->max_width ||
- rects[i].y + rects[i].h > mode_config->max_height) {
- DRM_ERROR("Invalid GUI layout.\n");
- ret = -EINVAL;
- goto out_free;
- }
-
- /*
- * bounding_box.w and bunding_box.h are used as
- * lower-right coordinates
- */
- if (rects[i].x + rects[i].w > bounding_box.w)
- bounding_box.w = rects[i].x + rects[i].w;
-
- if (rects[i].y + rects[i].h > bounding_box.h)
- bounding_box.h = rects[i].y + rects[i].h;
+ drm_rects = (struct drm_rect *)rects;
- total_pixels += (u64) rects[i].w * (u64) rects[i].h;
- }
-
- if (dev_priv->active_display_unit == vmw_du_screen_target) {
- /*
- * For Screen Targets, the limits for a toplogy are:
- * 1. Bounding box (assuming 32bpp) must be < prim_bb_mem
- * 2. Total pixels (assuming 32bpp) must be < prim_bb_mem
- */
- u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4;
- u64 pixel_mem = total_pixels * 4;
+ for (i = 0; i < arg->num_outputs; i++) {
+ struct drm_vmw_rect curr_rect;
- if (bb_mem > dev_priv->prim_bb_mem) {
- DRM_ERROR("Topology is beyond supported limits.\n");
- ret = -EINVAL;
+ /* Verify the user-space rects for overflow, as the kernel uses drm_rect */
+ if ((rects[i].x + rects[i].w > INT_MAX) ||
+ (rects[i].y + rects[i].h > INT_MAX)) {
+ ret = -ERANGE;
goto out_free;
}
- if (pixel_mem > dev_priv->prim_bb_mem) {
- DRM_ERROR("Combined output size too large\n");
- ret = -EINVAL;
- goto out_free;
- }
+ curr_rect = rects[i];
+ drm_rects[i].x1 = curr_rect.x;
+ drm_rects[i].y1 = curr_rect.y;
+ drm_rects[i].x2 = curr_rect.x + curr_rect.w;
+ drm_rects[i].y2 = curr_rect.y + curr_rect.h;
}
- vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
+ ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
+
+ if (ret == 0)
+ vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
out_free:
kfree(rects);
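The loop above converts each user-space drm_vmw_rect (x, y, w, h) in place into the kernel's two-corner drm_rect. A self-contained sketch of the same conversion, with hypothetical type and function names and the sums widened to 64 bits so the INT_MAX overflow check itself cannot wrap:

	#include <errno.h>
	#include <limits.h>
	#include <stdint.h>

	struct sample_vmw_rect { int32_t x, y; uint32_t w, h; }; /* offset + size */
	struct sample_drm_rect { int x1, y1, x2, y2; };          /* two corners */

	static int sample_convert(const struct sample_vmw_rect *in,
				  struct sample_drm_rect *out)
	{
		/* Reject rects whose lower-right corner would not fit in an int. */
		if ((int64_t)in->x + in->w > INT_MAX ||
		    (int64_t)in->y + in->h > INT_MAX)
			return -ERANGE;

		out->x1 = in->x;
		out->y1 = in->y;
		out->x2 = in->x + (int)in->w;
		out->y2 = in->y + (int)in->h;
		return 0;
	}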
@@ -2322,9 +2472,10 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
} else {
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
- if (crtc->primary->fb != &framebuffer->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
+ struct drm_plane *plane = crtc->primary;
+
+ if (plane->state->fb == &framebuffer->base)
+ units[num_units++] = vmw_crtc_to_du(crtc);
}
}
@@ -2422,7 +2573,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
* interrupted by a signal.
*/
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
bool interruptible,
bool validate_as_mob,
bool for_cpu_blit)
@@ -2454,7 +2605,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
* Helper to be used if an error forces the caller to undo the actions of
* vmw_kms_helper_buffer_prepare.
*/
-void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
+void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
{
if (buf)
ttm_bo_unreserve(&buf->base);
@@ -2477,7 +2628,7 @@ void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
*/
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
struct vmw_fence_obj **out_fence,
struct drm_vmw_fence_rep __user *
user_fence_rep)
@@ -2489,7 +2640,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
file_priv ? &handle : NULL);
if (buf)
- vmw_fence_single_bo(&buf->base, fence);
+ vmw_bo_fence_single(&buf->base, fence);
if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence,
@@ -2517,7 +2668,7 @@ void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
struct vmw_resource *res = ctx->res;
vmw_kms_helper_buffer_revert(ctx->buf);
- vmw_dmabuf_unreference(&ctx->buf);
+ vmw_bo_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -2562,7 +2713,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
if (ret)
goto out_unreserve;
- ctx->buf = vmw_dmabuf_reference(res->backup);
+ ctx->buf = vmw_bo_reference(res->backup);
}
ret = vmw_resource_validate(res);
if (ret)
@@ -2595,7 +2746,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
out_fence, NULL);
- vmw_dmabuf_unreference(&ctx->buf);
+ vmw_bo_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -2806,6 +2957,7 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
struct drm_crtc *crtc)
{
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct drm_plane *plane = crtc->primary;
struct vmw_framebuffer *vfb;
mutex_lock(&dev_priv->global_kms_state_mutex);
@@ -2813,7 +2965,7 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
if (!du->is_implicit)
goto out_unlock;
- vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
+ vfb = vmw_framebuffer_to_vfb(plane->state->fb);
WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
dev_priv->implicit_fb != vfb);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 6b7c012719f1..31311298ec0b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -90,7 +90,7 @@ struct vmw_kms_dirty {
#define vmw_framebuffer_to_vfbs(x) \
container_of(x, struct vmw_framebuffer_surface, base.base)
#define vmw_framebuffer_to_vfbd(x) \
- container_of(x, struct vmw_framebuffer_dmabuf, base.base)
+ container_of(x, struct vmw_framebuffer_bo, base.base)
/**
* Base class for framebuffers
@@ -102,7 +102,7 @@ struct vmw_framebuffer {
struct drm_framebuffer base;
int (*pin)(struct vmw_framebuffer *fb);
int (*unpin)(struct vmw_framebuffer *fb);
- bool dmabuf;
+ bool bo;
struct ttm_base_object *user_obj;
uint32_t user_handle;
};
@@ -117,15 +117,15 @@ struct vmw_clip_rect {
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
- struct vmw_dma_buffer *buffer;
+ struct vmw_buffer_object *buffer;
struct list_head head;
- bool is_dmabuf_proxy; /* true if this is proxy surface for DMA buf */
+ bool is_bo_proxy; /* true if this is a proxy surface for a buffer object */
};
-struct vmw_framebuffer_dmabuf {
+struct vmw_framebuffer_bo {
struct vmw_framebuffer base;
- struct vmw_dma_buffer *buffer;
+ struct vmw_buffer_object *buffer;
};
@@ -161,18 +161,18 @@ struct vmw_crtc_state {
*
* @base DRM plane object
* @surf Display surface for STDU
- * @dmabuf display dmabuf for SOU
+ * @bo display bo for SOU
* @content_fb_type Used by STDU.
- * @dmabuf_size Size of the dmabuf, used by Screen Object Display Unit
+ * @bo_size Size of the bo, used by Screen Object Display Unit
* @pinned pin count for STDU display surface
*/
struct vmw_plane_state {
struct drm_plane_state base;
struct vmw_surface *surf;
- struct vmw_dma_buffer *dmabuf;
+ struct vmw_buffer_object *bo;
int content_fb_type;
- unsigned long dmabuf_size;
+ unsigned long bo_size;
int pinned;
@@ -192,6 +192,24 @@ struct vmw_connector_state {
struct drm_connector_state base;
bool is_implicit;
+
+ /**
+ * @gui_x:
+ *
+ * vmwgfx connector property representing the x position of this display
+ * unit (a connector is synonymous with a display unit) in the overall
+ * topology. This is what the device expects as xRoot when creating a
+ * screen.
+ */
+ int gui_x;
+
+ /**
+ * @gui_y:
+ *
+ * vmwgfx connector property representing the y position of this display
+ * unit (a connector is synonymous with a display unit) in the overall
+ * topology. This is what the device expects as yRoot when creating a
+ * screen.
+ */
+ int gui_y;
};
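For illustration, the new fields can be read back from a generic connector state through the existing vmw_connector_state_to_vcs() upcast, as vmw_kms_check_topology() does when writing them; a minimal sketch with an illustrative function name:

	static void sample_read_gui_pos(struct drm_connector_state *conn_state,
					int *x, int *y)
	{
		struct vmw_connector_state *vcs =
			vmw_connector_state_to_vcs(conn_state);

		*x = vcs->gui_x; /* xRoot the device will be given */
		*y = vcs->gui_y; /* yRoot the device will be given */
	}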
/**
@@ -209,7 +227,7 @@ struct vmw_display_unit {
struct drm_plane cursor;
struct vmw_surface *cursor_surface;
- struct vmw_dma_buffer *cursor_dmabuf;
+ struct vmw_buffer_object *cursor_bo;
size_t cursor_age;
int cursor_x;
@@ -243,7 +261,7 @@ struct vmw_display_unit {
struct vmw_validation_ctx {
struct vmw_resource *res;
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
};
#define vmw_crtc_to_du(x) \
@@ -291,14 +309,14 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
struct vmw_kms_dirty *dirty);
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
bool interruptible,
bool validate_as_mob,
bool for_cpu_blit);
-void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
+void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
struct vmw_fence_obj **out_fence,
struct drm_vmw_fence_rep __user *
user_fence_rep);
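A hedged sketch of the typical call order for these three helpers, inferred from how vmw_kms_helper_resource_prepare()/_finish() use them elsewhere in this patch (error handling is elided and the sample_* name is illustrative):

	static void sample_bo_dirty_path(struct vmw_private *dev_priv,
					 struct vmw_buffer_object *buf)
	{
		struct vmw_fence_obj *fence = NULL;

		/* Reserve and validate the buffer for command submission. */
		if (vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false, false))
			return;

		/*
		 * Emit device commands referencing @buf here; on error,
		 * vmw_kms_helper_buffer_revert() would undo the prepare step.
		 */

		/* Fence the buffer and unreserve it. */
		vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, &fence, NULL);
		if (fence)
			vmw_fence_obj_unreference(&fence);
	}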
@@ -316,7 +334,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
uint32_t num_clips);
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
+ struct vmw_buffer_object *bo,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd);
@@ -384,11 +402,11 @@ void vmw_du_connector_destroy_state(struct drm_connector *connector,
*/
int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
-int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int increment);
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips, int increment);
int vmw_kms_update_proxy(struct vmw_resource *res,
const struct drm_clip_rect *clips,
unsigned num_clips,
@@ -408,14 +426,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
unsigned num_clips, int inc,
struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc);
-int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- struct drm_clip_rect *clips,
- struct drm_vmw_rect *vclips,
- unsigned num_clips, int increment,
- bool interruptible,
- struct vmw_fence_obj **out_fence,
- struct drm_crtc *crtc);
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ unsigned int num_clips, int increment,
+ bool interruptible,
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc);
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 4a5907e3f560..723578117191 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -438,7 +438,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_connector;
}
- (void) drm_mode_connector_attach_encoder(connector, encoder);
+ (void) drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
@@ -547,11 +547,11 @@ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
}
-int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int increment)
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips, int increment)
{
size_t fifo_size;
int i;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
index efd1ffd68185..e53bc639a754 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2010 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index d07c585e3c1d..7ed179d30ec5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2012-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -225,7 +225,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
ret = ttm_bo_reserve(bo, false, true, NULL);
BUG_ON(ret != 0);
- vmw_fence_single_bo(bo, NULL);
+ vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
}
@@ -362,7 +362,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
ret = ttm_bo_reserve(bo, false, true, NULL);
BUG_ON(ret != 0);
- vmw_fence_single_bo(bo, NULL);
+ vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
ttm_bo_unref(&batch->otable_bo);
@@ -620,7 +620,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
if (bo) {
- vmw_fence_single_bo(bo, NULL);
+ vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
}
vmw_fifo_resource_dec(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 21d746bdc922..8b9270f31409 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
- * Copyright © 2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -31,6 +31,7 @@
#include <linux/frame.h>
#include <asm/hypervisor.h>
#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
#include "vmwgfx_msg.h"
@@ -234,7 +235,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
(HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
- DRM_ERROR("Failed to get reply size\n");
+ DRM_ERROR("Failed to get reply size for host message.\n");
return -EINVAL;
}
@@ -245,7 +246,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
reply_len = ebx;
reply = kzalloc(reply_len + 1, GFP_KERNEL);
if (!reply) {
- DRM_ERROR("Cannot allocate memory for reply\n");
+ DRM_ERROR("Cannot allocate memory for host message reply.\n");
return -ENOMEM;
}
@@ -338,7 +339,8 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
if (!msg) {
- DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
+ DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
+ guest_info_param);
return -ENOMEM;
}
@@ -374,7 +376,7 @@ out_msg:
out_open:
*length = 0;
kfree(msg);
- DRM_ERROR("Failed to get %s", guest_info_param);
+ DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);
return -EINVAL;
}
@@ -403,7 +405,7 @@ int vmw_host_log(const char *log)
msg = kasprintf(GFP_KERNEL, "log %s", log);
if (!msg) {
- DRM_ERROR("Cannot allocate memory for log message\n");
+ DRM_ERROR("Cannot allocate memory for host log message.\n");
return -ENOMEM;
}
@@ -422,7 +424,7 @@ out_msg:
vmw_close_channel(&channel);
out_open:
kfree(msg);
- DRM_ERROR("Failed to send log\n");
+ DRM_ERROR("Failed to send host log message.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
index 8545488aa0cf..4907e50fb20a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -1,16 +1,29 @@
-/*
- * Copyright (C) 2016, VMware, Inc.
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
+ **************************************************************************
*
* Based on code from vmware.c and vmmouse.c.
* Author:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 222c9c2123a1..9f1b9d289bec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -38,7 +38,7 @@
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
struct vmw_stream {
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
bool claimed;
bool paused;
struct drm_vmw_control_stream_arg saved;
@@ -94,7 +94,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
* -ERESTARTSYS if interrupted by a signal.
*/
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
@@ -225,16 +225,16 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
* used with GMRs instead of being locked to vram.
*/
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
bool pin, bool inter)
{
if (!pin)
- return vmw_dmabuf_unpin(dev_priv, buf, inter);
+ return vmw_bo_unpin(dev_priv, buf, inter);
if (dev_priv->active_display_unit == vmw_du_legacy)
- return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
+ return vmw_bo_pin_in_vram(dev_priv, buf, inter);
- return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
+ return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
}
/**
@@ -278,7 +278,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
}
if (!pause) {
- vmw_dmabuf_unreference(&stream->buf);
+ vmw_bo_unreference(&stream->buf);
stream->paused = false;
} else {
stream->paused = true;
@@ -297,7 +297,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
* -ERESTARTSYS if interrupted.
*/
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
+ struct vmw_buffer_object *buf,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
@@ -347,7 +347,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
}
if (stream->buf != buf)
- stream->buf = vmw_dmabuf_reference(buf);
+ stream->buf = vmw_bo_reference(buf);
stream->saved = *arg;
/* stream is no longer stopped/paused */
stream->paused = false;
@@ -466,7 +466,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct vmw_overlay *overlay = dev_priv->overlay_priv;
struct drm_vmw_control_stream_arg *arg =
(struct drm_vmw_control_stream_arg *)data;
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
struct vmw_resource *res;
int ret;
@@ -484,13 +484,13 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
+ ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
if (ret)
goto out_unlock;
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
- vmw_dmabuf_unreference(&buf);
+ vmw_bo_unreference(&buf);
out_unlock:
mutex_unlock(&overlay->mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 0d42a46521fc..0861c821a7fe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2013 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -40,7 +40,6 @@
*/
static int vmw_prime_map_attach(struct dma_buf *dma_buf,
- struct device *target_dev,
struct dma_buf_attachment *attach)
{
return -ENOSYS;
@@ -72,17 +71,6 @@ static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
}
-static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
-{
- return NULL;
-}
-
-static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
-{
-
-}
static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
unsigned long page_num)
{
@@ -109,9 +97,7 @@ const struct dma_buf_ops vmw_prime_dmabuf_ops = {
.unmap_dma_buf = vmw_prime_unmap_dma_buf,
.release = NULL,
.map = vmw_prime_dmabuf_kmap,
- .map_atomic = vmw_prime_dmabuf_kmap_atomic,
.unmap = vmw_prime_dmabuf_kunmap,
- .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
.mmap = vmw_prime_dmabuf_mmap,
.vmap = vmw_prime_dmabuf_vmap,
.vunmap = vmw_prime_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
index dce798053a96..e99f6cdbb091 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6b3a942b18df..92003ea5a219 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -27,7 +27,6 @@
#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
-#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
@@ -35,29 +34,6 @@
#define VMW_RES_EVICT_ERR_COUNT 10
-struct vmw_user_dma_buffer {
- struct ttm_prime_object prime;
- struct vmw_dma_buffer dma;
-};
-
-struct vmw_bo_user_rep {
- uint32_t handle;
- uint64_t map_handle;
-};
-
-static inline struct vmw_dma_buffer *
-vmw_dma_buffer(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct vmw_dma_buffer, base);
-}
-
-static inline struct vmw_user_dma_buffer *
-vmw_user_dma_buffer(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
- return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
-}
-
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
kref_get(&res->kref);
@@ -116,7 +92,7 @@ static void vmw_resource_release(struct kref *kref)
res->backup_dirty = false;
list_del_init(&res->mob_head);
ttm_bo_unreserve(bo);
- vmw_dmabuf_unreference(&res->backup);
+ vmw_bo_unreference(&res->backup);
}
if (likely(res->hw_destroy != NULL)) {
@@ -287,7 +263,7 @@ out_bad_resource:
}
/**
- * Helper function that looks either a surface or dmabuf.
+ * Helper function that looks up either a surface or a buffer object.
*
 * The pointers pointed at by out_surf and out_buf need to be NULL.
*/
@@ -295,7 +271,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
struct vmw_surface **out_surf,
- struct vmw_dma_buffer **out_buf)
+ struct vmw_buffer_object **out_buf)
{
struct vmw_resource *res;
int ret;
@@ -311,513 +287,11 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
}
*out_surf = NULL;
- ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
+ ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
return ret;
}
/**
- * Buffer management.
- */
-
-/**
- * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @size: The requested buffer size.
- * @user: Whether this is an ordinary dma buffer or a user dma buffer.
- */
-static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
- bool user)
-{
- static size_t struct_size, user_struct_size;
- size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
-
- if (unlikely(struct_size == 0)) {
- size_t backend_size = ttm_round_pot(vmw_tt_size);
-
- struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_dma_buffer));
- user_struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
- }
-
- if (dev_priv->map_mode == vmw_dma_alloc_coherent)
- page_array_size +=
- ttm_round_pot(num_pages * sizeof(dma_addr_t));
-
- return ((user) ? user_struct_size : struct_size) +
- page_array_size;
-}
-
-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
- vmw_dma_buffer_unmap(vmw_bo);
- kfree(vmw_bo);
-}
-
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
-{
- struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-
- vmw_dma_buffer_unmap(&vmw_user_bo->dma);
- ttm_prime_object_kfree(vmw_user_bo, prime);
-}
-
-int vmw_dmabuf_init(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *vmw_bo,
- size_t size, struct ttm_placement *placement,
- bool interruptible,
- void (*bo_free) (struct ttm_buffer_object *bo))
-{
- struct ttm_bo_device *bdev = &dev_priv->bdev;
- size_t acc_size;
- int ret;
- bool user = (bo_free == &vmw_user_dmabuf_destroy);
-
- BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
-
- acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
- memset(vmw_bo, 0, sizeof(*vmw_bo));
-
- INIT_LIST_HEAD(&vmw_bo->res_list);
-
- ret = ttm_bo_init(bdev, &vmw_bo->base, size,
- ttm_bo_type_device, placement,
- 0, interruptible, acc_size,
- NULL, NULL, bo_free);
- return ret;
-}
-
-static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
-{
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_base_object *base = *p_base;
- struct ttm_buffer_object *bo;
-
- *p_base = NULL;
-
- if (unlikely(base == NULL))
- return;
-
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
- prime.base);
- bo = &vmw_user_bo->dma.base;
- ttm_bo_unref(&bo);
-}
-
-static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
- enum ttm_ref_type ref_type)
-{
- struct vmw_user_dma_buffer *user_bo;
- user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
-
- switch (ref_type) {
- case TTM_REF_SYNCCPU_WRITE:
- ttm_bo_synccpu_write_release(&user_bo->dma.base);
- break;
- default:
- BUG();
- }
-}
-
-/**
- * vmw_user_dmabuf_alloc - Allocate a user dma buffer
- *
- * @dev_priv: Pointer to a struct device private.
- * @tfile: Pointer to a struct ttm_object_file on which to register the user
- * object.
- * @size: Size of the dma buffer.
- * @shareable: Boolean whether the buffer is shareable with other open files.
- * @handle: Pointer to where the handle value should be assigned.
- * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
- * should be assigned.
- */
-int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t size,
- bool shareable,
- uint32_t *handle,
- struct vmw_dma_buffer **p_dma_buf,
- struct ttm_base_object **p_base)
-{
- struct vmw_user_dma_buffer *user_bo;
- struct ttm_buffer_object *tmp;
- int ret;
-
- user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
- if (unlikely(!user_bo)) {
- DRM_ERROR("Failed to allocate a buffer.\n");
- return -ENOMEM;
- }
-
- ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
- (dev_priv->has_mob) ?
- &vmw_sys_placement :
- &vmw_vram_sys_placement, true,
- &vmw_user_dmabuf_destroy);
- if (unlikely(ret != 0))
- return ret;
-
- tmp = ttm_bo_reference(&user_bo->dma.base);
- ret = ttm_prime_object_init(tfile,
- size,
- &user_bo->prime,
- shareable,
- ttm_buffer_type,
- &vmw_user_dmabuf_release,
- &vmw_user_dmabuf_ref_obj_release);
- if (unlikely(ret != 0)) {
- ttm_bo_unref(&tmp);
- goto out_no_base_object;
- }
-
- *p_dma_buf = &user_bo->dma;
- if (p_base) {
- *p_base = &user_bo->prime.base;
- kref_get(&(*p_base)->refcount);
- }
- *handle = user_bo->prime.base.hash.key;
-
-out_no_base_object:
- return ret;
-}
-
-/**
- * vmw_user_dmabuf_verify_access - verify access permissions on this
- * buffer object.
- *
- * @bo: Pointer to the buffer object being accessed
- * @tfile: Identifying the caller.
- */
-int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
- struct ttm_object_file *tfile)
-{
- struct vmw_user_dma_buffer *vmw_user_bo;
-
- if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
- return -EPERM;
-
- vmw_user_bo = vmw_user_dma_buffer(bo);
-
- /* Check that the caller has opened the object. */
- if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
- return 0;
-
- DRM_ERROR("Could not grant buffer access.\n");
- return -EPERM;
-}
-
-/**
- * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
- * access, idling previous GPU operations on the buffer and optionally
- * blocking it for further command submissions.
- *
- * @user_bo: Pointer to the buffer object being grabbed for CPU access
- * @tfile: Identifying the caller.
- * @flags: Flags indicating how the grab should be performed.
- *
- * A blocking grab will be automatically released when @tfile is closed.
- */
-static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
- struct ttm_object_file *tfile,
- uint32_t flags)
-{
- struct ttm_buffer_object *bo = &user_bo->dma.base;
- bool existed;
- int ret;
-
- if (flags & drm_vmw_synccpu_allow_cs) {
- bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
- long lret;
-
- lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
- nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
- if (!lret)
- return -EBUSY;
- else if (lret < 0)
- return lret;
- return 0;
- }
-
- ret = ttm_bo_synccpu_write_grab
- (bo, !!(flags & drm_vmw_synccpu_dontblock));
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_SYNCCPU_WRITE, &existed, false);
- if (ret != 0 || existed)
- ttm_bo_synccpu_write_release(&user_bo->dma.base);
-
- return ret;
-}
-
-/**
- * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
- * and unblock command submission on the buffer if blocked.
- *
- * @handle: Handle identifying the buffer object.
- * @tfile: Identifying the caller.
- * @flags: Flags indicating the type of release.
- */
-static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
- struct ttm_object_file *tfile,
- uint32_t flags)
-{
- if (!(flags & drm_vmw_synccpu_allow_cs))
- return ttm_ref_object_base_unref(tfile, handle,
- TTM_REF_SYNCCPU_WRITE);
-
- return 0;
-}
-
-/**
- * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu
- * functionality.
- *
- * @dev: Identifies the drm device.
- * @data: Pointer to the ioctl argument.
- * @file_priv: Identifies the caller.
- *
- * This function checks the ioctl arguments for validity and calls the
- * relevant synccpu functions.
- */
-int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_synccpu_arg *arg =
- (struct drm_vmw_synccpu_arg *) data;
- struct vmw_dma_buffer *dma_buf;
- struct vmw_user_dma_buffer *user_bo;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct ttm_base_object *buffer_base;
- int ret;
-
- if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
- || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
- drm_vmw_synccpu_dontblock |
- drm_vmw_synccpu_allow_cs)) != 0) {
- DRM_ERROR("Illegal synccpu flags.\n");
- return -EINVAL;
- }
-
- switch (arg->op) {
- case drm_vmw_synccpu_grab:
- ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
- &buffer_base);
- if (unlikely(ret != 0))
- return ret;
-
- user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
- dma);
- ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
- vmw_dmabuf_unreference(&dma_buf);
- ttm_base_object_unref(&buffer_base);
- if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
- ret != -EBUSY)) {
- DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
- (unsigned int) arg->handle);
- return ret;
- }
- break;
- case drm_vmw_synccpu_release:
- ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
- arg->flags);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
- (unsigned int) arg->handle);
- return ret;
- }
- break;
- default:
- DRM_ERROR("Invalid synccpu operation.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- union drm_vmw_alloc_dmabuf_arg *arg =
- (union drm_vmw_alloc_dmabuf_arg *)data;
- struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
- struct drm_vmw_dmabuf_rep *rep = &arg->rep;
- struct vmw_dma_buffer *dma_buf;
- uint32_t handle;
- int ret;
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
- req->size, false, &handle, &dma_buf,
- NULL);
- if (unlikely(ret != 0))
- goto out_no_dmabuf;
-
- rep->handle = handle;
- rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
- rep->cur_gmr_id = handle;
- rep->cur_gmr_offset = 0;
-
- vmw_dmabuf_unreference(&dma_buf);
-
-out_no_dmabuf:
- ttm_read_unlock(&dev_priv->reservation_sem);
-
- return ret;
-}
-
-int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_unref_dmabuf_arg *arg =
- (struct drm_vmw_unref_dmabuf_arg *)data;
-
- return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- arg->handle,
- TTM_REF_USAGE);
-}
-
-int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
- uint32_t handle, struct vmw_dma_buffer **out,
- struct ttm_base_object **p_base)
-{
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_base_object *base;
-
- base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL)) {
- pr_err("Invalid buffer object handle 0x%08lx\n",
- (unsigned long)handle);
- return -ESRCH;
- }
-
- if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
- ttm_base_object_unref(&base);
- pr_err("Invalid buffer object handle 0x%08lx\n",
- (unsigned long)handle);
- return -EINVAL;
- }
-
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
- prime.base);
- (void)ttm_bo_reference(&vmw_user_bo->dma.base);
- if (p_base)
- *p_base = base;
- else
- ttm_base_object_unref(&base);
- *out = &vmw_user_bo->dma;
-
- return 0;
-}
-
-int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
- struct vmw_dma_buffer *dma_buf,
- uint32_t *handle)
-{
- struct vmw_user_dma_buffer *user_bo;
-
- if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
- return -EINVAL;
-
- user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
-
- *handle = user_bo->prime.base.hash.key;
- return ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_USAGE, NULL, false);
-}
-
-/**
- * vmw_dumb_create - Create a dumb kms buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @args: Pointer to a struct drm_mode_create_dumb structure
- *
- * This is a driver callback for the core drm create_dumb functionality.
- * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
- * that the arguments have a different format.
- */
-int vmw_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_dma_buffer *dma_buf;
- int ret;
-
- args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
- args->size, false, &args->handle,
- &dma_buf, NULL);
- if (unlikely(ret != 0))
- goto out_no_dmabuf;
-
- vmw_dmabuf_unreference(&dma_buf);
-out_no_dmabuf:
- ttm_read_unlock(&dev_priv->reservation_sem);
- return ret;
-}
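/*
 * A quick worked example of the pitch/size arithmetic above, for a
 * hypothetical 1024x768 dumb buffer at 32 bpp:
 *
 *	pitch = 1024 * ((32 + 7) / 8) = 1024 * 4 = 4096 bytes
 *	size  = 4096 * 768            = 3145728 bytes (3 MiB)
 *
 * The "(bpp + 7) / 8" term rounds bit depths that are not byte
 * multiples up to whole bytes per pixel, e.g. 15 bpp -> 2 bytes.
 */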
-
-/**
- * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- * @offset: The address space offset returned.
- *
- * This is a driver callback for the core drm dumb_map_offset functionality.
- */
-int vmw_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset)
-{
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_dma_buffer *out_buf;
- int ret;
-
- ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
- if (ret != 0)
- return -EINVAL;
-
- *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
- vmw_dmabuf_unreference(&out_buf);
- return 0;
-}
-
-/**
- * vmw_dumb_destroy - Destroy a dumb buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- *
- * This is a driver callback for the core drm dumb_destroy functionality.
- */
-int vmw_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle)
-{
- return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
-}
-
-/**
* vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
*
* @res: The resource for which to allocate a backup buffer.
@@ -829,7 +303,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
{
unsigned long size =
(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
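	/*
	 * A short worked example of the page-rounding expression above,
	 * assuming 4 KiB pages (PAGE_SIZE = 4096, PAGE_MASK = ~4095):
	 *
	 *	backup_size = 5000: (5000 + 4095) & ~4095 = 8192
	 *	backup_size = 4096: (4096 + 4095) & ~4095 = 4096
	 *
	 * i.e. the backup size is rounded up to a whole number of pages.
	 */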
- struct vmw_dma_buffer *backup;
+ struct vmw_buffer_object *backup;
int ret;
if (likely(res->backup)) {
@@ -841,16 +315,16 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
if (unlikely(!backup))
return -ENOMEM;
- ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+ ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
res->func->backup_placement,
interruptible,
- &vmw_dmabuf_bo_free);
+ &vmw_bo_bo_free);
if (unlikely(ret != 0))
- goto out_no_dmabuf;
+ goto out_no_bo;
res->backup = backup;
-out_no_dmabuf:
+out_no_bo:
return ret;
}
@@ -919,7 +393,7 @@ out_bind_failed:
*/
void vmw_resource_unreserve(struct vmw_resource *res,
bool switch_backup,
- struct vmw_dma_buffer *new_backup,
+ struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset)
{
struct vmw_private *dev_priv = res->dev_priv;
@@ -931,11 +405,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (res->backup) {
lockdep_assert_held(&res->backup->base.resv->lock.base);
list_del_init(&res->mob_head);
- vmw_dmabuf_unreference(&res->backup);
+ vmw_bo_unreference(&res->backup);
}
if (new_backup) {
- res->backup = vmw_dmabuf_reference(new_backup);
+ res->backup = vmw_bo_reference(new_backup);
lockdep_assert_held(&new_backup->base.resv->lock.base);
list_add_tail(&res->mob_head, &new_backup->res_list);
} else {
@@ -959,6 +433,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
* for a resource and in that case, allocate
* one, reserve and validate it.
*
+ * @ticket: The ww acquire context to use, or NULL if trylocking.
* @res: The resource for which to allocate a backup buffer.
* @interruptible: Whether any sleeps during allocation should be
* performed while interruptible.
@@ -966,7 +441,8 @@ void vmw_resource_unreserve(struct vmw_resource *res,
* reserved and validated backup buffer.
*/
static int
-vmw_resource_check_buffer(struct vmw_resource *res,
+vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
+ struct vmw_resource *res,
bool interruptible,
struct ttm_validate_buffer *val_buf)
{
@@ -985,7 +461,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
val_buf->bo = ttm_bo_reference(&res->backup->base);
val_buf->shared = false;
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
+ ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
@@ -1003,11 +479,11 @@ vmw_resource_check_buffer(struct vmw_resource *res,
return 0;
out_no_validate:
- ttm_eu_backoff_reservation(NULL, &val_list);
+ ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
ttm_bo_unref(&val_buf->bo);
if (backup_dirty)
- vmw_dmabuf_unreference(&res->backup);
+ vmw_bo_unreference(&res->backup);
return ret;
}
@@ -1050,10 +526,12 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
* vmw_resource_backoff_reservation - Unreserve and unreference a
* backup buffer
*
+ * @ticket: The ww acquire ctx used for reservation.
* @val_buf: Backup buffer information.
*/
static void
-vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
+ struct ttm_validate_buffer *val_buf)
{
struct list_head val_list;
@@ -1062,7 +540,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
- ttm_eu_backoff_reservation(NULL, &val_list);
+ ttm_eu_backoff_reservation(ticket, &val_list);
ttm_bo_unref(&val_buf->bo);
}
@@ -1070,10 +548,12 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
* vmw_resource_do_evict - Evict a resource, and transfer its data
* to a backup buffer.
*
+ * @ticket: The ww acquire ticket to use, or NULL if trylocking.
* @res: The resource to evict.
* @interruptible: Whether to wait interruptible.
*/
-static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
+ struct vmw_resource *res, bool interruptible)
{
struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func;
@@ -1083,7 +563,7 @@ static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
val_buf.bo = NULL;
val_buf.shared = false;
- ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
+ ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
@@ -1098,7 +578,7 @@ static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
res->backup_dirty = true;
res->res_dirty = false;
out_no_unbind:
- vmw_resource_backoff_reservation(&val_buf);
+ vmw_resource_backoff_reservation(ticket, &val_buf);
return ret;
}
@@ -1152,7 +632,8 @@ int vmw_resource_validate(struct vmw_resource *res)
write_unlock(&dev_priv->resource_lock);
- ret = vmw_resource_do_evict(evict_res, true);
+ /* Trylock backup buffers with a NULL ticket. */
+ ret = vmw_resource_do_evict(NULL, evict_res, true);
if (unlikely(ret != 0)) {
write_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list);
@@ -1171,7 +652,7 @@ int vmw_resource_validate(struct vmw_resource *res)
goto out_no_validate;
else if (!res->func->needs_backup && res->backup) {
list_del_init(&res->mob_head);
- vmw_dmabuf_unreference(&res->backup);
+ vmw_bo_unreference(&res->backup);
}
return 0;
@@ -1180,109 +661,39 @@ out_no_validate:
return ret;
}
-/**
- * vmw_fence_single_bo - Utility function to fence a single TTM buffer
- * object without unreserving it.
- *
- * @bo: Pointer to the struct ttm_buffer_object to fence.
- * @fence: Pointer to the fence. If NULL, this function will
- * insert a fence into the command stream..
- *
- * Contrary to the ttm_eu version of this function, it takes only
- * a single buffer object instead of a list, and it also doesn't
- * unreserve the buffer object, which needs to be done separately.
- */
-void vmw_fence_single_bo(struct ttm_buffer_object *bo,
- struct vmw_fence_obj *fence)
-{
- struct ttm_bo_device *bdev = bo->bdev;
-
- struct vmw_private *dev_priv =
- container_of(bdev, struct vmw_private, bdev);
-
- if (fence == NULL) {
- vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- reservation_object_add_excl_fence(bo->resv, &fence->base);
- dma_fence_put(&fence->base);
- } else
- reservation_object_add_excl_fence(bo->resv, &fence->base);
-}
/**
- * vmw_resource_move_notify - TTM move_notify_callback
+ * vmw_resource_unbind_list - Unbind all resources bound to a backup buffer
*
- * @bo: The TTM buffer object about to move.
- * @mem: The struct ttm_mem_reg indicating to what memory
- * region the move is taking place.
+ * @vbo: Pointer to the current backing MOB.
*
* Evicts the Guest Backed hardware resource if the backup
* buffer is being moved out of MOB memory.
- * Note that this function should not race with the resource
- * validation code as long as it accesses only members of struct
- * resource that remain static while bo::res is !NULL and
- * while we have @bo reserved. struct resource::backup is *not* a
- * static member. The resource validation code will take care
- * to set @bo::res to NULL, while having @bo reserved when the
- * buffer is no longer bound to the resource, so @bo:res can be
- * used to determine whether there is a need to unbind and whether
- * it is safe to unbind.
+ * Note that this function will not race with the resource
+ * validation code, since resource validation and eviction
+ * both require the backup buffer to be reserved.
*/
-void vmw_resource_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
- struct vmw_dma_buffer *dma_buf;
-
- if (mem == NULL)
- return;
-
- if (bo->destroy != vmw_dmabuf_bo_free &&
- bo->destroy != vmw_user_dmabuf_destroy)
- return;
-
- dma_buf = container_of(bo, struct vmw_dma_buffer, base);
-
- /*
- * Kill any cached kernel maps before move. An optimization could
- * be to do this iff source or destination memory type is VRAM.
- */
- vmw_dma_buffer_unmap(dma_buf);
- if (mem->mem_type != VMW_PL_MOB) {
- struct vmw_resource *res, *n;
- struct ttm_validate_buffer val_buf;
+ struct vmw_resource *res, *next;
+ struct ttm_validate_buffer val_buf = {
+ .bo = &vbo->base,
+ .shared = false
+ };
- val_buf.bo = bo;
- val_buf.shared = false;
+ lockdep_assert_held(&vbo->base.resv->lock.base);
+ list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
+ if (!res->func->unbind)
+ continue;
- list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
-
- if (unlikely(res->func->unbind == NULL))
- continue;
-
- (void) res->func->unbind(res, true, &val_buf);
- res->backup_dirty = true;
- res->res_dirty = false;
- list_del_init(&res->mob_head);
- }
-
- (void) ttm_bo_wait(bo, false, false);
+ (void) res->func->unbind(res, true, &val_buf);
+ res->backup_dirty = true;
+ res->res_dirty = false;
+ list_del_init(&res->mob_head);
}
-}
-
-
-/**
- * vmw_resource_swap_notify - swapout notify callback.
- *
- * @bo: The buffer object to be swapped out.
- */
-void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
-{
- if (bo->destroy != vmw_dmabuf_bo_free &&
- bo->destroy != vmw_user_dmabuf_destroy)
- return;
- /* Kill any cached kernel maps before swapout */
- vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
+ (void) ttm_bo_wait(&vbo->base, false, false);
}
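/*
 * A minimal caller sketch (assumed, not part of the patch): because of
 * the lockdep assertion above, the buffer must be reserved around the
 * call, e.g.:
 *
 *	if (ttm_bo_reserve(&vbo->base, false, false, NULL) == 0) {
 *		vmw_resource_unbind_list(vbo);
 *		ttm_bo_unreserve(&vbo->base);
 *	}
 */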
@@ -1294,7 +705,7 @@ void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
* Read back cached states from the device if they exist. This function
* assumes binding_mutex is held.
*/
-int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
+int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
struct vmw_resource *dx_query_ctx;
struct vmw_private *dev_priv;
@@ -1344,7 +755,7 @@ int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
- struct vmw_dma_buffer *dx_query_mob;
+ struct vmw_buffer_object *dx_query_mob;
struct ttm_bo_device *bdev = bo->bdev;
struct vmw_private *dev_priv;
@@ -1353,7 +764,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
mutex_lock(&dev_priv->binding_mutex);
- dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
+ dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
mutex_unlock(&dev_priv->binding_mutex);
return;
@@ -1368,7 +779,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
/* Create a fence and attach the BO to it */
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- vmw_fence_single_bo(bo, fence);
+ vmw_bo_fence_single(bo, fence);
if (fence != NULL)
vmw_fence_obj_unreference(&fence);
@@ -1405,6 +816,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
struct vmw_resource *evict_res;
unsigned err_count = 0;
int ret;
+ struct ww_acquire_ctx ticket;
do {
write_lock(&dev_priv->resource_lock);
@@ -1418,7 +830,8 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
list_del_init(&evict_res->lru_head);
write_unlock(&dev_priv->resource_lock);
- ret = vmw_resource_do_evict(evict_res, false);
+ /* Wait-lock backup buffers with a ticket. */
+ ret = vmw_resource_do_evict(&ticket, evict_res, false);
if (unlikely(ret != 0)) {
write_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list);
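/*
 * The two eviction flavors introduced by this patch, side by side
 * (semantics as implied by the call sites above):
 *
 *	struct ww_acquire_ctx ticket;
 *
 *	vmw_resource_do_evict(NULL, res, true);     // trylock: fail rather
 *						    // than wait for the
 *						    // backup buffer lock
 *	vmw_resource_do_evict(&ticket, res, false); // wait-lock: block on
 *						    // the buffer, with the
 *						    // ww ticket providing
 *						    // deadlock avoidance
 *
 * The ticket itself is set up and torn down inside the TTM
 * execbuf-util reserve/backoff helpers it is passed to.
 */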
@@ -1481,7 +894,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
goto out_no_reserve;
if (res->pin_count == 0) {
- struct vmw_dma_buffer *vbo = NULL;
+ struct vmw_buffer_object *vbo = NULL;
if (res->backup) {
vbo = res->backup;
@@ -1539,7 +952,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
WARN_ON(res->pin_count == 0);
if (--res->pin_count == 0 && res->backup) {
- struct vmw_dma_buffer *vbo = res->backup;
+ struct vmw_buffer_object *vbo = res->backup;
(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
vmw_bo_pin_reserved(vbo, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index ac05968a832b..a8c1c5ebd71d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2012-2014 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 3d667e903beb..ad0de7f0cd60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2011-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -66,7 +66,7 @@ struct vmw_kms_sou_readback_blit {
SVGAFifoCmdBlitScreenToGMRFB body;
};
-struct vmw_kms_sou_dmabuf_blit {
+struct vmw_kms_sou_bo_blit {
uint32 header;
SVGAFifoCmdBlitGMRFBToScreen body;
};
@@ -83,7 +83,7 @@ struct vmw_screen_object_unit {
struct vmw_display_unit base;
unsigned long buffer_size; /**< Size of allocated buffer */
- struct vmw_dma_buffer *buffer; /**< Backing store buffer */
+ struct vmw_buffer_object *buffer; /**< Backing store buffer */
bool defined;
};
@@ -109,7 +109,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
*/
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou,
- uint32_t x, uint32_t y,
+ int x, int y,
struct drm_display_mode *mode)
{
size_t fifo_size;
@@ -139,13 +139,8 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
cmd->obj.size.width = mode->hdisplay;
cmd->obj.size.height = mode->vdisplay;
- if (sou->base.is_implicit) {
- cmd->obj.root.x = x;
- cmd->obj.root.y = y;
- } else {
- cmd->obj.root.x = sou->base.gui_x;
- cmd->obj.root.y = sou->base.gui_y;
- }
+ cmd->obj.root.x = x;
+ cmd->obj.root.y = y;
sou->base.set_gui_x = cmd->obj.root.x;
sou->base.set_gui_y = cmd->obj.root.y;
@@ -222,12 +217,11 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct vmw_plane_state *vps;
int ret;
-
- sou = vmw_crtc_to_sou(crtc);
+ sou = vmw_crtc_to_sou(crtc);
dev_priv = vmw_priv(crtc->dev);
- ps = crtc->primary->state;
- fb = ps->fb;
- vps = vmw_plane_state_to_vps(ps);
+ ps = crtc->primary->state;
+ fb = ps->fb;
+ vps = vmw_plane_state_to_vps(ps);
vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
@@ -240,11 +234,25 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
if (vfb) {
- sou->buffer = vps->dmabuf;
- sou->buffer_size = vps->dmabuf_size;
+ struct drm_connector_state *conn_state;
+ struct vmw_connector_state *vmw_conn_state;
+ int x, y;
+
+ sou->buffer = vps->bo;
+ sou->buffer_size = vps->bo_size;
+
+ if (sou->base.is_implicit) {
+ x = crtc->x;
+ y = crtc->y;
+ } else {
+ conn_state = sou->base.connector.state;
+ vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+
+ x = vmw_conn_state->gui_x;
+ y = vmw_conn_state->gui_y;
+ }
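/*
 * In short: an implicit display unit takes its screen position from
 * the CRTC, while an explicit one uses the gui_x/gui_y stored in the
 * connector state that user-space configured.
 */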
- ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
- &crtc->mode);
+ ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode);
if (ret)
DRM_ERROR("Failed to define Screen Object %dx%d\n",
crtc->x, crtc->y);
@@ -408,10 +416,10 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
struct drm_crtc *crtc = plane->state->crtc ?
plane->state->crtc : old_state->crtc;
- if (vps->dmabuf)
- vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
- vmw_dmabuf_unreference(&vps->dmabuf);
- vps->dmabuf_size = 0;
+ if (vps->bo)
+ vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
+ vmw_bo_unreference(&vps->bo);
+ vps->bo_size = 0;
vmw_du_plane_cleanup_fb(plane, old_state);
}
@@ -440,8 +448,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
if (!new_fb) {
- vmw_dmabuf_unreference(&vps->dmabuf);
- vps->dmabuf_size = 0;
+ vmw_bo_unreference(&vps->bo);
+ vps->bo_size = 0;
return 0;
}
@@ -449,22 +457,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
size = new_state->crtc_w * new_state->crtc_h * 4;
dev_priv = vmw_priv(crtc->dev);
- if (vps->dmabuf) {
- if (vps->dmabuf_size == size) {
+ if (vps->bo) {
+ if (vps->bo_size == size) {
/*
* Note that this might temporarily up the pin-count
* to 2, until cleanup_fb() is called.
*/
- return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
+ return vmw_bo_pin_in_vram(dev_priv, vps->bo,
true);
}
- vmw_dmabuf_unreference(&vps->dmabuf);
- vps->dmabuf_size = 0;
+ vmw_bo_unreference(&vps->bo);
+ vps->bo_size = 0;
}
- vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
- if (!vps->dmabuf)
+ vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
+ if (!vps->bo)
return -ENOMEM;
vmw_svga_enable(dev_priv);
@@ -473,22 +481,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
+ ret = vmw_bo_init(dev_priv, vps->bo, size,
&vmw_vram_ne_placement,
- false, &vmw_dmabuf_bo_free);
+ false, &vmw_bo_bo_free);
vmw_overlay_resume_all(dev_priv);
if (ret) {
- vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
+ vps->bo = NULL; /* vmw_bo_init frees on error */
return ret;
}
- vps->dmabuf_size = size;
+ vps->bo_size = size;
/*
* TTM already thinks the buffer is pinned, but make sure the
* pin_count is upped.
*/
- return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
+ return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
}
@@ -512,10 +520,10 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
vclips.w = crtc->mode.hdisplay;
vclips.h = crtc->mode.vdisplay;
- if (vfb->dmabuf)
- ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, NULL,
- &vclips, 1, 1, true,
- &fence, crtc);
+ if (vfb->bo)
+ ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
+ &vclips, 1, 1, true,
+ &fence, crtc);
else
ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
&vclips, NULL, 0, 0,
@@ -527,8 +535,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
*/
if (ret != 0)
DRM_ERROR("Failed to update screen.\n");
-
- crtc->primary->fb = plane->state->fb;
} else {
/*
* When disabling a plane, CRTC and FB should always be NULL
@@ -697,7 +703,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_connector;
}
- (void) drm_mode_connector_attach_encoder(connector, encoder);
+ (void) drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
@@ -777,11 +783,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
return 0;
}
-static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
+static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer)
{
- struct vmw_dma_buffer *buf =
- container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+ struct vmw_buffer_object *buf =
+ container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer;
int depth = framebuffer->base.format->depth;
struct {
@@ -972,13 +978,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
}
/**
- * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips.
+ * vmw_sou_bo_fifo_commit - Callback to submit a set of readback clips.
*
* @dirty: The closure structure.
*
* Commits a previously built command buffer of readback clips.
*/
-static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
{
if (!dirty->num_hits) {
vmw_fifo_commit(dirty->dev_priv, 0);
@@ -986,20 +992,20 @@ static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
}
vmw_fifo_commit(dirty->dev_priv,
- sizeof(struct vmw_kms_sou_dmabuf_blit) *
+ sizeof(struct vmw_kms_sou_bo_blit) *
dirty->num_hits);
}
/**
- * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect.
+ * vmw_sou_bo_clip - Callback to encode a readback cliprect.
*
* @dirty: The closure structure
*
* Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
*/
-static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
+static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty)
{
- struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
+ struct vmw_kms_sou_bo_blit *blit = dirty->cmd;
blit += dirty->num_hits;
blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
@@ -1014,10 +1020,10 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
}
/**
- * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
+ * vmw_kms_sou_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
*
* @dev_priv: Pointer to the device private structure.
- * @framebuffer: Pointer to the dma-buffer backed framebuffer.
+ * @framebuffer: Pointer to the buffer-object backed framebuffer.
* @clips: Array of clip rects.
* @vclips: Alternate array of clip rects. Either @clips or @vclips must
* be NULL.
@@ -1027,12 +1033,12 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
* @out_fence: If non-NULL, will return a ref-counted pointer to a
* struct vmw_fence_obj. The returned fence pointer may be NULL in which
* case the device has already synchronized.
- * @crtc: If crtc is passed, perform dmabuf dirty on that crtc only.
+ * @crtc: If crtc is passed, perform bo dirty on that crtc only.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
-int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips,
@@ -1041,8 +1047,8 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc)
{
- struct vmw_dma_buffer *buf =
- container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+ struct vmw_buffer_object *buf =
+ container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer;
struct vmw_kms_dirty dirty;
int ret;
@@ -1052,14 +1058,14 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
if (ret)
return ret;
- ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
+ ret = do_bo_define_gmrfb(dev_priv, framebuffer);
if (unlikely(ret != 0))
goto out_revert;
dirty.crtc = crtc;
- dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
- dirty.clip = vmw_sou_dmabuf_clip;
- dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
+ dirty.fifo_commit = vmw_sou_bo_fifo_commit;
+ dirty.clip = vmw_sou_bo_clip;
+ dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_bo_blit) *
num_clips;
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
0, 0, num_clips, increment, &dirty);
@@ -1118,12 +1124,12 @@ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
/**
* vmw_kms_sou_readback - Perform a readback from the screen object system to
- * a dma-buffer backed framebuffer.
+ * a buffer-object backed framebuffer.
*
* @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm_file identifying the caller.
* Must be set to NULL if @user_fence_rep is NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
* @user_fence_rep: User-space provided structure for fence information.
* Must be set to non-NULL if @file_priv is non-NULL.
* @vclips: Array of clip rects.
@@ -1141,8 +1147,8 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
uint32_t num_clips,
struct drm_crtc *crtc)
{
- struct vmw_dma_buffer *buf =
- container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+ struct vmw_buffer_object *buf =
+ container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_kms_dirty dirty;
int ret;
@@ -1151,7 +1157,7 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
if (ret)
return ret;
- ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
+ ret = do_bo_define_gmrfb(dev_priv, vfb);
if (unlikely(ret != 0))
goto out_revert;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 73b8e9a16368..fe4842ca3b6e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -159,7 +159,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
SVGA3dShaderType type,
uint8_t num_input_sig,
uint8_t num_output_sig,
- struct vmw_dma_buffer *byte_code,
+ struct vmw_buffer_object *byte_code,
void (*res_free) (struct vmw_resource *res))
{
struct vmw_shader *shader = vmw_res_to_shader(res);
@@ -178,7 +178,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
res->backup_size = size;
if (byte_code) {
- res->backup = vmw_dmabuf_reference(byte_code);
+ res->backup = vmw_bo_reference(byte_code);
res->backup_offset = offset;
}
shader->size = size;
@@ -306,7 +306,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(val_buf->bo, fence);
+ vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -537,7 +537,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(val_buf->bo, fence);
+ vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -723,7 +723,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
}
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buffer,
+ struct vmw_buffer_object *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type,
@@ -801,7 +801,7 @@ out:
static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buffer,
+ struct vmw_buffer_object *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type)
@@ -862,12 +862,12 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_dma_buffer *buffer = NULL;
+ struct vmw_buffer_object *buffer = NULL;
SVGA3dShaderType shader_type;
int ret;
if (buffer_handle != SVGA3D_INVALID_ID) {
- ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
+ ret = vmw_user_bo_lookup(tfile, buffer_handle,
&buffer, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find buffer for shader "
@@ -906,7 +906,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
- vmw_dmabuf_unreference(&buffer);
+ vmw_bo_unreference(&buffer);
return ret;
}
@@ -983,7 +983,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
struct list_head *list)
{
struct ttm_operation_ctx ctx = { false, true };
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
struct ttm_bo_kmap_obj map;
bool is_iomem;
int ret;
@@ -997,8 +997,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(!buf))
return -ENOMEM;
- ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
- true, vmw_dmabuf_bo_free);
+ ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
+ true, vmw_bo_bo_free);
if (unlikely(ret != 0))
goto out;
@@ -1031,7 +1031,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
res, list);
vmw_resource_unreference(&res);
no_reserve:
- vmw_dmabuf_unreference(&buf);
+ vmw_bo_unreference(&buf);
out:
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index a0cb310665cc..6ebc5affde14 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index d3573c37c436..e9b6b7baa009 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
index 268738387b5e..b80c7252f2fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 67331f01ef32..93f6b96ca7bb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/******************************************************************************
*
- * COPYRIGHT © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * COPYRIGHT (C) 2014-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -44,7 +44,7 @@
enum stdu_content_type {
SAME_AS_DISPLAY = 0,
SEPARATE_SURFACE,
- SEPARATE_DMA
+ SEPARATE_BO
};
/**
@@ -58,7 +58,7 @@ enum stdu_content_type {
* @bottom: Bottom side of bounding box.
* @fb_left: Left side of the framebuffer/content bounding box
* @fb_top: Top of the framebuffer/content bounding box
- * @buf: DMA buffer when DMA-ing between buffer and screen targets.
+ * @buf: buffer object when DMA-ing between buffer and screen targets.
* @sid: Surface ID when copying between surface and screen targets.
*/
struct vmw_stdu_dirty {
@@ -68,7 +68,7 @@ struct vmw_stdu_dirty {
s32 fb_left, fb_top;
u32 pitch;
union {
- struct vmw_dma_buffer *buf;
+ struct vmw_buffer_object *buf;
u32 sid;
};
};
@@ -178,13 +178,9 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
cmd->body.height = mode->vdisplay;
cmd->body.flags = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
cmd->body.dpi = 0;
- if (stdu->base.is_implicit) {
- cmd->body.xRoot = crtc_x;
- cmd->body.yRoot = crtc_y;
- } else {
- cmd->body.xRoot = stdu->base.gui_x;
- cmd->body.yRoot = stdu->base.gui_y;
- }
+ cmd->body.xRoot = crtc_x;
+ cmd->body.yRoot = crtc_y;
+
stdu->base.set_gui_x = cmd->body.xRoot;
stdu->base.set_gui_y = cmd->body.yRoot;
@@ -374,11 +370,14 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
- int ret;
-
+ struct drm_connector_state *conn_state;
+ struct vmw_connector_state *vmw_conn_state;
+ int x, y, ret;
- stdu = vmw_crtc_to_stdu(crtc);
+ stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
+ conn_state = stdu->base.connector.state;
+ vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
if (stdu->defined) {
ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
@@ -397,8 +396,16 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (!crtc->state->enable)
return;
+ if (stdu->base.is_implicit) {
+ x = crtc->x;
+ y = crtc->y;
+ } else {
+ x = vmw_conn_state->gui_x;
+ y = vmw_conn_state->gui_y;
+ }
+
vmw_svga_enable(dev_priv);
- ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, crtc->x, crtc->y);
+ ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, x, y);
if (ret)
DRM_ERROR("Failed to define Screen Target of size %dx%d\n",
@@ -414,6 +421,7 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct drm_plane_state *plane_state = crtc->primary->state;
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
struct vmw_framebuffer *vfb;
@@ -422,7 +430,7 @@ static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
- fb = crtc->primary->fb;
+ fb = plane_state->fb;
vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
@@ -507,14 +515,14 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
/**
- * vmw_stdu_dmabuf_clip - Callback to encode a suface DMA command cliprect
+ * vmw_stdu_bo_clip - Callback to encode a surface DMA command cliprect
*
* @dirty: The closure structure.
*
* Encodes a surface DMA command cliprect and updates the bounding box
* for the DMA.
*/
-static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base);
@@ -542,14 +550,14 @@ static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
}
/**
- * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
+ * vmw_stdu_bo_fifo_commit - Callback to fill in and submit a DMA command.
*
* @dirty: The closure structure.
*
* Fills in the missing fields in a DMA command, and optionally encodes
* a screen target update command, depending on transfer direction.
*/
-static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base);
@@ -593,13 +601,13 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
/**
- * vmw_stdu_dmabuf_cpu_clip - Callback to encode a CPU blit
+ * vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit
*
* @dirty: The closure structure.
*
* This function calculates the bounding box for all the incoming clips.
*/
-static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_cpu_clip(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base);
@@ -623,14 +631,14 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
/**
- * vmw_stdu_dmabuf_cpu_commit - Callback to do a CPU blit from DMAbuf
+ * vmw_stdu_bo_cpu_commit - Callback to do a CPU blit from a buffer object
*
* @dirty: The closure structure.
*
* For the special case when we cannot create a proxy surface in a
* 2D VM, we have to do a CPU blit ourselves.
*/
-static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base);
@@ -651,7 +659,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
if (width == 0 || height == 0)
return;
- /* Assume we are blitting from Guest (dmabuf) to Host (display_srf) */
+ /* Assume we are blitting from Guest (bo) to Host (display_srf) */
dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
dst_bo = &stdu->display_srf->res.backup->base;
dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
@@ -711,13 +719,13 @@ out_cleanup:
}
/**
- * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
+ * vmw_kms_stdu_dma - Perform a DMA transfer between a buffer-object backed
* framebuffer and the screen target system.
*
* @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm-file identifying the caller. May be
* set to NULL, but then @user_fence_rep must also be set to NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
* @clips: Array of clip rects. Either @clips or @vclips must be NULL.
* @vclips: Alternate array of clip rects. Either @clips or @vclips must
* be NULL.
@@ -746,8 +754,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
bool interruptible,
struct drm_crtc *crtc)
{
- struct vmw_dma_buffer *buf =
- container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+ struct vmw_buffer_object *buf =
+ container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_stdu_dirty ddirty;
int ret;
bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
@@ -769,8 +777,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
ddirty.fb_left = ddirty.fb_top = S32_MAX;
ddirty.pitch = vfb->base.pitches[0];
ddirty.buf = buf;
- ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
- ddirty.base.clip = vmw_stdu_dmabuf_clip;
+ ddirty.base.fifo_commit = vmw_stdu_bo_fifo_commit;
+ ddirty.base.clip = vmw_stdu_bo_clip;
ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
num_clips * sizeof(SVGA3dCopyBox) +
sizeof(SVGA3dCmdSurfaceDMASuffix);
@@ -779,8 +787,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
if (cpu_blit) {
- ddirty.base.fifo_commit = vmw_stdu_dmabuf_cpu_commit;
- ddirty.base.clip = vmw_stdu_dmabuf_cpu_clip;
+ ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
+ ddirty.base.clip = vmw_stdu_bo_cpu_clip;
ddirty.base.fifo_reserve_size = 0;
}
@@ -926,7 +934,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
if (ret)
return ret;
- if (vfbs->is_dmabuf_proxy) {
+ if (vfbs->is_bo_proxy) {
ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
if (ret)
goto out_finish;
@@ -1074,7 +1082,7 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
* @new_state: info on the new plane state, including the FB
*
* This function allocates a new display surface if the content is
- * backed by a DMA. The display surface is pinned here, and it'll
+ * backed by a buffer object. The display surface is pinned here, and it'll
* be unpinned in .cleanup_fb()
*
* Returns 0 on success
@@ -1104,13 +1112,13 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
}
vfb = vmw_framebuffer_to_vfb(new_fb);
- new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
+ new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay &&
new_vfbs->surface->base_size.height == vdisplay)
new_content_type = SAME_AS_DISPLAY;
- else if (vfb->dmabuf)
- new_content_type = SEPARATE_DMA;
+ else if (vfb->bo)
+ new_content_type = SEPARATE_BO;
else
new_content_type = SEPARATE_SURFACE;
@@ -1123,10 +1131,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
display_base_size.depth = 1;
/*
- * If content buffer is a DMA buf, then we have to construct
- * surface info
+ * If content buffer is a buffer object, then we have to
+ * construct surface info
*/
- if (new_content_type == SEPARATE_DMA) {
+ if (new_content_type == SEPARATE_BO) {
switch (new_fb->format->cpp[0]*8) {
case 32:
@@ -1149,6 +1157,9 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
content_srf.flags = 0;
content_srf.mip_levels[0] = 1;
content_srf.multisample_count = 0;
+ content_srf.multisample_pattern =
+ SVGA3D_MS_PATTERN_NONE;
+ content_srf.quality_level = SVGA3D_MS_QUALITY_NONE;
} else {
content_srf = *new_vfbs->surface;
}
@@ -1177,6 +1188,8 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
content_srf.multisample_count,
0,
display_base_size,
+ content_srf.multisample_pattern,
+ content_srf.quality_level,
&vps->surf);
if (ret != 0) {
DRM_ERROR("Couldn't allocate STDU surface.\n");
@@ -1211,12 +1224,12 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
vps->content_fb_type = new_content_type;
/*
- * This should only happen if the DMA buf is too large to create a
+ * This should only happen if the buffer object is too large to create a
* proxy surface for.
- * If we are a 2D VM with a DMA buffer then we have to use CPU blit
+ * If we are a 2D VM with a buffer object, we have to use a CPU blit,
* so cache these mappings
*/
- if (vps->content_fb_type == SEPARATE_DMA &&
+ if (vps->content_fb_type == SEPARATE_BO &&
!(dev_priv->capabilities & SVGA_CAP_3D))
vps->cpp = new_fb->pitches[0] / new_fb->width;
@@ -1275,7 +1288,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
if (ret)
DRM_ERROR("Failed to bind surface to STDU.\n");
- if (vfb->dmabuf)
+ if (vfb->bo)
ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
&vclips, 1, 1, true, false,
crtc);
@@ -1285,8 +1298,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
1, 1, NULL, crtc);
if (ret)
DRM_ERROR("Failed to update STDU.\n");
-
- crtc->primary->fb = plane->state->fb;
} else {
crtc = old_state->crtc;
stdu = vmw_crtc_to_stdu(crtc);
@@ -1487,7 +1498,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_connector;
}
- (void) drm_mode_connector_attach_encoder(connector, encoder);
+ (void) drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index b236c48bf265..e125233e074b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -33,6 +33,10 @@
#include "vmwgfx_binding.h"
#include "device_include/svga3d_surfacedefs.h"
+#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
+#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
+#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
+ (svga3d_flags & ((uint64_t)U32_MAX))
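/*
 * A quick illustration of the flag-splitting macros above, with a
 * made-up value:
 *
 *	uint64_t f = SVGA3D_FLAGS_64(0x1, 0x2);	// 0x0000000100000002
 *
 *	SVGA3D_FLAGS_UPPER_32(f);	// 0x1
 *	SVGA3D_FLAGS_LOWER_32(f);	// 0x2
 *
 * The legacy UAPI carries only the lower 32 bits, hence the up- and
 * downcasts in the define/reference paths below.
 */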
/**
* struct vmw_user_surface - User-space visible surface resource
@@ -81,7 +85,16 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
-
+static int
+vmw_gb_surface_define_internal(struct drm_device *dev,
+ struct drm_vmw_gb_surface_create_ext_req *req,
+ struct drm_vmw_gb_surface_create_rep *rep,
+ struct drm_file *file_priv);
+static int
+vmw_gb_surface_reference_internal(struct drm_device *dev,
+ struct drm_vmw_surface_arg *req,
+ struct drm_vmw_gb_surface_ref_ext_rep *rep,
+ struct drm_file *file_priv);
static const struct vmw_user_resource_conv user_surface_conv = {
.object_type = VMW_RES_SURFACE,
@@ -224,7 +237,12 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
- cmd->body.surfaceFlags = srf->flags;
+ /*
+ * Downcast of surfaceFlags: it was upcast when received from
+ * user-space, since the driver internally stores it as 64 bit.
+ * The legacy surface define supports only 32-bit flags.
+ */
+ cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->flags;
cmd->body.format = srf->format;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
cmd->body.face[i].numMipLevels = srf->mip_levels[i];
@@ -468,7 +486,7 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(val_buf->bo, fence);
+ vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -760,7 +778,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf = &user_srf->srf;
res = &srf->res;
- srf->flags = req->flags;
+ /* The driver internally stores the flags as 64-bit */
+ srf->flags = (SVGA3dSurfaceAllFlags)req->flags;
srf->format = req->format;
srf->scanout = req->scanout;
@@ -785,6 +804,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->base_size = *srf->sizes;
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
srf->multisample_count = 0;
+ srf->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+ srf->quality_level = SVGA3D_MS_QUALITY_NONE;
cur_bo_offset = 0;
cur_offset = srf->offsets;
@@ -842,12 +863,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (dev_priv->has_mob && req->shareable) {
uint32_t backup_handle;
- ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
- res->backup_size,
- true,
- &backup_handle,
- &res->backup,
- &user_srf->backup_base);
+ ret = vmw_user_bo_alloc(dev_priv, tfile,
+ res->backup_size,
+ true,
+ &backup_handle,
+ &res->backup,
+ &user_srf->backup_base);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
@@ -990,7 +1011,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- rep->flags = srf->flags;
+ /* Downcast of flags when sending back to user space */
+ rep->flags = (uint32_t)srf->flags;
rep->format = srf->format;
memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
user_sizes = (struct drm_vmw_size __user *)(unsigned long)
@@ -1031,6 +1053,10 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBSurface_v2 body;
} *cmd2;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBSurface_v3 body;
+ } *cmd3;
if (likely(res->id != -1))
return 0;
@@ -1047,7 +1073,11 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
goto out_no_fifo;
}
- if (srf->array_size > 0) {
+ if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+ cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
+ cmd_len = sizeof(cmd3->body);
+ submit_len = sizeof(*cmd3);
+ } else if (srf->array_size > 0) {
/* has_dx checked on creation time. */
cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
cmd_len = sizeof(cmd2->body);
@@ -1060,6 +1090,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd = vmw_fifo_reserve(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
+ cmd3 = (typeof(cmd3))cmd;
if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
@@ -1067,12 +1098,27 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
goto out_no_fifo;
}
- if (srf->array_size > 0) {
+ if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+ cmd3->header.id = cmd_id;
+ cmd3->header.size = cmd_len;
+ cmd3->body.sid = srf->res.id;
+ cmd3->body.surfaceFlags = srf->flags;
+ cmd3->body.format = srf->format;
+ cmd3->body.numMipLevels = srf->mip_levels[0];
+ cmd3->body.multisampleCount = srf->multisample_count;
+ cmd3->body.multisamplePattern = srf->multisample_pattern;
+ cmd3->body.qualityLevel = srf->quality_level;
+ cmd3->body.autogenFilter = srf->autogen_filter;
+ cmd3->body.size.width = srf->base_size.width;
+ cmd3->body.size.height = srf->base_size.height;
+ cmd3->body.size.depth = srf->base_size.depth;
+ cmd3->body.arraySize = srf->array_size;
+ } else if (srf->array_size > 0) {
cmd2->header.id = cmd_id;
cmd2->header.size = cmd_len;
cmd2->body.sid = srf->res.id;
cmd2->body.surfaceFlags = srf->flags;
- cmd2->body.format = cpu_to_le32(srf->format);
+ cmd2->body.format = srf->format;
cmd2->body.numMipLevels = srf->mip_levels[0];
cmd2->body.multisampleCount = srf->multisample_count;
cmd2->body.autogenFilter = srf->autogen_filter;
@@ -1085,7 +1131,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
cmd->body.surfaceFlags = srf->flags;
- cmd->body.format = cpu_to_le32(srf->format);
+ cmd->body.format = srf->format;
cmd->body.numMipLevels = srf->mip_levels[0];
cmd->body.multisampleCount = srf->multisample_count;
cmd->body.autogenFilter = srf->autogen_filter;
@@ -1210,7 +1256,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
- vmw_fence_single_bo(val_buf->bo, fence);
+ vmw_bo_fence_single(val_buf->bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
@@ -1256,194 +1302,55 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
/**
* vmw_gb_surface_define_ioctl - Ioctl function implementing
- * the user surface define functionality.
+ * the user surface define functionality.
*
- * @dev: Pointer to a struct drm_device.
- * @data: Pointer to data copied from / to user-space.
- * @file_priv: Pointer to a drm file private structure.
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
*/
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_surface *user_srf;
- struct vmw_surface *srf;
- struct vmw_resource *res;
- struct vmw_resource *tmp;
union drm_vmw_gb_surface_create_arg *arg =
(union drm_vmw_gb_surface_create_arg *)data;
- struct drm_vmw_gb_surface_create_req *req = &arg->req;
struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- int ret;
- uint32_t size;
- uint32_t backup_handle = 0;
-
- if (req->multisample_count != 0)
- return -EINVAL;
-
- if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
- return -EINVAL;
+ struct drm_vmw_gb_surface_create_ext_req req_ext;
- if (unlikely(vmw_user_surface_size == 0))
- vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- 128;
-
- size = vmw_user_surface_size + 128;
-
- /* Define a surface based on the parameters. */
- ret = vmw_surface_gb_priv_define(dev,
- size,
- req->svga3d_flags,
- req->format,
- req->drm_surface_flags & drm_vmw_surface_flag_scanout,
- req->mip_levels,
- req->multisample_count,
- req->array_size,
- req->base_size,
- &srf);
- if (unlikely(ret != 0))
- return ret;
-
- user_srf = container_of(srf, struct vmw_user_surface, srf);
- if (drm_is_primary_client(file_priv))
- user_srf->master = drm_master_get(file_priv->master);
+ req_ext.base = arg->req;
+ req_ext.version = drm_vmw_gb_surface_v1;
+ req_ext.svga3d_flags_upper_32_bits = 0;
+ req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+ req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
+ req_ext.must_be_zero = 0;
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
- res = &user_srf->srf.res;
-
-
- if (req->buffer_handle != SVGA3D_INVALID_ID) {
- ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
- &res->backup,
- &user_srf->backup_base);
- if (ret == 0) {
- if (res->backup->base.num_pages * PAGE_SIZE <
- res->backup_size) {
- DRM_ERROR("Surface backup buffer is too small.\n");
- vmw_dmabuf_unreference(&res->backup);
- ret = -EINVAL;
- goto out_unlock;
- } else {
- backup_handle = req->buffer_handle;
- }
- }
- } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
- ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
- res->backup_size,
- req->drm_surface_flags &
- drm_vmw_surface_flag_shareable,
- &backup_handle,
- &res->backup,
- &user_srf->backup_base);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&res);
- goto out_unlock;
- }
-
- tmp = vmw_resource_reference(res);
- ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
- req->drm_surface_flags &
- drm_vmw_surface_flag_shareable,
- VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- vmw_resource_unreference(&res);
- goto out_unlock;
- }
-
- rep->handle = user_srf->prime.base.hash.key;
- rep->backup_size = res->backup_size;
- if (res->backup) {
- rep->buffer_map_handle =
- drm_vma_node_offset_addr(&res->backup->base.vma_node);
- rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
- rep->buffer_handle = backup_handle;
- } else {
- rep->buffer_map_handle = 0;
- rep->buffer_size = 0;
- rep->buffer_handle = SVGA3D_INVALID_ID;
- }
-
- vmw_resource_unreference(&res);
-
-out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
- return ret;
+ return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
}
/**
* vmw_gb_surface_reference_ioctl - Ioctl function implementing
- * the user surface reference functionality.
+ * the user surface reference functionality.
*
- * @dev: Pointer to a struct drm_device.
- * @data: Pointer to data copied from / to user-space.
- * @file_priv: Pointer to a drm file private structure.
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
*/
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
union drm_vmw_gb_surface_reference_arg *arg =
(union drm_vmw_gb_surface_reference_arg *)data;
struct drm_vmw_surface_arg *req = &arg->req;
struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_surface *srf;
- struct vmw_user_surface *user_srf;
- struct ttm_base_object *base;
- uint32_t backup_handle;
- int ret = -EINVAL;
+ struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
+ int ret;
+
+ ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);
- ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
- req->handle_type, &base);
if (unlikely(ret != 0))
return ret;
- user_srf = container_of(base, struct vmw_user_surface, prime.base);
- srf = &user_srf->srf;
- if (!srf->res.backup) {
- DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
- goto out_bad_resource;
- }
-
- mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
- ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
- &backup_handle);
- mutex_unlock(&dev_priv->cmdbuf_mutex);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not add a reference to a GB surface "
- "backup buffer.\n");
- (void) ttm_ref_object_base_unref(tfile, base->hash.key,
- TTM_REF_USAGE);
- goto out_bad_resource;
- }
-
- rep->creq.svga3d_flags = srf->flags;
- rep->creq.format = srf->format;
- rep->creq.mip_levels = srf->mip_levels[0];
- rep->creq.drm_surface_flags = 0;
- rep->creq.multisample_count = srf->multisample_count;
- rep->creq.autogen_filter = srf->autogen_filter;
- rep->creq.array_size = srf->array_size;
- rep->creq.buffer_handle = backup_handle;
- rep->creq.base_size = srf->base_size;
- rep->crep.handle = user_srf->prime.base.hash.key;
- rep->crep.backup_size = srf->res.backup_size;
- rep->crep.buffer_handle = backup_handle;
- rep->crep.buffer_map_handle =
- drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
- rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
-
-out_bad_resource:
- ttm_base_object_unref(&base);
+ rep->creq = rep_ext.creq.base;
+ rep->crep = rep_ext.crep;
return ret;
}
@@ -1461,6 +1368,8 @@ out_bad_resource:
 * @multisample_count: Multisample count.
* @array_size: Surface array size.
 * @size: width, height, depth of the surface requested
+ * @multisample_pattern: Multisampling pattern when MSAA is supported
+ * @quality_level: Precision settings
* @user_srf_out: allocated user_srf. Set to NULL on failure.
*
* GB surfaces allocated by this function will not have a user mode handle, and
@@ -1470,13 +1379,15 @@ out_bad_resource:
*/
int vmw_surface_gb_priv_define(struct drm_device *dev,
uint32_t user_accounting_size,
- uint32_t svga3d_flags,
+ SVGA3dSurfaceAllFlags svga3d_flags,
SVGA3dSurfaceFormat format,
bool for_scanout,
uint32_t num_mip_levels,
uint32_t multisample_count,
uint32_t array_size,
struct drm_vmw_size size,
+ SVGA3dMSPattern multisample_pattern,
+ SVGA3dMSQualityLevel quality_level,
struct vmw_surface **srf_out)
{
struct vmw_private *dev_priv = vmw_priv(dev);
@@ -1487,7 +1398,8 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
};
struct vmw_surface *srf;
int ret;
- u32 num_layers;
+ u32 num_layers = 1;
+ u32 sample_count = 1;
*srf_out = NULL;
@@ -1562,19 +1474,23 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
srf->array_size = array_size;
srf->multisample_count = multisample_count;
+ srf->multisample_pattern = multisample_pattern;
+ srf->quality_level = quality_level;
if (array_size)
num_layers = array_size;
else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
num_layers = SVGA3D_MAX_SURFACE_FACES;
- else
- num_layers = 1;
+
+ if (srf->flags & SVGA3D_SURFACE_MULTISAMPLE)
+ sample_count = srf->multisample_count;
srf->res.backup_size =
- svga3dsurface_get_serialized_size(srf->format,
- srf->base_size,
- srf->mip_levels[0],
- num_layers);
+ svga3dsurface_get_serialized_size_extended(srf->format,
+ srf->base_size,
+ srf->mip_levels[0],
+ num_layers,
+ sample_count);
if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
srf->res.backup_size += sizeof(SVGA3dDXSOState);
@@ -1599,3 +1515,266 @@ out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
+
+/**
+ * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ union drm_vmw_gb_surface_create_ext_arg *arg =
+ (union drm_vmw_gb_surface_create_ext_arg *)data;
+ struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
+ struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
+
+ return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
+}
+
+/**
+ * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ union drm_vmw_gb_surface_reference_ext_arg *arg =
+ (union drm_vmw_gb_surface_reference_ext_arg *)data;
+ struct drm_vmw_surface_arg *req = &arg->req;
+ struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;
+
+ return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
+}
+
+/**
+ * vmw_gb_surface_define_internal - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @req: Request argument from user-space.
+ * @rep: Response argument to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+static int
+vmw_gb_surface_define_internal(struct drm_device *dev,
+ struct drm_vmw_gb_surface_create_ext_req *req,
+ struct drm_vmw_gb_surface_create_rep *rep,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+ uint32_t size;
+ uint32_t backup_handle = 0;
+ SVGA3dSurfaceAllFlags svga3d_flags_64 =
+ SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
+ req->base.svga3d_flags);
+
+ if (!dev_priv->has_sm4_1) {
+ /*
+	 * If SM4_1 is not supported then we cannot send 64-bit flags to
+	 * the device.
+ */
+ if (req->svga3d_flags_upper_32_bits != 0)
+ return -EINVAL;
+
+ if (req->base.multisample_count != 0)
+ return -EINVAL;
+
+ if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
+ return -EINVAL;
+
+ if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
+ return -EINVAL;
+ }
+
+ if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
+ req->base.multisample_count == 0)
+ return -EINVAL;
+
+ if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
+
+ if (unlikely(vmw_user_surface_size == 0))
+ vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+ 128;
+
+ size = vmw_user_surface_size + 128;
+
+ /* Define a surface based on the parameters. */
+ ret = vmw_surface_gb_priv_define(dev,
+ size,
+ svga3d_flags_64,
+ req->base.format,
+ req->base.drm_surface_flags &
+ drm_vmw_surface_flag_scanout,
+ req->base.mip_levels,
+ req->base.multisample_count,
+ req->base.array_size,
+ req->base.base_size,
+ req->multisample_pattern,
+ req->quality_level,
+ &srf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ user_srf = container_of(srf, struct vmw_user_surface, srf);
+ if (drm_is_primary_client(file_priv))
+ user_srf->master = drm_master_get(file_priv->master);
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ res = &user_srf->srf.res;
+
+ if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
+ ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle,
+ &res->backup,
+ &user_srf->backup_base);
+ if (ret == 0) {
+ if (res->backup->base.num_pages * PAGE_SIZE <
+ res->backup_size) {
+ DRM_ERROR("Surface backup buffer too small.\n");
+ vmw_bo_unreference(&res->backup);
+ ret = -EINVAL;
+ goto out_unlock;
+ } else {
+ backup_handle = req->base.buffer_handle;
+ }
+ }
+ } else if (req->base.drm_surface_flags &
+ drm_vmw_surface_flag_create_buffer)
+ ret = vmw_user_bo_alloc(dev_priv, tfile,
+ res->backup_size,
+ req->base.drm_surface_flags &
+ drm_vmw_surface_flag_shareable,
+ &backup_handle,
+ &res->backup,
+ &user_srf->backup_base);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ tmp = vmw_resource_reference(res);
+ ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ req->base.drm_surface_flags &
+ drm_vmw_surface_flag_shareable,
+ VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ rep->handle = user_srf->prime.base.hash.key;
+ rep->backup_size = res->backup_size;
+ if (res->backup) {
+ rep->buffer_map_handle =
+ drm_vma_node_offset_addr(&res->backup->base.vma_node);
+ rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+ rep->buffer_handle = backup_handle;
+ } else {
+ rep->buffer_map_handle = 0;
+ rep->buffer_size = 0;
+ rep->buffer_handle = SVGA3D_INVALID_ID;
+ }
+
+ vmw_resource_unreference(&res);
+
+out_unlock:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
+
+/**
+ * vmw_gb_surface_reference_internal - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @req: Pointer to user-space request surface arg.
+ * @rep: Pointer to response to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+static int
+vmw_gb_surface_reference_internal(struct drm_device *dev,
+ struct drm_vmw_surface_arg *req,
+ struct drm_vmw_gb_surface_ref_ext_rep *rep,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct ttm_base_object *base;
+ uint32_t backup_handle;
+ int ret = -EINVAL;
+
+ ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
+ req->handle_type, &base);
+ if (unlikely(ret != 0))
+ return ret;
+
+ user_srf = container_of(base, struct vmw_user_surface, prime.base);
+ srf = &user_srf->srf;
+ if (!srf->res.backup) {
+ DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
+ goto out_bad_resource;
+ }
+
+ mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
+ ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a GB surface "
+ "backup buffer.\n");
+ (void) ttm_ref_object_base_unref(tfile, base->hash.key,
+ TTM_REF_USAGE);
+ goto out_bad_resource;
+ }
+
+ rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(srf->flags);
+ rep->creq.base.format = srf->format;
+ rep->creq.base.mip_levels = srf->mip_levels[0];
+ rep->creq.base.drm_surface_flags = 0;
+ rep->creq.base.multisample_count = srf->multisample_count;
+ rep->creq.base.autogen_filter = srf->autogen_filter;
+ rep->creq.base.array_size = srf->array_size;
+ rep->creq.base.buffer_handle = backup_handle;
+ rep->creq.base.base_size = srf->base_size;
+ rep->crep.handle = user_srf->prime.base.hash.key;
+ rep->crep.backup_size = srf->res.backup_size;
+ rep->crep.buffer_handle = backup_handle;
+ rep->crep.buffer_map_handle =
+ drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+ rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+
+ rep->creq.version = drm_vmw_gb_surface_v1;
+ rep->creq.svga3d_flags_upper_32_bits =
+ SVGA3D_FLAGS_UPPER_32(srf->flags);
+ rep->creq.multisample_pattern = srf->multisample_pattern;
+ rep->creq.quality_level = srf->quality_level;
+ rep->creq.must_be_zero = 0;
+
+out_bad_resource:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
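
Note: the SVGA3D_FLAGS_64(), SVGA3D_FLAGS_LOWER_32() and SVGA3D_FLAGS_UPPER_32()
helpers used above combine and split the legacy 32-bit surface flags and the
new upper word. A minimal sketch of the assumed semantics (the authoritative
definitions live in the vmwgfx SVGA headers, not in this hunk):

	/* Sketch only; assumed bit layout, not the header copy. */
	#define SVGA3D_FLAGS_64(upper32, lower32) \
		(((u64)(upper32) << 32) | (u32)(lower32))
	#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
		((u32)((svga3d_flags) & 0xffffffffULL))
	#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) \
		((u32)((svga3d_flags) >> 32))

With this layout, pre-SM4_1 userspace that only knows the 32-bit flags keeps
working: the upper word is simply zero.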
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 21111fd091f9..31786b200afc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -798,7 +798,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
struct ttm_object_file *tfile =
vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
- return vmw_user_dmabuf_verify_access(bo, tfile);
+ return vmw_user_bo_verify_access(bo, tfile);
}
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -852,7 +852,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_mem_reg *mem)
{
- vmw_resource_move_notify(bo, mem);
+ vmw_bo_move_notify(bo, mem);
vmw_query_move_notify(bo, mem);
}
@@ -864,7 +864,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
- vmw_resource_swap_notify(bo);
+ vmw_bo_swap_notify(bo);
(void) ttm_bo_wait(bo, false, false);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index e771091d2cd3..7b1e5a5cbd2c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2011 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
index b4162fd78600..ebc1d83c34b4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2012-2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2012-2016 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index b3786c1a4e80..6b6d5ab82ec3 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -623,7 +623,7 @@ static int displback_initwait(struct xen_drm_front_info *front_info)
if (ret < 0)
return ret;
- DRM_INFO("Have %d conector(s)\n", cfg->num_connectors);
+ DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
/* Create event channels for all connectors and publish */
ret = xen_drm_front_evtchnl_create_all(front_info);
if (ret < 0)
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
index 2c2479b571ae..5693b4a4b02b 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.h
+++ b/drivers/gpu/drm/xen/xen_drm_front.h
@@ -126,12 +126,12 @@ struct xen_drm_front_drm_info {
static inline u64 xen_drm_front_fb_to_cookie(struct drm_framebuffer *fb)
{
- return (u64)fb;
+ return (uintptr_t)fb;
}
static inline u64 xen_drm_front_dbuf_to_cookie(struct drm_gem_object *gem_obj)
{
- return (u64)gem_obj;
+ return (uintptr_t)gem_obj;
}
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
index 8099cb343ae3..d333b67cc1a0 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
@@ -122,7 +122,7 @@ static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
}
#define xen_page_to_vaddr(page) \
- ((phys_addr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
+ ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
static int backend_unmap(struct xen_drm_front_shbuf *buf)
{
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
index 13ea90f7a185..78655269d843 100644
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -272,7 +272,7 @@ static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -326,7 +326,7 @@ static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
drm_connector_helper_add(&hdmi->connector,
&zx_hdmi_connector_helper_funcs);
- drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+ drm_connector_attach_encoder(&hdmi->connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index d1931f5ea0b2..ae8c53b4b261 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -446,7 +446,7 @@ static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
static void zx_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c
index 0de1a71ca4e0..b73afb212fb2 100644
--- a/drivers/gpu/drm/zte/zx_tvenc.c
+++ b/drivers/gpu/drm/zte/zx_tvenc.c
@@ -297,7 +297,7 @@ static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc)
DRM_MODE_CONNECTOR_Composite);
drm_connector_helper_add(connector, &zx_tvenc_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c
index 3e7e33cd3dfa..23d1ff4355a0 100644
--- a/drivers/gpu/drm/zte/zx_vga.c
+++ b/drivers/gpu/drm/zte/zx_vga.c
@@ -109,7 +109,7 @@ static int zx_vga_connector_get_modes(struct drm_connector *connector)
*/
zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, VGA_DETECT_SEL_HAS_DEVICE);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -175,7 +175,7 @@ static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
drm_connector_helper_add(connector, &zx_vga_connector_helper_funcs);
- ret = drm_mode_connector_attach_encoder(connector, encoder);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret) {
DRM_DEV_ERROR(dev, "failed to attach encoder: %d\n", ret);
goto clean_connector;
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 48685cddbad1..474b00e19697 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -122,6 +122,8 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
return IPUV3_COLORSPACE_YUV;
+ case V4L2_PIX_FMT_XRGB32:
+ case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_RGB24:
@@ -190,6 +192,8 @@ int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
return (24 * pixel_stride) >> 3;
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_XBGR32:
+ case V4L2_PIX_FMT_XRGB32:
return (32 * pixel_stride) >> 3;
default:
break;
@@ -1401,6 +1405,8 @@ static int ipu_probe(struct platform_device *pdev)
return -ENODEV;
ipu->id = of_alias_get_id(np, "ipu");
+ if (ipu->id < 0)
+ ipu->id = 0;
if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
IS_ENABLED(CONFIG_DRM)) {
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 9f2d9ec42add..a9d2501500a1 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -188,6 +188,12 @@ static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
case V4L2_PIX_FMT_RGB32:
/* R G B A <=> [32:0] A:B:G:R */
return DRM_FORMAT_XBGR8888;
+ case V4L2_PIX_FMT_XBGR32:
+ /* B G R X <=> [32:0] X:R:G:B */
+ return DRM_FORMAT_XRGB8888;
+ case V4L2_PIX_FMT_XRGB32:
+ /* X R G B <=> [32:0] B:G:R:X */
+ return DRM_FORMAT_BGRX8888;
case V4L2_PIX_FMT_UYVY:
return DRM_FORMAT_UYVY;
case V4L2_PIX_FMT_YUYV:
@@ -269,9 +275,20 @@ EXPORT_SYMBOL_GPL(ipu_cpmem_set_uv_offset);
void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride)
{
+ u32 ilo, sly;
+
+ if (stride < 0) {
+ stride = -stride;
+ ilo = 0x100000 - (stride / 8);
+ } else {
+ ilo = stride / 8;
+ }
+
+ sly = (stride * 2) - 1;
+
ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1);
- ipu_ch_param_write_field(ch, IPU_FIELD_ILO, stride / 8);
- ipu_ch_param_write_field(ch, IPU_FIELD_SLY, (stride * 2) - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_ILO, ilo);
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLY, sly);
};
EXPORT_SYMBOL_GPL(ipu_cpmem_interlaced_scan);
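
Note: the negative-stride branch added above appears to encode the interlace
offset (ILO) as a 20-bit two's-complement value in 8-byte units: 0x100000 is
2^20, so 0x100000 - (stride / 8) represents -(stride / 8). A hedged sketch of
the arithmetic, using a hypothetical 1920-byte line; ilo_encode() is made up:

	/* Sketch mirroring the driver logic above. */
	static u32 ilo_encode(int stride)
	{
		if (stride < 0)
			return 0x100000 - (-stride / 8); /* 20-bit negative */
		return stride / 8;
	}

	/* ilo_encode(-1920) == 0x100000 - 240 == 0xfff10 */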
@@ -530,17 +547,17 @@ static const struct ipu_rgb def_bgra_16 = {
#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * (y) / 4) + (x) / 2)
+ (pix->width * ((y) / 2) / 2) + (x) / 2)
#define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \
(pix->width * pix->height / 4) + \
- (pix->width * (y) / 4) + (x) / 2)
+ (pix->width * ((y) / 2) / 2) + (x) / 2)
#define U2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
(pix->width * (y) / 2) + (x) / 2)
#define V2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
(pix->width * pix->height / 2) + \
(pix->width * (y) / 2) + (x) / 2)
#define UV_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * (y) / 2) + (x))
+ (pix->width * ((y) / 2)) + (x))
#define UV2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
(pix->width * y) + (x))
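
Note: the U/V/UV offset changes above matter for odd crop positions in 4:2:0
data, where each chroma line serves two luma lines, so the row index must be
halved before any further scaling. A worked check, assuming a 1920-wide frame
cropped at y = 3:

	/* old U row term: 1920 * 3 / 4       = 1440 (lands mid chroma row)  */
	/* new U row term: 1920 * (3 / 2) / 2 =  960 (start of chroma row 1) */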
@@ -776,6 +793,8 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
break;
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_BGR32:
+ case V4L2_PIX_FMT_XRGB32:
+ case V4L2_PIX_FMT_XBGR32:
offset = image->rect.left * 4 +
image->rect.top * pix->bytesperline;
break;
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index 5450a2db1219..954eefe144e2 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -224,14 +224,18 @@ static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
* Find the CSI data format and data width for the given V4L2 media
* bus pixel format code.
*/
-static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
+static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code,
+ enum v4l2_mbus_type mbus_type)
{
switch (mbus_code) {
case MEDIA_BUS_FMT_BGR565_2X8_BE:
case MEDIA_BUS_FMT_BGR565_2X8_LE:
case MEDIA_BUS_FMT_RGB565_2X8_BE:
case MEDIA_BUS_FMT_RGB565_2X8_LE:
- cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
+ if (mbus_type == V4L2_MBUS_CSI2)
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
+ else
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RGB565;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
@@ -247,6 +251,12 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
cfg->mipi_dt = MIPI_DT_RGB555;
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB_YUV444;
+ cfg->mipi_dt = MIPI_DT_RGB888;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ break;
case MEDIA_BUS_FMT_UYVY8_2X8:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
cfg->mipi_dt = MIPI_DT_YUV422;
@@ -318,13 +328,17 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
/*
* Fill a CSI bus config struct from mbus_config and mbus_framefmt.
*/
-static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
+static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
struct v4l2_mbus_config *mbus_cfg,
struct v4l2_mbus_framefmt *mbus_fmt)
{
+ int ret;
+
memset(csicfg, 0, sizeof(*csicfg));
- mbus_code_to_bus_cfg(csicfg, mbus_fmt->code);
+ ret = mbus_code_to_bus_cfg(csicfg, mbus_fmt->code, mbus_cfg->type);
+ if (ret < 0)
+ return ret;
switch (mbus_cfg->type) {
case V4L2_MBUS_PARALLEL:
@@ -356,6 +370,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
/* will never get here, keep compiler quiet */
break;
}
+
+ return 0;
}
int ipu_csi_init_interface(struct ipu_csi *csi,
@@ -365,8 +381,11 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
struct ipu_csi_bus_config cfg;
unsigned long flags;
u32 width, height, data = 0;
+ int ret;
- fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+ ret = fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+ if (ret < 0)
+ return ret;
/* set default sensor frame width and height */
width = mbus_fmt->width;
@@ -587,11 +606,14 @@ int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
struct ipu_csi_bus_config cfg;
unsigned long flags;
u32 temp;
+ int ret;
if (vc > 3)
return -EINVAL;
- mbus_code_to_bus_cfg(&cfg, mbus_fmt->code);
+ ret = mbus_code_to_bus_cfg(&cfg, mbus_fmt->code, V4L2_MBUS_CSI2);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&csi->lock, flags);
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index 524a717ab28e..f4081962784c 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -227,6 +227,12 @@ static const struct ipu_image_pixfmt image_convert_formats[] = {
.fourcc = V4L2_PIX_FMT_BGR32,
.bpp = 32,
}, {
+ .fourcc = V4L2_PIX_FMT_XRGB32,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .bpp = 32,
+ }, {
.fourcc = V4L2_PIX_FMT_YUYV,
.bpp = 16,
.uv_width_dec = 2,
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index 0f70e8847540..2f8db9d62551 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -128,7 +128,8 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index)
list_for_each_entry(pre, &ipu_pre_list, list) {
if (pre_node == pre->dev->of_node) {
mutex_unlock(&ipu_pre_list_mutex);
- device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE);
+ device_link_add(dev, pre->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
of_node_put(pre_node);
return pre;
}
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index 83f9dd934a5d..38a3a9764e49 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -100,7 +100,8 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id)
list_for_each_entry(prg, &ipu_prg_list, list) {
if (prg_node == prg->dev->of_node) {
mutex_unlock(&ipu_prg_list_mutex);
- device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE);
+ device_link_add(dev, prg->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
prg->id = ipu_id;
of_node_put(prg_node);
return prg;
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index fc4adf3d34e8..a96bf46bc483 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -103,9 +103,11 @@
* runtime pm. If true, writing ON and OFF to the vga_switcheroo debugfs
* interface is a no-op so as not to interfere with runtime pm
* @list: client list
+ * @vga_dev: pci device, indicates which GPU is bound to the current audio client
*
* Registered client. A client can be either a GPU or an audio device on a GPU.
- * For audio clients, the @fb_info and @active members are bogus.
+ * For audio clients, the @fb_info and @active members are bogus. For GPU
+ * clients, the @vga_dev member is bogus.
*/
struct vga_switcheroo_client {
struct pci_dev *pdev;
@@ -116,6 +118,7 @@ struct vga_switcheroo_client {
bool active;
bool driver_power_control;
struct list_head list;
+ struct pci_dev *vga_dev;
};
/*
@@ -161,9 +164,8 @@ struct vgasr_priv {
};
#define ID_BIT_AUDIO 0x100
-#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
-#define client_is_vga(c) ((c)->id == VGA_SWITCHEROO_UNKNOWN_ID || \
- !client_is_audio(c))
+#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
+#define client_is_vga(c) (!client_is_audio(c))
#define client_id(c) ((c)->id & ~ID_BIT_AUDIO)
static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
@@ -192,14 +194,29 @@ static void vga_switcheroo_enable(void)
vgasr_priv.handler->init();
list_for_each_entry(client, &vgasr_priv.clients, list) {
- if (client->id != VGA_SWITCHEROO_UNKNOWN_ID)
+ if (!client_is_vga(client) ||
+ client_id(client) != VGA_SWITCHEROO_UNKNOWN_ID)
continue;
+
ret = vgasr_priv.handler->get_client_id(client->pdev);
if (ret < 0)
return;
client->id = ret;
}
+
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (!client_is_audio(client) ||
+ client_id(client) != VGA_SWITCHEROO_UNKNOWN_ID)
+ continue;
+
+ ret = vgasr_priv.handler->get_client_id(client->vga_dev);
+ if (ret < 0)
+ return;
+
+ client->id = ret | ID_BIT_AUDIO;
+ }
+
vga_switcheroo_debugfs_init(&vgasr_priv);
vgasr_priv.active = true;
}
@@ -272,7 +289,9 @@ EXPORT_SYMBOL(vga_switcheroo_handler_flags);
static int register_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- enum vga_switcheroo_client_id id, bool active,
+ enum vga_switcheroo_client_id id,
+ struct pci_dev *vga_dev,
+ bool active,
bool driver_power_control)
{
struct vga_switcheroo_client *client;
@@ -287,6 +306,7 @@ static int register_client(struct pci_dev *pdev,
client->id = id;
client->active = active;
client->driver_power_control = driver_power_control;
+ client->vga_dev = vga_dev;
mutex_lock(&vgasr_mutex);
list_add_tail(&client->list, &vgasr_priv.clients);
@@ -319,7 +339,7 @@ int vga_switcheroo_register_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
bool driver_power_control)
{
- return register_client(pdev, ops, VGA_SWITCHEROO_UNKNOWN_ID,
+ return register_client(pdev, ops, VGA_SWITCHEROO_UNKNOWN_ID, NULL,
pdev == vga_default_device(),
driver_power_control);
}
@@ -329,19 +349,40 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
* vga_switcheroo_register_audio_client - register audio client
* @pdev: client pci device
* @ops: client callbacks
- * @id: client identifier
+ * @vga_dev: pci device which is bound to the current audio client
*
* Register audio client (audio device on a GPU). The client is assumed
* to use runtime PM. Beforehand, vga_switcheroo_client_probe_defer()
* shall be called to ensure that all prerequisites are met.
*
- * Return: 0 on success, -ENOMEM on memory allocation error.
+ * Return: 0 on success, -ENOMEM on memory allocation error, -EINVAL if the
+ * client id cannot be determined.
*/
int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- enum vga_switcheroo_client_id id)
+ struct pci_dev *vga_dev)
{
- return register_client(pdev, ops, id | ID_BIT_AUDIO, false, true);
+ enum vga_switcheroo_client_id id = VGA_SWITCHEROO_UNKNOWN_ID;
+
+ /*
+	 * If vga_switcheroo is enabled, both GPU clients and the handler are
+	 * already registered, so get the audio client id directly from the
+	 * bound GPU client. Otherwise leave it as VGA_SWITCHEROO_UNKNOWN_ID;
+	 * it will be set to the correct id later, when vga_switcheroo_enable()
+ * is called.
+ */
+ mutex_lock(&vgasr_mutex);
+ if (vgasr_priv.active) {
+ id = vgasr_priv.handler->get_client_id(vga_dev);
+ if (id < 0) {
+ mutex_unlock(&vgasr_mutex);
+ return -EINVAL;
+ }
+ }
+ mutex_unlock(&vgasr_mutex);
+
+ return register_client(pdev, ops, id | ID_BIT_AUDIO, vga_dev,
+ false, true);
}
EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
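
Note: with the signature change above, a GPU audio driver now passes the
pci_dev of its bound GPU instead of a client id. A hedged usage sketch; the
function and device names are placeholders, and the ops are left empty:

	static int my_hda_register(struct pci_dev *hda_pdev,
				   struct pci_dev *gpu_pdev)
	{
		static const struct vga_switcheroo_client_ops my_audio_ops = {
			/* .set_gpu_state etc. as the driver requires */
		};

		return vga_switcheroo_register_audio_client(hda_pdev,
							    &my_audio_ops,
							    gpu_pdev);
	}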
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f10840ad465c..ccf42663a908 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -937,6 +937,18 @@ config SENSORS_MCP3021
This driver can also be built as a module. If so, the module
will be called mcp3021.
+config SENSORS_MLXREG_FAN
+ tristate "Mellanox Mellanox FAN driver"
+ depends on MELLANOX_PLATFORM
+ imply THERMAL
+ select REGMAP
+ help
+ This option enables support for the FAN control on the Mellanox
+ Ethernet and InfiniBand switches. The driver can be activated by the
+	  platform device add call. Say Y to enable it. To compile this
+ driver as a module, choose 'M' here: the module will be called
+ mlxreg-fan.
+
config SENSORS_TC654
tristate "Microchip TC654/TC655 and compatibles"
depends on I2C
@@ -1256,6 +1268,16 @@ config SENSORS_NCT7904
This driver can also be built as a module. If so, the module
will be called nct7904.
+config SENSORS_NPCM7XX
+ tristate "Nuvoton NPCM750 and compatible PWM and Fan controllers"
+ imply THERMAL
+ help
+ This driver provides support for Nuvoton NPCM750/730/715/705 PWM
+ and Fan controllers.
+
+ This driver can also be built as a module. If so, the module
+ will be called npcm750-pwm-fan.
+
config SENSORS_NSA320
tristate "ZyXEL NSA320 and compatible fan speed and temperature sensors"
depends on GPIOLIB && OF
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index e7d52a36e6c4..842c92f83ce6 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -129,11 +129,13 @@ obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
obj-$(CONFIG_SENSORS_TC654) += tc654.o
+obj-$(CONFIG_SENSORS_MLXREG_FAN) += mlxreg-fan.o
obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o
obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
obj-$(CONFIG_SENSORS_NCT7802) += nct7802.o
obj-$(CONFIG_SENSORS_NCT7904) += nct7904.o
+obj-$(CONFIG_SENSORS_NPCM7XX) += npcm750-pwm-fan.o
obj-$(CONFIG_SENSORS_NSA320) += nsa320-hwmon.o
obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 9ef84998c7f3..90837f7c7d0f 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -194,8 +194,7 @@ struct adt7475_data {
struct mutex lock;
unsigned long measure_updated;
- unsigned long limits_updated;
- char valid;
+ bool valid;
u8 config4;
u8 config5;
@@ -326,6 +325,9 @@ static ssize_t show_voltage(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
unsigned short val;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
switch (sattr->nr) {
case ALARM:
return sprintf(buf, "%d\n",
@@ -381,6 +383,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
int out;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
switch (sattr->nr) {
case HYSTERSIS:
mutex_lock(&data->lock);
@@ -625,6 +630,9 @@ static ssize_t show_point2(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
int out, val;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
mutex_lock(&data->lock);
out = (data->range[sattr->index] >> 4) & 0x0F;
val = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
@@ -683,6 +691,9 @@ static ssize_t show_tach(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
int out;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
if (sattr->nr == ALARM)
out = (data->alarms >> (sattr->index + 10)) & 1;
else
@@ -720,6 +731,9 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = adt7475_update_device(dev);
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->pwm[sattr->nr][sattr->index]);
}
@@ -729,6 +743,9 @@ static ssize_t show_pwmchan(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = adt7475_update_device(dev);
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->pwmchan[sattr->index]);
}
@@ -738,6 +755,9 @@ static ssize_t show_pwmctrl(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = adt7475_update_device(dev);
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->pwmctl[sattr->index]);
}
@@ -945,6 +965,9 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
int i = clamp_val(data->range[sattr->index] & 0xf, 0,
ARRAY_SIZE(pwmfreq_table) - 1);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", pwmfreq_table[i]);
}
@@ -1035,6 +1058,10 @@ static ssize_t cpu0_vid_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct adt7475_data *data = adt7475_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
@@ -1385,6 +1412,121 @@ static void adt7475_remove_files(struct i2c_client *client,
sysfs_remove_group(&client->dev.kobj, &vid_attr_group);
}
+static int adt7475_update_limits(struct i2c_client *client)
+{
+ struct adt7475_data *data = i2c_get_clientdata(client);
+ int i;
+ int ret;
+
+ ret = adt7475_read(REG_CONFIG4);
+ if (ret < 0)
+ return ret;
+ data->config4 = ret;
+
+ ret = adt7475_read(REG_CONFIG5);
+ if (ret < 0)
+ return ret;
+ data->config5 = ret;
+
+ for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
+ if (!(data->has_voltage & (1 << i)))
+ continue;
+ /* Adjust values so they match the input precision */
+ ret = adt7475_read(VOLTAGE_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->voltage[MIN][i] = ret << 2;
+
+ ret = adt7475_read(VOLTAGE_MAX_REG(i));
+ if (ret < 0)
+ return ret;
+ data->voltage[MAX][i] = ret << 2;
+ }
+
+ if (data->has_voltage & (1 << 5)) {
+ ret = adt7475_read(REG_VTT_MIN);
+ if (ret < 0)
+ return ret;
+ data->voltage[MIN][5] = ret << 2;
+
+ ret = adt7475_read(REG_VTT_MAX);
+ if (ret < 0)
+ return ret;
+ data->voltage[MAX][5] = ret << 2;
+ }
+
+ for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
+ /* Adjust values so they match the input precision */
+ ret = adt7475_read(TEMP_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[MIN][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_MAX_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[MAX][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_TMIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[AUTOMIN][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_THERM_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[THERM][i] = ret << 2;
+
+ ret = adt7475_read(TEMP_OFFSET_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[OFFSET][i] = ret;
+ }
+ adt7475_read_hystersis(client);
+
+ for (i = 0; i < ADT7475_TACH_COUNT; i++) {
+ if (i == 3 && !data->has_fan4)
+ continue;
+ ret = adt7475_read_word(client, TACH_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->tach[MIN][i] = ret;
+ }
+
+ for (i = 0; i < ADT7475_PWM_COUNT; i++) {
+ if (i == 1 && !data->has_pwm2)
+ continue;
+ ret = adt7475_read(PWM_MAX_REG(i));
+ if (ret < 0)
+ return ret;
+ data->pwm[MAX][i] = ret;
+
+ ret = adt7475_read(PWM_MIN_REG(i));
+ if (ret < 0)
+ return ret;
+ data->pwm[MIN][i] = ret;
+ /* Set the channel and control information */
+ adt7475_read_pwm(client, i);
+ }
+
+ ret = adt7475_read(TEMP_TRANGE_REG(0));
+ if (ret < 0)
+ return ret;
+ data->range[0] = ret;
+
+ ret = adt7475_read(TEMP_TRANGE_REG(1));
+ if (ret < 0)
+ return ret;
+ data->range[1] = ret;
+
+ ret = adt7475_read(TEMP_TRANGE_REG(2));
+ if (ret < 0)
+ return ret;
+ data->range[2] = ret;
+
+ return 0;
+}
+
static int adt7475_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1562,6 +1704,11 @@ static int adt7475_probe(struct i2c_client *client,
(data->bypass_attn & (1 << 3)) ? " in3" : "",
(data->bypass_attn & (1 << 4)) ? " in4" : "");
+	/* Limits and settings should never change, so update them only once */
+ ret = adt7475_update_limits(client);
+ if (ret)
+ goto eremove;
+
return 0;
eremove:
@@ -1658,121 +1805,122 @@ static void adt7475_read_pwm(struct i2c_client *client, int index)
}
}
-static struct adt7475_data *adt7475_update_device(struct device *dev)
+static int adt7475_update_measure(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adt7475_data *data = i2c_get_clientdata(client);
u16 ext;
int i;
+ int ret;
- mutex_lock(&data->lock);
+ ret = adt7475_read(REG_STATUS2);
+ if (ret < 0)
+ return ret;
+ data->alarms = ret << 8;
- /* Measurement values update every 2 seconds */
- if (time_after(jiffies, data->measure_updated + HZ * 2) ||
- !data->valid) {
- data->alarms = adt7475_read(REG_STATUS2) << 8;
- data->alarms |= adt7475_read(REG_STATUS1);
-
- ext = (adt7475_read(REG_EXTEND2) << 8) |
- adt7475_read(REG_EXTEND1);
- for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
- if (!(data->has_voltage & (1 << i)))
- continue;
- data->voltage[INPUT][i] =
- (adt7475_read(VOLTAGE_REG(i)) << 2) |
- ((ext >> (i * 2)) & 3);
- }
+ ret = adt7475_read(REG_STATUS1);
+ if (ret < 0)
+ return ret;
+ data->alarms |= ret;
- for (i = 0; i < ADT7475_TEMP_COUNT; i++)
- data->temp[INPUT][i] =
- (adt7475_read(TEMP_REG(i)) << 2) |
- ((ext >> ((i + 5) * 2)) & 3);
+ ret = adt7475_read(REG_EXTEND2);
+ if (ret < 0)
+ return ret;
- if (data->has_voltage & (1 << 5)) {
- data->alarms |= adt7475_read(REG_STATUS4) << 24;
- ext = adt7475_read(REG_EXTEND3);
- data->voltage[INPUT][5] = adt7475_read(REG_VTT) << 2 |
- ((ext >> 4) & 3);
- }
+ ext = (ret << 8);
- for (i = 0; i < ADT7475_TACH_COUNT; i++) {
- if (i == 3 && !data->has_fan4)
- continue;
- data->tach[INPUT][i] =
- adt7475_read_word(client, TACH_REG(i));
- }
+ ret = adt7475_read(REG_EXTEND1);
+ if (ret < 0)
+ return ret;
- /* Updated by hw when in auto mode */
- for (i = 0; i < ADT7475_PWM_COUNT; i++) {
- if (i == 1 && !data->has_pwm2)
- continue;
- data->pwm[INPUT][i] = adt7475_read(PWM_REG(i));
- }
+ ext |= ret;
+
+ for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
+ if (!(data->has_voltage & (1 << i)))
+ continue;
+ ret = adt7475_read(VOLTAGE_REG(i));
+ if (ret < 0)
+ return ret;
+ data->voltage[INPUT][i] =
+ (ret << 2) |
+ ((ext >> (i * 2)) & 3);
+ }
- if (data->has_vid)
- data->vid = adt7475_read(REG_VID) & 0x3f;
+ for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
+ ret = adt7475_read(TEMP_REG(i));
+ if (ret < 0)
+ return ret;
+ data->temp[INPUT][i] =
+ (ret << 2) |
+ ((ext >> ((i + 5) * 2)) & 3);
+ }
- data->measure_updated = jiffies;
+ if (data->has_voltage & (1 << 5)) {
+ ret = adt7475_read(REG_STATUS4);
+ if (ret < 0)
+ return ret;
+ data->alarms |= ret << 24;
+
+ ret = adt7475_read(REG_EXTEND3);
+ if (ret < 0)
+ return ret;
+ ext = ret;
+
+ ret = adt7475_read(REG_VTT);
+ if (ret < 0)
+ return ret;
+ data->voltage[INPUT][5] = ret << 2 |
+ ((ext >> 4) & 3);
}
- /* Limits and settings, should never change update every 60 seconds */
- if (time_after(jiffies, data->limits_updated + HZ * 60) ||
- !data->valid) {
- data->config4 = adt7475_read(REG_CONFIG4);
- data->config5 = adt7475_read(REG_CONFIG5);
-
- for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
- if (!(data->has_voltage & (1 << i)))
- continue;
- /* Adjust values so they match the input precision */
- data->voltage[MIN][i] =
- adt7475_read(VOLTAGE_MIN_REG(i)) << 2;
- data->voltage[MAX][i] =
- adt7475_read(VOLTAGE_MAX_REG(i)) << 2;
- }
+ for (i = 0; i < ADT7475_TACH_COUNT; i++) {
+ if (i == 3 && !data->has_fan4)
+ continue;
+ ret = adt7475_read_word(client, TACH_REG(i));
+ if (ret < 0)
+ return ret;
+ data->tach[INPUT][i] = ret;
+ }
- if (data->has_voltage & (1 << 5)) {
- data->voltage[MIN][5] = adt7475_read(REG_VTT_MIN) << 2;
- data->voltage[MAX][5] = adt7475_read(REG_VTT_MAX) << 2;
- }
+ /* Updated by hw when in auto mode */
+ for (i = 0; i < ADT7475_PWM_COUNT; i++) {
+ if (i == 1 && !data->has_pwm2)
+ continue;
+ ret = adt7475_read(PWM_REG(i));
+ if (ret < 0)
+ return ret;
+ data->pwm[INPUT][i] = ret;
+ }
- for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
- /* Adjust values so they match the input precision */
- data->temp[MIN][i] =
- adt7475_read(TEMP_MIN_REG(i)) << 2;
- data->temp[MAX][i] =
- adt7475_read(TEMP_MAX_REG(i)) << 2;
- data->temp[AUTOMIN][i] =
- adt7475_read(TEMP_TMIN_REG(i)) << 2;
- data->temp[THERM][i] =
- adt7475_read(TEMP_THERM_REG(i)) << 2;
- data->temp[OFFSET][i] =
- adt7475_read(TEMP_OFFSET_REG(i));
- }
- adt7475_read_hystersis(client);
+ if (data->has_vid) {
+ ret = adt7475_read(REG_VID);
+ if (ret < 0)
+ return ret;
+ data->vid = ret & 0x3f;
+ }
- for (i = 0; i < ADT7475_TACH_COUNT; i++) {
- if (i == 3 && !data->has_fan4)
- continue;
- data->tach[MIN][i] =
- adt7475_read_word(client, TACH_MIN_REG(i));
- }
+ return 0;
+}
- for (i = 0; i < ADT7475_PWM_COUNT; i++) {
- if (i == 1 && !data->has_pwm2)
- continue;
- data->pwm[MAX][i] = adt7475_read(PWM_MAX_REG(i));
- data->pwm[MIN][i] = adt7475_read(PWM_MIN_REG(i));
- /* Set the channel and control information */
- adt7475_read_pwm(client, i);
- }
+static struct adt7475_data *adt7475_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adt7475_data *data = i2c_get_clientdata(client);
+ int ret;
- data->range[0] = adt7475_read(TEMP_TRANGE_REG(0));
- data->range[1] = adt7475_read(TEMP_TRANGE_REG(1));
- data->range[2] = adt7475_read(TEMP_TRANGE_REG(2));
+ mutex_lock(&data->lock);
- data->limits_updated = jiffies;
- data->valid = 1;
+ /* Measurement values update every 2 seconds */
+ if (time_after(jiffies, data->measure_updated + HZ * 2) ||
+ !data->valid) {
+ ret = adt7475_update_measure(dev);
+ if (ret) {
+ data->valid = false;
+ mutex_unlock(&data->lock);
+ return ERR_PTR(ret);
+ }
+ data->measure_updated = jiffies;
+ data->valid = true;
}
mutex_unlock(&data->lock);
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 1ea7ca510f84..aaebeb726d6a 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -443,8 +443,10 @@ static int emc1403_probe(struct i2c_client *client,
switch (id->driver_data) {
case emc1404:
data->groups[2] = &emc1404_group;
+ /* fall through */
case emc1403:
data->groups[1] = &emc1403_group;
+ /* fall through */
case emc1402:
data->groups[0] = &emc1402_group;
}
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index e88c01961948..33d51281272b 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -394,12 +394,16 @@ static const char * const hwmon_power_attr_templates[] = {
[hwmon_power_cap_hyst] = "power%d_cap_hyst",
[hwmon_power_cap_max] = "power%d_cap_max",
[hwmon_power_cap_min] = "power%d_cap_min",
+ [hwmon_power_min] = "power%d_min",
[hwmon_power_max] = "power%d_max",
+ [hwmon_power_lcrit] = "power%d_lcrit",
[hwmon_power_crit] = "power%d_crit",
[hwmon_power_label] = "power%d_label",
[hwmon_power_alarm] = "power%d_alarm",
[hwmon_power_cap_alarm] = "power%d_cap_alarm",
+ [hwmon_power_min_alarm] = "power%d_min_alarm",
[hwmon_power_max_alarm] = "power%d_max_alarm",
+ [hwmon_power_lcrit_alarm] = "power%d_lcrit_alarm",
[hwmon_power_crit_alarm] = "power%d_crit_alarm",
};
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 69031a0f7ed2..2f3f875c06ac 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -22,7 +22,6 @@
* struct iio_hwmon_state - device instance state
* @channels: filled with array of channels from iio
* @num_channels: number of channels in channels (saves counting twice)
- * @hwmon_dev: associated hwmon device
* @attr_group: the group of attributes
* @groups: null terminated array of attribute groups
* @attrs: null terminated array of attribute pointers.
@@ -30,7 +29,6 @@
struct iio_hwmon_state {
struct iio_channel *channels;
int num_channels;
- struct device *hwmon_dev;
struct attribute_group attr_group;
const struct attribute_group *groups[2];
struct attribute **attrs;
@@ -68,12 +66,13 @@ static int iio_hwmon_probe(struct platform_device *pdev)
enum iio_chan_type type;
struct iio_channel *channels;
const char *name = "iio_hwmon";
+ struct device *hwmon_dev;
char *sname;
if (dev->of_node && dev->of_node->name)
name = dev->of_node->name;
- channels = iio_channel_get_all(dev);
+ channels = devm_iio_channel_get_all(dev);
if (IS_ERR(channels)) {
if (PTR_ERR(channels) == -ENODEV)
return -EPROBE_DEFER;
@@ -81,10 +80,8 @@ static int iio_hwmon_probe(struct platform_device *pdev)
}
st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (st == NULL)
+ return -ENOMEM;
st->channels = channels;
@@ -95,22 +92,18 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->attrs = devm_kcalloc(dev,
st->num_channels + 1, sizeof(*st->attrs),
GFP_KERNEL);
- if (st->attrs == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (st->attrs == NULL)
+ return -ENOMEM;
for (i = 0; i < st->num_channels; i++) {
a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL);
- if (a == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (a == NULL)
+ return -ENOMEM;
sysfs_attr_init(&a->dev_attr.attr);
ret = iio_get_channel_type(&st->channels[i], &type);
if (ret < 0)
- goto error_release_channels;
+ return ret;
switch (type) {
case IIO_VOLTAGE:
@@ -134,13 +127,11 @@ static int iio_hwmon_probe(struct platform_device *pdev)
humidity_i++);
break;
default:
- ret = -EINVAL;
- goto error_release_channels;
- }
- if (a->dev_attr.attr.name == NULL) {
- ret = -ENOMEM;
- goto error_release_channels;
+ return -EINVAL;
}
+ if (a->dev_attr.attr.name == NULL)
+ return -ENOMEM;
+
a->dev_attr.show = iio_hwmon_read_val;
a->dev_attr.attr.mode = S_IRUGO;
a->index = i;
@@ -151,34 +142,13 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->groups[0] = &st->attr_group;
sname = devm_kstrdup(dev, name, GFP_KERNEL);
- if (!sname) {
- ret = -ENOMEM;
- goto error_release_channels;
- }
+ if (!sname)
+ return -ENOMEM;
strreplace(sname, '-', '_');
- st->hwmon_dev = hwmon_device_register_with_groups(dev, sname, st,
- st->groups);
- if (IS_ERR(st->hwmon_dev)) {
- ret = PTR_ERR(st->hwmon_dev);
- goto error_release_channels;
- }
- platform_set_drvdata(pdev, st);
- return 0;
-
-error_release_channels:
- iio_channel_release_all(channels);
- return ret;
-}
-
-static int iio_hwmon_remove(struct platform_device *pdev)
-{
- struct iio_hwmon_state *st = platform_get_drvdata(pdev);
-
- hwmon_device_unregister(st->hwmon_dev);
- iio_channel_release_all(st->channels);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, sname, st,
+ st->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct of_device_id iio_hwmon_of_match[] = {
@@ -193,7 +163,6 @@ static struct platform_driver __refdata iio_hwmon_driver = {
.of_match_table = iio_hwmon_of_match,
},
.probe = iio_hwmon_probe,
- .remove = iio_hwmon_remove,
};
module_platform_driver(iio_hwmon_driver);
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 17c6460ae351..bb15d7816a29 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -99,12 +99,8 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen 7 1700X", 20000 },
{ 0x17, "AMD Ryzen 7 1800X", 20000 },
{ 0x17, "AMD Ryzen 7 2700X", 10000 },
- { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
- { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
- { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
- { 0x17, "AMD Ryzen Threadripper 1950", 10000 },
- { 0x17, "AMD Ryzen Threadripper 1920", 10000 },
- { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
+ { 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */
+ { 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};
static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
new file mode 100644
index 000000000000..de46577c7d5a
--- /dev/null
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+// Copyright (c) 2018 Vadim Pasternak <vadimp@mellanox.com>
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+
+#define MLXREG_FAN_MAX_TACHO 12
+#define MLXREG_FAN_MAX_STATE 10
+#define MLXREG_FAN_MIN_DUTY 51 /* 20% */
+#define MLXREG_FAN_MAX_DUTY 255 /* 100% */
+/*
+ * Minimum and maximum allowed FAN speed in percent: from 20% to 100%. Values
+ * of MLXREG_FAN_MAX_STATE + x, where x is between 2 and 10, are used to set
+ * the dynamic minimum FAN speed. For example, if the value is set to 14 (40%),
+ * the cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10,
+ * giving PWM speeds in percent of 40, 40, 40, 40, 40, 50, 60, 70, 80, 90, 100.
+ */
+#define MLXREG_FAN_SPEED_MIN (MLXREG_FAN_MAX_STATE + 2)
+#define MLXREG_FAN_SPEED_MAX (MLXREG_FAN_MAX_STATE * 2)
+#define MLXREG_FAN_SPEED_MIN_LEVEL 2 /* 20 percent */
+#define MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF 44
+#define MLXREG_FAN_TACHO_DIVIDER_DEF 1132
+/*
+ * The FAN datasheet defines the formula for RPM calculation as RPM = 15/t-high.
+ * The logic in a programmable device measures the time t-high by sampling the
+ * tachometer every t-sample (with the default value 11.32 uS) and incrementing
+ * a counter (N) as long as the pulse has not changed:
+ * RPM = 15 / (t-sample * (K + Regval)), where:
+ * Regval: is the value read from the programmable device register;
+ * - 0xff - represents a tachometer fault;
+ * - 0xfe - represents the tachometer minimum value, which is 4444 RPM;
+ * - 0x00 - represents the tachometer maximum value, which is 300000 RPM;
+ * K: is 44 and represents the minimum allowed samples per pulse;
+ * N: is equal to K + Regval;
+ * In order to calculate RPM from the register value the following formula is
+ * used: RPM = 15 / ((Regval + K) * 11.32 * 10^(-6)), which in the
+ * default case simplifies to:
+ * RPM = 15000000 * 100 / ((Regval + 44) * 1132);
+ * - for Regval 0x00, RPM will be 15000000 * 100 / (44 * 1132) = 30115;
+ * - for Regval 0xfe, RPM will be 15000000 * 100 / ((254 + 44) * 1132) = 4446;
+ * In the general case the formula becomes:
+ * RPM = 15000000 * 100 / ((Regval + samples) * divider).
+ */
+#define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \
+ ((rval) + (s)) * (d)))
+#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask)))
+#define MLXREG_FAN_PWM_DUTY2STATE(duty) (DIV_ROUND_CLOSEST((duty) * \
+ MLXREG_FAN_MAX_STATE, \
+ MLXREG_FAN_MAX_DUTY))
+#define MLXREG_FAN_PWM_STATE2DUTY(stat) (DIV_ROUND_CLOSEST((stat) * \
+ MLXREG_FAN_MAX_DUTY, \
+ MLXREG_FAN_MAX_STATE))
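
Note: plugging the default samples (44) and divider (1132) into
MLXREG_FAN_GET_RPM() reproduces the bounds quoted in the comment above, and
the duty/state macros round as expected:

	/* MLXREG_FAN_GET_RPM(rval, d, s) = 15000000 * 100 / ((rval + s) * d) */
	/* rval = 0x00: 1500000000 / (44 * 1132)         ~= 30115 RPM (max)  */
	/* rval = 0xfe: 1500000000 / ((254 + 44) * 1132) ~=  4446 RPM (min)  */
	/* duty 153 -> state DIV_ROUND_CLOSEST(153 * 10, 255) = 6 (60%)      */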
+
+/*
+ * struct mlxreg_fan_tacho - tachometer data (internal use):
+ *
+ * @connected: indicates if tachometer is connected;
+ * @reg: register offset;
+ * @mask: fault mask;
+ */
+struct mlxreg_fan_tacho {
+ bool connected;
+ u32 reg;
+ u32 mask;
+};
+
+/*
+ * struct mlxreg_fan_pwm - PWM data (internal use):
+ *
+ * @connected: indicates if PWM is connected;
+ * @reg: register offset;
+ */
+struct mlxreg_fan_pwm {
+ bool connected;
+ u32 reg;
+};
+
+/*
+ * struct mlxreg_fan - private data (internal use):
+ *
+ * @dev: basic device;
+ * @regmap: register map of parent device;
+ * @tacho: tachometer data;
+ * @pwm: PWM data;
+ * @samples: minimum allowed samples per pulse;
+ * @divider: divider value for tachometer RPM calculation;
+ * @cooling: cooling device levels;
+ * @cdev: cooling device;
+ */
+struct mlxreg_fan {
+ struct device *dev;
+ void *regmap;
+ struct mlxreg_core_platform_data *pdata;
+ struct mlxreg_fan_tacho tacho[MLXREG_FAN_MAX_TACHO];
+ struct mlxreg_fan_pwm pwm;
+ int samples;
+ int divider;
+ u8 cooling_levels[MLXREG_FAN_MAX_STATE + 1];
+ struct thermal_cooling_device *cdev;
+};
+
+static int
+mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct mlxreg_fan *fan = dev_get_drvdata(dev);
+ struct mlxreg_fan_tacho *tacho;
+ u32 regval;
+ int err;
+
+ switch (type) {
+ case hwmon_fan:
+ tacho = &fan->tacho[channel];
+ switch (attr) {
+ case hwmon_fan_input:
+ err = regmap_read(fan->regmap, tacho->reg, &regval);
+ if (err)
+ return err;
+
+ *val = MLXREG_FAN_GET_RPM(regval, fan->divider,
+ fan->samples);
+ break;
+
+ case hwmon_fan_fault:
+ err = regmap_read(fan->regmap, tacho->reg, &regval);
+ if (err)
+ return err;
+
+ *val = MLXREG_FAN_GET_FAULT(regval, tacho->mask);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ if (err)
+ return err;
+
+ *val = regval;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct mlxreg_fan *fan = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < MLXREG_FAN_MIN_DUTY ||
+ val > MLXREG_FAN_MAX_DUTY)
+ return -EINVAL;
+ return regmap_write(fan->regmap, fan->pwm.reg, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t
+mlxreg_fan_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ if (!(((struct mlxreg_fan *)data)->tacho[channel].connected))
+ return 0;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_fault:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+
+ case hwmon_pwm:
+ if (!(((struct mlxreg_fan *)data)->pwm.connected))
+ return 0;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const u32 mlxreg_fan_hwmon_fan_config[] = {
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ 0
+};
+
+static const struct hwmon_channel_info mlxreg_fan_hwmon_fan = {
+ .type = hwmon_fan,
+ .config = mlxreg_fan_hwmon_fan_config,
+};
+
+static const u32 mlxreg_fan_hwmon_pwm_config[] = {
+ HWMON_PWM_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info mlxreg_fan_hwmon_pwm = {
+ .type = hwmon_pwm,
+ .config = mlxreg_fan_hwmon_pwm_config,
+};
+
+static const struct hwmon_channel_info *mlxreg_fan_hwmon_info[] = {
+ &mlxreg_fan_hwmon_fan,
+ &mlxreg_fan_hwmon_pwm,
+ NULL
+};
+
+static const struct hwmon_ops mlxreg_fan_hwmon_hwmon_ops = {
+ .is_visible = mlxreg_fan_is_visible,
+ .read = mlxreg_fan_read,
+ .write = mlxreg_fan_write,
+};
+
+static const struct hwmon_chip_info mlxreg_fan_hwmon_chip_info = {
+ .ops = &mlxreg_fan_hwmon_hwmon_ops,
+ .info = mlxreg_fan_hwmon_info,
+};
+
+static int mlxreg_fan_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = MLXREG_FAN_MAX_STATE;
+ return 0;
+}
+
+static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+
+{
+ struct mlxreg_fan *fan = cdev->devdata;
+ u32 regval;
+ int err;
+
+ err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query PWM duty\n");
+ return err;
+ }
+
+ *state = MLXREG_FAN_PWM_DUTY2STATE(regval);
+
+ return 0;
+}
+
+static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+
+{
+ struct mlxreg_fan *fan = cdev->devdata;
+ unsigned long cur_state;
+ u32 regval;
+ int i;
+ int err;
+
+ /*
+ * Verify whether this request changes the allowed fan dynamical
+ * minimum. If it does, update the cooling levels accordingly, and
+ * update the current state if it is below the newly requested
+ * minimum. For example, if the minimum state is to be changed from
+ * 4 to 6, fan->cooling_levels[0..5] are all set to 6, so a current
+ * state of 5 (fan->cooling_levels[5]) is remapped to the new
+ * minimum of 6.
+ */
+ if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
+ state -= MLXREG_FAN_MAX_STATE;
+ for (i = 0; i < state; i++)
+ fan->cooling_levels[i] = state;
+ for (i = state; i <= MLXREG_FAN_MAX_STATE; i++)
+ fan->cooling_levels[i] = i;
+
+ err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query PWM duty\n");
+ return err;
+ }
+
+ cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
+ if (state < cur_state)
+ return 0;
+
+ state = cur_state;
+ }
+
+ if (state > MLXREG_FAN_MAX_STATE)
+ return -EINVAL;
+
+ /* Normalize the state to the valid speed range. */
+ state = fan->cooling_levels[state];
+ err = regmap_write(fan->regmap, fan->pwm.reg,
+ MLXREG_FAN_PWM_STATE2DUTY(state));
+ if (err) {
+ dev_err(fan->dev, "Failed to write PWM duty\n");
+ return err;
+ }
+ return 0;
+}
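To make the level-rewrite loops above concrete, a stand-alone sketch (not part of the patch) of what they produce when the requested minimum becomes 6; MLXREG_FAN_MAX_STATE is assumed to be 10:

	#include <stdio.h>

	#define MAX_STATE 10	/* assumed MLXREG_FAN_MAX_STATE */

	int main(void)
	{
		unsigned char levels[MAX_STATE + 1];
		int state = 6, i;	/* newly requested dynamical minimum */

		for (i = 0; i < state; i++)
			levels[i] = state;	/* states 0..5 now map to 6 */
		for (i = state; i <= MAX_STATE; i++)
			levels[i] = i;		/* states 6..10 map to themselves */

		for (i = 0; i <= MAX_STATE; i++)
			printf("state %2d -> level %d\n", i, levels[i]);
		return 0;
	}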
+
+static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
+ .get_max_state = mlxreg_fan_get_max_state,
+ .get_cur_state = mlxreg_fan_get_cur_state,
+ .set_cur_state = mlxreg_fan_set_cur_state,
+};
+
+static int mlxreg_fan_config(struct mlxreg_fan *fan,
+ struct mlxreg_core_platform_data *pdata)
+{
+ struct mlxreg_core_data *data = pdata->data;
+ bool configured = false;
+ int tacho_num = 0, i;
+
+ fan->samples = MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF;
+ fan->divider = MLXREG_FAN_TACHO_DIVIDER_DEF;
+ for (i = 0; i < pdata->counter; i++, data++) {
+ if (strnstr(data->label, "tacho", sizeof(data->label))) {
+ if (tacho_num == MLXREG_FAN_MAX_TACHO) {
+ dev_err(fan->dev, "too many tacho entries: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ fan->tacho[tacho_num].reg = data->reg;
+ fan->tacho[tacho_num].mask = data->mask;
+ fan->tacho[tacho_num++].connected = true;
+ } else if (strnstr(data->label, "pwm", sizeof(data->label))) {
+ if (fan->pwm.connected) {
+ dev_err(fan->dev, "duplicate pwm entry: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ fan->pwm.reg = data->reg;
+ fan->pwm.connected = true;
+ } else if (strnstr(data->label, "conf", sizeof(data->label))) {
+ if (configured) {
+ dev_err(fan->dev, "duplicate conf entry: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ /* Validate that conf parameters are not zero. */
+ if (!data->mask || !data->bit) {
+ dev_err(fan->dev, "invalid conf entry params: %s\n",
+ data->label);
+ return -EINVAL;
+ }
+ fan->samples = data->mask;
+ fan->divider = data->bit;
+ configured = true;
+ } else {
+ dev_err(fan->dev, "invalid label: %s\n", data->label);
+ return -EINVAL;
+ }
+ }
+
+ /* Init cooling levels per PWM state. */
+ for (i = 0; i < MLXREG_FAN_SPEED_MIN_LEVEL; i++)
+ fan->cooling_levels[i] = MLXREG_FAN_SPEED_MIN_LEVEL;
+ for (i = MLXREG_FAN_SPEED_MIN_LEVEL; i <= MLXREG_FAN_MAX_STATE; i++)
+ fan->cooling_levels[i] = i;
+
+ return 0;
+}
+
+static int mlxreg_fan_probe(struct platform_device *pdev)
+{
+ struct mlxreg_core_platform_data *pdata;
+ struct mlxreg_fan *fan;
+ struct device *hwm;
+ int err;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Failed to get platform data.\n");
+ return -EINVAL;
+ }
+
+ fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
+ if (!fan)
+ return -ENOMEM;
+
+ fan->dev = &pdev->dev;
+ fan->regmap = pdata->regmap;
+ platform_set_drvdata(pdev, fan);
+
+ err = mlxreg_fan_config(fan, pdata);
+ if (err)
+ return err;
+
+ hwm = devm_hwmon_device_register_with_info(&pdev->dev, "mlxreg_fan",
+ fan,
+ &mlxreg_fan_hwmon_chip_info,
+ NULL);
+ if (IS_ERR(hwm)) {
+ dev_err(&pdev->dev, "Failed to register hwmon device\n");
+ return PTR_ERR(hwm);
+ }
+
+ if (IS_REACHABLE(CONFIG_THERMAL)) {
+ fan->cdev = thermal_cooling_device_register("mlxreg_fan", fan,
+ &mlxreg_fan_cooling_ops);
+ if (IS_ERR(fan->cdev)) {
+ dev_err(&pdev->dev, "Failed to register cooling device\n");
+ return PTR_ERR(fan->cdev);
+ }
+ }
+
+ return 0;
+}
+
+static int mlxreg_fan_remove(struct platform_device *pdev)
+{
+ struct mlxreg_fan *fan = platform_get_drvdata(pdev);
+
+ if (IS_REACHABLE(CONFIG_THERMAL))
+ thermal_cooling_device_unregister(fan->cdev);
+
+ return 0;
+}
+
+static struct platform_driver mlxreg_fan_driver = {
+ .driver = {
+ .name = "mlxreg-fan",
+ },
+ .probe = mlxreg_fan_probe,
+ .remove = mlxreg_fan_remove,
+};
+
+module_platform_driver(mlxreg_fan_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox FAN driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mlxreg-fan");
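The driver is bound purely through platform data; the following is a hedged sketch (not part of the patch) of what a parent driver might register, based only on the label names and fields consumed by mlxreg_fan_config() above. The register offsets are hypothetical:

	#include <linux/platform_data/mlxreg.h>

	/* Hypothetical platform data feeding mlxreg_fan_config(). */
	static struct mlxreg_core_data mlxreg_fan_io_data[] = {
		{ .label = "tacho1", .reg = 0xe4, .mask = GENMASK(7, 0) },
		{ .label = "tacho2", .reg = 0xe5, .mask = GENMASK(7, 0) },
		{ .label = "pwm1", .reg = 0xe3 },
		/* optional "conf" entry: .mask -> samples, .bit -> divider */
	};

	static struct mlxreg_core_platform_data mlxreg_fan_pdata = {
		.data = mlxreg_fan_io_data,
		.counter = ARRAY_SIZE(mlxreg_fan_io_data),
	};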
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index f9d1349c3286..c6bd61e4695a 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -1050,8 +1050,8 @@ struct nct6775_data {
u64 beeps;
u8 pwm_num; /* number of pwm */
- u8 pwm_mode[NUM_FAN]; /* 1->DC variable voltage,
- * 0->PWM variable duty cycle
+ u8 pwm_mode[NUM_FAN]; /* 0->DC variable voltage,
+ * 1->PWM variable duty cycle
*/
enum pwm_enable pwm_enable[NUM_FAN];
/* 0->off
@@ -2541,7 +2541,7 @@ static void pwm_update_registers(struct nct6775_data *data, int nr)
case thermal_cruise:
nct6775_write_value(data, data->REG_TARGET[nr],
data->target_temp[nr]);
- /* intentional */
+ /* fall through */
default:
reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]);
reg = (reg & ~data->tolerance_mask) |
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 95a68ab175c7..7815ddf149f6 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -77,7 +77,7 @@ struct nct7904_data {
};
/* Access functions */
-static int nct7904_bank_lock(struct nct7904_data *data, unsigned bank)
+static int nct7904_bank_lock(struct nct7904_data *data, unsigned int bank)
{
int ret;
@@ -99,7 +99,7 @@ static inline void nct7904_bank_release(struct nct7904_data *data)
/* Read 1-byte register. Returns unsigned reg or -ERRNO on error. */
static int nct7904_read_reg(struct nct7904_data *data,
- unsigned bank, unsigned reg)
+ unsigned int bank, unsigned int reg)
{
struct i2c_client *client = data->client;
int ret;
@@ -117,7 +117,7 @@ static int nct7904_read_reg(struct nct7904_data *data,
* -ERRNO on error.
*/
static int nct7904_read_reg16(struct nct7904_data *data,
- unsigned bank, unsigned reg)
+ unsigned int bank, unsigned int reg)
{
struct i2c_client *client = data->client;
int ret, hi;
@@ -139,7 +139,7 @@ static int nct7904_read_reg16(struct nct7904_data *data,
/* Write 1-byte register. Returns 0 or -ERRNO on error. */
static int nct7904_write_reg(struct nct7904_data *data,
- unsigned bank, unsigned reg, u8 val)
+ unsigned int bank, unsigned int reg, u8 val)
{
struct i2c_client *client = data->client;
int ret;
@@ -159,7 +159,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
unsigned int cnt, rpm;
int ret;
- switch(attr) {
+ switch (attr) {
case hwmon_fan_input:
ret = nct7904_read_reg16(data, BANK_0,
FANIN1_HV_REG + channel * 2);
@@ -200,7 +200,7 @@ static int nct7904_read_in(struct device *dev, u32 attr, int channel,
index = nct7904_chan_to_index[channel];
- switch(attr) {
+ switch (attr) {
case hwmon_in_input:
ret = nct7904_read_reg16(data, BANK_0,
VSEN1_HV_REG + index * 2);
@@ -236,7 +236,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
struct nct7904_data *data = dev_get_drvdata(dev);
int ret, temp;
- switch(attr) {
+ switch (attr) {
case hwmon_temp_input:
if (channel == 0)
ret = nct7904_read_reg16(data, BANK_0, LTD_HV_REG);
@@ -276,7 +276,7 @@ static int nct7904_read_pwm(struct device *dev, u32 attr, int channel,
struct nct7904_data *data = dev_get_drvdata(dev);
int ret;
- switch(attr) {
+ switch (attr) {
case hwmon_pwm_input:
ret = nct7904_read_reg(data, BANK_3, FANCTL1_OUT_REG + channel);
if (ret < 0)
@@ -301,7 +301,7 @@ static int nct7904_write_pwm(struct device *dev, u32 attr, int channel,
struct nct7904_data *data = dev_get_drvdata(dev);
int ret;
- switch(attr) {
+ switch (attr) {
case hwmon_pwm_input:
if (val < 0 || val > 255)
return -EINVAL;
@@ -322,7 +322,7 @@ static int nct7904_write_pwm(struct device *dev, u32 attr, int channel,
static umode_t nct7904_pwm_is_visible(const void *_data, u32 attr, int channel)
{
- switch(attr) {
+ switch (attr) {
case hwmon_pwm_input:
case hwmon_pwm_enable:
return S_IRUGO | S_IWUSR;
@@ -431,15 +431,15 @@ static const struct hwmon_channel_info nct7904_in = {
};
static const u32 nct7904_fan_config[] = {
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- 0
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ 0
};
static const struct hwmon_channel_info nct7904_fan = {
@@ -448,11 +448,11 @@ static const struct hwmon_channel_info nct7904_fan = {
};
static const u32 nct7904_pwm_config[] = {
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- 0
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ 0
};
static const struct hwmon_channel_info nct7904_pwm = {
@@ -461,16 +461,16 @@ static const struct hwmon_channel_info nct7904_pwm = {
};
static const u32 nct7904_temp_config[] = {
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- 0
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ 0
};
static const struct hwmon_channel_info nct7904_temp = {
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
new file mode 100644
index 000000000000..8474d601aa63
--- /dev/null
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -0,0 +1,1057 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2014-2018 Nuvoton Technology corporation.
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+
+/* NPCM7XX PWM registers */
+#define NPCM7XX_PWM_REG_BASE(base, n) ((base) + ((n) * 0x1000L))
+
+#define NPCM7XX_PWM_REG_PR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x00)
+#define NPCM7XX_PWM_REG_CSR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x04)
+#define NPCM7XX_PWM_REG_CR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x08)
+#define NPCM7XX_PWM_REG_CNRx(base, n, ch) \
+ (NPCM7XX_PWM_REG_BASE(base, n) + 0x0C + (12 * (ch)))
+#define NPCM7XX_PWM_REG_CMRx(base, n, ch) \
+ (NPCM7XX_PWM_REG_BASE(base, n) + 0x10 + (12 * (ch)))
+#define NPCM7XX_PWM_REG_PDRx(base, n, ch) \
+ (NPCM7XX_PWM_REG_BASE(base, n) + 0x14 + (12 * (ch)))
+#define NPCM7XX_PWM_REG_PIER(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x3C)
+#define NPCM7XX_PWM_REG_PIIR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x40)
+
+#define NPCM7XX_PWM_CTRL_CH0_MODE_BIT BIT(3)
+#define NPCM7XX_PWM_CTRL_CH1_MODE_BIT BIT(11)
+#define NPCM7XX_PWM_CTRL_CH2_MODE_BIT BIT(15)
+#define NPCM7XX_PWM_CTRL_CH3_MODE_BIT BIT(19)
+
+#define NPCM7XX_PWM_CTRL_CH0_INV_BIT BIT(2)
+#define NPCM7XX_PWM_CTRL_CH1_INV_BIT BIT(10)
+#define NPCM7XX_PWM_CTRL_CH2_INV_BIT BIT(14)
+#define NPCM7XX_PWM_CTRL_CH3_INV_BIT BIT(18)
+
+#define NPCM7XX_PWM_CTRL_CH0_EN_BIT BIT(0)
+#define NPCM7XX_PWM_CTRL_CH1_EN_BIT BIT(8)
+#define NPCM7XX_PWM_CTRL_CH2_EN_BIT BIT(12)
+#define NPCM7XX_PWM_CTRL_CH3_EN_BIT BIT(16)
+
+/* Define the maximum PWM channel number */
+#define NPCM7XX_PWM_MAX_CHN_NUM 8
+#define NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE 4
+#define NPCM7XX_PWM_MAX_MODULES 2
+
+/* Define the Counter Register; with CNR = 255, CMR = 255 matches 100% duty */
+#define NPCM7XX_PWM_COUNTER_DEFAULT_NUM 255
+#define NPCM7XX_PWM_CMR_DEFAULT_NUM 127
+#define NPCM7XX_PWM_CMR_MAX 255
+
+/* default all PWM channels PRESCALE2 = 1 */
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH0 0x4
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH1 0x40
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH2 0x400
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT_CH3 0x4000
+
+#define PWM_OUTPUT_FREQ_25KHZ 25000
+#define PWM_CNT_DEFAULT 256
+#define MIN_PRESCALE1 2
+#define NPCM7XX_PWM_PRESCALE_SHIFT_CH01 8
+
+#define NPCM7XX_PWM_PRESCALE2_DEFAULT (NPCM7XX_PWM_PRESCALE2_DEFAULT_CH0 | \
+ NPCM7XX_PWM_PRESCALE2_DEFAULT_CH1 | \
+ NPCM7XX_PWM_PRESCALE2_DEFAULT_CH2 | \
+ NPCM7XX_PWM_PRESCALE2_DEFAULT_CH3)
+
+#define NPCM7XX_PWM_CTRL_MODE_DEFAULT (NPCM7XX_PWM_CTRL_CH0_MODE_BIT | \
+ NPCM7XX_PWM_CTRL_CH1_MODE_BIT | \
+ NPCM7XX_PWM_CTRL_CH2_MODE_BIT | \
+ NPCM7XX_PWM_CTRL_CH3_MODE_BIT)
+
+/* NPCM7XX FAN Tacho registers */
+#define NPCM7XX_FAN_REG_BASE(base, n) ((base) + ((n) * 0x1000L))
+
+#define NPCM7XX_FAN_REG_TCNT1(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x00)
+#define NPCM7XX_FAN_REG_TCRA(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x02)
+#define NPCM7XX_FAN_REG_TCRB(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x04)
+#define NPCM7XX_FAN_REG_TCNT2(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x06)
+#define NPCM7XX_FAN_REG_TPRSC(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x08)
+#define NPCM7XX_FAN_REG_TCKC(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0A)
+#define NPCM7XX_FAN_REG_TMCTRL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0C)
+#define NPCM7XX_FAN_REG_TICTRL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0E)
+#define NPCM7XX_FAN_REG_TICLR(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x10)
+#define NPCM7XX_FAN_REG_TIEN(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x12)
+#define NPCM7XX_FAN_REG_TCPA(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x14)
+#define NPCM7XX_FAN_REG_TCPB(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x16)
+#define NPCM7XX_FAN_REG_TCPCFG(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x18)
+#define NPCM7XX_FAN_REG_TINASEL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x1A)
+#define NPCM7XX_FAN_REG_TINBSEL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x1C)
+
+#define NPCM7XX_FAN_TCKC_CLKX_NONE 0
+#define NPCM7XX_FAN_TCKC_CLK1_APB BIT(0)
+#define NPCM7XX_FAN_TCKC_CLK2_APB BIT(3)
+
+#define NPCM7XX_FAN_TMCTRL_TBEN BIT(6)
+#define NPCM7XX_FAN_TMCTRL_TAEN BIT(5)
+#define NPCM7XX_FAN_TMCTRL_TBEDG BIT(4)
+#define NPCM7XX_FAN_TMCTRL_TAEDG BIT(3)
+#define NPCM7XX_FAN_TMCTRL_MODE_5 BIT(2)
+
+#define NPCM7XX_FAN_TICLR_CLEAR_ALL GENMASK(5, 0)
+#define NPCM7XX_FAN_TICLR_TFCLR BIT(5)
+#define NPCM7XX_FAN_TICLR_TECLR BIT(4)
+#define NPCM7XX_FAN_TICLR_TDCLR BIT(3)
+#define NPCM7XX_FAN_TICLR_TCCLR BIT(2)
+#define NPCM7XX_FAN_TICLR_TBCLR BIT(1)
+#define NPCM7XX_FAN_TICLR_TACLR BIT(0)
+
+#define NPCM7XX_FAN_TIEN_ENABLE_ALL GENMASK(5, 0)
+#define NPCM7XX_FAN_TIEN_TFIEN BIT(5)
+#define NPCM7XX_FAN_TIEN_TEIEN BIT(4)
+#define NPCM7XX_FAN_TIEN_TDIEN BIT(3)
+#define NPCM7XX_FAN_TIEN_TCIEN BIT(2)
+#define NPCM7XX_FAN_TIEN_TBIEN BIT(1)
+#define NPCM7XX_FAN_TIEN_TAIEN BIT(0)
+
+#define NPCM7XX_FAN_TICTRL_TFPND BIT(5)
+#define NPCM7XX_FAN_TICTRL_TEPND BIT(4)
+#define NPCM7XX_FAN_TICTRL_TDPND BIT(3)
+#define NPCM7XX_FAN_TICTRL_TCPND BIT(2)
+#define NPCM7XX_FAN_TICTRL_TBPND BIT(1)
+#define NPCM7XX_FAN_TICTRL_TAPND BIT(0)
+
+#define NPCM7XX_FAN_TCPCFG_HIBEN BIT(7)
+#define NPCM7XX_FAN_TCPCFG_EQBEN BIT(6)
+#define NPCM7XX_FAN_TCPCFG_LOBEN BIT(5)
+#define NPCM7XX_FAN_TCPCFG_CPBSEL BIT(4)
+#define NPCM7XX_FAN_TCPCFG_HIAEN BIT(3)
+#define NPCM7XX_FAN_TCPCFG_EQAEN BIT(2)
+#define NPCM7XX_FAN_TCPCFG_LOAEN BIT(1)
+#define NPCM7XX_FAN_TCPCFG_CPASEL BIT(0)
+
+/* FAN General Definition */
+/* Define the maximum FAN channel number */
+#define NPCM7XX_FAN_MAX_MODULE 8
+#define NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE 2
+#define NPCM7XX_FAN_MAX_CHN_NUM 16
+
+/*
+ * Fan tach timeout (based on a 214843.75 Hz clock, 1 count = 4.654 us):
+ * timeout of ~95 ms ~= 0x5000 counts.
+ * (The minimum supported fan speed is ~640 RPM at 1 pulse/rev,
+ * 320 RPM at 2 pulses/rev, ... -- 10.6 Hz)
+ */
+#define NPCM7XX_FAN_TIMEOUT 0x5000
+#define NPCM7XX_FAN_TCNT 0xFFFF
+#define NPCM7XX_FAN_TCPA (NPCM7XX_FAN_TCNT - NPCM7XX_FAN_TIMEOUT)
+#define NPCM7XX_FAN_TCPB (NPCM7XX_FAN_TCNT - NPCM7XX_FAN_TIMEOUT)
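The approximate timeout arithmetic behind these defines, using only the figures quoted in the comment above (the APB frequency itself is not shown in this hunk):

	1 / 214843.75 Hz             ~= 4.654 us per count
	0x5000 counts = 20480        -> 20480 * 4.654 us ~= 95 ms
	1 / 95 ms                    ~= 10.5 Hz minimum pulse rate
	10.5 Hz * 60 / 1 pulse/rev   ~= 630 RPM (1 pulse per revolution)
	10.5 Hz * 60 / 2 pulses/rev  ~= 315 RPM (2 pulses per revolution)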
+
+#define NPCM7XX_FAN_POLL_TIMER_200MS 200
+#define NPCM7XX_FAN_DEFAULT_PULSE_PER_REVOLUTION 2
+#define NPCM7XX_FAN_TINASEL_FANIN_DEFAULT 0
+#define NPCM7XX_FAN_CLK_PRESCALE 255
+
+#define NPCM7XX_FAN_CMPA 0
+#define NPCM7XX_FAN_CMPB 1
+
+/* Map a (module, comparator) pair to the fan input number */
+#define NPCM7XX_FAN_INPUT(fan, cmp) (((fan) << 1) + (cmp))
+
+/* fan sample status */
+#define FAN_DISABLE 0xFF
+#define FAN_INIT 0x00
+#define FAN_PREPARE_TO_GET_FIRST_CAPTURE 0x01
+#define FAN_ENOUGH_SAMPLE 0x02
+
+struct npcm7xx_fan_dev {
+ u8 fan_st_flg;
+ u8 fan_pls_per_rev;
+ u16 fan_cnt;
+ u32 fan_cnt_tmp;
+};
+
+struct npcm7xx_cooling_device {
+ char name[THERMAL_NAME_LENGTH];
+ struct npcm7xx_pwm_fan_data *data;
+ struct thermal_cooling_device *tcdev;
+ int pwm_port;
+ u8 *cooling_levels;
+ u8 max_state;
+ u8 cur_state;
+};
+
+struct npcm7xx_pwm_fan_data {
+ void __iomem *pwm_base;
+ void __iomem *fan_base;
+ unsigned long pwm_clk_freq;
+ unsigned long fan_clk_freq;
+ struct clk *pwm_clk;
+ struct clk *fan_clk;
+ struct mutex pwm_lock[NPCM7XX_PWM_MAX_MODULES];
+ spinlock_t fan_lock[NPCM7XX_FAN_MAX_MODULE];
+ int fan_irq[NPCM7XX_FAN_MAX_MODULE];
+ bool pwm_present[NPCM7XX_PWM_MAX_CHN_NUM];
+ bool fan_present[NPCM7XX_FAN_MAX_CHN_NUM];
+ u32 input_clk_freq;
+ struct timer_list fan_timer;
+ struct npcm7xx_fan_dev fan_dev[NPCM7XX_FAN_MAX_CHN_NUM];
+ struct npcm7xx_cooling_device *cdev[NPCM7XX_PWM_MAX_CHN_NUM];
+ u8 fan_select;
+};
+
+static int npcm7xx_pwm_config_set(struct npcm7xx_pwm_fan_data *data,
+ int channel, u16 val)
+{
+ u32 pwm_ch = (channel % NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+ u32 module = (channel / NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+ u32 tmp_buf, ctrl_en_bit, env_bit;
+
+ /*
+ * Config PWM Comparator register for setting duty cycle
+ */
+ mutex_lock(&data->pwm_lock[module]);
+
+ /* write new CMR value */
+ iowrite32(val, NPCM7XX_PWM_REG_CMRx(data->pwm_base, module, pwm_ch));
+ tmp_buf = ioread32(NPCM7XX_PWM_REG_CR(data->pwm_base, module));
+
+ switch (pwm_ch) {
+ case 0:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH0_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH0_INV_BIT;
+ break;
+ case 1:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH1_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH1_INV_BIT;
+ break;
+ case 2:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH2_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH2_INV_BIT;
+ break;
+ case 3:
+ ctrl_en_bit = NPCM7XX_PWM_CTRL_CH3_EN_BIT;
+ env_bit = NPCM7XX_PWM_CTRL_CH3_INV_BIT;
+ break;
+ default:
+ mutex_unlock(&data->pwm_lock[module]);
+ return -ENODEV;
+ }
+
+ if (val == 0) {
+ /* Disable PWM */
+ tmp_buf &= ~ctrl_en_bit;
+ tmp_buf |= env_bit;
+ } else {
+ /* Enable PWM */
+ tmp_buf |= ctrl_en_bit;
+ tmp_buf &= ~env_bit;
+ }
+
+ iowrite32(tmp_buf, NPCM7XX_PWM_REG_CR(data->pwm_base, module));
+ mutex_unlock(&data->pwm_lock[module]);
+
+ return 0;
+}
+
+static inline void npcm7xx_fan_start_capture(struct npcm7xx_pwm_fan_data *data,
+ u8 fan, u8 cmp)
+{
+ u8 fan_id;
+ u8 reg_mode;
+ u8 reg_int;
+ unsigned long flags;
+
+ fan_id = NPCM7XX_FAN_INPUT(fan, cmp);
+
+ /* check whether this fan tach is enabled */
+ if (data->fan_dev[fan_id].fan_st_flg != FAN_DISABLE) {
+ /* reset status */
+ spin_lock_irqsave(&data->fan_lock[fan], flags);
+
+ data->fan_dev[fan_id].fan_st_flg = FAN_INIT;
+ reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ /*
+ * The interrupt enable bits do not need to be cleared before
+ * being set; they are cleared only on reset. The clock unit
+ * control register behaves in the same manner as the
+ * interrupt enable register.
+ */
+ if (cmp == NPCM7XX_FAN_CMPA) {
+ /* enable interrupt */
+ iowrite8(reg_int | (NPCM7XX_FAN_TIEN_TAIEN |
+ NPCM7XX_FAN_TIEN_TEIEN),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ reg_mode = NPCM7XX_FAN_TCKC_CLK1_APB
+ | ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base,
+ fan));
+
+ /* start to Capture */
+ iowrite8(reg_mode, NPCM7XX_FAN_REG_TCKC(data->fan_base,
+ fan));
+ } else {
+ /* enable interrupt */
+ iowrite8(reg_int | (NPCM7XX_FAN_TIEN_TBIEN |
+ NPCM7XX_FAN_TIEN_TFIEN),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ reg_mode =
+ NPCM7XX_FAN_TCKC_CLK2_APB
+ | ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base,
+ fan));
+
+ /* start to Capture */
+ iowrite8(reg_mode,
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+ }
+
+ spin_unlock_irqrestore(&data->fan_lock[fan], flags);
+ }
+}
+
+/*
+ * Enable a background timer to poll the fan tach values; 200 ms per
+ * round, four rounds to poll all fans.
+ */
+static void npcm7xx_fan_polling(struct timer_list *t)
+{
+ struct npcm7xx_pwm_fan_data *data;
+ int i;
+
+ data = from_timer(data, t, fan_timer);
+
+ /*
+ * Poll two modules per round:
+ * FAN01 & FAN89 / FAN23 & FAN1011 / FAN45 & FAN1213 / FAN67 & FAN1415
+ */
+ for (i = data->fan_select; i < NPCM7XX_FAN_MAX_MODULE;
+ i = i + 4) {
+ /* clear the flag and reset the counter (TCNT) */
+ iowrite8(NPCM7XX_FAN_TICLR_CLEAR_ALL,
+ NPCM7XX_FAN_REG_TICLR(data->fan_base, i));
+
+ if (data->fan_present[i * 2]) {
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT1(data->fan_base, i));
+ npcm7xx_fan_start_capture(data, i, NPCM7XX_FAN_CMPA);
+ }
+ if (data->fan_present[(i * 2) + 1]) {
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT2(data->fan_base, i));
+ npcm7xx_fan_start_capture(data, i, NPCM7XX_FAN_CMPB);
+ }
+ }
+
+ data->fan_select++;
+ data->fan_select &= 0x3;
+
+ /* reset the timer interval */
+ data->fan_timer.expires = jiffies +
+ msecs_to_jiffies(NPCM7XX_FAN_POLL_TIMER_200MS);
+ add_timer(&data->fan_timer);
+}
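The fan_select arithmetic above walks the eight modules in four rounds; a tiny stand-alone sketch (not part of the patch) of the schedule it produces, using the fan-input mapping from NPCM7XX_FAN_INPUT():

	#include <stdio.h>

	#define MAX_MODULE 8	/* NPCM7XX_FAN_MAX_MODULE */

	int main(void)
	{
		int round, i;

		/* round 0 polls modules 0 and 4, round 1 polls 1 and 5, ... */
		for (round = 0; round < 4; round++)
			for (i = round; i < MAX_MODULE; i += 4)
				printf("round %d polls module %d (FAN%d/%d)\n",
				       round, i, i * 2, i * 2 + 1);
		return 0;
	}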
+
+static inline void npcm7xx_fan_compute(struct npcm7xx_pwm_fan_data *data,
+ u8 fan, u8 cmp, u8 fan_id, u8 flag_int,
+ u8 flag_mode, u8 flag_clear)
+{
+ u8 reg_int;
+ u8 reg_mode;
+ u16 fan_cap;
+
+ if (cmp == NPCM7XX_FAN_CMPA)
+ fan_cap = ioread16(NPCM7XX_FAN_REG_TCRA(data->fan_base, fan));
+ else
+ fan_cap = ioread16(NPCM7XX_FAN_REG_TCRB(data->fan_base, fan));
+
+ /* clear the capture flag; H/W will auto reset NPCM7XX_FAN_TCNTx */
+ iowrite8(flag_clear, NPCM7XX_FAN_REG_TICLR(data->fan_base, fan));
+
+ if (data->fan_dev[fan_id].fan_st_flg == FAN_INIT) {
+ /* First capture, drop it */
+ data->fan_dev[fan_id].fan_st_flg =
+ FAN_PREPARE_TO_GET_FIRST_CAPTURE;
+
+ /* reset counter */
+ data->fan_dev[fan_id].fan_cnt_tmp = 0;
+ } else if (data->fan_dev[fan_id].fan_st_flg < FAN_ENOUGH_SAMPLE) {
+ /*
+ * Collect enough samples
+ * (e.g. a 2-pulse fan needs to get 2 samples).
+ */
+ data->fan_dev[fan_id].fan_cnt_tmp +=
+ (NPCM7XX_FAN_TCNT - fan_cap);
+
+ data->fan_dev[fan_id].fan_st_flg++;
+ } else {
+ /* got enough samples, or the fan is disabled */
+ if (data->fan_dev[fan_id].fan_st_flg == FAN_ENOUGH_SAMPLE) {
+ data->fan_dev[fan_id].fan_cnt_tmp +=
+ (NPCM7XX_FAN_TCNT - fan_cap);
+
+ /* compute the final average count per pulse */
+ data->fan_dev[fan_id].fan_cnt =
+ data->fan_dev[fan_id].fan_cnt_tmp /
+ FAN_ENOUGH_SAMPLE;
+
+ data->fan_dev[fan_id].fan_st_flg = FAN_INIT;
+ }
+
+ reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ /* disable interrupt */
+ iowrite8((reg_int & ~flag_int),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+ reg_mode = ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+ /* stop capturing */
+ iowrite8((reg_mode & ~flag_mode),
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+ }
+}
+
+static inline void npcm7xx_check_cmp(struct npcm7xx_pwm_fan_data *data,
+ u8 fan, u8 cmp, u8 flag)
+{
+ u8 reg_int;
+ u8 reg_mode;
+ u8 flag_timeout;
+ u8 flag_cap;
+ u8 flag_clear;
+ u8 flag_int;
+ u8 flag_mode;
+ u8 fan_id;
+
+ fan_id = NPCM7XX_FAN_INPUT(fan, cmp);
+
+ if (cmp == NPCM7XX_FAN_CMPA) {
+ flag_cap = NPCM7XX_FAN_TICTRL_TAPND;
+ flag_timeout = NPCM7XX_FAN_TICTRL_TEPND;
+ flag_int = NPCM7XX_FAN_TIEN_TAIEN | NPCM7XX_FAN_TIEN_TEIEN;
+ flag_mode = NPCM7XX_FAN_TCKC_CLK1_APB;
+ flag_clear = NPCM7XX_FAN_TICLR_TACLR | NPCM7XX_FAN_TICLR_TECLR;
+ } else {
+ flag_cap = NPCM7XX_FAN_TICTRL_TBPND;
+ flag_timeout = NPCM7XX_FAN_TICTRL_TFPND;
+ flag_int = NPCM7XX_FAN_TIEN_TBIEN | NPCM7XX_FAN_TIEN_TFIEN;
+ flag_mode = NPCM7XX_FAN_TCKC_CLK2_APB;
+ flag_clear = NPCM7XX_FAN_TICLR_TBCLR | NPCM7XX_FAN_TICLR_TFCLR;
+ }
+
+ if (flag & flag_timeout) {
+ reg_int = ioread8(NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ /* disable interrupt */
+ iowrite8((reg_int & ~flag_int),
+ NPCM7XX_FAN_REG_TIEN(data->fan_base, fan));
+
+ /* clear interrupt flag */
+ iowrite8(flag_clear,
+ NPCM7XX_FAN_REG_TICLR(data->fan_base, fan));
+
+ reg_mode = ioread8(NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+ /* stop capturing */
+ iowrite8((reg_mode & ~flag_mode),
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, fan));
+
+ /*
+ * If a timeout occurs (NPCM7XX_FAN_TIMEOUT), the fan is not
+ * connected or its speed is lower than 10.6 Hz (320 RPM at
+ * 2 pulses/rev). In this situation, the RPM output should be zero.
+ */
+ data->fan_dev[fan_id].fan_cnt = 0;
+ } else {
+ /* an input capture has occurred */
+ if (flag & flag_cap)
+ npcm7xx_fan_compute(data, fan, cmp, fan_id, flag_int,
+ flag_mode, flag_clear);
+ }
+}
+
+static irqreturn_t npcm7xx_fan_isr(int irq, void *dev_id)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_id;
+ unsigned long flags;
+ int module;
+ u8 flag;
+
+ module = irq - data->fan_irq[0];
+ spin_lock_irqsave(&data->fan_lock[module], flags);
+
+ flag = ioread8(NPCM7XX_FAN_REG_TICTRL(data->fan_base, module));
+ if (flag > 0) {
+ npcm7xx_check_cmp(data, module, NPCM7XX_FAN_CMPA, flag);
+ npcm7xx_check_cmp(data, module, NPCM7XX_FAN_CMPB, flag);
+ spin_unlock_irqrestore(&data->fan_lock[module], flags);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&data->fan_lock[module], flags);
+
+ return IRQ_NONE;
+}
+
+static int npcm7xx_read_pwm(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+ u32 pwm_ch = (channel % NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+ u32 module = (channel / NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE);
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = ioread32
+ (NPCM7XX_PWM_REG_CMRx(data->pwm_base, module, pwm_ch));
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int npcm7xx_write_pwm(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+ int err;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > NPCM7XX_PWM_CMR_MAX)
+ return -EINVAL;
+ err = npcm7xx_pwm_config_set(data, channel, (u16)val);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static umode_t npcm7xx_pwm_is_visible(const void *_data, u32 attr, int channel)
+{
+ const struct npcm7xx_pwm_fan_data *data = _data;
+
+ if (!data->pwm_present[channel])
+ return 0;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int npcm7xx_read_fan(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct npcm7xx_pwm_fan_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_fan_input:
+ *val = 0;
+ if (data->fan_dev[channel].fan_cnt <= 0)
+ return data->fan_dev[channel].fan_cnt;
+
+ /* Convert the raw reading to RPM */
+ if (data->fan_dev[channel].fan_cnt > 0 &&
+ data->fan_dev[channel].fan_pls_per_rev > 0)
+ *val = ((data->input_clk_freq * 60) /
+ (data->fan_dev[channel].fan_cnt *
+ data->fan_dev[channel].fan_pls_per_rev));
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
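A worked example of the RPM conversion above. The APB clock value is an assumption chosen to be consistent with the 214843.75 Hz figure quoted in the timeout comment earlier (55 MHz / 256 = 214843.75 Hz), and the count is a hypothetical reading:

	input_clk_freq = 55 MHz / (255 + 1)         = 214843 Hz (integer truncated)
	fan_cnt        = 3000 counts per pulse (hypothetical), 2 pulses/rev
	RPM            = (214843 * 60) / (3000 * 2) ~= 2148 RPM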
+
+static umode_t npcm7xx_fan_is_visible(const void *_data, u32 attr, int channel)
+{
+ const struct npcm7xx_pwm_fan_data *data = _data;
+
+ if (!data->fan_present[channel])
+ return 0;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int npcm7xx_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return npcm7xx_read_pwm(dev, attr, channel, val);
+ case hwmon_fan:
+ return npcm7xx_read_fan(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int npcm7xx_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return npcm7xx_write_pwm(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t npcm7xx_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return npcm7xx_pwm_is_visible(data, attr, channel);
+ case hwmon_fan:
+ return npcm7xx_fan_is_visible(data, attr, channel);
+ default:
+ return 0;
+ }
+}
+
+static const u32 npcm7xx_pwm_config[] = {
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info npcm7xx_pwm = {
+ .type = hwmon_pwm,
+ .config = npcm7xx_pwm_config,
+};
+
+static const u32 npcm7xx_fan_config[] = {
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info npcm7xx_fan = {
+ .type = hwmon_fan,
+ .config = npcm7xx_fan_config,
+};
+
+static const struct hwmon_channel_info *npcm7xx_info[] = {
+ &npcm7xx_pwm,
+ &npcm7xx_fan,
+ NULL
+};
+
+static const struct hwmon_ops npcm7xx_hwmon_ops = {
+ .is_visible = npcm7xx_is_visible,
+ .read = npcm7xx_read,
+ .write = npcm7xx_write,
+};
+
+static const struct hwmon_chip_info npcm7xx_chip_info = {
+ .ops = &npcm7xx_hwmon_ops,
+ .info = npcm7xx_info,
+};
+
+static u32 npcm7xx_pwm_init(struct npcm7xx_pwm_fan_data *data)
+{
+ int m, ch;
+ u32 prescale_val, output_freq;
+
+ data->pwm_clk_freq = clk_get_rate(data->pwm_clk);
+
+ /* Adjust the NPCM7xx PWM output frequency to ~25 kHz */
+ output_freq = data->pwm_clk_freq / PWM_CNT_DEFAULT;
+ prescale_val = DIV_ROUND_CLOSEST(output_freq, PWM_OUTPUT_FREQ_25KHZ);
+
+ /* If prescale_val = 0, then the prescale output clock is stopped */
+ if (prescale_val < MIN_PRESCALE1)
+ prescale_val = MIN_PRESCALE1;
+ /*
+ * prescale_val must be decremented by one because the hardware
+ * increments the PWM Prescale register value by one.
+ */
+ prescale_val--;
+
+ /* Apply the same prescale value to both channel fields of the register */
+ prescale_val |= (prescale_val << NPCM7XX_PWM_PRESCALE_SHIFT_CH01);
+
+ for (m = 0; m < NPCM7XX_PWM_MAX_MODULES ; m++) {
+ iowrite32(prescale_val, NPCM7XX_PWM_REG_PR(data->pwm_base, m));
+ iowrite32(NPCM7XX_PWM_PRESCALE2_DEFAULT,
+ NPCM7XX_PWM_REG_CSR(data->pwm_base, m));
+ iowrite32(NPCM7XX_PWM_CTRL_MODE_DEFAULT,
+ NPCM7XX_PWM_REG_CR(data->pwm_base, m));
+
+ for (ch = 0; ch < NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE; ch++) {
+ iowrite32(NPCM7XX_PWM_COUNTER_DEFAULT_NUM,
+ NPCM7XX_PWM_REG_CNRx(data->pwm_base, m, ch));
+ }
+ }
+
+ return output_freq / ((prescale_val & 0xf) + 1);
+}
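For a hypothetical 24 MHz PWM clock (the actual clock rate comes from the clk framework and is not shown here), the initialization above works out to:

	output_freq  = 24 MHz / 256                    = 93750 Hz
	prescale_val = DIV_ROUND_CLOSEST(93750, 25000) = 4, then 4 - 1 = 3
	PR register  = 0x0303 (same value in both channel fields)
	final output = 93750 / (3 + 1)                 ~= 23.4 kHz (close to 25 kHz)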
+
+static void npcm7xx_fan_init(struct npcm7xx_pwm_fan_data *data)
+{
+ int md;
+ int ch;
+ int i;
+ u32 apb_clk_freq;
+
+ for (md = 0; md < NPCM7XX_FAN_MAX_MODULE; md++) {
+ /* stop FAN0~7 clock */
+ iowrite8(NPCM7XX_FAN_TCKC_CLKX_NONE,
+ NPCM7XX_FAN_REG_TCKC(data->fan_base, md));
+
+ /* disable all interrupt */
+ iowrite8(0x00, NPCM7XX_FAN_REG_TIEN(data->fan_base, md));
+
+ /* clear all interrupt */
+ iowrite8(NPCM7XX_FAN_TICLR_CLEAR_ALL,
+ NPCM7XX_FAN_REG_TICLR(data->fan_base, md));
+
+ /* set FAN0~7 clock prescaler */
+ iowrite8(NPCM7XX_FAN_CLK_PRESCALE,
+ NPCM7XX_FAN_REG_TPRSC(data->fan_base, md));
+
+ /* set FAN0~7 mode (high-to-low transition) */
+ iowrite8((NPCM7XX_FAN_TMCTRL_MODE_5 | NPCM7XX_FAN_TMCTRL_TBEN |
+ NPCM7XX_FAN_TMCTRL_TAEN),
+ NPCM7XX_FAN_REG_TMCTRL(data->fan_base, md));
+
+ /* set FAN0~7 Initial Count/Cap */
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT1(data->fan_base, md));
+ iowrite16(NPCM7XX_FAN_TCNT,
+ NPCM7XX_FAN_REG_TCNT2(data->fan_base, md));
+
+ /* set FAN0~7 compare (equal to count) */
+ iowrite8((NPCM7XX_FAN_TCPCFG_EQAEN | NPCM7XX_FAN_TCPCFG_EQBEN),
+ NPCM7XX_FAN_REG_TCPCFG(data->fan_base, md));
+
+ /* set FAN0~7 compare value */
+ iowrite16(NPCM7XX_FAN_TCPA,
+ NPCM7XX_FAN_REG_TCPA(data->fan_base, md));
+ iowrite16(NPCM7XX_FAN_TCPB,
+ NPCM7XX_FAN_REG_TCPB(data->fan_base, md));
+
+ /* set FAN0~7 fan input FANIN 0~15 */
+ iowrite8(NPCM7XX_FAN_TINASEL_FANIN_DEFAULT,
+ NPCM7XX_FAN_REG_TINASEL(data->fan_base, md));
+ iowrite8(NPCM7XX_FAN_TINASEL_FANIN_DEFAULT,
+ NPCM7XX_FAN_REG_TINBSEL(data->fan_base, md));
+
+ for (i = 0; i < NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE; i++) {
+ ch = md * NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE + i;
+ data->fan_dev[ch].fan_st_flg = FAN_DISABLE;
+ data->fan_dev[ch].fan_pls_per_rev =
+ NPCM7XX_FAN_DEFAULT_PULSE_PER_REVOLUTION;
+ data->fan_dev[ch].fan_cnt = 0;
+ }
+ }
+
+ apb_clk_freq = clk_get_rate(data->fan_clk);
+
+ /* Fan tach input clock = APB clock / (prescaler + 1); default prescaler is 255. */
+ data->input_clk_freq = apb_clk_freq / (NPCM7XX_FAN_CLK_PRESCALE + 1);
+}
+
+static int
+npcm7xx_pwm_cz_get_max_state(struct thermal_cooling_device *tcdev,
+ unsigned long *state)
+{
+ struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+
+ *state = cdev->max_state;
+
+ return 0;
+}
+
+static int
+npcm7xx_pwm_cz_get_cur_state(struct thermal_cooling_device *tcdev,
+ unsigned long *state)
+{
+ struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+
+ *state = cdev->cur_state;
+
+ return 0;
+}
+
+static int
+npcm7xx_pwm_cz_set_cur_state(struct thermal_cooling_device *tcdev,
+ unsigned long state)
+{
+ struct npcm7xx_cooling_device *cdev = tcdev->devdata;
+ int ret;
+
+ if (state > cdev->max_state)
+ return -EINVAL;
+
+ cdev->cur_state = state;
+ ret = npcm7xx_pwm_config_set(cdev->data, cdev->pwm_port,
+ cdev->cooling_levels[cdev->cur_state]);
+
+ return ret;
+}
+
+static const struct thermal_cooling_device_ops npcm7xx_pwm_cool_ops = {
+ .get_max_state = npcm7xx_pwm_cz_get_max_state,
+ .get_cur_state = npcm7xx_pwm_cz_get_cur_state,
+ .set_cur_state = npcm7xx_pwm_cz_set_cur_state,
+};
+
+static int npcm7xx_create_pwm_cooling(struct device *dev,
+ struct device_node *child,
+ struct npcm7xx_pwm_fan_data *data,
+ u32 pwm_port, u8 num_levels)
+{
+ int ret;
+ struct npcm7xx_cooling_device *cdev;
+
+ cdev = devm_kzalloc(dev, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ cdev->cooling_levels = devm_kzalloc(dev, num_levels, GFP_KERNEL);
+ if (!cdev->cooling_levels)
+ return -ENOMEM;
+
+ cdev->max_state = num_levels - 1;
+ ret = of_property_read_u8_array(child, "cooling-levels",
+ cdev->cooling_levels,
+ num_levels);
+ if (ret) {
+ dev_err(dev, "Property 'cooling-levels' cannot be read.\n");
+ return ret;
+ }
+ snprintf(cdev->name, THERMAL_NAME_LENGTH, "%s%d", child->name,
+ pwm_port);
+
+ cdev->tcdev = thermal_of_cooling_device_register(child,
+ cdev->name,
+ cdev,
+ &npcm7xx_pwm_cool_ops);
+ if (IS_ERR(cdev->tcdev))
+ return PTR_ERR(cdev->tcdev);
+
+ cdev->data = data;
+ cdev->pwm_port = pwm_port;
+
+ data->cdev[pwm_port] = cdev;
+
+ return 0;
+}
+
+static int npcm7xx_en_pwm_fan(struct device *dev,
+ struct device_node *child,
+ struct npcm7xx_pwm_fan_data *data)
+{
+ u8 *fan_ch;
+ u32 pwm_port;
+ int ret, fan_cnt;
+ u8 index, ch;
+
+ ret = of_property_read_u32(child, "reg", &pwm_port);
+ if (ret)
+ return ret;
+
+ data->pwm_present[pwm_port] = true;
+ ret = npcm7xx_pwm_config_set(data, pwm_port,
+ NPCM7XX_PWM_CMR_DEFAULT_NUM);
+
+ ret = of_property_count_u8_elems(child, "cooling-levels");
+ if (ret > 0) {
+ ret = npcm7xx_create_pwm_cooling(dev, child, data, pwm_port,
+ ret);
+ if (ret)
+ return ret;
+ }
+
+ fan_cnt = of_property_count_u8_elems(child, "fan-tach-ch");
+ if (fan_cnt < 1)
+ return -EINVAL;
+
+ fan_ch = devm_kzalloc(dev, sizeof(*fan_ch) * fan_cnt, GFP_KERNEL);
+ if (!fan_ch)
+ return -ENOMEM;
+
+ ret = of_property_read_u8_array(child, "fan-tach-ch", fan_ch, fan_cnt);
+ if (ret)
+ return ret;
+
+ for (ch = 0; ch < fan_cnt; ch++) {
+ index = fan_ch[ch];
+ data->fan_present[index] = true;
+ data->fan_dev[index].fan_st_flg = FAN_INIT;
+ }
+
+ return 0;
+}
+
+static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np, *child;
+ struct npcm7xx_pwm_fan_data *data;
+ struct resource *res;
+ struct device *hwmon;
+ char name[20];
+ int ret, cnt;
+ u32 output_freq;
+ u32 i;
+
+ np = dev->of_node;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm");
+ if (!res) {
+ dev_err(dev, "pwm resource not found\n");
+ return -ENODEV;
+ }
+
+ data->pwm_base = devm_ioremap_resource(dev, res);
+ dev_dbg(dev, "pwm base resource is %pR\n", res);
+ if (IS_ERR(data->pwm_base))
+ return PTR_ERR(data->pwm_base);
+
+ data->pwm_clk = devm_clk_get(dev, "pwm");
+ if (IS_ERR(data->pwm_clk)) {
+ dev_err(dev, "couldn't get pwm clock\n");
+ return PTR_ERR(data->pwm_clk);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fan");
+ if (!res) {
+ dev_err(dev, "fan resource not found\n");
+ return -ENODEV;
+ }
+
+ data->fan_base = devm_ioremap_resource(dev, res);
+ dev_dbg(dev, "fan base resource is %pR\n", res);
+ if (IS_ERR(data->fan_base))
+ return PTR_ERR(data->fan_base);
+
+ data->fan_clk = devm_clk_get(dev, "fan");
+ if (IS_ERR(data->fan_clk)) {
+ dev_err(dev, "couldn't get fan clock\n");
+ return PTR_ERR(data->fan_clk);
+ }
+
+ output_freq = npcm7xx_pwm_init(data);
+ npcm7xx_fan_init(data);
+
+ for (cnt = 0; cnt < NPCM7XX_PWM_MAX_MODULES ; cnt++)
+ mutex_init(&data->pwm_lock[cnt]);
+
+ for (i = 0; i < NPCM7XX_FAN_MAX_MODULE; i++) {
+ spin_lock_init(&data->fan_lock[i]);
+
+ data->fan_irq[i] = platform_get_irq(pdev, i);
+ if (data->fan_irq[i] < 0) {
+ dev_err(dev, "get IRQ fan%d failed\n", i);
+ return data->fan_irq[i];
+ }
+
+ sprintf(name, "NPCM7XX-FAN-MD%d", i);
+ ret = devm_request_irq(dev, data->fan_irq[i], npcm7xx_fan_isr,
+ 0, name, (void *)data);
+ if (ret) {
+ dev_err(dev, "register IRQ fan%d failed\n", i);
+ return ret;
+ }
+ }
+
+ for_each_child_of_node(np, child) {
+ ret = npcm7xx_en_pwm_fan(dev, child, data);
+ if (ret) {
+ dev_err(dev, "enable pwm and fan failed\n");
+ of_node_put(child);
+ return ret;
+ }
+ }
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "npcm7xx_pwm_fan",
+ data, &npcm7xx_chip_info,
+ NULL);
+ if (IS_ERR(hwmon)) {
+ dev_err(dev, "unable to register hwmon device\n");
+ return PTR_ERR(hwmon);
+ }
+
+ for (i = 0; i < NPCM7XX_FAN_MAX_CHN_NUM; i++) {
+ if (data->fan_present[i]) {
+ /* fan timer initialization */
+ data->fan_timer.expires = jiffies +
+ msecs_to_jiffies(NPCM7XX_FAN_POLL_TIMER_200MS);
+ timer_setup(&data->fan_timer,
+ npcm7xx_fan_polling, 0);
+ add_timer(&data->fan_timer);
+ break;
+ }
+ }
+
+ pr_info("NPCM7XX PWM-FAN Driver probed, output Freq %dHz[PWM], input Freq %dHz[FAN]\n",
+ output_freq, data->input_clk_freq);
+
+ return 0;
+}
+
+static const struct of_device_id of_pwm_fan_match_table[] = {
+ { .compatible = "nuvoton,npcm750-pwm-fan", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_pwm_fan_match_table);
+
+static struct platform_driver npcm7xx_pwm_fan_driver = {
+ .probe = npcm7xx_pwm_fan_probe,
+ .driver = {
+ .name = "npcm7xx_pwm_fan",
+ .of_match_table = of_pwm_fan_match_table,
+ },
+};
+
+module_platform_driver(npcm7xx_pwm_fan_driver);
+
+MODULE_DESCRIPTION("Nuvoton NPCM7XX PWM and Fan Tacho driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index e71aec69e76e..a82018aaf473 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -130,7 +130,7 @@ config SENSORS_MAX34440
default n
help
If you say yes here you get hardware monitoring support for Maxim
- MAX34440, MAX34441, MAX34446, MAX34460, and MAX34461.
+ MAX34440, MAX34441, MAX34446, MAX34451, MAX34460, and MAX34461.
This driver can also be built as a module. If so, the module will
be called max34440.
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 74a1f6f68fb3..47576c460010 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -27,7 +27,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { max34440, max34441, max34446, max34460, max34461 };
+enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
#define MAX34440_MFR_VOUT_PEAK 0xd4
#define MAX34440_MFR_IOUT_PEAK 0xd5
@@ -44,6 +44,9 @@ enum chips { max34440, max34441, max34446, max34460, max34461 };
#define MAX34440_STATUS_OT_FAULT BIT(5)
#define MAX34440_STATUS_OT_WARN BIT(6)
+#define MAX34451_MFR_CHANNEL_CONFIG 0xe4
+#define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f
+
struct max34440_data {
int id;
struct pmbus_driver_info info;
@@ -67,7 +70,7 @@ static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
MAX34440_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_AVG:
- if (data->id != max34446)
+ if (data->id != max34446 && data->id != max34451)
return -ENXIO;
ret = pmbus_read_word_data(client, page,
MAX34446_MFR_IOUT_AVG);
@@ -143,7 +146,7 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
case PMBUS_VIRT_RESET_IOUT_HISTORY:
ret = pmbus_write_word_data(client, page,
MAX34440_MFR_IOUT_PEAK, 0);
- if (!ret && data->id == max34446)
+ if (!ret && (data->id == max34446 || data->id == max34451))
ret = pmbus_write_word_data(client, page,
MAX34446_MFR_IOUT_AVG, 0);
@@ -202,6 +205,58 @@ static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
return ret;
}
+static int max34451_set_supported_funcs(struct i2c_client *client,
+ struct max34440_data *data)
+{
+ /*
+ * Each of the channels 0-15 can be configured to monitor one of the
+ * following functions based on MFR_CHANNEL_CONFIG[5:0]:
+ * 0x10: Sequencing + voltage monitoring (only valid for PAGES 0–11)
+ * 0x20: Voltage monitoring (no sequencing)
+ * 0x21: Voltage read only
+ * 0x22: Current monitoring
+ * 0x23: Current read only
+ * 0x30: General-purpose input active low
+ * 0x34: General-purpose input active high
+ * 0x00: Disabled
+ */
+
+ int page, rv;
+
+ for (page = 0; page < 16; page++) {
+ rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (rv < 0)
+ return rv;
+
+ rv = i2c_smbus_read_word_data(client,
+ MAX34451_MFR_CHANNEL_CONFIG);
+ if (rv < 0)
+ return rv;
+
+ switch (rv & MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK) {
+ case 0x10:
+ case 0x20:
+ data->info.func[page] = PMBUS_HAVE_VOUT |
+ PMBUS_HAVE_STATUS_VOUT;
+ break;
+ case 0x21:
+ data->info.func[page] = PMBUS_HAVE_VOUT;
+ break;
+ case 0x22:
+ data->info.func[page] = PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_STATUS_IOUT;
+ break;
+ case 0x23:
+ data->info.func[page] = PMBUS_HAVE_IOUT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
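Summarizing the switch above, each page's functionality ends up as follows (taken directly from the code; pages left at 0 expose no sensors):

	MFR_CHANNEL_CONFIG[5:0]   resulting info.func[page]
	0x10 or 0x20              PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
	0x21                      PMBUS_HAVE_VOUT
	0x22                      PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
	0x23                      PMBUS_HAVE_IOUT
	anything else             0 (page left disabled)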
+
static struct pmbus_driver_info max34440_info[] = {
[max34440] = {
.pages = 14,
@@ -325,6 +380,30 @@ static struct pmbus_driver_info max34440_info[] = {
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
+ [max34451] = {
+ .pages = 21,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 2,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ /* func[0-15] are set dynamically before probing */
+ .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
[max34460] = {
.pages = 18,
.format[PSC_VOLTAGE_OUT] = direct,
@@ -398,6 +477,7 @@ static int max34440_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max34440_data *data;
+ int rv;
data = devm_kzalloc(&client->dev, sizeof(struct max34440_data),
GFP_KERNEL);
@@ -406,6 +486,12 @@ static int max34440_probe(struct i2c_client *client,
data->id = id->driver_data;
data->info = max34440_info[id->driver_data];
+ if (data->id == max34451) {
+ rv = max34451_set_supported_funcs(client, data);
+ if (rv)
+ return rv;
+ }
+
return pmbus_do_probe(client, id, &data->info);
}
@@ -413,6 +499,7 @@ static const struct i2c_device_id max34440_id[] = {
{"max34440", max34440},
{"max34441", max34441},
{"max34446", max34446},
+ {"max34451", max34451},
{"max34460", max34460},
{"max34461", max34461},
{}
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index 51970bae3c4a..9cd66cabb84f 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -463,7 +463,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
msg[num-1].len++;
}
- status = i2c_transfer(adapter, msg, num);
+ status = __i2c_transfer(adapter, msg, num);
if (status < 0)
goto cleanup;
if (status != num) {
@@ -524,9 +524,24 @@ cleanup:
* This executes an SMBus protocol operation, and returns a negative
* errno code else zero on success.
*/
-s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
- char read_write, u8 command, int protocol,
- union i2c_smbus_data *data)
+s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int protocol, union i2c_smbus_data *data)
+{
+ s32 res;
+
+ i2c_lock_bus(adapter, I2C_LOCK_SEGMENT);
+ res = __i2c_smbus_xfer(adapter, addr, flags, read_write,
+ command, protocol, data);
+ i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);
+
+ return res;
+}
+EXPORT_SYMBOL(i2c_smbus_xfer);
+
+s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int protocol, union i2c_smbus_data *data)
{
unsigned long orig_jiffies;
int try;
@@ -543,8 +558,6 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
flags &= I2C_M_TEN | I2C_CLIENT_PEC | I2C_CLIENT_SCCB;
if (adapter->algo->smbus_xfer) {
- i2c_lock_bus(adapter, I2C_LOCK_SEGMENT);
-
/* Retry automatically on arbitration loss */
orig_jiffies = jiffies;
for (res = 0, try = 0; try <= adapter->retries; try++) {
@@ -557,7 +570,6 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
orig_jiffies + adapter->timeout))
break;
}
- i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);
if (res != -EOPNOTSUPP || !adapter->algo->master_xfer)
goto trace;
@@ -579,7 +591,7 @@ trace:
return res;
}
-EXPORT_SYMBOL(i2c_smbus_xfer);
+EXPORT_SYMBOL(__i2c_smbus_xfer);
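The point of this split is that __i2c_smbus_xfer() can now be called by code that already holds the bus lock, e.g. to issue several dependent SMBus operations with no other traffic in between. A minimal sketch of such a caller (the device address and command codes are hypothetical):

	#include <linux/i2c.h>

	/* Sketch: read two dependent byte registers atomically. */
	static int read_pair_atomically(struct i2c_adapter *adap, u16 addr,
					u8 cmd_lo, u8 cmd_hi, u16 *out)
	{
		union i2c_smbus_data lo, hi;
		s32 ret;

		i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
		ret = __i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ,
				       cmd_lo, I2C_SMBUS_BYTE_DATA, &lo);
		if (!ret)
			ret = __i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ,
					       cmd_hi, I2C_SMBUS_BYTE_DATA, &hi);
		i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);

		if (ret)
			return ret;
		*out = (hi.byte << 8) | lo.byte;
		return 0;
	}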
/**
* i2c_smbus_read_i2c_block_data_or_emulated - read block or emulate
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 5f178384876f..44a7a255ef74 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -419,10 +419,11 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
int write, void *buffer, unsigned *bufflen,
- struct request_sense *sense, int timeout,
+ struct scsi_sense_hdr *sshdr, int timeout,
req_flags_t rq_flags)
{
struct cdrom_info *info = drive->driver_data;
+ struct scsi_sense_hdr local_sshdr;
int retries = 10;
bool failed;
@@ -430,6 +431,9 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
"rq_flags: 0x%x",
cmd[0], write, timeout, rq_flags);
+ if (!sshdr)
+ sshdr = &local_sshdr;
+
/* start of retry loop */
do {
struct request *rq;
@@ -456,8 +460,8 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
if (buffer)
*bufflen = scsi_req(rq)->resid_len;
- if (sense)
- memcpy(sense, scsi_req(rq)->sense, sizeof(*sense));
+ scsi_normalize_sense(scsi_req(rq)->sense,
+ scsi_req(rq)->sense_len, sshdr);
/*
* FIXME: we should probably abort/retry or something in case of
@@ -469,12 +473,10 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
* The request failed. Retry if it was due to a unit
* attention status (usually means media was changed).
*/
- struct request_sense *reqbuf = scsi_req(rq)->sense;
-
- if (reqbuf->sense_key == UNIT_ATTENTION)
+ if (sshdr->sense_key == UNIT_ATTENTION)
cdrom_saw_media_change(drive);
- else if (reqbuf->sense_key == NOT_READY &&
- reqbuf->asc == 4 && reqbuf->ascq != 4) {
+ else if (sshdr->sense_key == NOT_READY &&
+ sshdr->asc == 4 && sshdr->ascq != 4) {
/*
* The drive is in the process of loading
* a disk. Retry, but wait a little to give
@@ -864,7 +866,7 @@ static void msf_from_bcd(struct atapi_msf *msf)
msf->frame = bcd2bin(msf->frame);
}
-int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
+int cdrom_check_status(ide_drive_t *drive, struct scsi_sense_hdr *sshdr)
{
struct cdrom_info *info = drive->driver_data;
struct cdrom_device_info *cdi;
@@ -886,12 +888,11 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
*/
cmd[7] = cdi->sanyo_slot % 3;
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, RQF_QUIET);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sshdr, 0, RQF_QUIET);
}
static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
- unsigned long *sectors_per_frame,
- struct request_sense *sense)
+ unsigned long *sectors_per_frame)
{
struct {
__be32 lba;
@@ -908,7 +909,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_READ_CDVD_CAPACITY;
- stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, sense, 0,
+ stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, NULL, 0,
RQF_QUIET);
if (stat)
return stat;
@@ -944,8 +945,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
}
static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
- int format, char *buf, int buflen,
- struct request_sense *sense)
+ int format, char *buf, int buflen)
{
unsigned char cmd[BLK_MAX_CDB];
@@ -962,11 +962,11 @@ static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
if (msf_flag)
cmd[1] = 2;
- return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, RQF_QUIET);
+ return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, NULL, 0, RQF_QUIET);
}
/* Try to read the entire TOC for the disk into our internal buffer. */
-int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
+int ide_cd_read_toc(ide_drive_t *drive)
{
int stat, ntracks, i;
struct cdrom_info *info = drive->driver_data;
@@ -996,14 +996,13 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
* Check to see if the existing data is still valid. If it is,
* just return.
*/
- (void) cdrom_check_status(drive, sense);
+ (void) cdrom_check_status(drive, NULL);
if (drive->atapi_flags & IDE_AFLAG_TOC_VALID)
return 0;
/* try to get the total cdrom capacity and sector size */
- stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame,
- sense);
+ stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame);
if (stat)
toc->capacity = 0x1fffff;
@@ -1016,7 +1015,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
/* first read just the header, so we know how long the TOC is */
stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
- sizeof(struct atapi_toc_header), sense);
+ sizeof(struct atapi_toc_header));
if (stat)
return stat;
@@ -1036,7 +1035,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
(char *)&toc->hdr,
sizeof(struct atapi_toc_header) +
(ntracks + 1) *
- sizeof(struct atapi_toc_entry), sense);
+ sizeof(struct atapi_toc_entry));
if (stat && toc->hdr.first_track > 1) {
/*
@@ -1056,8 +1055,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
(char *)&toc->hdr,
sizeof(struct atapi_toc_header) +
(ntracks + 1) *
- sizeof(struct atapi_toc_entry),
- sense);
+ sizeof(struct atapi_toc_entry));
if (stat)
return stat;
@@ -1094,7 +1092,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
if (toc->hdr.first_track != CDROM_LEADOUT) {
/* read the multisession information */
stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
- sizeof(ms_tmp), sense);
+ sizeof(ms_tmp));
if (stat)
return stat;
@@ -1108,7 +1106,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
/* re-read multisession information using MSF format */
stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
- sizeof(ms_tmp), sense);
+ sizeof(ms_tmp));
if (stat)
return stat;
@@ -1412,7 +1410,7 @@ static sector_t ide_cdrom_capacity(ide_drive_t *drive)
{
unsigned long capacity, sectors_per_frame;
- if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL))
+ if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame))
return 0;
return capacity * sectors_per_frame;
@@ -1710,9 +1708,8 @@ static unsigned int idecd_check_events(struct gendisk *disk,
static int idecd_revalidate_disk(struct gendisk *disk)
{
struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
- struct request_sense sense;
- ide_cd_read_toc(info->drive, &sense);
+ ide_cd_read_toc(info->drive);
return 0;
}
@@ -1736,7 +1733,6 @@ static int ide_cd_probe(ide_drive_t *drive)
{
struct cdrom_info *info;
struct gendisk *g;
- struct request_sense sense;
ide_debug_log(IDE_DBG_PROBE, "driver_req: %s, media: 0x%x",
drive->driver_req, drive->media);
@@ -1785,7 +1781,7 @@ static int ide_cd_probe(ide_drive_t *drive)
goto failed;
}
- ide_cd_read_toc(drive, &sense);
+ ide_cd_read_toc(drive);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
device_add_disk(&drive->gendev, g);
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 04f0f310a856..a69dc7f61c4d 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -98,11 +98,11 @@ void ide_cd_log_error(const char *, struct request *, struct request_sense *);
/* ide-cd.c functions used by ide-cd_ioctl.c */
int ide_cd_queue_pc(ide_drive_t *, const unsigned char *, int, void *,
- unsigned *, struct request_sense *, int, req_flags_t);
-int ide_cd_read_toc(ide_drive_t *, struct request_sense *);
+ unsigned *, struct scsi_sense_hdr *, int, req_flags_t);
+int ide_cd_read_toc(ide_drive_t *);
int ide_cdrom_get_capabilities(ide_drive_t *, u8 *);
void ide_cdrom_update_speed(ide_drive_t *, u8 *);
-int cdrom_check_status(ide_drive_t *, struct request_sense *);
+int cdrom_check_status(ide_drive_t *, struct scsi_sense_hdr *);
/* ide-cd_ioctl.c */
int ide_cdrom_open_real(struct cdrom_device_info *, int);
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index b1322400887b..4a6e1a413ead 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -43,14 +43,14 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
{
ide_drive_t *drive = cdi->handle;
struct media_event_desc med;
- struct request_sense sense;
+ struct scsi_sense_hdr sshdr;
int stat;
if (slot_nr != CDSL_CURRENT)
return -EINVAL;
- stat = cdrom_check_status(drive, &sense);
- if (!stat || sense.sense_key == UNIT_ATTENTION)
+ stat = cdrom_check_status(drive, &sshdr);
+ if (!stat || sshdr.sense_key == UNIT_ATTENTION)
return CDS_DISC_OK;
if (!cdrom_get_media_event(cdi, &med)) {
@@ -62,8 +62,8 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
return CDS_NO_DISC;
}
- if (sense.sense_key == NOT_READY && sense.asc == 0x04
- && sense.ascq == 0x04)
+ if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04
+ && sshdr.ascq == 0x04)
return CDS_DISC_OK;
/*
@@ -71,8 +71,8 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
* just return TRAY_OPEN since ATAPI doesn't provide
* any other way to detect this...
*/
- if (sense.sense_key == NOT_READY) {
- if (sense.asc == 0x3a && sense.ascq == 1)
+ if (sshdr.sense_key == NOT_READY) {
+ if (sshdr.asc == 0x3a && sshdr.ascq == 1)
return CDS_NO_DISC;
else
return CDS_TRAY_OPEN;
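For readers decoding the sense checks above: the ASC/ASCQ pairs follow the usual SPC additional-sense conventions. The summary below is illustrative, taken from the SCSI spec rather than from this patch:

/*
 * Sense codes tested above (per SPC, illustrative):
 *   NOT_READY, asc 0x04 / ascq 0x04 -> LU not ready, format in progress
 *                                      (medium present, so CDS_DISC_OK)
 *   NOT_READY, asc 0x3a / ascq 0x01 -> medium not present, tray closed
 *                                      (CDS_NO_DISC)
 *   NOT_READY, asc 0x3a / other     -> medium not present, treated as
 *                                      CDS_TRAY_OPEN
 */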
@@ -105,8 +105,7 @@ unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
/* Eject the disk if EJECTFLAG is 0.
If EJECTFLAG is 1, try to reload the disk. */
static
-int cdrom_eject(ide_drive_t *drive, int ejectflag,
- struct request_sense *sense)
+int cdrom_eject(ide_drive_t *drive, int ejectflag)
{
struct cdrom_info *cd = drive->driver_data;
struct cdrom_device_info *cdi = &cd->devinfo;
@@ -129,20 +128,16 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag,
cmd[0] = GPCMD_START_STOP_UNIT;
cmd[4] = loej | (ejectflag != 0);
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, 0);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
}
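The cmd[4] value above relies on the START STOP UNIT byte-4 layout. A short reference, from the MMC/SBC specs rather than this patch (loej itself is set earlier in the function, outside this hunk):

/*
 * GPCMD_START_STOP_UNIT, byte 4 (illustrative):
 *   bit 0 (Start) - 1 = start/load, 0 = stop
 *   bit 1 (LoEj)  - 1 = move the medium on the Start transition
 * With loej == 0x02: ejectflag == 0 -> 0x02 (stop + eject),
 *                    ejectflag != 0 -> 0x03 (start + load).
 */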
/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
static
-int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
- struct request_sense *sense)
+int ide_cd_lockdoor(ide_drive_t *drive, int lockflag)
{
- struct request_sense my_sense;
+ struct scsi_sense_hdr sshdr;
int stat;
- if (sense == NULL)
- sense = &my_sense;
-
/* If the drive cannot lock the door, just pretend. */
if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0) {
stat = 0;
@@ -155,14 +150,14 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
cmd[4] = lockflag ? 1 : 0;
stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL,
- sense, 0, 0);
+ &sshdr, 0, 0);
}
/* If we got an illegal field error, the drive
probably cannot lock the door. */
if (stat != 0 &&
- sense->sense_key == ILLEGAL_REQUEST &&
- (sense->asc == 0x24 || sense->asc == 0x20)) {
+ sshdr.sense_key == ILLEGAL_REQUEST &&
+ (sshdr.asc == 0x24 || sshdr.asc == 0x20)) {
printk(KERN_ERR "%s: door locking not supported\n",
drive->name);
drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
@@ -170,7 +165,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
}
/* no medium, that's alright. */
- if (stat != 0 && sense->sense_key == NOT_READY && sense->asc == 0x3a)
+ if (stat != 0 && sshdr.sense_key == NOT_READY && sshdr.asc == 0x3a)
stat = 0;
if (stat == 0) {
@@ -186,23 +181,22 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
int ide_cdrom_tray_move(struct cdrom_device_info *cdi, int position)
{
ide_drive_t *drive = cdi->handle;
- struct request_sense sense;
if (position) {
- int stat = ide_cd_lockdoor(drive, 0, &sense);
+ int stat = ide_cd_lockdoor(drive, 0);
if (stat)
return stat;
}
- return cdrom_eject(drive, !position, &sense);
+ return cdrom_eject(drive, !position);
}
int ide_cdrom_lock_door(struct cdrom_device_info *cdi, int lock)
{
ide_drive_t *drive = cdi->handle;
- return ide_cd_lockdoor(drive, lock, NULL);
+ return ide_cd_lockdoor(drive, lock);
}
/*
@@ -213,7 +207,6 @@ int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
{
ide_drive_t *drive = cdi->handle;
struct cdrom_info *cd = drive->driver_data;
- struct request_sense sense;
u8 buf[ATAPI_CAPABILITIES_PAGE_SIZE];
int stat;
unsigned char cmd[BLK_MAX_CDB];
@@ -236,7 +229,7 @@ int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
cmd[5] = speed & 0xff;
}
- stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
+ stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
if (!ide_cdrom_get_capabilities(drive, buf)) {
ide_cdrom_update_speed(drive, buf);
@@ -252,11 +245,10 @@ int ide_cdrom_get_last_session(struct cdrom_device_info *cdi,
struct atapi_toc *toc;
ide_drive_t *drive = cdi->handle;
struct cdrom_info *info = drive->driver_data;
- struct request_sense sense;
int ret;
if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0 || !info->toc) {
- ret = ide_cd_read_toc(drive, &sense);
+ ret = ide_cd_read_toc(drive);
if (ret)
return ret;
}
@@ -300,7 +292,6 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
{
ide_drive_t *drive = cdi->handle;
struct cdrom_info *cd = drive->driver_data;
- struct request_sense sense;
struct request *rq;
int ret;
@@ -315,7 +306,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
* lock it again.
*/
if (drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED)
- (void)ide_cd_lockdoor(drive, 1, &sense);
+ (void)ide_cd_lockdoor(drive, 1);
return ret;
}
@@ -355,7 +346,6 @@ static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
struct atapi_toc_entry *first_toc, *last_toc;
unsigned long lba_start, lba_end;
int stat;
- struct request_sense sense;
unsigned char cmd[BLK_MAX_CDB];
stat = ide_cd_get_toc_entry(drive, ti->cdti_trk0, &first_toc);
@@ -380,7 +370,7 @@ static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]);
lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]);
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
}
static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
@@ -391,7 +381,7 @@ static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
int stat;
/* Make sure our saved TOC is valid. */
- stat = ide_cd_read_toc(drive, NULL);
+ stat = ide_cd_read_toc(drive);
if (stat)
return stat;
@@ -461,8 +451,8 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
layer. the packet must be complete, as we do not
touch it at all. */
- if (cgc->sense)
- memset(cgc->sense, 0, sizeof(struct request_sense));
+ if (cgc->sshdr)
+ memset(cgc->sshdr, 0, sizeof(*cgc->sshdr));
if (cgc->quiet)
flags |= RQF_QUIET;
@@ -470,7 +460,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
cgc->stat = ide_cd_queue_pc(drive, cgc->cmd,
cgc->data_direction == CGC_DATA_WRITE,
cgc->buffer, &len,
- cgc->sense, cgc->timeout, flags);
+ cgc->sshdr, cgc->timeout, flags);
if (!cgc->stat)
cgc->buflen -= len;
return cgc->stat;
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 12e7c6c102c1..6eb64c6f0802 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -79,7 +79,7 @@ static int uverbs_try_lock_object(struct ib_uobject *uobj,
*/
switch (mode) {
case UVERBS_LOOKUP_READ:
- return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
+ return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
-EBUSY : 0;
case UVERBS_LOOKUP_WRITE:
/* lock is exclusive */
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index de3ee606034c..eec83757d55f 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -900,9 +900,7 @@ static int trigger_sbr(struct hfi1_devdata *dd)
* delay after a reset is required. Per spec requirements,
* the link is either working or not after that point.
*/
- pci_reset_bridge_secondary_bus(dev->bus->self);
-
- return 0;
+ return pci_reset_bus(dev);
}
/*
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index ba160f99cf8e..c643d80c5a53 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -421,7 +421,7 @@ tx_finish:
static u16 hfi1_vnic_select_queue(struct net_device *netdev,
struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index aa7f2342d4eb..081aa91fc162 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -1480,7 +1480,7 @@ static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
}
fwnode = &dsaf_node->fwnode;
} else if (is_acpi_device_node(dev->fwnode)) {
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
ret = acpi_node_get_property_reference(dev->fwnode,
"dsaf-handle", 0, &args);
@@ -1488,7 +1488,7 @@ static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
dev_err(dev, "could not find dsaf-handle\n");
return ret;
}
- fwnode = acpi_fwnode_handle(args.adev);
+ fwnode = args.fwnode;
} else {
dev_err(dev, "cannot read data from DT or ACPI\n");
return -ENXIO;
@@ -4917,16 +4917,14 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
continue;
pdev = of_find_device_by_node(net_node);
} else if (is_acpi_device_node(dev->fwnode)) {
- struct acpi_reference_args args;
- struct fwnode_handle *fwnode;
+ struct fwnode_reference_args args;
ret = acpi_node_get_property_reference(dev->fwnode,
"eth-handle",
i, &args);
if (ret)
continue;
- fwnode = acpi_fwnode_handle(args.adev);
- pdev = hns_roce_find_pdev(fwnode);
+ pdev = hns_roce_find_pdev(args.fwnode);
} else {
dev_err(dev, "cannot read data from DT or ACPI\n");
return -ENXIO;
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index a77d020cc89d..009be8889d71 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -311,7 +311,7 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
domain->sig.dif.pi_interval = scsi_prot_interval(sc);
- domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
+ domain->sig.dif.ref_tag = t10_pi_ref_tag(sc->request);
/*
* At the moment we hard code those, but in the future
* we will take them from sc.
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index 0c8aec62a425..61558788b3fa 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -95,7 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
}
static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
@@ -107,7 +107,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
mdata->entropy = opa_vnic_calc_entropy(skb);
mdata->vl = opa_vnic_get_vl(adapter, skb);
rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
- accel_priv, fallback);
+ sb_dev, fallback);
skb_pull(skb, sizeof(*mdata));
return rc;
}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index d73e14699aa9..f37cbad022a2 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2028,8 +2028,7 @@ static void srpt_release_channel_work(struct work_struct *w)
target_sess_cmd_list_set_waiting(se_sess);
target_wait_for_sess_cmds(se_sess);
- transport_deregister_session_configfs(se_sess);
- transport_deregister_session(se_sess);
+ target_remove_session(se_sess);
ch->sess = NULL;
if (ch->using_rdma_cm)
@@ -2220,16 +2219,16 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
pr_debug("registering session %s\n", ch->sess_name);
if (sport->port_guid_tpg.se_tpg_wwn)
- ch->sess = target_alloc_session(&sport->port_guid_tpg, 0, 0,
+ ch->sess = target_setup_session(&sport->port_guid_tpg, 0, 0,
TARGET_PROT_NORMAL,
ch->sess_name, ch, NULL);
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
+ ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
TARGET_PROT_NORMAL, i_port_id, ch,
NULL);
/* Retry without leading "0x" */
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
+ ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
TARGET_PROT_NORMAL,
i_port_id + 2, ch, NULL);
if (IS_ERR_OR_NULL(ch->sess)) {
@@ -3610,11 +3609,9 @@ static struct configfs_attribute *srpt_tpg_attrs[] = {
/**
* srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
* @wwn: Corresponds to $driver/$port.
- * @group: Not used.
* @name: $tpg.
*/
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
const char *name)
{
struct srpt_port *sport = wwn->priv;
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index a4e404aaf64b..5c7afdec192c 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -57,8 +57,8 @@ MODULE_LICENSE("GPL v2");
#define HIL_DATA 0x1
#define HIL_CMD 0x3
#define HIL_IRQ 2
- #define hil_readb(p) readb(p)
- #define hil_writeb(v,p) writeb((v),(p))
+ #define hil_readb(p) readb((const volatile void __iomem *)(p))
+ #define hil_writeb(v, p) writeb((v), (volatile void __iomem *)(p))
#else
#error "HIL is not supported on this platform"
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ddcbbdb5d658..511ff9a1d6d9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -367,6 +367,9 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
iova_len = roundup_pow_of_two(iova_len);
+ if (dev->bus_dma_mask)
+ dma_limit &= dev->bus_dma_mask;
+
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, domain->geometry.aperture_end);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index e9233db16e03..d564d21245c5 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -8,7 +8,7 @@ config ARM_GIC
bool
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config ARM_GIC_PM
@@ -34,7 +34,7 @@ config GIC_NON_BANKED
config ARM_GIC_V3
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select IRQ_DOMAIN_HIERARCHY
select PARTITION_PERCPU
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
@@ -66,7 +66,7 @@ config ARM_NVIC
config ARM_VIC
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
config ARM_VIC_NR
int
@@ -93,14 +93,14 @@ config ATMEL_AIC_IRQ
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
config ATMEL_AIC5_IRQ
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
config I8259
@@ -137,7 +137,7 @@ config DW_APB_ICTL
config FARADAY_FTINTC010
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
config HISILICON_IRQ_MBIGEN
@@ -162,7 +162,7 @@ config CLPS711X_IRQCHIP
bool
depends on ARCH_CLPS711X
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
select SPARSE_IRQ
default y
@@ -181,7 +181,7 @@ config OMAP_IRQCHIP
config ORION_IRQCHIP
bool
select IRQ_DOMAIN
- select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_MULTI_HANDLER
config PIC32_EVIC
bool
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 4eca5c763766..606efa64adff 100644
--- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -45,6 +45,9 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
*/
info->scratchpad[0].ul = mc_bus_dev->icid;
msi_info = msi_get_domain_info(msi_domain->parent);
+
+ /* Allocate at least 32 MSIs, and always as a power of 2 */
+ nvec = max_t(int, 32, roundup_pow_of_two(nvec));
return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
}
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 25a98de5cfb2..8d6d009d1d58 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -66,7 +66,7 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
{
struct pci_dev *pdev, *alias_dev;
struct msi_domain_info *msi_info;
- int alias_count = 0;
+ int alias_count = 0, minnvec = 1;
if (!dev_is_pci(dev))
return -EINVAL;
@@ -86,8 +86,18 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
/* ITS specific DeviceID, as the core ITS ignores dev. */
info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
- return msi_info->ops->msi_prepare(domain->parent,
- dev, max(nvec, alias_count), info);
+ /*
+ * Always allocate a power of 2, and special case device 0 for
+ * broken systems where the DevID is not wired (and all devices
+ * appear as DevID 0). For that reason, we generously allocate a
+ * minimum of 32 MSIs for DevID 0. If you want more because all
+ * your devices are aliasing to DevID 0, consider fixing your HW.
+ */
+ nvec = max(nvec, alias_count);
+ if (!info->scratchpad[0].ul)
+ minnvec = 32;
+ nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
+ return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
}
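The net effect of the clamping added above can be sanity-checked with a few values (illustrative only, not part of the patch):

/*
 * Illustrative values for the nvec clamping above:
 *   DevID != 0, nvec = 5,  alias_count = 0  -> roundup_pow_of_two(5)  = 8
 *   DevID != 0, nvec = 5,  alias_count = 12 -> roundup_pow_of_two(12) = 16
 *   DevID == 0, nvec = 5,  alias_count = 0  -> max(32, 8)             = 32
 *   DevID == 0, nvec = 50, alias_count = 0  -> max(32, 64)            = 64
 */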
static struct msi_domain_ops its_pci_msi_ops = {
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index 8881a053c173..7b8e87b493fe 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -73,6 +73,8 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
/* ITS specific DeviceID, as the core ITS ignores dev. */
info->scratchpad[0].ul = dev_id;
+ /* Allocate at least 32 MSIs, and always as a power of 2 */
+ nvec = max_t(int, 32, roundup_pow_of_two(nvec));
return msi_info->ops->msi_prepare(domain->parent,
dev, nvec, info);
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d7842d312d3e..316a57530f6d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -23,6 +23,8 @@
#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
@@ -160,7 +162,7 @@ static struct {
} vpe_proxy;
static LIST_HEAD(its_nodes);
-static DEFINE_SPINLOCK(its_lock);
+static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;
@@ -1421,112 +1423,176 @@ static struct irq_chip its_irq_chip = {
.irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
};
+
/*
* How we allocate LPIs:
*
- * The GIC has id_bits bits for interrupt identifiers. From there, we
- * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
- * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
- * bits to the right.
+ * lpi_range_list contains ranges of LPIs that are available to
+ * allocate from. To allocate LPIs, just pick the first range that
+ * fits the required allocation, and reduce it by the required
+ * amount. Once empty, remove the range from the list.
+ *
+ * To free a range of LPIs, add a free range to the list, sort it and
+ * merge the result if the new range happens to be adjacent to an
+ * already free block.
*
- * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
+ * The consequence of the above is that allocation cost is low, but
+ * freeing is expensive. We assume that freeing rarely occurs.
*/
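The allocate/free life cycle this comment describes can be traced on paper. The following sketch is illustrative only, not part of the patch; entries are written as base/span pairs and N stands for the number of LPIs the redistributor exposes:

/*
 * Worked trace (illustrative):
 *   init = free_lpi_range(8192, N)  -> free list { 8192/N }
 *   alloc_lpi_range(32, &base)      -> base = 8192, list { 8224/(N-32) }
 *   alloc_lpi_range(16, &base)      -> base = 8224, list { 8240/(N-48) }
 *   free_lpi_range(8192, 32)        -> { 8192/32, 8240/(N-48) }, not
 *                                      adjacent, so the merge is a no-op
 *   free_lpi_range(8224, 16)        -> 8192/32, 8224/16 and 8240/(N-48)
 *                                      are pairwise adjacent and merge
 *                                      back into the single range 8192/N
 */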
-#define IRQS_PER_CHUNK_SHIFT 5
-#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT)
-#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
-static unsigned long *lpi_bitmap;
-static u32 lpi_chunks;
-static DEFINE_SPINLOCK(lpi_lock);
+static DEFINE_MUTEX(lpi_range_lock);
+static LIST_HEAD(lpi_range_list);
+
+struct lpi_range {
+ struct list_head entry;
+ u32 base_id;
+ u32 span;
+};
-static int its_lpi_to_chunk(int lpi)
+static struct lpi_range *mk_lpi_range(u32 base, u32 span)
{
- return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
+ struct lpi_range *range;
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (range) {
+ INIT_LIST_HEAD(&range->entry);
+ range->base_id = base;
+ range->span = span;
+ }
+
+ return range;
}
-static int its_chunk_to_lpi(int chunk)
+static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
{
- return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
+ struct lpi_range *ra, *rb;
+
+ ra = container_of(a, struct lpi_range, entry);
+ rb = container_of(b, struct lpi_range, entry);
+
+	return ra->base_id - rb->base_id;
}
-static int __init its_lpi_init(u32 id_bits)
+static void merge_lpi_ranges(void)
{
- lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
+ struct lpi_range *range, *tmp;
- lpi_bitmap = kcalloc(BITS_TO_LONGS(lpi_chunks), sizeof(long),
- GFP_KERNEL);
- if (!lpi_bitmap) {
- lpi_chunks = 0;
- return -ENOMEM;
+ list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+ if (!list_is_last(&range->entry, &lpi_range_list) &&
+ (tmp->base_id == (range->base_id + range->span))) {
+ tmp->base_id = range->base_id;
+ tmp->span += range->span;
+ list_del(&range->entry);
+ kfree(range);
+ }
}
+}
- pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
- return 0;
+static int alloc_lpi_range(u32 nr_lpis, u32 *base)
+{
+ struct lpi_range *range, *tmp;
+ int err = -ENOSPC;
+
+ mutex_lock(&lpi_range_lock);
+
+ list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+ if (range->span >= nr_lpis) {
+ *base = range->base_id;
+ range->base_id += nr_lpis;
+ range->span -= nr_lpis;
+
+ if (range->span == 0) {
+ list_del(&range->entry);
+ kfree(range);
+ }
+
+ err = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&lpi_range_lock);
+
+ pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
+ return err;
}
-static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
+static int free_lpi_range(u32 base, u32 nr_lpis)
{
- unsigned long *bitmap = NULL;
- int chunk_id;
- int nr_chunks;
- int i;
+ struct lpi_range *new;
+ int err = 0;
+
+ mutex_lock(&lpi_range_lock);
+
+ new = mk_lpi_range(base, nr_lpis);
+ if (!new) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ list_add(&new->entry, &lpi_range_list);
+ list_sort(NULL, &lpi_range_list, lpi_range_cmp);
+ merge_lpi_ranges();
+out:
+ mutex_unlock(&lpi_range_lock);
+ return err;
+}
+
+static int __init its_lpi_init(u32 id_bits)
+{
+ u32 lpis = (1UL << id_bits) - 8192;
+ u32 numlpis;
+ int err;
+
+ numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
+
+ if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
+ lpis = numlpis;
+ pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
+ lpis);
+ }
- nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
+ /*
+ * Initializing the allocator is just the same as freeing the
+ * full range of LPIs.
+ */
+ err = free_lpi_range(8192, lpis);
+ pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
+ return err;
+}
- spin_lock(&lpi_lock);
+static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
+{
+ unsigned long *bitmap = NULL;
+ int err = 0;
do {
- chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
- 0, nr_chunks, 0);
- if (chunk_id < lpi_chunks)
+ err = alloc_lpi_range(nr_irqs, base);
+ if (!err)
break;
- nr_chunks--;
- } while (nr_chunks > 0);
+ nr_irqs /= 2;
+ } while (nr_irqs > 0);
- if (!nr_chunks)
+ if (err)
goto out;
- bitmap = kcalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK),
- sizeof(long),
- GFP_ATOMIC);
+	bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof(long), GFP_ATOMIC);
if (!bitmap)
goto out;
- for (i = 0; i < nr_chunks; i++)
- set_bit(chunk_id + i, lpi_bitmap);
-
- *base = its_chunk_to_lpi(chunk_id);
- *nr_ids = nr_chunks * IRQS_PER_CHUNK;
+ *nr_ids = nr_irqs;
out:
- spin_unlock(&lpi_lock);
-
if (!bitmap)
*base = *nr_ids = 0;
return bitmap;
}
-static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
+static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
{
- int lpi;
-
- spin_lock(&lpi_lock);
-
- for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
- int chunk = its_lpi_to_chunk(lpi);
-
- BUG_ON(chunk > lpi_chunks);
- if (test_bit(chunk, lpi_bitmap)) {
- clear_bit(chunk, lpi_bitmap);
- } else {
- pr_err("Bad LPI chunk %d\n", chunk);
- }
- }
-
- spin_unlock(&lpi_lock);
-
+ WARN_ON(free_lpi_range(base, nr_ids));
kfree(bitmap);
}
@@ -1559,7 +1625,7 @@ static int __init its_alloc_lpi_tables(void)
{
phys_addr_t paddr;
- lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
+ lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
if (!gic_rdists->prop_page) {
pr_err("Failed to allocate PROPBASE\n");
@@ -1997,12 +2063,12 @@ static void its_cpu_init_collections(void)
{
struct its_node *its;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry)
its_cpu_init_collection(its);
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
}
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
@@ -2134,17 +2200,20 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
if (!its_alloc_device_table(its, dev_id))
return NULL;
+ if (WARN_ON(!is_power_of_2(nvecs)))
+ nvecs = roundup_pow_of_two(nvecs);
+
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
/*
- * We allocate at least one chunk worth of LPIs bet device,
- * and thus that many ITEs. The device may require less though.
+ * Even if the device wants a single LPI, the ITT must be
+ * sized as a power of two (and you need at least one bit...).
*/
- nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
+ nr_ites = max(2, nvecs);
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc(sz, GFP_KERNEL);
if (alloc_lpis) {
- lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+ lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
col_map = kcalloc(nr_lpis, sizeof(*col_map),
GFP_KERNEL);
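The ITT sizing above can be checked with small numbers. The 8-byte ITE below is an assumed typical value (the real ite_size comes from GITS_TYPER), and the 256-byte alignment assumes the driver's ITS_ITT_ALIGN; neither number appears in this hunk:

/*
 * Worked example (illustrative): nvecs = 1 is already a power of two,
 * so no WARN fires; nr_ites = max(2, 1) = 2. With an assumed 8-byte ITE,
 * sz = 16, then sz = max(16, 256) + 255 = 511, which is enough for
 * kzalloc() to contain a 256-byte-aligned ITT of the minimum size.
 */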
@@ -2379,9 +2448,9 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
/* If all interrupts have been freed, start mopping the floor */
if (bitmap_empty(its_dev->event_map.lpi_map,
its_dev->event_map.nr_lpis)) {
- its_lpi_free_chunks(its_dev->event_map.lpi_map,
- its_dev->event_map.lpi_base,
- its_dev->event_map.nr_lpis);
+ its_lpi_free(its_dev->event_map.lpi_map,
+ its_dev->event_map.lpi_base,
+ its_dev->event_map.nr_lpis);
kfree(its_dev->event_map.col_map);
/* Unmap device/itt */
@@ -2780,7 +2849,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain,
}
if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
- its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
+ its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
its_free_prop_table(vm->vprop_page);
}
}
@@ -2795,18 +2864,18 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
BUG_ON(!vm);
- bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
+ bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
if (!bitmap)
return -ENOMEM;
if (nr_ids < nr_irqs) {
- its_lpi_free_chunks(bitmap, base, nr_ids);
+ its_lpi_free(bitmap, base, nr_ids);
return -ENOMEM;
}
vprop_page = its_allocate_prop_table(GFP_KERNEL);
if (!vprop_page) {
- its_lpi_free_chunks(bitmap, base, nr_ids);
+ its_lpi_free(bitmap, base, nr_ids);
return -ENOMEM;
}
@@ -2833,7 +2902,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
if (i > 0)
its_vpe_irq_domain_free(domain, virq, i - 1);
- its_lpi_free_chunks(bitmap, base, nr_ids);
+ its_lpi_free(bitmap, base, nr_ids);
its_free_prop_table(vprop_page);
}
@@ -3070,7 +3139,7 @@ static int its_save_disable(void)
struct its_node *its;
int err = 0;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry) {
void __iomem *base;
@@ -3102,7 +3171,7 @@ err:
writel_relaxed(its->ctlr_save, base + GITS_CTLR);
}
}
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
return err;
}
@@ -3112,7 +3181,7 @@ static void its_restore_enable(void)
struct its_node *its;
int ret;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry) {
void __iomem *base;
int i;
@@ -3164,7 +3233,7 @@ static void its_restore_enable(void)
GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
its_cpu_init_collection(its);
}
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
}
static struct syscore_ops its_syscore_ops = {
@@ -3398,9 +3467,9 @@ static int __init its_probe_one(struct resource *res,
if (err)
goto out_free_tables;
- spin_lock(&its_lock);
+ raw_spin_lock(&its_lock);
list_add(&its->entry, &its_nodes);
- spin_unlock(&its_lock);
+ raw_spin_unlock(&its_lock);
return 0;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 76ea56d779a1..e214181b77b7 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -877,7 +877,7 @@ static struct irq_chip gic_eoimode1_chip = {
.flags = IRQCHIP_SET_TYPE_MASKED,
};
-#define GIC_ID_NR (1U << gic_data.rdists.id_bits)
+#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
@@ -1091,7 +1091,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
* The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
*/
typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
- gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
+ gic_data.rdists.gicd_typer = typer;
gic_irqs = GICD_TYPER_IRQS(typer);
if (gic_irqs > 1020)
gic_irqs = 1020;
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index fc5953dea509..2ff08986b536 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -165,6 +165,7 @@ static int __init intc_1chip_of_init(struct device_node *node,
return ingenic_intc_of_init(node, 1);
}
IRQCHIP_DECLARE(jz4740_intc, "ingenic,jz4740-intc", intc_1chip_of_init);
+IRQCHIP_DECLARE(jz4725b_intc, "ingenic,jz4725b-intc", intc_1chip_of_init);
static int __init intc_2chip_of_init(struct device_node *node,
struct device_node *parent)
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 3a7e8905a97e..3df527fcf4e1 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -159,6 +159,7 @@ static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
};
static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
+ { .exti = 0, .irq_parent = 6 },
{ .exti = 1, .irq_parent = 7 },
{ .exti = 2, .irq_parent = 8 },
{ .exti = 3, .irq_parent = 9 },
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 6e0c2814d032..ef5560b848ab 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -1321,7 +1322,7 @@ static inline void capinc_tty_exit(void) { }
* /proc/capi/capi20:
* minor applid nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
*/
-static int capi20_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused capi20_proc_show(struct seq_file *m, void *v)
{
struct capidev *cdev;
struct list_head *l;
@@ -1344,7 +1345,7 @@ static int capi20_proc_show(struct seq_file *m, void *v)
* /proc/capi/capi20ncci:
* applid ncci
*/
-static int capi20ncci_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused capi20ncci_proc_show(struct seq_file *m, void *v)
{
struct capidev *cdev;
struct capincci *np;
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index ee510f901720..e8949f3dcae1 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -2451,7 +2452,7 @@ lower_callback(struct notifier_block *nb, unsigned long val, void *v)
* /proc/capi/capidrv:
* nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
*/
-static int capidrv_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused capidrv_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%lu %lu %lu %lu\n",
global.ap.nrecvctlpkt,
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 20d0a080a2b0..ecdeb89645d0 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -739,6 +739,7 @@ static void read_int_callback(struct urb *urb)
case HD_OPEN_B2CHANNEL_ACK:
++channel;
+ /* fall through */
case HD_OPEN_B1CHANNEL_ACK:
bcs = cs->bcs + channel;
update_basstate(ucs, BS_B1OPEN << channel, 0);
@@ -752,6 +753,7 @@ static void read_int_callback(struct urb *urb)
case HD_CLOSE_B2CHANNEL_ACK:
++channel;
+ /* fall through */
case HD_CLOSE_B1CHANNEL_ACK:
bcs = cs->bcs + channel;
update_basstate(ucs, 0, BS_B1OPEN << channel);
@@ -765,6 +767,7 @@ static void read_int_callback(struct urb *urb)
case HD_B2_FLOW_CONTROL:
++channel;
+ /* fall through */
case HD_B1_FLOW_CONTROL:
bcs = cs->bcs + channel;
atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES,
@@ -972,16 +975,14 @@ static int starturbs(struct bc_state *bcs)
rc = -EFAULT;
goto error;
}
+ usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
+ usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel),
+ ubc->isoinbuf + k * BAS_INBUFSIZE,
+ BAS_INBUFSIZE, read_iso_callback, bcs,
+ BAS_FRAMETIME);
- urb->dev = bcs->cs->hw.bas->udev;
- urb->pipe = usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel);
urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = ubc->isoinbuf + k * BAS_INBUFSIZE;
- urb->transfer_buffer_length = BAS_INBUFSIZE;
urb->number_of_packets = BAS_NUMFRAMES;
- urb->interval = BAS_FRAMETIME;
- urb->complete = read_iso_callback;
- urb->context = bcs;
for (j = 0; j < BAS_NUMFRAMES; j++) {
urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME;
urb->iso_frame_desc[j].length = BAS_MAXFRAME;
@@ -1005,15 +1006,15 @@ static int starturbs(struct bc_state *bcs)
rc = -EFAULT;
goto error;
}
- urb->dev = bcs->cs->hw.bas->udev;
- urb->pipe = usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel);
+ usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
+ usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel),
+ ubc->isooutbuf->data,
+ sizeof(ubc->isooutbuf->data),
+ write_iso_callback, &ubc->isoouturbs[k],
+ BAS_FRAMETIME);
+
urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = ubc->isooutbuf->data;
- urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data);
urb->number_of_packets = BAS_NUMFRAMES;
- urb->interval = BAS_FRAMETIME;
- urb->complete = write_iso_callback;
- urb->context = &ubc->isoouturbs[k];
for (j = 0; j < BAS_NUMFRAMES; ++j) {
urb->iso_frame_desc[j].offset = BAS_OUTBUFSIZE;
urb->iso_frame_desc[j].length = BAS_NORMFRAME;
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index ae2b2669af1b..8eb28a83832e 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -361,6 +361,7 @@ modehdlc(struct bchannel *bch, int protocol)
switch (protocol) {
case -1: /* used for init */
bch->state = -1;
+ /* fall through */
case ISDN_P_NONE:
if (bch->state == ISDN_P_NONE)
break;
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 34c93874af23..ebb3fa2e1d00 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1296,6 +1296,7 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
case (-1): /* used for init */
bch->state = -1;
bch->nr = bc;
+ /* fall through */
case (ISDN_P_NONE):
if (bch->state == ISDN_P_NONE)
return 0;
@@ -2219,7 +2220,7 @@ hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct hfc_pci *card;
struct _hfc_map *m = (struct _hfc_map *)ent->driver_data;
- card = kzalloc(sizeof(struct hfc_pci), GFP_ATOMIC);
+ card = kzalloc(sizeof(struct hfc_pci), GFP_KERNEL);
if (!card) {
printk(KERN_ERR "No kmem for HFC card\n");
return err;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 17cc879ad2bb..6d05946b445e 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -819,6 +819,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
int fifon = fifo->fifonum;
int i;
int hdlc = 0;
+ unsigned long flags;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s: fifo(%i) len(%i) "
@@ -835,7 +836,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
return;
}
- spin_lock(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (fifo->dch) {
rx_skb = fifo->dch->rx_skb;
maxlen = fifo->dch->maxlen;
@@ -844,7 +845,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
if (fifo->bch) {
if (test_bit(FLG_RX_OFF, &fifo->bch->Flags)) {
fifo->bch->dropcnt += len;
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
maxlen = bchannel_get_rxbuf(fifo->bch, len);
@@ -854,7 +855,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
skb_trim(rx_skb, 0);
pr_warning("%s.B%d: No bufferspace for %d bytes\n",
hw->name, fifo->bch->nr, len);
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
maxlen = fifo->bch->maxlen;
@@ -878,7 +879,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
} else {
printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n",
hw->name, __func__);
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
}
@@ -888,7 +889,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
"for fifo(%d) HFCUSB_D_RX\n",
hw->name, __func__, fifon);
skb_trim(rx_skb, 0);
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
}
@@ -942,7 +943,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
/* deliver transparent data to layer2 */
recv_Bchannel(fifo->bch, MISDN_ID_ANY, false);
}
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
static void
@@ -979,18 +980,19 @@ rx_iso_complete(struct urb *urb)
__u8 *buf;
static __u8 eof[8];
__u8 s0_state;
+ unsigned long flags;
fifon = fifo->fifonum;
status = urb->status;
- spin_lock(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (fifo->stop_gracefull) {
fifo->stop_gracefull = 0;
fifo->active = 0;
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
/*
* ISO transfer only partially completed,
@@ -1096,15 +1098,16 @@ rx_int_complete(struct urb *urb)
struct usb_fifo *fifo = (struct usb_fifo *) urb->context;
struct hfcsusb *hw = fifo->hw;
static __u8 eof[8];
+ unsigned long flags;
- spin_lock(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (fifo->stop_gracefull) {
fifo->stop_gracefull = 0;
fifo->active = 0;
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
fifon = fifo->fifonum;
if ((!fifo->active) || (urb->status)) {
@@ -1172,12 +1175,13 @@ tx_iso_complete(struct urb *urb)
int *tx_idx;
int frame_complete, fifon, status, fillempty = 0;
__u8 threshbit, *p;
+ unsigned long flags;
- spin_lock(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (fifo->stop_gracefull) {
fifo->stop_gracefull = 0;
fifo->active = 0;
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
@@ -1195,7 +1199,7 @@ tx_iso_complete(struct urb *urb)
} else {
printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n",
hw->name, __func__);
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
return;
}
@@ -1375,7 +1379,7 @@ tx_iso_complete(struct urb *urb)
hw->name, __func__,
symbolic(urb_errlist, status), status, fifon);
}
- spin_unlock(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index 1fc290659e94..3e01012be4ab 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -887,6 +887,7 @@ release_card(struct inf_hw *card) {
release_card(card->sc[i]);
card->sc[i] = NULL;
}
+ /* fall through */
default:
pci_disable_device(card->pdev);
pci_set_drvdata(card->pdev, NULL);
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index b791688d0228..386731ec2489 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -972,6 +972,7 @@ isar_pump_statev_fax(struct isar_ch *ch, u8 devt) {
break;
case PCTRL_CMD_FTM:
p1 = 2;
+ /* fall through */
case PCTRL_CMD_FTH:
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL,
PCTRL_CMD_SILON, 1, &p1);
@@ -1177,6 +1178,7 @@ setup_pump(struct isar_ch *ch) {
send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG,
PMOD_DTMF, 1, param);
}
+ /* fall through */
case ISDN_P_B_MODEM_ASYNC:
ctrl = PMOD_DATAMODEM;
if (test_bit(FLG_ORIGIN, &ch->bch.Flags)) {
@@ -1268,6 +1270,7 @@ setup_iom2(struct isar_ch *ch) {
case ISDN_P_B_MODEM_ASYNC:
case ISDN_P_B_T30_FAX:
cmsb |= IOM_CTRL_RCV;
+ /* fall through */
case ISDN_P_B_L2DTMF:
if (test_bit(FLG_DTMFSEND, &ch->bch.Flags))
cmsb |= IOM_CTRL_RCV;
@@ -1560,6 +1563,7 @@ isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
ich->is->name, hh->id);
ret = -EINVAL;
}
+ /* fall through */
default:
pr_info("%s: %s unknown prim(%x,%x)\n",
ich->is->name, __func__, hh->prim, hh->id);
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 89d9ba8ed535..2b317cb63d06 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1084,7 +1084,7 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
- card = kzalloc(sizeof(struct tiger_hw), GFP_ATOMIC);
+ card = kzalloc(sizeof(struct tiger_hw), GFP_KERNEL);
if (!card) {
pr_info("No kmem for Netjet\n");
return err;
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index a18b605fb4f2..b161456c942e 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -207,6 +207,7 @@ modehdlc(struct BCState *bcs, int mode, int bc)
bcs->mode = 1;
bcs->channel = bc;
bc = 0;
+ /* fall through */
case (L1_MODE_NULL):
if (bcs->mode == L1_MODE_NULL)
return;
diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c
index ddec47a911a0..9ee06328784c 100644
--- a/drivers/isdn/hisax/callc.c
+++ b/drivers/isdn/hisax/callc.c
@@ -1012,7 +1012,7 @@ dummy_pstack(struct PStack *st, int pr, void *arg) {
static int
init_PStack(struct PStack **stp) {
- *stp = kmalloc(sizeof(struct PStack), GFP_ATOMIC);
+ *stp = kmalloc(sizeof(struct PStack), GFP_KERNEL);
if (!*stp)
return -ENOMEM;
(*stp)->next = NULL;
@@ -1369,6 +1369,7 @@ leased_l1l2(struct PStack *st, int pr, void *arg)
case (PH_ACTIVATE | INDICATION):
case (PH_ACTIVATE | CONFIRM):
event = EV_LEASED;
+ /* fall through */
case (PH_DEACTIVATE | INDICATION):
case (PH_DEACTIVATE | CONFIRM):
if (test_bit(FLG_TWO_DCHAN, &chanp->cs->HW_Flags))
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 7108bdb8742e..b12e6cae26c2 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1029,7 +1029,7 @@ static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card,
*cs_out = NULL;
- cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
+ cs = kzalloc(sizeof(struct IsdnCardState), GFP_KERNEL);
if (!cs) {
printk(KERN_WARNING
"HiSax: No memory for IsdnCardState(card %d)\n",
@@ -1059,12 +1059,12 @@ static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card,
"HiSax: Card Type %d out of range\n", card->typ);
goto outf_cs;
}
- if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
+ if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_KERNEL))) {
printk(KERN_WARNING
"HiSax: No memory for dlog(card %d)\n", cardnr + 1);
goto outf_cs;
}
- if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
+ if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_KERNEL))) {
printk(KERN_WARNING
"HiSax: No memory for status_buf(card %d)\n",
cardnr + 1);
@@ -1123,7 +1123,7 @@ static int hisax_cs_setup(int cardnr, struct IsdnCard *card,
{
int ret;
- if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_ATOMIC))) {
+ if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_KERNEL))) {
printk(KERN_WARNING "HiSax: No memory for isac rcvbuf\n");
ll_unload(cs);
goto outf_cs;
@@ -1843,6 +1843,7 @@ static void hisax_b_l2l1(struct PStack *st, int pr, void *arg)
case PH_DEACTIVATE | REQUEST:
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
skb_queue_purge(&bcs->squeue);
+ /* fall through */
default:
B_L2L1(b_if, pr, arg);
break;
diff --git a/drivers/isdn/hisax/gazel.c b/drivers/isdn/hisax/gazel.c
index 35c6df6534ec..a6d8af02354a 100644
--- a/drivers/isdn/hisax/gazel.c
+++ b/drivers/isdn/hisax/gazel.c
@@ -108,6 +108,7 @@ ReadISAC(struct IsdnCardState *cs, u_char offset)
switch (cs->subtyp) {
case R647:
off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+ /* fall through */
case R685:
return (readreg(cs->hw.gazel.isac, off2));
case R753:
@@ -125,6 +126,7 @@ WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
switch (cs->subtyp) {
case R647:
off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+ /* fall through */
case R685:
writereg(cs->hw.gazel.isac, off2, value);
break;
@@ -203,6 +205,7 @@ ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
switch (cs->subtyp) {
case R647:
off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+ /* fall through */
case R685:
return (readreg(cs->hw.gazel.hscx[hscx], off2));
case R753:
@@ -220,6 +223,7 @@ WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
switch (cs->subtyp) {
case R647:
off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+ /* fall through */
case R685:
writereg(cs->hw.gazel.hscx[hscx], off2, value);
break;
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 97ecb3073045..1d4cd01d4685 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -432,16 +432,12 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe,
{
int k;
- urb->dev = dev;
- urb->pipe = pipe;
- urb->complete = complete;
+ usb_fill_int_urb(urb, dev, pipe, buf, packet_size * num_packets,
+ complete, context, interval);
+
urb->number_of_packets = num_packets;
- urb->transfer_buffer_length = packet_size * num_packets;
- urb->context = context;
- urb->transfer_buffer = buf;
urb->transfer_flags = URB_ISO_ASAP;
urb->actual_length = 0;
- urb->interval = interval;
for (k = 0; k < num_packets; k++) {
urb->iso_frame_desc[k].offset = packet_size * k;
urb->iso_frame_desc[k].length = packet_size;
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index d01ff116797b..82c1879f5664 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -1089,6 +1089,7 @@ isar_pump_statev_fax(struct BCState *bcs, u_char devt) {
break;
case PCTRL_CMD_FTM:
p1 = 2;
+ /* fall through */
case PCTRL_CMD_FTH:
sendmsg(cs, dps | ISAR_HIS_PUMPCTRL,
PCTRL_CMD_SILON, 1, &p1);
@@ -1097,6 +1098,7 @@ isar_pump_statev_fax(struct BCState *bcs, u_char devt) {
case PCTRL_CMD_FRM:
if (frm_extra_delay)
mdelay(frm_extra_delay);
+ /* fall through */
case PCTRL_CMD_FRH:
p1 = bcs->hw.isar.mod = bcs->hw.isar.newmod;
bcs->hw.isar.newmod = 0;
diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c
index da0a1c6aa329..98f60d1523f4 100644
--- a/drivers/isdn/hisax/l3_1tr6.c
+++ b/drivers/isdn/hisax/l3_1tr6.c
@@ -88,6 +88,7 @@ l3_1tr6_setup_req(struct l3_process *pc, u_char pr, void *arg)
break;
case 'C':
channel = 0x08;
+ /* fall through */
case 'P':
channel |= 0x80;
teln++;
diff --git a/drivers/isdn/hisax/l3dss1.c b/drivers/isdn/hisax/l3dss1.c
index 18a3484b1f7e..368d152a8f1d 100644
--- a/drivers/isdn/hisax/l3dss1.c
+++ b/drivers/isdn/hisax/l3dss1.c
@@ -1282,6 +1282,7 @@ l3dss1_setup_req(struct l3_process *pc, u_char pr,
switch (0x5f & *teln) {
case 'C':
channel = 0x08;
+ /* fall through */
case 'P':
channel |= 0x80;
teln++;
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index 1cb9930d5e24..f207fda691c7 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -408,15 +408,10 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev,
{
int k;
- urb->dev = dev;
- urb->pipe = pipe;
- urb->interval = 1;
- urb->transfer_buffer = buf;
+ usb_fill_int_urb(urb, dev, pipe, buf, num_packets * packet_size,
+ complete, context, 1);
+
urb->number_of_packets = num_packets;
- urb->transfer_buffer_length = num_packets * packet_size;
- urb->actual_length = 0;
- urb->complete = complete;
- urb->context = context;
urb->transfer_flags = URB_ISO_ASAP;
for (k = 0; k < num_packets; k++) {
urb->iso_frame_desc[k].offset = packet_size * k;
diff --git a/drivers/isdn/hysdn/hysdn_boot.c b/drivers/isdn/hysdn/hysdn_boot.c
index 4a0425378f37..ba177c3a621b 100644
--- a/drivers/isdn/hysdn/hysdn_boot.c
+++ b/drivers/isdn/hysdn/hysdn_boot.c
@@ -99,6 +99,7 @@ pof_handle_data(hysdn_card *card, int datlen)
case TAG_CBOOTDTA:
DecryptBuf(boot, datlen); /* we need to encrypt the buffer */
+ /* fall through */
case TAG_BOOTDTA:
if (card->debug_flags & LOG_POF_RECORD)
hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
@@ -137,6 +138,7 @@ pof_handle_data(hysdn_card *card, int datlen)
case TAG_CABSDATA:
DecryptBuf(boot, datlen); /* we need to encrypt the buffer */
+ /* fall through */
case TAG_ABSDATA:
if (card->debug_flags & LOG_POF_RECORD)
hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 960f26348bb5..b730037a0e2d 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -787,7 +787,7 @@ isdn_tty_suspend(char *id, modem_info *info, atemu *m)
cmd.parm.cmsg.para[3] = 4; /* 16 bit 0x0004 Suspend */
cmd.parm.cmsg.para[4] = 0;
cmd.parm.cmsg.para[5] = l;
- strncpy(&cmd.parm.cmsg.para[6], id, l);
+ memcpy(&cmd.parm.cmsg.para[6], id, l);
cmd.command = CAPI_PUT_MESSAGE;
cmd.driver = info->isdn_driver;
cmd.arg = info->isdn_channel;
@@ -877,7 +877,7 @@ isdn_tty_resume(char *id, modem_info *info, atemu *m)
cmd.parm.cmsg.para[3] = 5; /* 16 bit 0x0005 Resume */
cmd.parm.cmsg.para[4] = 0;
cmd.parm.cmsg.para[5] = l;
- strncpy(&cmd.parm.cmsg.para[6], id, l);
+ memcpy(&cmd.parm.cmsg.para[6], id, l);
cmd.command = CAPI_PUT_MESSAGE;
info->dialing = 1;
// strcpy(dev->num[i], n);
diff --git a/drivers/isdn/i4l/isdn_v110.c b/drivers/isdn/i4l/isdn_v110.c
index 8b74ce412524..2a5f6668756c 100644
--- a/drivers/isdn/i4l/isdn_v110.c
+++ b/drivers/isdn/i4l/isdn_v110.c
@@ -354,6 +354,7 @@ EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen)
printk(KERN_WARNING "isdn_v110 (EncodeMatrix): buffer full!\n");
return line;
}
+ /* else: fall through */
case 128:
m[line] = 128; /* leftmost -> set byte to 1000000 */
mbit = 64; /* current bit in the matrix line */
@@ -386,20 +387,28 @@ EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen)
switch (++line % 10) {
case 1:
m[line++] = 0xfe;
+ /* fall through */
case 2:
m[line++] = 0xfe;
+ /* fall through */
case 3:
m[line++] = 0xfe;
+ /* fall through */
case 4:
m[line++] = 0xfe;
+ /* fall through */
case 5:
m[line++] = 0xbf;
+ /* fall through */
case 6:
m[line++] = 0xfe;
+ /* fall through */
case 7:
m[line++] = 0xfe;
+ /* fall through */
case 8:
m[line++] = 0xfe;
+ /* fall through */
case 9:
m[line++] = 0xfe;
}
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 422dced7c90a..d97c6dd52223 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -539,6 +539,7 @@ create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
rq.protocol = ISDN_P_NT_S0;
if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
rq.protocol = ISDN_P_NT_E1;
+ /* fall through */
case ISDN_P_LAPD_TE:
ch->recv = mISDN_queue_message;
ch->peer = &dev->D.st->own;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 6e3a998f3370..44097a3e0fcc 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -57,12 +57,13 @@ config LEDS_AAT1290
depends on PINCTRL
help
This option enables support for the LEDs on the AAT1290.
+
config LEDS_APU
- tristate "Front panel LED support for PC Engines APU/APU2 boards"
+ tristate "Front panel LED support for PC Engines APU/APU2/APU3 boards"
depends on LEDS_CLASS
depends on X86 && DMI
help
- This driver makes the PC Engines APU/APU2 front panel LEDs
+ This driver makes the PC Engines APU/APU2/APU3 front panel LEDs
accessible from userspace programs through the LED subsystem.
To compile this driver as a module, choose M here: the
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 431123b048a2..17d73db1456e 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -103,15 +103,16 @@ ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
EXPORT_SYMBOL_GPL(led_trigger_show);
/* Caller must ensure led_cdev->trigger_lock held */
-void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
+int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
{
unsigned long flags;
char *event = NULL;
char *envp[2];
const char *name;
+ int ret;
if (!led_cdev->trigger && !trig)
- return;
+ return 0;
name = trig ? trig->name : "none";
event = kasprintf(GFP_KERNEL, "TRIGGER=%s", name);
@@ -126,7 +127,10 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
led_stop_software_blink(led_cdev);
if (led_cdev->trigger->deactivate)
led_cdev->trigger->deactivate(led_cdev);
+ device_remove_groups(led_cdev->dev, led_cdev->trigger->groups);
led_cdev->trigger = NULL;
+ led_cdev->trigger_data = NULL;
+ led_cdev->activated = false;
led_set_brightness(led_cdev, LED_OFF);
}
if (trig) {
@@ -134,8 +138,20 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
list_add_tail(&led_cdev->trig_list, &trig->led_cdevs);
write_unlock_irqrestore(&trig->leddev_list_lock, flags);
led_cdev->trigger = trig;
+
if (trig->activate)
- trig->activate(led_cdev);
+ ret = trig->activate(led_cdev);
+ else
+ ret = 0;
+
+ if (ret)
+ goto err_activate;
+
+ ret = device_add_groups(led_cdev->dev, trig->groups);
+ if (ret) {
+ dev_err(led_cdev->dev, "Failed to add trigger attributes\n");
+ goto err_add_groups;
+ }
}
if (event) {
@@ -146,6 +162,23 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
"%s: Error sending uevent\n", __func__);
kfree(event);
}
+
+ return 0;
+
+err_add_groups:
+
+ if (trig->deactivate)
+ trig->deactivate(led_cdev);
+err_activate:
+
+	write_lock_irqsave(&led_cdev->trigger->leddev_list_lock, flags);
+	list_del(&led_cdev->trig_list);
+	write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
+	led_cdev->trigger = NULL;
+	led_cdev->trigger_data = NULL;
+ led_set_brightness(led_cdev, LED_OFF);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(led_trigger_set);
diff --git a/drivers/leds/leds-apu.c b/drivers/leds/leds-apu.c
index 8c93d68964c7..8d42e46e2de3 100644
--- a/drivers/leds/leds-apu.c
+++ b/drivers/leds/leds-apu.c
@@ -102,6 +102,13 @@ static const struct apu_led_profile apu2_led_profile[] = {
{ "apu2:green:3", LED_OFF, APU2_FCH_GPIO_BASE + 70 * APU2_IOSIZE },
};
+/* Same as apu2_led_profile, but with "3" in the LED names. */
+static const struct apu_led_profile apu3_led_profile[] = {
+ { "apu3:green:1", LED_ON, APU2_FCH_GPIO_BASE + 68 * APU2_IOSIZE },
+ { "apu3:green:2", LED_OFF, APU2_FCH_GPIO_BASE + 69 * APU2_IOSIZE },
+ { "apu3:green:3", LED_OFF, APU2_FCH_GPIO_BASE + 70 * APU2_IOSIZE },
+};
+
static const struct dmi_system_id apu_led_dmi_table[] __initconst = {
{
.ident = "apu",
@@ -134,6 +141,30 @@ static const struct dmi_system_id apu_led_dmi_table[] __initconst = {
DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu2")
}
},
+ /* PC Engines APU3 with "Legacy" bios < 4.0.8 */
+ {
+ .ident = "apu3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "APU3")
+ }
+ },
+ /* PC Engines APU3 with "Legacy" bios >= 4.0.8 */
+ {
+ .ident = "apu3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "apu3")
+ }
+ },
+ /* PC Engines APU3 with "Mainline" bios */
+ {
+ .ident = "apu3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu3")
+ }
+ },
{}
};
MODULE_DEVICE_TABLE(dmi, apu_led_dmi_table);
@@ -235,6 +266,14 @@ static int __init apu_led_probe(struct platform_device *pdev)
apu_led->platform = APU2_LED_PLATFORM;
apu_led->num_led_instances = ARRAY_SIZE(apu2_led_profile);
apu_led->iosize = APU2_IOSIZE;
+ } else if (dmi_match(DMI_BOARD_NAME, "APU3") ||
+ dmi_match(DMI_BOARD_NAME, "apu3") ||
+ dmi_match(DMI_BOARD_NAME, "PC Engines apu3")) {
+ apu_led->profile = apu3_led_profile;
+ /* Otherwise identical to APU2. */
+ apu_led->platform = APU2_LED_PLATFORM;
+ apu_led->num_led_instances = ARRAY_SIZE(apu3_led_profile);
+ apu_led->iosize = APU2_IOSIZE;
}
spin_lock_init(&apu_led->lock);
@@ -259,7 +298,10 @@ static int __init apu_led_init(void)
if (!(dmi_match(DMI_PRODUCT_NAME, "APU") ||
dmi_match(DMI_PRODUCT_NAME, "APU2") ||
dmi_match(DMI_PRODUCT_NAME, "apu2") ||
- dmi_match(DMI_PRODUCT_NAME, "PC Engines apu2"))) {
+ dmi_match(DMI_PRODUCT_NAME, "PC Engines apu2") ||
+ dmi_match(DMI_PRODUCT_NAME, "APU3") ||
+ dmi_match(DMI_PRODUCT_NAME, "apu3") ||
+ dmi_match(DMI_PRODUCT_NAME, "PC Engines apu3"))) {
pr_err("Unknown PC Engines board: %s\n",
dmi_get_system_info(DMI_PRODUCT_NAME));
return -ENODEV;
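
The three DMI board-name spellings added above come from different firmware revisions of the same board, and dmi_match() does an exact string compare against the firmware-reported field, so each spelling needs its own entry. The table itself also feeds module autoloading via MODULE_DEVICE_TABLE(); the same probe-time decision could equally be table-driven, sketched here for illustration (the driver open-codes dmi_match() calls instead, and the pr_info() is purely illustrative):

	const struct dmi_system_id *id = dmi_first_match(apu_led_dmi_table);

	if (id)
		pr_info("matched PC Engines variant: %s\n", id->ident);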
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index 437173d1712c..4f413a7c5f05 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -1,17 +1,6 @@
-/*
- * TI lm3692x LED Driver
- *
- * Copyright (C) 2017 Texas Instruments
- *
- * Author: Dan Murphy <dmurphy@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * Data sheet is located
- * http://www.ti.com/lit/ds/snvsa29/snvsa29.pdf
- */
+// SPDX-License-Identifier: GPL-2.0
+// TI LM3692x LED chip family driver
+// Copyright (C) 2017-18 Texas Instruments Incorporated - http://www.ti.com/
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -26,6 +15,9 @@
#include <linux/slab.h>
#include <uapi/linux/uleds.h>
+#define LM36922_MODEL 0
+#define LM36923_MODEL 1
+
#define LM3692X_REV 0x0
#define LM3692X_RESET 0x1
#define LM3692X_EN 0x10
@@ -44,6 +36,9 @@
#define LM3692X_DEVICE_EN BIT(0)
#define LM3692X_LED1_EN BIT(1)
#define LM3692X_LED2_EN BIT(2)
+#define LM36923_LED3_EN BIT(3)
+#define LM3692X_ENABLE_MASK (LM3692X_DEVICE_EN | LM3692X_LED1_EN | \
+ LM3692X_LED2_EN | LM36923_LED3_EN)
/* Brightness Control Bits */
#define LM3692X_BL_ADJ_POL BIT(0)
@@ -109,6 +104,8 @@
* @enable_gpio - VDDIO/EN gpio to enable communication interface
* @regulator - LED supply regulator pointer
* @label - LED label
+ * @led_enable - LED string(s) to enable, taken from the DT "reg" property
+ * @model_id - Model ID of the current device
*/
struct lm3692x_led {
struct mutex lock;
@@ -118,6 +115,8 @@ struct lm3692x_led {
struct gpio_desc *enable_gpio;
struct regulator *regulator;
char label[LED_MAX_NAME_SIZE];
+ int led_enable;
+ int model_id;
};
static const struct reg_default lm3692x_reg_defs[] = {
@@ -200,6 +199,7 @@ out:
static int lm3692x_init(struct lm3692x_led *led)
{
+ int enable_state;
int ret;
if (led->regulator) {
@@ -226,9 +226,25 @@ static int lm3692x_init(struct lm3692x_led *led)
/*
* For glitch free operation, the following data should
- * only be written while device enable bit is 0
+ * only be written while LEDx enable bits are 0 and the device enable
+ * bit is set to 1,
* per Section 7.5.14 of the data sheet
*/
+ ret = regmap_write(led->regmap, LM3692X_EN, LM3692X_DEVICE_EN);
+ if (ret)
+ goto out;
+
+ /* Set the brightness to 0 so that the LEDs do not come on at
+ * full brightness when the device is enabled.
+ */
+ ret = regmap_write(led->regmap, LM3692X_BRT_MSB, 0);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_LSB, 0);
+ if (ret)
+ goto out;
+
ret = regmap_write(led->regmap, LM3692X_PWM_CTRL,
LM3692X_PWM_FILTER_100 | LM3692X_PWM_SAMP_24MHZ);
if (ret)
@@ -258,6 +274,38 @@ static int lm3692x_init(struct lm3692x_led *led)
if (ret)
goto out;
+ switch (led->led_enable) {
+ case 0:
+ default:
+ if (led->model_id == LM36923_MODEL)
+ enable_state = LM3692X_LED1_EN | LM3692X_LED2_EN |
+ LM36923_LED3_EN;
+ else
+ enable_state = LM3692X_LED1_EN | LM3692X_LED2_EN;
+
+ break;
+ case 1:
+ enable_state = LM3692X_LED1_EN;
+ break;
+ case 2:
+ enable_state = LM3692X_LED2_EN;
+ break;
+
+ case 3:
+ if (led->model_id == LM36923_MODEL) {
+ enable_state = LM36923_LED3_EN;
+ break;
+ }
+
+ ret = -EINVAL;
+ dev_err(&led->client->dev,
+ "LED3 sync not available on this device\n");
+ goto out;
+ }
+
+ ret = regmap_update_bits(led->regmap, LM3692X_EN, LM3692X_ENABLE_MASK,
+ enable_state | LM3692X_DEVICE_EN);
+
return ret;
out:
dev_err(&led->client->dev, "Fail writing initialization values\n");
@@ -274,52 +322,75 @@ out:
return ret;
}
-
-static int lm3692x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm3692x_probe_dt(struct lm3692x_led *led)
{
- int ret;
- struct lm3692x_led *led;
- struct device_node *np = client->dev.of_node;
- struct device_node *child_node;
+ struct fwnode_handle *child = NULL;
const char *name;
+ int ret;
- led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
- if (!led)
- return -ENOMEM;
-
- for_each_available_child_of_node(np, child_node) {
- led->led_dev.default_trigger = of_get_property(child_node,
- "linux,default-trigger",
- NULL);
-
- ret = of_property_read_string(child_node, "label", &name);
- if (!ret)
- snprintf(led->label, sizeof(led->label),
- "%s:%s", id->name, name);
- else
- snprintf(led->label, sizeof(led->label),
- "%s::backlight_cluster", id->name);
- };
-
- led->enable_gpio = devm_gpiod_get_optional(&client->dev,
+ led->enable_gpio = devm_gpiod_get_optional(&led->client->dev,
"enable", GPIOD_OUT_LOW);
if (IS_ERR(led->enable_gpio)) {
ret = PTR_ERR(led->enable_gpio);
- dev_err(&client->dev, "Failed to get enable gpio: %d\n", ret);
+ dev_err(&led->client->dev, "Failed to get enable gpio: %d\n",
+ ret);
return ret;
}
- led->regulator = devm_regulator_get(&client->dev, "vled");
+ led->regulator = devm_regulator_get(&led->client->dev, "vled");
if (IS_ERR(led->regulator))
led->regulator = NULL;
- led->client = client;
+ child = device_get_next_child_node(&led->client->dev, child);
+ if (!child) {
+ dev_err(&led->client->dev, "No LED Child node\n");
+ return -ENODEV;
+ }
+
+ fwnode_property_read_string(child, "linux,default-trigger",
+ &led->led_dev.default_trigger);
+
+ ret = fwnode_property_read_string(child, "label", &name);
+ if (ret)
+ snprintf(led->label, sizeof(led->label),
+ "%s::", led->client->name);
+ else
+ snprintf(led->label, sizeof(led->label),
+ "%s:%s", led->client->name, name);
+
+ ret = fwnode_property_read_u32(child, "reg", &led->led_enable);
+ if (ret) {
+ dev_err(&led->client->dev, "reg DT property missing\n");
+ return ret;
+ }
+
led->led_dev.name = led->label;
- led->led_dev.brightness_set_blocking = lm3692x_brightness_set;
- mutex_init(&led->lock);
+ ret = devm_led_classdev_register(&led->client->dev, &led->led_dev);
+ if (ret) {
+ dev_err(&led->client->dev, "led register err: %d\n", ret);
+ return ret;
+ }
+
+ led->led_dev.dev->of_node = to_of_node(child);
+
+ return 0;
+}
+static int lm3692x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lm3692x_led *led;
+ int ret;
+
+ led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ mutex_init(&led->lock);
+ led->client = client;
+ led->led_dev.brightness_set_blocking = lm3692x_brightness_set;
+ led->model_id = id->driver_data;
i2c_set_clientdata(client, led);
led->regmap = devm_regmap_init_i2c(client, &lm3692x_regmap_config);
@@ -330,15 +401,13 @@ static int lm3692x_probe(struct i2c_client *client,
return ret;
}
- ret = lm3692x_init(led);
+ ret = lm3692x_probe_dt(led);
if (ret)
return ret;
- ret = devm_led_classdev_register(&client->dev, &led->led_dev);
- if (ret) {
- dev_err(&client->dev, "led register err: %d\n", ret);
+ ret = lm3692x_init(led);
+ if (ret)
return ret;
- }
return 0;
}
@@ -348,6 +417,12 @@ static int lm3692x_remove(struct i2c_client *client)
struct lm3692x_led *led = i2c_get_clientdata(client);
int ret;
+ ret = regmap_update_bits(led->regmap, LM3692X_EN, LM3692X_DEVICE_EN, 0);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed to disable the device\n");
+ return ret;
+ }
+
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 0);
@@ -364,8 +439,8 @@ static int lm3692x_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm3692x_id[] = {
- { "lm36922", 0 },
- { "lm36923", 1 },
+ { "lm36922", LM36922_MODEL },
+ { "lm36923", LM36923_MODEL },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm3692x_id);
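
The led_enable value parsed from the DT "reg" property selects which current-sink enable bits accompany LM3692X_DEVICE_EN; sink 3 exists only on the LM36923. The mapping implemented by the new switch statement in lm3692x_init(), condensed into a hypothetical helper using the constants from this patch:

	static int lm3692x_enable_bits(int led_enable, int model_id)
	{
		switch (led_enable) {
		case 1:
			return LM3692X_LED1_EN;
		case 2:
			return LM3692X_LED2_EN;
		case 3:			/* LM36923 only */
			if (model_id == LM36923_MODEL)
				return LM36923_LED3_EN;
			return -EINVAL;
		default:		/* 0: enable every sink the part has */
			return model_id == LM36923_MODEL ?
				LM3692X_LED1_EN | LM3692X_LED2_EN | LM36923_LED3_EN :
				LM3692X_LED1_EN | LM3692X_LED2_EN;
		}
	}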
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 5ec730a31b65..de3623e0d094 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -1,32 +1,21 @@
-/*
- * LEDs driver for LT3593 controllers
- *
- * See the datasheet at http://cds.linear.com/docs/Datasheet/3593f.pdf
- *
- * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
- *
- * Based on leds-gpio.c,
- *
- * Copyright (C) 2007 8D Technologies inc.
- * Raphael Assenat <raph@8d.com>
- * Copyright (C) 2008 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2009,2018 Daniel Mack <daniel@zonque.org>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <uapi/linux/uleds.h>
struct lt3593_led_data {
+ char name[LED_MAX_NAME_SIZE];
struct led_classdev cdev;
- unsigned gpio;
+ struct gpio_desc *gpiod;
};
static int lt3593_led_set(struct led_classdev *led_cdev,
@@ -46,137 +35,168 @@ static int lt3593_led_set(struct led_classdev *led_cdev,
*/
if (value == 0) {
- gpio_set_value_cansleep(led_dat->gpio, 0);
+ gpiod_set_value_cansleep(led_dat->gpiod, 0);
return 0;
}
pulses = 32 - (value * 32) / 255;
if (pulses == 0) {
- gpio_set_value_cansleep(led_dat->gpio, 0);
+ gpiod_set_value_cansleep(led_dat->gpiod, 0);
mdelay(1);
- gpio_set_value_cansleep(led_dat->gpio, 1);
+ gpiod_set_value_cansleep(led_dat->gpiod, 1);
return 0;
}
- gpio_set_value_cansleep(led_dat->gpio, 1);
+ gpiod_set_value_cansleep(led_dat->gpiod, 1);
while (pulses--) {
- gpio_set_value_cansleep(led_dat->gpio, 0);
+ gpiod_set_value_cansleep(led_dat->gpiod, 0);
udelay(1);
- gpio_set_value_cansleep(led_dat->gpio, 1);
+ gpiod_set_value_cansleep(led_dat->gpiod, 1);
udelay(1);
}
return 0;
}
-static int create_lt3593_led(const struct gpio_led *template,
- struct lt3593_led_data *led_dat, struct device *parent)
+static struct lt3593_led_data *lt3593_led_probe_pdata(struct device *dev)
{
+ struct gpio_led_platform_data *pdata = dev_get_platdata(dev);
+ const struct gpio_led *template = &pdata->leds[0];
+ struct lt3593_led_data *led_data;
int ret, state;
- /* skip leds on GPIOs that aren't available */
- if (!gpio_is_valid(template->gpio)) {
- dev_info(parent, "%s: skipping unavailable LT3593 LED at gpio %d (%s)\n",
- KBUILD_MODNAME, template->gpio, template->name);
- return 0;
- }
+ if (pdata->num_leds != 1)
+ return ERR_PTR(-EINVAL);
- led_dat->cdev.name = template->name;
- led_dat->cdev.default_trigger = template->default_trigger;
- led_dat->gpio = template->gpio;
+ led_data = devm_kzalloc(dev, sizeof(*led_data), GFP_KERNEL);
+ if (!led_data)
+ return ERR_PTR(-ENOMEM);
- led_dat->cdev.brightness_set_blocking = lt3593_led_set;
+ led_data->cdev.name = template->name;
+ led_data->cdev.default_trigger = template->default_trigger;
+ led_data->cdev.brightness_set_blocking = lt3593_led_set;
state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
- led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
+ led_data->cdev.brightness = state ? LED_FULL : LED_OFF;
if (!template->retain_state_suspended)
- led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ led_data->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = devm_gpio_request_one(parent, template->gpio, state ?
+ ret = devm_gpio_request_one(dev, template->gpio, state ?
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
template->name);
if (ret < 0)
- return ret;
-
- ret = led_classdev_register(parent, &led_dat->cdev);
- if (ret < 0)
- return ret;
+ return ERR_PTR(ret);
- dev_info(parent, "%s: registered LT3593 LED '%s' at GPIO %d\n",
- KBUILD_MODNAME, template->name, template->gpio);
+ led_data->gpiod = gpio_to_desc(template->gpio);
+ if (!led_data->gpiod)
+ return ERR_PTR(-EPROBE_DEFER);
- return 0;
-}
+ ret = devm_led_classdev_register(dev, &led_data->cdev);
+ if (ret < 0)
+ return ERR_PTR(ret);
-static void delete_lt3593_led(struct lt3593_led_data *led)
-{
- if (!gpio_is_valid(led->gpio))
- return;
+ dev_info(dev, "registered LT3593 LED '%s' at GPIO %d\n",
+ template->name, template->gpio);
- led_classdev_unregister(&led->cdev);
+ return led_data;
}
static int lt3593_led_probe(struct platform_device *pdev)
{
- struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct lt3593_led_data *leds_data;
- int i, ret = 0;
+ struct device *dev = &pdev->dev;
+ struct lt3593_led_data *led_data;
+ struct fwnode_handle *child;
+ int ret, state = LEDS_GPIO_DEFSTATE_OFF;
+ enum gpiod_flags flags = GPIOD_OUT_LOW;
+ const char *tmp;
+
+ if (dev_get_platdata(dev)) {
+ led_data = lt3593_led_probe_pdata(dev);
+ if (IS_ERR(led_data))
+ return PTR_ERR(led_data);
+
+ goto out;
+ }
- if (!pdata)
- return -EBUSY;
+ if (!dev->of_node)
+ return -ENODEV;
- leds_data = devm_kcalloc(&pdev->dev,
- pdata->num_leds, sizeof(struct lt3593_led_data),
- GFP_KERNEL);
- if (!leds_data)
+ led_data = devm_kzalloc(dev, sizeof(*led_data), GFP_KERNEL);
+ if (!led_data)
return -ENOMEM;
- for (i = 0; i < pdata->num_leds; i++) {
- ret = create_lt3593_led(&pdata->leds[i], &leds_data[i],
- &pdev->dev);
- if (ret < 0)
- goto err;
+ if (device_get_child_node_count(dev) != 1) {
+ dev_err(dev, "Device must have exactly one LED sub-node\n");
+ return -EINVAL;
}
- platform_set_drvdata(pdev, leds_data);
+ led_data->gpiod = devm_gpiod_get(dev, "lltc,ctrl", 0);
+ if (IS_ERR(led_data->gpiod))
+ return PTR_ERR(led_data->gpiod);
- return 0;
+ child = device_get_next_child_node(dev, NULL);
-err:
- for (i = i - 1; i >= 0; i--)
- delete_lt3593_led(&leds_data[i]);
+ ret = fwnode_property_read_string(child, "label", &tmp);
+ if (ret < 0)
+ snprintf(led_data->name, sizeof(led_data->name),
+ "lt3593::");
+ else
+ snprintf(led_data->name, sizeof(led_data->name),
+ "lt3593:%s", tmp);
+
+ fwnode_property_read_string(child, "linux,default-trigger",
+ &led_data->cdev.default_trigger);
+
+ if (!fwnode_property_read_string(child, "default-state", &tmp)) {
+ if (!strcmp(tmp, "keep")) {
+ state = LEDS_GPIO_DEFSTATE_KEEP;
+ flags = GPIOD_ASIS;
+ } else if (!strcmp(tmp, "on")) {
+ state = LEDS_GPIO_DEFSTATE_ON;
+ flags = GPIOD_OUT_HIGH;
+ }
+ }
- return ret;
-}
+ led_data->cdev.name = led_data->name;
+ led_data->cdev.brightness_set_blocking = lt3593_led_set;
+ led_data->cdev.brightness = state ? LED_FULL : LED_OFF;
-static int lt3593_led_remove(struct platform_device *pdev)
-{
- int i;
- struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct lt3593_led_data *leds_data;
+ ret = devm_led_classdev_register(dev, &led_data->cdev);
+ if (ret < 0) {
+ fwnode_handle_put(child);
+ return ret;
+ }
- leds_data = platform_get_drvdata(pdev);
+ led_data->cdev.dev->of_node = dev->of_node;
- for (i = 0; i < pdata->num_leds; i++)
- delete_lt3593_led(&leds_data[i]);
+out:
+ platform_set_drvdata(pdev, led_data);
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id of_lt3593_leds_match[] = {
+ { .compatible = "lltc,lt3593", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_lt3593_leds_match);
+#endif
+
static struct platform_driver lt3593_led_driver = {
.probe = lt3593_led_probe,
- .remove = lt3593_led_remove,
.driver = {
.name = "leds-lt3593",
+ .of_match_table = of_match_ptr(of_lt3593_leds_match),
},
};
module_platform_driver(lt3593_led_driver);
-MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_AUTHOR("Daniel Mack <daniel@zonque.org>");
MODULE_DESCRIPTION("LED driver for LT3593 controllers");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:leds-lt3593");
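
lt3593_led_set() above maps the LED core's 255-level brightness onto the LT3593's 32 hardware levels: the CTRL line is pulsed low once per down-step from full scale, and holding it low shuts the converter off. A condensed sketch of that mapping (the helper name is hypothetical; the arithmetic is taken from the function):

	static int lt3593_pulses_for(enum led_brightness value)
	{
		if (value == 0)
			return -1;		/* hold CTRL low: LED off */

		return 32 - (value * 32) / 255;	/* 0 means full scale */
	}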
diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c
index 4edf74f1d6d4..8c019c28f9f5 100644
--- a/drivers/leds/leds-max8997.c
+++ b/drivers/leds/leds-max8997.c
@@ -268,7 +268,7 @@ static int max8997_led_probe(struct platform_device *pdev)
mode = pdata->led_pdata->mode[led->id];
brightness = pdata->led_pdata->brightness[led->id];
- max8997_led_set_mode(led, pdata->led_pdata->mode[led->id]);
+ max8997_led_set_mode(led, mode);
if (brightness > led->cdev.max_brightness)
brightness = led->cdev.max_brightness;
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 14fe5cd43232..a0a7dc2ef87c 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -42,8 +42,8 @@
struct ns2_led_data {
struct led_classdev cdev;
- unsigned cmd;
- unsigned slow;
+ unsigned int cmd;
+ unsigned int slow;
bool can_sleep;
unsigned char sata; /* True when SATA mode active. */
rwlock_t rw_lock; /* Lock GPIOs. */
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index a2559b4fdfff..4018af769969 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -10,7 +10,6 @@ if LEDS_TRIGGERS
config LEDS_TRIGGER_TIMER
tristate "LED Timer Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by a programmable timer
via sysfs. Some LED hardware can be programmed to start
@@ -21,7 +20,6 @@ config LEDS_TRIGGER_TIMER
config LEDS_TRIGGER_ONESHOT
tristate "LED One-shot Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to blink in one-shot pulses with parameters
controlled via sysfs. It's useful to notify the user on
@@ -36,7 +34,6 @@ config LEDS_TRIGGER_ONESHOT
config LEDS_TRIGGER_DISK
bool "LED Disk Trigger"
depends on IDE_GD_ATA || ATA
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by disk activity.
If unsure, say Y.
@@ -44,14 +41,12 @@ config LEDS_TRIGGER_DISK
config LEDS_TRIGGER_MTD
bool "LED MTD (NAND/NOR) Trigger"
depends on MTD
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by MTD activity.
If unsure, say N.
config LEDS_TRIGGER_HEARTBEAT
tristate "LED Heartbeat Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by a CPU load average.
The flash frequency is a hyperbolic function of the 1-minute
@@ -60,7 +55,6 @@ config LEDS_TRIGGER_HEARTBEAT
config LEDS_TRIGGER_BACKLIGHT
tristate "LED backlight Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled as a backlight device: they
turn off and on when the display is blanked and unblanked.
@@ -69,7 +63,6 @@ config LEDS_TRIGGER_BACKLIGHT
config LEDS_TRIGGER_CPU
bool "LED CPU Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by active CPUs. This shows
the active CPUs across an array of LEDs so you can see which
@@ -79,7 +72,6 @@ config LEDS_TRIGGER_CPU
config LEDS_TRIGGER_ACTIVITY
tristate "LED activity Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by an immediate CPU usage.
The flash frequency and duty cycle varies from faint flashes to
@@ -88,7 +80,6 @@ config LEDS_TRIGGER_ACTIVITY
config LEDS_TRIGGER_GPIO
tristate "LED GPIO Trigger"
- depends on LEDS_TRIGGERS
depends on GPIOLIB || COMPILE_TEST
help
This allows LEDs to be controlled by gpio events. It's good
@@ -101,7 +92,6 @@ config LEDS_TRIGGER_GPIO
config LEDS_TRIGGER_DEFAULT_ON
tristate "LED Default ON Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be initialised in the ON state.
If unsure, say Y.
@@ -111,7 +101,6 @@ comment "iptables trigger is under Netfilter config (LED target)"
config LEDS_TRIGGER_TRANSIENT
tristate "LED Transient Trigger"
- depends on LEDS_TRIGGERS
help
This allows one time activation of a transient state on
GPIO/PWM based hardware.
@@ -119,7 +108,6 @@ config LEDS_TRIGGER_TRANSIENT
config LEDS_TRIGGER_CAMERA
tristate "LED Camera Flash/Torch Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled as a camera flash/torch device.
This enables direct flash/torch on/off by the driver, kernel space.
@@ -127,7 +115,6 @@ config LEDS_TRIGGER_CAMERA
config LEDS_TRIGGER_PANIC
bool "LED Panic Trigger"
- depends on LEDS_TRIGGERS
help
This allows LEDs to be configured to blink on a kernel panic.
Enabling this option will allow to mark certain LEDs as panic indicators,
@@ -137,7 +124,7 @@ config LEDS_TRIGGER_PANIC
config LEDS_TRIGGER_NETDEV
tristate "LED Netdev Trigger"
- depends on NET && LEDS_TRIGGERS
+ depends on NET
help
This allows LEDs to be controlled by network device activity.
If unsure, say Y.
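
Every entry this hunk touches already sits inside the "if LEDS_TRIGGERS" block that opens drivers/leds/trigger/Kconfig (visible in the first hunk header), which is why the per-entry "depends on LEDS_TRIGGERS" lines were redundant: in Kconfig, an if/endif block adds its condition to each contained symbol. Sketched:

	if LEDS_TRIGGERS

	config LEDS_TRIGGER_TIMER
		tristate "LED Timer Trigger"
		# inherits "depends on LEDS_TRIGGERS" from the enclosing block

	endif # LEDS_TRIGGERS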
diff --git a/drivers/leds/trigger/ledtrig-activity.c b/drivers/leds/trigger/ledtrig-activity.c
index 5081894082bd..bcbf41c90c30 100644
--- a/drivers/leds/trigger/ledtrig-activity.c
+++ b/drivers/leds/trigger/ledtrig-activity.c
@@ -7,8 +7,8 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
@@ -37,7 +37,6 @@ static void led_activity_function(struct timer_list *t)
struct activity_data *activity_data = from_timer(activity_data, t,
timer);
struct led_classdev *led_cdev = activity_data->led_cdev;
- struct timespec boot_time;
unsigned int target;
unsigned int usage;
int delay;
@@ -57,8 +56,6 @@ static void led_activity_function(struct timer_list *t)
return;
}
- get_monotonic_boottime(&boot_time);
-
cpus = 0;
curr_used = 0;
@@ -76,7 +73,7 @@ static void led_activity_function(struct timer_list *t)
* down to 16us, ensuring we won't overflow 32-bit computations below
* even up to 3k CPUs, while keeping divides cheap on smaller systems.
*/
- curr_boot = timespec_to_ns(&boot_time) * cpus;
+ curr_boot = ktime_get_boot_ns() * cpus;
diff_boot = (curr_boot - activity_data->last_boot) >> 16;
diff_used = (curr_used - activity_data->last_used) >> 16;
activity_data->last_boot = curr_boot;
@@ -155,8 +152,7 @@ static void led_activity_function(struct timer_list *t)
static ssize_t led_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct activity_data *activity_data = led_cdev->trigger_data;
+ struct activity_data *activity_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", activity_data->invert);
}
@@ -165,8 +161,7 @@ static ssize_t led_invert_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct activity_data *activity_data = led_cdev->trigger_data;
+ struct activity_data *activity_data = led_trigger_get_drvdata(dev);
unsigned long state;
int ret;
@@ -181,21 +176,21 @@ static ssize_t led_invert_store(struct device *dev,
static DEVICE_ATTR(invert, 0644, led_invert_show, led_invert_store);
-static void activity_activate(struct led_classdev *led_cdev)
+static struct attribute *activity_led_attrs[] = {
+ &dev_attr_invert.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(activity_led);
+
+static int activity_activate(struct led_classdev *led_cdev)
{
struct activity_data *activity_data;
- int rc;
activity_data = kzalloc(sizeof(*activity_data), GFP_KERNEL);
if (!activity_data)
- return;
+ return -ENOMEM;
- led_cdev->trigger_data = activity_data;
- rc = device_create_file(led_cdev->dev, &dev_attr_invert);
- if (rc) {
- kfree(led_cdev->trigger_data);
- return;
- }
+ led_set_trigger_data(led_cdev, activity_data);
activity_data->led_cdev = led_cdev;
timer_setup(&activity_data->timer, led_activity_function, 0);
@@ -203,26 +198,24 @@ static void activity_activate(struct led_classdev *led_cdev)
led_cdev->blink_brightness = led_cdev->max_brightness;
led_activity_function(&activity_data->timer);
set_bit(LED_BLINK_SW, &led_cdev->work_flags);
- led_cdev->activated = true;
+
+ return 0;
}
static void activity_deactivate(struct led_classdev *led_cdev)
{
- struct activity_data *activity_data = led_cdev->trigger_data;
-
- if (led_cdev->activated) {
- del_timer_sync(&activity_data->timer);
- device_remove_file(led_cdev->dev, &dev_attr_invert);
- kfree(activity_data);
- clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
- led_cdev->activated = false;
- }
+ struct activity_data *activity_data = led_get_trigger_data(led_cdev);
+
+ del_timer_sync(&activity_data->timer);
+ kfree(activity_data);
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
}
static struct led_trigger activity_led_trigger = {
.name = "activity",
.activate = activity_activate,
.deactivate = activity_deactivate,
+ .groups = activity_led_groups,
};
static int activity_reboot_notifier(struct notifier_block *nb,
@@ -272,4 +265,4 @@ module_exit(activity_exit);
MODULE_AUTHOR("Willy Tarreau <w@1wt.eu>");
MODULE_DESCRIPTION("Activity LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
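
Besides the sysfs-group conversion, ledtrig-activity drops the struct timespec round-trip when sampling CLOCK_BOOTTIME: ktime_get_boot_ns() yields nanoseconds directly. A before/after sketch of the two call sequences:

	struct timespec ts;
	u64 now;

	get_monotonic_boottime(&ts);	/* old: fill a timespec... */
	now = timespec_to_ns(&ts);	/* ...then convert to ns */

	now = ktime_get_boot_ns();	/* new: one call, ns directly */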
diff --git a/drivers/leds/trigger/ledtrig-backlight.c b/drivers/leds/trigger/ledtrig-backlight.c
index 1ca1f1608f76..c2b57beef718 100644
--- a/drivers/leds/trigger/ledtrig-backlight.c
+++ b/drivers/leds/trigger/ledtrig-backlight.c
@@ -64,8 +64,7 @@ static int fb_notifier_callback(struct notifier_block *p,
static ssize_t bl_trig_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct bl_trig_notifier *n = led->trigger_data;
+ struct bl_trig_notifier *n = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", n->invert);
}
@@ -73,8 +72,8 @@ static ssize_t bl_trig_invert_show(struct device *dev,
static ssize_t bl_trig_invert_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t num)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct bl_trig_notifier *n = led->trigger_data;
+ struct led_classdev *led = led_trigger_get_led(dev);
+ struct bl_trig_notifier *n = led_trigger_get_drvdata(dev);
unsigned long invert;
int ret;
@@ -97,22 +96,22 @@ static ssize_t bl_trig_invert_store(struct device *dev,
}
static DEVICE_ATTR(inverted, 0644, bl_trig_invert_show, bl_trig_invert_store);
-static void bl_trig_activate(struct led_classdev *led)
+static struct attribute *bl_trig_attrs[] = {
+ &dev_attr_inverted.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(bl_trig);
+
+static int bl_trig_activate(struct led_classdev *led)
{
int ret;
struct bl_trig_notifier *n;
n = kzalloc(sizeof(struct bl_trig_notifier), GFP_KERNEL);
- led->trigger_data = n;
- if (!n) {
- dev_err(led->dev, "unable to allocate backlight trigger\n");
- return;
- }
-
- ret = device_create_file(led->dev, &dev_attr_inverted);
- if (ret)
- goto err_invert;
+ if (!n)
+ return -ENOMEM;
+ led_set_trigger_data(led, n);
n->led = led;
n->brightness = led->brightness;
@@ -122,46 +121,25 @@ static void bl_trig_activate(struct led_classdev *led)
ret = fb_register_client(&n->notifier);
if (ret)
dev_err(led->dev, "unable to register backlight trigger\n");
- led->activated = true;
- return;
-
-err_invert:
- led->trigger_data = NULL;
- kfree(n);
+ return 0;
}
static void bl_trig_deactivate(struct led_classdev *led)
{
- struct bl_trig_notifier *n =
- (struct bl_trig_notifier *) led->trigger_data;
-
- if (led->activated) {
- device_remove_file(led->dev, &dev_attr_inverted);
- fb_unregister_client(&n->notifier);
- kfree(n);
- led->activated = false;
- }
+ struct bl_trig_notifier *n = led_get_trigger_data(led);
+
+ fb_unregister_client(&n->notifier);
+ kfree(n);
}
static struct led_trigger bl_led_trigger = {
.name = "backlight",
.activate = bl_trig_activate,
- .deactivate = bl_trig_deactivate
+ .deactivate = bl_trig_deactivate,
+ .groups = bl_trig_groups,
};
-
-static int __init bl_trig_init(void)
-{
- return led_trigger_register(&bl_led_trigger);
-}
-
-static void __exit bl_trig_exit(void)
-{
- led_trigger_unregister(&bl_led_trigger);
-}
-
-module_init(bl_trig_init);
-module_exit(bl_trig_exit);
+module_led_trigger(bl_led_trigger);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_DESCRIPTION("Backlight emulation LED trigger");
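
The attribute bodies in this and the following triggers switch from dev_get_drvdata() plus ->trigger_data to two helpers: led_trigger_get_led() recovers the led_classdev from the sysfs device, and led_trigger_get_drvdata() recovers the trigger's private data. The resulting pattern, with a hypothetical my_trig_data:

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		struct my_trig_data *data = led_trigger_get_drvdata(dev);

		return sprintf(buf, "%u\n", data->foo);
	}

	static ssize_t foo_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
	{
		struct led_classdev *led_cdev = led_trigger_get_led(dev);
		struct my_trig_data *data = led_trigger_get_drvdata(dev);

		/* ... parse buf, update data, poke led_cdev ... */
		return size;
	}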
diff --git a/drivers/leds/trigger/ledtrig-camera.c b/drivers/leds/trigger/ledtrig-camera.c
index 9bd73a8bad5c..091a09a20c58 100644
--- a/drivers/leds/trigger/ledtrig-camera.c
+++ b/drivers/leds/trigger/ledtrig-camera.c
@@ -10,7 +10,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
@@ -54,4 +53,4 @@ module_exit(ledtrig_camera_exit);
MODULE_DESCRIPTION("LED Trigger for Camera Flash/Torch Control");
MODULE_AUTHOR("Milo Kim");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-default-on.c b/drivers/leds/trigger/ledtrig-default-on.c
index ff455cb46680..7f6d9219711e 100644
--- a/drivers/leds/trigger/ledtrig-default-on.c
+++ b/drivers/leds/trigger/ledtrig-default-on.c
@@ -8,7 +8,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
@@ -17,29 +16,18 @@
#include <linux/leds.h>
#include "../leds.h"
-static void defon_trig_activate(struct led_classdev *led_cdev)
+static int defon_trig_activate(struct led_classdev *led_cdev)
{
led_set_brightness_nosleep(led_cdev, led_cdev->max_brightness);
+ return 0;
}
static struct led_trigger defon_led_trigger = {
.name = "default-on",
.activate = defon_trig_activate,
};
-
-static int __init defon_trig_init(void)
-{
- return led_trigger_register(&defon_led_trigger);
-}
-
-static void __exit defon_trig_exit(void)
-{
- led_trigger_unregister(&defon_led_trigger);
-}
-
-module_init(defon_trig_init);
-module_exit(defon_trig_exit);
+module_led_trigger(defon_led_trigger);
MODULE_AUTHOR("Nick Forbes <nick.forbes@incepta.com>");
MODULE_DESCRIPTION("Default-ON LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-gpio.c b/drivers/leds/trigger/ledtrig-gpio.c
index 8891e88d54dd..ed0db8ed825f 100644
--- a/drivers/leds/trigger/ledtrig-gpio.c
+++ b/drivers/leds/trigger/ledtrig-gpio.c
@@ -6,7 +6,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
@@ -29,7 +28,7 @@ struct gpio_trig_data {
static irqreturn_t gpio_trig_irq(int irq, void *_led)
{
struct led_classdev *led = _led;
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_get_trigger_data(led);
int tmp;
tmp = gpio_get_value_cansleep(gpio_data->gpio);
@@ -52,8 +51,7 @@ static irqreturn_t gpio_trig_irq(int irq, void *_led)
static ssize_t gpio_trig_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", gpio_data->desired_brightness);
}
@@ -61,8 +59,7 @@ static ssize_t gpio_trig_brightness_show(struct device *dev,
static ssize_t gpio_trig_brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
unsigned desired_brightness;
int ret;
@@ -82,8 +79,7 @@ static DEVICE_ATTR(desired_brightness, 0644, gpio_trig_brightness_show,
static ssize_t gpio_trig_inverted_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", gpio_data->inverted);
}
@@ -91,8 +87,8 @@ static ssize_t gpio_trig_inverted_show(struct device *dev,
static ssize_t gpio_trig_inverted_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct led_classdev *led = led_trigger_get_led(dev);
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
unsigned long inverted;
int ret;
@@ -116,8 +112,7 @@ static DEVICE_ATTR(inverted, 0644, gpio_trig_inverted_show,
static ssize_t gpio_trig_gpio_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", gpio_data->gpio);
}
@@ -125,8 +120,8 @@ static ssize_t gpio_trig_gpio_show(struct device *dev,
static ssize_t gpio_trig_gpio_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
- struct led_classdev *led = dev_get_drvdata(dev);
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct led_classdev *led = led_trigger_get_led(dev);
+ struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
unsigned gpio;
int ret;
@@ -163,76 +158,45 @@ static ssize_t gpio_trig_gpio_store(struct device *dev,
}
static DEVICE_ATTR(gpio, 0644, gpio_trig_gpio_show, gpio_trig_gpio_store);
-static void gpio_trig_activate(struct led_classdev *led)
+static struct attribute *gpio_trig_attrs[] = {
+ &dev_attr_desired_brightness.attr,
+ &dev_attr_inverted.attr,
+ &dev_attr_gpio.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(gpio_trig);
+
+static int gpio_trig_activate(struct led_classdev *led)
{
struct gpio_trig_data *gpio_data;
- int ret;
gpio_data = kzalloc(sizeof(*gpio_data), GFP_KERNEL);
if (!gpio_data)
- return;
-
- ret = device_create_file(led->dev, &dev_attr_gpio);
- if (ret)
- goto err_gpio;
-
- ret = device_create_file(led->dev, &dev_attr_inverted);
- if (ret)
- goto err_inverted;
-
- ret = device_create_file(led->dev, &dev_attr_desired_brightness);
- if (ret)
- goto err_brightness;
+ return -ENOMEM;
gpio_data->led = led;
- led->trigger_data = gpio_data;
- led->activated = true;
-
- return;
-
-err_brightness:
- device_remove_file(led->dev, &dev_attr_inverted);
+ led_set_trigger_data(led, gpio_data);
-err_inverted:
- device_remove_file(led->dev, &dev_attr_gpio);
-
-err_gpio:
- kfree(gpio_data);
+ return 0;
}
static void gpio_trig_deactivate(struct led_classdev *led)
{
- struct gpio_trig_data *gpio_data = led->trigger_data;
+ struct gpio_trig_data *gpio_data = led_get_trigger_data(led);
- if (led->activated) {
- device_remove_file(led->dev, &dev_attr_gpio);
- device_remove_file(led->dev, &dev_attr_inverted);
- device_remove_file(led->dev, &dev_attr_desired_brightness);
- if (gpio_data->gpio != 0)
- free_irq(gpio_to_irq(gpio_data->gpio), led);
- kfree(gpio_data);
- led->activated = false;
- }
+ if (gpio_data->gpio != 0)
+ free_irq(gpio_to_irq(gpio_data->gpio), led);
+ kfree(gpio_data);
}
static struct led_trigger gpio_led_trigger = {
.name = "gpio",
.activate = gpio_trig_activate,
.deactivate = gpio_trig_deactivate,
+ .groups = gpio_trig_groups,
};
-
-static int __init gpio_trig_init(void)
-{
- return led_trigger_register(&gpio_led_trigger);
-}
-module_init(gpio_trig_init);
-
-static void __exit gpio_trig_exit(void)
-{
- led_trigger_unregister(&gpio_led_trigger);
-}
-module_exit(gpio_trig_exit);
+module_led_trigger(gpio_led_trigger);
MODULE_AUTHOR("Felipe Balbi <me@felipebalbi.com>");
MODULE_DESCRIPTION("GPIO LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index f0896de410b8..7a2b12e19329 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -9,8 +9,8 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -96,8 +96,8 @@ static void led_heartbeat_function(struct timer_list *t)
static ssize_t led_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
+ struct heartbeat_trig_data *heartbeat_data =
+ led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", heartbeat_data->invert);
}
@@ -105,8 +105,8 @@ static ssize_t led_invert_show(struct device *dev,
static ssize_t led_invert_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
+ struct heartbeat_trig_data *heartbeat_data =
+ led_trigger_get_drvdata(dev);
unsigned long state;
int ret;
@@ -121,22 +121,22 @@ static ssize_t led_invert_store(struct device *dev,
static DEVICE_ATTR(invert, 0644, led_invert_show, led_invert_store);
-static void heartbeat_trig_activate(struct led_classdev *led_cdev)
+static struct attribute *heartbeat_trig_attrs[] = {
+ &dev_attr_invert.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(heartbeat_trig);
+
+static int heartbeat_trig_activate(struct led_classdev *led_cdev)
{
struct heartbeat_trig_data *heartbeat_data;
- int rc;
heartbeat_data = kzalloc(sizeof(*heartbeat_data), GFP_KERNEL);
if (!heartbeat_data)
- return;
+ return -ENOMEM;
- led_cdev->trigger_data = heartbeat_data;
+ led_set_trigger_data(led_cdev, heartbeat_data);
heartbeat_data->led_cdev = led_cdev;
- rc = device_create_file(led_cdev->dev, &dev_attr_invert);
- if (rc) {
- kfree(led_cdev->trigger_data);
- return;
- }
timer_setup(&heartbeat_data->timer, led_heartbeat_function, 0);
heartbeat_data->phase = 0;
@@ -144,26 +144,25 @@ static void heartbeat_trig_activate(struct led_classdev *led_cdev)
led_cdev->blink_brightness = led_cdev->max_brightness;
led_heartbeat_function(&heartbeat_data->timer);
set_bit(LED_BLINK_SW, &led_cdev->work_flags);
- led_cdev->activated = true;
+
+ return 0;
}
static void heartbeat_trig_deactivate(struct led_classdev *led_cdev)
{
- struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
-
- if (led_cdev->activated) {
- del_timer_sync(&heartbeat_data->timer);
- device_remove_file(led_cdev->dev, &dev_attr_invert);
- kfree(heartbeat_data);
- clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
- led_cdev->activated = false;
- }
+ struct heartbeat_trig_data *heartbeat_data =
+ led_get_trigger_data(led_cdev);
+
+ del_timer_sync(&heartbeat_data->timer);
+ kfree(heartbeat_data);
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
}
static struct led_trigger heartbeat_led_trigger = {
.name = "heartbeat",
.activate = heartbeat_trig_activate,
.deactivate = heartbeat_trig_deactivate,
+ .groups = heartbeat_trig_groups,
};
static int heartbeat_reboot_notifier(struct notifier_block *nb,
@@ -213,4 +212,4 @@ module_exit(heartbeat_trig_exit);
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("Heartbeat LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 6df4781a6308..3dd3ed46d473 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -94,8 +94,7 @@ static void set_baseline_state(struct led_netdev_data *trigger_data)
static ssize_t device_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
ssize_t len;
spin_lock_bh(&trigger_data->lock);
@@ -109,8 +108,7 @@ static ssize_t device_name_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
if (size >= IFNAMSIZ)
return -EINVAL;
@@ -150,8 +148,7 @@ static DEVICE_ATTR_RW(device_name);
static ssize_t netdev_led_attr_show(struct device *dev, char *buf,
enum netdev_led_attr attr)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
int bit;
switch (attr) {
@@ -174,8 +171,7 @@ static ssize_t netdev_led_attr_show(struct device *dev, char *buf,
static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
size_t size, enum netdev_led_attr attr)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
unsigned long state;
int ret;
int bit;
@@ -255,8 +251,7 @@ static DEVICE_ATTR_RW(rx);
static ssize_t interval_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n",
jiffies_to_msecs(atomic_read(&trigger_data->interval)));
@@ -266,8 +261,7 @@ static ssize_t interval_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
unsigned long value;
int ret;
@@ -288,15 +282,23 @@ static ssize_t interval_store(struct device *dev,
static DEVICE_ATTR_RW(interval);
+static struct attribute *netdev_trig_attrs[] = {
+ &dev_attr_device_name.attr,
+ &dev_attr_link.attr,
+ &dev_attr_rx.attr,
+ &dev_attr_tx.attr,
+ &dev_attr_interval.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(netdev_trig);
+
static int netdev_trig_notify(struct notifier_block *nb,
unsigned long evt, void *dv)
{
struct net_device *dev =
netdev_notifier_info_to_dev((struct netdev_notifier_info *)dv);
- struct led_netdev_data *trigger_data = container_of(nb,
- struct
- led_netdev_data,
- notifier);
+ struct led_netdev_data *trigger_data =
+ container_of(nb, struct led_netdev_data, notifier);
if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
&& evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
@@ -342,10 +344,8 @@ static int netdev_trig_notify(struct notifier_block *nb,
/* here's the real work! */
static void netdev_trig_work(struct work_struct *work)
{
- struct led_netdev_data *trigger_data = container_of(work,
- struct
- led_netdev_data,
- work.work);
+ struct led_netdev_data *trigger_data =
+ container_of(work, struct led_netdev_data, work.work);
struct rtnl_link_stats64 *dev_stats;
unsigned int new_activity;
struct rtnl_link_stats64 temp;
@@ -388,14 +388,14 @@ static void netdev_trig_work(struct work_struct *work)
(atomic_read(&trigger_data->interval)*2));
}
-static void netdev_trig_activate(struct led_classdev *led_cdev)
+static int netdev_trig_activate(struct led_classdev *led_cdev)
{
struct led_netdev_data *trigger_data;
int rc;
trigger_data = kzalloc(sizeof(struct led_netdev_data), GFP_KERNEL);
if (!trigger_data)
- return;
+ return -ENOMEM;
spin_lock_init(&trigger_data->lock);
@@ -412,69 +412,34 @@ static void netdev_trig_activate(struct led_classdev *led_cdev)
atomic_set(&trigger_data->interval, msecs_to_jiffies(50));
trigger_data->last_activity = 0;
- led_cdev->trigger_data = trigger_data;
+ led_set_trigger_data(led_cdev, trigger_data);
- rc = device_create_file(led_cdev->dev, &dev_attr_device_name);
- if (rc)
- goto err_out;
- rc = device_create_file(led_cdev->dev, &dev_attr_link);
- if (rc)
- goto err_out_device_name;
- rc = device_create_file(led_cdev->dev, &dev_attr_rx);
- if (rc)
- goto err_out_link;
- rc = device_create_file(led_cdev->dev, &dev_attr_tx);
- if (rc)
- goto err_out_rx;
- rc = device_create_file(led_cdev->dev, &dev_attr_interval);
- if (rc)
- goto err_out_tx;
rc = register_netdevice_notifier(&trigger_data->notifier);
if (rc)
- goto err_out_interval;
- return;
-
-err_out_interval:
- device_remove_file(led_cdev->dev, &dev_attr_interval);
-err_out_tx:
- device_remove_file(led_cdev->dev, &dev_attr_tx);
-err_out_rx:
- device_remove_file(led_cdev->dev, &dev_attr_rx);
-err_out_link:
- device_remove_file(led_cdev->dev, &dev_attr_link);
-err_out_device_name:
- device_remove_file(led_cdev->dev, &dev_attr_device_name);
-err_out:
- led_cdev->trigger_data = NULL;
- kfree(trigger_data);
+ kfree(trigger_data);
+
+ return rc;
}
static void netdev_trig_deactivate(struct led_classdev *led_cdev)
{
- struct led_netdev_data *trigger_data = led_cdev->trigger_data;
-
- if (trigger_data) {
- unregister_netdevice_notifier(&trigger_data->notifier);
+ struct led_netdev_data *trigger_data = led_get_trigger_data(led_cdev);
- device_remove_file(led_cdev->dev, &dev_attr_device_name);
- device_remove_file(led_cdev->dev, &dev_attr_link);
- device_remove_file(led_cdev->dev, &dev_attr_rx);
- device_remove_file(led_cdev->dev, &dev_attr_tx);
- device_remove_file(led_cdev->dev, &dev_attr_interval);
+ unregister_netdevice_notifier(&trigger_data->notifier);
- cancel_delayed_work_sync(&trigger_data->work);
+ cancel_delayed_work_sync(&trigger_data->work);
- if (trigger_data->net_dev)
- dev_put(trigger_data->net_dev);
+ if (trigger_data->net_dev)
+ dev_put(trigger_data->net_dev);
- kfree(trigger_data);
- }
+ kfree(trigger_data);
}
static struct led_trigger netdev_led_trigger = {
.name = "netdev",
.activate = netdev_trig_activate,
.deactivate = netdev_trig_deactivate,
+ .groups = netdev_trig_groups,
};
static int __init netdev_trig_init(void)
diff --git a/drivers/leds/trigger/ledtrig-oneshot.c b/drivers/leds/trigger/ledtrig-oneshot.c
index b8ea9f0f1e19..95c9be4b6e7e 100644
--- a/drivers/leds/trigger/ledtrig-oneshot.c
+++ b/drivers/leds/trigger/ledtrig-oneshot.c
@@ -8,7 +8,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
@@ -29,8 +28,8 @@ struct oneshot_trig_data {
static ssize_t led_shot(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct oneshot_trig_data *oneshot_data = led_cdev->trigger_data;
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
+ struct oneshot_trig_data *oneshot_data = led_trigger_get_drvdata(dev);
led_blink_set_oneshot(led_cdev,
&led_cdev->blink_delay_on, &led_cdev->blink_delay_off,
@@ -42,8 +41,7 @@ static ssize_t led_shot(struct device *dev,
static ssize_t led_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct oneshot_trig_data *oneshot_data = led_cdev->trigger_data;
+ struct oneshot_trig_data *oneshot_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%u\n", oneshot_data->invert);
}
@@ -51,8 +49,8 @@ static ssize_t led_invert_show(struct device *dev,
static ssize_t led_invert_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct oneshot_trig_data *oneshot_data = led_cdev->trigger_data;
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
+ struct oneshot_trig_data *oneshot_data = led_trigger_get_drvdata(dev);
unsigned long state;
int ret;
@@ -73,7 +71,7 @@ static ssize_t led_invert_store(struct device *dev,
static ssize_t led_delay_on_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
return sprintf(buf, "%lu\n", led_cdev->blink_delay_on);
}
@@ -81,7 +79,7 @@ static ssize_t led_delay_on_show(struct device *dev,
static ssize_t led_delay_on_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
unsigned long state;
int ret;
@@ -93,10 +91,11 @@ static ssize_t led_delay_on_store(struct device *dev,
return size;
}
+
static ssize_t led_delay_off_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
return sprintf(buf, "%lu\n", led_cdev->blink_delay_off);
}
@@ -104,7 +103,7 @@ static ssize_t led_delay_off_show(struct device *dev,
static ssize_t led_delay_off_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
unsigned long state;
int ret;
@@ -122,59 +121,36 @@ static DEVICE_ATTR(delay_off, 0644, led_delay_off_show, led_delay_off_store);
static DEVICE_ATTR(invert, 0644, led_invert_show, led_invert_store);
static DEVICE_ATTR(shot, 0200, NULL, led_shot);
-static void oneshot_trig_activate(struct led_classdev *led_cdev)
+static struct attribute *oneshot_trig_attrs[] = {
+ &dev_attr_delay_on.attr,
+ &dev_attr_delay_off.attr,
+ &dev_attr_invert.attr,
+ &dev_attr_shot.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(oneshot_trig);
+
+static int oneshot_trig_activate(struct led_classdev *led_cdev)
{
struct oneshot_trig_data *oneshot_data;
- int rc;
oneshot_data = kzalloc(sizeof(*oneshot_data), GFP_KERNEL);
if (!oneshot_data)
- return;
-
- led_cdev->trigger_data = oneshot_data;
-
- rc = device_create_file(led_cdev->dev, &dev_attr_delay_on);
- if (rc)
- goto err_out_trig_data;
- rc = device_create_file(led_cdev->dev, &dev_attr_delay_off);
- if (rc)
- goto err_out_delayon;
- rc = device_create_file(led_cdev->dev, &dev_attr_invert);
- if (rc)
- goto err_out_delayoff;
- rc = device_create_file(led_cdev->dev, &dev_attr_shot);
- if (rc)
- goto err_out_invert;
+ return -ENOMEM;
+
+ led_set_trigger_data(led_cdev, oneshot_data);
led_cdev->blink_delay_on = DEFAULT_DELAY;
led_cdev->blink_delay_off = DEFAULT_DELAY;
- led_cdev->activated = true;
-
- return;
-
-err_out_invert:
- device_remove_file(led_cdev->dev, &dev_attr_invert);
-err_out_delayoff:
- device_remove_file(led_cdev->dev, &dev_attr_delay_off);
-err_out_delayon:
- device_remove_file(led_cdev->dev, &dev_attr_delay_on);
-err_out_trig_data:
- kfree(led_cdev->trigger_data);
+ return 0;
}
static void oneshot_trig_deactivate(struct led_classdev *led_cdev)
{
- struct oneshot_trig_data *oneshot_data = led_cdev->trigger_data;
+ struct oneshot_trig_data *oneshot_data = led_get_trigger_data(led_cdev);
- if (led_cdev->activated) {
- device_remove_file(led_cdev->dev, &dev_attr_delay_on);
- device_remove_file(led_cdev->dev, &dev_attr_delay_off);
- device_remove_file(led_cdev->dev, &dev_attr_invert);
- device_remove_file(led_cdev->dev, &dev_attr_shot);
- kfree(oneshot_data);
- led_cdev->activated = false;
- }
+ kfree(oneshot_data);
/* Stop blinking */
led_set_brightness(led_cdev, LED_OFF);
@@ -184,20 +160,9 @@ static struct led_trigger oneshot_led_trigger = {
.name = "oneshot",
.activate = oneshot_trig_activate,
.deactivate = oneshot_trig_deactivate,
+ .groups = oneshot_trig_groups,
};
-
-static int __init oneshot_trig_init(void)
-{
- return led_trigger_register(&oneshot_led_trigger);
-}
-
-static void __exit oneshot_trig_exit(void)
-{
- led_trigger_unregister(&oneshot_led_trigger);
-}
-
-module_init(oneshot_trig_init);
-module_exit(oneshot_trig_exit);
+module_led_trigger(oneshot_led_trigger);
MODULE_AUTHOR("Fabio Baltieri <fabio.baltieri@gmail.com>");
MODULE_DESCRIPTION("One-shot LED trigger");
diff --git a/drivers/leds/trigger/ledtrig-timer.c b/drivers/leds/trigger/ledtrig-timer.c
index 8d09327b5719..7c14983781ee 100644
--- a/drivers/leds/trigger/ledtrig-timer.c
+++ b/drivers/leds/trigger/ledtrig-timer.c
@@ -8,7 +8,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
@@ -21,7 +20,7 @@
static ssize_t led_delay_on_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
return sprintf(buf, "%lu\n", led_cdev->blink_delay_on);
}
@@ -29,7 +28,7 @@ static ssize_t led_delay_on_show(struct device *dev,
static ssize_t led_delay_on_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
unsigned long state;
ssize_t ret = -EINVAL;
@@ -46,7 +45,7 @@ static ssize_t led_delay_on_store(struct device *dev,
static ssize_t led_delay_off_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
return sprintf(buf, "%lu\n", led_cdev->blink_delay_off);
}
@@ -54,7 +53,7 @@ static ssize_t led_delay_off_show(struct device *dev,
static ssize_t led_delay_off_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
unsigned long state;
ssize_t ret = -EINVAL;
@@ -71,37 +70,23 @@ static ssize_t led_delay_off_store(struct device *dev,
static DEVICE_ATTR(delay_on, 0644, led_delay_on_show, led_delay_on_store);
static DEVICE_ATTR(delay_off, 0644, led_delay_off_show, led_delay_off_store);
-static void timer_trig_activate(struct led_classdev *led_cdev)
-{
- int rc;
-
- led_cdev->trigger_data = NULL;
-
- rc = device_create_file(led_cdev->dev, &dev_attr_delay_on);
- if (rc)
- return;
- rc = device_create_file(led_cdev->dev, &dev_attr_delay_off);
- if (rc)
- goto err_out_delayon;
+static struct attribute *timer_trig_attrs[] = {
+ &dev_attr_delay_on.attr,
+ &dev_attr_delay_off.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(timer_trig);
+static int timer_trig_activate(struct led_classdev *led_cdev)
+{
led_blink_set(led_cdev, &led_cdev->blink_delay_on,
&led_cdev->blink_delay_off);
- led_cdev->activated = true;
- return;
-
-err_out_delayon:
- device_remove_file(led_cdev->dev, &dev_attr_delay_on);
+ return 0;
}
static void timer_trig_deactivate(struct led_classdev *led_cdev)
{
- if (led_cdev->activated) {
- device_remove_file(led_cdev->dev, &dev_attr_delay_on);
- device_remove_file(led_cdev->dev, &dev_attr_delay_off);
- led_cdev->activated = false;
- }
-
/* Stop blinking */
led_set_brightness(led_cdev, LED_OFF);
}
@@ -110,21 +95,10 @@ static struct led_trigger timer_led_trigger = {
.name = "timer",
.activate = timer_trig_activate,
.deactivate = timer_trig_deactivate,
+ .groups = timer_trig_groups,
};
-
-static int __init timer_trig_init(void)
-{
- return led_trigger_register(&timer_led_trigger);
-}
-
-static void __exit timer_trig_exit(void)
-{
- led_trigger_unregister(&timer_led_trigger);
-}
-
-module_init(timer_trig_init);
-module_exit(timer_trig_exit);
+module_led_trigger(timer_led_trigger);
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("Timer LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
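
module_led_trigger(), used here and in the other single-trigger modules above, replaces the hand-rolled init/exit pair; at the time of this series it is a module_driver() wrapper in include/linux/leds.h, expanding to roughly the boilerplate the removed lines contained:

	static int __init timer_trig_init(void)
	{
		return led_trigger_register(&timer_led_trigger);
	}
	module_init(timer_trig_init);

	static void __exit timer_trig_exit(void)
	{
		led_trigger_unregister(&timer_led_trigger);
	}
	module_exit(timer_trig_exit);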
diff --git a/drivers/leds/trigger/ledtrig-transient.c b/drivers/leds/trigger/ledtrig-transient.c
index 9d1769073562..a80bb82aacc2 100644
--- a/drivers/leds/trigger/ledtrig-transient.c
+++ b/drivers/leds/trigger/ledtrig-transient.c
@@ -42,8 +42,8 @@ static void transient_timer_function(struct timer_list *t)
static ssize_t transient_activate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct transient_trig_data *transient_data =
+ led_trigger_get_drvdata(dev);
return sprintf(buf, "%d\n", transient_data->activate);
}
@@ -51,8 +51,9 @@ static ssize_t transient_activate_show(struct device *dev,
static ssize_t transient_activate_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct led_classdev *led_cdev = led_trigger_get_led(dev);
+ struct transient_trig_data *transient_data =
+ led_trigger_get_drvdata(dev);
unsigned long state;
ssize_t ret;
@@ -94,8 +95,7 @@ static ssize_t transient_activate_store(struct device *dev,
static ssize_t transient_duration_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct transient_trig_data *transient_data = led_trigger_get_drvdata(dev);
return sprintf(buf, "%lu\n", transient_data->duration);
}
@@ -103,8 +103,8 @@ static ssize_t transient_duration_show(struct device *dev,
static ssize_t transient_duration_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct transient_trig_data *transient_data =
+ led_trigger_get_drvdata(dev);
unsigned long state;
ssize_t ret;
@@ -119,8 +119,8 @@ static ssize_t transient_duration_store(struct device *dev,
static ssize_t transient_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct transient_trig_data *transient_data =
+ led_trigger_get_drvdata(dev);
int state;
state = (transient_data->state == LED_FULL) ? 1 : 0;
@@ -130,8 +130,8 @@ static ssize_t transient_state_show(struct device *dev,
static ssize_t transient_state_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct transient_trig_data *transient_data =
+ led_trigger_get_drvdata(dev);
unsigned long state;
ssize_t ret;
@@ -152,82 +152,46 @@ static DEVICE_ATTR(duration, 0644, transient_duration_show,
transient_duration_store);
static DEVICE_ATTR(state, 0644, transient_state_show, transient_state_store);
-static void transient_trig_activate(struct led_classdev *led_cdev)
+static struct attribute *transient_trig_attrs[] = {
+ &dev_attr_activate.attr,
+ &dev_attr_duration.attr,
+ &dev_attr_state.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(transient_trig);
+
+static int transient_trig_activate(struct led_classdev *led_cdev)
{
- int rc;
struct transient_trig_data *tdata;
tdata = kzalloc(sizeof(struct transient_trig_data), GFP_KERNEL);
- if (!tdata) {
- dev_err(led_cdev->dev,
- "unable to allocate transient trigger\n");
- return;
- }
- led_cdev->trigger_data = tdata;
- tdata->led_cdev = led_cdev;
+ if (!tdata)
+ return -ENOMEM;
- rc = device_create_file(led_cdev->dev, &dev_attr_activate);
- if (rc)
- goto err_out;
-
- rc = device_create_file(led_cdev->dev, &dev_attr_duration);
- if (rc)
- goto err_out_duration;
-
- rc = device_create_file(led_cdev->dev, &dev_attr_state);
- if (rc)
- goto err_out_state;
+ led_set_trigger_data(led_cdev, tdata);
+ tdata->led_cdev = led_cdev;
timer_setup(&tdata->timer, transient_timer_function, 0);
- led_cdev->activated = true;
-
- return;
-
-err_out_state:
- device_remove_file(led_cdev->dev, &dev_attr_duration);
-err_out_duration:
- device_remove_file(led_cdev->dev, &dev_attr_activate);
-err_out:
- dev_err(led_cdev->dev, "unable to register transient trigger\n");
- led_cdev->trigger_data = NULL;
- kfree(tdata);
+
+ return 0;
}
static void transient_trig_deactivate(struct led_classdev *led_cdev)
{
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct transient_trig_data *transient_data = led_get_trigger_data(led_cdev);
- if (led_cdev->activated) {
- del_timer_sync(&transient_data->timer);
- led_set_brightness_nosleep(led_cdev,
- transient_data->restore_state);
- device_remove_file(led_cdev->dev, &dev_attr_activate);
- device_remove_file(led_cdev->dev, &dev_attr_duration);
- device_remove_file(led_cdev->dev, &dev_attr_state);
- led_cdev->trigger_data = NULL;
- led_cdev->activated = false;
- kfree(transient_data);
- }
+ del_timer_sync(&transient_data->timer);
+ led_set_brightness_nosleep(led_cdev, transient_data->restore_state);
+ kfree(transient_data);
}
static struct led_trigger transient_trigger = {
.name = "transient",
.activate = transient_trig_activate,
.deactivate = transient_trig_deactivate,
+ .groups = transient_trig_groups,
};
-
-static int __init transient_trig_init(void)
-{
- return led_trigger_register(&transient_trigger);
-}
-
-static void __exit transient_trig_exit(void)
-{
- led_trigger_unregister(&transient_trigger);
-}
-
-module_init(transient_trig_init);
-module_exit(transient_trig_exit);
+module_led_trigger(transient_trigger);
MODULE_AUTHOR("Shuah Khan <shuahkhan@gmail.com>");
MODULE_DESCRIPTION("Transient LED trigger");
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 9c03f35d9df1..439bf90d084d 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -17,23 +17,25 @@ menuconfig NVM
if NVM
-config NVM_DEBUG
- bool "Open-Channel SSD debugging support"
- default n
- ---help---
- Exposes a debug management interface to create/remove targets at:
+config NVM_PBLK
+ tristate "Physical Block Device Open-Channel SSD target"
+ help
+ Allows an open-channel SSD to be exposed as a block device to the
+ host. The target assumes the device exposes raw flash and must be
+ explicitly managed by the host.
- /sys/module/lnvm/parameters/configure_debug
+ Please note the disk format is considered EXPERIMENTAL for now.
- It is required to create/remove targets without IOCTLs.
+if NVM_PBLK
-config NVM_PBLK
- tristate "Physical Block Device Open-Channel SSD target"
- ---help---
- Allows an open-channel SSD to be exposed as a block device to the
- host. The target assumes the device exposes raw flash and must be
- explicitly managed by the host.
+config NVM_PBLK_DEBUG
+ bool "PBlk Debug Support"
+ default n
+ help
+ Enables debug support for pblk. This includes extra checks, more
+ vocal error messages, and extra tracking fields in the pblk sysfs
+ entries.
- Please note the disk format is considered EXPERIMENTAL for now.
+endif # NVM_PBLK
endif # NVM
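
After the rename, the debug knob belongs to the pblk target rather than the lightnvm core, and it only appears once NVM_PBLK itself is selected. A plausible .config fragment for a debug-enabled build, using exactly the option names from the hunk above:

    CONFIG_NVM=y
    CONFIG_NVM_PBLK=m
    CONFIG_NVM_PBLK_DEBUG=y
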
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index b1c6d7eb6115..f565a56b898a 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -27,7 +27,8 @@ int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
int nr_entries = pblk_get_secs(bio);
int i, ret;
- generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0);
+ generic_start_io_acct(q, REQ_OP_WRITE, bio_sectors(bio),
+ &pblk->disk->part0);
/* Update the write buffer head (mem) with the entries that we can
* write. The write in itself cannot fail, so there is no need to
@@ -67,7 +68,7 @@ retry:
atomic64_add(nr_entries, &pblk->user_wa);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(nr_entries, &pblk->inflight_writes);
atomic_long_add(nr_entries, &pblk->req_writes);
#endif
@@ -75,7 +76,7 @@ retry:
pblk_rl_inserted(&pblk->rl, nr_entries);
out:
- generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time);
+ generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time);
pblk_write_should_kick(pblk);
return ret;
}
@@ -123,7 +124,7 @@ retry:
atomic64_add(valid_entries, &pblk->gc_wa);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(valid_entries, &pblk->inflight_writes);
atomic_long_add(valid_entries, &pblk->recov_gc_writes);
#endif
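
The accounting calls in this file also switch from the legacy READ/WRITE direction flags to request ops (REQ_OP_READ/REQ_OP_WRITE), matching the generic_start_io_acct()/generic_end_io_acct() interface of this kernel generation. The usual jiffies-bracketed pattern, sketched with local names assumed from the surrounding code:

    static int sketch_write(struct pblk *pblk, struct request_queue *q,
    			    struct bio *bio)
    {
    	unsigned long start_time = jiffies;

    	generic_start_io_acct(q, REQ_OP_WRITE, bio_sectors(bio),
    			      &pblk->disk->part0);

    	/* ... stage the bio data into the write buffer ... */

    	generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0,
    			    start_time);
    	return 0;
    }
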
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index ed9cc977c8b3..00984b486fea 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -35,7 +35,7 @@ static void pblk_line_mark_bb(struct work_struct *work)
line = &pblk->lines[pblk_ppa_to_line(*ppa)];
pos = pblk_ppa_to_pos(&dev->geo, *ppa);
- pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
+ pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
line->id, pos);
}
@@ -51,12 +51,12 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
struct ppa_addr *ppa;
int pos = pblk_ppa_to_pos(geo, ppa_addr);
- pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
+ pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
atomic_long_inc(&pblk->erase_failed);
atomic_dec(&line->blk_in_line);
if (test_and_set_bit(pos, line->blk_bitmap))
- pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
+ pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
line->id, pos);
/* Not necessary to mark bad blocks on 2.0 spec. */
@@ -194,7 +194,7 @@ void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
u64 paddr;
int line_id;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a device address */
BUG_ON(pblk_addr_in_cache(ppa));
BUG_ON(pblk_ppa_empty(ppa));
@@ -264,6 +264,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
switch (type) {
case PBLK_WRITE:
kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
+ /* fall through */
case PBLK_WRITE_INT:
pool = &pblk->w_rq_pool;
break;
@@ -274,7 +275,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
pool = &pblk->e_rq_pool;
break;
default:
- pr_err("pblk: trying to free unknown rqd type\n");
+ pblk_err(pblk, "trying to free unknown rqd type\n");
return;
}
@@ -310,7 +311,7 @@ int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
if (ret != PBLK_EXPOSED_PAGE_SIZE) {
- pr_err("pblk: could not add page to bio\n");
+ pblk_err(pblk, "could not add page to bio\n");
mempool_free(page, &pblk->page_bio_pool);
goto err;
}
@@ -410,7 +411,7 @@ struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
line->state = PBLK_LINESTATE_CORRUPT;
line->gc_group = PBLK_LINEGC_NONE;
move_list = &l_mg->corrupt_list;
- pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
+ pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
line->id, vsc,
line->sec_in_line,
lm->high_thrs, lm->mid_thrs);
@@ -430,7 +431,7 @@ void pblk_discard(struct pblk *pblk, struct bio *bio)
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
atomic_long_inc(&pblk->write_failed);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
@@ -452,9 +453,9 @@ void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
atomic_long_inc(&pblk->read_failed);
break;
default:
- pr_err("pblk: unknown read error:%d\n", rqd->error);
+ pblk_err(pblk, "unknown read error:%d\n", rqd->error);
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
@@ -470,7 +471,7 @@ int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
atomic_inc(&pblk->inflight_io);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
if (pblk_check_io(pblk, rqd))
return NVM_IO_ERR;
#endif
@@ -484,7 +485,7 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
atomic_inc(&pblk->inflight_io);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
if (pblk_check_io(pblk, rqd))
return NVM_IO_ERR;
#endif
@@ -517,7 +518,7 @@ struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
for (i = 0; i < nr_secs; i++) {
page = vmalloc_to_page(kaddr);
if (!page) {
- pr_err("pblk: could not map vmalloc bio\n");
+ pblk_err(pblk, "could not map vmalloc bio\n");
bio_put(bio);
bio = ERR_PTR(-ENOMEM);
goto out;
@@ -525,7 +526,7 @@ struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
if (ret != PAGE_SIZE) {
- pr_err("pblk: could not add page to bio\n");
+ pblk_err(pblk, "could not add page to bio\n");
bio_put(bio);
bio = ERR_PTR(-ENOMEM);
goto out;
@@ -711,7 +712,7 @@ next_rq:
while (test_bit(pos, line->blk_bitmap)) {
paddr += min;
if (pblk_boundary_paddr_checks(pblk, paddr)) {
- pr_err("pblk: corrupt emeta line:%d\n",
+ pblk_err(pblk, "corrupt emeta line:%d\n",
line->id);
bio_put(bio);
ret = -EINTR;
@@ -723,7 +724,7 @@ next_rq:
}
if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
- pr_err("pblk: corrupt emeta line:%d\n",
+ pblk_err(pblk, "corrupt emeta line:%d\n",
line->id);
bio_put(bio);
ret = -EINTR;
@@ -738,7 +739,7 @@ next_rq:
ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
- pr_err("pblk: emeta I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
bio_put(bio);
goto free_rqd_dma;
}
@@ -843,7 +844,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
*/
ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
- pr_err("pblk: smeta I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
bio_put(bio);
goto free_ppa_list;
}
@@ -905,7 +906,7 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- pr_err("pblk: could not sync erase line:%d,blk:%d\n",
+ pblk_err(pblk, "could not sync erase line:%d,blk:%d\n",
pblk_ppa_to_line(ppa),
pblk_ppa_to_pos(geo, ppa));
@@ -945,7 +946,7 @@ int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
ret = pblk_blk_erase_sync(pblk, ppa);
if (ret) {
- pr_err("pblk: failed to erase line %d\n", line->id);
+ pblk_err(pblk, "failed to erase line %d\n", line->id);
return ret;
}
} while (1);
@@ -1012,7 +1013,7 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
list_add_tail(&line->list, &l_mg->bad_list);
spin_unlock(&l_mg->free_lock);
- pr_debug("pblk: line %d is bad\n", line->id);
+ pblk_debug(pblk, "line %d is bad\n", line->id);
return 0;
}
@@ -1122,7 +1123,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
line->cur_sec = off + lm->smeta_sec;
if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
- pr_debug("pblk: line smeta I/O failed. Retry\n");
+ pblk_debug(pblk, "line smeta I/O failed. Retry\n");
return 0;
}
@@ -1154,7 +1155,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
spin_unlock(&line->lock);
list_add_tail(&line->list, &l_mg->bad_list);
- pr_err("pblk: unexpected line %d is bad\n", line->id);
+ pblk_err(pblk, "unexpected line %d is bad\n", line->id);
return 0;
}
@@ -1299,7 +1300,7 @@ struct pblk_line *pblk_line_get(struct pblk *pblk)
retry:
if (list_empty(&l_mg->free_list)) {
- pr_err("pblk: no free lines\n");
+ pblk_err(pblk, "no free lines\n");
return NULL;
}
@@ -1315,7 +1316,7 @@ retry:
list_add_tail(&line->list, &l_mg->bad_list);
- pr_debug("pblk: line %d is bad\n", line->id);
+ pblk_debug(pblk, "line %d is bad\n", line->id);
goto retry;
}
@@ -1329,7 +1330,7 @@ retry:
list_add(&line->list, &l_mg->corrupt_list);
goto retry;
default:
- pr_err("pblk: failed to prepare line %d\n", line->id);
+ pblk_err(pblk, "failed to prepare line %d\n", line->id);
list_add(&line->list, &l_mg->free_list);
l_mg->nr_free_lines++;
return NULL;
@@ -1477,7 +1478,7 @@ static void pblk_line_close_meta_sync(struct pblk *pblk)
ret = pblk_submit_meta_io(pblk, line);
if (ret) {
- pr_err("pblk: sync meta line %d failed (%d)\n",
+ pblk_err(pblk, "sync meta line %d failed (%d)\n",
line->id, ret);
return;
}
@@ -1507,7 +1508,7 @@ void __pblk_pipeline_flush(struct pblk *pblk)
ret = pblk_recov_pad(pblk);
if (ret) {
- pr_err("pblk: could not close data on teardown(%d)\n", ret);
+ pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
return;
}
@@ -1687,7 +1688,7 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- pr_err("pblk: could not async erase line:%d,blk:%d\n",
+ pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
pblk_ppa_to_line(ppa),
pblk_ppa_to_pos(geo, ppa));
}
@@ -1726,7 +1727,7 @@ void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
struct list_head *move_list;
int i;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
"pblk: corrupt closed line %d\n", line->id);
#endif
@@ -1856,7 +1857,7 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
* Only send one inflight I/O per LUN. Since we map at a page
* granularity, all ppas in the I/O will map to the same LUN
*/
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
int i;
for (i = 1; i < nr_ppas; i++)
@@ -1866,7 +1867,8 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
if (ret == -ETIME || ret == -EINTR)
- pr_err("pblk: taking lun semaphore timed out: err %d\n", -ret);
+ pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
+ -ret);
}
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
@@ -1901,7 +1903,7 @@ void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
struct pblk_lun *rlun;
int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
int i;
for (i = 1; i < nr_ppas; i++)
@@ -1951,7 +1953,7 @@ void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a cache address */
BUG_ON(!pblk_addr_in_cache(ppa));
BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
@@ -1966,7 +1968,7 @@ int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
struct ppa_addr ppa_l2p, ppa_gc;
int ret = 1;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a cache address */
BUG_ON(!pblk_addr_in_cache(ppa_new));
BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
@@ -2003,14 +2005,14 @@ void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
{
struct ppa_addr ppa_l2p;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a device address */
BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
/* Invalidate and discard padded entries */
if (lba == ADDR_EMPTY) {
atomic64_inc(&pblk->pad_wa);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->padded_wb);
#endif
if (!pblk_ppa_empty(ppa_mapped))
@@ -2036,7 +2038,7 @@ void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
goto out;
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif
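
Every pr_*("pblk: ...") call site in this file becomes pblk_*() so that messages are tagged with the specific target instance rather than a fixed prefix, which matters once several pblk instances run on one host. The helpers are defined elsewhere in the series (in pblk.h); a plausible reconstruction of their shape, assuming they key off the target's disk name:

    /* Hypothetical sketch; see pblk.h in this series for the real macros. */
    #define pblk_err(pblk, fmt, ...)					\
    	pr_err("pblk %s: " fmt, (pblk)->disk->disk_name, ##__VA_ARGS__)
    #define pblk_warn(pblk, fmt, ...)					\
    	pr_warn("pblk %s: " fmt, (pblk)->disk->disk_name, ##__VA_ARGS__)
    #define pblk_info(pblk, fmt, ...)					\
    	pr_info("pblk %s: " fmt, (pblk)->disk->disk_name, ##__VA_ARGS__)
    #define pblk_debug(pblk, fmt, ...)					\
    	pr_debug("pblk %s: " fmt, (pblk)->disk->disk_name, ##__VA_ARGS__)
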
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 080469d90b40..157c2567c9e8 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -90,7 +90,7 @@ static void pblk_gc_line_ws(struct work_struct *work)
gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
if (!gc_rq->data) {
- pr_err("pblk: could not GC line:%d (%d/%d)\n",
+ pblk_err(pblk, "could not GC line:%d (%d/%d)\n",
line->id, *line->vsc, gc_rq->nr_secs);
goto out;
}
@@ -98,7 +98,7 @@ static void pblk_gc_line_ws(struct work_struct *work)
/* Read from GC victim block */
ret = pblk_submit_read_gc(pblk, gc_rq);
if (ret) {
- pr_err("pblk: failed GC read in line:%d (err:%d)\n",
+ pblk_err(pblk, "failed GC read in line:%d (err:%d)\n",
line->id, ret);
goto out;
}
@@ -146,7 +146,7 @@ static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
ret = pblk_line_read_emeta(pblk, line, emeta_buf);
if (ret) {
- pr_err("pblk: line %d read emeta failed (%d)\n",
+ pblk_err(pblk, "line %d read emeta failed (%d)\n",
line->id, ret);
pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
return NULL;
@@ -160,7 +160,7 @@ static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
ret = pblk_recov_check_emeta(pblk, emeta_buf);
if (ret) {
- pr_err("pblk: inconsistent emeta (line %d)\n",
+ pblk_err(pblk, "inconsistent emeta (line %d)\n",
line->id);
pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
return NULL;
@@ -201,7 +201,7 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
} else {
lba_list = get_lba_list_from_emeta(pblk, line);
if (!lba_list) {
- pr_err("pblk: could not interpret emeta (line %d)\n",
+ pblk_err(pblk, "could not interpret emeta (line %d)\n",
line->id);
goto fail_free_invalid_bitmap;
}
@@ -213,7 +213,7 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
spin_unlock(&line->lock);
if (sec_left < 0) {
- pr_err("pblk: corrupted GC line (%d)\n", line->id);
+ pblk_err(pblk, "corrupted GC line (%d)\n", line->id);
goto fail_free_lba_list;
}
@@ -289,7 +289,7 @@ fail_free_ws:
kref_put(&line->ref, pblk_line_put);
atomic_dec(&gc->read_inflight_gc);
- pr_err("pblk: Failed to GC line %d\n", line->id);
+ pblk_err(pblk, "failed to GC line %d\n", line->id);
}
static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
@@ -297,7 +297,7 @@ static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
struct pblk_gc *gc = &pblk->gc;
struct pblk_line_ws *line_ws;
- pr_debug("pblk: line '%d' being reclaimed for GC\n", line->id);
+ pblk_debug(pblk, "line '%d' being reclaimed for GC\n", line->id);
line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
if (!line_ws)
@@ -351,7 +351,7 @@ static int pblk_gc_read(struct pblk *pblk)
pblk_gc_kick(pblk);
if (pblk_gc_line(pblk, line))
- pr_err("pblk: failed to GC line %d\n", line->id);
+ pblk_err(pblk, "failed to GC line %d\n", line->id);
return 0;
}
@@ -522,8 +522,8 @@ static int pblk_gc_reader_ts(void *data)
io_schedule();
}
-#ifdef CONFIG_NVM_DEBUG
- pr_info("pblk: flushing gc pipeline, %d lines left\n",
+#ifdef CONFIG_NVM_PBLK_DEBUG
+ pblk_info(pblk, "flushing gc pipeline, %d lines left\n",
atomic_read(&gc->pipeline_gc));
#endif
@@ -540,7 +540,7 @@ static int pblk_gc_reader_ts(void *data)
static void pblk_gc_start(struct pblk *pblk)
{
pblk->gc.gc_active = 1;
- pr_debug("pblk: gc start\n");
+ pblk_debug(pblk, "gc start\n");
}
void pblk_gc_should_start(struct pblk *pblk)
@@ -605,14 +605,14 @@ int pblk_gc_init(struct pblk *pblk)
gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
if (IS_ERR(gc->gc_ts)) {
- pr_err("pblk: could not allocate GC main kthread\n");
+ pblk_err(pblk, "could not allocate GC main kthread\n");
return PTR_ERR(gc->gc_ts);
}
gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
"pblk-gc-writer-ts");
if (IS_ERR(gc->gc_writer_ts)) {
- pr_err("pblk: could not allocate GC writer kthread\n");
+ pblk_err(pblk, "could not allocate GC writer kthread\n");
ret = PTR_ERR(gc->gc_writer_ts);
goto fail_free_main_kthread;
}
@@ -620,7 +620,7 @@ int pblk_gc_init(struct pblk *pblk)
gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
"pblk-gc-reader-ts");
if (IS_ERR(gc->gc_reader_ts)) {
- pr_err("pblk: could not allocate GC reader kthread\n");
+ pblk_err(pblk, "could not allocate GC reader kthread\n");
ret = PTR_ERR(gc->gc_reader_ts);
goto fail_free_writer_kthread;
}
@@ -641,7 +641,7 @@ int pblk_gc_init(struct pblk *pblk)
gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
if (!gc->gc_line_reader_wq) {
- pr_err("pblk: could not allocate GC line reader workqueue\n");
+ pblk_err(pblk, "could not allocate GC line reader workqueue\n");
ret = -ENOMEM;
goto fail_free_reader_kthread;
}
@@ -650,7 +650,7 @@ int pblk_gc_init(struct pblk *pblk)
gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
if (!gc->gc_reader_wq) {
- pr_err("pblk: could not allocate GC reader workqueue\n");
+ pblk_err(pblk, "could not allocate GC reader workqueue\n");
ret = -ENOMEM;
goto fail_free_reader_line_wq;
}
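
The GC init path above repeats a standard kernel idiom worth noting: kthread_create() returns an ERR_PTR-encoded pointer rather than NULL on failure, so errors are detected with IS_ERR() and decoded with PTR_ERR(). In isolation:

    #include <linux/err.h>
    #include <linux/kthread.h>

    struct task_struct *ts;

    ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
    if (IS_ERR(ts))
    	return PTR_ERR(ts);	/* encoded errno, never NULL */

    /* kthread_create() leaves the thread stopped until it is woken. */
    wake_up_process(ts);
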
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index b57f764d6a16..537e98f2b24a 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -91,7 +91,7 @@ static size_t pblk_trans_map_size(struct pblk *pblk)
return entry_size * pblk->rl.nr_secs;
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
static u32 pblk_l2p_crc(struct pblk *pblk)
{
size_t map_size;
@@ -117,13 +117,13 @@ static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
} else {
line = pblk_recov_l2p(pblk);
if (IS_ERR(line)) {
- pr_err("pblk: could not recover l2p table\n");
+ pblk_err(pblk, "could not recover l2p table\n");
return -EFAULT;
}
}
-#ifdef CONFIG_NVM_DEBUG
- pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
+#ifdef CONFIG_NVM_PBLK_DEBUG
+ pblk_info(pblk, "init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif
/* Free full lines directly as GC has not been started yet */
@@ -166,7 +166,7 @@ static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
static void pblk_rwb_free(struct pblk *pblk)
{
if (pblk_rb_tear_down_check(&pblk->rwb))
- pr_err("pblk: write buffer error on tear down\n");
+ pblk_err(pblk, "write buffer error on tear down\n");
pblk_rb_data_free(&pblk->rwb);
vfree(pblk_rb_entries_ref(&pblk->rwb));
@@ -179,11 +179,14 @@ static int pblk_rwb_init(struct pblk *pblk)
struct pblk_rb_entry *entries;
unsigned long nr_entries, buffer_size;
unsigned int power_size, power_seg_sz;
+ int pgs_in_buffer;
- if (write_buffer_size && (write_buffer_size > pblk->pgs_in_buffer))
+ pgs_in_buffer = max(geo->mw_cunits, geo->ws_opt) * geo->all_luns;
+
+ if (write_buffer_size && (write_buffer_size > pgs_in_buffer))
buffer_size = write_buffer_size;
else
- buffer_size = pblk->pgs_in_buffer;
+ buffer_size = pgs_in_buffer;
nr_entries = pblk_rb_calculate_size(buffer_size);
@@ -200,7 +203,8 @@ static int pblk_rwb_init(struct pblk *pblk)
/* Minimum pages needed within a lun */
#define ADDR_POOL_SIZE 64
-static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst)
+static int pblk_set_addrf_12(struct pblk *pblk, struct nvm_geo *geo,
+ struct nvm_addrf_12 *dst)
{
struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
int power_len;
@@ -208,14 +212,14 @@ static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst)
/* Re-calculate channel and lun format to adapt to configuration */
power_len = get_count_order(geo->num_ch);
if (1 << power_len != geo->num_ch) {
- pr_err("pblk: supports only power-of-two channel config.\n");
+ pblk_err(pblk, "supports only power-of-two channel config.\n");
return -EINVAL;
}
dst->ch_len = power_len;
power_len = get_count_order(geo->num_lun);
if (1 << power_len != geo->num_lun) {
- pr_err("pblk: supports only power-of-two LUN config.\n");
+ pblk_err(pblk, "supports only power-of-two LUN config.\n");
return -EINVAL;
}
dst->lun_len = power_len;
@@ -282,18 +286,19 @@ static int pblk_set_addrf(struct pblk *pblk)
case NVM_OCSSD_SPEC_12:
div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
if (mod) {
- pr_err("pblk: bad configuration of sectors/pages\n");
+ pblk_err(pblk, "bad configuration of sectors/pages\n");
return -EINVAL;
}
- pblk->addrf_len = pblk_set_addrf_12(geo, (void *)&pblk->addrf);
+ pblk->addrf_len = pblk_set_addrf_12(pblk, geo,
+ (void *)&pblk->addrf);
break;
case NVM_OCSSD_SPEC_20:
pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf,
- &pblk->uaddrf);
+ &pblk->uaddrf);
break;
default:
- pr_err("pblk: OCSSD revision not supported (%d)\n",
+ pblk_err(pblk, "OCSSD revision not supported (%d)\n",
geo->version);
return -EINVAL;
}
@@ -366,15 +371,13 @@ static int pblk_core_init(struct pblk *pblk)
atomic64_set(&pblk->nr_flush, 0);
pblk->nr_flush_rst = 0;
- pblk->pgs_in_buffer = geo->mw_cunits * geo->all_luns;
-
pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE);
max_write_ppas = pblk->min_write_pgs * geo->all_luns;
pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
- pr_err("pblk: vector list too big(%u > %u)\n",
+ pblk_err(pblk, "vector list too big(%u > %u)\n",
pblk->max_write_pgs, PBLK_MAX_REQ_ADDRS);
return -EINVAL;
}
@@ -607,7 +610,7 @@ static int pblk_luns_init(struct pblk *pblk)
/* TODO: Implement unbalanced LUN support */
if (geo->num_lun < 0) {
- pr_err("pblk: unbalanced LUN config.\n");
+ pblk_err(pblk, "unbalanced LUN config.\n");
return -EINVAL;
}
@@ -716,10 +719,11 @@ static int pblk_setup_line_meta_12(struct pblk *pblk, struct pblk_line *line,
/*
* In 1.2 spec. chunk state is not persisted by the device. Thus
- * some of the values are reset each time pblk is instantiated.
+ * some of the values are reset each time pblk is instantiated,
+ * so we have to assume that the block is closed.
*/
if (lun_bb_meta[line->id] == NVM_BLK_T_FREE)
- chunk->state = NVM_CHK_ST_FREE;
+ chunk->state = NVM_CHK_ST_CLOSED;
else
chunk->state = NVM_CHK_ST_OFFLINE;
@@ -1026,7 +1030,7 @@ add_emeta_page:
lm->emeta_sec[0], geo->clba);
if (lm->min_blk_line > lm->blk_per_line) {
- pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
+ pblk_err(pblk, "config. not supported. Min. LUN in line:%d\n",
lm->blk_per_line);
return -EINVAL;
}
@@ -1078,7 +1082,7 @@ static int pblk_lines_init(struct pblk *pblk)
}
if (!nr_free_chks) {
- pr_err("pblk: too many bad blocks prevent for sane instance\n");
+ pblk_err(pblk, "too many bad blocks prevent for sane instance\n");
return -EINTR;
}
@@ -1108,7 +1112,7 @@ static int pblk_writer_init(struct pblk *pblk)
int err = PTR_ERR(pblk->writer_ts);
if (err != -EINTR)
- pr_err("pblk: could not allocate writer kthread (%d)\n",
+ pblk_err(pblk, "could not allocate writer kthread (%d)\n",
err);
return err;
}
@@ -1154,7 +1158,7 @@ static void pblk_tear_down(struct pblk *pblk, bool graceful)
pblk_rb_sync_l2p(&pblk->rwb);
pblk_rl_free(&pblk->rl);
- pr_debug("pblk: consistent tear down (graceful:%d)\n", graceful);
+ pblk_debug(pblk, "consistent tear down (graceful:%d)\n", graceful);
}
static void pblk_exit(void *private, bool graceful)
@@ -1165,8 +1169,8 @@ static void pblk_exit(void *private, bool graceful)
pblk_gc_exit(pblk, graceful);
pblk_tear_down(pblk, graceful);
-#ifdef CONFIG_NVM_DEBUG
- pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
+#ifdef CONFIG_NVM_PBLK_DEBUG
+ pblk_info(pblk, "exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif
pblk_free(pblk);
@@ -1189,34 +1193,35 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
struct pblk *pblk;
int ret;
- /* pblk supports 1.2 and 2.0 versions */
+ pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
+ if (!pblk)
+ return ERR_PTR(-ENOMEM);
+
+ pblk->dev = dev;
+ pblk->disk = tdisk;
+ pblk->state = PBLK_STATE_RUNNING;
+ pblk->gc.gc_enabled = 0;
+
if (!(geo->version == NVM_OCSSD_SPEC_12 ||
geo->version == NVM_OCSSD_SPEC_20)) {
- pr_err("pblk: OCSSD version not supported (%u)\n",
+ pblk_err(pblk, "OCSSD version not supported (%u)\n",
geo->version);
+ kfree(pblk);
return ERR_PTR(-EINVAL);
}
if (geo->version == NVM_OCSSD_SPEC_12 && geo->dom & NVM_RSP_L2P) {
- pr_err("pblk: host-side L2P table not supported. (%x)\n",
+ pblk_err(pblk, "host-side L2P table not supported. (%x)\n",
geo->dom);
+ kfree(pblk);
return ERR_PTR(-EINVAL);
}
- pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
- if (!pblk)
- return ERR_PTR(-ENOMEM);
-
- pblk->dev = dev;
- pblk->disk = tdisk;
- pblk->state = PBLK_STATE_RUNNING;
- pblk->gc.gc_enabled = 0;
-
spin_lock_init(&pblk->resubmit_lock);
spin_lock_init(&pblk->trans_lock);
spin_lock_init(&pblk->lock);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_set(&pblk->inflight_writes, 0);
atomic_long_set(&pblk->padded_writes, 0);
atomic_long_set(&pblk->padded_wb, 0);
@@ -1241,38 +1246,38 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
ret = pblk_core_init(pblk);
if (ret) {
- pr_err("pblk: could not initialize core\n");
+ pblk_err(pblk, "could not initialize core\n");
goto fail;
}
ret = pblk_lines_init(pblk);
if (ret) {
- pr_err("pblk: could not initialize lines\n");
+ pblk_err(pblk, "could not initialize lines\n");
goto fail_free_core;
}
ret = pblk_rwb_init(pblk);
if (ret) {
- pr_err("pblk: could not initialize write buffer\n");
+ pblk_err(pblk, "could not initialize write buffer\n");
goto fail_free_lines;
}
ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY);
if (ret) {
- pr_err("pblk: could not initialize maps\n");
+ pblk_err(pblk, "could not initialize maps\n");
goto fail_free_rwb;
}
ret = pblk_writer_init(pblk);
if (ret) {
if (ret != -EINTR)
- pr_err("pblk: could not initialize write thread\n");
+ pblk_err(pblk, "could not initialize write thread\n");
goto fail_free_l2p;
}
ret = pblk_gc_init(pblk);
if (ret) {
- pr_err("pblk: could not initialize gc\n");
+ pblk_err(pblk, "could not initialize gc\n");
goto fail_stop_writer;
}
@@ -1287,8 +1292,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);
- pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
- tdisk->disk_name,
+ pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
geo->all_luns, pblk->l_mg.nr_lines,
(unsigned long long)pblk->rl.nr_secs,
pblk->rwb.nr_entries);
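
Two pblk-init changes deserve a note. First, the write buffer is now sized inside pblk_rwb_init() as max(mw_cunits, ws_opt) * all_luns instead of the cached geo->mw_cunits * geo->all_luns, presumably so a device reporting a small or zero mw_cunits still gets a buffer that covers the optimal write size. Second, pblk_init() allocates the instance before validating the geometry so the pblk_err() calls have a target to tag, which is why both early-return paths gain kfree(pblk). A worked sizing example with assumed geometry values:

    /* Geometry values assumed for illustration only. */
    int mw_cunits = 8;	/* cache units the device wants kept open */
    int ws_opt    = 16;	/* optimal write size                     */
    int all_luns  = 64;	/* parallel units across the device       */

    int pgs_in_buffer = max(mw_cunits, ws_opt) * all_luns; /* 16 * 64 = 1024 */
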
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 55e9442a99e2..f6eec0212dfc 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -111,7 +111,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
} while (iter > 0);
up_write(&pblk_rb_lock);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_set(&rb->inflight_flush_point, 0);
#endif
@@ -308,7 +308,7 @@ void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
entry = &rb->entries[ring_pos];
flags = READ_ONCE(entry->w_ctx.flags);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Caller must guarantee that the entry is free */
BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif
@@ -332,7 +332,7 @@ void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
entry = &rb->entries[ring_pos];
flags = READ_ONCE(entry->w_ctx.flags);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Caller must guarantee that the entry is free */
BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif
@@ -362,7 +362,7 @@ static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
return 0;
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_inc(&rb->inflight_flush_point);
#endif
@@ -547,7 +547,7 @@ try:
page = virt_to_page(entry->data);
if (!page) {
- pr_err("pblk: could not allocate write bio page\n");
+ pblk_err(pblk, "could not allocate write bio page\n");
flags &= ~PBLK_WRITTEN_DATA;
flags |= PBLK_SUBMITTED_ENTRY;
/* Release flags on context. Protect from writes */
@@ -557,7 +557,7 @@ try:
if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
rb->seg_size) {
- pr_err("pblk: could not add page to write bio\n");
+ pblk_err(pblk, "could not add page to write bio\n");
flags &= ~PBLK_WRITTEN_DATA;
flags |= PBLK_SUBMITTED_ENTRY;
/* Release flags on context. Protect from writes */
@@ -576,19 +576,19 @@ try:
if (pad) {
if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
- pr_err("pblk: could not pad page in write bio\n");
+ pblk_err(pblk, "could not pad page in write bio\n");
return NVM_IO_ERR;
}
if (pad < pblk->min_write_pgs)
atomic64_inc(&pblk->pad_dist[pad - 1]);
else
- pr_warn("pblk: padding more than min. sectors\n");
+ pblk_warn(pblk, "padding more than min. sectors\n");
atomic64_add(pad, &pblk->pad_wa);
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(pad, &pblk->padded_writes);
#endif
@@ -613,7 +613,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
int ret = 1;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Caller must ensure that the access will not cause an overflow */
BUG_ON(pos >= rb->nr_entries);
#endif
@@ -820,7 +820,7 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb->subm,
rb->sync,
rb->l2p_update,
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_read(&rb->inflight_flush_point),
#else
0,
@@ -838,7 +838,7 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb->subm,
rb->sync,
rb->l2p_update,
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_read(&rb->inflight_flush_point),
#else
0,
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 18694694e5f0..5a46d7f9302f 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -28,7 +28,7 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
sector_t lba, struct ppa_addr ppa,
int bio_iter, bool advanced_bio)
{
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a cache address */
BUG_ON(pblk_ppa_empty(ppa));
BUG_ON(!pblk_addr_in_cache(ppa));
@@ -79,7 +79,7 @@ retry:
WARN_ON(test_and_set_bit(i, read_bitmap));
meta_list[i].lba = cpu_to_le64(lba);
advanced_bio = true;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->cache_reads);
#endif
} else {
@@ -97,7 +97,7 @@ next:
else
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}
@@ -117,13 +117,13 @@ static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
continue;
if (lba != blba + i) {
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
struct ppa_addr *p;
p = (nr_lbas == 1) ? &rqd->ppa_list[i] : &rqd->ppa_addr;
- print_ppa(&pblk->dev->geo, p, "seq", i);
+ print_ppa(pblk, p, "seq", i);
#endif
- pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
+ pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
lba, (u64)blba + i);
WARN_ON(1);
}
@@ -149,14 +149,14 @@ static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
meta_lba = le64_to_cpu(meta_lba_list[j].lba);
if (lba != meta_lba) {
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
struct ppa_addr *p;
int nr_ppas = rqd->nr_ppas;
p = (nr_ppas == 1) ? &rqd->ppa_list[j] : &rqd->ppa_addr;
- print_ppa(&pblk->dev->geo, p, "seq", j);
+ print_ppa(pblk, p, "seq", j);
#endif
- pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
+ pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
lba, meta_lba);
WARN_ON(1);
}
@@ -185,7 +185,7 @@ static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
static void pblk_end_user_read(struct bio *bio)
{
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
bio_endio(bio);
@@ -199,7 +199,7 @@ static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
struct bio *int_bio = rqd->bio;
unsigned long start_time = r_ctx->start_time;
- generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);
+ generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);
if (rqd->error)
pblk_log_read_err(pblk, rqd);
@@ -212,7 +212,7 @@ static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
if (put_line)
pblk_read_put_rqd_kref(pblk, rqd);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif
@@ -231,74 +231,36 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
__pblk_end_io_read(pblk, rqd, true);
}
-static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
- struct bio *orig_bio, unsigned int bio_init_idx,
- unsigned long *read_bitmap)
+static void pblk_end_partial_read(struct nvm_rq *rqd)
{
- struct pblk_sec_meta *meta_list = rqd->meta_list;
- struct bio *new_bio;
+ struct pblk *pblk = rqd->private;
+ struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
+ struct pblk_pr_ctx *pr_ctx = r_ctx->private;
+ struct bio *new_bio = rqd->bio;
+ struct bio *bio = pr_ctx->orig_bio;
struct bio_vec src_bv, dst_bv;
- void *ppa_ptr = NULL;
- void *src_p, *dst_p;
- dma_addr_t dma_ppa_list = 0;
- __le64 *lba_list_mem, *lba_list_media;
- int nr_secs = rqd->nr_ppas;
+ struct pblk_sec_meta *meta_list = rqd->meta_list;
+ int bio_init_idx = pr_ctx->bio_init_idx;
+ unsigned long *read_bitmap = pr_ctx->bitmap;
+ int nr_secs = pr_ctx->orig_nr_secs;
int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
- int i, ret, hole;
-
- /* Re-use allocated memory for intermediate lbas */
- lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
- lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);
-
- new_bio = bio_alloc(GFP_KERNEL, nr_holes);
-
- if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
- goto fail_add_pages;
-
- if (nr_holes != new_bio->bi_vcnt) {
- pr_err("pblk: malformed bio\n");
- goto fail;
- }
-
- for (i = 0; i < nr_secs; i++)
- lba_list_mem[i] = meta_list[i].lba;
-
- new_bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
-
- rqd->bio = new_bio;
- rqd->nr_ppas = nr_holes;
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
-
- if (unlikely(nr_holes == 1)) {
- ppa_ptr = rqd->ppa_list;
- dma_ppa_list = rqd->dma_ppa_list;
- rqd->ppa_addr = rqd->ppa_list[0];
- }
-
- ret = pblk_submit_io_sync(pblk, rqd);
- if (ret) {
- bio_put(rqd->bio);
- pr_err("pblk: sync read IO submission failed\n");
- goto fail;
- }
-
- if (rqd->error) {
- atomic_long_inc(&pblk->read_failed);
-#ifdef CONFIG_NVM_DEBUG
- pblk_print_failed_rqd(pblk, rqd, rqd->error);
-#endif
- }
+ __le64 *lba_list_mem, *lba_list_media;
+ void *src_p, *dst_p;
+ int hole, i;
if (unlikely(nr_holes == 1)) {
struct ppa_addr ppa;
ppa = rqd->ppa_addr;
- rqd->ppa_list = ppa_ptr;
- rqd->dma_ppa_list = dma_ppa_list;
+ rqd->ppa_list = pr_ctx->ppa_ptr;
+ rqd->dma_ppa_list = pr_ctx->dma_ppa_list;
rqd->ppa_list[0] = ppa;
}
+ /* Re-use allocated memory for intermediate lbas */
+ lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
+ lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);
+
for (i = 0; i < nr_secs; i++) {
lba_list_media[i] = meta_list[i].lba;
meta_list[i].lba = lba_list_mem[i];
@@ -316,7 +278,7 @@ static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
meta_list[hole].lba = lba_list_media[i];
src_bv = new_bio->bi_io_vec[i++];
- dst_bv = orig_bio->bi_io_vec[bio_init_idx + hole];
+ dst_bv = bio->bi_io_vec[bio_init_idx + hole];
src_p = kmap_atomic(src_bv.bv_page);
dst_p = kmap_atomic(dst_bv.bv_page);
@@ -334,19 +296,107 @@ static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
} while (hole < nr_secs);
bio_put(new_bio);
+ kfree(pr_ctx);
/* restore original request */
rqd->bio = NULL;
rqd->nr_ppas = nr_secs;
+ bio_endio(bio);
__pblk_end_io_read(pblk, rqd, false);
- return NVM_IO_DONE;
+}
-fail:
- /* Free allocated pages in new bio */
+static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
+ unsigned int bio_init_idx,
+ unsigned long *read_bitmap,
+ int nr_holes)
+{
+ struct pblk_sec_meta *meta_list = rqd->meta_list;
+ struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
+ struct pblk_pr_ctx *pr_ctx;
+ struct bio *new_bio, *bio = r_ctx->private;
+ __le64 *lba_list_mem;
+ int nr_secs = rqd->nr_ppas;
+ int i;
+
+ /* Re-use allocated memory for intermediate lbas */
+ lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
+
+ new_bio = bio_alloc(GFP_KERNEL, nr_holes);
+
+ if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
+ goto fail_bio_put;
+
+ if (nr_holes != new_bio->bi_vcnt) {
+ WARN_ONCE(1, "pblk: malformed bio\n");
+ goto fail_free_pages;
+ }
+
+ pr_ctx = kmalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
+ if (!pr_ctx)
+ goto fail_free_pages;
+
+ for (i = 0; i < nr_secs; i++)
+ lba_list_mem[i] = meta_list[i].lba;
+
+ new_bio->bi_iter.bi_sector = 0; /* internal bio */
+ bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
+
+ rqd->bio = new_bio;
+ rqd->nr_ppas = nr_holes;
+ rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+
+ pr_ctx->ppa_ptr = NULL;
+ pr_ctx->orig_bio = bio;
+ bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
+ pr_ctx->bio_init_idx = bio_init_idx;
+ pr_ctx->orig_nr_secs = nr_secs;
+ r_ctx->private = pr_ctx;
+
+ if (unlikely(nr_holes == 1)) {
+ pr_ctx->ppa_ptr = rqd->ppa_list;
+ pr_ctx->dma_ppa_list = rqd->dma_ppa_list;
+ rqd->ppa_addr = rqd->ppa_list[0];
+ }
+ return 0;
+
+fail_free_pages:
pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
-fail_add_pages:
- pr_err("pblk: failed to perform partial read\n");
+fail_bio_put:
+ bio_put(new_bio);
+
+ return -ENOMEM;
+}
+
+static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
+ unsigned int bio_init_idx,
+ unsigned long *read_bitmap, int nr_secs)
+{
+ int nr_holes;
+ int ret;
+
+ nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
+
+ if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap,
+ nr_holes))
+ return NVM_IO_ERR;
+
+ rqd->end_io = pblk_end_partial_read;
+
+ ret = pblk_submit_io(pblk, rqd);
+ if (ret) {
+ bio_put(rqd->bio);
+ pblk_err(pblk, "partial read IO submission failed\n");
+ goto err;
+ }
+
+ return NVM_IO_OK;
+
+err:
+ pblk_err(pblk, "failed to perform partial read\n");
+
+ /* Free allocated pages in new bio */
+ pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);
__pblk_end_io_read(pblk, rqd, false);
return NVM_IO_ERR;
}
@@ -359,7 +409,7 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->inflight_reads);
#endif
@@ -382,7 +432,7 @@ retry:
WARN_ON(test_and_set_bit(0, read_bitmap));
meta_list[0].lba = cpu_to_le64(lba);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->cache_reads);
#endif
} else {
@@ -401,7 +451,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
struct pblk_g_ctx *r_ctx;
struct nvm_rq *rqd;
unsigned int bio_init_idx;
- unsigned long read_bitmap; /* Max 64 ppas per request */
+ DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
int ret = NVM_IO_ERR;
/* logic error: lba out-of-bounds. Ignore read request */
@@ -411,9 +461,10 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
return NVM_IO_ERR;
}
- generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);
+ generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
+ &pblk->disk->part0);
- bitmap_zero(&read_bitmap, nr_secs);
+ bitmap_zero(read_bitmap, nr_secs);
rqd = pblk_alloc_rqd(pblk, PBLK_READ);
@@ -436,7 +487,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
&rqd->dma_meta_list);
if (!rqd->meta_list) {
- pr_err("pblk: not able to allocate ppa list\n");
+ pblk_err(pblk, "not able to allocate ppa list\n");
goto fail_rqd_free;
}
@@ -444,32 +495,32 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
- pblk_read_ppalist_rq(pblk, rqd, bio, blba, &read_bitmap);
+ pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
} else {
- pblk_read_rq(pblk, rqd, bio, blba, &read_bitmap);
+ pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
}
- if (bitmap_full(&read_bitmap, nr_secs)) {
+ if (bitmap_full(read_bitmap, nr_secs)) {
atomic_inc(&pblk->inflight_io);
__pblk_end_io_read(pblk, rqd, false);
return NVM_IO_DONE;
}
/* All sectors are to be read from the device */
- if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
+ if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
struct bio *int_bio = NULL;
/* Clone read bio to deal with read errors internally */
int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
if (!int_bio) {
- pr_err("pblk: could not clone read bio\n");
+ pblk_err(pblk, "could not clone read bio\n");
goto fail_end_io;
}
rqd->bio = int_bio;
if (pblk_submit_io(pblk, rqd)) {
- pr_err("pblk: read IO submission failed\n");
+ pblk_err(pblk, "read IO submission failed\n");
ret = NVM_IO_ERR;
goto fail_end_io;
}
@@ -480,8 +531,15 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
/* The read bio request could be partially filled by the write buffer,
* but there are some holes that need to be read from the drive.
*/
- return pblk_partial_read(pblk, rqd, bio, bio_init_idx, &read_bitmap);
+ ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
+ nr_secs);
+ if (ret)
+ goto fail_meta_free;
+
+ return NVM_IO_OK;
+fail_meta_free:
+ nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
fail_rqd_free:
pblk_free_rqd(pblk, rqd, PBLK_READ);
return ret;
@@ -514,7 +572,7 @@ static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif
@@ -548,7 +606,7 @@ static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
rqd->ppa_addr = ppa_l2p;
valid_secs = 1;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->inflight_reads);
#endif
@@ -595,7 +653,8 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
- pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
+ pblk_err(pblk, "could not allocate GC bio (%lu)\n",
+ PTR_ERR(bio));
goto err_free_dma;
}
@@ -609,7 +668,7 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
if (pblk_submit_io_sync(pblk, &rqd)) {
ret = -EIO;
- pr_err("pblk: GC read request failed\n");
+ pblk_err(pblk, "GC read request failed\n");
goto err_free_bio;
}
@@ -619,12 +678,12 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
if (rqd.error) {
atomic_long_inc(&pblk->read_failed_gc);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
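
The read-path rework stops assuming a request fits in one machine word: read_bitmap becomes a DECLARE_BITMAP() sized for NVM_MAX_VLBA, and partial-read completion moves into an end_io callback (pblk_end_partial_read) instead of a synchronous submit. The bitmap change in isolation, with the before form reconstructed from the removed lines:

    /* Before: capped at BITS_PER_LONG, i.e. max 64 ppas per request. */
    unsigned long read_bitmap;
    bitmap_zero(&read_bitmap, nr_secs);

    /* After: fixed-size bitmap covering the maximum vectored LBA count. */
    DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
    bitmap_zero(read_bitmap, nr_secs);
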
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index 3a5069183859..e232e47e1353 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -77,7 +77,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
}
if (nr_valid_lbas != nr_lbas)
- pr_err("pblk: line %d - inconsistent lba list(%llu/%llu)\n",
+ pblk_err(pblk, "line %d - inconsistent lba list(%llu/%llu)\n",
line->id, nr_valid_lbas, nr_lbas);
line->left_msecs = 0;
@@ -184,7 +184,7 @@ next_read_rq:
/* If read fails, more padding is needed */
ret = pblk_submit_io_sync(pblk, rqd);
if (ret) {
- pr_err("pblk: I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "I/O submission failed: %d\n", ret);
return ret;
}
@@ -194,7 +194,7 @@ next_read_rq:
* we cannot recover from here. Need FTL log.
*/
if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
- pr_err("pblk: L2P recovery failed (%d)\n", rqd->error);
+ pblk_err(pblk, "L2P recovery failed (%d)\n", rqd->error);
return -EINTR;
}
@@ -273,7 +273,7 @@ static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
next_pad_rq:
rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
if (rq_ppas < pblk->min_write_pgs) {
- pr_err("pblk: corrupted pad line %d\n", line->id);
+ pblk_err(pblk, "corrupted pad line %d\n", line->id);
goto fail_free_pad;
}
@@ -342,7 +342,7 @@ next_pad_rq:
ret = pblk_submit_io(pblk, rqd);
if (ret) {
- pr_err("pblk: I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "I/O submission failed: %d\n", ret);
pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
goto fail_free_bio;
}
@@ -356,12 +356,12 @@ next_pad_rq:
if (!wait_for_completion_io_timeout(&pad_rq->wait,
msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: pad write timed out\n");
+ pblk_err(pblk, "pad write timed out\n");
ret = -ETIME;
}
if (!pblk_line_is_full(line))
- pr_err("pblk: corrupted padded line: %d\n", line->id);
+ pblk_err(pblk, "corrupted padded line: %d\n", line->id);
vfree(data);
free_rq:
@@ -461,7 +461,7 @@ next_rq:
ret = pblk_submit_io_sync(pblk, rqd);
if (ret) {
- pr_err("pblk: I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "I/O submission failed: %d\n", ret);
return ret;
}
@@ -501,11 +501,11 @@ next_rq:
ret = pblk_recov_pad_oob(pblk, line, pad_secs);
if (ret)
- pr_err("pblk: OOB padding failed (err:%d)\n", ret);
+ pblk_err(pblk, "OOB padding failed (err:%d)\n", ret);
ret = pblk_recov_read_oob(pblk, line, p, r_ptr);
if (ret)
- pr_err("pblk: OOB read failed (err:%d)\n", ret);
+ pblk_err(pblk, "OOB read failed (err:%d)\n", ret);
left_ppas = 0;
}
@@ -592,7 +592,7 @@ next_rq:
ret = pblk_submit_io_sync(pblk, rqd);
if (ret) {
- pr_err("pblk: I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "I/O submission failed: %d\n", ret);
bio_put(bio);
return ret;
}
@@ -671,14 +671,14 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
ret = pblk_recov_scan_oob(pblk, line, p, &done);
if (ret) {
- pr_err("pblk: could not recover L2P from OOB\n");
+ pblk_err(pblk, "could not recover L2P from OOB\n");
goto out;
}
if (!done) {
ret = pblk_recov_scan_all_oob(pblk, line, p);
if (ret) {
- pr_err("pblk: could not recover L2P from OOB\n");
+ pblk_err(pblk, "could not recover L2P from OOB\n");
goto out;
}
}
@@ -737,14 +737,15 @@ static int pblk_recov_check_line_version(struct pblk *pblk,
struct line_header *header = &emeta->header;
if (header->version_major != EMETA_VERSION_MAJOR) {
- pr_err("pblk: line major version mismatch: %d, expected: %d\n",
- header->version_major, EMETA_VERSION_MAJOR);
+ pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
+ header->version_major, EMETA_VERSION_MAJOR);
return 1;
}
-#ifdef NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
if (header->version_minor > EMETA_VERSION_MINOR)
- pr_info("pblk: newer line minor version found: %d\n", line_v);
+ pblk_info(pblk, "newer line minor version found: %d\n",
+ header->version_minor);
#endif
return 0;
@@ -851,7 +852,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
continue;
if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
- pr_err("pblk: found incompatible line version %u\n",
+ pblk_err(pblk, "found incompatible line version %u\n",
smeta_buf->header.version_major);
return ERR_PTR(-EINVAL);
}
@@ -863,7 +864,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
}
if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 16)) {
- pr_debug("pblk: ignore line %u due to uuid mismatch\n",
+ pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
i);
continue;
}
@@ -887,7 +888,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
pblk_recov_line_add_ordered(&recov_list, line);
found_lines++;
- pr_debug("pblk: recovering data line %d, seq:%llu\n",
+ pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
line->id, smeta_buf->seq_nr);
}
@@ -947,7 +948,7 @@ next:
line->emeta = NULL;
} else {
if (open_lines > 1)
- pr_err("pblk: failed to recover L2P\n");
+ pblk_err(pblk, "failed to recover L2P\n");
open_lines++;
line->meta_line = meta_line;
@@ -976,7 +977,7 @@ next:
out:
if (found_lines != recovered_lines)
- pr_err("pblk: failed to recover all found lines %d/%d\n",
+ pblk_err(pblk, "failed to recover all found lines %d/%d\n",
found_lines, recovered_lines);
return data_line;
@@ -999,7 +1000,7 @@ int pblk_recov_pad(struct pblk *pblk)
ret = pblk_recov_pad_oob(pblk, line, left_msecs);
if (ret) {
- pr_err("pblk: Tear down padding failed (%d)\n", ret);
+ pblk_err(pblk, "tear down padding failed (%d)\n", ret);
return ret;
}
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index 88a0a7c407aa..9fc3dfa168b4 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -268,7 +268,7 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
spin_unlock(&l_mg->free_lock);
if (nr_free_lines != free_line_cnt)
- pr_err("pblk: corrupted free line list:%d/%d\n",
+ pblk_err(pblk, "corrupted free line list:%d/%d\n",
nr_free_lines, free_line_cnt);
sz = snprintf(page, PAGE_SIZE - sz,
@@ -421,7 +421,7 @@ static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page)
return sz;
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
static ssize_t pblk_sysfs_stats_debug(struct pblk *pblk, char *page)
{
return snprintf(page, PAGE_SIZE,
@@ -598,7 +598,7 @@ static struct attribute sys_padding_dist = {
.mode = 0644,
};
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
static struct attribute sys_stats_debug_attr = {
.name = "stats",
.mode = 0444,
@@ -619,7 +619,7 @@ static struct attribute *pblk_attrs[] = {
&sys_write_amp_mileage,
&sys_write_amp_trip,
&sys_padding_dist,
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
&sys_stats_debug_attr,
#endif
NULL,
@@ -654,7 +654,7 @@ static ssize_t pblk_sysfs_show(struct kobject *kobj, struct attribute *attr,
return pblk_sysfs_get_write_amp_trip(pblk, buf);
else if (strcmp(attr->name, "padding_dist") == 0)
return pblk_sysfs_get_padding_dist(pblk, buf);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
else if (strcmp(attr->name, "stats") == 0)
return pblk_sysfs_stats_debug(pblk, buf);
#endif
@@ -697,8 +697,7 @@ int pblk_sysfs_init(struct gendisk *tdisk)
kobject_get(&parent_dev->kobj),
"%s", "pblk");
if (ret) {
- pr_err("pblk: could not register %s/pblk\n",
- tdisk->disk_name);
+ pblk_err(pblk, "could not register\n");
return ret;
}
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index f353e52941f5..ee774a86cf1e 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -38,7 +38,7 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
/* Release flags on context. Protect from writes */
smp_store_release(&w_ctx->flags, flags);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_dec(&rwb->inflight_flush_point);
#endif
}
@@ -51,7 +51,7 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
c_ctx->nr_padded);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif
@@ -78,7 +78,7 @@ static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
unsigned long flags;
unsigned long pos;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
@@ -196,7 +196,7 @@ static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
list_add_tail(&r_ctx->list, &pblk->resubmit_list);
spin_unlock(&pblk->resubmit_lock);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}
@@ -238,7 +238,7 @@ static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
if (!recovery) {
- pr_err("pblk: could not allocate recovery work\n");
+ pblk_err(pblk, "could not allocate recovery work\n");
return;
}
@@ -258,7 +258,7 @@ static void pblk_end_io_write(struct nvm_rq *rqd)
pblk_end_w_fail(pblk, rqd);
return;
}
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
else
WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif
@@ -279,7 +279,7 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
if (rqd->error) {
pblk_log_write_err(pblk, rqd);
- pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
+ pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
line->w_err_gc->has_write_err = 1;
}
@@ -356,11 +356,11 @@ static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
if ((!secs_to_sync && secs_to_flush)
|| (secs_to_sync < 0)
|| (secs_to_sync > secs_avail && !secs_to_flush)) {
- pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
+ pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
secs_avail, secs_to_sync, secs_to_flush);
}
#endif
@@ -397,7 +397,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
l_mg->emeta_alloc_type, GFP_KERNEL);
if (IS_ERR(bio)) {
- pr_err("pblk: failed to map emeta io");
+ pblk_err(pblk, "failed to map emeta io");
ret = PTR_ERR(bio);
goto fail_free_rqd;
}
@@ -428,7 +428,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
ret = pblk_submit_io(pblk, rqd);
if (ret) {
- pr_err("pblk: emeta I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
goto fail_rollback;
}
@@ -518,7 +518,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
/* Assign lbas to ppas and populate request structure */
err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
if (err) {
- pr_err("pblk: could not setup write request: %d\n", err);
+ pblk_err(pblk, "could not setup write request: %d\n", err);
return NVM_IO_ERR;
}
@@ -527,7 +527,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
/* Submit data write for current data line */
err = pblk_submit_io(pblk, rqd);
if (err) {
- pr_err("pblk: data I/O submission failed: %d\n", err);
+ pblk_err(pblk, "data I/O submission failed: %d\n", err);
return NVM_IO_ERR;
}
@@ -549,7 +549,8 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
/* Submit metadata write for previous data line */
err = pblk_submit_meta_io(pblk, meta_line);
if (err) {
- pr_err("pblk: metadata I/O submission failed: %d", err);
+ pblk_err(pblk, "metadata I/O submission failed: %d",
+ err);
return NVM_IO_ERR;
}
}
@@ -614,7 +615,7 @@ static int pblk_submit_write(struct pblk *pblk)
secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
secs_to_flush);
if (secs_to_sync > pblk->max_write_pgs) {
- pr_err("pblk: bad buffer sync calculation\n");
+ pblk_err(pblk, "bad buffer sync calculation\n");
return 1;
}
@@ -633,14 +634,14 @@ static int pblk_submit_write(struct pblk *pblk)
if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
secs_avail)) {
- pr_err("pblk: corrupted write bio\n");
+ pblk_err(pblk, "corrupted write bio\n");
goto fail_put_bio;
}
if (pblk_submit_io_set(pblk, rqd))
goto fail_free_bio;
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 34cc1d64a9d4..4760af7b6499 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -119,6 +119,16 @@ struct pblk_g_ctx {
u64 lba;
};
+/* partial read context */
+struct pblk_pr_ctx {
+ struct bio *orig_bio;
+ DECLARE_BITMAP(bitmap, NVM_MAX_VLBA);
+ unsigned int orig_nr_secs;
+ unsigned int bio_init_idx;
+ void *ppa_ptr;
+ dma_addr_t dma_ppa_list;
+};
+
/* Pad context */
struct pblk_pad_rq {
struct pblk *pblk;
@@ -193,7 +203,7 @@ struct pblk_rb {
spinlock_t w_lock; /* Write lock */
spinlock_t s_lock; /* Sync lock */
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_t inflight_flush_point; /* Not served REQ_FLUSH | REQ_FUA */
#endif
};
@@ -608,9 +618,6 @@ struct pblk {
int min_write_pgs; /* Minimum amount of pages required by controller */
int max_write_pgs; /* Maximum amount of pages supported by controller */
- int pgs_in_buffer; /* Number of pages that need to be held in buffer to
- * guarantee successful reads.
- */
sector_t capacity; /* Device capacity when bad blocks are subtracted */
@@ -639,7 +646,7 @@ struct pblk {
u64 nr_flush_rst; /* Flushes reset value for pad dist.*/
atomic64_t nr_flush; /* Number of flush/fua I/O */
-#ifdef CONFIG_NVM_DEBUG
+#ifdef CONFIG_NVM_PBLK_DEBUG
/* Non-persistent debug counters, 4kb sector I/Os */
atomic_long_t inflight_writes; /* Inflight writes (user and gc) */
atomic_long_t padded_writes; /* Sectors padded due to flush/fua */
@@ -706,6 +713,15 @@ struct pblk_line_ws {
#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))
+#define pblk_err(pblk, fmt, ...) \
+ pr_err("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
+#define pblk_info(pblk, fmt, ...) \
+ pr_info("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
+#define pblk_warn(pblk, fmt, ...) \
+ pr_warn("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
+#define pblk_debug(pblk, fmt, ...) \
+ pr_debug("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
+
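Editor's sketch: how the per-instance wrappers above are meant to be used. Callers pass the pblk instance and the macro prefixes the message with the target's disk name, which is what the pr_err -> pblk_err conversions throughout this series rely on. The helper below is illustrative only, not part of the patch.

	static void pblk_example_log(struct pblk *pblk)
	{
		/* With a gendisk named "pblk0" this expands to:
		 *   pr_err("pblk pblk0: bad state\n")
		 */
		pblk_err(pblk, "bad state\n");
	}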
/*
* pblk ring buffer operations
*/
@@ -1282,20 +1298,22 @@ static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
return !(nr_secs % pblk->min_write_pgs);
}
-#ifdef CONFIG_NVM_DEBUG
-static inline void print_ppa(struct nvm_geo *geo, struct ppa_addr *p,
+#ifdef CONFIG_NVM_PBLK_DEBUG
+static inline void print_ppa(struct pblk *pblk, struct ppa_addr *p,
char *msg, int error)
{
+ struct nvm_geo *geo = &pblk->dev->geo;
+
if (p->c.is_cached) {
- pr_err("ppa: (%s: %x) cache line: %llu\n",
+ pblk_err(pblk, "ppa: (%s: %x) cache line: %llu\n",
msg, error, (u64)p->c.line);
} else if (geo->version == NVM_OCSSD_SPEC_12) {
- pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
+ pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
msg, error,
p->g.ch, p->g.lun, p->g.blk,
p->g.pg, p->g.pl, p->g.sec);
} else {
- pr_err("ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n",
+ pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n",
msg, error,
p->m.grp, p->m.pu, p->m.chk, p->m.sec);
}
@@ -1307,16 +1325,16 @@ static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
int bit = -1;
if (rqd->nr_ppas == 1) {
- print_ppa(&pblk->dev->geo, &rqd->ppa_addr, "rqd", error);
+ print_ppa(pblk, &rqd->ppa_addr, "rqd", error);
return;
}
while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
bit + 1)) < rqd->nr_ppas) {
- print_ppa(&pblk->dev->geo, &rqd->ppa_list[bit], "rqd", error);
+ print_ppa(pblk, &rqd->ppa_list[bit], "rqd", error);
}
- pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
+ pblk_err(pblk, "error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
}
static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
@@ -1347,7 +1365,7 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
continue;
}
- print_ppa(geo, ppa, "boundary", i);
+ print_ppa(tgt_dev->q->queuedata, ppa, "boundary", i);
return 1;
}
@@ -1377,7 +1395,7 @@ static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
spin_lock(&line->lock);
if (line->state != PBLK_LINESTATE_OPEN) {
- pr_err("pblk: bad ppa: line:%d,state:%d\n",
+ pblk_err(pblk, "bad ppa: line:%d,state:%d\n",
line->id, line->state);
WARN_ON(1);
spin_unlock(&line->lock);
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index e63d29a95e76..841c005d8ebb 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -15,6 +15,12 @@ config ARM_MHU
The controller has 3 mailbox channels, the last of which can be
used in Secure mode only.
+config IMX_MBOX
+ tristate "i.MX Mailbox"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ Mailbox implementation for i.MX Messaging Unit (MU).
+
config PLATFORM_MHU
tristate "Platform MHU Mailbox"
depends on OF
@@ -189,4 +195,14 @@ config STM32_IPCC
Mailbox implementation for STMicroelectonics STM32 family chips
with hardware for Inter-Processor Communication Controller (IPCC)
between processors. Say Y here if you want to have this support.
+
+config MTK_CMDQ_MBOX
+ tristate "MediaTek CMDQ Mailbox Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select MTK_INFRACFG
+ help
+ Say yes here to add support for the MediaTek Command Queue (CMDQ)
+	  mailbox driver. The CMDQ is used to help read/write registers with
+	  critical timing constraints, such as updating the display
+	  configuration during vblank.
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 4d501bea7863..c818b5d011ae 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_MAILBOX_TEST) += mailbox-test.o
obj-$(CONFIG_ARM_MHU) += arm_mhu.o
+obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
+
obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o
obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
@@ -40,3 +42,5 @@ obj-$(CONFIG_QCOM_APCS_IPC) += qcom-apcs-ipc-mailbox.o
obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o
+
+obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
new file mode 100644
index 000000000000..363d35d5e49d
--- /dev/null
+++ b/drivers/mailbox/imx-mailbox.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+/* Transmit Register */
+#define IMX_MU_xTRn(x) (0x00 + 4 * (x))
+/* Receive Register */
+#define IMX_MU_xRRn(x) (0x10 + 4 * (x))
+/* Status Register */
+#define IMX_MU_xSR 0x20
+#define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x)))
+#define IMX_MU_xSR_RFn(x) BIT(24 + (3 - (x)))
+#define IMX_MU_xSR_TEn(x) BIT(20 + (3 - (x)))
+#define IMX_MU_xSR_BRDIP BIT(9)
+
+/* Control Register */
+#define IMX_MU_xCR 0x24
+/* General Purpose Interrupt Enable */
+#define IMX_MU_xCR_GIEn(x) BIT(28 + (3 - (x)))
+/* Receive Interrupt Enable */
+#define IMX_MU_xCR_RIEn(x) BIT(24 + (3 - (x)))
+/* Transmit Interrupt Enable */
+#define IMX_MU_xCR_TIEn(x) BIT(20 + (3 - (x)))
+/* General Purpose Interrupt Request */
+#define IMX_MU_xCR_GIRn(x) BIT(16 + (3 - (x)))
+
+#define IMX_MU_CHANS 16
+#define IMX_MU_CHAN_NAME_SIZE 20
+
+enum imx_mu_chan_type {
+ IMX_MU_TYPE_TX, /* Tx */
+ IMX_MU_TYPE_RX, /* Rx */
+ IMX_MU_TYPE_TXDB, /* Tx doorbell */
+ IMX_MU_TYPE_RXDB, /* Rx doorbell */
+};
+
+struct imx_mu_con_priv {
+ unsigned int idx;
+ char irq_desc[IMX_MU_CHAN_NAME_SIZE];
+ enum imx_mu_chan_type type;
+ struct mbox_chan *chan;
+ struct tasklet_struct txdb_tasklet;
+};
+
+struct imx_mu_priv {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t xcr_lock; /* control register lock */
+
+ struct mbox_controller mbox;
+ struct mbox_chan mbox_chans[IMX_MU_CHANS];
+
+ struct imx_mu_con_priv con_priv[IMX_MU_CHANS];
+ struct clk *clk;
+ int irq;
+
+ bool side_b;
+};
+
+static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct imx_mu_priv, mbox);
+}
+
+static void imx_mu_write(struct imx_mu_priv *priv, u32 val, u32 offs)
+{
+ iowrite32(val, priv->base + offs);
+}
+
+static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs)
+{
+ return ioread32(priv->base + offs);
+}
+
+static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, u32 set, u32 clr)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&priv->xcr_lock, flags);
+ val = imx_mu_read(priv, IMX_MU_xCR);
+ val &= ~clr;
+ val |= set;
+ imx_mu_write(priv, val, IMX_MU_xCR);
+ spin_unlock_irqrestore(&priv->xcr_lock, flags);
+
+ return val;
+}
+
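Editor's sketch: how this read-modify-write helper composes with the bit macros defined at the top of the file. The channel indexes here are examples only; in the driver, the real calls appear in imx_mu_send_data() and imx_mu_startup().

	static void imx_mu_example_toggle(struct imx_mu_priv *priv)
	{
		imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(1), 0); /* enable RX irq, chan 1 */
		imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(0)); /* disable TX irq, chan 0 */
	}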
+static void imx_mu_txdb_tasklet(unsigned long data)
+{
+ struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;
+
+ mbox_chan_txdone(cp->chan, 0);
+}
+
+static irqreturn_t imx_mu_isr(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+ u32 val, ctrl, dat;
+
+ ctrl = imx_mu_read(priv, IMX_MU_xCR);
+ val = imx_mu_read(priv, IMX_MU_xSR);
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ val &= IMX_MU_xSR_TEn(cp->idx) &
+ (ctrl & IMX_MU_xCR_TIEn(cp->idx));
+ break;
+ case IMX_MU_TYPE_RX:
+ val &= IMX_MU_xSR_RFn(cp->idx) &
+ (ctrl & IMX_MU_xCR_RIEn(cp->idx));
+ break;
+ case IMX_MU_TYPE_RXDB:
+ val &= IMX_MU_xSR_GIPn(cp->idx) &
+ (ctrl & IMX_MU_xCR_GIEn(cp->idx));
+ break;
+ default:
+ break;
+ }
+
+ if (!val)
+ return IRQ_NONE;
+
+ if (val == IMX_MU_xSR_TEn(cp->idx)) {
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
+ mbox_chan_txdone(chan, 0);
+ } else if (val == IMX_MU_xSR_RFn(cp->idx)) {
+ dat = imx_mu_read(priv, IMX_MU_xRRn(cp->idx));
+ mbox_chan_received_data(chan, (void *)&dat);
+ } else if (val == IMX_MU_xSR_GIPn(cp->idx)) {
+ imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), IMX_MU_xSR);
+ mbox_chan_received_data(chan, NULL);
+ } else {
+ dev_warn_ratelimited(priv->dev, "Not handled interrupt\n");
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int imx_mu_send_data(struct mbox_chan *chan, void *data)
+{
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+ u32 *arg = data;
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ imx_mu_write(priv, *arg, IMX_MU_xTRn(cp->idx));
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
+ break;
+ case IMX_MU_TYPE_TXDB:
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);
+ tasklet_schedule(&cp->txdb_tasklet);
+ break;
+ default:
+ dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx_mu_startup(struct mbox_chan *chan)
+{
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+ int ret;
+
+ if (cp->type == IMX_MU_TYPE_TXDB) {
+		/* Tx doorbell doesn't have ACK support */
+ tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
+ (unsigned long)cp);
+ return 0;
+ }
+
+ ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED, cp->irq_desc,
+ chan);
+ if (ret) {
+ dev_err(priv->dev,
+ "Unable to acquire IRQ %d\n", priv->irq);
+ return ret;
+ }
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_RX:
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(cp->idx), 0);
+ break;
+ case IMX_MU_TYPE_RXDB:
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIEn(cp->idx), 0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void imx_mu_shutdown(struct mbox_chan *chan)
+{
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+
+ if (cp->type == IMX_MU_TYPE_TXDB)
+ tasklet_kill(&cp->txdb_tasklet);
+
+ imx_mu_xcr_rmw(priv, 0,
+ IMX_MU_xCR_TIEn(cp->idx) | IMX_MU_xCR_RIEn(cp->idx));
+
+ free_irq(priv->irq, chan);
+}
+
+static const struct mbox_chan_ops imx_mu_ops = {
+ .send_data = imx_mu_send_data,
+ .startup = imx_mu_startup,
+ .shutdown = imx_mu_shutdown,
+};
+
+static struct mbox_chan *imx_mu_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ u32 type, idx, chan;
+
+ if (sp->args_count != 2) {
+ dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = sp->args[0]; /* channel type */
+ idx = sp->args[1]; /* index */
+ chan = type * 4 + idx;
+
+ if (chan >= mbox->num_chans) {
+ dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &mbox->chans[chan];
+}
+
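Editor's note: a worked example of the two-cell specifier decoded above, assuming a consumer node with mboxes = <&mu 3 2> (type IMX_MU_TYPE_RXDB, index 2):

	/* chan = type * 4 + idx = 3 * 4 + 2 = 14, and the probe loop below
	 * recovers the same pair: con_priv[14].type = 14 >> 2 = 3 (RXDB),
	 * con_priv[14].idx = 14 % 4 = 2, so xlate and probe stay in sync.
	 */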
+static void imx_mu_init_generic(struct imx_mu_priv *priv)
+{
+ if (priv->side_b)
+ return;
+
+ /* Set default MU configuration */
+ imx_mu_write(priv, 0, IMX_MU_xCR);
+}
+
+static int imx_mu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *iomem;
+ struct imx_mu_priv *priv;
+ unsigned int i;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ if (PTR_ERR(priv->clk) != -ENOENT)
+ return PTR_ERR(priv->clk);
+
+ priv->clk = NULL;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ for (i = 0; i < IMX_MU_CHANS; i++) {
+ struct imx_mu_con_priv *cp = &priv->con_priv[i];
+
+ cp->idx = i % 4;
+ cp->type = i >> 2;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ snprintf(cp->irq_desc, sizeof(cp->irq_desc),
+ "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ }
+
+ priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");
+
+ spin_lock_init(&priv->xcr_lock);
+
+ priv->mbox.dev = dev;
+ priv->mbox.ops = &imx_mu_ops;
+ priv->mbox.chans = priv->mbox_chans;
+ priv->mbox.num_chans = IMX_MU_CHANS;
+ priv->mbox.of_xlate = imx_mu_xlate;
+ priv->mbox.txdone_irq = true;
+
+ platform_set_drvdata(pdev, priv);
+
+ imx_mu_init_generic(priv);
+
+ return mbox_controller_register(&priv->mbox);
+}
+
+static int imx_mu_remove(struct platform_device *pdev)
+{
+ struct imx_mu_priv *priv = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&priv->mbox);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id imx_mu_dt_ids[] = {
+ { .compatible = "fsl,imx6sx-mu" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
+
+static struct platform_driver imx_mu_driver = {
+ .probe = imx_mu_probe,
+ .remove = imx_mu_remove,
+ .driver = {
+ .name = "imx_mu",
+ .of_match_table = imx_mu_dt_ids,
+ },
+};
+module_platform_driver(imx_mu_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
+MODULE_DESCRIPTION("Message Unit driver for i.MX");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
index a7040163dd43..b8b2b3533f46 100644
--- a/drivers/mailbox/mailbox-xgene-slimpro.c
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -195,9 +195,9 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mb_base = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
- if (!mb_base)
- return -ENOMEM;
+ mb_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(mb_base))
+ return PTR_ERR(mb_base);
/* Setup mailbox links */
for (i = 0; i < MBOX_CNT; i++) {
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
new file mode 100644
index 000000000000..aec46d5d3506
--- /dev/null
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/mtk-cmdq-mailbox.h>
+#include <linux/of_device.h>
+
+#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
+#define CMDQ_IRQ_MASK 0xffff
+#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
+
+#define CMDQ_CURR_IRQ_STATUS 0x10
+#define CMDQ_THR_SLOT_CYCLES 0x30
+#define CMDQ_THR_BASE 0x100
+#define CMDQ_THR_SIZE 0x80
+#define CMDQ_THR_WARM_RESET 0x00
+#define CMDQ_THR_ENABLE_TASK 0x04
+#define CMDQ_THR_SUSPEND_TASK 0x08
+#define CMDQ_THR_CURR_STATUS 0x0c
+#define CMDQ_THR_IRQ_STATUS 0x10
+#define CMDQ_THR_IRQ_ENABLE 0x14
+#define CMDQ_THR_CURR_ADDR 0x20
+#define CMDQ_THR_END_ADDR 0x24
+#define CMDQ_THR_WAIT_TOKEN 0x30
+#define CMDQ_THR_PRIORITY 0x40
+
+#define CMDQ_THR_ACTIVE_SLOT_CYCLES 0x3200
+#define CMDQ_THR_ENABLED 0x1
+#define CMDQ_THR_DISABLED 0x0
+#define CMDQ_THR_SUSPEND 0x1
+#define CMDQ_THR_RESUME 0x0
+#define CMDQ_THR_STATUS_SUSPENDED BIT(1)
+#define CMDQ_THR_DO_WARM_RESET BIT(0)
+#define CMDQ_THR_IRQ_DONE 0x1
+#define CMDQ_THR_IRQ_ERROR 0x12
+#define CMDQ_THR_IRQ_EN (CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
+#define CMDQ_THR_IS_WAITING BIT(31)
+
+#define CMDQ_JUMP_BY_OFFSET 0x10000000
+#define CMDQ_JUMP_BY_PA 0x10000001
+
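Editor's note: a quick sanity check of the per-thread register banking above, matching how cmdq_probe() later computes thread[i].base:

	/* GCE thread 2:
	 *   base      = CMDQ_THR_BASE + 2 * CMDQ_THR_SIZE = 0x100 + 0x100 = 0x200
	 *   CURR_ADDR = base + CMDQ_THR_CURR_ADDR         = 0x200 + 0x20  = 0x220
	 */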
+struct cmdq_thread {
+ struct mbox_chan *chan;
+ void __iomem *base;
+ struct list_head task_busy_list;
+ u32 priority;
+ bool atomic_exec;
+};
+
+struct cmdq_task {
+ struct cmdq *cmdq;
+ struct list_head list_entry;
+ dma_addr_t pa_base;
+ struct cmdq_thread *thread;
+ struct cmdq_pkt *pkt; /* the packet sent from mailbox client */
+};
+
+struct cmdq {
+ struct mbox_controller mbox;
+ void __iomem *base;
+ u32 irq;
+ u32 thread_nr;
+ struct cmdq_thread *thread;
+ struct clk *clock;
+ bool suspended;
+};
+
+static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
+{
+ u32 status;
+
+ writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);
+
+	/* If already disabled, treat the suspend as successful. */
+ if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
+ return 0;
+
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
+ status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
+ dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
+ (u32)(thread->base - cmdq->base));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void cmdq_thread_resume(struct cmdq_thread *thread)
+{
+ writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
+}
+
+static void cmdq_init(struct cmdq *cmdq)
+{
+ WARN_ON(clk_enable(cmdq->clock) < 0);
+ writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
+ clk_disable(cmdq->clock);
+}
+
+static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
+{
+ u32 warm_reset;
+
+ writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
+ warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
+ 0, 10)) {
+ dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
+ (u32)(thread->base - cmdq->base));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
+{
+ cmdq_thread_reset(cmdq, thread);
+ writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
+}
+
+/* notify GCE to re-fetch commands by setting GCE thread PC */
+static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
+{
+ writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
+ thread->base + CMDQ_THR_CURR_ADDR);
+}
+
+static void cmdq_task_insert_into_thread(struct cmdq_task *task)
+{
+ struct device *dev = task->cmdq->mbox.dev;
+ struct cmdq_thread *thread = task->thread;
+ struct cmdq_task *prev_task = list_last_entry(
+ &thread->task_busy_list, typeof(*task), list_entry);
+ u64 *prev_task_base = prev_task->pkt->va_base;
+
+ /* let previous task jump to this task */
+ dma_sync_single_for_cpu(dev, prev_task->pa_base,
+ prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
+ prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
+ (u64)CMDQ_JUMP_BY_PA << 32 | task->pa_base;
+ dma_sync_single_for_device(dev, prev_task->pa_base,
+ prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
+
+ cmdq_thread_invalidate_fetched_data(thread);
+}
+
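Editor's note: the function above chains tasks by patching the previous packet's final instruction slot and then rewriting the thread PC so the GCE re-fetches. A worked example of the encoded jump word:

	/* Chaining a task whose pa_base is 0x12340000 stores
	 *   (u64)CMDQ_JUMP_BY_PA << 32 | 0x12340000 = 0x1000000112340000
	 * i.e. an absolute-address jump to the new task's first command.
	 */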
+static bool cmdq_command_is_wfe(u64 cmd)
+{
+ u64 wfe_option = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
+ u64 wfe_op = (u64)(CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) << 32;
+ u64 wfe_mask = (u64)CMDQ_OP_CODE_MASK << 32 | 0xffffffff;
+
+ return ((cmd & wfe_mask) == (wfe_op | wfe_option));
+}
+
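Editor's sketch: the predicate above recognizes a WFE command with a single mask-and-compare because the opcode sits in the high 32 bits and the event/option bits in the low 32 bits. The shift and option constants come from mtk-cmdq-mailbox.h; treat their exact values as assumptions of this sketch.

	static void cmdq_example_wfe_check(void)
	{
		u64 cmd = (u64)(CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) << 32 |
			  CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;

		WARN_ON(!cmdq_command_is_wfe(cmd)); /* must match */
	}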
+/* We assume tasks in the same display GCE thread wait for the same event. */
+static void cmdq_task_remove_wfe(struct cmdq_task *task)
+{
+ struct device *dev = task->cmdq->mbox.dev;
+ u64 *base = task->pkt->va_base;
+ int i;
+
+ dma_sync_single_for_cpu(dev, task->pa_base, task->pkt->cmd_buf_size,
+ DMA_TO_DEVICE);
+ for (i = 0; i < CMDQ_NUM_CMD(task->pkt); i++)
+ if (cmdq_command_is_wfe(base[i]))
+ base[i] = (u64)CMDQ_JUMP_BY_OFFSET << 32 |
+ CMDQ_JUMP_PASS;
+ dma_sync_single_for_device(dev, task->pa_base, task->pkt->cmd_buf_size,
+ DMA_TO_DEVICE);
+}
+
+static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
+{
+ return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
+}
+
+static void cmdq_thread_wait_end(struct cmdq_thread *thread,
+ unsigned long end_pa)
+{
+ struct device *dev = thread->chan->mbox->dev;
+ unsigned long curr_pa;
+
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_ADDR,
+ curr_pa, curr_pa == end_pa, 1, 20))
+ dev_err(dev, "GCE thread cannot run to end.\n");
+}
+
+static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta)
+{
+ struct cmdq_task_cb *cb = &task->pkt->async_cb;
+ struct cmdq_cb_data data;
+
+ WARN_ON(cb->cb == (cmdq_async_flush_cb)NULL);
+ data.sta = sta;
+ data.data = cb->data;
+ cb->cb(data);
+
+ list_del(&task->list_entry);
+}
+
+static void cmdq_task_handle_error(struct cmdq_task *task)
+{
+ struct cmdq_thread *thread = task->thread;
+ struct cmdq_task *next_task;
+
+ dev_err(task->cmdq->mbox.dev, "task 0x%p error\n", task);
+ WARN_ON(cmdq_thread_suspend(task->cmdq, thread) < 0);
+ next_task = list_first_entry_or_null(&thread->task_busy_list,
+ struct cmdq_task, list_entry);
+ if (next_task)
+ writel(next_task->pa_base, thread->base + CMDQ_THR_CURR_ADDR);
+ cmdq_thread_resume(thread);
+}
+
+static void cmdq_thread_irq_handler(struct cmdq *cmdq,
+ struct cmdq_thread *thread)
+{
+ struct cmdq_task *task, *tmp, *curr_task = NULL;
+ u32 curr_pa, irq_flag, task_end_pa;
+ bool err;
+
+ irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
+ writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);
+
+ /*
+	 * When the ISR calls this function, another CPU core could run
+ * "release task" right before we acquire the spin lock, and thus
+ * reset / disable this GCE thread, so we need to check the enable
+ * bit of this GCE thread.
+ */
+ if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
+ return;
+
+ if (irq_flag & CMDQ_THR_IRQ_ERROR)
+ err = true;
+ else if (irq_flag & CMDQ_THR_IRQ_DONE)
+ err = false;
+ else
+ return;
+
+ curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR);
+
+ list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
+ list_entry) {
+ task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
+ if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
+ curr_task = task;
+
+ if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
+ cmdq_task_exec_done(task, CMDQ_CB_NORMAL);
+ kfree(task);
+ } else if (err) {
+ cmdq_task_exec_done(task, CMDQ_CB_ERROR);
+ cmdq_task_handle_error(curr_task);
+ kfree(task);
+ }
+
+ if (curr_task)
+ break;
+ }
+
+ if (list_empty(&thread->task_busy_list)) {
+ cmdq_thread_disable(cmdq, thread);
+ clk_disable(cmdq->clock);
+ }
+}
+
+static irqreturn_t cmdq_irq_handler(int irq, void *dev)
+{
+ struct cmdq *cmdq = dev;
+ unsigned long irq_status, flags = 0L;
+ int bit;
+
+ irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & CMDQ_IRQ_MASK;
+ if (!(irq_status ^ CMDQ_IRQ_MASK))
+ return IRQ_NONE;
+
+ for_each_clear_bit(bit, &irq_status, fls(CMDQ_IRQ_MASK)) {
+ struct cmdq_thread *thread = &cmdq->thread[bit];
+
+ spin_lock_irqsave(&thread->chan->lock, flags);
+ cmdq_thread_irq_handler(cmdq, thread);
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int cmdq_suspend(struct device *dev)
+{
+ struct cmdq *cmdq = dev_get_drvdata(dev);
+ struct cmdq_thread *thread;
+ int i;
+ bool task_running = false;
+
+ cmdq->suspended = true;
+
+ for (i = 0; i < cmdq->thread_nr; i++) {
+ thread = &cmdq->thread[i];
+ if (!list_empty(&thread->task_busy_list)) {
+ task_running = true;
+ break;
+ }
+ }
+
+ if (task_running)
+ dev_warn(dev, "exist running task(s) in suspend\n");
+
+ clk_unprepare(cmdq->clock);
+
+ return 0;
+}
+
+static int cmdq_resume(struct device *dev)
+{
+ struct cmdq *cmdq = dev_get_drvdata(dev);
+
+ WARN_ON(clk_prepare(cmdq->clock) < 0);
+ cmdq->suspended = false;
+ return 0;
+}
+
+static int cmdq_remove(struct platform_device *pdev)
+{
+ struct cmdq *cmdq = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&cmdq->mbox);
+ clk_unprepare(cmdq->clock);
+
+ if (cmdq->mbox.chans)
+ devm_kfree(&pdev->dev, cmdq->mbox.chans);
+
+ if (cmdq->thread)
+ devm_kfree(&pdev->dev, cmdq->thread);
+
+ devm_kfree(&pdev->dev, cmdq);
+
+ return 0;
+}
+
+static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
+ struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
+ struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
+ struct cmdq_task *task;
+ unsigned long curr_pa, end_pa;
+
+ /* Client should not flush new tasks if suspended. */
+ WARN_ON(cmdq->suspended);
+
+ task = kzalloc(sizeof(*task), GFP_ATOMIC);
+ task->cmdq = cmdq;
+ INIT_LIST_HEAD(&task->list_entry);
+ task->pa_base = pkt->pa_base;
+ task->thread = thread;
+ task->pkt = pkt;
+
+ if (list_empty(&thread->task_busy_list)) {
+ WARN_ON(clk_enable(cmdq->clock) < 0);
+ WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
+
+ writel(task->pa_base, thread->base + CMDQ_THR_CURR_ADDR);
+ writel(task->pa_base + pkt->cmd_buf_size,
+ thread->base + CMDQ_THR_END_ADDR);
+ writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
+ writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
+ writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
+ } else {
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+ curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR);
+ end_pa = readl(thread->base + CMDQ_THR_END_ADDR);
+
+ /*
+		 * Atomic execution should remove the subsequent WFE commands
+		 * so that only the first task waits for the event and the
+		 * thread is never paused while running.
+ */
+ if (thread->atomic_exec) {
+ /* GCE is executing if command is not WFE */
+ if (!cmdq_thread_is_in_wfe(thread)) {
+ cmdq_thread_resume(thread);
+ cmdq_thread_wait_end(thread, end_pa);
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+ /* set to this task directly */
+ writel(task->pa_base,
+ thread->base + CMDQ_THR_CURR_ADDR);
+ } else {
+ cmdq_task_insert_into_thread(task);
+ cmdq_task_remove_wfe(task);
+ smp_mb(); /* modify jump before enable thread */
+ }
+ } else {
+ /* check boundary */
+ if (curr_pa == end_pa - CMDQ_INST_SIZE ||
+ curr_pa == end_pa) {
+ /* set to this task directly */
+ writel(task->pa_base,
+ thread->base + CMDQ_THR_CURR_ADDR);
+ } else {
+ cmdq_task_insert_into_thread(task);
+ smp_mb(); /* modify jump before enable thread */
+ }
+ }
+ writel(task->pa_base + pkt->cmd_buf_size,
+ thread->base + CMDQ_THR_END_ADDR);
+ cmdq_thread_resume(thread);
+ }
+ list_move_tail(&task->list_entry, &thread->task_busy_list);
+
+ return 0;
+}
+
+static int cmdq_mbox_startup(struct mbox_chan *chan)
+{
+ return 0;
+}
+
+static void cmdq_mbox_shutdown(struct mbox_chan *chan)
+{
+}
+
+static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
+ .send_data = cmdq_mbox_send_data,
+ .startup = cmdq_mbox_startup,
+ .shutdown = cmdq_mbox_shutdown,
+};
+
+static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ int ind = sp->args[0];
+ struct cmdq_thread *thread;
+
+ if (ind >= mbox->num_chans)
+ return ERR_PTR(-EINVAL);
+
+ thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
+ thread->priority = sp->args[1];
+ thread->atomic_exec = (sp->args[2] != 0);
+ thread->chan = &mbox->chans[ind];
+
+ return &mbox->chans[ind];
+}
+
+static int cmdq_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct cmdq *cmdq;
+ int err, i;
+
+ cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
+ if (!cmdq)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cmdq->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cmdq->base)) {
+ dev_err(dev, "failed to ioremap gce\n");
+ return PTR_ERR(cmdq->base);
+ }
+
+ cmdq->irq = platform_get_irq(pdev, 0);
+ if (!cmdq->irq) {
+ dev_err(dev, "failed to get irq\n");
+ return -EINVAL;
+ }
+ err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
+ "mtk_cmdq", cmdq);
+ if (err < 0) {
+ dev_err(dev, "failed to register ISR (%d)\n", err);
+ return err;
+ }
+
+ dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
+ dev, cmdq->base, cmdq->irq);
+
+ cmdq->clock = devm_clk_get(dev, "gce");
+ if (IS_ERR(cmdq->clock)) {
+ dev_err(dev, "failed to get gce clk\n");
+ return PTR_ERR(cmdq->clock);
+ }
+
+ cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev);
+ cmdq->mbox.dev = dev;
+ cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
+ sizeof(*cmdq->mbox.chans), GFP_KERNEL);
+ if (!cmdq->mbox.chans)
+ return -ENOMEM;
+
+ cmdq->mbox.num_chans = cmdq->thread_nr;
+ cmdq->mbox.ops = &cmdq_mbox_chan_ops;
+ cmdq->mbox.of_xlate = cmdq_xlate;
+
+ /* make use of TXDONE_BY_ACK */
+ cmdq->mbox.txdone_irq = false;
+ cmdq->mbox.txdone_poll = false;
+
+ cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
+ sizeof(*cmdq->thread), GFP_KERNEL);
+ if (!cmdq->thread)
+ return -ENOMEM;
+
+ for (i = 0; i < cmdq->thread_nr; i++) {
+ cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
+ CMDQ_THR_SIZE * i;
+ INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
+ cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
+ }
+
+ err = mbox_controller_register(&cmdq->mbox);
+ if (err < 0) {
+ dev_err(dev, "failed to register mailbox: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, cmdq);
+ WARN_ON(clk_prepare(cmdq->clock) < 0);
+
+ cmdq_init(cmdq);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cmdq_pm_ops = {
+ .suspend = cmdq_suspend,
+ .resume = cmdq_resume,
+};
+
+static const struct of_device_id cmdq_of_ids[] = {
+ {.compatible = "mediatek,mt8173-gce", .data = (void *)16},
+ {}
+};
+
+static struct platform_driver cmdq_drv = {
+ .probe = cmdq_probe,
+ .remove = cmdq_remove,
+ .driver = {
+ .name = "mtk_cmdq",
+ .pm = &cmdq_pm_ops,
+ .of_match_table = cmdq_of_ids,
+ }
+};
+
+static int __init cmdq_drv_init(void)
+{
+ return platform_driver_register(&cmdq_drv);
+}
+
+static void __exit cmdq_drv_exit(void)
+{
+ platform_driver_unregister(&cmdq_drv);
+}
+
+subsys_initcall(cmdq_drv_init);
+module_exit(cmdq_drv_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index e1e2c085e68e..db66e952a871 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OMAP mailbox driver
*
@@ -6,15 +7,6 @@
*
* Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
* Suman Anna <s-anna@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/interrupt.h>
@@ -77,6 +69,10 @@ struct omap_mbox_queue {
bool full;
};
+struct omap_mbox_match_data {
+ u32 intr_type;
+};
+
struct omap_mbox_device {
struct device *dev;
struct mutex cfg_lock;
@@ -646,18 +642,21 @@ static const struct dev_pm_ops omap_mbox_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
};
+static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 };
+static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 };
+
static const struct of_device_id omap_mailbox_of_match[] = {
{
.compatible = "ti,omap2-mailbox",
- .data = (void *)MBOX_INTR_CFG_TYPE1,
+ .data = &omap2_data,
},
{
.compatible = "ti,omap3-mailbox",
- .data = (void *)MBOX_INTR_CFG_TYPE1,
+ .data = &omap2_data,
},
{
.compatible = "ti,omap4-mailbox",
- .data = (void *)MBOX_INTR_CFG_TYPE2,
+ .data = &omap4_data,
},
{
/* end */
@@ -700,7 +699,7 @@ static int omap_mbox_probe(struct platform_device *pdev)
struct omap_mbox_fifo *fifo;
struct device_node *node = pdev->dev.of_node;
struct device_node *child;
- const struct of_device_id *match;
+ const struct omap_mbox_match_data *match_data;
u32 intr_type, info_count;
u32 num_users, num_fifos;
u32 tmp[3];
@@ -712,10 +711,10 @@ static int omap_mbox_probe(struct platform_device *pdev)
return -ENODEV;
}
- match = of_match_device(omap_mailbox_of_match, &pdev->dev);
- if (!match)
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
return -ENODEV;
- intr_type = (u32)match->data;
+ intr_type = match_data->intr_type;
if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
return -ENODEV;
diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
index 5d04738c3c8a..5bceafbf6699 100644
--- a/drivers/mailbox/ti-msgmgr.c
+++ b/drivers/mailbox/ti-msgmgr.c
@@ -25,6 +25,17 @@
#define Q_STATE_OFFSET(queue) ((queue) * 0x4)
#define Q_STATE_ENTRY_COUNT_MASK (0xFFF000)
+#define SPROXY_THREAD_OFFSET(tid) (0x1000 * (tid))
+#define SPROXY_THREAD_DATA_OFFSET(tid, reg) \
+ (SPROXY_THREAD_OFFSET(tid) + ((reg) * 0x4) + 0x4)
+
+#define SPROXY_THREAD_STATUS_OFFSET(tid) (SPROXY_THREAD_OFFSET(tid))
+
+#define SPROXY_THREAD_STATUS_COUNT_MASK (0xFF)
+
+#define SPROXY_THREAD_CTRL_OFFSET(tid) (0x1000 + SPROXY_THREAD_OFFSET(tid))
+#define SPROXY_THREAD_CTRL_DIR_MASK (0x1 << 31)
+
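Editor's note: a worked example of the Secure Proxy layout encoded above, for thread id 2. The status and control offsets are applied to separate ioremapped regions (the "rt" and "scfg" resources), so the overlapping numeric values do not collide:

	/* SPROXY_THREAD_OFFSET(2)         = 0x1000 * 2          = 0x2000
	 * SPROXY_THREAD_DATA_OFFSET(2, 0) = 0x2000 + 0x0 + 0x4  = 0x2004
	 * SPROXY_THREAD_STATUS_OFFSET(2)  = 0x2000
	 * SPROXY_THREAD_CTRL_OFFSET(2)    = 0x1000 + 0x2000     = 0x3000
	 */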
/**
* struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor
* @queue_id: Queue Number for this path
@@ -42,14 +53,18 @@ struct ti_msgmgr_valid_queue_desc {
* @queue_count: Number of Queues
* @max_message_size: Message size in bytes
* @max_messages: Number of messages
- * @q_slices: Number of queue engines
- * @q_proxies: Number of queue proxies per page
* @data_first_reg: First data register for proxy data region
* @data_last_reg: Last data register for proxy data region
+ * @status_cnt_mask: Mask for getting the status value
+ * @status_err_mask: Mask for getting the error value, if applicable
* @tx_polled: Do I need to use polled mechanism for tx
* @tx_poll_timeout_ms: Timeout in ms if polled
* @valid_queues: List of Valid queues that the processor can access
+ * @data_region_name: Name of the proxy data region
+ * @status_region_name: Name of the proxy status region
+ * @ctrl_region_name: Name of the proxy control region
* @num_valid_queues: Number of valid queues
+ * @is_sproxy: Is this a Secure Proxy instance?
*
* This structure is used in of match data to describe how integration
* for a specific compatible SoC is done.
@@ -58,14 +73,18 @@ struct ti_msgmgr_desc {
u8 queue_count;
u8 max_message_size;
u8 max_messages;
- u8 q_slices;
- u8 q_proxies;
u8 data_first_reg;
u8 data_last_reg;
+ u32 status_cnt_mask;
+ u32 status_err_mask;
bool tx_polled;
int tx_poll_timeout_ms;
const struct ti_msgmgr_valid_queue_desc *valid_queues;
+ const char *data_region_name;
+ const char *status_region_name;
+ const char *ctrl_region_name;
int num_valid_queues;
+ bool is_sproxy;
};
/**
@@ -78,6 +97,7 @@ struct ti_msgmgr_desc {
* @queue_buff_start: First register of Data Buffer
* @queue_buff_end: Last (or confirmation) register of Data buffer
* @queue_state: Queue status register
+ * @queue_ctrl: Queue Control register
* @chan: Mailbox channel
* @rx_buff: Receive buffer pointer allocated at probe, max_message_size
*/
@@ -90,6 +110,7 @@ struct ti_queue_inst {
void __iomem *queue_buff_start;
void __iomem *queue_buff_end;
void __iomem *queue_state;
+ void __iomem *queue_ctrl;
struct mbox_chan *chan;
u32 *rx_buff;
};
@@ -100,6 +121,7 @@ struct ti_queue_inst {
* @desc: Description of the SoC integration
* @queue_proxy_region: Queue proxy region where queue buffers are located
* @queue_state_debug_region: Queue status register regions
+ * @queue_ctrl_region: Queue Control register regions
* @num_valid_queues: Number of valid queues defined for the processor
* Note: other queues are probably reserved for other processors
* in the SoC.
@@ -112,6 +134,7 @@ struct ti_msgmgr_inst {
const struct ti_msgmgr_desc *desc;
void __iomem *queue_proxy_region;
void __iomem *queue_state_debug_region;
+ void __iomem *queue_ctrl_region;
u8 num_valid_queues;
struct ti_queue_inst *qinsts;
struct mbox_controller mbox;
@@ -120,25 +143,54 @@ struct ti_msgmgr_inst {
/**
* ti_msgmgr_queue_get_num_messages() - Get the number of pending messages
+ * @d: Description of message manager
* @qinst: Queue instance for which we check the number of pending messages
*
* Return: number of messages pending in the queue (0 == no pending messages)
*/
-static inline int ti_msgmgr_queue_get_num_messages(struct ti_queue_inst *qinst)
+static inline int
+ti_msgmgr_queue_get_num_messages(const struct ti_msgmgr_desc *d,
+ struct ti_queue_inst *qinst)
{
u32 val;
+ u32 status_cnt_mask = d->status_cnt_mask;
/*
* We cannot use relaxed operation here - update may happen
* real-time.
*/
- val = readl(qinst->queue_state) & Q_STATE_ENTRY_COUNT_MASK;
- val >>= __ffs(Q_STATE_ENTRY_COUNT_MASK);
+ val = readl(qinst->queue_state) & status_cnt_mask;
+ val >>= __ffs(status_cnt_mask);
return val;
}
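Editor's note: the mask and shift above generalize what used to be hard-coded to Q_STATE_ENTRY_COUNT_MASK; two worked reads, one per descriptor:

	/* Secure Proxy (mask 0x000000FF): raw 0x00000005 -> (0x05 & 0xFF) >> 0    = 5
	 * Message Mgr  (mask 0x00FFF000): raw 0x00003000 -> (0x3000 & mask) >> 12 = 3
	 */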
/**
+ * ti_msgmgr_queue_is_error() - Check to see if there is queue error
+ * @d: Description of message manager
+ * @qinst: Queue instance for which we check the number of pending messages
+ *
+ * Return: true if error, else false
+ */
+static inline bool ti_msgmgr_queue_is_error(const struct ti_msgmgr_desc *d,
+ struct ti_queue_inst *qinst)
+{
+ u32 val;
+
+ /* Msgmgr has no error detection */
+ if (!d->is_sproxy)
+ return false;
+
+ /*
+ * We cannot use relaxed operation here - update may happen
+ * real-time.
+ */
+ val = readl(qinst->queue_state) & d->status_err_mask;
+
+ return val ? true : false;
+}
+
+/**
* ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue
* @irq: Interrupt number
* @p: Channel Pointer
@@ -171,8 +223,14 @@ static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
return IRQ_NONE;
}
+ desc = inst->desc;
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on Rx channel %s\n", qinst->name);
+ return IRQ_NONE;
+ }
+
/* Do I actually have messages to read? */
- msg_count = ti_msgmgr_queue_get_num_messages(qinst);
+ msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
if (!msg_count) {
/* Shared IRQ? */
dev_dbg(dev, "Spurious event - 0 pending data!\n");
@@ -185,7 +243,6 @@ static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
* of how many bytes I should be reading. Let the client figure this
* out.. I just read the full message and pass it on..
*/
- desc = inst->desc;
message.len = desc->max_message_size;
message.buf = (u8 *)qinst->rx_buff;
@@ -228,12 +285,20 @@ static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
{
struct ti_queue_inst *qinst = chan->con_priv;
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ const struct ti_msgmgr_desc *desc = inst->desc;
int msg_count;
if (qinst->is_tx)
return false;
- msg_count = ti_msgmgr_queue_get_num_messages(qinst);
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on channel %s\n", qinst->name);
+ return false;
+ }
+
+ msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
return msg_count ? true : false;
}
@@ -247,12 +312,25 @@ static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan)
{
struct ti_queue_inst *qinst = chan->con_priv;
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ const struct ti_msgmgr_desc *desc = inst->desc;
int msg_count;
if (!qinst->is_tx)
return false;
- msg_count = ti_msgmgr_queue_get_num_messages(qinst);
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on channel %s\n", qinst->name);
+ return false;
+ }
+
+ msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
+
+ if (desc->is_sproxy) {
+ /* In secure proxy, msg_count indicates how many we can send */
+ return msg_count ? true : false;
+ }
/* if we have any messages pending.. */
return msg_count ? false : true;
@@ -282,6 +360,11 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
}
desc = inst->desc;
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on channel %s\n", qinst->name);
+ return false;
+ }
+
if (desc->max_message_size < message->len) {
dev_err(dev, "Queue %s message length %zu > max %d\n",
qinst->name, message->len, desc->max_message_size);
@@ -315,6 +398,53 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
}
/**
+ * ti_msgmgr_queue_rx_irq_req() - RX IRQ request
+ * @dev: device pointer
+ * @d: descriptor for ti_msgmgr
+ * @qinst: Queue instance
+ * @chan: Channel pointer
+ */
+static int ti_msgmgr_queue_rx_irq_req(struct device *dev,
+ const struct ti_msgmgr_desc *d,
+ struct ti_queue_inst *qinst,
+ struct mbox_chan *chan)
+{
+ int ret = 0;
+ char of_rx_irq_name[7];
+ struct device_node *np;
+
+ snprintf(of_rx_irq_name, sizeof(of_rx_irq_name),
+ "rx_%03d", d->is_sproxy ? qinst->proxy_id : qinst->queue_id);
+
+ /* Get the IRQ if not found */
+ if (qinst->irq < 0) {
+ np = of_node_get(dev->of_node);
+ if (!np)
+ return -ENODATA;
+ qinst->irq = of_irq_get_byname(np, of_rx_irq_name);
+ of_node_put(np);
+
+ if (qinst->irq < 0) {
+ dev_err(dev,
+ "QID %d PID %d:No IRQ[%s]: %d\n",
+ qinst->queue_id, qinst->proxy_id,
+ of_rx_irq_name, qinst->irq);
+ return qinst->irq;
+ }
+ }
+
+	/* With the expectation that the IRQ might be shared in the SoC */
+ ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt,
+ IRQF_SHARED, qinst->name, chan);
+ if (ret) {
+ dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n",
+ qinst->irq, qinst->name, ret);
+ }
+
+ return ret;
+}
+
+/**
* ti_msgmgr_queue_startup() - Startup queue
* @chan: Channel pointer
*
@@ -322,19 +452,39 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
*/
static int ti_msgmgr_queue_startup(struct mbox_chan *chan)
{
- struct ti_queue_inst *qinst = chan->con_priv;
struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ struct ti_queue_inst *qinst = chan->con_priv;
+ const struct ti_msgmgr_desc *d = inst->desc;
int ret;
+ int msg_count;
+
+ /*
+	 * If the sproxy thread is starting up and can send messages, it is
+	 * a Tx thread; otherwise it is Rx.
+ */
+ if (d->is_sproxy) {
+ qinst->is_tx = (readl(qinst->queue_ctrl) &
+ SPROXY_THREAD_CTRL_DIR_MASK) ? false : true;
+
+ msg_count = ti_msgmgr_queue_get_num_messages(d, qinst);
+
+ if (!msg_count && qinst->is_tx) {
+ dev_err(dev, "%s: Cannot transmit with 0 credits!\n",
+ qinst->name);
+ return -EINVAL;
+ }
+ }
if (!qinst->is_tx) {
- /*
- * With the expectation that the IRQ might be shared in SoC
- */
- ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt,
- IRQF_SHARED, qinst->name, chan);
+ /* Allocate usage buffer for rx */
+ qinst->rx_buff = kzalloc(d->max_message_size, GFP_KERNEL);
+ if (!qinst->rx_buff)
+ return -ENOMEM;
+ /* Request IRQ */
+ ret = ti_msgmgr_queue_rx_irq_req(dev, d, qinst, chan);
if (ret) {
- dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n",
- qinst->irq, qinst->name, ret);
+ kfree(qinst->rx_buff);
return ret;
}
}
@@ -350,8 +500,10 @@ static void ti_msgmgr_queue_shutdown(struct mbox_chan *chan)
{
struct ti_queue_inst *qinst = chan->con_priv;
- if (!qinst->is_tx)
+ if (!qinst->is_tx) {
free_irq(qinst->irq, chan);
+ kfree(qinst->rx_buff);
+ }
}
/**
@@ -368,20 +520,38 @@ static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
struct ti_msgmgr_inst *inst;
int req_qid, req_pid;
struct ti_queue_inst *qinst;
- int i;
+ const struct ti_msgmgr_desc *d;
+ int i, ncells;
inst = container_of(mbox, struct ti_msgmgr_inst, mbox);
if (WARN_ON(!inst))
return ERR_PTR(-EINVAL);
- /* #mbox-cells is 2 */
- if (p->args_count != 2) {
- dev_err(inst->dev, "Invalid arguments in dt[%d] instead of 2\n",
- p->args_count);
+ d = inst->desc;
+
+ if (d->is_sproxy)
+ ncells = 1;
+ else
+ ncells = 2;
+ if (p->args_count != ncells) {
+ dev_err(inst->dev, "Invalid arguments in dt[%d]. Must be %d\n",
+ p->args_count, ncells);
return ERR_PTR(-EINVAL);
}
- req_qid = p->args[0];
- req_pid = p->args[1];
+ if (ncells == 1) {
+ req_qid = 0;
+ req_pid = p->args[0];
+ } else {
+ req_qid = p->args[0];
+ req_pid = p->args[1];
+ }
+
+ if (d->is_sproxy) {
+ if (req_pid > d->num_valid_queues)
+ goto err;
+ qinst = &inst->qinsts[req_pid];
+ return qinst->chan;
+ }
for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues;
i++, qinst++) {
@@ -389,6 +559,7 @@ static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
return qinst->chan;
}
+err:
dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %s\n",
req_qid, req_pid, p->np->name);
return ERR_PTR(-ENOENT);
@@ -415,6 +586,8 @@ static int ti_msgmgr_queue_setup(int idx, struct device *dev,
struct ti_queue_inst *qinst,
struct mbox_chan *chan)
{
+ char *dir;
+
qinst->proxy_id = qd->proxy_id;
qinst->queue_id = qd->queue_id;
@@ -424,40 +597,43 @@ static int ti_msgmgr_queue_setup(int idx, struct device *dev,
return -ERANGE;
}
- qinst->is_tx = qd->is_tx;
- snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d",
- dev_name(dev), qinst->is_tx ? "tx" : "rx", qinst->queue_id,
- qinst->proxy_id);
-
- if (!qinst->is_tx) {
- char of_rx_irq_name[7];
-
- snprintf(of_rx_irq_name, sizeof(of_rx_irq_name),
- "rx_%03d", qinst->queue_id);
-
- qinst->irq = of_irq_get_byname(np, of_rx_irq_name);
- if (qinst->irq < 0) {
- dev_crit(dev,
- "[%d]QID %d PID %d:No IRQ[%s]: %d\n",
- idx, qinst->queue_id, qinst->proxy_id,
- of_rx_irq_name, qinst->irq);
- return qinst->irq;
- }
- /* Allocate usage buffer for rx */
- qinst->rx_buff = devm_kzalloc(dev,
- d->max_message_size, GFP_KERNEL);
- if (!qinst->rx_buff)
- return -ENOMEM;
+ if (d->is_sproxy) {
+ qinst->queue_buff_start = inst->queue_proxy_region +
+ SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
+ d->data_first_reg);
+ qinst->queue_buff_end = inst->queue_proxy_region +
+ SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
+ d->data_last_reg);
+ qinst->queue_state = inst->queue_state_debug_region +
+ SPROXY_THREAD_STATUS_OFFSET(qinst->proxy_id);
+ qinst->queue_ctrl = inst->queue_ctrl_region +
+ SPROXY_THREAD_CTRL_OFFSET(qinst->proxy_id);
+
+		/* XXX: DO NOT read registers here! Some may be unusable. */
+ dir = "thr";
+ snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d",
+ dev_name(dev), dir, qinst->proxy_id);
+ } else {
+ qinst->queue_buff_start = inst->queue_proxy_region +
+ Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
+ d->data_first_reg);
+ qinst->queue_buff_end = inst->queue_proxy_region +
+ Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
+ d->data_last_reg);
+ qinst->queue_state =
+ inst->queue_state_debug_region +
+ Q_STATE_OFFSET(qinst->queue_id);
+ qinst->is_tx = qd->is_tx;
+ dir = qinst->is_tx ? "tx" : "rx";
+ snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d",
+ dev_name(dev), dir, qinst->queue_id, qinst->proxy_id);
}
- qinst->queue_buff_start = inst->queue_proxy_region +
- Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_first_reg);
- qinst->queue_buff_end = inst->queue_proxy_region +
- Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_last_reg);
- qinst->queue_state = inst->queue_state_debug_region +
- Q_STATE_OFFSET(qinst->queue_id);
qinst->chan = chan;
+ /* Setup an error value for IRQ - Lazy allocation */
+ qinst->irq = -EINVAL;
+
chan->con_priv = qinst;
dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n",
@@ -494,19 +670,37 @@ static const struct ti_msgmgr_desc k2g_desc = {
.queue_count = 64,
.max_message_size = 64,
.max_messages = 128,
- .q_slices = 1,
- .q_proxies = 1,
+ .data_region_name = "queue_proxy_region",
+ .status_region_name = "queue_state_debug_region",
.data_first_reg = 16,
.data_last_reg = 31,
+ .status_cnt_mask = Q_STATE_ENTRY_COUNT_MASK,
.tx_polled = false,
.valid_queues = k2g_valid_queues,
.num_valid_queues = ARRAY_SIZE(k2g_valid_queues),
+ .is_sproxy = false,
+};
+
+static const struct ti_msgmgr_desc am654_desc = {
+ .queue_count = 190,
+ .num_valid_queues = 190,
+ .max_message_size = 60,
+ .data_region_name = "target_data",
+ .status_region_name = "rt",
+ .ctrl_region_name = "scfg",
+ .data_first_reg = 0,
+ .data_last_reg = 14,
+ .status_cnt_mask = SPROXY_THREAD_STATUS_COUNT_MASK,
+ .tx_polled = false,
+ .is_sproxy = true,
};
static const struct of_device_id ti_msgmgr_of_match[] = {
{.compatible = "ti,k2g-message-manager", .data = &k2g_desc},
+ {.compatible = "ti,am654-secure-proxy", .data = &am654_desc},
{ /* Sentinel */ }
};
+
MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
static int ti_msgmgr_probe(struct platform_device *pdev)
@@ -546,17 +740,25 @@ static int ti_msgmgr_probe(struct platform_device *pdev)
inst->desc = desc;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "queue_proxy_region");
+ desc->data_region_name);
inst->queue_proxy_region = devm_ioremap_resource(dev, res);
if (IS_ERR(inst->queue_proxy_region))
return PTR_ERR(inst->queue_proxy_region);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "queue_state_debug_region");
+ desc->status_region_name);
inst->queue_state_debug_region = devm_ioremap_resource(dev, res);
if (IS_ERR(inst->queue_state_debug_region))
return PTR_ERR(inst->queue_state_debug_region);
+ if (desc->is_sproxy) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ desc->ctrl_region_name);
+ inst->queue_ctrl_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(inst->queue_ctrl_region))
+ return PTR_ERR(inst->queue_ctrl_region);
+ }
+
dev_dbg(dev, "proxy region=%p, queue_state=%p\n",
inst->queue_proxy_region, inst->queue_state_debug_region);
@@ -578,12 +780,29 @@ static int ti_msgmgr_probe(struct platform_device *pdev)
return -ENOMEM;
inst->chans = chans;
- for (i = 0, queue_desc = desc->valid_queues;
- i < queue_count; i++, qinst++, chans++, queue_desc++) {
- ret = ti_msgmgr_queue_setup(i, dev, np, inst,
- desc, queue_desc, qinst, chans);
- if (ret)
- return ret;
+ if (desc->is_sproxy) {
+ struct ti_msgmgr_valid_queue_desc sproxy_desc;
+
+ /* All proxies may be valid in a Secure Proxy instance */
+ for (i = 0; i < queue_count; i++, qinst++, chans++) {
+ sproxy_desc.queue_id = 0;
+ sproxy_desc.proxy_id = i;
+ ret = ti_msgmgr_queue_setup(i, dev, np, inst,
+ desc, &sproxy_desc, qinst,
+ chans);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /* Only some proxies are valid in a Message Manager */
+ for (i = 0, queue_desc = desc->valid_queues;
+ i < queue_count; i++, qinst++, chans++, queue_desc++) {
+ ret = ti_msgmgr_queue_setup(i, dev, np, inst,
+ desc, queue_desc, qinst,
+ chans);
+ if (ret)
+ return ret;
+ }
}
mbox = &inst->mbox;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index d6bf294f3907..05f82ff6f016 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -328,13 +328,6 @@ struct cached_dev {
*/
atomic_t has_dirty;
- /*
- * Set to zero by things that touch the backing volume-- except
- * writeback. Incremented by writeback. Used to determine when to
- * accelerate idle writeback.
- */
- atomic_t backing_idle;
-
struct bch_ratelimit writeback_rate;
struct delayed_work writeback_rate_update;
@@ -423,9 +416,9 @@ struct cache {
/*
* When allocating new buckets, prio_write() gets first dibs - since we
* may not be able to allocate at all without writing priorities and gens.
- * prio_buckets[] contains the last buckets we wrote priorities to (so
- * gc can mark them as metadata), prio_next[] contains the buckets
- * allocated for the next prio write.
+ * prio_last_buckets[] contains the last buckets we wrote priorities to
+ * (so gc can mark them as metadata), prio_buckets[] contains the
+ * buckets allocated for the next prio write.
*/
uint64_t *prio_buckets;
uint64_t *prio_last_buckets;
@@ -474,6 +467,7 @@ struct cache {
struct gc_stat {
size_t nodes;
+ size_t nodes_pre;
size_t key_bytes;
size_t nkeys;
@@ -514,6 +508,8 @@ struct cache_set {
struct cache_accounting accounting;
unsigned long flags;
+ atomic_t idle_counter;
+ atomic_t at_max_writeback_rate;
struct cache_sb sb;
@@ -523,8 +519,10 @@ struct cache_set {
struct bcache_device **devices;
unsigned devices_max_used;
+ atomic_t attached_dev_nr;
struct list_head cached_devs;
uint64_t cached_dev_sectors;
+ atomic_long_t flash_dev_dirty_sectors;
struct closure caching;
struct closure sb_write;
@@ -603,6 +601,10 @@ struct cache_set {
*/
atomic_t rescale;
/*
+ * Used by GC to identify whether any front side I/O is in flight
+ */
+ atomic_t search_inflight;
+ /*
* When we invalidate buckets, we use both the priority and the amount
* of good data to determine which buckets to reuse first - to weight
* those together consistently we keep track of the smallest nonzero
@@ -995,7 +997,7 @@ void bch_open_buckets_free(struct cache_set *);
int bch_cache_allocator_start(struct cache *ca);
void bch_debug_exit(void);
-int bch_debug_init(struct kobject *);
+void bch_debug_init(struct kobject *kobj);
void bch_request_exit(void);
int bch_request_init(void);
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index f3403b45bc28..596c93b44e9b 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -366,6 +366,10 @@ EXPORT_SYMBOL(bch_btree_keys_init);
/* Binary tree stuff for auxiliary search trees */
+/*
+ * Return the array index next to j when doing an in-order traversal
+ * of a binary tree stored in a linear array
+ */
static unsigned inorder_next(unsigned j, unsigned size)
{
if (j * 2 + 1 < size) {
@@ -379,6 +383,10 @@ static unsigned inorder_next(unsigned j, unsigned size)
return j;
}
+/*
+ * Return the array index previous to j when doing an in-order traversal
+ * of a binary tree stored in a linear array
+ */
static unsigned inorder_prev(unsigned j, unsigned size)
{
if (j * 2 < size) {
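/*
 * Editor's sketch, not part of the patch: the layout assumed by
 * inorder_next()/inorder_prev() is the usual 1-based implicit binary
 * tree in a flat array, where node j has children 2*j and 2*j + 1.
 * For example, with size == 8 (nodes 1..7) the in-order sequence is
 * 4 2 5 1 6 3 7, and inorder_next(2, 8) steps to the right child
 * 2*2 + 1 == 5, which has no left child in range, so 5 is returned
 * as the in-order successor of 2.
 */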
@@ -421,6 +429,10 @@ static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
return j;
}
+/*
+ * Return the cacheline index in bset_tree->data, where j is the index
+ * into the linear array which stores the auxiliary binary tree
+ */
static unsigned to_inorder(unsigned j, struct bset_tree *t)
{
return __to_inorder(j, t->size, t->extra);
@@ -441,6 +453,10 @@ static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
return j;
}
+/*
+ * Return an index into the linear array which stores the auxiliary binary
+ * tree; j is the cacheline index of t->data.
+ */
static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
{
return __inorder_to_tree(j, t->size, t->extra);
@@ -546,6 +562,20 @@ static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
return low;
}
+/*
+ * Calculate the mantissa value for struct bkey_float.
+ * If the most significant bit of f->exponent is not set, then
+ * - f->exponent >> 6 is 0
+ * - p[0] points to bkey->low
+ * - p[-1] borrows bits from KEY_INODE() of bkey->high
+ * If the most significant bit of f->exponent is set, then
+ * - f->exponent >> 6 is 1
+ * - p[0] points to bits from KEY_INODE() of bkey->high
+ * - p[-1] points to other bits from KEY_INODE() of
+ * bkey->high too.
+ * See make_bfloat() to check when the most significant bit of
+ * f->exponent is set or not.
+ */
static inline unsigned bfloat_mantissa(const struct bkey *k,
struct bkey_float *f)
{
@@ -570,6 +600,16 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
BUG_ON(m < l || m > r);
BUG_ON(bkey_next(p) != m);
+ /*
+ * If l and r have different KEY_INODE values (different backing
+ * devices), f->exponent records how many of the least significant
+ * bits differ between the KEY_INODE values and sets the most
+ * significant bit to 1 (by +64).
+ * If l and r have the same KEY_INODE value, f->exponent records
+ * how many of the least significant bits of bkey->low differ.
+ * See bfloat_mantissa() for how the most significant bit of
+ * f->exponent is used to calculate the bfloat mantissa value.
+ */
if (KEY_INODE(l) != KEY_INODE(r))
f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
else
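/*
 * Editor's worked example with hypothetical key values: for
 * KEY_INODE(l) == 0x10 and KEY_INODE(r) == 0x13, the XOR is 0x3 and
 * fls64(0x3) == 2, so f->exponent becomes 2 + 64. The low bits say the
 * inodes first differ within their bottom 2 bits, and the +64 sets the
 * bit that bfloat_mantissa() tests (f->exponent >> 6) to know the
 * mantissa must be drawn from KEY_INODE() bits in bkey->high rather
 * than from bkey->low.
 */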
@@ -633,6 +673,15 @@ void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
}
EXPORT_SYMBOL(bch_bset_init_next);
+/*
+ * Build the auxiliary binary tree 'struct bset_tree *t'; this tree is used
+ * to accelerate bkey search in a btree node (pointed to by bset_tree->data
+ * in memory). After searching the auxiliary tree via bset_search_tree(),
+ * a struct bset_search_iter is returned which indicates the range [l, r]
+ * in bset_tree->data where the bkey being searched for might be. A
+ * following linear comparison then does the exact search; see
+ * __bch_bset_search() for how the auxiliary tree is used.
+ */
void bch_bset_build_written_tree(struct btree_keys *b)
{
struct bset_tree *t = bset_tree_last(b);
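/*
 * Editor's sketch of the resulting two-phase lookup, with hypothetical
 * usage rather than the exact driver code:
 *
 *	struct bset_search_iter i = bset_search_tree(t, search);
 *
 *	while (i.l != i.r && bkey_cmp(i.l, search) <= 0)
 *		i.l = bkey_next(i.l);	(linear pass inside [l, r])
 *
 * The auxiliary tree narrows the search to one small range, and the
 * short linear scan then finds the exact position.
 */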
@@ -898,6 +947,17 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
unsigned inorder, j, n = 1;
do {
+ /*
+ * A bit trick here.
+ * If p < t->size, (int)(p - t->size) is negative and its most
+ * significant bit is set, so an arithmetic right shift by 31
+ * bits yields an all-ones mask. If p >= t->size, the most
+ * significant bit is clear and the shift yields 0.
+ * So the following 2 lines are equivalent to
+ * if (p >= t->size)
+ * p = 0;
+ * but a branch instruction is avoided.
+ */
unsigned p = n << 4;
p &= ((int) (p - t->size)) >> 31;
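/*
 * Editor's sketch, not part of the patch: a worked example of the
 * branch-free clamp above, assuming 32-bit int and the arithmetic
 * right shift GCC guarantees for signed types. With a hypothetical
 * t->size of 100:
 *
 *   p = 96:  (int)(96 - 100)  == -4 -> >> 31 == 0xffffffff, p stays 96
 *   p = 112: (int)(112 - 100) == 12 -> >> 31 == 0x00000000, p becomes 0
 */
static unsigned clamp_below(unsigned p, unsigned size)
{
	/* mask is all ones when p < size, all zeros otherwise */
	return p & (((int)(p - size)) >> 31);
}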
@@ -907,6 +967,9 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
f = &t->tree[j];
/*
* Similar bit trick: a subtraction is used to avoid a branch
* instruction.
+ *
* n = (f->mantissa > bfloat_mantissa())
* ? j * 2
* : j * 2 + 1;
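/*
 * Editor's sketch, not part of the patch: the general form of the
 * subtraction trick referenced above. For unsigned values below 2^31,
 * (a - b) wraps to a value with the top bit set exactly when a < b,
 * so shifting right by 31 yields the comparison result with no branch:
 */
static unsigned less_than(unsigned a, unsigned b)
{
	/* 1 when a < b, 0 otherwise (valid for a, b < 2^31) */
	return (a - b) >> 31;
}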
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 547c9eedc2f4..c19f7716df88 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -90,6 +90,9 @@
#define MAX_NEED_GC 64
#define MAX_SAVE_PRIO 72
+#define MAX_GC_TIMES 100
+#define MIN_GC_NODES 100
+#define GC_SLEEP_MS 100
#define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
@@ -1008,6 +1011,13 @@ retry:
BUG_ON(b->level != level);
}
+ if (btree_node_io_error(b)) {
+ rw_unlock(write, b);
+ return ERR_PTR(-EIO);
+ }
+
+ BUG_ON(!b->written);
+
b->parent = parent;
b->accessed = 1;
@@ -1019,13 +1029,6 @@ retry:
for (; i <= b->keys.nsets; i++)
prefetch(b->keys.set[i].data);
- if (btree_node_io_error(b)) {
- rw_unlock(write, b);
- return ERR_PTR(-EIO);
- }
-
- BUG_ON(!b->written);
-
return b;
}
@@ -1520,6 +1523,32 @@ static unsigned btree_gc_count_keys(struct btree *b)
return ret;
}
+static size_t btree_gc_min_nodes(struct cache_set *c)
+{
+ size_t min_nodes;
+
+ /*
+ * Since incremental GC stops for 100ms whenever front
+ * side I/O arrives, if GC only processed a constant
+ * number (100) of nodes each time and there were many
+ * btree nodes, GC would last a long time, and the front
+ * side I/Os would run out of buckets (since no new
+ * bucket can be allocated during GC) and be blocked
+ * again. So instead of a constant, GC processes a
+ * varying number of nodes according to the total btree
+ * node count, realized by dividing GC into a constant
+ * (100) number of passes: with many btree nodes GC
+ * processes more nodes per pass, otherwise fewer
+ * (but no less than MIN_GC_NODES).
+ */
+ min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
+ if (min_nodes < MIN_GC_NODES)
+ min_nodes = MIN_GC_NODES;
+
+ return min_nodes;
+}
+
+
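/*
 * Editor's worked example: with MAX_GC_TIMES == 100 and
 * MIN_GC_NODES == 100, a cache set with 1,000,000 btree nodes lets
 * each GC pass handle 1,000,000 / 100 == 10,000 nodes before yielding
 * for GC_SLEEP_MS, while a small set with 5,000 nodes falls back to
 * the MIN_GC_NODES floor of 100 nodes per pass.
 */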
static int btree_gc_recurse(struct btree *b, struct btree_op *op,
struct closure *writes, struct gc_stat *gc)
{
@@ -1585,6 +1614,13 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
r->b = NULL;
+ if (atomic_read(&b->c->search_inflight) &&
+ gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
+ gc->nodes_pre = gc->nodes;
+ ret = -EAGAIN;
+ break;
+ }
+
if (need_resched()) {
ret = -EAGAIN;
break;
@@ -1753,7 +1789,10 @@ static void bch_btree_gc(struct cache_set *c)
closure_sync(&writes);
cond_resched();
- if (ret && ret != -EAGAIN)
+ if (ret == -EAGAIN)
+ schedule_timeout_interruptible(msecs_to_jiffies
+ (GC_SLEEP_MS));
+ else if (ret)
pr_warn("gc failed!");
} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
@@ -1834,8 +1873,14 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
do {
k = bch_btree_iter_next_filter(&iter, &b->keys,
bch_ptr_bad);
- if (k)
+ if (k) {
btree_node_prefetch(b, k);
+ /*
+ * initialize c->gc_stats.nodes
+ * for incremental GC
+ */
+ b->c->gc_stats.nodes++;
+ }
if (p)
ret = btree(check_recurse, p, b, op);
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index d211e2c25b6b..68e9d926134d 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -152,7 +152,7 @@ static inline bool btree_node_ ## flag(struct btree *b) \
{ return test_bit(BTREE_NODE_ ## flag, &b->flags); } \
\
static inline void set_btree_node_ ## flag(struct btree *b) \
-{ set_bit(BTREE_NODE_ ## flag, &b->flags); } \
+{ set_bit(BTREE_NODE_ ## flag, &b->flags); }
enum btree_flags {
BTREE_NODE_io_error,
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 0e14969182c6..618253683d40 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -199,11 +199,16 @@ static const struct file_operations debug_ops = {
.release = single_release
};
-int __init closure_debug_init(void)
+void __init closure_debug_init(void)
{
- closure_debug = debugfs_create_file("closures",
- 0400, bcache_debug, NULL, &debug_ops);
- return IS_ERR_OR_NULL(closure_debug);
+ if (!IS_ERR_OR_NULL(bcache_debug))
+ /*
+ * It is unnecessary to check the return value of
+ * debugfs_create_file(); callers are not supposed
+ * to care about it.
+ */
+ closure_debug = debugfs_create_file(
+ "closures", 0400, bcache_debug, NULL, &debug_ops);
}
#endif
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 71427eb5fdae..7c2c5bc7c88b 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -186,13 +186,13 @@ static inline void closure_sync(struct closure *cl)
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
-int closure_debug_init(void);
+void closure_debug_init(void);
void closure_debug_create(struct closure *cl);
void closure_debug_destroy(struct closure *cl);
#else
-static inline int closure_debug_init(void) { return 0; }
+static inline void closure_debug_init(void) {}
static inline void closure_debug_create(struct closure *cl) {}
static inline void closure_debug_destroy(struct closure *cl) {}
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index d030ce3025a6..12034c07257b 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -110,11 +110,15 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
struct bio_vec bv, cbv;
struct bvec_iter iter, citer = { 0 };
- check = bio_clone_kmalloc(bio, GFP_NOIO);
+ check = bio_kmalloc(GFP_NOIO, bio_segments(bio));
if (!check)
return;
+ check->bi_disk = bio->bi_disk;
check->bi_opf = REQ_OP_READ;
+ check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
+ check->bi_iter.bi_size = bio->bi_iter.bi_size;
+ bch_bio_map(check, NULL);
if (bch_bio_alloc_pages(check, GFP_NOIO))
goto out_put;
@@ -248,11 +252,12 @@ void bch_debug_exit(void)
debugfs_remove_recursive(bcache_debug);
}
-int __init bch_debug_init(struct kobject *kobj)
+void __init bch_debug_init(struct kobject *kobj)
{
- if (!IS_ENABLED(CONFIG_DEBUG_FS))
- return 0;
-
+ /*
+ * It is unnecessary to check the return value of
+ * debugfs_create_dir(); callers are not supposed
+ * to care about it.
+ */
bcache_debug = debugfs_create_dir("bcache", NULL);
- return IS_ERR_OR_NULL(bcache_debug);
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 18f1b5239620..10748c626a1d 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -828,6 +828,7 @@ void bch_journal_free(struct cache_set *c)
free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
free_fifo(&c->journal.pin);
+ free_heap(&c->flush_btree);
}
int bch_journal_alloc(struct cache_set *c)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index ae67f5fa8047..7dbe8b6316a0 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -107,7 +107,7 @@ static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
/*
* The journalling code doesn't handle the case where the keys to insert
* are bigger than an empty write: if we just return -ENOMEM here,
- * bio_insert() and bio_invalidate() will insert the keys created so far
+ * bch_data_insert_keys() will insert the keys created so far
* and finish the rest when the keylist is empty.
*/
if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
@@ -667,8 +667,7 @@ static void backing_request_endio(struct bio *bio)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- generic_end_io_acct(s->d->disk->queue,
- bio_data_dir(s->orig_bio),
+ generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
&s->d->disk->part0, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
@@ -702,6 +701,8 @@ static void search_free(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
+ atomic_dec(&s->d->c->search_inflight);
+
if (s->iop.bio)
bio_put(s->iop.bio);
@@ -719,6 +720,7 @@ static inline struct search *search_alloc(struct bio *bio,
closure_init(&s->cl, NULL);
do_bio_hook(s, bio, request_endio);
+ atomic_inc(&d->c->search_inflight);
s->orig_bio = bio;
s->cache_miss = NULL;
@@ -1062,8 +1064,7 @@ static void detached_dev_end_io(struct bio *bio)
bio->bi_end_io = ddip->bi_end_io;
bio->bi_private = ddip->bi_private;
- generic_end_io_acct(ddip->d->disk->queue,
- bio_data_dir(bio),
+ generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
&ddip->d->disk->part0, ddip->start_time);
if (bio->bi_status) {
@@ -1102,6 +1103,44 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
generic_make_request(bio);
}
+static void quit_max_writeback_rate(struct cache_set *c,
+ struct cached_dev *this_dc)
+{
+ int i;
+ struct bcache_device *d;
+ struct cached_dev *dc;
+
+ /*
+ * The mutex bch_register_lock may be contended by other parallel
+ * requesters, or by attach/detach operations on other backing
+ * devices. Waiting for the mutex lock may increase I/O request
+ * latency for seconds or more. To avoid such a situation, if
+ * mutex_trylock() fails, only the writeback rate of the current
+ * cached device is set to 1, and __update_writeback_rate() will
+ * decide the writeback rate of the other cached devices (remember
+ * c->idle_counter is 0 already).
+ */
+ if (mutex_trylock(&bch_register_lock)) {
+ for (i = 0; i < c->devices_max_used; i++) {
+ if (!c->devices[i])
+ continue;
+
+ if (UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+
+ d = c->devices[i];
+ dc = container_of(d, struct cached_dev, disk);
+ /*
+ * Set the writeback rate to the default minimum value,
+ * then let update_writeback_rate() decide the
+ * upcoming rate.
+ */
+ atomic_long_set(&dc->writeback_rate.rate, 1);
+ }
+ mutex_unlock(&bch_register_lock);
+ } else
+ atomic_long_set(&this_dc->writeback_rate.rate, 1);
+}
+
/* Cached devices - read & write stuff */
static blk_qc_t cached_dev_make_request(struct request_queue *q,
@@ -1119,8 +1158,25 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
return BLK_QC_T_NONE;
}
- atomic_set(&dc->backing_idle, 0);
- generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
+ if (likely(d->c)) {
+ if (atomic_read(&d->c->idle_counter))
+ atomic_set(&d->c->idle_counter, 0);
+ /*
+ * If at_max_writeback_rate of the cache set is true and new
+ * I/O arrives, quit the max writeback rate of all cached
+ * devices attached to this cache set, and set
+ * at_max_writeback_rate to false.
+ */
+ if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
+ atomic_set(&d->c->at_max_writeback_rate, 0);
+ quit_max_writeback_rate(d->c, dc);
+ }
+ }
+
+ generic_start_io_acct(q,
+ bio_op(bio),
+ bio_sectors(bio),
+ &d->disk->part0);
bio_set_dev(bio, dc->bdev);
bio->bi_iter.bi_sector += dc->sb.data_offset;
@@ -1229,7 +1285,6 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
struct search *s;
struct closure *cl;
struct bcache_device *d = bio->bi_disk->private_data;
- int rw = bio_data_dir(bio);
if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
bio->bi_status = BLK_STS_IOERR;
@@ -1237,7 +1292,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
return BLK_QC_T_NONE;
}
- generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
+ generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
s = search_alloc(bio, d);
cl = &s->cl;
@@ -1254,7 +1309,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
flash_dev_nodata,
bcache_wq);
return BLK_QC_T_NONE;
- } else if (rw) {
+ } else if (bio_data_dir(bio)) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
&KEY(d->id, bio->bi_iter.bi_sector, 0),
&KEY(d->id, bio_end_sector(bio), 0));
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index fa4058e43202..55a37641aa95 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -181,7 +181,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
goto err;
}
- sb->last_mount = get_seconds();
+ sb->last_mount = (u32)ktime_get_real_seconds();
err = NULL;
get_page(bh->b_page);
@@ -696,12 +696,14 @@ static void bcache_device_detach(struct bcache_device *d)
{
lockdep_assert_held(&bch_register_lock);
+ atomic_dec(&d->c->attached_dev_nr);
+
if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
struct uuid_entry *u = d->c->uuids + d->id;
SET_UUID_FLASH_ONLY(u, 0);
memcpy(u->uuid, invalid_uuid, 16);
- u->invalidated = cpu_to_le32(get_seconds());
+ u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
bch_uuid_write(d->c);
}
@@ -796,11 +798,12 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
return idx;
if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
- BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) ||
- !(d->disk = alloc_disk(BCACHE_MINORS))) {
- ida_simple_remove(&bcache_device_idx, idx);
- return -ENOMEM;
- }
+ BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
+ goto err;
+
+ d->disk = alloc_disk(BCACHE_MINORS);
+ if (!d->disk)
+ goto err;
set_capacity(d->disk, sectors);
snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
@@ -834,6 +837,11 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
blk_queue_write_cache(q, true, true);
return 0;
+
+err:
+ ida_simple_remove(&bcache_device_idx, idx);
+ return -ENOMEM;
+
}
/* Cached device */
@@ -1027,7 +1035,7 @@ void bch_cached_dev_detach(struct cached_dev *dc)
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
uint8_t *set_uuid)
{
- uint32_t rtime = cpu_to_le32(get_seconds());
+ uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
struct uuid_entry *u;
struct cached_dev *exist_dc, *t;
@@ -1070,7 +1078,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
(BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
memcpy(u->uuid, invalid_uuid, 16);
- u->invalidated = cpu_to_le32(get_seconds());
+ u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
u = NULL;
}
@@ -1138,6 +1146,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
bch_cached_dev_run(dc);
bcache_device_link(&dc->disk, c, "bdev");
+ atomic_inc(&c->attached_dev_nr);
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
@@ -1285,6 +1294,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
pr_info("registered backing device %s", dc->backing_dev_name);
list_add(&dc->list, &uncached_devices);
+ /* attach to a matching cache set if one exists */
list_for_each_entry(c, &bch_cache_sets, list)
bch_cached_dev_attach(dc, c, NULL);
@@ -1311,6 +1321,8 @@ static void flash_dev_free(struct closure *cl)
{
struct bcache_device *d = container_of(cl, struct bcache_device, cl);
mutex_lock(&bch_register_lock);
+ atomic_long_sub(bcache_dev_sectors_dirty(d),
+ &d->c->flash_dev_dirty_sectors);
bcache_device_free(d);
mutex_unlock(&bch_register_lock);
kobject_put(&d->kobj);
@@ -1390,7 +1402,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
get_random_bytes(u->uuid, 16);
memset(u->label, 0, 32);
- u->first_reg = u->last_reg = cpu_to_le32(get_seconds());
+ u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds());
SET_UUID_FLASH_ONLY(u, 1);
u->sectors = size >> 9;
@@ -1687,6 +1699,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->block_bits = ilog2(sb->block_size);
c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
c->devices_max_used = 0;
+ atomic_set(&c->attached_dev_nr, 0);
c->btree_pages = bucket_pages(c);
if (c->btree_pages > BTREE_MAX_PAGES)
c->btree_pages = max_t(int, c->btree_pages / 4,
@@ -1894,7 +1907,7 @@ static void run_cache_set(struct cache_set *c)
goto err;
closure_sync(&cl);
- c->sb.last_mount = get_seconds();
+ c->sb.last_mount = (u32)ktime_get_real_seconds();
bcache_write_super(c);
list_for_each_entry_safe(dc, t, &uncached_devices, list)
@@ -2163,8 +2176,12 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!try_module_get(THIS_MODULE))
return -EBUSY;
- if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
- !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
+ path = kstrndup(buffer, size, GFP_KERNEL);
+ if (!path)
+ goto err;
+
+ sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
+ if (!sb)
goto err;
err = "failed to open device";
@@ -2324,13 +2341,21 @@ static int __init bcache_init(void)
return bcache_major;
}
- if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
- !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
- bch_request_init() ||
- bch_debug_init(bcache_kobj) || closure_debug_init() ||
+ bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
+ if (!bcache_wq)
+ goto err;
+
+ bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
+ if (!bcache_kobj)
+ goto err;
+
+ if (bch_request_init() ||
sysfs_create_files(bcache_kobj, files))
goto err;
+ bch_debug_init(bcache_kobj);
+ closure_debug_init();
+
return 0;
err:
bcache_exit();
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 225b15aa0340..81d3520b0702 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -149,6 +149,7 @@ SHOW(__bch_cached_dev)
struct cached_dev *dc = container_of(kobj, struct cached_dev,
disk.kobj);
const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };
+ int wb = dc->writeback_running;
#define var(stat) (dc->stat)
@@ -170,7 +171,8 @@ SHOW(__bch_cached_dev)
var_printf(writeback_running, "%i");
var_print(writeback_delay);
var_print(writeback_percent);
- sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);
+ sysfs_hprint(writeback_rate,
+ wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
sysfs_hprint(io_errors, atomic_read(&dc->io_errors));
sysfs_printf(io_error_limit, "%i", dc->error_limit);
sysfs_printf(io_disable, "%i", dc->io_disable);
@@ -188,15 +190,22 @@ SHOW(__bch_cached_dev)
char change[20];
s64 next_io;
- bch_hprint(rate, dc->writeback_rate.rate << 9);
- bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
- bch_hprint(target, dc->writeback_rate_target << 9);
- bch_hprint(proportional,dc->writeback_rate_proportional << 9);
- bch_hprint(integral, dc->writeback_rate_integral_scaled << 9);
- bch_hprint(change, dc->writeback_rate_change << 9);
-
- next_io = div64_s64(dc->writeback_rate.next - local_clock(),
- NSEC_PER_MSEC);
+ /*
+ * Except for dirty and target, other values should
+ * be 0 if writeback is not running.
+ */
+ bch_hprint(rate,
+ wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
+ : 0);
+ bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
+ bch_hprint(target, dc->writeback_rate_target << 9);
+ bch_hprint(proportional,
+ wb ? dc->writeback_rate_proportional << 9 : 0);
+ bch_hprint(integral,
+ wb ? dc->writeback_rate_integral_scaled << 9 : 0);
+ bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
+ next_io = wb ? div64_s64(dc->writeback_rate.next-local_clock(),
+ NSEC_PER_MSEC) : 0;
return sprintf(buf,
"rate:\t\t%s/sec\n"
@@ -255,8 +264,19 @@ STORE(__cached_dev)
sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
- sysfs_strtoul_clamp(writeback_rate,
- dc->writeback_rate.rate, 1, INT_MAX);
+ if (attr == &sysfs_writeback_rate) {
+ ssize_t ret;
+ long int v = atomic_long_read(&dc->writeback_rate.rate);
+
+ ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);
+
+ if (!ret) {
+ atomic_long_set(&dc->writeback_rate.rate, v);
+ ret = size;
+ }
+
+ return ret;
+ }
sysfs_strtoul_clamp(writeback_rate_update_seconds,
dc->writeback_rate_update_seconds,
@@ -338,8 +358,8 @@ STORE(__cached_dev)
if (!v)
return size;
}
-
- pr_err("Can't attach %s: cache set not found", buf);
+ if (v == -ENOENT)
+ pr_err("Can't attach %s: cache set not found", buf);
return v;
}
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index fc479b026d6d..b15256bcf0e7 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -200,7 +200,7 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
{
uint64_t now = local_clock();
- d->next += div_u64(done * NSEC_PER_SEC, d->rate);
+ d->next += div_u64(done * NSEC_PER_SEC, atomic_long_read(&d->rate));
/* Bound the time. Don't let us fall further than 2 seconds behind
* (this prevents unnecessary backlog that would make it impossible
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index cced87f8eb27..f7b0133c9d2f 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -442,7 +442,7 @@ struct bch_ratelimit {
* Rate at which we want to do work, in units per second
* The units here correspond to the units passed to bch_next_delay()
*/
- uint32_t rate;
+ atomic_long_t rate;
};
static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index ad45ebe1a74b..481d4cf38ac0 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -27,7 +27,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
* flash-only devices
*/
uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
- bcache_flash_devs_sectors_dirty(c);
+ atomic_long_read(&c->flash_dev_dirty_sectors);
/*
* Unfortunately there is no control of global dirty data. If the
@@ -104,11 +104,56 @@ static void __update_writeback_rate(struct cached_dev *dc)
dc->writeback_rate_proportional = proportional_scaled;
dc->writeback_rate_integral_scaled = integral_scaled;
- dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
- dc->writeback_rate.rate = new_rate;
+ dc->writeback_rate_change = new_rate -
+ atomic_long_read(&dc->writeback_rate.rate);
+ atomic_long_set(&dc->writeback_rate.rate, new_rate);
dc->writeback_rate_target = target;
}
+static bool set_at_max_writeback_rate(struct cache_set *c,
+ struct cached_dev *dc)
+{
+ /*
+ * idle_counter is increased every time update_writeback_rate() is
+ * called. If all backing devices attached to the same cache set have
+ * identical dc->writeback_rate_update_seconds values, it takes about 6
+ * rounds of update_writeback_rate() on each backing device before
+ * c->at_max_writeback_rate is set to 1, and then the max writeback
+ * rate is set on each dc->writeback_rate.rate.
+ * In order to avoid the extra locking cost of counting the exact
+ * number of dirty cached devices, c->attached_dev_nr is used to
+ * calculate the idle threshold. It might be bigger if not all cached
+ * devices are in writeback mode, but it still works well with a
+ * limited number of extra rounds of update_writeback_rate().
+ */
+ if (atomic_inc_return(&c->idle_counter) <
+ atomic_read(&c->attached_dev_nr) * 6)
+ return false;
+
+ if (atomic_read(&c->at_max_writeback_rate) != 1)
+ atomic_set(&c->at_max_writeback_rate, 1);
+
+ atomic_long_set(&dc->writeback_rate.rate, INT_MAX);
+
+ /* keep writeback_rate_target as existing value */
+ dc->writeback_rate_proportional = 0;
+ dc->writeback_rate_integral_scaled = 0;
+ dc->writeback_rate_change = 0;
+
+ /*
+ * Check c->idle_counter and c->at_max_writeback_rate again in case
+ * new I/O arrives before set_at_max_writeback_rate() returns.
+ * Then the writeback rate is set to 1, and its new value should be
+ * decided via __update_writeback_rate().
+ */
+ if ((atomic_read(&c->idle_counter) <
+ atomic_read(&c->attached_dev_nr) * 6) ||
+ !atomic_read(&c->at_max_writeback_rate))
+ return false;
+
+ return true;
+}
+
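/*
 * Editor's worked example of the idle heuristic above: with 3 backing
 * devices attached, the threshold is 3 * 6 == 18. Each device's
 * update_writeback_rate() worker bumps c->idle_counter once per update
 * period, and any front side I/O resets it to 0, so the cache set is
 * treated as idle only after roughly 6 quiet update periods, at which
 * point every device's rate is pinned at INT_MAX until new I/O
 * arrives.
 */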
static void update_writeback_rate(struct work_struct *work)
{
struct cached_dev *dc = container_of(to_delayed_work(work),
@@ -136,13 +181,20 @@ static void update_writeback_rate(struct work_struct *work)
return;
}
- down_read(&dc->writeback_lock);
-
- if (atomic_read(&dc->has_dirty) &&
- dc->writeback_percent)
- __update_writeback_rate(dc);
+ if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
+ /*
+ * If the whole cache set is idle, set_at_max_writeback_rate()
+ * will set the writeback rate to a maximum number. It is then
+ * unnecessary to update the writeback rate again while the idle
+ * cache set stays at the maximum rate.
+ */
+ if (!set_at_max_writeback_rate(c, dc)) {
+ down_read(&dc->writeback_lock);
+ __update_writeback_rate(dc);
+ up_read(&dc->writeback_lock);
+ }
+ }
- up_read(&dc->writeback_lock);
/*
* CACHE_SET_IO_DISABLE might be set via sysfs interface,
@@ -422,27 +474,6 @@ static void read_dirty(struct cached_dev *dc)
delay = writeback_delay(dc, size);
- /* If the control system would wait for at least half a
- * second, and there's been no reqs hitting the backing disk
- * for awhile: use an alternate mode where we have at most
- * one contiguous set of writebacks in flight at a time. If
- * someone wants to do IO it will be quick, as it will only
- * have to contend with one operation in flight, and we'll
- * be round-tripping data to the backing disk as quickly as
- * it can accept it.
- */
- if (delay >= HZ / 2) {
- /* 3 means at least 1.5 seconds, up to 7.5 if we
- * have slowed way down.
- */
- if (atomic_inc_return(&dc->backing_idle) >= 3) {
- /* Wait for current I/Os to finish */
- closure_sync(&cl);
- /* And immediately launch a new set. */
- delay = 0;
- }
- }
-
while (!kthread_should_stop() &&
!test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
delay) {
@@ -476,6 +507,9 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
if (!d)
return;
+ if (UUID_FLASH_ONLY(&c->uuids[inode]))
+ atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
+
stripe = offset_to_stripe(d, offset);
stripe_offset = offset & (d->stripe_size - 1);
@@ -673,10 +707,14 @@ static int bch_writeback_thread(void *arg)
}
/* Init */
+#define INIT_KEYS_EACH_TIME 500000
+#define INIT_KEYS_SLEEP_MS 100
struct sectors_dirty_init {
struct btree_op op;
unsigned inode;
+ size_t count;
+ struct bkey start;
};
static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
@@ -691,18 +729,37 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
KEY_START(k), KEY_SIZE(k));
+ op->count++;
+ if (atomic_read(&b->c->search_inflight) &&
+ !(op->count % INIT_KEYS_EACH_TIME)) {
+ bkey_copy_key(&op->start, k);
+ return -EAGAIN;
+ }
+
return MAP_CONTINUE;
}
void bch_sectors_dirty_init(struct bcache_device *d)
{
struct sectors_dirty_init op;
+ int ret;
bch_btree_op_init(&op.op, -1);
op.inode = d->id;
-
- bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
- sectors_dirty_init_fn, 0);
+ op.count = 0;
+ op.start = KEY(op.inode, 0, 0);
+
+ do {
+ ret = bch_btree_map_keys(&op.op, d->c, &op.start,
+ sectors_dirty_init_fn, 0);
+ if (ret == -EAGAIN)
+ schedule_timeout_interruptible(
+ msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
+ else if (ret < 0) {
+ pr_warn("sectors dirty init failed, ret=%d!", ret);
+ break;
+ }
+ } while (ret == -EAGAIN);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -715,7 +772,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_running = true;
dc->writeback_percent = 10;
dc->writeback_delay = 30;
- dc->writeback_rate.rate = 1024;
+ atomic_long_set(&dc->writeback_rate.rate, 1024);
dc->writeback_rate_minimum = 8;
dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 610fb01de629..3745d7004c47 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -28,25 +28,6 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret;
}
-static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
-{
- uint64_t i, ret = 0;
-
- mutex_lock(&bch_register_lock);
-
- for (i = 0; i < c->devices_max_used; i++) {
- struct bcache_device *d = c->devices[i];
-
- if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
- continue;
- ret += bcache_dev_sectors_dirty(d);
- }
-
- mutex_unlock(&bch_register_lock);
-
- return ret;
-}
-
static inline unsigned offset_to_stripe(struct bcache_device *d,
uint64_t offset)
{
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b0dd7027848b..20f7e4ef5342 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -609,7 +609,8 @@ static void start_io_acct(struct dm_io *io)
io->start_time = jiffies;
- generic_start_io_acct(md->queue, rw, bio_sectors(bio), &dm_disk(md)->part0);
+ generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
+ &dm_disk(md)->part0);
atomic_set(&dm_disk(md)->part0.in_flight[rw],
atomic_inc_return(&md->pending[rw]));
@@ -628,7 +629,8 @@ static void end_io_acct(struct dm_io *io)
int pending;
int rw = bio_data_dir(bio);
- generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time);
+ generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
+ io->start_time);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 021cbf9ef1bf..e8a74e92be30 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -304,15 +304,6 @@ static void recover_bitmaps(struct md_thread *thread)
while (cinfo->recovery_map) {
slot = fls64((u64)cinfo->recovery_map) - 1;
- /* Clear suspend_area associated with the bitmap */
- spin_lock_irq(&cinfo->suspend_lock);
- list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
- if (slot == s->slot) {
- list_del(&s->list);
- kfree(s);
- }
- spin_unlock_irq(&cinfo->suspend_lock);
-
snprintf(str, 64, "bitmap%04d", slot);
bm_lockres = lockres_init(mddev, str, NULL, 1);
if (!bm_lockres) {
@@ -331,14 +322,30 @@ static void recover_bitmaps(struct md_thread *thread)
pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
goto clear_bit;
}
+
+ /* Clear suspend_area associated with the bitmap */
+ spin_lock_irq(&cinfo->suspend_lock);
+ list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+ if (slot == s->slot) {
+ list_del(&s->list);
+ kfree(s);
+ }
+ spin_unlock_irq(&cinfo->suspend_lock);
+
if (hi > 0) {
if (lo < mddev->recovery_cp)
mddev->recovery_cp = lo;
/* wake up thread to continue resync in case resync
* is not finished */
if (mddev->recovery_cp != MaxSector) {
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
+ /*
+ * clear the REMOTE flag since we will launch the
+ * resync thread on the current node.
+ */
+ clear_bit(MD_RESYNCING_REMOTE,
+ &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
}
}
clear_bit:
@@ -457,6 +464,11 @@ static void process_suspend_info(struct mddev *mddev,
struct suspend_info *s;
if (!hi) {
+ /*
+ * clear the REMOTE flag since resync or recovery has finished
+ * on the remote node.
+ */
+ clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
remove_suspend_info(mddev, slot);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -585,6 +597,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
revalidate_disk(mddev->gendisk);
break;
case RESYNCING:
+ set_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
process_suspend_info(mddev, le32_to_cpu(msg->slot),
le64_to_cpu(msg->low),
le64_to_cpu(msg->high));
@@ -1265,8 +1278,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
static int resync_finish(struct mddev *mddev)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
dlm_unlock_sync(cinfo->resync_lockres);
- return resync_info_update(mddev, 0, 0);
+
+ /*
+ * If the resync thread is interrupted, we can't say resync has
+ * finished; another node will launch a resync thread to continue.
+ */
+ if (test_bit(MD_CLOSING, &mddev->flags))
+ return 0;
+ else
+ return resync_info_update(mddev, 0, 0);
}
static int area_resyncing(struct mddev *mddev, int direction,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 994aed2f9dff..724def2f9eaa 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -204,10 +204,6 @@ static int start_readonly;
*/
static bool create_on_open = true;
-/* bio_clone_mddev
- * like bio_clone_bioset, but with a local bio set
- */
-
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev)
{
@@ -335,6 +331,7 @@ EXPORT_SYMBOL(md_handle_request);
static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
+ const int sgrp = op_stat_group(bio_op(bio));
struct mddev *mddev = q->queuedata;
unsigned int sectors;
int cpu;
@@ -363,8 +360,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
md_handle_request(mddev, bio);
cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[sgrp]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[sgrp], sectors);
part_stat_unlock();
return BLK_QC_T_NONE;
@@ -7680,6 +7677,23 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
resync -= atomic_read(&mddev->recovery_active);
if (resync == 0) {
+ if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, mddev)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Faulty, &rdev->flags) &&
+ rdev->recovery_offset != MaxSector &&
+ rdev->recovery_offset) {
+ seq_printf(seq, "\trecover=REMOTE");
+ return 1;
+ }
+ if (mddev->reshape_position != MaxSector)
+ seq_printf(seq, "\treshape=REMOTE");
+ else
+ seq_printf(seq, "\tresync=REMOTE");
+ return 1;
+ }
if (mddev->recovery_cp < MaxSector) {
seq_printf(seq, "\tresync=PENDING");
return 1;
@@ -8046,8 +8060,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
- curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
- (int)part_stat_read(&disk->part0, sectors[1]) -
+ curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2d148bdaba74..8afd6bfdbfb9 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -496,6 +496,7 @@ enum recovery_flags {
MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */
MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */
MD_RECOVERY_WAIT, /* waiting for pers->start() to finish */
+ MD_RESYNCING_REMOTE, /* remote node is running resync thread */
};
static inline int __must_check mddev_lock(struct mddev *mddev)
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 2b775abf377b..7416db70c6cc 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -717,7 +717,6 @@ static void r5c_disable_writeback_async(struct work_struct *work)
static void r5l_submit_current_io(struct r5l_log *log)
{
struct r5l_io_unit *io = log->current_io;
- struct bio *bio;
struct r5l_meta_block *block;
unsigned long flags;
u32 crc;
@@ -730,7 +729,6 @@ static void r5l_submit_current_io(struct r5l_log *log)
block->meta_size = cpu_to_le32(io->meta_offset);
crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
block->checksum = cpu_to_le32(crc);
- bio = io->current_bio;
log->current_io = NULL;
spin_lock_irqsave(&log->io_list_lock, flags);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2031506a0ecd..81eaa221216c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -409,16 +409,14 @@ void raid5_release_stripe(struct stripe_head *sh)
md_wakeup_thread(conf->mddev->thread);
return;
slow_path:
- local_irq_save(flags);
/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
- if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
+ if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) {
INIT_LIST_HEAD(&list);
hash = sh->hash_lock_index;
do_release_stripe(conf, sh, &list);
- spin_unlock(&conf->device_lock);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
release_inactive_stripe_list(conf, &list, hash);
}
- local_irq_restore(flags);
}
static inline void remove_hash(struct stripe_head *sh)
@@ -4521,6 +4519,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
s->failed++;
if (rdev && !test_bit(Faulty, &rdev->flags))
do_recovery = 1;
+ else if (!rdev) {
+ rdev = rcu_dereference(
+ conf->disks[i].replacement);
+ if (rdev && !test_bit(Faulty, &rdev->flags))
+ do_recovery = 1;
+ }
}
if (test_bit(R5_InJournal, &dev->flags))
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index b7fad0ec5710..030b2602faf0 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -74,7 +74,7 @@ void cec_queue_event_fh(struct cec_fh *fh,
const struct cec_event *new_ev, u64 ts)
{
static const u16 max_events[CEC_NUM_EVENTS] = {
- 1, 1, 800, 800, 8, 8,
+ 1, 1, 800, 800, 8, 8, 8, 8
};
struct cec_event_entry *entry;
unsigned int ev_idx = new_ev->event - 1;
@@ -176,6 +176,22 @@ void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
}
EXPORT_SYMBOL_GPL(cec_queue_pin_hpd_event);
+/* Notify userspace that the 5V pin changed state at the given time. */
+void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
+{
+ struct cec_event ev = {
+ .event = is_high ? CEC_EVENT_PIN_5V_HIGH :
+ CEC_EVENT_PIN_5V_LOW,
+ };
+ struct cec_fh *fh;
+
+ mutex_lock(&adap->devnode.lock);
+ list_for_each_entry(fh, &adap->devnode.fhs, list)
+ cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
+ mutex_unlock(&adap->devnode.lock);
+}
+EXPORT_SYMBOL_GPL(cec_queue_pin_5v_event);
+
/*
* Queue a new message for this filehandle.
*
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 10b67fc40318..b6536bbad530 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -579,6 +579,14 @@ static int cec_open(struct inode *inode, struct file *filp)
cec_queue_event_fh(fh, &ev, 0);
}
}
+ if (adap->pin && adap->pin->ops->read_5v) {
+ err = adap->pin->ops->read_5v(adap);
+ if (err >= 0) {
+ ev.event = err ? CEC_EVENT_PIN_5V_HIGH :
+ CEC_EVENT_PIN_5V_LOW;
+ cec_queue_event_fh(fh, &ev, 0);
+ }
+ }
#endif
list_add(&fh->list, &devnode->fhs);
diff --git a/drivers/media/common/siano/smsdvb-debugfs.c b/drivers/media/common/siano/smsdvb-debugfs.c
index 40891f4f842b..c95d4583498e 100644
--- a/drivers/media/common/siano/smsdvb-debugfs.c
+++ b/drivers/media/common/siano/smsdvb-debugfs.c
@@ -500,7 +500,7 @@ void smsdvb_debugfs_release(struct smsdvb_client_t *client)
client->debugfs = NULL;
}
-int smsdvb_debugfs_register(void)
+void smsdvb_debugfs_register(void)
{
struct dentry *d;
@@ -517,15 +517,15 @@ int smsdvb_debugfs_register(void)
d = debugfs_create_dir("smsdvb", usb_debug_root);
if (IS_ERR_OR_NULL(d)) {
pr_err("Couldn't create sysfs node for smsdvb\n");
- return PTR_ERR(d);
- } else {
- smsdvb_debugfs_usb_root = d;
+ return;
}
- return 0;
+ smsdvb_debugfs_usb_root = d;
}
void smsdvb_debugfs_unregister(void)
{
+ if (!smsdvb_debugfs_usb_root)
+ return;
debugfs_remove_recursive(smsdvb_debugfs_usb_root);
smsdvb_debugfs_usb_root = NULL;
}
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index c0faad1ba428..43cfd1dbda01 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -1047,9 +1047,9 @@ static void smsdvb_release(struct dvb_frontend *fe)
static const struct dvb_frontend_ops smsdvb_fe_ops = {
.info = {
.name = "Siano Mobile Digital MDTV Receiver",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 250000,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 250 * kHz,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/common/siano/smsdvb.h b/drivers/media/common/siano/smsdvb.h
index b15754d95ec0..befeb9817e54 100644
--- a/drivers/media/common/siano/smsdvb.h
+++ b/drivers/media/common/siano/smsdvb.h
@@ -107,7 +107,7 @@ struct RECEPTION_STATISTICS_PER_SLICES_S {
int smsdvb_debugfs_create(struct smsdvb_client_t *client);
void smsdvb_debugfs_release(struct smsdvb_client_t *client);
-int smsdvb_debugfs_register(void);
+void smsdvb_debugfs_register(void);
void smsdvb_debugfs_unregister(void);
#else
@@ -119,10 +119,7 @@ static inline int smsdvb_debugfs_create(struct smsdvb_client_t *client)
static inline void smsdvb_debugfs_release(struct smsdvb_client_t *client) {}
-static inline int smsdvb_debugfs_register(void)
-{
- return 0;
-};
+static inline void smsdvb_debugfs_register(void) {}
static inline void smsdvb_debugfs_unregister(void) {};
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index f32ec7342ef0..5653e8eebe2b 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -1377,6 +1377,11 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
struct vb2_buffer *vb;
int ret;
+ if (q->error) {
+ dprintk(1, "fatal error occurred on queue\n");
+ return -EIO;
+ }
+
vb = q->bufs[index];
switch (vb->state) {
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index f1178f6f434d..aff0ab7bf83d 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -222,7 +222,7 @@ struct vb2_dc_attachment {
enum dma_data_direction dma_dir;
};
-static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
struct dma_buf_attachment *dbuf_attach)
{
struct vb2_dc_attachment *attach;
@@ -358,7 +358,6 @@ static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
.map_dma_buf = vb2_dc_dmabuf_ops_map,
.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
.map = vb2_dc_dmabuf_ops_kmap,
- .map_atomic = vb2_dc_dmabuf_ops_kmap,
.vmap = vb2_dc_dmabuf_ops_vmap,
.mmap = vb2_dc_dmabuf_ops_mmap,
.release = vb2_dc_dmabuf_ops_release,
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 753ed3138dcc..015e737095cd 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -371,7 +371,7 @@ struct vb2_dma_sg_attachment {
enum dma_data_direction dma_dir;
};
-static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
struct dma_buf_attachment *dbuf_attach)
{
struct vb2_dma_sg_attachment *attach;
@@ -507,7 +507,6 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
.map = vb2_dma_sg_dmabuf_ops_kmap,
- .map_atomic = vb2_dma_sg_dmabuf_ops_kmap,
.vmap = vb2_dma_sg_dmabuf_ops_vmap,
.mmap = vb2_dma_sg_dmabuf_ops_mmap,
.release = vb2_dma_sg_dmabuf_ops_release,
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index 359fb9804d16..6dfbd5b05907 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -209,7 +209,7 @@ struct vb2_vmalloc_attachment {
enum dma_data_direction dma_dir;
};
-static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
struct dma_buf_attachment *dbuf_attach)
{
struct vb2_vmalloc_attachment *attach;
@@ -346,7 +346,6 @@ static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
.map = vb2_vmalloc_dmabuf_ops_kmap,
- .map_atomic = vb2_vmalloc_dmabuf_ops_kmap,
.vmap = vb2_vmalloc_dmabuf_ops_vmap,
.mmap = vb2_vmalloc_dmabuf_ops_mmap,
.release = vb2_vmalloc_dmabuf_ops_release,
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index 1310526b0d49..4d371cea0d5d 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -1391,7 +1391,7 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file,
struct dvb_ca_slot *sl;
slot = info->num;
- if ((slot > ca->slot_count) || (slot < 0)) {
+ if ((slot >= ca->slot_count) || (slot < 0)) {
err = -EINVAL;
goto out_unlock;
}
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index ce25aef39008..c4e7ebfe4d29 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -894,21 +894,67 @@ static int dvb_frontend_start(struct dvb_frontend *fe)
}
static void dvb_frontend_get_frequency_limits(struct dvb_frontend *fe,
- u32 *freq_min, u32 *freq_max)
+ u32 *freq_min, u32 *freq_max,
+ u32 *tolerance)
{
- *freq_min = max(fe->ops.info.frequency_min, fe->ops.tuner_ops.info.frequency_min);
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 tuner_min = fe->ops.tuner_ops.info.frequency_min_hz;
+ u32 tuner_max = fe->ops.tuner_ops.info.frequency_max_hz;
+ u32 frontend_min = fe->ops.info.frequency_min_hz;
+ u32 frontend_max = fe->ops.info.frequency_max_hz;
+
+ *freq_min = max(frontend_min, tuner_min);
- if (fe->ops.info.frequency_max == 0)
- *freq_max = fe->ops.tuner_ops.info.frequency_max;
- else if (fe->ops.tuner_ops.info.frequency_max == 0)
- *freq_max = fe->ops.info.frequency_max;
+ if (frontend_max == 0)
+ *freq_max = tuner_max;
+ else if (tuner_max == 0)
+ *freq_max = frontend_max;
else
- *freq_max = min(fe->ops.info.frequency_max, fe->ops.tuner_ops.info.frequency_max);
+ *freq_max = min(frontend_max, tuner_max);
if (*freq_min == 0 || *freq_max == 0)
dev_warn(fe->dvb->device,
"DVB: adapter %i frontend %u frequency limits undefined - fix the driver\n",
fe->dvb->num, fe->id);
+
+ /* If the standard is for satellite, convert frequencies to kHz */
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ case SYS_DVBS2:
+ case SYS_TURBO:
+ case SYS_ISDBS:
+ *freq_min /= kHz;
+ *freq_max /= kHz;
+ if (tolerance)
+ *tolerance = fe->ops.info.frequency_tolerance_hz / kHz;
+
+ break;
+ default:
+ if (tolerance)
+ *tolerance = fe->ops.info.frequency_tolerance_hz;
+ break;
+ }
+}
+
+static u32 dvb_frontend_get_stepsize(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 fe_step = fe->ops.info.frequency_stepsize_hz;
+ u32 tuner_step = fe->ops.tuner_ops.info.frequency_step_hz;
+ u32 step = max(fe_step, tuner_step);
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ case SYS_DVBS2:
+ case SYS_TURBO:
+ case SYS_ISDBS:
+ step /= kHz;
+ break;
+ default:
+ break;
+ }
+
+ return step;
}
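/*
 * Editor's note with a hypothetical example: drivers now store all
 * limits in Hz (frequency_min_hz etc.), and these helpers convert at
 * the DVBv3 boundary, where satellite systems are reported in kHz and
 * everything else in Hz:
 *
 *	SYS_DVBS frontend with a 950 MHz minimum
 *		-> info->frequency_min == 950000	(kHz)
 *	SYS_DVBT frontend with a 174 MHz minimum
 *		-> info->frequency_min == 174000000	(Hz)
 */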
static int dvb_frontend_check_parameters(struct dvb_frontend *fe)
@@ -918,7 +964,7 @@ static int dvb_frontend_check_parameters(struct dvb_frontend *fe)
u32 freq_max;
/* range check: frequency */
- dvb_frontend_get_frequency_limits(fe, &freq_min, &freq_max);
+ dvb_frontend_get_frequency_limits(fe, &freq_min, &freq_max, NULL);
if ((freq_min && c->frequency < freq_min) ||
(freq_max && c->frequency > freq_max)) {
dev_warn(fe->dvb->device, "DVB: adapter %i frontend %i frequency %u out of range (%u..%u)\n",
@@ -2244,8 +2290,8 @@ static int dtv_set_frontend(struct dvb_frontend *fe)
case SYS_ISDBT:
case SYS_DTMB:
fepriv->min_delay = HZ / 20;
- fepriv->step_size = fe->ops.info.frequency_stepsize * 2;
- fepriv->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1;
+ fepriv->step_size = dvb_frontend_get_stepsize(fe) * 2;
+ fepriv->max_drift = (dvb_frontend_get_stepsize(fe) * 2) + 1;
break;
default:
/*
@@ -2374,9 +2420,17 @@ static int dvb_frontend_handle_ioctl(struct file *file,
case FE_GET_INFO: {
struct dvb_frontend_info *info = parg;
-
- memcpy(info, &fe->ops.info, sizeof(struct dvb_frontend_info));
- dvb_frontend_get_frequency_limits(fe, &info->frequency_min, &info->frequency_max);
+ memset(info, 0, sizeof(*info));
+
+ strcpy(info->name, fe->ops.info.name);
+ info->symbol_rate_min = fe->ops.info.symbol_rate_min;
+ info->symbol_rate_max = fe->ops.info.symbol_rate_max;
+ info->symbol_rate_tolerance = fe->ops.info.symbol_rate_tolerance;
+ info->caps = fe->ops.info.caps;
+ info->frequency_stepsize = dvb_frontend_get_stepsize(fe);
+ dvb_frontend_get_frequency_limits(fe, &info->frequency_min,
+ &info->frequency_max,
+ &info->frequency_tolerance);
/*
* Associate the 4 delivery systems supported by DVBv3
@@ -2406,10 +2460,10 @@ static int dvb_frontend_handle_ioctl(struct file *file,
dev_err(fe->dvb->device,
"%s: doesn't know how to handle a DVBv3 call to delivery system %i\n",
__func__, c->delivery_system);
- fe->ops.info.type = FE_OFDM;
+ info->type = FE_OFDM;
}
dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
- __func__, c->delivery_system, fe->ops.info.type);
+ __func__, c->delivery_system, info->type);
/* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */
if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 64d6793674b9..3c8778570331 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -440,8 +440,10 @@ static int dvb_register_media_device(struct dvb_device *dvbdev,
if (!dvbdev->entity)
return 0;
- link = media_create_intf_link(dvbdev->entity, &dvbdev->intf_devnode->intf,
- MEDIA_LNK_FL_ENABLED);
+ link = media_create_intf_link(dvbdev->entity,
+ &dvbdev->intf_devnode->intf,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
if (!link)
return -ENOMEM;
#endif
@@ -599,7 +601,8 @@ static int dvb_create_io_intf_links(struct dvb_adapter *adap,
if (strncmp(entity->name, name, strlen(name)))
continue;
link = media_create_intf_link(entity, intf,
- MEDIA_LNK_FL_ENABLED);
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
if (!link)
return -ENOMEM;
}
@@ -754,14 +757,16 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
media_device_for_each_intf(intf, mdev) {
if (intf->type == MEDIA_INTF_T_DVB_CA && ca) {
link = media_create_intf_link(ca, intf,
- MEDIA_LNK_FL_ENABLED);
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
if (!link)
return -ENOMEM;
}
if (intf->type == MEDIA_INTF_T_DVB_FE && tuner) {
link = media_create_intf_link(tuner, intf,
- MEDIA_LNK_FL_ENABLED);
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
if (!link)
return -ENOMEM;
}
@@ -773,7 +778,8 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
*/
if (intf->type == MEDIA_INTF_T_DVB_DVR && demux) {
link = media_create_intf_link(demux, intf,
- MEDIA_LNK_FL_ENABLED);
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
if (!link)
return -ENOMEM;
}
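Every interface link dvbdev.c creates now carries
MEDIA_LNK_FL_IMMUTABLE alongside MEDIA_LNK_FL_ENABLED: userspace can
still see the link through the media controller API but can no longer
disable it. A hypothetical kernel-side helper capturing the repeated
pattern (sketch only, not part of the patch):

	#include <linux/errno.h>
	#include <media/media-entity.h>

	static int create_fixed_intf_link(struct media_entity *entity,
					  struct media_interface *intf)
	{
		struct media_link *link;

		/* Enabled and immutable in one step, as above. */
		link = media_create_intf_link(entity, intf,
					      MEDIA_LNK_FL_ENABLED |
					      MEDIA_LNK_FL_IMMUTABLE);
		return link ? 0 : -ENOMEM;
	}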
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 9ecaa9d0744a..048285134cdf 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -739,6 +739,16 @@ config DVB_TC90522
Toshiba TC90522 2xISDB-S 8PSK + 2xISDB-T OFDM demodulator.
Say Y when you want to support this frontend.
+config DVB_MN88443X
+ tristate "Socionext MN88443x"
+ depends on DVB_CORE && I2C
+ select REGMAP_I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A driver for Socionext/Panasonic MN884433 and MN884434
+	  ISDB-S + ISDB-T demodulators.
+ Say Y when you want to support this frontend.
+

comment "Digital terrestrial only tuners/PLL"
depends on DVB_CORE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index 67a783fd5ed0..779dfd027b24 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -125,6 +125,7 @@ obj-$(CONFIG_DVB_AF9033) += af9033.o
obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o
obj-$(CONFIG_DVB_GP8PSK_FE) += gp8psk-fe.o
obj-$(CONFIG_DVB_TC90522) += tc90522.o
+obj-$(CONFIG_DVB_MN88443X) += mn88443x.o
obj-$(CONFIG_DVB_HORUS3A) += horus3a.o
obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o
obj-$(CONFIG_DVB_HELENE) += helene.o
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index 482bce49819a..f3acbb57d48c 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -1136,10 +1136,9 @@ static const struct dvb_frontend_ops af9013_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Afatech AF9013",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 250000,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 250 * kHz,
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index aaed7cfe5f66..0cd57013ea25 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -1020,10 +1020,9 @@ static const struct dvb_frontend_ops af9033_ops = {
.delsys = {SYS_DVBT},
.info = {
.name = "Afatech AF9033 (DVB-T)",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 250000,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 250 * kHz,
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
diff --git a/drivers/media/dvb-frontends/as102_fe.c b/drivers/media/dvb-frontends/as102_fe.c
index 9b2f2da1d989..f59a102b0a64 100644
--- a/drivers/media/dvb-frontends/as102_fe.c
+++ b/drivers/media/dvb-frontends/as102_fe.c
@@ -419,9 +419,9 @@ static const struct dvb_frontend_ops as102_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Abilis AS102 DVB-T",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 166667,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_INVERSION_AUTO
| FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4
| FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO
diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
index 9746c6dd7fb8..52ce0e6e2a15 100644
--- a/drivers/media/dvb-frontends/ascot2e.c
+++ b/drivers/media/dvb-frontends/ascot2e.c
@@ -468,9 +468,9 @@ static int ascot2e_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops ascot2e_tuner_ops = {
.info = {
.name = "Sony ASCOT2E",
- .frequency_min = 1000000,
- .frequency_max = 1200000000,
- .frequency_step = 25000,
+ .frequency_min_hz = 1 * MHz,
+ .frequency_max_hz = 1200 * MHz,
+ .frequency_step_hz = 25 * kHz,
},
.init = ascot2e_init,
.release = ascot2e_release,
diff --git a/drivers/media/dvb-frontends/atbm8830.c b/drivers/media/dvb-frontends/atbm8830.c
index 7b0f3239dbba..cbcc65dc9d54 100644
--- a/drivers/media/dvb-frontends/atbm8830.c
+++ b/drivers/media/dvb-frontends/atbm8830.c
@@ -428,9 +428,9 @@ static const struct dvb_frontend_ops atbm8830_ops = {
.delsys = { SYS_DTMB },
.info = {
.name = "AltoBeam ATBM8830/8831 DMB-TH",
- .frequency_min = 474000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 10000,
+ .frequency_min_hz = 474 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 10 * kHz,
.caps =
FE_CAN_FEC_AUTO |
FE_CAN_QAM_AUTO |
diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
index 8f659bd1c79e..076f737aa8c0 100644
--- a/drivers/media/dvb-frontends/au8522_dig.c
+++ b/drivers/media/dvb-frontends/au8522_dig.c
@@ -897,9 +897,9 @@ static const struct dvb_frontend_ops au8522_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Auvitek AU8522 QAM/8VSB Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index 05df133dc5be..e92542b92d34 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -840,10 +840,8 @@ static const struct dvb_frontend_ops bcm3510_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Broadcom BCM3510 VSB/QAM frontend",
- .frequency_min = 54000000,
- .frequency_max = 803000000,
- /* stepsize is just a guess */
- .frequency_stepsize = 0,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 803 * MHz,
.caps =
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
index 9ffd2c7ac74a..961380162cdd 100644
--- a/drivers/media/dvb-frontends/cx22700.c
+++ b/drivers/media/dvb-frontends/cx22700.c
@@ -412,9 +412,9 @@ static const struct dvb_frontend_ops cx22700_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Conexant CX22700 DVB-T",
- .frequency_min = 470000000,
- .frequency_max = 860000000,
- .frequency_stepsize = 166667,
+ .frequency_min_hz = 470 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
diff --git a/drivers/media/dvb-frontends/cx22702.c b/drivers/media/dvb-frontends/cx22702.c
index e8b1e6b7e7e5..ab9b2924bcca 100644
--- a/drivers/media/dvb-frontends/cx22702.c
+++ b/drivers/media/dvb-frontends/cx22702.c
@@ -622,9 +622,9 @@ static const struct dvb_frontend_ops cx22702_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Conexant CX22702 DVB-T",
- .frequency_min = 177000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166666,
+ .frequency_min_hz = 177 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index 2f3a1c237489..9441bdc73097 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -629,10 +629,10 @@ static const struct dvb_frontend_ops cx24110_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Conexant CX24110 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1011, /* kHz for QPSK frontends */
- .frequency_tolerance = 29500,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1011 * kHz,
+ .frequency_tolerance_hz = 29500 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
index 037db3e9d2dd..91a5033b6bd7 100644
--- a/drivers/media/dvb-frontends/cx24113.c
+++ b/drivers/media/dvb-frontends/cx24113.c
@@ -533,10 +533,10 @@ static void cx24113_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops cx24113_tuner_ops = {
.info = {
- .name = "Conexant CX24113",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 125,
+ .name = "Conexant CX24113",
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_step_hz = 125 * kHz,
},
.release = cx24113_release,
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index 2dbc7349d870..220f26663647 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -1465,10 +1465,10 @@ static const struct dvb_frontend_ops cx24116_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Conexant CX24116/CX24118",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1011, /* kHz for QPSK frontends */
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1011 * kHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index ba55d75d916c..667bc8be848d 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -1622,10 +1622,10 @@ static const struct dvb_frontend_ops cx24117_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Conexant CX24117/CX24132",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1011, /* kHz for QPSK frontends */
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1011 * kHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
index ccbabdae6a69..dd3ec316e7c2 100644
--- a/drivers/media/dvb-frontends/cx24120.c
+++ b/drivers/media/dvb-frontends/cx24120.c
@@ -1555,10 +1555,10 @@ static const struct dvb_frontend_ops cx24120_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Conexant CX24120/CX24118",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1011, /* kHz for QPSK frontends */
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1011 * kHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index bf33e7390aaf..e49215020a93 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -1111,10 +1111,10 @@ static const struct dvb_frontend_ops cx24123_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Conexant CX24123/CX24109",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1011, /* kHz for QPSK frontends */
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1011 * kHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/cxd2820r_t.c b/drivers/media/dvb-frontends/cxd2820r_t.c
index c2e7caf9b010..eb1d7478fa8d 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t.c
@@ -431,8 +431,8 @@ int cxd2820r_get_tune_settings_t(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
s->min_delay_ms = 500;
- s->step_size = fe->ops.info.frequency_stepsize * 2;
- s->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1;
+ s->step_size = fe->ops.info.frequency_stepsize_hz * 2;
+ s->max_drift = (fe->ops.info.frequency_stepsize_hz * 2) + 1;
return 0;
}
diff --git a/drivers/media/dvb-frontends/cxd2820r_t2.c b/drivers/media/dvb-frontends/cxd2820r_t2.c
index e641fde75379..f330ec1710b4 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t2.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t2.c
@@ -426,8 +426,8 @@ int cxd2820r_get_tune_settings_t2(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
s->min_delay_ms = 1500;
- s->step_size = fe->ops.info.frequency_stepsize * 2;
- s->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1;
+ s->step_size = fe->ops.info.frequency_stepsize_hz * 2;
+ s->max_drift = (fe->ops.info.frequency_stepsize_hz * 2) + 1;
return 0;
}
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 85905d3503ff..c98093ed3dd7 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3942,9 +3942,8 @@ static const struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Sony CXD2841ER DVB-S/S2 demodulator",
- .frequency_min = 500000,
- .frequency_max = 2500000,
- .frequency_stepsize = 0,
+ .frequency_min_hz = 500 * MHz,
+ .frequency_max_hz = 2500 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500,
@@ -3988,8 +3987,8 @@ static struct dvb_frontend_ops cxd2841er_t_c_ops = {
FE_CAN_HIERARCHY_AUTO |
FE_CAN_MUTE_TS |
FE_CAN_2G_MODULATION,
- .frequency_min = 42000000,
- .frequency_max = 1002000000,
+ .frequency_min_hz = 42 * MHz,
+ .frequency_max_hz = 1002 * MHz,
.symbol_rate_min = 870000,
.symbol_rate_max = 11700000
},
diff --git a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
index bd9101e246d5..f87e27481ea7 100644
--- a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
+++ b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
@@ -1833,9 +1833,9 @@ static enum dvbfe_algo cxd2880_get_frontend_algo(struct dvb_frontend *fe)
static struct dvb_frontend_ops cxd2880_dvbt_t2_ops = {
.info = {
.name = "Sony CXD2880",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 1000,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 1 * kHz,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c
index 932d235118e2..37ebd5af8fd4 100644
--- a/drivers/media/dvb-frontends/dib0070.c
+++ b/drivers/media/dvb-frontends/dib0070.c
@@ -726,10 +726,10 @@ static void dib0070_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops dib0070_ops = {
.info = {
- .name = "DiBcom DiB0070",
- .frequency_min = 45000000,
- .frequency_max = 860000000,
- .frequency_step = 1000,
+ .name = "DiBcom DiB0070",
+ .frequency_min_hz = 45 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_step_hz = 1 * kHz,
},
.release = dib0070_release,
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index ee7af34979ed..44a074261e69 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -2578,9 +2578,9 @@ static int dib0090_set_params(struct dvb_frontend *fe)
static const struct dvb_tuner_ops dib0090_ops = {
.info = {
.name = "DiBcom DiB0090",
- .frequency_min = 45000000,
- .frequency_max = 860000000,
- .frequency_step = 1000,
+ .frequency_min_hz = 45 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_step_hz = 1 * kHz,
},
.release = dib0090_release,
@@ -2593,9 +2593,9 @@ static const struct dvb_tuner_ops dib0090_ops = {
static const struct dvb_tuner_ops dib0090_fw_ops = {
.info = {
.name = "DiBcom DiB0090",
- .frequency_min = 45000000,
- .frequency_max = 860000000,
- .frequency_step = 1000,
+ .frequency_min_hz = 45 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_step_hz = 1 * kHz,
},
.release = dib0090_release,
diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
index 5861f346db49..bbbd53280477 100644
--- a/drivers/media/dvb-frontends/dib3000mb.c
+++ b/drivers/media/dvb-frontends/dib3000mb.c
@@ -786,9 +786,9 @@ static const struct dvb_frontend_ops dib3000mb_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 3000M-B DVB-T",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index 7e5d474806a5..c9e1db251723 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -944,9 +944,9 @@ static const struct dvb_frontend_ops dib3000mc_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 3000MC/P",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c
index 6a1d357d0c7c..b79358d09de6 100644
--- a/drivers/media/dvb-frontends/dib7000m.c
+++ b/drivers/media/dvb-frontends/dib7000m.c
@@ -1443,9 +1443,9 @@ static const struct dvb_frontend_ops dib7000m_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 7000MA/MB/PA/PB/MC",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 5a8dbc0b25fb..58387860b62d 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -2824,9 +2824,9 @@ static const struct dvb_frontend_ops dib7000p_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 7000PC",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 22eec8f65485..3c3f8cb14845 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -4390,9 +4390,9 @@ static const struct dvb_frontend_ops dib8000_ops = {
.delsys = { SYS_ISDBT },
.info = {
.name = "DiBcom 8000 ISDB-T",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index b8edb55696bb..0183fb1346ef 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -2553,9 +2553,9 @@ static const struct dvb_frontend_ops dib9000_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 9000",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 5706898e84cc..9628d4067fe1 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -2841,8 +2841,7 @@ ctrl_set_cfg_mpeg_output(struct drx_demod_instance *demod, struct drx_cfg_mpeg_o
/* coef = 188/204 */
max_bit_rate =
(ext_attr->curr_symbol_rate / 8) * nr_bits * 188;
- /* pass through as b/c Annex A/c need following settings */
- /* fall-through */
+	/* fall-through - because Annex A/C need the following settings */
case DRX_STANDARD_ITU_B:
rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_USAGE__A, FEC_OC_FCT_USAGE__PRE, 0);
if (rc != 0) {
@@ -11811,8 +11810,8 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
block_hdr.CRC = be16_to_cpu(*(__be16 *)(mc_data));
mc_data += sizeof(u16);
- pr_debug("%u: addr %u, size %u, flags 0x%04x, CRC 0x%04x\n",
- (unsigned)(mc_data - mc_data_init), block_hdr.addr,
+ pr_debug("%zd: addr %u, size %u, flags 0x%04x, CRC 0x%04x\n",
+ (mc_data - mc_data_init), block_hdr.addr,
block_hdr.size, block_hdr.flags, block_hdr.CRC);
/* Check block header on:
@@ -11842,8 +11841,8 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
mc_block_nr_bytes,
mc_data, 0x0000)) {
rc = -EIO;
- pr_err("error writing firmware at pos %u\n",
- (unsigned)(mc_data - mc_data_init));
+ pr_err("error writing firmware at pos %zd\n",
+ mc_data - mc_data_init);
goto release;
}
break;
@@ -11866,8 +11865,8 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
(u16)bytes_to_comp,
(u8 *)mc_data_buffer,
0x0000)) {
- pr_err("error reading firmware at pos %u\n",
- (unsigned)(mc_data - mc_data_init));
+ pr_err("error reading firmware at pos %zd\n",
+ mc_data - mc_data_init);
return -EIO;
}
@@ -11875,8 +11874,8 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
bytes_to_comp);
if (result) {
- pr_err("error verifying firmware at pos %u\n",
- (unsigned)(mc_data - mc_data_init));
+ pr_err("error verifying firmware at pos %zd\n",
+ mc_data - mc_data_init);
return -EIO;
}
@@ -12374,9 +12373,9 @@ static const struct dvb_frontend_ops drx39xxj_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Micronas DRX39xxj family Frontend",
- .frequency_stepsize = 62500,
- .frequency_min = 51000000,
- .frequency_max = 858000000,
+ .frequency_min_hz = 51 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 3b7d31a22d82..684d428efb0d 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -1970,8 +1970,7 @@ static int DRX_Start(struct drxd_state *state, s32 off)
switch (p->transmission_mode) {
default: /* Not set, detect it automatically */
operationMode |= SC_RA_RAM_OP_AUTO_MODE__M;
- /* try first guess DRX_FFTMODE_8K */
- /* fall through */
+ /* fall through - try first guess DRX_FFTMODE_8K */
case TRANSMISSION_MODE_8K:
transmissionParams |= SC_RA_RAM_OP_PARAM_MODE_8K;
if (state->type_A) {
@@ -2144,8 +2143,7 @@ static int DRX_Start(struct drxd_state *state, s32 off)
switch (p->modulation) {
default:
operationMode |= SC_RA_RAM_OP_AUTO_CONST__M;
- /* try first guess DRX_CONSTELLATION_QAM64 */
- /* fall through */
+ /* fall through - try first guess DRX_CONSTELLATION_QAM64 */
case QAM_64:
transmissionParams |= SC_RA_RAM_OP_PARAM_CONST_QAM64;
if (state->type_A) {
@@ -2912,10 +2910,9 @@ static const struct dvb_frontend_ops drxd_ops = {
.delsys = { SYS_DVBT},
.info = {
.name = "Micronas DRXD DVB-T",
- .frequency_min = 47125000,
- .frequency_max = 855250000,
- .frequency_stepsize = 166667,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 47125 * kHz,
+ .frequency_max_hz = 855250 * kHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 5a26ad93be10..f1886945a7bc 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -3270,13 +3270,11 @@ static int dvbt_sc_command(struct drxk_state *state,
case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM:
case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM:
status |= write16(state, OFDM_SC_RA_RAM_PARAM1__A, param1);
- /* All commands using 1 parameters */
- /* fall through */
+		/* fall through - All commands using 1 parameter */
case OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING:
case OFDM_SC_RA_RAM_CMD_USER_IO:
status |= write16(state, OFDM_SC_RA_RAM_PARAM0__A, param0);
- /* All commands using 0 parameters */
- /* fall through */
+ /* fall through - All commands using 0 parameters */
case OFDM_SC_RA_RAM_CMD_GET_OP_PARAM:
case OFDM_SC_RA_RAM_CMD_NULL:
/* Write command */
@@ -3784,8 +3782,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
case TRANSMISSION_MODE_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_MODE__M;
- /* try first guess DRX_FFTMODE_8K */
- /* fall through */
+ /* fall through - try first guess DRX_FFTMODE_8K */
case TRANSMISSION_MODE_8K:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_MODE_8K;
break;
@@ -3799,8 +3796,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
default:
case GUARD_INTERVAL_AUTO:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_GUARD__M;
- /* try first guess DRX_GUARD_1DIV4 */
- /* fall through */
+ /* fall through - try first guess DRX_GUARD_1DIV4 */
case GUARD_INTERVAL_1_4:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_4;
break;
@@ -3841,8 +3837,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
case QAM_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_CONST__M;
- /* try first guess DRX_CONSTELLATION_QAM64 */
- /* fall through */
+ /* fall through - try first guess DRX_CONSTELLATION_QAM64 */
case QAM_64:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64;
break;
@@ -3885,8 +3880,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
case FEC_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_RATE__M;
- /* try first guess DRX_CODERATE_2DIV3 */
- /* fall through */
+ /* fall through - try first guess DRX_CODERATE_2DIV3 */
case FEC_2_3:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3;
break;
@@ -6744,13 +6738,13 @@ static const struct dvb_frontend_ops drxk_ops = {
/* .delsys will be filled dynamically */
.info = {
.name = "DRXK",
- .frequency_min = 47000000,
- .frequency_max = 865000000,
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 865 * MHz,
/* For DVB-C */
- .symbol_rate_min = 870000,
+ .symbol_rate_min = 870000,
.symbol_rate_max = 11700000,
/* For DVB-T */
- .frequency_stepsize = 166667,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 2ff90e5eabce..46a55146cb07 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -1100,10 +1100,10 @@ static const struct dvb_frontend_ops ds3000_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Montage Technology DS3000",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1011, /* kHz for QPSK frontends */
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1011 * kHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index e3894ff403d7..6d4b2eec67b4 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -799,6 +799,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
struct dvb_pll_priv *priv = NULL;
int ret;
const struct dvb_pll_desc *desc;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
b1 = kmalloc(1, GFP_KERNEL);
if (!b1)
@@ -844,8 +845,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
strncpy(fe->ops.tuner_ops.info.name, desc->name,
sizeof(fe->ops.tuner_ops.info.name));
- fe->ops.tuner_ops.info.frequency_min = desc->min;
- fe->ops.tuner_ops.info.frequency_max = desc->max;
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ case SYS_DVBS2:
+ case SYS_TURBO:
+ case SYS_ISDBS:
+ fe->ops.tuner_ops.info.frequency_min_hz = desc->min * kHz;
+ fe->ops.tuner_ops.info.frequency_max_hz = desc->max * kHz;
+ break;
+ default:
+ fe->ops.tuner_ops.info.frequency_min_hz = desc->min;
+ fe->ops.tuner_ops.info.frequency_max_hz = desc->max;
+ }
+
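The dvb-pll descriptors keep their historical numbers: for satellite
tuners desc->min/max were kHz, for everything else Hz, which is exactly
what the switch above preserves when filling the new _hz fields. A
stand-alone arithmetic check (values typical of a satellite PLL
descriptor, chosen for illustration):

	#include <stdio.h>

	int main(void)
	{
		const unsigned int kHz = 1000;
		unsigned int min = 950000, max = 2150000; /* legacy units */

		printf("sat:  %u..%u Hz\n", min * kHz, max * kHz);
		printf("terr: %u..%u Hz\n", min, max);
		return 0;
	}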
if (!desc->initdata)
fe->ops.tuner_ops.init = NULL;
if (!desc->sleepdata)
@@ -884,6 +896,17 @@ dvb_pll_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (!dvb_pll_attach(fe, client->addr, client->adapter, desc_id))
return -ENOMEM;
+ /*
+ * Unset tuner_ops.release (== dvb_pll_release)
+	 * which was just set by the dvb_pll_attach() call above,
+	 * because if tuner_ops.release were left defined,
+	 * this module would be 'put' twice on exit:
+	 * once by dvb_frontend_detach() and again by dvb_module_release().
+ *
+ * dvb_pll_release is instead executed in the i2c driver's .remove(),
+ * keeping dvb_pll_attach untouched for legacy (dvb_attach) drivers.
+ */
+ fe->ops.tuner_ops.release = NULL;
dev_info(&client->dev, "DVB Simple Tuner attached.\n");
return 0;
}
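Clearing tuner_ops.release keeps the module reference counting
balanced: dvb_frontend_detach() "puts" a module when .release is set,
and dvb_module_release() does so again for i2c-bound tuners. With the
callback cleared, cleanup for the i2c path runs exactly once, from the
i2c driver's .remove(). A hypothetical sketch of that remove-side
counterpart (names and clientdata layout assumed, not the driver's
actual code):

	#include <linux/i2c.h>

	static int dvb_pll_remove_sketch(struct i2c_client *client)
	{
		struct dvb_frontend *fe = i2c_get_clientdata(client);

		dvb_pll_release(fe);	/* single, explicit cleanup */
		return 0;
	}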
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.c b/drivers/media/dvb-frontends/dvb_dummy_fe.c
index 6650d4f61ef2..a4cbcae7967d 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.c
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.c
@@ -170,9 +170,9 @@ static const struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Dummy DVB-T",
- .frequency_min = 0,
- .frequency_max = 863250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 0,
+ .frequency_max_hz = 863250 * kHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
FE_CAN_FEC_7_8 | FE_CAN_FEC_8_9 | FE_CAN_FEC_AUTO |
@@ -201,11 +201,11 @@ static const struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "Dummy DVB-C",
- .frequency_stepsize = 62500,
- .frequency_min = 51000000,
- .frequency_max = 858000000,
- .symbol_rate_min = (57840000/2)/64, /* SACLK/64 == (XIN/2)/64 */
- .symbol_rate_max = (57840000/2)/4, /* SACLK/4 */
+ .frequency_min_hz = 51 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
+ .symbol_rate_min = (57840000 / 2) / 64, /* SACLK/64 == (XIN/2)/64 */
+ .symbol_rate_max = (57840000 / 2) / 4, /* SACLK/4 */
.caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
FE_CAN_QAM_128 | FE_CAN_QAM_256 |
FE_CAN_FEC_AUTO | FE_CAN_INVERSION_AUTO
@@ -230,10 +230,10 @@ static const struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Dummy DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 250, /* kHz for QPSK frontends */
- .frequency_tolerance = 29500,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 250 * kHz,
+ .frequency_tolerance_hz = 29500 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c
index a772ef8bfe1c..238f09aa72f2 100644
--- a/drivers/media/dvb-frontends/gp8psk-fe.c
+++ b/drivers/media/dvb-frontends/gp8psk-fe.c
@@ -355,9 +355,9 @@ static const struct dvb_frontend_ops gp8psk_fe_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Genpix DVB-S",
- .frequency_min = 800000,
- .frequency_max = 2250000,
- .frequency_stepsize = 100,
+ .frequency_min_hz = 800 * MHz,
+ .frequency_max_hz = 2250 * MHz,
+ .frequency_stepsize_hz = 100 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500, /* ppm */
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index a0d0b53c91d7..d7790cb98a0c 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -666,7 +666,7 @@ static int helene_set_params_s(struct dvb_frontend *fe)
return 0;
}
-static int helene_set_params(struct dvb_frontend *fe)
+static int helene_set_params_t(struct dvb_frontend *fe)
{
u8 data[MAX_WRITE_REGSIZE];
u32 frequency;
@@ -835,6 +835,19 @@ static int helene_set_params(struct dvb_frontend *fe)
return 0;
}
+static int helene_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ if (p->delivery_system == SYS_DVBT ||
+ p->delivery_system == SYS_DVBT2 ||
+ p->delivery_system == SYS_ISDBT ||
+ p->delivery_system == SYS_DVBC_ANNEX_A)
+ return helene_set_params_t(fe);
+
+ return helene_set_params_s(fe);
+}
+
static int helene_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
struct helene_priv *priv = fe->tuner_priv;
@@ -843,26 +856,26 @@ static int helene_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static const struct dvb_tuner_ops helene_tuner_ops = {
+static const struct dvb_tuner_ops helene_tuner_ops_t = {
.info = {
.name = "Sony HELENE Ter tuner",
- .frequency_min = 1000000,
- .frequency_max = 1200000000,
- .frequency_step = 25000,
+ .frequency_min_hz = 1 * MHz,
+ .frequency_max_hz = 1200 * MHz,
+ .frequency_step_hz = 25 * kHz,
},
.init = helene_init,
.release = helene_release,
.sleep = helene_sleep,
- .set_params = helene_set_params,
+ .set_params = helene_set_params_t,
.get_frequency = helene_get_frequency,
};
static const struct dvb_tuner_ops helene_tuner_ops_s = {
.info = {
.name = "Sony HELENE Sat tuner",
- .frequency_min = 500000,
- .frequency_max = 2500000,
- .frequency_step = 1000,
+ .frequency_min_hz = 500 * MHz,
+ .frequency_max_hz = 2500 * MHz,
+ .frequency_step_hz = 1 * MHz,
},
.init = helene_init,
.release = helene_release,
@@ -871,6 +884,20 @@ static const struct dvb_tuner_ops helene_tuner_ops_s = {
.get_frequency = helene_get_frequency,
};
+static const struct dvb_tuner_ops helene_tuner_ops = {
+ .info = {
+ .name = "Sony HELENE Sat/Ter tuner",
+ .frequency_min_hz = 1 * MHz,
+ .frequency_max_hz = 2500 * MHz,
+ .frequency_step_hz = 25 * kHz,
+ },
+ .init = helene_init,
+ .release = helene_release,
+ .sleep = helene_sleep,
+ .set_params = helene_set_params,
+ .get_frequency = helene_get_frequency,
+};
+
/* power-on tuner
* call once after reset
*/
@@ -897,7 +924,10 @@ static int helene_x_pon(struct helene_priv *priv)
helene_write_regs(priv, 0x99, cdata, sizeof(cdata));
/* 0x81 - 0x94 */
- data[0] = 0x18; /* xtal 24 MHz */
+ if (priv->xtal == SONY_HELENE_XTAL_16000)
+ data[0] = 0x10; /* xtal 16 MHz */
+ else
+ data[0] = 0x18; /* xtal 24 MHz */
data[1] = (uint8_t)(0x80 | (0x04 & 0x1F)); /* 4 x 25 = 100uA */
data[2] = (uint8_t)(0x80 | (0x26 & 0x7F)); /* 38 x 0.25 = 9.5pF */
data[3] = 0x80; /* REFOUT signal output 500mVpp */
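The only board-specific difference in this power-on sequence is the
crystal setup byte written to register 0x81: 0x10 selects a 16 MHz
reference, 0x18 the default 24 MHz. The same choice, factored out as a
sketch (helper name illustrative; u8 and enum helene_xtal come from
linux/types.h and helene.h):

	/* Register 0x81 value for the chosen reference crystal. */
	static u8 helene_xtal_cfg(enum helene_xtal xtal)
	{
		return xtal == SONY_HELENE_XTAL_16000 ? 0x10 : 0x18;
	}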
@@ -1032,7 +1062,7 @@ struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
- memcpy(&fe->ops.tuner_ops, &helene_tuner_ops,
+ memcpy(&fe->ops.tuner_ops, &helene_tuner_ops_t,
sizeof(struct dvb_tuner_ops));
fe->tuner_priv = priv;
dev_info(&priv->i2c->dev,
@@ -1042,6 +1072,59 @@ struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
}
EXPORT_SYMBOL(helene_attach);
+static int helene_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct helene_config *config = client->dev.platform_data;
+ struct dvb_frontend *fe = config->fe;
+ struct device *dev = &client->dev;
+ struct helene_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->i2c_address = client->addr;
+ priv->i2c = client->adapter;
+ priv->set_tuner_data = config->set_tuner_priv;
+ priv->set_tuner = config->set_tuner_callback;
+ priv->xtal = config->xtal;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (helene_x_pon(priv) != 0)
+ return -EINVAL;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ memcpy(&fe->ops.tuner_ops, &helene_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ fe->tuner_priv = priv;
+ i2c_set_clientdata(client, priv);
+
+ dev_info(dev, "Sony HELENE attached on addr=%x at I2C adapter %p\n",
+ priv->i2c_address, priv->i2c);
+
+ return 0;
+}
+
+static const struct i2c_device_id helene_id[] = {
+ { "helene", },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, helene_id);
+
+static struct i2c_driver helene_driver = {
+ .driver = {
+ .name = "helene",
+ },
+ .probe = helene_probe,
+ .id_table = helene_id,
+};
+module_i2c_driver(helene_driver);
+
MODULE_DESCRIPTION("Sony HELENE Sat/Ter tuner driver");
MODULE_AUTHOR("Abylay Ospan <aospan@netup.ru>");
MODULE_LICENSE("GPL");
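With the new i2c driver, bridge drivers can bind the tuner through the
i2c core instead of calling helene_attach() directly; the frontend
pointer travels in the platform data. A hedged board-side sketch
(adapter, address and error handling are illustrative, and
i2c_new_device() is the registration API of this kernel era):

	#include <linux/i2c.h>
	#include "helene.h"

	static struct helene_config cfg = {
		.xtal = SONY_HELENE_XTAL_16000,
	};

	static int bind_helene(struct i2c_adapter *adap,
			       struct dvb_frontend *fe)
	{
		struct i2c_board_info info = {
			.type = "helene",
			.addr = 0x61,		/* illustrative address */
			.platform_data = &cfg,
		};

		cfg.fe = fe;		/* consumed by helene_probe() */
		return i2c_new_device(adap, &info) ? 0 : -ENODEV;
	}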
diff --git a/drivers/media/dvb-frontends/helene.h b/drivers/media/dvb-frontends/helene.h
index c9fc81c7e4e7..8562d01bc93e 100644
--- a/drivers/media/dvb-frontends/helene.h
+++ b/drivers/media/dvb-frontends/helene.h
@@ -39,6 +39,7 @@ enum helene_xtal {
* @set_tuner_callback: Callback function that notifies the parent driver
* which tuner is active now
 * @xtal: Crystal frequency as described by &enum helene_xtal
+ * @fe: Frontend to which this tuner is connected
*/
struct helene_config {
u8 i2c_address;
@@ -46,6 +47,8 @@ struct helene_config {
void *set_tuner_priv;
int (*set_tuner_callback)(void *, int);
enum helene_xtal xtal;
+
+ struct dvb_frontend *fe;
};
#if IS_REACHABLE(CONFIG_DVB_HELENE)
diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
index 5e7e265a52e6..02bc08081971 100644
--- a/drivers/media/dvb-frontends/horus3a.c
+++ b/drivers/media/dvb-frontends/horus3a.c
@@ -330,9 +330,9 @@ static int horus3a_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops horus3a_tuner_ops = {
.info = {
.name = "Sony Horus3a",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 1000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_step_hz = 1 * MHz,
},
.init = horus3a_init,
.release = horus3a_release,
diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
index 04f7f6854f73..c3a6e81ae87f 100644
--- a/drivers/media/dvb-frontends/itd1000.c
+++ b/drivers/media/dvb-frontends/itd1000.c
@@ -353,10 +353,10 @@ static void itd1000_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops itd1000_tuner_ops = {
.info = {
- .name = "Integrant ITD1000",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 125, /* kHz for QPSK frontends */
+ .name = "Integrant ITD1000",
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_step_hz = 125 * kHz,
},
.release = itd1000_release,
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index 965012ad5c59..a30707b61b1f 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -135,8 +135,8 @@ static int ix2505v_set_params(struct dvb_frontend *fe)
u8 gain, cc, ref, psc, local_osc, lpf;
u8 data[4] = {0};
- if ((frequency < fe->ops.info.frequency_min)
- || (frequency > fe->ops.info.frequency_max))
+ if ((frequency < fe->ops.info.frequency_min_hz / kHz)
+ || (frequency > fe->ops.info.frequency_max_hz / kHz))
return -EINVAL;
if (state->config->tuner_gain)
@@ -256,8 +256,8 @@ static int ix2505v_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops ix2505v_tuner_ops = {
.info = {
.name = "Sharp IX2505V (B0017)",
- .frequency_min = 950000,
- .frequency_max = 2175000
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2175 * MHz
},
.release = ix2505v_release,
.set_params = ix2505v_set_params,
diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
index 249c18761e6e..9afb5bf6424b 100644
--- a/drivers/media/dvb-frontends/l64781.c
+++ b/drivers/media/dvb-frontends/l64781.c
@@ -575,10 +575,9 @@ static const struct dvb_frontend_ops l64781_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "LSI L64781 DVB-T",
- /* .frequency_min = ???,*/
- /* .frequency_max = ???,*/
- .frequency_stepsize = 166666,
- /* .frequency_tolerance = ???,*/
+ /* .frequency_min_hz = ???,*/
+ /* .frequency_max_hz = ???,*/
+ .frequency_stepsize_hz = 166666,
/* .symbol_rate_tolerance = ???,*/
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index 9854096839ae..408151e33fa7 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -1349,9 +1349,9 @@ static const struct dvb_frontend_ops lg2160_ops = {
.delsys = { SYS_ATSCMH },
.info = {
.name = "LG Electronics LG2160 ATSC/MH Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
},
.i2c_gate_ctrl = lg216x_i2c_gate_ctrl,
#if 0
@@ -1375,9 +1375,9 @@ static const struct dvb_frontend_ops lg2161_ops = {
.delsys = { SYS_ATSCMH },
.info = {
.name = "LG Electronics LG2161 ATSC/MH Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
},
.i2c_gate_ctrl = lg216x_i2c_gate_ctrl,
#if 0
diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
index 735d73060265..857e9b4d69b4 100644
--- a/drivers/media/dvb-frontends/lgdt3305.c
+++ b/drivers/media/dvb-frontends/lgdt3305.c
@@ -1164,9 +1164,9 @@ static const struct dvb_frontend_ops lgdt3304_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3304 VSB/QAM Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
.i2c_gate_ctrl = lgdt3305_i2c_gate_ctrl,
@@ -1187,9 +1187,9 @@ static const struct dvb_frontend_ops lgdt3305_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3305 VSB/QAM Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
.i2c_gate_ctrl = lgdt3305_i2c_gate_ctrl,
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 32de824476db..0e1f5daaf20c 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -2157,9 +2157,9 @@ static const struct dvb_frontend_ops lgdt3306a_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3306A VSB/QAM Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_AUTO | FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
.i2c_gate_ctrl = lgdt3306a_i2c_gate_ctrl,
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index f6731738b073..10d584ce538d 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -944,9 +944,9 @@ static const struct dvb_frontend_ops lgdt3302_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3302 VSB/QAM Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.symbol_rate_min = 5056941, /* QAM 64 */
.symbol_rate_max = 10762000, /* VSB 8 */
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
@@ -966,9 +966,9 @@ static const struct dvb_frontend_ops lgdt3303_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3303 VSB/QAM Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.symbol_rate_min = 5056941, /* QAM 64 */
.symbol_rate_max = 10762000, /* VSB 8 */
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
diff --git a/drivers/media/dvb-frontends/lgs8gl5.c b/drivers/media/dvb-frontends/lgs8gl5.c
index f47e5a1af16d..07e5bcee9c1e 100644
--- a/drivers/media/dvb-frontends/lgs8gl5.c
+++ b/drivers/media/dvb-frontends/lgs8gl5.c
@@ -416,10 +416,9 @@ static const struct dvb_frontend_ops lgs8gl5_ops = {
.delsys = { SYS_DTMB },
.info = {
.name = "Legend Silicon LGS-8GL5 DMB-TH",
- .frequency_min = 474000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 10000,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 474 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 10 * kHz,
.caps = FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_32 |
FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
diff --git a/drivers/media/dvb-frontends/lgs8gxx.c b/drivers/media/dvb-frontends/lgs8gxx.c
index 84af8a12f26a..a6bcf1571d10 100644
--- a/drivers/media/dvb-frontends/lgs8gxx.c
+++ b/drivers/media/dvb-frontends/lgs8gxx.c
@@ -985,9 +985,9 @@ static const struct dvb_frontend_ops lgs8gxx_ops = {
.delsys = { SYS_DTMB },
.info = {
.name = "Legend Silicon LGS8913/LGS8GXX DMB-TH",
- .frequency_min = 474000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 10000,
+ .frequency_min_hz = 474 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 10 * kHz,
.caps =
FE_CAN_FEC_AUTO |
FE_CAN_QAM_AUTO |
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 65d157fe76d1..dffd2d4bf1c8 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1300,9 +1300,9 @@ static const struct dvb_frontend_ops m88ds3103_ops = {
.delsys = {SYS_DVBS, SYS_DVBS2},
.info = {
.name = "Montage Technology M88DS3103",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index 496ce27fa63a..d5bc85501f9e 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -759,10 +759,10 @@ static const struct dvb_frontend_ops m88rs2000_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "M88RS2000 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1000, /* kHz for QPSK frontends */
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1 * MHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500, /* ppm */
diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c
index 377cd984b069..da505a5d035f 100644
--- a/drivers/media/dvb-frontends/mb86a16.c
+++ b/drivers/media/dvb-frontends/mb86a16.c
@@ -1808,10 +1808,9 @@ static const struct dvb_frontend_ops mb86a16_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Fujitsu MB86A16 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 3000,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 3 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500,
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index c3b1b88e2e7a..66fc77db0e75 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -2111,9 +2111,9 @@ static const struct dvb_frontend_ops mb86a20s_ops = {
FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_QAM_AUTO |
FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO,
/* Actually, those values depend on the used tuner */
- .frequency_min = 45000000,
- .frequency_max = 864000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 45 * MHz,
+ .frequency_max_hz = 864 * MHz,
+ .frequency_stepsize_hz = 62500,
},
.release = mb86a20s_release,
diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
new file mode 100644
index 000000000000..9ec1aeef03d5
--- /dev/null
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@ -0,0 +1,802 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Socionext MN88443x series demodulator driver for ISDB-S/ISDB-T.
+//
+// Copyright (c) 2018 Socionext Inc.
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <media/dvb_math.h>
+
+#include "mn88443x.h"
+
+/* ISDB-S registers */
+#define ATSIDU_S 0x2f
+#define ATSIDL_S 0x30
+#define TSSET_S 0x31
+#define AGCREAD_S 0x5a
+#define CPMON1_S 0x5e
+#define CPMON1_S_FSYNC BIT(5)
+#define CPMON1_S_ERRMON BIT(4)
+#define CPMON1_S_SIGOFF BIT(3)
+#define CPMON1_S_W2LOCK BIT(2)
+#define CPMON1_S_W1LOCK BIT(1)
+#define CPMON1_S_DW1LOCK BIT(0)
+#define TRMON_S 0x60
+#define BERCNFLG_S 0x68
+#define BERCNFLG_S_BERVRDY BIT(5)
+#define BERCNFLG_S_BERVCHK BIT(4)
+#define BERCNFLG_S_BERDRDY BIT(3)
+#define BERCNFLG_S_BERDCHK BIT(2)
+#define CNRDXU_S 0x69
+#define CNRDXL_S 0x6a
+#define CNRDYU_S 0x6b
+#define CNRDYL_S 0x6c
+#define BERVRDU_S 0x71
+#define BERVRDL_S 0x72
+#define DOSET1_S 0x73
+
+/* Primary ISDB-T */
+#define PLLASET1 0x00
+#define PLLASET2 0x01
+#define PLLBSET1 0x02
+#define PLLBSET2 0x03
+#define PLLSET 0x04
+#define OUTCSET 0x08
+#define OUTCSET_CHDRV_8MA 0xff
+#define OUTCSET_CHDRV_4MA 0x00
+#define PLDWSET 0x09
+#define PLDWSET_NORMAL 0x00
+#define PLDWSET_PULLDOWN 0xff
+#define HIZSET1 0x0a
+#define HIZSET2 0x0b
+
+/* Secondary ISDB-T (for MN884434 only) */
+#define RCVSET 0x00
+#define TSSET1_M 0x01
+#define TSSET2_M 0x02
+#define TSSET3_M 0x03
+#define INTACSET 0x08
+#define HIZSET3 0x0b
+
+/* ISDB-T registers */
+#define TSSET1 0x05
+#define TSSET1_TSASEL_MASK GENMASK(4, 3)
+#define TSSET1_TSASEL_ISDBT (0x0 << 3)
+#define TSSET1_TSASEL_ISDBS (0x1 << 3)
+#define TSSET1_TSASEL_NONE (0x2 << 3)
+#define TSSET1_TSBSEL_MASK GENMASK(2, 1)
+#define TSSET1_TSBSEL_ISDBS (0x0 << 1)
+#define TSSET1_TSBSEL_ISDBT (0x1 << 1)
+#define TSSET1_TSBSEL_NONE (0x2 << 1)
+#define TSSET2 0x06
+#define TSSET3 0x07
+#define TSSET3_INTASEL_MASK GENMASK(7, 6)
+#define TSSET3_INTASEL_T (0x0 << 6)
+#define TSSET3_INTASEL_S (0x1 << 6)
+#define TSSET3_INTASEL_NONE (0x2 << 6)
+#define TSSET3_INTBSEL_MASK GENMASK(5, 4)
+#define TSSET3_INTBSEL_S (0x0 << 4)
+#define TSSET3_INTBSEL_T (0x1 << 4)
+#define TSSET3_INTBSEL_NONE (0x2 << 4)
+#define OUTSET2 0x0d
+#define PWDSET 0x0f
+#define PWDSET_OFDMPD_MASK GENMASK(3, 2)
+#define PWDSET_OFDMPD_DOWN BIT(3)
+#define PWDSET_PSKPD_MASK GENMASK(1, 0)
+#define PWDSET_PSKPD_DOWN BIT(1)
+#define CLKSET1_T 0x11
+#define MDSET_T 0x13
+#define MDSET_T_MDAUTO_MASK GENMASK(7, 4)
+#define MDSET_T_MDAUTO_AUTO (0xf << 4)
+#define MDSET_T_MDAUTO_MANUAL (0x0 << 4)
+#define MDSET_T_FFTS_MASK GENMASK(3, 2)
+#define MDSET_T_FFTS_MODE1 (0x0 << 2)
+#define MDSET_T_FFTS_MODE2 (0x1 << 2)
+#define MDSET_T_FFTS_MODE3 (0x2 << 2)
+#define MDSET_T_GI_MASK GENMASK(1, 0)
+#define MDSET_T_GI_1_32 (0x0 << 0)
+#define MDSET_T_GI_1_16 (0x1 << 0)
+#define MDSET_T_GI_1_8 (0x2 << 0)
+#define MDSET_T_GI_1_4 (0x3 << 0)
+#define MDASET_T 0x14
+#define ADCSET1_T 0x20
+#define ADCSET1_T_REFSEL_MASK GENMASK(1, 0)
+#define ADCSET1_T_REFSEL_2V (0x3 << 0)
+#define ADCSET1_T_REFSEL_1_5V (0x2 << 0)
+#define ADCSET1_T_REFSEL_1V (0x1 << 0)
+#define NCOFREQU_T 0x24
+#define NCOFREQM_T 0x25
+#define NCOFREQL_T 0x26
+#define FADU_T 0x27
+#define FADM_T 0x28
+#define FADL_T 0x29
+#define AGCSET2_T 0x2c
+#define AGCSET2_T_IFPOLINV_INC BIT(0)
+#define AGCSET2_T_RFPOLINV_INC BIT(1)
+#define AGCV3_T 0x3e
+#define MDRD_T 0xa2
+#define MDRD_T_SEGID_MASK GENMASK(5, 4)
+#define MDRD_T_SEGID_13 (0x0 << 4)
+#define MDRD_T_SEGID_1 (0x1 << 4)
+#define MDRD_T_SEGID_3 (0x2 << 4)
+#define MDRD_T_FFTS_MASK GENMASK(3, 2)
+#define MDRD_T_FFTS_MODE1 (0x0 << 2)
+#define MDRD_T_FFTS_MODE2 (0x1 << 2)
+#define MDRD_T_FFTS_MODE3 (0x2 << 2)
+#define MDRD_T_GI_MASK GENMASK(1, 0)
+#define MDRD_T_GI_1_32 (0x0 << 0)
+#define MDRD_T_GI_1_16 (0x1 << 0)
+#define MDRD_T_GI_1_8 (0x2 << 0)
+#define MDRD_T_GI_1_4 (0x3 << 0)
+#define SSEQRD_T 0xa3
+#define SSEQRD_T_SSEQSTRD_MASK GENMASK(3, 0)
+#define SSEQRD_T_SSEQSTRD_RESET (0x0 << 0)
+#define SSEQRD_T_SSEQSTRD_TUNING (0x1 << 0)
+#define SSEQRD_T_SSEQSTRD_AGC (0x2 << 0)
+#define SSEQRD_T_SSEQSTRD_SEARCH (0x3 << 0)
+#define SSEQRD_T_SSEQSTRD_CLOCK_SYNC (0x4 << 0)
+#define SSEQRD_T_SSEQSTRD_FREQ_SYNC (0x8 << 0)
+#define SSEQRD_T_SSEQSTRD_FRAME_SYNC (0x9 << 0)
+#define SSEQRD_T_SSEQSTRD_SYNC (0xa << 0)
+#define SSEQRD_T_SSEQSTRD_LOCK (0xb << 0)
+#define AGCRDU_T 0xa8
+#define AGCRDL_T 0xa9
+#define CNRDU_T 0xbe
+#define CNRDL_T 0xbf
+#define BERFLG_T 0xc0
+#define BERFLG_T_BERDRDY BIT(7)
+#define BERFLG_T_BERDCHK BIT(6)
+#define BERFLG_T_BERVRDYA BIT(5)
+#define BERFLG_T_BERVCHKA BIT(4)
+#define BERFLG_T_BERVRDYB BIT(3)
+#define BERFLG_T_BERVCHKB BIT(2)
+#define BERFLG_T_BERVRDYC BIT(1)
+#define BERFLG_T_BERVCHKC BIT(0)
+#define BERRDU_T 0xc1
+#define BERRDM_T 0xc2
+#define BERRDL_T 0xc3
+#define BERLENRDU_T 0xc4
+#define BERLENRDL_T 0xc5
+#define ERRFLG_T 0xc6
+#define ERRFLG_T_BERDOVF BIT(7)
+#define ERRFLG_T_BERVOVFA BIT(6)
+#define ERRFLG_T_BERVOVFB BIT(5)
+#define ERRFLG_T_BERVOVFC BIT(4)
+#define ERRFLG_T_NERRFA BIT(3)
+#define ERRFLG_T_NERRFB BIT(2)
+#define ERRFLG_T_NERRFC BIT(1)
+#define ERRFLG_T_NERRF BIT(0)
+#define DOSET1_T 0xcf
+
+#define CLK_LOW 4000000
+#define CLK_DIRECT 20200000
+#define CLK_MAX 25410000
+
+#define S_T_FREQ 8126984 /* 512 / 63 MHz */
+
+struct mn88443x_spec {
+ bool primary;
+};
+
+struct mn88443x_priv {
+ const struct mn88443x_spec *spec;
+
+ struct dvb_frontend fe;
+ struct clk *mclk;
+ struct gpio_desc *reset_gpio;
+ u32 clk_freq;
+ u32 if_freq;
+
+ /* Common */
+ bool use_clkbuf;
+
+ /* ISDB-S */
+ struct i2c_client *client_s;
+ struct regmap *regmap_s;
+
+ /* ISDB-T */
+ struct i2c_client *client_t;
+ struct regmap *regmap_t;
+};
+
+static void mn88443x_cmn_power_on(struct mn88443x_priv *chip)
+{
+ struct regmap *r_t = chip->regmap_t;
+
+ clk_prepare_enable(chip->mclk);
+
+ gpiod_set_value_cansleep(chip->reset_gpio, 1);
+ usleep_range(100, 1000);
+ gpiod_set_value_cansleep(chip->reset_gpio, 0);
+
+ if (chip->spec->primary) {
+ regmap_write(r_t, OUTCSET, OUTCSET_CHDRV_8MA);
+ regmap_write(r_t, PLDWSET, PLDWSET_NORMAL);
+ regmap_write(r_t, HIZSET1, 0x80);
+ regmap_write(r_t, HIZSET2, 0xe0);
+ } else {
+ regmap_write(r_t, HIZSET3, 0x8f);
+ }
+}
+
+static void mn88443x_cmn_power_off(struct mn88443x_priv *chip)
+{
+ gpiod_set_value_cansleep(chip->reset_gpio, 1);
+
+ clk_disable_unprepare(chip->mclk);
+}
+
+static void mn88443x_s_sleep(struct mn88443x_priv *chip)
+{
+ struct regmap *r_t = chip->regmap_t;
+
+ regmap_update_bits(r_t, PWDSET, PWDSET_PSKPD_MASK,
+ PWDSET_PSKPD_DOWN);
+}
+
+static void mn88443x_s_wake(struct mn88443x_priv *chip)
+{
+ struct regmap *r_t = chip->regmap_t;
+
+ regmap_update_bits(r_t, PWDSET, PWDSET_PSKPD_MASK, 0);
+}
+
+static void mn88443x_s_tune(struct mn88443x_priv *chip,
+ struct dtv_frontend_properties *c)
+{
+ struct regmap *r_s = chip->regmap_s;
+
+ regmap_write(r_s, ATSIDU_S, c->stream_id >> 8);
+ regmap_write(r_s, ATSIDL_S, c->stream_id);
+ regmap_write(r_s, TSSET_S, 0);
+}
+
+static int mn88443x_s_read_status(struct mn88443x_priv *chip,
+ struct dtv_frontend_properties *c,
+ enum fe_status *status)
+{
+ struct regmap *r_s = chip->regmap_s;
+ u32 cpmon, tmpu, tmpl, flg;
+ u64 tmp;
+
+ /* Sync detection */
+ regmap_read(r_s, CPMON1_S, &cpmon);
+
+ *status = 0;
+ if (cpmon & CPMON1_S_FSYNC)
+ *status |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ if (cpmon & CPMON1_S_W2LOCK)
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER;
+
+ /* Signal strength */
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ if (*status & FE_HAS_SIGNAL) {
+ u32 agc;
+
+ regmap_read(r_s, AGCREAD_S, &tmpu);
+ agc = tmpu << 8;
+
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = agc;
+ }
+
+ /* C/N rate */
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ if (*status & FE_HAS_VITERBI) {
+ u32 cnr = 0, x, y, d;
+ u64 d_3 = 0;
+
+ regmap_read(r_s, CNRDXU_S, &tmpu);
+ regmap_read(r_s, CNRDXL_S, &tmpl);
+ x = (tmpu << 8) | tmpl;
+ regmap_read(r_s, CNRDYU_S, &tmpu);
+ regmap_read(r_s, CNRDYL_S, &tmpl);
+ y = (tmpu << 8) | tmpl;
+
+ /* CNR[dB]: 10 * log10(D) - 30.74 / D^3 - 3 */
+ /* D = x^2 / (2^15 * y - x^2) */
+ d = (y << 15) - x * x;
+ if (d > 0) {
+ /* (2^4 * D)^3 = 2^12 * D^3 */
+ /* 3.074 * 2^(12 + 24) = 211243671486 */
+ d_3 = div_u64(16 * x * x, d);
+ d_3 = d_3 * d_3 * d_3;
+ if (d_3)
+ d_3 = div_u64(211243671486ULL, d_3);
+ }
+
+ if (d_3) {
+ /* 0.3 * 2^24 = 5033164 */
+ tmp = (s64)2 * intlog10(x) - intlog10(abs(d)) - d_3
+ - 5033164;
+ cnr = div_u64(tmp * 10000, 1 << 24);
+ }
+
+ if (cnr) {
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].uvalue = cnr;
+ }
+ }
+
+ /* BER */
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ regmap_read(r_s, BERCNFLG_S, &flg);
+
+ if ((*status & FE_HAS_VITERBI) && (flg & BERCNFLG_S_BERVRDY)) {
+ u32 bit_err, bit_cnt;
+
+ regmap_read(r_s, BERVRDU_S, &tmpu);
+ regmap_read(r_s, BERVRDL_S, &tmpl);
+ bit_err = (tmpu << 8) | tmpl;
+ bit_cnt = (1 << 13) * 204;
+
+ if (bit_cnt) {
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = bit_err;
+ c->post_bit_count.len = 1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue = bit_cnt;
+ }
+ }
+
+ return 0;
+}
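
A note on the fixed-point arithmetic above: intlog10() returns log10(v) scaled by 2^24, and the two magic numbers are the CNR formula's constants pre-scaled the same way. A standalone user-space sketch (the x/y readbacks are made up, not from the patch) cross-checks the constants and the 0.001 dB output scale:

/* Standalone cross-check of the ISDB-S CNR constants (user space). */
#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 16-bit CNRDX/CNRDY register readbacks. */
	double x = 20000.0, y = 15000.0;
	double D = x * x / (y * 32768.0 - x * x);
	double cnr_db = 10.0 * log10(D) - 30.74 / (D * D * D) - 3.0;

	printf("3.074 * 2^36 = %lld (driver: 211243671486)\n",
	       (long long)(3.074 * 68719476736.0));
	printf("0.3 * 2^24   = %lld (driver: 5033164)\n",
	       (long long)(0.3 * 16777216.0));
	printf("CNR = %.2f dB -> uvalue ~ %.0f (units of 0.001 dB)\n",
	       cnr_db, cnr_db * 1000.0);
	return 0;
}
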
+
+static void mn88443x_t_sleep(struct mn88443x_priv *chip)
+{
+ struct regmap *r_t = chip->regmap_t;
+
+ regmap_update_bits(r_t, PWDSET, PWDSET_OFDMPD_MASK,
+ PWDSET_OFDMPD_DOWN);
+}
+
+static void mn88443x_t_wake(struct mn88443x_priv *chip)
+{
+ struct regmap *r_t = chip->regmap_t;
+
+ regmap_update_bits(r_t, PWDSET, PWDSET_OFDMPD_MASK, 0);
+}
+
+static bool mn88443x_t_is_valid_clk(u32 adckt, u32 if_freq)
+{
+ if (if_freq == DIRECT_IF_57MHZ) {
+ if (adckt >= CLK_DIRECT && adckt <= 21000000)
+ return true;
+ if (adckt >= 25300000 && adckt <= CLK_MAX)
+ return true;
+ } else if (if_freq == DIRECT_IF_44MHZ) {
+ if (adckt >= 25000000 && adckt <= CLK_MAX)
+ return true;
+ } else if (if_freq >= LOW_IF_4MHZ && if_freq < DIRECT_IF_44MHZ) {
+ if (adckt >= CLK_DIRECT && adckt <= CLK_MAX)
+ return true;
+ }
+
+ return false;
+}
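
The helper above encodes three ADC clock windows, one per IF configuration; note the gap between 21 MHz and 25.3 MHz in the 57 MHz direct-IF case. A standalone sketch exercising the same windows with a few sample rates (values made up but realistic):

/* Standalone sketch of the ADC clock windows (user space). */
#include <stdbool.h>
#include <stdio.h>

#define DIRECT_IF_57MHZ	57000000
#define DIRECT_IF_44MHZ	44000000
#define LOW_IF_4MHZ	4000000
#define CLK_DIRECT	20200000
#define CLK_MAX		25410000

static bool valid(unsigned int adckt, unsigned int if_freq)
{
	if (if_freq == DIRECT_IF_57MHZ)
		return (adckt >= CLK_DIRECT && adckt <= 21000000) ||
		       (adckt >= 25300000 && adckt <= CLK_MAX);
	if (if_freq == DIRECT_IF_44MHZ)
		return adckt >= 25000000 && adckt <= CLK_MAX;
	if (if_freq >= LOW_IF_4MHZ && if_freq < DIRECT_IF_44MHZ)
		return adckt >= CLK_DIRECT && adckt <= CLK_MAX;
	return false;
}

int main(void)
{
	printf("%d\n", valid(20500000, DIRECT_IF_57MHZ));	/* 1 */
	printf("%d\n", valid(23000000, DIRECT_IF_57MHZ));	/* 0: in the gap */
	printf("%d\n", valid(24000000, LOW_IF_4MHZ));		/* 1 */
	return 0;
}
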
+
+static int mn88443x_t_set_freq(struct mn88443x_priv *chip)
+{
+ struct device *dev = &chip->client_s->dev;
+ struct regmap *r_t = chip->regmap_t;
+ s64 adckt, nco, ad_t;
+ u32 m, v;
+
+ /* Clock buffer (not supported by this driver) or XTAL */
+ if (chip->clk_freq >= CLK_LOW && chip->clk_freq < CLK_DIRECT) {
+ chip->use_clkbuf = true;
+ regmap_write(r_t, CLKSET1_T, 0x07);
+
+ adckt = 0;
+ } else {
+ chip->use_clkbuf = false;
+ regmap_write(r_t, CLKSET1_T, 0x00);
+
+ adckt = chip->clk_freq;
+ }
+ if (!mn88443x_t_is_valid_clk(adckt, chip->if_freq)) {
+ dev_err(dev, "Invalid clock, CLK:%d, ADCKT:%lld, IF:%d\n",
+ chip->clk_freq, adckt, chip->if_freq);
+ return -EINVAL;
+ }
+
+ /* Direct IF or Low IF */
+ if (chip->if_freq == DIRECT_IF_57MHZ ||
+ chip->if_freq == DIRECT_IF_44MHZ)
+ nco = adckt * 2 - chip->if_freq;
+ else
+ nco = -((s64)chip->if_freq);
+ nco = div_s64(nco << 24, adckt);
+ ad_t = div_s64(adckt << 22, S_T_FREQ);
+
+ regmap_write(r_t, NCOFREQU_T, nco >> 16);
+ regmap_write(r_t, NCOFREQM_T, nco >> 8);
+ regmap_write(r_t, NCOFREQL_T, nco);
+ regmap_write(r_t, FADU_T, ad_t >> 16);
+ regmap_write(r_t, FADM_T, ad_t >> 8);
+ regmap_write(r_t, FADL_T, ad_t);
+
+ /* Level of IF */
+ m = ADCSET1_T_REFSEL_MASK;
+ v = ADCSET1_T_REFSEL_1_5V;
+ regmap_update_bits(r_t, ADCSET1_T, m, v);
+
+ /* Polarity of AGC */
+ v = AGCSET2_T_IFPOLINV_INC | AGCSET2_T_RFPOLINV_INC;
+ regmap_update_bits(r_t, AGCSET2_T, v, v);
+
+ /* Lower output level of AGC */
+ regmap_write(r_t, AGCV3_T, 0x00);
+
+ regmap_write(r_t, MDSET_T, 0xfa);
+
+ return 0;
+}
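
The NCO and FAD words above are fixed-point ratios: nco is -IF/ADCKT (low IF) or (2*ADCKT - IF)/ADCKT (direct IF) scaled by 2^24, and ad_t is ADCKT/S_T_FREQ scaled by 2^22. A standalone sketch of the register values for a hypothetical 24 MHz ADC clock with a 4 MHz low IF (both inside the valid windows):

/* Standalone sketch of the NCO/FAD register math (user space). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t adckt = 24000000, if_freq = 4000000;
	/* Low-IF path; div_s64() truncates toward zero, like C division. */
	int64_t nco  = (-if_freq * (1 << 24)) / adckt;	/* -2796202 */
	int64_t ad_t = (adckt * (1 << 22)) / 8126984;	/* / S_T_FREQ */

	printf("NCOFREQ U/M/L = %02x %02x %02x\n",	/* d5 55 56 */
	       (unsigned int)(nco >> 16) & 0xff,
	       (unsigned int)(nco >> 8) & 0xff,
	       (unsigned int)nco & 0xff);
	printf("FAD U/M/L     = %02x %02x %02x\n",	/* bd 00 00 */
	       (unsigned int)(ad_t >> 16) & 0xff,
	       (unsigned int)(ad_t >> 8) & 0xff,
	       (unsigned int)ad_t & 0xff);
	return 0;
}
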
+
+static void mn88443x_t_tune(struct mn88443x_priv *chip,
+ struct dtv_frontend_properties *c)
+{
+ struct regmap *r_t = chip->regmap_t;
+ u32 m, v;
+
+ m = MDSET_T_MDAUTO_MASK | MDSET_T_FFTS_MASK | MDSET_T_GI_MASK;
+ v = MDSET_T_MDAUTO_AUTO | MDSET_T_FFTS_MODE3 | MDSET_T_GI_1_8;
+ regmap_update_bits(r_t, MDSET_T, m, v);
+
+ regmap_write(r_t, MDASET_T, 0);
+}
+
+static int mn88443x_t_read_status(struct mn88443x_priv *chip,
+ struct dtv_frontend_properties *c,
+ enum fe_status *status)
+{
+ struct regmap *r_t = chip->regmap_t;
+ u32 seqrd, st, flg, tmpu, tmpm, tmpl;
+ u64 tmp;
+
+ /* Sync detection */
+ regmap_read(r_t, SSEQRD_T, &seqrd);
+ st = seqrd & SSEQRD_T_SSEQSTRD_MASK;
+
+ *status = 0;
+ if (st >= SSEQRD_T_SSEQSTRD_SYNC)
+ *status |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ if (st >= SSEQRD_T_SSEQSTRD_FRAME_SYNC)
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER;
+
+ /* Signal strength */
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ if (*status & FE_HAS_SIGNAL) {
+ u32 agc;
+
+ regmap_read(r_t, AGCRDU_T, &tmpu);
+ regmap_read(r_t, AGCRDL_T, &tmpl);
+ agc = (tmpu << 8) | tmpl;
+
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = agc;
+ }
+
+ /* C/N rate */
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ if (*status & FE_HAS_VITERBI) {
+ u32 cnr;
+
+ regmap_read(r_t, CNRDU_T, &tmpu);
+ regmap_read(r_t, CNRDL_T, &tmpl);
+
+ if (tmpu || tmpl) {
+ /* CNR[dB]: 10 * (log10(65536 / value) + 0.2) */
+ /* intlog10(65536) = 80807124, 0.2 * 2^24 = 3355443 */
+ tmp = (u64)80807124 - intlog10((tmpu << 8) | tmpl)
+ + 3355443;
+ cnr = div_u64(tmp * 10000, 1 << 24);
+ } else {
+ cnr = 0;
+ }
+
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].uvalue = cnr;
+ }
+
+ /* BER */
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ regmap_read(r_t, BERFLG_T, &flg);
+
+ if ((*status & FE_HAS_VITERBI) && (flg & BERFLG_T_BERVRDYA)) {
+ u32 bit_err, bit_cnt;
+
+ regmap_read(r_t, BERRDU_T, &tmpu);
+ regmap_read(r_t, BERRDM_T, &tmpm);
+ regmap_read(r_t, BERRDL_T, &tmpl);
+ bit_err = (tmpu << 16) | (tmpm << 8) | tmpl;
+
+ regmap_read(r_t, BERLENRDU_T, &tmpu);
+ regmap_read(r_t, BERLENRDL_T, &tmpl);
+ bit_cnt = ((tmpu << 8) | tmpl) * 203 * 8;
+
+ if (bit_cnt) {
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = bit_err;
+ c->post_bit_count.len = 1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue = bit_cnt;
+ }
+ }
+
+ return 0;
+}
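
As in the ISDB-S path, the constants here are just the CNR formula pre-scaled by 2^24. A standalone cross-check, with a made-up CNRD readback:

/* Standalone cross-check of the ISDB-T CNR constants (user space). */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double v = 1000.0;	/* hypothetical CNRDU/CNRDL readback */
	double cnr_db = 10.0 * (log10(65536.0 / v) + 0.2);

	printf("intlog10(65536) ~ %lld (driver: 80807124)\n",
	       (long long)(log10(65536.0) * 16777216.0));
	printf("0.2 * 2^24      ~ %lld (driver: 3355443)\n",
	       (long long)(0.2 * 16777216.0));
	printf("CNR = %.2f dB -> uvalue ~ %.0f\n", cnr_db, cnr_db * 1000.0);
	return 0;
}
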
+
+static int mn88443x_sleep(struct dvb_frontend *fe)
+{
+ struct mn88443x_priv *chip = fe->demodulator_priv;
+
+ mn88443x_s_sleep(chip);
+ mn88443x_t_sleep(chip);
+
+ return 0;
+}
+
+static int mn88443x_set_frontend(struct dvb_frontend *fe)
+{
+ struct mn88443x_priv *chip = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct regmap *r_s = chip->regmap_s;
+ struct regmap *r_t = chip->regmap_t;
+ u8 tssel = 0, intsel = 0;
+
+ if (c->delivery_system == SYS_ISDBS) {
+ mn88443x_s_wake(chip);
+ mn88443x_t_sleep(chip);
+
+ tssel = TSSET1_TSASEL_ISDBS;
+ intsel = TSSET3_INTASEL_S;
+ } else if (c->delivery_system == SYS_ISDBT) {
+ mn88443x_s_sleep(chip);
+ mn88443x_t_wake(chip);
+
+ mn88443x_t_set_freq(chip);
+
+ tssel = TSSET1_TSASEL_ISDBT;
+ intsel = TSSET3_INTASEL_T;
+ }
+
+ regmap_update_bits(r_t, TSSET1,
+ TSSET1_TSASEL_MASK | TSSET1_TSBSEL_MASK,
+ tssel | TSSET1_TSBSEL_NONE);
+ regmap_write(r_t, TSSET2, 0);
+ regmap_update_bits(r_t, TSSET3,
+ TSSET3_INTASEL_MASK | TSSET3_INTBSEL_MASK,
+ intsel | TSSET3_INTBSEL_NONE);
+
+ regmap_write(r_t, DOSET1_T, 0x95);
+ regmap_write(r_s, DOSET1_S, 0x80);
+
+ if (c->delivery_system == SYS_ISDBS)
+ mn88443x_s_tune(chip, c);
+ else if (c->delivery_system == SYS_ISDBT)
+ mn88443x_t_tune(chip, c);
+
+ if (fe->ops.tuner_ops.set_params) {
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ fe->ops.tuner_ops.set_params(fe);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ }
+
+ return 0;
+}
+
+static int mn88443x_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *s)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ s->min_delay_ms = 850;
+
+ if (c->delivery_system == SYS_ISDBS) {
+ s->max_drift = 30000 * 2 + 1;
+ s->step_size = 30000;
+ } else if (c->delivery_system == SYS_ISDBT) {
+ s->max_drift = 142857 * 2 + 1;
+ s->step_size = 142857 * 2;
+ }
+
+ return 0;
+}
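
For reference, 142857 Hz is the ISDB-T 1/7 MHz channel raster (1000000 / 7, rounded down). A rough reading of dvb-core's software zigzag, not taken from this patch, is that it retunes at offsets 0, +step_size, -step_size, ... while the magnitude stays within max_drift, so these values allow about one retry on each side of the requested frequency:

/* Where the ISDB-T tune-settings numbers come from (user space). */
#include <stdio.h>

int main(void)
{
	unsigned int raster = 1000000 / 7;	/* 142857 Hz */

	printf("step_size = %u, max_drift = %u\n",
	       raster * 2, raster * 2 + 1);
	return 0;
}
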
+
+static int mn88443x_read_status(struct dvb_frontend *fe, enum fe_status *status)
+{
+ struct mn88443x_priv *chip = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ if (c->delivery_system == SYS_ISDBS)
+ return mn88443x_s_read_status(chip, c, status);
+
+ if (c->delivery_system == SYS_ISDBT)
+ return mn88443x_t_read_status(chip, c, status);
+
+ return -EINVAL;
+}
+
+static const struct dvb_frontend_ops mn88443x_ops = {
+ .delsys = { SYS_ISDBS, SYS_ISDBT },
+ .info = {
+ .name = "Socionext MN88443x",
+ .frequency_min_hz = 470 * MHz,
+ .frequency_max_hz = 2071 * MHz,
+ .symbol_rate_min = 28860000,
+ .symbol_rate_max = 28860000,
+ .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_AUTO |
+ FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO,
+ },
+
+ .sleep = mn88443x_sleep,
+ .set_frontend = mn88443x_set_frontend,
+ .get_tune_settings = mn88443x_get_tune_settings,
+ .read_status = mn88443x_read_status,
+};
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_NONE,
+};
+
+static int mn88443x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mn88443x_config *conf = client->dev.platform_data;
+ struct mn88443x_priv *chip;
+ struct device *dev = &client->dev;
+ int ret;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ if (dev->of_node)
+ chip->spec = of_device_get_match_data(dev);
+ else
+ chip->spec = (struct mn88443x_spec *)id->driver_data;
+ if (!chip->spec)
+ return -EINVAL;
+
+ chip->mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(chip->mclk) && !conf) {
+ dev_err(dev, "Failed to request mclk: %ld\n",
+ PTR_ERR(chip->mclk));
+ return PTR_ERR(chip->mclk);
+ }
+
+ ret = of_property_read_u32(dev->of_node, "if-frequency",
+ &chip->if_freq);
+ if (ret && !conf) {
+ dev_err(dev, "Failed to load IF frequency: %d.\n", ret);
+ return ret;
+ }
+
+ chip->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(chip->reset_gpio)) {
+ dev_err(dev, "Failed to request reset_gpio: %ld\n",
+ PTR_ERR(chip->reset_gpio));
+ return PTR_ERR(chip->reset_gpio);
+ }
+
+ if (conf) {
+ chip->mclk = conf->mclk;
+ chip->if_freq = conf->if_freq;
+ chip->reset_gpio = conf->reset_gpio;
+
+ *conf->fe = &chip->fe;
+ }
+
+ chip->client_s = client;
+ chip->regmap_s = devm_regmap_init_i2c(chip->client_s, &regmap_config);
+ if (IS_ERR(chip->regmap_s))
+ return PTR_ERR(chip->regmap_s);
+
+ /*
+ * The chip has a separate I2C address for each of the satellite and
+ * terrestrial blocks. The ISDB-T block answers at the ISDB-S address
+ * plus 4, so register a dummy client for it.
+ */
+ chip->client_t = i2c_new_dummy(client->adapter, client->addr + 4);
+ if (!chip->client_t)
+ return -ENODEV;
+
+ chip->regmap_t = devm_regmap_init_i2c(chip->client_t, &regmap_config);
+ if (IS_ERR(chip->regmap_t)) {
+ ret = PTR_ERR(chip->regmap_t);
+ goto err_i2c_t;
+ }
+
+ chip->clk_freq = clk_get_rate(chip->mclk);
+
+ memcpy(&chip->fe.ops, &mn88443x_ops, sizeof(mn88443x_ops));
+ chip->fe.demodulator_priv = chip;
+ i2c_set_clientdata(client, chip);
+
+ mn88443x_cmn_power_on(chip);
+ mn88443x_s_sleep(chip);
+ mn88443x_t_sleep(chip);
+
+ return 0;
+
+err_i2c_t:
+ i2c_unregister_device(chip->client_t);
+
+ return ret;
+}
+
+static int mn88443x_remove(struct i2c_client *client)
+{
+ struct mn88443x_priv *chip = i2c_get_clientdata(client);
+
+ mn88443x_cmn_power_off(chip);
+
+ i2c_unregister_device(chip->client_t);
+
+ return 0;
+}
+
+static const struct mn88443x_spec mn88443x_spec_pri = {
+ .primary = true,
+};
+
+static const struct mn88443x_spec mn88443x_spec_sec = {
+ .primary = false,
+};
+
+static const struct of_device_id mn88443x_of_match[] = {
+ { .compatible = "socionext,mn884433", .data = &mn88443x_spec_pri, },
+ { .compatible = "socionext,mn884434-0", .data = &mn88443x_spec_pri, },
+ { .compatible = "socionext,mn884434-1", .data = &mn88443x_spec_sec, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mn88443x_of_match);
+
+static const struct i2c_device_id mn88443x_i2c_id[] = {
+ { "mn884433", (kernel_ulong_t)&mn88443x_spec_pri },
+ { "mn884434-0", (kernel_ulong_t)&mn88443x_spec_pri },
+ { "mn884434-1", (kernel_ulong_t)&mn88443x_spec_sec },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mn88443x_i2c_id);
+
+static struct i2c_driver mn88443x_driver = {
+ .driver = {
+ .name = "mn88443x",
+ .of_match_table = of_match_ptr(mn88443x_of_match),
+ },
+ .probe = mn88443x_probe,
+ .remove = mn88443x_remove,
+ .id_table = mn88443x_i2c_id,
+};
+
+module_i2c_driver(mn88443x_driver);
+
+MODULE_AUTHOR("Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>");
+MODULE_DESCRIPTION("Socionext MN88443x series demodulator driver.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/dvb-frontends/mn88443x.h b/drivers/media/dvb-frontends/mn88443x.h
new file mode 100644
index 000000000000..b19aaf6a1ea3
--- /dev/null
+++ b/drivers/media/dvb-frontends/mn88443x.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Socionext MN88443x series demodulator driver for ISDB-S/ISDB-T.
+ *
+ * Copyright (c) 2018 Socionext Inc.
+ */
+
+#ifndef MN88443X_H
+#define MN88443X_H
+
+#include <media/dvb_frontend.h>
+
+/* ISDB-T IF frequency */
+#define DIRECT_IF_57MHZ 57000000
+#define DIRECT_IF_44MHZ 44000000
+#define LOW_IF_4MHZ 4000000
+
+struct mn88443x_config {
+ struct clk *mclk;
+ u32 if_freq;
+ struct gpio_desc *reset_gpio;
+
+ /* Everything below is filled in by the driver. */
+ struct dvb_frontend **fe;
+};
+
+#endif /* MN88443X_H */
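
For non-OF platforms, a bridge driver would pass the same parameters through i2c_board_info platform data and get the frontend pointer back through the fe member. A hypothetical bridge-side sketch, assuming the i2c_new_device() API of this era; the 0x40 address and all my_* names are made up for illustration:

/* Hypothetical bridge-side attach via platform data (not from the patch). */
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/string.h>
#include "mn88443x.h"

static struct dvb_frontend *my_fe;	/* filled in by mn88443x probe */

static struct i2c_client *my_attach_demod(struct i2c_adapter *adap,
					  struct clk *mclk,
					  struct gpio_desc *reset)
{
	static struct mn88443x_config cfg;	/* read during probe */
	struct i2c_board_info info = { .addr = 0x40 };

	cfg.mclk = mclk;
	cfg.if_freq = DIRECT_IF_57MHZ;
	cfg.reset_gpio = reset;
	cfg.fe = &my_fe;

	info.platform_data = &cfg;
	strscpy(info.type, "mn884433", I2C_NAME_SIZE);

	return i2c_new_device(adap, &info);
}
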
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index e2a3fc521620..aad07adda37d 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -559,8 +559,8 @@ static int mt312_set_frontend(struct dvb_frontend *fe)
dprintk("%s: Freq %d\n", __func__, p->frequency);
- if ((p->frequency < fe->ops.info.frequency_min)
- || (p->frequency > fe->ops.info.frequency_max))
+ if ((p->frequency < fe->ops.info.frequency_min_hz / kHz)
+ || (p->frequency > fe->ops.info.frequency_max_hz / kHz))
return -EINVAL;
if (((int)p->inversion < INVERSION_OFF)
@@ -755,10 +755,10 @@ static const struct dvb_frontend_ops mt312_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Zarlink ???? DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
/* FIXME: adjust freq to real used xtal */
- .frequency_stepsize = (MT312_PLL_CLK / 1000) / 128,
+ .frequency_stepsize_hz = MT312_PLL_CLK / 128,
.symbol_rate_min = MT312_SYS_CLK / 128, /* FIXME as above */
.symbol_rate_max = MT312_SYS_CLK / 2,
.caps =
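
The pattern in this and the following conversions: satellite (QPSK) frontends historically kept their limits in kHz and cable/terrestrial ones in Hz, so the new *_hz fields are the old values times kHz or MHz, and kHz-based paths such as mt312_set_frontend() divide the limits back down before comparing. A user-space sanity sketch, assuming the kHz/MHz helpers added to dvb_frontend.h by this series are plain multipliers:

/* Sanity sketch of the Hz-based fields vs. the legacy kHz units. */
#include <stdio.h>

#define kHz 1000
#define MHz 1000000

int main(void)
{
	unsigned int min_hz = 950 * MHz;	/* was .frequency_min = 950000 (kHz) */

	printf("%u Hz = %u kHz\n", min_hz, min_hz / kHz);
	return 0;
}
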
diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c
index a440b76444d3..da3e466d50e2 100644
--- a/drivers/media/dvb-frontends/mt352.c
+++ b/drivers/media/dvb-frontends/mt352.c
@@ -567,10 +567,9 @@ static const struct dvb_frontend_ops mt352_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Zarlink MT352 DVB-T",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 166667,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
index 274d8fca0763..295f37d5f10e 100644
--- a/drivers/media/dvb-frontends/mxl5xx.c
+++ b/drivers/media/dvb-frontends/mxl5xx.c
@@ -784,10 +784,8 @@ static struct dvb_frontend_ops mxl_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "MaxLinear MxL5xx DVB-S/S2 tuner-demodulator",
- .frequency_min = 300000,
- .frequency_max = 2350000,
- .frequency_stepsize = 0,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 300 * MHz,
+ .frequency_max_hz = 2350 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index a6cc4952eb74..0961e686ff68 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -1212,9 +1212,9 @@ static const struct dvb_frontend_ops nxt200x_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Nextwave NXT200X VSB/QAM frontend",
- .frequency_min = 54000000,
- .frequency_max = 860000000,
- .frequency_stepsize = 166666, /* stepsize is just a guess */
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_stepsize_hz = 166666, /* stepsize is just a guess */
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_8VSB | FE_CAN_QAM_64 | FE_CAN_QAM_256
diff --git a/drivers/media/dvb-frontends/nxt6000.c b/drivers/media/dvb-frontends/nxt6000.c
index 109a635d166a..72e447e8ba64 100644
--- a/drivers/media/dvb-frontends/nxt6000.c
+++ b/drivers/media/dvb-frontends/nxt6000.c
@@ -596,9 +596,9 @@ static const struct dvb_frontend_ops nxt6000_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "NxtWave NXT6000 DVB-T",
- .frequency_min = 0,
- .frequency_max = 863250000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 0,
+ .frequency_max_hz = 863250 * kHz,
+ .frequency_stepsize_hz = 62500,
/*.frequency_tolerance = *//* FIXME: 12% of SR */
.symbol_rate_min = 0, /* FIXME */
.symbol_rate_max = 9360000, /* FIXME */
diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
index b4c9aadcb552..fc35f37eb3c0 100644
--- a/drivers/media/dvb-frontends/or51132.c
+++ b/drivers/media/dvb-frontends/or51132.c
@@ -583,9 +583,9 @@ static const struct dvb_frontend_ops or51132_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Oren OR51132 VSB/QAM Frontend",
- .frequency_min = 44000000,
- .frequency_max = 958000000,
- .frequency_stepsize = 166666,
+ .frequency_min_hz = 44 * MHz,
+ .frequency_max_hz = 958 * MHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_QAM_AUTO |
diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
index b65ba34fd00a..a39bbd8ff1f0 100644
--- a/drivers/media/dvb-frontends/or51211.c
+++ b/drivers/media/dvb-frontends/or51211.c
@@ -530,10 +530,10 @@ struct dvb_frontend* or51211_attach(const struct or51211_config* config,
static const struct dvb_frontend_ops or51211_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
- .name = "Oren OR51211 VSB Frontend",
- .frequency_min = 44000000,
- .frequency_max = 958000000,
- .frequency_stepsize = 166666,
+ .name = "Oren OR51211 VSB Frontend",
+ .frequency_min_hz = 44 * MHz,
+ .frequency_max_hz = 958 * MHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_8VSB
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 7bbfe11d11ed..adc9046d5a90 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -159,8 +159,8 @@ static int rtl2830_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
s->min_delay_ms = 500;
- s->step_size = fe->ops.info.frequency_stepsize * 2;
- s->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1;
+ s->step_size = fe->ops.info.frequency_stepsize_hz * 2;
+ s->max_drift = (fe->ops.info.frequency_stepsize_hz * 2) + 1;
return 0;
}
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index fa3b8169c1a5..2f1f5cbaf03c 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -408,8 +408,8 @@ static int rtl2832_get_tune_settings(struct dvb_frontend *fe,
dev_dbg(&client->dev, "\n");
s->min_delay_ms = 1000;
- s->step_size = fe->ops.info.frequency_stepsize * 2;
- s->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1;
+ s->step_size = fe->ops.info.frequency_stepsize_hz * 2;
+ s->max_drift = (fe->ops.info.frequency_stepsize_hz * 2) + 1;
return 0;
}
@@ -841,9 +841,9 @@ static const struct dvb_frontend_ops rtl2832_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Realtek RTL2832 (DVB-T)",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 166667,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index c6e78d870ccd..d448d9d4879c 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -301,7 +301,7 @@ static int rtl2832_sdr_submit_urbs(struct rtl2832_sdr_dev *dev)
for (i = 0; i < dev->urbs_initialized; i++) {
dev_dbg(&pdev->dev, "submit urb=%d\n", i);
- ret = usb_submit_urb(dev->urb_list[i], GFP_ATOMIC);
+ ret = usb_submit_urb(dev->urb_list[i], GFP_KERNEL);
if (ret) {
dev_err(&pdev->dev,
"Could not submit urb no. %d - get them all back\n",
@@ -345,7 +345,7 @@ static int rtl2832_sdr_alloc_stream_bufs(struct rtl2832_sdr_dev *dev)
for (dev->buf_num = 0; dev->buf_num < MAX_BULK_BUFS; dev->buf_num++) {
dev->buf_list[dev->buf_num] = usb_alloc_coherent(dev->udev,
- BULK_BUFFER_SIZE, GFP_ATOMIC,
+ BULK_BUFFER_SIZE, GFP_KERNEL,
&dev->dma_addr[dev->buf_num]);
if (!dev->buf_list[dev->buf_num]) {
dev_dbg(&pdev->dev, "alloc buf=%d failed\n",
@@ -390,7 +390,7 @@ static int rtl2832_sdr_alloc_urbs(struct rtl2832_sdr_dev *dev)
/* allocate the URBs */
for (i = 0; i < MAX_BULK_BUFS; i++) {
dev_dbg(&pdev->dev, "alloc urb=%d\n", i);
- dev->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
+ dev->urb_list[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->urb_list[i]) {
for (j = 0; j < i; j++)
usb_free_urb(dev->urb_list[j]);
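
The GFP_ATOMIC -> GFP_KERNEL switches above are safe because these allocation paths run in ordinary process context, where sleeping is allowed; GFP_KERNEL can wait and reclaim memory instead of failing under pressure. As a generic rule of thumb (illustration, not from the patch):

/* Generic GFP rule of thumb. */
#include <linux/slab.h>

static void *buf_in_process_context(size_t n)
{
	return kmalloc(n, GFP_KERNEL);	/* may sleep and reclaim */
}

static void *buf_in_atomic_context(size_t n)
{
	return kmalloc(n, GFP_ATOMIC);	/* IRQ/spinlock: must not sleep */
}
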
diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c
index a23ba8727218..ceeb0c3551ce 100644
--- a/drivers/media/dvb-frontends/s5h1409.c
+++ b/drivers/media/dvb-frontends/s5h1409.c
@@ -999,9 +999,9 @@ static const struct dvb_frontend_ops s5h1409_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Samsung S5H1409 QAM/8VSB Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
index af5962807f2c..98aeed1d2284 100644
--- a/drivers/media/dvb-frontends/s5h1411.c
+++ b/drivers/media/dvb-frontends/s5h1411.c
@@ -918,9 +918,9 @@ static const struct dvb_frontend_ops s5h1411_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Samsung S5H1411 QAM/8VSB Frontend",
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 54 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
index 8b2222530227..a65cdf8e8cd9 100644
--- a/drivers/media/dvb-frontends/s5h1420.c
+++ b/drivers/media/dvb-frontends/s5h1420.c
@@ -934,10 +934,10 @@ static const struct dvb_frontend_ops s5h1420_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Samsung S5H1420/PnpNetwork PN1010 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 125, /* kHz for QPSK frontends */
- .frequency_tolerance = 29500,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 125 * kHz,
+ .frequency_tolerance_hz = 29500 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
/* .symbol_rate_tolerance = ???,*/
diff --git a/drivers/media/dvb-frontends/s5h1432.c b/drivers/media/dvb-frontends/s5h1432.c
index 740a60df0455..4dc3febc0e12 100644
--- a/drivers/media/dvb-frontends/s5h1432.c
+++ b/drivers/media/dvb-frontends/s5h1432.c
@@ -370,9 +370,9 @@ static const struct dvb_frontend_ops s5h1432_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Samsung s5h1432 DVB-T Frontend",
- .frequency_min = 177000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166666,
+ .frequency_min_hz = 177 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index 6c9015236655..79276871112a 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -510,15 +510,14 @@ static const struct dvb_frontend_ops s921_ops = {
/* Use dib8000 values per default */
.info = {
.name = "Sharp S921",
- .frequency_min = 470000000,
+ .frequency_min_hz = 470 * MHz,
/*
* Max should be 770MHz instead, according with Sharp docs,
* but Leadership doc says it works up to 806 MHz. This is
* required to get channel 69, used in Brazil
*/
- .frequency_max = 806000000,
- .frequency_tolerance = 0,
- .caps = FE_CAN_INVERSION_AUTO |
+ .frequency_max_hz = 806 * MHz,
+ .caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index 2dd336f95cbf..feacd8da421d 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -1120,7 +1120,7 @@ static const struct dvb_frontend_ops si2165_ops = {
.symbol_rate_min = 1000000,
.symbol_rate_max = 7200000,
/* For DVB-T */
- .frequency_stepsize = 166667,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c
index 9b32a1b3205e..8546a236d452 100644
--- a/drivers/media/dvb-frontends/si21xx.c
+++ b/drivers/media/dvb-frontends/si21xx.c
@@ -870,10 +870,9 @@ static const struct dvb_frontend_ops si21xx_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "SL SI21XX DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 125, /* kHz for QPSK frontends */
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 125 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500, /* ppm */
diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c
index 1d57a20093fc..8d31cf3f4f07 100644
--- a/drivers/media/dvb-frontends/sp8870.c
+++ b/drivers/media/dvb-frontends/sp8870.c
@@ -584,9 +584,9 @@ static const struct dvb_frontend_ops sp8870_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Spase SP8870 DVB-T",
- .frequency_min = 470000000,
- .frequency_max = 860000000,
- .frequency_stepsize = 166666,
+ .frequency_min_hz = 470 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 |
FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c
index 57a0d0ae2b48..c02f50995df4 100644
--- a/drivers/media/dvb-frontends/sp887x.c
+++ b/drivers/media/dvb-frontends/sp887x.c
@@ -594,9 +594,9 @@ static const struct dvb_frontend_ops sp887x_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Spase SP887x DVB-T",
- .frequency_min = 50500000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166666,
+ .frequency_min_hz = 50500 * kHz,
+ .frequency_max_hz = 858000 * kHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 3c654ae16e78..874e9c9125d6 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -1584,10 +1584,8 @@ static const struct dvb_frontend_ops stb0899_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STB0899 Multistandard",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 0,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
.symbol_rate_min = 5000000,
.symbol_rate_max = 45000000,
diff --git a/drivers/media/dvb-frontends/stb6000.c b/drivers/media/dvb-frontends/stb6000.c
index 69c03892f2da..786b9eccde00 100644
--- a/drivers/media/dvb-frontends/stb6000.c
+++ b/drivers/media/dvb-frontends/stb6000.c
@@ -188,8 +188,8 @@ static int stb6000_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops stb6000_tuner_ops = {
.info = {
.name = "ST STB6000",
- .frequency_min = 950000,
- .frequency_max = 2150000
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz
},
.release = stb6000_release,
.sleep = stb6000_sleep,
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index 3a851f524b16..30ac584dfab3 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -527,9 +527,8 @@ static int stb6100_set_params(struct dvb_frontend *fe)
static const struct dvb_tuner_ops stb6100_ops = {
.info = {
.name = "STB6100 Silicon Tuner",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
},
.init = stb6100_init,
diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
index f947ed947aae..c9a9fa4e2c1b 100644
--- a/drivers/media/dvb-frontends/stv0288.c
+++ b/drivers/media/dvb-frontends/stv0288.c
@@ -533,10 +533,9 @@ static const struct dvb_frontend_ops stv0288_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "ST STV0288 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1000, /* kHz for QPSK frontends */
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500, /* ppm */
diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c
index b823c04e24d3..9a9915f71483 100644
--- a/drivers/media/dvb-frontends/stv0297.c
+++ b/drivers/media/dvb-frontends/stv0297.c
@@ -694,9 +694,9 @@ static const struct dvb_frontend_ops stv0297_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "ST STV0297 DVB-C",
- .frequency_min = 47000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 62500,
.symbol_rate_min = 870000,
.symbol_rate_max = 11700000,
.caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index 633b90e6fe86..4f466394a16c 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -717,10 +717,9 @@ static const struct dvb_frontend_ops stv0299_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "ST STV0299 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 125, /* kHz for QPSK frontends */
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 125 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500, /* ppm */
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index 5435c908e298..5b91e740e135 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -1693,10 +1693,9 @@ static const struct dvb_frontend_ops stv0367ter_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "ST STV0367 DVB-T",
- .frequency_min = 47000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 15625,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 15625,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
FE_CAN_FEC_AUTO |
@@ -2867,9 +2866,9 @@ static const struct dvb_frontend_ops stv0367cab_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "ST STV0367 DVB-C",
- .frequency_min = 47000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 62500,
.symbol_rate_min = 870000,
.symbol_rate_max = 11700000,
.caps = 0x400 |/* FE_CAN_QAM_4 */
@@ -3273,10 +3272,9 @@ static const struct dvb_frontend_ops stv0367ddb_ops = {
.delsys = { SYS_DVBC_ANNEX_A, SYS_DVBT },
.info = {
.name = "ST STV0367 DDB DVB-C/T",
- .frequency_min = 47000000,
- .frequency_max = 865000000,
- .frequency_stepsize = 166667,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 865 * MHz,
+ .frequency_stepsize_hz = 166667,
.symbol_rate_min = 870000,
.symbol_rate_max = 11700000,
.caps = /* DVB-C */
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index 72f17b97ca04..254618a06140 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -1875,10 +1875,9 @@ static const struct dvb_frontend_ops stv0900_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STV0900 frontend",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 125,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 125 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500,
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 9133f65d4623..a0622bb71803 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -4905,10 +4905,8 @@ static const struct dvb_frontend_ops stv090x_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STV090x Multistandard",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 0,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/stv0910.c b/drivers/media/dvb-frontends/stv0910.c
index 41444fa1c0bb..4c86073f1a8d 100644
--- a/drivers/media/dvb-frontends/stv0910.c
+++ b/drivers/media/dvb-frontends/stv0910.c
@@ -682,8 +682,8 @@ static int get_bit_error_rate_s(struct stv *state, u32 *bernumerator,
return -EINVAL;
if ((regs[0] & 0x80) == 0) {
- state->last_berdenominator = 1 << ((state->berscale * 2) +
- 10 + 3);
+ state->last_berdenominator = 1ULL << ((state->berscale * 2) +
+ 10 + 3);
state->last_bernumerator = ((u32)(regs[0] & 0x7F) << 16) |
((u32)regs[1] << 8) | regs[2];
if (state->last_bernumerator < 256 && state->berscale < 6) {
@@ -1724,10 +1724,8 @@ static const struct dvb_frontend_ops stv0910_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "ST STV0910",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 0,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
.symbol_rate_min = 100000,
.symbol_rate_max = 70000000,
.caps = FE_CAN_INVERSION_AUTO |
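
The 1ULL change above makes the shift 64-bit from the start: last_berdenominator is a 64-bit field, but "1 << n" is evaluated in plain int regardless of what it is assigned to. A standalone illustration of the idiom:

/* Standalone illustration: the shift width follows the operands,
 * not the destination. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int n = 33;		/* hypothetical wide shift */
	uint64_t good = 1ULL << n;	/* 64-bit math, defined up to 63 */

	/* "uint64_t bad = 1 << n;" would be undefined behaviour for
	 * n >= 32, despite the 64-bit left-hand side. */
	printf("%llu\n", (unsigned long long)good);
	return 0;
}
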
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index 6aad0efa3174..7db9a5bceccc 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -371,9 +371,9 @@ static int stv6110_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
static const struct dvb_tuner_ops stv6110_tuner_ops = {
.info = {
.name = "ST STV6110",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 1000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_step_hz = 1 * MHz,
},
.init = stv6110_init,
.release = stv6110_release,
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index d8950028d021..82c002d3833a 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -347,10 +347,9 @@ static void stv6110x_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops stv6110x_ops = {
.info = {
- .name = "STV6110(A) Silicon Tuner",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 0,
+ .name = "STV6110(A) Silicon Tuner",
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
},
.release = stv6110x_release
};
diff --git a/drivers/media/dvb-frontends/stv6111.c b/drivers/media/dvb-frontends/stv6111.c
index 9b715b6fe152..0cf460111acb 100644
--- a/drivers/media/dvb-frontends/stv6111.c
+++ b/drivers/media/dvb-frontends/stv6111.c
@@ -646,9 +646,8 @@ static int get_rf_strength(struct dvb_frontend *fe, u16 *st)
static const struct dvb_tuner_ops tuner_ops = {
.info = {
.name = "ST STV6111",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 0
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
},
.set_params = set_params,
.release = release,
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index 7abf6b0916ed..2ad81a438d6a 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -714,8 +714,8 @@ static const struct dvb_frontend_ops tc90522_ops_sat = {
.delsys = { SYS_ISDBS },
.info = {
.name = "Toshiba TC90522 ISDB-S module",
- .frequency_min = 950000,
- .frequency_max = 2150000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
.caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_AUTO |
FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO |
FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO,
@@ -734,9 +734,9 @@ static const struct dvb_frontend_ops tc90522_ops_ter = {
.delsys = { SYS_ISDBT },
.info = {
.name = "Toshiba TC90522 ISDB-T module",
- .frequency_min = 470000000,
- .frequency_max = 770000000,
- .frequency_stepsize = 142857,
+ .frequency_min_hz = 470 * MHz,
+ .frequency_max_hz = 770 * MHz,
+ .frequency_stepsize_hz = 142857,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/tda10021.c b/drivers/media/dvb-frontends/tda10021.c
index 4f588ebde39d..5cd885d4ea04 100644
--- a/drivers/media/dvb-frontends/tda10021.c
+++ b/drivers/media/dvb-frontends/tda10021.c
@@ -487,11 +487,11 @@ static const struct dvb_frontend_ops tda10021_ops = {
.delsys = { SYS_DVBC_ANNEX_A, SYS_DVBC_ANNEX_C },
.info = {
.name = "Philips TDA10021 DVB-C",
- .frequency_stepsize = 62500,
- .frequency_min = 47000000,
- .frequency_max = 862000000,
- .symbol_rate_min = (XIN/2)/64, /* SACLK/64 == (XIN/2)/64 */
- .symbol_rate_max = (XIN/2)/4, /* SACLK/4 */
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 62500,
+ .symbol_rate_min = (XIN / 2) / 64, /* SACLK/64 == (XIN/2)/64 */
+ .symbol_rate_max = (XIN / 2) / 4, /* SACLK/4 */
#if 0
.frequency_tolerance = ???,
.symbol_rate_tolerance = ???, /* ppm */ /* == 8% (spec p. 5) */
diff --git a/drivers/media/dvb-frontends/tda10023.c b/drivers/media/dvb-frontends/tda10023.c
index 6c84916234e3..0a9a54563ebe 100644
--- a/drivers/media/dvb-frontends/tda10023.c
+++ b/drivers/media/dvb-frontends/tda10023.c
@@ -575,9 +575,9 @@ static const struct dvb_frontend_ops tda10023_ops = {
.delsys = { SYS_DVBC_ANNEX_A, SYS_DVBC_ANNEX_C },
.info = {
.name = "Philips TDA10023 DVB-C",
- .frequency_stepsize = 62500,
- .frequency_min = 47000000,
- .frequency_max = 862000000,
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 62500,
.symbol_rate_min = 0, /* set in tda10023_attach */
.symbol_rate_max = 0, /* set in tda10023_attach */
.caps = 0x400 | //FE_CAN_QAM_4
diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
index de82a2558e15..c01d60a88af2 100644
--- a/drivers/media/dvb-frontends/tda10048.c
+++ b/drivers/media/dvb-frontends/tda10048.c
@@ -1156,9 +1156,9 @@ static const struct dvb_frontend_ops tda10048_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "NXP TDA10048HN DVB-T",
- .frequency_min = 177000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166666,
+ .frequency_min_hz = 177 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
index 7dcfb4a4b2d0..d402e4b722ca 100644
--- a/drivers/media/dvb-frontends/tda1004x.c
+++ b/drivers/media/dvb-frontends/tda1004x.c
@@ -1249,9 +1249,9 @@ static const struct dvb_frontend_ops tda10045_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Philips TDA10045H DVB-T",
- .frequency_min = 51000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166667,
+ .frequency_min_hz = 51 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps =
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
@@ -1319,9 +1319,9 @@ static const struct dvb_frontend_ops tda10046_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Philips TDA10046H DVB-T",
- .frequency_min = 51000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166667,
+ .frequency_min_hz = 51 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps =
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 1ed67c08e699..097c42d3f8c2 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -681,8 +681,8 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
cmd.args[5] = (c->frequency >> 0) & 0xff;
cmd.args[6] = ((c->symbol_rate / 1000) >> 8) & 0xff;
cmd.args[7] = ((c->symbol_rate / 1000) >> 0) & 0xff;
- cmd.args[8] = (tda10071_ops.info.frequency_tolerance >> 8) & 0xff;
- cmd.args[9] = (tda10071_ops.info.frequency_tolerance >> 0) & 0xff;
+ cmd.args[8] = ((tda10071_ops.info.frequency_tolerance_hz / 1000) >> 8) & 0xff;
+ cmd.args[9] = ((tda10071_ops.info.frequency_tolerance_hz / 1000) >> 0) & 0xff;
cmd.args[10] = rolloff;
cmd.args[11] = inversion;
cmd.args[12] = pilot;
@@ -1106,9 +1106,9 @@ static const struct dvb_frontend_ops tda10071_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "NXP TDA10071",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_tolerance = 5000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_tolerance_hz = 5 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
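
With the tolerance now stored in Hz, dividing by 1000 recovers the kHz value the firmware command expects, so the bytes on the wire are unchanged: 5 MHz / 1000 = 5000 = 0x1388. A quick user-space check:

/* Quick check that the tda10071 firmware bytes are unchanged. */
#include <stdio.h>

#define MHz 1000000

int main(void)
{
	unsigned int tol_hz = 5 * MHz;		/* .frequency_tolerance_hz */
	unsigned int tol_khz = tol_hz / 1000;	/* 5000, the old kHz value */

	printf("args[8]=0x%02x args[9]=0x%02x\n",
	       (tol_khz >> 8) & 0xff, tol_khz & 0xff);	/* 0x13 0x88 */
	return 0;
}
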
diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c
index 1a95c521e97f..8323e4e53d66 100644
--- a/drivers/media/dvb-frontends/tda10086.c
+++ b/drivers/media/dvb-frontends/tda10086.c
@@ -710,9 +710,9 @@ static const struct dvb_frontend_ops tda10086_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Philips TDA10086 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 125, /* kHz for QPSK frontends */
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 125 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index 2e1d36ae943b..5ce58612315d 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -1154,6 +1154,7 @@ static int set_params(struct dvb_frontend *fe)
default:
return -EINVAL;
}
+ break;
case SYS_DVBC_ANNEX_A:
case SYS_DVBC_ANNEX_C:
if (bw <= 6000000)
@@ -1214,9 +1215,9 @@ static int get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
static const struct dvb_tuner_ops tuner_ops = {
.info = {
.name = "NXP TDA18271C2D",
- .frequency_min = 47125000,
- .frequency_max = 865000000,
- .frequency_step = 62500
+ .frequency_min_hz = 47125 * kHz,
+ .frequency_max_hz = 865 * MHz,
+ .frequency_step_hz = 62500
},
.init = init,
.sleep = sleep,
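
The added break above closes a switch fallthrough: a successfully configured terrestrial case otherwise continued into the cable bandwidth logic below it. A minimal standalone illustration of the pattern (the systems are made up):

/* Minimal illustration of the fallthrough the new "break" closes. */
#include <stdio.h>

static void set_params(int sys)
{
	switch (sys) {
	case 0:				/* DVB-T-like */
		printf("DVB-T setup\n");
		break;			/* without this: falls into DVB-C */
	case 1:				/* DVB-C-like */
		printf("DVB-C setup\n");
		break;
	}
}

int main(void)
{
	set_params(0);
	return 0;
}
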
diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c
index 3ef7140ed7f3..8766c9ff6680 100644
--- a/drivers/media/dvb-frontends/tda665x.c
+++ b/drivers/media/dvb-frontends/tda665x.c
@@ -231,9 +231,9 @@ struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
info = &fe->ops.tuner_ops.info;
memcpy(info->name, config->name, sizeof(config->name));
- info->frequency_min = config->frequency_min;
- info->frequency_max = config->frequency_max;
- info->frequency_step = config->frequency_offst;
+ info->frequency_min_hz = config->frequency_min;
+ info->frequency_max_hz = config->frequency_max;
+ info->frequency_step_hz = config->frequency_offst;
printk(KERN_DEBUG "%s: Attaching TDA665x (%s) tuner\n", __func__, info->name);
diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
index 29b4f64c030c..53b26060db7e 100644
--- a/drivers/media/dvb-frontends/tda8083.c
+++ b/drivers/media/dvb-frontends/tda8083.c
@@ -453,10 +453,9 @@ static const struct dvb_frontend_ops tda8083_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Philips TDA8083 DVB-S",
- .frequency_min = 920000, /* TDA8060 */
- .frequency_max = 2200000, /* TDA8060 */
- .frequency_stepsize = 125, /* kHz for QPSK frontends */
- /* .frequency_tolerance = ???,*/
+ .frequency_min_hz = 920 * MHz, /* TDA8060 */
+ .frequency_max_hz = 2200 * MHz, /* TDA8060 */
+ .frequency_stepsize_hz = 125 * kHz,
.symbol_rate_min = 12000000,
.symbol_rate_max = 30000000,
/* .symbol_rate_tolerance = ???,*/
diff --git a/drivers/media/dvb-frontends/tda8261.c b/drivers/media/dvb-frontends/tda8261.c
index f72a54e7eb23..500f50b81b66 100644
--- a/drivers/media/dvb-frontends/tda8261.c
+++ b/drivers/media/dvb-frontends/tda8261.c
@@ -163,10 +163,9 @@ static void tda8261_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops tda8261_ops = {
.info = {
- .name = "TDA8261",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 0
+ .name = "TDA8261",
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
},
.set_params = tda8261_set_params,
@@ -190,7 +189,7 @@ struct dvb_frontend *tda8261_attach(struct dvb_frontend *fe,
fe->tuner_priv = state;
fe->ops.tuner_ops = tda8261_ops;
- fe->ops.tuner_ops.info.frequency_step = div_tab[config->step_size];
+ fe->ops.tuner_ops.info.frequency_step_hz = div_tab[config->step_size] * kHz;
pr_info("%s: Attaching TDA8261 8PSK/QPSK tuner\n", __func__);
diff --git a/drivers/media/dvb-frontends/tda826x.c b/drivers/media/dvb-frontends/tda826x.c
index da427b4c2aaa..100da5d5fdc5 100644
--- a/drivers/media/dvb-frontends/tda826x.c
+++ b/drivers/media/dvb-frontends/tda826x.c
@@ -131,8 +131,8 @@ static int tda826x_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops tda826x_tuner_ops = {
.info = {
.name = "Philips TDA826X",
- .frequency_min = 950000,
- .frequency_max = 2175000
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2175 * MHz
},
.release = tda826x_release,
.sleep = tda826x_sleep,
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index c55882a8da06..3e3e40878633 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -498,8 +498,8 @@ static int ts2020_read_signal_strength(struct dvb_frontend *fe,
static const struct dvb_tuner_ops ts2020_tuner_ops = {
.info = {
.name = "TS2020",
- .frequency_min = 950000,
- .frequency_max = 2150000
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz
},
.init = ts2020_init,
.release = ts2020_release,
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index 1d41abd47f04..b233b7be0b84 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -155,9 +155,9 @@ static int tua6100_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops tua6100_tuner_ops = {
.info = {
.name = "Infineon TUA6100",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_step = 1000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_step_hz = 1 * MHz,
},
.release = tua6100_release,
.sleep = tua6100_sleep,
diff --git a/drivers/media/dvb-frontends/ves1820.c b/drivers/media/dvb-frontends/ves1820.c
index 17600989f121..eb1249d81310 100644
--- a/drivers/media/dvb-frontends/ves1820.c
+++ b/drivers/media/dvb-frontends/ves1820.c
@@ -412,9 +412,9 @@ static const struct dvb_frontend_ops ves1820_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "VLSI VES1820 DVB-C",
- .frequency_stepsize = 62500,
- .frequency_min = 47000000,
- .frequency_max = 862000000,
+ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_QAM_16 |
FE_CAN_QAM_32 |
FE_CAN_QAM_64 |
diff --git a/drivers/media/dvb-frontends/ves1x93.c b/drivers/media/dvb-frontends/ves1x93.c
index 0c7b3286b04d..ddc5bfd84cd5 100644
--- a/drivers/media/dvb-frontends/ves1x93.c
+++ b/drivers/media/dvb-frontends/ves1x93.c
@@ -516,10 +516,10 @@ static const struct dvb_frontend_ops ves1x93_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "VLSI VES1x93 DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 125, /* kHz for QPSK frontends */
- .frequency_tolerance = 29500,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 125 * kHz,
+ .frequency_tolerance_hz = 29500 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
/* .symbol_rate_tolerance = ???,*/
diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
index 89dd65ae88ad..f1c92338015d 100644
--- a/drivers/media/dvb-frontends/zl10036.c
+++ b/drivers/media/dvb-frontends/zl10036.c
@@ -311,8 +311,8 @@ static int zl10036_set_params(struct dvb_frontend *fe)
/* ensure correct values
* maybe redundant as core already checks this */
- if ((frequency < fe->ops.info.frequency_min)
- || (frequency > fe->ops.info.frequency_max))
+ if ((frequency < fe->ops.info.frequency_min_hz / kHz)
+ || (frequency > fe->ops.info.frequency_max_hz / kHz))
return -EINVAL;
/*
@@ -443,8 +443,8 @@ static int zl10036_init(struct dvb_frontend *fe)
static const struct dvb_tuner_ops zl10036_tuner_ops = {
.info = {
.name = "Zarlink ZL10036",
- .frequency_min = 950000,
- .frequency_max = 2175000
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2175 * MHz
},
.init = zl10036_init,
.release = zl10036_release,
diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
index c9901f45deb7..42e63a3fa121 100644
--- a/drivers/media/dvb-frontends/zl10353.c
+++ b/drivers/media/dvb-frontends/zl10353.c
@@ -635,10 +635,9 @@ static const struct dvb_frontend_ops zl10353_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Zarlink ZL10353 DVB-T",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 166667,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
FE_CAN_FEC_AUTO |
diff --git a/drivers/media/firewire/firedtv-fe.c b/drivers/media/firewire/firedtv-fe.c
index a2ef4ede8ebe..69087ae6c1d0 100644
--- a/drivers/media/firewire/firedtv-fe.c
+++ b/drivers/media/firewire/firedtv-fe.c
@@ -152,7 +152,7 @@ static int fdtv_set_frontend(struct dvb_frontend *fe)
void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
{
struct dvb_frontend_ops *ops = &fdtv->fe.ops;
- struct dvb_frontend_info *fi = &ops->info;
+ struct dvb_frontend_internal_info *fi = &ops->info;
ops->init = fdtv_dvb_init;
ops->sleep = fdtv_sleep;
@@ -174,9 +174,9 @@ void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
case FIREDTV_DVB_S:
ops->delsys[0] = SYS_DVBS;
- fi->frequency_min = 950000;
- fi->frequency_max = 2150000;
- fi->frequency_stepsize = 125;
+ fi->frequency_min_hz = 950 * MHz;
+ fi->frequency_max_hz = 2150 * MHz;
+ fi->frequency_stepsize_hz = 125 * kHz;
fi->symbol_rate_min = 1000000;
fi->symbol_rate_max = 40000000;
@@ -194,9 +194,9 @@ void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
ops->delsys[0] = SYS_DVBS;
ops->delsys[1] = SYS_DVBS2;
- fi->frequency_min = 950000;
- fi->frequency_max = 2150000;
- fi->frequency_stepsize = 125;
+ fi->frequency_min_hz = 950 * MHz;
+ fi->frequency_max_hz = 2150 * MHz;
+ fi->frequency_stepsize_hz = 125 * kHz;
fi->symbol_rate_min = 1000000;
fi->symbol_rate_max = 40000000;
@@ -214,9 +214,9 @@ void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
case FIREDTV_DVB_C:
ops->delsys[0] = SYS_DVBC_ANNEX_A;
- fi->frequency_min = 47000000;
- fi->frequency_max = 866000000;
- fi->frequency_stepsize = 62500;
+ fi->frequency_min_hz = 47 * MHz;
+ fi->frequency_max_hz = 866 * MHz;
+ fi->frequency_stepsize_hz = 62500;
fi->symbol_rate_min = 870000;
fi->symbol_rate_max = 6900000;
@@ -232,9 +232,9 @@ void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
case FIREDTV_DVB_T:
ops->delsys[0] = SYS_DVBT;
- fi->frequency_min = 49000000;
- fi->frequency_max = 861000000;
- fi->frequency_stepsize = 62500;
+ fi->frequency_min_hz = 49 * MHz;
+ fi->frequency_max_hz = 861 * MHz;
+ fi->frequency_stepsize_hz = 62500;
fi->caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_2_3 |
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 341452fe98df..82af97430e5b 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -326,6 +326,16 @@ config VIDEO_AD5820
This is a driver for the AD5820 camera lens voice coil.
It is used for example in Nokia N900 (RX-51).
+config VIDEO_AK7375
+ tristate "AK7375 lens voice coil support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2_SUBDEV_API
+ help
+ This is a driver for the AK7375 camera lens voice coil.
+	  AK7375 is a 12-bit DAC with 120 mA output current sink
+	  capability. It is designed for linear control of voice coil
+	  motors and is controlled via an I2C serial interface.
+
config VIDEO_DW9714
tristate "DW9714 lens voice coil support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
@@ -336,6 +346,16 @@ config VIDEO_DW9714
capability. This is designed for linear control of
voice coil motors, controlled via I2C serial interface.
+config VIDEO_DW9807_VCM
+ tristate "DW9807 lens voice coil support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This is a driver for the DW9807 camera lens voice coil.
+	  DW9807 is a 10-bit DAC with 100 mA output current sink
+	  capability. It is designed for linear control of voice coil
+	  motors and is controlled via an I2C serial interface.
+
config VIDEO_SAA7110
tristate "Philips SAA7110 video decoder"
depends on VIDEO_V4L2 && I2C
@@ -378,7 +398,7 @@ config VIDEO_TVP514X
depends on VIDEO_V4L2 && I2C
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the TI TVP5146/47
+ This is a Video4Linux2 sensor driver for the TI TVP5146/47
decoder. It is currently working with the TI OMAP3 camera
controller.
@@ -580,7 +600,7 @@ config VIDEO_IMX258
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the Sony
+ This is a Video4Linux2 sensor driver for the Sony
IMX258 camera.
To compile this driver as a module, choose M here: the
@@ -591,7 +611,7 @@ config VIDEO_IMX274
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a V4L2 sensor-level driver for the Sony IMX274
+ This is a V4L2 sensor driver for the Sony IMX274
CMOS image sensor.
config VIDEO_OV2640
@@ -599,7 +619,7 @@ config VIDEO_OV2640
depends on VIDEO_V4L2 && I2C
depends on MEDIA_CAMERA_SUPPORT
help
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV2640 camera.
To compile this driver as a module, choose M here: the
@@ -611,19 +631,31 @@ config VIDEO_OV2659
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV2659 camera.
To compile this driver as a module, choose M here: the
module will be called ov2659.
+config VIDEO_OV2680
+ tristate "OmniVision OV2680 sensor support"
+ depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
+ depends on MEDIA_CAMERA_SUPPORT
+ select V4L2_FWNODE
+ ---help---
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV2680 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov2680.
+
config VIDEO_OV2685
tristate "OmniVision OV2685 sensor support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV2685 camera.
To compile this driver as a module, choose M here: the
@@ -636,7 +668,7 @@ config VIDEO_OV5640
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the Omnivision
+ This is a Video4Linux2 sensor driver for the Omnivision
OV5640 camera sensor with a MIPI CSI-2 interface.
config VIDEO_OV5645
@@ -646,7 +678,7 @@ config VIDEO_OV5645
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV5645 camera.
To compile this driver as a module, choose M here: the
@@ -658,7 +690,7 @@ config VIDEO_OV5647
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV5647 camera.
To compile this driver as a module, choose M here: the
@@ -669,7 +701,7 @@ config VIDEO_OV6650
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV6650 camera.
To compile this driver as a module, choose M here: the
@@ -682,7 +714,7 @@ config VIDEO_OV5670
depends on MEDIA_CONTROLLER
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV5670 camera.
To compile this driver as a module, choose M here: the
@@ -693,7 +725,7 @@ config VIDEO_OV5695
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV5695 camera.
To compile this driver as a module, choose M here: the
@@ -705,7 +737,7 @@ config VIDEO_OV7251
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV7251 camera.
To compile this driver as a module, choose M here: the
@@ -716,7 +748,7 @@ config VIDEO_OV772X
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV772x camera.
To compile this driver as a module, choose M here: the
@@ -727,7 +759,7 @@ config VIDEO_OV7640
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV7640 camera.
To compile this driver as a module, choose M here: the
@@ -739,7 +771,7 @@ config VIDEO_OV7670
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV7670 VGA camera. It currently only works with the M88ALP01
controller.
@@ -748,14 +780,14 @@ config VIDEO_OV7740
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV7740 VGA camera sensor.
config VIDEO_OV9650
tristate "OmniVision OV9650/OV9652 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
---help---
- This is a V4L2 sensor-level driver for the Omnivision
+ This is a V4L2 sensor driver for the Omnivision
OV9650 and OV9652 camera sensors.
config VIDEO_OV13858
@@ -764,7 +796,7 @@ config VIDEO_OV13858
depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the OmniVision
+ This is a Video4Linux2 sensor driver for the OmniVision
OV13858 camera.
config VIDEO_VS6624
@@ -772,7 +804,7 @@ config VIDEO_VS6624
depends on VIDEO_V4L2 && I2C
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the ST VS6624
+ This is a Video4Linux2 sensor driver for the ST VS6624
camera.
To compile this driver as a module, choose M here: the
@@ -800,7 +832,7 @@ config VIDEO_MT9P031
depends on MEDIA_CAMERA_SUPPORT
select VIDEO_APTINA_PLL
---help---
- This is a Video4Linux2 sensor-level driver for the Aptina
+ This is a Video4Linux2 sensor driver for the Aptina
(Micron) mt9p031 5 Mpixel camera.
config VIDEO_MT9T001
@@ -808,7 +840,7 @@ config VIDEO_MT9T001
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the Aptina
+ This is a Video4Linux2 sensor driver for the Aptina
(Micron) mt9t001 3 Mpixel camera.
config VIDEO_MT9T112
@@ -816,7 +848,7 @@ config VIDEO_MT9T112
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the Aptina
+ This is a Video4Linux2 sensor driver for the Aptina
(Micron) MT9T111 and MT9T112 3 Mpixel camera.
To compile this driver as a module, choose M here: the
@@ -827,7 +859,7 @@ config VIDEO_MT9V011
depends on I2C && VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
---help---
- This is a Video4Linux2 sensor-level driver for the Micron
+ This is a Video4Linux2 sensor driver for the Micron
mt9v011 1.3 Mpixel camera. It currently only works with the
em28xx driver.
@@ -838,9 +870,20 @@ config VIDEO_MT9V032
select REGMAP_I2C
select V4L2_FWNODE
---help---
- This is a Video4Linux2 sensor-level driver for the Micron
+ This is a Video4Linux2 sensor driver for the Micron
MT9V032 752x480 CMOS sensor.
+config VIDEO_MT9V111
+ tristate "Aptina MT9V111 sensor support"
+ depends on I2C && VIDEO_V4L2
+ depends on MEDIA_CAMERA_SUPPORT
+ help
+ This is a Video4Linux2 sensor driver for the Aptina/Micron
+ MT9V111 sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mt9v111.
+
config VIDEO_SR030PC30
tristate "Siliconfile SR030PC30 sensor support"
depends on I2C && VIDEO_V4L2
@@ -857,12 +900,23 @@ config VIDEO_NOON010PC30
source "drivers/media/i2c/m5mols/Kconfig"
+config VIDEO_RJ54N1
+ tristate "Sharp RJ54N1CB0C sensor support"
+ depends on I2C && VIDEO_V4L2
+ depends on MEDIA_CAMERA_SUPPORT
+ help
+ This is a V4L2 sensor driver for Sharp RJ54N1CB0C CMOS image
+ sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rj54n1.
+
config VIDEO_S5K6AA
tristate "Samsung S5K6AAFX sensor support"
depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
---help---
- This is a V4L2 sensor-level driver for Samsung S5K6AA(FX) 1.3M
+ This is a V4L2 sensor driver for Samsung S5K6AA(FX) 1.3M
camera sensor with an embedded SoC image signal processor.
config VIDEO_S5K6A3
@@ -870,7 +924,7 @@ config VIDEO_S5K6A3
depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
---help---
- This is a V4L2 sensor-level driver for Samsung S5K6A3 raw
+ This is a V4L2 sensor driver for Samsung S5K6A3 raw
camera sensor.
config VIDEO_S5K4ECGX
@@ -878,7 +932,7 @@ config VIDEO_S5K4ECGX
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select CRC32
---help---
- This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
+ This is a V4L2 sensor driver for Samsung S5K4ECGX 5M
camera sensor with an embedded SoC image signal processor.
config VIDEO_S5K5BAF
@@ -886,7 +940,7 @@ config VIDEO_S5K5BAF
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
---help---
- This is a V4L2 sensor-level driver for Samsung S5K5BAF 2M
+ This is a V4L2 sensor driver for Samsung S5K5BAF 2M
camera sensor with an embedded SoC image signal processor.
source "drivers/media/i2c/smiapp/Kconfig"
@@ -897,7 +951,7 @@ config VIDEO_S5C73M3
depends on I2C && SPI && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
---help---
- This is a V4L2 sensor-level driver for Samsung S5C73M3
+ This is a V4L2 sensor driver for Samsung S5C73M3
8 Mpixel camera.
comment "Flash devices"
@@ -1002,6 +1056,7 @@ config VIDEO_I2C
tristate "I2C transport video support"
depends on VIDEO_V4L2 && I2C
select VIDEOBUF2_VMALLOC
+ imply HWMON
---help---
Enable the I2C transport video support which supports the
following:
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index d679d57cd3b3..a94eb03d10d4 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -23,7 +23,9 @@ obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o
obj-$(CONFIG_VIDEO_SAA6752HS) += saa6752hs.o
obj-$(CONFIG_VIDEO_AD5820) += ad5820.o
+obj-$(CONFIG_VIDEO_AK7375) += ak7375.o
obj-$(CONFIG_VIDEO_DW9714) += dw9714.o
+obj-$(CONFIG_VIDEO_DW9807_VCM) += dw9807-vcm.o
obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
@@ -63,6 +65,7 @@ obj-$(CONFIG_VIDEO_SONY_BTF_MPX) += sony-btf-mpx.o
obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
obj-$(CONFIG_VIDEO_OV2640) += ov2640.o
+obj-$(CONFIG_VIDEO_OV2680) += ov2680.o
obj-$(CONFIG_VIDEO_OV2685) += ov2685.o
obj-$(CONFIG_VIDEO_OV5640) += ov5640.o
obj-$(CONFIG_VIDEO_OV5645) += ov5645.o
@@ -84,8 +87,10 @@ obj-$(CONFIG_VIDEO_MT9T001) += mt9t001.o
obj-$(CONFIG_VIDEO_MT9T112) += mt9t112.o
obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
+obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o
obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
+obj-$(CONFIG_VIDEO_RJ54N1) += rj54n1cb0c.o
obj-$(CONFIG_VIDEO_S5K6AA) += s5k6aa.o
obj-$(CONFIG_VIDEO_S5K6A3) += s5k6a3.o
obj-$(CONFIG_VIDEO_S5K4ECGX) += s5k4ecgx.o
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index 91ff06088572..5b008b0002c0 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -1134,6 +1134,7 @@ static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *
goto err_hdl;
}
state->pad.flags = MEDIA_PAD_FL_SINK;
+ sd->entity.function = MEDIA_ENT_F_DV_ENCODER;
err = media_entity_pads_init(&sd->entity, 1, &state->pad);
if (err)
goto err_hdl;
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 25d24a3f10a7..de10367d550b 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -461,6 +461,22 @@ static int adv7180_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
return 0;
}
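+
+/*
+ * Report the nominal frame interval for the detected video standard:
+ * 1001/30000 s (29.97 fps) for 525-line/60 Hz standards, 1/25 s
+ * (25 fps) otherwise.
+ */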
+static int adv7180_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct adv7180_state *state = to_state(sd);
+
+ if (state->curr_norm & V4L2_STD_525_60) {
+ fi->interval.numerator = 1001;
+ fi->interval.denominator = 30000;
+ } else {
+ fi->interval.numerator = 1;
+ fi->interval.denominator = 25;
+ }
+
+ return 0;
+}
+
static void adv7180_set_power_pin(struct adv7180_state *state, bool on)
{
if (!state->pwdn_gpio)
@@ -644,6 +660,9 @@ static int adv7180_mbus_fmt(struct v4l2_subdev *sd,
fmt->width = 720;
fmt->height = state->curr_norm & V4L2_STD_525_60 ? 480 : 576;
+ if (state->field == V4L2_FIELD_ALTERNATE)
+ fmt->height /= 2;
+
return 0;
}
@@ -711,11 +730,11 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
switch (format->format.field) {
case V4L2_FIELD_NONE:
- if (!(state->chip_info->flags & ADV7180_FLAG_I2P))
- format->format.field = V4L2_FIELD_INTERLACED;
- break;
+ if (state->chip_info->flags & ADV7180_FLAG_I2P)
+ break;
+ /* fall through */
default:
- format->format.field = V4L2_FIELD_INTERLACED;
+ format->format.field = V4L2_FIELD_ALTERNATE;
break;
}
@@ -817,6 +836,7 @@ static int adv7180_subscribe_event(struct v4l2_subdev *sd,
static const struct v4l2_subdev_video_ops adv7180_video_ops = {
.s_std = adv7180_s_std,
.g_std = adv7180_g_std,
+ .g_frame_interval = adv7180_g_frame_interval,
.querystd = adv7180_querystd,
.g_input_status = adv7180_g_input_status,
.s_routing = adv7180_s_routing,
@@ -1291,7 +1311,7 @@ static int adv7180_probe(struct i2c_client *client,
return -ENOMEM;
state->client = client;
- state->field = V4L2_FIELD_INTERLACED;
+ state->field = V4L2_FIELD_ALTERNATE;
state->chip_info = (struct adv7180_chip_info *)id->driver_data;
state->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
@@ -1335,7 +1355,7 @@ static int adv7180_probe(struct i2c_client *client,
goto err_unregister_vpp_client;
state->pad.flags = MEDIA_PAD_FL_SOURCE;
- sd->entity.flags |= MEDIA_ENT_F_ATV_DECODER;
+ sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
ret = media_entity_pads_init(&sd->entity, 1, &state->pad);
if (ret)
goto err_free_ctrl;
diff --git a/drivers/media/i2c/adv748x/adv748x-csi2.c b/drivers/media/i2c/adv748x/adv748x-csi2.c
index 820b44ed56a8..469be87a3761 100644
--- a/drivers/media/i2c/adv748x/adv748x-csi2.c
+++ b/drivers/media/i2c/adv748x/adv748x-csi2.c
@@ -284,7 +284,7 @@ int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx)
adv748x_csi2_set_virtual_channel(tx, 0);
adv748x_subdev_init(&tx->sd, state, &adv748x_csi2_ops,
- MEDIA_ENT_F_UNKNOWN,
+ MEDIA_ENT_F_VID_IF_BRIDGE,
is_txa(tx) ? "txa" : "txb");
/* Ensure that matching is based upon the endpoint fwnodes */
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 5731751d3f2a..55c2ea0720d9 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -1847,6 +1847,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
goto err_hdl;
}
state->pad.flags = MEDIA_PAD_FL_SINK;
+ sd->entity.function = MEDIA_ENT_F_DV_ENCODER;
err = media_entity_pads_init(&sd->entity, 1, &state->pad);
if (err)
goto err_hdl;
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index cac2081e876e..668be2bca57a 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -3108,12 +3108,9 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
return -EINVAL;
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint), &bus_cfg);
- if (ret) {
- of_node_put(endpoint);
- return ret;
- }
-
of_node_put(endpoint);
+ if (ret)
+ return ret;
if (!of_property_read_u32(np, "default-input", &v))
state->pdata.default_input = v;
@@ -3502,6 +3499,7 @@ static int adv76xx_probe(struct i2c_client *client,
for (i = 0; i < state->source_pad; ++i)
state->pads[i].flags = MEDIA_PAD_FL_SINK;
state->pads[state->source_pad].flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.function = MEDIA_ENT_F_DV_DECODER;
err = media_entity_pads_init(&sd->entity, state->source_pad + 1,
state->pads);
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index fddac32e5051..4f8fbdd00e35 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3102,7 +3102,7 @@ static int adv7842_ddr_ram_test(struct v4l2_subdev *sd)
sdp_write(sd, 0x12, 0x00); /* Disable 3D comb, Frame TBC & 3DNR */
io_write(sd, 0xFF, 0x04); /* Reset memory controller */
- mdelay(5);
+ usleep_range(5000, 6000);
sdp_write(sd, 0x12, 0x00); /* Disable 3D Comb, Frame TBC & 3DNR */
sdp_io_write(sd, 0x2A, 0x01); /* Memory BIST Initialisation */
@@ -3116,12 +3116,12 @@ static int adv7842_ddr_ram_test(struct v4l2_subdev *sd)
sdp_io_write(sd, 0x7d, 0x00); /* Memory BIST Initialisation */
sdp_io_write(sd, 0x7e, 0x1a); /* Memory BIST Initialisation */
- mdelay(5);
+ usleep_range(5000, 6000);
sdp_io_write(sd, 0xd9, 0xd5); /* Enable BIST Test */
sdp_write(sd, 0x12, 0x05); /* Enable FRAME TBC & 3D COMB */
- mdelay(20);
+ msleep(20);
for (i = 0; i < 10; i++) {
u8 result = sdp_io_read(sd, 0xdb);
@@ -3132,7 +3132,7 @@ static int adv7842_ddr_ram_test(struct v4l2_subdev *sd)
else
pass++;
}
- mdelay(20);
+ msleep(20);
}
v4l2_dbg(1, debug, sd,
@@ -3541,6 +3541,7 @@ static int adv7842_probe(struct i2c_client *client,
INIT_DELAYED_WORK(&state->delayed_work_enable_hotplug,
adv7842_delayed_work_enable_hotplug);
+ sd->entity.function = MEDIA_ENT_F_DV_DECODER;
state->pad.flags = MEDIA_PAD_FL_SOURCE;
err = media_entity_pads_init(&sd->entity, 1, &state->pad);
if (err)
diff --git a/drivers/media/i2c/ak7375.c b/drivers/media/i2c/ak7375.c
new file mode 100644
index 000000000000..7b14b11605ca
--- /dev/null
+++ b/drivers/media/i2c/ak7375.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#define AK7375_MAX_FOCUS_POS 4095
+/*
+ * This sets the minimum granularity for the focus positions.
+ * A value of 1 gives maximum accuracy for a desired focus position.
+ */
+#define AK7375_FOCUS_STEPS 1
+/*
+ * This acts as the minimum granularity of lens movement.
+ * Keep this value a power of 2, so the control steps can be
+ * uniformly adjusted for gradual lens movement, with desired
+ * number of control steps.
+ */
+#define AK7375_CTRL_STEPS 64
+#define AK7375_CTRL_DELAY_US 1000
+
+#define AK7375_REG_POSITION 0x0
+#define AK7375_REG_CONT 0x2
+#define AK7375_MODE_ACTIVE 0x0
+#define AK7375_MODE_STANDBY 0x40
+
+/* ak7375 device structure */
+struct ak7375_device {
+ struct v4l2_ctrl_handler ctrls_vcm;
+ struct v4l2_subdev sd;
+ struct v4l2_ctrl *focus;
+ /* active or standby mode */
+ bool active;
+};
+
+static inline struct ak7375_device *to_ak7375_vcm(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct ak7375_device, ctrls_vcm);
+}
+
+static inline struct ak7375_device *sd_to_ak7375_vcm(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct ak7375_device, sd);
+}
+
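+/*
+ * Write a one- or two-byte value to a register. The transfer is laid
+ * out as {addr, [MSB,] LSB}: buf[size] carries the low byte and, for a
+ * two-byte write, buf[1] carries the high byte, matching the chip's
+ * big-endian register layout.
+ */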
+static int ak7375_i2c_write(struct ak7375_device *ak7375,
+ u8 addr, u16 data, u8 size)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ak7375->sd);
+ u8 buf[3];
+ int ret;
+
+ if (size != 1 && size != 2)
+ return -EINVAL;
+ buf[0] = addr;
+ buf[size] = data & 0xff;
+ if (size == 2)
+ buf[1] = (data >> 8) & 0xff;
+ ret = i2c_master_send(client, (const char *)buf, size + 1);
+ if (ret < 0)
+ return ret;
+ if (ret != size + 1)
+ return -EIO;
+
+ return 0;
+}
+
+static int ak7375_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ak7375_device *dev_vcm = to_ak7375_vcm(ctrl);
+
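+	/* The 12-bit position value occupies bits [15:4] of the register pair */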
+ if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE)
+ return ak7375_i2c_write(dev_vcm, AK7375_REG_POSITION,
+ ctrl->val << 4, 2);
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops ak7375_vcm_ctrl_ops = {
+ .s_ctrl = ak7375_set_ctrl,
+};
+
+static int ak7375_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(sd->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(sd->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ak7375_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ pm_runtime_put(sd->dev);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops ak7375_int_ops = {
+ .open = ak7375_open,
+ .close = ak7375_close,
+};
+
+static const struct v4l2_subdev_ops ak7375_ops = { };
+
+static void ak7375_subdev_cleanup(struct ak7375_device *ak7375_dev)
+{
+ v4l2_async_unregister_subdev(&ak7375_dev->sd);
+ v4l2_ctrl_handler_free(&ak7375_dev->ctrls_vcm);
+ media_entity_cleanup(&ak7375_dev->sd.entity);
+}
+
+static int ak7375_init_controls(struct ak7375_device *dev_vcm)
+{
+ struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm;
+ const struct v4l2_ctrl_ops *ops = &ak7375_vcm_ctrl_ops;
+
+ v4l2_ctrl_handler_init(hdl, 1);
+
+ dev_vcm->focus = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE,
+ 0, AK7375_MAX_FOCUS_POS, AK7375_FOCUS_STEPS, 0);
+
+ if (hdl->error)
+ dev_err(dev_vcm->sd.dev, "%s fail error: 0x%x\n",
+ __func__, hdl->error);
+ dev_vcm->sd.ctrl_handler = hdl;
+
+ return hdl->error;
+}
+
+static int ak7375_probe(struct i2c_client *client)
+{
+ struct ak7375_device *ak7375_dev;
+ int ret;
+
+ ak7375_dev = devm_kzalloc(&client->dev, sizeof(*ak7375_dev),
+ GFP_KERNEL);
+ if (!ak7375_dev)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&ak7375_dev->sd, client, &ak7375_ops);
+ ak7375_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ak7375_dev->sd.internal_ops = &ak7375_int_ops;
+ ak7375_dev->sd.entity.function = MEDIA_ENT_F_LENS;
+
+ ret = ak7375_init_controls(ak7375_dev);
+ if (ret)
+ goto err_cleanup;
+
+ ret = media_entity_pads_init(&ak7375_dev->sd.entity, 0, NULL);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = v4l2_async_register_subdev(&ak7375_dev->sd);
+ if (ret < 0)
+ goto err_cleanup;
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+err_cleanup:
+ v4l2_ctrl_handler_free(&ak7375_dev->ctrls_vcm);
+ media_entity_cleanup(&ak7375_dev->sd.entity);
+
+ return ret;
+}
+
+static int ak7375_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd);
+
+ ak7375_subdev_cleanup(ak7375_dev);
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ return 0;
+}
+
+/*
+ * This function ramps the lens position down to zero and puts the
+ * chip into standby so that it consumes the least current. The lens
+ * position is moved gradually, in units of AK7375_CTRL_STEPS, to
+ * keep the movement smooth.
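+ *
+ * Worst case this is about 4096 / 64 = 64 steps of roughly 1 ms each,
+ * i.e. on the order of 64 ms to park the lens.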
+ */
+static int __maybe_unused ak7375_vcm_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd);
+ int ret, val;
+
+ if (!ak7375_dev->active)
+ return 0;
+
+ for (val = ak7375_dev->focus->val & ~(AK7375_CTRL_STEPS - 1);
+ val >= 0; val -= AK7375_CTRL_STEPS) {
+ ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_POSITION,
+ val << 4, 2);
+ if (ret)
+ dev_err_once(dev, "%s I2C failure: %d\n",
+ __func__, ret);
+ usleep_range(AK7375_CTRL_DELAY_US, AK7375_CTRL_DELAY_US + 10);
+ }
+
+ ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_CONT,
+ AK7375_MODE_STANDBY, 1);
+ if (ret)
+ dev_err(dev, "%s I2C failure: %d\n", __func__, ret);
+
+ ak7375_dev->active = false;
+
+ return 0;
+}
+
+/*
+ * This function restores the VCM position to the value last set by
+ * the user through the v4l2_ctrl_ops s_ctrl handler. The lens
+ * position is moved gradually, in units of AK7375_CTRL_STEPS, to
+ * keep the movement smooth.
+ */
+static int __maybe_unused ak7375_vcm_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd);
+ int ret, val;
+
+ if (ak7375_dev->active)
+ return 0;
+
+ ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_CONT,
+ AK7375_MODE_ACTIVE, 1);
+ if (ret) {
+ dev_err(dev, "%s I2C failure: %d\n", __func__, ret);
+ return ret;
+ }
+
+ for (val = ak7375_dev->focus->val % AK7375_CTRL_STEPS;
+ val <= ak7375_dev->focus->val;
+ val += AK7375_CTRL_STEPS) {
+ ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_POSITION,
+ val << 4, 2);
+ if (ret)
+ dev_err_ratelimited(dev, "%s I2C failure: %d\n",
+ __func__, ret);
+ usleep_range(AK7375_CTRL_DELAY_US, AK7375_CTRL_DELAY_US + 10);
+ }
+
+ ak7375_dev->active = true;
+
+ return 0;
+}
+
+static const struct of_device_id ak7375_of_table[] = {
+ { .compatible = "asahi-kasei,ak7375" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ak7375_of_table);
+
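+/* The same park/restore helpers serve both runtime PM and system sleep. */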
+static const struct dev_pm_ops ak7375_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ak7375_vcm_suspend, ak7375_vcm_resume)
+ SET_RUNTIME_PM_OPS(ak7375_vcm_suspend, ak7375_vcm_resume, NULL)
+};
+
+static struct i2c_driver ak7375_i2c_driver = {
+ .driver = {
+ .name = "ak7375",
+ .pm = &ak7375_pm_ops,
+ .of_match_table = ak7375_of_table,
+ },
+ .probe_new = ak7375_probe,
+ .remove = ak7375_remove,
+};
+module_i2c_driver(ak7375_i2c_driver);
+
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_DESCRIPTION("AK7375 VCM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/cx25840/cx25840-core.h b/drivers/media/i2c/cx25840/cx25840-core.h
index fb13a624d2e3..c323b1af1f83 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.h
+++ b/drivers/media/i2c/cx25840/cx25840-core.h
@@ -45,6 +45,35 @@ enum cx25840_media_pads {
CX25840_NUM_PADS
};
+/**
+ * struct cx25840_state - a device instance private data
+ * @c: i2c_client struct representing this device
+ * @sd: our V4L2 sub-device
+ * @hdl: our V4L2 control handler
+ * @volume: audio volume V4L2 control (non-cx2583x devices only)
+ * @mute: audio mute V4L2 control (non-cx2583x devices only)
+ * @pvr150_workaround: whether we enable the workaround for the
+ *		Hauppauge PVR150 hardware bug (audio dropping out)
+ * @radio: set if we are currently in the radio mode, otherwise
+ * the current mode is non-radio (that is, video)
+ * @std: currently set video standard
+ * @vid_input: currently set video input
+ * @aud_input: currently set audio input
+ * @audclk_freq: currently set audio sample rate
+ * @audmode: currently set audio mode (when in non-radio mode)
+ * @vbi_line_offset: vbi line number offset
+ * @id: exact device model
+ * @rev: raw device id read from the chip
+ * @is_initialized: whether we have already loaded firmware into the chip
+ * and initialized it
+ * @vbi_regs_offset: offset of vbi regs
+ * @fw_wait:		wait queue to wake an initialization function up when
+ * firmware loading (on a separate workqueue) finishes
+ * @fw_work: a work that actually loads the firmware on a separate
+ * workqueue
+ * @ir_state: a pointer to chip IR controller private data
+ * @pads: array of supported chip pads (currently only a stub)
+ */
struct cx25840_state {
struct i2c_client *c;
struct v4l2_subdev sd;
@@ -66,8 +95,8 @@ struct cx25840_state {
u32 rev;
int is_initialized;
unsigned vbi_regs_offset;
- wait_queue_head_t fw_wait; /* wake up when the fw load is finished */
- struct work_struct fw_work; /* work entry for fw load */
+ wait_queue_head_t fw_wait;
+ struct work_struct fw_work;
struct cx25840_ir_state *ir_state;
#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_pad pads[CX25840_NUM_PADS];
diff --git a/drivers/media/i2c/dw9807-vcm.c b/drivers/media/i2c/dw9807-vcm.c
new file mode 100644
index 000000000000..8ba3920b6e2f
--- /dev/null
+++ b/drivers/media/i2c/dw9807-vcm.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#define DW9807_MAX_FOCUS_POS 1023
+/*
+ * This sets the minimum granularity for the focus positions.
+ * A value of 1 gives maximum accuracy for a desired focus position.
+ */
+#define DW9807_FOCUS_STEPS 1
+/*
+ * This acts as the minimum granularity of lens movement.
+ * Keep this value a power of 2, so the control steps can be
+ * uniformly adjusted for gradual lens movement, with desired
+ * number of control steps.
+ */
+#define DW9807_CTRL_STEPS 16
+#define DW9807_CTRL_DELAY_US 1000
+
+#define DW9807_CTL_ADDR 0x02
+/*
+ * The DW9807 splits the VCM position across two registers:
+ * one holds the MSB value, the other the LSB value.
+ */
+#define DW9807_MSB_ADDR 0x03
+#define DW9807_LSB_ADDR 0x04
+#define DW9807_STATUS_ADDR 0x05
+#define DW9807_MODE_ADDR 0x06
+#define DW9807_RESONANCE_ADDR 0x07
+
+#define MAX_RETRY 10
+
+struct dw9807_device {
+ struct v4l2_ctrl_handler ctrls_vcm;
+ struct v4l2_subdev sd;
+ u16 current_val;
+};
+
+static inline struct dw9807_device *sd_to_dw9807_vcm(
+ struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct dw9807_device, sd);
+}
+
+static int dw9807_i2c_check(struct i2c_client *client)
+{
+ const char status_addr = DW9807_STATUS_ADDR;
+ char status_result;
+ int ret;
+
+ ret = i2c_master_send(client, &status_addr, sizeof(status_addr));
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C write STATUS address fail ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = i2c_master_recv(client, &status_result, sizeof(status_result));
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C read STATUS value fail ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ return status_result;
+}
+
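+/*
+ * Write the 10-bit DAC value in a single 3-byte transfer: the MSB
+ * (0x03) and LSB (0x04) registers are consecutive, so both are
+ * covered by one I2C write starting at DW9807_MSB_ADDR.
+ */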
+static int dw9807_set_dac(struct i2c_client *client, u16 data)
+{
+ const char tx_data[3] = {
+ DW9807_MSB_ADDR, ((data >> 8) & 0x03), (data & 0xff)
+ };
+ int val, ret;
+
+ /*
+	 * According to the datasheet, we need to check the bus status
+	 * before writing the VCM position. This ensures that the value
+	 * is really written into the register.
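+	 *
+	 * readx_poll_timeout() re-reads the status every
+	 * DW9807_CTRL_DELAY_US microseconds and gives up after
+	 * MAX_RETRY polls.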
+ */
+ ret = readx_poll_timeout(dw9807_i2c_check, client, val, val <= 0,
+ DW9807_CTRL_DELAY_US, MAX_RETRY * DW9807_CTRL_DELAY_US);
+
+ if (ret || val < 0) {
+ if (ret) {
+ dev_warn(&client->dev,
+ "Cannot do the write operation because VCM is busy\n");
+ }
+
+ return ret ? -EBUSY : val;
+ }
+
+ /* Write VCM position to registers */
+ ret = i2c_master_send(client, tx_data, sizeof(tx_data));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "I2C write MSB fail ret=%d\n", ret);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw9807_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct dw9807_device *dev_vcm = container_of(ctrl->handler,
+ struct dw9807_device, ctrls_vcm);
+
+ if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE) {
+ struct i2c_client *client = v4l2_get_subdevdata(&dev_vcm->sd);
+
+ dev_vcm->current_val = ctrl->val;
+ return dw9807_set_dac(client, ctrl->val);
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops dw9807_vcm_ctrl_ops = {
+ .s_ctrl = dw9807_set_ctrl,
+};
+
+static int dw9807_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int rval;
+
+ rval = pm_runtime_get_sync(sd->dev);
+ if (rval < 0) {
+ pm_runtime_put_noidle(sd->dev);
+ return rval;
+ }
+
+ return 0;
+}
+
+static int dw9807_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ pm_runtime_put(sd->dev);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops dw9807_int_ops = {
+ .open = dw9807_open,
+ .close = dw9807_close,
+};
+
+static const struct v4l2_subdev_ops dw9807_ops = { };
+
+static void dw9807_subdev_cleanup(struct dw9807_device *dw9807_dev)
+{
+ v4l2_async_unregister_subdev(&dw9807_dev->sd);
+ v4l2_ctrl_handler_free(&dw9807_dev->ctrls_vcm);
+ media_entity_cleanup(&dw9807_dev->sd.entity);
+}
+
+static int dw9807_init_controls(struct dw9807_device *dev_vcm)
+{
+ struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm;
+ const struct v4l2_ctrl_ops *ops = &dw9807_vcm_ctrl_ops;
+ struct i2c_client *client = v4l2_get_subdevdata(&dev_vcm->sd);
+
+ v4l2_ctrl_handler_init(hdl, 1);
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE,
+ 0, DW9807_MAX_FOCUS_POS, DW9807_FOCUS_STEPS, 0);
+
+ dev_vcm->sd.ctrl_handler = hdl;
+ if (hdl->error) {
+ dev_err(&client->dev, "%s fail error: 0x%x\n",
+ __func__, hdl->error);
+ return hdl->error;
+ }
+
+ return 0;
+}
+
+static int dw9807_probe(struct i2c_client *client)
+{
+ struct dw9807_device *dw9807_dev;
+ int rval;
+
+ dw9807_dev = devm_kzalloc(&client->dev, sizeof(*dw9807_dev),
+ GFP_KERNEL);
+ if (dw9807_dev == NULL)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&dw9807_dev->sd, client, &dw9807_ops);
+ dw9807_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dw9807_dev->sd.internal_ops = &dw9807_int_ops;
+
+ rval = dw9807_init_controls(dw9807_dev);
+ if (rval)
+ goto err_cleanup;
+
+ rval = media_entity_pads_init(&dw9807_dev->sd.entity, 0, NULL);
+ if (rval < 0)
+ goto err_cleanup;
+
+ dw9807_dev->sd.entity.function = MEDIA_ENT_F_LENS;
+
+ rval = v4l2_async_register_subdev(&dw9807_dev->sd);
+ if (rval < 0)
+ goto err_cleanup;
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+err_cleanup:
+ dw9807_subdev_cleanup(dw9807_dev);
+
+ return rval;
+}
+
+static int dw9807_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct dw9807_device *dw9807_dev = sd_to_dw9807_vcm(sd);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ dw9807_subdev_cleanup(dw9807_dev);
+
+ return 0;
+}
+
+/*
+ * This function ramps the lens position down to zero and powers the
+ * chip down so that it consumes the least current. The lens position
+ * is moved gradually, in units of DW9807_CTRL_STEPS, to keep the
+ * movement smooth.
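+ *
+ * Worst case this is about 1024 / 16 = 64 steps of roughly 1 ms each,
+ * i.e. on the order of 64 ms to park the lens.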
+ */
+static int __maybe_unused dw9807_vcm_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct dw9807_device *dw9807_dev = sd_to_dw9807_vcm(sd);
+ const char tx_data[2] = { DW9807_CTL_ADDR, 0x01 };
+ int ret, val;
+
+ for (val = dw9807_dev->current_val & ~(DW9807_CTRL_STEPS - 1);
+ val >= 0; val -= DW9807_CTRL_STEPS) {
+ ret = dw9807_set_dac(client, val);
+ if (ret)
+ dev_err_once(dev, "%s I2C failure: %d", __func__, ret);
+ usleep_range(DW9807_CTRL_DELAY_US, DW9807_CTRL_DELAY_US + 10);
+ }
+
+ /* Power down */
+ ret = i2c_master_send(client, tx_data, sizeof(tx_data));
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C write CTL fail ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This function restores the VCM position to the value last set by
+ * the user through the v4l2_ctrl_ops s_ctrl handler. The lens
+ * position is moved gradually, in units of DW9807_CTRL_STEPS, to
+ * keep the movement smooth.
+ */
+static int __maybe_unused dw9807_vcm_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct dw9807_device *dw9807_dev = sd_to_dw9807_vcm(sd);
+ const char tx_data[2] = { DW9807_CTL_ADDR, 0x00 };
+ int ret, val;
+
+ /* Power on */
+ ret = i2c_master_send(client, tx_data, sizeof(tx_data));
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C write CTL fail ret = %d\n", ret);
+ return ret;
+ }
+
+ for (val = dw9807_dev->current_val % DW9807_CTRL_STEPS;
+ val < dw9807_dev->current_val + DW9807_CTRL_STEPS - 1;
+ val += DW9807_CTRL_STEPS) {
+ ret = dw9807_set_dac(client, val);
+ if (ret)
+ dev_err_ratelimited(dev, "%s I2C failure: %d",
+ __func__, ret);
+ usleep_range(DW9807_CTRL_DELAY_US, DW9807_CTRL_DELAY_US + 10);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id dw9807_of_table[] = {
+ { .compatible = "dongwoon,dw9807-vcm" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dw9807_of_table);
+
+static const struct dev_pm_ops dw9807_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dw9807_vcm_suspend, dw9807_vcm_resume)
+ SET_RUNTIME_PM_OPS(dw9807_vcm_suspend, dw9807_vcm_resume, NULL)
+};
+
+static struct i2c_driver dw9807_i2c_driver = {
+ .driver = {
+ .name = "dw9807",
+ .pm = &dw9807_pm_ops,
+ .of_match_table = dw9807_of_table,
+ },
+ .probe_new = dw9807_probe,
+ .remove = dw9807_remove,
+};
+
+module_i2c_driver(dw9807_i2c_driver);
+
+MODULE_AUTHOR("Chiang, Alan <alanx.chiang@intel.com>");
+MODULE_DESCRIPTION("DW9807 VCM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index e9eff9039ef5..37ef38947e01 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1446,6 +1446,7 @@ static int et8ek8_probe(struct i2c_client *client,
sensor->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
sensor->subdev.internal_ops = &et8ek8_internal_ops;
+ sensor->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&sensor->subdev.entity, 1, &sensor->pad);
if (ret < 0) {
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index f3b124723aa0..31a1e2294843 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -1221,6 +1221,14 @@ static int imx258_probe(struct i2c_client *client)
if (val != 19200000)
return -EINVAL;
+ /*
+ * Check that the device is mounted upside down. The driver only
+ * supports a single pixel order right now.
+ */
+ ret = device_property_read_u32(&client->dev, "rotation", &val);
+ if (ret || val != 180)
+ return -EINVAL;
+
imx258 = devm_kzalloc(&client->dev, sizeof(*imx258), GFP_KERNEL);
if (!imx258)
return -ENOMEM;
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 63fb94e7da37..f8c70f1a34fe 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -5,6 +5,7 @@
*
* Leon Luo <leonl@leopardimaging.com>
* Edwin Zou <edwinz@leopardimaging.com>
+ * Luca Ceresoli <luca@lucaceresoli.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -25,6 +26,7 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
@@ -74,7 +76,7 @@
*/
#define IMX274_MIN_EXPOSURE_TIME (4 * 260 / 72)
-#define IMX274_DEFAULT_MODE IMX274_MODE_3840X2160
+#define IMX274_DEFAULT_MODE IMX274_BINNING_OFF
#define IMX274_MAX_WIDTH (3840)
#define IMX274_MAX_HEIGHT (2160)
#define IMX274_MAX_FRAME_RATE (120)
@@ -111,6 +113,20 @@
#define IMX274_SHR_REG_LSB 0x300C /* SHR */
#define IMX274_SVR_REG_MSB 0x300F /* SVR */
#define IMX274_SVR_REG_LSB 0x300E /* SVR */
+#define IMX274_HTRIM_EN_REG 0x3037
+#define IMX274_HTRIM_START_REG_LSB 0x3038
+#define IMX274_HTRIM_START_REG_MSB 0x3039
+#define IMX274_HTRIM_END_REG_LSB 0x303A
+#define IMX274_HTRIM_END_REG_MSB 0x303B
+#define IMX274_VWIDCUTEN_REG 0x30DD
+#define IMX274_VWIDCUT_REG_LSB 0x30DE
+#define IMX274_VWIDCUT_REG_MSB 0x30DF
+#define IMX274_VWINPOS_REG_LSB 0x30E0
+#define IMX274_VWINPOS_REG_MSB 0x30E1
+#define IMX274_WRITE_VSIZE_REG_LSB 0x3130
+#define IMX274_WRITE_VSIZE_REG_MSB 0x3131
+#define IMX274_Y_OUT_SIZE_REG_LSB 0x3132
+#define IMX274_Y_OUT_SIZE_REG_MSB 0x3133
#define IMX274_VMAX_REG_1 0x30FA /* VMAX, MSB */
#define IMX274_VMAX_REG_2 0x30F9 /* VMAX */
#define IMX274_VMAX_REG_3 0x30F8 /* VMAX, LSB */
@@ -140,17 +156,35 @@ static const struct regmap_config imx274_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-enum imx274_mode {
- IMX274_MODE_3840X2160,
- IMX274_MODE_1920X1080,
- IMX274_MODE_1280X720,
+enum imx274_binning {
+ IMX274_BINNING_OFF,
+ IMX274_BINNING_2_1,
+ IMX274_BINNING_3_1,
};
/*
- * imx274 format related structure
+ * Parameters for each imx274 readout mode.
+ *
+ * These are the values to configure the sensor in one of the
+ * implemented modes.
+ *
+ * @init_regs: registers to initialize the mode
+ * @bin_ratio: downscale factor (e.g. 3 for 3:1 binning)
+ * @min_frame_len: Minimum frame length for each mode (see "Frame Rate
+ * Adjustment (CSI-2)" in the datasheet)
+ * @min_SHR: Minimum SHR register value (see "Shutter Setting (CSI-2)" in the
+ * datasheet)
+ * @max_fps: Maximum frames per second
+ * @nocpiop: Number of clocks per internal offset period (see "Integration Time
+ * in Each Readout Drive Mode (CSI-2)" in the datasheet)
*/
struct imx274_frmfmt {
- struct v4l2_frmsize_discrete size;
+ const struct reg_8 *init_regs;
+ unsigned int bin_ratio;
+ int min_frame_len;
+ int min_SHR;
+ int max_fps;
+ int nocpiop;
};
/*
@@ -197,31 +231,14 @@ static const struct reg_8 imx274_mode1_3840x2160_raw10[] = {
{0x3004, 0x01},
{0x3005, 0x01},
{0x3006, 0x00},
- {0x3007, 0x02},
+ {0x3007, 0xa2},
{0x3018, 0xA2}, /* output XVS, HVS */
{0x306B, 0x05},
{0x30E2, 0x01},
- {0x30F6, 0x07}, /* HMAX, 263 */
- {0x30F7, 0x01}, /* HMAX */
-
- {0x30dd, 0x01}, /* crop to 2160 */
- {0x30de, 0x06},
- {0x30df, 0x00},
- {0x30e0, 0x12},
- {0x30e1, 0x00},
- {0x3037, 0x01}, /* to crop to 3840 */
- {0x3038, 0x0c},
- {0x3039, 0x00},
- {0x303a, 0x0c},
- {0x303b, 0x0f},
{0x30EE, 0x01},
- {0x3130, 0x86},
- {0x3131, 0x08},
- {0x3132, 0x7E},
- {0x3133, 0x08},
{0x3342, 0x0A},
{0x3343, 0x00},
{0x3344, 0x16},
@@ -255,32 +272,14 @@ static const struct reg_8 imx274_mode3_1920x1080_raw10[] = {
{0x3004, 0x02},
{0x3005, 0x21},
{0x3006, 0x00},
- {0x3007, 0x11},
+ {0x3007, 0xb1},
{0x3018, 0xA2}, /* output XVS, HVS */
{0x306B, 0x05},
{0x30E2, 0x02},
- {0x30F6, 0x04}, /* HMAX, 260 */
- {0x30F7, 0x01}, /* HMAX */
-
- {0x30dd, 0x01}, /* to crop to 1920x1080 */
- {0x30de, 0x05},
- {0x30df, 0x00},
- {0x30e0, 0x04},
- {0x30e1, 0x00},
- {0x3037, 0x01},
- {0x3038, 0x0c},
- {0x3039, 0x00},
- {0x303a, 0x0c},
- {0x303b, 0x0f},
-
{0x30EE, 0x01},
- {0x3130, 0x4E},
- {0x3131, 0x04},
- {0x3132, 0x46},
- {0x3133, 0x04},
{0x3342, 0x0A},
{0x3343, 0x00},
{0x3344, 0x1A},
@@ -313,31 +312,14 @@ static const struct reg_8 imx274_mode5_1280x720_raw10[] = {
{0x3004, 0x03},
{0x3005, 0x31},
{0x3006, 0x00},
- {0x3007, 0x09},
+ {0x3007, 0xa9},
{0x3018, 0xA2}, /* output XVS, HVS */
{0x306B, 0x05},
{0x30E2, 0x03},
- {0x30F6, 0x04}, /* HMAX, 260 */
- {0x30F7, 0x01}, /* HMAX */
-
- {0x30DD, 0x01},
- {0x30DE, 0x07},
- {0x30DF, 0x00},
- {0x40E0, 0x04},
- {0x30E1, 0x00},
- {0x3030, 0xD4},
- {0x3031, 0x02},
- {0x3032, 0xD0},
- {0x3033, 0x02},
-
{0x30EE, 0x01},
- {0x3130, 0xE2},
- {0x3131, 0x02},
- {0x3132, 0xDE},
- {0x3133, 0x02},
{0x3342, 0x0A},
{0x3343, 0x00},
{0x3344, 0x1B},
@@ -476,58 +458,35 @@ static const struct reg_8 imx274_tp_regs[] = {
{IMX274_TABLE_END, 0x00}
};
-static const struct reg_8 *mode_table[] = {
- [IMX274_MODE_3840X2160] = imx274_mode1_3840x2160_raw10,
- [IMX274_MODE_1920X1080] = imx274_mode3_1920x1080_raw10,
- [IMX274_MODE_1280X720] = imx274_mode5_1280x720_raw10,
-};
-
-/*
- * imx274 format related structure
- */
+/* nocpiop happens to be the same number for the implemented modes */
static const struct imx274_frmfmt imx274_formats[] = {
- { {3840, 2160} },
- { {1920, 1080} },
- { {1280, 720} },
-};
-
-/*
- * minimal frame length for each mode
- * refer to datasheet section "Frame Rate Adjustment (CSI-2)"
- */
-static const int min_frame_len[] = {
- 4550, /* mode 1, 4K */
- 2310, /* mode 3, 1080p */
- 2310 /* mode 5, 720p */
-};
-
-/*
- * minimal numbers of SHR register
- * refer to datasheet table "Shutter Setting (CSI-2)"
- */
-static const int min_SHR[] = {
- 12, /* mode 1, 4K */
- 8, /* mode 3, 1080p */
- 8 /* mode 5, 720p */
-};
-
-static const int max_frame_rate[] = {
- 60, /* mode 1 , 4K */
- 120, /* mode 3, 1080p */
- 120 /* mode 5, 720p */
-};
-
-/*
- * Number of clocks per internal offset period
- * a constant based on mode
- * refer to section "Integration Time in Each Readout Drive Mode (CSI-2)"
- * in the datasheet
- * for the implemented 3 modes, it happens to be the same number
- */
-static const int nocpiop[] = {
- 112, /* mode 1 , 4K */
- 112, /* mode 3, 1080p */
- 112 /* mode 5, 720p */
+ {
+ /* mode 1, 4K */
+ .bin_ratio = 1,
+ .init_regs = imx274_mode1_3840x2160_raw10,
+ .min_frame_len = 4550,
+ .min_SHR = 12,
+ .max_fps = 60,
+ .nocpiop = 112,
+ },
+ {
+ /* mode 3, 1080p */
+ .bin_ratio = 2,
+ .init_regs = imx274_mode3_1920x1080_raw10,
+ .min_frame_len = 2310,
+ .min_SHR = 8,
+ .max_fps = 120,
+ .nocpiop = 112,
+ },
+ {
+ /* mode 5, 720p */
+ .bin_ratio = 3,
+ .init_regs = imx274_mode5_1280x720_raw10,
+ .min_frame_len = 2310,
+ .min_SHR = 8,
+ .max_fps = 120,
+ .nocpiop = 112,
+ },
};
/*
@@ -549,29 +508,40 @@ struct imx274_ctrls {
/*
* struct stim274 - imx274 device structure
* @sd: V4L2 subdevice structure
- * @pd: Media pad structure
+ * @pad: Media pad structure
* @client: Pointer to I2C client
* @ctrls: imx274 control structure
+ * @crop: rect to be captured
+ * @compose: compose rect, i.e. output resolution
* @format: V4L2 media bus frame format structure
+ * (width and height are in sync with the compose rect)
* @frame_rate: V4L2 frame rate structure
* @regmap: Pointer to regmap structure
* @reset_gpio: Pointer to reset gpio
* @lock: Mutex structure
- * @mode_index: Resolution mode index
+ * @mode: Parameters for the selected readout mode
*/
struct stimx274 {
struct v4l2_subdev sd;
struct media_pad pad;
struct i2c_client *client;
struct imx274_ctrls ctrls;
+ struct v4l2_rect crop;
struct v4l2_mbus_framefmt format;
struct v4l2_fract frame_interval;
struct regmap *regmap;
struct gpio_desc *reset_gpio;
struct mutex lock; /* mutex lock for operations */
- u32 mode_index;
+ const struct imx274_frmfmt *mode;
};
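+/*
+ * Round dim to a multiple of step, honouring the V4L2 selection flags:
+ * V4L2_SEL_FLAG_GE rounds up, V4L2_SEL_FLAG_LE rounds down, and with
+ * neither flag the nearest multiple is chosen.
+ */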
+#define IMX274_ROUND(dim, step, flags) \
+ ((flags) & V4L2_SEL_FLAG_GE \
+ ? roundup((dim), (step)) \
+ : ((flags) & V4L2_SEL_FLAG_LE \
+ ? rounddown((dim), (step)) \
+ : rounddown((dim) + (step) / 2, (step))))
+
/*
* Function declaration
*/
@@ -602,20 +572,18 @@ static inline struct stimx274 *to_imx274(struct v4l2_subdev *sd)
}
/*
- * imx274_regmap_util_write_table_8 - Function for writing register table
- * @regmap: Pointer to device reg map structure
- * @table: Table containing register values
- * @wait_ms_addr: Flag for performing delay
- * @end_addr: Flag for incating end of table
+ * Writing a register table
+ *
+ * @priv: Pointer to device
+ * @table: Table containing register values (with optional delays)
*
 * This is used to write a register table into the sensor's register map.
*
* Return: 0 on success, errors otherwise
*/
-static int imx274_regmap_util_write_table_8(struct regmap *regmap,
- const struct reg_8 table[],
- u16 wait_ms_addr, u16 end_addr)
+static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
{
+ struct regmap *regmap = priv->regmap;
int err = 0;
const struct reg_8 *next;
u8 val;
@@ -627,8 +595,8 @@ static int imx274_regmap_util_write_table_8(struct regmap *regmap,
for (next = table;; next++) {
if ((next->addr != range_start + range_count) ||
- (next->addr == end_addr) ||
- (next->addr == wait_ms_addr) ||
+ (next->addr == IMX274_TABLE_END) ||
+ (next->addr == IMX274_TABLE_WAIT_MS) ||
(range_count == max_range_vals)) {
if (range_count == 1)
err = regmap_write(regmap,
@@ -647,10 +615,10 @@ static int imx274_regmap_util_write_table_8(struct regmap *regmap,
range_count = 0;
/* Handle special address values */
- if (next->addr == end_addr)
+ if (next->addr == IMX274_TABLE_END)
break;
- if (next->addr == wait_ms_addr) {
+ if (next->addr == IMX274_TABLE_WAIT_MS) {
msleep_range(next->val);
continue;
}
@@ -697,25 +665,42 @@ static inline int imx274_write_reg(struct stimx274 *priv, u16 addr, u8 val)
return err;
}
-static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
+/**
+ * Write a multibyte register.
+ *
+ * Uses a bulk write where possible.
+ *
+ * @priv: Pointer to device structure
+ * @addr: Address of the LSB register. Other registers must be
+ * consecutive, least-to-most significant.
+ * @val: Value to be written to the register (cpu endianness)
+ * @nbytes: Number of bytes to write (range: [1..3])
+ */
+static int imx274_write_mbreg(struct stimx274 *priv, u16 addr, u32 val,
+ size_t nbytes)
{
- return imx274_regmap_util_write_table_8(priv->regmap,
- table, IMX274_TABLE_WAIT_MS, IMX274_TABLE_END);
+ __le32 val_le = cpu_to_le32(val);
+ int err;
+
+ err = regmap_bulk_write(priv->regmap, addr, &val_le, nbytes);
+ if (err)
+ dev_err(&priv->client->dev,
+ "%s : i2c bulk write failed, %x = %x (%zu bytes)\n",
+ __func__, addr, val, nbytes);
+ else
+ dev_dbg(&priv->client->dev,
+ "%s : addr 0x%x, val=0x%x (%zu bytes)\n",
+ __func__, addr, val, nbytes);
+ return err;
}
/*
- * imx274_mode_regs - Function for set mode registers per mode index
+ * Set mode registers to start stream.
* @priv: Pointer to device structure
- * @mode: Mode index value
- *
- * This is used to start steam per mode index.
- * mode = 0, start stream for sensor Mode 1: 4K/raw10
- * mode = 1, start stream for sensor Mode 3: 1080p/raw10
- * mode = 2, start stream for sensor Mode 5: 720p/raw10
*
* Return: 0 on success, errors otherwise
*/
-static int imx274_mode_regs(struct stimx274 *priv, int mode)
+static int imx274_mode_regs(struct stimx274 *priv)
{
int err = 0;
@@ -727,7 +712,7 @@ static int imx274_mode_regs(struct stimx274 *priv, int mode)
if (err)
return err;
- err = imx274_write_table(priv, mode_table[mode]);
+ err = imx274_write_table(priv, priv->mode->init_regs);
return err;
}
@@ -830,6 +815,114 @@ static int imx274_s_ctrl(struct v4l2_ctrl *ctrl)
return ret;
}
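+
+/*
+ * Rate how well a candidate binned size (w x h) matches the requested
+ * size (ask_w x ask_h): violating a GE/LE selection flag costs a large
+ * constant penalty, then the absolute size difference is subtracted,
+ * so the closest admissible mode scores highest.
+ */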
+static int imx274_binning_goodness(struct stimx274 *imx274,
+ int w, int ask_w,
+ int h, int ask_h, u32 flags)
+{
+ struct device *dev = &imx274->client->dev;
+ const int goodness = 100000;
+ int val = 0;
+
+ if (flags & V4L2_SEL_FLAG_GE) {
+ if (w < ask_w)
+ val -= goodness;
+ if (h < ask_h)
+ val -= goodness;
+ }
+
+ if (flags & V4L2_SEL_FLAG_LE) {
+ if (w > ask_w)
+ val -= goodness;
+ if (h > ask_h)
+ val -= goodness;
+ }
+
+ val -= abs(w - ask_w);
+ val -= abs(h - ask_h);
+
+ dev_dbg(dev, "%s: ask %dx%d, size %dx%d, goodness %d\n",
+ __func__, ask_w, ask_h, w, h, val);
+
+ return val;
+}
+
+/**
+ * Helper function to change binning and set both compose and format.
+ *
+ * We have two entry points to change binning: set_fmt and
+ * set_selection(COMPOSE). Both have to compute the new output size
+ * and set it in both the compose rect and the frame format size. We
+ * also need to do the same thing after setting the crop to restore
+ * 1:1 binning.
+ *
+ * This function contains the common code for these three cases, it
+ * has many arguments in order to accommodate the needs of all of
+ * them.
+ *
+ * Must be called with imx274->lock locked.
+ *
+ * @imx274: The device object
+ * @cfg: The pad config we are editing for TRY requests
+ * @which: V4L2_SUBDEV_FORMAT_ACTIVE or V4L2_SUBDEV_FORMAT_TRY from the caller
+ * @width: Input-output parameter: set to the desired width before
+ * the call, contains the chosen value after returning successfully
+ * @height: Input-output parameter for height (see @width)
+ * @flags: Selection flags from struct v4l2_subdev_selection, or 0 if not
+ * available (when called from set_fmt)
+ */
+static int __imx274_change_compose(struct stimx274 *imx274,
+ struct v4l2_subdev_pad_config *cfg,
+ u32 which,
+ u32 *width,
+ u32 *height,
+ u32 flags)
+{
+ struct device *dev = &imx274->client->dev;
+ const struct v4l2_rect *cur_crop;
+ struct v4l2_mbus_framefmt *tgt_fmt;
+ unsigned int i;
+ const struct imx274_frmfmt *best_mode = &imx274_formats[0];
+ int best_goodness = INT_MIN;
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY) {
+ cur_crop = &cfg->try_crop;
+ tgt_fmt = &cfg->try_fmt;
+ } else {
+ cur_crop = &imx274->crop;
+ tgt_fmt = &imx274->format;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(imx274_formats); i++) {
+ unsigned int ratio = imx274_formats[i].bin_ratio;
+
+ int goodness = imx274_binning_goodness(
+ imx274,
+ cur_crop->width / ratio, *width,
+ cur_crop->height / ratio, *height,
+ flags);
+
+ if (goodness >= best_goodness) {
+ best_goodness = goodness;
+ best_mode = &imx274_formats[i];
+ }
+ }
+
+ *width = cur_crop->width / best_mode->bin_ratio;
+ *height = cur_crop->height / best_mode->bin_ratio;
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ imx274->mode = best_mode;
+
+ dev_dbg(dev, "%s: selected %u:1 binning\n",
+ __func__, best_mode->bin_ratio);
+
+ tgt_fmt->width = *width;
+ tgt_fmt->height = *height;
+ tgt_fmt->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
/**
* imx274_get_fmt - Get the pad format
* @sd: Pointer to V4L2 Sub device structure
@@ -868,45 +961,238 @@ static int imx274_set_fmt(struct v4l2_subdev *sd,
{
struct v4l2_mbus_framefmt *fmt = &format->format;
struct stimx274 *imx274 = to_imx274(sd);
- struct i2c_client *client = imx274->client;
- int index;
-
- dev_dbg(&client->dev,
- "%s: width = %d height = %d code = %d\n",
- __func__, fmt->width, fmt->height, fmt->code);
+ int err = 0;
mutex_lock(&imx274->lock);
- for (index = 0; index < ARRAY_SIZE(imx274_formats); index++) {
- if (imx274_formats[index].size.width == fmt->width &&
- imx274_formats[index].size.height == fmt->height)
- break;
- }
+ err = __imx274_change_compose(imx274, cfg, format->which,
+ &fmt->width, &fmt->height, 0);
- if (index >= ARRAY_SIZE(imx274_formats)) {
- /* default to first format */
- index = 0;
- }
-
- imx274->mode_index = index;
+ if (err)
+ goto out;
- if (fmt->width > IMX274_MAX_WIDTH)
- fmt->width = IMX274_MAX_WIDTH;
- if (fmt->height > IMX274_MAX_HEIGHT)
- fmt->height = IMX274_MAX_HEIGHT;
- fmt->width = fmt->width & (~IMX274_MASK_LSB_2_BITS);
- fmt->height = fmt->height & (~IMX274_MASK_LSB_2_BITS);
+ /*
+ * __imx274_change_compose already set width and height in the
+ * applicable format, but we need to keep all other format
+	 * values, so do a full copy here.
+ */
fmt->field = V4L2_FIELD_NONE;
-
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
cfg->try_fmt = *fmt;
else
imx274->format = *fmt;
+out:
+ mutex_unlock(&imx274->lock);
+
+ return err;
+}
+
+static int imx274_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+ const struct v4l2_rect *src_crop;
+ const struct v4l2_mbus_framefmt *src_fmt;
+ int ret = 0;
+
+ if (sel->pad != 0)
+ return -EINVAL;
+
+ if (sel->target == V4L2_SEL_TGT_CROP_BOUNDS) {
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = IMX274_MAX_WIDTH;
+ sel->r.height = IMX274_MAX_HEIGHT;
+ return 0;
+ }
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ src_crop = &cfg->try_crop;
+ src_fmt = &cfg->try_fmt;
+ } else {
+ src_crop = &imx274->crop;
+ src_fmt = &imx274->format;
+ }
+
+ mutex_lock(&imx274->lock);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *src_crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = src_crop->width;
+ sel->r.height = src_crop->height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = src_fmt->width;
+ sel->r.height = src_fmt->height;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&imx274->lock);
+
+ return ret;
+}
+
+static int imx274_set_selection_crop(struct stimx274 *imx274,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_rect *tgt_crop;
+ struct v4l2_rect new_crop;
+ bool size_changed;
+
+ /*
+ * h_step could be 12 or 24 depending on the binning. But we
+ * won't know the binning until we choose the mode later in
+ * __imx274_change_compose(). Thus let's be safe and use the
+ * most conservative value in all cases.
+ */
+ const u32 h_step = 24;
+
+ new_crop.width = min_t(u32,
+ IMX274_ROUND(sel->r.width, h_step, sel->flags),
+ IMX274_MAX_WIDTH);
+
+ /* Constraint: HTRIMMING_END - HTRIMMING_START >= 144 */
+ if (new_crop.width < 144)
+ new_crop.width = 144;
+
+ new_crop.left = min_t(u32,
+ IMX274_ROUND(sel->r.left, h_step, 0),
+ IMX274_MAX_WIDTH - new_crop.width);
+
+ new_crop.height = min_t(u32,
+ IMX274_ROUND(sel->r.height, 2, sel->flags),
+ IMX274_MAX_HEIGHT);
+
+ new_crop.top = min_t(u32, IMX274_ROUND(sel->r.top, 2, 0),
+ IMX274_MAX_HEIGHT - new_crop.height);
+
+ sel->r = new_crop;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+ tgt_crop = &cfg->try_crop;
+ else
+ tgt_crop = &imx274->crop;
+
+ mutex_lock(&imx274->lock);
+
+ size_changed = (new_crop.width != tgt_crop->width ||
+ new_crop.height != tgt_crop->height);
+
+ /* __imx274_change_compose needs the new size in *tgt_crop */
+ *tgt_crop = new_crop;
+
+ /* if crop size changed then reset the output image size */
+ if (size_changed)
+ __imx274_change_compose(imx274, cfg, sel->which,
+ &new_crop.width, &new_crop.height,
+ sel->flags);
+
mutex_unlock(&imx274->lock);
+
return 0;
}
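+
+/*
+ * Worked example for the rounding above (illustrative only, assuming
+ * IMX274_ROUND() picks the nearest multiple of 'step' when no
+ * V4L2_SEL_FLAG_GE/LE flag is set, and IMX274_MAX_WIDTH is 3840):
+ *
+ * sel->r = { .left = 100, .top = 50, .width = 1000, .height = 500 }
+ *
+ * width: 1000 -> 1008 (42 * 24), clamped to IMX274_MAX_WIDTH
+ * left: 100 -> 96 (4 * 24), clamped to 3840 - 1008
+ * height and top are already multiples of 2 and stay unchanged.
+ *
+ * The conservative h_step = 24 over-aligns modes that only need a
+ * 12-pixel step, so the crop is valid whichever mode is picked later.
+ */
+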
+static int imx274_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+
+ if (sel->pad != 0)
+ return -EINVAL;
+
+ if (sel->target == V4L2_SEL_TGT_CROP)
+ return imx274_set_selection_crop(imx274, cfg, sel);
+
+ if (sel->target == V4L2_SEL_TGT_COMPOSE) {
+ int err;
+
+ mutex_lock(&imx274->lock);
+ err = __imx274_change_compose(imx274, cfg, sel->which,
+ &sel->r.width, &sel->r.height,
+ sel->flags);
+ mutex_unlock(&imx274->lock);
+
+ /*
+ * __imx274_change_compose already set width and
+ * height in sel->r, we still need to set the top-left corner
+ */
+ if (!err) {
+ sel->r.top = 0;
+ sel->r.left = 0;
+ }
+
+ return err;
+ }
+
+ return -EINVAL;
+}
+
+static int imx274_apply_trimming(struct stimx274 *imx274)
+{
+ u32 h_start;
+ u32 h_end;
+ u32 hmax;
+ u32 v_cut;
+ s32 v_pos;
+ u32 write_v_size;
+ u32 y_out_size;
+ int err;
+
+ h_start = imx274->crop.left + 12;
+ h_end = h_start + imx274->crop.width;
+
+ /* Use the minimum allowed value of HMAX */
+ /* Note: except in mode 1, (width / 16 + 23) is always < hmax_min */
+ /* Note: 260 is the minimum HMAX in all implemented modes */
+ hmax = max_t(u32, 260, (imx274->crop.width) / 16 + 23);
+
+ /* invert v_pos if VFLIP */
+ v_pos = imx274->ctrls.vflip->cur.val ?
+ (-imx274->crop.top / 2) : (imx274->crop.top / 2);
+ v_cut = (IMX274_MAX_HEIGHT - imx274->crop.height) / 2;
+ write_v_size = imx274->crop.height + 22;
+ y_out_size = imx274->crop.height + 14;
+
+ err = imx274_write_mbreg(imx274, IMX274_HMAX_REG_LSB, hmax, 2);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_HTRIM_EN_REG, 1, 1);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_HTRIM_START_REG_LSB,
+ h_start, 2);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_HTRIM_END_REG_LSB,
+ h_end, 2);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_VWIDCUTEN_REG, 1, 1);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_VWIDCUT_REG_LSB,
+ v_cut, 2);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_VWINPOS_REG_LSB,
+ v_pos, 2);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_WRITE_VSIZE_REG_LSB,
+ write_v_size, 2);
+ if (!err)
+ err = imx274_write_mbreg(imx274, IMX274_Y_OUT_SIZE_REG_LSB,
+ y_out_size, 2);
+
+ return err;
+}
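+
+/*
+ * Worked example (illustrative only, assuming IMX274_MAX_HEIGHT is
+ * 2160): a centered 1920x1080 crop, crop = { .left = 960, .top = 540,
+ * .width = 1920, .height = 1080 }, with VFLIP off yields:
+ *
+ * h_start = 960 + 12 = 972, h_end = 972 + 1920 = 2892
+ * hmax = max(260, 1920 / 16 + 23) = max(260, 143) = 260
+ * v_pos = 540 / 2 = 270, v_cut = (2160 - 1080) / 2 = 540
+ * write_v_size = 1080 + 22 = 1102, y_out_size = 1080 + 14 = 1094
+ */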
+
/**
* imx274_g_frame_interval - Get the frame interval
* @sd: Pointer to V4L2 Sub device structure
@@ -1035,14 +1321,19 @@ static int imx274_s_stream(struct v4l2_subdev *sd, int on)
struct stimx274 *imx274 = to_imx274(sd);
int ret = 0;
- dev_dbg(&imx274->client->dev, "%s : %s, mode index = %d\n", __func__,
- on ? "Stream Start" : "Stream Stop", imx274->mode_index);
+ dev_dbg(&imx274->client->dev, "%s : %s, mode index = %td\n", __func__,
+ on ? "Stream Start" : "Stream Stop",
+ imx274->mode - &imx274_formats[0]);
mutex_lock(&imx274->lock);
if (on) {
/* load mode registers */
- ret = imx274_mode_regs(imx274, imx274->mode_index);
+ ret = imx274_mode_regs(imx274);
+ if (ret)
+ goto fail;
+
+ ret = imx274_apply_trimming(imx274);
if (ret)
goto fail;
@@ -1075,8 +1366,7 @@ static int imx274_s_stream(struct v4l2_subdev *sd, int on)
}
mutex_unlock(&imx274->lock);
- dev_dbg(&imx274->client->dev,
- "%s : Done: mode = %d\n", __func__, imx274->mode_index);
+ dev_dbg(&imx274->client->dev, "%s : Done\n", __func__);
return 0;
fail:
@@ -1146,14 +1436,14 @@ static int imx274_clamp_coarse_time(struct stimx274 *priv, u32 *val,
if (err)
return err;
- if (*frame_length < min_frame_len[priv->mode_index])
- *frame_length = min_frame_len[priv->mode_index];
+ if (*frame_length < priv->mode->min_frame_len)
+ *frame_length = priv->mode->min_frame_len;
*val = *frame_length - *val; /* convert to raw shr */
if (*val > *frame_length - IMX274_SHR_LIMIT_CONST)
*val = *frame_length - IMX274_SHR_LIMIT_CONST;
- else if (*val < min_SHR[priv->mode_index])
- *val = min_SHR[priv->mode_index];
+ else if (*val < priv->mode->min_SHR)
+ *val = priv->mode->min_SHR;
return 0;
}
@@ -1182,15 +1472,6 @@ static int imx274_set_digital_gain(struct stimx274 *priv, u32 dgain)
reg_val & IMX274_MASK_LSB_4_BITS);
}
-static inline void imx274_calculate_gain_regs(struct reg_8 regs[2], u16 gain)
-{
- regs->addr = IMX274_ANALOG_GAIN_ADDR_MSB;
- regs->val = (gain >> IMX274_SHIFT_8_BITS) & IMX274_MASK_LSB_3_BITS;
-
- (regs + 1)->addr = IMX274_ANALOG_GAIN_ADDR_LSB;
- (regs + 1)->val = (gain) & IMX274_MASK_LSB_8_BITS;
-}
-
/*
* imx274_set_gain - Function called when setting gain
* @priv: Pointer to device structure
@@ -1204,10 +1485,8 @@ static inline void imx274_calculate_gain_regs(struct reg_8 regs[2], u16 gain)
*/
static int imx274_set_gain(struct stimx274 *priv, struct v4l2_ctrl *ctrl)
{
- struct reg_8 reg_list[2];
int err;
u32 gain, analog_gain, digital_gain, gain_reg;
- int i;
gain = (u32)(ctrl->val);
@@ -1248,14 +1527,10 @@ static int imx274_set_gain(struct stimx274 *priv, struct v4l2_ctrl *ctrl)
if (gain_reg > IMX274_GAIN_REG_MAX)
gain_reg = IMX274_GAIN_REG_MAX;
- imx274_calculate_gain_regs(reg_list, (u16)gain_reg);
-
- for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
- err = imx274_write_reg(priv, reg_list[i].addr,
- reg_list[i].val);
- if (err)
- goto fail;
- }
+ err = imx274_write_mbreg(priv, IMX274_ANALOG_GAIN_ADDR_LSB, gain_reg,
+ 2);
+ if (err)
+ goto fail;
if (IMX274_GAIN_CONST - gain_reg == 0) {
err = -EINVAL;
@@ -1277,16 +1552,6 @@ fail:
return err;
}
-static inline void imx274_calculate_coarse_time_regs(struct reg_8 regs[2],
- u32 coarse_time)
-{
- regs->addr = IMX274_SHR_REG_MSB;
- regs->val = (coarse_time >> IMX274_SHIFT_8_BITS)
- & IMX274_MASK_LSB_8_BITS;
- (regs + 1)->addr = IMX274_SHR_REG_LSB;
- (regs + 1)->val = (coarse_time) & IMX274_MASK_LSB_8_BITS;
-}
-
/*
* imx274_set_coarse_time - Function called when setting SHR value
* @priv: Pointer to device structure
@@ -1298,10 +1563,8 @@ static inline void imx274_calculate_coarse_time_regs(struct reg_8 regs[2],
*/
static int imx274_set_coarse_time(struct stimx274 *priv, u32 *val)
{
- struct reg_8 reg_list[2];
int err;
u32 coarse_time, frame_length;
- int i;
coarse_time = *val;
@@ -1310,16 +1573,9 @@ static int imx274_set_coarse_time(struct stimx274 *priv, u32 *val)
if (err)
goto fail;
- /* prepare SHR registers */
- imx274_calculate_coarse_time_regs(reg_list, coarse_time);
-
- /* write to SHR registers */
- for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
- err = imx274_write_reg(priv, reg_list[i].addr,
- reg_list[i].val);
- if (err)
- goto fail;
- }
+ err = imx274_write_mbreg(priv, IMX274_SHR_REG_LSB, coarse_time, 2);
+ if (err)
+ goto fail;
*val = frame_length - coarse_time;
return 0;
@@ -1365,7 +1621,7 @@ static int imx274_set_exposure(struct stimx274 *priv, int val)
}
coarse_time = (IMX274_PIXCLK_CONST1 / IMX274_PIXCLK_CONST2 * val
- - nocpiop[priv->mode_index]) / hmax;
+ - priv->mode->nocpiop) / hmax;
/* step 2: convert exposure_time into SHR value */
@@ -1375,7 +1631,7 @@ static int imx274_set_exposure(struct stimx274 *priv, int val)
goto fail;
priv->ctrls.exposure->val =
- (coarse_time * hmax + nocpiop[priv->mode_index])
+ (coarse_time * hmax + priv->mode->nocpiop)
/ (IMX274_PIXCLK_CONST1 / IMX274_PIXCLK_CONST2);
dev_dbg(&priv->client->dev,
@@ -1448,19 +1704,6 @@ static int imx274_set_test_pattern(struct stimx274 *priv, int val)
return err;
}
-static inline void imx274_calculate_frame_length_regs(struct reg_8 regs[3],
- u32 frame_length)
-{
- regs->addr = IMX274_VMAX_REG_1;
- regs->val = (frame_length >> IMX274_SHIFT_16_BITS)
- & IMX274_MASK_LSB_4_BITS;
- (regs + 1)->addr = IMX274_VMAX_REG_2;
- (regs + 1)->val = (frame_length >> IMX274_SHIFT_8_BITS)
- & IMX274_MASK_LSB_8_BITS;
- (regs + 2)->addr = IMX274_VMAX_REG_3;
- (regs + 2)->val = (frame_length) & IMX274_MASK_LSB_8_BITS;
-}
-
/*
* imx274_set_frame_length - Function called when setting frame length
* @priv: Pointer to device structure
@@ -1472,23 +1715,17 @@ static inline void imx274_calculate_frame_length_regs(struct reg_8 regs[3],
*/
static int imx274_set_frame_length(struct stimx274 *priv, u32 val)
{
- struct reg_8 reg_list[3];
int err;
u32 frame_length;
- int i;
dev_dbg(&priv->client->dev, "%s : input length = %d\n",
__func__, val);
frame_length = (u32)val;
- imx274_calculate_frame_length_regs(reg_list, frame_length);
- for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
- err = imx274_write_reg(priv, reg_list[i].addr,
- reg_list[i].val);
- if (err)
- goto fail;
- }
+ err = imx274_write_mbreg(priv, IMX274_VMAX_REG_3, frame_length, 3);
+ if (err)
+ goto fail;
return 0;
@@ -1529,10 +1766,9 @@ static int imx274_set_frame_interval(struct stimx274 *priv,
/ frame_interval.numerator);
/* boundary check */
- if (req_frame_rate > max_frame_rate[priv->mode_index]) {
+ if (req_frame_rate > priv->mode->max_fps) {
frame_interval.numerator = 1;
- frame_interval.denominator =
- max_frame_rate[priv->mode_index];
+ frame_interval.denominator = priv->mode->max_fps;
} else if (req_frame_rate < IMX274_MIN_FRAME_RATE) {
frame_interval.numerator = 1;
frame_interval.denominator = IMX274_MIN_FRAME_RATE;
@@ -1589,6 +1825,8 @@ fail:
static const struct v4l2_subdev_pad_ops imx274_pad_ops = {
.get_fmt = imx274_get_fmt,
.set_fmt = imx274_set_fmt,
+ .get_selection = imx274_get_selection,
+ .set_selection = imx274_set_selection,
};
static const struct v4l2_subdev_video_ops imx274_video_ops = {
@@ -1632,6 +1870,18 @@ static int imx274_probe(struct i2c_client *client,
mutex_init(&imx274->lock);
+ /* initialize format */
+ imx274->mode = &imx274_formats[IMX274_DEFAULT_MODE];
+ imx274->crop.width = IMX274_MAX_WIDTH;
+ imx274->crop.height = IMX274_MAX_HEIGHT;
+ imx274->format.width = imx274->crop.width / imx274->mode->bin_ratio;
+ imx274->format.height = imx274->crop.height / imx274->mode->bin_ratio;
+ imx274->format.field = V4L2_FIELD_NONE;
+ imx274->format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
+ imx274->format.colorspace = V4L2_COLORSPACE_SRGB;
+ imx274->frame_interval.numerator = 1;
+ imx274->frame_interval.denominator = IMX274_DEF_FRAME_RATE;
+
/* initialize regmap */
imx274->regmap = devm_regmap_init_i2c(client, &imx274_regmap_config);
if (IS_ERR(imx274->regmap)) {
@@ -1720,16 +1970,6 @@ static int imx274_probe(struct i2c_client *client,
goto err_ctrls;
}
- /* initialize format */
- imx274->mode_index = IMX274_MODE_3840X2160;
- imx274->format.width = imx274_formats[0].size.width;
- imx274->format.height = imx274_formats[0].size.height;
- imx274->format.field = V4L2_FIELD_NONE;
- imx274->format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
- imx274->format.colorspace = V4L2_COLORSPACE_SRGB;
- imx274->frame_interval.numerator = 1;
- imx274->frame_interval.denominator = IMX274_DEF_FRAME_RATE;
-
/* load default control values */
ret = imx274_load_default(imx274);
if (ret) {
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index b600e03aa94b..49c6644cbba7 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -1,6 +1,6 @@
/*
* drivers/media/i2c/lm3560.c
- * General device driver for TI lm3560, FLASH LED Driver
+ * General device driver for TI lm3559, lm3560, FLASH LED Driver
*
* Copyright (C) 2013 Texas Instruments
*
@@ -465,6 +465,7 @@ static int lm3560_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm3560_id_table[] = {
+ {LM3559_NAME, 0},
{LM3560_NAME, 0},
{}
};
diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c
index 6a9e068462fd..b385f2b632ad 100644
--- a/drivers/media/i2c/mt9m032.c
+++ b/drivers/media/i2c/mt9m032.c
@@ -793,6 +793,7 @@ static int mt9m032_probe(struct i2c_client *client,
v4l2_ctrl_cluster(2, &sensor->hflip);
sensor->subdev.ctrl_handler = &sensor->ctrls;
+ sensor->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&sensor->subdev.entity, 1, &sensor->pad);
if (ret < 0)
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 91d822fc4443..715be3632b01 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -1111,6 +1111,7 @@ static int mt9p031_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&mt9p031->subdev, client, &mt9p031_subdev_ops);
mt9p031->subdev.internal_ops = &mt9p031_subdev_internal_ops;
+ mt9p031->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
mt9p031->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&mt9p031->subdev.entity, 1, &mt9p031->pad);
if (ret < 0)
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index 9d981d9f5686..f683d2cb0486 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -943,6 +943,7 @@ static int mt9t001_probe(struct i2c_client *client,
mt9t001->subdev.internal_ops = &mt9t001_subdev_internal_ops;
mt9t001->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ mt9t001->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
mt9t001->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&mt9t001->subdev.entity, 1, &mt9t001->pad);
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 4de63b2df334..f74730d24d8f 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -1162,6 +1162,7 @@ static int mt9v032_probe(struct i2c_client *client,
mt9v032->subdev.internal_ops = &mt9v032_subdev_internal_ops;
mt9v032->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ mt9v032->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
mt9v032->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&mt9v032->subdev.entity, 1, &mt9v032->pad);
if (ret < 0)
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
new file mode 100644
index 000000000000..b5410aeb5fe2
--- /dev/null
+++ b/drivers/media/i2c/mt9v111.c
@@ -0,0 +1,1298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 sensor driver for Aptina MT9V111 image sensor
+ * Copyright (C) 2018 Jacopo Mondi <jacopo@jmondi.org>
+ *
+ * Based on mt9v032 driver
+ * Copyright (C) 2010, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * Based on mt9v011 driver
+ * Copyright (c) 2009 Mauro Carvalho Chehab <mchehab@kernel.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/module.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-subdev.h>
+
+/*
+ * MT9V111 is a 1/4-Inch CMOS digital image sensor with an integrated
+ * Image Flow Processing (IFP) engine and a sensor core loosely based on
+ * MT9V011.
+ *
+ * The IFP can produce several output image formats from the sensor core
+ * output. This driver currently supports only YUYV format permutations.
+ *
+ * The driver allows manual frame rate control through the s_frame_interval
+ * subdev operation or the V4L2_CID_V/HBLANK controls, but it is known that the
+ * auto-exposure algorithm might modify the programmed frame rate. While the
+ * driver initially programs the sensor with auto-exposure and
+ * auto-white-balancing enabled, it is possible to disable them and more
+ * precisely control the frame rate.
+ *
+ * While it seems possible to instruct the auto-exposure control algorithm to
+ * respect a programmed frame rate when adjusting the pixel integration time,
+ * registers controlling this feature are not documented in the publicly
+ * available sensor manual used to develop this driver (09005aef80e90084,
+ * MT9V111_1.fm - Rev. G 1/05 EN).
+ */
+
+#define MT9V111_CHIP_ID_HIGH 0x82
+#define MT9V111_CHIP_ID_LOW 0x3a
+
+#define MT9V111_R01_ADDR_SPACE 0x01
+#define MT9V111_R01_IFP 0x01
+#define MT9V111_R01_CORE 0x04
+
+#define MT9V111_IFP_R06_OPMODE_CTRL 0x06
+#define MT9V111_IFP_R06_OPMODE_CTRL_AWB_EN BIT(1)
+#define MT9V111_IFP_R06_OPMODE_CTRL_AE_EN BIT(14)
+#define MT9V111_IFP_R07_IFP_RESET 0x07
+#define MT9V111_IFP_R07_IFP_RESET_MASK BIT(0)
+#define MT9V111_IFP_R08_OUTFMT_CTRL 0x08
+#define MT9V111_IFP_R08_OUTFMT_CTRL_FLICKER BIT(11)
+#define MT9V111_IFP_R08_OUTFMT_CTRL_PCLK BIT(5)
+#define MT9V111_IFP_R3A_OUTFMT_CTRL2 0x3a
+#define MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_CBCR BIT(0)
+#define MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_YC BIT(1)
+#define MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_MASK GENMASK(2, 0)
+#define MT9V111_IFP_RA5_HPAN 0xa5
+#define MT9V111_IFP_RA6_HZOOM 0xa6
+#define MT9V111_IFP_RA7_HOUT 0xa7
+#define MT9V111_IFP_RA8_VPAN 0xa8
+#define MT9V111_IFP_RA9_VZOOM 0xa9
+#define MT9V111_IFP_RAA_VOUT 0xaa
+#define MT9V111_IFP_DECIMATION_MASK GENMASK(9, 0)
+#define MT9V111_IFP_DECIMATION_FREEZE BIT(15)
+
+#define MT9V111_CORE_R03_WIN_HEIGHT 0x03
+#define MT9V111_CORE_R03_WIN_V_OFFS 2
+#define MT9V111_CORE_R04_WIN_WIDTH 0x04
+#define MT9V111_CORE_R04_WIN_H_OFFS 114
+#define MT9V111_CORE_R05_HBLANK 0x05
+#define MT9V111_CORE_R05_MIN_HBLANK 0x09
+#define MT9V111_CORE_R05_MAX_HBLANK GENMASK(9, 0)
+#define MT9V111_CORE_R05_DEF_HBLANK 0x26
+#define MT9V111_CORE_R06_VBLANK 0x06
+#define MT9V111_CORE_R06_MIN_VBLANK 0x03
+#define MT9V111_CORE_R06_MAX_VBLANK GENMASK(11, 0)
+#define MT9V111_CORE_R06_DEF_VBLANK 0x04
+#define MT9V111_CORE_R07_OUT_CTRL 0x07
+#define MT9V111_CORE_R07_OUT_CTRL_SAMPLE BIT(4)
+#define MT9V111_CORE_R09_PIXEL_INT 0x09
+#define MT9V111_CORE_R09_PIXEL_INT_MASK GENMASK(11, 0)
+#define MT9V111_CORE_R0D_CORE_RESET 0x0d
+#define MT9V111_CORE_R0D_CORE_RESET_MASK BIT(0)
+#define MT9V111_CORE_RFF_CHIP_VER 0xff
+
+#define MT9V111_PIXEL_ARRAY_WIDTH 640
+#define MT9V111_PIXEL_ARRAY_HEIGHT 480
+
+#define MT9V111_MAX_CLKIN 27000000
+
+/* The default sensor configuration at startup time. */
+static struct v4l2_mbus_framefmt mt9v111_def_fmt = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+};
+
+struct mt9v111_dev {
+ struct device *dev;
+ struct i2c_client *client;
+
+ u8 addr_space;
+
+ struct v4l2_subdev sd;
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+ struct media_pad pad;
+#endif
+
+ struct v4l2_ctrl *auto_awb;
+ struct v4l2_ctrl *auto_exp;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl_handler ctrls;
+
+ /* Output image format and sizes. */
+ struct v4l2_mbus_framefmt fmt;
+ unsigned int fps;
+
+ /* Protects power up/down sequences. */
+ struct mutex pwr_mutex;
+ int pwr_count;
+
+ /* Protects stream on/off sequences. */
+ struct mutex stream_mutex;
+ bool streaming;
+
+ /* Flags to mark HW settings as not yet applied. */
+ bool pending;
+
+ /* Clock provider and system clock frequency. */
+ struct clk *clk;
+ u32 sysclk;
+
+ struct gpio_desc *oe;
+ struct gpio_desc *standby;
+ struct gpio_desc *reset;
+};
+
+#define sd_to_mt9v111(__sd) container_of((__sd), struct mt9v111_dev, sd)
+
+/*
+ * mt9v111_mbus_fmt - List all media bus formats supported by the driver.
+ *
+ * Only list the media bus code here. The image sizes are freely configurable
+ * within the pixel array size range.
+ *
+ * The desired frame interval, in the supported frame interval range, is
+ * obtained by configuring blanking as the sensor does not have a PLL but
+ * only a fixed clock divider that generates the output pixel clock.
+ */
+static struct mt9v111_mbus_fmt {
+ u32 code;
+} mt9v111_formats[] = {
+ {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ },
+ {
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ },
+ {
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ },
+ {
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ },
+};
+
+static u32 mt9v111_frame_intervals[] = {5, 10, 15, 20, 30};
+
+/*
+ * mt9v111_frame_sizes - List sensor's supported resolutions.
+ *
+ * Resolutions are generated through decimation in the IFP block from the
+ * full VGA pixel array.
+ */
+static struct v4l2_rect mt9v111_frame_sizes[] = {
+ {
+ .width = 640,
+ .height = 480,
+ },
+ {
+ .width = 352,
+ .height = 288
+ },
+ {
+ .width = 320,
+ .height = 240,
+ },
+ {
+ .width = 176,
+ .height = 144,
+ },
+ {
+ .width = 160,
+ .height = 120,
+ },
+};
+
+/* --- Device I/O access --- */
+
+static int __mt9v111_read(struct i2c_client *c, u8 reg, u16 *val)
+{
+ struct i2c_msg msg[2];
+ __be16 buf;
+ int ret;
+
+ msg[0].addr = c->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &reg;
+
+ msg[1].addr = c->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 2;
+ msg[1].buf = (char *)&buf;
+
+ ret = i2c_transfer(c->adapter, msg, 2);
+ if (ret < 0) {
+ dev_err(&c->dev, "i2c read transfer error: %d\n", ret);
+ return ret;
+ }
+
+ *val = be16_to_cpu(buf);
+
+ dev_dbg(&c->dev, "%s: %x=%x\n", __func__, reg, *val);
+
+ return 0;
+}
+
+static int __mt9v111_write(struct i2c_client *c, u8 reg, u16 val)
+{
+ struct i2c_msg msg;
+ u8 buf[3] = { 0 };
+ int ret;
+
+ buf[0] = reg;
+ buf[1] = val >> 8;
+ buf[2] = val & 0xff;
+
+ msg.addr = c->addr;
+ msg.flags = 0;
+ msg.len = 3;
+ msg.buf = (char *)buf;
+
+ dev_dbg(&c->dev, "%s: %x = %x%x\n", __func__, reg, buf[1], buf[2]);
+
+ ret = i2c_transfer(c->adapter, &msg, 1);
+ if (ret < 0) {
+ dev_err(&c->dev, "i2c write transfer error: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __mt9v111_addr_space_select(struct i2c_client *c, u16 addr_space)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(c);
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
+ u16 val;
+ int ret;
+
+ if (mt9v111->addr_space == addr_space)
+ return 0;
+
+ ret = __mt9v111_write(c, MT9V111_R01_ADDR_SPACE, addr_space);
+ if (ret)
+ return ret;
+
+ /* Verify address space has been updated */
+ ret = __mt9v111_read(c, MT9V111_R01_ADDR_SPACE, &val);
+ if (ret)
+ return ret;
+
+ if (val != addr_space)
+ return -EINVAL;
+
+ mt9v111->addr_space = addr_space;
+
+ return 0;
+}
+
+static int mt9v111_read(struct i2c_client *c, u8 addr_space, u8 reg, u16 *val)
+{
+ int ret;
+
+ /* Select register address space first. */
+ ret = __mt9v111_addr_space_select(c, addr_space);
+ if (ret)
+ return ret;
+
+ ret = __mt9v111_read(c, reg, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mt9v111_write(struct i2c_client *c, u8 addr_space, u8 reg, u16 val)
+{
+ int ret;
+
+ /* Select register address space first. */
+ ret = __mt9v111_addr_space_select(c, addr_space);
+ if (ret)
+ return ret;
+
+ ret = __mt9v111_write(c, reg, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mt9v111_update(struct i2c_client *c, u8 addr_space, u8 reg,
+ u16 mask, u16 val)
+{
+ u16 current_val;
+ int ret;
+
+ /* Select register address space first. */
+ ret = __mt9v111_addr_space_select(c, addr_space);
+ if (ret)
+ return ret;
+
+ /* Read the current register value, then update it. */
+ ret = __mt9v111_read(c, reg, &current_val);
+ if (ret)
+ return ret;
+
+ current_val &= ~mask;
+ current_val |= (val & mask);
+ ret = __mt9v111_write(c, reg, current_val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
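+
+/*
+ * Usage sketch (illustrative only): the helpers above transparently
+ * switch between the two register maps, which share one i2c address
+ * and are selected by writing R0x01 first. For instance, reading the
+ * chip version from the sensor core space:
+ *
+ * u16 ver;
+ * int ret = mt9v111_read(c, MT9V111_R01_CORE,
+ * MT9V111_CORE_RFF_CHIP_VER, &ver);
+ *
+ * only issues the R0x01 write when the cached address space differs
+ * from the requested one.
+ */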
+
+/* --- Sensor HW operations --- */
+
+static int __mt9v111_power_on(struct v4l2_subdev *sd)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
+ int ret;
+
+ ret = clk_prepare_enable(mt9v111->clk);
+ if (ret)
+ return ret;
+
+ clk_set_rate(mt9v111->clk, mt9v111->sysclk);
+
+ gpiod_set_value(mt9v111->standby, 0);
+ usleep_range(500, 1000);
+
+ gpiod_set_value(mt9v111->oe, 1);
+ usleep_range(500, 1000);
+
+ return 0;
+}
+
+static int __mt9v111_power_off(struct v4l2_subdev *sd)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
+
+ gpiod_set_value(mt9v111->oe, 0);
+ usleep_range(500, 1000);
+
+ gpiod_set_value(mt9v111->standby, 1);
+ usleep_range(500, 1000);
+
+ clk_disable_unprepare(mt9v111->clk);
+
+ return 0;
+}
+
+static int __mt9v111_hw_reset(struct mt9v111_dev *mt9v111)
+{
+ if (!mt9v111->reset)
+ return -EINVAL;
+
+ gpiod_set_value(mt9v111->reset, 1);
+ usleep_range(500, 1000);
+
+ gpiod_set_value(mt9v111->reset, 0);
+ usleep_range(500, 1000);
+
+ return 0;
+}
+
+static int __mt9v111_sw_reset(struct mt9v111_dev *mt9v111)
+{
+ struct i2c_client *c = mt9v111->client;
+ int ret;
+
+ /* Software reset core and IFP blocks. */
+
+ ret = mt9v111_update(c, MT9V111_R01_CORE,
+ MT9V111_CORE_R0D_CORE_RESET,
+ MT9V111_CORE_R0D_CORE_RESET_MASK, 1);
+ if (ret)
+ return ret;
+ usleep_range(500, 1000);
+
+ ret = mt9v111_update(c, MT9V111_R01_CORE,
+ MT9V111_CORE_R0D_CORE_RESET,
+ MT9V111_CORE_R0D_CORE_RESET_MASK, 0);
+ if (ret)
+ return ret;
+ usleep_range(500, 1000);
+
+ ret = mt9v111_update(c, MT9V111_R01_IFP,
+ MT9V111_IFP_R07_IFP_RESET,
+ MT9V111_IFP_R07_IFP_RESET_MASK, 1);
+ if (ret)
+ return ret;
+ usleep_range(500, 1000);
+
+ ret = mt9v111_update(c, MT9V111_R01_IFP,
+ MT9V111_IFP_R07_IFP_RESET,
+ MT9V111_IFP_R07_IFP_RESET_MASK, 0);
+ if (ret)
+ return ret;
+ usleep_range(500, 1000);
+
+ return 0;
+}
+
+static int mt9v111_calc_frame_rate(struct mt9v111_dev *mt9v111,
+ struct v4l2_fract *tpf)
+{
+ unsigned int fps = tpf->numerator ?
+ tpf->denominator / tpf->numerator :
+ tpf->denominator;
+ unsigned int best_diff;
+ unsigned int frm_cols;
+ unsigned int row_pclk;
+ unsigned int best_fps;
+ unsigned int pclk;
+ unsigned int diff;
+ unsigned int idx;
+ unsigned int hb;
+ unsigned int vb;
+ unsigned int i;
+ int ret;
+
+ /* Approximate to the closest supported frame interval. */
+ best_diff = ~0L;
+ for (i = 0, idx = 0; i < ARRAY_SIZE(mt9v111_frame_intervals); i++) {
+ diff = abs(fps - mt9v111_frame_intervals[i]);
+ if (diff < best_diff) {
+ idx = i;
+ best_diff = diff;
+ }
+ }
+ fps = mt9v111_frame_intervals[idx];
+
+ /*
+ * The sensor does not provide PLL circuitry: the pixel clock is
+ * generated by dividing the master clock source by two.
+ *
+ * Trow = (W + Hblank + 114) * 2 * (1 / SYSCLK)
+ * TFrame = Trow * (H + Vblank + 2)
+ *
+ * FPS = 1 / TFrame
+ * = (SYSCLK / 2) / ((W + Hblank + 114) * (H + Vblank + 2))
+ *
+ * This boils down to tuning the H and V blanks to best approximate
+ * the above equation.
+ *
+ * Test all available H/V blank values, until we reach the
+ * desired frame rate.
+ */
+ best_fps = vb = hb = 0;
+ pclk = DIV_ROUND_CLOSEST(mt9v111->sysclk, 2);
+ row_pclk = MT9V111_PIXEL_ARRAY_WIDTH + 7 + MT9V111_CORE_R04_WIN_H_OFFS;
+ frm_cols = MT9V111_PIXEL_ARRAY_HEIGHT + 7 + MT9V111_CORE_R03_WIN_V_OFFS;
+
+ best_diff = ~0L;
+ for (vb = MT9V111_CORE_R06_MIN_VBLANK;
+ vb < MT9V111_CORE_R06_MAX_VBLANK; vb++) {
+ for (hb = MT9V111_CORE_R05_MIN_HBLANK;
+ hb < MT9V111_CORE_R05_MAX_HBLANK; hb += 10) {
+ unsigned int t_frame = (row_pclk + hb) *
+ (frm_cols + vb);
+ unsigned int t_fps = DIV_ROUND_CLOSEST(pclk, t_frame);
+
+ diff = abs(fps - t_fps);
+ if (diff < best_diff) {
+ best_diff = diff;
+ best_fps = t_fps;
+
+ if (diff == 0)
+ break;
+ }
+ }
+
+ if (diff == 0)
+ break;
+ }
+
+ ret = v4l2_ctrl_s_ctrl_int64(mt9v111->hblank, hb);
+ if (ret)
+ return ret;
+
+ ret = v4l2_ctrl_s_ctrl_int64(mt9v111->vblank, vb);
+ if (ret)
+ return ret;
+
+ tpf->numerator = 1;
+ tpf->denominator = best_fps;
+
+ return 0;
+}
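+
+/*
+ * Worked example (illustrative only, assuming a 27 MHz master clock):
+ * pclk = 13.5 MHz, row_pclk = 640 + 7 + 114 = 761 and
+ * frm_cols = 480 + 7 + 2 = 489. For a requested 30 fps the search
+ * above stops at vb = 3, hb = 149:
+ *
+ * t_frame = (761 + 149) * (489 + 3) = 447720 pixel clocks
+ * t_fps = DIV_ROUND_CLOSEST(13500000, 447720) = 30
+ */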
+
+static int mt9v111_hw_config(struct mt9v111_dev *mt9v111)
+{
+ struct i2c_client *c = mt9v111->client;
+ int ret;
+ u16 outfmtctrl2;
+
+ /* Force device reset. */
+ ret = __mt9v111_hw_reset(mt9v111);
+ if (ret == -EINVAL)
+ ret = __mt9v111_sw_reset(mt9v111);
+ if (ret)
+ return ret;
+
+ /* Configure internal clock sample rate. */
+ ret = mt9v111->sysclk < DIV_ROUND_CLOSEST(MT9V111_MAX_CLKIN, 2) ?
+ mt9v111_update(c, MT9V111_R01_CORE,
+ MT9V111_CORE_R07_OUT_CTRL,
+ MT9V111_CORE_R07_OUT_CTRL_SAMPLE, 1) :
+ mt9v111_update(c, MT9V111_R01_CORE,
+ MT9V111_CORE_R07_OUT_CTRL,
+ MT9V111_CORE_R07_OUT_CTRL_SAMPLE, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the ordering of the output image format components.
+ *
+ * TODO: the IFP block can also output several RGB permutations; we
+ * only support YUYV permutations at the moment.
+ */
+ switch (mt9v111->fmt.code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ outfmtctrl2 = MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_YC;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ outfmtctrl2 = MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_CBCR;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ outfmtctrl2 = MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_YC |
+ MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_CBCR;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ outfmtctrl2 = 0;
+ break;
+ }
+
+ ret = mt9v111_update(c, MT9V111_R01_IFP, MT9V111_IFP_R3A_OUTFMT_CTRL2,
+ MT9V111_IFP_R3A_OUTFMT_CTRL2_SWAP_MASK,
+ outfmtctrl2);
+ if (ret)
+ return ret;
+
+ /*
+ * Do not change the sensor core's default configuration:
+ * output the whole 640x480 pixel array, skipping 18 columns and 6 rows.
+ *
+ * Instead, control the output image size through the IFP block.
+ *
+ * TODO: no zoom & pan support. Currently the output image size is
+ * controlled through decimation only.
+ */
+ ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA5_HPAN,
+ MT9V111_IFP_DECIMATION_FREEZE);
+ if (ret)
+ return ret;
+
+ ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA8_VPAN,
+ MT9V111_IFP_DECIMATION_FREEZE);
+ if (ret)
+ return ret;
+
+ ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA6_HZOOM,
+ MT9V111_IFP_DECIMATION_FREEZE |
+ MT9V111_PIXEL_ARRAY_WIDTH);
+ if (ret)
+ return ret;
+
+ ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA9_VZOOM,
+ MT9V111_IFP_DECIMATION_FREEZE |
+ MT9V111_PIXEL_ARRAY_HEIGHT);
+ if (ret)
+ return ret;
+
+ ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA7_HOUT,
+ MT9V111_IFP_DECIMATION_FREEZE |
+ mt9v111->fmt.width);
+ if (ret)
+ return ret;
+
+ ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RAA_VOUT,
+ mt9v111->fmt.height);
+ if (ret)
+ return ret;
+
+ /* Apply controls to set auto exp, auto awb and timings */
+ ret = v4l2_ctrl_handler_setup(&mt9v111->ctrls);
+ if (ret)
+ return ret;
+
+ /*
+ * Set pixel integration time to the whole frame time.
+ * This value controls the shutter delay when running with AE
+ * disabled. If longer than frame time, it affects the output
+ * frame rate.
+ */
+ return mt9v111_write(c, MT9V111_R01_CORE, MT9V111_CORE_R09_PIXEL_INT,
+ MT9V111_PIXEL_ARRAY_HEIGHT);
+}
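+
+/*
+ * Illustrative example of the decimation set up above: with the pan
+ * registers frozen at 0 and the zoom registers fixed at the full
+ * 640x480 array, programming HOUT = 320 and VOUT = 240 makes the IFP
+ * decimate by 2 in both directions, so the sensor outputs QVGA frames.
+ */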
+
+/* --- V4L2 subdev operations --- */
+
+static int mt9v111_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
+ int pwr_count;
+ int ret = 0;
+
+ mutex_lock(&mt9v111->pwr_mutex);
+
+ /*
+ * Make sure we're transitioning from 0 to 1, or vice versa,
+ * before actually changing the power state.
+ */
+ pwr_count = mt9v111->pwr_count;
+ pwr_count += on ? 1 : -1;
+ if (pwr_count == !!on) {
+ ret = on ? __mt9v111_power_on(sd) :
+ __mt9v111_power_off(sd);
+ if (!ret)
+ /* All went well, update the power counter. */
+ mt9v111->pwr_count = pwr_count;
+
+ mutex_unlock(&mt9v111->pwr_mutex);
+
+ return ret;
+ }
+
+ /*
+ * Update power counter to keep track of how many nested calls we
+ * received.
+ */
+ WARN_ON(pwr_count < 0 || pwr_count > 1);
+ mt9v111->pwr_count = pwr_count;
+
+ mutex_unlock(&mt9v111->pwr_mutex);
+
+ return ret;
+}
+
+static int mt9v111_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(subdev);
+ int ret;
+
+ mutex_lock(&mt9v111->stream_mutex);
+
+ if (mt9v111->streaming == enable) {
+ mutex_unlock(&mt9v111->stream_mutex);
+ return 0;
+ }
+
+ ret = mt9v111_s_power(subdev, enable);
+ if (ret)
+ goto error_unlock;
+
+ if (enable && mt9v111->pending) {
+ ret = mt9v111_hw_config(mt9v111);
+ if (ret)
+ goto error_unlock;
+
+ /*
+ * No need to update controls here: only H/VBLANK are supported
+ * and they are immediately programmed to registers in .s_ctrl
+ */
+
+ mt9v111->pending = false;
+ }
+
+ mt9v111->streaming = enable ? true : false;
+ mutex_unlock(&mt9v111->stream_mutex);
+
+ return 0;
+
+error_unlock:
+ mutex_unlock(&mt9v111->stream_mutex);
+
+ return ret;
+}
+
+static int mt9v111_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *ival)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
+ struct v4l2_fract *tpf = &ival->interval;
+ unsigned int fps = tpf->numerator ?
+ tpf->denominator / tpf->numerator :
+ tpf->denominator;
+ unsigned int max_fps;
+
+ if (!tpf->numerator)
+ tpf->numerator = 1;
+
+ mutex_lock(&mt9v111->stream_mutex);
+
+ if (mt9v111->streaming) {
+ mutex_unlock(&mt9v111->stream_mutex);
+ return -EBUSY;
+ }
+
+ if (mt9v111->fps == fps) {
+ mutex_unlock(&mt9v111->stream_mutex);
+ return 0;
+ }
+
+ /* Make sure frame rate/image sizes constraints are respected. */
+ if (mt9v111->fmt.width < QVGA_WIDTH &&
+ mt9v111->fmt.height < QVGA_HEIGHT)
+ max_fps = 90;
+ else if (mt9v111->fmt.width < CIF_WIDTH &&
+ mt9v111->fmt.height < CIF_HEIGHT)
+ max_fps = 60;
+ else
+ max_fps = mt9v111->sysclk <
+ DIV_ROUND_CLOSEST(MT9V111_MAX_CLKIN, 2) ? 15 :
+ 30;
+
+ if (fps > max_fps) {
+ mutex_unlock(&mt9v111->stream_mutex);
+ return -EINVAL;
+ }
+
+ mt9v111_calc_frame_rate(mt9v111, tpf);
+
+ mt9v111->fps = fps;
+ mt9v111->pending = true;
+
+ mutex_unlock(&mt9v111->stream_mutex);
+
+ return 0;
+}
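+
+/*
+ * Example of the size/rate constraints above (illustrative only):
+ * a 160x120 output allows up to 90 fps and 320x240 up to 60 fps,
+ * while full 640x480 frames are limited to 30 fps (15 fps when the
+ * sensor runs from a clock slower than MT9V111_MAX_CLKIN / 2).
+ */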
+
+static int mt9v111_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *ival)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
+ struct v4l2_fract *tpf = &ival->interval;
+
+ mutex_lock(&mt9v111->stream_mutex);
+
+ tpf->numerator = 1;
+ tpf->denominator = mt9v111->fps;
+
+ mutex_unlock(&mt9v111->stream_mutex);
+
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *__mt9v111_get_pad_format(
+ struct mt9v111_dev *mt9v111,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+#if IS_ENABLED(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ return v4l2_subdev_get_try_format(&mt9v111->sd, cfg, pad);
+#else
+ return &cfg->try_fmt;
+#endif
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &mt9v111->fmt;
+ default:
+ return NULL;
+ }
+}
+
+static int mt9v111_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad || code->index >= ARRAY_SIZE(mt9v111_formats))
+ return -EINVAL;
+
+ code->code = mt9v111_formats[code->index].code;
+
+ return 0;
+}
+
+static int mt9v111_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ unsigned int i;
+
+ if (fie->pad || fie->index >= ARRAY_SIZE(mt9v111_frame_intervals))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(mt9v111_frame_sizes); i++)
+ if (fie->width == mt9v111_frame_sizes[i].width &&
+ fie->height == mt9v111_frame_sizes[i].height)
+ break;
+
+ if (i == ARRAY_SIZE(mt9v111_frame_sizes))
+ return -EINVAL;
+
+ fie->interval.numerator = 1;
+ fie->interval.denominator = mt9v111_frame_intervals[fie->index];
+
+ return 0;
+}
+
+static int mt9v111_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->pad || fse->index >= ARRAY_SIZE(mt9v111_frame_sizes))
+ return -EINVAL;
+
+ fse->min_width = mt9v111_frame_sizes[fse->index].width;
+ fse->max_width = mt9v111_frame_sizes[fse->index].width;
+ fse->min_height = mt9v111_frame_sizes[fse->index].height;
+ fse->max_height = mt9v111_frame_sizes[fse->index].height;
+
+ return 0;
+}
+
+static int mt9v111_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(subdev);
+
+ if (format->pad)
+ return -EINVAL;
+
+ mutex_lock(&mt9v111->stream_mutex);
+ format->format = *__mt9v111_get_pad_format(mt9v111, cfg, format->pad,
+ format->which);
+ mutex_unlock(&mt9v111->stream_mutex);
+
+ return 0;
+}
+
+static int mt9v111_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(subdev);
+ struct v4l2_mbus_framefmt new_fmt;
+ struct v4l2_mbus_framefmt *__fmt;
+ unsigned int best_fit = ~0L;
+ unsigned int idx = 0;
+ unsigned int i;
+
+ mutex_lock(&mt9v111->stream_mutex);
+ if (mt9v111->streaming) {
+ mutex_unlock(&mt9v111->stream_mutex);
+ return -EBUSY;
+ }
+
+ if (format->pad) {
+ mutex_unlock(&mt9v111->stream_mutex);
+ return -EINVAL;
+ }
+
+ /* Update mbus format code and sizes. */
+ for (i = 0; i < ARRAY_SIZE(mt9v111_formats); i++) {
+ if (format->format.code == mt9v111_formats[i].code) {
+ new_fmt.code = mt9v111_formats[i].code;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(mt9v111_formats))
+ new_fmt.code = mt9v111_formats[0].code;
+
+ for (i = 0; i < ARRAY_SIZE(mt9v111_frame_sizes); i++) {
+ unsigned int fit = abs(mt9v111_frame_sizes[i].width -
+ format->format.width) +
+ abs(mt9v111_frame_sizes[i].height -
+ format->format.height);
+ if (fit < best_fit) {
+ best_fit = fit;
+ idx = i;
+
+ if (fit == 0)
+ break;
+ }
+ }
+ new_fmt.width = mt9v111_frame_sizes[idx].width;
+ new_fmt.height = mt9v111_frame_sizes[idx].height;
+
+ /* Update the device (or pad) format if it has changed. */
+ __fmt = __mt9v111_get_pad_format(mt9v111, cfg, format->pad,
+ format->which);
+
+ /* Format hasn't changed, stop here. */
+ if (__fmt->code == new_fmt.code &&
+ __fmt->width == new_fmt.width &&
+ __fmt->height == new_fmt.height)
+ goto done;
+
+ /* Update the format and sizes, then mark changes as pending. */
+ __fmt->code = new_fmt.code;
+ __fmt->width = new_fmt.width;
+ __fmt->height = new_fmt.height;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ mt9v111->pending = true;
+
+ dev_dbg(mt9v111->dev, "%s: mbus_code: %x - (%ux%u)\n",
+ __func__, __fmt->code, __fmt->width, __fmt->height);
+
+done:
+ format->format = *__fmt;
+
+ mutex_unlock(&mt9v111->stream_mutex);
+
+ return 0;
+}
+
+static int mt9v111_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ cfg->try_fmt = mt9v111_def_fmt;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops mt9v111_core_ops = {
+ .s_power = mt9v111_s_power,
+};
+
+static const struct v4l2_subdev_video_ops mt9v111_video_ops = {
+ .s_stream = mt9v111_s_stream,
+ .s_frame_interval = mt9v111_s_frame_interval,
+ .g_frame_interval = mt9v111_g_frame_interval,
+};
+
+static const struct v4l2_subdev_pad_ops mt9v111_pad_ops = {
+ .init_cfg = mt9v111_init_cfg,
+ .enum_mbus_code = mt9v111_enum_mbus_code,
+ .enum_frame_size = mt9v111_enum_frame_size,
+ .enum_frame_interval = mt9v111_enum_frame_interval,
+ .get_fmt = mt9v111_get_format,
+ .set_fmt = mt9v111_set_format,
+};
+
+static const struct v4l2_subdev_ops mt9v111_ops = {
+ .core = &mt9v111_core_ops,
+ .video = &mt9v111_video_ops,
+ .pad = &mt9v111_pad_ops,
+};
+
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+static const struct media_entity_operations mt9v111_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+#endif
+
+/* --- V4L2 ctrl --- */
+static int mt9v111_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mt9v111_dev *mt9v111 = container_of(ctrl->handler,
+ struct mt9v111_dev,
+ ctrls);
+ int ret;
+
+ mutex_lock(&mt9v111->pwr_mutex);
+ /*
+ * If sensor is powered down, just cache new control values,
+ * no actual register access.
+ */
+ if (!mt9v111->pwr_count) {
+ mt9v111->pending = true;
+ mutex_unlock(&mt9v111->pwr_mutex);
+ return 0;
+ }
+ mutex_unlock(&mt9v111->pwr_mutex);
+
+ /*
+ * The flicker control gets disabled when both auto exposure and auto
+ * white balance are disabled; if either of the two is enabled, the
+ * flicker control is enabled as well.
+ *
+ * Disabling flickering when AE and AWB are off allows more precise
+ * control of the programmed frame rate.
+ */
+ if (mt9v111->auto_exp->is_new || mt9v111->auto_awb->is_new) {
+ if (mt9v111->auto_exp->val == V4L2_EXPOSURE_MANUAL &&
+ mt9v111->auto_awb->val == V4L2_WHITE_BALANCE_MANUAL)
+ ret = mt9v111_update(mt9v111->client, MT9V111_R01_IFP,
+ MT9V111_IFP_R08_OUTFMT_CTRL,
+ MT9V111_IFP_R08_OUTFMT_CTRL_FLICKER,
+ 0);
+ else
+ ret = mt9v111_update(mt9v111->client, MT9V111_R01_IFP,
+ MT9V111_IFP_R08_OUTFMT_CTRL,
+ MT9V111_IFP_R08_OUTFMT_CTRL_FLICKER,
+ 1);
+ if (ret)
+ return ret;
+ }
+
+ ret = -EINVAL;
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = mt9v111_update(mt9v111->client, MT9V111_R01_IFP,
+ MT9V111_IFP_R06_OPMODE_CTRL,
+ MT9V111_IFP_R06_OPMODE_CTRL_AWB_EN,
+ ctrl->val == V4L2_WHITE_BALANCE_AUTO ?
+ MT9V111_IFP_R06_OPMODE_CTRL_AWB_EN : 0);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = mt9v111_update(mt9v111->client, MT9V111_R01_IFP,
+ MT9V111_IFP_R06_OPMODE_CTRL,
+ MT9V111_IFP_R06_OPMODE_CTRL_AE_EN,
+ ctrl->val == V4L2_EXPOSURE_AUTO ?
+ MT9V111_IFP_R06_OPMODE_CTRL_AE_EN : 0);
+ break;
+ case V4L2_CID_HBLANK:
+ ret = mt9v111_update(mt9v111->client, MT9V111_R01_CORE,
+ MT9V111_CORE_R05_HBLANK,
+ MT9V111_CORE_R05_MAX_HBLANK,
+ mt9v111->hblank->val);
+ break;
+ case V4L2_CID_VBLANK:
+ ret = mt9v111_update(mt9v111->client, MT9V111_R01_CORE,
+ MT9V111_CORE_R06_VBLANK,
+ MT9V111_CORE_R06_MAX_VBLANK,
+ mt9v111->vblank->val);
+ break;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mt9v111_ctrl_ops = {
+ .s_ctrl = mt9v111_s_ctrl,
+};
+
+static int mt9v111_chip_probe(struct mt9v111_dev *mt9v111)
+{
+ int ret;
+ u16 val;
+
+ ret = __mt9v111_power_on(&mt9v111->sd);
+ if (ret)
+ return ret;
+
+ ret = mt9v111_read(mt9v111->client, MT9V111_R01_CORE,
+ MT9V111_CORE_RFF_CHIP_VER, &val);
+ if (ret)
+ goto power_off;
+
+ if ((val >> 8) != MT9V111_CHIP_ID_HIGH ||
+ (val & 0xff) != MT9V111_CHIP_ID_LOW) {
+ dev_err(mt9v111->dev,
+ "Unable to identify MT9V111 chip: 0x%2x%2x\n",
+ val >> 8, val & 0xff);
+ ret = -EIO;
+ goto power_off;
+ }
+
+ dev_dbg(mt9v111->dev, "Chip identified: 0x%2x%2x\n",
+ val >> 8, val & 0xff);
+
+power_off:
+ __mt9v111_power_off(&mt9v111->sd);
+
+ return ret;
+}
+
+static int mt9v111_probe(struct i2c_client *client)
+{
+ struct mt9v111_dev *mt9v111;
+ struct v4l2_fract tpf;
+ int ret;
+
+ mt9v111 = devm_kzalloc(&client->dev, sizeof(*mt9v111), GFP_KERNEL);
+ if (!mt9v111)
+ return -ENOMEM;
+
+ mt9v111->dev = &client->dev;
+ mt9v111->client = client;
+
+ mt9v111->clk = devm_clk_get(&client->dev, NULL);
+ if (IS_ERR(mt9v111->clk))
+ return PTR_ERR(mt9v111->clk);
+
+ mt9v111->sysclk = clk_get_rate(mt9v111->clk);
+ if (mt9v111->sysclk > MT9V111_MAX_CLKIN)
+ return -EINVAL;
+
+ mt9v111->oe = devm_gpiod_get_optional(&client->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(mt9v111->oe)) {
+ dev_err(&client->dev, "Unable to get GPIO \"enable\": %ld\n",
+ PTR_ERR(mt9v111->oe));
+ return PTR_ERR(mt9v111->oe);
+ }
+
+ mt9v111->standby = devm_gpiod_get_optional(&client->dev, "standby",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(mt9v111->standby)) {
+ dev_err(&client->dev, "Unable to get GPIO \"standby\": %ld\n",
+ PTR_ERR(mt9v111->standby));
+ return PTR_ERR(mt9v111->standby);
+ }
+
+ mt9v111->reset = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(mt9v111->reset)) {
+ dev_err(&client->dev, "Unable to get GPIO \"reset\": %ld\n",
+ PTR_ERR(mt9v111->reset));
+ return PTR_ERR(mt9v111->reset);
+ }
+
+ mutex_init(&mt9v111->pwr_mutex);
+ mutex_init(&mt9v111->stream_mutex);
+
+ v4l2_ctrl_handler_init(&mt9v111->ctrls, 5);
+
+ mt9v111->auto_awb = v4l2_ctrl_new_std(&mt9v111->ctrls,
+ &mt9v111_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1,
+ V4L2_WHITE_BALANCE_AUTO);
+ if (IS_ERR_OR_NULL(mt9v111->auto_awb)) {
+ ret = PTR_ERR(mt9v111->auto_awb);
+ goto error_free_ctrls;
+ }
+
+ mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls,
+ &mt9v111_ctrl_ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL,
+ 0, V4L2_EXPOSURE_AUTO);
+ if (IS_ERR_OR_NULL(mt9v111->auto_exp)) {
+ ret = PTR_ERR(mt9v111->auto_exp);
+ goto error_free_ctrls;
+ }
+
+ /* Initialize timings */
+ mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
+ V4L2_CID_HBLANK,
+ MT9V111_CORE_R05_MIN_HBLANK,
+ MT9V111_CORE_R05_MAX_HBLANK, 1,
+ MT9V111_CORE_R05_DEF_HBLANK);
+ if (IS_ERR_OR_NULL(mt9v111->hblank)) {
+ ret = PTR_ERR(mt9v111->hblank);
+ goto error_free_ctrls;
+ }
+
+ mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
+ V4L2_CID_VBLANK,
+ MT9V111_CORE_R06_MIN_VBLANK,
+ MT9V111_CORE_R06_MAX_VBLANK, 1,
+ MT9V111_CORE_R06_DEF_VBLANK);
+ if (IS_ERR_OR_NULL(mt9v111->vblank)) {
+ ret = PTR_ERR(mt9v111->vblank);
+ goto error_free_ctrls;
+ }
+
+ /* PIXEL_RATE is fixed: just expose it to user space. */
+ v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1,
+ DIV_ROUND_CLOSEST(mt9v111->sysclk, 2));
+
+ mt9v111->sd.ctrl_handler = &mt9v111->ctrls;
+
+ /* Start with default configuration: 640x480 UYVY. */
+ mt9v111->fmt = mt9v111_def_fmt;
+
+ /* Re-calculate blankings for 640x480@15fps. */
+ mt9v111->fps = 15;
+ tpf.numerator = 1;
+ tpf.denominator = mt9v111->fps;
+ mt9v111_calc_frame_rate(mt9v111, &tpf);
+
+ mt9v111->pwr_count = 0;
+ mt9v111->addr_space = MT9V111_R01_IFP;
+ mt9v111->pending = true;
+
+ v4l2_i2c_subdev_init(&mt9v111->sd, client, &mt9v111_ops);
+
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+ mt9v111->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ mt9v111->sd.entity.ops = &mt9v111_subdev_entity_ops;
+ mt9v111->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad);
+ if (ret)
+ goto error_free_ctrls;
+#endif
+
+ ret = mt9v111_chip_probe(mt9v111);
+ if (ret)
+ goto error_free_ctrls;
+
+ ret = v4l2_async_register_subdev(&mt9v111->sd);
+ if (ret)
+ goto error_free_ctrls;
+
+ return 0;
+
+error_free_ctrls:
+ v4l2_ctrl_handler_free(&mt9v111->ctrls);
+
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&mt9v111->sd.entity);
+#endif
+
+ mutex_destroy(&mt9v111->pwr_mutex);
+ mutex_destroy(&mt9v111->stream_mutex);
+
+ return ret;
+}
+
+static int mt9v111_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
+
+ v4l2_async_unregister_subdev(sd);
+
+ v4l2_ctrl_handler_free(&mt9v111->ctrls);
+
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&sd->entity);
+#endif
+
+ mutex_destroy(&mt9v111->pwr_mutex);
+ mutex_destroy(&mt9v111->stream_mutex);
+
+ devm_gpiod_put(mt9v111->dev, mt9v111->oe);
+ devm_gpiod_put(mt9v111->dev, mt9v111->standby);
+ devm_gpiod_put(mt9v111->dev, mt9v111->reset);
+
+ devm_clk_put(mt9v111->dev, mt9v111->clk);
+
+ return 0;
+}
+
+static const struct of_device_id mt9v111_of_match[] = {
+ { .compatible = "aptina,mt9v111", },
+ { /* sentinel */ },
+};
+
+static struct i2c_driver mt9v111_driver = {
+ .driver = {
+ .name = "mt9v111",
+ .of_match_table = mt9v111_of_match,
+ },
+ .probe_new = mt9v111_probe,
+ .remove = mt9v111_remove,
+};
+
+module_i2c_driver(mt9v111_driver);
+
+MODULE_DESCRIPTION("V4L2 sensor driver for Aptina MT9V111");
+MODULE_AUTHOR("Jacopo Mondi <jacopo@jmondi.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
new file mode 100644
index 000000000000..f753a1c333ef
--- /dev/null
+++ b/drivers/media/i2c/ov2680.c
@@ -0,0 +1,1186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Omnivision OV2680 CMOS Image Sensor driver
+ *
+ * Copyright (C) 2018 Linaro Ltd
+ *
+ * Based on OV5640 Sensor Driver
+ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2014-2017 Mentor Graphics Inc.
+ *
+ */
+
+#include <asm/unaligned.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#define OV2680_XVCLK_VALUE 24000000
+
+#define OV2680_CHIP_ID 0x2680
+
+#define OV2680_REG_STREAM_CTRL 0x0100
+#define OV2680_REG_SOFT_RESET 0x0103
+
+#define OV2680_REG_CHIP_ID_HIGH 0x300a
+#define OV2680_REG_CHIP_ID_LOW 0x300b
+
+#define OV2680_REG_R_MANUAL 0x3503
+#define OV2680_REG_GAIN_PK 0x350a
+#define OV2680_REG_EXPOSURE_PK_HIGH 0x3500
+#define OV2680_REG_TIMING_HTS 0x380c
+#define OV2680_REG_TIMING_VTS 0x380e
+#define OV2680_REG_FORMAT1 0x3820
+#define OV2680_REG_FORMAT2 0x3821
+
+#define OV2680_REG_ISP_CTRL00 0x5080
+
+#define OV2680_FRAME_RATE 30
+
+#define OV2680_REG_VALUE_8BIT 1
+#define OV2680_REG_VALUE_16BIT 2
+#define OV2680_REG_VALUE_24BIT 3
+
+#define OV2680_WIDTH_MAX 1600
+#define OV2680_HEIGHT_MAX 1200
+
+enum ov2680_mode_id {
+ OV2680_MODE_QUXGA_800_600,
+ OV2680_MODE_720P_1280_720,
+ OV2680_MODE_UXGA_1600_1200,
+ OV2680_MODE_MAX,
+};
+
+struct reg_value {
+ u16 reg_addr;
+ u8 val;
+};
+
+static const char * const ov2680_supply_name[] = {
+ "DOVDD",
+ "DVDD",
+ "AVDD",
+};
+
+#define OV2680_NUM_SUPPLIES ARRAY_SIZE(ov2680_supply_name)
+
+struct ov2680_mode_info {
+ const char *name;
+ enum ov2680_mode_id id;
+ u32 width;
+ u32 height;
+ const struct reg_value *reg_data;
+ u32 reg_data_size;
+};
+
+struct ov2680_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct {
+ struct v4l2_ctrl *auto_exp;
+ struct v4l2_ctrl *exposure;
+ };
+ struct {
+ struct v4l2_ctrl *auto_gain;
+ struct v4l2_ctrl *gain;
+ };
+
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *test_pattern;
+};
+
+struct ov2680_dev {
+ struct i2c_client *i2c_client;
+ struct v4l2_subdev sd;
+
+ struct media_pad pad;
+ struct clk *xvclk;
+ u32 xvclk_freq;
+ struct regulator_bulk_data supplies[OV2680_NUM_SUPPLIES];
+
+ struct gpio_desc *reset_gpio;
+ struct mutex lock; /* protect members */
+
+ bool mode_pending_changes;
+ bool is_enabled;
+ bool is_streaming;
+
+ struct ov2680_ctrls ctrls;
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_fract frame_interval;
+
+ const struct ov2680_mode_info *current_mode;
+};
+
+static const char * const test_pattern_menu[] = {
+ "Disabled",
+ "Color Bars",
+ "Random Data",
+ "Square",
+ "Black Image",
+};
+
+static const int ov2680_hv_flip_bayer_order[] = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+};
+
+static const struct reg_value ov2680_setting_30fps_QUXGA_800_600[] = {
+ {0x3086, 0x01}, {0x370a, 0x23}, {0x3808, 0x03}, {0x3809, 0x20},
+ {0x380a, 0x02}, {0x380b, 0x58}, {0x380c, 0x06}, {0x380d, 0xac},
+ {0x380e, 0x02}, {0x380f, 0x84}, {0x3811, 0x04}, {0x3813, 0x04},
+ {0x3814, 0x31}, {0x3815, 0x31}, {0x3820, 0xc0}, {0x4008, 0x00},
+ {0x4009, 0x03}, {0x4837, 0x1e}, {0x3501, 0x4e}, {0x3502, 0xe0},
+};
+
+static const struct reg_value ov2680_setting_30fps_720P_1280_720[] = {
+ {0x3086, 0x00}, {0x3808, 0x05}, {0x3809, 0x00}, {0x380a, 0x02},
+ {0x380b, 0xd0}, {0x380c, 0x06}, {0x380d, 0xa8}, {0x380e, 0x05},
+ {0x380f, 0x0e}, {0x3811, 0x08}, {0x3813, 0x06}, {0x3814, 0x11},
+ {0x3815, 0x11}, {0x3820, 0xc0}, {0x4008, 0x00},
+};
+
+static const struct reg_value ov2680_setting_30fps_UXGA_1600_1200[] = {
+ {0x3086, 0x00}, {0x3501, 0x4e}, {0x3502, 0xe0}, {0x3808, 0x06},
+ {0x3809, 0x40}, {0x380a, 0x04}, {0x380b, 0xb0}, {0x380c, 0x06},
+ {0x380d, 0xa8}, {0x380e, 0x05}, {0x380f, 0x0e}, {0x3811, 0x00},
+ {0x3813, 0x00}, {0x3814, 0x11}, {0x3815, 0x11}, {0x3820, 0xc0},
+ {0x4008, 0x00}, {0x4837, 0x18}
+};
+
+static const struct ov2680_mode_info ov2680_mode_init_data = {
+ "mode_quxga_800_600", OV2680_MODE_QUXGA_800_600, 800, 600,
+ ov2680_setting_30fps_QUXGA_800_600,
+ ARRAY_SIZE(ov2680_setting_30fps_QUXGA_800_600),
+};
+
+static const struct ov2680_mode_info ov2680_mode_data[OV2680_MODE_MAX] = {
+ {"mode_quxga_800_600", OV2680_MODE_QUXGA_800_600,
+ 800, 600, ov2680_setting_30fps_QUXGA_800_600,
+ ARRAY_SIZE(ov2680_setting_30fps_QUXGA_800_600)},
+ {"mode_720p_1280_720", OV2680_MODE_720P_1280_720,
+ 1280, 720, ov2680_setting_30fps_720P_1280_720,
+ ARRAY_SIZE(ov2680_setting_30fps_720P_1280_720)},
+ {"mode_uxga_1600_1200", OV2680_MODE_UXGA_1600_1200,
+ 1600, 1200, ov2680_setting_30fps_UXGA_1600_1200,
+ ARRAY_SIZE(ov2680_setting_30fps_UXGA_1600_1200)},
+};
+
+static struct ov2680_dev *to_ov2680_dev(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ov2680_dev, sd);
+}
+
+static struct device *ov2680_to_dev(struct ov2680_dev *sensor)
+{
+ return &sensor->i2c_client->dev;
+}
+
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct ov2680_dev,
+ ctrls.handler)->sd;
+}
+
+static int __ov2680_write_reg(struct ov2680_dev *sensor, u16 reg,
+ unsigned int len, u32 val)
+{
+ struct i2c_client *client = sensor->i2c_client;
+ u8 buf[6];
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
+ ret = i2c_master_send(client, buf, len + 2);
+ if (ret != len + 2) {
+ dev_err(&client->dev, "write error: reg=0x%4x: %d\n", reg, ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+#define ov2680_write_reg(s, r, v) \
+ __ov2680_write_reg(s, r, OV2680_REG_VALUE_8BIT, v)
+
+#define ov2680_write_reg16(s, r, v) \
+ __ov2680_write_reg(s, r, OV2680_REG_VALUE_16BIT, v)
+
+#define ov2680_write_reg24(s, r, v) \
+ __ov2680_write_reg(s, r, OV2680_REG_VALUE_24BIT, v)
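+
+/*
+ * Worked example of the register packing above (illustrative only):
+ * a 16-bit write of 0x06a8 to OV2680_REG_TIMING_HTS (0x380c) shifts
+ * the value to 0x06a80000, put_unaligned_be32() stores { 0x06, 0xa8,
+ * 0x00, 0x00 } after the two address bytes, and only len + 2 = 4
+ * bytes go out on the wire: { 0x38, 0x0c, 0x06, 0xa8 }.
+ */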
+
+static int __ov2680_read_reg(struct ov2680_dev *sensor, u16 reg,
+ unsigned int len, u32 *val)
+{
+ struct i2c_client *client = sensor->i2c_client;
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2] = { reg >> 8, reg & 0xff };
+ u8 data_buf[4] = { 0, };
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = ARRAY_SIZE(addr_buf);
+ msgs[0].buf = addr_buf;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ dev_err(&client->dev, "read error: reg=0x%4x: %d\n", reg, ret);
+ return -EIO;
+ }
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+#define ov2680_read_reg(s, r, v) \
+ __ov2680_read_reg(s, r, OV2680_REG_VALUE_8BIT, v)
+
+#define ov2680_read_reg16(s, r, v) \
+ __ov2680_read_reg(s, r, OV2680_REG_VALUE_16BIT, v)
+
+#define ov2680_read_reg24(s, r, v) \
+ __ov2680_read_reg(s, r, OV2680_REG_VALUE_24BIT, v)
+
+static int ov2680_mod_reg(struct ov2680_dev *sensor, u16 reg, u8 mask, u8 val)
+{
+ u32 readval;
+ int ret;
+
+ ret = ov2680_read_reg(sensor, reg, &readval);
+ if (ret < 0)
+ return ret;
+
+ readval &= ~mask;
+ val &= mask;
+ val |= readval;
+
+ return ov2680_write_reg(sensor, reg, val);
+}
+
+static int ov2680_load_regs(struct ov2680_dev *sensor,
+ const struct ov2680_mode_info *mode)
+{
+ const struct reg_value *regs = mode->reg_data;
+ unsigned int i;
+ int ret = 0;
+ u16 reg_addr;
+ u8 val;
+
+ for (i = 0; i < mode->reg_data_size; ++i, ++regs) {
+ reg_addr = regs->reg_addr;
+ val = regs->val;
+
+ ret = ov2680_write_reg(sensor, reg_addr, val);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static void ov2680_power_up(struct ov2680_dev *sensor)
+{
+ if (!sensor->reset_gpio)
+ return;
+
+ gpiod_set_value(sensor->reset_gpio, 0);
+ usleep_range(5000, 10000);
+}
+
+static void ov2680_power_down(struct ov2680_dev *sensor)
+{
+ if (!sensor->reset_gpio)
+ return;
+
+ gpiod_set_value(sensor->reset_gpio, 1);
+ usleep_range(5000, 10000);
+}
+
+static int ov2680_bayer_order(struct ov2680_dev *sensor)
+{
+ u32 format1;
+ u32 format2;
+ u32 hv_flip;
+ int ret;
+
+ ret = ov2680_read_reg(sensor, OV2680_REG_FORMAT1, &format1);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_read_reg(sensor, OV2680_REG_FORMAT2, &format2);
+ if (ret < 0)
+ return ret;
+
+ /* Build the index: bit 0 = vflip (FORMAT1[2]), bit 1 = hflip (FORMAT2[2]). */
+ hv_flip = ((format2 & BIT(2)) >> 1) | ((format1 & BIT(2)) >> 2);
+
+ sensor->fmt.code = ov2680_hv_flip_bayer_order[hv_flip];
+
+ return 0;
+}
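+
+/*
+ * Resulting mapping (illustrative): with bit 0 = vflip and bit 1 =
+ * hflip, the native BGGR readout becomes:
+ *
+ * 0b00 -> SBGGR10, 0b01 (vflip) -> SGRBG10,
+ * 0b10 (hflip) -> SGBRG10, 0b11 (both) -> SRGGB10
+ *
+ * which is exactly the order of ov2680_hv_flip_bayer_order[] above.
+ */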
+
+static int ov2680_vflip_enable(struct ov2680_dev *sensor)
+{
+ int ret;
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), BIT(2));
+ if (ret < 0)
+ return ret;
+
+ return ov2680_bayer_order(sensor);
+}
+
+static int ov2680_vflip_disable(struct ov2680_dev *sensor)
+{
+ int ret;
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), 0);
+ if (ret < 0)
+ return ret;
+
+ return ov2680_bayer_order(sensor);
+}
+
+static int ov2680_hflip_enable(struct ov2680_dev *sensor)
+{
+ int ret;
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, BIT(2), BIT(2));
+ if (ret < 0)
+ return ret;
+
+ return ov2680_bayer_order(sensor);
+}
+
+static int ov2680_hflip_disable(struct ov2680_dev *sensor)
+{
+ int ret;
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, BIT(2), 0);
+ if (ret < 0)
+ return ret;
+
+ return ov2680_bayer_order(sensor);
+}
+
+static int ov2680_test_pattern_set(struct ov2680_dev *sensor, int value)
+{
+ int ret;
+
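+ /*
+ * A control value of 0 disables the test pattern generator;
+ * non-zero values program pattern (value - 1) into ISP_CTRL00[1:0]
+ * and then set the enable bit.
+ */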
+ if (!value)
+ return ov2680_mod_reg(sensor, OV2680_REG_ISP_CTRL00, BIT(7), 0);
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_ISP_CTRL00, 0x03, value - 1);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_ISP_CTRL00, BIT(7), BIT(7));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int ov2680_gain_set(struct ov2680_dev *sensor, bool auto_gain)
+{
+ struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ u32 gain;
+ int ret;
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_R_MANUAL, BIT(1),
+ auto_gain ? 0 : BIT(1));
+ if (ret < 0)
+ return ret;
+
+ if (auto_gain || !ctrls->gain->is_new)
+ return 0;
+
+ gain = ctrls->gain->val;
+
+ return ov2680_write_reg16(sensor, OV2680_REG_GAIN_PK, gain);
+}
+
+static int ov2680_gain_get(struct ov2680_dev *sensor)
+{
+ u32 gain;
+ int ret;
+
+ ret = ov2680_read_reg16(sensor, OV2680_REG_GAIN_PK, &gain);
+ if (ret)
+ return ret;
+
+ return gain;
+}
+
+static int ov2680_exposure_set(struct ov2680_dev *sensor, bool auto_exp)
+{
+ struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ u32 exp;
+ int ret;
+
+ ret = ov2680_mod_reg(sensor, OV2680_REG_R_MANUAL, BIT(0),
+ auto_exp ? 0 : BIT(0));
+ if (ret < 0)
+ return ret;
+
+ if (auto_exp || !ctrls->exposure->is_new)
+ return 0;
+
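+ /* The low 4 register bits are fractional, so shift the line count. */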
+ exp = (u32)ctrls->exposure->val;
+ exp <<= 4;
+
+ return ov2680_write_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, exp);
+}
+
+static int ov2680_exposure_get(struct ov2680_dev *sensor)
+{
+ int ret;
+ u32 exp;
+
+ ret = ov2680_read_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, &exp);
+ if (ret)
+ return ret;
+
+ return exp >> 4;
+}
+
+static int ov2680_stream_enable(struct ov2680_dev *sensor)
+{
+ return ov2680_write_reg(sensor, OV2680_REG_STREAM_CTRL, 1);
+}
+
+static int ov2680_stream_disable(struct ov2680_dev *sensor)
+{
+ return ov2680_write_reg(sensor, OV2680_REG_STREAM_CTRL, 0);
+}
+
+static int ov2680_mode_set(struct ov2680_dev *sensor)
+{
+ struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ int ret;
+
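+ /*
+ * Program manual gain and exposure first, load the mode register
+ * table, then re-enable the auto modes afterwards if they are
+ * selected.
+ */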
+ ret = ov2680_gain_set(sensor, false);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_exposure_set(sensor, false);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_load_regs(sensor, sensor->current_mode);
+ if (ret < 0)
+ return ret;
+
+ if (ctrls->auto_gain->val) {
+ ret = ov2680_gain_set(sensor, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ctrls->auto_exp->val == V4L2_EXPOSURE_AUTO) {
+ ret = ov2680_exposure_set(sensor, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ sensor->mode_pending_changes = false;
+
+ return 0;
+}
+
+static int ov2680_mode_restore(struct ov2680_dev *sensor)
+{
+ int ret;
+
+ ret = ov2680_load_regs(sensor, &ov2680_mode_init_data);
+ if (ret < 0)
+ return ret;
+
+ return ov2680_mode_set(sensor);
+}
+
+static int ov2680_power_off(struct ov2680_dev *sensor)
+{
+ if (!sensor->is_enabled)
+ return 0;
+
+ clk_disable_unprepare(sensor->xvclk);
+ ov2680_power_down(sensor);
+ regulator_bulk_disable(OV2680_NUM_SUPPLIES, sensor->supplies);
+ sensor->is_enabled = false;
+
+ return 0;
+}
+
+static int ov2680_power_on(struct ov2680_dev *sensor)
+{
+ struct device *dev = ov2680_to_dev(sensor);
+ int ret;
+
+ if (sensor->is_enabled)
+ return 0;
+
+ ret = regulator_bulk_enable(OV2680_NUM_SUPPLIES, sensor->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
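+ /*
+ * Without a dedicated reset line, bring the sensor into a known
+ * state with its software reset register instead of toggling the
+ * hardware reset GPIO.
+ */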
+ if (!sensor->reset_gpio) {
+ ret = ov2680_write_reg(sensor, OV2680_REG_SOFT_RESET, 0x01);
+ if (ret != 0) {
+ dev_err(dev, "sensor soft reset failed\n");
+ return ret;
+ }
+ usleep_range(1000, 2000);
+ } else {
+ ov2680_power_down(sensor);
+ ov2680_power_up(sensor);
+ }
+
+ ret = clk_prepare_enable(sensor->xvclk);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_mode_restore(sensor);
+ if (ret < 0)
+ goto disable;
+
+ sensor->is_enabled = true;
+
+ /* Set clock lane into LP-11 state */
+ ov2680_stream_enable(sensor);
+ usleep_range(1000, 2000);
+ ov2680_stream_disable(sensor);
+
+ return 0;
+
+disable:
+ dev_err(dev, "failed to enable sensor: %d\n", ret);
+ ov2680_power_off(sensor);
+
+ return ret;
+}
+
+static int ov2680_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ int ret = 0;
+
+ mutex_lock(&sensor->lock);
+
+ if (on)
+ ret = ov2680_power_on(sensor);
+ else
+ ret = ov2680_power_off(sensor);
+
+ mutex_unlock(&sensor->lock);
+
+ if (on && ret == 0) {
+ ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ov2680_s_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+
+ mutex_lock(&sensor->lock);
+ fi->interval = sensor->frame_interval;
+ mutex_unlock(&sensor->lock);
+
+ return 0;
+}
+
+static int ov2680_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ int ret = 0;
+
+ mutex_lock(&sensor->lock);
+
+ if (sensor->is_streaming == !!enable)
+ goto unlock;
+
+ if (enable && sensor->mode_pending_changes) {
+ ret = ov2680_mode_set(sensor);
+ if (ret < 0)
+ goto unlock;
+ }
+
+ if (enable)
+ ret = ov2680_stream_enable(sensor);
+ else
+ ret = ov2680_stream_disable(sensor);
+
+ sensor->is_streaming = !!enable;
+
+unlock:
+ mutex_unlock(&sensor->lock);
+
+ return ret;
+}
+
+static int ov2680_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+
+ if (code->pad != 0 || code->index != 0)
+ return -EINVAL;
+
+ code->code = sensor->fmt.code;
+
+ return 0;
+}
+
+static int ov2680_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ struct v4l2_mbus_framefmt *fmt = NULL;
+ int ret = 0;
+
+ if (format->pad != 0)
+ return -EINVAL;
+
+ mutex_lock(&sensor->lock);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ fmt = v4l2_subdev_get_try_format(&sensor->sd, cfg, format->pad);
+#else
+ ret = -ENOTTY;
+#endif
+ } else {
+ fmt = &sensor->fmt;
+ }
+
+ if (fmt)
+ format->format = *fmt;
+
+ mutex_unlock(&sensor->lock);
+
+ return ret;
+}
+
+static int ov2680_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ struct v4l2_mbus_framefmt *try_fmt;
+#endif
+ const struct ov2680_mode_info *mode;
+ int ret = 0;
+
+ if (format->pad != 0)
+ return -EINVAL;
+
+ mutex_lock(&sensor->lock);
+
+ if (sensor->is_streaming) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ mode = v4l2_find_nearest_size(ov2680_mode_data,
+ ARRAY_SIZE(ov2680_mode_data), width,
+ height, fmt->width, fmt->height);
+ if (!mode) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ try_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ format->format = *try_fmt;
+#else
+ ret = -ENOTTY;
+#endif
+
+ goto unlock;
+ }
+
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = sensor->fmt.code;
+ fmt->colorspace = sensor->fmt.colorspace;
+
+ sensor->current_mode = mode;
+ sensor->fmt = format->format;
+ sensor->mode_pending_changes = true;
+
+unlock:
+ mutex_unlock(&sensor->lock);
+
+ return ret;
+}
+
+static int ov2680_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = cfg ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
+ .format = {
+ .width = 800,
+ .height = 600,
+ }
+ };
+
+ return ov2680_set_fmt(sd, cfg, &fmt);
+}
+
+static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+
+ if (index >= OV2680_MODE_MAX || index < 0)
+ return -EINVAL;
+
+ fse->min_width = ov2680_mode_data[index].width;
+ fse->min_height = ov2680_mode_data[index].height;
+ fse->max_width = ov2680_mode_data[index].width;
+ fse->max_height = ov2680_mode_data[index].height;
+
+ return 0;
+}
+
+static int ov2680_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct v4l2_fract tpf;
+
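+ /* All modes run at the single fixed OV2680_FRAME_RATE. */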
+ if (fie->index >= OV2680_MODE_MAX || fie->width > OV2680_WIDTH_MAX ||
+ fie->height > OV2680_HEIGHT_MAX ||
+ fie->which > V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ tpf.denominator = OV2680_FRAME_RATE;
+ tpf.numerator = 1;
+
+ fie->interval = tpf;
+
+ return 0;
+}
+
+static int ov2680_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ int val;
+
+ if (!sensor->is_enabled)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ val = ov2680_gain_get(sensor);
+ if (val < 0)
+ return val;
+ ctrls->gain->val = val;
+ break;
+ case V4L2_CID_EXPOSURE:
+ val = ov2680_exposure_get(sensor);
+ if (val < 0)
+ return val;
+ ctrls->exposure->val = val;
+ break;
+ }
+
+ return 0;
+}
+
+static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ struct ov2680_ctrls *ctrls = &sensor->ctrls;
+
+ if (!sensor->is_enabled)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ return ov2680_gain_set(sensor, !!ctrl->val);
+ case V4L2_CID_GAIN:
+ return ov2680_gain_set(sensor, !!ctrls->auto_gain->val);
+ case V4L2_CID_EXPOSURE_AUTO:
+ return ov2680_exposure_set(sensor,
+ ctrl->val == V4L2_EXPOSURE_AUTO);
+ case V4L2_CID_EXPOSURE:
+ return ov2680_exposure_set(sensor,
+ ctrls->auto_exp->val == V4L2_EXPOSURE_AUTO);
+ case V4L2_CID_VFLIP:
+ if (sensor->is_streaming)
+ return -EBUSY;
+ if (ctrl->val)
+ return ov2680_vflip_enable(sensor);
+ else
+ return ov2680_vflip_disable(sensor);
+ case V4L2_CID_HFLIP:
+ if (sensor->is_streaming)
+ return -EBUSY;
+ if (ctrl->val)
+ return ov2680_hflip_enable(sensor);
+ else
+ return ov2680_hflip_disable(sensor);
+ case V4L2_CID_TEST_PATTERN:
+ return ov2680_test_pattern_set(sensor, ctrl->val);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops ov2680_ctrl_ops = {
+ .g_volatile_ctrl = ov2680_g_volatile_ctrl,
+ .s_ctrl = ov2680_s_ctrl,
+};
+
+static const struct v4l2_subdev_core_ops ov2680_core_ops = {
+ .s_power = ov2680_s_power,
+};
+
+static const struct v4l2_subdev_video_ops ov2680_video_ops = {
+ .g_frame_interval = ov2680_s_g_frame_interval,
+ .s_frame_interval = ov2680_s_g_frame_interval,
+ .s_stream = ov2680_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov2680_pad_ops = {
+ .init_cfg = ov2680_init_cfg,
+ .enum_mbus_code = ov2680_enum_mbus_code,
+ .get_fmt = ov2680_get_fmt,
+ .set_fmt = ov2680_set_fmt,
+ .enum_frame_size = ov2680_enum_frame_size,
+ .enum_frame_interval = ov2680_enum_frame_interval,
+};
+
+static const struct v4l2_subdev_ops ov2680_subdev_ops = {
+ .core = &ov2680_core_ops,
+ .video = &ov2680_video_ops,
+ .pad = &ov2680_pad_ops,
+};
+
+static int ov2680_mode_init(struct ov2680_dev *sensor)
+{
+ const struct ov2680_mode_info *init_mode;
+
+ /* set initial mode */
+ sensor->fmt.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ sensor->fmt.width = 800;
+ sensor->fmt.height = 600;
+ sensor->fmt.field = V4L2_FIELD_NONE;
+ sensor->fmt.colorspace = V4L2_COLORSPACE_SRGB;
+
+ sensor->frame_interval.denominator = OV2680_FRAME_RATE;
+ sensor->frame_interval.numerator = 1;
+
+ init_mode = &ov2680_mode_init_data;
+
+ sensor->current_mode = init_mode;
+
+ sensor->mode_pending_changes = true;
+
+ return 0;
+}
+
+static int ov2680_v4l2_init(struct ov2680_dev *sensor)
+{
+ const struct v4l2_ctrl_ops *ops = &ov2680_ctrl_ops;
+ struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ int ret = 0;
+
+ v4l2_i2c_subdev_init(&sensor->sd, sensor->i2c_client,
+ &ov2680_subdev_ops);
+
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ sensor->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+#endif
+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
+ if (ret < 0)
+ return ret;
+
+ v4l2_ctrl_handler_init(hdl, 7);
+
+ hdl->lock = &sensor->lock;
+
+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ ctrls->test_pattern = v4l2_ctrl_new_std_menu_items(hdl,
+ &ov2680_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(test_pattern_menu) - 1,
+ 0, 0, test_pattern_menu);
+
+ ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0,
+ V4L2_EXPOSURE_AUTO);
+
+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
+ 0, 32767, 1, 0);
+
+ ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
+ 0, 1, 1, 1);
+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, 0, 2047, 1, 0);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ goto cleanup_entity;
+ }
+
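+ /*
+ * Gain and exposure are volatile and clustered with their auto
+ * controls, so the framework reads back the values computed by the
+ * sensor while autogain/auto-exposure are active.
+ */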
+ ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_gain, 0, true);
+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_exp, 1, true);
+
+ sensor->sd.ctrl_handler = hdl;
+
+ ret = v4l2_async_register_subdev(&sensor->sd);
+ if (ret < 0)
+ goto cleanup_entity;
+
+ return 0;
+
+cleanup_entity:
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_ctrl_handler_free(hdl);
+
+ return ret;
+}
+
+static int ov2680_get_regulators(struct ov2680_dev *sensor)
+{
+ int i;
+
+ for (i = 0; i < OV2680_NUM_SUPPLIES; i++)
+ sensor->supplies[i].supply = ov2680_supply_name[i];
+
+ return devm_regulator_bulk_get(&sensor->i2c_client->dev,
+ OV2680_NUM_SUPPLIES,
+ sensor->supplies);
+}
+
+static int ov2680_check_id(struct ov2680_dev *sensor)
+{
+ struct device *dev = ov2680_to_dev(sensor);
+ u32 chip_id;
+ int ret;
+
+ ret = ov2680_power_on(sensor);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_read_reg16(sensor, OV2680_REG_CHIP_ID_HIGH, &chip_id);
+ if (ret < 0) {
+ dev_err(dev, "failed to read chip id high\n");
+ return -ENODEV;
+ }
+
+ if (chip_id != OV2680_CHIP_ID) {
+ dev_err(dev, "chip id: 0x%04x does not match expected 0x%04x\n",
+ chip_id, OV2680_CHIP_ID);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int ov2680_parse_dt(struct ov2680_dev *sensor)
+{
+ struct device *dev = ov2680_to_dev(sensor);
+ int ret;
+
+ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(sensor->reset_gpio);
+ if (ret < 0) {
+ dev_dbg(dev, "error while getting reset gpio: %d\n", ret);
+ return ret;
+ }
+
+ sensor->xvclk = devm_clk_get(dev, "xvclk");
+ if (IS_ERR(sensor->xvclk)) {
+ dev_err(dev, "xvclk clock missing or invalid\n");
+ return PTR_ERR(sensor->xvclk);
+ }
+
+ sensor->xvclk_freq = clk_get_rate(sensor->xvclk);
+ if (sensor->xvclk_freq != OV2680_XVCLK_VALUE) {
+ dev_err(dev, "wrong xvclk frequency %d Hz, expected: %d Hz\n",
+ sensor->xvclk_freq, OV2680_XVCLK_VALUE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ov2680_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct ov2680_dev *sensor;
+ int ret;
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor->i2c_client = client;
+
+ ret = ov2680_parse_dt(sensor);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_mode_init(sensor);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2680_get_regulators(sensor);
+ if (ret < 0) {
+ dev_err(dev, "failed to get regulators\n");
+ return ret;
+ }
+
+ mutex_init(&sensor->lock);
+
+ ret = ov2680_v4l2_init(sensor);
+ if (ret < 0)
+ goto lock_destroy;
+
+ ret = ov2680_check_id(sensor);
+ if (ret < 0)
+ goto error_cleanup;
+
+ dev_info(dev, "ov2680 initialized correctly\n");
+
+ return 0;
+
+error_cleanup:
+ dev_err(dev, "ov2680 init failed: %d\n", ret);
+
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_async_unregister_subdev(&sensor->sd);
+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+
+lock_destroy:
+ mutex_destroy(&sensor->lock);
+
+ return ret;
+}
+
+static int ov2680_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+
+ v4l2_async_unregister_subdev(&sensor->sd);
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ mutex_destroy(&sensor->lock);
+
+ return 0;
+}
+
+static int __maybe_unused ov2680_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+
+ if (sensor->is_streaming)
+ ov2680_stream_disable(sensor);
+
+ return 0;
+}
+
+static int __maybe_unused ov2680_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ int ret;
+
+ if (sensor->is_streaming) {
+ ret = ov2680_stream_enable(sensor);
+ if (ret < 0)
+ goto stream_disable;
+ }
+
+ return 0;
+
+stream_disable:
+ ov2680_stream_disable(sensor);
+ sensor->is_streaming = false;
+
+ return ret;
+}
+
+static const struct dev_pm_ops ov2680_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ov2680_suspend, ov2680_resume)
+};
+
+static const struct of_device_id ov2680_dt_ids[] = {
+ { .compatible = "ovti,ov2680" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ov2680_dt_ids);
+
+static struct i2c_driver ov2680_i2c_driver = {
+ .driver = {
+ .name = "ov2680",
+ .pm = &ov2680_pm_ops,
+ .of_match_table = of_match_ptr(ov2680_dt_ids),
+ },
+ .probe_new = ov2680_probe,
+ .remove = ov2680_remove,
+};
+module_i2c_driver(ov2680_i2c_driver);
+
+MODULE_AUTHOR("Rui Miguel Silva <rui.silva@linaro.org>");
+MODULE_DESCRIPTION("OV2680 CMOS Image Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index f6e40cc9745c..071f4bc240ca 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -30,7 +30,7 @@
/* min/typical/max system clock (xclk) frequencies */
#define OV5640_XCLK_MIN 6000000
-#define OV5640_XCLK_MAX 24000000
+#define OV5640_XCLK_MAX 54000000
#define OV5640_DEFAULT_SLAVE_ID 0x3c
@@ -64,6 +64,7 @@
#define OV5640_REG_TIMING_DVPVO 0x380a
#define OV5640_REG_TIMING_HTS 0x380c
#define OV5640_REG_TIMING_VTS 0x380e
+#define OV5640_REG_TIMING_TC_REG20 0x3820
#define OV5640_REG_TIMING_TC_REG21 0x3821
#define OV5640_REG_AEC_CTRL00 0x3a00
#define OV5640_REG_AEC_B50_STEP 0x3a08
@@ -199,6 +200,8 @@ struct ov5640_ctrls {
struct v4l2_ctrl *contrast;
struct v4l2_ctrl *hue;
struct v4l2_ctrl *test_pattern;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
};
struct ov5640_dev {
@@ -212,6 +215,7 @@ struct ov5640_dev {
struct regulator_bulk_data supplies[OV5640_NUM_SUPPLIES];
struct gpio_desc *reset_gpio;
struct gpio_desc *pwdn_gpio;
+ bool upside_down;
/* lock to protect all members below */
struct mutex lock;
@@ -341,7 +345,7 @@ static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
static const struct reg_value ov5640_setting_30fps_VGA_640_480[] = {
{0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -360,7 +364,7 @@ static const struct reg_value ov5640_setting_30fps_VGA_640_480[] = {
static const struct reg_value ov5640_setting_15fps_VGA_640_480[] = {
{0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -379,7 +383,7 @@ static const struct reg_value ov5640_setting_15fps_VGA_640_480[] = {
static const struct reg_value ov5640_setting_30fps_XGA_1024_768[] = {
{0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -399,7 +403,7 @@ static const struct reg_value ov5640_setting_30fps_XGA_1024_768[] = {
static const struct reg_value ov5640_setting_15fps_XGA_1024_768[] = {
{0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -418,7 +422,7 @@ static const struct reg_value ov5640_setting_15fps_XGA_1024_768[] = {
static const struct reg_value ov5640_setting_30fps_QVGA_320_240[] = {
{0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -437,7 +441,7 @@ static const struct reg_value ov5640_setting_30fps_QVGA_320_240[] = {
static const struct reg_value ov5640_setting_15fps_QVGA_320_240[] = {
{0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -456,7 +460,7 @@ static const struct reg_value ov5640_setting_15fps_QVGA_320_240[] = {
static const struct reg_value ov5640_setting_30fps_QCIF_176_144[] = {
{0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -475,7 +479,7 @@ static const struct reg_value ov5640_setting_30fps_QCIF_176_144[] = {
static const struct reg_value ov5640_setting_15fps_QCIF_176_144[] = {
{0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -494,7 +498,7 @@ static const struct reg_value ov5640_setting_15fps_QCIF_176_144[] = {
static const struct reg_value ov5640_setting_30fps_NTSC_720_480[] = {
{0x3035, 0x12, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -513,7 +517,7 @@ static const struct reg_value ov5640_setting_30fps_NTSC_720_480[] = {
static const struct reg_value ov5640_setting_15fps_NTSC_720_480[] = {
{0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -532,7 +536,7 @@ static const struct reg_value ov5640_setting_15fps_NTSC_720_480[] = {
static const struct reg_value ov5640_setting_30fps_PAL_720_576[] = {
{0x3035, 0x12, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -551,7 +555,7 @@ static const struct reg_value ov5640_setting_30fps_PAL_720_576[] = {
static const struct reg_value ov5640_setting_15fps_PAL_720_576[] = {
{0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
@@ -571,7 +575,7 @@ static const struct reg_value ov5640_setting_30fps_720P_1280_720[] = {
{0x3008, 0x42, 0, 0},
{0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
@@ -591,7 +595,7 @@ static const struct reg_value ov5640_setting_30fps_720P_1280_720[] = {
static const struct reg_value ov5640_setting_15fps_720P_1280_720[] = {
{0x3035, 0x41, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
@@ -611,7 +615,7 @@ static const struct reg_value ov5640_setting_30fps_1080P_1920_1080[] = {
{0x3008, 0x42, 0, 0},
{0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
+ {0x3814, 0x11, 0, 0},
{0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
@@ -644,7 +648,7 @@ static const struct reg_value ov5640_setting_15fps_1080P_1920_1080[] = {
{0x3008, 0x42, 0, 0},
{0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
+ {0x3814, 0x11, 0, 0},
{0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
@@ -673,10 +677,9 @@ static const struct reg_value ov5640_setting_15fps_1080P_1920_1080[] = {
};
static const struct reg_value ov5640_setting_15fps_QSXGA_2592_1944[] = {
- {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0},
{0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
+ {0x3814, 0x11, 0, 0},
{0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
{0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
{0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
@@ -1340,6 +1343,27 @@ static int ov5640_binning_on(struct ov5640_dev *sensor)
return temp ? 1 : 0;
}
+static int ov5640_set_binning(struct ov5640_dev *sensor, bool enable)
+{
+ int ret;
+
+ /*
+ * TIMING TC REG21:
+ * - [0]: Horizontal binning enable
+ */
+ ret = ov5640_mod_reg(sensor, OV5640_REG_TIMING_TC_REG21,
+ BIT(0), enable ? BIT(0) : 0);
+ if (ret)
+ return ret;
+ /*
+ * TIMING TC REG20:
+ * - [0]: Undocumented, but the hardcoded init sequences always set
+ * REG20 bit 0 to the same value as REG21 bit 0...
+ */
+ return ov5640_mod_reg(sensor, OV5640_REG_TIMING_TC_REG20,
+ BIT(0), enable ? BIT(0) : 0);
+}
+
static int ov5640_set_virtual_channel(struct ov5640_dev *sensor)
{
struct i2c_client *client = sensor->i2c_client;
@@ -1389,24 +1413,16 @@ static const struct ov5640_mode_info *
ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
int width, int height, bool nearest)
{
- const struct ov5640_mode_info *mode = NULL;
- int i;
-
- for (i = OV5640_NUM_MODES - 1; i >= 0; i--) {
- mode = &ov5640_mode_data[fr][i];
-
- if (!mode->reg_data)
- continue;
+ const struct ov5640_mode_info *mode;
- if ((nearest && mode->hact <= width &&
- mode->vact <= height) ||
- (!nearest && mode->hact == width &&
- mode->vact == height))
- break;
- }
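+ /*
+ * v4l2_find_nearest_size() returns the closest supported size; when
+ * an exact match is required, reject any approximation.
+ */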
+ mode = v4l2_find_nearest_size(ov5640_mode_data[fr],
+ ARRAY_SIZE(ov5640_mode_data[fr]),
+ hact, vact,
+ width, height);
- if (nearest && i < 0)
- mode = &ov5640_mode_data[fr][0];
+ if (!mode ||
+ (!nearest && (mode->hact != width || mode->vact != height)))
+ return NULL;
return mode;
}
@@ -1640,6 +1656,9 @@ static int ov5640_set_mode(struct ov5640_dev *sensor,
if (ret < 0)
return ret;
+ ret = ov5640_set_binning(sensor, dn_mode != SCALING);
+ if (ret < 0)
+ return ret;
ret = ov5640_set_ae_target(sensor, sensor->ae_target);
if (ret < 0)
return ret;
@@ -1947,9 +1966,11 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
goto out;
}
- sensor->current_mode = new_mode;
- sensor->fmt = *mbus_fmt;
- sensor->pending_mode_change = true;
+ if (new_mode != sensor->current_mode) {
+ sensor->current_mode = new_mode;
+ sensor->fmt = *mbus_fmt;
+ sensor->pending_mode_change = true;
+ }
out:
mutex_unlock(&sensor->lock);
return ret;
@@ -2193,6 +2214,43 @@ static int ov5640_set_ctrl_light_freq(struct ov5640_dev *sensor, int value)
BIT(2) : 0);
}
+static int ov5640_set_ctrl_hflip(struct ov5640_dev *sensor, int value)
+{
+ /*
+ * If the sensor is mounted upside down, the mirror logic is
+ * inverted.
+ *
+ * The sensor is BSI (Back Side Illuminated), so the captured image
+ * is physically mirrored; the mirror logic is inverted to cancel
+ * that effect.
+ */
+
+ /*
+ * TIMING TC REG21:
+ * - [2]: ISP mirror
+ * - [1]: Sensor mirror
+ */
+ return ov5640_mod_reg(sensor, OV5640_REG_TIMING_TC_REG21,
+ BIT(2) | BIT(1),
+ (!(value ^ sensor->upside_down)) ?
+ (BIT(2) | BIT(1)) : 0);
+}
+
+static int ov5640_set_ctrl_vflip(struct ov5640_dev *sensor, int value)
+{
+ /* If the sensor is mounted upside down, the flip logic is inverted */
+
+ /*
+ * TIMING TC REG20:
+ * - [2]: ISP vflip
+ * - [1]: Sensor vflip
+ */
+ return ov5640_mod_reg(sensor, OV5640_REG_TIMING_TC_REG20,
+ BIT(2) | BIT(1),
+ (value ^ sensor->upside_down) ?
+ (BIT(2) | BIT(1)) : 0);
+}
+
static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
@@ -2264,6 +2322,12 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_POWER_LINE_FREQUENCY:
ret = ov5640_set_ctrl_light_freq(sensor, ctrl->val);
break;
+ case V4L2_CID_HFLIP:
+ ret = ov5640_set_ctrl_hflip(sensor, ctrl->val);
+ break;
+ case V4L2_CID_VFLIP:
+ ret = ov5640_set_ctrl_vflip(sensor, ctrl->val);
+ break;
default:
ret = -EINVAL;
break;
@@ -2325,6 +2389,10 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(test_pattern_menu) - 1,
0, 0, test_pattern_menu);
+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
ctrls->light_freq =
v4l2_ctrl_new_std_menu(hdl, ops,
@@ -2435,9 +2503,17 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
sensor->current_fr = frame_rate;
sensor->frame_interval = fi->interval;
- sensor->current_mode = ov5640_find_mode(sensor, frame_rate, mode->hact,
- mode->vact, true);
- sensor->pending_mode_change = true;
+ mode = ov5640_find_mode(sensor, frame_rate, mode->hact,
+ mode->vact, true);
+ if (!mode) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (mode != sensor->current_mode) {
+ sensor->current_mode = mode;
+ sensor->pending_mode_change = true;
+ }
out:
mutex_unlock(&sensor->lock);
return ret;
@@ -2558,6 +2634,7 @@ static int ov5640_probe(struct i2c_client *client,
struct fwnode_handle *endpoint;
struct ov5640_dev *sensor;
struct v4l2_mbus_framefmt *fmt;
+ u32 rotation;
int ret;
sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
@@ -2583,6 +2660,22 @@ static int ov5640_probe(struct i2c_client *client,
sensor->ae_target = 52;
+ /* optional indication of physical rotation of sensor */
+ ret = fwnode_property_read_u32(dev_fwnode(&client->dev), "rotation",
+ &rotation);
+ if (!ret) {
+ switch (rotation) {
+ case 180:
+ sensor->upside_down = true;
+ /* fall through */
+ case 0:
+ break;
+ default:
+ dev_warn(dev, "%u degrees rotation is not supported, ignoring...\n",
+ rotation);
+ }
+ }
+
endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev),
NULL);
if (!endpoint) {
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index b3f762578f7f..1722cdab0daf 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -510,8 +510,8 @@ static const struct reg_value ov5645_setting_full[] = {
};
static const s64 link_freq[] = {
- 222880000,
- 334320000
+ 224000000,
+ 336000000
};
static const struct ov5645_mode_info ov5645_mode_info_data[] = {
@@ -520,7 +520,7 @@ static const struct ov5645_mode_info ov5645_mode_info_data[] = {
.height = 960,
.data = ov5645_setting_sxga,
.data_size = ARRAY_SIZE(ov5645_setting_sxga),
- .pixel_clock = 111440000,
+ .pixel_clock = 112000000,
.link_freq = 0 /* an index in link_freq[] */
},
{
@@ -528,7 +528,7 @@ static const struct ov5645_mode_info ov5645_mode_info_data[] = {
.height = 1080,
.data = ov5645_setting_1080p,
.data_size = ARRAY_SIZE(ov5645_setting_1080p),
- .pixel_clock = 167160000,
+ .pixel_clock = 168000000,
.link_freq = 1 /* an index in link_freq[] */
},
{
@@ -536,7 +536,7 @@ static const struct ov5645_mode_info ov5645_mode_info_data[] = {
.height = 1944,
.data = ov5645_setting_full,
.data_size = ARRAY_SIZE(ov5645_setting_full),
- .pixel_clock = 167160000,
+ .pixel_clock = 168000000,
.link_freq = 1 /* an index in link_freq[] */
},
};
@@ -1145,7 +1145,8 @@ static int ov5645_probe(struct i2c_client *client,
return ret;
}
- if (xclk_freq != 23880000) {
+ /* external clock must be 24 MHz, allow 1% tolerance */
+ if (xclk_freq < 23760000 || xclk_freq > 24240000) {
dev_err(dev, "external clock frequency %u is not supported\n",
xclk_freq);
return -EINVAL;
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 3474ef832c1e..31bf577b0bd3 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -1744,14 +1744,12 @@ static int ov7670_parse_dt(struct device *dev,
return -EINVAL;
ret = v4l2_fwnode_endpoint_parse(ep, &bus_cfg);
- if (ret) {
- fwnode_handle_put(ep);
+ fwnode_handle_put(ep);
+ if (ret)
return ret;
- }
if (bus_cfg.bus_type != V4L2_MBUS_PARALLEL) {
dev_err(dev, "Unsupported media bus type\n");
- fwnode_handle_put(ep);
return ret;
}
info->mbus_config = bus_cfg.bus.parallel.flags;
diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
index e2550708abc8..7158c31d8403 100644
--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -419,11 +419,18 @@ struct ov772x_priv {
struct gpio_desc *rstb_gpio;
const struct ov772x_color_format *cfmt;
const struct ov772x_win_size *win;
- unsigned short flag_vflip:1;
- unsigned short flag_hflip:1;
+ struct v4l2_ctrl *vflip_ctrl;
+ struct v4l2_ctrl *hflip_ctrl;
/* band_filter = COM8[5] ? 256 - BDBASE : 0 */
- unsigned short band_filter;
+ struct v4l2_ctrl *band_filter_ctrl;
unsigned int fps;
+ /* lock to protect power_count and streaming */
+ struct mutex lock;
+ int power_count;
+ int streaming;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_pad pad;
+#endif
};
/*
@@ -542,9 +549,19 @@ static struct ov772x_priv *to_ov772x(struct v4l2_subdev *sd)
return container_of(sd, struct ov772x_priv, subdev);
}
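+/*
+ * Read a register with a plain address write followed by a separate
+ * read (stop condition in between) rather than an SMBus
+ * repeated-start transfer, so adapters without protocol mangling
+ * support can drive the SCCB-style sensor bus.
+ */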
-static inline int ov772x_read(struct i2c_client *client, u8 addr)
+static int ov772x_read(struct i2c_client *client, u8 addr)
{
- return i2c_smbus_read_byte_data(client, addr);
+ int ret;
+ u8 val;
+
+ ret = i2c_master_send(client, &addr, 1);
+ if (ret < 0)
+ return ret;
+ ret = i2c_master_recv(client, &val, 1);
+ if (ret < 0)
+ return ret;
+
+ return val;
}
static inline int ov772x_write(struct i2c_client *client, u8 addr, u8 value)
@@ -587,39 +604,40 @@ static int ov772x_s_stream(struct v4l2_subdev *sd, int enable)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(sd);
+ int ret = 0;
- if (!enable) {
- ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, SOFT_SLEEP_MODE);
- return 0;
- }
+ mutex_lock(&priv->lock);
- ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, 0);
+ if (priv->streaming == enable)
+ goto done;
- dev_dbg(&client->dev, "format %d, win %s\n",
- priv->cfmt->code, priv->win->name);
+ ret = ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE,
+ enable ? 0 : SOFT_SLEEP_MODE);
+ if (ret)
+ goto done;
- return 0;
+ if (enable) {
+ dev_dbg(&client->dev, "format %d, win %s\n",
+ priv->cfmt->code, priv->win->name);
+ }
+ priv->streaming = enable;
+
+done:
+ mutex_unlock(&priv->lock);
+
+ return ret;
}
-static int ov772x_set_frame_rate(struct ov772x_priv *priv,
- struct v4l2_fract *tpf,
- const struct ov772x_color_format *cfmt,
- const struct ov772x_win_size *win)
+static unsigned int ov772x_select_fps(struct ov772x_priv *priv,
+ struct v4l2_fract *tpf)
{
- struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
- unsigned long fin = clk_get_rate(priv->clk);
unsigned int fps = tpf->numerator ?
tpf->denominator / tpf->numerator :
tpf->denominator;
unsigned int best_diff;
- unsigned int fsize;
- unsigned int pclk;
unsigned int diff;
unsigned int idx;
unsigned int i;
- u8 clkrc = 0;
- u8 com4 = 0;
- int ret;
/* Approximate to the closest supported frame interval. */
best_diff = ~0L;
@@ -630,7 +648,25 @@ static int ov772x_set_frame_rate(struct ov772x_priv *priv,
best_diff = diff;
}
}
- fps = ov772x_frame_intervals[idx];
+
+ return ov772x_frame_intervals[idx];
+}
+
+static int ov772x_set_frame_rate(struct ov772x_priv *priv,
+ unsigned int fps,
+ const struct ov772x_color_format *cfmt,
+ const struct ov772x_win_size *win)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
+ unsigned long fin = clk_get_rate(priv->clk);
+ unsigned int best_diff;
+ unsigned int fsize;
+ unsigned int pclk;
+ unsigned int diff;
+ unsigned int i;
+ u8 clkrc = 0;
+ u8 com4 = 0;
+ int ret;
/* Use image size (with blankings) to calculate desired pixel clock. */
switch (cfmt->com7 & OFMT_MASK) {
@@ -695,10 +731,6 @@ static int ov772x_set_frame_rate(struct ov772x_priv *priv,
if (ret < 0)
return ret;
- tpf->numerator = 1;
- tpf->denominator = fps;
- priv->fps = tpf->denominator;
-
return 0;
}
@@ -719,8 +751,37 @@ static int ov772x_s_frame_interval(struct v4l2_subdev *sd,
{
struct ov772x_priv *priv = to_ov772x(sd);
struct v4l2_fract *tpf = &ival->interval;
+ unsigned int fps;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
- return ov772x_set_frame_rate(priv, tpf, priv->cfmt, priv->win);
+ if (priv->streaming) {
+ ret = -EBUSY;
+ goto error;
+ }
+
+ fps = ov772x_select_fps(priv, tpf);
+
+ /*
+ * If the device is not powered up by the host driver do
+ * not apply any changes to H/W at this time. Instead
+ * the frame rate will be restored right after power-up.
+ */
+ if (priv->power_count > 0) {
+ ret = ov772x_set_frame_rate(priv, fps, priv->cfmt, priv->win);
+ if (ret)
+ goto error;
+ }
+
+ tpf->numerator = 1;
+ tpf->denominator = fps;
+ priv->fps = fps;
+
+error:
+ mutex_unlock(&priv->lock);
+
+ return ret;
}
static int ov772x_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -732,17 +793,25 @@ static int ov772x_s_ctrl(struct v4l2_ctrl *ctrl)
int ret = 0;
u8 val;
+ /* v4l2_ctrl_lock() locks our own mutex */
+
+ /*
+ * If the device is not powered up by the host driver do
+ * not apply any controls to H/W at this time. Instead
+ * the controls will be restored right after power-up.
+ */
+ if (priv->power_count == 0)
+ return 0;
+
switch (ctrl->id) {
case V4L2_CID_VFLIP:
val = ctrl->val ? VFLIP_IMG : 0x00;
- priv->flag_vflip = ctrl->val;
- if (priv->info->flags & OV772X_FLAG_VFLIP)
+ if (priv->info && (priv->info->flags & OV772X_FLAG_VFLIP))
val ^= VFLIP_IMG;
return ov772x_mask_set(client, COM3, VFLIP_IMG, val);
case V4L2_CID_HFLIP:
val = ctrl->val ? HFLIP_IMG : 0x00;
- priv->flag_hflip = ctrl->val;
- if (priv->info->flags & OV772X_FLAG_HFLIP)
+ if (priv->info && (priv->info->flags & OV772X_FLAG_HFLIP))
val ^= HFLIP_IMG;
return ov772x_mask_set(client, COM3, HFLIP_IMG, val);
case V4L2_CID_BAND_STOP_FILTER:
@@ -761,8 +830,7 @@ static int ov772x_s_ctrl(struct v4l2_ctrl *ctrl)
ret = ov772x_mask_set(client, BDBASE,
0xff, val);
}
- if (!ret)
- priv->band_filter = ctrl->val;
+
return ret;
}
@@ -824,10 +892,10 @@ static int ov772x_power_on(struct ov772x_priv *priv)
* available to handle this cleanly, request the GPIO temporarily
* to avoid conflicts.
*/
- priv->rstb_gpio = gpiod_get_optional(&client->dev, "rstb",
+ priv->rstb_gpio = gpiod_get_optional(&client->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(priv->rstb_gpio)) {
- dev_info(&client->dev, "Unable to get GPIO \"rstb\"");
+ dev_info(&client->dev, "Unable to get GPIO \"reset\"");
return PTR_ERR(priv->rstb_gpio);
}
@@ -855,12 +923,45 @@ static int ov772x_power_off(struct ov772x_priv *priv)
return 0;
}
+static int ov772x_set_params(struct ov772x_priv *priv,
+ const struct ov772x_color_format *cfmt,
+ const struct ov772x_win_size *win);
+
static int ov772x_s_power(struct v4l2_subdev *sd, int on)
{
struct ov772x_priv *priv = to_ov772x(sd);
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ /*
+ * If the power count is modified from 0 to != 0 or from != 0 to 0,
+ * update the power state.
+ */
+ if (priv->power_count == !on) {
+ if (on) {
+ ret = ov772x_power_on(priv);
+ /*
+ * Restore the format, the frame rate, and
+ * the controls
+ */
+ if (!ret)
+ ret = ov772x_set_params(priv, priv->cfmt,
+ priv->win);
+ } else {
+ ret = ov772x_power_off(priv);
+ }
+ }
- return on ? ov772x_power_on(priv) :
- ov772x_power_off(priv);
+ if (!ret) {
+ /* Update the power count. */
+ priv->power_count += on ? 1 : -1;
+ WARN(priv->power_count < 0, "Unbalanced power count\n");
+ WARN(priv->power_count > 1, "Duplicated s_power call\n");
+ }
+
+ mutex_unlock(&priv->lock);
+
+ return ret;
}
static const struct ov772x_win_size *ov772x_select_win(u32 width, u32 height)
@@ -901,19 +1002,14 @@ static void ov772x_select_params(const struct v4l2_mbus_framefmt *mf,
*win = ov772x_select_win(mf->width, mf->height);
}
-static int ov772x_set_params(struct ov772x_priv *priv,
- const struct ov772x_color_format *cfmt,
- const struct ov772x_win_size *win)
+static int ov772x_edgectrl(struct ov772x_priv *priv)
{
struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
- struct v4l2_fract tpf;
int ret;
- u8 val;
- /* Reset hardware. */
- ov772x_reset(client);
+ if (!priv->info)
+ return 0;
- /* Edge Ctrl. */
if (priv->info->edgectrl.strength & OV772X_MANUAL_EDGE_CTRL) {
/*
* Manual Edge Control Mode.
@@ -924,19 +1020,19 @@ static int ov772x_set_params(struct ov772x_priv *priv,
ret = ov772x_mask_set(client, DSPAUTO, EDGE_ACTRL, 0x00);
if (ret < 0)
- goto ov772x_set_fmt_error;
+ return ret;
ret = ov772x_mask_set(client,
EDGE_TRSHLD, OV772X_EDGE_THRESHOLD_MASK,
priv->info->edgectrl.threshold);
if (ret < 0)
- goto ov772x_set_fmt_error;
+ return ret;
ret = ov772x_mask_set(client,
EDGE_STRNGT, OV772X_EDGE_STRENGTH_MASK,
priv->info->edgectrl.strength);
if (ret < 0)
- goto ov772x_set_fmt_error;
+ return ret;
} else if (priv->info->edgectrl.upper > priv->info->edgectrl.lower) {
/*
@@ -948,15 +1044,34 @@ static int ov772x_set_params(struct ov772x_priv *priv,
EDGE_UPPER, OV772X_EDGE_UPPER_MASK,
priv->info->edgectrl.upper);
if (ret < 0)
- goto ov772x_set_fmt_error;
+ return ret;
ret = ov772x_mask_set(client,
EDGE_LOWER, OV772X_EDGE_LOWER_MASK,
priv->info->edgectrl.lower);
if (ret < 0)
- goto ov772x_set_fmt_error;
+ return ret;
}
+ return 0;
+}
+
+static int ov772x_set_params(struct ov772x_priv *priv,
+ const struct ov772x_color_format *cfmt,
+ const struct ov772x_win_size *win)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
+ int ret;
+ u8 val;
+
+ /* Reset hardware. */
+ ov772x_reset(client);
+
+ /* Edge Ctrl. */
+ ret = ov772x_edgectrl(priv);
+ if (ret < 0)
+ return ret;
+
/* Format and window size. */
ret = ov772x_write(client, HSTART, win->rect.left >> 2);
if (ret < 0)
@@ -1007,13 +1122,13 @@ static int ov772x_set_params(struct ov772x_priv *priv,
/* Set COM3. */
val = cfmt->com3;
- if (priv->info->flags & OV772X_FLAG_VFLIP)
+ if (priv->info && (priv->info->flags & OV772X_FLAG_VFLIP))
val |= VFLIP_IMG;
- if (priv->info->flags & OV772X_FLAG_HFLIP)
+ if (priv->info && (priv->info->flags & OV772X_FLAG_HFLIP))
val |= HFLIP_IMG;
- if (priv->flag_vflip)
+ if (priv->vflip_ctrl->val)
val ^= VFLIP_IMG;
- if (priv->flag_hflip)
+ if (priv->hflip_ctrl->val)
val ^= HFLIP_IMG;
ret = ov772x_mask_set(client,
@@ -1027,18 +1142,18 @@ static int ov772x_set_params(struct ov772x_priv *priv,
goto ov772x_set_fmt_error;
/* COM4, CLKRC: Set pixel clock and framerate. */
- tpf.numerator = 1;
- tpf.denominator = priv->fps;
- ret = ov772x_set_frame_rate(priv, &tpf, cfmt, win);
+ ret = ov772x_set_frame_rate(priv, priv->fps, cfmt, win);
if (ret < 0)
goto ov772x_set_fmt_error;
/* Set COM8. */
- if (priv->band_filter) {
+ if (priv->band_filter_ctrl->val) {
+ unsigned short band_filter = priv->band_filter_ctrl->val;
+
ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF);
if (!ret)
ret = ov772x_mask_set(client, BDBASE,
- 0xff, 256 - priv->band_filter);
+ 0xff, 256 - band_filter);
if (ret < 0)
goto ov772x_set_fmt_error;
}
@@ -1102,7 +1217,7 @@ static int ov772x_set_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf = &format->format;
const struct ov772x_color_format *cfmt;
const struct ov772x_win_size *win;
- int ret;
+ int ret = 0;
if (format->pad)
return -EINVAL;
@@ -1123,30 +1238,50 @@ static int ov772x_set_fmt(struct v4l2_subdev *sd,
return 0;
}
- ret = ov772x_set_params(priv, cfmt, win);
- if (ret < 0)
- return ret;
+ mutex_lock(&priv->lock);
+
+ if (priv->streaming) {
+ ret = -EBUSY;
+ goto error;
+ }
+ /*
+ * If the device is not powered up by the host driver do
+ * not apply any changes to H/W at this time. Instead
+ * the format will be restored right after power-up.
+ */
+ if (priv->power_count > 0) {
+ ret = ov772x_set_params(priv, cfmt, win);
+ if (ret < 0)
+ goto error;
+ }
priv->win = win;
priv->cfmt = cfmt;
- return 0;
+error:
+ mutex_unlock(&priv->lock);
+
+ return ret;
}
static int ov772x_video_probe(struct ov772x_priv *priv)
{
struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
- u8 pid, ver;
+ int pid, ver, midh, midl;
const char *devname;
int ret;
- ret = ov772x_s_power(&priv->subdev, 1);
+ ret = ov772x_power_on(priv);
if (ret < 0)
return ret;
/* Check and show product ID and manufacturer ID. */
pid = ov772x_read(client, PID);
+ if (pid < 0) {
+ ret = pid;
+ goto done;
+ }
ver = ov772x_read(client, VER);
+ if (ver < 0) {
+ ret = ver;
+ goto done;
+ }
switch (VERSION(pid, ver)) {
case OV7720:
@@ -1162,17 +1297,21 @@ static int ov772x_video_probe(struct ov772x_priv *priv)
goto done;
}
+ midh = ov772x_read(client, MIDH);
+ if (midh < 0) {
+ ret = midh;
+ goto done;
+ }
+ midl = ov772x_read(client, MIDL);
+ if (midl < 0) {
+ ret = midl;
+ goto done;
+ }
+
dev_info(&client->dev,
"%s Product ID %0x:%0x Manufacturer ID %x:%x\n",
- devname,
- pid,
- ver,
- ov772x_read(client, MIDH),
- ov772x_read(client, MIDL));
+ devname, pid, ver, midh, midl);
+
ret = v4l2_ctrl_handler_setup(&priv->hdl);
done:
- ov772x_s_power(&priv->subdev, 0);
+ ov772x_power_off(priv);
return ret;
}
@@ -1250,48 +1389,54 @@ static int ov772x_probe(struct i2c_client *client,
struct i2c_adapter *adapter = client->adapter;
int ret;
- if (!client->dev.platform_data) {
- dev_err(&client->dev, "Missing ov772x platform data\n");
+ if (!client->dev.of_node && !client->dev.platform_data) {
+ dev_err(&client->dev,
+ "Missing ov772x platform data for non-DT device\n");
return -EINVAL;
}
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_PROTOCOL_MANGLING)) {
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&adapter->dev,
- "I2C-Adapter doesn't support SMBUS_BYTE_DATA or PROTOCOL_MANGLING\n");
+ "I2C-Adapter doesn't support SMBUS_BYTE_DATA\n");
return -EIO;
}
- client->flags |= I2C_CLIENT_SCCB;
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->info = client->dev.platform_data;
+ mutex_init(&priv->lock);
v4l2_i2c_subdev_init(&priv->subdev, client, &ov772x_subdev_ops);
+ priv->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
v4l2_ctrl_handler_init(&priv->hdl, 3);
- v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
- V4L2_CID_BAND_STOP_FILTER, 0, 256, 1, 0);
+ /* Use our mutex for the controls */
+ priv->hdl.lock = &priv->lock;
+ priv->vflip_ctrl = v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ priv->hflip_ctrl = v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ priv->band_filter_ctrl = v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops,
+ V4L2_CID_BAND_STOP_FILTER,
+ 0, 256, 1, 0);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error)
- return priv->hdl.error;
+ if (priv->hdl.error) {
+ ret = priv->hdl.error;
+ goto error_mutex_destroy;
+ }
- priv->clk = clk_get(&client->dev, "xclk");
+ priv->clk = clk_get(&client->dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(&client->dev, "Unable to get xclk clock\n");
ret = PTR_ERR(priv->clk);
goto error_ctrl_free;
}
- priv->pwdn_gpio = gpiod_get_optional(&client->dev, "pwdn",
+ priv->pwdn_gpio = gpiod_get_optional(&client->dev, "powerdown",
GPIOD_OUT_LOW);
if (IS_ERR(priv->pwdn_gpio)) {
- dev_info(&client->dev, "Unable to get GPIO \"pwdn\"");
+ dev_info(&client->dev, "Unable to get GPIO \"powerdown\"");
ret = PTR_ERR(priv->pwdn_gpio);
goto error_clk_put;
}
@@ -1300,16 +1445,26 @@ static int ov772x_probe(struct i2c_client *client,
if (ret < 0)
goto error_gpio_put;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ priv->pad.flags = MEDIA_PAD_FL_SOURCE;
+ priv->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&priv->subdev.entity, 1, &priv->pad);
+ if (ret < 0)
+ goto error_gpio_put;
+#endif
+
priv->cfmt = &ov772x_cfmts[0];
priv->win = &ov772x_win_sizes[0];
priv->fps = 15;
ret = v4l2_async_register_subdev(&priv->subdev);
if (ret)
- goto error_gpio_put;
+ goto error_entity_cleanup;
return 0;
+error_entity_cleanup:
+ media_entity_cleanup(&priv->subdev.entity);
error_gpio_put:
if (priv->pwdn_gpio)
gpiod_put(priv->pwdn_gpio);
@@ -1317,6 +1472,8 @@ error_clk_put:
clk_put(priv->clk);
error_ctrl_free:
v4l2_ctrl_handler_free(&priv->hdl);
+error_mutex_destroy:
+ mutex_destroy(&priv->lock);
return ret;
}
@@ -1325,11 +1482,13 @@ static int ov772x_remove(struct i2c_client *client)
{
struct ov772x_priv *priv = to_ov772x(i2c_get_clientdata(client));
+ media_entity_cleanup(&priv->subdev.entity);
clk_put(priv->clk);
if (priv->pwdn_gpio)
gpiod_put(priv->pwdn_gpio);
v4l2_async_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
+ mutex_destroy(&priv->lock);
return 0;
}
@@ -1340,9 +1499,17 @@ static const struct i2c_device_id ov772x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ov772x_id);
+static const struct of_device_id ov772x_of_match[] = {
+ { .compatible = "ovti,ov7725", },
+ { .compatible = "ovti,ov7720", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ov772x_of_match);
+
static struct i2c_driver ov772x_i2c_driver = {
.driver = {
.name = "ov772x",
+ .of_match_table = ov772x_of_match,
},
.probe = ov772x_probe,
.remove = ov772x_remove,
diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
new file mode 100644
index 000000000000..6ad998ad1b16
--- /dev/null
+++ b/drivers/media/i2c/rj54n1cb0c.c
@@ -0,0 +1,1437 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for RJ54N1CB0C CMOS Image Sensor from Sharp
+ *
+ * Copyright (C) 2018, Jacopo Mondi <jacopo@jmondi.org>
+ *
+ * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/videodev2.h>
+
+#include <media/i2c/rj54n1cb0c.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#define RJ54N1_DEV_CODE 0x0400
+#define RJ54N1_DEV_CODE2 0x0401
+#define RJ54N1_OUT_SEL 0x0403
+#define RJ54N1_XY_OUTPUT_SIZE_S_H 0x0404
+#define RJ54N1_X_OUTPUT_SIZE_S_L 0x0405
+#define RJ54N1_Y_OUTPUT_SIZE_S_L 0x0406
+#define RJ54N1_XY_OUTPUT_SIZE_P_H 0x0407
+#define RJ54N1_X_OUTPUT_SIZE_P_L 0x0408
+#define RJ54N1_Y_OUTPUT_SIZE_P_L 0x0409
+#define RJ54N1_LINE_LENGTH_PCK_S_H 0x040a
+#define RJ54N1_LINE_LENGTH_PCK_S_L 0x040b
+#define RJ54N1_LINE_LENGTH_PCK_P_H 0x040c
+#define RJ54N1_LINE_LENGTH_PCK_P_L 0x040d
+#define RJ54N1_RESIZE_N 0x040e
+#define RJ54N1_RESIZE_N_STEP 0x040f
+#define RJ54N1_RESIZE_STEP 0x0410
+#define RJ54N1_RESIZE_HOLD_H 0x0411
+#define RJ54N1_RESIZE_HOLD_L 0x0412
+#define RJ54N1_H_OBEN_OFS 0x0413
+#define RJ54N1_V_OBEN_OFS 0x0414
+#define RJ54N1_RESIZE_CONTROL 0x0415
+#define RJ54N1_STILL_CONTROL 0x0417
+#define RJ54N1_INC_USE_SEL_H 0x0425
+#define RJ54N1_INC_USE_SEL_L 0x0426
+#define RJ54N1_MIRROR_STILL_MODE 0x0427
+#define RJ54N1_INIT_START 0x0428
+#define RJ54N1_SCALE_1_2_LEV 0x0429
+#define RJ54N1_SCALE_4_LEV 0x042a
+#define RJ54N1_Y_GAIN 0x04d8
+#define RJ54N1_APT_GAIN_UP 0x04fa
+#define RJ54N1_RA_SEL_UL 0x0530
+#define RJ54N1_BYTE_SWAP 0x0531
+#define RJ54N1_OUT_SIGPO 0x053b
+#define RJ54N1_WB_SEL_WEIGHT_I 0x054e
+#define RJ54N1_BIT8_WB 0x0569
+#define RJ54N1_HCAPS_WB 0x056a
+#define RJ54N1_VCAPS_WB 0x056b
+#define RJ54N1_HCAPE_WB 0x056c
+#define RJ54N1_VCAPE_WB 0x056d
+#define RJ54N1_EXPOSURE_CONTROL 0x058c
+#define RJ54N1_FRAME_LENGTH_S_H 0x0595
+#define RJ54N1_FRAME_LENGTH_S_L 0x0596
+#define RJ54N1_FRAME_LENGTH_P_H 0x0597
+#define RJ54N1_FRAME_LENGTH_P_L 0x0598
+#define RJ54N1_PEAK_H 0x05b7
+#define RJ54N1_PEAK_50 0x05b8
+#define RJ54N1_PEAK_60 0x05b9
+#define RJ54N1_PEAK_DIFF 0x05ba
+#define RJ54N1_IOC 0x05ef
+#define RJ54N1_TG_BYPASS 0x0700
+#define RJ54N1_PLL_L 0x0701
+#define RJ54N1_PLL_N 0x0702
+#define RJ54N1_PLL_EN 0x0704
+#define RJ54N1_RATIO_TG 0x0706
+#define RJ54N1_RATIO_T 0x0707
+#define RJ54N1_RATIO_R 0x0708
+#define RJ54N1_RAMP_TGCLK_EN 0x0709
+#define RJ54N1_OCLK_DSP 0x0710
+#define RJ54N1_RATIO_OP 0x0711
+#define RJ54N1_RATIO_O 0x0712
+#define RJ54N1_OCLK_SEL_EN 0x0713
+#define RJ54N1_CLK_RST 0x0717
+#define RJ54N1_RESET_STANDBY 0x0718
+#define RJ54N1_FWFLG 0x07fe
+
+#define E_EXCLK (1 << 7)
+#define SOFT_STDBY (1 << 4)
+#define SEN_RSTX (1 << 2)
+#define TG_RSTX (1 << 1)
+#define DSP_RSTX (1 << 0)
+
+#define RESIZE_HOLD_SEL (1 << 2)
+#define RESIZE_GO (1 << 1)
+
+/*
+ * When cropping, the camera automatically centers the cropped region;
+ * there doesn't seem to be a way to specify an explicit location of the
+ * rectangle.
+ */
+#define RJ54N1_COLUMN_SKIP 0
+#define RJ54N1_ROW_SKIP 0
+#define RJ54N1_MAX_WIDTH 1600
+#define RJ54N1_MAX_HEIGHT 1200
+
+#define PLL_L 2
+#define PLL_N 0x31
+
+/* I2C addresses: 0x50, 0x51, 0x60, 0x61 */
+
+/* RJ54N1CB0C has only one fixed colorspace per pixelcode */
+struct rj54n1_datafmt {
+ u32 code;
+ enum v4l2_colorspace colorspace;
+};
+
+/* Find a data format by a pixel code in an array */
+static const struct rj54n1_datafmt *rj54n1_find_datafmt(
+ u32 code, const struct rj54n1_datafmt *fmt,
+ int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ if (fmt[i].code == code)
+ return fmt + i;
+
+ return NULL;
+}
+
+static const struct rj54n1_datafmt rj54n1_colour_fmts[] = {
+ {MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
+};
+
+struct rj54n1_clock_div {
+ u8 ratio_tg; /* can be 0 or an odd number */
+ u8 ratio_t;
+ u8 ratio_r;
+ u8 ratio_op;
+ u8 ratio_o;
+};
+
+struct rj54n1 {
+ struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler hdl;
+ struct clk *clk;
+ struct gpio_desc *pwup_gpio;
+ struct gpio_desc *enable_gpio;
+ struct rj54n1_clock_div clk_div;
+ const struct rj54n1_datafmt *fmt;
+ struct v4l2_rect rect; /* Sensor window */
+ unsigned int tgclk_mhz;
+ bool auto_wb;
+ unsigned short width; /* Output window */
+ unsigned short height;
+ unsigned short resize; /* Sensor * 1024 / resize = Output */
+ unsigned short scale;
+ u8 bank;
+};
+
+struct rj54n1_reg_val {
+ u16 reg;
+ u8 val;
+};
+
+static const struct rj54n1_reg_val bank_4[] = {
+ {0x417, 0},
+ {0x42c, 0},
+ {0x42d, 0xf0},
+ {0x42e, 0},
+ {0x42f, 0x50},
+ {0x430, 0xf5},
+ {0x431, 0x16},
+ {0x432, 0x20},
+ {0x433, 0},
+ {0x434, 0xc8},
+ {0x43c, 8},
+ {0x43e, 0x90},
+ {0x445, 0x83},
+ {0x4ba, 0x58},
+ {0x4bb, 4},
+ {0x4bc, 0x20},
+ {0x4db, 4},
+ {0x4fe, 2},
+};
+
+static const struct rj54n1_reg_val bank_5[] = {
+ {0x514, 0},
+ {0x516, 0},
+ {0x518, 0},
+ {0x51a, 0},
+ {0x51d, 0xff},
+ {0x56f, 0x28},
+ {0x575, 0x40},
+ {0x5bc, 0x48},
+ {0x5c1, 6},
+ {0x5e5, 0x11},
+ {0x5e6, 0x43},
+ {0x5e7, 0x33},
+ {0x5e8, 0x21},
+ {0x5e9, 0x30},
+ {0x5ea, 0x0},
+ {0x5eb, 0xa5},
+ {0x5ec, 0xff},
+ {0x5fe, 2},
+};
+
+static const struct rj54n1_reg_val bank_7[] = {
+ {0x70a, 0},
+ {0x714, 0xff},
+ {0x715, 0xff},
+ {0x716, 0x1f},
+ {0x7FE, 2},
+};
+
+static const struct rj54n1_reg_val bank_8[] = {
+ {0x800, 0x00},
+ {0x801, 0x01},
+ {0x802, 0x61},
+ {0x805, 0x00},
+ {0x806, 0x00},
+ {0x807, 0x00},
+ {0x808, 0x00},
+ {0x809, 0x01},
+ {0x80A, 0x61},
+ {0x80B, 0x00},
+ {0x80C, 0x01},
+ {0x80D, 0x00},
+ {0x80E, 0x00},
+ {0x80F, 0x00},
+ {0x810, 0x00},
+ {0x811, 0x01},
+ {0x812, 0x61},
+ {0x813, 0x00},
+ {0x814, 0x11},
+ {0x815, 0x00},
+ {0x816, 0x41},
+ {0x817, 0x00},
+ {0x818, 0x51},
+ {0x819, 0x01},
+ {0x81A, 0x1F},
+ {0x81B, 0x00},
+ {0x81C, 0x01},
+ {0x81D, 0x00},
+ {0x81E, 0x11},
+ {0x81F, 0x00},
+ {0x820, 0x41},
+ {0x821, 0x00},
+ {0x822, 0x51},
+ {0x823, 0x00},
+ {0x824, 0x00},
+ {0x825, 0x00},
+ {0x826, 0x47},
+ {0x827, 0x01},
+ {0x828, 0x4F},
+ {0x829, 0x00},
+ {0x82A, 0x00},
+ {0x82B, 0x00},
+ {0x82C, 0x30},
+ {0x82D, 0x00},
+ {0x82E, 0x40},
+ {0x82F, 0x00},
+ {0x830, 0xB3},
+ {0x831, 0x00},
+ {0x832, 0xE3},
+ {0x833, 0x00},
+ {0x834, 0x00},
+ {0x835, 0x00},
+ {0x836, 0x00},
+ {0x837, 0x00},
+ {0x838, 0x00},
+ {0x839, 0x01},
+ {0x83A, 0x61},
+ {0x83B, 0x00},
+ {0x83C, 0x01},
+ {0x83D, 0x00},
+ {0x83E, 0x00},
+ {0x83F, 0x00},
+ {0x840, 0x00},
+ {0x841, 0x01},
+ {0x842, 0x61},
+ {0x843, 0x00},
+ {0x844, 0x1D},
+ {0x845, 0x00},
+ {0x846, 0x00},
+ {0x847, 0x00},
+ {0x848, 0x00},
+ {0x849, 0x01},
+ {0x84A, 0x1F},
+ {0x84B, 0x00},
+ {0x84C, 0x05},
+ {0x84D, 0x00},
+ {0x84E, 0x19},
+ {0x84F, 0x01},
+ {0x850, 0x21},
+ {0x851, 0x01},
+ {0x852, 0x5D},
+ {0x853, 0x00},
+ {0x854, 0x00},
+ {0x855, 0x00},
+ {0x856, 0x19},
+ {0x857, 0x01},
+ {0x858, 0x21},
+ {0x859, 0x00},
+ {0x85A, 0x00},
+ {0x85B, 0x00},
+ {0x85C, 0x00},
+ {0x85D, 0x00},
+ {0x85E, 0x00},
+ {0x85F, 0x00},
+ {0x860, 0xB3},
+ {0x861, 0x00},
+ {0x862, 0xE3},
+ {0x863, 0x00},
+ {0x864, 0x00},
+ {0x865, 0x00},
+ {0x866, 0x00},
+ {0x867, 0x00},
+ {0x868, 0x00},
+ {0x869, 0xE2},
+ {0x86A, 0x00},
+ {0x86B, 0x01},
+ {0x86C, 0x06},
+ {0x86D, 0x00},
+ {0x86E, 0x00},
+ {0x86F, 0x00},
+ {0x870, 0x60},
+ {0x871, 0x8C},
+ {0x872, 0x10},
+ {0x873, 0x00},
+ {0x874, 0xE0},
+ {0x875, 0x00},
+ {0x876, 0x27},
+ {0x877, 0x01},
+ {0x878, 0x00},
+ {0x879, 0x00},
+ {0x87A, 0x00},
+ {0x87B, 0x03},
+ {0x87C, 0x00},
+ {0x87D, 0x00},
+ {0x87E, 0x00},
+ {0x87F, 0x00},
+ {0x880, 0x00},
+ {0x881, 0x00},
+ {0x882, 0x00},
+ {0x883, 0x00},
+ {0x884, 0x00},
+ {0x885, 0x00},
+ {0x886, 0xF8},
+ {0x887, 0x00},
+ {0x888, 0x03},
+ {0x889, 0x00},
+ {0x88A, 0x64},
+ {0x88B, 0x00},
+ {0x88C, 0x03},
+ {0x88D, 0x00},
+ {0x88E, 0xB1},
+ {0x88F, 0x00},
+ {0x890, 0x03},
+ {0x891, 0x01},
+ {0x892, 0x1D},
+ {0x893, 0x00},
+ {0x894, 0x03},
+ {0x895, 0x01},
+ {0x896, 0x4B},
+ {0x897, 0x00},
+ {0x898, 0xE5},
+ {0x899, 0x00},
+ {0x89A, 0x01},
+ {0x89B, 0x00},
+ {0x89C, 0x01},
+ {0x89D, 0x04},
+ {0x89E, 0xC8},
+ {0x89F, 0x00},
+ {0x8A0, 0x01},
+ {0x8A1, 0x01},
+ {0x8A2, 0x61},
+ {0x8A3, 0x00},
+ {0x8A4, 0x01},
+ {0x8A5, 0x00},
+ {0x8A6, 0x00},
+ {0x8A7, 0x00},
+ {0x8A8, 0x00},
+ {0x8A9, 0x00},
+ {0x8AA, 0x7F},
+ {0x8AB, 0x03},
+ {0x8AC, 0x00},
+ {0x8AD, 0x00},
+ {0x8AE, 0x00},
+ {0x8AF, 0x00},
+ {0x8B0, 0x00},
+ {0x8B1, 0x00},
+ {0x8B6, 0x00},
+ {0x8B7, 0x01},
+ {0x8B8, 0x00},
+ {0x8B9, 0x00},
+ {0x8BA, 0x02},
+ {0x8BB, 0x00},
+ {0x8BC, 0xFF},
+ {0x8BD, 0x00},
+ {0x8FE, 2},
+};
+
+static const struct rj54n1_reg_val bank_10[] = {
+ {0x10bf, 0x69}
+};
+
+/*
+ * Clock divider register values; divider = register value + 1. The chip
+ * defaults are noted next to each field.
+ */
+static const struct rj54n1_clock_div clk_div = {
+ .ratio_tg = 3 /* default: 5 */,
+ .ratio_t = 4 /* default: 1 */,
+ .ratio_r = 4 /* default: 0 */,
+ .ratio_op = 1 /* default: 5 */,
+ .ratio_o = 9 /* default: 0 */,
+};
+
+static struct rj54n1 *to_rj54n1(const struct i2c_client *client)
+{
+ return container_of(i2c_get_clientdata(client), struct rj54n1, subdev);
+}
+
+static int reg_read(struct i2c_client *client, const u16 reg)
+{
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ int ret;
+
+ /* set bank */
+ if (rj54n1->bank != reg >> 8) {
+ dev_dbg(&client->dev, "[0x%x] = 0x%x\n", 0xff, reg >> 8);
+ ret = i2c_smbus_write_byte_data(client, 0xff, reg >> 8);
+ if (ret < 0)
+ return ret;
+ rj54n1->bank = reg >> 8;
+ }
+ return i2c_smbus_read_byte_data(client, reg & 0xff);
+}
+
+static int reg_write(struct i2c_client *client, const u16 reg,
+ const u8 data)
+{
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ int ret;
+
+ /* set bank */
+ if (rj54n1->bank != reg >> 8) {
+ dev_dbg(&client->dev, "[0x%x] = 0x%x\n", 0xff, reg >> 8);
+ ret = i2c_smbus_write_byte_data(client, 0xff, reg >> 8);
+ if (ret < 0)
+ return ret;
+ rj54n1->bank = reg >> 8;
+ }
+ dev_dbg(&client->dev, "[0x%x] = 0x%x\n", reg & 0xff, data);
+ return i2c_smbus_write_byte_data(client, reg & 0xff, data);
+}
+
+static int reg_set(struct i2c_client *client, const u16 reg,
+ const u8 data, const u8 mask)
+{
+ int ret;
+
+ ret = reg_read(client, reg);
+ if (ret < 0)
+ return ret;
+ return reg_write(client, reg, (ret & ~mask) | (data & mask));
+}
+
+static int reg_write_multiple(struct i2c_client *client,
+ const struct rj54n1_reg_val *rv, const int n)
+{
+ int i, ret;
+
+ for (i = 0; i < n; i++) {
+ ret = reg_write(client, rv->reg, rv->val);
+ if (ret < 0)
+ return ret;
+ rv++;
+ }
+
+ return 0;
+}
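
The three helpers above implement bank-switched access: the 16-bit register address carries the bank number in its high byte, which is latched by writing it to the bank-select register at 0xff and cached in rj54n1->bank to skip redundant latch writes. A minimal standalone sketch of the same pattern (names hypothetical, not part of the patch):

#include <linux/i2c.h>

struct banked_dev {
	struct i2c_client *client;
	u8 bank;	/* last value written to the 0xff bank latch */
};

static int banked_read(struct banked_dev *dev, u16 reg)
{
	u8 bank = reg >> 8;
	int ret;

	if (dev->bank != bank) {
		ret = i2c_smbus_write_byte_data(dev->client, 0xff, bank);
		if (ret < 0)
			return ret;
		/* cache only after the latch write succeeded */
		dev->bank = bank;
	}
	return i2c_smbus_read_byte_data(dev->client, reg & 0xff);
}
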
+
+static int rj54n1_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad || code->index >= ARRAY_SIZE(rj54n1_colour_fmts))
+ return -EINVAL;
+
+ code->code = rj54n1_colour_fmts[code->index].code;
+ return 0;
+}
+
+static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /* Switch between preview and still shot modes */
+ return reg_set(client, RJ54N1_STILL_CONTROL, (!enable) << 7, 0x80);
+}
+
+static int rj54n1_set_rect(struct i2c_client *client,
+ u16 reg_x, u16 reg_y, u16 reg_xy,
+ u32 width, u32 height)
+{
+ int ret;
+
+ ret = reg_write(client, reg_xy,
+ ((width >> 4) & 0x70) |
+ ((height >> 8) & 7));
+
+ if (!ret)
+ ret = reg_write(client, reg_x, width & 0xff);
+ if (!ret)
+ ret = reg_write(client, reg_y, height & 0xff);
+
+ return ret;
+}
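
rj54n1_set_rect() packs each 11-bit dimension across two registers: the low byte goes into its own register, while bits 10..8 of the width (pre-shifted into bits 6..4) and bits 10..8 of the height share the reg_xy register. A worked sketch of the packing, assuming the maximum 1600x1200 window (helper name hypothetical):

static u8 rj54n1_pack_xy_h(u32 width, u32 height)
{
	/*
	 * Width bits 10..8 land in bits 6..4, height bits 10..8 in bits
	 * 2..0; for 1600x1200 (0x640 x 0x4b0) this yields 0x60 | 0x04 =
	 * 0x64, with the low bytes 0x40 and 0xb0 written to reg_x and
	 * reg_y respectively.
	 */
	return ((width >> 4) & 0x70) | ((height >> 8) & 7);
}
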
+
+/*
+ * Some commands, specifically certain initialisation sequences, require
+ * a commit operation.
+ */
+static int rj54n1_commit(struct i2c_client *client)
+{
+ int ret = reg_write(client, RJ54N1_INIT_START, 1);
+ msleep(10);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_INIT_START, 0);
+ return ret;
+}
+
+static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
+ s32 *out_w, s32 *out_h);
+
+static int rj54n1_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ const struct v4l2_rect *rect = &sel->r;
+ int output_w, output_h, input_w = rect->width, input_h = rect->height;
+ int ret;
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ /* arbitrary minimum width and height, edges unimportant */
+ v4l_bound_align_image(&input_w, 8, RJ54N1_MAX_WIDTH, 0,
+ &input_h, 8, RJ54N1_MAX_HEIGHT, 0, 0);
+
+ output_w = (input_w * 1024 + rj54n1->resize / 2) / rj54n1->resize;
+ output_h = (input_h * 1024 + rj54n1->resize / 2) / rj54n1->resize;
+
+ dev_dbg(&client->dev, "Scaling for %dx%d : %u = %dx%d\n",
+ input_w, input_h, rj54n1->resize, output_w, output_h);
+
+ ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
+ if (ret < 0)
+ return ret;
+
+ rj54n1->width = output_w;
+ rj54n1->height = output_h;
+ rj54n1->resize = ret;
+ rj54n1->rect.width = input_w;
+ rj54n1->rect.height = input_h;
+
+ return 0;
+}
+
+static int rj54n1_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.left = RJ54N1_COLUMN_SKIP;
+ sel->r.top = RJ54N1_ROW_SKIP;
+ sel->r.width = RJ54N1_MAX_WIDTH;
+ sel->r.height = RJ54N1_MAX_HEIGHT;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = rj54n1->rect;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rj54n1_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+
+ if (format->pad)
+ return -EINVAL;
+
+ mf->code = rj54n1->fmt->code;
+ mf->colorspace = rj54n1->fmt->colorspace;
+ mf->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ mf->xfer_func = V4L2_XFER_FUNC_SRGB;
+ mf->quantization = V4L2_QUANTIZATION_DEFAULT;
+ mf->field = V4L2_FIELD_NONE;
+ mf->width = rj54n1->width;
+ mf->height = rj54n1->height;
+
+ return 0;
+}
+
+/*
+ * The actual geometry configuration routine. It scales the input window into
+ * the output one, updates the window sizes and returns an error or the resize
+ * coefficient on success. Note: we only use the "Fixed Scaling" on this camera.
+ */
+static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
+ s32 *out_w, s32 *out_h)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ unsigned int skip, resize, input_w = *in_w, input_h = *in_h,
+ output_w = *out_w, output_h = *out_h;
+ u16 inc_sel, wb_bit8, wb_left, wb_right, wb_top, wb_bottom;
+ unsigned int peak, peak_50, peak_60;
+ int ret;
+
+	/*
+	 * We have a problem with crops where the input window is larger
+	 * than 512x384 and the output window is larger than half of the
+	 * input one. In this case we have to reduce either the input
+	 * window to at most 512x384 or the output window to at most half
+	 * of the input.
+	 */
+ if (output_w > max(512U, input_w / 2)) {
+ if (2 * output_w > RJ54N1_MAX_WIDTH) {
+ input_w = RJ54N1_MAX_WIDTH;
+ output_w = RJ54N1_MAX_WIDTH / 2;
+ } else {
+ input_w = output_w * 2;
+ }
+
+ dev_dbg(&client->dev, "Adjusted output width: in %u, out %u\n",
+ input_w, output_w);
+ }
+
+ if (output_h > max(384U, input_h / 2)) {
+ if (2 * output_h > RJ54N1_MAX_HEIGHT) {
+ input_h = RJ54N1_MAX_HEIGHT;
+ output_h = RJ54N1_MAX_HEIGHT / 2;
+ } else {
+ input_h = output_h * 2;
+ }
+
+ dev_dbg(&client->dev, "Adjusted output height: in %u, out %u\n",
+ input_h, output_h);
+ }
+
+ /* Idea: use the read mode for snapshots, handle separate geometries */
+ ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_S_L,
+ RJ54N1_Y_OUTPUT_SIZE_S_L,
+ RJ54N1_XY_OUTPUT_SIZE_S_H, output_w, output_h);
+ if (!ret)
+ ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_P_L,
+ RJ54N1_Y_OUTPUT_SIZE_P_L,
+ RJ54N1_XY_OUTPUT_SIZE_P_H, output_w, output_h);
+
+ if (ret < 0)
+ return ret;
+
+ if (output_w > input_w && output_h > input_h) {
+ input_w = output_w;
+ input_h = output_h;
+
+ resize = 1024;
+ } else {
+ unsigned int resize_x, resize_y;
+ resize_x = (input_w * 1024 + output_w / 2) / output_w;
+ resize_y = (input_h * 1024 + output_h / 2) / output_h;
+
+ /* We want max(resize_x, resize_y), check if it still fits */
+ if (resize_x > resize_y &&
+ (output_h * resize_x + 512) / 1024 > RJ54N1_MAX_HEIGHT)
+ resize = (RJ54N1_MAX_HEIGHT * 1024 + output_h / 2) /
+ output_h;
+ else if (resize_y > resize_x &&
+ (output_w * resize_y + 512) / 1024 > RJ54N1_MAX_WIDTH)
+ resize = (RJ54N1_MAX_WIDTH * 1024 + output_w / 2) /
+ output_w;
+ else
+ resize = max(resize_x, resize_y);
+
+ /* Prohibited value ranges */
+ switch (resize) {
+ case 2040 ... 2047:
+ resize = 2039;
+ break;
+ case 4080 ... 4095:
+ resize = 4079;
+ break;
+ case 8160 ... 8191:
+ resize = 8159;
+ break;
+ case 16320 ... 16384:
+ resize = 16319;
+ }
+ }
+
+ /* Set scaling */
+ ret = reg_write(client, RJ54N1_RESIZE_HOLD_L, resize & 0xff);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RESIZE_HOLD_H, resize >> 8);
+
+ if (ret < 0)
+ return ret;
+
+	/*
+	 * Configure a skipping bitmask. The sensor will select a skipping
+	 * value among the set bits automatically. This is very unclear in
+	 * the datasheet too. I was told that in this register one enables
+	 * all skipping values required for a specific resize, and the
+	 * camera automatically selects which ones to use. But it is unclear
+	 * how to identify which skipping values are needed. Secondly, why
+	 * not just set all bits and let the camera choose? Would that
+	 * increase processing time and reduce the framerate? Using 0xfffc
+	 * for INC_USE_SEL doesn't seem to improve the image quality or
+	 * stability for larger frames (see the comment above), but I didn't
+	 * check the framerate.
+	 */
+ skip = min(resize / 1024, 15U);
+
+ inc_sel = 1 << skip;
+
+ if (inc_sel <= 2)
+ inc_sel = 0xc;
+ else if (resize & 1023 && skip < 15)
+ inc_sel |= 1 << (skip + 1);
+
+ ret = reg_write(client, RJ54N1_INC_USE_SEL_L, inc_sel & 0xfc);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_INC_USE_SEL_H, inc_sel >> 8);
+
+ if (!rj54n1->auto_wb) {
+ /* Auto white balance window */
+ wb_left = output_w / 16;
+ wb_right = (3 * output_w / 4 - 3) / 4;
+ wb_top = output_h / 16;
+ wb_bottom = (3 * output_h / 4 - 3) / 4;
+ wb_bit8 = ((wb_left >> 2) & 0x40) | ((wb_top >> 4) & 0x10) |
+ ((wb_right >> 6) & 4) | ((wb_bottom >> 8) & 1);
+
+ if (!ret)
+ ret = reg_write(client, RJ54N1_BIT8_WB, wb_bit8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_HCAPS_WB, wb_left);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_VCAPS_WB, wb_top);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_HCAPE_WB, wb_right);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_VCAPE_WB, wb_bottom);
+ }
+
+ /* Antiflicker */
+ peak = 12 * RJ54N1_MAX_WIDTH * (1 << 14) * resize / rj54n1->tgclk_mhz /
+ 10000;
+ peak_50 = peak / 6;
+ peak_60 = peak / 5;
+
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PEAK_H,
+ ((peak_50 >> 4) & 0xf0) | (peak_60 >> 8));
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PEAK_50, peak_50);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PEAK_60, peak_60);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PEAK_DIFF, peak / 150);
+
+ /* Start resizing */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RESIZE_CONTROL,
+ RESIZE_HOLD_SEL | RESIZE_GO | 1);
+
+ if (ret < 0)
+ return ret;
+
+ /* Constant taken from manufacturer's example */
+ msleep(230);
+
+ ret = reg_write(client, RJ54N1_RESIZE_CONTROL, RESIZE_HOLD_SEL | 1);
+ if (ret < 0)
+ return ret;
+
+ *in_w = (output_w * resize + 512) / 1024;
+ *in_h = (output_h * resize + 512) / 1024;
+ *out_w = output_w;
+ *out_h = output_h;
+
+ dev_dbg(&client->dev, "Scaled for %dx%d : %u = %ux%u, skip %u\n",
+ *in_w, *in_h, resize, output_w, output_h, skip);
+
+ return resize;
+}
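
The scaler works in 1:1024 fixed point: the resize coefficient is input * 1024 / output, computed round-to-nearest by adding half the divisor before dividing, and the effective input window is recovered from the coefficient the same way. A sketch of just the arithmetic (helper names hypothetical):

/* Round-to-nearest fixed-point ratio; 1024 == 1:1 scaling */
static unsigned int rj54n1_resize_coef(unsigned int in, unsigned int out)
{
	return (in * 1024 + out / 2) / out;
}

/* Effective input size implied by a resize coefficient */
static unsigned int rj54n1_input_size(unsigned int out, unsigned int resize)
{
	return (out * resize + 512) / 1024;	/* +512 rounds to nearest */
}
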
+
+static int rj54n1_set_clock(struct i2c_client *client)
+{
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ int ret;
+
+ /* Enable external clock */
+ ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK | SOFT_STDBY);
+ /* Leave stand-by. Note: use this when implementing suspend / resume */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK);
+
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PLL_L, PLL_L);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PLL_N, PLL_N);
+
+ /* TGCLK dividers */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RATIO_TG,
+ rj54n1->clk_div.ratio_tg);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RATIO_T,
+ rj54n1->clk_div.ratio_t);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RATIO_R,
+ rj54n1->clk_div.ratio_r);
+
+ /* Enable TGCLK & RAMP */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RAMP_TGCLK_EN, 3);
+
+ /* Disable clock output */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_OCLK_DSP, 0);
+
+ /* Set divisors */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RATIO_OP,
+ rj54n1->clk_div.ratio_op);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RATIO_O,
+ rj54n1->clk_div.ratio_o);
+
+ /* Enable OCLK */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_OCLK_SEL_EN, 1);
+
+ /* Use PLL for Timing Generator, write 2 to reserved bits */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_TG_BYPASS, 2);
+
+ /* Take sensor out of reset */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RESET_STANDBY,
+ E_EXCLK | SEN_RSTX);
+ /* Enable PLL */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PLL_EN, 1);
+
+ /* Wait for PLL to stabilise */
+ msleep(10);
+
+ /* Enable clock to frequency divider */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_CLK_RST, 1);
+
+ if (!ret)
+ ret = reg_read(client, RJ54N1_CLK_RST);
+ if (ret != 1) {
+ dev_err(&client->dev,
+ "Resetting RJ54N1CB0C clock failed: %d!\n", ret);
+ return -EIO;
+ }
+
+ /* Start the PLL */
+ ret = reg_set(client, RJ54N1_OCLK_DSP, 1, 1);
+
+ /* Enable OCLK */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_OCLK_SEL_EN, 1);
+
+ return ret;
+}
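
Since every ratio register holds divider - 1, the timing-generator clock programmed above follows from the master clock as mclk / PLL_L * PLL_N / (ratio_tg + 1) / (ratio_t + 1), mirroring how probe later derives rj54n1->tgclk_mhz. A hedged sketch, assuming a 24 MHz master clock (the actual rate comes from platform data):

static unsigned long rj54n1_tgclk(unsigned long mclk)
{
	/* 24 MHz / 2 * 0x31 = 588 MHz at the PLL output */
	unsigned long pll = mclk / PLL_L * PLL_N;

	/* registers hold divider - 1, hence the "+ 1" */
	return pll / (clk_div.ratio_tg + 1) / (clk_div.ratio_t + 1);
}

With the divider values above (3 and 4) this works out to 588 MHz / 4 / 5 = 29.4 MHz.
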
+
+static int rj54n1_reg_init(struct i2c_client *client)
+{
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ int ret = rj54n1_set_clock(client);
+
+ if (!ret)
+ ret = reg_write_multiple(client, bank_7, ARRAY_SIZE(bank_7));
+ if (!ret)
+ ret = reg_write_multiple(client, bank_10, ARRAY_SIZE(bank_10));
+
+ /* Set binning divisors */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_SCALE_1_2_LEV, 3 | (7 << 4));
+ if (!ret)
+ ret = reg_write(client, RJ54N1_SCALE_4_LEV, 0xf);
+
+ /* Switch to fixed resize mode */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RESIZE_CONTROL,
+ RESIZE_HOLD_SEL | 1);
+
+ /* Set gain */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_Y_GAIN, 0x84);
+
+ /*
+ * Mirror the image back: default is upside down and left-to-right...
+ * Set manual preview / still shot switching
+ */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_MIRROR_STILL_MODE, 0x27);
+
+ if (!ret)
+ ret = reg_write_multiple(client, bank_4, ARRAY_SIZE(bank_4));
+
+ /* Auto exposure area */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_EXPOSURE_CONTROL, 0x80);
+ /* Check current auto WB config */
+ if (!ret)
+ ret = reg_read(client, RJ54N1_WB_SEL_WEIGHT_I);
+ if (ret >= 0) {
+ rj54n1->auto_wb = ret & 0x80;
+ ret = reg_write_multiple(client, bank_5, ARRAY_SIZE(bank_5));
+ }
+ if (!ret)
+ ret = reg_write_multiple(client, bank_8, ARRAY_SIZE(bank_8));
+
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RESET_STANDBY,
+ E_EXCLK | DSP_RSTX | SEN_RSTX);
+
+ /* Commit init */
+ if (!ret)
+ ret = rj54n1_commit(client);
+
+ /* Take DSP, TG, sensor out of reset */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RESET_STANDBY,
+ E_EXCLK | DSP_RSTX | TG_RSTX | SEN_RSTX);
+
+ /* Start register update? Same register as 0x?FE in many bank_* sets */
+ if (!ret)
+ ret = reg_write(client, RJ54N1_FWFLG, 2);
+
+ /* Constant taken from manufacturer's example */
+ msleep(700);
+
+ return ret;
+}
+
+static int rj54n1_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ const struct rj54n1_datafmt *fmt;
+ int output_w, output_h, max_w, max_h,
+ input_w = rj54n1->rect.width, input_h = rj54n1->rect.height;
+ int align = mf->code == MEDIA_BUS_FMT_SBGGR10_1X10 ||
+ mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE ||
+ mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE ||
+ mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE ||
+ mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE;
+ int ret;
+
+ if (format->pad)
+ return -EINVAL;
+
+ dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n",
+ __func__, mf->code, mf->width, mf->height);
+
+ fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts,
+ ARRAY_SIZE(rj54n1_colour_fmts));
+ if (!fmt) {
+ fmt = rj54n1->fmt;
+ mf->code = fmt->code;
+ }
+
+ mf->field = V4L2_FIELD_NONE;
+ mf->colorspace = fmt->colorspace;
+
+ v4l_bound_align_image(&mf->width, 112, RJ54N1_MAX_WIDTH, align,
+ &mf->height, 84, RJ54N1_MAX_HEIGHT, align, 0);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *mf;
+ return 0;
+ }
+
+	/*
+	 * Check whether the sensor has just been powered on. TODO: replace
+	 * this with proper PM once a suitable API is available.
+	 */
+ ret = reg_read(client, RJ54N1_RESET_STANDBY);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & E_EXCLK)) {
+ ret = rj54n1_reg_init(client);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* RA_SEL_UL is only relevant for raw modes, ignored otherwise. */
+ switch (mf->code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 0);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 0);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
+ break;
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
+ break;
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 4);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 4);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 4);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 4);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 5);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ /* Special case: a raw mode with 10 bits of data per clock tick */
+ if (!ret)
+ ret = reg_set(client, RJ54N1_OCLK_SEL_EN,
+ (mf->code == MEDIA_BUS_FMT_SBGGR10_1X10) << 1, 2);
+
+ if (ret < 0)
+ return ret;
+
+ /* Supported scales 1:1 >= scale > 1:16 */
+ max_w = mf->width * (16 * 1024 - 1) / 1024;
+ if (input_w > max_w)
+ input_w = max_w;
+ max_h = mf->height * (16 * 1024 - 1) / 1024;
+ if (input_h > max_h)
+ input_h = max_h;
+
+ output_w = mf->width;
+ output_h = mf->height;
+
+ ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
+ if (ret < 0)
+ return ret;
+
+ fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts,
+ ARRAY_SIZE(rj54n1_colour_fmts));
+
+ rj54n1->fmt = fmt;
+ rj54n1->resize = ret;
+ rj54n1->rect.width = input_w;
+ rj54n1->rect.height = input_h;
+ rj54n1->width = output_w;
+ rj54n1->height = output_h;
+
+ mf->width = output_w;
+ mf->height = output_h;
+ mf->field = V4L2_FIELD_NONE;
+ mf->colorspace = fmt->colorspace;
+
+ return 0;
+}
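
Note that for the SBGGR10 variants the align argument to v4l_bound_align_image() is 1, i.e. log2(2): widths and heights are forced to even values, because cropping or scaling a Bayer mosaic by an odd amount would shift the CFA phase and swap the colour channels. A minimal illustration (values hypothetical):

static void rj54n1_bayer_align_example(void)
{
	u32 w = 641, h = 481;

	/* walign/halign are log2 values: 1 means "multiple of 2" */
	v4l_bound_align_image(&w, 112, RJ54N1_MAX_WIDTH, 1,
			      &h, 84, RJ54N1_MAX_HEIGHT, 1, 0);
	/* w and h are now even, keeping the BGGR phase intact */
}
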
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int rj54n1_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->reg < 0x400 || reg->reg > 0x1fff)
+		/* Registers >= 0x0800 are only available from Sharp support */
+ return -EINVAL;
+
+ reg->size = 1;
+ reg->val = reg_read(client, reg->reg);
+
+ if (reg->val > 0xff)
+ return -EIO;
+
+ return 0;
+}
+
+static int rj54n1_s_register(struct v4l2_subdev *sd,
+ const struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->reg < 0x400 || reg->reg > 0x1fff)
+ /* Registers >= 0x0800 are only available from Sharp support */
+ return -EINVAL;
+
+ if (reg_write(client, reg->reg, reg->val) < 0)
+ return -EIO;
+
+ return 0;
+}
+#endif
+
+static int rj54n1_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+
+ if (on) {
+ if (rj54n1->pwup_gpio)
+ gpiod_set_value(rj54n1->pwup_gpio, 1);
+ if (rj54n1->enable_gpio)
+ gpiod_set_value(rj54n1->enable_gpio, 1);
+
+ msleep(1);
+
+ return clk_prepare_enable(rj54n1->clk);
+ }
+
+ clk_disable_unprepare(rj54n1->clk);
+
+ if (rj54n1->enable_gpio)
+ gpiod_set_value(rj54n1->enable_gpio, 0);
+ if (rj54n1->pwup_gpio)
+ gpiod_set_value(rj54n1->pwup_gpio, 0);
+
+ return 0;
+}
+
+static int rj54n1_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct rj54n1 *rj54n1 = container_of(ctrl->handler, struct rj54n1, hdl);
+ struct v4l2_subdev *sd = &rj54n1->subdev;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int data;
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ if (ctrl->val)
+ data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 1);
+ else
+ data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 1, 1);
+ if (data < 0)
+ return -EIO;
+ return 0;
+ case V4L2_CID_HFLIP:
+ if (ctrl->val)
+ data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 2);
+ else
+ data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 2, 2);
+ if (data < 0)
+ return -EIO;
+ return 0;
+ case V4L2_CID_GAIN:
+ if (reg_write(client, RJ54N1_Y_GAIN, ctrl->val * 2) < 0)
+ return -EIO;
+ return 0;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ /* Auto WB area - whole image */
+ if (reg_set(client, RJ54N1_WB_SEL_WEIGHT_I, ctrl->val << 7,
+ 0x80) < 0)
+ return -EIO;
+ rj54n1->auto_wb = ctrl->val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops rj54n1_ctrl_ops = {
+ .s_ctrl = rj54n1_s_ctrl,
+};
+
+static const struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = {
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = rj54n1_g_register,
+ .s_register = rj54n1_s_register,
+#endif
+ .s_power = rj54n1_s_power,
+};
+
+static const struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
+ .s_stream = rj54n1_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops rj54n1_subdev_pad_ops = {
+ .enum_mbus_code = rj54n1_enum_mbus_code,
+ .get_selection = rj54n1_get_selection,
+ .set_selection = rj54n1_set_selection,
+ .get_fmt = rj54n1_get_fmt,
+ .set_fmt = rj54n1_set_fmt,
+};
+
+static const struct v4l2_subdev_ops rj54n1_subdev_ops = {
+ .core = &rj54n1_subdev_core_ops,
+ .video = &rj54n1_subdev_video_ops,
+ .pad = &rj54n1_subdev_pad_ops,
+};
+
+/*
+ * Interface active, can use I2C. If it fails, it can indeed mean that
+ * this wasn't our capture interface, so we wait for the right one.
+ */
+static int rj54n1_video_probe(struct i2c_client *client,
+ struct rj54n1_pdata *priv)
+{
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ int data1, data2;
+ int ret;
+
+ ret = rj54n1_s_power(&rj54n1->subdev, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Read out the chip version register */
+ data1 = reg_read(client, RJ54N1_DEV_CODE);
+ data2 = reg_read(client, RJ54N1_DEV_CODE2);
+
+ if (data1 != 0x51 || data2 != 0x10) {
+ ret = -ENODEV;
+ dev_info(&client->dev, "No RJ54N1CB0C found, read 0x%x:0x%x\n",
+ data1, data2);
+ goto done;
+ }
+
+ /* Configure IOCTL polarity from the platform data: 0 or 1 << 7. */
+ ret = reg_write(client, RJ54N1_IOC, priv->ioctl_high << 7);
+ if (ret < 0)
+ goto done;
+
+ dev_info(&client->dev, "Detected a RJ54N1CB0C chip ID 0x%x:0x%x\n",
+ data1, data2);
+
+ ret = v4l2_ctrl_handler_setup(&rj54n1->hdl);
+
+done:
+ rj54n1_s_power(&rj54n1->subdev, 0);
+ return ret;
+}
+
+static int rj54n1_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct rj54n1 *rj54n1;
+ struct i2c_adapter *adapter = client->adapter;
+ struct rj54n1_pdata *rj54n1_priv;
+ int ret;
+
+ if (!client->dev.platform_data) {
+ dev_err(&client->dev, "RJ54N1CB0C: missing platform data!\n");
+ return -EINVAL;
+ }
+
+ rj54n1_priv = client->dev.platform_data;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_warn(&adapter->dev,
+			 "I2C adapter doesn't support I2C_FUNC_SMBUS_BYTE_DATA\n");
+ return -EIO;
+ }
+
+ rj54n1 = devm_kzalloc(&client->dev, sizeof(struct rj54n1), GFP_KERNEL);
+ if (!rj54n1)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&rj54n1->subdev, client, &rj54n1_subdev_ops);
+ v4l2_ctrl_handler_init(&rj54n1->hdl, 4);
+ v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
+ V4L2_CID_GAIN, 0, 127, 1, 66);
+ v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+ rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
+ if (rj54n1->hdl.error)
+ return rj54n1->hdl.error;
+
+ rj54n1->clk_div = clk_div;
+ rj54n1->rect.left = RJ54N1_COLUMN_SKIP;
+ rj54n1->rect.top = RJ54N1_ROW_SKIP;
+ rj54n1->rect.width = RJ54N1_MAX_WIDTH;
+ rj54n1->rect.height = RJ54N1_MAX_HEIGHT;
+ rj54n1->width = RJ54N1_MAX_WIDTH;
+ rj54n1->height = RJ54N1_MAX_HEIGHT;
+ rj54n1->fmt = &rj54n1_colour_fmts[0];
+ rj54n1->resize = 1024;
+ rj54n1->tgclk_mhz = (rj54n1_priv->mclk_freq / PLL_L * PLL_N) /
+ (clk_div.ratio_tg + 1) / (clk_div.ratio_t + 1);
+
+ rj54n1->clk = clk_get(&client->dev, NULL);
+ if (IS_ERR(rj54n1->clk)) {
+ ret = PTR_ERR(rj54n1->clk);
+ goto err_free_ctrl;
+ }
+
+ rj54n1->pwup_gpio = gpiod_get_optional(&client->dev, "powerup",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(rj54n1->pwup_gpio)) {
+ dev_info(&client->dev, "Unable to get GPIO \"powerup\": %ld\n",
+ PTR_ERR(rj54n1->pwup_gpio));
+ ret = PTR_ERR(rj54n1->pwup_gpio);
+ goto err_clk_put;
+ }
+
+ rj54n1->enable_gpio = gpiod_get_optional(&client->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(rj54n1->enable_gpio)) {
+ dev_info(&client->dev, "Unable to get GPIO \"enable\": %ld\n",
+ PTR_ERR(rj54n1->enable_gpio));
+ ret = PTR_ERR(rj54n1->enable_gpio);
+ goto err_gpio_put;
+ }
+
+ ret = rj54n1_video_probe(client, rj54n1_priv);
+ if (ret < 0)
+ goto err_gpio_put;
+
+ ret = v4l2_async_register_subdev(&rj54n1->subdev);
+ if (ret)
+ goto err_gpio_put;
+
+ return 0;
+
+err_gpio_put:
+ if (rj54n1->enable_gpio)
+ gpiod_put(rj54n1->enable_gpio);
+
+ if (rj54n1->pwup_gpio)
+ gpiod_put(rj54n1->pwup_gpio);
+
+err_clk_put:
+ clk_put(rj54n1->clk);
+
+err_free_ctrl:
+ v4l2_ctrl_handler_free(&rj54n1->hdl);
+
+ return ret;
+}
+
+static int rj54n1_remove(struct i2c_client *client)
+{
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+
+ if (rj54n1->enable_gpio)
+ gpiod_put(rj54n1->enable_gpio);
+ if (rj54n1->pwup_gpio)
+ gpiod_put(rj54n1->pwup_gpio);
+
+ clk_put(rj54n1->clk);
+ v4l2_ctrl_handler_free(&rj54n1->hdl);
+ v4l2_async_unregister_subdev(&rj54n1->subdev);
+
+ return 0;
+}
+
+static const struct i2c_device_id rj54n1_id[] = {
+ { "rj54n1cb0c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rj54n1_id);
+
+static struct i2c_driver rj54n1_i2c_driver = {
+ .driver = {
+ .name = "rj54n1cb0c",
+ },
+ .probe = rj54n1_probe,
+ .remove = rj54n1_remove,
+ .id_table = rj54n1_id,
+};
+
+module_i2c_driver(rj54n1_i2c_driver);
+
+MODULE_DESCRIPTION("Sharp RJ54N1CB0C Camera driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index e1f8208581aa..1236683da8f7 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -1892,7 +1892,7 @@ static int scaling_goodness(struct v4l2_subdev *subdev, int w, int ask_w,
val -= SCALING_GOODNESS_EXTREME;
dev_dbg(&client->dev, "w %d ask_w %d h %d ask_h %d goodness %d\n",
- w, ask_h, h, ask_h, val);
+ w, ask_w, h, ask_h, val);
return val;
}
@@ -2764,6 +2764,7 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
struct v4l2_fwnode_endpoint *bus_cfg;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
+ u32 rotation;
int i;
int rval;
@@ -2800,6 +2801,21 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
dev_dbg(dev, "lanes %u\n", hwcfg->lanes);
+ rval = fwnode_property_read_u32(fwnode, "rotation", &rotation);
+ if (!rval) {
+ switch (rotation) {
+ case 180:
+ hwcfg->module_board_orient =
+ SMIAPP_MODULE_BOARD_ORIENT_180;
+ /* Fall through */
+ case 0:
+ break;
+ default:
+ dev_err(dev, "invalid rotation %u\n", rotation);
+ goto out_err;
+ }
+ }
+
/* NVM size is not mandatory */
fwnode_property_read_u32(fwnode, "nokia,nvm-size", &hwcfg->nvm_size);
@@ -3171,4 +3187,4 @@ module_i2c_driver(smiapp_i2c_driver);
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@iki.fi>");
MODULE_DESCRIPTION("Generic SMIA/SMIA++ camera module driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
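
The new hunk above reads the optional "rotation" firmware property and maps 180 degrees onto SMIAPP_MODULE_BOARD_ORIENT_180 via an intentional fall-through, so that 0 and 180 are accepted and anything else is rejected. A reduced sketch of the same parsing (hypothetical wrapper, not the driver's function):

#include <linux/property.h>

/* Accept only 0- or 180-degree module mounting */
static int parse_rotation(struct fwnode_handle *fwnode, bool *upside_down)
{
	u32 rotation;

	if (fwnode_property_read_u32(fwnode, "rotation", &rotation))
		return 0;		/* property is optional */

	switch (rotation) {
	case 180:
		*upside_down = true;
		/* fall through - 180 degrees is just inverted 0 */
	case 0:
		return 0;
	default:
		return -EINVAL;
	}
}
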
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index 806383500313..14377af7c888 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -834,7 +834,7 @@ static int ov772x_set_params(struct ov772x_priv *priv,
* set COM8
*/
if (priv->band_filter) {
- ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, 1);
+ ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF);
if (!ret)
ret = ov772x_mask_set(client, BDBASE,
0xff, 256 - priv->band_filter);
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 393bbbbbaad7..44c41933415a 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -1918,7 +1918,8 @@ static int tc358743_probe_of(struct tc358743_state *state)
endpoint = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(ep));
if (IS_ERR(endpoint)) {
dev_err(dev, "failed to parse endpoint\n");
- return PTR_ERR(endpoint);
+ ret = PTR_ERR(endpoint);
+ goto put_node;
}
if (endpoint->bus_type != V4L2_MBUS_CSI2 ||
@@ -2013,6 +2014,8 @@ disable_clk:
clk_disable_unprepare(refclk);
free_endpoint:
v4l2_fwnode_endpoint_free(endpoint);
+put_node:
+ of_node_put(ep);
return ret;
}
#else
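
The tc358743 fix matters because the endpoint node is obtained with a reference held (via of_graph_get_next_endpoint(), as far as the surrounding code shows); returning early on a parse failure leaked that reference. The shape of the resulting unwind ladder, reduced to the two resources involved (a sketch, not the full function):

#include <linux/of_graph.h>
#include <media/v4l2-fwnode.h>

static int probe_of_sketch(struct device *dev)
{
	struct v4l2_fwnode_endpoint *endpoint;
	struct device_node *ep;
	int ret = 0;

	ep = of_graph_get_next_endpoint(dev->of_node, NULL);
	if (!ep)
		return -EINVAL;		/* nothing acquired yet */

	endpoint = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(ep));
	if (IS_ERR(endpoint)) {
		ret = PTR_ERR(endpoint);
		goto put_node;		/* must still drop the node ref */
	}

	v4l2_fwnode_endpoint_free(endpoint);
put_node:
	of_node_put(ep);
	return ret;
}
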
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 039a92c3294a..d114ac5243ec 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2570,7 +2570,7 @@ static int tda1997x_probe(struct i2c_client *client,
id->name, i2c_adapter_id(client->adapter),
client->addr);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
- sd->entity.function = MEDIA_ENT_F_DTV_DECODER;
+ sd->entity.function = MEDIA_ENT_F_DV_DECODER;
sd->entity.ops = &tda1997x_media_ops;
/* set allowed mbus modes based on chip, bus-type, and bus-width */
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 6a9890531d01..675b9ae212ab 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -1084,7 +1084,7 @@ tvp514x_probe(struct i2c_client *client, const struct i2c_device_id *id)
#if defined(CONFIG_MEDIA_CONTROLLER)
decoder->pad.flags = MEDIA_PAD_FL_SOURCE;
decoder->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- decoder->sd.entity.flags |= MEDIA_ENT_F_ATV_DECODER;
+ decoder->sd.entity.function = MEDIA_ENT_F_ATV_DECODER;
ret = media_entity_pads_init(&decoder->sd.entity, 1, &decoder->pad);
if (ret < 0) {
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index b162c2fe62c3..76e6bed5a1da 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -872,7 +872,7 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
f = &format->format;
f->width = decoder->rect.width;
- f->height = decoder->rect.height;
+ f->height = decoder->rect.height / 2;
f->code = MEDIA_BUS_FMT_UYVY8_2X8;
f->field = V4L2_FIELD_ALTERNATE;
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 4599b7e28a8d..4f5c627579c7 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -1010,7 +1010,7 @@ static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
#if defined(CONFIG_MEDIA_CONTROLLER)
device->pad.flags = MEDIA_PAD_FL_SOURCE;
device->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- device->sd.entity.flags |= MEDIA_ENT_F_ATV_DECODER;
+ device->sd.entity.function = MEDIA_ENT_F_ATV_DECODER;
error = media_entity_pads_init(&device->sd.entity, 1, &device->pad);
if (error < 0)
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 0b347cc19aa5..06d29d8f6be8 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/freezer.h>
+#include <linux/hwmon.h>
#include <linux/kthread.h>
#include <linux/i2c.h>
#include <linux/list.h>
@@ -77,6 +78,9 @@ struct video_i2c_chip {
/* xfer function */
int (*xfer)(struct video_i2c_data *data, char *buf);
+
+ /* hwmon init function */
+ int (*hwmon_init)(struct video_i2c_data *data);
};
static int amg88xx_xfer(struct video_i2c_data *data, char *buf)
@@ -101,6 +105,74 @@ static int amg88xx_xfer(struct video_i2c_data *data, char *buf)
return (ret == 2) ? 0 : -EIO;
}
+#if IS_ENABLED(CONFIG_HWMON)
+
+static const u32 amg88xx_temp_config[] = {
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info amg88xx_temp = {
+ .type = hwmon_temp,
+ .config = amg88xx_temp_config,
+};
+
+static const struct hwmon_channel_info *amg88xx_info[] = {
+ &amg88xx_temp,
+ NULL
+};
+
+static umode_t amg88xx_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static int amg88xx_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct video_i2c_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int tmp = i2c_smbus_read_word_data(client, 0x0e);
+
+ if (tmp < 0)
+ return tmp;
+
+	/*
+	 * Check for the sign bit: this isn't a two's complement value but an
+	 * absolute temperature that needs to be negated when the sign bit is
+	 * set.
+	 */
+ if (tmp & BIT(11))
+ tmp = -(tmp & 0x7ff);
+
+ *val = (tmp * 625) / 10;
+
+ return 0;
+}
+
+static const struct hwmon_ops amg88xx_hwmon_ops = {
+ .is_visible = amg88xx_is_visible,
+ .read = amg88xx_read,
+};
+
+static const struct hwmon_chip_info amg88xx_chip_info = {
+ .ops = &amg88xx_hwmon_ops,
+ .info = amg88xx_info,
+};
+
+static int amg88xx_hwmon_init(struct video_i2c_data *data)
+{
+ void *hwmon = devm_hwmon_device_register_with_info(&data->client->dev,
+ "amg88xx", data, &amg88xx_chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon);
+}
+#else
+#define amg88xx_hwmon_init NULL
+#endif
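
The thermistor register read in amg88xx_read() is sign-magnitude with a resolution of 0.0625 degrees Celsius per LSB; clearing bit 11, negating, and scaling by 625/10 therefore yields the millidegrees Celsius that hwmon expects. The decoding step in isolation:

/* Decode the AMG88xx thermistor register into millidegrees Celsius */
static long amg88xx_decode_temp(int raw)
{
	/* sign-magnitude: bit 11 is the sign, bits 10..0 the magnitude */
	if (raw & BIT(11))
		raw = -(raw & 0x7ff);

	return (long)raw * 625 / 10;	/* 0.0625 degC per LSB */
}
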
+
#define AMG88XX 0
static const struct video_i2c_chip video_i2c_chip[] = {
@@ -111,6 +183,7 @@ static const struct video_i2c_chip video_i2c_chip[] = {
.buffer_size = 128,
.bpp = 16,
.xfer = &amg88xx_xfer,
+ .hwmon_init = amg88xx_hwmon_init,
},
};
@@ -505,6 +578,14 @@ static int video_i2c_probe(struct i2c_client *client,
video_set_drvdata(&data->vdev, data);
i2c_set_clientdata(client, data);
+ if (data->chip->hwmon_init) {
+ ret = data->chip->hwmon_init(data);
+ if (ret < 0) {
+ dev_warn(&client->dev,
+ "failed to register hwmon device\n");
+ }
+ }
+
ret = video_register_device(&data->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0)
goto error_unregister_device;
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 1658816a9844..bc9825f4a73d 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -770,7 +770,7 @@ static int vs6624_probe(struct i2c_client *client,
return ret;
}
/* wait 100ms before any further i2c writes are performed */
- mdelay(100);
+ msleep(100);
sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL);
if (sensor == NULL)
@@ -782,7 +782,7 @@ static int vs6624_probe(struct i2c_client *client,
vs6624_writeregs(sd, vs6624_p1);
vs6624_write(sd, VS6624_MICRO_EN, 0x2);
vs6624_write(sd, VS6624_DIO_EN, 0x1);
- mdelay(10);
+ usleep_range(10000, 11000);
vs6624_writeregs(sd, vs6624_p2);
vs6624_writeregs(sd, vs6624_default);
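
The mdelay()-to-msleep() and usleep_range() substitutions in this and the following hunks apply the kernel's usual delay guidance: mdelay() busy-waits and is only justified in atomic context, msleep() suits waits of roughly 20 ms and longer, and usleep_range() gives the timer subsystem slack for shorter non-atomic sleeps. In short:

#include <linux/delay.h>

static void delay_examples(void)
{
	msleep(100);			/* >= ~20 ms: sleep, don't spin */
	usleep_range(10000, 11000);	/* ~10 ms: bounded, coalescable sleep */
	/* mdelay(10) would busy-wait; acceptable only in atomic context */
}
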
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index ae59c3177555..fcdf3d5dc4b6 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -16,9 +16,6 @@
* GNU General Public License for more details.
*/
-/* We need to access legacy defines from linux/media.h */
-#define __NEED_MEDIA_LEGACY_API
-
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/idr.h>
@@ -28,6 +25,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/usb.h>
+#include <linux/version.h>
#include <media/media-device.h>
#include <media/media-devnode.h>
@@ -35,6 +33,16 @@
#ifdef CONFIG_MEDIA_CONTROLLER
+/*
+ * Legacy defines from linux/media.h. This is the only place we need them,
+ * so we just define them here. The media.h header doesn't expose them to
+ * the kernel to prevent them from being used by drivers, but here (and
+ * only here!) we need them to handle the legacy behavior.
+ */
+#define MEDIA_ENT_SUBTYPE_MASK 0x0000ffff
+#define MEDIA_ENT_T_DEVNODE_UNKNOWN (MEDIA_ENT_F_OLD_BASE | \
+ MEDIA_ENT_SUBTYPE_MASK)
+
/* -----------------------------------------------------------------------------
* Userspace API
*/
@@ -259,6 +267,7 @@ static long media_device_get_topology(struct media_device *mdev, void *arg)
memset(&kentity, 0, sizeof(kentity));
kentity.id = entity->graph_obj.id;
kentity.function = entity->function;
+ kentity.flags = entity->flags;
strlcpy(kentity.name, entity->name,
sizeof(kentity.name));
@@ -324,6 +333,7 @@ static long media_device_get_topology(struct media_device *mdev, void *arg)
kpad.id = pad->graph_obj.id;
kpad.entity_id = pad->entity->graph_obj.id;
kpad.flags = pad->flags;
+ kpad.index = pad->index;
if (copy_to_user(upad, &kpad, sizeof(kpad)))
ret = -EFAULT;
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index de3f44b8dec6..cf05e11da01b 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -3511,7 +3511,7 @@ static void bttv_irq_debug_low_latency(struct bttv *btv, u32 rc)
}
pr_notice("%d: Uhm. Looks like we have unusual high IRQ latencies\n",
btv->c.nr);
- pr_notice("%d: Lets try to catch the culpit red-handed ...\n",
+	pr_notice("%d: Let's try to catch the culprit red-handed ...\n",
btv->c.nr);
dump_stack();
}
diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
index 2e33b7236672..b98de2a22f78 100644
--- a/drivers/media/pci/bt8xx/dst.c
+++ b/drivers/media/pci/bt8xx/dst.c
@@ -1739,9 +1739,9 @@ static const struct dvb_frontend_ops dst_dvbt_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DST DVB-T",
- .frequency_min = 137000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166667,
+ .frequency_min_hz = 137 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_FEC_AUTO |
FE_CAN_QAM_AUTO |
FE_CAN_QAM_16 |
@@ -1768,10 +1768,10 @@ static const struct dvb_frontend_ops dst_dvbs_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "DST DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1000, /* kHz for QPSK frontends */
- .frequency_tolerance = 29500,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1 * MHz,
+ .frequency_tolerance_hz = 29500 * kHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
/* . symbol_rate_tolerance = ???,*/
@@ -1797,9 +1797,9 @@ static const struct dvb_frontend_ops dst_dvbc_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "DST DVB-C",
- .frequency_stepsize = 62500,
- .frequency_min = 51000000,
- .frequency_max = 858000000,
+ .frequency_min_hz = 51 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_FEC_AUTO |
@@ -1826,9 +1826,9 @@ static const struct dvb_frontend_ops dst_atsc_ops = {
.delsys = { SYS_ATSC },
.info = {
.name = "DST ATSC",
- .frequency_stepsize = 62500,
- .frequency_min = 510000000,
- .frequency_max = 858000000,
+ .frequency_min_hz = 510 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_FEC_AUTO | FE_CAN_QAM_AUTO | FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
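
These dst.c hunks move to the Hz-typed dvb_frontend_ops fields; the old frequency_min/frequency_max were interpreted in kHz for satellite frontends and in Hz for everything else, an ambiguity the explicit *_hz names and the kHz/MHz multipliers remove. A compile-time sanity check of the DVB-S conversion, using the same macros the hunks rely on:

static void dst_dvbs_range_check(void)
{
	/* old DVB-S values were in kHz: 950000 and 2150000 */
	BUILD_BUG_ON(950 * MHz != 950000 * kHz);
	BUILD_BUG_ON(2150 * MHz != 2150000 * kHz);
}
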
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index 5ef6e2051d45..2f810b7130e6 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -386,10 +386,6 @@ static int advbt771_samsung_tdtc9251dh0_tuner_calc_regs(struct dvb_frontend *fe,
bs = 0x02;
else if (c->frequency < 470000000)
bs = 0x02;
- else if (c->frequency < 600000000)
- bs = 0x08;
- else if (c->frequency < 730000000)
- bs = 0x08;
else
bs = 0x08;
@@ -606,8 +602,8 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
if (card->fe != NULL) {
card->fe->ops.tuner_ops.calc_regs = thomson_dtt7579_tuner_calc_regs;
- card->fe->ops.info.frequency_min = 174000000;
- card->fe->ops.info.frequency_max = 862000000;
+ card->fe->ops.info.frequency_min_hz = 174 * MHz;
+ card->fe->ops.info.frequency_max_hz = 862 * MHz;
}
break;
@@ -659,8 +655,8 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
card->fe = dvb_attach(mt352_attach, &advbt771_samsung_tdtc9251dh0_config, card->i2c_adapter);
if (card->fe != NULL) {
card->fe->ops.tuner_ops.calc_regs = advbt771_samsung_tdtc9251dh0_tuner_calc_regs;
- card->fe->ops.info.frequency_min = 174000000;
- card->fe->ops.info.frequency_max = 862000000;
+ card->fe->ops.info.frequency_min_hz = 174 * MHz;
+ card->fe->ops.info.frequency_max_hz = 862 * MHz;
}
break;
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index c8b1a6206c65..4885e833c052 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -670,7 +670,7 @@ static int cobalt_probe(struct pci_dev *pci_dev,
/* FIXME - module parameter arrays constrain max instances */
i = atomic_inc_return(&cobalt_instance) - 1;
- cobalt = kzalloc(sizeof(struct cobalt), GFP_ATOMIC);
+ cobalt = kzalloc(sizeof(struct cobalt), GFP_KERNEL);
if (cobalt == NULL)
return -ENOMEM;
cobalt->pci_dev = pci_dev;
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 8f314ca320c7..0c389a3fb4e5 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -1134,8 +1134,6 @@ free_mem:
free_workqueues:
destroy_workqueue(cx->in_work_queue);
err:
- if (retval == 0)
- retval = -ENODEV;
CX18_ERR("Error %d on initialization\n", retval);
v4l2_device_unregister(&cx->v4l2_dev);
diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
index 70aec9bb7e95..62bc8049b320 100644
--- a/drivers/media/pci/cx23885/altera-ci.c
+++ b/drivers/media/pci/cx23885/altera-ci.c
@@ -346,7 +346,7 @@ static int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
mutex_unlock(&inter->fpga_mutex);
for (;;) {
- mdelay(50);
+ msleep(50);
mutex_lock(&inter->fpga_mutex);
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 9f50748fdf56..ed3210dc50bc 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -1497,20 +1497,20 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the demod into reset and protect the eeprom */
mc417_gpio_clear(dev, GPIO_15 | GPIO_14);
- mdelay(100);
+ msleep(100);
/* Bring the demod and blaster out of reset */
mc417_gpio_set(dev, GPIO_15 | GPIO_14);
- mdelay(100);
+ msleep(100);
/* Force the TDA8295A into reset and back */
cx23885_gpio_enable(dev, GPIO_2, 1);
cx23885_gpio_set(dev, GPIO_2);
- mdelay(20);
+ msleep(20);
cx23885_gpio_clear(dev, GPIO_2);
- mdelay(20);
+ msleep(20);
cx23885_gpio_set(dev, GPIO_2);
- mdelay(20);
+ msleep(20);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1200:
/* GPIO-0 tda10048 demodulator reset */
@@ -1518,9 +1518,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x00050000);
- mdelay(20);
+ msleep(20);
cx_clear(GP0_IO, 0x00000005);
- mdelay(20);
+ msleep(20);
cx_set(GP0_IO, 0x00050005);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1700:
@@ -1539,9 +1539,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x00050000);
- mdelay(20);
+ msleep(20);
cx_clear(GP0_IO, 0x00000005);
- mdelay(20);
+ msleep(20);
cx_set(GP0_IO, 0x00050005);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1400:
@@ -1551,9 +1551,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x00050000);
- mdelay(20);
+ msleep(20);
cx_clear(GP0_IO, 0x00000005);
- mdelay(20);
+ msleep(20);
cx_set(GP0_IO, 0x00050005);
break;
case CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP:
@@ -1564,9 +1564,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x000f0000);
- mdelay(20);
+ msleep(20);
cx_clear(GP0_IO, 0x0000000f);
- mdelay(20);
+ msleep(20);
cx_set(GP0_IO, 0x000f000f);
break;
case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
@@ -1578,9 +1578,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x000f0000);
- mdelay(20);
+ msleep(20);
cx_clear(GP0_IO, 0x0000000f);
- mdelay(20);
+ msleep(20);
cx_set(GP0_IO, 0x000f000f);
break;
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
@@ -1596,9 +1596,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x00040000);
- mdelay(20);
+ msleep(20);
cx_clear(GP0_IO, 0x00000004);
- mdelay(20);
+ msleep(20);
cx_set(GP0_IO, 0x00040004);
break;
case CX23885_BOARD_TBS_6920:
@@ -1608,11 +1608,11 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_write(MC417_CTL, 0x00000036);
cx_write(MC417_OEN, 0x00001000);
cx_set(MC417_RWD, 0x00000002);
- mdelay(200);
+ msleep(200);
cx_clear(MC417_RWD, 0x00000800);
- mdelay(200);
+ msleep(200);
cx_set(MC417_RWD, 0x00000800);
- mdelay(200);
+ msleep(200);
break;
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
/* GPIO-0 INTA from CiMax1
@@ -1630,7 +1630,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_set(GP0_IO, 0x00040000); /* GPIO as out */
/* GPIO1 and GPIO2 as INTA and INTB from CiMaxes, reset low */
cx_clear(GP0_IO, 0x00030004);
- mdelay(100);/* reset delay */
+ msleep(100);/* reset delay */
cx_set(GP0_IO, 0x00040004); /* GPIO as out, reset high */
cx_write(MC417_CTL, 0x00000037);/* enable GPIO3-18 pins */
/* GPIO-15 IN as ~ACK, rest as OUT */
@@ -1653,7 +1653,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx23885_gpio_enable(dev, GPIO_9 | GPIO_6 | GPIO_5, 1);
cx23885_gpio_set(dev, GPIO_9 | GPIO_6 | GPIO_5);
cx23885_gpio_clear(dev, GPIO_9);
- mdelay(20);
+ msleep(20);
cx23885_gpio_set(dev, GPIO_9);
break;
case CX23885_BOARD_MYGICA_X8506:
@@ -1664,18 +1664,18 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* GPIO-2 demod reset */
cx23885_gpio_enable(dev, GPIO_0 | GPIO_1 | GPIO_2, 1);
cx23885_gpio_clear(dev, GPIO_1 | GPIO_2);
- mdelay(100);
+ msleep(100);
cx23885_gpio_set(dev, GPIO_0 | GPIO_1 | GPIO_2);
- mdelay(100);
+ msleep(100);
break;
case CX23885_BOARD_MYGICA_X8558PRO:
/* GPIO-0 reset first ATBM8830 */
/* GPIO-1 reset second ATBM8830 */
cx23885_gpio_enable(dev, GPIO_0 | GPIO_1, 1);
cx23885_gpio_clear(dev, GPIO_0 | GPIO_1);
- mdelay(100);
+ msleep(100);
cx23885_gpio_set(dev, GPIO_0 | GPIO_1);
- mdelay(100);
+ msleep(100);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1699,11 +1699,11 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
/* Put the demod into reset and protect the eeprom */
mc417_gpio_clear(dev, GPIO_14 | GPIO_13);
- mdelay(100);
+ msleep(100);
/* Bring the demod out of reset */
mc417_gpio_set(dev, GPIO_14);
- mdelay(100);
+ msleep(100);
/* CX24228 GPIO */
/* Connected to IF / Mux */
@@ -1728,7 +1728,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_set(GP0_IO, 0x00060000); /* GPIO-1,2 as out */
/* GPIO-0 as INT, reset & TMS low */
cx_clear(GP0_IO, 0x00010006);
- mdelay(100);/* reset delay */
+ msleep(100);/* reset delay */
cx_set(GP0_IO, 0x00000004); /* reset high */
cx_write(MC417_CTL, 0x00000037);/* enable GPIO-3..18 pins */
/* GPIO-17 is TDO in, GPIO-15 is ~RDY in, rest is out */
@@ -1747,36 +1747,36 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1);
cx23885_gpio_clear(dev, GPIO_8 | GPIO_9);
- mdelay(100);
+ msleep(100);
cx23885_gpio_set(dev, GPIO_8 | GPIO_9);
- mdelay(100);
+ msleep(100);
break;
case CX23885_BOARD_AVERMEDIA_HC81R:
cx_clear(MC417_CTL, 1);
/* GPIO-0,1,2 setup direction as output */
cx_set(GP0_IO, 0x00070000);
- mdelay(10);
+ usleep_range(10000, 11000);
/* AF9013 demod reset */
cx_set(GP0_IO, 0x00010001);
- mdelay(10);
+ usleep_range(10000, 11000);
cx_clear(GP0_IO, 0x00010001);
- mdelay(10);
+ usleep_range(10000, 11000);
cx_set(GP0_IO, 0x00010001);
- mdelay(10);
+ usleep_range(10000, 11000);
/* demod tune? */
cx_clear(GP0_IO, 0x00030003);
- mdelay(10);
+ usleep_range(10000, 11000);
cx_set(GP0_IO, 0x00020002);
- mdelay(10);
+ usleep_range(10000, 11000);
cx_set(GP0_IO, 0x00010001);
- mdelay(10);
+ usleep_range(10000, 11000);
cx_clear(GP0_IO, 0x00020002);
/* XC3028L tuner reset */
cx_set(GP0_IO, 0x00040004);
cx_clear(GP0_IO, 0x00040004);
cx_set(GP0_IO, 0x00040004);
- mdelay(60);
+ msleep(60);
break;
case CX23885_BOARD_DVBSKY_T9580:
case CX23885_BOARD_DVBSKY_S952:
@@ -1785,7 +1785,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_write(MC417_CTL, 0x00000037);
cx23885_gpio_enable(dev, GPIO_2 | GPIO_11, 1);
cx23885_gpio_clear(dev, GPIO_2 | GPIO_11);
- mdelay(100);
+ msleep(100);
cx23885_gpio_set(dev, GPIO_2 | GPIO_11);
break;
case CX23885_BOARD_DVBSKY_T980C:
@@ -1807,7 +1807,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_set(GP0_IO, 0x00060002); /* GPIO 1/2 as output */
cx_clear(GP0_IO, 0x00010004); /* GPIO 0 as input */
- mdelay(100); /* reset delay */
+ msleep(100); /* reset delay */
cx_set(GP0_IO, 0x00060004); /* GPIO as out, reset high */
cx_clear(GP0_IO, 0x00010002);
cx_write(MC417_CTL, 0x00000037); /* enable GPIO3-18 pins */
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 94b996ff12a9..39804d830305 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -667,7 +667,7 @@ static void cx23885_reset(struct cx23885_dev *dev)
/* clear dma in progress */
cx23885_clear_bridge_error(dev);
- mdelay(100);
+ msleep(100);
cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
720*4, 0);
diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.c b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
deleted file mode 100644
index ada26d4acfb4..000000000000
--- a/drivers/media/pci/cx25821/cx25821-audio-upstream.c
+++ /dev/null
@@ -1,679 +0,0 @@
-/*
- * Driver for the Conexant CX25821 PCIe bridge
- *
- * Copyright (C) 2009 Conexant Systems Inc.
- * Authors <hiep.huynh@conexant.com>, <shu.lin@conexant.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include "cx25821-video.h"
-#include "cx25821-audio-upstream.h"
-
-#include <linux/fs.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/syscalls.h>
-#include <linux/file.h>
-#include <linux/fcntl.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards");
-MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>");
-MODULE_LICENSE("GPL");
-
-static int _intr_msk = FLD_AUD_SRC_RISCI1 | FLD_AUD_SRC_OF |
- FLD_AUD_SRC_SYNC | FLD_AUD_SRC_OPC_ERR;
-
-static int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
- const struct sram_channel *ch,
- unsigned int bpl, u32 risc)
-{
- unsigned int i, lines;
- u32 cdt;
-
- if (ch->cmds_start == 0) {
- cx_write(ch->ptr1_reg, 0);
- cx_write(ch->ptr2_reg, 0);
- cx_write(ch->cnt2_reg, 0);
- cx_write(ch->cnt1_reg, 0);
- return 0;
- }
-
- bpl = (bpl + 7) & ~7; /* alignment */
- cdt = ch->cdt;
- lines = ch->fifo_size / bpl;
-
- if (lines > 3)
- lines = 3;
-
- BUG_ON(lines < 2);
-
- /* write CDT */
- for (i = 0; i < lines; i++) {
- cx_write(cdt + 16 * i, ch->fifo_start + bpl * i);
- cx_write(cdt + 16 * i + 4, 0);
- cx_write(cdt + 16 * i + 8, 0);
- cx_write(cdt + 16 * i + 12, 0);
- }
-
- /* write CMDS */
- cx_write(ch->cmds_start + 0, risc);
-
- cx_write(ch->cmds_start + 4, 0);
- cx_write(ch->cmds_start + 8, cdt);
- cx_write(ch->cmds_start + 12, AUDIO_CDT_SIZE_QW);
- cx_write(ch->cmds_start + 16, ch->ctrl_start);
-
- /* IQ size */
- cx_write(ch->cmds_start + 20, AUDIO_IQ_SIZE_DW);
-
- for (i = 24; i < 80; i += 4)
- cx_write(ch->cmds_start + i, 0);
-
- /* fill registers */
- cx_write(ch->ptr1_reg, ch->fifo_start);
- cx_write(ch->ptr2_reg, cdt);
- cx_write(ch->cnt2_reg, AUDIO_CDT_SIZE_QW);
- cx_write(ch->cnt1_reg, AUDIO_CLUSTER_SIZE_QW - 1);
-
- return 0;
-}
-
-static __le32 *cx25821_risc_field_upstream_audio(struct cx25821_dev *dev,
- __le32 *rp,
- dma_addr_t databuf_phys_addr,
- unsigned int bpl,
- int fifo_enable)
-{
- unsigned int line;
- const struct sram_channel *sram_ch =
- dev->channels[dev->_audio_upstream_channel].sram_channels;
- int offset = 0;
-
- /* scan lines */
- for (line = 0; line < LINES_PER_AUDIO_BUFFER; line++) {
- *(rp++) = cpu_to_le32(RISC_READ | RISC_SOL | RISC_EOL | bpl);
- *(rp++) = cpu_to_le32(databuf_phys_addr + offset);
- *(rp++) = cpu_to_le32(0); /* bits 63-32 */
-
- /* Check if we need to enable the FIFO
- * after the first 3 lines.
- * For the upstream audio channel,
- * the risc engine will enable the FIFO */
- if (fifo_enable && line == 2) {
- *(rp++) = RISC_WRITECR;
- *(rp++) = sram_ch->dma_ctl;
- *(rp++) = sram_ch->fld_aud_fifo_en;
- *(rp++) = 0x00000020;
- }
-
- offset += AUDIO_LINE_SIZE;
- }
-
- return rp;
-}
-
-static int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
- struct pci_dev *pci,
- unsigned int bpl, unsigned int lines)
-{
- __le32 *rp;
- int fifo_enable = 0;
- int frame = 0, i = 0;
- int frame_size = AUDIO_DATA_BUF_SZ;
- int databuf_offset = 0;
- int risc_flag = RISC_CNT_INC;
- dma_addr_t risc_phys_jump_addr;
-
- /* Virtual address of Risc buffer program */
- rp = dev->_risc_virt_addr;
-
- /* sync instruction */
- *(rp++) = cpu_to_le32(RISC_RESYNC | AUDIO_SYNC_LINE);
-
- for (frame = 0; frame < NUM_AUDIO_FRAMES; frame++) {
- databuf_offset = frame_size * frame;
-
- if (frame == 0) {
- fifo_enable = 1;
- risc_flag = RISC_CNT_RESET;
- } else {
- fifo_enable = 0;
- risc_flag = RISC_CNT_INC;
- }
-
- /* Calculate physical jump address */
- if ((frame + 1) == NUM_AUDIO_FRAMES) {
- risc_phys_jump_addr =
- dev->_risc_phys_start_addr +
- RISC_SYNC_INSTRUCTION_SIZE;
- } else {
- risc_phys_jump_addr =
- dev->_risc_phys_start_addr +
- RISC_SYNC_INSTRUCTION_SIZE +
- AUDIO_RISC_DMA_BUF_SIZE * (frame + 1);
- }
-
- rp = cx25821_risc_field_upstream_audio(dev, rp,
- dev->_audiodata_buf_phys_addr + databuf_offset,
- bpl, fifo_enable);
-
- if (USE_RISC_NOOP_AUDIO) {
- for (i = 0; i < NUM_NO_OPS; i++)
- *(rp++) = cpu_to_le32(RISC_NOOP);
- }
-
- /* Loop to (Nth)FrameRISC or to Start of Risc program &
- * generate IRQ */
- *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | risc_flag);
- *(rp++) = cpu_to_le32(risc_phys_jump_addr);
- *(rp++) = cpu_to_le32(0);
-
- /* Recalculate virtual address based on frame index */
- rp = dev->_risc_virt_addr + RISC_SYNC_INSTRUCTION_SIZE / 4 +
- (AUDIO_RISC_DMA_BUF_SIZE * (frame + 1) / 4);
- }
-
- return 0;
-}
-
-static void cx25821_free_memory_audio(struct cx25821_dev *dev)
-{
- if (dev->_risc_virt_addr) {
- pci_free_consistent(dev->pci, dev->_audiorisc_size,
- dev->_risc_virt_addr, dev->_risc_phys_addr);
- dev->_risc_virt_addr = NULL;
- }
-
- if (dev->_audiodata_buf_virt_addr) {
- pci_free_consistent(dev->pci, dev->_audiodata_buf_size,
- dev->_audiodata_buf_virt_addr,
- dev->_audiodata_buf_phys_addr);
- dev->_audiodata_buf_virt_addr = NULL;
- }
-}
-
-void cx25821_stop_upstream_audio(struct cx25821_dev *dev)
-{
- const struct sram_channel *sram_ch =
- dev->channels[AUDIO_UPSTREAM_SRAM_CHANNEL_B].sram_channels;
- u32 tmp = 0;
-
- if (!dev->_audio_is_running) {
- printk(KERN_DEBUG
- pr_fmt("No audio file is currently running so return!\n"));
- return;
- }
- /* Disable RISC interrupts */
- cx_write(sram_ch->int_msk, 0);
-
- /* Turn OFF risc and fifo enable in AUD_DMA_CNTRL */
- tmp = cx_read(sram_ch->dma_ctl);
- cx_write(sram_ch->dma_ctl,
- tmp & ~(sram_ch->fld_aud_fifo_en | sram_ch->fld_aud_risc_en));
-
- /* Clear data buffer memory */
- if (dev->_audiodata_buf_virt_addr)
- memset(dev->_audiodata_buf_virt_addr, 0,
- dev->_audiodata_buf_size);
-
- dev->_audio_is_running = 0;
- dev->_is_first_audio_frame = 0;
- dev->_audioframe_count = 0;
- dev->_audiofile_status = END_OF_FILE;
-
- flush_work(&dev->_audio_work_entry);
-
- kfree(dev->_audiofilename);
-}
-
-void cx25821_free_mem_upstream_audio(struct cx25821_dev *dev)
-{
- if (dev->_audio_is_running)
- cx25821_stop_upstream_audio(dev);
-
- cx25821_free_memory_audio(dev);
-}
-
-static int cx25821_get_audio_data(struct cx25821_dev *dev,
- const struct sram_channel *sram_ch)
-{
- struct file *file;
- int frame_index_temp = dev->_audioframe_index;
- int i = 0;
- int frame_size = AUDIO_DATA_BUF_SZ;
- int frame_offset = frame_size * frame_index_temp;
- char mybuf[AUDIO_LINE_SIZE];
- loff_t file_offset = dev->_audioframe_count * frame_size;
- char *p = NULL;
-
- if (dev->_audiofile_status == END_OF_FILE)
- return 0;
-
- file = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0);
- if (IS_ERR(file)) {
- pr_err("%s(): ERROR opening file(%s) with errno = %ld!\n",
- __func__, dev->_audiofilename, -PTR_ERR(file));
- return PTR_ERR(file);
- }
-
- if (dev->_audiodata_buf_virt_addr)
- p = (char *)dev->_audiodata_buf_virt_addr + frame_offset;
-
- for (i = 0; i < dev->_audio_lines_count; i++) {
- int n = kernel_read(file, mybuf, AUDIO_LINE_SIZE, &file_offset);
- if (n < AUDIO_LINE_SIZE) {
- pr_info("Done: exit %s() since no more bytes to read from Audio file\n",
- __func__);
- dev->_audiofile_status = END_OF_FILE;
- fput(file);
- return 0;
- }
- dev->_audiofile_status = IN_PROGRESS;
- if (p) {
- memcpy(p, mybuf, n);
- p += n;
- }
- }
- dev->_audioframe_count++;
- fput(file);
-
- return 0;
-}
-
-static void cx25821_audioups_handler(struct work_struct *work)
-{
- struct cx25821_dev *dev = container_of(work, struct cx25821_dev,
- _audio_work_entry);
-
- if (!dev) {
- pr_err("ERROR %s(): since container_of(work_struct) FAILED!\n",
- __func__);
- return;
- }
-
- cx25821_get_audio_data(dev, dev->channels[dev->_audio_upstream_channel].
- sram_channels);
-}
-
-static int cx25821_openfile_audio(struct cx25821_dev *dev,
- const struct sram_channel *sram_ch)
-{
- char *p = (void *)dev->_audiodata_buf_virt_addr;
- struct file *file;
- loff_t file_offset = 0;
- int i, j;
-
- file = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0);
- if (IS_ERR(file)) {
- pr_err("%s(): ERROR opening file(%s) with errno = %ld!\n",
- __func__, dev->_audiofilename, PTR_ERR(file));
- return PTR_ERR(file);
- }
-
- for (j = 0; j < NUM_AUDIO_FRAMES; j++) {
- for (i = 0; i < dev->_audio_lines_count; i++) {
- char buf[AUDIO_LINE_SIZE];
- loff_t offset = file_offset;
- int n = kernel_read(file, buf, AUDIO_LINE_SIZE, &file_offset);
-
- if (n < AUDIO_LINE_SIZE) {
- pr_info("Done: exit %s() since no more bytes to read from Audio file\n",
- __func__);
- dev->_audiofile_status = END_OF_FILE;
- fput(file);
- return 0;
- }
-
- if (p)
- memcpy(p + offset, buf, n);
- }
- dev->_audioframe_count++;
- }
- dev->_audiofile_status = IN_PROGRESS;
- fput(file);
- return 0;
-}
-
-static int cx25821_audio_upstream_buffer_prepare(struct cx25821_dev *dev,
- const struct sram_channel *sram_ch,
- int bpl)
-{
- int ret = 0;
- dma_addr_t dma_addr;
- dma_addr_t data_dma_addr;
-
- cx25821_free_memory_audio(dev);
-
- dev->_risc_virt_addr = pci_alloc_consistent(dev->pci,
- dev->audio_upstream_riscbuf_size, &dma_addr);
- dev->_risc_virt_start_addr = dev->_risc_virt_addr;
- dev->_risc_phys_start_addr = dma_addr;
- dev->_risc_phys_addr = dma_addr;
- dev->_audiorisc_size = dev->audio_upstream_riscbuf_size;
-
- if (!dev->_risc_virt_addr) {
- printk(KERN_DEBUG
- pr_fmt("ERROR: pci_alloc_consistent() FAILED to allocate memory for RISC program! Returning\n"));
- return -ENOMEM;
- }
- /* Clear out memory at address */
- memset(dev->_risc_virt_addr, 0, dev->_audiorisc_size);
-
- /* For Audio Data buffer allocation */
- dev->_audiodata_buf_virt_addr = pci_alloc_consistent(dev->pci,
- dev->audio_upstream_databuf_size, &data_dma_addr);
- dev->_audiodata_buf_phys_addr = data_dma_addr;
- dev->_audiodata_buf_size = dev->audio_upstream_databuf_size;
-
- if (!dev->_audiodata_buf_virt_addr) {
- printk(KERN_DEBUG
- pr_fmt("ERROR: pci_alloc_consistent() FAILED to allocate memory for data buffer! Returning\n"));
- return -ENOMEM;
- }
- /* Clear out memory at address */
- memset(dev->_audiodata_buf_virt_addr, 0, dev->_audiodata_buf_size);
-
- ret = cx25821_openfile_audio(dev, sram_ch);
- if (ret < 0)
- return ret;
-
- /* Creating RISC programs */
- ret = cx25821_risc_buffer_upstream_audio(dev, dev->pci, bpl,
- dev->_audio_lines_count);
- if (ret < 0) {
- printk(KERN_DEBUG
- pr_fmt("ERROR creating audio upstream RISC programs!\n"));
- goto error;
- }
-
- return 0;
-
-error:
- return ret;
-}
-
-static int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
- u32 status)
-{
- int i = 0;
- u32 int_msk_tmp;
- const struct sram_channel *channel = dev->channels[chan_num].sram_channels;
- dma_addr_t risc_phys_jump_addr;
- __le32 *rp;
-
- if (status & FLD_AUD_SRC_RISCI1) {
- /* Get interrupt_index of the program that interrupted */
- u32 prog_cnt = cx_read(channel->gpcnt);
-
- /* Since we've identified our IRQ, clear our bits from the
- * interrupt mask and interrupt status registers */
- cx_write(channel->int_msk, 0);
- cx_write(channel->int_stat, cx_read(channel->int_stat));
-
- spin_lock(&dev->slock);
-
- while (prog_cnt != dev->_last_index_irq) {
- /* Update _last_index_irq */
- if (dev->_last_index_irq < (NUMBER_OF_PROGRAMS - 1))
- dev->_last_index_irq++;
- else
- dev->_last_index_irq = 0;
-
- dev->_audioframe_index = dev->_last_index_irq;
-
- schedule_work(&dev->_audio_work_entry);
- }
-
- if (dev->_is_first_audio_frame) {
- dev->_is_first_audio_frame = 0;
-
- if (dev->_risc_virt_start_addr != NULL) {
- risc_phys_jump_addr =
- dev->_risc_phys_start_addr +
- RISC_SYNC_INSTRUCTION_SIZE +
- AUDIO_RISC_DMA_BUF_SIZE;
-
- rp = cx25821_risc_field_upstream_audio(dev,
- dev->_risc_virt_start_addr + 1,
- dev->_audiodata_buf_phys_addr,
- AUDIO_LINE_SIZE, FIFO_DISABLE);
-
- if (USE_RISC_NOOP_AUDIO) {
- for (i = 0; i < NUM_NO_OPS; i++) {
- *(rp++) =
- cpu_to_le32(RISC_NOOP);
- }
- }
- /* Jump to 2nd Audio Frame */
- *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 |
- RISC_CNT_RESET);
- *(rp++) = cpu_to_le32(risc_phys_jump_addr);
- *(rp++) = cpu_to_le32(0);
- }
- }
-
- spin_unlock(&dev->slock);
- } else {
- if (status & FLD_AUD_SRC_OF)
- pr_warn("%s(): Audio Received Overflow Error Interrupt!\n",
- __func__);
-
- if (status & FLD_AUD_SRC_SYNC)
- pr_warn("%s(): Audio Received Sync Error Interrupt!\n",
- __func__);
-
- if (status & FLD_AUD_SRC_OPC_ERR)
- pr_warn("%s(): Audio Received OpCode Error Interrupt!\n",
- __func__);
-
- /* Read and write back the interrupt status register to clear
- * our bits */
- cx_write(channel->int_stat, cx_read(channel->int_stat));
- }
-
- if (dev->_audiofile_status == END_OF_FILE) {
- pr_warn("EOF Channel Audio Framecount = %d\n",
- dev->_audioframe_count);
- return -1;
- }
- /* ElSE, set the interrupt mask register, re-enable irq. */
- int_msk_tmp = cx_read(channel->int_msk);
- cx_write(channel->int_msk, int_msk_tmp |= _intr_msk);
-
- return 0;
-}
-
-static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id)
-{
- struct cx25821_dev *dev = dev_id;
- u32 audio_status;
- int handled = 0;
- const struct sram_channel *sram_ch;
-
- if (!dev)
- return -1;
-
- sram_ch = dev->channels[dev->_audio_upstream_channel].sram_channels;
-
- audio_status = cx_read(sram_ch->int_stat);
-
- /* Only deal with our interrupt */
- if (audio_status) {
- handled = cx25821_audio_upstream_irq(dev,
- dev->_audio_upstream_channel, audio_status);
- }
-
- if (handled < 0)
- cx25821_stop_upstream_audio(dev);
- else
- handled += handled;
-
- return IRQ_RETVAL(handled);
-}
-
-static void cx25821_wait_fifo_enable(struct cx25821_dev *dev,
- const struct sram_channel *sram_ch)
-{
- int count = 0;
- u32 tmp;
-
- do {
- /* Wait 10 microsecond before checking to see if the FIFO is
- * turned ON. */
- udelay(10);
-
- tmp = cx_read(sram_ch->dma_ctl);
-
- /* 10 millisecond timeout */
- if (count++ > 1000) {
- pr_err("ERROR: %s() fifo is NOT turned on. Timeout!\n",
- __func__);
- return;
- }
-
- } while (!(tmp & sram_ch->fld_aud_fifo_en));
-
-}
-
-static int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
- const struct sram_channel *sram_ch)
-{
- u32 tmp = 0;
- int err = 0;
-
- /* Set the physical start address of the RISC program in the initial
- * program counter(IPC) member of the CMDS. */
- cx_write(sram_ch->cmds_start + 0, dev->_risc_phys_addr);
- /* Risc IPC High 64 bits 63-32 */
- cx_write(sram_ch->cmds_start + 4, 0);
-
- /* reset counter */
- cx_write(sram_ch->gpcnt_ctl, 3);
-
- /* Set the line length (It looks like we do not need to set the
- * line length) */
- cx_write(sram_ch->aud_length, AUDIO_LINE_SIZE & FLD_AUD_DST_LN_LNGTH);
-
- /* Set the input mode to 16-bit */
- tmp = cx_read(sram_ch->aud_cfg);
- tmp |= FLD_AUD_SRC_ENABLE | FLD_AUD_DST_PK_MODE | FLD_AUD_CLK_ENABLE |
- FLD_AUD_MASTER_MODE | FLD_AUD_CLK_SELECT_PLL_D |
- FLD_AUD_SONY_MODE;
- cx_write(sram_ch->aud_cfg, tmp);
-
- /* Read and write back the interrupt status register to clear it */
- tmp = cx_read(sram_ch->int_stat);
- cx_write(sram_ch->int_stat, tmp);
-
- /* Clear our bits from the interrupt status register. */
- cx_write(sram_ch->int_stat, _intr_msk);
-
- /* Set the interrupt mask register, enable irq. */
- cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << sram_ch->irq_bit));
- tmp = cx_read(sram_ch->int_msk);
- cx_write(sram_ch->int_msk, tmp |= _intr_msk);
-
- err = request_irq(dev->pci->irq, cx25821_upstream_irq_audio,
- IRQF_SHARED, dev->name, dev);
- if (err < 0) {
- pr_err("%s: can't get upstream IRQ %d\n", dev->name,
- dev->pci->irq);
- goto fail_irq;
- }
-
- /* Start the DMA engine */
- tmp = cx_read(sram_ch->dma_ctl);
- cx_set(sram_ch->dma_ctl, tmp | sram_ch->fld_aud_risc_en);
-
- dev->_audio_is_running = 1;
- dev->_is_first_audio_frame = 1;
-
- /* The fifo_en bit turns on by the first Risc program */
- cx25821_wait_fifo_enable(dev, sram_ch);
-
- return 0;
-
-fail_irq:
- cx25821_dev_unregister(dev);
- return err;
-}
-
-int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
-{
- const struct sram_channel *sram_ch;
- int err = 0;
-
- if (dev->_audio_is_running) {
- pr_warn("Audio Channel is still running so return!\n");
- return 0;
- }
-
- dev->_audio_upstream_channel = channel_select;
- sram_ch = dev->channels[channel_select].sram_channels;
-
- /* Work queue */
- INIT_WORK(&dev->_audio_work_entry, cx25821_audioups_handler);
-
- dev->_last_index_irq = 0;
- dev->_audio_is_running = 0;
- dev->_audioframe_count = 0;
- dev->_audiofile_status = RESET_STATUS;
- dev->_audio_lines_count = LINES_PER_AUDIO_BUFFER;
- _line_size = AUDIO_LINE_SIZE;
-
- if ((dev->input_audiofilename) &&
- (strcmp(dev->input_audiofilename, "") != 0))
- dev->_audiofilename = kstrdup(dev->input_audiofilename,
- GFP_KERNEL);
- else
- dev->_audiofilename = kstrdup(_defaultAudioName,
- GFP_KERNEL);
-
- if (!dev->_audiofilename) {
- err = -ENOMEM;
- goto error;
- }
-
- cx25821_sram_channel_setup_upstream_audio(dev, sram_ch,
- _line_size, 0);
-
- dev->audio_upstream_riscbuf_size =
- AUDIO_RISC_DMA_BUF_SIZE * NUM_AUDIO_PROGS +
- RISC_SYNC_INSTRUCTION_SIZE;
- dev->audio_upstream_databuf_size = AUDIO_DATA_BUF_SZ * NUM_AUDIO_PROGS;
-
- /* Allocating buffers and prepare RISC program */
- err = cx25821_audio_upstream_buffer_prepare(dev, sram_ch,
- _line_size);
- if (err < 0) {
- pr_err("%s: Failed to set up Audio upstream buffers!\n",
- dev->name);
- goto error;
- }
- /* Start RISC engine */
- cx25821_start_audio_dma_upstream(dev, sram_ch);
-
- return 0;
-
-error:
- cx25821_dev_unregister(dev);
-
- return err;
-}
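
The whole cx25821-audio-upstream.c file is dropped rather than converted: its data path opened and read a file from inside the kernel via filp_open()/kernel_read() (with a hard-coded default path in the companion header), a pattern the kernel discourages because path resolution and file I/O belong in user space. A hedged sketch of the conventional replacement, where user space pushes the samples through a char-device write handler; struct example_dev and example_write() are illustrative names, not from this driver:

	#include <linux/fs.h>
	#include <linux/uaccess.h>

	struct example_dev {
		void *dma_buf;          /* coherent buffer the DMA engine reads */
		size_t dma_buf_size;
	};

	static ssize_t example_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *ppos)
	{
		struct example_dev *dev = file->private_data;

		if (count > dev->dma_buf_size)
			count = dev->dma_buf_size;
		if (copy_from_user(dev->dma_buf, buf, count))
			return -EFAULT;
		return count;           /* hardware consumes dev->dma_buf */
	}

The deleted video counterpart further below already worked this way for its frame data (see cx25821_write_frame), which made the audio path's in-kernel file I/O the outlier.
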
diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.h b/drivers/media/pci/cx25821/cx25821-audio-upstream.h
deleted file mode 100644
index 2bc875d1ec9f..000000000000
--- a/drivers/media/pci/cx25821/cx25821-audio-upstream.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Driver for the Conexant CX25821 PCIe bridge
- *
- * Copyright (C) 2009 Conexant Systems Inc.
- * Authors <hiep.huynh@conexant.com>, <shu.lin@conexant.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- */
-
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
-
-#define NUM_AUDIO_PROGS 8
-#define NUM_AUDIO_FRAMES 8
-#define END_OF_FILE 0
-#define IN_PROGRESS 1
-#define RESET_STATUS -1
-#define FIFO_DISABLE 0
-#define FIFO_ENABLE 1
-#define NUM_NO_OPS 4
-
-#define RISC_READ_INSTRUCTION_SIZE 12
-#define RISC_JUMP_INSTRUCTION_SIZE 12
-#define RISC_WRITECR_INSTRUCTION_SIZE 16
-#define RISC_SYNC_INSTRUCTION_SIZE 4
-#define DWORD_SIZE 4
-#define AUDIO_SYNC_LINE 4
-
-#define LINES_PER_AUDIO_BUFFER 15
-#define AUDIO_LINE_SIZE 128
-#define AUDIO_DATA_BUF_SZ (AUDIO_LINE_SIZE * LINES_PER_AUDIO_BUFFER)
-
-#define USE_RISC_NOOP_AUDIO 1
-
-#ifdef USE_RISC_NOOP_AUDIO
-#define AUDIO_RISC_DMA_BUF_SIZE \
- (LINES_PER_AUDIO_BUFFER * RISC_READ_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + NUM_NO_OPS * DWORD_SIZE + \
- RISC_JUMP_INSTRUCTION_SIZE)
-#endif
-
-#ifndef USE_RISC_NOOP_AUDIO
-#define AUDIO_RISC_DMA_BUF_SIZE \
- (LINES_PER_AUDIO_BUFFER * RISC_READ_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + RISC_JUMP_INSTRUCTION_SIZE)
-#endif
-
-static int _line_size;
-char *_defaultAudioName = "/root/audioGOOD.wav";
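
Two details of the deleted header are worth noting. First, it defined objects rather than declaring them: "static int _line_size;" gives every includer its own private copy, and the non-static "char *_defaultAudioName" risks multiple-definition link errors if the header is ever included twice; the idiomatic split is an extern declaration in the header with a single definition in one .c file. Second, the per-frame RISC program size works out as follows, a sketch using the macro values above (static_assert is the kernel's compile-time check from linux/build_bug.h):

	/* 15 lines * 12-byte READ    = 180
	 * + 16-byte WRITECR          = 196
	 * +  4 NOOPs * 4 bytes       = 212
	 * + 12-byte JUMP             = 224 bytes per frame program
	 */
	static_assert(AUDIO_RISC_DMA_BUF_SIZE == 224);
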
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index 040c6c251d3a..2f0171134f7e 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -428,7 +428,7 @@ static void cx25821_registers_init(struct cx25821_dev *dev)
tmp |= FLD_USE_ALT_PLL_REF;
cx_write(CLK_RST, tmp & ~(FLD_VID_I_CLK_NOE | FLD_VID_J_CLK_NOE));
- mdelay(100);
+ msleep(100);
}
int cx25821_sram_channel_setup(struct cx25821_dev *dev,
@@ -803,7 +803,7 @@ static void cx25821_initialize(struct cx25821_dev *dev)
cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
cx_write(PAD_CTRL, 0x12); /* for I2C */
cx25821_registers_init(dev); /* init Pecos registers */
- mdelay(100);
+ msleep(100);
for (i = 0; i < VID_CHANNEL_NUM; i++) {
cx25821_set_vip_mode(dev, dev->channels[i].sram_channels);
diff --git a/drivers/media/pci/cx25821/cx25821-gpio.c b/drivers/media/pci/cx25821/cx25821-gpio.c
index 76b8f619e55a..f5ffaf880e5f 100644
--- a/drivers/media/pci/cx25821/cx25821-gpio.c
+++ b/drivers/media/pci/cx25821/cx25821-gpio.c
@@ -88,7 +88,7 @@ void cx25821_gpio_init(struct cx25821_dev *dev)
default:
/* set GPIO 5 to select the path for Medusa/Athena */
cx25821_set_gpiopin_logicvalue(dev, 5, 1);
- mdelay(20);
+ msleep(20);
break;
}
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.c b/drivers/media/pci/cx25821/cx25821-video-upstream.c
deleted file mode 100644
index 6c838c8a7924..000000000000
--- a/drivers/media/pci/cx25821/cx25821-video-upstream.c
+++ /dev/null
@@ -1,673 +0,0 @@
-/*
- * Driver for the Conexant CX25821 PCIe bridge
- *
- * Copyright (C) 2009 Conexant Systems Inc.
- * Authors <hiep.huynh@conexant.com>, <shu.lin@conexant.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include "cx25821-video.h"
-#include "cx25821-video-upstream.h"
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards");
-MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>");
-MODULE_LICENSE("GPL");
-
-static int _intr_msk = FLD_VID_SRC_RISC1 | FLD_VID_SRC_UF | FLD_VID_SRC_SYNC |
- FLD_VID_SRC_OPC_ERR;
-
-int cx25821_sram_channel_setup_upstream(struct cx25821_dev *dev,
- const struct sram_channel *ch,
- unsigned int bpl, u32 risc)
-{
- unsigned int i, lines;
- u32 cdt;
-
- if (ch->cmds_start == 0) {
- cx_write(ch->ptr1_reg, 0);
- cx_write(ch->ptr2_reg, 0);
- cx_write(ch->cnt2_reg, 0);
- cx_write(ch->cnt1_reg, 0);
- return 0;
- }
-
- bpl = (bpl + 7) & ~7; /* alignment */
- cdt = ch->cdt;
- lines = ch->fifo_size / bpl;
-
- if (lines > 4)
- lines = 4;
-
- BUG_ON(lines < 2);
-
- /* write CDT */
- for (i = 0; i < lines; i++) {
- cx_write(cdt + 16 * i, ch->fifo_start + bpl * i);
- cx_write(cdt + 16 * i + 4, 0);
- cx_write(cdt + 16 * i + 8, 0);
- cx_write(cdt + 16 * i + 12, 0);
- }
-
- /* write CMDS */
- cx_write(ch->cmds_start + 0, risc);
-
- cx_write(ch->cmds_start + 4, 0);
- cx_write(ch->cmds_start + 8, cdt);
- cx_write(ch->cmds_start + 12, (lines * 16) >> 3);
- cx_write(ch->cmds_start + 16, ch->ctrl_start);
-
- cx_write(ch->cmds_start + 20, VID_IQ_SIZE_DW);
-
- for (i = 24; i < 80; i += 4)
- cx_write(ch->cmds_start + i, 0);
-
- /* fill registers */
- cx_write(ch->ptr1_reg, ch->fifo_start);
- cx_write(ch->ptr2_reg, cdt);
- cx_write(ch->cnt2_reg, (lines * 16) >> 3);
- cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
-
- return 0;
-}
-
-static __le32 *cx25821_update_riscprogram(struct cx25821_channel *chan,
- __le32 *rp, unsigned int offset,
- unsigned int bpl, u32 sync_line,
- unsigned int lines, int fifo_enable,
- int field_type)
-{
- struct cx25821_video_out_data *out = chan->out;
- unsigned int line, i;
- int dist_betwn_starts = bpl * 2;
-
- *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
-
- if (USE_RISC_NOOP_VIDEO) {
- for (i = 0; i < NUM_NO_OPS; i++)
- *(rp++) = cpu_to_le32(RISC_NOOP);
- }
-
- /* scan lines */
- for (line = 0; line < lines; line++) {
- *(rp++) = cpu_to_le32(RISC_READ | RISC_SOL | RISC_EOL | bpl);
- *(rp++) = cpu_to_le32(out->_data_buf_phys_addr + offset);
- *(rp++) = cpu_to_le32(0); /* bits 63-32 */
-
- if ((lines <= NTSC_FIELD_HEIGHT)
- || (line < (NTSC_FIELD_HEIGHT - 1)) || !(out->is_60hz)) {
- offset += dist_betwn_starts;
- }
- }
-
- return rp;
-}
-
-static __le32 *cx25821_risc_field_upstream(struct cx25821_channel *chan, __le32 *rp,
- dma_addr_t databuf_phys_addr,
- unsigned int offset, u32 sync_line,
- unsigned int bpl, unsigned int lines,
- int fifo_enable, int field_type)
-{
- struct cx25821_video_out_data *out = chan->out;
- unsigned int line, i;
- const struct sram_channel *sram_ch = chan->sram_channels;
- int dist_betwn_starts = bpl * 2;
-
- /* sync instruction */
- if (sync_line != NO_SYNC_LINE)
- *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
-
- if (USE_RISC_NOOP_VIDEO) {
- for (i = 0; i < NUM_NO_OPS; i++)
- *(rp++) = cpu_to_le32(RISC_NOOP);
- }
-
- /* scan lines */
- for (line = 0; line < lines; line++) {
- *(rp++) = cpu_to_le32(RISC_READ | RISC_SOL | RISC_EOL | bpl);
- *(rp++) = cpu_to_le32(databuf_phys_addr + offset);
- *(rp++) = cpu_to_le32(0); /* bits 63-32 */
-
- if ((lines <= NTSC_FIELD_HEIGHT)
- || (line < (NTSC_FIELD_HEIGHT - 1)) || !(out->is_60hz))
- /* to skip the other field line */
- offset += dist_betwn_starts;
-
- /* check if we need to enable the FIFO after the first 4 lines
- * For the upstream video channel, the risc engine will enable
- * the FIFO. */
- if (fifo_enable && line == 3) {
- *(rp++) = cpu_to_le32(RISC_WRITECR);
- *(rp++) = cpu_to_le32(sram_ch->dma_ctl);
- *(rp++) = cpu_to_le32(FLD_VID_FIFO_EN);
- *(rp++) = cpu_to_le32(0x00000001);
- }
- }
-
- return rp;
-}
-
-static int cx25821_risc_buffer_upstream(struct cx25821_channel *chan,
- struct pci_dev *pci,
- unsigned int top_offset,
- unsigned int bpl, unsigned int lines)
-{
- struct cx25821_video_out_data *out = chan->out;
- __le32 *rp;
- int fifo_enable = 0;
- /* get line count for single field */
- int singlefield_lines = lines >> 1;
- int odd_num_lines = singlefield_lines;
- int frame = 0;
- int frame_size = 0;
- int databuf_offset = 0;
- int risc_program_size = 0;
- int risc_flag = RISC_CNT_RESET;
- unsigned int bottom_offset = bpl;
- dma_addr_t risc_phys_jump_addr;
-
- if (out->is_60hz) {
- odd_num_lines = singlefield_lines + 1;
- risc_program_size = FRAME1_VID_PROG_SIZE;
- frame_size = (bpl == Y411_LINE_SZ) ?
- FRAME_SIZE_NTSC_Y411 : FRAME_SIZE_NTSC_Y422;
- } else {
- risc_program_size = PAL_VID_PROG_SIZE;
- frame_size = (bpl == Y411_LINE_SZ) ?
- FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422;
- }
-
- /* Virtual address of Risc buffer program */
- rp = out->_dma_virt_addr;
-
- for (frame = 0; frame < NUM_FRAMES; frame++) {
- databuf_offset = frame_size * frame;
-
- if (UNSET != top_offset) {
- fifo_enable = (frame == 0) ? FIFO_ENABLE : FIFO_DISABLE;
- rp = cx25821_risc_field_upstream(chan, rp,
- out->_data_buf_phys_addr +
- databuf_offset, top_offset, 0, bpl,
- odd_num_lines, fifo_enable, ODD_FIELD);
- }
-
- fifo_enable = FIFO_DISABLE;
-
- /* Even Field */
- rp = cx25821_risc_field_upstream(chan, rp,
- out->_data_buf_phys_addr +
- databuf_offset, bottom_offset,
- 0x200, bpl, singlefield_lines,
- fifo_enable, EVEN_FIELD);
-
- if (frame == 0) {
- risc_flag = RISC_CNT_RESET;
- risc_phys_jump_addr = out->_dma_phys_start_addr +
- risc_program_size;
- } else {
- risc_phys_jump_addr = out->_dma_phys_start_addr;
- risc_flag = RISC_CNT_INC;
- }
-
- /* Loop to 2ndFrameRISC or to Start of Risc
- * program & generate IRQ
- */
- *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | risc_flag);
- *(rp++) = cpu_to_le32(risc_phys_jump_addr);
- *(rp++) = cpu_to_le32(0);
- }
-
- return 0;
-}
-
-void cx25821_stop_upstream_video(struct cx25821_channel *chan)
-{
- struct cx25821_video_out_data *out = chan->out;
- struct cx25821_dev *dev = chan->dev;
- const struct sram_channel *sram_ch = chan->sram_channels;
- u32 tmp = 0;
-
- if (!out->_is_running) {
- pr_info("No video file is currently running so return!\n");
- return;
- }
-
- /* Set the interrupt mask register, disable irq. */
- cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) & ~(1 << sram_ch->irq_bit));
-
- /* Disable RISC interrupts */
- tmp = cx_read(sram_ch->int_msk);
- cx_write(sram_ch->int_msk, tmp & ~_intr_msk);
-
- /* Turn OFF risc and fifo enable */
- tmp = cx_read(sram_ch->dma_ctl);
- cx_write(sram_ch->dma_ctl, tmp & ~(FLD_VID_FIFO_EN | FLD_VID_RISC_EN));
-
- free_irq(dev->pci->irq, chan);
-
- /* Clear data buffer memory */
- if (out->_data_buf_virt_addr)
- memset(out->_data_buf_virt_addr, 0, out->_data_buf_size);
-
- out->_is_running = 0;
- out->_is_first_frame = 0;
- out->_frame_count = 0;
- out->_file_status = END_OF_FILE;
-
- tmp = cx_read(VID_CH_MODE_SEL);
- cx_write(VID_CH_MODE_SEL, tmp & 0xFFFFFE00);
-}
-
-void cx25821_free_mem_upstream(struct cx25821_channel *chan)
-{
- struct cx25821_video_out_data *out = chan->out;
- struct cx25821_dev *dev = chan->dev;
-
- if (out->_is_running)
- cx25821_stop_upstream_video(chan);
-
- if (out->_dma_virt_addr) {
- pci_free_consistent(dev->pci, out->_risc_size,
- out->_dma_virt_addr, out->_dma_phys_addr);
- out->_dma_virt_addr = NULL;
- }
-
- if (out->_data_buf_virt_addr) {
- pci_free_consistent(dev->pci, out->_data_buf_size,
- out->_data_buf_virt_addr,
- out->_data_buf_phys_addr);
- out->_data_buf_virt_addr = NULL;
- }
-}
-
-int cx25821_write_frame(struct cx25821_channel *chan,
- const char __user *data, size_t count)
-{
- struct cx25821_video_out_data *out = chan->out;
- int line_size = (out->_pixel_format == PIXEL_FRMT_411) ?
- Y411_LINE_SZ : Y422_LINE_SZ;
- int frame_size = 0;
- int frame_offset = 0;
- int curpos = out->curpos;
-
- if (out->is_60hz)
- frame_size = (line_size == Y411_LINE_SZ) ?
- FRAME_SIZE_NTSC_Y411 : FRAME_SIZE_NTSC_Y422;
- else
- frame_size = (line_size == Y411_LINE_SZ) ?
- FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422;
-
- if (curpos == 0) {
- out->cur_frame_index = out->_frame_index;
- if (wait_event_interruptible(out->waitq, out->cur_frame_index != out->_frame_index))
- return -EINTR;
- out->cur_frame_index = out->_frame_index;
- }
-
- frame_offset = out->cur_frame_index ? frame_size : 0;
-
- if (frame_size - curpos < count)
- count = frame_size - curpos;
- if (copy_from_user((__force char *)out->_data_buf_virt_addr + frame_offset + curpos,
- data, count))
- return -EFAULT;
- curpos += count;
- if (curpos == frame_size) {
- out->_frame_count++;
- curpos = 0;
- }
- out->curpos = curpos;
-
- return count;
-}
-
-static int cx25821_upstream_buffer_prepare(struct cx25821_channel *chan,
- const struct sram_channel *sram_ch,
- int bpl)
-{
- struct cx25821_video_out_data *out = chan->out;
- struct cx25821_dev *dev = chan->dev;
- int ret = 0;
- dma_addr_t dma_addr;
- dma_addr_t data_dma_addr;
-
- if (out->_dma_virt_addr != NULL)
- pci_free_consistent(dev->pci, out->upstream_riscbuf_size,
- out->_dma_virt_addr, out->_dma_phys_addr);
-
- out->_dma_virt_addr = pci_alloc_consistent(dev->pci,
- out->upstream_riscbuf_size, &dma_addr);
- out->_dma_virt_start_addr = out->_dma_virt_addr;
- out->_dma_phys_start_addr = dma_addr;
- out->_dma_phys_addr = dma_addr;
- out->_risc_size = out->upstream_riscbuf_size;
-
- if (!out->_dma_virt_addr) {
- pr_err("FAILED to allocate memory for Risc buffer! Returning\n");
- return -ENOMEM;
- }
-
- /* Clear memory at address */
- memset(out->_dma_virt_addr, 0, out->_risc_size);
-
- if (out->_data_buf_virt_addr != NULL)
- pci_free_consistent(dev->pci, out->upstream_databuf_size,
- out->_data_buf_virt_addr,
- out->_data_buf_phys_addr);
- /* For Video Data buffer allocation */
- out->_data_buf_virt_addr = pci_alloc_consistent(dev->pci,
- out->upstream_databuf_size, &data_dma_addr);
- out->_data_buf_phys_addr = data_dma_addr;
- out->_data_buf_size = out->upstream_databuf_size;
-
- if (!out->_data_buf_virt_addr) {
- pr_err("FAILED to allocate memory for data buffer! Returning\n");
- return -ENOMEM;
- }
-
- /* Clear memory at address */
- memset(out->_data_buf_virt_addr, 0, out->_data_buf_size);
-
- /* Create RISC programs */
- ret = cx25821_risc_buffer_upstream(chan, dev->pci, 0, bpl,
- out->_lines_count);
- if (ret < 0) {
- pr_info("Failed creating Video Upstream Risc programs!\n");
- goto error;
- }
-
- return 0;
-
-error:
- return ret;
-}
-
-static int cx25821_video_upstream_irq(struct cx25821_channel *chan, u32 status)
-{
- struct cx25821_video_out_data *out = chan->out;
- struct cx25821_dev *dev = chan->dev;
- u32 int_msk_tmp;
- const struct sram_channel *channel = chan->sram_channels;
- int singlefield_lines = NTSC_FIELD_HEIGHT;
- int line_size_in_bytes = Y422_LINE_SZ;
- int odd_risc_prog_size = 0;
- dma_addr_t risc_phys_jump_addr;
- __le32 *rp;
-
- if (status & FLD_VID_SRC_RISC1) {
- /* We should only process one program per call */
- u32 prog_cnt = cx_read(channel->gpcnt);
-
- /* Since we've identified our IRQ, clear our bits from the
- * interrupt mask and interrupt status registers */
- int_msk_tmp = cx_read(channel->int_msk);
- cx_write(channel->int_msk, int_msk_tmp & ~_intr_msk);
- cx_write(channel->int_stat, _intr_msk);
-
- wake_up(&out->waitq);
-
- spin_lock(&dev->slock);
-
- out->_frame_index = prog_cnt;
-
- if (out->_is_first_frame) {
- out->_is_first_frame = 0;
-
- if (out->is_60hz) {
- singlefield_lines += 1;
- odd_risc_prog_size = ODD_FLD_NTSC_PROG_SIZE;
- } else {
- singlefield_lines = PAL_FIELD_HEIGHT;
- odd_risc_prog_size = ODD_FLD_PAL_PROG_SIZE;
- }
-
- if (out->_dma_virt_start_addr != NULL) {
- line_size_in_bytes =
- (out->_pixel_format ==
- PIXEL_FRMT_411) ? Y411_LINE_SZ :
- Y422_LINE_SZ;
- risc_phys_jump_addr =
- out->_dma_phys_start_addr +
- odd_risc_prog_size;
-
- rp = cx25821_update_riscprogram(chan,
- out->_dma_virt_start_addr, TOP_OFFSET,
- line_size_in_bytes, 0x0,
- singlefield_lines, FIFO_DISABLE,
- ODD_FIELD);
-
- /* Jump to Even Risc program of 1st Frame */
- *(rp++) = cpu_to_le32(RISC_JUMP);
- *(rp++) = cpu_to_le32(risc_phys_jump_addr);
- *(rp++) = cpu_to_le32(0);
- }
- }
-
- spin_unlock(&dev->slock);
- } else {
- if (status & FLD_VID_SRC_UF)
- pr_err("%s(): Video Received Underflow Error Interrupt!\n",
- __func__);
-
- if (status & FLD_VID_SRC_SYNC)
- pr_err("%s(): Video Received Sync Error Interrupt!\n",
- __func__);
-
- if (status & FLD_VID_SRC_OPC_ERR)
- pr_err("%s(): Video Received OpCode Error Interrupt!\n",
- __func__);
- }
-
- if (out->_file_status == END_OF_FILE) {
- pr_err("EOF Channel 1 Framecount = %d\n", out->_frame_count);
- return -1;
- }
- /* ElSE, set the interrupt mask register, re-enable irq. */
- int_msk_tmp = cx_read(channel->int_msk);
- cx_write(channel->int_msk, int_msk_tmp |= _intr_msk);
-
- return 0;
-}
-
-static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id)
-{
- struct cx25821_channel *chan = dev_id;
- struct cx25821_dev *dev = chan->dev;
- u32 vid_status;
- int handled = 0;
- const struct sram_channel *sram_ch;
-
- if (!dev)
- return -1;
-
- sram_ch = chan->sram_channels;
-
- vid_status = cx_read(sram_ch->int_stat);
-
- /* Only deal with our interrupt */
- if (vid_status)
- handled = cx25821_video_upstream_irq(chan, vid_status);
-
- return IRQ_RETVAL(handled);
-}
-
-static void cx25821_set_pixelengine(struct cx25821_channel *chan,
- const struct sram_channel *ch,
- int pix_format)
-{
- struct cx25821_video_out_data *out = chan->out;
- struct cx25821_dev *dev = chan->dev;
- int width = WIDTH_D1;
- int height = out->_lines_count;
- int num_lines, odd_num_lines;
- u32 value;
- int vip_mode = OUTPUT_FRMT_656;
-
- value = ((pix_format & 0x3) << 12) | (vip_mode & 0x7);
- value &= 0xFFFFFFEF;
- value |= out->is_60hz ? 0 : 0x10;
- cx_write(ch->vid_fmt_ctl, value);
-
- /* set number of active pixels in each line.
- * Default is 720 pixels in both NTSC and PAL format */
- cx_write(ch->vid_active_ctl1, width);
-
- num_lines = (height / 2) & 0x3FF;
- odd_num_lines = num_lines;
-
- if (out->is_60hz)
- odd_num_lines += 1;
-
- value = (num_lines << 16) | odd_num_lines;
-
- /* set number of active lines in field 0 (top) and field 1 (bottom) */
- cx_write(ch->vid_active_ctl2, value);
-
- cx_write(ch->vid_cdt_size, VID_CDT_SIZE >> 3);
-}
-
-static int cx25821_start_video_dma_upstream(struct cx25821_channel *chan,
- const struct sram_channel *sram_ch)
-{
- struct cx25821_video_out_data *out = chan->out;
- struct cx25821_dev *dev = chan->dev;
- u32 tmp = 0;
- int err = 0;
-
- /* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for
- * channel A-C
- */
- tmp = cx_read(VID_CH_MODE_SEL);
- cx_write(VID_CH_MODE_SEL, tmp | 0x1B0001FF);
-
- /* Set the physical start address of the RISC program in the initial
- * program counter(IPC) member of the cmds.
- */
- cx_write(sram_ch->cmds_start + 0, out->_dma_phys_addr);
- /* Risc IPC High 64 bits 63-32 */
- cx_write(sram_ch->cmds_start + 4, 0);
-
- /* reset counter */
- cx_write(sram_ch->gpcnt_ctl, 3);
-
- /* Clear our bits from the interrupt status register. */
- cx_write(sram_ch->int_stat, _intr_msk);
-
- /* Set the interrupt mask register, enable irq. */
- cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << sram_ch->irq_bit));
- tmp = cx_read(sram_ch->int_msk);
- cx_write(sram_ch->int_msk, tmp |= _intr_msk);
-
- err = request_irq(dev->pci->irq, cx25821_upstream_irq,
- IRQF_SHARED, dev->name, chan);
- if (err < 0) {
- pr_err("%s: can't get upstream IRQ %d\n",
- dev->name, dev->pci->irq);
- goto fail_irq;
- }
-
- /* Start the DMA engine */
- tmp = cx_read(sram_ch->dma_ctl);
- cx_set(sram_ch->dma_ctl, tmp | FLD_VID_RISC_EN);
-
- out->_is_running = 1;
- out->_is_first_frame = 1;
-
- return 0;
-
-fail_irq:
- cx25821_dev_unregister(dev);
- return err;
-}
-
-int cx25821_vidupstream_init(struct cx25821_channel *chan,
- int pixel_format)
-{
- struct cx25821_video_out_data *out = chan->out;
- struct cx25821_dev *dev = chan->dev;
- const struct sram_channel *sram_ch;
- u32 tmp;
- int err = 0;
- int data_frame_size = 0;
- int risc_buffer_size = 0;
-
- if (out->_is_running) {
- pr_info("Video Channel is still running so return!\n");
- return 0;
- }
-
- sram_ch = chan->sram_channels;
-
- out->is_60hz = dev->tvnorm & V4L2_STD_525_60;
-
- /* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for
- * channel A-C
- */
- tmp = cx_read(VID_CH_MODE_SEL);
- cx_write(VID_CH_MODE_SEL, tmp | 0x1B0001FF);
-
- out->_is_running = 0;
- out->_frame_count = 0;
- out->_file_status = RESET_STATUS;
- out->_lines_count = out->is_60hz ? 480 : 576;
- out->_pixel_format = pixel_format;
- out->_line_size = (out->_pixel_format == PIXEL_FRMT_422) ?
- (WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2;
- data_frame_size = out->is_60hz ? NTSC_DATA_BUF_SZ : PAL_DATA_BUF_SZ;
- risc_buffer_size = out->is_60hz ?
- NTSC_RISC_BUF_SIZE : PAL_RISC_BUF_SIZE;
-
- out->_is_running = 0;
- out->_frame_count = 0;
- out->_file_status = RESET_STATUS;
- out->_lines_count = out->is_60hz ? 480 : 576;
- out->_pixel_format = pixel_format;
- out->_line_size = (out->_pixel_format == PIXEL_FRMT_422) ?
- (WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2;
- out->curpos = 0;
- init_waitqueue_head(&out->waitq);
-
- err = cx25821_sram_channel_setup_upstream(dev, sram_ch,
- out->_line_size, 0);
-
- /* setup fifo + format */
- cx25821_set_pixelengine(chan, sram_ch, out->_pixel_format);
-
- out->upstream_riscbuf_size = risc_buffer_size * 2;
- out->upstream_databuf_size = data_frame_size * 2;
-
- /* Allocating buffers and prepare RISC program */
- err = cx25821_upstream_buffer_prepare(chan, sram_ch, out->_line_size);
- if (err < 0) {
- pr_err("%s: Failed to set up Video upstream buffers!\n",
- dev->name);
- goto error;
- }
-
- cx25821_start_video_dma_upstream(chan, sram_ch);
-
- return 0;
-
-error:
- cx25821_dev_unregister(dev);
-
- return err;
-}
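
As with the audio path, the video upstream file is removed wholesale. Its cx25821_vidupstream_init() also carried a verbatim duplicated initialization block (_is_running through _line_size assigned twice in a row), and its frame feed already used the preferred user-space model: cx25821_write_frame() blocks until the RISC engine advances to the other frame, then copies the payload in. A condensed sketch of that double-buffer handshake, with field names taken from the deleted code (dst stands in for the mapped data buffer):

	if (out->curpos == 0) {
		/* sleep until the IRQ handler bumps _frame_index */
		if (wait_event_interruptible(out->waitq,
				out->cur_frame_index != out->_frame_index))
			return -EINTR;
		out->cur_frame_index = out->_frame_index;
	}
	if (copy_from_user(dst + frame_offset + out->curpos, data, count))
		return -EFAULT;
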
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.h b/drivers/media/pci/cx25821/cx25821-video-upstream.h
deleted file mode 100644
index b6cf95f2d11b..000000000000
--- a/drivers/media/pci/cx25821/cx25821-video-upstream.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Driver for the Conexant CX25821 PCIe bridge
- *
- * Copyright (C) 2009 Conexant Systems Inc.
- * Authors <hiep.huynh@conexant.com>, <shu.lin@conexant.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- */
-
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
-
-#define OUTPUT_FRMT_656 0
-#define OPEN_FILE_1 0
-#define NUM_PROGS 8
-#define NUM_FRAMES 2
-#define ODD_FIELD 0
-#define EVEN_FIELD 1
-#define TOP_OFFSET 0
-#define FIFO_DISABLE 0
-#define FIFO_ENABLE 1
-#define TEST_FRAMES 5
-#define END_OF_FILE 0
-#define IN_PROGRESS 1
-#define RESET_STATUS -1
-#define NUM_NO_OPS 5
-
-/* PAL and NTSC line sizes and number of lines. */
-#define WIDTH_D1 720
-#define NTSC_LINES_PER_FRAME 480
-#define PAL_LINES_PER_FRAME 576
-#define PAL_LINE_SZ 1440
-#define Y422_LINE_SZ 1440
-#define Y411_LINE_SZ 1080
-#define NTSC_FIELD_HEIGHT 240
-#define NTSC_ODD_FLD_LINES 241
-#define PAL_FIELD_HEIGHT 288
-
-#define FRAME_SIZE_NTSC_Y422 (NTSC_LINES_PER_FRAME * Y422_LINE_SZ)
-#define FRAME_SIZE_NTSC_Y411 (NTSC_LINES_PER_FRAME * Y411_LINE_SZ)
-#define FRAME_SIZE_PAL_Y422 (PAL_LINES_PER_FRAME * Y422_LINE_SZ)
-#define FRAME_SIZE_PAL_Y411 (PAL_LINES_PER_FRAME * Y411_LINE_SZ)
-
-#define NTSC_DATA_BUF_SZ (Y422_LINE_SZ * NTSC_LINES_PER_FRAME)
-#define PAL_DATA_BUF_SZ (Y422_LINE_SZ * PAL_LINES_PER_FRAME)
-
-#define RISC_WRITECR_INSTRUCTION_SIZE 16
-#define RISC_SYNC_INSTRUCTION_SIZE 4
-#define JUMP_INSTRUCTION_SIZE 12
-#define MAXSIZE_NO_OPS 36
-#define DWORD_SIZE 4
-
-#define USE_RISC_NOOP_VIDEO 1
-
-#ifdef USE_RISC_NOOP_VIDEO
-#define PAL_US_VID_PROG_SIZE \
- (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + RISC_SYNC_INSTRUCTION_SIZE + \
- NUM_NO_OPS * DWORD_SIZE)
-
-#define PAL_RISC_BUF_SIZE (2 * PAL_US_VID_PROG_SIZE)
-
-#define PAL_VID_PROG_SIZE \
- ((PAL_FIELD_HEIGHT * 2) * 3 * DWORD_SIZE + \
- 2 * RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- JUMP_INSTRUCTION_SIZE + 2 * NUM_NO_OPS * DWORD_SIZE)
-
-#define ODD_FLD_PAL_PROG_SIZE \
- (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
- RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- NUM_NO_OPS * DWORD_SIZE)
-
-#define ODD_FLD_NTSC_PROG_SIZE \
- (NTSC_ODD_FLD_LINES * 3 * DWORD_SIZE + \
- RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- NUM_NO_OPS * DWORD_SIZE)
-
-#define NTSC_US_VID_PROG_SIZE \
- ((NTSC_ODD_FLD_LINES + 1) * 3 * DWORD_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE + \
- NUM_NO_OPS * DWORD_SIZE)
-
-#define NTSC_RISC_BUF_SIZE \
- (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE))
-
-#define FRAME1_VID_PROG_SIZE \
- ((NTSC_ODD_FLD_LINES + NTSC_FIELD_HEIGHT) * 3 * DWORD_SIZE + \
- 2 * RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- JUMP_INSTRUCTION_SIZE + 2 * NUM_NO_OPS * DWORD_SIZE)
-
-#endif
-
-#ifndef USE_RISC_NOOP_VIDEO
-#define PAL_US_VID_PROG_SIZE \
- (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + RISC_SYNC_INSTRUCTION_SIZE + \
- JUMP_INSTRUCTION_SIZE)
-
-#define PAL_RISC_BUF_SIZE (2 * PAL_US_VID_PROG_SIZE)
-
-#define PAL_VID_PROG_SIZE \
- ((PAL_FIELD_HEIGHT * 2) * 3 * DWORD_SIZE + \
- 2 * RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- JUMP_INSTRUCTION_SIZE)
-
-#define ODD_FLD_PAL_PROG_SIZE \
- (PAL_FIELD_HEIGHT * 3 * DWORD_SIZE + \
- RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
-
-#define ODD_FLD_NTSC_PROG_SIZE \
- (NTSC_ODD_FLD_LINES * 3 * DWORD_SIZE + \
- RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
-
-#define NTSC_US_VID_PROG_SIZE \
- ((NTSC_ODD_FLD_LINES + 1) * 3 * DWORD_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
-
-#define NTSC_RISC_BUF_SIZE \
- (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE))
-
-#define FRAME1_VID_PROG_SIZE \
- ((NTSC_ODD_FLD_LINES + NTSC_FIELD_HEIGHT) * 3 * DWORD_SIZE + \
- 2 * RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + \
- JUMP_INSTRUCTION_SIZE)
-
-#endif
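
Note that USE_RISC_NOOP_VIDEO is unconditionally defined to 1 a few lines up, so the #ifndef USE_RISC_NOOP_VIDEO block above was dead code (the deleted audio header had the same pattern). Testing the macro's value instead of its definedness keeps one live branch; PROG_SIZE and BASE_SIZE in this sketch are placeholder names:

	#define USE_RISC_NOOP_VIDEO 1

	#if USE_RISC_NOOP_VIDEO
	#define PROG_SIZE	(BASE_SIZE + NUM_NO_OPS * DWORD_SIZE)	/* NOOP-padded */
	#else
	#define PROG_SIZE	BASE_SIZE
	#endif

For scale, the frame-size macros evaluate to 480 * 1440 = 691200 bytes per NTSC Y422 frame and 480 * 1080 = 518400 for Y411, which the two-frame data buffer then doubles.
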
diff --git a/drivers/media/pci/cx25821/cx25821.h b/drivers/media/pci/cx25821/cx25821.h
index b3eb2dabb30b..25eba4ac4499 100644
--- a/drivers/media/pci/cx25821/cx25821.h
+++ b/drivers/media/pci/cx25821/cx25821.h
@@ -432,18 +432,6 @@ extern int cx25821_sram_channel_setup_audio(struct cx25821_dev *dev,
const struct sram_channel *ch,
unsigned int bpl, u32 risc);
-extern int cx25821_vidupstream_init(struct cx25821_channel *chan, int pixel_format);
-extern int cx25821_audio_upstream_init(struct cx25821_dev *dev,
- int channel_select);
-extern int cx25821_write_frame(struct cx25821_channel *chan,
- const char __user *data, size_t count);
-extern void cx25821_free_mem_upstream(struct cx25821_channel *chan);
-extern void cx25821_free_mem_upstream_audio(struct cx25821_dev *dev);
-extern void cx25821_stop_upstream_video(struct cx25821_channel *chan);
-extern void cx25821_stop_upstream_audio(struct cx25821_dev *dev);
-extern int cx25821_sram_channel_setup_upstream(struct cx25821_dev *dev,
- const struct sram_channel *ch,
- unsigned int bpl, u32 risc);
extern void cx25821_set_pixel_format(struct cx25821_dev *dev, int channel,
u32 format);
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index e5c3387cd1e8..89a65478ae36 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -962,8 +962,11 @@ static int cx88_audio_initdev(struct pci_dev *pci,
goto error;
/* If there's a wm8775 then add a Line-In ALC switch */
- if (core->sd_wm8775)
- snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
+ if (core->sd_wm8775) {
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
+ if (err < 0)
+ goto error;
+ }
strcpy(card->driver, "CX88x");
sprintf(card->shortname, "Conexant CX%x", pci->device);
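
snd_ctl_add() can fail (for instance with -ENOMEM or on a duplicate control ID), and the earlier code silently lost the Line-In ALC switch in that case; the new hunk checks the result and unwinds. The goto target lies outside this hunk, so this is only a sketch of the assumed shape of the error path, with the snd_card_free() call being the standard ALSA unwind rather than something shown in the diff:

	err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
	if (err < 0)
		goto error;
	/* ... */
	error:
		snd_card_free(card);	/* assumption: standard ALSA unwind */
		return err;
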
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 4c92d2388c26..07e1483e987d 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -3307,9 +3307,9 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
case CX88_BOARD_PROLINK_PV_8000GT:
cx_write(MO_GP2_IO, 0xcf7);
- mdelay(50);
+ msleep(50);
cx_write(MO_GP2_IO, 0xef5);
- mdelay(50);
+ msleep(50);
cx_write(MO_GP2_IO, 0xcf7);
usleep_range(10000, 20000);
break;
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index ccfde28d4af2..90b208747e0f 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -1226,9 +1226,9 @@ static int dvb_register(struct cx8802_dev *dev)
/* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
- mdelay(100);
+ msleep(100);
cx_set(MO_GP0_IO, 1);
- mdelay(200);
+ msleep(200);
/* Select RF connector callback */
fusionhdtv_3_gold.pll_rf_set = lgdt330x_pll_rf_set;
@@ -1248,9 +1248,9 @@ static int dvb_register(struct cx8802_dev *dev)
/* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
- mdelay(100);
+ msleep(100);
cx_set(MO_GP0_IO, 9);
- mdelay(200);
+ msleep(200);
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_3_gold,
0x0e,
@@ -1267,9 +1267,9 @@ static int dvb_register(struct cx8802_dev *dev)
/* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
- mdelay(100);
+ msleep(100);
cx_set(MO_GP0_IO, 1);
- mdelay(200);
+ msleep(200);
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_5_gold,
0x0e,
@@ -1289,9 +1289,9 @@ static int dvb_register(struct cx8802_dev *dev)
/* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
- mdelay(100);
+ msleep(100);
cx_set(MO_GP0_IO, 1);
- mdelay(200);
+ msleep(200);
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&pchdtv_hd5500,
0x59,
@@ -1583,9 +1583,9 @@ static int dvb_register(struct cx8802_dev *dev)
cx_set(MO_GP0_IO, 0x0101);
cx_clear(MO_GP0_IO, 0x01);
- mdelay(100);
+ msleep(100);
cx_set(MO_GP0_IO, 0x01);
- mdelay(200);
+ msleep(200);
fe0->dvb.frontend = dvb_attach(stv0299_attach,
&samsung_stv0299_config,
diff --git a/drivers/media/pci/ddbridge/Makefile b/drivers/media/pci/ddbridge/Makefile
index 9b9e35f171b7..5b6d5bbc38af 100644
--- a/drivers/media/pci/ddbridge/Makefile
+++ b/drivers/media/pci/ddbridge/Makefile
@@ -4,7 +4,8 @@
#
ddbridge-objs := ddbridge-main.o ddbridge-core.o ddbridge-ci.o \
- ddbridge-hw.o ddbridge-i2c.o ddbridge-max.o ddbridge-mci.o
+ ddbridge-hw.o ddbridge-i2c.o ddbridge-max.o ddbridge-mci.o \
+ ddbridge-sx8.o
obj-$(CONFIG_DVB_DDBRIDGE) += ddbridge.o
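
ddbridge-sx8.o joins the composite ddbridge module here, which is why ddb_fe_attach_mci() below can hand SX8-specific configuration to the shared MCI code. The kbuild pattern in play: a <name>-objs list enumerates the objects linked into <name>.o, and the obj-$(CONFIG_...) line ties the result to its Kconfig symbol. A generic sketch with a hypothetical driver "foo":

	# Makefile fragment (hypothetical driver "foo")
	foo-objs := foo-main.o foo-core.o foo-sx8.o
	obj-$(CONFIG_FOO) += foo.o
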
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index d5b0d1eaf3ad..c1b982e8e6c9 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -1191,6 +1191,13 @@ static const struct lnbh25_config lnbh25_cfg = {
.data2_config = LNBH25_TEN
};
+static int has_lnbh25(struct i2c_adapter *i2c, u8 adr)
+{
+ u8 val;
+
+ return i2c_read_reg(i2c, adr, 0, &val) ? 0 : 1;
+}
+
static int demod_attach_stv0910(struct ddb_input *input, int type, int tsfast)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
@@ -1224,14 +1231,15 @@ static int demod_attach_stv0910(struct ddb_input *input, int type, int tsfast)
/* attach lnbh25 - leftshift by one as the lnbh25 driver expects 8bit
* i2c addresses
*/
- lnbcfg.i2c_address = (((input->nr & 1) ? 0x0d : 0x0c) << 1);
- if (!dvb_attach(lnbh25_attach, dvb->fe, &lnbcfg, i2c)) {
+ if (has_lnbh25(i2c, 0x0d))
+ lnbcfg.i2c_address = (((input->nr & 1) ? 0x0d : 0x0c) << 1);
+ else
lnbcfg.i2c_address = (((input->nr & 1) ? 0x09 : 0x08) << 1);
- if (!dvb_attach(lnbh25_attach, dvb->fe, &lnbcfg, i2c)) {
- dev_err(dev, "No LNBH25 found!\n");
- dvb_frontend_detach(dvb->fe);
- return -ENODEV;
- }
+
+ if (!dvb_attach(lnbh25_attach, dvb->fe, &lnbcfg, i2c)) {
+ dev_err(dev, "No LNBH25 found!\n");
+ dvb_frontend_detach(dvb->fe);
+ return -ENODEV;
}
return 0;
@@ -1584,8 +1592,8 @@ static int dvb_input_attach(struct ddb_input *input)
if (demod_attach_dummy(input) < 0)
goto err_detach;
break;
- case DDB_TUNER_MCI:
- if (ddb_fe_attach_mci(input) < 0)
+ case DDB_TUNER_MCI_SX8:
+ if (ddb_fe_attach_mci(input, port->type) < 0)
goto err_detach;
break;
default:
@@ -1842,6 +1850,7 @@ static void ddb_port_probe(struct ddb_port *port)
{
struct ddb *dev = port->dev;
u32 l = port->lnr;
+ struct ddb_link *link = &dev->link[l];
u8 id, type;
port->name = "NO MODULE";
@@ -1851,7 +1860,7 @@ static void ddb_port_probe(struct ddb_port *port)
/* Handle missing ports and ports without I2C */
if (dummy_tuner && !port->nr &&
- dev->link[0].ids.device == 0x0005) {
+ link->ids.device == 0x0005) {
port->name = "DUMMY";
port->class = DDB_PORT_TUNER;
port->type = DDB_TUNER_DUMMY;
@@ -1865,14 +1874,14 @@ static void ddb_port_probe(struct ddb_port *port)
return;
}
- if (port->nr == 1 && dev->link[l].info->type == DDB_OCTOPUS_CI &&
- dev->link[l].info->i2c_mask == 1) {
+ if (port->nr == 1 && link->info->type == DDB_OCTOPUS_CI &&
+ link->info->i2c_mask == 1) {
port->name = "NO TAB";
port->class = DDB_PORT_NONE;
return;
}
- if (dev->link[l].info->type == DDB_OCTOPUS_MAX) {
+ if (link->info->type == DDB_OCTOPUS_MAX) {
port->name = "DUAL DVB-S2 MAX";
port->type_name = "MXL5XX";
port->class = DDB_PORT_TUNER;
@@ -1883,17 +1892,17 @@ static void ddb_port_probe(struct ddb_port *port)
return;
}
- if (dev->link[l].info->type == DDB_OCTOPUS_MCI) {
- if (port->nr >= dev->link[l].info->mci)
+ if (link->info->type == DDB_OCTOPUS_MCI) {
+ if (port->nr >= link->info->mci_ports)
return;
port->name = "DUAL MCI";
port->type_name = "MCI";
port->class = DDB_PORT_TUNER;
- port->type = DDB_TUNER_MCI;
+ port->type = DDB_TUNER_MCI + link->info->mci_type;
return;
}
- if (port->nr > 1 && dev->link[l].info->type == DDB_OCTOPUS_CI) {
+ if (port->nr > 1 && link->info->type == DDB_OCTOPUS_CI) {
port->name = "CI internal";
port->type_name = "INTERNAL";
port->class = DDB_PORT_CI;
@@ -1978,7 +1987,7 @@ static void ddb_port_probe(struct ddb_port *port)
port->class = DDB_PORT_TUNER;
if (id == 0x51) {
if (port->nr == 0 &&
- dev->link[l].info->ts_quirks & TS_QUIRK_REVERSED)
+ link->info->ts_quirks & TS_QUIRK_REVERSED)
port->type = DDB_TUNER_DVBS_STV0910_PR;
else
port->type = DDB_TUNER_DVBS_STV0910_P;
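
The new has_lnbh25() helper probes an I2C address by attempting a one-register read and treating success as presence, which lets the driver pick between the 0x0c/0x0d and 0x08/0x09 address pairs before committing to lnbh25_attach(); the old code attached, failed, and retried at the alternate addresses. i2c_read_reg() is a ddbridge-local wrapper, so here is a generic sketch of the same probe using the kernel's SMBus API:

	#include <linux/i2c.h>

	static bool chip_present(struct i2c_adapter *adap, u16 addr)
	{
		union i2c_smbus_data data;

		/* read one byte from register 0; 0 means the chip ACKed */
		return i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0,
				      I2C_SMBUS_BYTE_DATA, &data) == 0;
	}
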
diff --git a/drivers/media/pci/ddbridge/ddbridge-hw.c b/drivers/media/pci/ddbridge/ddbridge-hw.c
index 1d3ee6accdd5..f3cbac07b41f 100644
--- a/drivers/media/pci/ddbridge/ddbridge-hw.c
+++ b/drivers/media/pci/ddbridge/ddbridge-hw.c
@@ -318,7 +318,8 @@ static const struct ddb_info ddb_s2x_48 = {
.port_num = 4,
.i2c_mask = 0x00,
.tempmon_irq = 24,
- .mci = 4
+ .mci_ports = 4,
+ .mci_type = 0,
};
/****************************************************************************/
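
The old single .mci field is split into .mci_ports (how many ports carry MCI tuners, checked in ddb_port_probe() above) and .mci_type (an offset added to DDB_TUNER_MCI so one enum base can cover several MCI flavors). A sketch of the touched fields; the exact types and the surrounding members of struct ddb_info are assumptions, since the header is not part of this hunk:

	#include <linux/types.h>

	/* excerpt; other members omitted */
	struct ddb_info {
		u32 mci_ports;	/* number of MCI tuner ports (was: mci) */
		u32 mci_type;	/* flavor offset on DDB_TUNER_MCI */
	};
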
diff --git a/drivers/media/pci/ddbridge/ddbridge-i2c.c b/drivers/media/pci/ddbridge/ddbridge-i2c.c
index 667340c86ea7..5a28d7611713 100644
--- a/drivers/media/pci/ddbridge/ddbridge-i2c.c
+++ b/drivers/media/pci/ddbridge/ddbridge-i2c.c
@@ -73,7 +73,10 @@ static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd)
}
return -EIO;
}
- if (val & 0x70000)
+ val &= 0x70000;
+ if (val == 0x20000)
+ dev_err(dev->dev, "I2C bus error\n");
+ if (val)
return -EIO;
return 0;
}
diff --git a/drivers/media/pci/ddbridge/ddbridge-max.c b/drivers/media/pci/ddbridge/ddbridge-max.c
index 739e4b444cf4..8da1c7b91577 100644
--- a/drivers/media/pci/ddbridge/ddbridge-max.c
+++ b/drivers/media/pci/ddbridge/ddbridge-max.c
@@ -457,21 +457,29 @@ int ddb_fe_attach_mxl5xx(struct ddb_input *input)
/******************************************************************************/
/* MAX MCI related functions */
-int ddb_fe_attach_mci(struct ddb_input *input)
+int ddb_fe_attach_mci(struct ddb_input *input, u32 type)
{
struct ddb *dev = input->port->dev;
struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
struct ddb_port *port = input->port;
struct ddb_link *link = &dev->link[port->lnr];
int demod, tuner;
+ struct mci_cfg cfg;
demod = input->nr;
tuner = demod & 3;
- if (fmode == 3)
- tuner = 0;
- dvb->fe = ddb_mci_attach(input, 0, demod, &dvb->set_input);
+ switch (type) {
+ case DDB_TUNER_MCI_SX8:
+ cfg = ddb_max_sx8_cfg;
+ if (fmode == 3)
+ tuner = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dvb->fe = ddb_mci_attach(input, &cfg, demod, &dvb->set_input);
if (!dvb->fe) {
- dev_err(dev->dev, "No MAXSX8 found!\n");
+ dev_err(dev->dev, "No MCI card found!\n");
return -ENODEV;
}
if (!dvb->set_input) {
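
ddb_fe_attach_mci() now receives the port type and dispatches on it instead of hard-wiring the SX8 attach: the switch selects a struct mci_cfg (ddb_max_sx8_cfg presumably lives in the newly added ddbridge-sx8.c) and passes it to ddb_mci_attach() in place of the old literal 0. A second MCI card would extend the switch; the case label and config name in this sketch are hypothetical, not defined anywhere in this patch:

	case DDB_TUNER_MCI_M4:		/* hypothetical future MCI variant */
		cfg = ddb_max_m4_cfg;	/* hypothetical per-card config */
		break;
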
diff --git a/drivers/media/pci/ddbridge/ddbridge-max.h b/drivers/media/pci/ddbridge/ddbridge-max.h
index 82efc53baa94..9838c73973b6 100644
--- a/drivers/media/pci/ddbridge/ddbridge-max.h
+++ b/drivers/media/pci/ddbridge/ddbridge-max.h
@@ -25,6 +25,6 @@
int ddb_lnb_init_fmode(struct ddb *dev, struct ddb_link *link, u32 fm);
int ddb_fe_attach_mxl5xx(struct ddb_input *input);
-int ddb_fe_attach_mci(struct ddb_input *input);
+int ddb_fe_attach_mci(struct ddb_input *input, u32 type);
#endif /* _DDBRIDGE_MAX_H */
diff --git a/drivers/media/pci/ddbridge/ddbridge-mci.c b/drivers/media/pci/ddbridge/ddbridge-mci.c
index 4ac634fc96e4..97384ae9ad27 100644
--- a/drivers/media/pci/ddbridge/ddbridge-mci.c
+++ b/drivers/media/pci/ddbridge/ddbridge-mci.c
@@ -2,9 +2,9 @@
/*
* ddbridge-mci.c: Digital Devices microcode interface
*
- * Copyright (C) 2017 Digital Devices GmbH
- * Ralph Metzler <rjkm@metzlerbros.de>
- * Marcus Metzler <mocm@metzlerbros.de>
+ * Copyright (C) 2017-2018 Digital Devices GmbH
+ * Ralph Metzler <rjkm@metzlerbros.de>
+ * Marcus Metzler <mocm@metzlerbros.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -22,42 +22,6 @@
static LIST_HEAD(mci_list);
-static const u32 MCLK = (1550000000 / 12);
-static const u32 MAX_DEMOD_LDPC_BITRATE = (1550000000 / 6);
-static const u32 MAX_LDPC_BITRATE = (720000000);
-
-struct mci_base {
- struct list_head mci_list;
- void *key;
- struct ddb_link *link;
- struct completion completion;
-
- struct device *dev;
- struct mutex tuner_lock; /* concurrent tuner access lock */
- u8 adr;
- struct mutex mci_lock; /* concurrent MCI access lock */
- int count;
-
- u8 tuner_use_count[MCI_TUNER_MAX];
- u8 assigned_demod[MCI_DEMOD_MAX];
- u32 used_ldpc_bitrate[MCI_DEMOD_MAX];
- u8 demod_in_use[MCI_DEMOD_MAX];
- u32 iq_mode;
-};
-
-struct mci {
- struct mci_base *base;
- struct dvb_frontend fe;
- int nr;
- int demod;
- int tuner;
- int first_time_lock;
- int started;
- struct mci_result signal_info;
-
- u32 bb_mode;
-};
-
static int mci_reset(struct mci *state)
{
struct ddb_link *link = state->base->link;
@@ -84,7 +48,7 @@ static int mci_reset(struct mci *state)
return 0;
}
-static int mci_config(struct mci *state, u32 config)
+int ddb_mci_config(struct mci *state, u32 config)
{
struct ddb_link *link = state->base->link;
@@ -122,16 +86,16 @@ static int _mci_cmd_unlocked(struct mci *state,
return 0;
}
-static int mci_cmd(struct mci *state,
- struct mci_command *command,
- struct mci_result *result)
+int ddb_mci_cmd(struct mci *state,
+ struct mci_command *command,
+ struct mci_result *result)
{
int stat;
mutex_lock(&state->base->mci_lock);
stat = _mci_cmd_unlocked(state,
(u32 *)command, sizeof(*command) / sizeof(u32),
- (u32 *)result, sizeof(*result) / sizeof(u32));
+ (u32 *)result, sizeof(*result) / sizeof(u32));
mutex_unlock(&state->base->mci_lock);
return stat;
}
@@ -143,344 +107,6 @@ static void mci_handler(void *priv)
complete(&base->completion);
}
-static void release(struct dvb_frontend *fe)
-{
- struct mci *state = fe->demodulator_priv;
-
- state->base->count--;
- if (state->base->count == 0) {
- list_del(&state->base->mci_list);
- kfree(state->base);
- }
- kfree(state);
-}
-
-static int read_status(struct dvb_frontend *fe, enum fe_status *status)
-{
- int stat;
- struct mci *state = fe->demodulator_priv;
- struct mci_command cmd;
- struct mci_result res;
-
- cmd.command = MCI_CMD_GETSTATUS;
- cmd.demod = state->demod;
- stat = mci_cmd(state, &cmd, &res);
- if (stat)
- return stat;
- *status = 0x00;
- if (res.status == SX8_DEMOD_WAIT_MATYPE)
- *status = 0x0f;
- if (res.status == SX8_DEMOD_LOCKED)
- *status = 0x1f;
- return stat;
-}
-
-static int mci_set_tuner(struct dvb_frontend *fe, u32 tuner, u32 on)
-{
- struct mci *state = fe->demodulator_priv;
- struct mci_command cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.tuner = state->tuner;
- cmd.command = on ? SX8_CMD_INPUT_ENABLE : SX8_CMD_INPUT_DISABLE;
- return mci_cmd(state, &cmd, NULL);
-}
-
-static int stop(struct dvb_frontend *fe)
-{
- struct mci *state = fe->demodulator_priv;
- struct mci_command cmd;
- u32 input = state->tuner;
-
- memset(&cmd, 0, sizeof(cmd));
- if (state->demod != DEMOD_UNUSED) {
- cmd.command = MCI_CMD_STOP;
- cmd.demod = state->demod;
- mci_cmd(state, &cmd, NULL);
- if (state->base->iq_mode) {
- cmd.command = MCI_CMD_STOP;
- cmd.demod = state->demod;
- cmd.output = 0;
- mci_cmd(state, &cmd, NULL);
- mci_config(state, SX8_TSCONFIG_MODE_NORMAL);
- }
- }
- mutex_lock(&state->base->tuner_lock);
- state->base->tuner_use_count[input]--;
- if (!state->base->tuner_use_count[input])
- mci_set_tuner(fe, input, 0);
- if (state->demod < MCI_DEMOD_MAX)
- state->base->demod_in_use[state->demod] = 0;
- state->base->used_ldpc_bitrate[state->nr] = 0;
- state->demod = DEMOD_UNUSED;
- state->base->assigned_demod[state->nr] = DEMOD_UNUSED;
- state->base->iq_mode = 0;
- mutex_unlock(&state->base->tuner_lock);
- state->started = 0;
- return 0;
-}
-
-static int start(struct dvb_frontend *fe, u32 flags, u32 modmask, u32 ts_config)
-{
- struct mci *state = fe->demodulator_priv;
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- u32 used_ldpc_bitrate = 0, free_ldpc_bitrate;
- u32 used_demods = 0;
- struct mci_command cmd;
- u32 input = state->tuner;
- u32 bits_per_symbol = 0;
- int i, stat = 0;
-
- if (p->symbol_rate >= (MCLK / 2))
- flags &= ~1;
- if ((flags & 3) == 0)
- return -EINVAL;
-
- if (flags & 2) {
- u32 tmp = modmask;
-
- bits_per_symbol = 1;
- while (tmp & 1) {
- tmp >>= 1;
- bits_per_symbol++;
- }
- }
-
- mutex_lock(&state->base->tuner_lock);
- if (state->base->iq_mode) {
- stat = -EBUSY;
- goto unlock;
- }
- for (i = 0; i < MCI_DEMOD_MAX; i++) {
- used_ldpc_bitrate += state->base->used_ldpc_bitrate[i];
- if (state->base->demod_in_use[i])
- used_demods++;
- }
- if (used_ldpc_bitrate >= MAX_LDPC_BITRATE ||
- ((ts_config & SX8_TSCONFIG_MODE_MASK) >
- SX8_TSCONFIG_MODE_NORMAL && used_demods > 0)) {
- stat = -EBUSY;
- goto unlock;
- }
- free_ldpc_bitrate = MAX_LDPC_BITRATE - used_ldpc_bitrate;
- if (free_ldpc_bitrate > MAX_DEMOD_LDPC_BITRATE)
- free_ldpc_bitrate = MAX_DEMOD_LDPC_BITRATE;
-
- while (p->symbol_rate * bits_per_symbol > free_ldpc_bitrate)
- bits_per_symbol--;
-
- if (bits_per_symbol < 2) {
- stat = -EBUSY;
- goto unlock;
- }
- i = (p->symbol_rate > (MCLK / 2)) ? 3 : 7;
- while (i >= 0 && state->base->demod_in_use[i])
- i--;
- if (i < 0) {
- stat = -EBUSY;
- goto unlock;
- }
- state->base->demod_in_use[i] = 1;
- state->base->used_ldpc_bitrate[state->nr] = p->symbol_rate
- * bits_per_symbol;
- state->demod = i;
- state->base->assigned_demod[state->nr] = i;
-
- if (!state->base->tuner_use_count[input])
- mci_set_tuner(fe, input, 1);
- state->base->tuner_use_count[input]++;
- state->base->iq_mode = (ts_config > 1);
-unlock:
- mutex_unlock(&state->base->tuner_lock);
- if (stat)
- return stat;
- memset(&cmd, 0, sizeof(cmd));
-
- if (state->base->iq_mode) {
- cmd.command = SX8_CMD_SELECT_IQOUT;
- cmd.demod = state->demod;
- cmd.output = 0;
- mci_cmd(state, &cmd, NULL);
- mci_config(state, ts_config);
- }
- if (p->stream_id != NO_STREAM_ID_FILTER && p->stream_id != 0x80000000)
- flags |= 0x80;
- dev_dbg(state->base->dev, "MCI-%d: tuner=%d demod=%d\n",
- state->nr, state->tuner, state->demod);
- cmd.command = MCI_CMD_SEARCH_DVBS;
- cmd.dvbs2_search.flags = flags;
- cmd.dvbs2_search.s2_modulation_mask =
- modmask & ((1 << (bits_per_symbol - 1)) - 1);
- cmd.dvbs2_search.retry = 2;
- cmd.dvbs2_search.frequency = p->frequency * 1000;
- cmd.dvbs2_search.symbol_rate = p->symbol_rate;
- cmd.dvbs2_search.scrambling_sequence_index =
- p->scrambling_sequence_index;
- cmd.dvbs2_search.input_stream_id =
- (p->stream_id != NO_STREAM_ID_FILTER) ? p->stream_id : 0;
- cmd.tuner = state->tuner;
- cmd.demod = state->demod;
- cmd.output = state->nr;
- if (p->stream_id == 0x80000000)
- cmd.output |= 0x80;
- stat = mci_cmd(state, &cmd, NULL);
- if (stat)
- stop(fe);
- return stat;
-}
-
-static int start_iq(struct dvb_frontend *fe, u32 ts_config)
-{
- struct mci *state = fe->demodulator_priv;
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- u32 used_demods = 0;
- struct mci_command cmd;
- u32 input = state->tuner;
- int i, stat = 0;
-
- mutex_lock(&state->base->tuner_lock);
- if (state->base->iq_mode) {
- stat = -EBUSY;
- goto unlock;
- }
- for (i = 0; i < MCI_DEMOD_MAX; i++)
- if (state->base->demod_in_use[i])
- used_demods++;
- if (used_demods > 0) {
- stat = -EBUSY;
- goto unlock;
- }
- state->demod = 0;
- state->base->assigned_demod[state->nr] = 0;
- if (!state->base->tuner_use_count[input])
- mci_set_tuner(fe, input, 1);
- state->base->tuner_use_count[input]++;
- state->base->iq_mode = (ts_config > 1);
-unlock:
- mutex_unlock(&state->base->tuner_lock);
- if (stat)
- return stat;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.command = SX8_CMD_START_IQ;
- cmd.dvbs2_search.frequency = p->frequency * 1000;
- cmd.dvbs2_search.symbol_rate = p->symbol_rate;
- cmd.tuner = state->tuner;
- cmd.demod = state->demod;
- cmd.output = 7;
- mci_config(state, ts_config);
- stat = mci_cmd(state, &cmd, NULL);
- if (stat)
- stop(fe);
- return stat;
-}
-
-static int set_parameters(struct dvb_frontend *fe)
-{
- int stat = 0;
- struct mci *state = fe->demodulator_priv;
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- u32 ts_config, iq_mode = 0, isi;
-
- if (state->started)
- stop(fe);
-
- isi = p->stream_id;
- if (isi != NO_STREAM_ID_FILTER)
- iq_mode = (isi & 0x30000000) >> 28;
-
- switch (iq_mode) {
- case 1:
- ts_config = (SX8_TSCONFIG_TSHEADER | SX8_TSCONFIG_MODE_IQ);
- break;
- case 2:
- ts_config = (SX8_TSCONFIG_TSHEADER | SX8_TSCONFIG_MODE_IQ);
- break;
- default:
- ts_config = SX8_TSCONFIG_MODE_NORMAL;
- break;
- }
-
- if (iq_mode != 2) {
- u32 flags = 3;
- u32 mask = 3;
-
- if (p->modulation == APSK_16 ||
- p->modulation == APSK_32) {
- flags = 2;
- mask = 15;
- }
- stat = start(fe, flags, mask, ts_config);
- } else {
- stat = start_iq(fe, ts_config);
- }
-
- if (!stat) {
- state->started = 1;
- state->first_time_lock = 1;
- state->signal_info.status = SX8_DEMOD_WAIT_SIGNAL;
- }
-
- return stat;
-}
-
-static int tune(struct dvb_frontend *fe, bool re_tune,
- unsigned int mode_flags,
- unsigned int *delay, enum fe_status *status)
-{
- int r;
-
- if (re_tune) {
- r = set_parameters(fe);
- if (r)
- return r;
- }
- r = read_status(fe, status);
- if (r)
- return r;
-
- if (*status & FE_HAS_LOCK)
- return 0;
- *delay = HZ / 10;
- return 0;
-}
-
-static enum dvbfe_algo get_algo(struct dvb_frontend *fe)
-{
- return DVBFE_ALGO_HW;
-}
-
-static int set_input(struct dvb_frontend *fe, int input)
-{
- struct mci *state = fe->demodulator_priv;
-
- state->tuner = input;
- dev_dbg(state->base->dev, "MCI-%d: input=%d\n", state->nr, input);
- return 0;
-}
-
-static struct dvb_frontend_ops mci_ops = {
- .delsys = { SYS_DVBS, SYS_DVBS2 },
- .info = {
- .name = "Digital Devices MaxSX8 MCI DVB-S/S2/S2X",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 0,
- .frequency_tolerance = 0,
- .symbol_rate_min = 100000,
- .symbol_rate_max = 100000000,
- .caps = FE_CAN_INVERSION_AUTO |
- FE_CAN_FEC_AUTO |
- FE_CAN_QPSK |
- FE_CAN_2G_MODULATION |
- FE_CAN_MULTISTREAM,
- },
- .get_frontend_algo = get_algo,
- .tune = tune,
- .release = release,
- .read_status = read_status,
-};
-
static struct mci_base *match_base(void *key)
{
struct mci_base *p;
@@ -498,8 +124,7 @@ static int probe(struct mci *state)
}
struct dvb_frontend
-*ddb_mci_attach(struct ddb_input *input,
- int mci_type, int nr,
+*ddb_mci_attach(struct ddb_input *input, struct mci_cfg *cfg, int nr,
int (**fn_set_input)(struct dvb_frontend *fe, int input))
{
struct ddb_port *port = input->port;
@@ -507,9 +132,9 @@ struct dvb_frontend
struct ddb_link *link = &dev->link[port->lnr];
struct mci_base *base;
struct mci *state;
- void *key = mci_type ? (void *)port : (void *)link;
+ void *key = cfg->type ? (void *)port : (void *)link;
- state = kzalloc(sizeof(*state), GFP_KERNEL);
+ state = kzalloc(cfg->state_size, GFP_KERNEL);
if (!state)
return NULL;
@@ -518,7 +143,7 @@ struct dvb_frontend
base->count++;
state->base = base;
} else {
- base = kzalloc(sizeof(*base), GFP_KERNEL);
+ base = kzalloc(cfg->base_size, GFP_KERNEL);
if (!base)
goto fail;
base->key = key;
@@ -535,15 +160,17 @@ struct dvb_frontend
goto fail;
}
list_add(&base->mci_list, &mci_list);
+ if (cfg->base_init)
+ cfg->base_init(base);
}
- state->fe.ops = mci_ops;
+ memcpy(&state->fe.ops, cfg->fe_ops, sizeof(struct dvb_frontend_ops));
state->fe.demodulator_priv = state;
state->nr = nr;
- *fn_set_input = set_input;
-
+ *fn_set_input = cfg->set_input;
state->tuner = nr;
state->demod = nr;
-
+ if (cfg->init)
+ cfg->init(state);
return &state->fe;
fail:
kfree(state);
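
Because ddb_mci_attach() now allocates cfg->state_size and cfg->base_size instead of fixed struct sizes, a card driver can embed struct mci (and struct mci_base) as the first member of its private state and let the common code allocate the whole object. A minimal sketch of the pattern, with hypothetical names (the SX8 code added later in this patch follows exactly this layout):

/* struct mci MUST be the first member so the common code's
 * (struct mci *) pointer is also valid for the card type. */
struct my_card {
	struct mci mci;			/* common part, first member */
	int card_specific_flag;		/* card-private fields follow */
};

static const struct mci_cfg my_card_cfg = {
	.type       = 0,
	.fe_ops     = &my_card_fe_ops,	/* hypothetical ops table */
	.base_size  = sizeof(struct mci_base),
	.state_size = sizeof(struct my_card),
};
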
diff --git a/drivers/media/pci/ddbridge/ddbridge-mci.h b/drivers/media/pci/ddbridge/ddbridge-mci.h
index 209cc2b92dff..24241111c634 100644
--- a/drivers/media/pci/ddbridge/ddbridge-mci.h
+++ b/drivers/media/pci/ddbridge/ddbridge-mci.h
@@ -2,9 +2,9 @@
/*
* ddbridge-mci.h: Digital Devices micro code interface
*
- * Copyright (C) 2017 Digital Devices GmbH
- * Marcus Metzler <mocm@metzlerbros.de>
- * Ralph Metzler <rjkm@metzlerbros.de>
+ * Copyright (C) 2017-2018 Digital Devices GmbH
+ * Marcus Metzler <mocm@metzlerbros.de>
+ * Ralph Metzler <rjkm@metzlerbros.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -42,6 +42,22 @@
#define SX8_TSCONFIG_MODE_NORMAL (0x00000001)
#define SX8_TSCONFIG_MODE_IQ (0x00000003)
+/*
+ * IQ mode is only available on the MaxSX8, and only on a single tuner.
+ *
+ * IQ_MODE_SAMPLES
+ *   sampling rate is 1550/24 MHz (64.583 MHz)
+ *   channel AGC is frozen to allow stitching the FFT results together
+ *
+ * IQ_MODE_VTM
+ *   sampling rate equals the supplied symbol rate
+ *   channel AGC is active
+ *
+ * In both cases downsampling is done with an RRC filter (currently fixed
+ * to alpha = 0.05), which causes some (ca. 5%) aliasing at the edges from
+ * outside the spectrum.
+ */
+
#define SX8_TSCONFIG_TSHEADER (0x00000004)
#define SX8_TSCONFIG_BURST (0x00000008)
@@ -51,55 +67,65 @@
#define SX8_TSCONFIG_BURSTSIZE_8K (0x00000020)
#define SX8_TSCONFIG_BURSTSIZE_16K (0x00000030)
-#define SX8_DEMOD_STOPPED (0)
-#define SX8_DEMOD_IQ_MODE (1)
-#define SX8_DEMOD_WAIT_SIGNAL (2)
-#define SX8_DEMOD_WAIT_MATYPE (3)
-#define SX8_DEMOD_TIMEOUT (14)
-#define SX8_DEMOD_LOCKED (15)
-
-#define MCI_CMD_STOP (0x01)
-#define MCI_CMD_GETSTATUS (0x02)
-#define MCI_CMD_GETSIGNALINFO (0x03)
-#define MCI_CMD_RFPOWER (0x04)
-
-#define MCI_CMD_SEARCH_DVBS (0x10)
+#define SX8_DEMOD_STOPPED (0)
+#define SX8_DEMOD_IQ_MODE (1)
+#define SX8_DEMOD_WAIT_SIGNAL (2)
+#define SX8_DEMOD_WAIT_MATYPE (3)
+#define SX8_DEMOD_TIMEOUT (14)
+#define SX8_DEMOD_LOCKED (15)
-#define MCI_CMD_GET_IQSYMBOL (0x30)
+#define MCI_CMD_STOP (0x01)
+#define MCI_CMD_GETSTATUS (0x02)
+#define MCI_CMD_GETSIGNALINFO (0x03)
+#define MCI_CMD_RFPOWER (0x04)
-#define SX8_CMD_INPUT_ENABLE (0x40)
-#define SX8_CMD_INPUT_DISABLE (0x41)
-#define SX8_CMD_START_IQ (0x42)
-#define SX8_CMD_STOP_IQ (0x43)
-#define SX8_CMD_SELECT_IQOUT (0x44)
-#define SX8_CMD_SELECT_TSOUT (0x45)
+#define MCI_CMD_SEARCH_DVBS (0x10)
-#define SX8_ERROR_UNSUPPORTED (0x80)
+#define MCI_CMD_GET_IQSYMBOL (0x30)
-#define SX8_SUCCESS(status) (status < SX8_ERROR_UNSUPPORTED)
+#define SX8_CMD_INPUT_ENABLE (0x40)
+#define SX8_CMD_INPUT_DISABLE (0x41)
+#define SX8_CMD_START_IQ (0x42)
+#define SX8_CMD_STOP_IQ (0x43)
+#define SX8_CMD_ENABLE_IQOUTPUT (0x44)
+#define SX8_CMD_DISABLE_IQOUTPUT (0x45)
-#define SX8_CMD_DIAG_READ8 (0xE0)
-#define SX8_CMD_DIAG_READ32 (0xE1)
-#define SX8_CMD_DIAG_WRITE8 (0xE2)
-#define SX8_CMD_DIAG_WRITE32 (0xE3)
+#define MCI_STATUS_OK (0x00)
+#define MCI_STATUS_UNSUPPORTED (0x80)
+#define MCI_STATUS_RETRY (0xFD)
+#define MCI_STATUS_NOT_READY (0xFE)
+#define MCI_STATUS_ERROR (0xFF)
-#define SX8_CMD_DIAG_READRF (0xE8)
-#define SX8_CMD_DIAG_WRITERF (0xE9)
+#define MCI_SUCCESS(status) ((status & MCI_STATUS_UNSUPPORTED) == 0)
struct mci_command {
union {
u32 command_word;
struct {
- u8 command;
- u8 tuner;
- u8 demod;
- u8 output;
+ u8 command;
+ u8 tuner;
+ u8 demod;
+ u8 output;
};
};
union {
u32 params[31];
struct {
+ /*
+ * Bit 0: DVB-S Enabled
+ * Bit 1: DVB-S2 Enabled
+ * Bit 7: InputStreamID
+ */
u8 flags;
+ /*
+ * Bit 0: QPSK,
+ * Bit 1: 8PSK/8APSK
+ * Bit 2: 16APSK
+ * Bit 3: 32APSK
+ * Bit 4: 64APSK
+ * Bit 5: 128APSK
+ * Bit 6: 256APSK
+ */
u8 s2_modulation_mask;
u8 rsvd1;
u8 retry;
@@ -108,7 +134,36 @@ struct mci_command {
u8 input_stream_id;
u8 rsvd2[3];
u32 scrambling_sequence_index;
+ u32 frequency_range;
} dvbs2_search;
+
+ struct {
+ u8 tap;
+ u8 rsvd;
+ u16 point;
+ } get_iq_symbol;
+
+ struct {
+ /*
+ * Bit 0: 0=VTM/1=SCAN
+ * Bit 1: Set Gain
+ */
+ u8 flags;
+ u8 roll_off;
+ u8 rsvd1;
+ u8 rsvd2;
+ u32 frequency;
+ u32 symbol_rate; /* Only in VTM mode */
+ u16 gain;
+ } sx8_start_iq;
+
+ struct {
+ /*
+ * Bit 1:0 = STVVGLNA Gain.
+ * 0 = AGC, 1 = 0dB, 2 = Minimum, 3 = Maximum
+ */
+ u8 flags;
+ } sx8_input_enable;
};
};
@@ -116,41 +171,92 @@ struct mci_result {
union {
u32 status_word;
struct {
- u8 status;
- u8 rsvd;
+ u8 status;
+ u8 mode;
u16 time;
};
};
union {
u32 result[27];
struct {
+ /* 1 = DVB-S, 2 = DVB-S2X */
u8 standard;
/* puncture rate for DVB-S */
u8 pls_code;
- /* 7-6: rolloff, 5-2: rsrvd, 1:short, 0:pilots */
+ /* 2-0: rolloff */
u8 roll_off;
u8 rsvd;
+ /* actual frequency in Hz */
u32 frequency;
+ /* actual symbolrate in Hz */
u32 symbol_rate;
+ /* channel power in dBm x 100 */
s16 channel_power;
+ /* band power in dBm x 100 */
s16 band_power;
+ /*
+ * SNR in dB x 100
+ * Note: negative values are valid in DVB-S2
+ */
s16 signal_to_noise;
s16 rsvd2;
+ /*
+ * Counter for packet errors
+ * (set to 0 on start command)
+ */
u32 packet_errors;
+ /* Bit error rate: PreRS in DVB-S, PreBCH in DVB-S2X */
u32 ber_numerator;
u32 ber_denominator;
} dvbs2_signal_info;
+
struct {
- u8 i_symbol;
- u8 q_symbol;
- } dvbs2_signal_iq;
+ s16 i;
+ s16 q;
+ } iq_symbol;
};
u32 version[4];
};
+struct mci_base {
+ struct list_head mci_list;
+ void *key;
+ struct ddb_link *link;
+ struct completion completion;
+ struct device *dev;
+ struct mutex tuner_lock; /* concurrent tuner access lock */
+ struct mutex mci_lock; /* concurrent MCI access lock */
+ int count;
+ int type;
+};
+
+struct mci {
+ struct mci_base *base;
+ struct dvb_frontend fe;
+ int nr;
+ int demod;
+ int tuner;
+};
+
+struct mci_cfg {
+ int type;
+ struct dvb_frontend_ops *fe_ops;
+ u32 base_size;
+ u32 state_size;
+ int (*init)(struct mci *mci);
+ int (*base_init)(struct mci_base *mci_base);
+ int (*set_input)(struct dvb_frontend *fe, int input);
+};
+
+/* defined in ddbridge-sx8.c */
+extern const struct mci_cfg ddb_max_sx8_cfg;
+
+int ddb_mci_cmd(struct mci *state, struct mci_command *command,
+ struct mci_result *result);
+int ddb_mci_config(struct mci *state, u32 config);
+
struct dvb_frontend
-*ddb_mci_attach(struct ddb_input *input,
- int mci_type, int nr,
+*ddb_mci_attach(struct ddb_input *input, struct mci_cfg *cfg, int nr,
int (**fn_set_input)(struct dvb_frontend *fe, int input));
#endif /* _DDBRIDGE_MCI_H_ */
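
The mci_command/mci_result unions above make every command a fixed-size block of 32-bit words whose layout depends on the command byte. A hedged sketch of issuing a DVB-S2 search through the newly exported ddb_mci_cmd() and checking the result status with MCI_SUCCESS() (frequency and symbol rate values are illustrative; the driver passes frequency in Hz):

	struct mci_command cmd;
	struct mci_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.command = MCI_CMD_SEARCH_DVBS;
	cmd.dvbs2_search.flags = 3;			/* DVB-S and DVB-S2 enabled */
	cmd.dvbs2_search.s2_modulation_mask = 3;	/* QPSK and 8PSK */
	cmd.dvbs2_search.frequency = 1210000000;	/* Hz */
	cmd.dvbs2_search.symbol_rate = 27500000;	/* Sym/s */
	cmd.tuner = 0;
	cmd.demod = 0;

	ret = ddb_mci_cmd(state, &cmd, &res);
	if (!ret && !MCI_SUCCESS(res.status))
		ret = -EIO;
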
diff --git a/drivers/media/pci/ddbridge/ddbridge-regs.h b/drivers/media/pci/ddbridge/ddbridge-regs.h
index b978b5991940..f9e1cbb99b53 100644
--- a/drivers/media/pci/ddbridge/ddbridge-regs.h
+++ b/drivers/media/pci/ddbridge/ddbridge-regs.h
@@ -34,14 +34,6 @@
#define GPIO_DIRECTION 0x28
/* ------------------------------------------------------------------------- */
-/* MDIO */
-
-#define MDIO_CTRL 0x20
-#define MDIO_ADR 0x24
-#define MDIO_REG 0x28
-#define MDIO_VAL 0x2C
-
-/* ------------------------------------------------------------------------- */
#define BOARD_CONTROL 0x30
diff --git a/drivers/media/pci/ddbridge/ddbridge-sx8.c b/drivers/media/pci/ddbridge/ddbridge-sx8.c
new file mode 100644
index 000000000000..64f05f5ee039
--- /dev/null
+++ b/drivers/media/pci/ddbridge/ddbridge-sx8.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ddbridge-sx8.c: Digital Devices MAX SX8 driver
+ *
+ * Copyright (C) 2018 Digital Devices GmbH
+ * Marcus Metzler <mocm@metzlerbros.de>
+ * Ralph Metzler <rjkm@metzlerbros.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ddbridge.h"
+#include "ddbridge-io.h"
+#include "ddbridge-mci.h"
+
+static const u32 MCLK = (1550000000 / 12);
+static const u32 MAX_LDPC_BITRATE = (720000000);
+static const u32 MAX_DEMOD_LDPC_BITRATE = (1550000000 / 6);
+
+#define SX8_TUNER_NUM 4
+#define SX8_DEMOD_NUM 8
+#define SX8_DEMOD_NONE 0xff
+
+struct sx8_base {
+ struct mci_base mci_base;
+
+ u8 tuner_use_count[SX8_TUNER_NUM];
+ u32 gain_mode[SX8_TUNER_NUM];
+
+ u32 used_ldpc_bitrate[SX8_DEMOD_NUM];
+ u8 demod_in_use[SX8_DEMOD_NUM];
+ u32 iq_mode;
+ u32 burst_size;
+ u32 direct_mode;
+};
+
+struct sx8 {
+ struct mci mci;
+
+ int first_time_lock;
+ int started;
+ struct mci_result signal_info;
+
+ u32 bb_mode;
+ u32 local_frequency;
+};
+
+static void release(struct dvb_frontend *fe)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_base *mci_base = state->mci.base;
+
+ mci_base->count--;
+ if (mci_base->count == 0) {
+ list_del(&mci_base->mci_list);
+ kfree(mci_base);
+ }
+ kfree(state);
+}
+
+static int get_info(struct dvb_frontend *fe)
+{
+ int stat;
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.command = MCI_CMD_GETSIGNALINFO;
+ cmd.demod = state->mci.demod;
+ stat = ddb_mci_cmd(&state->mci, &cmd, &state->signal_info);
+ return stat;
+}
+
+static int get_snr(struct dvb_frontend *fe)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ p->cnr.len = 1;
+ p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ p->cnr.stat[0].svalue =
+ (s64)state->signal_info.dvbs2_signal_info.signal_to_noise
+ * 10;
+ return 0;
+}
+
+static int get_strength(struct dvb_frontend *fe)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ s32 str;
+
+ str = 100000 -
+ (state->signal_info.dvbs2_signal_info.channel_power
+ * 10 + 108750);
+ p->strength.len = 1;
+ p->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ p->strength.stat[0].svalue = str;
+ return 0;
+}
+
+static int read_status(struct dvb_frontend *fe, enum fe_status *status)
+{
+ int stat;
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_command cmd;
+ struct mci_result res;
+
+ cmd.command = MCI_CMD_GETSTATUS;
+ cmd.demod = state->mci.demod;
+ stat = ddb_mci_cmd(&state->mci, &cmd, &res);
+ if (stat)
+ return stat;
+ *status = 0x00;
+ get_info(fe);
+ get_strength(fe);
+ if (res.status == SX8_DEMOD_WAIT_MATYPE)
+ *status = 0x0f;
+ if (res.status == SX8_DEMOD_LOCKED) {
+ *status = 0x1f;
+ get_snr(fe);
+ }
+ return stat;
+}
+
+static int mci_set_tuner(struct dvb_frontend *fe, u32 tuner, u32 on)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_base *mci_base = state->mci.base;
+ struct sx8_base *sx8_base = (struct sx8_base *)mci_base;
+ struct mci_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.tuner = state->mci.tuner;
+ cmd.command = on ? SX8_CMD_INPUT_ENABLE : SX8_CMD_INPUT_DISABLE;
+ cmd.sx8_input_enable.flags = sx8_base->gain_mode[state->mci.tuner];
+ return ddb_mci_cmd(&state->mci, &cmd, NULL);
+}
+
+static int stop(struct dvb_frontend *fe)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_base *mci_base = state->mci.base;
+ struct sx8_base *sx8_base = (struct sx8_base *)mci_base;
+ struct mci_command cmd;
+ u32 input = state->mci.tuner;
+
+ memset(&cmd, 0, sizeof(cmd));
+ if (state->mci.demod != SX8_DEMOD_NONE) {
+ cmd.command = MCI_CMD_STOP;
+ cmd.demod = state->mci.demod;
+ ddb_mci_cmd(&state->mci, &cmd, NULL);
+ if (sx8_base->iq_mode) {
+ cmd.command = SX8_CMD_DISABLE_IQOUTPUT;
+ cmd.demod = state->mci.demod;
+ cmd.output = 0;
+ ddb_mci_cmd(&state->mci, &cmd, NULL);
+ ddb_mci_config(&state->mci, SX8_TSCONFIG_MODE_NORMAL);
+ }
+ }
+ mutex_lock(&mci_base->tuner_lock);
+ sx8_base->tuner_use_count[input]--;
+ if (!sx8_base->tuner_use_count[input])
+ mci_set_tuner(fe, input, 0);
+ if (state->mci.demod < SX8_DEMOD_NUM) {
+ sx8_base->demod_in_use[state->mci.demod] = 0;
+ state->mci.demod = SX8_DEMOD_NONE;
+ }
+ sx8_base->used_ldpc_bitrate[state->mci.nr] = 0;
+ sx8_base->iq_mode = 0;
+ mutex_unlock(&mci_base->tuner_lock);
+ state->started = 0;
+ return 0;
+}
+
+static int start(struct dvb_frontend *fe, u32 flags, u32 modmask, u32 ts_config)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_base *mci_base = state->mci.base;
+ struct sx8_base *sx8_base = (struct sx8_base *)mci_base;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 used_ldpc_bitrate = 0, free_ldpc_bitrate;
+ u32 used_demods = 0;
+ struct mci_command cmd;
+ u32 input = state->mci.tuner;
+ u32 bits_per_symbol = 0;
+ int i = -1, stat = 0;
+
+ if (p->symbol_rate >= (MCLK / 2))
+ flags &= ~1;
+ if ((flags & 3) == 0)
+ return -EINVAL;
+
+ if (flags & 2) {
+ u32 tmp = modmask;
+
+ bits_per_symbol = 1;
+ while (tmp & 1) {
+ tmp >>= 1;
+ bits_per_symbol++;
+ }
+ }
+
+ mutex_lock(&mci_base->tuner_lock);
+ if (sx8_base->iq_mode) {
+ stat = -EBUSY;
+ goto unlock;
+ }
+
+ if (sx8_base->direct_mode) {
+ if (p->symbol_rate >= MCLK / 2) {
+ if (state->mci.nr < 4)
+ i = state->mci.nr;
+ } else {
+ i = state->mci.nr;
+ }
+ } else {
+ for (i = 0; i < SX8_DEMOD_NUM; i++) {
+ used_ldpc_bitrate += sx8_base->used_ldpc_bitrate[i];
+ if (sx8_base->demod_in_use[i])
+ used_demods++;
+ }
+ if (used_ldpc_bitrate >= MAX_LDPC_BITRATE ||
+ ((ts_config & SX8_TSCONFIG_MODE_MASK) >
+ SX8_TSCONFIG_MODE_NORMAL && used_demods > 0)) {
+ stat = -EBUSY;
+ goto unlock;
+ }
+ free_ldpc_bitrate = MAX_LDPC_BITRATE - used_ldpc_bitrate;
+ if (free_ldpc_bitrate > MAX_DEMOD_LDPC_BITRATE)
+ free_ldpc_bitrate = MAX_DEMOD_LDPC_BITRATE;
+
+ while (p->symbol_rate * bits_per_symbol > free_ldpc_bitrate)
+ bits_per_symbol--;
+ if (bits_per_symbol < 2) {
+ stat = -EBUSY;
+ goto unlock;
+ }
+
+ modmask &= ((1 << (bits_per_symbol - 1)) - 1);
+ if (((flags & 0x02) != 0) && modmask == 0) {
+ stat = -EBUSY;
+ goto unlock;
+ }
+
+ i = (p->symbol_rate > (MCLK / 2)) ? 3 : 7;
+ while (i >= 0 && sx8_base->demod_in_use[i])
+ i--;
+ }
+
+ if (i < 0) {
+ stat = -EBUSY;
+ goto unlock;
+ }
+ sx8_base->demod_in_use[i] = 1;
+ sx8_base->used_ldpc_bitrate[state->mci.nr] = p->symbol_rate
+ * bits_per_symbol;
+ state->mci.demod = i;
+
+ if (!sx8_base->tuner_use_count[input])
+ mci_set_tuner(fe, input, 1);
+ sx8_base->tuner_use_count[input]++;
+ sx8_base->iq_mode = (ts_config > 1);
+unlock:
+ mutex_unlock(&mci_base->tuner_lock);
+ if (stat)
+ return stat;
+ memset(&cmd, 0, sizeof(cmd));
+
+ if (sx8_base->iq_mode) {
+ cmd.command = SX8_CMD_ENABLE_IQOUTPUT;
+ cmd.demod = state->mci.demod;
+ cmd.output = 0;
+ ddb_mci_cmd(&state->mci, &cmd, NULL);
+ ddb_mci_config(&state->mci, ts_config);
+ }
+ if (p->stream_id != NO_STREAM_ID_FILTER && p->stream_id != 0x80000000)
+ flags |= 0x80;
+ dev_dbg(mci_base->dev, "MCI-%d: tuner=%d demod=%d\n",
+ state->mci.nr, state->mci.tuner, state->mci.demod);
+ cmd.command = MCI_CMD_SEARCH_DVBS;
+ cmd.dvbs2_search.flags = flags;
+ cmd.dvbs2_search.s2_modulation_mask = modmask;
+ cmd.dvbs2_search.retry = 2;
+ cmd.dvbs2_search.frequency = p->frequency * 1000;
+ cmd.dvbs2_search.symbol_rate = p->symbol_rate;
+ cmd.dvbs2_search.scrambling_sequence_index =
+ p->scrambling_sequence_index | 0x80000000;
+ cmd.dvbs2_search.input_stream_id =
+ (p->stream_id != NO_STREAM_ID_FILTER) ? p->stream_id : 0;
+ cmd.tuner = state->mci.tuner;
+ cmd.demod = state->mci.demod;
+ cmd.output = state->mci.nr;
+ if (p->stream_id == 0x80000000)
+ cmd.output |= 0x80;
+ stat = ddb_mci_cmd(&state->mci, &cmd, NULL);
+ if (stat)
+ stop(fe);
+ return stat;
+}
+
+static int start_iq(struct dvb_frontend *fe, u32 flags, u32 roll_off,
+ u32 ts_config)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_base *mci_base = state->mci.base;
+ struct sx8_base *sx8_base = (struct sx8_base *)mci_base;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 used_demods = 0;
+ struct mci_command cmd;
+ u32 input = state->mci.tuner;
+ int i, stat = 0;
+
+ mutex_lock(&mci_base->tuner_lock);
+ if (sx8_base->iq_mode) {
+ stat = -EBUSY;
+ goto unlock;
+ }
+ for (i = 0; i < SX8_DEMOD_NUM; i++)
+ if (sx8_base->demod_in_use[i])
+ used_demods++;
+ if (used_demods > 0) {
+ stat = -EBUSY;
+ goto unlock;
+ }
+ state->mci.demod = 0;
+ if (!sx8_base->tuner_use_count[input])
+ mci_set_tuner(fe, input, 1);
+ sx8_base->tuner_use_count[input]++;
+ sx8_base->iq_mode = (ts_config > 1);
+unlock:
+ mutex_unlock(&mci_base->tuner_lock);
+ if (stat)
+ return stat;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.command = SX8_CMD_START_IQ;
+ cmd.sx8_start_iq.flags = flags;
+ cmd.sx8_start_iq.roll_off = roll_off;
+ cmd.sx8_start_iq.frequency = p->frequency * 1000;
+ cmd.sx8_start_iq.symbol_rate = p->symbol_rate;
+ cmd.tuner = state->mci.tuner;
+ cmd.demod = state->mci.demod;
+ stat = ddb_mci_cmd(&state->mci, &cmd, NULL);
+ if (stat)
+ stop(fe);
+ ddb_mci_config(&state->mci, ts_config);
+ return stat;
+}
+
+static int set_parameters(struct dvb_frontend *fe)
+{
+ int stat = 0;
+ struct sx8 *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 ts_config = SX8_TSCONFIG_MODE_NORMAL, iq_mode = 0, isi;
+
+ if (state->started)
+ stop(fe);
+
+ isi = p->stream_id;
+ if (isi != NO_STREAM_ID_FILTER)
+ iq_mode = (isi & 0x30000000) >> 28;
+
+ if (iq_mode)
+ ts_config = (SX8_TSCONFIG_TSHEADER | SX8_TSCONFIG_MODE_IQ);
+ if (iq_mode < 3) {
+ u32 mask;
+
+ switch (p->modulation) {
+ /* uncomment whenever these modulations hit the DVB API
+ * case APSK_256:
+ * mask = 0x7f;
+ * break;
+ * case APSK_128:
+ * mask = 0x3f;
+ * break;
+ * case APSK_64:
+ * mask = 0x1f;
+ * break;
+ */
+ case APSK_32:
+ mask = 0x0f;
+ break;
+ case APSK_16:
+ mask = 0x07;
+ break;
+ default:
+ mask = 0x03;
+ break;
+ }
+ stat = start(fe, 3, mask, ts_config);
+ } else {
+ u32 flags = (iq_mode == 2) ? 1 : 0;
+
+ stat = start_iq(fe, flags, 4, ts_config);
+ }
+ if (!stat) {
+ state->started = 1;
+ state->first_time_lock = 1;
+ state->signal_info.status = SX8_DEMOD_WAIT_SIGNAL;
+ }
+
+ return stat;
+}
+
+static int tune(struct dvb_frontend *fe, bool re_tune,
+ unsigned int mode_flags,
+ unsigned int *delay, enum fe_status *status)
+{
+ int r;
+
+ if (re_tune) {
+ r = set_parameters(fe);
+ if (r)
+ return r;
+ }
+ r = read_status(fe, status);
+ if (r)
+ return r;
+
+ if (*status & FE_HAS_LOCK)
+ return 0;
+ *delay = HZ / 10;
+ return 0;
+}
+
+static enum dvbfe_algo get_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_HW;
+}
+
+static int set_input(struct dvb_frontend *fe, int input)
+{
+ struct sx8 *state = fe->demodulator_priv;
+ struct mci_base *mci_base = state->mci.base;
+
+ if (input >= SX8_TUNER_NUM)
+ return -EINVAL;
+
+ state->mci.tuner = input;
+ dev_dbg(mci_base->dev, "MCI-%d: input=%d\n", state->mci.nr, input);
+ return 0;
+}
+
+static struct dvb_frontend_ops sx8_ops = {
+ .delsys = { SYS_DVBS, SYS_DVBS2 },
+ .info = {
+ .name = "Digital Devices MaxSX8 MCI DVB-S/S2/S2X",
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .symbol_rate_min = 100000,
+ .symbol_rate_max = 100000000,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_2G_MODULATION |
+ FE_CAN_MULTISTREAM,
+ },
+ .get_frontend_algo = get_algo,
+ .tune = tune,
+ .release = release,
+ .read_status = read_status,
+};
+
+static int init(struct mci *mci)
+{
+ struct sx8 *state = (struct sx8 *)mci;
+
+ state->mci.demod = SX8_DEMOD_NONE;
+ return 0;
+}
+
+const struct mci_cfg ddb_max_sx8_cfg = {
+ .type = 0,
+ .fe_ops = &sx8_ops,
+ .base_size = sizeof(struct sx8_base),
+ .state_size = sizeof(struct sx8),
+ .init = init,
+ .set_input = set_input,
+};
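
The admission logic in start() above budgets LDPC decoder bandwidth across demodulators: the requested symbol rate times the highest enabled bits-per-symbol must fit into what remains of MAX_LDPC_BITRATE (720 Mbit/s), capped per demod at MAX_DEMOD_LDPC_BITRATE (1550/6, about 258.3 Mbit/s). For example, at 60 MSym/s with modmask 0x0f the initial 5 bits/symbol (300 Mbit/s) is trimmed to 4 (240 Mbit/s). A standalone sketch of the same arithmetic:

/* Sketch of the bits-per-symbol budgeting used in start(). */
static int budget_bits_per_symbol(u32 symbol_rate, u32 modmask,
				  u32 free_ldpc_bitrate)
{
	u32 bits_per_symbol = 1;

	while (modmask & 1) {		/* highest enabled modulation */
		modmask >>= 1;
		bits_per_symbol++;
	}
	while (bits_per_symbol >= 2 &&
	       symbol_rate * bits_per_symbol > free_ldpc_bitrate)
		bits_per_symbol--;	/* shed modulations until it fits */
	return bits_per_symbol < 2 ? -EBUSY : bits_per_symbol;
}
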
diff --git a/drivers/media/pci/ddbridge/ddbridge.h b/drivers/media/pci/ddbridge/ddbridge.h
index a66b1125cc74..8a354dfb6c22 100644
--- a/drivers/media/pci/ddbridge/ddbridge.h
+++ b/drivers/media/pci/ddbridge/ddbridge.h
@@ -120,21 +120,23 @@ struct ddb_info {
#define DDB_OCTOPUS_MCI 9
char *name;
u32 i2c_mask;
+ u32 board_control;
+ u32 board_control_2;
+
u8 port_num;
u8 led_num;
u8 fan_num;
u8 temp_num;
u8 temp_bus;
- u32 board_control;
- u32 board_control_2;
- u8 mdio_num;
u8 con_clock; /* use a continuous clock */
u8 ts_quirks;
#define TS_QUIRK_SERIAL 1
#define TS_QUIRK_REVERSED 2
#define TS_QUIRK_ALT_OSC 8
+ u8 mci_ports;
+ u8 mci_type;
+
u32 tempmon_irq;
- u8 mci;
const struct ddb_regmap *regmap;
};
@@ -255,7 +257,6 @@ struct ddb_port {
#define DDB_CI_EXTERNAL_XO2_B 13
#define DDB_TUNER_DVBS_STV0910_PR 14
#define DDB_TUNER_DVBC2T2I_SONY_P 15
-#define DDB_TUNER_MCI 16
#define DDB_TUNER_XO2 32
#define DDB_TUNER_DVBS_STV0910 (DDB_TUNER_XO2 + 0)
@@ -265,6 +266,9 @@ struct ddb_port {
#define DDB_TUNER_ATSC_ST (DDB_TUNER_XO2 + 4)
#define DDB_TUNER_DVBC2T2I_SONY (DDB_TUNER_XO2 + 5)
+#define DDB_TUNER_MCI 48
+#define DDB_TUNER_MCI_SX8 (DDB_TUNER_MCI + 0)
+
struct ddb_input *input[2];
struct ddb_output *output;
struct dvb_ca_en50221 *en;
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index c9db108751a7..1ddb0576fb7b 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -986,6 +986,9 @@ static int dm1105_probe(struct pci_dev *pdev,
int ret = -ENOMEM;
int i;
+ if (dm1105_devcount >= ARRAY_SIZE(card))
+ return -ENODEV;
+
dev = kzalloc(sizeof(struct dm1105_dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 6b2ffdc96961..dd727098daf4 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -999,7 +999,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
int vbi_buf_size;
struct ivtv *itv;
- itv = kzalloc(sizeof(struct ivtv), GFP_ATOMIC);
+ itv = kzalloc(sizeof(struct ivtv), GFP_KERNEL);
if (itv == NULL)
return -ENOMEM;
itv->pdev = pdev;
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index 522cd111e399..e9ce54dd5e01 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -293,6 +293,7 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
.platform_data = &pdata,
};
+ memset(&pdata, 0, sizeof(pdata));
pdata.pvr150_workaround = itv->pvr150_workaround;
sd = v4l2_i2c_new_subdev_board(&itv->v4l2_dev, adap,
&cx25840_info, NULL);
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index b19058e36853..5ddaa8ed11a5 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -1178,7 +1178,7 @@ static int ivtvfb_init_card(struct ivtv *itv)
}
itv->osd_info = kzalloc(sizeof(struct osd_info),
- GFP_ATOMIC|__GFP_NOWARN);
+ GFP_KERNEL|__GFP_NOWARN);
if (itv->osd_info == NULL) {
IVTVFB_ERR("Failed to allocate memory for osd_info\n");
return -ENOMEM;
diff --git a/drivers/media/pci/mantis/mantis_vp3030.c b/drivers/media/pci/mantis/mantis_vp3030.c
index 14f6e153000c..9797c9fd8259 100644
--- a/drivers/media/pci/mantis/mantis_vp3030.c
+++ b/drivers/media/pci/mantis/mantis_vp3030.c
@@ -42,8 +42,8 @@ static struct zl10353_config mantis_vp3030_config = {
static struct tda665x_config env57h12d5_config = {
.name = "ENV57H12D5 (ET-50DT)",
.addr = 0x60,
- .frequency_min = 47000000,
- .frequency_max = 862000000,
+ .frequency_min = 47 * MHz,
+ .frequency_max = 862 * MHz,
.frequency_offst = 3616667,
.ref_multiplier = 6, /* 1/6 MHz */
.ref_divider = 100000, /* 1/6 MHz */
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
index b13e319d24b7..5f1613aec93c 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
@@ -214,11 +214,6 @@ static int netup_i2c_xfer(struct i2c_adapter *adap,
struct netup_i2c *i2c = i2c_get_adapdata(adap);
u16 reg;
- if (num <= 0) {
- dev_dbg(i2c->adap.dev.parent,
- "%s(): num == %d\n", __func__, num);
- return -EINVAL;
- }
spin_lock_irqsave(&i2c->lock, flags);
if (i2c->state != STATE_DONE) {
dev_dbg(i2c->adap.dev.parent,
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index fda969a85684..7f878fc41b7e 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -1443,9 +1443,7 @@ static struct pci_driver pt1_driver = {
.probe = pt1_probe,
.remove = pt1_remove,
.id_table = pt1_id_table,
-#ifdef CONFIG_PM_SLEEP
.driver.pm = &pt1_pm_ops,
-#endif
};
module_pci_driver(pt1_driver);
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index 64ab91c24c18..221de91a8bae 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -629,12 +629,12 @@ static __poll_t fops_poll(struct file *file, poll_table *wait)
port->last_poll_msecs_diff);
if (!video_is_registered(port->v4l_device))
- return -EIO;
+ return EPOLLERR;
if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
if (atomic_inc_return(&port->v4l_reader_count) == 1) {
if (saa7164_vbi_initialize(port) < 0)
- return -EINVAL;
+ return EPOLLERR;
saa7164_vbi_start_streaming(port);
msleep(200);
}
@@ -644,7 +644,7 @@ static __poll_t fops_poll(struct file *file, poll_table *wait)
if ((file->f_flags & O_NONBLOCK) == 0) {
if (wait_event_interruptible(port->wait_read,
saa7164_vbi_next_buf(port))) {
- return -ERESTARTSYS;
+ return EPOLLERR;
}
}
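
The saa7164 hunks complete the __poll_t conversion: a poll handler returns a mask of EPOLL* bits, so error conditions must be mapped to EPOLLERR rather than negative errno values, which are not valid poll masks. The shape of the fix, as a sketch (the condition name is illustrative):

static __poll_t fops_poll(struct file *file, poll_table *wait)
{
	/* ... */
	if (device_gone)		/* illustrative condition */
		return EPOLLERR;	/* not -EIO: poll returns a bitmask */
	/* ... */
	return EPOLLIN | EPOLLRDNORM;	/* data is available */
}
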
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 069c4a853346..1858efedaf1a 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -116,6 +116,7 @@ static inline struct vip_buffer *to_vip_buffer(struct vb2_v4l2_buffer *vb2)
* @sequence: sequence number of acquired buffer
* @active: current active buffer
* @lock: used in videobuf2 callback
+ * @v4l_lock: serializes the video4linux ioctls
* @tcount: Number of top frames
* @bcount: Number of bottom frames
* @overflow: Number of FIFO overflows
@@ -145,6 +146,7 @@ struct sta2x11_vip {
unsigned int sequence;
struct vip_buffer *active; /* current active buffer */
spinlock_t lock; /* Used in videobuf2 callback */
+ struct mutex v4l_lock;
/* Interrupt counters */
int tcount, bcount;
@@ -385,6 +387,8 @@ static const struct vb2_ops vip_video_qops = {
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
@@ -870,6 +874,7 @@ static int sta2x11_vip_init_buffer(struct sta2x11_vip *vip)
vip->vb_vidq.mem_ops = &vb2_dma_contig_memops;
vip->vb_vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vip->vb_vidq.dev = &vip->pdev->dev;
+ vip->vb_vidq.lock = &vip->v4l_lock;
err = vb2_queue_init(&vip->vb_vidq);
if (err)
return err;
@@ -1034,6 +1039,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
vip->std = V4L2_STD_PAL;
vip->format = formats_50[0];
vip->config = config;
+ mutex_init(&vip->v4l_lock);
ret = sta2x11_vip_init_controls(vip);
if (ret)
@@ -1080,6 +1086,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
vip->video_dev = video_dev_template;
vip->video_dev.v4l2_dev = &vip->v4l2_dev;
vip->video_dev.queue = &vip->vb_vidq;
+ vip->video_dev.lock = &vip->v4l_lock;
video_set_drvdata(&vip->video_dev, vip);
ret = video_register_device(&vip->video_dev, VFL_TYPE_GRABBER, -1);
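
The sta2x11 hunks wire one mutex into both the video device and the vb2 queue and install the vb2_ops_wait_prepare/wait_finish helpers; those helpers drop and re-take vq->lock around blocking waits so other ioctls are not starved while VIDIOC_DQBUF sleeps. The wiring, condensed into a sketch (not the full probe path):

	mutex_init(&vip->v4l_lock);
	vip->vb_vidq.lock = &vip->v4l_lock;	/* serializes queue ops */
	vip->video_dev.lock = &vip->v4l_lock;	/* serializes ioctls */
	/* vip_video_qops now includes wait_prepare/wait_finish, which
	 * release and re-acquire the lock while waiting for a buffer */
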
diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
index 0ea8dd44026c..3a06c000f97b 100644
--- a/drivers/media/pci/tw686x/tw686x-video.c
+++ b/drivers/media/pci/tw686x/tw686x-video.c
@@ -1190,6 +1190,14 @@ int tw686x_video_init(struct tw686x_dev *dev)
return err;
}
+ /* Initialize vc->dev and vc->ch for the error path */
+ for (ch = 0; ch < max_channels(dev); ch++) {
+ struct tw686x_video_channel *vc = &dev->video_channels[ch];
+
+ vc->dev = dev;
+ vc->ch = ch;
+ }
+
for (ch = 0; ch < max_channels(dev); ch++) {
struct tw686x_video_channel *vc = &dev->video_channels[ch];
struct video_device *vdev;
@@ -1198,9 +1206,6 @@ int tw686x_video_init(struct tw686x_dev *dev)
spin_lock_init(&vc->qlock);
INIT_LIST_HEAD(&vc->vidq_queued);
- vc->dev = dev;
- vc->ch = ch;
-
/* default settings */
err = tw686x_set_standard(vc, V4L2_STD_NTSC);
if (err)
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 2728376b04b5..b25c8d3c1c31 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -90,7 +90,7 @@ config VIDEO_PXA27x
This is a v4l2 driver for the PXA27x Quick Capture Interface
config VIDEO_QCOM_CAMSS
- tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver"
+ tristate "Qualcomm V4L2 Camera Subsystem driver"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
select VIDEOBUF2_DMA_SG
@@ -384,8 +384,8 @@ config VIDEO_SH_VEU
config VIDEO_RENESAS_FDP1
tristate "Renesas Fine Display Processor"
depends on VIDEO_DEV && VIDEO_V4L2
- depends on ARCH_SHMOBILE || COMPILE_TEST
- depends on (!ARCH_RENESAS && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
---help---
@@ -514,6 +514,9 @@ config VIDEO_VIM2M
---help---
This is a virtual test device for the memory-to-memory driver
framework.
+
+source "drivers/media/platform/vicodec/Kconfig"
+
endif #V4L_TEST_DRIVERS
menuconfig DVB_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 04bc1502a30e..08640ba87fc2 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
obj-$(CONFIG_VIDEO_VIMC) += vimc/
obj-$(CONFIG_VIDEO_VIVID) += vivid/
obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
+obj-$(CONFIG_VIDEO_VICODEC) += vicodec/
obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
@@ -88,7 +89,7 @@ obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp/
obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk-jpeg/
-obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom/camss-8x16/
+obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom/camss/
obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index e5be21a31640..e8db4df1e7c4 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -1106,23 +1106,20 @@ static int isi_graph_parse(struct atmel_isi *isi, struct device_node *node)
struct device_node *ep = NULL;
struct device_node *remote;
- while (1) {
- ep = of_graph_get_next_endpoint(node, ep);
- if (!ep)
- return -EINVAL;
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (!ep)
+ return -EINVAL;
- remote = of_graph_get_remote_port_parent(ep);
- if (!remote) {
- of_node_put(ep);
- return -EINVAL;
- }
+ remote = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (!remote)
+ return -EINVAL;
- /* Remote node to connect */
- isi->entity.node = remote;
- isi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- isi->entity.asd.match.fwnode = of_fwnode_handle(remote);
- return 0;
- }
+ /* Remote node to connect */
+ isi->entity.node = remote;
+ isi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ isi->entity.asd.match.fwnode = of_fwnode_handle(remote);
+ return 0;
}
static int isi_graph_init(struct atmel_isi *isi)
diff --git a/drivers/media/platform/cadence/Kconfig b/drivers/media/platform/cadence/Kconfig
index 3bf0f2454384..cf6124da3c54 100644
--- a/drivers/media/platform/cadence/Kconfig
+++ b/drivers/media/platform/cadence/Kconfig
@@ -11,6 +11,7 @@ if VIDEO_CADENCE
config VIDEO_CADENCE_CSI2RX
tristate "Cadence MIPI-CSI2 RX Controller"
+ depends on VIDEO_V4L2
depends on MEDIA_CONTROLLER
depends on VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
@@ -22,6 +23,7 @@ config VIDEO_CADENCE_CSI2RX
config VIDEO_CADENCE_CSI2TX
tristate "Cadence MIPI-CSI2 TX Controller"
+ depends on VIDEO_V4L2
depends on MEDIA_CONTROLLER
depends on VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index a0f02916006b..43e43c7b3e98 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -13,6 +13,7 @@
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/platform/cadence/cdns-csi2tx.c b/drivers/media/platform/cadence/cdns-csi2tx.c
index dfa1d88d955b..40d0de690ff4 100644
--- a/drivers/media/platform/cadence/cdns-csi2tx.c
+++ b/drivers/media/platform/cadence/cdns-csi2tx.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/platform/cec-gpio/cec-gpio.c b/drivers/media/platform/cec-gpio/cec-gpio.c
index 69f8242209c2..d2861749d640 100644
--- a/drivers/media/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/platform/cec-gpio/cec-gpio.c
@@ -23,6 +23,11 @@ struct cec_gpio {
int hpd_irq;
bool hpd_is_high;
ktime_t hpd_ts;
+
+ struct gpio_desc *v5_gpio;
+ int v5_irq;
+ bool v5_is_high;
+ ktime_t v5_ts;
};
static bool cec_gpio_read(struct cec_adapter *adap)
@@ -65,6 +70,26 @@ static irqreturn_t cec_hpd_gpio_irq_handler_thread(int irq, void *priv)
return IRQ_HANDLED;
}
+static irqreturn_t cec_5v_gpio_irq_handler(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+ bool is_high = gpiod_get_value(cec->v5_gpio);
+
+ if (is_high == cec->v5_is_high)
+ return IRQ_HANDLED;
+ cec->v5_ts = ktime_get();
+ cec->v5_is_high = is_high;
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t cec_5v_gpio_irq_handler_thread(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+
+ cec_queue_pin_5v_event(cec->adap, cec->v5_is_high, cec->v5_ts);
+ return IRQ_HANDLED;
+}
+
static irqreturn_t cec_hpd_gpio_irq_handler(int irq, void *priv)
{
struct cec_gpio *cec = priv;
@@ -119,6 +144,9 @@ static void cec_gpio_status(struct cec_adapter *adap, struct seq_file *file)
if (cec->hpd_gpio)
seq_printf(file, "hpd: %s\n",
cec->hpd_is_high ? "high" : "low");
+ if (cec->v5_gpio)
+ seq_printf(file, "5V: %s\n",
+ cec->v5_is_high ? "high" : "low");
}
static int cec_gpio_read_hpd(struct cec_adapter *adap)
@@ -130,6 +158,15 @@ static int cec_gpio_read_hpd(struct cec_adapter *adap)
return gpiod_get_value(cec->hpd_gpio);
}
+static int cec_gpio_read_5v(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (!cec->v5_gpio)
+ return -ENOTTY;
+ return gpiod_get_value(cec->v5_gpio);
+}
+
static void cec_gpio_free(struct cec_adapter *adap)
{
cec_gpio_disable_irq(adap);
@@ -144,6 +181,7 @@ static const struct cec_pin_ops cec_gpio_pin_ops = {
.status = cec_gpio_status,
.free = cec_gpio_free,
.read_hpd = cec_gpio_read_hpd,
+ .read_5v = cec_gpio_read_5v,
};
static int cec_gpio_probe(struct platform_device *pdev)
@@ -167,6 +205,10 @@ static int cec_gpio_probe(struct platform_device *pdev)
if (IS_ERR(cec->hpd_gpio))
return PTR_ERR(cec->hpd_gpio);
+ cec->v5_gpio = devm_gpiod_get_optional(dev, "v5", GPIOD_IN);
+ if (IS_ERR(cec->v5_gpio))
+ return PTR_ERR(cec->v5_gpio);
+
cec->adap = cec_pin_allocate_adapter(&cec_gpio_pin_ops,
cec, pdev->name, CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR |
CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN);
@@ -185,6 +227,18 @@ static int cec_gpio_probe(struct platform_device *pdev)
return ret;
}
+ if (cec->v5_gpio) {
+ cec->v5_irq = gpiod_to_irq(cec->v5_gpio);
+ ret = devm_request_threaded_irq(dev, cec->v5_irq,
+ cec_5v_gpio_irq_handler,
+ cec_5v_gpio_irq_handler_thread,
+ IRQF_ONESHOT |
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "v5-gpio", cec);
+ if (ret)
+ return ret;
+ }
+
ret = cec_register_adapter(cec->adap, &pdev->dev);
if (ret) {
cec_delete_adapter(cec->adap);
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 68ed2a564ad1..d26c2d85a009 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -261,11 +261,12 @@ void coda_fill_bitstream(struct coda_ctx *ctx, struct list_head *buffer_list)
while (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) {
/*
- * Only queue a single JPEG into the bitstream buffer, except
- * to increase payload over 512 bytes or if in hold state.
+ * Only queue two JPEGs into the bitstream buffer to keep
+ * latency low. We need at least one complete buffer and the
+ * header of another buffer (for prescan) in the bitstream.
*/
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
- (coda_get_bitstream_payload(ctx) >= 512) && !ctx->hold)
+ ctx->num_metas > 1)
break;
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
@@ -389,32 +390,29 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
struct coda_q_data *q_data, u32 fourcc)
{
struct coda_dev *dev = ctx->dev;
- int width, height;
- int ysize;
+ unsigned int ysize, ycbcr_size;
int ret;
int i;
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264 ||
ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4 ||
- ctx->codec->dst_fourcc == V4L2_PIX_FMT_MPEG4) {
- width = round_up(q_data->width, 16);
- height = round_up(q_data->height, 16);
- } else {
- width = round_up(q_data->width, 8);
- height = q_data->height;
- }
- ysize = width * height;
+ ctx->codec->dst_fourcc == V4L2_PIX_FMT_MPEG4)
+ ysize = round_up(q_data->rect.width, 16) *
+ round_up(q_data->rect.height, 16);
+ else
+ ysize = round_up(q_data->rect.width, 8) * q_data->rect.height;
+
+ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
+ ycbcr_size = round_up(ysize, 4096) + ysize / 2;
+ else
+ ycbcr_size = ysize + ysize / 2;
/* Allocate frame buffers */
for (i = 0; i < ctx->num_internal_frames; i++) {
- size_t size;
+ size_t size = ycbcr_size;
char *name;
- if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
- size = round_up(ysize, 4096) + ysize / 2;
- else
- size = ysize + ysize / 2;
/* Add space for mvcol buffers */
if (dev->devtype->product != CODA_DX6 &&
(ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
@@ -499,8 +497,8 @@ static int coda_alloc_context_buffers(struct coda_ctx *ctx,
if (!ctx->slicebuf.vaddr && q_data->fourcc == V4L2_PIX_FMT_H264) {
/* worst case slice size */
- size = (DIV_ROUND_UP(q_data->width, 16) *
- DIV_ROUND_UP(q_data->height, 16)) * 3200 / 8 + 512;
+ size = (DIV_ROUND_UP(q_data->rect.width, 16) *
+ DIV_ROUND_UP(q_data->rect.height, 16)) * 3200 / 8 + 512;
ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, size,
"slicebuf");
if (ret < 0)
@@ -538,6 +536,8 @@ static int coda_encode_header(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
{
struct vb2_buffer *vb = &buf->vb2_buf;
struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data_src;
+ struct v4l2_rect *r;
size_t bufsize;
int ret;
int i;
@@ -551,6 +551,23 @@ static int coda_encode_header(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
if (dev->devtype->product == CODA_960)
bufsize /= 1024;
coda_write(dev, bufsize, CODA_CMD_ENC_HEADER_BB_SIZE);
+ if (dev->devtype->product == CODA_960 &&
+ ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264 &&
+ header_code == CODA_HEADER_H264_SPS) {
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ r = &q_data_src->rect;
+
+ if (r->width % 16 || r->height % 16) {
+ u32 crop_right = round_up(r->width, 16) - r->width;
+ u32 crop_bottom = round_up(r->height, 16) - r->height;
+
+ coda_write(dev, crop_right,
+ CODA9_CMD_ENC_HEADER_FRAME_CROP_H);
+ coda_write(dev, crop_bottom,
+ CODA9_CMD_ENC_HEADER_FRAME_CROP_V);
+ header_code |= CODA9_HEADER_FRAME_CROP;
+ }
+ }
coda_write(dev, header_code, CODA_CMD_ENC_HEADER_CODE);
ret = coda_command_sync(ctx, CODA_COMMAND_ENCODE_HEADER);
if (ret < 0) {
@@ -632,7 +649,7 @@ static void coda_setup_iram(struct coda_ctx *ctx)
struct coda_q_data *q_data_src;
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- mb_width = DIV_ROUND_UP(q_data_src->width, 16);
+ mb_width = DIV_ROUND_UP(q_data_src->rect.width, 16);
w128 = mb_width * 128;
w64 = mb_width * 64;
@@ -721,6 +738,7 @@ static u32 coda_supported_firmwares[] = {
CODA_FIRMWARE_VERNUM(CODA_HX4, 1, 4, 50),
CODA_FIRMWARE_VERNUM(CODA_7541, 1, 4, 50),
CODA_FIRMWARE_VERNUM(CODA_960, 2, 1, 5),
+ CODA_FIRMWARE_VERNUM(CODA_960, 2, 1, 9),
CODA_FIRMWARE_VERNUM(CODA_960, 2, 3, 10),
CODA_FIRMWARE_VERNUM(CODA_960, 3, 1, 1),
};
@@ -928,25 +946,25 @@ static int coda_start_encoding(struct coda_ctx *ctx)
value = 0;
switch (dev->devtype->product) {
case CODA_DX6:
- value = (q_data_src->width & CODADX6_PICWIDTH_MASK)
+ value = (q_data_src->rect.width & CODADX6_PICWIDTH_MASK)
<< CODADX6_PICWIDTH_OFFSET;
- value |= (q_data_src->height & CODADX6_PICHEIGHT_MASK)
+ value |= (q_data_src->rect.height & CODADX6_PICHEIGHT_MASK)
<< CODA_PICHEIGHT_OFFSET;
break;
case CODA_HX4:
case CODA_7541:
if (dst_fourcc == V4L2_PIX_FMT_H264) {
- value = (round_up(q_data_src->width, 16) &
+ value = (round_up(q_data_src->rect.width, 16) &
CODA7_PICWIDTH_MASK) << CODA7_PICWIDTH_OFFSET;
- value |= (round_up(q_data_src->height, 16) &
+ value |= (round_up(q_data_src->rect.height, 16) &
CODA7_PICHEIGHT_MASK) << CODA_PICHEIGHT_OFFSET;
break;
}
/* fallthrough */
case CODA_960:
- value = (q_data_src->width & CODA7_PICWIDTH_MASK)
+ value = (q_data_src->rect.width & CODA7_PICWIDTH_MASK)
<< CODA7_PICWIDTH_OFFSET;
- value |= (q_data_src->height & CODA7_PICHEIGHT_MASK)
+ value |= (q_data_src->rect.height & CODA7_PICHEIGHT_MASK)
<< CODA_PICHEIGHT_OFFSET;
}
coda_write(dev, value, CODA_CMD_ENC_SEQ_SRC_SIZE);
@@ -1198,6 +1216,27 @@ static int coda_start_encoding(struct coda_ctx *ctx)
goto out;
/*
+ * If visible width or height are not aligned to macroblock
+ * size, the crop_right and crop_bottom SPS fields must be set
+ * to the difference between visible and coded size. This is
+ * only supported by CODA960 firmware. All others do not allow
+ * writing frame cropping parameters, so we have to manually
+ * fix up the SPS RBSP (Sequence Parameter Set Raw Byte
+ * Sequence Payload) ourselves.
+ */
+ if (ctx->dev->devtype->product != CODA_960 &&
+ ((q_data_src->rect.width % 16) ||
+ (q_data_src->rect.height % 16))) {
+ ret = coda_h264_sps_fixup(ctx, q_data_src->rect.width,
+ q_data_src->rect.height,
+ &ctx->vpu_header[0][0],
+ &ctx->vpu_header_size[0],
+ sizeof(ctx->vpu_header[0]));
+ if (ret < 0)
+ goto out;
+ }
+
+ /*
* Get PPS in the first frame and copy it to an
* intermediate buffer.
*/
@@ -1362,7 +1401,8 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
if (dev->devtype->product == CODA_960) {
coda_write(dev, 4/*FIXME: 0*/, CODA9_CMD_ENC_PIC_SRC_INDEX);
- coda_write(dev, q_data_src->width, CODA9_CMD_ENC_PIC_SRC_STRIDE);
+ coda_write(dev, q_data_src->bytesperline,
+ CODA9_CMD_ENC_PIC_SRC_STRIDE);
coda_write(dev, 0, CODA9_CMD_ENC_PIC_SUB_FRAME_SYNC);
reg = CODA9_CMD_ENC_PIC_SRC_ADDR_Y;
@@ -1582,10 +1622,8 @@ static int coda_decoder_reqbufs(struct coda_ctx *ctx,
static bool coda_reorder_enable(struct coda_ctx *ctx)
{
- const char * const *profile_names;
- const char * const *level_names;
struct coda_dev *dev = ctx->dev;
- int profile, level;
+ int profile;
if (dev->devtype->product != CODA_HX4 &&
dev->devtype->product != CODA_7541 &&
@@ -1599,24 +1637,9 @@ static bool coda_reorder_enable(struct coda_ctx *ctx)
return true;
profile = coda_h264_profile(ctx->params.h264_profile_idc);
- if (profile < 0) {
- v4l2_warn(&dev->v4l2_dev, "Invalid H264 Profile: %d\n",
- ctx->params.h264_profile_idc);
- return false;
- }
-
- level = coda_h264_level(ctx->params.h264_level_idc);
- if (level < 0) {
- v4l2_warn(&dev->v4l2_dev, "Invalid H264 Level: %d\n",
- ctx->params.h264_level_idc);
- return false;
- }
-
- profile_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
- level_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
-
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "H264 Profile/Level: %s L%s\n",
- profile_names[profile], level_names[level]);
+ if (profile < 0)
+ v4l2_warn(&dev->v4l2_dev, "Unknown H264 Profile: %u\n",
+ ctx->params.h264_profile_idc);
/* Baseline profile does not support reordering */
return profile > V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
@@ -1694,6 +1717,8 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
coda_write(dev, 512, CODA_CMD_DEC_SEQ_SPP_CHUNK_SIZE);
}
}
+ if (src_fourcc == V4L2_PIX_FMT_JPEG)
+ coda_write(dev, 0, CODA_CMD_DEC_SEQ_JPG_THUMB_EN);
if (dev->devtype->product != CODA_960)
coda_write(dev, 0, CODA_CMD_DEC_SEQ_SRC_SIZE);
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index c7631e117dd3..726b3b93a486 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -569,8 +569,6 @@ static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
f->fmt.pix.height * 2;
break;
case V4L2_PIX_FMT_JPEG:
- f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
- /* fallthrough */
case V4L2_PIX_FMT_H264:
case V4L2_PIX_FMT_MPEG4:
case V4L2_PIX_FMT_MPEG2:
@@ -935,6 +933,40 @@ static int coda_g_selection(struct file *file, void *fh,
return 0;
}
+static int coda_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_q_data *q_data;
+
+ if (ctx->inst_type == CODA_INST_ENCODER &&
+ s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ s->target == V4L2_SEL_TGT_CROP) {
+ q_data = get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = clamp(s->r.width, 2U, q_data->width);
+ s->r.height = clamp(s->r.height, 2U, q_data->height);
+
+ if (s->flags & V4L2_SEL_FLAG_LE) {
+ s->r.width = round_up(s->r.width, 2);
+ s->r.height = round_up(s->r.height, 2);
+ } else {
+ s->r.width = round_down(s->r.width, 2);
+ s->r.height = round_down(s->r.height, 2);
+ }
+
+ q_data->rect = s->r;
+
+ return 0;
+ }
+
+ return coda_g_selection(file, fh, s);
+}
+
static int coda_try_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *ec)
{
@@ -1148,6 +1180,7 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_g_selection = coda_g_selection,
+ .vidioc_s_selection = coda_s_selection,
.vidioc_try_encoder_cmd = coda_try_encoder_cmd,
.vidioc_encoder_cmd = coda_encoder_cmd,
@@ -1297,28 +1330,10 @@ static void coda_job_abort(void *priv)
"Aborting task\n");
}
-static void coda_lock(void *m2m_priv)
-{
- struct coda_ctx *ctx = m2m_priv;
- struct coda_dev *pcdev = ctx->dev;
-
- mutex_lock(&pcdev->dev_mutex);
-}
-
-static void coda_unlock(void *m2m_priv)
-{
- struct coda_ctx *ctx = m2m_priv;
- struct coda_dev *pcdev = ctx->dev;
-
- mutex_unlock(&pcdev->dev_mutex);
-}
-
static const struct v4l2_m2m_ops coda_m2m_ops = {
.device_run = coda_device_run,
.job_ready = coda_job_ready,
.job_abort = coda_job_abort,
- .lock = coda_lock,
- .unlock = coda_unlock,
};
static void set_default_params(struct coda_ctx *ctx)
@@ -1413,6 +1428,72 @@ static int coda_buf_prepare(struct vb2_buffer *vb)
return 0;
}
+static void coda_update_menu_ctrl(struct v4l2_ctrl *ctrl, int value)
+{
+ if (!ctrl)
+ return;
+
+ v4l2_ctrl_lock(ctrl);
+
+ /*
+ * Extend the control range if the parsed stream contains a known but
+ * unsupported profile or level.
+ */
+ if (value > ctrl->maximum) {
+ __v4l2_ctrl_modify_range(ctrl, ctrl->minimum, value,
+ ctrl->menu_skip_mask & ~(1 << value),
+ ctrl->default_value);
+ } else if (value < ctrl->minimum) {
+ __v4l2_ctrl_modify_range(ctrl, value, ctrl->maximum,
+ ctrl->menu_skip_mask & ~(1 << value),
+ ctrl->default_value);
+ }
+
+ __v4l2_ctrl_s_ctrl(ctrl, value);
+
+ v4l2_ctrl_unlock(ctrl);
+}
+
+static void coda_update_h264_profile_ctrl(struct coda_ctx *ctx)
+{
+ const char * const *profile_names;
+ int profile;
+
+ profile = coda_h264_profile(ctx->params.h264_profile_idc);
+ if (profile < 0) {
+ v4l2_warn(&ctx->dev->v4l2_dev, "Invalid H264 Profile: %u\n",
+ ctx->params.h264_profile_idc);
+ return;
+ }
+
+ coda_update_menu_ctrl(ctx->h264_profile_ctrl, profile);
+
+ profile_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "Parsed H264 Profile: %s\n",
+ profile_names[profile]);
+}
+
+static void coda_update_h264_level_ctrl(struct coda_ctx *ctx)
+{
+ const char * const *level_names;
+ int level;
+
+ level = coda_h264_level(ctx->params.h264_level_idc);
+ if (level < 0) {
+ v4l2_warn(&ctx->dev->v4l2_dev, "Invalid H264 Level: %u\n",
+ ctx->params.h264_level_idc);
+ return;
+ }
+
+ coda_update_menu_ctrl(ctx->h264_level_ctrl, level);
+
+ level_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "Parsed H264 Level: %s\n",
+ level_names[level]);
+}
+
static void coda_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
@@ -1441,8 +1522,11 @@ static void coda_buf_queue(struct vb2_buffer *vb)
* whether to enable reordering during sequence
* initialization.
*/
- if (!ctx->params.h264_profile_idc)
+ if (!ctx->params.h264_profile_idc) {
coda_sps_parse_profile(ctx, vb);
+ coda_update_h264_profile_ctrl(ctx);
+ coda_update_h264_level_ctrl(ctx);
+ }
}
mutex_lock(&ctx->bitstream_mutex);
@@ -1538,12 +1622,12 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
goto out;
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- if ((q_data_src->width != q_data_dst->width &&
- round_up(q_data_src->width, 16) != q_data_dst->width) ||
- (q_data_src->height != q_data_dst->height &&
- round_up(q_data_src->height, 16) != q_data_dst->height)) {
+ if ((q_data_src->rect.width != q_data_dst->width &&
+ round_up(q_data_src->rect.width, 16) != q_data_dst->width) ||
+ (q_data_src->rect.height != q_data_dst->height &&
+ round_up(q_data_src->rect.height, 16) != q_data_dst->height)) {
v4l2_err(v4l2_dev, "can't convert %dx%d to %dx%d\n",
- q_data_src->width, q_data_src->height,
+ q_data_src->rect.width, q_data_src->rect.height,
q_data_dst->width, q_data_dst->height);
ret = -EINVAL;
goto err;
@@ -1652,6 +1736,7 @@ static void coda_stop_streaming(struct vb2_queue *q)
ctx->bitstream.vaddr, ctx->bitstream.size);
ctx->runcounter = 0;
ctx->aborting = 0;
+ ctx->hold = false;
}
if (!ctx->streamon_out && !ctx->streamon_cap)
@@ -1880,6 +1965,47 @@ static void coda_jpeg_encode_ctrls(struct coda_ctx *ctx)
V4L2_CID_JPEG_RESTART_INTERVAL, 0, 100, 1, 0);
}
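+/*
+ * Register read-only H.264 profile and level controls on the decoder. The
+ * level range offered depends on what the respective CODA variant can
+ * decode.
+ */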
+static void coda_decode_ctrls(struct coda_ctx *ctx)
+{
+ u64 mask;
+ u8 max;
+
+ ctx->h264_profile_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
+ &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
+ if (ctx->h264_profile_ctrl)
+ ctx->h264_profile_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ if (ctx->dev->devtype->product == CODA_HX4 ||
+ ctx->dev->devtype->product == CODA_7541) {
+ max = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
+ mask = ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0));
+ } else if (ctx->dev->devtype->product == CODA_960) {
+ max = V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
+ mask = ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1));
+ } else {
+ return;
+ }
+ ctx->h264_level_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
+ &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL, max, mask,
+ max);
+ if (ctx->h264_level_ctrl)
+ ctx->h264_level_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+}
+
static int coda_ctrls_setup(struct coda_ctx *ctx)
{
v4l2_ctrl_handler_init(&ctx->ctrls, 2);
@@ -1893,6 +2019,9 @@ static int coda_ctrls_setup(struct coda_ctx *ctx)
coda_jpeg_encode_ctrls(ctx);
else
coda_encode_ctrls(ctx);
+ } else {
+ if (ctx->cvd->src_formats[0] == V4L2_PIX_FMT_H264)
+ coda_decode_ctrls(ctx);
}
if (ctx->ctrls.error) {
@@ -2092,9 +2221,9 @@ static int coda_open(struct file *file)
INIT_LIST_HEAD(&ctx->buffer_meta_list);
spin_lock_init(&ctx->buffer_meta_lock);
- coda_lock(ctx);
+ mutex_lock(&dev->dev_mutex);
list_add(&ctx->list, &dev->instances);
- coda_unlock(ctx);
+ mutex_unlock(&dev->dev_mutex);
v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Created instance %d (%p)\n",
ctx->idx, ctx);
@@ -2142,9 +2271,9 @@ static int coda_release(struct file *file)
flush_work(&ctx->seq_end_work);
}
- coda_lock(ctx);
+ mutex_lock(&dev->dev_mutex);
list_del(&ctx->list);
- coda_unlock(ctx);
+ mutex_unlock(&dev->dev_mutex);
if (ctx->dev->devtype->product == CODA_DX6)
coda_free_aux_buf(dev, &ctx->workbuf);
diff --git a/drivers/media/platform/coda/coda-h264.c b/drivers/media/platform/coda/coda-h264.c
index 0e27412e01f5..635356a839cf 100644
--- a/drivers/media/platform/coda/coda-h264.c
+++ b/drivers/media/platform/coda/coda-h264.c
@@ -108,6 +108,325 @@ int coda_h264_level(int level_idc)
case 32: return V4L2_MPEG_VIDEO_H264_LEVEL_3_2;
case 40: return V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
case 41: return V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
+ case 42: return V4L2_MPEG_VIDEO_H264_LEVEL_4_2;
+ case 50: return V4L2_MPEG_VIDEO_H264_LEVEL_5_0;
+ case 51: return V4L2_MPEG_VIDEO_H264_LEVEL_5_1;
default: return -EINVAL;
}
}
+
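+/*
+ * Minimal cursor over an H.264 raw byte sequence payload (RBSP): buf and
+ * size delimit the payload, pos is the current bit offset used by the
+ * bit-level readers and writers below.
+ */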
+struct rbsp {
+ char *buf;
+ int size;
+ int pos;
+};
+
+static inline int rbsp_read_bit(struct rbsp *rbsp)
+{
+ int shift = 7 - (rbsp->pos % 8);
+ int ofs = rbsp->pos++ / 8;
+
+ if (ofs >= rbsp->size)
+ return -EINVAL;
+
+ return (rbsp->buf[ofs] >> shift) & 1;
+}
+
+static inline int rbsp_write_bit(struct rbsp *rbsp, int bit)
+{
+ int shift = 7 - (rbsp->pos % 8);
+ int ofs = rbsp->pos++ / 8;
+
+ if (ofs >= rbsp->size)
+ return -EINVAL;
+
+ rbsp->buf[ofs] &= ~(1 << shift);
+ rbsp->buf[ofs] |= bit << shift;
+
+ return 0;
+}
+
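+/* Read up to 32 bits MSB-first; returns -EINVAL when running past buf. */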
+static inline int rbsp_read_bits(struct rbsp *rbsp, int num, unsigned int *val)
+{
+ int i, ret;
+ unsigned int tmp = 0;
+
+ if (num > 32)
+ return -EINVAL;
+
+ for (i = 0; i < num; i++) {
+ ret = rbsp_read_bit(rbsp);
+ if (ret < 0)
+ return ret;
+ tmp |= ret << (num - i - 1);
+ }
+
+ if (val)
+ *val = tmp;
+
+ return 0;
+}
+
+static int rbsp_write_bits(struct rbsp *rbsp, int num, int value)
+{
+ int ret;
+
+ while (num--) {
+ ret = rbsp_write_bit(rbsp, (value >> num) & 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
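+/*
+ * Parse an Exp-Golomb coded unsigned value, ue(v) in the H.264 spec: count
+ * the leading zero bits, read as many suffix bits, and combine them as
+ * (1 << leading_zero_bits) - 1 + suffix.
+ */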
+static int rbsp_read_uev(struct rbsp *rbsp, unsigned int *val)
+{
+ int leading_zero_bits = 0;
+ unsigned int tmp = 0;
+ int ret;
+
+ while ((ret = rbsp_read_bit(rbsp)) == 0)
+ leading_zero_bits++;
+ if (ret < 0)
+ return ret;
+
+ if (leading_zero_bits > 0) {
+ ret = rbsp_read_bits(rbsp, leading_zero_bits, &tmp);
+ if (ret)
+ return ret;
+ }
+
+ if (val)
+ *val = (1 << leading_zero_bits) - 1 + tmp;
+
+ return 0;
+}
+
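+/*
+ * Write an Exp-Golomb coded unsigned value: fls(value + 1) - 1 zero bits
+ * followed by the binary representation of value + 1.
+ */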
+static int rbsp_write_uev(struct rbsp *rbsp, unsigned int value)
+{
+ int i;
+ int ret;
+ int tmp = value + 1;
+ int leading_zero_bits = fls(tmp) - 1;
+
+ for (i = 0; i < leading_zero_bits; i++) {
+ ret = rbsp_write_bit(rbsp, 0);
+ if (ret)
+ return ret;
+ }
+
+ return rbsp_write_bits(rbsp, leading_zero_bits + 1, tmp);
+}
+
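+/*
+ * Parse an Exp-Golomb coded signed value, se(v): decode the unsigned code
+ * number k and map it to (-1)^(k+1) * ceil(k / 2), so odd code numbers
+ * become positive values and even ones negative.
+ */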
+static int rbsp_read_sev(struct rbsp *rbsp, int *val)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = rbsp_read_uev(rbsp, &tmp);
+ if (ret)
+ return ret;
+
+ if (val) {
+ if (tmp & 1)
+ *val = (tmp + 1) / 2;
+ else
+ *val = -(tmp / 2);
+ }
+
+ return 0;
+}
+
+/**
+ * coda_h264_sps_fixup - fixes frame cropping values in h.264 SPS
+ * @ctx: encoder context
+ * @width: visible width
+ * @height: visible height
+ * @buf: buffer containing h.264 SPS RBSP, starting with NAL header
+ * @size: modified RBSP size return value
+ * @max_size: available size in buf
+ *
+ * Rewrites the frame cropping values in an h.264 SPS RBSP correctly for the
+ * given visible width and height.
+ */
+int coda_h264_sps_fixup(struct coda_ctx *ctx, int width, int height, char *buf,
+ int *size, int max_size)
+{
+ int profile_idc;
+ unsigned int pic_order_cnt_type;
+ unsigned int pic_width_in_mbs_minus1, pic_height_in_map_units_minus1;
+ int frame_mbs_only_flag, frame_cropping_flag;
+ int vui_parameters_present_flag;
+ unsigned int crop_right, crop_bottom;
+ struct rbsp sps;
+ int pos;
+ int ret;
+
+ if (*size < 8 || *size >= max_size)
+ return -EINVAL;
+
+ sps.buf = buf + 5; /* Skip NAL header */
+ sps.size = *size - 5;
+
+ profile_idc = sps.buf[0];
+ /* Skip constraint_set[0-5]_flag, reserved_zero_2bits */
+ /* Skip level_idc */
+ sps.pos = 24;
+
+ /* seq_parameter_set_id */
+ ret = rbsp_read_uev(&sps, NULL);
+ if (ret)
+ return ret;
+
+ if (profile_idc == 100 || profile_idc == 110 || profile_idc == 122 ||
+ profile_idc == 244 || profile_idc == 44 || profile_idc == 83 ||
+ profile_idc == 86 || profile_idc == 118 || profile_idc == 128 ||
+ profile_idc == 138 || profile_idc == 139 || profile_idc == 134 ||
+ profile_idc == 135) {
+ dev_err(ctx->fh.vdev->dev_parent,
+ "%s: Handling profile_idc %d not implemented\n",
+ __func__, profile_idc);
+ return -EINVAL;
+ }
+
+ /* log2_max_frame_num_minus4 */
+ ret = rbsp_read_uev(&sps, NULL);
+ if (ret)
+ return ret;
+
+ ret = rbsp_read_uev(&sps, &pic_order_cnt_type);
+ if (ret)
+ return ret;
+
+ if (pic_order_cnt_type == 0) {
+ /* log2_max_pic_order_cnt_lsb_minus4 */
+ ret = rbsp_read_uev(&sps, NULL);
+ if (ret)
+ return ret;
+ } else if (pic_order_cnt_type == 1) {
+ unsigned int i, num_ref_frames_in_pic_order_cnt_cycle;
+
+ /* delta_pic_order_always_zero_flag */
+ ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ /* offset_for_non_ref_pic */
+ ret = rbsp_read_sev(&sps, NULL);
+ if (ret)
+ return ret;
+ /* offset_for_top_to_bottom_field */
+ ret = rbsp_read_sev(&sps, NULL);
+ if (ret)
+ return ret;
+
+ ret = rbsp_read_uev(&sps,
+ &num_ref_frames_in_pic_order_cnt_cycle);
+ if (ret)
+ return ret;
+ for (i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++) {
+ /* offset_for_ref_frame */
+ ret = rbsp_read_sev(&sps, NULL);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* max_num_ref_frames */
+ ret = rbsp_read_uev(&sps, NULL);
+ if (ret)
+ return ret;
+
+ /* gaps_in_frame_num_value_allowed_flag */
+ ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ ret = rbsp_read_uev(&sps, &pic_width_in_mbs_minus1);
+ if (ret)
+ return ret;
+ ret = rbsp_read_uev(&sps, &pic_height_in_map_units_minus1);
+ if (ret)
+ return ret;
+ frame_mbs_only_flag = ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ if (!frame_mbs_only_flag) {
+ /* mb_adaptive_frame_field_flag */
+ ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ }
+ /* direct_8x8_inference_flag */
+ ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+
+ /* Mark position of the frame cropping flag */
+ pos = sps.pos;
+ frame_cropping_flag = ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ if (frame_cropping_flag) {
+ unsigned int crop_left, crop_top;
+
+ ret = rbsp_read_uev(&sps, &crop_left);
+ if (ret)
+ return ret;
+ ret = rbsp_read_uev(&sps, &crop_right);
+ if (ret)
+ return ret;
+ ret = rbsp_read_uev(&sps, &crop_top);
+ if (ret)
+ return ret;
+ ret = rbsp_read_uev(&sps, &crop_bottom);
+ if (ret)
+ return ret;
+ }
+ vui_parameters_present_flag = ret = rbsp_read_bit(&sps);
+ if (ret < 0)
+ return ret;
+ if (vui_parameters_present_flag) {
+ dev_err(ctx->fh.vdev->dev_parent,
+ "%s: Handling vui_parameters not implemented\n",
+ __func__);
+ return -EINVAL;
+ }
+
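+ /*
+ * The SPS frame cropping offsets are given in chroma sample units: for
+ * the 4:2:0 streams handled here that is 2 luma samples horizontally
+ * and 2 (frame coding) or 4 (field coding) luma samples vertically.
+ */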
+ crop_right = round_up(width, 16) - width;
+ crop_bottom = round_up(height, 16) - height;
+ crop_right /= 2;
+ if (frame_mbs_only_flag)
+ crop_bottom /= 2;
+ else
+ crop_bottom /= 4;
+
+ sps.size = max_size - 5;
+ sps.pos = pos;
+ frame_cropping_flag = 1;
+ ret = rbsp_write_bit(&sps, frame_cropping_flag);
+ if (ret)
+ return ret;
+ ret = rbsp_write_uev(&sps, 0); /* crop_left */
+ if (ret)
+ return ret;
+ ret = rbsp_write_uev(&sps, crop_right);
+ if (ret)
+ return ret;
+ ret = rbsp_write_uev(&sps, 0); /* crop_top */
+ if (ret)
+ return ret;
+ ret = rbsp_write_uev(&sps, crop_bottom);
+ if (ret)
+ return ret;
+ ret = rbsp_write_bit(&sps, 0); /* vui_parameters_present_flag */
+ if (ret)
+ return ret;
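+ /* rbsp_stop_one_bit of the rbsp_trailing_bits() syntax */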
+ ret = rbsp_write_bit(&sps, 1);
+ if (ret)
+ return ret;
+
+ *size = 5 + DIV_ROUND_UP(sps.pos, 8);
+
+ return 0;
+}
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index c70cfab2433f..19ac0b9dc6eb 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -214,6 +214,8 @@ struct coda_ctx {
enum v4l2_quantization quantization;
struct coda_params params;
struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *h264_profile_ctrl;
+ struct v4l2_ctrl *h264_level_ctrl;
struct v4l2_fh fh;
int gopcounter;
int runcounter;
@@ -303,6 +305,8 @@ int coda_h264_padding(int size, char *p);
int coda_h264_profile(int profile_idc);
int coda_h264_level(int level_idc);
int coda_sps_parse_profile(struct coda_ctx *ctx, struct vb2_buffer *vb);
+int coda_h264_sps_fixup(struct coda_ctx *ctx, int width, int height, char *buf,
+ int *size, int max_size);
bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb);
int coda_jpeg_write_tables(struct coda_ctx *ctx);
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index 3b650b8aabe9..5e7b00a97671 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -157,6 +157,7 @@
#define CODA_CMD_DEC_SEQ_START_BYTE 0x190
#define CODA_CMD_DEC_SEQ_PS_BB_START 0x194
#define CODA_CMD_DEC_SEQ_PS_BB_SIZE 0x198
+#define CODA_CMD_DEC_SEQ_JPG_THUMB_EN 0x19c
#define CODA_CMD_DEC_SEQ_MP4_ASP_CLASS 0x19c
#define CODA_MP4_CLASS_MPEG4 0
#define CODA_CMD_DEC_SEQ_X264_MV_EN 0x19c
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
index 7f610320426d..c551a25d90d9 100644
--- a/drivers/media/platform/davinci/vpbe_osd.c
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -18,6 +18,7 @@
*
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index ba157827192c..ddcad7b3e76c 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -11,6 +11,7 @@
* GNU General Public License for more details.
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ctype.h>
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 7be636237acf..0f324055cc9f 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1114,6 +1114,14 @@ vpif_init_free_channel_objects:
return err;
}
+static void free_vpif_objs(void)
+{
+ int i;
+
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++)
+ kfree(vpif_obj.dev[i]);
+}
+
static int vpif_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
@@ -1255,11 +1263,6 @@ static __init int vpif_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (!pdev->dev.platform_data) {
- dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
- return -EINVAL;
- }
-
vpif_dev = &pdev->dev;
err = initialize_vpif();
@@ -1271,7 +1274,7 @@ static __init int vpif_probe(struct platform_device *pdev)
err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
if (err) {
v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
- return err;
+ goto vpif_free;
}
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
@@ -1314,7 +1317,10 @@ static __init int vpif_probe(struct platform_device *pdev)
if (vpif_obj.sd[i])
vpif_obj.sd[i]->grp_id = 1 << i;
}
- vpif_probe_complete();
+ err = vpif_probe_complete();
+ if (err)
+ goto probe_subdev_out;
} else {
vpif_obj.notifier.subdevs = vpif_obj.config->asd;
vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
@@ -1334,6 +1340,8 @@ probe_subdev_out:
kfree(vpif_obj.sd);
vpif_unregister:
v4l2_device_unregister(&vpif_obj.v4l2_dev);
+vpif_free:
+ free_vpif_objs();
return err;
}
@@ -1355,8 +1363,8 @@ static int vpif_remove(struct platform_device *device)
ch = vpif_obj.dev[i];
/* Unregister video device */
video_unregister_device(&ch->video_dev);
- kfree(vpif_obj.dev[i]);
}
+ free_vpif_objs();
return 0;
}
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index e9ff27949a91..c9d2f6c5311a 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -713,7 +713,7 @@ static __poll_t gsc_m2m_poll(struct file *file,
__poll_t ret;
if (mutex_lock_interruptible(&gsc->lock))
- return -ERESTARTSYS;
+ return EPOLLERR;
ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
mutex_unlock(&gsc->lock);
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 55ba696b8cf4..a920164f53f1 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -384,12 +384,17 @@ static void __isp_video_try_fmt(struct fimc_isp *isp,
struct v4l2_pix_format_mplane *pixm,
const struct fimc_fmt **fmt)
{
- *fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+ const struct fimc_fmt *__fmt;
+
+ __fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+
+ if (fmt)
+ *fmt = __fmt;
pixm->colorspace = V4L2_COLORSPACE_SRGB;
pixm->field = V4L2_FIELD_NONE;
- pixm->num_planes = (*fmt)->memplanes;
- pixm->pixelformat = (*fmt)->fourcc;
+ pixm->num_planes = __fmt->memplanes;
+ pixm->pixelformat = __fmt->fourcc;
/*
* TODO: double check with the documentation that these width/height
* constraints are correct.
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 78b48a1fa26c..deb499f76412 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1201,8 +1201,7 @@ static const struct media_device_ops fimc_md_ops = {
static ssize_t fimc_md_sysfs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct fimc_md *fmd = platform_get_drvdata(pdev);
+ struct fimc_md *fmd = dev_get_drvdata(dev);
if (fmd->user_subdev_api)
return strlcpy(buf, "Sub-device API (sub-dev)\n", PAGE_SIZE);
@@ -1214,8 +1213,7 @@ static ssize_t fimc_md_sysfs_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct fimc_md *fmd = platform_get_drvdata(pdev);
+ struct fimc_md *fmd = dev_get_drvdata(dev);
bool subdev_api;
int i;
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index cba46a656338..b4e28a299e26 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -891,8 +891,7 @@ e_clkput:
static int s5pcsis_pm_suspend(struct device *dev, bool runtime)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct csis_state *state = sd_to_csis_state(sd);
int ret = 0;
@@ -921,8 +920,7 @@ static int s5pcsis_pm_suspend(struct device *dev, bool runtime)
static int s5pcsis_pm_resume(struct device *dev, bool runtime)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct csis_state *state = sd_to_csis_state(sd);
int ret = 0;
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index e41510ce69a4..0273302aa741 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1414,7 +1414,7 @@ static int viu_of_probe(struct platform_device *op)
sizeof(struct viu_reg), DRV_NAME)) {
dev_err(&op->dev, "Error while requesting mem region\n");
ret = -EBUSY;
- goto err;
+ goto err_irq;
}
/* remap registers */
@@ -1422,7 +1422,7 @@ static int viu_of_probe(struct platform_device *op)
if (!viu_regs) {
dev_err(&op->dev, "Can't map register set\n");
ret = -ENOMEM;
- goto err;
+ goto err_irq;
}
/* Prepare our private structure */
@@ -1430,7 +1430,7 @@ static int viu_of_probe(struct platform_device *op)
if (!viu_dev) {
dev_err(&op->dev, "Can't allocate private structure\n");
ret = -ENOMEM;
- goto err;
+ goto err_irq;
}
viu_dev->vr = viu_regs;
@@ -1446,16 +1446,21 @@ static int viu_of_probe(struct platform_device *op)
ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev);
if (ret < 0) {
dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret);
- goto err;
+ goto err_irq;
}
ad = i2c_get_adapter(0);
+ if (!ad) {
+ ret = -EFAULT;
+ dev_err(&op->dev, "couldn't get i2c adapter\n");
+ goto err_v4l2;
+ }
v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
if (viu_dev->hdl.error) {
ret = viu_dev->hdl.error;
dev_err(&op->dev, "couldn't register control\n");
- goto err_vdev;
+ goto err_i2c;
}
/* This control handler will inherit the control(s) from the
sub-device(s). */
@@ -1471,7 +1476,7 @@ static int viu_of_probe(struct platform_device *op)
vdev = video_device_alloc();
if (vdev == NULL) {
ret = -ENOMEM;
- goto err_vdev;
+ goto err_hdl;
}
*vdev = viu_template;
@@ -1492,7 +1497,7 @@ static int viu_of_probe(struct platform_device *op)
ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
video_device_release(viu_dev->vdev);
- goto err_vdev;
+ goto err_unlock;
}
/* enable VIU clock */
@@ -1500,12 +1505,12 @@ static int viu_of_probe(struct platform_device *op)
if (IS_ERR(clk)) {
dev_err(&op->dev, "failed to lookup the clock!\n");
ret = PTR_ERR(clk);
- goto err_clk;
+ goto err_vdev;
}
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(&op->dev, "failed to enable the clock!\n");
- goto err_clk;
+ goto err_vdev;
}
viu_dev->clk = clk;
@@ -1516,7 +1521,7 @@ static int viu_of_probe(struct platform_device *op)
if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
dev_err(&op->dev, "Request VIU IRQ failed.\n");
ret = -ENODEV;
- goto err_irq;
+ goto err_clk;
}
mutex_unlock(&viu_dev->lock);
@@ -1524,16 +1529,19 @@ static int viu_of_probe(struct platform_device *op)
dev_info(&op->dev, "Freescale VIU Video Capture Board\n");
return ret;
-err_irq:
- clk_disable_unprepare(viu_dev->clk);
err_clk:
- video_unregister_device(viu_dev->vdev);
+ clk_disable_unprepare(viu_dev->clk);
err_vdev:
- v4l2_ctrl_handler_free(&viu_dev->hdl);
+ video_unregister_device(viu_dev->vdev);
+err_unlock:
mutex_unlock(&viu_dev->lock);
+err_hdl:
+ v4l2_ctrl_handler_free(&viu_dev->hdl);
+err_i2c:
i2c_put_adapter(ad);
+err_v4l2:
v4l2_device_unregister(&viu_dev->v4l2_dev);
-err:
+err_irq:
irq_dispose_mapping(viu_irq);
return ret;
}
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 1e4195144f39..5f84d2aa47ab 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -181,20 +181,6 @@ static void deinterlace_job_abort(void *priv)
v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx);
}
-static void deinterlace_lock(void *priv)
-{
- struct deinterlace_ctx *ctx = priv;
- struct deinterlace_dev *pcdev = ctx->dev;
- mutex_lock(&pcdev->dev_mutex);
-}
-
-static void deinterlace_unlock(void *priv)
-{
- struct deinterlace_ctx *ctx = priv;
- struct deinterlace_dev *pcdev = ctx->dev;
- mutex_unlock(&pcdev->dev_mutex);
-}
-
static void dma_callback(void *data)
{
struct deinterlace_ctx *curr_ctx = data;
@@ -856,6 +842,8 @@ static const struct vb2_ops deinterlace_qops = {
.queue_setup = deinterlace_queue_setup,
.buf_prepare = deinterlace_buf_prepare,
.buf_queue = deinterlace_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static int queue_init(void *priv, struct vb2_queue *src_vq,
@@ -872,6 +860,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->dev = ctx->dev->v4l2_dev.dev;
+ src_vq->lock = &ctx->dev->dev_mutex;
q_data[V4L2_M2M_SRC].fmt = &formats[0];
q_data[V4L2_M2M_SRC].width = 640;
q_data[V4L2_M2M_SRC].height = 480;
@@ -890,6 +879,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->dev = ctx->dev->v4l2_dev.dev;
+ dst_vq->lock = &ctx->dev->dev_mutex;
q_data[V4L2_M2M_DST].fmt = &formats[0];
q_data[V4L2_M2M_DST].width = 640;
q_data[V4L2_M2M_DST].height = 480;
@@ -956,9 +946,9 @@ static __poll_t deinterlace_poll(struct file *file,
struct deinterlace_ctx *ctx = file->private_data;
__poll_t ret;
- deinterlace_lock(ctx);
+ mutex_lock(&ctx->dev->dev_mutex);
ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
- deinterlace_unlock(ctx);
+ mutex_unlock(&ctx->dev->dev_mutex);
return ret;
}
@@ -992,8 +982,6 @@ static const struct v4l2_m2m_ops m2m_ops = {
.device_run = deinterlace_device_run,
.job_ready = deinterlace_job_ready,
.job_abort = deinterlace_job_abort,
- .lock = deinterlace_lock,
- .unlock = deinterlace_unlock,
};
static int deinterlace_probe(struct platform_device *pdev)
@@ -1040,7 +1028,6 @@ static int deinterlace_probe(struct platform_device *pdev)
}
video_set_drvdata(vfd, pcdev);
- snprintf(vfd->name, sizeof(vfd->name), "%s", deinterlace_videodev.name);
v4l2_info(&pcdev->v4l2_dev, MEM2MEM_TEST_MODULE_NAME
" Device registered as /dev/video%d\n", vfd->num);
diff --git a/drivers/media/platform/meson/ao-cec.c b/drivers/media/platform/meson/ao-cec.c
index 8040a6285c3f..cd4be38ab5ac 100644
--- a/drivers/media/platform/meson/ao-cec.c
+++ b/drivers/media/platform/meson/ao-cec.c
@@ -524,7 +524,7 @@ static int meson_ao_cec_transmit(struct cec_adapter *adap, u8 attempts,
return ret;
if (reg == TX_BUSY) {
- dev_err(&ao_cec->pdev->dev, "%s: busy TX: aborting\n",
+ dev_dbg(&ao_cec->pdev->dev, "%s: busy TX: aborting\n",
__func__);
meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_ABORT, &ret);
}
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
index 328e8f650d9b..4f24da8afecc 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -861,14 +861,9 @@ static int mtk_jpeg_job_ready(void *priv)
return (ctx->state == MTK_JPEG_RUNNING) ? 1 : 0;
}
-static void mtk_jpeg_job_abort(void *priv)
-{
-}
-
static const struct v4l2_m2m_ops mtk_jpeg_m2m_ops = {
.device_run = mtk_jpeg_device_run,
.job_ready = mtk_jpeg_job_ready,
- .job_abort = mtk_jpeg_job_abort,
};
static int mtk_jpeg_queue_init(void *priv, struct vb2_queue *src_vq,
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
index 583d47724ee8..ceffc31cc6eb 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
@@ -394,20 +394,6 @@ static bool mtk_mdp_ctx_state_is_set(struct mtk_mdp_ctx *ctx, u32 mask)
return ret;
}
-static void mtk_mdp_ctx_lock(struct vb2_queue *vq)
-{
- struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vq);
-
- mutex_lock(&ctx->mdp_dev->lock);
-}
-
-static void mtk_mdp_ctx_unlock(struct vb2_queue *vq)
-{
- struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vq);
-
- mutex_unlock(&ctx->mdp_dev->lock);
-}
-
static void mtk_mdp_set_frame_size(struct mtk_mdp_frame *frame, int width,
int height)
{
@@ -455,10 +441,6 @@ static void mtk_mdp_m2m_stop_streaming(struct vb2_queue *q)
pm_runtime_put(&ctx->mdp_dev->pdev->dev);
}
-static void mtk_mdp_m2m_job_abort(void *priv)
-{
-}
-
/* The color format (num_planes) must be already configured. */
static void mtk_mdp_prepare_addr(struct mtk_mdp_ctx *ctx,
struct vb2_buffer *vb,
@@ -625,10 +607,10 @@ static const struct vb2_ops mtk_mdp_m2m_qops = {
.queue_setup = mtk_mdp_m2m_queue_setup,
.buf_prepare = mtk_mdp_m2m_buf_prepare,
.buf_queue = mtk_mdp_m2m_buf_queue,
- .wait_prepare = mtk_mdp_ctx_unlock,
- .wait_finish = mtk_mdp_ctx_lock,
.stop_streaming = mtk_mdp_m2m_stop_streaming,
.start_streaming = mtk_mdp_m2m_start_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static int mtk_mdp_m2m_querycap(struct file *file, void *fh,
@@ -996,6 +978,7 @@ static int mtk_mdp_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->dev = &ctx->mdp_dev->pdev->dev;
+ src_vq->lock = &ctx->mdp_dev->lock;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -1010,6 +993,7 @@ static int mtk_mdp_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->dev = &ctx->mdp_dev->pdev->dev;
+ dst_vq->lock = &ctx->mdp_dev->lock;
return vb2_queue_init(dst_vq);
}
@@ -1227,7 +1211,6 @@ static const struct v4l2_file_operations mtk_mdp_m2m_fops = {
static const struct v4l2_m2m_ops mtk_mdp_m2m_ops = {
.device_run = mtk_mdp_m2m_device_run,
- .job_abort = mtk_mdp_m2m_job_abort,
};
int mtk_mdp_register_m2m_device(struct mtk_mdp_dev *mdp)
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 86f0a7134365..0c8a8b4c4e1c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -1400,6 +1400,11 @@ int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
0, 32, 1, 1);
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_hdl,
+ &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_0,
+ 0, V4L2_MPEG_VIDEO_VP9_PROFILE_0);
if (ctx->ctrl_hdl.error) {
mtk_v4l2_err("Adding control failed %d",
@@ -1411,28 +1416,10 @@ int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
return 0;
}
-static void m2mops_vdec_lock(void *m2m_priv)
-{
- struct mtk_vcodec_ctx *ctx = m2m_priv;
-
- mtk_v4l2_debug(3, "[%d]", ctx->id);
- mutex_lock(&ctx->dev->dev_mutex);
-}
-
-static void m2mops_vdec_unlock(void *m2m_priv)
-{
- struct mtk_vcodec_ctx *ctx = m2m_priv;
-
- mtk_v4l2_debug(3, "[%d]", ctx->id);
- mutex_unlock(&ctx->dev->dev_mutex);
-}
-
const struct v4l2_m2m_ops mtk_vdec_m2m_ops = {
.device_run = m2mops_vdec_device_run,
.job_ready = m2mops_vdec_job_ready,
.job_abort = m2mops_vdec_job_abort,
- .lock = m2mops_vdec_lock,
- .unlock = m2mops_vdec_unlock,
};
static const struct vb2_ops mtk_vdec_vb2_ops = {
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 1b1a28abbf1f..6ad408514a99 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -1181,26 +1181,10 @@ static void m2mops_venc_job_abort(void *priv)
ctx->state = MTK_STATE_ABORT;
}
-static void m2mops_venc_lock(void *m2m_priv)
-{
- struct mtk_vcodec_ctx *ctx = m2m_priv;
-
- mutex_lock(&ctx->dev->dev_mutex);
-}
-
-static void m2mops_venc_unlock(void *m2m_priv)
-{
- struct mtk_vcodec_ctx *ctx = m2m_priv;
-
- mutex_unlock(&ctx->dev->dev_mutex);
-}
-
const struct v4l2_m2m_ops mtk_venc_m2m_ops = {
.device_run = m2mops_venc_device_run,
.job_ready = m2mops_venc_job_ready,
.job_abort = m2mops_venc_job_abort,
- .lock = m2mops_venc_lock,
- .unlock = m2mops_venc_unlock,
};
void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_ctx *ctx)
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index 1ff6a93262b7..f8d35e3ac1dc 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -960,4 +960,4 @@ static struct platform_driver mtk_vpu_driver = {
module_platform_driver(mtk_vpu_driver);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Mediatek Video Prosessor Unit driver");
+MODULE_DESCRIPTION("Mediatek Video Processor Unit driver");
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 5a8eff60e95f..64195c4ddeaf 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -250,20 +250,6 @@ static void emmaprp_job_abort(void *priv)
v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx);
}
-static void emmaprp_lock(void *priv)
-{
- struct emmaprp_ctx *ctx = priv;
- struct emmaprp_dev *pcdev = ctx->dev;
- mutex_lock(&pcdev->dev_mutex);
-}
-
-static void emmaprp_unlock(void *priv)
-{
- struct emmaprp_ctx *ctx = priv;
- struct emmaprp_dev *pcdev = ctx->dev;
- mutex_unlock(&pcdev->dev_mutex);
-}
-
static inline void emmaprp_dump_regs(struct emmaprp_dev *pcdev)
{
dprintk(pcdev,
@@ -747,6 +733,8 @@ static const struct vb2_ops emmaprp_qops = {
.queue_setup = emmaprp_queue_setup,
.buf_prepare = emmaprp_buf_prepare,
.buf_queue = emmaprp_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static int queue_init(void *priv, struct vb2_queue *src_vq,
@@ -763,6 +751,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->dev = ctx->dev->v4l2_dev.dev;
+ src_vq->lock = &ctx->dev->dev_mutex;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -776,6 +765,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->dev = ctx->dev->v4l2_dev.dev;
+ dst_vq->lock = &ctx->dev->dev_mutex;
return vb2_queue_init(dst_vq);
}
@@ -885,8 +875,6 @@ static const struct video_device emmaprp_videodev = {
static const struct v4l2_m2m_ops m2m_ops = {
.device_run = emmaprp_device_run,
.job_abort = emmaprp_job_abort,
- .lock = emmaprp_lock,
- .unlock = emmaprp_unlock,
};
static int emmaprp_probe(struct platform_device *pdev)
@@ -934,7 +922,6 @@ static int emmaprp_probe(struct platform_device *pdev)
vfd->v4l2_dev = &pcdev->v4l2_dev;
video_set_drvdata(vfd, pcdev);
- snprintf(vfd->name, sizeof(vfd->name), "%s", emmaprp_videodev.name);
pcdev->vfd = vfd;
v4l2_info(&pcdev->v4l2_dev, EMMAPRP_MODULE_NAME
" Device registered as /dev/video%d\n", vfd->num);
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index d827b6c285a6..4b5e55d41ad4 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -8,6 +8,7 @@ config VIDEO_OMAP2_VOUT
depends on MMU
depends on FB_OMAP2 || (COMPILE_TEST && FB_OMAP2=n)
depends on ARCH_OMAP2 || ARCH_OMAP3 || COMPILE_TEST
+ depends on VIDEO_V4L2
select VIDEOBUF_GEN
select VIDEOBUF_DMA_CONTIG
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index f22cf351e3ee..842e2235047d 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -300,7 +300,7 @@ static struct clk *isp_xclk_src_get(struct of_phandle_args *clkspec, void *data)
static int isp_xclk_init(struct isp_device *isp)
{
struct device_node *np = isp->dev->of_node;
- struct clk_init_data init;
+ struct clk_init_data init = {};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
@@ -971,7 +971,7 @@ static void isp_resume_module_pipeline(struct media_entity *me)
* Returns 0 if suspend left in idle state all the submodules properly,
* or returns 1 if a general Reset is required to suspend the submodules.
*/
-static int isp_suspend_modules(struct isp_device *isp)
+static int __maybe_unused isp_suspend_modules(struct isp_device *isp)
{
unsigned long timeout;
@@ -1005,7 +1005,7 @@ static int isp_suspend_modules(struct isp_device *isp)
* isp_resume_modules - Resume ISP submodules.
* @isp: OMAP3 ISP device
*/
-static void isp_resume_modules(struct isp_device *isp)
+static void __maybe_unused isp_resume_modules(struct isp_device *isp)
{
omap3isp_stat_resume(&isp->isp_aewb);
omap3isp_stat_resume(&isp->isp_af);
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index d85ffbfb7c1f..b6e9e93bde7a 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2375,8 +2375,6 @@ static int pxa_camera_probe(struct platform_device *pdev)
.src_maxburst = 8,
.direction = DMA_DEV_TO_MEM,
};
- dma_cap_mask_t mask;
- struct pxad_param params;
char clk_name[V4L2_CLK_NAME_SIZE];
int irq;
int err = 0, i;
@@ -2450,34 +2448,20 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->base = base;
/* request dma */
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma_cap_set(DMA_PRIVATE, mask);
-
- params.prio = 0;
- params.drcmr = 68;
- pcdev->dma_chans[0] =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &params, &pdev->dev, "CI_Y");
+ pcdev->dma_chans[0] = dma_request_slave_channel(&pdev->dev, "CI_Y");
if (!pcdev->dma_chans[0]) {
dev_err(&pdev->dev, "Can't request DMA for Y\n");
return -ENODEV;
}
- params.drcmr = 69;
- pcdev->dma_chans[1] =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &params, &pdev->dev, "CI_U");
+ pcdev->dma_chans[1] = dma_request_slave_channel(&pdev->dev, "CI_U");
if (!pcdev->dma_chans[1]) {
dev_err(&pdev->dev, "Can't request DMA for U\n");
err = -ENODEV;
goto exit_free_dma_y;
}
- params.drcmr = 70;
- pcdev->dma_chans[2] =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &params, &pdev->dev, "CI_V");
+ pcdev->dma_chans[2] = dma_request_slave_channel(&pdev->dev, "CI_V");
if (!pcdev->dma_chans[2]) {
dev_err(&pdev->dev, "Can't request DMA for V\n");
err = -ENODEV;
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.h b/drivers/media/platform/qcom/camss-8x16/camss-vfe.h
deleted file mode 100644
index 53d5b66a9dfb..000000000000
--- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * camss-vfe.h
- *
- * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module
- *
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef QC_MSM_CAMSS_VFE_H
-#define QC_MSM_CAMSS_VFE_H
-
-#include <linux/clk.h>
-#include <linux/spinlock_types.h>
-#include <media/media-entity.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-subdev.h>
-
-#include "camss-video.h"
-
-#define MSM_VFE_PAD_SINK 0
-#define MSM_VFE_PAD_SRC 1
-#define MSM_VFE_PADS_NUM 2
-
-#define MSM_VFE_LINE_NUM 4
-#define MSM_VFE_IMAGE_MASTERS_NUM 7
-#define MSM_VFE_COMPOSITE_IRQ_NUM 4
-
-#define MSM_VFE_VFE0_UB_SIZE 1023
-#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
-#define MSM_VFE_VFE1_UB_SIZE 1535
-#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
-
-enum vfe_output_state {
- VFE_OUTPUT_OFF,
- VFE_OUTPUT_RESERVED,
- VFE_OUTPUT_SINGLE,
- VFE_OUTPUT_CONTINUOUS,
- VFE_OUTPUT_IDLE,
- VFE_OUTPUT_STOPPING
-};
-
-enum vfe_line_id {
- VFE_LINE_NONE = -1,
- VFE_LINE_RDI0 = 0,
- VFE_LINE_RDI1 = 1,
- VFE_LINE_RDI2 = 2,
- VFE_LINE_PIX = 3
-};
-
-struct vfe_output {
- u8 wm_num;
- u8 wm_idx[3];
-
- int active_buf;
- struct camss_buffer *buf[2];
- struct camss_buffer *last_buffer;
- struct list_head pending_bufs;
-
- unsigned int drop_update_idx;
-
- enum vfe_output_state state;
- unsigned int sequence;
- int wait_sof;
- int wait_reg_update;
- struct completion sof;
- struct completion reg_update;
-};
-
-struct vfe_line {
- enum vfe_line_id id;
- struct v4l2_subdev subdev;
- struct media_pad pads[MSM_VFE_PADS_NUM];
- struct v4l2_mbus_framefmt fmt[MSM_VFE_PADS_NUM];
- struct v4l2_rect compose;
- struct v4l2_rect crop;
- struct camss_video video_out;
- struct vfe_output output;
-};
-
-struct vfe_device {
- u8 id;
- void __iomem *base;
- u32 irq;
- char irq_name[30];
- struct camss_clock *clock;
- int nclocks;
- struct completion reset_complete;
- struct completion halt_complete;
- struct mutex power_lock;
- int power_count;
- struct mutex stream_lock;
- int stream_count;
- spinlock_t output_lock;
- enum vfe_line_id wm_output_map[MSM_VFE_IMAGE_MASTERS_NUM];
- struct vfe_line line[MSM_VFE_LINE_NUM];
- u32 reg_update;
- u8 was_streaming;
-};
-
-struct resources;
-
-int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res);
-
-int msm_vfe_register_entities(struct vfe_device *vfe,
- struct v4l2_device *v4l2_dev);
-
-void msm_vfe_unregister_entities(struct vfe_device *vfe);
-
-void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id);
-void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id);
-
-void msm_vfe_stop_streaming(struct vfe_device *vfe);
-
-#endif /* QC_MSM_CAMSS_VFE_H */
diff --git a/drivers/media/platform/qcom/camss-8x16/Makefile b/drivers/media/platform/qcom/camss/Makefile
index 3c4024fbb768..f5e6e255f2a1 100644
--- a/drivers/media/platform/qcom/camss-8x16/Makefile
+++ b/drivers/media/platform/qcom/camss/Makefile
@@ -3,8 +3,12 @@
qcom-camss-objs += \
camss.o \
camss-csid.o \
+ camss-csiphy-2ph-1-0.o \
+ camss-csiphy-3ph-1-0.o \
camss-csiphy.o \
camss-ispif.o \
+ camss-vfe-4-1.o \
+ camss-vfe-4-7.o \
camss-vfe.o \
camss-video.o \
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index 226f36ef7419..729b31891466 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -1,19 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* camss-csid.c
*
* Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
*
* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/completion.h>
@@ -21,9 +13,11 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <media/media-entity.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>
#include "camss-csid.h"
@@ -34,21 +28,35 @@
#define CAMSS_CSID_HW_VERSION 0x0
#define CAMSS_CSID_CORE_CTRL_0 0x004
#define CAMSS_CSID_CORE_CTRL_1 0x008
-#define CAMSS_CSID_RST_CMD 0x00c
-#define CAMSS_CSID_CID_LUT_VC_n(n) (0x010 + 0x4 * (n))
-#define CAMSS_CSID_CID_n_CFG(n) (0x020 + 0x4 * (n))
-#define CAMSS_CSID_IRQ_CLEAR_CMD 0x060
-#define CAMSS_CSID_IRQ_MASK 0x064
-#define CAMSS_CSID_IRQ_STATUS 0x068
-#define CAMSS_CSID_TG_CTRL 0x0a0
+#define CAMSS_CSID_RST_CMD(v) ((v) == CAMSS_8x16 ? 0x00c : 0x010)
+#define CAMSS_CSID_CID_LUT_VC_n(v, n) \
+ (((v) == CAMSS_8x16 ? 0x010 : 0x014) + 0x4 * (n))
+#define CAMSS_CSID_CID_n_CFG(v, n) \
+ (((v) == CAMSS_8x16 ? 0x020 : 0x024) + 0x4 * (n))
+#define CAMSS_CSID_CID_n_CFG_ISPIF_EN BIT(0)
+#define CAMSS_CSID_CID_n_CFG_RDI_EN BIT(1)
+#define CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT 4
+#define CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_8 (0 << 8)
+#define CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_16 (1 << 8)
+#define CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_LSB (0 << 9)
+#define CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_MSB (1 << 9)
+#define CAMSS_CSID_CID_n_CFG_RDI_MODE_RAW_DUMP (0 << 10)
+#define CAMSS_CSID_CID_n_CFG_RDI_MODE_PLAIN_PACKING (1 << 10)
+#define CAMSS_CSID_IRQ_CLEAR_CMD(v) ((v) == CAMSS_8x16 ? 0x060 : 0x064)
+#define CAMSS_CSID_IRQ_MASK(v) ((v) == CAMSS_8x16 ? 0x064 : 0x068)
+#define CAMSS_CSID_IRQ_STATUS(v) ((v) == CAMSS_8x16 ? 0x068 : 0x06c)
+#define CAMSS_CSID_TG_CTRL(v) ((v) == CAMSS_8x16 ? 0x0a0 : 0x0a8)
#define CAMSS_CSID_TG_CTRL_DISABLE 0xa06436
#define CAMSS_CSID_TG_CTRL_ENABLE 0xa06437
-#define CAMSS_CSID_TG_VC_CFG 0x0a4
+#define CAMSS_CSID_TG_VC_CFG(v) ((v) == CAMSS_8x16 ? 0x0a4 : 0x0ac)
#define CAMSS_CSID_TG_VC_CFG_H_BLANKING 0x3ff
#define CAMSS_CSID_TG_VC_CFG_V_BLANKING 0x7f
-#define CAMSS_CSID_TG_DT_n_CGG_0(n) (0x0ac + 0xc * (n))
-#define CAMSS_CSID_TG_DT_n_CGG_1(n) (0x0b0 + 0xc * (n))
-#define CAMSS_CSID_TG_DT_n_CGG_2(n) (0x0b4 + 0xc * (n))
+#define CAMSS_CSID_TG_DT_n_CGG_0(v, n) \
+ (((v) == CAMSS_8x16 ? 0x0ac : 0x0b4) + 0xc * (n))
+#define CAMSS_CSID_TG_DT_n_CGG_1(v, n) \
+ (((v) == CAMSS_8x16 ? 0x0b0 : 0x0b8) + 0xc * (n))
+#define CAMSS_CSID_TG_DT_n_CGG_2(v, n) \
+ (((v) == CAMSS_8x16 ? 0x0b4 : 0x0bc) + 0xc * (n))
#define DATA_TYPE_EMBEDDED_DATA_8BIT 0x12
#define DATA_TYPE_YUV422_8BIT 0x1e
@@ -56,15 +64,17 @@
#define DATA_TYPE_RAW_8BIT 0x2a
#define DATA_TYPE_RAW_10BIT 0x2b
#define DATA_TYPE_RAW_12BIT 0x2c
+#define DATA_TYPE_RAW_14BIT 0x2d
#define DECODE_FORMAT_UNCOMPRESSED_6_BIT 0x0
#define DECODE_FORMAT_UNCOMPRESSED_8_BIT 0x1
#define DECODE_FORMAT_UNCOMPRESSED_10_BIT 0x2
#define DECODE_FORMAT_UNCOMPRESSED_12_BIT 0x3
+#define DECODE_FORMAT_UNCOMPRESSED_14_BIT 0x8
#define CSID_RESET_TIMEOUT_MS 500
-struct csid_fmts {
+struct csid_format {
u32 code;
u8 data_type;
u8 decode_format;
@@ -72,7 +82,7 @@ struct csid_fmts {
u8 spp; /* bus samples per pixel */
};
-static const struct csid_fmts csid_input_fmts[] = {
+static const struct csid_format csid_formats_8x16[] = {
{
MEDIA_BUS_FMT_UYVY8_2X8,
DATA_TYPE_YUV422_8BIT,
@@ -184,20 +194,241 @@ static const struct csid_fmts csid_input_fmts[] = {
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
- }
+ },
+ {
+ MEDIA_BUS_FMT_Y10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
};
-static const struct csid_fmts *csid_get_fmt_entry(u32 code)
+static const struct csid_format csid_formats_8x96[] = {
+ {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR14_1X14,
+ DATA_TYPE_RAW_14BIT,
+ DECODE_FORMAT_UNCOMPRESSED_14_BIT,
+ 14,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG14_1X14,
+ DATA_TYPE_RAW_14BIT,
+ DECODE_FORMAT_UNCOMPRESSED_14_BIT,
+ 14,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG14_1X14,
+ DATA_TYPE_RAW_14BIT,
+ DECODE_FORMAT_UNCOMPRESSED_14_BIT,
+ 14,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB14_1X14,
+ DATA_TYPE_RAW_14BIT,
+ DECODE_FORMAT_UNCOMPRESSED_14_BIT,
+ 14,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_Y10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+};
+
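+/*
+ * Look up a media bus code: a non-zero req_code is returned if it is in the
+ * list, otherwise the index-th entry is returned for format enumeration.
+ * Falls back to the first entry.
+ */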
+static u32 csid_find_code(u32 *code, unsigned int n_code,
+ unsigned int index, u32 req_code)
+{
+ int i;
+
+ if (!req_code && (index >= n_code))
+ return 0;
+
+ for (i = 0; i < n_code; i++)
+ if (req_code) {
+ if (req_code == code[i])
+ return req_code;
+ } else {
+ if (i == index)
+ return code[i];
+ }
+
+ return code[0];
+}
+
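+/*
+ * Return the media bus code available on the source pad for a given sink
+ * code: 8x16 only passes the sink format through, while 8x96 can also
+ * repack 10-bit Bayer and greyscale formats into 16-bit padded samples.
+ */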
+static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
+ unsigned int index, u32 src_req_code)
+{
+ if (csid->camss->version == CAMSS_8x16) {
+ if (index > 0)
+ return 0;
+
+ return sink_code;
+ } else if (csid->camss->version == CAMSS_8x96) {
+ switch (sink_code) {
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ };
+
+ return csid_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_Y10_1X10:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
+ };
+
+ return csid_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ default:
+ if (index > 0)
+ return 0;
+
+ return sink_code;
+ }
+ } else {
+ return 0;
+ }
+}
+
+static const struct csid_format *csid_get_fmt_entry(
+ const struct csid_format *formats,
+ unsigned int nformat,
+ u32 code)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++)
- if (code == csid_input_fmts[i].code)
- return &csid_input_fmts[i];
+ for (i = 0; i < nformat; i++)
+ if (code == formats[i].code)
+ return &formats[i];
WARN(1, "Unknown format\n");
- return &csid_input_fmts[0];
+ return &formats[0];
}
/*
@@ -210,10 +441,11 @@ static const struct csid_fmts *csid_get_fmt_entry(u32 code)
static irqreturn_t csid_isr(int irq, void *dev)
{
struct csid_device *csid = dev;
+ enum camss_version ver = csid->camss->version;
u32 value;
- value = readl_relaxed(csid->base + CAMSS_CSID_IRQ_STATUS);
- writel_relaxed(value, csid->base + CAMSS_CSID_IRQ_CLEAR_CMD);
+ value = readl_relaxed(csid->base + CAMSS_CSID_IRQ_STATUS(ver));
+ writel_relaxed(value, csid->base + CAMSS_CSID_IRQ_CLEAR_CMD(ver));
if ((value >> 11) & 0x1)
complete(&csid->reset_complete);
@@ -227,7 +459,7 @@ static irqreturn_t csid_isr(int irq, void *dev)
*/
static int csid_set_clock_rates(struct csid_device *csid)
{
- struct device *dev = to_device_index(csid, csid->id);
+ struct device *dev = csid->camss->dev;
u32 pixel_clock;
int i, j;
int ret;
@@ -240,11 +472,16 @@ static int csid_set_clock_rates(struct csid_device *csid)
struct camss_clock *clock = &csid->clock[i];
if (!strcmp(clock->name, "csi0") ||
- !strcmp(clock->name, "csi1")) {
- u8 bpp = csid_get_fmt_entry(
- csid->fmt[MSM_CSIPHY_PAD_SINK].code)->bpp;
+ !strcmp(clock->name, "csi1") ||
+ !strcmp(clock->name, "csi2") ||
+ !strcmp(clock->name, "csi3")) {
+ const struct csid_format *f = csid_get_fmt_entry(
+ csid->formats,
+ csid->nformats,
+ csid->fmt[MSM_CSIPHY_PAD_SINK].code);
u8 num_lanes = csid->phy.lane_cnt;
- u64 min_rate = pixel_clock * bpp / (2 * num_lanes * 4);
+ u64 min_rate = pixel_clock * f->bpp /
+ (2 * num_lanes * 4);
long rate;
camss_add_clock_margin(&min_rate);
@@ -294,13 +531,13 @@ static int csid_reset(struct csid_device *csid)
reinit_completion(&csid->reset_complete);
- writel_relaxed(0x7fff, csid->base + CAMSS_CSID_RST_CMD);
+ writel_relaxed(0x7fff, csid->base +
+ CAMSS_CSID_RST_CMD(csid->camss->version));
time = wait_for_completion_timeout(&csid->reset_complete,
msecs_to_jiffies(CSID_RESET_TIMEOUT_MS));
if (!time) {
- dev_err(to_device_index(csid, csid->id),
- "CSID reset timeout\n");
+ dev_err(csid->camss->dev, "CSID reset timeout\n");
return -EIO;
}
@@ -317,25 +554,33 @@ static int csid_reset(struct csid_device *csid)
static int csid_set_power(struct v4l2_subdev *sd, int on)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
- struct device *dev = to_device_index(csid, csid->id);
+ struct device *dev = csid->camss->dev;
int ret;
if (on) {
u32 hw_version;
- ret = regulator_enable(csid->vdda);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0)
return ret;
+ ret = regulator_enable(csid->vdda);
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
+ return ret;
+ }
+
ret = csid_set_clock_rates(csid);
if (ret < 0) {
regulator_disable(csid->vdda);
+ pm_runtime_put_sync(dev);
return ret;
}
ret = camss_enable_clocks(csid->nclocks, csid->clock, dev);
if (ret < 0) {
regulator_disable(csid->vdda);
+ pm_runtime_put_sync(dev);
return ret;
}
@@ -346,6 +591,7 @@ static int csid_set_power(struct v4l2_subdev *sd, int on)
disable_irq(csid->irq);
camss_disable_clocks(csid->nclocks, csid->clock);
regulator_disable(csid->vdda);
+ pm_runtime_put_sync(dev);
return ret;
}
@@ -355,6 +601,7 @@ static int csid_set_power(struct v4l2_subdev *sd, int on)
disable_irq(csid->irq);
camss_disable_clocks(csid->nclocks, csid->clock);
ret = regulator_disable(csid->vdda);
+ pm_runtime_put_sync(dev);
}
return ret;
@@ -373,6 +620,7 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
struct csid_testgen_config *tg = &csid->testgen;
+ enum camss_version ver = csid->camss->version;
u32 val;
if (enable) {
@@ -383,7 +631,7 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
ret = v4l2_ctrl_handler_setup(&csid->ctrls);
if (ret < 0) {
- dev_err(to_device_index(csid, csid->id),
+ dev_err(csid->camss->dev,
"could not sync v4l2 controls: %d\n", ret);
return ret;
}
@@ -392,40 +640,47 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
!media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
return -ENOLINK;
- dt = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SRC].code)->
- data_type;
-
if (tg->enabled) {
/* Config Test Generator */
struct v4l2_mbus_framefmt *f =
&csid->fmt[MSM_CSID_PAD_SRC];
- u8 bpp = csid_get_fmt_entry(f->code)->bpp;
- u8 spp = csid_get_fmt_entry(f->code)->spp;
- u32 num_bytes_per_line = f->width * bpp * spp / 8;
+ const struct csid_format *format = csid_get_fmt_entry(
+ csid->formats, csid->nformats, f->code);
+ u32 num_bytes_per_line =
+ f->width * format->bpp * format->spp / 8;
u32 num_lines = f->height;
/* 31:24 V blank, 23:13 H blank, 3:2 num of active DT */
/* 1:0 VC */
val = ((CAMSS_CSID_TG_VC_CFG_V_BLANKING & 0xff) << 24) |
((CAMSS_CSID_TG_VC_CFG_H_BLANKING & 0x7ff) << 13);
- writel_relaxed(val, csid->base + CAMSS_CSID_TG_VC_CFG);
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_TG_VC_CFG(ver));
/* 28:16 bytes per lines, 12:0 num of lines */
val = ((num_bytes_per_line & 0x1fff) << 16) |
(num_lines & 0x1fff);
writel_relaxed(val, csid->base +
- CAMSS_CSID_TG_DT_n_CGG_0(0));
+ CAMSS_CSID_TG_DT_n_CGG_0(ver, 0));
+
+ dt = format->data_type;
/* 5:0 data type */
val = dt;
writel_relaxed(val, csid->base +
- CAMSS_CSID_TG_DT_n_CGG_1(0));
+ CAMSS_CSID_TG_DT_n_CGG_1(ver, 0));
/* 2:0 output test pattern */
val = tg->payload_mode;
writel_relaxed(val, csid->base +
- CAMSS_CSID_TG_DT_n_CGG_2(0));
+ CAMSS_CSID_TG_DT_n_CGG_2(ver, 0));
+
+ df = format->decode_format;
} else {
+ struct v4l2_mbus_framefmt *f =
+ &csid->fmt[MSM_CSID_PAD_SINK];
+ const struct csid_format *format = csid_get_fmt_entry(
+ csid->formats, csid->nformats, f->code);
struct csid_phy_config *phy = &csid->phy;
val = phy->lane_cnt - 1;
@@ -439,30 +694,54 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
writel_relaxed(val,
csid->base + CAMSS_CSID_CORE_CTRL_1);
+
+ dt = format->data_type;
+ df = format->decode_format;
}
/* Config LUT */
dt_shift = (cid % 4) * 8;
- df = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SINK].code)->
- decode_format;
- val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
+ val = readl_relaxed(csid->base +
+ CAMSS_CSID_CID_LUT_VC_n(ver, vc));
val &= ~(0xff << dt_shift);
val |= dt << dt_shift;
- writel_relaxed(val, csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_CID_LUT_VC_n(ver, vc));
+
+ val = CAMSS_CSID_CID_n_CFG_ISPIF_EN;
+ val |= CAMSS_CSID_CID_n_CFG_RDI_EN;
+ val |= df << CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT;
+ val |= CAMSS_CSID_CID_n_CFG_RDI_MODE_RAW_DUMP;
+
+ if (csid->camss->version == CAMSS_8x96) {
+ u32 sink_code = csid->fmt[MSM_CSID_PAD_SINK].code;
+ u32 src_code = csid->fmt[MSM_CSID_PAD_SRC].code;
+
+ if ((sink_code == MEDIA_BUS_FMT_SBGGR10_1X10 &&
+ src_code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE) ||
+ (sink_code == MEDIA_BUS_FMT_Y10_1X10 &&
+ src_code == MEDIA_BUS_FMT_Y10_2X8_PADHI_LE)) {
+ val |= CAMSS_CSID_CID_n_CFG_RDI_MODE_PLAIN_PACKING;
+ val |= CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_16;
+ val |= CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_LSB;
+ }
+ }
- val = (df << 4) | 0x3;
- writel_relaxed(val, csid->base + CAMSS_CSID_CID_n_CFG(cid));
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_CID_n_CFG(ver, cid));
if (tg->enabled) {
val = CAMSS_CSID_TG_CTRL_ENABLE;
- writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL);
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_TG_CTRL(ver));
}
} else {
if (tg->enabled) {
val = CAMSS_CSID_TG_CTRL_DISABLE;
- writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL);
+ writel_relaxed(val, csid->base +
+ CAMSS_CSID_TG_CTRL(ver));
}
}
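Two worked details for the LUT and CFG writes above. The VC LUT packs four
8-bit data type entries per register, so for an illustrative cid = 6:

	dt_shift = (6 % 4) * 8 = 16;
	val &= ~(0xff << 16);	/* clear the old entry for CID 6 */
	val |= dt << 16;	/* install the new data type */

On 8x96, the PLAIN_PACKING/PLAIN_FORMAT_16/ALIGNMENT_LSB bits are set only
for the 10-bit unpacking pairs (SBGGR10_1X10 to SBGGR10_2X8_PADHI_LE and
Y10_1X10 to Y10_2X8_PADHI_LE), i.e. when the source pad carries each
10-bit sample LSB-aligned in a 16-bit container.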
@@ -510,12 +789,12 @@ static void csid_try_format(struct csid_device *csid,
case MSM_CSID_PAD_SINK:
/* Set format on sink pad */
- for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++)
- if (fmt->code == csid_input_fmts[i].code)
+ for (i = 0; i < csid->nformats; i++)
+ if (fmt->code == csid->formats[i].code)
break;
/* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(csid_input_fmts))
+ if (i >= csid->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
@@ -528,23 +807,23 @@ static void csid_try_format(struct csid_device *csid,
case MSM_CSID_PAD_SRC:
if (csid->testgen_mode->cur.val == 0) {
- /* Test generator is disabled, keep pad formats */
- /* in sync - set and return a format same as sink pad */
- struct v4l2_mbus_framefmt format;
+ /* Test generator is disabled, */
+ /* keep pad formats in sync */
+ u32 code = fmt->code;
- format = *__csid_get_format(csid, cfg,
- MSM_CSID_PAD_SINK, which);
- *fmt = format;
+ *fmt = *__csid_get_format(csid, cfg,
+ MSM_CSID_PAD_SINK, which);
+ fmt->code = csid_src_pad_code(csid, fmt->code, 0, code);
} else {
- /* Test generator is enabled, set format on source*/
+ /* Test generator is enabled, set format on source */
/* pad to allow test generator usage */
- for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++)
- if (csid_input_fmts[i].code == fmt->code)
+ for (i = 0; i < csid->nformats; i++)
+ if (csid->formats[i].code == fmt->code)
break;
/* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(csid_input_fmts))
+ if (i >= csid->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
@@ -570,27 +849,29 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_mbus_code_enum *code)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
if (code->pad == MSM_CSID_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(csid_input_fmts))
+ if (code->index >= csid->nformats)
return -EINVAL;
- code->code = csid_input_fmts[code->index].code;
+ code->code = csid->formats[code->index].code;
} else {
if (csid->testgen_mode->cur.val == 0) {
- if (code->index > 0)
- return -EINVAL;
+ struct v4l2_mbus_framefmt *sink_fmt;
- format = __csid_get_format(csid, cfg, MSM_CSID_PAD_SINK,
- code->which);
+ sink_fmt = __csid_get_format(csid, cfg,
+ MSM_CSID_PAD_SINK,
+ code->which);
- code->code = format->code;
+ code->code = csid_src_pad_code(csid, sink_fmt->code,
+ code->index, 0);
+ if (!code->code)
+ return -EINVAL;
} else {
- if (code->index >= ARRAY_SIZE(csid_input_fmts))
+ if (code->index >= csid->nformats)
return -EINVAL;
- code->code = csid_input_fmts[code->index].code;
+ code->code = csid->formats[code->index].code;
}
}
@@ -798,17 +1079,30 @@ static const struct v4l2_ctrl_ops csid_ctrl_ops = {
*
* Return 0 on success or a negative error code otherwise
*/
-int msm_csid_subdev_init(struct csid_device *csid,
+int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
const struct resources *res, u8 id)
{
- struct device *dev = to_device_index(csid, id);
+ struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *r;
int i, j;
int ret;
+ csid->camss = camss;
csid->id = id;
+ if (camss->version == CAMSS_8x16) {
+ csid->formats = csid_formats_8x16;
+		csid->nformats = ARRAY_SIZE(csid_formats_8x16);
+ } else if (camss->version == CAMSS_8x96) {
+ csid->formats = csid_formats_8x96;
+		csid->nformats = ARRAY_SIZE(csid_formats_8x96);
+ } else {
+ return -EINVAL;
+ }
+
/* Memory */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
@@ -980,6 +1274,8 @@ static int csid_link_setup(struct media_entity *entity,
static const struct v4l2_subdev_core_ops csid_core_ops = {
.s_power = csid_set_power,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_video_ops csid_video_ops = {
@@ -1020,12 +1316,13 @@ int msm_csid_register_entity(struct csid_device *csid,
{
struct v4l2_subdev *sd = &csid->subdev;
struct media_pad *pads = csid->pads;
- struct device *dev = to_device_index(csid, csid->id);
+ struct device *dev = csid->camss->dev;
int ret;
v4l2_subdev_init(sd, &csid_v4l2_ops);
sd->internal_ops = &csid_v4l2_internal_ops;
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
MSM_CSID_NAME, csid->id);
v4l2_set_subdevdata(sd, csid);
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h
index 8682d3081bc3..1824b3745e10 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-csid.h
+++ b/drivers/media/platform/qcom/camss/camss-csid.h
@@ -1,19 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* camss-csid.h
*
* Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
*
* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#ifndef QC_MSM_CAMSS_CSID_H
#define QC_MSM_CAMSS_CSID_H
@@ -50,6 +42,7 @@ struct csid_phy_config {
};
struct csid_device {
+ struct camss *camss;
u8 id;
struct v4l2_subdev subdev;
struct media_pad pads[MSM_CSID_PADS_NUM];
@@ -65,11 +58,13 @@ struct csid_device {
struct v4l2_mbus_framefmt fmt[MSM_CSID_PADS_NUM];
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *testgen_mode;
+ const struct csid_format *formats;
+ unsigned int nformats;
};
struct resources;
-int msm_csid_subdev_init(struct csid_device *csid,
+int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
const struct resources *res, u8 id);
int msm_csid_register_entity(struct csid_device *csid,
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
new file mode 100644
index 000000000000..c832539397d7
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-csiphy-2ph-1-0.c
+ *
+ * Qualcomm MSM Camera Subsystem - CSIPHY Module 2phase v1.0
+ *
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2016-2018 Linaro Ltd.
+ */
+
+#include "camss-csiphy.h"
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n))
+#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n))
+#define CAMSS_CSI_PHY_GLBL_RESET 0x140
+#define CAMSS_CSI_PHY_GLBL_PWR_CFG 0x144
+#define CAMSS_CSI_PHY_GLBL_IRQ_CMD 0x164
+#define CAMSS_CSI_PHY_HW_VERSION 0x188
+#define CAMSS_CSI_PHY_INTERRUPT_STATUSn(n) (0x18c + 0x4 * (n))
+#define CAMSS_CSI_PHY_INTERRUPT_MASKn(n) (0x1ac + 0x4 * (n))
+#define CAMSS_CSI_PHY_INTERRUPT_CLEARn(n) (0x1cc + 0x4 * (n))
+#define CAMSS_CSI_PHY_GLBL_T_INIT_CFG0 0x1ec
+#define CAMSS_CSI_PHY_T_WAKEUP_CFG0 0x1f4
+
+static void csiphy_hw_version_read(struct csiphy_device *csiphy,
+ struct device *dev)
+{
+ u8 hw_version = readl_relaxed(csiphy->base +
+ CAMSS_CSI_PHY_HW_VERSION);
+
+ dev_dbg(dev, "CSIPHY HW Version = 0x%02x\n", hw_version);
+}
+
+/*
+ * csiphy_reset - Perform software reset on CSIPHY module
+ * @csiphy: CSIPHY device
+ */
+static void csiphy_reset(struct csiphy_device *csiphy)
+{
+ writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
+ usleep_range(5000, 8000);
+ writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
+}
+
+/*
+ * csiphy_settle_cnt_calc - Calculate settle count value
+ * @pixel_clock: Pixel clock frequency in Hz
+ * @bpp: Bits per pixel
+ * @num_lanes: Number of CSI2 data lanes
+ * @timer_clk_rate: Settle timer clock frequency in Hz
+ *
+ * Helper function to calculate settle count value. This is
+ * based on the CSI2 T_hs_settle parameter which in turn
+ * is calculated based on the CSI2 transmitter pixel clock
+ * frequency.
+ *
+ * Return settle count value
+ */
+static u8 csiphy_settle_cnt_calc(u32 pixel_clock, u8 bpp, u8 num_lanes,
+ u32 timer_clk_rate)
+{
+ u32 mipi_clock; /* Hz */
+ u32 ui; /* ps */
+ u32 timer_period; /* ps */
+ u32 t_hs_prepare_max; /* ps */
+ u32 t_hs_prepare_zero_min; /* ps */
+ u32 t_hs_settle; /* ps */
+ u8 settle_cnt;
+
+ mipi_clock = pixel_clock * bpp / (2 * num_lanes);
+ ui = div_u64(1000000000000LL, mipi_clock);
+ ui /= 2;
+ t_hs_prepare_max = 85000 + 6 * ui;
+ t_hs_prepare_zero_min = 145000 + 10 * ui;
+ t_hs_settle = (t_hs_prepare_max + t_hs_prepare_zero_min) / 2;
+
+ timer_period = div_u64(1000000000000LL, timer_clk_rate);
+ settle_cnt = t_hs_settle / timer_period - 1;
+
+ return settle_cnt;
+}
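A worked pass through the arithmetic above, with illustrative inputs
(pixel_clock = 74.25 MHz, bpp = 10, num_lanes = 4) and an assumed
200 MHz timer clock:

	mipi_clock   = 74250000 * 10 / (2 * 4) = 92812500 Hz
	ui           = 10^12 / 92812500 / 2 = 5387 ps
	t_hs_settle  = (85000 + 6*5387 + 145000 + 10*5387) / 2 = 158096 ps
	timer_period = 10^12 / 200000000 = 5000 ps
	settle_cnt   = 158096 / 5000 - 1 = 30	(integer division)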
+
+static void csiphy_lanes_enable(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg,
+ u32 pixel_clock, u8 bpp, u8 lane_mask)
+{
+ struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
+ u8 settle_cnt;
+ u8 val, l = 0;
+ int i = 0;
+
+ settle_cnt = csiphy_settle_cnt_calc(pixel_clock, bpp, c->num_data,
+ csiphy->timer_clk_rate);
+
+ writel_relaxed(0x1, csiphy->base +
+ CAMSS_CSI_PHY_GLBL_T_INIT_CFG0);
+ writel_relaxed(0x1, csiphy->base +
+ CAMSS_CSI_PHY_T_WAKEUP_CFG0);
+
+ val = 0x1;
+ val |= lane_mask << 1;
+ writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG);
+
+ val = cfg->combo_mode << 4;
+ writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
+
+ for (i = 0; i <= c->num_data; i++) {
+ if (i == c->num_data)
+ l = c->clk.pos;
+ else
+ l = c->data[i].pos;
+
+ writel_relaxed(0x10, csiphy->base +
+ CAMSS_CSI_PHY_LNn_CFG2(l));
+ writel_relaxed(settle_cnt, csiphy->base +
+ CAMSS_CSI_PHY_LNn_CFG3(l));
+ writel_relaxed(0x3f, csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_MASKn(l));
+ writel_relaxed(0x3f, csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_CLEARn(l));
+ }
+}
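Note the loop bound above: it iterates c->num_data + 1 times, and the
final pass (i == c->num_data) selects c->clk.pos, so the clock lane gets
the same CFG2/CFG3/interrupt setup as each data lane.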
+
+static void csiphy_lanes_disable(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg)
+{
+ struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
+ u8 l = 0;
+ int i = 0;
+
+ for (i = 0; i <= c->num_data; i++) {
+ if (i == c->num_data)
+ l = c->clk.pos;
+ else
+ l = c->data[i].pos;
+
+ writel_relaxed(0x0, csiphy->base +
+ CAMSS_CSI_PHY_LNn_CFG2(l));
+ }
+
+ writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG);
+}
+
+/*
+ * csiphy_isr - CSIPHY module interrupt handler
+ * @irq: Interrupt line
+ * @dev: CSIPHY device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t csiphy_isr(int irq, void *dev)
+{
+ struct csiphy_device *csiphy = dev;
+ u8 i;
+
+ for (i = 0; i < 8; i++) {
+ u8 val = readl_relaxed(csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_STATUSn(i));
+ writel_relaxed(val, csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
+ writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD);
+ writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD);
+ writel_relaxed(0x0, csiphy->base +
+ CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
+ }
+
+ return IRQ_HANDLED;
+}
+
+const struct csiphy_hw_ops csiphy_ops_2ph_1_0 = {
+ .hw_version_read = csiphy_hw_version_read,
+ .reset = csiphy_reset,
+ .lanes_enable = csiphy_lanes_enable,
+ .lanes_disable = csiphy_lanes_disable,
+ .isr = csiphy_isr,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
new file mode 100644
index 000000000000..bcd0dfd33618
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-csiphy-3ph-1-0.c
+ *
+ * Qualcomm MSM Camera Subsystem - CSIPHY Module 3phase v1.0
+ *
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2016-2018 Linaro Ltd.
+ */
+
+#include "camss-csiphy.h"
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6))
+#define CSIPHY_3PH_LNn_CFG2(n) (0x004 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG2_LP_REC_EN_INT BIT(3)
+#define CSIPHY_3PH_LNn_CFG3(n) (0x008 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG4(n) (0x00c + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS 0xa4
+#define CSIPHY_3PH_LNn_CFG5(n) (0x010 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG5_T_HS_DTERM 0x02
+#define CSIPHY_3PH_LNn_CFG5_HS_REC_EQ_FQ_INT 0x50
+#define CSIPHY_3PH_LNn_TEST_IMP(n) (0x01c + 0x100 * (n))
+#define CSIPHY_3PH_LNn_TEST_IMP_HS_TERM_IMP 0xa
+#define CSIPHY_3PH_LNn_MISC1(n) (0x028 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_MISC1_IS_CLKLANE BIT(2)
+#define CSIPHY_3PH_LNn_CFG6(n) (0x02c + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG6_SWI_FORCE_INIT_EXIT BIT(0)
+#define CSIPHY_3PH_LNn_CFG7(n) (0x030 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG7_SWI_T_INIT 0x2
+#define CSIPHY_3PH_LNn_CFG8(n) (0x034 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG8_SWI_SKIP_WAKEUP BIT(0)
+#define CSIPHY_3PH_LNn_CFG8_SKEW_FILTER_ENABLE BIT(1)
+#define CSIPHY_3PH_LNn_CFG9(n) (0x038 + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CFG9_SWI_T_WAKEUP 0x1
+#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15(n) (0x03c + 0x100 * (n))
+#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15_SWI_SOT_SYMBOL 0xb8
+
+#define CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(n) (0x800 + 0x4 * (n))
+#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B BIT(0)
+#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID BIT(1)
+#define CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(n) (0x8b0 + 0x4 * (n))
+
+static void csiphy_hw_version_read(struct csiphy_device *csiphy,
+ struct device *dev)
+{
+ u32 hw_version;
+
+ writel(CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID,
+ csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+
+ hw_version = readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(12));
+ hw_version |= readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(13)) << 8;
+ hw_version |= readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(14)) << 16;
+ hw_version |= readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(15)) << 24;
+
+	dev_dbg(dev, "CSIPHY 3PH HW Version = 0x%08x\n", hw_version);
+}
+
+/*
+ * csiphy_reset - Perform software reset on CSIPHY module
+ * @csiphy: CSIPHY device
+ */
+static void csiphy_reset(struct csiphy_device *csiphy)
+{
+ writel_relaxed(0x1, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
+ usleep_range(5000, 8000);
+ writel_relaxed(0x0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
+}
+
+static irqreturn_t csiphy_isr(int irq, void *dev)
+{
+ struct csiphy_device *csiphy = dev;
+ int i;
+
+ for (i = 0; i < 11; i++) {
+ int c = i + 22;
+ u8 val = readl_relaxed(csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(i));
+
+ writel_relaxed(val, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(c));
+ }
+
+ writel_relaxed(0x1, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(10));
+ writel_relaxed(0x0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(10));
+
+ for (i = 22; i < 33; i++)
+ writel_relaxed(0x0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(i));
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csiphy_settle_cnt_calc - Calculate settle count value
+ * @pixel_clock: Pixel clock frequency in Hz
+ * @bpp: Bits per pixel
+ * @num_lanes: Number of CSI2 data lanes
+ * @timer_clk_rate: Settle timer clock frequency in Hz
+ *
+ * Helper function to calculate settle count value. This is
+ * based on the CSI2 T_hs_settle parameter which in turn
+ * is calculated based on the CSI2 transmitter pixel clock
+ * frequency.
+ *
+ * Return settle count value
+ */
+static u8 csiphy_settle_cnt_calc(u32 pixel_clock, u8 bpp, u8 num_lanes,
+ u32 timer_clk_rate)
+{
+ u32 mipi_clock; /* Hz */
+ u32 ui; /* ps */
+ u32 timer_period; /* ps */
+ u32 t_hs_prepare_max; /* ps */
+ u32 t_hs_settle; /* ps */
+ u8 settle_cnt;
+
+ mipi_clock = pixel_clock * bpp / (2 * num_lanes);
+ ui = div_u64(1000000000000LL, mipi_clock);
+ ui /= 2;
+ t_hs_prepare_max = 85000 + 6 * ui;
+ t_hs_settle = t_hs_prepare_max;
+
+ timer_period = div_u64(1000000000000LL, timer_clk_rate);
+ settle_cnt = t_hs_settle / timer_period - 6;
+
+ return settle_cnt;
+}
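Compared with the 2ph variant, t_hs_settle here is t_hs_prepare_max alone
and the count is derated by 6 rather than 1. With the same illustrative
inputs as before (74.25 MHz pixel clock, RAW10, 4 lanes, assumed 200 MHz
timer clock):

	settle_cnt = (85000 + 6*5387) / 5000 - 6 = 23 - 6 = 17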
+
+static void csiphy_lanes_enable(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg,
+ u32 pixel_clock, u8 bpp, u8 lane_mask)
+{
+ struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
+ u8 settle_cnt;
+ u8 val, l = 0;
+ int i;
+
+ settle_cnt = csiphy_settle_cnt_calc(pixel_clock, bpp, c->num_data,
+ csiphy->timer_clk_rate);
+
+ val = BIT(c->clk.pos);
+ for (i = 0; i < c->num_data; i++)
+ val |= BIT(c->data[i].pos * 2);
+
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(5));
+
+ val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+
+ for (i = 0; i <= c->num_data; i++) {
+ if (i == c->num_data)
+ l = 7;
+ else
+ l = c->data[i].pos * 2;
+
+ val = CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG;
+ val |= 0x17;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG1(l));
+
+ val = CSIPHY_3PH_LNn_CFG2_LP_REC_EN_INT;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG2(l));
+
+ val = settle_cnt;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG3(l));
+
+ val = CSIPHY_3PH_LNn_CFG5_T_HS_DTERM |
+ CSIPHY_3PH_LNn_CFG5_HS_REC_EQ_FQ_INT;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG5(l));
+
+ val = CSIPHY_3PH_LNn_CFG6_SWI_FORCE_INIT_EXIT;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG6(l));
+
+ val = CSIPHY_3PH_LNn_CFG7_SWI_T_INIT;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG7(l));
+
+ val = CSIPHY_3PH_LNn_CFG8_SWI_SKIP_WAKEUP |
+ CSIPHY_3PH_LNn_CFG8_SKEW_FILTER_ENABLE;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG8(l));
+
+ val = CSIPHY_3PH_LNn_CFG9_SWI_T_WAKEUP;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG9(l));
+
+ val = CSIPHY_3PH_LNn_TEST_IMP_HS_TERM_IMP;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_TEST_IMP(l));
+
+ val = CSIPHY_3PH_LNn_CSI_LANE_CTRL15_SWI_SOT_SYMBOL;
+ writel_relaxed(val, csiphy->base +
+ CSIPHY_3PH_LNn_CSI_LANE_CTRL15(l));
+ }
+
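+	/*
+	 * l is 7 here (the clock lane), left over from the final loop
+	 * iteration above, so the writes below configure the clock lane.
+	 */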
+ val = CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG1(l));
+
+ val = CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG4(l));
+
+ val = CSIPHY_3PH_LNn_MISC1_IS_CLKLANE;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_MISC1(l));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(11));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(12));
+
+ val = 0xfb;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(13));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(14));
+
+ val = 0x7f;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(15));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(16));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(17));
+
+ val = 0xef;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(18));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(19));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(20));
+
+ val = 0xff;
+ writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(21));
+}
+
+static void csiphy_lanes_disable(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg)
+{
+ writel_relaxed(0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(5));
+
+ writel_relaxed(0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+}
+
+const struct csiphy_hw_ops csiphy_ops_3ph_1_0 = {
+ .hw_version_read = csiphy_hw_version_read,
+ .reset = csiphy_reset,
+ .lanes_enable = csiphy_lanes_enable,
+ .lanes_disable = csiphy_lanes_disable,
+ .isr = csiphy_isr,
+};
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 7e61caba6a2d..4559f3b1b38c 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -1,19 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* camss-csiphy.c
*
* Qualcomm MSM Camera Subsystem - CSIPHY Module
*
* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2016-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2016-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/delay.h>
@@ -21,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <media/media-entity.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
@@ -30,131 +23,75 @@
#define MSM_CSIPHY_NAME "msm_csiphy"
-#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n))
-#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n))
-#define CAMSS_CSI_PHY_GLBL_RESET 0x140
-#define CAMSS_CSI_PHY_GLBL_PWR_CFG 0x144
-#define CAMSS_CSI_PHY_GLBL_IRQ_CMD 0x164
-#define CAMSS_CSI_PHY_HW_VERSION 0x188
-#define CAMSS_CSI_PHY_INTERRUPT_STATUSn(n) (0x18c + 0x4 * (n))
-#define CAMSS_CSI_PHY_INTERRUPT_MASKn(n) (0x1ac + 0x4 * (n))
-#define CAMSS_CSI_PHY_INTERRUPT_CLEARn(n) (0x1cc + 0x4 * (n))
-#define CAMSS_CSI_PHY_GLBL_T_INIT_CFG0 0x1ec
-#define CAMSS_CSI_PHY_T_WAKEUP_CFG0 0x1f4
-
-static const struct {
+struct csiphy_format {
u32 code;
u8 bpp;
-} csiphy_formats[] = {
- {
- MEDIA_BUS_FMT_UYVY8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_VYUY8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_YUYV8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_YVYU8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SBGGR8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SGBRG8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SGRBG8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SRGGB8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SBGGR10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SGBRG10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SGRBG10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SRGGB10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SBGGR12_1X12,
- 12,
- },
- {
- MEDIA_BUS_FMT_SGBRG12_1X12,
- 12,
- },
- {
- MEDIA_BUS_FMT_SGRBG12_1X12,
- 12,
- },
- {
- MEDIA_BUS_FMT_SRGGB12_1X12,
- 12,
- }
+};
+
+static const struct csiphy_format csiphy_formats_8x16[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
+ { MEDIA_BUS_FMT_Y10_1X10, 10 },
+};
+
+static const struct csiphy_format csiphy_formats_8x96[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, 14 },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, 14 },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, 14 },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, 14 },
+ { MEDIA_BUS_FMT_Y10_1X10, 10 },
};
/*
* csiphy_get_bpp - map media bus format to bits per pixel
+ * @formats: supported media bus formats array
+ * @nformats: size of @formats array
* @code: media bus format code
*
* Return number of bits per pixel
*/
-static u8 csiphy_get_bpp(u32 code)
+static u8 csiphy_get_bpp(const struct csiphy_format *formats,
+ unsigned int nformats, u32 code)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(csiphy_formats); i++)
- if (code == csiphy_formats[i].code)
- return csiphy_formats[i].bpp;
+ for (i = 0; i < nformats; i++)
+ if (code == formats[i].code)
+ return formats[i].bpp;
WARN(1, "Unknown format\n");
- return csiphy_formats[0].bpp;
-}
-
-/*
- * csiphy_isr - CSIPHY module interrupt handler
- * @irq: Interrupt line
- * @dev: CSIPHY device
- *
- * Return IRQ_HANDLED on success
- */
-static irqreturn_t csiphy_isr(int irq, void *dev)
-{
- struct csiphy_device *csiphy = dev;
- u8 i;
-
- for (i = 0; i < 8; i++) {
- u8 val = readl_relaxed(csiphy->base +
- CAMSS_CSI_PHY_INTERRUPT_STATUSn(i));
- writel_relaxed(val, csiphy->base +
- CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
- writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD);
- writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD);
- writel_relaxed(0x0, csiphy->base +
- CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
- }
-
- return IRQ_HANDLED;
+ return formats[0].bpp;
}
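For example, with the 8x96 table above, csiphy_get_bpp(csiphy_formats_8x96,
ARRAY_SIZE(csiphy_formats_8x96), MEDIA_BUS_FMT_SBGGR14_1X14) returns 14;
an unknown code triggers the WARN and falls back to the first table entry
(UYVY, 8 bpp).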
/*
@@ -163,7 +100,7 @@ static irqreturn_t csiphy_isr(int irq, void *dev)
*/
static int csiphy_set_clock_rates(struct csiphy_device *csiphy)
{
- struct device *dev = to_device_index(csiphy, csiphy->id);
+ struct device *dev = csiphy->camss->dev;
u32 pixel_clock;
int i, j;
int ret;
@@ -176,8 +113,10 @@ static int csiphy_set_clock_rates(struct csiphy_device *csiphy)
struct camss_clock *clock = &csiphy->clock[i];
if (!strcmp(clock->name, "csiphy0_timer") ||
- !strcmp(clock->name, "csiphy1_timer")) {
- u8 bpp = csiphy_get_bpp(
+ !strcmp(clock->name, "csiphy1_timer") ||
+ !strcmp(clock->name, "csiphy2_timer")) {
+ u8 bpp = csiphy_get_bpp(csiphy->formats,
+ csiphy->nformats,
csiphy->fmt[MSM_CSIPHY_PAD_SINK].code);
u8 num_lanes = csiphy->cfg.csi2->lane_cfg.num_data;
u64 min_rate = pixel_clock * bpp / (2 * num_lanes * 4);
@@ -221,17 +160,6 @@ static int csiphy_set_clock_rates(struct csiphy_device *csiphy)
}
/*
- * csiphy_reset - Perform software reset on CSIPHY module
- * @csiphy: CSIPHY device
- */
-static void csiphy_reset(struct csiphy_device *csiphy)
-{
- writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
- usleep_range(5000, 8000);
- writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
-}
-
-/*
* csiphy_set_power - Power on/off CSIPHY module
* @sd: CSIPHY V4L2 subdevice
* @on: Requested power state
@@ -241,31 +169,38 @@ static void csiphy_reset(struct csiphy_device *csiphy)
static int csiphy_set_power(struct v4l2_subdev *sd, int on)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
- struct device *dev = to_device_index(csiphy, csiphy->id);
+ struct device *dev = csiphy->camss->dev;
if (on) {
- u8 hw_version;
int ret;
- ret = csiphy_set_clock_rates(csiphy);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0)
return ret;
+ ret = csiphy_set_clock_rates(csiphy);
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
+ return ret;
+ }
+
ret = camss_enable_clocks(csiphy->nclocks, csiphy->clock, dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
return ret;
+ }
enable_irq(csiphy->irq);
- csiphy_reset(csiphy);
+ csiphy->ops->reset(csiphy);
- hw_version = readl_relaxed(csiphy->base +
- CAMSS_CSI_PHY_HW_VERSION);
- dev_dbg(dev, "CSIPHY HW Version = 0x%02x\n", hw_version);
+ csiphy->ops->hw_version_read(csiphy, dev);
} else {
disable_irq(csiphy->irq);
camss_disable_clocks(csiphy->nclocks, csiphy->clock);
+
+ pm_runtime_put_sync(dev);
}
return 0;
@@ -291,58 +226,6 @@ static u8 csiphy_get_lane_mask(struct csiphy_lanes_cfg *lane_cfg)
}
/*
- * csiphy_settle_cnt_calc - Calculate settle count value
- * @csiphy: CSIPHY device
- *
- * Helper function to calculate settle count value. This is
- * based on the CSI2 T_hs_settle parameter which in turn
- * is calculated based on the CSI2 transmitter pixel clock
- * frequency.
- *
- * Return settle count value or 0 if the CSI2 pixel clock
- * frequency is not available
- */
-static u8 csiphy_settle_cnt_calc(struct csiphy_device *csiphy)
-{
- u8 bpp = csiphy_get_bpp(
- csiphy->fmt[MSM_CSIPHY_PAD_SINK].code);
- u8 num_lanes = csiphy->cfg.csi2->lane_cfg.num_data;
- u32 pixel_clock; /* Hz */
- u32 mipi_clock; /* Hz */
- u32 ui; /* ps */
- u32 timer_period; /* ps */
- u32 t_hs_prepare_max; /* ps */
- u32 t_hs_prepare_zero_min; /* ps */
- u32 t_hs_settle; /* ps */
- u8 settle_cnt;
- int ret;
-
- ret = camss_get_pixel_clock(&csiphy->subdev.entity, &pixel_clock);
- if (ret) {
- dev_err(to_device_index(csiphy, csiphy->id),
- "Cannot get CSI2 transmitter's pixel clock\n");
- return 0;
- }
- if (!pixel_clock) {
- dev_err(to_device_index(csiphy, csiphy->id),
- "Got pixel clock == 0, cannot continue\n");
- return 0;
- }
-
- mipi_clock = pixel_clock * bpp / (2 * num_lanes);
- ui = div_u64(1000000000000LL, mipi_clock);
- ui /= 2;
- t_hs_prepare_max = 85000 + 6 * ui;
- t_hs_prepare_zero_min = 145000 + 10 * ui;
- t_hs_settle = (t_hs_prepare_max + t_hs_prepare_zero_min) / 2;
-
- timer_period = div_u64(1000000000000LL, csiphy->timer_clk_rate);
- settle_cnt = t_hs_settle / timer_period;
-
- return settle_cnt;
-}
-
-/*
* csiphy_stream_on - Enable streaming on CSIPHY module
* @csiphy: CSIPHY device
*
@@ -354,14 +237,24 @@ static u8 csiphy_settle_cnt_calc(struct csiphy_device *csiphy)
static int csiphy_stream_on(struct csiphy_device *csiphy)
{
struct csiphy_config *cfg = &csiphy->cfg;
+ u32 pixel_clock;
u8 lane_mask = csiphy_get_lane_mask(&cfg->csi2->lane_cfg);
- u8 settle_cnt;
+ u8 bpp = csiphy_get_bpp(csiphy->formats, csiphy->nformats,
+ csiphy->fmt[MSM_CSIPHY_PAD_SINK].code);
u8 val;
- int i = 0;
+ int ret;
- settle_cnt = csiphy_settle_cnt_calc(csiphy);
- if (!settle_cnt)
+ ret = camss_get_pixel_clock(&csiphy->subdev.entity, &pixel_clock);
+ if (ret) {
+ dev_err(csiphy->camss->dev,
+ "Cannot get CSI2 transmitter's pixel clock\n");
return -EINVAL;
+ }
+ if (!pixel_clock) {
+ dev_err(csiphy->camss->dev,
+ "Got pixel clock == 0, cannot continue\n");
+ return -EINVAL;
+ }
val = readl_relaxed(csiphy->base_clk_mux);
if (cfg->combo_mode && (lane_mask & 0x18) == 0x18) {
@@ -372,34 +265,9 @@ static int csiphy_stream_on(struct csiphy_device *csiphy)
val |= cfg->csid_id;
}
writel_relaxed(val, csiphy->base_clk_mux);
+	wmb(); /* apply the clock mux setting before configuring the lanes */
- writel_relaxed(0x1, csiphy->base +
- CAMSS_CSI_PHY_GLBL_T_INIT_CFG0);
- writel_relaxed(0x1, csiphy->base +
- CAMSS_CSI_PHY_T_WAKEUP_CFG0);
-
- val = 0x1;
- val |= lane_mask << 1;
- writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG);
-
- val = cfg->combo_mode << 4;
- writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
-
- while (lane_mask) {
- if (lane_mask & 0x1) {
- writel_relaxed(0x10, csiphy->base +
- CAMSS_CSI_PHY_LNn_CFG2(i));
- writel_relaxed(settle_cnt, csiphy->base +
- CAMSS_CSI_PHY_LNn_CFG3(i));
- writel_relaxed(0x3f, csiphy->base +
- CAMSS_CSI_PHY_INTERRUPT_MASKn(i));
- writel_relaxed(0x3f, csiphy->base +
- CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
- }
-
- lane_mask >>= 1;
- i++;
- }
+ csiphy->ops->lanes_enable(csiphy, cfg, pixel_clock, bpp, lane_mask);
return 0;
}
@@ -412,19 +280,7 @@ static int csiphy_stream_on(struct csiphy_device *csiphy)
*/
static void csiphy_stream_off(struct csiphy_device *csiphy)
{
- u8 lane_mask = csiphy_get_lane_mask(&csiphy->cfg.csi2->lane_cfg);
- int i = 0;
-
- while (lane_mask) {
- if (lane_mask & 0x1)
- writel_relaxed(0x0, csiphy->base +
- CAMSS_CSI_PHY_LNn_CFG2(i));
-
- lane_mask >>= 1;
- i++;
- }
-
- writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG);
+ csiphy->ops->lanes_disable(csiphy, &csiphy->cfg);
}
@@ -489,12 +345,12 @@ static void csiphy_try_format(struct csiphy_device *csiphy,
case MSM_CSIPHY_PAD_SINK:
/* Set format on sink pad */
- for (i = 0; i < ARRAY_SIZE(csiphy_formats); i++)
- if (fmt->code == csiphy_formats[i].code)
+ for (i = 0; i < csiphy->nformats; i++)
+ if (fmt->code == csiphy->formats[i].code)
break;
/* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(csiphy_formats))
+ if (i >= csiphy->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
@@ -530,10 +386,10 @@ static int csiphy_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *format;
if (code->pad == MSM_CSIPHY_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(csiphy_formats))
+ if (code->index >= csiphy->nformats)
return -EINVAL;
- code->code = csiphy_formats[code->index].code;
+ code->code = csiphy->formats[code->index].code;
} else {
if (code->index > 0)
return -EINVAL;
@@ -677,18 +533,32 @@ static int csiphy_init_formats(struct v4l2_subdev *sd,
*
* Return 0 on success or a negative error code otherwise
*/
-int msm_csiphy_subdev_init(struct csiphy_device *csiphy,
+int msm_csiphy_subdev_init(struct camss *camss,
+ struct csiphy_device *csiphy,
const struct resources *res, u8 id)
{
- struct device *dev = to_device_index(csiphy, id);
+ struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *r;
int i, j;
int ret;
+ csiphy->camss = camss;
csiphy->id = id;
csiphy->cfg.combo_mode = 0;
+ if (camss->version == CAMSS_8x16) {
+ csiphy->ops = &csiphy_ops_2ph_1_0;
+ csiphy->formats = csiphy_formats_8x16;
+ csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x16);
+ } else if (camss->version == CAMSS_8x96) {
+ csiphy->ops = &csiphy_ops_3ph_1_0;
+ csiphy->formats = csiphy_formats_8x96;
+ csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x96);
+ } else {
+ return -EINVAL;
+ }
+
/* Memory */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
@@ -717,7 +587,8 @@ int msm_csiphy_subdev_init(struct csiphy_device *csiphy,
csiphy->irq = r->start;
snprintf(csiphy->irq_name, sizeof(csiphy->irq_name), "%s_%s%d",
dev_name(dev), MSM_CSIPHY_NAME, csiphy->id);
- ret = devm_request_irq(dev, csiphy->irq, csiphy_isr,
+
+ ret = devm_request_irq(dev, csiphy->irq, csiphy->ops->isr,
IRQF_TRIGGER_RISING, csiphy->irq_name, csiphy);
if (ret < 0) {
dev_err(dev, "request_irq failed: %d\n", ret);
@@ -846,7 +717,7 @@ int msm_csiphy_register_entity(struct csiphy_device *csiphy,
{
struct v4l2_subdev *sd = &csiphy->subdev;
struct media_pad *pads = csiphy->pads;
- struct device *dev = to_device_index(csiphy, csiphy->id);
+ struct device *dev = csiphy->camss->dev;
int ret;
v4l2_subdev_init(sd, &csiphy_v4l2_ops);
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csiphy.h b/drivers/media/platform/qcom/camss/camss-csiphy.h
index ba8781122065..376f865ad383 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-csiphy.h
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.h
@@ -1,24 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* camss-csiphy.h
*
* Qualcomm MSM Camera Subsystem - CSIPHY Module
*
* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2016-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2016-2018 Linaro Ltd.
*/
#ifndef QC_MSM_CAMSS_CSIPHY_H
#define QC_MSM_CAMSS_CSIPHY_H
#include <linux/clk.h>
+#include <linux/interrupt.h>
#include <media/media-entity.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mediabus.h>
@@ -49,7 +42,22 @@ struct csiphy_config {
struct csiphy_csi2_cfg *csi2;
};
+struct csiphy_device;
+
+struct csiphy_hw_ops {
+ void (*hw_version_read)(struct csiphy_device *csiphy,
+ struct device *dev);
+ void (*reset)(struct csiphy_device *csiphy);
+ void (*lanes_enable)(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg,
+ u32 pixel_clock, u8 bpp, u8 lane_mask);
+ void (*lanes_disable)(struct csiphy_device *csiphy,
+ struct csiphy_config *cfg);
+ irqreturn_t (*isr)(int irq, void *dev);
+};
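This ops table is the whole per-SoC abstraction: an implementation is
selected once at probe time and all later hardware access dispatches
through it. A minimal sketch of the selection and dispatch, mirroring
msm_csiphy_subdev_init() and csiphy_set_power() in this patch:

	if (camss->version == CAMSS_8x16)
		csiphy->ops = &csiphy_ops_2ph_1_0;
	else if (camss->version == CAMSS_8x96)
		csiphy->ops = &csiphy_ops_3ph_1_0;

	/* later, version-agnostic code: */
	csiphy->ops->reset(csiphy);
	csiphy->ops->hw_version_read(csiphy, dev);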
+
struct csiphy_device {
+ struct camss *camss;
u8 id;
struct v4l2_subdev subdev;
struct media_pad pads[MSM_CSIPHY_PADS_NUM];
@@ -62,11 +70,15 @@ struct csiphy_device {
u32 timer_clk_rate;
struct csiphy_config cfg;
struct v4l2_mbus_framefmt fmt[MSM_CSIPHY_PADS_NUM];
+ const struct csiphy_hw_ops *ops;
+ const struct csiphy_format *formats;
+ unsigned int nformats;
};
struct resources;
-int msm_csiphy_subdev_init(struct csiphy_device *csiphy,
+int msm_csiphy_subdev_init(struct camss *camss,
+ struct csiphy_device *csiphy,
const struct resources *res, u8 id);
int msm_csiphy_register_entity(struct csiphy_device *csiphy,
@@ -74,4 +86,7 @@ int msm_csiphy_register_entity(struct csiphy_device *csiphy,
void msm_csiphy_unregister_entity(struct csiphy_device *csiphy);
+extern const struct csiphy_hw_ops csiphy_ops_2ph_1_0;
+extern const struct csiphy_hw_ops csiphy_ops_3ph_1_0;
+
#endif /* QC_MSM_CAMSS_CSIPHY_H */
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c
index 9d1af9353c1d..7f269021d08c 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-ispif.c
+++ b/drivers/media/platform/qcom/camss/camss-ispif.c
@@ -1,19 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* camss-ispif.c
*
* Qualcomm MSM Camera Subsystem - ISPIF (ISP Interface) Module
*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/completion.h>
@@ -22,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <media/media-entity.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
@@ -31,12 +24,6 @@
#define MSM_ISPIF_NAME "msm_ispif"
-#define ispif_line_array(ptr_line) \
- ((const struct ispif_line (*)[]) &(ptr_line[-(ptr_line->id)]))
-
-#define to_ispif(ptr_line) \
- container_of(ispif_line_array(ptr_line), struct ispif_device, ptr_line)
-
#define ISPIF_RST_CMD_0 0x008
#define ISPIF_RST_CMD_0_STROBED_RST_EN (1 << 0)
#define ISPIF_RST_CMD_0_MISC_LOGIC_RST (1 << 1)
@@ -89,6 +76,13 @@
(0x254 + 0x200 * (m) + 0x4 * (n))
#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) \
(0x264 + 0x200 * (m) + 0x4 * (n))
+/* PACK_CFG registers are 8x96 only */
+#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(m, n) \
+ (0x270 + 0x200 * (m) + 0x4 * (n))
+#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(m, n) \
+ (0x27c + 0x200 * (m) + 0x4 * (n))
+#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0_CID_c_PLAIN(c) \
+			(1 << (((c) % 8) * 4))
#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) \
(0x2c0 + 0x200 * (m) + 0x4 * (n))
#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) \
@@ -109,7 +103,7 @@ enum ispif_intf_cmd {
CMD_ALL_NO_CHANGE = 0xffffffff,
};
-static const u32 ispif_formats[] = {
+static const u32 ispif_formats_8x16[] = {
MEDIA_BUS_FMT_UYVY8_2X8,
MEDIA_BUS_FMT_VYUY8_2X8,
MEDIA_BUS_FMT_YUYV8_2X8,
@@ -126,16 +120,107 @@ static const u32 ispif_formats[] = {
MEDIA_BUS_FMT_SGBRG12_1X12,
MEDIA_BUS_FMT_SGRBG12_1X12,
MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_Y10_1X10,
+};
+
+static const u32 ispif_formats_8x96[] = {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SBGGR14_1X14,
+ MEDIA_BUS_FMT_SGBRG14_1X14,
+ MEDIA_BUS_FMT_SGRBG14_1X14,
+ MEDIA_BUS_FMT_SRGGB14_1X14,
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
};
/*
- * ispif_isr - ISPIF module interrupt handler
+ * ispif_isr_8x96 - ISPIF module interrupt handler for 8x96
* @irq: Interrupt line
* @dev: ISPIF device
*
* Return IRQ_HANDLED on success
*/
-static irqreturn_t ispif_isr(int irq, void *dev)
+static irqreturn_t ispif_isr_8x96(int irq, void *dev)
+{
+ struct ispif_device *ispif = dev;
+ u32 value0, value1, value2, value3, value4, value5;
+
+ value0 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(0));
+ value1 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(0));
+ value2 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(0));
+ value3 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(1));
+ value4 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(1));
+ value5 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(1));
+
+ writel_relaxed(value0, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(0));
+ writel_relaxed(value1, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(0));
+ writel_relaxed(value2, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(0));
+ writel_relaxed(value3, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(1));
+ writel_relaxed(value4, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(1));
+ writel_relaxed(value5, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(1));
+
+ writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD);
+
+ if ((value0 >> 27) & 0x1)
+ complete(&ispif->reset_complete);
+
+ if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 pix0 overflow\n");
+
+ if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 rdi0 overflow\n");
+
+ if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 pix1 overflow\n");
+
+ if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 rdi1 overflow\n");
+
+ if (unlikely(value2 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE0 rdi2 overflow\n");
+
+ if (unlikely(value3 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 pix0 overflow\n");
+
+ if (unlikely(value3 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 rdi0 overflow\n");
+
+ if (unlikely(value4 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 pix1 overflow\n");
+
+ if (unlikely(value4 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 rdi1 overflow\n");
+
+ if (unlikely(value5 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW))
+ dev_err_ratelimited(to_device(ispif), "VFE1 rdi2 overflow\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ispif_isr_8x16 - ISPIF module interrupt handler for 8x16
+ * @irq: Interrupt line
+ * @dev: ISPIF device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t ispif_isr_8x16(int irq, void *dev)
{
struct ispif_device *ispif = dev;
u32 value0, value1, value2;
@@ -183,6 +268,14 @@ static int ispif_reset(struct ispif_device *ispif)
u32 val;
int ret;
+ ret = camss_pm_domain_on(to_camss(ispif), PM_DOMAIN_VFE0);
+ if (ret < 0)
+ return ret;
+
+ ret = camss_pm_domain_on(to_camss(ispif), PM_DOMAIN_VFE1);
+ if (ret < 0)
+ return ret;
+
ret = camss_enable_clocks(ispif->nclocks_for_reset,
ispif->clock_for_reset,
to_device(ispif));
@@ -215,12 +308,15 @@ static int ispif_reset(struct ispif_device *ispif)
msecs_to_jiffies(ISPIF_RESET_TIMEOUT_MS));
if (!time) {
dev_err(to_device(ispif), "ISPIF reset timeout\n");
- return -EIO;
+ ret = -EIO;
}
camss_disable_clocks(ispif->nclocks_for_reset, ispif->clock_for_reset);
- return 0;
+ camss_pm_domain_off(to_camss(ispif), PM_DOMAIN_VFE0);
+ camss_pm_domain_off(to_camss(ispif), PM_DOMAIN_VFE1);
+
+ return ret;
}
/*
@@ -233,7 +329,7 @@ static int ispif_reset(struct ispif_device *ispif)
static int ispif_set_power(struct v4l2_subdev *sd, int on)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
- struct ispif_device *ispif = to_ispif(line);
+ struct ispif_device *ispif = line->ispif;
struct device *dev = to_device(ispif);
int ret = 0;
@@ -246,12 +342,19 @@ static int ispif_set_power(struct v4l2_subdev *sd, int on)
goto exit;
}
- ret = camss_enable_clocks(ispif->nclocks, ispif->clock, dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto exit;
+ ret = camss_enable_clocks(ispif->nclocks, ispif->clock, dev);
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
+ goto exit;
+ }
+
ret = ispif_reset(ispif);
if (ret < 0) {
+ pm_runtime_put_sync(dev);
camss_disable_clocks(ispif->nclocks, ispif->clock);
goto exit;
}
@@ -266,6 +369,7 @@ static int ispif_set_power(struct v4l2_subdev *sd, int on)
goto exit;
} else if (ispif->power_count == 1) {
camss_disable_clocks(ispif->nclocks, ispif->clock);
+ pm_runtime_put_sync(dev);
}
ispif->power_count--;
@@ -578,6 +682,55 @@ static void ispif_config_irq(struct ispif_device *ispif, enum ispif_intf intf,
}
/*
+ * ispif_config_pack - Config packing for PRDI mode
+ * @ispif: ISPIF device
+ * @code: media bus format code
+ * @intf: VFE interface
+ * @cid: desired CID to handle
+ * @vfe: VFE HW module id
+ * @enable: enable or disable
+ */
+static void ispif_config_pack(struct ispif_device *ispif, u32 code,
+ enum ispif_intf intf, u8 cid, u8 vfe, u8 enable)
+{
+ u32 addr, val;
+
+ if (code != MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE &&
+ code != MEDIA_BUS_FMT_Y10_2X8_PADHI_LE)
+ return;
+
+ switch (intf) {
+ case RDI0:
+ if (cid < 8)
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 0);
+ else
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 0);
+ break;
+ case RDI1:
+ if (cid < 8)
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 1);
+ else
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 1);
+ break;
+ case RDI2:
+ if (cid < 8)
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 2);
+ else
+ addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 2);
+ break;
+ default:
+ return;
+ }
+
+ if (enable)
+ val = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0_CID_c_PLAIN(cid);
+ else
+ val = 0;
+
+ writel_relaxed(val, ispif->base + addr);
+}
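An addressing example for the switch above (illustrative values): vfe = 0,
intf = RDI1, cid = 9. Since cid >= 8 the _1 bank is selected:

	addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(0, 1)
	     = 0x27c + 0x200 * 0 + 0x4 * 1 = 0x280
	val  = 1 << ((9 % 8) * 4) = 0x10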
+
+/*
* ispif_set_intf_cmd - Set command to enable/disable interface
* @ispif: ISPIF device
* @cmd: interface command
@@ -619,7 +772,7 @@ static void ispif_set_intf_cmd(struct ispif_device *ispif, u8 cmd,
static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
- struct ispif_device *ispif = to_ispif(line);
+ struct ispif_device *ispif = line->ispif;
enum ispif_intf intf = line->interface;
u8 csid = line->csid_id;
u8 vfe = line->vfe_id;
@@ -645,6 +798,10 @@ static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
ispif_select_csid(ispif, intf, csid, vfe, 1);
ispif_select_cid(ispif, intf, cid, vfe, 1);
ispif_config_irq(ispif, intf, vfe, 1);
+ if (to_camss(ispif)->version == CAMSS_8x96)
+ ispif_config_pack(ispif,
+ line->fmt[MSM_ISPIF_PAD_SINK].code,
+ intf, cid, vfe, 1);
ispif_set_intf_cmd(ispif, CMD_ENABLE_FRAME_BOUNDARY,
intf, vfe, vc);
} else {
@@ -658,6 +815,10 @@ static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
mutex_lock(&ispif->config_lock);
+ if (to_camss(ispif)->version == CAMSS_8x96)
+ ispif_config_pack(ispif,
+ line->fmt[MSM_ISPIF_PAD_SINK].code,
+ intf, cid, vfe, 0);
ispif_config_irq(ispif, intf, vfe, 0);
ispif_select_cid(ispif, intf, cid, vfe, 0);
ispif_select_csid(ispif, intf, csid, vfe, 0);
@@ -710,12 +871,12 @@ static void ispif_try_format(struct ispif_line *line,
case MSM_ISPIF_PAD_SINK:
/* Set format on sink pad */
- for (i = 0; i < ARRAY_SIZE(ispif_formats); i++)
- if (fmt->code == ispif_formats[i])
+ for (i = 0; i < line->nformats; i++)
+ if (fmt->code == line->formats[i])
break;
/* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(ispif_formats))
+ if (i >= line->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
@@ -753,10 +914,10 @@ static int ispif_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *format;
if (code->pad == MSM_ISPIF_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(ispif_formats))
+ if (code->index >= line->nformats)
return -EINVAL;
- code->code = ispif_formats[code->index];
+ code->code = line->formats[code->index];
} else {
if (code->index > 0)
return -EINVAL;
@@ -907,6 +1068,36 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
int i;
int ret;
+ /* Number of ISPIF lines - same as number of CSID hardware modules */
+ if (to_camss(ispif)->version == CAMSS_8x16)
+ ispif->line_num = 2;
+ else if (to_camss(ispif)->version == CAMSS_8x96)
+ ispif->line_num = 4;
+ else
+ return -EINVAL;
+
+ ispif->line = kcalloc(ispif->line_num, sizeof(*ispif->line),
+ GFP_KERNEL);
+ if (!ispif->line)
+ return -ENOMEM;
+
+ for (i = 0; i < ispif->line_num; i++) {
+ ispif->line[i].ispif = ispif;
+ ispif->line[i].id = i;
+
+ if (to_camss(ispif)->version == CAMSS_8x16) {
+ ispif->line[i].formats = ispif_formats_8x16;
+ ispif->line[i].nformats =
+ ARRAY_SIZE(ispif_formats_8x16);
+ } else if (to_camss(ispif)->version == CAMSS_8x96) {
+ ispif->line[i].formats = ispif_formats_8x96;
+ ispif->line[i].nformats =
+ ARRAY_SIZE(ispif_formats_8x96);
+ } else {
+ return -EINVAL;
+ }
+ }
+
/* Memory */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
@@ -935,8 +1126,14 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
ispif->irq = r->start;
snprintf(ispif->irq_name, sizeof(ispif->irq_name), "%s_%s",
dev_name(dev), MSM_ISPIF_NAME);
- ret = devm_request_irq(dev, ispif->irq, ispif_isr,
+ if (to_camss(ispif)->version == CAMSS_8x16)
+ ret = devm_request_irq(dev, ispif->irq, ispif_isr_8x16,
+ IRQF_TRIGGER_RISING, ispif->irq_name, ispif);
+ else if (to_camss(ispif)->version == CAMSS_8x96)
+ ret = devm_request_irq(dev, ispif->irq, ispif_isr_8x96,
IRQF_TRIGGER_RISING, ispif->irq_name, ispif);
+ else
+ ret = -EINVAL;
if (ret < 0) {
dev_err(dev, "request_irq failed: %d\n", ret);
return ret;
@@ -987,9 +1184,6 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
clock->nfreqs = 0;
}
- for (i = 0; i < ARRAY_SIZE(ispif->line); i++)
- ispif->line[i].id = i;
-
mutex_init(&ispif->power_lock);
ispif->power_count = 0;
@@ -1108,7 +1302,7 @@ int msm_ispif_register_entities(struct ispif_device *ispif,
int ret;
int i;
- for (i = 0; i < ARRAY_SIZE(ispif->line); i++) {
+ for (i = 0; i < ispif->line_num; i++) {
struct v4l2_subdev *sd = &ispif->line[i].subdev;
struct media_pad *pads = ispif->line[i].pads;
@@ -1169,7 +1363,7 @@ void msm_ispif_unregister_entities(struct ispif_device *ispif)
mutex_destroy(&ispif->power_lock);
mutex_destroy(&ispif->config_lock);
- for (i = 0; i < ARRAY_SIZE(ispif->line); i++) {
+ for (i = 0; i < ispif->line_num; i++) {
struct v4l2_subdev *sd = &ispif->line[i].subdev;
v4l2_device_unregister_subdev(sd);
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-ispif.h b/drivers/media/platform/qcom/camss/camss-ispif.h
index f668306020c3..1a5ba2425a42 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-ispif.h
+++ b/drivers/media/platform/qcom/camss/camss-ispif.h
@@ -1,19 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* camss-ispif.h
*
* Qualcomm MSM Camera Subsystem - ISPIF (ISP Interface) Module
*
* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#ifndef QC_MSM_CAMSS_ISPIF_H
#define QC_MSM_CAMSS_ISPIF_H
@@ -23,14 +15,11 @@
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
-/* Number of ISPIF lines - same as number of CSID hardware modules */
-#define MSM_ISPIF_LINE_NUM 2
-
#define MSM_ISPIF_PAD_SINK 0
#define MSM_ISPIF_PAD_SRC 1
#define MSM_ISPIF_PADS_NUM 2
-#define MSM_ISPIF_VFE_NUM 1
+#define MSM_ISPIF_VFE_NUM 2
enum ispif_intf {
PIX0,
@@ -46,6 +35,7 @@ struct ispif_intf_cmd_reg {
};
struct ispif_line {
+ struct ispif_device *ispif;
u8 id;
u8 csid_id;
u8 vfe_id;
@@ -53,6 +43,8 @@ struct ispif_line {
struct v4l2_subdev subdev;
struct media_pad pads[MSM_ISPIF_PADS_NUM];
struct v4l2_mbus_framefmt fmt[MSM_ISPIF_PADS_NUM];
+ const u32 *formats;
+ unsigned int nformats;
};
struct ispif_device {
@@ -69,7 +61,8 @@ struct ispif_device {
struct mutex power_lock;
struct ispif_intf_cmd_reg intf_cmd[MSM_ISPIF_VFE_NUM];
struct mutex config_lock;
- struct ispif_line line[MSM_ISPIF_LINE_NUM];
+ unsigned int line_num;
+ struct ispif_line *line;
};
struct resources_ispif;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
new file mode 100644
index 000000000000..da3a9fed9f2d
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -0,0 +1,1018 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-vfe-4-1.c
+ *
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v4.1
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+
+#include "camss-vfe.h"
+
+#define VFE_0_HW_VERSION 0x000
+
+#define VFE_0_GLOBAL_RESET_CMD 0x00c
+#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0)
+#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1)
+#define VFE_0_GLOBAL_RESET_CMD_BUS BIT(2)
+#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG BIT(3)
+#define VFE_0_GLOBAL_RESET_CMD_REGISTER BIT(4)
+#define VFE_0_GLOBAL_RESET_CMD_TIMER BIT(5)
+#define VFE_0_GLOBAL_RESET_CMD_PM BIT(6)
+#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR BIT(7)
+#define VFE_0_GLOBAL_RESET_CMD_TESTGEN BIT(8)
+
+#define VFE_0_MODULE_CFG 0x018
+#define VFE_0_MODULE_CFG_DEMUX BIT(2)
+#define VFE_0_MODULE_CFG_CHROMA_UPSAMPLE BIT(3)
+#define VFE_0_MODULE_CFG_SCALE_ENC BIT(23)
+#define VFE_0_MODULE_CFG_CROP_ENC BIT(27)
+
+#define VFE_0_CORE_CFG 0x01c
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7
+
+#define VFE_0_IRQ_CMD 0x024
+#define VFE_0_IRQ_CMD_GLOBAL_CLEAR BIT(0)
+
+#define VFE_0_IRQ_MASK_0 0x028
+#define VFE_0_IRQ_MASK_0_CAMIF_SOF BIT(0)
+#define VFE_0_IRQ_MASK_0_CAMIF_EOF BIT(1)
+#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
+#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \
+ ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n))
+#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
+#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
+#define VFE_0_IRQ_MASK_0_RESET_ACK BIT(31)
+#define VFE_0_IRQ_MASK_1 0x02c
+#define VFE_0_IRQ_MASK_1_CAMIF_ERROR BIT(0)
+#define VFE_0_IRQ_MASK_1_VIOLATION BIT(7)
+#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK BIT(8)
+#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9)
+#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) BIT((n) + 29)
+
+#define VFE_0_IRQ_CLEAR_0 0x030
+#define VFE_0_IRQ_CLEAR_1 0x034
+
+#define VFE_0_IRQ_STATUS_0 0x038
+#define VFE_0_IRQ_STATUS_0_CAMIF_SOF BIT(0)
+#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
+#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \
+ ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
+#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
+#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
+#define VFE_0_IRQ_STATUS_0_RESET_ACK BIT(31)
+#define VFE_0_IRQ_STATUS_1 0x03c
+#define VFE_0_IRQ_STATUS_1_VIOLATION BIT(7)
+#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK BIT(8)
+#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) BIT((n) + 29)
+
+#define VFE_0_IRQ_COMPOSITE_MASK_0 0x40
+#define VFE_0_VIOLATION_STATUS 0x48
+
+#define VFE_0_BUS_CMD 0x4c
+#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) BIT(x)
+
+#define VFE_0_BUS_CFG 0x050
+
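+/*
+ * One XBAR_CFG register covers a pair of write masters: the even master
+ * uses the low 16 bits and the odd master the high 16 bits (hence the
+ * "reg <<= 16" for odd masters below).
+ */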
+#define VFE_0_BUS_XBAR_CFG_x(x) (0x58 + 0x4 * ((x) / 2))
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN BIT(1)
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4)
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 5
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 6
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 7
+
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x06c + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT 1
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x070 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x074 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x078 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1f << 2)
+
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x07c + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x080 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x084 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
+ (0x088 + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
+ (0x08c + 0x24 * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
+
+#define VFE_0_BUS_PING_PONG_STATUS 0x268
+
+#define VFE_0_BUS_BDG_CMD 0x2c0
+#define VFE_0_BUS_BDG_CMD_HALT_REQ 1
+
+#define VFE_0_BUS_BDG_QOS_CFG_0 0x2c4
+#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5
+#define VFE_0_BUS_BDG_QOS_CFG_1 0x2c8
+#define VFE_0_BUS_BDG_QOS_CFG_2 0x2cc
+#define VFE_0_BUS_BDG_QOS_CFG_3 0x2d0
+#define VFE_0_BUS_BDG_QOS_CFG_4 0x2d4
+#define VFE_0_BUS_BDG_QOS_CFG_5 0x2d8
+#define VFE_0_BUS_BDG_QOS_CFG_6 0x2dc
+#define VFE_0_BUS_BDG_QOS_CFG_7 0x2e0
+#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa5
+
+#define VFE_0_RDI_CFG_x(x) (0x2e8 + (0x4 * (x)))
+#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
+#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28)
+#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
+#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4)
+#define VFE_0_RDI_CFG_x_RDI_EN_BIT BIT(2)
+#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
+#define VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(r) BIT(16 + (r))
+
+#define VFE_0_CAMIF_CMD 0x2f4
+#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0
+#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1
+#define VFE_0_CAMIF_CMD_NO_CHANGE 3
+#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS BIT(2)
+#define VFE_0_CAMIF_CFG 0x2f8
+#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN BIT(6)
+#define VFE_0_CAMIF_FRAME_CFG 0x300
+#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x304
+#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x308
+#define VFE_0_CAMIF_SUBSAMPLE_CFG_0 0x30c
+#define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x314
+#define VFE_0_CAMIF_STATUS 0x31c
+#define VFE_0_CAMIF_STATUS_HALT BIT(31)
+
+#define VFE_0_REG_UPDATE 0x378
+#define VFE_0_REG_UPDATE_RDIn(n) BIT(1 + (n))
+#define VFE_0_REG_UPDATE_line_n(n) \
+ ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n))
+
+#define VFE_0_DEMUX_CFG 0x424
+#define VFE_0_DEMUX_CFG_PERIOD 0x3
+#define VFE_0_DEMUX_GAIN_0 0x428
+#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0)
+#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16)
+#define VFE_0_DEMUX_GAIN_1 0x42c
+#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0)
+#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16)
+#define VFE_0_DEMUX_EVEN_CFG 0x438
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9
+#define VFE_0_DEMUX_ODD_CFG 0x43c
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9
+
+#define VFE_0_SCALE_ENC_Y_CFG 0x75c
+#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x760
+#define VFE_0_SCALE_ENC_Y_H_PHASE 0x764
+#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x76c
+#define VFE_0_SCALE_ENC_Y_V_PHASE 0x770
+#define VFE_0_SCALE_ENC_CBCR_CFG 0x778
+#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x77c
+#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x780
+#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x790
+#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x794
+
+#define VFE_0_CROP_ENC_Y_WIDTH 0x854
+#define VFE_0_CROP_ENC_Y_HEIGHT 0x858
+#define VFE_0_CROP_ENC_CBCR_WIDTH 0x85c
+#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x860
+
+#define VFE_0_CLAMP_ENC_MAX_CFG 0x874
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0)
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8)
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16)
+#define VFE_0_CLAMP_ENC_MIN_CFG 0x878
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0)
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8)
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16)
+
+#define VFE_0_CGC_OVERRIDE_1 0x974
+#define VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(x) BIT(x)
+
+#define CAMIF_TIMEOUT_SLEEP_US 1000
+#define CAMIF_TIMEOUT_ALL_US 1000000
+
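+/* The UB (write master buffer) space is split evenly between the three RDI interfaces */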
+#define MSM_VFE_VFE0_UB_SIZE 1023
+#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
+
+static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+{
+ u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
+
+ dev_dbg(dev, "VFE HW Version = 0x%08x\n", hw_version);
+}
+
+static u16 vfe_get_ub_size(u8 vfe_id)
+{
+ if (vfe_id == 0)
+ return MSM_VFE_VFE0_UB_SIZE_RDI;
+
+ return 0;
+}
+
+static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
+{
+ u32 bits = readl_relaxed(vfe->base + reg);
+
+ writel_relaxed(bits & ~clr_bits, vfe->base + reg);
+}
+
+static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
+{
+ u32 bits = readl_relaxed(vfe->base + reg);
+
+ writel_relaxed(bits | set_bits, vfe->base + reg);
+}
+
+static void vfe_global_reset(struct vfe_device *vfe)
+{
+ u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_TESTGEN |
+ VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
+ VFE_0_GLOBAL_RESET_CMD_PM |
+ VFE_0_GLOBAL_RESET_CMD_TIMER |
+ VFE_0_GLOBAL_RESET_CMD_REGISTER |
+ VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
+ VFE_0_GLOBAL_RESET_CMD_BUS |
+ VFE_0_GLOBAL_RESET_CMD_CAMIF |
+ VFE_0_GLOBAL_RESET_CMD_CORE;
+
+ writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
+}
+
+static void vfe_halt_request(struct vfe_device *vfe)
+{
+ writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ,
+ vfe->base + VFE_0_BUS_BDG_CMD);
+}
+
+static void vfe_halt_clear(struct vfe_device *vfe)
+{
+ writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD);
+}
+
+static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
+ else
+ vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
+}
+
+static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
+ else
+ vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
+}
+
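+/* Number of N-sized words needed for width * M units, rounded up */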
+#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
+
+static int vfe_word_per_line(u32 format, u32 pixel_per_line)
+{
+ int val = 0;
+
+ switch (format) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ val = CALC_WORD(pixel_per_line, 1, 8);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ val = CALC_WORD(pixel_per_line, 2, 8);
+ break;
+ }
+
+ return val;
+}
+
+static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
+ u16 *width, u16 *height, u16 *bytesperline)
+{
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+ if (plane == 1)
+ *height /= 2;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+ break;
+ }
+}
+
+static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
+ struct v4l2_pix_format_mplane *pix,
+ u8 plane, u32 enable)
+{
+ u32 reg;
+
+ if (enable) {
+ u16 width = 0, height = 0, bytesperline = 0, wpl;
+
+ vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline);
+
+ wpl = vfe_word_per_line(pix->pixelformat, width);
+
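+		/* IMAGE_SIZE: (height - 1) in the low half, (word pairs per line - 1) in the high half */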
+ reg = height - 1;
+ reg |= ((wpl + 1) / 2 - 1) << 16;
+
+ writel_relaxed(reg, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
+
+ wpl = vfe_word_per_line(pix->pixelformat, bytesperline);
+
+ reg = 0x3;
+ reg |= (height - 1) << 4;
+ reg |= wpl << 16;
+
+ writel_relaxed(reg, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
+ } else {
+ writel_relaxed(0, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
+ writel_relaxed(0, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
+ }
+}
+
+static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per)
+{
+ u32 reg;
+
+ reg = readl_relaxed(vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
+
+ reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
+
+ reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT)
+ & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
+
+ writel_relaxed(reg,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
+}
+
+static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm,
+ u32 pattern)
+{
+ writel_relaxed(pattern,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
+}
+
+static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm,
+ u16 offset, u16 depth)
+{
+ u32 reg;
+
+ reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) |
+ depth;
+ writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
+}
+
+static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm)
+{
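+	/* Barriers order the reload command against the ping/pong address writes around it */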
+ wmb();
+ writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
+ wmb();
+}
+
+static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr)
+{
+ writel_relaxed(addr,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
+}
+
+static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr)
+{
+ writel_relaxed(addr,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
+}
+
+static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm)
+{
+ u32 reg;
+
+ reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
+
+ return (reg >> wm) & 0x1;
+}
+
+static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable)
+{
+ if (enable)
+ writel_relaxed(0x10000009, vfe->base + VFE_0_BUS_CFG);
+ else
+ writel_relaxed(0, vfe->base + VFE_0_BUS_CFG);
+}
+
+static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id)
+{
+ u32 reg;
+
+ reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
+ reg |= VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
+
+ reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
+ reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
+ VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg);
+
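+	/* Route the selected RDI stream to this write master through the bus crossbar */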
+ switch (id) {
+ case VFE_LINE_RDI0:
+ default:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI1:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI2:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ }
+
+ if (wm % 2 == 1)
+ reg <<= 16;
+
+ vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
+}
+
+static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm)
+{
+ writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
+ vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
+}
+
+static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id)
+{
+ u32 reg;
+
+ reg = VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(0), reg);
+
+ reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg);
+
+ switch (id) {
+ case VFE_LINE_RDI0:
+ default:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI1:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI2:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ }
+
+ if (wm % 2 == 1)
+ reg <<= 16;
+
+ vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
+}
+
+static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
+ u8 enable)
+{
+ struct vfe_line *line = container_of(output, struct vfe_line, output);
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ unsigned int i;
+
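+	/* Write master 0 carries luma; write master 1, when present, carries the paired chroma stream */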
+ for (i = 0; i < output->wm_num; i++) {
+ if (i == 0) {
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ } else if (i == 1) {
+ reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
+ reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
+ } else {
+ /* On current devices output->wm_num is always <= 2 */
+ break;
+ }
+
+ if (output->wm_idx[i] % 2 == 1)
+ reg <<= 16;
+
+ if (enable)
+ vfe_reg_set(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
+ reg);
+ else
+ vfe_reg_clr(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
+ reg);
+ }
+}
+
+static void vfe_set_realign_cfg(struct vfe_device *vfe, struct vfe_line *line,
+ u8 enable)
+{
+ /* empty */
+}
+
+static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid)
+{
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id),
+ VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
+
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id),
+ cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
+}
+
+static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id);
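+	/* Order all prior configuration writes before the register update command */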
+ wmb();
+ writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE);
+ wmb();
+}
+
+static inline void vfe_reg_update_clear(struct vfe_device *vfe,
+ enum vfe_line_id line_id)
+{
+ vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id);
+}
+
+static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id line_id, u8 enable)
+{
+ u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) |
+ VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
+ u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) |
+ VFE_0_IRQ_MASK_1_RDIn_SOF(line_id);
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ }
+}
+
+static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp,
+ enum vfe_line_id line_id, u8 enable)
+{
+ struct vfe_output *output = &vfe->line[line_id].output;
+ unsigned int i;
+ u32 irq_en0;
+ u32 irq_en1;
+ u32 comp_mask = 0;
+
+ irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF;
+ irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF;
+ irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp);
+ irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
+ irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR;
+ for (i = 0; i < output->wm_num; i++) {
+ irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(
+ output->wm_idx[i]);
+ comp_mask |= (1 << output->wm_idx[i]) << comp * 8;
+ }
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
+ }
+}
+
+static void vfe_enable_irq_common(struct vfe_device *vfe)
+{
+ u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK;
+ u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION |
+ VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK;
+
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+}
+
+static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 val, even_cfg, odd_cfg;
+
+ writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG);
+
+ val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD;
+ writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0);
+
+ val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2;
+ writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
+
+ switch (line->fmt[MSM_VFE_PAD_SINK].code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
+ break;
+ }
+
+ writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG);
+ writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG);
+}
+
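+/* Pick the scaler interpolation resolution from the input/output downscale ratio */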
+static inline u8 vfe_calc_interp_reso(u16 input, u16 output)
+{
+ if (input / output >= 16)
+ return 0;
+
+ if (input / output >= 8)
+ return 1;
+
+ if (input / output >= 4)
+ return 2;
+
+ return 3;
+}
+
+static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ u16 input, output;
+ u8 interp_reso;
+ u32 phase_mult;
+
+ writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].width;
+ output = line->compose.width;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE);
+
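+	/* Fixed-point phase step: input/output scaled by 2^(13 + interp_reso) */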
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (13 + interp_reso)) / output;
+ reg = (interp_reso << 20) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].height;
+ output = line->compose.height;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (13 + interp_reso)) / output;
+ reg = (interp_reso << 20) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE);
+
+ writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].width;
+ output = line->compose.width / 2;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (13 + interp_reso)) / output;
+ reg = (interp_reso << 20) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].height;
+ output = line->compose.height;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
+ output = line->compose.height / 2;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (13 + interp_reso)) / output;
+ reg = (interp_reso << 20) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE);
+}
+
+static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ u16 first, last;
+
+ first = line->crop.left;
+ last = line->crop.left + line->crop.width - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH);
+
+ first = line->crop.top;
+ last = line->crop.top + line->crop.height - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT);
+
+ first = line->crop.left / 2;
+ last = line->crop.left / 2 + line->crop.width / 2 - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH);
+
+ first = line->crop.top;
+ last = line->crop.top + line->crop.height - 1;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
+ first = line->crop.top / 2;
+ last = line->crop.top / 2 + line->crop.height / 2 - 1;
+ }
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT);
+}
+
+static void vfe_set_clamp_cfg(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 |
+ VFE_0_CLAMP_ENC_MAX_CFG_CH1 |
+ VFE_0_CLAMP_ENC_MAX_CFG_CH2;
+
+ writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG);
+
+ val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 |
+ VFE_0_CLAMP_ENC_MIN_CFG_CH1 |
+ VFE_0_CLAMP_ENC_MIN_CFG_CH2;
+
+ writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG);
+}
+
+static void vfe_set_qos(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG;
+ u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG;
+
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
+ writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
+}
+
+static void vfe_set_ds(struct vfe_device *vfe)
+{
+ /* empty */
+}
+
+static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ u32 val = VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(wm);
+
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_CGC_OVERRIDE_1, val);
+ else
+ vfe_reg_clr(vfe, VFE_0_CGC_OVERRIDE_1, val);
+
+ wmb();
+}
+
+static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 val;
+
+ switch (line->fmt[MSM_VFE_PAD_SINK].code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
+ break;
+ }
+
+ writel_relaxed(val, vfe->base + VFE_0_CORE_CFG);
+
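+	/* Widths are programmed in bus samples: two 8-bit samples per YUV 4:2:2 pixel */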
+ val = line->fmt[MSM_VFE_PAD_SINK].width * 2;
+ val |= line->fmt[MSM_VFE_PAD_SINK].height << 16;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].height - 1;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG_0);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN);
+
+ val = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val);
+
+ val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG);
+}
+
+static void vfe_set_camif_cmd(struct vfe_device *vfe, u8 enable)
+{
+ u32 cmd;
+
+ cmd = VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS | VFE_0_CAMIF_CMD_NO_CHANGE;
+ writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
+ wmb();
+
+ if (enable)
+ cmd = VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY;
+ else
+ cmd = VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY;
+
+ writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
+}
+
+static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable)
+{
+ u32 val = VFE_0_MODULE_CFG_DEMUX |
+ VFE_0_MODULE_CFG_CHROMA_UPSAMPLE |
+ VFE_0_MODULE_CFG_SCALE_ENC |
+ VFE_0_MODULE_CFG_CROP_ENC;
+
+ if (enable)
+ writel_relaxed(val, vfe->base + VFE_0_MODULE_CFG);
+ else
+ writel_relaxed(0x0, vfe->base + VFE_0_MODULE_CFG);
+}
+
+static int vfe_camif_wait_for_stop(struct vfe_device *vfe, struct device *dev)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS,
+ val,
+ (val & VFE_0_CAMIF_STATUS_HALT),
+ CAMIF_TIMEOUT_SLEEP_US,
+ CAMIF_TIMEOUT_ALL_US);
+ if (ret < 0)
+ dev_err(dev, "%s: camif stop timeout\n", __func__);
+
+ return ret;
+}
+
+static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1)
+{
+ *value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0);
+ *value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1);
+
+ writel_relaxed(*value0, vfe->base + VFE_0_IRQ_CLEAR_0);
+ writel_relaxed(*value1, vfe->base + VFE_0_IRQ_CLEAR_1);
+
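+	/* Order the per-status clears before the global IRQ clear command */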
+ wmb();
+ writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
+}
+
+static void vfe_violation_read(struct vfe_device *vfe)
+{
+ u32 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
+
+ pr_err_ratelimited("VFE: violation = 0x%08x\n", violation);
+}
+
+/*
+ * vfe_isr - VFE module interrupt handler
+ * @irq: Interrupt line
+ * @dev: VFE device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t vfe_isr(int irq, void *dev)
+{
+ struct vfe_device *vfe = dev;
+ u32 value0, value1;
+ int i, j;
+
+ vfe->ops->isr_read(vfe, &value0, &value1);
+
+ trace_printk("VFE: status0 = 0x%08x, status1 = 0x%08x\n",
+ value0, value1);
+
+ if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
+ vfe->isr_ops.reset_ack(vfe);
+
+ if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION)
+ vfe->ops->violation_read(vfe);
+
+ if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK)
+ vfe->isr_ops.halt_ack(vfe);
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i))
+ vfe->isr_ops.reg_update(vfe, i);
+
+ if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF)
+ vfe->isr_ops.sof(vfe, VFE_LINE_PIX);
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
+ if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i))
+ vfe->isr_ops.sof(vfe, i);
+
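+	/* On a composite done, drop the per-WM ping-pong bits of the PIX line so they are not handled twice */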
+ for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) {
+ vfe->isr_ops.comp_done(vfe, i);
+ for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++)
+ if (vfe->wm_output_map[j] == VFE_LINE_PIX)
+ value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j);
+ }
+
+ for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i))
+ vfe->isr_ops.wm_done(vfe, i);
+
+ return IRQ_HANDLED;
+}
+
+const struct vfe_hw_ops vfe_ops_4_1 = {
+ .hw_version_read = vfe_hw_version_read,
+ .get_ub_size = vfe_get_ub_size,
+ .global_reset = vfe_global_reset,
+ .halt_request = vfe_halt_request,
+ .halt_clear = vfe_halt_clear,
+ .wm_enable = vfe_wm_enable,
+ .wm_frame_based = vfe_wm_frame_based,
+ .wm_line_based = vfe_wm_line_based,
+ .wm_set_framedrop_period = vfe_wm_set_framedrop_period,
+ .wm_set_framedrop_pattern = vfe_wm_set_framedrop_pattern,
+ .wm_set_ub_cfg = vfe_wm_set_ub_cfg,
+ .bus_reload_wm = vfe_bus_reload_wm,
+ .wm_set_ping_addr = vfe_wm_set_ping_addr,
+ .wm_set_pong_addr = vfe_wm_set_pong_addr,
+ .wm_get_ping_pong_status = vfe_wm_get_ping_pong_status,
+ .bus_enable_wr_if = vfe_bus_enable_wr_if,
+ .bus_connect_wm_to_rdi = vfe_bus_connect_wm_to_rdi,
+ .wm_set_subsample = vfe_wm_set_subsample,
+ .bus_disconnect_wm_from_rdi = vfe_bus_disconnect_wm_from_rdi,
+ .set_xbar_cfg = vfe_set_xbar_cfg,
+ .set_realign_cfg = vfe_set_realign_cfg,
+ .set_rdi_cid = vfe_set_rdi_cid,
+ .reg_update = vfe_reg_update,
+ .reg_update_clear = vfe_reg_update_clear,
+ .enable_irq_wm_line = vfe_enable_irq_wm_line,
+ .enable_irq_pix_line = vfe_enable_irq_pix_line,
+ .enable_irq_common = vfe_enable_irq_common,
+ .set_demux_cfg = vfe_set_demux_cfg,
+ .set_scale_cfg = vfe_set_scale_cfg,
+ .set_crop_cfg = vfe_set_crop_cfg,
+ .set_clamp_cfg = vfe_set_clamp_cfg,
+ .set_qos = vfe_set_qos,
+ .set_ds = vfe_set_ds,
+ .set_cgc_override = vfe_set_cgc_override,
+ .set_camif_cfg = vfe_set_camif_cfg,
+ .set_camif_cmd = vfe_set_camif_cmd,
+ .set_module_cfg = vfe_set_module_cfg,
+ .camif_wait_for_stop = vfe_camif_wait_for_stop,
+ .isr_read = vfe_isr_read,
+ .violation_read = vfe_violation_read,
+ .isr = vfe_isr,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
new file mode 100644
index 000000000000..4c584bffd179
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -0,0 +1,1140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-vfe-4-7.c
+ *
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v4.7
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+
+#include "camss-vfe.h"
+
+#define VFE_0_HW_VERSION 0x000
+
+#define VFE_0_GLOBAL_RESET_CMD 0x018
+#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0)
+#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1)
+#define VFE_0_GLOBAL_RESET_CMD_BUS BIT(2)
+#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG BIT(3)
+#define VFE_0_GLOBAL_RESET_CMD_REGISTER BIT(4)
+#define VFE_0_GLOBAL_RESET_CMD_PM BIT(5)
+#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR BIT(6)
+#define VFE_0_GLOBAL_RESET_CMD_TESTGEN BIT(7)
+#define VFE_0_GLOBAL_RESET_CMD_DSP BIT(8)
+#define VFE_0_GLOBAL_RESET_CMD_IDLE_CGC BIT(9)
+
+#define VFE_0_MODULE_LENS_EN 0x040
+#define VFE_0_MODULE_LENS_EN_DEMUX BIT(2)
+#define VFE_0_MODULE_LENS_EN_CHROMA_UPSAMPLE BIT(3)
+
+#define VFE_0_MODULE_ZOOM_EN 0x04c
+#define VFE_0_MODULE_ZOOM_EN_SCALE_ENC BIT(1)
+#define VFE_0_MODULE_ZOOM_EN_CROP_ENC BIT(2)
+#define VFE_0_MODULE_ZOOM_EN_REALIGN_BUF BIT(9)
+
+#define VFE_0_CORE_CFG 0x050
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6
+#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7
+#define VFE_0_CORE_CFG_COMPOSITE_REG_UPDATE_EN BIT(4)
+
+#define VFE_0_IRQ_CMD 0x058
+#define VFE_0_IRQ_CMD_GLOBAL_CLEAR BIT(0)
+
+#define VFE_0_IRQ_MASK_0 0x05c
+#define VFE_0_IRQ_MASK_0_CAMIF_SOF BIT(0)
+#define VFE_0_IRQ_MASK_0_CAMIF_EOF BIT(1)
+#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
+#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \
+ ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n))
+#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
+#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
+#define VFE_0_IRQ_MASK_0_RESET_ACK BIT(31)
+#define VFE_0_IRQ_MASK_1 0x060
+#define VFE_0_IRQ_MASK_1_CAMIF_ERROR BIT(0)
+#define VFE_0_IRQ_MASK_1_VIOLATION BIT(7)
+#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK BIT(8)
+#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9)
+#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) BIT((n) + 29)
+
+#define VFE_0_IRQ_CLEAR_0 0x064
+#define VFE_0_IRQ_CLEAR_1 0x068
+
+#define VFE_0_IRQ_STATUS_0 0x06c
+#define VFE_0_IRQ_STATUS_0_CAMIF_SOF BIT(0)
+#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
+#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \
+ ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
+#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
+#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
+#define VFE_0_IRQ_STATUS_0_RESET_ACK BIT(31)
+#define VFE_0_IRQ_STATUS_1 0x070
+#define VFE_0_IRQ_STATUS_1_VIOLATION BIT(7)
+#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK BIT(8)
+#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) BIT((n) + 29)
+
+#define VFE_0_IRQ_COMPOSITE_MASK_0 0x074
+#define VFE_0_VIOLATION_STATUS 0x07c
+
+#define VFE_0_BUS_CMD 0x80
+#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) BIT(x)
+
+#define VFE_0_BUS_CFG 0x084
+
+#define VFE_0_BUS_XBAR_CFG_x(x) (0x90 + 0x4 * ((x) / 2))
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN BIT(2)
+#define VFE_0_BUS_XBAR_CFG_x_M_REALIGN_BUF_EN BIT(3)
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTRA (0x1 << 4)
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER (0x2 << 4)
+#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4)
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0x0
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 0xc
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 0xd
+#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 0xe
+
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x0a0 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x0a4 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x0ac + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x0b4 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT 1
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1f << 2)
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x0b8 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x0bc + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x0c0 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
+ (0x0c4 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
+ (0x0c8 + 0x2c * (n))
+#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
+
+#define VFE_0_BUS_PING_PONG_STATUS 0x338
+
+#define VFE_0_BUS_BDG_CMD 0x400
+#define VFE_0_BUS_BDG_CMD_HALT_REQ 1
+
+#define VFE_0_BUS_BDG_QOS_CFG_0 0x404
+#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa9aaa9
+#define VFE_0_BUS_BDG_QOS_CFG_1 0x408
+#define VFE_0_BUS_BDG_QOS_CFG_2 0x40c
+#define VFE_0_BUS_BDG_QOS_CFG_3 0x410
+#define VFE_0_BUS_BDG_QOS_CFG_4 0x414
+#define VFE_0_BUS_BDG_QOS_CFG_5 0x418
+#define VFE_0_BUS_BDG_QOS_CFG_6 0x41c
+#define VFE_0_BUS_BDG_QOS_CFG_7 0x420
+#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa9
+
+#define VFE_0_BUS_BDG_DS_CFG_0 0x424
+#define VFE_0_BUS_BDG_DS_CFG_0_CFG 0xcccc0011
+#define VFE_0_BUS_BDG_DS_CFG_1 0x428
+#define VFE_0_BUS_BDG_DS_CFG_2 0x42c
+#define VFE_0_BUS_BDG_DS_CFG_3 0x430
+#define VFE_0_BUS_BDG_DS_CFG_4 0x434
+#define VFE_0_BUS_BDG_DS_CFG_5 0x438
+#define VFE_0_BUS_BDG_DS_CFG_6 0x43c
+#define VFE_0_BUS_BDG_DS_CFG_7 0x440
+#define VFE_0_BUS_BDG_DS_CFG_8 0x444
+#define VFE_0_BUS_BDG_DS_CFG_9 0x448
+#define VFE_0_BUS_BDG_DS_CFG_10 0x44c
+#define VFE_0_BUS_BDG_DS_CFG_11 0x450
+#define VFE_0_BUS_BDG_DS_CFG_12 0x454
+#define VFE_0_BUS_BDG_DS_CFG_13 0x458
+#define VFE_0_BUS_BDG_DS_CFG_14 0x45c
+#define VFE_0_BUS_BDG_DS_CFG_15 0x460
+#define VFE_0_BUS_BDG_DS_CFG_16 0x464
+#define VFE_0_BUS_BDG_DS_CFG_16_CFG 0x40000103
+
+#define VFE_0_RDI_CFG_x(x) (0x46c + (0x4 * (x)))
+#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
+#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28)
+#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
+#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4)
+#define VFE_0_RDI_CFG_x_RDI_EN_BIT BIT(2)
+#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
+
+#define VFE_0_CAMIF_CMD 0x478
+#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0
+#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1
+#define VFE_0_CAMIF_CMD_NO_CHANGE 3
+#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS BIT(2)
+#define VFE_0_CAMIF_CFG 0x47c
+#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN BIT(6)
+#define VFE_0_CAMIF_FRAME_CFG 0x484
+#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x488
+#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x48c
+#define VFE_0_CAMIF_SUBSAMPLE_CFG 0x490
+#define VFE_0_CAMIF_IRQ_FRAMEDROP_PATTERN 0x498
+#define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x49c
+#define VFE_0_CAMIF_STATUS 0x4a4
+#define VFE_0_CAMIF_STATUS_HALT BIT(31)
+
+#define VFE_0_REG_UPDATE 0x4ac
+#define VFE_0_REG_UPDATE_RDIn(n) BIT(1 + (n))
+#define VFE_0_REG_UPDATE_line_n(n) \
+ ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n))
+
+#define VFE_0_DEMUX_CFG 0x560
+#define VFE_0_DEMUX_CFG_PERIOD 0x3
+#define VFE_0_DEMUX_GAIN_0 0x564
+#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0)
+#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16)
+#define VFE_0_DEMUX_GAIN_1 0x568
+#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0)
+#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16)
+#define VFE_0_DEMUX_EVEN_CFG 0x574
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca
+#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9
+#define VFE_0_DEMUX_ODD_CFG 0x578
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca
+#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9
+
+#define VFE_0_SCALE_ENC_Y_CFG 0x91c
+#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x920
+#define VFE_0_SCALE_ENC_Y_H_PHASE 0x924
+#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x934
+#define VFE_0_SCALE_ENC_Y_V_PHASE 0x938
+#define VFE_0_SCALE_ENC_CBCR_CFG 0x948
+#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x94c
+#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x950
+#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x960
+#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x964
+
+#define VFE_0_CROP_ENC_Y_WIDTH 0x974
+#define VFE_0_CROP_ENC_Y_HEIGHT 0x978
+#define VFE_0_CROP_ENC_CBCR_WIDTH 0x97c
+#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x980
+
+#define VFE_0_CLAMP_ENC_MAX_CFG 0x984
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0)
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8)
+#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16)
+#define VFE_0_CLAMP_ENC_MIN_CFG 0x988
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0)
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8)
+#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16)
+
+#define VFE_0_REALIGN_BUF_CFG 0xaac
+#define VFE_0_REALIGN_BUF_CFG_CB_ODD_PIXEL BIT(2)
+#define VFE_0_REALIGN_BUF_CFG_CR_ODD_PIXEL BIT(3)
+#define VFE_0_REALIGN_BUF_CFG_HSUB_ENABLE BIT(4)
+
+#define CAMIF_TIMEOUT_SLEEP_US 1000
+#define CAMIF_TIMEOUT_ALL_US 1000000
+
+#define MSM_VFE_VFE0_UB_SIZE 2047
+#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
+#define MSM_VFE_VFE1_UB_SIZE 1535
+#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
+
+static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+{
+ u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
+
+	dev_dbg(dev, "VFE HW Version = 0x%08x\n", hw_version);
+}
+
+static u16 vfe_get_ub_size(u8 vfe_id)
+{
+ if (vfe_id == 0)
+ return MSM_VFE_VFE0_UB_SIZE_RDI;
+ else if (vfe_id == 1)
+ return MSM_VFE_VFE1_UB_SIZE_RDI;
+
+ return 0;
+}
+
+static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
+{
+ u32 bits = readl_relaxed(vfe->base + reg);
+
+ writel_relaxed(bits & ~clr_bits, vfe->base + reg);
+}
+
+static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
+{
+ u32 bits = readl_relaxed(vfe->base + reg);
+
+ writel_relaxed(bits | set_bits, vfe->base + reg);
+}
+
+static void vfe_global_reset(struct vfe_device *vfe)
+{
+ u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_IDLE_CGC |
+ VFE_0_GLOBAL_RESET_CMD_DSP |
+ VFE_0_GLOBAL_RESET_CMD_TESTGEN |
+ VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
+ VFE_0_GLOBAL_RESET_CMD_PM |
+ VFE_0_GLOBAL_RESET_CMD_REGISTER |
+ VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
+ VFE_0_GLOBAL_RESET_CMD_BUS |
+ VFE_0_GLOBAL_RESET_CMD_CAMIF |
+ VFE_0_GLOBAL_RESET_CMD_CORE;
+
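+	/* Enable only the reset-ack interrupt and flush the mask write before issuing the reset */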
+ writel_relaxed(BIT(31), vfe->base + VFE_0_IRQ_MASK_0);
+ wmb();
+ writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
+}
+
+static void vfe_halt_request(struct vfe_device *vfe)
+{
+ writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ,
+ vfe->base + VFE_0_BUS_BDG_CMD);
+}
+
+static void vfe_halt_clear(struct vfe_device *vfe)
+{
+ writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD);
+}
+
+static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
+ else
+ vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
+}
+
+static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ if (enable)
+ vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT);
+ else
+ vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm),
+ 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT);
+}
+
+#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
+
+static int vfe_word_per_line_by_pixel(u32 format, u32 pixel_per_line)
+{
+ int val = 0;
+
+ switch (format) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ val = CALC_WORD(pixel_per_line, 1, 8);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ val = CALC_WORD(pixel_per_line, 2, 8);
+ break;
+ }
+
+ return val;
+}
+
+static int vfe_word_per_line_by_bytes(u32 bytes_per_line)
+{
+ return CALC_WORD(bytes_per_line, 1, 8);
+}
+
+static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
+ u16 *width, u16 *height, u16 *bytesperline)
+{
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+ if (plane == 1)
+ *height /= 2;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_UYVY:
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[plane].bytesperline;
+ break;
+	}
+}
+
+static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
+ struct v4l2_pix_format_mplane *pix,
+ u8 plane, u32 enable)
+{
+ u32 reg;
+
+ if (enable) {
+ u16 width = 0, height = 0, bytesperline = 0, wpl;
+
+ vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline);
+
+ wpl = vfe_word_per_line_by_pixel(pix->pixelformat, width);
+
+ reg = height - 1;
+ reg |= ((wpl + 3) / 4 - 1) << 16;
+
+ writel_relaxed(reg, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
+
+ wpl = vfe_word_per_line_by_bytes(bytesperline);
+
+ reg = 0x3;
+ reg |= (height - 1) << 2;
+ reg |= ((wpl + 1) / 2) << 16;
+
+ writel_relaxed(reg, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
+ } else {
+ writel_relaxed(0, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
+ writel_relaxed(0, vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
+ }
+}
+
+static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per)
+{
+ u32 reg;
+
+ reg = readl_relaxed(vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
+
+ reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
+
+ reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT)
+ & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
+
+ writel_relaxed(reg,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
+}
+
+static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm,
+ u32 pattern)
+{
+ writel_relaxed(pattern,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
+}
+
+static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm,
+ u16 offset, u16 depth)
+{
+ u32 reg;
+
+ reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) |
+ depth;
+ writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
+}
+
+static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm)
+{
+ wmb();
+ writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
+ wmb();
+}
+
+static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr)
+{
+ writel_relaxed(addr,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
+}
+
+static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr)
+{
+ writel_relaxed(addr,
+ vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
+}
+
+static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm)
+{
+ u32 reg;
+
+ reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
+
+ return (reg >> wm) & 0x1;
+}
+
+static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable)
+{
+ if (enable)
+ writel_relaxed(0x101, vfe->base + VFE_0_BUS_CFG);
+ else
+ writel_relaxed(0, vfe->base + VFE_0_BUS_CFG);
+}
+
+static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id)
+{
+ u32 reg;
+
+ reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
+
+ reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
+ reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
+ VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg);
+
+ switch (id) {
+ case VFE_LINE_RDI0:
+ default:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI1:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI2:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ }
+
+ if (wm % 2 == 1)
+ reg <<= 16;
+
+ vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
+}
+
+static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm)
+{
+ writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
+ vfe->base +
+ VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
+}
+
+static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id)
+{
+ u32 reg;
+
+ reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg);
+
+ switch (id) {
+ case VFE_LINE_RDI0:
+ default:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI1:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ case VFE_LINE_RDI2:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+ break;
+ }
+
+ if (wm % 2 == 1)
+ reg <<= 16;
+
+ vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
+}
+
+static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
+ u8 enable)
+{
+ struct vfe_line *line = container_of(output, struct vfe_line, output);
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+
+ switch (p) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA <<
+ VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
+
+ if (output->wm_idx[0] % 2 == 1)
+ reg <<= 16;
+
+ if (enable)
+ vfe_reg_set(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
+ reg);
+ else
+ vfe_reg_clr(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
+ reg);
+
+ reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
+ reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
+
+ if (output->wm_idx[1] % 2 == 1)
+ reg <<= 16;
+
+ if (enable)
+ vfe_reg_set(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[1]),
+ reg);
+ else
+ vfe_reg_clr(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[1]),
+ reg);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_UYVY:
+ reg = VFE_0_BUS_XBAR_CFG_x_M_REALIGN_BUF_EN;
+ reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
+
+ if (p == V4L2_PIX_FMT_YUYV || p == V4L2_PIX_FMT_YVYU)
+ reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
+
+ if (output->wm_idx[0] % 2 == 1)
+ reg <<= 16;
+
+ if (enable)
+ vfe_reg_set(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
+ reg);
+ else
+ vfe_reg_clr(vfe,
+ VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
+ reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void vfe_set_realign_cfg(struct vfe_device *vfe, struct vfe_line *line,
+ u8 enable)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 val = VFE_0_MODULE_ZOOM_EN_REALIGN_BUF;
+
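+	/* The realign buffer is only needed for the packed YUYV-family formats */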
+ if (p != V4L2_PIX_FMT_YUYV && p != V4L2_PIX_FMT_YVYU &&
+ p != V4L2_PIX_FMT_VYUY && p != V4L2_PIX_FMT_UYVY)
+ return;
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_MODULE_ZOOM_EN, val);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_MODULE_ZOOM_EN, val);
+ return;
+ }
+
+ val = VFE_0_REALIGN_BUF_CFG_HSUB_ENABLE;
+
+ if (p == V4L2_PIX_FMT_UYVY || p == V4L2_PIX_FMT_YUYV)
+ val |= VFE_0_REALIGN_BUF_CFG_CR_ODD_PIXEL;
+ else
+ val |= VFE_0_REALIGN_BUF_CFG_CB_ODD_PIXEL;
+
+ writel_relaxed(val, vfe->base + VFE_0_REALIGN_BUF_CFG);
+}
+
+static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid)
+{
+ vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id),
+ VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
+
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id),
+ cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
+}
+
+static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id);
+ wmb();
+ writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE);
+ wmb();
+}
+
+static inline void vfe_reg_update_clear(struct vfe_device *vfe,
+ enum vfe_line_id line_id)
+{
+ vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id);
+}
+
+static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id line_id, u8 enable)
+{
+ u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) |
+ VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
+ u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) |
+ VFE_0_IRQ_MASK_1_RDIn_SOF(line_id);
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ }
+}
+
+static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp,
+ enum vfe_line_id line_id, u8 enable)
+{
+ struct vfe_output *output = &vfe->line[line_id].output;
+ unsigned int i;
+ u32 irq_en0;
+ u32 irq_en1;
+ u32 comp_mask = 0;
+
+ irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF;
+ irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF;
+ irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp);
+ irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
+ irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR;
+ for (i = 0; i < output->wm_num; i++) {
+ irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(
+ output->wm_idx[i]);
+ comp_mask |= (1 << output->wm_idx[i]) << comp * 8;
+ }
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+ vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
+ }
+}
+
+static void vfe_enable_irq_common(struct vfe_device *vfe)
+{
+ u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK;
+ u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION |
+ VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK;
+
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
+ vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
+}
+
+static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 val, even_cfg, odd_cfg;
+
+ writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG);
+
+ val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD;
+ writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0);
+
+ val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2;
+ writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
+
+ switch (line->fmt[MSM_VFE_PAD_SINK].code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
+ odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
+ break;
+ }
+
+ writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG);
+ writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG);
+}
+
+static inline u8 vfe_calc_interp_reso(u16 input, u16 output)
+{
+ if (input / output >= 16)
+ return 0;
+
+ if (input / output >= 8)
+ return 1;
+
+ if (input / output >= 4)
+ return 2;
+
+ return 3;
+}
+
+static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ u16 input, output;
+ u8 interp_reso;
+ u32 phase_mult;
+
+ writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].width - 1;
+ output = line->compose.width - 1;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE);
+
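+	/* v4.7 widens the phase step to 2^(14 + interp_reso), with interp_reso in bits [29:28] */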
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (14 + interp_reso)) / output;
+ reg = (interp_reso << 28) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].height - 1;
+ output = line->compose.height - 1;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (14 + interp_reso)) / output;
+ reg = (interp_reso << 28) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE);
+
+ writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].width - 1;
+ output = line->compose.width / 2 - 1;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (14 + interp_reso)) / output;
+ reg = (interp_reso << 28) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE);
+
+ input = line->fmt[MSM_VFE_PAD_SINK].height - 1;
+ output = line->compose.height - 1;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
+ output = line->compose.height / 2 - 1;
+ reg = (output << 16) | input;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE);
+
+ interp_reso = vfe_calc_interp_reso(input, output);
+ phase_mult = input * (1 << (14 + interp_reso)) / output;
+ reg = (interp_reso << 28) | phase_mult;
+ writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE);
+}
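
Each *_PHASE write above packs the interpolation resolution into the top bits (<< 28) over a fixed-point phase step of input * 2^(14 + interp_reso) / output; note this 4.7 variant programs sizes as N - 1 and uses the 14-bit base shift, where the removed 8x16 code further down uses 13 and bit 20. A standalone sketch of the packing (hypothetical names, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Phase register model: resolution in the high bits, fixed-point
     * input/output step in the low bits. */
    static uint32_t scale_phase_reg(uint16_t input, uint16_t output,
                                    uint8_t interp_reso)
    {
        uint32_t phase_mult = (uint32_t)input *
                              (1u << (14 + interp_reso)) / output;

        return ((uint32_t)interp_reso << 28) | phase_mult;
    }

    int main(void)
    {
        /* 1920 -> 960 luma downscale, programmed as size - 1. */
        printf("phase = 0x%08x\n", scale_phase_reg(1919, 959, 3));
        return 0;
    }
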
+
+static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
+ u32 reg;
+ u16 first, last;
+
+ first = line->crop.left;
+ last = line->crop.left + line->crop.width - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH);
+
+ first = line->crop.top;
+ last = line->crop.top + line->crop.height - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT);
+
+ first = line->crop.left / 2;
+ last = line->crop.left / 2 + line->crop.width / 2 - 1;
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH);
+
+ first = line->crop.top;
+ last = line->crop.top + line->crop.height - 1;
+ if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
+ first = line->crop.top / 2;
+ last = line->crop.top / 2 + line->crop.height / 2 - 1;
+ }
+ reg = (first << 16) | last;
+ writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT);
+}
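
The crop registers written above pack the first coordinate of the window into the high half-word and the last (inclusive) coordinate into the low half-word; the chroma window is halved horizontally, and also vertically for the 4:2:0 NV12/NV21 formats. A standalone sketch of the packing (hypothetical harness):

    #include <stdint.h>
    #include <stdio.h>

    /* (first << 16) | last, as in the CROP_ENC register writes. */
    static uint32_t crop_reg(uint16_t first, uint16_t last)
    {
        return ((uint32_t)first << 16) | last;
    }

    int main(void)
    {
        uint16_t left = 0, width = 1280;

        printf("Y:    0x%08x\n", crop_reg(left, left + width - 1));
        printf("CbCr: 0x%08x\n",
               crop_reg(left / 2, left / 2 + width / 2 - 1));
        return 0;
    }
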
+
+static void vfe_set_clamp_cfg(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 |
+ VFE_0_CLAMP_ENC_MAX_CFG_CH1 |
+ VFE_0_CLAMP_ENC_MAX_CFG_CH2;
+
+ writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG);
+
+ val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 |
+ VFE_0_CLAMP_ENC_MIN_CFG_CH1 |
+ VFE_0_CLAMP_ENC_MIN_CFG_CH2;
+
+ writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG);
+}
+
+static void vfe_set_qos(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG;
+ u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG;
+
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
+ writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
+}
+
+static void vfe_set_ds(struct vfe_device *vfe)
+{
+ u32 val = VFE_0_BUS_BDG_DS_CFG_0_CFG;
+ u32 val16 = VFE_0_BUS_BDG_DS_CFG_16_CFG;
+
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_0);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_1);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_2);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_3);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_4);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_5);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_6);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_7);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_8);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_9);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_10);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_11);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_12);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_13);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_14);
+ writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_15);
+ writel_relaxed(val16, vfe->base + VFE_0_BUS_BDG_DS_CFG_16);
+}
+
+static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable)
+{
+ /* intentionally empty - no CGC override is needed on VFE 4.7 */
+}
+
+static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
+{
+ u32 val;
+
+ switch (line->fmt[MSM_VFE_PAD_SINK].code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
+ break;
+ }
+
+ val |= VFE_0_CORE_CFG_COMPOSITE_REG_UPDATE_EN;
+ writel_relaxed(val, vfe->base + VFE_0_CORE_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
+ val |= (line->fmt[MSM_VFE_PAD_SINK].height - 1) << 16;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG);
+
+ val = line->fmt[MSM_VFE_PAD_SINK].height - 1;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_FRAMEDROP_PATTERN);
+
+ val = 0xffffffff;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN);
+
+ val = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
+ vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val);
+
+ val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN;
+ writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG);
+}
+
+static void vfe_set_camif_cmd(struct vfe_device *vfe, u8 enable)
+{
+ u32 cmd;
+
+ cmd = VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS | VFE_0_CAMIF_CMD_NO_CHANGE;
+ writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
+ wmb();
+
+ if (enable)
+ cmd = VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY;
+ else
+ cmd = VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY;
+
+ writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
+}
+
+static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable)
+{
+ u32 val_lens = VFE_0_MODULE_LENS_EN_DEMUX |
+ VFE_0_MODULE_LENS_EN_CHROMA_UPSAMPLE;
+ u32 val_zoom = VFE_0_MODULE_ZOOM_EN_SCALE_ENC |
+ VFE_0_MODULE_ZOOM_EN_CROP_ENC;
+
+ if (enable) {
+ vfe_reg_set(vfe, VFE_0_MODULE_LENS_EN, val_lens);
+ vfe_reg_set(vfe, VFE_0_MODULE_ZOOM_EN, val_zoom);
+ } else {
+ vfe_reg_clr(vfe, VFE_0_MODULE_LENS_EN, val_lens);
+ vfe_reg_clr(vfe, VFE_0_MODULE_ZOOM_EN, val_zoom);
+ }
+}
+
+static int vfe_camif_wait_for_stop(struct vfe_device *vfe, struct device *dev)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS,
+ val,
+ (val & VFE_0_CAMIF_STATUS_HALT),
+ CAMIF_TIMEOUT_SLEEP_US,
+ CAMIF_TIMEOUT_ALL_US);
+ if (ret < 0)
+ dev_err(dev, "%s: camif stop timeout\n", __func__);
+
+ return ret;
+}
+
+static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1)
+{
+ *value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0);
+ *value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1);
+
+ writel_relaxed(*value0, vfe->base + VFE_0_IRQ_CLEAR_0);
+ writel_relaxed(*value1, vfe->base + VFE_0_IRQ_CLEAR_1);
+
+ wmb();
+ writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
+}
+
+static void vfe_violation_read(struct vfe_device *vfe)
+{
+ u32 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
+
+ pr_err_ratelimited("VFE: violation = 0x%08x\n", violation);
+}
+
+/*
+ * vfe_isr - VFE module interrupt handler
+ * @irq: Interrupt line
+ * @dev: VFE device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t vfe_isr(int irq, void *dev)
+{
+ struct vfe_device *vfe = dev;
+ u32 value0, value1;
+ int i, j;
+
+ vfe->ops->isr_read(vfe, &value0, &value1);
+
+ trace_printk("VFE: status0 = 0x%08x, status1 = 0x%08x\n",
+ value0, value1);
+
+ if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
+ vfe->isr_ops.reset_ack(vfe);
+
+ if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION)
+ vfe->ops->violation_read(vfe);
+
+ if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK)
+ vfe->isr_ops.halt_ack(vfe);
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i))
+ vfe->isr_ops.reg_update(vfe, i);
+
+ if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF)
+ vfe->isr_ops.sof(vfe, VFE_LINE_PIX);
+
+ for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
+ if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i))
+ vfe->isr_ops.sof(vfe, i);
+
+ for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) {
+ vfe->isr_ops.comp_done(vfe, i);
+ for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++)
+ if (vfe->wm_output_map[j] == VFE_LINE_PIX)
+ value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j);
+ }
+
+ for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++)
+ if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i))
+ vfe->isr_ops.wm_done(vfe, i);
+
+ return IRQ_HANDLED;
+}
+
+const struct vfe_hw_ops vfe_ops_4_7 = {
+ .hw_version_read = vfe_hw_version_read,
+ .get_ub_size = vfe_get_ub_size,
+ .global_reset = vfe_global_reset,
+ .halt_request = vfe_halt_request,
+ .halt_clear = vfe_halt_clear,
+ .wm_enable = vfe_wm_enable,
+ .wm_frame_based = vfe_wm_frame_based,
+ .wm_line_based = vfe_wm_line_based,
+ .wm_set_framedrop_period = vfe_wm_set_framedrop_period,
+ .wm_set_framedrop_pattern = vfe_wm_set_framedrop_pattern,
+ .wm_set_ub_cfg = vfe_wm_set_ub_cfg,
+ .bus_reload_wm = vfe_bus_reload_wm,
+ .wm_set_ping_addr = vfe_wm_set_ping_addr,
+ .wm_set_pong_addr = vfe_wm_set_pong_addr,
+ .wm_get_ping_pong_status = vfe_wm_get_ping_pong_status,
+ .bus_enable_wr_if = vfe_bus_enable_wr_if,
+ .bus_connect_wm_to_rdi = vfe_bus_connect_wm_to_rdi,
+ .wm_set_subsample = vfe_wm_set_subsample,
+ .bus_disconnect_wm_from_rdi = vfe_bus_disconnect_wm_from_rdi,
+ .set_xbar_cfg = vfe_set_xbar_cfg,
+ .set_realign_cfg = vfe_set_realign_cfg,
+ .set_rdi_cid = vfe_set_rdi_cid,
+ .reg_update = vfe_reg_update,
+ .reg_update_clear = vfe_reg_update_clear,
+ .enable_irq_wm_line = vfe_enable_irq_wm_line,
+ .enable_irq_pix_line = vfe_enable_irq_pix_line,
+ .enable_irq_common = vfe_enable_irq_common,
+ .set_demux_cfg = vfe_set_demux_cfg,
+ .set_scale_cfg = vfe_set_scale_cfg,
+ .set_crop_cfg = vfe_set_crop_cfg,
+ .set_clamp_cfg = vfe_set_clamp_cfg,
+ .set_qos = vfe_set_qos,
+ .set_ds = vfe_set_ds,
+ .set_cgc_override = vfe_set_cgc_override,
+ .set_camif_cfg = vfe_set_camif_cfg,
+ .set_camif_cmd = vfe_set_camif_cmd,
+ .set_module_cfg = vfe_set_module_cfg,
+ .camif_wait_for_stop = vfe_camif_wait_for_stop,
+ .isr_read = vfe_isr_read,
+ .violation_read = vfe_violation_read,
+ .isr = vfe_isr,
+};
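
With the register layout now private to this file, the rest of the driver reaches VFE 4.7 hardware only through the vfe_hw_ops table above; the camss-vfe.c hunks below convert direct calls such as vfe_reg_update() into vfe->ops->reg_update(). A pared-down standalone sketch of that ops-table pattern (hypothetical types, not the driver's):

    #include <stdio.h>

    struct hw_ops {
        void (*global_reset)(void *priv);
    };

    struct dev_ctx {
        const struct hw_ops *ops;
        void *priv;
    };

    /* One implementation per hardware revision. */
    static void v4_7_global_reset(void *priv)
    {
        (void)priv;
        printf("4.7 reset sequence\n");
    }

    static const struct hw_ops ops_4_7 = {
        .global_reset = v4_7_global_reset,
    };

    int main(void)
    {
        struct dev_ctx dev = { .ops = &ops_4_7, .priv = 0 };

        dev.ops->global_reset(dev.priv);  /* version-agnostic core call */
        return 0;
    }
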
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index a6329a8a7c4a..ed6a557de65d 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -1,28 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* camss-vfe.c
*
* Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module
*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
-#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/spinlock_types.h>
#include <linux/spinlock.h>
#include <media/media-entity.h>
@@ -38,194 +30,7 @@
((const struct vfe_line (*)[]) &(ptr_line[-(ptr_line->id)]))
#define to_vfe(ptr_line) \
- container_of(vfe_line_array(ptr_line), struct vfe_device, ptr_line)
-
-#define VFE_0_HW_VERSION 0x000
-
-#define VFE_0_GLOBAL_RESET_CMD 0x00c
-#define VFE_0_GLOBAL_RESET_CMD_CORE (1 << 0)
-#define VFE_0_GLOBAL_RESET_CMD_CAMIF (1 << 1)
-#define VFE_0_GLOBAL_RESET_CMD_BUS (1 << 2)
-#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG (1 << 3)
-#define VFE_0_GLOBAL_RESET_CMD_REGISTER (1 << 4)
-#define VFE_0_GLOBAL_RESET_CMD_TIMER (1 << 5)
-#define VFE_0_GLOBAL_RESET_CMD_PM (1 << 6)
-#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR (1 << 7)
-#define VFE_0_GLOBAL_RESET_CMD_TESTGEN (1 << 8)
-
-#define VFE_0_MODULE_CFG 0x018
-#define VFE_0_MODULE_CFG_DEMUX (1 << 2)
-#define VFE_0_MODULE_CFG_CHROMA_UPSAMPLE (1 << 3)
-#define VFE_0_MODULE_CFG_SCALE_ENC (1 << 23)
-#define VFE_0_MODULE_CFG_CROP_ENC (1 << 27)
-
-#define VFE_0_CORE_CFG 0x01c
-#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4
-#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5
-#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6
-#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7
-
-#define VFE_0_IRQ_CMD 0x024
-#define VFE_0_IRQ_CMD_GLOBAL_CLEAR (1 << 0)
-
-#define VFE_0_IRQ_MASK_0 0x028
-#define VFE_0_IRQ_MASK_0_CAMIF_SOF (1 << 0)
-#define VFE_0_IRQ_MASK_0_CAMIF_EOF (1 << 1)
-#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5))
-#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \
- ((n) == VFE_LINE_PIX ? (1 << 4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n))
-#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8))
-#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25))
-#define VFE_0_IRQ_MASK_0_RESET_ACK (1 << 31)
-#define VFE_0_IRQ_MASK_1 0x02c
-#define VFE_0_IRQ_MASK_1_CAMIF_ERROR (1 << 0)
-#define VFE_0_IRQ_MASK_1_VIOLATION (1 << 7)
-#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK (1 << 8)
-#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) (1 << ((n) + 9))
-#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) (1 << ((n) + 29))
-
-#define VFE_0_IRQ_CLEAR_0 0x030
-#define VFE_0_IRQ_CLEAR_1 0x034
-
-#define VFE_0_IRQ_STATUS_0 0x038
-#define VFE_0_IRQ_STATUS_0_CAMIF_SOF (1 << 0)
-#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5))
-#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \
- ((n) == VFE_LINE_PIX ? (1 << 4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
-#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8))
-#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25))
-#define VFE_0_IRQ_STATUS_0_RESET_ACK (1 << 31)
-#define VFE_0_IRQ_STATUS_1 0x03c
-#define VFE_0_IRQ_STATUS_1_VIOLATION (1 << 7)
-#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK (1 << 8)
-#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) (1 << ((n) + 29))
-
-#define VFE_0_IRQ_COMPOSITE_MASK_0 0x40
-#define VFE_0_VIOLATION_STATUS 0x48
-
-#define VFE_0_BUS_CMD 0x4c
-#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) (1 << (x))
-
-#define VFE_0_BUS_CFG 0x050
-
-#define VFE_0_BUS_XBAR_CFG_x(x) (0x58 + 0x4 * ((x) / 2))
-#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN (1 << 1)
-#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4)
-#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8
-#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0
-#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 5
-#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 6
-#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 7
-
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x06c + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT 1
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x070 + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x074 + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x078 + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1F << 2)
-
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x07c + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x080 + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x084 + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
- (0x088 + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
- (0x08c + 0x24 * (n))
-#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
-
-#define VFE_0_BUS_PING_PONG_STATUS 0x268
-
-#define VFE_0_BUS_BDG_CMD 0x2c0
-#define VFE_0_BUS_BDG_CMD_HALT_REQ 1
-
-#define VFE_0_BUS_BDG_QOS_CFG_0 0x2c4
-#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5
-#define VFE_0_BUS_BDG_QOS_CFG_1 0x2c8
-#define VFE_0_BUS_BDG_QOS_CFG_2 0x2cc
-#define VFE_0_BUS_BDG_QOS_CFG_3 0x2d0
-#define VFE_0_BUS_BDG_QOS_CFG_4 0x2d4
-#define VFE_0_BUS_BDG_QOS_CFG_5 0x2d8
-#define VFE_0_BUS_BDG_QOS_CFG_6 0x2dc
-#define VFE_0_BUS_BDG_QOS_CFG_7 0x2e0
-#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa5
-
-#define VFE_0_RDI_CFG_x(x) (0x2e8 + (0x4 * (x)))
-#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
-#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28)
-#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
-#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4)
-#define VFE_0_RDI_CFG_x_RDI_EN_BIT (1 << 2)
-#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
-#define VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(r) (1 << (16 + (r)))
-
-#define VFE_0_CAMIF_CMD 0x2f4
-#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0
-#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1
-#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS (1 << 2)
-#define VFE_0_CAMIF_CFG 0x2f8
-#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN (1 << 6)
-#define VFE_0_CAMIF_FRAME_CFG 0x300
-#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x304
-#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x308
-#define VFE_0_CAMIF_SUBSAMPLE_CFG_0 0x30c
-#define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x314
-#define VFE_0_CAMIF_STATUS 0x31c
-#define VFE_0_CAMIF_STATUS_HALT (1 << 31)
-
-#define VFE_0_REG_UPDATE 0x378
-#define VFE_0_REG_UPDATE_RDIn(n) (1 << (1 + (n)))
-#define VFE_0_REG_UPDATE_line_n(n) \
- ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n))
-
-#define VFE_0_DEMUX_CFG 0x424
-#define VFE_0_DEMUX_CFG_PERIOD 0x3
-#define VFE_0_DEMUX_GAIN_0 0x428
-#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0)
-#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16)
-#define VFE_0_DEMUX_GAIN_1 0x42c
-#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0)
-#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16)
-#define VFE_0_DEMUX_EVEN_CFG 0x438
-#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac
-#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c
-#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca
-#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9
-#define VFE_0_DEMUX_ODD_CFG 0x43c
-#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac
-#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c
-#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca
-#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9
-
-#define VFE_0_SCALE_ENC_Y_CFG 0x75c
-#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x760
-#define VFE_0_SCALE_ENC_Y_H_PHASE 0x764
-#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x76c
-#define VFE_0_SCALE_ENC_Y_V_PHASE 0x770
-#define VFE_0_SCALE_ENC_CBCR_CFG 0x778
-#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x77c
-#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x780
-#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x790
-#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x794
-
-#define VFE_0_CROP_ENC_Y_WIDTH 0x854
-#define VFE_0_CROP_ENC_Y_HEIGHT 0x858
-#define VFE_0_CROP_ENC_CBCR_WIDTH 0x85c
-#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x860
-
-#define VFE_0_CLAMP_ENC_MAX_CFG 0x874
-#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0)
-#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8)
-#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16)
-#define VFE_0_CLAMP_ENC_MIN_CFG 0x878
-#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0)
-#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8)
-#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16)
-
-#define VFE_0_CGC_OVERRIDE_1 0x974
-#define VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(x) (1 << (x))
+ container_of(vfe_line_array(ptr_line), struct vfe_device, line)
/* VFE reset timeout */
#define VFE_RESET_TIMEOUT_MS 50
@@ -238,633 +43,230 @@
#define VFE_NEXT_SOF_MS 500
-#define CAMIF_TIMEOUT_SLEEP_US 1000
-#define CAMIF_TIMEOUT_ALL_US 1000000
-
#define SCALER_RATIO_MAX 16
-static const struct {
+struct vfe_format {
u32 code;
u8 bpp;
-} vfe_formats[] = {
- {
- MEDIA_BUS_FMT_UYVY8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_VYUY8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_YUYV8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_YVYU8_2X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SBGGR8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SGBRG8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SGRBG8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SRGGB8_1X8,
- 8,
- },
- {
- MEDIA_BUS_FMT_SBGGR10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SGBRG10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SGRBG10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SRGGB10_1X10,
- 10,
- },
- {
- MEDIA_BUS_FMT_SBGGR12_1X12,
- 12,
- },
- {
- MEDIA_BUS_FMT_SGBRG12_1X12,
- 12,
- },
- {
- MEDIA_BUS_FMT_SGRBG12_1X12,
- 12,
- },
- {
- MEDIA_BUS_FMT_SRGGB12_1X12,
- 12,
- }
+};
+
+static const struct vfe_format formats_rdi_8x16[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
+ { MEDIA_BUS_FMT_Y10_1X10, 10 },
+};
+
+static const struct vfe_format formats_pix_8x16[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+};
+
+static const struct vfe_format formats_rdi_8x96[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, 16 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, 14 },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, 14 },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, 14 },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, 14 },
+ { MEDIA_BUS_FMT_Y10_1X10, 10 },
+ { MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, 16 },
+};
+
+static const struct vfe_format formats_pix_8x96[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
+ { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
+ { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
+ { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
};
/*
* vfe_get_bpp - map media bus format to bits per pixel
+ * @formats: supported media bus formats array
+ * @nformats: size of @formats array
* @code: media bus format code
*
* Return number of bits per pixel
*/
-static u8 vfe_get_bpp(u32 code)
+static u8 vfe_get_bpp(const struct vfe_format *formats,
+ unsigned int nformats, u32 code)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(vfe_formats); i++)
- if (code == vfe_formats[i].code)
- return vfe_formats[i].bpp;
+ for (i = 0; i < nformats; i++)
+ if (code == formats[i].code)
+ return formats[i].bpp;
WARN(1, "Unknown format\n");
- return vfe_formats[0].bpp;
-}
-
-static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
-{
- u32 bits = readl_relaxed(vfe->base + reg);
-
- writel_relaxed(bits & ~clr_bits, vfe->base + reg);
-}
-
-static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
-{
- u32 bits = readl_relaxed(vfe->base + reg);
-
- writel_relaxed(bits | set_bits, vfe->base + reg);
-}
-
-static void vfe_global_reset(struct vfe_device *vfe)
-{
- u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_TESTGEN |
- VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
- VFE_0_GLOBAL_RESET_CMD_PM |
- VFE_0_GLOBAL_RESET_CMD_TIMER |
- VFE_0_GLOBAL_RESET_CMD_REGISTER |
- VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
- VFE_0_GLOBAL_RESET_CMD_BUS |
- VFE_0_GLOBAL_RESET_CMD_CAMIF |
- VFE_0_GLOBAL_RESET_CMD_CORE;
-
- writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
-}
-
-static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable)
-{
- if (enable)
- vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
- 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
- else
- vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
- 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
+ return formats[0].bpp;
}
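
vfe_get_bpp() is now a pure lookup over whichever per-SoC format table the caller passes, falling back to the table's first entry for unknown codes. A standalone sketch with a truncated, placeholder table (the codes below are illustrative, not the real MEDIA_BUS_FMT_* values):

    #include <stdint.h>
    #include <stdio.h>

    struct fmt {
        uint32_t code;
        uint8_t bpp;
    };

    static const struct fmt formats[] = {
        { 0x2006, 8 },   /* stand-in for a YUV 8-bit code */
        { 0x3007, 10 },  /* stand-in for a Bayer 10-bit code */
    };

    static uint8_t get_bpp(const struct fmt *f, unsigned int n,
                           uint32_t code)
    {
        unsigned int i;

        for (i = 0; i < n; i++)
            if (f[i].code == code)
                return f[i].bpp;

        return f[0].bpp;  /* unknown code: fall back to entry 0 */
    }

    int main(void)
    {
        printf("bpp = %u\n", get_bpp(formats, 2, 0x3007));
        return 0;
    }
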
-static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
-{
- if (enable)
- vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
- 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
- else
- vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
- 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
-}
-
-#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
-
-static int vfe_word_per_line(uint32_t format, uint32_t pixel_per_line)
+static u32 vfe_find_code(u32 *code, unsigned int n_code,
+ unsigned int index, u32 req_code)
{
- int val = 0;
-
- switch (format) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- val = CALC_WORD(pixel_per_line, 1, 8);
- break;
- case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_YVYU:
- case V4L2_PIX_FMT_UYVY:
- case V4L2_PIX_FMT_VYUY:
- val = CALC_WORD(pixel_per_line, 2, 8);
- break;
- }
-
- return val;
-}
-
-static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
- u16 *width, u16 *height, u16 *bytesperline)
-{
- switch (pix->pixelformat) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- *width = pix->width;
- *height = pix->height;
- *bytesperline = pix->plane_fmt[0].bytesperline;
- if (plane == 1)
- *height /= 2;
- break;
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- *width = pix->width;
- *height = pix->height;
- *bytesperline = pix->plane_fmt[0].bytesperline;
- break;
- }
-}
-
-static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
- struct v4l2_pix_format_mplane *pix,
- u8 plane, u32 enable)
-{
- u32 reg;
-
- if (enable) {
- u16 width = 0, height = 0, bytesperline = 0, wpl;
-
- vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline);
-
- wpl = vfe_word_per_line(pix->pixelformat, width);
-
- reg = height - 1;
- reg |= ((wpl + 1) / 2 - 1) << 16;
-
- writel_relaxed(reg, vfe->base +
- VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
-
- wpl = vfe_word_per_line(pix->pixelformat, bytesperline);
-
- reg = 0x3;
- reg |= (height - 1) << 4;
- reg |= wpl << 16;
-
- writel_relaxed(reg, vfe->base +
- VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
- } else {
- writel_relaxed(0, vfe->base +
- VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
- writel_relaxed(0, vfe->base +
- VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
- }
-}
-
-static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per)
-{
- u32 reg;
-
- reg = readl_relaxed(vfe->base +
- VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
-
- reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
-
- reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT)
- & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
-
- writel_relaxed(reg,
- vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
-}
-
-static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm,
- u32 pattern)
-{
- writel_relaxed(pattern,
- vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
-}
-
-static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm, u16 offset,
- u16 depth)
-{
- u32 reg;
-
- reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) |
- depth;
- writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
-}
-
-static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm)
-{
- wmb();
- writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
- wmb();
-}
-
-static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr)
-{
- writel_relaxed(addr,
- vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
-}
-
-static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr)
-{
- writel_relaxed(addr,
- vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
-}
-
-static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm)
-{
- u32 reg;
-
- reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
-
- return (reg >> wm) & 0x1;
-}
-
-static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable)
-{
- if (enable)
- writel_relaxed(0x10000009, vfe->base + VFE_0_BUS_CFG);
- else
- writel_relaxed(0, vfe->base + VFE_0_BUS_CFG);
-}
-
-static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm,
- enum vfe_line_id id)
-{
- u32 reg;
-
- reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
- reg |= VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
- vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
-
- reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
- reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
- VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
- vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg);
-
- switch (id) {
- case VFE_LINE_RDI0:
- default:
- reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
- VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
- break;
- case VFE_LINE_RDI1:
- reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
- VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
- break;
- case VFE_LINE_RDI2:
- reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
- VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
- break;
- }
-
- if (wm % 2 == 1)
- reg <<= 16;
-
- vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
-}
-
-static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm)
-{
- writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
- vfe->base +
- VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
-}
-
-static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm,
- enum vfe_line_id id)
-{
- u32 reg;
-
- reg = VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
- vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(0), reg);
-
- reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
- vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg);
-
- switch (id) {
- case VFE_LINE_RDI0:
- default:
- reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
- VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
- break;
- case VFE_LINE_RDI1:
- reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
- VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
- break;
- case VFE_LINE_RDI2:
- reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
- VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
- break;
- }
-
- if (wm % 2 == 1)
- reg <<= 16;
-
- vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
-}
+ int i;
-static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
- u8 enable)
-{
- struct vfe_line *line = container_of(output, struct vfe_line, output);
- u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
- u32 reg;
- unsigned int i;
+ if (!req_code && (index >= n_code))
+ return 0;
- for (i = 0; i < output->wm_num; i++) {
- if (i == 0) {
- reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA <<
- VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
- } else if (i == 1) {
- reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
- if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
- reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
+ for (i = 0; i < n_code; i++)
+ if (req_code) {
+ if (req_code == code[i])
+ return req_code;
} else {
- /* On current devices output->wm_num is always <= 2 */
- break;
+ if (i == index)
+ return code[i];
}
- if (output->wm_idx[i] % 2 == 1)
- reg <<= 16;
-
- if (enable)
- vfe_reg_set(vfe,
- VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
- reg);
- else
- vfe_reg_clr(vfe,
- VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
- reg);
- }
-}
-
-static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid)
-{
- vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id),
- VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
-
- vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id),
- cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
+ return code[0];
}
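
vfe_find_code() folds two behaviours into one helper: with a non-zero req_code it validates that the requested source code is in the list (falling back to code[0] on a miss), and with req_code == 0 it enumerates the list by index, returning 0 once the index runs past the end. A standalone sketch with a quick self-check (hypothetical harness):

    #include <assert.h>
    #include <stdint.h>

    /* Same dual enumerate/validate behaviour as vfe_find_code(). */
    static uint32_t find_code(const uint32_t *code, unsigned int n_code,
                              unsigned int index, uint32_t req_code)
    {
        unsigned int i;

        if (!req_code && index >= n_code)
            return 0;

        for (i = 0; i < n_code; i++)
            if (req_code) {
                if (req_code == code[i])
                    return req_code;
            } else {
                if (i == index)
                    return code[i];
            }

        return code[0];
    }

    int main(void)
    {
        uint32_t codes[] = { 10, 20 };

        assert(find_code(codes, 2, 1, 0) == 20);   /* enumerate */
        assert(find_code(codes, 2, 0, 20) == 20);  /* validate, hit */
        assert(find_code(codes, 2, 0, 30) == 10);  /* miss: fallback */
        assert(find_code(codes, 2, 5, 0) == 0);    /* past the end */
        return 0;
    }
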
-static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
+ unsigned int index, u32 src_req_code)
{
- vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id);
- wmb();
- writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE);
- wmb();
-}
-
-static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm,
- enum vfe_line_id line_id, u8 enable)
-{
- u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) |
- VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
- u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) |
- VFE_0_IRQ_MASK_1_RDIn_SOF(line_id);
-
- if (enable) {
- vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
- vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
- } else {
- vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
- vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
- }
-}
-
-static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp,
- enum vfe_line_id line_id, u8 enable)
-{
- struct vfe_output *output = &vfe->line[line_id].output;
- unsigned int i;
- u32 irq_en0;
- u32 irq_en1;
- u32 comp_mask = 0;
-
- irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF;
- irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF;
- irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp);
- irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
- irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR;
- for (i = 0; i < output->wm_num; i++) {
- irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(
- output->wm_idx[i]);
- comp_mask |= (1 << output->wm_idx[i]) << comp * 8;
- }
-
- if (enable) {
- vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
- vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
- vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
- } else {
- vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
- vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
- vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
- }
-}
-
-static void vfe_enable_irq_common(struct vfe_device *vfe)
-{
- u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK;
- u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION |
- VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK;
-
- vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
- vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
-}
-
-static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
-{
- u32 val, even_cfg, odd_cfg;
-
- writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG);
-
- val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD;
- writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0);
-
- val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2;
- writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
+ struct vfe_device *vfe = to_vfe(line);
- switch (line->fmt[MSM_VFE_PAD_SINK].code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
- even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
- odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
- break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
- even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
- odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
- break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
- default:
- even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
- odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
- break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
- even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
- odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
- break;
- }
+ if (vfe->camss->version == CAMSS_8x16)
+ switch (sink_code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_YVYU8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ default:
+ if (index > 0)
+ return 0;
- writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG);
- writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG);
-}
+ return sink_code;
+ }
+ else if (vfe->camss->version == CAMSS_8x96)
+ switch (sink_code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YVYU8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_VYUY8_1_5X8,
+ };
+
+ return vfe_find_code(src_code, ARRAY_SIZE(src_code),
+ index, src_req_code);
+ }
+ default:
+ if (index > 0)
+ return 0;
-static inline u8 vfe_calc_interp_reso(u16 input, u16 output)
-{
- if (input / output >= 16)
+ return sink_code;
+ }
+ else
return 0;
-
- if (input / output >= 8)
- return 1;
-
- if (input / output >= 4)
- return 2;
-
- return 3;
-}
-
-static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line)
-{
- u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
- u32 reg;
- u16 input, output;
- u8 interp_reso;
- u32 phase_mult;
-
- writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG);
-
- input = line->fmt[MSM_VFE_PAD_SINK].width;
- output = line->compose.width;
- reg = (output << 16) | input;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE);
-
- interp_reso = vfe_calc_interp_reso(input, output);
- phase_mult = input * (1 << (13 + interp_reso)) / output;
- reg = (interp_reso << 20) | phase_mult;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE);
-
- input = line->fmt[MSM_VFE_PAD_SINK].height;
- output = line->compose.height;
- reg = (output << 16) | input;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE);
-
- interp_reso = vfe_calc_interp_reso(input, output);
- phase_mult = input * (1 << (13 + interp_reso)) / output;
- reg = (interp_reso << 20) | phase_mult;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE);
-
- writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG);
-
- input = line->fmt[MSM_VFE_PAD_SINK].width;
- output = line->compose.width / 2;
- reg = (output << 16) | input;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE);
-
- interp_reso = vfe_calc_interp_reso(input, output);
- phase_mult = input * (1 << (13 + interp_reso)) / output;
- reg = (interp_reso << 20) | phase_mult;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE);
-
- input = line->fmt[MSM_VFE_PAD_SINK].height;
- output = line->compose.height;
- if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
- output = line->compose.height / 2;
- reg = (output << 16) | input;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE);
-
- interp_reso = vfe_calc_interp_reso(input, output);
- phase_mult = input * (1 << (13 + interp_reso)) / output;
- reg = (interp_reso << 20) | phase_mult;
- writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE);
-}
-
-static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line)
-{
- u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
- u32 reg;
- u16 first, last;
-
- first = line->crop.left;
- last = line->crop.left + line->crop.width - 1;
- reg = (first << 16) | last;
- writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH);
-
- first = line->crop.top;
- last = line->crop.top + line->crop.height - 1;
- reg = (first << 16) | last;
- writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT);
-
- first = line->crop.left / 2;
- last = line->crop.left / 2 + line->crop.width / 2 - 1;
- reg = (first << 16) | last;
- writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH);
-
- first = line->crop.top;
- last = line->crop.top + line->crop.height - 1;
- if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
- first = line->crop.top / 2;
- last = line->crop.top / 2 + line->crop.height / 2 - 1;
- }
- reg = (first << 16) | last;
- writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT);
-}
-
-static void vfe_set_clamp_cfg(struct vfe_device *vfe)
-{
- u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 |
- VFE_0_CLAMP_ENC_MAX_CFG_CH1 |
- VFE_0_CLAMP_ENC_MAX_CFG_CH2;
-
- writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG);
-
- val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 |
- VFE_0_CLAMP_ENC_MIN_CFG_CH1 |
- VFE_0_CLAMP_ENC_MIN_CFG_CH2;
-
- writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG);
}
/*
@@ -879,12 +281,12 @@ static int vfe_reset(struct vfe_device *vfe)
reinit_completion(&vfe->reset_complete);
- vfe_global_reset(vfe);
+ vfe->ops->global_reset(vfe);
time = wait_for_completion_timeout(&vfe->reset_complete,
msecs_to_jiffies(VFE_RESET_TIMEOUT_MS));
if (!time) {
- dev_err(to_device(vfe), "VFE reset timeout\n");
+ dev_err(vfe->camss->dev, "VFE reset timeout\n");
return -EIO;
}
@@ -903,13 +305,12 @@ static int vfe_halt(struct vfe_device *vfe)
reinit_completion(&vfe->halt_complete);
- writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ,
- vfe->base + VFE_0_BUS_BDG_CMD);
+ vfe->ops->halt_request(vfe);
time = wait_for_completion_timeout(&vfe->halt_complete,
msecs_to_jiffies(VFE_HALT_TIMEOUT_MS));
if (!time) {
- dev_err(to_device(vfe), "VFE halt timeout\n");
+ dev_err(vfe->camss->dev, "VFE halt timeout\n");
return -EIO;
}
@@ -927,10 +328,6 @@ static void vfe_init_outputs(struct vfe_device *vfe)
output->buf[0] = NULL;
output->buf[1] = NULL;
INIT_LIST_HEAD(&output->pending_bufs);
-
- output->wm_num = 1;
- if (vfe->line[i].id == VFE_LINE_PIX)
- output->wm_num = 2;
}
}
@@ -942,115 +339,6 @@ static void vfe_reset_output_maps(struct vfe_device *vfe)
vfe->wm_output_map[i] = VFE_LINE_NONE;
}
-static void vfe_set_qos(struct vfe_device *vfe)
-{
- u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG;
- u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG;
-
- writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
- writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
- writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
- writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
- writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
- writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
- writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
- writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
-}
-
-static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable)
-{
- u32 val = VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(wm);
-
- if (enable)
- vfe_reg_set(vfe, VFE_0_CGC_OVERRIDE_1, val);
- else
- vfe_reg_clr(vfe, VFE_0_CGC_OVERRIDE_1, val);
-
- wmb();
-}
-
-static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable)
-{
- u32 val = VFE_0_MODULE_CFG_DEMUX |
- VFE_0_MODULE_CFG_CHROMA_UPSAMPLE |
- VFE_0_MODULE_CFG_SCALE_ENC |
- VFE_0_MODULE_CFG_CROP_ENC;
-
- if (enable)
- writel_relaxed(val, vfe->base + VFE_0_MODULE_CFG);
- else
- writel_relaxed(0x0, vfe->base + VFE_0_MODULE_CFG);
-}
-
-static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
-{
- u32 val;
-
- switch (line->fmt[MSM_VFE_PAD_SINK].code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
- val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
- break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
- val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
- break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
- default:
- val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
- break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
- val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
- break;
- }
-
- writel_relaxed(val, vfe->base + VFE_0_CORE_CFG);
-
- val = line->fmt[MSM_VFE_PAD_SINK].width * 2;
- val |= line->fmt[MSM_VFE_PAD_SINK].height << 16;
- writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG);
-
- val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
- writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG);
-
- val = line->fmt[MSM_VFE_PAD_SINK].height - 1;
- writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG);
-
- val = 0xffffffff;
- writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG_0);
-
- val = 0xffffffff;
- writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN);
-
- val = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
- vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val);
-
- val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN;
- writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG);
-}
-
-static void vfe_set_camif_cmd(struct vfe_device *vfe, u32 cmd)
-{
- writel_relaxed(VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS,
- vfe->base + VFE_0_CAMIF_CMD);
-
- writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
-}
-
-static int vfe_camif_wait_for_stop(struct vfe_device *vfe)
-{
- u32 val;
- int ret;
-
- ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS,
- val,
- (val & VFE_0_CAMIF_STATUS_HALT),
- CAMIF_TIMEOUT_SLEEP_US,
- CAMIF_TIMEOUT_ALL_US);
- if (ret < 0)
- dev_err(to_device(vfe), "%s: camif stop timeout\n", __func__);
-
- return ret;
-}
-
static void vfe_output_init_addrs(struct vfe_device *vfe,
struct vfe_output *output, u8 sync)
{
@@ -1071,10 +359,10 @@ static void vfe_output_init_addrs(struct vfe_device *vfe,
else
pong_addr = ping_addr;
- vfe_wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr);
- vfe_wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr);
+ vfe->ops->wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr);
+ vfe->ops->wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr);
if (sync)
- vfe_bus_reload_wm(vfe, output->wm_idx[i]);
+ vfe->ops->bus_reload_wm(vfe, output->wm_idx[i]);
}
}
@@ -1090,9 +378,9 @@ static void vfe_output_update_ping_addr(struct vfe_device *vfe,
else
addr = 0;
- vfe_wm_set_ping_addr(vfe, output->wm_idx[i], addr);
+ vfe->ops->wm_set_ping_addr(vfe, output->wm_idx[i], addr);
if (sync)
- vfe_bus_reload_wm(vfe, output->wm_idx[i]);
+ vfe->ops->bus_reload_wm(vfe, output->wm_idx[i]);
}
}
@@ -1108,9 +396,9 @@ static void vfe_output_update_pong_addr(struct vfe_device *vfe,
else
addr = 0;
- vfe_wm_set_pong_addr(vfe, output->wm_idx[i], addr);
+ vfe->ops->wm_set_pong_addr(vfe, output->wm_idx[i], addr);
if (sync)
- vfe_bus_reload_wm(vfe, output->wm_idx[i]);
+ vfe->ops->bus_reload_wm(vfe, output->wm_idx[i]);
}
}
@@ -1154,12 +442,13 @@ static void vfe_output_frame_drop(struct vfe_device *vfe,
drop_period = VFE_FRAME_DROP_VAL + output->drop_update_idx;
for (i = 0; i < output->wm_num; i++) {
- vfe_wm_set_framedrop_period(vfe, output->wm_idx[i],
- drop_period);
- vfe_wm_set_framedrop_pattern(vfe, output->wm_idx[i],
- drop_pattern);
+ vfe->ops->wm_set_framedrop_period(vfe, output->wm_idx[i],
+ drop_period);
+ vfe->ops->wm_set_framedrop_pattern(vfe, output->wm_idx[i],
+ drop_pattern);
}
- vfe_reg_update(vfe, container_of(output, struct vfe_line, output)->id);
+ vfe->ops->reg_update(vfe,
+ container_of(output, struct vfe_line, output)->id);
}
static struct camss_buffer *vfe_buf_get_pending(struct vfe_output *output)
@@ -1214,7 +503,7 @@ static void vfe_buf_update_wm_on_next(struct vfe_device *vfe,
break;
case VFE_OUTPUT_SINGLE:
default:
- dev_err_ratelimited(to_device(vfe),
+ dev_err_ratelimited(vfe->camss->dev,
"Next buf in wrong state! %d\n",
output->state);
break;
@@ -1234,7 +523,7 @@ static void vfe_buf_update_wm_on_last(struct vfe_device *vfe,
vfe_output_frame_drop(vfe, output, 0);
break;
default:
- dev_err_ratelimited(to_device(vfe),
+ dev_err_ratelimited(vfe->camss->dev,
"Last buff in wrong state! %d\n",
output->state);
break;
@@ -1263,7 +552,7 @@ static void vfe_buf_update_wm_on_new(struct vfe_device *vfe,
output->state = VFE_OUTPUT_CONTINUOUS;
} else {
vfe_buf_add_pending(output, new_buf);
- dev_err_ratelimited(to_device(vfe),
+ dev_err_ratelimited(vfe->camss->dev,
"Inactive buffer is busy\n");
}
break;
@@ -1278,7 +567,7 @@ static void vfe_buf_update_wm_on_new(struct vfe_device *vfe,
output->state = VFE_OUTPUT_SINGLE;
} else {
vfe_buf_add_pending(output, new_buf);
- dev_err_ratelimited(to_device(vfe),
+ dev_err_ratelimited(vfe->camss->dev,
"Output idle with buffer set!\n");
}
break;
@@ -1294,6 +583,7 @@ static int vfe_get_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
+ struct v4l2_format *f = &line->video_out.active_fmt;
unsigned long flags;
int i;
int wm_idx;
@@ -1302,17 +592,29 @@ static int vfe_get_output(struct vfe_line *line)
output = &line->output;
if (output->state != VFE_OUTPUT_OFF) {
- dev_err(to_device(vfe), "Output is running\n");
+ dev_err(vfe->camss->dev, "Output is running\n");
goto error;
}
output->state = VFE_OUTPUT_RESERVED;
output->active_buf = 0;
+ switch (f->fmt.pix_mp.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ output->wm_num = 2;
+ break;
+ default:
+ output->wm_num = 1;
+ break;
+ }
+
for (i = 0; i < output->wm_num; i++) {
wm_idx = vfe_reserve_wm(vfe, line->id);
if (wm_idx < 0) {
- dev_err(to_device(vfe), "Can not reserve wm\n");
+ dev_err(vfe->camss->dev, "Can not reserve wm\n");
goto error_get_wm;
}
output->wm_idx[i] = wm_idx;
@@ -1356,27 +658,21 @@ static int vfe_enable_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
+ const struct vfe_hw_ops *ops = vfe->ops;
unsigned long flags;
unsigned int i;
u16 ub_size;
- switch (vfe->id) {
- case 0:
- ub_size = MSM_VFE_VFE0_UB_SIZE_RDI;
- break;
- case 1:
- ub_size = MSM_VFE_VFE1_UB_SIZE_RDI;
- break;
- default:
+ ub_size = ops->get_ub_size(vfe->id);
+ if (!ub_size)
return -EINVAL;
- }
spin_lock_irqsave(&vfe->output_lock, flags);
- vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line->id);
+ ops->reg_update_clear(vfe, line->id);
if (output->state != VFE_OUTPUT_RESERVED) {
- dev_err(to_device(vfe), "Output is not in reserved state %d\n",
+ dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
output->state);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return -EINVAL;
@@ -1418,42 +714,43 @@ static int vfe_enable_output(struct vfe_line *line)
vfe_output_init_addrs(vfe, output, 0);
if (line->id != VFE_LINE_PIX) {
- vfe_set_cgc_override(vfe, output->wm_idx[0], 1);
- vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1);
- vfe_bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id);
- vfe_wm_set_subsample(vfe, output->wm_idx[0]);
- vfe_set_rdi_cid(vfe, line->id, 0);
- vfe_wm_set_ub_cfg(vfe, output->wm_idx[0],
- (ub_size + 1) * output->wm_idx[0], ub_size);
- vfe_wm_frame_based(vfe, output->wm_idx[0], 1);
- vfe_wm_enable(vfe, output->wm_idx[0], 1);
- vfe_bus_reload_wm(vfe, output->wm_idx[0]);
+ ops->set_cgc_override(vfe, output->wm_idx[0], 1);
+ ops->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1);
+ ops->bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id);
+ ops->wm_set_subsample(vfe, output->wm_idx[0]);
+ ops->set_rdi_cid(vfe, line->id, 0);
+ ops->wm_set_ub_cfg(vfe, output->wm_idx[0],
+ (ub_size + 1) * output->wm_idx[0], ub_size);
+ ops->wm_frame_based(vfe, output->wm_idx[0], 1);
+ ops->wm_enable(vfe, output->wm_idx[0], 1);
+ ops->bus_reload_wm(vfe, output->wm_idx[0]);
} else {
ub_size /= output->wm_num;
for (i = 0; i < output->wm_num; i++) {
- vfe_set_cgc_override(vfe, output->wm_idx[i], 1);
- vfe_wm_set_subsample(vfe, output->wm_idx[i]);
- vfe_wm_set_ub_cfg(vfe, output->wm_idx[i],
- (ub_size + 1) * output->wm_idx[i],
- ub_size);
- vfe_wm_line_based(vfe, output->wm_idx[i],
+ ops->set_cgc_override(vfe, output->wm_idx[i], 1);
+ ops->wm_set_subsample(vfe, output->wm_idx[i]);
+ ops->wm_set_ub_cfg(vfe, output->wm_idx[i],
+ (ub_size + 1) * output->wm_idx[i],
+ ub_size);
+ ops->wm_line_based(vfe, output->wm_idx[i],
&line->video_out.active_fmt.fmt.pix_mp,
i, 1);
- vfe_wm_enable(vfe, output->wm_idx[i], 1);
- vfe_bus_reload_wm(vfe, output->wm_idx[i]);
+ ops->wm_enable(vfe, output->wm_idx[i], 1);
+ ops->bus_reload_wm(vfe, output->wm_idx[i]);
}
- vfe_enable_irq_pix_line(vfe, 0, line->id, 1);
- vfe_set_module_cfg(vfe, 1);
- vfe_set_camif_cfg(vfe, line);
- vfe_set_xbar_cfg(vfe, output, 1);
- vfe_set_demux_cfg(vfe, line);
- vfe_set_scale_cfg(vfe, line);
- vfe_set_crop_cfg(vfe, line);
- vfe_set_clamp_cfg(vfe);
- vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY);
+ ops->enable_irq_pix_line(vfe, 0, line->id, 1);
+ ops->set_module_cfg(vfe, 1);
+ ops->set_camif_cfg(vfe, line);
+ ops->set_realign_cfg(vfe, line, 1);
+ ops->set_xbar_cfg(vfe, output, 1);
+ ops->set_demux_cfg(vfe, line);
+ ops->set_scale_cfg(vfe, line);
+ ops->set_crop_cfg(vfe, line);
+ ops->set_clamp_cfg(vfe);
+ ops->set_camif_cmd(vfe, 1);
}
- vfe_reg_update(vfe, line->id);
+ ops->reg_update(vfe, line->id);
spin_unlock_irqrestore(&vfe->output_lock, flags);
@@ -1464,6 +761,7 @@ static int vfe_disable_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
+ const struct vfe_hw_ops *ops = vfe->ops;
unsigned long flags;
unsigned long time;
unsigned int i;
@@ -1476,43 +774,45 @@ static int vfe_disable_output(struct vfe_line *line)
time = wait_for_completion_timeout(&output->sof,
msecs_to_jiffies(VFE_NEXT_SOF_MS));
if (!time)
- dev_err(to_device(vfe), "VFE sof timeout\n");
+ dev_err(vfe->camss->dev, "VFE sof timeout\n");
spin_lock_irqsave(&vfe->output_lock, flags);
for (i = 0; i < output->wm_num; i++)
- vfe_wm_enable(vfe, output->wm_idx[i], 0);
+ ops->wm_enable(vfe, output->wm_idx[i], 0);
- vfe_reg_update(vfe, line->id);
+ ops->reg_update(vfe, line->id);
output->wait_reg_update = 1;
spin_unlock_irqrestore(&vfe->output_lock, flags);
time = wait_for_completion_timeout(&output->reg_update,
msecs_to_jiffies(VFE_NEXT_SOF_MS));
if (!time)
- dev_err(to_device(vfe), "VFE reg update timeout\n");
+ dev_err(vfe->camss->dev, "VFE reg update timeout\n");
spin_lock_irqsave(&vfe->output_lock, flags);
if (line->id != VFE_LINE_PIX) {
- vfe_wm_frame_based(vfe, output->wm_idx[0], 0);
- vfe_bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0], line->id);
- vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0);
- vfe_set_cgc_override(vfe, output->wm_idx[0], 0);
+ ops->wm_frame_based(vfe, output->wm_idx[0], 0);
+ ops->bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0],
+ line->id);
+ ops->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0);
+ ops->set_cgc_override(vfe, output->wm_idx[0], 0);
spin_unlock_irqrestore(&vfe->output_lock, flags);
} else {
for (i = 0; i < output->wm_num; i++) {
- vfe_wm_line_based(vfe, output->wm_idx[i], NULL, i, 0);
- vfe_set_cgc_override(vfe, output->wm_idx[i], 0);
+ ops->wm_line_based(vfe, output->wm_idx[i], NULL, i, 0);
+ ops->set_cgc_override(vfe, output->wm_idx[i], 0);
}
- vfe_enable_irq_pix_line(vfe, 0, line->id, 0);
- vfe_set_module_cfg(vfe, 0);
- vfe_set_xbar_cfg(vfe, output, 0);
+ ops->enable_irq_pix_line(vfe, 0, line->id, 0);
+ ops->set_module_cfg(vfe, 0);
+ ops->set_realign_cfg(vfe, line, 0);
+ ops->set_xbar_cfg(vfe, output, 0);
- vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY);
+ ops->set_camif_cmd(vfe, 0);
spin_unlock_irqrestore(&vfe->output_lock, flags);
- vfe_camif_wait_for_stop(vfe);
+ ops->camif_wait_for_stop(vfe, vfe->camss->dev);
}
return 0;
@@ -1532,11 +832,13 @@ static int vfe_enable(struct vfe_line *line)
mutex_lock(&vfe->stream_lock);
if (!vfe->stream_count) {
- vfe_enable_irq_common(vfe);
+ vfe->ops->enable_irq_common(vfe);
+
+ vfe->ops->bus_enable_wr_if(vfe, 1);
- vfe_bus_enable_wr_if(vfe, 1);
+ vfe->ops->set_qos(vfe);
- vfe_set_qos(vfe);
+ vfe->ops->set_ds(vfe);
}
vfe->stream_count++;
@@ -1563,7 +865,7 @@ error_get_output:
mutex_lock(&vfe->stream_lock);
if (vfe->stream_count == 1)
- vfe_bus_enable_wr_if(vfe, 0);
+ vfe->ops->bus_enable_wr_if(vfe, 0);
vfe->stream_count--;
@@ -1589,7 +891,7 @@ static int vfe_disable(struct vfe_line *line)
mutex_lock(&vfe->stream_lock);
if (vfe->stream_count == 1)
- vfe_bus_enable_wr_if(vfe, 0);
+ vfe->ops->bus_enable_wr_if(vfe, 0);
vfe->stream_count--;
@@ -1628,7 +930,7 @@ static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
unsigned long flags;
spin_lock_irqsave(&vfe->output_lock, flags);
- vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id);
+ vfe->ops->reg_update_clear(vfe, line_id);
output = &vfe->line[line_id].output;
@@ -1698,19 +1000,19 @@ static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
u64 ts = ktime_get_ns();
unsigned int i;
- active_index = vfe_wm_get_ping_pong_status(vfe, wm);
+ active_index = vfe->ops->wm_get_ping_pong_status(vfe, wm);
spin_lock_irqsave(&vfe->output_lock, flags);
if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
- dev_err_ratelimited(to_device(vfe),
+ dev_err_ratelimited(vfe->camss->dev,
"Received wm done for unmapped index\n");
goto out_unlock;
}
output = &vfe->line[vfe->wm_output_map[wm]].output;
if (output->active_buf == active_index) {
- dev_err_ratelimited(to_device(vfe),
+ dev_err_ratelimited(vfe->camss->dev,
"Active buffer mismatch!\n");
goto out_unlock;
}
@@ -1718,7 +1020,7 @@ static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
ready_buf = output->buf[!active_index];
if (!ready_buf) {
- dev_err_ratelimited(to_device(vfe),
+ dev_err_ratelimited(vfe->camss->dev,
"Missing ready buf %d %d!\n",
!active_index, output->state);
goto out_unlock;
@@ -1740,12 +1042,12 @@ static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
if (active_index)
for (i = 0; i < output->wm_num; i++)
- vfe_wm_set_ping_addr(vfe, output->wm_idx[i],
- new_addr[i]);
+ vfe->ops->wm_set_ping_addr(vfe, output->wm_idx[i],
+ new_addr[i]);
else
for (i = 0; i < output->wm_num; i++)
- vfe_wm_set_pong_addr(vfe, output->wm_idx[i],
- new_addr[i]);
+ vfe->ops->wm_set_pong_addr(vfe, output->wm_idx[i],
+ new_addr[i]);
spin_unlock_irqrestore(&vfe->output_lock, flags);
@@ -1776,67 +1078,15 @@ static void vfe_isr_comp_done(struct vfe_device *vfe, u8 comp)
}
}
-/*
- * vfe_isr - ISPIF module interrupt handler
- * @irq: Interrupt line
- * @dev: VFE device
- *
- * Return IRQ_HANDLED on success
- */
-static irqreturn_t vfe_isr(int irq, void *dev)
+static inline void vfe_isr_reset_ack(struct vfe_device *vfe)
{
- struct vfe_device *vfe = dev;
- u32 value0, value1;
- u32 violation;
- int i, j;
-
- value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0);
- value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1);
-
- writel_relaxed(value0, vfe->base + VFE_0_IRQ_CLEAR_0);
- writel_relaxed(value1, vfe->base + VFE_0_IRQ_CLEAR_1);
-
- wmb();
- writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
-
- if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
- complete(&vfe->reset_complete);
-
- if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION) {
- violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
- dev_err_ratelimited(to_device(vfe),
- "VFE: violation = 0x%08x\n", violation);
- }
-
- if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK) {
- complete(&vfe->halt_complete);
- writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD);
- }
-
- for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++)
- if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i))
- vfe_isr_reg_update(vfe, i);
-
- if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF)
- vfe_isr_sof(vfe, VFE_LINE_PIX);
-
- for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
- if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i))
- vfe_isr_sof(vfe, i);
-
- for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
- if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) {
- vfe_isr_comp_done(vfe, i);
- for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++)
- if (vfe->wm_output_map[j] == VFE_LINE_PIX)
- value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j);
- }
-
- for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++)
- if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i))
- vfe_isr_wm_done(vfe, i);
+ complete(&vfe->reset_complete);
+}

- return IRQ_HANDLED;
+static inline void vfe_isr_halt_ack(struct vfe_device *vfe)
+{
+ complete(&vfe->halt_complete);
+ vfe->ops->halt_clear(vfe);
}
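The monolithic vfe_isr() deleted above is superseded by per-hardware handlers registered through vfe->ops->isr; they decode the status registers and fan out into these small vfe_isr_ops helpers. A minimal sketch of such a dispatcher, assuming placeholder mask names (the real handlers live in the per-hardware files added elsewhere in this series):

	static irqreturn_t vfe_hw_isr_sketch(int irq, void *dev)
	{
		struct vfe_device *vfe = dev;
		u32 value0, value1;
		int i;

		vfe->ops->isr_read(vfe, &value0, &value1); /* read and clear */

		if (value0 & HW_IRQ_0_RESET_ACK)	/* placeholder mask */
			vfe->isr_ops.reset_ack(vfe);

		for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++)
			if (value0 & HW_IRQ_0_REG_UPDATE(i)) /* placeholder */
				vfe->isr_ops.reg_update(vfe, i);

		return IRQ_HANDLED;
	}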
/*
@@ -1847,7 +1097,7 @@ static irqreturn_t vfe_isr(int irq, void *dev)
*/
static int vfe_set_clock_rates(struct vfe_device *vfe)
{
- struct device *dev = to_device(vfe);
+ struct device *dev = vfe->camss->dev;
u32 pixel_clock[MSM_VFE_LINE_NUM];
int i, j;
int ret;
@@ -1862,7 +1112,8 @@ static int vfe_set_clock_rates(struct vfe_device *vfe)
for (i = 0; i < vfe->nclocks; i++) {
struct camss_clock *clock = &vfe->clock[i];
- if (!strcmp(clock->name, "camss_vfe_vfe")) {
+ if (!strcmp(clock->name, "vfe0") ||
+ !strcmp(clock->name, "vfe1")) {
u64 min_rate = 0;
long rate;
@@ -1873,8 +1124,11 @@ static int vfe_set_clock_rates(struct vfe_device *vfe)
if (j == VFE_LINE_PIX) {
tmp = pixel_clock[j];
} else {
- bpp = vfe_get_bpp(vfe->line[j].
- fmt[MSM_VFE_PAD_SINK].code);
+ struct vfe_line *l = &vfe->line[j];
+
+ bpp = vfe_get_bpp(l->formats,
+ l->nformats,
+ l->fmt[MSM_VFE_PAD_SINK].code);
tmp = pixel_clock[j] * bpp / 64;
}
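The RDI branch above derives the required VFE clock from the CSID pixel clock and the format's bits per pixel, assuming a 64-bit wide bus; the PIX line uses the pixel clock unscaled. A worked instance of the formula:

	/*
	 * Example: a 10 bpp Bayer stream at a 200 MHz pixel clock needs
	 *
	 *	tmp = 200000000 * 10 / 64 = 31250000 Hz
	 *
	 * of VFE core clock, before any safety margin is applied.
	 */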
@@ -1940,7 +1194,8 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
for (i = 0; i < vfe->nclocks; i++) {
struct camss_clock *clock = &vfe->clock[i];
- if (!strcmp(clock->name, "camss_vfe_vfe")) {
+ if (!strcmp(clock->name, "vfe0") ||
+ !strcmp(clock->name, "vfe1")) {
u64 min_rate = 0;
unsigned long rate;
@@ -1951,8 +1206,11 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
if (j == VFE_LINE_PIX) {
tmp = pixel_clock[j];
} else {
- bpp = vfe_get_bpp(vfe->line[j].
- fmt[MSM_VFE_PAD_SINK].code);
+ struct vfe_line *l = &vfe->line[j];
+
+ bpp = vfe_get_bpp(l->formats,
+ l->nformats,
+ l->fmt[MSM_VFE_PAD_SINK].code);
tmp = pixel_clock[j] * bpp / 64;
}
@@ -1984,12 +1242,20 @@ static int vfe_get(struct vfe_device *vfe)
mutex_lock(&vfe->power_lock);
if (vfe->power_count == 0) {
+ ret = camss_pm_domain_on(vfe->camss, vfe->id);
+ if (ret < 0)
+ goto error_pm_domain;
+
+ ret = pm_runtime_get_sync(vfe->camss->dev);
+ if (ret < 0)
+ goto error_pm_runtime_get;
+
ret = vfe_set_clock_rates(vfe);
if (ret < 0)
goto error_clocks;
ret = camss_enable_clocks(vfe->nclocks, vfe->clock,
- to_device(vfe));
+ vfe->camss->dev);
if (ret < 0)
goto error_clocks;
@@ -2015,6 +1281,12 @@ error_reset:
camss_disable_clocks(vfe->nclocks, vfe->clock);
error_clocks:
+ pm_runtime_put_sync(vfe->camss->dev);
+
+error_pm_runtime_get:
+ camss_pm_domain_off(vfe->camss, vfe->id);
+
+error_pm_domain:
mutex_unlock(&vfe->power_lock);
return ret;
@@ -2029,7 +1301,7 @@ static void vfe_put(struct vfe_device *vfe)
mutex_lock(&vfe->power_lock);
if (vfe->power_count == 0) {
- dev_err(to_device(vfe), "vfe power off on power_count == 0\n");
+ dev_err(vfe->camss->dev, "vfe power off on power_count == 0\n");
goto exit;
} else if (vfe->power_count == 1) {
if (vfe->was_streaming) {
@@ -2037,6 +1309,8 @@ static void vfe_put(struct vfe_device *vfe)
vfe_halt(vfe);
}
camss_disable_clocks(vfe->nclocks, vfe->clock);
+ pm_runtime_put_sync(vfe->camss->dev);
+ camss_pm_domain_off(vfe->camss, vfe->id);
}
vfe->power_count--;
@@ -2046,26 +1320,6 @@ exit:
}
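vfe_get() and vfe_put() bracket all hardware access with a reference count, and the ordering established above is deliberate: the power domain link is created before the runtime PM reference is taken, and clock rates are programmed before the clocks are enabled. In outline (error unwinding elided):

	/*
	 * vfe_get(), first user:
	 *	camss_pm_domain_on(camss, id);	   VFE GDSC via device link
	 *	pm_runtime_get_sync(camss->dev);   runtime PM reference
	 *	vfe_set_clock_rates(vfe);	   rates set while clocks off
	 *	camss_enable_clocks(...);
	 *
	 * vfe_put(), last user: the same steps in reverse.
	 */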
/*
- * vfe_video_pad_to_line - Get pointer to VFE line by media pad
- * @pad: Media pad
- *
- * Return pointer to vfe line structure
- */
-static struct vfe_line *vfe_video_pad_to_line(struct media_pad *pad)
-{
- struct media_pad *vfe_pad;
- struct v4l2_subdev *subdev;
-
- vfe_pad = media_entity_remote_pad(pad);
- if (vfe_pad == NULL)
- return NULL;
-
- subdev = media_entity_to_v4l2_subdev(vfe_pad->entity);
-
- return container_of(subdev, struct vfe_line, subdev);
-}
-
-/*
* vfe_queue_buffer - Add empty buffer
* @vid: Video device structure
* @buf: Buffer to be enqueued
@@ -2078,16 +1332,11 @@ static struct vfe_line *vfe_video_pad_to_line(struct media_pad *pad)
static int vfe_queue_buffer(struct camss_video *vid,
struct camss_buffer *buf)
{
- struct vfe_device *vfe = &vid->camss->vfe;
- struct vfe_line *line;
+ struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
+ struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
unsigned long flags;
- line = vfe_video_pad_to_line(&vid->pad);
- if (!line) {
- dev_err(to_device(vfe), "Can not queue buffer\n");
- return -1;
- }
output = &line->output;
spin_lock_irqsave(&vfe->output_lock, flags);
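Dropping vfe_video_pad_to_line() works because struct camss_video is embedded in struct vfe_line (see the new camss-vfe.h below), so the owning line is recovered in O(1) and the lookup can no longer fail:

	/*
	 * line = container_of(vid, struct vfe_line, video_out);
	 *
	 * computes the address of the enclosing vfe_line from its embedded
	 * video_out member; to_vfe(line) then walks one level further up.
	 * No media-graph traversal, hence no NULL case to handle.
	 */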
@@ -2112,16 +1361,11 @@ static int vfe_queue_buffer(struct camss_video *vid,
static int vfe_flush_buffers(struct camss_video *vid,
enum vb2_buffer_state state)
{
- struct vfe_device *vfe = &vid->camss->vfe;
- struct vfe_line *line;
+ struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
+ struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
unsigned long flags;
- line = vfe_video_pad_to_line(&vid->pad);
- if (!line) {
- dev_err(to_device(vfe), "Can not flush buffers\n");
- return -1;
- }
output = &line->output;
spin_lock_irqsave(&vfe->output_lock, flags);
@@ -2158,15 +1402,11 @@ static int vfe_set_power(struct v4l2_subdev *sd, int on)
int ret;
if (on) {
- u32 hw_version;
-
ret = vfe_get(vfe);
if (ret < 0)
return ret;
- hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
- dev_dbg(to_device(vfe),
- "VFE HW Version = 0x%08x\n", hw_version);
+ vfe->ops->hw_version_read(vfe, vfe->camss->dev);
} else {
vfe_put(vfe);
}
@@ -2192,12 +1432,12 @@ static int vfe_set_stream(struct v4l2_subdev *sd, int enable)
if (enable) {
ret = vfe_enable(line);
if (ret < 0)
- dev_err(to_device(vfe),
+ dev_err(vfe->camss->dev,
"Failed to enable vfe outputs\n");
} else {
ret = vfe_disable(line);
if (ret < 0)
- dev_err(to_device(vfe),
+ dev_err(vfe->camss->dev,
"Failed to disable vfe outputs\n");
}
@@ -2286,12 +1526,12 @@ static void vfe_try_format(struct vfe_line *line,
case MSM_VFE_PAD_SINK:
/* Set format on sink pad */
- for (i = 0; i < ARRAY_SIZE(vfe_formats); i++)
- if (fmt->code == vfe_formats[i].code)
+ for (i = 0; i < line->nformats; i++)
+ if (fmt->code == line->formats[i].code)
break;
/* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(vfe_formats))
+ if (i >= line->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
@@ -2304,11 +1544,11 @@ static void vfe_try_format(struct vfe_line *line,
case MSM_VFE_PAD_SRC:
/* Set and return a format same as sink pad */
-
code = fmt->code;
- *fmt = *__vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
- which);
+ *fmt = *__vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which);
+
+ fmt->code = vfe_src_pad_code(line, fmt->code, 0, code);
if (line->id == VFE_LINE_PIX) {
struct v4l2_rect *rect;
@@ -2317,34 +1557,6 @@ static void vfe_try_format(struct vfe_line *line,
fmt->width = rect->width;
fmt->height = rect->height;
-
- switch (fmt->code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
- if (code == MEDIA_BUS_FMT_YUYV8_1_5X8)
- fmt->code = MEDIA_BUS_FMT_YUYV8_1_5X8;
- else
- fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
- break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
- if (code == MEDIA_BUS_FMT_YVYU8_1_5X8)
- fmt->code = MEDIA_BUS_FMT_YVYU8_1_5X8;
- else
- fmt->code = MEDIA_BUS_FMT_YVYU8_2X8;
- break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
- default:
- if (code == MEDIA_BUS_FMT_UYVY8_1_5X8)
- fmt->code = MEDIA_BUS_FMT_UYVY8_1_5X8;
- else
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
- break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
- if (code == MEDIA_BUS_FMT_VYUY8_1_5X8)
- fmt->code = MEDIA_BUS_FMT_VYUY8_1_5X8;
- else
- fmt->code = MEDIA_BUS_FMT_VYUY8_2X8;
- break;
- }
}
break;
@@ -2448,21 +1660,22 @@ static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_mbus_code_enum *code)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
if (code->pad == MSM_VFE_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(vfe_formats))
+ if (code->index >= line->nformats)
return -EINVAL;
- code->code = vfe_formats[code->index].code;
+ code->code = line->formats[code->index].code;
} else {
- if (code->index > 0)
- return -EINVAL;
+ struct v4l2_mbus_framefmt *sink_fmt;

- format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
- code->which);
+ sink_fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
+ code->which);

- code->code = format->code;
+ code->code = vfe_src_pad_code(line, sink_fmt->code,
+ code->index, 0);
+ if (!code->code)
+ return -EINVAL;
}
return 0;
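The new else branch enumerates source-pad codes derived from the current sink code via vfe_src_pad_code(), defined in the per-hardware files and not visible in this hunk. A hedged sketch of its contract, with an invented two-entry table:

	static u32 vfe_src_pad_code_sketch(struct vfe_line *line, u32 sink_code,
					   unsigned int index, u32 src_req_code)
	{
		static const u32 from_uyvy[] = {
			MEDIA_BUS_FMT_UYVY8_2X8,
			MEDIA_BUS_FMT_UYVY8_1_5X8,
		};

		if (sink_code != MEDIA_BUS_FMT_UYVY8_2X8)
			return 0;

		if (src_req_code)	/* try_format path: validate a request */
			return src_req_code == from_uyvy[1] ?
						src_req_code : sink_code;

		/* enum_mbus_code path: 0 past the last entry -> -EINVAL above */
		return index < ARRAY_SIZE(from_uyvy) ? from_uyvy[index] : 0;
	}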
@@ -2751,15 +1964,29 @@ static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
*
* Return 0 on success or a negative error code otherwise
*/
-int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res)
+int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
+ const struct resources *res, u8 id)
{
- struct device *dev = to_device(vfe);
+ struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *r;
- struct camss *camss = to_camss(vfe);
int i, j;
int ret;
+ vfe->isr_ops.reset_ack = vfe_isr_reset_ack;
+ vfe->isr_ops.halt_ack = vfe_isr_halt_ack;
+ vfe->isr_ops.reg_update = vfe_isr_reg_update;
+ vfe->isr_ops.sof = vfe_isr_sof;
+ vfe->isr_ops.comp_done = vfe_isr_comp_done;
+ vfe->isr_ops.wm_done = vfe_isr_wm_done;
+
+ if (camss->version == CAMSS_8x16)
+ vfe->ops = &vfe_ops_4_1;
+ else if (camss->version == CAMSS_8x96)
+ vfe->ops = &vfe_ops_4_7;
+ else
+ return -EINVAL;
+
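vfe_ops_4_1 and vfe_ops_4_7 are filled in by the per-hardware source files this series splits out; only their declarations appear in camss-vfe.h below. Schematically (member names are from this patch, the function names are assumed):

	const struct vfe_hw_ops vfe_ops_4_1 = {
		.hw_version_read	= vfe_hw_version_read,
		.reg_update		= vfe_reg_update,
		.reg_update_clear	= vfe_reg_update_clear,
		.wm_enable		= vfe_wm_enable,
		.isr			= vfe_isr,
		/* ... every other vfe_hw_ops member ... */
	};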
/* Memory */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
@@ -2781,7 +2008,7 @@ int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res)
vfe->irq = r->start;
snprintf(vfe->irq_name, sizeof(vfe->irq_name), "%s_%s%d",
dev_name(dev), MSM_VFE_NAME, vfe->id);
- ret = devm_request_irq(dev, vfe->irq, vfe_isr,
+ ret = devm_request_irq(dev, vfe->irq, vfe->ops->isr,
IRQF_TRIGGER_RISING, vfe->irq_name, vfe);
if (ret < 0) {
dev_err(dev, "request_irq failed: %d\n", ret);
@@ -2836,16 +2063,38 @@ int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res)
spin_lock_init(&vfe->output_lock);
- vfe->id = 0;
+ vfe->camss = camss;
+ vfe->id = id;
vfe->reg_update = 0;
for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
- vfe->line[i].video_out.type =
- V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- vfe->line[i].video_out.camss = camss;
- vfe->line[i].id = i;
- init_completion(&vfe->line[i].output.sof);
- init_completion(&vfe->line[i].output.reg_update);
+ struct vfe_line *l = &vfe->line[i];
+
+ l->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ l->video_out.camss = camss;
+ l->id = i;
+ init_completion(&l->output.sof);
+ init_completion(&l->output.reg_update);
+
+ if (camss->version == CAMSS_8x16) {
+ if (i == VFE_LINE_PIX) {
+ l->formats = formats_pix_8x16;
+ l->nformats = ARRAY_SIZE(formats_pix_8x16);
+ } else {
+ l->formats = formats_rdi_8x16;
+ l->nformats = ARRAY_SIZE(formats_rdi_8x16);
+ }
+ } else if (camss->version == CAMSS_8x96) {
+ if (i == VFE_LINE_PIX) {
+ l->formats = formats_pix_8x96;
+ l->nformats = ARRAY_SIZE(formats_pix_8x96);
+ } else {
+ l->formats = formats_rdi_8x96;
+ l->nformats = ARRAY_SIZE(formats_rdi_8x96);
+ }
+ } else {
+ return -EINVAL;
+ }
}
init_completion(&vfe->reset_complete);
@@ -2968,7 +2217,7 @@ void msm_vfe_stop_streaming(struct vfe_device *vfe)
int msm_vfe_register_entities(struct vfe_device *vfe,
struct v4l2_device *v4l2_dev)
{
- struct device *dev = to_device(vfe);
+ struct device *dev = vfe->camss->dev;
struct v4l2_subdev *sd;
struct media_pad *pads;
struct camss_video *video_out;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
new file mode 100644
index 000000000000..0d10071ae881
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * camss-vfe.h
+ *
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module
+ *
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2018 Linaro Ltd.
+ */
+#ifndef QC_MSM_CAMSS_VFE_H
+#define QC_MSM_CAMSS_VFE_H
+
+#include <linux/clk.h>
+#include <linux/spinlock_types.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "camss-video.h"
+
+#define MSM_VFE_PAD_SINK 0
+#define MSM_VFE_PAD_SRC 1
+#define MSM_VFE_PADS_NUM 2
+
+#define MSM_VFE_LINE_NUM 4
+#define MSM_VFE_IMAGE_MASTERS_NUM 7
+#define MSM_VFE_COMPOSITE_IRQ_NUM 4
+
+enum vfe_output_state {
+ VFE_OUTPUT_OFF,
+ VFE_OUTPUT_RESERVED,
+ VFE_OUTPUT_SINGLE,
+ VFE_OUTPUT_CONTINUOUS,
+ VFE_OUTPUT_IDLE,
+ VFE_OUTPUT_STOPPING
+};
+
+enum vfe_line_id {
+ VFE_LINE_NONE = -1,
+ VFE_LINE_RDI0 = 0,
+ VFE_LINE_RDI1 = 1,
+ VFE_LINE_RDI2 = 2,
+ VFE_LINE_PIX = 3
+};
+
+struct vfe_output {
+ u8 wm_num;
+ u8 wm_idx[3];
+
+ int active_buf;
+ struct camss_buffer *buf[2];
+ struct camss_buffer *last_buffer;
+ struct list_head pending_bufs;
+
+ unsigned int drop_update_idx;
+
+ enum vfe_output_state state;
+ unsigned int sequence;
+ int wait_sof;
+ int wait_reg_update;
+ struct completion sof;
+ struct completion reg_update;
+};
+
+struct vfe_line {
+ enum vfe_line_id id;
+ struct v4l2_subdev subdev;
+ struct media_pad pads[MSM_VFE_PADS_NUM];
+ struct v4l2_mbus_framefmt fmt[MSM_VFE_PADS_NUM];
+ struct v4l2_rect compose;
+ struct v4l2_rect crop;
+ struct camss_video video_out;
+ struct vfe_output output;
+ const struct vfe_format *formats;
+ unsigned int nformats;
+};
+
+struct vfe_device;
+
+struct vfe_hw_ops {
+ void (*hw_version_read)(struct vfe_device *vfe, struct device *dev);
+ u16 (*get_ub_size)(u8 vfe_id);
+ void (*global_reset)(struct vfe_device *vfe);
+ void (*halt_request)(struct vfe_device *vfe);
+ void (*halt_clear)(struct vfe_device *vfe);
+ void (*wm_enable)(struct vfe_device *vfe, u8 wm, u8 enable);
+ void (*wm_frame_based)(struct vfe_device *vfe, u8 wm, u8 enable);
+ void (*wm_line_based)(struct vfe_device *vfe, u32 wm,
+ struct v4l2_pix_format_mplane *pix,
+ u8 plane, u32 enable);
+ void (*wm_set_framedrop_period)(struct vfe_device *vfe, u8 wm, u8 per);
+ void (*wm_set_framedrop_pattern)(struct vfe_device *vfe, u8 wm,
+ u32 pattern);
+ void (*wm_set_ub_cfg)(struct vfe_device *vfe, u8 wm, u16 offset,
+ u16 depth);
+ void (*bus_reload_wm)(struct vfe_device *vfe, u8 wm);
+ void (*wm_set_ping_addr)(struct vfe_device *vfe, u8 wm, u32 addr);
+ void (*wm_set_pong_addr)(struct vfe_device *vfe, u8 wm, u32 addr);
+ int (*wm_get_ping_pong_status)(struct vfe_device *vfe, u8 wm);
+ void (*bus_enable_wr_if)(struct vfe_device *vfe, u8 enable);
+ void (*bus_connect_wm_to_rdi)(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id);
+ void (*wm_set_subsample)(struct vfe_device *vfe, u8 wm);
+ void (*bus_disconnect_wm_from_rdi)(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id id);
+ void (*set_xbar_cfg)(struct vfe_device *vfe, struct vfe_output *output,
+ u8 enable);
+ void (*set_rdi_cid)(struct vfe_device *vfe, enum vfe_line_id id,
+ u8 cid);
+ void (*set_realign_cfg)(struct vfe_device *vfe, struct vfe_line *line,
+ u8 enable);
+ void (*reg_update)(struct vfe_device *vfe, enum vfe_line_id line_id);
+ void (*reg_update_clear)(struct vfe_device *vfe,
+ enum vfe_line_id line_id);
+ void (*enable_irq_wm_line)(struct vfe_device *vfe, u8 wm,
+ enum vfe_line_id line_id, u8 enable);
+ void (*enable_irq_pix_line)(struct vfe_device *vfe, u8 comp,
+ enum vfe_line_id line_id, u8 enable);
+ void (*enable_irq_common)(struct vfe_device *vfe);
+ void (*set_demux_cfg)(struct vfe_device *vfe, struct vfe_line *line);
+ void (*set_scale_cfg)(struct vfe_device *vfe, struct vfe_line *line);
+ void (*set_crop_cfg)(struct vfe_device *vfe, struct vfe_line *line);
+ void (*set_clamp_cfg)(struct vfe_device *vfe);
+ void (*set_qos)(struct vfe_device *vfe);
+ void (*set_ds)(struct vfe_device *vfe);
+ void (*set_cgc_override)(struct vfe_device *vfe, u8 wm, u8 enable);
+ void (*set_camif_cfg)(struct vfe_device *vfe, struct vfe_line *line);
+ void (*set_camif_cmd)(struct vfe_device *vfe, u8 enable);
+ void (*set_module_cfg)(struct vfe_device *vfe, u8 enable);
+ int (*camif_wait_for_stop)(struct vfe_device *vfe, struct device *dev);
+ void (*isr_read)(struct vfe_device *vfe, u32 *value0, u32 *value1);
+ void (*violation_read)(struct vfe_device *vfe);
+ irqreturn_t (*isr)(int irq, void *dev);
+};
+
+struct vfe_isr_ops {
+ void (*reset_ack)(struct vfe_device *vfe);
+ void (*halt_ack)(struct vfe_device *vfe);
+ void (*reg_update)(struct vfe_device *vfe, enum vfe_line_id line_id);
+ void (*sof)(struct vfe_device *vfe, enum vfe_line_id line_id);
+ void (*comp_done)(struct vfe_device *vfe, u8 comp);
+ void (*wm_done)(struct vfe_device *vfe, u8 wm);
+};
+
+struct vfe_device {
+ struct camss *camss;
+ u8 id;
+ void __iomem *base;
+ u32 irq;
+ char irq_name[30];
+ struct camss_clock *clock;
+ int nclocks;
+ struct completion reset_complete;
+ struct completion halt_complete;
+ struct mutex power_lock;
+ int power_count;
+ struct mutex stream_lock;
+ int stream_count;
+ spinlock_t output_lock;
+ enum vfe_line_id wm_output_map[MSM_VFE_IMAGE_MASTERS_NUM];
+ struct vfe_line line[MSM_VFE_LINE_NUM];
+ u32 reg_update;
+ u8 was_streaming;
+ const struct vfe_hw_ops *ops;
+ struct vfe_isr_ops isr_ops;
+};
+
+struct resources;
+
+int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
+ const struct resources *res, u8 id);
+
+int msm_vfe_register_entities(struct vfe_device *vfe,
+ struct v4l2_device *v4l2_dev);
+
+void msm_vfe_unregister_entities(struct vfe_device *vfe);
+
+void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id);
+void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id);
+
+void msm_vfe_stop_streaming(struct vfe_device *vfe);
+
+extern const struct vfe_hw_ops vfe_ops_4_1;
+extern const struct vfe_hw_ops vfe_ops_4_7;
+
+#endif /* QC_MSM_CAMSS_VFE_H */
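With this header in place the generic code never touches VFE registers directly; every hardware access goes through the table, e.g.:

	vfe->ops->reg_update(vfe, line->id);
	vfe->ops->wm_enable(vfe, output->wm_idx[i], 1);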
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index ffaa2849e0c1..c9bb0d023db4 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -1,19 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* camss-video.c
*
* Qualcomm MSM Camera Subsystem - V4L2 device node
*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/slab.h>
#include <media/media-entity.h>
@@ -49,7 +41,7 @@ struct camss_format_info {
unsigned int bpp[3];
};
-static const struct camss_format_info formats_rdi[] = {
+static const struct camss_format_info formats_rdi_8x16[] = {
{ MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
@@ -82,9 +74,60 @@ static const struct camss_format_info formats_rdi[] = {
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
{ MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_Y10_1X10, V4L2_PIX_FMT_Y10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
};
-static const struct camss_format_info formats_pix[] = {
+static const struct camss_format_info formats_rdi_8x96[] = {
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_PIX_FMT_SBGGR10, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, V4L2_PIX_FMT_SBGGR14P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 14 } },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, V4L2_PIX_FMT_SGBRG14P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 14 } },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, V4L2_PIX_FMT_SGRBG14P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 14 } },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, V4L2_PIX_FMT_SRGGB14P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 14 } },
+ { MEDIA_BUS_FMT_Y10_1X10, V4L2_PIX_FMT_Y10P, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
+ { MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, V4L2_PIX_FMT_Y10, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+};
+
+static const struct camss_format_info formats_pix_8x16[] = {
{ MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV12, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV12, 1,
@@ -119,6 +162,49 @@ static const struct camss_format_info formats_pix[] = {
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
};
+static const struct camss_format_info formats_pix_8x96[] = {
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV12, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV16, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV61, 1,
+ { { 1, 1 } }, { { 1, 2 } }, { 8 } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
+};
+
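These per-SoC tables are searched linearly by media bus code when a format is negotiated. A minimal sketch of that lookup, assuming (as the initializers suggest) that the bus code is the first camss_format_info member:

	static const struct camss_format_info *
	video_find_format_sketch(u32 code, const struct camss_format_info *f,
				 unsigned int nf)
	{
		unsigned int i;

		for (i = 0; i < nf; i++)
			if (f[i].code == code)
				return &f[i];

		return NULL;	/* callers fall back to a default entry */
	}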
/* -----------------------------------------------------------------------------
* Helper functions
*/
@@ -798,11 +884,24 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
mutex_init(&video->lock);
- video->formats = formats_rdi;
- video->nformats = ARRAY_SIZE(formats_rdi);
- if (is_pix) {
- video->formats = formats_pix;
- video->nformats = ARRAY_SIZE(formats_pix);
+ if (video->camss->version == CAMSS_8x16) {
+ if (is_pix) {
+ video->formats = formats_pix_8x16;
+ video->nformats = ARRAY_SIZE(formats_pix_8x16);
+ } else {
+ video->formats = formats_rdi_8x16;
+ video->nformats = ARRAY_SIZE(formats_rdi_8x16);
+ }
+ } else if (video->camss->version == CAMSS_8x96) {
+ if (is_pix) {
+ video->formats = formats_pix_8x96;
+ video->nformats = ARRAY_SIZE(formats_pix_8x96);
+ } else {
+ video->formats = formats_rdi_8x96;
+ video->nformats = ARRAY_SIZE(formats_rdi_8x96);
+ }
+ } else {
+ goto error_video_register;
}
ret = msm_video_init_format(video);
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-video.h b/drivers/media/platform/qcom/camss/camss-video.h
index 38bd1f2eec54..aa35e8cc6fd5 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-video.h
+++ b/drivers/media/platform/qcom/camss/camss-video.h
@@ -1,19 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* camss-video.h
*
* Qualcomm MSM Camera Subsystem - V4L2 device node
*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#ifndef QC_MSM_CAMSS_VIDEO_H
#define QC_MSM_CAMSS_VIDEO_H
diff --git a/drivers/media/platform/qcom/camss-8x16/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 23fda6207a23..dcc0c30ef1b1 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -1,19 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* camss.c
*
* Qualcomm MSM Camera Subsystem - Core
*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/media-bus-format.h>
@@ -22,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
@@ -36,12 +30,11 @@
#define CAMSS_CLOCK_MARGIN_NUMERATOR 105
#define CAMSS_CLOCK_MARGIN_DENOMINATOR 100
-static const struct resources csiphy_res[] = {
+static const struct resources csiphy_res_8x16[] = {
/* CSIPHY0 */
{
.regulator = { NULL },
- .clock = { "camss_top_ahb", "ispif_ahb",
- "camss_ahb", "csiphy0_timer" },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
@@ -53,8 +46,7 @@ static const struct resources csiphy_res[] = {
/* CSIPHY1 */
{
.regulator = { NULL },
- .clock = { "camss_top_ahb", "ispif_ahb",
- "camss_ahb", "csiphy1_timer" },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
@@ -64,12 +56,11 @@ static const struct resources csiphy_res[] = {
}
};
-static const struct resources csid_res[] = {
+static const struct resources csid_res_8x16[] = {
/* CSID0 */
{
.regulator = { "vdda" },
- .clock = { "camss_top_ahb", "ispif_ahb",
- "csi0_ahb", "camss_ahb",
+ .clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb",
"csi0", "csi0_phy", "csi0_pix", "csi0_rdi" },
.clock_rate = { { 0 },
{ 0 },
@@ -86,8 +77,7 @@ static const struct resources csid_res[] = {
/* CSID1 */
{
.regulator = { "vdda" },
- .clock = { "camss_top_ahb", "ispif_ahb",
- "csi1_ahb", "camss_ahb",
+ .clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb",
"csi1", "csi1_phy", "csi1_pix", "csi1_rdi" },
.clock_rate = { { 0 },
{ 0 },
@@ -102,35 +92,195 @@ static const struct resources csid_res[] = {
},
};
-static const struct resources_ispif ispif_res = {
+static const struct resources_ispif ispif_res_8x16 = {
/* ISPIF */
- .clock = { "camss_top_ahb", "camss_ahb", "ispif_ahb",
+ .clock = { "top_ahb", "ahb", "ispif_ahb",
"csi0", "csi0_pix", "csi0_rdi",
"csi1", "csi1_pix", "csi1_rdi" },
- .clock_for_reset = { "camss_vfe_vfe", "camss_csi_vfe" },
+ .clock_for_reset = { "vfe0", "csi_vfe0" },
.reg = { "ispif", "csi_clk_mux" },
.interrupt = "ispif"
};
-static const struct resources vfe_res = {
+static const struct resources vfe_res_8x16[] = {
+ /* VFE0 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "vfe0", "csi_vfe0",
+ "vfe_ahb", "vfe_axi", "ahb" },
+ .clock_rate = { { 0 },
+ { 50000000, 80000000, 100000000, 160000000,
+ 177780000, 200000000, 266670000, 320000000,
+ 400000000, 465000000 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" }
+ }
+};
+
+static const struct resources csiphy_res_8x96[] = {
+ /* CSIPHY0 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 } },
+ .reg = { "csiphy0", "csiphy0_clk_mux" },
+ .interrupt = { "csiphy0" }
+ },
+
+ /* CSIPHY1 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 } },
+ .reg = { "csiphy1", "csiphy1_clk_mux" },
+ .interrupt = { "csiphy1" }
+ },
+
+ /* CSIPHY2 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy2_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 } },
+ .reg = { "csiphy2", "csiphy2_clk_mux" },
+ .interrupt = { "csiphy2" }
+ }
+};
+
+static const struct resources csid_res_8x96[] = {
+ /* CSID0 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb",
+ "csi0", "csi0_phy", "csi0_pix", "csi0_rdi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" }
+ },
+
+ /* CSID1 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb",
+ "csi1", "csi1_phy", "csi1_pix", "csi1_rdi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" }
+ },
+
+ /* CSID2 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "top_ahb", "ispif_ahb", "csi2_ahb", "ahb",
+ "csi2", "csi2_phy", "csi2_pix", "csi2_rdi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid2" },
+ .interrupt = { "csid2" }
+ },
+
+ /* CSID3 */
+ {
+ .regulator = { "vdda" },
+ .clock = { "top_ahb", "ispif_ahb", "csi3_ahb", "ahb",
+ "csi3", "csi3_phy", "csi3_pix", "csi3_rdi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 200000000, 266666667 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid3" },
+ .interrupt = { "csid3" }
+ }
+};
+
+static const struct resources_ispif ispif_res_8x96 = {
+ /* ISPIF */
+ .clock = { "top_ahb", "ahb", "ispif_ahb",
+ "csi0", "csi0_pix", "csi0_rdi",
+ "csi1", "csi1_pix", "csi1_rdi",
+ "csi2", "csi2_pix", "csi2_rdi",
+ "csi3", "csi3_pix", "csi3_rdi" },
+ .clock_for_reset = { "vfe0", "csi_vfe0", "vfe1", "csi_vfe1" },
+ .reg = { "ispif", "csi_clk_mux" },
+ .interrupt = "ispif"
+};
+
+static const struct resources vfe_res_8x96[] = {
/* VFE0 */
- .regulator = { NULL },
- .clock = { "camss_top_ahb", "camss_vfe_vfe", "camss_csi_vfe",
- "iface", "bus", "camss_ahb" },
- .clock_rate = { { 0 },
- { 50000000, 80000000, 100000000, 160000000,
- 177780000, 200000000, 266670000, 320000000,
- 400000000, 465000000 },
- { 0 },
- { 0 },
- { 0 },
- { 0 },
- { 0 },
- { 0 },
- { 0 } },
- .reg = { "vfe0" },
- .interrupt = { "vfe0" }
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ahb", "vfe0", "csi_vfe0", "vfe_ahb",
+ "vfe0_ahb", "vfe_axi", "vfe0_stream"},
+ .clock_rate = { { 0 },
+ { 0 },
+ { 75000000, 100000000, 300000000,
+ 320000000, 480000000, 600000000 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" }
+ },
+
+ /* VFE1 */
+ {
+ .regulator = { NULL },
+ .clock = { "top_ahb", "ahb", "vfe1", "csi_vfe1", "vfe_ahb",
+ "vfe1_ahb", "vfe_axi", "vfe1_stream"},
+ .clock_rate = { { 0 },
+ { 0 },
+ { 75000000, 100000000, 300000000,
+ 320000000, 480000000, 600000000 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 } },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" }
+ }
};
/*
@@ -245,6 +395,26 @@ int camss_get_pixel_clock(struct media_entity *entity, u32 *pixel_clock)
return 0;
}
+int camss_pm_domain_on(struct camss *camss, int id)
+{
+ if (camss->version == CAMSS_8x96) {
+ camss->genpd_link[id] = device_link_add(camss->dev,
+ camss->genpd[id], DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
+
+ if (!camss->genpd_link[id])
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void camss_pm_domain_off(struct camss *camss, int id)
+{
+ if (camss->version == CAMSS_8x96)
+ device_link_del(camss->genpd_link[id]);
+}
+
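camss_pm_domain_on() powers a VFE GDSC by linking the camss device to the generic power domain attached in camss_probe() further down; DL_FLAG_RPM_ACTIVE raises the supplier immediately, and device_link_del() in camss_pm_domain_off() releases it again. The intended pairing, as used by vfe_get()/vfe_put():

	/*
	 *	ret = camss_pm_domain_on(camss, vfe->id);
	 *	if (ret < 0)
	 *		goto error_pm_domain;
	 *	...
	 *	camss_pm_domain_off(camss, vfe->id);
	 */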
/*
* camss_of_parse_endpoint_node - Parse port endpoint node
* @dev: Device
@@ -304,6 +474,7 @@ static int camss_of_parse_ports(struct device *dev,
if (of_device_is_available(node))
notifier->num_subdevs++;
+ of_node_put(node);
size = sizeof(*notifier->subdevs) * notifier->num_subdevs;
notifier->subdevs = devm_kzalloc(dev, size, GFP_KERNEL);
if (!notifier->subdevs) {
@@ -334,16 +505,16 @@ static int camss_of_parse_ports(struct device *dev,
}
remote = of_graph_get_remote_port_parent(node);
- of_node_put(node);
-
if (!remote) {
dev_err(dev, "Cannot get remote parent\n");
+ of_node_put(node);
return -EINVAL;
}
csd->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
csd->asd.match.fwnode = of_fwnode_handle(remote);
}
+ of_node_put(node);
return notifier->num_subdevs;
}
@@ -356,11 +527,29 @@ static int camss_of_parse_ports(struct device *dev,
*/
static int camss_init_subdevices(struct camss *camss)
{
+ const struct resources *csiphy_res;
+ const struct resources *csid_res;
+ const struct resources_ispif *ispif_res;
+ const struct resources *vfe_res;
unsigned int i;
int ret;
- for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++) {
- ret = msm_csiphy_subdev_init(&camss->csiphy[i],
+ if (camss->version == CAMSS_8x16) {
+ csiphy_res = csiphy_res_8x16;
+ csid_res = csid_res_8x16;
+ ispif_res = &ispif_res_8x16;
+ vfe_res = vfe_res_8x16;
+ } else if (camss->version == CAMSS_8x96) {
+ csiphy_res = csiphy_res_8x96;
+ csid_res = csid_res_8x96;
+ ispif_res = &ispif_res_8x96;
+ vfe_res = vfe_res_8x96;
+ } else {
+ return -EINVAL;
+ }
+
+ for (i = 0; i < camss->csiphy_num; i++) {
+ ret = msm_csiphy_subdev_init(camss, &camss->csiphy[i],
&csiphy_res[i], i);
if (ret < 0) {
dev_err(camss->dev,
@@ -370,8 +559,8 @@ static int camss_init_subdevices(struct camss *camss)
}
}
- for (i = 0; i < ARRAY_SIZE(camss->csid); i++) {
- ret = msm_csid_subdev_init(&camss->csid[i],
+ for (i = 0; i < camss->csid_num; i++) {
+ ret = msm_csid_subdev_init(camss, &camss->csid[i],
&csid_res[i], i);
if (ret < 0) {
dev_err(camss->dev,
@@ -381,17 +570,21 @@ static int camss_init_subdevices(struct camss *camss)
}
}
- ret = msm_ispif_subdev_init(&camss->ispif, &ispif_res);
+ ret = msm_ispif_subdev_init(&camss->ispif, ispif_res);
if (ret < 0) {
dev_err(camss->dev, "Failed to init ispif sub-device: %d\n",
ret);
return ret;
}
- ret = msm_vfe_subdev_init(&camss->vfe, &vfe_res);
- if (ret < 0) {
- dev_err(camss->dev, "Fail to init vfe sub-device: %d\n", ret);
- return ret;
+ for (i = 0; i < camss->vfe_num; i++) {
+ ret = msm_vfe_subdev_init(camss, &camss->vfe[i],
+ &vfe_res[i], i);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Fail to init vfe%d sub-device: %d\n", i, ret);
+ return ret;
+ }
}
return 0;
@@ -405,10 +598,10 @@ static int camss_init_subdevices(struct camss *camss)
*/
static int camss_register_entities(struct camss *camss)
{
- int i, j;
+ int i, j, k;
int ret;
- for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++) {
+ for (i = 0; i < camss->csiphy_num; i++) {
ret = msm_csiphy_register_entity(&camss->csiphy[i],
&camss->v4l2_dev);
if (ret < 0) {
@@ -419,7 +612,7 @@ static int camss_register_entities(struct camss *camss)
}
}
- for (i = 0; i < ARRAY_SIZE(camss->csid); i++) {
+ for (i = 0; i < camss->csid_num; i++) {
ret = msm_csid_register_entity(&camss->csid[i],
&camss->v4l2_dev);
if (ret < 0) {
@@ -437,15 +630,19 @@ static int camss_register_entities(struct camss *camss)
goto err_reg_ispif;
}
- ret = msm_vfe_register_entities(&camss->vfe, &camss->v4l2_dev);
- if (ret < 0) {
- dev_err(camss->dev, "Failed to register vfe entities: %d\n",
- ret);
- goto err_reg_vfe;
+ for (i = 0; i < camss->vfe_num; i++) {
+ ret = msm_vfe_register_entities(&camss->vfe[i],
+ &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to register vfe%d entities: %d\n",
+ i, ret);
+ goto err_reg_vfe;
+ }
}
- for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++) {
- for (j = 0; j < ARRAY_SIZE(camss->csid); j++) {
+ for (i = 0; i < camss->csiphy_num; i++) {
+ for (j = 0; j < camss->csid_num; j++) {
ret = media_create_pad_link(
&camss->csiphy[i].subdev.entity,
MSM_CSIPHY_PAD_SRC,
@@ -463,8 +660,8 @@ static int camss_register_entities(struct camss *camss)
}
}
- for (i = 0; i < ARRAY_SIZE(camss->csid); i++) {
- for (j = 0; j < ARRAY_SIZE(camss->ispif.line); j++) {
+ for (i = 0; i < camss->csid_num; i++) {
+ for (j = 0; j < camss->ispif.line_num; j++) {
ret = media_create_pad_link(
&camss->csid[i].subdev.entity,
MSM_CSID_PAD_SRC,
@@ -482,39 +679,42 @@ static int camss_register_entities(struct camss *camss)
}
}
- for (i = 0; i < ARRAY_SIZE(camss->ispif.line); i++) {
- for (j = 0; j < ARRAY_SIZE(camss->vfe.line); j++) {
- ret = media_create_pad_link(
- &camss->ispif.line[i].subdev.entity,
- MSM_ISPIF_PAD_SRC,
- &camss->vfe.line[j].subdev.entity,
- MSM_VFE_PAD_SINK,
- 0);
- if (ret < 0) {
- dev_err(camss->dev,
- "Failed to link %s->%s entities: %d\n",
- camss->ispif.line[i].subdev.entity.name,
- camss->vfe.line[j].subdev.entity.name,
- ret);
- goto err_link;
+ for (i = 0; i < camss->ispif.line_num; i++)
+ for (k = 0; k < camss->vfe_num; k++)
+ for (j = 0; j < ARRAY_SIZE(camss->vfe[k].line); j++) {
+ ret = media_create_pad_link(
+ &camss->ispif.line[i].subdev.entity,
+ MSM_ISPIF_PAD_SRC,
+ &camss->vfe[k].line[j].subdev.entity,
+ MSM_VFE_PAD_SINK,
+ 0);
+ if (ret < 0) {
+ dev_err(camss->dev,
+ "Failed to link %s->%s entities: %d\n",
+ camss->ispif.line[i].subdev.entity.name,
+ camss->vfe[k].line[j].subdev.entity.name,
+ ret);
+ goto err_link;
+ }
}
- }
- }
return 0;
err_link:
- msm_vfe_unregister_entities(&camss->vfe);
+ i = camss->vfe_num;
err_reg_vfe:
+ for (i--; i >= 0; i--)
+ msm_vfe_unregister_entities(&camss->vfe[i]);
+
msm_ispif_unregister_entities(&camss->ispif);
err_reg_ispif:
- i = ARRAY_SIZE(camss->csid);
+ i = camss->csid_num;
err_reg_csid:
for (i--; i >= 0; i--)
msm_csid_unregister_entity(&camss->csid[i]);
- i = ARRAY_SIZE(camss->csiphy);
+ i = camss->csiphy_num;
err_reg_csiphy:
for (i--; i >= 0; i--)
msm_csiphy_unregister_entity(&camss->csiphy[i]);
@@ -532,14 +732,16 @@ static void camss_unregister_entities(struct camss *camss)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++)
+ for (i = 0; i < camss->csiphy_num; i++)
msm_csiphy_unregister_entity(&camss->csiphy[i]);
- for (i = 0; i < ARRAY_SIZE(camss->csid); i++)
+ for (i = 0; i < camss->csid_num; i++)
msm_csid_unregister_entity(&camss->csid[i]);
msm_ispif_unregister_entities(&camss->ispif);
- msm_vfe_unregister_entities(&camss->vfe);
+
+ for (i = 0; i < camss->vfe_num; i++)
+ msm_vfe_unregister_entities(&camss->vfe[i]);
}
static int camss_subdev_notifier_bound(struct v4l2_async_notifier *async,
@@ -631,6 +833,35 @@ static int camss_probe(struct platform_device *pdev)
camss->dev = dev;
platform_set_drvdata(pdev, camss);
+ if (of_device_is_compatible(dev->of_node, "qcom,msm8916-camss")) {
+ camss->version = CAMSS_8x16;
+ camss->csiphy_num = 2;
+ camss->csid_num = 2;
+ camss->vfe_num = 1;
+ } else if (of_device_is_compatible(dev->of_node,
+ "qcom,msm8996-camss")) {
+ camss->version = CAMSS_8x96;
+ camss->csiphy_num = 3;
+ camss->csid_num = 4;
+ camss->vfe_num = 2;
+ } else {
+ return -EINVAL;
+ }
+
+ camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy),
+ GFP_KERNEL);
+ if (!camss->csiphy)
+ return -ENOMEM;
+
+ camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid),
+ GFP_KERNEL);
+ if (!camss->csid)
+ return -ENOMEM;
+
+ camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL);
+ if (!camss->vfe)
+ return -ENOMEM;
+
ret = camss_of_parse_ports(dev, &camss->notifier);
if (ret < 0)
return ret;
@@ -687,6 +918,23 @@ static int camss_probe(struct platform_device *pdev)
}
}
+ if (camss->version == CAMSS_8x96) {
+ camss->genpd[PM_DOMAIN_VFE0] = dev_pm_domain_attach_by_id(
+ camss->dev, PM_DOMAIN_VFE0);
+ if (IS_ERR(camss->genpd[PM_DOMAIN_VFE0]))
+ return PTR_ERR(camss->genpd[PM_DOMAIN_VFE0]);
+
+ camss->genpd[PM_DOMAIN_VFE1] = dev_pm_domain_attach_by_id(
+ camss->dev, PM_DOMAIN_VFE1);
+ if (IS_ERR(camss->genpd[PM_DOMAIN_VFE1])) {
+ dev_pm_domain_detach(camss->genpd[PM_DOMAIN_VFE0],
+ true);
+ return PTR_ERR(camss->genpd[PM_DOMAIN_VFE1]);
+ }
+ }
+
+ pm_runtime_enable(dev);
+
return 0;
err_register_subdevs:
@@ -703,6 +951,13 @@ void camss_delete(struct camss *camss)
media_device_unregister(&camss->media_dev);
media_device_cleanup(&camss->media_dev);
+ pm_runtime_disable(camss->dev);
+
+ if (camss->version == CAMSS_8x96) {
+ dev_pm_domain_detach(camss->genpd[PM_DOMAIN_VFE0], true);
+ dev_pm_domain_detach(camss->genpd[PM_DOMAIN_VFE1], true);
+ }
+
kfree(camss);
}
@@ -714,9 +969,12 @@ void camss_delete(struct camss *camss)
*/
static int camss_remove(struct platform_device *pdev)
{
+ unsigned int i;
+
struct camss *camss = platform_get_drvdata(pdev);
- msm_vfe_stop_streaming(&camss->vfe);
+ for (i = 0; i < camss->vfe_num; i++)
+ msm_vfe_stop_streaming(&camss->vfe[i]);
v4l2_async_notifier_unregister(&camss->notifier);
camss_unregister_entities(camss);
@@ -729,17 +987,35 @@ static int camss_remove(struct platform_device *pdev)
static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,msm8916-camss" },
+ { .compatible = "qcom,msm8996-camss" },
{ }
};
MODULE_DEVICE_TABLE(of, camss_dt_match);
+static int camss_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int camss_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops camss_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(camss_runtime_suspend, camss_runtime_resume, NULL)
+};
+
static struct platform_driver qcom_camss_driver = {
.probe = camss_probe,
.remove = camss_remove,
.driver = {
.name = "qcom-camss",
.of_match_table = camss_dt_match,
+ .pm = &camss_pm_ops,
},
};
diff --git a/drivers/media/platform/qcom/camss-8x16/camss.h b/drivers/media/platform/qcom/camss/camss.h
index 4ad223443e4b..418996d8dad8 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -1,23 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* camss.h
*
* Qualcomm MSM Camera Subsystem - Core
*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015-2017 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2015-2018 Linaro Ltd.
*/
#ifndef QC_MSM_CAMSS_H
#define QC_MSM_CAMSS_H
+#include <linux/device.h>
#include <linux/types.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
@@ -31,9 +24,6 @@
#include "camss-ispif.h"
#include "camss-vfe.h"
-#define CAMSS_CSID_NUM 2
-#define CAMSS_CSIPHY_NUM 2
-
#define to_camss(ptr_module) \
container_of(ptr_module, struct camss, ptr_module)
@@ -50,7 +40,7 @@
#define to_device_index(ptr_module, index) \
(to_camss_index(ptr_module, index)->dev)
-#define CAMSS_RES_MAX 15
+#define CAMSS_RES_MAX 17
struct resources {
char *regulator[CAMSS_RES_MAX];
@@ -67,16 +57,33 @@ struct resources_ispif {
char *interrupt;
};
+enum pm_domain {
+ PM_DOMAIN_VFE0,
+ PM_DOMAIN_VFE1,
+ PM_DOMAIN_COUNT
+};
+
+enum camss_version {
+ CAMSS_8x16,
+ CAMSS_8x96,
+};
+
struct camss {
+ enum camss_version version;
struct v4l2_device v4l2_dev;
struct v4l2_async_notifier notifier;
struct media_device media_dev;
struct device *dev;
- struct csiphy_device csiphy[CAMSS_CSIPHY_NUM];
- struct csid_device csid[CAMSS_CSID_NUM];
+ int csiphy_num;
+ struct csiphy_device *csiphy;
+ int csid_num;
+ struct csid_device *csid;
struct ispif_device ispif;
- struct vfe_device vfe;
+ int vfe_num;
+ struct vfe_device *vfe;
atomic_t ref_count;
+ struct device *genpd[PM_DOMAIN_COUNT];
+ struct device_link *genpd_link[PM_DOMAIN_COUNT];
};
struct camss_camera_interface {
@@ -101,6 +108,8 @@ int camss_enable_clocks(int nclocks, struct camss_clock *clock,
struct device *dev);
void camss_disable_clocks(int nclocks, struct camss_clock *clock);
int camss_get_pixel_clock(struct media_entity *entity, u32 *pixel_clock);
+int camss_pm_domain_on(struct camss *camss, int id);
+void camss_pm_domain_off(struct camss *camss, int id);
void camss_delete(struct camss *camss);
#endif /* QC_MSM_CAMSS_H */
diff --git a/drivers/media/platform/qcom/venus/Makefile b/drivers/media/platform/qcom/venus/Makefile
index bfd4edf7c83f..b44b11b03e12 100644
--- a/drivers/media/platform/qcom/venus/Makefile
+++ b/drivers/media/platform/qcom/venus/Makefile
@@ -2,7 +2,8 @@
# Makefile for Qualcomm Venus driver
venus-core-objs += core.o helpers.o firmware.o \
- hfi_venus.o hfi_msgs.o hfi_cmds.o hfi.o
+ hfi_venus.o hfi_msgs.o hfi_cmds.o hfi.o \
+ hfi_parser.o
venus-dec-objs += vdec.o vdec_ctrls.o
venus-enc-objs += venc.o venc_ctrls.o
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 41eef376eb2d..bb6add9d340e 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -152,6 +152,83 @@ static void venus_clks_disable(struct venus_core *core)
clk_disable_unprepare(core->clks[i]);
}
+static u32 to_v4l2_codec_type(u32 codec)
+{
+ switch (codec) {
+ case HFI_VIDEO_CODEC_H264:
+ return V4L2_PIX_FMT_H264;
+ case HFI_VIDEO_CODEC_H263:
+ return V4L2_PIX_FMT_H263;
+ case HFI_VIDEO_CODEC_MPEG1:
+ return V4L2_PIX_FMT_MPEG1;
+ case HFI_VIDEO_CODEC_MPEG2:
+ return V4L2_PIX_FMT_MPEG2;
+ case HFI_VIDEO_CODEC_MPEG4:
+ return V4L2_PIX_FMT_MPEG4;
+ case HFI_VIDEO_CODEC_VC1:
+ return V4L2_PIX_FMT_VC1_ANNEX_G;
+ case HFI_VIDEO_CODEC_VP8:
+ return V4L2_PIX_FMT_VP8;
+ case HFI_VIDEO_CODEC_VP9:
+ return V4L2_PIX_FMT_VP9;
+ case HFI_VIDEO_CODEC_DIVX:
+ case HFI_VIDEO_CODEC_DIVX_311:
+ return V4L2_PIX_FMT_XVID;
+ default:
+ return 0;
+ }
+}
+
+static int venus_enumerate_codecs(struct venus_core *core, u32 type)
+{
+ const struct hfi_inst_ops dummy_ops = {};
+ struct venus_inst *inst;
+ u32 codec, codecs;
+ unsigned int i;
+ int ret;
+
+ if (core->res->hfi_version != HFI_VERSION_1XX)
+ return 0;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ mutex_init(&inst->lock);
+ inst->core = core;
+ inst->session_type = type;
+ if (type == VIDC_SESSION_TYPE_DEC)
+ codecs = core->dec_codecs;
+ else
+ codecs = core->enc_codecs;
+
+ ret = hfi_session_create(inst, &dummy_ops);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < MAX_CODEC_NUM; i++) {
+ codec = (1UL << i) & codecs;
+ if (!codec)
+ continue;
+
+ ret = hfi_session_init(inst, to_v4l2_codec_type(codec));
+ if (ret)
+ goto done;
+
+ ret = hfi_session_deinit(inst);
+ if (ret)
+ goto done;
+ }
+
+done:
+ hfi_session_destroy(inst);
+err:
+ mutex_destroy(&inst->lock);
+ kfree(inst);
+
+ return ret;
+}
+
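venus_enumerate_codecs() probes each advertised codec with a throwaway session so v1 firmware reports its per-codec capabilities. The open-coded bit walk is equivalent to this sketch built on the kernel's bitmap iterator (assuming an unsigned long copy of the mask, matching the widened dec_codecs/enc_codecs fields in core.h below):

	unsigned long mask = codecs;
	unsigned int bit;

	for_each_set_bit(bit, &mask, MAX_CODEC_NUM) {
		ret = hfi_session_init(inst, to_v4l2_codec_type(BIT(bit)));
		if (ret)
			break;

		ret = hfi_session_deinit(inst);
		if (ret)
			break;
	}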
static int venus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -219,6 +296,14 @@ static int venus_probe(struct platform_device *pdev)
if (ret)
goto err_venus_shutdown;
+ ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_DEC);
+ if (ret)
+ goto err_venus_shutdown;
+
+ ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_ENC);
+ if (ret)
+ goto err_venus_shutdown;
+
ret = v4l2_device_register(dev, &core->v4l2_dev);
if (ret)
goto err_core_deinit;
@@ -365,9 +450,31 @@ static const struct venus_resources msm8996_res = {
.fwname = "qcom/venus-4.2/venus.mdt",
};
+static const struct freq_tbl sdm845_freq_table[] = {
+ { 1944000, 380000000 }, /* 4k UHD @ 60 */
+ { 972000, 320000000 }, /* 4k UHD @ 30 */
+ { 489600, 200000000 }, /* 1080p @ 60 */
+ { 244800, 100000000 }, /* 1080p @ 30 */
+};
+
+static const struct venus_resources sdm845_res = {
+ .freq_tbl = sdm845_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(sdm845_freq_table),
+ .clks = {"core", "iface", "bus" },
+ .clks_num = 3,
+ .max_load = 2563200,
+ .hfi_version = HFI_VERSION_4XX,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/venus-5.2/venus.mdt",
+};
+
static const struct of_device_id venus_dt_match[] = {
{ .compatible = "qcom,msm8916-venus", .data = &msm8916_res, },
{ .compatible = "qcom,msm8996-venus", .data = &msm8996_res, },
+ { .compatible = "qcom,sdm845-venus", .data = &sdm845_res, },
{ }
};
MODULE_DEVICE_TABLE(of, venus_dt_match);
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 0360d295f4c8..2f02365f4818 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -57,6 +57,30 @@ struct venus_format {
u32 type;
};
+#define MAX_PLANES 4
+#define MAX_FMT_ENTRIES 32
+#define MAX_CAP_ENTRIES 32
+#define MAX_ALLOC_MODE_ENTRIES 16
+#define MAX_CODEC_NUM 32
+
+struct raw_formats {
+ u32 buftype;
+ u32 fmt;
+};
+
+struct venus_caps {
+ u32 codec;
+ u32 domain;
+ bool cap_bufs_mode_dynamic;
+ unsigned int num_caps;
+ struct hfi_capability caps[MAX_CAP_ENTRIES];
+ unsigned int num_pl;
+ struct hfi_profile_level pl[HFI_MAX_PROFILE_COUNT];
+ unsigned int num_fmts;
+ struct raw_formats fmts[MAX_FMT_ENTRIES];
+ bool valid; /* used only for Venus v1xx */
+};
+
/**
* struct venus_core - holds core parameters valid for all instances
*
@@ -65,6 +89,8 @@ struct venus_format {
* @clks: an array of struct clk pointers
* @core0_clk: a struct clk pointer for core0
* @core1_clk: a struct clk pointer for core1
+ * @core0_bus_clk: a struct clk pointer for core0 bus clock
+ * @core1_bus_clk: a struct clk pointer for core1 bus clock
* @vdev_dec: a reference to video device structure for decoder instances
* @vdev_enc: a reference to video device structure for encoder instances
* @v4l2_dev: a holder for v4l2 device structure
@@ -94,6 +120,8 @@ struct venus_core {
struct clk *clks[VIDC_CLKS_NUM_MAX];
struct clk *core0_clk;
struct clk *core1_clk;
+ struct clk *core0_bus_clk;
+ struct clk *core1_bus_clk;
struct video_device *vdev_dec;
struct video_device *vdev_enc;
struct v4l2_device v4l2_dev;
@@ -109,8 +137,8 @@ struct venus_core {
unsigned int error;
bool sys_error;
const struct hfi_core_ops *core_ops;
- u32 enc_codecs;
- u32 dec_codecs;
+ unsigned long enc_codecs;
+ unsigned long dec_codecs;
unsigned int max_sessions_supported;
#define ENC_ROTATION_CAPABILITY 0x1
#define ENC_SCALING_CAPABILITY 0x2
@@ -120,6 +148,8 @@ struct venus_core {
void *priv;
const struct hfi_ops *ops;
struct delayed_work work;
+ struct venus_caps caps[MAX_CODEC_NUM];
+ unsigned int codecs_count;
};
struct vdec_controls {
@@ -160,10 +190,12 @@ struct venc_controls {
u32 mpeg4;
u32 h264;
u32 vpx;
+ u32 hevc;
} profile;
struct {
u32 mpeg4;
u32 h264;
+ u32 hevc;
} level;
};
@@ -185,6 +217,7 @@ struct venus_buffer {
* @list: used for attach an instance to the core
* @lock: instance lock
* @core: a reference to the core struct
+ * @dpbbufs: a list of decoded picture buffers
* @internalbufs: a list of internal buffers
* @registeredbufs: a list of registered capture buffers
* @delayed_process: a list of delayed buffers
@@ -209,9 +242,15 @@ struct venus_buffer {
* @num_output_bufs: holds number of output buffers
* @input_buf_size: holds input buffer size
* @output_buf_size: holds output buffer size
+ * @output2_buf_size: holds secondary decoder output buffer size
+ * @dpb_buftype: decoded picture buffer type
+ * @dpb_fmt: decoded picture buffer raw format
+ * @opb_buftype: output picture buffer type
+ * @opb_fmt: output picture buffer raw format
* @reconfig: a flag raised by decoder when the stream resolution changed
* @reconfig_width: holds the new width
* @reconfig_height: holds the new height
+ * @hfi_codec: current codec for this instance in HFI space
* @sequence_cap: a sequence counter for capture queue
* @sequence_out: a sequence counter for output queue
* @m2m_dev: a reference to m2m device structure
@@ -224,27 +263,12 @@ struct venus_buffer {
* @priv: a private for HFI operations callbacks
* @session_type: the type of the session (decoder or encoder)
* @hprop: a union used as a holder by get property
- * @cap_width: width capability
- * @cap_height: height capability
- * @cap_mbs_per_frame: macroblocks per frame capability
- * @cap_mbs_per_sec: macroblocks per second capability
- * @cap_framerate: framerate capability
- * @cap_scale_x: horizontal scaling capability
- * @cap_scale_y: vertical scaling capability
- * @cap_bitrate: bitrate capability
- * @cap_hier_p: hier capability
- * @cap_ltr_count: LTR count capability
- * @cap_secure_output2_threshold: secure OUTPUT2 threshold capability
- * @cap_bufs_mode_static: buffers allocation mode capability
- * @cap_bufs_mode_dynamic: buffers allocation mode capability
- * @pl_count: count of supported profiles/levels
- * @pl: supported profiles/levels
- * @bufreq: holds buffer requirements
*/
struct venus_inst {
struct list_head list;
struct mutex lock;
struct venus_core *core;
+ struct list_head dpbbufs;
struct list_head internalbufs;
struct list_head registeredbufs;
struct list_head delayed_process;
@@ -273,9 +297,15 @@ struct venus_inst {
unsigned int num_output_bufs;
unsigned int input_buf_size;
unsigned int output_buf_size;
+ unsigned int output2_buf_size;
+ u32 dpb_buftype;
+ u32 dpb_fmt;
+ u32 opb_buftype;
+ u32 opb_fmt;
bool reconfig;
u32 reconfig_width;
u32 reconfig_height;
+ u32 hfi_codec;
u32 sequence_cap;
u32 sequence_out;
struct v4l2_m2m_dev *m2m_dev;
@@ -287,24 +317,12 @@ struct venus_inst {
const struct hfi_inst_ops *ops;
u32 session_type;
union hfi_get_property hprop;
- struct hfi_capability cap_width;
- struct hfi_capability cap_height;
- struct hfi_capability cap_mbs_per_frame;
- struct hfi_capability cap_mbs_per_sec;
- struct hfi_capability cap_framerate;
- struct hfi_capability cap_scale_x;
- struct hfi_capability cap_scale_y;
- struct hfi_capability cap_bitrate;
- struct hfi_capability cap_hier_p;
- struct hfi_capability cap_ltr_count;
- struct hfi_capability cap_secure_output2_threshold;
- bool cap_bufs_mode_static;
- bool cap_bufs_mode_dynamic;
- unsigned int pl_count;
- struct hfi_profile_level pl[HFI_MAX_PROFILE_COUNT];
- struct hfi_buffer_requirements bufreq[HFI_BUFFER_TYPE_MAX];
};
+#define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX)
+#define IS_V3(core) ((core)->res->hfi_version == HFI_VERSION_3XX)
+#define IS_V4(core) ((core)->res->hfi_version == HFI_VERSION_4XX)
+
#define ctrl_to_inst(ctrl) \
container_of((ctrl)->handler, struct venus_inst, ctrl_handler)
@@ -318,4 +336,18 @@ static inline void *to_hfi_priv(struct venus_core *core)
return core->priv;
}
+static inline struct venus_caps *
+venus_caps_by_codec(struct venus_core *core, u32 codec, u32 domain)
+{
+ unsigned int c;
+
+ for (c = 0; c < core->codecs_count; c++) {
+ if (core->caps[c].codec == codec &&
+ core->caps[c].domain == domain)
+ return &core->caps[c];
+ }
+
+ return NULL;
+}
+
#endif
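A minimal usage sketch of the new per-codec capability table together with the
version macros, assuming a hypothetical caller (use_dynamic_bufs() is invented
for illustration):

	struct venus_caps *caps;

	caps = venus_caps_by_codec(inst->core, inst->hfi_codec,
				   inst->session_type);
	if (!caps)
		return -EINVAL;	/* codec/domain pair not reported by firmware */

	if (IS_V4(inst->core) && caps->cap_bufs_mode_dynamic)
		use_dynamic_bufs(inst);	/* hypothetical helper */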
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 0ce9559a2924..cd3b96e6f24b 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -13,6 +13,7 @@
*
*/
#include <linux/clk.h>
+#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
@@ -24,6 +25,7 @@
#include "core.h"
#include "helpers.h"
#include "hfi_helper.h"
+#include "hfi_venus_io.h"
struct intbuf {
struct list_head list;
@@ -69,6 +71,9 @@ bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
case V4L2_PIX_FMT_XVID:
codec = HFI_VIDEO_CODEC_DIVX;
break;
+ case V4L2_PIX_FMT_HEVC:
+ codec = HFI_VIDEO_CODEC_HEVC;
+ break;
default:
return false;
}
@@ -83,6 +88,106 @@ bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
}
EXPORT_SYMBOL_GPL(venus_helper_check_codec);
+static int venus_helper_queue_dpb_bufs(struct venus_inst *inst)
+{
+ struct intbuf *buf;
+ int ret = 0;
+
+ list_for_each_entry(buf, &inst->dpbbufs, list) {
+ struct hfi_frame_data fdata;
+
+ memset(&fdata, 0, sizeof(fdata));
+ fdata.alloc_len = buf->size;
+ fdata.device_addr = buf->da;
+ fdata.buffer_type = buf->type;
+
+ ret = hfi_session_process_buf(inst, &fdata);
+ if (ret)
+ goto fail;
+ }
+
+fail:
+ return ret;
+}
+
+int venus_helper_free_dpb_bufs(struct venus_inst *inst)
+{
+ struct intbuf *buf, *n;
+
+ list_for_each_entry_safe(buf, n, &inst->dpbbufs, list) {
+ list_del_init(&buf->list);
+ dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
+ buf->attrs);
+ kfree(buf);
+ }
+
+ INIT_LIST_HEAD(&inst->dpbbufs);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_free_dpb_bufs);
+
+int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev;
+ enum hfi_version ver = core->res->hfi_version;
+ struct hfi_buffer_requirements bufreq;
+ u32 buftype = inst->dpb_buftype;
+ unsigned int dpb_size = 0;
+ struct intbuf *buf;
+ unsigned int i;
+ u32 count;
+ int ret;
+
+ /* no need to allocate dpb buffers */
+ if (!inst->dpb_fmt)
+ return 0;
+
+ if (inst->dpb_buftype == HFI_BUFFER_OUTPUT)
+ dpb_size = inst->output_buf_size;
+ else if (inst->dpb_buftype == HFI_BUFFER_OUTPUT2)
+ dpb_size = inst->output2_buf_size;
+
+ if (!dpb_size)
+ return 0;
+
+ ret = venus_helper_get_bufreq(inst, buftype, &bufreq);
+ if (ret)
+ return ret;
+
+ count = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+
+ for (i = 0; i < count; i++) {
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ buf->type = buftype;
+ buf->size = dpb_size;
+ buf->attrs = DMA_ATTR_WRITE_COMBINE |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+ buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
+ buf->attrs);
+ if (!buf->va) {
+ kfree(buf);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ list_add_tail(&buf->list, &inst->dpbbufs);
+ }
+
+ return 0;
+
+fail:
+ venus_helper_free_dpb_bufs(inst);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(venus_helper_alloc_dpb_bufs);
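Taken together, the DPB helpers form an alloc/queue/free lifecycle; the
queueing half is wired into venus_helper_vb2_start_streaming() later in this
patch. A hedged sketch of the intended sequence on the decoder start path:

	/* sketch only: mirrors the start/stop streaming paths in this patch */
	ret = venus_helper_alloc_dpb_bufs(inst);
	if (ret)
		return ret;

	ret = venus_helper_queue_dpb_bufs(inst);	/* static helper above */
	if (ret)
		venus_helper_free_dpb_bufs(inst);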
+
static int intbufs_set_buffer(struct venus_inst *inst, u32 type)
{
struct venus_core *core = inst->core;
@@ -166,21 +271,38 @@ static int intbufs_unset_buffers(struct venus_inst *inst)
return ret;
}
-static const unsigned int intbuf_types[] = {
- HFI_BUFFER_INTERNAL_SCRATCH,
- HFI_BUFFER_INTERNAL_SCRATCH_1,
- HFI_BUFFER_INTERNAL_SCRATCH_2,
+static const unsigned int intbuf_types_1xx[] = {
+ HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_1XX),
+ HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_1XX),
+ HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_1XX),
+ HFI_BUFFER_INTERNAL_PERSIST,
+ HFI_BUFFER_INTERNAL_PERSIST_1,
+};
+
+static const unsigned int intbuf_types_4xx[] = {
+ HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_4XX),
+ HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_4XX),
+ HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_4XX),
HFI_BUFFER_INTERNAL_PERSIST,
HFI_BUFFER_INTERNAL_PERSIST_1,
};
static int intbufs_alloc(struct venus_inst *inst)
{
- unsigned int i;
+ const unsigned int *intbuf;
+ size_t arr_sz, i;
int ret;
- for (i = 0; i < ARRAY_SIZE(intbuf_types); i++) {
- ret = intbufs_set_buffer(inst, intbuf_types[i]);
+ if (IS_V4(inst->core)) {
+ arr_sz = ARRAY_SIZE(intbuf_types_4xx);
+ intbuf = intbuf_types_4xx;
+ } else {
+ arr_sz = ARRAY_SIZE(intbuf_types_1xx);
+ intbuf = intbuf_types_1xx;
+ }
+
+ for (i = 0; i < arr_sz; i++) {
+ ret = intbufs_set_buffer(inst, intbuf[i]);
if (ret)
goto error;
}
@@ -257,20 +379,23 @@ static int load_scale_clocks(struct venus_core *core)
set_freq:
- if (core->res->hfi_version == HFI_VERSION_3XX) {
- ret = clk_set_rate(clk, freq);
- ret |= clk_set_rate(core->core0_clk, freq);
- ret |= clk_set_rate(core->core1_clk, freq);
- } else {
- ret = clk_set_rate(clk, freq);
- }
+ ret = clk_set_rate(clk, freq);
+ if (ret)
+ goto err;
- if (ret) {
- dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
- return ret;
- }
+ ret = clk_set_rate(core->core0_clk, freq);
+ if (ret)
+ goto err;
+
+ ret = clk_set_rate(core->core1_clk, freq);
+ if (ret)
+ goto err;
return 0;
+
+err:
+ dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
+ return ret;
}
static void fill_buffer_desc(const struct venus_buffer *buf,
@@ -325,7 +450,10 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
if (vbuf->flags & V4L2_BUF_FLAG_LAST || !fdata.filled_len)
fdata.flags |= HFI_BUFFERFLAG_EOS;
} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- fdata.buffer_type = HFI_BUFFER_OUTPUT;
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ fdata.buffer_type = HFI_BUFFER_OUTPUT;
+ else
+ fdata.buffer_type = inst->opb_buftype;
fdata.filled_len = 0;
fdata.offset = 0;
}
@@ -337,18 +465,16 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
return 0;
}
-static inline int is_reg_unreg_needed(struct venus_inst *inst)
+static bool is_dynamic_bufmode(struct venus_inst *inst)
{
- if (inst->session_type == VIDC_SESSION_TYPE_DEC &&
- inst->core->res->hfi_version == HFI_VERSION_3XX)
- return 0;
+ struct venus_core *core = inst->core;
+ struct venus_caps *caps;
- if (inst->session_type == VIDC_SESSION_TYPE_DEC &&
- inst->cap_bufs_mode_dynamic &&
- inst->core->res->hfi_version == HFI_VERSION_1XX)
+ caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
+ if (!caps)
return 0;
- return 1;
+ return caps->cap_bufs_mode_dynamic;
}
static int session_unregister_bufs(struct venus_inst *inst)
@@ -357,7 +483,7 @@ static int session_unregister_bufs(struct venus_inst *inst)
struct hfi_buffer_desc bd;
int ret = 0;
- if (!is_reg_unreg_needed(inst))
+ if (is_dynamic_bufmode(inst))
return 0;
list_for_each_entry_safe(buf, n, &inst->registeredbufs, reg_list) {
@@ -377,7 +503,7 @@ static int session_register_bufs(struct venus_inst *inst)
struct venus_buffer *buf;
int ret = 0;
- if (!is_reg_unreg_needed(inst))
+ if (is_dynamic_bufmode(inst))
return 0;
list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
@@ -392,6 +518,20 @@ static int session_register_bufs(struct venus_inst *inst)
return ret;
}
+static u32 to_hfi_raw_fmt(u32 v4l2_fmt)
+{
+ switch (v4l2_fmt) {
+ case V4L2_PIX_FMT_NV12:
+ return HFI_COLOR_FORMAT_NV12;
+ case V4L2_PIX_FMT_NV21:
+ return HFI_COLOR_FORMAT_NV21;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
struct hfi_buffer_requirements *req)
{
@@ -423,6 +563,104 @@ int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
}
EXPORT_SYMBOL_GPL(venus_helper_get_bufreq);
+static u32 get_framesize_raw_nv12(u32 width, u32 height)
+{
+ u32 y_stride, uv_stride, y_plane;
+ u32 y_sclines, uv_sclines, uv_plane;
+ u32 size;
+
+ y_stride = ALIGN(width, 128);
+ uv_stride = ALIGN(width, 128);
+ y_sclines = ALIGN(height, 32);
+ uv_sclines = ALIGN(((height + 1) >> 1), 16);
+
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + SZ_4K;
+ size = y_plane + uv_plane + SZ_8K;
+
+ return ALIGN(size, SZ_4K);
+}
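For a 1920x1080 NV12 frame this works out to y_stride = uv_stride =
ALIGN(1920, 128) = 1920, y_sclines = ALIGN(1080, 32) = 1088, uv_sclines =
ALIGN(540, 16) = 544, hence y_plane = 2088960, uv_plane = 1044480 + SZ_4K =
1048576, and size = 2088960 + 1048576 + SZ_8K = 3145728 bytes, which is
already 4K-aligned.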
+
+static u32 get_framesize_raw_nv12_ubwc(u32 width, u32 height)
+{
+ u32 y_meta_stride, y_meta_plane;
+ u32 y_stride, y_plane;
+ u32 uv_meta_stride, uv_meta_plane;
+ u32 uv_stride, uv_plane;
+ u32 extradata = SZ_16K;
+
+ y_meta_stride = ALIGN(DIV_ROUND_UP(width, 32), 64);
+ y_meta_plane = y_meta_stride * ALIGN(DIV_ROUND_UP(height, 8), 16);
+ y_meta_plane = ALIGN(y_meta_plane, SZ_4K);
+
+ y_stride = ALIGN(width, 128);
+ y_plane = ALIGN(y_stride * ALIGN(height, 32), SZ_4K);
+
+ uv_meta_stride = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
+ uv_meta_plane = uv_meta_stride * ALIGN(DIV_ROUND_UP(height / 2, 8), 16);
+ uv_meta_plane = ALIGN(uv_meta_plane, SZ_4K);
+
+ uv_stride = ALIGN(width, 128);
+ uv_plane = ALIGN(uv_stride * ALIGN(height / 2, 32), SZ_4K);
+
+ return ALIGN(y_meta_plane + y_plane + uv_meta_plane + uv_plane +
+ max(extradata, y_stride * 48), SZ_4K);
+}
+
+u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height)
+{
+ switch (hfi_fmt) {
+ case HFI_COLOR_FORMAT_NV12:
+ case HFI_COLOR_FORMAT_NV21:
+ return get_framesize_raw_nv12(width, height);
+ case HFI_COLOR_FORMAT_NV12_UBWC:
+ return get_framesize_raw_nv12_ubwc(width, height);
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(venus_helper_get_framesz_raw);
+
+u32 venus_helper_get_framesz(u32 v4l2_fmt, u32 width, u32 height)
+{
+ u32 hfi_fmt, sz;
+ bool compressed;
+
+ switch (v4l2_fmt) {
+ case V4L2_PIX_FMT_MPEG:
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_H264_NO_SC:
+ case V4L2_PIX_FMT_H264_MVC:
+ case V4L2_PIX_FMT_H263:
+ case V4L2_PIX_FMT_MPEG1:
+ case V4L2_PIX_FMT_MPEG2:
+ case V4L2_PIX_FMT_MPEG4:
+ case V4L2_PIX_FMT_XVID:
+ case V4L2_PIX_FMT_VC1_ANNEX_G:
+ case V4L2_PIX_FMT_VC1_ANNEX_L:
+ case V4L2_PIX_FMT_VP8:
+ case V4L2_PIX_FMT_VP9:
+ case V4L2_PIX_FMT_HEVC:
+ compressed = true;
+ break;
+ default:
+ compressed = false;
+ break;
+ }
+
+ if (compressed) {
+ sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2 / 2;
+ return ALIGN(sz, SZ_4K);
+ }
+
+ hfi_fmt = to_hfi_raw_fmt(v4l2_fmt);
+ if (!hfi_fmt)
+ return 0;
+
+ return venus_helper_get_framesz_raw(hfi_fmt, width, height);
+}
+EXPORT_SYMBOL_GPL(venus_helper_get_framesz);
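For the compressed case the heuristic reserves about half of a worst-case raw
frame: at 1920x1080, sz = ALIGN(1080, 32) * ALIGN(1920, 32) * 3 / 2 / 2 =
1088 * 1920 * 3 / 4 = 1566720, rounded up by ALIGN(sz, SZ_4K) to 1568768
bytes.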
+
int venus_helper_set_input_resolution(struct venus_inst *inst,
unsigned int width, unsigned int height)
{
@@ -438,12 +676,13 @@ int venus_helper_set_input_resolution(struct venus_inst *inst,
EXPORT_SYMBOL_GPL(venus_helper_set_input_resolution);
int venus_helper_set_output_resolution(struct venus_inst *inst,
- unsigned int width, unsigned int height)
+ unsigned int width, unsigned int height,
+ u32 buftype)
{
u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
struct hfi_framesize fs;
- fs.buffer_type = HFI_BUFFER_OUTPUT;
+ fs.buffer_type = buftype;
fs.width = width;
fs.height = height;
@@ -451,8 +690,37 @@ int venus_helper_set_output_resolution(struct venus_inst *inst,
}
EXPORT_SYMBOL_GPL(venus_helper_set_output_resolution);
+int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_WORK_MODE;
+ struct hfi_video_work_mode wm;
+
+ if (!IS_V4(inst->core))
+ return 0;
+
+ wm.video_work_mode = mode;
+
+ return hfi_session_set_property(inst, ptype, &wm);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_work_mode);
+
+int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
+{
+ const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
+ struct hfi_videocores_usage_type cu;
+
+ if (!IS_V4(inst->core))
+ return 0;
+
+ cu.video_core_enable_mask = usage;
+
+ return hfi_session_set_property(inst, ptype, &cu);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_core_usage);
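Both 4xx-only knobs return 0 on older HFI versions, so callers may set them
unconditionally; a hypothetical instance-open path (constants are the ones
added to hfi_helper.h below) might do:

	/* illustrative: pipelined work mode, session pinned to video core 1 */
	ret = venus_helper_set_work_mode(inst, VIDC_WORK_MODE_2);
	if (ret)
		return ret;

	ret = venus_helper_set_core_usage(inst, VIDC_CORE_ID_1);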
+
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
- unsigned int output_bufs)
+ unsigned int output_bufs,
+ unsigned int output2_bufs)
{
u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
struct hfi_buffer_count_actual buf_count;
@@ -468,41 +736,122 @@ int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
buf_count.type = HFI_BUFFER_OUTPUT;
buf_count.count_actual = output_bufs;
- return hfi_session_set_property(inst, ptype, &buf_count);
+ ret = hfi_session_set_property(inst, ptype, &buf_count);
+ if (ret)
+ return ret;
+
+ if (output2_bufs) {
+ buf_count.type = HFI_BUFFER_OUTPUT2;
+ buf_count.count_actual = output2_bufs;
+
+ ret = hfi_session_set_property(inst, ptype, &buf_count);
+ }
+
+ return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_set_num_bufs);
-int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt)
+int venus_helper_set_raw_format(struct venus_inst *inst, u32 hfi_format,
+ u32 buftype)
{
+ const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
struct hfi_uncompressed_format_select fmt;
- u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
- int ret;
+
+ fmt.buffer_type = buftype;
+ fmt.format = hfi_format;
+
+ return hfi_session_set_property(inst, ptype, &fmt);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_raw_format);
+
+int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt)
+{
+ u32 hfi_format, buftype;
if (inst->session_type == VIDC_SESSION_TYPE_DEC)
- fmt.buffer_type = HFI_BUFFER_OUTPUT;
+ buftype = HFI_BUFFER_OUTPUT;
else if (inst->session_type == VIDC_SESSION_TYPE_ENC)
- fmt.buffer_type = HFI_BUFFER_INPUT;
+ buftype = HFI_BUFFER_INPUT;
else
return -EINVAL;
- switch (pixfmt) {
- case V4L2_PIX_FMT_NV12:
- fmt.format = HFI_COLOR_FORMAT_NV12;
- break;
- case V4L2_PIX_FMT_NV21:
- fmt.format = HFI_COLOR_FORMAT_NV21;
- break;
- default:
+ hfi_format = to_hfi_raw_fmt(pixfmt);
+ if (!hfi_format)
return -EINVAL;
- }
- ret = hfi_session_set_property(inst, ptype, &fmt);
+ return venus_helper_set_raw_format(inst, hfi_format, buftype);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_color_format);
+
+int venus_helper_set_multistream(struct venus_inst *inst, bool out_en,
+ bool out2_en)
+{
+ struct hfi_multi_stream multi = {0};
+ u32 ptype = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
+ int ret;
+
+ multi.buffer_type = HFI_BUFFER_OUTPUT;
+ multi.enable = out_en;
+
+ ret = hfi_session_set_property(inst, ptype, &multi);
+ if (ret)
+ return ret;
+
+ multi.buffer_type = HFI_BUFFER_OUTPUT2;
+ multi.enable = out2_en;
+
+ return hfi_session_set_property(inst, ptype, &multi);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_multistream);
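As an illustration of the split-output configuration this enables: a decoder
that keeps the reconstructed (possibly UBWC) frames on OUTPUT as a
firmware-internal DPB and hands linear frames to userspace on OUTPUT2 would
disable the first stream and enable the second (sketch; the actual call site
lives in the vdec code, outside this hunk):

	/* illustrative: OUTPUT kept firmware-internal, OUTPUT2 to userspace */
	ret = venus_helper_set_multistream(inst, false, true);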
+
+int venus_helper_set_dyn_bufmode(struct venus_inst *inst)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE;
+ struct hfi_buffer_alloc_mode mode;
+ int ret;
+
+ if (!is_dynamic_bufmode(inst))
+ return 0;
+
+ mode.type = HFI_BUFFER_OUTPUT;
+ mode.mode = HFI_BUFFER_MODE_DYNAMIC;
+
+ ret = hfi_session_set_property(inst, ptype, &mode);
if (ret)
return ret;
+ mode.type = HFI_BUFFER_OUTPUT2;
+
+ return hfi_session_set_property(inst, ptype, &mode);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_dyn_bufmode);
+
+int venus_helper_set_bufsize(struct venus_inst *inst, u32 bufsize, u32 buftype)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
+ struct hfi_buffer_size_actual bufsz;
+
+ bufsz.type = buftype;
+ bufsz.size = bufsize;
+
+ return hfi_session_set_property(inst, ptype, &bufsz);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_bufsize);
+
+unsigned int venus_helper_get_opb_size(struct venus_inst *inst)
+{
+ /* the encoder has only one output */
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ return inst->output_buf_size;
+
+ if (inst->opb_buftype == HFI_BUFFER_OUTPUT)
+ return inst->output_buf_size;
+ else if (inst->opb_buftype == HFI_BUFFER_OUTPUT2)
+ return inst->output2_buf_size;
+
return 0;
}
-EXPORT_SYMBOL_GPL(venus_helper_set_color_format);
+EXPORT_SYMBOL_GPL(venus_helper_get_opb_size);
static void delayed_process_buf_func(struct work_struct *work)
{
@@ -602,9 +951,10 @@ EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_init);
int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int out_buf_size = venus_helper_get_opb_size(inst);
if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
- vb2_plane_size(vb, 0) < inst->output_buf_size)
+ vb2_plane_size(vb, 0) < out_buf_size)
return -EINVAL;
if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
vb2_plane_size(vb, 0) < inst->input_buf_size)
@@ -674,6 +1024,8 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
if (ret)
hfi_session_abort(inst);
+ venus_helper_free_dpb_bufs(inst);
+
load_scale_clocks(core);
INIT_LIST_HEAD(&inst->registeredbufs);
}
@@ -712,8 +1064,14 @@ int venus_helper_vb2_start_streaming(struct venus_inst *inst)
if (ret)
goto err_unload_res;
+ ret = venus_helper_queue_dpb_bufs(inst);
+ if (ret)
+ goto err_session_stop;
+
return 0;
+err_session_stop:
+ hfi_session_stop(inst);
err_unload_res:
hfi_session_unload_res(inst);
err_unreg_bufs:
@@ -766,3 +1124,113 @@ void venus_helper_init_instance(struct venus_inst *inst)
}
}
EXPORT_SYMBOL_GPL(venus_helper_init_instance);
+
+static bool find_fmt_from_caps(struct venus_caps *caps, u32 buftype, u32 fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < caps->num_fmts; i++) {
+ if (caps->fmts[i].buftype == buftype &&
+ caps->fmts[i].fmt == fmt)
+ return true;
+ }
+
+ return false;
+}
+
+int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt,
+ u32 *out_fmt, u32 *out2_fmt, bool ubwc)
+{
+ struct venus_core *core = inst->core;
+ struct venus_caps *caps;
+ u32 ubwc_fmt, fmt = to_hfi_raw_fmt(v4l2_fmt);
+ bool found, found_ubwc;
+
+ *out_fmt = *out2_fmt = 0;
+
+ if (!fmt)
+ return -EINVAL;
+
+ caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
+ if (!caps)
+ return -EINVAL;
+
+ if (ubwc) {
+ ubwc_fmt = fmt | HFI_COLOR_FORMAT_UBWC_BASE;
+ found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT,
+ ubwc_fmt);
+ found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
+
+ if (found_ubwc && found) {
+ *out_fmt = ubwc_fmt;
+ *out2_fmt = fmt;
+ return 0;
+ }
+ }
+
+ found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, fmt);
+ if (found) {
+ *out_fmt = fmt;
+ *out2_fmt = 0;
+ return 0;
+ }
+
+ found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
+ if (found) {
+ *out_fmt = 0;
+ *out2_fmt = fmt;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(venus_helper_get_out_fmts);
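A hypothetical decoder configuration path could use this query to choose
between single- and dual-output operation (sketch only; the assignments below
are assumptions for illustration):

	u32 out_fmt, out2_fmt;

	ret = venus_helper_get_out_fmts(inst, V4L2_PIX_FMT_NV12,
					&out_fmt, &out2_fmt, true);
	if (ret)
		return ret;

	if (out2_fmt) {
		/* UBWC DPB on OUTPUT, linear frames on OUTPUT2 */
		inst->dpb_buftype = HFI_BUFFER_OUTPUT;
		inst->dpb_fmt = out_fmt;
		inst->opb_buftype = HFI_BUFFER_OUTPUT2;
		inst->opb_fmt = out2_fmt;
	}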
+
+int venus_helper_power_enable(struct venus_core *core, u32 session_type,
+ bool enable)
+{
+ void __iomem *ctrl, *stat;
+ u32 val;
+ int ret;
+
+ if (!IS_V3(core) && !IS_V4(core))
+ return 0;
+
+ if (IS_V3(core)) {
+ if (session_type == VIDC_SESSION_TYPE_DEC)
+ ctrl = core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL;
+ else
+ ctrl = core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL;
+ if (enable)
+ writel(0, ctrl);
+ else
+ writel(1, ctrl);
+
+ return 0;
+ }
+
+ if (session_type == VIDC_SESSION_TYPE_DEC) {
+ ctrl = core->base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
+ stat = core->base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
+ } else {
+ ctrl = core->base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL;
+ stat = core->base + WRAPPER_VCODEC1_MMCC_POWER_STATUS;
+ }
+
+ if (enable) {
+ writel(0, ctrl);
+
+ ret = readl_poll_timeout(stat, val, val & BIT(1), 1, 100);
+ if (ret)
+ return ret;
+ } else {
+ writel(1, ctrl);
+
+ ret = readl_poll_timeout(stat, val, !(val & BIT(1)), 1, 100);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_power_enable);
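A hypothetical caller would bracket firmware session setup with the new power
helper; on 1xx cores both calls are no-ops (placement assumed for
illustration):

	/* illustrative: power the decoder core only around session creation */
	venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, true);
	ret = hfi_session_init(inst, pixfmt);
	venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false);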
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
index 971392be5df5..2475f284f396 100644
--- a/drivers/media/platform/qcom/venus/helpers.h
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -33,14 +33,33 @@ void venus_helper_m2m_device_run(void *priv);
void venus_helper_m2m_job_abort(void *priv);
int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
struct hfi_buffer_requirements *req);
+u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height);
+u32 venus_helper_get_framesz(u32 v4l2_fmt, u32 width, u32 height);
int venus_helper_set_input_resolution(struct venus_inst *inst,
unsigned int width, unsigned int height);
int venus_helper_set_output_resolution(struct venus_inst *inst,
- unsigned int width, unsigned int height);
+ unsigned int width, unsigned int height,
+ u32 buftype);
+int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode);
+int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage);
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
- unsigned int output_bufs);
+ unsigned int output_bufs,
+ unsigned int output2_bufs);
+int venus_helper_set_raw_format(struct venus_inst *inst, u32 hfi_format,
+ u32 buftype);
int venus_helper_set_color_format(struct venus_inst *inst, u32 fmt);
+int venus_helper_set_dyn_bufmode(struct venus_inst *inst);
+int venus_helper_set_bufsize(struct venus_inst *inst, u32 bufsize, u32 buftype);
+int venus_helper_set_multistream(struct venus_inst *inst, bool out_en,
+ bool out2_en);
+unsigned int venus_helper_get_opb_size(struct venus_inst *inst);
void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf);
void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx);
void venus_helper_init_instance(struct venus_inst *inst);
+int venus_helper_get_out_fmts(struct venus_inst *inst, u32 fmt, u32 *out_fmt,
+ u32 *out2_fmt, bool ubwc);
+int venus_helper_alloc_dpb_bufs(struct venus_inst *inst);
+int venus_helper_free_dpb_bufs(struct venus_inst *inst);
+int venus_helper_power_enable(struct venus_core *core, u32 session_type,
+ bool enable);
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index bca894a00c07..24207829982f 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -49,6 +49,8 @@ static u32 to_codec_type(u32 pixfmt)
return HFI_VIDEO_CODEC_VP9;
case V4L2_PIX_FMT_XVID:
return HFI_VIDEO_CODEC_DIVX;
+ case V4L2_PIX_FMT_HEVC:
+ return HFI_VIDEO_CODEC_HEVC;
default:
return 0;
}
@@ -203,13 +205,12 @@ int hfi_session_init(struct venus_inst *inst, u32 pixfmt)
{
struct venus_core *core = inst->core;
const struct hfi_ops *ops = core->ops;
- u32 codec;
int ret;
- codec = to_codec_type(pixfmt);
+ inst->hfi_codec = to_codec_type(pixfmt);
reinit_completion(&inst->done);
- ret = ops->session_init(inst, inst->session_type, codec);
+ ret = ops->session_init(inst, inst->session_type, inst->hfi_codec);
if (ret)
return ret;
@@ -312,7 +313,7 @@ int hfi_session_continue(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
- if (core->res->hfi_version != HFI_VERSION_3XX)
+ if (core->res->hfi_version == HFI_VERSION_1XX)
return 0;
return core->ops->session_continue(inst);
@@ -473,7 +474,8 @@ int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd)
if (fd->buffer_type == HFI_BUFFER_INPUT)
return ops->session_etb(inst, fd);
- else if (fd->buffer_type == HFI_BUFFER_OUTPUT)
+ else if (fd->buffer_type == HFI_BUFFER_OUTPUT ||
+ fd->buffer_type == HFI_BUFFER_OUTPUT2)
return ops->session_ftb(inst, fd);
return -EINVAL;
diff --git a/drivers/media/platform/qcom/venus/hfi.h b/drivers/media/platform/qcom/venus/hfi.h
index 5466b7d60dd0..6038d8e0ab22 100644
--- a/drivers/media/platform/qcom/venus/hfi.h
+++ b/drivers/media/platform/qcom/venus/hfi.h
@@ -74,6 +74,16 @@ struct hfi_event_data {
u32 tag;
u32 profile;
u32 level;
+ /* the following properties appear from v4 onwards */
+ u32 bit_depth;
+ u32 pic_struct;
+ u32 colour_space;
+ u32 entropy_mode;
+ u32 buf_count;
+ struct {
+ u32 left, top;
+ u32 width, height;
+ } input_crop;
};
/* define core states */
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index 1cfeb7743041..e8389d8d8c48 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -1166,6 +1166,63 @@ pkt_session_set_property_3xx(struct hfi_session_set_property_pkt *pkt,
return ret;
}
+static int
+pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
+ void *cookie, u32 ptype, void *pdata)
+{
+ void *prop_data;
+
+ if (!pkt || !cookie || !pdata)
+ return -EINVAL;
+
+ prop_data = &pkt->data[1];
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->num_properties = 1;
+ pkt->data[0] = ptype;
+
+ /*
+ * Any session set property whose packetization differs from the 3XX
+ * layout should be added as a new case below. All unchanged session
+ * set properties fall through to the default case, which reuses the
+ * 3XX packing.
+ */
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL: {
+ struct hfi_buffer_count_actual *in = pdata;
+ struct hfi_buffer_count_actual_4xx *count = prop_data;
+
+ count->count_actual = in->count_actual;
+ count->type = in->type;
+ count->count_min_host = in->count_actual;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_WORK_MODE: {
+ struct hfi_video_work_mode *in = pdata, *wm = prop_data;
+
+ wm->video_work_mode = in->video_work_mode;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*wm);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE: {
+ struct hfi_videocores_usage_type *in = pdata, *cu = prop_data;
+
+ cu->video_core_enable_mask = in->video_core_enable_mask;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cu);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
+ /* not implemented on Venus 4xx */
+ break;
+ default:
+ return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
+ }
+
+ return 0;
+}
+
int pkt_session_get_property(struct hfi_session_get_property_pkt *pkt,
void *cookie, u32 ptype)
{
@@ -1181,7 +1238,10 @@ int pkt_session_set_property(struct hfi_session_set_property_pkt *pkt,
if (hfi_ver == HFI_VERSION_1XX)
return pkt_session_set_property_1x(pkt, cookie, ptype, pdata);
- return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
+ if (hfi_ver == HFI_VERSION_3XX)
+ return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
+
+ return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata);
}
void pkt_set_version(enum hfi_version version)
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index 55d8eb21403a..15804ad7e65d 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -121,6 +121,7 @@
#define HFI_EXTRADATA_METADATA_FILLER 0x7fe00002
#define HFI_INDEX_EXTRADATA_INPUT_CROP 0x0700000e
+#define HFI_INDEX_EXTRADATA_OUTPUT_CROP 0x0700000f
#define HFI_INDEX_EXTRADATA_DIGITAL_ZOOM 0x07000010
#define HFI_INDEX_EXTRADATA_ASPECT_RATIO 0x7f100003
@@ -376,13 +377,18 @@
#define HFI_BUFFER_OUTPUT2 0x3
#define HFI_BUFFER_INTERNAL_PERSIST 0x4
#define HFI_BUFFER_INTERNAL_PERSIST_1 0x5
-#define HFI_BUFFER_INTERNAL_SCRATCH 0x1000001
-#define HFI_BUFFER_EXTRADATA_INPUT 0x1000002
-#define HFI_BUFFER_EXTRADATA_OUTPUT 0x1000003
-#define HFI_BUFFER_EXTRADATA_OUTPUT2 0x1000004
-#define HFI_BUFFER_INTERNAL_SCRATCH_1 0x1000005
-#define HFI_BUFFER_INTERNAL_SCRATCH_2 0x1000006
-
+#define HFI_BUFFER_INTERNAL_SCRATCH(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0x6 : 0x1000001)
+#define HFI_BUFFER_INTERNAL_SCRATCH_1(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0x7 : 0x1000005)
+#define HFI_BUFFER_INTERNAL_SCRATCH_2(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0x8 : 0x1000006)
+#define HFI_BUFFER_EXTRADATA_INPUT(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0xc : 0x1000002)
+#define HFI_BUFFER_EXTRADATA_OUTPUT(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0xa : 0x1000003)
+#define HFI_BUFFER_EXTRADATA_OUTPUT2(ver) \
+ (((ver) == HFI_VERSION_4XX) ? 0xb : 0x1000004)
#define HFI_BUFFER_TYPE_MAX 11
#define HFI_BUFFER_MODE_STATIC 0x1000001
@@ -424,12 +430,14 @@
#define HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED 0x100e
#define HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT 0x100f
#define HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED 0x1010
+#define HFI_PROPERTY_PARAM_WORK_MODE 0x1015
/*
* HFI_PROPERTY_CONFIG_COMMON_START
* HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000
*/
#define HFI_PROPERTY_CONFIG_FRAME_RATE 0x2001
+#define HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE 0x2002
/*
* HFI_PROPERTY_PARAM_VDEC_COMMON_START
@@ -438,6 +446,9 @@
#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM 0x1003001
#define HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR 0x1003002
#define HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2 0x1003003
+#define HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH 0x1003007
+#define HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT 0x1003009
+#define HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE 0x100300a
/*
* HFI_PROPERTY_CONFIG_VDEC_COMMON_START
@@ -518,6 +529,7 @@
enum hfi_version {
HFI_VERSION_1XX,
HFI_VERSION_3XX,
+ HFI_VERSION_4XX
};
struct hfi_buffer_info {
@@ -767,12 +779,56 @@ struct hfi_framesize {
u32 height;
};
+#define VIDC_CORE_ID_DEFAULT 0
+#define VIDC_CORE_ID_1 1
+#define VIDC_CORE_ID_2 2
+#define VIDC_CORE_ID_3 3
+
+struct hfi_videocores_usage_type {
+ u32 video_core_enable_mask;
+};
+
+#define VIDC_WORK_MODE_1 1
+#define VIDC_WORK_MODE_2 2
+
+struct hfi_video_work_mode {
+ u32 video_work_mode;
+};
+
struct hfi_h264_vui_timing_info {
u32 enable;
u32 fixed_framerate;
u32 time_scale;
};
+struct hfi_bit_depth {
+ u32 buffer_type;
+ u32 bit_depth;
+};
+
+struct hfi_picture_type {
+ u32 is_sync_frame;
+ u32 picture_type;
+};
+
+struct hfi_pic_struct {
+ u32 progressive_only;
+};
+
+struct hfi_colour_space {
+ u32 colour_space;
+};
+
+struct hfi_extradata_input_crop {
+ u32 size;
+ u32 version;
+ u32 port_index;
+ u32 left;
+ u32 top;
+ u32 width;
+ u32 height;
+};
+
#define HFI_COLOR_FORMAT_MONOCHROME 0x01
#define HFI_COLOR_FORMAT_NV12 0x02
#define HFI_COLOR_FORMAT_NV21 0x03
@@ -802,10 +858,23 @@ struct hfi_uncompressed_format_select {
u32 format;
};
+struct hfi_uncompressed_plane_constraints {
+ u32 stride_multiples;
+ u32 max_stride;
+ u32 min_plane_buffer_height_multiple;
+ u32 buffer_alignment;
+};
+
+struct hfi_uncompressed_plane_info {
+ u32 format;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_constraints plane_constraints[1];
+};
+
struct hfi_uncompressed_format_supported {
u32 buffer_type;
u32 format_entries;
- u32 format_info[1];
+ struct hfi_uncompressed_plane_info plane_info[1];
};
struct hfi_uncompressed_plane_actual {
@@ -819,19 +888,6 @@ struct hfi_uncompressed_plane_actual_info {
struct hfi_uncompressed_plane_actual plane_format[1];
};
-struct hfi_uncompressed_plane_constraints {
- u32 stride_multiples;
- u32 max_stride;
- u32 min_plane_buffer_height_multiple;
- u32 buffer_alignment;
-};
-
-struct hfi_uncompressed_plane_info {
- u32 format;
- u32 num_planes;
- struct hfi_uncompressed_plane_constraints plane_format[1];
-};
-
struct hfi_uncompressed_plane_actual_constraints_info {
u32 buffer_type;
u32 num_planes;
@@ -961,6 +1017,12 @@ struct hfi_buffer_count_actual {
u32 count_actual;
};
+struct hfi_buffer_count_actual_4xx {
+ u32 type;
+ u32 count_actual;
+ u32 count_min_host;
+};
+
struct hfi_buffer_size_actual {
u32 type;
u32 size;
@@ -971,6 +1033,14 @@ struct hfi_buffer_display_hold_count_actual {
u32 hold_count;
};
+/* HFI 4XX reorders the fields; use these macros for version-neutral access */
+#define HFI_BUFREQ_HOLD_COUNT(bufreq, ver) \
+ ((ver) == HFI_VERSION_4XX ? 0 : (bufreq)->hold_count)
+#define HFI_BUFREQ_COUNT_MIN(bufreq, ver) \
+ ((ver) == HFI_VERSION_4XX ? (bufreq)->hold_count : (bufreq)->count_min)
+#define HFI_BUFREQ_COUNT_MIN_HOST(bufreq, ver) \
+ ((ver) == HFI_VERSION_4XX ? (bufreq)->count_min : 0)
+
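For example, reading the minimum output buffer count in a version-neutral way
(as the DPB allocator in helpers.c above does) reduces to:

	struct hfi_buffer_requirements bufreq;
	u32 count;

	ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
	if (!ret)
		count = HFI_BUFREQ_COUNT_MIN(&bufreq,
					     inst->core->res->hfi_version);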
struct hfi_buffer_requirements {
u32 type;
u32 size;
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index 90c93d9603dc..0ecdaa15c296 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -21,14 +21,21 @@
#include "hfi.h"
#include "hfi_helper.h"
#include "hfi_msgs.h"
+#include "hfi_parser.h"
static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
struct hfi_msg_event_notify_pkt *pkt)
{
+ enum hfi_version ver = core->res->hfi_version;
struct hfi_event_data event = {0};
int num_properties_changed;
struct hfi_framesize *frame_sz;
struct hfi_profile_level *profile_level;
+ struct hfi_bit_depth *pixel_depth;
+ struct hfi_pic_struct *pic_struct;
+ struct hfi_colour_space *colour_info;
+ struct hfi_buffer_requirements *bufreq;
+ struct hfi_extradata_input_crop *crop;
u8 *data_ptr;
u32 ptype;
@@ -60,14 +67,52 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
frame_sz = (struct hfi_framesize *)data_ptr;
event.width = frame_sz->width;
event.height = frame_sz->height;
- data_ptr += sizeof(frame_sz);
+ data_ptr += sizeof(*frame_sz);
break;
case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
data_ptr += sizeof(u32);
profile_level = (struct hfi_profile_level *)data_ptr;
event.profile = profile_level->profile;
event.level = profile_level->level;
- data_ptr += sizeof(profile_level);
+ data_ptr += sizeof(*profile_level);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH:
+ data_ptr += sizeof(u32);
+ pixel_depth = (struct hfi_bit_depth *)data_ptr;
+ event.bit_depth = pixel_depth->bit_depth;
+ data_ptr += sizeof(*pixel_depth);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT:
+ data_ptr += sizeof(u32);
+ pic_struct = (struct hfi_pic_struct *)data_ptr;
+ event.pic_struct = pic_struct->progressive_only;
+ data_ptr += sizeof(*pic_struct);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE:
+ data_ptr += sizeof(u32);
+ colour_info = (struct hfi_colour_space *)data_ptr;
+ event.colour_space = colour_info->colour_space;
+ data_ptr += sizeof(*colour_info);
+ break;
+ case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
+ data_ptr += sizeof(u32);
+ event.entropy_mode = *(u32 *)data_ptr;
+ data_ptr += sizeof(u32);
+ break;
+ case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+ data_ptr += sizeof(u32);
+ bufreq = (struct hfi_buffer_requirements *)data_ptr;
+ event.buf_count = HFI_BUFREQ_COUNT_MIN(bufreq, ver);
+ data_ptr += sizeof(*bufreq);
+ break;
+ case HFI_INDEX_EXTRADATA_INPUT_CROP:
+ data_ptr += sizeof(u32);
+ crop = (struct hfi_extradata_input_crop *)data_ptr;
+ event.input_crop.left = crop->left;
+ event.input_crop.top = crop->top;
+ event.input_crop.width = crop->width;
+ event.input_crop.height = crop->height;
+ data_ptr += sizeof(*crop);
break;
default:
break;
@@ -173,81 +218,28 @@ static void hfi_sys_init_done(struct venus_core *core, struct venus_inst *inst,
void *packet)
{
struct hfi_msg_sys_init_done_pkt *pkt = packet;
- u32 rem_bytes, read_bytes = 0, num_properties;
- u32 error, ptype;
- u8 *data;
+ int rem_bytes;
+ u32 error;
error = pkt->error_type;
if (error != HFI_ERR_NONE)
- goto err_no_prop;
-
- num_properties = pkt->num_properties;
+ goto done;
- if (!num_properties) {
+ if (!pkt->num_properties) {
error = HFI_ERR_SYS_INVALID_PARAMETER;
- goto err_no_prop;
+ goto done;
}
rem_bytes = pkt->hdr.size - sizeof(*pkt) + sizeof(u32);
-
- if (!rem_bytes) {
+ if (rem_bytes <= 0) {
/* missing property data */
error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
- goto err_no_prop;
+ goto done;
}
- data = (u8 *)&pkt->data[0];
-
- if (core->res->hfi_version == HFI_VERSION_3XX)
- goto err_no_prop;
-
- while (num_properties && rem_bytes >= sizeof(u32)) {
- ptype = *((u32 *)data);
- data += sizeof(u32);
+ error = hfi_parser(core, inst, pkt->data, rem_bytes);
- switch (ptype) {
- case HFI_PROPERTY_PARAM_CODEC_SUPPORTED: {
- struct hfi_codec_supported *prop;
-
- prop = (struct hfi_codec_supported *)data;
-
- if (rem_bytes < sizeof(*prop)) {
- error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
- break;
- }
-
- read_bytes += sizeof(*prop) + sizeof(u32);
- core->dec_codecs = prop->dec_codecs;
- core->enc_codecs = prop->enc_codecs;
- break;
- }
- case HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED: {
- struct hfi_max_sessions_supported *prop;
-
- if (rem_bytes < sizeof(*prop)) {
- error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
- break;
- }
-
- prop = (struct hfi_max_sessions_supported *)data;
- read_bytes += sizeof(*prop) + sizeof(u32);
- core->max_sessions_supported = prop->max_sessions;
- break;
- }
- default:
- error = HFI_ERR_SYS_INVALID_PARAMETER;
- break;
- }
-
- if (error)
- break;
-
- rem_bytes -= read_bytes;
- data += read_bytes;
- num_properties--;
- }
-
-err_no_prop:
+done:
core->error = error;
complete(&core->done);
}
@@ -325,51 +317,6 @@ static void hfi_sys_pc_prepare_done(struct venus_core *core,
dev_dbg(core->dev, "pc prepare done (error %x)\n", pkt->error_type);
}
-static void
-hfi_copy_cap_prop(struct hfi_capability *in, struct venus_inst *inst)
-{
- if (!in || !inst)
- return;
-
- switch (in->capability_type) {
- case HFI_CAPABILITY_FRAME_WIDTH:
- inst->cap_width = *in;
- break;
- case HFI_CAPABILITY_FRAME_HEIGHT:
- inst->cap_height = *in;
- break;
- case HFI_CAPABILITY_MBS_PER_FRAME:
- inst->cap_mbs_per_frame = *in;
- break;
- case HFI_CAPABILITY_MBS_PER_SECOND:
- inst->cap_mbs_per_sec = *in;
- break;
- case HFI_CAPABILITY_FRAMERATE:
- inst->cap_framerate = *in;
- break;
- case HFI_CAPABILITY_SCALE_X:
- inst->cap_scale_x = *in;
- break;
- case HFI_CAPABILITY_SCALE_Y:
- inst->cap_scale_y = *in;
- break;
- case HFI_CAPABILITY_BITRATE:
- inst->cap_bitrate = *in;
- break;
- case HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS:
- inst->cap_hier_p = *in;
- break;
- case HFI_CAPABILITY_ENC_LTR_COUNT:
- inst->cap_ltr_count = *in;
- break;
- case HFI_CAPABILITY_CP_OUTPUT2_THRESH:
- inst->cap_secure_output2_threshold = *in;
- break;
- default:
- break;
- }
-}
-
static unsigned int
session_get_prop_profile_level(struct hfi_msg_session_property_info_pkt *pkt,
struct hfi_profile_level *profile_level)
@@ -459,248 +406,27 @@ done:
complete(&inst->done);
}
-static u32 init_done_read_prop(struct venus_core *core, struct venus_inst *inst,
- struct hfi_msg_session_init_done_pkt *pkt)
-{
- struct device *dev = core->dev;
- u32 rem_bytes, num_props;
- u32 ptype, next_offset = 0;
- u32 err;
- u8 *data;
-
- rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt) + sizeof(u32);
- if (!rem_bytes) {
- dev_err(dev, "%s: missing property info\n", __func__);
- return HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
- }
-
- err = pkt->error_type;
- if (err)
- return err;
-
- data = (u8 *)&pkt->data[0];
- num_props = pkt->num_properties;
-
- while (err == HFI_ERR_NONE && num_props && rem_bytes >= sizeof(u32)) {
- ptype = *((u32 *)data);
- next_offset = sizeof(u32);
-
- switch (ptype) {
- case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED: {
- struct hfi_codec_mask_supported *masks =
- (struct hfi_codec_mask_supported *)
- (data + next_offset);
-
- next_offset += sizeof(*masks);
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED: {
- struct hfi_capabilities *caps;
- struct hfi_capability *cap;
- u32 num_caps;
-
- if ((rem_bytes - next_offset) < sizeof(*cap)) {
- err = HFI_ERR_SESSION_INVALID_PARAMETER;
- break;
- }
-
- caps = (struct hfi_capabilities *)(data + next_offset);
-
- num_caps = caps->num_capabilities;
- cap = &caps->data[0];
- next_offset += sizeof(u32);
-
- while (num_caps &&
- (rem_bytes - next_offset) >= sizeof(u32)) {
- hfi_copy_cap_prop(cap, inst);
- cap++;
- next_offset += sizeof(*cap);
- num_caps--;
- }
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED: {
- struct hfi_uncompressed_format_supported *prop =
- (struct hfi_uncompressed_format_supported *)
- (data + next_offset);
- u32 num_fmt_entries;
- u8 *fmt;
- struct hfi_uncompressed_plane_info *inf;
-
- if ((rem_bytes - next_offset) < sizeof(*prop)) {
- err = HFI_ERR_SESSION_INVALID_PARAMETER;
- break;
- }
-
- num_fmt_entries = prop->format_entries;
- next_offset = sizeof(*prop) - sizeof(u32);
- fmt = (u8 *)&prop->format_info[0];
-
- dev_dbg(dev, "uncomm format support num entries:%u\n",
- num_fmt_entries);
-
- while (num_fmt_entries) {
- struct hfi_uncompressed_plane_constraints *cnts;
- u32 bytes_to_skip;
-
- inf = (struct hfi_uncompressed_plane_info *)fmt;
-
- if ((rem_bytes - next_offset) < sizeof(*inf)) {
- err = HFI_ERR_SESSION_INVALID_PARAMETER;
- break;
- }
-
- dev_dbg(dev, "plane info: fmt:%x, planes:%x\n",
- inf->format, inf->num_planes);
-
- cnts = &inf->plane_format[0];
- dev_dbg(dev, "%u %u %u %u\n",
- cnts->stride_multiples,
- cnts->max_stride,
- cnts->min_plane_buffer_height_multiple,
- cnts->buffer_alignment);
-
- bytes_to_skip = sizeof(*inf) - sizeof(*cnts) +
- inf->num_planes * sizeof(*cnts);
-
- fmt += bytes_to_skip;
- next_offset += bytes_to_skip;
- num_fmt_entries--;
- }
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED: {
- struct hfi_properties_supported *prop =
- (struct hfi_properties_supported *)
- (data + next_offset);
-
- next_offset += sizeof(*prop) - sizeof(u32)
- + prop->num_properties * sizeof(u32);
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED: {
- struct hfi_profile_level_supported *prop =
- (struct hfi_profile_level_supported *)
- (data + next_offset);
- struct hfi_profile_level *pl;
- unsigned int prop_count = 0;
- unsigned int count = 0;
- u8 *ptr;
-
- ptr = (u8 *)&prop->profile_level[0];
- prop_count = prop->profile_count;
-
- if (prop_count > HFI_MAX_PROFILE_COUNT)
- prop_count = HFI_MAX_PROFILE_COUNT;
-
- while (prop_count) {
- ptr++;
- pl = (struct hfi_profile_level *)ptr;
-
- inst->pl[count].profile = pl->profile;
- inst->pl[count].level = pl->level;
- prop_count--;
- count++;
- ptr += sizeof(*pl) / sizeof(u32);
- }
-
- inst->pl_count = count;
- next_offset += sizeof(*prop) - sizeof(*pl) +
- prop->profile_count * sizeof(*pl);
-
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED: {
- next_offset +=
- sizeof(struct hfi_interlace_format_supported);
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED: {
- struct hfi_nal_stream_format *nal =
- (struct hfi_nal_stream_format *)
- (data + next_offset);
- dev_dbg(dev, "NAL format: %x\n", nal->format);
- next_offset += sizeof(*nal);
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT: {
- next_offset += sizeof(u32);
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE: {
- u32 *max_seq_sz = (u32 *)(data + next_offset);
-
- dev_dbg(dev, "max seq header sz: %x\n", *max_seq_sz);
- next_offset += sizeof(u32);
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH: {
- next_offset += sizeof(struct hfi_intra_refresh);
- num_props--;
- break;
- }
- case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED: {
- struct hfi_buffer_alloc_mode_supported *prop =
- (struct hfi_buffer_alloc_mode_supported *)
- (data + next_offset);
- unsigned int i;
-
- for (i = 0; i < prop->num_entries; i++) {
- if (prop->buffer_type == HFI_BUFFER_OUTPUT ||
- prop->buffer_type == HFI_BUFFER_OUTPUT2) {
- switch (prop->data[i]) {
- case HFI_BUFFER_MODE_STATIC:
- inst->cap_bufs_mode_static = true;
- break;
- case HFI_BUFFER_MODE_DYNAMIC:
- inst->cap_bufs_mode_dynamic = true;
- break;
- default:
- break;
- }
- }
- }
- next_offset += sizeof(*prop) -
- sizeof(u32) + prop->num_entries * sizeof(u32);
- num_props--;
- break;
- }
- default:
- dev_dbg(dev, "%s: default case %#x\n", __func__, ptype);
- break;
- }
-
- rem_bytes -= next_offset;
- data += next_offset;
- }
-
- return err;
-}
-
static void hfi_session_init_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_session_init_done_pkt *pkt = packet;
- unsigned int error;
+ int rem_bytes;
+ u32 error;
error = pkt->error_type;
if (error != HFI_ERR_NONE)
goto done;
- if (core->res->hfi_version != HFI_VERSION_1XX)
+ if (!IS_V1(core))
goto done;
- error = init_done_read_prop(core, inst, pkt);
+ rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt) + sizeof(u32);
+ if (rem_bytes <= 0) {
+ error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
+ goto done;
+ }
+ error = hfi_parser(core, inst, pkt->data, rem_bytes);
done:
inst->error = error;
complete(&inst->done);
@@ -779,7 +505,8 @@ static void hfi_session_ftb_done(struct venus_core *core,
error = HFI_ERR_SESSION_INVALID_PARAMETER;
}
- if (buffer_type != HFI_BUFFER_OUTPUT)
+ if (buffer_type != HFI_BUFFER_OUTPUT &&
+ buffer_type != HFI_BUFFER_OUTPUT2)
goto done;
if (hfi_flags & HFI_BUFFERFLAG_EOS)
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
new file mode 100644
index 000000000000..2293d936e49c
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Linaro Ltd.
+ *
+ * Author: Stanimir Varbanov <stanimir.varbanov@linaro.org>
+ */
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+
+#include "core.h"
+#include "hfi_helper.h"
+#include "hfi_parser.h"
+
+typedef void (*func)(struct venus_caps *cap, const void *data,
+ unsigned int size);
+
+static void init_codecs(struct venus_core *core)
+{
+ struct venus_caps *caps = core->caps, *cap;
+ unsigned long bit;
+
+ for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
+ cap = &caps[core->codecs_count++];
+ cap->codec = BIT(bit);
+ cap->domain = VIDC_SESSION_TYPE_DEC;
+ cap->valid = false;
+ }
+
+ for_each_set_bit(bit, &core->enc_codecs, MAX_CODEC_NUM) {
+ cap = &caps[core->codecs_count++];
+ cap->codec = BIT(bit);
+ cap->domain = VIDC_SESSION_TYPE_ENC;
+ cap->valid = false;
+ }
+}
+
+static void for_each_codec(struct venus_caps *caps, unsigned int caps_num,
+ u32 codecs, u32 domain, func cb, void *data,
+ unsigned int size)
+{
+ struct venus_caps *cap;
+ unsigned int i;
+
+ for (i = 0; i < caps_num; i++) {
+ cap = &caps[i];
+ if (cap->valid && cap->domain == domain)
+ continue;
+ if (cap->codec & codecs && cap->domain == domain)
+ cb(cap, data, size);
+ }
+}
+
+static void
+fill_buf_mode(struct venus_caps *cap, const void *data, unsigned int num)
+{
+ const u32 *type = data;
+
+ if (*type == HFI_BUFFER_MODE_DYNAMIC)
+ cap->cap_bufs_mode_dynamic = true;
+}
+
+static void
+parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data)
+{
+ struct hfi_buffer_alloc_mode_supported *mode = data;
+ u32 num_entries = mode->num_entries;
+ u32 *type;
+
+ if (num_entries > MAX_ALLOC_MODE_ENTRIES)
+ return;
+
+ type = mode->data;
+
+ while (num_entries--) {
+ if (mode->buffer_type == HFI_BUFFER_OUTPUT ||
+ mode->buffer_type == HFI_BUFFER_OUTPUT2)
+ for_each_codec(core->caps, ARRAY_SIZE(core->caps),
+ codecs, domain, fill_buf_mode, type, 1);
+
+ type++;
+ }
+}
+
+static void fill_profile_level(struct venus_caps *cap, const void *data,
+ unsigned int num)
+{
+ const struct hfi_profile_level *pl = data;
+
+ memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
+ cap->num_pl += num;
+}
+
+static void
+parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data)
+{
+ struct hfi_profile_level_supported *pl = data;
+ struct hfi_profile_level *proflevel = pl->profile_level;
+ struct hfi_profile_level pl_arr[HFI_MAX_PROFILE_COUNT] = {};
+
+ if (pl->profile_count > HFI_MAX_PROFILE_COUNT)
+ return;
+
+ memcpy(pl_arr, proflevel, pl->profile_count * sizeof(*proflevel));
+
+ for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
+ fill_profile_level, pl_arr, pl->profile_count);
+}
+
+static void
+fill_caps(struct venus_caps *cap, const void *data, unsigned int num)
+{
+ const struct hfi_capability *caps = data;
+
+ memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps));
+ cap->num_caps += num;
+}
+
+static void
+parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data)
+{
+ struct hfi_capabilities *caps = data;
+ struct hfi_capability *cap = caps->data;
+ u32 num_caps = caps->num_capabilities;
+ struct hfi_capability caps_arr[MAX_CAP_ENTRIES] = {};
+
+ if (num_caps > MAX_CAP_ENTRIES)
+ return;
+
+ memcpy(caps_arr, cap, num_caps * sizeof(*cap));
+
+ for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
+ fill_caps, caps_arr, num_caps);
+}
+
+static void fill_raw_fmts(struct venus_caps *cap, const void *fmts,
+ unsigned int num_fmts)
+{
+ const struct raw_formats *formats = fmts;
+
+ memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats));
+ cap->num_fmts += num_fmts;
+}
+
+static void
+parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
+{
+ struct hfi_uncompressed_format_supported *fmt = data;
+ struct hfi_uncompressed_plane_info *pinfo = fmt->plane_info;
+ struct hfi_uncompressed_plane_constraints *constr;
+ struct raw_formats rawfmts[MAX_FMT_ENTRIES] = {};
+ u32 entries = fmt->format_entries;
+ unsigned int i = 0;
+ u32 num_planes;
+
+ while (entries) {
+ num_planes = pinfo->num_planes;
+
+ rawfmts[i].fmt = pinfo->format;
+ rawfmts[i].buftype = fmt->buffer_type;
+ i++;
+
+ if (pinfo->num_planes > MAX_PLANES)
+ break;
+
+ pinfo = (void *)pinfo + sizeof(*constr) * num_planes +
+ 2 * sizeof(u32);
+ entries--;
+ }
+
+ for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
+ fill_raw_fmts, rawfmts, i);
+}
+
+static void parse_codecs(struct venus_core *core, void *data)
+{
+ struct hfi_codec_supported *codecs = data;
+
+ core->dec_codecs = codecs->dec_codecs;
+ core->enc_codecs = codecs->enc_codecs;
+
+ if (IS_V1(core)) {
+ core->dec_codecs &= ~HFI_VIDEO_CODEC_HEVC;
+ core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK;
+ }
+}
+
+static void parse_max_sessions(struct venus_core *core, const void *data)
+{
+ const struct hfi_max_sessions_supported *sessions = data;
+
+ core->max_sessions_supported = sessions->max_sessions;
+}
+
+static void parse_codecs_mask(u32 *codecs, u32 *domain, void *data)
+{
+ struct hfi_codec_mask_supported *mask = data;
+
+ *codecs = mask->codecs;
+ *domain = mask->video_domains;
+}
+
+static void parser_init(struct venus_inst *inst, u32 *codecs, u32 *domain)
+{
+ if (!inst || !IS_V1(inst->core))
+ return;
+
+ *codecs = inst->hfi_codec;
+ *domain = inst->session_type;
+}
+
+static void parser_fini(struct venus_inst *inst, u32 codecs, u32 domain)
+{
+ struct venus_caps *caps, *cap;
+ unsigned int i;
+ u32 dom;
+
+ if (!inst || !IS_V1(inst->core))
+ return;
+
+ caps = inst->core->caps;
+ dom = inst->session_type;
+
+ for (i = 0; i < MAX_CODEC_NUM; i++) {
+ cap = &caps[i];
+ if (cap->codec & codecs && cap->domain == dom)
+ cap->valid = true;
+ }
+}
+
+u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
+ u32 size)
+{
+ unsigned int words_count = size >> 2;
+ u32 *word = buf, *data, codecs = 0, domain = 0;
+
+ if (size % 4)
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ parser_init(inst, &codecs, &domain);
+
+ while (words_count) {
+ data = word + 1;
+
+ switch (*word) {
+ case HFI_PROPERTY_PARAM_CODEC_SUPPORTED:
+ parse_codecs(core, data);
+ init_codecs(core);
+ break;
+ case HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED:
+ parse_max_sessions(core, data);
+ break;
+ case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED:
+ parse_codecs_mask(&codecs, &domain, data);
+ break;
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
+ parse_raw_formats(core, codecs, domain, data);
+ break;
+ case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED:
+ parse_caps(core, codecs, domain, data);
+ break;
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
+ parse_profile_level(core, codecs, domain, data);
+ break;
+ case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED:
+ parse_alloc_mode(core, codecs, domain, data);
+ break;
+ default:
+ break;
+ }
+
+ word++;
+ words_count--;
+ }
+
+ parser_fini(inst, codecs, domain);
+
+ return HFI_ERR_NONE;
+}
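The parser deliberately walks the payload one u32 at a time rather than
trusting num_properties, so unrecognized words are simply skipped. A
hypothetical hand-built property stream (values invented for illustration)
would be consumed like this:

	/* hypothetical stream: tag words followed by their inline payload */
	u32 buf[] = {
		HFI_PROPERTY_PARAM_CODEC_SUPPORTED,
		HFI_VIDEO_CODEC_HEVC,	/* dec_codecs */
		HFI_VIDEO_CODEC_HEVC,	/* enc_codecs */
		HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED,
		16,			/* max_sessions */
	};

	err = hfi_parser(core, NULL, buf, sizeof(buf));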
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.h b/drivers/media/platform/qcom/venus/hfi_parser.h
new file mode 100644
index 000000000000..3e931c747e19
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_parser.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Linaro Ltd. */
+#ifndef __VENUS_HFI_PARSER_H__
+#define __VENUS_HFI_PARSER_H__
+
+#include "core.h"
+
+u32 hfi_parser(struct venus_core *core, struct venus_inst *inst,
+ void *buf, u32 size);
+
+#define WHICH_CAP_MIN 0
+#define WHICH_CAP_MAX 1
+#define WHICH_CAP_STEP 2
+
+static inline u32 get_cap(struct venus_inst *inst, u32 type, u32 which)
+{
+ struct venus_core *core = inst->core;
+ struct hfi_capability *cap = NULL;
+ struct venus_caps *caps;
+ unsigned int i;
+
+ caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
+ if (!caps)
+ return 0;
+
+ for (i = 0; i < caps->num_caps; i++) {
+ if (caps->caps[i].capability_type == type) {
+ cap = &caps->caps[i];
+ break;
+ }
+ }
+
+ if (!cap)
+ return 0;
+
+ switch (which) {
+ case WHICH_CAP_MIN:
+ return cap->min;
+ case WHICH_CAP_MAX:
+ return cap->max;
+ case WHICH_CAP_STEP:
+ return cap->step_size;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline u32 cap_min(struct venus_inst *inst, u32 type)
+{
+ return get_cap(inst, type, WHICH_CAP_MIN);
+}
+
+static inline u32 cap_max(struct venus_inst *inst, u32 type)
+{
+ return get_cap(inst, type, WHICH_CAP_MAX);
+}
+
+static inline u32 cap_step(struct venus_inst *inst, u32 type)
+{
+ return get_cap(inst, type, WHICH_CAP_STEP);
+}
+
+static inline u32 frame_width_min(struct venus_inst *inst)
+{
+ return cap_min(inst, HFI_CAPABILITY_FRAME_WIDTH);
+}
+
+static inline u32 frame_width_max(struct venus_inst *inst)
+{
+ return cap_max(inst, HFI_CAPABILITY_FRAME_WIDTH);
+}
+
+static inline u32 frame_width_step(struct venus_inst *inst)
+{
+ return cap_step(inst, HFI_CAPABILITY_FRAME_WIDTH);
+}
+
+static inline u32 frame_height_min(struct venus_inst *inst)
+{
+ return cap_min(inst, HFI_CAPABILITY_FRAME_HEIGHT);
+}
+
+static inline u32 frame_height_max(struct venus_inst *inst)
+{
+ return cap_max(inst, HFI_CAPABILITY_FRAME_HEIGHT);
+}
+
+static inline u32 frame_height_step(struct venus_inst *inst)
+{
+ return cap_step(inst, HFI_CAPABILITY_FRAME_HEIGHT);
+}
+
+static inline u32 frate_min(struct venus_inst *inst)
+{
+ return cap_min(inst, HFI_CAPABILITY_FRAMERATE);
+}
+
+static inline u32 frate_max(struct venus_inst *inst)
+{
+ return cap_max(inst, HFI_CAPABILITY_FRAMERATE);
+}
+
+static inline u32 frate_step(struct venus_inst *inst)
+{
+ return cap_step(inst, HFI_CAPABILITY_FRAMERATE);
+}
+
+#endif
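With these accessors a hypothetical try_fmt path can clamp user-requested
dimensions against whatever the firmware advertised (sketch only):

	/* illustrative: bound the requested resolution by the parsed caps */
	width = clamp(width, frame_width_min(inst), frame_width_max(inst));
	height = clamp(height, frame_height_min(inst), frame_height_max(inst));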
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 734ce11b0ed0..124085556b94 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -532,6 +532,24 @@ static int venus_halt_axi(struct venus_hfi_device *hdev)
u32 val;
int ret;
+ if (IS_V4(hdev->core)) {
+ val = venus_readl(hdev, WRAPPER_CPU_AXI_HALT);
+ val |= WRAPPER_CPU_AXI_HALT_HALT;
+ venus_writel(hdev, WRAPPER_CPU_AXI_HALT, val);
+
+ ret = readl_poll_timeout(base + WRAPPER_CPU_AXI_HALT_STATUS,
+ val,
+ val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE,
+ POLL_INTERVAL_US,
+ VBIF_AXI_HALT_ACK_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "AXI bus port halt timeout\n");
+ return ret;
+ }
+
+ return 0;
+ }
+
/* Halt AXI and AXI IMEM VBIF Access */
val = venus_readl(hdev, VBIF_AXI_HALT_CTRL0);
val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
@@ -861,6 +879,14 @@ static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
if (ret)
dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);
+ /*
+	 * The idle indicator is disabled by default on some 4xx firmware
+	 * versions; enable it explicitly so that the suspend path, which
+	 * checks the WFI (wait-for-interrupt) bit, can work.
+ */
+ if (IS_V4(hdev->core))
+ venus_sys_idle_indicator = true;
+
ret = venus_sys_set_idle_message(hdev, venus_sys_idle_indicator);
if (ret)
dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
@@ -1073,6 +1099,10 @@ static int venus_core_init(struct venus_core *core)
if (ret)
dev_warn(dev, "failed to send image version pkt to fw\n");
+ ret = venus_sys_set_default_properties(hdev);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -1117,10 +1147,6 @@ static int venus_session_init(struct venus_inst *inst, u32 session_type,
struct hfi_session_init_pkt pkt;
int ret;
- ret = venus_sys_set_default_properties(hdev);
- if (ret)
- return ret;
-
ret = pkt_session_init(&pkt, inst, session_type, codec);
if (ret)
goto err;
@@ -1426,13 +1452,40 @@ static int venus_suspend_1xx(struct venus_core *core)
return 0;
}
+static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
+{
+ u32 ctrl_status, cpu_status;
+
+ cpu_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+
+ if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
+ ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
+ return true;
+
+ return false;
+}
+
+static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
+{
+ u32 ctrl_status, cpu_status;
+
+ cpu_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+
+ if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
+ ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
+ return true;
+
+ return false;
+}
+
static int venus_suspend_3xx(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct device *dev = core->dev;
- u32 ctrl_status, wfi_status;
+ bool val;
int ret;
- int cnt = 100;
if (!hdev->power_enabled || hdev->suspended)
return 0;
@@ -1446,29 +1499,30 @@ static int venus_suspend_3xx(struct venus_core *core)
return -EINVAL;
}
- ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
- if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
- wfi_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
- ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
-
- ret = venus_prepare_power_collapse(hdev, false);
- if (ret) {
- dev_err(dev, "prepare for power collapse fail (%d)\n",
- ret);
- return ret;
- }
+ /*
+ * Power collapse sequence for Venus 3xx and 4xx versions:
+	 * 1. Wait for the ARM9 and the video core to go idle by checking the
+	 *    WFI bit (bit 0) in the CPU status register and the Idle bit
+	 *    (bit 30) in the control status register.
+	 * 2. Send a command to prepare for power collapse.
+	 * 3. Wait for the WFI and PC_READY bits.
+ */
+ ret = readx_poll_timeout(venus_cpu_and_video_core_idle, hdev, val, val,
+ 1500, 100 * 1500);
+ if (ret)
+ return ret;
- cnt = 100;
- while (cnt--) {
- wfi_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
- ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
- if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY &&
- wfi_status & BIT(0))
- break;
- usleep_range(1000, 1500);
- }
+ ret = venus_prepare_power_collapse(hdev, false);
+ if (ret) {
+ dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
+ return ret;
}
+ ret = readx_poll_timeout(venus_cpu_idle_and_pc_ready, hdev, val, val,
+ 1500, 100 * 1500);
+ if (ret)
+ return ret;
+
mutex_lock(&hdev->lock);
ret = venus_power_off(hdev);
@@ -1487,7 +1541,7 @@ static int venus_suspend_3xx(struct venus_core *core)
static int venus_suspend(struct venus_core *core)
{
- if (core->res->hfi_version == HFI_VERSION_3XX)
+ if (IS_V3(core) || IS_V4(core))
return venus_suspend_3xx(core);
return venus_suspend_1xx(core);
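
The rewritten suspend path above trades the open-coded cnt/usleep_range() retry loop for readx_poll_timeout() from <linux/iopoll.h>, which repeatedly evaluates op(addr) into val until cond holds or the timeout expires. A minimal sketch of the call shape with the same 1500 us interval and 100-iteration budget used above; hw_is_idle() and its priv argument are placeholders:

    #include <linux/iopoll.h>

    /* Placeholder predicate -- the real code reads status registers here. */
    static bool hw_is_idle(void *priv)
    {
            return true;
    }

    static int wait_for_idle(void *priv)
    {
            bool idle;

            /* poll every 1500 us, give up after 100 * 1500 us = 150 ms */
            return readx_poll_timeout(hw_is_idle, priv, idle, idle,
                                      1500, 100 * 1500);
    }
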
diff --git a/drivers/media/platform/qcom/venus/hfi_venus_io.h b/drivers/media/platform/qcom/venus/hfi_venus_io.h
index 98cc350113ab..def0926a6dee 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus_io.h
+++ b/drivers/media/platform/qcom/venus/hfi_venus_io.h
@@ -104,10 +104,20 @@
#define WRAPPER_CPU_CLOCK_CONFIG (WRAPPER_BASE + 0x2000)
#define WRAPPER_CPU_AXI_HALT (WRAPPER_BASE + 0x2008)
+#define WRAPPER_CPU_AXI_HALT_HALT BIT(16)
#define WRAPPER_CPU_AXI_HALT_STATUS (WRAPPER_BASE + 0x200c)
+#define WRAPPER_CPU_AXI_HALT_STATUS_IDLE BIT(24)
#define WRAPPER_CPU_CGC_DIS (WRAPPER_BASE + 0x2010)
#define WRAPPER_CPU_STATUS (WRAPPER_BASE + 0x2014)
+#define WRAPPER_CPU_STATUS_WFI BIT(0)
#define WRAPPER_SW_RESET (WRAPPER_BASE + 0x3000)
+/* Venus 4xx */
+#define WRAPPER_VCODEC0_MMCC_POWER_STATUS (WRAPPER_BASE + 0x90)
+#define WRAPPER_VCODEC0_MMCC_POWER_CONTROL (WRAPPER_BASE + 0x94)
+
+#define WRAPPER_VCODEC1_MMCC_POWER_STATUS (WRAPPER_BASE + 0x110)
+#define WRAPPER_VCODEC1_MMCC_POWER_CONTROL (WRAPPER_BASE + 0x114)
+
#endif
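
The new WRAPPER_CPU_AXI_HALT/STATUS bit definitions back the 4xx branch added to venus_halt_axi() earlier in this patch: software sets a halt-request bit with a read-modify-write, then polls the status register for the idle acknowledgement. A sketch of that request/ack idiom using readl_poll_timeout(); base, POLL_US and TIMEOUT_US stand in for the driver's mapped base address and its polling constants:

    #include <linux/io.h>
    #include <linux/iopoll.h>

    static int axi_halt_sketch(void __iomem *base)
    {
            u32 val;

            /* request the halt; read-modify-write preserves other bits */
            val = readl(base + WRAPPER_CPU_AXI_HALT);
            val |= WRAPPER_CPU_AXI_HALT_HALT;
            writel(val, base + WRAPPER_CPU_AXI_HALT);

            /* wait for the hardware to acknowledge with the idle bit */
            return readl_poll_timeout(base + WRAPPER_CPU_AXI_HALT_STATUS, val,
                                      val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE,
                                      POLL_US, TIMEOUT_US);
    }
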
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 49bbd1861d3a..dfbbbf0f746f 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -24,33 +25,11 @@
#include <media/videobuf2-dma-sg.h>
#include "hfi_venus_io.h"
+#include "hfi_parser.h"
#include "core.h"
#include "helpers.h"
#include "vdec.h"
-static u32 get_framesize_uncompressed(unsigned int plane, u32 width, u32 height)
-{
- u32 y_stride, uv_stride, y_plane;
- u32 y_sclines, uv_sclines, uv_plane;
- u32 size;
-
- y_stride = ALIGN(width, 128);
- uv_stride = ALIGN(width, 128);
- y_sclines = ALIGN(height, 32);
- uv_sclines = ALIGN(((height + 1) >> 1), 16);
-
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines + SZ_4K;
- size = y_plane + uv_plane + SZ_8K;
-
- return ALIGN(size, SZ_4K);
-}
-
-static u32 get_framesize_compressed(unsigned int width, unsigned int height)
-{
- return ((width * height * 3 / 2) / 2) + 128;
-}
-
/*
 * Three reasons to keep MPLANE formats (despite the number of planes
 * currently being one):
@@ -99,6 +78,10 @@ static const struct venus_format vdec_formats[] = {
.pixfmt = V4L2_PIX_FMT_XVID,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_HEVC,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
},
};
@@ -159,7 +142,6 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
const struct venus_format *fmt;
- unsigned int p;
memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
@@ -173,14 +155,12 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
else
return NULL;
fmt = find_format(inst, pixmp->pixelformat, f->type);
- pixmp->width = 1280;
- pixmp->height = 720;
}
- pixmp->width = clamp(pixmp->width, inst->cap_width.min,
- inst->cap_width.max);
- pixmp->height = clamp(pixmp->height, inst->cap_height.min,
- inst->cap_height.max);
+ pixmp->width = clamp(pixmp->width, frame_width_min(inst),
+ frame_width_max(inst));
+ pixmp->height = clamp(pixmp->height, frame_height_min(inst),
+ frame_height_max(inst));
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
pixmp->height = ALIGN(pixmp->height, 32);
@@ -190,18 +170,14 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
pixmp->num_planes = fmt->num_planes;
pixmp->flags = 0;
- if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- for (p = 0; p < pixmp->num_planes; p++) {
- pfmt[p].sizeimage =
- get_framesize_uncompressed(p, pixmp->width,
- pixmp->height);
- pfmt[p].bytesperline = ALIGN(pixmp->width, 128);
- }
- } else {
- pfmt[0].sizeimage = get_framesize_compressed(pixmp->width,
- pixmp->height);
+ pfmt[0].sizeimage = venus_helper_get_framesz(pixmp->pixelformat,
+ pixmp->width,
+ pixmp->height);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ pfmt[0].bytesperline = ALIGN(pixmp->width, 128);
+ else
pfmt[0].bytesperline = 0;
- }
return fmt;
}
@@ -442,12 +418,12 @@ static int vdec_enum_framesizes(struct file *file, void *fh,
fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise.min_width = inst->cap_width.min;
- fsize->stepwise.max_width = inst->cap_width.max;
- fsize->stepwise.step_width = inst->cap_width.step_size;
- fsize->stepwise.min_height = inst->cap_height.min;
- fsize->stepwise.max_height = inst->cap_height.max;
- fsize->stepwise.step_height = inst->cap_height.step_size;
+ fsize->stepwise.min_width = frame_width_min(inst);
+ fsize->stepwise.max_width = frame_width_max(inst);
+ fsize->stepwise.step_width = frame_width_step(inst);
+ fsize->stepwise.min_height = frame_height_min(inst);
+ fsize->stepwise.max_height = frame_height_max(inst);
+ fsize->stepwise.step_height = frame_height_step(inst);
return 0;
}
@@ -544,11 +520,41 @@ static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
static int vdec_set_properties(struct venus_inst *inst)
{
struct vdec_controls *ctr = &inst->controls.dec;
+ struct hfi_enable en = { .enable = 1 };
+ u32 ptype;
+ int ret;
+
+ if (ctr->post_loop_deb_mode) {
+ ptype = HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
+ ret = hfi_session_set_property(inst, ptype, &en);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+#define is_ubwc_fmt(fmt) (!!((fmt) & HFI_COLOR_FORMAT_UBWC_BASE))
+
+static int vdec_output_conf(struct venus_inst *inst)
+{
struct venus_core *core = inst->core;
struct hfi_enable en = { .enable = 1 };
+ u32 width = inst->out_width;
+ u32 height = inst->out_height;
+ u32 out_fmt, out2_fmt;
+ bool ubwc = false;
u32 ptype;
int ret;
+ ret = venus_helper_set_work_mode(inst, VIDC_WORK_MODE_2);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_set_core_usage(inst, VIDC_CORE_ID_1);
+ if (ret)
+ return ret;
+
if (core->res->hfi_version == HFI_VERSION_1XX) {
ptype = HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
ret = hfi_session_set_property(inst, ptype, &en);
@@ -556,27 +562,84 @@ static int vdec_set_properties(struct venus_inst *inst)
return ret;
}
- if (core->res->hfi_version == HFI_VERSION_3XX ||
- inst->cap_bufs_mode_dynamic) {
- struct hfi_buffer_alloc_mode mode;
+	/* Force searching UBWC formats for resolutions bigger than HD */
+ if (width > 1920 && height > ALIGN(1080, 32))
+ ubwc = true;
+
+	/* For Venus v4 the UBWC format is mandatory */
+ if (IS_V4(core))
+ ubwc = true;
+
+ ret = venus_helper_get_out_fmts(inst, inst->fmt_cap->pixfmt, &out_fmt,
+ &out2_fmt, ubwc);
+ if (ret)
+ return ret;
+
+ inst->output_buf_size =
+ venus_helper_get_framesz_raw(out_fmt, width, height);
+ inst->output2_buf_size =
+ venus_helper_get_framesz_raw(out2_fmt, width, height);
+
+ if (is_ubwc_fmt(out_fmt)) {
+ inst->opb_buftype = HFI_BUFFER_OUTPUT2;
+ inst->opb_fmt = out2_fmt;
+ inst->dpb_buftype = HFI_BUFFER_OUTPUT;
+ inst->dpb_fmt = out_fmt;
+ } else if (is_ubwc_fmt(out2_fmt)) {
+ inst->opb_buftype = HFI_BUFFER_OUTPUT;
+ inst->opb_fmt = out_fmt;
+ inst->dpb_buftype = HFI_BUFFER_OUTPUT2;
+ inst->dpb_fmt = out2_fmt;
+ } else {
+ inst->opb_buftype = HFI_BUFFER_OUTPUT;
+ inst->opb_fmt = out_fmt;
+ inst->dpb_buftype = 0;
+ inst->dpb_fmt = 0;
+ }
+
+ ret = venus_helper_set_raw_format(inst, inst->opb_fmt,
+ inst->opb_buftype);
+ if (ret)
+ return ret;
- ptype = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE;
- mode.type = HFI_BUFFER_OUTPUT;
- mode.mode = HFI_BUFFER_MODE_DYNAMIC;
+ if (inst->dpb_fmt) {
+ ret = venus_helper_set_multistream(inst, false, true);
+ if (ret)
+ return ret;
- ret = hfi_session_set_property(inst, ptype, &mode);
+ ret = venus_helper_set_raw_format(inst, inst->dpb_fmt,
+ inst->dpb_buftype);
if (ret)
return ret;
- }
- if (ctr->post_loop_deb_mode) {
- ptype = HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
- en.enable = 1;
- ret = hfi_session_set_property(inst, ptype, &en);
+ ret = venus_helper_set_output_resolution(inst, width, height,
+ HFI_BUFFER_OUTPUT2);
if (ret)
return ret;
}
+ if (IS_V3(core) || IS_V4(core)) {
+ if (inst->output2_buf_size) {
+ ret = venus_helper_set_bufsize(inst,
+ inst->output2_buf_size,
+ HFI_BUFFER_OUTPUT2);
+ if (ret)
+ return ret;
+ }
+
+ if (inst->output_buf_size) {
+ ret = venus_helper_set_bufsize(inst,
+ inst->output_buf_size,
+ HFI_BUFFER_OUTPUT);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = venus_helper_set_dyn_bufmode(inst);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -603,19 +666,32 @@ deinit:
return ret;
}
-static int vdec_cap_num_buffers(struct venus_inst *inst, unsigned int *num)
+static int vdec_num_buffers(struct venus_inst *inst, unsigned int *in_num,
+ unsigned int *out_num)
{
+ enum hfi_version ver = inst->core->res->hfi_version;
struct hfi_buffer_requirements bufreq;
int ret;
+ *in_num = *out_num = 0;
+
ret = vdec_init_session(inst);
if (ret)
return ret;
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
+ if (ret)
+ goto deinit;
+
+ *in_num = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
+ if (ret)
+ goto deinit;
- *num = bufreq.count_actual;
+ *out_num = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+deinit:
hfi_session_deinit(inst);
return ret;
@@ -626,10 +702,12 @@ static int vdec_queue_setup(struct vb2_queue *q,
unsigned int sizes[], struct device *alloc_devs[])
{
struct venus_inst *inst = vb2_get_drv_priv(q);
- unsigned int p, num;
+ unsigned int in_num, out_num;
int ret = 0;
if (*num_planes) {
+ unsigned int output_buf_size = venus_helper_get_opb_size(inst);
+
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
*num_planes != inst->fmt_out->num_planes)
return -EINVAL;
@@ -643,41 +721,35 @@ static int vdec_queue_setup(struct vb2_queue *q,
return -EINVAL;
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
- sizes[0] < inst->output_buf_size)
+ sizes[0] < output_buf_size)
return -EINVAL;
return 0;
}
+ ret = vdec_num_buffers(inst, &in_num, &out_num);
+ if (ret)
+ return ret;
+
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
*num_planes = inst->fmt_out->num_planes;
- sizes[0] = get_framesize_compressed(inst->out_width,
+ sizes[0] = venus_helper_get_framesz(inst->fmt_out->pixfmt,
+ inst->out_width,
inst->out_height);
inst->input_buf_size = sizes[0];
+ *num_buffers = max(*num_buffers, in_num);
inst->num_input_bufs = *num_buffers;
-
- ret = vdec_cap_num_buffers(inst, &num);
- if (ret)
- break;
-
- inst->num_output_bufs = num;
+ inst->num_output_bufs = out_num;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
*num_planes = inst->fmt_cap->num_planes;
-
- ret = vdec_cap_num_buffers(inst, &num);
- if (ret)
- break;
-
- *num_buffers = max(*num_buffers, num);
-
- for (p = 0; p < *num_planes; p++)
- sizes[p] = get_framesize_uncompressed(p, inst->width,
- inst->height);
-
- inst->num_output_bufs = *num_buffers;
+ sizes[0] = venus_helper_get_framesz(inst->fmt_cap->pixfmt,
+ inst->width,
+ inst->height);
inst->output_buf_size = sizes[0];
+ *num_buffers = max(*num_buffers, out_num);
+ inst->num_output_bufs = *num_buffers;
break;
default:
ret = -EINVAL;
@@ -689,6 +761,7 @@ static int vdec_queue_setup(struct vb2_queue *q,
static int vdec_verify_conf(struct venus_inst *inst)
{
+ enum hfi_version ver = inst->core->res->hfi_version;
struct hfi_buffer_requirements bufreq;
int ret;
@@ -700,14 +773,14 @@ static int vdec_verify_conf(struct venus_inst *inst)
return ret;
if (inst->num_output_bufs < bufreq.count_actual ||
- inst->num_output_bufs < bufreq.count_min)
+ inst->num_output_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
return -EINVAL;
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
if (ret)
return ret;
- if (inst->num_input_bufs < bufreq.count_min)
+ if (inst->num_input_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
return -EINVAL;
return 0;
@@ -716,8 +789,6 @@ static int vdec_verify_conf(struct venus_inst *inst)
static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct venus_inst *inst = vb2_get_drv_priv(q);
- struct venus_core *core = inst->core;
- u32 ptype;
int ret;
mutex_lock(&inst->lock);
@@ -746,24 +817,20 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
if (ret)
goto deinit_sess;
- if (core->res->hfi_version == HFI_VERSION_3XX) {
- struct hfi_buffer_size_actual buf_sz;
-
- ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
- buf_sz.type = HFI_BUFFER_OUTPUT;
- buf_sz.size = inst->output_buf_size;
-
- ret = hfi_session_set_property(inst, ptype, &buf_sz);
- if (ret)
- goto deinit_sess;
- }
+ ret = vdec_output_conf(inst);
+ if (ret)
+ goto deinit_sess;
ret = vdec_verify_conf(inst);
if (ret)
goto deinit_sess;
ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs,
- VB2_MAX_FRAME);
+ VB2_MAX_FRAME, VB2_MAX_FRAME);
+ if (ret)
+ goto deinit_sess;
+
+ ret = venus_helper_alloc_dpb_bufs(inst);
if (ret)
goto deinit_sess;
@@ -818,9 +885,11 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
vbuf->field = V4L2_FIELD_NONE;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ unsigned int opb_sz = venus_helper_get_opb_size(inst);
+
vb = &vbuf->vb2_buf;
vb->planes[0].bytesused =
- max_t(unsigned int, inst->output_buf_size, bytesused);
+ max_t(unsigned int, opb_sz, bytesused);
vb->planes[0].data_offset = data_offset;
vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++;
@@ -901,22 +970,7 @@ static void vdec_inst_init(struct venus_inst *inst)
inst->fps = 30;
inst->timeperframe.numerator = 1;
inst->timeperframe.denominator = 30;
-
- inst->cap_width.min = 64;
- inst->cap_width.max = 1920;
- if (inst->core->res->hfi_version == HFI_VERSION_3XX)
- inst->cap_width.max = 3840;
- inst->cap_width.step_size = 1;
- inst->cap_height.min = 64;
- inst->cap_height.max = ALIGN(1080, 32);
- if (inst->core->res->hfi_version == HFI_VERSION_3XX)
- inst->cap_height.max = ALIGN(2160, 32);
- inst->cap_height.step_size = 1;
- inst->cap_framerate.min = 1;
- inst->cap_framerate.max = 30;
- inst->cap_framerate.step_size = 1;
- inst->cap_mbs_per_frame.min = 16;
- inst->cap_mbs_per_frame.max = 8160;
+ inst->hfi_codec = HFI_VIDEO_CODEC_H264;
}
static const struct v4l2_m2m_ops vdec_m2m_ops = {
@@ -973,6 +1027,7 @@ static int vdec_open(struct file *file)
if (!inst)
return -ENOMEM;
+ INIT_LIST_HEAD(&inst->dpbbufs);
INIT_LIST_HEAD(&inst->registeredbufs);
INIT_LIST_HEAD(&inst->internalbufs);
INIT_LIST_HEAD(&inst->list);
@@ -1080,12 +1135,18 @@ static int vdec_probe(struct platform_device *pdev)
if (!core)
return -EPROBE_DEFER;
- if (core->res->hfi_version == HFI_VERSION_3XX) {
+ if (IS_V3(core) || IS_V4(core)) {
core->core0_clk = devm_clk_get(dev, "core");
if (IS_ERR(core->core0_clk))
return PTR_ERR(core->core0_clk);
}
+ if (IS_V4(core)) {
+ core->core0_bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(core->core0_bus_clk))
+ return PTR_ERR(core->core0_bus_clk);
+ }
+
platform_set_drvdata(pdev, core);
vdev = video_device_alloc();
@@ -1130,15 +1191,21 @@ static int vdec_remove(struct platform_device *pdev)
static __maybe_unused int vdec_runtime_suspend(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
- if (core->res->hfi_version == HFI_VERSION_1XX)
+ if (IS_V1(core))
return 0;
- writel(0, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL);
+ ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, true);
+ if (ret)
+ return ret;
+
+ if (IS_V4(core))
+ clk_disable_unprepare(core->core0_bus_clk);
+
clk_disable_unprepare(core->core0_clk);
- writel(1, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL);
- return 0;
+ return venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false);
}
static __maybe_unused int vdec_runtime_resume(struct device *dev)
@@ -1146,13 +1213,29 @@ static __maybe_unused int vdec_runtime_resume(struct device *dev)
struct venus_core *core = dev_get_drvdata(dev);
int ret;
- if (core->res->hfi_version == HFI_VERSION_1XX)
+ if (IS_V1(core))
return 0;
- writel(0, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL);
+ ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, true);
+ if (ret)
+ return ret;
+
ret = clk_prepare_enable(core->core0_clk);
- writel(1, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL);
+ if (ret)
+ goto err_power_disable;
+ if (IS_V4(core))
+ ret = clk_prepare_enable(core->core0_bus_clk);
+
+ if (ret)
+ goto err_unprepare_core0;
+
+ return venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false);
+
+err_unprepare_core0:
+ clk_disable_unprepare(core->core0_clk);
+err_power_disable:
+ venus_helper_power_enable(core, VIDC_SESSION_TYPE_DEC, false);
return ret;
}
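
vdec_runtime_resume() above adopts the kernel's goto-unwind idiom: each acquired resource gets a label that releases it, and a failure jumps to the label for the most recent step that succeeded, so teardown always runs in reverse order. A condensed sketch of the shape; power_on()/clk_on()/bus_clk_on() and their counterparts are placeholders for the real PM and clock calls:

    #include <linux/device.h>

    /* Placeholder helpers standing in for the real PM/clock calls. */
    int power_on(struct device *dev);
    void power_off(struct device *dev);
    int clk_on(struct device *dev);
    void clk_off(struct device *dev);
    int bus_clk_on(struct device *dev);

    static int resume_sketch(struct device *dev)
    {
            int ret;

            ret = power_on(dev);
            if (ret)
                    return ret;             /* nothing to undo yet */

            ret = clk_on(dev);
            if (ret)
                    goto err_power_off;

            ret = bus_clk_on(dev);          /* 4xx-only step in the real code */
            if (ret)
                    goto err_clk_off;

            return 0;

    err_clk_off:
            clk_off(dev);
    err_power_off:
            power_off(dev);
            return ret;
    }
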
diff --git a/drivers/media/platform/qcom/venus/vdec_ctrls.c b/drivers/media/platform/qcom/venus/vdec_ctrls.c
index 032839bbc967..f4604b0cd57e 100644
--- a/drivers/media/platform/qcom/venus/vdec_ctrls.c
+++ b/drivers/media/platform/qcom/venus/vdec_ctrls.c
@@ -29,7 +29,7 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
- case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
ctr->profile = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
@@ -54,7 +54,7 @@ static int vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
- case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
ret = hfi_session_get_property(inst, ptype, &hprop);
if (!ret)
ctr->profile = hprop.profile_level.profile;
@@ -130,8 +130,10 @@ int vdec_ctrl_init(struct venus_inst *inst)
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
- ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_VPX_PROFILE, 0, 3, 1, 0);
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_3,
+ 0, V4L2_MPEG_VIDEO_VP8_PROFILE_0);
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
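
The change above converts VP8 profile from a plain integer control to a standard menu control. v4l2_ctrl_new_std_menu() takes the highest valid menu item, a skip mask whose set bits hide individual items (0 here), and the default item; allocation errors are latched in the handler rather than returned. A sketch of registering the same control from a hypothetical init path:

    #include <media/v4l2-ctrls.h>

    static int register_vp8_profile(struct v4l2_ctrl_handler *hdl,
                                    const struct v4l2_ctrl_ops *ops)
    {
            v4l2_ctrl_new_std_menu(hdl, ops,
                                   V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
                                   V4L2_MPEG_VIDEO_VP8_PROFILE_3, /* max */
                                   0,                   /* no items skipped */
                                   V4L2_MPEG_VIDEO_VP8_PROFILE_0); /* default */

            /* failures are recorded in the handler, not returned */
            return hdl->error;
    }
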
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 6b2ce479584e..41249d1443fa 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -24,38 +25,13 @@
#include <media/v4l2-ctrls.h>
#include "hfi_venus_io.h"
+#include "hfi_parser.h"
#include "core.h"
#include "helpers.h"
#include "venc.h"
#define NUM_B_FRAMES_MAX 4
-static u32 get_framesize_uncompressed(unsigned int plane, u32 width, u32 height)
-{
- u32 y_stride, uv_stride, y_plane;
- u32 y_sclines, uv_sclines, uv_plane;
- u32 size;
-
- y_stride = ALIGN(width, 128);
- uv_stride = ALIGN(width, 128);
- y_sclines = ALIGN(height, 32);
- uv_sclines = ALIGN(((height + 1) >> 1), 16);
-
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines + SZ_4K;
- size = y_plane + uv_plane + SZ_8K;
- size = ALIGN(size, SZ_4K);
-
- return size;
-}
-
-static u32 get_framesize_compressed(u32 width, u32 height)
-{
- u32 sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2 / 2;
-
- return ALIGN(sz, SZ_4K);
-}
-
/*
 * Three reasons to keep MPLANE formats (despite the number of planes
 * currently being one):
@@ -84,6 +60,10 @@ static const struct venus_format venc_formats[] = {
.pixfmt = V4L2_PIX_FMT_VP8,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_HEVC,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
},
};
@@ -223,7 +203,7 @@ static int venc_v4l2_to_hfi(int id, int value)
case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC:
return HFI_H264_ENTROPY_CABAC;
}
- case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
switch (value) {
case 0:
default:
@@ -245,6 +225,46 @@ static int venc_v4l2_to_hfi(int id, int value)
case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY:
return HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY;
}
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
+ default:
+ return HFI_HEVC_PROFILE_MAIN;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
+ return HFI_HEVC_PROFILE_MAIN_STILL_PIC;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
+ return HFI_HEVC_PROFILE_MAIN10;
+ }
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
+ default:
+ return HFI_HEVC_LEVEL_1;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
+ return HFI_HEVC_LEVEL_2;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
+ return HFI_HEVC_LEVEL_21;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
+ return HFI_HEVC_LEVEL_3;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
+ return HFI_HEVC_LEVEL_31;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
+ return HFI_HEVC_LEVEL_4;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
+ return HFI_HEVC_LEVEL_41;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
+ return HFI_HEVC_LEVEL_5;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
+ return HFI_HEVC_LEVEL_51;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
+ return HFI_HEVC_LEVEL_52;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6:
+ return HFI_HEVC_LEVEL_6;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1:
+ return HFI_HEVC_LEVEL_61;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2:
+ return HFI_HEVC_LEVEL_62;
+ }
}
return 0;
@@ -283,7 +303,6 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
const struct venus_format *fmt;
- unsigned int p;
memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
@@ -297,14 +316,12 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
else
return NULL;
fmt = find_format(inst, pixmp->pixelformat, f->type);
- pixmp->width = 1280;
- pixmp->height = 720;
}
- pixmp->width = clamp(pixmp->width, inst->cap_width.min,
- inst->cap_width.max);
- pixmp->height = clamp(pixmp->height, inst->cap_height.min,
- inst->cap_height.max);
+ pixmp->width = clamp(pixmp->width, frame_width_min(inst),
+ frame_width_max(inst));
+ pixmp->height = clamp(pixmp->height, frame_height_min(inst),
+ frame_height_max(inst));
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
pixmp->height = ALIGN(pixmp->height, 32);
@@ -317,19 +334,14 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
pixmp->num_planes = fmt->num_planes;
pixmp->flags = 0;
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- for (p = 0; p < pixmp->num_planes; p++) {
- pfmt[p].sizeimage =
- get_framesize_uncompressed(p, pixmp->width,
- pixmp->height);
+ pfmt[0].sizeimage = venus_helper_get_framesz(pixmp->pixelformat,
+ pixmp->width,
+ pixmp->height);
- pfmt[p].bytesperline = ALIGN(pixmp->width, 128);
- }
- } else {
- pfmt[0].sizeimage = get_framesize_compressed(pixmp->width,
- pixmp->height);
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ pfmt[0].bytesperline = ALIGN(pixmp->width, 128);
+ else
pfmt[0].bytesperline = 0;
- }
return fmt;
}
@@ -553,12 +565,12 @@ static int venc_enum_framesizes(struct file *file, void *fh,
if (fsize->index)
return -EINVAL;
- fsize->stepwise.min_width = inst->cap_width.min;
- fsize->stepwise.max_width = inst->cap_width.max;
- fsize->stepwise.step_width = inst->cap_width.step_size;
- fsize->stepwise.min_height = inst->cap_height.min;
- fsize->stepwise.max_height = inst->cap_height.max;
- fsize->stepwise.step_height = inst->cap_height.step_size;
+ fsize->stepwise.min_width = frame_width_min(inst);
+ fsize->stepwise.max_width = frame_width_max(inst);
+ fsize->stepwise.step_width = frame_width_step(inst);
+ fsize->stepwise.min_height = frame_height_min(inst);
+ fsize->stepwise.max_height = frame_height_max(inst);
+ fsize->stepwise.step_height = frame_height_step(inst);
return 0;
}
@@ -586,18 +598,18 @@ static int venc_enum_frameintervals(struct file *file, void *fh,
if (!fival->width || !fival->height)
return -EINVAL;
- if (fival->width > inst->cap_width.max ||
- fival->width < inst->cap_width.min ||
- fival->height > inst->cap_height.max ||
- fival->height < inst->cap_height.min)
+ if (fival->width > frame_width_max(inst) ||
+ fival->width < frame_width_min(inst) ||
+ fival->height > frame_height_max(inst) ||
+ fival->height < frame_height_min(inst))
return -EINVAL;
fival->stepwise.min.numerator = 1;
- fival->stepwise.min.denominator = inst->cap_framerate.max;
+ fival->stepwise.min.denominator = frate_max(inst);
fival->stepwise.max.numerator = 1;
- fival->stepwise.max.denominator = inst->cap_framerate.min;
+ fival->stepwise.max.denominator = frate_min(inst);
fival->stepwise.step.numerator = 1;
- fival->stepwise.step.denominator = inst->cap_framerate.max;
+ fival->stepwise.step.denominator = frate_max(inst);
return 0;
}
@@ -642,6 +654,14 @@ static int venc_set_properties(struct venus_inst *inst)
u32 ptype, rate_control, bitrate, profile = 0, level = 0;
int ret;
+ ret = venus_helper_set_work_mode(inst, VIDC_WORK_MODE_2);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_set_core_usage(inst, VIDC_CORE_ID_2);
+ if (ret)
+ return ret;
+
ptype = HFI_PROPERTY_CONFIG_FRAME_RATE;
frate.buffer_type = HFI_BUFFER_OUTPUT;
frate.framerate = inst->fps * (1 << 16);
@@ -756,7 +776,7 @@ static int venc_set_properties(struct venus_inst *inst)
level = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_H264_LEVEL,
ctr->level.h264);
} else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_VP8) {
- profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_VPX_PROFILE,
+ profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
ctr->profile.vpx);
level = 0;
} else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_MPEG4) {
@@ -767,6 +787,11 @@ static int venc_set_properties(struct venus_inst *inst)
} else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H263) {
profile = 0;
level = 0;
+ } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
+ profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
+ ctr->profile.hevc);
+ level = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
+ ctr->level.hevc);
}
ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
@@ -794,7 +819,8 @@ static int venc_init_session(struct venus_inst *inst)
goto deinit;
ret = venus_helper_set_output_resolution(inst, inst->width,
- inst->height);
+ inst->height,
+ HFI_BUFFER_OUTPUT);
if (ret)
goto deinit;
@@ -835,7 +861,7 @@ static int venc_queue_setup(struct vb2_queue *q,
unsigned int sizes[], struct device *alloc_devs[])
{
struct venus_inst *inst = vb2_get_drv_priv(q);
- unsigned int p, num, min = 4;
+ unsigned int num, min = 4;
int ret = 0;
if (*num_planes) {
@@ -870,16 +896,18 @@ static int venc_queue_setup(struct vb2_queue *q,
*num_buffers = max(*num_buffers, num);
inst->num_input_bufs = *num_buffers;
- for (p = 0; p < *num_planes; ++p)
- sizes[p] = get_framesize_uncompressed(p, inst->width,
- inst->height);
+ sizes[0] = venus_helper_get_framesz(inst->fmt_out->pixfmt,
+ inst->width,
+ inst->height);
inst->input_buf_size = sizes[0];
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
*num_planes = inst->fmt_cap->num_planes;
*num_buffers = max(*num_buffers, min);
inst->num_output_bufs = *num_buffers;
- sizes[0] = get_framesize_compressed(inst->width, inst->height);
+ sizes[0] = venus_helper_get_framesz(inst->fmt_cap->pixfmt,
+ inst->width,
+ inst->height);
inst->output_buf_size = sizes[0];
break;
default:
@@ -892,6 +920,7 @@ static int venc_queue_setup(struct vb2_queue *q,
static int venc_verify_conf(struct venus_inst *inst)
{
+ enum hfi_version ver = inst->core->res->hfi_version;
struct hfi_buffer_requirements bufreq;
int ret;
@@ -903,7 +932,7 @@ static int venc_verify_conf(struct venus_inst *inst)
return ret;
if (inst->num_output_bufs < bufreq.count_actual ||
- inst->num_output_bufs < bufreq.count_min)
+ inst->num_output_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
return -EINVAL;
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
@@ -911,7 +940,7 @@ static int venc_verify_conf(struct venus_inst *inst)
return ret;
if (inst->num_input_bufs < bufreq.count_actual ||
- inst->num_input_bufs < bufreq.count_min)
+ inst->num_input_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
return -EINVAL;
return 0;
@@ -952,7 +981,7 @@ static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
goto deinit_sess;
ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs,
- inst->num_output_bufs);
+ inst->num_output_bufs, 0);
if (ret)
goto deinit_sess;
@@ -1090,22 +1119,7 @@ static void venc_inst_init(struct venus_inst *inst)
inst->fps = 15;
inst->timeperframe.numerator = 1;
inst->timeperframe.denominator = 15;
-
- inst->cap_width.min = 96;
- inst->cap_width.max = 1920;
- if (inst->core->res->hfi_version == HFI_VERSION_3XX)
- inst->cap_width.max = 3840;
- inst->cap_width.step_size = 2;
- inst->cap_height.min = 64;
- inst->cap_height.max = ALIGN(1080, 32);
- if (inst->core->res->hfi_version == HFI_VERSION_3XX)
- inst->cap_height.max = ALIGN(2160, 32);
- inst->cap_height.step_size = 2;
- inst->cap_framerate.min = 1;
- inst->cap_framerate.max = 30;
- inst->cap_framerate.step_size = 1;
- inst->cap_mbs_per_frame.min = 24;
- inst->cap_mbs_per_frame.max = 8160;
+ inst->hfi_codec = HFI_VIDEO_CODEC_H264;
}
static int venc_open(struct file *file)
@@ -1118,6 +1132,7 @@ static int venc_open(struct file *file)
if (!inst)
return -ENOMEM;
+ INIT_LIST_HEAD(&inst->dpbbufs);
INIT_LIST_HEAD(&inst->registeredbufs);
INIT_LIST_HEAD(&inst->internalbufs);
INIT_LIST_HEAD(&inst->list);
@@ -1224,12 +1239,18 @@ static int venc_probe(struct platform_device *pdev)
if (!core)
return -EPROBE_DEFER;
- if (core->res->hfi_version == HFI_VERSION_3XX) {
+ if (IS_V3(core) || IS_V4(core)) {
core->core1_clk = devm_clk_get(dev, "core");
if (IS_ERR(core->core1_clk))
return PTR_ERR(core->core1_clk);
}
+ if (IS_V4(core)) {
+ core->core1_bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(core->core1_bus_clk))
+ return PTR_ERR(core->core1_bus_clk);
+ }
+
platform_set_drvdata(pdev, core);
vdev = video_device_alloc();
@@ -1274,15 +1295,21 @@ static int venc_remove(struct platform_device *pdev)
static __maybe_unused int venc_runtime_suspend(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
- if (core->res->hfi_version == HFI_VERSION_1XX)
+ if (IS_V1(core))
return 0;
- writel(0, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL);
+ ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, true);
+ if (ret)
+ return ret;
+
+ if (IS_V4(core))
+ clk_disable_unprepare(core->core1_bus_clk);
+
clk_disable_unprepare(core->core1_clk);
- writel(1, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL);
- return 0;
+ return venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false);
}
static __maybe_unused int venc_runtime_resume(struct device *dev)
@@ -1290,13 +1317,29 @@ static __maybe_unused int venc_runtime_resume(struct device *dev)
struct venus_core *core = dev_get_drvdata(dev);
int ret;
- if (core->res->hfi_version == HFI_VERSION_1XX)
+ if (IS_V1(core))
return 0;
- writel(0, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL);
+ ret = venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, true);
+ if (ret)
+ return ret;
+
ret = clk_prepare_enable(core->core1_clk);
- writel(1, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL);
+ if (ret)
+ goto err_power_disable;
+
+ if (IS_V4(core))
+ ret = clk_prepare_enable(core->core1_bus_clk);
+
+ if (ret)
+ goto err_unprepare_core1;
+
+ return venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false);
+err_unprepare_core1:
+ clk_disable_unprepare(core->core1_clk);
+err_power_disable:
+ venus_helper_power_enable(core, VIDC_SESSION_TYPE_ENC, false);
return ret;
}
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
index 21e938a28662..459101728d26 100644
--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -101,7 +101,7 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
ctr->profile.h264 = ctrl->val;
break;
- case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
ctr->profile.vpx = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
@@ -248,6 +248,11 @@ int venc_ctrl_init(struct venus_inst *inst)
V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES,
0, V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_3,
+ 0, V4L2_MPEG_VIDEO_VP8_PROFILE_0);
+
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_BITRATE, BITRATE_MIN, BITRATE_MAX,
BITRATE_STEP, BITRATE_DEFAULT);
@@ -257,9 +262,6 @@ int venc_ctrl_init(struct venus_inst *inst)
BITRATE_STEP, BITRATE_DEFAULT_PEAK);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_VPX_PROFILE, 0, 3, 1, 0);
-
- v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 1, 51, 1, 26);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index 2988031d285d..b47af8eb145a 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* rcar-fcp.c -- R-Car Frame Compression Processor Driver
*
* Copyright (C) 2016 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/device.h>
diff --git a/drivers/media/platform/rcar-vin/Kconfig b/drivers/media/platform/rcar-vin/Kconfig
index baf4eafff6e3..e3eb8fee2536 100644
--- a/drivers/media/platform/rcar-vin/Kconfig
+++ b/drivers/media/platform/rcar-vin/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config VIDEO_RCAR_CSI2
tristate "R-Car MIPI CSI-2 Receiver"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF
diff --git a/drivers/media/platform/rcar-vin/Makefile b/drivers/media/platform/rcar-vin/Makefile
index 5ab803d3e7c1..00d809f5d2c1 100644
--- a/drivers/media/platform/rcar-vin/Makefile
+++ b/drivers/media/platform/rcar-vin/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rcar-vin-objs = rcar-core.o rcar-dma.o rcar-v4l2.o
obj-$(CONFIG_VIDEO_RCAR_CSI2) += rcar-csi2.o
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index d3072e166a1c..ce09799976ef 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Renesas R-Car VIN
*
@@ -7,11 +8,6 @@
* Copyright (C) 2008 Magnus Damm
*
* Based on the soc-camera rcar_vin driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/module.h>
@@ -46,6 +42,8 @@
*/
#define rvin_group_id_to_master(vin) ((vin) < 4 ? 0 : 4)
+#define v4l2_dev_to_vin(d) container_of(d, struct rvin_dev, v4l2_dev)
+
/* -----------------------------------------------------------------------------
* Media Controller link notification
*/
@@ -169,9 +167,37 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags,
/* Add the new link to the existing mask and check if it works. */
csi_id = rvin_group_entity_to_csi_id(group, link->source->entity);
+
+ if (csi_id == -ENODEV) {
+ struct v4l2_subdev *sd;
+ unsigned int i;
+
+ /*
+		 * If the source entity is not one of the CSI-2 subdevices,
+		 * make sure it is registered as a parallel input of one of
+		 * the enabled VINs.
+		 *
+		 * Parallel inputs need no hardware configuration, so we can
+		 * return here.
+ */
+ sd = media_entity_to_v4l2_subdev(link->source->entity);
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (group->vin[i] && group->vin[i]->parallel &&
+ group->vin[i]->parallel->subdev == sd) {
+ group->vin[i]->is_csi = false;
+ ret = 0;
+ goto out;
+ }
+ }
+
+ vin_err(vin, "Subdevice %s not registered to any VIN\n",
+ link->source->entity->name);
+ ret = -ENODEV;
+ goto out;
+ }
+
channel = rvin_group_csi_pad_to_channel(link->source->index);
mask_new = mask & rvin_group_get_mask(vin, csi_id, channel);
-
vin_dbg(vin, "Try link change mask: 0x%x new: 0x%x\n", mask, mask_new);
if (!mask_new) {
@@ -181,6 +207,11 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags,
/* New valid CHSEL found, set the new value. */
ret = rvin_set_channel_routing(group->vin[master_id], __ffs(mask_new));
+ if (ret)
+ goto out;
+
+ vin->is_csi = true;
+
out:
mutex_unlock(&group->lock);
@@ -359,8 +390,6 @@ out:
* Async notifier
*/
-#define notifier_to_vin(n) container_of(n, struct rvin_dev, notifier)
-
static int rvin_find_pad(struct v4l2_subdev *sd, int direction)
{
unsigned int pad;
@@ -376,12 +405,12 @@ static int rvin_find_pad(struct v4l2_subdev *sd, int direction)
}
/* -----------------------------------------------------------------------------
- * Digital async notifier
+ * Parallel async notifier
*/
/* The vin lock should be held when calling the subdevice attach and detach */
-static int rvin_digital_subdevice_attach(struct rvin_dev *vin,
- struct v4l2_subdev *subdev)
+static int rvin_parallel_subdevice_attach(struct rvin_dev *vin,
+ struct v4l2_subdev *subdev)
{
struct v4l2_subdev_mbus_code_enum code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -392,15 +421,20 @@ static int rvin_digital_subdevice_attach(struct rvin_dev *vin,
ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SOURCE);
if (ret < 0)
return ret;
- vin->digital->source_pad = ret;
+ vin->parallel->source_pad = ret;
ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SINK);
- vin->digital->sink_pad = ret < 0 ? 0 : ret;
+ vin->parallel->sink_pad = ret < 0 ? 0 : ret;
+
+ if (vin->info->use_mc) {
+ vin->parallel->subdev = subdev;
+ return 0;
+ }
/* Find compatible subdevices mbus format */
vin->mbus_code = 0;
code.index = 0;
- code.pad = vin->digital->source_pad;
+ code.pad = vin->parallel->source_pad;
while (!vin->mbus_code &&
!v4l2_subdev_call(subdev, pad, enum_mbus_code, NULL, &code)) {
code.index++;
@@ -450,23 +484,27 @@ static int rvin_digital_subdevice_attach(struct rvin_dev *vin,
vin->vdev.ctrl_handler = &vin->ctrl_handler;
- vin->digital->subdev = subdev;
+ vin->parallel->subdev = subdev;
return 0;
}
-static void rvin_digital_subdevice_detach(struct rvin_dev *vin)
+static void rvin_parallel_subdevice_detach(struct rvin_dev *vin)
{
rvin_v4l2_unregister(vin);
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ vin->parallel->subdev = NULL;
- vin->vdev.ctrl_handler = NULL;
- vin->digital->subdev = NULL;
+ if (!vin->info->use_mc) {
+ v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ vin->vdev.ctrl_handler = NULL;
+ }
}
-static int rvin_digital_notify_complete(struct v4l2_async_notifier *notifier)
+static int rvin_parallel_notify_complete(struct v4l2_async_notifier *notifier)
{
- struct rvin_dev *vin = notifier_to_vin(notifier);
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ struct media_entity *source;
+ struct media_entity *sink;
int ret;
ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
@@ -475,31 +513,50 @@ static int rvin_digital_notify_complete(struct v4l2_async_notifier *notifier)
return ret;
}
- return rvin_v4l2_register(vin);
+ if (!video_is_registered(&vin->vdev)) {
+ ret = rvin_v4l2_register(vin);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!vin->info->use_mc)
+ return 0;
+
+ /* If we're running with media-controller, link the subdevs. */
+ source = &vin->parallel->subdev->entity;
+ sink = &vin->vdev.entity;
+
+ ret = media_create_pad_link(source, vin->parallel->source_pad,
+ sink, vin->parallel->sink_pad, 0);
+ if (ret)
+ vin_err(vin, "Error adding link from %s to %s: %d\n",
+ source->name, sink->name, ret);
+
+ return ret;
}
-static void rvin_digital_notify_unbind(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+static void rvin_parallel_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
{
- struct rvin_dev *vin = notifier_to_vin(notifier);
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
- vin_dbg(vin, "unbind digital subdev %s\n", subdev->name);
+ vin_dbg(vin, "unbind parallel subdev %s\n", subdev->name);
mutex_lock(&vin->lock);
- rvin_digital_subdevice_detach(vin);
+ rvin_parallel_subdevice_detach(vin);
mutex_unlock(&vin->lock);
}
-static int rvin_digital_notify_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
{
- struct rvin_dev *vin = notifier_to_vin(notifier);
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
int ret;
mutex_lock(&vin->lock);
- ret = rvin_digital_subdevice_attach(vin, subdev);
+ ret = rvin_parallel_subdevice_attach(vin, subdev);
mutex_unlock(&vin->lock);
if (ret)
return ret;
@@ -507,70 +564,71 @@ static int rvin_digital_notify_bound(struct v4l2_async_notifier *notifier,
v4l2_set_subdev_hostdata(subdev, vin);
vin_dbg(vin, "bound subdev %s source pad: %u sink pad: %u\n",
- subdev->name, vin->digital->source_pad,
- vin->digital->sink_pad);
+ subdev->name, vin->parallel->source_pad,
+ vin->parallel->sink_pad);
return 0;
}
-static const struct v4l2_async_notifier_operations rvin_digital_notify_ops = {
- .bound = rvin_digital_notify_bound,
- .unbind = rvin_digital_notify_unbind,
- .complete = rvin_digital_notify_complete,
+static const struct v4l2_async_notifier_operations rvin_parallel_notify_ops = {
+ .bound = rvin_parallel_notify_bound,
+ .unbind = rvin_parallel_notify_unbind,
+ .complete = rvin_parallel_notify_complete,
};
-static int rvin_digital_parse_v4l2(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd)
+static int rvin_parallel_parse_v4l2(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
{
struct rvin_dev *vin = dev_get_drvdata(dev);
- struct rvin_graph_entity *rvge =
- container_of(asd, struct rvin_graph_entity, asd);
+ struct rvin_parallel_entity *rvpe =
+ container_of(asd, struct rvin_parallel_entity, asd);
if (vep->base.port || vep->base.id)
return -ENOTCONN;
- vin->mbus_cfg.type = vep->bus_type;
+ vin->parallel = rvpe;
+ vin->parallel->mbus_type = vep->bus_type;
- switch (vin->mbus_cfg.type) {
+ switch (vin->parallel->mbus_type) {
case V4L2_MBUS_PARALLEL:
vin_dbg(vin, "Found PARALLEL media bus\n");
- vin->mbus_cfg.flags = vep->bus.parallel.flags;
+ vin->parallel->mbus_flags = vep->bus.parallel.flags;
break;
case V4L2_MBUS_BT656:
vin_dbg(vin, "Found BT656 media bus\n");
- vin->mbus_cfg.flags = 0;
+ vin->parallel->mbus_flags = 0;
break;
default:
vin_err(vin, "Unknown media bus type\n");
return -EINVAL;
}
- vin->digital = rvge;
-
return 0;
}
-static int rvin_digital_graph_init(struct rvin_dev *vin)
+static int rvin_parallel_init(struct rvin_dev *vin)
{
int ret;
- ret = v4l2_async_notifier_parse_fwnode_endpoints(
- vin->dev, &vin->notifier,
- sizeof(struct rvin_graph_entity), rvin_digital_parse_v4l2);
+ ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(
+ vin->dev, &vin->notifier, sizeof(struct rvin_parallel_entity),
+ 0, rvin_parallel_parse_v4l2);
if (ret)
return ret;
- if (!vin->digital)
- return -ENODEV;
+ /* If using mc, it's fine not to have any input registered. */
+ if (!vin->parallel)
+ return vin->info->use_mc ? 0 : -ENODEV;
- vin_dbg(vin, "Found digital subdevice %pOF\n",
- to_of_node(vin->digital->asd.match.fwnode));
+ vin_dbg(vin, "Found parallel subdevice %pOF\n",
+ to_of_node(vin->parallel->asd.match.fwnode));
- vin->notifier.ops = &rvin_digital_notify_ops;
+ vin->notifier.ops = &rvin_parallel_notify_ops;
ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
+ v4l2_async_notifier_cleanup(&vin->group->notifier);
return ret;
}
@@ -583,7 +641,7 @@ static int rvin_digital_graph_init(struct rvin_dev *vin)
static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
{
- struct rvin_dev *vin = notifier_to_vin(notifier);
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
const struct rvin_group_route *route;
unsigned int i;
int ret;
@@ -596,7 +654,8 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
/* Register all video nodes for the group. */
for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (vin->group->vin[i]) {
+ if (vin->group->vin[i] &&
+ !video_is_registered(&vin->group->vin[i]->vdev)) {
ret = rvin_v4l2_register(vin->group->vin[i]);
if (ret)
return ret;
@@ -649,7 +708,7 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
{
- struct rvin_dev *vin = notifier_to_vin(notifier);
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
unsigned int i;
for (i = 0; i < RCAR_VIN_NUM; i++)
@@ -673,7 +732,7 @@ static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
{
- struct rvin_dev *vin = notifier_to_vin(notifier);
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
unsigned int i;
mutex_lock(&vin->group->lock);
@@ -707,11 +766,9 @@ static int rvin_mc_parse_of_endpoint(struct device *dev,
return -EINVAL;
if (!of_device_is_available(to_of_node(asd->match.fwnode))) {
-
vin_dbg(vin, "OF device %pOF disabled, ignoring\n",
to_of_node(asd->match.fwnode));
return -ENOTCONN;
-
}
if (vin->group->csi[vep->base.id].fwnode) {
@@ -736,12 +793,6 @@ static int rvin_mc_parse_of_graph(struct rvin_dev *vin)
mutex_lock(&vin->group->lock);
- /* If there already is a notifier something has gone wrong, bail out. */
- if (WARN_ON(vin->group->notifier)) {
- mutex_unlock(&vin->group->lock);
- return -EINVAL;
- }
-
/* If not all VIN's are registered don't register the notifier. */
for (i = 0; i < RCAR_VIN_NUM; i++)
if (vin->group->vin[i])
@@ -753,19 +804,16 @@ static int rvin_mc_parse_of_graph(struct rvin_dev *vin)
}
/*
- * Have all VIN's look for subdevices. Some subdevices will overlap
- * but the parser function can handle it, so each subdevice will
- * only be registered once with the notifier.
+	 * Have all VINs look for CSI-2 subdevices. Some subdevices will
+	 * overlap, but the parser function can handle it, so each subdevice
+	 * will only be registered once with the group notifier.
*/
-
- vin->group->notifier = &vin->notifier;
-
for (i = 0; i < RCAR_VIN_NUM; i++) {
if (!vin->group->vin[i])
continue;
ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(
- vin->group->vin[i]->dev, vin->group->notifier,
+ vin->group->vin[i]->dev, &vin->group->notifier,
sizeof(struct v4l2_async_subdev), 1,
rvin_mc_parse_of_endpoint);
if (ret) {
@@ -776,11 +824,15 @@ static int rvin_mc_parse_of_graph(struct rvin_dev *vin)
mutex_unlock(&vin->group->lock);
- vin->group->notifier->ops = &rvin_group_notify_ops;
+ if (!vin->group->notifier.num_subdevs)
+ return 0;
- ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
+ vin->group->notifier.ops = &rvin_group_notify_ops;
+ ret = v4l2_async_notifier_register(&vin->v4l2_dev,
+ &vin->group->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
+ v4l2_async_notifier_cleanup(&vin->group->notifier);
return ret;
}
@@ -791,10 +843,6 @@ static int rvin_mc_init(struct rvin_dev *vin)
{
int ret;
- /* All our sources are CSI-2 */
- vin->mbus_cfg.type = V4L2_MBUS_CSI2;
- vin->mbus_cfg.flags = 0;
-
vin->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vin->vdev.entity, 1, &vin->pad);
if (ret)
@@ -974,7 +1022,51 @@ static const struct rvin_info rcar_info_r8a7796 = {
.routes = rcar_info_r8a7796_routes,
};
-static const struct rvin_group_route _rcar_info_r8a77970_routes[] = {
+static const struct rvin_group_route rcar_info_r8a77965_routes[] = {
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 0, .mask = BIT(1) | BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 0, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 1, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(1) | BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 1, .mask = BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 2, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 2, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 2, .vin = 2, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 3, .mask = BIT(1) | BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 3, .vin = 3, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 4, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 4, .mask = BIT(1) | BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 4, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 5, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 5, .mask = BIT(1) | BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 5, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 5, .mask = BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 6, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 6, .mask = BIT(1) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 6, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 2, .vin = 6, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 2, .vin = 6, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 7, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 7, .mask = BIT(1) | BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 3, .vin = 7, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 3, .vin = 7, .mask = BIT(4) },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a77965 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a77965_routes,
+};
+
+static const struct rvin_group_route rcar_info_r8a77970_routes[] = {
{ .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
{ .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
{ .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(3) },
@@ -990,7 +1082,19 @@ static const struct rvin_info rcar_info_r8a77970 = {
.use_mc = true,
.max_width = 4096,
.max_height = 4096,
- .routes = _rcar_info_r8a77970_routes,
+ .routes = rcar_info_r8a77970_routes,
+};
+
+static const struct rvin_group_route rcar_info_r8a77995_routes[] = {
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a77995 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a77995_routes,
};
static const struct of_device_id rvin_of_id_table[] = {
@@ -1031,9 +1135,17 @@ static const struct of_device_id rvin_of_id_table[] = {
.data = &rcar_info_r8a7796,
},
{
+ .compatible = "renesas,vin-r8a77965",
+ .data = &rcar_info_r8a77965,
+ },
+ {
.compatible = "renesas,vin-r8a77970",
.data = &rcar_info_r8a77970,
},
+ {
+ .compatible = "renesas,vin-r8a77995",
+ .data = &rcar_info_r8a77995,
+ },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, rvin_of_id_table);
@@ -1085,20 +1197,35 @@ static int rcar_vin_probe(struct platform_device *pdev)
return ret;
platform_set_drvdata(pdev, vin);
- if (vin->info->use_mc)
+
+ if (vin->info->use_mc) {
ret = rvin_mc_init(vin);
- else
- ret = rvin_digital_graph_init(vin);
- if (ret < 0)
- goto error;
+ if (ret)
+ goto error_dma_unregister;
+ }
+
+ ret = rvin_parallel_init(vin);
+ if (ret)
+ goto error_group_unregister;
pm_suspend_ignore_children(&pdev->dev, true);
pm_runtime_enable(&pdev->dev);
return 0;
-error:
+
+error_group_unregister:
+ if (vin->info->use_mc) {
+ mutex_lock(&vin->group->lock);
+ if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
+ v4l2_async_notifier_unregister(&vin->group->notifier);
+ v4l2_async_notifier_cleanup(&vin->group->notifier);
+ }
+ mutex_unlock(&vin->group->lock);
+ rvin_group_put(vin);
+ }
+
+error_dma_unregister:
rvin_dma_unregister(vin);
- v4l2_async_notifier_cleanup(&vin->notifier);
return ret;
}
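The reworked probe error path above unwinds with one label per completed init step instead of a single catch-all target. A minimal sketch of that idiom follows; the step_*() helpers are hypothetical stand-ins for the driver's own rvin_dma_register()/rvin_mc_init()/rvin_parallel_init() calls, not real kernel APIs.

/* Hypothetical init/cleanup helpers, declared only for this sketch. */
int step_a_init(void);
void step_a_cleanup(void);
int step_b_init(void);
void step_b_cleanup(void);
int step_c_init(void);

int example_probe(void)
{
    int ret;

    ret = step_a_init();        /* e.g. DMA registration */
    if (ret)
        return ret;

    ret = step_b_init();        /* e.g. group/notifier setup */
    if (ret)
        goto error_a;

    ret = step_c_init();        /* e.g. parallel subdev init */
    if (ret)
        goto error_b;

    return 0;

error_b:
    step_b_cleanup();           /* unwind in reverse order */
error_a:
    step_a_cleanup();
    return ret;
}

Each label undoes exactly the steps that succeeded before the failure, which is why the patch adds error_group_unregister and error_dma_unregister rather than reusing one label.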
@@ -1116,8 +1243,10 @@ static int rcar_vin_remove(struct platform_device *pdev)
if (vin->info->use_mc) {
mutex_lock(&vin->group->lock);
- if (vin->group->notifier == &vin->notifier)
- vin->group->notifier = NULL;
+ if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
+ v4l2_async_notifier_unregister(&vin->group->notifier);
+ v4l2_async_notifier_cleanup(&vin->group->notifier);
+ }
mutex_unlock(&vin->group->lock);
rvin_group_put(vin);
} else {
@@ -1142,4 +1271,4 @@ module_platform_driver(rcar_vin_driver);
MODULE_AUTHOR("Niklas Söderlund <niklas.soderlund@ragnatech.se>");
MODULE_DESCRIPTION("Renesas R-Car VIN camera host driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index daef72d410a3..dc5ae8025832 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -339,6 +339,7 @@ enum rcar_csi2_pads {
struct rcar_csi2_info {
int (*init_phtw)(struct rcar_csi2 *priv, unsigned int mbps);
+ int (*confirm_start)(struct rcar_csi2 *priv);
const struct rcsi2_mbps_reg *hsfreqrange;
unsigned int csi0clkfreqrange;
bool clear_ulps;
@@ -545,6 +546,13 @@ static int rcsi2_start(struct rcar_csi2 *priv)
if (ret)
return ret;
+ /* Confirm start */
+ if (priv->info->confirm_start) {
+ ret = priv->info->confirm_start(priv);
+ if (ret)
+ return ret;
+ }
+
/* Clear Ultra Low Power interrupt. */
if (priv->info->clear_ulps)
rcsi2_write(priv, INTSTATE_REG,
@@ -881,6 +889,11 @@ static int rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps)
static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps)
{
+ return rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44);
+}
+
+static int rcsi2_confirm_start_v3m_e3(struct rcar_csi2 *priv)
+{
static const struct phtw_value step1[] = {
{ .data = 0xed, .code = 0x34 },
{ .data = 0xed, .code = 0x44 },
@@ -890,12 +903,6 @@ static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps)
{ /* sentinel */ },
};
- int ret;
-
- ret = rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44);
- if (ret)
- return ret;
-
return rcsi2_phtw_write_array(priv, step1);
}
@@ -949,6 +956,7 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a77965 = {
static const struct rcar_csi2_info rcar_csi2_info_r8a77970 = {
.init_phtw = rcsi2_init_phtw_v3m_e3,
+ .confirm_start = rcsi2_confirm_start_v3m_e3,
};
static const struct of_device_id rcar_csi2_of_table[] = {
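The confirm_start hook added above is deliberately optional: only SoC variants that need the extra PHTW writes set it in their rcar_csi2_info. A generic sketch of the optional-callback idiom, with the soc_info type and names invented for illustration:

struct soc_info {
    int (*confirm_start)(void *priv);   /* may be NULL */
};

static int start_with_optional_hook(const struct soc_info *info, void *priv)
{
    if (info->confirm_start)    /* invoke only when the SoC provides it */
        return info->confirm_start(priv);
    return 0;                   /* nothing extra to confirm */
}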
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index ac07f99e3516..92323310f735 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Renesas R-Car VIN
*
@@ -7,11 +8,6 @@
* Copyright (C) 2008 Magnus Damm
*
* Based on the soc-camera rcar_vin driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/delay.h>
@@ -123,6 +119,7 @@
/* Video n Data Mode Register 2 bits */
#define VNDMR2_VPS (1 << 30)
#define VNDMR2_HPS (1 << 29)
+#define VNDMR2_CES (1 << 28)
#define VNDMR2_FTEV (1 << 17)
#define VNDMR2_VLV(n) ((n & 0xf) << 12)
@@ -659,8 +656,12 @@ static int rvin_setup(struct rvin_dev *vin)
break;
case MEDIA_BUS_FMT_UYVY8_2X8:
/* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
- vnmc |= vin->mbus_cfg.type == V4L2_MBUS_BT656 ?
- VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
+ if (!vin->is_csi &&
+ vin->parallel->mbus_type == V4L2_MBUS_BT656)
+ vnmc |= VNMC_INF_YUV8_BT656;
+ else
+ vnmc |= VNMC_INF_YUV8_BT601;
+
input_is_yuv = true;
break;
case MEDIA_BUS_FMT_RGB888_1X24:
@@ -668,8 +669,12 @@ static int rvin_setup(struct rvin_dev *vin)
break;
case MEDIA_BUS_FMT_UYVY10_2X10:
/* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
- vnmc |= vin->mbus_cfg.type == V4L2_MBUS_BT656 ?
- VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
+ if (!vin->is_csi &&
+ vin->parallel->mbus_type == V4L2_MBUS_BT656)
+ vnmc |= VNMC_INF_YUV10_BT656;
+ else
+ vnmc |= VNMC_INF_YUV10_BT601;
+
input_is_yuv = true;
break;
default:
@@ -682,13 +687,19 @@ static int rvin_setup(struct rvin_dev *vin)
else
dmr2 = VNDMR2_FTEV | VNDMR2_VLV(1);
- /* Hsync Signal Polarity Select */
- if (!(vin->mbus_cfg.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
- dmr2 |= VNDMR2_HPS;
+ if (!vin->is_csi) {
+ /* Hsync Signal Polarity Select */
+ if (!(vin->parallel->mbus_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
+ dmr2 |= VNDMR2_HPS;
+
+ /* Vsync Signal Polarity Select */
+ if (!(vin->parallel->mbus_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
+ dmr2 |= VNDMR2_VPS;
- /* Vsync Signal Polarity Select */
- if (!(vin->mbus_cfg.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
- dmr2 |= VNDMR2_VPS;
+ /* Data Enable Polarity Select */
+ if (vin->parallel->mbus_flags & V4L2_MBUS_DATA_ENABLE_LOW)
+ dmr2 |= VNDMR2_CES;
+ }
/*
* Output format
@@ -733,8 +744,8 @@ static int rvin_setup(struct rvin_dev *vin)
vnmc |= VNMC_BPS;
if (vin->info->model == RCAR_GEN3) {
- /* Select between CSI-2 and Digital input */
- if (vin->mbus_cfg.type == V4L2_MBUS_CSI2)
+ /* Select between CSI-2 and parallel input */
+ if (vin->is_csi)
vnmc &= ~VNMC_DPINE;
else
vnmc |= VNMC_DPINE;
@@ -856,7 +867,7 @@ static int rvin_capture_start(struct rvin_dev *vin)
/* Continuous Frame Capture Mode */
rvin_write(vin, VNFC_C_FRAME, VNFC_REG);
- vin->state = RUNNING;
+ vin->state = STARTING;
return 0;
}
@@ -910,6 +921,20 @@ static irqreturn_t rvin_irq(int irq, void *data)
vnms = rvin_read(vin, VNMS_REG);
slot = (vnms & VNMS_FBS_MASK) >> VNMS_FBS_SHIFT;
+ /*
+ * To hand buffers back to userspace in a known order,
+ * start capturing from slot 0.
+ */
+ if (vin->state == STARTING) {
+ if (slot != 0) {
+ vin_dbg(vin, "Starting sync slot: %d\n", slot);
+ goto done;
+ }
+
+ vin_dbg(vin, "Capture start synced!\n");
+ vin->state = RUNNING;
+ }
+
/* Capture frame */
if (vin->queue_buf[slot]) {
vin->queue_buf[slot]->field = vin->format.field;
@@ -1074,7 +1099,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on)
/* No media controller used, simply pass operation to subdevice. */
if (!vin->info->use_mc) {
- ret = v4l2_subdev_call(vin->digital->subdev, video, s_stream,
+ ret = v4l2_subdev_call(vin->parallel->subdev, video, s_stream,
on);
return ret == -ENOIOCTLCMD ? 0 : ret;
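The STARTING state introduced in rcar-dma.c above exists so that capture interrupts are ignored until the hardware reports buffer slot 0; from then on, completed buffers come back to userspace in a known order. A condensed, illustrative version of that IRQ-side decision (not the driver code itself):

#include <stdbool.h>

enum capture_state { STOPPED, STARTING, RUNNING, STOPPING };

/* Returns true if the completed slot should be handed to userspace. */
static bool sync_capture_start(enum capture_state *state, unsigned int slot)
{
    if (*state == STARTING) {
        if (slot != 0)
            return false;   /* drop frames until slot 0 comes around */
        *state = RUNNING;   /* from here on the order is known */
    }
    return *state == RUNNING;
}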
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index e78fba84d590..5a54779cfc27 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Renesas R-Car VIN
*
@@ -7,11 +8,6 @@
* Copyright (C) 2008 Magnus Damm
*
* Based on the soc-camera rcar_vin driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/pm_runtime.h>
@@ -144,7 +140,7 @@ static int rvin_reset_format(struct rvin_dev *vin)
{
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .pad = vin->digital->source_pad,
+ .pad = vin->parallel->source_pad,
};
int ret;
@@ -175,7 +171,7 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
struct v4l2_subdev_pad_config *pad_cfg;
struct v4l2_subdev_format format = {
.which = which,
- .pad = vin->digital->source_pad,
+ .pad = vin->parallel->source_pad,
};
enum v4l2_field field;
u32 width, height;
@@ -517,7 +513,7 @@ static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
if (timings->pad)
return -EINVAL;
- timings->pad = vin->digital->sink_pad;
+ timings->pad = vin->parallel->sink_pad;
ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings);
@@ -569,7 +565,7 @@ static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
if (cap->pad)
return -EINVAL;
- cap->pad = vin->digital->sink_pad;
+ cap->pad = vin->parallel->sink_pad;
ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
@@ -587,7 +583,7 @@ static int rvin_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
if (edid->pad)
return -EINVAL;
- edid->pad = vin->digital->sink_pad;
+ edid->pad = vin->parallel->sink_pad;
ret = v4l2_subdev_call(sd, pad, get_edid, edid);
@@ -605,7 +601,7 @@ static int rvin_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
if (edid->pad)
return -EINVAL;
- edid->pad = vin->digital->sink_pad;
+ edid->pad = vin->parallel->sink_pad;
ret = v4l2_subdev_call(sd, pad, set_edid, edid);
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index c2aef789b491..0b13b34d03e3 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for Renesas R-Car VIN
*
@@ -7,11 +8,6 @@
* Copyright (C) 2008 Magnus Damm
*
* Based on the soc-camera rcar_vin driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __RCAR_VIN__
@@ -53,11 +49,13 @@ enum rvin_csi_id {
/**
* STOPPED - No operation in progress
+ * STARTING - Capture starting up
* RUNNING - Operation in progress, buffers are available
* STOPPING - Stopping operation
*/
enum rvin_dma_state {
STOPPED = 0,
+ STARTING,
RUNNING,
STOPPING,
};
@@ -73,16 +71,22 @@ struct rvin_video_format {
};
/**
- * struct rvin_graph_entity - Video endpoint from async framework
+ * struct rvin_parallel_entity - Parallel video input endpoint descriptor
* @asd: sub-device descriptor for async framework
* @subdev: subdevice matched using async framework
+ * @mbus_type: media bus type
+ * @mbus_flags: media bus configuration flags
* @source_pad: source pad of remote subdevice
* @sink_pad: sink pad of remote subdevice
+ *
*/
-struct rvin_graph_entity {
+struct rvin_parallel_entity {
struct v4l2_async_subdev asd;
struct v4l2_subdev *subdev;
+ enum v4l2_mbus_type mbus_type;
+ unsigned int mbus_flags;
+
unsigned int source_pad;
unsigned int sink_pad;
};
@@ -146,7 +150,8 @@ struct rvin_info {
* @v4l2_dev: V4L2 device
* @ctrl_handler: V4L2 control handler
* @notifier: V4L2 asynchronous subdevs notifier
- * @digital: entity in the DT for local digital subdevice
+ *
+ * @parallel: parallel input subdevice descriptor
*
* @group: Gen3 CSI group
* @id: Gen3 group id for this VIN
@@ -164,7 +169,8 @@ struct rvin_info {
* @sequence: V4L2 buffers sequence number
* @state: keeps track of operation state
*
- * @mbus_cfg: media bus configuration from DT
+ * @is_csi: flag to mark the VIN as using a CSI-2 subdevice
+ *
* @mbus_code: media bus format code
* @format: active V4L2 pixel format
*
@@ -182,7 +188,8 @@ struct rvin_dev {
struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_async_notifier notifier;
- struct rvin_graph_entity *digital;
+
+ struct rvin_parallel_entity *parallel;
struct rvin_group *group;
unsigned int id;
@@ -199,7 +206,8 @@ struct rvin_dev {
unsigned int sequence;
enum rvin_dma_state state;
- struct v4l2_mbus_config mbus_cfg;
+ bool is_csi;
+
u32 mbus_code;
struct v4l2_pix_format format;
@@ -209,7 +217,7 @@ struct rvin_dev {
v4l2_std_id std;
};
-#define vin_to_source(vin) ((vin)->digital->subdev)
+#define vin_to_source(vin) ((vin)->parallel->subdev)
/* Debug */
#define vin_dbg(d, fmt, arg...) dev_dbg(d->dev, fmt, ##arg)
@@ -225,8 +233,7 @@ struct rvin_dev {
*
* @lock: protects the count, notifier, vin and csi members
* @count: number of enabled VIN instances found in DT
- * @notifier: pointer to the notifier of a VIN which handles the
- * groups async sub-devices.
+ * @notifier: group notifier for CSI-2 async subdevices
* @vin: VIN instances which are part of the group
* @csi: array of pairs of fwnode and subdev pointers
* to all CSI-2 subdevices.
@@ -238,7 +245,7 @@ struct rvin_group {
struct mutex lock;
unsigned int count;
- struct v4l2_async_notifier *notifier;
+ struct v4l2_async_notifier notifier;
struct rvin_dev *vin[RCAR_VIN_NUM];
struct {
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index dc7e280c91b4..81413ab52475 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* R-Car Gen3 Digital Radio Interface (DRIF) driver
*
* Copyright (C) 2017 Renesas Electronics Corporation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@@ -1499,5 +1495,5 @@ module_platform_driver(rcar_drif_driver);
MODULE_DESCRIPTION("Renesas R-Car Gen3 DRIF driver");
MODULE_ALIAS("platform:" RCAR_DRIF_DRV_NAME);
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>");
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index b13dec3081e5..2a15b7cca338 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Renesas R-Car Fine Display Processor
*
@@ -8,11 +9,6 @@
*
* This code is developed and inspired from the vim2m, rcar_jpu,
* m2m-deinterlace, and vsp1 drivers.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version
*/
#include <linux/clk.h>
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index 8b44a849ab41..e5c882423f57 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Author: Mikhail Ulyanov
* Copyright (C) 2014-2015 Cogent Embedded, Inc. <source@cogentembedded.com>
@@ -11,10 +12,6 @@
* 1) Rotation
* 2) Cropping
* 3) V4L2_CID_JPEG_ACTIVE_MARKER
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <asm/unaligned.h>
@@ -198,7 +195,6 @@
* @vfd_decoder: video device node for decoder mem2mem mode
* @m2m_dev: v4l2 mem2mem device data
* @curr: pointer to current context
- * @irq_queue: interrupt handler waitqueue
* @regs: JPEG IP registers mapping
* @irq: JPEG IP irq
* @clk: JPEG IP clock
@@ -213,7 +209,6 @@ struct jpu {
struct video_device vfd_decoder;
struct v4l2_m2m_dev *m2m_dev;
struct jpu_ctx *curr;
- wait_queue_head_t irq_queue;
void __iomem *regs;
unsigned int irq;
@@ -1494,24 +1489,8 @@ static void jpu_device_run(void *priv)
spin_unlock_irqrestore(&ctx->jpu->lock, flags);
}
-static int jpu_job_ready(void *priv)
-{
- return 1;
-}
-
-static void jpu_job_abort(void *priv)
-{
- struct jpu_ctx *ctx = priv;
-
- if (!wait_event_timeout(ctx->jpu->irq_queue, !ctx->jpu->curr,
- msecs_to_jiffies(JPU_JOB_TIMEOUT)))
- jpu_cleanup(ctx, true);
-}
-
static const struct v4l2_m2m_ops jpu_m2m_ops = {
.device_run = jpu_device_run,
- .job_ready = jpu_job_ready,
- .job_abort = jpu_job_abort,
};
/*
@@ -1592,9 +1571,6 @@ static irqreturn_t jpu_irq_handler(int irq, void *dev_id)
v4l2_m2m_job_finish(jpu->m2m_dev, curr_ctx->fh.m2m_ctx);
- /* ...wakeup abort routine if needed */
- wake_up(&jpu->irq_queue);
-
return IRQ_HANDLED;
handled:
@@ -1628,7 +1604,6 @@ static int jpu_probe(struct platform_device *pdev)
if (!jpu)
return -ENOMEM;
- init_waitqueue_head(&jpu->irq_queue);
mutex_init(&jpu->mutex);
spin_lock_init(&jpu->lock);
jpu->dev = &pdev->dev;
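With the always-ready job_ready() and the waitqueue-based job_abort() dropped above, only device_run() remains in the ops table; the mem2mem core treats the other callbacks as optional. A minimal sketch (the my_* names are placeholders, not JPU code):

#include <media/v4l2-mem2mem.h>

static void my_device_run(void *priv)
{
    /* program the hardware for the next queued job */
}

static const struct v4l2_m2m_ops my_m2m_ops = {
    .device_run = my_device_run,
    /* .job_ready, .job_abort, .lock and .unlock may be left unset */
};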
diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas-ceu.c
index fe4fe944592d..ad782901cd7a 100644
--- a/drivers/media/platform/renesas-ceu.c
+++ b/drivers/media/platform/renesas-ceu.c
@@ -254,6 +254,18 @@ static const struct ceu_fmt ceu_fmt_list[] = {
.fourcc = V4L2_PIX_FMT_YUYV,
.bpp = 16,
},
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .bpp = 16,
+ },
};
static const struct ceu_fmt *get_ceu_fmt_from_fourcc(unsigned int fourcc)
@@ -272,6 +284,9 @@ static bool ceu_fmt_mplane(struct v4l2_pix_format_mplane *pix)
{
switch (pix->pixelformat) {
case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
return false;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
@@ -380,7 +395,9 @@ static int ceu_hw_config(struct ceu_device *ceudev)
switch (pix->pixelformat) {
/* Data fetch sync mode */
case V4L2_PIX_FMT_YUYV:
- /* TODO: handle YUYV permutations through DTARY bits. */
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
camcr = CEU_CAMCR_JPEG;
cdocr |= CEU_CDOCR_NO_DOWSAMPLE;
cfzsr = (pix->height << 16) | pix->width;
@@ -568,6 +585,9 @@ static void ceu_calc_plane_sizes(struct ceu_device *ceudev,
switch (pix->pixelformat) {
case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
pix->num_planes = 1;
bpl = pix->width * ceu_fmt->bpp / 8;
szimage = pix->height * bpl;
@@ -762,42 +782,59 @@ static const struct vb2_ops ceu_vb2_ops = {
/* --- CEU image formats handling --- */
/*
- * ceu_try_fmt() - test format on CEU and sensor
+ * __ceu_try_fmt() - test format on CEU and sensor
* @ceudev: The CEU device.
* @v4l2_fmt: format to test.
+ * @sd_mbus_code: the media bus code accepted by the subdevice; output param.
*
* Returns 0 for success, < 0 for errors.
*/
-static int ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
+static int __ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt,
+ u32 *sd_mbus_code)
{
struct ceu_subdev *ceu_sd = ceudev->sd;
struct v4l2_pix_format_mplane *pix = &v4l2_fmt->fmt.pix_mp;
struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
struct v4l2_subdev_pad_config pad_cfg;
const struct ceu_fmt *ceu_fmt;
+ u32 mbus_code_old;
+ u32 mbus_code;
int ret;
/*
* Set format on sensor sub device: bus format used to produce memory
- * format is selected at initialization time.
+ * format is selected to match the requested YUV component ordering,
+ * or falls back to the one chosen at initialization time.
*/
struct v4l2_subdev_format sd_format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
- .format = {
- .code = ceu_sd->mbus_fmt.mbus_code,
- },
};
+ mbus_code_old = ceu_sd->mbus_fmt.mbus_code;
+
switch (pix->pixelformat) {
case V4L2_PIX_FMT_YUYV:
+ mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ mbus_code = MEDIA_BUS_FMT_UYVY8_2X8;
+ break;
+ case V4L2_PIX_FMT_YVYU:
+ mbus_code = MEDIA_BUS_FMT_YVYU8_2X8;
+ break;
+ case V4L2_PIX_FMT_VYUY:
+ mbus_code = MEDIA_BUS_FMT_VYUY8_2X8;
+ break;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
+ mbus_code = ceu_sd->mbus_fmt.mbus_code;
break;
default:
pix->pixelformat = V4L2_PIX_FMT_NV16;
+ mbus_code = ceu_sd->mbus_fmt.mbus_code;
break;
}
@@ -808,9 +845,25 @@ static int ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
&pix->height, 4, CEU_MAX_HEIGHT, 4, 0);
v4l2_fill_mbus_format_mplane(&sd_format.format, pix);
+
+ /*
+ * Try first with the mbus_code matching the requested YUV component
+ * ordering; if the sensor rejects it, fall back to the default
+ * selected at initialization time.
+ */
+ sd_format.format.code = mbus_code;
ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt, &pad_cfg, &sd_format);
- if (ret)
- return ret;
+ if (ret) {
+ if (ret == -EINVAL) {
+ /* fallback */
+ sd_format.format.code = mbus_code_old;
+ ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt,
+ &pad_cfg, &sd_format);
+ }
+
+ if (ret)
+ return ret;
+ }
/* Apply size returned by sensor as the CEU can't scale. */
v4l2_fill_pix_format_mplane(pix, &sd_format.format);
@@ -818,16 +871,30 @@ static int ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
/* Calculate per-plane sizes based on image format. */
ceu_calc_plane_sizes(ceudev, ceu_fmt, pix);
+ /* Report to caller the configured mbus format. */
+ *sd_mbus_code = sd_format.format.code;
+
return 0;
}
/*
+ * ceu_try_fmt() - Wrapper for __ceu_try_fmt(); discards the configured mbus code
+ */
+static int ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
+{
+ u32 mbus_code;
+
+ return __ceu_try_fmt(ceudev, v4l2_fmt, &mbus_code);
+}
+
+/*
* ceu_set_fmt() - Apply the supplied format to both sensor and CEU
*/
static int ceu_set_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
{
struct ceu_subdev *ceu_sd = ceudev->sd;
struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
+ u32 mbus_code;
int ret;
/*
@@ -836,15 +903,13 @@ static int ceu_set_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
*/
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .format = {
- .code = ceu_sd->mbus_fmt.mbus_code,
- },
};
- ret = ceu_try_fmt(ceudev, v4l2_fmt);
+ ret = __ceu_try_fmt(ceudev, v4l2_fmt, &mbus_code);
if (ret)
return ret;
+ format.format.code = mbus_code;
v4l2_fill_mbus_format_mplane(&format.format, &v4l2_fmt->fmt.pix_mp);
ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt, NULL, &format);
if (ret)
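The two-step negotiation in __ceu_try_fmt() above proposes the media bus code matching the requested YUV ordering and only retries with the initialization-time default on -EINVAL. Roughly, as a hedged sketch where try_code() stands in for the V4L2_SUBDEV_FORMAT_TRY set_fmt call on the sensor (it is a hypothetical helper, not a kernel API):

#include <errno.h>

int try_code(unsigned int code);    /* hypothetical TRY set_fmt wrapper */

static int negotiate_code(unsigned int preferred, unsigned int fallback,
                          unsigned int *active)
{
    int ret = try_code(preferred);

    if (!ret) {
        *active = preferred;
        return 0;
    }
    if (ret != -EINVAL)     /* only -EINVAL triggers the fallback */
        return ret;

    ret = try_code(fallback);
    if (ret)
        return ret;

    *active = fallback;
    return 0;
}

The caller then programs both the sensor and the CEU with whichever code survived, which is what the reported *sd_mbus_code is for.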
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
index fa1ba98c96dc..356821c2dacf 100644
--- a/drivers/media/platform/rockchip/rga/rga-buf.c
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
@@ -64,43 +64,44 @@ static void rga_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
+static void rga_buf_return_buffers(struct vb2_queue *q,
+ enum vb2_buffer_state state)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ break;
+ v4l2_m2m_buf_done(vbuf, state);
+ }
+}
+
static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct rga_ctx *ctx = vb2_get_drv_priv(q);
struct rockchip_rga *rga = ctx->rga;
- int ret, i;
+ int ret;
ret = pm_runtime_get_sync(rga->dev);
-
- if (!ret)
- return 0;
-
- for (i = 0; i < q->num_buffers; ++i) {
- if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(q->bufs[i]),
- VB2_BUF_STATE_QUEUED);
- }
+ if (ret < 0) {
+ rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
+ return ret;
}
- return ret;
+ return 0;
}
static void rga_buf_stop_streaming(struct vb2_queue *q)
{
struct rga_ctx *ctx = vb2_get_drv_priv(q);
struct rockchip_rga *rga = ctx->rga;
- struct vb2_v4l2_buffer *vbuf;
-
- for (;;) {
- if (V4L2_TYPE_IS_OUTPUT(q->type))
- vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- else
- vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- if (!vbuf)
- break;
- v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
- }
+ rga_buf_return_buffers(q, VB2_BUF_STATE_ERROR);
pm_runtime_put(rga->dev);
}
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index d508a8ba6f89..ab5a6f95044a 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -39,18 +39,6 @@
static int debug;
module_param(debug, int, 0644);
-static void job_abort(void *prv)
-{
- struct rga_ctx *ctx = prv;
- struct rockchip_rga *rga = ctx->rga;
-
- if (!rga->curr) /* No job currently running */
- return;
-
- wait_event_timeout(rga->irq_queue,
- !rga->curr, msecs_to_jiffies(RGA_TIMEOUT));
-}
-
static void device_run(void *prv)
{
struct rga_ctx *ctx = prv;
@@ -104,8 +92,6 @@ static irqreturn_t rga_isr(int irq, void *prv)
v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
v4l2_m2m_job_finish(rga->m2m_dev, ctx->fh.m2m_ctx);
-
- wake_up(&rga->irq_queue);
}
return IRQ_HANDLED;
@@ -113,7 +99,6 @@ static irqreturn_t rga_isr(int irq, void *prv)
static struct v4l2_m2m_ops rga_m2m_ops = {
.device_run = device_run,
- .job_abort = job_abort,
};
static int
@@ -838,8 +823,6 @@ static int rga_probe(struct platform_device *pdev)
spin_lock_init(&rga->ctrl_lock);
mutex_init(&rga->mutex);
- init_waitqueue_head(&rga->irq_queue);
-
ret = rga_parse_dt(rga);
if (ret)
dev_err(&pdev->dev, "Unable to parse OF data\n");
@@ -882,7 +865,6 @@ static int rga_probe(struct platform_device *pdev)
vfd->v4l2_dev = &rga->v4l2_dev;
video_set_drvdata(vfd, rga);
- snprintf(vfd->name, sizeof(vfd->name), "%s", rga_videodev.name);
rga->vfd = vfd;
platform_set_drvdata(pdev, rga);
@@ -943,7 +925,7 @@ static int rga_remove(struct platform_device *pdev)
{
struct rockchip_rga *rga = platform_get_drvdata(pdev);
- dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, &rga->cmdbuf_virt,
+ dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, rga->cmdbuf_virt,
rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
free_pages((unsigned long)rga->src_mmu_pages, 3);
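The one-character rga fix above matters: dma_free_attrs() must receive the CPU pointer that dma_alloc_attrs() returned, not the address of the variable holding it. The alloc/free pairing, sketched with a hypothetical buffer:

#include <linux/dma-mapping.h>

static void *cmdbuf;            /* CPU address returned by the allocator */
static dma_addr_t cmdbuf_phy;   /* matching DMA handle */

static int alloc_cmdbuf(struct device *dev, size_t size)
{
    cmdbuf = dma_alloc_attrs(dev, size, &cmdbuf_phy, GFP_KERNEL,
                             DMA_ATTR_WRITE_COMBINE);
    return cmdbuf ? 0 : -ENOMEM;
}

static void free_cmdbuf(struct device *dev, size_t size)
{
    /* same CPU pointer back, not &cmdbuf */
    dma_free_attrs(dev, size, cmdbuf, cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
}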
diff --git a/drivers/media/platform/rockchip/rga/rga.h b/drivers/media/platform/rockchip/rga/rga.h
index 5d43e7ea88af..72d8a159fa7b 100644
--- a/drivers/media/platform/rockchip/rga/rga.h
+++ b/drivers/media/platform/rockchip/rga/rga.h
@@ -86,8 +86,6 @@ struct rockchip_rga {
/* ctrl parm lock */
spinlock_t ctrl_lock;
- wait_queue_head_t irq_queue;
-
struct rga_ctx *curr;
dma_addr_t cmdbuf_phy;
void *cmdbuf_virt;
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 9ab8e7ee2e1e..c02dce8b4c6c 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -117,6 +117,8 @@ static int sensor_set_power(struct camif_dev *camif, int on)
if (camif->sensor.power_count == !on)
err = v4l2_subdev_call(sensor->sd, core, s_power, on);
+ if (err == -ENOIOCTLCMD)
+ err = 0;
if (!err)
sensor->power_count += on ? 1 : -1;
@@ -599,7 +601,7 @@ static __poll_t s3c_camif_poll(struct file *file,
mutex_lock(&camif->lock);
if (vp->owner && vp->owner != file->private_data)
- ret = -EBUSY;
+ ret = EPOLLERR;
else
ret = vb2_poll(&vp->vb_queue, file, wait);
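The s3c-camif change above is a correctness fix: a .poll handler returns a __poll_t event mask, so a negative errno such as -EBUSY would be misread as a set of events; errors are reported as EPOLLERR instead. Schematically (the ownership check and queue are placeholders for the driver's own state):

#include <linux/poll.h>
#include <media/videobuf2-v4l2.h>

static struct vb2_queue queue;          /* placeholder queue */

bool queue_owned_by_someone_else(struct file *file);  /* hypothetical */

static __poll_t example_poll(struct file *file, poll_table *wait)
{
    if (queue_owned_by_someone_else(file))
        return EPOLLERR;        /* a mask bit, not -EBUSY */

    return vb2_poll(&queue, file, wait);
}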
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 66aa8cf1d048..e901201b6fcc 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -142,6 +142,8 @@ static const struct vb2_ops g2d_qops = {
.queue_setup = g2d_queue_setup,
.buf_prepare = g2d_buf_prepare,
.buf_queue = g2d_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static int queue_init(void *priv, struct vb2_queue *src_vq,
@@ -481,19 +483,6 @@ static int vidioc_s_crop(struct file *file, void *prv, const struct v4l2_crop *c
return 0;
}
-static void job_abort(void *prv)
-{
- struct g2d_ctx *ctx = prv;
- struct g2d_dev *dev = ctx->dev;
-
- if (dev->curr == NULL) /* No job currently running */
- return;
-
- wait_event_timeout(dev->irq_queue,
- dev->curr == NULL,
- msecs_to_jiffies(G2D_TIMEOUT));
-}
-
static void device_run(void *prv)
{
struct g2d_ctx *ctx = prv;
@@ -563,7 +552,6 @@ static irqreturn_t g2d_isr(int irq, void *prv)
v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
dev->curr = NULL;
- wake_up(&dev->irq_queue);
return IRQ_HANDLED;
}
@@ -613,7 +601,6 @@ static const struct video_device g2d_videodev = {
static const struct v4l2_m2m_ops g2d_m2m_ops = {
.device_run = device_run,
- .job_abort = job_abort,
};
static const struct of_device_id exynos_g2d_match[];
@@ -633,7 +620,6 @@ static int g2d_probe(struct platform_device *pdev)
spin_lock_init(&dev->ctrl_lock);
mutex_init(&dev->mutex);
atomic_set(&dev->num_inst, 0);
- init_waitqueue_head(&dev->irq_queue);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -702,7 +688,6 @@ static int g2d_probe(struct platform_device *pdev)
goto rel_vdev;
}
video_set_drvdata(vfd, dev);
- snprintf(vfd->name, sizeof(vfd->name), "%s", g2d_videodev.name);
dev->vfd = vfd;
v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n",
vfd->num);
diff --git a/drivers/media/platform/s5p-g2d/g2d.h b/drivers/media/platform/s5p-g2d/g2d.h
index dd812b557e87..9ffb458a1b93 100644
--- a/drivers/media/platform/s5p-g2d/g2d.h
+++ b/drivers/media/platform/s5p-g2d/g2d.h
@@ -31,7 +31,6 @@ struct g2d_dev {
struct g2d_ctx *curr;
struct g2d_variant *variant;
int irq;
- wait_queue_head_t irq_queue;
};
struct g2d_frame {
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 79b63da27f53..04fd2e0493c0 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -2467,26 +2467,19 @@ static int s5p_jpeg_job_ready(void *priv)
return 1;
}
-static void s5p_jpeg_job_abort(void *priv)
-{
-}
-
static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
.device_run = s5p_jpeg_device_run,
.job_ready = s5p_jpeg_job_ready,
- .job_abort = s5p_jpeg_job_abort,
};
static struct v4l2_m2m_ops exynos3250_jpeg_m2m_ops = {
.device_run = exynos3250_jpeg_device_run,
.job_ready = s5p_jpeg_job_ready,
- .job_abort = s5p_jpeg_job_abort,
};
static struct v4l2_m2m_ops exynos4_jpeg_m2m_ops = {
.device_run = exynos4_jpeg_device_run,
.job_ready = s5p_jpeg_job_ready,
- .job_abort = s5p_jpeg_job_abort,
};
/*
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index a80251ed3143..927a1235408d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -254,24 +254,24 @@ static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_buf *dst_buf, *src_buf;
- size_t dec_y_addr;
+ struct s5p_mfc_buf *dst_buf, *src_buf;
+ u32 dec_y_addr;
unsigned int frame_type;
/* Make sure we actually have a new frame before continuing. */
frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
return;
- dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
+ dec_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
/* Copy timestamp / timecode from decoded src to dst and set
appropriate flags. */
src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
- == dec_y_addr) {
- dst_buf->b->timecode =
- src_buf->b->timecode;
+ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
+
+ if (addr == dec_y_addr) {
+ dst_buf->b->timecode = src_buf->b->timecode;
dst_buf->b->vb2_buf.timestamp =
src_buf->b->vb2_buf.timestamp;
dst_buf->b->flags &=
@@ -307,10 +307,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
{
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *dst_buf;
- size_t dspl_y_addr;
+ u32 dspl_y_addr;
unsigned int frame_type;
- dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
+ dspl_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
if (IS_MFCV6_PLUS(dev))
frame_type = s5p_mfc_hw_call(dev->mfc_ops,
get_disp_frame_type, ctx);
@@ -329,9 +329,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
/* The MFC returns the address of the buffer, now we have to
* check which videobuf it corresponds to */
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
+ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
+
/* Check if this is the buffer we're looking for */
- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
- == dspl_y_addr) {
+ if (addr == dspl_y_addr) {
list_del(&dst_buf->list);
ctx->dst_queue_cnt--;
dst_buf->b->sequence = ctx->sequence;
@@ -1449,8 +1450,7 @@ static int s5p_mfc_remove(struct platform_device *pdev)
static int s5p_mfc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+ struct s5p_mfc_dev *m_dev = dev_get_drvdata(dev);
int ret;
if (m_dev->num_inst == 0)
@@ -1484,8 +1484,7 @@ static int s5p_mfc_suspend(struct device *dev)
static int s5p_mfc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+ struct s5p_mfc_dev *m_dev = dev_get_drvdata(dev);
if (m_dev->num_inst == 0)
return 0;
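The casts added in the frame-handling paths above address a width mismatch: vb2_dma_contig_plane_dma_addr() yields a dma_addr_t, which may be 64 bits wide, while the MFC reports decoded-frame addresses in 32-bit registers, so a mixed-width comparison could never match. The comparison, reduced to its essence as an illustrative helper:

#include <linux/types.h>

/* Truncate both sides identically before comparing against the
 * 32-bit address the hardware reported. */
static bool plane_matches_hw_addr(dma_addr_t plane, u32 hw_addr)
{
    return (u32)plane == hw_addr;
}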
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 570f391f2cfd..3ad4f5073002 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -692,12 +692,12 @@ static struct mfc_control controls[] = {
.default_value = 10,
},
{
- .id = V4L2_CID_MPEG_VIDEO_VPX_PROFILE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .minimum = 0,
- .maximum = 3,
- .step = 1,
- .default_value = 0,
+ .id = V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
+ .maximum = V4L2_MPEG_VIDEO_VP8_PROFILE_3,
+ .default_value = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
+ .menu_skip_mask = 0,
},
{
.id = V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP,
@@ -2057,7 +2057,7 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP:
p->codec.vp8.rc_p_frame_qp = ctrl->val;
break;
- case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
p->codec.vp8.profile = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
@@ -2711,4 +2711,3 @@ void s5p_mfc_enc_init(struct s5p_mfc_ctx *ctx)
f.fmt.pix_mp.pixelformat = DEF_DST_FMT_ENC;
ctx->dst_fmt = find_format(&f, MFC_FMT_ENC);
}
-
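The control table change above converts the VP8 profile from a raw integer into the standard menu control. Outside a driver-private table like MFC's, the same control would typically be created through the control framework; a hedged sketch, with the handler and ops assumed to come from the driver:

#include <media/v4l2-ctrls.h>

static int add_vp8_profile_ctrl(struct v4l2_ctrl_handler *hdl,
                                const struct v4l2_ctrl_ops *ops)
{
    v4l2_ctrl_new_std_menu(hdl, ops, V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
                           V4L2_MPEG_VIDEO_VP8_PROFILE_3,   /* max */
                           0,                               /* skip mask */
                           V4L2_MPEG_VIDEO_VP8_PROFILE_0);  /* default */
    return hdl->error;
}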
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 1a0cde017fdf..1d274c64de09 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* sh-mobile VEU mem2mem driver
*
* Copyright (C) 2012 Renesas Electronics Corporation
* Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
* Copyright (C) 2008 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the version 2 of the GNU General Public License as
- * published by the Free Software Foundation
*/
#include <linux/err.h>
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 4dccf29e9d78..6135e13e24d4 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Video Output Unit (VOU) driver
*
* Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/dma-mapping.h>
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 9897213f2618..0a2c0daaffef 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Driver for SuperH Mobile CEU interface
*
@@ -7,11 +8,6 @@
*
* Copyright (C) 2006, Sascha Hauer, Pengutronix
* Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/init.h>
diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c
index ce00e90d4e3c..6745a6e3f464 100644
--- a/drivers/media/platform/soc_camera/soc_camera_platform.c
+++ b/drivers/media/platform/soc_camera/soc_camera_platform.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic Platform Camera Driver
*
* Copyright (C) 2008 Magnus Damm
* Based on mt9m001 driver,
* Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/init.h>
diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
index 232d508c5b66..0b42acd4e3a6 100644
--- a/drivers/media/platform/sti/delta/delta-v4l2.c
+++ b/drivers/media/platform/sti/delta/delta-v4l2.c
@@ -339,22 +339,6 @@ static void register_decoders(struct delta_dev *delta)
}
}
-static void delta_lock(void *priv)
-{
- struct delta_ctx *ctx = priv;
- struct delta_dev *delta = ctx->dev;
-
- mutex_lock(&delta->lock);
-}
-
-static void delta_unlock(void *priv)
-{
- struct delta_ctx *ctx = priv;
- struct delta_dev *delta = ctx->dev;
-
- mutex_unlock(&delta->lock);
-}
-
static int delta_open_decoder(struct delta_ctx *ctx, u32 streamformat,
u32 pixelformat, const struct delta_dec **pdec)
{
@@ -1099,8 +1083,6 @@ static const struct v4l2_m2m_ops delta_m2m_ops = {
.device_run = delta_device_run,
.job_ready = delta_job_ready,
.job_abort = delta_job_abort,
- .lock = delta_lock,
- .unlock = delta_unlock,
};
/*
diff --git a/drivers/media/platform/sti/hva/hva-v4l2.c b/drivers/media/platform/sti/hva/hva-v4l2.c
index 15080cb00fa7..5a807c7c5e79 100644
--- a/drivers/media/platform/sti/hva/hva-v4l2.c
+++ b/drivers/media/platform/sti/hva/hva-v4l2.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <media/v4l2-event.h>
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index 2e1933d872ee..721564176d8c 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -22,7 +22,9 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/videodev2.h>
@@ -84,19 +86,14 @@
enum state {
STOPPED = 0,
+ WAIT_FOR_BUFFER,
RUNNING,
- STOPPING,
};
#define MIN_WIDTH 16U
-#define MAX_WIDTH 2048U
+#define MAX_WIDTH 2592U
#define MIN_HEIGHT 16U
-#define MAX_HEIGHT 2048U
-
-#define MIN_JPEG_WIDTH 16U
-#define MAX_JPEG_WIDTH 2592U
-#define MIN_JPEG_HEIGHT 16U
-#define MAX_JPEG_HEIGHT 2592U
+#define MAX_HEIGHT 2592U
#define TIMEOUT_MS 1000
@@ -194,7 +191,7 @@ static inline void reg_clear(void __iomem *base, u32 reg, u32 mask)
reg_write(base, reg, reg_read(base, reg) & ~mask);
}
-static int dcmi_start_capture(struct stm32_dcmi *dcmi);
+static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf);
static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
struct dcmi_buf *buf,
@@ -206,6 +203,8 @@ static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
if (!buf)
return;
+ list_del_init(&buf->list);
+
vbuf = &buf->vb;
vbuf->sequence = dcmi->sequence++;
@@ -223,6 +222,8 @@ static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
{
+ struct dcmi_buf *buf;
+
spin_lock_irq(&dcmi->irqlock);
if (dcmi->state != RUNNING) {
@@ -232,34 +233,30 @@ static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
/* Restart a new DMA transfer with next buffer */
if (list_empty(&dcmi->buffers)) {
- dev_err(dcmi->dev, "%s: No more buffer queued, cannot capture buffer\n",
- __func__);
- dcmi->errors_count++;
- dcmi->active = NULL;
-
+ dev_dbg(dcmi->dev, "Capture restart is deferred to next buffer queueing\n");
+ dcmi->state = WAIT_FOR_BUFFER;
spin_unlock_irq(&dcmi->irqlock);
- return -EINVAL;
+ return 0;
}
-
- dcmi->active = list_entry(dcmi->buffers.next,
- struct dcmi_buf, list);
- list_del_init(&dcmi->active->list);
+ buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
+ dcmi->active = buf;
spin_unlock_irq(&dcmi->irqlock);
- return dcmi_start_capture(dcmi);
+ return dcmi_start_capture(dcmi, buf);
}
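Rather than failing with -EINVAL when the buffer list runs dry, the restart path above now parks the state machine in WAIT_FOR_BUFFER and lets the buf_queue hook kick capture again. The two halves of that handshake, sketched with invented ctx/buf types and a hypothetical start_capture() (locking omitted for brevity):

#include <linux/list.h>

struct buf { struct list_head list; };
struct ctx {
    enum { STOPPED, WAIT_FOR_BUFFER, RUNNING } state;
    struct list_head buffers;
    struct buf *active;
};

int start_capture(struct ctx *c, struct buf *b);    /* hypothetical */

/* on DMA completion: */
static int restart_capture(struct ctx *c)
{
    if (list_empty(&c->buffers)) {
        c->state = WAIT_FOR_BUFFER;     /* defer, do not fail */
        return 0;
    }
    c->active = list_first_entry(&c->buffers, struct buf, list);
    return start_capture(c, c->active);
}

/* on buffer queueing: */
static void queue_buffer(struct ctx *c, struct buf *b)
{
    list_add_tail(&b->list, &c->buffers);
    if (c->state == WAIT_FOR_BUFFER) {
        c->state = RUNNING;
        start_capture(c, b);    /* resume the deferred capture */
    }
}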
static void dcmi_dma_callback(void *param)
{
struct stm32_dcmi *dcmi = (struct stm32_dcmi *)param;
- struct dma_chan *chan = dcmi->dma_chan;
struct dma_tx_state state;
enum dma_status status;
struct dcmi_buf *buf = dcmi->active;
+ spin_lock_irq(&dcmi->irqlock);
+
/* Check DMA status */
- status = dmaengine_tx_status(chan, dcmi->dma_cookie, &state);
+ status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
switch (status) {
case DMA_IN_PROGRESS:
@@ -270,6 +267,9 @@ static void dcmi_dma_callback(void *param)
break;
case DMA_ERROR:
dev_err(dcmi->dev, "%s: Received DMA_ERROR\n", __func__);
+
+ /* Return buffer to V4L2 in error state */
+ dcmi_buffer_done(dcmi, buf, 0, -EIO);
break;
case DMA_COMPLETE:
dev_dbg(dcmi->dev, "%s: Received DMA_COMPLETE\n", __func__);
@@ -277,15 +277,19 @@ static void dcmi_dma_callback(void *param)
/* Return buffer to V4L2 */
dcmi_buffer_done(dcmi, buf, buf->size, 0);
+ spin_unlock_irq(&dcmi->irqlock);
+
/* Restart capture */
if (dcmi_restart_capture(dcmi))
dev_err(dcmi->dev, "%s: Cannot restart capture on DMA complete\n",
__func__);
- break;
+ return;
default:
dev_err(dcmi->dev, "%s: Received unknown status\n", __func__);
break;
}
+
+ spin_unlock_irq(&dcmi->irqlock);
}
static int dcmi_start_dma(struct stm32_dcmi *dcmi,
@@ -313,10 +317,11 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
/* Prepare a DMA transaction */
desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr,
buf->size,
- DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
if (!desc) {
- dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer size %zu\n",
- __func__, buf->size);
+ dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer phy=%pad size=%zu\n",
+ __func__, &buf->paddr, buf->size);
return -EINVAL;
}
@@ -336,10 +341,9 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi,
return 0;
}
-static int dcmi_start_capture(struct stm32_dcmi *dcmi)
+static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf)
{
int ret;
- struct dcmi_buf *buf = dcmi->active;
if (!buf)
return -EINVAL;
@@ -382,7 +386,6 @@ static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
{
struct dma_tx_state state;
enum dma_status status;
- struct dma_chan *chan = dcmi->dma_chan;
struct dcmi_buf *buf = dcmi->active;
if (!buf)
@@ -390,8 +393,7 @@ static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
/*
* Because of variable JPEG buffer size sent by sensor,
- * DMA transfer never completes due to transfer size
- * never reached.
+ * DMA transfer never completes since the programmed size is never reached.
* In order to ensure that all the JPEG data are transferred
* in active buffer memory, DMA is drained.
* Then DMA tx status gives the amount of data transferred
@@ -400,10 +402,10 @@ static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
*/
/* Drain DMA */
- dmaengine_synchronize(chan);
+ dmaengine_synchronize(dcmi->dma_chan);
/* Get DMA residue to get JPEG size */
- status = dmaengine_tx_status(chan, dcmi->dma_cookie, &state);
+ status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
if (status != DMA_ERROR && state.residue < buf->size) {
/* Return JPEG buffer to V4L2 with received JPEG buffer size */
dcmi_buffer_done(dcmi, buf, buf->size - state.residue, 0);
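For JPEG the DMA transfer is sized for the worst case and never completes, so the received payload has to be recovered from the residue, as the comment above explains. The arithmetic, isolated into an illustrative helper (chan, cookie and buf_size are assumed to be the driver's own):

#include <linux/dmaengine.h>

static size_t jpeg_payload_size(struct dma_chan *chan, dma_cookie_t cookie,
                                size_t buf_size)
{
    struct dma_tx_state state;
    enum dma_status status;

    dmaengine_synchronize(chan);    /* drain outstanding completions */
    status = dmaengine_tx_status(chan, cookie, &state);
    if (status == DMA_ERROR || state.residue >= buf_size)
        return 0;                   /* nothing usable was received */

    return buf_size - state.residue; /* bytes the DMA actually wrote */
}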
@@ -430,18 +432,6 @@ static irqreturn_t dcmi_irq_thread(int irq, void *arg)
spin_lock_irq(&dcmi->irqlock);
- /* Stop capture is required */
- if (dcmi->state == STOPPING) {
- reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
-
- dcmi->state = STOPPED;
-
- complete(&dcmi->complete);
-
- spin_unlock_irq(&dcmi->irqlock);
- return IRQ_HANDLED;
- }
-
if ((dcmi->misr & IT_OVR) || (dcmi->misr & IT_ERR)) {
dcmi->errors_count++;
if (dcmi->misr & IT_OVR)
@@ -495,8 +485,6 @@ static int dcmi_queue_setup(struct vb2_queue *vq,
*nplanes = 1;
sizes[0] = size;
- dcmi->active = NULL;
-
dev_dbg(dcmi->dev, "Setup queue, count=%d, size=%d\n",
*nbuffers, size);
@@ -554,21 +542,24 @@ static void dcmi_buf_queue(struct vb2_buffer *vb)
spin_lock_irq(&dcmi->irqlock);
- if (dcmi->state == RUNNING && !dcmi->active) {
+ /* Enqueue to video buffers list */
+ list_add_tail(&buf->list, &dcmi->buffers);
+
+ if (dcmi->state == WAIT_FOR_BUFFER) {
+ dcmi->state = RUNNING;
dcmi->active = buf;
dev_dbg(dcmi->dev, "Starting capture on buffer[%d] queued\n",
buf->vb.vb2_buf.index);
spin_unlock_irq(&dcmi->irqlock);
- if (dcmi_start_capture(dcmi))
+ if (dcmi_start_capture(dcmi, buf))
dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
__func__);
- } else {
- /* Enqueue to video buffers list */
- list_add_tail(&buf->list, &dcmi->buffers);
- spin_unlock_irq(&dcmi->irqlock);
+ return;
}
+
+ spin_unlock_irq(&dcmi->irqlock);
}
static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -578,9 +569,9 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
u32 val = 0;
int ret;
- ret = clk_enable(dcmi->mclk);
+ ret = pm_runtime_get_sync(dcmi->dev);
if (ret) {
- dev_err(dcmi->dev, "%s: Failed to start streaming, cannot enable clock\n",
+ dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync\n",
__func__);
goto err_release_buffers;
}
@@ -590,7 +581,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
if (ret && ret != -ENOIOCTLCMD) {
dev_err(dcmi->dev, "%s: Failed to start streaming, subdev streamon error",
__func__);
- goto err_disable_clock;
+ goto err_pm_put;
}
spin_lock_irq(&dcmi->irqlock);
@@ -636,13 +627,10 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
/* Enable dcmi */
reg_set(dcmi->regs, DCMI_CR, CR_ENABLE);
- dcmi->state = RUNNING;
-
dcmi->sequence = 0;
dcmi->errors_count = 0;
dcmi->overrun_count = 0;
dcmi->buffers_count = 0;
- dcmi->active = NULL;
/*
* Start transfer if at least one buffer has been queued,
@@ -650,17 +638,20 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
*/
if (list_empty(&dcmi->buffers)) {
dev_dbg(dcmi->dev, "Start streaming is deferred to next buffer queueing\n");
+ dcmi->state = WAIT_FOR_BUFFER;
spin_unlock_irq(&dcmi->irqlock);
return 0;
}
- dcmi->active = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
- list_del_init(&dcmi->active->list);
+ buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
+ dcmi->active = buf;
+
+ dcmi->state = RUNNING;
dev_dbg(dcmi->dev, "Start streaming, starting capture\n");
spin_unlock_irq(&dcmi->irqlock);
- ret = dcmi_start_capture(dcmi);
+ ret = dcmi_start_capture(dcmi, buf);
if (ret) {
dev_err(dcmi->dev, "%s: Start streaming failed, cannot start capture\n",
__func__);
@@ -675,8 +666,8 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
err_subdev_streamoff:
v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
-err_disable_clock:
- clk_disable(dcmi->mclk);
+err_pm_put:
+ pm_runtime_put(dcmi->dev);
err_release_buffers:
spin_lock_irq(&dcmi->irqlock);
@@ -684,15 +675,11 @@ err_release_buffers:
* Return all buffers to vb2 in QUEUED state.
* This will give ownership back to userspace
*/
- if (dcmi->active) {
- buf = dcmi->active;
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
- dcmi->active = NULL;
- }
list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
list_del_init(&buf->list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
}
+ dcmi->active = NULL;
spin_unlock_irq(&dcmi->irqlock);
return ret;
@@ -702,8 +689,6 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
{
struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
struct dcmi_buf *buf, *node;
- unsigned long time_ms = msecs_to_jiffies(TIMEOUT_MS);
- long timeout;
int ret;
/* Disable stream on the sub device */
@@ -713,13 +698,6 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
__func__, ret);
spin_lock_irq(&dcmi->irqlock);
- dcmi->state = STOPPING;
- spin_unlock_irq(&dcmi->irqlock);
-
- timeout = wait_for_completion_interruptible_timeout(&dcmi->complete,
- time_ms);
-
- spin_lock_irq(&dcmi->irqlock);
/* Disable interruptions */
reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
@@ -727,29 +705,21 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
/* Disable DCMI */
reg_clear(dcmi->regs, DCMI_CR, CR_ENABLE);
- if (!timeout) {
- dev_err(dcmi->dev, "%s: Timeout during stop streaming\n",
- __func__);
- dcmi->state = STOPPED;
- }
-
/* Return all queued buffers to vb2 in ERROR state */
- if (dcmi->active) {
- buf = dcmi->active;
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- dcmi->active = NULL;
- }
list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
list_del_init(&buf->list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
+ dcmi->active = NULL;
+ dcmi->state = STOPPED;
+
spin_unlock_irq(&dcmi->irqlock);
/* Stop all pending DMA operations */
dmaengine_terminate_all(dcmi->dma_chan);
- clk_disable(dcmi->mclk);
+ pm_runtime_put(dcmi->dev);
if (dcmi->errors_count)
dev_warn(dcmi->dev, "Some errors found while streaming: errors=%d (overrun=%d), buffers=%d\n",
@@ -843,14 +813,8 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
}
/* Limit to hardware capabilities */
- if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
- pix->width = clamp(pix->width, MIN_JPEG_WIDTH, MAX_JPEG_WIDTH);
- pix->height =
- clamp(pix->height, MIN_JPEG_HEIGHT, MAX_JPEG_HEIGHT);
- } else {
- pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
- pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);
- }
+ pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
+ pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);
/* No crop if JPEG is requested */
do_crop = dcmi->do_crop && (pix->pixelformat != V4L2_PIX_FMT_JPEG);
@@ -1605,23 +1569,20 @@ static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
struct device_node *ep = NULL;
struct device_node *remote;
- while (1) {
- ep = of_graph_get_next_endpoint(node, ep);
- if (!ep)
- return -EINVAL;
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (!ep)
+ return -EINVAL;
- remote = of_graph_get_remote_port_parent(ep);
- if (!remote) {
- of_node_put(ep);
- return -EINVAL;
- }
+ remote = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (!remote)
+ return -EINVAL;
- /* Remote node to connect */
- dcmi->entity.node = remote;
- dcmi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- dcmi->entity.asd.match.fwnode = of_fwnode_handle(remote);
- return 0;
- }
+ /* Remote node to connect */
+ dcmi->entity.node = remote;
+ dcmi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ dcmi->entity.asd.match.fwnode = of_fwnode_handle(remote);
+ return 0;
}
static int dcmi_graph_init(struct stm32_dcmi *dcmi)
@@ -1696,23 +1657,20 @@ static int dcmi_probe(struct platform_device *pdev)
}
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep);
+ of_node_put(np);
if (ret) {
dev_err(&pdev->dev, "Could not parse the endpoint\n");
- of_node_put(np);
return -ENODEV;
}
if (ep.bus_type == V4L2_MBUS_CSI2) {
dev_err(&pdev->dev, "CSI bus not supported\n");
- of_node_put(np);
return -ENODEV;
}
dcmi->bus.flags = ep.bus.parallel.flags;
dcmi->bus.bus_width = ep.bus.parallel.bus_width;
dcmi->bus.data_shift = ep.bus.parallel.data_shift;
- of_node_put(np);
-
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(&pdev->dev, "Could not get irq\n");
@@ -1751,12 +1709,6 @@ static int dcmi_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- ret = clk_prepare(mclk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to prepare mclk %p\n", mclk);
- goto err_dma_release;
- }
-
spin_lock_init(&dcmi->irqlock);
mutex_init(&dcmi->lock);
init_completion(&dcmi->complete);
@@ -1772,7 +1724,7 @@ static int dcmi_probe(struct platform_device *pdev)
/* Initialize the top-level structure */
ret = v4l2_device_register(&pdev->dev, &dcmi->v4l2_dev);
if (ret)
- goto err_clk_unprepare;
+ goto err_dma_release;
dcmi->vdev = video_device_alloc();
if (!dcmi->vdev) {
@@ -1832,14 +1784,15 @@ static int dcmi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Probe done\n");
platform_set_drvdata(pdev, dcmi);
+
+ pm_runtime_enable(&pdev->dev);
+
return 0;
err_device_release:
video_device_release(dcmi->vdev);
err_device_unregister:
v4l2_device_unregister(&dcmi->v4l2_dev);
-err_clk_unprepare:
- clk_unprepare(dcmi->mclk);
err_dma_release:
dma_release_channel(dcmi->dma_chan);
@@ -1850,20 +1803,72 @@ static int dcmi_remove(struct platform_device *pdev)
{
struct stm32_dcmi *dcmi = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
+
v4l2_async_notifier_unregister(&dcmi->notifier);
v4l2_device_unregister(&dcmi->v4l2_dev);
- clk_unprepare(dcmi->mclk);
+
dma_release_channel(dcmi->dma_chan);
return 0;
}
+static __maybe_unused int dcmi_runtime_suspend(struct device *dev)
+{
+ struct stm32_dcmi *dcmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(dcmi->mclk);
+
+ return 0;
+}
+
+static __maybe_unused int dcmi_runtime_resume(struct device *dev)
+{
+ struct stm32_dcmi *dcmi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(dcmi->mclk);
+ if (ret)
+ dev_err(dev, "%s: Failed to prepare_enable clock\n", __func__);
+
+ return ret;
+}
+
+static __maybe_unused int dcmi_suspend(struct device *dev)
+{
+ /* disable clock */
+ pm_runtime_force_suspend(dev);
+
+ /* change pinctrl state */
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static __maybe_unused int dcmi_resume(struct device *dev)
+{
+ /* restore pinctl default state */
+ pinctrl_pm_select_default_state(dev);
+
+ /* clock enable */
+ pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dcmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dcmi_suspend, dcmi_resume)
+ SET_RUNTIME_PM_OPS(dcmi_runtime_suspend,
+ dcmi_runtime_resume, NULL)
+};
+
static struct platform_driver stm32_dcmi_driver = {
.probe = dcmi_probe,
.remove = dcmi_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(stm32_dcmi_of_match),
+ .pm = &dcmi_pm_ops,
},
};
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index e395aa85c8ad..d70871d0ad2d 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -953,23 +953,6 @@ static void job_abort(void *priv)
ctx->aborting = 1;
}
-/*
- * Lock access to the device
- */
-static void vpe_lock(void *priv)
-{
- struct vpe_ctx *ctx = priv;
- struct vpe_dev *dev = ctx->dev;
- mutex_lock(&dev->dev_mutex);
-}
-
-static void vpe_unlock(void *priv)
-{
- struct vpe_ctx *ctx = priv;
- struct vpe_dev *dev = ctx->dev;
- mutex_unlock(&dev->dev_mutex);
-}
-
static void vpe_dump_regs(struct vpe_dev *dev)
{
#define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))
@@ -2434,8 +2417,6 @@ static const struct v4l2_m2m_ops m2m_ops = {
.device_run = device_run,
.job_ready = job_ready,
.job_abort = job_abort,
- .lock = vpe_lock,
- .unlock = vpe_unlock,
};
static int vpe_runtime_get(struct platform_device *pdev)
@@ -2485,7 +2466,6 @@ static void vpe_fw_cb(struct platform_device *pdev)
}
video_set_drvdata(vfd, dev);
- snprintf(vfd->name, sizeof(vfd->name), "%s", vpe_videodev.name);
dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n",
vfd->num);
}
diff --git a/drivers/media/platform/vicodec/Kconfig b/drivers/media/platform/vicodec/Kconfig
new file mode 100644
index 000000000000..2503bcb1529f
--- /dev/null
+++ b/drivers/media/platform/vicodec/Kconfig
@@ -0,0 +1,13 @@
+config VIDEO_VICODEC
+ tristate "Virtual Codec Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ default n
+ help
+ Driver for a Virtual Codec
+
+ Like the vim2m driver, it provides a video device node that
+ emulates a hardware codec entirely in software.
+
+ When in doubt, say N.
diff --git a/drivers/media/platform/vicodec/Makefile b/drivers/media/platform/vicodec/Makefile
new file mode 100644
index 000000000000..197229428953
--- /dev/null
+++ b/drivers/media/platform/vicodec/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+vicodec-objs := vicodec-core.o vicodec-codec.o
+
+obj-$(CONFIG_VIDEO_VICODEC) += vicodec.o
diff --git a/drivers/media/platform/vicodec/vicodec-codec.c b/drivers/media/platform/vicodec/vicodec-codec.c
new file mode 100644
index 000000000000..2d047646f614
--- /dev/null
+++ b/drivers/media/platform/vicodec/vicodec-codec.c
@@ -0,0 +1,797 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2016 Tom aan de Wiel
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * 8x8 Fast Walsh Hadamard Transform in sequency order based on the paper:
+ *
+ * A Recursive Algorithm for Sequency-Ordered Fast Walsh Transforms,
+ * R.D. Brown, 1977
+ */
+
+#include <linux/string.h>
+#include "vicodec-codec.h"
+
+#define ALL_ZEROS 15
+#define DEADZONE_WIDTH 20
+
+static const uint8_t zigzag[64] = {
+ 0,
+ 1, 8,
+ 2, 9, 16,
+ 3, 10, 17, 24,
+ 4, 11, 18, 25, 32,
+ 5, 12, 19, 26, 33, 40,
+ 6, 13, 20, 27, 34, 41, 48,
+ 7, 14, 21, 28, 35, 42, 49, 56,
+ 15, 22, 29, 36, 43, 50, 57,
+ 23, 30, 37, 44, 51, 58,
+ 31, 38, 45, 52, 59,
+ 39, 46, 53, 60,
+ 47, 54, 61,
+ 55, 62,
+ 63,
+};
+
+
+static int rlc(const s16 *in, __be16 *output, int blocktype)
+{
+ s16 block[8 * 8];
+ s16 *wp = block;
+ int i = 0;
+ int x, y;
+ int ret = 0;
+ int lastzero_run = 0;
+ int to_encode;
+
+ /* read in block from framebuffer */
+ for (y = 0; y < 8; y++) {
+ for (x = 0; x < 8; x++) {
+ *wp = in[x + y * 8];
+ wp++;
+ }
+ }
+
+ /* keep track of amount of trailing zeros */
+ for (i = 63; i >= 0 && !block[zigzag[i]]; i--)
+ lastzero_run++;
+
+ *output++ = (blocktype == PBLOCK ? htons(PFRAME_BIT) : 0);
+ ret++;
+
+ to_encode = 8 * 8 - (lastzero_run > 14 ? lastzero_run : 0);
+
+ i = 0;
+ while (i < to_encode) {
+ int cnt = 0;
+ int tmp;
+
+ /* count leading zeros */
+ while ((tmp = block[zigzag[i]]) == 0 && cnt < 14) {
+ cnt++;
+ i++;
+ if (i == to_encode) {
+ cnt--;
+ break;
+ }
+ }
+ /* 4 bits for the zero run, 12 for the coefficient (packed as coeff << 4) */
+ *output++ = htons((cnt | tmp << 4));
+ i++;
+ ret++;
+ }
+ if (lastzero_run > 14) {
+ *output = htons(ALL_ZEROS | 0);
+ ret++;
+ }
+
+ return ret;
+}
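+
+/*
+ * Worked example: a run of three zero coefficients followed by the
+ * coefficient 5 is emitted above as htons(3 | 5 << 4), i.e. the code
+ * word 0x0053.
+ */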
+
+/*
+ * In the worst case this function advances *rlc_in by 65 * 2 bytes:
+ * one s16 value for the header plus 8 * 8 coefficients of type s16.
+ */
+static s16 derlc(const __be16 **rlc_in, s16 *dwht_out)
+{
+ /* header */
+ const __be16 *input = *rlc_in;
+ s16 ret = ntohs(*input++);
+ int dec_count = 0;
+ s16 block[8 * 8 + 16];
+ s16 *wp = block;
+ int i;
+
+ /*
+ * Now decompress: each code word expands to up to 15 values
+ * (a run of zeroes followed by one coefficient), or fills the
+ * remainder of the 64 values with zeroes if it is the last
+ * word to expand.
+ *
+ * So block has to hold 8 * 8 + 16 values; the '+ 16' allows
+ * for overflow if the incoming data was malformed.
+ */
+ while (dec_count < 8 * 8) {
+ s16 in = ntohs(*input++);
+ int length = in & 0xf;
+ int coeff = in >> 4;
+
+ /* fill remainder with zeros */
+ if (length == 15) {
+ for (i = 0; i < 64 - dec_count; i++)
+ *wp++ = 0;
+ break;
+ }
+
+ for (i = 0; i < length; i++)
+ *wp++ = 0;
+ *wp++ = coeff;
+ dec_count += length + 1;
+ }
+
+ wp = block;
+
+ for (i = 0; i < 64; i++) {
+ int pos = zigzag[i];
+ int y = pos / 8;
+ int x = pos % 8;
+
+ dwht_out[x + y * 8] = *wp++;
+ }
+ *rlc_in = input;
+ return ret;
+}
+
+static const int quant_table[] = {
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3,
+ 2, 2, 2, 2, 2, 2, 3, 6,
+ 2, 2, 2, 2, 2, 3, 6, 6,
+ 2, 2, 2, 2, 3, 6, 6, 6,
+ 2, 2, 2, 3, 6, 6, 6, 6,
+ 2, 2, 3, 6, 6, 6, 6, 8,
+};
+
+static const int quant_table_p[] = {
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 6,
+ 3, 3, 3, 3, 3, 3, 6, 6,
+ 3, 3, 3, 3, 3, 6, 6, 9,
+ 3, 3, 3, 3, 6, 6, 9, 9,
+ 3, 3, 3, 6, 6, 9, 9, 10,
+};
+
+static void quantize_intra(s16 *coeff, s16 *de_coeff)
+{
+ const int *quant = quant_table;
+ int i, j;
+
+ for (j = 0; j < 8; j++) {
+ for (i = 0; i < 8; i++, quant++, coeff++, de_coeff++) {
+ *coeff >>= *quant;
+ if (*coeff >= -DEADZONE_WIDTH &&
+ *coeff <= DEADZONE_WIDTH)
+ *coeff = *de_coeff = 0;
+ else
+ *de_coeff = *coeff << *quant;
+ }
+ }
+}
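+
+/*
+ * Worked example: with a quantization shift of 2, a coefficient of 100
+ * becomes 25, which lies outside the deadzone and is de-quantized back
+ * to 25 << 2 = 100, while a coefficient of 60 becomes 15, falls inside
+ * the deadzone and is zeroed in both outputs.
+ */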
+
+static void dequantize_intra(s16 *coeff)
+{
+ const int *quant = quant_table;
+ int i, j;
+
+ for (j = 0; j < 8; j++)
+ for (i = 0; i < 8; i++, quant++, coeff++)
+ *coeff <<= *quant;
+}
+
+static void quantize_inter(s16 *coeff, s16 *de_coeff)
+{
+ const int *quant = quant_table_p;
+ int i, j;
+
+ for (j = 0; j < 8; j++) {
+ for (i = 0; i < 8; i++, quant++, coeff++, de_coeff++) {
+ *coeff >>= *quant;
+ if (*coeff >= -DEADZONE_WIDTH &&
+ *coeff <= DEADZONE_WIDTH)
+ *coeff = *de_coeff = 0;
+ else
+ *de_coeff = *coeff << *quant;
+ }
+ }
+}
+
+static void dequantize_inter(s16 *coeff)
+{
+ const int *quant = quant_table_p;
+ int i, j;
+
+ for (j = 0; j < 8; j++)
+ for (i = 0; i < 8; i++, quant++, coeff++)
+ *coeff <<= *quant;
+}
+
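+/*
+ * The 8-point transform below is computed as three butterfly stages
+ * (log2(8) = 3), applied first to the rows and then to the columns of
+ * the 8x8 block. One forward plus one inverse pass scales every value
+ * by 8 * 8 = 64, which ifwht() undoes with its final '>>= 6'.
+ */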
+static void fwht(const u8 *block, s16 *output_block, unsigned int stride,
+ unsigned int input_step, bool intra)
+{
+ /* we'll need more than 8 bits for the transformed coefficients */
+ s32 workspace1[8], workspace2[8];
+ const u8 *tmp = block;
+ s16 *out = output_block;
+ int add = intra ? 256 : 0;
+ unsigned int i;
+
+ /* stage 1 */
+ stride *= input_step;
+
+ for (i = 0; i < 8; i++, tmp += stride, out += 8) {
+ if (input_step == 1) {
+ workspace1[0] = tmp[0] + tmp[1] - add;
+ workspace1[1] = tmp[0] - tmp[1];
+
+ workspace1[2] = tmp[2] + tmp[3] - add;
+ workspace1[3] = tmp[2] - tmp[3];
+
+ workspace1[4] = tmp[4] + tmp[5] - add;
+ workspace1[5] = tmp[4] - tmp[5];
+
+ workspace1[6] = tmp[6] + tmp[7] - add;
+ workspace1[7] = tmp[6] - tmp[7];
+ } else {
+ workspace1[0] = tmp[0] + tmp[2] - add;
+ workspace1[1] = tmp[0] - tmp[2];
+
+ workspace1[2] = tmp[4] + tmp[6] - add;
+ workspace1[3] = tmp[4] - tmp[6];
+
+ workspace1[4] = tmp[8] + tmp[10] - add;
+ workspace1[5] = tmp[8] - tmp[10];
+
+ workspace1[6] = tmp[12] + tmp[14] - add;
+ workspace1[7] = tmp[12] - tmp[14];
+ }
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ out[0] = workspace2[0] + workspace2[4];
+ out[1] = workspace2[0] - workspace2[4];
+ out[2] = workspace2[1] - workspace2[5];
+ out[3] = workspace2[1] + workspace2[5];
+ out[4] = workspace2[2] + workspace2[6];
+ out[5] = workspace2[2] - workspace2[6];
+ out[6] = workspace2[3] - workspace2[7];
+ out[7] = workspace2[3] + workspace2[7];
+ }
+
+ out = output_block;
+
+ for (i = 0; i < 8; i++, out++) {
+ /* stage 1 */
+ workspace1[0] = out[0] + out[1 * 8];
+ workspace1[1] = out[0] - out[1 * 8];
+
+ workspace1[2] = out[2 * 8] + out[3 * 8];
+ workspace1[3] = out[2 * 8] - out[3 * 8];
+
+ workspace1[4] = out[4 * 8] + out[5 * 8];
+ workspace1[5] = out[4 * 8] - out[5 * 8];
+
+ workspace1[6] = out[6 * 8] + out[7 * 8];
+ workspace1[7] = out[6 * 8] - out[7 * 8];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+ /* stage 3 */
+ out[0 * 8] = workspace2[0] + workspace2[4];
+ out[1 * 8] = workspace2[0] - workspace2[4];
+ out[2 * 8] = workspace2[1] - workspace2[5];
+ out[3 * 8] = workspace2[1] + workspace2[5];
+ out[4 * 8] = workspace2[2] + workspace2[6];
+ out[5 * 8] = workspace2[2] - workspace2[6];
+ out[6 * 8] = workspace2[3] - workspace2[7];
+ out[7 * 8] = workspace2[3] + workspace2[7];
+ }
+}
+
+/*
+ * Not the nicest way of doing it, but P-blocks get twice the range of
+ * I-blocks: deltas span -255..255 instead of 0..255, so the values no
+ * longer fit in 8 bits and can be negative. This is simply a variant
+ * of fwht() that operates on 16-bit signed data.
+ */
+static void fwht16(const s16 *block, s16 *output_block, int stride, int intra)
+{
+ /* we'll need more than 8 bits for the transformed coefficients */
+ s32 workspace1[8], workspace2[8];
+ const s16 *tmp = block;
+ s16 *out = output_block;
+ int i;
+
+ for (i = 0; i < 8; i++, tmp += stride, out += 8) {
+ /* stage 1 */
+ workspace1[0] = tmp[0] + tmp[1];
+ workspace1[1] = tmp[0] - tmp[1];
+
+ workspace1[2] = tmp[2] + tmp[3];
+ workspace1[3] = tmp[2] - tmp[3];
+
+ workspace1[4] = tmp[4] + tmp[5];
+ workspace1[5] = tmp[4] - tmp[5];
+
+ workspace1[6] = tmp[6] + tmp[7];
+ workspace1[7] = tmp[6] - tmp[7];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ out[0] = workspace2[0] + workspace2[4];
+ out[1] = workspace2[0] - workspace2[4];
+ out[2] = workspace2[1] - workspace2[5];
+ out[3] = workspace2[1] + workspace2[5];
+ out[4] = workspace2[2] + workspace2[6];
+ out[5] = workspace2[2] - workspace2[6];
+ out[6] = workspace2[3] - workspace2[7];
+ out[7] = workspace2[3] + workspace2[7];
+ }
+
+ out = output_block;
+
+ for (i = 0; i < 8; i++, out++) {
+ /* stage 1 */
+ workspace1[0] = out[0] + out[1*8];
+ workspace1[1] = out[0] - out[1*8];
+
+ workspace1[2] = out[2*8] + out[3*8];
+ workspace1[3] = out[2*8] - out[3*8];
+
+ workspace1[4] = out[4*8] + out[5*8];
+ workspace1[5] = out[4*8] - out[5*8];
+
+ workspace1[6] = out[6*8] + out[7*8];
+ workspace1[7] = out[6*8] - out[7*8];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ out[0*8] = workspace2[0] + workspace2[4];
+ out[1*8] = workspace2[0] - workspace2[4];
+ out[2*8] = workspace2[1] - workspace2[5];
+ out[3*8] = workspace2[1] + workspace2[5];
+ out[4*8] = workspace2[2] + workspace2[6];
+ out[5*8] = workspace2[2] - workspace2[6];
+ out[6*8] = workspace2[3] - workspace2[7];
+ out[7*8] = workspace2[3] + workspace2[7];
+ }
+}
+
+static void ifwht(const s16 *block, s16 *output_block, int intra)
+{
+ /*
+ * We'll need more than 8 bits for the transformed
+ * coefficients, so use the native integer width of the CPU.
+ */
+ int workspace1[8], workspace2[8];
+ int inter = intra ? 0 : 1;
+ const s16 *tmp = block;
+ s16 *out = output_block;
+ int i;
+
+ for (i = 0; i < 8; i++, tmp += 8, out += 8) {
+ /* stage 1 */
+ workspace1[0] = tmp[0] + tmp[1];
+ workspace1[1] = tmp[0] - tmp[1];
+
+ workspace1[2] = tmp[2] + tmp[3];
+ workspace1[3] = tmp[2] - tmp[3];
+
+ workspace1[4] = tmp[4] + tmp[5];
+ workspace1[5] = tmp[4] - tmp[5];
+
+ workspace1[6] = tmp[6] + tmp[7];
+ workspace1[7] = tmp[6] - tmp[7];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ out[0] = workspace2[0] + workspace2[4];
+ out[1] = workspace2[0] - workspace2[4];
+ out[2] = workspace2[1] - workspace2[5];
+ out[3] = workspace2[1] + workspace2[5];
+ out[4] = workspace2[2] + workspace2[6];
+ out[5] = workspace2[2] - workspace2[6];
+ out[6] = workspace2[3] - workspace2[7];
+ out[7] = workspace2[3] + workspace2[7];
+ }
+
+ out = output_block;
+
+ for (i = 0; i < 8; i++, out++) {
+ /* stage 1 */
+ workspace1[0] = out[0] + out[1 * 8];
+ workspace1[1] = out[0] - out[1 * 8];
+
+ workspace1[2] = out[2 * 8] + out[3 * 8];
+ workspace1[3] = out[2 * 8] - out[3 * 8];
+
+ workspace1[4] = out[4 * 8] + out[5 * 8];
+ workspace1[5] = out[4 * 8] - out[5 * 8];
+
+ workspace1[6] = out[6 * 8] + out[7 * 8];
+ workspace1[7] = out[6 * 8] - out[7 * 8];
+
+ /* stage 2 */
+ workspace2[0] = workspace1[0] + workspace1[2];
+ workspace2[1] = workspace1[0] - workspace1[2];
+ workspace2[2] = workspace1[1] - workspace1[3];
+ workspace2[3] = workspace1[1] + workspace1[3];
+
+ workspace2[4] = workspace1[4] + workspace1[6];
+ workspace2[5] = workspace1[4] - workspace1[6];
+ workspace2[6] = workspace1[5] - workspace1[7];
+ workspace2[7] = workspace1[5] + workspace1[7];
+
+ /* stage 3 */
+ if (inter) {
+ int d;
+
+ out[0 * 8] = workspace2[0] + workspace2[4];
+ out[1 * 8] = workspace2[0] - workspace2[4];
+ out[2 * 8] = workspace2[1] - workspace2[5];
+ out[3 * 8] = workspace2[1] + workspace2[5];
+ out[4 * 8] = workspace2[2] + workspace2[6];
+ out[5 * 8] = workspace2[2] - workspace2[6];
+ out[6 * 8] = workspace2[3] - workspace2[7];
+ out[7 * 8] = workspace2[3] + workspace2[7];
+
+ for (d = 0; d < 8; d++)
+ out[8 * d] >>= 6;
+ } else {
+ int d;
+
+ out[0 * 8] = workspace2[0] + workspace2[4];
+ out[1 * 8] = workspace2[0] - workspace2[4];
+ out[2 * 8] = workspace2[1] - workspace2[5];
+ out[3 * 8] = workspace2[1] + workspace2[5];
+ out[4 * 8] = workspace2[2] + workspace2[6];
+ out[5 * 8] = workspace2[2] - workspace2[6];
+ out[6 * 8] = workspace2[3] - workspace2[7];
+ out[7 * 8] = workspace2[3] + workspace2[7];
+
+ for (d = 0; d < 8; d++) {
+ out[8 * d] >>= 6;
+ out[8 * d] += 128;
+ }
+ }
+ }
+}
+
+static void fill_encoder_block(const u8 *input, s16 *dst,
+ unsigned int stride, unsigned int input_step)
+{
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 8; j++, input += input_step)
+ *dst++ = *input;
+ input += (stride - 8) * input_step;
+ }
+}
+
+static int var_intra(const s16 *input)
+{
+ int32_t mean = 0;
+ int32_t ret = 0;
+ const s16 *tmp = input;
+ int i;
+
+ for (i = 0; i < 8 * 8; i++, tmp++)
+ mean += *tmp;
+ mean /= 64;
+ tmp = input;
+ for (i = 0; i < 8 * 8; i++, tmp++)
+ ret += (*tmp - mean) < 0 ? -(*tmp - mean) : (*tmp - mean);
+ return ret;
+}
+
+static int var_inter(const s16 *old, const s16 *new)
+{
+ int32_t ret = 0;
+ int i;
+
+ for (i = 0; i < 8 * 8; i++, old++, new++)
+ ret += (*old - *new) < 0 ? -(*old - *new) : (*old - *new);
+ return ret;
+}
+
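+/*
+ * Choose between intra and inter coding for a block: the intra cost is
+ * the sum of absolute deviations from the block mean (var_intra), the
+ * inter cost is the sum of absolute differences against the reference
+ * block (var_inter). The delta block is filled in either way so the
+ * caller can P-code it if PBLOCK is returned.
+ */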
+static int decide_blocktype(const u8 *cur, const u8 *reference,
+ s16 *deltablock, unsigned int stride,
+ unsigned int input_step)
+{
+ s16 tmp[64];
+ s16 old[64];
+ s16 *work = tmp;
+ unsigned int k, l;
+ int vari;
+ int vard;
+
+ fill_encoder_block(cur, tmp, stride, input_step);
+ fill_encoder_block(reference, old, 8, 1);
+ vari = var_intra(tmp);
+
+ for (k = 0; k < 8; k++) {
+ for (l = 0; l < 8; l++) {
+ *deltablock = *work - *reference;
+ deltablock++;
+ work++;
+ reference++;
+ }
+ }
+ deltablock -= 64;
+ vard = var_inter(old, tmp);
+ return vari <= vard ? IBLOCK : PBLOCK;
+}
+
+static void fill_decoder_block(u8 *dst, const s16 *input, int stride)
+{
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 8; j++)
+ *dst++ = *input++;
+ dst += stride - 8;
+ }
+}
+
+static void add_deltas(s16 *deltas, const u8 *ref, int stride)
+{
+ int k, l;
+
+ for (k = 0; k < 8; k++) {
+ for (l = 0; l < 8; l++) {
+ *deltas += *ref++;
+ /*
+ * Due to quantizing, it is possible that the
+ * decoded coefficients end up slightly out of range
+ */
+ if (*deltas < 0)
+ *deltas = 0;
+ else if (*deltas > 255)
+ *deltas = 255;
+ deltas++;
+ }
+ ref += stride - 8;
+ }
+}
+
+static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max,
+ struct cframe *cf, u32 height, u32 width,
+ unsigned int input_step,
+ bool is_intra, bool next_is_intra)
+{
+ u8 *input_start = input;
+ __be16 *rlco_start = *rlco;
+ s16 deltablock[64];
+ __be16 pframe_bit = htons(PFRAME_BIT);
+ u32 encoding = 0;
+ unsigned int last_size = 0;
+ unsigned int i, j;
+
+ for (j = 0; j < height / 8; j++) {
+ for (i = 0; i < width / 8; i++) {
+ /* intra code by default; the first frame is always intra coded */
+ int blocktype = IBLOCK;
+ unsigned int size;
+
+ if (!is_intra)
+ blocktype = decide_blocktype(input, refp,
+ deltablock, width, input_step);
+ if (is_intra || blocktype == IBLOCK) {
+ fwht(input, cf->coeffs, width, input_step, 1);
+ quantize_intra(cf->coeffs, cf->de_coeffs);
+ blocktype = IBLOCK;
+ } else {
+ /* inter code */
+ encoding |= FRAME_PCODED;
+ fwht16(deltablock, cf->coeffs, 8, 0);
+ quantize_inter(cf->coeffs, cf->de_coeffs);
+ }
+ if (!next_is_intra) {
+ ifwht(cf->de_coeffs, cf->de_fwht, blocktype);
+
+ if (blocktype == PBLOCK)
+ add_deltas(cf->de_fwht, refp, 8);
+ fill_decoder_block(refp, cf->de_fwht, 8);
+ }
+
+ input += 8 * input_step;
+ refp += 8 * 8;
+
+ if (encoding & FRAME_UNENCODED)
+ continue;
+
+ size = rlc(cf->coeffs, *rlco, blocktype);
+ if (last_size == size &&
+ !memcmp(*rlco + 1, *rlco - size + 1, 2 * size - 2)) {
+ __be16 *last_rlco = *rlco - size;
+ s16 hdr = ntohs(*last_rlco);
+
+ if (!((*last_rlco ^ **rlco) & pframe_bit) &&
+ (hdr & DUPS_MASK) < DUPS_MASK)
+ *last_rlco = htons(hdr + 2);
+ else
+ *rlco += size;
+ } else {
+ *rlco += size;
+ }
+ if (*rlco >= rlco_max)
+ encoding |= FRAME_UNENCODED;
+ last_size = size;
+ }
+ input += width * 7 * input_step;
+ }
+ if (encoding & FRAME_UNENCODED) {
+ u8 *out = (u8 *)rlco_start;
+
+ input = input_start;
+ /*
+ * The compressed stream should never contain the magic
+ * header, so when we copy the YUV data we replace 0xff
+ * by 0xfe. Since YUV is limited range such values
+ * shouldn't appear anyway.
+ */
+ for (i = 0; i < height * width; i++, input += input_step)
+ *out++ = (*input == 0xff) ? 0xfe : *input;
+ *rlco = (__be16 *)out;
+ }
+ return encoding;
+}
+
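+/*
+ * Note on the rlco_max bounds below: encode_plane() only tests the
+ * bound after a whole macroblock has been written, and a macroblock
+ * emits at most 65 16-bit words, so a slack of 256 words keeps the
+ * writes inside the buffer before falling back to FRAME_UNENCODED.
+ */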
+u32 encode_frame(struct raw_frame *frm, struct raw_frame *ref_frm,
+ struct cframe *cf, bool is_intra, bool next_is_intra)
+{
+ unsigned int size = frm->height * frm->width;
+ __be16 *rlco = cf->rlc_data;
+ __be16 *rlco_max;
+ u32 encoding;
+
+ rlco_max = rlco + size / 2 - 256;
+ encoding = encode_plane(frm->luma, ref_frm->luma, &rlco, rlco_max, cf,
+ frm->height, frm->width,
+ 1, is_intra, next_is_intra);
+ if (encoding & FRAME_UNENCODED)
+ encoding |= LUMA_UNENCODED;
+ encoding &= ~FRAME_UNENCODED;
+ rlco_max = rlco + size / 8 - 256;
+ encoding |= encode_plane(frm->cb, ref_frm->cb, &rlco, rlco_max, cf,
+ frm->height / 2, frm->width / 2,
+ frm->chroma_step, is_intra, next_is_intra);
+ if (encoding & FRAME_UNENCODED)
+ encoding |= CB_UNENCODED;
+ encoding &= ~FRAME_UNENCODED;
+ rlco_max = rlco + size / 8 - 256;
+ encoding |= encode_plane(frm->cr, ref_frm->cr, &rlco, rlco_max, cf,
+ frm->height / 2, frm->width / 2,
+ frm->chroma_step, is_intra, next_is_intra);
+ if (encoding & FRAME_UNENCODED)
+ encoding |= CR_UNENCODED;
+ encoding &= ~FRAME_UNENCODED;
+ cf->size = (rlco - cf->rlc_data) * sizeof(*rlco);
+ return encoding;
+}
+
+static void decode_plane(struct cframe *cf, const __be16 **rlco, u8 *ref,
+ u32 height, u32 width, bool uncompressed)
+{
+ unsigned int copies = 0;
+ s16 copy[8 * 8];
+ s16 stat;
+ unsigned int i, j;
+
+ if (uncompressed) {
+ memcpy(ref, *rlco, width * height);
+ *rlco += width * height / 2;
+ return;
+ }
+
+ /*
+ * When decoding each macroblock the rlco pointer will be increased
+ * by 65 * 2 bytes worst-case.
+ * To avoid overflow the buffer has to be 65/64th of the actual raw
+ * image size, just in case someone feeds it malicious data.
+ */
+ for (j = 0; j < height / 8; j++) {
+ for (i = 0; i < width / 8; i++) {
+ u8 *refp = ref + j * 8 * width + i * 8;
+
+ if (copies) {
+ memcpy(cf->de_fwht, copy, sizeof(copy));
+ if (stat & PFRAME_BIT)
+ add_deltas(cf->de_fwht, refp, width);
+ fill_decoder_block(refp, cf->de_fwht, width);
+ copies--;
+ continue;
+ }
+
+ stat = derlc(rlco, cf->coeffs);
+
+ if (stat & PFRAME_BIT)
+ dequantize_inter(cf->coeffs);
+ else
+ dequantize_intra(cf->coeffs);
+
+ ifwht(cf->coeffs, cf->de_fwht,
+ (stat & PFRAME_BIT) ? 0 : 1);
+
+ copies = (stat & DUPS_MASK) >> 1;
+ if (copies)
+ memcpy(copy, cf->de_fwht, sizeof(copy));
+ if (stat & PFRAME_BIT)
+ add_deltas(cf->de_fwht, refp, width);
+ fill_decoder_block(refp, cf->de_fwht, width);
+ }
+ }
+}
+
+void decode_frame(struct cframe *cf, struct raw_frame *ref, u32 hdr_flags)
+{
+ const __be16 *rlco = cf->rlc_data;
+
+ decode_plane(cf, &rlco, ref->luma, cf->height, cf->width,
+ hdr_flags & VICODEC_FL_LUMA_IS_UNCOMPRESSED);
+ decode_plane(cf, &rlco, ref->cb, cf->height / 2, cf->width / 2,
+ hdr_flags & VICODEC_FL_CB_IS_UNCOMPRESSED);
+ decode_plane(cf, &rlco, ref->cr, cf->height / 2, cf->width / 2,
+ hdr_flags & VICODEC_FL_CR_IS_UNCOMPRESSED);
+}
diff --git a/drivers/media/platform/vicodec/vicodec-codec.h b/drivers/media/platform/vicodec/vicodec-codec.h
new file mode 100644
index 000000000000..cdfad1332a3e
--- /dev/null
+++ b/drivers/media/platform/vicodec/vicodec-codec.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2016 Tom aan de Wiel
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef VICODEC_RLC_H
+#define VICODEC_RLC_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
+/*
+ * The compressed format consists of a cframe_hdr struct followed by the
+ * compressed frame data. The header contains the size of that data.
+ * Each Y, Cb and Cr plane is compressed separately. If the compressed
+ * size of each plane becomes larger than the uncompressed size, then
+ * that plane is stored uncompressed and the corresponding bit is set
+ * in the flags field of the header.
+ *
+ * Each compressed plane consists of macroblocks and each macroblock
+ * is run-length-encoded. Each macroblock starts with a 16 bit value.
+ * Bit 15 indicates if this is a P-coded macroblock (1) or not (0).
+ * P-coded macroblocks contain a delta against the previous frame.
+ *
+ * Bits 1-12 contain a number. If non-zero, then this same macroblock
+ * repeats that number of times. This results in a high degree of
+ * compression for generated images like colorbars.
+ *
+ * Following this macroblock header the MB coefficients are run-length
+ * encoded: the top 12 bits contain the coefficient, the bottom 4 bits
+ * tell how many zero coefficients precede it. A run value of 0xf indicates
+ * that the remainder of the macroblock should be filled with zeroes.
+ *
+ * All 16 and 32 bit values are stored in big-endian (network) order.
+ *
+ * Each cframe_hdr starts with an 8 byte magic header that is
+ * guaranteed not to occur in the compressed frame data. This header
+ * can be used to sync to the next frame.
+ *
+ * This codec uses the Fast Walsh Hadamard Transform. Tom aan de Wiel
+ * developed this as part of a university project, specifically for use
+ * with this driver. His project report can be found here:
+ *
+ * https://hverkuil.home.xs4all.nl/fwht.pdf
+ */
+
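+/*
+ * Worked example: a macroblock starting with the word 0x8004 is
+ * P-coded (bit 15 set) and repeats twice more ((0x8004 & DUPS_MASK)
+ * >> 1 == 2); a subsequent coefficient word 0x0053 stands for three
+ * zeroes followed by the coefficient 5.
+ */
+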
+/*
+ * Note: bit 0 of the header must always be 0. Otherwise it cannot
+ * be guaranteed that the magic 8 byte sequence (see below) can
+ * never occur in the rlc output.
+ */
+#define PFRAME_BIT (1 << 15)
+#define DUPS_MASK 0x1ffe
+
+/*
+ * This is a sequence of 8 bytes with the low 4 bits of each byte set
+ * to 0xf.
+ *
+ * This sequence cannot occur in the encoded data.
+ */
+#define VICODEC_MAGIC1 0x4f4f4f4f
+#define VICODEC_MAGIC2 0xffffffff
+
+#define VICODEC_VERSION 1
+
+#define VICODEC_MAX_WIDTH 3840
+#define VICODEC_MAX_HEIGHT 2160
+#define VICODEC_MIN_WIDTH 640
+#define VICODEC_MIN_HEIGHT 480
+
+#define PBLOCK 0
+#define IBLOCK 1
+
+/* Set if this is an interlaced format */
+#define VICODEC_FL_IS_INTERLACED BIT(0)
+/* Set if this is a bottom-first (NTSC) interlaced format */
+#define VICODEC_FL_IS_BOTTOM_FIRST BIT(1)
+/* Set if each 'frame' contains just one field */
+#define VICODEC_FL_IS_ALTERNATE BIT(2)
+/*
+ * If VICODEC_FL_IS_ALTERNATE was set, then this is set if this
+ * 'frame' is the bottom field, else it is the top field.
+ */
+#define VICODEC_FL_IS_BOTTOM_FIELD BIT(3)
+/* Set if the corresponding plane of this frame is stored uncompressed */
+#define VICODEC_FL_LUMA_IS_UNCOMPRESSED BIT(4)
+#define VICODEC_FL_CB_IS_UNCOMPRESSED BIT(5)
+#define VICODEC_FL_CR_IS_UNCOMPRESSED BIT(6)
+
+struct cframe_hdr {
+ u32 magic1;
+ u32 magic2;
+ __be32 version;
+ __be32 width, height;
+ __be32 flags;
+ __be32 colorspace;
+ __be32 xfer_func;
+ __be32 ycbcr_enc;
+ __be32 quantization;
+ __be32 size;
+};
+
+struct cframe {
+ unsigned int width, height;
+ __be16 *rlc_data;
+ s16 coeffs[8 * 8];
+ s16 de_coeffs[8 * 8];
+ s16 de_fwht[8 * 8];
+ u32 size;
+};
+
+struct raw_frame {
+ unsigned int width, height;
+ unsigned int chroma_step;
+ u8 *luma, *cb, *cr;
+};
+
+#define FRAME_PCODED BIT(0)
+#define FRAME_UNENCODED BIT(1)
+#define LUMA_UNENCODED BIT(2)
+#define CB_UNENCODED BIT(3)
+#define CR_UNENCODED BIT(4)
+
+u32 encode_frame(struct raw_frame *frm, struct raw_frame *ref_frm,
+ struct cframe *cf, bool is_intra, bool next_is_intra);
+void decode_frame(struct cframe *cf, struct raw_frame *ref, u32 hdr_flags);
+
+#endif
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
new file mode 100644
index 000000000000..408cd55d3580
--- /dev/null
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -0,0 +1,1506 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * A virtual codec example device.
+ *
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This is a virtual codec device driver for testing the codec framework.
+ * It simulates a device that uses memory buffers for both source and
+ * destination and encodes or decodes the data.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "vicodec-codec.h"
+
+MODULE_DESCRIPTION("Virtual codec device");
+MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
+MODULE_LICENSE("GPL v2");
+
+static bool multiplanar;
+module_param(multiplanar, bool, 0444);
+MODULE_PARM_DESC(multiplanar,
+ " use multi-planar API instead of single-planar API");
+
+static unsigned int debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, " activates debug info");
+
+#define VICODEC_NAME "vicodec"
+#define MAX_WIDTH 4096U
+#define MIN_WIDTH 640U
+#define MAX_HEIGHT 2160U
+#define MIN_HEIGHT 480U
+
+#define dprintk(dev, fmt, arg...) \
+ v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+
+static void vicodec_dev_release(struct device *dev)
+{
+}
+
+static struct platform_device vicodec_pdev = {
+ .name = VICODEC_NAME,
+ .dev.release = vicodec_dev_release,
+};
+
+/* Per-queue, driver-specific private data */
+struct vicodec_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int flags;
+ unsigned int sizeimage;
+ unsigned int sequence;
+ u32 fourcc;
+};
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+struct vicodec_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device enc_vfd;
+ struct video_device dec_vfd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device mdev;
+#endif
+
+ struct mutex enc_mutex;
+ struct mutex dec_mutex;
+ spinlock_t enc_lock;
+ spinlock_t dec_lock;
+
+ struct v4l2_m2m_dev *enc_dev;
+ struct v4l2_m2m_dev *dec_dev;
+};
+
+struct vicodec_ctx {
+ struct v4l2_fh fh;
+ struct vicodec_dev *dev;
+ bool is_enc;
+ spinlock_t *lock;
+
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *ctrl_gop_size;
+ unsigned int gop_size;
+ unsigned int gop_cnt;
+
+ /* Abort requested by m2m */
+ int aborting;
+ struct vb2_v4l2_buffer *last_src_buf;
+ struct vb2_v4l2_buffer *last_dst_buf;
+
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_quantization quantization;
+
+ /* Source and destination queue data */
+ struct vicodec_q_data q_data[2];
+ struct raw_frame ref_frame;
+ u8 *compressed_frame;
+ u32 cur_buf_offset;
+ u32 comp_max_size;
+ u32 comp_size;
+ u32 comp_magic_cnt;
+ u32 comp_frame_size;
+ bool comp_has_frame;
+ bool comp_has_next_frame;
+};
+
+static const u32 pixfmts_yuv[] = {
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+};
+
+static inline struct vicodec_ctx *file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct vicodec_ctx, fh);
+}
+
+static struct vicodec_q_data *get_q_data(struct vicodec_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return &ctx->q_data[V4L2_M2M_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return &ctx->q_data[V4L2_M2M_DST];
+ default:
+ WARN_ON(1);
+ break;
+ }
+ return NULL;
+}
+
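+/*
+ * GOP handling: a frame is intra coded when gop_cnt is 0, and the
+ * reference frame update is skipped when the next frame starts a new
+ * GOP (next_is_intra). Anything other than a cleanly P-coded frame
+ * restarts the GOP early.
+ */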
+static void encode(struct vicodec_ctx *ctx,
+ struct vicodec_q_data *q_data,
+ u8 *p_in, u8 *p_out)
+{
+ unsigned int size = q_data->width * q_data->height;
+ struct cframe_hdr *p_hdr;
+ struct cframe cf;
+ struct raw_frame rf;
+ u32 encoding;
+
+ rf.width = q_data->width;
+ rf.height = q_data->height;
+ rf.luma = p_in;
+
+ switch (q_data->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ rf.cb = rf.luma + size;
+ rf.cr = rf.cb + size / 4;
+ rf.chroma_step = 1;
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ rf.cr = rf.luma + size;
+ rf.cb = rf.cr + size / 4;
+ rf.chroma_step = 1;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ rf.cb = rf.luma + size;
+ rf.cr = rf.cb + 1;
+ rf.chroma_step = 2;
+ break;
+ case V4L2_PIX_FMT_NV21:
+ rf.cr = rf.luma + size;
+ rf.cb = rf.cr + 1;
+ rf.chroma_step = 2;
+ break;
+ }
+
+ cf.width = q_data->width;
+ cf.height = q_data->height;
+ cf.rlc_data = (__be16 *)(p_out + sizeof(*p_hdr));
+
+ encoding = encode_frame(&rf, &ctx->ref_frame, &cf, !ctx->gop_cnt,
+ ctx->gop_cnt == ctx->gop_size - 1);
+ if (encoding != FRAME_PCODED)
+ ctx->gop_cnt = 0;
+ if (++ctx->gop_cnt == ctx->gop_size)
+ ctx->gop_cnt = 0;
+
+ p_hdr = (struct cframe_hdr *)p_out;
+ p_hdr->magic1 = VICODEC_MAGIC1;
+ p_hdr->magic2 = VICODEC_MAGIC2;
+ p_hdr->version = htonl(VICODEC_VERSION);
+ p_hdr->width = htonl(cf.width);
+ p_hdr->height = htonl(cf.height);
+ p_hdr->flags = htonl(q_data->flags);
+ if (encoding & LUMA_UNENCODED)
+ p_hdr->flags |= htonl(VICODEC_FL_LUMA_IS_UNCOMPRESSED);
+ if (encoding & CB_UNENCODED)
+ p_hdr->flags |= htonl(VICODEC_FL_CB_IS_UNCOMPRESSED);
+ if (encoding & CR_UNENCODED)
+ p_hdr->flags |= htonl(VICODEC_FL_CR_IS_UNCOMPRESSED);
+ p_hdr->colorspace = htonl(ctx->colorspace);
+ p_hdr->xfer_func = htonl(ctx->xfer_func);
+ p_hdr->ycbcr_enc = htonl(ctx->ycbcr_enc);
+ p_hdr->quantization = htonl(ctx->quantization);
+ p_hdr->size = htonl(cf.size);
+ ctx->ref_frame.width = cf.width;
+ ctx->ref_frame.height = cf.height;
+}
+
+static int decode(struct vicodec_ctx *ctx,
+ struct vicodec_q_data *q_data,
+ u8 *p_in, u8 *p_out)
+{
+ unsigned int size = q_data->width * q_data->height;
+ unsigned int i;
+ struct cframe_hdr *p_hdr;
+ struct cframe cf;
+ u8 *p;
+
+ p_hdr = (struct cframe_hdr *)p_in;
+ cf.width = ntohl(p_hdr->width);
+ cf.height = ntohl(p_hdr->height);
+ q_data->flags = ntohl(p_hdr->flags);
+ ctx->colorspace = ntohl(p_hdr->colorspace);
+ ctx->xfer_func = ntohl(p_hdr->xfer_func);
+ ctx->ycbcr_enc = ntohl(p_hdr->ycbcr_enc);
+ ctx->quantization = ntohl(p_hdr->quantization);
+ cf.rlc_data = (__be16 *)(p_in + sizeof(*p_hdr));
+
+ if (p_hdr->magic1 != VICODEC_MAGIC1 ||
+ p_hdr->magic2 != VICODEC_MAGIC2 ||
+ ntohl(p_hdr->version) != VICODEC_VERSION ||
+ cf.width < VICODEC_MIN_WIDTH ||
+ cf.width > VICODEC_MAX_WIDTH ||
+ cf.height < VICODEC_MIN_HEIGHT ||
+ cf.height > VICODEC_MAX_HEIGHT ||
+ (cf.width & 7) || (cf.height & 7))
+ return -EINVAL;
+
+ /* TODO: support resolution changes */
+ if (cf.width != q_data->width || cf.height != q_data->height)
+ return -EINVAL;
+
+ decode_frame(&cf, &ctx->ref_frame, q_data->flags);
+ memcpy(p_out, ctx->ref_frame.luma, size);
+ p_out += size;
+
+ switch (q_data->fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ memcpy(p_out, ctx->ref_frame.cb, size / 4);
+ p_out += size / 4;
+ memcpy(p_out, ctx->ref_frame.cr, size / 4);
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ memcpy(p_out, ctx->ref_frame.cr, size / 4);
+ p_out += size / 4;
+ memcpy(p_out, ctx->ref_frame.cb, size / 4);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ for (i = 0, p = p_out; i < size / 4; i++, p += 2)
+ *p = ctx->ref_frame.cb[i];
+ for (i = 0, p = p_out + 1; i < size / 4; i++, p += 2)
+ *p = ctx->ref_frame.cr[i];
+ break;
+ case V4L2_PIX_FMT_NV21:
+ for (i = 0, p = p_out; i < size / 4; i++, p += 2)
+ *p = ctx->ref_frame.cr[i];
+ for (i = 0, p = p_out + 1; i < size / 4; i++, p += 2)
+ *p = ctx->ref_frame.cb[i];
+ break;
+ }
+ return 0;
+}
+
+static int device_process(struct vicodec_ctx *ctx,
+ struct vb2_v4l2_buffer *in_vb,
+ struct vb2_v4l2_buffer *out_vb)
+{
+ struct vicodec_dev *dev = ctx->dev;
+ struct vicodec_q_data *q_out, *q_cap;
+ u8 *p_in, *p_out;
+ int ret;
+
+ q_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_cap = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (ctx->is_enc)
+ p_in = vb2_plane_vaddr(&in_vb->vb2_buf, 0);
+ else
+ p_in = ctx->compressed_frame;
+ p_out = vb2_plane_vaddr(&out_vb->vb2_buf, 0);
+ if (!p_in || !p_out) {
+ v4l2_err(&dev->v4l2_dev,
+ "Acquiring kernel pointers to buffers failed\n");
+ return -EFAULT;
+ }
+
+ if (ctx->is_enc) {
+ struct cframe_hdr *p_hdr = (struct cframe_hdr *)p_out;
+
+ encode(ctx, q_out, p_in, p_out);
+ vb2_set_plane_payload(&out_vb->vb2_buf, 0,
+ sizeof(*p_hdr) + ntohl(p_hdr->size));
+ } else {
+ ret = decode(ctx, q_cap, p_in, p_out);
+ if (ret)
+ return ret;
+ vb2_set_plane_payload(&out_vb->vb2_buf, 0,
+ q_cap->width * q_cap->height * 3 / 2);
+ }
+
+ out_vb->sequence = q_cap->sequence++;
+ out_vb->vb2_buf.timestamp = in_vb->vb2_buf.timestamp;
+
+ if (in_vb->flags & V4L2_BUF_FLAG_TIMECODE)
+ out_vb->timecode = in_vb->timecode;
+ out_vb->field = in_vb->field;
+ out_vb->flags &= ~V4L2_BUF_FLAG_LAST;
+ out_vb->flags |= in_vb->flags &
+ (V4L2_BUF_FLAG_TIMECODE |
+ V4L2_BUF_FLAG_KEYFRAME |
+ V4L2_BUF_FLAG_PFRAME |
+ V4L2_BUF_FLAG_BFRAME |
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
+
+ return 0;
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/* device_run() - prepares and starts the device */
+static void device_run(void *priv)
+{
+ static const struct v4l2_event eos_event = {
+ .type = V4L2_EVENT_EOS
+ };
+ struct vicodec_ctx *ctx = priv;
+ struct vicodec_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct vicodec_q_data *q_out;
+ u32 state;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ q_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ state = VB2_BUF_STATE_DONE;
+ if (device_process(ctx, src_buf, dst_buf))
+ state = VB2_BUF_STATE_ERROR;
+ ctx->last_dst_buf = dst_buf;
+
+ spin_lock(ctx->lock);
+ if (!ctx->comp_has_next_frame && src_buf == ctx->last_src_buf) {
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_event_queue_fh(&ctx->fh, &eos_event);
+ }
+ if (ctx->is_enc) {
+ src_buf->sequence = q_out->sequence++;
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, state);
+ } else if (vb2_get_plane_payload(&src_buf->vb2_buf, 0) == ctx->cur_buf_offset) {
+ src_buf->sequence = q_out->sequence++;
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, state);
+ ctx->cur_buf_offset = 0;
+ ctx->comp_has_next_frame = false;
+ }
+ v4l2_m2m_buf_done(dst_buf, state);
+ ctx->comp_size = 0;
+ ctx->comp_magic_cnt = 0;
+ ctx->comp_has_frame = false;
+ spin_unlock(ctx->lock);
+
+ if (ctx->is_enc)
+ v4l2_m2m_job_finish(dev->enc_dev, ctx->fh.m2m_ctx);
+ else
+ v4l2_m2m_job_finish(dev->dec_dev, ctx->fh.m2m_ctx);
+}
+
+static void job_remove_out_buf(struct vicodec_ctx *ctx, u32 state)
+{
+ struct vb2_v4l2_buffer *src_buf;
+ struct vicodec_q_data *q_out;
+
+ q_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ spin_lock(ctx->lock);
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ src_buf->sequence = q_out->sequence++;
+ v4l2_m2m_buf_done(src_buf, state);
+ ctx->cur_buf_offset = 0;
+ spin_unlock(ctx->lock);
+}
+
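+/*
+ * For the decoder, gather exactly one compressed frame before the job
+ * is scheduled: scan for the 8-byte magic sequence, accumulate the
+ * cframe header to learn the frame size, then the remainder of the
+ * frame, consuming OUTPUT buffers along the way. Finally peek at the
+ * next header to see whether this buffer already holds another frame.
+ */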
+static int job_ready(void *priv)
+{
+ static const u8 magic[] = {
+ 0x4f, 0x4f, 0x4f, 0x4f, 0xff, 0xff, 0xff, 0xff
+ };
+ struct vicodec_ctx *ctx = priv;
+ struct vb2_v4l2_buffer *src_buf;
+ u8 *p_out;
+ u8 *p;
+ u32 sz;
+ u32 state;
+
+ if (ctx->is_enc || ctx->comp_has_frame)
+ return 1;
+
+restart:
+ ctx->comp_has_next_frame = false;
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ if (!src_buf)
+ return 0;
+ p_out = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ sz = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
+ p = p_out + ctx->cur_buf_offset;
+
+ state = VB2_BUF_STATE_DONE;
+
+ if (!ctx->comp_size) {
+ state = VB2_BUF_STATE_ERROR;
+ for (; p < p_out + sz; p++) {
+ u32 copy;
+
+ p = memchr(p, magic[ctx->comp_magic_cnt],
+ p_out + sz - p);
+ if (!p) {
+ ctx->comp_magic_cnt = 0;
+ break;
+ }
+ copy = sizeof(magic) - ctx->comp_magic_cnt;
+ if (p_out + sz - p < copy)
+ copy = p_out + sz - p;
+ memcpy(ctx->compressed_frame + ctx->comp_magic_cnt,
+ p, copy);
+ ctx->comp_magic_cnt += copy;
+ if (!memcmp(ctx->compressed_frame, magic, ctx->comp_magic_cnt)) {
+ p += copy;
+ state = VB2_BUF_STATE_DONE;
+ break;
+ }
+ ctx->comp_magic_cnt = 0;
+ }
+ if (ctx->comp_magic_cnt < sizeof(magic)) {
+ job_remove_out_buf(ctx, state);
+ goto restart;
+ }
+ ctx->comp_size = sizeof(magic);
+ }
+ if (ctx->comp_size < sizeof(struct cframe_hdr)) {
+ struct cframe_hdr *p_hdr = (struct cframe_hdr *)ctx->compressed_frame;
+ u32 copy = sizeof(struct cframe_hdr) - ctx->comp_size;
+
+ if (copy > p_out + sz - p)
+ copy = p_out + sz - p;
+ memcpy(ctx->compressed_frame + ctx->comp_size,
+ p, copy);
+ p += copy;
+ ctx->comp_size += copy;
+ if (ctx->comp_size < sizeof(struct cframe_hdr)) {
+ job_remove_out_buf(ctx, state);
+ goto restart;
+ }
+ ctx->comp_frame_size = ntohl(p_hdr->size) + sizeof(*p_hdr);
+ if (ctx->comp_frame_size > ctx->comp_max_size)
+ ctx->comp_frame_size = ctx->comp_max_size;
+ }
+ if (ctx->comp_size < ctx->comp_frame_size) {
+ u32 copy = ctx->comp_frame_size - ctx->comp_size;
+
+ if (copy > p_out + sz - p)
+ copy = p_out + sz - p;
+ memcpy(ctx->compressed_frame + ctx->comp_size,
+ p, copy);
+ p += copy;
+ ctx->comp_size += copy;
+ if (ctx->comp_size < ctx->comp_frame_size) {
+ job_remove_out_buf(ctx, state);
+ goto restart;
+ }
+ }
+ ctx->cur_buf_offset = p - p_out;
+ ctx->comp_has_frame = true;
+ ctx->comp_has_next_frame = false;
+ if (sz - ctx->cur_buf_offset >= sizeof(struct cframe_hdr)) {
+ struct cframe_hdr *p_hdr = (struct cframe_hdr *)p;
+ u32 frame_size = ntohl(p_hdr->size);
+ u32 remaining = sz - ctx->cur_buf_offset - sizeof(*p_hdr);
+
+ if (!memcmp(p, magic, sizeof(magic)))
+ ctx->comp_has_next_frame = remaining >= frame_size;
+ }
+ return 1;
+}
+
+static void job_abort(void *priv)
+{
+ struct vicodec_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+}
+
+/*
+ * video ioctls
+ */
+
+static u32 find_fmt(u32 fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixfmts_yuv); i++)
+ if (pixfmts_yuv[i] == fmt)
+ return fmt;
+ return pixfmts_yuv[0];
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, VICODEC_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, VICODEC_NAME, sizeof(cap->card) - 1);
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", VICODEC_NAME);
+ cap->device_caps = V4L2_CAP_STREAMING |
+ (multiplanar ?
+ V4L2_CAP_VIDEO_M2M_MPLANE :
+ V4L2_CAP_VIDEO_M2M);
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, bool is_enc, bool is_out)
+{
+ bool is_yuv = (is_enc && is_out) || (!is_enc && !is_out);
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(f->type) && !multiplanar)
+ return -EINVAL;
+ if (!V4L2_TYPE_IS_MULTIPLANAR(f->type) && multiplanar)
+ return -EINVAL;
+ if (f->index >= (is_yuv ? ARRAY_SIZE(pixfmts_yuv) : 1))
+ return -EINVAL;
+
+ if (is_yuv)
+ f->pixelformat = pixfmts_yuv[f->index];
+ else
+ f->pixelformat = V4L2_PIX_FMT_FWHT;
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+
+ return enum_fmt(f, ctx->is_enc, false);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+
+ return enum_fmt(f, ctx->is_enc, true);
+}
+
+static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct vicodec_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (multiplanar)
+ return -EINVAL;
+ pix = &f->fmt.pix;
+ pix->width = q_data->width;
+ pix->height = q_data->height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = q_data->fourcc;
+ if (q_data->fourcc == V4L2_PIX_FMT_FWHT)
+ pix->bytesperline = 0;
+ else
+ pix->bytesperline = q_data->width;
+ pix->sizeimage = q_data->sizeimage;
+ pix->colorspace = ctx->colorspace;
+ pix->xfer_func = ctx->xfer_func;
+ pix->ycbcr_enc = ctx->ycbcr_enc;
+ pix->quantization = ctx->quantization;
+ break;
+
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (!multiplanar)
+ return -EINVAL;
+ pix_mp = &f->fmt.pix_mp;
+ pix_mp->width = q_data->width;
+ pix_mp->height = q_data->height;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->pixelformat = q_data->fourcc;
+ pix_mp->num_planes = 1;
+ if (q_data->fourcc == V4L2_PIX_FMT_FWHT)
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ else
+ pix_mp->plane_fmt[0].bytesperline = q_data->width;
+ pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage;
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->xfer_func = ctx->xfer_func;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quantization;
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+ memset(pix_mp->plane_fmt[0].reserved, 0,
+ sizeof(pix_mp->plane_fmt[0].reserved));
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(file2ctx(file), f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(file2ctx(file), f);
+}
+
+static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &f->fmt.pix;
+ pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH) & ~7;
+ pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT) & ~7;
+ pix->bytesperline = pix->width;
+ pix->sizeimage = pix->width * pix->height * 3 / 2;
+ pix->field = V4L2_FIELD_NONE;
+ if (pix->pixelformat == V4L2_PIX_FMT_FWHT) {
+ pix->bytesperline = 0;
+ pix->sizeimage += sizeof(struct cframe_hdr);
+ }
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pix_mp = &f->fmt.pix_mp;
+ pix_mp->width = clamp(pix_mp->width, MIN_WIDTH, MAX_WIDTH) & ~7;
+ pix_mp->height =
+ clamp(pix_mp->height, MIN_HEIGHT, MAX_HEIGHT) & ~7;
+ pix_mp->plane_fmt[0].bytesperline = pix_mp->width;
+ pix_mp->plane_fmt[0].sizeimage =
+ pix_mp->width * pix_mp->height * 3 / 2;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->num_planes = 1;
+ if (pix_mp->pixelformat == V4L2_PIX_FMT_FWHT) {
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ pix_mp->plane_fmt[0].sizeimage +=
+ sizeof(struct cframe_hdr);
+ }
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+ memset(pix_mp->plane_fmt[0].reserved, 0,
+ sizeof(pix_mp->plane_fmt[0].reserved));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (multiplanar)
+ return -EINVAL;
+ pix = &f->fmt.pix;
+ pix->pixelformat = ctx->is_enc ? V4L2_PIX_FMT_FWHT :
+ find_fmt(f->fmt.pix.pixelformat);
+ pix->colorspace = ctx->colorspace;
+ pix->xfer_func = ctx->xfer_func;
+ pix->ycbcr_enc = ctx->ycbcr_enc;
+ pix->quantization = ctx->quantization;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (!multiplanar)
+ return -EINVAL;
+ pix_mp = &f->fmt.pix_mp;
+ pix_mp->pixelformat = ctx->is_enc ? V4L2_PIX_FMT_FWHT :
+ find_fmt(pix_mp->pixelformat);
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->xfer_func = ctx->xfer_func;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->quantization = ctx->quantization;
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+ memset(pix_mp->plane_fmt[0].reserved, 0,
+ sizeof(pix_mp->plane_fmt[0].reserved));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(ctx, f);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (multiplanar)
+ return -EINVAL;
+ pix = &f->fmt.pix;
+ pix->pixelformat = !ctx->is_enc ? V4L2_PIX_FMT_FWHT :
+ find_fmt(pix->pixelformat);
+ if (!pix->colorspace)
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (!multiplanar)
+ return -EINVAL;
+ pix_mp = &f->fmt.pix_mp;
+ pix_mp->pixelformat = !ctx->is_enc ? V4L2_PIX_FMT_FWHT :
+ find_fmt(pix_mp->pixelformat);
+ if (!pix_mp->colorspace)
+ pix_mp->colorspace = V4L2_COLORSPACE_REC709;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(ctx, f);
+}
+
+static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
+{
+ struct vicodec_q_data *q_data;
+ struct vb2_queue *vq;
+ bool fmt_changed = true;
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &f->fmt.pix;
+ if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type))
+ fmt_changed =
+ q_data->fourcc != pix->pixelformat ||
+ q_data->width != pix->width ||
+ q_data->height != pix->height;
+
+ if (vb2_is_busy(vq) && fmt_changed)
+ return -EBUSY;
+
+ q_data->fourcc = pix->pixelformat;
+ q_data->width = pix->width;
+ q_data->height = pix->height;
+ q_data->sizeimage = pix->sizeimage;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pix_mp = &f->fmt.pix_mp;
+ if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type))
+ fmt_changed =
+ q_data->fourcc != pix_mp->pixelformat ||
+ q_data->width != pix_mp->width ||
+ q_data->height != pix_mp->height;
+
+ if (vb2_is_busy(vq) && fmt_changed)
+ return -EBUSY;
+
+ q_data->fourcc = pix_mp->pixelformat;
+ q_data->width = pix_mp->width;
+ q_data->height = pix_mp->height;
+ q_data->sizeimage = pix_mp->plane_fmt[0].sizeimage;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dprintk(ctx->dev,
+ "Setting format for type %d, wxh: %dx%d, fourcc: %08x\n",
+ f->type, q_data->width, q_data->height, q_data->fourcc);
+
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(file2ctx(file), f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_pix_format *pix;
+ int ret;
+
+ ret = vidioc_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = vidioc_s_fmt(file2ctx(file), f);
+ if (!ret) {
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &f->fmt.pix;
+ ctx->colorspace = pix->colorspace;
+ ctx->xfer_func = pix->xfer_func;
+ ctx->ycbcr_enc = pix->ycbcr_enc;
+ ctx->quantization = pix->quantization;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pix_mp = &f->fmt.pix_mp;
+ ctx->colorspace = pix_mp->colorspace;
+ ctx->xfer_func = pix_mp->xfer_func;
+ ctx->ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->quantization = pix_mp->quantization;
+ break;
+ default:
+ break;
+ }
+ }
+ return ret;
+}
+
+static void vicodec_mark_last_buf(struct vicodec_ctx *ctx)
+{
+ static const struct v4l2_event eos_event = {
+ .type = V4L2_EVENT_EOS
+ };
+
+ spin_lock(ctx->lock);
+ ctx->last_src_buf = v4l2_m2m_last_src_buf(ctx->fh.m2m_ctx);
+ if (!ctx->last_src_buf && ctx->last_dst_buf) {
+ ctx->last_dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_event_queue_fh(&ctx->fh, &eos_event);
+ }
+ spin_unlock(ctx->lock);
+}
+
+static int vicodec_try_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *ec)
+{
+ if (ec->cmd != V4L2_ENC_CMD_STOP)
+ return -EINVAL;
+
+ if (ec->flags & V4L2_ENC_CMD_STOP_AT_GOP_END)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vicodec_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *ec)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ int ret;
+
+ ret = vicodec_try_encoder_cmd(file, fh, ec);
+ if (ret < 0)
+ return ret;
+
+ vicodec_mark_last_buf(ctx);
+ return 0;
+}
+
+static int vicodec_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
+ return -EINVAL;
+
+ if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vicodec_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct vicodec_ctx *ctx = file2ctx(file);
+ int ret;
+
+ ret = vicodec_try_decoder_cmd(file, fh, dc);
+ if (ret < 0)
+ return ret;
+
+ vicodec_mark_last_buf(ctx);
+ return 0;
+}
+
+static int vicodec_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ switch (fsize->pixel_format) {
+ case V4L2_PIX_FMT_FWHT:
+ break;
+ default:
+ if (find_fmt(fsize->pixel_format) == fsize->pixel_format)
+ break;
+ return -EINVAL;
+ }
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+
+ fsize->stepwise.min_width = MIN_WIDTH;
+ fsize->stepwise.max_width = MAX_WIDTH;
+ fsize->stepwise.step_width = 8;
+ fsize->stepwise.min_height = MIN_HEIGHT;
+ fsize->stepwise.max_height = MAX_HEIGHT;
+ fsize->stepwise.step_height = 8;
+
+ return 0;
+}
+
+static int vicodec_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ }
+}
+
+static const struct v4l2_ioctl_ops vicodec_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_try_encoder_cmd = vicodec_try_encoder_cmd,
+ .vidioc_encoder_cmd = vicodec_encoder_cmd,
+ .vidioc_try_decoder_cmd = vicodec_try_decoder_cmd,
+ .vidioc_decoder_cmd = vicodec_decoder_cmd,
+ .vidioc_enum_framesizes = vicodec_enum_framesizes,
+
+ .vidioc_subscribe_event = vicodec_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+
+/*
+ * Queue operations
+ */
+
+static int vicodec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vicodec_q_data *q_data = get_q_data(ctx, vq->type);
+ unsigned int size = q_data->sizeimage;
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+ return 0;
+}
+
+static int vicodec_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vicodec_q_data *q_data;
+
+ dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dprintk(ctx->dev, "%s field isn't supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+ dprintk(ctx->dev,
+ "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0),
+ (long)q_data->sizeimage);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vicodec_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void vicodec_return_bufs(struct vb2_queue *q, u32 state)
+{
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (vbuf == NULL)
+ return;
+ spin_lock(ctx->lock);
+ v4l2_m2m_buf_done(vbuf, state);
+ spin_unlock(ctx->lock);
+ }
+}
+
+static int vicodec_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
+ struct vicodec_q_data *q_data = get_q_data(ctx, q->type);
+ unsigned int size = q_data->width * q_data->height;
+
+ q_data->sequence = 0;
+
+ if (!V4L2_TYPE_IS_OUTPUT(q->type))
+ return 0;
+
+ ctx->ref_frame.width = ctx->ref_frame.height = 0;
+ ctx->ref_frame.luma = kvmalloc(size * 3 / 2, GFP_KERNEL);
+ ctx->comp_max_size = size * 3 / 2 + sizeof(struct cframe_hdr);
+ ctx->compressed_frame = kvmalloc(ctx->comp_max_size, GFP_KERNEL);
+ if (!ctx->ref_frame.luma || !ctx->compressed_frame) {
+ kvfree(ctx->ref_frame.luma);
+ kvfree(ctx->compressed_frame);
+ vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED);
+ return -ENOMEM;
+ }
+ ctx->ref_frame.cb = ctx->ref_frame.luma + size;
+ ctx->ref_frame.cr = ctx->ref_frame.cb + size / 4;
+ ctx->last_src_buf = NULL;
+ ctx->last_dst_buf = NULL;
+ v4l2_ctrl_grab(ctx->ctrl_gop_size, true);
+ ctx->gop_size = v4l2_ctrl_g_ctrl(ctx->ctrl_gop_size);
+ ctx->gop_cnt = 0;
+ ctx->cur_buf_offset = 0;
+ ctx->comp_size = 0;
+ ctx->comp_magic_cnt = 0;
+ ctx->comp_has_frame = false;
+
+ return 0;
+}
+
+static void vicodec_stop_streaming(struct vb2_queue *q)
+{
+ struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
+
+ vicodec_return_bufs(q, VB2_BUF_STATE_ERROR);
+
+ if (!V4L2_TYPE_IS_OUTPUT(q->type))
+ return;
+
+ kvfree(ctx->ref_frame.luma);
+ kvfree(ctx->compressed_frame);
+ v4l2_ctrl_grab(ctx->ctrl_gop_size, false);
+}
+
+static const struct vb2_ops vicodec_qops = {
+ .queue_setup = vicodec_queue_setup,
+ .buf_prepare = vicodec_buf_prepare,
+ .buf_queue = vicodec_buf_queue,
+ .start_streaming = vicodec_start_streaming,
+ .stop_streaming = vicodec_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct vicodec_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = (multiplanar ?
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &vicodec_qops;
+ src_vq->mem_ops = &vb2_vmalloc_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = ctx->is_enc ? &ctx->dev->enc_mutex :
+ &ctx->dev->dec_mutex;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = (multiplanar ?
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &vicodec_qops;
+ dst_vq->mem_ops = &vb2_vmalloc_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = src_vq->lock;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int vicodec_open(struct file *file)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct vicodec_dev *dev = video_drvdata(file);
+ struct vicodec_ctx *ctx = NULL;
+ struct v4l2_ctrl_handler *hdl;
+ unsigned int size;
+ int rc = 0;
+
+ if (mutex_lock_interruptible(vfd->lock))
+ return -ERESTARTSYS;
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto open_unlock;
+ }
+
+ if (vfd == &dev->enc_vfd)
+ ctx->is_enc = true;
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->dev = dev;
+ hdl = &ctx->hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+ ctx->ctrl_gop_size = v4l2_ctrl_new_std(hdl, NULL,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 1, 16, 1, 10);
+ if (hdl->error) {
+ rc = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ kfree(ctx);
+ goto open_unlock;
+ }
+ ctx->fh.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
+
+ ctx->q_data[V4L2_M2M_SRC].fourcc =
+ ctx->is_enc ? V4L2_PIX_FMT_YUV420 : V4L2_PIX_FMT_FWHT;
+ ctx->q_data[V4L2_M2M_SRC].width = 1280;
+ ctx->q_data[V4L2_M2M_SRC].height = 720;
+ size = 1280 * 720 * 3 / 2;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = size;
+ ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
+ ctx->q_data[V4L2_M2M_DST].fourcc =
+ ctx->is_enc ? V4L2_PIX_FMT_FWHT : V4L2_PIX_FMT_YUV420;
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+
+ size += sizeof(struct cframe_hdr);
+ if (ctx->is_enc) {
+ ctx->q_data[V4L2_M2M_DST].sizeimage = size;
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->enc_dev, ctx,
+ &queue_init);
+ ctx->lock = &dev->enc_lock;
+ } else {
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = size;
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->dec_dev, ctx,
+ &queue_init);
+ ctx->lock = &dev->dec_lock;
+ }
+
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ rc = PTR_ERR(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(hdl);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ goto open_unlock;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+open_unlock:
+ mutex_unlock(vfd->lock);
+ return rc;
+}
+
+static int vicodec_release(struct file *file)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct vicodec_ctx *ctx = file2ctx(file);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ mutex_lock(vfd->lock);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_unlock(vfd->lock);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations vicodec_fops = {
+ .owner = THIS_MODULE,
+ .open = vicodec_open,
+ .release = vicodec_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device vicodec_videodev = {
+ .name = VICODEC_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &vicodec_fops,
+ .ioctl_ops = &vicodec_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_abort = job_abort,
+ .job_ready = job_ready,
+};
+
+static int vicodec_probe(struct platform_device *pdev)
+{
+ struct vicodec_dev *dev;
+ struct video_device *vfd;
+ int ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->enc_lock);
+ spin_lock_init(&dev->dec_lock);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->mdev.dev = &pdev->dev;
+ strlcpy(dev->mdev.model, "vicodec", sizeof(dev->mdev.model));
+ media_device_init(&dev->mdev);
+ dev->v4l2_dev.mdev = &dev->mdev;
+#endif
+
+ mutex_init(&dev->enc_mutex);
+ mutex_init(&dev->dec_mutex);
+
+ platform_set_drvdata(pdev, dev);
+
+ dev->enc_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev->enc_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init vicodec device\n");
+ ret = PTR_ERR(dev->enc_dev);
+ goto unreg_dev;
+ }
+
+ dev->dec_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev->dec_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init vicodec device\n");
+ ret = PTR_ERR(dev->dec_dev);
+ goto err_enc_m2m;
+ }
+
+ dev->enc_vfd = vicodec_videodev;
+ vfd = &dev->enc_vfd;
+ vfd->lock = &dev->enc_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ strlcpy(vfd->name, "vicodec-enc", sizeof(vfd->name));
+ v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
+ v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto err_dec_m2m;
+ }
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ dev->dec_vfd = vicodec_videodev;
+ vfd = &dev->dec_vfd;
+ vfd->lock = &dev->dec_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ strlcpy(vfd->name, "vicodec-dec", sizeof(vfd->name));
+ v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
+ v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto unreg_enc;
+ }
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ ret = v4l2_m2m_register_media_controller(dev->enc_dev,
+ &dev->enc_vfd, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
+ goto unreg_m2m;
+ }
+
+ ret = v4l2_m2m_register_media_controller(dev->dec_dev,
+ &dev->dec_vfd, MEDIA_ENT_F_PROC_VIDEO_DECODER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
+ goto unreg_m2m_enc_mc;
+ }
+
+ ret = media_device_register(&dev->mdev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register mem2mem media device\n");
+ goto unreg_m2m_dec_mc;
+ }
+#endif
+ return 0;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+unreg_m2m_dec_mc:
+ v4l2_m2m_unregister_media_controller(dev->dec_dev);
+unreg_m2m_enc_mc:
+ v4l2_m2m_unregister_media_controller(dev->enc_dev);
+unreg_m2m:
+ video_unregister_device(&dev->dec_vfd);
+#endif
+unreg_enc:
+ video_unregister_device(&dev->enc_vfd);
+err_dec_m2m:
+ v4l2_m2m_release(dev->dec_dev);
+err_enc_m2m:
+ v4l2_m2m_release(dev->enc_dev);
+unreg_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int vicodec_remove(struct platform_device *pdev)
+{
+ struct vicodec_dev *dev = platform_get_drvdata(pdev);
+
+	v4l2_info(&dev->v4l2_dev, "Removing " VICODEC_NAME "\n");
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_unregister(&dev->mdev);
+ v4l2_m2m_unregister_media_controller(dev->enc_dev);
+ v4l2_m2m_unregister_media_controller(dev->dec_dev);
+ media_device_cleanup(&dev->mdev);
+#endif
+
+ v4l2_m2m_release(dev->enc_dev);
+ v4l2_m2m_release(dev->dec_dev);
+ video_unregister_device(&dev->enc_vfd);
+ video_unregister_device(&dev->dec_vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return 0;
+}
+
+static struct platform_driver vicodec_pdrv = {
+ .probe = vicodec_probe,
+ .remove = vicodec_remove,
+ .driver = {
+ .name = VICODEC_NAME,
+ },
+};
+
+static void __exit vicodec_exit(void)
+{
+ platform_driver_unregister(&vicodec_pdrv);
+ platform_device_unregister(&vicodec_pdev);
+}
+
+static int __init vicodec_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&vicodec_pdev);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&vicodec_pdrv);
+ if (ret)
+ platform_device_unregister(&vicodec_pdev);
+
+ return ret;
+}
+
+module_init(vicodec_init);
+module_exit(vicodec_exit);
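
The driver above registers two video nodes (encoder and decoder) backed by the same m2m ops. As a rough illustration, not part of the patch, here is a minimal standalone userspace sketch against the stock V4L2 UAPI showing how a client would identify those nodes; the /dev/video0 path is an assumption, since node numbers depend on registration order.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	/* Assumed node; vicodec-enc and vicodec-dec typically get consecutive numbers. */
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&cap, 0, sizeof(cap));
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
		printf("driver: %s, card: %s\n", cap.driver, cap.card);
	close(fd);
	return 0;
}
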
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index 1fb887293337..c01e1592ad0a 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -34,6 +34,13 @@ struct video_mux {
int active;
};
+static const struct v4l2_mbus_framefmt video_mux_format_mbus_default = {
+ .width = 1,
+ .height = 1,
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .field = V4L2_FIELD_NONE,
+};
+
static inline struct video_mux *v4l2_subdev_to_video_mux(struct v4l2_subdev *sd)
{
return container_of(sd, struct video_mux, subdev);
@@ -180,6 +187,88 @@ static int video_mux_set_format(struct v4l2_subdev *sd,
if (!source_mbusformat)
return -EINVAL;
+ /* No size limitations except V4L2 compliance requirements */
+ v4l_bound_align_image(&sdformat->format.width, 1, 65536, 0,
+ &sdformat->format.height, 1, 65536, 0, 0);
+
+ /* All formats except LVDS and vendor specific formats are acceptable */
+ switch (sdformat->format.code) {
+ case MEDIA_BUS_FMT_RGB444_1X12:
+ case MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ case MEDIA_BUS_FMT_BGR565_2X8_BE:
+ case MEDIA_BUS_FMT_BGR565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_GBR888_1X24:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_ARGB8888_1X32:
+ case MEDIA_BUS_FMT_RGB888_1X32_PADHI:
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_RGB121212_1X36:
+ case MEDIA_BUS_FMT_RGB161616_1X48:
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_UV8_1X8:
+ case MEDIA_BUS_FMT_UYVY8_1_5X8:
+ case MEDIA_BUS_FMT_VYUY8_1_5X8:
+ case MEDIA_BUS_FMT_YUYV8_1_5X8:
+ case MEDIA_BUS_FMT_YVYU8_1_5X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_UYVY10_2X10:
+ case MEDIA_BUS_FMT_VYUY10_2X10:
+ case MEDIA_BUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_YVYU10_2X10:
+ case MEDIA_BUS_FMT_Y12_1X12:
+ case MEDIA_BUS_FMT_UYVY12_2X12:
+ case MEDIA_BUS_FMT_VYUY12_2X12:
+ case MEDIA_BUS_FMT_YUYV12_2X12:
+ case MEDIA_BUS_FMT_YVYU12_2X12:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
+ case MEDIA_BUS_FMT_YDYUYDYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_VYUY10_1X20:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YVYU10_1X20:
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+ case MEDIA_BUS_FMT_UYVY12_1X24:
+ case MEDIA_BUS_FMT_VYUY12_1X24:
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ case MEDIA_BUS_FMT_YVYU12_1X24:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+ case MEDIA_BUS_FMT_AYUV8_1X32:
+ case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
+ case MEDIA_BUS_FMT_YUV12_1X36:
+ case MEDIA_BUS_FMT_YUV16_1X48:
+ case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
+ case MEDIA_BUS_FMT_JPEG_1X8:
+ case MEDIA_BUS_FMT_AHSV8888_1X32:
+ break;
+ default:
+ sdformat->format.code = MEDIA_BUS_FMT_Y8_1X8;
+ break;
+ }
+ if (sdformat->format.field == V4L2_FIELD_ANY)
+ sdformat->format.field = V4L2_FIELD_NONE;
+
mutex_lock(&vmux->lock);
/* Source pad mirrors active sink pad, no limitations on sink pads */
@@ -197,7 +286,27 @@ static int video_mux_set_format(struct v4l2_subdev *sd,
return 0;
}
+static int video_mux_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
+ struct v4l2_mbus_framefmt *mbusformat;
+ unsigned int i;
+
+ mutex_lock(&vmux->lock);
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ mbusformat = v4l2_subdev_get_try_format(sd, cfg, i);
+ *mbusformat = video_mux_format_mbus_default;
+ }
+
+ mutex_unlock(&vmux->lock);
+
+ return 0;
+}
+
static const struct v4l2_subdev_pad_ops video_mux_pad_ops = {
+ .init_cfg = video_mux_init_cfg,
.get_fmt = video_mux_get_format,
.set_fmt = video_mux_set_format,
};
@@ -214,8 +323,8 @@ static int video_mux_probe(struct platform_device *pdev)
struct device_node *ep;
struct video_mux *vmux;
unsigned int num_pads = 0;
+ unsigned int i;
int ret;
- int i;
vmux = devm_kzalloc(dev, sizeof(*vmux), GFP_KERNEL);
if (!vmux)
@@ -260,9 +369,11 @@ static int video_mux_probe(struct platform_device *pdev)
sizeof(*vmux->format_mbus),
GFP_KERNEL);
- for (i = 0; i < num_pads - 1; i++)
- vmux->pads[i].flags = MEDIA_PAD_FL_SINK;
- vmux->pads[num_pads - 1].flags = MEDIA_PAD_FL_SOURCE;
+ for (i = 0; i < num_pads; i++) {
+ vmux->pads[i].flags = (i < num_pads - 1) ? MEDIA_PAD_FL_SINK
+ : MEDIA_PAD_FL_SOURCE;
+ vmux->format_mbus[i] = video_mux_format_mbus_default;
+ }
vmux->subdev.entity.function = MEDIA_ENT_F_VID_MUX;
ret = media_entity_pads_init(&vmux->subdev.entity, num_pads,
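
The rewritten pad loop folds the sink/source split and the default-format initialisation into a single pass: every pad except the last is a sink, the last is the source. A self-contained sanity sketch of that rule for a hypothetical 4-pad mux (the printf stands in for the MEDIA_PAD_FL_* assignment):

#include <stdio.h>

int main(void)
{
	unsigned int num_pads = 4; /* illustrative: 3 inputs + 1 output */
	unsigned int i;

	for (i = 0; i < num_pads; i++)
		printf("pad %u: %s\n", i,
		       i < num_pads - 1 ? "MEDIA_PAD_FL_SINK"
					: "MEDIA_PAD_FL_SOURCE");
	return 0;
}
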
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index 065483e62db4..462099a141e4 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -140,6 +140,9 @@ static struct vim2m_fmt *find_format(struct v4l2_format *f)
struct vim2m_dev {
struct v4l2_device v4l2_dev;
struct video_device vfd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device mdev;
+#endif
atomic_t num_inst;
struct mutex dev_mutex;
@@ -1016,11 +1019,10 @@ static int vim2m_probe(struct platform_device *pdev)
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
- goto unreg_dev;
+ goto unreg_v4l2;
}
video_set_drvdata(vfd, dev);
- snprintf(vfd->name, sizeof(vfd->name), "%s", vim2m_videodev.name);
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
@@ -1031,15 +1033,39 @@ static int vim2m_probe(struct platform_device *pdev)
if (IS_ERR(dev->m2m_dev)) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(dev->m2m_dev);
- goto err_m2m;
+ goto unreg_dev;
+ }
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->mdev.dev = &pdev->dev;
+ strlcpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model));
+ media_device_init(&dev->mdev);
+ dev->v4l2_dev.mdev = &dev->mdev;
+
+ ret = v4l2_m2m_register_media_controller(dev->m2m_dev,
+ vfd, MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
+ goto unreg_m2m;
}
+ ret = media_device_register(&dev->mdev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register mem2mem media device\n");
+ goto unreg_m2m_mc;
+ }
+#endif
return 0;
-err_m2m:
+#ifdef CONFIG_MEDIA_CONTROLLER
+unreg_m2m_mc:
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+unreg_m2m:
v4l2_m2m_release(dev->m2m_dev);
- video_unregister_device(&dev->vfd);
+#endif
unreg_dev:
+ video_unregister_device(&dev->vfd);
+unreg_v4l2:
v4l2_device_unregister(&dev->v4l2_dev);
return ret;
@@ -1050,6 +1076,12 @@ static int vim2m_remove(struct platform_device *pdev)
struct vim2m_dev *dev = platform_get_drvdata(pdev);
v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_NAME);
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_unregister(&dev->mdev);
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+ media_device_cleanup(&dev->mdev);
+#endif
v4l2_m2m_release(dev->m2m_dev);
del_timer_sync(&dev->timer);
video_unregister_device(&dev->vfd);
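
The relabelled error paths in vim2m_probe() follow the usual kernel goto-unwind idiom: each label undoes only the steps that had already succeeded, in reverse order, and the labels are named after what they undo. A self-contained sketch of that shape (the step names are illustrative, not the driver's):

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("%s\n", name);
	return fail ? -1 : 0;
}

static int probe(void)
{
	int ret;

	ret = step("register video device", 0);
	if (ret)
		goto err_v4l2;
	ret = step("init m2m device", 0);
	if (ret)
		goto err_video;
	ret = step("register media device", 1); /* forced failure */
	if (ret)
		goto err_m2m;
	return 0;

err_m2m:
	printf("undo: release m2m device\n");
err_video:
	printf("undo: unregister video device\n");
err_v4l2:
	printf("undo: unregister v4l2 device\n");
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}
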
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index fe088a953860..9246f265de31 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -328,7 +328,6 @@ static int vimc_probe(struct platform_device *pdev)
if (ret) {
media_device_cleanup(&vimc->mdev);
vimc_rm_subdevs(vimc);
- kfree(vimc);
return ret;
}
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index 6b0bfa091592..5429193fbb91 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -295,7 +295,7 @@ static int vivid_user_vid_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_AUTOGAIN:
- dev->gain->val = dev->jiffies_vid_cap & 0xff;
+ dev->gain->val = (jiffies_to_msecs(jiffies) / 1000) & 0xff;
break;
}
return 0;
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 3fdb280c36ca..f06003bb8e42 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -477,7 +477,7 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
/* Updates stream time, only update at the start of a new frame. */
if (dev->field_cap != V4L2_FIELD_ALTERNATE ||
- (buf->vb.sequence & 1) == 0)
+ (dev->vid_cap_seq_count & 1) == 0)
dev->ms_vid_cap =
jiffies_to_msecs(jiffies - dev->jiffies_vid_cap);
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index 33f632331474..56c62122a81a 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -53,6 +53,7 @@ struct vsp1_uif;
#define VSP1_HAS_HGO (1 << 7)
#define VSP1_HAS_HGT (1 << 8)
#define VSP1_HAS_BRS (1 << 9)
+#define VSP1_HAS_EXT_DL (1 << 10)
struct vsp1_device_info {
u32 version;
@@ -68,6 +69,8 @@ struct vsp1_device_info {
bool uapi;
};
+#define vsp1_feature(vsp1, f) ((vsp1)->info->features & (f))
+
struct vsp1_device {
struct device *dev;
const struct vsp1_device_info *info;
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index d9b9cdd8fbe2..26289adaf658 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -22,29 +22,79 @@
#define VSP1_DLH_INT_ENABLE (1 << 1)
#define VSP1_DLH_AUTO_START (1 << 0)
+#define VSP1_DLH_EXT_PRE_CMD_EXEC (1 << 9)
+#define VSP1_DLH_EXT_POST_CMD_EXEC (1 << 8)
+
struct vsp1_dl_header_list {
u32 num_bytes;
u32 addr;
-} __attribute__((__packed__));
+} __packed;
struct vsp1_dl_header {
u32 num_lists;
struct vsp1_dl_header_list lists[8];
u32 next_header;
u32 flags;
-} __attribute__((__packed__));
+} __packed;
+
+/**
+ * struct vsp1_dl_ext_header - Extended display list header
+ * @padding: padding zero bytes for alignment
+ * @pre_ext_dl_num_cmd: number of pre-extended command bodies to parse
+ * @flags: enables or disables execution of the pre and post commands
+ * @pre_ext_dl_plist: start address of pre-extended display list bodies
+ * @post_ext_dl_num_cmd: number of post-extended command bodies to parse
+ * @post_ext_dl_plist: start address of post-extended display list bodies
+ */
+struct vsp1_dl_ext_header {
+ u32 padding;
+
+ /*
+ * The datasheet represents flags as stored before pre_ext_dl_num_cmd,
+ * expecting 32-bit accesses. The flags are appropriate to the whole
+ * header, not just the pre_ext command, and thus warrant being
+	 * separated out. Due to byte ordering, representing them as 16-bit
+	 * values here requires that the flags be positioned after
+	 * pre_ext_dl_num_cmd.
+ */
+ u16 pre_ext_dl_num_cmd;
+ u16 flags;
+ u32 pre_ext_dl_plist;
+
+ u32 post_ext_dl_num_cmd;
+ u32 post_ext_dl_plist;
+} __packed;
+
+struct vsp1_dl_header_extended {
+ struct vsp1_dl_header header;
+ struct vsp1_dl_ext_header ext;
+} __packed;
struct vsp1_dl_entry {
u32 addr;
u32 data;
-} __attribute__((__packed__));
+} __packed;
+
+/**
+ * struct vsp1_pre_ext_dl_body - Pre Extended Display List Body
+ * @opcode: Extended display list command operation code
+ * @flags: Pre-extended command flags. These are specific to each command
+ * @address_set: Source address set pointer. Must have 16-byte alignment
+ * @reserved: Zero bits for alignment.
+ */
+struct vsp1_pre_ext_dl_body {
+ u32 opcode;
+ u32 flags;
+ u32 address_set;
+ u32 reserved;
+} __packed;
/**
* struct vsp1_dl_body - Display list body
* @list: entry in the display list list of bodies
* @free: entry in the pool free body list
+ * @refcnt: reference tracking for the body
* @pool: pool to which this body belongs
- * @vsp1: the VSP1 device
* @entries: array of entries
* @dma: DMA address of the entries
* @size: size of the DMA memory in bytes
@@ -58,7 +108,6 @@ struct vsp1_dl_body {
refcount_t refcnt;
struct vsp1_dl_body_pool *pool;
- struct vsp1_device *vsp1;
struct vsp1_dl_entry *entries;
dma_addr_t dma;
@@ -93,13 +142,40 @@ struct vsp1_dl_body_pool {
};
/**
+ * struct vsp1_cmd_pool - Display List commands pool
+ * @dma: DMA address of the entries
+ * @size: size of the full DMA memory pool in bytes
+ * @mem: CPU memory pointer for the pool
+ * @cmds: Array of command structures for the pool
+ * @free: Free pool entries
+ * @lock: Protects the free list
+ * @vsp1: the VSP1 device
+ */
+struct vsp1_dl_cmd_pool {
+ /* DMA allocation */
+ dma_addr_t dma;
+ size_t size;
+ void *mem;
+
+ struct vsp1_dl_ext_cmd *cmds;
+ struct list_head free;
+
+ spinlock_t lock;
+
+ struct vsp1_device *vsp1;
+};
+
+/**
* struct vsp1_dl_list - Display list
* @list: entry in the display list manager lists
* @dlm: the display list manager
- * @header: display list header, NULL for headerless lists
+ * @header: display list header
+ * @extension: extended display list header. NULL for normal lists
* @dma: DMA address for the header
* @body0: first display list body
* @bodies: list of extra display list bodies
+ * @pre_cmd: pre command to be issued through extended dl header
+ * @post_cmd: post command to be issued through extended dl header
* @has_chain: if true, indicates that there's a partition chain
* @chain: entry in the display list partition chain
* @internal: whether the display list is used for internal purpose
@@ -109,26 +185,24 @@ struct vsp1_dl_list {
struct vsp1_dl_manager *dlm;
struct vsp1_dl_header *header;
+ struct vsp1_dl_ext_header *extension;
dma_addr_t dma;
struct vsp1_dl_body *body0;
struct list_head bodies;
+ struct vsp1_dl_ext_cmd *pre_cmd;
+ struct vsp1_dl_ext_cmd *post_cmd;
+
bool has_chain;
struct list_head chain;
bool internal;
};
-enum vsp1_dl_mode {
- VSP1_DL_MODE_HEADER,
- VSP1_DL_MODE_HEADERLESS,
-};
-
/**
* struct vsp1_dl_manager - Display List manager
* @index: index of the related WPF
- * @mode: display list operation mode (header or headerless)
* @singleshot: execute the display list in single-shot mode
* @vsp1: the VSP1 device
* @lock: protects the free, active, queued, and pending lists
@@ -137,10 +211,10 @@ enum vsp1_dl_mode {
* @queued: list queued to the hardware (written to the DL registers)
* @pending: list waiting to be queued to the hardware
* @pool: body pool for the display list bodies
+ * @cmdpool: commands pool for extended display list
*/
struct vsp1_dl_manager {
unsigned int index;
- enum vsp1_dl_mode mode;
bool singleshot;
struct vsp1_device *vsp1;
@@ -151,6 +225,7 @@ struct vsp1_dl_manager {
struct vsp1_dl_list *pending;
struct vsp1_dl_body_pool *pool;
+ struct vsp1_dl_cmd_pool *cmdpool;
};
/* -----------------------------------------------------------------------------
@@ -314,12 +389,164 @@ void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
}
/* -----------------------------------------------------------------------------
+ * Display List Extended Command Management
+ */
+
+enum vsp1_extcmd_type {
+ VSP1_EXTCMD_AUTODISP,
+ VSP1_EXTCMD_AUTOFLD,
+};
+
+struct vsp1_extended_command_info {
+ u16 opcode;
+ size_t body_size;
+};
+
+static const struct vsp1_extended_command_info vsp1_extended_commands[] = {
+ [VSP1_EXTCMD_AUTODISP] = { 0x02, 96 },
+ [VSP1_EXTCMD_AUTOFLD] = { 0x03, 160 },
+};
+
+/**
+ * vsp1_dl_cmd_pool_create - Create a pool of commands from a single allocation
+ * @vsp1: The VSP1 device
+ * @type: The command pool type
+ * @num_cmds: The number of commands to allocate
+ *
+ * Allocate a pool of commands each with enough memory to contain the private
+ * data of each command. The allocation sizes are dependent upon the command
+ * type.
+ *
+ * Return a pointer to the pool on success or NULL if memory can't be allocated.
+ */
+static struct vsp1_dl_cmd_pool *
+vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
+ unsigned int num_cmds)
+{
+ struct vsp1_dl_cmd_pool *pool;
+ unsigned int i;
+ size_t cmd_size;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->free);
+
+ pool->cmds = kcalloc(num_cmds, sizeof(*pool->cmds), GFP_KERNEL);
+ if (!pool->cmds) {
+ kfree(pool);
+ return NULL;
+ }
+
+ cmd_size = sizeof(struct vsp1_pre_ext_dl_body) +
+ vsp1_extended_commands[type].body_size;
+ cmd_size = ALIGN(cmd_size, 16);
+
+ pool->size = cmd_size * num_cmds;
+ pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
+ GFP_KERNEL);
+ if (!pool->mem) {
+ kfree(pool->cmds);
+ kfree(pool);
+ return NULL;
+ }
+
+ for (i = 0; i < num_cmds; ++i) {
+ struct vsp1_dl_ext_cmd *cmd = &pool->cmds[i];
+ size_t cmd_offset = i * cmd_size;
+ /* data_offset must be 16 byte aligned for DMA. */
+ size_t data_offset = sizeof(struct vsp1_pre_ext_dl_body) +
+ cmd_offset;
+
+ cmd->pool = pool;
+ cmd->opcode = vsp1_extended_commands[type].opcode;
+
+ /*
+ * TODO: Auto-disp can utilise more than one extended body
+ * command per cmd.
+ */
+ cmd->num_cmds = 1;
+ cmd->cmds = pool->mem + cmd_offset;
+ cmd->cmd_dma = pool->dma + cmd_offset;
+
+ cmd->data = pool->mem + data_offset;
+ cmd->data_dma = pool->dma + data_offset;
+
+ list_add_tail(&cmd->free, &pool->free);
+ }
+
+ return pool;
+}
+
+static
+struct vsp1_dl_ext_cmd *vsp1_dl_ext_cmd_get(struct vsp1_dl_cmd_pool *pool)
+{
+ struct vsp1_dl_ext_cmd *cmd = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ if (!list_empty(&pool->free)) {
+ cmd = list_first_entry(&pool->free, struct vsp1_dl_ext_cmd,
+ free);
+ list_del(&cmd->free);
+ }
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return cmd;
+}
+
+static void vsp1_dl_ext_cmd_put(struct vsp1_dl_ext_cmd *cmd)
+{
+ unsigned long flags;
+
+ if (!cmd)
+ return;
+
+ /* Reset flags, these mark data usage. */
+ cmd->flags = 0;
+
+ spin_lock_irqsave(&cmd->pool->lock, flags);
+ list_add_tail(&cmd->free, &cmd->pool->free);
+ spin_unlock_irqrestore(&cmd->pool->lock, flags);
+}
+
+static void vsp1_dl_ext_cmd_pool_destroy(struct vsp1_dl_cmd_pool *pool)
+{
+ if (!pool)
+ return;
+
+ if (pool->mem)
+ dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
+ pool->dma);
+
+ kfree(pool->cmds);
+ kfree(pool);
+}
+
+struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl)
+{
+ struct vsp1_dl_manager *dlm = dl->dlm;
+
+ if (dl->pre_cmd)
+ return dl->pre_cmd;
+
+ dl->pre_cmd = vsp1_dl_ext_cmd_get(dlm->cmdpool);
+
+ return dl->pre_cmd;
+}
+
+/* -----------------------------------------------------------------------------
* Display List Transaction Management
*/
static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
struct vsp1_dl_list *dl;
+ size_t header_offset;
dl = kzalloc(sizeof(*dl), GFP_KERNEL);
if (!dl)
@@ -332,16 +559,14 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
dl->body0 = vsp1_dl_body_get(dlm->pool);
if (!dl->body0)
return NULL;
- if (dlm->mode == VSP1_DL_MODE_HEADER) {
- size_t header_offset = dl->body0->max_entries
- * sizeof(*dl->body0->entries);
- dl->header = ((void *)dl->body0->entries) + header_offset;
- dl->dma = dl->body0->dma + header_offset;
+ header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);
- memset(dl->header, 0, sizeof(*dl->header));
- dl->header->lists[0].addr = dl->body0->dma;
- }
+ dl->header = ((void *)dl->body0->entries) + header_offset;
+ dl->dma = dl->body0->dma + header_offset;
+
+ memset(dl->header, 0, sizeof(*dl->header));
+ dl->header->lists[0].addr = dl->body0->dma;
return dl;
}
@@ -398,7 +623,7 @@ struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
/* This function must be called with the display list manager lock held.*/
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
- struct vsp1_dl_list *dl_child;
+ struct vsp1_dl_list *dl_next;
if (!dl)
return;
@@ -408,14 +633,20 @@ static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
* hardware operation.
*/
if (dl->has_chain) {
- list_for_each_entry(dl_child, &dl->chain, chain)
- __vsp1_dl_list_put(dl_child);
+ list_for_each_entry(dl_next, &dl->chain, chain)
+ __vsp1_dl_list_put(dl_next);
}
dl->has_chain = false;
vsp1_dl_list_bodies_put(dl);
+ vsp1_dl_ext_cmd_put(dl->pre_cmd);
+ vsp1_dl_ext_cmd_put(dl->post_cmd);
+
+ dl->pre_cmd = NULL;
+ dl->post_cmd = NULL;
+
/*
* body0 is reused as an optimisation as presently every display list
* has at least one body, thus we reinitialise the entries list.
@@ -473,16 +704,9 @@ struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
*
* The reference must be explicitly released by a call to vsp1_dl_body_put()
* when the body isn't needed anymore.
- *
- * Additional bodies are only usable for display lists in header mode.
- * Attempting to add a body to a header-less display list will return an error.
*/
int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
{
- /* Multi-body lists are only available in header mode. */
- if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
- return -EINVAL;
-
refcount_inc(&dlb->refcnt);
list_add_tail(&dlb->list, &dl->bodies);
@@ -503,22 +727,23 @@ int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
* Adding a display list to a chain passes ownership of the display list to
* the head display list item. The chain is released when the head dl item is
* put back with __vsp1_dl_list_put().
- *
- * Chained display lists are only usable in header mode. Attempts to add a
- * display list to a chain in header-less mode will return an error.
*/
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
struct vsp1_dl_list *dl)
{
- /* Chained lists are only available in header mode. */
- if (head->dlm->mode != VSP1_DL_MODE_HEADER)
- return -EINVAL;
-
head->has_chain = true;
list_add_tail(&dl->chain, &head->chain);
return 0;
}
+static void vsp1_dl_ext_cmd_fill_header(struct vsp1_dl_ext_cmd *cmd)
+{
+ cmd->cmds[0].opcode = cmd->opcode;
+ cmd->cmds[0].flags = cmd->flags;
+ cmd->cmds[0].address_set = cmd->data_dma;
+ cmd->cmds[0].reserved = 0;
+}
+
static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
struct vsp1_dl_manager *dlm = dl->dlm;
@@ -571,6 +796,27 @@ static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
*/
dl->header->flags = VSP1_DLH_INT_ENABLE;
}
+
+ if (!dl->extension)
+ return;
+
+ dl->extension->flags = 0;
+
+ if (dl->pre_cmd) {
+ dl->extension->pre_ext_dl_plist = dl->pre_cmd->cmd_dma;
+ dl->extension->pre_ext_dl_num_cmd = dl->pre_cmd->num_cmds;
+ dl->extension->flags |= VSP1_DLH_EXT_PRE_CMD_EXEC;
+
+ vsp1_dl_ext_cmd_fill_header(dl->pre_cmd);
+ }
+
+ if (dl->post_cmd) {
+ dl->extension->post_ext_dl_plist = dl->post_cmd->cmd_dma;
+ dl->extension->post_ext_dl_num_cmd = dl->post_cmd->num_cmds;
+ dl->extension->flags |= VSP1_DLH_EXT_POST_CMD_EXEC;
+
+ vsp1_dl_ext_cmd_fill_header(dl->post_cmd);
+ }
}
static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
@@ -581,17 +827,10 @@ static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
return false;
/*
- * Check whether the VSP1 has taken the update. In headerless mode the
- * hardware indicates this by clearing the UPD bit in the DL_BODY_SIZE
- * register, and in header mode by clearing the UPDHDR bit in the CMD
- * register.
+ * Check whether the VSP1 has taken the update. The hardware indicates
+ * this by clearing the UPDHDR bit in the CMD register.
*/
- if (dlm->mode == VSP1_DL_MODE_HEADERLESS)
- return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE)
- & VI6_DL_BODY_SIZE_UPD);
- else
- return !!(vsp1_read(vsp1, VI6_CMD(dlm->index))
- & VI6_CMD_UPDHDR);
+ return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
}
static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
@@ -599,26 +838,14 @@ static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
struct vsp1_dl_manager *dlm = dl->dlm;
struct vsp1_device *vsp1 = dlm->vsp1;
- if (dlm->mode == VSP1_DL_MODE_HEADERLESS) {
- /*
- * In headerless mode, program the hardware directly with the
- * display list body address and size and set the UPD bit. The
- * bit will be cleared by the hardware when the display list
- * processing starts.
- */
- vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0->dma);
- vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
- (dl->body0->num_entries * sizeof(*dl->header->lists)));
- } else {
- /*
- * In header mode, program the display list header address. If
- * the hardware is idle (single-shot mode or first frame in
- * continuous mode) it will then be started independently. If
- * the hardware is operating, the VI6_DL_HDR_REF_ADDR register
- * will be updated with the display list address.
- */
- vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
- }
+ /*
+ * Program the display list header address. If the hardware is idle
+ * (single-shot mode or first frame in continuous mode) it will then be
+ * started independently. If the hardware is operating, the
+ * VI6_DL_HDR_REF_ADDR register will be updated with the display list
+ * address.
+ */
+ vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
}
static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
@@ -673,18 +900,16 @@ static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
{
struct vsp1_dl_manager *dlm = dl->dlm;
- struct vsp1_dl_list *dl_child;
+ struct vsp1_dl_list *dl_next;
unsigned long flags;
- if (dlm->mode == VSP1_DL_MODE_HEADER) {
- /* Fill the header for the head and chained display lists. */
- vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));
+ /* Fill the header for the head and chained display lists. */
+ vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));
- list_for_each_entry(dl_child, &dl->chain, chain) {
- bool last = list_is_last(&dl_child->chain, &dl->chain);
+ list_for_each_entry(dl_next, &dl->chain, chain) {
+ bool last = list_is_last(&dl_next->chain, &dl->chain);
- vsp1_dl_list_fill_header(dl_child, last);
- }
+ vsp1_dl_list_fill_header(dl_next, last);
}
dl->internal = internal;
@@ -713,7 +938,7 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
* has completed at frame end. If the flag is not returned display list
* completion has been delayed by one frame because the display list commit
* raced with the frame end interrupt. The function always returns with the flag
- * set in header mode as display list processing is then not continuous and
+ * set in single-shot mode as display list processing is then not continuous and
* races never occur.
*
* The VSP1_DL_FRAME_END_INTERNAL flag indicates that the previous display list
@@ -722,6 +947,8 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
*/
unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
+ struct vsp1_device *vsp1 = dlm->vsp1;
+ u32 status = vsp1_read(vsp1, VI6_STATUS);
unsigned int flags = 0;
spin_lock(&dlm->lock);
@@ -747,6 +974,14 @@ unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
goto done;
/*
+ * Progressive streams report only TOP fields. If we have a BOTTOM
+ * field, we are interlaced, and expect the frame to complete on the
+ * next frame end interrupt.
+ */
+ if (status & VI6_STATUS_FLD_STD(dlm->index))
+ goto done;
+
+ /*
* The device starts processing the queued display list right after the
* frame end interrupt. The display list thus becomes active.
*/
@@ -781,16 +1016,17 @@ done:
/* Hardware Setup */
void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
+ unsigned int i;
u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
| VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
| VI6_DL_CTRL_DLE;
+ u32 ext_dl = (0x02 << VI6_DL_EXT_CTRL_POLINT_SHIFT)
+ | VI6_DL_EXT_CTRL_DLPRI | VI6_DL_EXT_CTRL_EXT;
- /*
- * The DRM pipeline operates with display lists in Continuous Frame
- * Mode, all other pipelines use manual start.
- */
- if (vsp1->drm)
- ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;
+ if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
+ for (i = 0; i < vsp1->info->wpf_count; ++i)
+ vsp1_write(vsp1, VI6_DL_EXT_CTRL(i), ext_dl);
+ }
vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
@@ -831,8 +1067,6 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
return NULL;
dlm->index = index;
- dlm->mode = index == 0 && !vsp1->info->uapi
- ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
dlm->singleshot = vsp1->info->uapi;
dlm->vsp1 = vsp1;
@@ -841,14 +1075,16 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
/*
* Initialize the display list body and allocate DMA memory for the body
- * and the optional header. Both are allocated together to avoid memory
+ * and the header. Both are allocated together to avoid memory
* fragmentation, with the header located right after the body in
* memory. An extra body is allocated on top of the prealloc to account
* for the cached body used by the vsp1_pipeline object.
*/
- header_size = dlm->mode == VSP1_DL_MODE_HEADER
- ? ALIGN(sizeof(struct vsp1_dl_header), 8)
- : 0;
+ header_size = vsp1_feature(vsp1, VSP1_HAS_EXT_DL) ?
+ sizeof(struct vsp1_dl_header_extended) :
+ sizeof(struct vsp1_dl_header);
+
+ header_size = ALIGN(header_size, 8);
dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
VSP1_DL_NUM_ENTRIES, header_size);
@@ -859,12 +1095,28 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
struct vsp1_dl_list *dl;
dl = vsp1_dl_list_alloc(dlm);
- if (!dl)
+ if (!dl) {
+ vsp1_dlm_destroy(dlm);
return NULL;
+ }
+
+ /* The extended header immediately follows the header. */
+ if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL))
+ dl->extension = (void *)dl->header
+ + sizeof(*dl->header);
list_add_tail(&dl->list, &dlm->free);
}
+ if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
+ dlm->cmdpool = vsp1_dl_cmd_pool_create(vsp1,
+ VSP1_EXTCMD_AUTOFLD, prealloc);
+ if (!dlm->cmdpool) {
+ vsp1_dlm_destroy(dlm);
+ return NULL;
+ }
+ }
+
return dlm;
}
@@ -881,4 +1133,5 @@ void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
}
vsp1_dl_body_pool_destroy(dlm->pool);
+ vsp1_dl_ext_cmd_pool_destroy(dlm->cmdpool);
}
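
In vsp1_dl_cmd_pool_create() each pool slot is a 16-byte struct vsp1_pre_ext_dl_body followed by the opcode-specific data, and the per-slot stride is rounded up to 16 bytes so every data_offset stays DMA-aligned. A standalone sketch of that offset arithmetic for the AUTOFLD opcode (160-byte body, per the vsp1_extended_commands table above):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t body = 16;	/* sizeof(struct vsp1_pre_ext_dl_body): 4 * u32 */
	size_t data = 160;	/* VSP1_EXTCMD_AUTOFLD body size */
	size_t stride = (body + data + 15) & ~(size_t)15; /* ALIGN(..., 16) */
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("cmd %u: cmd_offset %zu, data_offset %zu\n",
		       i, i * stride, i * stride + body);
	return 0;
}
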
diff --git a/drivers/media/platform/vsp1/vsp1_dl.h b/drivers/media/platform/vsp1/vsp1_dl.h
index 7dba0469c92e..125750dc8b5c 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.h
+++ b/drivers/media/platform/vsp1/vsp1_dl.h
@@ -20,6 +20,33 @@ struct vsp1_dl_manager;
#define VSP1_DL_FRAME_END_COMPLETED BIT(0)
#define VSP1_DL_FRAME_END_INTERNAL BIT(1)
+/**
+ * struct vsp1_dl_ext_cmd - Extended Display command
+ * @pool: pool to which this command belongs
+ * @free: entry in the pool of free commands list
+ * @opcode: command type opcode
+ * @flags: flags used by the command
+ * @cmds: array of command bodies for this extended cmd
+ * @num_cmds: quantity of commands in @cmds array
+ * @cmd_dma: DMA address of the command body
+ * @data: memory allocation for command-specific data
+ * @data_dma: DMA address for command-specific data
+ */
+struct vsp1_dl_ext_cmd {
+ struct vsp1_dl_cmd_pool *pool;
+ struct list_head free;
+
+ u8 opcode;
+ u32 flags;
+
+ struct vsp1_pre_ext_dl_body *cmds;
+ unsigned int num_cmds;
+ dma_addr_t cmd_dma;
+
+ void *data;
+ dma_addr_t data_dma;
+};
+
void vsp1_dlm_setup(struct vsp1_device *vsp1);
struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
@@ -33,6 +60,7 @@ struct vsp1_dl_body *vsp1_dlm_dl_body_get(struct vsp1_dl_manager *dlm);
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm);
void vsp1_dl_list_put(struct vsp1_dl_list *dl);
struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl);
+struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl);
void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal);
struct vsp1_dl_body_pool *
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index a99fc0ced7a7..b9c0f695d002 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -670,9 +670,11 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
drm_pipe->width = cfg->width;
drm_pipe->height = cfg->height;
+ pipe->interlaced = cfg->interlaced;
- dev_dbg(vsp1->dev, "%s: configuring LIF%u with format %ux%u\n",
- __func__, pipe_index, cfg->width, cfg->height);
+ dev_dbg(vsp1->dev, "%s: configuring LIF%u with format %ux%u%s\n",
+ __func__, pipe_index, cfg->width, cfg->height,
+ pipe->interlaced ? "i" : "");
mutex_lock(&vsp1->drm->lock);
@@ -803,7 +805,7 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
*/
fmtinfo = vsp1_get_format_info(vsp1, cfg->pixelformat);
if (!fmtinfo) {
- dev_dbg(vsp1->dev, "Unsupport pixel format %08x for RPF\n",
+ dev_dbg(vsp1->dev, "Unsupported pixel format %08x for RPF\n",
cfg->pixelformat);
return -EINVAL;
}
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 5d82f6ee56ea..b6619c9c18bb 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -265,7 +265,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
}
/* Instantiate all the entities. */
- if (vsp1->info->features & VSP1_HAS_BRS) {
+ if (vsp1_feature(vsp1, VSP1_HAS_BRS)) {
vsp1->brs = vsp1_brx_create(vsp1, VSP1_ENTITY_BRS);
if (IS_ERR(vsp1->brs)) {
ret = PTR_ERR(vsp1->brs);
@@ -275,7 +275,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->brs->entity.list_dev, &vsp1->entities);
}
- if (vsp1->info->features & VSP1_HAS_BRU) {
+ if (vsp1_feature(vsp1, VSP1_HAS_BRU)) {
vsp1->bru = vsp1_brx_create(vsp1, VSP1_ENTITY_BRU);
if (IS_ERR(vsp1->bru)) {
ret = PTR_ERR(vsp1->bru);
@@ -285,7 +285,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->bru->entity.list_dev, &vsp1->entities);
}
- if (vsp1->info->features & VSP1_HAS_CLU) {
+ if (vsp1_feature(vsp1, VSP1_HAS_CLU)) {
vsp1->clu = vsp1_clu_create(vsp1);
if (IS_ERR(vsp1->clu)) {
ret = PTR_ERR(vsp1->clu);
@@ -311,7 +311,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
- if (vsp1->info->features & VSP1_HAS_HGO && vsp1->info->uapi) {
+ if (vsp1_feature(vsp1, VSP1_HAS_HGO) && vsp1->info->uapi) {
vsp1->hgo = vsp1_hgo_create(vsp1);
if (IS_ERR(vsp1->hgo)) {
ret = PTR_ERR(vsp1->hgo);
@@ -322,7 +322,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
&vsp1->entities);
}
- if (vsp1->info->features & VSP1_HAS_HGT && vsp1->info->uapi) {
+ if (vsp1_feature(vsp1, VSP1_HAS_HGT) && vsp1->info->uapi) {
vsp1->hgt = vsp1_hgt_create(vsp1);
if (IS_ERR(vsp1->hgt)) {
ret = PTR_ERR(vsp1->hgt);
@@ -353,7 +353,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
}
}
- if (vsp1->info->features & VSP1_HAS_LUT) {
+ if (vsp1_feature(vsp1, VSP1_HAS_LUT)) {
vsp1->lut = vsp1_lut_create(vsp1);
if (IS_ERR(vsp1->lut)) {
ret = PTR_ERR(vsp1->lut);
@@ -387,7 +387,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
}
}
- if (vsp1->info->features & VSP1_HAS_SRU) {
+ if (vsp1_feature(vsp1, VSP1_HAS_SRU)) {
vsp1->sru = vsp1_sru_create(vsp1);
if (IS_ERR(vsp1->sru)) {
ret = PTR_ERR(vsp1->sru);
@@ -537,7 +537,7 @@ static int vsp1_device_init(struct vsp1_device *vsp1)
vsp1_write(vsp1, VI6_DPR_HSI_ROUTE, VI6_DPR_NODE_UNUSED);
vsp1_write(vsp1, VI6_DPR_BRU_ROUTE, VI6_DPR_NODE_UNUSED);
- if (vsp1->info->features & VSP1_HAS_BRS)
+ if (vsp1_feature(vsp1, VSP1_HAS_BRS))
vsp1_write(vsp1, VI6_DPR_ILV_BRS_ROUTE, VI6_DPR_NODE_UNUSED);
vsp1_write(vsp1, VI6_DPR_HGO_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
@@ -754,7 +754,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPD_GEN3,
.model = "VSP2-D",
.gen = 3,
- .features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP,
+ .features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP | VSP1_HAS_EXT_DL,
.lif_count = 1,
.rpf_count = 5,
.uif_count = 1,
@@ -774,7 +774,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPDL_GEN3,
.model = "VSP2-DL",
.gen = 3,
- .features = VSP1_HAS_BRS | VSP1_HAS_BRU,
+ .features = VSP1_HAS_BRS | VSP1_HAS_BRU | VSP1_HAS_EXT_DL,
.lif_count = 2,
.rpf_count = 5,
.uif_count = 2,
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.h b/drivers/media/platform/vsp1/vsp1_pipe.h
index 743d8f0db45c..ae646c9ef337 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.h
+++ b/drivers/media/platform/vsp1/vsp1_pipe.h
@@ -104,6 +104,7 @@ struct vsp1_partition {
* @entities: list of entities in the pipeline
* @stream_config: cached stream configuration for video pipelines
* @configured: when false the @stream_config shall be written to the hardware
+ * @interlaced: True when the pipeline is configured in interlaced mode
* @partitions: The number of partitions used to process one frame
* @partition: The current partition for configuration to process
* @part_table: The pre-calculated partitions used by the pipeline
@@ -142,6 +143,7 @@ struct vsp1_pipeline {
struct vsp1_dl_body *stream_config;
bool configured;
+ bool interlaced;
unsigned int partitions;
struct vsp1_partition *partition;
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 0d249ff9f564..3738ff2f7b85 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -28,6 +28,7 @@
#define VI6_SRESET_SRTS(n) (1 << (n))
#define VI6_STATUS 0x0038
+#define VI6_STATUS_FLD_STD(n) (1 << ((n) + 28))
#define VI6_STATUS_SYS_ACT(n) (1 << ((n) + 8))
#define VI6_WPF_IRQ_ENB(n) (0x0048 + (n) * 12)
@@ -72,7 +73,7 @@
#define VI6_DL_SWAP_WDS (1 << 1)
#define VI6_DL_SWAP_BTS (1 << 0)
-#define VI6_DL_EXT_CTRL 0x011c
+#define VI6_DL_EXT_CTRL(n) (0x011c + (n) * 36)
#define VI6_DL_EXT_CTRL_NWE (1 << 16)
#define VI6_DL_EXT_CTRL_POLINT_MASK (0x3f << 8)
#define VI6_DL_EXT_CTRL_POLINT_SHIFT 8
@@ -80,6 +81,8 @@
#define VI6_DL_EXT_CTRL_EXPRI (1 << 4)
#define VI6_DL_EXT_CTRL_EXT (1 << 0)
+#define VI6_DL_EXT_AUTOFLD_INT BIT(0)
+
#define VI6_DL_BODY_SIZE 0x0120
#define VI6_DL_BODY_SIZE_UPD (1 << 24)
#define VI6_DL_BODY_SIZE_BS_MASK (0x1ffff << 0)
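
VI6_DL_EXT_CTRL becomes a per-WPF register with a 36-byte stride. A one-line standalone check of the addresses the new macro produces:

#include <stdio.h>

#define VI6_DL_EXT_CTRL(n) (0x011c + (n) * 36)

int main(void)
{
	unsigned int n;

	for (n = 0; n < 3; n++)
		printf("VI6_DL_EXT_CTRL(%u) = 0x%04x\n", n, VI6_DL_EXT_CTRL(n));
	return 0;
}
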
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 69e5fe6e6b50..f8005b60b9d2 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -20,6 +20,18 @@
#define RPF_MAX_WIDTH 8190
#define RPF_MAX_HEIGHT 8190
+/* Pre extended display list command data structure. */
+struct vsp1_extcmd_auto_fld_body {
+ u32 top_y0;
+ u32 bottom_y0;
+ u32 top_c0;
+ u32 bottom_c0;
+ u32 top_c1;
+ u32 bottom_c1;
+ u32 reserved0;
+ u32 reserved1;
+} __packed;
+
/* -----------------------------------------------------------------------------
* Device Access
*/
@@ -64,6 +76,14 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
pstride |= format->plane_fmt[1].bytesperline
<< VI6_RPF_SRCM_PSTRIDE_C_SHIFT;
+ /*
+	 * pstride packs both STRIDE_Y and STRIDE_C, but doubling the whole
+	 * register value is safe here as it doubles both fields at once.
+ */
+ if (pipe->interlaced)
+ pstride *= 2;
+
vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_PSTRIDE, pstride);
/* Format */
@@ -100,6 +120,9 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
top = compose->top;
}
+ if (pipe->interlaced)
+ top /= 2;
+
vsp1_rpf_write(rpf, dlb, VI6_RPF_LOC,
(left << VI6_RPF_LOC_HCOORD_SHIFT) |
(top << VI6_RPF_LOC_VCOORD_SHIFT));
@@ -169,6 +192,36 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
}
+static void vsp1_rpf_configure_autofld(struct vsp1_rwpf *rpf,
+ struct vsp1_dl_list *dl)
+{
+ const struct v4l2_pix_format_mplane *format = &rpf->format;
+ struct vsp1_dl_ext_cmd *cmd;
+ struct vsp1_extcmd_auto_fld_body *auto_fld;
+ u32 offset_y, offset_c;
+
+ cmd = vsp1_dl_get_pre_cmd(dl);
+ if (WARN_ONCE(!cmd, "Failed to obtain an autofld cmd"))
+ return;
+
+ /* Re-index our auto_fld to match the current RPF. */
+ auto_fld = cmd->data;
+ auto_fld = &auto_fld[rpf->entity.index];
+
+ auto_fld->top_y0 = rpf->mem.addr[0];
+ auto_fld->top_c0 = rpf->mem.addr[1];
+ auto_fld->top_c1 = rpf->mem.addr[2];
+
+ offset_y = format->plane_fmt[0].bytesperline;
+ offset_c = format->plane_fmt[1].bytesperline;
+
+ auto_fld->bottom_y0 = rpf->mem.addr[0] + offset_y;
+ auto_fld->bottom_c0 = rpf->mem.addr[1] + offset_c;
+ auto_fld->bottom_c1 = rpf->mem.addr[2] + offset_c;
+
+ cmd->flags |= VI6_DL_EXT_AUTOFLD_INT | BIT(16 + rpf->entity.index);
+}
+
static void rpf_configure_frame(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl,
@@ -221,6 +274,11 @@ static void rpf_configure_partition(struct vsp1_entity *entity,
crop.left += pipe->partition->rpf.left;
}
+ if (pipe->interlaced) {
+ crop.height = round_down(crop.height / 2, fmtinfo->vsub);
+ crop.top = round_down(crop.top / 2, fmtinfo->vsub);
+ }
+
vsp1_rpf_write(rpf, dlb, VI6_RPF_SRC_BSIZE,
(crop.width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
(crop.height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
@@ -249,9 +307,17 @@ static void rpf_configure_partition(struct vsp1_entity *entity,
fmtinfo->swap_uv)
swap(mem.addr[1], mem.addr[2]);
- vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_Y, mem.addr[0]);
- vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_C0, mem.addr[1]);
- vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_C1, mem.addr[2]);
+ /*
+	 * Interlaced pipelines will use the extended pre-cmd to program
+	 * the SRCM_ADDR_{Y,C0,C1} registers.
+ */
+ if (pipe->interlaced) {
+ vsp1_rpf_configure_autofld(rpf, dl);
+ } else {
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_Y, mem.addr[0]);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_C0, mem.addr[1]);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_C1, mem.addr[2]);
+ }
}
static void rpf_partition(struct vsp1_entity *entity,
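
Putting rpf_configure_stream() and vsp1_rpf_configure_autofld() together: the bottom field of each plane starts one bytesperline below the top field, while the doubled pstride makes the hardware step two lines per field. A standalone sketch of that address math for an assumed NV12 1920x1080 buffer (the DMA addresses are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long y = 0x40000000, c = 0x40200000; /* hypothetical DMA addrs */
	unsigned int bpl = 1920;		      /* bytesperline, NV12 */

	printf("top_y0 0x%08lx  bottom_y0 0x%08lx\n", y, y + bpl);
	printf("top_c0 0x%08lx  bottom_c0 0x%08lx\n", c, c + bpl);
	printf("per-field stride: %u bytes\n", 2 * bpl);
	return 0;
}
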
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 23c8f706b3f2..c2a1a7f97e26 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -141,13 +141,13 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
if (wpf->entity.index != 0) {
/* Only WPF0 supports flipping. */
num_flip_ctrls = 0;
- } else if (vsp1->info->features & VSP1_HAS_WPF_HFLIP) {
+ } else if (vsp1_feature(vsp1, VSP1_HAS_WPF_HFLIP)) {
/*
* When horizontal flip is supported the WPF implements three
* controls (horizontal flip, vertical flip and rotation).
*/
num_flip_ctrls = 3;
- } else if (vsp1->info->features & VSP1_HAS_WPF_VFLIP) {
+ } else if (vsp1_feature(vsp1, VSP1_HAS_WPF_VFLIP)) {
/*
* When only vertical flip is supported the WPF implements a
* single control (vertical flip).
@@ -276,7 +276,7 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
vsp1_wpf_write(wpf, dlb, VI6_WPF_DSWAP, fmtinfo->swap);
- if (vsp1->info->features & VSP1_HAS_WPF_HFLIP &&
+ if (vsp1_feature(vsp1, VSP1_HAS_WPF_HFLIP) &&
wpf->entity.index == 0)
vsp1_wpf_write(wpf, dlb, VI6_WPF_ROT_CTRL,
VI6_WPF_ROT_CTRL_LN16 |
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 8f9f8dfc3497..11aa94f189cb 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1096,7 +1096,7 @@ static __poll_t wl1273_fm_fops_poll(struct file *file,
struct wl1273_core *core = radio->core;
if (radio->owner && radio->owner != file)
- return -EBUSY;
+ return EPOLLERR;
radio->owner = file;
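
The wl1273 change fixes a type confusion: a poll handler returns a __poll_t event mask, not an errno, so the old -EBUSY was silently reinterpreted as a mask with nearly every event bit set. A tiny userspace demonstration of what that value looks like:

#include <stdio.h>
#include <errno.h>

int main(void)
{
	/* What the old 'return -EBUSY' handed to the poll core. */
	unsigned int mask = (unsigned int)-EBUSY;

	printf("-EBUSY viewed as an event mask: 0x%08x\n", mask);
	return 0;
}
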
diff --git a/drivers/media/radio/si4713/radio-usb-si4713.c b/drivers/media/radio/si4713/radio-usb-si4713.c
index 05c66701a899..1ebbf0217142 100644
--- a/drivers/media/radio/si4713/radio-usb-si4713.c
+++ b/drivers/media/radio/si4713/radio-usb-si4713.c
@@ -370,9 +370,6 @@ static int si4713_transfer(struct i2c_adapter *i2c_adapter,
int retval = -EINVAL;
int i;
- if (num <= 0)
- return 0;
-
for (i = 0; i < num; i++) {
if (msgs[i].flags & I2C_M_RD)
retval = si4713_i2c_read(radio, msgs[i].buf, msgs[i].len);
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 81b150e5dfdb..8b97fd1f0cea 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -196,14 +196,16 @@ void lirc_bpf_run(struct rc_dev *rcdev, u32 sample)
*/
void lirc_bpf_free(struct rc_dev *rcdev)
{
- struct bpf_prog **progs;
+ struct bpf_prog_array_item *item;
if (!rcdev->raw->progs)
return;
- progs = rcu_dereference(rcdev->raw->progs)->progs;
- while (*progs)
- bpf_prog_put(*progs++);
+ item = rcu_dereference(rcdev->raw->progs)->items;
+ while (item->prog) {
+ bpf_prog_put(item->prog);
+ item++;
+ }
bpf_prog_array_free(rcdev->raw->progs);
}
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index b5b9d87ba75c..fbec1a13dc6a 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -610,9 +610,9 @@ static int e4000_dvb_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops e4000_dvb_tuner_ops = {
.info = {
- .name = "Elonics E4000",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
+ .name = "Elonics E4000",
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
},
.init = e4000_dvb_init,
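
The long run of tuner conversions that follows is mechanical once the units are pinned down: the old frequency_min/frequency_max fields were in Hz for terrestrial and cable tuners but in kHz for satellite ones, whereas the new *_hz fields are always plain Hz, spelled with kHz/MHz multipliers. A quick standalone arithmetic check against two values in this patch (the macros below mirror the multipliers assumed by the conversions):

#include <stdio.h>

#define kHz 1000ULL
#define MHz 1000000ULL

int main(void)
{
	/* e4000 (terrestrial, old unit Hz): 174000000 == 174 * MHz */
	printf("%llu\n", 174 * MHz);
	/* m88rs6000t (satellite, old unit kHz): 950000 kHz == 950 * MHz */
	printf("%llu == %llu\n", 950000 * kHz, 950 * MHz);
	return 0;
}
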
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
index 145407dee3db..a983899c6b0b 100644
--- a/drivers/media/tuners/fc0011.c
+++ b/drivers/media/tuners/fc0011.c
@@ -472,10 +472,10 @@ static int fc0011_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
static const struct dvb_tuner_ops fc0011_tuner_ops = {
.info = {
- .name = "Fitipower FC0011",
+ .name = "Fitipower FC0011",
- .frequency_min = 45000000,
- .frequency_max = 1000000000,
+ .frequency_min_hz = 45 * MHz,
+ .frequency_max_hz = 1000 * MHz,
},
.release = fc0011_release,
diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
index 625ac6f51c39..e992b98ae5bc 100644
--- a/drivers/media/tuners/fc0012.c
+++ b/drivers/media/tuners/fc0012.c
@@ -415,11 +415,10 @@ exit:
static const struct dvb_tuner_ops fc0012_tuner_ops = {
.info = {
- .name = "Fitipower FC0012",
+ .name = "Fitipower FC0012",
- .frequency_min = 37000000, /* estimate */
- .frequency_max = 862000000, /* estimate */
- .frequency_step = 0,
+ .frequency_min_hz = 37 * MHz, /* estimate */
+ .frequency_max_hz = 862 * MHz, /* estimate */
},
.release = fc0012_release,
diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
index e606118d1a9b..fc62afb1450d 100644
--- a/drivers/media/tuners/fc0013.c
+++ b/drivers/media/tuners/fc0013.c
@@ -574,11 +574,10 @@ exit:
static const struct dvb_tuner_ops fc0013_tuner_ops = {
.info = {
- .name = "Fitipower FC0013",
+ .name = "Fitipower FC0013",
- .frequency_min = 37000000, /* estimate */
- .frequency_max = 1680000000, /* CHECK */
- .frequency_step = 0,
+ .frequency_min_hz = 37 * MHz, /* estimate */
+ .frequency_max_hz = 1680 * MHz, /* CHECK */
},
.release = fc0013_release,
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index 743184ae0d26..db26892aac84 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -355,9 +355,9 @@ static int fc2580_dvb_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops fc2580_dvb_tuner_ops = {
.info = {
- .name = "FCI FC2580",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
+ .name = "FCI FC2580",
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
},
.init = fc2580_dvb_init,
diff --git a/drivers/media/tuners/it913x.c b/drivers/media/tuners/it913x.c
index 27e5bc1c3cb5..b5eb39921e95 100644
--- a/drivers/media/tuners/it913x.c
+++ b/drivers/media/tuners/it913x.c
@@ -375,9 +375,9 @@ err:
static const struct dvb_tuner_ops it913x_tuner_ops = {
.info = {
- .name = "ITE IT913X",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
+ .name = "ITE IT913X",
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
},
.init = it913x_init,
diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
index 9f3e0fd4cad9..3df2f23a40be 100644
--- a/drivers/media/tuners/m88rs6000t.c
+++ b/drivers/media/tuners/m88rs6000t.c
@@ -569,9 +569,9 @@ err:
static const struct dvb_tuner_ops m88rs6000t_tuner_ops = {
.info = {
- .name = "Montage M88RS6000 Internal Tuner",
- .frequency_min = 950000,
- .frequency_max = 2150000,
+ .name = "Montage M88RS6000 Internal Tuner",
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
},
.init = m88rs6000t_init,
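
Note that this hunk is a unit conversion, not just a renaming: satellite tuners such as the M88RS6000 traditionally stated their range in kHz, so the old 950000/2150000 values equal the new 950 * MHz and 2150 * MHz. A quick check:

/* Sanity check: the old kHz values match the new Hz expressions. */
#include <assert.h>

int main(void)
{
	unsigned long kHz = 1000UL, MHz = 1000000UL;

	assert(950000 * kHz == 950 * MHz);
	assert(2150000 * kHz == 2150 * MHz);
	return 0;
}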
diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c
index 20ceb72e530b..721d8f722efb 100644
--- a/drivers/media/tuners/max2165.c
+++ b/drivers/media/tuners/max2165.c
@@ -377,10 +377,10 @@ static void max2165_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops max2165_tuner_ops = {
.info = {
- .name = "Maxim MAX2165",
- .frequency_min = 470000000,
- .frequency_max = 862000000,
- .frequency_step = 50000,
+ .name = "Maxim MAX2165",
+ .frequency_min_hz = 470 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.release = max2165_release,
diff --git a/drivers/media/tuners/mc44s803.c b/drivers/media/tuners/mc44s803.c
index 403c6b2aa53b..2023e081d9ad 100644
--- a/drivers/media/tuners/mc44s803.c
+++ b/drivers/media/tuners/mc44s803.c
@@ -300,10 +300,10 @@ static int mc44s803_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops mc44s803_tuner_ops = {
.info = {
- .name = "Freescale MC44S803",
- .frequency_min = 48000000,
- .frequency_max = 1000000000,
- .frequency_step = 100000,
+ .name = "Freescale MC44S803",
+ .frequency_min_hz = 48 * MHz,
+ .frequency_max_hz = 1000 * MHz,
+ .frequency_step_hz = 100 * kHz,
},
.release = mc44s803_release,
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 3d3c6815b6a7..4ace77cfe285 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -395,10 +395,10 @@ static void mt2060_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops mt2060_tuner_ops = {
.info = {
- .name = "Microtune MT2060",
- .frequency_min = 48000000,
- .frequency_max = 860000000,
- .frequency_step = 50000,
+ .name = "Microtune MT2060",
+ .frequency_min_hz = 48 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.release = mt2060_release,
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index 80dc3e241b4a..f4c8a7293ebb 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -2200,10 +2200,9 @@ static int mt2063_get_bandwidth(struct dvb_frontend *fe, u32 *bw)
static const struct dvb_tuner_ops mt2063_ops = {
.info = {
.name = "MT2063 Silicon Tuner",
- .frequency_min = 45000000,
- .frequency_max = 865000000,
- .frequency_step = 0,
- },
+ .frequency_min_hz = 45 * MHz,
+ .frequency_max_hz = 865 * MHz,
+ },
.init = mt2063_init,
.sleep = MT2063_Sleep,
diff --git a/drivers/media/tuners/mt2131.c b/drivers/media/tuners/mt2131.c
index 659bf19dc434..086a7b7cf634 100644
--- a/drivers/media/tuners/mt2131.c
+++ b/drivers/media/tuners/mt2131.c
@@ -235,10 +235,10 @@ static void mt2131_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops mt2131_tuner_ops = {
.info = {
- .name = "Microtune MT2131",
- .frequency_min = 48000000,
- .frequency_max = 860000000,
- .frequency_step = 50000,
+ .name = "Microtune MT2131",
+ .frequency_min_hz = 48 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.release = mt2131_release,
diff --git a/drivers/media/tuners/mt2266.c b/drivers/media/tuners/mt2266.c
index f4545b7f5da2..e6cc78720de4 100644
--- a/drivers/media/tuners/mt2266.c
+++ b/drivers/media/tuners/mt2266.c
@@ -304,10 +304,10 @@ static void mt2266_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops mt2266_tuner_ops = {
.info = {
- .name = "Microtune MT2266",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_step = 50000,
+ .name = "Microtune MT2266",
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.release = mt2266_release,
.init = mt2266_init,
diff --git a/drivers/media/tuners/mxl301rf.c b/drivers/media/tuners/mxl301rf.c
index 57b0e4862aaf..c628435a1b06 100644
--- a/drivers/media/tuners/mxl301rf.c
+++ b/drivers/media/tuners/mxl301rf.c
@@ -271,8 +271,8 @@ static const struct dvb_tuner_ops mxl301rf_ops = {
.info = {
.name = "MaxLinear MxL301RF",
- .frequency_min = 93000000,
- .frequency_max = 803142857,
+ .frequency_min_hz = 93 * MHz,
+ .frequency_max_hz = 803 * MHz + 142857,
},
.init = mxl301rf_init,
diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
index 355ef2959b7d..ec584316c812 100644
--- a/drivers/media/tuners/mxl5005s.c
+++ b/drivers/media/tuners/mxl5005s.c
@@ -4075,10 +4075,10 @@ static void mxl5005s_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops mxl5005s_tuner_ops = {
.info = {
- .name = "MaxLinear MXL5005S",
- .frequency_min = 48000000,
- .frequency_max = 860000000,
- .frequency_step = 50000,
+ .name = "MaxLinear MXL5005S",
+ .frequency_min_hz = 48 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.release = mxl5005s_release,
diff --git a/drivers/media/tuners/mxl5007t.c b/drivers/media/tuners/mxl5007t.c
index 4081fd97c3b2..54d9226aaff7 100644
--- a/drivers/media/tuners/mxl5007t.c
+++ b/drivers/media/tuners/mxl5007t.c
@@ -59,8 +59,6 @@ MODULE_PARM_DESC(debug, "set debug level");
/* ------------------------------------------------------------------------- */
-#define MHz 1000000
-
enum mxl5007t_mode {
MxL_MODE_ISDBT = 0,
MxL_MODE_DVBT = 1,
diff --git a/drivers/media/tuners/qm1d1b0004.c b/drivers/media/tuners/qm1d1b0004.c
index b4495cc1626b..008ad870c00f 100644
--- a/drivers/media/tuners/qm1d1b0004.c
+++ b/drivers/media/tuners/qm1d1b0004.c
@@ -186,8 +186,8 @@ static const struct dvb_tuner_ops qm1d1b0004_ops = {
.info = {
.name = "Sharp qm1d1b0004",
- .frequency_min = 950000,
- .frequency_max = 2150000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
},
.init = qm1d1b0004_init,
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index 642a065b9a07..83ca5dc047ea 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -388,8 +388,8 @@ static const struct dvb_tuner_ops qm1d1c0042_ops = {
.info = {
.name = "Sharp QM1D1C0042",
- .frequency_min = 950000,
- .frequency_max = 2150000,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
},
.init = qm1d1c0042_init,
diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
index b92be882ab3c..4565c06b1617 100644
--- a/drivers/media/tuners/qt1010.c
+++ b/drivers/media/tuners/qt1010.c
@@ -394,10 +394,10 @@ static int qt1010_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops qt1010_tuner_ops = {
.info = {
- .name = "Quantek QT1010",
- .frequency_min = QT1010_MIN_FREQ,
- .frequency_max = QT1010_MAX_FREQ,
- .frequency_step = QT1010_STEP,
+ .name = "Quantek QT1010",
+ .frequency_min_hz = QT1010_MIN_FREQ,
+ .frequency_max_hz = QT1010_MAX_FREQ,
+ .frequency_step_hz = QT1010_STEP,
},
.release = qt1010_release,
diff --git a/drivers/media/tuners/qt1010_priv.h b/drivers/media/tuners/qt1010_priv.h
index 4cb78ecc8985..f25324c63067 100644
--- a/drivers/media/tuners/qt1010_priv.h
+++ b/drivers/media/tuners/qt1010_priv.h
@@ -71,12 +71,14 @@ reg def meaning
2f 00 ? not used?
*/
-#define QT1010_STEP 125000 /* 125 kHz used by Windows drivers,
- hw could be more precise but we don't
- know how to use */
-#define QT1010_MIN_FREQ 48000000 /* 48 MHz */
-#define QT1010_MAX_FREQ 860000000 /* 860 MHz */
-#define QT1010_OFFSET 1246000000 /* 1246 MHz */
+#define QT1010_STEP (125 * kHz) /*
+ * step used by Windows drivers;
+ * hw could be more precise, but we don't
+ * know how to use it
+ */
+#define QT1010_MIN_FREQ (48 * MHz)
+#define QT1010_MAX_FREQ (860 * MHz)
+#define QT1010_OFFSET (1246 * MHz)
#define QT1010_WR 0
#define QT1010_RD 1
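
One detail worth keeping when converting headers like this: the new macro bodies are parenthesized, so they stay safe inside larger expressions. A small demonstration of what the parentheses prevent (the GOOD/BAD names are illustrative only):

#include <stdio.h>

#define kHz 1000UL
#define STEP_GOOD (125 * kHz) /* parenthesized, as in the patch */
#define STEP_BAD 125 * kHz    /* unparenthesized, for contrast */

int main(void)
{
	unsigned long f = 130000;

	printf("%lu\n", f % STEP_GOOD); /* 5000: f %% (125 * 1000) */
	printf("%lu\n", f % STEP_BAD);  /* 0:    (f %% 125) * 1000 */
	return 0;
}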
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index 3e14b9e2e763..ba4be08a8551 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -2297,9 +2297,9 @@ static void r820t_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops r820t_tuner_ops = {
.info = {
- .name = "Rafael Micro R820T",
- .frequency_min = 42000000,
- .frequency_max = 1002000000,
+ .name = "Rafael Micro R820T",
+ .frequency_min_hz = 42 * MHz,
+ .frequency_max_hz = 1002 * MHz,
},
.init = r820t_init,
.release = r820t_release,
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 9e34d31d724d..a08d8fe2bb1b 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -387,9 +387,9 @@ static int si2157_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops si2157_ops = {
.info = {
- .name = "Silicon Labs Si2141/Si2146/2147/2148/2157/2158",
- .frequency_min = 42000000,
- .frequency_max = 870000000,
+ .name = "Silicon Labs Si2141/Si2146/2147/2148/2157/2158",
+ .frequency_min_hz = 42 * MHz,
+ .frequency_max_hz = 870 * MHz,
},
.init = si2157_init,
diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
index 7b8068354fea..8326106ec2e3 100644
--- a/drivers/media/tuners/tda18212.c
+++ b/drivers/media/tuners/tda18212.c
@@ -175,11 +175,11 @@ static int tda18212_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops tda18212_tuner_ops = {
.info = {
- .name = "NXP TDA18212",
+ .name = "NXP TDA18212",
- .frequency_min = 48000000,
- .frequency_max = 864000000,
- .frequency_step = 1000,
+ .frequency_min_hz = 48 * MHz,
+ .frequency_max_hz = 864 * MHz,
+ .frequency_step_hz = 1 * kHz,
},
.set_params = tda18212_set_params,
diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
index c56fcf5d48e3..cbbd4d5e15da 100644
--- a/drivers/media/tuners/tda18218.c
+++ b/drivers/media/tuners/tda18218.c
@@ -269,11 +269,11 @@ static void tda18218_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops tda18218_tuner_ops = {
.info = {
- .name = "NXP TDA18218",
+ .name = "NXP TDA18218",
- .frequency_min = 174000000,
- .frequency_max = 864000000,
- .frequency_step = 1000,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 864 * MHz,
+ .frequency_step_hz = 1 * kHz,
},
.release = tda18218_release,
diff --git a/drivers/media/tuners/tda18250.c b/drivers/media/tuners/tda18250.c
index 20d12b063380..20d10ef45ab6 100644
--- a/drivers/media/tuners/tda18250.c
+++ b/drivers/media/tuners/tda18250.c
@@ -740,9 +740,9 @@ static int tda18250_sleep(struct dvb_frontend *fe)
static const struct dvb_tuner_ops tda18250_ops = {
.info = {
- .name = "NXP TDA18250",
- .frequency_min = 42000000,
- .frequency_max = 870000000,
+ .name = "NXP TDA18250",
+ .frequency_min_hz = 42 * MHz,
+ .frequency_max_hz = 870 * MHz,
},
.init = tda18250_init,
diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c
index 147155553648..4d69029229e4 100644
--- a/drivers/media/tuners/tda18271-fe.c
+++ b/drivers/media/tuners/tda18271-fe.c
@@ -1240,9 +1240,9 @@ static int tda18271_set_config(struct dvb_frontend *fe, void *priv_cfg)
static const struct dvb_tuner_ops tda18271_tuner_ops = {
.info = {
.name = "NXP TDA18271HD",
- .frequency_min = 45000000,
- .frequency_max = 864000000,
- .frequency_step = 62500
+ .frequency_min_hz = 45 * MHz,
+ .frequency_max_hz = 864 * MHz,
+ .frequency_step_hz = 62500
},
.init = tda18271_init,
.sleep = tda18271_sleep,
diff --git a/drivers/media/tuners/tda827x.c b/drivers/media/tuners/tda827x.c
index 8400808f8f7f..4391dabba510 100644
--- a/drivers/media/tuners/tda827x.c
+++ b/drivers/media/tuners/tda827x.c
@@ -816,9 +816,9 @@ static int tda827x_initial_sleep(struct dvb_frontend *fe)
static const struct dvb_tuner_ops tda827xo_tuner_ops = {
.info = {
.name = "Philips TDA827X",
- .frequency_min = 55000000,
- .frequency_max = 860000000,
- .frequency_step = 250000
+ .frequency_min_hz = 55 * MHz,
+ .frequency_max_hz = 860 * MHz,
+ .frequency_step_hz = 250 * kHz
},
.release = tda827x_release,
.init = tda827x_initial_init,
@@ -832,9 +832,9 @@ static const struct dvb_tuner_ops tda827xo_tuner_ops = {
static const struct dvb_tuner_ops tda827xa_tuner_ops = {
.info = {
.name = "Philips TDA827XA",
- .frequency_min = 44000000,
- .frequency_max = 906000000,
- .frequency_step = 62500
+ .frequency_min_hz = 44 * MHz,
+ .frequency_max_hz = 906 * MHz,
+ .frequency_step_hz = 62500
},
.release = tda827x_release,
.init = tda827x_init,
diff --git a/drivers/media/tuners/tua9001.c b/drivers/media/tuners/tua9001.c
index 9d70378fe2d3..5c89a130b47d 100644
--- a/drivers/media/tuners/tua9001.c
+++ b/drivers/media/tuners/tua9001.c
@@ -164,9 +164,9 @@ static int tua9001_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops tua9001_tuner_ops = {
.info = {
- .name = "Infineon TUA9001",
- .frequency_min = 170000000,
- .frequency_max = 862000000,
+ .name = "Infineon TUA9001",
+ .frequency_min_hz = 170 * MHz,
+ .frequency_max_hz = 862 * MHz,
},
.init = tua9001_init,
diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
index 36b88f820239..29c1473f2e9f 100644
--- a/drivers/media/tuners/tuner-simple.c
+++ b/drivers/media/tuners/tuner-simple.c
@@ -670,6 +670,7 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
int rc, j;
struct tuner_params *t_params;
unsigned int freq = params->frequency;
+ bool mono = params->audmode == V4L2_TUNER_MODE_MONO;
tun = priv->tun;
@@ -736,8 +737,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
config |= TDA9887_PORT2_ACTIVE;
if (t_params->intercarrier_mode)
config |= TDA9887_INTERCARRIER;
-/* if (t_params->port1_set_for_fm_mono)
- config &= ~TDA9887_PORT1_ACTIVE;*/
+ if (t_params->port1_set_for_fm_mono && mono)
+ config &= ~TDA9887_PORT1_ACTIVE;
if (t_params->fm_gain_normal)
config |= TDA9887_GAIN_NORMAL;
if (t_params->radio_if == 2)
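
The un-commented code now honours the audio mode the application asked for: port 1 is only switched for FM mono when userspace actually requested V4L2_TUNER_MODE_MONO. A hedged sketch of the userspace side that sets that mode (the device path is an assumption):

/* Sketch: request mono FM via V4L2, which makes params->audmode
 * equal V4L2_TUNER_MODE_MONO inside the driver. "/dev/radio0" is
 * an assumed node name.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int request_mono(void)
{
	struct v4l2_tuner t;
	int ret, fd = open("/dev/radio0", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&t, 0, sizeof(t));
	t.index = 0;
	t.audmode = V4L2_TUNER_MODE_MONO;
	ret = ioctl(fd, VIDIOC_S_TUNER, &t);
	close(fd);
	return ret;
}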
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 84744e138982..aa6861dcd3fd 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -376,9 +376,8 @@ static int load_all_firmwares(struct dvb_frontend *fe,
tuner_err("Firmware type ");
dump_firm_type(type);
printk(KERN_CONT
- "(%x), id %llx is corrupted (size=%d, expected %d)\n",
- type, (unsigned long long)id,
- (unsigned)(endp - p), size);
+ "(%x), id %llx is corrupted (size=%zd, expected %d)\n",
+ type, (unsigned long long)id, (endp - p), size);
goto corrupt;
}
@@ -616,8 +615,8 @@ static int load_firmware(struct dvb_frontend *fe, unsigned int type,
}
if ((size + p > endp)) {
- tuner_err("missing bytes: need %d, have %d\n",
- size, (int)(endp - p));
+ tuner_err("missing bytes: need %d, have %zd\n",
+ size, (endp - p));
return -EINVAL;
}
@@ -1440,9 +1439,9 @@ unlock:
static const struct dvb_tuner_ops xc2028_dvb_tuner_ops = {
.info = {
.name = "Xceive XC3028",
- .frequency_min = 42000000,
- .frequency_max = 864000000,
- .frequency_step = 50000,
+ .frequency_min_hz = 42 * MHz,
+ .frequency_max_hz = 864 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.set_config = xc2028_set_config,
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index f0fa8da08afa..eb6d65dae748 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -398,8 +398,8 @@ static int xc_set_rf_frequency(struct xc4000_priv *priv, u32 freq_hz)
dprintk(1, "%s(%u)\n", __func__, freq_hz);
- if ((freq_hz > xc4000_tuner_ops.info.frequency_max) ||
- (freq_hz < xc4000_tuner_ops.info.frequency_min))
+ if ((freq_hz > xc4000_tuner_ops.info.frequency_max_hz) ||
+ (freq_hz < xc4000_tuner_ops.info.frequency_min_hz))
return -EINVAL;
freq_code = (u16)(freq_hz / 15625);
@@ -815,9 +815,9 @@ static int xc4000_fwupload(struct dvb_frontend *fe)
p += sizeof(size);
if (!size || size > endp - p) {
- printk(KERN_ERR "Firmware type (%x), id %llx is corrupted (size=%d, expected %d)\n",
+ printk(KERN_ERR "Firmware type (%x), id %llx is corrupted (size=%zd, expected %d)\n",
type, (unsigned long long)id,
- (unsigned)(endp - p), size);
+ endp - p, size);
goto corrupt;
}
@@ -1635,10 +1635,10 @@ static void xc4000_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops xc4000_tuner_ops = {
.info = {
- .name = "Xceive XC4000",
- .frequency_min = 1000000,
- .frequency_max = 1023000000,
- .frequency_step = 50000,
+ .name = "Xceive XC4000",
+ .frequency_min_hz = 1 * MHz,
+ .frequency_max_hz = 1023 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.release = xc4000_release,
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index f7a8d05d1758..f6b65278e502 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -460,8 +460,8 @@ static int xc_set_rf_frequency(struct xc5000_priv *priv, u32 freq_hz)
dprintk(1, "%s(%u)\n", __func__, freq_hz);
- if ((freq_hz > xc5000_tuner_ops.info.frequency_max) ||
- (freq_hz < xc5000_tuner_ops.info.frequency_min))
+ if ((freq_hz > xc5000_tuner_ops.info.frequency_max_hz) ||
+ (freq_hz < xc5000_tuner_ops.info.frequency_min_hz))
return -EINVAL;
freq_code = (u16)(freq_hz / 15625);
@@ -1350,10 +1350,10 @@ static int xc5000_set_config(struct dvb_frontend *fe, void *priv_cfg)
static const struct dvb_tuner_ops xc5000_tuner_ops = {
.info = {
- .name = "Xceive XC5000",
- .frequency_min = 1000000,
- .frequency_max = 1023000000,
- .frequency_step = 50000,
+ .name = "Xceive XC5000",
+ .frequency_min_hz = 1 * MHz,
+ .frequency_max_hz = 1023 * MHz,
+ .frequency_step_hz = 50 * kHz,
},
.release = xc5000_release,
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 70e187971590..62b45062b1e6 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -133,7 +133,7 @@ static void au0828_irq_callback(struct urb *urb)
au0828_isocdbg("au0828_irq_callback called: status kill\n");
return;
default: /* unknown error */
- au0828_isocdbg("urb completition error %d.\n", urb->status);
+ au0828_isocdbg("urb completion error %d.\n", urb->status);
break;
}
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 0f13192634c7..9e5b3e7c3ef5 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -15,7 +15,7 @@ config VIDEO_CX231XX
config VIDEO_CX231XX_RC
bool "Conexant cx231xx Remote Controller additional support"
- depends on RC_CORE
+ depends on RC_CORE=y || RC_CORE=VIDEO_CX231XX
depends on VIDEO_CX231XX
default y
---help---
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index c4a84fb930b6..32ee7b3f21c9 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -112,7 +112,7 @@ static void cx231xx_audio_isocirq(struct urb *urb)
case -ESHUTDOWN:
return;
default: /* error */
- dev_dbg(dev->dev, "urb completition error %d.\n",
+ dev_dbg(dev->dev, "urb completion error %d.\n",
urb->status);
break;
}
@@ -126,6 +126,7 @@ static void cx231xx_audio_isocirq(struct urb *urb)
stride = runtime->frame_bits >> 3;
for (i = 0; i < urb->number_of_packets; i++) {
+ unsigned long flags;
int length = urb->iso_frame_desc[i].actual_length /
stride;
cp = (unsigned char *)urb->transfer_buffer +
@@ -148,7 +149,7 @@ static void cx231xx_audio_isocirq(struct urb *urb)
length * stride);
}
- snd_pcm_stream_lock(substream);
+ snd_pcm_stream_lock_irqsave(substream, flags);
dev->adev.hwptr_done_capture += length;
if (dev->adev.hwptr_done_capture >=
@@ -163,7 +164,7 @@ static void cx231xx_audio_isocirq(struct urb *urb)
runtime->period_size;
period_elapsed = 1;
}
- snd_pcm_stream_unlock(substream);
+ snd_pcm_stream_unlock_irqrestore(substream, flags);
}
if (period_elapsed)
snd_pcm_period_elapsed(substream);
@@ -202,7 +203,7 @@ static void cx231xx_audio_bulkirq(struct urb *urb)
case -ESHUTDOWN:
return;
default: /* error */
- dev_dbg(dev->dev, "urb completition error %d.\n",
+ dev_dbg(dev->dev, "urb completion error %d.\n",
urb->status);
break;
}
@@ -216,6 +217,7 @@ static void cx231xx_audio_bulkirq(struct urb *urb)
stride = runtime->frame_bits >> 3;
if (1) {
+ unsigned long flags;
int length = urb->actual_length /
stride;
cp = (unsigned char *)urb->transfer_buffer;
@@ -234,7 +236,7 @@ static void cx231xx_audio_bulkirq(struct urb *urb)
length * stride);
}
- snd_pcm_stream_lock(substream);
+ snd_pcm_stream_lock_irqsave(substream, flags);
dev->adev.hwptr_done_capture += length;
if (dev->adev.hwptr_done_capture >=
@@ -249,7 +251,7 @@ static void cx231xx_audio_bulkirq(struct urb *urb)
runtime->period_size;
period_elapsed = 1;
}
- snd_pcm_stream_unlock(substream);
+ snd_pcm_stream_unlock_irqrestore(substream, flags);
}
if (period_elapsed)
snd_pcm_period_elapsed(substream);
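
The locking change in this file follows one rule: a URB completion handler's context depends on the host controller driver (hard IRQ, softirq, or even task context), so the stream lock is taken with the _irqsave variants, which are correct in any of them. The pattern, reduced to its core:

#include <sound/pcm.h>

/* Minimal form of the pattern applied above: save and restore the
 * IRQ state so the same code is safe from any completion context.
 */
static void update_pointer(struct snd_pcm_substream *substream)
{
	unsigned long flags;

	snd_pcm_stream_lock_irqsave(substream, flags);
	/* ... hw pointer and period bookkeeping ... */
	snd_pcm_stream_unlock_irqrestore(substream, flags);
}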
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 53d846dea3d2..493c2dca6244 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -799,6 +799,7 @@ static void cx231xx_isoc_irq_callback(struct urb *urb)
struct cx231xx_video_mode *vmode =
container_of(dma_q, struct cx231xx_video_mode, vidq);
struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode);
+ unsigned long flags;
int i;
switch (urb->status) {
@@ -815,9 +816,9 @@ static void cx231xx_isoc_irq_callback(struct urb *urb)
}
/* Copy data from URB */
- spin_lock(&dev->video_mode.slock);
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
dev->video_mode.isoc_ctl.isoc_copy(dev, urb);
- spin_unlock(&dev->video_mode.slock);
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
/* Reset urb buffers */
for (i = 0; i < urb->number_of_packets; i++) {
@@ -844,6 +845,7 @@ static void cx231xx_bulk_irq_callback(struct urb *urb)
struct cx231xx_video_mode *vmode =
container_of(dma_q, struct cx231xx_video_mode, vidq);
struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode);
+ unsigned long flags;
switch (urb->status) {
case 0: /* success */
@@ -862,9 +864,9 @@ static void cx231xx_bulk_irq_callback(struct urb *urb)
}
/* Copy data from URB */
- spin_lock(&dev->video_mode.slock);
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
dev->video_mode.bulk_ctl.bulk_copy(dev, urb);
- spin_unlock(&dev->video_mode.slock);
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
/* Reset urb buffers */
urb->status = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index 6e1bef2a45bb..15a91169e749 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -376,8 +376,6 @@ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap,
struct cx231xx *dev = bus->dev;
int addr, rc, i, byte;
- if (num <= 0)
- return 0;
mutex_lock(&dev->i2c_lock);
for (i = 0; i < num; i++) {
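
The dropped num <= 0 guard here (and the matching removals in m920x and em28xx-i2c below) relies on the I2C core validating transfers before ever calling an adapter's master_xfer. A sketch of the invariant being relied on (paraphrased; treat the exact check as an assumption about the core, not a quote):

#include <linux/i2c.h>

/* The core-side guarantee these drivers now assume. */
static int checked_xfer(struct i2c_adapter *adap,
			struct i2c_msg *msgs, int num)
{
	if (!msgs || num < 1)
		return -EINVAL; /* rejected before ->master_xfer() runs */
	return adap->algo->master_xfer(adap, msgs, num);
}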
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index b621cf1aa96b..10b2eb7338ad 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -305,6 +305,7 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
struct cx231xx_video_mode *vmode =
container_of(dma_q, struct cx231xx_video_mode, vidq);
struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
+ unsigned long flags;
switch (urb->status) {
case 0: /* success */
@@ -316,14 +317,14 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
return;
default: /* error */
dev_err(dev->dev,
- "urb completition error %d.\n", urb->status);
+ "urb completion error %d.\n", urb->status);
break;
}
/* Copy data from URB */
- spin_lock(&dev->vbi_mode.slock);
+ spin_lock_irqsave(&dev->vbi_mode.slock, flags);
dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
- spin_unlock(&dev->vbi_mode.slock);
+ spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
/* Reset status */
urb->status = 0;
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index 082b8d67244b..df4412245a8a 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -96,10 +96,13 @@ config DVB_USB_GL861
tristate "Genesys Logic GL861 USB2.0 support"
depends on DVB_USB_V2
select DVB_ZL10353 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TC90522 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
help
Say Y here to support the MSI Megasky 580 (55801) DVB-T USB2.0
- receiver with USB ID 0db0:5581.
+ receiver with USB ID 0db0:5581, and the Friio White ISDB-T
+ receiver with USB ID 7a69:0001.
config DVB_USB_LME2510
tristate "LME DM04/QQBOX DVB-S USB2.0 support"
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
index 9d154fdae45b..3338b21d8b25 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.c
+++ b/drivers/media/usb/dvb-usb-v2/gl861.c
@@ -6,10 +6,14 @@
*
* see Documentation/media/dvb-drivers/dvb-usb.rst for more information
*/
+#include <linux/string.h>
+
#include "gl861.h"
#include "zl10353.h"
#include "qt1010.h"
+#include "tc90522.h"
+#include "dvb-pll.h"
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -26,10 +30,14 @@ static int gl861_i2c_msg(struct dvb_usb_device *d, u8 addr,
if (wo) {
req = GL861_REQ_I2C_WRITE;
type = GL861_WRITE;
+ buf = kmemdup(wbuf, wlen, GFP_KERNEL);
} else { /* rw */
req = GL861_REQ_I2C_READ;
type = GL861_READ;
+ buf = kmalloc(rlen, GFP_KERNEL);
}
+ if (!buf)
+ return -ENOMEM;
switch (wlen) {
case 1:
@@ -42,24 +50,19 @@ static int gl861_i2c_msg(struct dvb_usb_device *d, u8 addr,
default:
dev_err(&d->udev->dev, "%s: wlen=%d, aborting\n",
KBUILD_MODNAME, wlen);
+ kfree(buf);
return -EINVAL;
}
- buf = NULL;
- if (rlen > 0) {
- buf = kmalloc(rlen, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
+
usleep_range(1000, 2000); /* avoid I2C errors */
ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), req, type,
value, index, buf, rlen, 2000);
- if (rlen > 0) {
- if (ret > 0)
- memcpy(rbuf, buf, rlen);
- kfree(buf);
- }
+ if (!wo && ret > 0)
+ memcpy(rbuf, buf, rlen);
+
+ kfree(buf);
return ret;
}
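
The rewrite allocates the transfer buffer once, up front, for both directions: kmemdup() of the caller's data for writes, a bare kmalloc() for reads, with a single kfree() on every exit path. Beyond tidier error handling, the bounce buffer is mandatory: the USB core DMA-maps the data stage, so usb_control_msg() must be given heap memory, never a caller's stack buffer. A reduced sketch of the same shape (the request value 0x02 is illustrative only):

#include <linux/slab.h>
#include <linux/usb.h>

/* Sketch: read one register through a DMA-able bounce buffer. */
static int read_reg(struct usb_device *udev, u16 index, u8 *val)
{
	u8 *buf = kmalloc(1, GFP_KERNEL); /* heap, hence DMA-able */
	int ret;

	if (!buf)
		return -ENOMEM;
	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x02 /* illustrative request */, 0xc0,
			      0, index, buf, 1, 2000);
	if (ret > 0)
		*val = buf[0];
	kfree(buf);
	return ret;
}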
@@ -162,11 +165,478 @@ static struct dvb_usb_device_properties gl861_props = {
}
};
+
+/*
+ * For Friio
+ */
+
+struct friio_priv {
+ struct i2c_adapter *demod_sub_i2c;
+ struct i2c_client *i2c_client_demod;
+ struct i2c_client *i2c_client_tuner;
+ struct i2c_adapter tuner_adap;
+};
+
+struct friio_config {
+ struct i2c_board_info demod_info;
+ struct tc90522_config demod_cfg;
+
+ struct i2c_board_info tuner_info;
+ struct dvb_pll_config tuner_cfg;
+};
+
+static const struct friio_config friio_config = {
+ .demod_info = { I2C_BOARD_INFO(TC90522_I2C_DEV_TER, 0x18), },
+ .tuner_info = { I2C_BOARD_INFO("tua6034_friio", 0x60), },
+};
+
+/* For another type of I2C transaction:
+ * messages sent by a USB control read/write transaction with a data stage.
+ * Used in the init/config of Friio.
+ */
+static int
+gl861_i2c_write_ex(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(wlen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, wbuf, wlen);
+ ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
+ GL861_REQ_I2C_RAW, GL861_WRITE,
+ addr << (8 + 1), 0x0100, buf, wlen, 2000);
+ kfree(buf);
+ return ret;
+}
+
+static int
+gl861_i2c_read_ex(struct dvb_usb_device *d, u8 addr, u8 *rbuf, u16 rlen)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(rlen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
+ GL861_REQ_I2C_READ, GL861_READ,
+ addr << (8 + 1), 0x0100, buf, rlen, 2000);
+ if (ret > 0 && rlen > 0)
+ memcpy(rbuf, buf, rlen);
+ kfree(buf);
+ return ret;
+}
+
+/* For I2C transactions to the tuner of Friio (dvb_pll).
+ *
+ * Friio uses an irregular USB encapsulation for tuner i2c transactions:
+ * write transactions are encapsulated with a different USB 'request' value.
+ *
+ * Although all transactions are sent via the demod (tc90522),
+ * and the demod provides an i2c adapter for them, that adapter cannot be
+ * used in Friio, since it assumes the same parent adapter as the demod,
+ * which does not use the request value and uses the same one for both
+ * read and write. So we define a dedicated i2c adapter here.
+ */
+
+static int
+friio_i2c_tuner_read(struct dvb_usb_device *d, struct i2c_msg *msg)
+{
+ struct friio_priv *priv;
+ u8 addr;
+
+ priv = d_to_priv(d);
+ addr = priv->i2c_client_demod->addr;
+ return gl861_i2c_read_ex(d, addr, msg->buf, msg->len);
+}
+
+static int
+friio_i2c_tuner_write(struct dvb_usb_device *d, struct i2c_msg *msg)
+{
+ u8 *buf;
+ int ret;
+ struct friio_priv *priv;
+
+ priv = d_to_priv(d);
+
+ if (msg->len < 1)
+ return -EINVAL;
+
+ buf = kmalloc(msg->len + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ buf[0] = msg->addr << 1;
+ memcpy(buf + 1, msg->buf, msg->len);
+
+ ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
+ GL861_REQ_I2C_RAW, GL861_WRITE,
+ priv->i2c_client_demod->addr << (8 + 1),
+ 0xFE, buf, msg->len + 1, 2000);
+ kfree(buf);
+ return ret;
+}
+
+static int friio_tuner_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ int num)
+{
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ int i;
+
+ if (num > 2)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ for (i = 0; i < num; i++) {
+ int ret;
+
+ if (msg[i].flags & I2C_M_RD)
+ ret = friio_i2c_tuner_read(d, &msg[i]);
+ else
+ ret = friio_i2c_tuner_write(d, &msg[i]);
+
+ if (ret < 0)
+ break;
+
+ usleep_range(1000, 2000); /* avoid I2C errors */
+ }
+
+ mutex_unlock(&d->i2c_mutex);
+ return i;
+}
+
+static struct i2c_algorithm friio_tuner_i2c_algo = {
+ .master_xfer = friio_tuner_i2c_xfer,
+ .functionality = gl861_i2c_func,
+};
+
+/* GPIO control in Friio */
+
+#define FRIIO_CTL_LNB (1 << 0)
+#define FRIIO_CTL_STROBE (1 << 1)
+#define FRIIO_CTL_CLK (1 << 2)
+#define FRIIO_CTL_LED (1 << 3)
+
+#define FRIIO_LED_RUNNING 0x6400ff64
+#define FRIIO_LED_STOPPED 0x96ff00ff
+
+/* control PIC16F676 attached to Friio */
+static int friio_ext_ctl(struct dvb_usb_device *d,
+ u32 sat_color, int power_on)
+{
+ int i, ret;
+ struct i2c_msg msg;
+ u8 *buf;
+ u32 mask;
+ u8 power = (power_on) ? FRIIO_CTL_LNB : 0;
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ msg.addr = 0x00;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = buf;
+ buf[0] = 0x00;
+
+ /* send 2-bit header (0b10) */
+ buf[1] = power | FRIIO_CTL_LED | FRIIO_CTL_STROBE;
+ ret = i2c_transfer(&d->i2c_adap, &msg, 1);
+ buf[1] |= FRIIO_CTL_CLK;
+ ret += i2c_transfer(&d->i2c_adap, &msg, 1);
+
+ buf[1] = power | FRIIO_CTL_STROBE;
+ ret += i2c_transfer(&d->i2c_adap, &msg, 1);
+ buf[1] |= FRIIO_CTL_CLK;
+ ret += i2c_transfer(&d->i2c_adap, &msg, 1);
+
+ /* send the 32-bit (saturation, R, G, B) data serially */
+ mask = 1 << 31;
+ for (i = 0; i < 32; i++) {
+ buf[1] = power | FRIIO_CTL_STROBE;
+ if (sat_color & mask)
+ buf[1] |= FRIIO_CTL_LED;
+ ret += i2c_transfer(&d->i2c_adap, &msg, 1);
+ buf[1] |= FRIIO_CTL_CLK;
+ ret += i2c_transfer(&d->i2c_adap, &msg, 1);
+ mask >>= 1;
+ }
+
+ /* set the strobe off */
+ buf[1] = power;
+ ret += i2c_transfer(&d->i2c_adap, &msg, 1);
+ buf[1] |= FRIIO_CTL_CLK;
+ ret += i2c_transfer(&d->i2c_adap, &msg, 1);
+
+ kfree(buf);
+ return (ret == 70) ? 0 : -EREMOTEIO;
+}
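
The closing ret == 70 check encodes the bit-banged protocol's length: every bit-time is two single-message i2c_transfer() calls (data, then data | CLK), each returning 1 on success, and the sequence is 2 header bits, 32 data bits, and 1 strobe-off step. The arithmetic:

#include <assert.h>

int main(void)
{
	int header_bits = 2, data_bits = 32, strobe_off = 1;

	/* two transfers per bit-time, one return value each */
	assert((header_bits + data_bits + strobe_off) * 2 == 70);
	return 0;
}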
+
+/* init/config of gl861 for Friio */
+/* NOTE:
+ * This function cannot be moved to friio_init()/dvb_usbv2_init(),
+ * because the init done here must precede any activity such as I2C,
+ * while friio_init() is called by dvb-usbv2 after {_frontend, _tuner}_attach(),
+ * where I2C communication is already used.
+ * Thus this function is set up to be called from _power_ctl().
+ *
+ * Since it is called at the early init stage,
+ * where the i2c adapter is not yet initialized,
+ * we cannot use i2c_transfer() here.
+ */
+static int friio_reset(struct dvb_usb_device *d)
+{
+ int i, ret;
+ u8 wbuf[2], rbuf[2];
+
+ static const u8 friio_init_cmds[][2] = {
+ {0x33, 0x08}, {0x37, 0x40}, {0x3a, 0x1f}, {0x3b, 0xff},
+ {0x3c, 0x1f}, {0x3d, 0xff}, {0x38, 0x00}, {0x35, 0x00},
+ {0x39, 0x00}, {0x36, 0x00},
+ };
+
+ ret = usb_set_interface(d->udev, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ wbuf[0] = 0x11;
+ wbuf[1] = 0x02;
+ ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ if (ret < 0)
+ return ret;
+ usleep_range(2000, 3000);
+
+ wbuf[0] = 0x11;
+ wbuf[1] = 0x00;
+ ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Check if the dev is really a Friio White, since it might be
+ * another device, Friio Black, with the same VID/PID.
+ */
+
+ usleep_range(1000, 2000);
+ wbuf[0] = 0x03;
+ wbuf[1] = 0x80;
+ ret = gl861_i2c_write_ex(d, 0x09, wbuf, 2);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(2000, 3000);
+ ret = gl861_i2c_read_ex(d, 0x09, rbuf, 2);
+ if (ret < 0)
+ return ret;
+ if (rbuf[0] != 0xff || rbuf[1] != 0xff)
+ return -ENODEV;
+
+
+ usleep_range(1000, 2000);
+ ret = gl861_i2c_write_ex(d, 0x48, wbuf, 2);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(2000, 3000);
+ ret = gl861_i2c_read_ex(d, 0x48, rbuf, 2);
+ if (ret < 0)
+ return ret;
+ if (rbuf[0] != 0xff || rbuf[1] != 0xff)
+ return -ENODEV;
+
+ wbuf[0] = 0x30;
+ wbuf[1] = 0x04;
+ ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ wbuf[0] = 0x00;
+ wbuf[1] = 0x01;
+ ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ wbuf[0] = 0x06;
+ wbuf[1] = 0x0f;
+ ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(friio_init_cmds); i++) {
+ ret = gl861_i2c_msg(d, 0x00, (u8 *)friio_init_cmds[i], 2,
+ NULL, 0);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * DVB callbacks for Friio
+ */
+
+static int friio_power_ctrl(struct dvb_usb_device *d, int onoff)
+{
+ return onoff ? friio_reset(d) : 0;
+}
+
+static int friio_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ const struct i2c_board_info *info;
+ struct dvb_usb_device *d;
+ struct tc90522_config cfg;
+ struct i2c_client *cl;
+ struct friio_priv *priv;
+
+ info = &friio_config.demod_info;
+ d = adap_to_d(adap);
+ cl = dvb_module_probe("tc90522", info->type,
+ &d->i2c_adap, info->addr, &cfg);
+ if (!cl)
+ return -ENODEV;
+ adap->fe[0] = cfg.fe;
+
+ /* ignore cfg.tuner_i2c and create new one */
+ priv = adap_to_priv(adap);
+ priv->i2c_client_demod = cl;
+ priv->tuner_adap.algo = &friio_tuner_i2c_algo;
+ priv->tuner_adap.dev.parent = &d->udev->dev;
+ strlcpy(priv->tuner_adap.name, d->name, sizeof(priv->tuner_adap.name));
+ strlcat(priv->tuner_adap.name, "-tuner", sizeof(priv->tuner_adap.name));
+ priv->demod_sub_i2c = &priv->tuner_adap;
+ i2c_set_adapdata(&priv->tuner_adap, d);
+
+ return i2c_add_adapter(&priv->tuner_adap);
+}
+
+static int friio_frontend_detach(struct dvb_usb_adapter *adap)
+{
+ struct friio_priv *priv;
+
+ priv = adap_to_priv(adap);
+ i2c_del_adapter(&priv->tuner_adap);
+ dvb_module_release(priv->i2c_client_demod);
+ return 0;
+}
+
+static int friio_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ const struct i2c_board_info *info;
+ struct dvb_pll_config cfg;
+ struct i2c_client *cl;
+ struct friio_priv *priv;
+
+ priv = adap_to_priv(adap);
+ info = &friio_config.tuner_info;
+ cfg = friio_config.tuner_cfg;
+ cfg.fe = adap->fe[0];
+
+ cl = dvb_module_probe("dvb_pll", info->type,
+ priv->demod_sub_i2c, info->addr, &cfg);
+ if (!cl)
+ return -ENODEV;
+ priv->i2c_client_tuner = cl;
+ return 0;
+}
+
+static int friio_tuner_detach(struct dvb_usb_adapter *adap)
+{
+ struct friio_priv *priv;
+
+ priv = adap_to_priv(adap);
+ dvb_module_release(priv->i2c_client_tuner);
+ return 0;
+}
+
+static int friio_init(struct dvb_usb_device *d)
+{
+ int i;
+ int ret;
+ struct friio_priv *priv;
+
+ static const u8 demod_init[][2] = {
+ {0x01, 0x40}, {0x04, 0x38}, {0x05, 0x40}, {0x07, 0x40},
+ {0x0f, 0x4f}, {0x11, 0x21}, {0x12, 0x0b}, {0x13, 0x2f},
+ {0x14, 0x31}, {0x16, 0x02}, {0x21, 0xc4}, {0x22, 0x20},
+ {0x2c, 0x79}, {0x2d, 0x34}, {0x2f, 0x00}, {0x30, 0x28},
+ {0x31, 0x31}, {0x32, 0xdf}, {0x38, 0x01}, {0x39, 0x78},
+ {0x3b, 0x33}, {0x3c, 0x33}, {0x48, 0x90}, {0x51, 0x68},
+ {0x5e, 0x38}, {0x71, 0x00}, {0x72, 0x08}, {0x77, 0x00},
+ {0xc0, 0x21}, {0xc1, 0x10}, {0xe4, 0x1a}, {0xea, 0x1f},
+ {0x77, 0x00}, {0x71, 0x00}, {0x71, 0x00}, {0x76, 0x0c},
+ };
+
+ /* power on LNA? */
+ ret = friio_ext_ctl(d, FRIIO_LED_STOPPED, true);
+ if (ret < 0)
+ return ret;
+ msleep(20);
+
+ /* init/config demod */
+ priv = d_to_priv(d);
+ for (i = 0; i < ARRAY_SIZE(demod_init); i++) {
+ int ret;
+
+ ret = i2c_master_send(priv->i2c_client_demod, demod_init[i], 2);
+ if (ret < 0)
+ return ret;
+ }
+ msleep(100);
+ return 0;
+}
+
+static void friio_exit(struct dvb_usb_device *d)
+{
+ friio_ext_ctl(d, FRIIO_LED_STOPPED, false);
+}
+
+static int friio_streaming_ctrl(struct dvb_frontend *fe, int onoff)
+{
+ u32 led_color;
+
+ led_color = onoff ? FRIIO_LED_RUNNING : FRIIO_LED_STOPPED;
+ return friio_ext_ctl(fe_to_d(fe), led_color, true);
+}
+
+
+static struct dvb_usb_device_properties friio_props = {
+ .driver_name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .adapter_nr = adapter_nr,
+
+ .size_of_priv = sizeof(struct friio_priv),
+
+ .i2c_algo = &gl861_i2c_algo,
+ .power_ctrl = friio_power_ctrl,
+ .frontend_attach = friio_frontend_attach,
+ .frontend_detach = friio_frontend_detach,
+ .tuner_attach = friio_tuner_attach,
+ .tuner_detach = friio_tuner_detach,
+ .init = friio_init,
+ .exit = friio_exit,
+ .streaming_ctrl = friio_streaming_ctrl,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .stream = DVB_USB_STREAM_BULK(0x01, 8, 16384),
+ }
+ }
+};
+
static const struct usb_device_id gl861_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_MSI, USB_PID_MSI_MEGASKY580_55801,
&gl861_props, "MSI Mega Sky 55801 DVB-T USB2.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_ALINK, USB_VID_ALINK_DTU,
&gl861_props, "A-LINK DTU DVB-T USB2.0", NULL) },
+ { DVB_USB_DEVICE(USB_VID_774, USB_PID_FRIIO_WHITE,
+ &friio_props, "774 Friio White ISDB-T USB2.0", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, gl861_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.h b/drivers/media/usb/dvb-usb-v2/gl861.h
index b651b857e034..02c00e10748a 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.h
+++ b/drivers/media/usb/dvb-usb-v2/gl861.h
@@ -9,5 +9,6 @@
#define GL861_REQ_I2C_WRITE 0x01
#define GL861_REQ_I2C_READ 0x02
+#define GL861_REQ_I2C_RAW 0x03
#endif
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index 221cf46b4140..9f74453799a2 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -554,9 +554,9 @@ static const struct dvb_frontend_ops mxl111sf_demod_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "MaxLinear MxL111SF DVB-T demodulator",
- .frequency_min = 177000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 166666,
+ .frequency_min_hz = 177 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index 240d736bf1bb..92b3b9221a21 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -465,9 +465,9 @@ static const struct dvb_tuner_ops mxl111sf_tuner_tuner_ops = {
.info = {
.name = "MaxLinear MxL111SF",
#if 0
- .frequency_min = ,
- .frequency_max = ,
- .frequency_step = ,
+ .frequency_min_hz = ,
+ .frequency_max_hz = ,
+ .frequency_step_hz = ,
#endif
},
#if 0
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index c76e78f9638a..a970224a94bd 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1732,7 +1732,7 @@ static int rtl2832u_rc_query(struct dvb_usb_device *d)
goto exit;
ret = rtl28xxu_rd_reg(d, IR_RX_BC, &buf[0]);
- if (ret)
+ if (ret || buf[0] > sizeof(buf))
goto err;
len = buf[0];
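
The added condition treats the device-reported length as untrusted input: if buf[0] claims more bytes than the local buffer holds, the query bails out instead of overrunning it on the follow-up read. The general idiom (the context struct and helper names are hypothetical):

/* Generic form of the guard: validate a hardware-reported length
 * against the destination buffer before using it. device_ctx,
 * read_len_reg() and read_payload() are hypothetical names.
 */
static int read_report(struct device_ctx *ctx)
{
	u8 buf[128];
	unsigned int len;

	if (read_len_reg(ctx, &buf[0]))
		return -EIO;
	len = buf[0];
	if (len > sizeof(buf)) /* reject before the bulk read */
		return -EPROTO;
	return read_payload(ctx, buf, len);
}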
diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
index b0499f95ec45..024c751eb165 100644
--- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
+++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
@@ -40,7 +40,7 @@ static void usb_urb_complete(struct urb *urb)
return;
default: /* error */
dev_dbg_ratelimited(&stream->udev->dev,
- "%s: urb completition failed=%d\n",
+ "%s: urb completion failed=%d\n",
__func__, urb->status);
break;
}
@@ -69,7 +69,7 @@ static void usb_urb_complete(struct urb *urb)
break;
default:
dev_err(&stream->udev->dev,
- "%s: unknown endpoint type in completition handler\n",
+ "%s: unknown endpoint type in completion handler\n",
KBUILD_MODNAME);
return;
}
diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
index b8a1c62a0682..513df955eaa3 100644
--- a/drivers/media/usb/dvb-usb/Kconfig
+++ b/drivers/media/usb/dvb-usb/Kconfig
@@ -312,12 +312,6 @@ config DVB_USB_DTV5100
help
Say Y here to support the AME DTV-5100 USB2.0 DVB-T receiver.
-config DVB_USB_FRIIO
- tristate "Friio ISDB-T USB2.0 Receiver support"
- depends on DVB_USB
- help
- Say Y here to support the Japanese DTV receiver Friio.
-
config DVB_USB_AZ6027
tristate "Azurewave DVB-S/S2 USB2.0 AZ6027 support"
depends on DVB_USB
diff --git a/drivers/media/usb/dvb-usb/Makefile b/drivers/media/usb/dvb-usb/Makefile
index 9ad2618408ef..407d90ca8be0 100644
--- a/drivers/media/usb/dvb-usb/Makefile
+++ b/drivers/media/usb/dvb-usb/Makefile
@@ -71,9 +71,6 @@ obj-$(CONFIG_DVB_USB_DTV5100) += dvb-usb-dtv5100.o
dvb-usb-cinergyT2-objs := cinergyT2-core.o cinergyT2-fe.o
obj-$(CONFIG_DVB_USB_CINERGY_T2) += dvb-usb-cinergyT2.o
-dvb-usb-friio-objs := friio.o friio-fe.o
-obj-$(CONFIG_DVB_USB_FRIIO) += dvb-usb-friio.o
-
dvb-usb-az6027-objs := az6027.o
obj-$(CONFIG_DVB_USB_AZ6027) += dvb-usb-az6027.o
diff --git a/drivers/media/usb/dvb-usb/af9005-fe.c b/drivers/media/usb/dvb-usb/af9005-fe.c
index 7fbbc954da16..09cc3a21af65 100644
--- a/drivers/media/usb/dvb-usb/af9005-fe.c
+++ b/drivers/media/usb/dvb-usb/af9005-fe.c
@@ -1455,9 +1455,9 @@ static const struct dvb_frontend_ops af9005_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "AF9005 USB DVB-T",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 250000,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 250 * kHz,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index 5a2f81311fb7..df71df7ed524 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -295,9 +295,9 @@ static const struct dvb_frontend_ops cinergyt2_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = DRIVER_NAME,
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 166667,
+ .frequency_min_hz = 174 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 166667,
.caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2
| FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4
| FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index c53a969bc6be..091389fdf89e 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -1745,6 +1745,7 @@ static int dib809x_tuner_attach(struct dvb_usb_adapter *adap)
if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
return -ENODEV;
} else {
+ /* FIXME: check if it is fe_adap[1] */
if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
return -ENODEV;
}
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c
index 7e75aae34fb8..1ca3a51b2ae3 100644
--- a/drivers/media/usb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c
@@ -230,9 +230,9 @@ static const struct dvb_frontend_ops dtt200u_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "WideView USB DVB-T",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 250000,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 250 * kHz,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 0d4fdd34a710..9ce8b4d79d1f 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -2101,14 +2101,12 @@ static struct dvb_usb_device_properties s6x0_properties = {
}
};
-static struct dvb_usb_device_properties *p1100;
static const struct dvb_usb_device_description d1100 = {
"Prof 1100 USB ",
{&dw2102_table[PROF_1100], NULL},
{NULL},
};
-static struct dvb_usb_device_properties *s660;
static const struct dvb_usb_device_description d660 = {
"TeVii S660 USB",
{&dw2102_table[TEVII_S660], NULL},
@@ -2127,14 +2125,12 @@ static const struct dvb_usb_device_description d480_2 = {
{NULL},
};
-static struct dvb_usb_device_properties *p7500;
static const struct dvb_usb_device_description d7500 = {
"Prof 7500 USB DVB-S2",
{&dw2102_table[PROF_7500], NULL},
{NULL},
};
-static struct dvb_usb_device_properties *s421;
static const struct dvb_usb_device_description d421 = {
"TeVii S421 PCI",
{&dw2102_table[TEVII_S421], NULL},
@@ -2334,6 +2330,11 @@ static int dw2102_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
int retval = -ENOMEM;
+ struct dvb_usb_device_properties *p1100;
+ struct dvb_usb_device_properties *s660;
+ struct dvb_usb_device_properties *p7500;
+ struct dvb_usb_device_properties *s421;
+
p1100 = kmemdup(&s6x0_properties,
sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
if (!p1100)
@@ -2402,8 +2403,16 @@ static int dw2102_probe(struct usb_interface *intf,
0 == dvb_usb_device_init(intf, &t220_properties,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &tt_s2_4600_properties,
- THIS_MODULE, NULL, adapter_nr))
+ THIS_MODULE, NULL, adapter_nr)) {
+
+ /* clean up copied properties */
+ kfree(s421);
+ kfree(p7500);
+ kfree(s660);
+ kfree(p1100);
+
return 0;
+ }
retval = -ENODEV;
kfree(s421);
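
Freeing the four kmemdup()ed property blocks on the success path plugs a leak: dvb_usb_device_init() keeps its own copy of the properties (the old dvb-usb framework appears to memcpy() them into the device), so the duplicates are only needed until init returns. The ownership pattern in isolation (template_props and customize() are illustrative names; the copy-on-init behaviour is an assumption stated above):

/* Sketch: a kmemdup()ed properties block only lives until
 * dvb_usb_device_init() has copied it.
 */
static int probe_one(struct usb_interface *intf, short *adapter_nr)
{
	struct dvb_usb_device_properties *p;
	int ret;

	p = kmemdup(&template_props, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	customize(p);
	ret = dvb_usb_device_init(intf, p, THIS_MODULE, NULL, adapter_nr);
	kfree(p); /* safe whether init succeeded or failed */
	return ret;
}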
diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c
index 932f262452eb..e6bd0ed8d789 100644
--- a/drivers/media/usb/dvb-usb/friio-fe.c
+++ b/drivers/media/usb/dvb-usb/friio-fe.c
@@ -133,10 +133,10 @@ static int jdvbt90502_pll_set_freq(struct jdvbt90502_state *state, u32 freq)
u32 f;
deb_fe("%s: freq=%d, step=%d\n", __func__, freq,
- state->frontend.ops.info.frequency_stepsize);
+ state->frontend.ops.info.frequency_stepsize_hz);
/* freq -> oscilator frequency conversion. */
/* freq: 473,000,000 + n*6,000,000 [+ 142857 (center freq. shift)] */
- f = freq / state->frontend.ops.info.frequency_stepsize;
+ f = freq / state->frontend.ops.info.frequency_stepsize_hz;
/* add 399[1/7 MHZ] = 57MHz for the IF */
f += 399;
/* add center frequency shift if necessary */
@@ -413,10 +413,9 @@ static const struct dvb_frontend_ops jdvbt90502_ops = {
.delsys = { SYS_ISDBT },
.info = {
.name = "Comtech JDVBT90502 ISDB-T",
- .frequency_min = 473000000, /* UHF 13ch, center */
- .frequency_max = 767142857, /* UHF 62ch, center */
- .frequency_stepsize = JDVBT90502_PLL_CLK / JDVBT90502_PLL_DIVIDER,
- .frequency_tolerance = 0,
+ .frequency_min_hz = 473000000, /* UHF 13ch, center */
+ .frequency_max_hz = 767142857, /* UHF 62ch, center */
+ .frequency_stepsize_hz = JDVBT90502_PLL_CLK / JDVBT90502_PLL_DIVIDER,
/* NOTE: this driver ignores all parameters but frequency. */
.caps = FE_CAN_INVERSION_AUTO |
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index 51b026fa6bfb..22554d9abd43 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -255,9 +255,6 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu
int i, j;
int ret = 0;
- if (!num)
- return -EINVAL;
-
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
diff --git a/drivers/media/usb/dvb-usb/usb-urb.c b/drivers/media/usb/dvb-usb/usb-urb.c
index 5e05963f4220..9771f0954c69 100644
--- a/drivers/media/usb/dvb-usb/usb-urb.c
+++ b/drivers/media/usb/dvb-usb/usb-urb.c
@@ -33,7 +33,7 @@ static void usb_urb_complete(struct urb *urb)
case -ESHUTDOWN:
return;
default: /* error */
- deb_ts("urb completition error %d.\n", urb->status);
+ deb_ts("urb completion error %d.\n", urb->status);
break;
}
@@ -57,7 +57,7 @@ static void usb_urb_complete(struct urb *urb)
stream->complete(stream, b, urb->actual_length);
break;
default:
- err("unknown endpoint type in completition handler.");
+ err("unknown endpoint type in completion handler.");
return;
}
usb_submit_urb(urb,GFP_ATOMIC);
diff --git a/drivers/media/usb/dvb-usb/vp702x-fe.c b/drivers/media/usb/dvb-usb/vp702x-fe.c
index ae48146e005c..9eb811452f2e 100644
--- a/drivers/media/usb/dvb-usb/vp702x-fe.c
+++ b/drivers/media/usb/dvb-usb/vp702x-fe.c
@@ -349,10 +349,9 @@ static const struct dvb_frontend_ops vp702x_fe_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "Twinhan DST-like frontend (VP7021/VP7020) DVB-S",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 1000, /* kHz for QPSK frontends */
- .frequency_tolerance = 0,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 1 * MHz,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500, /* ppm */
diff --git a/drivers/media/usb/dvb-usb/vp7045-fe.c b/drivers/media/usb/dvb-usb/vp7045-fe.c
index f86040173b8d..1173ae29885b 100644
--- a/drivers/media/usb/dvb-usb/vp7045-fe.c
+++ b/drivers/media/usb/dvb-usb/vp7045-fe.c
@@ -162,9 +162,9 @@ static const struct dvb_frontend_ops vp7045_fe_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Twinhan VP7045/46 USB DVB-T",
- .frequency_min = 44250000,
- .frequency_max = 867250000,
- .frequency_stepsize = 1000,
+ .frequency_min_hz = 44250 * kHz,
+ .frequency_max_hz = 867250 * kHz,
+ .frequency_stepsize_hz = 1 * kHz,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 6c8438311d3b..71c829f31d3b 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -543,7 +543,7 @@ static const struct em28xx_reg_seq hauppauge_dualhd_dvb[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xdf, 0xff, 100}, /* demod 2 reset */
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100},
- {EM2874_R5F_TS_ENABLE, 0x44, 0xff, 50},
+ {EM2874_R5F_TS_ENABLE, 0x00, 0xff, 50}, /* disable TS filters */
{EM2874_R5D_TS1_PKT_SIZE, 0x05, 0xff, 50},
{EM2874_R5E_TS2_PKT_SIZE, 0x05, 0xff, 50},
{-1, -1, -1, -1},
@@ -2688,8 +2688,6 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM28178_BOARD_PCTV_292E },
{ USB_DEVICE(0x2040, 0x8268), /* Hauppauge Retail WinTV-soloHD Bulk */
.driver_info = EM28178_BOARD_PCTV_292E },
- { USB_DEVICE(0x2040, 0x8268), /* Hauppauge WinTV-soloHD alt. PID */
- .driver_info = EM28178_BOARD_PCTV_292E },
{ USB_DEVICE(0x0413, 0x6f07),
.driver_info = EM2861_BOARD_LEADTEK_VC100 },
{ USB_DEVICE(0xeb1a, 0x8179),
@@ -2854,13 +2852,13 @@ static void em28xx_pre_card_setup(struct em28xx *dev)
em28xx_write_reg(dev, EM2880_R04_GPO, 0x01);
usleep_range(10000, 11000);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd);
- mdelay(70);
+ msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfc);
- mdelay(70);
+ msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xdc);
- mdelay(70);
+ msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfc);
- mdelay(70);
+ msleep(70);
break;
case EM2870_BOARD_TERRATEC_XS_MT2060:
/*
@@ -2868,11 +2866,11 @@ static void em28xx_pre_card_setup(struct em28xx *dev)
* demod work
*/
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe);
- mdelay(70);
+ msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xde);
- mdelay(70);
+ msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe);
- mdelay(70);
+ msleep(70);
break;
case EM2870_BOARD_PINNACLE_PCTV_DVB:
/*
@@ -2880,11 +2878,11 @@ static void em28xx_pre_card_setup(struct em28xx *dev)
* DVB-T demod work
*/
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe);
- mdelay(70);
+ msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xde);
- mdelay(70);
+ msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe);
- mdelay(70);
+ msleep(70);
break;
case EM2820_BOARD_GADMEI_UTV310:
case EM2820_BOARD_MSI_VOX_USB_2:
@@ -3376,7 +3374,9 @@ void em28xx_free_device(struct kref *ref)
if (!dev->disconnected)
em28xx_release_resources(dev);
- kfree(dev->alt_max_pkt_size_isoc);
+ if (dev->ts == PRIMARY_TS)
+ kfree(dev->alt_max_pkt_size_isoc);
+
kfree(dev);
}
EXPORT_SYMBOL_GPL(em28xx_free_device);
@@ -3861,6 +3861,17 @@ static int em28xx_usb_probe(struct usb_interface *intf,
dev->has_video = false;
}
+ if (dev->board.has_dual_ts &&
+ (dev->tuner_type != TUNER_ABSENT || INPUT(0)->type)) {
+ /*
+ * The logic that sets the alternate interface is not ready for
+ * dual tuners with analog modes.
+ */
+ dev_err(&intf->dev,
+ "We currently don't support analog TV or stream capture on dual tuners.\n");
+ has_video = false;
+ }
+
/* Select USB transfer types to use */
if (has_video) {
if (!dev->analog_ep_isoc || (try_bulk && dev->analog_ep_bulk))
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index f70845e7d8c6..5657f8710ca6 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -655,12 +655,12 @@ int em28xx_capture_start(struct em28xx *dev, int start)
rc = em28xx_write_reg_bits(dev,
EM2874_R5F_TS_ENABLE,
start ? EM2874_TS1_CAPTURE_ENABLE : 0x00,
- EM2874_TS1_CAPTURE_ENABLE);
+ EM2874_TS1_CAPTURE_ENABLE | EM2874_TS1_FILTER_ENABLE | EM2874_TS1_NULL_DISCARD);
else
rc = em28xx_write_reg_bits(dev,
EM2874_R5F_TS_ENABLE,
start ? EM2874_TS2_CAPTURE_ENABLE : 0x00,
- EM2874_TS2_CAPTURE_ENABLE);
+ EM2874_TS2_CAPTURE_ENABLE | EM2874_TS2_FILTER_ENABLE | EM2874_TS2_NULL_DISCARD);
} else {
/* FIXME: which is the best order? */
/* video registers are sampled by VREF */
@@ -1053,7 +1053,7 @@ int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
/* submit urbs and enables IRQ */
for (i = 0; i < usb_bufs->num_bufs; i++) {
- rc = usb_submit_urb(usb_bufs->urb[i], GFP_ATOMIC);
+ rc = usb_submit_urb(usb_bufs->urb[i], GFP_KERNEL);
if (rc) {
dev_err(&dev->intf->dev,
"submit of urb %i failed (error=%i)\n", i, rc);
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index b778d8a1983e..a73faf12f7e4 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -218,7 +218,9 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
dvb_alt = dev->dvb_alt_isoc;
}
- usb_set_interface(udev, dev->ifnum, dvb_alt);
+ if (!dev->board.has_dual_ts)
+ usb_set_interface(udev, dev->ifnum, dvb_alt);
+
rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
if (rc < 0)
return rc;
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 6458682bc6e2..e19d6342e0d0 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -559,10 +559,6 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
dev->cur_i2c_bus = bus;
}
- if (num <= 0) {
- rt_mutex_unlock(&dev->i2c_bus_lock);
- return 0;
- }
for (i = 0; i < num; i++) {
addr = msgs[i].addr << 1;
if (!msgs[i].len) {
diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
index 05b1126f263e..62aeebcdd7f7 100644
--- a/drivers/media/usb/go7007/go7007-driver.c
+++ b/drivers/media/usb/go7007/go7007-driver.c
@@ -448,13 +448,14 @@ static struct go7007_buffer *frame_boundary(struct go7007 *go, struct go7007_buf
{
u32 *bytesused;
struct go7007_buffer *vb_tmp = NULL;
+ unsigned long flags;
if (vb == NULL) {
- spin_lock(&go->spinlock);
+ spin_lock_irqsave(&go->spinlock, flags);
if (!list_empty(&go->vidq_active))
vb = go->active_buf =
list_first_entry(&go->vidq_active, struct go7007_buffer, list);
- spin_unlock(&go->spinlock);
+ spin_unlock_irqrestore(&go->spinlock, flags);
go->next_seq++;
return vb;
}
@@ -468,7 +469,7 @@ static struct go7007_buffer *frame_boundary(struct go7007 *go, struct go7007_buf
vb->vb.vb2_buf.timestamp = ktime_get_ns();
vb_tmp = vb;
- spin_lock(&go->spinlock);
+ spin_lock_irqsave(&go->spinlock, flags);
list_del(&vb->list);
if (list_empty(&go->vidq_active))
vb = NULL;
@@ -476,7 +477,7 @@ static struct go7007_buffer *frame_boundary(struct go7007 *go, struct go7007_buf
vb = list_first_entry(&go->vidq_active,
struct go7007_buffer, list);
go->active_buf = vb;
- spin_unlock(&go->spinlock);
+ spin_unlock_irqrestore(&go->spinlock, flags);
vb2_buffer_done(&vb_tmp->vb.vb2_buf, VB2_BUF_STATE_DONE);
return vb;
}
diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c
index f84a2130f033..137fc253b122 100644
--- a/drivers/media/usb/go7007/snd-go7007.c
+++ b/drivers/media/usb/go7007/snd-go7007.c
@@ -75,13 +75,14 @@ static void parse_audio_stream_data(struct go7007 *go, u8 *buf, int length)
struct go7007_snd *gosnd = go->snd_context;
struct snd_pcm_runtime *runtime = gosnd->substream->runtime;
int frames = bytes_to_frames(runtime, length);
+ unsigned long flags;
- spin_lock(&gosnd->lock);
+ spin_lock_irqsave(&gosnd->lock, flags);
gosnd->hw_ptr += frames;
if (gosnd->hw_ptr >= runtime->buffer_size)
gosnd->hw_ptr -= runtime->buffer_size;
gosnd->avail += frames;
- spin_unlock(&gosnd->lock);
+ spin_unlock_irqrestore(&gosnd->lock, flags);
if (gosnd->w_idx + length > runtime->dma_bytes) {
int cpy = runtime->dma_bytes - gosnd->w_idx;
@@ -92,13 +93,13 @@ static void parse_audio_stream_data(struct go7007 *go, u8 *buf, int length)
}
memcpy(runtime->dma_area + gosnd->w_idx, buf, length);
gosnd->w_idx += length;
- spin_lock(&gosnd->lock);
+ spin_lock_irqsave(&gosnd->lock, flags);
if (gosnd->avail < runtime->period_size) {
- spin_unlock(&gosnd->lock);
+ spin_unlock_irqrestore(&gosnd->lock, flags);
return;
}
gosnd->avail -= runtime->period_size;
- spin_unlock(&gosnd->lock);
+ spin_unlock_irqrestore(&gosnd->lock, flags);
if (gosnd->capturing)
snd_pcm_period_elapsed(gosnd->substream);
}
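Both go7007 changes above apply the same pattern: a spinlock that is also
taken from URB completion (interrupt) context must be acquired with the
irqsave variants, otherwise an interrupt arriving while the lock is held
can deadlock on the same CPU. A generic sketch of the pattern (names are
illustrative, not from the driver):

	static spinlock_t lock;		/* shared with an IRQ handler */

	static void touch_shared_state(void)
	{
		unsigned long flags;

		/* Disable local interrupts and save their previous state. */
		spin_lock_irqsave(&lock, flags);
		/* ... modify data also used by the completion handler ... */
		spin_unlock_irqrestore(&lock, flags);
	}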
diff --git a/drivers/media/usb/gspca/kinect.c b/drivers/media/usb/gspca/kinect.c
index 0cfdf8a1e19d..f993f6280c56 100644
--- a/drivers/media/usb/gspca/kinect.c
+++ b/drivers/media/usb/gspca/kinect.c
@@ -163,7 +163,7 @@ static int send_cmd(struct gspca_dev *gspca_dev, uint16_t cmd, void *cmdbuf,
actual_len = kinect_read(udev, ibuf, 0x200);
} while (actual_len == 0);
gspca_dbg(gspca_dev, D_USBO, "Control reply: %d\n", actual_len);
- if (actual_len < sizeof(*rhdr)) {
+ if (actual_len < (int)sizeof(*rhdr)) {
pr_err("send_cmd: Input control transfer failed (%d)\n",
actual_len);
return actual_len < 0 ? actual_len : -EREMOTEIO;
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index 6d692fb3e8dd..34085a0b15a1 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -597,7 +597,7 @@ static int hackrf_submit_urbs(struct hackrf_dev *dev)
for (i = 0; i < dev->urbs_initialized; i++) {
dev_dbg(dev->dev, "submit urb=%d\n", i);
- ret = usb_submit_urb(dev->urb_list[i], GFP_ATOMIC);
+ ret = usb_submit_urb(dev->urb_list[i], GFP_KERNEL);
if (ret) {
dev_err(dev->dev, "Could not submit URB no. %d - get them all back\n",
i);
@@ -636,7 +636,7 @@ static int hackrf_alloc_stream_bufs(struct hackrf_dev *dev)
for (dev->buf_num = 0; dev->buf_num < MAX_BULK_BUFS; dev->buf_num++) {
dev->buf_list[dev->buf_num] = usb_alloc_coherent(dev->udev,
- BULK_BUFFER_SIZE, GFP_ATOMIC,
+ BULK_BUFFER_SIZE, GFP_KERNEL,
&dev->dma_addr[dev->buf_num]);
if (!dev->buf_list[dev->buf_num]) {
dev_dbg(dev->dev, "alloc buf=%d failed\n",
@@ -689,7 +689,7 @@ static int hackrf_alloc_urbs(struct hackrf_dev *dev, bool rcv)
/* allocate the URBs */
for (i = 0; i < MAX_BULK_BUFS; i++) {
dev_dbg(dev->dev, "alloc urb=%d\n", i);
- dev->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
+ dev->urb_list[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->urb_list[i]) {
for (j = 0; j < i; j++)
usb_free_urb(dev->urb_list[j]);
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index c71ddefd2e58..5a3cb614a211 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -117,9 +117,6 @@ static int hdpvr_transfer(struct i2c_adapter *i2c_adapter, struct i2c_msg *msgs,
struct hdpvr_device *dev = i2c_get_adapdata(i2c_adapter);
int retval = 0, addr;
- if (num <= 0)
- return 0;
-
mutex_lock(&dev->i2c_mutex);
addr = msgs[0].addr << 1;
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 77b759a0bcd9..504e413edcd2 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -802,6 +802,7 @@ int stk1160_vb2_setup(struct stk1160 *dev)
q->buf_struct_size = sizeof(struct stk1160_buffer);
q->ops = &stk1160_video_qops;
q->mem_ops = &vb2_vmalloc_memops;
+ q->lock = &dev->vb_queue_lock;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
rc = vb2_queue_init(q);
@@ -827,7 +828,6 @@ int stk1160_video_register(struct stk1160 *dev)
* It will be used to protect *only* v4l2 ioctls.
*/
dev->vdev.lock = &dev->v4l_lock;
- dev->vdev.queue->lock = &dev->vb_queue_lock;
/* This will be used to set video_device parent */
dev->vdev.v4l2_dev = &dev->v4l2_dev;
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index c811fc6cf48a..3a4e545c6037 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -266,6 +266,11 @@ static int register_dvb(struct tm6000_core *dev)
ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
THIS_MODULE, &dev->udev->dev, adapter_nr);
+ if (ret < 0) {
+ pr_err("tm6000: couldn't register the adapter!\n");
+ goto err;
+ }
+
dvb->adapter.priv = dev;
if (dvb->frontend) {
diff --git a/drivers/media/usb/tm6000/tm6000-i2c.c b/drivers/media/usb/tm6000/tm6000-i2c.c
index 659b63febf85..ccd1adf862b1 100644
--- a/drivers/media/usb/tm6000/tm6000-i2c.c
+++ b/drivers/media/usb/tm6000/tm6000-i2c.c
@@ -145,8 +145,6 @@ static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap,
struct tm6000_core *dev = i2c_adap->algo_data;
int addr, rc, i, byte;
- if (num <= 0)
- return 0;
for (i = 0; i < num; i++) {
addr = (msgs[i].addr << 1) & 0xff;
i2c_dprintk(2, "%s %s addr=0x%x len=%d:",
diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.c b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
index 6ea05d909024..278bf6c5855b 100644
--- a/drivers/media/usb/ttusb-dec/ttusbdecfe.c
+++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
@@ -247,9 +247,9 @@ static const struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "TechnoTrend/Hauppauge DEC2000-t Frontend",
- .frequency_min = 51000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
+ .frequency_min_hz = 51 * MHz,
+ .frequency_max_hz = 858 * MHz,
+ .frequency_stepsize_hz = 62500,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
@@ -270,9 +270,9 @@ static const struct dvb_frontend_ops ttusbdecfe_dvbs_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "TechnoTrend/Hauppauge DEC3000-s Frontend",
- .frequency_min = 950000,
- .frequency_max = 2150000,
- .frequency_stepsize = 125,
+ .frequency_min_hz = 950 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 125 * kHz,
.symbol_rate_min = 1000000, /* guessed */
.symbol_rate_max = 45000000, /* guessed */
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
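The old frequency_min/max/stepsize fields were implicitly kHz for satellite
frontends and Hz for terrestrial ones; the new *_hz fields make the unit
explicit. Assuming the kHz and MHz helpers provided by the frontend header,
the conversion is mechanical:

	/* Sketch: DVB-S values were in kHz, so 950000 kHz becomes 950 MHz. */
	.frequency_min_hz      =  950 * MHz,	/* was frequency_min = 950000 */
	.frequency_max_hz      = 2150 * MHz,	/* was frequency_max = 2150000 */
	.frequency_stepsize_hz =  125 * kHz,	/* was frequency_stepsize = 125 */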
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
index 2c2ca77fa01f..4ce38246ed64 100644
--- a/drivers/media/usb/usbtv/usbtv-audio.c
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -126,6 +126,7 @@ static void usbtv_audio_urb_received(struct urb *urb)
struct snd_pcm_runtime *runtime = substream->runtime;
size_t i, frame_bytes, chunk_length, buffer_pos, period_pos;
int period_elapsed;
+ unsigned long flags;
void *urb_current;
switch (urb->status) {
@@ -179,12 +180,12 @@ static void usbtv_audio_urb_received(struct urb *urb)
}
}
- snd_pcm_stream_lock(substream);
+ snd_pcm_stream_lock_irqsave(substream, flags);
chip->snd_buffer_pos = buffer_pos;
chip->snd_period_pos = period_pos;
- snd_pcm_stream_unlock(substream);
+ snd_pcm_stream_unlock_irqrestore(substream, flags);
if (period_elapsed)
snd_pcm_period_elapsed(substream);
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index a36b4fb949fa..c2ad102bd693 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -20,6 +20,7 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <media/v4l2-ctrls.h>
@@ -971,12 +972,30 @@ static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain,
return 0;
}
+static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping,
+ const u8 *data)
+{
+ s32 value = mapping->get(mapping, UVC_GET_CUR, data);
+
+ if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
+ struct uvc_menu_info *menu = mapping->menu_info;
+ unsigned int i;
+
+ for (i = 0; i < mapping->menu_count; ++i, ++menu) {
+ if (menu->value == value) {
+ value = i;
+ break;
+ }
+ }
+ }
+
+ return value;
+}
+
static int __uvc_ctrl_get(struct uvc_video_chain *chain,
struct uvc_control *ctrl, struct uvc_control_mapping *mapping,
s32 *value)
{
- struct uvc_menu_info *menu;
- unsigned int i;
int ret;
if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0)
@@ -993,18 +1012,8 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
ctrl->loaded = 1;
}
- *value = mapping->get(mapping, UVC_GET_CUR,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
-
- if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
- menu = mapping->menu_info;
- for (i = 0; i < mapping->menu_count; ++i, ++menu) {
- if (menu->value == *value) {
- *value = i;
- break;
- }
- }
- }
+ *value = __uvc_ctrl_get_value(mapping,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
return 0;
}
@@ -1125,7 +1134,7 @@ done:
}
/*
- * Mapping V4L2 controls to UVC controls can be straighforward if done well.
+ * Mapping V4L2 controls to UVC controls can be straightforward if done well.
* Most of the UVC controls exist in V4L2, and can be mapped directly. Some
* must be grouped (for instance the Red Balance, Blue Balance and Do White
* Balance V4L2 controls use the White Balance Component UVC control) or
@@ -1216,53 +1225,135 @@ static void uvc_ctrl_fill_event(struct uvc_video_chain *chain,
ev->u.ctrl.default_value = v4l2_ctrl.default_value;
}
-static void uvc_ctrl_send_event(struct uvc_fh *handle,
- struct uvc_control *ctrl, struct uvc_control_mapping *mapping,
- s32 value, u32 changes)
+/*
+ * Send control change events to all subscribers for the @ctrl control. By
+ * default the subscriber that generated the event, as identified by @handle,
+ * is not notified unless it has set the V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK flag.
+ * @handle can be NULL for asynchronous events related to auto-update controls,
+ * in which case all subscribers are notified.
+ */
+static void uvc_ctrl_send_event(struct uvc_video_chain *chain,
+ struct uvc_fh *handle, struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping, s32 value, u32 changes)
{
+ struct v4l2_fh *originator = handle ? &handle->vfh : NULL;
struct v4l2_subscribed_event *sev;
struct v4l2_event ev;
if (list_empty(&mapping->ev_subs))
return;
- uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, value, changes);
+ uvc_ctrl_fill_event(chain, &ev, ctrl, mapping, value, changes);
list_for_each_entry(sev, &mapping->ev_subs, node) {
- if (sev->fh && (sev->fh != &handle->vfh ||
+ if (sev->fh != originator ||
(sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK) ||
- (changes & V4L2_EVENT_CTRL_CH_FLAGS)))
+ (changes & V4L2_EVENT_CTRL_CH_FLAGS))
v4l2_event_queue_fh(sev->fh, &ev);
}
}
-static void uvc_ctrl_send_slave_event(struct uvc_fh *handle,
- struct uvc_control *master, u32 slave_id,
- const struct v4l2_ext_control *xctrls, unsigned int xctrls_count)
+/*
+ * Send control change events for the slave of the @master control identified
+ * by the V4L2 ID @slave_id. The @handle identifies the event subscriber that
+ * generated the event and may be NULL for auto-update events.
+ */
+static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain,
+ struct uvc_fh *handle, struct uvc_control *master, u32 slave_id)
{
struct uvc_control_mapping *mapping = NULL;
struct uvc_control *ctrl = NULL;
u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
- unsigned int i;
s32 val = 0;
- /*
- * We can skip sending an event for the slave if the slave
- * is being modified in the same transaction.
- */
- for (i = 0; i < xctrls_count; i++) {
- if (xctrls[i].id == slave_id)
- return;
- }
-
__uvc_find_control(master->entity, slave_id, &mapping, &ctrl, 0);
if (ctrl == NULL)
return;
- if (__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0)
+ if (__uvc_ctrl_get(chain, ctrl, mapping, &val) == 0)
changes |= V4L2_EVENT_CTRL_CH_VALUE;
- uvc_ctrl_send_event(handle, ctrl, mapping, val, changes);
+ uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
+}
+
+static void uvc_ctrl_status_event_work(struct work_struct *work)
+{
+ struct uvc_device *dev = container_of(work, struct uvc_device,
+ async_ctrl.work);
+ struct uvc_ctrl_work *w = &dev->async_ctrl;
+ struct uvc_video_chain *chain = w->chain;
+ struct uvc_control_mapping *mapping;
+ struct uvc_control *ctrl = w->ctrl;
+ struct uvc_fh *handle;
+ unsigned int i;
+ int ret;
+
+ mutex_lock(&chain->ctrl_mutex);
+
+ handle = ctrl->handle;
+ ctrl->handle = NULL;
+
+ list_for_each_entry(mapping, &ctrl->info.mappings, list) {
+ s32 value = __uvc_ctrl_get_value(mapping, w->data);
+
+ /*
+ * handle may be NULL here if the device sends auto-update
+ * events without a prior related control set from userspace.
+ */
+ for (i = 0; i < ARRAY_SIZE(mapping->slave_ids); ++i) {
+ if (!mapping->slave_ids[i])
+ break;
+
+ uvc_ctrl_send_slave_event(chain, handle, ctrl,
+ mapping->slave_ids[i]);
+ }
+
+ uvc_ctrl_send_event(chain, handle, ctrl, mapping, value,
+ V4L2_EVENT_CTRL_CH_VALUE);
+ }
+
+ mutex_unlock(&chain->ctrl_mutex);
+
+ /* Resubmit the URB. */
+ w->urb->interval = dev->int_ep->desc.bInterval;
+ ret = usb_submit_urb(w->urb, GFP_KERNEL);
+ if (ret < 0)
+ uvc_printk(KERN_ERR, "Failed to resubmit status URB (%d).\n",
+ ret);
+}
+
+bool uvc_ctrl_status_event(struct urb *urb, struct uvc_video_chain *chain,
+ struct uvc_control *ctrl, const u8 *data)
+{
+ struct uvc_device *dev = chain->dev;
+ struct uvc_ctrl_work *w = &dev->async_ctrl;
+
+ if (list_empty(&ctrl->info.mappings)) {
+ ctrl->handle = NULL;
+ return false;
+ }
+
+ w->data = data;
+ w->urb = urb;
+ w->chain = chain;
+ w->ctrl = ctrl;
+
+ schedule_work(&w->work);
+
+ return true;
+}
+
+static bool uvc_ctrl_xctrls_has_control(const struct v4l2_ext_control *xctrls,
+ unsigned int xctrls_count, u32 id)
+{
+ unsigned int i;
+
+ for (i = 0; i < xctrls_count; ++i) {
+ if (xctrls[i].id == id)
+ return true;
+ }
+
+ return false;
}
static void uvc_ctrl_send_events(struct uvc_fh *handle,
@@ -1277,29 +1368,39 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
for (i = 0; i < xctrls_count; ++i) {
ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
+ if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+ /* Notification will be sent from an Interrupt event. */
+ continue;
+
for (j = 0; j < ARRAY_SIZE(mapping->slave_ids); ++j) {
- if (!mapping->slave_ids[j])
+ u32 slave_id = mapping->slave_ids[j];
+
+ if (!slave_id)
break;
- uvc_ctrl_send_slave_event(handle, ctrl,
- mapping->slave_ids[j],
- xctrls, xctrls_count);
+
+ /*
+ * We can skip sending an event for the slave if the
+ * slave is being modified in the same transaction.
+ */
+ if (uvc_ctrl_xctrls_has_control(xctrls, xctrls_count,
+ slave_id))
+ continue;
+
+ uvc_ctrl_send_slave_event(handle->chain, handle, ctrl,
+ slave_id);
}
/*
* If the master is being modified in the same transaction
* flags may change too.
*/
- if (mapping->master_id) {
- for (j = 0; j < xctrls_count; j++) {
- if (xctrls[j].id == mapping->master_id) {
- changes |= V4L2_EVENT_CTRL_CH_FLAGS;
- break;
- }
- }
- }
+ if (mapping->master_id &&
+ uvc_ctrl_xctrls_has_control(xctrls, xctrls_count,
+ mapping->master_id))
+ changes |= V4L2_EVENT_CTRL_CH_FLAGS;
- uvc_ctrl_send_event(handle, ctrl, mapping, xctrls[i].value,
- changes);
+ uvc_ctrl_send_event(handle->chain, handle, ctrl, mapping,
+ xctrls[i].value, changes);
}
}
@@ -1472,9 +1573,10 @@ int uvc_ctrl_get(struct uvc_video_chain *chain,
return __uvc_ctrl_get(chain, ctrl, mapping, &xctrl->value);
}
-int uvc_ctrl_set(struct uvc_video_chain *chain,
+int uvc_ctrl_set(struct uvc_fh *handle,
struct v4l2_ext_control *xctrl)
{
+ struct uvc_video_chain *chain = handle->chain;
struct uvc_control *ctrl;
struct uvc_control_mapping *mapping;
s32 value;
@@ -1581,6 +1683,9 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
mapping->set(mapping, value,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+ if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+ ctrl->handle = handle;
+
ctrl->dirty = 1;
ctrl->modified = 1;
return 0;
@@ -1612,7 +1717,9 @@ static int uvc_ctrl_get_flags(struct uvc_device *dev,
| (data[0] & UVC_CONTROL_CAP_SET ?
UVC_CTRL_FLAG_SET_CUR : 0)
| (data[0] & UVC_CONTROL_CAP_AUTOUPDATE ?
- UVC_CTRL_FLAG_AUTO_UPDATE : 0);
+ UVC_CTRL_FLAG_AUTO_UPDATE : 0)
+ | (data[0] & UVC_CONTROL_CAP_ASYNCHRONOUS ?
+ UVC_CTRL_FLAG_ASYNCHRONOUS : 0);
kfree(data);
return ret;
@@ -2173,6 +2280,8 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
struct uvc_entity *entity;
unsigned int i;
+ INIT_WORK(&dev->async_ctrl.work, uvc_ctrl_status_event_work);
+
/* Walk the entities list and instantiate controls */
list_for_each_entry(entity, &dev->entities, list) {
struct uvc_control *ctrl;
@@ -2241,6 +2350,8 @@ void uvc_ctrl_cleanup_device(struct uvc_device *dev)
struct uvc_entity *entity;
unsigned int i;
+ cancel_work_sync(&dev->async_ctrl.work);
+
/* Free controls and control mappings for all entities. */
list_for_each_entry(entity, &dev->entities, list) {
for (i = 0; i < entity->ncontrols; ++i) {
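The asynchronous control path added above is split in two halves: the status
URB completion only records the event and schedules a work item, while the
work handler takes the chain mutex, queues the V4L2 events and resubmits the
URB. A condensed restatement of the handoff (simplified from the code above,
not a drop-in replacement):

	/* Top half, interrupt context: defer everything that may sleep. */
	w->data = data;		/* raw bValue from the status packet */
	w->urb = urb;		/* must not be resubmitted yet - the
				 * status buffer is still referenced */
	schedule_work(&w->work);

	/* Bottom half, process context: send events, then resubmit. */
	mutex_lock(&chain->ctrl_mutex);
	/* ... uvc_ctrl_send_event() / uvc_ctrl_send_slave_event() ... */
	mutex_unlock(&chain->ctrl_mutex);
	usb_submit_urb(w->urb, GFP_KERNEL);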
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 8e138201330f..d46dc432456c 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -100,6 +100,11 @@ static struct uvc_format_desc uvc_fmts[] = {
.fcc = V4L2_PIX_FMT_GREY,
},
{
+ .name = "IR 8-bit (L8_IR)",
+ .guid = UVC_GUID_FORMAT_KSMEDIA_L8_IR,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
.name = "Greyscale 10-bit (Y10 )",
.guid = UVC_GUID_FORMAT_Y10,
.fcc = V4L2_PIX_FMT_Y10,
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
index 7b710410584a..0722dc684378 100644
--- a/drivers/media/usb/uvc/uvc_status.c
+++ b/drivers/media/usb/uvc/uvc_status.c
@@ -78,7 +78,24 @@ static void uvc_input_report_key(struct uvc_device *dev, unsigned int code,
/* --------------------------------------------------------------------------
* Status interrupt endpoint
*/
-static void uvc_event_streaming(struct uvc_device *dev, u8 *data, int len)
+struct uvc_streaming_status {
+ u8 bStatusType;
+ u8 bOriginator;
+ u8 bEvent;
+ u8 bValue[];
+} __packed;
+
+struct uvc_control_status {
+ u8 bStatusType;
+ u8 bOriginator;
+ u8 bEvent;
+ u8 bSelector;
+ u8 bAttribute;
+ u8 bValue[];
+} __packed;
+
+static void uvc_event_streaming(struct uvc_device *dev,
+ struct uvc_streaming_status *status, int len)
{
if (len < 3) {
uvc_trace(UVC_TRACE_STATUS, "Invalid streaming status event "
@@ -86,31 +103,97 @@ static void uvc_event_streaming(struct uvc_device *dev, u8 *data, int len)
return;
}
- if (data[2] == 0) {
+ if (status->bEvent == 0) {
if (len < 4)
return;
uvc_trace(UVC_TRACE_STATUS, "Button (intf %u) %s len %d\n",
- data[1], data[3] ? "pressed" : "released", len);
- uvc_input_report_key(dev, KEY_CAMERA, data[3]);
+ status->bOriginator,
+ status->bValue[0] ? "pressed" : "released", len);
+ uvc_input_report_key(dev, KEY_CAMERA, status->bValue[0]);
} else {
uvc_trace(UVC_TRACE_STATUS,
"Stream %u error event %02x len %d.\n",
- data[1], data[2], len);
+ status->bOriginator, status->bEvent, len);
}
}
-static void uvc_event_control(struct uvc_device *dev, u8 *data, int len)
+#define UVC_CTRL_VALUE_CHANGE 0
+#define UVC_CTRL_INFO_CHANGE 1
+#define UVC_CTRL_FAILURE_CHANGE 2
+#define UVC_CTRL_MIN_CHANGE 3
+#define UVC_CTRL_MAX_CHANGE 4
+
+static struct uvc_control *uvc_event_entity_find_ctrl(struct uvc_entity *entity,
+ u8 selector)
{
- char *attrs[3] = { "value", "info", "failure" };
+ struct uvc_control *ctrl;
+ unsigned int i;
+
+ for (i = 0, ctrl = entity->controls; i < entity->ncontrols; i++, ctrl++)
+ if (ctrl->info.selector == selector)
+ return ctrl;
+
+ return NULL;
+}
- if (len < 6 || data[2] != 0 || data[4] > 2) {
+static struct uvc_control *uvc_event_find_ctrl(struct uvc_device *dev,
+ const struct uvc_control_status *status,
+ struct uvc_video_chain **chain)
+{
+ list_for_each_entry((*chain), &dev->chains, list) {
+ struct uvc_entity *entity;
+ struct uvc_control *ctrl;
+
+ list_for_each_entry(entity, &(*chain)->entities, chain) {
+ if (entity->id != status->bOriginator)
+ continue;
+
+ ctrl = uvc_event_entity_find_ctrl(entity,
+ status->bSelector);
+ if (ctrl)
+ return ctrl;
+ }
+ }
+
+ return NULL;
+}
+
+static bool uvc_event_control(struct urb *urb,
+ const struct uvc_control_status *status, int len)
+{
+ static const char *attrs[] = { "value", "info", "failure", "min", "max" };
+ struct uvc_device *dev = urb->context;
+ struct uvc_video_chain *chain;
+ struct uvc_control *ctrl;
+
+ if (len < 6 || status->bEvent != 0 ||
+ status->bAttribute >= ARRAY_SIZE(attrs)) {
uvc_trace(UVC_TRACE_STATUS, "Invalid control status event "
"received.\n");
- return;
+ return false;
}
uvc_trace(UVC_TRACE_STATUS, "Control %u/%u %s change len %d.\n",
- data[1], data[3], attrs[data[4]], len);
+ status->bOriginator, status->bSelector,
+ attrs[status->bAttribute], len);
+
+ /* Find the control. */
+ ctrl = uvc_event_find_ctrl(dev, status, &chain);
+ if (!ctrl)
+ return false;
+
+ switch (status->bAttribute) {
+ case UVC_CTRL_VALUE_CHANGE:
+ return uvc_ctrl_status_event(urb, chain, ctrl, status->bValue);
+
+ case UVC_CTRL_INFO_CHANGE:
+ case UVC_CTRL_FAILURE_CHANGE:
+ case UVC_CTRL_MIN_CHANGE:
+ case UVC_CTRL_MAX_CHANGE:
+ break;
+ }
+
+ return false;
}
static void uvc_status_complete(struct urb *urb)
@@ -138,13 +221,23 @@ static void uvc_status_complete(struct urb *urb)
len = urb->actual_length;
if (len > 0) {
switch (dev->status[0] & 0x0f) {
- case UVC_STATUS_TYPE_CONTROL:
- uvc_event_control(dev, dev->status, len);
+ case UVC_STATUS_TYPE_CONTROL: {
+ struct uvc_control_status *status =
+ (struct uvc_control_status *)dev->status;
+
+ if (uvc_event_control(urb, status, len))
+ /* The URB will be resubmitted in work context. */
+ return;
break;
+ }
- case UVC_STATUS_TYPE_STREAMING:
- uvc_event_streaming(dev, dev->status, len);
+ case UVC_STATUS_TYPE_STREAMING: {
+ struct uvc_streaming_status *status =
+ (struct uvc_streaming_status *)dev->status;
+
+ uvc_event_streaming(dev, status, len);
break;
+ }
default:
uvc_trace(UVC_TRACE_STATUS, "Unknown status event "
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index bd32914259ae..18a7384b50ee 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -994,7 +994,7 @@ static int uvc_ioctl_s_ctrl(struct file *file, void *fh,
if (ret < 0)
return ret;
- ret = uvc_ctrl_set(chain, &xctrl);
+ ret = uvc_ctrl_set(handle, &xctrl);
if (ret < 0) {
uvc_ctrl_rollback(handle);
return ret;
@@ -1069,7 +1069,7 @@ static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle,
return ret;
for (i = 0; i < ctrls->count; ++ctrl, ++i) {
- ret = uvc_ctrl_set(chain, ctrl);
+ ret = uvc_ctrl_set(handle, ctrl);
if (ret < 0) {
uvc_ctrl_rollback(handle);
ctrls->error_idx = commit ? ctrls->count : i;
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index a88b2e51a666..86a99f461fd8 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -73,17 +73,57 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
u8 intfnum, u8 cs, void *data, u16 size)
{
int ret;
+ u8 error;
+ u8 tmp;
ret = __uvc_query_ctrl(dev, query, unit, intfnum, cs, data, size,
UVC_CTRL_CONTROL_TIMEOUT);
- if (ret != size) {
- uvc_printk(KERN_ERR, "Failed to query (%s) UVC control %u on "
- "unit %u: %d (exp. %u).\n", uvc_query_name(query), cs,
- unit, ret, size);
- return -EIO;
+ if (likely(ret == size))
+ return 0;
+
+ uvc_printk(KERN_ERR,
+ "Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
+ uvc_query_name(query), cs, unit, ret, size);
+
+ if (ret != -EPIPE)
+ return ret;
+
+ tmp = *(u8 *)data;
+
+ ret = __uvc_query_ctrl(dev, UVC_GET_CUR, 0, intfnum,
+ UVC_VC_REQUEST_ERROR_CODE_CONTROL, data, 1,
+ UVC_CTRL_CONTROL_TIMEOUT);
+
+ error = *(u8 *)data;
+ *(u8 *)data = tmp;
+
+ if (ret != 1)
+ return ret < 0 ? ret : -EPIPE;
+
+ uvc_trace(UVC_TRACE_CONTROL, "Control error %u\n", error);
+
+ switch (error) {
+ case 0:
+ /* Cannot happen - we received a STALL */
+ return -EPIPE;
+ case 1: /* Not ready */
+ return -EBUSY;
+ case 2: /* Wrong state */
+ return -EILSEQ;
+ case 3: /* Power */
+ return -EREMOTE;
+ case 4: /* Out of range */
+ return -ERANGE;
+ case 5: /* Invalid unit */
+ case 6: /* Invalid control */
+ case 7: /* Invalid Request */
+ case 8: /* Invalid value within range */
+ return -EINVAL;
+ default: /* reserved or unknown */
+ break;
}
- return 0;
+ return -EPIPE;
}
static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
@@ -1232,6 +1272,8 @@ static void uvc_video_validate_buffer(const struct uvc_streaming *stream,
static void uvc_video_next_buffers(struct uvc_streaming *stream,
struct uvc_buffer **video_buf, struct uvc_buffer **meta_buf)
{
+ uvc_video_validate_buffer(stream, *video_buf);
+
if (*meta_buf) {
struct vb2_v4l2_buffer *vb2_meta = &(*meta_buf)->buf;
const struct vb2_v4l2_buffer *vb2_video = &(*video_buf)->buf;
@@ -1270,10 +1312,8 @@ static void uvc_video_decode_isoc(struct urb *urb, struct uvc_streaming *stream,
do {
ret = uvc_video_decode_start(stream, buf, mem,
urb->iso_frame_desc[i].actual_length);
- if (ret == -EAGAIN) {
- uvc_video_validate_buffer(stream, buf);
+ if (ret == -EAGAIN)
uvc_video_next_buffers(stream, &buf, &meta_buf);
- }
} while (ret == -EAGAIN);
if (ret < 0)
@@ -1289,10 +1329,8 @@ static void uvc_video_decode_isoc(struct urb *urb, struct uvc_streaming *stream,
uvc_video_decode_end(stream, buf, mem,
urb->iso_frame_desc[i].actual_length);
- if (buf->state == UVC_BUF_STATE_READY) {
- uvc_video_validate_buffer(stream, buf);
+ if (buf->state == UVC_BUF_STATE_READY)
uvc_video_next_buffers(stream, &buf, &meta_buf);
- }
}
}
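With the stall handling added to uvc_query_ctrl() above, callers now see a
specific errno (for example -EBUSY for "not ready") instead of a blanket
-EIO. A caller could act on the distinction; a hypothetical retry helper,
not part of the driver:

	/* Retry a control query while the device reports "not ready". */
	static int query_with_retry(struct uvc_device *dev, u8 query, u8 unit,
				    u8 intfnum, u8 cs, void *data, u16 size)
	{
		int tries = 3;
		int ret;

		do {
			ret = uvc_query_ctrl(dev, query, unit, intfnum, cs,
					     data, size);
		} while (ret == -EBUSY && --tries);

		return ret;
	}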
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index be5cf179228b..e5f5d84f1d1d 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -12,6 +12,7 @@
#include <linux/usb/video.h>
#include <linux/uvcvideo.h>
#include <linux/videodev2.h>
+#include <linux/workqueue.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -157,6 +158,9 @@
#define UVC_GUID_FORMAT_D3DFMT_L8 \
{0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_KSMEDIA_L8_IR \
+ {0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
/* ------------------------------------------------------------------------
@@ -256,6 +260,8 @@ struct uvc_control {
initialized:1;
u8 *uvc_data;
+
+ struct uvc_fh *handle; /* File handle that last changed the control. */
};
struct uvc_format_desc {
@@ -600,6 +606,14 @@ struct uvc_device {
u8 *status;
struct input_dev *input;
char input_phys[64];
+
+ struct uvc_ctrl_work {
+ struct work_struct work;
+ struct urb *urb;
+ struct uvc_video_chain *chain;
+ struct uvc_control *ctrl;
+ const void *data;
+ } async_ctrl;
};
enum uvc_handle_state {
@@ -753,6 +767,8 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
int uvc_ctrl_init_device(struct uvc_device *dev);
void uvc_ctrl_cleanup_device(struct uvc_device *dev);
int uvc_ctrl_restore_values(struct uvc_device *dev);
+bool uvc_ctrl_status_event(struct urb *urb, struct uvc_video_chain *chain,
+ struct uvc_control *ctrl, const u8 *data);
int uvc_ctrl_begin(struct uvc_video_chain *chain);
int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
@@ -770,7 +786,7 @@ static inline int uvc_ctrl_rollback(struct uvc_fh *handle)
}
int uvc_ctrl_get(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl);
-int uvc_ctrl_set(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl);
+int uvc_ctrl_set(struct uvc_fh *handle, struct v4l2_ext_control *xctrl);
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index d29e45516eb7..599c1cbff3b9 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -431,6 +431,20 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Use Previous Specific Frame",
NULL,
};
+ static const char * const vp8_profile[] = {
+ "0",
+ "1",
+ "2",
+ "3",
+ NULL,
+ };
+ static const char * const vp9_profile[] = {
+ "0",
+ "1",
+ "2",
+ "3",
+ NULL,
+ };
static const char * const flash_led_mode[] = {
"Off",
@@ -614,6 +628,10 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return mpeg4_profile;
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
return vpx_golden_frame_sel;
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ return vp8_profile;
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
+ return vp9_profile;
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
return jpeg_chroma_subsampling;
case V4L2_CID_DV_TX_MODE:
@@ -839,7 +857,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_VPX_PROFILE: return "VPX Profile";
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile";
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile";
/* HEVC controls */
case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: return "HEVC I-Frame QP Value";
@@ -1180,6 +1199,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_DEINTERLACING_MODE:
case V4L2_CID_TUNE_DEEMPHASIS:
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
case V4L2_CID_DETECT_MD_MODE:
case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
@@ -3137,9 +3158,22 @@ static int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
/* If OK, then make the new values permanent. */
update_flag = is_cur_manual(master) != is_new_manual(master);
- for (i = 0; i < master->ncontrols; i++)
+
+ for (i = 0; i < master->ncontrols; i++) {
+ /*
+ * If we switch from auto to manual mode, and this cluster
+ * contains volatile controls, then all non-master controls
+ * have to be marked as changed. The 'new' value contains
+ * the volatile value (obtained by update_from_auto_cluster),
+ * which now has to become the current value.
+ */
+ if (i && update_flag && is_new_manual(master) &&
+ master->has_volatiles && master->cluster[i])
+ master->cluster[i]->has_changed = true;
+
new_to_cur(fh, master->cluster[i], ch_flags |
((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
+ }
return 0;
}
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 4ffd7d60a901..69e775930fc4 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -202,7 +202,7 @@ static void v4l2_device_release(struct device *cd)
mutex_unlock(&videodev_lock);
#if defined(CONFIG_MEDIA_CONTROLLER)
- if (v4l2_dev->mdev) {
+ if (v4l2_dev->mdev && vdev->vfl_dir != VFL_DIR_M2M) {
/* Remove interfaces and interface links */
media_devnode_remove(vdev->intf_devnode);
if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN)
@@ -733,19 +733,22 @@ static void determine_valid_ioctls(struct video_device *vdev)
BASE_VIDIOC_PRIVATE);
}
-static int video_register_media_controller(struct video_device *vdev, int type)
+static int video_register_media_controller(struct video_device *vdev)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
u32 intf_type;
int ret;
- if (!vdev->v4l2_dev->mdev)
+ /* Memory-to-memory devices are more complex and use
+ * their own function to register their MC entities.
+ */
+ if (!vdev->v4l2_dev->mdev || vdev->vfl_dir == VFL_DIR_M2M)
return 0;
vdev->entity.obj_type = MEDIA_ENTITY_TYPE_VIDEO_DEVICE;
vdev->entity.function = MEDIA_ENT_F_UNKNOWN;
- switch (type) {
+ switch (vdev->vfl_type) {
case VFL_TYPE_GRABBER:
intf_type = MEDIA_INTF_T_V4L_VIDEO;
vdev->entity.function = MEDIA_ENT_F_IO_V4L;
@@ -808,7 +811,8 @@ static int video_register_media_controller(struct video_device *vdev, int type)
link = media_create_intf_link(&vdev->entity,
&vdev->intf_devnode->intf,
- MEDIA_LNK_FL_ENABLED);
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
if (!link) {
media_devnode_remove(vdev->intf_devnode);
media_device_unregister_entity(&vdev->entity);
@@ -993,7 +997,7 @@ int __video_register_device(struct video_device *vdev,
v4l2_device_get(vdev->v4l2_dev);
/* Part 5: Register the entity. */
- ret = video_register_media_controller(vdev, type);
+ ret = video_register_media_controller(vdev);
/* Part 6: Activate this minor. The char device can now be used. */
set_bit(V4L2_FL_REGISTERED, &vdev->flags);
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 937c6de85606..3940e55c72f1 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -267,7 +267,8 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
link = media_create_intf_link(&sd->entity,
&vdev->intf_devnode->intf,
- MEDIA_LNK_FL_ENABLED);
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
if (!link) {
err = -ENOMEM;
goto clean_up;
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 3f77aa318035..169bdbb1f61a 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -154,6 +154,10 @@ static void v4l2_fwnode_endpoint_parse_parallel_bus(
flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH :
V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW;
+ if (!fwnode_property_read_u32(fwnode, "data-enable-active", &v))
+ flags |= v ? V4L2_MBUS_DATA_ENABLE_HIGH :
+ V4L2_MBUS_DATA_ENABLE_LOW;
+
bus->flags = flags;
}
@@ -739,7 +743,7 @@ static struct fwnode_handle *v4l2_fwnode_reference_get_int_prop(
const char * const *props, unsigned int nprops)
{
struct fwnode_reference_args fwnode_args;
- unsigned int *args = fwnode_args.args;
+ u64 *args = fwnode_args.args;
struct fwnode_handle *child;
int ret;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index dd210067151f..54afc9c7ee6e 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -29,6 +29,7 @@
#include <media/v4l2-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mc.h>
+#include <media/v4l2-mem2mem.h>
#include <trace/events/v4l2.h>
@@ -125,6 +126,42 @@ int v4l2_video_std_construct(struct v4l2_standard *vs,
}
EXPORT_SYMBOL(v4l2_video_std_construct);
+/* Fill in the fields of a v4l2_standard structure according to the
+ * 'id' and 'vs->index' parameters. Returns negative on error. */
+int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id)
+{
+ v4l2_std_id curr_id = 0;
+ unsigned int index = vs->index, i, j = 0;
+ const char *descr = "";
+
+ /* Return -ENODATA if the id for the current input
+ or output is 0, meaning that it doesn't support this API. */
+ if (id == 0)
+ return -ENODATA;
+
+ /* Return norm array in a canonical way */
+ for (i = 0; i <= index && id; i++) {
+ /* last std value in the standards array is 0, so this
+ while always ends there since (id & 0) == 0. */
+ while ((id & standards[j].std) != standards[j].std)
+ j++;
+ curr_id = standards[j].std;
+ descr = standards[j].descr;
+ j++;
+ if (curr_id == 0)
+ break;
+ if (curr_id != V4L2_STD_PAL &&
+ curr_id != V4L2_STD_SECAM &&
+ curr_id != V4L2_STD_NTSC)
+ id &= ~curr_id;
+ }
+ if (i <= index)
+ return -EINVAL;
+
+ v4l2_video_std_construct(vs, curr_id, descr);
+ return 0;
+}
+
/* ----------------------------------------------------------------- */
/* some arrays for pretty-printing debug messages of enum types */
@@ -1147,6 +1184,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_Y16: descr = "16-bit Greyscale"; break;
case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break;
case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit Greyscale (Packed)"; break;
+ case V4L2_PIX_FMT_Y10P: descr = "10-bit Greyscale (MIPI Packed)"; break;
case V4L2_PIX_FMT_Y8I: descr = "Interleaved 8-bit Greyscale"; break;
case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break;
case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break;
@@ -1222,6 +1260,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break;
case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break;
case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break;
+ case V4L2_PIX_FMT_SBGGR14P: descr = "14-bit Bayer BGBG/GRGR Packed"; break;
+ case V4L2_PIX_FMT_SGBRG14P: descr = "14-bit Bayer GBGB/RGRG Packed"; break;
+ case V4L2_PIX_FMT_SGRBG14P: descr = "14-bit Bayer GRGR/BGBG Packed"; break;
+ case V4L2_PIX_FMT_SRGGB14P: descr = "14-bit Bayer RGRG/GBGB Packed"; break;
case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR"; break;
case V4L2_PIX_FMT_SGBRG16: descr = "16-bit Bayer GBGB/RGRG"; break;
case V4L2_PIX_FMT_SGRBG16: descr = "16-bit Bayer GRGR/BGBG"; break;
@@ -1274,6 +1316,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_VP8: descr = "VP8"; break;
case V4L2_PIX_FMT_VP9: descr = "VP9"; break;
case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */
+ case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */
case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break;
case V4L2_PIX_FMT_WNVA: descr = "WNVA"; break;
case V4L2_PIX_FMT_SN9C10X: descr = "GSPCA SN9C10X"; break;
@@ -1753,36 +1796,8 @@ static int v4l_enumstd(const struct v4l2_ioctl_ops *ops,
{
struct video_device *vfd = video_devdata(file);
struct v4l2_standard *p = arg;
- v4l2_std_id id = vfd->tvnorms, curr_id = 0;
- unsigned int index = p->index, i, j = 0;
- const char *descr = "";
-
- /* Return -ENODATA if the tvnorms for the current input
- or output is 0, meaning that it doesn't support this API. */
- if (id == 0)
- return -ENODATA;
- /* Return norm array in a canonical way */
- for (i = 0; i <= index && id; i++) {
- /* last std value in the standards array is 0, so this
- while always ends there since (id & 0) == 0. */
- while ((id & standards[j].std) != standards[j].std)
- j++;
- curr_id = standards[j].std;
- descr = standards[j].descr;
- j++;
- if (curr_id == 0)
- break;
- if (curr_id != V4L2_STD_PAL &&
- curr_id != V4L2_STD_SECAM &&
- curr_id != V4L2_STD_NTSC)
- id &= ~curr_id;
- }
- if (i <= index)
- return -EINVAL;
-
- v4l2_video_std_construct(p, curr_id, descr);
- return 0;
+ return v4l_video_std_enumstd(p, vfd->tvnorms);
}
static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
@@ -2662,11 +2677,62 @@ static bool v4l2_is_known_ioctl(unsigned int cmd)
return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd;
}
+#if IS_ENABLED(CONFIG_V4L2_MEM2MEM_DEV)
+static bool v4l2_ioctl_m2m_queue_is_output(unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case VIDIOC_CREATE_BUFS: {
+ struct v4l2_create_buffers *cbufs = arg;
+
+ return V4L2_TYPE_IS_OUTPUT(cbufs->format.type);
+ }
+ case VIDIOC_REQBUFS: {
+ struct v4l2_requestbuffers *rbufs = arg;
+
+ return V4L2_TYPE_IS_OUTPUT(rbufs->type);
+ }
+ case VIDIOC_QBUF:
+ case VIDIOC_DQBUF:
+ case VIDIOC_QUERYBUF:
+ case VIDIOC_PREPARE_BUF: {
+ struct v4l2_buffer *buf = arg;
+
+ return V4L2_TYPE_IS_OUTPUT(buf->type);
+ }
+ case VIDIOC_EXPBUF: {
+ struct v4l2_exportbuffer *expbuf = arg;
+
+ return V4L2_TYPE_IS_OUTPUT(expbuf->type);
+ }
+ case VIDIOC_STREAMON:
+ case VIDIOC_STREAMOFF: {
+ int *type = arg;
+
+ return V4L2_TYPE_IS_OUTPUT(*type);
+ }
+ default:
+ return false;
+ }
+}
+#endif
+
static struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev,
- unsigned int cmd)
+ struct v4l2_fh *vfh, unsigned int cmd,
+ void *arg)
{
if (_IOC_NR(cmd) >= V4L2_IOCTLS)
return vdev->lock;
+#if IS_ENABLED(CONFIG_V4L2_MEM2MEM_DEV)
+ if (vfh && vfh->m2m_ctx &&
+ (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) {
+ bool is_output = v4l2_ioctl_m2m_queue_is_output(cmd, arg);
+ struct v4l2_m2m_queue_ctx *ctx = is_output ?
+ &vfh->m2m_ctx->out_q_ctx : &vfh->m2m_ctx->cap_q_ctx;
+
+ if (ctx->q.lock)
+ return ctx->q.lock;
+ }
+#endif
if (vdev->queue && vdev->queue->lock &&
(v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE))
return vdev->queue->lock;
@@ -2733,7 +2799,7 @@ static long __video_do_ioctl(struct file *file,
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
vfh = file->private_data;
- lock = v4l2_ioctl_get_lock(vfd, cmd);
+ lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg);
if (lock && mutex_lock_interruptible(lock))
return -ERESTARTSYS;
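The per-queue lock lookup above only pays off for mem2mem drivers that give
each vb2 queue its own lock. A sketch of the driver side, assuming a typical
m2m queue_init callback (struct and field names are illustrative):

	static int queue_init(void *priv, struct vb2_queue *src_vq,
			      struct vb2_queue *dst_vq)
	{
		struct my_ctx *ctx = priv;

		/* With separate locks, an ioctl on the CAPTURE queue no
		 * longer serializes against a blocking DQBUF on the
		 * OUTPUT queue. */
		src_vq->lock = &ctx->out_lock;
		dst_vq->lock = &ctx->cap_lock;
		/* ... remaining vb2_queue setup, then vb2_queue_init() ... */
		return 0;
	}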
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index c4f963d96a79..ce9bd1b91210 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -17,9 +17,11 @@
#include <linux/sched.h>
#include <linux/slab.h>
+#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
@@ -50,9 +52,38 @@ module_param(debug, bool, 0644);
* offsets but for different queues */
#define DST_QUEUE_OFF_BASE (1 << 30)
+enum v4l2_m2m_entity_type {
+ MEM2MEM_ENT_TYPE_SOURCE,
+ MEM2MEM_ENT_TYPE_SINK,
+ MEM2MEM_ENT_TYPE_PROC
+};
+
+static const char * const m2m_entity_name[] = {
+ "source",
+ "sink",
+ "proc"
+};
/**
* struct v4l2_m2m_dev - per-device context
+ * @source: &struct media_entity pointer with the source entity
+ * Used only when the M2M device is registered via
+ * v4l2_m2m_register_media_controller().
+ * @source_pad: &struct media_pad with the source pad.
+ * Used only when the M2M device is registered via
+ * v4l2_m2m_register_media_controller().
+ * @sink: &struct media_entity pointer with the sink entity
+ * Used only when the M2M device is registered via
+ * v4l2_m2m_register_media_controller().
+ * @sink_pad: &struct media_pad with the sink pad.
+ * Used only when the M2M device is registered via
+ * v4l2_m2m_register_media_controller().
+ * @proc: &struct media_entity pointer with the M2M device itself.
+ * @proc_pads: &struct media_pad with the @proc pads.
+ * Used only when the M2M device is registered via
+ * v4l2_m2m_register_media_controller().
+ * @intf_devnode: &struct media_intf devnode pointer with the interface
+ * which controls the M2M device.
* @curr_ctx: currently running instance
* @job_queue: instances queued to run
* @job_spinlock: protects job_queue
@@ -60,6 +91,15 @@ module_param(debug, bool, 0644);
*/
struct v4l2_m2m_dev {
struct v4l2_m2m_ctx *curr_ctx;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_entity *source;
+ struct media_pad source_pad;
+ struct media_entity sink;
+ struct media_pad sink_pad;
+ struct media_entity proc;
+ struct media_pad proc_pads[2];
+ struct media_intf_devnode *intf_devnode;
+#endif
struct list_head job_queue;
spinlock_t job_spinlock;
@@ -107,6 +147,24 @@ void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
+void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
+{
+ struct v4l2_m2m_buffer *b;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+
+ if (list_empty(&q_ctx->rdy_queue)) {
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return NULL;
+ }
+
+ b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return &b->vb;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);
+
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
struct v4l2_m2m_buffer *b;
@@ -209,15 +267,24 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
-void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
+/*
+ * __v4l2_m2m_try_queue() - queue a job
+ * @m2m_dev: m2m device
+ * @m2m_ctx: m2m context
+ *
+ * Check if this context is ready to queue a job.
+ *
+ * This function can run in interrupt context.
+ */
+static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
{
- struct v4l2_m2m_dev *m2m_dev;
unsigned long flags_job, flags_out, flags_cap;
- m2m_dev = m2m_ctx->m2m_dev;
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
if (!m2m_ctx->out_q_ctx.q.streaming
@@ -275,7 +342,25 @@ void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
m2m_ctx->job_flags |= TRANS_QUEUED;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+}
+/**
+ * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
+ * @m2m_ctx: m2m context
+ *
+ * Check if this context is ready to queue a job. If suitable,
+ * run the next queued job on the mem2mem device.
+ *
+ * This function shouldn't run in interrupt context.
+ *
+ * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
+ * and then run another job for another context.
+ */
+void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;
+
+ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
@@ -300,7 +385,8 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
m2m_ctx->job_flags |= TRANS_ABORT;
if (m2m_ctx->job_flags & TRANS_RUNNING) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
- m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+ if (m2m_dev->m2m_ops->job_abort)
+ m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
wait_event(m2m_ctx->finished,
!(m2m_ctx->job_flags & TRANS_RUNNING));
@@ -339,7 +425,6 @@ void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
* allow more than one job on the job_queue per instance, each has
* to be scheduled separately after the previous one finishes. */
v4l2_m2m_try_schedule(m2m_ctx);
- v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
@@ -595,12 +680,179 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
+{
+ media_remove_intf_links(&m2m_dev->intf_devnode->intf);
+ media_devnode_remove(m2m_dev->intf_devnode);
+
+ media_entity_remove_links(m2m_dev->source);
+ media_entity_remove_links(&m2m_dev->sink);
+ media_entity_remove_links(&m2m_dev->proc);
+ media_device_unregister_entity(m2m_dev->source);
+ media_device_unregister_entity(&m2m_dev->sink);
+ media_device_unregister_entity(&m2m_dev->proc);
+ kfree(m2m_dev->source->name);
+ kfree(m2m_dev->sink.name);
+ kfree(m2m_dev->proc.name);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);
+
+static int v4l2_m2m_register_entity(struct media_device *mdev,
+ struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
+ struct video_device *vdev, int function)
+{
+ struct media_entity *entity;
+ struct media_pad *pads;
+ char *name;
+ unsigned int len;
+ int num_pads;
+ int ret;
+
+ switch (type) {
+ case MEM2MEM_ENT_TYPE_SOURCE:
+ entity = m2m_dev->source;
+ pads = &m2m_dev->source_pad;
+ pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ num_pads = 1;
+ break;
+ case MEM2MEM_ENT_TYPE_SINK:
+ entity = &m2m_dev->sink;
+ pads = &m2m_dev->sink_pad;
+ pads[0].flags = MEDIA_PAD_FL_SINK;
+ num_pads = 1;
+ break;
+ case MEM2MEM_ENT_TYPE_PROC:
+ entity = &m2m_dev->proc;
+ pads = m2m_dev->proc_pads;
+ pads[0].flags = MEDIA_PAD_FL_SINK;
+ pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ num_pads = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
+ if (type != MEM2MEM_ENT_TYPE_PROC) {
+ entity->info.dev.major = VIDEO_MAJOR;
+ entity->info.dev.minor = vdev->minor;
+ }
+ len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
+ name = kmalloc(len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
+ entity->name = name;
+ entity->function = function;
+
+ ret = media_entity_pads_init(entity, num_pads, pads);
+ if (ret)
+ return ret;
+ ret = media_device_register_entity(mdev, entity);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ struct video_device *vdev, int function)
+{
+ struct media_device *mdev = vdev->v4l2_dev->mdev;
+ struct media_link *link;
+ int ret;
+
+ if (!mdev)
+ return 0;
+
+ /* A memory-to-memory device consists of two DMA engine entities
+ * and one video processing entity. The DMA engine entities are
+ * linked to a V4L interface.
+ */
+
+ /* Create the three entities with their pads */
+ m2m_dev->source = &vdev->entity;
+ ret = v4l2_m2m_register_entity(mdev, m2m_dev,
+ MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
+ if (ret)
+ return ret;
+ ret = v4l2_m2m_register_entity(mdev, m2m_dev,
+ MEM2MEM_ENT_TYPE_PROC, vdev, function);
+ if (ret)
+ goto err_rel_entity0;
+ ret = v4l2_m2m_register_entity(mdev, m2m_dev,
+ MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
+ if (ret)
+ goto err_rel_entity1;
+
+ /* Connect the three entities */
+ ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rel_entity2;
+
+ ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rm_links0;
+
+ /* Create video interface */
+ m2m_dev->intf_devnode = media_devnode_create(mdev,
+ MEDIA_INTF_T_V4L_VIDEO, 0,
+ VIDEO_MAJOR, vdev->minor);
+ if (!m2m_dev->intf_devnode) {
+ ret = -ENOMEM;
+ goto err_rm_links1;
+ }
+
+ /* Connect the two DMA engines to the interface */
+ link = media_create_intf_link(m2m_dev->source,
+ &m2m_dev->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_devnode;
+ }
+
+ link = media_create_intf_link(&m2m_dev->sink,
+ &m2m_dev->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_intf_link;
+ }
+ return 0;
+
+err_rm_intf_link:
+ media_remove_intf_links(&m2m_dev->intf_devnode->intf);
+err_rm_devnode:
+ media_devnode_remove(m2m_dev->intf_devnode);
+err_rm_links1:
+ media_entity_remove_links(&m2m_dev->sink);
+err_rm_links0:
+ media_entity_remove_links(&m2m_dev->proc);
+ media_entity_remove_links(m2m_dev->source);
+err_rel_entity2:
+ media_device_unregister_entity(&m2m_dev->proc);
+ kfree(m2m_dev->proc.name);
+err_rel_entity1:
+ media_device_unregister_entity(&m2m_dev->sink);
+ kfree(m2m_dev->sink.name);
+err_rel_entity0:
+ media_device_unregister_entity(m2m_dev->source);
+ kfree(m2m_dev->source->name);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
+#endif
+
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
struct v4l2_m2m_dev *m2m_dev;
- if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
- WARN_ON(!m2m_ops->job_abort))
+ if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
return ERR_PTR(-EINVAL);
m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
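A mem2mem driver opts into the new source/proc/sink topology after
registering its video device. A minimal usage sketch under the assumption
of a typical probe path (error handling trimmed, the function constant is
chosen for illustration only):

	/* After video_register_device(vfd) on a device with a media_device: */
	ret = v4l2_m2m_register_media_controller(m2m_dev, vfd,
						 MEDIA_ENT_F_PROC_VIDEO_SCALER);
	if (ret)
		goto err_unreg_vdev;

	/* ... and the inverse on remove: */
	v4l2_m2m_unregister_media_controller(m2m_dev);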
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 6a7f7f75dfd7..2b63fa6b6fc9 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -494,6 +494,28 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_S_DV_TIMINGS:
return v4l2_subdev_call(sd, video, s_dv_timings, arg);
+
+ case VIDIOC_SUBDEV_G_STD:
+ return v4l2_subdev_call(sd, video, g_std, arg);
+
+ case VIDIOC_SUBDEV_S_STD: {
+ v4l2_std_id *std = arg;
+
+ return v4l2_subdev_call(sd, video, s_std, *std);
+ }
+
+ case VIDIOC_SUBDEV_ENUMSTD: {
+ struct v4l2_standard *p = arg;
+ v4l2_std_id id;
+
+ if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
+ return -EINVAL;
+
+ return v4l_video_std_enumstd(p, id);
+ }
+
+ case VIDIOC_SUBDEV_QUERYSTD:
+ return v4l2_subdev_call(sd, video, querystd, arg);
#endif
default:
return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 8d731d6c3e54..63389f075f1d 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -116,12 +116,14 @@ config FSL_CORENET_CF
config FSL_IFC
bool
- depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A
+ depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
+ depends on HAS_IOMEM
config JZ4780_NEMC
bool "Ingenic JZ4780 SoC NEMC driver"
default y
- depends on MACH_JZ4780
+ depends on MACH_JZ4780 || COMPILE_TEST
+ depends on HAS_IOMEM && OF
help
This driver is for the NAND/External Memory Controller (NEMC) in
the Ingenic JZ4780. This controller is used to handle external
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a625ac4e2872..e6b4ae558767 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -642,6 +642,7 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
freereq = 0;
if (event != MPI_EVENT_EVENT_CHANGE)
break;
+ /* else: fall through */
case MPI_FUNCTION_CONFIG:
case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
@@ -1779,7 +1780,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
struct proc_dir_entry *dent;
#endif
- ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC);
+ ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_KERNEL);
if (ioc == NULL) {
printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
return -ENOMEM;
@@ -1886,6 +1887,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
case MPI_MANUFACTPAGE_DEVICEID_FC939X:
case MPI_MANUFACTPAGE_DEVICEID_FC949X:
ioc->errata_flag_1064 = 1;
+ /* fall through */
case MPI_MANUFACTPAGE_DEVICEID_FC909:
case MPI_MANUFACTPAGE_DEVICEID_FC929:
case MPI_MANUFACTPAGE_DEVICEID_FC919:
@@ -1930,6 +1932,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
}
+ /* fall through */
case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
ioc->bus_type = SPI;
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 4470630dd545..8d22d6134a89 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2514,8 +2514,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
if (mpt_config(ioc, &cfg) == 0) {
ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf;
if (strlen(pdata->BoardTracerNumber) > 1) {
- strncpy(karg.serial_number, pdata->BoardTracerNumber, 24);
- karg.serial_number[24-1]='\0';
+ strlcpy(karg.serial_number,
+ pdata->BoardTracerNumber, 24);
}
}
pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 06b175420be9..b15fdc626fb8 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1292,7 +1292,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* SCSI needs scsi_cmnd lookup table!
* (with size equal to req_depth*PtrSz!)
*/
- ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC);
+ ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_KERNEL);
if (!ioc->ScsiLookup) {
error = -ENOMEM;
goto out_mptfc_probe;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 76a66da33996..b8cf2658649e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -4327,6 +4327,7 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
}
}
mpt_findImVolumes(ioc);
+ /* fall through */
case MPTSAS_ADD_DEVICE:
memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 6b16946f9b05..8fd5ec4d6042 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -67,10 +67,8 @@ static struct file *cxl_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags)
{
- struct qstr this;
- struct path path;
struct file *file;
- struct inode *inode = NULL;
+ struct inode *inode;
int rc;
/* strongly inspired by anon_inode_getfile() */
@@ -91,23 +89,11 @@ static struct file *cxl_getfile(const char *name,
goto err_fs;
}
- file = ERR_PTR(-ENOMEM);
- this.name = name;
- this.len = strlen(name);
- this.hash = 0;
- path.dentry = d_alloc_pseudo(cxl_vfs_mount->mnt_sb, &this);
- if (!path.dentry)
+ file = alloc_file_pseudo(inode, cxl_vfs_mount, name,
+ flags & (O_ACCMODE | O_NONBLOCK), fops);
+ if (IS_ERR(file))
goto err_inode;
- path.mnt = mntget(cxl_vfs_mount);
- d_instantiate(path.dentry, inode);
-
- file = alloc_file(&path, OPEN_FMODE(flags), fops);
- if (IS_ERR(file)) {
- path_put(&path);
- goto err_fs;
- }
- file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->private_data = priv;
return file;
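alloc_file_pseudo(), used above, folds the open-coded qstr/d_alloc_pseudo()/d_instantiate()/alloc_file() sequence into one VFS helper: it creates the anonymous dentry on the given mount, attaches the inode, and returns a file with f_flags already set from the flags argument. A sketch of the resulting caller shape (error handling as in the patch, assuming the caller still owns the inode reference on failure):

	file = alloc_file_pseudo(inode, mnt, "name",
				 flags & (O_ACCMODE | O_NONBLOCK), &my_fops);
	if (IS_ERR(file))
		iput(inode);	/* helper cleans up its own dentry only */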
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 7b370466a227..896e2df9400f 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -35,38 +35,45 @@
#include <uapi/linux/pcitest.h>
-#define DRV_MODULE_NAME "pci-endpoint-test"
-
-#define PCI_ENDPOINT_TEST_MAGIC 0x0
-
-#define PCI_ENDPOINT_TEST_COMMAND 0x4
-#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
-#define COMMAND_RAISE_MSI_IRQ BIT(1)
-#define MSI_NUMBER_SHIFT 2
-/* 6 bits for MSI number */
-#define COMMAND_READ BIT(8)
-#define COMMAND_WRITE BIT(9)
-#define COMMAND_COPY BIT(10)
-
-#define PCI_ENDPOINT_TEST_STATUS 0x8
-#define STATUS_READ_SUCCESS BIT(0)
-#define STATUS_READ_FAIL BIT(1)
-#define STATUS_WRITE_SUCCESS BIT(2)
-#define STATUS_WRITE_FAIL BIT(3)
-#define STATUS_COPY_SUCCESS BIT(4)
-#define STATUS_COPY_FAIL BIT(5)
-#define STATUS_IRQ_RAISED BIT(6)
-#define STATUS_SRC_ADDR_INVALID BIT(7)
-#define STATUS_DST_ADDR_INVALID BIT(8)
-
-#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0xc
+#define DRV_MODULE_NAME "pci-endpoint-test"
+
+#define IRQ_TYPE_UNDEFINED -1
+#define IRQ_TYPE_LEGACY 0
+#define IRQ_TYPE_MSI 1
+#define IRQ_TYPE_MSIX 2
+
+#define PCI_ENDPOINT_TEST_MAGIC 0x0
+
+#define PCI_ENDPOINT_TEST_COMMAND 0x4
+#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
+#define COMMAND_RAISE_MSI_IRQ BIT(1)
+#define COMMAND_RAISE_MSIX_IRQ BIT(2)
+#define COMMAND_READ BIT(3)
+#define COMMAND_WRITE BIT(4)
+#define COMMAND_COPY BIT(5)
+
+#define PCI_ENDPOINT_TEST_STATUS 0x8
+#define STATUS_READ_SUCCESS BIT(0)
+#define STATUS_READ_FAIL BIT(1)
+#define STATUS_WRITE_SUCCESS BIT(2)
+#define STATUS_WRITE_FAIL BIT(3)
+#define STATUS_COPY_SUCCESS BIT(4)
+#define STATUS_COPY_FAIL BIT(5)
+#define STATUS_IRQ_RAISED BIT(6)
+#define STATUS_SRC_ADDR_INVALID BIT(7)
+#define STATUS_DST_ADDR_INVALID BIT(8)
+
+#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18
-#define PCI_ENDPOINT_TEST_SIZE 0x1c
-#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
+#define PCI_ENDPOINT_TEST_SIZE 0x1c
+#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
+
+#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
+#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
static DEFINE_IDA(pci_endpoint_test_ida);
@@ -77,6 +84,10 @@ static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
+static int irq_type = IRQ_TYPE_MSI;
+module_param(irq_type, int, 0444);
+MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
+
enum pci_barno {
BAR_0,
BAR_1,
@@ -103,7 +114,7 @@ struct pci_endpoint_test {
struct pci_endpoint_test_data {
enum pci_barno test_reg_bar;
size_t alignment;
- bool no_msi;
+ int irq_type;
};
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
@@ -147,6 +158,100 @@ static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
+{
+ struct pci_dev *pdev = test->pdev;
+
+ pci_free_irq_vectors(pdev);
+}
+
+static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
+ int type)
+{
+ int irq = -1;
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ bool res = true;
+
+ switch (type) {
+ case IRQ_TYPE_LEGACY:
+ irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
+ if (irq < 0)
+ dev_err(dev, "Failed to get Legacy interrupt\n");
+ break;
+ case IRQ_TYPE_MSI:
+ irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
+ if (irq < 0)
+ dev_err(dev, "Failed to get MSI interrupts\n");
+ break;
+ case IRQ_TYPE_MSIX:
+ irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
+ if (irq < 0)
+ dev_err(dev, "Failed to get MSI-X interrupts\n");
+ break;
+ default:
+ dev_err(dev, "Invalid IRQ type selected\n");
+ }
+
+ if (irq < 0) {
+ irq = 0;
+ res = false;
+ }
+ test->num_irqs = irq;
+
+ return res;
+}
+
+static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
+{
+ int i;
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+
+ for (i = 0; i < test->num_irqs; i++)
+ devm_free_irq(dev, pci_irq_vector(pdev, i), test);
+
+ test->num_irqs = 0;
+}
+
+static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
+{
+ int i;
+ int err;
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+
+ for (i = 0; i < test->num_irqs; i++) {
+ err = devm_request_irq(dev, pci_irq_vector(pdev, i),
+ pci_endpoint_test_irqhandler,
+ IRQF_SHARED, DRV_MODULE_NAME, test);
+ if (err)
+ goto fail;
+ }
+
+ return true;
+
+fail:
+ switch (irq_type) {
+ case IRQ_TYPE_LEGACY:
+ dev_err(dev, "Failed to request IRQ %d for Legacy\n",
+ pci_irq_vector(pdev, i));
+ break;
+ case IRQ_TYPE_MSI:
+ dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
+ pci_irq_vector(pdev, i),
+ i + 1);
+ break;
+ case IRQ_TYPE_MSIX:
+ dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
+ pci_irq_vector(pdev, i),
+ i + 1);
+ break;
+ }
+
+ return false;
+}
+
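A note on the helper above: pci_alloc_irq_vectors() returns the number of vectors actually allocated (somewhere between min_vecs and max_vecs) or a negative errno, which is why irq < 0 is treated as failure and the count is stored otherwise. A condensed usage sketch:

	int i, nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;		/* not even one vector available */
	for (i = 0; i < nvec; i++)
		devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				 handler, IRQF_SHARED, "my-drv", priv);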
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
enum pci_barno barno)
{
@@ -179,6 +284,9 @@ static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
u32 val;
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
+ IRQ_TYPE_LEGACY);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
COMMAND_RAISE_LEGACY_IRQ);
val = wait_for_completion_timeout(&test->irq_raised,
@@ -190,14 +298,18 @@ static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
}
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
- u8 msi_num)
+ u16 msi_num, bool msix)
{
u32 val;
struct pci_dev *pdev = test->pdev;
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
+ msix == false ? IRQ_TYPE_MSI :
+ IRQ_TYPE_MSIX);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
- msi_num << MSI_NUMBER_SHIFT |
- COMMAND_RAISE_MSI_IRQ);
+ msix == false ? COMMAND_RAISE_MSI_IRQ :
+ COMMAND_RAISE_MSIX_IRQ);
val = wait_for_completion_timeout(&test->irq_raised,
msecs_to_jiffies(1000));
if (!val)
@@ -230,6 +342,11 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
if (size > SIZE_MAX - alignment)
goto err;
+ if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+ dev_err(dev, "Invalid IRQ type option\n");
+ goto err;
+ }
+
orig_src_addr = dma_alloc_coherent(dev, size + alignment,
&orig_src_phys_addr, GFP_KERNEL);
if (!orig_src_addr) {
@@ -281,8 +398,10 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
size);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
- 1 << MSI_NUMBER_SHIFT | COMMAND_COPY);
+ COMMAND_COPY);
wait_for_completion(&test->irq_raised);
@@ -318,6 +437,11 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
if (size > SIZE_MAX - alignment)
goto err;
+ if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+ dev_err(dev, "Invalid IRQ type option\n");
+ goto err;
+ }
+
orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
GFP_KERNEL);
if (!orig_addr) {
@@ -348,8 +472,10 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
- 1 << MSI_NUMBER_SHIFT | COMMAND_READ);
+ COMMAND_READ);
wait_for_completion(&test->irq_raised);
@@ -379,6 +505,11 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
if (size > SIZE_MAX - alignment)
goto err;
+ if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+ dev_err(dev, "Invalid IRQ type option\n");
+ goto err;
+ }
+
orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
GFP_KERNEL);
if (!orig_addr) {
@@ -403,8 +534,10 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
- 1 << MSI_NUMBER_SHIFT | COMMAND_WRITE);
+ COMMAND_WRITE);
wait_for_completion(&test->irq_raised);
@@ -417,6 +550,38 @@ err:
return ret;
}
+static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
+ int req_irq_type)
+{
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+
+ if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
+ dev_err(dev, "Invalid IRQ type option\n");
+ return false;
+ }
+
+ if (irq_type == req_irq_type)
+ return true;
+
+ pci_endpoint_test_release_irq(test);
+ pci_endpoint_test_free_irq_vectors(test);
+
+ if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
+ goto err;
+
+ if (!pci_endpoint_test_request_irq(test))
+ goto err;
+
+ irq_type = req_irq_type;
+ return true;
+
+err:
+ pci_endpoint_test_free_irq_vectors(test);
+ irq_type = IRQ_TYPE_UNDEFINED;
+ return false;
+}
+
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -436,7 +601,8 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
ret = pci_endpoint_test_legacy_irq(test);
break;
case PCITEST_MSI:
- ret = pci_endpoint_test_msi_irq(test, arg);
+ case PCITEST_MSIX:
+ ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
break;
case PCITEST_WRITE:
ret = pci_endpoint_test_write(test, arg);
@@ -447,6 +613,12 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
case PCITEST_COPY:
ret = pci_endpoint_test_copy(test, arg);
break;
+ case PCITEST_SET_IRQTYPE:
+ ret = pci_endpoint_test_set_irq(test, arg);
+ break;
+ case PCITEST_GET_IRQTYPE:
+ ret = irq_type;
+ break;
}
ret:
@@ -462,9 +634,7 @@ static const struct file_operations pci_endpoint_test_fops = {
static int pci_endpoint_test_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int i;
int err;
- int irq = 0;
int id;
char name[20];
enum pci_barno bar;
@@ -486,11 +656,14 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
test->alignment = 0;
test->pdev = pdev;
+ if (no_msi)
+ irq_type = IRQ_TYPE_LEGACY;
+
data = (struct pci_endpoint_test_data *)ent->driver_data;
if (data) {
test_reg_bar = data->test_reg_bar;
test->alignment = data->alignment;
- no_msi = data->no_msi;
+ irq_type = data->irq_type;
}
init_completion(&test->irq_raised);
@@ -510,28 +683,11 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (!no_msi) {
- irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
- if (irq < 0)
- dev_err(dev, "Failed to get MSI interrupts\n");
- test->num_irqs = irq;
- }
-
- err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
- IRQF_SHARED, DRV_MODULE_NAME, test);
- if (err) {
- dev_err(dev, "Failed to request IRQ %d\n", pdev->irq);
- goto err_disable_msi;
- }
+ if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
+ goto err_disable_irq;
- for (i = 1; i < irq; i++) {
- err = devm_request_irq(dev, pci_irq_vector(pdev, i),
- pci_endpoint_test_irqhandler,
- IRQF_SHARED, DRV_MODULE_NAME, test);
- if (err)
- dev_err(dev, "failed to request IRQ %d for MSI %d\n",
- pci_irq_vector(pdev, i), i + 1);
- }
+ if (!pci_endpoint_test_request_irq(test))
+ goto err_disable_irq;
for (bar = BAR_0; bar <= BAR_5; bar++) {
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
@@ -590,12 +746,10 @@ err_iounmap:
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
+ pci_endpoint_test_release_irq(test);
- for (i = 0; i < irq; i++)
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), test);
-
-err_disable_msi:
- pci_disable_msi(pdev);
+err_disable_irq:
+ pci_endpoint_test_free_irq_vectors(test);
pci_release_regions(pdev);
err_disable_pdev:
@@ -607,7 +761,6 @@ err_disable_pdev:
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
int id;
- int i;
enum pci_barno bar;
struct pci_endpoint_test *test = pci_get_drvdata(pdev);
struct miscdevice *misc_device = &test->miscdev;
@@ -624,9 +777,10 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
- for (i = 0; i < test->num_irqs; i++)
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), test);
- pci_disable_msi(pdev);
+
+ pci_endpoint_test_release_irq(test);
+ pci_endpoint_test_free_irq_vectors(test);
+
pci_release_regions(pdev);
pci_disable_device(pdev);
}
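With the new PCITEST_SET_IRQTYPE/PCITEST_GET_IRQTYPE ioctls and the MSI-X command, user space can now switch interrupt modes at run time instead of fixing them at module load. A hypothetical user-space sketch (the device node name depends on probe order and is an assumption here; the ioctl macros come from <linux/pcitest.h> as touched by this series):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/pcitest.h>

	int fd = open("/dev/pci-endpoint-test.0", O_RDWR);	/* node name: assumption */

	ioctl(fd, PCITEST_SET_IRQTYPE, 2);	/* 2 == MSI-X, matching the module parameter */
	ioctl(fd, PCITEST_MSIX, 1);		/* raise MSI-X vector 1 */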
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index c763b404510f..6c94474e36f4 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -24,7 +24,6 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
-#include <linux/dma/pxa-dma.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
@@ -637,10 +636,8 @@ static int pxamci_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct pxamci_host *host = NULL;
- struct resource *r, *dmarx, *dmatx;
- struct pxad_param param_rx, param_tx;
+ struct resource *r;
int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
- dma_cap_mask_t mask;
ret = pxamci_of_init(pdev);
if (ret)
@@ -739,34 +736,14 @@ static int pxamci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mmc);
- if (!pdev->dev.of_node) {
- dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!dmarx || !dmatx) {
- ret = -ENXIO;
- goto out;
- }
- param_rx.prio = PXAD_PRIO_LOWEST;
- param_rx.drcmr = dmarx->start;
- param_tx.prio = PXAD_PRIO_LOWEST;
- param_tx.drcmr = dmatx->start;
- }
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- host->dma_chan_rx =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &param_rx, &pdev->dev, "rx");
+ host->dma_chan_rx = dma_request_slave_channel(&pdev->dev, "rx");
if (host->dma_chan_rx == NULL) {
dev_err(&pdev->dev, "unable to request rx dma channel\n");
ret = -ENODEV;
goto out;
}
- host->dma_chan_tx =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &param_tx, &pdev->dev, "tx");
+ host->dma_chan_tx = dma_request_slave_channel(&pdev->dev, "tx");
if (host->dma_chan_tx == NULL) {
dev_err(&pdev->dev, "unable to request tx dma channel\n");
ret = -ENODEV;
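Dropping the dma_request_slave_channel_compat() path works because pxamci platforms now describe their DMA channels generically (via DT or the dmaengine slave map), so a plain named lookup is sufficient. A sketch of the simplified request, mirroring the patch:

	struct dma_chan *rx;

	rx = dma_request_slave_channel(&pdev->dev, "rx");
	if (!rx)
		return -ENODEV;	/* no channel bound under that name */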
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 46ab7feec6b6..c77f537323ec 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -24,7 +24,7 @@ config MTD_TESTS
config MTD_REDBOOT_PARTS
tristate "RedBoot partition table parsing"
- ---help---
+ help
RedBoot is a ROM monitor and bootloader which deals with multiple
'images' in flash devices by putting a table in one of the erase
blocks on the device, similar to a partition table, which gives
@@ -45,7 +45,7 @@ if MTD_REDBOOT_PARTS
config MTD_REDBOOT_DIRECTORY_BLOCK
int "Location of RedBoot partition table"
default "-1"
- ---help---
+ help
This option is the Linux counterpart to the
CYGNUM_REDBOOT_FIS_DIRECTORY_BLOCK RedBoot compile time
option.
@@ -75,7 +75,7 @@ endif # MTD_REDBOOT_PARTS
config MTD_CMDLINE_PARTS
tristate "Command line partition table parsing"
depends on MTD
- ---help---
+ help
Allow generic configuration of the MTD partition tables via the kernel
command line. Multiple flash resources are supported for hardware where
different kinds of flash memory are available.
@@ -112,7 +112,7 @@ config MTD_CMDLINE_PARTS
config MTD_AFS_PARTS
tristate "ARM Firmware Suite partition parsing"
depends on (ARM || ARM64)
- ---help---
+ help
The ARM Firmware Suite allows the user to divide flash devices into
multiple 'images'. Each such image has a header containing its name
and offset/size etc.
@@ -136,7 +136,7 @@ config MTD_OF_PARTS
config MTD_AR7_PARTS
tristate "TI AR7 partitioning support"
- ---help---
+ help
TI AR7 partitioning support
config MTD_BCM63XX_PARTS
@@ -170,7 +170,7 @@ config MTD_BLOCK
tristate "Caching block device access to MTD devices"
depends on BLOCK
select MTD_BLKDEVS
- ---help---
+ help
Although most flash chips have an erase size too large to be useful
as block devices, it is possible to use MTD devices which are based
on RAM chips in this manner. This block device is a user of MTD
@@ -205,7 +205,7 @@ config FTL
tristate "FTL (Flash Translation Layer) support"
depends on BLOCK
select MTD_BLKDEVS
- ---help---
+ help
This provides support for the original Flash Translation Layer which
is part of the PCMCIA specification. It uses a kind of pseudo-
file system on a flash device to emulate a block device with
@@ -222,7 +222,7 @@ config NFTL
tristate "NFTL (NAND Flash Translation Layer) support"
depends on BLOCK
select MTD_BLKDEVS
- ---help---
+ help
This provides support for the NAND Flash Translation Layer which is
used on M-Systems' DiskOnChip devices. It uses a kind of pseudo-
file system on a flash device to emulate a block device with
@@ -246,7 +246,7 @@ config INFTL
tristate "INFTL (Inverse NAND Flash Translation Layer) support"
depends on BLOCK
select MTD_BLKDEVS
- ---help---
+ help
This provides support for the Inverse NAND Flash Translation
Layer which is used on M-Systems' newer DiskOnChip devices. It
uses a kind of pseudo-file system on a flash device to emulate
@@ -261,10 +261,10 @@ config INFTL
not use it.
config RFD_FTL
- tristate "Resident Flash Disk (Flash Translation Layer) support"
+ tristate "Resident Flash Disk (Flash Translation Layer) support"
depends on BLOCK
select MTD_BLKDEVS
- ---help---
+ help
This provides support for the flash translation layer known
as the Resident Flash Disk (RFD), as used by the Embedded BIOS
of General Software. There is a blurb at:
@@ -308,7 +308,7 @@ config MTD_SWAP
select MTD_BLKDEVS
help
Provides volatile block device driver on top of mtd partition
- suitable for swapping. The mapping of written blocks is not saved.
+ suitable for swapping. The mapping of written blocks is not saved.
The driver provides wear leveling by storing erase counter into the
OOB.
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index bbfa1f129266..39ec32a29051 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -44,7 +44,7 @@ choice
prompt "Flash cmd/query data swapping"
depends on MTD_CFI_ADV_OPTIONS
default MTD_CFI_NOSWAP
- ---help---
+ help
This option defines the way in which the CPU attempts to arrange
data bits when writing the 'magic' commands to the chips. Saying
'NO', which is the default when CONFIG_MTD_CFI_ADV_OPTIONS isn't
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 1b64ac8c5bc8..72428b6bfc47 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1216,7 +1216,6 @@ static inline int do_read_secsi_onechip(struct map_info *map,
size_t grouplen)
{
DECLARE_WAITQUEUE(wait, current);
- unsigned long timeo = jiffies + HZ;
retry:
mutex_lock(&chip->mutex);
@@ -1229,7 +1228,6 @@ static inline int do_read_secsi_onechip(struct map_info *map,
schedule();
remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + HZ;
goto retry;
}
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index b57ceea21513..837b04ab96a9 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -202,16 +202,19 @@ static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
struct cfi_private *cfi = map->fldrv_priv;
__u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
#ifdef CONFIG_MODULES
- char probename[sizeof(VMLINUX_SYMBOL_STR(cfi_cmdset_%4.4X))];
cfi_cmdset_fn_t *probe_function;
+ char *probename;
- sprintf(probename, VMLINUX_SYMBOL_STR(cfi_cmdset_%4.4X), type);
+ probename = kasprintf(GFP_KERNEL, "cfi_cmdset_%4.4X", type);
+ if (!probename)
+ return NULL;
probe_function = __symbol_get(probename);
if (!probe_function) {
request_module("cfi_cmdset_%4.4X", type);
probe_function = __symbol_get(probename);
}
+ kfree(probename);
if (probe_function) {
struct mtd_info *mtd;
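Replacing the on-stack buffer sized via VMLINUX_SYMBOL_STR() with kasprintf() removes the dependency on the now-gone symbol-prefix macro and sizes the string exactly. Since kasprintf() allocates, it can fail and must be freed, as the patch does. A minimal sketch:

	char *name = kasprintf(GFP_KERNEL, "cfi_cmdset_%4.4X", type);
	if (!name)
		return NULL;
	/* ... look up the symbol by name ... */
	kfree(name);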
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 57b02c4b3f63..e514d57a0419 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -5,7 +5,7 @@ menu "Self-contained MTD device drivers"
config MTD_PMC551
tristate "Ramix PMC551 PCI Mezzanine RAM card support"
depends on PCI
- ---help---
+ help
This provides a MTD device driver for the Ramix PMC551 RAM PCI card
from Ramix Inc. <http://www.ramix.com/products/memory/pmc551.html>.
These devices come in memory configurations from 32M - 1G. If you
@@ -209,7 +209,7 @@ config MTD_DOCG3
select BCH
select BCH_CONST_PARAMS
select BITREVERSE
- ---help---
+ help
This provides an MTD device driver for the M-Systems DiskOnChip
G3 devices.
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index e84563d2067f..cbfafc453274 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -28,11 +28,9 @@
#include <linux/spi/flash.h>
#include <linux/mtd/spi-nor.h>
-#define MAX_CMD_SIZE 6
struct m25p {
struct spi_mem *spimem;
struct spi_nor spi_nor;
- u8 command[MAX_CMD_SIZE];
};
static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
@@ -70,7 +68,7 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
- SPI_MEM_OP_DUMMY(0, 1),
+ SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(len, buf, 1));
size_t remaining = len;
int ret;
@@ -78,7 +76,6 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
/* get transfer protocols. */
op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
- op.dummy.buswidth = op.addr.buswidth;
op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
@@ -202,6 +199,9 @@ static int m25p_probe(struct spi_mem *spimem)
if (data && data->name)
nor->mtd.name = data->name;
+ if (!nor->mtd.name)
+ nor->mtd.name = spi_mem_get_name(spimem);
+
/* For some (historical?) reason many platforms provide two different
* names in flash_platform_data: "name" and "type". Quite often name is
* set to "m25p80" and then "type" provides a real chip name.
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index c1312b141ae0..33593122e49b 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -223,6 +223,7 @@ static int powernv_flash_set_driver_info(struct device *dev,
mtd->_read = powernv_flash_read;
mtd->_write = powernv_flash_write;
mtd->dev.parent = dev;
+ mtd_set_of_node(mtd, dev->of_node);
return 0;
}
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 1897f33fe3e7..10d24efb4629 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -394,9 +394,8 @@ static int sst25l_probe(struct spi_device *spi)
flash->mtd.numeraseregions);
- ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,
- data ? data->parts : NULL,
- data ? data->nr_parts : 0);
+ ret = mtd_device_register(&flash->mtd, data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
if (ret)
return -ENODEV;
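These mtd_device_parse_register(mtd, NULL, NULL, parts, n) -> mtd_device_register(mtd, parts, n) conversions (here and in the map and onenand drivers below) are behavior-preserving: with a NULL types array the core falls back to the default parsers anyway, and mtd_device_register() is just shorthand for that call. In <linux/mtd/mtd.h> it is roughly:

	#define mtd_device_register(master, parts, nr_parts)	\
		mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)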
diff --git a/drivers/mtd/lpddr/Kconfig b/drivers/mtd/lpddr/Kconfig
index 3a19cbee24d7..a5a332fbd593 100644
--- a/drivers/mtd/lpddr/Kconfig
+++ b/drivers/mtd/lpddr/Kconfig
@@ -13,10 +13,10 @@ config MTD_QINFO_PROBE
depends on MTD_LPDDR
tristate "Detect flash chips by QINFO probe"
help
- Device Information for LPDDR chips is offered through the Overlay
- Window QINFO interface, permits software to be used for entire
- families of devices. This serves similar purpose of CFI on legacy
- Flash products
+	  Device Information for LPDDR chips is offered through the Overlay
+	  Window QINFO interface, which permits the same software to be used
+	  for entire families of devices. This serves a similar purpose to
+	  CFI on legacy flash products.
config MTD_LPDDR2_NVM
# ARM dependency is only for writel_relaxed()
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
index 5d73db2a496d..c950c880ad59 100644
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -476,7 +476,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
return -EINVAL;
}
/* Parse partitions and register the MTD device */
- return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+ return mtd_device_register(mtd, NULL, 0);
}
/*
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index bdc1283f30fb..afb36bff13a7 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -207,13 +207,13 @@ config MTD_ICHXROM
BE VERY CAREFUL.
config MTD_ESB2ROM
- tristate "BIOS flash chip on Intel ESB Controller Hub 2"
- depends on X86 && MTD_JEDECPROBE && PCI
- help
- Support for treating the BIOS flash chip on ESB2 motherboards
- as an MTD device - with this you can reprogram your BIOS.
+ tristate "BIOS flash chip on Intel ESB Controller Hub 2"
+ depends on X86 && MTD_JEDECPROBE && PCI
+ help
+ Support for treating the BIOS flash chip on ESB2 motherboards
+ as an MTD device - with this you can reprogram your BIOS.
- BE VERY CAREFUL.
+ BE VERY CAREFUL.
config MTD_CK804XROM
tristate "BIOS flash chip on Nvidia CK804"
@@ -401,12 +401,12 @@ config MTD_PISMO
When built as a module, it will be called pismo.ko
config MTD_LATCH_ADDR
- tristate "Latch-assisted Flash Chip Support"
- depends on MTD_COMPLEX_MAPPINGS
- help
- Map driver which allows flashes to be partially physically addressed
- and have the upper address lines set by a board specific code.
+ tristate "Latch-assisted Flash Chip Support"
+ depends on MTD_COMPLEX_MAPPINGS
+ help
+ Map driver which allows flashes to be partially physically addressed
+ and have the upper address lines set by a board specific code.
- If compiled as a module, it will be called latch-addr-flash.
+ If compiled as a module, it will be called latch-addr-flash.
endmenu
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 385305e66fd1..9d9723693217 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -239,6 +239,9 @@ static int gpio_flash_probe(struct platform_device *pdev)
state->map.bankwidth = pdata->width;
state->map.size = state->win_size * (1 << state->gpio_count);
state->map.virt = ioremap_nocache(memory->start, state->map.size);
+ if (!state->map.virt)
+ return -ENOMEM;
+
state->map.phys = NO_XIP;
state->map.map_priv_1 = (unsigned long)state;
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index a0b8fa7849a9..815e2db87955 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -88,9 +88,8 @@ static int __init init_impa7(void)
if (impa7_mtd[i]) {
impa7_mtd[i]->owner = THIS_MODULE;
devicesfound++;
- mtd_device_parse_register(impa7_mtd[i], NULL, NULL,
- partitions,
- ARRAY_SIZE(partitions));
+ mtd_device_register(impa7_mtd[i], partitions,
+ ARRAY_SIZE(partitions));
} else {
iounmap((void __iomem *)impa7_map[i].virt);
}
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index dd5d6855f543..69503aef981e 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -71,7 +71,7 @@ static int vr_nor_init_partitions(struct vr_nor_mtd *p)
{
/* register the flash bank */
/* partition the flash bank */
- return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
+ return mtd_device_register(p->info, NULL, 0);
}
static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 6dc97aa667dc..51db24b7f88d 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -197,9 +197,8 @@ static int latch_addr_flash_probe(struct platform_device *dev)
}
info->mtd->dev.parent = &dev->dev;
- mtd_device_parse_register(info->mtd, NULL, NULL,
- latch_addr_data->parts,
- latch_addr_data->nr_parts);
+ mtd_device_register(info->mtd, latch_addr_data->parts,
+ latch_addr_data->nr_parts);
return 0;
iounmap:
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 3a06ecfc55ff..80a187167c92 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -97,8 +97,7 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
goto err_out;
}
info->mtd->dev.parent = &dev->dev;
- err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts,
- pdata->nr_parts);
+ err = mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
if (err)
goto err_out;
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index bb580bc16445..c07f21b20463 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -59,9 +59,9 @@ static int __init init_soleng_maps(void)
return -ENXIO;
}
}
- printk(KERN_NOTICE "Solution Engine: Flash at 0x%08lx, EPROM at 0x%08lx\n",
- soleng_flash_map.phys & 0x1fffffff,
- soleng_eprom_map.phys & 0x1fffffff);
+ printk(KERN_NOTICE "Solution Engine: Flash at 0x%pap, EPROM at 0x%pap\n",
+ &soleng_flash_map.phys,
+ &soleng_eprom_map.phys);
flash_mtd->owner = THIS_MODULE;
eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index cd67c85cc87d..02389528f622 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -160,8 +160,12 @@ static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
pr_debug("MTD_read\n");
- if (*ppos + count > mtd->size)
- count = mtd->size - *ppos;
+ if (*ppos + count > mtd->size) {
+ if (*ppos < mtd->size)
+ count = mtd->size - *ppos;
+ else
+ count = 0;
+ }
if (!count)
return 0;
@@ -246,7 +250,7 @@ static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t c
pr_debug("MTD_write\n");
- if (*ppos == mtd->size)
+ if (*ppos >= mtd->size)
return -ENOSPC;
if (*ppos + count > mtd->size)
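The mtdchar_read() change guards against *ppos pointing past the device: the old `count = mtd->size - *ppos` underflows to a huge value when *ppos > mtd->size (e.g. a pread() far beyond the end), so the new branch clamps count to 0 and the read returns 0 like an EOF. Worked through:

	/* e.g. mtd->size = 0x1000, *ppos = 0x2000:
	 *   old: count = 0x1000 - 0x2000  -> wraps to a huge size_t
	 *   new: count = 0                -> read returns 0 (EOF)
	 */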
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 42395df06be9..97ac219c082e 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1155,21 +1155,29 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
int ret_code;
ops->retlen = ops->oobretlen = 0;
- if (!mtd->_read_oob)
- return -EOPNOTSUPP;
ret_code = mtd_check_oob_ops(mtd, from, ops);
if (ret_code)
return ret_code;
ledtrig_mtd_activity();
+
+ /* Check the validity of a potential fallback on mtd->_read */
+ if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf))
+ return -EOPNOTSUPP;
+
+ if (mtd->_read_oob)
+ ret_code = mtd->_read_oob(mtd, from, ops);
+ else
+ ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen,
+ ops->datbuf);
+
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
* similar to mtd->_read(), returning a non-negative integer
* representing max bitflips. In other cases, mtd->_read_oob() may
* return -EUCLEAN. In all cases, perform similar logic to mtd_read().
*/
- ret_code = mtd->_read_oob(mtd, from, ops);
if (unlikely(ret_code < 0))
return ret_code;
if (mtd->ecc_strength == 0)
@@ -1184,8 +1192,7 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to,
int ret;
ops->retlen = ops->oobretlen = 0;
- if (!mtd->_write_oob)
- return -EOPNOTSUPP;
+
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
@@ -1194,7 +1201,16 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to,
return ret;
ledtrig_mtd_activity();
- return mtd->_write_oob(mtd, to, ops);
+
+ /* Check the validity of a potential fallback on mtd->_write */
+ if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf))
+ return -EOPNOTSUPP;
+
+ if (mtd->_write_oob)
+ return mtd->_write_oob(mtd, to, ops);
+ else
+ return mtd->_write(mtd, to, ops->len, &ops->retlen,
+ ops->datbuf);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
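The fallback added above lets devices without OOB hooks service mtd_read_oob()/mtd_write_oob() through their plain _read/_write implementations, provided the caller did not actually request OOB data (ops->oobbuf must be NULL). Callers that only want main-area data can therefore use the OOB entry point uniformly; a sketch:

	struct mtd_oob_ops ops = {
		.mode   = MTD_OPS_PLACE_OOB,
		.len    = len,
		.datbuf = buf,
		.oobbuf = NULL,	/* no OOB requested: the _read fallback may apply */
	};
	int ret = mtd_read_oob(mtd, from, &ops);	/* >= 0: max bitflips, as for mtd_read() */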
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index f8d3a015cdad..52e2cb35fc79 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -322,22 +322,6 @@ static inline void free_partition(struct mtd_part *p)
kfree(p);
}
-/**
- * mtd_parse_part - parse MTD partition looking for subpartitions
- *
- * @slave: part that is supposed to be a container and should be parsed
- * @types: NULL-terminated array with names of partition parsers to try
- *
- * Some partitions are kind of containers with extra subpartitions (volumes).
- * There can be various formats of such containers. This function tries to use
- * specified parsers to analyze given partition and registers found
- * subpartitions on success.
- */
-static int mtd_parse_part(struct mtd_part *slave, const char *const *types)
-{
- return parse_mtd_partitions(&slave->mtd, types, NULL);
-}
-
static struct mtd_part *allocate_partition(struct mtd_info *parent,
const struct mtd_partition *part, int partno,
uint64_t cur_offset)
@@ -735,8 +719,8 @@ int add_mtd_partitions(struct mtd_info *master,
add_mtd_device(&slave->mtd);
mtd_add_partition_attrs(slave);
- if (parts[i].types)
- mtd_parse_part(slave, parts[i].types);
+ /* Look for subpartitions */
+ parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);
cur_offset = slave->offset + slave->mtd.size;
}
@@ -812,6 +796,12 @@ static const char * const default_mtd_part_types[] = {
NULL
};
+/* Check DT only when looking for subpartitions. */
+static const char * const default_subpartition_types[] = {
+ "ofpart",
+ NULL
+};
+
static int mtd_part_do_parse(struct mtd_part_parser *parser,
struct mtd_info *master,
struct mtd_partitions *pparts,
@@ -882,7 +872,9 @@ static int mtd_part_of_parse(struct mtd_info *master,
const char *fixed = "fixed-partitions";
int ret, err = 0;
- np = of_get_child_by_name(mtd_get_of_node(master), "partitions");
+ np = mtd_get_of_node(master);
+ if (!mtd_is_partition(master))
+ np = of_get_child_by_name(np, "partitions");
of_property_for_each_string(np, "compatible", prop, compat) {
parser = mtd_part_get_compatible_parser(compat);
if (!parser)
@@ -945,7 +937,8 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
int ret, err = 0;
if (!types)
- types = default_mtd_part_types;
+ types = mtd_is_partition(master) ? default_subpartition_types :
+ default_mtd_part_types;
for ( ; *types; types++) {
/*
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 88c7d3b4ff8b..9033215e62ea 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -4,3 +4,4 @@ config MTD_NAND_CORE
source "drivers/mtd/nand/onenand/Kconfig"
source "drivers/mtd/nand/raw/Kconfig"
+source "drivers/mtd/nand/spi/Kconfig"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 3f0cb87f1a57..7ecd80c0a66e 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
obj-y += onenand/
obj-y += raw/
+obj-y += spi/
diff --git a/drivers/mtd/nand/onenand/generic.c b/drivers/mtd/nand/onenand/generic.c
index d5ccaf943b91..acad17ec6581 100644
--- a/drivers/mtd/nand/onenand/generic.c
+++ b/drivers/mtd/nand/onenand/generic.c
@@ -66,9 +66,8 @@ static int generic_onenand_probe(struct platform_device *pdev)
goto out_iounmap;
}
- err = mtd_device_parse_register(&info->mtd, NULL, NULL,
- pdata ? pdata->parts : NULL,
- pdata ? pdata->nr_parts : 0);
+ err = mtd_device_register(&info->mtd, pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
platform_set_drvdata(pdev, info);
diff --git a/drivers/mtd/nand/onenand/samsung.c b/drivers/mtd/nand/onenand/samsung.c
index 4cce4c0311ca..e64d0fdf7eb5 100644
--- a/drivers/mtd/nand/onenand/samsung.c
+++ b/drivers/mtd/nand/onenand/samsung.c
@@ -933,9 +933,8 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
- err = mtd_device_parse_register(mtd, NULL, NULL,
- pdata ? pdata->parts : NULL,
- pdata ? pdata->nr_parts : 0);
+ err = mtd_device_register(mtd, pdata ? pdata->parts : NULL,
+ pdata ? pdata->nr_parts : 0);
if (err) {
dev_err(&pdev->dev, "failed to parse partitions and register the MTD device\n");
onenand_release(mtd);
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 6871ff0fd300..5fc9a1bde4ac 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -44,12 +44,12 @@ config MTD_NAND_DENALI
tristate
config MTD_NAND_DENALI_PCI
- tristate "Support Denali NAND controller on Intel Moorestown"
+ tristate "Support Denali NAND controller on Intel Moorestown"
select MTD_NAND_DENALI
depends on PCI
- help
- Enable the driver for NAND flash on Intel Moorestown, using the
- Denali NAND controller core.
+ help
+ Enable the driver for NAND flash on Intel Moorestown, using the
+ Denali NAND controller core.
config MTD_NAND_DENALI_DT
tristate "Support Denali NAND controller as a DT device"
@@ -77,9 +77,10 @@ config MTD_NAND_AMS_DELTA
config MTD_NAND_OMAP2
tristate "NAND Flash device on OMAP2, OMAP3, OMAP4 and Keystone"
- depends on (ARCH_OMAP2PLUS || ARCH_KEYSTONE)
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
+ depends on HAS_IOMEM
help
- Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
+ Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
and Keystone platforms.
config MTD_NAND_OMAP_BCH
@@ -137,7 +138,7 @@ config MTD_NAND_NDFC
depends on 4xx
select MTD_NAND_ECC_SMC
help
- NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
+ NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
config MTD_NAND_S3C2410_CLKSTOP
bool "Samsung S3C NAND IDLE clock stop"
@@ -152,6 +153,7 @@ config MTD_NAND_S3C2410_CLKSTOP
config MTD_NAND_TANGO
tristate "NAND Flash support for Tango chips"
depends on ARCH_TANGO || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enables the NAND Flash controller on Tango chips.
@@ -168,40 +170,40 @@ config MTD_NAND_DISKONCHIP
these devices.
config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- bool "Advanced detection options for DiskOnChip"
- depends on MTD_NAND_DISKONCHIP
- help
- This option allows you to specify nonstandard address at which to
- probe for a DiskOnChip, or to change the detection options. You
- are unlikely to need any of this unless you are using LinuxBIOS.
- Say 'N'.
+ bool "Advanced detection options for DiskOnChip"
+ depends on MTD_NAND_DISKONCHIP
+ help
+	  This option allows you to specify a nonstandard address at which to
+	  probe for a DiskOnChip, or to change the detection options. You
+	  are unlikely to need any of this unless you are using LinuxBIOS.
+	  Say 'N'.
config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
- hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- depends on MTD_NAND_DISKONCHIP
- default "0"
- ---help---
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option allows you to specify a single address at which to probe
- for the device, which is useful if you have other devices in that
- range which get upset when they are probed.
-
- (Note that on PowerPC, the normal probe will only check at
- 0xE4000000.)
-
- Normally, you should leave this set to zero, to allow the probe at
- the normal addresses.
+ hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ depends on MTD_NAND_DISKONCHIP
+ default "0"
+ help
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option allows you to specify a single address at which to probe
+ for the device, which is useful if you have other devices in that
+ range which get upset when they are probed.
+
+ (Note that on PowerPC, the normal probe will only check at
+ 0xE4000000.)
+
+ Normally, you should leave this set to zero, to allow the probe at
+ the normal addresses.
config MTD_NAND_DISKONCHIP_PROBE_HIGH
- bool "Probe high addresses"
- depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- help
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option changes to make it probe between 0xFFFC8000 and
- 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
- useful to you. Say 'N'.
+ bool "Probe high addresses"
+ depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ help
+ By default, the probe for DiskOnChip devices will look for a
+ DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+ This option changes to make it probe between 0xFFFC8000 and
+ 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
+ useful to you. Say 'N'.
config MTD_NAND_DISKONCHIP_BBTWRITE
bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
@@ -247,7 +249,8 @@ config MTD_NAND_DOCG4
config MTD_NAND_SHARPSL
tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
- depends on ARCH_PXA
+ depends on ARCH_PXA || COMPILE_TEST
+ depends on HAS_IOMEM
config MTD_NAND_CAFE
tristate "NAND support for OLPC CAFÉ chip"
@@ -274,7 +277,9 @@ config MTD_NAND_CS553X
config MTD_NAND_ATMEL
tristate "Support for NAND Flash / SmartMedia on AT91"
- depends on ARCH_AT91
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on HAS_IOMEM
+ select GENERIC_ALLOCATOR
select MFD_ATMEL_SMC
help
Enables support for NAND Flash / Smart Media Card interface
@@ -294,7 +299,8 @@ config MTD_NAND_MARVELL
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"
- depends on ARCH_LPC32XX
+ depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enables support for NXP's LPC32XX SLC (i.e. for Single Level Cell
chips) NAND controller. This is the default for the PHYTEC 3250
@@ -305,7 +311,8 @@ config MTD_NAND_SLC_LPC32XX
config MTD_NAND_MLC_LPC32XX
tristate "NXP LPC32xx MLC Controller"
- depends on ARCH_LPC32XX
+ depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Uses the LPC32XX MLC (i.e. for Multi Level Cell chips) NAND
controller. This is the default for the WORK92105 controller
@@ -339,17 +346,18 @@ config MTD_NAND_NANDSIM
MTD nand layer.
config MTD_NAND_GPMI_NAND
- tristate "GPMI NAND Flash Controller driver"
- depends on MTD_NAND && MXS_DMA
- help
- Enables NAND Flash support for IMX23, IMX28 or IMX6.
- The GPMI controller is very powerful, with the help of BCH
- module, it can do the hardware ECC. The GPMI supports several
- NAND flashs at the same time.
+ tristate "GPMI NAND Flash Controller driver"
+ depends on MXS_DMA
+ help
+	  Enables NAND Flash support for IMX23, IMX28 or IMX6.
+	  The GPMI controller is very powerful; with the help of the BCH
+	  module it can do hardware ECC. The GPMI supports several
+	  NAND flashes at the same time.
config MTD_NAND_BRCMNAND
tristate "Broadcom STB NAND controller"
- depends on ARM || ARM64 || MIPS
+ depends on ARM || ARM64 || MIPS || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enables the Broadcom NAND controller driver. The controller was
originally designed for Set-Top Box but is used on various BCM7xxx,
@@ -358,6 +366,7 @@ config MTD_NAND_BRCMNAND
config MTD_NAND_BCM47XXNFLASH
tristate "Support for NAND flash on BCM4706 BCMA bus"
depends on BCMA_NFLASH
+ depends on BCMA
help
BCMA bus can have various flash memories attached, they are
registered by bcma as platform devices. This enables driver for
@@ -399,7 +408,8 @@ config MTD_NAND_FSL_ELBC
config MTD_NAND_FSL_IFC
tristate "NAND support for Freescale IFC controller"
- depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A
+ depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
+ depends on HAS_IOMEM
select FSL_IFC
select MEMORY
help
@@ -437,7 +447,8 @@ config MTD_NAND_VF610_NFC
config MTD_NAND_MXC
tristate "MXC NAND support"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the driver for the NAND flash controller on the
MXC processors.
@@ -451,15 +462,17 @@ config MTD_NAND_SH_FLCTL
for NAND Flash using FLCTL.
config MTD_NAND_DAVINCI
- tristate "Support NAND on DaVinci/Keystone SoC"
- depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF)
- help
+ tristate "Support NAND on DaVinci/Keystone SoC"
+ depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF) || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
Enable the driver for NAND flash chips on Texas Instruments
DaVinci/Keystone processors.
config MTD_NAND_TXX9NDFMC
tristate "NAND Flash support for TXx9 SoC"
- depends on SOC_TX4938 || SOC_TX4939
+ depends on SOC_TX4938 || SOC_TX4939 || COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the NAND flash controller on the TXx9 SoCs.
@@ -471,28 +484,31 @@ config MTD_NAND_SOCRATES
config MTD_NAND_NUC900
tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
- depends on ARCH_W90X900
+ depends on ARCH_W90X900 || COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the driver for the NAND Flash on evaluation board based
on w90p910 / NUC9xx.
config MTD_NAND_JZ4740
tristate "Support for JZ4740 SoC NAND controller"
- depends on MACH_JZ4740
+ depends on MACH_JZ4740 || COMPILE_TEST
+ depends on HAS_IOMEM
help
- Enables support for NAND Flash on JZ4740 SoC based boards.
+ Enables support for NAND Flash on JZ4740 SoC based boards.
config MTD_NAND_JZ4780
tristate "Support for NAND on JZ4780 SoC"
- depends on MACH_JZ4780 && JZ4780_NEMC
+ depends on JZ4780_NEMC
help
Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
based boards, using the BCH controller for hardware error correction.
config MTD_NAND_FSMC
tristate "Support for NAND on ST Micros FSMC"
- depends on OF
- depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300
+ depends on OF && HAS_IOMEM
+ depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300 || \
+ COMPILE_TEST
help
Enables support for NAND Flash chips on the ST Microelectronics
Flexible Static Memory Controller (FSMC)
@@ -506,19 +522,22 @@ config MTD_NAND_XWAY
config MTD_NAND_SUNXI
tristate "Support for NAND on Allwinner SoCs"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enables support for NAND Flash chips on Allwinner SoCs.
config MTD_NAND_HISI504
tristate "Support for NAND controller on Hisilicon SoC Hip04"
depends on ARCH_HISI || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enables support for NAND controller on Hisilicon SoC Hip04.
config MTD_NAND_QCOM
tristate "Support for NAND on QCOM SoCs"
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enables support for NAND flash chips on SoCs containing the EBI2 NAND
controller. This controller is found on IPQ806x SoC.
@@ -526,8 +545,20 @@ config MTD_NAND_QCOM
config MTD_NAND_MTK
tristate "Support for NAND controller on MTK SoCs"
depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enables support for NAND controller on MTK SoCs.
This controller is found on mt27xx, mt81xx, mt65xx SoCs.
+config MTD_NAND_TEGRA
+ tristate "Support for NAND controller on NVIDIA Tegra"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables support for NAND flash controller on NVIDIA Tegra SoC.
+ The driver has been developed and tested on a Tegra 2 SoC. DMA
+ support, raw read/write page as well as HW ECC read/write page
+ is supported. Extra OOB bytes when using HW ECC are currently
+ not supported.
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 165b7ef9e9a1..d5a5f9832b88 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
+obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_amd.o
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 12f6753d47ae..a068b214ebaa 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -52,7 +52,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/genalloc.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
@@ -129,6 +128,11 @@
#define DEFAULT_TIMEOUT_MS 1000
#define MIN_DMA_LEN 128
+static bool atmel_nand_avoid_dma __read_mostly;
+
+MODULE_PARM_DESC(avoiddma, "Avoid using DMA");
+module_param_named(avoiddma, atmel_nand_avoid_dma, bool, 0400);
+
enum atmel_nand_rb_type {
ATMEL_NAND_NO_RB,
ATMEL_NAND_NATIVE_RB,
@@ -197,7 +201,7 @@ struct atmel_nand_controller_ops {
int (*remove)(struct atmel_nand_controller *nc);
void (*nand_init)(struct atmel_nand_controller *nc,
struct atmel_nand *nand);
- int (*ecc_init)(struct atmel_nand *nand);
+ int (*ecc_init)(struct nand_chip *chip);
int (*setup_data_interface)(struct atmel_nand *nand, int csline,
const struct nand_data_interface *conf);
};
@@ -211,7 +215,7 @@ struct atmel_nand_controller_caps {
};
struct atmel_nand_controller {
- struct nand_hw_control base;
+ struct nand_controller base;
const struct atmel_nand_controller_caps *caps;
struct device *dev;
struct regmap *smc;
@@ -222,7 +226,7 @@ struct atmel_nand_controller {
};
static inline struct atmel_nand_controller *
-to_nand_controller(struct nand_hw_control *ctl)
+to_nand_controller(struct nand_controller *ctl)
{
return container_of(ctl, struct atmel_nand_controller, base);
}
@@ -234,7 +238,7 @@ struct atmel_smc_nand_controller {
};
static inline struct atmel_smc_nand_controller *
-to_smc_nand_controller(struct nand_hw_control *ctl)
+to_smc_nand_controller(struct nand_controller *ctl)
{
return container_of(to_nand_controller(ctl),
struct atmel_smc_nand_controller, base);
@@ -258,7 +262,7 @@ struct atmel_hsmc_nand_controller {
};
static inline struct atmel_hsmc_nand_controller *
-to_hsmc_nand_controller(struct nand_hw_control *ctl)
+to_hsmc_nand_controller(struct nand_controller *ctl)
{
return container_of(to_nand_controller(ctl),
struct atmel_hsmc_nand_controller, base);
@@ -1128,9 +1132,8 @@ static int atmel_nand_pmecc_init(struct nand_chip *chip)
return 0;
}
-static int atmel_nand_ecc_init(struct atmel_nand *nand)
+static int atmel_nand_ecc_init(struct nand_chip *chip)
{
- struct nand_chip *chip = &nand->base;
struct atmel_nand_controller *nc;
int ret;
@@ -1165,12 +1168,11 @@ static int atmel_nand_ecc_init(struct atmel_nand *nand)
return 0;
}
-static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand)
+static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip)
{
- struct nand_chip *chip = &nand->base;
int ret;
- ret = atmel_nand_ecc_init(nand);
+ ret = atmel_nand_ecc_init(chip);
if (ret)
return ret;
@@ -1553,23 +1555,7 @@ static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
chip->select_chip = atmel_hsmc_nand_select_chip;
}
-static int atmel_nand_detect(struct atmel_nand *nand)
-{
- struct nand_chip *chip = &nand->base;
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct atmel_nand_controller *nc;
- int ret;
-
- nc = to_nand_controller(chip->controller);
-
- ret = nand_scan_ident(mtd, nand->numcs, NULL);
- if (ret)
- dev_err(nc->dev, "nand_scan_ident() failed: %d\n", ret);
-
- return ret;
-}
-
-static int atmel_nand_unregister(struct atmel_nand *nand)
+static int atmel_nand_controller_remove_nand(struct atmel_nand *nand)
{
struct nand_chip *chip = &nand->base;
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -1585,60 +1571,6 @@ static int atmel_nand_unregister(struct atmel_nand *nand)
return 0;
}
-static int atmel_nand_register(struct atmel_nand *nand)
-{
- struct nand_chip *chip = &nand->base;
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct atmel_nand_controller *nc;
- int ret;
-
- nc = to_nand_controller(chip->controller);
-
- if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
- /*
- * We keep the MTD name unchanged to avoid breaking platforms
- * where the MTD cmdline parser is used and the bootloader
- * has not been updated to use the new naming scheme.
- */
- mtd->name = "atmel_nand";
- } else if (!mtd->name) {
- /*
- * If the new bindings are used and the bootloader has not been
- * updated to pass a new mtdparts parameter on the cmdline, you
- * should define the following property in your nand node:
- *
- * label = "atmel_nand";
- *
- * This way, mtd->name will be set by the core when
- * nand_set_flash_node() is called.
- */
- mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
- "%s:nand.%d", dev_name(nc->dev),
- nand->cs[0].id);
- if (!mtd->name) {
- dev_err(nc->dev, "Failed to allocate mtd->name\n");
- return -ENOMEM;
- }
- }
-
- ret = nand_scan_tail(mtd);
- if (ret) {
- dev_err(nc->dev, "nand_scan_tail() failed: %d\n", ret);
- return ret;
- }
-
- ret = mtd_device_register(mtd, NULL, 0);
- if (ret) {
- dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
- nand_cleanup(chip);
- return ret;
- }
-
- list_add_tail(&nand->node, &nc->chips);
-
- return 0;
-}
-
static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
struct device_node *np,
int reg_cells)
@@ -1750,6 +1682,8 @@ static int
atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
struct atmel_nand *nand)
{
+ struct nand_chip *chip = &nand->base;
+ struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
/* No card inserted, skip this NAND. */
@@ -1760,15 +1694,22 @@ atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
nc->caps->ops->nand_init(nc, nand);
- ret = atmel_nand_detect(nand);
- if (ret)
+ ret = nand_scan(mtd, nand->numcs);
+ if (ret) {
+ dev_err(nc->dev, "NAND scan failed: %d\n", ret);
return ret;
+ }
- ret = nc->caps->ops->ecc_init(nand);
- if (ret)
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
return ret;
+ }
+
+ list_add_tail(&nand->node, &nc->chips);
- return atmel_nand_register(nand);
+ return 0;
}
static int
@@ -1778,7 +1719,7 @@ atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc)
int ret;
list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
- ret = atmel_nand_unregister(nand);
+ ret = atmel_nand_controller_remove_nand(nand);
if (ret)
return ret;
}
@@ -1953,6 +1894,51 @@ static const struct of_device_id atmel_matrix_of_ids[] = {
{ /* sentinel */ },
};
+static int atmel_nand_attach_chip(struct nand_chip *chip)
+{
+ struct atmel_nand_controller *nc = to_nand_controller(chip->controller);
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nc->caps->ops->ecc_init(chip);
+ if (ret)
+ return ret;
+
+ if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
+ /*
+ * We keep the MTD name unchanged to avoid breaking platforms
+ * where the MTD cmdline parser is used and the bootloader
+ * has not been updated to use the new naming scheme.
+ */
+ mtd->name = "atmel_nand";
+ } else if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+ * should define the following property in your nand node:
+ *
+ * label = "atmel_nand";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
+ "%s:nand.%d", dev_name(nc->dev),
+ nand->cs[0].id);
+ if (!mtd->name) {
+ dev_err(nc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops atmel_nand_controller_ops = {
+ .attach_chip = atmel_nand_attach_chip,
+};
+
static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
struct platform_device *pdev,
const struct atmel_nand_controller_caps *caps)
@@ -1961,7 +1947,8 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
struct device_node *np = dev->of_node;
int ret;
- nand_hw_control_init(&nc->base);
+ nand_controller_init(&nc->base);
+ nc->base.ops = &atmel_nand_controller_ops;
INIT_LIST_HEAD(&nc->chips);
nc->dev = dev;
nc->caps = caps;
@@ -1977,7 +1964,7 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
return ret;
}
- if (nc->caps->has_dma) {
+ if (nc->caps->has_dma && !atmel_nand_avoid_dma) {
dma_cap_mask_t mask;
dma_cap_zero(mask);
@@ -2045,7 +2032,7 @@ atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
return ret;
}
- nc->ebi_csa_offs = (unsigned int)match->data;
+ nc->ebi_csa_offs = (uintptr_t)match->data;
/*
* The at91sam9263 has 2 EBIs, if the NAND controller is under EBI1
@@ -2214,9 +2201,9 @@ atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc)
return -ENOMEM;
}
- nc->sram.virt = gen_pool_dma_alloc(nc->sram.pool,
- ATMEL_NFC_SRAM_SIZE,
- &nc->sram.dma);
+ nc->sram.virt = (void __iomem *)gen_pool_dma_alloc(nc->sram.pool,
+ ATMEL_NFC_SRAM_SIZE,
+ &nc->sram.dma);
if (!nc->sram.virt) {
dev_err(nc->base.dev,
"Could not allocate memory from the NFC SRAM pool\n");
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index df0ef1f1e2f5..35f5c84cd331 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -8,7 +8,6 @@
*/
#include <linux/slab.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 1306aaa7a8bf..4b90d5b380c2 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -114,7 +114,7 @@ enum {
struct brcmnand_controller {
struct device *dev;
- struct nand_hw_control controller;
+ struct nand_controller controller;
void __iomem *nand_base;
void __iomem *nand_fc; /* flash cache */
void __iomem *flash_dma_base;
@@ -2208,6 +2208,40 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
return 0;
}
+static int brcmnand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ int ret;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ /*
+ * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
+ * to/from, and have nand_base pass us a bounce buffer instead, as
+ * needed.
+ */
+ chip->options |= NAND_USE_BOUNCE_BUFFER;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (brcmnand_setup_dev(host))
+ return -ENXIO;
+
+ chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
+
+ /* only use our internal HW threshold */
+ mtd->bitflip_threshold = 1;
+
+ ret = brcmstb_choose_ecc_layout(host);
+
+ return ret;
+}
+
+static const struct nand_controller_ops brcmnand_controller_ops = {
+ .attach_chip = brcmnand_attach_chip,
+};
+
static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
{
struct brcmnand_controller *ctrl = host->ctrl;
@@ -2267,33 +2301,7 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
nand_writereg(ctrl, cfg_offs,
nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret)
- return ret;
-
- chip->options |= NAND_NO_SUBPAGE_WRITE;
- /*
- * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
- * to/from, and have nand_base pass us a bounce buffer instead, as
- * needed.
- */
- chip->options |= NAND_USE_BOUNCE_BUFFER;
-
- if (chip->bbt_options & NAND_BBT_USE_FLASH)
- chip->bbt_options |= NAND_BBT_NO_OOB;
-
- if (brcmnand_setup_dev(host))
- return -ENXIO;
-
- chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
- /* only use our internal HW threshold */
- mtd->bitflip_threshold = 1;
-
- ret = brcmstb_choose_ecc_layout(host);
- if (ret)
- return ret;
-
- ret = nand_scan_tail(mtd);
+ ret = nand_scan(mtd, 1);
if (ret)
return ret;
@@ -2433,7 +2441,8 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
init_completion(&ctrl->done);
init_completion(&ctrl->dma_done);
- nand_hw_control_init(&ctrl->controller);
+ nand_controller_init(&ctrl->controller);
+ ctrl->controller.ops = &brcmnand_controller_ops;
INIT_LIST_HEAD(&ctrl->host_list);
/* NAND register range */
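
The NAND_USE_BOUNCE_BUFFER comment moved into brcmnand_attach_chip() hinges on one property of the buffers involved; roughly, the core applies a test like the sketch below (a simplification, not the exact nand_base logic) before handing a buffer to DMA:

	#include <linux/mm.h>

	static bool my_dma_safe(const void *buf)
	{
		/* kmap()'d or vmalloc()'d buffers (e.g. from JFFS2) fail this
		 * test and are bounced through the chip's own buffer; only
		 * linear-map addresses may be dma_map_single()'d.
		 */
		return virt_addr_valid(buf);
	}
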
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index d721f489b38b..1dbe43adcfe7 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -67,6 +67,7 @@ struct cafe_priv {
int nr_data;
int data_pos;
int page_addr;
+ bool usedma;
dma_addr_t dmaaddr;
unsigned char *dmabuf;
};
@@ -121,7 +122,7 @@ static void cafe_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
struct nand_chip *chip = mtd_to_nand(mtd);
struct cafe_priv *cafe = nand_get_controller_data(chip);
- if (usedma)
+ if (cafe->usedma)
memcpy(cafe->dmabuf + cafe->datalen, buf, len);
else
memcpy_toio(cafe->mmio + CAFE_NAND_WRITE_DATA + cafe->datalen, buf, len);
@@ -137,7 +138,7 @@ static void cafe_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
struct nand_chip *chip = mtd_to_nand(mtd);
struct cafe_priv *cafe = nand_get_controller_data(chip);
- if (usedma)
+ if (cafe->usedma)
memcpy(buf, cafe->dmabuf + cafe->datalen, len);
else
memcpy_fromio(buf, cafe->mmio + CAFE_NAND_READ_DATA + cafe->datalen, len);
@@ -253,7 +254,7 @@ static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
/* NB: The datasheet lies -- we really should be subtracting 1 here */
cafe_writel(cafe, cafe->datalen, NAND_DATA_LEN);
cafe_writel(cafe, 0x90000000, NAND_IRQ);
- if (usedma && (ctl1 & (3<<25))) {
+ if (cafe->usedma && (ctl1 & (3<<25))) {
uint32_t dmactl = 0xc0000000 + cafe->datalen;
/* If WR or RD bits set, set up DMA */
if (ctl1 & (1<<26)) {
@@ -345,11 +346,6 @@ static irqreturn_t cafe_nand_interrupt(int irq, void *id)
return IRQ_HANDLED;
}
-static void cafe_nand_bug(struct mtd_info *mtd)
-{
- BUG();
-}
-
static int cafe_nand_write_oob(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
@@ -598,6 +594,76 @@ static int cafe_mul(int x)
return gf4096_mul(x, 0xe01);
}
+static int cafe_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+ int err = 0;
+
+ cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112,
+ &cafe->dmaaddr, GFP_KERNEL);
+ if (!cafe->dmabuf)
+ return -ENOMEM;
+
+ /* Set up DMA address */
+ cafe_writel(cafe, lower_32_bits(cafe->dmaaddr), NAND_DMA_ADDR0);
+ cafe_writel(cafe, upper_32_bits(cafe->dmaaddr), NAND_DMA_ADDR1);
+
+ cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
+ cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
+
+ /* Restore the DMA flag */
+ cafe->usedma = usedma;
+
+ cafe->ctl2 = BIT(27); /* Reed-Solomon ECC */
+ if (mtd->writesize == 2048)
+ cafe->ctl2 |= BIT(29); /* 2KiB page size */
+
+ /* Set up ECC according to the type of chip we found */
+ mtd_set_ooblayout(mtd, &cafe_ooblayout_ops);
+ if (mtd->writesize == 2048) {
+ cafe->nand.bbt_td = &cafe_bbt_main_descr_2048;
+ cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048;
+ } else if (mtd->writesize == 512) {
+ cafe->nand.bbt_td = &cafe_bbt_main_descr_512;
+ cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512;
+ } else {
+ dev_warn(&cafe->pdev->dev,
+ "Unexpected NAND flash writesize %d. Aborting\n",
+ mtd->writesize);
+ err = -ENOTSUPP;
+ goto out_free_dma;
+ }
+
+ cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+ cafe->nand.ecc.size = mtd->writesize;
+ cafe->nand.ecc.bytes = 14;
+ cafe->nand.ecc.strength = 4;
+ cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
+ cafe->nand.ecc.write_oob = cafe_nand_write_oob;
+ cafe->nand.ecc.read_page = cafe_nand_read_page;
+ cafe->nand.ecc.read_oob = cafe_nand_read_oob;
+
+ return 0;
+
+ out_free_dma:
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
+
+ return err;
+}
+
+static void cafe_nand_detach_chip(struct nand_chip *chip)
+{
+ struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
+}
+
+static const struct nand_controller_ops cafe_nand_controller_ops = {
+ .attach_chip = cafe_nand_attach_chip,
+ .detach_chip = cafe_nand_detach_chip,
+};
+
static int cafe_nand_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -605,7 +671,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
struct cafe_priv *cafe;
uint32_t ctrl;
int err = 0;
- int old_dma;
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
@@ -713,65 +778,15 @@ static int cafe_nand_probe(struct pci_dev *pdev,
cafe_readl(cafe, GLOBAL_CTRL),
cafe_readl(cafe, GLOBAL_IRQ_MASK));
- /* Do not use the DMA for the nand_scan_ident() */
- old_dma = usedma;
- usedma = 0;
+ /* Do not use DMA during NAND identification */
+ cafe->usedma = 0;
/* Scan to find existence of the device */
- err = nand_scan_ident(mtd, 2, NULL);
+ cafe->nand.dummy_controller.ops = &cafe_nand_controller_ops;
+ err = nand_scan(mtd, 2);
if (err)
goto out_irq;
- cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112,
- &cafe->dmaaddr, GFP_KERNEL);
- if (!cafe->dmabuf) {
- err = -ENOMEM;
- goto out_irq;
- }
-
- /* Set up DMA address */
- cafe_writel(cafe, lower_32_bits(cafe->dmaaddr), NAND_DMA_ADDR0);
- cafe_writel(cafe, upper_32_bits(cafe->dmaaddr), NAND_DMA_ADDR1);
-
- cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
- cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
-
- /* Restore the DMA flag */
- usedma = old_dma;
-
- cafe->ctl2 = 1<<27; /* Reed-Solomon ECC */
- if (mtd->writesize == 2048)
- cafe->ctl2 |= 1<<29; /* 2KiB page size */
-
- /* Set up ECC according to the type of chip we found */
- mtd_set_ooblayout(mtd, &cafe_ooblayout_ops);
- if (mtd->writesize == 2048) {
- cafe->nand.bbt_td = &cafe_bbt_main_descr_2048;
- cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048;
- } else if (mtd->writesize == 512) {
- cafe->nand.bbt_td = &cafe_bbt_main_descr_512;
- cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512;
- } else {
- pr_warn("Unexpected NAND flash writesize %d. Aborting\n",
- mtd->writesize);
- goto out_free_dma;
- }
- cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
- cafe->nand.ecc.size = mtd->writesize;
- cafe->nand.ecc.bytes = 14;
- cafe->nand.ecc.strength = 4;
- cafe->nand.ecc.hwctl = (void *)cafe_nand_bug;
- cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
- cafe->nand.ecc.correct = (void *)cafe_nand_bug;
- cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
- cafe->nand.ecc.write_oob = cafe_nand_write_oob;
- cafe->nand.ecc.read_page = cafe_nand_read_page;
- cafe->nand.ecc.read_oob = cafe_nand_read_oob;
-
- err = nand_scan_tail(mtd);
- if (err)
- goto out_free_dma;
-
pci_set_drvdata(pdev, mtd);
mtd->name = "cafe_nand";
@@ -783,8 +798,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
out_cleanup_nand:
nand_cleanup(&cafe->nand);
- out_free_dma:
- dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
out_irq:
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
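
cafe_nand pairs an allocating ->attach_chip() with a freeing ->detach_chip(), which is why the out_free_dma label disappears from the probe path: nand_cleanup() (and nand_scan() itself on late failure) now invokes ->detach_chip(). The general pattern, with hypothetical my_* names:

	struct my_priv {
		struct device *dev;
		void *dmabuf;
		dma_addr_t dmaaddr;
	};

	static int my_attach_chip(struct nand_chip *chip)
	{
		struct my_priv *p = nand_get_controller_data(chip);

		/* 2112 = 2048B page + 64B OOB, as cafe_nand uses */
		p->dmabuf = dma_alloc_coherent(p->dev, 2112, &p->dmaaddr,
					       GFP_KERNEL);
		return p->dmabuf ? 0 : -ENOMEM;
	}

	static void my_detach_chip(struct nand_chip *chip)
	{
		struct my_priv *p = nand_get_controller_data(chip);

		dma_free_coherent(p->dev, 2112, p->dmabuf, p->dmaaddr);
	}
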
diff --git a/drivers/mtd/nand/raw/cmx270_nand.c b/drivers/mtd/nand/raw/cmx270_nand.c
index 02d6751e9efe..b66e254b6802 100644
--- a/drivers/mtd/nand/raw/cmx270_nand.c
+++ b/drivers/mtd/nand/raw/cmx270_nand.c
@@ -200,8 +200,8 @@ static int __init cmx270_init(void)
}
/* Register the partitions */
- ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL,
- partition_info, NUM_PARTITIONS);
+ ret = mtd_device_register(cmx270_nand_mtd, partition_info,
+ NUM_PARTITIONS);
if (ret)
goto err_scan;
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
index 82269fde9e66..beafad62e7d5 100644
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -310,8 +310,7 @@ static int __init cs553x_init(void)
for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
if (cs553x_mtd[i]) {
/* If any devices registered, return success. Else the last error. */
- mtd_device_parse_register(cs553x_mtd[i], NULL, NULL,
- NULL, 0);
+ mtd_device_register(cs553x_mtd[i], NULL, 0);
err = 0;
}
}
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index cd12e5abafde..40145e206a6b 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -53,15 +53,14 @@
struct davinci_nand_info {
struct nand_chip chip;
- struct device *dev;
+ struct platform_device *pdev;
bool is_readmode;
void __iomem *base;
void __iomem *vaddr;
- uint32_t ioaddr;
- uint32_t current_cs;
+ void __iomem *current_cs;
uint32_t mask_chipsel;
uint32_t mask_ale;
@@ -102,17 +101,17 @@ static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
unsigned int ctrl)
{
struct davinci_nand_info *info = to_davinci_nand(mtd);
- uint32_t addr = info->current_cs;
+ void __iomem *addr = info->current_cs;
struct nand_chip *nand = mtd_to_nand(mtd);
/* Did the control lines change? */
if (ctrl & NAND_CTRL_CHANGE) {
if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
- addr |= info->mask_cle;
+ addr += info->mask_cle;
else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
- addr |= info->mask_ale;
+ addr += info->mask_ale;
- nand->IO_ADDR_W = (void __iomem __force *)addr;
+ nand->IO_ADDR_W = addr;
}
if (cmd != NAND_CMD_NONE)
@@ -122,14 +121,14 @@ static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
{
struct davinci_nand_info *info = to_davinci_nand(mtd);
- uint32_t addr = info->ioaddr;
+
+ info->current_cs = info->vaddr;
/* maybe kick in a second chipselect */
if (chip > 0)
- addr |= info->mask_chipsel;
- info->current_cs = addr;
+ info->current_cs += info->mask_chipsel;
- info->chip.IO_ADDR_W = (void __iomem __force *)addr;
+ info->chip.IO_ADDR_W = info->current_cs;
info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
}
@@ -319,7 +318,7 @@ static int nand_davinci_correct_4bit(struct mtd_info *mtd,
/* Unpack ten bytes into eight 10 bit values. We know we're
* little-endian, and use type punning for less shifting/masking.
*/
- if (WARN_ON(0x01 & (unsigned) ecc_code))
+ if (WARN_ON(0x01 & (uintptr_t)ecc_code))
return -EINVAL;
ecc16 = (unsigned short *)ecc_code;
@@ -441,9 +440,9 @@ static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct nand_chip *chip = mtd_to_nand(mtd);
- if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
+ if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0)
ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
- else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
+ else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0)
ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
else
ioread8_rep(chip->IO_ADDR_R, buf, len);
@@ -454,9 +453,9 @@ static void nand_davinci_write_buf(struct mtd_info *mtd,
{
struct nand_chip *chip = mtd_to_nand(mtd);
- if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
+ if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0)
iowrite32_rep(chip->IO_ADDR_R, buf, len >> 2);
- else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
+ else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0)
iowrite16_rep(chip->IO_ADDR_R, buf, len >> 1);
else
iowrite8_rep(chip->IO_ADDR_R, buf, len);
@@ -606,6 +605,104 @@ static struct davinci_nand_pdata
}
#endif
+static int davinci_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ struct davinci_nand_pdata *pdata = nand_davinci_get_pdata(info->pdev);
+ int ret = 0;
+
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ switch (info->chip.ecc.mode) {
+ case NAND_ECC_NONE:
+ pdata->ecc_bits = 0;
+ break;
+ case NAND_ECC_SOFT:
+ pdata->ecc_bits = 0;
+ /*
+ * This driver expects Hamming based ECC when ecc_mode is set
+ * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
+ * avoid adding an extra ->ecc_algo field to
+ * davinci_nand_pdata.
+ */
+ info->chip.ecc.algo = NAND_ECC_HAMMING;
+ break;
+ case NAND_ECC_HW:
+ if (pdata->ecc_bits == 4) {
+ /*
+ * No sanity checks: CPUs must support this,
+ * and the chips may not use NAND_BUSWIDTH_16.
+ */
+
+ /* No sharing 4-bit hardware between chipselects yet */
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc4_busy)
+ ret = -EBUSY;
+ else
+ ecc4_busy = true;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ if (ret == -EBUSY)
+ return ret;
+
+ info->chip.ecc.calculate = nand_davinci_calculate_4bit;
+ info->chip.ecc.correct = nand_davinci_correct_4bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
+ info->chip.ecc.bytes = 10;
+ info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
+ info->chip.ecc.algo = NAND_ECC_BCH;
+ } else {
+ /* 1bit ecc hamming */
+ info->chip.ecc.calculate = nand_davinci_calculate_1bit;
+ info->chip.ecc.correct = nand_davinci_correct_1bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ info->chip.ecc.bytes = 3;
+ info->chip.ecc.algo = NAND_ECC_HAMMING;
+ }
+ info->chip.ecc.size = 512;
+ info->chip.ecc.strength = pdata->ecc_bits;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Update ECC layout if needed ... for 1-bit HW ECC, the default
+ * is OK, but it allocates 6 bytes when only 3 are needed (for
+ * each 512 bytes). For the 4-bit HW ECC, that default is not
+ * usable: 10 bytes are needed, not 6.
+ */
+ if (pdata->ecc_bits == 4) {
+ int chunks = mtd->writesize / 512;
+
+ if (!chunks || mtd->oobsize < 16) {
+ dev_dbg(&info->pdev->dev, "too small\n");
+ return -EINVAL;
+ }
+
+ /* For small page chips, preserve the manufacturer's
+ * badblock marking data ... and make sure a flash BBT
+ * table marker fits in the free bytes.
+ */
+ if (chunks == 1) {
+ mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
+ } else if (chunks == 4 || chunks == 8) {
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+ info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
+ } else {
+ return -EIO;
+ }
+ }
+
+ return ret;
+}
+
+static const struct nand_controller_ops davinci_nand_controller_ops = {
+ .attach_chip = davinci_nand_attach_chip,
+};
+
static int nand_davinci_probe(struct platform_device *pdev)
{
struct davinci_nand_pdata *pdata;
@@ -659,7 +756,7 @@ static int nand_davinci_probe(struct platform_device *pdev)
return -EADDRNOTAVAIL;
}
- info->dev = &pdev->dev;
+ info->pdev = pdev;
info->base = base;
info->vaddr = vaddr;
@@ -680,9 +777,7 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->chip.bbt_md = pdata->bbt_md;
info->timing = pdata->timing;
- info->ioaddr = (uint32_t __force) vaddr;
-
- info->current_cs = info->ioaddr;
+ info->current_cs = info->vaddr;
info->core_chipsel = pdata->core_chipsel;
info->mask_chipsel = pdata->mask_chipsel;
@@ -711,100 +806,15 @@ static int nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
+ info->chip.dummy_controller.ops = &davinci_nand_controller_ops;
+ ret = nand_scan(mtd, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
return ret;
}
- switch (info->chip.ecc.mode) {
- case NAND_ECC_NONE:
- pdata->ecc_bits = 0;
- break;
- case NAND_ECC_SOFT:
- pdata->ecc_bits = 0;
- /*
- * This driver expects Hamming based ECC when ecc_mode is set
- * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
- * avoid adding an extra ->ecc_algo field to
- * davinci_nand_pdata.
- */
- info->chip.ecc.algo = NAND_ECC_HAMMING;
- break;
- case NAND_ECC_HW:
- if (pdata->ecc_bits == 4) {
- /* No sanity checks: CPUs must support this,
- * and the chips may not use NAND_BUSWIDTH_16.
- */
-
- /* No sharing 4-bit hardware between chipselects yet */
- spin_lock_irq(&davinci_nand_lock);
- if (ecc4_busy)
- ret = -EBUSY;
- else
- ecc4_busy = true;
- spin_unlock_irq(&davinci_nand_lock);
-
- if (ret == -EBUSY)
- return ret;
-
- info->chip.ecc.calculate = nand_davinci_calculate_4bit;
- info->chip.ecc.correct = nand_davinci_correct_4bit;
- info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
- info->chip.ecc.bytes = 10;
- info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
- info->chip.ecc.algo = NAND_ECC_BCH;
- } else {
- /* 1bit ecc hamming */
- info->chip.ecc.calculate = nand_davinci_calculate_1bit;
- info->chip.ecc.correct = nand_davinci_correct_1bit;
- info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
- info->chip.ecc.bytes = 3;
- info->chip.ecc.algo = NAND_ECC_HAMMING;
- }
- info->chip.ecc.size = 512;
- info->chip.ecc.strength = pdata->ecc_bits;
- break;
- default:
- return -EINVAL;
- }
-
- /* Update ECC layout if needed ... for 1-bit HW ECC, the default
- * is OK, but it allocates 6 bytes when only 3 are needed (for
- * each 512 bytes). For the 4-bit HW ECC, that default is not
- * usable: 10 bytes are needed, not 6.
- */
- if (pdata->ecc_bits == 4) {
- int chunks = mtd->writesize / 512;
-
- if (!chunks || mtd->oobsize < 16) {
- dev_dbg(&pdev->dev, "too small\n");
- ret = -EINVAL;
- goto err;
- }
-
- /* For small page chips, preserve the manufacturer's
- * badblock marking data ... and make sure a flash BBT
- * table marker fits in the free bytes.
- */
- if (chunks == 1) {
- mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
- } else if (chunks == 4 || chunks == 8) {
- mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
- info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
- } else {
- ret = -EIO;
- goto err;
- }
- }
-
- ret = nand_scan_tail(mtd);
- if (ret < 0)
- goto err;
-
if (pdata->parts)
- ret = mtd_device_parse_register(mtd, NULL, NULL,
- pdata->parts, pdata->nr_parts);
+ ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
else
ret = mtd_device_register(mtd, NULL, 0);
if (ret < 0)
@@ -819,11 +829,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
err_cleanup_nand:
nand_cleanup(&info->chip);
-err:
- spin_lock_irq(&davinci_nand_lock);
- if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
- ecc4_busy = false;
- spin_unlock_irq(&davinci_nand_lock);
return ret;
}
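
The (unsigned) to (uintptr_t) casts in davinci (and later in fsmc) are 64-bit hygiene: casting a pointer to a 32-bit integer draws pointer-to-int truncation warnings, and uintptr_t is the one integer type guaranteed to hold an address. A hypothetical helper equivalent to the alignment tests above:

	#include <linux/kernel.h>

	static bool my_word_aligned(const void *buf, int len)
	{
		/* alignment of both the address and the length decides
		 * whether 32-bit, 16-bit or byte I/O accessors are used
		 */
		return IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
		       IS_ALIGNED(len, sizeof(u32));
	}
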
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 2a302a1d1430..ca18612c4201 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -51,14 +51,6 @@ MODULE_LICENSE("GPL");
#define DENALI_INVALID_BANK -1
#define DENALI_NR_BANKS 4
-/*
- * The bus interface clock, clk_x, is phase aligned with the core clock. The
- * clk_x is an integral multiple N of the core clk. The value N is configured
- * at IP delivery time, and its available value is 4, 5, or 6. We need to align
- * to the largest value to make it work with any possible configuration.
- */
-#define DENALI_CLK_X_MULT 6
-
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
@@ -954,7 +946,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
const struct nand_sdr_timings *timings;
- unsigned long t_clk;
+ unsigned long t_x, mult_x;
int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
int addr_2_data_mask;
@@ -965,15 +957,24 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
return PTR_ERR(timings);
/* clk_x period in picoseconds */
- t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
- if (!t_clk)
+ t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
+ if (!t_x)
+ return -EINVAL;
+
+ /*
+ * The bus interface clock, clk_x, is phase aligned with the core clock.
+ * The clk_x is an integral multiple N of the core clk. The value N is
+ * configured at IP delivery time and can be 4, 5, or 6.
+ */
+ mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
+ if (mult_x < 4 || mult_x > 6)
return -EINVAL;
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
/* tREA -> ACC_CLKS */
- acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk);
+ acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
tmp = ioread32(denali->reg + ACC_CLKS);
@@ -982,7 +983,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
iowrite32(tmp, denali->reg + ACC_CLKS);
/* tRWH -> RE_2_WE */
- re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk);
+ re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);
tmp = ioread32(denali->reg + RE_2_WE);
@@ -991,7 +992,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
iowrite32(tmp, denali->reg + RE_2_WE);
/* tRHZ -> RE_2_RE */
- re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk);
+ re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);
tmp = ioread32(denali->reg + RE_2_RE);
@@ -1005,8 +1006,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
* With WE_2_RE properly set, the Denali controller automatically takes
* care of the delay; the driver need not set NAND_WAIT_TCCS.
*/
- we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min),
- t_clk);
+ we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
@@ -1021,7 +1021,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
if (denali->revision < 0x0501)
addr_2_data_mask >>= 1;
- addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk);
+ addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
@@ -1031,7 +1031,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
/* tREH, tWH -> RDWR_EN_HI_CNT */
rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
- t_clk);
+ t_x);
rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);
tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
@@ -1040,11 +1040,10 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
/* tRP, tWP -> RDWR_EN_LO_CNT */
- rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min),
- t_clk);
+ rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
- t_clk);
- rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT);
+ t_x);
+ rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);
@@ -1054,8 +1053,8 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
/* tCS, tCEA -> CS_SETUP_CNT */
- cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo,
- (int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks,
+ cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
+ (int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
0);
cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);
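
For concreteness, a worked example with the rates the PCI back end uses (chip timing figure assumed):

	/*
	 * clk_x = 200 MHz  ->  t_x = 10^12 / 200000000 = 5000 ps
	 * clk   =  50 MHz  ->  mult_x = DIV_ROUND_CLOSEST_ULL(200000000,
	 *                               50000000) = 4  (within 4..6)
	 *
	 * For an assumed tREA_max of 20000 ps:
	 *     acc_clks = DIV_ROUND_UP(20000, 5000) = 4 bus-clock cycles
	 */
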
@@ -1120,33 +1119,6 @@ int denali_calc_ecc_bytes(int step_size, int strength)
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
-static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
- struct denali_nand_info *denali)
-{
- int oobavail = mtd->oobsize - denali->oob_skip_bytes;
- int ret;
-
- /*
- * If .size and .strength are already set (usually by DT),
- * check if they are supported by this controller.
- */
- if (chip->ecc.size && chip->ecc.strength)
- return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);
-
- /*
- * We want .size and .strength closest to the chip's requirement
- * unless NAND_ECC_MAXIMIZE is requested.
- */
- if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
- ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
- if (!ret)
- return 0;
- }
-
- /* Max ECC strength is the last thing we can do */
- return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
-}
-
static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
@@ -1233,62 +1205,12 @@ static int denali_multidev_fixup(struct denali_nand_info *denali)
return 0;
}
-int denali_init(struct denali_nand_info *denali)
+static int denali_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = &denali->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
- u32 features = ioread32(denali->reg + FEATURES);
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
int ret;
- mtd->dev.parent = denali->dev;
- denali_hw_init(denali);
-
- init_completion(&denali->complete);
- spin_lock_init(&denali->irq_lock);
-
- denali_clear_irq_all(denali);
-
- ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
- IRQF_SHARED, DENALI_NAND_NAME, denali);
- if (ret) {
- dev_err(denali->dev, "Unable to request IRQ\n");
- return ret;
- }
-
- denali_enable_irq(denali);
- denali_reset_banks(denali);
-
- denali->active_bank = DENALI_INVALID_BANK;
-
- nand_set_flash_node(chip, denali->dev->of_node);
- /* Fallback to the default name if DT did not give "label" property */
- if (!mtd->name)
- mtd->name = "denali-nand";
-
- chip->select_chip = denali_select_chip;
- chip->read_byte = denali_read_byte;
- chip->write_byte = denali_write_byte;
- chip->read_word = denali_read_word;
- chip->cmd_ctrl = denali_cmd_ctrl;
- chip->dev_ready = denali_dev_ready;
- chip->waitfunc = denali_waitfunc;
-
- if (features & FEATURES__INDEX_ADDR) {
- denali->host_read = denali_indexed_read;
- denali->host_write = denali_indexed_write;
- } else {
- denali->host_read = denali_direct_read;
- denali->host_write = denali_direct_write;
- }
-
- /* clk rate info is needed for setup_data_interface */
- if (denali->clk_x_rate)
- chip->setup_data_interface = denali_setup_data_interface;
-
- ret = nand_scan_ident(mtd, denali->max_banks, NULL);
- if (ret)
- goto disable_irq;
-
if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
denali->dma_avail = 1;
@@ -1317,10 +1239,11 @@ int denali_init(struct denali_nand_info *denali)
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
chip->options |= NAND_NO_SUBPAGE_WRITE;
- ret = denali_ecc_setup(mtd, chip, denali);
+ ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
+ mtd->oobsize - denali->oob_skip_bytes);
if (ret) {
dev_err(denali->dev, "Failed to setup ECC settings.\n");
- goto disable_irq;
+ return ret;
}
dev_dbg(denali->dev,
@@ -1364,7 +1287,7 @@ int denali_init(struct denali_nand_info *denali)
ret = denali_multidev_fixup(denali);
if (ret)
- goto disable_irq;
+ return ret;
/*
* This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not
@@ -1372,26 +1295,92 @@ int denali_init(struct denali_nand_info *denali)
* guarantee DMA-safe alignment.
*/
denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
- if (!denali->buf) {
- ret = -ENOMEM;
- goto disable_irq;
+ if (!denali->buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void denali_detach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ kfree(denali->buf);
+}
+
+static const struct nand_controller_ops denali_controller_ops = {
+ .attach_chip = denali_attach_chip,
+ .detach_chip = denali_detach_chip,
+};
+
+int denali_init(struct denali_nand_info *denali)
+{
+ struct nand_chip *chip = &denali->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 features = ioread32(denali->reg + FEATURES);
+ int ret;
+
+ mtd->dev.parent = denali->dev;
+ denali_hw_init(denali);
+
+ init_completion(&denali->complete);
+ spin_lock_init(&denali->irq_lock);
+
+ denali_clear_irq_all(denali);
+
+ ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
+ IRQF_SHARED, DENALI_NAND_NAME, denali);
+ if (ret) {
+ dev_err(denali->dev, "Unable to request IRQ\n");
+ return ret;
}
- ret = nand_scan_tail(mtd);
+ denali_enable_irq(denali);
+ denali_reset_banks(denali);
+
+ denali->active_bank = DENALI_INVALID_BANK;
+
+ nand_set_flash_node(chip, denali->dev->of_node);
+ /* Fall back to the default name if DT did not give a "label" property */
+ if (!mtd->name)
+ mtd->name = "denali-nand";
+
+ chip->select_chip = denali_select_chip;
+ chip->read_byte = denali_read_byte;
+ chip->write_byte = denali_write_byte;
+ chip->read_word = denali_read_word;
+ chip->cmd_ctrl = denali_cmd_ctrl;
+ chip->dev_ready = denali_dev_ready;
+ chip->waitfunc = denali_waitfunc;
+
+ if (features & FEATURES__INDEX_ADDR) {
+ denali->host_read = denali_indexed_read;
+ denali->host_write = denali_indexed_write;
+ } else {
+ denali->host_read = denali_direct_read;
+ denali->host_write = denali_direct_write;
+ }
+
+ /* clk rate info is needed for setup_data_interface */
+ if (denali->clk_rate && denali->clk_x_rate)
+ chip->setup_data_interface = denali_setup_data_interface;
+
+ chip->dummy_controller.ops = &denali_controller_ops;
+ ret = nand_scan(mtd, denali->max_banks);
if (ret)
- goto free_buf;
+ goto disable_irq;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
goto cleanup_nand;
}
+
return 0;
cleanup_nand:
nand_cleanup(chip);
-free_buf:
- kfree(denali->buf);
disable_irq:
denali_disable_irq(denali);
@@ -1404,7 +1393,6 @@ void denali_remove(struct denali_nand_info *denali)
struct mtd_info *mtd = nand_to_mtd(&denali->nand);
nand_release(mtd);
- kfree(denali->buf);
denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);
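
The removed denali_ecc_setup() survives as the generic nand_ecc_choose_conf() helper, with the same three-step policy: honour a preset size/strength, else match the chip's requirement, else maximize within the spare area. A sketch of a caller, with hypothetical my_ecc_caps and my_oob_skip:

	static const struct nand_ecc_caps my_ecc_caps;	/* hypothetical caps table */
	static const int my_oob_skip = 8;		/* hypothetical, in bytes */

	static int my_choose_ecc(struct nand_chip *chip)
	{
		struct mtd_info *mtd = nand_to_mtd(chip);

		return nand_ecc_choose_conf(chip, &my_ecc_caps,
					    mtd->oobsize - my_oob_skip);
	}
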
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index 9ad33d237378..1f8feaf924eb 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -300,6 +300,7 @@
struct denali_nand_info {
struct nand_chip nand;
+ unsigned long clk_rate; /* core clock rate */
unsigned long clk_x_rate; /* bus interface clock rate */
int active_bank; /* currently selected bank */
struct device *dev;
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 5869e90cc14b..0faaad032e5f 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -27,7 +27,9 @@
struct denali_dt {
struct denali_nand_info denali;
- struct clk *clk;
+ struct clk *clk; /* core clock */
+ struct clk *clk_x; /* bus interface clock */
+ struct clk *clk_ecc; /* ECC circuit clock */
};
struct denali_dt_data {
@@ -79,63 +81,99 @@ MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
static int denali_dt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct resource *res;
struct denali_dt *dt;
const struct denali_dt_data *data;
struct denali_nand_info *denali;
int ret;
- dt = devm_kzalloc(&pdev->dev, sizeof(*dt), GFP_KERNEL);
+ dt = devm_kzalloc(dev, sizeof(*dt), GFP_KERNEL);
if (!dt)
return -ENOMEM;
denali = &dt->denali;
- data = of_device_get_match_data(&pdev->dev);
+ data = of_device_get_match_data(dev);
if (data) {
denali->revision = data->revision;
denali->caps = data->caps;
denali->ecc_caps = data->ecc_caps;
}
- denali->dev = &pdev->dev;
+ denali->dev = dev;
denali->irq = platform_get_irq(pdev, 0);
if (denali->irq < 0) {
- dev_err(&pdev->dev, "no irq defined\n");
+ dev_err(dev, "no irq defined\n");
return denali->irq;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "denali_reg");
- denali->reg = devm_ioremap_resource(&pdev->dev, res);
+ denali->reg = devm_ioremap_resource(dev, res);
if (IS_ERR(denali->reg))
return PTR_ERR(denali->reg);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
- denali->host = devm_ioremap_resource(&pdev->dev, res);
+ denali->host = devm_ioremap_resource(dev, res);
if (IS_ERR(denali->host))
return PTR_ERR(denali->host);
- dt->clk = devm_clk_get(&pdev->dev, NULL);
+ /*
+ * A single anonymous clock is supported for backward compatibility.
+ * New platforms should provide all the named clocks.
+ */
+ dt->clk = devm_clk_get(dev, "nand");
+ if (IS_ERR(dt->clk))
+ dt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(dt->clk)) {
- dev_err(&pdev->dev, "no clk available\n");
+ dev_err(dev, "no clk available\n");
return PTR_ERR(dt->clk);
}
+
+ dt->clk_x = devm_clk_get(dev, "nand_x");
+ if (IS_ERR(dt->clk_x))
+ dt->clk_x = NULL;
+
+ dt->clk_ecc = devm_clk_get(dev, "ecc");
+ if (IS_ERR(dt->clk_ecc))
+ dt->clk_ecc = NULL;
+
ret = clk_prepare_enable(dt->clk);
if (ret)
return ret;
- /*
- * Hardcode the clock rate for the backward compatibility.
- * This works for both SOCFPGA and UniPhier.
- */
- denali->clk_x_rate = 200000000;
+ ret = clk_prepare_enable(dt->clk_x);
+ if (ret)
+ goto out_disable_clk;
+
+ ret = clk_prepare_enable(dt->clk_ecc);
+ if (ret)
+ goto out_disable_clk_x;
+
+ if (dt->clk_x) {
+ denali->clk_rate = clk_get_rate(dt->clk);
+ denali->clk_x_rate = clk_get_rate(dt->clk_x);
+ } else {
+ /*
+ * Hardcode the clock rates for backward compatibility.
+ * This works for both SOCFPGA and UniPhier.
+ */
+ dev_notice(dev,
+ "necessary clock is missing. default clock rates are used.\n");
+ denali->clk_rate = 50000000;
+ denali->clk_x_rate = 200000000;
+ }
ret = denali_init(denali);
if (ret)
- goto out_disable_clk;
+ goto out_disable_clk_ecc;
platform_set_drvdata(pdev, dt);
return 0;
+out_disable_clk_ecc:
+ clk_disable_unprepare(dt->clk_ecc);
+out_disable_clk_x:
+ clk_disable_unprepare(dt->clk_x);
out_disable_clk:
clk_disable_unprepare(dt->clk);
@@ -147,6 +185,8 @@ static int denali_dt_remove(struct platform_device *pdev)
struct denali_dt *dt = platform_get_drvdata(pdev);
denali_remove(&dt->denali);
+ clk_disable_unprepare(dt->clk_ecc);
+ clk_disable_unprepare(dt->clk_x);
clk_disable_unprepare(dt->clk);
return 0;
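
The named-clock lookup treats nand_x and ecc as optional by collapsing any lookup error to NULL, which the clk API accepts (clk_prepare_enable(NULL) and clk_get_rate(NULL) are no-ops), hence the if (dt->clk_x) fallback above. A hypothetical helper with the same behaviour (note it also swallows -EPROBE_DEFER, exactly as the probe above does):

	static struct clk *my_optional_clk(struct device *dev, const char *id)
	{
		struct clk *clk = devm_clk_get(dev, id);

		/* missing optional clock: carry on with NULL */
		return IS_ERR(clk) ? NULL : clk;
	}
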
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index 49cb3e1f8bd0..7c8efc4c7bdf 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -73,6 +73,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->irq = dev->irq;
denali->ecc_caps = &denali_pci_ecc_caps;
denali->nand.ecc.options |= NAND_ECC_MAXIMIZE;
+ denali->clk_rate = 50000000; /* 50 MHz */
denali->clk_x_rate = 200000000; /* 200 MHz */
ret = pci_request_regions(dev, DENALI_NAND_NAME);
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 8d10061abb4b..3c46188dd6d2 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1291,7 +1291,7 @@ static int __init nftl_scan_bbt(struct mtd_info *mtd)
this->bbt_md = NULL;
}
- ret = this->scan_bbt(mtd);
+ ret = nand_create_bbt(this);
if (ret)
return ret;
@@ -1338,7 +1338,7 @@ static int __init inftl_scan_bbt(struct mtd_info *mtd)
this->bbt_md->pattern = "TBB_SYSM";
}
- ret = this->scan_bbt(mtd);
+ ret = nand_create_bbt(this);
if (ret)
return ret;
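
nand_create_bbt() is the exported replacement for reaching into the chip's scan_bbt method pointer; a trivial hypothetical wrapper:

	static int my_build_bbt(struct nand_chip *this)
	{
		return nand_create_bbt(this);	/* was: this->scan_bbt(mtd) */
	}
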
diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c
index 1314aa99b9ab..a3f04315c05c 100644
--- a/drivers/mtd/nand/raw/docg4.c
+++ b/drivers/mtd/nand/raw/docg4.c
@@ -1227,10 +1227,9 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
* required within a nand driver because they are performed by the nand
* infrastructure code as part of nand_scan(). In this case they need
* to be initialized here because we skip the call to nand_scan_ident() (the
- * first half of nand_scan()). The call to nand_scan_ident() is skipped
- * because for this device the chip id is not read in the manner of a
- * standard nand device. Unfortunately, nand_scan_ident() does other
- * things as well, such as call nand_set_defaults().
+ * first half of nand_scan()). The call to nand_scan_ident() could be
+ * skipped because for this device the chip id is not read in the manner
+ * of a standard nand device.
*/
struct nand_chip *nand = mtd_to_nand(mtd);
@@ -1257,8 +1256,8 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
nand->ecc.strength = DOCG4_T;
nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
- nand->controller = &nand->hwcontrol;
- nand_hw_control_init(nand->controller);
+ nand->controller = &nand->dummy_controller;
+ nand_controller_init(nand->controller);
/* methods */
nand->cmdfunc = docg4_command;
@@ -1315,6 +1314,40 @@ static int __init read_id_reg(struct mtd_info *mtd)
static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+static int docg4_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct docg4_priv *doc = (struct docg4_priv *)(chip + 1);
+ int ret;
+
+ init_mtd_structs(mtd);
+
+ /* Initialize kernel BCH algorithm */
+ doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
+ if (!doc->bch)
+ return -EINVAL;
+
+ reset(mtd);
+
+ ret = read_id_reg(mtd);
+ if (ret)
+ free_bch(doc->bch);
+
+ return ret;
+}
+
+static void docg4_detach_chip(struct nand_chip *chip)
+{
+ struct docg4_priv *doc = (struct docg4_priv *)(chip + 1);
+
+ free_bch(doc->bch);
+}
+
+static const struct nand_controller_ops docg4_controller_ops = {
+ .attach_chip = docg4_attach_chip,
+ .detach_chip = docg4_detach_chip,
+};
+
static int __init probe_docg4(struct platform_device *pdev)
{
struct mtd_info *mtd;
@@ -1341,7 +1374,7 @@ static int __init probe_docg4(struct platform_device *pdev)
nand = kzalloc(len, GFP_KERNEL);
if (nand == NULL) {
retval = -ENOMEM;
- goto fail_unmap;
+ goto unmap;
}
mtd = nand_to_mtd(nand);
@@ -1350,46 +1383,35 @@ static int __init probe_docg4(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
doc->virtadr = virtadr;
doc->dev = dev;
-
- init_mtd_structs(mtd);
-
- /* initialize kernel bch algorithm */
- doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
- if (doc->bch == NULL) {
- retval = -EINVAL;
- goto fail;
- }
-
platform_set_drvdata(pdev, doc);
- reset(mtd);
- retval = read_id_reg(mtd);
- if (retval == -ENODEV) {
- dev_warn(dev, "No diskonchip G4 device found.\n");
- goto fail;
- }
-
- retval = nand_scan_tail(mtd);
+ /*
+ * Running nand_scan() with maxchips == 0 will skip nand_scan_ident():
+ * the identification is device-specific here and is done from the
+ * ->attach_chip() callback instead.
+ */
+ nand->dummy_controller.ops = &docg4_controller_ops;
+ retval = nand_scan(mtd, 0);
if (retval)
- goto fail;
+ goto free_nand;
retval = read_factory_bbt(mtd);
if (retval)
- goto fail;
+ goto cleanup_nand;
retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
if (retval)
- goto fail;
+ goto cleanup_nand;
doc->mtd = mtd;
+
return 0;
-fail:
- nand_release(mtd); /* deletes partitions and mtd devices */
- free_bch(doc->bch);
+cleanup_nand:
+ nand_cleanup(nand);
+free_nand:
kfree(nand);
-
-fail_unmap:
+unmap:
iounmap(virtadr);
return retval;
@@ -1399,7 +1421,6 @@ static int __exit cleanup_docg4(struct platform_device *pdev)
{
struct docg4_priv *doc = platform_get_drvdata(pdev);
nand_release(doc->mtd);
- free_bch(doc->bch);
kfree(mtd_to_nand(doc->mtd));
iounmap(doc->virtadr);
return 0;
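
docg4 exercises the one special case of the new entry point: passing maxchips == 0 makes nand_scan() skip nand_scan_ident() entirely, so a device whose ID cannot be read the standard way identifies itself from ->attach_chip(). A sketch, reusing hypothetical ops as in the earlier examples:

	static int my_scan_no_ident(struct nand_chip *chip)
	{
		chip->dummy_controller.ops = &my_controller_ops;

		/* maxchips == 0: no nand_scan_ident(); ->attach_chip()
		 * performs the device-specific detection instead
		 */
		return nand_scan(nand_to_mtd(chip), 0);
	}
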
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index 51f0b340bc0d..55f449b711fd 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -61,7 +61,7 @@ struct fsl_elbc_mtd {
/* Freescale eLBC FCM controller information */
struct fsl_elbc_fcm_ctrl {
- struct nand_hw_control controller;
+ struct nand_controller controller;
struct fsl_elbc_mtd *chips[MAX_BANKS];
u8 __iomem *addr; /* Address of assigned FCM buffer */
@@ -637,9 +637,9 @@ static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
return (elbc_fcm_ctrl->mdr & 0xff) | NAND_STATUS_WP;
}
-static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
+static int fsl_elbc_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
@@ -700,12 +700,16 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
dev_err(priv->dev,
"fsl_elbc_init: page size %d is not supported\n",
mtd->writesize);
- return -1;
+ return -ENOTSUPP;
}
return 0;
}
+static const struct nand_controller_ops fsl_elbc_controller_ops = {
+ .attach_chip = fsl_elbc_attach_chip,
+};
+
static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
@@ -879,7 +883,7 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
}
elbc_fcm_ctrl->counter++;
- nand_hw_control_init(&elbc_fcm_ctrl->controller);
+ nand_controller_init(&elbc_fcm_ctrl->controller);
fsl_lbc_ctrl_dev->nand = elbc_fcm_ctrl;
} else {
elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
@@ -910,15 +914,8 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
if (ret)
goto err;
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret)
- goto err;
-
- ret = fsl_elbc_chip_init_tail(mtd);
- if (ret)
- goto err;
-
- ret = nand_scan_tail(mtd);
+ priv->chip.controller->ops = &fsl_elbc_controller_ops;
+ ret = nand_scan(mtd, 1);
if (ret)
goto err;
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index 382b67e97174..24f59d0066af 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -51,7 +51,7 @@ struct fsl_ifc_mtd {
/* overview of the fsl ifc controller */
struct fsl_ifc_nand_ctrl {
- struct nand_hw_control controller;
+ struct nand_controller controller;
struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
void __iomem *addr; /* Address of assigned IFC buffer */
@@ -225,7 +225,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
int bufnum = nctrl->page & priv->bufnum_mask;
int sector_start = bufnum * chip->ecc.steps;
int sector_end = sector_start + chip->ecc.steps - 1;
- __be32 *eccstat_regs;
+ __be32 __iomem *eccstat_regs;
eccstat_regs = ifc->ifc_nand.nand_eccstat;
eccstat = ifc_in32(&eccstat_regs[sector_start / 4]);
@@ -714,9 +714,9 @@ static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
return nand_prog_page_end_op(chip);
}
-static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
+static int fsl_ifc_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
@@ -757,6 +757,10 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
return 0;
}
+static const struct nand_controller_ops fsl_ifc_controller_ops = {
+ .attach_chip = fsl_ifc_attach_chip,
+};
+
static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
{
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
@@ -1004,7 +1008,7 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
ifc_nand_ctrl->addr = NULL;
fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;
- nand_hw_control_init(&ifc_nand_ctrl->controller);
+ nand_controller_init(&ifc_nand_ctrl->controller);
} else {
ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
}
@@ -1046,15 +1050,8 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
if (ret)
goto err;
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret)
- goto err;
-
- ret = fsl_ifc_chip_init_tail(mtd);
- if (ret)
- goto err;
-
- ret = nand_scan_tail(mtd);
+ priv->chip.controller->ops = &fsl_ifc_controller_ops;
+ ret = nand_scan(mtd, 1);
if (ret)
goto err;
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index f4a5a317d4ae..f418236fa020 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -62,7 +62,7 @@
reg)
/* fsmc controller registers for NAND flash */
-#define PC 0x00
+#define FSMC_PC 0x00
/* pc register definitions */
#define FSMC_RESET (1 << 0)
#define FSMC_WAITON (1 << 1)
@@ -273,12 +273,13 @@ static void fsmc_nand_setup(struct fsmc_nand_data *host,
tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
if (host->nand.options & NAND_BUSWIDTH_16)
- writel_relaxed(value | FSMC_DEVWID_16, host->regs_va + PC);
+ writel_relaxed(value | FSMC_DEVWID_16,
+ host->regs_va + FSMC_PC);
else
- writel_relaxed(value | FSMC_DEVWID_8, host->regs_va + PC);
+ writel_relaxed(value | FSMC_DEVWID_8, host->regs_va + FSMC_PC);
- writel_relaxed(readl(host->regs_va + PC) | tclr | tar,
- host->regs_va + PC);
+ writel_relaxed(readl(host->regs_va + FSMC_PC) | tclr | tar,
+ host->regs_va + FSMC_PC);
writel_relaxed(thiz | thold | twait | tset, host->regs_va + COMM);
writel_relaxed(thiz | thold | twait | tset, host->regs_va + ATTRIB);
}
@@ -371,12 +372,12 @@ static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
{
struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
- writel_relaxed(readl(host->regs_va + PC) & ~FSMC_ECCPLEN_256,
- host->regs_va + PC);
- writel_relaxed(readl(host->regs_va + PC) & ~FSMC_ECCEN,
- host->regs_va + PC);
- writel_relaxed(readl(host->regs_va + PC) | FSMC_ECCEN,
- host->regs_va + PC);
+ writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCPLEN_256,
+ host->regs_va + FSMC_PC);
+ writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCEN,
+ host->regs_va + FSMC_PC);
+ writel_relaxed(readl(host->regs_va + FSMC_PC) | FSMC_ECCEN,
+ host->regs_va + FSMC_PC);
}
/*
@@ -546,7 +547,7 @@ static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
int i;
- if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+ if (IS_ALIGNED((uintptr_t)buf, sizeof(uint32_t)) &&
IS_ALIGNED(len, sizeof(uint32_t))) {
uint32_t *p = (uint32_t *)buf;
len = len >> 2;
@@ -569,7 +570,7 @@ static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
int i;
- if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+ if (IS_ALIGNED((uintptr_t)buf, sizeof(uint32_t)) &&
IS_ALIGNED(len, sizeof(uint32_t))) {
uint32_t *p = (uint32_t *)buf;
len = len >> 2;
@@ -618,11 +619,11 @@ static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
if (chipnr > 0)
return;
- pc = readl(host->regs_va + PC);
+ pc = readl(host->regs_va + FSMC_PC);
if (chipnr < 0)
- writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + PC);
+ writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC);
else
- writel_relaxed(pc | FSMC_ENABLE, host->regs_va + PC);
+ writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC);
/* nCE line must be asserted before starting any operation */
mb();
@@ -740,7 +741,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
nand_read_page_op(chip, page, s * eccsize, NULL, 0);
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+ nand_read_data_op(chip, p, eccsize, false);
for (j = 0; j < eccbytes;) {
struct mtd_oob_region oobregion;
@@ -918,6 +919,82 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
return 0;
}
+static int fsmc_nand_attach_chip(struct nand_chip *nand)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+
+ if (AMBA_REV_BITS(host->pid) >= 8) {
+ switch (mtd->oobsize) {
+ case 16:
+ case 64:
+ case 128:
+ case 224:
+ case 256:
+ break;
+ default:
+ dev_warn(host->dev,
+ "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
+
+ return 0;
+ }
+
+ switch (nand->ecc.mode) {
+ case NAND_ECC_HW:
+ dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
+ nand->ecc.calculate = fsmc_read_hwecc_ecc1;
+ nand->ecc.correct = nand_correct_data;
+ nand->ecc.bytes = 3;
+ nand->ecc.strength = 1;
+ break;
+
+ case NAND_ECC_SOFT:
+ if (nand->ecc.algo == NAND_ECC_BCH) {
+ dev_info(host->dev,
+ "Using 4-bit SW BCH ECC scheme\n");
+ break;
+ }
+
+ /* fall through */
+ case NAND_ECC_ON_DIE:
+ break;
+
+ default:
+ dev_err(host->dev, "Unsupported ECC mode!\n");
+ return -ENOTSUPP;
+ }
+
+ /*
+ * Don't set layout for BCH4 SW ECC. This will be
+ * generated later in nand_bch_init().
+ */
+ if (nand->ecc.mode == NAND_ECC_HW) {
+ switch (mtd->oobsize) {
+ case 16:
+ case 64:
+ case 128:
+ mtd_set_ooblayout(mtd,
+ &fsmc_ecc1_ooblayout_ops);
+ break;
+ default:
+ dev_warn(host->dev,
+ "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops fsmc_nand_controller_ops = {
+ .attach_chip = fsmc_nand_attach_chip,
+};
+
/*
* fsmc_nand_probe - Probe function
* @pdev: platform device structure
@@ -1047,76 +1124,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
/*
* Scan to find existence of the device
*/
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret) {
- dev_err(&pdev->dev, "No NAND Device found!\n");
- goto release_dma_write_chan;
- }
-
- if (AMBA_REV_BITS(host->pid) >= 8) {
- switch (mtd->oobsize) {
- case 16:
- case 64:
- case 128:
- case 224:
- case 256:
- break;
- default:
- dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
- mtd->oobsize);
- ret = -EINVAL;
- goto release_dma_write_chan;
- }
-
- mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
- } else {
- switch (nand->ecc.mode) {
- case NAND_ECC_HW:
- dev_info(&pdev->dev, "Using 1-bit HW ECC scheme\n");
- nand->ecc.calculate = fsmc_read_hwecc_ecc1;
- nand->ecc.correct = nand_correct_data;
- nand->ecc.bytes = 3;
- nand->ecc.strength = 1;
- break;
-
- case NAND_ECC_SOFT:
- if (nand->ecc.algo == NAND_ECC_BCH) {
- dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n");
- break;
- }
-
- case NAND_ECC_ON_DIE:
- break;
-
- default:
- dev_err(&pdev->dev, "Unsupported ECC mode!\n");
- goto release_dma_write_chan;
- }
-
- /*
- * Don't set layout for BCH4 SW ECC. This will be
- * generated later in nand_bch_init() later.
- */
- if (nand->ecc.mode == NAND_ECC_HW) {
- switch (mtd->oobsize) {
- case 16:
- case 64:
- case 128:
- mtd_set_ooblayout(mtd,
- &fsmc_ecc1_ooblayout_ops);
- break;
- default:
- dev_warn(&pdev->dev,
- "No oob scheme defined for oobsize %d\n",
- mtd->oobsize);
- ret = -EINVAL;
- goto release_dma_write_chan;
- }
- }
- }
-
- /* Second stage of scan to fill MTD data-structures */
- ret = nand_scan_tail(mtd);
+ nand->dummy_controller.ops = &fsmc_nand_controller_ops;
+ ret = nand_scan(mtd, 1);
if (ret)
goto release_dma_write_chan;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
index 83697b8df871..88ea2203e263 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale GPMI NAND Flash Driver
*
* Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
* Copyright (C) 2008 Embedded Alley Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/delay.h>
#include <linux/clk.h>
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index f6aa358a3452..1c1ebbc82824 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale GPMI NAND Flash Driver
*
* Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
* Copyright (C) 2008 Embedded Alley Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/clk.h>
#include <linux/slab.h>
@@ -757,9 +744,9 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
* [2] Allocate a read/write data buffer.
* The gpmi_alloc_dma_buffer can be called twice.
* We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer
- * is called before the nand_scan_ident; and we allocate a buffer
- * of the real NAND page size when the gpmi_alloc_dma_buffer is
- * called after the nand_scan_ident.
+	 * is called before the NAND identification; and we allocate a
+	 * buffer of the real NAND page size when gpmi_alloc_dma_buffer
+	 * is called afterwards.
*/
this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
GFP_DMA | GFP_KERNEL);
@@ -957,7 +944,6 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
struct mtd_info *mtd = nand_to_mtd(chip);
- void *payload_virt;
dma_addr_t payload_phys;
unsigned int i;
unsigned char *status;
@@ -967,7 +953,6 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
dev_dbg(this->dev, "page number is : %d\n", page);
- payload_virt = this->payload_virt;
payload_phys = this->payload_phys;
if (virt_addr_valid(buf)) {
@@ -976,7 +961,6 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
dest_phys = dma_map_single(this->dev, buf, nfc_geo->payload_size,
DMA_FROM_DEVICE);
if (!dma_mapping_error(this->dev, dest_phys)) {
- payload_virt = buf;
payload_phys = dest_phys;
direct = true;
}
@@ -1881,6 +1865,34 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
return 0;
}
+static int gpmi_nand_attach_chip(struct nand_chip *chip)
+{
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ int ret;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (of_property_read_bool(this->dev->of_node,
+ "fsl,no-blockmark-swap"))
+ this->swap_block_mark = false;
+ }
+ dev_dbg(this->dev, "Blockmark swapping %sabled\n",
+ this->swap_block_mark ? "en" : "dis");
+
+ ret = gpmi_init_last(this);
+ if (ret)
+ return ret;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ return 0;
+}
+
+static const struct nand_controller_ops gpmi_nand_controller_ops = {
+ .attach_chip = gpmi_nand_attach_chip,
+};
+
static int gpmi_nand_init(struct gpmi_nand_data *this)
{
struct nand_chip *chip = &this->nand;
@@ -1921,33 +1933,15 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
if (ret)
goto err_out;
- ret = nand_scan_ident(mtd, GPMI_IS_MX6(this) ? 2 : 1, NULL);
- if (ret)
- goto err_out;
-
- if (chip->bbt_options & NAND_BBT_USE_FLASH) {
- chip->bbt_options |= NAND_BBT_NO_OOB;
-
- if (of_property_read_bool(this->dev->of_node,
- "fsl,no-blockmark-swap"))
- this->swap_block_mark = false;
- }
- dev_dbg(this->dev, "Blockmark swapping %sabled\n",
- this->swap_block_mark ? "en" : "dis");
-
- ret = gpmi_init_last(this);
- if (ret)
- goto err_out;
-
- chip->options |= NAND_SKIP_BBTSCAN;
- ret = nand_scan_tail(mtd);
+ chip->dummy_controller.ops = &gpmi_nand_controller_ops;
+ ret = nand_scan(mtd, GPMI_IS_MX6(this) ? 2 : 1);
if (ret)
goto err_out;
ret = nand_boot_init(this);
if (ret)
goto err_nand_cleanup;
- ret = chip->scan_bbt(mtd);
+ ret = nand_create_bbt(chip);
if (ret)
goto err_nand_cleanup;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
index 6aa10d6962d6..69cd0cbde4f2 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Freescale GPMI NAND Flash Driver
*
* Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
* Copyright (C) 2008 Embedded Alley Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
#define __DRIVERS_MTD_NAND_GPMI_NAND_H
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index a1e009c8e556..950dc7789296 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -709,9 +709,50 @@ static int hisi_nfc_ecc_probe(struct hinfc_host *host)
return 0;
}
+static int hisi_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct hinfc_host *host = nand_get_controller_data(chip);
+ int flag;
+
+ host->buffer = dmam_alloc_coherent(host->dev,
+ mtd->writesize + mtd->oobsize,
+ &host->dma_buffer, GFP_KERNEL);
+ if (!host->buffer)
+ return -ENOMEM;
+
+ host->dma_oob = host->dma_buffer + mtd->writesize;
+ memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
+
+ flag = hinfc_read(host, HINFC504_CON);
+ flag &= ~(HINFC504_CON_PAGESIZE_MASK << HINFC504_CON_PAGEISZE_SHIFT);
+ switch (mtd->writesize) {
+ case 2048:
+ flag |= (0x001 << HINFC504_CON_PAGEISZE_SHIFT);
+ break;
+ /*
+ * TODO: add more pagesize support,
+ * default pagesize has been set in hisi_nfc_host_init
+ */
+ default:
+ dev_err(host->dev, "NON-2KB page size nand flash\n");
+ return -EINVAL;
+ }
+ hinfc_write(host, flag, HINFC504_CON);
+
+ if (chip->ecc.mode == NAND_ECC_HW)
+ hisi_nfc_ecc_probe(host);
+
+ return 0;
+}
+
+static const struct nand_controller_ops hisi_nfc_controller_ops = {
+ .attach_chip = hisi_nfc_attach_chip,
+};
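
Moving the dmam_alloc_coherent() call into the hook is what makes this conversion safe: nand_scan() only invokes ->attach_chip() once identification has established the page geometry, so mtd->writesize and mtd->oobsize are valid by then. The ordering this relies on, sketched from the nand_scan_with_ids() rework visible at the end of this diff (simplified, error unwinding omitted):

	int nand_scan_with_ids(struct mtd_info *mtd, int maxchips,
			       struct nand_flash_dev *ids)
	{
		struct nand_chip *chip = mtd_to_nand(mtd);
		int ret;

		ret = nand_scan_ident(mtd, maxchips, ids); /* geometry known */
		if (!ret)
			ret = nand_attach(chip);           /* ->attach_chip() */
		if (!ret)
			ret = nand_scan_tail(mtd);         /* ECC, BBT setup */

		return ret;
	}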
+
static int hisi_nfc_probe(struct platform_device *pdev)
{
- int ret = 0, irq, flag, max_chips = HINFC504_MAX_CHIP;
+ int ret = 0, irq, max_chips = HINFC504_MAX_CHIP;
struct device *dev = &pdev->dev;
struct hinfc_host *host;
struct nand_chip *chip;
@@ -769,42 +810,11 @@ static int hisi_nfc_probe(struct platform_device *pdev)
return ret;
}
- ret = nand_scan_ident(mtd, max_chips, NULL);
+ chip->dummy_controller.ops = &hisi_nfc_controller_ops;
+ ret = nand_scan(mtd, max_chips);
if (ret)
return ret;
- host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize,
- &host->dma_buffer, GFP_KERNEL);
- if (!host->buffer)
- return -ENOMEM;
-
- host->dma_oob = host->dma_buffer + mtd->writesize;
- memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
-
- flag = hinfc_read(host, HINFC504_CON);
- flag &= ~(HINFC504_CON_PAGESIZE_MASK << HINFC504_CON_PAGEISZE_SHIFT);
- switch (mtd->writesize) {
- case 2048:
- flag |= (0x001 << HINFC504_CON_PAGEISZE_SHIFT); break;
- /*
- * TODO: add more pagesize support,
- * default pagesize has been set in hisi_nfc_host_init
- */
- default:
- dev_err(dev, "NON-2KB page size nand flash\n");
- return -EINVAL;
- }
- hinfc_write(host, flag, HINFC504_CON);
-
- if (chip->ecc.mode == NAND_ECC_HW)
- hisi_nfc_ecc_probe(host);
-
- ret = nand_scan_tail(mtd);
- if (ret) {
- dev_err(dev, "nand_scan_tail failed: %d\n", ret);
- return ret;
- }
-
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "Err MTD partition=%d\n", ret);
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c
index 613b00a9604b..a7515452bc59 100644
--- a/drivers/mtd/nand/raw/jz4740_nand.c
+++ b/drivers/mtd/nand/raw/jz4740_nand.c
@@ -13,6 +13,7 @@
*
*/
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,9 +24,9 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
-#include <asm/mach-jz4740/jz4740_nand.h>
+#include <linux/platform_data/jz4740/jz4740_nand.h>
#define JZ_REG_NAND_CTRL 0x50
#define JZ_REG_NAND_ECC_CTRL 0x100
@@ -330,7 +331,7 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
if (chipnr == 0) {
/* Detect first chip. */
- ret = nand_scan_ident(mtd, 1, NULL);
+ ret = nand_scan(mtd, 1);
if (ret)
goto notfound_id;
@@ -355,7 +356,7 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
mtd->size += chip->chipsize;
}
- dev_info(&pdev->dev, "Found chip %i on bank %i\n", chipnr, bank);
+ dev_info(&pdev->dev, "Found chip %zu on bank %i\n", chipnr, bank);
return 0;
notfound_id:
@@ -367,6 +368,24 @@ notfound_id:
return ret;
}
+static int jz_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ struct jz_nand_platform_data *pdata = dev_get_platdata(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (pdata && pdata->ident_callback)
+ pdata->ident_callback(pdev, mtd, &pdata->partitions,
+ &pdata->num_partitions);
+
+ return 0;
+}
+
+static const struct nand_controller_ops jz_nand_controller_ops = {
+ .attach_chip = jz_nand_attach_chip,
+};
+
static int jz_nand_probe(struct platform_device *pdev)
{
int ret;
@@ -410,6 +429,7 @@ static int jz_nand_probe(struct platform_device *pdev)
chip->chip_delay = 50;
chip->cmd_ctrl = jz_nand_cmd_ctrl;
chip->select_chip = jz_nand_select_chip;
+ chip->dummy_controller.ops = &jz_nand_controller_ops;
if (nand->busy_gpio)
chip->dev_ready = jz_nand_dev_ready;
@@ -455,33 +475,20 @@ static int jz_nand_probe(struct platform_device *pdev)
goto err_iounmap_mmio;
}
- if (pdata && pdata->ident_callback) {
- pdata->ident_callback(pdev, mtd, &pdata->partitions,
- &pdata->num_partitions);
- }
-
- ret = nand_scan_tail(mtd);
- if (ret) {
- dev_err(&pdev->dev, "Failed to scan NAND\n");
- goto err_unclaim_banks;
- }
-
- ret = mtd_device_parse_register(mtd, NULL, NULL,
- pdata ? pdata->partitions : NULL,
- pdata ? pdata->num_partitions : 0);
+ ret = mtd_device_register(mtd, pdata ? pdata->partitions : NULL,
+ pdata ? pdata->num_partitions : 0);
if (ret) {
dev_err(&pdev->dev, "Failed to add mtd device\n");
- goto err_nand_release;
+ goto err_cleanup_nand;
}
dev_info(&pdev->dev, "Successfully registered JZ4740 NAND driver\n");
return 0;
-err_nand_release:
- nand_release(mtd);
-err_unclaim_banks:
+err_cleanup_nand:
+ nand_cleanup(chip);
while (chipnr--) {
unsigned char bank = nand->banks[chipnr];
jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
diff --git a/drivers/mtd/nand/raw/jz4780_nand.c b/drivers/mtd/nand/raw/jz4780_nand.c
index e69f6ae4c539..db4fa60bd52a 100644
--- a/drivers/mtd/nand/raw/jz4780_nand.c
+++ b/drivers/mtd/nand/raw/jz4780_nand.c
@@ -44,7 +44,7 @@ struct jz4780_nand_cs {
struct jz4780_nand_controller {
struct device *dev;
struct jz4780_bch *bch;
- struct nand_hw_control controller;
+ struct nand_controller controller;
unsigned int num_banks;
struct list_head chips;
int selected;
@@ -65,7 +65,8 @@ static inline struct jz4780_nand_chip *to_jz4780_nand_chip(struct mtd_info *mtd)
return container_of(mtd_to_nand(mtd), struct jz4780_nand_chip, chip);
}
-static inline struct jz4780_nand_controller *to_jz4780_nand_controller(struct nand_hw_control *ctrl)
+static inline struct jz4780_nand_controller
+*to_jz4780_nand_controller(struct nand_controller *ctrl)
{
return container_of(ctrl, struct jz4780_nand_controller, controller);
}
@@ -157,9 +158,8 @@ static int jz4780_nand_ecc_correct(struct mtd_info *mtd, u8 *dat,
return jz4780_bch_correct(nfc->bch, &params, dat, read_ecc);
}
-static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *dev)
+static int jz4780_nand_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = &nand->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(chip->controller);
int eccbytes;
@@ -170,7 +170,8 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de
switch (chip->ecc.mode) {
case NAND_ECC_HW:
if (!nfc->bch) {
- dev_err(dev, "HW BCH selected, but BCH controller not found\n");
+ dev_err(nfc->dev,
+ "HW BCH selected, but BCH controller not found\n");
return -ENODEV;
}
@@ -179,15 +180,16 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de
chip->ecc.correct = jz4780_nand_ecc_correct;
/* fall through */
case NAND_ECC_SOFT:
- dev_info(dev, "using %s (strength %d, size %d, bytes %d)\n",
- (nfc->bch) ? "hardware BCH" : "software ECC",
- chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
+ dev_info(nfc->dev, "using %s (strength %d, size %d, bytes %d)\n",
+ (nfc->bch) ? "hardware BCH" : "software ECC",
+ chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
break;
case NAND_ECC_NONE:
- dev_info(dev, "not using ECC\n");
+ dev_info(nfc->dev, "not using ECC\n");
break;
default:
- dev_err(dev, "ECC mode %d not supported\n", chip->ecc.mode);
+ dev_err(nfc->dev, "ECC mode %d not supported\n",
+ chip->ecc.mode);
return -EINVAL;
}
@@ -199,7 +201,7 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de
eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
if (eccbytes > mtd->oobsize - 2) {
- dev_err(dev,
+ dev_err(nfc->dev,
"invalid ECC config: required %d ECC bytes, but only %d are available",
eccbytes, mtd->oobsize - 2);
return -EINVAL;
@@ -210,6 +212,10 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de
return 0;
}
+static const struct nand_controller_ops jz4780_nand_controller_ops = {
+ .attach_chip = jz4780_nand_attach_chip,
+};
+
static int jz4780_nand_init_chip(struct platform_device *pdev,
struct jz4780_nand_controller *nfc,
struct device_node *np,
@@ -279,15 +285,8 @@ static int jz4780_nand_init_chip(struct platform_device *pdev,
chip->controller = &nfc->controller;
nand_set_flash_node(chip, np);
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret)
- return ret;
-
- ret = jz4780_nand_init_ecc(nand, dev);
- if (ret)
- return ret;
-
- ret = nand_scan_tail(mtd);
+ chip->controller->ops = &jz4780_nand_controller_ops;
+ ret = nand_scan(mtd, 1);
if (ret)
return ret;
@@ -368,7 +367,7 @@ static int jz4780_nand_probe(struct platform_device *pdev)
nfc->dev = dev;
nfc->num_banks = num_banks;
- nand_hw_control_init(&nfc->controller);
+ nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
ret = jz4780_nand_init_chips(nfc, pdev);
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 052d123a8304..e82abada130a 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -184,6 +184,7 @@ static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
};
struct lpc32xx_nand_host {
+ struct platform_device *pdev;
struct nand_chip nand_chip;
struct lpc32xx_mlc_platform_data *pdata;
struct clk *clk;
@@ -653,6 +654,32 @@ static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
return ncfg;
}
+static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+ struct device *dev = &host->pdev->dev;
+
+ host->dma_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
+ if (!host->dma_buf)
+ return -ENOMEM;
+
+ host->dummy_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
+ if (!host->dummy_buf)
+ return -ENOMEM;
+
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 512;
+ mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
+ host->mlcsubpages = mtd->writesize / 512;
+
+ return 0;
+}
+
+static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
+ .attach_chip = lpc32xx_nand_attach_chip,
+};
+
/*
* Probe for NAND controller
*/
@@ -669,6 +696,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
if (!host)
return -ENOMEM;
+ host->pdev = pdev;
+
rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->io_base = devm_ioremap_resource(&pdev->dev, rc);
if (IS_ERR(host->io_base))
@@ -748,31 +777,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
}
- /*
- * Scan to find existance of the device and
- * Get the type of NAND device SMALL block or LARGE block
- */
- res = nand_scan_ident(mtd, 1, NULL);
- if (res)
- goto release_dma_chan;
-
- host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
- if (!host->dma_buf) {
- res = -ENOMEM;
- goto release_dma_chan;
- }
-
- host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
- if (!host->dummy_buf) {
- res = -ENOMEM;
- goto release_dma_chan;
- }
-
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.size = 512;
- mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
- host->mlcsubpages = mtd->writesize / 512;
-
/* initially clear interrupt status */
readb(MLC_IRQ_SR(host->io_base));
@@ -794,10 +798,11 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
/*
- * Fills out all the uninitialized function pointers with the defaults
- * And scans for a bad block table if appropriate.
+ * Scan to find existence of the device and get the type of NAND device:
+ * SMALL block or LARGE block.
*/
- res = nand_scan_tail(mtd);
+ nand_chip->dummy_controller.ops = &lpc32xx_nand_controller_ops;
+ res = nand_scan(mtd, 1);
if (res)
goto free_irq;
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index 42820aa1abab..a4e8b7e75135 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -779,6 +779,46 @@ static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
return ncfg;
}
+static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+ /* OOB and ECC CPU and DMA work areas */
+ host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
+
+ /*
+ * Small page FLASH has a unique OOB layout, but large and huge
+ * page FLASH use the standard layout. Small page FLASH uses a
+ * custom BBT marker layout.
+ */
+ if (mtd->writesize <= 512)
+ mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
+
+ /* These sizes remain the same regardless of page size */
+ chip->ecc.size = 256;
+ chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
+ chip->ecc.prepad = 0;
+ chip->ecc.postpad = 0;
+
+ /*
+ * Use a custom BBT marker setup for small page FLASH that
+ * won't interfere with the ECC layout. Large and huge page
+ * FLASH use the standard layout.
+ */
+ if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
+ mtd->writesize <= 512) {
+ chip->bbt_td = &bbt_smallpage_main_descr;
+ chip->bbt_md = &bbt_smallpage_mirror_descr;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
+ .attach_chip = lpc32xx_nand_attach_chip,
+};
+
/*
* Probe for NAND controller
*/
@@ -884,41 +924,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
/* Find NAND device */
- res = nand_scan_ident(mtd, 1, NULL);
- if (res)
- goto release_dma;
-
- /* OOB and ECC CPU and DMA work areas */
- host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
-
- /*
- * Small page FLASH has a unique OOB layout, but large and huge
- * page FLASH use the standard layout. Small page FLASH uses a
- * custom BBT marker layout.
- */
- if (mtd->writesize <= 512)
- mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
-
- /* These sizes remain the same regardless of page size */
- chip->ecc.size = 256;
- chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
- chip->ecc.prepad = chip->ecc.postpad = 0;
-
- /*
- * Use a custom BBT marker setup for small page FLASH that
- * won't interfere with the ECC layout. Large and huge page
- * FLASH use the standard layout.
- */
- if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
- mtd->writesize <= 512) {
- chip->bbt_td = &bbt_smallpage_main_descr;
- chip->bbt_md = &bbt_smallpage_mirror_descr;
- }
-
- /*
- * Fills out all the uninitialized function pointers with the defaults
- */
- res = nand_scan_tail(mtd);
+ chip->dummy_controller.ops = &lpc32xx_nand_controller_ops;
+ res = nand_scan(mtd, 1);
if (res)
goto release_dma;
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index ebb1d141b900..7af4d6213ee5 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -318,7 +318,7 @@ struct marvell_nfc_caps {
* @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only)
*/
struct marvell_nfc {
- struct nand_hw_control controller;
+ struct nand_controller controller;
struct device *dev;
void __iomem *regs;
struct clk *core_clk;
@@ -335,7 +335,7 @@ struct marvell_nfc {
u8 *dma_buf;
};
-static inline struct marvell_nfc *to_marvell_nfc(struct nand_hw_control *ctrl)
+static inline struct marvell_nfc *to_marvell_nfc(struct nand_controller *ctrl)
{
return container_of(ctrl, struct marvell_nfc, controller);
}
@@ -650,11 +650,6 @@ static void marvell_nfc_select_chip(struct mtd_info *mtd, int die_nr)
return;
}
- /*
- * Do not change the timing registers when using the DT property
- * marvell,nand-keep-config; in that case ->ndtr0 and ->ndtr1 from the
- * marvell_nand structure are supposedly empty.
- */
writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
@@ -2157,6 +2152,7 @@ static int marvell_nand_ecc_init(struct mtd_info *mtd,
break;
case NAND_ECC_NONE:
case NAND_ECC_SOFT:
+ case NAND_ECC_ON_DIE:
if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
mtd->writesize != SZ_2K) {
dev_err(nfc->dev, "NFCv1 cannot write %d bytes pages\n",
@@ -2294,6 +2290,111 @@ static int marvell_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr,
return 0;
}
+static int marvell_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(nfc->dev);
+ int ret;
+
+ if (pdata && pdata->flash_bbt)
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ /*
+ * We'll use a bad block table stored in-flash and don't
+ * allow writing the bad block marker to the flash.
+ */
+ chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /* Save the chip-specific fields of NDCR */
+ marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
+ if (chip->options & NAND_BUSWIDTH_16)
+ marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ /*
+ * On small page NANDs, only one cycle is needed to pass the
+ * column address.
+ */
+ if (mtd->writesize <= 512) {
+ marvell_nand->addr_cyc = 1;
+ } else {
+ marvell_nand->addr_cyc = 2;
+ marvell_nand->ndcr |= NDCR_RA_START;
+ }
+
+ /*
+ * Now add the number of cycles needed to pass the row
+ * address.
+ *
+	 * Addressing a chip using CS 2 or 3 would also need a third row
+	 * cycle, but due to inconsistencies in the documentation and lack of
+ * hardware to test this situation, this case is not supported.
+ */
+ if (chip->options & NAND_ROW_ADDR_3)
+ marvell_nand->addr_cyc += 3;
+ else
+ marvell_nand->addr_cyc += 2;
+
+ if (pdata) {
+ chip->ecc.size = pdata->ecc_step_size;
+ chip->ecc.strength = pdata->ecc_strength;
+ }
+
+ ret = marvell_nand_ecc_init(mtd, &chip->ecc);
+ if (ret) {
+ dev_err(nfc->dev, "ECC init failed: %d\n", ret);
+ return ret;
+ }
+
+ if (chip->ecc.mode == NAND_ECC_HW) {
+ /*
+ * Subpage write not available with hardware ECC, prohibit also
+ * subpage read as in userspace subpage access would still be
+ * allowed and subpage write, if used, would lead to numerous
+ * uncorrectable ECC errors.
+ */
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ }
+
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * We keep the MTD name unchanged to avoid breaking platforms
+ * where the MTD cmdline parser is used and the bootloader
+ * has not been updated to use the new naming scheme.
+ */
+ mtd->name = "pxa3xx_nand-0";
+ } else if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+ * should define the following property in your NAND node, ie:
+ *
+ * label = "main-storage";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s:nand.%d", dev_name(nfc->dev),
+ marvell_nand->sels[0].cs);
+ if (!mtd->name) {
+ dev_err(nfc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops marvell_nand_controller_ops = {
+ .attach_chip = marvell_nand_attach_chip,
+};
+
static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
struct device_node *np)
{
@@ -2436,105 +2537,10 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);
chip->options |= NAND_BUSWIDTH_AUTO;
- ret = nand_scan_ident(mtd, marvell_nand->nsels, NULL);
- if (ret) {
- dev_err(dev, "could not identify the nand chip\n");
- return ret;
- }
-
- if (pdata && pdata->flash_bbt)
- chip->bbt_options |= NAND_BBT_USE_FLASH;
- if (chip->bbt_options & NAND_BBT_USE_FLASH) {
- /*
- * We'll use a bad block table stored in-flash and don't
- * allow writing the bad block marker to the flash.
- */
- chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
- chip->bbt_td = &bbt_main_descr;
- chip->bbt_md = &bbt_mirror_descr;
- }
-
- /* Save the chip-specific fields of NDCR */
- marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
- if (chip->options & NAND_BUSWIDTH_16)
- marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
-
- /*
- * On small page NANDs, only one cycle is needed to pass the
- * column address.
- */
- if (mtd->writesize <= 512) {
- marvell_nand->addr_cyc = 1;
- } else {
- marvell_nand->addr_cyc = 2;
- marvell_nand->ndcr |= NDCR_RA_START;
- }
-
- /*
- * Now add the number of cycles needed to pass the row
- * address.
- *
- * Addressing a chip using CS 2 or 3 should also need the third row
- * cycle but due to inconsistance in the documentation and lack of
- * hardware to test this situation, this case is not supported.
- */
- if (chip->options & NAND_ROW_ADDR_3)
- marvell_nand->addr_cyc += 3;
- else
- marvell_nand->addr_cyc += 2;
-
- if (pdata) {
- chip->ecc.size = pdata->ecc_step_size;
- chip->ecc.strength = pdata->ecc_strength;
- }
-
- ret = marvell_nand_ecc_init(mtd, &chip->ecc);
- if (ret) {
- dev_err(dev, "ECC init failed: %d\n", ret);
- return ret;
- }
-
- if (chip->ecc.mode == NAND_ECC_HW) {
- /*
- * Subpage write not available with hardware ECC, prohibit also
- * subpage read as in userspace subpage access would still be
- * allowed and subpage write, if used, would lead to numerous
- * uncorrectable ECC errors.
- */
- chip->options |= NAND_NO_SUBPAGE_WRITE;
- }
-
- if (pdata || nfc->caps->legacy_of_bindings) {
- /*
- * We keep the MTD name unchanged to avoid breaking platforms
- * where the MTD cmdline parser is used and the bootloader
- * has not been updated to use the new naming scheme.
- */
- mtd->name = "pxa3xx_nand-0";
- } else if (!mtd->name) {
- /*
- * If the new bindings are used and the bootloader has not been
- * updated to pass a new mtdparts parameter on the cmdline, you
- * should define the following property in your NAND node, ie:
- *
- * label = "main-storage";
- *
- * This way, mtd->name will be set by the core when
- * nand_set_flash_node() is called.
- */
- mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
- "%s:nand.%d", dev_name(nfc->dev),
- marvell_nand->sels[0].cs);
- if (!mtd->name) {
- dev_err(nfc->dev, "Failed to allocate mtd->name\n");
- return -ENOMEM;
- }
- }
-
- ret = nand_scan_tail(mtd);
+ ret = nand_scan(mtd, marvell_nand->nsels);
if (ret) {
- dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+ dev_err(dev, "could not scan the nand chip\n");
return ret;
}
@@ -2612,8 +2618,6 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
dev);
struct dma_slave_config config = {};
struct resource *r;
- dma_cap_mask_t mask;
- struct pxad_param param;
int ret;
if (!IS_ENABLED(CONFIG_PXA_DMA)) {
@@ -2626,20 +2630,7 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
if (ret)
return ret;
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!r) {
- dev_err(nfc->dev, "No resource defined for data DMA\n");
- return -ENXIO;
- }
-
- param.drcmr = r->start;
- param.prio = PXAD_PRIO_LOWEST;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- nfc->dma_chan =
- dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &param, nfc->dev,
- "data");
+ nfc->dma_chan = dma_request_slave_channel(nfc->dev, "data");
if (!nfc->dma_chan) {
dev_err(nfc->dev,
"Unable to request data DMA channel\n");
@@ -2677,6 +2668,21 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
return 0;
}
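
The replacement call requests the channel by the name used in the DT binding ("data") and lets the DMA core resolve it, instead of open-coding the PXA filter. dma_request_slave_channel() returns NULL on failure; as an aside (not what this patch does), the ERR_PTR-based dma_request_chan() variant would also let probe deferral propagate:

	nfc->dma_chan = dma_request_chan(nfc->dev, "data");
	if (IS_ERR(nfc->dma_chan)) {
		ret = PTR_ERR(nfc->dma_chan);	/* may be -EPROBE_DEFER */
		nfc->dma_chan = NULL;
		return ret;
	}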
+static void marvell_nfc_reset(struct marvell_nfc *nfc)
+{
+ /*
+ * ECC operations and interruptions are only enabled when specifically
+ * needed. ECC shall not be activated in the early stages (fails probe).
+ * Arbiter flag, even if marked as "reserved", must be set (empirical).
+ * SPARE_EN bit must always be set or ECC bytes will not be at the same
+ * offset in the read page and this will fail the protection.
+ */
+ writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
+ NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
+ writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+}
+
static int marvell_nfc_init(struct marvell_nfc *nfc)
{
struct device_node *np = nfc->dev->of_node;
@@ -2715,17 +2721,7 @@ static int marvell_nfc_init(struct marvell_nfc *nfc)
if (!nfc->caps->is_nfcv2)
marvell_nfc_init_dma(nfc);
- /*
- * ECC operations and interruptions are only enabled when specifically
- * needed. ECC shall not be activated in the early stages (fails probe).
- * Arbiter flag, even if marked as "reserved", must be set (empirical).
- * SPARE_EN bit must always be set or ECC bytes will not be at the same
- * offset in the read page and this will fail the protection.
- */
- writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
- NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
- writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
- writel_relaxed(0, nfc->regs + NDECCCTRL);
+ marvell_nfc_reset(nfc);
return 0;
}
@@ -2744,7 +2740,8 @@ static int marvell_nfc_probe(struct platform_device *pdev)
return -ENOMEM;
nfc->dev = dev;
- nand_hw_control_init(&nfc->controller);
+ nand_controller_init(&nfc->controller);
+ nfc->controller.ops = &marvell_nand_controller_ops;
INIT_LIST_HEAD(&nfc->chips);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2772,17 +2769,19 @@ static int marvell_nfc_probe(struct platform_device *pdev)
return ret;
nfc->reg_clk = devm_clk_get(&pdev->dev, "reg");
- if (PTR_ERR(nfc->reg_clk) != -ENOENT) {
- if (!IS_ERR(nfc->reg_clk)) {
- ret = clk_prepare_enable(nfc->reg_clk);
- if (ret)
- goto unprepare_core_clk;
- } else {
+ if (IS_ERR(nfc->reg_clk)) {
+ if (PTR_ERR(nfc->reg_clk) != -ENOENT) {
ret = PTR_ERR(nfc->reg_clk);
goto unprepare_core_clk;
}
+
+ nfc->reg_clk = NULL;
}
+ ret = clk_prepare_enable(nfc->reg_clk);
+ if (ret)
+ goto unprepare_core_clk;
+
marvell_nfc_disable_int(nfc, NDCR_ALL_INT);
marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
ret = devm_request_irq(dev, irq, marvell_nfc_isr,
@@ -2840,6 +2839,49 @@ static int marvell_nfc_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused marvell_nfc_suspend(struct device *dev)
+{
+ struct marvell_nfc *nfc = dev_get_drvdata(dev);
+ struct marvell_nand_chip *chip;
+
+ list_for_each_entry(chip, &nfc->chips, node)
+ marvell_nfc_wait_ndrun(&chip->chip);
+
+ clk_disable_unprepare(nfc->reg_clk);
+ clk_disable_unprepare(nfc->core_clk);
+
+ return 0;
+}
+
+static int __maybe_unused marvell_nfc_resume(struct device *dev)
+{
+ struct marvell_nfc *nfc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(nfc->core_clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(nfc->reg_clk);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Reset nfc->selected_chip so the next command will cause the timing
+ * registers to be restored in marvell_nfc_select_chip().
+ */
+ nfc->selected_chip = NULL;
+
+ /* Reset registers that have lost their contents */
+ marvell_nfc_reset(nfc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops marvell_nfc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(marvell_nfc_suspend, marvell_nfc_resume)
+};
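
One note on the annotations above: with CONFIG_PM_SLEEP disabled, SET_SYSTEM_SLEEP_PM_OPS() expands to nothing, so the two callbacks would otherwise trigger "defined but not used" warnings; __maybe_unused avoids wrapping them in #ifdef CONFIG_PM_SLEEP. The bare shape of the pattern (foo_* names hypothetical):

	static int __maybe_unused foo_suspend(struct device *dev)
	{
		return 0;	/* quiesce the hardware, gate clocks */
	}

	static int __maybe_unused foo_resume(struct device *dev)
	{
		return 0;	/* ungate clocks, reprogram volatile registers */
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	};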
+
static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
.max_cs_nb = 4,
.max_rb_nb = 2,
@@ -2924,6 +2966,7 @@ static struct platform_driver marvell_nfc_driver = {
.driver = {
.name = "marvell-nfc",
.of_match_table = marvell_nfc_of_ids,
+ .pm = &marvell_nfc_pm_ops,
},
.id_table = marvell_nfc_platform_ids,
.probe = marvell_nfc_probe,
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 75c845adb050..57b5ed1699e3 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -145,7 +145,7 @@ struct mtk_nfc_clk {
};
struct mtk_nfc {
- struct nand_hw_control controller;
+ struct nand_controller controller;
struct mtk_ecc_config ecc_cfg;
struct mtk_nfc_clk clk;
struct mtk_ecc *ecc;
@@ -1250,13 +1250,54 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
return 0;
}
+static int mtk_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ int len;
+ int ret;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ dev_err(dev, "16bits buswidth not supported");
+ return -EINVAL;
+ }
+
+	/* store bbt magic in page, because OOB is not protected */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ ret = mtk_nfc_ecc_init(dev, mtd);
+ if (ret)
+ return ret;
+
+ ret = mtk_nfc_set_spare_per_sector(&mtk_nand->spare_per_sector, mtd);
+ if (ret)
+ return ret;
+
+ mtk_nfc_set_fdm(&mtk_nand->fdm, mtd);
+ mtk_nfc_set_bad_mark_ctl(&mtk_nand->bad_mark, mtd);
+
+ len = mtd->writesize + mtd->oobsize;
+ nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!nfc->buffer)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static const struct nand_controller_ops mtk_nfc_controller_ops = {
+ .attach_chip = mtk_nfc_attach_chip,
+};
+
static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
struct device_node *np)
{
struct mtk_nfc_nand_chip *chip;
struct nand_chip *nand;
struct mtd_info *mtd;
- int nsels, len;
+ int nsels;
u32 tmp;
int ret;
int i;
@@ -1324,40 +1365,11 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
mtk_nfc_hw_init(nfc);
- ret = nand_scan_ident(mtd, nsels, NULL);
- if (ret)
- return ret;
-
- /* store bbt magic in page, cause OOB is not protected */
- if (nand->bbt_options & NAND_BBT_USE_FLASH)
- nand->bbt_options |= NAND_BBT_NO_OOB;
-
- ret = mtk_nfc_ecc_init(dev, mtd);
- if (ret)
- return -EINVAL;
-
- if (nand->options & NAND_BUSWIDTH_16) {
- dev_err(dev, "16bits buswidth not supported");
- return -EINVAL;
- }
-
- ret = mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
- if (ret)
- return ret;
-
- mtk_nfc_set_fdm(&chip->fdm, mtd);
- mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);
-
- len = mtd->writesize + mtd->oobsize;
- nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
- if (!nfc->buffer)
- return -ENOMEM;
-
- ret = nand_scan_tail(mtd);
+ ret = nand_scan(mtd, nsels);
if (ret)
return ret;
- ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+ ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "mtd parse partition error\n");
nand_release(mtd);
@@ -1443,6 +1455,7 @@ static int mtk_nfc_probe(struct platform_device *pdev)
spin_lock_init(&nfc->controller.lock);
init_waitqueue_head(&nfc->controller.wq);
INIT_LIST_HEAD(&nfc->chips);
+ nfc->controller.ops = &mtk_nfc_controller_ops;
/* probe defer if not ready */
nfc->ecc = of_mtk_ecc_get(np);
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 26cef218bb43..4c9214dea424 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright 2008 Sascha Hauer, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
*/
#include <linux/delay.h>
@@ -34,8 +21,6 @@
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/of_device.h>
-
-#include <asm/mach/flash.h>
#include <linux/platform_data/mtd-mxc_nand.h>
#define DRIVER_NAME "mxc_nand"
@@ -1686,7 +1671,7 @@ static const struct of_device_id mxcnd_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mxcnd_dt_ids);
-static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+static int mxcnd_probe_dt(struct mxc_nand_host *host)
{
struct device_node *np = host->dev->of_node;
const struct of_device_id *of_id =
@@ -1700,12 +1685,80 @@ static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
return 0;
}
#else
-static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+static int mxcnd_probe_dt(struct mxc_nand_host *host)
{
return 1;
}
#endif
+static int mxcnd_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ struct device *dev = mtd->dev.parent;
+
+ switch (chip->ecc.mode) {
+ case NAND_ECC_HW:
+ chip->ecc.read_page = mxc_nand_read_page;
+ chip->ecc.read_page_raw = mxc_nand_read_page_raw;
+ chip->ecc.read_oob = mxc_nand_read_oob;
+ chip->ecc.write_page = mxc_nand_write_page_ecc;
+ chip->ecc.write_page_raw = mxc_nand_write_page_raw;
+ chip->ecc.write_oob = mxc_nand_write_oob;
+ break;
+
+ case NAND_ECC_SOFT:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /* Allocate the right size buffer now */
+ devm_kfree(dev, (void *)host->data_buf);
+ host->data_buf = devm_kzalloc(dev, mtd->writesize + mtd->oobsize,
+ GFP_KERNEL);
+ if (!host->data_buf)
+ return -ENOMEM;
+
+	/* Call preset again, with correct writesize this time */
+ host->devtype_data->preset(mtd);
+
+ if (!chip->ecc.bytes) {
+ if (host->eccsize == 8)
+ chip->ecc.bytes = 18;
+ else if (host->eccsize == 4)
+ chip->ecc.bytes = 9;
+ }
+
+ /*
+ * Experimentation shows that i.MX NFC can only handle up to 218 oob
+ * bytes. Limit used_oobsize to 218 so as to not confuse copy_spare()
+ * into copying invalid data to/from the spare IO buffer, as this
+ * might cause ECC data corruption when doing sub-page write to a
+ * partially written page.
+ */
+ host->used_oobsize = min(mtd->oobsize, 218U);
+
+ if (chip->ecc.mode == NAND_ECC_HW) {
+ if (is_imx21_nfc(host) || is_imx27_nfc(host))
+ chip->ecc.strength = 1;
+ else
+ chip->ecc.strength = (host->eccsize == 4) ? 4 : 8;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops mxcnd_controller_ops = {
+ .attach_chip = mxcnd_attach_chip,
+};
+
static int mxcnd_probe(struct platform_device *pdev)
{
struct nand_chip *this;
@@ -1845,71 +1898,9 @@ static int mxcnd_probe(struct platform_device *pdev)
host->devtype_data->irq_control(host, 1);
}
- /* first scan to find the device and get the page size */
- err = nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL);
- if (err)
- goto escan;
-
- switch (this->ecc.mode) {
- case NAND_ECC_HW:
- this->ecc.read_page = mxc_nand_read_page;
- this->ecc.read_page_raw = mxc_nand_read_page_raw;
- this->ecc.read_oob = mxc_nand_read_oob;
- this->ecc.write_page = mxc_nand_write_page_ecc;
- this->ecc.write_page_raw = mxc_nand_write_page_raw;
- this->ecc.write_oob = mxc_nand_write_oob;
- break;
-
- case NAND_ECC_SOFT:
- break;
-
- default:
- err = -EINVAL;
- goto escan;
- }
-
- if (this->bbt_options & NAND_BBT_USE_FLASH) {
- this->bbt_td = &bbt_main_descr;
- this->bbt_md = &bbt_mirror_descr;
- }
-
- /* allocate the right size buffer now */
- devm_kfree(&pdev->dev, (void *)host->data_buf);
- host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize,
- GFP_KERNEL);
- if (!host->data_buf) {
- err = -ENOMEM;
- goto escan;
- }
-
- /* Call preset again, with correct writesize this time */
- host->devtype_data->preset(mtd);
-
- if (!this->ecc.bytes) {
- if (host->eccsize == 8)
- this->ecc.bytes = 18;
- else if (host->eccsize == 4)
- this->ecc.bytes = 9;
- }
-
- /*
- * Experimentation shows that i.MX NFC can only handle up to 218 oob
- * bytes. Limit used_oobsize to 218 so as to not confuse copy_spare()
- * into copying invalid data to/from the spare IO buffer, as this
- * might cause ECC data corruption when doing sub-page write to a
- * partially written page.
- */
- host->used_oobsize = min(mtd->oobsize, 218U);
-
- if (this->ecc.mode == NAND_ECC_HW) {
- if (is_imx21_nfc(host) || is_imx27_nfc(host))
- this->ecc.strength = 1;
- else
- this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
- }
-
- /* second phase scan */
- err = nand_scan_tail(mtd);
+ /* Scan the NAND device */
+ this->dummy_controller.ops = &mxcnd_controller_ops;
+ err = nand_scan(mtd, is_imx25_nfc(host) ? 4 : 1);
if (err)
goto escan;
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index b01d15ec4c56..d527e448ce19 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -2668,8 +2668,8 @@ static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
return subop && instr_idx < subop->ninstrs;
}
-static int nand_subop_get_start_off(const struct nand_subop *subop,
- unsigned int instr_idx)
+static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
if (instr_idx)
return 0;
@@ -2688,12 +2688,12 @@ static int nand_subop_get_start_off(const struct nand_subop *subop,
*
* Given an address instruction, returns the offset of the first cycle to issue.
*/
-int nand_subop_get_addr_start_off(const struct nand_subop *subop,
- unsigned int instr_idx)
+unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
- subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
- return -EINVAL;
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
+ return 0;
return nand_subop_get_start_off(subop, instr_idx);
}
@@ -2710,14 +2710,14 @@ EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
*
* Given an address instruction, returns the number of address cycle to issue.
*/
-int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
- unsigned int instr_idx)
+unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
int start_off, end_off;
- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
- subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
- return -EINVAL;
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
+ return 0;
start_off = nand_subop_get_addr_start_off(subop, instr_idx);
@@ -2742,12 +2742,12 @@ EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
*
* Given a data instruction, returns the offset to start from.
*/
-int nand_subop_get_data_start_off(const struct nand_subop *subop,
- unsigned int instr_idx)
+unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
- !nand_instr_is_data(&subop->instrs[instr_idx]))
- return -EINVAL;
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx])))
+ return 0;
return nand_subop_get_start_off(subop, instr_idx);
}
@@ -2764,14 +2764,14 @@ EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
*
* Returns the length of the chunk of data to send/receive.
*/
-int nand_subop_get_data_len(const struct nand_subop *subop,
- unsigned int instr_idx)
+unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
int start_off = 0, end_off;
- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
- !nand_instr_is_data(&subop->instrs[instr_idx]))
- return -EINVAL;
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx])))
+ return 0;
start_off = nand_subop_get_data_start_off(subop, instr_idx);
@@ -2967,6 +2967,23 @@ int nand_check_erased_ecc_chunk(void *data, int datalen,
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
/**
+ * nand_read_page_raw_notsupp - dummy read raw page function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Returns -ENOTSUPP unconditionally.
+ */
+int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(nand_read_page_raw_notsupp);
+
+/**
* nand_read_page_raw - [INTERN] read raw page data without ecc
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -3960,6 +3977,22 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
return ret;
}
+/**
+ * nand_write_page_raw_notsupp - dummy raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * Returns -ENOTSUPP unconditionally.
+ */
+int nand_write_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int oob_required, int page)
+{
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(nand_write_page_raw_notsupp);
/**
* nand_write_page_raw - [INTERN] raw page write function
@@ -4965,12 +4998,10 @@ static void nand_set_defaults(struct nand_chip *chip)
chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
if (!chip->read_buf || chip->read_buf == nand_read_buf)
chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
- if (!chip->scan_bbt)
- chip->scan_bbt = nand_default_bbt;
if (!chip->controller) {
- chip->controller = &chip->hwcontrol;
- nand_hw_control_init(chip->controller);
+ chip->controller = &chip->dummy_controller;
+ nand_controller_init(chip->controller);
}
if (!chip->buf_align)
@@ -5120,6 +5151,8 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_onfi_params *p;
+ struct onfi_params *onfi;
+ int onfi_version = 0;
char id[4];
int i, ret, val;
@@ -5168,30 +5201,35 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
}
}
+ if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+ chip->manufacturer.desc->ops->fixup_onfi_param_page)
+ chip->manufacturer.desc->ops->fixup_onfi_param_page(chip, p);
+
/* Check version */
val = le16_to_cpu(p->revision);
- if (val & (1 << 5))
- chip->parameters.onfi.version = 23;
- else if (val & (1 << 4))
- chip->parameters.onfi.version = 22;
- else if (val & (1 << 3))
- chip->parameters.onfi.version = 21;
- else if (val & (1 << 2))
- chip->parameters.onfi.version = 20;
- else if (val & (1 << 1))
- chip->parameters.onfi.version = 10;
-
- if (!chip->parameters.onfi.version) {
+ if (val & ONFI_VERSION_2_3)
+ onfi_version = 23;
+ else if (val & ONFI_VERSION_2_2)
+ onfi_version = 22;
+ else if (val & ONFI_VERSION_2_1)
+ onfi_version = 21;
+ else if (val & ONFI_VERSION_2_0)
+ onfi_version = 20;
+ else if (val & ONFI_VERSION_1_0)
+ onfi_version = 10;
+
+ if (!onfi_version) {
pr_info("unsupported ONFI version: %d\n", val);
goto free_onfi_param_page;
- } else {
- ret = 1;
}
sanitize_string(p->manufacturer, sizeof(p->manufacturer));
sanitize_string(p->model, sizeof(p->model));
- strncpy(chip->parameters.model, p->model,
- sizeof(chip->parameters.model) - 1);
+ chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
+ if (!chip->parameters.model) {
+ ret = -ENOMEM;
+ goto free_onfi_param_page;
+ }
mtd->writesize = le32_to_cpu(p->byte_per_page);
@@ -5219,7 +5257,7 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
if (p->ecc_bits != 0xff) {
chip->ecc_strength_ds = p->ecc_bits;
chip->ecc_step_ds = 512;
- } else if (chip->parameters.onfi.version >= 21 &&
+ } else if (onfi_version >= 21 &&
(le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
/*
@@ -5246,19 +5284,33 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
bitmap_set(chip->parameters.set_feature_list,
ONFI_FEATURE_ADDR_TIMING_MODE, 1);
}
- chip->parameters.onfi.tPROG = le16_to_cpu(p->t_prog);
- chip->parameters.onfi.tBERS = le16_to_cpu(p->t_bers);
- chip->parameters.onfi.tR = le16_to_cpu(p->t_r);
- chip->parameters.onfi.tCCS = le16_to_cpu(p->t_ccs);
- chip->parameters.onfi.async_timing_mode =
- le16_to_cpu(p->async_timing_mode);
- chip->parameters.onfi.vendor_revision =
- le16_to_cpu(p->vendor_revision);
- memcpy(chip->parameters.onfi.vendor, p->vendor,
- sizeof(p->vendor));
+ onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
+ if (!onfi) {
+ ret = -ENOMEM;
+ goto free_model;
+ }
+
+ onfi->version = onfi_version;
+ onfi->tPROG = le16_to_cpu(p->t_prog);
+ onfi->tBERS = le16_to_cpu(p->t_bers);
+ onfi->tR = le16_to_cpu(p->t_r);
+ onfi->tCCS = le16_to_cpu(p->t_ccs);
+ onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
+ onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
+ memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
+ chip->parameters.onfi = onfi;
+
+ /* Identification done, free the full ONFI parameter page and exit */
+ kfree(p);
+
+ return 1;
+
+free_model:
+ kfree(chip->parameters.model);
free_onfi_param_page:
kfree(p);
+
return ret;
}
@@ -5321,8 +5373,11 @@ static int nand_flash_detect_jedec(struct nand_chip *chip)
sanitize_string(p->manufacturer, sizeof(p->manufacturer));
sanitize_string(p->model, sizeof(p->model));
- strncpy(chip->parameters.model, p->model,
- sizeof(chip->parameters.model) - 1);
+ chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
+ if (!chip->parameters.model) {
+ ret = -ENOMEM;
+ goto free_jedec_param_page;
+ }
mtd->writesize = le32_to_cpu(p->byte_per_page);
@@ -5511,8 +5566,9 @@ static bool find_full_id_nand(struct nand_chip *chip,
chip->onfi_timing_mode_default =
type->onfi_timing_mode_default;
- strncpy(chip->parameters.model, type->name,
- sizeof(chip->parameters.model) - 1);
+ chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
+ if (!chip->parameters.model)
+ return false;
return true;
}
@@ -5651,7 +5707,6 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
}
}
- chip->parameters.onfi.version = 0;
if (!type->name || !type->pagesize) {
/* Check if the chip is ONFI compliant */
ret = nand_flash_detect_onfi(chip);
@@ -5671,8 +5726,9 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
if (!type->name)
return -ENODEV;
- strncpy(chip->parameters.model, type->name,
- sizeof(chip->parameters.model) - 1);
+ chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
+ if (!chip->parameters.model)
+ return -ENOMEM;
chip->chipsize = (uint64_t)type->chipsize << 20;
@@ -5702,7 +5758,9 @@ ident_done:
mtd->name);
pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
- return -EINVAL;
+ ret = -EINVAL;
+
+ goto free_detect_allocation;
}
nand_decode_bbm_options(chip);
@@ -5739,6 +5797,11 @@ ident_done:
(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
return 0;
+
+free_detect_allocation:
+ kfree(chip->parameters.model);
+
+ return ret;
}
static const char * const nand_ecc_modes[] = {
@@ -5777,6 +5840,7 @@ static int of_get_nand_ecc_mode(struct device_node *np)
static const char * const nand_ecc_algos[] = {
[NAND_ECC_HAMMING] = "hamming",
[NAND_ECC_BCH] = "bch",
+ [NAND_ECC_RS] = "rs",
};
static int of_get_nand_ecc_algo(struct device_node *np)
@@ -5858,6 +5922,9 @@ static int nand_dt_init(struct nand_chip *chip)
if (of_get_nand_bus_width(dn) == 16)
chip->options |= NAND_BUSWIDTH_16;
+ if (of_property_read_bool(dn, "nand-is-boot-medium"))
+ chip->options |= NAND_IS_BOOT_MEDIUM;
+
if (of_get_nand_on_flash_bbt(dn))
chip->bbt_options |= NAND_BBT_USE_FLASH;
@@ -5885,7 +5952,7 @@ static int nand_dt_init(struct nand_chip *chip)
}
/**
- * nand_scan_ident - [NAND Interface] Scan for the NAND device
+ * nand_scan_ident - Scan for the NAND device
* @mtd: MTD device structure
* @maxchips: number of chips to scan for
* @table: alternative NAND ID table
@@ -5893,9 +5960,13 @@ static int nand_dt_init(struct nand_chip *chip)
* This is the first phase of the normal nand_scan() function. It reads the
* flash ID and sets up MTD fields accordingly.
*
+ * This helper used to be called directly from controller drivers that needed
+ * to tweak some ECC-related parameters before nand_scan_tail(). This separation
+ * prevented dynamic allocations during this phase, which was inconvenient and
+ * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
*/
-int nand_scan_ident(struct mtd_info *mtd, int maxchips,
- struct nand_flash_dev *table)
+static int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+ struct nand_flash_dev *table)
{
int i, nand_maf_id, nand_dev_id;
struct nand_chip *chip = mtd_to_nand(mtd);
@@ -5969,7 +6040,12 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
return 0;
}
-EXPORT_SYMBOL(nand_scan_ident);
+
+static void nand_scan_ident_cleanup(struct nand_chip *chip)
+{
+ kfree(chip->parameters.model);
+ kfree(chip->parameters.onfi);
+}
static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
{
@@ -6077,24 +6153,17 @@ static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
* by the controller and the calculated ECC bytes fit within the chip's OOB.
* On success, the calculated ECC bytes is set.
*/
-int nand_check_ecc_caps(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail)
+static int
+nand_check_ecc_caps(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_ecc_step_info *stepinfo;
int preset_step = chip->ecc.size;
int preset_strength = chip->ecc.strength;
- int nsteps, ecc_bytes;
+ int ecc_bytes, nsteps = mtd->writesize / preset_step;
int i, j;
- if (WARN_ON(oobavail < 0))
- return -EINVAL;
-
- if (!preset_step || !preset_strength)
- return -ENODATA;
-
- nsteps = mtd->writesize / preset_step;
-
for (i = 0; i < caps->nstepinfos; i++) {
stepinfo = &caps->stepinfos[i];
@@ -6127,7 +6196,6 @@ int nand_check_ecc_caps(struct nand_chip *chip,
return -ENOTSUPP;
}
-EXPORT_SYMBOL_GPL(nand_check_ecc_caps);
/**
* nand_match_ecc_req - meet the chip's requirement with least ECC bytes
@@ -6139,8 +6207,9 @@ EXPORT_SYMBOL_GPL(nand_check_ecc_caps);
* number of ECC bytes (i.e. with the largest number of OOB-free bytes).
* On success, the chosen ECC settings are set.
*/
-int nand_match_ecc_req(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail)
+static int
+nand_match_ecc_req(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_ecc_step_info *stepinfo;
@@ -6151,9 +6220,6 @@ int nand_match_ecc_req(struct nand_chip *chip,
int best_ecc_bytes_total = INT_MAX;
int i, j;
- if (WARN_ON(oobavail < 0))
- return -EINVAL;
-
/* No information provided by the NAND chip */
if (!req_step || !req_strength)
return -ENOTSUPP;
@@ -6212,7 +6278,6 @@ int nand_match_ecc_req(struct nand_chip *chip,
return 0;
}
-EXPORT_SYMBOL_GPL(nand_match_ecc_req);
/**
* nand_maximize_ecc - choose the max ECC strength available
@@ -6223,8 +6288,9 @@ EXPORT_SYMBOL_GPL(nand_match_ecc_req);
* Choose the max ECC strength that is supported on the controller, and can fit
* within the chip's OOB. On success, the chosen ECC settings are set.
*/
-int nand_maximize_ecc(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail)
+static int
+nand_maximize_ecc(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_ecc_step_info *stepinfo;
@@ -6234,9 +6300,6 @@ int nand_maximize_ecc(struct nand_chip *chip,
int best_strength, best_ecc_bytes;
int i, j;
- if (WARN_ON(oobavail < 0))
- return -EINVAL;
-
for (i = 0; i < caps->nstepinfos; i++) {
stepinfo = &caps->stepinfos[i];
step_size = stepinfo->stepsize;
@@ -6285,7 +6348,44 @@ int nand_maximize_ecc(struct nand_chip *chip,
return 0;
}
-EXPORT_SYMBOL_GPL(nand_maximize_ecc);
+
+/**
+ * nand_ecc_choose_conf - Set the ECC strength and ECC step size
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * Choose the ECC configuration according to the following logic:
+ *
+ * 1. If both ECC step size and ECC strength are already set (usually by DT)
+ * then check if it is supported by this controller.
+ * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
+ * 3. Otherwise, try to match the ECC step size and ECC strength closest
+ * to the chip's requirement. If the available OOB size can't fit the
+ * chip's requirement, then fall back to the maximum ECC step size and
+ * ECC strength.
+ *
+ * On success, the chosen ECC settings are set.
+ */
+int nand_ecc_choose_conf(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
+ return -EINVAL;
+
+ if (chip->ecc.size && chip->ecc.strength)
+ return nand_check_ecc_caps(chip, caps, oobavail);
+
+ if (chip->ecc.options & NAND_ECC_MAXIMIZE)
+ return nand_maximize_ecc(chip, caps, oobavail);
+
+ if (!nand_match_ecc_req(chip, caps, oobavail))
+ return 0;
+
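+	/* Fallback: the chip's exact requirement doesn't fit, so maximize */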
+ return nand_maximize_ecc(chip, caps, oobavail);
+}
+EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
/*
* Check if the chip configuration meet the datasheet requirements.
@@ -6322,14 +6422,14 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd)
}
/**
- * nand_scan_tail - [NAND Interface] Scan for the NAND device
+ * nand_scan_tail - Scan for the NAND device
* @mtd: MTD device structure
*
* This is the second phase of the normal nand_scan() function. It fills out
* all the uninitialized function pointers with the defaults and scans for a
* bad block table if appropriate.
*/
-int nand_scan_tail(struct mtd_info *mtd)
+static int nand_scan_tail(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
@@ -6636,7 +6736,7 @@ int nand_scan_tail(struct mtd_info *mtd)
return 0;
/* Build bad block table */
- ret = chip->scan_bbt(mtd);
+ ret = nand_create_bbt(chip);
if (ret)
goto err_nand_manuf_cleanup;
@@ -6653,24 +6753,27 @@ err_free_buf:
return ret;
}
-EXPORT_SYMBOL(nand_scan_tail);
-/*
- * is_module_text_address() isn't exported, and it's mostly a pointless
- * test if this is a module _anyway_ -- they'd have to try _really_ hard
- * to call us from in-kernel code if the core NAND support is modular.
- */
-#ifdef MODULE
-#define caller_is_module() (1)
-#else
-#define caller_is_module() \
- is_module_text_address((unsigned long)__builtin_return_address(0))
-#endif
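+/*
+ * Optional controller hooks: give the controller driver a chance to
+ * allocate resources and adjust the ECC configuration once the chip has
+ * been identified.
+ */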
+static int nand_attach(struct nand_chip *chip)
+{
+ if (chip->controller->ops && chip->controller->ops->attach_chip)
+ return chip->controller->ops->attach_chip(chip);
+
+ return 0;
+}
+
+static void nand_detach(struct nand_chip *chip)
+{
+ if (chip->controller->ops && chip->controller->ops->detach_chip)
+ chip->controller->ops->detach_chip(chip);
+}
/**
* nand_scan_with_ids - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
- * @maxchips: number of chips to scan for
+ * @maxchips: number of chips to scan for. @nand_scan_ident() will not be run if
+ * this parameter is zero (useful for specific drivers that must
+ *		 handle this part of the process themselves, e.g. docg4).
* @ids: optional flash IDs table
*
* This fills out all the uninitialized function pointers with the defaults.
@@ -6680,11 +6783,30 @@ EXPORT_SYMBOL(nand_scan_tail);
int nand_scan_with_ids(struct mtd_info *mtd, int maxchips,
struct nand_flash_dev *ids)
{
+ struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
- ret = nand_scan_ident(mtd, maxchips, ids);
- if (!ret)
- ret = nand_scan_tail(mtd);
+ if (maxchips) {
+ ret = nand_scan_ident(mtd, maxchips, ids);
+ if (ret)
+ return ret;
+ }
+
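+	/* The controller can now tweak the ECC setup for the detected chip */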
+ ret = nand_attach(chip);
+ if (ret)
+ goto cleanup_ident;
+
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ goto detach_chip;
+
+ return 0;
+
+detach_chip:
+ nand_detach(chip);
+cleanup_ident:
+ nand_scan_ident_cleanup(chip);
+
return ret;
}
EXPORT_SYMBOL(nand_scan_with_ids);
@@ -6712,7 +6834,14 @@ void nand_cleanup(struct nand_chip *chip)
/* Free manufacturer priv data. */
nand_manufacturer_cleanup(chip);
+
+ /* Free controller-specific allocations after chip identification */
+ nand_detach(chip);
+
+ /* Free identification phase allocations */
+ nand_scan_ident_cleanup(chip);
}
+
EXPORT_SYMBOL_GPL(nand_cleanup);
/**
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index d9f4ceff2568..39db352f8757 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -1349,15 +1349,14 @@ static int nand_create_badblock_pattern(struct nand_chip *this)
}
/**
- * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
- * @mtd: MTD device structure
+ * nand_create_bbt - [NAND Interface] Select a default bad block table for the device
+ * @this: NAND chip object
*
* This function selects the default bad block table support for the device and
* calls the nand_scan_bbt function.
*/
-int nand_default_bbt(struct mtd_info *mtd)
+int nand_create_bbt(struct nand_chip *this)
{
- struct nand_chip *this = mtd_to_nand(mtd);
int ret;
/* Is a flash based bad block table requested? */
@@ -1383,8 +1382,9 @@ int nand_default_bbt(struct mtd_info *mtd)
return ret;
}
- return nand_scan_bbt(mtd, this->badblock_pattern);
+ return nand_scan_bbt(nand_to_mtd(this), this->badblock_pattern);
}
+EXPORT_SYMBOL(nand_create_bbt);
/**
* nand_isreserved_bbt - [NAND Interface] Check if a block is reserved
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index d542908a0ebb..4ffbb26e76d6 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -100,6 +100,16 @@ static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
struct mtd_info *mtd = nand_to_mtd(chip);
u16 column = ((u16)addr << 8) | addr;
+ if (chip->exec_op) {
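+		/* A single address cycle then one data byte, no command cycle */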
+ struct nand_op_instr instrs[] = {
+ NAND_OP_ADDR(1, &addr, 0),
+ NAND_OP_8BIT_DATA_OUT(1, &val, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
chip->write_byte(mtd, val);
@@ -473,6 +483,19 @@ static void hynix_nand_extract_oobsize(struct nand_chip *chip,
WARN(1, "Invalid OOB size");
break;
}
+
+ /*
+ * The datasheet of H27UCG8T2BTR mentions that the "Redundant
+ * Area Size" is encoded "per 8KB" (page size). This chip uses
+ * a page size of 16KiB. The datasheet mentions an OOB size of
+	 * 1,280 bytes, but the OOB size encoded in the ID bytes (using
+ * the existing logic above) is 640 bytes.
+ * Update the OOB size for this chip by taking the value
+ * determined above and scaling it to the actual page size (so
+ * the actual OOB size for this chip is: 640 * 16k / 8k).
+ */
+ if (chip->id.data[1] == 0xde)
+ mtd->oobsize *= mtd->writesize / SZ_8K;
}
}
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index 5ec4c90a637d..f5dc0a7a2456 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -16,12 +16,33 @@
*/
#include <linux/mtd/rawnand.h>
+#include <linux/slab.h>
/*
- * Special Micron status bit that indicates when the block has been
- * corrected by on-die ECC and should be rewritten
+ * Special Micron status bit 3 indicates that the block has been
+ * corrected by on-die ECC and should be rewritten.
*/
-#define NAND_STATUS_WRITE_RECOMMENDED BIT(3)
+#define NAND_ECC_STATUS_WRITE_RECOMMENDED BIT(3)
+
+/*
+ * On chips with 8-bit ECC an additional bit can be used to distinguish
+ * cases where errors were corrected without needing a rewrite.
+ *
+ * Bit 4 Bit 3 Bit 0 Description
+ * ----- ----- ----- -----------
+ * 0 0 0 No Errors
+ * 0 0 1 Multiple uncorrected errors
+ * 0 1 0 4 - 6 errors corrected, recommend rewrite
+ * 0 1 1 Reserved
+ * 1 0 0 1 - 3 errors corrected
+ * 1 0 1 Reserved
+ * 1 1 0 7 - 8 errors corrected, recommend rewrite
+ */
+#define NAND_ECC_STATUS_MASK (BIT(4) | BIT(3) | BIT(0))
+#define NAND_ECC_STATUS_UNCORRECTABLE BIT(0)
+#define NAND_ECC_STATUS_4_6_CORRECTED BIT(3)
+#define NAND_ECC_STATUS_1_3_CORRECTED BIT(4)
+#define NAND_ECC_STATUS_7_8_CORRECTED (BIT(4) | BIT(3))
struct nand_onfi_vendor_micron {
u8 two_plane_read;
@@ -43,6 +64,16 @@ struct nand_onfi_vendor_micron {
u8 param_revision;
} __packed;
+struct micron_on_die_ecc {
+ bool forced;
+ bool enabled;
+ void *rawbuf;
+};
+
+struct micron_nand {
+ struct micron_on_die_ecc ecc;
+};
+
static int micron_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
{
struct nand_chip *chip = mtd_to_nand(mtd);
@@ -57,9 +88,10 @@ static int micron_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
static int micron_nand_onfi_init(struct nand_chip *chip)
{
struct nand_parameters *p = &chip->parameters;
- struct nand_onfi_vendor_micron *micron = (void *)p->onfi.vendor;
- if (chip->parameters.onfi.version && p->onfi.vendor_revision) {
+ if (p->onfi) {
+ struct nand_onfi_vendor_micron *micron = (void *)p->onfi->vendor;
+
chip->read_retries = micron->read_retry_options;
chip->setup_read_retry = micron_nand_setup_read_retry;
}
@@ -74,8 +106,9 @@ static int micron_nand_onfi_init(struct nand_chip *chip)
return 0;
}
-static int micron_nand_on_die_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
+static int micron_nand_on_die_4_ooblayout_ecc(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
{
if (section >= 4)
return -ERANGE;
@@ -86,8 +119,9 @@ static int micron_nand_on_die_ooblayout_ecc(struct mtd_info *mtd, int section,
return 0;
}
-static int micron_nand_on_die_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
+static int micron_nand_on_die_4_ooblayout_free(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
{
if (section >= 4)
return -ERANGE;
@@ -98,19 +132,161 @@ static int micron_nand_on_die_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
-static const struct mtd_ooblayout_ops micron_nand_on_die_ooblayout_ops = {
- .ecc = micron_nand_on_die_ooblayout_ecc,
- .free = micron_nand_on_die_ooblayout_free,
+static const struct mtd_ooblayout_ops micron_nand_on_die_4_ooblayout_ops = {
+ .ecc = micron_nand_on_die_4_ooblayout_ecc,
+ .free = micron_nand_on_die_4_ooblayout_free,
+};
+
+static int micron_nand_on_die_8_ooblayout_ecc(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = mtd->oobsize - chip->ecc.total;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int micron_nand_on_die_8_ooblayout_free(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = mtd->oobsize - chip->ecc.total - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops micron_nand_on_die_8_ooblayout_ops = {
+ .ecc = micron_nand_on_die_8_ooblayout_ecc,
+ .free = micron_nand_on_die_8_ooblayout_free,
};
static int micron_nand_on_die_ecc_setup(struct nand_chip *chip, bool enable)
{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0, };
+ int ret;
+
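+	/* Nothing to do if on-die ECC is forced or already in the wanted state */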
+ if (micron->ecc.forced)
+ return 0;
+
+ if (micron->ecc.enabled == enable)
+ return 0;
if (enable)
feature[0] |= ONFI_FEATURE_ON_DIE_ECC_EN;
- return nand_set_features(chip, ONFI_FEATURE_ON_DIE_ECC, feature);
+ ret = nand_set_features(chip, ONFI_FEATURE_ON_DIE_ECC, feature);
+ if (!ret)
+ micron->ecc.enabled = enable;
+
+ return ret;
+}
+
+static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status,
+ void *buf, int page,
+ int oob_required)
+{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int step, max_bitflips = 0;
+ int ret;
+
+ if (!(status & NAND_ECC_STATUS_WRITE_RECOMMENDED)) {
+ if (status & NAND_STATUS_FAIL)
+ mtd->ecc_stats.failed++;
+
+ return 0;
+ }
+
+ /*
+ * The internal ECC doesn't tell us the number of bitflips that have
+	 * been corrected, but tells us whether it recommends rewriting the
+	 * block. If so, we need to read the page in raw mode and compare
+ * its content to the corrected version to extract the actual number of
+ * bitflips.
+ * But before we do that, we must make sure we have all OOB bytes read
+ * in non-raw mode, even if the user did not request those bytes.
+ */
+ if (!oob_required) {
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ micron_nand_on_die_ecc_setup(chip, false);
+
+ ret = nand_read_page_op(chip, page, 0, micron->ecc.rawbuf,
+ mtd->writesize + mtd->oobsize);
+ if (ret)
+ return ret;
+
+ for (step = 0; step < chip->ecc.steps; step++) {
+ unsigned int offs, i, nbitflips = 0;
+ u8 *rawbuf, *corrbuf;
+
+ offs = step * chip->ecc.size;
+ rawbuf = micron->ecc.rawbuf + offs;
+ corrbuf = buf + offs;
+
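+		/* Each bit set in corrected XOR raw is one fixed bitflip */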
+ for (i = 0; i < chip->ecc.size; i++)
+ nbitflips += hweight8(corrbuf[i] ^ rawbuf[i]);
+
+ offs = (step * 16) + 4;
+ rawbuf = micron->ecc.rawbuf + mtd->writesize + offs;
+ corrbuf = chip->oob_poi + offs;
+
+ for (i = 0; i < chip->ecc.bytes + 4; i++)
+ nbitflips += hweight8(corrbuf[i] ^ rawbuf[i]);
+
+ if (WARN_ON(nbitflips > chip->ecc.strength))
+ return -EINVAL;
+
+ max_bitflips = max(nbitflips, max_bitflips);
+ mtd->ecc_stats.corrected += nbitflips;
+ }
+
+ return max_bitflips;
+}
+
+static int micron_nand_on_die_ecc_status_8(struct nand_chip *chip, u8 status)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /*
+ * With 8/512 we have more information but still don't know precisely
+ * how many bit-flips were seen.
+ */
+ switch (status & NAND_ECC_STATUS_MASK) {
+ case NAND_ECC_STATUS_UNCORRECTABLE:
+ mtd->ecc_stats.failed++;
+ return 0;
+ case NAND_ECC_STATUS_1_3_CORRECTED:
+ mtd->ecc_stats.corrected += 3;
+ return 3;
+ case NAND_ECC_STATUS_4_6_CORRECTED:
+ mtd->ecc_stats.corrected += 6;
+ /* rewrite recommended */
+ return 6;
+ case NAND_ECC_STATUS_7_8_CORRECTED:
+ mtd->ecc_stats.corrected += 8;
+ /* rewrite recommended */
+ return 8;
+ default:
+ return 0;
+ }
}
static int
@@ -137,24 +313,18 @@ micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
if (ret)
goto out;
- if (status & NAND_STATUS_FAIL)
- mtd->ecc_stats.failed++;
-
- /*
- * The internal ECC doesn't tell us the number of bitflips
- * that have been corrected, but tells us if it recommends to
- * rewrite the block. If it's the case, then we pretend we had
- * a number of bitflips equal to the ECC strength, which will
- * hint the NAND core to rewrite the block.
- */
- else if (status & NAND_STATUS_WRITE_RECOMMENDED)
- max_bitflips = chip->ecc.strength;
-
ret = nand_read_data_op(chip, buf, mtd->writesize, false);
if (!ret && oob_required)
ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
false);
+ if (chip->ecc.strength == 4)
+ max_bitflips = micron_nand_on_die_ecc_status_4(chip, status,
+ buf, page,
+ oob_required);
+ else
+ max_bitflips = micron_nand_on_die_ecc_status_8(chip, status);
+
out:
micron_nand_on_die_ecc_setup(chip, false);
@@ -195,6 +365,9 @@ enum {
MICRON_ON_DIE_MANDATORY,
};
+#define MICRON_ID_INTERNAL_ECC_MASK GENMASK(1, 0)
+#define MICRON_ID_ECC_ENABLED BIT(7)
+
/*
 * Try to detect if the NAND supports on-die ECC. To do this, we enable
* the feature, and read back if it has been enabled as expected. We
@@ -207,42 +380,52 @@ enum {
*/
static int micron_supports_on_die_ecc(struct nand_chip *chip)
{
- u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0, };
+ u8 id[5];
int ret;
- if (!chip->parameters.onfi.version)
+ if (!chip->parameters.onfi)
return MICRON_ON_DIE_UNSUPPORTED;
if (chip->bits_per_cell != 1)
return MICRON_ON_DIE_UNSUPPORTED;
+ /*
+ * We only support on-die ECC of 4/512 or 8/512
+ */
+ if (chip->ecc_strength_ds != 4 && chip->ecc_strength_ds != 8)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ /* 0x2 means on-die ECC is available. */
+ if (chip->id.len != 5 ||
+ (chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
ret = micron_nand_on_die_ecc_setup(chip, true);
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
- ret = nand_get_features(chip, ONFI_FEATURE_ON_DIE_ECC, feature);
- if (ret < 0)
- return ret;
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
- if ((feature[0] & ONFI_FEATURE_ON_DIE_ECC_EN) == 0)
+ if (!(id[4] & MICRON_ID_ECC_ENABLED))
return MICRON_ON_DIE_UNSUPPORTED;
ret = micron_nand_on_die_ecc_setup(chip, false);
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
- ret = nand_get_features(chip, ONFI_FEATURE_ON_DIE_ECC, feature);
- if (ret < 0)
- return ret;
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
- if (feature[0] & ONFI_FEATURE_ON_DIE_ECC_EN)
+ if (id[4] & MICRON_ID_ECC_ENABLED)
return MICRON_ON_DIE_MANDATORY;
/*
- * Some Micron NANDs have an on-die ECC of 4/512, some other
- * 8/512. We only support the former.
+ * We only support on-die ECC of 4/512 or 8/512
*/
- if (chip->ecc_strength_ds != 4)
+ if (chip->ecc_strength_ds != 4 && chip->ecc_strength_ds != 8)
return MICRON_ON_DIE_UNSUPPORTED;
return MICRON_ON_DIE_SUPPORTED;
@@ -251,44 +434,116 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
static int micron_nand_init(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct micron_nand *micron;
int ondie;
int ret;
+ micron = kzalloc(sizeof(*micron), GFP_KERNEL);
+ if (!micron)
+ return -ENOMEM;
+
+ nand_set_manufacturer_data(chip, micron);
+
ret = micron_nand_onfi_init(chip);
if (ret)
- return ret;
+ goto err_free_manuf_data;
if (mtd->writesize == 2048)
chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
ondie = micron_supports_on_die_ecc(chip);
- if (ondie == MICRON_ON_DIE_MANDATORY) {
+ if (ondie == MICRON_ON_DIE_MANDATORY &&
+ chip->ecc.mode != NAND_ECC_ON_DIE) {
pr_err("On-die ECC forcefully enabled, not supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_free_manuf_data;
}
if (chip->ecc.mode == NAND_ECC_ON_DIE) {
if (ondie == MICRON_ON_DIE_UNSUPPORTED) {
pr_err("On-die ECC selected but not supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_free_manuf_data;
+ }
+
+ if (ondie == MICRON_ON_DIE_MANDATORY) {
+ micron->ecc.forced = true;
+ micron->ecc.enabled = true;
+ }
+
+ /*
+	 * In case of 4-bit on-die ECC, we need a buffer to store a
+	 * page dumped in raw mode so that we can compare its content
+	 * to the same page after ECC correction happened and extract
+	 * the real number of bitflips from this comparison.
+	 * That's not needed for 8-bit ECC, because the status exposes
+	 * a better approximation of the number of bitflips in a page.
+ */
+ if (chip->ecc_strength_ds == 4) {
+ micron->ecc.rawbuf = kmalloc(mtd->writesize +
+ mtd->oobsize,
+ GFP_KERNEL);
+ if (!micron->ecc.rawbuf) {
+ ret = -ENOMEM;
+ goto err_free_manuf_data;
+ }
}
- chip->ecc.bytes = 8;
+ if (chip->ecc_strength_ds == 4)
+ mtd_set_ooblayout(mtd,
+ &micron_nand_on_die_4_ooblayout_ops);
+ else
+ mtd_set_ooblayout(mtd,
+ &micron_nand_on_die_8_ooblayout_ops);
+
+ chip->ecc.bytes = chip->ecc_strength_ds * 2;
chip->ecc.size = 512;
- chip->ecc.strength = 4;
+ chip->ecc.strength = chip->ecc_strength_ds;
chip->ecc.algo = NAND_ECC_BCH;
chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
- chip->ecc.read_page_raw = nand_read_page_raw;
- chip->ecc.write_page_raw = nand_write_page_raw;
- mtd_set_ooblayout(mtd, &micron_nand_on_die_ooblayout_ops);
+ if (ondie == MICRON_ON_DIE_MANDATORY) {
+ chip->ecc.read_page_raw = nand_read_page_raw_notsupp;
+ chip->ecc.write_page_raw = nand_write_page_raw_notsupp;
+ } else {
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ }
}
return 0;
+
+err_free_manuf_data:
+ kfree(micron->ecc.rawbuf);
+ kfree(micron);
+
+ return ret;
+}
+
+static void micron_nand_cleanup(struct nand_chip *chip)
+{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
+
+ kfree(micron->ecc.rawbuf);
+ kfree(micron);
+}
+
+static void micron_fixup_onfi_param_page(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ /*
+ * MT29F1G08ABAFAWP-ITE:F and possibly others report 00 00 for the
+ * revision number field of the ONFI parameter page. Assume ONFI
+ * version 1.0 if the revision number is 00 00.
+ */
+ if (le16_to_cpu(p->revision) == 0)
+ p->revision = cpu_to_le16(ONFI_VERSION_1_0);
}
const struct nand_manufacturer_ops micron_nand_manuf_ops = {
.init = micron_nand_init,
+ .cleanup = micron_nand_cleanup,
+ .fixup_onfi_param_page = micron_fixup_onfi_param_page,
};
diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c
index 7c4e4a371bbc..ebc7b5f76f77 100644
--- a/drivers/mtd/nand/raw/nand_timings.c
+++ b/drivers/mtd/nand/raw/nand_timings.c
@@ -13,6 +13,8 @@
#include <linux/export.h>
#include <linux/mtd/rawnand.h>
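+/* ONFI timing fields (tR, tPROG, tBERS) are 16 bits wide in the param page */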
+#define ONFI_DYN_TIMING_MAX U16_MAX
+
static const struct nand_data_interface onfi_sdr_timings[] = {
/* Mode 0 */
{
@@ -292,6 +294,7 @@ int onfi_fill_data_interface(struct nand_chip *chip,
int timing_mode)
{
struct nand_data_interface *iface = &chip->data_interface;
+ struct onfi_params *onfi = chip->parameters.onfi;
if (type != NAND_SDR_IFACE)
return -EINVAL;
@@ -303,20 +306,35 @@ int onfi_fill_data_interface(struct nand_chip *chip,
/*
* Initialize timings that cannot be deduced from timing mode:
- * tR, tPROG, tCCS, ...
+ * tPROG, tBERS, tR and tCCS.
 * This information is part of the ONFI parameter page.
*/
- if (chip->parameters.onfi.version) {
- struct nand_parameters *params = &chip->parameters;
+ if (onfi) {
+ struct nand_sdr_timings *timings = &iface->timings.sdr;
+
+ /* microseconds -> picoseconds */
+ timings->tPROG_max = 1000000ULL * onfi->tPROG;
+ timings->tBERS_max = 1000000ULL * onfi->tBERS;
+ timings->tR_max = 1000000ULL * onfi->tR;
+
+ /* nanoseconds -> picoseconds */
+ timings->tCCS_min = 1000UL * onfi->tCCS;
+ } else {
struct nand_sdr_timings *timings = &iface->timings.sdr;
+ /*
+ * For non-ONFI chips we use the highest possible value for
+ * tPROG and tBERS. tR and tCCS will take the default values
+		 * specified in the ONFI specification for timing mode 0,
+		 * i.e. 200us and 500ns respectively.
+ */
/* microseconds -> picoseconds */
- timings->tPROG_max = 1000000ULL * params->onfi.tPROG;
- timings->tBERS_max = 1000000ULL * params->onfi.tBERS;
- timings->tR_max = 1000000ULL * params->onfi.tR;
+ timings->tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX;
+ timings->tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX;
+ timings->tR_max = 1000000ULL * 200000000ULL;
/* nanoseconds -> picoseconds */
- timings->tCCS_min = 1000UL * params->onfi.tCCS;
+ timings->tCCS_min = 1000UL * 500000;
}
return 0;
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index f8edacde49ab..71ac034aee9c 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -2192,6 +2192,48 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
return;
}
+static int ns_attach_chip(struct nand_chip *chip)
+{
+ unsigned int eccsteps, eccbytes;
+
+ if (!bch)
+ return 0;
+
+ if (!mtd_nand_has_bch()) {
+ NS_ERR("BCH ECC support is disabled\n");
+ return -EINVAL;
+ }
+
+ /* Use 512-byte ecc blocks */
+ eccsteps = nsmtd->writesize / 512;
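+	/* BCH needs 13 bits per corrected bit over a 512-byte step; round up */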
+ eccbytes = ((bch * 13) + 7) / 8;
+
+ /* Do not bother supporting small page devices */
+ if (nsmtd->oobsize < 64 || !eccsteps) {
+ NS_ERR("BCH not available on small page devices\n");
+ return -EINVAL;
+ }
+
+ if (((eccbytes * eccsteps) + 2) > nsmtd->oobsize) {
+ NS_ERR("Invalid BCH value %u\n", bch);
+ return -EINVAL;
+ }
+
+ chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_BCH;
+ chip->ecc.size = 512;
+ chip->ecc.strength = bch;
+ chip->ecc.bytes = eccbytes;
+
+ NS_INFO("Using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
+
+ return 0;
+}
+
+static const struct nand_controller_ops ns_controller_ops = {
+ .attach_chip = ns_attach_chip,
+};
+
/*
* Module initialization function
*/
@@ -2276,44 +2318,10 @@ static int __init ns_init_module(void)
if ((retval = parse_gravepages()) != 0)
goto error;
- retval = nand_scan_ident(nsmtd, 1, NULL);
- if (retval) {
- NS_ERR("cannot scan NAND Simulator device\n");
- goto error;
- }
-
- if (bch) {
- unsigned int eccsteps, eccbytes;
- if (!mtd_nand_has_bch()) {
- NS_ERR("BCH ECC support is disabled\n");
- retval = -EINVAL;
- goto error;
- }
- /* use 512-byte ecc blocks */
- eccsteps = nsmtd->writesize/512;
- eccbytes = (bch*13+7)/8;
- /* do not bother supporting small page devices */
- if ((nsmtd->oobsize < 64) || !eccsteps) {
- NS_ERR("bch not available on small page devices\n");
- retval = -EINVAL;
- goto error;
- }
- if ((eccbytes*eccsteps+2) > nsmtd->oobsize) {
- NS_ERR("invalid bch value %u\n", bch);
- retval = -EINVAL;
- goto error;
- }
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_BCH;
- chip->ecc.size = 512;
- chip->ecc.strength = bch;
- chip->ecc.bytes = eccbytes;
- NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
- }
-
- retval = nand_scan_tail(nsmtd);
+ chip->dummy_controller.ops = &ns_controller_ops;
+ retval = nand_scan(nsmtd, 1);
if (retval) {
- NS_ERR("can't register NAND Simulator\n");
+ NS_ERR("Could not scan NAND Simulator device\n");
goto error;
}
@@ -2337,7 +2345,7 @@ static int __init ns_init_module(void)
if ((retval = init_nandsim(nsmtd)) != 0)
goto err_exit;
- if ((retval = chip->scan_bbt(nsmtd)) != 0)
+ if ((retval = nand_create_bbt(chip)) != 0)
goto err_exit;
if ((retval = parse_badblocks(nand, nsmtd)) != 0)
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index d8a806894937..540fa1a0ea24 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -39,7 +39,7 @@ struct ndfc_controller {
void __iomem *ndfcbase;
struct nand_chip chip;
int chip_select;
- struct nand_hw_control ndfc_control;
+ struct nand_controller ndfc_control;
};
static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
@@ -218,7 +218,7 @@ static int ndfc_probe(struct platform_device *ofdev)
ndfc = &ndfc_ctrl[cs];
ndfc->chip_select = cs;
- nand_hw_control_init(&ndfc->ndfc_control);
+ nand_controller_init(&ndfc->ndfc_control);
ndfc->ofdev = ofdev;
dev_set_drvdata(&ofdev->dev, ndfc);
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index e50c64adc3c8..4546ac0bed4a 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -144,12 +144,6 @@ static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
0xac, 0x6b, 0xff, 0x99, 0x7b};
static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
-/* Shared among all NAND instances to synchronize access to the ECC Engine */
-static struct nand_hw_control omap_gpmc_controller = {
- .lock = __SPIN_LOCK_UNLOCKED(omap_gpmc_controller.lock),
- .wq = __WAIT_QUEUE_HEAD_INITIALIZER(omap_gpmc_controller.wq),
-};
-
struct omap_nand_info {
struct nand_chip nand;
struct platform_device *pdev;
@@ -1915,106 +1909,26 @@ static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
.free = omap_sw_ooblayout_free,
};
-static int omap_nand_probe(struct platform_device *pdev)
+static int omap_nand_attach_chip(struct nand_chip *chip)
{
- struct omap_nand_info *info;
- struct mtd_info *mtd;
- struct nand_chip *nand_chip;
- int err;
- dma_cap_mask_t mask;
- struct resource *res;
- struct device *dev = &pdev->dev;
- int min_oobbytes = BADBLOCK_MARKER_LENGTH;
- int oobbytes_per_step;
-
- info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
- GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->pdev = pdev;
-
- err = omap_get_dt_info(dev, info);
- if (err)
- return err;
-
- info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
- if (!info->ops) {
- dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
- return -ENODEV;
- }
-
- nand_chip = &info->nand;
- mtd = nand_to_mtd(nand_chip);
- mtd->dev.parent = &pdev->dev;
- nand_chip->ecc.priv = NULL;
- nand_set_flash_node(nand_chip, dev->of_node);
-
- if (!mtd->name) {
- mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "omap2-nand.%d", info->gpmc_cs);
- if (!mtd->name) {
- dev_err(&pdev->dev, "Failed to set MTD name\n");
- return -ENOMEM;
- }
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(nand_chip->IO_ADDR_R))
- return PTR_ERR(nand_chip->IO_ADDR_R);
-
- info->phys_base = res->start;
-
- nand_chip->controller = &omap_gpmc_controller;
-
- nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
- nand_chip->cmd_ctrl = omap_hwcontrol;
-
- info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
- GPIOD_IN);
- if (IS_ERR(info->ready_gpiod)) {
- dev_err(dev, "failed to get ready gpio\n");
- return PTR_ERR(info->ready_gpiod);
- }
-
- /*
- * If RDY/BSY line is connected to OMAP then use the omap ready
- * function and the generic nand_wait function which reads the status
- * register after monitoring the RDY/BSY line. Otherwise use a standard
- * chip delay which is slightly more than tR (AC Timing) of the NAND
- * device and read status register until you get a failure or success
- */
- if (info->ready_gpiod) {
- nand_chip->dev_ready = omap_dev_ready;
- nand_chip->chip_delay = 0;
- } else {
- nand_chip->waitfunc = omap_wait;
- nand_chip->chip_delay = 50;
- }
-
- if (info->flash_bbt)
- nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
-
- /* scan NAND device connected to chip controller */
- nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
- err = nand_scan_ident(mtd, 1, NULL);
- if (err) {
- dev_err(&info->pdev->dev,
- "scan failed, may be bus-width mismatch\n");
- goto return_error;
- }
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct device *dev = &info->pdev->dev;
+ int min_oobbytes = BADBLOCK_MARKER_LENGTH;
+ int oobbytes_per_step;
+ dma_cap_mask_t mask;
+ int err;
- if (nand_chip->bbt_options & NAND_BBT_USE_FLASH)
- nand_chip->bbt_options |= NAND_BBT_NO_OOB;
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
else
- nand_chip->options |= NAND_SKIP_BBTSCAN;
+ chip->options |= NAND_SKIP_BBTSCAN;
- /* re-populate low-level callbacks based on xfer modes */
+ /* Re-populate low-level callbacks based on xfer modes */
switch (info->xfer_type) {
case NAND_OMAP_PREFETCH_POLLED:
- nand_chip->read_buf = omap_read_buf_pref;
- nand_chip->write_buf = omap_write_buf_pref;
+ chip->read_buf = omap_read_buf_pref;
+ chip->write_buf = omap_write_buf_pref;
break;
case NAND_OMAP_POLLED:
@@ -2024,12 +1938,11 @@ static int omap_nand_probe(struct platform_device *pdev)
case NAND_OMAP_PREFETCH_DMA:
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- info->dma = dma_request_chan(pdev->dev.parent, "rxtx");
+ info->dma = dma_request_chan(dev, "rxtx");
if (IS_ERR(info->dma)) {
- dev_err(&pdev->dev, "DMA engine request failed\n");
- err = PTR_ERR(info->dma);
- goto return_error;
+ dev_err(dev, "DMA engine request failed\n");
+ return PTR_ERR(info->dma);
} else {
struct dma_slave_config cfg;
@@ -2042,222 +1955,306 @@ static int omap_nand_probe(struct platform_device *pdev)
cfg.dst_maxburst = 16;
err = dmaengine_slave_config(info->dma, &cfg);
if (err) {
- dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
+ dev_err(dev,
+ "DMA engine slave config failed: %d\n",
err);
- goto return_error;
+ return err;
}
- nand_chip->read_buf = omap_read_buf_dma_pref;
- nand_chip->write_buf = omap_write_buf_dma_pref;
+ chip->read_buf = omap_read_buf_dma_pref;
+ chip->write_buf = omap_write_buf_dma_pref;
}
break;
case NAND_OMAP_PREFETCH_IRQ:
- info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
+ info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
if (info->gpmc_irq_fifo <= 0) {
- dev_err(&pdev->dev, "error getting fifo irq\n");
- err = -ENODEV;
- goto return_error;
+ dev_err(dev, "Error getting fifo IRQ\n");
+ return -ENODEV;
}
- err = devm_request_irq(&pdev->dev, info->gpmc_irq_fifo,
- omap_nand_irq, IRQF_SHARED,
- "gpmc-nand-fifo", info);
+ err = devm_request_irq(dev, info->gpmc_irq_fifo,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-fifo", info);
if (err) {
- dev_err(&pdev->dev, "requesting irq(%d) error:%d",
- info->gpmc_irq_fifo, err);
+ dev_err(dev, "Requesting IRQ %d, error %d\n",
+ info->gpmc_irq_fifo, err);
info->gpmc_irq_fifo = 0;
- goto return_error;
+ return err;
}
- info->gpmc_irq_count = platform_get_irq(pdev, 1);
+ info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
if (info->gpmc_irq_count <= 0) {
- dev_err(&pdev->dev, "error getting count irq\n");
- err = -ENODEV;
- goto return_error;
+ dev_err(dev, "Error getting IRQ count\n");
+ return -ENODEV;
}
- err = devm_request_irq(&pdev->dev, info->gpmc_irq_count,
- omap_nand_irq, IRQF_SHARED,
- "gpmc-nand-count", info);
+ err = devm_request_irq(dev, info->gpmc_irq_count,
+ omap_nand_irq, IRQF_SHARED,
+ "gpmc-nand-count", info);
if (err) {
- dev_err(&pdev->dev, "requesting irq(%d) error:%d",
- info->gpmc_irq_count, err);
+ dev_err(dev, "Requesting IRQ %d, error %d\n",
+ info->gpmc_irq_count, err);
info->gpmc_irq_count = 0;
- goto return_error;
+ return err;
}
- nand_chip->read_buf = omap_read_buf_irq_pref;
- nand_chip->write_buf = omap_write_buf_irq_pref;
+ chip->read_buf = omap_read_buf_irq_pref;
+ chip->write_buf = omap_write_buf_irq_pref;
break;
default:
- dev_err(&pdev->dev,
- "xfer_type(%d) not supported!\n", info->xfer_type);
- err = -EINVAL;
- goto return_error;
+ dev_err(dev, "xfer_type %d not supported!\n", info->xfer_type);
+ return -EINVAL;
}
- if (!omap2_nand_ecc_check(info)) {
- err = -EINVAL;
- goto return_error;
- }
+ if (!omap2_nand_ecc_check(info))
+ return -EINVAL;
/*
* Bail out earlier to let NAND_ECC_SOFT code create its own
* ooblayout instead of using ours.
*/
if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
- nand_chip->ecc.mode = NAND_ECC_SOFT;
- nand_chip->ecc.algo = NAND_ECC_HAMMING;
- goto scan_tail;
+ chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
+ return 0;
}
- /* populate MTD interface based on ECC scheme */
+ /* Populate MTD interface based on ECC scheme */
switch (info->ecc_opt) {
case OMAP_ECC_HAM1_CODE_HW:
- pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.bytes = 3;
- nand_chip->ecc.size = 512;
- nand_chip->ecc.strength = 1;
- nand_chip->ecc.calculate = omap_calculate_ecc;
- nand_chip->ecc.hwctl = omap_enable_hwecc;
- nand_chip->ecc.correct = omap_correct_data;
+ dev_info(dev, "nand: using OMAP_ECC_HAM1_CODE_HW\n");
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.bytes = 3;
+ chip->ecc.size = 512;
+ chip->ecc.strength = 1;
+ chip->ecc.calculate = omap_calculate_ecc;
+ chip->ecc.hwctl = omap_enable_hwecc;
+ chip->ecc.correct = omap_correct_data;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
- oobbytes_per_step = nand_chip->ecc.bytes;
+ oobbytes_per_step = chip->ecc.bytes;
- if (!(nand_chip->options & NAND_BUSWIDTH_16))
- min_oobbytes = 1;
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ min_oobbytes = 1;
break;
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.size = 512;
- nand_chip->ecc.bytes = 7;
- nand_chip->ecc.strength = 4;
- nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
- nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 7;
+ chip->ecc.strength = 4;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = nand_bch_correct_data;
+ chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
- oobbytes_per_step = nand_chip->ecc.bytes + 1;
- /* software bch library is used for locating errors */
- nand_chip->ecc.priv = nand_bch_init(mtd);
- if (!nand_chip->ecc.priv) {
- dev_err(&info->pdev->dev, "unable to use BCH library\n");
- err = -EINVAL;
- goto return_error;
+ oobbytes_per_step = chip->ecc.bytes + 1;
+ /* Software BCH library is used for locating errors */
+ chip->ecc.priv = nand_bch_init(mtd);
+ if (!chip->ecc.priv) {
+ dev_err(dev, "Unable to use BCH library\n");
+ return -EINVAL;
}
break;
case OMAP_ECC_BCH4_CODE_HW:
pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.size = 512;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 512;
/* 14th bit is kept reserved for ROM-code compatibility */
- nand_chip->ecc.bytes = 7 + 1;
- nand_chip->ecc.strength = 4;
- nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
- nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.read_page = omap_read_page_bch;
- nand_chip->ecc.write_page = omap_write_page_bch;
- nand_chip->ecc.write_subpage = omap_write_subpage_bch;
+ chip->ecc.bytes = 7 + 1;
+ chip->ecc.strength = 4;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = omap_elm_correct_data;
+ chip->ecc.read_page = omap_read_page_bch;
+ chip->ecc.write_page = omap_write_page_bch;
+ chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
- oobbytes_per_step = nand_chip->ecc.bytes;
+ oobbytes_per_step = chip->ecc.bytes;
err = elm_config(info->elm_dev, BCH4_ECC,
- mtd->writesize / nand_chip->ecc.size,
- nand_chip->ecc.size, nand_chip->ecc.bytes);
+ mtd->writesize / chip->ecc.size,
+ chip->ecc.size, chip->ecc.bytes);
if (err < 0)
- goto return_error;
+ return err;
break;
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.size = 512;
- nand_chip->ecc.bytes = 13;
- nand_chip->ecc.strength = 8;
- nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
- nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 13;
+ chip->ecc.strength = 8;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = nand_bch_correct_data;
+ chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
- oobbytes_per_step = nand_chip->ecc.bytes + 1;
- /* software bch library is used for locating errors */
- nand_chip->ecc.priv = nand_bch_init(mtd);
- if (!nand_chip->ecc.priv) {
- dev_err(&info->pdev->dev, "unable to use BCH library\n");
- err = -EINVAL;
- goto return_error;
+ oobbytes_per_step = chip->ecc.bytes + 1;
+ /* Software BCH library is used for locating errors */
+ chip->ecc.priv = nand_bch_init(mtd);
+ if (!chip->ecc.priv) {
+ dev_err(dev, "unable to use BCH library\n");
+ return -EINVAL;
}
break;
case OMAP_ECC_BCH8_CODE_HW:
pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.size = 512;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 512;
/* 14th bit is kept reserved for ROM-code compatibility */
- nand_chip->ecc.bytes = 13 + 1;
- nand_chip->ecc.strength = 8;
- nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
- nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.read_page = omap_read_page_bch;
- nand_chip->ecc.write_page = omap_write_page_bch;
- nand_chip->ecc.write_subpage = omap_write_subpage_bch;
+ chip->ecc.bytes = 13 + 1;
+ chip->ecc.strength = 8;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = omap_elm_correct_data;
+ chip->ecc.read_page = omap_read_page_bch;
+ chip->ecc.write_page = omap_write_page_bch;
+ chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
- oobbytes_per_step = nand_chip->ecc.bytes;
+ oobbytes_per_step = chip->ecc.bytes;
err = elm_config(info->elm_dev, BCH8_ECC,
- mtd->writesize / nand_chip->ecc.size,
- nand_chip->ecc.size, nand_chip->ecc.bytes);
+ mtd->writesize / chip->ecc.size,
+ chip->ecc.size, chip->ecc.bytes);
if (err < 0)
- goto return_error;
+ return err;
break;
case OMAP_ECC_BCH16_CODE_HW:
- pr_info("using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
- nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.size = 512;
- nand_chip->ecc.bytes = 26;
- nand_chip->ecc.strength = 16;
- nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
- nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.read_page = omap_read_page_bch;
- nand_chip->ecc.write_page = omap_write_page_bch;
- nand_chip->ecc.write_subpage = omap_write_subpage_bch;
+ pr_info("Using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 26;
+ chip->ecc.strength = 16;
+ chip->ecc.hwctl = omap_enable_hwecc_bch;
+ chip->ecc.correct = omap_elm_correct_data;
+ chip->ecc.read_page = omap_read_page_bch;
+ chip->ecc.write_page = omap_write_page_bch;
+ chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
- oobbytes_per_step = nand_chip->ecc.bytes;
+ oobbytes_per_step = chip->ecc.bytes;
err = elm_config(info->elm_dev, BCH16_ECC,
- mtd->writesize / nand_chip->ecc.size,
- nand_chip->ecc.size, nand_chip->ecc.bytes);
+ mtd->writesize / chip->ecc.size,
+ chip->ecc.size, chip->ecc.bytes);
if (err < 0)
- goto return_error;
+ return err;
break;
default:
- dev_err(&info->pdev->dev, "invalid or unsupported ECC scheme\n");
- err = -EINVAL;
- goto return_error;
+ dev_err(dev, "Invalid or unsupported ECC scheme\n");
+ return -EINVAL;
}
- /* check if NAND device's OOB is enough to store ECC signatures */
+ /* Check if NAND device's OOB is enough to store ECC signatures */
min_oobbytes += (oobbytes_per_step *
- (mtd->writesize / nand_chip->ecc.size));
+ (mtd->writesize / chip->ecc.size));
if (mtd->oobsize < min_oobbytes) {
- dev_err(&info->pdev->dev,
- "not enough OOB bytes required = %d, available=%d\n",
+ dev_err(dev,
+ "Not enough OOB bytes: required = %d, available=%d\n",
min_oobbytes, mtd->oobsize);
- err = -EINVAL;
- goto return_error;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops omap_nand_controller_ops = {
+ .attach_chip = omap_nand_attach_chip,
+};
+
+/* Shared among all NAND instances to synchronize access to the ECC Engine */
+static struct nand_controller omap_gpmc_controller = {
+ .lock = __SPIN_LOCK_UNLOCKED(omap_gpmc_controller.lock),
+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(omap_gpmc_controller.wq),
+ .ops = &omap_nand_controller_ops,
+};
+
+static int omap_nand_probe(struct platform_device *pdev)
+{
+ struct omap_nand_info *info;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ int err;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pdev = pdev;
+
+ err = omap_get_dt_info(dev, info);
+ if (err)
+ return err;
+
+ info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
+ if (!info->ops) {
+ dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
+ return -ENODEV;
}
-scan_tail:
- /* second phase scan */
- err = nand_scan_tail(mtd);
+ nand_chip = &info->nand;
+ mtd = nand_to_mtd(nand_chip);
+ mtd->dev.parent = &pdev->dev;
+ nand_chip->ecc.priv = NULL;
+ nand_set_flash_node(nand_chip, dev->of_node);
+
+ if (!mtd->name) {
+ mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "omap2-nand.%d", info->gpmc_cs);
+ if (!mtd->name) {
+ dev_err(&pdev->dev, "Failed to set MTD name\n");
+ return -ENOMEM;
+ }
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nand_chip->IO_ADDR_R))
+ return PTR_ERR(nand_chip->IO_ADDR_R);
+
+ info->phys_base = res->start;
+
+ nand_chip->controller = &omap_gpmc_controller;
+
+ nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
+ nand_chip->cmd_ctrl = omap_hwcontrol;
+
+ info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
+ GPIOD_IN);
+ if (IS_ERR(info->ready_gpiod)) {
+ dev_err(dev, "failed to get ready gpio\n");
+ return PTR_ERR(info->ready_gpiod);
+ }
+
+ /*
+ * If RDY/BSY line is connected to OMAP then use the omap ready
+ * function and the generic nand_wait function which reads the status
+ * register after monitoring the RDY/BSY line. Otherwise use a standard
+ * chip delay which is slightly more than tR (AC Timing) of the NAND
+ * device and read status register until you get a failure or success
+ */
+ if (info->ready_gpiod) {
+ nand_chip->dev_ready = omap_dev_ready;
+ nand_chip->chip_delay = 0;
+ } else {
+ nand_chip->waitfunc = omap_wait;
+ nand_chip->chip_delay = 50;
+ }
+
+ if (info->flash_bbt)
+ nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ /* scan NAND device connected to chip controller */
+ nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
+
+ err = nand_scan(mtd, 1);
if (err)
goto return_error;
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
index 7825fd3ce66b..52d435285a3f 100644
--- a/drivers/mtd/nand/raw/orion_nand.c
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -18,7 +18,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <linux/platform_data/mtd-orion_nand.h>
struct orion_nand_info {
@@ -52,7 +52,7 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct nand_chip *chip = mtd_to_nand(mtd);
void __iomem *io_base = chip->IO_ADDR_R;
-#if __LINUX_ARM_ARCH__ >= 5
+#if defined(__LINUX_ARM_ARCH__) && __LINUX_ARM_ARCH__ >= 5
uint64_t *buf64;
#endif
int i = 0;
@@ -61,7 +61,7 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
*buf++ = readb(io_base);
len--;
}
-#if __LINUX_ARM_ARCH__ >= 5
+#if defined(__LINUX_ARM_ARCH__) && __LINUX_ARM_ARCH__ >= 5
buf64 = (uint64_t *)buf;
while (i < len/8) {
/*
@@ -153,9 +153,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
if (board->width == 16)
nc->options |= NAND_BUSWIDTH_16;
- if (board->dev_ready)
- nc->dev_ready = board->dev_ready;
-
platform_set_drvdata(pdev, info);
/* Not all platforms can gate the clock, so it is not
diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c
index d649d5944826..01b00bb69c1e 100644
--- a/drivers/mtd/nand/raw/oxnas_nand.c
+++ b/drivers/mtd/nand/raw/oxnas_nand.c
@@ -32,7 +32,7 @@
#define OXNAS_NAND_MAX_CHIPS 1
struct oxnas_nand_ctrl {
- struct nand_hw_control base;
+ struct nand_controller base;
void __iomem *io_base;
struct clk *clk;
struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS];
@@ -96,7 +96,7 @@ static int oxnas_nand_probe(struct platform_device *pdev)
if (!oxnas)
return -ENOMEM;
- nand_hw_control_init(&oxnas->base);
+ nand_controller_init(&oxnas->base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
oxnas->io_base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
index 925a1323604d..222626df4b96 100644
--- a/drivers/mtd/nand/raw/plat_nand.c
+++ b/drivers/mtd/nand/raw/plat_nand.c
@@ -67,12 +67,10 @@ static int plat_nand_probe(struct platform_device *pdev)
data->chip.select_chip = pdata->ctrl.select_chip;
data->chip.write_buf = pdata->ctrl.write_buf;
data->chip.read_buf = pdata->ctrl.read_buf;
- data->chip.read_byte = pdata->ctrl.read_byte;
data->chip.chip_delay = pdata->chip.chip_delay;
data->chip.options |= pdata->chip.options;
data->chip.bbt_options |= pdata->chip.bbt_options;
- data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
data->chip.ecc.mode = NAND_ECC_SOFT;
data->chip.ecc.algo = NAND_ECC_HAMMING;
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 6a5519f0ff25..d1d470bb32e4 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -213,6 +213,8 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8
+#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
+
/*
* Flags used in DMA descriptor preparation helper functions
* (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
@@ -245,6 +247,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
* @tx_sgl_start - start index in data sgl for tx.
* @rx_sgl_pos - current index in data sgl for rx.
* @rx_sgl_start - start index in data sgl for rx.
+ * @wait_second_completion - wait for the second DMA desc completion before
+ *			     signalling NAND transfer completion.
+ * @txn_done - completion for NAND transfer.
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
*/
struct bam_transaction {
struct bam_cmd_element *bam_ce;
@@ -258,6 +265,10 @@ struct bam_transaction {
u32 tx_sgl_start;
u32 rx_sgl_pos;
u32 rx_sgl_start;
+ bool wait_second_completion;
+ struct completion txn_done;
+ struct dma_async_tx_descriptor *last_data_desc;
+ struct dma_async_tx_descriptor *last_cmd_desc;
};
/*
@@ -354,7 +365,7 @@ struct nandc_regs {
* from all connected NAND devices pagesize
*/
struct qcom_nand_controller {
- struct nand_hw_control controller;
+ struct nand_controller controller;
struct list_head host_list;
struct device *dev;
@@ -504,6 +515,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn->data_sgl = bam_txn_buf;
+ init_completion(&bam_txn->txn_done);
+
return bam_txn;
}
@@ -523,11 +536,33 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn->tx_sgl_start = 0;
bam_txn->rx_sgl_pos = 0;
bam_txn->rx_sgl_start = 0;
+ bam_txn->last_data_desc = NULL;
+ bam_txn->wait_second_completion = false;
sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
QPIC_PER_CW_CMD_SGL);
sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
QPIC_PER_CW_DATA_SGL);
+
+ reinit_completion(&bam_txn->txn_done);
+}
+
+/* Callback for DMA descriptor completion */
+static void qpic_bam_dma_done(void *data)
+{
+ struct bam_transaction *bam_txn = data;
+
+ /*
+	 * In case of a data transfer with the NAND, 2 callbacks will be
+	 * generated: one for the command channel and one for the data channel.
+ * If current transaction has data descriptors
+ * (i.e. wait_second_completion is true), then set this to false
+ * and wait for second DMA descriptor completion.
+ */
+ if (bam_txn->wait_second_completion)
+ bam_txn->wait_second_completion = false;
+ else
+ complete(&bam_txn->txn_done);
}
static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
@@ -756,6 +791,12 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
desc->dma_desc = dma_desc;
+ /* update last data/command descriptor */
+ if (chan == nandc->cmd_chan)
+ bam_txn->last_cmd_desc = dma_desc;
+ else
+ bam_txn->last_data_desc = dma_desc;
+
list_add_tail(&desc->node, &nandc->desc_list);
return 0;
@@ -1055,7 +1096,8 @@ static void config_nand_page_read(struct qcom_nand_controller *nandc)
* Helper to prepare DMA descriptors for configuring registers
* before reading each codeword in NAND page.
*/
-static void config_nand_cw_read(struct qcom_nand_controller *nandc)
+static void
+config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc)
{
if (nandc->props->is_bam)
write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
@@ -1064,19 +1106,25 @@ static void config_nand_cw_read(struct qcom_nand_controller *nandc)
write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
- read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
- NAND_BAM_NEXT_SGL);
+ if (use_ecc) {
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+ read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
+ } else {
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ }
}
/*
* Helper to prepare dma descriptors to configure registers needed for reading a
* single codeword in page
*/
-static void config_nand_single_cw_page_read(struct qcom_nand_controller *nandc)
+static void
+config_nand_single_cw_page_read(struct qcom_nand_controller *nandc,
+ bool use_ecc)
{
config_nand_page_read(nandc);
- config_nand_cw_read(nandc);
+ config_nand_cw_read(nandc, use_ecc);
}
/*
@@ -1157,7 +1205,7 @@ static int nandc_param(struct qcom_nand_host *host)
nandc->buf_count = 512;
memset(nandc->data_buffer, 0xff, nandc->buf_count);
- config_nand_single_cw_page_read(nandc);
+ config_nand_single_cw_page_read(nandc, false);
read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
nandc->buf_count, 0);
@@ -1273,10 +1321,20 @@ static int submit_descs(struct qcom_nand_controller *nandc)
cookie = dmaengine_submit(desc->dma_desc);
if (nandc->props->is_bam) {
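+		/* Only the last desc of each channel gets a completion callback */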
+ bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
+ bam_txn->last_cmd_desc->callback_param = bam_txn;
+ if (bam_txn->last_data_desc) {
+ bam_txn->last_data_desc->callback = qpic_bam_dma_done;
+ bam_txn->last_data_desc->callback_param = bam_txn;
+ bam_txn->wait_second_completion = true;
+ }
+
dma_async_issue_pending(nandc->tx_chan);
dma_async_issue_pending(nandc->rx_chan);
+ dma_async_issue_pending(nandc->cmd_chan);
- if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
+ if (!wait_for_completion_timeout(&bam_txn->txn_done,
+ QPIC_NAND_COMPLETION_TIMEOUT))
return -ETIMEDOUT;
} else {
if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
@@ -1512,20 +1570,180 @@ struct read_stats {
__le32 erased_cw;
};
+/* reads back FLASH_STATUS register set by the controller */
+static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int i;
+
+ for (i = 0; i < cw_cnt; i++) {
+ u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
+
+ if (flash & (FS_OP_ERR | FS_MPU_ERR))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* performs raw read for one codeword */
+static int
+qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *data_buf, u8 *oob_buf, int page, int cw)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ host->use_ecc = false;
+
+ clear_bam_transaction(nandc);
+ set_address(host, host->cw_size * cw, page);
+ update_rw_regs(host, 1, true);
+ config_nand_page_read(nandc);
+
+ data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
+ oob_size1 = host->bbm_size;
+
+ if (cw == (ecc->steps - 1)) {
+ data_size2 = ecc->size - data_size1 -
+ ((ecc->steps - 1) * 4);
+ oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size2 = host->cw_data - data_size1;
+ oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+ if (nandc->props->is_bam) {
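+		/* Read locations: data before BBM, BBM, remaining data, spare+ECC */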
+ nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
+ read_loc += data_size1;
+
+ nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
+ read_loc += oob_size1;
+
+ nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
+ read_loc += data_size2;
+
+ nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
+ }
+
+ config_nand_cw_read(nandc, false);
+
+ read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+ reg_off += data_size1;
+
+ read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+ reg_off += oob_size1;
+
+ read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+ reg_off += data_size2;
+
+ read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+
+ ret = submit_descs(nandc);
+ free_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
+ return ret;
+ }
+
+ return check_flash_errors(host, 1);
+}
+
+/*
+ * Bitflips can also happen in erased codewords, so this function counts the
+ * number of 0 bits in each CW for which the ECC engine returns an
+ * uncorrectable error. The page is assumed to be erased if this count is
+ * less than or equal to ecc->strength for each CW.
+ *
+ * 1. Both DATA and OOB need to be checked for the number of 0 bits. The
+ *    top-level API can be called with only the data buf or OOB buf, so use
+ *    chip->data_buf if the data buf is NULL and chip->oob_poi if the oob buf
+ *    is NULL for copying the raw bytes.
+ * 2. Perform a raw read for all the CWs which have uncorrectable errors.
+ * 3. For each CW, check the number of 0 bits in cw_data and the usable OOB
+ *    bytes. Bitflips in the BBM and spare bytes won't affect the ECC, so
+ *    don't check the number of bitflips in this area.
+ */
+static int
+check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
+ u8 *oob_buf, unsigned long uncorrectable_cws,
+ int page, unsigned int max_bitflips)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *cw_data_buf, *cw_oob_buf;
+ int cw, data_size, oob_size, ret = 0;
+
+ if (!data_buf) {
+ data_buf = chip->data_buf;
+ chip->pagebuf = -1;
+ }
+
+ if (!oob_buf) {
+ oob_buf = chip->oob_poi;
+ chip->pagebuf = -1;
+ }
+
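+ /*
+ * chip->pagebuf was invalidated above because the raw reads below
+ * clobber the core's internal page buffers.
+ */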
+ for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
+ if (cw == (ecc->steps - 1)) {
+ data_size = ecc->size - ((ecc->steps - 1) * 4);
+ oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
+ } else {
+ data_size = host->cw_data;
+ oob_size = host->ecc_bytes_hw;
+ }
+
+ /* determine starting buffer address for current CW */
+ cw_data_buf = data_buf + (cw * host->cw_data);
+ cw_oob_buf = oob_buf + (cw * ecc->bytes);
+
+ ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
+ cw_oob_buf, page, cw);
+ if (ret)
+ return ret;
+
+ /*
+ * make sure it isn't an erased page reported
+ * as not-erased by HW because of a few bitflips
+ */
+ ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
+ cw_oob_buf + host->bbm_size,
+ oob_size, NULL,
+ 0, ecc->strength);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ }
+ }
+
+ return max_bitflips;
+}
+
/*
* reads back status registers set by the controller to notify page read
* errors. this is equivalent to what 'ecc->correct()' would do.
*/
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
- u8 *oob_buf)
+ u8 *oob_buf, int page)
{
struct nand_chip *chip = &host->chip;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- unsigned int max_bitflips = 0;
+ unsigned int max_bitflips = 0, uncorrectable_cws = 0;
struct read_stats *buf;
+ bool flash_op_err = false, erased;
int i;
+ u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
buf = (struct read_stats *)nandc->reg_read_buf;
nandc_read_buffer_sync(nandc, true);
@@ -1546,48 +1764,49 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
buffer = le32_to_cpu(buf->buffer);
erased_cw = le32_to_cpu(buf->erased_cw);
- if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
- bool erased;
-
- /* ignore erased codeword errors */
+ /*
+ * Check for ECC failure for each codeword. ECC failure can
+ * happen in either of the following conditions:
+ * 1. If the number of bitflips is greater than the ECC
+ * engine's correction capability.
+ * 2. If this codeword contains all 0xff, for which the
+ * erased codeword detection check will be done.
+ */
+ if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
+ /*
+ * For BCH ECC, ignore erased codeword errors if the
+ * ERASED_CW bits are set.
+ */
if (host->bch_enabled) {
erased = (erased_cw & ERASED_CW) == ERASED_CW ?
true : false;
- } else {
+ /*
+ * For RS ECC, HW reports the erased CW by placing
+ * special characters at certain offsets in the buffer.
+ * These special characters will be valid only if the
+ * complete page is read, i.e. data_buf is not NULL.
+ */
+ } else if (data_buf) {
erased = erased_chunk_check_and_fixup(data_buf,
data_len);
+ } else {
+ erased = false;
}
- if (erased) {
- data_buf += data_len;
- if (oob_buf)
- oob_buf += oob_len + ecc->bytes;
- continue;
- }
-
- if (buffer & BS_UNCORRECTABLE_BIT) {
- int ret, ecclen, extraooblen;
- void *eccbuf;
-
- eccbuf = oob_buf ? oob_buf + oob_len : NULL;
- ecclen = oob_buf ? host->ecc_bytes_hw : 0;
- extraooblen = oob_buf ? oob_len : 0;
-
- /*
- * make sure it isn't an erased page reported
- * as not-erased by HW because of a few bitflips
- */
- ret = nand_check_erased_ecc_chunk(data_buf,
- data_len, eccbuf, ecclen, oob_buf,
- extraooblen, ecc->strength);
- if (ret < 0) {
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += ret;
- max_bitflips =
- max_t(unsigned int, max_bitflips, ret);
- }
- }
+ if (!erased)
+ uncorrectable_cws |= BIT(i);
+ /*
+ * Check if MPU or any other operational error (timeout,
+ * device failure, etc.) happened for this codeword and
+ * set flash_op_err. If flash_op_err is set, -EIO will be
+ * returned for the page read.
+ */
+ } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
+ flash_op_err = true;
+ /*
+ * No ECC or operational errors happened. Check the number of
+ * bits corrected and update the ecc_stats.corrected.
+ */
} else {
unsigned int stat;
@@ -1596,12 +1815,21 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
max_bitflips = max(max_bitflips, stat);
}
- data_buf += data_len;
+ if (data_buf)
+ data_buf += data_len;
if (oob_buf)
oob_buf += oob_len + ecc->bytes;
}
- return max_bitflips;
+ if (flash_op_err)
+ return -EIO;
+
+ if (!uncorrectable_cws)
+ return max_bitflips;
+
+ return check_for_erased_page(host, data_buf_start, oob_buf_start,
+ uncorrectable_cws, page,
+ max_bitflips);
}
/*
@@ -1609,11 +1837,12 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
* ecc->read_oob()
*/
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
- u8 *oob_buf)
+ u8 *oob_buf, int page)
{
struct nand_chip *chip = &host->chip;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
int i, ret;
config_nand_page_read(nandc);
@@ -1644,7 +1873,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
}
}
- config_nand_cw_read(nandc);
+ config_nand_cw_read(nandc, true);
if (data_buf)
read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
@@ -1674,12 +1903,14 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
}
ret = submit_descs(nandc);
- if (ret)
- dev_err(nandc->dev, "failure to read page/oob\n");
-
free_descs(nandc);
- return ret;
+ if (ret) {
+ dev_err(nandc->dev, "failure to read page/oob\n");
+ return ret;
+ }
+
+ return parse_read_errors(host, data_buf_start, oob_buf_start, page);
}
/*
@@ -1704,7 +1935,7 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
set_address(host, host->cw_size * (ecc->steps - 1), page);
update_rw_regs(host, 1, true);
- config_nand_single_cw_page_read(nandc);
+ config_nand_single_cw_page_read(nandc, host->use_ecc);
read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
@@ -1724,20 +1955,14 @@ static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
u8 *data_buf, *oob_buf = NULL;
- int ret;
nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
clear_bam_transaction(nandc);
- ret = read_page_ecc(host, data_buf, oob_buf);
- if (ret) {
- dev_err(nandc->dev, "failure to read page\n");
- return ret;
- }
- return parse_read_errors(host, data_buf, oob_buf);
+ return read_page_ecc(host, data_buf, oob_buf, page);
}
/* implements ecc->read_page_raw() */
@@ -1746,77 +1971,20 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
int oob_required, int page)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- u8 *data_buf, *oob_buf;
struct nand_ecc_ctrl *ecc = &chip->ecc;
- int i, ret;
- int read_loc;
-
- nand_read_page_op(chip, page, 0, NULL, 0);
- data_buf = buf;
- oob_buf = chip->oob_poi;
-
- host->use_ecc = false;
+ int cw, ret;
+ u8 *data_buf = buf, *oob_buf = chip->oob_poi;
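+
+ /*
+ * Raw page read is now a simple per-codeword loop around
+ * qcom_nandc_read_cw_raw(); the same helper lets the erased-page
+ * check re-read individual codewords.
+ */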
- clear_bam_transaction(nandc);
- update_rw_regs(host, ecc->steps, true);
- config_nand_page_read(nandc);
-
- for (i = 0; i < ecc->steps; i++) {
- int data_size1, data_size2, oob_size1, oob_size2;
- int reg_off = FLASH_BUF_ACC;
-
- data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
- oob_size1 = host->bbm_size;
-
- if (i == (ecc->steps - 1)) {
- data_size2 = ecc->size - data_size1 -
- ((ecc->steps - 1) << 2);
- oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
- host->spare_bytes;
- } else {
- data_size2 = host->cw_data - data_size1;
- oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
- }
-
- if (nandc->props->is_bam) {
- read_loc = 0;
- nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
- read_loc += data_size1;
-
- nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
- read_loc += oob_size1;
-
- nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
- read_loc += data_size2;
-
- nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
- }
-
- config_nand_cw_read(nandc);
-
- read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
- reg_off += data_size1;
- data_buf += data_size1;
-
- read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
- reg_off += oob_size1;
- oob_buf += oob_size1;
-
- read_data_dma(nandc, reg_off, data_buf, data_size2, 0);
- reg_off += data_size2;
- data_buf += data_size2;
+ for (cw = 0; cw < ecc->steps; cw++) {
+ ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
+ page, cw);
+ if (ret)
+ return ret;
- read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
- oob_buf += oob_size2;
+ data_buf += host->cw_data;
+ oob_buf += ecc->bytes;
}
- ret = submit_descs(nandc);
- if (ret)
- dev_err(nandc->dev, "failure to read raw page\n");
-
- free_descs(nandc);
-
return 0;
}
@@ -1827,7 +1995,6 @@ static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- int ret;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -1836,11 +2003,7 @@ static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
set_address(host, 0, page);
update_rw_regs(host, ecc->steps, true);
- ret = read_page_ecc(host, NULL, chip->oob_poi);
- if (ret)
- dev_err(nandc->dev, "failure to read oob\n");
-
- return ret;
+ return read_page_ecc(host, NULL, chip->oob_poi, page);
}
/* implements ecc->write_page() */
@@ -1988,11 +2151,9 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
/*
* implements ecc->write_oob()
*
- * the NAND controller cannot write only data or only oob within a codeword,
- * since ecc is calculated for the combined codeword. we first copy the
- * entire contents for the last codeword(data + oob), replace the old oob
- * with the new one in chip->oob_poi, and then write the entire codeword.
- * this read-copy-write operation results in a slight performance loss.
+ * the NAND controller cannot write only data or only OOB within a codeword
+ * since ECC is calculated for the combined codeword. So update the OOB from
+ * chip->oob_poi, and pad the data area with 0xFF before writing.
*/
static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
@@ -2005,19 +2166,13 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int ret;
host->use_ecc = true;
-
- clear_bam_transaction(nandc);
- ret = copy_last_cw(host, page);
- if (ret)
- return ret;
-
- clear_read_regs(nandc);
clear_bam_transaction(nandc);
/* calculate the data and oob size for the last codeword/step */
data_size = ecc->size - ((ecc->steps - 1) << 2);
oob_size = mtd->oobavail;
+ memset(nandc->data_buffer, 0xff, host->cw_data);
/* override new oob content to last codeword */
mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
0, mtd->oobavail);
@@ -2049,7 +2204,6 @@ static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int page, ret, bbpos, bad = 0;
- u32 flash_status;
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
@@ -2066,9 +2220,7 @@ static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
if (ret)
goto err;
- flash_status = le32_to_cpu(nandc->reg_read_buf[0]);
-
- if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+ if (check_flash_errors(host, 1)) {
dev_warn(nandc->dev, "error when trying to read BBM\n");
goto err;
}
@@ -2315,27 +2467,40 @@ static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
.free = qcom_nand_ooblayout_free,
};
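+/*
+ * ECC bytes consumed per 512-byte codeword: 12 for 4-bit strength,
+ * 16 for 8-bit strength.
+ */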
-static int qcom_nand_host_setup(struct qcom_nand_host *host)
+static int
+qcom_nandc_calc_ecc_bytes(int step_size, int strength)
+{
+ return strength == 4 ? 12 : 16;
+}
+NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
+ NANDC_STEP_SIZE, 4, 8);
+
+static int qcom_nand_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = &host->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- int cwperpage, bad_block_byte;
+ int cwperpage, bad_block_byte, ret;
bool wide_bus;
int ecc_mode = 1;
+ /* controller only supports 512 bytes data steps */
+ ecc->size = NANDC_STEP_SIZE;
+ wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
+ cwperpage = mtd->writesize / NANDC_STEP_SIZE;
+
/*
- * the controller requires each step consists of 512 bytes of data.
- * bail out if DT has populated a wrong step size.
+ * Each CW has 4 available OOB bytes which are protected with ECC, so
+ * the remaining OOB bytes can be used for ECC data.
*/
- if (ecc->size != NANDC_STEP_SIZE) {
- dev_err(nandc->dev, "invalid ecc size\n");
- return -EINVAL;
+ ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
+ mtd->oobsize - (cwperpage * 4));
+ if (ret) {
+ dev_err(nandc->dev, "No valid ECC settings possible\n");
+ return ret;
}
- wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
-
if (ecc->strength >= 8) {
/* 8 bit ECC defaults to BCH ECC on all platforms */
host->bch_enabled = true;
@@ -2403,7 +2568,6 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
- cwperpage = mtd->writesize / ecc->size;
nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
cwperpage);
@@ -2419,12 +2583,6 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
* for 8 bit ECC
*/
host->cw_size = host->cw_data + ecc->bytes;
-
- if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
- dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
- return -EINVAL;
- }
-
bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
@@ -2482,6 +2640,10 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
return 0;
}
+static const struct nand_controller_ops qcom_nandc_ops = {
+ .attach_chip = qcom_nand_attach_chip,
+};
+
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
int ret;
@@ -2570,7 +2732,8 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
INIT_LIST_HEAD(&nandc->desc_list);
INIT_LIST_HEAD(&nandc->host_list);
- nand_hw_control_init(&nandc->controller);
+ nand_controller_init(&nandc->controller);
+ nandc->controller.ops = &qcom_nandc_ops;
return 0;
}
@@ -2623,9 +2786,9 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
return 0;
}
-static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
- struct qcom_nand_host *host,
- struct device_node *dn)
+static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
+ struct qcom_nand_host *host,
+ struct device_node *dn)
{
struct nand_chip *chip = &host->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -2672,30 +2835,13 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
/* set up initial status value */
host->status = NAND_STATUS_READY | NAND_STATUS_WP;
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret)
- return ret;
-
- ret = qcom_nand_host_setup(host);
-
- return ret;
-}
-
-static int qcom_nand_mtd_register(struct qcom_nand_controller *nandc,
- struct qcom_nand_host *host,
- struct device_node *dn)
-{
- struct nand_chip *chip = &host->chip;
- struct mtd_info *mtd = nand_to_mtd(chip);
- int ret;
-
- ret = nand_scan_tail(mtd);
+ ret = nand_scan(mtd, 1);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
- nand_cleanup(mtd_to_nand(mtd));
+ nand_cleanup(chip);
return ret;
}
@@ -2704,28 +2850,9 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
{
struct device *dev = nandc->dev;
struct device_node *dn = dev->of_node, *child;
- struct qcom_nand_host *host, *tmp;
+ struct qcom_nand_host *host;
int ret;
- for_each_available_child_of_node(dn, child) {
- host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- of_node_put(child);
- return -ENOMEM;
- }
-
- ret = qcom_nand_host_init(nandc, host, child);
- if (ret) {
- devm_kfree(dev, host);
- continue;
- }
-
- list_add_tail(&host->node, &nandc->host_list);
- }
-
- if (list_empty(&nandc->host_list))
- return -ENODEV;
-
if (nandc->props->is_bam) {
free_bam_transaction(nandc);
nandc->bam_txn = alloc_bam_transaction(nandc);
@@ -2736,12 +2863,20 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
}
}
- list_for_each_entry_safe(host, tmp, &nandc->host_list, node) {
- ret = qcom_nand_mtd_register(nandc, host, child);
+ for_each_available_child_of_node(dn, child) {
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ of_node_put(child);
+ return -ENOMEM;
+ }
+
+ ret = qcom_nand_host_init_and_register(nandc, host, child);
if (ret) {
- list_del(&host->node);
devm_kfree(dev, host);
+ continue;
}
+
+ list_add_tail(&host->node, &nandc->host_list);
}
if (list_empty(&nandc->host_list))
@@ -2799,14 +2934,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
nandc->props = dev_data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nandc->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(nandc->base))
- return PTR_ERR(nandc->base);
-
- nandc->base_phys = res->start;
- nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
-
nandc->core_clk = devm_clk_get(dev, "core");
if (IS_ERR(nandc->core_clk))
return PTR_ERR(nandc->core_clk);
@@ -2819,9 +2946,21 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nandc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nandc->base))
+ return PTR_ERR(nandc->base);
+
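+ /*
+ * Map the register resource for DMA so the BAM can address the
+ * controller's flash buffer via nandc->base_dma, replacing the
+ * phys_to_dma() shortcut used before.
+ */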
+ nandc->base_phys = res->start;
+ nandc->base_dma = dma_map_resource(dev, res->start,
+ resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ if (!nandc->base_dma)
+ return -ENXIO;
+
ret = qcom_nandc_alloc(nandc);
if (ret)
- goto err_core_clk;
+ goto err_nandc_alloc;
ret = clk_prepare_enable(nandc->core_clk);
if (ret)
@@ -2847,6 +2986,9 @@ err_aon_clk:
clk_disable_unprepare(nandc->core_clk);
err_core_clk:
qcom_nandc_unalloc(nandc);
+err_nandc_alloc:
+ dma_unmap_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
return ret;
}
@@ -2854,16 +2996,21 @@ err_core_clk:
static int qcom_nandc_remove(struct platform_device *pdev)
{
struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct qcom_nand_host *host;
list_for_each_entry(host, &nandc->host_list, node)
nand_release(nand_to_mtd(&host->chip));
+
qcom_nandc_unalloc(nandc);
clk_disable_unprepare(nandc->aon_clk);
clk_disable_unprepare(nandc->core_clk);
+ dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+
return 0;
}
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index 19661c5d3220..c21e8892394a 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -162,7 +162,7 @@ enum s3c_nand_clk_state {
*/
struct s3c2410_nand_info {
/* mtd info */
- struct nand_hw_control controller;
+ struct nand_controller controller;
struct s3c2410_nand_mtd *mtds;
struct s3c2410_platform_nand *platform;
@@ -802,8 +802,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
mtdinfo->name = set->name;
- return mtd_device_parse_register(mtdinfo, NULL, NULL,
- set->partitions, set->nr_partitions);
+ return mtd_device_register(mtdinfo, set->partitions,
+ set->nr_partitions);
}
return -ENODEV;
@@ -915,20 +915,19 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
}
/**
- * s3c2410_nand_update_chip - post probe update
- * @info: The controller instance.
- * @nmtd: The driver version of the MTD instance.
+ * s3c2410_nand_attach_chip - Init the ECC engine after NAND scan
+ * @chip: The NAND chip
*
- * This routine is called after the chip probe has successfully completed
- * and the relevant per-chip information updated. This call ensure that
+ * This hook is called by the core after the identification of the NAND chip,
+ * once the relevant per-chip information is up to date. This call ensures that
* we update the internal state accordingly.
*
* The internal state is currently limited to the ECC state information.
*/
-static int s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
- struct s3c2410_nand_mtd *nmtd)
+static int s3c2410_nand_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = &nmtd->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
switch (chip->ecc.mode) {
@@ -998,6 +997,10 @@ static int s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
return 0;
}
+static const struct nand_controller_ops s3c24xx_nand_controller_ops = {
+ .attach_chip = s3c2410_nand_attach_chip,
+};
+
static const struct of_device_id s3c24xx_nand_dt_ids[] = {
{
.compatible = "samsung,s3c2410-nand",
@@ -1094,7 +1097,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- nand_hw_control_init(&info->controller);
+ nand_controller_init(&info->controller);
+ info->controller.ops = &s3c24xx_nand_controller_ops;
/* get the clock source and enable it */
@@ -1134,8 +1138,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
- sets = (plat != NULL) ? plat->sets : NULL;
- nr_sets = (plat != NULL) ? plat->nr_sets : 1;
+ if (!plat->sets || plat->nr_sets < 1) {
+ err = -EINVAL;
+ goto exit_error;
+ }
+
+ sets = plat->sets;
+ nr_sets = plat->nr_sets;
info->mtd_count = nr_sets;
@@ -1152,7 +1161,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
nmtd = info->mtds;
- for (setno = 0; setno < nr_sets; setno++, nmtd++) {
+ for (setno = 0; setno < nr_sets; setno++, nmtd++, sets++) {
struct mtd_info *mtd = nand_to_mtd(&nmtd->chip);
pr_debug("initialising set %d (%p, info %p)\n",
@@ -1161,22 +1170,11 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
s3c2410_nand_init_chip(info, nmtd, sets);
- err = nand_scan_ident(mtd, (sets) ? sets->nr_chips : 1, NULL);
- if (err)
- goto exit_error;
-
- err = s3c2410_nand_update_chip(info, nmtd);
- if (err < 0)
- goto exit_error;
-
- err = nand_scan_tail(mtd);
+ err = nand_scan(mtd, sets ? sets->nr_chips : 1);
if (err)
goto exit_error;
s3c2410_nand_add_partition(info, nmtd, sets);
-
- if (sets != NULL)
- sets++;
}
/* initialise the hardware */
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index c7abceffcc40..bb8866e05ff7 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -1002,10 +1002,17 @@ static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
flctl->index += len;
}
-static int flctl_chip_init_tail(struct mtd_info *mtd)
+static int flctl_chip_attach_chip(struct nand_chip *chip)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- struct nand_chip *chip = &flctl->chip;
+
+ /*
+ * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
+ * Add the SEL_16BIT flag to flctl->flcmncr_base.
+ */
+ if (chip->options & NAND_BUSWIDTH_16)
+ flctl->flcmncr_base |= SEL_16BIT;
if (mtd->writesize == 512) {
flctl->page_size = 0;
@@ -1063,6 +1070,10 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
return 0;
}
+static const struct nand_controller_ops flctl_nand_controller_ops = {
+ .attach_chip = flctl_chip_attach_chip,
+};
+
static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
struct sh_flctl *flctl = dev_id;
@@ -1191,25 +1202,8 @@ static int flctl_probe(struct platform_device *pdev)
flctl_setup_dma(flctl);
- ret = nand_scan_ident(flctl_mtd, 1, NULL);
- if (ret)
- goto err_chip;
-
- if (nand->options & NAND_BUSWIDTH_16) {
- /*
- * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
- * Add the SEL_16BIT flag in pdata->flcmncr_val and re-assign
- * flctl->flcmncr_base to pdata->flcmncr_val.
- */
- pdata->flcmncr_val |= SEL_16BIT;
- flctl->flcmncr_base = pdata->flcmncr_val;
- }
-
- ret = flctl_chip_init_tail(flctl_mtd);
- if (ret)
- goto err_chip;
-
- ret = nand_scan_tail(flctl_mtd);
+ nand->dummy_controller.ops = &flctl_nand_controller_ops;
+ ret = nand_scan(flctl_mtd, 1);
if (ret)
goto err_chip;
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index e93df02c825e..fc171b17a39b 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -21,10 +21,7 @@
#include <linux/mtd/sharpsl.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
+#include <linux/io.h>
struct sharpsl_nand {
struct nand_chip chip;
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
index 7f5044a79f01..73aafe8c3ef3 100644
--- a/drivers/mtd/nand/raw/sm_common.c
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -160,19 +160,9 @@ static struct nand_flash_dev nand_xd_flash_ids[] = {
{NULL}
};
-int sm_register_device(struct mtd_info *mtd, int smartmedia)
+static int sm_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- int ret;
-
- chip->options |= NAND_SKIP_BBTSCAN;
-
- /* Scan for card properties */
- ret = nand_scan_ident(mtd, 1, smartmedia ?
- nand_smartmedia_flash_ids : nand_xd_flash_ids);
-
- if (ret)
- return ret;
+ struct mtd_info *mtd = nand_to_mtd(chip);
/* Bad block marker position */
chip->badblockpos = 0x05;
@@ -187,12 +177,33 @@ int sm_register_device(struct mtd_info *mtd, int smartmedia)
else
return -ENODEV;
- ret = nand_scan_tail(mtd);
+ return 0;
+}
+
+static const struct nand_controller_ops sm_controller_ops = {
+ .attach_chip = sm_attach_chip,
+};
+
+int sm_register_device(struct mtd_info *mtd, int smartmedia)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_flash_dev *flash_ids;
+ int ret;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+ /* Scan for card properties */
+ chip->dummy_controller.ops = &sm_controller_ops;
+ flash_ids = smartmedia ? nand_smartmedia_flash_ids : nand_xd_flash_ids;
+ ret = nand_scan_with_ids(mtd, 1, flash_ids);
if (ret)
return ret;
- return mtd_device_register(mtd, NULL, 0);
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ nand_cleanup(chip);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(sm_register_device);
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index d831a141a196..1f0b7ee38df5 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -29,14 +29,12 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/reset.h>
@@ -127,7 +125,7 @@
#define NFC_CMD_TYPE_MSK GENMASK(31, 30)
#define NFC_NORMAL_OP (0 << 30)
#define NFC_ECC_OP (1 << 30)
-#define NFC_PAGE_OP (2 << 30)
+#define NFC_PAGE_OP (2U << 30)
/* define bit use in NFC_RCMD_SET */
#define NFC_READ_CMD_MSK GENMASK(7, 0)
@@ -234,7 +232,7 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
* controller events
*/
struct sunxi_nfc {
- struct nand_hw_control controller;
+ struct nand_controller controller;
struct device *dev;
void __iomem *regs;
struct clk *ahb_clk;
@@ -247,7 +245,7 @@ struct sunxi_nfc {
struct dma_chan *dmac;
};
-static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
+static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_controller *ctrl)
{
return container_of(ctrl, struct sunxi_nfc, controller);
}
@@ -544,7 +542,7 @@ static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
static uint8_t sunxi_nfc_read_byte(struct mtd_info *mtd)
{
- uint8_t ret;
+ uint8_t ret = 0;
sunxi_nfc_read_buf(mtd, &ret, 1);
@@ -1816,12 +1814,21 @@ static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
}
}
-static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
- struct device_node *np)
+static int sunxi_nand_attach_chip(struct nand_chip *nand)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ struct device_node *np = nand_get_flash_node(nand);
int ret;
+ if (nand->bbt_options & NAND_BBT_USE_FLASH)
+ nand->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+
+ nand->options |= NAND_SUBPAGE_READ;
+
if (!ecc->size) {
ecc->size = nand->ecc_step_ds;
ecc->strength = nand->ecc_strength_ds;
@@ -1846,6 +1853,10 @@ static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
return 0;
}
+static const struct nand_controller_ops sunxi_nand_controller_ops = {
+ .attach_chip = sunxi_nand_attach_chip,
+};
+
static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
struct device_node *np)
{
@@ -1911,6 +1922,8 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
/* Default tR value specified in the ONFI spec (chapter 4.15.1) */
nand->chip_delay = 200;
nand->controller = &nfc->controller;
+ nand->controller->ops = &sunxi_nand_controller_ops;
+
/*
* Set the ECC mode to the default value in case nothing is specified
* in the DT.
@@ -1927,30 +1940,10 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
mtd = nand_to_mtd(nand);
mtd->dev.parent = dev;
- ret = nand_scan_ident(mtd, nsels, NULL);
+ ret = nand_scan(mtd, nsels);
if (ret)
return ret;
- if (nand->bbt_options & NAND_BBT_USE_FLASH)
- nand->bbt_options |= NAND_BBT_NO_OOB;
-
- if (nand->options & NAND_NEED_SCRAMBLING)
- nand->options |= NAND_NO_SUBPAGE_WRITE;
-
- nand->options |= NAND_SUBPAGE_READ;
-
- ret = sunxi_nand_ecc_init(mtd, &nand->ecc, np);
- if (ret) {
- dev_err(dev, "ECC init failed: %d\n", ret);
- return ret;
- }
-
- ret = nand_scan_tail(mtd);
- if (ret) {
- dev_err(dev, "nand_scan_tail failed: %d\n", ret);
- return ret;
- }
-
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "failed to register mtd device: %d\n", ret);
@@ -2012,7 +2005,7 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
return -ENOMEM;
nfc->dev = dev;
- nand_hw_control_init(&nfc->controller);
+ nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
index f2052fae21c7..72698691727d 100644
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -83,7 +83,7 @@
#define MAX_CS 4
struct tango_nfc {
- struct nand_hw_control hw;
+ struct nand_controller hw;
void __iomem *reg_base;
void __iomem *mem_base;
void __iomem *pbus_base;
@@ -517,6 +517,28 @@ static int tango_set_timings(struct mtd_info *mtd, int csline,
return 0;
}
+static int tango_attach_chip(struct nand_chip *chip)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ ecc->mode = NAND_ECC_HW;
+ ecc->algo = NAND_ECC_BCH;
+ ecc->bytes = DIV_ROUND_UP(ecc->strength * FIELD_ORDER, BITS_PER_BYTE);
+
+ ecc->read_page_raw = tango_read_page_raw;
+ ecc->write_page_raw = tango_write_page_raw;
+ ecc->read_page = tango_read_page;
+ ecc->write_page = tango_write_page;
+ ecc->read_oob = tango_read_oob;
+ ecc->write_oob = tango_write_oob;
+
+ return 0;
+}
+
+static const struct nand_controller_ops tango_controller_ops = {
+ .attach_chip = tango_attach_chip,
+};
+
static int chip_init(struct device *dev, struct device_node *np)
{
u32 cs;
@@ -566,22 +588,7 @@ static int chip_init(struct device *dev, struct device_node *np)
mtd_set_ooblayout(mtd, &tango_nand_ooblayout_ops);
mtd->dev.parent = dev;
- err = nand_scan_ident(mtd, 1, NULL);
- if (err)
- return err;
-
- ecc->mode = NAND_ECC_HW;
- ecc->algo = NAND_ECC_BCH;
- ecc->bytes = DIV_ROUND_UP(ecc->strength * FIELD_ORDER, BITS_PER_BYTE);
-
- ecc->read_page_raw = tango_read_page_raw;
- ecc->write_page_raw = tango_write_page_raw;
- ecc->read_page = tango_read_page;
- ecc->write_page = tango_write_page;
- ecc->read_oob = tango_read_oob;
- ecc->write_oob = tango_write_oob;
-
- err = nand_scan_tail(mtd);
+ err = nand_scan(mtd, 1);
if (err)
return err;
@@ -654,7 +661,8 @@ static int tango_nand_probe(struct platform_device *pdev)
return PTR_ERR(nfc->chan);
platform_set_drvdata(pdev, nfc);
- nand_hw_control_init(&nfc->hw);
+ nand_controller_init(&nfc->hw);
+ nfc->hw.ops = &tango_controller_ops;
nfc->freq_kHz = clk_get_rate(clk) / 1000;
for_each_child_of_node(pdev->dev.of_node, np) {
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
new file mode 100644
index 000000000000..79da1efc88d1
--- /dev/null
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -0,0 +1,1246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Stefan Agner <stefan@agner.ch>
+ * Copyright (C) 2014-2015 Lucas Stach <dev@lynxeye.de>
+ * Copyright (C) 2012 Avionic Design GmbH
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#define COMMAND 0x00
+#define COMMAND_GO BIT(31)
+#define COMMAND_CLE BIT(30)
+#define COMMAND_ALE BIT(29)
+#define COMMAND_PIO BIT(28)
+#define COMMAND_TX BIT(27)
+#define COMMAND_RX BIT(26)
+#define COMMAND_SEC_CMD BIT(25)
+#define COMMAND_AFT_DAT BIT(24)
+#define COMMAND_TRANS_SIZE(size) ((((size) - 1) & 0xf) << 20)
+#define COMMAND_A_VALID BIT(19)
+#define COMMAND_B_VALID BIT(18)
+#define COMMAND_RD_STATUS_CHK BIT(17)
+#define COMMAND_RBSY_CHK BIT(16)
+#define COMMAND_CE(x) BIT(8 + ((x) & 0x7))
+#define COMMAND_CLE_SIZE(size) ((((size) - 1) & 0x3) << 4)
+#define COMMAND_ALE_SIZE(size) ((((size) - 1) & 0xf) << 0)
+
+#define STATUS 0x04
+
+#define ISR 0x08
+#define ISR_CORRFAIL_ERR BIT(24)
+#define ISR_UND BIT(7)
+#define ISR_OVR BIT(6)
+#define ISR_CMD_DONE BIT(5)
+#define ISR_ECC_ERR BIT(4)
+
+#define IER 0x0c
+#define IER_ERR_TRIG_VAL(x) (((x) & 0xf) << 16)
+#define IER_UND BIT(7)
+#define IER_OVR BIT(6)
+#define IER_CMD_DONE BIT(5)
+#define IER_ECC_ERR BIT(4)
+#define IER_GIE BIT(0)
+
+#define CONFIG 0x10
+#define CONFIG_HW_ECC BIT(31)
+#define CONFIG_ECC_SEL BIT(30)
+#define CONFIG_ERR_COR BIT(29)
+#define CONFIG_PIPE_EN BIT(28)
+#define CONFIG_TVAL_4 (0 << 24)
+#define CONFIG_TVAL_6 (1 << 24)
+#define CONFIG_TVAL_8 (2 << 24)
+#define CONFIG_SKIP_SPARE BIT(23)
+#define CONFIG_BUS_WIDTH_16 BIT(21)
+#define CONFIG_COM_BSY BIT(20)
+#define CONFIG_PS_256 (0 << 16)
+#define CONFIG_PS_512 (1 << 16)
+#define CONFIG_PS_1024 (2 << 16)
+#define CONFIG_PS_2048 (3 << 16)
+#define CONFIG_PS_4096 (4 << 16)
+#define CONFIG_SKIP_SPARE_SIZE_4 (0 << 14)
+#define CONFIG_SKIP_SPARE_SIZE_8 (1 << 14)
+#define CONFIG_SKIP_SPARE_SIZE_12 (2 << 14)
+#define CONFIG_SKIP_SPARE_SIZE_16 (3 << 14)
+#define CONFIG_TAG_BYTE_SIZE(x) ((x) & 0xff)
+
+#define TIMING_1 0x14
+#define TIMING_TRP_RESP(x) (((x) & 0xf) << 28)
+#define TIMING_TWB(x) (((x) & 0xf) << 24)
+#define TIMING_TCR_TAR_TRR(x) (((x) & 0xf) << 20)
+#define TIMING_TWHR(x) (((x) & 0xf) << 16)
+#define TIMING_TCS(x) (((x) & 0x3) << 14)
+#define TIMING_TWH(x) (((x) & 0x3) << 12)
+#define TIMING_TWP(x) (((x) & 0xf) << 8)
+#define TIMING_TRH(x) (((x) & 0x3) << 4)
+#define TIMING_TRP(x) (((x) & 0xf) << 0)
+
+#define RESP 0x18
+
+#define TIMING_2 0x1c
+#define TIMING_TADL(x) ((x) & 0xf)
+
+#define CMD_REG1 0x20
+#define CMD_REG2 0x24
+#define ADDR_REG1 0x28
+#define ADDR_REG2 0x2c
+
+#define DMA_MST_CTRL 0x30
+#define DMA_MST_CTRL_GO BIT(31)
+#define DMA_MST_CTRL_IN (0 << 30)
+#define DMA_MST_CTRL_OUT BIT(30)
+#define DMA_MST_CTRL_PERF_EN BIT(29)
+#define DMA_MST_CTRL_IE_DONE BIT(28)
+#define DMA_MST_CTRL_REUSE BIT(27)
+#define DMA_MST_CTRL_BURST_1 (2 << 24)
+#define DMA_MST_CTRL_BURST_4 (3 << 24)
+#define DMA_MST_CTRL_BURST_8 (4 << 24)
+#define DMA_MST_CTRL_BURST_16 (5 << 24)
+#define DMA_MST_CTRL_IS_DONE BIT(20)
+#define DMA_MST_CTRL_EN_A BIT(2)
+#define DMA_MST_CTRL_EN_B BIT(1)
+
+#define DMA_CFG_A 0x34
+#define DMA_CFG_B 0x38
+
+#define FIFO_CTRL 0x3c
+#define FIFO_CTRL_CLR_ALL BIT(3)
+
+#define DATA_PTR 0x40
+#define TAG_PTR 0x44
+#define ECC_PTR 0x48
+
+#define DEC_STATUS 0x4c
+#define DEC_STATUS_A_ECC_FAIL BIT(1)
+#define DEC_STATUS_ERR_COUNT_MASK 0x00ff0000
+#define DEC_STATUS_ERR_COUNT_SHIFT 16
+
+#define HWSTATUS_CMD 0x50
+#define HWSTATUS_MASK 0x54
+#define HWSTATUS_RDSTATUS_MASK(x) (((x) & 0xff) << 24)
+#define HWSTATUS_RDSTATUS_VALUE(x) (((x) & 0xff) << 16)
+#define HWSTATUS_RBSY_MASK(x) (((x) & 0xff) << 8)
+#define HWSTATUS_RBSY_VALUE(x) (((x) & 0xff) << 0)
+
+#define BCH_CONFIG 0xcc
+#define BCH_ENABLE BIT(0)
+#define BCH_TVAL_4 (0 << 4)
+#define BCH_TVAL_8 (1 << 4)
+#define BCH_TVAL_14 (2 << 4)
+#define BCH_TVAL_16 (3 << 4)
+
+#define DEC_STAT_RESULT 0xd0
+#define DEC_STAT_BUF 0xd4
+#define DEC_STAT_BUF_FAIL_SEC_FLAG_MASK 0xff000000
+#define DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT 24
+#define DEC_STAT_BUF_CORR_SEC_FLAG_MASK 0x00ff0000
+#define DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT 16
+#define DEC_STAT_BUF_MAX_CORR_CNT_MASK 0x00001f00
+#define DEC_STAT_BUF_MAX_CORR_CNT_SHIFT 8
+
+#define OFFSET(val, off) ((val) < (off) ? 0 : (val) - (off))
+
+#define SKIP_SPARE_BYTES 4
+#define BITS_PER_STEP_RS 18
+#define BITS_PER_STEP_BCH 13
+
+#define INT_MASK (IER_UND | IER_OVR | IER_CMD_DONE | IER_GIE)
+#define HWSTATUS_CMD_DEFAULT NAND_STATUS_READY
+#define HWSTATUS_MASK_DEFAULT (HWSTATUS_RDSTATUS_MASK(1) | \
+ HWSTATUS_RDSTATUS_VALUE(0) | \
+ HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | \
+ HWSTATUS_RBSY_VALUE(NAND_STATUS_READY))
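+
+/*
+ * With HWSTATUS_MASK_DEFAULT the read-status check compares bit 0 of the
+ * chip's status byte against 0 (pass), while the ready/busy check waits
+ * for NAND_STATUS_READY; this is what COMMAND_RBSY_CHK relies on.
+ */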
+
+struct tegra_nand_controller {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ struct completion command_complete;
+ struct completion dma_complete;
+ bool last_read_error;
+ int cur_cs;
+ struct nand_chip *chip;
+};
+
+struct tegra_nand_chip {
+ struct nand_chip chip;
+ struct gpio_desc *wp_gpio;
+ struct mtd_oob_region ecc;
+ u32 config;
+ u32 config_ecc;
+ u32 bch_config;
+ int cs[1];
+};
+
+static inline struct tegra_nand_controller *
+ to_tegra_ctrl(struct nand_controller *hw_ctrl)
+{
+ return container_of(hw_ctrl, struct tegra_nand_controller, controller);
+}
+
+static inline struct tegra_nand_chip *to_tegra_chip(struct nand_chip *chip)
+{
+ return container_of(chip, struct tegra_nand_chip, chip);
+}
+
+static int tegra_nand_ooblayout_rs_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_RS * chip->ecc.strength,
+ BITS_PER_BYTE);
+
+ if (section > 0)
+ return -ERANGE;
+
+ oobregion->offset = SKIP_SPARE_BYTES;
+ oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);
+
+ return 0;
+}
+
+static int tegra_nand_ooblayout_no_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ return -ERANGE;
+}
+
+static const struct mtd_ooblayout_ops tegra_nand_oob_rs_ops = {
+ .ecc = tegra_nand_ooblayout_rs_ecc,
+ .free = tegra_nand_ooblayout_no_free,
+};
+
+static int tegra_nand_ooblayout_bch_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_BCH * chip->ecc.strength,
+ BITS_PER_BYTE);
+
+ if (section > 0)
+ return -ERANGE;
+
+ oobregion->offset = SKIP_SPARE_BYTES;
+ oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops tegra_nand_oob_bch_ops = {
+ .ecc = tegra_nand_ooblayout_bch_ecc,
+ .free = tegra_nand_ooblayout_no_free,
+};
+
+static irqreturn_t tegra_nand_irq(int irq, void *data)
+{
+ struct tegra_nand_controller *ctrl = data;
+ u32 isr, dma;
+
+ isr = readl_relaxed(ctrl->regs + ISR);
+ dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
+ dev_dbg(ctrl->dev, "isr %08x\n", isr);
+
+ if (!isr && !(dma & DMA_MST_CTRL_IS_DONE))
+ return IRQ_NONE;
+
+ /*
+ * The bit name is somewhat misleading: this is also set when
+ * HW ECC was successful. The data sheet states:
+ * Correctable OR Un-correctable errors occurred in the DMA transfer...
+ */
+ if (isr & ISR_CORRFAIL_ERR)
+ ctrl->last_read_error = true;
+
+ if (isr & ISR_CMD_DONE)
+ complete(&ctrl->command_complete);
+
+ if (isr & ISR_UND)
+ dev_err(ctrl->dev, "FIFO underrun\n");
+
+ if (isr & ISR_OVR)
+ dev_err(ctrl->dev, "FIFO overrun\n");
+
+ /* handle DMA interrupts */
+ if (dma & DMA_MST_CTRL_IS_DONE) {
+ writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
+ complete(&ctrl->dma_complete);
+ }
+
+ /* clear interrupts */
+ writel_relaxed(isr, ctrl->regs + ISR);
+
+ return IRQ_HANDLED;
+}
+
+static const char * const tegra_nand_reg_names[] = {
+ "COMMAND",
+ "STATUS",
+ "ISR",
+ "IER",
+ "CONFIG",
+ "TIMING",
+ NULL,
+ "TIMING2",
+ "CMD_REG1",
+ "CMD_REG2",
+ "ADDR_REG1",
+ "ADDR_REG2",
+ "DMA_MST_CTRL",
+ "DMA_CFG_A",
+ "DMA_CFG_B",
+ "FIFO_CTRL",
+};
+
+static void tegra_nand_dump_reg(struct tegra_nand_controller *ctrl)
+{
+ u32 reg;
+ int i;
+
+ dev_err(ctrl->dev, "Tegra NAND controller register dump\n");
+ for (i = 0; i < ARRAY_SIZE(tegra_nand_reg_names); i++) {
+ const char *reg_name = tegra_nand_reg_names[i];
+
+ if (!reg_name)
+ continue;
+
+ reg = readl_relaxed(ctrl->regs + (i * 4));
+ dev_err(ctrl->dev, "%s: 0x%08x\n", reg_name, reg);
+ }
+}
+
+static void tegra_nand_controller_abort(struct tegra_nand_controller *ctrl)
+{
+ u32 isr, dma;
+
+ disable_irq(ctrl->irq);
+
+ /* Abort current command/DMA operation */
+ writel_relaxed(0, ctrl->regs + DMA_MST_CTRL);
+ writel_relaxed(0, ctrl->regs + COMMAND);
+
+ /* clear interrupts */
+ isr = readl_relaxed(ctrl->regs + ISR);
+ writel_relaxed(isr, ctrl->regs + ISR);
+ dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
+ writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
+
+ reinit_completion(&ctrl->command_complete);
+ reinit_completion(&ctrl->dma_complete);
+
+ enable_irq(ctrl->irq);
+}
+
+static int tegra_nand_cmd(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ const struct nand_op_instr *instr_data_in = NULL;
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ unsigned int op_id, size = 0, offset = 0;
+ bool first_cmd = true;
+ u32 reg, cmd = 0;
+ int ret;
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int naddrs, i;
+ const u8 *addrs;
+ u32 addr1 = 0, addr2 = 0;
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (first_cmd) {
+ cmd |= COMMAND_CLE;
+ writel_relaxed(instr->ctx.cmd.opcode,
+ ctrl->regs + CMD_REG1);
+ } else {
+ cmd |= COMMAND_SEC_CMD;
+ writel_relaxed(instr->ctx.cmd.opcode,
+ ctrl->regs + CMD_REG2);
+ }
+ first_cmd = false;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(naddrs);
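+ /* pack up to four address cycles into each of ADDR_REG1/ADDR_REG2 */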
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ addr1 |= *addrs++ << (BITS_PER_BYTE * i);
+ naddrs -= i;
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ addr2 |= *addrs++ << (BITS_PER_BYTE * i);
+
+ writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
+ writel_relaxed(addr2, ctrl->regs + ADDR_REG2);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ size = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+
+ cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
+ COMMAND_RX | COMMAND_A_VALID;
+
+ instr_data_in = instr;
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ size = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+
+ cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
+ COMMAND_TX | COMMAND_A_VALID;
+ memcpy(&reg, instr->ctx.data.buf.out + offset, size);
+
+ writel_relaxed(reg, ctrl->regs + RESP);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ cmd |= COMMAND_RBSY_CHK;
+ break;
+ }
+ }
+
+ cmd |= COMMAND_GO | COMMAND_CE(ctrl->cur_cs);
+ writel_relaxed(cmd, ctrl->regs + COMMAND);
+ ret = wait_for_completion_timeout(&ctrl->command_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(ctrl->dev, "COMMAND timeout\n");
+ tegra_nand_dump_reg(ctrl);
+ tegra_nand_controller_abort(ctrl);
+ return -ETIMEDOUT;
+ }
+
+ if (instr_data_in) {
+ reg = readl_relaxed(ctrl->regs + RESP);
+ memcpy(instr_data_in->ctx.data.buf.in + offset, &reg, size);
+ }
+
+ return 0;
+}
+
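+/*
+ * Patterns the COMMAND register can execute in a single shot: up to two
+ * command cycles, up to eight address cycles, an optional R/B wait, and
+ * at most four bytes of PIO data in either direction (transferred via
+ * the RESP register in tegra_nand_cmd() above).
+ */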
+static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 4)),
+ NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
+ );
+
+static int tegra_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
+ check_only);
+}
+
+static void tegra_nand_select_chip(struct mtd_info *mtd, int die_nr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+
+ WARN_ON(die_nr >= (int)ARRAY_SIZE(nand->cs));
+
+ if (die_nr < 0 || die_nr > 0) {
+ ctrl->cur_cs = -1;
+ return;
+ }
+
+ ctrl->cur_cs = nand->cs[die_nr];
+}
+
+static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
+ struct nand_chip *chip, bool enable)
+{
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+
+ if (chip->ecc.algo == NAND_ECC_BCH && enable)
+ writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
+ else
+ writel_relaxed(0, ctrl->regs + BCH_CONFIG);
+
+ if (enable)
+ writel_relaxed(nand->config_ecc, ctrl->regs + CONFIG);
+ else
+ writel_relaxed(nand->config, ctrl->regs + CONFIG);
+}
+
+static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
+ void *buf, void *oob_buf, int oob_len, int page,
+ bool read)
+{
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ dma_addr_t dma_addr = 0, dma_addr_oob = 0;
+ u32 addr1, cmd, dma_ctrl;
+ int ret;
+
+ if (read) {
+ writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
+ writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
+ } else {
+ writel_relaxed(NAND_CMD_SEQIN, ctrl->regs + CMD_REG1);
+ writel_relaxed(NAND_CMD_PAGEPROG, ctrl->regs + CMD_REG2);
+ }
+ cmd = COMMAND_CLE | COMMAND_SEC_CMD;
+
+ /* Lower 16 bits are the column address, 0 by default */
+ addr1 = page << 16;
+
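+ /* an OOB-only transfer starts the column address at the spare area */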
+ if (!buf)
+ addr1 |= mtd->writesize;
+ writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
+ cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(5);
+ } else {
+ cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(4);
+ }
+
+ if (buf) {
+ dma_addr = dma_map_single(ctrl->dev, buf, mtd->writesize, dir);
+ ret = dma_mapping_error(ctrl->dev, dma_addr);
+ if (ret) {
+ dev_err(ctrl->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ writel_relaxed(mtd->writesize - 1, ctrl->regs + DMA_CFG_A);
+ writel_relaxed(dma_addr, ctrl->regs + DATA_PTR);
+ }
+
+ if (oob_buf) {
+ dma_addr_oob = dma_map_single(ctrl->dev, oob_buf, mtd->oobsize,
+ dir);
+ ret = dma_mapping_error(ctrl->dev, dma_addr_oob);
+ if (ret) {
+ dev_err(ctrl->dev, "dma mapping error\n");
+ ret = -EINVAL;
+ goto err_unmap_dma_page;
+ }
+
+ writel_relaxed(oob_len - 1, ctrl->regs + DMA_CFG_B);
+ writel_relaxed(dma_addr_oob, ctrl->regs + TAG_PTR);
+ }
+
+ dma_ctrl = DMA_MST_CTRL_GO | DMA_MST_CTRL_PERF_EN |
+ DMA_MST_CTRL_IE_DONE | DMA_MST_CTRL_IS_DONE |
+ DMA_MST_CTRL_BURST_16;
+
+ if (buf)
+ dma_ctrl |= DMA_MST_CTRL_EN_A;
+ if (oob_buf)
+ dma_ctrl |= DMA_MST_CTRL_EN_B;
+
+ if (read)
+ dma_ctrl |= DMA_MST_CTRL_IN | DMA_MST_CTRL_REUSE;
+ else
+ dma_ctrl |= DMA_MST_CTRL_OUT;
+
+ writel_relaxed(dma_ctrl, ctrl->regs + DMA_MST_CTRL);
+
+ cmd |= COMMAND_GO | COMMAND_RBSY_CHK | COMMAND_TRANS_SIZE(9) |
+ COMMAND_CE(ctrl->cur_cs);
+
+ if (buf)
+ cmd |= COMMAND_A_VALID;
+ if (oob_buf)
+ cmd |= COMMAND_B_VALID;
+
+ if (read)
+ cmd |= COMMAND_RX;
+ else
+ cmd |= COMMAND_TX | COMMAND_AFT_DAT;
+
+ writel_relaxed(cmd, ctrl->regs + COMMAND);
+
+ ret = wait_for_completion_timeout(&ctrl->command_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(ctrl->dev, "COMMAND timeout\n");
+ tegra_nand_dump_reg(ctrl);
+ tegra_nand_controller_abort(ctrl);
+ ret = -ETIMEDOUT;
+ goto err_unmap_dma;
+ }
+
+ ret = wait_for_completion_timeout(&ctrl->dma_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(ctrl->dev, "DMA timeout\n");
+ tegra_nand_dump_reg(ctrl);
+ tegra_nand_controller_abort(ctrl);
+ ret = -ETIMEDOUT;
+ goto err_unmap_dma;
+ }
+ ret = 0;
+
+err_unmap_dma:
+ if (oob_buf)
+ dma_unmap_single(ctrl->dev, dma_addr_oob, mtd->oobsize, dir);
+err_unmap_dma_page:
+ if (buf)
+ dma_unmap_single(ctrl->dev, dma_addr, mtd->writesize, dir);
+
+ return ret;
+}
+
+static int tegra_nand_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+
+ return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
+ mtd->oobsize, page, true);
+}
+
+static int tegra_nand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+
+ return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
+ mtd->oobsize, page, false);
+}
+
+static int tegra_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
+ mtd->oobsize, page, true);
+}
+
+static int tegra_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
+ mtd->oobsize, page, false);
+}
+
+static int tegra_nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+ u32 dec_stat, max_corr_cnt;
+ unsigned long fail_sec_flag;
+ int ret;
+
+ tegra_nand_hw_ecc(ctrl, chip, true);
+ ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
+ tegra_nand_hw_ecc(ctrl, chip, false);
+ if (ret)
+ return ret;
+
+ /* No correctable or un-correctable errors, page must have 0 bitflips */
+ if (!ctrl->last_read_error)
+ return 0;
+
+ /*
+ * Correctable or un-correctable errors occurred. Use DEC_STAT_BUF
+ * which contains information for all ECC selections.
+ *
+ * Note that since we do not use Command Queues DEC_RESULT does not
+ * state the number of pages we can read from the DEC_STAT_BUF. But
+ * since CORRFAIL_ERR did occur during page read we do have a valid
+ * result in DEC_STAT_BUF.
+ */
+ ctrl->last_read_error = false;
+ dec_stat = readl_relaxed(ctrl->regs + DEC_STAT_BUF);
+
+ fail_sec_flag = (dec_stat & DEC_STAT_BUF_FAIL_SEC_FLAG_MASK) >>
+ DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT;
+
+ max_corr_cnt = (dec_stat & DEC_STAT_BUF_MAX_CORR_CNT_MASK) >>
+ DEC_STAT_BUF_MAX_CORR_CNT_SHIFT;
+
+ if (fail_sec_flag) {
+ int bit, max_bitflips = 0;
+
+ /*
+ * Since we do not support subpage writes, a complete page
+ * is either written or not. We can take a shortcut here by
+ * checking wheather any of the sector has been successful
+ * read. If at least one sectors has been read successfully,
+ * the page must have been a written previously. It cannot
+ * be an erased page.
+ *
+ * E.g. controller might return fail_sec_flag with 0x4, which
+ * would mean only the third sector failed to correct. The
+ * page must have been written and the third sector is really
+ * not correctable anymore.
+ */
+ if (fail_sec_flag ^ GENMASK(chip->ecc.steps - 1, 0)) {
+ mtd->ecc_stats.failed += hweight8(fail_sec_flag);
+ return max_corr_cnt;
+ }
+
+ /*
+ * All sectors failed to correct, but the ECC isn't smart
+ * enough to figure out if a page is really just erased.
+ * Read OOB data and check whether data/OOB is completely
+ * erased or if error correction just failed for all sub-
+ * pages.
+ */
+ ret = tegra_nand_read_oob(mtd, chip, page);
+ if (ret < 0)
+ return ret;
+
+ for_each_set_bit(bit, &fail_sec_flag, chip->ecc.steps) {
+ u8 *data = buf + (chip->ecc.size * bit);
+ u8 *oob = chip->oob_poi + nand->ecc.offset +
+ (chip->ecc.bytes * bit);
+
+ ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
+ oob, chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max(ret, max_bitflips);
+ }
+ }
+
+ return max_t(unsigned int, max_corr_cnt, max_bitflips);
+ } else {
+ int corr_sec_flag;
+
+ corr_sec_flag = (dec_stat & DEC_STAT_BUF_CORR_SEC_FLAG_MASK) >>
+ DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT;
+
+ /*
+ * The value returned in the register is the maximum of
+ * bitflips encountered in any of the ECC regions. As there is
+ * no way to get the number of bitflips in a specific region,
+ * we are not able to deliver correct stats but instead
+ * overestimate the number of corrected bitflips by assuming
+ * that all regions where errors have been corrected
+ * encountered the maximum number of bitflips.
+ */
+ mtd->ecc_stats.corrected += max_corr_cnt * hweight8(corr_sec_flag);
+
+ return max_corr_cnt;
+ }
+}
+
+static int tegra_nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ void *oob_buf = oob_required ? chip->oob_poi : NULL;
+ int ret;
+
+ tegra_nand_hw_ecc(ctrl, chip, true);
+ ret = tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
+ 0, page, false);
+ tegra_nand_hw_ecc(ctrl, chip, false);
+
+ return ret;
+}
+
+static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
+ const struct nand_sdr_timings *timings)
+{
+ /*
+ * The period (and all other timings in this function) is in ps,
+ * so we need to take care here to avoid integer overflows.
+ */
+ unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
+ unsigned int period = DIV_ROUND_UP(1000000, rate);
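+ /* rate is in MHz, so 1000000 / rate gives the period in ps */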
+ u32 val, reg = 0;
+
+ val = DIV_ROUND_UP(max3(timings->tAR_min, timings->tRR_min,
+ timings->tRC_min), period);
+ reg |= TIMING_TCR_TAR_TRR(OFFSET(val, 3));
+
+ val = DIV_ROUND_UP(max(max(timings->tCS_min, timings->tCH_min),
+ max(timings->tALS_min, timings->tALH_min)),
+ period);
+ reg |= TIMING_TCS(OFFSET(val, 2));
+
+ val = DIV_ROUND_UP(max(timings->tRP_min, timings->tREA_max) + 6000,
+ period);
+ reg |= TIMING_TRP(OFFSET(val, 1)) | TIMING_TRP_RESP(OFFSET(val, 1));
+
+ reg |= TIMING_TWB(OFFSET(DIV_ROUND_UP(timings->tWB_max, period), 1));
+ reg |= TIMING_TWHR(OFFSET(DIV_ROUND_UP(timings->tWHR_min, period), 1));
+ reg |= TIMING_TWH(OFFSET(DIV_ROUND_UP(timings->tWH_min, period), 1));
+ reg |= TIMING_TWP(OFFSET(DIV_ROUND_UP(timings->tWP_min, period), 1));
+ reg |= TIMING_TRH(OFFSET(DIV_ROUND_UP(timings->tREH_min, period), 1));
+
+ writel_relaxed(reg, ctrl->regs + TIMING_1);
+
+ val = DIV_ROUND_UP(timings->tADL_min, period);
+ reg = TIMING_TADL(OFFSET(val, 3));
+
+ writel_relaxed(reg, ctrl->regs + TIMING_2);
+}
+
+static int tegra_nand_setup_data_interface(struct mtd_info *mtd, int csline,
+ const struct nand_data_interface *conf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ const struct nand_sdr_timings *timings;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ tegra_nand_setup_timing(ctrl, timings);
+
+ return 0;
+}
+
+static const int rs_strength_bootable[] = { 4 };
+static const int rs_strength[] = { 4, 6, 8 };
+static const int bch_strength_bootable[] = { 8, 16 };
+static const int bch_strength[] = { 4, 8, 14, 16 };
+
+static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
+ int strength_len, int bits_per_step,
+ int oobsize)
+{
+ bool maximize = chip->ecc.options & NAND_ECC_MAXIMIZE;
+ int i;
+
+	/*
+	 * Loop through the available strengths. Iterate backwards when
+	 * asked to maximize the ECC strength.
+	 */
+ for (i = 0; i < strength_len; i++) {
+ int strength_sel, bytes_per_step, bytes_per_page;
+
+ if (maximize) {
+ strength_sel = strength[strength_len - i - 1];
+ } else {
+ strength_sel = strength[i];
+
+ if (strength_sel < chip->ecc_strength_ds)
+ continue;
+ }
+
+ bytes_per_step = DIV_ROUND_UP(bits_per_step * strength_sel,
+ BITS_PER_BYTE);
+ bytes_per_page = round_up(bytes_per_step * chip->ecc.steps, 4);
+
+ /* Check whether strength fits OOB */
+ if (bytes_per_page < (oobsize - SKIP_SPARE_BYTES))
+ return strength_sel;
+ }
+
+ return -EINVAL;
+}
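+/*
+ * Example: with BCH and strength 4, assuming BITS_PER_STEP_BCH is 13 bits,
+ * bytes_per_step = DIV_ROUND_UP(13 * 4, 8) = 7 bytes per 512-byte step.
+ */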
+
+static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
+{
+ const int *strength;
+ int strength_len, bits_per_step;
+
+ switch (chip->ecc.algo) {
+ case NAND_ECC_RS:
+ bits_per_step = BITS_PER_STEP_RS;
+ if (chip->options & NAND_IS_BOOT_MEDIUM) {
+ strength = rs_strength_bootable;
+ strength_len = ARRAY_SIZE(rs_strength_bootable);
+ } else {
+ strength = rs_strength;
+ strength_len = ARRAY_SIZE(rs_strength);
+ }
+ break;
+ case NAND_ECC_BCH:
+ bits_per_step = BITS_PER_STEP_BCH;
+ if (chip->options & NAND_IS_BOOT_MEDIUM) {
+ strength = bch_strength_bootable;
+ strength_len = ARRAY_SIZE(bch_strength_bootable);
+ } else {
+ strength = bch_strength;
+ strength_len = ARRAY_SIZE(bch_strength);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tegra_nand_get_strength(chip, strength, strength_len,
+ bits_per_step, oobsize);
+}
+
+static int tegra_nand_attach_chip(struct nand_chip *chip)
+{
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bits_per_step;
+ int ret;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = 512;
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ if (chip->ecc_step_ds != 512) {
+ dev_err(ctrl->dev, "Unsupported step size %d\n",
+ chip->ecc_step_ds);
+ return -EINVAL;
+ }
+
+ chip->ecc.read_page = tegra_nand_read_page_hwecc;
+ chip->ecc.write_page = tegra_nand_write_page_hwecc;
+ chip->ecc.read_page_raw = tegra_nand_read_page_raw;
+ chip->ecc.write_page_raw = tegra_nand_write_page_raw;
+ chip->ecc.read_oob = tegra_nand_read_oob;
+ chip->ecc.write_oob = tegra_nand_write_oob;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ nand->config |= CONFIG_BUS_WIDTH_16;
+
+ if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
+ if (mtd->writesize < 2048)
+ chip->ecc.algo = NAND_ECC_RS;
+ else
+ chip->ecc.algo = NAND_ECC_BCH;
+ }
+
+ if (chip->ecc.algo == NAND_ECC_BCH && mtd->writesize < 2048) {
+ dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
+ return -EINVAL;
+ }
+
+ if (!chip->ecc.strength) {
+ ret = tegra_nand_select_strength(chip, mtd->oobsize);
+ if (ret < 0) {
+ dev_err(ctrl->dev,
+ "No valid strength found, minimum %d\n",
+ chip->ecc_strength_ds);
+ return ret;
+ }
+
+ chip->ecc.strength = ret;
+ }
+
+ nand->config_ecc = CONFIG_PIPE_EN | CONFIG_SKIP_SPARE |
+ CONFIG_SKIP_SPARE_SIZE_4;
+
+ switch (chip->ecc.algo) {
+ case NAND_ECC_RS:
+ bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
+ mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
+ nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
+ CONFIG_ERR_COR;
+ switch (chip->ecc.strength) {
+ case 4:
+ nand->config_ecc |= CONFIG_TVAL_4;
+ break;
+ case 6:
+ nand->config_ecc |= CONFIG_TVAL_6;
+ break;
+ case 8:
+ nand->config_ecc |= CONFIG_TVAL_8;
+ break;
+ default:
+ dev_err(ctrl->dev, "ECC strength %d not supported\n",
+ chip->ecc.strength);
+ return -EINVAL;
+ }
+ break;
+ case NAND_ECC_BCH:
+ bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
+ mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
+ nand->bch_config = BCH_ENABLE;
+ switch (chip->ecc.strength) {
+ case 4:
+ nand->bch_config |= BCH_TVAL_4;
+ break;
+ case 8:
+ nand->bch_config |= BCH_TVAL_8;
+ break;
+ case 14:
+ nand->bch_config |= BCH_TVAL_14;
+ break;
+ case 16:
+ nand->bch_config |= BCH_TVAL_16;
+ break;
+ default:
+ dev_err(ctrl->dev, "ECC strength %d not supported\n",
+ chip->ecc.strength);
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(ctrl->dev, "ECC algorithm not supported\n");
+ return -EINVAL;
+ }
+
+ dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
+ chip->ecc.algo == NAND_ECC_BCH ? "BCH" : "RS",
+ chip->ecc.strength);
+
+ chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);
+
+ switch (mtd->writesize) {
+ case 256:
+ nand->config |= CONFIG_PS_256;
+ break;
+ case 512:
+ nand->config |= CONFIG_PS_512;
+ break;
+ case 1024:
+ nand->config |= CONFIG_PS_1024;
+ break;
+ case 2048:
+ nand->config |= CONFIG_PS_2048;
+ break;
+ case 4096:
+ nand->config |= CONFIG_PS_4096;
+ break;
+ default:
+ dev_err(ctrl->dev, "Unsupported writesize %d\n",
+ mtd->writesize);
+ return -ENODEV;
+ }
+
+ /* Store complete configuration for HW ECC in config_ecc */
+ nand->config_ecc |= nand->config;
+
+ /* Non-HW ECC read/writes complete OOB */
+ nand->config |= CONFIG_TAG_BYTE_SIZE(mtd->oobsize - 1);
+ writel_relaxed(nand->config, ctrl->regs + CONFIG);
+
+ return 0;
+}
+
+static const struct nand_controller_ops tegra_nand_controller_ops = {
+ .attach_chip = &tegra_nand_attach_chip,
+};
+
+static int tegra_nand_chips_init(struct device *dev,
+ struct tegra_nand_controller *ctrl)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *np_nand;
+ int nsels, nchips = of_get_child_count(np);
+ struct tegra_nand_chip *nand;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int ret;
+ u32 cs;
+
+ if (nchips != 1) {
+ dev_err(dev, "Currently only one NAND chip supported\n");
+ return -EINVAL;
+ }
+
+ np_nand = of_get_next_child(np, NULL);
+
+ nsels = of_property_count_elems_of_size(np_nand, "reg", sizeof(u32));
+ if (nsels != 1) {
+ dev_err(dev, "Missing/invalid reg property\n");
+ return -EINVAL;
+ }
+
+ /* Retrieve CS id, currently only single die NAND supported */
+ ret = of_property_read_u32(np_nand, "reg", &cs);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n", ret);
+ return ret;
+ }
+
+ nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ nand->cs[0] = cs;
+
+ nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
+
+ if (IS_ERR(nand->wp_gpio)) {
+ ret = PTR_ERR(nand->wp_gpio);
+ dev_err(dev, "Failed to request WP GPIO: %d\n", ret);
+ return ret;
+ }
+
+ chip = &nand->chip;
+ chip->controller = &ctrl->controller;
+
+ mtd = nand_to_mtd(chip);
+
+ mtd->dev.parent = dev;
+ mtd->owner = THIS_MODULE;
+
+ nand_set_flash_node(chip, np_nand);
+
+ if (!mtd->name)
+ mtd->name = "tegra_nand";
+
+ chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER;
+ chip->exec_op = tegra_nand_exec_op;
+ chip->select_chip = tegra_nand_select_chip;
+ chip->setup_data_interface = tegra_nand_setup_data_interface;
+
+ ret = nand_scan(mtd, 1);
+ if (ret)
+ return ret;
+
+ mtd_ooblayout_ecc(mtd, 0, &nand->ecc);
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "Failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ ctrl->chip = chip;
+
+ return 0;
+}
+
+static int tegra_nand_probe(struct platform_device *pdev)
+{
+ struct reset_control *rst;
+ struct tegra_nand_controller *ctrl;
+ struct resource *res;
+ int err = 0;
+
+ ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->dev = &pdev->dev;
+ nand_controller_init(&ctrl->controller);
+ ctrl->controller.ops = &tegra_nand_controller_ops;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctrl->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctrl->regs))
+ return PTR_ERR(ctrl->regs);
+
+ rst = devm_reset_control_get(&pdev->dev, "nand");
+ if (IS_ERR(rst))
+ return PTR_ERR(rst);
+
+ ctrl->clk = devm_clk_get(&pdev->dev, "nand");
+ if (IS_ERR(ctrl->clk))
+ return PTR_ERR(ctrl->clk);
+
+ err = clk_prepare_enable(ctrl->clk);
+ if (err)
+ return err;
+
+ err = reset_control_reset(rst);
+ if (err) {
+ dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
+ goto err_disable_clk;
+ }
+
+ writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
+ writel_relaxed(HWSTATUS_MASK_DEFAULT, ctrl->regs + HWSTATUS_MASK);
+ writel_relaxed(INT_MASK, ctrl->regs + IER);
+
+ init_completion(&ctrl->command_complete);
+ init_completion(&ctrl->dma_complete);
+
+ ctrl->irq = platform_get_irq(pdev, 0);
+ err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
+ dev_name(&pdev->dev), ctrl);
+ if (err) {
+ dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
+ goto err_disable_clk;
+ }
+
+ writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);
+
+ err = tegra_nand_chips_init(ctrl->dev, ctrl);
+ if (err)
+ goto err_disable_clk;
+
+ platform_set_drvdata(pdev, ctrl);
+
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(ctrl->clk);
+ return err;
+}
+
+static int tegra_nand_remove(struct platform_device *pdev)
+{
+ struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
+ struct nand_chip *chip = ctrl->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = mtd_device_unregister(mtd);
+ if (ret)
+ return ret;
+
+ nand_cleanup(chip);
+
+ clk_disable_unprepare(ctrl->clk);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_nand_of_match[] = {
+ { .compatible = "nvidia,tegra20-nand" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra_nand_of_match);
+
+static struct platform_driver tegra_nand_driver = {
+ .driver = {
+ .name = "tegra-nand",
+ .of_match_table = tegra_nand_of_match,
+ },
+ .probe = tegra_nand_probe,
+ .remove = tegra_nand_remove,
+};
+module_platform_driver(tegra_nand_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra NAND driver");
+MODULE_AUTHOR("Thierry Reding <thierry.reding@nvidia.com>");
+MODULE_AUTHOR("Lucas Stach <dev@lynxeye.de>");
+MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index b567d212fe7d..4d61a14fcb65 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -20,7 +20,7 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
-#include <asm/txx9/ndfmc.h>
+#include <linux/platform_data/txx9/ndfmc.h>
/* TXX9 NDFMC Registers */
#define TXX9_NDFDTR 0x00
@@ -73,7 +73,7 @@ struct txx9ndfmc_drvdata {
void __iomem *base;
unsigned char hold; /* in gbusclock */
unsigned char spw; /* in gbusclock */
- struct nand_hw_control hw_control;
+ struct nand_controller controller;
};
static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
@@ -254,23 +254,25 @@ static void txx9ndfmc_initialize(struct platform_device *dev)
#define TXX9NDFMC_NS_TO_CYC(gbusclk, ns) \
DIV_ROUND_UP((ns) * DIV_ROUND_UP(gbusclk, 1000), 1000000)
-static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
+static int txx9ndfmc_attach_chip(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- int ret;
-
- ret = nand_scan_ident(mtd, 1, NULL);
- if (!ret) {
- if (mtd->writesize >= 512) {
- /* Hardware ECC 6 byte ECC per 512 Byte data */
- chip->ecc.size = 512;
- chip->ecc.bytes = 6;
- }
- ret = nand_scan_tail(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
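+	/* HW ECC: 6 ECC bytes per 512 bytes of data, 3 per 256 on small pages */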
+ if (mtd->writesize >= 512) {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
+ } else {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
}
- return ret;
+
+ return 0;
}
+static const struct nand_controller_ops txx9ndfmc_controller_ops = {
+ .attach_chip = txx9ndfmc_attach_chip,
+};
+
static int __init txx9ndfmc_probe(struct platform_device *dev)
{
struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
@@ -303,7 +305,8 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
dev_info(&dev->dev, "CLK:%ldMHz HOLD:%d SPW:%d\n",
(gbusclk + 500000) / 1000000, hold, spw);
- nand_hw_control_init(&drvdata->hw_control);
+ nand_controller_init(&drvdata->controller);
+ drvdata->controller.ops = &txx9ndfmc_controller_ops;
platform_set_drvdata(dev, drvdata);
txx9ndfmc_initialize(dev);
@@ -332,12 +335,9 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
chip->ecc.correct = txx9ndfmc_correct_data;
chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
chip->ecc.mode = NAND_ECC_HW;
- /* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
chip->ecc.strength = 1;
chip->chip_delay = 100;
- chip->controller = &drvdata->hw_control;
+ chip->controller = &drvdata->controller;
nand_set_controller_data(chip, txx9_priv);
txx9_priv->dev = dev;
@@ -359,14 +359,14 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
if (plat->wide_mask & (1 << i))
chip->options |= NAND_BUSWIDTH_16;
- if (txx9ndfmc_nand_scan(mtd)) {
+ if (nand_scan(mtd, 1)) {
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
continue;
}
mtd->name = txx9_priv->mtdname;
- mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+ mtd_device_register(mtd, NULL, 0);
drvdata->mtds[i] = mtd;
}
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index d5a22fc96878..6f6dcbf9095b 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -747,6 +747,69 @@ static void vf610_nfc_init_controller(struct vf610_nfc *nfc)
}
}
+static int vf610_nfc_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+ vf610_nfc_init_controller(nfc);
+
+ /* Bad block options. */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ /* Single buffer only, max 256 OOB minus ECC status */
+ if (mtd->writesize + mtd->oobsize > PAGE_2K + OOB_MAX - 8) {
+ dev_err(nfc->dev, "Unsupported flash page size\n");
+ return -ENXIO;
+ }
+
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return 0;
+
+ if (mtd->writesize != PAGE_2K && mtd->oobsize < 64) {
+ dev_err(nfc->dev, "Unsupported flash with hwecc\n");
+ return -ENXIO;
+ }
+
+ if (chip->ecc.size != mtd->writesize) {
+ dev_err(nfc->dev, "Step size needs to be page size\n");
+ return -ENXIO;
+ }
+
+ /* Only 64 byte ECC layouts known */
+ if (mtd->oobsize > 64)
+ mtd->oobsize = 64;
+
+ /* Use default large page ECC layout defined in NAND core */
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+ if (chip->ecc.strength == 32) {
+ nfc->ecc_mode = ECC_60_BYTE;
+ chip->ecc.bytes = 60;
+ } else if (chip->ecc.strength == 24) {
+ nfc->ecc_mode = ECC_45_BYTE;
+ chip->ecc.bytes = 45;
+ } else {
+ dev_err(nfc->dev, "Unsupported ECC strength\n");
+ return -ENXIO;
+ }
+
+ chip->ecc.read_page = vf610_nfc_read_page;
+ chip->ecc.write_page = vf610_nfc_write_page;
+ chip->ecc.read_page_raw = vf610_nfc_read_page_raw;
+ chip->ecc.write_page_raw = vf610_nfc_write_page_raw;
+ chip->ecc.read_oob = vf610_nfc_read_oob;
+ chip->ecc.write_oob = vf610_nfc_write_oob;
+
+ chip->ecc.size = PAGE_2K;
+
+ return 0;
+}
+
+static const struct nand_controller_ops vf610_nfc_controller_ops = {
+ .attach_chip = vf610_nfc_attach_chip,
+};
+
static int vf610_nfc_probe(struct platform_device *pdev)
{
struct vf610_nfc *nfc;
@@ -827,67 +890,9 @@ static int vf610_nfc_probe(struct platform_device *pdev)
vf610_nfc_preinit_controller(nfc);
- /* first scan to find the device and get the page size */
- err = nand_scan_ident(mtd, 1, NULL);
- if (err)
- goto err_disable_clk;
-
- vf610_nfc_init_controller(nfc);
-
- /* Bad block options. */
- if (chip->bbt_options & NAND_BBT_USE_FLASH)
- chip->bbt_options |= NAND_BBT_NO_OOB;
-
- /* Single buffer only, max 256 OOB minus ECC status */
- if (mtd->writesize + mtd->oobsize > PAGE_2K + OOB_MAX - 8) {
- dev_err(nfc->dev, "Unsupported flash page size\n");
- err = -ENXIO;
- goto err_disable_clk;
- }
-
- if (chip->ecc.mode == NAND_ECC_HW) {
- if (mtd->writesize != PAGE_2K && mtd->oobsize < 64) {
- dev_err(nfc->dev, "Unsupported flash with hwecc\n");
- err = -ENXIO;
- goto err_disable_clk;
- }
-
- if (chip->ecc.size != mtd->writesize) {
- dev_err(nfc->dev, "Step size needs to be page size\n");
- err = -ENXIO;
- goto err_disable_clk;
- }
-
- /* Only 64 byte ECC layouts known */
- if (mtd->oobsize > 64)
- mtd->oobsize = 64;
-
- /* Use default large page ECC layout defined in NAND core */
- mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
- if (chip->ecc.strength == 32) {
- nfc->ecc_mode = ECC_60_BYTE;
- chip->ecc.bytes = 60;
- } else if (chip->ecc.strength == 24) {
- nfc->ecc_mode = ECC_45_BYTE;
- chip->ecc.bytes = 45;
- } else {
- dev_err(nfc->dev, "Unsupported ECC strength\n");
- err = -ENXIO;
- goto err_disable_clk;
- }
-
- chip->ecc.read_page = vf610_nfc_read_page;
- chip->ecc.write_page = vf610_nfc_write_page;
- chip->ecc.read_page_raw = vf610_nfc_read_page_raw;
- chip->ecc.write_page_raw = vf610_nfc_write_page_raw;
- chip->ecc.read_oob = vf610_nfc_read_oob;
- chip->ecc.write_oob = vf610_nfc_write_oob;
-
- chip->ecc.size = PAGE_2K;
- }
-
- /* second phase scan */
- err = nand_scan_tail(mtd);
+ /* Scan the NAND chip */
+ chip->dummy_controller.ops = &vf610_nfc_controller_ops;
+ err = nand_scan(mtd, 1);
if (err)
goto err_disable_clk;
diff --git a/drivers/mtd/nand/spi/Kconfig b/drivers/mtd/nand/spi/Kconfig
new file mode 100644
index 000000000000..7c37d2929b68
--- /dev/null
+++ b/drivers/mtd/nand/spi/Kconfig
@@ -0,0 +1,7 @@
+menuconfig MTD_SPI_NAND
+ tristate "SPI NAND device Support"
+ select MTD_NAND_CORE
+ depends on SPI_MASTER
+ select SPI_MEM
+ help
+ This is the framework for the SPI NAND device drivers.
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
new file mode 100644
index 000000000000..b74e074b363a
--- /dev/null
+++ b/drivers/mtd/nand/spi/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+spinand-objs := core.o macronix.o micron.o winbond.o
+obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
new file mode 100644
index 000000000000..30f83649c481
--- /dev/null
+++ b/drivers/mtd/nand/spi/core.c
@@ -0,0 +1,1155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2017 Micron Technology, Inc.
+ *
+ * Authors:
+ * Peter Pan <peterpandong@micron.com>
+ * Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#define pr_fmt(fmt) "spi-nand: " fmt
+
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/spinand.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+static void spinand_cache_op_adjust_column(struct spinand_device *spinand,
+ const struct nand_page_io_req *req,
+ u16 *column)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int shift;
+
+ if (nand->memorg.planes_per_lun < 2)
+ return;
+
+	/* The plane number is passed in the MSBs just above the column address */
+ shift = fls(nand->memorg.pagesize);
+ *column |= req->pos.plane << shift;
+}
+
+static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
+{
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
+ spinand->scratchbuf);
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ *val = *spinand->scratchbuf;
+ return 0;
+}
+
+static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
+ spinand->scratchbuf);
+
+ *spinand->scratchbuf = val;
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_read_status(struct spinand_device *spinand, u8 *status)
+{
+ return spinand_read_reg_op(spinand, REG_STATUS, status);
+}
+
+static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ if (WARN_ON(spinand->cur_target < 0 ||
+ spinand->cur_target >= nand->memorg.ntargets))
+ return -EINVAL;
+
+ *cfg = spinand->cfg_cache[spinand->cur_target];
+ return 0;
+}
+
+static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret;
+
+ if (WARN_ON(spinand->cur_target < 0 ||
+ spinand->cur_target >= nand->memorg.ntargets))
+ return -EINVAL;
+
+ if (spinand->cfg_cache[spinand->cur_target] == cfg)
+ return 0;
+
+ ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
+ if (ret)
+ return ret;
+
+ spinand->cfg_cache[spinand->cur_target] = cfg;
+ return 0;
+}
+
+/**
+ * spinand_upd_cfg() - Update the configuration register
+ * @spinand: the spinand device
+ * @mask: the mask encoding the bits to update in the config reg
+ * @val: the new value to apply
+ *
+ * Update the configuration register.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
+{
+ int ret;
+ u8 cfg;
+
+ ret = spinand_get_cfg(spinand, &cfg);
+ if (ret)
+ return ret;
+
+ cfg &= ~mask;
+ cfg |= val;
+
+ return spinand_set_cfg(spinand, cfg);
+}
+
+/**
+ * spinand_select_target() - Select a specific NAND target/die
+ * @spinand: the spinand device
+ * @target: the target/die to select
+ *
+ * Select a new target/die. If the chip has only one die, this function is a
+ * NOOP.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_select_target(struct spinand_device *spinand, unsigned int target)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret;
+
+ if (WARN_ON(target >= nand->memorg.ntargets))
+ return -EINVAL;
+
+ if (spinand->cur_target == target)
+ return 0;
+
+ if (nand->memorg.ntargets == 1) {
+ spinand->cur_target = target;
+ return 0;
+ }
+
+ ret = spinand->select_target(spinand, target);
+ if (ret)
+ return ret;
+
+ spinand->cur_target = target;
+ return 0;
+}
+
+static int spinand_init_cfg_cache(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct device *dev = &spinand->spimem->spi->dev;
+ unsigned int target;
+ int ret;
+
+ spinand->cfg_cache = devm_kcalloc(dev,
+ nand->memorg.ntargets,
+ sizeof(*spinand->cfg_cache),
+ GFP_KERNEL);
+ if (!spinand->cfg_cache)
+ return -ENOMEM;
+
+ for (target = 0; target < nand->memorg.ntargets; target++) {
+ ret = spinand_select_target(spinand, target);
+ if (ret)
+ return ret;
+
+		/*
+		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
+		 * here to bypass the config cache, which has not been
+		 * initialized yet at this point.
+		 */
+ ret = spinand_read_reg_op(spinand, REG_CFG,
+ &spinand->cfg_cache[target]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int spinand_init_quad_enable(struct spinand_device *spinand)
+{
+ bool enable = false;
+
+ if (!(spinand->flags & SPINAND_HAS_QE_BIT))
+ return 0;
+
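+	/* The QE bit must be set whenever one of the selected ops uses 4 data lines */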
+ if (spinand->op_templates.read_cache->data.buswidth == 4 ||
+ spinand->op_templates.write_cache->data.buswidth == 4 ||
+ spinand->op_templates.update_cache->data.buswidth == 4)
+ enable = true;
+
+ return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
+ enable ? CFG_QUAD_ENABLE : 0);
+}
+
+static int spinand_ecc_enable(struct spinand_device *spinand,
+ bool enable)
+{
+ return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
+ enable ? CFG_ECC_ENABLE : 0);
+}
+
+static int spinand_write_enable_op(struct spinand_device *spinand)
+{
+ struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_load_page_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int row = nanddev_pos_to_row(nand, &req->pos);
+ struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_read_from_cache_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct spi_mem_op op = *spinand->op_templates.read_cache;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct nand_page_io_req adjreq = *req;
+ unsigned int nbytes = 0;
+ void *buf = NULL;
+ u16 column = 0;
+ int ret;
+
+ if (req->datalen) {
+ adjreq.datalen = nanddev_page_size(nand);
+ adjreq.dataoffs = 0;
+ adjreq.databuf.in = spinand->databuf;
+ buf = spinand->databuf;
+ nbytes = adjreq.datalen;
+ }
+
+ if (req->ooblen) {
+ adjreq.ooblen = nanddev_per_page_oobsize(nand);
+ adjreq.ooboffs = 0;
+ adjreq.oobbuf.in = spinand->oobbuf;
+ nbytes += nanddev_per_page_oobsize(nand);
+ if (!buf) {
+ buf = spinand->oobbuf;
+ column = nanddev_page_size(nand);
+ }
+ }
+
+	spinand_cache_op_adjust_column(spinand, &adjreq, &column);
+ op.addr.val = column;
+
+	/*
+	 * Some controllers are limited in terms of max RX data size. In this
+	 * case, just repeat the READ_CACHE operation after updating the
+	 * column.
+	 */
+ while (nbytes) {
+ op.data.buf.in = buf;
+ op.data.nbytes = nbytes;
+ ret = spi_mem_adjust_op_size(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ buf += op.data.nbytes;
+ nbytes -= op.data.nbytes;
+ op.addr.val += op.data.nbytes;
+ }
+
+ if (req->datalen)
+ memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
+ req->datalen);
+
+ if (req->ooblen) {
+ if (req->mode == MTD_OPS_AUTO_OOB)
+ mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
+ spinand->oobbuf,
+ req->ooboffs,
+ req->ooblen);
+ else
+ memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
+ req->ooblen);
+ }
+
+ return 0;
+}
+
+static int spinand_write_to_cache_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct spi_mem_op op = *spinand->op_templates.write_cache;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct nand_page_io_req adjreq = *req;
+ unsigned int nbytes = 0;
+ void *buf = NULL;
+ u16 column = 0;
+ int ret;
+
+ memset(spinand->databuf, 0xff,
+ nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand));
+
+ if (req->datalen) {
+ memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
+ req->datalen);
+ adjreq.dataoffs = 0;
+ adjreq.datalen = nanddev_page_size(nand);
+ adjreq.databuf.out = spinand->databuf;
+ nbytes = adjreq.datalen;
+ buf = spinand->databuf;
+ }
+
+ if (req->ooblen) {
+ if (req->mode == MTD_OPS_AUTO_OOB)
+ mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
+ spinand->oobbuf,
+ req->ooboffs,
+ req->ooblen);
+ else
+ memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
+ req->ooblen);
+
+ adjreq.ooblen = nanddev_per_page_oobsize(nand);
+ adjreq.ooboffs = 0;
+ nbytes += nanddev_per_page_oobsize(nand);
+ if (!buf) {
+ buf = spinand->oobbuf;
+ column = nanddev_page_size(nand);
+ }
+ }
+
+	spinand_cache_op_adjust_column(spinand, &adjreq, &column);
+
+ op = *spinand->op_templates.write_cache;
+ op.addr.val = column;
+
+	/*
+	 * Some controllers are limited in terms of max TX data size. In this
+	 * case, split the operation into one LOAD CACHE and one or more
+	 * LOAD RANDOM CACHE.
+	 */
+ while (nbytes) {
+ op.data.buf.out = buf;
+ op.data.nbytes = nbytes;
+
+ ret = spi_mem_adjust_op_size(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ buf += op.data.nbytes;
+ nbytes -= op.data.nbytes;
+ op.addr.val += op.data.nbytes;
+
+ /*
+ * We need to use the RANDOM LOAD CACHE operation if there's
+ * more than one iteration, because the LOAD operation resets
+ * the cache to 0xff.
+ */
+ if (nbytes) {
+ column = op.addr.val;
+ op = *spinand->op_templates.update_cache;
+ op.addr.val = column;
+ }
+ }
+
+ return 0;
+}
+
+static int spinand_program_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int row = nanddev_pos_to_row(nand, &req->pos);
+ struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_erase_op(struct spinand_device *spinand,
+ const struct nand_pos *pos)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int row = nanddev_pos_to_row(nand, pos);
+ struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int spinand_wait(struct spinand_device *spinand, u8 *s)
+{
+ unsigned long timeo = jiffies + msecs_to_jiffies(400);
+ u8 status;
+ int ret;
+
+ do {
+ ret = spinand_read_status(spinand, &status);
+ if (ret)
+ return ret;
+
+ if (!(status & STATUS_BUSY))
+ goto out;
+ } while (time_before(jiffies, timeo));
+
+	/*
+	 * Extra read, just in case the BUSY bit has changed
+	 * since our last check
+	 */
+ ret = spinand_read_status(spinand, &status);
+ if (ret)
+ return ret;
+
+out:
+ if (s)
+ *s = status;
+
+ return status & STATUS_BUSY ? -ETIMEDOUT : 0;
+}
+
+static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
+{
+ struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
+ SPINAND_MAX_ID_LEN);
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (!ret)
+ memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
+
+ return ret;
+}
+
+static int spinand_reset_op(struct spinand_device *spinand)
+{
+ struct spi_mem_op op = SPINAND_RESET_OP;
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ return spinand_wait(spinand, NULL);
+}
+
+static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
+{
+ return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
+}
+
+static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ if (spinand->eccinfo.get_status)
+ return spinand->eccinfo.get_status(spinand, status);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ /*
+ * We have no way to know exactly how many bitflips have been
+ * fixed, so let's return the maximum possible value so that
+ * wear-leveling layers move the data immediately.
+ */
+ return nand->eccreq.strength;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req,
+ bool ecc_enabled)
+{
+ u8 status;
+ int ret;
+
+ ret = spinand_load_page_op(spinand, req);
+ if (ret)
+ return ret;
+
+ ret = spinand_wait(spinand, &status);
+ if (ret < 0)
+ return ret;
+
+ ret = spinand_read_from_cache_op(spinand, req);
+ if (ret)
+ return ret;
+
+ if (!ecc_enabled)
+ return 0;
+
+ return spinand_check_ecc_status(spinand, status);
+}
+
+static int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ u8 status;
+ int ret;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_write_to_cache_op(spinand, req);
+ if (ret)
+ return ret;
+
+ ret = spinand_program_op(spinand, req);
+ if (ret)
+ return ret;
+
+ ret = spinand_wait(spinand, &status);
+ if (!ret && (status & STATUS_PROG_FAILED))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int max_bitflips = 0;
+ struct nand_io_iter iter;
+ bool enable_ecc = false;
+ bool ecc_failed = false;
+ int ret = 0;
+
+ if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
+ enable_ecc = true;
+
+ mutex_lock(&spinand->lock);
+
+ nanddev_io_for_each_page(nand, from, ops, &iter) {
+ ret = spinand_select_target(spinand, iter.req.pos.target);
+ if (ret)
+ break;
+
+ ret = spinand_ecc_enable(spinand, enable_ecc);
+ if (ret)
+ break;
+
+ ret = spinand_read_page(spinand, &iter.req, enable_ecc);
+ if (ret < 0 && ret != -EBADMSG)
+ break;
+
+ if (ret == -EBADMSG) {
+ ecc_failed = true;
+ mtd->ecc_stats.failed++;
+ ret = 0;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ }
+
+ ops->retlen += iter.req.datalen;
+ ops->oobretlen += iter.req.ooblen;
+ }
+
+ mutex_unlock(&spinand->lock);
+
+ if (ecc_failed && !ret)
+ ret = -EBADMSG;
+
+ return ret ? ret : max_bitflips;
+}
+
+static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_io_iter iter;
+ bool enable_ecc = false;
+ int ret = 0;
+
+ if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
+ enable_ecc = true;
+
+ mutex_lock(&spinand->lock);
+
+ nanddev_io_for_each_page(nand, to, ops, &iter) {
+ ret = spinand_select_target(spinand, iter.req.pos.target);
+ if (ret)
+ break;
+
+ ret = spinand_ecc_enable(spinand, enable_ecc);
+ if (ret)
+ break;
+
+ ret = spinand_write_page(spinand, &iter.req);
+ if (ret)
+ break;
+
+ ops->retlen += iter.req.datalen;
+ ops->oobretlen += iter.req.ooblen;
+ }
+
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_page_io_req req = {
+ .pos = *pos,
+ .ooblen = 2,
+ .ooboffs = 0,
+ .oobbuf.in = spinand->oobbuf,
+ .mode = MTD_OPS_RAW,
+ };
+
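+	/* A block is bad if either of the first two (BBM) OOB bytes is not 0xff */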
+ memset(spinand->oobbuf, 0, 2);
+ spinand_select_target(spinand, pos->target);
+ spinand_read_page(spinand, &req, false);
+ if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
+ return true;
+
+ return false;
+}
+
+static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_pos pos;
+ int ret;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+ mutex_lock(&spinand->lock);
+ ret = nanddev_isbad(nand, &pos);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_page_io_req req = {
+ .pos = *pos,
+ .ooboffs = 0,
+ .ooblen = 2,
+ .oobbuf.out = spinand->oobbuf,
+ };
+ int ret;
+
+ /* Erase block before marking it bad. */
+ ret = spinand_select_target(spinand, pos->target);
+ if (ret)
+ return ret;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ spinand_erase_op(spinand, pos);
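+	/* The erase result is ignored: mark the block bad even if the erase fails */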
+
+ memset(spinand->oobbuf, 0, 2);
+ return spinand_write_page(spinand, &req);
+}
+
+static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ struct nand_pos pos;
+ int ret;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+ mutex_lock(&spinand->lock);
+ ret = nanddev_markbad(nand, &pos);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 status;
+ int ret;
+
+ ret = spinand_select_target(spinand, pos->target);
+ if (ret)
+ return ret;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_erase_op(spinand, pos);
+ if (ret)
+ return ret;
+
+ ret = spinand_wait(spinand, &status);
+ if (!ret && (status & STATUS_ERASE_FAILED))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int spinand_mtd_erase(struct mtd_info *mtd,
+ struct erase_info *einfo)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ mutex_lock(&spinand->lock);
+ ret = nanddev_mtd_erase(mtd, einfo);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_pos pos;
+ int ret;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+ mutex_lock(&spinand->lock);
+ ret = nanddev_isreserved(nand, &pos);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static const struct nand_ops spinand_ops = {
+ .erase = spinand_erase,
+ .markbad = spinand_markbad,
+ .isbad = spinand_isbad,
+};
+
+static const struct spinand_manufacturer *spinand_manufacturers[] = {
+ &macronix_spinand_manufacturer,
+ &micron_spinand_manufacturer,
+ &winbond_spinand_manufacturer,
+};
+
+static int spinand_manufacturer_detect(struct spinand_device *spinand)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
+ ret = spinand_manufacturers[i]->ops->detect(spinand);
+ if (ret > 0) {
+ spinand->manufacturer = spinand_manufacturers[i];
+ return 0;
+ } else if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return -ENOTSUPP;
+}
+
+static int spinand_manufacturer_init(struct spinand_device *spinand)
+{
+ if (spinand->manufacturer->ops->init)
+ return spinand->manufacturer->ops->init(spinand);
+
+ return 0;
+}
+
+static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
+{
+ /* Release manufacturer private data */
+ if (spinand->manufacturer->ops->cleanup)
+ return spinand->manufacturer->ops->cleanup(spinand);
+}
+
+static const struct spi_mem_op *
+spinand_select_op_variant(struct spinand_device *spinand,
+ const struct spinand_op_variants *variants)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int i;
+
+ for (i = 0; i < variants->nops; i++) {
+ struct spi_mem_op op = variants->ops[i];
+ unsigned int nbytes;
+ int ret;
+
+ nbytes = nanddev_per_page_oobsize(nand) +
+ nanddev_page_size(nand);
+
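+		/*
+		 * Keep this variant only if the controller can transfer a full
+		 * page + OOB with it, possibly split into several operations.
+		 */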
+ while (nbytes) {
+ op.data.nbytes = nbytes;
+ ret = spi_mem_adjust_op_size(spinand->spimem, &op);
+ if (ret)
+ break;
+
+ if (!spi_mem_supports_op(spinand->spimem, &op))
+ break;
+
+ nbytes -= op.data.nbytes;
+ }
+
+ if (!nbytes)
+ return &variants->ops[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * spinand_match_and_init() - Try to find a match between a device ID and an
+ * entry in a spinand_info table
+ * @spinand: SPI NAND object
+ * @table: SPI NAND device description table
+ * @table_size: size of the device description table
+ *
+ * Should be used by SPI NAND manufacturer drivers when they want to find a
+ * match between a device ID retrieved through the READ_ID command and an
+ * entry in the SPI NAND description table. If a match is found, the spinand
+ * object will be initialized with information provided by the matching
+ * spinand_info entry.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_match_and_init(struct spinand_device *spinand,
+ const struct spinand_info *table,
+ unsigned int table_size, u8 devid)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int i;
+
+ for (i = 0; i < table_size; i++) {
+ const struct spinand_info *info = &table[i];
+ const struct spi_mem_op *op;
+
+ if (devid != info->devid)
+ continue;
+
+ nand->memorg = table[i].memorg;
+ nand->eccreq = table[i].eccreq;
+ spinand->eccinfo = table[i].eccinfo;
+ spinand->flags = table[i].flags;
+ spinand->select_target = table[i].select_target;
+
+ op = spinand_select_op_variant(spinand,
+ info->op_variants.read_cache);
+ if (!op)
+ return -ENOTSUPP;
+
+ spinand->op_templates.read_cache = op;
+
+ op = spinand_select_op_variant(spinand,
+ info->op_variants.write_cache);
+ if (!op)
+ return -ENOTSUPP;
+
+ spinand->op_templates.write_cache = op;
+
+ op = spinand_select_op_variant(spinand,
+ info->op_variants.update_cache);
+ spinand->op_templates.update_cache = op;
+
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int spinand_detect(struct spinand_device *spinand)
+{
+ struct device *dev = &spinand->spimem->spi->dev;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret;
+
+ ret = spinand_reset_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_read_id_op(spinand, spinand->id.data);
+ if (ret)
+ return ret;
+
+ spinand->id.len = SPINAND_MAX_ID_LEN;
+
+ ret = spinand_manufacturer_detect(spinand);
+ if (ret) {
+ dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
+ spinand->id.data);
+ return ret;
+ }
+
+ if (nand->memorg.ntargets > 1 && !spinand->select_target) {
+ dev_err(dev,
+ "SPI NANDs with more than one die must implement ->select_target()\n");
+ return -EINVAL;
+ }
+
+ dev_info(&spinand->spimem->spi->dev,
+ "%s SPI NAND was found.\n", spinand->manufacturer->name);
+ dev_info(&spinand->spimem->spi->dev,
+ "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
+ nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
+ nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
+
+ return 0;
+}
+
+static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
+ .ecc = spinand_noecc_ooblayout_ecc,
+ .free = spinand_noecc_ooblayout_free,
+};
+
+static int spinand_init(struct spinand_device *spinand)
+{
+ struct device *dev = &spinand->spimem->spi->dev;
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ int ret, i;
+
+	/*
+	 * We need a scratch buffer because the spi_mem interface requires that
+	 * the buf passed in spi_mem_op->data.buf be DMA-able.
+	 */
+ spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
+ if (!spinand->scratchbuf)
+ return -ENOMEM;
+
+ ret = spinand_detect(spinand);
+ if (ret)
+ goto err_free_bufs;
+
+	/*
+	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
+	 * may use this buffer for DMA access.
+	 * Memory allocated by devm_* helpers is not guaranteed to be DMA-safe
+	 * aligned.
+	 */
+ spinand->databuf = kzalloc(nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand),
+ GFP_KERNEL);
+ if (!spinand->databuf) {
+ ret = -ENOMEM;
+ goto err_free_bufs;
+ }
+
+ spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
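+	/* The OOB data sits right behind the page data in the bounce buffer */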
+
+ ret = spinand_init_cfg_cache(spinand);
+ if (ret)
+ goto err_free_bufs;
+
+ ret = spinand_init_quad_enable(spinand);
+ if (ret)
+ goto err_free_bufs;
+
+ ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
+ if (ret)
+ goto err_free_bufs;
+
+ ret = spinand_manufacturer_init(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to initialize the SPI NAND chip (err = %d)\n",
+ ret);
+ goto err_free_bufs;
+ }
+
+ /* After power up, all blocks are locked, so unlock them here. */
+ for (i = 0; i < nand->memorg.ntargets; i++) {
+ ret = spinand_select_target(spinand, i);
+ if (ret)
+ goto err_free_bufs;
+
+ ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
+ if (ret)
+ goto err_free_bufs;
+ }
+
+ ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
+ if (ret)
+ goto err_manuf_cleanup;
+
+	/*
+	 * Right now, we don't support ECC, so let the whole OOB
+	 * area be available to the user.
+	 */
+ mtd->_read_oob = spinand_mtd_read;
+ mtd->_write_oob = spinand_mtd_write;
+ mtd->_block_isbad = spinand_mtd_block_isbad;
+ mtd->_block_markbad = spinand_mtd_block_markbad;
+ mtd->_block_isreserved = spinand_mtd_block_isreserved;
+ mtd->_erase = spinand_mtd_erase;
+
+ if (spinand->eccinfo.ooblayout)
+ mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
+ else
+ mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
+
+ ret = mtd_ooblayout_count_freebytes(mtd);
+ if (ret < 0)
+ goto err_cleanup_nanddev;
+
+ mtd->oobavail = ret;
+
+ return 0;
+
+err_cleanup_nanddev:
+ nanddev_cleanup(nand);
+
+err_manuf_cleanup:
+ spinand_manufacturer_cleanup(spinand);
+
+err_free_bufs:
+ kfree(spinand->databuf);
+ kfree(spinand->scratchbuf);
+ return ret;
+}
+
+static void spinand_cleanup(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ nanddev_cleanup(nand);
+ spinand_manufacturer_cleanup(spinand);
+ kfree(spinand->databuf);
+ kfree(spinand->scratchbuf);
+}
+
+static int spinand_probe(struct spi_mem *mem)
+{
+ struct spinand_device *spinand;
+ struct mtd_info *mtd;
+ int ret;
+
+ spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
+ GFP_KERNEL);
+ if (!spinand)
+ return -ENOMEM;
+
+ spinand->spimem = mem;
+ spi_mem_set_drvdata(mem, spinand);
+ spinand_set_of_node(spinand, mem->spi->dev.of_node);
+ mutex_init(&spinand->lock);
+ mtd = spinand_to_mtd(spinand);
+ mtd->dev.parent = &mem->spi->dev;
+
+ ret = spinand_init(spinand);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err_spinand_cleanup;
+
+ return 0;
+
+err_spinand_cleanup:
+ spinand_cleanup(spinand);
+
+ return ret;
+}
+
+static int spinand_remove(struct spi_mem *mem)
+{
+ struct spinand_device *spinand;
+ struct mtd_info *mtd;
+ int ret;
+
+ spinand = spi_mem_get_drvdata(mem);
+ mtd = spinand_to_mtd(spinand);
+
+ ret = mtd_device_unregister(mtd);
+ if (ret)
+ return ret;
+
+ spinand_cleanup(spinand);
+
+ return 0;
+}
+
+static const struct spi_device_id spinand_ids[] = {
+ { .name = "spi-nand" },
+ { /* sentinel */ },
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id spinand_of_ids[] = {
+ { .compatible = "spi-nand" },
+ { /* sentinel */ },
+};
+#endif
+
+static struct spi_mem_driver spinand_drv = {
+ .spidrv = {
+ .id_table = spinand_ids,
+ .driver = {
+ .name = "spi-nand",
+ .of_match_table = of_match_ptr(spinand_of_ids),
+ },
+ },
+ .probe = spinand_probe,
+ .remove = spinand_remove,
+};
+module_spi_mem_driver(spinand_drv);
+
+MODULE_DESCRIPTION("SPI NAND framework");
+MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
new file mode 100644
index 000000000000..98f6b9c4b684
--- /dev/null
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Macronix
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_MACRONIX 0xC2
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int mx35lfxge4ab_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 2;
+ region->length = mtd->oobsize - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mx35lfxge4ab_ooblayout = {
+ .ecc = mx35lfxge4ab_ooblayout_ecc,
+ .free = mx35lfxge4ab_ooblayout_free,
+};
+
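+/*
+ * Read the on-die ECC status register (vendor-specific 0x7c command) to
+ * retrieve the exact number of corrected bitflips.
+ */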
+static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_DUMMY(1, 1),
+ SPI_MEM_OP_DATA_IN(1, eccsr, 1));
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ u8 eccsr;
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ /*
+ * Let's try to retrieve the real maximum number of bitflips
+ * in order to avoid forcing the wear-leveling layer to move
+ * data around if it's not necessary.
+ */
+ if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr))
+ return nand->eccreq.strength;
+
+ if (WARN_ON(eccsr > nand->eccreq.strength || !eccsr))
+ return nand->eccreq.strength;
+
+ return eccsr;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info macronix_spinand_table[] = {
+ SPINAND_INFO("MX35LF1GE4AB", 0x12,
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35LF2GE4AB", 0x22,
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 2, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+};
+
+static int macronix_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ /*
+ * Macronix SPI NAND read ID needs a dummy byte, so the first byte in
+ * raw_id is garbage.
+ */
+ if (id[1] != SPINAND_MFR_MACRONIX)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, macronix_spinand_table,
+ ARRAY_SIZE(macronix_spinand_table),
+ id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
+ .detect = macronix_spinand_detect,
+};
+
+const struct spinand_manufacturer macronix_spinand_manufacturer = {
+ .id = SPINAND_MFR_MACRONIX,
+ .name = "Macronix",
+ .ops = &macronix_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
new file mode 100644
index 000000000000..9c4381d6847b
--- /dev/null
+++ b/drivers/mtd/nand/spi/micron.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2017 Micron Technology, Inc.
+ *
+ * Authors:
+ * Peter Pan <peterpandong@micron.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_MICRON 0x2c
+
+#define MICRON_STATUS_ECC_MASK GENMASK(7, 4)
+#define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4)
+#define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4)
+#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
+#define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int mt29f2g01abagd_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 64;
+ region->length = 64;
+
+ return 0;
+}
+
+static int mt29f2g01abagd_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mt29f2g01abagd_ooblayout = {
+ .ecc = mt29f2g01abagd_ooblayout_ecc,
+ .free = mt29f2g01abagd_ooblayout_free,
+};
+
+static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & MICRON_STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case MICRON_STATUS_ECC_1TO3_BITFLIPS:
+ return 3;
+
+ case MICRON_STATUS_ECC_4TO6_BITFLIPS:
+ return 6;
+
+ case MICRON_STATUS_ECC_7TO8_BITFLIPS:
+ return 8;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info micron_spinand_table[] = {
+ SPINAND_INFO("MT29F2G01ABAGD", 0x24,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&mt29f2g01abagd_ooblayout,
+ mt29f2g01abagd_ecc_get_status)),
+};
+
+static int micron_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+	/*
+	 * Micron SPI NAND read ID needs a dummy byte, so the first byte in
+	 * raw_id is garbage.
+	 */
+ if (id[1] != SPINAND_MFR_MICRON)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, micron_spinand_table,
+ ARRAY_SIZE(micron_spinand_table), id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = {
+ .detect = micron_spinand_detect,
+};
+
+const struct spinand_manufacturer micron_spinand_manufacturer = {
+ .id = SPINAND_MFR_MICRON,
+ .name = "Micron",
+ .ops = &micron_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
new file mode 100644
index 000000000000..67baa1b32c00
--- /dev/null
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 exceet electronics GmbH
+ *
+ * Authors:
+ * Frieder Schrempf <frieder.schrempf@exceet.de>
+ * Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_WINBOND 0xEF
+
+#define WINBOND_CFG_BUF_READ BIT(3)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 8;
+ region->length = 8;
+
+ return 0;
+}
+
+static int w25m02gv_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 2;
+ region->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops w25m02gv_ooblayout = {
+ .ecc = w25m02gv_ooblayout_ecc,
+ .free = w25m02gv_ooblayout_free,
+};
+
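+/* Die selection on this stacked-die part uses the 0xc2 (Software Die Select) command */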
+static int w25m02gv_select_target(struct spinand_device *spinand,
+ unsigned int target)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0xc2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1,
+ spinand->scratchbuf,
+ 1));
+
+ *spinand->scratchbuf = target;
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static const struct spinand_info winbond_spinand_table[] = {
+ SPINAND_INFO("W25M02GV", 0xAB,
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 2),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+ SPINAND_SELECT_TARGET(w25m02gv_select_target)),
+};
+
+/**
+ * winbond_spinand_detect() - initialize the Winbond-specific parts of a
+ * spinand_device if the chip is a Winbond device
+ * @spinand: SPI NAND device structure
+ */
+static int winbond_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+	/*
+	 * Winbond SPI NAND read ID needs a dummy byte, so the first byte in
+	 * raw_id is garbage.
+	 */
+ if (id[1] != SPINAND_MFR_WINBOND)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, winbond_spinand_table,
+ ARRAY_SIZE(winbond_spinand_table), id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static int winbond_spinand_init(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int i;
+
+ /*
+ * Make sure all dies are in buffer read mode and not continuous read
+ * mode.
+ */
+ for (i = 0; i < nand->memorg.ntargets; i++) {
+ spinand_select_target(spinand, i);
+ spinand_upd_cfg(spinand, WINBOND_CFG_BUF_READ,
+ WINBOND_CFG_BUF_READ);
+ }
+
+ return 0;
+}
+
+static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = {
+ .detect = winbond_spinand_detect,
+ .init = winbond_spinand_init,
+};
+
+const struct spinand_manufacturer winbond_spinand_manufacturer = {
+ .id = SPINAND_MFR_WINBOND,
+ .name = "Winbond",
+ .ops = &winbond_spinand_manuf_ops,
+};
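
The two ooblayout callbacks above carve each 16-byte OOB section into a
free range at bytes 2..7 and an ECC range at bytes 8..15, leaving bytes
0-1 of every section untouched (conventionally reserved for the
bad-block marker, though the driver does not spell that out). A minimal
userspace sketch of the same arithmetic across all four sections of the
64-byte OOB area:

#include <stdio.h>

int main(void)
{
	for (int section = 0; section < 4; section++) {
		int free_off = (16 * section) + 2;	/* 6 free bytes */
		int ecc_off  = (16 * section) + 8;	/* 8 ECC bytes */

		printf("section %d: free %2d-%2d, ecc %2d-%2d\n",
		       section, free_off, free_off + 5,
		       ecc_off, ecc_off + 7);
	}
	return 0;
}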
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index 27184e3874db..91b7fb326f9a 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -577,7 +577,7 @@ static int get_fold_mark(struct NFTLrecord *nftl, unsigned int block)
int NFTL_mount(struct NFTLrecord *s)
{
int i;
- unsigned int first_logical_block, logical_block, rep_block, nb_erases, erase_mark;
+ unsigned int first_logical_block, logical_block, rep_block, erase_mark;
unsigned int block, first_block, is_first_block;
int chain_length, do_format_chain;
struct nftl_uci0 h0;
@@ -621,7 +621,6 @@ int NFTL_mount(struct NFTLrecord *s)
logical_block = le16_to_cpu ((h0.VirtUnitNum | h0.SpareVirtUnitNum));
rep_block = le16_to_cpu ((h0.ReplUnitNum | h0.SpareReplUnitNum));
- nb_erases = le32_to_cpu (h1.WearInfo);
erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
is_first_block = !(logical_block >> 15);
diff --git a/drivers/mtd/parsers/parser_trx.c b/drivers/mtd/parsers/parser_trx.c
index 17ac33599783..4a89a68622fe 100644
--- a/drivers/mtd/parsers/parser_trx.c
+++ b/drivers/mtd/parsers/parser_trx.c
@@ -116,9 +116,16 @@ static int parser_trx_parse(struct mtd_info *mtd,
return i;
};
+static const struct of_device_id mtd_parser_trx_of_match_table[] = {
+ { .compatible = "brcm,trx" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtd_parser_trx_of_match_table);
+
static struct mtd_part_parser mtd_parser_trx = {
.parse_fn = parser_trx_parse,
.name = "trx",
+ .of_match_table = mtd_parser_trx_of_match_table,
};
module_mtd_part_parser(mtd_parser_trx);
diff --git a/drivers/mtd/spi-nor/atmel-quadspi.c b/drivers/mtd/spi-nor/atmel-quadspi.c
index 6c5708bacad8..820048726b4f 100644
--- a/drivers/mtd/spi-nor/atmel-quadspi.c
+++ b/drivers/mtd/spi-nor/atmel-quadspi.c
@@ -34,7 +34,7 @@
#include <linux/of.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
/* QSPI register offsets */
#define QSPI_CR 0x0000 /* Control Register */
@@ -737,6 +737,26 @@ static int atmel_qspi_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused atmel_qspi_suspend(struct device *dev)
+{
+ struct atmel_qspi *aq = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(aq->clk);
+
+ return 0;
+}
+
+static int __maybe_unused atmel_qspi_resume(struct device *dev)
+{
+ struct atmel_qspi *aq = dev_get_drvdata(dev);
+
+ clk_prepare_enable(aq->clk);
+
+ return atmel_qspi_init(aq);
+}
+
+static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend,
+ atmel_qspi_resume);
static const struct of_device_id atmel_qspi_dt_ids[] = {
{ .compatible = "atmel,sama5d2-qspi" },
@@ -749,6 +769,7 @@ static struct platform_driver atmel_qspi_driver = {
.driver = {
.name = "atmel_qspi",
.of_match_table = atmel_qspi_dt_ids,
+ .pm = &atmel_qspi_pm_ops,
},
.probe = atmel_qspi_probe,
.remove = atmel_qspi_remove,
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index d7e10b36a0b9..8e714fbfa521 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -525,15 +525,14 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
reg_base + CQSPI_REG_INDIRECTRD);
while (remaining > 0) {
- ret = wait_for_completion_timeout(&cqspi->transfer_complete,
- msecs_to_jiffies
- (CQSPI_READ_TIMEOUT_MS));
+ if (!wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
+ ret = -ETIMEDOUT;
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
- if (!ret && bytes_to_read == 0) {
+ if (ret && bytes_to_read == 0) {
dev_err(nor->dev, "Indirect read timeout, no bytes\n");
- ret = -ETIMEDOUT;
goto failrd;
}
@@ -649,10 +648,8 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
iowrite32_rep(cqspi->ahb_base, txbuf,
DIV_ROUND_UP(write_bytes, 4));
- ret = wait_for_completion_timeout(&cqspi->transfer_complete,
- msecs_to_jiffies
- (CQSPI_TIMEOUT_MS));
- if (!ret) {
+ if (!wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
dev_err(nor->dev, "Indirect write timeout\n");
ret = -ETIMEDOUT;
goto failwr;
@@ -988,9 +985,8 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
}
dma_async_issue_pending(cqspi->rx_chan);
- ret = wait_for_completion_timeout(&cqspi->rx_dma_complete,
- msecs_to_jiffies(len));
- if (ret <= 0) {
+ if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
+ msecs_to_jiffies(len))) {
dmaengine_terminate_sync(cqspi->rx_chan);
dev_err(nor->dev, "DMA wait_for_completion_timeout\n");
ret = -ETIMEDOUT;
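
All three hunks above drop the intermediate ret assignment and test
wait_for_completion_timeout() directly: it returns 0 on timeout and the
remaining jiffies otherwise, so a boolean test maps straight to
-ETIMEDOUT. A userspace analogue of that mapping, sketched with a POSIX
semaphore standing in for the kernel completion (compile with -pthread;
this is an illustration, not the kernel API):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static sem_t done;	/* stand-in for the completion */

static int wait_done_timeout(unsigned int timeout_ms)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec  += timeout_ms / 1000;
	ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	/* Timeout (or any failure) becomes -ETIMEDOUT, as in the hunks. */
	return sem_timedwait(&done, &ts) ? -ETIMEDOUT : 0;
}

int main(void)
{
	sem_init(&done, 0, 0);	/* never posted: the wait must time out */
	printf("wait_done_timeout() = %d\n", wait_done_timeout(100));
	return 0;
}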
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index d2cbfc27826e..af0a22019516 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -908,7 +908,7 @@ struct intel_spi *intel_spi_probe(struct device *dev,
if (!ispi->writeable || !writeable)
ispi->nor.mtd.flags &= ~MTD_WRITEABLE;
- ret = mtd_device_parse_register(&ispi->nor.mtd, NULL, NULL, &part, 1);
+ ret = mtd_device_register(&ispi->nor.mtd, &part, 1);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c
index 15374216d4d9..0c9094ec5966 100644
--- a/drivers/mtd/spi-nor/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/nxp-spifi.c
@@ -436,6 +436,7 @@ static int nxp_spifi_probe(struct platform_device *pdev)
}
ret = nxp_spifi_setup_flash(spifi, flash_np);
+ of_node_put(flash_np);
if (ret) {
dev_err(&pdev->dev, "unable to setup flash chip\n");
goto dis_clks;
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index d9c368c44194..f028277fb1ce 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -2757,8 +2757,18 @@ static int spi_nor_init(struct spi_nor *nor)
if ((nor->addr_width == 4) &&
(JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
- !(nor->info->flags & SPI_NOR_4B_OPCODES))
+ !(nor->info->flags & SPI_NOR_4B_OPCODES)) {
+ /*
+ * If the RESET# pin isn't hooked up properly, or the system
+ * otherwise doesn't perform a reset command in the boot
+ * sequence, it's impossible to 100% protect against unexpected
+ * reboots (e.g., crashes). Warn the user (or hopefully, system
+ * designer) that this is bad.
+ */
+ WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
+ "enabling reset hack; may not recover from unexpected reboots\n");
set_4byte(nor, nor->info, 1);
+ }
return 0;
}
@@ -2781,7 +2791,8 @@ void spi_nor_restore(struct spi_nor *nor)
/* restore the addressing mode */
if ((nor->addr_width == 4) &&
(JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
- !(nor->info->flags & SPI_NOR_4B_OPCODES))
+ !(nor->info->flags & SPI_NOR_4B_OPCODES) &&
+ (nor->flags & SNOR_F_BROKEN_RESET))
set_4byte(nor, nor->info, 0);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);
@@ -2911,6 +2922,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
}
+ if (of_property_read_bool(np, "broken-flash-reset"))
+ nor->flags |= SNOR_F_BROKEN_RESET;
+
/* Some devices cannot do fast-read, no matter what DT tells us */
if (info->flags & SPI_NOR_NO_FR)
params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
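
The hunks above gate the 4-byte-address hack in two places:
spi_nor_init() still enters 4-byte mode but now warns once when the new
broken-flash-reset DT property set SNOR_F_BROKEN_RESET, and
spi_nor_restore() only bothers exiting 4-byte mode when that flag is
set. A standalone sketch of the gating, with made-up flag values
(illustrative only, not the kernel's encodings):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical encodings, for this sketch only. */
#define SPI_NOR_4B_OPCODES	(1u << 0)
#define SNOR_F_BROKEN_RESET	(1u << 1)
#define SNOR_MFR_SPANSION	0x01

static bool uses_4byte_enter_hack(int addr_width, int mfr,
				  unsigned int info_flags)
{
	return addr_width == 4 && mfr != SNOR_MFR_SPANSION &&
	       !(info_flags & SPI_NOR_4B_OPCODES);
}

int main(void)
{
	unsigned int nor_flags = SNOR_F_BROKEN_RESET;	/* from the DT property */

	if (uses_4byte_enter_hack(4, 0xef, 0)) {
		if (nor_flags & SNOR_F_BROKEN_RESET)
			printf("init: warn once about the reset hack\n");
		printf("init: enter 4-byte mode\n");
		if (nor_flags & SNOR_F_BROKEN_RESET)
			printf("restore: exit 4-byte mode\n");
	}
	return 0;
}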
diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c
index 72553506a00b..13e9fc961d3b 100644
--- a/drivers/mtd/spi-nor/stm32-quadspi.c
+++ b/drivers/mtd/spi-nor/stm32-quadspi.c
@@ -355,7 +355,7 @@ static int stm32_qspi_read_reg(struct spi_nor *nor,
struct device *dev = flash->qspi->dev;
struct stm32_qspi_cmd cmd;
- dev_dbg(dev, "read_reg: cmd:%#.2x buf:%p len:%#x\n", opcode, buf, len);
+ dev_dbg(dev, "read_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len);
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = opcode;
@@ -376,7 +376,7 @@ static int stm32_qspi_write_reg(struct spi_nor *nor, u8 opcode,
struct device *dev = flash->qspi->dev;
struct stm32_qspi_cmd cmd;
- dev_dbg(dev, "write_reg: cmd:%#.2x buf:%p len:%#x\n", opcode, buf, len);
+ dev_dbg(dev, "write_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len);
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = opcode;
@@ -398,7 +398,7 @@ static ssize_t stm32_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
struct stm32_qspi_cmd cmd;
int err;
- dev_dbg(qspi->dev, "read(%#.2x): buf:%p from:%#.8x len:%#zx\n",
+ dev_dbg(qspi->dev, "read(%#.2x): buf:%pK from:%#.8x len:%#zx\n",
nor->read_opcode, buf, (u32)from, len);
memset(&cmd, 0, sizeof(cmd));
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 217b790d22ed..a764a83f99da 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4102,7 +4102,8 @@ static inline int bond_slave_override(struct bonding *bond,
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
/* This helper function exists to help dev_pick_tx get the correct
* destination queue. Using a helper function skips a call to
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6096440e96ea..35847250da5a 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -160,14 +160,19 @@ static ssize_t bonding_sysfs_store_option(struct device *d,
{
struct bonding *bond = to_bond(d);
const struct bond_option *opt;
+ char *buffer_clone;
int ret;
opt = bond_opt_get_by_name(attr->attr.name);
if (WARN_ON(!opt))
return -ENOENT;
- ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buffer);
+ buffer_clone = kstrndup(buffer, count, GFP_KERNEL);
+ if (!buffer_clone)
+ return -ENOMEM;
+ ret = bond_opt_tryset_rtnl(bond, opt->id, buffer_clone);
if (!ret)
ret = count;
+ kfree(buffer_clone);
return ret;
}
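
The fix above stops casting away const from sysfs's buffer and instead
hands bond_opt_tryset_rtnl() a bounded, writable copy. The same shape in
userspace, with strtok() standing in for the in-place parsing (an
assumption for the sketch: the real parser mutates its input, which is
what made the old cast unsafe):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_option(char *s)
{
	char *tok = strtok(s, " \n");	/* strtok() writes into s */

	if (!tok)
		return -1;
	printf("value: %s\n", tok);
	return 0;
}

int main(void)
{
	const char *buffer = "balance-rr\n";	/* caller-owned, read-only */
	char *clone = strndup(buffer, strlen(buffer));

	if (!clone)
		return 1;	/* -ENOMEM in the kernel version */
	parse_option(clone);	/* mutates the clone, never the original */
	free(clone);
	return 0;
}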
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 2cb75988b328..7cdd0cead693 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -73,6 +73,12 @@ config CAN_CALC_BITTIMING
config CAN_LEDS
bool "Enable LED triggers for Netlink based drivers"
depends on LEDS_CLASS
+	# The netdev trigger (LEDS_TRIGGER_NETDEV) should be able to do
+	# everything this driver does. This driver is marked as broken
+	# because it relies on internals that are going to be changed or
+	# removed. Please consider switching to the netdev trigger and
+	# confirming that it fulfills your needs instead of fixing this
+	# driver.
+ depends on BROKEN
select LEDS_TRIGGERS
---help---
This option adds two LED triggers for packet receive and transmit
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index d4dd4da23997..da636a22c542 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 "
static int i82527_compat;
module_param(i82527_compat, int, 0444);
-MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 comptibility mode "
+MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 compatibility mode "
"without using additional functions");
/*
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3c71f1cb205f..49163570a63a 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -649,8 +649,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
- *cf = skb_put(skb, sizeof(struct can_frame));
- memset(*cf, 0, sizeof(struct can_frame));
+ *cf = skb_put_zero(skb, sizeof(struct can_frame));
return skb;
}
@@ -678,8 +677,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
- *cfd = skb_put(skb, sizeof(struct canfd_frame));
- memset(*cfd, 0, sizeof(struct canfd_frame));
+ *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
return skb;
}
@@ -703,7 +701,8 @@ EXPORT_SYMBOL_GPL(alloc_can_err_skb);
/*
* Allocate and setup space for the CAN network device
*/
-struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
+struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+ unsigned int txqs, unsigned int rxqs)
{
struct net_device *dev;
struct can_priv *priv;
@@ -715,7 +714,8 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
else
size = sizeof_priv;
- dev = alloc_netdev(size, "can%d", NET_NAME_UNKNOWN, can_setup);
+ dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
+ txqs, rxqs);
if (!dev)
return NULL;
@@ -734,7 +734,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
return dev;
}
-EXPORT_SYMBOL_GPL(alloc_candev);
+EXPORT_SYMBOL_GPL(alloc_candev_mqs);
/*
* Free space of the CAN network device
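
Only the _mqs symbol is exported now; callers of plain alloc_candev()
presumably keep working through a single-queue wrapper added by the
matching header change, which is not part of this hunk, along the lines
of:

/* Assumed companion change in include/linux/can/dev.h: */
#define alloc_candev(sizeof_priv, echo_skb_max) \
	alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1)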
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index d53a45bf2a72..8e972ef08637 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1,24 +1,13 @@
-/*
- * flexcan.c - FLEXCAN CAN controller driver
- *
- * Copyright (c) 2005-2006 Varma Electronics Oy
- * Copyright (c) 2009 Sascha Hauer, Pengutronix
- * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
- * Copyright (c) 2014 David Jander, Protonic Holland
- *
- * Based on code originally by Andrey Volkov <avolkov@varma-el.com>
- *
- * LICENCE:
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// flexcan.c - FLEXCAN CAN controller driver
+//
+// Copyright (c) 2005-2006 Varma Electronics Oy
+// Copyright (c) 2009 Sascha Hauer, Pengutronix
+// Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+// Copyright (c) 2014 David Jander, Protonic Holland
+//
+// Based on code originally by Andrey Volkov <avolkov@varma-el.com>
#include <linux/netdevice.h>
#include <linux/can.h>
@@ -523,7 +512,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
return err;
}
-static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
const struct flexcan_priv *priv = netdev_priv(dev);
struct can_frame *cf = (struct can_frame *)skb->data;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index adfdb66a486e..02042cb09bd2 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1684,7 +1684,7 @@ static int ican3_stop(struct net_device *ndev)
return 0;
}
-static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ican3_dev *mod = netdev_priv(ndev);
struct can_frame *cf = (struct can_frame *)skb->data;
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index ed8561d4a90f..5696d7e80751 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -486,7 +486,7 @@ int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
if (msg_size <= 0)
break;
- msg_ptr += msg_size;
+ msg_ptr += ALIGN(msg_size, 4);
}
if (msg_size < 0)
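
The one-line fix above matters because records in the receive area are
evidently padded to 32-bit boundaries, so stepping the cursor by the raw
msg_size desynchronizes the walk on any size that is not a multiple of
four. A userspace sketch of the corrected advance (the record sizes are
illustrative):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int msg_sizes[] = { 11, 8, 13 };	/* raw on-wire sizes */
	unsigned int pos = 0;

	for (int i = 0; i < 3; i++) {
		printf("msg %d at offset %u (raw size %u)\n",
		       i, pos, msg_sizes[i]);
		pos += ALIGN(msg_sizes[i], 4);	/* was: pos += msg_sizes[i] */
	}
	return 0;
}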
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 455a3797a200..c458d5fdc8d3 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -174,9 +174,6 @@ struct pciefd_page {
u32 size;
};
-#define CANFD_IRQ_SET 0x00000001
-#define CANFD_TX_PATH_SET 0x00000002
-
/* CAN-FD channel object */
struct pciefd_board;
struct pciefd_can {
@@ -418,7 +415,7 @@ static int pciefd_pre_cmd(struct peak_canfd_priv *ucan)
break;
/* going into operational mode: setup IRQ handler */
- err = request_irq(priv->board->pci_dev->irq,
+ err = request_irq(priv->ucan.ndev->irq,
pciefd_irq_handler,
IRQF_SHARED,
PCIEFD_DRV_NAME,
@@ -491,15 +488,18 @@ static int pciefd_post_cmd(struct peak_canfd_priv *ucan)
/* controller now in reset mode: */
+ /* disable IRQ for this CAN */
+ pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
+ PCIEFD_REG_CAN_RX_CTL_CLR);
+
/* stop and reset DMA addresses in Tx/Rx engines */
pciefd_can_clear_tx_dma(priv);
pciefd_can_clear_rx_dma(priv);
- /* disable IRQ for this CAN */
- pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
- PCIEFD_REG_CAN_RX_CTL_CLR);
+ /* wait for above commands to complete (read cycle) */
+ (void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1);
- free_irq(priv->board->pci_dev->irq, priv);
+ free_irq(priv->ucan.ndev->irq, priv);
ucan->can.state = CAN_STATE_STOPPED;
@@ -638,7 +638,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd)
GFP_KERNEL);
if (!priv->tx_dma_vaddr) {
dev_err(&pciefd->pci_dev->dev,
- "Tx dmaim_alloc_coherent(%u) failure\n",
+ "Tx dmam_alloc_coherent(%u) failure\n",
PCIEFD_TX_DMA_SIZE);
goto err_free_candev;
}
@@ -691,7 +691,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd)
pciefd->can[pciefd->can_count] = priv;
dev_info(&pciefd->pci_dev->dev, "%s at reg_base=0x%p irq=%d\n",
- ndev->name, priv->reg_base, pciefd->pci_dev->irq);
+ ndev->name, priv->reg_base, ndev->irq);
return 0;
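
The reordered teardown above disables the channel IRQ first and then
issues a dummy register read: MMIO writes over PCIe are posted, and a
read back through the same path is the standard way to guarantee they
have reached the device before free_irq() runs. A toy model of the
pattern with stand-ins for the kernel accessors (the real readl/writel
target device registers, not ordinary memory):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t regs[4];	/* fake register file */

static void writel(uint32_t v, volatile uint32_t *addr) { *addr = v; }
static uint32_t readl(volatile uint32_t *addr) { return *addr; }

int main(void)
{
	writel(1, &regs[0]);	/* "IRQ disable": posted on real hardware */
	(void)readl(&regs[3]);	/* read-back forces the write to complete */
	printf("safe to free_irq(): device can no longer raise the line\n");
	return 0;
}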
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 5adc95c922ee..a97b81d1d0da 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -608,7 +608,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
writeb(0x00, cfg_base + PITA_GPIOICR);
/* Toggle reset */
writeb(0x05, cfg_base + PITA_MISC + 3);
- mdelay(5);
+ usleep_range(5000, 6000);
/* Leave parport mux mode */
writeb(0x04, cfg_base + PITA_MISC + 3);
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 485b19c9ae47..b8c39ede7cd5 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -530,7 +530,7 @@ static int pcan_add_channels(struct pcan_pccard *card)
pcan_write_reg(card, PCC_CCR, ccr);
/* wait 2ms before unresetting channels */
- mdelay(2);
+ usleep_range(2000, 3000);
ccr &= ~PCC_CCR_RST_ALL;
pcan_write_reg(card, PCC_CCR, ccr);
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 1ac2090a1721..093fc9a529f0 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -409,7 +409,7 @@ static int sun4ican_set_mode(struct net_device *dev, enum can_mode mode)
* xx xx xx xx ff ll 00 11 22 33 44 55 66 77
* [ can_id ] [flags] [len] [can data (up to 8 bytes)]
*/
-static int sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct sun4ican_priv *priv = netdev_priv(dev);
struct can_frame *cf = (struct can_frame *)skb->data;
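
flexcan, janz-ican3, and sun4i above all retype their xmit handlers from
int to netdev_tx_t, the return type that struct net_device_ops declares
for .ndo_start_xmit. The shape every one of them now matches (a hedged
sketch of the signature, not a standalone program; the body is elided):

#include <linux/netdevice.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* ... hand the frame to the controller ... */
	return NETDEV_TX_OK;	/* or NETDEV_TX_BUSY to request a requeue */
}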
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index c36f4bdcbf4f..750d04d9e2ae 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -1,6 +1,12 @@
menu "CAN USB interfaces"
depends on USB
+config CAN_8DEV_USB
+ tristate "8 devices USB2CAN interface"
+ ---help---
+ This driver supports the USB2CAN interface
+ from 8 devices (http://www.8devices.com).
+
config CAN_EMS_USB
tristate "EMS CPC-USB/ARM7 CAN/USB interface"
---help---
@@ -26,7 +32,7 @@ config CAN_KVASER_USB
tristate "Kvaser CAN/USB interface"
---help---
This driver adds support for Kvaser CAN/USB devices like Kvaser
- Leaf Light and Kvaser USBcan II.
+ Leaf Light, Kvaser USBcan II and Kvaser Memorator Pro 5xHS.
The driver provides support for the following devices:
- Kvaser Leaf Light
@@ -55,12 +61,30 @@ config CAN_KVASER_USB
- Kvaser Memorator HS/HS
- Kvaser Memorator HS/LS
- Scania VCI2 (if you have the Kvaser logo on top)
+ - Kvaser BlackBird v2
+ - Kvaser Leaf Pro HS v2
+ - Kvaser Hybrid 2xCAN/LIN
+ - Kvaser Hybrid Pro 2xCAN/LIN
+ - Kvaser Memorator 2xHS v2
+ - Kvaser Memorator Pro 2xHS v2
+ - Kvaser Memorator Pro 5xHS
+ - Kvaser USBcan Light 4xHS
+ - Kvaser USBcan Pro 2xHS v2
+ - Kvaser USBcan Pro 5xHS
+ - ATI Memorator Pro 2xHS v2
+ - ATI USBcan Pro 2xHS v2
If unsure, say N.
To compile this driver as a module, choose M here: the
module will be called kvaser_usb.
+config CAN_MCBA_USB
+ tristate "Microchip CAN BUS Analyzer interface"
+ ---help---
+ This driver supports the CAN BUS Analyzer interface
+ from Microchip (http://www.microchip.com/development-tools/).
+
config CAN_PEAK_USB
tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD"
---help---
@@ -77,16 +101,26 @@ config CAN_PEAK_USB
(see also http://www.peak-system.com).
-config CAN_8DEV_USB
- tristate "8 devices USB2CAN interface"
- ---help---
- This driver supports the USB2CAN interface
- from 8 devices (http://www.8devices.com).
-
config CAN_MCBA_USB
tristate "Microchip CAN BUS Analyzer interface"
---help---
This driver supports the CAN BUS Analyzer interface
from Microchip (http://www.microchip.com/development-tools/).
+config CAN_UCAN
+ tristate "Theobroma Systems UCAN interface"
+ ---help---
+ This driver supports the Theobroma Systems
+ UCAN USB-CAN interface.
+
+ The UCAN driver supports the microcontroller-based USB/CAN
+ adapters from Theobroma Systems. There are two form-factors
+ that run essentially the same firmware:
+
+ * Seal: standalone USB stick
+	    (https://www.theobroma-systems.com/seal)
+ * Mule: integrated on the PCB of various System-on-Modules
+ from Theobroma Systems like the A31-µQ7 and the RK3399-Q7
+ (https://www.theobroma-systems.com/rk3399-q7)
+
endmenu
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 49ac7b99ba32..aa0f17c0b2ed 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -3,10 +3,11 @@
# Makefile for the Linux Controller Area Network USB drivers.
#
+obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
-obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
-obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
-obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
+obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/
obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o
+obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
+obj-$(CONFIG_CAN_UCAN) += ucan.o
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
deleted file mode 100644
index daed57d3d209..000000000000
--- a/drivers/net/can/usb/kvaser_usb.c
+++ /dev/null
@@ -1,2085 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * Parts of this driver are based on the following:
- * - Kvaser linux leaf driver (version 4.78)
- * - CAN driver for esd CAN-USB/2
- * - Kvaser linux usbcanII driver (version 5.3)
- *
- * Copyright (C) 2002-2006 KVASER AB, Sweden. All rights reserved.
- * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
- * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
- * Copyright (C) 2015 Valeo S.A.
- */
-
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/completion.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/usb.h>
-
-#include <linux/can.h>
-#include <linux/can/dev.h>
-#include <linux/can/error.h>
-
-#define MAX_RX_URBS 4
-#define START_TIMEOUT 1000 /* msecs */
-#define STOP_TIMEOUT 1000 /* msecs */
-#define USB_SEND_TIMEOUT 1000 /* msecs */
-#define USB_RECV_TIMEOUT 1000 /* msecs */
-#define RX_BUFFER_SIZE 3072
-#define CAN_USB_CLOCK 8000000
-#define MAX_NET_DEVICES 3
-#define MAX_USBCAN_NET_DEVICES 2
-
-/* Kvaser Leaf USB devices */
-#define KVASER_VENDOR_ID 0x0bfd
-#define USB_LEAF_DEVEL_PRODUCT_ID 10
-#define USB_LEAF_LITE_PRODUCT_ID 11
-#define USB_LEAF_PRO_PRODUCT_ID 12
-#define USB_LEAF_SPRO_PRODUCT_ID 14
-#define USB_LEAF_PRO_LS_PRODUCT_ID 15
-#define USB_LEAF_PRO_SWC_PRODUCT_ID 16
-#define USB_LEAF_PRO_LIN_PRODUCT_ID 17
-#define USB_LEAF_SPRO_LS_PRODUCT_ID 18
-#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19
-#define USB_MEMO2_DEVEL_PRODUCT_ID 22
-#define USB_MEMO2_HSHS_PRODUCT_ID 23
-#define USB_UPRO_HSHS_PRODUCT_ID 24
-#define USB_LEAF_LITE_GI_PRODUCT_ID 25
-#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26
-#define USB_MEMO2_HSLS_PRODUCT_ID 27
-#define USB_LEAF_LITE_CH_PRODUCT_ID 28
-#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29
-#define USB_OEM_MERCURY_PRODUCT_ID 34
-#define USB_OEM_LEAF_PRODUCT_ID 35
-#define USB_CAN_R_PRODUCT_ID 39
-#define USB_LEAF_LITE_V2_PRODUCT_ID 288
-#define USB_MINI_PCIE_HS_PRODUCT_ID 289
-#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
-#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
-#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
-
-static inline bool kvaser_is_leaf(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
- id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID;
-}
-
-/* Kvaser USBCan-II devices */
-#define USB_USBCAN_REVB_PRODUCT_ID 2
-#define USB_VCI2_PRODUCT_ID 3
-#define USB_USBCAN2_PRODUCT_ID 4
-#define USB_MEMORATOR_PRODUCT_ID 5
-
-static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
- id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
-}
-
-/* USB devices features */
-#define KVASER_HAS_SILENT_MODE BIT(0)
-#define KVASER_HAS_TXRX_ERRORS BIT(1)
-
-/* Message header size */
-#define MSG_HEADER_LEN 2
-
-/* Can message flags */
-#define MSG_FLAG_ERROR_FRAME BIT(0)
-#define MSG_FLAG_OVERRUN BIT(1)
-#define MSG_FLAG_NERR BIT(2)
-#define MSG_FLAG_WAKEUP BIT(3)
-#define MSG_FLAG_REMOTE_FRAME BIT(4)
-#define MSG_FLAG_RESERVED BIT(5)
-#define MSG_FLAG_TX_ACK BIT(6)
-#define MSG_FLAG_TX_REQUEST BIT(7)
-
-/* Can states (M16C CxSTRH register) */
-#define M16C_STATE_BUS_RESET BIT(0)
-#define M16C_STATE_BUS_ERROR BIT(4)
-#define M16C_STATE_BUS_PASSIVE BIT(5)
-#define M16C_STATE_BUS_OFF BIT(6)
-
-/* Can msg ids */
-#define CMD_RX_STD_MESSAGE 12
-#define CMD_TX_STD_MESSAGE 13
-#define CMD_RX_EXT_MESSAGE 14
-#define CMD_TX_EXT_MESSAGE 15
-#define CMD_SET_BUS_PARAMS 16
-#define CMD_GET_BUS_PARAMS 17
-#define CMD_GET_BUS_PARAMS_REPLY 18
-#define CMD_GET_CHIP_STATE 19
-#define CMD_CHIP_STATE_EVENT 20
-#define CMD_SET_CTRL_MODE 21
-#define CMD_GET_CTRL_MODE 22
-#define CMD_GET_CTRL_MODE_REPLY 23
-#define CMD_RESET_CHIP 24
-#define CMD_RESET_CARD 25
-#define CMD_START_CHIP 26
-#define CMD_START_CHIP_REPLY 27
-#define CMD_STOP_CHIP 28
-#define CMD_STOP_CHIP_REPLY 29
-
-#define CMD_LEAF_GET_CARD_INFO2 32
-#define CMD_USBCAN_RESET_CLOCK 32
-#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33
-
-#define CMD_GET_CARD_INFO 34
-#define CMD_GET_CARD_INFO_REPLY 35
-#define CMD_GET_SOFTWARE_INFO 38
-#define CMD_GET_SOFTWARE_INFO_REPLY 39
-#define CMD_ERROR_EVENT 45
-#define CMD_FLUSH_QUEUE 48
-#define CMD_RESET_ERROR_COUNTER 49
-#define CMD_TX_ACKNOWLEDGE 50
-#define CMD_CAN_ERROR_EVENT 51
-#define CMD_FLUSH_QUEUE_REPLY 68
-
-#define CMD_LEAF_USB_THROTTLE 77
-#define CMD_LEAF_LOG_MESSAGE 106
-
-/* error factors */
-#define M16C_EF_ACKE BIT(0)
-#define M16C_EF_CRCE BIT(1)
-#define M16C_EF_FORME BIT(2)
-#define M16C_EF_STFE BIT(3)
-#define M16C_EF_BITE0 BIT(4)
-#define M16C_EF_BITE1 BIT(5)
-#define M16C_EF_RCVE BIT(6)
-#define M16C_EF_TRE BIT(7)
-
-/* Only Leaf-based devices can report M16C error factors,
- * thus define our own error status flags for USBCANII
- */
-#define USBCAN_ERROR_STATE_NONE 0
-#define USBCAN_ERROR_STATE_TX_ERROR BIT(0)
-#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
-#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
-
-/* bittiming parameters */
-#define KVASER_USB_TSEG1_MIN 1
-#define KVASER_USB_TSEG1_MAX 16
-#define KVASER_USB_TSEG2_MIN 1
-#define KVASER_USB_TSEG2_MAX 8
-#define KVASER_USB_SJW_MAX 4
-#define KVASER_USB_BRP_MIN 1
-#define KVASER_USB_BRP_MAX 64
-#define KVASER_USB_BRP_INC 1
-
-/* ctrl modes */
-#define KVASER_CTRL_MODE_NORMAL 1
-#define KVASER_CTRL_MODE_SILENT 2
-#define KVASER_CTRL_MODE_SELFRECEPTION 3
-#define KVASER_CTRL_MODE_OFF 4
-
-/* Extended CAN identifier flag */
-#define KVASER_EXTENDED_FRAME BIT(31)
-
-/* Kvaser USB CAN dongles are divided into two major families:
- * - Leaf: Based on Renesas M32C, running firmware labeled as 'filo'
- * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios'
- */
-enum kvaser_usb_family {
- KVASER_LEAF,
- KVASER_USBCAN,
-};
-
-struct kvaser_msg_simple {
- u8 tid;
- u8 channel;
-} __packed;
-
-struct kvaser_msg_cardinfo {
- u8 tid;
- u8 nchannels;
- union {
- struct {
- __le32 serial_number;
- __le32 padding;
- } __packed leaf0;
- struct {
- __le32 serial_number_low;
- __le32 serial_number_high;
- } __packed usbcan0;
- } __packed;
- __le32 clock_resolution;
- __le32 mfgdate;
- u8 ean[8];
- u8 hw_revision;
- union {
- struct {
- u8 usb_hs_mode;
- } __packed leaf1;
- struct {
- u8 padding;
- } __packed usbcan1;
- } __packed;
- __le16 padding;
-} __packed;
-
-struct kvaser_msg_cardinfo2 {
- u8 tid;
- u8 reserved;
- u8 pcb_id[24];
- __le32 oem_unlock_code;
-} __packed;
-
-struct leaf_msg_softinfo {
- u8 tid;
- u8 padding0;
- __le32 sw_options;
- __le32 fw_version;
- __le16 max_outstanding_tx;
- __le16 padding1[9];
-} __packed;
-
-struct usbcan_msg_softinfo {
- u8 tid;
- u8 fw_name[5];
- __le16 max_outstanding_tx;
- u8 padding[6];
- __le32 fw_version;
- __le16 checksum;
- __le16 sw_options;
-} __packed;
-
-struct kvaser_msg_busparams {
- u8 tid;
- u8 channel;
- __le32 bitrate;
- u8 tseg1;
- u8 tseg2;
- u8 sjw;
- u8 no_samp;
-} __packed;
-
-struct kvaser_msg_tx_can {
- u8 channel;
- u8 tid;
- u8 msg[14];
- union {
- struct {
- u8 padding;
- u8 flags;
- } __packed leaf;
- struct {
- u8 flags;
- u8 padding;
- } __packed usbcan;
- } __packed;
-} __packed;
-
-struct kvaser_msg_rx_can_header {
- u8 channel;
- u8 flag;
-} __packed;
-
-struct leaf_msg_rx_can {
- u8 channel;
- u8 flag;
-
- __le16 time[3];
- u8 msg[14];
-} __packed;
-
-struct usbcan_msg_rx_can {
- u8 channel;
- u8 flag;
-
- u8 msg[14];
- __le16 time;
-} __packed;
-
-struct leaf_msg_chip_state_event {
- u8 tid;
- u8 channel;
-
- __le16 time[3];
- u8 tx_errors_count;
- u8 rx_errors_count;
-
- u8 status;
- u8 padding[3];
-} __packed;
-
-struct usbcan_msg_chip_state_event {
- u8 tid;
- u8 channel;
-
- u8 tx_errors_count;
- u8 rx_errors_count;
- __le16 time;
-
- u8 status;
- u8 padding[3];
-} __packed;
-
-struct kvaser_msg_tx_acknowledge_header {
- u8 channel;
- u8 tid;
-} __packed;
-
-struct leaf_msg_tx_acknowledge {
- u8 channel;
- u8 tid;
-
- __le16 time[3];
- u8 flags;
- u8 time_offset;
-} __packed;
-
-struct usbcan_msg_tx_acknowledge {
- u8 channel;
- u8 tid;
-
- __le16 time;
- __le16 padding;
-} __packed;
-
-struct leaf_msg_error_event {
- u8 tid;
- u8 flags;
- __le16 time[3];
- u8 channel;
- u8 padding;
- u8 tx_errors_count;
- u8 rx_errors_count;
- u8 status;
- u8 error_factor;
-} __packed;
-
-struct usbcan_msg_error_event {
- u8 tid;
- u8 padding;
- u8 tx_errors_count_ch0;
- u8 rx_errors_count_ch0;
- u8 tx_errors_count_ch1;
- u8 rx_errors_count_ch1;
- u8 status_ch0;
- u8 status_ch1;
- __le16 time;
-} __packed;
-
-struct kvaser_msg_ctrl_mode {
- u8 tid;
- u8 channel;
- u8 ctrl_mode;
- u8 padding[3];
-} __packed;
-
-struct kvaser_msg_flush_queue {
- u8 tid;
- u8 channel;
- u8 flags;
- u8 padding[3];
-} __packed;
-
-struct leaf_msg_log_message {
- u8 channel;
- u8 flags;
- __le16 time[3];
- u8 dlc;
- u8 time_offset;
- __le32 id;
- u8 data[8];
-} __packed;
-
-struct kvaser_msg {
- u8 len;
- u8 id;
- union {
- struct kvaser_msg_simple simple;
- struct kvaser_msg_cardinfo cardinfo;
- struct kvaser_msg_cardinfo2 cardinfo2;
- struct kvaser_msg_busparams busparams;
-
- struct kvaser_msg_rx_can_header rx_can_header;
- struct kvaser_msg_tx_acknowledge_header tx_acknowledge_header;
-
- union {
- struct leaf_msg_softinfo softinfo;
- struct leaf_msg_rx_can rx_can;
- struct leaf_msg_chip_state_event chip_state_event;
- struct leaf_msg_tx_acknowledge tx_acknowledge;
- struct leaf_msg_error_event error_event;
- struct leaf_msg_log_message log_message;
- } __packed leaf;
-
- union {
- struct usbcan_msg_softinfo softinfo;
- struct usbcan_msg_rx_can rx_can;
- struct usbcan_msg_chip_state_event chip_state_event;
- struct usbcan_msg_tx_acknowledge tx_acknowledge;
- struct usbcan_msg_error_event error_event;
- } __packed usbcan;
-
- struct kvaser_msg_tx_can tx_can;
- struct kvaser_msg_ctrl_mode ctrl_mode;
- struct kvaser_msg_flush_queue flush_queue;
- } u;
-} __packed;
-
-/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
- * handling. Some discrepancies between the two families exist:
- *
- * - USBCAN firmware does not report M16C "error factors"
- * - USBCAN controllers has difficulties reporting if the raised error
- * event is for ch0 or ch1. They leave such arbitration to the OS
- * driver by letting it compare error counters with previous values
- * and decide the error event's channel. Thus for USBCAN, the channel
- * field is only advisory.
- */
-struct kvaser_usb_error_summary {
- u8 channel, status, txerr, rxerr;
- union {
- struct {
- u8 error_factor;
- } leaf;
- struct {
- u8 other_ch_status;
- u8 error_state;
- } usbcan;
- };
-};
-
-/* Context for an outstanding, not yet ACKed, transmission */
-struct kvaser_usb_tx_urb_context {
- struct kvaser_usb_net_priv *priv;
- u32 echo_index;
- int dlc;
-};
-
-struct kvaser_usb {
- struct usb_device *udev;
- struct kvaser_usb_net_priv *nets[MAX_NET_DEVICES];
-
- struct usb_endpoint_descriptor *bulk_in, *bulk_out;
- struct usb_anchor rx_submitted;
-
- /* @max_tx_urbs: Firmware-reported maximum number of outstanding,
- * not yet ACKed, transmissions on this device. This value is
- * also used as a sentinel for marking free tx contexts.
- */
- u32 fw_version;
- unsigned int nchannels;
- unsigned int max_tx_urbs;
- enum kvaser_usb_family family;
-
- bool rxinitdone;
- void *rxbuf[MAX_RX_URBS];
- dma_addr_t rxbuf_dma[MAX_RX_URBS];
-};
-
-struct kvaser_usb_net_priv {
- struct can_priv can;
- struct can_berr_counter bec;
-
- struct kvaser_usb *dev;
- struct net_device *netdev;
- int channel;
-
- struct completion start_comp, stop_comp;
- struct usb_anchor tx_submitted;
-
- spinlock_t tx_contexts_lock;
- int active_tx_contexts;
- struct kvaser_usb_tx_urb_context tx_contexts[];
-};
-
-static const struct usb_device_id kvaser_usb_table[] = {
- /* Leaf family IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
-
- /* USBCANII family IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
-
- { }
-};
-MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
-
-static inline int kvaser_usb_send_msg(const struct kvaser_usb *dev,
- struct kvaser_msg *msg)
-{
- int actual_len;
-
- return usb_bulk_msg(dev->udev,
- usb_sndbulkpipe(dev->udev,
- dev->bulk_out->bEndpointAddress),
- msg, msg->len, &actual_len,
- USB_SEND_TIMEOUT);
-}
-
-static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
- struct kvaser_msg *msg)
-{
- struct kvaser_msg *tmp;
- void *buf;
- int actual_len;
- int err;
- int pos;
- unsigned long to = jiffies + msecs_to_jiffies(USB_RECV_TIMEOUT);
-
- buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- do {
- err = usb_bulk_msg(dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->bulk_in->bEndpointAddress),
- buf, RX_BUFFER_SIZE, &actual_len,
- USB_RECV_TIMEOUT);
- if (err < 0)
- goto end;
-
- pos = 0;
- while (pos <= actual_len - MSG_HEADER_LEN) {
- tmp = buf + pos;
-
- /* Handle messages crossing the USB endpoint max packet
- * size boundary. Check kvaser_usb_read_bulk_callback()
- * for further details.
- */
- if (tmp->len == 0) {
- pos = round_up(pos, le16_to_cpu(dev->bulk_in->
- wMaxPacketSize));
- continue;
- }
-
- if (pos + tmp->len > actual_len) {
- dev_err_ratelimited(dev->udev->dev.parent,
- "Format error\n");
- break;
- }
-
- if (tmp->id == id) {
- memcpy(msg, tmp, tmp->len);
- goto end;
- }
-
- pos += tmp->len;
- }
- } while (time_before(jiffies, to));
-
- err = -EINVAL;
-
-end:
- kfree(buf);
-
- return err;
-}
-
-static int kvaser_usb_send_simple_msg(const struct kvaser_usb *dev,
- u8 msg_id, int channel)
-{
- struct kvaser_msg *msg;
- int rc;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->id = msg_id;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
- msg->u.simple.channel = channel;
- msg->u.simple.tid = 0xff;
-
- rc = kvaser_usb_send_msg(dev, msg);
-
- kfree(msg);
- return rc;
-}
-
-static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
-{
- struct kvaser_msg msg;
- int err;
-
- err = kvaser_usb_send_simple_msg(dev, CMD_GET_SOFTWARE_INFO, 0);
- if (err)
- return err;
-
- err = kvaser_usb_wait_msg(dev, CMD_GET_SOFTWARE_INFO_REPLY, &msg);
- if (err)
- return err;
-
- switch (dev->family) {
- case KVASER_LEAF:
- dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
- dev->max_tx_urbs =
- le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx);
- break;
- case KVASER_USBCAN:
- dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
- dev->max_tx_urbs =
- le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx);
- break;
- }
-
- return 0;
-}
-
-static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
-{
- struct kvaser_msg msg;
- int err;
-
- err = kvaser_usb_send_simple_msg(dev, CMD_GET_CARD_INFO, 0);
- if (err)
- return err;
-
- err = kvaser_usb_wait_msg(dev, CMD_GET_CARD_INFO_REPLY, &msg);
- if (err)
- return err;
-
- dev->nchannels = msg.u.cardinfo.nchannels;
- if ((dev->nchannels > MAX_NET_DEVICES) ||
- (dev->family == KVASER_USBCAN &&
- dev->nchannels > MAX_USBCAN_NET_DEVICES))
- return -EINVAL;
-
- return 0;
-}
-
-static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct net_device_stats *stats;
- struct kvaser_usb_tx_urb_context *context;
- struct kvaser_usb_net_priv *priv;
- struct sk_buff *skb;
- struct can_frame *cf;
- unsigned long flags;
- u8 channel, tid;
-
- channel = msg->u.tx_acknowledge_header.channel;
- tid = msg->u.tx_acknowledge_header.tid;
-
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
-
- if (!netif_device_present(priv->netdev))
- return;
-
- stats = &priv->netdev->stats;
-
- context = &priv->tx_contexts[tid % dev->max_tx_urbs];
-
- /* Sometimes the state change doesn't come after a bus-off event */
- if (priv->can.restart_ms &&
- (priv->can.state >= CAN_STATE_BUS_OFF)) {
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (skb) {
- cf->can_id |= CAN_ERR_RESTARTED;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
- } else {
- netdev_err(priv->netdev,
- "No memory left for err_skb\n");
- }
-
- priv->can.can_stats.restarts++;
- netif_carrier_on(priv->netdev);
-
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
- }
-
- stats->tx_packets++;
- stats->tx_bytes += context->dlc;
-
- spin_lock_irqsave(&priv->tx_contexts_lock, flags);
-
- can_get_echo_skb(priv->netdev, context->echo_index);
- context->echo_index = dev->max_tx_urbs;
- --priv->active_tx_contexts;
- netif_wake_queue(priv->netdev);
-
- spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
-}
-
-static void kvaser_usb_simple_msg_callback(struct urb *urb)
-{
- struct net_device *netdev = urb->context;
-
- kfree(urb->transfer_buffer);
-
- if (urb->status)
- netdev_warn(netdev, "urb status received: %d\n",
- urb->status);
-}
-
-static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
- u8 msg_id)
-{
- struct kvaser_usb *dev = priv->dev;
- struct net_device *netdev = priv->netdev;
- struct kvaser_msg *msg;
- struct urb *urb;
- void *buf;
- int err;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- return -ENOMEM;
-
- buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
- if (!buf) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- msg = (struct kvaser_msg *)buf;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
- msg->id = msg_id;
- msg->u.simple.channel = priv->channel;
-
- usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev,
- dev->bulk_out->bEndpointAddress),
- buf, msg->len,
- kvaser_usb_simple_msg_callback, netdev);
- usb_anchor_urb(urb, &priv->tx_submitted);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err) {
- netdev_err(netdev, "Error transmitting URB\n");
- usb_unanchor_urb(urb);
- kfree(buf);
- usb_free_urb(urb);
- return err;
- }
-
- usb_free_urb(urb);
-
- return 0;
-}
-
-static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
- const struct kvaser_usb_error_summary *es,
- struct can_frame *cf)
-{
- struct kvaser_usb *dev = priv->dev;
- struct net_device_stats *stats = &priv->netdev->stats;
- enum can_state cur_state, new_state, tx_state, rx_state;
-
- netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status);
-
- new_state = cur_state = priv->can.state;
-
- if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET))
- new_state = CAN_STATE_BUS_OFF;
- else if (es->status & M16C_STATE_BUS_PASSIVE)
- new_state = CAN_STATE_ERROR_PASSIVE;
- else if (es->status & M16C_STATE_BUS_ERROR) {
- /* Guard against spurious error events after a busoff */
- if (cur_state < CAN_STATE_BUS_OFF) {
- if ((es->txerr >= 128) || (es->rxerr >= 128))
- new_state = CAN_STATE_ERROR_PASSIVE;
- else if ((es->txerr >= 96) || (es->rxerr >= 96))
- new_state = CAN_STATE_ERROR_WARNING;
- else if (cur_state > CAN_STATE_ERROR_ACTIVE)
- new_state = CAN_STATE_ERROR_ACTIVE;
- }
- }
-
- if (!es->status)
- new_state = CAN_STATE_ERROR_ACTIVE;
-
- if (new_state != cur_state) {
- tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
- rx_state = (es->txerr <= es->rxerr) ? new_state : 0;
-
- can_change_state(priv->netdev, cf, tx_state, rx_state);
- }
-
- if (priv->can.restart_ms &&
- (cur_state >= CAN_STATE_BUS_OFF) &&
- (new_state < CAN_STATE_BUS_OFF)) {
- priv->can.can_stats.restarts++;
- }
-
- switch (dev->family) {
- case KVASER_LEAF:
- if (es->leaf.error_factor) {
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- }
- break;
- case KVASER_USBCAN:
- if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR)
- stats->tx_errors++;
- if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR)
- stats->rx_errors++;
- if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) {
- priv->can.can_stats.bus_error++;
- }
- break;
- }
-
- priv->bec.txerr = es->txerr;
- priv->bec.rxerr = es->rxerr;
-}
-
-static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
- const struct kvaser_usb_error_summary *es)
-{
- struct can_frame *cf, tmp_cf = { .can_id = CAN_ERR_FLAG, .can_dlc = CAN_ERR_DLC };
- struct sk_buff *skb;
- struct net_device_stats *stats;
- struct kvaser_usb_net_priv *priv;
- enum can_state old_state, new_state;
-
- if (es->channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", es->channel);
- return;
- }
-
- priv = dev->nets[es->channel];
- stats = &priv->netdev->stats;
-
- /* Update all of the can interface's state and error counters before
- * trying any memory allocation that can actually fail with -ENOMEM.
- *
- * We send a temporary stack-allocated error can frame to
- * can_change_state() for the very same reason.
- *
- * TODO: Split can_change_state() responsibility between updating the
- * can interface's state and counters, and the setting up of can error
- * frame ID and data to userspace. Remove stack allocation afterwards.
- */
- old_state = priv->can.state;
- kvaser_usb_rx_error_update_can_state(priv, es, &tmp_cf);
- new_state = priv->can.state;
-
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
- memcpy(cf, &tmp_cf, sizeof(*cf));
-
- if (new_state != old_state) {
- if (es->status &
- (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
- if (!priv->can.restart_ms)
- kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP);
- netif_carrier_off(priv->netdev);
- }
-
- if (priv->can.restart_ms &&
- (old_state >= CAN_STATE_BUS_OFF) &&
- (new_state < CAN_STATE_BUS_OFF)) {
- cf->can_id |= CAN_ERR_RESTARTED;
- netif_carrier_on(priv->netdev);
- }
- }
-
- switch (dev->family) {
- case KVASER_LEAF:
- if (es->leaf.error_factor) {
- cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
-
- if (es->leaf.error_factor & M16C_EF_ACKE)
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
- if (es->leaf.error_factor & M16C_EF_CRCE)
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
- if (es->leaf.error_factor & M16C_EF_FORME)
- cf->data[2] |= CAN_ERR_PROT_FORM;
- if (es->leaf.error_factor & M16C_EF_STFE)
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- if (es->leaf.error_factor & M16C_EF_BITE0)
- cf->data[2] |= CAN_ERR_PROT_BIT0;
- if (es->leaf.error_factor & M16C_EF_BITE1)
- cf->data[2] |= CAN_ERR_PROT_BIT1;
- if (es->leaf.error_factor & M16C_EF_TRE)
- cf->data[2] |= CAN_ERR_PROT_TX;
- }
- break;
- case KVASER_USBCAN:
- if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) {
- cf->can_id |= CAN_ERR_BUSERROR;
- }
- break;
- }
-
- cf->data[6] = es->txerr;
- cf->data[7] = es->rxerr;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
-}
-
-/* For USBCAN, report error to userspace iff the channels's errors counter
- * has changed, or we're the only channel seeing a bus error state.
- */
-static void kvaser_usbcan_conditionally_rx_error(const struct kvaser_usb *dev,
- struct kvaser_usb_error_summary *es)
-{
- struct kvaser_usb_net_priv *priv;
- int channel;
- bool report_error;
-
- channel = es->channel;
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
- report_error = false;
-
- if (es->txerr != priv->bec.txerr) {
- es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR;
- report_error = true;
- }
- if (es->rxerr != priv->bec.rxerr) {
- es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR;
- report_error = true;
- }
- if ((es->status & M16C_STATE_BUS_ERROR) &&
- !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) {
- es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR;
- report_error = true;
- }
-
- if (report_error)
- kvaser_usb_rx_error(dev, es);
-}
-
-static void kvaser_usbcan_rx_error(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_error_summary es = { };
-
- switch (msg->id) {
- /* Sometimes errors are sent as unsolicited chip state events */
- case CMD_CHIP_STATE_EVENT:
- es.channel = msg->u.usbcan.chip_state_event.channel;
- es.status = msg->u.usbcan.chip_state_event.status;
- es.txerr = msg->u.usbcan.chip_state_event.tx_errors_count;
- es.rxerr = msg->u.usbcan.chip_state_event.rx_errors_count;
- kvaser_usbcan_conditionally_rx_error(dev, &es);
- break;
-
- case CMD_CAN_ERROR_EVENT:
- es.channel = 0;
- es.status = msg->u.usbcan.error_event.status_ch0;
- es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch0;
- es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch0;
- es.usbcan.other_ch_status =
- msg->u.usbcan.error_event.status_ch1;
- kvaser_usbcan_conditionally_rx_error(dev, &es);
-
- /* The USBCAN firmware supports up to 2 channels.
- * Now that ch0 was checked, check if ch1 has any errors.
- */
- if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
- es.channel = 1;
- es.status = msg->u.usbcan.error_event.status_ch1;
- es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch1;
- es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch1;
- es.usbcan.other_ch_status =
- msg->u.usbcan.error_event.status_ch0;
- kvaser_usbcan_conditionally_rx_error(dev, &es);
- }
- break;
-
- default:
- dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
- msg->id);
- }
-}
-
-static void kvaser_leaf_rx_error(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_error_summary es = { };
-
- switch (msg->id) {
- case CMD_CAN_ERROR_EVENT:
- es.channel = msg->u.leaf.error_event.channel;
- es.status = msg->u.leaf.error_event.status;
- es.txerr = msg->u.leaf.error_event.tx_errors_count;
- es.rxerr = msg->u.leaf.error_event.rx_errors_count;
- es.leaf.error_factor = msg->u.leaf.error_event.error_factor;
- break;
- case CMD_LEAF_LOG_MESSAGE:
- es.channel = msg->u.leaf.log_message.channel;
- es.status = msg->u.leaf.log_message.data[0];
- es.txerr = msg->u.leaf.log_message.data[2];
- es.rxerr = msg->u.leaf.log_message.data[3];
- es.leaf.error_factor = msg->u.leaf.log_message.data[1];
- break;
- case CMD_CHIP_STATE_EVENT:
- es.channel = msg->u.leaf.chip_state_event.channel;
- es.status = msg->u.leaf.chip_state_event.status;
- es.txerr = msg->u.leaf.chip_state_event.tx_errors_count;
- es.rxerr = msg->u.leaf.chip_state_event.rx_errors_count;
- es.leaf.error_factor = 0;
- break;
- default:
- dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
- msg->id);
- return;
- }
-
- kvaser_usb_rx_error(dev, &es);
-}
-
-static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
- const struct kvaser_msg *msg)
-{
- struct can_frame *cf;
- struct sk_buff *skb;
- struct net_device_stats *stats = &priv->netdev->stats;
-
- if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
- MSG_FLAG_NERR)) {
- netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
- msg->u.rx_can_header.flag);
-
- stats->rx_errors++;
- return;
- }
-
- if (msg->u.rx_can_header.flag & MSG_FLAG_OVERRUN) {
- stats->rx_over_errors++;
- stats->rx_errors++;
-
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
- }
-}
-
-static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_net_priv *priv;
- struct can_frame *cf;
- struct sk_buff *skb;
- struct net_device_stats *stats;
- u8 channel = msg->u.rx_can_header.channel;
- const u8 *rx_msg = NULL; /* GCC */
-
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
- stats = &priv->netdev->stats;
-
- if ((msg->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
- (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE)) {
- kvaser_leaf_rx_error(dev, msg);
- return;
- } else if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
- MSG_FLAG_NERR |
- MSG_FLAG_OVERRUN)) {
- kvaser_usb_rx_can_err(priv, msg);
- return;
- } else if (msg->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) {
- netdev_warn(priv->netdev,
- "Unhandled frame (flags: 0x%02x)",
- msg->u.rx_can_header.flag);
- return;
- }
-
- switch (dev->family) {
- case KVASER_LEAF:
- rx_msg = msg->u.leaf.rx_can.msg;
- break;
- case KVASER_USBCAN:
- rx_msg = msg->u.usbcan.rx_can.msg;
- break;
- }
-
- skb = alloc_can_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
-
- if (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE) {
- cf->can_id = le32_to_cpu(msg->u.leaf.log_message.id);
- if (cf->can_id & KVASER_EXTENDED_FRAME)
- cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
- else
- cf->can_id &= CAN_SFF_MASK;
-
- cf->can_dlc = get_can_dlc(msg->u.leaf.log_message.dlc);
-
- if (msg->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
- cf->can_id |= CAN_RTR_FLAG;
- else
- memcpy(cf->data, &msg->u.leaf.log_message.data,
- cf->can_dlc);
- } else {
- cf->can_id = ((rx_msg[0] & 0x1f) << 6) | (rx_msg[1] & 0x3f);
-
- if (msg->id == CMD_RX_EXT_MESSAGE) {
- cf->can_id <<= 18;
- cf->can_id |= ((rx_msg[2] & 0x0f) << 14) |
- ((rx_msg[3] & 0xff) << 6) |
- (rx_msg[4] & 0x3f);
- cf->can_id |= CAN_EFF_FLAG;
- }
-
- cf->can_dlc = get_can_dlc(rx_msg[5]);
-
- if (msg->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
- cf->can_id |= CAN_RTR_FLAG;
- else
- memcpy(cf->data, &rx_msg[6],
- cf->can_dlc);
- }
-
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
-}
-
-static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_net_priv *priv;
- u8 channel = msg->u.simple.channel;
-
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
-
- if (completion_done(&priv->start_comp) &&
- netif_queue_stopped(priv->netdev)) {
- netif_wake_queue(priv->netdev);
- } else {
- netif_start_queue(priv->netdev);
- complete(&priv->start_comp);
- }
-}
-
-static void kvaser_usb_stop_chip_reply(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_net_priv *priv;
- u8 channel = msg->u.simple.channel;
-
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
-
- complete(&priv->stop_comp);
-}
-
-static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- switch (msg->id) {
- case CMD_START_CHIP_REPLY:
- kvaser_usb_start_chip_reply(dev, msg);
- break;
-
- case CMD_STOP_CHIP_REPLY:
- kvaser_usb_stop_chip_reply(dev, msg);
- break;
-
- case CMD_RX_STD_MESSAGE:
- case CMD_RX_EXT_MESSAGE:
- kvaser_usb_rx_can_msg(dev, msg);
- break;
-
- case CMD_LEAF_LOG_MESSAGE:
- if (dev->family != KVASER_LEAF)
- goto warn;
- kvaser_usb_rx_can_msg(dev, msg);
- break;
-
- case CMD_CHIP_STATE_EVENT:
- case CMD_CAN_ERROR_EVENT:
- if (dev->family == KVASER_LEAF)
- kvaser_leaf_rx_error(dev, msg);
- else
- kvaser_usbcan_rx_error(dev, msg);
- break;
-
- case CMD_TX_ACKNOWLEDGE:
- kvaser_usb_tx_acknowledge(dev, msg);
- break;
-
- /* Ignored messages */
- case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
- if (dev->family != KVASER_USBCAN)
- goto warn;
- break;
-
- case CMD_FLUSH_QUEUE_REPLY:
- if (dev->family != KVASER_LEAF)
- goto warn;
- break;
-
- default:
-warn: dev_warn(dev->udev->dev.parent,
- "Unhandled message (%d)\n", msg->id);
- break;
- }
-}
-
-static void kvaser_usb_read_bulk_callback(struct urb *urb)
-{
- struct kvaser_usb *dev = urb->context;
- struct kvaser_msg *msg;
- int pos = 0;
- int err, i;
-
- switch (urb->status) {
- case 0:
- break;
- case -ENOENT:
- case -EPIPE:
- case -EPROTO:
- case -ESHUTDOWN:
- return;
- default:
- dev_info(dev->udev->dev.parent, "Rx URB aborted (%d)\n",
- urb->status);
- goto resubmit_urb;
- }
-
- while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
- msg = urb->transfer_buffer + pos;
-
-		/* The Kvaser firmware can only read and write messages that
-		 * do not cross the USB endpoint's wMaxPacketSize boundary.
-		 * If a follow-up command would cross such a boundary, the
-		 * firmware puts a placeholder zero-length command in its
-		 * place and aligns the real command to the next max packet
-		 * size.
-		 *
-		 * Handle such cases, or we will miss a significant number
-		 * of events under heavy RX load on the bus.
-		 */
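-		/* Example: with wMaxPacketSize == 64, a zero-length
-		 * placeholder found at pos == 61 makes round_up(61, 64)
-		 * resume parsing at offset 64, the next packet boundary.
-		 */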
- if (msg->len == 0) {
-			pos = round_up(pos,
-				       le16_to_cpu(dev->bulk_in->wMaxPacketSize));
- continue;
- }
-
- if (pos + msg->len > urb->actual_length) {
- dev_err_ratelimited(dev->udev->dev.parent,
- "Format error\n");
- break;
- }
-
- kvaser_usb_handle_message(dev, msg);
- pos += msg->len;
- }
-
-resubmit_urb:
- usb_fill_bulk_urb(urb, dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->bulk_in->bEndpointAddress),
- urb->transfer_buffer, RX_BUFFER_SIZE,
- kvaser_usb_read_bulk_callback, dev);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err == -ENODEV) {
- for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
- continue;
-
- netif_device_detach(dev->nets[i]->netdev);
- }
- } else if (err) {
- dev_err(dev->udev->dev.parent,
- "Failed resubmitting read bulk urb: %d\n", err);
- }
-
- return;
-}
-
-static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev)
-{
- int i, err = 0;
-
- if (dev->rxinitdone)
- return 0;
-
- for (i = 0; i < MAX_RX_URBS; i++) {
- struct urb *urb = NULL;
- u8 *buf = NULL;
- dma_addr_t buf_dma;
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- err = -ENOMEM;
- break;
- }
-
- buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE,
- GFP_KERNEL, &buf_dma);
- if (!buf) {
- dev_warn(dev->udev->dev.parent,
- "No memory left for USB buffer\n");
- usb_free_urb(urb);
- err = -ENOMEM;
- break;
- }
-
- usb_fill_bulk_urb(urb, dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->bulk_in->bEndpointAddress),
- buf, RX_BUFFER_SIZE,
- kvaser_usb_read_bulk_callback,
- dev);
- urb->transfer_dma = buf_dma;
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- usb_anchor_urb(urb, &dev->rx_submitted);
-
- err = usb_submit_urb(urb, GFP_KERNEL);
- if (err) {
- usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
- buf_dma);
- usb_free_urb(urb);
- break;
- }
-
- dev->rxbuf[i] = buf;
- dev->rxbuf_dma[i] = buf_dma;
-
- usb_free_urb(urb);
- }
-
- if (i == 0) {
- dev_warn(dev->udev->dev.parent,
- "Cannot setup read URBs, error %d\n", err);
- return err;
- } else if (i < MAX_RX_URBS) {
- dev_warn(dev->udev->dev.parent,
- "RX performances may be slow\n");
- }
-
- dev->rxinitdone = true;
-
- return 0;
-}
-
-static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv)
-{
- struct kvaser_msg *msg;
- int rc;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->id = CMD_SET_CTRL_MODE;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_ctrl_mode);
- msg->u.ctrl_mode.tid = 0xff;
- msg->u.ctrl_mode.channel = priv->channel;
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT;
- else
- msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL;
-
- rc = kvaser_usb_send_msg(priv->dev, msg);
-
- kfree(msg);
- return rc;
-}
-
-static int kvaser_usb_start_chip(struct kvaser_usb_net_priv *priv)
-{
- int err;
-
- init_completion(&priv->start_comp);
-
- err = kvaser_usb_send_simple_msg(priv->dev, CMD_START_CHIP,
- priv->channel);
- if (err)
- return err;
-
- if (!wait_for_completion_timeout(&priv->start_comp,
- msecs_to_jiffies(START_TIMEOUT)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int kvaser_usb_open(struct net_device *netdev)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct kvaser_usb *dev = priv->dev;
- int err;
-
- err = open_candev(netdev);
- if (err)
- return err;
-
- err = kvaser_usb_setup_rx_urbs(dev);
- if (err)
- goto error;
-
- err = kvaser_usb_set_opt_mode(priv);
- if (err)
- goto error;
-
- err = kvaser_usb_start_chip(priv);
- if (err) {
- netdev_warn(netdev, "Cannot start device, error %d\n", err);
- goto error;
- }
-
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
-
- return 0;
-
-error:
- close_candev(netdev);
- return err;
-}
-
-static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
-{
- int i, max_tx_urbs;
-
- max_tx_urbs = priv->dev->max_tx_urbs;
-
- priv->active_tx_contexts = 0;
- for (i = 0; i < max_tx_urbs; i++)
- priv->tx_contexts[i].echo_index = max_tx_urbs;
-}
-
-/* This method might sleep. Do not call it in the atomic context
- * of URB completions.
- */
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
-{
- usb_kill_anchored_urbs(&priv->tx_submitted);
- kvaser_usb_reset_tx_urb_contexts(priv);
-}
-
-static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
-{
- int i;
-
- usb_kill_anchored_urbs(&dev->rx_submitted);
-
- for (i = 0; i < MAX_RX_URBS; i++)
- usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
- dev->rxbuf[i],
- dev->rxbuf_dma[i]);
-
- for (i = 0; i < dev->nchannels; i++) {
- struct kvaser_usb_net_priv *priv = dev->nets[i];
-
- if (priv)
- kvaser_usb_unlink_tx_urbs(priv);
- }
-}
-
-static int kvaser_usb_stop_chip(struct kvaser_usb_net_priv *priv)
-{
- int err;
-
- init_completion(&priv->stop_comp);
-
- err = kvaser_usb_send_simple_msg(priv->dev, CMD_STOP_CHIP,
- priv->channel);
- if (err)
- return err;
-
- if (!wait_for_completion_timeout(&priv->stop_comp,
- msecs_to_jiffies(STOP_TIMEOUT)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv)
-{
- struct kvaser_msg *msg;
- int rc;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->id = CMD_FLUSH_QUEUE;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_flush_queue);
- msg->u.flush_queue.channel = priv->channel;
- msg->u.flush_queue.flags = 0x00;
-
- rc = kvaser_usb_send_msg(priv->dev, msg);
-
- kfree(msg);
- return rc;
-}
-
-static int kvaser_usb_close(struct net_device *netdev)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct kvaser_usb *dev = priv->dev;
- int err;
-
- netif_stop_queue(netdev);
-
- err = kvaser_usb_flush_queue(priv);
- if (err)
- netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
-
- err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
- if (err)
- netdev_warn(netdev, "Cannot reset card, error %d\n", err);
-
- err = kvaser_usb_stop_chip(priv);
- if (err)
- netdev_warn(netdev, "Cannot stop device, error %d\n", err);
-
- /* reset tx contexts */
- kvaser_usb_unlink_tx_urbs(priv);
-
- priv->can.state = CAN_STATE_STOPPED;
- close_candev(priv->netdev);
-
- return 0;
-}
-
-static void kvaser_usb_write_bulk_callback(struct urb *urb)
-{
- struct kvaser_usb_tx_urb_context *context = urb->context;
- struct kvaser_usb_net_priv *priv;
- struct net_device *netdev;
-
- if (WARN_ON(!context))
- return;
-
- priv = context->priv;
- netdev = priv->netdev;
-
- kfree(urb->transfer_buffer);
-
- if (!netif_device_present(netdev))
- return;
-
- if (urb->status)
- netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
-}
-
-static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct kvaser_usb *dev = priv->dev;
- struct net_device_stats *stats = &netdev->stats;
- struct can_frame *cf = (struct can_frame *)skb->data;
- struct kvaser_usb_tx_urb_context *context = NULL;
- struct urb *urb;
- void *buf;
- struct kvaser_msg *msg;
- int i, err, ret = NETDEV_TX_OK;
-	u8 *msg_tx_can_flags = NULL; /* silence GCC maybe-uninitialized warning */
- unsigned long flags;
-
- if (can_dropped_invalid_skb(netdev, skb))
- return NETDEV_TX_OK;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- stats->tx_dropped++;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
- if (!buf) {
- stats->tx_dropped++;
- dev_kfree_skb(skb);
- goto freeurb;
- }
-
- msg = buf;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can);
- msg->u.tx_can.channel = priv->channel;
-
- switch (dev->family) {
- case KVASER_LEAF:
- msg_tx_can_flags = &msg->u.tx_can.leaf.flags;
- break;
- case KVASER_USBCAN:
- msg_tx_can_flags = &msg->u.tx_can.usbcan.flags;
- break;
- }
-
- *msg_tx_can_flags = 0;
-
- if (cf->can_id & CAN_EFF_FLAG) {
- msg->id = CMD_TX_EXT_MESSAGE;
- msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f;
- msg->u.tx_can.msg[1] = (cf->can_id >> 18) & 0x3f;
- msg->u.tx_can.msg[2] = (cf->can_id >> 14) & 0x0f;
- msg->u.tx_can.msg[3] = (cf->can_id >> 6) & 0xff;
- msg->u.tx_can.msg[4] = cf->can_id & 0x3f;
- } else {
- msg->id = CMD_TX_STD_MESSAGE;
- msg->u.tx_can.msg[0] = (cf->can_id >> 6) & 0x1f;
- msg->u.tx_can.msg[1] = cf->can_id & 0x3f;
- }
-
- msg->u.tx_can.msg[5] = cf->can_dlc;
- memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc);
-
- if (cf->can_id & CAN_RTR_FLAG)
- *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
-
- spin_lock_irqsave(&priv->tx_contexts_lock, flags);
- for (i = 0; i < dev->max_tx_urbs; i++) {
- if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
- context = &priv->tx_contexts[i];
-
- context->echo_index = i;
- can_put_echo_skb(skb, netdev, context->echo_index);
- ++priv->active_tx_contexts;
- if (priv->active_tx_contexts >= dev->max_tx_urbs)
- netif_stop_queue(netdev);
-
- break;
- }
- }
- spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
-
- /* This should never happen; it implies a flow control bug */
- if (!context) {
- netdev_warn(netdev, "cannot find free context\n");
-
- kfree(buf);
- ret = NETDEV_TX_BUSY;
- goto freeurb;
- }
-
- context->priv = priv;
- context->dlc = cf->can_dlc;
-
- msg->u.tx_can.tid = context->echo_index;
-
- usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev,
- dev->bulk_out->bEndpointAddress),
- buf, msg->len,
- kvaser_usb_write_bulk_callback, context);
- usb_anchor_urb(urb, &priv->tx_submitted);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(err)) {
- spin_lock_irqsave(&priv->tx_contexts_lock, flags);
-
- can_free_echo_skb(netdev, context->echo_index);
- context->echo_index = dev->max_tx_urbs;
- --priv->active_tx_contexts;
- netif_wake_queue(netdev);
-
- spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
-
- usb_unanchor_urb(urb);
- kfree(buf);
-
- stats->tx_dropped++;
-
- if (err == -ENODEV)
- netif_device_detach(netdev);
- else
- netdev_warn(netdev, "Failed tx_urb %d\n", err);
-
- goto freeurb;
- }
-
- ret = NETDEV_TX_OK;
-
-freeurb:
- usb_free_urb(urb);
- return ret;
-}
-
-static const struct net_device_ops kvaser_usb_netdev_ops = {
- .ndo_open = kvaser_usb_open,
- .ndo_stop = kvaser_usb_close,
- .ndo_start_xmit = kvaser_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
-};
-
-static const struct can_bittiming_const kvaser_usb_bittiming_const = {
- .name = "kvaser_usb",
- .tseg1_min = KVASER_USB_TSEG1_MIN,
- .tseg1_max = KVASER_USB_TSEG1_MAX,
- .tseg2_min = KVASER_USB_TSEG2_MIN,
- .tseg2_max = KVASER_USB_TSEG2_MAX,
- .sjw_max = KVASER_USB_SJW_MAX,
- .brp_min = KVASER_USB_BRP_MIN,
- .brp_max = KVASER_USB_BRP_MAX,
- .brp_inc = KVASER_USB_BRP_INC,
-};
-
-static int kvaser_usb_set_bittiming(struct net_device *netdev)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *bt = &priv->can.bittiming;
- struct kvaser_usb *dev = priv->dev;
- struct kvaser_msg *msg;
- int rc;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->id = CMD_SET_BUS_PARAMS;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_busparams);
- msg->u.busparams.channel = priv->channel;
- msg->u.busparams.tid = 0xff;
- msg->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
- msg->u.busparams.sjw = bt->sjw;
- msg->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
- msg->u.busparams.tseg2 = bt->phase_seg2;
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- msg->u.busparams.no_samp = 3;
- else
- msg->u.busparams.no_samp = 1;
-
- rc = kvaser_usb_send_msg(dev, msg);
-
- kfree(msg);
- return rc;
-}
-
-static int kvaser_usb_set_mode(struct net_device *netdev,
- enum can_mode mode)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- int err;
-
- switch (mode) {
- case CAN_MODE_START:
- err = kvaser_usb_simple_msg_async(priv, CMD_START_CHIP);
- if (err)
- return err;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int kvaser_usb_get_berr_counter(const struct net_device *netdev,
- struct can_berr_counter *bec)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
-
- *bec = priv->bec;
-
- return 0;
-}
-
-static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
-{
- int i;
-
- for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
- continue;
-
- unregister_candev(dev->nets[i]->netdev);
- }
-
- kvaser_usb_unlink_all_urbs(dev);
-
- for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
- continue;
-
- free_candev(dev->nets[i]->netdev);
- }
-}
-
-static int kvaser_usb_init_one(struct usb_interface *intf,
- const struct usb_device_id *id, int channel)
-{
- struct kvaser_usb *dev = usb_get_intfdata(intf);
- struct net_device *netdev;
- struct kvaser_usb_net_priv *priv;
- int err;
-
- err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
- if (err)
- return err;
-
- netdev = alloc_candev(sizeof(*priv) +
- dev->max_tx_urbs * sizeof(*priv->tx_contexts),
- dev->max_tx_urbs);
- if (!netdev) {
- dev_err(&intf->dev, "Cannot alloc candev\n");
- return -ENOMEM;
- }
-
- priv = netdev_priv(netdev);
-
- init_usb_anchor(&priv->tx_submitted);
- init_completion(&priv->start_comp);
- init_completion(&priv->stop_comp);
-
- priv->dev = dev;
- priv->netdev = netdev;
- priv->channel = channel;
-
- spin_lock_init(&priv->tx_contexts_lock);
- kvaser_usb_reset_tx_urb_contexts(priv);
-
- priv->can.state = CAN_STATE_STOPPED;
- priv->can.clock.freq = CAN_USB_CLOCK;
- priv->can.bittiming_const = &kvaser_usb_bittiming_const;
- priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
- priv->can.do_set_mode = kvaser_usb_set_mode;
- if (id->driver_info & KVASER_HAS_TXRX_ERRORS)
- priv->can.do_get_berr_counter = kvaser_usb_get_berr_counter;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
- if (id->driver_info & KVASER_HAS_SILENT_MODE)
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
-
- netdev->flags |= IFF_ECHO;
-
- netdev->netdev_ops = &kvaser_usb_netdev_ops;
-
- SET_NETDEV_DEV(netdev, &intf->dev);
- netdev->dev_id = channel;
-
- dev->nets[channel] = priv;
-
- err = register_candev(netdev);
- if (err) {
- dev_err(&intf->dev, "Failed to register can device\n");
- free_candev(netdev);
- dev->nets[channel] = NULL;
- return err;
- }
-
- netdev_dbg(netdev, "device registered\n");
-
- return 0;
-}
-
-static int kvaser_usb_get_endpoints(const struct usb_interface *intf,
- struct usb_endpoint_descriptor **in,
- struct usb_endpoint_descriptor **out)
-{
- const struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
- int i;
-
- iface_desc = &intf->altsetting[0];
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (!*in && usb_endpoint_is_bulk_in(endpoint))
- *in = endpoint;
-
- if (!*out && usb_endpoint_is_bulk_out(endpoint))
- *out = endpoint;
-
- /* use first bulk endpoint for in and out */
- if (*in && *out)
- return 0;
- }
-
- return -ENODEV;
-}
-
-static int kvaser_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct kvaser_usb *dev;
- int err = -ENOMEM;
- int i, retry = 3;
-
- dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- if (kvaser_is_leaf(id)) {
- dev->family = KVASER_LEAF;
- } else if (kvaser_is_usbcan(id)) {
- dev->family = KVASER_USBCAN;
- } else {
- dev_err(&intf->dev,
- "Product ID (%d) does not belong to any known Kvaser USB family",
- id->idProduct);
- return -ENODEV;
- }
-
- err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
- if (err) {
- dev_err(&intf->dev, "Cannot get usb endpoint(s)");
- return err;
- }
-
- dev->udev = interface_to_usbdev(intf);
-
- init_usb_anchor(&dev->rx_submitted);
-
- usb_set_intfdata(intf, dev);
-
- /* On some x86 laptops, plugging a Kvaser device again after
- * an unplug makes the firmware always ignore the very first
- * command. For such a case, provide some room for retries
- * instead of completely exiting the driver.
- */
- do {
- err = kvaser_usb_get_software_info(dev);
- } while (--retry && err == -ETIMEDOUT);
-
- if (err) {
- dev_err(&intf->dev,
- "Cannot get software infos, error %d\n", err);
- return err;
- }
-
- dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
- ((dev->fw_version >> 24) & 0xff),
- ((dev->fw_version >> 16) & 0xff),
- (dev->fw_version & 0xffff));
-
- dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
-
- err = kvaser_usb_get_card_info(dev);
- if (err) {
- dev_err(&intf->dev,
- "Cannot get card infos, error %d\n", err);
- return err;
- }
-
- for (i = 0; i < dev->nchannels; i++) {
- err = kvaser_usb_init_one(intf, id, i);
- if (err) {
- kvaser_usb_remove_interfaces(dev);
- return err;
- }
- }
-
- return 0;
-}
-
-static void kvaser_usb_disconnect(struct usb_interface *intf)
-{
- struct kvaser_usb *dev = usb_get_intfdata(intf);
-
- usb_set_intfdata(intf, NULL);
-
- if (!dev)
- return;
-
- kvaser_usb_remove_interfaces(dev);
-}
-
-static struct usb_driver kvaser_usb_driver = {
- .name = "kvaser_usb",
- .probe = kvaser_usb_probe,
- .disconnect = kvaser_usb_disconnect,
- .id_table = kvaser_usb_table,
-};
-
-module_usb_driver(kvaser_usb_driver);
-
-MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
-MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile
new file mode 100644
index 000000000000..9f41ddab6a5a
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
+kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o kvaser_usb_hydra.o
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
new file mode 100644
index 000000000000..390b6bde883c
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Parts of this driver are based on the following:
+ * - Kvaser linux leaf driver (version 4.78)
+ * - CAN driver for esd CAN-USB/2
+ * - Kvaser linux usbcanII driver (version 5.3)
+ * - Kvaser linux mhydra driver (version 5.24)
+ *
+ * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ * Copyright (C) 2015 Valeo S.A.
+ */
+
+#ifndef KVASER_USB_H
+#define KVASER_USB_H
+
+/* Kvaser USB CAN dongles are divided into three major platforms:
+ * - Hydra: Running firmware labeled as 'mhydra'
+ * - Leaf: Based on Renesas M32C or Freescale i.MX28, running firmware labeled
+ * as 'filo'
+ * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios'
+ */
+
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+
+#define KVASER_USB_MAX_RX_URBS 4
+#define KVASER_USB_MAX_TX_URBS 128
+#define KVASER_USB_TIMEOUT 1000 /* msecs */
+#define KVASER_USB_RX_BUFFER_SIZE 3072
+#define KVASER_USB_MAX_NET_DEVICES 5
+
+/* USB devices features */
+#define KVASER_USB_HAS_SILENT_MODE BIT(0)
+#define KVASER_USB_HAS_TXRX_ERRORS BIT(1)
+
+/* Device capabilities */
+#define KVASER_USB_CAP_BERR_CAP 0x01
+#define KVASER_USB_CAP_EXT_CAP 0x02
+#define KVASER_USB_HYDRA_CAP_EXT_CMD 0x04
+
+struct kvaser_usb_dev_cfg;
+
+enum kvaser_usb_leaf_family {
+ KVASER_LEAF,
+ KVASER_USBCAN,
+};
+
+#define KVASER_USB_HYDRA_MAX_CMD_LEN 128
+struct kvaser_usb_dev_card_data_hydra {
+ u8 channel_to_he[KVASER_USB_MAX_NET_DEVICES];
+ u8 sysdbg_he;
+ spinlock_t transid_lock; /* lock for transid */
+ u16 transid;
+ /* lock for usb_rx_leftover and usb_rx_leftover_len */
+ spinlock_t usb_rx_leftover_lock;
+ u8 usb_rx_leftover[KVASER_USB_HYDRA_MAX_CMD_LEN];
+ u8 usb_rx_leftover_len;
+};
+struct kvaser_usb_dev_card_data {
+ u32 ctrlmode_supported;
+ u32 capabilities;
+ union {
+ struct {
+ enum kvaser_usb_leaf_family family;
+ } leaf;
+ struct kvaser_usb_dev_card_data_hydra hydra;
+ };
+};
+
+/* Context for an outstanding, not yet ACKed, transmission */
+struct kvaser_usb_tx_urb_context {
+ struct kvaser_usb_net_priv *priv;
+ u32 echo_index;
+ int dlc;
+};
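+
+/* echo_index doubles as a free/busy marker: a free context has
+ * echo_index set to the device's max_tx_urbs (an out-of-range value,
+ * see kvaser_usb_reset_tx_urb_contexts()), and holds the
+ * can_put_echo_skb() index while a transmission is in flight. The free
+ * slot search in kvaser_usb_start_xmit(), done under tx_contexts_lock,
+ * boils down to:
+ *
+ *	for (i = 0; i < dev->max_tx_urbs; i++)
+ *		if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs)
+ *			break;
+ */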
+
+struct kvaser_usb {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES];
+ const struct kvaser_usb_dev_ops *ops;
+ const struct kvaser_usb_dev_cfg *cfg;
+
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+ struct usb_anchor rx_submitted;
+
+	u32 fw_version;
+	unsigned int nchannels;
+	/* @max_tx_urbs: Firmware-reported maximum number of outstanding,
+	 * not yet ACKed, transmissions on this device. This value is
+	 * also used as a sentinel for marking free tx contexts.
+	 */
+	unsigned int max_tx_urbs;
+ struct kvaser_usb_dev_card_data card_data;
+
+ bool rxinitdone;
+ void *rxbuf[KVASER_USB_MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[KVASER_USB_MAX_RX_URBS];
+};
+
+struct kvaser_usb_net_priv {
+ struct can_priv can;
+ struct can_berr_counter bec;
+
+ struct kvaser_usb *dev;
+ struct net_device *netdev;
+ int channel;
+
+ struct completion start_comp, stop_comp, flush_comp;
+ struct usb_anchor tx_submitted;
+
+ spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */
+ int active_tx_contexts;
+ struct kvaser_usb_tx_urb_context tx_contexts[];
+};
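+
+/* tx_contexts[] is a flexible array member, sized from the
+ * firmware-reported max_tx_urbs when the candev is allocated, as in
+ * kvaser_usb_init_one():
+ *
+ *	netdev = alloc_candev(sizeof(*priv) +
+ *			      dev->max_tx_urbs * sizeof(*priv->tx_contexts),
+ *			      dev->max_tx_urbs);
+ */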
+
+/**
+ * struct kvaser_usb_dev_ops - Device specific functions
+ * @dev_set_mode: used for can.do_set_mode
+ * @dev_set_bittiming: used for can.do_set_bittiming
+ * @dev_set_data_bittiming: used for can.do_set_data_bittiming
+ * @dev_get_berr_counter: used for can.do_get_berr_counter
+ *
+ * @dev_setup_endpoints: setup USB in and out endpoints
+ * @dev_init_card: initialize card
+ * @dev_get_software_info: get software info
+ * @dev_get_software_details: get software details
+ * @dev_get_card_info: get card info
+ * @dev_get_capabilities: discover device capabilities
+ *
+ * @dev_set_opt_mode: set ctrlmode
+ * @dev_start_chip: start the CAN controller
+ * @dev_stop_chip: stop the CAN controller
+ * @dev_reset_chip: reset the CAN controller
+ * @dev_flush_queue: flush outstanding CAN messages
+ * @dev_read_bulk_callback: handle incoming commands
+ * @dev_frame_to_cmd: translate struct can_frame into device command
+ */
+struct kvaser_usb_dev_ops {
+ int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode);
+ int (*dev_set_bittiming)(struct net_device *netdev);
+ int (*dev_set_data_bittiming)(struct net_device *netdev);
+ int (*dev_get_berr_counter)(const struct net_device *netdev,
+ struct can_berr_counter *bec);
+ int (*dev_setup_endpoints)(struct kvaser_usb *dev);
+ int (*dev_init_card)(struct kvaser_usb *dev);
+ int (*dev_get_software_info)(struct kvaser_usb *dev);
+ int (*dev_get_software_details)(struct kvaser_usb *dev);
+ int (*dev_get_card_info)(struct kvaser_usb *dev);
+ int (*dev_get_capabilities)(struct kvaser_usb *dev);
+ int (*dev_set_opt_mode)(const struct kvaser_usb_net_priv *priv);
+ int (*dev_start_chip)(struct kvaser_usb_net_priv *priv);
+ int (*dev_stop_chip)(struct kvaser_usb_net_priv *priv);
+ int (*dev_reset_chip)(struct kvaser_usb *dev, int channel);
+ int (*dev_flush_queue)(struct kvaser_usb_net_priv *priv);
+ void (*dev_read_bulk_callback)(struct kvaser_usb *dev, void *buf,
+ int len);
+ void *(*dev_frame_to_cmd)(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid);
+};
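+
+/* The core driver stays device-agnostic by dispatching through these
+ * ops; kvaser_usb_open() in kvaser_usb_core.c, for instance, does
+ *
+ *	err = dev->ops->dev_set_opt_mode(priv);
+ *	...
+ *	err = dev->ops->dev_start_chip(priv);
+ *
+ * Ops not implemented by every platform (dev_reset_chip,
+ * dev_get_software_details, dev_get_capabilities) are NULL-checked
+ * before being called.
+ */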
+
+struct kvaser_usb_dev_cfg {
+ const struct can_clock clock;
+ const unsigned int timestamp_freq;
+ const struct can_bittiming_const * const bittiming_const;
+ const struct can_bittiming_const * const data_bittiming_const;
+};
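+
+/* Each platform supplies one of these per clock/controller variant. A
+ * minimal sketch with hypothetical values (the real instances live in
+ * the leaf and hydra files):
+ *
+ *	static const struct kvaser_usb_dev_cfg kvaser_usb_example_dev_cfg = {
+ *		.clock = { .freq = 8000000 },
+ *		.timestamp_freq = 1,
+ *		.bittiming_const = &kvaser_usb_example_bittiming_const,
+ *	};
+ */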
+
+extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
+extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
+
+int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
+ int *actual_len);
+
+int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len);
+
+int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
+ int len);
+
+int kvaser_usb_can_rx_over_error(struct net_device *netdev);
+#endif /* KVASER_USB_H */
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
new file mode 100644
index 000000000000..b939a4c10b84
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -0,0 +1,835 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Parts of this driver are based on the following:
+ * - Kvaser linux leaf driver (version 4.78)
+ * - CAN driver for esd CAN-USB/2
+ * - Kvaser linux usbcanII driver (version 5.3)
+ * - Kvaser linux mhydra driver (version 5.24)
+ *
+ * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ * Copyright (C) 2015 Valeo S.A.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/if.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/netlink.h>
+
+#include "kvaser_usb.h"
+
+/* Kvaser USB vendor id. */
+#define KVASER_VENDOR_ID 0x0bfd
+
+/* Kvaser Leaf USB devices product ids */
+#define USB_LEAF_DEVEL_PRODUCT_ID 10
+#define USB_LEAF_LITE_PRODUCT_ID 11
+#define USB_LEAF_PRO_PRODUCT_ID 12
+#define USB_LEAF_SPRO_PRODUCT_ID 14
+#define USB_LEAF_PRO_LS_PRODUCT_ID 15
+#define USB_LEAF_PRO_SWC_PRODUCT_ID 16
+#define USB_LEAF_PRO_LIN_PRODUCT_ID 17
+#define USB_LEAF_SPRO_LS_PRODUCT_ID 18
+#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19
+#define USB_MEMO2_DEVEL_PRODUCT_ID 22
+#define USB_MEMO2_HSHS_PRODUCT_ID 23
+#define USB_UPRO_HSHS_PRODUCT_ID 24
+#define USB_LEAF_LITE_GI_PRODUCT_ID 25
+#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26
+#define USB_MEMO2_HSLS_PRODUCT_ID 27
+#define USB_LEAF_LITE_CH_PRODUCT_ID 28
+#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29
+#define USB_OEM_MERCURY_PRODUCT_ID 34
+#define USB_OEM_LEAF_PRODUCT_ID 35
+#define USB_CAN_R_PRODUCT_ID 39
+#define USB_LEAF_LITE_V2_PRODUCT_ID 288
+#define USB_MINI_PCIE_HS_PRODUCT_ID 289
+#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
+#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
+#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
+
+/* Kvaser USBCan-II devices product ids */
+#define USB_USBCAN_REVB_PRODUCT_ID 2
+#define USB_VCI2_PRODUCT_ID 3
+#define USB_USBCAN2_PRODUCT_ID 4
+#define USB_MEMORATOR_PRODUCT_ID 5
+
+/* Kvaser Minihydra USB devices product ids */
+#define USB_BLACKBIRD_V2_PRODUCT_ID 258
+#define USB_MEMO_PRO_5HS_PRODUCT_ID 260
+#define USB_USBCAN_PRO_5HS_PRODUCT_ID 261
+#define USB_USBCAN_LIGHT_4HS_PRODUCT_ID 262
+#define USB_LEAF_PRO_HS_V2_PRODUCT_ID 263
+#define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 264
+#define USB_MEMO_2HS_PRODUCT_ID 265
+#define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 266
+#define USB_HYBRID_CANLIN_PRODUCT_ID 267
+#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 268
+#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269
+#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 270
+
+static inline bool kvaser_is_leaf(const struct usb_device_id *id)
+{
+ return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
+ id->idProduct <= USB_CAN_R_PRODUCT_ID) ||
+ (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID &&
+ id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID);
+}
+
+static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
+{
+ return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
+ id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
+}
+
+static inline bool kvaser_is_hydra(const struct usb_device_id *id)
+{
+ return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID &&
+ id->idProduct <= USB_HYBRID_PRO_CANLIN_PRODUCT_ID;
+}
+
+static const struct usb_device_id kvaser_usb_table[] = {
+ /* Leaf USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
+
+ /* USBCANII USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+
+ /* Minihydra USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
+
+int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len)
+{
+ int actual_len; /* Not used */
+
+ return usb_bulk_msg(dev->udev,
+ usb_sndbulkpipe(dev->udev,
+ dev->bulk_out->bEndpointAddress),
+ cmd, len, &actual_len, KVASER_USB_TIMEOUT);
+}
+
+int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
+ int *actual_len)
+{
+ return usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ cmd, len, actual_len, KVASER_USB_TIMEOUT);
+}
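+
+/* Illustrative synchronous command round-trip (the real callers, in the
+ * leaf and hydra files, also match command and transaction IDs in the
+ * reply):
+ *
+ *	err = kvaser_usb_send_cmd(dev, cmd, len);
+ *	if (!err)
+ *		err = kvaser_usb_recv_cmd(dev, reply, sizeof(reply),
+ *					  &actual_len);
+ */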
+
+static void kvaser_usb_send_cmd_callback(struct urb *urb)
+{
+ struct net_device *netdev = urb->context;
+
+ kfree(urb->transfer_buffer);
+
+ if (urb->status)
+ netdev_warn(netdev, "urb status received: %d\n", urb->status);
+}
+
+int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
+ int len)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct net_device *netdev = priv->netdev;
+ struct urb *urb;
+ int err;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev,
+ dev->bulk_out->bEndpointAddress),
+ cmd, len, kvaser_usb_send_cmd_callback, netdev);
+ usb_anchor_urb(urb, &priv->tx_submitted);
+
+	err = usb_submit_urb(urb, GFP_ATOMIC);
+	if (err) {
+		netdev_err(netdev, "Error transmitting URB\n");
+		usb_unanchor_urb(urb);
+	}
+	usb_free_urb(urb);
+
+	/* On failure the completion callback never runs, so the caller
+	 * keeps ownership of cmd and must free it.
+	 */
+	return err;
+}
+
+int kvaser_usb_can_rx_over_error(struct net_device *netdev)
+{
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
+ return -ENOMEM;
+ }
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+
+ return 0;
+}
+
+static void kvaser_usb_read_bulk_callback(struct urb *urb)
+{
+ struct kvaser_usb *dev = urb->context;
+ int err;
+ unsigned int i;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
+ case -ESHUTDOWN:
+ return;
+ default:
+ dev_info(&dev->intf->dev, "Rx URB aborted (%d)\n", urb->status);
+ goto resubmit_urb;
+ }
+
+ dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
+ urb->actual_length);
+
+resubmit_urb:
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ urb->transfer_buffer, KVASER_USB_RX_BUFFER_SIZE,
+ kvaser_usb_read_bulk_callback, dev);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err == -ENODEV) {
+ for (i = 0; i < dev->nchannels; i++) {
+ if (!dev->nets[i])
+ continue;
+
+ netif_device_detach(dev->nets[i]->netdev);
+ }
+ } else if (err) {
+ dev_err(&dev->intf->dev,
+ "Failed resubmitting read bulk urb: %d\n", err);
+ }
+}
+
+static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev)
+{
+ int i, err = 0;
+
+ if (dev->rxinitdone)
+ return 0;
+
+ for (i = 0; i < KVASER_USB_MAX_RX_URBS; i++) {
+ struct urb *urb = NULL;
+ u8 *buf = NULL;
+ dma_addr_t buf_dma;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ err = -ENOMEM;
+ break;
+ }
+
+ buf = usb_alloc_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE,
+ GFP_KERNEL, &buf_dma);
+ if (!buf) {
+ dev_warn(&dev->intf->dev,
+ "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ err = -ENOMEM;
+ break;
+ }
+
+ usb_fill_bulk_urb(urb, dev->udev,
+				  usb_rcvbulkpipe(dev->udev,
+						  dev->bulk_in->bEndpointAddress),
+ buf, KVASER_USB_RX_BUFFER_SIZE,
+ kvaser_usb_read_bulk_callback, dev);
+ urb->transfer_dma = buf_dma;
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &dev->rx_submitted);
+
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ usb_unanchor_urb(urb);
+ usb_free_coherent(dev->udev,
+ KVASER_USB_RX_BUFFER_SIZE, buf,
+ buf_dma);
+ usb_free_urb(urb);
+ break;
+ }
+
+ dev->rxbuf[i] = buf;
+ dev->rxbuf_dma[i] = buf_dma;
+
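+		/* usb_free_urb() here only drops our reference; the anchor
+		 * and the USB core hold their own references, so the URB
+		 * stays alive until it completes.
+		 */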
+ usb_free_urb(urb);
+ }
+
+ if (i == 0) {
+ dev_warn(&dev->intf->dev, "Cannot setup read URBs, error %d\n",
+ err);
+ return err;
+ } else if (i < KVASER_USB_MAX_RX_URBS) {
+ dev_warn(&dev->intf->dev, "RX performances may be slow\n");
+ }
+
+ dev->rxinitdone = true;
+
+ return 0;
+}
+
+static int kvaser_usb_open(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ int err;
+
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ err = kvaser_usb_setup_rx_urbs(dev);
+ if (err)
+ goto error;
+
+ err = dev->ops->dev_set_opt_mode(priv);
+ if (err)
+ goto error;
+
+ err = dev->ops->dev_start_chip(priv);
+ if (err) {
+ netdev_warn(netdev, "Cannot start device, error %d\n", err);
+ goto error;
+ }
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+
+error:
+ close_candev(netdev);
+ return err;
+}
+
+static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
+{
+ int i, max_tx_urbs;
+
+ max_tx_urbs = priv->dev->max_tx_urbs;
+
+ priv->active_tx_contexts = 0;
+ for (i = 0; i < max_tx_urbs; i++)
+ priv->tx_contexts[i].echo_index = max_tx_urbs;
+}
+
+/* This method might sleep. Do not call it in the atomic context
+ * of URB completions.
+ */
+static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+{
+ usb_kill_anchored_urbs(&priv->tx_submitted);
+ kvaser_usb_reset_tx_urb_contexts(priv);
+}
+
+static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
+{
+ int i;
+
+ usb_kill_anchored_urbs(&dev->rx_submitted);
+
+ for (i = 0; i < KVASER_USB_MAX_RX_URBS; i++)
+ usb_free_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE,
+ dev->rxbuf[i], dev->rxbuf_dma[i]);
+
+ for (i = 0; i < dev->nchannels; i++) {
+ struct kvaser_usb_net_priv *priv = dev->nets[i];
+
+ if (priv)
+ kvaser_usb_unlink_tx_urbs(priv);
+ }
+}
+
+static int kvaser_usb_close(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ int err;
+
+ netif_stop_queue(netdev);
+
+ err = dev->ops->dev_flush_queue(priv);
+ if (err)
+ netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
+
+ if (dev->ops->dev_reset_chip) {
+ err = dev->ops->dev_reset_chip(dev, priv->channel);
+ if (err)
+ netdev_warn(netdev, "Cannot reset card, error %d\n",
+ err);
+ }
+
+ err = dev->ops->dev_stop_chip(priv);
+ if (err)
+ netdev_warn(netdev, "Cannot stop device, error %d\n", err);
+
+ /* reset tx contexts */
+ kvaser_usb_unlink_tx_urbs(priv);
+
+ priv->can.state = CAN_STATE_STOPPED;
+ close_candev(priv->netdev);
+
+ return 0;
+}
+
+static void kvaser_usb_write_bulk_callback(struct urb *urb)
+{
+ struct kvaser_usb_tx_urb_context *context = urb->context;
+ struct kvaser_usb_net_priv *priv;
+ struct net_device *netdev;
+
+ if (WARN_ON(!context))
+ return;
+
+ priv = context->priv;
+ netdev = priv->netdev;
+
+ kfree(urb->transfer_buffer);
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (urb->status)
+ netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
+}
+
+static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ struct net_device_stats *stats = &netdev->stats;
+ struct kvaser_usb_tx_urb_context *context = NULL;
+ struct urb *urb;
+ void *buf;
+ int cmd_len = 0;
+ int err, ret = NETDEV_TX_OK;
+ unsigned int i;
+ unsigned long flags;
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+ for (i = 0; i < dev->max_tx_urbs; i++) {
+ if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
+ context = &priv->tx_contexts[i];
+
+ context->echo_index = i;
+ can_put_echo_skb(skb, netdev, context->echo_index);
+ ++priv->active_tx_contexts;
+ if (priv->active_tx_contexts >= (int)dev->max_tx_urbs)
+ netif_stop_queue(netdev);
+
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+
+ /* This should never happen; it implies a flow control bug */
+ if (!context) {
+ netdev_warn(netdev, "cannot find free context\n");
+
+ ret = NETDEV_TX_BUSY;
+ goto freeurb;
+ }
+
+ buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len,
+ context->echo_index);
+ if (!buf) {
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+
+ can_free_echo_skb(netdev, context->echo_index);
+ context->echo_index = dev->max_tx_urbs;
+ --priv->active_tx_contexts;
+ netif_wake_queue(netdev);
+
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+ goto freeurb;
+ }
+
+ context->priv = priv;
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev,
+ dev->bulk_out->bEndpointAddress),
+ buf, cmd_len, kvaser_usb_write_bulk_callback,
+ context);
+ usb_anchor_urb(urb, &priv->tx_submitted);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err)) {
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+
+ can_free_echo_skb(netdev, context->echo_index);
+ context->echo_index = dev->max_tx_urbs;
+ --priv->active_tx_contexts;
+ netif_wake_queue(netdev);
+
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+
+ usb_unanchor_urb(urb);
+ kfree(buf);
+
+ stats->tx_dropped++;
+
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ else
+ netdev_warn(netdev, "Failed tx_urb %d\n", err);
+
+ goto freeurb;
+ }
+
+ ret = NETDEV_TX_OK;
+
+freeurb:
+ usb_free_urb(urb);
+ return ret;
+}
+
+static const struct net_device_ops kvaser_usb_netdev_ops = {
+ .ndo_open = kvaser_usb_open,
+ .ndo_stop = kvaser_usb_close,
+ .ndo_start_xmit = kvaser_usb_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (!dev->nets[i])
+ continue;
+
+ unregister_candev(dev->nets[i]->netdev);
+ }
+
+ kvaser_usb_unlink_all_urbs(dev);
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (!dev->nets[i])
+ continue;
+
+ free_candev(dev->nets[i]->netdev);
+ }
+}
+
+static int kvaser_usb_init_one(struct kvaser_usb *dev,
+ const struct usb_device_id *id, int channel)
+{
+ struct net_device *netdev;
+ struct kvaser_usb_net_priv *priv;
+ int err;
+
+ if (dev->ops->dev_reset_chip) {
+ err = dev->ops->dev_reset_chip(dev, channel);
+ if (err)
+ return err;
+ }
+
+ netdev = alloc_candev(sizeof(*priv) +
+ dev->max_tx_urbs * sizeof(*priv->tx_contexts),
+ dev->max_tx_urbs);
+ if (!netdev) {
+ dev_err(&dev->intf->dev, "Cannot alloc candev\n");
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(netdev);
+
+ init_usb_anchor(&priv->tx_submitted);
+ init_completion(&priv->start_comp);
+ init_completion(&priv->stop_comp);
+ priv->can.ctrlmode_supported = 0;
+
+ priv->dev = dev;
+ priv->netdev = netdev;
+ priv->channel = channel;
+
+ spin_lock_init(&priv->tx_contexts_lock);
+ kvaser_usb_reset_tx_urb_contexts(priv);
+
+ priv->can.state = CAN_STATE_STOPPED;
+ priv->can.clock.freq = dev->cfg->clock.freq;
+ priv->can.bittiming_const = dev->cfg->bittiming_const;
+ priv->can.do_set_bittiming = dev->ops->dev_set_bittiming;
+ priv->can.do_set_mode = dev->ops->dev_set_mode;
+ if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) ||
+ (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
+ priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter;
+ if (id->driver_info & KVASER_USB_HAS_SILENT_MODE)
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+
+ priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
+
+ if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
+ priv->can.do_set_data_bittiming =
+ dev->ops->dev_set_data_bittiming;
+ }
+
+ netdev->flags |= IFF_ECHO;
+
+ netdev->netdev_ops = &kvaser_usb_netdev_ops;
+
+ SET_NETDEV_DEV(netdev, &dev->intf->dev);
+ netdev->dev_id = channel;
+
+ dev->nets[channel] = priv;
+
+ err = register_candev(netdev);
+ if (err) {
+ dev_err(&dev->intf->dev, "Failed to register CAN device\n");
+ free_candev(netdev);
+ dev->nets[channel] = NULL;
+ return err;
+ }
+
+ netdev_dbg(netdev, "device registered\n");
+
+ return 0;
+}
+
+static int kvaser_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct kvaser_usb *dev;
+ int err;
+ int i;
+
+ dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ if (kvaser_is_leaf(id)) {
+ dev->card_data.leaf.family = KVASER_LEAF;
+ dev->ops = &kvaser_usb_leaf_dev_ops;
+ } else if (kvaser_is_usbcan(id)) {
+ dev->card_data.leaf.family = KVASER_USBCAN;
+ dev->ops = &kvaser_usb_leaf_dev_ops;
+ } else if (kvaser_is_hydra(id)) {
+ dev->ops = &kvaser_usb_hydra_dev_ops;
+ } else {
+ dev_err(&intf->dev,
+ "Product ID (%d) is not a supported Kvaser USB device\n",
+ id->idProduct);
+ return -ENODEV;
+ }
+
+ dev->intf = intf;
+
+ err = dev->ops->dev_setup_endpoints(dev);
+ if (err) {
+ dev_err(&intf->dev, "Cannot get usb endpoint(s)");
+ return err;
+ }
+
+ dev->udev = interface_to_usbdev(intf);
+
+ init_usb_anchor(&dev->rx_submitted);
+
+ usb_set_intfdata(intf, dev);
+
+ dev->card_data.ctrlmode_supported = 0;
+ dev->card_data.capabilities = 0;
+ err = dev->ops->dev_init_card(dev);
+ if (err) {
+ dev_err(&intf->dev,
+ "Failed to initialize card, error %d\n", err);
+ return err;
+ }
+
+ err = dev->ops->dev_get_software_info(dev);
+ if (err) {
+ dev_err(&intf->dev,
+ "Cannot get software info, error %d\n", err);
+ return err;
+ }
+
+ if (dev->ops->dev_get_software_details) {
+ err = dev->ops->dev_get_software_details(dev);
+ if (err) {
+ dev_err(&intf->dev,
+ "Cannot get software details, error %d\n", err);
+ return err;
+ }
+ }
+
+ if (WARN_ON(!dev->cfg))
+ return -ENODEV;
+
+ dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
+ ((dev->fw_version >> 24) & 0xff),
+ ((dev->fw_version >> 16) & 0xff),
+ (dev->fw_version & 0xffff));
+
+ dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
+
+ err = dev->ops->dev_get_card_info(dev);
+ if (err) {
+ dev_err(&intf->dev, "Cannot get card info, error %d\n", err);
+ return err;
+ }
+
+ if (dev->ops->dev_get_capabilities) {
+ err = dev->ops->dev_get_capabilities(dev);
+ if (err) {
+ dev_err(&intf->dev,
+ "Cannot get capabilities, error %d\n", err);
+ kvaser_usb_remove_interfaces(dev);
+ return err;
+ }
+ }
+
+ for (i = 0; i < dev->nchannels; i++) {
+ err = kvaser_usb_init_one(dev, id, i);
+ if (err) {
+ kvaser_usb_remove_interfaces(dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void kvaser_usb_disconnect(struct usb_interface *intf)
+{
+ struct kvaser_usb *dev = usb_get_intfdata(intf);
+
+ usb_set_intfdata(intf, NULL);
+
+ if (!dev)
+ return;
+
+ kvaser_usb_remove_interfaces(dev);
+}
+
+static struct usb_driver kvaser_usb_driver = {
+ .name = "kvaser_usb",
+ .probe = kvaser_usb_probe,
+ .disconnect = kvaser_usb_disconnect,
+ .id_table = kvaser_usb_table,
+};
+
+module_usb_driver(kvaser_usb_driver);
+
+MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
+MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
+MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
new file mode 100644
index 000000000000..c084bae5ec0a
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -0,0 +1,2028 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Parts of this driver are based on the following:
+ * - Kvaser linux mhydra driver (version 5.24)
+ * - CAN driver for esd CAN-USB/2
+ *
+ * Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ *
+ * Known issues:
+ * - Transition from CAN_STATE_ERROR_WARNING to CAN_STATE_ERROR_ACTIVE is only
+ * reported after a call to do_get_berr_counter(), since firmware does not
+ * distinguish between ERROR_WARNING and ERROR_ACTIVE.
+ * - Hardware timestamps are not set for CAN Tx frames.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/netlink.h>
+
+#include "kvaser_usb.h"
+
+/* Forward declarations */
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan;
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc;
+
+#define KVASER_USB_HYDRA_BULK_EP_IN_ADDR 0x82
+#define KVASER_USB_HYDRA_BULK_EP_OUT_ADDR 0x02
+
+#define KVASER_USB_HYDRA_MAX_TRANSID 0xff
+#define KVASER_USB_HYDRA_MIN_TRANSID 0x01
+
+/* Minihydra command IDs */
+#define CMD_SET_BUSPARAMS_REQ 16
+#define CMD_GET_CHIP_STATE_REQ 19
+#define CMD_CHIP_STATE_EVENT 20
+#define CMD_SET_DRIVERMODE_REQ 21
+#define CMD_START_CHIP_REQ 26
+#define CMD_START_CHIP_RESP 27
+#define CMD_STOP_CHIP_REQ 28
+#define CMD_STOP_CHIP_RESP 29
+#define CMD_TX_CAN_MESSAGE 33
+#define CMD_GET_CARD_INFO_REQ 34
+#define CMD_GET_CARD_INFO_RESP 35
+#define CMD_GET_SOFTWARE_INFO_REQ 38
+#define CMD_GET_SOFTWARE_INFO_RESP 39
+#define CMD_ERROR_EVENT 45
+#define CMD_FLUSH_QUEUE 48
+#define CMD_TX_ACKNOWLEDGE 50
+#define CMD_FLUSH_QUEUE_RESP 66
+#define CMD_SET_BUSPARAMS_FD_REQ 69
+#define CMD_SET_BUSPARAMS_FD_RESP 70
+#define CMD_SET_BUSPARAMS_RESP 85
+#define CMD_GET_CAPABILITIES_REQ 95
+#define CMD_GET_CAPABILITIES_RESP 96
+#define CMD_RX_MESSAGE 106
+#define CMD_MAP_CHANNEL_REQ 200
+#define CMD_MAP_CHANNEL_RESP 201
+#define CMD_GET_SOFTWARE_DETAILS_REQ 202
+#define CMD_GET_SOFTWARE_DETAILS_RESP 203
+#define CMD_EXTENDED 255
+
+/* Minihydra extended command IDs */
+#define CMD_TX_CAN_MESSAGE_FD 224
+#define CMD_TX_ACKNOWLEDGE_FD 225
+#define CMD_RX_MESSAGE_FD 226
+
+/* Hydra commands are handled by different threads in firmware. The
+ * threads are denoted hydra entities (HEs). Each HE has a unique 6-bit
+ * address, which hydra commands carry to identify their source and
+ * destination HE. Two HE addresses are predefined; the remaining
+ * addresses differ between devices and firmware versions, so we need to
+ * enumerate them (see kvaser_usb_hydra_map_channel()).
+ */
+
+/* Well-known HE addresses */
+#define KVASER_USB_HYDRA_HE_ADDRESS_ROUTER 0x00
+#define KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL 0x3e
+
+#define KVASER_USB_HYDRA_TRANSID_CANHE 0x40
+#define KVASER_USB_HYDRA_TRANSID_SYSDBG 0x61
+
+struct kvaser_cmd_map_ch_req {
+ char name[16];
+ u8 channel;
+ u8 reserved[11];
+} __packed;
+
+struct kvaser_cmd_map_ch_res {
+ u8 he_addr;
+ u8 channel;
+ u8 reserved[26];
+} __packed;
+
+struct kvaser_cmd_card_info {
+ __le32 serial_number;
+ __le32 clock_res;
+ __le32 mfg_date;
+ __le32 ean[2];
+ u8 hw_version;
+ u8 usb_mode;
+ u8 hw_type;
+ u8 reserved0;
+ u8 nchannels;
+ u8 reserved1[3];
+} __packed;
+
+struct kvaser_cmd_sw_info {
+ u8 reserved0[8];
+ __le16 max_outstanding_tx;
+ u8 reserved1[18];
+} __packed;
+
+struct kvaser_cmd_sw_detail_req {
+ u8 use_ext_cmd;
+ u8 reserved[27];
+} __packed;
+
+/* Software detail flags */
+#define KVASER_USB_HYDRA_SW_FLAG_FW_BETA BIT(2)
+#define KVASER_USB_HYDRA_SW_FLAG_FW_BAD BIT(4)
+#define KVASER_USB_HYDRA_SW_FLAG_FREQ_80M BIT(5)
+#define KVASER_USB_HYDRA_SW_FLAG_EXT_CMD BIT(9)
+#define KVASER_USB_HYDRA_SW_FLAG_CANFD BIT(10)
+#define KVASER_USB_HYDRA_SW_FLAG_NONISO BIT(11)
+#define KVASER_USB_HYDRA_SW_FLAG_EXT_CAP BIT(12)
+struct kvaser_cmd_sw_detail_res {
+ __le32 sw_flags;
+ __le32 sw_version;
+ __le32 sw_name;
+ __le32 ean[2];
+ __le32 max_bitrate;
+ u8 reserved[4];
+} __packed;
+
+/* Sub commands for cap_req and cap_res */
+#define KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE 0x02
+#define KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT 0x05
+#define KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT 0x06
+struct kvaser_cmd_cap_req {
+ __le16 cap_cmd;
+ u8 reserved[26];
+} __packed;
+
+/* Status codes for cap_res */
+#define KVASER_USB_HYDRA_CAP_STAT_OK 0x00
+#define KVASER_USB_HYDRA_CAP_STAT_NOT_IMPL 0x01
+#define KVASER_USB_HYDRA_CAP_STAT_UNAVAIL 0x02
+struct kvaser_cmd_cap_res {
+ __le16 cap_cmd;
+ __le16 status;
+ __le32 mask;
+ __le32 value;
+ u8 reserved[16];
+} __packed;
+
+/* CMD_ERROR_EVENT error codes */
+#define KVASER_USB_HYDRA_ERROR_EVENT_CAN 0x01
+#define KVASER_USB_HYDRA_ERROR_EVENT_PARAM 0x09
+struct kvaser_cmd_error_event {
+ __le16 timestamp[3];
+ u8 reserved;
+ u8 error_code;
+ __le16 info1;
+ __le16 info2;
+} __packed;
+
+/* Chip state status flags. Used for chip_state_event and err_frame_data. */
+#define KVASER_USB_HYDRA_BUS_ERR_ACT 0x00
+#define KVASER_USB_HYDRA_BUS_ERR_PASS BIT(5)
+#define KVASER_USB_HYDRA_BUS_BUS_OFF BIT(6)
+struct kvaser_cmd_chip_state_event {
+ __le16 timestamp[3];
+ u8 tx_err_counter;
+ u8 rx_err_counter;
+ u8 bus_status;
+ u8 reserved[19];
+} __packed;
+
+/* Busparam modes */
+#define KVASER_USB_HYDRA_BUS_MODE_CAN 0x00
+#define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01
+#define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02
+struct kvaser_cmd_set_busparams {
+ __le32 bitrate;
+ u8 tseg1;
+ u8 tseg2;
+ u8 sjw;
+ u8 nsamples;
+ u8 reserved0[4];
+ __le32 bitrate_d;
+ u8 tseg1_d;
+ u8 tseg2_d;
+ u8 sjw_d;
+ u8 nsamples_d;
+ u8 canfd_mode;
+ u8 reserved1[7];
+} __packed;
+
+/* Ctrl modes */
+#define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01
+#define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02
+struct kvaser_cmd_set_ctrlmode {
+ u8 mode;
+ u8 reserved[27];
+} __packed;
+
+struct kvaser_err_frame_data {
+ u8 bus_status;
+ u8 reserved0;
+ u8 tx_err_counter;
+ u8 rx_err_counter;
+ u8 reserved1[4];
+} __packed;
+
+struct kvaser_cmd_rx_can {
+ u8 cmd_len;
+ u8 cmd_no;
+ u8 channel;
+ u8 flags;
+ __le16 timestamp[3];
+ u8 dlc;
+ u8 padding;
+ __le32 id;
+ union {
+ u8 data[8];
+ struct kvaser_err_frame_data err_frame_data;
+ };
+} __packed;
+
+/* Extended CAN ID flag. Used in rx_can and tx_can */
+#define KVASER_USB_HYDRA_EXTENDED_FRAME_ID BIT(31)
+struct kvaser_cmd_tx_can {
+ __le32 id;
+ u8 data[8];
+ u8 dlc;
+ u8 flags;
+ __le16 transid;
+ u8 channel;
+ u8 reserved[11];
+} __packed;
+
+struct kvaser_cmd_header {
+ u8 cmd_no;
+	/* The destination HE address is stored in bits 0..5 of he_addr.
+	 * The upper two bits of the source HE address are stored in bits
+	 * 6..7 of he_addr; the lower four bits are stored in bits 12..15
+	 * of transid.
+	 */
+ u8 he_addr;
+ __le16 transid;
+} __packed;
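+
+/* For illustration: a 6-bit source HE address of 0x25 (0b100101) is
+ * stored with its upper two bits in he_addr bits 7..6
+ * (he_addr & 0xc0 == 0x80) and its lower four bits in transid bits
+ * 15..12 (transid >> 12 == 0x5); kvaser_usb_hydra_get_cmd_src_he()
+ * reassembles it as (0x80 >> 2) | 0x5 == 0x25.
+ */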
+
+struct kvaser_cmd {
+ struct kvaser_cmd_header header;
+ union {
+ struct kvaser_cmd_map_ch_req map_ch_req;
+ struct kvaser_cmd_map_ch_res map_ch_res;
+
+ struct kvaser_cmd_card_info card_info;
+ struct kvaser_cmd_sw_info sw_info;
+ struct kvaser_cmd_sw_detail_req sw_detail_req;
+ struct kvaser_cmd_sw_detail_res sw_detail_res;
+
+ struct kvaser_cmd_cap_req cap_req;
+ struct kvaser_cmd_cap_res cap_res;
+
+ struct kvaser_cmd_error_event error_event;
+
+ struct kvaser_cmd_set_busparams set_busparams_req;
+
+ struct kvaser_cmd_chip_state_event chip_state_event;
+
+ struct kvaser_cmd_set_ctrlmode set_ctrlmode;
+
+ struct kvaser_cmd_rx_can rx_can;
+ struct kvaser_cmd_tx_can tx_can;
+ } __packed;
+} __packed;
+
+/* CAN frame flags. Used in rx_can, ext_rx_can, tx_can and ext_tx_can */
+#define KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME BIT(0)
+#define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1)
+#define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4)
+#define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5)
+/* CAN frame flags. Used in ext_rx_can and ext_tx_can */
+#define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12)
+#define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13)
+#define KVASER_USB_HYDRA_CF_FLAG_FDF BIT(16)
+#define KVASER_USB_HYDRA_CF_FLAG_BRS BIT(17)
+#define KVASER_USB_HYDRA_CF_FLAG_ESI BIT(18)
+
+/* KCAN packet header macros. Used in ext_rx_can and ext_tx_can */
+#define KVASER_USB_KCAN_DATA_DLC_BITS 4
+#define KVASER_USB_KCAN_DATA_DLC_SHIFT 8
+#define KVASER_USB_KCAN_DATA_DLC_MASK \
+ GENMASK(KVASER_USB_KCAN_DATA_DLC_BITS - 1 + \
+ KVASER_USB_KCAN_DATA_DLC_SHIFT, \
+ KVASER_USB_KCAN_DATA_DLC_SHIFT)
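+/* With KVASER_USB_KCAN_DATA_DLC_BITS == 4 and ..._DLC_SHIFT == 8 this
+ * expands to GENMASK(11, 8) == 0x00000f00: the DLC occupies bits 11..8
+ * of the KCAN header word.
+ */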
+
+#define KVASER_USB_KCAN_DATA_BRS BIT(14)
+#define KVASER_USB_KCAN_DATA_FDF BIT(15)
+#define KVASER_USB_KCAN_DATA_OSM BIT(16)
+#define KVASER_USB_KCAN_DATA_AREQ BIT(31)
+#define KVASER_USB_KCAN_DATA_SRR BIT(31)
+#define KVASER_USB_KCAN_DATA_RTR BIT(29)
+#define KVASER_USB_KCAN_DATA_IDE BIT(30)
+struct kvaser_cmd_ext_rx_can {
+ __le32 flags;
+ __le32 id;
+ __le32 kcan_id;
+ __le32 kcan_header;
+ __le64 timestamp;
+ union {
+ u8 kcan_payload[64];
+ struct kvaser_err_frame_data err_frame_data;
+ };
+} __packed;
+
+struct kvaser_cmd_ext_tx_can {
+ __le32 flags;
+ __le32 id;
+ __le32 kcan_id;
+ __le32 kcan_header;
+ u8 databytes;
+ u8 dlc;
+ u8 reserved[6];
+ u8 kcan_payload[64];
+} __packed;
+
+struct kvaser_cmd_ext_tx_ack {
+ __le32 flags;
+ u8 reserved0[4];
+ __le64 timestamp;
+ u8 reserved1[8];
+} __packed;
+
+/* struct for extended commands (CMD_EXTENDED) */
+struct kvaser_cmd_ext {
+ struct kvaser_cmd_header header;
+ __le16 len;
+ u8 cmd_no_ext;
+ u8 reserved;
+
+ union {
+ struct kvaser_cmd_ext_rx_can rx_can;
+ struct kvaser_cmd_ext_tx_can tx_can;
+ struct kvaser_cmd_ext_tx_ack tx_ack;
+ } __packed;
+} __packed;
+
+static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
+ .name = "kvaser_usb_kcan",
+ .tseg1_min = 1,
+ .tseg1_max = 255,
+ .tseg2_min = 1,
+ .tseg2_max = 32,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 4096,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = {
+ .name = "kvaser_usb_flex",
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+#define KVASER_USB_HYDRA_TRANSID_BITS 12
+#define KVASER_USB_HYDRA_TRANSID_MASK \
+ GENMASK(KVASER_USB_HYDRA_TRANSID_BITS - 1, 0)
+#define KVASER_USB_HYDRA_HE_ADDR_SRC_MASK GENMASK(7, 6)
+#define KVASER_USB_HYDRA_HE_ADDR_DEST_MASK GENMASK(5, 0)
+#define KVASER_USB_HYDRA_HE_ADDR_SRC_BITS 2
+static inline u16 kvaser_usb_hydra_get_cmd_transid(const struct kvaser_cmd *cmd)
+{
+ return le16_to_cpu(cmd->header.transid) & KVASER_USB_HYDRA_TRANSID_MASK;
+}
+
+static inline void kvaser_usb_hydra_set_cmd_transid(struct kvaser_cmd *cmd,
+ u16 transid)
+{
+ cmd->header.transid =
+ cpu_to_le16(transid & KVASER_USB_HYDRA_TRANSID_MASK);
+}
+
+static inline u8 kvaser_usb_hydra_get_cmd_src_he(const struct kvaser_cmd *cmd)
+{
+ return (cmd->header.he_addr & KVASER_USB_HYDRA_HE_ADDR_SRC_MASK) >>
+ KVASER_USB_HYDRA_HE_ADDR_SRC_BITS |
+ le16_to_cpu(cmd->header.transid) >>
+ KVASER_USB_HYDRA_TRANSID_BITS;
+}
+
+static inline void kvaser_usb_hydra_set_cmd_dest_he(struct kvaser_cmd *cmd,
+ u8 dest_he)
+{
+ cmd->header.he_addr =
+ (cmd->header.he_addr & KVASER_USB_HYDRA_HE_ADDR_SRC_MASK) |
+ (dest_he & KVASER_USB_HYDRA_HE_ADDR_DEST_MASK);
+}
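
To make the he_addr/transid split concrete (bits 6..7 of he_addr carry the
upper two bits of the 6-bit source HE address, bits 12..15 of transid carry
the lower four), here is a self-contained user-space round trip mirroring the
helpers above; the 0x2a sample value is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t src_he = 0x2a;                 /* 6-bit HE address */
            uint8_t he_addr = (src_he >> 4) << 6;  /* upper 2 bits -> 6..7 */
            uint16_t transid = (uint16_t)(src_he & 0x0f) << 12; /* lower 4 -> 12..15 */
            /* Recover it the way kvaser_usb_hydra_get_cmd_src_he() does. */
            uint8_t decoded = ((he_addr & 0xc0) >> 2) | (transid >> 12);

            printf("src_he=0x%02x decoded=0x%02x\n", src_he, decoded);
            return 0;
    }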
+
+static u8 kvaser_usb_hydra_channel_from_cmd(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ int i;
+ u8 channel = 0xff;
+ u8 src_he = kvaser_usb_hydra_get_cmd_src_he(cmd);
+
+ for (i = 0; i < KVASER_USB_MAX_NET_DEVICES; i++) {
+ if (dev->card_data.hydra.channel_to_he[i] == src_he) {
+ channel = i;
+ break;
+ }
+ }
+
+ return channel;
+}
+
+static u16 kvaser_usb_hydra_get_next_transid(struct kvaser_usb *dev)
+{
+ unsigned long flags;
+ u16 transid;
+ struct kvaser_usb_dev_card_data_hydra *card_data =
+ &dev->card_data.hydra;
+
+ spin_lock_irqsave(&card_data->transid_lock, flags);
+ transid = card_data->transid;
+ if (transid >= KVASER_USB_HYDRA_MAX_TRANSID)
+ transid = KVASER_USB_HYDRA_MIN_TRANSID;
+ else
+ transid++;
+ card_data->transid = transid;
+ spin_unlock_irqrestore(&card_data->transid_lock, flags);
+
+ return transid;
+}
+
+static size_t kvaser_usb_hydra_cmd_size(struct kvaser_cmd *cmd)
+{
+ size_t ret;
+
+ if (cmd->header.cmd_no == CMD_EXTENDED)
+ ret = le16_to_cpu(((struct kvaser_cmd_ext *)cmd)->len);
+ else
+ ret = sizeof(struct kvaser_cmd);
+
+ return ret;
+}
+
+static struct kvaser_usb_net_priv *
+kvaser_usb_hydra_net_priv_from_cmd(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv = NULL;
+ u8 channel = kvaser_usb_hydra_channel_from_cmd(dev, cmd);
+
+ if (channel >= dev->nchannels)
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ else
+ priv = dev->nets[channel];
+
+ return priv;
+}
+
+static ktime_t
+kvaser_usb_hydra_ktime_from_rx_cmd(const struct kvaser_usb_dev_cfg *cfg,
+ const struct kvaser_cmd *cmd)
+{
+ u64 ticks;
+
+ if (cmd->header.cmd_no == CMD_EXTENDED) {
+ struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd;
+
+ ticks = le64_to_cpu(cmd_ext->rx_can.timestamp);
+ } else {
+ ticks = le16_to_cpu(cmd->rx_can.timestamp[0]);
+ ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[1])) << 16;
+ ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[2])) << 32;
+ }
+
+ return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
+}
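
A worked example of the conversion above for the non-extended layout: three
little-endian 16-bit words form a 48-bit tick counter, and with the timestamp
frequency given in MHz, ticks * 1000 / freq yields nanoseconds. The tick words
are made up; 80 MHz matches the KCAN configuration at the end of this file:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t ts[3] = { 0x5678, 0x1234, 0x0001 };  /* sample words */
            uint64_t ticks = (uint64_t)ts[0] |
                             (uint64_t)ts[1] << 16 |
                             (uint64_t)ts[2] << 32;
            uint32_t freq_mhz = 80;  /* cfg->timestamp_freq for KCAN */

            printf("ticks=%llu ns=%llu\n",
                   (unsigned long long)ticks,
                   (unsigned long long)(ticks * 1000 / freq_mhz));
            return 0;
    }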
+
+static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
+ u8 cmd_no, int channel)
+{
+ struct kvaser_cmd *cmd;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = cmd_no;
+ if (channel < 0) {
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
+ } else {
+ if (channel >= KVASER_USB_MAX_NET_DEVICES) {
+ dev_err(&dev->intf->dev, "channel (%d) out of range.\n",
+ channel);
+ err = -EINVAL;
+ goto end;
+ }
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[channel]);
+ }
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+
+end:
+ kfree(cmd);
+
+ return err;
+}
+
+static int
+kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+ u8 cmd_no)
+{
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb *dev = priv->dev;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = cmd_no;
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd_async(priv, cmd,
+ kvaser_usb_hydra_cmd_size(cmd));
+ if (err)
+ kfree(cmd);
+
+ return err;
+}
+
+/* This function is used for synchronously waiting on hydra control commands.
+ * Note: Compared to kvaser_usb_hydra_read_bulk_callback(), we never need to
+ * handle partial hydra commands here, since hydra control commands are
+ * always non-extended commands.
+ */
+static int kvaser_usb_hydra_wait_cmd(const struct kvaser_usb *dev, u8 cmd_no,
+ struct kvaser_cmd *cmd)
+{
+ void *buf;
+ int err;
+ unsigned long timeout = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT);
+
+ if (cmd->header.cmd_no == CMD_EXTENDED) {
+ dev_err(&dev->intf->dev, "Wait for CMD_EXTENDED not allowed\n");
+ return -EINVAL;
+ }
+
+ buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ do {
+ int actual_len = 0;
+ int pos = 0;
+
+ err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE,
+ &actual_len);
+ if (err < 0)
+ goto end;
+
+ while (pos < actual_len) {
+ struct kvaser_cmd *tmp_cmd;
+ size_t cmd_len;
+
+ tmp_cmd = buf + pos;
+ cmd_len = kvaser_usb_hydra_cmd_size(tmp_cmd);
+ if (pos + cmd_len > actual_len) {
+ dev_err_ratelimited(&dev->intf->dev,
+ "Format error\n");
+ break;
+ }
+
+ if (tmp_cmd->header.cmd_no == cmd_no) {
+ memcpy(cmd, tmp_cmd, cmd_len);
+ goto end;
+ }
+ pos += cmd_len;
+ }
+ } while (time_before(jiffies, timeout));
+
+ err = -EINVAL;
+
+end:
+ kfree(buf);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_map_channel_resp(struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ u8 he, channel;
+ u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
+ struct kvaser_usb_dev_card_data_hydra *card_data =
+ &dev->card_data.hydra;
+
+ if (transid > 0x007f || transid < 0x0040) {
+ dev_err(&dev->intf->dev,
+ "CMD_MAP_CHANNEL_RESP, invalid transid: 0x%x\n",
+ transid);
+ return -EINVAL;
+ }
+
+ switch (transid) {
+ case KVASER_USB_HYDRA_TRANSID_CANHE:
+ case KVASER_USB_HYDRA_TRANSID_CANHE + 1:
+ case KVASER_USB_HYDRA_TRANSID_CANHE + 2:
+ case KVASER_USB_HYDRA_TRANSID_CANHE + 3:
+ case KVASER_USB_HYDRA_TRANSID_CANHE + 4:
+ channel = transid & 0x000f;
+ he = cmd->map_ch_res.he_addr;
+ card_data->channel_to_he[channel] = he;
+ break;
+ case KVASER_USB_HYDRA_TRANSID_SYSDBG:
+ card_data->sysdbg_he = cmd->map_ch_res.he_addr;
+ break;
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unknown CMD_MAP_CHANNEL_RESP transid=0x%x\n",
+ transid);
+ break;
+ }
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_map_channel(struct kvaser_usb *dev, u16 transid,
+ u8 channel, const char *name)
+{
+ struct kvaser_cmd *cmd;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ strcpy(cmd->map_ch_req.name, name);
+ cmd->header.cmd_no = CMD_MAP_CHANNEL_REQ;
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ROUTER);
+ cmd->map_ch_req.channel = channel;
+
+ kvaser_usb_hydra_set_cmd_transid(cmd, transid);
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ if (err)
+ goto end;
+
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_MAP_CHANNEL_RESP, cmd);
+ if (err)
+ goto end;
+
+	err = kvaser_usb_hydra_map_channel_resp(dev, cmd);
+
+end:
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
+ u16 cap_cmd_req, u16 *status)
+{
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+ struct kvaser_cmd *cmd;
+ u32 value = 0;
+ u32 mask = 0;
+ u16 cap_cmd_res;
+ int err;
+ int i;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ;
+ cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
+
+ kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ if (err)
+ goto end;
+
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd);
+ if (err)
+ goto end;
+
+ *status = le16_to_cpu(cmd->cap_res.status);
+
+ if (*status != KVASER_USB_HYDRA_CAP_STAT_OK)
+ goto end;
+
+ cap_cmd_res = le16_to_cpu(cmd->cap_res.cap_cmd);
+ switch (cap_cmd_res) {
+ case KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE:
+ case KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT:
+ case KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT:
+ value = le32_to_cpu(cmd->cap_res.value);
+ mask = le32_to_cpu(cmd->cap_res.mask);
+ break;
+ default:
+ dev_warn(&dev->intf->dev, "Unknown capability command %u\n",
+ cap_cmd_res);
+ break;
+ }
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (BIT(i) & (value & mask)) {
+ switch (cap_cmd_res) {
+ case KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE:
+ card_data->ctrlmode_supported |=
+ CAN_CTRLMODE_LISTENONLY;
+ break;
+ case KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT:
+ card_data->capabilities |=
+ KVASER_USB_CAP_BERR_CAP;
+ break;
+ case KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT:
+ card_data->ctrlmode_supported |=
+ CAN_CTRLMODE_ONE_SHOT;
+ break;
+ }
+ }
+ }
+
+end:
+ kfree(cmd);
+
+ return err;
+}
+
+static void kvaser_usb_hydra_start_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ if (completion_done(&priv->start_comp) &&
+ netif_queue_stopped(priv->netdev)) {
+ netif_wake_queue(priv->netdev);
+ } else {
+ netif_start_queue(priv->netdev);
+ complete(&priv->start_comp);
+ }
+}
+
+static void kvaser_usb_hydra_stop_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ complete(&priv->stop_comp);
+}
+
+static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ complete(&priv->flush_comp);
+}
+
+static void
+kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
+ u8 bus_status,
+ const struct can_berr_counter *bec,
+ enum can_state *new_state)
+{
+ if (bus_status & KVASER_USB_HYDRA_BUS_BUS_OFF) {
+ *new_state = CAN_STATE_BUS_OFF;
+ } else if (bus_status & KVASER_USB_HYDRA_BUS_ERR_PASS) {
+ *new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (bus_status == KVASER_USB_HYDRA_BUS_ERR_ACT) {
+ if (bec->txerr >= 128 || bec->rxerr >= 128) {
+ netdev_warn(priv->netdev,
+ "ERR_ACTIVE but err tx=%u or rx=%u >=128\n",
+ bec->txerr, bec->rxerr);
+ *new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (bec->txerr >= 96 || bec->rxerr >= 96) {
+ *new_state = CAN_STATE_ERROR_WARNING;
+ } else {
+ *new_state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+}
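
The counter thresholds used above are the standard CAN fault-confinement
limits: an error counter of 128 or more means error-passive, 96 or more means
error-warning. A user-space sketch of just that sub-classification; the sample
counters are arbitrary:

    #include <stdio.h>

    static const char *counter_state(unsigned int txerr, unsigned int rxerr)
    {
            if (txerr >= 128 || rxerr >= 128)
                    return "error-passive";
            if (txerr >= 96 || rxerr >= 96)
                    return "error-warning";
            return "error-active";
    }

    int main(void)
    {
            printf("%s\n", counter_state(100, 3));  /* error-warning */
            return 0;
    }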
+
+static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
+ u8 bus_status,
+ const struct can_berr_counter *bec)
+{
+ struct net_device *netdev = priv->netdev;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats;
+ enum can_state new_state, old_state;
+
+ old_state = priv->can.state;
+
+ kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, bec,
+ &new_state);
+
+ if (new_state == old_state)
+ return;
+
+	/* Ignore the state change if the previous state was STOPPED and the
+	 * new state is BUS_OFF. The firmware always reports this as BUS_OFF,
+	 * since it does not distinguish between BUS_OFF and STOPPED.
+	 */
+ if (old_state == CAN_STATE_STOPPED && new_state == CAN_STATE_BUS_OFF)
+ return;
+
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (skb) {
+ enum can_state tx_state, rx_state;
+
+ tx_state = (bec->txerr >= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (bec->txerr <= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ can_change_state(netdev, cf, tx_state, rx_state);
+ }
+
+ if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
+ if (!priv->can.restart_ms)
+ kvaser_usb_hydra_send_simple_cmd_async
+ (priv, CMD_STOP_CHIP_REQ);
+
+ can_bus_off(netdev);
+ }
+
+ if (!skb) {
+ netdev_warn(netdev, "No memory left for err_skb\n");
+ return;
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF)
+ priv->can.can_stats.restarts++;
+
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+
+ stats = &netdev->stats;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_hydra_state_event(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ struct can_berr_counter bec;
+ u8 bus_status;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ bus_status = cmd->chip_state_event.bus_status;
+ bec.txerr = cmd->chip_state_event.tx_err_counter;
+ bec.rxerr = cmd->chip_state_event.rx_err_counter;
+
+ kvaser_usb_hydra_update_state(priv, bus_status, &bec);
+ priv->bec.txerr = bec.txerr;
+ priv->bec.rxerr = bec.rxerr;
+}
+
+static void kvaser_usb_hydra_error_event_parameter(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ /* info1 will contain the offending cmd_no */
+ switch (le16_to_cpu(cmd->error_event.info1)) {
+ case CMD_START_CHIP_REQ:
+ dev_warn(&dev->intf->dev,
+ "CMD_START_CHIP_REQ error in parameter\n");
+ break;
+
+ case CMD_STOP_CHIP_REQ:
+ dev_warn(&dev->intf->dev,
+ "CMD_STOP_CHIP_REQ error in parameter\n");
+ break;
+
+ case CMD_FLUSH_QUEUE:
+ dev_warn(&dev->intf->dev,
+ "CMD_FLUSH_QUEUE error in parameter\n");
+ break;
+
+ case CMD_SET_BUSPARAMS_REQ:
+ dev_warn(&dev->intf->dev,
+ "Set bittiming failed. Error in parameter\n");
+ break;
+
+ case CMD_SET_BUSPARAMS_FD_REQ:
+ dev_warn(&dev->intf->dev,
+ "Set data bittiming failed. Error in parameter\n");
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unhandled parameter error event cmd_no (%u)\n",
+ le16_to_cpu(cmd->error_event.info1));
+ break;
+ }
+}
+
+static void kvaser_usb_hydra_error_event(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ switch (cmd->error_event.error_code) {
+ case KVASER_USB_HYDRA_ERROR_EVENT_PARAM:
+ kvaser_usb_hydra_error_event_parameter(dev, cmd);
+ break;
+
+ case KVASER_USB_HYDRA_ERROR_EVENT_CAN:
+ /* Wrong channel mapping?! This should never happen!
+ * info1 will contain the offending cmd_no
+ */
+ dev_err(&dev->intf->dev,
+ "Received CAN error event for cmd_no (%u)\n",
+ le16_to_cpu(cmd->error_event.info1));
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unhandled error event (%d)\n",
+ cmd->error_event.error_code);
+ break;
+ }
+}
+
+static void
+kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
+ const struct kvaser_err_frame_data *err_frame_data,
+ ktime_t hwtstamp)
+{
+ struct net_device *netdev = priv->netdev;
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct can_berr_counter bec;
+ enum can_state new_state, old_state;
+ u8 bus_status;
+
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+
+ bus_status = err_frame_data->bus_status;
+ bec.txerr = err_frame_data->tx_err_counter;
+ bec.rxerr = err_frame_data->rx_err_counter;
+
+ old_state = priv->can.state;
+ kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, &bec,
+ &new_state);
+
+ skb = alloc_can_err_skb(netdev, &cf);
+
+ if (new_state != old_state) {
+ if (skb) {
+ enum can_state tx_state, rx_state;
+
+ tx_state = (bec.txerr >= bec.rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (bec.txerr <= bec.rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+
+ can_change_state(netdev, cf, tx_state, rx_state);
+ }
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ if (!priv->can.restart_ms)
+ kvaser_usb_hydra_send_simple_cmd_async
+ (priv, CMD_STOP_CHIP_REQ);
+
+ can_bus_off(netdev);
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+
+ if (!skb) {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
+ return;
+ }
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = hwtstamp;
+
+ cf->can_id |= CAN_ERR_BUSERROR;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+
+ priv->bec.txerr = bec.txerr;
+ priv->bec.rxerr = bec.rxerr;
+}
+
+static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv,
+ const struct kvaser_cmd_ext *cmd)
+{
+ struct net_device *netdev = priv->netdev;
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 flags;
+
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
+ return;
+ }
+
+ cf->can_id |= CAN_ERR_BUSERROR;
+ flags = le32_to_cpu(cmd->tx_ack.flags);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_OSM_NACK)
+ cf->can_id |= CAN_ERR_ACK;
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_ABL) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ priv->can.can_stats.arbitration_lost++;
+ }
+
+ stats->tx_errors++;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_tx_urb_context *context;
+ struct kvaser_usb_net_priv *priv;
+ unsigned long irq_flags;
+ bool one_shot_fail = false;
+ u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ if (!netif_device_present(priv->netdev))
+ return;
+
+ if (cmd->header.cmd_no == CMD_EXTENDED) {
+ struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd;
+ u32 flags = le32_to_cpu(cmd_ext->tx_ack.flags);
+
+ if (flags & (KVASER_USB_HYDRA_CF_FLAG_OSM_NACK |
+ KVASER_USB_HYDRA_CF_FLAG_ABL)) {
+ kvaser_usb_hydra_one_shot_fail(priv, cmd_ext);
+ one_shot_fail = true;
+ }
+ }
+
+ context = &priv->tx_contexts[transid % dev->max_tx_urbs];
+ if (!one_shot_fail) {
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ stats->tx_packets++;
+ stats->tx_bytes += can_dlc2len(context->dlc);
+ }
+
+ spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
+
+ can_get_echo_skb(priv->netdev, context->echo_index);
+ context->echo_index = dev->max_tx_urbs;
+ --priv->active_tx_contexts;
+ netif_wake_queue(priv->netdev);
+
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, irq_flags);
+}
+
+static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv = NULL;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct net_device_stats *stats;
+ u8 flags;
+ ktime_t hwtstamp;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ stats = &priv->netdev->stats;
+
+ flags = cmd->rx_can.flags;
+ hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, cmd);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
+ kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
+ hwtstamp);
+ return;
+ }
+
+ skb = alloc_can_skb(priv->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = hwtstamp;
+
+ cf->can_id = le32_to_cpu(cmd->rx_can.id);
+
+ if (cf->can_id & KVASER_USB_HYDRA_EXTENDED_FRAME_ID) {
+ cf->can_id &= CAN_EFF_MASK;
+ cf->can_id |= CAN_EFF_FLAG;
+ } else {
+ cf->can_id &= CAN_SFF_MASK;
+ }
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN)
+ kvaser_usb_can_rx_over_error(priv->netdev);
+
+ cf->can_dlc = get_can_dlc(cmd->rx_can.dlc);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, cmd->rx_can.data, cf->can_dlc);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
+ const struct kvaser_cmd_ext *cmd)
+{
+ struct kvaser_cmd *std_cmd = (struct kvaser_cmd *)cmd;
+ struct kvaser_usb_net_priv *priv;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct net_device_stats *stats;
+ u32 flags;
+ u8 dlc;
+ u32 kcan_header;
+ ktime_t hwtstamp;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, std_cmd);
+ if (!priv)
+ return;
+
+ stats = &priv->netdev->stats;
+
+ kcan_header = le32_to_cpu(cmd->rx_can.kcan_header);
+ dlc = (kcan_header & KVASER_USB_KCAN_DATA_DLC_MASK) >>
+ KVASER_USB_KCAN_DATA_DLC_SHIFT;
+
+ flags = le32_to_cpu(cmd->rx_can.flags);
+ hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, std_cmd);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
+ kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
+ hwtstamp);
+ return;
+ }
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF)
+ skb = alloc_canfd_skb(priv->netdev, &cf);
+ else
+ skb = alloc_can_skb(priv->netdev, (struct can_frame **)&cf);
+
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = hwtstamp;
+
+ cf->can_id = le32_to_cpu(cmd->rx_can.id);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID) {
+ cf->can_id &= CAN_EFF_MASK;
+ cf->can_id |= CAN_EFF_FLAG;
+ } else {
+ cf->can_id &= CAN_SFF_MASK;
+ }
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN)
+ kvaser_usb_can_rx_over_error(priv->netdev);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF) {
+ cf->len = can_dlc2len(get_canfd_dlc(dlc));
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_BRS)
+ cf->flags |= CANFD_BRS;
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_ESI)
+ cf->flags |= CANFD_ESI;
+ } else {
+ cf->len = get_can_dlc(dlc);
+ }
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, cmd->rx_can.kcan_payload, cf->len);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->len;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ switch (cmd->header.cmd_no) {
+ case CMD_START_CHIP_RESP:
+ kvaser_usb_hydra_start_chip_reply(dev, cmd);
+ break;
+
+ case CMD_STOP_CHIP_RESP:
+ kvaser_usb_hydra_stop_chip_reply(dev, cmd);
+ break;
+
+ case CMD_FLUSH_QUEUE_RESP:
+ kvaser_usb_hydra_flush_queue_reply(dev, cmd);
+ break;
+
+ case CMD_CHIP_STATE_EVENT:
+ kvaser_usb_hydra_state_event(dev, cmd);
+ break;
+
+ case CMD_ERROR_EVENT:
+ kvaser_usb_hydra_error_event(dev, cmd);
+ break;
+
+ case CMD_TX_ACKNOWLEDGE:
+ kvaser_usb_hydra_tx_acknowledge(dev, cmd);
+ break;
+
+ case CMD_RX_MESSAGE:
+ kvaser_usb_hydra_rx_msg_std(dev, cmd);
+ break;
+
+ /* Ignored commands */
+ case CMD_SET_BUSPARAMS_RESP:
+ case CMD_SET_BUSPARAMS_FD_RESP:
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev, "Unhandled command (%d)\n",
+ cmd->header.cmd_no);
+ break;
+ }
+}
+
+static void kvaser_usb_hydra_handle_cmd_ext(const struct kvaser_usb *dev,
+ const struct kvaser_cmd_ext *cmd)
+{
+ switch (cmd->cmd_no_ext) {
+ case CMD_TX_ACKNOWLEDGE_FD:
+ kvaser_usb_hydra_tx_acknowledge(dev, (struct kvaser_cmd *)cmd);
+ break;
+
+ case CMD_RX_MESSAGE_FD:
+ kvaser_usb_hydra_rx_msg_ext(dev, cmd);
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev, "Unhandled extended command (%d)\n",
+			 cmd->cmd_no_ext);
+ break;
+ }
+}
+
+static void kvaser_usb_hydra_handle_cmd(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ if (cmd->header.cmd_no == CMD_EXTENDED)
+ kvaser_usb_hydra_handle_cmd_ext
+ (dev, (struct kvaser_cmd_ext *)cmd);
+ else
+ kvaser_usb_hydra_handle_cmd_std(dev, cmd);
+}
+
+static void *
+kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd_ext *cmd;
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u8 dlc = can_len2dlc(cf->len);
+ u8 nbr_of_bytes = cf->len;
+ u32 flags;
+ u32 id;
+ u32 kcan_id;
+ u32 kcan_header;
+
+ *frame_len = nbr_of_bytes;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC);
+ if (!cmd)
+ return NULL;
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ ((struct kvaser_cmd *)cmd,
+ dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid((struct kvaser_cmd *)cmd, transid);
+
+ cmd->header.cmd_no = CMD_EXTENDED;
+ cmd->cmd_no_ext = CMD_TX_CAN_MESSAGE_FD;
+
+ *cmd_len = ALIGN(sizeof(struct kvaser_cmd_ext) -
+ sizeof(cmd->tx_can.kcan_payload) + nbr_of_bytes,
+ 8);
+
+ cmd->len = cpu_to_le16(*cmd_len);
+
+ cmd->tx_can.databytes = nbr_of_bytes;
+ cmd->tx_can.dlc = dlc;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ id = cf->can_id & CAN_EFF_MASK;
+ flags = KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID;
+ kcan_id = (cf->can_id & CAN_EFF_MASK) |
+ KVASER_USB_KCAN_DATA_IDE | KVASER_USB_KCAN_DATA_SRR;
+ } else {
+ id = cf->can_id & CAN_SFF_MASK;
+ flags = 0;
+ kcan_id = cf->can_id & CAN_SFF_MASK;
+ }
+
+ if (cf->can_id & CAN_ERR_FLAG)
+ flags |= KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME;
+
+ kcan_header = ((dlc << KVASER_USB_KCAN_DATA_DLC_SHIFT) &
+ KVASER_USB_KCAN_DATA_DLC_MASK) |
+ KVASER_USB_KCAN_DATA_AREQ |
+ (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT ?
+ KVASER_USB_KCAN_DATA_OSM : 0);
+
+ if (can_is_canfd_skb(skb)) {
+ kcan_header |= KVASER_USB_KCAN_DATA_FDF |
+ (cf->flags & CANFD_BRS ?
+ KVASER_USB_KCAN_DATA_BRS : 0);
+ } else {
+ if (cf->can_id & CAN_RTR_FLAG) {
+ kcan_id |= KVASER_USB_KCAN_DATA_RTR;
+ cmd->tx_can.databytes = 0;
+ flags |= KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME;
+ }
+ }
+
+ cmd->tx_can.kcan_id = cpu_to_le32(kcan_id);
+ cmd->tx_can.id = cpu_to_le32(id);
+ cmd->tx_can.flags = cpu_to_le32(flags);
+ cmd->tx_can.kcan_header = cpu_to_le32(kcan_header);
+
+ memcpy(cmd->tx_can.kcan_payload, cf->data, nbr_of_bytes);
+
+ return cmd;
+}
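
The *cmd_len computation above trims the 64-byte payload array down to the
bytes actually used and rounds the result up to an 8-byte boundary. A
user-space sketch of the arithmetic; the 96-byte struct size is an assumption
for illustration, not taken from the real layout:

    #include <stddef.h>
    #include <stdio.h>

    #define SKETCH_ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int main(void)
    {
            size_t ext_cmd_size = 96;  /* assumed sizeof(struct kvaser_cmd_ext) */
            size_t payload_size = 64;  /* sizeof kcan_payload */
            size_t nbr_of_bytes = 5;   /* CAN frame with 5 data bytes */

            /* 96 - 64 + 5 = 37, rounded up to 40 */
            printf("cmd_len=%zu\n",
                   SKETCH_ALIGN(ext_cmd_size - payload_size + nbr_of_bytes, 8));
            return 0;
    }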
+
+static void *
+kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 flags;
+ u32 id;
+
+ *frame_len = cf->can_dlc;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ if (!cmd)
+ return NULL;
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid(cmd, transid);
+
+ cmd->header.cmd_no = CMD_TX_CAN_MESSAGE;
+
+ *cmd_len = ALIGN(sizeof(struct kvaser_cmd), 8);
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ id = (cf->can_id & CAN_EFF_MASK);
+ id |= KVASER_USB_HYDRA_EXTENDED_FRAME_ID;
+ } else {
+ id = cf->can_id & CAN_SFF_MASK;
+ }
+
+ cmd->tx_can.dlc = cf->can_dlc;
+
+ flags = (cf->can_id & CAN_EFF_FLAG ?
+ KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID : 0);
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ flags |= KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME;
+
+ flags |= (cf->can_id & CAN_ERR_FLAG ?
+ KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME : 0);
+
+ cmd->tx_can.id = cpu_to_le32(id);
+ cmd->tx_can.flags = flags;
+
+ memcpy(cmd->tx_can.data, cf->data, *frame_len);
+
+ return cmd;
+}
+
+static int kvaser_usb_hydra_set_mode(struct net_device *netdev,
+ enum can_mode mode)
+{
+ int err = 0;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ /* CAN controller automatically recovers from BUS_OFF */
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
+{
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct kvaser_usb *dev = priv->dev;
+ int tseg1 = bt->prop_seg + bt->phase_seg1;
+ int tseg2 = bt->phase_seg2;
+ int sjw = bt->sjw;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
+ cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate);
+ cmd->set_busparams_req.sjw = (u8)sjw;
+ cmd->set_busparams_req.tseg1 = (u8)tseg1;
+ cmd->set_busparams_req.tseg2 = (u8)tseg2;
+ cmd->set_busparams_req.nsamples = 1;
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
+{
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct kvaser_usb *dev = priv->dev;
+ int tseg1 = dbt->prop_seg + dbt->phase_seg1;
+ int tseg2 = dbt->phase_seg2;
+ int sjw = dbt->sjw;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
+ cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate);
+ cmd->set_busparams_req.sjw_d = (u8)sjw;
+ cmd->set_busparams_req.tseg1_d = (u8)tseg1;
+ cmd->set_busparams_req.tseg2_d = (u8)tseg2;
+ cmd->set_busparams_req.nsamples_d = 1;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+ cmd->set_busparams_req.canfd_mode =
+ KVASER_USB_HYDRA_BUS_MODE_NONISO;
+ else
+ cmd->set_busparams_req.canfd_mode =
+ KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO;
+ }
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ int err;
+
+ err = kvaser_usb_hydra_send_simple_cmd(priv->dev,
+ CMD_GET_CHIP_STATE_REQ,
+ priv->channel);
+ if (err)
+ return err;
+
+ *bec = priv->bec;
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev)
+{
+ const struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *ep;
+ int i;
+
+ iface_desc = &dev->intf->altsetting[0];
+
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ ep = &iface_desc->endpoint[i].desc;
+
+ if (!dev->bulk_in && usb_endpoint_is_bulk_in(ep) &&
+ ep->bEndpointAddress == KVASER_USB_HYDRA_BULK_EP_IN_ADDR)
+ dev->bulk_in = ep;
+
+ if (!dev->bulk_out && usb_endpoint_is_bulk_out(ep) &&
+ ep->bEndpointAddress == KVASER_USB_HYDRA_BULK_EP_OUT_ADDR)
+ dev->bulk_out = ep;
+
+ if (dev->bulk_in && dev->bulk_out)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev)
+{
+ int err;
+ unsigned int i;
+ struct kvaser_usb_dev_card_data_hydra *card_data =
+ &dev->card_data.hydra;
+
+ card_data->transid = KVASER_USB_HYDRA_MIN_TRANSID;
+ spin_lock_init(&card_data->transid_lock);
+
+ memset(card_data->usb_rx_leftover, 0, KVASER_USB_HYDRA_MAX_CMD_LEN);
+ card_data->usb_rx_leftover_len = 0;
+ spin_lock_init(&card_data->usb_rx_leftover_lock);
+
+ memset(card_data->channel_to_he, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL,
+ sizeof(card_data->channel_to_he));
+ card_data->sysdbg_he = 0;
+
+ for (i = 0; i < KVASER_USB_MAX_NET_DEVICES; i++) {
+ err = kvaser_usb_hydra_map_channel
+ (dev,
+ (KVASER_USB_HYDRA_TRANSID_CANHE | i),
+ i, "CAN");
+ if (err) {
+ dev_err(&dev->intf->dev,
+ "CMD_MAP_CHANNEL_REQ failed for CAN%u\n", i);
+ return err;
+ }
+ }
+
+ err = kvaser_usb_hydra_map_channel(dev, KVASER_USB_HYDRA_TRANSID_SYSDBG,
+ 0, "SYSDBG");
+ if (err) {
+ dev_err(&dev->intf->dev,
+ "CMD_MAP_CHANNEL_REQ failed for SYSDBG\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd cmd;
+ int err;
+
+ err = kvaser_usb_hydra_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO_REQ,
+ -1);
+ if (err)
+ return err;
+
+ memset(&cmd, 0, sizeof(struct kvaser_cmd));
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_RESP, &cmd);
+ if (err)
+ return err;
+
+ dev->max_tx_urbs = min_t(unsigned int, KVASER_USB_MAX_TX_URBS,
+ le16_to_cpu(cmd.sw_info.max_outstanding_tx));
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd *cmd;
+ int err;
+ u32 flags;
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ;
+ cmd->sw_detail_req.use_ext_cmd = 1;
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
+
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ if (err)
+ goto end;
+
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_SOFTWARE_DETAILS_RESP,
+ cmd);
+ if (err)
+ goto end;
+
+ dev->fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version);
+ flags = le32_to_cpu(cmd->sw_detail_res.sw_flags);
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BAD) {
+		dev_err(&dev->intf->dev,
+			"Bad firmware, device refuses to run!\n");
+ err = -EINVAL;
+ goto end;
+ }
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BETA)
+ dev_info(&dev->intf->dev, "Beta firmware in use\n");
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_EXT_CAP)
+ card_data->capabilities |= KVASER_USB_CAP_EXT_CAP;
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_EXT_CMD)
+ card_data->capabilities |= KVASER_USB_HYDRA_CAP_EXT_CMD;
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_CANFD)
+ card_data->ctrlmode_supported |= CAN_CTRLMODE_FD;
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_NONISO)
+ card_data->ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_FREQ_80M)
+ dev->cfg = &kvaser_usb_hydra_dev_cfg_kcan;
+ else
+ dev->cfg = &kvaser_usb_hydra_dev_cfg_flexc;
+
+end:
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_get_card_info(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd cmd;
+ int err;
+
+ err = kvaser_usb_hydra_send_simple_cmd(dev, CMD_GET_CARD_INFO_REQ, -1);
+ if (err)
+ return err;
+
+ memset(&cmd, 0, sizeof(struct kvaser_cmd));
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CARD_INFO_RESP, &cmd);
+ if (err)
+ return err;
+
+ dev->nchannels = cmd.card_info.nchannels;
+ if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_get_capabilities(struct kvaser_usb *dev)
+{
+ int err;
+ u16 status;
+
+ if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) {
+ dev_info(&dev->intf->dev,
+ "No extended capability support. Upgrade your device.\n");
+ return 0;
+ }
+
+ err = kvaser_usb_hydra_get_single_capability
+ (dev,
+ KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE failed %u\n",
+ status);
+
+ err = kvaser_usb_hydra_get_single_capability
+ (dev,
+ KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT failed %u\n",
+ status);
+
+ err = kvaser_usb_hydra_get_single_capability
+ (dev, KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT failed %u\n",
+ status);
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ int err;
+
+ if ((priv->can.ctrlmode &
+ (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)) ==
+ CAN_CTRLMODE_FD_NON_ISO) {
+		netdev_warn(priv->netdev,
+			    "CTRLMODE_FD must be enabled when CTRLMODE_FD_NON_ISO is enabled\n");
+ return -EINVAL;
+ }
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ;
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_LISTEN;
+ else
+ cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL;
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->start_comp);
+
+ err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->start_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->stop_comp);
+
+	/* Make sure we do not report an invalid BUS_OFF from
+	 * CMD_CHIP_STATE_EVENT; see the comment in
+	 * kvaser_usb_hydra_update_state().
+	 */
+ priv->can.state = CAN_STATE_STOPPED;
+
+ err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_STOP_CHIP_REQ,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->stop_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->flush_comp);
+
+ err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->flush_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* A single extended hydra command can be transmitted in multiple transfers.
+ * We have to buffer partial hydra commands and handle them in the next
+ * callback.
+ */
+static void kvaser_usb_hydra_read_bulk_callback(struct kvaser_usb *dev,
+ void *buf, int len)
+{
+ unsigned long irq_flags;
+ struct kvaser_cmd *cmd;
+ int pos = 0;
+ size_t cmd_len;
+ struct kvaser_usb_dev_card_data_hydra *card_data =
+ &dev->card_data.hydra;
+ int usb_rx_leftover_len;
+ spinlock_t *usb_rx_leftover_lock = &card_data->usb_rx_leftover_lock;
+
+ spin_lock_irqsave(usb_rx_leftover_lock, irq_flags);
+ usb_rx_leftover_len = card_data->usb_rx_leftover_len;
+ if (usb_rx_leftover_len) {
+ int remaining_bytes;
+
+ cmd = (struct kvaser_cmd *)card_data->usb_rx_leftover;
+
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+
+ remaining_bytes = min_t(unsigned int, len,
+ cmd_len - usb_rx_leftover_len);
+ /* Make sure we do not overflow usb_rx_leftover */
+ if (remaining_bytes + usb_rx_leftover_len >
+ KVASER_USB_HYDRA_MAX_CMD_LEN) {
+ dev_err(&dev->intf->dev, "Format error\n");
+ spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags);
+ return;
+ }
+
+ memcpy(card_data->usb_rx_leftover + usb_rx_leftover_len, buf,
+ remaining_bytes);
+ pos += remaining_bytes;
+
+ if (remaining_bytes + usb_rx_leftover_len == cmd_len) {
+ kvaser_usb_hydra_handle_cmd(dev, cmd);
+ usb_rx_leftover_len = 0;
+ } else {
+ /* Command still not complete */
+ usb_rx_leftover_len += remaining_bytes;
+ }
+ card_data->usb_rx_leftover_len = usb_rx_leftover_len;
+ }
+ spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags);
+
+ while (pos < len) {
+ cmd = buf + pos;
+
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+
+ if (pos + cmd_len > len) {
+ /* We got first part of a command */
+ int leftover_bytes;
+
+ leftover_bytes = len - pos;
+ /* Make sure we do not overflow usb_rx_leftover */
+ if (leftover_bytes > KVASER_USB_HYDRA_MAX_CMD_LEN) {
+ dev_err(&dev->intf->dev, "Format error\n");
+ return;
+ }
+ spin_lock_irqsave(usb_rx_leftover_lock, irq_flags);
+ memcpy(card_data->usb_rx_leftover, buf + pos,
+ leftover_bytes);
+ card_data->usb_rx_leftover_len = leftover_bytes;
+ spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags);
+ break;
+ }
+
+ kvaser_usb_hydra_handle_cmd(dev, cmd);
+ pos += cmd_len;
+ }
+}
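
A user-space sketch of the reassembly strategy implemented above: a command
split across two transfers is buffered and completed on the next call. It
assumes a fixed 16-byte command size and omits the locking and the min_t()
clamp the driver applies:

    #include <stdio.h>
    #include <string.h>

    static char leftover[32];
    static int leftover_len;

    static void feed(const char *buf, int len, int cmd_len)
    {
            int pos = 0;

            if (leftover_len) {
                    int need = cmd_len - leftover_len;  /* assume need <= len */

                    memcpy(leftover + leftover_len, buf, need);
                    printf("completed buffered cmd (%d bytes)\n", cmd_len);
                    leftover_len = 0;
                    pos += need;
            }
            while (pos < len) {
                    if (pos + cmd_len > len) {  /* got first part only */
                            leftover_len = len - pos;
                            memcpy(leftover, buf + pos, leftover_len);
                            return;
                    }
                    printf("complete cmd at offset %d\n", pos);
                    pos += cmd_len;
            }
    }

    int main(void)
    {
            char xfer1[20] = { 0 }, xfer2[12] = { 0 };

            feed(xfer1, 20, 16);  /* one full cmd + 4 leftover bytes */
            feed(xfer2, 12, 16);  /* the 12 remaining bytes complete it */
            return 0;
    }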
+
+static void *
+kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid)
+{
+ void *buf;
+
+ if (priv->dev->card_data.capabilities & KVASER_USB_HYDRA_CAP_EXT_CMD)
+ buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, frame_len,
+ cmd_len, transid);
+ else
+ buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, frame_len,
+ cmd_len, transid);
+
+ return buf;
+}
+
+const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
+ .dev_set_mode = kvaser_usb_hydra_set_mode,
+ .dev_set_bittiming = kvaser_usb_hydra_set_bittiming,
+ .dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming,
+ .dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter,
+ .dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints,
+ .dev_init_card = kvaser_usb_hydra_init_card,
+ .dev_get_software_info = kvaser_usb_hydra_get_software_info,
+ .dev_get_software_details = kvaser_usb_hydra_get_software_details,
+ .dev_get_card_info = kvaser_usb_hydra_get_card_info,
+ .dev_get_capabilities = kvaser_usb_hydra_get_capabilities,
+ .dev_set_opt_mode = kvaser_usb_hydra_set_opt_mode,
+ .dev_start_chip = kvaser_usb_hydra_start_chip,
+ .dev_stop_chip = kvaser_usb_hydra_stop_chip,
+ .dev_reset_chip = NULL,
+ .dev_flush_queue = kvaser_usb_hydra_flush_queue,
+ .dev_read_bulk_callback = kvaser_usb_hydra_read_bulk_callback,
+ .dev_frame_to_cmd = kvaser_usb_hydra_frame_to_cmd,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = {
+ .clock = {
+ .freq = 80000000,
+ },
+ .timestamp_freq = 80,
+ .bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c,
+ .data_bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
+ .clock = {
+ .freq = 24000000,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c,
+};
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
new file mode 100644
index 000000000000..07d2f3aa2c02
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -0,0 +1,1358 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Parts of this driver are based on the following:
+ * - Kvaser linux leaf driver (version 4.78)
+ * - CAN driver for esd CAN-USB/2
+ * - Kvaser linux usbcanII driver (version 5.3)
+ *
+ * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ * Copyright (C) 2015 Valeo S.A.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/netlink.h>
+
+#include "kvaser_usb.h"
+
+/* Forward declaration */
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
+
+#define CAN_USB_CLOCK 8000000
+#define MAX_USBCAN_NET_DEVICES 2
+
+/* Command header size */
+#define CMD_HEADER_LEN 2
+
+/* Kvaser CAN message flags */
+#define MSG_FLAG_ERROR_FRAME BIT(0)
+#define MSG_FLAG_OVERRUN BIT(1)
+#define MSG_FLAG_NERR BIT(2)
+#define MSG_FLAG_WAKEUP BIT(3)
+#define MSG_FLAG_REMOTE_FRAME BIT(4)
+#define MSG_FLAG_RESERVED BIT(5)
+#define MSG_FLAG_TX_ACK BIT(6)
+#define MSG_FLAG_TX_REQUEST BIT(7)
+
+/* CAN states (M16C CxSTRH register) */
+#define M16C_STATE_BUS_RESET BIT(0)
+#define M16C_STATE_BUS_ERROR BIT(4)
+#define M16C_STATE_BUS_PASSIVE BIT(5)
+#define M16C_STATE_BUS_OFF BIT(6)
+
+/* Leaf/usbcan command ids */
+#define CMD_RX_STD_MESSAGE 12
+#define CMD_TX_STD_MESSAGE 13
+#define CMD_RX_EXT_MESSAGE 14
+#define CMD_TX_EXT_MESSAGE 15
+#define CMD_SET_BUS_PARAMS 16
+#define CMD_CHIP_STATE_EVENT 20
+#define CMD_SET_CTRL_MODE 21
+#define CMD_RESET_CHIP 24
+#define CMD_START_CHIP 26
+#define CMD_START_CHIP_REPLY 27
+#define CMD_STOP_CHIP 28
+#define CMD_STOP_CHIP_REPLY 29
+
+#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33
+
+#define CMD_GET_CARD_INFO 34
+#define CMD_GET_CARD_INFO_REPLY 35
+#define CMD_GET_SOFTWARE_INFO 38
+#define CMD_GET_SOFTWARE_INFO_REPLY 39
+#define CMD_FLUSH_QUEUE 48
+#define CMD_TX_ACKNOWLEDGE 50
+#define CMD_CAN_ERROR_EVENT 51
+#define CMD_FLUSH_QUEUE_REPLY 68
+
+#define CMD_LEAF_LOG_MESSAGE 106
+
+/* error factors */
+#define M16C_EF_ACKE BIT(0)
+#define M16C_EF_CRCE BIT(1)
+#define M16C_EF_FORME BIT(2)
+#define M16C_EF_STFE BIT(3)
+#define M16C_EF_BITE0 BIT(4)
+#define M16C_EF_BITE1 BIT(5)
+#define M16C_EF_RCVE BIT(6)
+#define M16C_EF_TRE BIT(7)
+
+/* Only Leaf-based devices can report M16C error factors, so we define
+ * our own error status flags for USBCANII.
+ */
+#define USBCAN_ERROR_STATE_NONE 0
+#define USBCAN_ERROR_STATE_TX_ERROR BIT(0)
+#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
+#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
+
+/* bittiming parameters */
+#define KVASER_USB_TSEG1_MIN 1
+#define KVASER_USB_TSEG1_MAX 16
+#define KVASER_USB_TSEG2_MIN 1
+#define KVASER_USB_TSEG2_MAX 8
+#define KVASER_USB_SJW_MAX 4
+#define KVASER_USB_BRP_MIN 1
+#define KVASER_USB_BRP_MAX 64
+#define KVASER_USB_BRP_INC 1
+
+/* ctrl modes */
+#define KVASER_CTRL_MODE_NORMAL 1
+#define KVASER_CTRL_MODE_SILENT 2
+#define KVASER_CTRL_MODE_SELFRECEPTION 3
+#define KVASER_CTRL_MODE_OFF 4
+
+/* Extended CAN identifier flag */
+#define KVASER_EXTENDED_FRAME BIT(31)
+
+struct kvaser_cmd_simple {
+ u8 tid;
+ u8 channel;
+} __packed;
+
+struct kvaser_cmd_cardinfo {
+ u8 tid;
+ u8 nchannels;
+ __le32 serial_number;
+ __le32 padding0;
+ __le32 clock_resolution;
+ __le32 mfgdate;
+ u8 ean[8];
+ u8 hw_revision;
+ union {
+ struct {
+ u8 usb_hs_mode;
+ } __packed leaf1;
+ struct {
+ u8 padding;
+ } __packed usbcan1;
+ } __packed;
+ __le16 padding1;
+} __packed;
+
+struct leaf_cmd_softinfo {
+ u8 tid;
+ u8 padding0;
+ __le32 sw_options;
+ __le32 fw_version;
+ __le16 max_outstanding_tx;
+ __le16 padding1[9];
+} __packed;
+
+struct usbcan_cmd_softinfo {
+ u8 tid;
+ u8 fw_name[5];
+ __le16 max_outstanding_tx;
+ u8 padding[6];
+ __le32 fw_version;
+ __le16 checksum;
+ __le16 sw_options;
+} __packed;
+
+struct kvaser_cmd_busparams {
+ u8 tid;
+ u8 channel;
+ __le32 bitrate;
+ u8 tseg1;
+ u8 tseg2;
+ u8 sjw;
+ u8 no_samp;
+} __packed;
+
+struct kvaser_cmd_tx_can {
+ u8 channel;
+ u8 tid;
+ u8 data[14];
+ union {
+ struct {
+ u8 padding;
+ u8 flags;
+ } __packed leaf;
+ struct {
+ u8 flags;
+ u8 padding;
+ } __packed usbcan;
+ } __packed;
+} __packed;
+
+struct kvaser_cmd_rx_can_header {
+ u8 channel;
+ u8 flag;
+} __packed;
+
+struct leaf_cmd_rx_can {
+ u8 channel;
+ u8 flag;
+
+ __le16 time[3];
+ u8 data[14];
+} __packed;
+
+struct usbcan_cmd_rx_can {
+ u8 channel;
+ u8 flag;
+
+ u8 data[14];
+ __le16 time;
+} __packed;
+
+struct leaf_cmd_chip_state_event {
+ u8 tid;
+ u8 channel;
+
+ __le16 time[3];
+ u8 tx_errors_count;
+ u8 rx_errors_count;
+
+ u8 status;
+ u8 padding[3];
+} __packed;
+
+struct usbcan_cmd_chip_state_event {
+ u8 tid;
+ u8 channel;
+
+ u8 tx_errors_count;
+ u8 rx_errors_count;
+ __le16 time;
+
+ u8 status;
+ u8 padding[3];
+} __packed;
+
+struct kvaser_cmd_tx_acknowledge_header {
+ u8 channel;
+ u8 tid;
+} __packed;
+
+struct leaf_cmd_error_event {
+ u8 tid;
+ u8 flags;
+ __le16 time[3];
+ u8 channel;
+ u8 padding;
+ u8 tx_errors_count;
+ u8 rx_errors_count;
+ u8 status;
+ u8 error_factor;
+} __packed;
+
+struct usbcan_cmd_error_event {
+ u8 tid;
+ u8 padding;
+ u8 tx_errors_count_ch0;
+ u8 rx_errors_count_ch0;
+ u8 tx_errors_count_ch1;
+ u8 rx_errors_count_ch1;
+ u8 status_ch0;
+ u8 status_ch1;
+ __le16 time;
+} __packed;
+
+struct kvaser_cmd_ctrl_mode {
+ u8 tid;
+ u8 channel;
+ u8 ctrl_mode;
+ u8 padding[3];
+} __packed;
+
+struct kvaser_cmd_flush_queue {
+ u8 tid;
+ u8 channel;
+ u8 flags;
+ u8 padding[3];
+} __packed;
+
+struct leaf_cmd_log_message {
+ u8 channel;
+ u8 flags;
+ __le16 time[3];
+ u8 dlc;
+ u8 time_offset;
+ __le32 id;
+ u8 data[8];
+} __packed;
+
+struct kvaser_cmd {
+ u8 len;
+ u8 id;
+ union {
+ struct kvaser_cmd_simple simple;
+ struct kvaser_cmd_cardinfo cardinfo;
+ struct kvaser_cmd_busparams busparams;
+
+ struct kvaser_cmd_rx_can_header rx_can_header;
+ struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header;
+
+ union {
+ struct leaf_cmd_softinfo softinfo;
+ struct leaf_cmd_rx_can rx_can;
+ struct leaf_cmd_chip_state_event chip_state_event;
+ struct leaf_cmd_error_event error_event;
+ struct leaf_cmd_log_message log_message;
+ } __packed leaf;
+
+ union {
+ struct usbcan_cmd_softinfo softinfo;
+ struct usbcan_cmd_rx_can rx_can;
+ struct usbcan_cmd_chip_state_event chip_state_event;
+ struct usbcan_cmd_error_event error_event;
+ } __packed usbcan;
+
+ struct kvaser_cmd_tx_can tx_can;
+ struct kvaser_cmd_ctrl_mode ctrl_mode;
+ struct kvaser_cmd_flush_queue flush_queue;
+ } u;
+} __packed;
+
+/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
+ * handling. Some discrepancies between the two families exist:
+ *
+ * - USBCAN firmware does not report M16C "error factors"
+ * - USBCAN controllers have difficulty reporting whether a raised error
+ *   event is for ch0 or ch1. They leave such arbitration to the OS
+ * driver by letting it compare error counters with previous values
+ * and decide the error event's channel. Thus for USBCAN, the channel
+ * field is only advisory.
+ */
+struct kvaser_usb_err_summary {
+ u8 channel, status, txerr, rxerr;
+ union {
+ struct {
+ u8 error_factor;
+ } leaf;
+ struct {
+ u8 other_ch_status;
+ u8 error_state;
+ } usbcan;
+ };
+};
+
+static void *
+kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+	u8 *cmd_tx_can_flags = NULL; /* initialized to silence a GCC warning */
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ *frame_len = cf->can_dlc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (cmd) {
+ cmd->u.tx_can.tid = transid & 0xff;
+ cmd->len = *cmd_len = CMD_HEADER_LEN +
+ sizeof(struct kvaser_cmd_tx_can);
+ cmd->u.tx_can.channel = priv->channel;
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags;
+ break;
+ case KVASER_USBCAN:
+ cmd_tx_can_flags = &cmd->u.tx_can.usbcan.flags;
+ break;
+ }
+
+ *cmd_tx_can_flags = 0;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ cmd->id = CMD_TX_EXT_MESSAGE;
+ cmd->u.tx_can.data[0] = (cf->can_id >> 24) & 0x1f;
+ cmd->u.tx_can.data[1] = (cf->can_id >> 18) & 0x3f;
+ cmd->u.tx_can.data[2] = (cf->can_id >> 14) & 0x0f;
+ cmd->u.tx_can.data[3] = (cf->can_id >> 6) & 0xff;
+ cmd->u.tx_can.data[4] = cf->can_id & 0x3f;
+ } else {
+ cmd->id = CMD_TX_STD_MESSAGE;
+ cmd->u.tx_can.data[0] = (cf->can_id >> 6) & 0x1f;
+ cmd->u.tx_can.data[1] = cf->can_id & 0x3f;
+ }
+
+ cmd->u.tx_can.data[5] = cf->can_dlc;
+ memcpy(&cmd->u.tx_can.data[6], cf->data, cf->can_dlc);
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ *cmd_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
+ }
+ return cmd;
+}
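
The extended-ID packing above splits a 29-bit CAN identifier over data[0..4]
as 5+6+4+8+6 bits. A self-contained round-trip check of that byte layout (the
sample ID is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t can_id = 0x1abcdef5 & 0x1fffffff;  /* 29-bit ID */
            uint8_t d[5];
            uint32_t back;

            d[0] = (can_id >> 24) & 0x1f;
            d[1] = (can_id >> 18) & 0x3f;
            d[2] = (can_id >> 14) & 0x0f;
            d[3] = (can_id >> 6) & 0xff;
            d[4] = can_id & 0x3f;

            back = (uint32_t)d[0] << 24 | (uint32_t)d[1] << 18 |
                   (uint32_t)d[2] << 14 | (uint32_t)d[3] << 6 | d[4];

            printf("id=0x%08x back=0x%08x\n", can_id, back);
            return 0;
    }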
+
+static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
+ struct kvaser_cmd *cmd)
+{
+ struct kvaser_cmd *tmp;
+ void *buf;
+ int actual_len;
+ int err;
+ int pos;
+ unsigned long to = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT);
+
+ buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ do {
+ err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE,
+ &actual_len);
+ if (err < 0)
+ goto end;
+
+ pos = 0;
+ while (pos <= actual_len - CMD_HEADER_LEN) {
+ tmp = buf + pos;
+
+ /* Handle commands crossing the USB endpoint max packet
+ * size boundary. Check kvaser_usb_read_bulk_callback()
+ * for further details.
+ */
+ if (tmp->len == 0) {
+ pos = round_up(pos,
+ le16_to_cpu
+ (dev->bulk_in->wMaxPacketSize));
+ continue;
+ }
+
+ if (pos + tmp->len > actual_len) {
+ dev_err_ratelimited(&dev->intf->dev,
+ "Format error\n");
+ break;
+ }
+
+ if (tmp->id == id) {
+ memcpy(cmd, tmp, tmp->len);
+ goto end;
+ }
+
+ pos += tmp->len;
+ }
+ } while (time_before(jiffies, to));
+
+ err = -EINVAL;
+
+end:
+ kfree(buf);
+
+ return err;
+}
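
The zero-length case above resumes scanning at the next USB max-packet
boundary. A one-line sketch of the round-up (the 64-byte wMaxPacketSize is an
assumption for illustration):

    #include <stdio.h>

    #define SKETCH_ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
            int pos = 70, max_packet = 64;

            printf("next scan pos=%d\n",
                   SKETCH_ROUND_UP(pos, max_packet));  /* 128 */
            return 0;
    }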
+
+static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
+ u8 cmd_id, int channel)
+{
+ struct kvaser_cmd *cmd;
+ int rc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = cmd_id;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple);
+ cmd->u.simple.channel = channel;
+ cmd->u.simple.tid = 0xff;
+
+ rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+
+ kfree(cmd);
+ return rc;
+}
+
+static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd cmd;
+ int err;
+
+ err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0);
+ if (err)
+ return err;
+
+ err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_REPLY, &cmd);
+ if (err)
+ return err;
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
+ dev->max_tx_urbs =
+ le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
+ break;
+ case KVASER_USBCAN:
+ dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
+ dev->max_tx_urbs =
+ le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
+ break;
+ }
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_get_software_info(struct kvaser_usb *dev)
+{
+ int err;
+ int retry = 3;
+
+ /* On some x86 laptops, plugging a Kvaser device again after
+ * an unplug makes the firmware always ignore the very first
+ * command. For such a case, provide some room for retries
+ * instead of completely exiting the driver.
+ */
+ do {
+ err = kvaser_usb_leaf_get_software_info_inner(dev);
+ } while (--retry && err == -ETIMEDOUT);
+
+ return err;
+}
+
+static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd cmd;
+ int err;
+
+ err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_CARD_INFO, 0);
+ if (err)
+ return err;
+
+ err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CARD_INFO_REPLY, &cmd);
+ if (err)
+ return err;
+
+ dev->nchannels = cmd.u.cardinfo.nchannels;
+ if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES ||
+ (dev->card_data.leaf.family == KVASER_USBCAN &&
+ dev->nchannels > MAX_USBCAN_NET_DEVICES))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct net_device_stats *stats;
+ struct kvaser_usb_tx_urb_context *context;
+ struct kvaser_usb_net_priv *priv;
+ unsigned long flags;
+ u8 channel, tid;
+
+ channel = cmd->u.tx_acknowledge_header.channel;
+ tid = cmd->u.tx_acknowledge_header.tid;
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+
+ if (!netif_device_present(priv->netdev))
+ return;
+
+ stats = &priv->netdev->stats;
+
+ context = &priv->tx_contexts[tid % dev->max_tx_urbs];
+
+ /* Sometimes the state change doesn't come after a bus-off event */
+ if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) {
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ } else {
+ netdev_err(priv->netdev,
+ "No memory left for err_skb\n");
+ }
+
+ priv->can.can_stats.restarts++;
+ netif_carrier_on(priv->netdev);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes += context->dlc;
+
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+
+ can_get_echo_skb(priv->netdev, context->echo_index);
+ context->echo_index = dev->max_tx_urbs;
+ --priv->active_tx_contexts;
+ netif_wake_queue(priv->netdev);
+
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+}
+
+static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+ u8 cmd_id)
+{
+ struct kvaser_cmd *cmd;
+ int err;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple);
+ cmd->id = cmd_id;
+ cmd->u.simple.channel = priv->channel;
+
+ err = kvaser_usb_send_cmd_async(priv, cmd, cmd->len);
+ if (err)
+ kfree(cmd);
+
+ return err;
+}
+
+static void
+kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
+ const struct kvaser_usb_err_summary *es,
+ struct can_frame *cf)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct net_device_stats *stats = &priv->netdev->stats;
+ enum can_state cur_state, new_state, tx_state, rx_state;
+
+ netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status);
+
+ new_state = priv->can.state;
+ cur_state = priv->can.state;
+
+ if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (es->status & M16C_STATE_BUS_PASSIVE) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (es->status & M16C_STATE_BUS_ERROR) {
+ /* Guard against spurious error events after a busoff */
+ if (cur_state < CAN_STATE_BUS_OFF) {
+ if (es->txerr >= 128 || es->rxerr >= 128)
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (es->txerr >= 96 || es->rxerr >= 96)
+ new_state = CAN_STATE_ERROR_WARNING;
+ else if (cur_state > CAN_STATE_ERROR_ACTIVE)
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+
+ if (!es->status)
+ new_state = CAN_STATE_ERROR_ACTIVE;
+
+ if (new_state != cur_state) {
+ tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
+ rx_state = (es->txerr <= es->rxerr) ? new_state : 0;
+
+ can_change_state(priv->netdev, cf, tx_state, rx_state);
+ }
+
+ if (priv->can.restart_ms &&
+ cur_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF)
+ priv->can.can_stats.restarts++;
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ if (es->leaf.error_factor) {
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ }
+ break;
+ case KVASER_USBCAN:
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR)
+ stats->tx_errors++;
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR)
+ stats->rx_errors++;
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR)
+ priv->can.can_stats.bus_error++;
+ break;
+ }
+
+ priv->bec.txerr = es->txerr;
+ priv->bec.rxerr = es->rxerr;
+}
+
+static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ const struct kvaser_usb_err_summary *es)
+{
+ struct can_frame *cf;
+ struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG,
+ .can_dlc = CAN_ERR_DLC };
+ struct sk_buff *skb;
+ struct net_device_stats *stats;
+ struct kvaser_usb_net_priv *priv;
+ enum can_state old_state, new_state;
+
+ if (es->channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", es->channel);
+ return;
+ }
+
+ priv = dev->nets[es->channel];
+ stats = &priv->netdev->stats;
+
+ /* Update all of the CAN interface's state and error counters before
+ * trying any memory allocation that can actually fail with -ENOMEM.
+ *
+ * We send a temporary stack-allocated error CAN frame to
+ * can_change_state() for the very same reason.
+ *
+ * TODO: Split can_change_state() responsibility between updating the
+ * CAN interface's state and counters, and the setting up of CAN error
+ * frame ID and data to userspace. Remove stack allocation afterwards.
+ */
+ old_state = priv->can.state;
+ kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
+ new_state = priv->can.state;
+
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+ memcpy(cf, &tmp_cf, sizeof(*cf));
+
+ if (new_state != old_state) {
+ if (es->status &
+ (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
+ if (!priv->can.restart_ms)
+ kvaser_usb_leaf_simple_cmd_async(priv,
+ CMD_STOP_CHIP);
+ netif_carrier_off(priv->netdev);
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+ netif_carrier_on(priv->netdev);
+ }
+ }
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ if (es->leaf.error_factor) {
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+
+ if (es->leaf.error_factor & M16C_EF_ACKE)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ if (es->leaf.error_factor & M16C_EF_CRCE)
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ if (es->leaf.error_factor & M16C_EF_FORME)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ if (es->leaf.error_factor & M16C_EF_STFE)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ if (es->leaf.error_factor & M16C_EF_BITE0)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ if (es->leaf.error_factor & M16C_EF_BITE1)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ if (es->leaf.error_factor & M16C_EF_TRE)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ }
+ break;
+ case KVASER_USBCAN:
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR)
+ cf->can_id |= CAN_ERR_BUSERROR;
+ break;
+ }
+
+ cf->data[6] = es->txerr;
+ cf->data[7] = es->rxerr;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+/* For USBCAN, report an error to userspace if the channel's error
+ * counters have changed, or if we're the only channel seeing a bus
+ * error state.
+ */
+static void
+kvaser_usb_leaf_usbcan_conditionally_rx_error(const struct kvaser_usb *dev,
+ struct kvaser_usb_err_summary *es)
+{
+ struct kvaser_usb_net_priv *priv;
+ unsigned int channel;
+ bool report_error;
+
+ channel = es->channel;
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+ report_error = false;
+
+ if (es->txerr != priv->bec.txerr) {
+ es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR;
+ report_error = true;
+ }
+ if (es->rxerr != priv->bec.rxerr) {
+ es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR;
+ report_error = true;
+ }
+ if ((es->status & M16C_STATE_BUS_ERROR) &&
+ !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) {
+ es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR;
+ report_error = true;
+ }
+
+ if (report_error)
+ kvaser_usb_leaf_rx_error(dev, es);
+}
+
+static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_err_summary es = { };
+
+ switch (cmd->id) {
+ /* Sometimes errors are sent as unsolicited chip state events */
+ case CMD_CHIP_STATE_EVENT:
+ es.channel = cmd->u.usbcan.chip_state_event.channel;
+ es.status = cmd->u.usbcan.chip_state_event.status;
+ es.txerr = cmd->u.usbcan.chip_state_event.tx_errors_count;
+ es.rxerr = cmd->u.usbcan.chip_state_event.rx_errors_count;
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+ break;
+
+ case CMD_CAN_ERROR_EVENT:
+ es.channel = 0;
+ es.status = cmd->u.usbcan.error_event.status_ch0;
+ es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0;
+ es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0;
+ es.usbcan.other_ch_status =
+ cmd->u.usbcan.error_event.status_ch1;
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+
+ /* The USBCAN firmware supports up to 2 channels.
+ * Now that ch0 was checked, check if ch1 has any errors.
+ */
+ if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
+ es.channel = 1;
+ es.status = cmd->u.usbcan.error_event.status_ch1;
+ es.txerr =
+ cmd->u.usbcan.error_event.tx_errors_count_ch1;
+ es.rxerr =
+ cmd->u.usbcan.error_event.rx_errors_count_ch1;
+ es.usbcan.other_ch_status =
+ cmd->u.usbcan.error_event.status_ch0;
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+ }
+ break;
+
+ default:
+ dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id);
+ }
+}
+
+static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_err_summary es = { };
+
+ switch (cmd->id) {
+ case CMD_CAN_ERROR_EVENT:
+ es.channel = cmd->u.leaf.error_event.channel;
+ es.status = cmd->u.leaf.error_event.status;
+ es.txerr = cmd->u.leaf.error_event.tx_errors_count;
+ es.rxerr = cmd->u.leaf.error_event.rx_errors_count;
+ es.leaf.error_factor = cmd->u.leaf.error_event.error_factor;
+ break;
+ case CMD_LEAF_LOG_MESSAGE:
+ es.channel = cmd->u.leaf.log_message.channel;
+ es.status = cmd->u.leaf.log_message.data[0];
+ es.txerr = cmd->u.leaf.log_message.data[2];
+ es.rxerr = cmd->u.leaf.log_message.data[3];
+ es.leaf.error_factor = cmd->u.leaf.log_message.data[1];
+ break;
+ case CMD_CHIP_STATE_EVENT:
+ es.channel = cmd->u.leaf.chip_state_event.channel;
+ es.status = cmd->u.leaf.chip_state_event.status;
+ es.txerr = cmd->u.leaf.chip_state_event.tx_errors_count;
+ es.rxerr = cmd->u.leaf.chip_state_event.rx_errors_count;
+ es.leaf.error_factor = 0;
+ break;
+ default:
+ dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id);
+ return;
+ }
+
+ kvaser_usb_leaf_rx_error(dev, &es);
+}
+
+static void kvaser_usb_leaf_rx_can_err(const struct kvaser_usb_net_priv *priv,
+ const struct kvaser_cmd *cmd)
+{
+ if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
+ MSG_FLAG_NERR)) {
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
+ cmd->u.rx_can_header.flag);
+
+ stats->rx_errors++;
+ return;
+ }
+
+ if (cmd->u.rx_can_header.flag & MSG_FLAG_OVERRUN)
+ kvaser_usb_can_rx_over_error(priv->netdev);
+}
+
+static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats;
+ u8 channel = cmd->u.rx_can_header.channel;
+ const u8 *rx_data = NULL; /* silence a GCC maybe-uninitialized warning */
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+ stats = &priv->netdev->stats;
+
+ if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
+ (dev->card_data.leaf.family == KVASER_LEAF &&
+ cmd->id == CMD_LEAF_LOG_MESSAGE)) {
+ kvaser_usb_leaf_leaf_rx_error(dev, cmd);
+ return;
+ } else if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
+ MSG_FLAG_NERR |
+ MSG_FLAG_OVERRUN)) {
+ kvaser_usb_leaf_rx_can_err(priv, cmd);
+ return;
+ } else if (cmd->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) {
+ netdev_warn(priv->netdev,
+ "Unhandled frame (flags: 0x%02x)\n",
+ cmd->u.rx_can_header.flag);
+ return;
+ }
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ rx_data = cmd->u.leaf.rx_can.data;
+ break;
+ case KVASER_USBCAN:
+ rx_data = cmd->u.usbcan.rx_can.data;
+ break;
+ }
+
+ skb = alloc_can_skb(priv->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id ==
+ CMD_LEAF_LOG_MESSAGE) {
+ cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id);
+ if (cf->can_id & KVASER_EXTENDED_FRAME)
+ cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
+ else
+ cf->can_id &= CAN_SFF_MASK;
+
+ cf->can_dlc = get_can_dlc(cmd->u.leaf.log_message.dlc);
+
+ if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, &cmd->u.leaf.log_message.data,
+ cf->can_dlc);
+ } else {
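+		/* the standard 11-bit ID is split across the first two
+		 * bytes: 5 bits from rx_data[0] and 6 bits from rx_data[1]
+		 */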
+ cf->can_id = ((rx_data[0] & 0x1f) << 6) | (rx_data[1] & 0x3f);
+
+ if (cmd->id == CMD_RX_EXT_MESSAGE) {
+ cf->can_id <<= 18;
+ cf->can_id |= ((rx_data[2] & 0x0f) << 14) |
+ ((rx_data[3] & 0xff) << 6) |
+ (rx_data[4] & 0x3f);
+ cf->can_id |= CAN_EFF_FLAG;
+ }
+
+ cf->can_dlc = get_can_dlc(rx_data[5]);
+
+ if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, &rx_data[6], cf->can_dlc);
+ }
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ u8 channel = cmd->u.simple.channel;
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+
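+	/* A start reply may answer the synchronous start in
+	 * kvaser_usb_leaf_start_chip() or an asynchronous restart sent via
+	 * kvaser_usb_leaf_set_mode(); in the latter case the completion is
+	 * already done and only the stopped queue needs waking.
+	 */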
+ if (completion_done(&priv->start_comp) &&
+ netif_queue_stopped(priv->netdev)) {
+ netif_wake_queue(priv->netdev);
+ } else {
+ netif_start_queue(priv->netdev);
+ complete(&priv->start_comp);
+ }
+}
+
+static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ u8 channel = cmd->u.simple.channel;
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+
+ complete(&priv->stop_comp);
+}
+
+static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ switch (cmd->id) {
+ case CMD_START_CHIP_REPLY:
+ kvaser_usb_leaf_start_chip_reply(dev, cmd);
+ break;
+
+ case CMD_STOP_CHIP_REPLY:
+ kvaser_usb_leaf_stop_chip_reply(dev, cmd);
+ break;
+
+ case CMD_RX_STD_MESSAGE:
+ case CMD_RX_EXT_MESSAGE:
+ kvaser_usb_leaf_rx_can_msg(dev, cmd);
+ break;
+
+ case CMD_LEAF_LOG_MESSAGE:
+ if (dev->card_data.leaf.family != KVASER_LEAF)
+ goto warn;
+ kvaser_usb_leaf_rx_can_msg(dev, cmd);
+ break;
+
+ case CMD_CHIP_STATE_EVENT:
+ case CMD_CAN_ERROR_EVENT:
+ if (dev->card_data.leaf.family == KVASER_LEAF)
+ kvaser_usb_leaf_leaf_rx_error(dev, cmd);
+ else
+ kvaser_usb_leaf_usbcan_rx_error(dev, cmd);
+ break;
+
+ case CMD_TX_ACKNOWLEDGE:
+ kvaser_usb_leaf_tx_acknowledge(dev, cmd);
+ break;
+
+ /* Ignored commands */
+ case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
+ if (dev->card_data.leaf.family != KVASER_USBCAN)
+ goto warn;
+ break;
+
+ case CMD_FLUSH_QUEUE_REPLY:
+ if (dev->card_data.leaf.family != KVASER_LEAF)
+ goto warn;
+ break;
+
+ default:
+warn: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id);
+ break;
+ }
+}
+
+static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev,
+ void *buf, int len)
+{
+ struct kvaser_cmd *cmd;
+ int pos = 0;
+
+ while (pos <= len - CMD_HEADER_LEN) {
+ cmd = buf + pos;
+
+ /* The Kvaser firmware can only read and write commands that
+ * do not cross the USB endpoint's wMaxPacketSize boundary.
+ * If a follow-up command crosses such boundary, firmware puts
+ * a placeholder zero-length command in its place then aligns
+ * the real command to the next max packet size.
+ *
+ * Handle such cases or we're going to miss a significant
+ * number of events in case of a heavy rx load on the bus.
+ */
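+		/* Example, assuming a wMaxPacketSize of 64 (the actual
+		 * value comes from the endpoint descriptor): a zero-length
+		 * command at pos == 60 advances pos to round_up(60, 64)
+		 * == 64, the start of the next USB packet.
+		 */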
+ if (cmd->len == 0) {
+ pos = round_up(pos, le16_to_cpu
+ (dev->bulk_in->wMaxPacketSize));
+ continue;
+ }
+
+ if (pos + cmd->len > len) {
+ dev_err_ratelimited(&dev->intf->dev, "Format error\n");
+ break;
+ }
+
+ kvaser_usb_leaf_handle_command(dev, cmd);
+ pos += cmd->len;
+ }
+}
+
+static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_cmd *cmd;
+ int rc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_SET_CTRL_MODE;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_ctrl_mode);
+ cmd->u.ctrl_mode.tid = 0xff;
+ cmd->u.ctrl_mode.channel = priv->channel;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT;
+ else
+ cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL;
+
+ rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len);
+
+ kfree(cmd);
+ return rc;
+}
+
+static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->start_comp);
+
+ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->start_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->stop_comp);
+
+ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->stop_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_reset_chip(struct kvaser_usb *dev, int channel)
+{
+ return kvaser_usb_leaf_send_simple_cmd(dev, CMD_RESET_CHIP, channel);
+}
+
+static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_cmd *cmd;
+ int rc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_FLUSH_QUEUE;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_flush_queue);
+ cmd->u.flush_queue.channel = priv->channel;
+ cmd->u.flush_queue.flags = 0x00;
+
+ rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len);
+
+ kfree(cmd);
+ return rc;
+}
+
+static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
+{
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+
+ dev->cfg = &kvaser_usb_leaf_dev_cfg;
+ card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+
+ return 0;
+}
+
+static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
+ .name = "kvaser_usb",
+ .tseg1_min = KVASER_USB_TSEG1_MIN,
+ .tseg1_max = KVASER_USB_TSEG1_MAX,
+ .tseg2_min = KVASER_USB_TSEG2_MIN,
+ .tseg2_max = KVASER_USB_TSEG2_MAX,
+ .sjw_max = KVASER_USB_SJW_MAX,
+ .brp_min = KVASER_USB_BRP_MIN,
+ .brp_max = KVASER_USB_BRP_MAX,
+ .brp_inc = KVASER_USB_BRP_INC,
+};
+
+static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ int rc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_SET_BUS_PARAMS;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams);
+ cmd->u.busparams.channel = priv->channel;
+ cmd->u.busparams.tid = 0xff;
+ cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
+ cmd->u.busparams.sjw = bt->sjw;
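+	/* the device expects prop_seg and phase_seg1 combined into a
+	 * single tseg1 value
+	 */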
+ cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
+ cmd->u.busparams.tseg2 = bt->phase_seg2;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ cmd->u.busparams.no_samp = 3;
+ else
+ cmd->u.busparams.no_samp = 1;
+
+ rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+
+ kfree(cmd);
+ return rc;
+}
+
+static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
+ enum can_mode mode)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ int err;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+
+ *bec = priv->bec;
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
+{
+ const struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ int i;
+
+ iface_desc = &dev->intf->altsetting[0];
+
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+ if (!dev->bulk_in && usb_endpoint_is_bulk_in(endpoint))
+ dev->bulk_in = endpoint;
+
+ if (!dev->bulk_out && usb_endpoint_is_bulk_out(endpoint))
+ dev->bulk_out = endpoint;
+
+		/* stop once the first bulk-in and bulk-out endpoints are found */
+ if (dev->bulk_in && dev->bulk_out)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
+ .dev_set_mode = kvaser_usb_leaf_set_mode,
+ .dev_set_bittiming = kvaser_usb_leaf_set_bittiming,
+ .dev_set_data_bittiming = NULL,
+ .dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter,
+ .dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints,
+ .dev_init_card = kvaser_usb_leaf_init_card,
+ .dev_get_software_info = kvaser_usb_leaf_get_software_info,
+ .dev_get_software_details = NULL,
+ .dev_get_card_info = kvaser_usb_leaf_get_card_info,
+ .dev_get_capabilities = NULL,
+ .dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
+ .dev_start_chip = kvaser_usb_leaf_start_chip,
+ .dev_stop_chip = kvaser_usb_leaf_stop_chip,
+ .dev_reset_chip = kvaser_usb_leaf_reset_chip,
+ .dev_flush_queue = kvaser_usb_leaf_flush_queue,
+ .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
+ .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
+ .clock = {
+ .freq = CAN_USB_CLOCK,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index f530a80f5051..13238a72a338 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -423,6 +423,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
new_state = CAN_STATE_ERROR_WARNING;
break;
}
+ /* else: fall through */
case CAN_STATE_ERROR_WARNING:
if (n & PCAN_USB_ERROR_BUS_HEAVY) {
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 50e911428638..611f9d31be5d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -353,6 +353,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
default:
netdev_warn(netdev, "tx urb submitting failed err=%d\n",
err);
+ /* fall through */
case -ENOENT:
/* cable unplugged */
stats->tx_dropped++;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 0105fbfea273..d516def846ab 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -141,8 +141,10 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...)
switch (id) {
case PCAN_USBPRO_TXMSG8:
i += 4;
+ /* fall through */
case PCAN_USBPRO_TXMSG4:
i += 4;
+ /* fall through */
case PCAN_USBPRO_TXMSG0:
*pc++ = va_arg(ap, int);
*pc++ = va_arg(ap, int);
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
new file mode 100644
index 000000000000..0678a38b1af4
--- /dev/null
+++ b/drivers/net/can/usb/ucan.c
@@ -0,0 +1,1613 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Driver for Theobroma Systems UCAN devices, Protocol Version 3
+ *
+ * Copyright (C) 2018 Theobroma Systems Design und Consulting GmbH
+ *
+ *
+ * General Description:
+ *
+ * The USB Device uses three Endpoints:
+ *
+ * CONTROL Endpoint: Is used to set up the device (start, stop,
+ * info, configure).
+ *
+ * IN Endpoint: The device sends CAN Frame Messages and Device
+ * Information using the IN endpoint.
+ *
+ * OUT Endpoint: The driver sends configuration requests and CAN
+ * Frames on the out endpoint.
+ *
+ * Error Handling:
+ *
+ * If error reporting is turned on, the device encodes errors into CAN
+ * error frames (see uapi/linux/can/error.h) and sends them using the
+ * IN Endpoint. The driver updates statistics and forwards them.
+ */
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#define UCAN_DRIVER_NAME "ucan"
+#define UCAN_MAX_RX_URBS 8
+/* the CAN controller needs a while to enable/disable the bus */
+#define UCAN_USB_CTL_PIPE_TIMEOUT 1000
+/* this driver currently supports protocol version 3 only */
+#define UCAN_PROTOCOL_VERSION_MIN 3
+#define UCAN_PROTOCOL_VERSION_MAX 3
+
+/* UCAN Message Definitions
+ * ------------------------
+ *
+ * ucan_message_out_t and ucan_message_in_t define the messages
+ * transmitted on the OUT and IN endpoint.
+ *
+ * Multibyte fields are transmitted in little-endian byte order
+ *
+ * INTR Endpoint: a single uint32_t storing the current space in the fifo
+ *
+ * OUT Endpoint: single message of type ucan_message_out_t is
+ * transmitted on the out endpoint
+ *
+ * IN Endpoint: multiple messages ucan_message_in_t concatenated in
+ * the following way:
+ *
+ * m[n].len <=> the length of message n (including the header) in bytes
+ * m[n] is aligned to a 4 byte boundary, hence
+ * offset(m[0]) := 0;
+ * offset(m[n+1]) := offset(m[n]) + ((m[n].len + 3) & ~3)
+ *
+ * this implies that
+ * offset(m[n]) % 4 <=> 0
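+ *
+ * Example: a message with m[n].len == 10 occupies 12 bytes on the
+ * wire, so m[n+1] starts at offset(m[n]) + 12.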
+ */
+
+/* Device Global Commands */
+enum {
+ UCAN_DEVICE_GET_FW_STRING = 0,
+};
+
+/* UCAN Commands */
+enum {
+ /* start the can transceiver - val defines the operation mode */
+ UCAN_COMMAND_START = 0,
+ /* cancel pending transmissions and stop the can transceiver */
+ UCAN_COMMAND_STOP = 1,
+ /* send can transceiver into low-power sleep mode */
+ UCAN_COMMAND_SLEEP = 2,
+ /* wake up can transceiver from low-power sleep mode */
+ UCAN_COMMAND_WAKEUP = 3,
+ /* reset the can transceiver */
+ UCAN_COMMAND_RESET = 4,
+ /* get piece of info from the can transceiver - subcmd defines what
+ * piece
+ */
+ UCAN_COMMAND_GET = 5,
+ /* clear or disable hardware filter - subcmd defines which of the two */
+ UCAN_COMMAND_FILTER = 6,
+ /* Setup bittiming */
+ UCAN_COMMAND_SET_BITTIMING = 7,
+ /* recover from bus-off state */
+ UCAN_COMMAND_RESTART = 8,
+};
+
+/* UCAN_COMMAND_START and UCAN_COMMAND_GET_INFO operation modes (bitmap).
+ * Undefined bits must be set to 0.
+ */
+enum {
+ UCAN_MODE_LOOPBACK = BIT(0),
+ UCAN_MODE_SILENT = BIT(1),
+ UCAN_MODE_3_SAMPLES = BIT(2),
+ UCAN_MODE_ONE_SHOT = BIT(3),
+ UCAN_MODE_BERR_REPORT = BIT(4),
+};
+
+/* UCAN_COMMAND_GET subcommands */
+enum {
+ UCAN_COMMAND_GET_INFO = 0,
+ UCAN_COMMAND_GET_PROTOCOL_VERSION = 1,
+};
+
+/* UCAN_COMMAND_FILTER subcommands */
+enum {
+ UCAN_FILTER_CLEAR = 0,
+ UCAN_FILTER_DISABLE = 1,
+ UCAN_FILTER_ENABLE = 2,
+};
+
+/* OUT endpoint message types */
+enum {
+ UCAN_OUT_TX = 2, /* transmit a CAN frame */
+};
+
+/* IN endpoint message types */
+enum {
+ UCAN_IN_TX_COMPLETE = 1, /* CAN frame transmission completed */
+ UCAN_IN_RX = 2, /* CAN frame received */
+};
+
+struct ucan_ctl_cmd_start {
+ __le16 mode; /* OR-ing any of UCAN_MODE_* */
+} __packed;
+
+struct ucan_ctl_cmd_set_bittiming {
+ __le32 tq; /* Time quanta (TQ) in nanoseconds */
+ __le16 brp; /* TQ Prescaler */
+ __le16 sample_point; /* Sample point in tenths of a percent */
+ u8 prop_seg; /* Propagation segment in TQs */
+ u8 phase_seg1; /* Phase buffer segment 1 in TQs */
+ u8 phase_seg2; /* Phase buffer segment 2 in TQs */
+ u8 sjw; /* Synchronisation jump width in TQs */
+} __packed;
+
+struct ucan_ctl_cmd_device_info {
+ __le32 freq; /* Clock Frequency for tq generation */
+ u8 tx_fifo; /* Size of the transmission fifo */
+ u8 sjw_max; /* can_bittiming fields... */
+ u8 tseg1_min;
+ u8 tseg1_max;
+ u8 tseg2_min;
+ u8 tseg2_max;
+ __le16 brp_inc;
+ __le32 brp_min;
+ __le32 brp_max; /* ...can_bittiming fields */
+ __le16 ctrlmodes; /* supported control modes */
+ __le16 hwfilter; /* Number of HW filter banks */
+ __le16 rxmboxes; /* Number of receive Mailboxes */
+} __packed;
+
+struct ucan_ctl_cmd_get_protocol_version {
+ __le32 version;
+} __packed;
+
+union ucan_ctl_payload {
+ /* Start the device
+ * bmRequest == UCAN_COMMAND_START
+ */
+ struct ucan_ctl_cmd_start cmd_start;
+ /* Setup Bittiming
+ * bmRequest == UCAN_COMMAND_SET_BITTIMING
+ */
+ struct ucan_ctl_cmd_set_bittiming cmd_set_bittiming;
+ /* Get Device Information
+ * bmRequest == UCAN_COMMAND_GET; wValue = UCAN_COMMAND_GET_INFO
+ */
+ struct ucan_ctl_cmd_device_info cmd_get_device_info;
+ /* Get Protocol Version
+ * bmRequest == UCAN_COMMAND_GET;
+ * wValue = UCAN_COMMAND_GET_PROTOCOL_VERSION
+ */
+ struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version;
+
+ u8 raw[128];
+} __packed;
+
+enum {
+ UCAN_TX_COMPLETE_SUCCESS = BIT(0),
+};
+
+/* Transmission Complete within ucan_message_in */
+struct ucan_tx_complete_entry_t {
+ u8 echo_index;
+ u8 flags;
+} __packed __aligned(0x2);
+
+/* CAN Data message format within ucan_message_in/out */
+struct ucan_can_msg {
+ /* note DLC is computed by
+ * msg.len - sizeof (msg.len)
+ * - sizeof (msg.type)
+ * - sizeof (msg.can_msg.id)
+ */
+ __le32 id;
+
+ union {
+ u8 data[CAN_MAX_DLEN]; /* Data of CAN frames */
+ u8 dlc; /* RTR dlc */
+ };
+} __packed;
+
+/* OUT Endpoint, outbound messages */
+struct ucan_message_out {
+ __le16 len; /* Length of the content including the header */
+ u8 type; /* UCAN_OUT_TX and friends */
+ u8 subtype; /* command sub type */
+
+ union {
+ /* Transmit CAN frame
+ * (type == UCAN_TX) && ((msg.can_msg.id & CAN_RTR_FLAG) == 0)
+ * subtype stores the echo id
+ */
+ struct ucan_can_msg can_msg;
+ } msg;
+} __packed __aligned(0x4);
+
+/* IN Endpoint, inbound messages */
+struct ucan_message_in {
+ __le16 len; /* Length of the content including the header */
+ u8 type; /* UCAN_IN_RX and friends */
+ u8 subtype; /* command sub type */
+
+ union {
+ /* CAN Frame received
+ * (type == UCAN_IN_RX)
+ * && ((msg.can_msg.id & CAN_RTR_FLAG) == 0)
+ */
+ struct ucan_can_msg can_msg;
+
+ /* CAN transmission complete
+ * (type == UCAN_IN_TX_COMPLETE)
+ */
+ struct ucan_tx_complete_entry_t can_tx_complete_msg[0];
+ } __aligned(0x4) msg;
+} __packed;
+
+/* Macros to calculate message lengths */
+#define UCAN_OUT_HDR_SIZE offsetof(struct ucan_message_out, msg)
+
+#define UCAN_IN_HDR_SIZE offsetof(struct ucan_message_in, msg)
+#define UCAN_IN_LEN(member) (UCAN_OUT_HDR_SIZE + sizeof(member))
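+/* note: the in and out message headers share the same 4-byte layout
+ * (len, type, subtype), so UCAN_OUT_HDR_SIZE is equal to
+ * UCAN_IN_HDR_SIZE here
+ */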
+
+struct ucan_priv;
+
+/* Context Information for transmission URBs */
+struct ucan_urb_context {
+ struct ucan_priv *up;
+ u8 dlc;
+ bool allocated;
+};
+
+/* Information reported by the USB device */
+struct ucan_device_info {
+ struct can_bittiming_const bittiming_const;
+ u8 tx_fifo;
+};
+
+/* Driver private data */
+struct ucan_priv {
+ /* must be the first member */
+ struct can_priv can;
+
+ /* linux USB device structures */
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct net_device *netdev;
+
+ /* lock for can->echo_skb (used around
+ * can_put/get/free_echo_skb)
+ */
+ spinlock_t echo_skb_lock;
+
+ /* usb device information */
+ u8 intf_index;
+ u8 in_ep_addr;
+ u8 out_ep_addr;
+ u16 in_ep_size;
+
+ /* transmission and reception buffers */
+ struct usb_anchor rx_urbs;
+ struct usb_anchor tx_urbs;
+
+ union ucan_ctl_payload *ctl_msg_buffer;
+ struct ucan_device_info device_info;
+
+ /* transmission control information and locks */
+ spinlock_t context_lock;
+ unsigned int available_tx_urbs;
+ struct ucan_urb_context *context_array;
+};
+
+static u8 ucan_get_can_dlc(struct ucan_can_msg *msg, u16 len)
+{
+ if (le32_to_cpu(msg->id) & CAN_RTR_FLAG)
+ return get_can_dlc(msg->dlc);
+ else
+ return get_can_dlc(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id)));
+}
+
+static void ucan_release_context_array(struct ucan_priv *up)
+{
+ if (!up->context_array)
+ return;
+
+ /* lock is not needed because the driver is currently opening or closing */
+ up->available_tx_urbs = 0;
+
+ kfree(up->context_array);
+ up->context_array = NULL;
+}
+
+static int ucan_alloc_context_array(struct ucan_priv *up)
+{
+ int i;
+
+ /* release contexts if any */
+ ucan_release_context_array(up);
+
+ up->context_array = kcalloc(up->device_info.tx_fifo,
+ sizeof(*up->context_array),
+ GFP_KERNEL);
+ if (!up->context_array) {
+ netdev_err(up->netdev,
+ "Not enough memory to allocate tx contexts\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < up->device_info.tx_fifo; i++) {
+ up->context_array[i].allocated = false;
+ up->context_array[i].up = up;
+ }
+
+ /* lock is not needed because the driver is currently opening */
+ up->available_tx_urbs = up->device_info.tx_fifo;
+
+ return 0;
+}
+
+static struct ucan_urb_context *ucan_alloc_context(struct ucan_priv *up)
+{
+ int i;
+ unsigned long flags;
+ struct ucan_urb_context *ret = NULL;
+
+ if (WARN_ON_ONCE(!up->context_array))
+ return NULL;
+
+ /* execute context operation atomically */
+ spin_lock_irqsave(&up->context_lock, flags);
+
+ for (i = 0; i < up->device_info.tx_fifo; i++) {
+ if (!up->context_array[i].allocated) {
+ /* update context */
+ ret = &up->context_array[i];
+ up->context_array[i].allocated = true;
+
+ /* stop queue if necessary */
+ up->available_tx_urbs--;
+ if (!up->available_tx_urbs)
+ netif_stop_queue(up->netdev);
+
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&up->context_lock, flags);
+ return ret;
+}
+
+static bool ucan_release_context(struct ucan_priv *up,
+ struct ucan_urb_context *ctx)
+{
+ unsigned long flags;
+ bool ret = false;
+
+ if (WARN_ON_ONCE(!up->context_array))
+ return false;
+
+ /* execute context operation atomically */
+ spin_lock_irqsave(&up->context_lock, flags);
+
+ /* if the context was not allocated, the device may have sent garbage */
+ if (ctx->allocated) {
+ ctx->allocated = false;
+
+ /* check if the queue needs to be woken */
+ if (!up->available_tx_urbs)
+ netif_wake_queue(up->netdev);
+ up->available_tx_urbs++;
+
+ ret = true;
+ }
+
+ spin_unlock_irqrestore(&up->context_lock, flags);
+ return ret;
+}
+
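+/* Send a vendor control request to the CAN interface; the payload, if
+ * any, is taken from the shared ctl_msg_buffer.
+ */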
+static int ucan_ctrl_command_out(struct ucan_priv *up,
+ u8 cmd, u16 subcmd, u16 datalen)
+{
+ return usb_control_msg(up->udev,
+ usb_sndctrlpipe(up->udev, 0),
+ cmd,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ subcmd,
+ up->intf_index,
+ up->ctl_msg_buffer,
+ datalen,
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+}
+
+static int ucan_device_request_in(struct ucan_priv *up,
+ u8 cmd, u16 subcmd, u16 datalen)
+{
+ return usb_control_msg(up->udev,
+ usb_rcvctrlpipe(up->udev, 0),
+ cmd,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ subcmd,
+ 0,
+ up->ctl_msg_buffer,
+ datalen,
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+}
+
+/* Parse the device information structure reported by the device and
+ * setup private variables accordingly
+ */
+static void ucan_parse_device_info(struct ucan_priv *up,
+ struct ucan_ctl_cmd_device_info *device_info)
+{
+ struct can_bittiming_const *bittiming =
+ &up->device_info.bittiming_const;
+ u16 ctrlmodes;
+
+ /* store the data */
+ up->can.clock.freq = le32_to_cpu(device_info->freq);
+ up->device_info.tx_fifo = device_info->tx_fifo;
+ strcpy(bittiming->name, "ucan");
+ bittiming->tseg1_min = device_info->tseg1_min;
+ bittiming->tseg1_max = device_info->tseg1_max;
+ bittiming->tseg2_min = device_info->tseg2_min;
+ bittiming->tseg2_max = device_info->tseg2_max;
+ bittiming->sjw_max = device_info->sjw_max;
+ bittiming->brp_min = le32_to_cpu(device_info->brp_min);
+ bittiming->brp_max = le32_to_cpu(device_info->brp_max);
+ bittiming->brp_inc = le16_to_cpu(device_info->brp_inc);
+
+ ctrlmodes = le16_to_cpu(device_info->ctrlmodes);
+
+ up->can.ctrlmode_supported = 0;
+
+ if (ctrlmodes & UCAN_MODE_LOOPBACK)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
+ if (ctrlmodes & UCAN_MODE_SILENT)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+ if (ctrlmodes & UCAN_MODE_3_SAMPLES)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ if (ctrlmodes & UCAN_MODE_ONE_SHOT)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
+ if (ctrlmodes & UCAN_MODE_BERR_REPORT)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_BERR_REPORTING;
+}
+
+/* Handle a CAN error frame that we have received from the device.
+ * Returns true if the can state has changed.
+ */
+static bool ucan_handle_error_frame(struct ucan_priv *up,
+ struct ucan_message_in *m,
+ canid_t canid)
+{
+ enum can_state new_state = up->can.state;
+ struct net_device_stats *net_stats = &up->netdev->stats;
+ struct can_device_stats *can_stats = &up->can.can_stats;
+
+ if (canid & CAN_ERR_LOSTARB)
+ can_stats->arbitration_lost++;
+
+ if (canid & CAN_ERR_BUSERROR)
+ can_stats->bus_error++;
+
+ if (canid & CAN_ERR_ACK)
+ net_stats->tx_errors++;
+
+ if (canid & CAN_ERR_BUSOFF)
+ new_state = CAN_STATE_BUS_OFF;
+
+ /* controller problems, details in data[1] */
+ if (canid & CAN_ERR_CRTL) {
+ u8 d1 = m->msg.can_msg.data[1];
+
+ if (d1 & CAN_ERR_CRTL_RX_OVERFLOW)
+ net_stats->rx_over_errors++;
+
+ /* controller state bits: if multiple are set the worst wins */
+ if (d1 & CAN_ERR_CRTL_ACTIVE)
+ new_state = CAN_STATE_ERROR_ACTIVE;
+
+ if (d1 & (CAN_ERR_CRTL_RX_WARNING | CAN_ERR_CRTL_TX_WARNING))
+ new_state = CAN_STATE_ERROR_WARNING;
+
+ if (d1 & (CAN_ERR_CRTL_RX_PASSIVE | CAN_ERR_CRTL_TX_PASSIVE))
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ }
+
+ /* protocol error, details in data[2] */
+ if (canid & CAN_ERR_PROT) {
+ u8 d2 = m->msg.can_msg.data[2];
+
+ if (d2 & CAN_ERR_PROT_TX)
+ net_stats->tx_errors++;
+ else
+ net_stats->rx_errors++;
+ }
+
+ /* no state change - we are done */
+ if (up->can.state == new_state)
+ return false;
+
+ /* we switched into a better state */
+ if (up->can.state > new_state) {
+ up->can.state = new_state;
+ return true;
+ }
+
+ /* we switched into a worse state */
+ up->can.state = new_state;
+ switch (new_state) {
+ case CAN_STATE_BUS_OFF:
+ can_stats->bus_off++;
+ can_bus_off(up->netdev);
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ can_stats->error_passive++;
+ break;
+ case CAN_STATE_ERROR_WARNING:
+ can_stats->error_warning++;
+ break;
+ default:
+ break;
+ }
+ return true;
+}
+
+/* Callback on reception of a can frame via the IN endpoint
+ *
+ * This function allocates an skb and transfers it to the Linux
+ * network stack
+ */
+static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m)
+{
+ int len;
+ canid_t canid;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats = &up->netdev->stats;
+
+ /* get the contents of the length field */
+ len = le16_to_cpu(m->len);
+
+ /* check sanity */
+ if (len < UCAN_IN_HDR_SIZE + sizeof(m->msg.can_msg.id)) {
+ netdev_warn(up->netdev, "invalid input message len: %d\n", len);
+ return;
+ }
+
+ /* handle error frames */
+ canid = le32_to_cpu(m->msg.can_msg.id);
+ if (canid & CAN_ERR_FLAG) {
+ bool busstate_changed = ucan_handle_error_frame(up, m, canid);
+
+ /* if berr-reporting is off only state changes get through */
+ if (!(up->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+ !busstate_changed)
+ return;
+ } else {
+ canid_t canid_mask;
+ /* compute the mask for canid */
+ canid_mask = CAN_RTR_FLAG;
+ if (canid & CAN_EFF_FLAG)
+ canid_mask |= CAN_EFF_MASK | CAN_EFF_FLAG;
+ else
+ canid_mask |= CAN_SFF_MASK;
+
+ if (canid & ~canid_mask)
+ netdev_warn(up->netdev,
+ "unexpected bits set (canid %x, mask %x)",
+ canid, canid_mask);
+
+ canid &= canid_mask;
+ }
+
+ /* allocate skb */
+ skb = alloc_can_skb(up->netdev, &cf);
+ if (!skb)
+ return;
+
+ /* fill the can frame */
+ cf->can_id = canid;
+
+ /* compute DLC taking RTR_FLAG into account */
+ cf->can_dlc = ucan_get_can_dlc(&m->msg.can_msg, len);
+
+ /* copy the payload of non RTR frames */
+ if (!(cf->can_id & CAN_RTR_FLAG) || (cf->can_id & CAN_ERR_FLAG))
+ memcpy(cf->data, m->msg.can_msg.data, cf->can_dlc);
+
+ /* don't count error frames as real packets */
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ /* pass it to Linux */
+ netif_rx(skb);
+}
+
+/* callback indicating completed transmission */
+static void ucan_tx_complete_msg(struct ucan_priv *up,
+ struct ucan_message_in *m)
+{
+ unsigned long flags;
+ u16 count, i;
+ u8 echo_index, dlc;
+ u16 len = le16_to_cpu(m->len);
+
+ struct ucan_urb_context *context;
+
+ if (len < UCAN_IN_HDR_SIZE || (len % 2 != 0)) {
+ netdev_err(up->netdev, "invalid tx complete length\n");
+ return;
+ }
+
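+	/* each ucan_tx_complete_entry_t is two bytes (echo_index and
+	 * flags), so the payload holds (len - header size) / 2 entries
+	 */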
+ count = (len - UCAN_IN_HDR_SIZE) / 2;
+ for (i = 0; i < count; i++) {
+ /* we did not submit such echo ids */
+ echo_index = m->msg.can_tx_complete_msg[i].echo_index;
+ if (echo_index >= up->device_info.tx_fifo) {
+ up->netdev->stats.tx_errors++;
+ netdev_err(up->netdev,
+ "invalid echo_index %d received\n",
+ echo_index);
+ continue;
+ }
+
+ /* gather information from the context */
+ context = &up->context_array[echo_index];
+ dlc = READ_ONCE(context->dlc);
+
+ /* Release context and restart queue if necessary.
+ * Also check if the context was allocated
+ */
+ if (!ucan_release_context(up, context))
+ continue;
+
+ spin_lock_irqsave(&up->echo_skb_lock, flags);
+ if (m->msg.can_tx_complete_msg[i].flags &
+ UCAN_TX_COMPLETE_SUCCESS) {
+ /* update statistics */
+ up->netdev->stats.tx_packets++;
+ up->netdev->stats.tx_bytes += dlc;
+ can_get_echo_skb(up->netdev, echo_index);
+ } else {
+ up->netdev->stats.tx_dropped++;
+ can_free_echo_skb(up->netdev, echo_index);
+ }
+ spin_unlock_irqrestore(&up->echo_skb_lock, flags);
+ }
+}
+
+/* callback on reception of a USB message */
+static void ucan_read_bulk_callback(struct urb *urb)
+{
+ int ret;
+ int pos;
+ struct ucan_priv *up = urb->context;
+ struct net_device *netdev = up->netdev;
+ struct ucan_message_in *m;
+
+ /* the device is not up and the driver should not receive any
+ * data on the bulk in pipe
+ */
+ if (WARN_ON(!up->context_array)) {
+ usb_free_coherent(up->udev,
+ up->in_ep_size,
+ urb->transfer_buffer,
+ urb->transfer_dma);
+ return;
+ }
+
+ /* check URB status */
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
+ case -ESHUTDOWN:
+ case -ETIME:
+ /* urb is not resubmitted -> free dma data */
+ usb_free_coherent(up->udev,
+ up->in_ep_size,
+ urb->transfer_buffer,
+ urb->transfer_dma);
+ netdev_dbg(up->netdev, "not resubmitting urb; status: %d\n",
+ urb->status);
+ return;
+ default:
+ goto resubmit;
+ }
+
+ /* sanity check */
+ if (!netif_device_present(netdev))
+ return;
+
+ /* iterate over input */
+ pos = 0;
+ while (pos < urb->actual_length) {
+ int len;
+
+ /* check sanity (length of header) */
+ if ((urb->actual_length - pos) < UCAN_IN_HDR_SIZE) {
+ netdev_warn(up->netdev,
+ "invalid message (short; no hdr; l:%d)\n",
+ urb->actual_length);
+ goto resubmit;
+ }
+
+ /* setup the message address */
+ m = (struct ucan_message_in *)
+ ((u8 *)urb->transfer_buffer + pos);
+ len = le16_to_cpu(m->len);
+
+ /* check sanity (length of content) */
+ if (urb->actual_length - pos < len) {
+ netdev_warn(up->netdev,
+ "invalid message (short; no data; l:%d)\n",
+ urb->actual_length);
+ print_hex_dump(KERN_WARNING,
+ "raw data: ",
+ DUMP_PREFIX_ADDRESS,
+ 16,
+ 1,
+ urb->transfer_buffer,
+ urb->actual_length,
+ true);
+
+ goto resubmit;
+ }
+
+ switch (m->type) {
+ case UCAN_IN_RX:
+ ucan_rx_can_msg(up, m);
+ break;
+ case UCAN_IN_TX_COMPLETE:
+ ucan_tx_complete_msg(up, m);
+ break;
+ default:
+ netdev_warn(up->netdev,
+ "invalid message (type; t:%d)\n",
+ m->type);
+ break;
+ }
+
+ /* proceed to next message */
+ pos += len;
+ /* align to 4 byte boundary */
+ pos = round_up(pos, 4);
+ }
+
+resubmit:
+ /* resubmit urb when done */
+ usb_fill_bulk_urb(urb, up->udev,
+ usb_rcvbulkpipe(up->udev,
+ up->in_ep_addr),
+ urb->transfer_buffer,
+ up->in_ep_size,
+ ucan_read_bulk_callback,
+ up);
+
+ usb_anchor_urb(urb, &up->rx_urbs);
+ /* completion handlers run in atomic context; the resubmission
+ * must not sleep, so GFP_ATOMIC is required here
+ */
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (ret < 0) {
+ netdev_err(up->netdev,
+ "failed resubmitting read bulk urb: %d\n",
+ ret);
+
+ usb_unanchor_urb(urb);
+ usb_free_coherent(up->udev,
+ up->in_ep_size,
+ urb->transfer_buffer,
+ urb->transfer_dma);
+
+ if (ret == -ENODEV)
+ netif_device_detach(netdev);
+ }
+}
+
+/* callback after transmission of a USB message */
+static void ucan_write_bulk_callback(struct urb *urb)
+{
+ unsigned long flags;
+ struct ucan_priv *up;
+ struct ucan_urb_context *context = urb->context;
+
+ /* get the urb context */
+ if (WARN_ON_ONCE(!context))
+ return;
+
+ /* free up our allocated buffer */
+ usb_free_coherent(urb->dev,
+ sizeof(struct ucan_message_out),
+ urb->transfer_buffer,
+ urb->transfer_dma);
+
+ up = context->up;
+ if (WARN_ON_ONCE(!up))
+ return;
+
+ /* sanity check */
+ if (!netif_device_present(up->netdev))
+ return;
+
+ /* transmission failed (USB - the device will not send a TX complete) */
+ if (urb->status) {
+ netdev_warn(up->netdev,
+ "failed to transmit USB message to device: %d\n",
+ urb->status);
+
+ /* update counters and clean up */
+ spin_lock_irqsave(&up->echo_skb_lock, flags);
+ can_free_echo_skb(up->netdev, context - up->context_array);
+ spin_unlock_irqrestore(&up->echo_skb_lock, flags);
+
+ up->netdev->stats.tx_dropped++;
+
+ /* release context and restart the queue if necessary */
+ if (!ucan_release_context(up, context))
+ netdev_err(up->netdev,
+ "urb failed, failed to release context\n");
+ }
+}
+
+static void ucan_cleanup_rx_urbs(struct ucan_priv *up, struct urb **urbs)
+{
+ int i;
+
+ for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
+ if (urbs[i]) {
+ usb_unanchor_urb(urbs[i]);
+ usb_free_coherent(up->udev,
+ up->in_ep_size,
+ urbs[i]->transfer_buffer,
+ urbs[i]->transfer_dma);
+ usb_free_urb(urbs[i]);
+ }
+ }
+
+ memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS);
+}
+
+static int ucan_prepare_and_anchor_rx_urbs(struct ucan_priv *up,
+ struct urb **urbs)
+{
+ int i;
+
+ memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS);
+
+ for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
+ void *buf;
+
+ urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urbs[i])
+ goto err;
+
+ buf = usb_alloc_coherent(up->udev,
+ up->in_ep_size,
+ GFP_KERNEL, &urbs[i]->transfer_dma);
+ if (!buf) {
+ /* cleanup this urb */
+ usb_free_urb(urbs[i]);
+ urbs[i] = NULL;
+ goto err;
+ }
+
+ usb_fill_bulk_urb(urbs[i], up->udev,
+ usb_rcvbulkpipe(up->udev,
+ up->in_ep_addr),
+ buf,
+ up->in_ep_size,
+ ucan_read_bulk_callback,
+ up);
+
+ urbs[i]->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ usb_anchor_urb(urbs[i], &up->rx_urbs);
+ }
+ return 0;
+
+err:
+ /* cleanup other unsubmitted urbs */
+ ucan_cleanup_rx_urbs(up, urbs);
+ return -ENOMEM;
+}
+
+/* Submits rx urbs with the semantic: Either submit all, or clean up
+ * everything. In case of errors submitted urbs are killed and all urbs in
+ * the array are freed. In case of no errors every entry in the urb
+ * array is set to NULL.
+ */
+static int ucan_submit_rx_urbs(struct ucan_priv *up, struct urb **urbs)
+{
+ int i, ret;
+
+ /* Iterate over all urbs to submit. On success remove the urb
+ * from the list.
+ */
+ for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
+ ret = usb_submit_urb(urbs[i], GFP_KERNEL);
+ if (ret) {
+ netdev_err(up->netdev,
+ "could not submit urb; code: %d\n",
+ ret);
+ goto err;
+ }
+
+		/* The urb is already anchored and now submitted; drop our
+		 * reference so the USB core takes care of freeing it
+		 */
+ usb_free_urb(urbs[i]);
+ urbs[i] = NULL;
+ }
+ return 0;
+
+err:
+ /* Cleanup unsubmitted urbs */
+ ucan_cleanup_rx_urbs(up, urbs);
+
+ /* Kill urbs that are already submitted */
+ usb_kill_anchored_urbs(&up->rx_urbs);
+
+ return ret;
+}
+
+/* Open the network device */
+static int ucan_open(struct net_device *netdev)
+{
+ int ret, ret_cleanup;
+ u16 ctrlmode;
+ struct urb *urbs[UCAN_MAX_RX_URBS];
+ struct ucan_priv *up = netdev_priv(netdev);
+
+ ret = ucan_alloc_context_array(up);
+ if (ret)
+ return ret;
+
+ /* Allocate and prepare IN URBS - allocated and anchored
+	 * urbs are stored in urbs[] for cleanup on error
+ */
+ ret = ucan_prepare_and_anchor_rx_urbs(up, urbs);
+ if (ret)
+ goto err_contexts;
+
+ /* Check the control mode */
+ ctrlmode = 0;
+ if (up->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ ctrlmode |= UCAN_MODE_LOOPBACK;
+ if (up->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ ctrlmode |= UCAN_MODE_SILENT;
+ if (up->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ ctrlmode |= UCAN_MODE_3_SAMPLES;
+ if (up->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ ctrlmode |= UCAN_MODE_ONE_SHOT;
+
+	/* Enable this in any case - filtering is done within the
+ * receive path
+ */
+ ctrlmode |= UCAN_MODE_BERR_REPORT;
+ up->ctl_msg_buffer->cmd_start.mode = cpu_to_le16(ctrlmode);
+
+ /* Driver is ready to receive data - start the USB device */
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_START, 0, 2);
+ if (ret < 0) {
+ netdev_err(up->netdev,
+ "could not start device, code: %d\n",
+ ret);
+ goto err_reset;
+ }
+
+ /* Call CAN layer open */
+ ret = open_candev(netdev);
+ if (ret)
+ goto err_stop;
+
+ /* Driver is ready to receive data. Submit RX URBS */
+ ret = ucan_submit_rx_urbs(up, urbs);
+ if (ret)
+ goto err_stop;
+
+ up->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Start the network queue */
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_stop:
+	/* The device has already been started; stop it */
+ ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0);
+ if (ret_cleanup < 0)
+ netdev_err(up->netdev,
+ "could not stop device, code: %d\n",
+ ret_cleanup);
+
+err_reset:
+ /* The device might have received data, reset it for
+ * consistent state
+ */
+ ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
+ if (ret_cleanup < 0)
+ netdev_err(up->netdev,
+ "could not reset device, code: %d\n",
+ ret_cleanup);
+
+ /* clean up unsubmitted urbs */
+ ucan_cleanup_rx_urbs(up, urbs);
+
+err_contexts:
+ ucan_release_context_array(up);
+ return ret;
+}
+
+static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up,
+ struct ucan_urb_context *context,
+ struct can_frame *cf,
+ u8 echo_index)
+{
+ int mlen;
+ struct urb *urb;
+ struct ucan_message_out *m;
+
+ /* create a URB, and a buffer for it, and copy the data to the URB */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(up->netdev, "no memory left for URBs\n");
+ return NULL;
+ }
+
+ m = usb_alloc_coherent(up->udev,
+ sizeof(struct ucan_message_out),
+ GFP_ATOMIC,
+ &urb->transfer_dma);
+ if (!m) {
+ netdev_err(up->netdev, "no memory left for USB buffer\n");
+ usb_free_urb(urb);
+ return NULL;
+ }
+
+ /* build the USB message */
+ m->type = UCAN_OUT_TX;
+ m->msg.can_msg.id = cpu_to_le32(cf->can_id);
+
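+	/* RTR frames carry only a dlc byte in place of payload data, so
+	 * the wire length differs between the two cases
+	 */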
+ if (cf->can_id & CAN_RTR_FLAG) {
+ mlen = UCAN_OUT_HDR_SIZE +
+ offsetof(struct ucan_can_msg, dlc) +
+ sizeof(m->msg.can_msg.dlc);
+ m->msg.can_msg.dlc = cf->can_dlc;
+ } else {
+ mlen = UCAN_OUT_HDR_SIZE +
+ sizeof(m->msg.can_msg.id) + cf->can_dlc;
+ memcpy(m->msg.can_msg.data, cf->data, cf->can_dlc);
+ }
+ m->len = cpu_to_le16(mlen);
+
+ context->dlc = cf->can_dlc;
+
+ m->subtype = echo_index;
+
+ /* build the urb */
+ usb_fill_bulk_urb(urb, up->udev,
+ usb_sndbulkpipe(up->udev,
+ up->out_ep_addr),
+ m, mlen, ucan_write_bulk_callback, context);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ return urb;
+}
+
+static void ucan_clean_up_tx_urb(struct ucan_priv *up, struct urb *urb)
+{
+ usb_free_coherent(up->udev, sizeof(struct ucan_message_out),
+ urb->transfer_buffer, urb->transfer_dma);
+ usb_free_urb(urb);
+}
+
+/* callback when Linux needs to send a can frame */
+static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ unsigned long flags;
+ int ret;
+ u8 echo_index;
+ struct urb *urb;
+ struct ucan_urb_context *context;
+ struct ucan_priv *up = netdev_priv(netdev);
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ /* check skb */
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ /* allocate a context and slow down tx path, if fifo state is low */
+	context = ucan_alloc_context(up);
+	if (WARN_ON_ONCE(!context))
+		return NETDEV_TX_BUSY;
+
+	/* compute the echo index only after the NULL check; pointer
+	 * arithmetic on a NULL pointer is undefined
+	 */
+	echo_index = context - up->context_array;
+
+ /* prepare urb for transmission */
+ urb = ucan_prepare_tx_urb(up, context, cf, echo_index);
+ if (!urb)
+ goto drop;
+
+ /* put the skb on can loopback stack */
+ spin_lock_irqsave(&up->echo_skb_lock, flags);
+ can_put_echo_skb(skb, up->netdev, echo_index);
+ spin_unlock_irqrestore(&up->echo_skb_lock, flags);
+
+ /* transmit it */
+ usb_anchor_urb(urb, &up->tx_urbs);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+
+ /* cleanup urb */
+ if (ret) {
+ /* on error, clean up */
+ usb_unanchor_urb(urb);
+ ucan_clean_up_tx_urb(up, urb);
+ if (!ucan_release_context(up, context))
+ netdev_err(up->netdev,
+ "xmit err: failed to release context\n");
+
+ /* remove the skb from the echo stack - this also
+ * frees the skb
+ */
+ spin_lock_irqsave(&up->echo_skb_lock, flags);
+ can_free_echo_skb(up->netdev, echo_index);
+ spin_unlock_irqrestore(&up->echo_skb_lock, flags);
+
+ if (ret == -ENODEV) {
+ netif_device_detach(up->netdev);
+ } else {
+ netdev_warn(up->netdev,
+ "xmit err: failed to submit urb %d\n",
+ ret);
+ up->netdev->stats.tx_dropped++;
+ }
+ return NETDEV_TX_OK;
+ }
+
+ netif_trans_update(netdev);
+
+ /* release ref, as we do not need the urb anymore */
+ usb_free_urb(urb);
+
+ return NETDEV_TX_OK;
+
+drop:
+ if (!ucan_release_context(up, context))
+ netdev_err(up->netdev,
+ "xmit drop: failed to release context\n");
+ dev_kfree_skb(skb);
+ up->netdev->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+/* Device goes down
+ *
+ * Clean up used resources
+ */
+static int ucan_close(struct net_device *netdev)
+{
+ int ret;
+ struct ucan_priv *up = netdev_priv(netdev);
+
+ up->can.state = CAN_STATE_STOPPED;
+
+ /* stop sending data */
+ usb_kill_anchored_urbs(&up->tx_urbs);
+
+ /* stop receiving data */
+ usb_kill_anchored_urbs(&up->rx_urbs);
+
+ /* stop and reset can device */
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0);
+ if (ret < 0)
+ netdev_err(up->netdev,
+ "could not stop device, code: %d\n",
+ ret);
+
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
+ if (ret < 0)
+ netdev_err(up->netdev,
+ "could not reset device, code: %d\n",
+ ret);
+
+ netif_stop_queue(netdev);
+
+ ucan_release_context_array(up);
+
+ close_candev(up->netdev);
+ return 0;
+}
+
+/* CAN driver callbacks */
+static const struct net_device_ops ucan_netdev_ops = {
+ .ndo_open = ucan_open,
+ .ndo_stop = ucan_close,
+ .ndo_start_xmit = ucan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+/* Request to set bittiming
+ *
+ * This function generates a USB set-bittiming message and transmits
+ * it to the device
+ */
+static int ucan_set_bittiming(struct net_device *netdev)
+{
+ int ret;
+ struct ucan_priv *up = netdev_priv(netdev);
+ struct ucan_ctl_cmd_set_bittiming *cmd_set_bittiming;
+
+ cmd_set_bittiming = &up->ctl_msg_buffer->cmd_set_bittiming;
+ cmd_set_bittiming->tq = cpu_to_le32(up->can.bittiming.tq);
+ cmd_set_bittiming->brp = cpu_to_le16(up->can.bittiming.brp);
+ cmd_set_bittiming->sample_point =
+ cpu_to_le16(up->can.bittiming.sample_point);
+ cmd_set_bittiming->prop_seg = up->can.bittiming.prop_seg;
+ cmd_set_bittiming->phase_seg1 = up->can.bittiming.phase_seg1;
+ cmd_set_bittiming->phase_seg2 = up->can.bittiming.phase_seg2;
+ cmd_set_bittiming->sjw = up->can.bittiming.sjw;
+
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_SET_BITTIMING, 0,
+ sizeof(*cmd_set_bittiming));
+ return (ret < 0) ? ret : 0;
+}
+
+/* Restart the device to get it out of BUS-OFF state.
+ * Called when the user runs "ip link set can1 type can restart".
+ */
+static int ucan_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ int ret;
+ unsigned long flags;
+ struct ucan_priv *up = netdev_priv(netdev);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ netdev_dbg(up->netdev, "restarting device\n");
+
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESTART, 0, 0);
+ up->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* check if queue can be restarted,
+ * up->available_tx_urbs must be protected by the
+ * lock
+ */
+ spin_lock_irqsave(&up->context_lock, flags);
+
+ if (up->available_tx_urbs > 0)
+ netif_wake_queue(up->netdev);
+
+ spin_unlock_irqrestore(&up->context_lock, flags);
+
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Probe the device, reset it and gather general device information */
+static int ucan_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ int ret;
+ int i;
+ u32 protocol_version;
+ struct usb_device *udev;
+ struct net_device *netdev;
+ struct usb_host_interface *iface_desc;
+ struct ucan_priv *up;
+ struct usb_endpoint_descriptor *ep;
+ u16 in_ep_size;
+ u16 out_ep_size;
+ u8 in_ep_addr;
+ u8 out_ep_addr;
+ union ucan_ctl_payload *ctl_msg_buffer;
+ char firmware_str[sizeof(union ucan_ctl_payload) + 1];
+
+ udev = interface_to_usbdev(intf);
+
+ /* Stage 1 - Interface Parsing
+ * ---------------------------
+ *
+ * Identify the device's USB interface descriptor and its
+ * endpoints. Probing is aborted on errors.
+ */
+
+ /* check if the interface is sane */
+ iface_desc = intf->cur_altsetting;
+ if (!iface_desc)
+ return -ENODEV;
+
+ dev_info(&udev->dev,
+ "%s: probing device on interface #%d\n",
+ UCAN_DRIVER_NAME,
+ iface_desc->desc.bInterfaceNumber);
+
+ /* interface sanity check */
+ if (iface_desc->desc.bNumEndpoints != 2) {
+ dev_err(&udev->dev,
+ "%s: invalid EP count (%d)",
+ UCAN_DRIVER_NAME, iface_desc->desc.bNumEndpoints);
+ goto err_firmware_needs_update;
+ }
+
+ /* check interface endpoints */
+ in_ep_addr = 0;
+ out_ep_addr = 0;
+ in_ep_size = 0;
+ out_ep_size = 0;
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+ ep = &iface_desc->endpoint[i].desc;
+
+ if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) != 0) &&
+ ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK)) {
+ /* In Endpoint */
+ in_ep_addr = ep->bEndpointAddress;
+ in_ep_addr &= USB_ENDPOINT_NUMBER_MASK;
+ in_ep_size = le16_to_cpu(ep->wMaxPacketSize);
+ } else if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ==
+ 0) &&
+ ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK)) {
+ /* Out Endpoint */
+ out_ep_addr = ep->bEndpointAddress;
+ out_ep_addr &= USB_ENDPOINT_NUMBER_MASK;
+ out_ep_size = le16_to_cpu(ep->wMaxPacketSize);
+ }
+ }
+
+ /* check if interface is sane */
+ if (!in_ep_addr || !out_ep_addr) {
+ dev_err(&udev->dev, "%s: invalid endpoint configuration\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+ if (in_ep_size < sizeof(struct ucan_message_in)) {
+ dev_err(&udev->dev, "%s: invalid in_ep MaxPacketSize\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+ if (out_ep_size < sizeof(struct ucan_message_out)) {
+ dev_err(&udev->dev, "%s: invalid out_ep MaxPacketSize\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+
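
The probe loop above classifies each endpoint by its direction bit and transfer type. Here is a self-contained sketch of the same test, using the numeric values behind USB_ENDPOINT_DIR_MASK (0x80), USB_ENDPOINT_NUMBER_MASK (0x0f), USB_ENDPOINT_XFERTYPE_MASK (0x03) and USB_ENDPOINT_XFER_BULK (2); the sample descriptor bytes are made up.

#include <stdint.h>
#include <stdio.h>

#define DIR_MASK  0x80 /* USB_ENDPOINT_DIR_MASK */
#define NUM_MASK  0x0f /* USB_ENDPOINT_NUMBER_MASK */
#define XFER_MASK 0x03 /* USB_ENDPOINT_XFERTYPE_MASK */
#define XFER_BULK 0x02 /* USB_ENDPOINT_XFER_BULK */

int main(void)
{
	/* {bEndpointAddress, bmAttributes} for two fictional endpoints */
	uint8_t eps[2][2] = { { 0x81, 0x02 }, { 0x02, 0x02 } };

	for (int i = 0; i < 2; i++) {
		uint8_t addr = eps[i][0], attr = eps[i][1];

		if ((attr & XFER_MASK) != XFER_BULK)
			continue; /* skip non-bulk endpoints */
		printf("EP %d is bulk %s\n", addr & NUM_MASK,
		       (addr & DIR_MASK) ? "IN" : "OUT");
	}
	return 0;
}
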
+ /* Stage 2 - Device Identification
+ * -------------------------------
+ *
+ * The device interface seems to be a ucan device. Do further
+ * compatibility checks. On error probing is aborted, on
+ * success this stage leaves the ctl_msg_buffer with the
+ * reported contents of a GET_INFO command (supported
+ * bittimings, tx_fifo depth). This information is used in
+ * Stage 3 for the final driver initialisation.
+ */
+
+	/* Prepare memory for control transfers */
+ ctl_msg_buffer = devm_kzalloc(&udev->dev,
+ sizeof(union ucan_ctl_payload),
+ GFP_KERNEL);
+ if (!ctl_msg_buffer) {
+ dev_err(&udev->dev,
+ "%s: failed to allocate control pipe memory\n",
+ UCAN_DRIVER_NAME);
+ return -ENOMEM;
+ }
+
+ /* get protocol version
+ *
+ * note: ucan_ctrl_command_* wrappers cannot be used yet
+ * because `up` is initialised in Stage 3
+ */
+ ret = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ UCAN_COMMAND_GET,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ UCAN_COMMAND_GET_PROTOCOL_VERSION,
+ iface_desc->desc.bInterfaceNumber,
+ ctl_msg_buffer,
+ sizeof(union ucan_ctl_payload),
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+
+	/* older firmware versions do not support this command - those
+	 * are not supported by this driver
+ */
+ if (ret != 4) {
+ dev_err(&udev->dev,
+ "%s: could not read protocol version, ret=%d\n",
+ UCAN_DRIVER_NAME, ret);
+ if (ret >= 0)
+ ret = -EINVAL;
+ goto err_firmware_needs_update;
+ }
+
+ /* this driver currently supports protocol version 3 only */
+ protocol_version =
+ le32_to_cpu(ctl_msg_buffer->cmd_get_protocol_version.version);
+ if (protocol_version < UCAN_PROTOCOL_VERSION_MIN ||
+ protocol_version > UCAN_PROTOCOL_VERSION_MAX) {
+ dev_err(&udev->dev,
+ "%s: device protocol version %d is not supported\n",
+ UCAN_DRIVER_NAME, protocol_version);
+ goto err_firmware_needs_update;
+ }
+
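
The ret == 4 check above reflects that the GET_PROTOCOL_VERSION reply is a single little-endian 32-bit word. A small sketch of the decode step, with an illustrative reply buffer:

#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	uint8_t reply[4] = { 0x03, 0x00, 0x00, 0x00 }; /* example reply */

	printf("protocol version %u\n", get_le32(reply)); /* prints 3 */
	return 0;
}
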
+ /* request the device information and store it in ctl_msg_buffer
+ *
+	 * note: ucan_ctrl_command_* wrappers cannot be used yet
+ * because `up` is initialised in Stage 3
+ */
+ ret = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ UCAN_COMMAND_GET,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ UCAN_COMMAND_GET_INFO,
+ iface_desc->desc.bInterfaceNumber,
+ ctl_msg_buffer,
+ sizeof(ctl_msg_buffer->cmd_get_device_info),
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+
+ if (ret < 0) {
+ dev_err(&udev->dev, "%s: failed to retrieve device info\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+ if (ret < sizeof(ctl_msg_buffer->cmd_get_device_info)) {
+ dev_err(&udev->dev, "%s: device reported invalid device info\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+ if (ctl_msg_buffer->cmd_get_device_info.tx_fifo == 0) {
+ dev_err(&udev->dev,
+ "%s: device reported invalid tx-fifo size\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+
+ /* Stage 3 - Driver Initialisation
+ * -------------------------------
+ *
+ * Register device to Linux, prepare private structures and
+ * reset the device.
+ */
+
+ /* allocate driver resources */
+ netdev = alloc_candev(sizeof(struct ucan_priv),
+ ctl_msg_buffer->cmd_get_device_info.tx_fifo);
+ if (!netdev) {
+ dev_err(&udev->dev,
+ "%s: cannot allocate candev\n", UCAN_DRIVER_NAME);
+ return -ENOMEM;
+ }
+
+ up = netdev_priv(netdev);
+
+	/* initialize data */
+ up->udev = udev;
+ up->intf = intf;
+ up->netdev = netdev;
+ up->intf_index = iface_desc->desc.bInterfaceNumber;
+ up->in_ep_addr = in_ep_addr;
+ up->out_ep_addr = out_ep_addr;
+ up->in_ep_size = in_ep_size;
+ up->ctl_msg_buffer = ctl_msg_buffer;
+ up->context_array = NULL;
+ up->available_tx_urbs = 0;
+
+ up->can.state = CAN_STATE_STOPPED;
+ up->can.bittiming_const = &up->device_info.bittiming_const;
+ up->can.do_set_bittiming = ucan_set_bittiming;
+ up->can.do_set_mode = &ucan_set_mode;
+ spin_lock_init(&up->context_lock);
+ spin_lock_init(&up->echo_skb_lock);
+ netdev->netdev_ops = &ucan_netdev_ops;
+
+ usb_set_intfdata(intf, up);
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ /* parse device information
+ * the data retrieved in Stage 2 is still available in
+ * up->ctl_msg_buffer
+ */
+ ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info);
+
+ /* just print some device information - if available */
+ ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
+ sizeof(union ucan_ctl_payload));
+ if (ret > 0) {
+		/* copy string while ensuring zero termination */
+ strncpy(firmware_str, up->ctl_msg_buffer->raw,
+ sizeof(union ucan_ctl_payload));
+ firmware_str[sizeof(union ucan_ctl_payload)] = '\0';
+ } else {
+ strcpy(firmware_str, "unknown");
+ }
+
+ /* device is compatible, reset it */
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
+ if (ret < 0)
+ goto err_free_candev;
+
+ init_usb_anchor(&up->rx_urbs);
+ init_usb_anchor(&up->tx_urbs);
+
+ up->can.state = CAN_STATE_STOPPED;
+
+ /* register the device */
+ ret = register_candev(netdev);
+ if (ret)
+ goto err_free_candev;
+
+ /* initialisation complete, log device info */
+ netdev_info(up->netdev, "registered device\n");
+ netdev_info(up->netdev, "firmware string: %s\n", firmware_str);
+
+ /* success */
+ return 0;
+
+err_free_candev:
+ free_candev(netdev);
+ return ret;
+
+err_firmware_needs_update:
+ dev_err(&udev->dev,
+ "%s: probe failed; try to update the device firmware\n",
+ UCAN_DRIVER_NAME);
+ return -ENODEV;
+}
+
+/* disconnect the device */
+static void ucan_disconnect(struct usb_interface *intf)
+{
+ struct usb_device *udev;
+ struct ucan_priv *up = usb_get_intfdata(intf);
+
+ udev = interface_to_usbdev(intf);
+
+ usb_set_intfdata(intf, NULL);
+
+ if (up) {
+ unregister_netdev(up->netdev);
+ free_candev(up->netdev);
+ }
+}
+
+static struct usb_device_id ucan_table[] = {
+ /* Mule (soldered onto compute modules) */
+ {USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425a, 0)},
+ /* Seal (standalone USB stick) */
+ {USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425b, 0)},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, ucan_table);
+/* driver callbacks */
+static struct usb_driver ucan_driver = {
+ .name = UCAN_DRIVER_NAME,
+ .probe = ucan_probe,
+ .disconnect = ucan_disconnect,
+ .id_table = ucan_table,
+};
+
+module_usb_driver(ucan_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Martin Elshuber <martin.elshuber@theobroma-systems.com>");
+MODULE_AUTHOR("Jakob Unterwurzacher <jakob.unterwurzacher@theobroma-systems.com>");
+MODULE_DESCRIPTION("Driver for Theobroma Systems UCAN devices");
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 5a24039733ef..045f0845e665 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -2,7 +2,7 @@
*
* Copyright (C) 2012 - 2014 Xilinx, Inc.
* Copyright (C) 2009 PetaLogix. All rights reserved.
- * Copyright (C) 2017 Sandvik Mining and Construction Oy
+ * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
*
* Description:
* This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -51,16 +51,34 @@ enum xcan_reg {
XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */
- XCAN_TXFIFO_ID_OFFSET = 0x30,/* TX FIFO ID */
- XCAN_TXFIFO_DLC_OFFSET = 0x34, /* TX FIFO DLC */
- XCAN_TXFIFO_DW1_OFFSET = 0x38, /* TX FIFO Data Word 1 */
- XCAN_TXFIFO_DW2_OFFSET = 0x3C, /* TX FIFO Data Word 2 */
- XCAN_RXFIFO_ID_OFFSET = 0x50, /* RX FIFO ID */
- XCAN_RXFIFO_DLC_OFFSET = 0x54, /* RX FIFO DLC */
- XCAN_RXFIFO_DW1_OFFSET = 0x58, /* RX FIFO Data Word 1 */
- XCAN_RXFIFO_DW2_OFFSET = 0x5C, /* RX FIFO Data Word 2 */
+
+ /* not on CAN FD cores */
+ XCAN_TXFIFO_OFFSET = 0x30, /* TX FIFO base */
+ XCAN_RXFIFO_OFFSET = 0x50, /* RX FIFO base */
+ XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */
+
+ /* only on CAN FD cores */
+ XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
+ XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
+ XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
+ XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
+ XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */
};
+#define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00)
+#define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04)
+#define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08)
+#define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C)
+
+#define XCAN_CANFD_FRAME_SIZE 0x48
+#define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \
+ XCAN_CANFD_FRAME_SIZE * (n))
+#define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \
+ XCAN_CANFD_FRAME_SIZE * (n))
+
+/* the single TX mailbox used by this driver on CAN FD HW */
+#define XCAN_TX_MAILBOX_IDX 0
+
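
A quick worked example of the frame-offset macros above: frame n of the CAN FD RX message space starts XCAN_CANFD_FRAME_SIZE (0x48) bytes after frame n - 1, and the ID/DLC/DW1/DW2 words sit at fixed offsets inside each frame. The sketch below reproduces the arithmetic with local copies of the constants.

#include <stdio.h>

#define RXMSG_BASE      0x1100
#define FRAME_SIZE      0x48
#define RXMSG_FRAME(n)  (RXMSG_BASE + FRAME_SIZE * (n))
#define FRAME_DW1(base) ((base) + 0x08)

int main(void)
{
	/* RX frame 2 starts at 0x1190; its first data word at 0x1198 */
	printf("frame 2 base: 0x%x\n", RXMSG_FRAME(2));
	printf("frame 2 DW1:  0x%x\n", FRAME_DW1(RXMSG_FRAME(2)));
	return 0;
}
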
/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */
@@ -70,6 +88,9 @@ enum xcan_reg {
#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
+#define XCAN_BTR_SJW_MASK_CANFD 0x000F0000 /* Synchronous jump width */
+#define XCAN_BTR_TS2_MASK_CANFD 0x00000F00 /* Time segment 2 */
+#define XCAN_BTR_TS1_MASK_CANFD 0x0000003F /* Time segment 1 */
#define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */
@@ -83,6 +104,7 @@ enum xcan_reg {
#define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */
+#define XCAN_IXR_RXMNF_MASK 0x00020000 /* RX match not finished */
#define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */
@@ -100,15 +122,15 @@ enum xcan_reg {
#define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
-
-#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
- XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
- XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
- XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
+#define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */
+#define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */
+#define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
+#define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */
+#define XCAN_BTR_TS2_SHIFT_CANFD 8 /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */
#define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */
@@ -118,6 +140,27 @@ enum xcan_reg {
#define XCAN_FRAME_MAX_DATA_LEN 8
#define XCAN_TIMEOUT (1 * HZ)
+/* TX-FIFO-empty interrupt available */
+#define XCAN_FLAG_TXFEMP 0x0001
+/* RX Match Not Finished interrupt available */
+#define XCAN_FLAG_RXMNF 0x0002
+/* Extended acceptance filters with control at 0xE0 */
+#define XCAN_FLAG_EXT_FILTERS 0x0004
+/* TX mailboxes instead of TX FIFO */
+#define XCAN_FLAG_TX_MAILBOXES 0x0008
+/* RX FIFO with each buffer in separate registers at 0x1100
+ * instead of the regular FIFO at 0x50
+ */
+#define XCAN_FLAG_RX_FIFO_MULTI 0x0010
+
+struct xcan_devtype_data {
+ unsigned int flags;
+ const struct can_bittiming_const *bittiming_const;
+ const char *bus_clk_name;
+ unsigned int btr_ts2_shift;
+ unsigned int btr_sjw_shift;
+};
+
/**
* struct xcan_priv - This definition define CAN driver instance
* @can: CAN private data structure.
@@ -133,6 +176,7 @@ enum xcan_reg {
* @irq_flags: For request_irq()
* @bus_clk: Pointer to struct clk
* @can_clk: Pointer to struct clk
+ * @devtype: Device type specific constants
*/
struct xcan_priv {
struct can_priv can;
@@ -149,6 +193,7 @@ struct xcan_priv {
unsigned long irq_flags;
struct clk *bus_clk;
struct clk *can_clk;
+ struct xcan_devtype_data devtype;
};
/* CAN Bittiming constants as per Xilinx CAN specs */
@@ -164,9 +209,16 @@ static const struct can_bittiming_const xcan_bittiming_const = {
.brp_inc = 1,
};
-#define XCAN_CAP_WATERMARK 0x0001
-struct xcan_devtype_data {
- unsigned int caps;
+static const struct can_bittiming_const xcan_bittiming_const_canfd = {
+ .name = DRIVER_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 64,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
};
/**
@@ -224,6 +276,23 @@ static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
}
/**
+ * xcan_rx_int_mask - Get the mask for the receive interrupt
+ * @priv: Driver private data structure
+ *
+ * Return: The receive interrupt mask used by the driver on this HW
+ */
+static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
+{
+ /* RXNEMP is better suited for our use case as it cannot be cleared
+ * while the FIFO is non-empty, but CAN FD HW does not have it
+ */
+ if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
+ return XCAN_IXR_RXOK_MASK;
+ else
+ return XCAN_IXR_RXNEMP_MASK;
+}
+
+/**
* set_reset_mode - Resets the CAN device mode
* @ndev: Pointer to net_device structure
*
@@ -287,10 +356,10 @@ static int xcan_set_bittiming(struct net_device *ndev)
btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
/* Setting Time Segment 2 in BTR Register */
- btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT;
+ btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
/* Setting Synchronous jump width in BTR Register */
- btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT;
+ btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
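
For concreteness, here is the BTR1 composition from xcan_set_bittiming() above replayed with the CAN FD shifts (TS2 at bit 8, SJW at bit 16) and arbitrary example timing values:

#include <stdio.h>

#define TS2_SHIFT_CANFD  8  /* XCAN_BTR_TS2_SHIFT_CANFD */
#define SJW_SHIFT_CANFD 16  /* XCAN_BTR_SJW_SHIFT_CANFD */

int main(void)
{
	unsigned int prop_seg = 3, phase_seg1 = 5, phase_seg2 = 4, sjw = 2;
	unsigned int btr1;

	btr1 = (prop_seg + phase_seg1 - 1);          /* TS1 field = 7 */
	btr1 |= (phase_seg2 - 1) << TS2_SHIFT_CANFD; /* TS2 field = 3 */
	btr1 |= (sjw - 1) << SJW_SHIFT_CANFD;        /* SJW field = 1 */
	printf("BTR1 = 0x%05x\n", btr1);             /* 0x10307 */
	return 0;
}
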
@@ -318,6 +387,7 @@ static int xcan_chip_start(struct net_device *ndev)
u32 reg_msr, reg_sr_mask;
int err;
unsigned long timeout;
+ u32 ier;
/* Check if it is in reset mode */
err = set_reset_mode(ndev);
@@ -329,7 +399,15 @@ static int xcan_chip_start(struct net_device *ndev)
return err;
/* Enable interrupts */
- priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL);
+ ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
+ XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
+ XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
+
+ if (priv->devtype.flags & XCAN_FLAG_RXMNF)
+ ier |= XCAN_IXR_RXMNF_MASK;
+
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
/* Check whether it is loopback mode or normal mode */
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
@@ -340,6 +418,12 @@ static int xcan_chip_start(struct net_device *ndev)
reg_sr_mask = XCAN_SR_NORMAL_MASK;
}
+ /* enable the first extended filter, if any, as cores with extended
+ * filtering default to non-receipt if all filters are disabled
+ */
+ if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
+ priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
+
priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
@@ -390,34 +474,15 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
/**
- * xcan_start_xmit - Starts the transmission
- * @skb: sk_buff pointer that contains data to be Txed
- * @ndev: Pointer to net_device structure
- *
- * This function is invoked from upper layers to initiate transmission. This
- * function uses the next available free txbuff and populates their fields to
- * start the transmission.
- *
- * Return: 0 on success and failure value on error
+ * xcan_write_frame - Write a frame to HW
+ * @priv: Driver private data structure
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @frame_offset: Register offset to write the frame to
*/
-static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
+ int frame_offset)
{
- struct xcan_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &ndev->stats;
- struct can_frame *cf = (struct can_frame *)skb->data;
u32 id, dlc, data[2] = {0, 0};
- unsigned long flags;
-
- if (can_dropped_invalid_skb(ndev, skb))
- return NETDEV_TX_OK;
-
- /* Check if the TX buffer is full */
- if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
- XCAN_SR_TXFLL_MASK)) {
- netif_stop_queue(ndev);
- netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n");
- return NETDEV_TX_BUSY;
- }
+ struct can_frame *cf = (struct can_frame *)skb->data;
/* Watch carefully on the bit sequence */
if (cf->can_id & CAN_EFF_FLAG) {
@@ -453,24 +518,44 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (cf->can_dlc > 4)
data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
+ priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
+ /* If the CAN frame is RTR frame this write triggers transmission
+ * (not on CAN FD)
+ */
+ priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset),
+ data[0]);
+ /* If the CAN frame is Standard/Extended frame this
+ * write triggers transmission (not on CAN FD)
+ */
+ priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset),
+ data[1]);
+ }
+}
+
+/**
+ * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -ENOSPC if FIFO is full.
+ */
+static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ unsigned long flags;
+
+ /* Check if the TX buffer is full */
+ if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
+ XCAN_SR_TXFLL_MASK))
+ return -ENOSPC;
+
can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_head++;
- /* Write the Frame to Xilinx CAN TX FIFO */
- priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id);
- /* If the CAN frame is RTR frame this write triggers tranmission */
- priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc);
- if (!(cf->can_id & CAN_RTR_FLAG)) {
- priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]);
- /* If the CAN frame is Standard/Extended frame this
- * write triggers tranmission
- */
- priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]);
- stats->tx_bytes += cf->can_dlc;
- }
+ xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);
/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
if (priv->tx_max > 1)
@@ -482,6 +567,70 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return 0;
+}
+
+/**
+ * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -ENOSPC if there is no space
+ */
+static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ unsigned long flags;
+
+ if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
+ BIT(XCAN_TX_MAILBOX_IDX)))
+ return -ENOSPC;
+
+ can_put_echo_skb(skb, ndev, 0);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ priv->tx_head++;
+
+ xcan_write_frame(priv, skb,
+ XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
+
+ /* Mark buffer as ready for transmit */
+ priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));
+
+ netif_stop_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ return 0;
+}
+
+/**
+ * xcan_start_xmit - Starts the transmission
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * This function is invoked from upper layers to initiate transmission.
+ *
+ * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
+ */
+static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
+ ret = xcan_start_xmit_mailbox(skb, ndev);
+ else
+ ret = xcan_start_xmit_fifo(skb, ndev);
+
+ if (ret < 0) {
+ netdev_err(ndev, "BUG!, TX full when queue awake!\n");
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
return NETDEV_TX_OK;
}
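
Both transmit backends above signal a full queue with -ENOSPC, and xcan_start_xmit() maps that one error code to NETDEV_TX_BUSY after stopping the queue. A toy sketch of the dispatch-and-map pattern, with stub backends:

#include <errno.h>
#include <stdio.h>

static int xmit_fifo(void)    { return -ENOSPC; } /* pretend FIFO is full */
static int xmit_mailbox(void) { return 0; }       /* pretend mailbox free */

int main(void)
{
	int use_mailboxes = 0; /* stand-in for the devtype flag */
	int ret = use_mailboxes ? xmit_mailbox() : xmit_fifo();

	if (ret < 0)
		printf("stop queue, return NETDEV_TX_BUSY\n");
	else
		printf("return NETDEV_TX_OK\n");
	return 0;
}
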
@@ -489,13 +638,14 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* xcan_rx - Is called from CAN isr to complete the received
* frame processing
* @ndev: Pointer to net_device structure
+ * @frame_base: Register offset to the frame to be read
*
* This function is invoked from the CAN isr(poll) to process the Rx frames. It
* does minimal processing and invokes "netif_receive_skb" to complete further
* processing.
* Return: 1 on success and 0 on failure.
*/
-static int xcan_rx(struct net_device *ndev)
+static int xcan_rx(struct net_device *ndev, int frame_base)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
@@ -510,9 +660,9 @@ static int xcan_rx(struct net_device *ndev)
}
/* Read a frame from Xilinx zynq CANPS */
- id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET);
- dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >>
- XCAN_DLCR_DLC_SHIFT;
+ id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
+ dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
+ XCAN_DLCR_DLC_SHIFT;
/* Change Xilinx CAN data length format to socketCAN data format */
cf->can_dlc = get_can_dlc(dlc);
@@ -535,8 +685,8 @@ static int xcan_rx(struct net_device *ndev)
}
/* DW1/DW2 must always be read to remove message from RXFIFO */
- data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
- data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);
+ data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
+ data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));
if (!(cf->can_id & CAN_RTR_FLAG)) {
/* Change Xilinx CAN data format to socketCAN data format */
@@ -594,39 +744,19 @@ static void xcan_set_error_state(struct net_device *ndev,
u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
u32 txerr = ecr & XCAN_ECR_TEC_MASK;
u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
+ enum can_state tx_state = txerr >= rxerr ? new_state : 0;
+ enum can_state rx_state = txerr <= rxerr ? new_state : 0;
+
+ /* non-ERROR states are handled elsewhere */
+ if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
+ return;
- priv->can.state = new_state;
+ can_change_state(ndev, cf, tx_state, rx_state);
if (cf) {
- cf->can_id |= CAN_ERR_CRTL;
cf->data[6] = txerr;
cf->data[7] = rxerr;
}
-
- switch (new_state) {
- case CAN_STATE_ERROR_PASSIVE:
- priv->can.can_stats.error_passive++;
- if (cf)
- cf->data[1] = (rxerr > 127) ?
- CAN_ERR_CRTL_RX_PASSIVE :
- CAN_ERR_CRTL_TX_PASSIVE;
- break;
- case CAN_STATE_ERROR_WARNING:
- priv->can.can_stats.error_warning++;
- if (cf)
- cf->data[1] |= (txerr > rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- break;
- case CAN_STATE_ERROR_ACTIVE:
- if (cf)
- cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
- break;
- default:
- /* non-ERROR states are handled elsewhere */
- WARN_ON(1);
- break;
- }
}
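
The tx_state/rx_state derivation above hands the new state to whichever side has the higher error counter and 0 (CAN_STATE_ERROR_ACTIVE) to the other; can_change_state() then takes the worse of the two. A small sketch with example counter values:

#include <stdio.h>

enum state { ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE };

int main(void)
{
	unsigned int txerr = 140, rxerr = 20; /* example counters */
	enum state new_state = ERROR_PASSIVE;
	enum state tx_state = txerr >= rxerr ? new_state : ERROR_ACTIVE;
	enum state rx_state = txerr <= rxerr ? new_state : ERROR_ACTIVE;

	/* the worse (larger) of the two becomes the device state */
	printf("tx=%d rx=%d worst=%d\n", tx_state, rx_state,
	       tx_state > rx_state ? tx_state : rx_state);
	return 0;
}
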
/**
@@ -703,7 +833,8 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
} else {
enum can_state new_state = xcan_current_error_state(ndev);
- xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+ if (new_state != priv->can.state)
+ xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
}
/* Check for Arbitration lost interrupt */
@@ -725,6 +856,17 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
}
}
+ /* Check for RX Match Not Finished interrupt */
+ if (isr & XCAN_IXR_RXMNF_MASK) {
+ stats->rx_dropped++;
+ stats->rx_errors++;
+ netdev_err(ndev, "RX match not finished, frame discarded\n");
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_UNSPEC;
+ }
+ }
+
/* Check for error interrupt */
if (isr & XCAN_IXR_ERROR_MASK) {
if (skb)
@@ -809,6 +951,44 @@ static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
}
/**
+ * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
+ * @priv: Driver private data structure
+ *
+ * Return: Register offset of the next frame in RX FIFO, or -ENOENT if
+ * the FIFO is empty.
+ */
+static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
+{
+ int offset;
+
+ if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
+ u32 fsr;
+
+ /* clear RXOK before the is-empty check so that any newly
+ * received frame will reassert it without a race
+ */
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);
+
+ fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
+
+ /* check if RX FIFO is empty */
+ if (!(fsr & XCAN_FSR_FL_MASK))
+ return -ENOENT;
+
+ offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
+
+ } else {
+ /* check if RX FIFO is empty */
+ if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
+ XCAN_IXR_RXNEMP_MASK))
+ return -ENOENT;
+
+ /* frames are read from a static offset */
+ offset = XCAN_RXFIFO_OFFSET;
+ }
+
+ return offset;
+}
+
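
A worked example of the FSR decode above: with an illustrative fsr of 0x0203, the fill-level field is 2, the read index is 3, and the next frame therefore starts at 0x1100 + 0x48 * 3 = 0x11d8.

#include <stdio.h>

#define FSR_FL_MASK 0x00003F00 /* XCAN_FSR_FL_MASK */
#define FSR_RI_MASK 0x0000001F /* XCAN_FSR_RI_MASK */
#define RXMSG_BASE  0x1100
#define FRAME_SIZE  0x48

int main(void)
{
	unsigned int fsr = 0x0203; /* example register value */

	if (!(fsr & FSR_FL_MASK))
		return 0; /* FIFO empty */
	printf("fill level %u, next frame at 0x%x\n",
	       (fsr & FSR_FL_MASK) >> 8,
	       RXMSG_BASE + FRAME_SIZE * (fsr & FSR_RI_MASK));
	return 0;
}
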
+/**
* xcan_rx_poll - Poll routine for rx packets (NAPI)
* @napi: napi structure pointer
* @quota: Max number of rx packets to be processed.
@@ -822,14 +1002,24 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
struct net_device *ndev = napi->dev;
struct xcan_priv *priv = netdev_priv(ndev);
- u32 isr, ier;
+ u32 ier;
int work_done = 0;
-
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
- while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
- work_done += xcan_rx(ndev);
- priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ int frame_offset;
+
+ while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
+ (work_done < quota)) {
+ work_done += xcan_rx(ndev, frame_offset);
+
+ if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
+ /* increment read index */
+ priv->write_reg(priv, XCAN_FSR_OFFSET,
+ XCAN_FSR_IRI_MASK);
+ else
+ /* clear rx-not-empty (will actually clear only if
+ * empty)
+ */
+ priv->write_reg(priv, XCAN_ICR_OFFSET,
+ XCAN_IXR_RXNEMP_MASK);
}
if (work_done) {
@@ -840,7 +1030,7 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
if (work_done < quota) {
napi_complete_done(napi, work_done);
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier |= XCAN_IXR_RXNEMP_MASK;
+ ier |= xcan_rx_int_mask(priv);
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
}
return work_done;
@@ -908,8 +1098,8 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
}
while (frames_sent--) {
- can_get_echo_skb(ndev, priv->tx_tail %
- priv->tx_max);
+ stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
+ priv->tx_max);
priv->tx_tail++;
stats->tx_packets++;
}
@@ -939,6 +1129,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
struct xcan_priv *priv = netdev_priv(ndev);
u32 isr, ier;
u32 isr_errors;
+ u32 rx_int_mask = xcan_rx_int_mask(priv);
/* Get the interrupt status from Xilinx CAN */
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -958,16 +1149,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
/* Check for the type of error interrupt and Processing it */
isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
- XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
+ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
+ XCAN_IXR_RXMNF_MASK);
if (isr_errors) {
priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
xcan_err_interrupt(ndev, isr);
}
/* Check for the type of receive interrupt and Processing it */
- if (isr & XCAN_IXR_RXNEMP_MASK) {
+ if (isr & rx_int_mask) {
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier &= ~XCAN_IXR_RXNEMP_MASK;
+ ier &= ~rx_int_mask;
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
napi_schedule(&priv->napi);
}
@@ -1214,13 +1406,35 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
};
static const struct xcan_devtype_data xcan_zynq_data = {
- .caps = XCAN_CAP_WATERMARK,
+	.flags = XCAN_FLAG_TXFEMP,
+	.bittiming_const = &xcan_bittiming_const,
+ .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
+ .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
+ .bus_clk_name = "pclk",
+};
+
+static const struct xcan_devtype_data xcan_axi_data = {
+ .bittiming_const = &xcan_bittiming_const,
+ .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
+ .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
+ .bus_clk_name = "s_axi_aclk",
+};
+
+static const struct xcan_devtype_data xcan_canfd_data = {
+ .flags = XCAN_FLAG_EXT_FILTERS |
+ XCAN_FLAG_RXMNF |
+ XCAN_FLAG_TX_MAILBOXES |
+ XCAN_FLAG_RX_FIFO_MULTI,
+ .bittiming_const = &xcan_bittiming_const,
+ .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
+ .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
+ .bus_clk_name = "s_axi_aclk",
};
/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
- { .compatible = "xlnx,axi-can-1.00.a", },
+ { .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
+ { .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
@@ -1240,9 +1454,12 @@ static int xcan_probe(struct platform_device *pdev)
struct net_device *ndev;
struct xcan_priv *priv;
const struct of_device_id *of_id;
- int caps = 0;
+ const struct xcan_devtype_data *devtype = &xcan_axi_data;
void __iomem *addr;
- int ret, rx_max, tx_max, tx_fifo_depth;
+ int ret;
+ int rx_max, tx_max;
+ int hw_tx_max, hw_rx_max;
+ const char *hw_tx_max_property;
/* Get the virtual base address for the device */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1252,25 +1469,33 @@ static int xcan_probe(struct platform_device *pdev)
goto err;
}
- ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
- &tx_fifo_depth);
- if (ret < 0)
- goto err;
+ of_id = of_match_device(xcan_of_match, &pdev->dev);
+ if (of_id && of_id->data)
+ devtype = of_id->data;
- ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max);
- if (ret < 0)
- goto err;
+ hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
+ "tx-mailbox-count" : "tx-fifo-depth";
- of_id = of_match_device(xcan_of_match, &pdev->dev);
- if (of_id) {
- const struct xcan_devtype_data *devtype_data = of_id->data;
+ ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
+ &hw_tx_max);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "missing %s property\n",
+ hw_tx_max_property);
+ goto err;
+ }
- if (devtype_data)
- caps = devtype_data->caps;
+ ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
+ &hw_rx_max);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "missing rx-fifo-depth property (mailbox mode is not supported)\n");
+ goto err;
}
- /* There is no way to directly figure out how many frames have been
- * sent when the TXOK interrupt is processed. If watermark programming
+ /* With TX FIFO:
+ *
+ * There is no way to directly figure out how many frames have been
+ * sent when the TXOK interrupt is processed. If TXFEMP
* is supported, we can have 2 frames in the FIFO and use TXFEMP
* to determine if 1 or 2 frames have been sent.
* Theoretically we should be able to use TXFWMEMP to determine up
@@ -1279,12 +1504,20 @@ static int xcan_probe(struct platform_device *pdev)
* than 2 frames in FIFO) is set anyway with no TXOK (a frame was
* sent), which is not a sensible state - possibly TXFWMEMP is not
* completely synchronized with the rest of the bits?
+ *
+ * With TX mailboxes:
+ *
+ * HW sends frames in CAN ID priority order. To preserve FIFO ordering
+ * we submit frames one at a time.
*/
- if (caps & XCAN_CAP_WATERMARK)
- tx_max = min(tx_fifo_depth, 2);
+ if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
+ (devtype->flags & XCAN_FLAG_TXFEMP))
+ tx_max = min(hw_tx_max, 2);
else
tx_max = 1;
+ rx_max = hw_rx_max;
+
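
The tx_max selection above can be replayed in isolation: only a TX FIFO with the TXFEMP interrupt allows two frames in flight; mailbox mode and plain FIFOs are pinned to one. A sketch with local copies of the flag values:

#include <stdio.h>

#define FLAG_TXFEMP       0x0001 /* local copy of XCAN_FLAG_TXFEMP */
#define FLAG_TX_MAILBOXES 0x0008 /* local copy of XCAN_FLAG_TX_MAILBOXES */

static int pick_tx_max(unsigned int flags, int hw_tx_max)
{
	if (!(flags & FLAG_TX_MAILBOXES) && (flags & FLAG_TXFEMP))
		return hw_tx_max < 2 ? hw_tx_max : 2;
	return 1;
}

int main(void)
{
	printf("%d\n", pick_tx_max(FLAG_TXFEMP, 64));       /* 2 */
	printf("%d\n", pick_tx_max(FLAG_TX_MAILBOXES, 16)); /* 1 */
	return 0;
}
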
/* Create a CAN device instance */
ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
if (!ndev)
@@ -1292,13 +1525,14 @@ static int xcan_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->dev = &pdev->dev;
- priv->can.bittiming_const = &xcan_bittiming_const;
+ priv->can.bittiming_const = devtype->bittiming_const;
priv->can.do_set_mode = xcan_do_set_mode;
priv->can.do_get_berr_counter = xcan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
priv->reg_base = addr;
priv->tx_max = tx_max;
+ priv->devtype = *devtype;
spin_lock_init(&priv->tx_lock);
/* Get IRQ for the device */
@@ -1316,22 +1550,12 @@ static int xcan_probe(struct platform_device *pdev)
ret = PTR_ERR(priv->can_clk);
goto err_free;
}
- /* Check for type of CAN device */
- if (of_device_is_compatible(pdev->dev.of_node,
- "xlnx,zynq-can-1.0")) {
- priv->bus_clk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(priv->bus_clk)) {
- dev_err(&pdev->dev, "bus clock not found\n");
- ret = PTR_ERR(priv->bus_clk);
- goto err_free;
- }
- } else {
- priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
- if (IS_ERR(priv->bus_clk)) {
- dev_err(&pdev->dev, "bus clock not found\n");
- ret = PTR_ERR(priv->bus_clk);
- goto err_free;
- }
+
+ priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
+ if (IS_ERR(priv->bus_clk)) {
+ dev_err(&pdev->dev, "bus clock not found\n");
+ ret = PTR_ERR(priv->bus_clk);
+ goto err_free;
}
priv->write_reg = xcan_write_reg_le;
@@ -1364,9 +1588,9 @@ static int xcan_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
- netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
- priv->reg_base, ndev->irq, priv->can.clock.freq,
- tx_fifo_depth, priv->tx_max);
+ netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
+ priv->reg_base, ndev->irq, priv->can.clock.freq,
+ hw_tx_max, priv->tx_max);
return 0;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 2b81b97e994f..d3ce1e4cb4d3 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -5,7 +5,7 @@ source "drivers/net/dsa/b53/Kconfig"
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
- depends on HAS_IOMEM && NET_DSA && OF_MDIO
+ depends on HAS_IOMEM && NET_DSA
select NET_DSA_TAG_BRCM
select FIXED_PHY
select BCM7XXX_PHY
@@ -52,6 +52,17 @@ config NET_DSA_QCA8K
This enables support for the Qualcomm Atheros QCA8K Ethernet
switch chips.
+config NET_DSA_REALTEK_SMI
+ tristate "Realtek SMI Ethernet switch family support"
+ depends on NET_DSA
+ select FIXED_PHY
+ select IRQ_DOMAIN
+ select REALTEK_PHY
+ select REGMAP
+ ---help---
+ This enables support for the Realtek SMI-based switch
+ chips, currently only RTL8366RB.
+
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
@@ -76,4 +87,15 @@ config NET_DSA_SMSC_LAN9303_MDIO
Enable access functions if the SMSC/Microchip LAN9303 is configured
for MDIO managed mode.
+config NET_DSA_VITESSE_VSC73XX
+ tristate "Vitesse VSC7385/7388/7395/7398 support"
+ depends on OF && SPI
+ depends on NET_DSA
+ select FIXED_PHY
+ select VITESSE_PHY
+ select GPIOLIB
+ ---help---
+ This enables support for the Vitesse VSC7385, VSC7388,
+ VSC7395 and VSC7398 SparX integrated ethernet switches.
+
endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 15c2a831edf1..46c1cba91ffe 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -8,9 +8,12 @@ endif
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
+obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek.o
+realtek-objs := realtek-smi.o rtl8366.o rtl8366rb.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
+obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx.o
obj-y += b53/
obj-y += microchip/
obj-y += mv88e6xxx/
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 8247481eaa06..91de2ba99ad1 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -374,6 +374,7 @@ static const struct of_device_id b53_srab_of_match[] = {
{ .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,cygnus-srab", .data = (void *)BCM583XX_DEVICE_ID },
{ .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,omega-srab", .data = (void *)BCM583XX_DEVICE_ID },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, b53_srab_of_match);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 02e8982519ce..e0066adcd2f3 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -166,6 +166,11 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
reg &= ~P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+ /* Enable learning */
+ reg = core_readl(priv, CORE_DIS_LEARN);
+ reg &= ~BIT(port);
+ core_writel(priv, reg, CORE_DIS_LEARN);
+
/* Enable Broadcom tags for that port if requested */
if (priv->brcm_tag_mask & BIT(port))
b53_brcm_hdr_setup(ds, port);
@@ -220,10 +225,15 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- u32 off, reg;
+ u32 reg;
- if (priv->wol_ports_mask & (1 << port))
+ /* Disable learning while in WoL mode */
+ if (priv->wol_ports_mask & (1 << port)) {
+ reg = core_readl(priv, CORE_DIS_LEARN);
+ reg |= BIT(port);
+ core_writel(priv, reg, CORE_DIS_LEARN);
return;
+ }
if (port == priv->moca_port)
bcm_sf2_port_intr_disable(priv, port);
@@ -231,11 +241,6 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, false);
- if (dsa_is_cpu_port(ds, port))
- off = CORE_IMP_CTL;
- else
- off = CORE_G_PCTL_PORT(port);
-
b53_disable_port(ds, port, phy);
/* Power down the port memory */
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index b89acaee12d4..47c5f272a084 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -732,6 +732,8 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
struct ethtool_rx_flow_spec *fs)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ s8 cpu_port = ds->ports[port].cpu_dp->index;
+ __u64 ring_cookie = fs->ring_cookie;
unsigned int queue_num, port_num;
int ret = -EINVAL;
@@ -748,21 +750,28 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
fs->location > bcm_sf2_cfp_rule_size(priv))
return -EINVAL;
+ /* This rule is a Wake-on-LAN filter and we must specifically
+ * target the CPU port in order for it to be working.
+ */
+ if (ring_cookie == RX_CLS_FLOW_WAKE)
+ ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;
+
/* We do not support discarding packets, check that the
* destination port is enabled and that we are within the
* number of ports supported by the switch
*/
- port_num = fs->ring_cookie / SF2_NUM_EGRESS_QUEUES;
+ port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;
- if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
- !dsa_is_user_port(ds, port_num) ||
+ if (ring_cookie == RX_CLS_FLOW_DISC ||
+ !(dsa_is_user_port(ds, port_num) ||
+ dsa_is_cpu_port(ds, port_num)) ||
port_num >= priv->hw_params.num_ports)
return -EINVAL;
/*
* We have a small oddity where Port 6 just does not have a
* valid bit here (so we substract by one).
*/
- queue_num = fs->ring_cookie % SF2_NUM_EGRESS_QUEUES;
+ queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
if (port_num >= 7)
port_num -= 1;
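
A worked example of the ring_cookie arithmetic above, assuming SF2_NUM_EGRESS_QUEUES is 8 (an assumption for illustration): a Wake-on-LAN rule targeting CPU port 8 gets cookie 64, which divides back into port 8, queue 0, and ports >= 7 are then shifted down by one for the missing port-6 valid bit.

#include <stdio.h>

#define NUM_EGRESS_QUEUES 8 /* assumed value of SF2_NUM_EGRESS_QUEUES */

int main(void)
{
	unsigned int cpu_port = 8; /* example CPU port */
	unsigned long long cookie = cpu_port * NUM_EGRESS_QUEUES;
	unsigned int port_num = cookie / NUM_EGRESS_QUEUES;
	unsigned int queue_num = cookie % NUM_EGRESS_QUEUES;

	if (port_num >= 7)
		port_num -= 1; /* no valid bit for port 6 */
	printf("cookie %llu -> register port %u, queue %u\n",
	       cookie, port_num, queue_num);
	return 0;
}
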
@@ -1187,6 +1196,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
+ struct net_device *p = ds->ports[port].cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1213,12 +1223,23 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
mutex_unlock(&priv->cfp.lock);
+ if (ret)
+ return ret;
+
+ /* Pass up the commands to the attached master network device */
+ if (p->ethtool_ops->get_rxnfc) {
+ ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ }
+
return ret;
}
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc)
{
+ struct net_device *p = ds->ports[port].cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1239,6 +1260,23 @@ int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
mutex_unlock(&priv->cfp.lock);
+ if (ret)
+ return ret;
+
+ /* Pass up the commands to the attached master network device.
+ * This can fail, so rollback the operation if we need to.
+ */
+ if (p->ethtool_ops->set_rxnfc) {
+ ret = p->ethtool_ops->set_rxnfc(p, nfc);
+ if (ret && ret != -EOPNOTSUPP) {
+ mutex_lock(&priv->cfp.lock);
+ bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
+ mutex_unlock(&priv->cfp.lock);
+ } else {
+ ret = 0;
+ }
+ }
+
return ret;
}
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 3ccd5a865dcb..0a1e530d52b7 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -168,6 +168,8 @@ enum bcm_sf2_reg_offs {
#define CORE_SWITCH_CTRL 0x00088
#define MII_DUMB_FWDG_EN (1 << 6)
+#define CORE_DIS_LEARN 0x000f0
+
#define CORE_SFT_LRN_CTRL 0x000f8
#define SW_LEARN_CNTL(x) (1 << (x))
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index bb28c701381a..8da3d39e3218 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -524,7 +524,7 @@ int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update)
}
static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
- int link, int speed, int duplex,
+ int link, int speed, int duplex, int pause,
phy_interface_t mode)
{
int err;
@@ -543,6 +543,12 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
goto restore_link;
}
+ if (chip->info->ops->port_set_pause) {
+ err = chip->info->ops->port_set_pause(chip, port, pause);
+ if (err)
+ goto restore_link;
+ }
+
if (chip->info->ops->port_set_duplex) {
err = chip->info->ops->port_set_duplex(chip, port, duplex);
if (err && err != -EOPNOTSUPP)
@@ -584,17 +590,100 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_port_setup_mac(chip, port, phydev->link, phydev->speed,
- phydev->duplex, phydev->interface);
+ phydev->duplex, phydev->pause,
+ phydev->interface);
mutex_unlock(&chip->reg_lock);
if (err && err != -EOPNOTSUPP)
dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
}
+static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port,
+ unsigned long *mask,
+ struct phylink_link_state *state)
+{
+ if (!phy_interface_mode_is_8023z(state->interface)) {
+ /* 10M and 100M are only supported in non-802.3z mode */
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ }
+}
+
+static void mv88e6185_phylink_validate(struct mv88e6xxx_chip *chip, int port,
+ unsigned long *mask,
+ struct phylink_link_state *state)
+{
+ /* FIXME: if the port is in 1000Base-X mode, then it only supports
+ * 1000M FD speeds. In this case, CMODE will indicate 5.
+ */
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ mv88e6065_phylink_validate(chip, port, mask, state);
+}
+
+static void mv88e6352_phylink_validate(struct mv88e6xxx_chip *chip, int port,
+ unsigned long *mask,
+ struct phylink_link_state *state)
+{
+ /* No ethtool bits for 200Mbps */
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ mv88e6065_phylink_validate(chip, port, mask, state);
+}
+
+static void mv88e6390_phylink_validate(struct mv88e6xxx_chip *chip, int port,
+ unsigned long *mask,
+ struct phylink_link_state *state)
+{
+ if (port >= 9)
+ phylink_set(mask, 2500baseX_Full);
+
+ /* No ethtool bits for 200Mbps */
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ mv88e6065_phylink_validate(chip, port, mask, state);
+}
+
+static void mv88e6390x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
+ unsigned long *mask,
+ struct phylink_link_state *state)
+{
+ if (port >= 9) {
+ phylink_set(mask, 10000baseT_Full);
+ phylink_set(mask, 10000baseKR_Full);
+ }
+
+ mv88e6390_phylink_validate(chip, port, mask, state);
+}
+
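
The validate helpers above chain from the most capable family down to the mv88e6065 base, each ORing in the link modes its ports support before delegating. A toy sketch of the pattern; the flag bits are stand-ins, not real ethtool link-mode numbers.

#include <stdio.h>

#define M_100FD  (1u << 0) /* stand-in for 100baseT_Full */
#define M_1000FD (1u << 1) /* stand-in for 1000baseT_Full */
#define M_10GKR  (1u << 2) /* stand-in for 10000baseKR_Full */

static void base_validate(unsigned int *mask)
{
	*mask |= M_100FD; /* 10/100M modes common to all families */
}

static void mv6390_validate(unsigned int *mask)
{
	*mask |= M_1000FD;
	base_validate(mask);
}

static void mv6390x_validate(unsigned int *mask, int port)
{
	if (port >= 9)
		*mask |= M_10GKR; /* 10G only on the high ports */
	mv6390_validate(mask);
}

int main(void)
{
	unsigned int mask = 0;

	mv6390x_validate(&mask, 9);
	printf("port 9 mask: 0x%x\n", mask); /* 0x7: all three modes */
	return 0;
}
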
static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ /* Allow all the expected bits */
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set_port_modes(mask);
+
+ if (chip->info->ops->phylink_validate)
+ chip->info->ops->phylink_validate(chip, port, mask, state);
+
+ bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ /* We can only operate at 2500BaseX or 1000BaseX. If requested
+ * to advertise both, only report advertising at 2500BaseX.
+ */
+ phylink_helper_basex_speed(state);
}
static int mv88e6xxx_link_state(struct dsa_switch *ds, int port,
@@ -604,7 +693,10 @@ static int mv88e6xxx_link_state(struct dsa_switch *ds, int port,
int err;
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_port_link_state(chip, port, state);
+ if (chip->info->ops->port_link_state)
+ err = chip->info->ops->port_link_state(chip, port, state);
+ else
+ err = -EOPNOTSUPP;
mutex_unlock(&chip->reg_lock);
return err;
@@ -615,7 +707,7 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
const struct phylink_link_state *state)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int speed, duplex, link, err;
+ int speed, duplex, link, pause, err;
if (mode == MLO_AN_PHY)
return;
@@ -629,9 +721,10 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
duplex = DUPLEX_UNFORCED;
link = LINK_UNFORCED;
}
+ pause = !!phylink_test(state->advertising, Pause);
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex,
+ err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex, pause,
state->interface);
mutex_unlock(&chip->reg_lock);
@@ -2080,6 +2173,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
int err;
u16 reg;
+ chip->ports[port].chip = chip;
+ chip->ports[port].port = port;
+
/* MAC Forcing register: don't force link, speed, duplex or flow control
* state to any particular values on physical ports, but force the CPU
* port and all DSA ports to their maximum bandwidth and full duplex.
@@ -2087,10 +2183,12 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
err = mv88e6xxx_port_setup_mac(chip, port, LINK_FORCED_UP,
SPEED_MAX, DUPLEX_FULL,
+ PAUSE_OFF,
PHY_INTERFACE_MODE_NA);
else
err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
SPEED_UNFORCED, DUPLEX_UNFORCED,
+ PAUSE_ON,
PHY_INTERFACE_MODE_NA);
if (err)
return err;
@@ -2239,7 +2337,12 @@ static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port,
int err;
mutex_lock(&chip->reg_lock);
+
err = mv88e6xxx_serdes_power(chip, port, true);
+
+ if (!err && chip->info->ops->serdes_irq_setup)
+ err = chip->info->ops->serdes_irq_setup(chip, port);
+
mutex_unlock(&chip->reg_lock);
return err;
@@ -2251,8 +2354,13 @@ static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
mutex_lock(&chip->reg_lock);
+
+ if (chip->info->ops->serdes_irq_free)
+ chip->info->ops->serdes_irq_free(chip, port);
+
if (mv88e6xxx_serdes_power(chip, port, false))
dev_err(chip->dev, "failed to power off SERDES\n");
+
mutex_unlock(&chip->reg_lock);
}
@@ -2286,6 +2394,7 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ u8 cmode;
int err;
int i;
@@ -2294,6 +2403,17 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
mutex_lock(&chip->reg_lock);
+ /* Cache the cmode of each port. */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
+ if (chip->info->ops->port_get_cmode) {
+ err = chip->info->ops->port_get_cmode(chip, i, &cmode);
+ if (err)
+ goto unlock;
+
+ chip->ports[i].cmode = cmode;
+ }
+ }
+
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
if (dsa_is_unused_port(ds, i))
@@ -2601,6 +2721,8 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2617,6 +2739,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6095_ops = {
@@ -2632,6 +2755,8 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
.port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
+ .port_link_state = mv88e6185_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2643,6 +2768,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6097_ops = {
@@ -2665,6 +2791,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2679,6 +2807,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6123_ops = {
@@ -2696,6 +2825,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2709,6 +2840,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6131_ops = {
@@ -2729,6 +2861,9 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
+ .port_set_pause = mv88e6185_port_set_pause,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2744,6 +2879,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6141_ops = {
@@ -2769,6 +2905,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -2784,6 +2922,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6341_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
+ .phylink_validate = mv88e6390_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6161_ops = {
@@ -2806,6 +2945,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2819,6 +2960,9 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .avb_ops = &mv88e6165_avb_ops,
+ .ptp_ops = &mv88e6165_ptp_ops,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6165_ops = {
@@ -2834,6 +2978,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2847,6 +2993,9 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .avb_ops = &mv88e6165_avb_ops,
+ .ptp_ops = &mv88e6165_ptp_ops,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6171_ops = {
@@ -2870,6 +3019,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2883,6 +3034,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6172_ops = {
@@ -2908,6 +3060,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2924,6 +3078,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
+ .phylink_validate = mv88e6352_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
@@ -2947,6 +3102,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2960,6 +3117,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -2985,6 +3143,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3001,6 +3161,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
+ .phylink_validate = mv88e6352_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
@@ -3017,6 +3178,9 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
+ .port_set_pause = mv88e6185_port_set_pause,
+ .port_link_state = mv88e6185_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3032,6 +3196,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6190_ops = {
@@ -3053,6 +3218,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3068,7 +3235,10 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
+ .serdes_irq_setup = mv88e6390_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
+ .phylink_validate = mv88e6390_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6190x_ops = {
@@ -3090,6 +3260,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3104,8 +3276,11 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.rmu_disable = mv88e6390_g1_rmu_disable,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
+ .serdes_power = mv88e6390x_serdes_power,
+ .serdes_irq_setup = mv88e6390_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
+ .phylink_validate = mv88e6390x_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
@@ -3127,6 +3302,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3142,6 +3319,11 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
+ .serdes_irq_setup = mv88e6390_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390_serdes_irq_free,
+ .avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6390_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6240_ops = {
@@ -3167,6 +3349,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3184,6 +3368,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.serdes_power = mv88e6352_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6352_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6290_ops = {
@@ -3206,6 +3392,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3221,8 +3409,12 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
+ .serdes_irq_setup = mv88e6390_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6390_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
@@ -3247,6 +3439,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3261,6 +3455,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
@@ -3285,6 +3481,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3297,6 +3495,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6341_ops = {
@@ -3322,6 +3522,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3338,6 +3540,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.serdes_power = mv88e6341_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6390_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6350_ops = {
@@ -3361,6 +3565,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3374,6 +3580,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6351_ops = {
@@ -3397,6 +3604,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3411,6 +3620,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6185_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6352_ops = {
@@ -3436,6 +3647,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3453,9 +3666,11 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.serdes_power = mv88e6352_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
.serdes_get_sset_count = mv88e6352_serdes_get_sset_count,
.serdes_get_strings = mv88e6352_serdes_get_strings,
.serdes_get_stats = mv88e6352_serdes_get_stats,
+ .phylink_validate = mv88e6352_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6390_ops = {
@@ -3480,6 +3695,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3495,8 +3712,12 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
+ .serdes_irq_setup = mv88e6390_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6390_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
@@ -3521,6 +3742,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+ .port_get_cmode = mv88e6352_port_get_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3535,9 +3758,13 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.rmu_disable = mv88e6390_g1_rmu_disable,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
+ .serdes_power = mv88e6390x_serdes_power,
+ .serdes_irq_setup = mv88e6390_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_validate = mv88e6390x_phylink_validate,
};
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
@@ -3689,6 +3916,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.pvt = true,
.multi_chip = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
+ .ptp_support = true,
.ops = &mv88e6161_ops,
},
@@ -3711,6 +3939,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.pvt = true,
.multi_chip = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
+ .ptp_support = true,
.ops = &mv88e6165_ops,
},
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 8ac3fbb15352..f9ecb7872d32 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -155,6 +155,7 @@ struct mv88e6xxx_bus_ops;
struct mv88e6xxx_irq_ops;
struct mv88e6xxx_gpio_ops;
struct mv88e6xxx_avb_ops;
+struct mv88e6xxx_ptp_ops;
struct mv88e6xxx_irq {
u16 masked;
@@ -190,12 +191,16 @@ struct mv88e6xxx_port_hwtstamp {
};
struct mv88e6xxx_port {
+ struct mv88e6xxx_chip *chip;
+ int port;
u64 serdes_stats[2];
u64 atu_member_violation;
u64 atu_miss_violation;
u64 atu_full_violation;
u64 vtu_member_violation;
u64 vtu_miss_violation;
+ u8 cmode;
+ int serdes_irq;
};
struct mv88e6xxx_chip {
@@ -273,6 +278,7 @@ struct mv88e6xxx_chip {
struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO];
u16 trig_config;
u16 evcap_config;
+ u16 enable_count;
/* Per-port timestamping resources. */
struct mv88e6xxx_port_hwtstamp port_hwtstamp[DSA_MAX_PORTS];
@@ -349,6 +355,13 @@ struct mv88e6xxx_ops {
*/
int (*port_set_duplex)(struct mv88e6xxx_chip *chip, int port, int dup);
+#define PAUSE_ON 1
+#define PAUSE_OFF 0
+
+ /* Enable/disable sending Pause */
+ int (*port_set_pause)(struct mv88e6xxx_chip *chip, int port,
+ int pause);
+
#define SPEED_MAX INT_MAX
#define SPEED_UNFORCED -2
@@ -381,12 +394,16 @@ struct mv88e6xxx_ops {
*/
int (*port_set_cmode)(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
+ int (*port_get_cmode)(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
/* Some devices have a per port register indicating what is
* the upstream port this port should forward to.
*/
int (*port_set_upstream_port)(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
+ /* Return the port link state, as required by phylink */
+ int (*port_link_state)(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_link_state *state);
/* Snapshot the statistics for a port. The statistics can then
* be read back at leisure but still with a consistent view.
@@ -418,6 +435,10 @@ struct mv88e6xxx_ops {
/* Power on/off a SERDES interface */
int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, bool on);
+ /* SERDES interrupt handling */
+ int (*serdes_irq_setup)(struct mv88e6xxx_chip *chip, int port);
+ void (*serdes_irq_free)(struct mv88e6xxx_chip *chip, int port);
+
/* Statistics from the SERDES interface */
int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port);
int (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port,
@@ -439,6 +460,14 @@ struct mv88e6xxx_ops {
/* Remote Management Unit operations */
int (*rmu_disable)(struct mv88e6xxx_chip *chip);
+
+ /* Precision Time Protocol operations */
+ const struct mv88e6xxx_ptp_ops *ptp_ops;
+
+ /* Phylink */
+ void (*phylink_validate)(struct mv88e6xxx_chip *chip, int port,
+ unsigned long *mask,
+ struct phylink_link_state *state);
};
struct mv88e6xxx_irq_ops {
@@ -486,6 +515,24 @@ struct mv88e6xxx_avb_ops {
int (*tai_write)(struct mv88e6xxx_chip *chip, int addr, u16 data);
};
+struct mv88e6xxx_ptp_ops {
+ u64 (*clock_read)(const struct cyclecounter *cc);
+ int (*ptp_enable)(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on);
+ int (*ptp_verify)(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan);
+ void (*event_work)(struct work_struct *ugly);
+ int (*port_enable)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_disable)(struct mv88e6xxx_chip *chip, int port);
+ int (*global_enable)(struct mv88e6xxx_chip *chip);
+ int (*global_disable)(struct mv88e6xxx_chip *chip);
+ int n_ext_ts;
+ int arr0_sts_reg;
+ int arr1_sts_reg;
+ int dep_sts_reg;
+ u32 rx_filters;
+};
+
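Dispatch through this new table follows the driver's existing optional-hook convention: consumers reach it via chip->info->ops->ptp_ops and must tolerate absent hooks. A minimal caller sketch (example_global_enable is hypothetical; the real call sites are in hwtstamp.c below):

static int example_global_enable(struct mv88e6xxx_chip *chip)
{
	const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;

	/* Hooks such as global_enable are optional; chips without a
	 * global control register simply leave them NULL.
	 */
	if (ptp_ops && ptp_ops->global_enable)
		return ptp_ops->global_enable(chip);

	return 0;
}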
#define STATS_TYPE_PORT BIT(0)
#define STATS_TYPE_BANK0 BIT(1)
#define STATS_TYPE_BANK1 BIT(2)
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 37e8ce2c72a0..194660d8c783 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -160,6 +160,7 @@
#define MV88E6390_G2_AVB_CMD_OP_WRITE 0x6000
#define MV88E6352_G2_AVB_CMD_PORT_MASK 0x0f00
#define MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL 0xe
+#define MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL 0xf
#define MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL 0xf
#define MV88E6390_G2_AVB_CMD_PORT_MASK 0x1f00
#define MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL 0x1e
@@ -335,6 +336,7 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
+extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops;
extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops;
extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops;
@@ -484,6 +486,7 @@ static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {};
static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {};
+static const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {};
static const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {};
static const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = {};
diff --git a/drivers/net/dsa/mv88e6xxx/global2_avb.c b/drivers/net/dsa/mv88e6xxx/global2_avb.c
index 2e398ccb88ca..672b503a67e1 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_avb.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_avb.c
@@ -130,6 +130,31 @@ const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {
.tai_write = mv88e6352_g2_avb_tai_write,
};
+static int mv88e6165_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data, int len)
+{
+ return mv88e6352_g2_avb_port_ptp_read(chip,
+ MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL,
+ addr, data, len);
+}
+
+static int mv88e6165_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr,
+ u16 data)
+{
+ return mv88e6352_g2_avb_port_ptp_write(chip,
+ MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL,
+ addr, data);
+}
+
+const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {
+ .port_ptp_read = mv88e6352_g2_avb_port_ptp_read,
+ .port_ptp_write = mv88e6352_g2_avb_port_ptp_write,
+ .ptp_read = mv88e6352_g2_avb_ptp_read,
+ .ptp_write = mv88e6352_g2_avb_ptp_write,
+ .tai_read = mv88e6165_g2_avb_tai_read,
+ .tai_write = mv88e6165_g2_avb_tai_write,
+};
+
static int mv88e6390_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip,
int port, int addr, u16 *data,
int len)
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index a036c490b7ce..a17c16a2ab78 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -51,17 +51,30 @@ static int mv88e6xxx_ptp_write(struct mv88e6xxx_chip *chip, int addr,
return chip->info->ops->avb_ops->ptp_write(chip, addr, data);
}
+static int mv88e6xxx_ptp_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data)
+{
+ if (!chip->info->ops->avb_ops->ptp_read)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->avb_ops->ptp_read(chip, addr, data, 1);
+}
+
/* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX
* timestamp. When working properly, hardware will produce a timestamp
* within 1ms. Software may encounter delays due to MDIO contention, so
* the timeout is set accordingly.
*/
-#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(20)
+#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *info)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ const struct mv88e6xxx_ptp_ops *ptp_ops;
+ struct mv88e6xxx_chip *chip;
+
+ chip = ds->priv;
+ ptp_ops = chip->info->ops->ptp_ops;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
@@ -74,17 +87,7 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+ info->rx_filters = ptp_ops->rx_filters;
return 0;
}
@@ -92,10 +95,9 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
struct hwtstamp_config *config)
{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
bool tstamp_enable = false;
- u16 port_config0;
- int err;
/* Prevent the TX/RX paths from trying to interact with the
* timestamp hardware while we reconfigure it.
@@ -120,6 +122,14 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
/* The switch supports timestamping both L2 and L4; one cannot be
* disabled independently of the other.
*/
+
+ if (!(BIT(config->rx_filter) & ptp_ops->rx_filters)) {
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ dev_dbg(chip->dev, "Unsupported rx_filter %d\n",
+ config->rx_filter);
+ return -ERANGE;
+ }
+
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tstamp_enable = false;
@@ -141,24 +151,22 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
return -ERANGE;
}
+ mutex_lock(&chip->reg_lock);
if (tstamp_enable) {
- /* Disable transportSpecific value matching, so that packets
- * with either 1588 (0) and 802.1AS (1) will be timestamped.
- */
- port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH;
+ chip->enable_count += 1;
+ if (chip->enable_count == 1 && ptp_ops->global_enable)
+ ptp_ops->global_enable(chip);
+ if (ptp_ops->port_enable)
+ ptp_ops->port_enable(chip, port);
} else {
- /* Disable PTP. This disables both RX and TX timestamping. */
- port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP;
+ if (ptp_ops->port_disable)
+ ptp_ops->port_disable(chip, port);
+ chip->enable_count -= 1;
+ if (chip->enable_count == 0 && ptp_ops->global_disable)
+ ptp_ops->global_disable(chip);
}
-
- mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
- port_config0);
mutex_unlock(&chip->reg_lock);
- if (err < 0)
- return err;
-
/* Once hardware has been configured, enable timestamp checks
* in the RX/TX paths.
*/
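Condensing the enable_count logic above: the first port to enable timestamping powers the global engine up, the last one to disable it powers it down, and the per-port hooks run on every transition. A sketch of that discipline (example_tstamp_refcount is hypothetical; the caller holds reg_lock, as above):

static void example_tstamp_refcount(struct mv88e6xxx_chip *chip, int port,
				    bool enable)
{
	const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;

	if (enable) {
		/* first enabled port brings the global engine up */
		if (++chip->enable_count == 1 && ptp_ops->global_enable)
			ptp_ops->global_enable(chip);
		if (ptp_ops->port_enable)
			ptp_ops->port_enable(chip, port);
	} else {
		if (ptp_ops->port_disable)
			ptp_ops->port_disable(chip, port);
		/* last disabled port shuts it down again */
		if (--chip->enable_count == 0 && ptp_ops->global_disable)
			ptp_ops->global_disable(chip);
	}
}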
@@ -338,17 +346,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
static void mv88e6xxx_rxtstamp_work(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_port_hwtstamp *ps)
{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct sk_buff *skb;
skb = skb_dequeue(&ps->rx_queue);
if (skb)
- mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR0_STS,
+ mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr0_sts_reg,
&ps->rx_queue);
skb = skb_dequeue(&ps->rx_queue2);
if (skb)
- mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR1_STS,
+ mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr1_sts_reg,
&ps->rx_queue2);
}
@@ -389,6 +398,7 @@ bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_port_hwtstamp *ps)
{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct skb_shared_hwtstamps shhwtstamps;
u16 departure_block[4], status;
struct sk_buff *tmp_skb;
@@ -401,7 +411,7 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
- MV88E6XXX_PORT_PTP_DEP_STS,
+ ptp_ops->dep_sts_reg,
departure_block,
ARRAY_SIZE(departure_block));
mutex_unlock(&chip->reg_lock);
@@ -425,8 +435,7 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
/* We have the timestamp; go ahead and clear valid now */
mutex_lock(&chip->reg_lock);
- mv88e6xxx_port_ptp_write(chip, ps->port_id,
- MV88E6XXX_PORT_PTP_DEP_STS, 0);
+ mv88e6xxx_port_ptp_write(chip, ps->port_id, ptp_ops->dep_sts_reg, 0);
mutex_unlock(&chip->reg_lock);
status = departure_block[0] & MV88E6XXX_PTP_TS_STATUS_MASK;
@@ -522,8 +531,48 @@ bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
return true;
}
+int mv88e6165_global_disable(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val);
+ if (err)
+ return err;
+
+ val |= MV88E6165_PTP_CFG_DISABLE_PTP;
+
+ return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val);
+}
+
+int mv88e6165_global_enable(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val);
+ if (err)
+ return err;
+
+ val &= ~(MV88E6165_PTP_CFG_DISABLE_PTP | MV88E6165_PTP_CFG_TSPEC_MASK);
+
+ return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val);
+}
+
+int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
+ MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP);
+}
+
+int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
+ MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH);
+}
+
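Worth noting: clearing MV88E6165_PTP_CFG_TSPEC_MASK in mv88e6165_global_enable() plays the same role as the per-port DISABLE_TSPEC_MATCH used on the 6352 family, so frames with either transportSpecific value (1588 or 802.1AS) are timestamped. A hypothetical sketch pairing the two hooks:

static int example_6165_restart_ptp(struct mv88e6xxx_chip *chip)
{
	int err;

	/* both hooks read-modify-write MV88E6165_PTP_CFG */
	err = mv88e6165_global_disable(chip);
	if (err)
		return err;

	return mv88e6165_global_enable(chip);
}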
static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port)
{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
ps->port_id = port;
@@ -531,12 +580,15 @@ static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port)
skb_queue_head_init(&ps->rx_queue);
skb_queue_head_init(&ps->rx_queue2);
- return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
- MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP);
+ if (ptp_ops->port_disable)
+ return ptp_ops->port_disable(chip, port);
+
+ return 0;
}
int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
int err;
int i;
@@ -547,6 +599,18 @@ int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
return err;
}
+ /* Disable PTP globally */
+ if (ptp_ops->global_disable) {
+ err = ptp_ops->global_disable(chip);
+ if (err)
+ return err;
+ }
+
+ /* Set the ethertype of L2 PTP messages */
+ err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_GC_ETYPE, ETH_P_1588);
+ if (err)
+ return err;
+
/* MV88E6XXX_PTP_MSG_TYPE is a mask of PTP message types to
* timestamp. This affects all ports that have timestamping enabled,
* but the timestamp config is per-port; thus we configure all events
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
index bc71c9212a08..b9a72661bcc4 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -19,7 +19,7 @@
#include "chip.h"
-/* Global PTP registers */
+/* Global 6352 PTP registers */
/* Offset 0x00: PTP EtherType */
#define MV88E6XXX_PTP_ETHERTYPE 0x00
@@ -34,6 +34,12 @@
/* Offset 0x02: Timestamp Arrival Capture Pointers */
#define MV88E6XXX_PTP_TS_ARRIVAL_PTR 0x02
+/* Offset 0x05: PTP Global Configuration */
+#define MV88E6165_PTP_CFG 0x05
+#define MV88E6165_PTP_CFG_TSPEC_MASK 0xf000
+#define MV88E6165_PTP_CFG_DISABLE_TS_OVERWRITE BIT(1)
+#define MV88E6165_PTP_CFG_DISABLE_PTP BIT(0)
+
/* Offset 0x07: PTP Global Configuration */
#define MV88E6341_PTP_CFG 0x07
#define MV88E6341_PTP_CFG_UPDATE 0x8000
@@ -46,7 +52,7 @@
/* Offset 0x08: PTP Interrupt Status */
#define MV88E6XXX_PTP_IRQ_STATUS 0x08
-/* Per-Port PTP Registers */
+/* Per-Port 6352 PTP Registers */
/* Offset 0x00: PTP Configuration 0 */
#define MV88E6XXX_PORT_PTP_CFG0 0x00
#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_SHIFT 12
@@ -123,6 +129,10 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip);
+int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port);
+int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port);
+int mv88e6165_global_enable(struct mv88e6xxx_chip *chip);
+int mv88e6165_global_disable(struct mv88e6xxx_chip *chip);
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 429d0ebcd5b1..92945841c8e8 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -19,6 +19,7 @@
#include "chip.h"
#include "port.h"
+#include "serdes.h"
int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
u16 *val)
@@ -36,6 +37,29 @@ int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
return mv88e6xxx_write(chip, addr, reg, val);
}
+/* Offset 0x00: MAC (or PCS or Physical) Status Register
+ *
+ * For most devices, this register is read only. On the 6185, however,
+ * the MyPause bit is read/write.
+ */
+int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port,
+ int pause)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ if (pause)
+ reg |= MV88E6XXX_PORT_STS_MY_PAUSE;
+ else
+ reg &= ~MV88E6XXX_PORT_STS_MY_PAUSE;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
+}
+
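A hypothetical call site, using the PAUSE_ON/PAUSE_OFF values defined next to the op in chip.h; only chips that provide port_set_pause ever touch the register:

static int example_set_pause(struct mv88e6xxx_chip *chip, int port,
			     bool pause)
{
	if (!chip->info->ops->port_set_pause)
		return 0;

	return chip->info->ops->port_set_pause(chip, port,
					       pause ? PAUSE_ON : PAUSE_OFF);
}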
/* Offset 0x01: MAC (or PCS or Physical) Control Register
*
* Link, Duplex and Flow Control have one force bit, one value bit.
@@ -318,8 +342,9 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
- u16 reg;
+ int lane;
u16 cmode;
+ u16 reg;
int err;
if (mode == PHY_INTERFACE_MODE_NA)
@@ -349,6 +374,20 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
cmode = 0;
}
+ lane = mv88e6390x_serdes_get_lane(chip, port);
+ if (lane < 0)
+ return lane;
+
+ if (chip->ports[port].serdes_irq) {
+ err = mv88e6390_serdes_irq_disable(chip, port, lane);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6390_serdes_power(chip, port, false);
+ if (err)
+ return err;
+
if (cmode) {
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
@@ -360,12 +399,38 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
if (err)
return err;
+
+ err = mv88e6390_serdes_power(chip, port, true);
+ if (err)
+ return err;
+
+ if (chip->ports[port].serdes_irq) {
+ err = mv88e6390_serdes_irq_enable(chip, port, lane);
+ if (err)
+ return err;
+ }
}
+ chip->ports[port].cmode = cmode;
+
return 0;
}
-int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
+int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ *cmode = reg & MV88E6185_PORT_STS_CMODE_MASK;
+
+ return 0;
+}
+
+int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
{
int err;
u16 reg;
@@ -379,7 +444,7 @@ int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
return 0;
}
-int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port,
+int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
struct phylink_link_state *state)
{
int err;
@@ -400,7 +465,7 @@ int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port,
state->speed = SPEED_1000;
break;
case MV88E6XXX_PORT_STS_SPEED_10000:
- if ((reg &MV88E6XXX_PORT_STS_CMODE_MASK) ==
+ if ((reg & MV88E6XXX_PORT_STS_CMODE_MASK) ==
MV88E6XXX_PORT_STS_CMODE_2500BASEX)
state->speed = SPEED_2500;
else
@@ -417,6 +482,42 @@ int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port,
return 0;
}
+int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_link_state *state)
+{
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
+ u8 cmode = chip->ports[port].cmode;
+
+ /* When a port is in "Cross-chip serdes" mode, it uses
+ * 1000Base-X full duplex mode, but there is no automatic
+ * link detection. Use the sync OK status for link (as it
+ * would do for 1000Base-X mode).
+ */
+ if (cmode == MV88E6185_PORT_STS_CMODE_SERDES) {
+ u16 mac;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port,
+ MV88E6XXX_PORT_MAC_CTL, &mac);
+ if (err)
+ return err;
+
+ state->link = !!(mac & MV88E6185_PORT_MAC_CTL_SYNC_OK);
+ state->an_enabled = 1;
+ state->an_complete =
+ !!(mac & MV88E6185_PORT_MAC_CTL_AN_DONE);
+ state->duplex =
+ state->link ? DUPLEX_FULL : DUPLEX_UNKNOWN;
+ state->speed =
+ state->link ? SPEED_1000 : SPEED_UNKNOWN;
+
+ return 0;
+ }
+ }
+
+ return mv88e6352_port_link_state(chip, port, state);
+}
+
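For context, a sketch of the glue phylink sits behind, assuming the usual DSA wrapper (example_mac_link_state is hypothetical; register access is serialized by reg_lock, as elsewhere in the driver):

static int example_mac_link_state(struct dsa_switch *ds, int port,
				  struct phylink_link_state *state)
{
	struct mv88e6xxx_chip *chip = ds->priv;
	int err;

	mutex_lock(&chip->reg_lock);
	err = chip->info->ops->port_link_state(chip, port, state);
	mutex_unlock(&chip->reg_lock);

	return err;
}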
/* Offset 0x02: Jamming Control
*
* Do not limit the period of time that this port can be paused for by
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 5e1db1b221ca..f32f56af8e35 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -42,14 +42,28 @@
#define MV88E6XXX_PORT_STS_CMODE_2500BASEX 0x000b
#define MV88E6XXX_PORT_STS_CMODE_XAUI 0x000c
#define MV88E6XXX_PORT_STS_CMODE_RXAUI 0x000d
+#define MV88E6185_PORT_STS_CDUPLEX 0x0008
+#define MV88E6185_PORT_STS_CMODE_MASK 0x0007
+#define MV88E6185_PORT_STS_CMODE_GMII_FD 0x0000
+#define MV88E6185_PORT_STS_CMODE_MII_100_FD_PS 0x0001
+#define MV88E6185_PORT_STS_CMODE_MII_100 0x0002
+#define MV88E6185_PORT_STS_CMODE_MII_10 0x0003
+#define MV88E6185_PORT_STS_CMODE_SERDES 0x0004
+#define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005
+#define MV88E6185_PORT_STS_CMODE_PHY 0x0006
+#define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007
/* Offset 0x01: MAC (or PCS or Physical) Control Register */
#define MV88E6XXX_PORT_MAC_CTL 0x01
#define MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK 0x8000
#define MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK 0x4000
+#define MV88E6185_PORT_MAC_CTL_SYNC_OK 0x4000
#define MV88E6390_PORT_MAC_CTL_FORCE_SPEED 0x2000
#define MV88E6390_PORT_MAC_CTL_ALTSPEED 0x1000
#define MV88E6352_PORT_MAC_CTL_200BASE 0x1000
+#define MV88E6185_PORT_MAC_CTL_AN_EN 0x0400
+#define MV88E6185_PORT_MAC_CTL_AN_RESTART 0x0200
+#define MV88E6185_PORT_MAC_CTL_AN_DONE 0x0100
#define MV88E6XXX_PORT_MAC_CTL_FC 0x0080
#define MV88E6XXX_PORT_MAC_CTL_FORCE_FC 0x0040
#define MV88E6XXX_PORT_MAC_CTL_LINK_UP 0x0020
@@ -242,6 +256,8 @@ int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
u16 val);
+int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port,
+ int pause);
int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
@@ -295,8 +311,11 @@ int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
u8 out);
int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
-int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
-int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port,
+int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_link_state *state);
+int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
struct phylink_link_state *state);
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index bd85e2c390e1..4b336d8d4c67 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -16,6 +16,7 @@
#include "chip.h"
#include "global2.h"
+#include "hwtstamp.h"
#include "ptp.h"
/* Raw timestamps are in units of 8-ns clock periods. */
@@ -50,7 +51,7 @@ static int mv88e6xxx_tai_write(struct mv88e6xxx_chip *chip, int addr, u16 data)
}
/* TODO: places where this is called should be using pinctrl */
-static int mv88e6xxx_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
+static int mv88e6352_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
int func, int input)
{
int err;
@@ -65,7 +66,7 @@ static int mv88e6xxx_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
return chip->info->ops->gpio_ops->set_pctl(chip, pin, func);
}
-static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
+static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
u16 phc_time[2];
@@ -79,13 +80,27 @@ static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
return ((u32)phc_time[1] << 16) | phc_time[0];
}
-/* mv88e6xxx_config_eventcap - configure TAI event capture
+static u64 mv88e6165_ptp_clock_read(const struct cyclecounter *cc)
+{
+ struct mv88e6xxx_chip *chip = cc_to_chip(cc);
+ u16 phc_time[2];
+ int err;
+
+ err = mv88e6xxx_tai_read(chip, MV88E6XXX_PTP_GC_TIME_LO, phc_time,
+ ARRAY_SIZE(phc_time));
+ if (err)
+ return 0;
+ else
+ return ((u32)phc_time[1] << 16) | phc_time[0];
+}
+
+/* mv88e6352_config_eventcap - configure TAI event capture
* @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external)
* @rising: zero for falling-edge trigger, else rising-edge trigger
*
* This will also reset the capture sequence counter.
*/
-static int mv88e6xxx_config_eventcap(struct mv88e6xxx_chip *chip, int event,
+static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int event,
int rising)
{
u16 global_config;
@@ -118,7 +133,7 @@ static int mv88e6xxx_config_eventcap(struct mv88e6xxx_chip *chip, int event,
return err;
}
-static void mv88e6xxx_tai_event_work(struct work_struct *ugly)
+static void mv88e6352_tai_event_work(struct work_struct *ugly)
{
struct delayed_work *dw = to_delayed_work(ugly);
struct mv88e6xxx_chip *chip = dw_tai_event_to_chip(dw);
@@ -232,7 +247,7 @@ static int mv88e6xxx_ptp_settime(struct ptp_clock_info *ptp,
return 0;
}
-static int mv88e6xxx_ptp_enable_extts(struct mv88e6xxx_chip *chip,
+static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
struct ptp_clock_request *rq, int on)
{
int rising = (rq->extts.flags & PTP_RISING_EDGE);
@@ -250,18 +265,18 @@ static int mv88e6xxx_ptp_enable_extts(struct mv88e6xxx_chip *chip,
if (on) {
func = MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ;
- err = mv88e6xxx_set_gpio_func(chip, pin, func, true);
+ err = mv88e6352_set_gpio_func(chip, pin, func, true);
if (err)
goto out;
schedule_delayed_work(&chip->tai_event_work,
TAI_EVENT_WORK_INTERVAL);
- err = mv88e6xxx_config_eventcap(chip, PTP_CLOCK_EXTTS, rising);
+ err = mv88e6352_config_eventcap(chip, PTP_CLOCK_EXTTS, rising);
} else {
func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO;
- err = mv88e6xxx_set_gpio_func(chip, pin, func, true);
+ err = mv88e6352_set_gpio_func(chip, pin, func, true);
cancel_delayed_work_sync(&chip->tai_event_work);
}
@@ -272,20 +287,20 @@ out:
return err;
}
-static int mv88e6xxx_ptp_enable(struct ptp_clock_info *ptp,
+static int mv88e6352_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- return mv88e6xxx_ptp_enable_extts(chip, rq, on);
+ return mv88e6352_ptp_enable_extts(chip, rq, on);
default:
return -EOPNOTSUPP;
}
}
-static int mv88e6xxx_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+static int mv88e6352_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
switch (func) {
@@ -299,6 +314,55 @@ static int mv88e6xxx_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
return 0;
}
+const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
+ .clock_read = mv88e6352_ptp_clock_read,
+ .ptp_enable = mv88e6352_ptp_enable,
+ .ptp_verify = mv88e6352_ptp_verify,
+ .event_work = mv88e6352_tai_event_work,
+ .port_enable = mv88e6352_hwtstamp_port_enable,
+ .port_disable = mv88e6352_hwtstamp_port_disable,
+ .n_ext_ts = 1,
+ .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
+ .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
+ .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
+ .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+};
+
+const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
+ .clock_read = mv88e6165_ptp_clock_read,
+ .global_enable = mv88e6165_global_enable,
+ .global_disable = mv88e6165_global_disable,
+ .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS,
+ .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS,
+ .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS,
+ .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+};
+
+static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
+{
+ struct mv88e6xxx_chip *chip = cc_to_chip(cc);
+
+ if (chip->info->ops->ptp_ops->clock_read)
+ return chip->info->ops->ptp_ops->clock_read(cc);
+
+ return 0;
+}
+
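The dispatcher above feeds the cyclecounter that backs the PHC. Assuming the cyclecounter/timecounter pair in struct mv88e6xxx_chip is named tstamp_cc/tstamp_tc (an assumption; those fields are not shown in this diff), time is read via linux/timecounter.h, and since one tick is 8 ns at 125 MHz the raw 32-bit count wraps every 2^32 * 8 ns ~= 34.36 s, hence the overflow worker below:

static u64 example_read_time_ns(struct mv88e6xxx_chip *chip)
{
	/* the timecounter widens the 32-bit hardware count to 64-bit ns */
	return timecounter_read(&chip->tstamp_tc);
}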
/* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3
* seconds; this task forces periodic reads so that we don't miss any.
*/
@@ -317,6 +381,7 @@ static void mv88e6xxx_ptp_overflow_check(struct work_struct *work)
int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
int i;
/* Set up the cycle counter */
@@ -330,14 +395,15 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
ktime_to_ns(ktime_get_real()));
INIT_DELAYED_WORK(&chip->overflow_work, mv88e6xxx_ptp_overflow_check);
- INIT_DELAYED_WORK(&chip->tai_event_work, mv88e6xxx_tai_event_work);
+ if (ptp_ops->event_work)
+ INIT_DELAYED_WORK(&chip->tai_event_work, ptp_ops->event_work);
chip->ptp_clock_info.owner = THIS_MODULE;
snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name),
dev_name(chip->dev));
chip->ptp_clock_info.max_adj = 1000000;
- chip->ptp_clock_info.n_ext_ts = 1;
+ chip->ptp_clock_info.n_ext_ts = ptp_ops->n_ext_ts;
chip->ptp_clock_info.n_per_out = 0;
chip->ptp_clock_info.n_pins = mv88e6xxx_num_gpio(chip);
chip->ptp_clock_info.pps = 0;
@@ -355,8 +421,8 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime;
chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime;
chip->ptp_clock_info.settime64 = mv88e6xxx_ptp_settime;
- chip->ptp_clock_info.enable = mv88e6xxx_ptp_enable;
- chip->ptp_clock_info.verify = mv88e6xxx_ptp_verify;
+ chip->ptp_clock_info.enable = ptp_ops->ptp_enable;
+ chip->ptp_clock_info.verify = ptp_ops->ptp_verify;
chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work;
chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev);
@@ -373,7 +439,8 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
{
if (chip->ptp_clock) {
cancel_delayed_work_sync(&chip->overflow_work);
- cancel_delayed_work_sync(&chip->tai_event_work);
+ if (chip->info->ops->ptp_ops->event_work)
+ cancel_delayed_work_sync(&chip->tai_event_work);
ptp_clock_unregister(chip->ptp_clock);
chip->ptp_clock = NULL;
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
index 10f271ab650d..28a030840517 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.h
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -78,6 +78,71 @@
/* Offset 0x12: Lock Status */
#define MV88E6XXX_TAI_LOCK_STATUS 0x12
+/* 6165 Global Control Registers */
+/* Offset 0x00: Ether Type */
+#define MV88E6XXX_PTP_GC_ETYPE 0x00
+
+/* Offset 0x01: Message ID */
+#define MV88E6XXX_PTP_GC_MESSAGE_ID 0x01
+
+/* Offset 0x02: Time Stamp Arrival Pointer */
+#define MV88E6XXX_PTP_GC_TS_ARR_PTR 0x02
+
+/* Offset 0x03: Port Arrival Interrupt Enable */
+#define MV88E6XXX_PTP_GC_PORT_ARR_INT_EN 0x03
+
+/* Offset 0x04: Port Departure Interrupt Enable */
+#define MV88E6XXX_PTP_GC_PORT_DEP_INT_EN 0x04
+
+/* Offset 0x05: Configuration */
+#define MV88E6XXX_PTP_GC_CONFIG 0x05
+#define MV88E6XXX_PTP_GC_CONFIG_DIS_OVERWRITE BIT(1)
+#define MV88E6XXX_PTP_GC_CONFIG_DIS_TS BIT(0)
+
+/* Offset 0x08: Interrupt Status */
+#define MV88E6XXX_PTP_GC_INT_STATUS 0x08
+
+/* Offset 0x09/0x0a: Global Time */
+#define MV88E6XXX_PTP_GC_TIME_LO 0x09
+#define MV88E6XXX_PTP_GC_TIME_HI 0x0A
+
+/* 6165 Per Port Registers */
+/* Offset 0: Arrival Time 0 Status */
+#define MV88E6165_PORT_PTP_ARR0_STS 0x00
+
+/* Offset 0x01/0x02: PTP Arrival 0 Time */
+#define MV88E6165_PORT_PTP_ARR0_TIME_LO 0x01
+#define MV88E6165_PORT_PTP_ARR0_TIME_HI 0x02
+
+/* Offset 0x03: PTP Arrival 0 Sequence ID */
+#define MV88E6165_PORT_PTP_ARR0_SEQID 0x03
+
+/* Offset 0x04: PTP Arrival 1 Status */
+#define MV88E6165_PORT_PTP_ARR1_STS 0x04
+
+/* Offset 0x05/0x06: PTP Arrival 1 Time */
+#define MV88E6165_PORT_PTP_ARR1_TIME_LO 0x05
+#define MV88E6165_PORT_PTP_ARR1_TIME_HI 0x06
+
+/* Offset 0x07: PTP Arrival 1 Sequence ID */
+#define MV88E6165_PORT_PTP_ARR1_SEQID 0x07
+
+/* Offset 0x08: PTP Departure Status */
+#define MV88E6165_PORT_PTP_DEP_STS 0x08
+
+/* Offset 0x09/0x0a: PTP Departure Time */
+#define MV88E6165_PORT_PTP_DEP_TIME_LO 0x09
+#define MV88E6165_PORT_PTP_DEP_TIME_HI 0x0a
+
+/* Offset 0x0b: PTP Departure Sequence ID */
+#define MV88E6165_PORT_PTP_DEP_SEQID 0x0b
+
+/* Offset 0x0d: Port Status */
+#define MV88E6165_PORT_STATUS 0x0d
+
#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
@@ -87,6 +152,9 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
#define ptp_to_chip(ptp) container_of(ptp, struct mv88e6xxx_chip, \
ptp_clock_info)
+extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
+extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops;
+
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp)
@@ -103,6 +171,9 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
{
}
+static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
+static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {};
+
#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */
#endif /* _MV88E6XXX_PTP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 880b2cf0a530..e82983975754 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -11,6 +11,8 @@
* (at your option) any later version.
*/
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/mii.h>
#include "chip.h"
@@ -35,6 +37,22 @@ static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg,
reg, val);
}
+static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip,
+ int lane, int device, int reg, u16 *val)
+{
+ int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
+
+ return mv88e6xxx_phy_read(chip, lane, reg_c45, val);
+}
+
+static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
+ int lane, int device, int reg, u16 val)
+{
+ int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
+
+ return mv88e6xxx_phy_write(chip, lane, reg_c45, val);
+}
+
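These helpers use the driver's Clause 45 convention: MII_ADDR_C45 flags the address as C45, the MMD number occupies bits 20:16 and the register number bits 15:0. For example, the composed address for a lane's SGMII status register:

/* equivalent to what mv88e6390_serdes_read() builds internally */
int reg_c45 = MII_ADDR_C45 | MDIO_MMD_PHYXS << 16 | MV88E6390_SGMII_STATUS;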
static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on)
{
u16 val, new_val;
@@ -57,14 +75,7 @@ static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on)
static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
{
- u8 cmode;
- int err;
-
- err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
- if (err) {
- dev_err(chip->dev, "failed to read cmode\n");
- return false;
- }
+ u8 cmode = chip->ports[port].cmode;
if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASE_X) ||
(cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) ||
@@ -174,16 +185,121 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
+/* Return the SERDES lane address a port is using. Only Ports 9 and 10
+ * have SERDES lanes. Returns -ENODEV if a port does not have a lane.
+ */
+static int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ switch (port) {
+ case 9:
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ return MV88E6390_PORT9_LANE0;
+ return -ENODEV;
+ case 10:
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ return MV88E6390_PORT10_LANE0;
+ return -ENODEV;
+ default:
+ return -ENODEV;
+ }
+}
+
+/* Return the SERDES lane address a port is using. Ports 9 and 10 can
+ * use multiple lanes. If so, return the first lane the port uses.
+ * Returns -ENODEV if a port does not have a lane.
+ */
+int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 cmode_port9, cmode_port10, cmode_port;
+
+ cmode_port9 = chip->ports[9].cmode;
+ cmode_port10 = chip->ports[10].cmode;
+ cmode_port = chip->ports[port].cmode;
+
+ switch (port) {
+ case 2:
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
+ return MV88E6390_PORT9_LANE1;
+ return -ENODEV;
+ case 3:
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
+ return MV88E6390_PORT9_LANE2;
+ return -ENODEV;
+ case 4:
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
+ return MV88E6390_PORT9_LANE3;
+ return -ENODEV;
+ case 5:
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
+ return MV88E6390_PORT10_LANE1;
+ return -ENODEV;
+ case 6:
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
+ return MV88E6390_PORT10_LANE2;
+ return -ENODEV;
+ case 7:
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
+ return MV88E6390_PORT10_LANE3;
+ return -ENODEV;
+ case 9:
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_XAUI ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ return MV88E6390_PORT9_LANE0;
+ return -ENODEV;
+ case 10:
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_XAUI ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ return MV88E6390_PORT10_LANE0;
+ return -ENODEV;
+ default:
+ return -ENODEV;
+ }
+}
+
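Callers resolve the lane first and treat -ENODEV as "no SERDES lane in this cmode" rather than as a failure, exactly as the power helpers further down do. A hypothetical sketch:

static int example_serdes_op(struct mv88e6xxx_chip *chip, int port)
{
	int lane = mv88e6390x_serdes_get_lane(chip, port);

	if (lane == -ENODEV)
		return 0;	/* nothing to do on this port */
	if (lane < 0)
		return lane;	/* real error */

	/* ... program registers on 'lane' via MDIO_MMD_PHYXS ... */
	return 0;
}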
/* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */
-static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int addr, bool on)
+static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane,
+ bool on)
{
u16 val, new_val;
- int reg_c45;
int err;
- reg_c45 = MII_ADDR_C45 | MV88E6390_SERDES_DEVICE |
- MV88E6390_PCS_CONTROL_1;
- err = mv88e6xxx_phy_read(chip, addr, reg_c45, &val);
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_PCS_CONTROL_1, &val);
+
if (err)
return err;
@@ -195,22 +311,21 @@ static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int addr, bool on)
new_val = val | MV88E6390_PCS_CONTROL_1_PDOWN;
if (val != new_val)
- err = mv88e6xxx_phy_write(chip, addr, reg_c45, new_val);
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_PCS_CONTROL_1, new_val);
return err;
}
-/* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */
-static int mv88e6390_serdes_sgmii(struct mv88e6xxx_chip *chip, int addr,
- bool on)
+/* Set the power on/off for SGMII and 1000Base-X */
+static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane,
+ bool on)
{
u16 val, new_val;
- int reg_c45;
int err;
- reg_c45 = MII_ADDR_C45 | MV88E6390_SERDES_DEVICE |
- MV88E6390_SGMII_CONTROL;
- err = mv88e6xxx_phy_read(chip, addr, reg_c45, &val);
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_CONTROL, &val);
if (err)
return err;
@@ -222,127 +337,261 @@ static int mv88e6390_serdes_sgmii(struct mv88e6xxx_chip *chip, int addr,
new_val = val | MV88E6390_SGMII_CONTROL_PDOWN;
if (val != new_val)
- err = mv88e6xxx_phy_write(chip, addr, reg_c45, new_val);
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_CONTROL, new_val);
return err;
}
-static int mv88e6390_serdes_lower(struct mv88e6xxx_chip *chip, u8 cmode,
- int port_donor, int lane, bool rxaui, bool on)
+static int mv88e6390_serdes_power_lane(struct mv88e6xxx_chip *chip, int port,
+ int lane, bool on)
{
- int err;
- u8 cmode_donor;
-
- err = mv88e6xxx_port_get_cmode(chip, port_donor, &cmode_donor);
- if (err)
- return err;
+ u8 cmode = chip->ports[port].cmode;
- switch (cmode_donor) {
- case MV88E6XXX_PORT_STS_CMODE_RXAUI:
- if (!rxaui)
- break;
- /* Fall through */
- case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
+ switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
- cmode == MV88E6XXX_PORT_STS_CMODE_SGMII)
- return mv88e6390_serdes_sgmii(chip, lane, on);
+ return mv88e6390_serdes_power_sgmii(chip, lane, on);
+ case MV88E6XXX_PORT_STS_CMODE_XAUI:
+ case MV88E6XXX_PORT_STS_CMODE_RXAUI:
+ return mv88e6390_serdes_power_10g(chip, lane, on);
+ }
+
+ return 0;
+}
+
+int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+{
+ int lane;
+
+ lane = mv88e6390_serdes_get_lane(chip, port);
+ if (lane == -ENODEV)
+ return 0;
+
+ if (lane < 0)
+ return lane;
+
+ switch (port) {
+ case 9 ... 10:
+ return mv88e6390_serdes_power_lane(chip, port, lane, on);
}
+
+ return 0;
+}
+
+int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+{
+ int lane;
+
+ lane = mv88e6390x_serdes_get_lane(chip, port);
+ if (lane == -ENODEV)
+ return 0;
+
+ if (lane < 0)
+ return lane;
+
+ switch (port) {
+ case 2 ... 4:
+ case 5 ... 7:
+ case 9 ... 10:
+ return mv88e6390_serdes_power_lane(chip, port, lane, on);
+ }
+
return 0;
}
-static int mv88e6390_serdes_port9(struct mv88e6xxx_chip *chip, u8 cmode,
- bool on)
+static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
+ int port, int lane)
{
+ struct dsa_switch *ds = chip->ds;
+ u16 status;
+ bool up;
+
+ mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_STATUS, &status);
+
+ /* Status must be read twice in order to give the current link
+ * status. Otherwise the change in link status since the last
+ * read of the register is returned.
+ */
+ mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_STATUS, &status);
+ up = status & MV88E6390_SGMII_STATUS_LINK;
+
+ dsa_port_phylink_mac_change(ds, port, up);
+}
+
+static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip,
+ int lane)
+{
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_INT_ENABLE,
+ MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP);
+}
+
+static int mv88e6390_serdes_irq_disable_sgmii(struct mv88e6xxx_chip *chip,
+ int lane)
+{
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_INT_ENABLE, 0);
+}
+
+int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
+ int lane)
+{
+ u8 cmode = chip->ports[port].cmode;
+ int err = 0;
+
switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
case MV88E6XXX_PORT_STS_CMODE_SGMII:
- return mv88e6390_serdes_sgmii(chip, MV88E6390_PORT9_LANE0, on);
- case MV88E6XXX_PORT_STS_CMODE_XAUI:
- case MV88E6XXX_PORT_STS_CMODE_RXAUI:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_10g(chip, MV88E6390_PORT9_LANE0, on);
+ err = mv88e6390_serdes_irq_enable_sgmii(chip, lane);
}
- return 0;
+ return err;
}
-static int mv88e6390_serdes_port10(struct mv88e6xxx_chip *chip, u8 cmode,
- bool on)
+int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port,
+ int lane)
{
+ u8 cmode = chip->ports[port].cmode;
+ int err = 0;
+
switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
- return mv88e6390_serdes_sgmii(chip, MV88E6390_PORT10_LANE0, on);
- case MV88E6XXX_PORT_STS_CMODE_XAUI:
- case MV88E6XXX_PORT_STS_CMODE_RXAUI:
case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_10g(chip, MV88E6390_PORT10_LANE0, on);
+ err = mv88e6390_serdes_irq_disable_sgmii(chip, lane);
}
- return 0;
+ return err;
}
-int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip,
+ int lane, u16 *status)
{
- u8 cmode;
int err;
- err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
- if (err)
- return err;
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_INT_STATUS, status);
- switch (port) {
- case 2:
- return mv88e6390_serdes_lower(chip, cmode, 9,
- MV88E6390_PORT9_LANE1,
- false, on);
- case 3:
- return mv88e6390_serdes_lower(chip, cmode, 9,
- MV88E6390_PORT9_LANE2,
- true, on);
- case 4:
- return mv88e6390_serdes_lower(chip, cmode, 9,
- MV88E6390_PORT9_LANE3,
- true, on);
- case 5:
- return mv88e6390_serdes_lower(chip, cmode, 10,
- MV88E6390_PORT10_LANE1,
- false, on);
- case 6:
- return mv88e6390_serdes_lower(chip, cmode, 10,
- MV88E6390_PORT10_LANE2,
- true, on);
- case 7:
- return mv88e6390_serdes_lower(chip, cmode, 10,
- MV88E6390_PORT10_LANE3,
- true, on);
- case 9:
- return mv88e6390_serdes_port9(chip, cmode, on);
- case 10:
- return mv88e6390_serdes_port10(chip, cmode, on);
+ return err;
+}
+
+static irqreturn_t mv88e6390_serdes_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_port *port = dev_id;
+ struct mv88e6xxx_chip *chip = port->chip;
+ irqreturn_t ret = IRQ_NONE;
+ u8 cmode = port->cmode;
+ u16 status;
+ int lane;
+ int err;
+
+ lane = mv88e6390x_serdes_get_lane(chip, port->port);
+
+ mutex_lock(&chip->reg_lock);
+
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status);
+ if (err)
+ goto out;
+ if (status & (MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP)) {
+ ret = IRQ_HANDLED;
+ mv88e6390_serdes_irq_link_sgmii(chip, port->port, lane);
+ }
}
+out:
+ mutex_unlock(&chip->reg_lock);
- return 0;
+ return ret;
}
-int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
{
+ int lane;
int err;
- u8 cmode;
- if (port != 5)
+ /* Only support ports 9 and 10 at the moment */
+ if (port < 9)
return 0;
- err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
- if (err)
+ lane = mv88e6390x_serdes_get_lane(chip, port);
+
+ if (lane == -ENODEV)
+ return 0;
+
+ if (lane < 0)
+ return lane;
+
+ chip->ports[port].serdes_irq = irq_find_mapping(chip->g2_irq.domain,
+ port);
+ if (chip->ports[port].serdes_irq < 0) {
+ dev_err(chip->dev, "Unable to map SERDES irq: %d\n",
+ chip->ports[port].serdes_irq);
+ return chip->ports[port].serdes_irq;
+ }
+
+ /* Requesting the IRQ will trigger irq callbacks. So we cannot
+ * hold the reg_lock.
+ */
+ mutex_unlock(&chip->reg_lock);
+ err = request_threaded_irq(chip->ports[port].serdes_irq, NULL,
+ mv88e6390_serdes_thread_fn,
+ IRQF_ONESHOT, "mv88e6xxx-serdes",
+ &chip->ports[port]);
+ mutex_lock(&chip->reg_lock);
+
+ if (err) {
+ dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n",
+ err);
return err;
+ }
+
+ return mv88e6390_serdes_irq_enable(chip, port, lane);
+}
+
+void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
+{
+ int lane = mv88e6390x_serdes_get_lane(chip, port);
+
+ if (port < 9)
+ return;
+
+ if (lane < 0)
+ return;
+
+ mv88e6390_serdes_irq_disable(chip, port, lane);
+
+ /* Freeing the IRQ will trigger irq callbacks. So we cannot
+ * hold the reg_lock.
+ */
+ mutex_unlock(&chip->reg_lock);
+ free_irq(chip->ports[port].serdes_irq, &chip->ports[port]);
+ mutex_lock(&chip->reg_lock);
+
+ chip->ports[port].serdes_irq = 0;
+}
+
+int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ if (port != 5)
+ return 0;
if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
- return mv88e6390_serdes_sgmii(chip, MV88E6341_ADDR_SERDES, on);
+ return mv88e6390_serdes_power_sgmii(chip, MV88E6341_ADDR_SERDES,
+ on);
return 0;
}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index b6e5fbd46b5e..b1496de9c6fe 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -29,7 +29,6 @@
#define MV88E6390_PORT10_LANE1 0x15
#define MV88E6390_PORT10_LANE2 0x16
#define MV88E6390_PORT10_LANE3 0x17
-#define MV88E6390_SERDES_DEVICE (4 << 16)
/* 10GBASE-R and 10GBASE-X4/X2 */
#define MV88E6390_PCS_CONTROL_1 0x1000
@@ -43,13 +42,36 @@
#define MV88E6390_SGMII_CONTROL_RESET BIT(15)
#define MV88E6390_SGMII_CONTROL_LOOPBACK BIT(14)
#define MV88E6390_SGMII_CONTROL_PDOWN BIT(11)
+#define MV88E6390_SGMII_STATUS 0x2001
+#define MV88E6390_SGMII_STATUS_AN_DONE BIT(5)
+#define MV88E6390_SGMII_STATUS_REMOTE_FAULT BIT(4)
+#define MV88E6390_SGMII_STATUS_LINK BIT(2)
+#define MV88E6390_SGMII_INT_ENABLE 0xa001
+#define MV88E6390_SGMII_INT_SPEED_CHANGE BIT(14)
+#define MV88E6390_SGMII_INT_DUPLEX_CHANGE BIT(13)
+#define MV88E6390_SGMII_INT_PAGE_RX BIT(12)
+#define MV88E6390_SGMII_INT_AN_COMPLETE BIT(11)
+#define MV88E6390_SGMII_INT_LINK_DOWN BIT(10)
+#define MV88E6390_SGMII_INT_LINK_UP BIT(9)
+#define MV88E6390_SGMII_INT_SYMBOL_ERROR BIT(8)
+#define MV88E6390_SGMII_INT_FALSE_CARRIER BIT(7)
+#define MV88E6390_SGMII_INT_STATUS 0xa002
+int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
+int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
+int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
+void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data);
int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
+int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
+ int lane);
+int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port,
+ int lane);
+
#endif
diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
new file mode 100644
index 000000000000..b4b839a1d095
--- /dev/null
+++ b/drivers/net/dsa/realtek-smi.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek Simple Management Interface (SMI) driver
+ * It can be discussed how "simple" this interface is.
+ *
+ * The SMI protocol piggy-backs on the MDIO MDC and MDIO signal levels,
+ * but the protocol is not MDIO at all. Instead it is a Realtek
+ * peculiarity that needs to bit-bang the lines in a special way to
+ * communicate with the switch.
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366 - The original version, apparently
+ * RTL8369 - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ * different register layout from the other two
+ * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well
+ * RTL8368S - Seems to be an alternative name for RTL8366RB
+ * RTL8370 - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+
+#include "realtek-smi.h"
+
+#define REALTEK_SMI_ACK_RETRY_COUNT 5
+#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */
+#define REALTEK_SMI_HW_START_DELAY 100 /* msecs */
+
+static inline void realtek_smi_clk_delay(struct realtek_smi *smi)
+{
+ ndelay(smi->clk_delay);
+}
+
+static void realtek_smi_start(struct realtek_smi *smi)
+{
+ /* Set GPIO pins to output mode, with initial state:
+ * SCK = 0, SDA = 1
+ */
+ gpiod_direction_output(smi->mdc, 0);
+ gpiod_direction_output(smi->mdio, 1);
+ realtek_smi_clk_delay(smi);
+
+ /* CLK 1: 0 -> 1, 1 -> 0 */
+ gpiod_set_value(smi->mdc, 1);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 0);
+ realtek_smi_clk_delay(smi);
+
+ /* CLK 2: */
+ gpiod_set_value(smi->mdc, 1);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdio, 0);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 0);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdio, 1);
+}
+
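+/* In words, a reading of the start sequence above: both lines are
+ * driven, one full MDC clock pulse goes out with MDIO held high, and
+ * on the second pulse MDIO is pulled low while MDC is still high --
+ * the same shape as an I2C START condition -- before MDC falls and
+ * MDIO is released back high.
+ */
+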
+static void realtek_smi_stop(struct realtek_smi *smi)
+{
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdio, 0);
+ gpiod_set_value(smi->mdc, 1);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdio, 1);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 1);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 0);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 1);
+
+ /* Add a click */
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 0);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 1);
+
+ /* Set GPIO pins to input mode */
+ gpiod_direction_input(smi->mdio);
+ gpiod_direction_input(smi->mdc);
+}
+
+static void realtek_smi_write_bits(struct realtek_smi *smi, u32 data, u32 len)
+{
+ for (; len > 0; len--) {
+ realtek_smi_clk_delay(smi);
+
+ /* Prepare data */
+ gpiod_set_value(smi->mdio, !!(data & (1 << (len - 1))));
+ realtek_smi_clk_delay(smi);
+
+ /* Clocking */
+ gpiod_set_value(smi->mdc, 1);
+ realtek_smi_clk_delay(smi);
+ gpiod_set_value(smi->mdc, 0);
+ }
+}
+
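+/* Bits go out MSB first: realtek_smi_write_bits(smi, 0xA5, 8) drives
+ * MDIO through 1,0,1,0,0,1,0,1, with each bit stable before MDC rises,
+ * so the switch presumably samples on the rising edge.
+ */
+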
+static void realtek_smi_read_bits(struct realtek_smi *smi, u32 len, u32 *data)
+{
+ gpiod_direction_input(smi->mdio);
+
+ for (*data = 0; len > 0; len--) {
+ u32 u;
+
+ realtek_smi_clk_delay(smi);
+
+ /* Clocking */
+ gpiod_set_value(smi->mdc, 1);
+ realtek_smi_clk_delay(smi);
+ u = !!gpiod_get_value(smi->mdio);
+ gpiod_set_value(smi->mdc, 0);
+
+ *data |= (u << (len - 1));
+ }
+
+ gpiod_direction_output(smi->mdio, 0);
+}
+
+static int realtek_smi_wait_for_ack(struct realtek_smi *smi)
+{
+ int retry_cnt;
+
+ retry_cnt = 0;
+ do {
+ u32 ack;
+
+ realtek_smi_read_bits(smi, 1, &ack);
+ if (ack == 0)
+ break;
+
+ if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
+ dev_err(smi->dev, "ACK timeout\n");
+ return -ETIMEDOUT;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static int realtek_smi_write_byte(struct realtek_smi *smi, u8 data)
+{
+ realtek_smi_write_bits(smi, data, 8);
+ return realtek_smi_wait_for_ack(smi);
+}
+
+static int realtek_smi_write_byte_noack(struct realtek_smi *smi, u8 data)
+{
+ realtek_smi_write_bits(smi, data, 8);
+ return 0;
+}
+
+static int realtek_smi_read_byte0(struct realtek_smi *smi, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(smi, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(smi, 0x00, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_byte1(struct realtek_smi *smi, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(smi, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(smi, 0x01, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_reg(struct realtek_smi *smi, u32 addr, u32 *data)
+{
+ unsigned long flags;
+ u8 lo = 0;
+ u8 hi = 0;
+ int ret;
+
+ spin_lock_irqsave(&smi->lock, flags);
+
+ realtek_smi_start(smi);
+
+ /* Send READ command */
+ ret = realtek_smi_write_byte(smi, smi->cmd_read);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(smi, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(smi, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Read DATA[7:0] */
+ realtek_smi_read_byte0(smi, &lo);
+ /* Read DATA[15:8] */
+ realtek_smi_read_byte1(smi, &hi);
+
+ *data = ((u32)lo) | (((u32)hi) << 8);
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(smi);
+ spin_unlock_irqrestore(&smi->lock, flags);
+
+ return ret;
+}
+
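+/* Putting the helpers together, a register read on the wire is framed
+ * as (a sketch pieced together from the functions above):
+ *
+ *   START, cmd_read [ack], ADDR[7:0] [ack], ADDR[15:8] [ack],
+ *   DATA[7:0] <ack=0>, DATA[15:8] <ack=1>, STOP
+ *
+ * where [ack] is driven by the switch and <ack> by the host; the
+ * trailing <ack=1> presumably tells the switch the transfer is done.
+ */
+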
+static int realtek_smi_write_reg(struct realtek_smi *smi,
+ u32 addr, u32 data, bool ack)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&smi->lock, flags);
+
+ realtek_smi_start(smi);
+
+ /* Send WRITE command */
+ ret = realtek_smi_write_byte(smi, smi->cmd_write);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(smi, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(smi, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Write DATA[7:0] */
+ ret = realtek_smi_write_byte(smi, data & 0xff);
+ if (ret)
+ goto out;
+
+ /* Write DATA[15:8] */
+ if (ack)
+ ret = realtek_smi_write_byte(smi, data >> 8);
+ else
+ ret = realtek_smi_write_byte_noack(smi, data >> 8);
+ if (ret)
+ goto out;
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(smi);
+ spin_unlock_irqrestore(&smi->lock, flags);
+
+ return ret;
+}
+
+/* There is a single case where we need to use this accessor, and that
+ * is when issuing a soft reset. Since the device resets as soon as we
+ * write that bit, no ACK will come back, for natural reasons.
+ */
+int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
+ u32 data)
+{
+ return realtek_smi_write_reg(smi, addr, data, false);
+}
+EXPORT_SYMBOL_GPL(realtek_smi_write_reg_noack);
+
+/* Regmap accessors */
+
+static int realtek_smi_write(void *ctx, u32 reg, u32 val)
+{
+ struct realtek_smi *smi = ctx;
+
+ return realtek_smi_write_reg(smi, reg, val, true);
+}
+
+static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
+{
+ struct realtek_smi *smi = ctx;
+
+ return realtek_smi_read_reg(smi, reg, val);
+}
+
+static const struct regmap_config realtek_smi_mdio_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_smi_read,
+ .reg_write = realtek_smi_write,
+ .cache_type = REGCACHE_NONE,
+};
+
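+/* With .reg_read/.reg_write wired up like this, every regmap access
+ * becomes a full bit-banged SMI transfer, and REGCACHE_NONE means even
+ * regmap_update_bits() does a read-modify-write on the wire. A sketch
+ * of typical use, with registers from the RTL8366RB subdriver added in
+ * this same patch:
+ *
+ *   u32 val;
+ *
+ *   regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &val);
+ *   regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ *                      mask, mask);
+ *
+ * (mask stands in for whichever bits the caller wants to set.) The one
+ * access that must bypass this path is soft reset, via
+ * realtek_smi_write_reg_noack(), since the chip resets before it can ACK.
+ */
+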
+static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct realtek_smi *smi = bus->priv;
+
+ return smi->ops->phy_read(smi, addr, regnum);
+}
+
+static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct realtek_smi *smi = bus->priv;
+
+ return smi->ops->phy_write(smi, addr, regnum, val);
+}
+
+int realtek_smi_setup_mdio(struct realtek_smi *smi)
+{
+ struct device_node *mdio_np;
+ int ret;
+
+ mdio_np = of_find_compatible_node(smi->dev->of_node, NULL,
+ "realtek,smi-mdio");
+ if (!mdio_np) {
+ dev_err(smi->dev, "no MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
+ if (!smi->slave_mii_bus)
+ return -ENOMEM;
+ smi->slave_mii_bus->priv = smi;
+ smi->slave_mii_bus->name = "SMI slave MII";
+ smi->slave_mii_bus->read = realtek_smi_mdio_read;
+ smi->slave_mii_bus->write = realtek_smi_mdio_write;
+ snprintf(smi->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
+ smi->ds->index);
+ smi->slave_mii_bus->dev.of_node = mdio_np;
+ smi->slave_mii_bus->parent = smi->dev;
+ smi->ds->slave_mii_bus = smi->slave_mii_bus;
+
+ ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np);
+ if (ret) {
+ dev_err(smi->dev, "unable to register MDIO bus %s\n",
+ smi->slave_mii_bus->id);
+ of_node_put(mdio_np);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int realtek_smi_probe(struct platform_device *pdev)
+{
+ const struct realtek_smi_variant *var;
+ struct device *dev = &pdev->dev;
+ struct realtek_smi *smi;
+ struct device_node *np;
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ if (!var)
+ return -EINVAL;
+ np = dev->of_node;
+
+ smi = devm_kzalloc(dev, sizeof(*smi), GFP_KERNEL);
+ if (!smi)
+ return -ENOMEM;
+ smi->map = devm_regmap_init(dev, NULL, smi,
+ &realtek_smi_mdio_regmap_config);
+ if (IS_ERR(smi->map)) {
+ ret = PTR_ERR(smi->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Link forward and backward */
+ smi->dev = dev;
+ smi->clk_delay = var->clk_delay;
+ smi->cmd_read = var->cmd_read;
+ smi->cmd_write = var->cmd_write;
+ smi->ops = var->ops;
+
+ dev_set_drvdata(dev, smi);
+ spin_lock_init(&smi->lock);
+
+ /* TODO: if power is software controlled, set up any regulators here */
+
+ /* Assert then deassert RESET */
+ smi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(smi->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(smi->reset);
+ }
+ msleep(REALTEK_SMI_HW_STOP_DELAY);
+ gpiod_set_value(smi->reset, 0);
+ msleep(REALTEK_SMI_HW_START_DELAY);
+ dev_info(dev, "deasserted RESET\n");
+
+ /* Fetch MDIO pins */
+ smi->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
+ if (IS_ERR(smi->mdc))
+ return PTR_ERR(smi->mdc);
+ smi->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
+ if (IS_ERR(smi->mdio))
+ return PTR_ERR(smi->mdio);
+
+ smi->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
+
+ ret = smi->ops->detect(smi);
+ if (ret) {
+ dev_err(dev, "unable to detect switch\n");
+ return ret;
+ }
+
+ smi->ds = dsa_switch_alloc(dev, smi->num_ports);
+ if (!smi->ds)
+ return -ENOMEM;
+ smi->ds->priv = smi;
+
+ smi->ds->ops = var->ds_ops;
+ ret = dsa_register_switch(smi->ds);
+ if (ret) {
+ dev_err(dev, "unable to register switch ret = %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int realtek_smi_remove(struct platform_device *pdev)
+{
+ struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
+
+ dsa_unregister_switch(smi->ds);
+ gpiod_set_value(smi->reset, 1);
+
+ return 0;
+}
+
+static const struct of_device_id realtek_smi_of_match[] = {
+ {
+ .compatible = "realtek,rtl8366rb",
+ .data = &rtl8366rb_variant,
+ },
+ {
+ /* FIXME: add support for RTL8366S and more */
+ .compatible = "realtek,rtl8366s",
+ .data = NULL,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
+
+static struct platform_driver realtek_smi_driver = {
+ .driver = {
+ .name = "realtek-smi",
+ .of_match_table = of_match_ptr(realtek_smi_of_match),
+ },
+ .probe = realtek_smi_probe,
+ .remove = realtek_smi_remove,
+};
+module_platform_driver(realtek_smi_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek-smi.h b/drivers/net/dsa/realtek-smi.h
new file mode 100644
index 000000000000..9a63b51e1d82
--- /dev/null
+++ b/drivers/net/dsa/realtek-smi.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Realtek SMI interface driver defines
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#ifndef _REALTEK_SMI_H
+#define _REALTEK_SMI_H
+
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <net/dsa.h>
+
+struct realtek_smi_ops;
+struct dentry;
+struct inode;
+struct file;
+
+struct rtl8366_mib_counter {
+ unsigned int base;
+ unsigned int offset;
+ unsigned int length;
+ const char *name;
+};
+
+struct rtl8366_vlan_mc {
+ u16 vid;
+ u16 untag;
+ u16 member;
+ u8 fid;
+ u8 priority;
+};
+
+struct rtl8366_vlan_4k {
+ u16 vid;
+ u16 untag;
+ u16 member;
+ u8 fid;
+};
+
+struct realtek_smi {
+ struct device *dev;
+ struct gpio_desc *reset;
+ struct gpio_desc *mdc;
+ struct gpio_desc *mdio;
+ struct regmap *map;
+ struct mii_bus *slave_mii_bus;
+
+ unsigned int clk_delay;
+ u8 cmd_read;
+ u8 cmd_write;
+ spinlock_t lock; /* Locks around command writes */
+ struct dsa_switch *ds;
+ struct irq_domain *irqdomain;
+ bool leds_disabled;
+
+ unsigned int cpu_port;
+ unsigned int num_ports;
+ unsigned int num_vlan_mc;
+ unsigned int num_mib_counters;
+ struct rtl8366_mib_counter *mib_counters;
+
+ const struct realtek_smi_ops *ops;
+
+ int vlan_enabled;
+ int vlan4k_enabled;
+
+ char buf[4096];
+};
+
+/**
+ * struct realtek_smi_ops - vtable for the per-SMI-chiptype operations
+ * @detect: detects the chiptype
+ */
+struct realtek_smi_ops {
+ int (*detect)(struct realtek_smi *smi);
+ int (*reset_chip)(struct realtek_smi *smi);
+ int (*setup)(struct realtek_smi *smi);
+ void (*cleanup)(struct realtek_smi *smi);
+ int (*get_mib_counter)(struct realtek_smi *smi,
+ int port,
+ struct rtl8366_mib_counter *mib,
+ u64 *mibvalue);
+ int (*get_vlan_mc)(struct realtek_smi *smi, u32 index,
+ struct rtl8366_vlan_mc *vlanmc);
+ int (*set_vlan_mc)(struct realtek_smi *smi, u32 index,
+ const struct rtl8366_vlan_mc *vlanmc);
+ int (*get_vlan_4k)(struct realtek_smi *smi, u32 vid,
+ struct rtl8366_vlan_4k *vlan4k);
+ int (*set_vlan_4k)(struct realtek_smi *smi,
+ const struct rtl8366_vlan_4k *vlan4k);
+ int (*get_mc_index)(struct realtek_smi *smi, int port, int *val);
+ int (*set_mc_index)(struct realtek_smi *smi, int port, int index);
+ bool (*is_vlan_valid)(struct realtek_smi *smi, unsigned int vlan);
+ int (*enable_vlan)(struct realtek_smi *smi, bool enable);
+ int (*enable_vlan4k)(struct realtek_smi *smi, bool enable);
+ int (*enable_port)(struct realtek_smi *smi, int port, bool enable);
+ int (*phy_read)(struct realtek_smi *smi, int phy, int regnum);
+ int (*phy_write)(struct realtek_smi *smi, int phy, int regnum,
+ u16 val);
+};
+
+struct realtek_smi_variant {
+ const struct dsa_switch_ops *ds_ops;
+ const struct realtek_smi_ops *ops;
+ unsigned int clk_delay;
+ u8 cmd_read;
+ u8 cmd_write;
+};
+
+/* SMI core calls */
+int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
+ u32 data);
+int realtek_smi_setup_mdio(struct realtek_smi *smi);
+
+/* RTL8366 library helpers */
+int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
+int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+ u32 untag, u32 fid);
+int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
+int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+ unsigned int vid);
+int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
+int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
+int rtl8366_reset_vlan(struct realtek_smi *smi);
+int rtl8366_init_vlan(struct realtek_smi *smi);
+int rtl8366_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering);
+int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+void rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int rtl8366_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data);
+int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
+void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
+
+extern const struct realtek_smi_variant rtl8366rb_variant;
+
+#endif /* _REALTEK_SMI_H */
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
new file mode 100644
index 000000000000..6dedd43442cc
--- /dev/null
+++ b/drivers/net/dsa/rtl8366.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Realtek SMI library helpers for the RTL8366x variants
+ * RTL8366RB and RTL8366S
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ */
+#include <linux/if_bridge.h>
+#include <net/dsa.h>
+
+#include "realtek-smi.h"
+
+int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
+{
+ int ret;
+ int i;
+
+ *used = 0;
+ for (i = 0; i < smi->num_ports; i++) {
+ int index = 0;
+
+ ret = smi->ops->get_mc_index(smi, i, &index);
+ if (ret)
+ return ret;
+
+ if (mc_index == index) {
+ *used = 1;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
+
+int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+ u32 untag, u32 fid)
+{
+ struct rtl8366_vlan_4k vlan4k;
+ int ret;
+ int i;
+
+ /* Update the 4K table */
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (ret)
+ return ret;
+
+ vlan4k.member = member;
+ vlan4k.untag = untag;
+ vlan4k.fid = fid;
+ ret = smi->ops->set_vlan_4k(smi, &vlan4k);
+ if (ret)
+ return ret;
+
+ /* Try to find an existing MC entry for this VID */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ struct rtl8366_vlan_mc vlanmc;
+
+ ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ if (vid == vlanmc.vid) {
+ /* update the MC entry */
+ vlanmc.member = member;
+ vlanmc.untag = untag;
+ vlanmc.fid = fid;
+
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
+
+int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
+{
+ struct rtl8366_vlan_mc vlanmc;
+ int ret;
+ int index;
+
+ ret = smi->ops->get_mc_index(smi, port, &index);
+ if (ret)
+ return ret;
+
+ ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
+ if (ret)
+ return ret;
+
+ *val = vlanmc.vid;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
+
+int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+ unsigned int vid)
+{
+ struct rtl8366_vlan_mc vlanmc;
+ struct rtl8366_vlan_4k vlan4k;
+ int ret;
+ int i;
+
+ /* Try to find an existing MC entry for this VID */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ if (vid == vlanmc.vid) {
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ ret = smi->ops->set_mc_index(smi, port, i);
+ return ret;
+ }
+ }
+
+ /* We have no MC entry for this VID, try to find an empty one */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ if (vlanmc.vid == 0 && vlanmc.member == 0) {
+ /* Update the entry from the 4K table */
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (ret)
+ return ret;
+
+ vlanmc.vid = vid;
+ vlanmc.member = vlan4k.member;
+ vlanmc.untag = vlan4k.untag;
+ vlanmc.fid = vlan4k.fid;
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ ret = smi->ops->set_mc_index(smi, port, i);
+ return ret;
+ }
+ }
+
+ /* MC table is full, try to find an unused entry and replace it */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ int used;
+
+ ret = rtl8366_mc_is_used(smi, i, &used);
+ if (ret)
+ return ret;
+
+ if (!used) {
+ /* Update the entry from the 4K table */
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (ret)
+ return ret;
+
+ vlanmc.vid = vid;
+ vlanmc.member = vlan4k.member;
+ vlanmc.untag = vlan4k.untag;
+ vlanmc.fid = vlan4k.fid;
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ ret = smi->ops->set_mc_index(smi, port, i);
+ return ret;
+ }
+ }
+
+ dev_err(smi->dev,
+ "all VLAN member configurations are in use\n");
+
+ return -ENOSPC;
+}
+EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
+
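+/* rtl8366_set_pvid() above is in effect a small allocator over the
+ * member-config (MC) slots (16 on the RTL8366RB): first reuse a slot
+ * already holding this VID, else claim an all-zero slot, else evict a
+ * slot that no port's MC index points at -- seeding the new entry from
+ * the 4K table in the latter two cases -- and only when every slot is
+ * live return -ENOSPC.
+ */
+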
+int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
+{
+ int ret;
+
+ /* To enable 4k VLAN, ordinary VLAN must be enabled first,
+ * but if we disable 4k VLAN it is fine to leave ordinary
+ * VLAN enabled.
+ */
+ if (enable) {
+ /* Make sure VLAN is ON */
+ ret = smi->ops->enable_vlan(smi, true);
+ if (ret)
+ return ret;
+
+ smi->vlan_enabled = true;
+ }
+
+ ret = smi->ops->enable_vlan4k(smi, enable);
+ if (ret)
+ return ret;
+
+ smi->vlan4k_enabled = enable;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
+
+int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable)
+{
+ int ret;
+
+ ret = smi->ops->enable_vlan(smi, enable);
+ if (ret)
+ return ret;
+
+ smi->vlan_enabled = enable;
+
+ /* If we turn VLAN off, make sure that we turn off
+ * 4k VLAN as well, if that happened to be on.
+ */
+ if (!enable) {
+ smi->vlan4k_enabled = false;
+ ret = smi->ops->enable_vlan4k(smi, false);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
+
+int rtl8366_reset_vlan(struct realtek_smi *smi)
+{
+ struct rtl8366_vlan_mc vlanmc;
+ int ret;
+ int i;
+
+ rtl8366_enable_vlan(smi, false);
+ rtl8366_enable_vlan4k(smi, false);
+
+ /* Clear the 16 VLAN member configurations */
+ vlanmc.vid = 0;
+ vlanmc.priority = 0;
+ vlanmc.member = 0;
+ vlanmc.untag = 0;
+ vlanmc.fid = 0;
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
+
+int rtl8366_init_vlan(struct realtek_smi *smi)
+{
+ int port;
+ int ret;
+
+ ret = rtl8366_reset_vlan(smi);
+ if (ret)
+ return ret;
+
+ /* Loop over the available ports and associate each port
+ * with VLAN (port + 1).
+ */
+ for (port = 0; port < smi->num_ports; port++) {
+ u32 mask;
+
+ if (port == smi->cpu_port)
+ /* For the CPU port, make all ports members of this
+ * VLAN.
+ */
+ mask = GENMASK(smi->num_ports - 1, 0);
+ else
+ /* For all other ports, enable itself plus the
+ * CPU port.
+ */
+ mask = BIT(port) | BIT(smi->cpu_port);
+
+ /* For each port, set the port as an untagged member of VLAN
+ * (port + 1), except for the CPU port: the CPU port (5) is a
+ * member of VLAN 6, and so are ALL the other ports as well.
+ * Use filter 0 (no filter).
+ */
+ dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n",
+ (port + 1), port, mask);
+ ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0);
+ if (ret)
+ return ret;
+
+ dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n",
+ (port + 1), port, (port + 1));
+ ret = rtl8366_set_pvid(smi, port, (port + 1));
+ if (ret)
+ return ret;
+ }
+
+ return rtl8366_enable_vlan(smi, true);
+}
+EXPORT_SYMBOL_GPL(rtl8366_init_vlan);
+
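+/* Concrete outcome on the RTL8366RB (6 ports, CPU port 5), assuming
+ * the defaults set up elsewhere in this patch: ports 0..4 land in
+ * VLANs 1..5 with member/untag mask BIT(port) | BIT(5) and PVID
+ * port + 1, while the CPU port lands in VLAN 6 with mask 0x3f covering
+ * every port.
+ */
+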
+int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8366_vlan_4k vlan4k;
+ int ret;
+
+ if (!smi->ops->is_vlan_valid(smi, port))
+ return -EINVAL;
+
+ dev_info(smi->dev, "%s filtering on port %d\n",
+ vlan_filtering ? "enable" : "disable",
+ port);
+
+ /* TODO:
+ * The hardware supports filter IDs (FID) 0..7; I have no clue how to
+ * support this in the driver when the callback only says on/off.
+ */
+ ret = smi->ops->get_vlan_4k(smi, port, &vlan4k);
+ if (ret)
+ return ret;
+
+ /* Just set the filter to FID 1 for now then */
+ ret = rtl8366_set_vlan(smi, port,
+ vlan4k.member,
+ vlan4k.untag,
+ 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering);
+
+int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ if (!smi->ops->is_vlan_valid(smi, port))
+ return -EINVAL;
+
+ dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
+ vlan->vid_begin, vlan->vid_end);
+
+ /* Enable VLAN in the hardware
+ * FIXME: what's with this 4k business?
+ * Just rtl8366_enable_vlan() seems inconclusive.
+ */
+ ret = rtl8366_enable_vlan4k(smi, true);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_vlan_prepare);
+
+void rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
+ struct realtek_smi *smi = ds->priv;
+ u32 member = 0;
+ u32 untag = 0;
+ u16 vid;
+ int ret;
+
+ if (!smi->ops->is_vlan_valid(smi, port))
+ return;
+
+ dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
+ port,
+ untagged ? "untagged" : "tagged",
+ pvid ? " PVID" : "no PVID");
+
+ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ dev_err(smi->dev, "port is DSA or CPU port\n");
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ int pvid_val = 0;
+
+ dev_info(smi->dev, "add VLAN %04x\n", vid);
+ member |= BIT(port);
+
+ if (untagged)
+ untag |= BIT(port);
+
+ /* To ensure that we have a valid MC entry for this VLAN,
+ * initialize the port VLAN ID here.
+ */
+ ret = rtl8366_get_pvid(smi, port, &pvid_val);
+ if (ret < 0) {
+ dev_err(smi->dev, "could not lookup PVID for port %d\n",
+ port);
+ return;
+ }
+ if (pvid_val == 0) {
+ ret = rtl8366_set_pvid(smi, port, vid);
+ if (ret < 0)
+ return;
+ }
+ }
+
+ ret = rtl8366_set_vlan(smi, port, member, untag, 0);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to set up VLAN %04x",
+ vid);
+}
+EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
+
+int rtl8366_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct realtek_smi *smi = ds->priv;
+ u16 vid;
+ int ret;
+
+ dev_info(smi->dev, "del VLAN on port %d\n", port);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ int i;
+
+ dev_info(smi->dev, "del VLAN %04x\n", vid);
+
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ struct rtl8366_vlan_mc vlanmc;
+
+ ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ if (vid == vlanmc.vid) {
+ /* clear VLAN member configurations */
+ vlanmc.vid = 0;
+ vlanmc.priority = 0;
+ vlanmc.member = 0;
+ vlanmc.untag = 0;
+ vlanmc.fid = 0;
+
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to remove VLAN %04x\n",
+ vid);
+ return ret;
+ }
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
+
+void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8366_mib_counter *mib;
+ int i;
+
+ if (port >= smi->num_ports)
+ return;
+
+ for (i = 0; i < smi->num_mib_counters; i++) {
+ mib = &smi->mib_counters[i];
+ strncpy(data + i * ETH_GSTRING_LEN,
+ mib->name, ETH_GSTRING_LEN);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_strings);
+
+int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct realtek_smi *smi = ds->priv;
+
+ /* We only support SS_STATS */
+ if (sset != ETH_SS_STATS)
+ return 0;
+ if (port >= smi->num_ports)
+ return -EINVAL;
+
+ return smi->num_mib_counters;
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
+
+void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
+{
+ struct realtek_smi *smi = ds->priv;
+ int i;
+ int ret;
+
+ if (port >= smi->num_ports)
+ return;
+
+ for (i = 0; i < smi->num_mib_counters; i++) {
+ struct rtl8366_mib_counter *mib;
+ u64 mibvalue = 0;
+
+ mib = &smi->mib_counters[i];
+ ret = smi->ops->get_mib_counter(smi, port, mib, &mibvalue);
+ if (ret) {
+ dev_err(smi->dev, "error reading MIB counter %s\n",
+ mib->name);
+ }
+ data[i] = mibvalue;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats);
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
new file mode 100644
index 000000000000..a4d5049df692
--- /dev/null
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -0,0 +1,1454 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Realtek SMI subdriver for the Realtek RTL8366RB ethernet switch
+ *
+ * This is a sparsely documented chip; the only viable documentation seems
+ * to be a patched-up code drop from the vendor that appears in various
+ * GPL source trees.
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#include "realtek-smi.h"
+
+#define RTL8366RB_PORT_NUM_CPU 5
+#define RTL8366RB_NUM_PORTS 6
+#define RTL8366RB_PHY_NO_MAX 4
+#define RTL8366RB_PHY_ADDR_MAX 31
+
+/* Switch Global Configuration register */
+#define RTL8366RB_SGCR 0x0000
+#define RTL8366RB_SGCR_EN_BC_STORM_CTRL BIT(0)
+#define RTL8366RB_SGCR_MAX_LENGTH(a) ((a) << 4)
+#define RTL8366RB_SGCR_MAX_LENGTH_MASK RTL8366RB_SGCR_MAX_LENGTH(0x3)
+#define RTL8366RB_SGCR_MAX_LENGTH_1522 RTL8366RB_SGCR_MAX_LENGTH(0x0)
+#define RTL8366RB_SGCR_MAX_LENGTH_1536 RTL8366RB_SGCR_MAX_LENGTH(0x1)
+#define RTL8366RB_SGCR_MAX_LENGTH_1552 RTL8366RB_SGCR_MAX_LENGTH(0x2)
+#define RTL8366RB_SGCR_MAX_LENGTH_9216 RTL8366RB_SGCR_MAX_LENGTH(0x3)
+#define RTL8366RB_SGCR_EN_VLAN BIT(13)
+#define RTL8366RB_SGCR_EN_VLAN_4KTB BIT(14)
+
+/* Port Enable Control register */
+#define RTL8366RB_PECR 0x0001
+
+/* Switch Security Control registers */
+#define RTL8366RB_SSCR0 0x0002
+#define RTL8366RB_SSCR1 0x0003
+#define RTL8366RB_SSCR2 0x0004
+#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0)
+
+/* Port Mode Control registers */
+#define RTL8366RB_PMC0 0x0005
+#define RTL8366RB_PMC0_SPI BIT(0)
+#define RTL8366RB_PMC0_EN_AUTOLOAD BIT(1)
+#define RTL8366RB_PMC0_PROBE BIT(2)
+#define RTL8366RB_PMC0_DIS_BISR BIT(3)
+#define RTL8366RB_PMC0_ADCTEST BIT(4)
+#define RTL8366RB_PMC0_SRAM_DIAG BIT(5)
+#define RTL8366RB_PMC0_EN_SCAN BIT(6)
+#define RTL8366RB_PMC0_P4_IOMODE_SHIFT 7
+#define RTL8366RB_PMC0_P4_IOMODE_MASK GENMASK(9, 7)
+#define RTL8366RB_PMC0_P5_IOMODE_SHIFT 10
+#define RTL8366RB_PMC0_P5_IOMODE_MASK GENMASK(12, 10)
+#define RTL8366RB_PMC0_SDSMODE_SHIFT 13
+#define RTL8366RB_PMC0_SDSMODE_MASK GENMASK(15, 13)
+#define RTL8366RB_PMC1 0x0006
+
+/* Port Mirror Control Register */
+#define RTL8366RB_PMCR 0x0007
+#define RTL8366RB_PMCR_SOURCE_PORT(a) (a)
+#define RTL8366RB_PMCR_SOURCE_PORT_MASK 0x000f
+#define RTL8366RB_PMCR_MONITOR_PORT(a) ((a) << 4)
+#define RTL8366RB_PMCR_MONITOR_PORT_MASK 0x00f0
+#define RTL8366RB_PMCR_MIRROR_RX BIT(8)
+#define RTL8366RB_PMCR_MIRROR_TX BIT(9)
+#define RTL8366RB_PMCR_MIRROR_SPC BIT(10)
+#define RTL8366RB_PMCR_MIRROR_ISO BIT(11)
+
+/* bits 0..7 = port 0, bits 8..15 = port 1 */
+#define RTL8366RB_PAACR0 0x0010
+/* bits 0..7 = port 2, bits 8..15 = port 3 */
+#define RTL8366RB_PAACR1 0x0011
+/* bits 0..7 = port 4, bits 8..15 = port 5 */
+#define RTL8366RB_PAACR2 0x0012
+#define RTL8366RB_PAACR_SPEED_10M 0
+#define RTL8366RB_PAACR_SPEED_100M 1
+#define RTL8366RB_PAACR_SPEED_1000M 2
+#define RTL8366RB_PAACR_FULL_DUPLEX BIT(2)
+#define RTL8366RB_PAACR_LINK_UP BIT(4)
+#define RTL8366RB_PAACR_TX_PAUSE BIT(5)
+#define RTL8366RB_PAACR_RX_PAUSE BIT(6)
+#define RTL8366RB_PAACR_AN BIT(7)
+
+#define RTL8366RB_PAACR_CPU_PORT (RTL8366RB_PAACR_SPEED_1000M | \
+ RTL8366RB_PAACR_FULL_DUPLEX | \
+ RTL8366RB_PAACR_LINK_UP | \
+ RTL8366RB_PAACR_TX_PAUSE | \
+ RTL8366RB_PAACR_RX_PAUSE)
+
+/* bits 0..7 = port 0, bits 8..15 = port 1 */
+#define RTL8366RB_PSTAT0 0x0014
+/* bits 0..7 = port 2, bits 8..15 = port 3 */
+#define RTL8366RB_PSTAT1 0x0015
+/* bits 0..7 = port 4, bits 8..15 = port 5 */
+#define RTL8366RB_PSTAT2 0x0016
+
+#define RTL8366RB_POWER_SAVING_REG 0x0021
+
+/* CPU port control reg */
+#define RTL8368RB_CPU_CTRL_REG 0x0061
+#define RTL8368RB_CPU_PORTS_MSK 0x00FF
+/* Enables inserting custom tag length/type 0x8899 */
+#define RTL8368RB_CPU_INSTAG BIT(15)
+
+#define RTL8366RB_SMAR0 0x0070 /* bits 0..15 */
+#define RTL8366RB_SMAR1 0x0071 /* bits 16..31 */
+#define RTL8366RB_SMAR2 0x0072 /* bits 32..47 */
+
+#define RTL8366RB_RESET_CTRL_REG 0x0100
+#define RTL8366RB_CHIP_CTRL_RESET_HW BIT(0)
+#define RTL8366RB_CHIP_CTRL_RESET_SW BIT(1)
+
+#define RTL8366RB_CHIP_ID_REG 0x0509
+#define RTL8366RB_CHIP_ID_8366 0x5937
+#define RTL8366RB_CHIP_VERSION_CTRL_REG 0x050A
+#define RTL8366RB_CHIP_VERSION_MASK 0xf
+
+/* PHY registers control */
+#define RTL8366RB_PHY_ACCESS_CTRL_REG 0x8000
+#define RTL8366RB_PHY_CTRL_READ BIT(0)
+#define RTL8366RB_PHY_CTRL_WRITE 0
+#define RTL8366RB_PHY_ACCESS_BUSY_REG 0x8001
+#define RTL8366RB_PHY_INT_BUSY BIT(0)
+#define RTL8366RB_PHY_EXT_BUSY BIT(4)
+#define RTL8366RB_PHY_ACCESS_DATA_REG 0x8002
+#define RTL8366RB_PHY_EXT_CTRL_REG 0x8010
+#define RTL8366RB_PHY_EXT_WRDATA_REG 0x8011
+#define RTL8366RB_PHY_EXT_RDDATA_REG 0x8012
+
+#define RTL8366RB_PHY_REG_MASK 0x1f
+#define RTL8366RB_PHY_PAGE_OFFSET 5
+#define RTL8366RB_PHY_PAGE_MASK (0xf << 5)
+#define RTL8366RB_PHY_NO_OFFSET 9
+#define RTL8366RB_PHY_NO_MASK (0x1f << 9)
+
+#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
+
+/* LED control registers */
+#define RTL8366RB_LED_BLINKRATE_REG 0x0430
+#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
+#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
+#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
+#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
+#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
+#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
+#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+
+#define RTL8366RB_LED_CTRL_REG 0x0431
+#define RTL8366RB_LED_OFF 0x0
+#define RTL8366RB_LED_DUP_COL 0x1
+#define RTL8366RB_LED_LINK_ACT 0x2
+#define RTL8366RB_LED_SPD1000 0x3
+#define RTL8366RB_LED_SPD100 0x4
+#define RTL8366RB_LED_SPD10 0x5
+#define RTL8366RB_LED_SPD1000_ACT 0x6
+#define RTL8366RB_LED_SPD100_ACT 0x7
+#define RTL8366RB_LED_SPD10_ACT 0x8
+#define RTL8366RB_LED_SPD100_10_ACT 0x9
+#define RTL8366RB_LED_FIBER 0xa
+#define RTL8366RB_LED_AN_FAULT 0xb
+#define RTL8366RB_LED_LINK_RX 0xc
+#define RTL8366RB_LED_LINK_TX 0xd
+#define RTL8366RB_LED_MASTER 0xe
+#define RTL8366RB_LED_FORCE 0xf
+#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
+#define RTL8366RB_LED_1_OFFSET 6
+#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
+#define RTL8366RB_LED_3_OFFSET 6
+
+#define RTL8366RB_MIB_COUNT 33
+#define RTL8366RB_GLOBAL_MIB_COUNT 1
+#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
+#define RTL8366RB_MIB_COUNTER_BASE 0x1000
+#define RTL8366RB_MIB_CTRL_REG 0x13F0
+#define RTL8366RB_MIB_CTRL_USER_MASK 0x0FFC
+#define RTL8366RB_MIB_CTRL_BUSY_MASK BIT(0)
+#define RTL8366RB_MIB_CTRL_RESET_MASK BIT(1)
+#define RTL8366RB_MIB_CTRL_PORT_RESET(_p) BIT(2 + (_p))
+#define RTL8366RB_MIB_CTRL_GLOBAL_RESET BIT(11)
+
+#define RTL8366RB_PORT_VLAN_CTRL_BASE 0x0063
+#define RTL8366RB_PORT_VLAN_CTRL_REG(_p) \
+ (RTL8366RB_PORT_VLAN_CTRL_BASE + (_p) / 4)
+#define RTL8366RB_PORT_VLAN_CTRL_MASK 0xf
+#define RTL8366RB_PORT_VLAN_CTRL_SHIFT(_p) (4 * ((_p) % 4))
+
+#define RTL8366RB_VLAN_TABLE_READ_BASE 0x018C
+#define RTL8366RB_VLAN_TABLE_WRITE_BASE 0x0185
+
+#define RTL8366RB_TABLE_ACCESS_CTRL_REG 0x0180
+#define RTL8366RB_TABLE_VLAN_READ_CTRL 0x0E01
+#define RTL8366RB_TABLE_VLAN_WRITE_CTRL 0x0F01
+
+#define RTL8366RB_VLAN_MC_BASE(_x) (0x0020 + (_x) * 3)
+
+#define RTL8366RB_PORT_LINK_STATUS_BASE 0x0014
+#define RTL8366RB_PORT_STATUS_SPEED_MASK 0x0003
+#define RTL8366RB_PORT_STATUS_DUPLEX_MASK 0x0004
+#define RTL8366RB_PORT_STATUS_LINK_MASK 0x0010
+#define RTL8366RB_PORT_STATUS_TXPAUSE_MASK 0x0020
+#define RTL8366RB_PORT_STATUS_RXPAUSE_MASK 0x0040
+#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
+
+#define RTL8366RB_NUM_VLANS 16
+#define RTL8366RB_NUM_LEDGROUPS 4
+#define RTL8366RB_NUM_VIDS 4096
+#define RTL8366RB_PRIORITYMAX 7
+#define RTL8366RB_FIDMAX 7
+
+#define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */
+#define RTL8366RB_PORT_2 BIT(1) /* In userspace port 1 */
+#define RTL8366RB_PORT_3 BIT(2) /* In userspace port 2 */
+#define RTL8366RB_PORT_4 BIT(3) /* In userspace port 3 */
+#define RTL8366RB_PORT_5 BIT(4) /* In userspace port 4 */
+
+#define RTL8366RB_PORT_CPU BIT(5) /* CPU port */
+
+#define RTL8366RB_PORT_ALL (RTL8366RB_PORT_1 | \
+ RTL8366RB_PORT_2 | \
+ RTL8366RB_PORT_3 | \
+ RTL8366RB_PORT_4 | \
+ RTL8366RB_PORT_5 | \
+ RTL8366RB_PORT_CPU)
+
+#define RTL8366RB_PORT_ALL_BUT_CPU (RTL8366RB_PORT_1 | \
+ RTL8366RB_PORT_2 | \
+ RTL8366RB_PORT_3 | \
+ RTL8366RB_PORT_4 | \
+ RTL8366RB_PORT_5)
+
+#define RTL8366RB_PORT_ALL_EXTERNAL (RTL8366RB_PORT_1 | \
+ RTL8366RB_PORT_2 | \
+ RTL8366RB_PORT_3 | \
+ RTL8366RB_PORT_4)
+
+#define RTL8366RB_PORT_ALL_INTERNAL RTL8366RB_PORT_CPU
+
+/* First configuration word per member config, VID and prio */
+#define RTL8366RB_VLAN_VID_MASK 0xfff
+#define RTL8366RB_VLAN_PRIORITY_SHIFT 12
+#define RTL8366RB_VLAN_PRIORITY_MASK 0x7
+/* Second configuration word per member config, member and untagged */
+#define RTL8366RB_VLAN_UNTAG_SHIFT 8
+#define RTL8366RB_VLAN_UNTAG_MASK 0xff
+#define RTL8366RB_VLAN_MEMBER_MASK 0xff
+/* Third config word per member config, STAG currently unused */
+#define RTL8366RB_VLAN_STAG_MBR_MASK 0xff
+#define RTL8366RB_VLAN_STAG_MBR_SHIFT 8
+#define RTL8366RB_VLAN_STAG_IDX_MASK 0x7
+#define RTL8366RB_VLAN_STAG_IDX_SHIFT 5
+#define RTL8366RB_VLAN_FID_MASK 0x7
+
+/* Port ingress bandwidth control */
+#define RTL8366RB_IB_BASE 0x0200
+#define RTL8366RB_IB_REG(pnum) (RTL8366RB_IB_BASE + (pnum))
+#define RTL8366RB_IB_BDTH_MASK 0x3fff
+#define RTL8366RB_IB_PREIFG BIT(14)
+
+/* Port egress bandwidth control */
+#define RTL8366RB_EB_BASE 0x02d1
+#define RTL8366RB_EB_REG(pnum) (RTL8366RB_EB_BASE + (pnum))
+#define RTL8366RB_EB_BDTH_MASK 0x3fff
+#define RTL8366RB_EB_PREIFG_REG 0x02f8
+#define RTL8366RB_EB_PREIFG BIT(9)
+
+#define RTL8366RB_BDTH_SW_MAX 1048512 /* 1048576? */
+#define RTL8366RB_BDTH_UNIT 64
+#define RTL8366RB_BDTH_REG_DEFAULT 16383
+
+/* QOS */
+#define RTL8366RB_QOS BIT(15)
+/* Include/Exclude Preamble and IFG (20 bytes). 0:Exclude, 1:Include. */
+#define RTL8366RB_QOS_DEFAULT_PREIFG 1
+
+/* Interrupt handling */
+#define RTL8366RB_INTERRUPT_CONTROL_REG 0x0440
+#define RTL8366RB_INTERRUPT_POLARITY BIT(0)
+#define RTL8366RB_P4_RGMII_LED BIT(2)
+#define RTL8366RB_INTERRUPT_MASK_REG 0x0441
+#define RTL8366RB_INTERRUPT_LINK_CHGALL GENMASK(11, 0)
+#define RTL8366RB_INTERRUPT_ACLEXCEED BIT(8)
+#define RTL8366RB_INTERRUPT_STORMEXCEED BIT(9)
+#define RTL8366RB_INTERRUPT_P4_FIBER BIT(12)
+#define RTL8366RB_INTERRUPT_P4_UTP BIT(13)
+#define RTL8366RB_INTERRUPT_VALID (RTL8366RB_INTERRUPT_LINK_CHGALL | \
+ RTL8366RB_INTERRUPT_ACLEXCEED | \
+ RTL8366RB_INTERRUPT_STORMEXCEED | \
+ RTL8366RB_INTERRUPT_P4_FIBER | \
+ RTL8366RB_INTERRUPT_P4_UTP)
+#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442
+#define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */
+
+/* bits 0..5 enable force when cleared */
+#define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11
+
+#define RTL8366RB_OAM_PARSER_REG 0x0F14
+#define RTL8366RB_OAM_MULTIPLEXER_REG 0x0F15
+
+#define RTL8366RB_GREEN_FEATURE_REG 0x0F51
+#define RTL8366RB_GREEN_FEATURE_MSK 0x0007
+#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
+#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
+
+static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
+ { 0, 0, 4, "IfInOctets" },
+ { 0, 4, 4, "EtherStatsOctets" },
+ { 0, 8, 2, "EtherStatsUnderSizePkts" },
+ { 0, 10, 2, "EtherFragments" },
+ { 0, 12, 2, "EtherStatsPkts64Octets" },
+ { 0, 14, 2, "EtherStatsPkts65to127Octets" },
+ { 0, 16, 2, "EtherStatsPkts128to255Octets" },
+ { 0, 18, 2, "EtherStatsPkts256to511Octets" },
+ { 0, 20, 2, "EtherStatsPkts512to1023Octets" },
+ { 0, 22, 2, "EtherStatsPkts1024to1518Octets" },
+ { 0, 24, 2, "EtherOversizeStats" },
+ { 0, 26, 2, "EtherStatsJabbers" },
+ { 0, 28, 2, "IfInUcastPkts" },
+ { 0, 30, 2, "EtherStatsMulticastPkts" },
+ { 0, 32, 2, "EtherStatsBroadcastPkts" },
+ { 0, 34, 2, "EtherStatsDropEvents" },
+ { 0, 36, 2, "Dot3StatsFCSErrors" },
+ { 0, 38, 2, "Dot3StatsSymbolErrors" },
+ { 0, 40, 2, "Dot3InPauseFrames" },
+ { 0, 42, 2, "Dot3ControlInUnknownOpcodes" },
+ { 0, 44, 4, "IfOutOctets" },
+ { 0, 48, 2, "Dot3StatsSingleCollisionFrames" },
+ { 0, 50, 2, "Dot3StatMultipleCollisionFrames" },
+ { 0, 52, 2, "Dot3sDeferredTransmissions" },
+ { 0, 54, 2, "Dot3StatsLateCollisions" },
+ { 0, 56, 2, "EtherStatsCollisions" },
+ { 0, 58, 2, "Dot3StatsExcessiveCollisions" },
+ { 0, 60, 2, "Dot3OutPauseFrames" },
+ { 0, 62, 2, "Dot1dBasePortDelayExceededDiscards" },
+ { 0, 64, 2, "Dot1dTpPortInDiscards" },
+ { 0, 66, 2, "IfOutUcastPkts" },
+ { 0, 68, 2, "IfOutMulticastPkts" },
+ { 0, 70, 2, "IfOutBroadcastPkts" },
+};
+
+static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
+ int port,
+ struct rtl8366_mib_counter *mib,
+ u64 *mibvalue)
+{
+ u32 addr, val;
+ int ret;
+ int i;
+
+ addr = RTL8366RB_MIB_COUNTER_BASE +
+ RTL8366RB_MIB_COUNTER_PORT_OFFSET * (port) +
+ mib->offset;
+
+ /* Write the counter address first; the ASIC will then prepare the
+ * 64-bit counter to be retrieved.
+ */
+ ret = regmap_write(smi->map, addr, 0); /* Write whatever */
+ if (ret)
+ return ret;
+
+ /* Read MIB control register */
+ ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val);
+ if (ret)
+ return -EIO;
+
+ if (val & RTL8366RB_MIB_CTRL_BUSY_MASK)
+ return -EBUSY;
+
+ if (val & RTL8366RB_MIB_CTRL_RESET_MASK)
+ return -EIO;
+
+ /* Read each individual MIB 16 bits at a time */
+ *mibvalue = 0;
+ for (i = mib->length; i > 0; i--) {
+ ret = regmap_read(smi->map, addr + (i - 1), &val);
+ if (ret)
+ return ret;
+ *mibvalue = (*mibvalue << 16) | (val & 0xFFFF);
+ }
+ return 0;
+}
+
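+/* The assembly loop above reads from the highest address down, so for
+ * a 4-word (64-bit) counter occupying addr..addr+3 the word at addr+3
+ * ends up in the top 16 bits:
+ *
+ *   value = (w3 << 48) | (w2 << 32) | (w1 << 16) | w0
+ *
+ * with wN being the 16-bit word at addr + N.
+ */
+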
+static u32 rtl8366rb_get_irqmask(struct irq_data *d)
+{
+ int line = irqd_to_hwirq(d);
+ u32 val;
+
+ /* For line interrupts we combine link down in bits
+ * 6..11 with link up in bits 0..5 into one interrupt.
+ */
+ if (line < 12)
+ val = BIT(line) | BIT(line + 6);
+ else
+ val = BIT(line);
+ return val;
+}
+
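+/* Example: for hwirq 3 (port 3 link change) the mask above is
+ * BIT(3) | BIT(9) -- link-up and link-down latch in separate status
+ * bits but are presented to the client as one interrupt line.
+ */
+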
+static void rtl8366rb_mask_irq(struct irq_data *d)
+{
+ struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+ int ret;
+
+ ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ rtl8366rb_get_irqmask(d), 0);
+ if (ret)
+ dev_err(smi->dev, "could not mask IRQ\n");
+}
+
+static void rtl8366rb_unmask_irq(struct irq_data *d)
+{
+ struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+ int ret;
+
+ ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ rtl8366rb_get_irqmask(d),
+ rtl8366rb_get_irqmask(d));
+ if (ret)
+ dev_err(smi->dev, "could not unmask IRQ\n");
+}
+
+static irqreturn_t rtl8366rb_irq(int irq, void *data)
+{
+ struct realtek_smi *smi = data;
+ u32 stat;
+ int ret;
+
+ /* This clears the IRQ status register */
+ ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ &stat);
+ if (ret) {
+ dev_err(smi->dev, "can't read interrupt status\n");
+ return IRQ_NONE;
+ }
+ stat &= RTL8366RB_INTERRUPT_VALID;
+ if (!stat)
+ return IRQ_NONE;
+ while (stat) {
+ int line = __ffs(stat);
+ int child_irq;
+
+ stat &= ~BIT(line);
+ /* For line interrupts we combine link down in bits
+ * 6..11 with link up in bits 0..5 into one interrupt.
+ */
+ if (line < 12 && line > 5)
+ line -= 5;
+ child_irq = irq_find_mapping(smi->irqdomain, line);
+ handle_nested_irq(child_irq);
+ }
+ return IRQ_HANDLED;
+}
+
+static struct irq_chip rtl8366rb_irq_chip = {
+ .name = "RTL8366RB",
+ .irq_mask = rtl8366rb_mask_irq,
+ .irq_unmask = rtl8366rb_unmask_irq,
+};
+
+static int rtl8366rb_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &rtl8366rb_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static void rtl8366rb_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_nested_thread(irq, 0);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops rtl8366rb_irqdomain_ops = {
+ .map = rtl8366rb_irq_map,
+ .unmap = rtl8366rb_irq_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
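+/* A design note inferred from the code: the children are marked with
+ * irq_set_nested_thread() and the parent is requested below as a
+ * threaded, IRQF_ONESHOT handler, because mask/unmask go over the slow,
+ * sleeping bit-banged SMI bus and so cannot run in hard IRQ context.
+ */
+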
+static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
+{
+ struct device_node *intc;
+ unsigned long irq_trig;
+ int irq;
+ int ret;
+ u32 val;
+ int i;
+
+ intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ if (!intc) {
+ dev_err(smi->dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+ /* RTL8366RB IRQs cascade off this one */
+ irq = of_irq_get(intc, 0);
+ if (irq <= 0) {
+ dev_err(smi->dev, "failed to get parent IRQ\n");
+ return irq ? irq : -EINVAL;
+ }
+
+ /* This clears the IRQ status register */
+ ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ &val);
+ if (ret) {
+ dev_err(smi->dev, "can't read interrupt status\n");
+ return ret;
+ }
+
+ /* Fetch IRQ edge information from the descriptor */
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ switch (irq_trig) {
+ case IRQF_TRIGGER_RISING:
+ case IRQF_TRIGGER_HIGH:
+ dev_info(smi->dev, "active high/rising IRQ\n");
+ val = 0;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ case IRQF_TRIGGER_LOW:
+ dev_info(smi->dev, "active low/falling IRQ\n");
+ val = RTL8366RB_INTERRUPT_POLARITY;
+ break;
+ default:
+ dev_info(smi->dev, "unknown IRQ trigger type, assuming active high\n");
+ val = 0;
+ break;
+ }
+ ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG,
+ RTL8366RB_INTERRUPT_POLARITY,
+ val);
+ if (ret) {
+ dev_err(smi->dev, "could not configure IRQ polarity\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(smi->dev, irq, NULL,
+ rtl8366rb_irq, IRQF_ONESHOT,
+ "RTL8366RB", smi);
+ if (ret) {
+ dev_err(smi->dev, "unable to request irq: %d\n", ret);
+ return ret;
+ }
+ smi->irqdomain = irq_domain_add_linear(intc,
+ RTL8366RB_NUM_INTERRUPT,
+ &rtl8366rb_irqdomain_ops,
+ smi);
+ if (!smi->irqdomain) {
+ dev_err(smi->dev, "failed to create IRQ domain\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < smi->num_ports; i++)
+ irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
+
+ return 0;
+}
+
+static int rtl8366rb_set_addr(struct realtek_smi *smi)
+{
+ u8 addr[ETH_ALEN];
+ u16 val;
+ int ret;
+
+ eth_random_addr(addr);
+
+ dev_info(smi->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ val = addr[0] << 8 | addr[1];
+ ret = regmap_write(smi->map, RTL8366RB_SMAR0, val);
+ if (ret)
+ return ret;
+ val = addr[2] << 8 | addr[3];
+ ret = regmap_write(smi->map, RTL8366RB_SMAR1, val);
+ if (ret)
+ return ret;
+ val = addr[4] << 8 | addr[5];
+ ret = regmap_write(smi->map, RTL8366RB_SMAR2, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
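
SMAR0..SMAR2 are 16-bit registers, so the 6-byte MAC address goes out as three big-endian halfwords. The packing can be sanity-checked on the host, with printf() standing in for regmap_write():

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint8_t addr[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
        int i;

        /* Same packing as rtl8366rb_set_addr(): addr[2i] in the high byte */
        for (i = 0; i < 3; i++) {
            uint16_t val = (addr[2 * i] << 8) | addr[2 * i + 1];

            printf("SMAR%d <- 0x%04x\n", i, val);
        }
        return 0;
    }
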
+
+/* Found in a vendor driver */
+
+/* For the "version 0" early silicon, appear in most source releases */
+static const u16 rtl8366rb_init_jam_ver_0[] = {
+ 0x000B, 0x0001, 0x03A6, 0x0100, 0x03A7, 0x0001, 0x02D1, 0x3FFF,
+ 0x02D2, 0x3FFF, 0x02D3, 0x3FFF, 0x02D4, 0x3FFF, 0x02D5, 0x3FFF,
+ 0x02D6, 0x3FFF, 0x02D7, 0x3FFF, 0x02D8, 0x3FFF, 0x022B, 0x0688,
+ 0x022C, 0x0FAC, 0x03D0, 0x4688, 0x03D1, 0x01F5, 0x0000, 0x0830,
+ 0x02F9, 0x0200, 0x02F7, 0x7FFF, 0x02F8, 0x03FF, 0x0080, 0x03E8,
+ 0x0081, 0x00CE, 0x0082, 0x00DA, 0x0083, 0x0230, 0xBE0F, 0x2000,
+ 0x0231, 0x422A, 0x0232, 0x422A, 0x0233, 0x422A, 0x0234, 0x422A,
+ 0x0235, 0x422A, 0x0236, 0x422A, 0x0237, 0x422A, 0x0238, 0x422A,
+ 0x0239, 0x422A, 0x023A, 0x422A, 0x023B, 0x422A, 0x023C, 0x422A,
+ 0x023D, 0x422A, 0x023E, 0x422A, 0x023F, 0x422A, 0x0240, 0x422A,
+ 0x0241, 0x422A, 0x0242, 0x422A, 0x0243, 0x422A, 0x0244, 0x422A,
+ 0x0245, 0x422A, 0x0246, 0x422A, 0x0247, 0x422A, 0x0248, 0x422A,
+ 0x0249, 0x0146, 0x024A, 0x0146, 0x024B, 0x0146, 0xBE03, 0xC961,
+ 0x024D, 0x0146, 0x024E, 0x0146, 0x024F, 0x0146, 0x0250, 0x0146,
+ 0xBE64, 0x0226, 0x0252, 0x0146, 0x0253, 0x0146, 0x024C, 0x0146,
+ 0x0251, 0x0146, 0x0254, 0x0146, 0xBE62, 0x3FD0, 0x0084, 0x0320,
+ 0x0255, 0x0146, 0x0256, 0x0146, 0x0257, 0x0146, 0x0258, 0x0146,
+ 0x0259, 0x0146, 0x025A, 0x0146, 0x025B, 0x0146, 0x025C, 0x0146,
+ 0x025D, 0x0146, 0x025E, 0x0146, 0x025F, 0x0146, 0x0260, 0x0146,
+ 0x0261, 0xA23F, 0x0262, 0x0294, 0x0263, 0xA23F, 0x0264, 0x0294,
+ 0x0265, 0xA23F, 0x0266, 0x0294, 0x0267, 0xA23F, 0x0268, 0x0294,
+ 0x0269, 0xA23F, 0x026A, 0x0294, 0x026B, 0xA23F, 0x026C, 0x0294,
+ 0x026D, 0xA23F, 0x026E, 0x0294, 0x026F, 0xA23F, 0x0270, 0x0294,
+ 0x02F5, 0x0048, 0xBE09, 0x0E00, 0xBE1E, 0x0FA0, 0xBE14, 0x8448,
+ 0xBE15, 0x1007, 0xBE4A, 0xA284, 0xC454, 0x3F0B, 0xC474, 0x3F0B,
+ 0xBE48, 0x3672, 0xBE4B, 0x17A7, 0xBE4C, 0x0B15, 0xBE52, 0x0EDD,
+ 0xBE49, 0x8C00, 0xBE5B, 0x785C, 0xBE5C, 0x785C, 0xBE5D, 0x785C,
+ 0xBE61, 0x368A, 0xBE63, 0x9B84, 0xC456, 0xCC13, 0xC476, 0xCC13,
+ 0xBE65, 0x307D, 0xBE6D, 0x0005, 0xBE6E, 0xE120, 0xBE2E, 0x7BAF,
+};
+
+/* This v1 init sequence is from the Belkin F5D8235 U-Boot release */
+static const u16 rtl8366rb_init_jam_ver_1[] = {
+ 0x0000, 0x0830, 0x0001, 0x8000, 0x0400, 0x8130, 0xBE78, 0x3C3C,
+ 0x0431, 0x5432, 0xBE37, 0x0CE4, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0,
+ 0xC44C, 0x1585, 0xC44C, 0x1185, 0xC44C, 0x1585, 0xC46C, 0x1585,
+ 0xC46C, 0x1185, 0xC46C, 0x1585, 0xC451, 0x2135, 0xC471, 0x2135,
+ 0xBE10, 0x8140, 0xBE15, 0x0007, 0xBE6E, 0xE120, 0xBE69, 0xD20F,
+ 0xBE6B, 0x0320, 0xBE24, 0xB000, 0xBE23, 0xFF51, 0xBE22, 0xDF20,
+ 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800, 0xBE24, 0x0000,
+ 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60, 0xBE21, 0x0140,
+ 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000, 0xBE2E, 0x7B7A,
+ 0xBE36, 0x0CE4, 0x02F5, 0x0048, 0xBE77, 0x2940, 0x000A, 0x83E0,
+ 0xBE79, 0x3C3C, 0xBE00, 0x1340,
+};
+
+/* This v2 init sequence is from the Belkin F5D8235 U-Boot release */
+static const u16 rtl8366rb_init_jam_ver_2[] = {
+ 0x0450, 0x0000, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432,
+ 0xC44F, 0x6250, 0xC46F, 0x6250, 0xC456, 0x0C14, 0xC476, 0x0C14,
+ 0xC44C, 0x1C85, 0xC44C, 0x1885, 0xC44C, 0x1C85, 0xC46C, 0x1C85,
+ 0xC46C, 0x1885, 0xC46C, 0x1C85, 0xC44C, 0x0885, 0xC44C, 0x0881,
+ 0xC44C, 0x0885, 0xC46C, 0x0885, 0xC46C, 0x0881, 0xC46C, 0x0885,
+ 0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001,
+ 0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6E, 0x0320,
+ 0xBE77, 0x2940, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120,
+ 0x8000, 0x0001, 0xBE15, 0x1007, 0x8000, 0x0000, 0xBE15, 0x1007,
+ 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160, 0xBE10, 0x8140,
+ 0xBE00, 0x1340, 0x0F51, 0x0010,
+};
+
+/* Appears in a DD-WRT code dump */
+static const u16 rtl8366rb_init_jam_ver_3[] = {
+ 0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432,
+ 0x0F51, 0x0017, 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0,
+ 0xC456, 0x0C14, 0xC476, 0x0C14, 0xC454, 0x3F8B, 0xC474, 0x3F8B,
+ 0xC450, 0x2071, 0xC470, 0x2071, 0xC451, 0x226B, 0xC471, 0x226B,
+ 0xC452, 0xA293, 0xC472, 0xA293, 0xC44C, 0x1585, 0xC44C, 0x1185,
+ 0xC44C, 0x1585, 0xC46C, 0x1585, 0xC46C, 0x1185, 0xC46C, 0x1585,
+ 0xC44C, 0x0185, 0xC44C, 0x0181, 0xC44C, 0x0185, 0xC46C, 0x0185,
+ 0xC46C, 0x0181, 0xC46C, 0x0185, 0xBE24, 0xB000, 0xBE23, 0xFF51,
+ 0xBE22, 0xDF20, 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800,
+ 0xBE24, 0x0000, 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60,
+ 0xBE21, 0x0140, 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000,
+ 0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001,
+ 0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6B, 0x0320,
+ 0xBE77, 0x2800, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120,
+ 0x8000, 0x0001, 0xBE10, 0x8140, 0x8000, 0x0000, 0xBE10, 0x8140,
+ 0xBE15, 0x1007, 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160,
+ 0xBE10, 0x8140, 0xBE00, 0x1340, 0x0450, 0x0000, 0x0401, 0x0000,
+};
+
+/* Belkin F5D8235 v1, "belkin,f5d8235-v1" */
+static const u16 rtl8366rb_init_jam_f5d8235[] = {
+ 0x0242, 0x02BF, 0x0245, 0x02BF, 0x0248, 0x02BF, 0x024B, 0x02BF,
+ 0x024E, 0x02BF, 0x0251, 0x02BF, 0x0254, 0x0A3F, 0x0256, 0x0A3F,
+ 0x0258, 0x0A3F, 0x025A, 0x0A3F, 0x025C, 0x0A3F, 0x025E, 0x0A3F,
+ 0x0263, 0x007C, 0x0100, 0x0004, 0xBE5B, 0x3500, 0x800E, 0x200F,
+ 0xBE1D, 0x0F00, 0x8001, 0x5011, 0x800A, 0xA2F4, 0x800B, 0x17A3,
+ 0xBE4B, 0x17A3, 0xBE41, 0x5011, 0xBE17, 0x2100, 0x8000, 0x8304,
+ 0xBE40, 0x8304, 0xBE4A, 0xA2F4, 0x800C, 0xA8D5, 0x8014, 0x5500,
+ 0x8015, 0x0004, 0xBE4C, 0xA8D5, 0xBE59, 0x0008, 0xBE09, 0x0E00,
+ 0xBE36, 0x1036, 0xBE37, 0x1036, 0x800D, 0x00FF, 0xBE4D, 0x00FF,
+};
+
+/* DGN3500, "netgear,dgn3500", "netgear,dgn3500b" */
+static const u16 rtl8366rb_init_jam_dgn3500[] = {
+ 0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0F51, 0x0017,
+ 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, 0x0450, 0x0000,
+ 0x0401, 0x0000, 0x0431, 0x0960,
+};
+
+/* This jam table activates "green ethernet", a low power mode that is
+ * claimed to detect the cable length and use no more power than
+ * necessary; the ports should enter power saving mode 10 seconds after
+ * a cable is disconnected. The table seems to be the same in all sources.
+ */
+static const u16 rtl8366rb_green_jam[][2] = {
+ {0xBE78, 0x323C}, {0xBE77, 0x5000}, {0xBE2E, 0x7BA7},
+ {0xBE59, 0x3459}, {0xBE5A, 0x745A}, {0xBE5B, 0x785C},
+ {0xBE5C, 0x785C}, {0xBE6E, 0xE120}, {0xBE79, 0x323C},
+};
+
+static int rtl8366rb_setup(struct dsa_switch *ds)
+{
+ struct realtek_smi *smi = ds->priv;
+ const u16 *jam_table;
+ u32 chip_ver = 0;
+ u32 chip_id = 0;
+ int jam_size;
+ u32 val;
+ int ret;
+ int i;
+
+ ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id);
+ if (ret) {
+ dev_err(smi->dev, "unable to read chip id\n");
+ return ret;
+ }
+
+ switch (chip_id) {
+ case RTL8366RB_CHIP_ID_8366:
+ break;
+ default:
+ dev_err(smi->dev, "unknown chip id (%04x)\n", chip_id);
+ return -ENODEV;
+ }
+
+ ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
+ &chip_ver);
+ if (ret) {
+ dev_err(smi->dev, "unable to read chip version\n");
+ return ret;
+ }
+
+ dev_info(smi->dev, "RTL%04x ver %u chip found\n",
+ chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK);
+
+ /* Do the init dance using the right jam table */
+ switch (chip_ver) {
+ case 0:
+ jam_table = rtl8366rb_init_jam_ver_0;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_0);
+ break;
+ case 1:
+ jam_table = rtl8366rb_init_jam_ver_1;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_1);
+ break;
+ case 2:
+ jam_table = rtl8366rb_init_jam_ver_2;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_2);
+ break;
+ default:
+ jam_table = rtl8366rb_init_jam_ver_3;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_3);
+ break;
+ }
+
+ /* Special jam tables for special routers
+ * TODO: are these necessary? Maintainers, please test
+ * without them, using just the off-the-shelf tables.
+ */
+ if (of_machine_is_compatible("belkin,f5d8235-v1")) {
+ jam_table = rtl8366rb_init_jam_f5d8235;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_f5d8235);
+ }
+ if (of_machine_is_compatible("netgear,dgn3500") ||
+ of_machine_is_compatible("netgear,dgn3500b")) {
+ jam_table = rtl8366rb_init_jam_dgn3500;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
+ }
+
+ i = 0;
+ while (i < jam_size) {
+ if ((jam_table[i] & 0xBE00) == 0xBE00) {
+ ret = regmap_read(smi->map,
+ RTL8366RB_PHY_ACCESS_BUSY_REG,
+ &val);
+ if (ret)
+ return ret;
+ if (!(val & RTL8366RB_PHY_INT_BUSY)) {
+ ret = regmap_write(smi->map,
+ RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
+ if (ret)
+ return ret;
+ }
+ }
+ dev_dbg(smi->dev, "jam %04x into register %04x\n",
+ jam_table[i + 1],
+ jam_table[i]);
+ ret = regmap_write(smi->map,
+ jam_table[i],
+ jam_table[i + 1]);
+ if (ret)
+ return ret;
+ i += 2;
+ }
+
+ /* Set up the "green ethernet" feature */
+ i = 0;
+ while (i < ARRAY_SIZE(rtl8366rb_green_jam)) {
+ ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_BUSY_REG,
+ &val);
+ if (ret)
+ return ret;
+ if (!(val & RTL8366RB_PHY_INT_BUSY)) {
+ ret = regmap_write(smi->map,
+ RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
+ if (ret)
+ return ret;
+ ret = regmap_write(smi->map,
+ rtl8366rb_green_jam[i][0],
+ rtl8366rb_green_jam[i][1]);
+ if (ret)
+ return ret;
+ i++;
+ }
+ }
+ ret = regmap_write(smi->map,
+ RTL8366RB_GREEN_FEATURE_REG,
+ (chip_ver == 1) ? 0x0007 : 0x0003);
+ if (ret)
+ return ret;
+
+ /* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */
+ ret = regmap_write(smi->map, 0x0c, 0x240);
+ if (ret)
+ return ret;
+ ret = regmap_write(smi->map, 0x0d, 0x240);
+ if (ret)
+ return ret;
+
+ /* Set some random MAC address */
+ ret = rtl8366rb_set_addr(smi);
+ if (ret)
+ return ret;
+
+ /* Enable CPU port and enable inserting CPU tag
+ *
+ * Disabling RTL8368RB_CPU_INSTAG here will completely change the
+ * behaviour of the switch: it will start talking Realtek RRCP
+ * internally. It is probably possible to experiment with this,
+ * but then the kernel needs to understand and handle RRCP first.
+ */
+ ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG,
+ 0xFFFF,
+ RTL8368RB_CPU_INSTAG | BIT(smi->cpu_port));
+ if (ret)
+ return ret;
+
+ /* Make sure we default-enable the fixed CPU port */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PECR,
+ BIT(smi->cpu_port),
+ 0);
+ if (ret)
+ return ret;
+
+ /* Set maximum packet length to 1536 bytes */
+ ret = regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ RTL8366RB_SGCR_MAX_LENGTH_MASK,
+ RTL8366RB_SGCR_MAX_LENGTH_1536);
+ if (ret)
+ return ret;
+
+ /* Enable learning for all ports */
+ ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0);
+ if (ret)
+ return ret;
+
+ /* Enable auto ageing for all ports */
+ ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0);
+ if (ret)
+ return ret;
+
+ /* Port 4 setup: this enables Port 4, usually the WAN port.
+ * The common PHY IO mode is apparently mode 0, which is not what
+ * the port is initialized to. There is no explanation of the
+ * IO modes in the Realtek source code; if your WAN port is
+ * connected to something exotic such as fiber, this might
+ * be worth experimenting with.
+ */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PMC0,
+ RTL8366RB_PMC0_P4_IOMODE_MASK,
+ 0 << RTL8366RB_PMC0_P4_IOMODE_SHIFT);
+ if (ret)
+ return ret;
+
+ /* Discard VLAN tagged packets if the port is not a member of
+ * the VLAN with which the packets are associated.
+ */
+ ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ RTL8366RB_PORT_ALL);
+ if (ret)
+ return ret;
+
+ /* Don't drop packets whose DA has not been learned */
+ ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2,
+ RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0);
+ if (ret)
+ return ret;
+
+ /* Set blinking, TODO: make this configurable */
+ ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG,
+ RTL8366RB_LED_BLINKRATE_MASK,
+ RTL8366RB_LED_BLINKRATE_56MS);
+ if (ret)
+ return ret;
+
+ /* Set up LED activity:
+ * Each port has 4 LEDs, we configure all ports to the same
+ * behaviour (no individual config) but we can set up each
+ * LED separately.
+ */
+ if (smi->leds_disabled) {
+ /* Turn everything off */
+ regmap_update_bits(smi->map,
+ RTL8366RB_LED_0_1_CTRL_REG,
+ 0x0FFF, 0);
+ regmap_update_bits(smi->map,
+ RTL8366RB_LED_2_3_CTRL_REG,
+ 0x0FFF, 0);
+ regmap_update_bits(smi->map,
+ RTL8366RB_INTERRUPT_CONTROL_REG,
+ RTL8366RB_P4_RGMII_LED,
+ 0);
+ val = RTL8366RB_LED_OFF;
+ } else {
+ /* TODO: make this configurable per LED */
+ val = RTL8366RB_LED_FORCE;
+ }
+ for (i = 0; i < 4; i++) {
+ ret = regmap_update_bits(smi->map,
+ RTL8366RB_LED_CTRL_REG,
+ 0xf << (i * 4),
+ val << (i * 4));
+ if (ret)
+ return ret;
+ }
+
+ ret = rtl8366_init_vlan(smi);
+ if (ret)
+ return ret;
+
+ ret = rtl8366rb_setup_cascaded_irq(smi);
+ if (ret)
+ dev_info(smi->dev, "no interrupt support\n");
+
+ ret = realtek_smi_setup_mdio(smi);
+ if (ret) {
+ dev_info(smi->dev, "could not set up MDIO bus\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
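
The jam tables walked in the setup function above are flat arrays of (register, value) pairs, consumed two entries per iteration; entries whose register matches the 0xBE00 pattern address PHY registers and get a busy check plus a PHY write command first. A host-side sketch of the walk with the register I/O stubbed out:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    static void reg_write(uint16_t reg, uint16_t val) /* stub for regmap_write() */
    {
        printf("jam 0x%04x -> register 0x%04x\n", val, reg);
    }

    int main(void)
    {
        /* Two (register, value) pairs in the flat table layout */
        static const uint16_t jam[] = { 0x0000, 0x0830, 0xBE78, 0x3C3C };
        size_t i;

        for (i = 0; i + 1 < sizeof(jam) / sizeof(jam[0]); i += 2) {
            if ((jam[i] & 0xBE00) == 0xBE00) {
                /* A PHY register: the driver checks the PHY busy flag
                 * and issues a PHY write command before this write.
                 */
            }
            reg_write(jam[i], jam[i + 1]);
        }
        return 0;
    }
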
+
+static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
+ int port)
+{
+ /* For now, the RTL switches are handled without any custom tags.
+ *
+ * It is possible to turn on "custom tags" by removing the
+ * RTL8368RB_CPU_INSTAG flag when enabling the port, but what that
+ * does is unfamiliar to DSA: ethernet frames of type 0x8899, the
+ * Realtek Remote Control Protocol (RRCP), start to appear on the
+ * CPU port of the device. So this is not the ordinary few extra
+ * bytes in the frame. Instead it appears that the switch starts to
+ * talk RRCP internally, which means a pretty complex implementation
+ * decoding and responding to the RRCP protocol is needed to exploit
+ * this.
+ *
+ * The OpenRRCP project (dormant since 2009) has reverse-engineered
+ * parts of the protocol.
+ */
+ return DSA_TAG_PROTO_NONE;
+}
+
+static void rtl8366rb_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ if (port != smi->cpu_port)
+ return;
+
+ dev_info(smi->dev, "adjust link on CPU port (%d)\n", port);
+
+ /* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
+ ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG,
+ BIT(port), BIT(port));
+ if (ret)
+ return;
+
+ ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2,
+ 0xFF00U,
+ RTL8366RB_PAACR_CPU_PORT << 8);
+ if (ret)
+ return;
+
+ /* Enable the CPU port */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ 0);
+ if (ret)
+ return;
+}
+
+static void rb8366rb_set_port_led(struct realtek_smi *smi,
+ int port, bool enable)
+{
+ u16 val = enable ? 0x3f : 0;
+ int ret;
+
+ if (smi->leds_disabled)
+ return;
+
+ switch (port) {
+ case 0:
+ ret = regmap_update_bits(smi->map,
+ RTL8366RB_LED_0_1_CTRL_REG,
+ 0x3F, val);
+ break;
+ case 1:
+ ret = regmap_update_bits(smi->map,
+ RTL8366RB_LED_0_1_CTRL_REG,
+ 0x3F << RTL8366RB_LED_1_OFFSET,
+ val << RTL8366RB_LED_1_OFFSET);
+ break;
+ case 2:
+ ret = regmap_update_bits(smi->map,
+ RTL8366RB_LED_2_3_CTRL_REG,
+ 0x3F, val);
+ break;
+ case 3:
+ ret = regmap_update_bits(smi->map,
+ RTL8366RB_LED_2_3_CTRL_REG,
+ 0x3F << RTL8366RB_LED_3_OFFSET,
+ val << RTL8366RB_LED_3_OFFSET);
+ break;
+ case 4:
+ ret = regmap_update_bits(smi->map,
+ RTL8366RB_INTERRUPT_CONTROL_REG,
+ RTL8366RB_P4_RGMII_LED,
+ enable ? RTL8366RB_P4_RGMII_LED : 0);
+ break;
+ default:
+ dev_err(smi->dev, "no LED for port %d\n", port);
+ return;
+ }
+ if (ret)
+ dev_err(smi->dev, "error updating LED on port %d\n", port);
+}
+
+static int
+rtl8366rb_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ dev_dbg(smi->dev, "enable port %d\n", port);
+ ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ 0);
+ if (ret)
+ return ret;
+
+ rb8366rb_set_port_led(smi, port, true);
+ return 0;
+}
+
+static void
+rtl8366rb_port_disable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ dev_dbg(smi->dev, "disable port %d\n", port);
+ ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ BIT(port));
+ if (ret)
+ return;
+
+ rb8366rb_set_port_led(smi, port, false);
+}
+
+static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
+ struct rtl8366_vlan_4k *vlan4k)
+{
+ u32 data[3];
+ int ret;
+ int i;
+
+ memset(vlan4k, 0, sizeof(struct rtl8366_vlan_4k));
+
+ if (vid >= RTL8366RB_NUM_VIDS)
+ return -EINVAL;
+
+ /* write VID */
+ ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
+ vid & RTL8366RB_VLAN_VID_MASK);
+ if (ret)
+ return ret;
+
+ /* write table access control word */
+ ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ RTL8366RB_TABLE_VLAN_READ_CTRL);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 3; i++) {
+ ret = regmap_read(smi->map,
+ RTL8366RB_VLAN_TABLE_READ_BASE + i,
+ &data[i]);
+ if (ret)
+ return ret;
+ }
+
+ vlan4k->vid = vid;
+ vlan4k->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
+ RTL8366RB_VLAN_UNTAG_MASK;
+ vlan4k->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
+ vlan4k->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
+
+ return 0;
+}
+
+static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
+ const struct rtl8366_vlan_4k *vlan4k)
+{
+ u32 data[3];
+ int ret;
+ int i;
+
+ if (vlan4k->vid >= RTL8366RB_NUM_VIDS ||
+ vlan4k->member > RTL8366RB_VLAN_MEMBER_MASK ||
+ vlan4k->untag > RTL8366RB_VLAN_UNTAG_MASK ||
+ vlan4k->fid > RTL8366RB_FIDMAX)
+ return -EINVAL;
+
+ data[0] = vlan4k->vid & RTL8366RB_VLAN_VID_MASK;
+ data[1] = (vlan4k->member & RTL8366RB_VLAN_MEMBER_MASK) |
+ ((vlan4k->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
+ RTL8366RB_VLAN_UNTAG_SHIFT);
+ data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK;
+
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(smi->map,
+ RTL8366RB_VLAN_TABLE_WRITE_BASE + i,
+ data[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* write table access control word */
+ ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ RTL8366RB_TABLE_VLAN_WRITE_CTRL);
+
+ return ret;
+}
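
A 4k VLAN entry travels through three 16-bit staging words: the VID in word 0, the member and untag port sets in word 1, and the FID in word 2. Below is a standalone sketch of the word 1 packing; the mask and shift values are invented for illustration, the real RTL8366RB_VLAN_* constants live elsewhere in the driver and may differ:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative values only; the real masks come from the driver header */
    #define MEMBER_MASK 0x3F
    #define UNTAG_MASK  0x3F
    #define UNTAG_SHIFT 8

    int main(void)
    {
        uint16_t member = 0x21; /* ports 0 and 5 in the VLAN */
        uint16_t untag = 0x20;  /* port 5 leaves untagged */
        uint16_t word1 = (member & MEMBER_MASK) |
                         ((untag & UNTAG_MASK) << UNTAG_SHIFT);

        printf("VLAN table word 1 = 0x%04x\n", word1);
        return 0;
    }
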
+
+static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
+ struct rtl8366_vlan_mc *vlanmc)
+{
+ u32 data[3];
+ int ret;
+ int i;
+
+ memset(vlanmc, 0, sizeof(struct rtl8366_vlan_mc));
+
+ if (index >= RTL8366RB_NUM_VLANS)
+ return -EINVAL;
+
+ for (i = 0; i < 3; i++) {
+ ret = regmap_read(smi->map,
+ RTL8366RB_VLAN_MC_BASE(index) + i,
+ &data[i]);
+ if (ret)
+ return ret;
+ }
+
+ vlanmc->vid = data[0] & RTL8366RB_VLAN_VID_MASK;
+ vlanmc->priority = (data[0] >> RTL8366RB_VLAN_PRIORITY_SHIFT) &
+ RTL8366RB_VLAN_PRIORITY_MASK;
+ vlanmc->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
+ RTL8366RB_VLAN_UNTAG_MASK;
+ vlanmc->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
+ vlanmc->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
+
+ return 0;
+}
+
+static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
+ const struct rtl8366_vlan_mc *vlanmc)
+{
+ u32 data[3];
+ int ret;
+ int i;
+
+ if (index >= RTL8366RB_NUM_VLANS ||
+ vlanmc->vid >= RTL8366RB_NUM_VIDS ||
+ vlanmc->priority > RTL8366RB_PRIORITYMAX ||
+ vlanmc->member > RTL8366RB_VLAN_MEMBER_MASK ||
+ vlanmc->untag > RTL8366RB_VLAN_UNTAG_MASK ||
+ vlanmc->fid > RTL8366RB_FIDMAX)
+ return -EINVAL;
+
+ data[0] = (vlanmc->vid & RTL8366RB_VLAN_VID_MASK) |
+ ((vlanmc->priority & RTL8366RB_VLAN_PRIORITY_MASK) <<
+ RTL8366RB_VLAN_PRIORITY_SHIFT);
+ data[1] = (vlanmc->member & RTL8366RB_VLAN_MEMBER_MASK) |
+ ((vlanmc->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
+ RTL8366RB_VLAN_UNTAG_SHIFT);
+ data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK;
+
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(smi->map,
+ RTL8366RB_VLAN_MC_BASE(index) + i,
+ data[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
+{
+ u32 data;
+ int ret;
+
+ if (port >= smi->num_ports)
+ return -EINVAL;
+
+ ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ &data);
+ if (ret)
+ return ret;
+
+ *val = (data >> RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)) &
+ RTL8366RB_PORT_VLAN_CTRL_MASK;
+
+ return 0;
+}
+
+static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
+{
+ if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
+ return -EINVAL;
+
+ return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ RTL8366RB_PORT_VLAN_CTRL_MASK <<
+ RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
+ (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
+ RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
+}
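
Each port's member-config index is a narrow field in a shared control register, so updating one port is a mask-and-shift at that port's offset, which is exactly what the regmap_update_bits() call above encodes. A sketch under an assumed field geometry (4-bit fields, four ports per 16-bit register; the real shift/mask macros may differ):

    #include <stdio.h>
    #include <stdint.h>

    #define CTRL_MASK 0xF                       /* assumed 4-bit index field */
    #define CTRL_SHIFT(port) (((port) & 3) * 4) /* assumed 4 fields/register */

    int main(void)
    {
        uint16_t reg = 0x3210; /* current packed indices for ports 0..3 */
        int port = 2, index = 7;

        /* Clear the port's field, then OR in the new index */
        reg &= ~(CTRL_MASK << CTRL_SHIFT(port));
        reg |= (index & CTRL_MASK) << CTRL_SHIFT(port);
        printf("port VLAN ctrl register = 0x%04x\n", reg); /* 0x3710 */
        return 0;
    }
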
+
+static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
+{
+ unsigned int max = RTL8366RB_NUM_VLANS;
+
+ if (smi->vlan4k_enabled)
+ max = RTL8366RB_NUM_VIDS - 1;
+
+ if (vlan == 0 || vlan >= max)
+ return false;
+
+ return true;
+}
+
+static int rtl8366rb_enable_vlan(struct realtek_smi *smi, bool enable)
+{
+ dev_dbg(smi->dev, "%s VLAN\n", enable ? "enable" : "disable");
+ return regmap_update_bits(smi->map,
+ RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
+ enable ? RTL8366RB_SGCR_EN_VLAN : 0);
+}
+
+static int rtl8366rb_enable_vlan4k(struct realtek_smi *smi, bool enable)
+{
+ dev_dbg(smi->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
+ return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ RTL8366RB_SGCR_EN_VLAN_4KTB,
+ enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
+}
+
+static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+{
+ u32 val;
+ u32 reg;
+ int ret;
+
+ if (phy > RTL8366RB_PHY_NO_MAX)
+ return -EINVAL;
+
+ ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_READ);
+ if (ret)
+ return ret;
+
+ reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
+
+ ret = regmap_write(smi->map, reg, 0);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to write PHY%d reg %04x @ %04x, ret %d\n",
+ phy, regnum, reg, ret);
+ return ret;
+ }
+
+ ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val);
+ if (ret)
+ return ret;
+
+ dev_dbg(smi->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
+ phy, regnum, reg, val);
+
+ return val;
+}
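
PHY registers are reached through a windowed address: bit 15 set, a one-hot PHY select shifted up by RTL8366RB_PHY_NO_OFFSET, and the register number in the low bits. The computation in isolation, using an assumed offset of 5 purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define PHY_NO_OFFSET 5 /* assumed value, for illustration only */

    int main(void)
    {
        int phy = 1, regnum = 2; /* PHY 1, MII_PHYSID1 */
        uint16_t reg = 0x8000 | (1 << (phy + PHY_NO_OFFSET)) | regnum;

        printf("access PHY%d reg %d through 0x%04x\n", phy, regnum, reg);
        return 0;
    }
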
+
+static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+ u16 val)
+{
+ u32 reg;
+ int ret;
+
+ if (phy > RTL8366RB_PHY_NO_MAX)
+ return -EINVAL;
+
+ ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
+ if (ret)
+ return ret;
+
+ reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
+
+ dev_dbg(smi->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
+ phy, regnum, reg, val);
+
+ ret = regmap_write(smi->map, reg, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8366rb_reset_chip(struct realtek_smi *smi)
+{
+ int timeout = 10;
+ u32 val;
+ int ret;
+
+ realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG,
+ RTL8366RB_CHIP_CTRL_RESET_HW);
+ do {
+ usleep_range(20000, 25000);
+ ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & RTL8366RB_CHIP_CTRL_RESET_HW))
+ break;
+ } while (--timeout);
+
+ if (!timeout) {
+ dev_err(smi->dev, "timeout waiting for the switch to reset\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rtl8366rb_detect(struct realtek_smi *smi)
+{
+ struct device *dev = smi->dev;
+ int ret;
+ u32 val;
+
+ /* Detect device */
+ ret = regmap_read(smi->map, 0x5c, &val);
+ if (ret) {
+ dev_err(dev, "can't get chip ID (%d)\n", ret);
+ return ret;
+ }
+
+ switch (val) {
+ case 0x6027:
+ dev_info(dev, "found an RTL8366S switch\n");
+ dev_err(dev, "this switch is not yet supported, submit patches!\n");
+ return -ENODEV;
+ case 0x5937:
+ dev_info(dev, "found an RTL8366RB switch\n");
+ smi->cpu_port = RTL8366RB_PORT_NUM_CPU;
+ smi->num_ports = RTL8366RB_NUM_PORTS;
+ smi->num_vlan_mc = RTL8366RB_NUM_VLANS;
+ smi->mib_counters = rtl8366rb_mib_counters;
+ smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
+ break;
+ default:
+ dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n",
+ val);
+ break;
+ }
+
+ ret = rtl8366rb_reset_chip(smi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct dsa_switch_ops rtl8366rb_switch_ops = {
+ .get_tag_protocol = rtl8366_get_tag_protocol,
+ .setup = rtl8366rb_setup,
+ .adjust_link = rtl8366rb_adjust_link,
+ .get_strings = rtl8366_get_strings,
+ .get_ethtool_stats = rtl8366_get_ethtool_stats,
+ .get_sset_count = rtl8366_get_sset_count,
+ .port_vlan_filtering = rtl8366_vlan_filtering,
+ .port_vlan_prepare = rtl8366_vlan_prepare,
+ .port_vlan_add = rtl8366_vlan_add,
+ .port_vlan_del = rtl8366_vlan_del,
+ .port_enable = rtl8366rb_port_enable,
+ .port_disable = rtl8366rb_port_disable,
+};
+
+static const struct realtek_smi_ops rtl8366rb_smi_ops = {
+ .detect = rtl8366rb_detect,
+ .get_vlan_mc = rtl8366rb_get_vlan_mc,
+ .set_vlan_mc = rtl8366rb_set_vlan_mc,
+ .get_vlan_4k = rtl8366rb_get_vlan_4k,
+ .set_vlan_4k = rtl8366rb_set_vlan_4k,
+ .get_mc_index = rtl8366rb_get_mc_index,
+ .set_mc_index = rtl8366rb_set_mc_index,
+ .get_mib_counter = rtl8366rb_get_mib_counter,
+ .is_vlan_valid = rtl8366rb_is_vlan_valid,
+ .enable_vlan = rtl8366rb_enable_vlan,
+ .enable_vlan4k = rtl8366rb_enable_vlan4k,
+ .phy_read = rtl8366rb_phy_read,
+ .phy_write = rtl8366rb_phy_write,
+};
+
+const struct realtek_smi_variant rtl8366rb_variant = {
+ .ds_ops = &rtl8366rb_switch_ops,
+ .ops = &rtl8366rb_smi_ops,
+ .clk_delay = 10,
+ .cmd_read = 0xa9,
+ .cmd_write = 0xa8,
+};
+EXPORT_SYMBOL_GPL(rtl8366rb_variant);
diff --git a/drivers/net/dsa/vitesse-vsc73xx.c b/drivers/net/dsa/vitesse-vsc73xx.c
new file mode 100644
index 000000000000..9f1b5f2e8a64
--- /dev/null
+++ b/drivers/net/dsa/vitesse-vsc73xx.c
@@ -0,0 +1,1365 @@
+// SPDX-License-Identifier: GPL-2.0
+/* DSA driver for:
+ * Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
+ *
+ * These switches have a built-in 8051 CPU and can download and execute a
+ * firmware in this CPU. They can also be configured to use an external CPU
+ * handling the switch in a memory-mapped manner by connecting to that external
+ * CPU's memory bus.
+ *
+ * This driver (currently) only takes control of the switch chip over SPI and
+ * configures it to route packets around when connected to a CPU port. The
+ * chip has embedded PHYs and VLAN support so we model it using DSA.
+ *
+ * Copyright (C) 2018 Linus Walleij <linus.walleij@linaro.org>
+ * Includes portions of code from the firmware uploader by:
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+#include <linux/etherdevice.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/random.h>
+#include <net/dsa.h>
+
+#define VSC73XX_BLOCK_MAC 0x1 /* Subblocks 0-4, 6 (CPU port) */
+#define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */
+#define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */
+#define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */
+#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */
+#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */
+#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */
+
+#define CPU_PORT 6 /* CPU port */
+
+/* MAC Block registers */
+#define VSC73XX_MAC_CFG 0x00
+#define VSC73XX_MACHDXGAP 0x02
+#define VSC73XX_FCCONF 0x04
+#define VSC73XX_FCMACHI 0x08
+#define VSC73XX_FCMACLO 0x0c
+#define VSC73XX_MAXLEN 0x10
+#define VSC73XX_ADVPORTM 0x19
+#define VSC73XX_TXUPDCFG 0x24
+#define VSC73XX_TXQ_SELECT_CFG 0x28
+#define VSC73XX_RXOCT 0x50
+#define VSC73XX_TXOCT 0x51
+#define VSC73XX_C_RX0 0x52
+#define VSC73XX_C_RX1 0x53
+#define VSC73XX_C_RX2 0x54
+#define VSC73XX_C_TX0 0x55
+#define VSC73XX_C_TX1 0x56
+#define VSC73XX_C_TX2 0x57
+#define VSC73XX_C_CFG 0x58
+#define VSC73XX_CAT_DROP 0x6e
+#define VSC73XX_CAT_PR_MISC_L2 0x6f
+#define VSC73XX_CAT_PR_USR_PRIO 0x75
+#define VSC73XX_Q_MISC_CONF 0xdf
+
+/* MAC_CFG register bits */
+#define VSC73XX_MAC_CFG_WEXC_DIS BIT(31)
+#define VSC73XX_MAC_CFG_PORT_RST BIT(29)
+#define VSC73XX_MAC_CFG_TX_EN BIT(28)
+#define VSC73XX_MAC_CFG_SEED_LOAD BIT(27)
+#define VSC73XX_MAC_CFG_SEED_MASK GENMASK(26, 19)
+#define VSC73XX_MAC_CFG_SEED_OFFSET 19
+#define VSC73XX_MAC_CFG_FDX BIT(18)
+#define VSC73XX_MAC_CFG_GIGA_MODE BIT(17)
+#define VSC73XX_MAC_CFG_RX_EN BIT(16)
+#define VSC73XX_MAC_CFG_VLAN_DBLAWR BIT(15)
+#define VSC73XX_MAC_CFG_VLAN_AWR BIT(14)
+#define VSC73XX_MAC_CFG_100_BASE_T BIT(13) /* Not in manual */
+#define VSC73XX_MAC_CFG_TX_IPG_MASK GENMASK(10, 6)
+#define VSC73XX_MAC_CFG_TX_IPG_OFFSET 6
+#define VSC73XX_MAC_CFG_TX_IPG_1000M (6 << VSC73XX_MAC_CFG_TX_IPG_OFFSET)
+#define VSC73XX_MAC_CFG_TX_IPG_100_10M (17 << VSC73XX_MAC_CFG_TX_IPG_OFFSET)
+#define VSC73XX_MAC_CFG_MAC_RX_RST BIT(5)
+#define VSC73XX_MAC_CFG_MAC_TX_RST BIT(4)
+#define VSC73XX_MAC_CFG_CLK_SEL_MASK GENMASK(2, 0)
+#define VSC73XX_MAC_CFG_CLK_SEL_OFFSET 0
+#define VSC73XX_MAC_CFG_CLK_SEL_1000M 1
+#define VSC73XX_MAC_CFG_CLK_SEL_100M 2
+#define VSC73XX_MAC_CFG_CLK_SEL_10M 3
+#define VSC73XX_MAC_CFG_CLK_SEL_EXT 4
+
+#define VSC73XX_MAC_CFG_1000M_F_PHY (VSC73XX_MAC_CFG_FDX | \
+ VSC73XX_MAC_CFG_GIGA_MODE | \
+ VSC73XX_MAC_CFG_TX_IPG_1000M | \
+ VSC73XX_MAC_CFG_CLK_SEL_EXT)
+#define VSC73XX_MAC_CFG_100_10M_F_PHY (VSC73XX_MAC_CFG_FDX | \
+ VSC73XX_MAC_CFG_TX_IPG_100_10M | \
+ VSC73XX_MAC_CFG_CLK_SEL_EXT)
+#define VSC73XX_MAC_CFG_100_10M_H_PHY (VSC73XX_MAC_CFG_TX_IPG_100_10M | \
+ VSC73XX_MAC_CFG_CLK_SEL_EXT)
+#define VSC73XX_MAC_CFG_1000M_F_RGMII (VSC73XX_MAC_CFG_FDX | \
+ VSC73XX_MAC_CFG_GIGA_MODE | \
+ VSC73XX_MAC_CFG_TX_IPG_1000M | \
+ VSC73XX_MAC_CFG_CLK_SEL_1000M)
+#define VSC73XX_MAC_CFG_RESET (VSC73XX_MAC_CFG_PORT_RST | \
+ VSC73XX_MAC_CFG_MAC_RX_RST | \
+ VSC73XX_MAC_CFG_MAC_TX_RST)
+
+/* Flow control register bits */
+#define VSC73XX_FCCONF_ZERO_PAUSE_EN BIT(17)
+#define VSC73XX_FCCONF_FLOW_CTRL_OBEY BIT(16)
+#define VSC73XX_FCCONF_PAUSE_VAL_MASK GENMASK(15, 0)
+
+/* ADVPORTM advanced port setup register bits */
+#define VSC73XX_ADVPORTM_IFG_PPM BIT(7)
+#define VSC73XX_ADVPORTM_EXC_COL_CONT BIT(6)
+#define VSC73XX_ADVPORTM_EXT_PORT BIT(5)
+#define VSC73XX_ADVPORTM_INV_GTX BIT(4)
+#define VSC73XX_ADVPORTM_ENA_GTX BIT(3)
+#define VSC73XX_ADVPORTM_DDR_MODE BIT(2)
+#define VSC73XX_ADVPORTM_IO_LOOPBACK BIT(1)
+#define VSC73XX_ADVPORTM_HOST_LOOPBACK BIT(0)
+
+/* CAT_DROP categorizer frame dropping register bits */
+#define VSC73XX_CAT_DROP_DROP_MC_SMAC_ENA BIT(6)
+#define VSC73XX_CAT_DROP_FWD_CTRL_ENA BIT(4)
+#define VSC73XX_CAT_DROP_FWD_PAUSE_ENA BIT(3)
+#define VSC73XX_CAT_DROP_UNTAGGED_ENA BIT(2)
+#define VSC73XX_CAT_DROP_TAGGED_ENA BIT(1)
+#define VSC73XX_CAT_DROP_NULL_MAC_ENA BIT(0)
+
+#define VSC73XX_Q_MISC_CONF_EXTENT_MEM BIT(31)
+#define VSC73XX_Q_MISC_CONF_EARLY_TX_MASK GENMASK(4, 1)
+#define VSC73XX_Q_MISC_CONF_EARLY_TX_512 (1 << 1)
+#define VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE BIT(0)
+
+/* Frame analyzer block 2 registers */
+#define VSC73XX_STORMLIMIT 0x02
+#define VSC73XX_ADVLEARN 0x03
+#define VSC73XX_IFLODMSK 0x04
+#define VSC73XX_VLANMASK 0x05
+#define VSC73XX_MACHDATA 0x06
+#define VSC73XX_MACLDATA 0x07
+#define VSC73XX_ANMOVED 0x08
+#define VSC73XX_ANAGEFIL 0x09
+#define VSC73XX_ANEVENTS 0x0a
+#define VSC73XX_ANCNTMASK 0x0b
+#define VSC73XX_ANCNTVAL 0x0c
+#define VSC73XX_LEARNMASK 0x0d
+#define VSC73XX_UFLODMASK 0x0e
+#define VSC73XX_MFLODMASK 0x0f
+#define VSC73XX_RECVMASK 0x10
+#define VSC73XX_AGGRCTRL 0x20
+#define VSC73XX_AGGRMSKS 0x30 /* Until 0x3f */
+#define VSC73XX_DSTMASKS 0x40 /* Until 0x7f */
+#define VSC73XX_SRCMASKS 0x80 /* Until 0x87 */
+#define VSC73XX_CAPENAB 0xa0
+#define VSC73XX_MACACCESS 0xb0
+#define VSC73XX_IPMCACCESS 0xb1
+#define VSC73XX_MACTINDX 0xc0
+#define VSC73XX_VLANACCESS 0xd0
+#define VSC73XX_VLANTIDX 0xe0
+#define VSC73XX_AGENCTRL 0xf0
+#define VSC73XX_CAPRST 0xff
+
+#define VSC73XX_MACACCESS_CPU_COPY BIT(14)
+#define VSC73XX_MACACCESS_FWD_KILL BIT(13)
+#define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12)
+#define VSC73XX_MACACCESS_AGED_FLAG BIT(11)
+#define VSC73XX_MACACCESS_VALID BIT(10)
+#define VSC73XX_MACACCESS_LOCKED BIT(9)
+#define VSC73XX_MACACCESS_DEST_IDX_MASK GENMASK(8, 3)
+#define VSC73XX_MACACCESS_CMD_MASK GENMASK(2, 0)
+#define VSC73XX_MACACCESS_CMD_IDLE 0
+#define VSC73XX_MACACCESS_CMD_LEARN 1
+#define VSC73XX_MACACCESS_CMD_FORGET 2
+#define VSC73XX_MACACCESS_CMD_AGE_TABLE 3
+#define VSC73XX_MACACCESS_CMD_FLUSH_TABLE 4
+#define VSC73XX_MACACCESS_CMD_CLEAR_TABLE 5
+#define VSC73XX_MACACCESS_CMD_READ_ENTRY 6
+#define VSC73XX_MACACCESS_CMD_WRITE_ENTRY 7
+
+#define VSC73XX_VLANACCESS_LEARN_DISABLED BIT(30)
+#define VSC73XX_VLANACCESS_VLAN_MIRROR BIT(29)
+#define VSC73XX_VLANACCESS_VLAN_SRC_CHECK BIT(28)
+#define VSC73XX_VLANACCESS_VLAN_PORT_MASK GENMASK(9, 2)
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(2, 0)
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE 0
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY 1
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY 2
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE 3
+
+/* MII block 3 registers */
+#define VSC73XX_MII_STAT 0x0
+#define VSC73XX_MII_CMD 0x1
+#define VSC73XX_MII_DATA 0x2
+
+/* Arbiter block 5 registers */
+#define VSC73XX_ARBEMPTY 0x0c
+#define VSC73XX_ARBDISC 0x0e
+#define VSC73XX_SBACKWDROP 0x12
+#define VSC73XX_DBACKWDROP 0x13
+#define VSC73XX_ARBBURSTPROB 0x15
+
+/* System block 7 registers */
+#define VSC73XX_ICPU_SIPAD 0x01
+#define VSC73XX_GMIIDELAY 0x05
+#define VSC73XX_ICPU_CTRL 0x10
+#define VSC73XX_ICPU_ADDR 0x11
+#define VSC73XX_ICPU_SRAM 0x12
+#define VSC73XX_HWSEM 0x13
+#define VSC73XX_GLORESET 0x14
+#define VSC73XX_ICPU_MBOX_VAL 0x15
+#define VSC73XX_ICPU_MBOX_SET 0x16
+#define VSC73XX_ICPU_MBOX_CLR 0x17
+#define VSC73XX_CHIPID 0x18
+#define VSC73XX_GPIO 0x34
+
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE 0
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS 1
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS 2
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS 3
+
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE (0 << 4)
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS (1 << 4)
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS (2 << 4)
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS (3 << 4)
+
+#define VSC73XX_ICPU_CTRL_WATCHDOG_RST BIT(31)
+#define VSC73XX_ICPU_CTRL_CLK_DIV_MASK GENMASK(12, 8)
+#define VSC73XX_ICPU_CTRL_SRST_HOLD BIT(7)
+#define VSC73XX_ICPU_CTRL_ICPU_PI_EN BIT(6)
+#define VSC73XX_ICPU_CTRL_BOOT_EN BIT(3)
+#define VSC73XX_ICPU_CTRL_EXT_ACC_EN BIT(2)
+#define VSC73XX_ICPU_CTRL_CLK_EN BIT(1)
+#define VSC73XX_ICPU_CTRL_SRST BIT(0)
+
+#define VSC73XX_CHIPID_ID_SHIFT 12
+#define VSC73XX_CHIPID_ID_MASK 0xffff
+#define VSC73XX_CHIPID_REV_SHIFT 28
+#define VSC73XX_CHIPID_REV_MASK 0xf
+#define VSC73XX_CHIPID_ID_7385 0x7385
+#define VSC73XX_CHIPID_ID_7388 0x7388
+#define VSC73XX_CHIPID_ID_7395 0x7395
+#define VSC73XX_CHIPID_ID_7398 0x7398
+
+#define VSC73XX_GLORESET_STROBE BIT(4)
+#define VSC73XX_GLORESET_ICPU_LOCK BIT(3)
+#define VSC73XX_GLORESET_MEM_LOCK BIT(2)
+#define VSC73XX_GLORESET_PHY_RESET BIT(1)
+#define VSC73XX_GLORESET_MASTER_RESET BIT(0)
+
+#define VSC73XX_CMD_MODE_READ 0
+#define VSC73XX_CMD_MODE_WRITE 1
+#define VSC73XX_CMD_MODE_SHIFT 4
+#define VSC73XX_CMD_BLOCK_SHIFT 5
+#define VSC73XX_CMD_BLOCK_MASK 0x7
+#define VSC73XX_CMD_SUBBLOCK_MASK 0xf
+
+#define VSC7385_CLOCK_DELAY ((3 << 4) | 3)
+#define VSC7385_CLOCK_DELAY_MASK ((3 << 4) | 3)
+
+#define VSC73XX_ICPU_CTRL_STOP (VSC73XX_ICPU_CTRL_SRST_HOLD | \
+ VSC73XX_ICPU_CTRL_BOOT_EN | \
+ VSC73XX_ICPU_CTRL_EXT_ACC_EN)
+
+#define VSC73XX_ICPU_CTRL_START (VSC73XX_ICPU_CTRL_CLK_DIV | \
+ VSC73XX_ICPU_CTRL_BOOT_EN | \
+ VSC73XX_ICPU_CTRL_CLK_EN | \
+ VSC73XX_ICPU_CTRL_SRST)
+
+/**
+ * struct vsc73xx - VSC73xx state container
+ */
+struct vsc73xx {
+ struct device *dev;
+ struct gpio_desc *reset;
+ struct spi_device *spi;
+ struct dsa_switch *ds;
+ struct gpio_chip gc;
+ u16 chipid;
+ u8 addr[ETH_ALEN];
+ struct mutex lock; /* Protects SPI traffic */
+};
+
+#define IS_7385(a) ((a)->chipid == VSC73XX_CHIPID_ID_7385)
+#define IS_7388(a) ((a)->chipid == VSC73XX_CHIPID_ID_7388)
+#define IS_7395(a) ((a)->chipid == VSC73XX_CHIPID_ID_7395)
+#define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398)
+#define IS_739X(a) (IS_7395(a) || IS_7398(a))
+
+struct vsc73xx_counter {
+ u8 counter;
+ const char *name;
+};
+
+/* Counters are named according to the MIB standards where applicable.
+ * Some counters are custom, non-standard. The standard counters are
+ * named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex
+ * 30A Counters.
+ */
+static const struct vsc73xx_counter vsc73xx_rx_counters[] = {
+ { 0, "RxEtherStatsPkts" },
+ { 1, "RxBroadcast+MulticastPkts" }, /* non-standard counter */
+ { 2, "RxTotalErrorPackets" }, /* non-standard counter */
+ { 3, "RxEtherStatsBroadcastPkts" },
+ { 4, "RxEtherStatsMulticastPkts" },
+ { 5, "RxEtherStatsPkts64Octets" },
+ { 6, "RxEtherStatsPkts65to127Octets" },
+ { 7, "RxEtherStatsPkts128to255Octets" },
+ { 8, "RxEtherStatsPkts256to511Octets" },
+ { 9, "RxEtherStatsPkts512to1023Octets" },
+ { 10, "RxEtherStatsPkts1024to1518Octets" },
+ { 11, "RxJumboFrames" }, /* non-standard counter */
+ { 12, "RxaPauseMACControlFramesTransmitted" },
+ { 13, "RxFIFODrops" }, /* non-standard counter */
+ { 14, "RxBackwardDrops" }, /* non-standard counter */
+ { 15, "RxClassifierDrops" }, /* non-standard counter */
+ { 16, "RxEtherStatsCRCAlignErrors" },
+ { 17, "RxEtherStatsUndersizePkts" },
+ { 18, "RxEtherStatsOversizePkts" },
+ { 19, "RxEtherStatsFragments" },
+ { 20, "RxEtherStatsJabbers" },
+ { 21, "RxaMACControlFramesReceived" },
+ /* 22-24 are undefined */
+ { 25, "RxaFramesReceivedOK" },
+ { 26, "RxQoSClass0" }, /* non-standard counter */
+ { 27, "RxQoSClass1" }, /* non-standard counter */
+ { 28, "RxQoSClass2" }, /* non-standard counter */
+ { 29, "RxQoSClass3" }, /* non-standard counter */
+};
+
+static const struct vsc73xx_counter vsc73xx_tx_counters[] = {
+ { 0, "TxEtherStatsPkts" },
+ { 1, "TxBroadcast+MulticastPkts" }, /* non-standard counter */
+ { 2, "TxTotalErrorPackets" }, /* non-standard counter */
+ { 3, "TxEtherStatsBroadcastPkts" },
+ { 4, "TxEtherStatsMulticastPkts" },
+ { 5, "TxEtherStatsPkts64Octets" },
+ { 6, "TxEtherStatsPkts65to127Octets" },
+ { 7, "TxEtherStatsPkts128to255Octets" },
+ { 8, "TxEtherStatsPkts256to511Octets" },
+ { 9, "TxEtherStatsPkts512to1023Octets" },
+ { 10, "TxEtherStatsPkts1024to1518Octets" },
+ { 11, "TxJumboFrames" }, /* non-standard counter */
+ { 12, "TxaPauseMACControlFramesTransmitted" },
+ { 13, "TxFIFODrops" }, /* non-standard counter */
+ { 14, "TxDrops" }, /* non-standard counter */
+ { 15, "TxEtherStatsCollisions" },
+ { 16, "TxEtherStatsCRCAlignErrors" },
+ { 17, "TxEtherStatsUndersizePkts" },
+ { 18, "TxEtherStatsOversizePkts" },
+ { 19, "TxEtherStatsFragments" },
+ { 20, "TxEtherStatsJabbers" },
+ /* 21-24 are undefined */
+ { 25, "TxaFramesReceivedOK" },
+ { 26, "TxQoSClass0" }, /* non-standard counter */
+ { 27, "TxQoSClass1" }, /* non-standard counter */
+ { 28, "TxQoSClass2" }, /* non-standard counter */
+ { 29, "TxQoSClass3" }, /* non-standard counter */
+};
+
+static int vsc73xx_is_addr_valid(u8 block, u8 subblock)
+{
+ switch (block) {
+ case VSC73XX_BLOCK_MAC:
+ switch (subblock) {
+ case 0 ... 4:
+ case 6:
+ return 1;
+ }
+ break;
+
+ case VSC73XX_BLOCK_ANALYZER:
+ case VSC73XX_BLOCK_SYSTEM:
+ switch (subblock) {
+ case 0:
+ return 1;
+ }
+ break;
+
+ case VSC73XX_BLOCK_MII:
+ case VSC73XX_BLOCK_CAPTURE:
+ case VSC73XX_BLOCK_ARBITER:
+ switch (subblock) {
+ case 0 ... 1:
+ return 1;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static u8 vsc73xx_make_addr(u8 mode, u8 block, u8 subblock)
+{
+ u8 ret;
+
+ ret = (block & VSC73XX_CMD_BLOCK_MASK) << VSC73XX_CMD_BLOCK_SHIFT;
+ ret |= (mode & 1) << VSC73XX_CMD_MODE_SHIFT;
+ ret |= subblock & VSC73XX_CMD_SUBBLOCK_MASK;
+
+ return ret;
+}
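
The command byte built above packs the block number into bits 7..5, the read/write mode into bit 4 and the subblock into the low nibble. A quick host-side check of the layout:

    #include <stdio.h>
    #include <stdint.h>

    #define MODE_READ 0
    #define MODE_WRITE 1

    static uint8_t make_addr(uint8_t mode, uint8_t block, uint8_t subblock)
    {
        /* block[2:0] << 5 | mode << 4 | subblock[3:0], as in the driver */
        return ((block & 0x7) << 5) | ((mode & 1) << 4) | (subblock & 0xf);
    }

    int main(void)
    {
        /* Read from the MAC block (0x1), subblock 6 (the CPU port) */
        printf("cmd[0] = 0x%02x\n", make_addr(MODE_READ, 0x1, 6)); /* 0x26 */
        return 0;
    }
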
+
+static int vsc73xx_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+ u32 *val)
+{
+ struct spi_transfer t[2];
+ struct spi_message m;
+ u8 cmd[4];
+ u8 buf[4];
+ int ret;
+
+ if (!vsc73xx_is_addr_valid(block, subblock))
+ return -EINVAL;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = sizeof(buf);
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_READ, block, subblock);
+ cmd[1] = reg;
+ cmd[2] = 0;
+ cmd[3] = 0;
+
+ mutex_lock(&vsc->lock);
+ ret = spi_sync(vsc->spi, &m);
+ mutex_unlock(&vsc->lock);
+
+ if (ret)
+ return ret;
+
+ *val = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
+
+ return 0;
+}
+
+static int vsc73xx_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+ u32 val)
+{
+ struct spi_transfer t[2];
+ struct spi_message m;
+ u8 cmd[2];
+ u8 buf[4];
+ int ret;
+
+ if (!vsc73xx_is_addr_valid(block, subblock))
+ return -EINVAL;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = sizeof(buf);
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_WRITE, block, subblock);
+ cmd[1] = reg;
+
+ buf[0] = (val >> 24) & 0xff;
+ buf[1] = (val >> 16) & 0xff;
+ buf[2] = (val >> 8) & 0xff;
+ buf[3] = val & 0xff;
+
+ mutex_lock(&vsc->lock);
+ ret = spi_sync(vsc->spi, &m);
+ mutex_unlock(&vsc->lock);
+
+ return ret;
+}
+
+static int vsc73xx_update_bits(struct vsc73xx *vsc, u8 block, u8 subblock,
+ u8 reg, u32 mask, u32 val)
+{
+ u32 tmp, orig;
+ int ret;
+
+ /* Same read-modify-write algorithm as e.g. regmap */
+ ret = vsc73xx_read(vsc, block, subblock, reg, &orig);
+ if (ret)
+ return ret;
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ return vsc73xx_write(vsc, block, subblock, reg, tmp);
+}
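
As the comment notes, this is the classic read-modify-write: fetch the register, clear the masked field, OR in the new bits. The pure bit manipulation, isolated on the host:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t update_bits(uint32_t orig, uint32_t mask, uint32_t val)
    {
        /* Same algorithm as vsc73xx_update_bits(), minus the bus I/O */
        return (orig & ~mask) | (val & mask);
    }

    int main(void)
    {
        /* Set bit 16 (RX_EN) without disturbing the rest of MAC_CFG */
        printf("0x%08x\n", update_bits(0x00060000, 1u << 16, 1u << 16));
        return 0;
    }
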
+
+static int vsc73xx_detect(struct vsc73xx *vsc)
+{
+ bool icpu_si_boot_en;
+ bool icpu_pi_en;
+ u32 val;
+ u32 rev;
+ int ret;
+ u32 id;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_ICPU_MBOX_VAL, &val);
+ if (ret) {
+ dev_err(vsc->dev, "unable to read mailbox (%d)\n", ret);
+ return ret;
+ }
+
+ if (val == 0xffffffff) {
+ dev_info(vsc->dev, "chip seems dead, assert reset\n");
+ gpiod_set_value_cansleep(vsc->reset, 1);
+ /* Reset pulse should be 20ns minimum, according to datasheet
+ * table 245, so 10us should be fine
+ */
+ usleep_range(10, 100);
+ gpiod_set_value_cansleep(vsc->reset, 0);
+ /* Wait 20ms according to datasheet table 245 */
+ msleep(20);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_ICPU_MBOX_VAL, &val);
+ if (ret)
+ return ret;
+ if (val == 0xffffffff) {
+ dev_err(vsc->dev, "seems not to help, giving up\n");
+ return -ENODEV;
+ }
+ }
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_CHIPID, &val);
+ if (ret) {
+ dev_err(vsc->dev, "unable to read chip id (%d)\n", ret);
+ return ret;
+ }
+
+ id = (val >> VSC73XX_CHIPID_ID_SHIFT) &
+ VSC73XX_CHIPID_ID_MASK;
+ switch (id) {
+ case VSC73XX_CHIPID_ID_7385:
+ case VSC73XX_CHIPID_ID_7388:
+ case VSC73XX_CHIPID_ID_7395:
+ case VSC73XX_CHIPID_ID_7398:
+ break;
+ default:
+ dev_err(vsc->dev, "unsupported chip, id=%04x\n", id);
+ return -ENODEV;
+ }
+
+ vsc->chipid = id;
+ rev = (val >> VSC73XX_CHIPID_REV_SHIFT) &
+ VSC73XX_CHIPID_REV_MASK;
+ dev_info(vsc->dev, "VSC%04X (rev: %d) switch found\n", id, rev);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_ICPU_CTRL, &val);
+ if (ret) {
+ dev_err(vsc->dev, "unable to read iCPU control\n");
+ return ret;
+ }
+
+ /* The iCPU can always be used but can boot in different ways.
+ * If it is initially disabled and has no external memory,
+ * we are in control and can do whatever we like; otherwise we
+ * are probably in trouble (we need some way to communicate
+ * with the running firmware) so we bail out for now.
+ */
+ icpu_pi_en = !!(val & VSC73XX_ICPU_CTRL_ICPU_PI_EN);
+ icpu_si_boot_en = !!(val & VSC73XX_ICPU_CTRL_BOOT_EN);
+ if (icpu_si_boot_en && icpu_pi_en) {
+ dev_err(vsc->dev,
+ "iCPU enabled boots from SI, has external memory\n");
+ dev_err(vsc->dev, "no idea how to deal with this\n");
+ return -ENODEV;
+ }
+ if (icpu_si_boot_en && !icpu_pi_en) {
+ dev_err(vsc->dev,
+ "iCPU enabled boots from SI, no external memory\n");
+ dev_err(vsc->dev, "no idea how to deal with this\n");
+ return -ENODEV;
+ }
+ if (!icpu_si_boot_en && icpu_pi_en) {
+ dev_err(vsc->dev,
+ "iCPU enabled, boots from PI external memory\n");
+ dev_err(vsc->dev, "no idea how to deal with this\n");
+ return -ENODEV;
+ }
+ /* !icpu_si_boot_en && !icpu_pi_en */
+ dev_info(vsc->dev, "iCPU disabled, no external memory\n");
+
+ return 0;
+}
+
+static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ struct vsc73xx *vsc = ds->priv;
+ u32 cmd;
+ u32 val;
+ int ret;
+
+ /* Setting bit 26 means "read" */
+ cmd = BIT(26) | (phy << 21) | (regnum << 16);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ if (ret)
+ return ret;
+ msleep(2);
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
+ if (ret)
+ return ret;
+ if (val & BIT(16)) {
+ dev_err(vsc->dev, "reading reg %02x from phy%d failed\n",
+ regnum, phy);
+ return -EIO;
+ }
+ val &= 0xFFFFU;
+
+ dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n",
+ regnum, phy, val);
+
+ return val;
+}
+
+static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ struct vsc73xx *vsc = ds->priv;
+ u32 cmd;
+ int ret;
+
+ /* It was found through tedious experiments that this router
+ * chip really hates to have its PHYs reset. They
+ * never recover if that happens: autonegotiation stops
+ * working after a reset. Just filter out this command.
+ * (Resetting the whole chip is OK.)
+ */
+ if (regnum == 0 && (val & BIT(15))) {
+ dev_info(vsc->dev, "reset PHY - disallowed\n");
+ return 0;
+ }
+
+ cmd = (phy << 21) | (regnum << 16);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ if (ret)
+ return ret;
+
+ dev_dbg(vsc->dev, "write %04x to reg %02x in phy%d\n",
+ val, regnum, phy);
+ return 0;
+}
+
+static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds,
+ int port)
+{
+ /* The switch internally uses an 8 byte header with length,
+ * source port, tag, LPA and priority. This is supposedly
+ * only accessible when operating the switch using the internal
+ * CPU or with an external CPU mapping the device in, but not
+ * when operating the switch over SPI and putting frames in/out
+ * on port 6 (the CPU port). So far we must assume that we
+ * cannot access the tag. (See "Internal frame header" section
+ * 3.9.1 in the manual.)
+ */
+ return DSA_TAG_PROTO_NONE;
+}
+
+static int vsc73xx_setup(struct dsa_switch *ds)
+{
+ struct vsc73xx *vsc = ds->priv;
+ int i;
+
+ dev_info(vsc->dev, "set up the switch\n");
+
+ /* Issue RESET */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
+ VSC73XX_GLORESET_MASTER_RESET);
+ usleep_range(125, 200);
+
+ /* Initialize memory: initialize RAM banks 0..15 except 6 and 7.
+ * This sequence appears in the
+ * VSC7385 SparX-G5 datasheet section 6.6.1
+ * VSC7395 SparX-G5e datasheet section 6.6.1
+ * "initialization sequence".
+ * No explanation is given for the 0x1010400 magic number.
+ */
+ for (i = 0; i <= 15; i++) {
+ if (i != 6 && i != 7) {
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MEMINIT,
+ 2,
+ 0, 0x1010400 + i);
+ mdelay(1);
+ }
+ }
+ mdelay(30);
+
+ /* Clear MAC table */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS,
+ VSC73XX_MACACCESS_CMD_CLEAR_TABLE);
+
+ /* Clear VLAN table */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_VLANACCESS,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE);
+
+ msleep(40);
+
+ /* Use 20KiB buffers on all ports on VSC7395
+ * The VSC7385 has 16KiB buffers and that is the
+ * default if we don't set this up explicitly.
+ * Port "31" is "all ports".
+ */
+ if (IS_739X(vsc))
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, 0x1f,
+ VSC73XX_Q_MISC_CONF,
+ VSC73XX_Q_MISC_CONF_EXTENT_MEM);
+
+ /* Put all ports into reset until enabled */
+ for (i = 0; i < 7; i++) {
+ if (i == 5)
+ continue;
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, i,
+ VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
+ }
+
+ /* MII delay, set both GTX and RX delay to 2 ns */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
+ VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS |
+ VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS);
+ /* Enable reception of frames on all ports */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK,
+ 0x5f);
+ /* IP multicast flood mask (table 144) */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_IFLODMSK,
+ 0xff);
+
+ mdelay(50);
+
+ /* Release reset from the internal PHYs */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
+ VSC73XX_GLORESET_PHY_RESET);
+
+ udelay(4);
+
+ return 0;
+}
+
+static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
+{
+ u32 val;
+
+ /* MAC configure, first reset the port and then write defaults */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RESET);
+
+ /* Take up the port in 1Gbit mode by default; this will be
+ * augmented after auto-negotiation on the PHY-facing
+ * ports.
+ */
+ if (port == CPU_PORT)
+ val = VSC73XX_MAC_CFG_1000M_F_RGMII;
+ else
+ val = VSC73XX_MAC_CFG_1000M_F_PHY;
+
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_MAC_CFG,
+ val |
+ VSC73XX_MAC_CFG_TX_EN |
+ VSC73XX_MAC_CFG_RX_EN);
+
+ /* Max length: we can do up to 9.6 KiB, so allow that.
+ * According to the application note "VSC7398 Jumbo Frames",
+ * setting the MTU to 9.6 KB does not affect the performance on
+ * standard frames, so just enable it. It is clear from the
+ * application note that "9.6 kilobytes" == 9600 bytes.
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_MAXLEN, 9600);
+
+ /* Flow control for the CPU port:
+ * Use a zero delay pause frame when pause condition is left
+ * Obey pause control frames
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_FCCONF,
+ VSC73XX_FCCONF_ZERO_PAUSE_EN |
+ VSC73XX_FCCONF_FLOW_CTRL_OBEY);
+
+ /* Issue pause control frames on PHY facing ports.
+ * Allow early initiation of MAC transmission if the amount
+ * of egress data is below 512 bytes on CPU port.
+ * FIXME: enable 20KiB buffers?
+ */
+ if (port == CPU_PORT)
+ val = VSC73XX_Q_MISC_CONF_EARLY_TX_512;
+ else
+ val = VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE;
+ val |= VSC73XX_Q_MISC_CONF_EXTENT_MEM;
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_Q_MISC_CONF,
+ val);
+
+ /* Flow control MAC: a MAC address used in flow control frames */
+ val = (vsc->addr[5] << 16) | (vsc->addr[4] << 8) | (vsc->addr[3]);
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_FCMACHI,
+ val);
+ val = (vsc->addr[2] << 16) | (vsc->addr[1] << 8) | (vsc->addr[0]);
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_FCMACLO,
+ val);
+
+ /* Tell the categorizer to forward pause frames, not control
+ * frame. Do not drop anything.
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_CAT_DROP,
+ VSC73XX_CAT_DROP_FWD_PAUSE_ENA);
+
+ /* Clear all counters */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port, VSC73XX_C_RX0, 0);
+}
+
+static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
+ int port, struct phy_device *phydev,
+ u32 initval)
+{
+ u32 val = initval;
+ u8 seed;
+
+ /* Reset this port. FIXME: break this out into a subroutine */
+ val |= VSC73XX_MAC_CFG_RESET;
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
+
+ /* Seed the port's randomizer with some randomness */
+ get_random_bytes(&seed, 1);
+ val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
+ val |= VSC73XX_MAC_CFG_SEED_LOAD;
+ val |= VSC73XX_MAC_CFG_WEXC_DIS;
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
+
+ /* Flow control for the PHY facing ports:
+ * Use a zero delay pause frame when pause condition is left
+ * Obey pause control frames
+ * When generating pause frames, use 0xff as pause value
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_FCCONF,
+ VSC73XX_FCCONF_ZERO_PAUSE_EN |
+ VSC73XX_FCCONF_FLOW_CTRL_OBEY |
+ 0xff);
+
+ /* Disallow backward dropping of frames from this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_SBACKWDROP, BIT(port), 0);
+
+ /* Enable TX, RX, deassert reset, stop loading seed */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RESET | VSC73XX_MAC_CFG_SEED_LOAD |
+ VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN,
+ VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN);
+}
+
+static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct vsc73xx *vsc = ds->priv;
+ u32 val;
+
+ /* Special handling of the CPU-facing port */
+ if (port == CPU_PORT) {
+ /* Other ports are already initialized but not this one */
+ vsc73xx_init_port(vsc, CPU_PORT);
+ /* Select the external port for this interface (EXT_PORT)
+ * Enable the GMII GTX external clock
+ * Use double data rate (DDR mode)
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ CPU_PORT,
+ VSC73XX_ADVPORTM,
+ VSC73XX_ADVPORTM_EXT_PORT |
+ VSC73XX_ADVPORTM_ENA_GTX |
+ VSC73XX_ADVPORTM_DDR_MODE);
+ }
+
+ /* This is the MAC configuration that always needs to happen
+ * after a PHY or the CPU port comes up or goes down.
+ */
+ if (!phydev->link) {
+ int maxloop = 10;
+
+ dev_dbg(vsc->dev, "port %d: went down\n",
+ port);
+
+ /* Disable RX on this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RX_EN, 0);
+
+ /* Discard packets */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBDISC, BIT(port), BIT(port));
+
+ /* Wait until queue is empty */
+ vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBEMPTY, &val);
+ while (!(val & BIT(port))) {
+ msleep(1);
+ vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBEMPTY, &val);
+ if (--maxloop == 0) {
+ dev_err(vsc->dev,
+ "timeout waiting for block arbiter\n");
+ /* Continue anyway */
+ break;
+ }
+ }
+
+ /* Put this port into reset */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RESET);
+
+ /* Accept packets again */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBDISC, BIT(port), 0);
+
+ /* Allow backward dropping of frames from this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_SBACKWDROP, BIT(port), BIT(port));
+
+ /* Receive mask (disable forwarding) */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_RECVMASK, BIT(port), 0);
+
+ return;
+ }
+
+ /* Figure out what speed was negotiated */
+ if (phydev->speed == SPEED_1000) {
+ dev_dbg(vsc->dev, "port %d: 1000 Mbit mode full duplex\n",
+ port);
+
+ /* Set up default for internal port or external RGMII */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ val = VSC73XX_MAC_CFG_1000M_F_RGMII;
+ else
+ val = VSC73XX_MAC_CFG_1000M_F_PHY;
+ vsc73xx_adjust_enable_port(vsc, port, phydev, val);
+ } else if (phydev->speed == SPEED_100) {
+ if (phydev->duplex == DUPLEX_FULL) {
+ val = VSC73XX_MAC_CFG_100_10M_F_PHY;
+ dev_dbg(vsc->dev,
+ "port %d: 100 Mbit full duplex mode\n",
+ port);
+ } else {
+ val = VSC73XX_MAC_CFG_100_10M_H_PHY;
+ dev_dbg(vsc->dev,
+ "port %d: 100 Mbit half duplex mode\n",
+ port);
+ }
+ vsc73xx_adjust_enable_port(vsc, port, phydev, val);
+ } else if (phydev->speed == SPEED_10) {
+ if (phydev->duplex == DUPLEX_FULL) {
+ val = VSC73XX_MAC_CFG_100_10M_F_PHY;
+ dev_dbg(vsc->dev,
+ "port %d: 10 Mbit full duplex mode\n",
+ port);
+ } else {
+ val = VSC73XX_MAC_CFG_100_10M_H_PHY;
+ dev_dbg(vsc->dev,
+ "port %d: 10 Mbit half duplex mode\n",
+ port);
+ }
+ vsc73xx_adjust_enable_port(vsc, port, phydev, val);
+ } else {
+ dev_err(vsc->dev,
+ "could not adjust link: unknown speed\n");
+ }
+
+ /* Enable port (forwarding) in the receive mask */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_RECVMASK, BIT(port), BIT(port));
+}
+
+static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ dev_info(vsc->dev, "enable port %d\n", port);
+ vsc73xx_init_port(vsc, port);
+
+ return 0;
+}
+
+static void vsc73xx_port_disable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ /* Just put the port into reset */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
+}
+
+static const struct vsc73xx_counter *
+vsc73xx_find_counter(struct vsc73xx *vsc,
+ u8 counter,
+ bool tx)
+{
+ const struct vsc73xx_counter *cnts;
+ int num_cnts;
+ int i;
+
+ if (tx) {
+ cnts = vsc73xx_tx_counters;
+ num_cnts = ARRAY_SIZE(vsc73xx_tx_counters);
+ } else {
+ cnts = vsc73xx_rx_counters;
+ num_cnts = ARRAY_SIZE(vsc73xx_rx_counters);
+ }
+
+ for (i = 0; i < num_cnts; i++) {
+ const struct vsc73xx_counter *cnt;
+
+ cnt = &cnts[i];
+ if (cnt->counter == counter)
+ return cnt;
+ }
+
+ return NULL;
+}
+
+static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ const struct vsc73xx_counter *cnt;
+ struct vsc73xx *vsc = ds->priv;
+ u8 indices[6];
+ int i, j;
+ u32 val;
+ int ret;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_C_CFG, &val);
+ if (ret)
+ return;
+
+ indices[0] = (val & 0x1f); /* RX counter 0 */
+ indices[1] = ((val >> 5) & 0x1f); /* RX counter 1 */
+ indices[2] = ((val >> 10) & 0x1f); /* RX counter 2 */
+ indices[3] = ((val >> 16) & 0x1f); /* TX counter 0 */
+ indices[4] = ((val >> 21) & 0x1f); /* TX counter 1 */
+ indices[5] = ((val >> 26) & 0x1f); /* TX counter 2 */
+
+ /* The first counter is the RX octets */
+ j = 0;
+ strncpy(data + j * ETH_GSTRING_LEN,
+ "RxEtherStatsOctets", ETH_GSTRING_LEN);
+ j++;
+
+ /* Each port supports recording 3 RX counters and 3 TX counters;
+ * figure out which counters this set-up uses and return their
+ * names. The hardware default counters are the number of packets
+ * on RX/TX, combined broadcast+multicast packets RX/TX, and total
+ * error packets RX/TX.
+ */
+ for (i = 0; i < 3; i++) {
+ cnt = vsc73xx_find_counter(vsc, indices[i], false);
+ if (cnt)
+ strncpy(data + j * ETH_GSTRING_LEN,
+ cnt->name, ETH_GSTRING_LEN);
+ j++;
+ }
+
+ /* TX stats begin with the number of TX octets */
+ strncpy(data + j * ETH_GSTRING_LEN,
+ "TxEtherStatsOctets", ETH_GSTRING_LEN);
+ j++;
+
+ for (i = 3; i < 6; i++) {
+ cnt = vsc73xx_find_counter(vsc, indices[i], true);
+ if (cnt)
+ strncpy(data + j * ETH_GSTRING_LEN,
+ cnt->name, ETH_GSTRING_LEN);
+ j++;
+ }
+}
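The get_strings implementation above unpacks six 5-bit counter selectors from the C_CFG word (RX at bit offsets 0/5/10, TX at 16/21/26). A user-space sketch of that unpacking, with a made-up register value rather than real chip output:

/* User-space model of the C_CFG unpacking above: six 5-bit counter
 * selectors packed into one 32-bit word. The register value below is
 * a hypothetical example, not a chip dump.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t val = 0x0C4188C4; /* hypothetical VSC73XX_C_CFG value */
	uint8_t idx[6];
	int i;

	idx[0] = val & 0x1f;          /* RX counter 0 */
	idx[1] = (val >> 5) & 0x1f;   /* RX counter 1 */
	idx[2] = (val >> 10) & 0x1f;  /* RX counter 2 */
	idx[3] = (val >> 16) & 0x1f;  /* TX counter 0 */
	idx[4] = (val >> 21) & 0x1f;  /* TX counter 1 */
	idx[5] = (val >> 26) & 0x1f;  /* TX counter 2 */

	for (i = 0; i < 6; i++)
		printf("%s counter %d -> selector %u\n",
		       i < 3 ? "RX" : "TX", i % 3, idx[i]);
	return 0;
}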
+
+static int vsc73xx_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ /* We only support SS_STATS */
+ if (sset != ETH_SS_STATS)
+ return 0;
+ /* RX and TX packets, then 3 RX counters, 3 TX counters */
+ return 8;
+}
+
+static void vsc73xx_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct vsc73xx *vsc = ds->priv;
+ u8 regs[] = {
+ VSC73XX_RXOCT,
+ VSC73XX_C_RX0,
+ VSC73XX_C_RX1,
+ VSC73XX_C_RX2,
+ VSC73XX_TXOCT,
+ VSC73XX_C_TX0,
+ VSC73XX_C_TX1,
+ VSC73XX_C_TX2,
+ };
+ u32 val;
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port,
+ regs[i], &val);
+ if (ret) {
+ dev_err(vsc->dev, "error reading counter %d\n", i);
+ return;
+ }
+ data[i] = val;
+ }
+}
+
+static const struct dsa_switch_ops vsc73xx_ds_ops = {
+ .get_tag_protocol = vsc73xx_get_tag_protocol,
+ .setup = vsc73xx_setup,
+ .phy_read = vsc73xx_phy_read,
+ .phy_write = vsc73xx_phy_write,
+ .adjust_link = vsc73xx_adjust_link,
+ .get_strings = vsc73xx_get_strings,
+ .get_ethtool_stats = vsc73xx_get_ethtool_stats,
+ .get_sset_count = vsc73xx_get_sset_count,
+ .port_enable = vsc73xx_port_enable,
+ .port_disable = vsc73xx_port_disable,
+};
+
+static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+ u32 val;
+ int ret;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(offset));
+}
+
+static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+ u32 tmp = val ? BIT(offset) : 0;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, BIT(offset), tmp);
+}
+
+static int vsc73xx_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+ u32 tmp = val ? BIT(offset) : 0;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, BIT(offset + 4) | BIT(offset),
+ BIT(offset + 4) | tmp);
+}
+
+static int vsc73xx_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, BIT(offset + 4),
+ 0);
+}
+
+static int vsc73xx_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+ u32 val;
+ int ret;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, &val);
+ if (ret)
+ return ret;
+
+ return !(val & BIT(offset + 4));
+}
+
+static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
+{
+ int ret;
+
+ vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x",
+ vsc->chipid);
+ vsc->gc.ngpio = 4;
+ vsc->gc.owner = THIS_MODULE;
+ vsc->gc.parent = vsc->dev;
+ vsc->gc.of_node = vsc->dev->of_node;
+ vsc->gc.base = -1;
+ vsc->gc.get = vsc73xx_gpio_get;
+ vsc->gc.set = vsc73xx_gpio_set;
+ vsc->gc.direction_input = vsc73xx_gpio_direction_input;
+ vsc->gc.direction_output = vsc73xx_gpio_direction_output;
+ vsc->gc.get_direction = vsc73xx_gpio_get_direction;
+ vsc->gc.can_sleep = true;
+ ret = devm_gpiochip_add_data(vsc->dev, &vsc->gc, vsc);
+ if (ret) {
+ dev_err(vsc->dev, "unable to register GPIO chip\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int vsc73xx_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct vsc73xx *vsc;
+ int ret;
+
+ vsc = devm_kzalloc(dev, sizeof(*vsc), GFP_KERNEL);
+ if (!vsc)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, vsc);
+ vsc->spi = spi_dev_get(spi);
+ vsc->dev = dev;
+ mutex_init(&vsc->lock);
+
+ /* Release reset, if any */
+ vsc->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(vsc->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(vsc->reset);
+ }
+ if (vsc->reset)
+ /* Wait 20ms according to datasheet table 245 */
+ msleep(20);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(dev, "spi setup failed.\n");
+ return ret;
+ }
+
+ ret = vsc73xx_detect(vsc);
+ if (ret) {
+ dev_err(dev, "no chip found (%d)\n", ret);
+ return -ENODEV;
+ }
+
+ eth_random_addr(vsc->addr);
+ dev_info(vsc->dev,
+ "MAC for control frames: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ vsc->addr[0], vsc->addr[1], vsc->addr[2],
+ vsc->addr[3], vsc->addr[4], vsc->addr[5]);
+
+ /* The VSC7395 switch chips have 5+1 ports which means 5
+ * ordinary ports and a sixth CPU port facing the processor
+ * with an RGMII interface. These ports are numbered 0..4
+ * and 6, so they leave a "hole" in the port map for port 5,
+ * which is invalid.
+ *
+ * The VSC7398 has 8 ports, port 7 is again the CPU port.
+ *
+ * We allocate 8 ports and avoid access to the nonexistent
+ * ports.
+ */
+ vsc->ds = dsa_switch_alloc(dev, 8);
+ if (!vsc->ds)
+ return -ENOMEM;
+ vsc->ds->priv = vsc;
+
+ vsc->ds->ops = &vsc73xx_ds_ops;
+ ret = dsa_register_switch(vsc->ds);
+ if (ret) {
+ dev_err(dev, "unable to register switch (%d)\n", ret);
+ return ret;
+ }
+
+ ret = vsc73xx_gpio_probe(vsc);
+ if (ret) {
+ dsa_unregister_switch(vsc->ds);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vsc73xx_remove(struct spi_device *spi)
+{
+ struct vsc73xx *vsc = spi_get_drvdata(spi);
+
+ dsa_unregister_switch(vsc->ds);
+ gpiod_set_value(vsc->reset, 1);
+
+ return 0;
+}
+
+static const struct of_device_id vsc73xx_of_match[] = {
+ {
+ .compatible = "vitesse,vsc7385",
+ },
+ {
+ .compatible = "vitesse,vsc7388",
+ },
+ {
+ .compatible = "vitesse,vsc7395",
+ },
+ {
+ .compatible = "vitesse,vsc7398",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
+
+static struct spi_driver vsc73xx_driver = {
+ .probe = vsc73xx_probe,
+ .remove = vsc73xx_remove,
+ .driver = {
+ .name = "vsc73xx",
+ .of_match_table = vsc73xx_of_match,
+ },
+};
+module_spi_driver(vsc73xx_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver");
+MODULE_LICENSE("GPL v2");
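The GPIO callbacks in this driver imply a single VSC73XX_GPIO register in which bit `offset` carries the pin value and bit `offset + 4` the output-enable. A minimal user-space model of that encoding; the layout is assumed from the callbacks above, not taken from a datasheet:

/* Hypothetical user-space model of the VSC73XX_GPIO layout implied
 * by the callbacks above: bits 0..3 are pin values, bits 4..7 the
 * per-pin output-enable (set = output). Illustrative only.
 */
#include <stdio.h>

#define GPIO_VAL(offset) (1u << (offset))
#define GPIO_DIR(offset) (1u << ((offset) + 4))

static unsigned int gpio_reg; /* stands in for VSC73XX_GPIO */

static void set_output(unsigned int offset, int value)
{
	gpio_reg |= GPIO_DIR(offset);	/* enable the output driver */
	if (value)
		gpio_reg |= GPIO_VAL(offset);
	else
		gpio_reg &= ~GPIO_VAL(offset);
}

int main(void)
{
	set_output(2, 1);
	printf("reg = 0x%02x, pin2 is %s\n", gpio_reg,
	       (gpio_reg & GPIO_DIR(2)) ? "output" : "input");
	return 0;
}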
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index d422a124cd7c..0b6bbf63f7ca 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -610,6 +610,7 @@ static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = info->phy_id;
+ /* Fall through */
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
return 0;
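The hunk above annotates a deliberate switch fall-through: SIOCGMIIPHY fills in the PHY id and then continues into the SIOCGMIIREG read. A runnable sketch of the annotated pattern (the case values are illustrative):

/* Minimal sketch of the annotated fall-through used above: the first
 * case does its work and then deliberately continues into the next.
 */
#include <stdio.h>

static void ioctl_demo(int cmd)
{
	switch (cmd) {
	case 1:	/* stands in for "get PHY id" */
		printf("phy id recorded\n");
		/* Fall through */
	case 2:	/* stands in for "read register" */
		printf("register read\n");
		break;
	default:
		break;
	}
}

int main(void)
{
	ioctl_demo(1);	/* prints both lines */
	return 0;
}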
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index b6d735bf8011..342ae08ec3c2 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -153,9 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
static void dayna_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
-#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
-#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
-
#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
@@ -239,7 +236,7 @@ static enum mac8390_access mac8390_testio(unsigned long membase)
unsigned long outdata = 0xA5A0B5B0;
unsigned long indata = 0x00000000;
/* Try writing 32 bits */
- memcpy_toio(membase, &outdata, 4);
+ memcpy_toio((void __iomem *)membase, &outdata, 4);
/* Now compare them */
if (memcmp_withio(&outdata, membase, 4) == 0)
return ACCESS_32;
@@ -711,7 +708,7 @@ static void sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- memcpy_fromio(hdr, dev->mem_start + hdr_start, 4);
+ memcpy_fromio(hdr, (void __iomem *)dev->mem_start + hdr_start, 4);
/* Fix endianness */
hdr->count = swab16(hdr->count);
}
@@ -725,13 +722,16 @@ static void sane_block_input(struct net_device *dev, int count,
if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- memcpy_fromio(skb->data, dev->mem_start + xfer_base,
+ memcpy_fromio(skb->data,
+ (void __iomem *)dev->mem_start + xfer_base,
semi_count);
count -= semi_count;
- memcpy_fromio(skb->data + semi_count, ei_status.rmem_start,
- count);
+ memcpy_fromio(skb->data + semi_count,
+ (void __iomem *)ei_status.rmem_start, count);
} else {
- memcpy_fromio(skb->data, dev->mem_start + xfer_base, count);
+ memcpy_fromio(skb->data,
+ (void __iomem *)dev->mem_start + xfer_base,
+ count);
}
}
@@ -740,7 +740,7 @@ static void sane_block_output(struct net_device *dev, int count,
{
long shmem = (start_page - WD_START_PG)<<8;
- memcpy_toio(dev->mem_start + shmem, buf, count);
+ memcpy_toio((void __iomem *)dev->mem_start + shmem, buf, count);
}
/* dayna block input/output */
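The mac8390 changes drop the driver's local memcpy-based macros in favor of the generic memcpy_toio()/memcpy_fromio() accessors, which take a `void __iomem *`; since dev->mem_start is a plain unsigned long it must be cast. A kernel-style sketch of the pattern (not a standalone program; the function name is illustrative):

/* Kernel-style sketch: pulling a packet header out of shared card
 * memory with the generic I/O accessor.
 */
#include <linux/io.h>
#include <linux/netdevice.h>

static void demo_read_hdr(struct net_device *dev, void *hdr,
			  unsigned long hdr_start)
{
	/* mem_start is a plain unsigned long, so it is cast to
	 * void __iomem * before use with the sparse-checked accessor.
	 */
	memcpy_fromio(hdr, (void __iomem *)dev->mem_start + hdr_start, 4);
}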
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index af766fd61151..6fde68aa13a4 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -81,7 +81,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -128,6 +127,7 @@ config FEALNX
cards. <http://www.myson.com.tw/>
source "drivers/net/ethernet/natsemi/Kconfig"
+source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 8fbfe9ce2fa5..b45d5f626b59 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
-obj-$(CONFIG_NET_CADENCE) += cadence/
+obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
@@ -36,7 +36,6 @@ obj-$(CONFIG_NET_VENDOR_DEC) += dec/
obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
-obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
@@ -60,6 +59,7 @@ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
+obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
obj-$(CONFIG_NET_VENDOR_NI) += ni/
obj-$(CONFIG_NET_NETX) += netx-eth.o
@@ -68,7 +68,7 @@ obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
obj-$(CONFIG_LPC_ENET) += nxp/
obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
obj-$(CONFIG_ETHOC) += ethoc.o
-obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/
+obj-$(CONFIG_NET_VENDOR_PACKET_ENGINES) += packetengines/
obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
@@ -80,8 +80,7 @@ obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
obj-$(CONFIG_NET_VENDOR_SIS) += sis/
-obj-$(CONFIG_SFC) += sfc/
-obj-$(CONFIG_SFC_FALCON) += sfc/falcon/
+obj-$(CONFIG_NET_VENDOR_SOLARFLARE) += sfc/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 3872ab96b80a..097467f44b0d 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -802,7 +802,7 @@ static int starfire_init_one(struct pci_dev *pdev,
int mii_status;
for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
- mdelay(100);
+ msleep(100);
boguscnt = 1000;
while (--boguscnt > 0)
if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
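The starfire change converts a 100 ms busy-wait into a sleeping delay, which is safe because PHY probing runs in process context. A short kernel-style sketch of the distinction (illustrative only):

/* Kernel-style sketch: in process context a long delay should sleep
 * instead of spin.
 */
#include <linux/delay.h>

static void phy_reset_settle(void)
{
	msleep(100);	/* may sleep; the CPU can run other work */
	/* mdelay(100) would busy-wait for the full interval and is
	 * only justified in atomic context.
	 */
}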
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 8f71b79b4949..4f11f98347ed 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -551,6 +551,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
ap->name);
break;
}
+ /* Fall through */
case PCI_VENDOR_ID_SGI:
printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
break;
@@ -1933,7 +1934,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
while (idx != rxretprd) {
struct ring_info *rip;
struct sk_buff *skb;
- struct rx_desc *rxdesc, *retdesc;
+ struct rx_desc *retdesc;
u32 skbidx;
int bd_flags, desc_type, mapsize;
u16 csum;
@@ -1959,19 +1960,16 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
case 0:
rip = &ap->skb->rx_std_skbuff[skbidx];
mapsize = ACE_STD_BUFSIZE;
- rxdesc = &ap->rx_std_ring[skbidx];
std_count++;
break;
case BD_FLG_JUMBO:
rip = &ap->skb->rx_jumbo_skbuff[skbidx];
mapsize = ACE_JUMBO_BUFSIZE;
- rxdesc = &ap->rx_jumbo_ring[skbidx];
atomic_dec(&ap->cur_jumbo_bufs);
break;
case BD_FLG_MINI:
rip = &ap->skb->rx_mini_skbuff[skbidx];
mapsize = ACE_MINI_BUFSIZE;
- rxdesc = &ap->rx_mini_ring[skbidx];
mini_count++;
break;
default:
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index f2af87d70594..c673ac2df65b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2213,7 +2213,8 @@ static void ena_netpoll(struct net_device *netdev)
#endif /* CONFIG_NET_POLL_CONTROLLER */
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
u16 qid;
/* we suspect that this is good for in--kernel network services that
@@ -2223,7 +2224,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
if (skb_rx_queue_recorded(skb))
qid = skb_get_rx_queue(skb);
else
- qid = fallback(dev, skb);
+ qid = fallback(dev, skb, NULL);
return qid;
}
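The ena hunk follows a tree-wide signature change: ndo_select_queue callbacks now take a `struct net_device *sb_dev` in place of `void *accel_priv`, and the fallback helper gains the same argument. A hedged sketch of the new shape (the queue-choice logic is illustrative):

/* Kernel-style sketch of the updated ndo_select_queue signature the
 * hunks above convert to.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static u16 demo_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev,
			     select_queue_fallback_t fallback)
{
	if (skb_rx_queue_recorded(skb))
		return skb_get_rx_queue(skb);

	/* the fallback helper now also takes the subordinate device */
	return fallback(dev, skb, NULL);
}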
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index be198cc0b10c..f5ad12c10934 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -2036,22 +2036,22 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
}
lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->tx_dma_addr)
return -ENOMEM;
lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->rx_dma_addr)
return -ENOMEM;
lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->tx_skbuff)
return -ENOMEM;
lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->rx_skbuff)
return -ENOMEM;
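The pcnet32 conversion works because ring allocation happens in process context, where GFP_KERNEL may sleep and trigger reclaim; GFP_ATOMIC draws on scarce reserves and is only needed where sleeping is forbidden. A minimal kernel-style sketch:

/* Kernel-style sketch: allocation in process context should use
 * GFP_KERNEL; GFP_ATOMIC is for contexts that cannot sleep.
 */
#include <linux/slab.h>

static void *alloc_ring(size_t entries, size_t entry_size)
{
	/* may sleep and reclaim - fine outside atomic context */
	return kcalloc(entries, entry_size, GFP_KERNEL);
}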
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index cc1e4f820e64..533094233659 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -289,7 +289,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
struct page *pages = NULL;
dma_addr_t pages_dma;
gfp_t gfp;
- int order, ret;
+ int order;
again:
order = alloc_order;
@@ -316,10 +316,9 @@ again:
/* Map the pages */
pages_dma = dma_map_page(pdata->dev, pages, 0,
PAGE_SIZE << order, DMA_FROM_DEVICE);
- ret = dma_mapping_error(pdata->dev, pages_dma);
- if (ret) {
+ if (dma_mapping_error(pdata->dev, pages_dma)) {
put_page(pages);
- return ret;
+ return -ENOMEM;
}
pa->pages = pages;
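Besides dropping the unused variable, the xgbe-desc hunk fixes the error path: dma_mapping_error() reports failure, but its return value is not an errno, so the driver now returns -ENOMEM itself. A kernel-style sketch of the corrected pattern (the function name is illustrative):

/* Kernel-style sketch of the mapping-error pattern used above:
 * dma_mapping_error() only says whether the handle is bad, so the
 * caller picks its own errno.
 */
#include <linux/dma-mapping.h>

static int map_rx_page(struct device *dev, struct page *page,
		       unsigned int order, dma_addr_t *dma)
{
	*dma = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;	/* do not return the helper's value */
	return 0;
}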
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index e107e180e2c8..1e929a1e4ca7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -119,6 +119,7 @@
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include "xgbe.h"
#include "xgbe-common.h"
@@ -887,7 +888,6 @@ static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
- u32 poly = 0xedb88320; /* CRCPOLY_LE */
u32 crc = ~0;
u32 temp = 0;
unsigned char *data = (unsigned char *)&vid_le;
@@ -904,7 +904,7 @@ static u32 xgbe_vid_crc32_le(__le16 vid_le)
data_byte >>= 1;
if (temp)
- crc ^= poly;
+ crc ^= CRC32_POLY_LE;
}
return crc;
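The xgbe change (and the bmac change below) replaces a private polynomial constant with the shared CRC32_POLY_LE/CRC32_POLY_BE definitions from <linux/crc32poly.h>. A runnable user-space model of the little-endian bit loop in xgbe_vid_crc32_le(), with the polynomial value spelled out:

/* User-space model of the LE CRC-32 bit loop above, using the same
 * polynomial value that CRC32_POLY_LE carries (0xedb88320).
 */
#include <stdio.h>
#include <stdint.h>

#define CRC32_POLY_LE 0xedb88320u

static uint32_t crc32_le_bits(const uint8_t *data, int nbytes)
{
	uint32_t crc = ~0u;
	int i, bit;

	for (i = 0; i < nbytes; i++) {
		uint8_t byte = data[i];

		for (bit = 0; bit < 8; bit++) {
			uint32_t lsb = (crc ^ byte) & 1;

			crc >>= 1;
			byte >>= 1;
			if (lsb)
				crc ^= CRC32_POLY_LE;
		}
	}
	return crc;
}

int main(void)
{
	uint8_t vid_le[2] = { 0x64, 0x00 }; /* VLAN 100, little endian */

	printf("crc = 0x%08x\n", crc32_le_bits(vid_le, 2));
	return 0;
}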
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 3188f553da35..078a04dc1182 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -836,19 +836,19 @@ static void xgene_enet_adjust_link(struct net_device *ndev)
#ifdef CONFIG_ACPI
static struct acpi_device *acpi_phy_find_device(struct device *dev)
{
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
struct fwnode_handle *fw_node;
int status;
fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
&args);
- if (ACPI_FAILURE(status)) {
+ if (ACPI_FAILURE(status) || !is_acpi_device_node(args.fwnode)) {
dev_dbg(dev, "No matching phy in ACPI table\n");
return NULL;
}
- return args.adev;
+ return to_acpi_device_node(args.fwnode);
}
#endif
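The xgene hunk migrates from the ACPI-specific acpi_reference_args to the generic fwnode_reference_args, so the result must be validated with is_acpi_device_node() and converted back via to_acpi_device_node(). A kernel-style sketch mirroring the converted lookup (the function name is illustrative):

/* Kernel-style sketch of the fwnode-based lookup the hunk converts
 * to; property name and error handling follow the driver above.
 */
#include <linux/acpi.h>

static struct acpi_device *demo_find_phy(struct device *dev)
{
	struct fwnode_reference_args args;
	struct fwnode_handle *fwnode = acpi_fwnode_handle(ACPI_COMPANION(dev));

	if (ACPI_FAILURE(acpi_node_get_property_reference(fwnode,
							  "phy-handle",
							  0, &args)) ||
	    !is_acpi_device_node(args.fwnode))
		return NULL;

	/* args.fwnode replaces the old args.adev member */
	return to_acpi_device_node(args.fwnode);
}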
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 5a655d289dd5..024998d6d8c6 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
@@ -37,11 +38,6 @@
#define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
-/*
- * CRC polynomial - used in working out multicast filter bits.
- */
-#define ENET_CRCPOLY 0x04c11db7
-
/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST
@@ -838,7 +834,7 @@ crc416(unsigned int curval, unsigned short nxtval)
next = next >> 1;
/* do the XOR */
- if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
+ if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
}
return cur;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index f2d8063a2cef..08c9fa6ca71f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -11,6 +11,7 @@
#include "aq_ethtool.h"
#include "aq_nic.h"
+#include "aq_vec.h"
static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
@@ -284,6 +285,117 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev,
return aq_nic_update_interrupt_moderation_settings(aq_nic);
}
+static int aq_ethtool_nway_reset(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (unlikely(!aq_nic->aq_fw_ops->renegotiate))
+ return -EOPNOTSUPP;
+
+ if (netif_running(ndev))
+ return aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw);
+
+ return 0;
+}
+
+static void aq_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ pause->autoneg = 0;
+
+ if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+ pause->rx_pause = 1;
+ if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
+ pause->tx_pause = 1;
+}
+
+static int aq_ethtool_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ if (!aq_nic->aq_fw_ops->set_flow_control)
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg == AUTONEG_ENABLE)
+ return -EOPNOTSUPP;
+
+ if (pause->rx_pause)
+ aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_RX;
+ else
+ aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_RX;
+
+ if (pause->tx_pause)
+ aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_TX;
+ else
+ aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX;
+
+ err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw);
+
+ return err;
+}
+
+static void aq_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
+
+ ring->rx_pending = aq_nic_cfg->rxds;
+ ring->tx_pending = aq_nic_cfg->txds;
+
+ ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max;
+ ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max;
+}
+
+static int aq_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ int err = 0;
+ bool ndev_running = false;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
+ const struct aq_hw_caps_s *hw_caps = aq_nic_cfg->aq_hw_caps;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
+ err = -EOPNOTSUPP;
+ goto err_exit;
+ }
+
+ if (netif_running(ndev)) {
+ ndev_running = true;
+ dev_close(ndev);
+ }
+
+ aq_nic_free_vectors(aq_nic);
+
+ aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
+ aq_nic_cfg->rxds = min(aq_nic_cfg->rxds, hw_caps->rxds_max);
+ aq_nic_cfg->rxds = ALIGN(aq_nic_cfg->rxds, AQ_HW_RXD_MULTIPLE);
+
+ aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
+ aq_nic_cfg->txds = min(aq_nic_cfg->txds, hw_caps->txds_max);
+ aq_nic_cfg->txds = ALIGN(aq_nic_cfg->txds, AQ_HW_TXD_MULTIPLE);
+
+ for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < aq_nic_cfg->vecs;
+ aq_nic->aq_vecs++) {
+ aq_nic->aq_vec[aq_nic->aq_vecs] =
+ aq_vec_alloc(aq_nic, aq_nic->aq_vecs, aq_nic_cfg);
+ if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ }
+ if (ndev_running)
+ err = dev_open(ndev);
+
+err_exit:
+ return err;
+}
+
const struct ethtool_ops aq_ethtool_ops = {
.get_link = aq_ethtool_get_link,
.get_regs_len = aq_ethtool_get_regs_len,
@@ -291,6 +403,11 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_drvinfo = aq_ethtool_get_drvinfo,
.get_strings = aq_ethtool_get_strings,
.get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
+ .nway_reset = aq_ethtool_nway_reset,
+ .get_ringparam = aq_get_ringparam,
+ .set_ringparam = aq_set_ringparam,
+ .get_pauseparam = aq_ethtool_get_pauseparam,
+ .set_pauseparam = aq_ethtool_set_pauseparam,
.get_rxfh_key_size = aq_ethtool_get_rss_key_size,
.get_rxfh = aq_ethtool_get_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 2c6ebd91a9f2..5c00671f248d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -24,8 +24,10 @@ struct aq_hw_caps_s {
u64 link_speed_msk;
unsigned int hw_priv_flags;
u32 media_type;
- u32 rxds;
- u32 txds;
+ u32 rxds_max;
+ u32 txds_max;
+ u32 rxds_min;
+ u32 txds_min;
u32 txhwb_alignment;
u32 irq_mask;
u32 vecs;
@@ -98,6 +100,9 @@ struct aq_stats_s {
#define AQ_HW_MEDIA_TYPE_TP 1U
#define AQ_HW_MEDIA_TYPE_FIBRE 2U
+#define AQ_HW_TXD_MULTIPLE 8U
+#define AQ_HW_RXD_MULTIPLE 8U
+
#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
struct aq_hw_s {
@@ -199,25 +204,30 @@ struct aq_hw_ops {
int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
- int (*hw_deinit)(struct aq_hw_s *self);
-
int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
};
struct aq_fw_ops {
int (*init)(struct aq_hw_s *self);
+ int (*deinit)(struct aq_hw_s *self);
+
int (*reset)(struct aq_hw_s *self);
+ int (*renegotiate)(struct aq_hw_s *self);
+
int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac);
int (*set_link_speed)(struct aq_hw_s *self, u32 speed);
- int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state);
+ int (*set_state)(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
int (*update_link_status)(struct aq_hw_s *self);
int (*update_stats)(struct aq_hw_s *self);
+
+ int (*set_flow_control)(struct aq_hw_s *self);
};
#endif /* AQ_HW_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 7a22d0257e04..26dc6782b475 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -89,8 +89,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
aq_nic_rss_init(self, cfg->num_rss_queues);
/*descriptors */
- cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF);
- cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF);
+ cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
+ cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);
/*rss rings */
cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
@@ -768,10 +768,14 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Full);
- if (self->aq_nic_cfg.flow_control)
+ if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
+ if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+
if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
else
@@ -886,7 +890,7 @@ void aq_nic_deinit(struct aq_nic_s *self)
aq_vec_deinit(aq_vec);
if (self->power_state == AQ_HW_POWER_STATE_D0) {
- (void)self->aq_hw_ops->hw_deinit(self->aq_hw);
+ (void)self->aq_fw_ops->deinit(self->aq_hw);
} else {
(void)self->aq_hw_ops->hw_set_power(self->aq_hw,
self->power_state);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 8cc6abadc03b..97addfa6f895 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -19,29 +19,31 @@
#include "hw_atl_a0_internal.h"
#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
- .is_64_dma = true, \
- .msix_irqs = 4U, \
- .irq_mask = ~0U, \
- .vecs = HW_ATL_A0_RSS_MAX, \
- .tcs = HW_ATL_A0_TC_MAX, \
- .rxd_alignment = 1U, \
- .rxd_size = HW_ATL_A0_RXD_SIZE, \
- .rxds = 248U, \
- .txd_alignment = 1U, \
- .txd_size = HW_ATL_A0_TXD_SIZE, \
- .txds = 8U * 1024U, \
- .txhwb_alignment = 4096U, \
- .tx_rings = HW_ATL_A0_TX_RINGS, \
- .rx_rings = HW_ATL_A0_RX_RINGS, \
- .hw_features = NETIF_F_HW_CSUM | \
- NETIF_F_RXHASH | \
- NETIF_F_RXCSUM | \
- NETIF_F_SG | \
- NETIF_F_TSO, \
+ .is_64_dma = true, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_A0_RSS_MAX, \
+ .tcs = HW_ATL_A0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_A0_RXD_SIZE, \
+ .rxds_max = HW_ATL_A0_MAX_RXD, \
+ .rxds_min = HW_ATL_A0_MIN_RXD, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_A0_TXD_SIZE, \
+ .txds_max = HW_ATL_A0_MAX_TXD, \
+ .txds_min = HW_ATL_A0_MIN_TXD, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_A0_TX_RINGS, \
+ .rx_rings = HW_ATL_A0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_SG | \
+ NETIF_F_TSO, \
.hw_priv_flags = IFF_UNICAST_FLT, \
- .flow_control = true, \
- .mtu = HW_ATL_A0_MTU_JUMBO, \
- .mac_regs_count = 88, \
+ .flow_control = true, \
+ .mtu = HW_ATL_A0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
.hw_alive_check_addr = 0x10U
const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
@@ -875,7 +877,6 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
const struct aq_hw_ops hw_atl_ops_a0 = {
.hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
.hw_init = hw_atl_a0_hw_init,
- .hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
.hw_reset = hw_atl_a0_hw_reset,
.hw_start = hw_atl_a0_hw_start,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 1d8855558d74..3c94cff57876 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -88,4 +88,12 @@
#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
+#define HW_ATL_A0_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL_A0_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL_A0_MAX_RXD 8184U
+#define HW_ATL_A0_MAX_TXD 8184U
+
#endif /* HW_ATL_A0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 3bdab972420b..1d44a386e7d3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -20,30 +20,32 @@
#include "hw_atl_llh_internal.h"
#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
- .is_64_dma = true, \
- .msix_irqs = 4U, \
- .irq_mask = ~0U, \
- .vecs = HW_ATL_B0_RSS_MAX, \
- .tcs = HW_ATL_B0_TC_MAX, \
- .rxd_alignment = 1U, \
- .rxd_size = HW_ATL_B0_RXD_SIZE, \
- .rxds = 4U * 1024U, \
- .txd_alignment = 1U, \
- .txd_size = HW_ATL_B0_TXD_SIZE, \
- .txds = 8U * 1024U, \
- .txhwb_alignment = 4096U, \
- .tx_rings = HW_ATL_B0_TX_RINGS, \
- .rx_rings = HW_ATL_B0_RX_RINGS, \
- .hw_features = NETIF_F_HW_CSUM | \
- NETIF_F_RXCSUM | \
- NETIF_F_RXHASH | \
- NETIF_F_SG | \
- NETIF_F_TSO | \
- NETIF_F_LRO, \
- .hw_priv_flags = IFF_UNICAST_FLT, \
- .flow_control = true, \
- .mtu = HW_ATL_B0_MTU_JUMBO, \
- .mac_regs_count = 88, \
+ .is_64_dma = true, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_B0_RSS_MAX, \
+ .tcs = HW_ATL_B0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_B0_RXD_SIZE, \
+ .rxds_max = HW_ATL_B0_MAX_RXD, \
+ .rxds_min = HW_ATL_B0_MIN_RXD, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_B0_TXD_SIZE, \
+ .txds_max = HW_ATL_B0_MAX_TXD, \
+ .txds_min = HW_ATL_B0_MIN_TXD, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_B0_TX_RINGS, \
+ .rx_rings = HW_ATL_B0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_SG | \
+ NETIF_F_TSO | \
+ NETIF_F_LRO, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL_B0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
.hw_alive_check_addr = 0x10U
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
@@ -933,7 +935,6 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
- .hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
.hw_reset = hw_atl_b0_hw_reset,
.hw_start = hw_atl_b0_hw_start,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 405d1455c222..28568f5fa74b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -142,6 +142,14 @@
#define HW_ATL_INTR_MODER_MAX 0x1FF
#define HW_ATL_INTR_MODER_MIN 0xFF
+#define HW_ATL_B0_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL_B0_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL_B0_MAX_RXD 8184U
+#define HW_ATL_B0_MAX_TXD 8184U
+
/* HW layer capabilities */
#endif /* HW_ATL_B0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index e652d86b87d4..c965e65d07db 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -30,10 +30,11 @@
#define HW_ATL_MPI_CONTROL_ADR 0x0368U
#define HW_ATL_MPI_STATE_ADR 0x036CU
-#define HW_ATL_MPI_STATE_MSK 0x00FFU
-#define HW_ATL_MPI_STATE_SHIFT 0U
-#define HW_ATL_MPI_SPEED_MSK 0xFFFF0000U
-#define HW_ATL_MPI_SPEED_SHIFT 16U
+#define HW_ATL_MPI_STATE_MSK 0x00FFU
+#define HW_ATL_MPI_STATE_SHIFT 0U
+#define HW_ATL_MPI_SPEED_MSK 0x00FF0000U
+#define HW_ATL_MPI_SPEED_SHIFT 16U
+#define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U
#define HW_ATL_MPI_DAISY_CHAIN_STATUS 0x704
#define HW_ATL_MPI_BOOT_EXIT_CODE 0x388
@@ -525,19 +526,20 @@ static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
{
u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
- val = (val & HW_ATL_MPI_STATE_MSK) | (speed << HW_ATL_MPI_SPEED_SHIFT);
+ val = val & ~HW_ATL_MPI_SPEED_MSK;
+ val |= speed << HW_ATL_MPI_SPEED_SHIFT;
aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
return 0;
}
-void hw_atl_utils_mpi_set(struct aq_hw_s *self,
- enum hal_atl_utils_fw_state_e state,
- u32 speed)
+static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
{
int err = 0;
u32 transaction_id = 0;
struct hw_aq_atl_utils_mbox_header mbox;
+ u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
if (state == MPI_RESET) {
hw_atl_utils_mpi_read_mbox(self, &mbox);
@@ -551,21 +553,21 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
if (err < 0)
goto err_exit;
}
+ /* On interface DEINIT we disable DW (dirty wake) by raising the
+ * bit; otherwise we enable DW by clearing the bit.
+ */
+ if (state == MPI_DEINIT || state == MPI_POWER)
+ val |= HW_ATL_MPI_DIRTY_WAKE_MSK;
+ else
+ val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK;
- aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR,
- (speed << HW_ATL_MPI_SPEED_SHIFT) | state);
-
-err_exit:;
-}
-
-static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
- enum hal_atl_utils_fw_state_e state)
-{
- u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+ /* Set new state bits */
+ val = val & ~HW_ATL_MPI_STATE_MSK;
+ val |= state & HW_ATL_MPI_STATE_MSK;
- val = state | (val & HW_ATL_MPI_SPEED_MSK);
aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
- return 0;
+err_exit:
+ return err;
}
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
@@ -721,16 +723,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
*p = chip_features;
}
-int hw_atl_utils_hw_deinit(struct aq_hw_s *self)
+static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
{
- hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U);
+ hw_atl_utils_mpi_set_speed(self, 0);
+ hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
return 0;
}
int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
unsigned int power_state)
{
- hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U);
+ hw_atl_utils_mpi_set_speed(self, 0);
+ hw_atl_utils_mpi_set_state(self, MPI_POWER);
return 0;
}
@@ -823,10 +827,12 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
const struct aq_fw_ops aq_fw_1x_ops = {
.init = hw_atl_utils_mpi_create,
+ .deinit = hw_atl_fw1x_deinit,
.reset = NULL,
.get_mac_permanent = hw_atl_utils_get_mac_permanent,
.set_link_speed = hw_atl_utils_mpi_set_speed,
.set_state = hw_atl_utils_mpi_set_state,
.update_link_status = hw_atl_utils_mpi_get_link_status,
.update_stats = hw_atl_utils_update_stats,
+ .set_flow_control = NULL,
};
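The hw_atl_utils rework treats the MPI control word as independent fields, state in bits 0..7, speed in bits 16..23, dirty-wake in bit 25, and updates each with a read-modify-write instead of overwriting the whole register. A runnable model using the masks the patch defines:

/* User-space model of the read-modify-write field updates in
 * hw_atl_utils_mpi_set_speed()/_set_state(). Illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define MPI_STATE_MSK		0x00FFu
#define MPI_SPEED_MSK		0x00FF0000u
#define MPI_SPEED_SHIFT		16
#define MPI_DIRTY_WAKE_MSK	0x02000000u

static uint32_t mpi_ctrl; /* stands in for HW_ATL_MPI_CONTROL_ADR */

static void set_speed(uint32_t speed)
{
	mpi_ctrl = (mpi_ctrl & ~MPI_SPEED_MSK) |
		   ((speed << MPI_SPEED_SHIFT) & MPI_SPEED_MSK);
}

static void set_state(uint32_t state, int deinit)
{
	if (deinit)
		mpi_ctrl |= MPI_DIRTY_WAKE_MSK;	/* disable dirty wake */
	else
		mpi_ctrl &= ~MPI_DIRTY_WAKE_MSK;
	mpi_ctrl = (mpi_ctrl & ~MPI_STATE_MSK) | (state & MPI_STATE_MSK);
}

int main(void)
{
	set_speed(0x20);
	set_state(0x01, 0);
	printf("ctrl = 0x%08x\n", mpi_ctrl); /* 0x00200001 */
	return 0;
}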
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index cd8f18f39c61..b875590efcbd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -239,6 +239,41 @@ enum hw_atl_fw2x_caps_hi {
CAPS_HI_TRANSACTION_ID,
};
+enum hw_atl_fw2x_ctrl {
+ CTRL_RESERVED1 = 0x00,
+ CTRL_RESERVED2,
+ CTRL_RESERVED3,
+ CTRL_PAUSE,
+ CTRL_ASYMMETRIC_PAUSE,
+ CTRL_RESERVED4,
+ CTRL_RESERVED5,
+ CTRL_RESERVED6,
+ CTRL_1GBASET_FD_EEE,
+ CTRL_2P5GBASET_FD_EEE,
+ CTRL_5GBASET_FD_EEE,
+ CTRL_10GBASET_FD_EEE,
+ CTRL_THERMAL_SHUTDOWN,
+ CTRL_PHY_LOGS,
+ CTRL_EEE_AUTO_DISABLE,
+ CTRL_PFC,
+ CTRL_WAKE_ON_LINK,
+ CTRL_CABLE_DIAG,
+ CTRL_TEMPERATURE,
+ CTRL_DOWNSHIFT,
+ CTRL_PTP_AVB,
+ CTRL_RESERVED7,
+ CTRL_LINK_DROP,
+ CTRL_SLEEP_PROXY,
+ CTRL_WOL,
+ CTRL_MAC_STOP,
+ CTRL_EXT_LOOPBACK,
+ CTRL_INT_LOOPBACK,
+ CTRL_RESERVED8,
+ CTRL_WOL_TIMER,
+ CTRL_STATISTICS,
+ CTRL_FORCE_RECONNECT,
+};
+
struct aq_hw_s;
struct aq_fw_ops;
struct aq_hw_caps_s;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 39cd3a27fe77..e37943760a58 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -28,6 +28,10 @@
#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed);
+static int aq_fw2x_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
+
static int aq_fw2x_init(struct aq_hw_s *self)
{
int err = 0;
@@ -39,6 +43,16 @@ static int aq_fw2x_init(struct aq_hw_s *self)
return err;
}
+static int aq_fw2x_deinit(struct aq_hw_s *self)
+{
+ int err = aq_fw2x_set_link_speed(self, 0);
+
+ if (!err)
+ err = aq_fw2x_set_state(self, MPI_DEINIT);
+
+ return err;
+}
+
static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
{
enum hw_atl_fw2x_rate rate = 0;
@@ -73,10 +87,38 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
return 0;
}
+static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state)
+{
+ if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+ *mpi_state |= BIT(CAPS_HI_PAUSE);
+ else
+ *mpi_state &= ~BIT(CAPS_HI_PAUSE);
+
+ if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
+ *mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+ else
+ *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+}
+
static int aq_fw2x_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state)
{
- /* No explicit state in 2x fw */
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ switch (state) {
+ case MPI_INIT:
+ mpi_state &= ~BIT(CAPS_HI_LINK_DROP);
+ aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+ break;
+ case MPI_DEINIT:
+ mpi_state |= BIT(CAPS_HI_LINK_DROP);
+ break;
+ case MPI_RESET:
+ case MPI_POWER:
+ /* No actions */
+ break;
+ }
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
return 0;
}
@@ -173,12 +215,37 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self)
return hw_atl_utils_update_stats(self);
}
+static int aq_fw2x_renegotiate(struct aq_hw_s *self)
+{
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ mpi_opts |= BIT(CTRL_FORCE_RECONNECT);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ return 0;
+}
+
+static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
+
+ return 0;
+}
+
const struct aq_fw_ops aq_fw_2x_ops = {
.init = aq_fw2x_init,
+ .deinit = aq_fw2x_deinit,
.reset = NULL,
+ .renegotiate = aq_fw2x_renegotiate,
.get_mac_permanent = aq_fw2x_get_mac_permanent,
.set_link_speed = aq_fw2x_set_link_speed,
.set_state = aq_fw2x_set_state,
.update_link_status = aq_fw2x_update_link_status,
.update_stats = aq_fw2x_update_stats,
+ .set_flow_control = aq_fw2x_set_flow_control,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index a445de6837a6..94efc6477bdc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -12,8 +12,8 @@
#define NIC_MAJOR_DRIVER_VERSION 2
#define NIC_MINOR_DRIVER_VERSION 0
-#define NIC_BUILD_DRIVER_VERSION 2
-#define NIC_REVISION_DRIVER_VERSION 1
+#define NIC_BUILD_DRIVER_VERSION 3
+#define NIC_REVISION_DRIVER_VERSION 0
#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 5e5022fa1d04..6d3221134927 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1279,7 +1279,6 @@ static void alx_check_link(struct alx_priv *alx)
struct alx_hw *hw = &alx->hw;
unsigned long flags;
int old_speed;
- u8 old_duplex;
int err;
/* clear PHY internal interrupt status, otherwise the main
@@ -1288,7 +1287,6 @@ static void alx_check_link(struct alx_priv *alx)
alx_clear_phy_intr(hw);
old_speed = hw->link_speed;
- old_duplex = hw->duplex;
err = alx_read_phy_link(hw);
if (err < 0)
goto reset;
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
index 8ba7f8ff3434..392f564d8fd4 100644
--- a/drivers/net/ethernet/aurora/Kconfig
+++ b/drivers/net/ethernet/aurora/Kconfig
@@ -1,5 +1,6 @@
config NET_VENDOR_AURORA
bool "Aurora VLSI devices"
+ default y
help
If you have a network (Ethernet) device belonging to this class,
say Y.
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index e94159507847..c8d1f8fa4713 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -304,12 +304,10 @@ static int nb8800_poll(struct napi_struct *napi, int budget)
again:
do {
- struct nb8800_rx_buf *rxb;
unsigned int len;
next = (last + 1) % RX_DESC_COUNT;
- rxb = &priv->rx_bufs[next];
rxd = &priv->rx_descs[next];
if (!rxd->report)
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 4c3bfde6e8de..c1d3ee9baf7e 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -61,7 +61,7 @@ config BCM63XX_ENET
config BCMGENET
tristate "Broadcom GENET internal MAC support"
- depends on OF && HAS_IOMEM
+ depends on HAS_IOMEM
select MII
select PHYLIB
select FIXED_PHY
@@ -181,7 +181,7 @@ config BGMAC_PLATFORM
config SYSTEMPORT
tristate "Broadcom SYSTEMPORT internal MAC support"
- depends on OF
+ depends on HAS_IOMEM
depends on NET_DSA || !NET_DSA
select MII
select PHYLIB
@@ -230,4 +230,12 @@ config BNXT_DCB
If unsure, say N.
+config BNXT_HWMON
+ bool "Broadcom NetXtreme-C/E HWMON support"
+ default y
+ depends on BNXT && HWMON && !(BNXT=y && HWMON=m)
+ ---help---
+ Say Y if you want to expose the thermal sensor data on NetXtreme-C/E
+ devices via the hwmon sysfs interface.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index a1f60f89e059..147045757b10 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -521,7 +521,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
struct bcm_sysport_priv *priv = netdev_priv(dev);
u32 reg;
- wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+ wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
if (!(priv->wolopts & WAKE_MAGICSECURE))
@@ -539,7 +539,7 @@ static int bcm_sysport_set_wol(struct net_device *dev,
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
- u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+ u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
if (!device_can_wakeup(kdev))
return -ENOTSUPP;
@@ -1041,17 +1041,45 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
+{
+ u32 reg, bit;
+
+ reg = umac_readl(priv, UMAC_MPD_CTRL);
+ if (enable)
+ reg |= MPD_EN;
+ else
+ reg &= ~MPD_EN;
+ umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+ if (priv->is_lite)
+ bit = RBUF_ACPI_EN_LITE;
+ else
+ bit = RBUF_ACPI_EN;
+
+ reg = rbuf_readl(priv, RBUF_CONTROL);
+ if (enable)
+ reg |= bit;
+ else
+ reg &= ~bit;
+ rbuf_writel(priv, reg, RBUF_CONTROL);
+}
+
static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
u32 reg;
/* Stop monitoring MPD interrupt */
- intrl2_0_mask_set(priv, INTRL2_0_MPD);
+ intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
+
+ /* Disable RXCHK, active filters and Broadcom tag matching */
+ reg = rxchk_readl(priv, RXCHK_CONTROL);
+ reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
+ RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
+ rxchk_writel(priv, reg, RXCHK_CONTROL);
/* Clear the MagicPacket detection logic */
- reg = umac_readl(priv, UMAC_MPD_CTRL);
- reg &= ~MPD_EN;
- umac_writel(priv, reg, UMAC_MPD_CTRL);
+ mpd_enable_set(priv, false);
netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
@@ -1077,6 +1105,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *txr;
unsigned int ring, ring_bit;
+ u32 reg;
priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1102,9 +1131,14 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
bcm_sysport_tx_reclaim_all(priv);
- if (priv->irq0_stat & INTRL2_0_MPD) {
- netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
- bcm_sysport_resume_from_wol(priv);
+ if (priv->irq0_stat & INTRL2_0_MPD)
+ netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+ if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+ RXCHK_BRCM_TAG_MATCH_MASK;
+ netdev_info(priv->netdev,
+ "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
}
if (!priv->is_lite)
@@ -2090,6 +2124,132 @@ static int bcm_sysport_stop(struct net_device *dev)
return 0;
}
+static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
+ u64 location)
+{
+ unsigned int index;
+ u32 reg;
+
+ for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
+ reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
+ reg &= RXCHK_BRCM_TAG_CID_MASK;
+ if (reg == location)
+ return index;
+ }
+
+ return -EINVAL;
+}
+
+static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
+ struct ethtool_rxnfc *nfc)
+{
+ int index;
+
+ /* This is not a rule that we know about */
+ index = bcm_sysport_rule_find(priv, nfc->fs.location);
+ if (index < 0)
+ return -EOPNOTSUPP;
+
+ nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;
+
+ return 0;
+}
+
+static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
+ struct ethtool_rxnfc *nfc)
+{
+ unsigned int index;
+ u32 reg;
+
+ /* We cannot match locations greater than what the classification ID
+ * permits (256 entries)
+ */
+ if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
+ return -E2BIG;
+
+ /* We cannot support flows that are not destined for a wake-up */
+ if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
+ return -EOPNOTSUPP;
+
+ /* All filters are already in use, we cannot match more rules */
+ if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
+ RXCHK_BRCM_TAG_MAX)
+ return -ENOSPC;
+
+ index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
+ if (index >= RXCHK_BRCM_TAG_MAX)
+ return -ENOSPC;
+
+ /* Location is the classification ID, and index is the position
+ * within one of our 8 possible filters to be programmed
+ */
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
+ reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
+ reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
+ rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
+ rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+
+ set_bit(index, priv->filters);
+
+ return 0;
+}
+
+static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
+ u64 location)
+{
+ int index;
+
+ /* This is not a rule that we know about */
+ index = bcm_sysport_rule_find(priv, location);
+ if (index < 0)
+ return -EOPNOTSUPP;
+
+ /* No need to disable this filter if it was enabled; this is
+ * taken care of at suspend time by bcm_sysport_suspend_to_wol
+ */
+ clear_bit(index, priv->filters);
+
+ return 0;
+}
+
+static int bcm_sysport_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_GRXCLSRULE:
+ ret = bcm_sysport_rule_get(priv, nfc);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int bcm_sysport_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = bcm_sysport_rule_set(priv, nfc);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = bcm_sysport_rule_del(priv, nfc->fs.location);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static const struct ethtool_ops bcm_sysport_ethtool_ops = {
.get_drvinfo = bcm_sysport_get_drvinfo,
.get_msglevel = bcm_sysport_get_msglvl,
@@ -2104,10 +2264,12 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
.set_coalesce = bcm_sysport_set_coalesce,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_rxnfc = bcm_sysport_get_rxnfc,
+ .set_rxnfc = bcm_sysport_set_rxnfc,
};
static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2116,7 +2278,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
unsigned int q, port;
if (!netdev_uses_dsa(dev))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
/* DSA tagging layer will have configured the correct queue */
q = BRCM_TAG_GET_QUEUE(queue);
@@ -2124,7 +2286,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
if (unlikely(!tx_ring))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
return tx_ring->index;
}
@@ -2423,21 +2585,43 @@ static int bcm_sysport_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
struct net_device *ndev = priv->netdev;
unsigned int timeout = 1000;
+ unsigned int index, i = 0;
u32 reg;
/* Password has already been programmed */
reg = umac_readl(priv, UMAC_MPD_CTRL);
- reg |= MPD_EN;
+ if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
+ reg |= MPD_EN;
reg &= ~PSW_EN;
if (priv->wolopts & WAKE_MAGICSECURE)
reg |= PSW_EN;
umac_writel(priv, reg, UMAC_MPD_CTRL);
+ if (priv->wolopts & WAKE_FILTER) {
+ /* Turn on ACPI matching to steal packets from RBUF */
+ reg = rbuf_readl(priv, RBUF_CONTROL);
+ if (priv->is_lite)
+ reg |= RBUF_ACPI_EN_LITE;
+ else
+ reg |= RBUF_ACPI_EN;
+ rbuf_writel(priv, reg, RBUF_CONTROL);
+
+ /* Enable RXCHK, active filters and Broadcom tag matching */
+ reg = rxchk_readl(priv, RXCHK_CONTROL);
+ reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
+ RXCHK_BRCM_TAG_MATCH_SHIFT);
+ for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+ reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
+ i++;
+ }
+ reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
+ rxchk_writel(priv, reg, RXCHK_CONTROL);
+ }
+
/* Make sure RBUF entered WoL mode as result */
do {
reg = rbuf_readl(priv, RBUF_STATUS);
@@ -2449,9 +2633,7 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
/* Do not leave the UniMAC RBUF matching only MPD packets */
if (!timeout) {
- reg = umac_readl(priv, UMAC_MPD_CTRL);
- reg &= ~MPD_EN;
- umac_writel(priv, reg, UMAC_MPD_CTRL);
+ mpd_enable_set(priv, false);
netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
return -ETIMEDOUT;
}
@@ -2460,14 +2642,14 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
umac_enable_set(priv, CMD_RX_EN, 1);
/* Enable the interrupt wake-up source */
- intrl2_0_mask_clear(priv, INTRL2_0_MPD);
+ intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
netif_dbg(priv, wol, ndev, "entered WOL mode\n");
return 0;
}
-static int bcm_sysport_suspend(struct device *d)
+static int __maybe_unused bcm_sysport_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2529,7 +2711,7 @@ static int bcm_sysport_suspend(struct device *d)
return ret;
}
-static int bcm_sysport_resume(struct device *d)
+static int __maybe_unused bcm_sysport_resume(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2622,7 +2804,6 @@ out_free_tx_rings:
bcm_sysport_fini_tx_ring(priv, i);
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
bcm_sysport_suspend, bcm_sysport_resume);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index cf440b91fd04..046c6c1d97fd 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -11,6 +11,7 @@
#ifndef __BCM_SYSPORT_H
#define __BCM_SYSPORT_H
+#include <linux/bitmap.h>
#include <linux/if_vlan.h>
#include <linux/net_dim.h>
@@ -155,14 +156,18 @@ struct bcm_rsb {
#define RXCHK_PARSE_AUTH (1 << 22)
#define RXCHK_BRCM_TAG0 0x04
-#define RXCHK_BRCM_TAG(i) ((i) * RXCHK_BRCM_TAG0)
+#define RXCHK_BRCM_TAG(i) ((i) * 0x4 + RXCHK_BRCM_TAG0)
#define RXCHK_BRCM_TAG0_MASK 0x24
-#define RXCHK_BRCM_TAG_MASK(i) ((i) * RXCHK_BRCM_TAG0_MASK)
+#define RXCHK_BRCM_TAG_MASK(i) ((i) * 0x4 + RXCHK_BRCM_TAG0_MASK)
#define RXCHK_BRCM_TAG_MATCH_STATUS 0x44
#define RXCHK_ETHERTYPE 0x48
#define RXCHK_BAD_CSUM_CNTR 0x4C
#define RXCHK_OTHER_DISC_CNTR 0x50
+#define RXCHK_BRCM_TAG_MAX 8
+#define RXCHK_BRCM_TAG_CID_SHIFT 16
+#define RXCHK_BRCM_TAG_CID_MASK 0xff
+
/* TXCHCK offsets and defines */
#define SYS_PORT_TXCHK_OFFSET 0x380
#define TXCHK_PKT_RDY_THRESH 0x00
@@ -185,6 +190,7 @@ struct bcm_rsb {
#define RBUF_RSB_SWAP0 (1 << 22)
#define RBUF_RSB_SWAP1 (1 << 23)
#define RBUF_ACPI_EN (1 << 23)
+#define RBUF_ACPI_EN_LITE (1 << 24)
#define RBUF_PKT_RDY_THRESH 0x04
@@ -777,6 +783,7 @@ struct bcm_sysport_priv {
/* Ethtool */
u32 msg_enable;
+ DECLARE_BITMAP(filters, RXCHK_BRCM_TAG_MAX);
struct bcm_sysport_stats64 stats64;
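
One real fix hides in the macro changes above: RXCHK_BRCM_TAG0 is itself the offset of the first tag register (0x04), so the old (i) * RXCHK_BRCM_TAG0 form evaluated to 0x00 for index 0, addressing the wrong register rather than the first tag slot. Comparing the two formulas, with values taken from the header above:

#include <stdio.h>

#define RXCHK_BRCM_TAG0		0x04
#define OLD_RXCHK_BRCM_TAG(i)	((i) * RXCHK_BRCM_TAG0)
#define NEW_RXCHK_BRCM_TAG(i)	((i) * 0x4 + RXCHK_BRCM_TAG0)

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("tag %d: old 0x%02x -> new 0x%02x\n",
		       i, OLD_RXCHK_BRCM_TAG(i), NEW_RXCHK_BRCM_TAG(i));
	return 0;	/* old: 0x00 0x04 0x08; new: 0x04 0x08 0x0c */
}

The same off-by-one-stride applied to RXCHK_BRCM_TAG_MASK(), whose base is 0x24.
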
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index e6ea8e61f96d..4c94d9218bba 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -236,7 +236,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
struct device *dma_dev = bgmac->dma_dev;
int empty_slot;
- bool freed = false;
unsigned bytes_compl = 0, pkts_compl = 0;
/* The last slot that hardware didn't consume yet */
@@ -279,7 +278,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
slot->dma_addr = 0;
ring->start++;
- freed = true;
}
if (!pkts_compl)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index af7b5a4d8ba0..5a727d4729da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1910,7 +1910,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1932,7 +1933,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+ return fallback(dev, skb, NULL) %
+ (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
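
The recurring fallback(dev, skb) to fallback(dev, skb, NULL) edits across these drivers track the ndo_select_queue rework that threads an optional subordinate device (sb_dev) through transmit queue selection; passing NULL preserves the old behaviour. A self-contained userspace model of the new callback shape (all types are stand-ins, only the signature is the point):

/* select_queue_model.c - model of the reworked queue-selection callback. */
#include <stdio.h>

struct net_device { const char *name; };
struct sk_buff { unsigned int priority; };

typedef unsigned short u16;
typedef u16 (*select_queue_fallback_t)(struct net_device *,
				       struct sk_buff *,
				       struct net_device *);

static u16 core_fallback(struct net_device *dev, struct sk_buff *skb,
			 struct net_device *sb_dev)
{
	(void)dev; (void)sb_dev;
	return skb->priority & 3;	/* pretend hash-based selection */
}

/* New shape: the third argument is the subordinate device (or NULL). */
static u16 drv_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	(void)sb_dev;
	if (skb->priority == 7)		/* pin control traffic to queue 0 */
		return 0;
	return fallback(dev, skb, NULL);	/* drivers here pass NULL */
}

int main(void)
{
	struct net_device dev = { "eth0" };
	struct sk_buff skb = { .priority = 5 };

	printf("queue %u\n", drv_select_queue(&dev, &skb, NULL, core_fallback));
	return 0;
}
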
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a8ce5c55bbb0..0e508e5defce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -497,7 +497,8 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 22243c480a05..98d4c5a3ff21 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6339,6 +6339,7 @@ int bnx2x_set_led(struct link_params *params,
*/
if (!vars->link_up)
break;
+ /* else: fall through */
case LED_MODE_ON:
if (((params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
@@ -12521,11 +12522,13 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
case PORT_FEATURE_LINK_SPEED_10M_HALF:
phy->req_duplex = DUPLEX_HALF;
+ /* fall through */
case PORT_FEATURE_LINK_SPEED_10M_FULL:
phy->req_line_speed = SPEED_10;
break;
case PORT_FEATURE_LINK_SPEED_100M_HALF:
phy->req_duplex = DUPLEX_HALF;
+ /* fall through */
case PORT_FEATURE_LINK_SPEED_100M_FULL:
phy->req_line_speed = SPEED_100;
break;
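
The comment churn here and in the bnx2x_main.c/bnx2x_sp.c hunks below ("Don't break", "falling through...") normalizes the wording toward comments that GCC's -Wimplicit-fallthrough heuristics recognize; note the bnxt.c async-event hunk further down also has to move its comment outside the closing brace, since a comment left inside the block is not seen as annotating the case boundary. Minimal demonstration:

/* Build with: gcc -Wimplicit-fallthrough=3 -c fallthrough.c */
int classify(int mode)
{
	int v = 0;

	switch (mode) {
	case 0:
		v += 1;		/* intentional fall-through below is annotated */
		/* fall through */
	case 1:
		v += 2;
		break;
	case 2:
		v += 4;		/* unannotated: warns under -Wimplicit-fallthrough */
	case 3:
		v += 8;
		break;
	}
	return v;
}
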
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 57348f2b49a3..71362b7f6040 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8561,11 +8561,11 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
bp->num_queues,
1 + bp->num_cnic_queues);
- /* falling through... */
+ /* fall through */
case BNX2X_INT_MODE_MSI:
bnx2x_enable_msi(bp);
- /* falling through... */
+ /* fall through */
case BNX2X_INT_MODE_INTX:
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8baf9d3eb4b1..3f4d2c8da21a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -3258,7 +3258,7 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
/* DEL command deletes all currently configured MACs */
case BNX2X_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* Don't break */
+ /* fall through */
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
@@ -3592,7 +3592,7 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
/* DEL command deletes all currently configured MACs */
case BNX2X_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* Don't break */
+ /* fall through */
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index dc77bfded865..62da46537734 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1827,6 +1827,7 @@ get_vf:
DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
vf->abs_vfid, qidx);
bnx2x_vf_handle_rss_update_eqe(bp, vf);
+ /* fall through */
case EVENT_RING_OPCODE_VF_FLR:
/* Do nothing for now */
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4394c1162be4..8bb1e38b1681 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -51,6 +51,8 @@
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -1115,7 +1117,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info->hash_type = PKT_HASH_TYPE_L4;
tpa_info->gso_type = SKB_GSO_TCPV4;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- if (hash_type == 3)
+ if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
tpa_info->gso_type = SKB_GSO_TCPV6;
tpa_info->rss_hash =
le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
@@ -1727,8 +1729,8 @@ static int bnxt_async_event_process(struct bnxt *bp,
speed);
}
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
- /* fall thru */
}
+ /* fall through */
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
@@ -3012,13 +3014,6 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr);
bp->hwrm_cmd_resp_addr = NULL;
- if (bp->hwrm_dbg_resp_addr) {
- dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
- bp->hwrm_dbg_resp_addr,
- bp->hwrm_dbg_resp_dma_addr);
-
- bp->hwrm_dbg_resp_addr = NULL;
- }
}
static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -3030,12 +3025,6 @@ static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
GFP_KERNEL);
if (!bp->hwrm_cmd_resp_addr)
return -ENOMEM;
- bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
- HWRM_DBG_REG_BUF_SIZE,
- &bp->hwrm_dbg_resp_dma_addr,
- GFP_KERNEL);
- if (!bp->hwrm_dbg_resp_addr)
- netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
return 0;
}
@@ -3458,7 +3447,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
- if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
memcpy(short_cmd_req, req, msg_len);
@@ -3651,7 +3640,9 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
+ struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_rgtr_input req = {0};
+ int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
@@ -3689,7 +3680,15 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
}
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ else if (resp->flags &
+ cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
}
static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
@@ -3994,6 +3993,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
if (set_rss) {
req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
+ req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
max_rings = bp->rx_nr_rings - 1;
@@ -4591,7 +4591,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
}
hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
u16 cp, stats;
hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
@@ -4637,7 +4637,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req->fid = cpu_to_le16(0xffff);
enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
req->num_tx_rings = cpu_to_le16(tx_rings);
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
@@ -4710,7 +4710,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
struct hwrm_func_vf_cfg_input req = {0};
int rc;
- if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
+ if (!BNXT_NEW_RM(bp)) {
bp->hw_resc.resv_tx_rings = tx_rings;
return 0;
}
@@ -4770,7 +4770,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
- if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+ if (BNXT_NEW_RM(bp) &&
(hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
return true;
@@ -4806,7 +4806,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
return rc;
tx = hw_resc->resv_tx_rings;
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
rx = hw_resc->resv_rx_rings;
cp = hw_resc->resv_cp_rings;
grp = hw_resc->resv_hw_ring_grps;
@@ -4850,7 +4850,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
u32 flags;
int rc;
- if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ if (!BNXT_NEW_RM(bp))
return 0;
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
@@ -4879,7 +4879,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, vnics);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
@@ -5101,9 +5101,9 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
- bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+ bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
- bp->flags |= BNXT_FLAG_FW_DCBX_AGENT;
+ bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
}
if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
@@ -5175,7 +5175,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
pf->vf_resv_strategy =
le16_to_cpu(resp->vf_reservation_strategy);
- if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
+ if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
}
hwrm_func_resc_qcaps_exit:
@@ -5261,7 +5261,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (bp->hwrm_spec_code >= 0x10803) {
rc = bnxt_hwrm_func_resc_qcaps(bp, true);
if (!rc)
- bp->flags |= BNXT_FLAG_NEW_RM;
+ bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
}
return 0;
}
@@ -5281,7 +5281,8 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
int rc = 0;
struct hwrm_queue_qportcfg_input req = {0};
struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
- u8 i, *qptr;
+ u8 i, j, *qptr;
+ bool no_rdma;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
@@ -5299,19 +5300,24 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
if (bp->max_tc > BNXT_MAX_QUEUE)
bp->max_tc = BNXT_MAX_QUEUE;
+ no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
+ qptr = &resp->queue_id0;
+ for (i = 0, j = 0; i < bp->max_tc; i++) {
+ bp->q_info[j].queue_id = *qptr++;
+ bp->q_info[j].queue_profile = *qptr++;
+ bp->tc_to_qidx[j] = j;
+ if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
+ (no_rdma && BNXT_PF(bp)))
+ j++;
+ }
+ bp->max_tc = max_t(u8, j, 1);
+
if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
bp->max_tc = 1;
if (bp->max_lltc > bp->max_tc)
bp->max_lltc = bp->max_tc;
- qptr = &resp->queue_id0;
- for (i = 0; i < bp->max_tc; i++) {
- bp->q_info[i].queue_id = *qptr++;
- bp->q_info[i].queue_profile = *qptr++;
- bp->tc_to_qidx[i] = i;
- }
-
qportcfg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
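
The reworked qportcfg loop above compacts the queue table in place: every firmware entry is read, but the destination index j only advances for entries that are kept (CNP queues are skipped unless RDMA is absent on a PF), and the final count is clamped to at least one. The same two-index pattern in miniature, with 7 standing in for a CNP profile:

#include <stdio.h>

int main(void)
{
	int profile[6] = { 0, 7, 1, 7, 2, 3 };
	int kept[6];
	int i, j = 0;

	for (i = 0; i < 6; i++) {
		kept[j] = profile[i];	/* write first, like bp->q_info[j] */
		if (profile[i] != 7)	/* keep unless CNP, like !BNXT_CNPQ() */
			j++;
	}
	if (j < 1)			/* like max_t(u8, j, 1) */
		j = 1;
	for (i = 0; i < j; i++)
		printf("%d ", kept[i]);
	printf("(%d kept)\n", j);
	return 0;
}
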
@@ -5364,7 +5370,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
(dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
- bp->flags |= BNXT_FLAG_SHORT_CMD;
+ bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -5933,7 +5939,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
max_idx = min_t(int, bp->total_irqs, max_cp);
avail_msix = max_idx - bp->cp_nr_rings;
- if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num)
+ if (!BNXT_NEW_RM(bp) || avail_msix >= num)
return avail_msix;
if (max_irq < total_req) {
@@ -5946,7 +5952,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
static int bnxt_get_num_msix(struct bnxt *bp)
{
- if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ if (!BNXT_NEW_RM(bp))
return bnxt_get_max_func_irqs(bp);
return bnxt_cp_rings_in_use(bp);
@@ -6069,8 +6075,7 @@ int bnxt_reserve_rings(struct bnxt *bp)
netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
return rc;
}
- if ((bp->flags & BNXT_FLAG_NEW_RM) &&
- (bnxt_get_num_msix(bp) != bp->total_irqs)) {
+ if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
rc = bnxt_init_int_mode(bp);
@@ -6350,6 +6355,10 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
}
+ if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
+ if (bp->test_info)
+ bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
+ }
if (resp->supported_speeds_auto_mode)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
@@ -6646,6 +6655,39 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
+{
+ struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_drv_if_change_input req = {0};
+ bool resc_reinit = false;
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
+ if (up)
+ req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc && (resp->flags &
+ cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
+ resc_reinit = true;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (up && resc_reinit && BNXT_NEW_RM(bp)) {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ hw_resc->resv_cp_rings = 0;
+ hw_resc->resv_tx_rings = 0;
+ hw_resc->resv_rx_rings = 0;
+ hw_resc->resv_hw_ring_grps = 0;
+ hw_resc->resv_vnics = 0;
+ }
+ return rc;
+}
+
static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
@@ -6755,6 +6797,62 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
} while (handle && handle != 0xffff);
}
+#ifdef CONFIG_BNXT_HWMON
+static ssize_t bnxt_show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct hwrm_temp_monitor_query_input req = {0};
+ struct hwrm_temp_monitor_query_output *resp;
+ struct bnxt *bp = dev_get_drvdata(dev);
+ u32 temp = 0;
+
+ resp = bp->hwrm_cmd_resp_addr;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+ temp = resp->temp * 1000; /* display millidegree */
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return sprintf(buf, "%u\n", temp);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
+
+static struct attribute *bnxt_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bnxt);
+
+static void bnxt_hwmon_close(struct bnxt *bp)
+{
+ if (bp->hwmon_dev) {
+ hwmon_device_unregister(bp->hwmon_dev);
+ bp->hwmon_dev = NULL;
+ }
+}
+
+static void bnxt_hwmon_open(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+ DRV_MODULE_NAME, bp,
+ bnxt_groups);
+ if (IS_ERR(bp->hwmon_dev)) {
+ bp->hwmon_dev = NULL;
+ dev_warn(&pdev->dev, "Cannot register hwmon device\n");
+ }
+}
+#else
+static void bnxt_hwmon_close(struct bnxt *bp)
+{
+}
+
+static void bnxt_hwmon_open(struct bnxt *bp)
+{
+}
+#endif
+
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
struct ethtool_eee *eee = &bp->eee;
@@ -6907,8 +7005,14 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
mutex_lock(&bp->link_lock);
rc = bnxt_update_phy_setting(bp);
mutex_unlock(&bp->link_lock);
- if (rc)
+ if (rc) {
netdev_warn(bp->dev, "failed to update phy settings\n");
+ if (BNXT_SINGLE_PF(bp)) {
+ bp->link_info.phy_retry = true;
+ bp->link_info.phy_retry_expires =
+ jiffies + 5 * HZ;
+ }
+ }
}
if (irq_re_init)
@@ -6994,8 +7098,16 @@ void bnxt_half_close_nic(struct bnxt *bp)
static int bnxt_open(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ bnxt_hwrm_if_change(bp, true);
+ rc = __bnxt_open_nic(bp, true, true);
+ if (rc)
+ bnxt_hwrm_if_change(bp, false);
+
+ bnxt_hwmon_open(bp);
- return __bnxt_open_nic(bp, true, true);
+ return rc;
}
static bool bnxt_drv_busy(struct bnxt *bp)
@@ -7057,8 +7169,10 @@ static int bnxt_close(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
+ bnxt_hwmon_close(bp);
bnxt_close_nic(bp, true, true);
bnxt_hwrm_shutdown_link(bp);
+ bnxt_hwrm_if_change(bp, false);
return 0;
}
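
Taken together with the bnxt_hwrm_if_change() addition earlier: firmware is told the interface is coming up before the real open, the notification is rolled back if the open fails, and the matching down notification is sent only after link shutdown. On a RESC_CHANGE response the reserved ring counters are zeroed so the next reservation pass re-queries firmware. The bracketing pattern, modeled in plain C:

/* open/close bracketing with rollback, modeled after bnxt_open()/bnxt_close() */
#include <stdio.h>

static int fw_if_change(int up)  { printf("fw: if %s\n", up ? "up" : "down"); return 0; }
static int do_open(int fail)     { return fail ? -1 : 0; }
static void do_close(void)       { printf("closed\n"); }

static int open_nic(int fail)
{
	int rc;

	fw_if_change(1);		/* notify "up" first */
	rc = do_open(fail);
	if (rc)
		fw_if_change(0);	/* roll back the "up" notification */
	return rc;
}

static void close_nic(void)
{
	do_close();
	fw_if_change(0);		/* last step, mirroring bnxt_close() */
}

int main(void)
{
	if (!open_nic(0))
		close_nic();
	return 0;
}
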
@@ -7308,7 +7422,7 @@ skip_uc:
static bool bnxt_can_reserve_rings(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
- if ((bp->flags & BNXT_FLAG_NEW_RM) && BNXT_VF(bp)) {
+ if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
/* No minimum rings were provisioned by the PF. Don't
@@ -7358,7 +7472,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
return false;
}
- if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ if (!BNXT_NEW_RM(bp))
return true;
if (vnics == bp->hw_resc.resv_vnics)
@@ -7592,6 +7706,16 @@ static void bnxt_timer(struct timer_list *t)
set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
+
+ if (bp->link_info.phy_retry) {
+ if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
+ bp->link_info.phy_retry = 0;
+ netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
+ } else {
+ set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+ }
+ }
bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
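
The PHY-update retry added here is deadline-based rather than counted: a failed update arms phy_retry with a jiffies deadline five seconds out, the periodic timer keeps re-queuing the update work until bnxt_update_phy_setting() succeeds or time_after() trips, and time_after() is wraparound-safe where a plain "jiffies > expires" comparison is not. The same shape with an ordinary clock (5 s standing in for 5 * HZ):

/* retry_deadline.c - retry until success or a deadline passes. */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int try_update(void)
{
	static int attempts;
	return ++attempts >= 3 ? 0 : -1;	/* succeed on the third try */
}

int main(void)
{
	time_t expires = time(NULL) + 5;	/* like jiffies + 5 * HZ */
	int retry = 1;

	while (retry) {
		if (time(NULL) > expires) {	/* kernel uses time_after() */
			fprintf(stderr, "giving up after deadline\n");
			retry = 0;
		} else if (!try_update()) {
			printf("update succeeded\n");
			retry = 0;
		} else {
			sleep(1);		/* timer tick stand-in */
		}
	}
	return 0;
}
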
@@ -7679,6 +7803,19 @@ static void bnxt_sp_task(struct work_struct *work)
netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
rc);
}
+ if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
+ int rc;
+
+ mutex_lock(&bp->link_lock);
+ rc = bnxt_update_phy_setting(bp);
+ mutex_unlock(&bp->link_lock);
+ if (rc) {
+ netdev_warn(bp->dev, "update phy settings retry failed\n");
+ } else {
+ bp->link_info.phy_retry = false;
+ netdev_info(bp->dev, "update phy settings retry succeeded\n");
+ }
+ }
if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
mutex_lock(&bp->link_lock);
bnxt_get_port_module_status(bp);
@@ -7731,7 +7868,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx_rings <<= 1;
cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
cp += bnxt_get_ulp_msix_num(bp);
return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
vnics);
@@ -7991,7 +8128,7 @@ static int bnxt_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
- bp, bp);
+ bp, bp, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
return 0;
@@ -8740,7 +8877,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
- if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
rc = bnxt_alloc_hwrm_short_cmd_req(bp);
if (rc)
goto init_err_pci_clean;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 91575ef97c8c..fefa011320e0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,11 +12,11 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.9.1"
+#define DRV_MODULE_VERSION "1.9.2"
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 9
-#define DRV_VER_UPD 1
+#define DRV_VER_UPD 2
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
@@ -326,6 +326,10 @@ struct rx_tpa_start_cmp_ext {
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT)
+#define TPA_START_IS_IPV6(rx_tpa_start) \
+ (!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
+
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
@@ -862,6 +866,7 @@ struct bnxt_pf_info {
u8 vf_resv_strategy;
#define BNXT_VF_RESV_STRATEGY_MAXIMAL 0
#define BNXT_VF_RESV_STRATEGY_MINIMAL 1
+#define BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC 2
void *hwrm_cmd_req_addr[4];
dma_addr_t hwrm_cmd_req_dma_addr[4];
struct bnxt_vf_info *vf;
@@ -959,6 +964,9 @@ struct bnxt_link_info {
u16 advertising; /* user adv setting */
bool force_link_chng;
+ bool phy_retry;
+ unsigned long phy_retry_expires;
+
/* a copy of phy_qcfg output used to report link
* info to VF
*/
@@ -990,6 +998,8 @@ struct bnxt_led_info {
struct bnxt_test_info {
u8 offline_mask;
+ u8 flags;
+#define BNXT_TEST_FL_EXT_LPBK 0x1
u16 timeout;
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
@@ -1134,7 +1144,6 @@ struct bnxt {
atomic_t intr_sem;
u32 flags;
- #define BNXT_FLAG_DCB_ENABLED 0x1
#define BNXT_FLAG_VF 0x2
#define BNXT_FLAG_LRO 0x4
#ifdef CONFIG_INET
@@ -1163,15 +1172,11 @@ struct bnxt {
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
- #define BNXT_FLAG_FW_LLDP_AGENT 0x80000
#define BNXT_FLAG_MULTI_HOST 0x100000
- #define BNXT_FLAG_SHORT_CMD 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
- #define BNXT_FLAG_FW_DCBX_AGENT 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
- #define BNXT_FLAG_NEW_RM 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -1276,10 +1281,19 @@ struct bnxt {
struct ieee_ets *ieee_ets;
u8 dcbx_cap;
u8 default_pri;
+ u8 max_dscp_value;
#endif /* CONFIG_BNXT_DCB */
u32 msg_enable;
+ u32 fw_cap;
+ #define BNXT_FW_CAP_SHORT_CMD 0x00000001
+ #define BNXT_FW_CAP_LLDP_AGENT 0x00000002
+ #define BNXT_FW_CAP_DCBX_AGENT 0x00000004
+ #define BNXT_FW_CAP_NEW_RM 0x00000008
+ #define BNXT_FW_CAP_IF_CHANGE 0x00000010
+
+#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u32 hwrm_intr_seq_id;
@@ -1287,9 +1301,6 @@ struct bnxt {
dma_addr_t hwrm_short_cmd_req_dma_addr;
void *hwrm_cmd_resp_addr;
dma_addr_t hwrm_cmd_resp_dma_addr;
- void *hwrm_dbg_resp_addr;
- dma_addr_t hwrm_dbg_resp_dma_addr;
-#define HWRM_DBG_REG_BUF_SIZE 128
struct rx_port_stats *hw_rx_port_stats;
struct tx_port_stats *hw_tx_port_stats;
@@ -1345,6 +1356,7 @@ struct bnxt {
#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
#define BNXT_FLOW_STATS_SP_EVENT 15
+#define BNXT_UPDATE_PHY_SP_EVENT 16
struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
@@ -1400,6 +1412,7 @@ struct bnxt {
struct bnxt_tc_info *tc_info;
struct dentry *debugfs_pdev;
struct dentry *debugfs_dim;
+ struct device *hwmon_dev;
};
#define BNXT_RX_STATS_OFFSET(counter) \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
new file mode 100644
index 000000000000..09c22f8fe399
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -0,0 +1,66 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2018 Broadcom Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_COREDUMP_H
+#define BNXT_COREDUMP_H
+
+struct bnxt_coredump_segment_hdr {
+ __u8 signature[4];
+ __le32 component_id;
+ __le32 segment_id;
+ __le32 flags;
+ __u8 low_version;
+ __u8 high_version;
+ __le16 function_id;
+ __le32 offset;
+ __le32 length;
+ __le32 status;
+ __le32 duration;
+ __le32 data_offset;
+ __le32 instance;
+ __le32 rsvd[5];
+};
+
+struct bnxt_coredump_record {
+ __u8 signature[4];
+ __le32 flags;
+ __u8 low_version;
+ __u8 high_version;
+ __u8 asic_state;
+ __u8 rsvd0[5];
+ char system_name[32];
+ __le16 year;
+ __le16 month;
+ __le16 day;
+ __le16 hour;
+ __le16 minute;
+ __le16 second;
+ __le16 utc_bias;
+ __le16 rsvd1;
+ char commandline[256];
+ __le32 total_segments;
+ __le32 os_ver_major;
+ __le32 os_ver_minor;
+ __le32 rsvd2;
+ char os_name[32];
+ __le16 end_year;
+ __le16 end_month;
+ __le16 end_day;
+ __le16 end_hour;
+ __le16 end_minute;
+ __le16 end_second;
+ __le16 end_utc_bias;
+ __le32 asic_id1;
+ __le32 asic_id2;
+ __le32 coredump_status;
+ __u8 ioctl_low_version;
+ __u8 ioctl_high_version;
+ __le16 rsvd3[313];
+};
+#endif
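
The new header freezes the layout of the dump that ethtool retrieves: a sequence of 64-byte "sEgM" segment headers, each followed by its payload at data_offset, terminated by a single "cOrE" record. A hedged host-side walk using only those two fields (the offsets 24 for length and 36 for data_offset follow from the field order above, assuming natural alignment and no packing surprises):

/* walk_coredump.c - sketch of walking a dump built from the structs above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void walk(const uint8_t *buf, size_t size)
{
	size_t off = 0;

	while (off + 64 <= size && !memcmp(buf + off, "sEgM", 4)) {
		uint32_t len = le32(buf + off + 24);	/* seg_hdr->length */
		uint32_t data = le32(buf + off + 36);	/* seg_hdr->data_offset */

		printf("segment at %zu: %u payload bytes\n", off, len);
		off += data + len;
	}
	if (off + 4 <= size && !memcmp(buf + off, "cOrE", 4))
		printf("trailing coredump record at %zu\n", off);
}

int main(void)
{
	uint8_t buf[72] = { 's', 'E', 'g', 'M' };	/* one synthetic segment */

	buf[24] = 4;			/* length = 4 */
	buf[36] = 64;			/* data_offset = header size */
	memcpy(buf + 68, "cOrE", 4);
	walk(buf, sizeof(buf));
	return 0;
}
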
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index d5bc72cecde3..ddc98c359488 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -385,6 +385,61 @@ set_app_exit:
return rc;
}
+static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
+{
+ struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_queue_dscp_qcaps_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
+ if (bp->max_dscp_value < 0x3f)
+ bp->max_dscp_value = 0;
+ }
+
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
+ bool add)
+{
+ struct hwrm_queue_dscp2pri_cfg_input req = {0};
+ struct bnxt_dscp2pri_entry *dscp2pri;
+ dma_addr_t mapping;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10800)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1);
+ dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri),
+ &mapping, GFP_KERNEL);
+ if (!dscp2pri)
+ return -ENOMEM;
+
+ req.src_data_addr = cpu_to_le64(mapping);
+ dscp2pri->dscp = app->protocol;
+ if (add)
+ dscp2pri->mask = 0x3f;
+ else
+ dscp2pri->mask = 0;
+ dscp2pri->pri = app->priority;
+ req.entry_cnt = cpu_to_le16(1);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
+ mapping);
+ return rc;
+}
+
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
int total_ets_bw = 0;
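
With the pieces above, an IEEE APP entry using IEEE_8021QAZ_APP_SEL_DSCP is now accepted: setapp/delapp first validate the DSCP value against max_dscp_value (firmware must expose the full 6-bit range, otherwise the feature stays off), then program the firmware map with mask 0x3f on add and 0 on delete. The dcbnl payload is just a struct dcb_app; for example, mapping DSCP 24 (CS3) to priority 3 (delivery is via the DCB netlink path that dcb_ieee_setapp() already services, and recent lldptool can install such entries, roughly "lldptool -T -i eth0 -V APP app=3,5,24", syntax hedged):

#include <linux/dcbnl.h>

/* The entry bnxt_dcbnl_ieee_setapp() would validate and push to firmware. */
static const struct dcb_app dscp24_to_pri3 = {
	.selector = IEEE_8021QAZ_APP_SEL_DSCP,
	.protocol = 24,		/* DSCP value, must be <= bp->max_dscp_value */
	.priority = 3,
};
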
@@ -551,15 +606,30 @@ static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
return rc;
}
+static int bnxt_dcbnl_ieee_dscp_app_prep(struct bnxt *bp, struct dcb_app *app)
+{
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) {
+ if (!bp->max_dscp_value)
+ return -ENOTSUPP;
+ if (app->protocol > bp->max_dscp_value)
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
struct bnxt *bp = netdev_priv(dev);
- int rc = -EINVAL;
+ int rc;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
!(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return -EINVAL;
+ rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
+ if (rc)
+ return rc;
+
rc = dcb_ieee_setapp(dev, app);
if (rc)
return rc;
@@ -570,6 +640,9 @@ static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
app->protocol == ROCE_V2_UDP_DPORT))
rc = bnxt_hwrm_set_dcbx_app(bp, app, true);
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, true);
+
return rc;
}
@@ -582,6 +655,10 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
!(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return -EINVAL;
+ rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
+ if (rc)
+ return rc;
+
rc = dcb_ieee_delapp(dev, app);
if (rc)
return rc;
@@ -591,6 +668,9 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
app->protocol == ROCE_V2_UDP_DPORT))
rc = bnxt_hwrm_set_dcbx_app(bp, app, false);
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, false);
+
return rc;
}
@@ -610,7 +690,7 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return 1;
if (mode & DCB_CAP_DCBX_HOST) {
- if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
+ if (BNXT_VF(bp) || (bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
return 1;
/* only support IEEE */
@@ -642,10 +722,11 @@ void bnxt_dcb_init(struct bnxt *bp)
if (bp->hwrm_spec_code < 0x10501)
return;
+ bnxt_hwrm_queue_dscp_qcaps(bp);
bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
- if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
+ if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
- else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT)
+ else if (bp->fw_cap & BNXT_FW_CAP_DCBX_AGENT)
bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
bp->dev->dcbnl_ops = &dcbnl_ops;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 69efde785f23..6eed231de565 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -33,10 +33,20 @@ struct bnxt_cos2bw_cfg {
u8 unused;
};
+struct bnxt_dscp2pri_entry {
+ u8 dscp;
+ u8 mask;
+ u8 pri;
+};
+
#define BNXT_LLQ(q_profile) \
((q_profile) == \
QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE)
+#define BNXT_CNPQ(q_profile) \
+ ((q_profile) == \
+ QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)
+
#define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300
void bnxt_dcb_init(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 402fa32f7a88..f3b9fbcc705b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -21,16 +21,99 @@ static const struct devlink_ops bnxt_dl_ops = {
#endif /* CONFIG_BNXT_SRIOV */
};
+static const struct bnxt_dl_nvm_param nvm_params[] = {
+ {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
+ BNXT_NVM_SHARED_CFG, 1},
+};
+
+static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ int msg_len, union devlink_param_value *val)
+{
+ struct hwrm_nvm_get_variable_input *req = msg;
+ void *data_addr = NULL, *buf = NULL;
+ struct bnxt_dl_nvm_param nvm_param;
+ int bytesize, idx = 0, rc, i;
+ dma_addr_t data_dma_addr;
+
+ /* Get/Set NVM CFG parameter is supported only on PFs */
+ if (BNXT_VF(bp))
+ return -EPERM;
+
+ for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
+ if (nvm_params[i].id == param_id) {
+ nvm_param = nvm_params[i];
+ break;
+ }
+ }
+
+ if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
+ idx = bp->pf.port_id;
+ else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+ idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
+
+ bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
+ if (nvm_param.num_bits == 1)
+ buf = &val->vbool;
+
+ data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
+ &data_dma_addr, GFP_KERNEL);
+ if (!data_addr)
+ return -ENOMEM;
+
+ req->dest_data_addr = cpu_to_le64(data_dma_addr);
+ req->data_len = cpu_to_le16(nvm_param.num_bits);
+ req->option_num = cpu_to_le16(nvm_param.offset);
+ req->index_0 = cpu_to_le16(idx);
+ if (idx)
+ req->dimensions = cpu_to_le16(1);
+
+ if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE))
+ memcpy(data_addr, buf, bytesize);
+
+ rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
+ memcpy(buf, data_addr, bytesize);
+
+ dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
+ if (rc)
+ return -EIO;
+ return 0;
+}
+
+static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct hwrm_nvm_get_variable_input req = {0};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
+ return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+}
+
+static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct hwrm_nvm_set_variable_input req = {0};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
+ return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+}
+
+static const struct devlink_param bnxt_dl_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ NULL),
+};
+
int bnxt_dl_register(struct bnxt *bp)
{
struct devlink *dl;
int rc;
- if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
- return 0;
-
- if (bp->hwrm_spec_code < 0x10803) {
- netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n");
+ if (bp->hwrm_spec_code < 0x10600) {
+ netdev_warn(bp->dev, "Firmware does not support NVM params");
return -ENOTSUPP;
}
@@ -41,16 +124,34 @@ int bnxt_dl_register(struct bnxt *bp)
}
bnxt_link_bp_to_dl(bp, dl);
- bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+
+ /* Add switchdev eswitch mode setting, if SRIOV supported */
+ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
+ bp->hwrm_spec_code > 0x10803)
+ bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+
rc = devlink_register(dl, &bp->pdev->dev);
if (rc) {
- bnxt_link_bp_to_dl(bp, NULL);
- devlink_free(dl);
netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
- return rc;
+ goto err_dl_free;
+ }
+
+ rc = devlink_params_register(dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
+ if (rc) {
+ netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
+ rc);
+ goto err_dl_unreg;
}
return 0;
+
+err_dl_unreg:
+ devlink_unregister(dl);
+err_dl_free:
+ bnxt_link_bp_to_dl(bp, NULL);
+ devlink_free(dl);
+ return rc;
}
void bnxt_dl_unregister(struct bnxt *bp)
@@ -60,6 +161,8 @@ void bnxt_dl_unregister(struct bnxt *bp)
if (!dl)
return;
+ devlink_params_unregister(dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
devlink_unregister(dl);
devlink_free(dl);
}
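
Extending this scheme to another firmware NVM option means one row in each table: nvm_params[] binds a devlink param id to its NVM option number, scope, and bit width, and bnxt_dl_params[] publishes it with permanent cmode. A hypothetical sketch follows; the IGNORE_ARI generic id and option number 402 are invented placeholders here, and only ENABLE_SRIOV exists in this diff. From userspace the existing knob is driven with something like "devlink dev param set pci/0000:3b:00.0 name enable_sriov value true cmode permanent" (a devlink tool with params support assumed):

/* Hypothetical second parameter; id and NVM offset are made up. */
static const struct bnxt_dl_nvm_param nvm_params[] = {
	{DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
	 BNXT_NVM_SHARED_CFG, 1},
	{DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, 402,
	 BNXT_NVM_SHARED_CFG, 1},
};

static const struct devlink_param bnxt_dl_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      NULL),
	DEVLINK_PARAM_GENERIC(IGNORE_ARI,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      NULL),
};
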
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index e92a35d8b642..2f68dc048390 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -33,6 +33,21 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
}
}
+#define NVM_OFF_ENABLE_SRIOV 401
+
+enum bnxt_nvm_dir_type {
+ BNXT_NVM_SHARED_CFG = 40,
+ BNXT_NVM_PORT_CFG,
+ BNXT_NVM_FUNC_CFG,
+};
+
+struct bnxt_dl_nvm_param {
+ u16 id;
+ u16 offset;
+ u16 dir_type;
+ u16 num_bits;
+};
+
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7270c8b0cef3..e52d7af3ab3e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -16,12 +16,15 @@
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
+#include <linux/utsname.h>
+#include <linux/time.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
+#include "bnxt_coredump.h"
#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
@@ -112,6 +115,11 @@ static int bnxt_set_coalesce(struct net_device *dev,
BNXT_MAX_STATS_COAL_TICKS);
stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
bp->stats_coal_ticks = stats_ticks;
+ if (bp->stats_coal_ticks)
+ bp->current_interval =
+ bp->stats_coal_ticks * HZ / 1000000;
+ else
+ bp->current_interval = BNXT_TIMER_INTERVAL;
update_stats = true;
}
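
The driver timer now tracks the statistics coalescing setting: stats_coal_ticks is in microseconds, so the refresh period in jiffies is ticks * HZ / 1000000, falling back to BNXT_TIMER_INTERVAL when coalescing is disabled. Quick arithmetic check (the HZ value is just an example CONFIG_HZ):

#include <stdio.h>

#define HZ 250			/* example CONFIG_HZ */

int main(void)
{
	unsigned int ticks[] = { 1000000, 500000, 250000 };

	for (int i = 0; i < 3; i++)
		printf("%7u us -> %lu jiffies\n", ticks[i],
		       (unsigned long)ticks[i] * HZ / 1000000);
	return 0;	/* 250, 125, 62 with HZ=250 */
}
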
@@ -162,7 +170,7 @@ static const struct {
BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
- BNXT_RX_STATS_ENTRY(rx_1024b_1518_frames),
+ BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
@@ -205,9 +213,9 @@ static const struct {
BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
- BNXT_TX_STATS_ENTRY(tx_1024b_1518_frames),
+ BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
- BNXT_TX_STATS_ENTRY(tx_1519b_2047_frames),
+ BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
@@ -463,7 +471,7 @@ static void bnxt_get_channels(struct net_device *dev,
int max_tx_sch_inputs;
/* Get the most up-to-date max_tx_sch_inputs. */
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
bnxt_hwrm_func_resc_qcaps(bp, false);
max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
@@ -2392,7 +2400,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
return rc;
}
-static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
+static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
struct hwrm_port_phy_cfg_input req = {0};
@@ -2400,7 +2408,10 @@ static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
if (enable) {
bnxt_disable_an_for_lpbk(bp, &req);
- req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
+ if (ext)
+ req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
+ else
+ req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
} else {
req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
}
@@ -2533,15 +2544,17 @@ static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
return rc;
}
-#define BNXT_DRV_TESTS 3
+#define BNXT_DRV_TESTS 4
#define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1)
-#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
+#define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
+#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3)
static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
u64 *buf)
{
struct bnxt *bp = netdev_priv(dev);
+ bool do_ext_lpbk = false;
bool offline = false;
u8 test_results = 0;
u8 test_mask = 0;
@@ -2555,6 +2568,10 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
return;
}
+ if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
+ (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK))
+ do_ext_lpbk = true;
+
if (etest->flags & ETH_TEST_FL_OFFLINE) {
if (bp->pf.active_vfs) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -2595,13 +2612,22 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
buf[BNXT_MACLPBK_TEST_IDX] = 0;
bnxt_hwrm_mac_loopback(bp, false);
- bnxt_hwrm_phy_loopback(bp, true);
+ bnxt_hwrm_phy_loopback(bp, true, false);
msleep(1000);
if (bnxt_run_loopback(bp)) {
buf[BNXT_PHYLPBK_TEST_IDX] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
- bnxt_hwrm_phy_loopback(bp, false);
+ if (do_ext_lpbk) {
+ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ bnxt_hwrm_phy_loopback(bp, true, true);
+ msleep(1000);
+ if (bnxt_run_loopback(bp)) {
+ buf[BNXT_EXTLPBK_TEST_IDX] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+ bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
bnxt_open_nic(bp, false, true);
}
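
External loopback is strictly opt-in: the test runs only when userspace sets ETH_TEST_FL_EXTERNAL_LB and the PHY advertised BNXT_TEST_FL_EXT_LPBK, and the driver acknowledges execution with ETH_TEST_FL_EXTERNAL_LB_DONE. A hedged sketch of requesting it through the raw ioctl (recent ethtool exposes the same via roughly "ethtool -t eth0 external_lb"; a loopback plug must be fitted, and 16 result slots are assumed to be enough for this driver's test list):

/* ext_lb_test.c - request the offline self-test with external loopback. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr = { .ifr_name = "eth0" };	/* illustrative */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ethtool_test *test;

	test = calloc(1, sizeof(*test) + 16 * sizeof(__u64));
	if (fd < 0 || !test)
		return 1;
	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE | ETH_TEST_FL_EXTERNAL_LB;
	ifr.ifr_data = (void *)test;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_TEST");
		return 1;
	}
	printf("failed: %s, external lb ran: %s\n",
	       test->flags & ETH_TEST_FL_FAILED ? "yes" : "no",
	       test->flags & ETH_TEST_FL_EXTERNAL_LB_DONE ? "yes" : "no");
	return 0;
}
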
@@ -2662,6 +2688,331 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return rc;
}
+static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
+ struct bnxt_hwrm_dbg_dma_info *info)
+{
+ struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_dbg_cmn_input *cmn_req = msg;
+ __le16 *seq_ptr = msg + info->seq_off;
+ u16 seq = 0, len, segs_off;
+ void *resp = cmn_resp;
+ dma_addr_t dma_handle;
+ int rc, off = 0;
+ void *dma_buf;
+
+ dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
+ GFP_KERNEL);
+ if (!dma_buf)
+ return -ENOMEM;
+
+ segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ total_segments);
+ cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
+ cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ while (1) {
+ *seq_ptr = cpu_to_le16(seq);
+ rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
+ if (!seq &&
+ cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+ info->segs = le16_to_cpu(*((__le16 *)(resp +
+ segs_off)));
+ if (!info->segs) {
+ rc = -EIO;
+ break;
+ }
+
+ info->dest_buf_size = info->segs *
+ sizeof(struct coredump_segment_record);
+ info->dest_buf = kmalloc(info->dest_buf_size,
+ GFP_KERNEL);
+ if (!info->dest_buf) {
+ rc = -ENOMEM;
+ break;
+ }
+ }
+
+ if (info->dest_buf)
+ memcpy(info->dest_buf + off, dma_buf, len);
+
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+ info->dest_buf_size += len;
+
+ if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
+ break;
+
+ seq++;
+ off += len;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
+ return rc;
+}
+
+static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
+ struct bnxt_coredump *coredump)
+{
+ struct hwrm_dbg_coredump_list_input req = {0};
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);
+
+ info.dma_len = COREDUMP_LIST_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ data_len);
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
+ if (!rc) {
+ coredump->data = info.dest_buf;
+ coredump->data_size = info.dest_buf_size;
+ coredump->total_segs = info.segs;
+ }
+ return rc;
+}
+
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
+ u16 segment_id)
+{
+ struct hwrm_dbg_coredump_initiate_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
+ req.component_id = cpu_to_le16(component_id);
+ req.segment_id = cpu_to_le16(segment_id);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
+ u16 segment_id, u32 *seg_len,
+ void *buf, u32 offset)
+{
+ struct hwrm_dbg_coredump_retrieve_input req = {0};
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
+ req.component_id = cpu_to_le16(component_id);
+ req.segment_id = cpu_to_le16(segment_id);
+
+ info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
+ seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
+ data_len);
+ if (buf)
+ info.dest_buf = buf + offset;
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
+ if (!rc)
+ *seg_len = info.dest_buf_size;
+
+ return rc;
+}
+
+static void
+bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
+ struct bnxt_coredump_segment_hdr *seg_hdr,
+ struct coredump_segment_record *seg_rec, u32 seg_len,
+ int status, u32 duration, u32 instance)
+{
+ memset(seg_hdr, 0, sizeof(*seg_hdr));
+ memcpy(seg_hdr->signature, "sEgM", 4);
+ if (seg_rec) {
+ seg_hdr->component_id = (__force __le32)seg_rec->component_id;
+ seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
+ seg_hdr->low_version = seg_rec->version_low;
+ seg_hdr->high_version = seg_rec->version_hi;
+ } else {
+ /* For hwrm_ver_get response Component id = 2
+ * and Segment id = 0
+ */
+ seg_hdr->component_id = cpu_to_le32(2);
+ seg_hdr->segment_id = 0;
+ }
+ seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
+ seg_hdr->length = cpu_to_le32(seg_len);
+ seg_hdr->status = cpu_to_le32(status);
+ seg_hdr->duration = cpu_to_le32(duration);
+ seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
+ seg_hdr->instance = cpu_to_le32(instance);
+}
+
+static void
+bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
+ time64_t start, s16 start_utc, u16 total_segs,
+ int status)
+{
+ time64_t end = ktime_get_real_seconds();
+ u32 os_ver_major = 0, os_ver_minor = 0;
+ struct tm tm;
+
+ time64_to_tm(start, 0, &tm);
+ memset(record, 0, sizeof(*record));
+ memcpy(record->signature, "cOrE", 4);
+ record->flags = 0;
+ record->low_version = 0;
+ record->high_version = 1;
+ record->asic_state = 0;
+ strlcpy(record->system_name, utsname()->nodename,
+ sizeof(record->system_name));
+ record->year = cpu_to_le16(tm.tm_year);
+ record->month = cpu_to_le16(tm.tm_mon);
+ record->day = cpu_to_le16(tm.tm_mday);
+ record->hour = cpu_to_le16(tm.tm_hour);
+ record->minute = cpu_to_le16(tm.tm_min);
+ record->second = cpu_to_le16(tm.tm_sec);
+ record->utc_bias = cpu_to_le16(start_utc);
+ strcpy(record->commandline, "ethtool -w");
+ record->total_segments = cpu_to_le32(total_segs);
+
+ sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
+ record->os_ver_major = cpu_to_le32(os_ver_major);
+ record->os_ver_minor = cpu_to_le32(os_ver_minor);
+
+ strlcpy(record->os_name, utsname()->sysname, 32);
+ time64_to_tm(end, 0, &tm);
+ record->end_year = cpu_to_le16(tm.tm_year + 1900);
+ record->end_month = cpu_to_le16(tm.tm_mon + 1);
+ record->end_day = cpu_to_le16(tm.tm_mday);
+ record->end_hour = cpu_to_le16(tm.tm_hour);
+ record->end_minute = cpu_to_le16(tm.tm_min);
+ record->end_second = cpu_to_le16(tm.tm_sec);
+ record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
+ record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
+ bp->ver_resp.chip_rev << 8 |
+ bp->ver_resp.chip_metal);
+ record->asic_id2 = 0;
+ record->coredump_status = cpu_to_le32(status);
+ record->ioctl_low_version = 0;
+ record->ioctl_high_version = 0;
+}
+
+static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
+{
+ u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
+ struct coredump_segment_record *seg_record = NULL;
+ u32 offset = 0, seg_hdr_len, seg_record_len;
+ struct bnxt_coredump_segment_hdr seg_hdr;
+ struct bnxt_coredump coredump = {NULL};
+ time64_t start_time;
+ u16 start_utc;
+ int rc = 0, i;
+
+ start_time = ktime_get_real_seconds();
+ start_utc = sys_tz.tz_minuteswest * 60;
+ seg_hdr_len = sizeof(seg_hdr);
+
+ /* First segment should be hwrm_ver_get response */
+ *dump_len = seg_hdr_len + ver_get_resp_len;
+ if (buf) {
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
+ 0, 0, 0);
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len;
+ memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
+ offset += ver_get_resp_len;
+ }
+
+ rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
+ if (rc) {
+ netdev_err(bp->dev, "Failed to get coredump segment list\n");
+ goto err;
+ }
+
+ *dump_len += seg_hdr_len * coredump.total_segs;
+
+ seg_record = (struct coredump_segment_record *)coredump.data;
+ seg_record_len = sizeof(*seg_record);
+
+ for (i = 0; i < coredump.total_segs; i++) {
+ u16 comp_id = le16_to_cpu(seg_record->component_id);
+ u16 seg_id = le16_to_cpu(seg_record->segment_id);
+ u32 duration = 0, seg_len = 0;
+ unsigned long start, end;
+
+ start = jiffies;
+
+ rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+ if (rc) {
+ netdev_err(bp->dev,
+ "Failed to initiate coredump for seg = %d\n",
+ seg_record->segment_id);
+ goto next_seg;
+ }
+
+ /* Write segment data into the buffer */
+ rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
+ &seg_len, buf,
+ offset + seg_hdr_len);
+ if (rc)
+ netdev_err(bp->dev,
+ "Failed to retrieve coredump for seg = %d\n",
+ seg_record->segment_id);
+
+next_seg:
+ end = jiffies;
+ duration = jiffies_to_msecs(end - start);
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
+ rc, duration, 0);
+
+ if (buf) {
+ /* Write segment header into the buffer */
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len + seg_len;
+ }
+
+ *dump_len += seg_len;
+ seg_record =
+ (struct coredump_segment_record *)((u8 *)seg_record +
+ seg_record_len);
+ }
+
+err:
+ if (buf)
+ bnxt_fill_coredump_record(bp, buf + offset, start_time,
+ start_utc, coredump.total_segs + 1,
+ rc);
+ kfree(coredump.data);
+ *dump_len += sizeof(struct bnxt_coredump_record);
+
+ return rc;
+}
+
+static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (bp->hwrm_spec_code < 0x10801)
+ return -EOPNOTSUPP;
+
+ dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
+ bp->ver_resp.hwrm_fw_min_8b << 16 |
+ bp->ver_resp.hwrm_fw_bld_8b << 8 |
+ bp->ver_resp.hwrm_fw_rsvd_8b;
+
+ return bnxt_get_coredump(bp, NULL, &dump->len);
+}
+
+static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
+ void *buf)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (bp->hwrm_spec_code < 0x10801)
+ return -EOPNOTSUPP;
+
+ memset(buf, 0, dump->len);
+
+ return bnxt_get_coredump(bp, buf, &dump->len);
+}
+
void bnxt_ethtool_init(struct bnxt *bp)
{
struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
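
With get_dump_flag/get_dump_data wired up above, the coredump flows through the standard two-step ethtool dump API: query the size, allocate, then fetch. This is what "ethtool -w eth0 data <file>" performs; the raw sequence, with an illustrative interface and output name:

/* get_dump.c - hedged sketch of ETHTOOL_GET_DUMP_FLAG / _DATA retrieval. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_dump flag = { .cmd = ETHTOOL_GET_DUMP_FLAG };
	struct ifreq ifr = { .ifr_name = "eth0" };	/* illustrative */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ethtool_dump *dump;
	FILE *out;

	ifr.ifr_data = (void *)&flag;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;			/* driver reports size/version */

	dump = calloc(1, sizeof(*dump) + flag.len);
	if (!dump)
		return 1;
	dump->cmd = ETHTOOL_GET_DUMP_DATA;
	dump->len = flag.len;
	ifr.ifr_data = (void *)dump;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	out = fopen("bnxt.dump", "wb");		/* arbitrary output name */
	if (out) {
		fwrite(dump->data, 1, dump->len, out);
		fclose(out);
	}
	printf("wrote %u bytes, fw version word 0x%x\n", dump->len, flag.version);
	return 0;
}
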
@@ -2702,6 +3053,8 @@ void bnxt_ethtool_init(struct bnxt *bp)
strcpy(str, "Mac loopback test (offline)");
} else if (i == BNXT_PHYLPBK_TEST_IDX) {
strcpy(str, "Phy loopback test (offline)");
+ } else if (i == BNXT_EXTLPBK_TEST_IDX) {
+ strcpy(str, "Ext loopback test (offline)");
} else if (i == BNXT_IRQ_TEST_IDX) {
strcpy(str, "Interrupt_test (offline)");
} else {
@@ -2763,4 +3116,6 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
.reset = bnxt_reset,
+ .get_dump_flag = bnxt_get_dump_flag,
+ .get_dump_data = bnxt_get_dump_data,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 836ef682f24c..b5b65b3f8534 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -22,6 +22,43 @@ struct bnxt_led_cfg {
u8 rsvd;
};
+#define COREDUMP_LIST_BUF_LEN 2048
+#define COREDUMP_RETRIEVE_BUF_LEN 4096
+
+struct bnxt_coredump {
+ void *data;
+ int data_size;
+ u16 total_segs;
+};
+
+struct bnxt_hwrm_dbg_dma_info {
+ void *dest_buf;
+ int dest_buf_size;
+ u16 dma_len;
+ u16 seq_off;
+ u16 data_len_off;
+ u16 segs;
+};
+
+struct hwrm_dbg_cmn_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+};
+
+struct hwrm_dbg_cmn_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define HWRM_DBG_CMN_FLAGS_MORE 1
+};
+
#define BNXT_LED_DFLT_ENA \
(PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 0fe0ea8dce6c..971ace5d0d4a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -96,6 +96,7 @@ struct hwrm_short_input {
struct cmd_nums {
__le16 req_type;
#define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
#define HWRM_FUNC_BUF_UNRGTR 0xeUL
#define HWRM_FUNC_VF_CFG 0xfUL
#define HWRM_RESERVED1 0x10UL
@@ -159,6 +160,7 @@ struct cmd_nums {
#define HWRM_RING_FREE 0x51UL
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
+ #define HWRM_RING_AGGINT_QCAPS 0x54UL
#define HWRM_RING_RESET 0x5eUL
#define HWRM_RING_GRP_ALLOC 0x60UL
#define HWRM_RING_GRP_FREE 0x61UL
@@ -191,6 +193,8 @@ struct cmd_nums {
#define HWRM_PORT_QSTATS_EXT 0xb4UL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
+ #define HWRM_FW_HEALTH_CHECK 0xc2UL
+ #define HWRM_FW_SYNC 0xc3UL
#define HWRM_FW_SET_TIME 0xc8UL
#define HWRM_FW_GET_TIME 0xc9UL
#define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
@@ -269,6 +273,11 @@ struct cmd_nums {
#define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
#define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
#define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
+ #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
+ #define HWRM_FUNC_VF_BW_CFG 0x195UL
+ #define HWRM_FUNC_VF_BW_QCFG 0x196UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -284,6 +293,8 @@ struct cmd_nums {
#define HWRM_DBG_COREDUMP_LIST 0xff17UL
#define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
#define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
+ #define HWRM_DBG_FW_CLI 0xff1aUL
+ #define HWRM_DBG_I2C_CMD 0xff1bUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -318,6 +329,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
#define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
#define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -344,9 +356,9 @@ struct hwrm_err_output {
#define HWRM_RESP_VALID_KEY 1
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 9
-#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 15
-#define HWRM_VERSION_STR "1.9.1.15"
+#define HWRM_VERSION_UPDATE 2
+#define HWRM_VERSION_RSVD 25
+#define HWRM_VERSION_STR "1.9.2.25"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -526,6 +538,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
#define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
__le32 event_data2;
@@ -564,6 +577,8 @@ struct hwrm_async_event_cmpl_link_status_change {
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20
};
/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
@@ -817,23 +832,26 @@ struct hwrm_func_qcaps_output {
__le16 fid;
__le16 port_id;
__le32 flags;
- #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -947,58 +965,26 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
#define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
u8 options;
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xfcUL
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 2
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4
__le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
__le16 alloc_sp_tx_rings;
__le16 alloc_stat_ctx;
- u8 unused_2[7];
- u8 valid;
-};
-
-/* hwrm_func_vlan_cfg_input (size:384b/48B) */
-struct hwrm_func_vlan_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[2];
- __le32 enables;
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
- __le16 stag_vid;
- u8 stag_pcp;
- u8 unused_1;
- __be16 stag_tpid;
- __le16 ctag_vid;
- u8 ctag_pcp;
- u8 unused_2;
- __be16 ctag_tpid;
- __le32 rsvd1;
- __le32 rsvd2;
- u8 unused_3[4];
-};
-
-/* hwrm_func_vlan_cfg_output (size:128b/16B) */
-struct hwrm_func_vlan_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
+ __le16 alloc_msix;
+ u8 unused_2[5];
u8 valid;
};
@@ -1010,7 +996,7 @@ struct hwrm_func_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le16 fid;
- u8 unused_0[2];
+ __le16 num_msix;
__le32 flags;
#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
@@ -1050,6 +1036,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
#define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
__le16 mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
@@ -1109,13 +1097,19 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
#define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
u8 options;
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xfcUL
- #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
__le16 num_mcast_filters;
};
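LINK_ADMIN_STATE packs a two-bit sub-field into the same options byte that already carries CACHE_LINESIZE, so callers must mask before OR-ing in a value and set the matching enables bit. A sketch of a PF forcing a VF's link up, with req a zeroed hwrm_func_cfg_input addressed to the VF's fid:

/* Sketch: force the link administratively up for one function. */
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
req.options &= ~FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK;
req.options |= FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP;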
@@ -1212,30 +1206,6 @@ struct hwrm_func_vf_resc_free_output {
u8 valid;
};
-/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
-struct hwrm_func_vf_vnic_ids_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- u8 unused_0[2];
- __le32 max_vnic_id_cnt;
- __le64 vnic_id_tbl_addr;
-};
-
-/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
-struct hwrm_func_vf_vnic_ids_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id_cnt;
- u8 unused_0[3];
- u8 valid;
-};
-
/* hwrm_func_drv_rgtr_input (size:896b/112B) */
struct hwrm_func_drv_rgtr_input {
__le16 req_type;
@@ -1286,7 +1256,9 @@ struct hwrm_func_drv_rgtr_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- u8 unused_0[7];
+ __le32 flags;
+ #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL
+ u8 unused_0[3];
u8 valid;
};
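The registration response's previously-unused padding now returns a flags word, letting firmware advertise capabilities to the driver at registration time. A sketch of latching the new bit, with BNXT_FW_CAP_IF_CHANGE standing in for whatever driver-side flag caches it:

/* Sketch: only use HWRM_FUNC_DRV_IF_CHANGE if firmware offered it. */
if (le32_to_cpu(resp->flags) & FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)
	bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;	/* assumed driver flag */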
@@ -1372,7 +1344,7 @@ struct hwrm_func_drv_qver_input {
u8 unused_0[2];
};
-/* hwrm_func_drv_qver_output (size:192b/24B) */
+/* hwrm_func_drv_qver_output (size:256b/32B) */
struct hwrm_func_drv_qver_output {
__le16 error_code;
__le16 req_type;
@@ -1394,12 +1366,13 @@ struct hwrm_func_drv_qver_output {
u8 ver_maj_8b;
u8 ver_min_8b;
u8 ver_upd_8b;
- u8 unused_0[2];
- u8 valid;
+ u8 unused_0[3];
__le16 ver_maj;
__le16 ver_min;
__le16 ver_upd;
__le16 ver_patch;
+ u8 unused_1[7];
+ u8 valid;
};
/* hwrm_func_resource_qcaps_input (size:192b/24B) */
@@ -1493,6 +1466,410 @@ struct hwrm_func_vf_resource_cfg_output {
u8 valid;
};
+/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */
+struct hwrm_func_backing_store_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 qp_max_entries;
+ __le16 qp_min_qp1_entries;
+ __le16 qp_max_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_max_l2_entries;
+ __le32 srq_max_entries;
+ __le16 srq_entry_size;
+ __le16 cq_max_l2_entries;
+ __le32 cq_max_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_max_vnic_entries;
+ __le16 vnic_max_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le32 stat_max_entries;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le32 tqm_min_entries_per_ring;
+ __le32 tqm_max_entries_per_ring;
+ __le32 mrav_max_entries;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ __le32 tim_max_entries;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */
+struct hwrm_func_backing_store_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ u8 qpc_pg_size_qpc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G
+ u8 srq_pg_size_srq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G
+ u8 cq_pg_size_cq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G
+ u8 vnic_pg_size_vnic_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G
+ u8 stat_pg_size_stat_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G
+ u8 tqm_sp_pg_size_tqm_sp_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G
+ u8 tqm_ring0_pg_size_tqm_ring0_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G
+ u8 tqm_ring1_pg_size_tqm_ring1_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G
+ u8 tqm_ring2_pg_size_tqm_ring2_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G
+ u8 tqm_ring3_pg_size_tqm_ring3_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G
+ u8 tqm_ring4_pg_size_tqm_ring4_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G
+ u8 tqm_ring5_pg_size_tqm_ring5_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G
+ u8 tqm_ring6_pg_size_tqm_ring6_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G
+ u8 tqm_ring7_pg_size_tqm_ring7_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G
+ u8 mrav_pg_size_mrav_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G
+ u8 tim_pg_size_tim_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G
+ __le64 qpc_page_dir;
+ __le64 srq_page_dir;
+ __le64 cq_page_dir;
+ __le64 vnic_page_dir;
+ __le64 stat_page_dir;
+ __le64 tqm_sp_page_dir;
+ __le64 tqm_ring0_page_dir;
+ __le64 tqm_ring1_page_dir;
+ __le64 tqm_ring2_page_dir;
+ __le64 tqm_ring3_page_dir;
+ __le64 tqm_ring4_page_dir;
+ __le64 tqm_ring5_page_dir;
+ __le64 tqm_ring6_page_dir;
+ __le64 tqm_ring7_page_dir;
+ __le64 mrav_page_dir;
+ __le64 tim_page_dir;
+ __le32 qp_num_entries;
+ __le32 srq_num_entries;
+ __le32 cq_num_entries;
+ __le32 stat_num_entries;
+ __le32 tqm_sp_num_entries;
+ __le32 tqm_ring0_num_entries;
+ __le32 tqm_ring1_num_entries;
+ __le32 tqm_ring2_num_entries;
+ __le32 tqm_ring3_num_entries;
+ __le32 tqm_ring4_num_entries;
+ __le32 tqm_ring5_num_entries;
+ __le32 tqm_ring6_num_entries;
+ __le32 tqm_ring7_num_entries;
+ __le32 mrav_num_entries;
+ __le32 tim_num_entries;
+ __le16 qp_num_qp1_entries;
+ __le16 qp_num_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_num_l2_entries;
+ __le16 srq_entry_size;
+ __le16 cq_num_l2_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_num_vnic_entries;
+ __le16 vnic_num_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+};
+
+/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
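The backing-store pair lets firmware on chips without enough on-die context memory borrow host memory per context type: the driver reads entry sizes and limits from the qcaps reply, allocates page tables, then hands each region back through the *_page_dir, *_num_entries, and per-type page-size/level bytes, gated by enables bits. A sketch for the QP region alone, with qcaps the parsed hwrm_func_backing_store_qcaps_output and ctx_pg an assumed driver structure holding the page-directory DMA address:

/* Sketch: donate host memory for QP context, sized from qcaps. */
req.enables |= cpu_to_le32(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP);
req.qp_num_entries =
	cpu_to_le32(le16_to_cpu(qcaps->qp_max_l2_entries));
req.qp_num_qp1_entries = qcaps->qp_min_qp1_entries;
req.qp_entry_size = qcaps->qp_entry_size;
req.qpc_pg_size_qpc_lvl = FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 |
			  FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K;
req.qpc_page_dir = cpu_to_le64(ctx_pg->pg_dir);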
+/* hwrm_func_drv_if_change_input (size:192b/24B) */
+struct hwrm_func_drv_if_change_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL
+ __le32 unused;
+};
+
+/* hwrm_func_drv_if_change_output (size:128b/16B) */
+struct hwrm_func_drv_if_change_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
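FUNC_DRV_IF_CHANGE gives firmware a hook at every interface up/down transition; the interesting part is the response, where RESC_CHANGE tells the driver its resource grants changed while the port was down (for example across a firmware reset), so rings and contexts must be re-reserved before re-use. Roughly, assuming the driver's standard HWRM helpers and an assumed re-reservation path:

/* Sketch: ifup-time handshake using FUNC_DRV_IF_CHANGE. */
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc && (le32_to_cpu(resp->flags) &
	    FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE))
	rc = bnxt_reinit_resources(bp);	/* assumed helper */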
/* hwrm_port_phy_cfg_input (size:448b/56B) */
struct hwrm_port_phy_cfg_input {
__le16 req_type;
@@ -1592,10 +1969,11 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
#define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
- #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_REMOTE
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL
u8 force_pause;
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
@@ -1751,10 +2129,11 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
#define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_REMOTE
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL
u8 force_pause;
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
@@ -2014,6 +2393,131 @@ struct hwrm_port_mac_ptp_qcfg_output {
u8 valid;
};
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518b_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047b_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518b_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
/* hwrm_port_qstats_input (size:320b/40B) */
struct hwrm_port_qstats_input {
__le16 req_type;
@@ -2039,6 +2543,83 @@ struct hwrm_port_qstats_output {
u8 valid;
};
+/* tx_port_stats_ext (size:2048b/256B) */
+struct tx_port_stats_ext {
+ __le64 tx_bytes_cos0;
+ __le64 tx_bytes_cos1;
+ __le64 tx_bytes_cos2;
+ __le64 tx_bytes_cos3;
+ __le64 tx_bytes_cos4;
+ __le64 tx_bytes_cos5;
+ __le64 tx_bytes_cos6;
+ __le64 tx_bytes_cos7;
+ __le64 tx_packets_cos0;
+ __le64 tx_packets_cos1;
+ __le64 tx_packets_cos2;
+ __le64 tx_packets_cos3;
+ __le64 tx_packets_cos4;
+ __le64 tx_packets_cos5;
+ __le64 tx_packets_cos6;
+ __le64 tx_packets_cos7;
+ __le64 pfc_pri0_tx_duration_us;
+ __le64 pfc_pri0_tx_transitions;
+ __le64 pfc_pri1_tx_duration_us;
+ __le64 pfc_pri1_tx_transitions;
+ __le64 pfc_pri2_tx_duration_us;
+ __le64 pfc_pri2_tx_transitions;
+ __le64 pfc_pri3_tx_duration_us;
+ __le64 pfc_pri3_tx_transitions;
+ __le64 pfc_pri4_tx_duration_us;
+ __le64 pfc_pri4_tx_transitions;
+ __le64 pfc_pri5_tx_duration_us;
+ __le64 pfc_pri5_tx_transitions;
+ __le64 pfc_pri6_tx_duration_us;
+ __le64 pfc_pri6_tx_transitions;
+ __le64 pfc_pri7_tx_duration_us;
+ __le64 pfc_pri7_tx_transitions;
+};
+
+/* rx_port_stats_ext (size:2368b/296B) */
+struct rx_port_stats_ext {
+ __le64 link_down_events;
+ __le64 continuous_pause_events;
+ __le64 resume_pause_events;
+ __le64 continuous_roce_pause_events;
+ __le64 resume_roce_pause_events;
+ __le64 rx_bytes_cos0;
+ __le64 rx_bytes_cos1;
+ __le64 rx_bytes_cos2;
+ __le64 rx_bytes_cos3;
+ __le64 rx_bytes_cos4;
+ __le64 rx_bytes_cos5;
+ __le64 rx_bytes_cos6;
+ __le64 rx_bytes_cos7;
+ __le64 rx_packets_cos0;
+ __le64 rx_packets_cos1;
+ __le64 rx_packets_cos2;
+ __le64 rx_packets_cos3;
+ __le64 rx_packets_cos4;
+ __le64 rx_packets_cos5;
+ __le64 rx_packets_cos6;
+ __le64 rx_packets_cos7;
+ __le64 pfc_pri0_rx_duration_us;
+ __le64 pfc_pri0_rx_transitions;
+ __le64 pfc_pri1_rx_duration_us;
+ __le64 pfc_pri1_rx_transitions;
+ __le64 pfc_pri2_rx_duration_us;
+ __le64 pfc_pri2_rx_transitions;
+ __le64 pfc_pri3_rx_duration_us;
+ __le64 pfc_pri3_rx_transitions;
+ __le64 pfc_pri4_rx_duration_us;
+ __le64 pfc_pri4_rx_transitions;
+ __le64 pfc_pri5_rx_duration_us;
+ __le64 pfc_pri5_rx_transitions;
+ __le64 pfc_pri6_rx_duration_us;
+ __le64 pfc_pri6_rx_transitions;
+ __le64 pfc_pri7_rx_duration_us;
+ __le64 pfc_pri7_rx_transitions;
+};
+
/* hwrm_port_qstats_ext_input (size:320b/40B) */
struct hwrm_port_qstats_ext_input {
__le16 req_type;
@@ -2062,7 +2643,8 @@ struct hwrm_port_qstats_ext_output {
__le16 resp_len;
__le16 tx_stat_size;
__le16 rx_stat_size;
- u8 unused_0[3];
+ __le16 total_active_cos_queues;
+ u8 unused_0;
u8 valid;
};
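tx_stat_size/rx_stat_size report how many bytes firmware actually wrote into the extended-stats DMA buffers, and the new total_active_cos_queues takes two of the old padding bytes. Because older firmware writes fewer counters, a driver should bound its reads by the reported size rather than by sizeof the struct, for example:

/* Sketch: only read a trailing counter firmware said it produced. */
if (le16_to_cpu(resp->rx_stat_size) >=
    offsetofend(struct rx_port_stats_ext, pfc_pri7_rx_duration_us))
	pfc_us = le64_to_cpu(rx_ext->pfc_pri7_rx_duration_us);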
@@ -2153,9 +2735,10 @@ struct hwrm_port_phy_qcaps_output {
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfcUL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 2
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -2612,6 +3195,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id0;
u8 queue_id0_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2620,6 +3204,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id1;
u8 queue_id1_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2628,6 +3213,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id2;
u8 queue_id2_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2636,6 +3222,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id3;
u8 queue_id3_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2644,6 +3231,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id4;
u8 queue_id4_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2652,6 +3240,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id5;
u8 queue_id5_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2660,6 +3249,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id6;
u8 queue_id6_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2668,6 +3258,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id7;
u8 queue_id7_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -3689,18 +4280,21 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
#define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
__le32 enables;
- #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
- #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
- #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
- #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
- #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
__le16 cos_rule;
__le16 lb_rule;
__le16 mru;
- u8 unused_0[4];
+ __le16 default_rx_ring_id;
+ __le16 default_cmpl_ring_id;
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -3740,6 +4334,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
u8 unused_1[7];
u8 valid;
};
@@ -3857,7 +4452,14 @@ struct hwrm_vnic_rss_cfg_input {
#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
- u8 unused_0[4];
+ __le16 vnic_id;
+ u8 ring_table_pair_index;
+ u8 hash_mode_flags;
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
__le64 ring_grp_tbl_addr;
__le64 hash_key_tbl_addr;
__le16 rss_ctx_idx;
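The widened vnic_rss_cfg request adds hash_mode_flags, choosing whether RSS hashes the innermost or outermost headers of tunnel-encapsulated packets; the OUTERMOST_* modes are only valid when the VNIC advertised VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP above. A sketch, with the cached capability bit assumed:

/* Sketch: fall back to default (innermost) hashing without the cap. */
req.hash_mode_flags = outermost_rss_capable ?
		      VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 :
		      VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;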
@@ -3950,7 +4552,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output {
u8 valid;
};
-/* hwrm_ring_alloc_input (size:640b/80B) */
+/* hwrm_ring_alloc_input (size:704b/88B) */
struct hwrm_ring_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3961,12 +4563,17 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
#define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
#define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL
+ #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
u8 unused_0[3];
__le64 page_tbl_addr;
__le32 fbo;
@@ -3977,8 +4584,9 @@ struct hwrm_ring_alloc_input {
__le16 logical_id;
__le16 cmpl_ring_id;
__le16 queue_id;
- u8 unused_2[2];
- __le32 reserved1;
+ __le16 rx_buf_size;
+ __le16 rx_ring_id;
+ __le16 nq_ring_id;
__le16 ring_arb_cfg;
#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
@@ -4016,6 +4624,7 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
#define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
u8 unused_4[3];
+ __le64 cq_handle;
};
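ring_alloc grows two ring types (RX_AGG and NQ, the new notification queues) plus explicit linkage fields, each guarded by an enables bit: an aggregation ring names its parent RX ring, a completion ring names its NQ, and cq_handle is echoed back to the host in NQ events. A sketch of allocating an aggregation ring on the newer chips, with the id and size variables assumed:

/* Sketch: RX_AGG ring tied to its parent RX ring. */
req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
req.enables |= cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
			   RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
req.rx_ring_id = cpu_to_le16(parent_rx_ring_id);
req.rx_buf_size = cpu_to_le16(agg_buf_size);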
/* hwrm_ring_alloc_output (size:128b/16B) */
@@ -4042,7 +4651,9 @@ struct hwrm_ring_free_input {
#define RING_FREE_REQ_RING_TYPE_TX 0x1UL
#define RING_FREE_REQ_RING_TYPE_RX 0x2UL
#define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_ROCE_CMPL
+ #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
u8 unused_0;
__le16 ring_id;
u8 unused_1[4];
@@ -4058,6 +4669,52 @@ struct hwrm_ring_free_output {
u8 valid;
};
+/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
+struct hwrm_ring_aggint_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
+struct hwrm_ring_aggint_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 cmpl_params;
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
+ __le32 nq_params;
+ #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ __le16 num_cmpl_dma_aggr_min;
+ __le16 num_cmpl_dma_aggr_max;
+ __le16 num_cmpl_dma_aggr_during_int_min;
+ __le16 num_cmpl_dma_aggr_during_int_max;
+ __le16 cmpl_aggr_dma_tmr_min;
+ __le16 cmpl_aggr_dma_tmr_max;
+ __le16 cmpl_aggr_dma_tmr_during_int_min;
+ __le16 cmpl_aggr_dma_tmr_during_int_max;
+ __le16 int_lat_tmr_min_min;
+ __le16 int_lat_tmr_min_max;
+ __le16 int_lat_tmr_max_min;
+ __le16 int_lat_tmr_max_max;
+ __le16 num_cmpl_aggr_int_min;
+ __le16 num_cmpl_aggr_int_max;
+ __le16 timer_units;
+ u8 unused_0[1];
+ u8 valid;
+};
+
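RING_AGGINT_QCAPS replaces hard-coded interrupt-coalescing limits: cmpl_params (and nq_params for NQs) say which knobs this firmware honors, and each min/max pair bounds the matching field of the cfg_aggint_params request below, which correspondingly gains its own enables word. A sketch of clamping one timer, assuming the qcaps reply has been cached:

/* Sketch: keep a requested latency timer within firmware bounds. */
u16 tmr = clamp_t(u16, wanted_int_lat_tmr_max,
		  le16_to_cpu(qcaps->int_lat_tmr_max_min),
		  le16_to_cpu(qcaps->int_lat_tmr_max_max));
req.int_lat_tmr_max = cpu_to_le16(tmr);
req.enables |= cpu_to_le16(
	RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX);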
/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
struct hwrm_ring_cmpl_ring_qaggint_params_input {
__le16 req_type;
@@ -4100,6 +4757,7 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
__le16 flags;
#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
__le16 num_cmpl_dma_aggr;
__le16 num_cmpl_dma_aggr_during_int;
__le16 cmpl_aggr_dma_tmr;
@@ -4107,7 +4765,14 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
__le16 int_lat_tmr_min;
__le16 int_lat_tmr_max;
__le16 num_cmpl_aggr_int;
- u8 unused_0[6];
+ __le16 enables;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL
+ u8 unused_0[4];
};
/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
@@ -4120,34 +4785,6 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
u8 valid;
};
-/* hwrm_ring_reset_input (size:192b/24B) */
-struct hwrm_ring_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
- u8 unused_0;
- __le16 ring_id;
- u8 unused_1[4];
-};
-
-/* hwrm_ring_reset_output (size:128b/16B) */
-struct hwrm_ring_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
/* hwrm_ring_grp_alloc_input (size:192b/24B) */
struct hwrm_ring_grp_alloc_input {
__le16 req_type;
@@ -5032,7 +5669,8 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1
u8 unused_0[7];
};
@@ -5059,7 +5697,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1
u8 unused_0;
__be16 tunnel_dst_port_val;
u8 unused_1[4];
@@ -5087,7 +5726,8 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1
u8 unused_0;
__le16 tunnel_dst_port_id;
u8 unused_1[4];
@@ -5259,140 +5899,6 @@ struct hwrm_pcie_qstats_output {
u8 valid;
};
-/* tx_port_stats (size:3264b/408B) */
-struct tx_port_stats {
- __le64 tx_64b_frames;
- __le64 tx_65b_127b_frames;
- __le64 tx_128b_255b_frames;
- __le64 tx_256b_511b_frames;
- __le64 tx_512b_1023b_frames;
- __le64 tx_1024b_1518_frames;
- __le64 tx_good_vlan_frames;
- __le64 tx_1519b_2047_frames;
- __le64 tx_2048b_4095b_frames;
- __le64 tx_4096b_9216b_frames;
- __le64 tx_9217b_16383b_frames;
- __le64 tx_good_frames;
- __le64 tx_total_frames;
- __le64 tx_ucast_frames;
- __le64 tx_mcast_frames;
- __le64 tx_bcast_frames;
- __le64 tx_pause_frames;
- __le64 tx_pfc_frames;
- __le64 tx_jabber_frames;
- __le64 tx_fcs_err_frames;
- __le64 tx_control_frames;
- __le64 tx_oversz_frames;
- __le64 tx_single_dfrl_frames;
- __le64 tx_multi_dfrl_frames;
- __le64 tx_single_coll_frames;
- __le64 tx_multi_coll_frames;
- __le64 tx_late_coll_frames;
- __le64 tx_excessive_coll_frames;
- __le64 tx_frag_frames;
- __le64 tx_err;
- __le64 tx_tagged_frames;
- __le64 tx_dbl_tagged_frames;
- __le64 tx_runt_frames;
- __le64 tx_fifo_underruns;
- __le64 tx_pfc_ena_frames_pri0;
- __le64 tx_pfc_ena_frames_pri1;
- __le64 tx_pfc_ena_frames_pri2;
- __le64 tx_pfc_ena_frames_pri3;
- __le64 tx_pfc_ena_frames_pri4;
- __le64 tx_pfc_ena_frames_pri5;
- __le64 tx_pfc_ena_frames_pri6;
- __le64 tx_pfc_ena_frames_pri7;
- __le64 tx_eee_lpi_events;
- __le64 tx_eee_lpi_duration;
- __le64 tx_llfc_logical_msgs;
- __le64 tx_hcfc_msgs;
- __le64 tx_total_collisions;
- __le64 tx_bytes;
- __le64 tx_xthol_frames;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
-};
-
-/* rx_port_stats (size:4224b/528B) */
-struct rx_port_stats {
- __le64 rx_64b_frames;
- __le64 rx_65b_127b_frames;
- __le64 rx_128b_255b_frames;
- __le64 rx_256b_511b_frames;
- __le64 rx_512b_1023b_frames;
- __le64 rx_1024b_1518_frames;
- __le64 rx_good_vlan_frames;
- __le64 rx_1519b_2047b_frames;
- __le64 rx_2048b_4095b_frames;
- __le64 rx_4096b_9216b_frames;
- __le64 rx_9217b_16383b_frames;
- __le64 rx_total_frames;
- __le64 rx_ucast_frames;
- __le64 rx_mcast_frames;
- __le64 rx_bcast_frames;
- __le64 rx_fcs_err_frames;
- __le64 rx_ctrl_frames;
- __le64 rx_pause_frames;
- __le64 rx_pfc_frames;
- __le64 rx_unsupported_opcode_frames;
- __le64 rx_unsupported_da_pausepfc_frames;
- __le64 rx_wrong_sa_frames;
- __le64 rx_align_err_frames;
- __le64 rx_oor_len_frames;
- __le64 rx_code_err_frames;
- __le64 rx_false_carrier_frames;
- __le64 rx_ovrsz_frames;
- __le64 rx_jbr_frames;
- __le64 rx_mtu_err_frames;
- __le64 rx_match_crc_frames;
- __le64 rx_promiscuous_frames;
- __le64 rx_tagged_frames;
- __le64 rx_double_tagged_frames;
- __le64 rx_trunc_frames;
- __le64 rx_good_frames;
- __le64 rx_pfc_xon2xoff_frames_pri0;
- __le64 rx_pfc_xon2xoff_frames_pri1;
- __le64 rx_pfc_xon2xoff_frames_pri2;
- __le64 rx_pfc_xon2xoff_frames_pri3;
- __le64 rx_pfc_xon2xoff_frames_pri4;
- __le64 rx_pfc_xon2xoff_frames_pri5;
- __le64 rx_pfc_xon2xoff_frames_pri6;
- __le64 rx_pfc_xon2xoff_frames_pri7;
- __le64 rx_pfc_ena_frames_pri0;
- __le64 rx_pfc_ena_frames_pri1;
- __le64 rx_pfc_ena_frames_pri2;
- __le64 rx_pfc_ena_frames_pri3;
- __le64 rx_pfc_ena_frames_pri4;
- __le64 rx_pfc_ena_frames_pri5;
- __le64 rx_pfc_ena_frames_pri6;
- __le64 rx_pfc_ena_frames_pri7;
- __le64 rx_sch_crc_err_frames;
- __le64 rx_undrsz_frames;
- __le64 rx_frag_frames;
- __le64 rx_eee_lpi_events;
- __le64 rx_eee_lpi_duration;
- __le64 rx_llfc_physical_msgs;
- __le64 rx_llfc_logical_msgs;
- __le64 rx_llfc_msgs_with_crc_err;
- __le64 rx_hcfc_msgs;
- __le64 rx_hcfc_msgs_with_crc_err;
- __le64 rx_bytes;
- __le64 rx_runt_bytes;
- __le64 rx_runt_frames;
- __le64 rx_stat_discard;
- __le64 rx_stat_err;
-};
-
-/* rx_port_stats_ext (size:320b/40B) */
-struct rx_port_stats_ext {
- __le64 link_down_events;
- __le64 continuous_pause_events;
- __le64 resume_pause_events;
- __le64 continuous_roce_pause_events;
- __le64 resume_roce_pause_events;
-};
-
/* pcie_ctx_hw_stats (size:768b/96B) */
struct pcie_ctx_hw_stats {
__le64 pcie_pl_signal_integrity;
@@ -5884,6 +6390,114 @@ struct hwrm_wol_reason_qcfg_output {
u8 valid;
};
+/* coredump_segment_record (size:128b/16B) */
+struct coredump_segment_record {
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 max_instances;
+ u8 version_hi;
+ u8 version_low;
+ u8 seg_flags;
+ u8 unused_0[7];
+};
+
+/* hwrm_dbg_coredump_list_input (size:256b/32B) */
+struct hwrm_dbg_coredump_list_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le16 seq_no;
+ u8 unused_0[2];
+};
+
+/* hwrm_dbg_coredump_list_output (size:128b/16B) */
+struct hwrm_dbg_coredump_list_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 total_segments;
+ __le16 data_len;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
+struct hwrm_dbg_coredump_initiate_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_0;
+ u8 seg_flags;
+ u8 unused_1[7];
+};
+
+/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
+struct hwrm_dbg_coredump_initiate_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* coredump_data_hdr (size:128b/16B) */
+struct coredump_data_hdr {
+ __le32 address;
+ __le32 flags_length;
+ __le32 instance;
+ __le32 next_offset;
+};
+
+/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
+struct hwrm_dbg_coredump_retrieve_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le32 unused_0;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_1;
+ u8 seg_flags;
+ u8 unused_2;
+ __le16 unused_3;
+ __le32 unused_4;
+ __le32 seq_no;
+ __le32 unused_5;
+};
+
+/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
+struct hwrm_dbg_coredump_retrieve_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 data_len;
+ u8 unused_1[3];
+ u8 valid;
+};
+
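The seq_no/FLAGS_MORE pairing above implies a paged retrieval loop. A minimal sketch of that loop, assuming a hypothetical hwrm_send() transport helper (not a real bnxt API):

/* Minimal sketch, not driver code: hwrm_send() is a hypothetical
 * stand-in for the bnxt HWRM mailbox transport.  Each pass bumps
 * seq_no; the loop ends when the response clears FLAGS_MORE.
 */
static int sketch_retrieve_segment(struct hwrm_dbg_coredump_retrieve_input *req,
				   struct hwrm_dbg_coredump_retrieve_output *resp)
{
	u32 seq = 0;
	int rc;

	do {
		req->seq_no = cpu_to_le32(seq++);
		rc = hwrm_send(req, resp);	/* hypothetical transport */
		if (rc)
			return rc;
		/* le16_to_cpu(resp->data_len) bytes are now at host_dest_addr */
	} while (resp->flags & DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE);

	return 0;
}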
/* hwrm_nvm_read_input (size:320b/40B) */
struct hwrm_nvm_read_input {
__le16 req_type;
@@ -6269,12 +6883,14 @@ struct hwrm_nvm_set_variable_input {
__le16 index_2;
__le16 index_3;
u8 flags;
- #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
u8 unused_0;
};
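The flags byte above keeps FORCE_FLUSH in bit 0 and carries the (now four-valued) encrypt mode in bits 1-3, so extracting it is a plain mask-and-shift. A one-line sketch; the helper name is hypothetical:

static inline u8 nvm_set_var_encrypt_mode(u8 flags)	/* hypothetical helper */
{
	return (flags & NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK) >>
	       NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT;
}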
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index a64910892c25..6d583bcd2a81 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -447,7 +447,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
struct bnxt_pf_info *pf = &bp->pf;
- int i, rc = 0;
+ int i, rc = 0, min = 1;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
@@ -464,14 +464,19 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
- if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
- req.min_cmpl_rings = cpu_to_le16(1);
- req.min_tx_rings = cpu_to_le16(1);
- req.min_rx_rings = cpu_to_le16(1);
- req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MIN_L2_CTX);
- req.min_vnics = cpu_to_le16(1);
- req.min_stat_ctx = cpu_to_le16(1);
- req.min_hw_ring_grps = cpu_to_le16(1);
+ if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
+ min = 0;
+ req.min_rsscos_ctx = cpu_to_le16(min);
+ }
+ if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
+ pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
+ req.min_cmpl_rings = cpu_to_le16(min);
+ req.min_tx_rings = cpu_to_le16(min);
+ req.min_rx_rings = cpu_to_le16(min);
+ req.min_l2_ctxs = cpu_to_le16(min);
+ req.min_vnics = cpu_to_le16(min);
+ req.min_stat_ctx = cpu_to_le16(min);
+ req.min_hw_ring_grps = cpu_to_le16(min);
} else {
vf_cp_rings /= num_vfs;
vf_tx_rings /= num_vfs;
@@ -618,7 +623,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
{
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
else
return bnxt_hwrm_func_cfg(bp, num_vfs);
@@ -956,9 +961,13 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
mac_ok = true;
- } else if (bp->hwrm_spec_code < 0x10202) {
- mac_ok = true;
} else {
+ /* There are two cases:
+ * 1. If firmware spec < 0x10202, the VF MAC address is not
+ *    forwarded to the PF, so it does not have to match.
+ * 2. If firmware spec >= 0x10202, allow the VF to modify its
+ *    own MAC when the PF has not assigned a valid MAC address.
+ */
mac_ok = true;
}
if (mac_ok)
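The resource-reservation hunk above folds the new MINIMAL_STATIC strategy into the minimum path. Condensed into a hypothetical helper, the per-VF minimum it programs is:

/* Hypothetical condensation of the branch above, valid only for the two
 * minimal strategies: MINIMAL_STATIC reserves nothing per VF up front,
 * while plain MINIMAL still guarantees one of each resource.  Any other
 * strategy instead divides the PF pool across the VFs.
 */
static u16 sketch_vf_resv_min(int strategy)
{
	return strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC ? 0 : 1;
}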
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 491bd40a254d..139d96c5a023 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1568,22 +1568,16 @@ void bnxt_tc_flow_stats_work(struct bnxt *bp)
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *cls_flower)
{
- int rc = 0;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
- rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
- break;
-
+ return bnxt_tc_add_flow(bp, src_fid, cls_flower);
case TC_CLSFLOWER_DESTROY:
- rc = bnxt_tc_del_flow(bp, cls_flower);
- break;
-
+ return bnxt_tc_del_flow(bp, cls_flower);
case TC_CLSFLOWER_STATS:
- rc = bnxt_tc_get_flow_stats(bp, cls_flower);
- break;
+ return bnxt_tc_get_flow_stats(bp, cls_flower);
+ default:
+ return -EOPNOTSUPP;
}
- return rc;
}
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 840f6e505f73..c37b2842f972 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -141,7 +141,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
if (avail_msix > num_msix)
avail_msix = num_msix;
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
idx = bp->cp_nr_rings;
} else {
max_idx = min_t(int, bp->total_irqs, max_cp_rings);
@@ -162,7 +162,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
return -EAGAIN;
}
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 05d405905906..e31f5d803c13 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -173,7 +173,7 @@ static int bnxt_vf_rep_setup_tc_block(struct net_device *dev,
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
bnxt_vf_rep_setup_tc_block_cb,
- vf_rep, vf_rep);
+ vf_rep, vf_rep, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block,
bnxt_vf_rep_setup_tc_block_cb, vf_rep);
@@ -543,9 +543,14 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ if (bp->hwrm_spec_code < 0x10803) {
+ netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
+ rc = -ENOTSUPP;
+ goto done;
+ }
+
if (pci_num_vf(bp->pdev) == 0) {
- netdev_info(bp->dev,
- "Enable VFs before setting switchdev mode");
+ netdev_info(bp->dev, "Enable VFs before setting switchdev mode");
rc = -EPERM;
goto done;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 1f0e872d0667..0584d07c8c33 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -219,7 +219,6 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
rc = bnxt_xdp_set(bp, xdp->prog);
break;
case XDP_QUERY_PROG:
- xdp->prog_attached = !!bp->xdp_prog;
xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
rc = 0;
break;
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 4fd829b5e65d..d83233ae4a15 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -2562,7 +2562,6 @@ static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
- struct fcoe_kwqe_destroy *req;
union l5cm_specific_data l5_data;
struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
@@ -2571,7 +2570,6 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
- req = (struct fcoe_kwqe_destroy *) kwqe;
cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
memset(&l5_data, 0, sizeof(l5_data));
@@ -4090,7 +4088,7 @@ static void cnic_cm_free_mem(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
- kfree(cp->csk_tbl);
+ kvfree(cp->csk_tbl);
cp->csk_tbl = NULL;
cnic_free_id_tbl(&cp->csk_port_tbl);
}
@@ -4100,8 +4098,8 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
u32 port_id;
- cp->csk_tbl = kcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
- GFP_KERNEL);
+ cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
+ GFP_KERNEL);
if (!cp->csk_tbl)
return -ENOMEM;
@@ -5091,13 +5089,12 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_eth_dev *ethdev = cp->ethdev;
- int func, ret;
+ int ret;
u32 pfid;
dev->stats_addr = ethdev->addr_drv_info_to_mcp;
cp->func = bp->pf_num;
- func = CNIC_FUNC(cp);
pfid = bp->pfid;
ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
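The kcalloc-to-kvcalloc switch above lets the large csk table fall back to vmalloc when physically contiguous pages are scarce; kvfree() handles either backing. A self-contained sketch of the pairing, with hypothetical names:

#include <linux/mm.h>	/* kvcalloc(), kvfree() */
#include <linux/slab.h>

struct sketch_sock { u64 cid; void *ctx; };	/* trimmed stand-in */
#define SKETCH_TBL_SZ 4096

static struct sketch_sock *sketch_tbl;

static int sketch_tbl_alloc(void)
{
	/* May be kmalloc-backed or vmalloc-backed; the caller cannot tell. */
	sketch_tbl = kvcalloc(SKETCH_TBL_SZ, sizeof(*sketch_tbl), GFP_KERNEL);
	return sketch_tbl ? 0 : -ENOMEM;
}

static void sketch_tbl_free(void)
{
	kvfree(sketch_tbl);	/* correct for both backing allocators */
	sketch_tbl = NULL;
}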
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index aa1374d0af93..e6f28c7942ab 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -54,6 +54,7 @@
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/crc32poly.h>
#include <net/checksum.h>
#include <net/ip.h>
@@ -725,6 +726,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return 0;
+ /* else: fall through */
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
@@ -785,6 +787,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return;
+ /* else: fall through */
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
@@ -9720,7 +9723,7 @@ static inline u32 calc_crc(unsigned char *buf, int len)
reg >>= 1;
if (tmp)
- reg ^= 0xedb88320;
+ reg ^= CRC32_POLY_LE;
}
}
@@ -10719,28 +10722,40 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
switch (limit) {
case 16:
tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
+ /* fall through */
case 15:
tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
+ /* fall through */
case 14:
tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
+ /* fall through */
case 13:
tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
+ /* fall through */
case 12:
tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
+ /* fall through */
case 11:
tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
+ /* fall through */
case 10:
tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
+ /* fall through */
case 9:
tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
+ /* fall through */
case 8:
tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
+ /* fall through */
case 7:
tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
+ /* fall through */
case 6:
tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
+ /* fall through */
case 5:
tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
+ /* fall through */
case 4:
/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
case 3:
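The calc_crc() hunk above replaces the open-coded 0xedb88320 with CRC32_POLY_LE from <linux/crc32poly.h>; the value is unchanged. For reference, a standalone bit-serial version of the same reflected CRC-32 (a sketch, not the tg3 code):

#include <linux/crc32poly.h>	/* CRC32_POLY_LE == 0xedb88320 */
#include <linux/types.h>

static u32 sketch_crc32_le(const u8 *buf, int len)
{
	u32 reg = ~0U;
	int i, j;

	for (i = 0; i < len; i++) {
		reg ^= buf[i];
		for (j = 0; j < 8; j++) {
			u32 lsb = reg & 1;

			reg >>= 1;
			if (lsb)
				reg ^= CRC32_POLY_LE;
		}
	}
	return ~reg;
}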
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 427d65a1a126..b9984015ca8c 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -2,7 +2,7 @@
# Atmel device configuration
#
-config NET_CADENCE
+config NET_VENDOR_CADENCE
bool "Cadence devices"
depends on HAS_IOMEM
default y
@@ -16,7 +16,7 @@ config NET_CADENCE
the remaining Atmel network card questions. If you say Y, you will be
asked for your specific card in the following questions.
-if NET_CADENCE
+if NET_VENDOR_CADENCE
config MACB
tristate "Cadence MACB/GEM support"
@@ -48,4 +48,4 @@ config MACB_PCI
To compile this driver as a module, choose M here: the module
will be called macb_pci.
-endif # NET_CADENCE
+endif # NET_VENDOR_CADENCE
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index a6c911bb5ce2..dc09f9a8a49b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -1565,6 +1566,9 @@ static unsigned int macb_tx_map(struct macb *bp,
if (i == queue->tx_head) {
ctrl |= MACB_BF(TX_LSO, lso_ctrl);
ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
+ if ((bp->dev->features & NETIF_F_HW_CSUM) &&
+ skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
+ ctrl |= MACB_BIT(TX_NOCRC);
} else
/* Only set MSS/MFS on payload descriptors
* (second or later descriptor)
@@ -1651,7 +1655,68 @@ static inline int macb_clear_csum(struct sk_buff *skb)
return 0;
}
-static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+{
+ bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
+ int padlen = ETH_ZLEN - (*skb)->len;
+ int headroom = skb_headroom(*skb);
+ int tailroom = skb_tailroom(*skb);
+ struct sk_buff *nskb;
+ u32 fcs;
+
+ if (!(ndev->features & NETIF_F_HW_CSUM) ||
+ (*skb)->ip_summed == CHECKSUM_PARTIAL ||
+ skb_shinfo(*skb)->gso_size) /* Not available for GSO */
+ return 0;
+
+ if (padlen <= 0) {
+ /* FCS could be appended to tailroom. */
+ if (tailroom >= ETH_FCS_LEN)
+ goto add_fcs;
+ /* FCS could be appended by moving data to headroom. */
+ else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
+ padlen = 0;
+ /* No room for FCS, need to reallocate skb. */
+ else
+ padlen = ETH_FCS_LEN - tailroom;
+ } else {
+ /* Add room for FCS. */
+ padlen += ETH_FCS_LEN;
+ }
+
+ if (!cloned && headroom + tailroom >= padlen) {
+ (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
+ skb_set_tail_pointer(*skb, (*skb)->len);
+ } else {
+ nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+
+ dev_kfree_skb_any(*skb);
+ *skb = nskb;
+ }
+
+ if (padlen) {
+ if (padlen >= ETH_FCS_LEN)
+ skb_put_zero(*skb, padlen - ETH_FCS_LEN);
+ else
+ skb_trim(*skb, ETH_FCS_LEN - padlen);
+ }
+
+add_fcs:
+ /* append the FCS to the packet */
+ fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
+ fcs = ~fcs;
+
+ skb_put_u8(*skb, fcs & 0xff);
+ skb_put_u8(*skb, (fcs >> 8) & 0xff);
+ skb_put_u8(*skb, (fcs >> 16) & 0xff);
+ skb_put_u8(*skb, (fcs >> 24) & 0xff);
+
+ return 0;
+}
+
+static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
u16 queue_index = skb_get_queue_mapping(skb);
struct macb *bp = netdev_priv(dev);
@@ -1660,6 +1725,17 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int desc_cnt, nr_frags, frag_size, f;
unsigned int hdrlen;
bool is_lso, is_udp = 0;
+ netdev_tx_t ret = NETDEV_TX_OK;
+
+ if (macb_clear_csum(skb)) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (macb_pad_and_fcs(&skb, dev)) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
is_lso = (skb_shinfo(skb)->gso_size != 0);
@@ -1716,11 +1792,6 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- if (macb_clear_csum(skb)) {
- dev_kfree_skb_any(skb);
- goto unlock;
- }
-
/* Map socket buffer for DMA transfer */
if (!macb_tx_map(bp, queue, skb, hdrlen)) {
dev_kfree_skb_any(skb);
@@ -1739,7 +1810,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
unlock:
spin_unlock_irqrestore(&bp->lock, flags);
- return NETDEV_TX_OK;
+ return ret;
}
static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
@@ -3549,7 +3620,8 @@ static int at91ether_close(struct net_device *dev)
}
/* Transmit packet */
-static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
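macb_pad_and_fcs() above appends the Ethernet FCS by hand when the controller is told not to (TX_NOCRC): the FCS is the inverted little-endian CRC-32 of the frame, transmitted least-significant byte first. The same computation on a plain buffer, as a sketch:

#include <linux/crc32.h>	/* crc32_le() */
#include <linux/types.h>

/* Assumes 'frame' has at least ETH_FCS_LEN (4) bytes of room past 'len'. */
static void sketch_append_fcs(u8 *frame, unsigned int len)
{
	u32 fcs = ~crc32_le(~0, frame, len);

	frame[len + 0] = fcs & 0xff;
	frame[len + 1] = (fcs >> 8) & 0xff;
	frame[len + 2] = (fcs >> 16) & 0xff;
	frame[len + 3] = (fcs >> 24) & 0xff;
}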
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 678835136bf8..cd5296b84229 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -466,6 +466,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
case HWTSTAMP_TX_ONESTEP_SYNC:
if (gem_ptp_set_one_step_sync(bp, 1) != 0)
return -ERANGE;
+ /* fall through */
case HWTSTAMP_TX_ON:
tx_bd_control = TSTAMP_ALL_FRAMES;
break;
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 92d88c5f76fb..5f03199a3acf 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -4,7 +4,6 @@
config NET_VENDOR_CAVIUM
bool "Cavium ethernet drivers"
- depends on PCI
default y
---help---
Select this option if you want to enable Cavium network support.
@@ -36,7 +35,7 @@ config THUNDER_NIC_BGX
tristate "Thunder MAC interface driver (BGX)"
depends on 64BIT && PCI
select PHYLIB
- select MDIO_THUNDER
+ select MDIO_THUNDER if PCI
select THUNDER_NIC_RGX
---help---
This driver supports programming and controlling of MAC
@@ -46,7 +45,7 @@ config THUNDER_NIC_RGX
tristate "Thunder MAC interface driver (RGX)"
depends on 64BIT && PCI
select PHYLIB
- select MDIO_THUNDER
+ select MDIO_THUNDER if PCI
---help---
This driver supports configuring XCV block of RGX interface
present on CN81XX chip.
@@ -67,6 +66,7 @@ config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT && PCI
depends on MAY_USE_DEVLINK
+ depends on PCI
imply PTP_1588_CLOCK
select FW_LOADER
select LIBCRC32C
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 929d485a3a2f..9f4f3c1d5043 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -493,6 +493,9 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
for (q_no = srn; q_no < ern; q_no++) {
reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+ /* clear IPTR */
+ reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;
+
/* set DPTR */
reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
@@ -1414,50 +1417,6 @@ int validate_cn23xx_pf_config_info(struct octeon_device *oct,
return 0;
}
-void cn23xx_dump_iq_regs(struct octeon_device *oct)
-{
- u32 regval, q_no;
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
- CN23XX_SLI_IQ_DOORBELL(0),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_DOORBELL(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
- CN23XX_SLI_IQ_BASE_ADDR64(0),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_BASE_ADDR64(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
- CN23XX_SLI_IQ_SIZE(0),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_IQ_SIZE(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_CTL_STATUS [0x%x]: 0x%016llx\n",
- CN23XX_SLI_CTL_STATUS,
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_CTL_STATUS)));
-
- for (q_no = 0; q_no < CN23XX_MAX_INPUT_QUEUES; q_no++) {
- dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
- q_no, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no))));
- }
-
- pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
- dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
- CN23XX_CONFIG_PCIE_DEVCTL, regval);
-
- dev_dbg(&oct->pci_dev->dev, "SLI_PRT[%d]_CFG [0x%llx]: 0x%016llx\n",
- oct->pcie_port, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
- CVM_CAST64(lio_pci_readq(
- oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_S2M_PORT[%d]_CTL [0x%x]: 0x%016llx\n",
- oct->pcie_port, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
-}
-
int cn23xx_fw_loaded(struct octeon_device *oct)
{
u64 val;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
index 9338a0008378..962bb62933db 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
@@ -165,6 +165,9 @@ static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
reg_val =
octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
+ /* clear IPTR */
+ reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;
+
/* set DPTR */
reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
@@ -379,7 +382,7 @@ void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct)
mbox_cmd.recv_len = 0;
mbox_cmd.recv_status = 0;
mbox_cmd.fn = NULL;
- mbox_cmd.fn_arg = 0;
+ mbox_cmd.fn_arg = NULL;
octeon_mbox_write(oct, &mbox_cmd);
}
@@ -679,33 +682,3 @@ int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
return 0;
}
-
-void cn23xx_dump_vf_iq_regs(struct octeon_device *oct)
-{
- u32 regval, q_no;
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
- CN23XX_VF_SLI_IQ_DOORBELL(0),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_VF_SLI_IQ_DOORBELL(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
- CN23XX_VF_SLI_IQ_BASE_ADDR64(0),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
- CN23XX_VF_SLI_IQ_SIZE(0),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_SIZE(0))));
-
- for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
- dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
- q_no, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))));
- }
-
- pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
- dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
- CN23XX_CONFIG_PCIE_DEVCTL, regval);
-}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 06f7449c569d..8e05afd5e39c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -389,18 +389,14 @@ static int lio_set_link_ksettings(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
struct oct_link_info *linfo;
struct octeon_device *oct;
- u32 is25G = 0;
oct = lio->oct_dev;
linfo = &lio->linfo;
- if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
- oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
- is25G = 1;
- } else {
+ if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID))
return -EOPNOTSUPP;
- }
if (oct->no_speed_setting) {
dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
@@ -857,7 +853,14 @@ static int lio_set_phys_id(struct net_device *netdev,
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ struct oct_link_info *linfo;
int value, ret;
+ u32 cur_ver;
+
+ linfo = &lio->linfo;
+ cur_ver = OCT_FW_VER(oct->fw_info.ver.maj,
+ oct->fw_info.ver.min,
+ oct->fw_info.ver.rev);
switch (state) {
case ETHTOOL_ID_ACTIVE:
@@ -896,16 +899,22 @@ static int lio_set_phys_id(struct net_device *netdev,
return ret;
} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
octnet_id_active(netdev, LED_IDENTIFICATION_ON);
-
- /* returns 0 since updates are asynchronous */
- return 0;
+ if (linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
+ cur_ver > OCT_FW_VER(1, 7, 2))
+ return 2;
+ else
+ return 0;
} else {
return -EINVAL;
}
break;
case ETHTOOL_ID_ON:
- if (oct->chip_id == OCTEON_CN66XX)
+ if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
+ linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
+ cur_ver > OCT_FW_VER(1, 7, 2))
+ octnet_id_active(netdev, LED_IDENTIFICATION_ON);
+ else if (oct->chip_id == OCTEON_CN66XX)
octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
VITESSE_PHY_GPIO_HIGH);
else
@@ -914,7 +923,11 @@ static int lio_set_phys_id(struct net_device *netdev,
break;
case ETHTOOL_ID_OFF:
- if (oct->chip_id == OCTEON_CN66XX)
+ if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
+ linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
+ cur_ver > OCT_FW_VER(1, 7, 2))
+ octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
+ else if (oct->chip_id == OCTEON_CN66XX)
octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
VITESSE_PHY_GPIO_LOW);
else
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7e8454d3b1ad..6fb13fa73b27 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -687,7 +687,7 @@ static void lio_sync_octeon_time(struct work_struct *work)
lt = (struct lio_time *)sc->virtdptr;
/* Get time of the day */
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
lt->sec = ts.tv_sec;
lt->nsec = ts.tv_nsec;
octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
@@ -2209,6 +2209,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCSHWTSTAMP:
if (lio->oct_dev->ptp_enable)
return hwtstamp_ioctl(netdev, ifr);
+ /* fall through */
default:
return -EOPNOTSUPP;
}
@@ -2631,7 +2632,7 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
ret);
}
return ret;
@@ -2909,7 +2910,7 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
nctrl.ncmd.s.more = 0;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.cb_fn = 0;
+ nctrl.cb_fn = NULL;
nctrl.wait_time = LIO_CMD_WAIT_TM;
octnet_send_nic_ctrl_pkt(oct, &nctrl);
@@ -3068,7 +3069,7 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
nctrl.ncmd.s.param2 = linkstate;
nctrl.ncmd.s.more = 0;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.cb_fn = 0;
+ nctrl.cb_fn = NULL;
nctrl.wait_time = LIO_CMD_WAIT_TM;
octnet_send_nic_ctrl_pkt(oct, &nctrl);
@@ -3302,7 +3303,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
{
struct lio *lio = NULL;
struct net_device *netdev;
- u8 mac[6], i, j, *fw_ver;
+ u8 mac[6], i, j, *fw_ver, *micro_ver;
+ unsigned long micro;
+ u32 cur_ver;
struct octeon_soft_command *sc;
struct liquidio_if_cfg_context *ctx;
struct liquidio_if_cfg_resp *resp;
@@ -3432,6 +3435,14 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
fw_ver);
}
+ /* extract micro version field; point past '<maj>.<min>.' */
+ micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
+ if (kstrtoul(micro_ver, 10, &micro) != 0)
+ micro = 0;
+ octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
+ octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
+ octeon_dev->fw_info.ver.rev = micro;
+
octeon_swap_8B_data((u64 *)(&resp->cfg_info),
(sizeof(struct liquidio_if_cfg_info)) >> 3);
@@ -3572,9 +3583,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
u8 vfmac[ETH_ALEN];
- random_ether_addr(&vfmac[0]);
- if (__liquidio_set_vf_mac(netdev, j,
- &vfmac[0], false)) {
+ eth_random_addr(vfmac);
+ if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
dev_err(&octeon_dev->pci_dev->dev,
"Error setting VF%d MAC address\n",
j);
@@ -3675,7 +3685,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
OCTEON_CN2350_25GB_SUBSYS_ID ||
octeon_dev->subsystem_id ==
OCTEON_CN2360_25GB_SUBSYS_ID) {
- liquidio_get_speed(lio);
+ cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
+ octeon_dev->fw_info.ver.min,
+ octeon_dev->fw_info.ver.rev);
+
+ /* speed control unsupported in f/w older than 1.7.2 */
+ if (cur_ver < OCT_FW_VER(1, 7, 2)) {
+ dev_info(&octeon_dev->pci_dev->dev,
+ "speed setting not supported by f/w.");
+ octeon_dev->speed_setting = 25;
+ octeon_dev->no_speed_setting = 1;
+ } else {
+ liquidio_get_speed(lio);
+ }
if (octeon_dev->speed_setting == 0) {
octeon_dev->speed_setting = 25;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 7fa0212873ac..b77835724dc8 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1693,7 +1693,7 @@ liquidio_vlan_rx_kill_vid(struct net_device *netdev,
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
ret);
}
return ret;
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 690424b6781a..7407fcd338e9 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -907,6 +907,7 @@ static inline int opcode_slow_path(union octeon_rh *rh)
#define VITESSE_PHY_GPIO_LOW 0x3
#define LED_IDENTIFICATION_ON 0x1
#define LED_IDENTIFICATION_OFF 0x0
+#define LIO23XX_COPPERHEAD_LED_GPIO 0x2
struct oct_mdio_cmd {
u64 op;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 7f97ae48efed..0cc2338d8d2a 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -902,7 +902,7 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
*
* Octeon always uses UTC time, so timezone information is not sent.
*/
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
ret = snprintf(boottime, MAX_BOOTTIME_SIZE,
" time_sec=%lld time_nsec=%ld",
(s64)ts.tv_sec, ts.tv_nsec);
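Both getnstimeofday64() call sites in this series convert one-for-one to ktime_get_real_ts64(): the replacement fills the same struct timespec64 with UTC wall-clock time under the current timekeeping API. A minimal sketch:

#include <linux/timekeeping.h>

static void sketch_wallclock(s64 *sec, long *nsec)
{
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);	/* UTC wall clock, ns resolution */
	*sec = (s64)ts.tv_sec;
	*nsec = ts.tv_nsec;
}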
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 94a4ed88d618..d99ca6ba23a4 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -288,8 +288,17 @@ struct oct_fw_info {
*/
u32 app_mode;
char liquidio_firmware_version[32];
+ /* Fields extracted from legacy string 'liquidio_firmware_version' */
+ struct {
+ u8 maj;
+ u8 min;
+ u8 rev;
+ } ver;
};
+#define OCT_FW_VER(maj, min, rev) \
+ (((u32)(maj) << 16) | ((u32)(min) << 8) | ((u32)(rev)))
+
/* wrappers around work structs */
struct cavium_wk {
struct delayed_work work;
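Packing maj/min/rev into one u32 lets firmware versions compare with ordinary integer operators, as the LED and speed-setting gates elsewhere in this series do. A small usage sketch; the helper is hypothetical:

static bool sketch_fw_at_least(const struct oct_fw_info *fw,
			       u8 maj, u8 min, u8 rev)
{
	return OCT_FW_VER(fw->ver.maj, fw->ver.min, fw->ver.rev) >=
	       OCT_FW_VER(maj, min, rev);
}

/* e.g. the speed-setting gate: sketch_fw_at_least(&oct->fw_info, 1, 7, 2) */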
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 5fed7b63223e..2327062e8af6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -82,6 +82,16 @@ struct octeon_instr_queue {
/** A spinlock to protect while posting on the ring. */
spinlock_t post_lock;
+ /** This flag indicates if the queue can be used for soft commands.
+ * If this flag is set, post_lock must be acquired before posting
+ * a command to the queue.
+ * If this flag is clear, post_lock is invalid for the queue.
+ * All control commands (soft commands) go through Queue 0 only
+ * (the combined control and data queue), so only queue 0 needs
+ * post_lock; the other queues are data-only and do not need it.
+ */
+ bool allow_soft_cmds;
+
u32 pkt_in_done;
/** A spinlock to protect access to the input ring.*/
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 1f2e75da28f8..8f746e1348d4 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -110,8 +110,8 @@ int octeon_init_instr_queue(struct octeon_device *oct,
memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
- dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
- iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
+ dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n",
+ iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);
iq->txpciq.u64 = txpciq.u64;
iq->fill_threshold = (u32)conf->db_min;
@@ -126,7 +126,12 @@ int octeon_init_instr_queue(struct octeon_device *oct,
/* Initialize the spinlock for this instruction queue */
spin_lock_init(&iq->lock);
- spin_lock_init(&iq->post_lock);
+ if (iq_no == 0) {
+ iq->allow_soft_cmds = true;
+ spin_lock_init(&iq->post_lock);
+ } else {
+ iq->allow_soft_cmds = false;
+ }
spin_lock_init(&iq->iq_flush_running_lock);
@@ -566,7 +571,8 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
/* Get the lock and prevent other tasks and tx interrupt handler from
* running.
*/
- spin_lock_bh(&iq->post_lock);
+ if (iq->allow_soft_cmds)
+ spin_lock_bh(&iq->post_lock);
st = __post_command2(iq, cmd);
@@ -583,7 +589,8 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
}
- spin_unlock_bh(&iq->post_lock);
+ if (iq->allow_soft_cmds)
+ spin_unlock_bh(&iq->post_lock);
/* This is only done here to expedite packets being flushed
* for cases where there are no IQ completion interrupts.
@@ -702,11 +709,20 @@ octeon_prepare_soft_command(struct octeon_device *oct,
int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
+ struct octeon_instr_queue *iq;
struct octeon_instr_ih2 *ih2;
struct octeon_instr_ih3 *ih3;
struct octeon_instr_irh *irh;
u32 len;
+ iq = oct->instr_queue[sc->iq_no];
+ if (!iq->allow_soft_cmds) {
+ dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n",
+ sc->iq_no);
+ INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1);
+ return IQ_SEND_FAILED;
+ }
+
if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
if (ih3->dlengsz) {
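Only queue 0 may carry soft commands, so only it pays for post_lock; the other rings post from a single context and skip it. The pattern in miniature (types are trimmed stand-ins, not the liquidio structures):

#include <linux/spinlock.h>
#include <linux/types.h>

struct sketch_iq {
	spinlock_t post_lock;	/* valid only when allow_soft_cmds */
	bool allow_soft_cmds;
};

static void sketch_iq_init(struct sketch_iq *iq, u32 iq_no)
{
	iq->allow_soft_cmds = (iq_no == 0);
	if (iq->allow_soft_cmds)
		spin_lock_init(&iq->post_lock);
}

static void sketch_iq_post(struct sketch_iq *iq)
{
	if (iq->allow_soft_cmds)
		spin_lock_bh(&iq->post_lock);
	/* ... write the command into the ring ... */
	if (iq->allow_soft_cmds)
		spin_unlock_bh(&iq->post_lock);
}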
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 5603f5ab1fee..92ba958e204e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -527,6 +527,7 @@ static int nicvf_get_rss_hash_opts(struct nicvf *nic,
case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case IPV4_FLOW:
case IPV6_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 135766c4296b..768f584f8392 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1848,7 +1848,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return nicvf_xdp_setup(nic, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!nic->xdp_prog;
xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
return 0;
default:
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 8623be13bf86..0ccdde366ae1 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -109,10 +109,6 @@ static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
-static const char pci_speed[][4] = {
- "33", "66", "100", "133"
-};
-
/*
* Setup MAC to receive the types of packets we want.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 248e40c6966c..0e9182d3f02c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -136,6 +136,7 @@ again:
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
+ /* fall through */
case L2T_STATE_VALID: /* fast-path, send the packet on */
return cxgb3_ofld_send(dev, skb);
case L2T_STATE_RESOLVING:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 3c5057868ab3..36d25883d123 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -120,6 +120,8 @@ struct cudbg_mem_desc {
u32 idx;
};
+#define CUDBG_MEMINFO_REV 1
+
struct cudbg_meminfo {
struct cudbg_mem_desc avail[4];
struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3];
@@ -137,6 +139,9 @@ struct cudbg_meminfo {
u32 port_alloc[4];
u32 loopback_used[NCHAN];
u32 loopback_alloc[NCHAN];
+ u32 p_structs_free_cnt;
+ u32 free_rx_cnt;
+ u32 free_tx_cnt;
};
struct cudbg_cim_pif_la {
@@ -281,12 +286,18 @@ struct cudbg_tid_data {
#define CUDBG_NUM_ULPTX 11
#define CUDBG_NUM_ULPTX_READ 512
+#define CUDBG_NUM_ULPTX_ASIC 6
+#define CUDBG_NUM_ULPTX_ASIC_READ 128
+
+#define CUDBG_ULPTX_LA_REV 1
struct cudbg_ulptx_la {
u32 rdptr[CUDBG_NUM_ULPTX];
u32 wrptr[CUDBG_NUM_ULPTX];
u32 rddata[CUDBG_NUM_ULPTX];
u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ];
+ u32 rdptr_asic[CUDBG_NUM_ULPTX_ASIC_READ];
+ u32 rddata_asic[CUDBG_NUM_ULPTX_ASIC_READ][CUDBG_NUM_ULPTX_ASIC];
};
#define CUDBG_CHAC_PBT_ADDR 0x2800
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 0afcfe99bff3..d97e0d7e541a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -349,6 +349,11 @@ int cudbg_fill_meminfo(struct adapter *padap,
meminfo_buff->up_extmem2_hi = hi;
lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
+ for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++)
+ meminfo_buff->free_rx_cnt +=
+ FREERXPAGECOUNT_G(t4_read_reg(padap,
+ TP_FLM_FREE_RX_CNT_A));
+
meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
meminfo_buff->rx_pages_data[1] =
t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
@@ -356,6 +361,11 @@ int cudbg_fill_meminfo(struct adapter *padap,
lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
+ for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++)
+ meminfo_buff->free_tx_cnt +=
+ FREETXPAGECOUNT_G(t4_read_reg(padap,
+ TP_FLM_FREE_TX_CNT_A));
+
meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
meminfo_buff->tx_pages_data[1] =
hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
@@ -364,6 +374,8 @@ int cudbg_fill_meminfo(struct adapter *padap,
meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);
meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
+ meminfo_buff->p_structs_free_cnt =
+ FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A));
for (i = 0; i < 4; i++) {
if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
@@ -1465,14 +1477,23 @@ int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_meminfo *meminfo_buff;
+ struct cudbg_ver_hdr *ver_hdr;
int rc;
- rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_meminfo),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_meminfo),
&temp_buff);
if (rc)
return rc;
- meminfo_buff = (struct cudbg_meminfo *)temp_buff.data;
+ ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
+ ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
+ ver_hdr->revision = CUDBG_MEMINFO_REV;
+ ver_hdr->size = sizeof(struct cudbg_meminfo);
+
+ meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data +
+ sizeof(*ver_hdr));
rc = cudbg_fill_meminfo(padap, meminfo_buff);
if (rc) {
cudbg_err->sys_err = rc;
@@ -2586,15 +2607,24 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_ulptx_la *ulptx_la_buff;
+ struct cudbg_ver_hdr *ver_hdr;
u32 i, j;
int rc;
- rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_ulptx_la),
&temp_buff);
if (rc)
return rc;
- ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
+ ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
+ ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
+ ver_hdr->revision = CUDBG_ULPTX_LA_REV;
+ ver_hdr->size = sizeof(struct cudbg_ulptx_la);
+
+ ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data +
+ sizeof(*ver_hdr));
for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
ULP_TX_LA_RDPTR_0_A +
@@ -2610,6 +2640,25 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
t4_read_reg(padap,
ULP_TX_LA_RDDATA_0_A + 0x10 * i);
}
+
+ for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) {
+ t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
+ ulptx_la_buff->rdptr_asic[i] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
+ ulptx_la_buff->rddata_asic[i][0] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
+ ulptx_la_buff->rddata_asic[i][1] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
+ ulptx_la_buff->rddata_asic[i][2] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
+ ulptx_la_buff->rddata_asic[i][3] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
+ ulptx_la_buff->rddata_asic[i][4] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
+ ulptx_la_buff->rddata_asic[i][5] =
+ t4_read_reg(padap, PM_RX_BASE_ADDR);
+ }
+
return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
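Both collectors above now prepend a cudbg_ver_hdr so userspace can reject payload layouts it does not understand. A consumer-side sketch; the field widths are assumed from how the patch fills them, and the parse helper is hypothetical:

#include <linux/types.h>

struct sketch_ver_hdr {		/* assumed layout, mirrors the fills above */
	u32 signature;
	u16 revision;
	u16 size;
};

static void *sketch_entity_payload(void *buf, u16 expected_rev)
{
	struct sketch_ver_hdr *hdr = buf;

	if (hdr->revision != expected_rev)
		return NULL;	/* unknown layout: refuse to parse */
	return (u8 *)buf + sizeof(*hdr);
}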
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 0dbe2d9e22d6..76d16747f513 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -46,6 +46,7 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
+#include <linux/rhashtable.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
@@ -319,6 +320,21 @@ struct vpd_params {
u8 na[MACADDR_LEN + 1];
};
+/* Maximum resources provisioned for a PCI PF.
+ */
+struct pf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
struct pci_params {
unsigned int vpd_cap_addr;
unsigned char speed;
@@ -346,6 +362,7 @@ struct adapter_params {
struct sge_params sge;
struct tp_params tp;
struct vpd_params vpd;
+ struct pf_resources pfres;
struct pci_params pci;
struct devlog_params devlog;
enum pcie_memwin drv_memwin;
@@ -521,6 +538,15 @@ enum {
MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
};
+enum {
+ PRIV_FLAG_PORT_TX_VM_BIT,
+};
+
+#define PRIV_FLAG_PORT_TX_VM BIT(PRIV_FLAG_PORT_TX_VM_BIT)
+
+#define PRIV_FLAGS_ADAP 0
+#define PRIV_FLAGS_PORT PRIV_FLAG_PORT_TX_VM
+
struct adapter;
struct sge_rspq;
@@ -557,6 +583,7 @@ struct port_info {
struct hwtstamp_config tstamp_config;
bool ptp_enable;
struct sched_table *sched_tbl;
+ u32 eth_flags;
};
struct dentry;
@@ -867,6 +894,7 @@ struct adapter {
unsigned int flags;
unsigned int adap_idx;
enum chip_type chip;
+ u32 eth_flags;
int msg_enable;
__be16 vxlan_port;
@@ -956,6 +984,7 @@ struct adapter {
struct chcr_stats_debug chcr_stats;
/* TC flower offload */
+ bool tc_flower_initialized;
struct rhashtable flower_tbl;
struct rhashtable_params flower_ht_params;
struct timer_list flower_stats_timer;
@@ -1333,7 +1362,7 @@ void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
void t4_free_sge_resources(struct adapter *adap);
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
-netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
@@ -1555,6 +1584,7 @@ int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_pfres(struct adapter *adapter);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
@@ -1823,4 +1853,5 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
+int cxgb4_dcb_enabled(const struct net_device *dev);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 8d751efcb90e..5f01c0a7fd98 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -224,7 +224,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
break;
case CUDBG_MEMINFO:
- len = sizeof(struct cudbg_meminfo);
+ len = sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_meminfo);
break;
case CUDBG_CIM_PIF_LA:
len = sizeof(struct cudbg_cim_pif_la);
@@ -273,7 +274,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
}
break;
case CUDBG_ULPTX_LA:
- len = sizeof(struct cudbg_ulptx_la);
+ len = sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_ulptx_la);
break;
case CUDBG_UP_CIM_INDIRECT:
n = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 4e7f72b17e82..b34f0f077a31 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -22,7 +22,7 @@
/* DCBx version control
*/
-static const char * const dcb_ver_array[] = {
+const char * const dcb_ver_array[] = {
"Unknown",
"DCBx-CIN",
"DCBx-CEE 1.01",
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index c301aaf79d64..0f72f9c4ec74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2414,6 +2414,234 @@ static const struct file_operations rss_vf_config_debugfs_fops = {
.release = seq_release_private
};
+#ifdef CONFIG_CHELSIO_T4_DCB
+extern const char * const dcb_ver_array[];
+
+/* Data Center Bridging information for each port.
+ */
+static int dcb_info_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adap = seq->private;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Data Center Bridging Information\n");
+ } else {
+ int port = (uintptr_t)v - 2;
+ struct net_device *dev = adap->port[port];
+ struct port_info *pi = netdev2pinfo(dev);
+ struct port_dcb_info *dcb = &pi->dcb;
+
+ seq_puts(seq, "\n");
+ seq_printf(seq, "Port: %d (DCB negotiated: %s)\n",
+ port,
+ cxgb4_dcb_enabled(dev) ? "yes" : "no");
+
+ if (cxgb4_dcb_enabled(dev))
+ seq_printf(seq, "[ DCBx Version %s ]\n",
+ dcb_ver_array[dcb->dcb_version]);
+
+ if (dcb->msgs) {
+ int i;
+
+ seq_puts(seq, "\n Index\t\t\t :\t");
+ for (i = 0; i < 8; i++)
+ seq_printf(seq, " %3d", i);
+ seq_puts(seq, "\n\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_PGID) {
+ int prio, pgid;
+
+ seq_puts(seq, " Priority Group IDs\t :\t");
+ for (prio = 0; prio < 8; prio++) {
+ pgid = (dcb->pgid >> 4 * (7 - prio)) & 0xf;
+ seq_printf(seq, " %3d", pgid);
+ }
+ seq_puts(seq, "\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_PGRATE) {
+ int pg;
+
+ seq_puts(seq, " Priority Group BW(%)\t :\t");
+ for (pg = 0; pg < 8; pg++)
+ seq_printf(seq, " %3d", dcb->pgrate[pg]);
+ seq_puts(seq, "\n");
+
+ if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+ seq_puts(seq, " TSA Algorithm\t\t :\t");
+ for (pg = 0; pg < 8; pg++)
+ seq_printf(seq, " %3d", dcb->tsa[pg]);
+ seq_puts(seq, "\n");
+ }
+
+ seq_printf(seq, " Max PG Traffic Classes [%3d ]\n",
+ dcb->pg_num_tcs_supported);
+
+ seq_puts(seq, "\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_PRIORATE) {
+ int prio;
+
+ seq_puts(seq, " Priority Rate\t:\t");
+ for (prio = 0; prio < 8; prio++)
+ seq_printf(seq, " %3d", dcb->priorate[prio]);
+ seq_puts(seq, "\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_PFC) {
+ int prio;
+
+ seq_puts(seq, " Priority Flow Control :\t");
+ for (prio = 0; prio < 8; prio++) {
+ int pfcen = (dcb->pfcen >> 1 * (7 - prio))
+ & 0x1;
+ seq_printf(seq, " %3d", pfcen);
+ }
+ seq_puts(seq, "\n");
+
+ seq_printf(seq, " Max PFC Traffic Classes [%3d ]\n",
+ dcb->pfc_num_tcs_supported);
+
+ seq_puts(seq, "\n");
+ }
+
+ if (dcb->msgs & CXGB4_DCB_FW_APP_ID) {
+ int app, napps;
+
+ seq_puts(seq, " Application Information:\n");
+ seq_puts(seq, " App Priority Selection Protocol\n");
+ seq_puts(seq, " Index Map Field ID\n");
+ for (app = 0, napps = 0;
+ app < CXGB4_MAX_DCBX_APP_SUPPORTED; app++) {
+ struct app_priority *ap;
+ static const char * const sel_names[] = {
+ "Ethertype",
+ "Socket TCP",
+ "Socket UDP",
+ "Socket All",
+ };
+ const char *sel_name;
+
+ ap = &dcb->app_priority[app];
+ /* skip empty slots */
+ if (ap->protocolid == 0)
+ continue;
+ napps++;
+
+ if (ap->sel_field < ARRAY_SIZE(sel_names))
+ sel_name = sel_names[ap->sel_field];
+ else
+ sel_name = "UNKNOWN";
+
+ seq_printf(seq, " %3d %#04x %-10s (%d) %#06x (%d)\n",
+ app,
+ ap->user_prio_map,
+ sel_name, ap->sel_field,
+ ap->protocolid, ap->protocolid);
+ }
+ if (napps == 0)
+ seq_puts(seq, " --- None ---\n");
+ }
+ }
+ return 0;
+}
+
+static inline void *dcb_info_get_idx(struct adapter *adap, loff_t pos)
+{
+ return (pos <= adap->params.nports
+ ? (void *)((uintptr_t)pos + 1)
+ : NULL);
+}
+
+static void *dcb_info_start(struct seq_file *seq, loff_t *pos)
+{
+ struct adapter *adap = seq->private;
+
+ return (*pos
+ ? dcb_info_get_idx(adap, *pos)
+ : SEQ_START_TOKEN);
+}
+
+static void dcb_info_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *dcb_info_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct adapter *adap = seq->private;
+
+ (*pos)++;
+ return dcb_info_get_idx(adap, *pos);
+}
+
+static const struct seq_operations dcb_info_seq_ops = {
+ .start = dcb_info_start,
+ .next = dcb_info_next,
+ .stop = dcb_info_stop,
+ .show = dcb_info_show
+};
+
+static int dcb_info_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &dcb_info_seq_ops);
+
+ if (!res) {
+ struct seq_file *seq = file->private_data;
+
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations dcb_info_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = dcb_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif /* CONFIG_CHELSIO_T4_DCB */
+
+static int resources_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ struct pf_resources *pfres = &adapter->params.pfres;
+
+ #define S(desc, fmt, var) \
+ seq_printf(seq, "%-60s " fmt "\n", \
+ desc " (" #var "):", pfres->var)
+
+ S("Virtual Interfaces", "%d", nvi);
+ S("Egress Queues", "%d", neq);
+ S("Ethernet Control", "%d", nethctrl);
+ S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
+ S("Ingress Queues", "%d", niq);
+ S("Traffic Class", "%d", tc);
+ S("Port Access Rights Mask", "%#x", pmask);
+ S("MAC Address Filters", "%d", nexactf);
+ S("Firmware Command Read Capabilities", "%#x", r_caps);
+ S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
+
+ #undef S
+
+ return 0;
+}
+
+static int resources_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, resources_show, inode->i_private);
+}
+
+static const struct file_operations resources_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = resources_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
/**
* ethqset2pinfo - return port_info of an Ethernet Queue Set
* @adap: the adapter
@@ -2436,16 +2664,64 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
return NULL;
}
+static int sge_qinfo_uld_txq_entries(const struct adapter *adap, int uld)
+{
+ const struct sge_uld_txq_info *utxq_info = adap->sge.uld_txq_info[uld];
+
+ if (!utxq_info)
+ return 0;
+
+ return DIV_ROUND_UP(utxq_info->ntxq, 4);
+}
+
+static int sge_qinfo_uld_rspq_entries(const struct adapter *adap, int uld,
+ bool ciq)
+{
+ const struct sge_uld_rxq_info *urxq_info = adap->sge.uld_rxq_info[uld];
+
+ if (!urxq_info)
+ return 0;
+
+ return ciq ? DIV_ROUND_UP(urxq_info->nciq, 4) :
+ DIV_ROUND_UP(urxq_info->nrxq, 4);
+}
+
+static int sge_qinfo_uld_rxq_entries(const struct adapter *adap, int uld)
+{
+ return sge_qinfo_uld_rspq_entries(adap, uld, false);
+}
+
+static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
+{
+ return sge_qinfo_uld_rspq_entries(adap, uld, true);
+}
+
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
+ int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
+ int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
+ int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
+ const struct sge_uld_txq_info *utxq_info;
+ const struct sge_uld_rxq_info *urxq_info;
struct adapter *adap = seq->private;
- int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
- int ofld_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
- int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
- int i, r = (uintptr_t)v - 1;
- int ofld_idx = r - eth_entries;
- int ctrl_idx = ofld_idx - ofld_entries;
- int fq_idx = ctrl_idx - ctrl_entries;
+ int i, n, r = (uintptr_t)v - 1;
+ int eth_entries, ctrl_entries;
+ struct sge *s = &adap->sge;
+
+ eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
+ ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
+
+ mutex_lock(&uld_mutex);
+ if (s->uld_txq_info)
+ for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
+ uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
+
+ if (s->uld_rxq_info) {
+ for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
+ uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
+ uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
+ }
+ }
if (r)
seq_putc(seq, '\n');
@@ -2467,9 +2743,10 @@ do { \
if (r < eth_entries) {
int base_qset = r * 4;
- const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset];
- const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset];
- int n = min(4, adap->sge.ethqsets - 4 * r);
+ const struct sge_eth_rxq *rx = &s->ethrxq[base_qset];
+ const struct sge_eth_txq *tx = &s->ethtxq[base_qset];
+
+ n = min(4, s->ethqsets - 4 * r);
S("QType:", "Ethernet");
S("Interface:",
@@ -2494,8 +2771,7 @@ do { \
R("RspQ CIDX:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
R("FL ID:", fl.cntxt_id);
R("FL size:", fl.size - 8);
R("FL pend:", fl.pend_cred);
@@ -2520,9 +2796,196 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- } else if (ctrl_idx < ctrl_entries) {
- const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
- int n = min(4, adap->params.nports - 4 * ctrl_idx);
+ goto unlock;
+ }
+
+ r -= eth_entries;
+ if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
+ const struct sge_uld_txq *tx;
+
+ utxq_info = s->uld_txq_info[CXGB4_TX_OFLD];
+ tx = &utxq_info->uldtxq[r * 4];
+ n = min(4, utxq_info->ntxq - 4 * r);
+
+ S("QType:", "OFLD-TXQ");
+ T("TxQ ID:", q.cntxt_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ CIDX:", q.cidx);
+ T("TxQ PIDX:", q.pidx);
+
+ goto unlock;
+ }
+
+ r -= uld_txq_entries[CXGB4_TX_OFLD];
+ if (r < uld_rxq_entries[CXGB4_ULD_RDMA]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "RDMA-CPL");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_RDMA];
+ if (r < uld_ciq_entries[CXGB4_ULD_RDMA]) {
+ const struct sge_ofld_rxq *rx;
+ int ciq_idx = 0;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ ciq_idx = urxq_info->nrxq + (r * 4);
+ rx = &urxq_info->uldrxq[ciq_idx];
+ n = min(4, urxq_info->nciq - 4 * r);
+
+ S("QType:", "RDMA-CIQ");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+
+ goto unlock;
+ }
+
+ r -= uld_ciq_entries[CXGB4_ULD_RDMA];
+ if (r < uld_rxq_entries[CXGB4_ULD_ISCSI]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSI];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "iSCSI");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_ISCSI];
+ if (r < uld_rxq_entries[CXGB4_ULD_ISCSIT]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSIT];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "iSCSIT");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_ISCSIT];
+ if (r < uld_rxq_entries[CXGB4_ULD_TLS]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_TLS];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "TLS");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_TLS];
+ if (r < uld_txq_entries[CXGB4_TX_CRYPTO]) {
+ const struct sge_ofld_rxq *rx;
+ const struct sge_uld_txq *tx;
+
+ utxq_info = s->uld_txq_info[CXGB4_TX_CRYPTO];
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_CRYPTO];
+ tx = &utxq_info->uldtxq[r * 4];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, utxq_info->ntxq - 4 * r);
+
+ S("QType:", "Crypto");
+ T("TxQ ID:", q.cntxt_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ CIDX:", q.cidx);
+ T("TxQ PIDX:", q.pidx);
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_txq_entries[CXGB4_TX_CRYPTO];
+ if (r < ctrl_entries) {
+ const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4];
+
+ n = min(4, adap->params.nports - 4 * r);
S("QType:", "Control");
T("TxQ ID:", q.cntxt_id);
@@ -2532,8 +2995,13 @@ do { \
T("TxQ PIDX:", q.pidx);
TL("TxQFull:", q.stops);
TL("TxQRestarts:", q.restarts);
- } else if (fq_idx == 0) {
- const struct sge_rspq *evtq = &adap->sge.fw_evtq;
+
+ goto unlock;
+ }
+
+ r -= ctrl_entries;
+ if (r < 1) {
+ const struct sge_rspq *evtq = &s->fw_evtq;
seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
@@ -2544,8 +3012,13 @@ do { \
seq_printf(seq, "%-12s %16u\n", "Intr delay:",
qtimer_val(adap, evtq));
seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
- adap->sge.counter_val[evtq->pktcnt_idx]);
+ s->counter_val[evtq->pktcnt_idx]);
+
+ goto unlock;
}
+
+unlock:
+ mutex_unlock(&uld_mutex);
#undef R
#undef RL
#undef T
@@ -2559,8 +3032,21 @@ do { \
static int sge_queue_entries(const struct adapter *adap)
{
+ int tot_uld_entries = 0;
+ int i;
+
+ mutex_lock(&uld_mutex);
+ for (i = 0; i < CXGB4_TX_MAX; i++)
+ tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i);
+
+ for (i = 0; i < CXGB4_ULD_MAX; i++) {
+ tot_uld_entries += sge_qinfo_uld_rxq_entries(adap, i);
+ tot_uld_entries += sge_qinfo_uld_ciq_entries(adap, i);
+ }
+ mutex_unlock(&uld_mutex);
+
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
- DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
+ tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
@@ -2851,15 +3337,17 @@ static int meminfo_show(struct seq_file *seq, void *v)
mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo,
meminfo.up_extmem2_hi);
- seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
- meminfo.rx_pages_data[0], meminfo.rx_pages_data[1],
- meminfo.rx_pages_data[2]);
+ seq_printf(seq, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n",
+ meminfo.rx_pages_data[0], meminfo.free_rx_cnt,
+ meminfo.rx_pages_data[1], meminfo.rx_pages_data[2]);
- seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
- meminfo.tx_pages_data[0], meminfo.tx_pages_data[1],
- meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]);
+ seq_printf(seq, "%u Tx pages (%u free) of size %u%ciB for %u channels\n",
+ meminfo.tx_pages_data[0], meminfo.free_tx_cnt,
+ meminfo.tx_pages_data[1], meminfo.tx_pages_data[2],
+ meminfo.tx_pages_data[3]);
- seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs);
+ seq_printf(seq, "%u p-structs (%u free)\n\n",
+ meminfo.p_structs, meminfo.p_structs_free_cnt);
for (i = 0; i < 4; i++)
/* For T6 these are MAC buffer groups */
@@ -2924,6 +3412,169 @@ static const struct file_operations chcr_stats_debugfs_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+
+#define PRINT_ADAP_STATS(string, value) \
+ seq_printf(seq, "%-25s %-20llu\n", (string), \
+ (unsigned long long)(value))
+
+#define PRINT_CH_STATS(string, value) \
+do { \
+ seq_printf(seq, "%-25s ", (string)); \
+ for (i = 0; i < adap->params.arch.nchan; i++) \
+ seq_printf(seq, "%-20llu ", \
+ (unsigned long long)stats.value[i]); \
+ seq_printf(seq, "\n"); \
+} while (0)
+
+#define PRINT_CH_STATS2(string, value) \
+do { \
+ seq_printf(seq, "%-25s ", (string)); \
+ for (i = 0; i < adap->params.arch.nchan; i++) \
+ seq_printf(seq, "%-20llu ", \
+ (unsigned long long)stats[i].value); \
+ seq_printf(seq, "\n"); \
+} while (0)
+
+static void show_tcp_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_tcp_stats v4, v6;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_tcp_stats(adap, &v4, &v6, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("tcp_ipv4_out_rsts:", v4.tcp_out_rsts);
+ PRINT_ADAP_STATS("tcp_ipv4_in_segs:", v4.tcp_in_segs);
+ PRINT_ADAP_STATS("tcp_ipv4_out_segs:", v4.tcp_out_segs);
+ PRINT_ADAP_STATS("tcp_ipv4_retrans_segs:", v4.tcp_retrans_segs);
+ PRINT_ADAP_STATS("tcp_ipv6_out_rsts:", v6.tcp_out_rsts);
+ PRINT_ADAP_STATS("tcp_ipv6_in_segs:", v6.tcp_in_segs);
+ PRINT_ADAP_STATS("tcp_ipv6_out_segs:", v6.tcp_out_segs);
+ PRINT_ADAP_STATS("tcp_ipv6_retrans_segs:", v6.tcp_retrans_segs);
+}
+
+static void show_ddp_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_usm_stats stats;
+
+ spin_lock(&adap->stats_lock);
+ t4_get_usm_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("usm_ddp_frames:", stats.frames);
+ PRINT_ADAP_STATS("usm_ddp_octets:", stats.octets);
+ PRINT_ADAP_STATS("usm_ddp_drops:", stats.drops);
+}
+
+static void show_rdma_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_rdma_stats stats;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_rdma_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("rdma_no_rqe_mod_defer:", stats.rqe_dfr_mod);
+ PRINT_ADAP_STATS("rdma_no_rqe_pkt_defer:", stats.rqe_dfr_pkt);
+}
+
+static void show_tp_err_adapter_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_err_stats stats;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_err_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("tp_err_ofld_no_neigh:", stats.ofld_no_neigh);
+ PRINT_ADAP_STATS("tp_err_ofld_cong_defer:", stats.ofld_cong_defer);
+}
+
+static void show_cpl_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_cpl_stats stats;
+ u8 i;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_cpl_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_CH_STATS("tp_cpl_requests:", req);
+ PRINT_CH_STATS("tp_cpl_responses:", rsp);
+}
+
+static void show_tp_err_channel_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_err_stats stats;
+ u8 i;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_err_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_CH_STATS("tp_mac_in_errs:", mac_in_errs);
+ PRINT_CH_STATS("tp_hdr_in_errs:", hdr_in_errs);
+ PRINT_CH_STATS("tp_tcp_in_errs:", tcp_in_errs);
+ PRINT_CH_STATS("tp_tcp6_in_errs:", tcp6_in_errs);
+ PRINT_CH_STATS("tp_tnl_cong_drops:", tnl_cong_drops);
+ PRINT_CH_STATS("tp_tnl_tx_drops:", tnl_tx_drops);
+ PRINT_CH_STATS("tp_ofld_vlan_drops:", ofld_vlan_drops);
+ PRINT_CH_STATS("tp_ofld_chan_drops:", ofld_chan_drops);
+}
+
+static void show_fcoe_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_fcoe_stats stats[NCHAN];
+ u8 i;
+
+ spin_lock(&adap->stats_lock);
+ for (i = 0; i < adap->params.arch.nchan; i++)
+ t4_get_fcoe_stats(adap, i, &stats[i], false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_CH_STATS2("fcoe_octets_ddp", octets_ddp);
+ PRINT_CH_STATS2("fcoe_frames_ddp", frames_ddp);
+ PRINT_CH_STATS2("fcoe_frames_drop", frames_drop);
+}
+
+#undef PRINT_CH_STATS2
+#undef PRINT_CH_STATS
+#undef PRINT_ADAP_STATS
+
+static int tp_stats_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adap = seq->private;
+
+ seq_puts(seq, "\n--------Adapter Stats--------\n");
+ show_tcp_stats(seq);
+ show_ddp_stats(seq);
+ show_rdma_stats(seq);
+ show_tp_err_adapter_stats(seq);
+
+ seq_puts(seq, "\n-------- Channel Stats --------\n");
+ if (adap->params.arch.nchan == NCHAN)
+ seq_printf(seq, "%-25s %-20s %-20s %-20s %-20s\n",
+ " ", "channel 0", "channel 1",
+ "channel 2", "channel 3");
+ else
+ seq_printf(seq, "%-25s %-20s %-20s\n",
+ " ", "channel 0", "channel 1");
+ show_cpl_stats(seq);
+ show_tp_err_channel_stats(seq);
+ show_fcoe_stats(seq);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tp_stats);
+
/* Add an array of Debug FS files.
*/
void add_debugfs_files(struct adapter *adap,
@@ -2973,6 +3624,10 @@ int t4_setup_debugfs(struct adapter *adap)
{ "rss_key", &rss_key_debugfs_fops, 0400, 0 },
{ "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 },
{ "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 },
+ { "resources", &resources_debugfs_fops, 0400, 0 },
+#ifdef CONFIG_CHELSIO_T4_DCB
+ { "dcb_info", &dcb_info_debugfs_fops, 0400, 0 },
+#endif
{ "sge_qinfo", &sge_qinfo_debugfs_fops, 0400, 0 },
{ "ibq_tp0", &cim_ibq_fops, 0400, 0 },
{ "ibq_tp1", &cim_ibq_fops, 0400, 1 },
@@ -2999,6 +3654,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "blocked_fl", &blocked_fl_fops, 0600, 0 },
{ "meminfo", &meminfo_fops, 0400, 0 },
{ "crypto", &chcr_stats_debugfs_fops, 0400, 0 },
+ { "tp_stats", &tp_stats_debugfs_fops, 0400, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index f7eef93ffc87..d07230c892a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -115,42 +115,10 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
"db_drop ",
"db_full ",
"db_empty ",
- "tcp_ipv4_out_rsts ",
- "tcp_ipv4_in_segs ",
- "tcp_ipv4_out_segs ",
- "tcp_ipv4_retrans_segs ",
- "tcp_ipv6_out_rsts ",
- "tcp_ipv6_in_segs ",
- "tcp_ipv6_out_segs ",
- "tcp_ipv6_retrans_segs ",
- "usm_ddp_frames ",
- "usm_ddp_octets ",
- "usm_ddp_drops ",
- "rdma_no_rqe_mod_defer ",
- "rdma_no_rqe_pkt_defer ",
- "tp_err_ofld_no_neigh ",
- "tp_err_ofld_cong_defer ",
"write_coal_success ",
"write_coal_fail ",
};
-static char channel_stats_strings[][ETH_GSTRING_LEN] = {
- "--------Channel--------- ",
- "tp_cpl_requests ",
- "tp_cpl_responses ",
- "tp_mac_in_errs ",
- "tp_hdr_in_errs ",
- "tp_tcp_in_errs ",
- "tp_tcp6_in_errs ",
- "tp_tnl_cong_drops ",
- "tp_tnl_tx_drops ",
- "tp_ofld_vlan_drops ",
- "tp_ofld_chan_drops ",
- "fcoe_octets_ddp ",
- "fcoe_frames_ddp ",
- "fcoe_frames_drop ",
-};
-
static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
"-------Loopback----------- ",
"octets_ok ",
@@ -177,14 +145,19 @@ static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
"bg3_frames_trunc ",
};
+static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ [PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr",
+};
+
static int get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(stats_strings) +
ARRAY_SIZE(adapter_stats_strings) +
- ARRAY_SIZE(channel_stats_strings) +
ARRAY_SIZE(loopback_stats_strings);
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(cxgb4_priv_flags_strings);
default:
return -EOPNOTSUPP;
}
@@ -235,6 +208,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
FW_HDR_FW_VER_MINOR_G(exprom_vers),
FW_HDR_FW_VER_MICRO_G(exprom_vers),
FW_HDR_FW_VER_BUILD_G(exprom_vers));
+ info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings);
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -245,11 +219,11 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
memcpy(data, adapter_stats_strings,
sizeof(adapter_stats_strings));
data += sizeof(adapter_stats_strings);
- memcpy(data, channel_stats_strings,
- sizeof(channel_stats_strings));
- data += sizeof(channel_stats_strings);
memcpy(data, loopback_stats_strings,
sizeof(loopback_stats_strings));
+ } else if (stringset == ETH_SS_PRIV_FLAGS) {
+ memcpy(data, cxgb4_priv_flags_strings,
+ sizeof(cxgb4_priv_flags_strings));
}
}
@@ -270,41 +244,10 @@ struct adapter_stats {
u64 db_drop;
u64 db_full;
u64 db_empty;
- u64 tcp_v4_out_rsts;
- u64 tcp_v4_in_segs;
- u64 tcp_v4_out_segs;
- u64 tcp_v4_retrans_segs;
- u64 tcp_v6_out_rsts;
- u64 tcp_v6_in_segs;
- u64 tcp_v6_out_segs;
- u64 tcp_v6_retrans_segs;
- u64 frames;
- u64 octets;
- u64 drops;
- u64 rqe_dfr_mod;
- u64 rqe_dfr_pkt;
- u64 ofld_no_neigh;
- u64 ofld_cong_defer;
u64 wc_success;
u64 wc_fail;
};
-struct channel_stats {
- u64 cpl_req;
- u64 cpl_rsp;
- u64 mac_in_errs;
- u64 hdr_in_errs;
- u64 tcp_in_errs;
- u64 tcp6_in_errs;
- u64 tnl_cong_drops;
- u64 tnl_tx_drops;
- u64 ofld_vlan_drops;
- u64 ofld_chan_drops;
- u64 octets_ddp;
- u64 frames_ddp;
- u64 frames_drop;
-};
-
static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
@@ -327,45 +270,14 @@ static void collect_sge_port_stats(const struct adapter *adap,
static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
{
- struct tp_tcp_stats v4, v6;
- struct tp_rdma_stats rdma_stats;
- struct tp_err_stats err_stats;
- struct tp_usm_stats usm_stats;
u64 val1, val2;
memset(s, 0, sizeof(*s));
- spin_lock(&adap->stats_lock);
- t4_tp_get_tcp_stats(adap, &v4, &v6, false);
- t4_tp_get_rdma_stats(adap, &rdma_stats, false);
- t4_get_usm_stats(adap, &usm_stats, false);
- t4_tp_get_err_stats(adap, &err_stats, false);
- spin_unlock(&adap->stats_lock);
-
s->db_drop = adap->db_stats.db_drop;
s->db_full = adap->db_stats.db_full;
s->db_empty = adap->db_stats.db_empty;
- s->tcp_v4_out_rsts = v4.tcp_out_rsts;
- s->tcp_v4_in_segs = v4.tcp_in_segs;
- s->tcp_v4_out_segs = v4.tcp_out_segs;
- s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
- s->tcp_v6_out_rsts = v6.tcp_out_rsts;
- s->tcp_v6_in_segs = v6.tcp_in_segs;
- s->tcp_v6_out_segs = v6.tcp_out_segs;
- s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;
-
- if (is_offload(adap)) {
- s->frames = usm_stats.frames;
- s->octets = usm_stats.octets;
- s->drops = usm_stats.drops;
- s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
- s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
- }
-
- s->ofld_no_neigh = err_stats.ofld_no_neigh;
- s->ofld_cong_defer = err_stats.ofld_cong_defer;
-
if (!is_t4(adap->params.chip)) {
int v;
@@ -379,36 +291,6 @@ static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
}
}
-static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
- u8 i)
-{
- struct tp_cpl_stats cpl_stats;
- struct tp_err_stats err_stats;
- struct tp_fcoe_stats fcoe_stats;
-
- memset(s, 0, sizeof(*s));
-
- spin_lock(&adap->stats_lock);
- t4_tp_get_cpl_stats(adap, &cpl_stats, false);
- t4_tp_get_err_stats(adap, &err_stats, false);
- t4_get_fcoe_stats(adap, i, &fcoe_stats, false);
- spin_unlock(&adap->stats_lock);
-
- s->cpl_req = cpl_stats.req[i];
- s->cpl_rsp = cpl_stats.rsp[i];
- s->mac_in_errs = err_stats.mac_in_errs[i];
- s->hdr_in_errs = err_stats.hdr_in_errs[i];
- s->tcp_in_errs = err_stats.tcp_in_errs[i];
- s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
- s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
- s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
- s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
- s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
- s->octets_ddp = fcoe_stats.octets_ddp;
- s->frames_ddp = fcoe_stats.frames_ddp;
- s->frames_drop = fcoe_stats.frames_drop;
-}
-
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
u64 *data)
{
@@ -429,11 +311,6 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
data += sizeof(struct adapter_stats) / sizeof(u64);
*data++ = (u64)pi->port_id;
- collect_channel_stats(adapter, (struct channel_stats *)data,
- pi->port_id);
- data += sizeof(struct channel_stats) / sizeof(u64);
-
- *data++ = (u64)pi->port_id;
memset(&s, 0, sizeof(s));
t4_get_lb_stats(adapter, pi->port_id, &s);
@@ -751,13 +628,10 @@ static int get_link_ksettings(struct net_device *dev,
fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
link_ksettings->link_modes.lp_advertising);
- if (netif_carrier_ok(dev)) {
- base->speed = pi->link_cfg.speed;
- base->duplex = DUPLEX_FULL;
- } else {
- base->speed = SPEED_UNKNOWN;
- base->duplex = DUPLEX_UNKNOWN;
- }
+ base->speed = (netif_carrier_ok(dev)
+ ? pi->link_cfg.speed
+ : SPEED_UNKNOWN);
+ base->duplex = DUPLEX_FULL;
if (pi->link_cfg.fc & PAUSE_RX) {
if (pi->link_cfg.fc & PAUSE_TX) {
@@ -1499,6 +1373,36 @@ static int cxgb4_get_module_eeprom(struct net_device *dev,
offset, len, &data[eprom->len - len]);
}
+static u32 cxgb4_get_priv_flags(struct net_device *netdev)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+
+ return (adapter->eth_flags | pi->eth_flags);
+}
+
+/**
+ * set_flags - copy the flag bits selected by @flags from @new_flags
+ * @cur_flags: pointer to the current flags word
+ * @new_flags: incoming flags
+ * @flags: mask of the flag bits to update
+ */
+static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags)
+{
+ *cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
+}
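
/* Editor's sketch of set_flags() with illustrative values (u32 is the
 * kernel typedef for a 32-bit unsigned integer): only the bits selected
 * by the mask are copied from the incoming flags.
 */
u32 cur = 0x4, incoming = 0x1, mask = 0x1;

set_flags(&cur, incoming, mask);
/* cur == (0x4 & ~0x1) | (0x1 & 0x1) == 0x5; bit 2 is left untouched */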
+
+static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+
+ set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP);
+ set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT);
+
+ return 0;
+}
+
static const struct ethtool_ops cxgb_ethtool_ops = {
.get_link_ksettings = get_link_ksettings,
.set_link_ksettings = set_link_ksettings,
@@ -1535,6 +1439,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_dump_data = get_dump_data,
.get_module_info = cxgb4_get_module_info,
.get_module_eeprom = cxgb4_get_module_eeprom,
+ .get_priv_flags = cxgb4_get_priv_flags,
+ .set_priv_flags = cxgb4_set_priv_flags,
};
void cxgb4_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a8926e97935e..961e3087d1d3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -267,7 +267,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
}
}
-static int cxgb4_dcb_enabled(const struct net_device *dev)
+int cxgb4_dcb_enabled(const struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
@@ -554,10 +554,9 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
dev = q->adap->port[q->adap->chan_map[port]];
dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
- ? !!(pcmd->u.info.dcbxdis_pkd &
- FW_PORT_CMD_DCBXDIS_F)
- : !!(pcmd->u.info32.lstatus32_to_cbllen32 &
- FW_PORT_CMD_DCBXDIS32_F));
+ ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
+ : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
+ & FW_PORT_CMD_DCBXDIS32_F));
state_input = (dcbxdis
? CXGB4_DCB_INPUT_FW_DISABLED
: CXGB4_DCB_INPUT_FW_ENABLED);
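
/* Editor's note on the be32_to_cpu() added above: lstatus32_to_cbllen32
 * is a big-endian wire value, so testing a host-order flag against the
 * raw word only matched by accident on big-endian hosts. A sketch using
 * the kernel's endianness helpers:
 *
 *	__be32 wire = cpu_to_be32(FW_PORT_CMD_DCBXDIS32_F);
 *	bool dis = !!(be32_to_cpu(wire) & FW_PORT_CMD_DCBXDIS32_F); // true
 *	// (wire & FW_PORT_CMD_DCBXDIS32_F) would typically be 0 on
 *	// little-endian hosts, silently losing the DCBX state.
 */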
@@ -924,12 +923,14 @@ static int setup_sge_queues(struct adapter *adap)
QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
return 0;
freeout:
+ dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
t4_free_sge_resources(adap);
return err;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
int txq;
@@ -971,7 +972,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}
- return fallback(dev, skb) % dev->real_num_tx_queues;
+ return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int closest_timer(const struct sge *s, int time)
@@ -3016,7 +3017,7 @@ static int cxgb_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
- pi, dev);
+ pi, dev, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
return 0;
@@ -3219,7 +3220,7 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev,
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
- .ndo_start_xmit = t4_eth_xmit,
+ .ndo_start_xmit = t4_start_xmit,
.ndo_select_queue = cxgb_select_queue,
.ndo_get_stats64 = cxgb_get_stats,
.ndo_set_rx_mode = cxgb_set_rxmode,
@@ -3538,6 +3539,16 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
u32 v;
int ret;
+	/* Now that we've successfully configured and initialized the adapter
+	 * we can ask the Firmware what resources it has provisioned for us.
+ */
+ ret = t4_get_pfres(adap);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Unable to retrieve resource provisioning information\n");
+ return ret;
+ }
+
/* get device capabilities */
memset(c, 0, sizeof(*c));
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
@@ -4172,32 +4183,6 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
- /*
- * Grab VPD parameters. This should be done after we establish a
- * connection to the firmware since some of the VPD parameters
- * (notably the Core Clock frequency) are retrieved via requests to
- * the firmware. On the other hand, we need these fairly early on
- * so we do this right after getting ahold of the firmware.
- */
- ret = t4_get_vpd_params(adap, &adap->params.vpd);
- if (ret < 0)
- goto bye;
-
- /*
- * Find out what ports are available to us. Note that we need to do
- * this before calling adap_init0_no_config() since it needs nports
- * and portvec ...
- */
- v =
- FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
- ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
- if (ret < 0)
- goto bye;
-
- adap->params.nports = hweight32(port_vec);
- adap->params.portvec = port_vec;
-
/* If the firmware is initialized already, emit a simple note to that
* effect. Otherwise, it's time to try initializing the adapter.
*/
@@ -4248,6 +4233,45 @@ static int adap_init0(struct adapter *adap)
}
}
+ /* Now that we've successfully configured and initialized the adapter
+ * (or found it already initialized), we can ask the Firmware what
+ * resources it has provisioned for us.
+ */
+ ret = t4_get_pfres(adap);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Unable to retrieve resource provisioning information\n");
+ goto bye;
+ }
+
+ /* Grab VPD parameters. This should be done after we establish a
+ * connection to the firmware since some of the VPD parameters
+ * (notably the Core Clock frequency) are retrieved via requests to
+ * the firmware. On the other hand, we need these fairly early on
+ * so we do this right after getting ahold of the firmware.
+ *
+ * We need to do this after initializing the adapter because someone
+ * could have FLASHed a new VPD which won't be read by the firmware
+ * until we do the RESET ...
+ */
+ ret = t4_get_vpd_params(adap, &adap->params.vpd);
+ if (ret < 0)
+ goto bye;
+
+ /* Find out what ports are available to us. Note that we need to do
+ * this before calling adap_init0_no_config() since it needs nports
+ * and portvec ...
+ */
+ v =
+ FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
+ if (ret < 0)
+ goto bye;
+
+ adap->params.nports = hweight32(port_vec);
+ adap->params.portvec = port_vec;
+
/* Give the SGE code a chance to pull in anything that it needs ...
* Note that this must be called after we retrieve our VPD parameters
* in order to know how to convert core ticks to seconds, etc.
@@ -4799,10 +4823,12 @@ static inline bool is_x_10g_port(const struct link_config *lc)
* of ports we found and the number of available CPUs. Most settings can be
* modified by the admin prior to actual use.
*/
-static void cfg_queues(struct adapter *adap)
+static int cfg_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int i = 0, n10g = 0, qidx = 0;
+ int i, n10g = 0, qidx = 0;
+ int niqflint, neq, avail_eth_qsets;
+ int max_eth_qsets = 32;
#ifndef CONFIG_CHELSIO_T4_DCB
int q10g = 0;
#endif
@@ -4814,16 +4840,46 @@ static void cfg_queues(struct adapter *adap)
adap->params.crypto = 0;
}
- n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+ /* Calculate the number of Ethernet Queue Sets available based on
+ * resources provisioned for us. We always have an Asynchronous
+ * Firmware Event Ingress Queue. If we're operating in MSI or Legacy
+ * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
+ * Ingress Queue. Meanwhile, we need two Egress Queues for each
+ * Queue Set: one for the Free List and one for the Ethernet TX Queue.
+ *
+ * Note that we should also take into account all of the various
+ * Offload Queues. But, in any situation where we're operating in
+ * a Resource Constrained Provisioning environment, doing any Offload
+ * at all is problematic ...
+ */
+ niqflint = adap->params.pfres.niqflint - 1;
+ if (!(adap->flags & USING_MSIX))
+ niqflint--;
+ neq = adap->params.pfres.neq / 2;
+ avail_eth_qsets = min(niqflint, neq);
+
+ if (avail_eth_qsets > max_eth_qsets)
+ avail_eth_qsets = max_eth_qsets;
+
+ if (avail_eth_qsets < adap->params.nports) {
+ dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
+ avail_eth_qsets, adap->params.nports);
+ return -ENOMEM;
+ }
+
+ /* Count the number of 10Gb/s or better ports */
+ for_each_port(adap, i)
+ n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
* own TX Queue in order to prevent Head-Of-Line Blocking.
*/
- if (adap->params.nports * 8 > MAX_ETH_QSETS) {
- dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
- MAX_ETH_QSETS, adap->params.nports * 8);
- BUG_ON(1);
+ if (adap->params.nports * 8 > avail_eth_qsets) {
+ dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
+ avail_eth_qsets, adap->params.nports * 8);
+ return -ENOMEM;
}
for_each_port(adap, i) {
@@ -4839,7 +4895,7 @@ static void cfg_queues(struct adapter *adap)
* per 10G port.
*/
if (n10g)
- q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
+ q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
if (q10g > netif_get_num_default_rss_queues())
q10g = netif_get_num_default_rss_queues();
@@ -4890,6 +4946,8 @@ static void cfg_queues(struct adapter *adap)
init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
init_rspq(adap, &s->intrq, 0, 1, 512, 64);
+
+ return 0;
}
/*
@@ -5086,17 +5144,9 @@ static void print_port_info(const struct net_device *dev)
{
char buf[80];
char *bufp = buf;
- const char *spd = "";
const struct port_info *pi = netdev_priv(dev);
const struct adapter *adap = pi->adapter;
- if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
- spd = " 2.5 GT/s";
- else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
- spd = " 5 GT/s";
- else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
- spd = " 8 GT/s";
-
if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
bufp += sprintf(bufp, "100M/");
if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
@@ -5600,6 +5650,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_CHELSIO_T4_DCB
netdev->dcbnl_ops = &cxgb4_dcb_ops;
cxgb4_dcb_state_init(netdev);
+ cxgb4_dcb_version_init(netdev);
#endif
cxgb4_set_ethtool_ops(netdev);
}
@@ -5630,10 +5681,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ if (!(adapter->flags & FW_OK))
+ goto fw_attach_fail;
+
/* Configure queues and allocate tables now, they can be needed as
* soon as the first register_netdev completes.
*/
- cfg_queues(adapter);
+ err = cfg_queues(adapter);
+ if (err)
+ goto out_free_dev;
adapter->smt = t4_init_smt();
if (!adapter->smt) {
@@ -5705,7 +5761,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
u32 hash_base, hash_reg;
- if (chip <= CHELSIO_T5) {
+ if (chip_ver <= CHELSIO_T5) {
hash_reg = LE_DB_TID_HASHBASE_A;
hash_base = t4_read_reg(adapter, hash_reg);
adapter->tids.hash_base = hash_base / 4;
@@ -5740,6 +5796,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_dev;
}
+fw_attach_fail:
/*
* The card is now ready to go. If any errors occur during device
* registration we do not fail the whole card but rather proceed only
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 3ddd2c4acf68..623f73dd7738 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -874,6 +874,9 @@ int cxgb4_init_tc_flower(struct adapter *adap)
{
int ret;
+ if (adap->tc_flower_initialized)
+ return -EEXIST;
+
adap->flower_ht_params = cxgb4_tc_flower_ht_params;
ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
if (ret)
@@ -882,13 +885,18 @@ int cxgb4_init_tc_flower(struct adapter *adap)
INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
+ adap->tc_flower_initialized = true;
return 0;
}
void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
+ if (!adap->tc_flower_initialized)
+ return;
+
if (adap->flower_stats_timer.function)
del_timer_sync(&adap->flower_stats_timer);
cancel_work_sync(&adap->flower_stats_work);
rhashtable_destroy(&adap->flower_tbl);
+ adap->tc_flower_initialized = false;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 77c2c538b1fd..301c4df8a566 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -231,6 +231,7 @@ again:
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
+ /* fall through */
case L2T_STATE_VALID: /* fast-path, send the packet on */
return t4_ofld_send(adap, skb);
case L2T_STATE_RESOLVING:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 9148abb7994c..7fc656680299 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -539,6 +539,9 @@ void t4_cleanup_sched(struct adapter *adap)
struct port_info *pi = netdev2pinfo(adap->port[j]);
s = pi->sched_tbl;
+ if (!s)
+ continue;
+
for (i = 0; i < s->sched_size; i++) {
struct sched_class *e;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 395e2a0e8d7f..6807bc3a44fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1288,13 +1288,13 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
}
/**
- * t4_eth_xmit - add a packet to an Ethernet Tx queue
+ * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
* @dev: the egress net device
*
* Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
*/
-netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
u32 wr_mid, ctrl0, op;
u64 cntrl, *end, *sgl;
@@ -1547,6 +1547,374 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+/* Constants ... */
+enum {
+ /* Egress Queue sizes, producer and consumer indices are all in units
+ * of Egress Context Units bytes. Note that as far as the hardware is
+ * concerned, the free list is an Egress Queue (the host produces free
+ * buffers which the hardware consumes) and free list entries are
+ * 64-bit PCI DMA addresses.
+ */
+ EQ_UNIT = SGE_EQ_IDXSIZE,
+ FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+ TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+
+ T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
+};
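
/* Editor's note: SGE_EQ_IDXSIZE is 64 in the driver headers (one Egress
 * Context Unit is 64 bytes), so both ratios above evaluate to 8 64-bit
 * flits per unit:
 *
 *	EQ_UNIT         = 64
 *	FL_PER_EQ_UNIT  = 64 / sizeof(__be64) = 8
 *	TXD_PER_EQ_UNIT = 64 / sizeof(__be64) = 8
 */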
+
+/**
+ * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
+ * @skb: the packet
+ *
+ * Returns whether an Ethernet packet is small enough to fit completely as
+ * immediate data.
+ */
+static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
+{
+ /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
+ * which does not accommodate immediate data. We could dike out all
+ * of the support code for immediate data but that would tie our hands
+	 * too much if we ever want to enhance the firmware. It would also
+ * create more differences between the PF and VF Drivers.
+ */
+ return false;
+}
+
+/**
+ * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for a TX Work Request for the
+ * given Ethernet packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
+{
+ unsigned int flits;
+
+ /* If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the skb data in the Work Request.
+ */
+ if (t4vf_is_eth_imm(skb))
+ return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
+ sizeof(__be64));
+
+	 * Otherwise, we're going to have to construct a Scatter/Gather List
+ * of the skb body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+ * with an embedded TX Packet Write CPL message.
+ */
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ if (skb_shinfo(skb)->gso_size)
+ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ else
+ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
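
/* Editor's worked example (sizes assumed for illustration): a non-GSO skb
 * with nr_frags == 2 needs sgl_len(3) SGL flits plus the WR + CPL header
 * flits. Assuming a 32-byte fw_eth_tx_pkt_vm_wr and a 16-byte
 * cpl_tx_pkt_core, that is (32 + 16) / 8 == 6 header flits; the caller
 * then rounds the total up to whole 64-byte descriptors via
 * flits_to_desc().
 */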
+
+/**
+ * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
+ * @skb: the packet
+ * @dev: the egress net device
+ *
+ * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
+ */
+static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ const struct skb_shared_info *ssi;
+ struct fw_eth_tx_pkt_vm_wr *wr;
+ int qidx, credits, max_pkt_len;
+ struct cpl_tx_pkt_core *cpl;
+ const struct port_info *pi;
+ unsigned int flits, ndesc;
+ struct sge_eth_txq *txq;
+ struct adapter *adapter;
+ u64 cntrl, *end;
+ u32 wr_mid;
+ const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
+ sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) +
+ sizeof(wr->vlantci);
+
+ /* The chip minimum packet length is 10 octets but the firmware
+	 * command that we are using requires that we copy the Ethernet header
+	 * (including the VLAN tag) into the Work Request itself, so we reject
+	 * anything smaller than that ...
+ */
+ if (unlikely(skb->len < fw_hdr_copy_len))
+ goto out_free;
+
+	/* Discard the packet if the length is greater than the MTU */
+ max_pkt_len = ETH_HLEN + dev->mtu;
+ if (skb_vlan_tag_present(skb))
+ max_pkt_len += VLAN_HLEN;
+ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ goto out_free;
+
+ /* Figure out which TX Queue we're going to use. */
+ pi = netdev_priv(dev);
+ adapter = pi->adapter;
+ qidx = skb_get_queue_mapping(skb);
+ WARN_ON(qidx >= pi->nqsets);
+ txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
+
+ /* Take this opportunity to reclaim any TX Descriptors whose DMA
+ * transfers have completed.
+ */
+ cxgb4_reclaim_completed_tx(adapter, &txq->q, true);
+
+ /* Calculate the number of flits and TX Descriptors we're going to
+ * need along with how many TX Descriptors will be left over after
+ * we inject our Work Request.
+ */
+ flits = t4vf_calc_tx_flits(skb);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&txq->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ /* Not enough room for this packet's Work Request. Stop the
+ * TX Queue and return a "busy" condition. The queue will get
+ * started later on when the firmware informs us that space
+ * has opened up.
+ */
+ eth_txq_stop(txq);
+ dev_err(adapter->pdev_dev,
+ "%s: TX ring %u full while queue awake!\n",
+ dev->name, qidx);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (!t4vf_is_eth_imm(skb) &&
+ unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
+ /* We need to map the skb into PCI DMA space (because it can't
+ * be in-lined directly into the Work Request) and the mapping
+ * operation failed. Record the error and drop the packet.
+ */
+ txq->mapping_err++;
+ goto out_free;
+ }
+
+ wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ /* After we're done injecting the Work Request for this
+ * packet, we'll be below our "stop threshold" so stop the TX
+ * Queue now and schedule a request for an SGE Egress Queue
+ * Update message. The queue will get started later on when
+ * the firmware processes this Work Request and sends us an
+ * Egress Queue Status Update message indicating that space
+ * has opened up.
+ */
+ eth_txq_stop(txq);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ /* Start filling in our Work Request. Note that we do _not_ handle
+ * the WR Header wrapping around the TX Descriptor Ring. If our
+ * maximum header size ever exceeds one TX Descriptor, we'll need to
+ * do something else here.
+ */
+ WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
+ wr = (void *)&txq->q.desc[txq->q.pidx];
+ wr->equiq_to_len16 = cpu_to_be32(wr_mid);
+ wr->r3[0] = cpu_to_be32(0);
+ wr->r3[1] = cpu_to_be32(0);
+ skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ end = (u64 *)wr + flits;
+
+ /* If this is a Large Send Offload packet we'll put in an LSO CPL
+ * message with an encapsulated TX Packet CPL message. Otherwise we
+ * just use a TX Packet CPL message.
+ */
+ ssi = skb_shinfo(skb);
+ if (ssi->gso_size) {
+ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+ bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+ wr->op_immdlen =
+ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN_V(sizeof(*lso) +
+ sizeof(*cpl)));
+ /* Fill in the LSO CPL message. */
+ lso->lso_ctrl =
+ cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F |
+ LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->ipid_ofst = cpu_to_be16(0);
+ lso->mss = cpu_to_be16(ssi->gso_size);
+ lso->seqno_offset = cpu_to_be32(0);
+ if (is_t4(adapter->params.chip))
+ lso->len = cpu_to_be32(skb->len);
+ else
+ lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
+
+ /* Set up TX Packet CPL pointer, control word and perform
+ * accounting.
+ */
+ cpl = (void *)(lso + 1);
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
+ txq->tso++;
+ txq->tx_cso += ssi->gso_segs;
+ } else {
+ int len;
+
+ len = (t4vf_is_eth_imm(skb)
+ ? skb->len + sizeof(*cpl)
+ : sizeof(*cpl));
+ wr->op_immdlen =
+ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN_V(len));
+
+ /* Set up TX Packet CPL pointer, control word and perform
+ * accounting.
+ */
+ cpl = (void *)(wr + 1);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ cntrl = hwcsum(adapter->params.chip, skb) |
+ TXPKT_IPCSUM_DIS_F;
+ txq->tx_cso++;
+ } else {
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+ }
+ }
+
+ /* If there's a VLAN tag present, add that to the list of things to
+ * do in this Work Request.
+ */
+ if (skb_vlan_tag_present(skb)) {
+ txq->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+ }
+
+ /* Fill in the TX Packet CPL message header. */
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->port_id) |
+ TXPKT_PF_V(0));
+ cpl->pack = cpu_to_be16(0);
+ cpl->len = cpu_to_be16(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ /* Fill in the body of the TX Packet CPL message with either in-lined
+ * data or a Scatter/Gather List.
+ */
+ if (t4vf_is_eth_imm(skb)) {
+ /* In-line the packet's data and free the skb since we don't
+ * need it any longer.
+ */
+ cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
+ dev_consume_skb_any(skb);
+ } else {
+ /* Write the skb's Scatter/Gather list into the TX Packet CPL
+ * message and retain a pointer to the skb so we can free it
+ * later when its DMA completes. (We store the skb pointer
+ * in the Software Descriptor corresponding to the last TX
+ * Descriptor used by the Work Request.)
+ *
+ * The retained skb will be freed when the corresponding TX
+ * Descriptors are reclaimed after their DMAs complete.
+ * However, this could take quite a while since, in general,
+ * the hardware is set up to be lazy about sending DMA
+ * completion notifications to us and we mostly perform TX
+ * reclaims in the transmit routine.
+ *
+		 * This is good for performance but means that we rely on new
+		 * TX packets arriving to run the destructors of completed
+		 * packets, which open up space in their sockets' send queues.
+		 * Sometimes we do not get such new packets, causing TX to
+		 * stall. A single UDP transmitter is a good example of this
+ * situation. We have a clean up timer that periodically
+ * reclaims completed packets but it doesn't run often enough
+ * (nor do we want it to) to prevent lengthy stalls. A
+ * solution to this problem is to run the destructor early,
+ * after the packet is queued but before it's DMAd. A con is
+ * that we lie to socket memory accounting, but the amount of
+ * extra memory is reasonable (limited by the number of TX
+ * descriptors), the packets do actually get freed quickly by
+ * new packets almost always, and for protocols like TCP that
+ * wait for acks to really free up the data the extra memory
+ * is even less. On the positive side we run the destructors
+ * on the sending CPU rather than on a potentially different
+ * completing CPU, usually a good thing.
+ *
+ * Run the destructor before telling the DMA engine about the
+ * packet to make sure it doesn't complete and get freed
+ * prematurely.
+ */
+ struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
+ struct sge_txq *tq = &txq->q;
+ int last_desc;
+
+ /* If the Work Request header was an exact multiple of our TX
+ * Descriptor length, then it's possible that the starting SGL
+ * pointer lines up exactly with the end of our TX Descriptor
+ * ring. If that's the case, wrap around to the beginning
+ * here ...
+ */
+ if (unlikely((void *)sgl == (void *)tq->stat)) {
+ sgl = (void *)tq->desc;
+ end = (void *)((void *)tq->desc +
+ ((void *)end - (void *)tq->stat));
+ }
+
+ cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
+ skb_orphan(skb);
+
+ last_desc = tq->pidx + ndesc - 1;
+ if (last_desc >= tq->size)
+ last_desc -= tq->size;
+ tq->sdesc[last_desc].skb = skb;
+ tq->sdesc[last_desc].sgl = sgl;
+ }
+
+ /* Advance our internal TX Queue state, tell the hardware about
+ * the new TX descriptors and return success.
+ */
+ txq_advance(&txq->q, ndesc);
+
+ cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
+ return NETDEV_TX_OK;
+
+out_free:
+ /* An error of some sort happened. Free the TX skb and tell the
+ * OS that we've "dealt" with the packet ...
+ */
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
+ return cxgb4_vf_eth_xmit(skb, dev);
+
+ return cxgb4_eth_xmit(skb, dev);
+}
+
/**
* reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
* @q: the SGE control Tx queue
@@ -3044,7 +3412,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
if (cong >= 0)
- c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
+ FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
+ : FW_IQ_IQTYPE_OFLD));
if (fl) {
enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 3720c3e11ebb..5fe5d16dee72 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2882,6 +2882,57 @@ int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
return 0;
}
+/**
+ * t4_get_pfres - retrieve PF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a physical
+ * function. The results are stored in @adapter->params.pfres.
+ */
+int t4_get_pfres(struct adapter *adapter)
+{
+ struct pf_resources *pfres = &adapter->params.pfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ int v;
+ u32 word;
+
+	/* Execute PFVF Read command to get PF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_PFVF_CMD_PFN_V(adapter->pf) |
+ FW_PFVF_CMD_VFN_V(0));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /* Extract PF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
+ pfres->niq = FW_PFVF_CMD_NIQ_G(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ pfres->neq = FW_PFVF_CMD_NEQ_G(word);
+ pfres->pmask = FW_PFVF_CMD_PMASK_G(word);
+
+ word = be32_to_cpu(rpl.tc_to_nexactf);
+ pfres->tc = FW_PFVF_CMD_TC_G(word);
+ pfres->nvi = FW_PFVF_CMD_NVI_G(word);
+ pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
+
+ word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+ pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
+ pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
+ pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
+
+ return 0;
+}
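
/* Editor's sketch of the _S/_M/_G accessor idiom used above: shift the
 * firmware reply word right by the field's start bit, then mask to the
 * field width. The demo field below is hypothetical:
 */
#define DEMO_FIELD_S	8
#define DEMO_FIELD_M	0xffU
#define DEMO_FIELD_G(x)	(((x) >> DEMO_FIELD_S) & DEMO_FIELD_M)
/* DEMO_FIELD_G(0x00012300) == 0x23 */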
+
/* serial flash and firmware constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
@@ -7453,10 +7504,13 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
switch (nmac) {
case 5:
memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+ /* Fall through */
case 4:
memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+ /* Fall through */
case 3:
memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+ /* Fall through */
case 2:
memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index c7f8d0441278..60df66f4d21c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -188,6 +188,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50af), /* Custom T580-KR-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50b0), /* Custom T520-CR-LOM */
/* T6 adapters:
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 6b55aa2eb2a5..eb222d40ddbf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1502,6 +1502,25 @@
#define TP_MIB_DATA_A 0x7e54
#define TP_INT_CAUSE_A 0x7e74
+#define TP_FLM_FREE_PS_CNT_A 0x7e80
+#define TP_FLM_FREE_RX_CNT_A 0x7e84
+
+#define FREEPSTRUCTCOUNT_S 0
+#define FREEPSTRUCTCOUNT_M 0x1fffffU
+#define FREEPSTRUCTCOUNT_G(x) (((x) >> FREEPSTRUCTCOUNT_S) & FREEPSTRUCTCOUNT_M)
+
+#define FREERXPAGECOUNT_S 0
+#define FREERXPAGECOUNT_M 0x1fffffU
+#define FREERXPAGECOUNT_V(x) ((x) << FREERXPAGECOUNT_S)
+#define FREERXPAGECOUNT_G(x) (((x) >> FREERXPAGECOUNT_S) & FREERXPAGECOUNT_M)
+
+#define TP_FLM_FREE_TX_CNT_A 0x7e88
+
+#define FREETXPAGECOUNT_S 0
+#define FREETXPAGECOUNT_M 0x1fffffU
+#define FREETXPAGECOUNT_V(x) ((x) << FREETXPAGECOUNT_S)
+#define FREETXPAGECOUNT_G(x) (((x) >> FREETXPAGECOUNT_S) & FREETXPAGECOUNT_M)
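
/* Editor's sketch (hypothetical pairing, mirroring the new "free" counts
 * in meminfo_show above): the counters would be read roughly as
 *
 *	free_rx = FREERXPAGECOUNT_G(t4_read_reg(adap, TP_FLM_FREE_RX_CNT_A));
 *	free_tx = FREETXPAGECOUNT_G(t4_read_reg(adap, TP_FLM_FREE_TX_CNT_A));
 *
 * where t4_read_reg() is the driver's existing 32-bit MMIO read helper.
 */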
+
#define FLMTXFLSTEMPTY_S 30
#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
#define FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U)
@@ -1683,6 +1702,16 @@
#define ULP_TX_LA_RDPTR_0_A 0x8ec0
#define ULP_TX_LA_RDDATA_0_A 0x8ec4
#define ULP_TX_LA_WRPTR_0_A 0x8ec8
+#define ULP_TX_ASIC_DEBUG_CTRL_A 0x8f70
+
+#define ULP_TX_ASIC_DEBUG_0_A 0x8f74
+#define ULP_TX_ASIC_DEBUG_1_A 0x8f78
+#define ULP_TX_ASIC_DEBUG_2_A 0x8f7c
+#define ULP_TX_ASIC_DEBUG_3_A 0x8f80
+#define ULP_TX_ASIC_DEBUG_4_A 0x8f84
+
+/* registers for module PM_RX */
+#define PM_RX_BASE_ADDR 0x8fc0
#define PMRX_E_PCMD_PAR_ERROR_S 0
#define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index f1967cf6d43c..5dc6c4154af8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1472,6 +1472,12 @@ enum fw_iq_type {
FW_IQ_TYPE_NO_FL_INT_CAP
};
+enum fw_iq_iqtype {
+ FW_IQ_IQTYPE_OTHER,
+ FW_IQ_IQTYPE_NIC,
+ FW_IQ_IQTYPE_OFLD,
+};
+
struct fw_iq_cmd {
__be32 op_to_vfn;
__be32 alloc_to_len16;
@@ -1586,6 +1592,12 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_IQFLINTISCSIC_S 26
#define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S)
+#define FW_IQ_CMD_IQTYPE_S 24
+#define FW_IQ_CMD_IQTYPE_M 0x3
+#define FW_IQ_CMD_IQTYPE_V(x) ((x) << FW_IQ_CMD_IQTYPE_S)
+#define FW_IQ_CMD_IQTYPE_G(x) \
+ (((x) >> FW_IQ_CMD_IQTYPE_S) & FW_IQ_CMD_IQTYPE_M)
+
#define FW_IQ_CMD_FL0CNGCHMAP_S 20
#define FW_IQ_CMD_FL0CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL0CNGCHMAP_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 4eb15ceddca3..a844296135b4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x13
-#define T4FW_VERSION_MICRO 0x01
+#define T4FW_VERSION_MINOR 0x14
+#define T4FW_VERSION_MICRO 0x08
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x13
-#define T5FW_VERSION_MICRO 0x01
+#define T5FW_VERSION_MINOR 0x14
+#define T5FW_VERSION_MICRO 0x08
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x13
-#define T6FW_VERSION_MICRO 0x01
+#define T6FW_VERSION_MINOR 0x14
+#define T6FW_VERSION_MICRO 0x08
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 0ed161642371..74849be5f004 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -412,12 +412,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
ppmax * (sizeof(struct cxgbi_ppod_data)) +
ppod_bmap_size * sizeof(unsigned long);
- ppm = vmalloc(alloc_sz);
+ ppm = vzalloc(alloc_sz);
if (!ppm)
goto release_ppm_pool;
- memset(ppm, 0, alloc_sz);
-
ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]);
if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index e9db811df59c..901e44b0b795 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -1071,7 +1071,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
unsigned int num_bars)
{
if (!vdev) {
- vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
+ vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
if (!vdev)
return NULL;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index f8aa326d1d58..a3e7b003ada1 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -35,7 +35,7 @@ static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
for (i = 0; i < blks; i++) {
- rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
+ rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL);
if (!rq->bufs[i])
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index 090cc65658a3..eb75891974df 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -35,7 +35,7 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
for (i = 0; i < blks; i++) {
- wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
+ wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_KERNEL);
if (!wq->bufs[i])
return -ENOMEM;
}
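
Editor's note: the three enic hunks above relax GFP_ATOMIC to GFP_KERNEL; the call sites run in process context (probe paths), where sleeping allocations are allowed and far more reliable. A sketch of the distinction, with a hypothetical function name:

        #include <linux/slab.h>

        static int example_alloc(bool atomic_ctx)
        {
                /* GFP_KERNEL may sleep and can trigger reclaim, so it
                 * is the right choice in process context (probe, open,
                 * ioctl). GFP_ATOMIC is reserved for paths that cannot
                 * sleep and fails more readily under memory pressure.
                 */
                void *buf = kzalloc(256, atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);

                if (!buf)
                        return -ENOMEM;
                kfree(buf);
                return 0;
        }
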
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 6d7404f66f84..1c9ad3630c77 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -46,6 +46,11 @@
#define DRV_NAME "gmac-gemini"
#define DRV_VERSION "1.0"
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
#define HSIZE_8 0x00
#define HSIZE_16 0x01
#define HSIZE_32 0x02
@@ -146,6 +151,7 @@ struct gemini_ethernet {
void __iomem *base;
struct gemini_ethernet_port *port0;
struct gemini_ethernet_port *port1;
+ bool initialized;
spinlock_t irq_lock; /* Locks IRQ-related registers */
unsigned int freeq_order;
@@ -300,23 +306,26 @@ static void gmac_speed_set(struct net_device *netdev)
status.bits.speed = GMAC_SPEED_1000;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
- netdev_info(netdev, "connect to RGMII @ 1Gbit\n");
+ netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n",
+ phydev_name(phydev));
break;
case 100:
status.bits.speed = GMAC_SPEED_100;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
- netdev_info(netdev, "connect to RGMII @ 100 Mbit\n");
+ netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n",
+ phydev_name(phydev));
break;
case 10:
status.bits.speed = GMAC_SPEED_10;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
- netdev_info(netdev, "connect to RGMII @ 10 Mbit\n");
+ netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n",
+ phydev_name(phydev));
break;
default:
- netdev_warn(netdev, "Not supported PHY speed (%d)\n",
- phydev->speed);
+ netdev_warn(netdev, "Unsupported PHY speed (%d) on %s\n",
+ phydev->speed, phydev_name(phydev));
}
if (phydev->duplex == DUPLEX_FULL) {
@@ -363,12 +372,6 @@ static int gmac_setup_phy(struct net_device *netdev)
return -ENODEV;
netdev->phydev = phy;
- netdev_info(netdev, "connected to PHY \"%s\"\n",
- phydev_name(phy));
- phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
- (unsigned long)phy->phy_id,
- phy_modes(phy->interface));
-
phy->supported &= PHY_GBIT_FEATURES;
phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
phy->advertising = phy->supported;
@@ -376,19 +379,19 @@ static int gmac_setup_phy(struct net_device *netdev)
/* set PHY interface type */
switch (phy->interface) {
case PHY_INTERFACE_MODE_MII:
- netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+ netdev_dbg(netdev,
+ "MII: set GMAC0 to GMII mode, GMAC1 disabled\n");
status.bits.mii_rmii = GMAC_PHY_MII;
- netdev_info(netdev, "connect to MII\n");
break;
case PHY_INTERFACE_MODE_GMII:
- netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+ netdev_dbg(netdev,
+ "GMII: set GMAC0 to GMII mode, GMAC1 disabled\n");
status.bits.mii_rmii = GMAC_PHY_GMII;
- netdev_info(netdev, "connect to GMII\n");
break;
case PHY_INTERFACE_MODE_RGMII:
- dev_info(dev, "set GMAC0 and GMAC1 to MII/RGMII mode\n");
+ netdev_dbg(netdev,
+ "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n");
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
- netdev_info(netdev, "connect to RGMII\n");
break;
default:
netdev_err(netdev, "Unsupported MII interface\n");
@@ -398,29 +401,63 @@ static int gmac_setup_phy(struct net_device *netdev)
}
writel(status.bits32, port->gmac_base + GMAC_STATUS);
+ if (netif_msg_link(port))
+ phy_attached_info(phy);
+
return 0;
}
-static int gmac_pick_rx_max_len(int max_l3_len)
-{
- /* index = CONFIG_MAXLEN_XXX values */
- static const int max_len[8] = {
- 1536, 1518, 1522, 1542,
- 9212, 10236, 1518, 1518
- };
- int i, n = 5;
+/* The maximum frame length is not a plain enumerated value in the
+ * hardware register, so we do a table lookup to map the requested
+ * length to the applicable register setting.
+ */
+struct gmac_max_framelen {
+ unsigned int max_l3_len;
+ u8 val;
+};
+
+static const struct gmac_max_framelen gmac_maxlens[] = {
+ {
+ .max_l3_len = 1518,
+ .val = CONFIG0_MAXLEN_1518,
+ },
+ {
+ .max_l3_len = 1522,
+ .val = CONFIG0_MAXLEN_1522,
+ },
+ {
+ .max_l3_len = 1536,
+ .val = CONFIG0_MAXLEN_1536,
+ },
+ {
+ .max_l3_len = 1542,
+ .val = CONFIG0_MAXLEN_1542,
+ },
+ {
+ .max_l3_len = 9212,
+ .val = CONFIG0_MAXLEN_9k,
+ },
+ {
+ .max_l3_len = 10236,
+ .val = CONFIG0_MAXLEN_10k,
+ },
+};
- max_l3_len += ETH_HLEN + VLAN_HLEN;
+static int gmac_pick_rx_max_len(unsigned int max_l3_len)
+{
+ const struct gmac_max_framelen *maxlen;
+ int maxtot;
+ int i;
- if (max_l3_len > max_len[n])
- return -1;
+ maxtot = max_l3_len + ETH_HLEN + VLAN_HLEN;
- for (i = 0; i < 5; i++) {
- if (max_len[i] >= max_l3_len && max_len[i] < max_len[n])
- n = i;
+ for (i = 0; i < ARRAY_SIZE(gmac_maxlens); i++) {
+ maxlen = &gmac_maxlens[i];
+ if (maxtot <= maxlen->max_l3_len)
+ return maxlen->val;
}
- return n;
+ return -1;
}
static int gmac_init(struct net_device *netdev)
@@ -1276,8 +1313,8 @@ static void gmac_enable_irq(struct net_device *netdev, int enable)
unsigned long flags;
u32 val, mask;
- netdev_info(netdev, "%s device %d %s\n", __func__,
- netdev->dev_id, enable ? "enable" : "disable");
+ netdev_dbg(netdev, "%s device %d %s\n", __func__,
+ netdev->dev_id, enable ? "enable" : "disable");
spin_lock_irqsave(&geth->irq_lock, flags);
mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2);
@@ -1753,7 +1790,10 @@ static int gmac_open(struct net_device *netdev)
phy_start(netdev->phydev);
err = geth_resize_freeq(port);
- if (err) {
+ /* It's fine if it's just busy; the other port has already
+ * set up the freeq in that case.
+ */
+ if (err && (err != -EBUSY)) {
netdev_err(netdev, "could not resize freeq\n");
goto err_stop_phy;
}
@@ -1782,7 +1822,7 @@ static int gmac_open(struct net_device *netdev)
HRTIMER_MODE_REL);
port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
- netdev_info(netdev, "opened\n");
+ netdev_dbg(netdev, "opened\n");
return 0;
@@ -2264,6 +2304,14 @@ static void gemini_port_remove(struct gemini_ethernet_port *port)
static void gemini_ethernet_init(struct gemini_ethernet *geth)
{
+ /* Only do this once both ports are online */
+ if (geth->initialized)
+ return;
+ if (geth->port0 && geth->port1)
+ geth->initialized = true;
+ else
+ return;
+
writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
@@ -2354,6 +2402,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->id = id;
port->geth = geth;
port->dev = dev;
+ port->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
/* DMA memory */
dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2410,6 +2459,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
geth->port0 = port;
else
geth->port1 = port;
+
+ /* This will just be done once both ports are up and reset */
+ gemini_ethernet_init(geth);
+
platform_set_drvdata(pdev, port);
/* Set up and register the netdev */
@@ -2423,6 +2476,11 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netdev->hw_features = GMAC_OFFLOAD_FEATURES;
netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+ /* We can handle jumbo frames of up to 10236 bytes, so accept
+ * payloads of 10236 bytes minus the VLAN and Ethernet headers.
+ */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
port->freeq_refill = 0;
netif_napi_add(netdev, &port->napi, gmac_napi_poll,
@@ -2435,7 +2493,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->mac_addr[0], port->mac_addr[1],
port->mac_addr[2]);
dev_info(dev, "using a random ethernet address\n");
- random_ether_addr(netdev->dev_addr);
+ eth_random_addr(netdev->dev_addr);
}
gmac_write_mac_address(netdev);
@@ -2527,7 +2585,6 @@ static int gemini_ethernet_probe(struct platform_device *pdev)
spin_lock_init(&geth->irq_lock);
spin_lock_init(&geth->freeq_lock);
- gemini_ethernet_init(geth);
/* The children will use this */
platform_set_drvdata(pdev, geth);
@@ -2540,8 +2597,8 @@ static int gemini_ethernet_remove(struct platform_device *pdev)
{
struct gemini_ethernet *geth = platform_get_drvdata(pdev);
- gemini_ethernet_init(geth);
geth_cleanup_freeq(geth);
+ geth->initialized = false;
return 0;
}
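
Editor's note: the gemini changes above demote chatty netdev_info() prints to netdev_dbg() and gate link reporting behind the standard msg_enable bits. A minimal sketch of the pattern, with hypothetical names throughout:

        #include <linux/netdevice.h>
        #include <linux/module.h>

        #define EXAMPLE_DEFAULT_MSG \
                (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

        struct example_port {                   /* hypothetical */
                struct net_device *netdev;
                u32 msg_enable;
        };

        static int debug = -1;                  /* -1: keep the defaults */
        module_param(debug, int, 0);

        static void example_setup(struct example_port *port)
        {
                /* netif_msg_init() turns the module parameter into a
                 * bitmask, falling back to the defaults when debug < 0.
                 */
                port->msg_enable = netif_msg_init(debug, EXAMPLE_DEFAULT_MSG);
        }

        static void example_report_link(struct example_port *port)
        {
                /* Gated print: only emitted when NETIF_MSG_LINK is set. */
                if (netif_msg_link(port))
                        netdev_info(port->netdev, "link is up\n");
        }
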
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index a31b4df3e7ff..66535d1653f6 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -3204,6 +3204,8 @@ srom_map_media(struct net_device *dev)
case SROM_10BASETF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
+ /* fall through */
+
case SROM_10BASET:
if (lp->params.fdx && !lp->fdx) return -1;
if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
@@ -3224,6 +3226,8 @@ srom_map_media(struct net_device *dev)
case SROM_100BASETF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
+ /* fall through */
+
case SROM_100BASET:
if (lp->params.fdx && !lp->fdx) return -1;
lp->media = _100Mb;
@@ -3236,6 +3240,8 @@ srom_map_media(struct net_device *dev)
case SROM_100BASEFF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
+ /* fall through */
+
case SROM_100BASEF:
if (lp->params.fdx && !lp->fdx) return -1;
lp->media = _100Mb;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 00d02a0967d0..3e3e08698876 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -923,6 +923,7 @@ static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
data->phy_id = 1;
else
return -ENODEV;
+ /* Fall through */
case SIOCGMIIREG: /* Read MII PHY register. */
if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
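
Editor's note: the de4x5 and tulip hunks insert /* fall through */ annotations so that GCC's -Wimplicit-fallthrough treats the missing break as deliberate rather than a bug. A sketch of the pattern with hypothetical case values:

        /* Hypothetical duplex mapping: the full-duplex case deliberately
         * falls into the shared configuration below it.
         */
        static void example_map_media(int media, bool *fdx)
        {
                switch (media) {
                case 1:                         /* full-duplex variant */
                        *fdx = true;
                        /* fall through */
                case 0:                         /* half-duplex variant */
                        /* shared configuration for both cases */
                        break;
                }
        }
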
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index b4853ec9de8d..8cf794edd3c3 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -1,7 +1,7 @@
config BE2NET
tristate "ServerEngines' 10Gbps NIC - BladeEngine"
depends on PCI
- ---help---
+ help
This driver implements the NIC functionality for ServerEngines'
10Gbps network adapter - BladeEngine.
@@ -10,6 +10,42 @@ config BE2NET_HWMON
depends on BE2NET && HWMON
depends on !(BE2NET=y && HWMON=m)
default y
- ---help---
+ help
Say Y here if you want to expose thermal sensor data on
be2net network adapter.
+
+config BE2NET_BE2
+ bool "Support for BE2 chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on BE2
+ chipsets. (e.g. OneConnect OCe10xxx)
+
+config BE2NET_BE3
+ bool "Support for BE3 chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on BE3
+ chipsets. (e.g. OneConnect OCe11xxx)
+
+config BE2NET_LANCER
+ bool "Support for Lancer chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on Lancer
+ chipsets. (e.g LightPulse LPe12xxx)
+
+config BE2NET_SKYHAWK
+ bool "Support for Skyhawk chipsets"
+ depends on BE2NET
+ default y
+ help
+ Say Y here if you want to use devices based on Skyhawk
+ chipsets. (e.g. OneConnect OCe14xxx)
+
+comment "WARNING: be2net is useless without any enabled chip"
+ depends on BE2NET_BE2=n && BE2NET_BE3=n && BE2NET_LANCER=n && \
+ BE2NET_SKYHAWK=n && BE2NET
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 382891f81e09..58bcee8f0a58 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "11.4.0.0"
+#define DRV_VER "12.0.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -185,34 +185,13 @@ static inline void queue_tail_inc(struct be_queue_info *q)
struct be_eq_obj {
struct be_queue_info q;
- char desc[32];
-
- /* Adaptive interrupt coalescing (AIC) info */
- bool enable_aic;
- u32 min_eqd; /* in usecs */
- u32 max_eqd; /* in usecs */
- u32 eqd; /* configured val when aic is off */
- u32 cur_eqd; /* in usecs */
+ struct be_adapter *adapter;
+ struct napi_struct napi;
u8 idx; /* array index */
u8 msix_idx;
u16 spurious_intr;
- struct napi_struct napi;
- struct be_adapter *adapter;
cpumask_var_t affinity_mask;
-
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#define BE_EQ_IDLE 0
-#define BE_EQ_NAPI 1 /* napi owns this EQ */
-#define BE_EQ_POLL 2 /* poll owns this EQ */
-#define BE_EQ_LOCKED (BE_EQ_NAPI | BE_EQ_POLL)
-#define BE_EQ_NAPI_YIELD 4 /* napi yielded this EQ */
-#define BE_EQ_POLL_YIELD 8 /* poll yielded this EQ */
-#define BE_EQ_YIELD (BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD)
-#define BE_EQ_USER_PEND (BE_EQ_POLL | BE_EQ_POLL_YIELD)
- unsigned int state;
- spinlock_t lock; /* lock to serialize napi and busy-poll */
-#endif /* CONFIG_NET_RX_BUSY_POLL */
} ____cacheline_aligned_in_smp;
struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
@@ -238,7 +217,6 @@ struct be_tx_stats {
u64 tx_vxlan_offload_pkts;
u64 tx_reqs;
u64 tx_compl;
- ulong tx_jiffies;
u32 tx_stops;
u32 tx_drv_drops; /* pkts dropped by driver */
/* the error counters are described in be_ethtool.c */
@@ -261,9 +239,9 @@ struct be_tx_compl_info {
struct be_tx_obj {
u32 db_offset;
+ struct be_tx_compl_info txcp;
struct be_queue_info q;
struct be_queue_info cq;
- struct be_tx_compl_info txcp;
/* Remember the skbs that were transmitted */
struct sk_buff *sent_skb_list[TX_Q_LEN];
struct be_tx_stats stats;
@@ -458,10 +436,10 @@ struct be_port_resources {
#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
struct rss_info {
- u64 rss_flags;
u8 rsstable[RSS_INDIR_TABLE_LEN];
u8 rss_queue[RSS_INDIR_TABLE_LEN];
u8 rss_hkey[RSS_HASH_KEY_LEN];
+ u64 rss_flags;
};
#define BE_INVALID_DIE_TEMP 0xFF
@@ -544,11 +522,13 @@ enum {
};
struct be_error_recovery {
- /* Lancer error recovery variables */
- u8 recovery_retries;
+ union {
+ u8 recovery_retries; /* used for Lancer */
+ u8 recovery_state; /* used for BEx and Skyhawk */
+ };
/* BEx/Skyhawk error recovery variables */
- u8 recovery_state;
+ bool recovery_supported;
u16 ue_to_reset_time; /* Time after UE, to soft reset
* the chip - PF0 only
*/
@@ -556,7 +536,6 @@ struct be_error_recovery {
* of SLIPORT_SEMAPHORE reg
*/
u16 last_err_code;
- bool recovery_supported;
unsigned long probe_time;
unsigned long last_recovery_time;
@@ -773,17 +752,33 @@ static inline u16 be_max_any_irqs(struct be_adapter *adapter)
/* Is BE in QNQ multi-channel mode */
#define be_is_qnq_mode(adapter) (adapter->function_mode & QNQ_MODE)
+#ifdef CONFIG_BE2NET_LANCER
#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
adapter->pdev->device == OC_DEVICE_ID4)
+#else
+#define lancer_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_LANCER */
+#ifdef CONFIG_BE2NET_SKYHAWK
#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5 || \
adapter->pdev->device == OC_DEVICE_ID6)
+#else
+#define skyhawk_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_SKYHAWK */
+#ifdef CONFIG_BE2NET_BE3
#define BE3_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID2 || \
adapter->pdev->device == OC_DEVICE_ID2)
+#else
+#define BE3_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_BE3 */
+#ifdef CONFIG_BE2NET_BE2
#define BE2_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID1 || \
adapter->pdev->device == OC_DEVICE_ID1)
+#else
+#define BE2_chip(adapter) (0)
+#endif /* CONFIG_BE2NET_BE2 */
#define BEx_chip(adapter) (BE3_chip(adapter) || BE2_chip(adapter))
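
Editor's note: with a chip family disabled in Kconfig, its *_chip() macro becomes the constant 0, so dead-code elimination drops every guarded branch (and any function only those branches call) at build time. A sketch of the same shape with a hypothetical config option and helper:

        #ifdef CONFIG_FOO_LANCER                /* hypothetical option */
        #define lancer_chip(a)  ((a)->pdev->device == OC_DEVICE_ID3)
        #else
        #define lancer_chip(a)  (0)
        #endif

        static void foo_maybe_recover(struct be_adapter *adapter)
        {
                /* With CONFIG_FOO_LANCER=n this condition is constant 0:
                 * the call below is eliminated and only its declaration
                 * needs to stay visible to the compiler.
                 */
                if (lancer_chip(adapter))
                        lancer_recover(adapter);        /* hypothetical */
        }
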
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 7f7e206f95f8..3f6749fc889f 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -575,6 +575,7 @@ static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds)
break;
}
}
+ /* fall through */
case PHY_TYPE_SFP_PLUS_10GB:
case PHY_TYPE_XFP_10GB:
case PHY_TYPE_SFP_1GB:
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 8f755009ff38..74d122616e76 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -47,14 +47,22 @@ MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
static struct workqueue_struct *be_err_recovery_workq;
static const struct pci_device_id be_dev_ids[] = {
+#ifdef CONFIG_BE2NET_BE2
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
- { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
+#endif /* CONFIG_BE2NET_BE2 */
+#ifdef CONFIG_BE2NET_BE3
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+#endif /* CONFIG_BE2NET_BE3 */
+#ifdef CONFIG_BE2NET_LANCER
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
+#endif /* CONFIG_BE2NET_LANCER */
+#ifdef CONFIG_BE2NET_SKYHAWK
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
+#endif /* CONFIG_BE2NET_SKYHAWK */
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -1412,6 +1420,83 @@ drop:
return NETDEV_TX_OK;
}
+static void be_tx_timeout(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ struct be_tx_obj *txo;
+ struct sk_buff *skb;
+ struct tcphdr *tcphdr;
+ struct udphdr *udphdr;
+ u32 *entry;
+ int status;
+ int i, j;
+
+ for_all_tx_queues(adapter, txo, i) {
+ dev_info(dev, "TXQ Dump: %d H: %d T: %d used: %d, qid: 0x%x\n",
+ i, txo->q.head, txo->q.tail,
+ atomic_read(&txo->q.used), txo->q.id);
+
+ entry = txo->q.dma_mem.va;
+ for (j = 0; j < TX_Q_LEN * 4; j += 4) {
+ if (entry[j] != 0 || entry[j + 1] != 0 ||
+ entry[j + 2] != 0 || entry[j + 3] != 0) {
+ dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
+ j, entry[j], entry[j + 1],
+ entry[j + 2], entry[j + 3]);
+ }
+ }
+
+ entry = txo->cq.dma_mem.va;
+ dev_info(dev, "TXCQ Dump: %d H: %d T: %d used: %d\n",
+ i, txo->cq.head, txo->cq.tail,
+ atomic_read(&txo->cq.used));
+ for (j = 0; j < TX_CQ_LEN * 4; j += 4) {
+ if (entry[j] != 0 || entry[j + 1] != 0 ||
+ entry[j + 2] != 0 || entry[j + 3] != 0) {
+ dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
+ j, entry[j], entry[j + 1],
+ entry[j + 2], entry[j + 3]);
+ }
+ }
+
+ for (j = 0; j < TX_Q_LEN; j++) {
+ if (txo->sent_skb_list[j]) {
+ skb = txo->sent_skb_list[j];
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
+ tcphdr = tcp_hdr(skb);
+ dev_info(dev, "TCP source port %d\n",
+ ntohs(tcphdr->source));
+ dev_info(dev, "TCP dest port %d\n",
+ ntohs(tcphdr->dest));
+ dev_info(dev, "TCP sequence num %d\n",
+ ntohs(tcphdr->seq));
+ dev_info(dev, "TCP ack_seq %d\n",
+ ntohs(tcphdr->ack_seq));
+ } else if (ip_hdr(skb)->protocol ==
+ IPPROTO_UDP) {
+ udphdr = udp_hdr(skb);
+ dev_info(dev, "UDP source port %d\n",
+ ntohs(udphdr->source));
+ dev_info(dev, "UDP dest port %d\n",
+ ntohs(udphdr->dest));
+ }
+ dev_info(dev, "skb[%d] %p len %d proto 0x%x\n",
+ j, skb, skb->len, skb->protocol);
+ }
+ }
+ }
+
+ if (lancer_chip(adapter)) {
+ dev_info(dev, "Initiating reset due to tx timeout\n");
+ dev_info(dev, "Resetting adapter\n");
+ status = lancer_physdev_ctrl(adapter,
+ PHYSDEV_CONTROL_FW_RESET_MASK);
+ if (status)
+ dev_err(dev, "Reset failed .. Reboot server\n");
+ }
+}
+
static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
@@ -3274,7 +3359,7 @@ void be_detect_error(struct be_adapter *adapter)
/* Do not log error messages if its a FW reset */
if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
- dev_info(dev, "Firmware update in progress\n");
+ dev_info(dev, "Reset is in progress\n");
} else {
dev_err(dev, "Error detected in the card\n");
dev_err(dev, "ERR: sliport status 0x%x\n",
@@ -3403,9 +3488,11 @@ static int be_msix_register(struct be_adapter *adapter)
int status, i, vec;
for_all_evt_queues(adapter, eqo, i) {
- sprintf(eqo->desc, "%s-q%d", netdev->name, i);
+ char irq_name[IFNAMSIZ + 4];
+
+ snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev->name, i);
vec = be_msix_vec_get(adapter, eqo);
- status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
+ status = request_irq(vec, be_msix, 0, irq_name, eqo);
if (status)
goto err_msix;
@@ -5216,6 +5303,7 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_get_vf_config = be_get_vf_config,
.ndo_set_vf_link_state = be_set_vf_link_state,
.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
+ .ndo_tx_timeout = be_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = be_netpoll,
#endif
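
Editor's note: the be_msix_register() hunk replaces a sprintf() into the fixed per-EQ desc[32] with a bounded snprintf() into a local buffer (IFNAMSIZ + 4 leaves room for "-q" plus the queue index). A minimal sketch of the bounded-format idiom, with a hypothetical wrapper:

        #include <linux/if.h>           /* IFNAMSIZ */
        #include <linux/kernel.h>       /* snprintf() */

        static void example_irq_name(char *buf, size_t len,
                                     const char *ifname, int q)
        {
                /* Bounded formatting: truncates instead of overflowing
                 * if the interface name plus suffix exceed the buffer.
                 */
                snprintf(buf, len, "%s-q%d", ifname, q);
        }
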
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index ab02057ac730..65a22cd9aef2 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1171,7 +1171,7 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
buf_prefix_content.pass_prs_result = true;
buf_prefix_content.pass_hash_result = true;
- buf_prefix_content.pass_time_stamp = false;
+ buf_prefix_content.pass_time_stamp = true;
buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
params.specific_params.non_rx_params.err_fqid = errq->fqid;
@@ -1213,7 +1213,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
buf_prefix_content.pass_prs_result = true;
buf_prefix_content.pass_hash_result = true;
- buf_prefix_content.pass_time_stamp = false;
+ buf_prefix_content.pass_time_stamp = true;
buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
rx_p = &params.specific_params.rx_params;
@@ -1610,14 +1610,28 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
{
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
struct device *dev = priv->net_dev->dev.parent;
+ struct skb_shared_hwtstamps shhwtstamps;
dma_addr_t addr = qm_fd_addr(fd);
const struct qm_sg_entry *sgt;
struct sk_buff **skbh, *skb;
int nr_frags, i;
+ u64 ns;
skbh = (struct sk_buff **)phys_to_virt(addr);
skb = *skbh;
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
+ &ns)) {
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else {
+ dev_warn(dev, "fman_port_get_tstamp failed!\n");
+ }
+ }
+
if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
nr_frags = skb_shinfo(skb)->nr_frags;
dma_unmap_single(dev, addr,
@@ -2087,6 +2101,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0))
goto skb_to_fd_failed;
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+
if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
return NETDEV_TX_OK;
@@ -2228,6 +2247,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq)
{
+ struct skb_shared_hwtstamps *shhwtstamps;
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
const struct qm_fd *fd = &dq->fd;
@@ -2241,6 +2261,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct sk_buff *skb;
int *count_ptr;
void *vaddr;
+ u64 ns;
fd_status = be32_to_cpu(fd->status);
fd_format = qm_fd_get_format(fd);
@@ -2305,6 +2326,16 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
if (!skb)
return qman_cb_dqrr_consume;
+ if (priv->rx_tstamp) {
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ else
+ dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n");
+ }
+
skb->protocol = eth_type_trans(skb, net_dev);
if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
@@ -2524,11 +2555,58 @@ static int dpaa_eth_stop(struct net_device *net_dev)
return err;
}
+static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct dpaa_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ /* RX and TX timestamping can't be disabled separately
+ * in hardware, so only clear the software flag here.
+ */
+ priv->tx_tstamp = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
+ priv->tx_tstamp = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ /* RX and TX timestamping can't be disabled separately
+ * in hardware, so only clear the software flag here.
+ */
+ priv->rx_tstamp = false;
+ } else {
+ priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
+ priv->rx_tstamp = true;
+ /* TS is set for all frame types, not only those requested */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
- if (!net_dev->phydev)
- return -EINVAL;
- return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+ int ret = -EINVAL;
+
+ if (cmd == SIOCGMIIREG) {
+ if (net_dev->phydev)
+ return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+ }
+
+ if (cmd == SIOCSHWTSTAMP)
+ return dpaa_ts_ioctl(net_dev, rq, cmd);
+
+ return ret;
}
static const struct net_device_ops dpaa_ops = {
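
Editor's note: on transmit the dpaa code only stamps frames whose skb carries SKBTX_HW_TSTAMP (marking them SKBTX_IN_PROGRESS); on completion it converts the hardware nanosecond value and hands it back to the stack. A minimal sketch of the delivery step, assuming the ns value has already been read from hardware:

        #include <linux/skbuff.h>
        #include <linux/ktime.h>

        static void example_tx_tstamp(struct sk_buff *skb, u64 ns)
        {
                struct skb_shared_hwtstamps shhwtstamps;

                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                shhwtstamps.hwtstamp = ns_to_ktime(ns);
                /* Queues a clone on the socket error queue; user space
                 * collects it via SO_TIMESTAMPING / MSG_ERRQUEUE.
                 */
                skb_tstamp_tx(skb, &shhwtstamps);
        }
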
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index bd9422082f83..af320f83c742 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -182,6 +182,9 @@ struct dpaa_priv {
struct dpaa_buffer_layout buf_layout[2];
u16 rx_headroom;
+
+ bool tx_tstamp; /* Tx timestamping enabled */
+ bool rx_tstamp; /* Rx timestamping enabled */
};
/* from dpaa_ethtool.c */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 2f933b6b2f4e..3184c8f7cdd0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -32,6 +32,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/string.h>
+#include <linux/of_platform.h>
+#include <linux/net_tstamp.h>
+#include <linux/fsl/ptp_qoriq.h>
#include "dpaa_eth.h"
#include "mac.h"
@@ -515,6 +518,41 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static int dpaa_get_ts_info(struct net_device *net_dev,
+ struct ethtool_ts_info *info)
+{
+ struct device *dev = net_dev->dev.parent;
+ struct device_node *mac_node = dev->of_node;
+ struct device_node *fman_node = NULL, *ptp_node = NULL;
+ struct platform_device *ptp_dev = NULL;
+ struct qoriq_ptp *ptp = NULL;
+
+ info->phc_index = -1;
+
+ fman_node = of_get_parent(mac_node);
+ if (fman_node)
+ ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+
+ if (ptp_node)
+ ptp_dev = of_find_device_by_node(ptp_node);
+
+ if (ptp_dev)
+ ptp = platform_get_drvdata(ptp_dev);
+
+ if (ptp)
+ info->phc_index = ptp->phc_index;
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
const struct ethtool_ops dpaa_ethtool_ops = {
.get_drvinfo = dpaa_get_drvinfo,
.get_msglevel = dpaa_get_msglevel,
@@ -530,4 +568,5 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.set_link_ksettings = dpaa_set_link_ksettings,
.get_rxnfc = dpaa_get_rxnfc,
.set_rxnfc = dpaa_set_rxnfc,
+ .get_ts_info = dpaa_get_ts_info,
};
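
Editor's note: dpaa_get_ts_info() walks the device tree to find the qoriq PTP clock and reports its PHC index; user space reads all of this with `ethtool -T <iface>`. A minimal .get_ts_info sketch for a hypothetical driver with no PHC to expose:

        #include <linux/ethtool.h>
        #include <linux/net_tstamp.h>

        static int example_get_ts_info(struct net_device *dev,
                                       struct ethtool_ts_info *info)
        {
                info->phc_index = -1;   /* no PTP hardware clock found */
                info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
                                        SOF_TIMESTAMPING_RX_HARDWARE |
                                        SOF_TIMESTAMPING_RAW_HARDWARE;
                info->tx_types = (1 << HWTSTAMP_TX_OFF) |
                                 (1 << HWTSTAMP_TX_ON);
                info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
                                   (1 << HWTSTAMP_FILTER_ALL);
                return 0;
        }
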
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c729665107f5..2708297e7795 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -48,6 +48,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
@@ -2949,13 +2950,12 @@ fec_enet_close(struct net_device *ndev)
*/
#define FEC_HASH_BITS 6 /* #bits in hash */
-#define CRC32_POLY 0xEDB88320
static void set_multicast_list(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct netdev_hw_addr *ha;
- unsigned int i, bit, data, crc, tmp;
+ unsigned int crc, tmp;
unsigned char hash;
unsigned int hash_high = 0, hash_low = 0;
@@ -2983,15 +2983,7 @@ static void set_multicast_list(struct net_device *ndev)
/* Add the addresses in hash register */
netdev_for_each_mc_addr(ha, ndev) {
/* calculate crc32 value of mac address */
- crc = 0xffffffff;
-
- for (i = 0; i < ndev->addr_len; i++) {
- data = ha->addr[i];
- for (bit = 0; bit < 8; bit++, data >>= 1) {
- crc = (crc >> 1) ^
- (((crc ^ data) & 1) ? CRC32_POLY : 0);
- }
- }
+ crc = ether_crc_le(ndev->addr_len, ha->addr);
/* only upper 6 bits (FEC_HASH_BITS) are used
* which point to specific bit in the hash registers
@@ -3136,6 +3128,7 @@ static int fec_enet_init(struct net_device *ndev)
unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
sizeof(struct bufdesc);
unsigned dsize_log2 = __fls(dsize);
+ int ret;
WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
@@ -3146,6 +3139,13 @@ static int fec_enet_init(struct net_device *ndev)
fep->tx_align = 0x3;
#endif
+ /* Check mask of the streaming and coherent API */
+ ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
+ if (ret < 0) {
+ dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
+ return ret;
+ }
+
fec_enet_alloc_queue(ndev);
bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
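
Editor's note: the fec_main and fs_enet hunks in this series both replace an open-coded bit-by-bit CRC32 over the MAC address with the library helper; the FEC hash then keeps the top FEC_HASH_BITS bits. A sketch of the replacement, with a hypothetical wrapper:

        #include <linux/crc32.h>        /* ether_crc_le() */
        #include <linux/etherdevice.h>  /* ETH_ALEN */

        /* Hypothetical helper: 6-bit multicast hash as the FEC uses it. */
        static u8 example_mc_hash(const u8 *addr)
        {
                u32 crc = ether_crc_le(ETH_ALEN, addr);

                return (crc >> (32 - 6)) & 0x3f;        /* FEC_HASH_BITS == 6 */
        }
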
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 36c2d7d6ee1b..7e892b1cbd3d 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -99,7 +99,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
unsigned long flags;
u32 val, tempval;
- int inc;
struct timespec64 ts;
u64 ns;
val = 0;
@@ -114,7 +113,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
fep->pps_channel = DEFAULT_PPS_CHANNEL;
fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
- inc = fep->ptp_inc;
spin_lock_irqsave(&fep->tmreg_lock, flags);
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 9530405030a7..c415ac67cb7b 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2801,7 +2801,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
of_node_put(muram_node);
of_node_put(fm_node);
- err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
+ err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED,
+ "fman", fman);
if (err < 0) {
dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
__func__, irq, err);
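
Editor's note: requesting the FMan IRQ with IRQF_SHARED lets another device share the line; the trade-off is that every handler on a shared line must detect whether its own hardware asserted the interrupt. A sketch with hypothetical helpers:

        #include <linux/interrupt.h>

        static irqreturn_t example_irq(int irq, void *dev_id)
        {
                struct example_dev *ed = dev_id;        /* hypothetical */

                if (!example_irq_pending(ed))   /* hypothetical status read */
                        return IRQ_NONE;        /* not ours: let others run */

                example_handle_events(ed);      /* hypothetical */
                return IRQ_HANDLED;
        }

        /* Registration mirrors the hunk above:
         * devm_request_irq(dev, irq, example_irq, IRQF_SHARED, "example", ed);
         */
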
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index bfa02e0014ae..935c317fa696 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -41,6 +41,7 @@
/* Frame queue Context Override */
#define FM_FD_CMD_FCO 0x80000000
#define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */
+#define FM_FD_CMD_UPD 0x20000000 /* Update Prepended Data */
#define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */
/* TX-Port: Unsupported Format */
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 57b1e2b47c0a..1ca543ac8f2c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -123,11 +123,13 @@
#define DTSEC_ECNTRL_R100M 0x00000008
#define DTSEC_ECNTRL_QSGMIIM 0x00000001
+#define TCTRL_TTSE 0x00000040
#define TCTRL_GTS 0x00000020
#define RCTRL_PAL_MASK 0x001f0000
#define RCTRL_PAL_SHIFT 16
#define RCTRL_GHTX 0x00000400
+#define RCTRL_RTSE 0x00000040
#define RCTRL_GRS 0x00000020
#define RCTRL_MPROM 0x00000008
#define RCTRL_RSF 0x00000004
@@ -1136,6 +1138,31 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
return 0;
}
+int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 rctrl, tctrl;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ rctrl = ioread32be(&regs->rctrl);
+ tctrl = ioread32be(&regs->tctrl);
+
+ if (enable) {
+ rctrl |= RCTRL_RTSE;
+ tctrl |= TCTRL_TTSE;
+ } else {
+ rctrl &= ~RCTRL_RTSE;
+ tctrl &= ~TCTRL_TTSE;
+ }
+
+ iowrite32be(rctrl, &regs->rctrl);
+ iowrite32be(tctrl, &regs->tctrl);
+
+ return 0;
+}
+
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
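
Editor's note: dtsec_set_tstamp() above is a classic read-modify-write on big-endian MMIO registers. A minimal sketch of the pattern with a made-up register and bit:

        #include <linux/io.h>
        #include <linux/bits.h>

        static void example_set_bit_be(void __iomem *reg, bool enable)
        {
                u32 tmp = ioread32be(reg);      /* read current value */

                if (enable)
                        tmp |= BIT(6);          /* e.g. a TTSE-style enable */
                else
                        tmp &= ~BIT(6);

                iowrite32be(tmp, reg);          /* write the modified value */
        }
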
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index 1a689adf5a22..5149d96ec2c1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -56,5 +56,6 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
+int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable);
#endif /* __DTSEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 446a97b792e3..bc6eb30aa20f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -964,6 +964,11 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable)
return 0;
}
+int memac_set_tstamp(struct fman_mac *memac, bool enable)
+{
+ return 0; /* Always enabled. */
+}
+
int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index b5a50338ed9a..b2c671ec0ce7 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -58,5 +58,6 @@ int memac_set_exception(struct fman_mac *memac,
int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
int memac_set_allmulti(struct fman_mac *memac, bool enable);
+int memac_set_tstamp(struct fman_mac *memac, bool enable);
#endif /* __MEMAC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index ecbf6187e13a..ee82ee1384eb 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1739,6 +1739,18 @@ int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
}
EXPORT_SYMBOL(fman_port_get_hash_result_offset);
+int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp)
+{
+ if (port->buffer_offsets.time_stamp_offset == ILLEGAL_BASE)
+ return -EINVAL;
+
+ *tstamp = be64_to_cpu(*(__be64 *)(data +
+ port->buffer_offsets.time_stamp_offset));
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_get_tstamp);
+
static int fman_port_probe(struct platform_device *of_dev)
{
struct fman_port *port;
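
Editor's note: fman_port_get_tstamp() above reads the capture out of the data prepended to the frame buffer, a big-endian 64-bit nanosecond value at a port-specific offset. A sketch of the extraction, with the offset as a plain parameter:

        #include <asm/byteorder.h>
        #include <linux/types.h>

        static u64 example_read_tstamp(const void *data, size_t ts_off)
        {
                /* The hardware stores the stamp big-endian in the
                 * prepended area; convert to CPU byte order.
                 */
                return be64_to_cpu(*(const __be64 *)(data + ts_off));
        }
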
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
index e86ca6a34e4e..9dbb69f40121 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -153,6 +153,8 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port);
int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset);
+int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp);
+
struct fman_port *fman_port_bind(struct device *dev);
#endif /* __FMAN_PORT_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 284735d4ebe9..40705938eecc 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -44,6 +44,7 @@
#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
/* Command and Configuration Register (COMMAND_CONFIG) */
+#define CMD_CFG_EN_TIMESTAMP 0x00100000
#define CMD_CFG_NO_LEN_CHK 0x00020000
#define CMD_CFG_PAUSE_IGNORE 0x00000100
#define CMF_CFG_CRC_FWD 0x00000040
@@ -588,6 +589,26 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
return 0;
}
+int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+
+ if (enable)
+ tmp |= CMD_CFG_EN_TIMESTAMP;
+ else
+ tmp &= ~CMD_CFG_EN_TIMESTAMP;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index cbbd3b422a98..3bfd1062b386 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -52,5 +52,6 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
+int tgec_set_tstamp(struct fman_mac *tgec, bool enable);
#endif /* __TGEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 7b5b95f52c09..a847b9c3b31a 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -471,6 +471,7 @@ static void setup_dtsec(struct mac_device *mac_dev)
mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
mac_dev->set_exception = dtsec_set_exception;
mac_dev->set_allmulti = dtsec_set_allmulti;
+ mac_dev->set_tstamp = dtsec_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
@@ -490,6 +491,7 @@ static void setup_tgec(struct mac_device *mac_dev)
mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
mac_dev->set_exception = tgec_set_exception;
mac_dev->set_allmulti = tgec_set_allmulti;
+ mac_dev->set_tstamp = tgec_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
@@ -509,6 +511,7 @@ static void setup_memac(struct mac_device *mac_dev)
mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
mac_dev->set_exception = memac_set_exception;
mac_dev->set_allmulti = memac_set_allmulti;
+ mac_dev->set_tstamp = memac_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index b520cec120ee..824a81a9f350 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -68,6 +68,7 @@ struct mac_device {
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
+ int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
int (*set_multi)(struct net_device *net_dev,
struct mac_device *mac_dev);
int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fec.h b/drivers/net/ethernet/freescale/fs_enet/fec.h
index 7832db71dcb9..1dbee5d898b3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fec.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fec.h
@@ -2,9 +2,6 @@
#ifndef FS_ENET_FEC_H
#define FS_ENET_FEC_H
-/* CRC polynomium used by the FEC for the multicast group filtering */
-#define FEC_CRC_POLY 0x04C11DB7
-
#define FEC_MAX_MULTICAST_ADDRS 64
/* Interrupt events/masks.
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 1fc27c97e3b2..99fe2c210d0f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -18,6 +18,7 @@
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
+#include <linux/crc32.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -176,21 +177,10 @@ static void set_multicast_start(struct net_device *dev)
static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
struct fs_enet_private *fep = netdev_priv(dev);
- int temp, hash_index, i, j;
+ int temp, hash_index;
u32 crc, csrVal;
- u8 byte, msb;
-
- crc = 0xffffffff;
- for (i = 0; i < 6; i++) {
- byte = mac[i];
- for (j = 0; j < 8; j++) {
- msb = crc >> 31;
- crc <<= 1;
- if (msb ^ (byte & 0x1))
- crc ^= FEC_CRC_POLY;
- byte >>= 1;
- }
- }
+
+ crc = ether_crc(6, mac);
temp = (crc & 0x3f) >> 1;
hash_index = ((temp & 0x01) << 4) |
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8cb98cae0a6f..395a5266ea30 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -740,7 +740,6 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
u64 class)
{
- unsigned int last_rule_idx = priv->cur_filer_idx;
unsigned int cmp_rqfpr;
unsigned int *local_rqfpr;
unsigned int *local_rqfcr;
@@ -819,7 +818,6 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
}
priv->cur_filer_idx = l - 1;
- last_rule_idx = l;
/* hash rules */
ethflow_to_filer_rules(priv, ethflow);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 42fca3208c0b..22a817da861e 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3096,6 +3096,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
ugeth_vdbg("%s: IN", __func__);
+ netdev_sent_queue(dev, skb->len);
spin_lock_irqsave(&ugeth->lock, flags);
dev->stats.tx_bytes += skb->len;
@@ -3240,6 +3241,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
{
/* Start from the next BD that should be filled */
struct ucc_geth_private *ugeth = netdev_priv(dev);
+ unsigned int bytes_sent = 0;
+ int howmany = 0;
u8 __iomem *bd; /* BD pointer */
u32 bd_status;
@@ -3257,7 +3260,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
if (!skb)
break;
-
+ howmany++;
+ bytes_sent += skb->len;
dev->stats.tx_packets++;
dev_consume_skb_any(skb);
@@ -3279,6 +3283,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
bd_status = in_be32((u32 __iomem *)bd);
}
ugeth->confBd[txQ] = bd;
+ netdev_completed_queue(dev, howmany, bytes_sent);
return 0;
}
@@ -3479,6 +3484,7 @@ static int ucc_geth_open(struct net_device *dev)
phy_start(ugeth->phydev);
napi_enable(&ugeth->napi);
+ netdev_reset_queue(dev);
netif_start_queue(dev);
device_set_wakeup_capable(&dev->dev,
@@ -3509,6 +3515,7 @@ static int ucc_geth_close(struct net_device *dev)
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
return 0;
}
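
Editor's note: the ucc_geth hunks wire up Byte Queue Limits. Bytes are reported when queued, reported again when completed, and the accounting is reset on open/close. The three call sites pair up as below (a sketch, not a complete driver; dev, skb, howmany and bytes_sent are stand-ins for the surrounding code's variables):

        #include <linux/netdevice.h>

        /* In ndo_start_xmit, before handing the skb to hardware: */
        netdev_sent_queue(dev, skb->len);

        /* In TX completion, after reclaiming howmany skbs totalling
         * bytes_sent bytes:
         */
        netdev_completed_queue(dev, howmany, bytes_sent);

        /* In open/stop, so the counters restart from zero: */
        netdev_reset_queue(dev);
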
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index fb1a7251f45d..25152715396b 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -85,10 +85,12 @@ config HNS3
drivers(like ODP)to register with HNAE devices and their associated
operations.
+if HNS3
+
config HNS3_HCLGE
tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
+ default m
depends on PCI_MSI
- depends on HNS3
---help---
This selects the HNS3_HCLGE network acceleration engine & its hardware
compatibility layer. The engine would be used in Hisilicon hip08 family of
@@ -97,16 +99,15 @@ config HNS3_HCLGE
config HNS3_DCB
bool "Hisilicon HNS3 Data Center Bridge Support"
default n
- depends on HNS3 && HNS3_HCLGE && DCB
+ depends on HNS3_HCLGE && DCB
---help---
Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver.
If unsure, say N.
config HNS3_HCLGEVF
- tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
- depends on PCI_MSI
- depends on HNS3
+ tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
+ depends on PCI_MSI
depends on HNS3_HCLGE
---help---
This selects the HNS3 VF drivers network acceleration engine & its hardware
@@ -115,11 +116,13 @@ config HNS3_HCLGEVF
config HNS3_ENET
tristate "Hisilicon HNS3 Ethernet Device Support"
+ default m
depends on 64BIT && PCI
- depends on HNS3
---help---
This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
devices and their associated operations.
+endif # HNS3
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 340e28211135..14374a856d30 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -904,7 +904,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
hip04_config_fifo(priv);
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
hip04_update_mac_address(ndev);
ret = hip04_alloc_ring(ndev, d);
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 25a6c8722eca..c5727003af8c 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1006,12 +1006,11 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
for (i = 0; i < QUEUE_NUMS; i++) {
size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
- virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
- GFP_KERNEL);
+ virt_addr = dma_zalloc_coherent(dev, size, &phys_addr,
+ GFP_KERNEL);
if (virt_addr == NULL)
goto error_free_pool;
- memset(virt_addr, 0, size);
priv->pool[i].size = size;
priv->pool[i].desc = virt_addr;
priv->pool[i].phys_addr = phys_addr;
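
Editor's note: same shape as the earlier vzalloc() conversion, but for coherent DMA memory: dma_zalloc_coherent() returns zeroed memory, removing the separate memset(). A minimal sketch with a hypothetical wrapper:

        #include <linux/dma-mapping.h>

        static void *example_alloc_desc_ring(struct device *dev, size_t size,
                                             dma_addr_t *phys)
        {
                /* Zeroed, coherent allocation in one call (on this
                 * kernel vintage; later kernels zero the memory in
                 * dma_alloc_coherent() itself).
                 */
                return dma_zalloc_coherent(dev, size, phys, GFP_KERNEL);
        }
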
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index bd68379d2bea..e6aad30e7e69 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -70,8 +70,8 @@ static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
return container_of(q, struct ring_pair_cb, q);
}
-struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
- u32 port_id)
+static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
+ u32 port_id)
{
int vfnum_per_port;
int qnum_per_vf;
@@ -329,7 +329,7 @@ static int hns_ae_start(struct hnae_handle *handle)
return 0;
}
-void hns_ae_stop(struct hnae_handle *handle)
+static void hns_ae_stop(struct hnae_handle *handle)
{
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
@@ -357,7 +357,7 @@ static void hns_ae_reset(struct hnae_handle *handle)
}
}
-void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
+static void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
{
u32 flag;
@@ -577,8 +577,8 @@ static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
*rx_usecs_high = HNS_RCB_RX_USECS_HIGH;
}
-void hns_ae_update_stats(struct hnae_handle *handle,
- struct net_device_stats *net_stats)
+static void hns_ae_update_stats(struct hnae_handle *handle,
+ struct net_device_stats *net_stats)
{
int port;
int idx;
@@ -660,7 +660,7 @@ void hns_ae_update_stats(struct hnae_handle *handle,
net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
}
-void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
+static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
{
int idx;
struct hns_mac_cb *mac_cb;
@@ -692,8 +692,8 @@ void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
}
-void hns_ae_get_strings(struct hnae_handle *handle,
- u32 stringset, u8 *data)
+static void hns_ae_get_strings(struct hnae_handle *handle,
+ u32 stringset, u8 *data)
{
int port;
int idx;
@@ -725,7 +725,7 @@ void hns_ae_get_strings(struct hnae_handle *handle,
hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
}
-int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
+static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
{
u32 sset_count = 0;
struct hns_mac_cb *mac_cb;
@@ -771,7 +771,7 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
return ret;
}
-void hns_ae_update_led_status(struct hnae_handle *handle)
+static void hns_ae_update_led_status(struct hnae_handle *handle)
{
struct hns_mac_cb *mac_cb;
@@ -783,8 +783,8 @@ void hns_ae_update_led_status(struct hnae_handle *handle)
hns_set_led_opt(mac_cb);
}
-int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
- enum hnae_led_state status)
+static int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
+ enum hnae_led_state status)
{
struct hns_mac_cb *mac_cb;
@@ -795,7 +795,7 @@ int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
return hns_cpld_led_set_id(mac_cb, status);
}
-void hns_ae_get_regs(struct hnae_handle *handle, void *data)
+static void hns_ae_get_regs(struct hnae_handle *handle, void *data)
{
u32 *p = data;
int i;
@@ -820,7 +820,7 @@ void hns_ae_get_regs(struct hnae_handle *handle, void *data)
hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
}
-int hns_ae_get_regs_len(struct hnae_handle *handle)
+static int hns_ae_get_regs_len(struct hnae_handle *handle)
{
u32 total_num;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 74bd260ca02a..5488c6e89f21 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -339,7 +339,7 @@ static void hns_gmac_init(void *mac_drv)
GMAC_TX_WATER_LINE_SHIFT, 8);
}
-void hns_gmac_update_stats(void *mac_drv)
+static void hns_gmac_update_stats(void *mac_drv)
{
struct mac_hw_stats *hw_stats = NULL;
struct mac_driver *drv = (struct mac_driver *)mac_drv;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 9dcc5765f11f..1c2326bd76e2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -458,11 +458,6 @@ int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size)
{
struct mac_driver *drv = hns_mac_get_drv(mac_cb);
u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- u32 max_frm = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver) ?
- MAC_MAX_MTU : MAC_MAX_MTU_V2;
-
- if (mac_cb->mac_type == HNAE_PORT_DEBUG)
- max_frm = MAC_MAX_MTU_DBG;
if (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size)
return -EINVAL;
@@ -708,7 +703,7 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
{
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
struct platform_device *pdev;
struct mii_bus *mii_bus;
int rc;
@@ -722,13 +717,15 @@ static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
mac_cb->fw_port, "mdio-node", 0, &args);
if (rc)
return rc;
+ if (!is_acpi_device_node(args.fwnode))
+ return -EINVAL;
addr = hns_mac_phy_parse_addr(mac_cb->dev, mac_cb->fw_port);
if (addr < 0)
return addr;
/* dev address in adev */
- pdev = hns_dsaf_find_platform_device(acpi_fwnode_handle(args.adev));
+ pdev = hns_dsaf_find_platform_device(args.fwnode);
if (!pdev) {
dev_err(mac_cb->dev, "mac%d mdio pdev is NULL\n",
mac_cb->mac_id);
@@ -931,8 +928,9 @@ static int hns_mac_get_mode(phy_interface_t phy_if)
}
}
-u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
- struct hns_mac_cb *mac_cb, u32 mac_mode_idx)
+static u8 __iomem *
+hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
+ struct hns_mac_cb *mac_cb, u32 mac_mode_idx)
{
u8 __iomem *base = dsaf_dev->io_base;
int mac_id = mac_cb->mac_id;
@@ -950,7 +948,8 @@ u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
* @mac_cb: mac control block
* return 0 - success , negative --fail
*/
-int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
+static int
+hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
{
int ret;
u32 mac_mode_idx;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 0ce07f6eb1e6..ca50c2553a9c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -28,7 +28,7 @@
#include "hns_dsaf_rcb.h"
#include "hns_dsaf_misc.h"
-const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
+static const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
[DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
[DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
[DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
@@ -42,7 +42,7 @@ static const struct acpi_device_id hns_dsaf_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match);
-int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
+static int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
{
int ret, i;
u32 desc_num;
@@ -959,7 +959,8 @@ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
spin_unlock_bh(&dsaf_dev->tcam_lock);
}
-void hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr)
+static void
+hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr)
{
addr[0] = mac_key->high.bits.mac_0;
addr[1] = mac_key->high.bits.mac_1;
@@ -1682,7 +1683,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_tbl_tcam_mcast_cfg mac_data;
struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
- struct dsaf_drv_tbl_tcam_key tmp_mac_key;
struct dsaf_tbl_tcam_data tcam_data;
u8 mc_addr[ETH_ALEN];
int mskid;
@@ -1739,10 +1739,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
/* if exist, add in */
hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data,
&mac_data);
-
- tmp_mac_key.high.val =
- le32_to_cpu(tcam_data.tbl_tcam_data_high);
- tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
}
/* config hardware entry */
@@ -1852,7 +1848,7 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_tbl_tcam_data tcam_data;
int mskid;
const u8 empty_msk[sizeof(mac_data.tbl_mcast_port_msk)] = {0};
- struct dsaf_drv_tbl_tcam_key mask_key, tmp_mac_key;
+ struct dsaf_drv_tbl_tcam_key mask_key;
struct dsaf_tbl_tcam_data *pmask_key = NULL;
u8 mc_addr[ETH_ALEN];
@@ -1915,9 +1911,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
/* read entry */
hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
- tmp_mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
- tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
/*del the port*/
if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
mskid = mac_entry->port_num;
@@ -2084,8 +2077,9 @@ static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
* @dsaf_id: dsa fabric id
* @xge_ge_work_mode
*/
-void hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
- enum dsaf_port_rate_mode rate_mode)
+static void
+hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
+ enum dsaf_port_rate_mode rate_mode)
{
u32 port_work_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index acf29633ec79..16294cd3c954 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -340,7 +340,8 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
* bit18-19 for com/dfx
* @dereset: false - request reset, true - drop reset
*/
-void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
+static void
+hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
{
u32 reg_addr;
@@ -362,7 +363,7 @@ void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
* bit18-19 for com/dfx
* @dereset: false - request reset, true - drop reset
*/
-void
+static void
hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
{
hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
@@ -370,7 +371,7 @@ hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
msk, dereset);
}
-void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
+static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
{
if (!dereset) {
dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
@@ -384,7 +385,7 @@ void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
}
}
-void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
+static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
{
hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
HNS_ROCE_RESET_FUNC, 0, dereset);
@@ -568,7 +569,7 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
return phy_if;
}
-int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+static int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
{
u32 val = 0;
int ret;
@@ -586,7 +587,7 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
return 0;
}
-int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
{
union acpi_object *obj;
union acpi_object obj_args, argv4;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 93e71e27401b..d160d8c9e45b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -73,7 +73,7 @@ hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
* comm_index: common index
* return 0 - success, negative - fail
*/
-int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
+static int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
{
struct ppe_common_cb *ppe_common;
int ppe_num;
@@ -104,7 +104,8 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
return 0;
}
-void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
+static void
+hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
{
dsaf_dev->ppe_common[comm_index] = NULL;
}
@@ -203,9 +204,9 @@ static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common)
enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0);
- mdelay(100);
+ msleep(100);
dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1);
- mdelay(100);
+ msleep(100);
if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
switch (dsaf_mode) {
@@ -337,7 +338,7 @@ static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
}
}
-void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
+static void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
{
u32 i;
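
The mdelay()-to-msleep() swaps in this file (and in hns_dsaf_xgmac.c below) trade a 100 ms busy-wait for a sleep, which is legitimate because these reset sequences run in process context. A hypothetical wrapper showing the shape of the sequence:

#include <linux/delay.h>

/* srst() stands in for the per-port reset callbacks patched above */
static void toggle_reset(void (*srst)(void *ctx, int dereset), void *ctx)
{
	srst(ctx, 0);		/* request reset */
	/*
	 * mdelay(100) would busy-wait, holding the CPU for the full
	 * 100 ms; msleep(100) sleeps instead, letting the scheduler
	 * run other work while the hardware settles.
	 */
	msleep(100);
	srst(ctx, 1);		/* drop reset */
	msleep(100);
}
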
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index e2e28532e4dc..9d76e2e54f9d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -705,7 +705,7 @@ void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
}
}
-int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
+static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
switch (dsaf_dev->dsaf_mode) {
case DSAF_MODE_ENABLE_FIX:
@@ -741,7 +741,7 @@ int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
}
}
-void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 51e7e9f5af49..ba4316910dea 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -215,10 +215,10 @@ static void hns_xgmac_init(void *mac_drv)
u32 port = drv->mac_id;
dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0);
- mdelay(100);
+ msleep(100);
dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1);
- mdelay(100);
+ msleep(100);
hns_xgmac_lf_rf_control_init(drv);
hns_xgmac_exc_irq_en(drv, 0);
@@ -311,7 +311,7 @@ static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval)
dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval);
}
-void hns_xgmac_update_stats(void *mac_drv)
+static void hns_xgmac_update_stats(void *mac_drv)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index ef9ef703d13a..9f2b552aee33 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1300,7 +1300,7 @@ static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-void hns_nic_update_stats(struct net_device *netdev)
+static void hns_nic_update_stats(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -1582,7 +1582,7 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
-void hns_nic_poll_controller(struct net_device *ndev)
+static void hns_nic_poll_controller(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
unsigned long flags;
@@ -1935,7 +1935,7 @@ static int hns_nic_uc_unsync(struct net_device *netdev,
*
* return void
*/
-void hns_set_multicast_list(struct net_device *ndev)
+static void hns_set_multicast_list(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
@@ -1957,7 +1957,7 @@ void hns_set_multicast_list(struct net_device *ndev)
}
}
-void hns_nic_set_rx_mode(struct net_device *ndev)
+static void hns_nic_set_rx_mode(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
@@ -2022,7 +2022,8 @@ static void hns_nic_get_stats64(struct net_device *ndev,
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -2032,7 +2033,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
is_multicast_ether_addr(eth_hdr->h_dest))
return 0;
else
- return fallback(ndev, skb);
+ return fallback(ndev, skb, NULL);
}
static const struct net_device_ops hns_nic_netdev_ops = {
@@ -2377,7 +2378,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
}
priv->fwnode = &ae_node->fwnode;
} else if (is_acpi_node(dev->fwnode)) {
- struct acpi_reference_args args;
+ struct fwnode_reference_args args;
if (acpi_dev_found(hns_enet_acpi_match[0].id))
priv->enet_ver = AE_VERSION_1;
@@ -2393,7 +2394,11 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
dev_err(dev, "not find ae-handle\n");
goto out_read_prop_fail;
}
- priv->fwnode = acpi_fwnode_handle(args.adev);
+ if (!is_acpi_device_node(args.fwnode)) {
+ ret = -EINVAL;
+ goto out_read_prop_fail;
+ }
+ priv->fwnode = args.fwnode;
} else {
dev_err(dev, "cannot read cfg data from OF or acpi\n");
return -ENXIO;
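
For context: in v4.19 the .ndo_select_queue prototype replaced the void *accel_priv argument with struct net_device *sb_dev, and select_queue_fallback_t gained the same parameter, which is why the hunk above passes a third argument to fallback(). A self-contained sketch with a hypothetical demo_select_queue():

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>

static u16 demo_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     struct net_device *sb_dev,
			     select_queue_fallback_t fallback)
{
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;

	/* pin multicast management traffic to queue 0, as the driver does */
	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return 0;

	/* the fallback helper now also takes the subordinate device */
	return fallback(ndev, skb, sb_dev);
}
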
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 2e14a3ae1d8b..08f3c4743f74 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -307,6 +307,7 @@ static int __lb_setup(struct net_device *ndev,
break;
case MAC_LOOP_PHY_NONE:
ret = hns_nic_config_phy_loopback(phy_dev, 0x0);
+ /* fall through */
case MAC_LOOP_NONE:
if (!ret && h->dev->ops->set_loopback) {
if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
@@ -658,8 +659,8 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
* @dev: net device
* @param: ethtool parameter
*/
-void hns_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *param)
+static void hns_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *param)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
@@ -808,7 +809,8 @@ static int hns_set_coalesce(struct net_device *net_dev,
* @dev: net device
* @ch: channel info.
*/
-void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
+static void
+hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
@@ -825,8 +827,8 @@ void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
* @stats: statistics info.
* @data: statistics data.
*/
-void hns_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
+static void hns_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
{
u64 *p = data;
struct hns_nic_priv *priv = netdev_priv(netdev);
@@ -883,7 +885,7 @@ void hns_get_ethtool_stats(struct net_device *netdev,
* @stats: string set ID.
* @data: objects data.
*/
-void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -973,7 +975,7 @@ void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
*
* Return string set count.
*/
-int hns_get_sset_count(struct net_device *netdev, int stringset)
+static int hns_get_sset_count(struct net_device *netdev, int stringset)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -1007,7 +1009,7 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
*
* Return 0 on success, negative on failure.
*/
-int hns_phy_led_set(struct net_device *netdev, int value)
+static int hns_phy_led_set(struct net_device *netdev, int value)
{
int retval;
struct phy_device *phy_dev = netdev->phydev;
@@ -1029,7 +1031,8 @@ int hns_phy_led_set(struct net_device *netdev, int value)
*
* Return 0 on success, negative on failure.
*/
-int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+static int
+hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -1103,8 +1106,8 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
* @cmd: ethtool cmd
* @date: register data
*/
-void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
- void *data)
+static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
+ void *data)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
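
The /* fall through */ comment added in the __lb_setup() hunk above is the marker GCC's -Wimplicit-fallthrough recognizes: MAC_LOOP_PHY_NONE intentionally continues into MAC_LOOP_NONE so the common teardown also runs. A stripped-down, hypothetical illustration of the same control flow:

static int demo_lb_teardown(int loop_mode, int *phy_lb, int *mac_lb)
{
	switch (loop_mode) {
	case 2:	/* stands in for MAC_LOOP_PHY_NONE */
		*phy_lb = 0;	/* undo the PHY loopback first ... */
		/* fall through */
	case 1:	/* stands in for MAC_LOOP_NONE */
		*mac_lb = 0;	/* ... then the shared MAC teardown */
		break;
	default:
		return -1;
	}

	return 0;
}
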
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 9d79dad2c6aa..fff5be8078ac 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -1,14 +1,7 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/list.h>
-#include <linux/slab.h>
#include <linux/spinlock.h>
#include "hnae3.h"
@@ -41,13 +34,13 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client,
{
switch (client->type) {
case HNAE3_CLIENT_KNIC:
- hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_UNIC:
- hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_ROCE:
- hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
break;
default:
break;
@@ -61,16 +54,16 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
switch (client->type) {
case HNAE3_CLIENT_KNIC:
- inited = hnae_get_bit(ae_dev->flag,
+ inited = hnae3_get_bit(ae_dev->flag,
HNAE3_KNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_UNIC:
- inited = hnae_get_bit(ae_dev->flag,
+ inited = hnae3_get_bit(ae_dev->flag,
HNAE3_UNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_ROCE:
- inited = hnae_get_bit(ae_dev->flag,
- HNAE3_ROCE_CLIENT_INITED_B);
+ inited = hnae3_get_bit(ae_dev->flag,
+ HNAE3_ROCE_CLIENT_INITED_B);
break;
default:
break;
@@ -86,7 +79,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
/* check if this client matches the type of ae_dev */
if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
- hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
+ hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
return 0;
}
@@ -95,7 +88,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
ret = ae_dev->ops->init_client_instance(client, ae_dev);
if (ret) {
dev_err(&ae_dev->pdev->dev,
- "fail to instantiate client\n");
+ "fail to instantiate client, ret = %d\n", ret);
return ret;
}
@@ -135,7 +128,8 @@ int hnae3_register_client(struct hnae3_client *client)
ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
- "match and instantiation failed for port\n");
+ "match and instantiation failed for port, ret = %d\n",
+ ret);
}
exit:
@@ -185,11 +179,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
ae_dev->ops = ae_algo->ops;
ret = ae_algo->ops->init_ae_dev(ae_dev);
if (ret) {
- dev_err(&ae_dev->pdev->dev, "init ae_dev error.\n");
+ dev_err(&ae_dev->pdev->dev,
+ "init ae_dev error, ret = %d\n", ret);
continue;
}
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
/* check the client list for the match with this ae_dev type and
* initialize the figure out client instance
@@ -198,7 +193,8 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
- "match and instantiation failed\n");
+ "match and instantiation failed, ret = %d\n",
+ ret);
}
}
@@ -218,7 +214,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_dev */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
- if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
continue;
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -232,7 +228,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
hnae3_match_n_instantiate(client, ae_dev, false);
ae_algo->ops->uninit_ae_dev(ae_dev);
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
}
list_del(&ae_algo->node);
@@ -271,11 +267,12 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
/* ae_dev init should set flag */
ret = ae_dev->ops->init_ae_dev(ae_dev);
if (ret) {
- dev_err(&ae_dev->pdev->dev, "init ae_dev error\n");
+ dev_err(&ae_dev->pdev->dev,
+ "init ae_dev error, ret = %d\n", ret);
goto out_err;
}
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
break;
}
@@ -286,7 +283,8 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
- "match and instantiation failed\n");
+ "match and instantiation failed, ret = %d\n",
+ ret);
}
out_err:
@@ -306,7 +304,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_algo */
list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
- if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
continue;
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -317,7 +315,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
hnae3_match_n_instantiate(client, ae_dev, false);
ae_algo->ops->uninit_ae_dev(ae_dev);
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
}
list_del(&ae_dev->node);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 8acb1d116a02..67befff0bfc5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HNAE3_H
#define __HNAE3_H
@@ -62,10 +56,10 @@
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
#define hnae3_dev_roce_supported(hdev) \
- hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+ hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
#define hnae3_dev_dcb_supported(hdev) \
- hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+ hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -167,7 +161,6 @@ struct hnae3_client_ops {
#define HNAE3_CLIENT_NAME_LENGTH 16
struct hnae3_client {
char name[HNAE3_CLIENT_NAME_LENGTH];
- u16 version;
unsigned long state;
enum hnae3_client_type type;
const struct hnae3_client_ops *ops;
@@ -436,7 +429,6 @@ struct hnae3_dcb_ops {
struct hnae3_ae_algo {
const struct hnae3_ae_ops *ops;
struct list_head node;
- char name[HNAE3_CLASS_NAME_SIZE];
const struct pci_device_id *pdev_id_table;
};
@@ -509,17 +501,17 @@ struct hnae3_handle {
u32 numa_node_mask; /* for multi-chip support */
};
-#define hnae_set_field(origin, mask, shift, val) \
+#define hnae3_set_field(origin, mask, shift, val) \
do { \
(origin) &= (~(mask)); \
(origin) |= ((val) << (shift)) & (mask); \
} while (0)
-#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
-#define hnae_set_bit(origin, shift, val) \
- hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
-#define hnae_get_bit(origin, shift) \
- hnae_get_field((origin), (0x1 << (shift)), (shift))
+#define hnae3_set_bit(origin, shift, val) \
+ hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
+#define hnae3_get_bit(origin, shift) \
+ hnae3_get_field((origin), (0x1 << (shift)), (shift))
void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
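
The renamed bit-field helpers are plain mask/shift macros, so their behaviour can be checked in isolation; demo_pack() below is a hypothetical usage sketch with made-up mask and shift values:

/* definitions exactly as added in the hunk above */
#define hnae3_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
#define hnae3_set_bit(origin, shift, val) \
	hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
#define hnae3_get_bit(origin, shift) \
	hnae3_get_field((origin), (0x1 << (shift)), (shift))

/* pack a 4-bit type field and a valid bit into one descriptor word */
static unsigned int demo_pack(void)
{
	unsigned int w = 0;

	hnae3_set_field(w, 0xf0U, 4, 0x5);	/* bits 7:4 = 0x5 */
	hnae3_set_bit(w, 0, 1);			/* bit 0 = valid */
	return hnae3_get_field(w, 0xf0U, 4);	/* -> 0x5 */
}
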
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index eb82700da7d0..ea5f8a84070d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include "hnae3.h"
#include "hns3_enet.h"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 25a73bb2e642..3554dca7a680 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
@@ -56,15 +50,16 @@ static const struct pci_device_id hns3_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
-static irqreturn_t hns3_irq_handle(int irq, void *dev)
+static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
- struct hns3_enet_tqp_vector *tqp_vector = dev;
+ struct hns3_enet_tqp_vector *tqp_vector = vector;
napi_schedule(&tqp_vector->napi);
@@ -239,7 +234,28 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
- int ret;
+ int i, ret;
+
+ if (kinfo->num_tc <= 1) {
+ netdev_reset_tc(netdev);
+ } else {
+ ret = netdev_set_num_tc(netdev, kinfo->num_tc);
+ if (ret) {
+ netdev_err(netdev,
+ "netdev_set_num_tc fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (!kinfo->tc_info[i].enable)
+ continue;
+
+ netdev_set_tc_queue(netdev,
+ kinfo->tc_info[i].tc,
+ kinfo->tc_info[i].tqp_count,
+ kinfo->tc_info[i].tqp_offset);
+ }
+ }
ret = netif_set_real_num_tx_queues(netdev, queue_size);
if (ret) {
@@ -312,7 +328,9 @@ out_start_err:
static int hns3_nic_net_open(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- int ret;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo;
+ int i, ret;
netif_carrier_off(netdev);
@@ -327,6 +345,12 @@ static int hns3_nic_net_open(struct net_device *netdev)
return ret;
}
+ kinfo = &h->kinfo;
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ netdev_set_prio_tc_map(netdev, i,
+ kinfo->prio_tc[i]);
+ }
+
priv->ae_handle->last_reset_time = jiffies;
return 0;
}
@@ -493,8 +517,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
/* find the txbd field values */
*paylen = skb->len - hdr_len;
- hnae_set_bit(*type_cs_vlan_tso,
- HNS3_TXD_TSO_B, 1);
+ hnae3_set_bit(*type_cs_vlan_tso,
+ HNS3_TXD_TSO_B, 1);
/* get MSS for TSO */
*mss = skb_shinfo(skb)->gso_size;
@@ -586,21 +610,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute L2 header size for normal packet, defined in 2 Bytes */
l2_len = l3.hdr - skb->data;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, l2_len >> 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, l2_len >> 1);
/* tunnel packet*/
if (skb->encapsulation) {
/* compute OL2 header size, defined in 2 Bytes */
ol2_len = l2_len;
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, ol2_len >> 1);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, ol2_len >> 1);
/* compute OL3 header size, defined in 4 Bytes */
ol3_len = l4.hdr - l3.hdr;
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
- HNS3_TXD_L3LEN_S, ol3_len >> 2);
+ hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
+ HNS3_TXD_L3LEN_S, ol3_len >> 2);
/* MAC in UDP, MAC in GRE (0x6558)*/
if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
@@ -609,16 +633,17 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute OL4 header size, defined in 4 Bytes. */
ol4_len = l2_hdr - l4.hdr;
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, ol4_len >> 2);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
+ ol4_len >> 2);
/* switch IP header ptr from outer to inner header */
l3.hdr = skb_inner_network_header(skb);
/* compute inner l2 header size, defined in 2 Bytes. */
l2_len = l3.hdr - l2_hdr;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, l2_len >> 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, l2_len >> 1);
} else {
/* skb packet types not supported by hardware,
* txbd len field is not filled.
@@ -634,22 +659,24 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute inner(/normal) L3 header size, defined in 4 Bytes */
l3_len = l4.hdr - l3.hdr;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
- HNS3_TXD_L3LEN_S, l3_len >> 2);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
+ HNS3_TXD_L3LEN_S, l3_len >> 2);
/* compute inner(/normal) L4 header size, defined in 4 Bytes */
switch (l4_proto) {
case IPPROTO_TCP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, l4.tcp->doff);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S, l4.tcp->doff);
break;
case IPPROTO_SCTP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S,
+ (sizeof(struct sctphdr) >> 2));
break;
case IPPROTO_UDP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S,
+ (sizeof(struct udphdr) >> 2));
break;
default:
/* skb packet types not supported by hardware,
@@ -703,32 +730,34 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
/* define outer network header type.*/
if (skb->protocol == htons(ETH_P_IP)) {
if (skb_is_gso(skb))
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_CSUM);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_CSUM);
else
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_NO_CSUM);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_NO_CSUM);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
- HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
+ hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
}
/* define tunnel type(OL4).*/
switch (l4_proto) {
case IPPROTO_UDP:
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_M,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_MAC_IN_UDP);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_TUNTYPE_M,
+ HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_MAC_IN_UDP);
break;
case IPPROTO_GRE:
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_M,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_NVGRE);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_TUNTYPE_M,
+ HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_NVGRE);
break;
default:
/* drop the skb tunnel packet if hardware doesn't support,
@@ -749,43 +778,43 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
}
if (l3.v4->version == 4) {
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
- HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+ HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
if (skb_is_gso(skb))
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
-
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
} else if (l3.v6->version == 6) {
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
- HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+ HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
}
switch (l4_proto) {
case IPPROTO_TCP:
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_TCP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_TCP);
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
break;
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_UDP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_UDP);
break;
case IPPROTO_SCTP:
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_SCTP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_SCTP);
break;
default:
/* drop the skb tunnel packet if hardware doesn't support,
@@ -807,11 +836,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
/* Config bd buffer end */
- hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
- HNS3_TXD_BDTYPE_S, 0);
- hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
- hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
- hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
+ hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
+ HNS3_TXD_BDTYPE_S, 0);
+ hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
+ hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
+ hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
static int hns3_fill_desc_vtags(struct sk_buff *skb,
@@ -844,10 +873,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
* and use inner_vtag in one tag case.
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
- hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+ hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
*out_vtag = vlan_tag;
} else {
- hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+ hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
}
} else if (skb->protocol == htons(ETH_P_8021Q)) {
@@ -880,7 +909,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
u16 out_vtag = 0;
u32 paylen = 0;
u16 mss = 0;
- __be16 protocol;
u8 ol4_proto;
u8 il4_proto;
int ret;
@@ -909,7 +937,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_reset_mac_len(skb);
- protocol = skb->protocol;
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
if (ret)
@@ -1135,7 +1162,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
wmb(); /* Commit all data before submit */
- hnae_queue_xmit(ring->tqp, buf_num);
+ hnae3_queue_xmit(ring->tqp, buf_num);
return NETDEV_TX_OK;
@@ -1304,7 +1331,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
u16 mode = mqprio_qopt->mode;
u8 hw = mqprio_qopt->qopt.hw;
bool if_running;
- unsigned int i;
int ret;
if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
@@ -1328,24 +1354,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
goto out;
- if (tc <= 1) {
- netdev_reset_tc(netdev);
- } else {
- ret = netdev_set_num_tc(netdev, tc);
- if (ret)
- goto out;
-
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (!kinfo->tc_info[i].enable)
- continue;
-
- netdev_set_tc_queue(netdev,
- kinfo->tc_info[i].tc,
- kinfo->tc_info[i].tqp_count,
- kinfo->tc_info[i].tqp_offset);
- }
- }
-
ret = hns3_nic_set_real_num_queue(netdev);
out:
@@ -1665,6 +1673,9 @@ static struct pci_driver hns3_driver = {
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct pci_dev *pdev = h->pdev;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1698,12 +1709,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ if (pdev->revision != 0x20)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb)
{
- unsigned int order = hnae_page_order(ring);
+ unsigned int order = hnae3_page_order(ring);
struct page *p;
p = dev_alloc_pages(order);
@@ -1714,7 +1728,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
cb->page_offset = 0;
cb->reuse_flag = 0;
cb->buf = page_address(p);
- cb->length = hnae_page_size(ring);
+ cb->length = hnae3_page_size(ring);
cb->type = DESC_TYPE_PAGE;
return 0;
@@ -1780,33 +1794,27 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring)
/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
+ int size = ring->desc_num * sizeof(ring->desc[0]);
+
hns3_free_buffers(ring);
- dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- DMA_BIDIRECTIONAL);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
int size = ring->desc_num * sizeof(ring->desc[0]);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
+ &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
-
return 0;
}
@@ -1887,7 +1895,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
(*bytes) += desc_cb->length;
- /* desc_cb will be cleaned, after hnae_free_buffer_detach*/
+ /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
hns3_free_buffer_detach(ring, ring->next_to_clean);
ring_ptr_move_fw(ring, next_to_clean);
@@ -1917,7 +1925,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
if (is_ring_empty(ring) || head == ring->next_to_clean)
return true; /* no data to poll */
- if (!is_valid_clean_head(ring, head)) {
+ if (unlikely(!is_valid_clean_head(ring, head))) {
netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean);
@@ -2016,15 +2024,15 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
bool twobufs;
twobufs = ((PAGE_SIZE < 8192) &&
- hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
+ hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
desc = &ring->desc[ring->next_to_clean];
size = le16_to_cpu(desc->rx.size);
- truesize = hnae_buf_size(ring);
+ truesize = hnae3_buf_size(ring);
if (!twobufs)
- last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+ last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
@@ -2076,13 +2084,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
return;
/* check if hardware has done checksum */
- if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
+ if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
return;
- if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
+ if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
netdev_err(netdev, "L3/L4 error pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.l3l4_csum_err++;
@@ -2091,23 +2099,25 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
return;
}
- l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
- HNS3_RXD_L3ID_S);
- l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
- HNS3_RXD_L4ID_S);
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
+ HNS3_RXD_L4ID_S);
- ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
+ ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
+ HNS3_RXD_OL4ID_S);
switch (ol4_type) {
case HNS3_OL4_TYPE_MAC_IN_UDP:
case HNS3_OL4_TYPE_NVGRE:
skb->csum_level = 1;
+ /* fall through */
case HNS3_OL4_TYPE_NO_TUN:
/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
- if (l3_type == HNS3_L3_TYPE_IPV4 ||
- (l3_type == HNS3_L3_TYPE_IPV6 &&
- (l4_type == HNS3_L4_TYPE_UDP ||
- l4_type == HNS3_L4_TYPE_TCP ||
- l4_type == HNS3_L4_TYPE_SCTP)))
+ if ((l3_type == HNS3_L3_TYPE_IPV4 ||
+ l3_type == HNS3_L3_TYPE_IPV6) &&
+ (l4_type == HNS3_L4_TYPE_UDP ||
+ l4_type == HNS3_L4_TYPE_TCP ||
+ l4_type == HNS3_L4_TYPE_SCTP))
skb->ip_summed = CHECKSUM_UNNECESSARY;
break;
}
@@ -2135,8 +2145,8 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
#define HNS3_STRP_OUTER_VLAN 0x1
#define HNS3_STRP_INNER_VLAN 0x2
- switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
- HNS3_RXD_STRP_TAGP_S)) {
+ switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+ HNS3_RXD_STRP_TAGP_S)) {
case HNS3_STRP_OUTER_VLAN:
vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
break;
@@ -2174,7 +2184,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* Check valid BD */
- if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+ if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
return -EFAULT;
va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
@@ -2229,7 +2239,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
ring_ptr_move_fw(ring, next_to_clean);
- while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+ while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2257,7 +2267,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
vlan_tag);
}
- if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
+ if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
((u64 *)desc)[0], ((u64 *)desc)[1]);
u64_stats_update_begin(&ring->syncp);
@@ -2269,7 +2279,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
}
if (unlikely((!desc->rx.pkt_len) ||
- hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
+ hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
netdev_err(netdev, "truncated pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.err_pkt_len++;
@@ -2279,7 +2289,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
return -EFAULT;
}
- if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
+ if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
netdev_err(netdev, "L2 error pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.l2_err++;
@@ -2532,10 +2542,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
tx_ring = tqp_vector->tx_group.ring;
if (tx_ring) {
cur_chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
+ hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_TX);
+ hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
cur_chain->next = NULL;
@@ -2549,12 +2559,12 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->next = chain;
chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae_set_field(chain->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S,
- HNAE3_RING_GL_TX);
+ hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_TX);
+ hnae3_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ HNAE3_RING_GL_TX);
cur_chain = chain;
}
@@ -2564,10 +2574,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
if (!tx_ring && rx_ring) {
cur_chain->next = NULL;
cur_chain->tqp_index = rx_ring->tqp->tqp_index;
- hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_RX);
+ hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
rx_ring = rx_ring->next;
}
@@ -2579,10 +2589,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->next = chain;
chain->tqp_index = rx_ring->tqp->tqp_index;
- hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_RX);
+ hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
cur_chain = chain;
@@ -2745,10 +2755,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
if (ret)
return ret;
- ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
- if (ret)
- return ret;
-
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
@@ -2809,7 +2815,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->io_base = q->io_base;
}
- hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
+ hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
ring->tqp = q;
ring->desc = NULL;
@@ -2969,13 +2975,33 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1));
- hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
- hns3_buf_size2type(ring->buf_size));
hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
ring->desc_num / 8 - 1);
}
}
+static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
+{
+ struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+ int i;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
+ int j;
+
+ if (!tc_info->enable)
+ continue;
+
+ for (j = 0; j < tc_info->tqp_count; j++) {
+ struct hnae3_queue *q;
+
+ q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
+ hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
+ tc_info->tc);
+ }
+ }
+}
+
int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
@@ -3081,7 +3107,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->dev = &pdev->dev;
priv->netdev = netdev;
priv->ae_handle = handle;
- priv->ae_handle->reset_level = HNAE3_NONE_RESET;
priv->ae_handle->last_reset_time = jiffies;
priv->tx_timeout_count = 0;
@@ -3102,6 +3127,11 @@ static int hns3_client_init(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ if (handle->flags & HNAE3_SUPPORT_VF)
+ handle->reset_level = HNAE3_VF_RESET;
+ else
+ handle->reset_level = HNAE3_FUNC_RESET;
+
ret = hns3_get_ring_config(priv);
if (ret) {
ret = -ENOMEM;
@@ -3208,7 +3238,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
struct net_device *ndev = kinfo->netdev;
bool if_running;
int ret;
- u8 i;
if (tc > HNAE3_MAX_TC)
return -EINVAL;
@@ -3218,10 +3247,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if_running = netif_running(ndev);
- ret = netdev_set_num_tc(ndev, tc);
- if (ret)
- return ret;
-
if (if_running) {
(void)hns3_nic_net_stop(ndev);
msleep(100);
@@ -3232,27 +3257,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if (ret)
goto err_out;
- if (tc <= 1) {
- netdev_reset_tc(ndev);
- goto out;
- }
-
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
-
- if (tc_info->enable)
- netdev_set_tc_queue(ndev,
- tc_info->tc,
- tc_info->tqp_count,
- tc_info->tqp_offset);
- }
-
- for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
- netdev_set_prio_tc_map(ndev, i,
- kinfo->prio_tc[i]);
- }
-
-out:
ret = hns3_nic_set_real_num_queue(ndev);
err_out:
@@ -3409,6 +3413,8 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
rx_ring->next_to_use = 0;
}
+ hns3_init_tx_ring_tc(priv);
+
return 0;
}
@@ -3418,7 +3424,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
struct net_device *ndev = kinfo->netdev;
if (!netif_running(ndev))
- return -EIO;
+ return 0;
return hns3_nic_net_stop(ndev);
}
@@ -3458,10 +3464,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- ret = hns3_get_ring_config(priv);
- if (ret)
- return ret;
-
ret = hns3_nic_init_vector_data(priv);
if (ret)
return ret;
@@ -3493,10 +3495,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
if (ret)
netdev_err(netdev, "uninit ring error\n");
- hns3_put_ring_config(priv);
-
- priv->ring_data = NULL;
-
hns3_uninit_mac_addr(netdev);
return ret;
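
Both this file and hclge_cmd.c below replace the kzalloc()-plus-dma_map_single() pair with a single coherent allocation, which drops the mapping-error unwind path entirely. A sketch of the pattern with a hypothetical demo_ring; dma_zalloc_coherent() is the v4.19-era helper (later folded into dma_alloc_coherent()):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct demo_ring {
	void *desc;
	dma_addr_t desc_dma_addr;
	int desc_num;
};

static int demo_alloc_desc(struct device *dev, struct demo_ring *ring,
			   size_t desc_size)
{
	size_t size = ring->desc_num * desc_size;

	/* one call allocates, zeroes, and maps the descriptor memory */
	ring->desc = dma_zalloc_coherent(dev, size, &ring->desc_dma_addr,
					 GFP_KERNEL);
	return ring->desc ? 0 : -ENOMEM;
}

static void demo_free_desc(struct device *dev, struct demo_ring *ring,
			   size_t desc_size)
{
	if (ring->desc) {
		dma_free_coherent(dev, ring->desc_num * desc_size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}
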
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 3b083d5ae9ce..a02a96aee2a2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HNS3_ENET_H
#define __HNS3_ENET_H
@@ -43,7 +37,7 @@ enum hns3_nic_state {
#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040
#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044
#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048
-#define HNS3_RING_TX_RING_BD_LEN_REG 0x0004C
+#define HNS3_RING_TX_RING_TC_REG 0x00050
#define HNS3_RING_TX_RING_TAIL_REG 0x00058
#define HNS3_RING_TX_RING_HEAD_REG 0x0005C
#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060
@@ -499,7 +493,6 @@ struct hns3_enet_tqp_vector {
u16 num_tqps; /* total number of tqps in TQP vector */
- cpumask_t affinity_mask;
char name[HNAE3_INT_NAME_LEN];
/* when 0 should adjust interrupt coalesce parameter */
@@ -591,7 +584,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define hns3_write_dev(a, reg, value) \
hns3_write_reg((a)->io_base, (reg), (value))
-#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
+#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
@@ -601,9 +594,9 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
-#define hnae_buf_size(_ring) ((_ring)->buf_size)
-#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
-#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
+#define hnae3_buf_size(_ring) ((_ring)->buf_size)
+#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
+#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring))
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 40c0425b4023..f70ee6910ee2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
#include <linux/string.h>
@@ -59,7 +53,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
-#define HNS3_SELF_TEST_TPYE_NUM 1
+#define HNS3_SELF_TEST_TYPE_NUM 2
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
@@ -84,6 +78,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
return -EOPNOTSUPP;
switch (loop) {
+ case HNAE3_MAC_INTER_LOOP_SERDES:
case HNAE3_MAC_INTER_LOOP_MAC:
ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
@@ -201,7 +196,9 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
rx_group = &ring->tqp_vector->rx_group;
pre_rx_pkt = rx_group->total_packets;
+ preempt_disable();
hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data);
+ preempt_enable();
rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt);
rx_group->total_packets = pre_rx_pkt;
@@ -291,7 +288,7 @@ static void hns3_self_test(struct net_device *ndev,
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- int st_param[HNS3_SELF_TEST_TPYE_NUM][2];
+ int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
bool if_running = netif_running(ndev);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
bool dis_vlan_filter;
@@ -307,6 +304,10 @@ static void hns3_self_test(struct net_device *ndev,
st_param[HNAE3_MAC_INTER_LOOP_MAC][1] =
h->flags & HNAE3_SUPPORT_MAC_LOOPBACK;
+ st_param[HNAE3_MAC_INTER_LOOP_SERDES][0] = HNAE3_MAC_INTER_LOOP_SERDES;
+ st_param[HNAE3_MAC_INTER_LOOP_SERDES][1] =
+ h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK;
+
if (if_running)
dev_close(ndev);
@@ -320,7 +321,7 @@ static void hns3_self_test(struct net_device *ndev,
set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
- for (i = 0; i < HNS3_SELF_TEST_TPYE_NUM; i++) {
+ for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
if (!st_param[i][1])
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index c36d64710fa6..ac13cb2b168e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -18,8 +12,7 @@
#include "hclge_main.h"
#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
-#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \
- DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
static int hclge_ring_space(struct hclge_cmq_ring *ring)
@@ -46,31 +39,24 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclge_desc);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+ size, &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
-
return 0;
}
static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
- dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- DMA_BIDIRECTIONAL);
+ int size = ring->desc_num * sizeof(struct hclge_desc);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(cmq_ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
@@ -80,7 +66,7 @@ static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
(ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
int ret;
- ring->flag = ring_type;
+ ring->ring_type = ring_type;
ring->dev = hdev;
ret = hclge_alloc_cmd_desc(ring);
@@ -111,8 +97,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
if (is_read)
desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
- else
- desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
@@ -121,26 +105,26 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
struct hclge_dev *hdev = ring->dev;
struct hclge_hw *hw = &hdev->hw;
- if (ring->flag == HCLGE_TYPE_CSQ) {
+ if (ring->ring_type == HCLGE_TYPE_CSQ) {
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
- (u32)dma);
+ lower_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
- (u32)((dma >> 31) >> 1));
+ upper_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
HCLGE_NIC_CMQ_ENABLE);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
} else {
hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
- (u32)dma);
+ lower_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
- (u32)((dma >> 31) >> 1));
+ upper_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
HCLGE_NIC_CMQ_ENABLE);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}
}
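The old `(u32)((dma >> 31) >> 1)` split the 32-bit shift in two so the expression stayed defined even where dma_addr_t is only 32 bits wide; lower_32_bits()/upper_32_bits() express the same split readably. A runnable userspace model with a made-up address:

#include <stdint.h>
#include <stdio.h>

/* Userspace model of the kernel's lower_32_bits()/upper_32_bits(). */
static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t dma = 0x0000000180001000ULL;	/* hypothetical ring base */

	printf("BASEADDR_L = 0x%08x\n", lower_32(dma));	/* 0x80001000 */
	printf("BASEADDR_H = 0x%08x\n", upper_32(dma));	/* 0x00000001 */
	return 0;
}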
@@ -152,33 +136,27 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw)
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
- struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
+ struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
struct hclge_cmq_ring *csq = &hw->cmq.csq;
- u16 ntc = csq->next_to_clean;
- struct hclge_desc *desc;
- int clean = 0;
u32 head;
+ int clean;
- desc = &csq->desc[ntc];
head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */
if (!is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head,
- csq->next_to_use, csq->next_to_clean);
- return 0;
- }
-
- while (head != ntc) {
- memset(desc, 0, sizeof(*desc));
- ntc++;
- if (ntc == csq->desc_num)
- ntc = 0;
- desc = &csq->desc[ntc];
- clean++;
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+ csq->next_to_use, csq->next_to_clean);
+ dev_warn(&hdev->pdev->dev,
+ "Disabling any further commands to IMP firmware\n");
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ dev_warn(&hdev->pdev->dev,
+ "IMP firmware watchdog reset soon expected!\n");
+ return -EIO;
}
- csq->next_to_clean = ntc;
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
return clean;
}
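The rewritten clean path replaces the descriptor-by-descriptor loop with one modular subtraction from the hardware head. A runnable model of that arithmetic, including the wrap-around case:

#include <stdio.h>

/* Entries between next_to_clean and the hardware head on a ring of
 * desc_num descriptors, as in the new hclge_cmd_csq_clean(). */
static int ring_clean(int head, int next_to_clean, int desc_num)
{
	return (head - next_to_clean + desc_num) % desc_num;
}

int main(void)
{
	printf("%d\n", ring_clean(7, 3, 1024));		/* 4: no wrap */
	printf("%d\n", ring_clean(2, 1020, 1024));	/* 6: head wrapped */
	return 0;
}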
@@ -216,7 +194,7 @@ static bool hclge_is_special_opcode(u16 opcode)
**/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
- struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
+ struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
struct hclge_desc *desc_to_use;
bool complete = false;
u32 timeout = 0;
@@ -227,7 +205,8 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
- if (num > hclge_ring_space(&hw->cmq.csq)) {
+ if (num > hclge_ring_space(&hw->cmq.csq) ||
+ test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
@@ -256,33 +235,34 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
*/
if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
do {
- if (hclge_cmd_csq_done(hw))
+ if (hclge_cmd_csq_done(hw)) {
+ complete = true;
break;
+ }
udelay(1);
timeout++;
} while (timeout < hw->cmq.tx_timeout);
}
- if (hclge_cmd_csq_done(hw)) {
- complete = true;
+ if (!complete) {
+ retval = -EAGAIN;
+ } else {
handle = 0;
while (handle < num) {
/* Get the result of hardware write back */
desc_to_use = &hw->cmq.csq.desc[ntc];
desc[handle] = *desc_to_use;
- pr_debug("Get cmd desc:\n");
if (likely(!hclge_is_special_opcode(opcode)))
desc_ret = le16_to_cpu(desc[handle].retval);
else
desc_ret = le16_to_cpu(desc[0].retval);
- if ((enum hclge_cmd_return_status)desc_ret ==
- HCLGE_CMD_EXEC_SUCCESS)
+ if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
retval = 0;
else
retval = -EIO;
- hw->cmq.last_status = (enum hclge_cmd_status)desc_ret;
+ hw->cmq.last_status = desc_ret;
ntc++;
handle++;
if (ntc == hw->cmq.csq.desc_num)
@@ -290,15 +270,13 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
}
}
- if (!complete)
- retval = -EAGAIN;
-
/* Clean the command send queue */
handle = hclge_cmd_csq_clean(hw);
- if (handle != num) {
+ if (handle < 0)
+ retval = handle;
+ else if (handle != num)
dev_warn(&hdev->pdev->dev,
"cleaned %d, need to clean %d\n", handle, num);
- }
spin_unlock_bh(&hw->cmq.csq.lock);
@@ -369,6 +347,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
spin_lock_init(&hdev->hw.cmq.crq.lock);
hclge_cmd_init_regs(&hdev->hw);
+ clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
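hclge_cmd_init() clears HCLGE_STATE_CMD_DISABLE only once the rings are re-initialized, pairing with the set_bit() calls added in the csq-clean and vector-0 event paths. A sketch of the resulting gate; the bit number here is illustrative, not the driver's:

#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_STATE_CMD_DISABLE	4	/* illustrative value only */

/* Fail fast instead of queueing commands a dead IMP will never fetch. */
static int demo_cmd_send_allowed(unsigned long *state)
{
	if (test_bit(DEMO_STATE_CMD_DISABLE, state))
		return -EBUSY;
	return 0;
}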
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index d9aaa76c76eb..821d4c2f84bd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_CMD_H
#define __HCLGE_CMD_H
@@ -27,17 +21,10 @@ struct hclge_desc {
__le32 data[6];
};
-struct hclge_desc_cb {
- dma_addr_t dma;
- void *va;
- u32 length;
-};
-
struct hclge_cmq_ring {
dma_addr_t desc_dma_addr;
struct hclge_desc *desc;
- struct hclge_desc_cb *desc_cb;
- struct hclge_dev *dev;
+ struct hclge_dev *dev;
u32 head;
u32 tail;
@@ -45,7 +32,7 @@ struct hclge_cmq_ring {
u16 desc_num;
int next_to_use;
int next_to_clean;
- u8 flag;
+ u8 ring_type; /* cmq ring type */
spinlock_t lock; /* Command queue lock */
};
@@ -71,26 +58,19 @@ struct hclge_misc_vector {
struct hclge_cmq {
struct hclge_cmq_ring csq;
struct hclge_cmq_ring crq;
- u16 tx_timeout; /* Tx timeout */
+ u16 tx_timeout;
enum hclge_cmd_status last_status;
};
-#define HCLGE_CMD_FLAG_IN_VALID_SHIFT 0
-#define HCLGE_CMD_FLAG_OUT_VALID_SHIFT 1
-#define HCLGE_CMD_FLAG_NEXT_SHIFT 2
-#define HCLGE_CMD_FLAG_WR_OR_RD_SHIFT 3
-#define HCLGE_CMD_FLAG_NO_INTR_SHIFT 4
-#define HCLGE_CMD_FLAG_ERR_INTR_SHIFT 5
-
-#define HCLGE_CMD_FLAG_IN BIT(HCLGE_CMD_FLAG_IN_VALID_SHIFT)
-#define HCLGE_CMD_FLAG_OUT BIT(HCLGE_CMD_FLAG_OUT_VALID_SHIFT)
-#define HCLGE_CMD_FLAG_NEXT BIT(HCLGE_CMD_FLAG_NEXT_SHIFT)
-#define HCLGE_CMD_FLAG_WR BIT(HCLGE_CMD_FLAG_WR_OR_RD_SHIFT)
-#define HCLGE_CMD_FLAG_NO_INTR BIT(HCLGE_CMD_FLAG_NO_INTR_SHIFT)
-#define HCLGE_CMD_FLAG_ERR_INTR BIT(HCLGE_CMD_FLAG_ERR_INTR_SHIFT)
+#define HCLGE_CMD_FLAG_IN BIT(0)
+#define HCLGE_CMD_FLAG_OUT BIT(1)
+#define HCLGE_CMD_FLAG_NEXT BIT(2)
+#define HCLGE_CMD_FLAG_WR BIT(3)
+#define HCLGE_CMD_FLAG_NO_INTR BIT(4)
+#define HCLGE_CMD_FLAG_ERR_INTR BIT(5)
enum hclge_opcode_type {
- /* Generic command */
+ /* Generic commands */
HCLGE_OPC_QUERY_FW_VER = 0x0001,
HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
HCLGE_OPC_GBL_RST_STATUS = 0x0021,
@@ -106,18 +86,17 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_REG_NUM = 0x0040,
HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
- /* Device management command */
- /* MAC commond */
+ /* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
HCLGE_OPC_QUERY_AN_RESULT = 0x0306,
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
- /* MACSEC command */
+ HCLGE_OPC_SERDES_LOOPBACK = 0x0315,
- /* PFC/Pause CMD*/
+ /* PFC/Pause commands */
HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
HCLGE_OPC_CFG_MAC_PARA = 0x0703,
@@ -148,7 +127,7 @@ enum hclge_opcode_type {
HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
- /* Packet buffer allocate command */
+ /* Packet buffer allocate commands */
HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
@@ -156,11 +135,10 @@ enum hclge_opcode_type {
HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
- /* PTP command */
/* TQP management command */
HCLGE_OPC_SET_TQP_MAP = 0x0A01,
- /* TQP command */
+ /* TQP commands */
HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
HCLGE_OPC_QUERY_TX_STATUS = 0x0B03,
@@ -172,10 +150,10 @@ enum hclge_opcode_type {
HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
- /* TSO cmd */
+ /* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
- /* RSS cmd */
+ /* RSS commands */
HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
HCLGE_OPC_RSS_TC_MODE = 0x0D08,
@@ -184,15 +162,15 @@ enum hclge_opcode_type {
	/* Promiscuous mode command */
HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
- /* Vlan offload command */
+ /* Vlan offload commands */
HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
- /* Interrupts cmd */
+ /* Interrupts commands */
HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
- /* MAC command */
+ /* MAC commands */
HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
@@ -201,13 +179,13 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012,
- /* Multicast linear table cmd */
+ /* Multicast linear table commands */
HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020,
HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021,
HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022,
HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023,
- /* VLAN command */
+ /* VLAN commands */
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
@@ -215,7 +193,7 @@ enum hclge_opcode_type {
/* MDIO command */
HCLGE_OPC_MDIO_CONFIG = 0x1900,
- /* QCN command */
+ /* QCN commands */
HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
HCLGE_OPC_QCN_SHAPPING_IR_CFG = 0x1A03,
@@ -225,7 +203,7 @@ enum hclge_opcode_type {
HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
- /* Mailbox cmd */
+ /* Mailbox command */
HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
/* Led command */
@@ -381,8 +359,10 @@ struct hclge_pf_res_cmd {
__le16 buf_size;
__le16 msixcap_localid_ba_nic;
__le16 msixcap_localid_ba_rocee;
+#define HCLGE_MSIX_OFT_ROCEE_S 0
+#define HCLGE_MSIX_OFT_ROCEE_M GENMASK(15, 0)
#define HCLGE_PF_VEC_NUM_S 0
-#define HCLGE_PF_VEC_NUM_M (0xff << HCLGE_PF_VEC_NUM_S)
+#define HCLGE_PF_VEC_NUM_M GENMASK(7, 0)
__le16 pf_intr_vector_number;
__le16 pf_own_fun_number;
__le32 rsv[3];
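GENMASK(h, l) names the covered bit range directly instead of hiding the field width in a hand-shifted constant. A userspace model of the two masks above:

#include <stdio.h>

/* Userspace model of the kernel's GENMASK(h, l): bits l..h set. */
#define DEMO_GENMASK(h, l) \
	((~0U >> (31 - (h))) & (~0U << (l)))

int main(void)
{
	printf("0x%08x\n", DEMO_GENMASK(7, 0));		/* PF_VEC_NUM_M: 0x000000ff */
	printf("0x%08x\n", DEMO_GENMASK(15, 0));	/* MSIX_OFT_ROCEE_M: 0x0000ffff */
	return 0;
}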
@@ -471,8 +451,8 @@ struct hclge_rss_tc_mode_cmd {
u8 rsv[8];
};
-#define HCLGE_LINK_STS_B 0
-#define HCLGE_LINK_STATUS BIT(HCLGE_LINK_STS_B)
+#define HCLGE_LINK_STATUS_UP_B 0
+#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B)
struct hclge_link_status_cmd {
u8 status;
u8 rsv[23];
@@ -571,7 +551,8 @@ struct hclge_config_auto_neg_cmd {
struct hclge_config_max_frm_size_cmd {
__le16 max_frm_size;
- u8 rsv[22];
+ u8 min_frm_size;
+ u8 rsv[21];
};
enum hclge_mac_vlan_tbl_opcode {
@@ -581,13 +562,13 @@ enum hclge_mac_vlan_tbl_opcode {
	HCLGE_MAC_VLAN_LKUP, /* Lookup an entry through mac_vlan key */
};
-#define HCLGE_MAC_VLAN_BIT0_EN_B 0x0
-#define HCLGE_MAC_VLAN_BIT1_EN_B 0x1
-#define HCLGE_MAC_EPORT_SW_EN_B 0xc
-#define HCLGE_MAC_EPORT_TYPE_B 0xb
-#define HCLGE_MAC_EPORT_VFID_S 0x3
+#define HCLGE_MAC_VLAN_BIT0_EN_B 0
+#define HCLGE_MAC_VLAN_BIT1_EN_B 1
+#define HCLGE_MAC_EPORT_SW_EN_B 12
+#define HCLGE_MAC_EPORT_TYPE_B 11
+#define HCLGE_MAC_EPORT_VFID_S 3
#define HCLGE_MAC_EPORT_VFID_M GENMASK(10, 3)
-#define HCLGE_MAC_EPORT_PFID_S 0x0
+#define HCLGE_MAC_EPORT_PFID_S 0
#define HCLGE_MAC_EPORT_PFID_M GENMASK(2, 0)
struct hclge_mac_vlan_tbl_entry_cmd {
u8 flags;
@@ -603,7 +584,7 @@ struct hclge_mac_vlan_tbl_entry_cmd {
u8 rsv2[6];
};
-#define HCLGE_VLAN_MASK_EN_B 0x0
+#define HCLGE_VLAN_MASK_EN_B 0
struct hclge_mac_vlan_mask_entry_cmd {
u8 rsv0[2];
u8 vlan_mask;
@@ -634,23 +615,23 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 rsv3[2];
};
-#define HCLGE_CFG_MTA_MAC_SEL_S 0x0
+#define HCLGE_CFG_MTA_MAC_SEL_S 0
#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
-#define HCLGE_CFG_MTA_MAC_EN_B 0x7
+#define HCLGE_CFG_MTA_MAC_EN_B 7
struct hclge_mta_filter_mode_cmd {
	u8 dmac_sel_en; /* Use lowest 2 bits as sel_mode, bit 7 as enable */
u8 rsv[23];
};
-#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0x0
+#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0
struct hclge_cfg_func_mta_filter_cmd {
	u8 accept; /* Only the lowest bit is used */
u8 function_id;
u8 rsv[22];
};
-#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0x0
-#define HCLGE_CFG_MTA_ITEM_IDX_S 0x0
+#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0
+#define HCLGE_CFG_MTA_ITEM_IDX_S 0
#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0)
struct hclge_cfg_func_mta_item_cmd {
	__le16 item_idx; /* Only the lowest 12 bits are used */
@@ -795,6 +776,17 @@ struct hclge_reset_cmd {
u8 fun_reset_vfid;
u8 rsv[22];
};
+
+#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0)
+#define HCLGE_CMD_SERDES_DONE_B BIT(0)
+#define HCLGE_CMD_SERDES_SUCCESS_B BIT(1)
+struct hclge_serdes_lb_cmd {
+ u8 mask;
+ u8 enable;
+ u8 result;
+ u8 rsv[21];
+};
+
#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
#define HCLGE_DEFAULT_DV 0xA000 /* 40k bytes */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 955f0e3d5c95..f08ebb7caaaf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include "hclge_main.h"
#include "hclge_tm.h"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
index 7d808ee96694..278f21e02736 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_DCB_H__
#define __HCLGE_DCB_H__
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d318d35e598f..8577dfc799ad 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/acpi.h>
#include <linux/device.h>
@@ -793,9 +787,10 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
count += 1;
handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
- } else {
- count = -EOPNOTSUPP;
}
+
+ count++;
+ handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK;
} else if (stringset == ETH_SS_STATS) {
count = ARRAY_SIZE(g_mac_stats_string) +
ARRAY_SIZE(g_all_32bit_stats_string) +
@@ -938,18 +933,22 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_base_msix_offset =
+ hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
hdev->num_roce_msi =
- hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
- HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+ hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	/* PF should have NIC vectors and RoCE vectors;
	 * NIC vectors are queued before RoCE vectors.
*/
- hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
+ hdev->num_msi = hdev->num_roce_msi +
+ hdev->roce_base_msix_offset;
} else {
hdev->num_msi =
- hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
- HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+ hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
}
return 0;
@@ -1038,38 +1037,38 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
req = (struct hclge_cfg_param_cmd *)desc[0].data;
/* get the configuration */
- cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
- HCLGE_CFG_VMDQ_M,
- HCLGE_CFG_VMDQ_S);
- cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
- HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
- cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
- HCLGE_CFG_TQP_DESC_N_M,
- HCLGE_CFG_TQP_DESC_N_S);
-
- cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_PHY_ADDR_M,
- HCLGE_CFG_PHY_ADDR_S);
- cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_MEDIA_TP_M,
- HCLGE_CFG_MEDIA_TP_S);
- cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_RX_BUF_LEN_M,
- HCLGE_CFG_RX_BUF_LEN_S);
+ cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_VMDQ_M,
+ HCLGE_CFG_VMDQ_S);
+ cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
+ cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_TQP_DESC_N_M,
+ HCLGE_CFG_TQP_DESC_N_S);
+
+ cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_PHY_ADDR_M,
+ HCLGE_CFG_PHY_ADDR_S);
+ cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_MEDIA_TP_M,
+ HCLGE_CFG_MEDIA_TP_S);
+ cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_RX_BUF_LEN_M,
+ HCLGE_CFG_RX_BUF_LEN_S);
/* get mac_address */
mac_addr_tmp = __le32_to_cpu(req->param[2]);
- mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
- HCLGE_CFG_MAC_ADDR_H_M,
- HCLGE_CFG_MAC_ADDR_H_S);
+ mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_MAC_ADDR_H_M,
+ HCLGE_CFG_MAC_ADDR_H_S);
mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
- cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
- HCLGE_CFG_DEFAULT_SPEED_M,
- HCLGE_CFG_DEFAULT_SPEED_S);
- cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
- HCLGE_CFG_RSS_SIZE_M,
- HCLGE_CFG_RSS_SIZE_S);
+ cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_DEFAULT_SPEED_M,
+ HCLGE_CFG_DEFAULT_SPEED_S);
+ cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_RSS_SIZE_M,
+ HCLGE_CFG_RSS_SIZE_S);
for (i = 0; i < ETH_ALEN; i++)
cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
@@ -1077,9 +1076,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
req = (struct hclge_cfg_param_cmd *)desc[1].data;
cfg->numa_node_map = __le32_to_cpu(req->param[0]);
- cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_SPEED_ABILITY_M,
- HCLGE_CFG_SPEED_ABILITY_S);
+ cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_SPEED_ABILITY_M,
+ HCLGE_CFG_SPEED_ABILITY_S);
}
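All of these conversions keep the same mask-and-shift semantics under the renamed hnae3_ prefix. A userspace model of the get/set field helpers; the mask and shift values are illustrative, since the real HCLGE_CFG_* constants are defined outside this hunk:

#include <stdint.h>
#include <stdio.h>

/* Userspace models of hnae3_get_field()/hnae3_set_field(). */
static uint32_t demo_get_field(uint32_t origin, uint32_t mask, int shift)
{
	return (origin & mask) >> shift;
}

static void demo_set_field(uint32_t *origin, uint32_t mask, int shift,
			   uint32_t val)
{
	*origin = (*origin & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t param = 0, mask = 0x0000ff00, shift = 8;	/* illustrative */

	demo_set_field(&param, mask, shift, 0x2a);
	printf("0x%08x -> %u\n", param, demo_get_field(param, mask, shift));
	return 0;	/* prints 0x00002a00 -> 42 */
}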
/* hclge_get_cfg: query the static parameter from flash
@@ -1098,22 +1097,22 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
req = (struct hclge_cfg_param_cmd *)desc[i].data;
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
true);
- hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
- HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
+ hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
+ HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be in units of 4 bytes when sent to hardware */
- hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
- HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
+ hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
+ HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
req->offset = cpu_to_le32(offset);
}
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
if (ret) {
- dev_err(&hdev->pdev->dev,
- "get config failed %d.\n", ret);
+ dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
return ret;
}
hclge_parse_cfg(hcfg, desc);
+
return 0;
}
@@ -1130,13 +1129,10 @@ static int hclge_get_cap(struct hclge_dev *hdev)
/* get pf resource */
ret = hclge_query_pf_resource(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "query pf resource error %d.\n", ret);
- return ret;
- }
+ if (ret)
+ dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
- return 0;
+ return ret;
}
static int hclge_configure(struct hclge_dev *hdev)
@@ -1189,7 +1185,7 @@ static int hclge_configure(struct hclge_dev *hdev)
	/* Currently does not support non-contiguous tc */
for (i = 0; i < hdev->tm_info.num_tc; i++)
- hnae_set_bit(hdev->hw_tc_map, i, 1);
+ hnae3_set_bit(hdev->hw_tc_map, i, 1);
hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
@@ -1208,13 +1204,13 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
req = (struct hclge_cfg_tso_status_cmd *)desc.data;
tso_mss = 0;
- hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
- HCLGE_TSO_MSS_MIN_S, tso_mss_min);
+ hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+ HCLGE_TSO_MSS_MIN_S, tso_mss_min);
req->tso_mss_min = cpu_to_le16(tso_mss);
tso_mss = 0;
- hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
- HCLGE_TSO_MSS_MIN_S, tso_mss_max);
+ hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+ HCLGE_TSO_MSS_MIN_S, tso_mss_max);
req->tso_mss_max = cpu_to_le16(tso_mss);
return hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -1265,44 +1261,43 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
req->tqp_vid = cpu_to_le16(tqp_vid);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
- ret);
- return ret;
- }
+ if (ret)
+ dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
- return 0;
+ return ret;
}
-static int hclge_assign_tqp(struct hclge_vport *vport,
- struct hnae3_queue **tqp, u16 num_tqps)
+static int hclge_assign_tqp(struct hclge_vport *vport)
{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
int i, alloced;
for (i = 0, alloced = 0; i < hdev->num_tqps &&
- alloced < num_tqps; i++) {
+ alloced < kinfo->num_tqps; i++) {
if (!hdev->htqp[i].alloced) {
hdev->htqp[i].q.handle = &vport->nic;
hdev->htqp[i].q.tqp_index = alloced;
- tqp[alloced] = &hdev->htqp[i].q;
+ hdev->htqp[i].q.desc_num = kinfo->num_desc;
+ kinfo->tqp[alloced] = &hdev->htqp[i].q;
hdev->htqp[i].alloced = true;
alloced++;
}
}
- vport->alloc_tqps = num_tqps;
+ vport->alloc_tqps = kinfo->num_tqps;
return 0;
}
-static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
+static int hclge_knic_setup(struct hclge_vport *vport,
+ u16 num_tqps, u16 num_desc)
{
struct hnae3_handle *nic = &vport->nic;
struct hnae3_knic_private_info *kinfo = &nic->kinfo;
struct hclge_dev *hdev = vport->back;
int i, ret;
- kinfo->num_desc = hdev->num_desc;
+ kinfo->num_desc = num_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
kinfo->rss_size
@@ -1329,13 +1324,11 @@ static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
if (!kinfo->tqp)
return -ENOMEM;
- ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
- if (ret) {
+ ret = hclge_assign_tqp(vport);
+ if (ret)
dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
- return -EINVAL;
- }
- return 0;
+ return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
@@ -1397,7 +1390,7 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->numa_node_mask = hdev->numa_node_mask;
if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
- ret = hclge_knic_setup(vport, num_tqps);
+ ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
if (ret) {
dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
ret);
@@ -1487,13 +1480,11 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
@@ -1501,13 +1492,10 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
{
int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "tx buffer alloc failed %d\n", ret);
- return ret;
- }
+ if (ret)
+ dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
- return 0;
+ return ret;
}
static int hclge_get_tc_num(struct hclge_dev *hdev)
@@ -1825,17 +1813,13 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
(1 << HCLGE_TC0_PRI_BUF_EN_B));
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"rx private buffer alloc cmd failed %d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
-#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
-
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
@@ -1863,25 +1847,21 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
req->tc_wl[j].high =
cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
req->tc_wl[j].high |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->tc_wl[j].low =
cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
req->tc_wl[j].low |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
}
}
	/* Send 2 descriptors at one time */
ret = hclge_cmd_send(&hdev->hw, desc, 2);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"rx private waterline config cmd failed %d\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
@@ -1911,24 +1891,20 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
req->com_thrd[j].high =
cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
req->com_thrd[j].high |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->com_thrd[j].low =
cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
req->com_thrd[j].low |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
}
}
/* Send 2 descriptors at one time */
ret = hclge_cmd_send(&hdev->hw, desc, 2);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"common threshold config cmd failed %d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
@@ -1943,23 +1919,17 @@ static int hclge_common_wl_config(struct hclge_dev *hdev,
req = (struct hclge_rx_com_wl *)desc.data;
req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
- req->com_wl.high |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
- HCLGE_RX_PRIV_EN_B);
+ req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
- req->com_wl.low |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
- HCLGE_RX_PRIV_EN_B);
+ req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"common waterline config cmd failed %d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
@@ -2074,7 +2044,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
hdev->num_msi_left = vectors;
hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = hdev->base_msi_vector +
- HCLGE_ROCE_VECTOR_OFFSET;
+ hdev->roce_base_msix_offset;
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
@@ -2118,48 +2088,48 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
- hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
+ hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
switch (speed) {
case HCLGE_MAC_SPEED_10M:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 6);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 6);
break;
case HCLGE_MAC_SPEED_100M:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 7);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 7);
break;
case HCLGE_MAC_SPEED_1G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 0);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 0);
break;
case HCLGE_MAC_SPEED_10G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 1);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 1);
break;
case HCLGE_MAC_SPEED_25G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 2);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 2);
break;
case HCLGE_MAC_SPEED_40G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 3);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 3);
break;
case HCLGE_MAC_SPEED_50G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 4);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 4);
break;
case HCLGE_MAC_SPEED_100G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 5);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 5);
break;
default:
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
return -EINVAL;
}
- hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
- 1);
+ hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
+ 1);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2201,18 +2171,16 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
return ret;
}
- *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
- speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
- HCLGE_QUERY_SPEED_S);
+ *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
+ speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
+ HCLGE_QUERY_SPEED_S);
ret = hclge_parse_speed(speed_tmp, speed);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"could not parse speed(=%d), %d\n", speed_tmp, ret);
- return -EIO;
- }
- return 0;
+ return ret;
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2225,17 +2193,15 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
req = (struct hclge_config_auto_neg_cmd *)desc.data;
- hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
+ hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
req->cfg_an_cmd_flag = cpu_to_le32(flag);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
@@ -2269,8 +2235,8 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
- hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
- mask_vlan ? 1 : 0);
+ hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+ mask_vlan ? 1 : 0);
ether_addr_copy(req->mac_mask, mac_mask);
status = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2341,13 +2307,11 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mtu = ETH_DATA_LEN;
ret = hclge_set_mtu(handle, mtu);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"set mtu failed ret=%d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
@@ -2386,7 +2350,7 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev)
}
req = (struct hclge_link_status_cmd *)desc.data;
- link_status = req->status & HCLGE_LINK_STATUS;
+ link_status = req->status & HCLGE_LINK_STATUS_UP_M;
return !!link_status;
}
@@ -2505,7 +2469,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
u32 cmdq_src_reg;
/* fetch the events from their corresponding regs */
- rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+ rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
/* Assumption: If by any chance reset and mailbox events are reported
@@ -2517,12 +2481,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
/* check for vector0 reset event sources */
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
}
if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
@@ -2614,6 +2580,12 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
+ if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
+ dev_warn(&hdev->pdev->dev,
+ "vector(vector_id %d) has been freed.\n", vector_id);
+ return;
+ }
+
hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
hdev->num_msi_left += 1;
hdev->num_msi_used -= 1;
@@ -2705,7 +2677,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
}
val = hclge_read_dev(&hdev->hw, reg);
- while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
+ while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
val = hclge_read_dev(&hdev->hw, reg);
cnt++;
@@ -2727,8 +2699,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
- hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
- hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
+ hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
req->fun_reset_vfid = func_id;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2747,13 +2718,13 @@ static void hclge_do_reset(struct hclge_dev *hdev)
switch (hdev->reset_type) {
case HNAE3_GLOBAL_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
- hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
+ hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
dev_info(&pdev->dev, "Global Reset requested\n");
break;
case HNAE3_CORE_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
- hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
+ hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
dev_info(&pdev->dev, "Core Reset requested\n");
break;
@@ -2810,8 +2781,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
break;
default:
- dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d",
- hdev->reset_type);
break;
}
@@ -2824,16 +2793,17 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
static void hclge_reset(struct hclge_dev *hdev)
{
- /* perform reset of the stack & ae device for a client */
+ struct hnae3_handle *handle;
+ /* perform reset of the stack & ae device for a client */
+ handle = &hdev->vport[0].nic;
+ rtnl_lock();
hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
if (!hclge_reset_wait(hdev)) {
- rtnl_lock();
hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
hclge_reset_ae_dev(hdev->ae_dev);
hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- rtnl_unlock();
hclge_clear_reset_cause(hdev);
} else {
@@ -2843,6 +2813,8 @@ static void hclge_reset(struct hclge_dev *hdev)
}
hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ handle->last_reset_time = jiffies;
+ rtnl_unlock();
}
static void hclge_reset_event(struct hnae3_handle *handle)
@@ -2855,8 +2827,13 @@ static void hclge_reset_event(struct hnae3_handle *handle)
	 * know this if the last reset request did not occur very recently (watchdog
	 * timer = 5*HZ, so check after a sufficiently large time, say 4*5*HZ).
	 * In case of a new request we reset the "reset level" to PF reset.
+ * If it is a repeat of the most recent reset request, throttle it:
+ * another reset is not allowed until 3*HZ has elapsed since the last
+ * one.
*/
- if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
+ if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+ return;
+ else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
handle->reset_level = HNAE3_FUNC_RESET;
dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
@@ -2868,8 +2845,6 @@ static void hclge_reset_event(struct hnae3_handle *handle)
if (handle->reset_level < HNAE3_GLOBAL_RESET)
handle->reset_level++;
-
- handle->last_reset_time = jiffies;
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
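The new throttle depends on the kernel's wrap-safe jiffies comparisons. A runnable model of why the signed subtraction behind time_before() survives a counter wrap:

#include <stdio.h>

/* Userspace model of the kernel's time_before(a, b):
 * signed subtraction keeps ordering correct across jiffies wrap. */
static int demo_time_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

int main(void)
{
	unsigned long last = (unsigned long)-2;	/* just before wrap */
	unsigned long now = last + 5;		/* 5 ticks later, wrapped */

	/* The 3-tick throttle window has passed despite the wrap. */
	printf("%d\n", demo_time_before(now, last + 3));	/* 0 */
	return 0;
}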
@@ -3110,23 +3085,21 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
u16 mode = 0;
- hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
- hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
- HCLGE_RSS_TC_SIZE_S, tc_size[i]);
- hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
- HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
+ hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
+ hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
+ HCLGE_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
+ HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
req->rss_tc_mode[i] = cpu_to_le16(mode);
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Configure rss tc mode fail, status = %d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
@@ -3149,13 +3122,10 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Configure rss input fail, status = %d\n", ret);
- return ret;
- }
-
- return 0;
+ return ret;
}
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
@@ -3491,16 +3461,16 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
i = 0;
for (node = ring_chain; node; node = node->next) {
tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
- hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
- HCLGE_INT_TYPE_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
- HCLGE_TQP_ID_S, node->tqp_index);
- hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- hnae_get_field(node->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S));
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+ HCLGE_INT_TYPE_S,
+ hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
+ hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+ HCLGE_TQP_ID_S, node->tqp_index);
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S));
req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -3603,12 +3573,11 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Set promisc mode fail, status is %d.\n", ret);
- return ret;
- }
- return 0;
+
+ return ret;
}
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -3648,20 +3617,20 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
- hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -3689,7 +3658,7 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
/* 2 Then setup the loopback flag */
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
- hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
@@ -3704,6 +3673,55 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
+static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en)
+{
+#define HCLGE_SERDES_RETRY_MS 10
+#define HCLGE_SERDES_RETRY_NUM 100
+ struct hclge_serdes_lb_cmd *req;
+ struct hclge_desc desc;
+ int ret, i = 0;
+
+ req = (struct hclge_serdes_lb_cmd *)&desc.data[0];
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
+
+ if (en) {
+ req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ } else {
+ req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "serdes loopback set fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ do {
+ msleep(HCLGE_SERDES_RETRY_MS);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "serdes loopback get, ret = %d\n", ret);
+ return ret;
+ }
+ } while (++i < HCLGE_SERDES_RETRY_NUM &&
+ !(req->result & HCLGE_CMD_SERDES_DONE_B));
+
+ if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
+ dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
+ return -EBUSY;
+ } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
+ dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int hclge_set_loopback(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en)
{
@@ -3715,6 +3733,9 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
case HNAE3_MAC_INTER_LOOP_MAC:
ret = hclge_set_mac_loopback(hdev, en);
break;
+ case HNAE3_MAC_INTER_LOOP_SERDES:
+ ret = hclge_set_serdes_loopback(hdev, en);
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
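hclge_set_serdes_loopback() above bounds its wait to HCLGE_SERDES_RETRY_NUM * HCLGE_SERDES_RETRY_MS, roughly one second, with DONE_B separating "still running" from SUCCESS_B's "ran and failed". A runnable model of the bounded poll:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_RETRY_NUM	100	/* HCLGE_SERDES_RETRY_NUM */

/* Pretend the hardware sets its DONE bit on the third poll. */
static bool demo_poll_hw(int attempt)
{
	return attempt >= 3;
}

int main(void)
{
	bool done = false;
	int i = 0;

	do {
		/* the driver msleep()s 10 ms here, then re-reads result */
		done = demo_poll_hw(i);
	} while (++i < DEMO_RETRY_NUM && !done);

	printf(done ? "done after %d polls\n" : "timed out (%d polls)\n", i);
	return 0;
}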
@@ -3763,7 +3784,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int i, ret;
+ int i;
for (i = 0; i < vport->alloc_tqps; i++)
hclge_tqp_enable(hdev, i, 0, true);
@@ -3777,9 +3798,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
/* reset tqp stats */
hclge_reset_tqp_stats(handle);
- ret = hclge_mac_start_phy(hdev);
- if (ret)
- return ret;
+ hclge_mac_start_phy(hdev);
return 0;
}
@@ -3911,7 +3930,7 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
#define HCLGE_FUNC_NUMBER_PER_DESC 6
int i, j;
- for (i = 0; i < HCLGE_DESC_NUMBER; i++)
+ for (i = 1; i < HCLGE_DESC_NUMBER; i++)
for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
if (desc[i].data[j])
return false;
@@ -3953,20 +3972,18 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
req = (struct hclge_mta_filter_mode_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
- hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
- enable);
- hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
- HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
+ hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
+ enable);
+ hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
+ HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Config mat filter mode failed for cmd_send, ret =%d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
@@ -3980,19 +3997,17 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
- hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
- enable);
+ hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
+ enable);
req->function_id = func_id;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
"Config func_id enable failed for cmd_send, ret =%d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int hclge_set_mta_table_item(struct hclge_vport *vport,
@@ -4007,10 +4022,10 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
- hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
+ hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
- hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
- HCLGE_CFG_MTA_ITEM_IDX_S, idx);
+ hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
+ HCLGE_CFG_MTA_ITEM_IDX_S, idx);
req->item_idx = cpu_to_le16(item_idx);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -4257,17 +4272,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
- hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-
- hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
- hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
- hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
- HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
- hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
- HCLGE_MAC_EPORT_PFID_S, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+
+ hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+ HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
req.egress_port = cpu_to_le16(egress_port);
@@ -4318,8 +4326,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
@@ -4331,7 +4339,7 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_add_mc_addr_common(vport, addr);
+ return hclge_add_mc_addr_common(vport, addr);
}
int hclge_add_mc_addr_common(struct hclge_vport *vport,
@@ -4351,10 +4359,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
return -EINVAL;
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
- hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+ hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -4418,10 +4426,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
- hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+ hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -4604,13 +4612,11 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
req->vlan_fe = filter_en;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
ret);
- return ret;
- }
- return 0;
+ return ret;
}
#define HCLGE_FILTER_TYPE_VF 0
@@ -4802,19 +4808,19 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
- vcfg->accept_tag1 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
- vcfg->accept_untag1 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
- vcfg->accept_tag2 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
- vcfg->accept_untag2 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
- vcfg->insert_tag1_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
- vcfg->insert_tag2_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
+ vcfg->accept_tag1 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
+ vcfg->accept_untag1 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
+ vcfg->accept_tag2 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
+ vcfg->accept_untag2 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+ vcfg->insert_tag1_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+ vcfg->insert_tag2_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
req->vf_bitmap[req->vf_offset] =
@@ -4840,14 +4846,14 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
- vcfg->strip_tag1_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
- vcfg->strip_tag2_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
- vcfg->vlan1_vlan_prionly ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
- vcfg->vlan2_vlan_prionly ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+ vcfg->strip_tag1_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+ vcfg->strip_tag2_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+ vcfg->vlan1_vlan_prionly ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+ vcfg->vlan2_vlan_prionly ? 1 : 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
req->vf_bitmap[req->vf_offset] =
@@ -4999,16 +5005,15 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
req->max_frm_size = cpu_to_le16(max_frm_size);
+ req->min_frm_size = HCLGE_MAC_MIN_FRAME;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
- return ret;
- }
-
- hdev->mps = max_frm_size;
+ else
+ hdev->mps = max_frm_size;
- return 0;
+ return ret;
}
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
@@ -5043,7 +5048,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
- hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
+ hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -5073,7 +5078,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return ret;
}
- return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+ return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
@@ -5380,12 +5385,12 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
- mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
- HCLGE_PHY_MDIX_CTRL_S);
+ mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
+ HCLGE_PHY_MDIX_CTRL_S);
retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
- mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
- is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
+ mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
+ is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
@@ -5412,6 +5417,16 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
*tp_mdix = ETH_TP_MDI;
}
+static int hclge_init_instance_hw(struct hclge_dev *hdev)
+{
+ return hclge_mac_connect_phy(hdev);
+}
+
+static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
+{
+ hclge_mac_disconnect_phy(hdev);
+}
+
static int hclge_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -5431,6 +5446,13 @@ static int hclge_init_client_instance(struct hnae3_client *client,
if (ret)
return ret;
+ ret = hclge_init_instance_hw(hdev);
+ if (ret) {
+ client->ops->uninit_instance(&vport->nic,
+ 0);
+ return ret;
+ }
+
if (hdev->roce_client &&
hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
@@ -5493,6 +5515,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (client->type == HNAE3_CLIENT_ROCE)
return;
if (client->ops->uninit_instance) {
+ hclge_uninit_instance_hw(hdev);
client->ops->uninit_instance(&vport->nic, 0);
hdev->nic_client = NULL;
vport->nic.client = NULL;
@@ -5531,7 +5554,6 @@ static int hclge_pci_init(struct hclge_dev *hdev)
pci_set_master(pdev);
hw = &hdev->hw;
- hw->back = hdev;
hw->io_base = pcim_iomap(pdev, 2, 0);
if (!hw->io_base) {
dev_err(&pdev->dev, "Can't map configuration register space\n");
@@ -5562,6 +5584,30 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
pci_disable_device(pdev);
}
+static void hclge_state_init(struct hclge_dev *hdev)
+{
+ set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclge_state_uninit(struct hclge_dev *hdev)
+{
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+ if (hdev->service_timer.function)
+ del_timer_sync(&hdev->service_timer);
+ if (hdev->service_task.func)
+ cancel_work_sync(&hdev->service_task);
+ if (hdev->rst_service_task.func)
+ cancel_work_sync(&hdev->rst_service_task);
+ if (hdev->mbx_service_task.func)
+ cancel_work_sync(&hdev->mbx_service_task);
+}
+
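
The new hclge_state_uninit() sets HCLGE_STATE_DOWN before cancelling anything because the periodic service work typically re-arms its own timer. A minimal sketch of that interaction (the body is illustrative; only the names mirror the driver):

static void example_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return;		/* tearing down: do not re-arm the timer */

	/* ... periodic housekeeping ... */

	mod_timer(&hdev->service_timer, jiffies + HZ);
}

With the DOWN bit visible first, del_timer_sync() and cancel_work_sync() cannot race against a re-arm, and the ->function/->func checks make the helper safe on early error paths where some of these objects were never set up.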
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -5577,8 +5623,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
hdev->reset_type = HNAE3_NONE_RESET;
- hdev->reset_request = 0;
- hdev->reset_pending = 0;
ae_dev->priv = hdev;
ret = hclge_pci_init(hdev);
@@ -5702,12 +5746,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
- set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
- set_bit(HCLGE_STATE_DOWN, &hdev->state);
- clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
- clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+ hclge_state_init(hdev);
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
@@ -5812,16 +5851,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
struct hclge_mac *mac = &hdev->hw.mac;
- set_bit(HCLGE_STATE_DOWN, &hdev->state);
-
- if (hdev->service_timer.function)
- del_timer_sync(&hdev->service_timer);
- if (hdev->service_task.func)
- cancel_work_sync(&hdev->service_task);
- if (hdev->rst_service_task.func)
- cancel_work_sync(&hdev->rst_service_task);
- if (hdev->mbx_service_task.func)
- cancel_work_sync(&hdev->mbx_service_task);
+ hclge_state_uninit(hdev);
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
@@ -5905,9 +5935,10 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
u32 *rss_indir;
int ret, i;
+ /* Free the old tqps, then reallocate with the new tqp number during nic setup */
hclge_release_tqp(vport);
- ret = hclge_knic_setup(vport, new_tqps_num);
+ ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
if (ret) {
dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
return ret;
@@ -6149,8 +6180,8 @@ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
req = (struct hclge_set_led_state_cmd *)desc.data;
- hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
- HCLGE_LED_LOCATE_STATE_S, locate_led_status);
+ hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
+ HCLGE_LED_LOCATE_STATE_S, locate_led_status);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -6280,7 +6311,6 @@ static const struct hnae3_ae_ops hclge_ops = {
static struct hnae3_ae_algo ae_algo = {
.ops = &hclge_ops,
- .name = HCLGE_NAME,
.pdev_id_table = ae_algo_pci_tbl,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 7488534528cd..1528fb3fa6be 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_MAIN_H
#define __HCLGE_MAIN_H
@@ -22,8 +16,6 @@
#define HCLGE_INVALID_VPORT 0xffff
-#define HCLGE_ROCE_VECTOR_OFFSET 96
-
#define HCLGE_PF_CFG_BLOCK_SIZE 32
#define HCLGE_PF_CFG_DESC_NUM \
(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
@@ -40,7 +32,7 @@
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
-#define HCLGE_RSS_HASH_ALGO_MASK 0xf
+#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
#define HCLGE_RSS_CFG_TBL_NUM \
(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
@@ -77,11 +69,11 @@
/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG 17
-#define HCLGE_PHY_MDIX_CTRL_S (5)
+#define HCLGE_PHY_MDIX_CTRL_S 5
#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)
-#define HCLGE_PHY_MDIX_STATUS_B (6)
-#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11)
+#define HCLGE_PHY_MDIX_STATUS_B 6
+#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
@@ -89,9 +81,10 @@
/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
+#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
-#define HCLGE_GLOBAL_RESET_BIT 0x0
-#define HCLGE_CORE_RESET_BIT 0x1
+#define HCLGE_GLOBAL_RESET_BIT 0
+#define HCLGE_CORE_RESET_BIT 1
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
@@ -128,6 +121,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
HCLGE_STATE_STATISTICS_UPDATING,
+ HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_MAX
};
@@ -138,12 +132,6 @@ enum hclge_evt_cause {
};
#define HCLGE_MPF_ENBALE 1
-struct hclge_caps {
- u16 num_tqp;
- u16 num_buffer_cell;
- u32 flag;
- u16 vmdq;
-};
enum HCLGE_MAC_SPEED {
HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
@@ -189,8 +177,6 @@ struct hclge_hw {
struct hclge_mac mac;
int num_vec;
struct hclge_cmq cmq;
- struct hclge_caps caps;
- void *back;
};
/* TQP stats */
@@ -202,7 +188,10 @@ struct hlcge_tqp_stats {
};
struct hclge_tqp {
- struct device *dev; /* Device for DMA mapping */
+ /* copy of device pointer from pci_dev,
+ * used when performing DMA mapping
+ */
+ struct device *dev;
struct hnae3_queue q;
struct hlcge_tqp_stats tqp_stats;
u16 index; /* Global index in a NIC controller */
@@ -492,13 +481,11 @@ struct hclge_dev {
u16 num_tqps; /* Num task queue pairs of this PF */
u16 num_req_vfs; /* Num VFs requested for this PF */
- /* Base task tqp physical id of this PF */
- u16 base_tqp_pid;
+ u16 base_tqp_pid; /* Base task tqp physical id of this PF */
u16 alloc_rss_size; /* Allocated RSS task queue */
u16 rss_size_max; /* HW defined max RSS task queue */
- /* Num of guaranteed filters for this PF */
- u16 fdir_pf_filter_count;
+ u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
u16 num_alloc_vport; /* Num vports this driver supports */
u32 numa_node_mask;
u16 rx_buf_len;
@@ -520,6 +507,7 @@ struct hclge_dev {
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
+ u16 roce_base_msix_offset;
u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
@@ -560,7 +548,7 @@ struct hclge_dev {
u32 mps; /* Max packet size */
enum hclge_mta_dmac_sel_type mta_mac_sel_type;
- bool enable_mta; /* Mutilcast filter enable */
+ bool enable_mta; /* Multicast filter enable */
struct hclge_vlan_type_cfg vlan_type_cfg;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 7541cb9b71ce..f34851c91eb3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -104,13 +104,15 @@ static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
}
}
-/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message
+/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
+ * from mailbox message
* msg[0]: opcode
* msg[1]: <not relevant to this function>
* msg[2]: ring_num
* msg[3]: first ring type (TX|RX)
* msg[4]: first tqp id
- * msg[5] ~ msg[14]: other ring type and tqp id
+ * msg[5]: first int_gl idx
+ * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
*/
static int hclge_get_ring_chain_from_mbx(
struct hclge_mbx_vf_to_pf_cmd *req,
@@ -128,12 +130,12 @@ static int hclge_get_ring_chain_from_mbx(
HCLGE_MBX_RING_NODE_VARIABLE_NUM))
return -ENOMEM;
- hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+ hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
ring_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
- hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- req->msg[5]);
+ hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ req->msg[5]);
cur_chain = ring_chain;
@@ -142,19 +144,19 @@ static int hclge_get_ring_chain_from_mbx(
if (!new_chain)
goto err;
- hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
+ hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
new_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp
[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);
- hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
+ hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
cur_chain->next = new_chain;
cur_chain = new_chain;
@@ -460,7 +462,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+ if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n",
req->msg[0]);
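
The layout comment above implies a simple index formula for the extra ring tuples. A sketch of that arithmetic, assuming HCLGE_MBX_RING_MAP_BASIC_MSG_NUM == 3 and HCLGE_MBX_RING_NODE_VARIABLE_NUM == 3 (both inferred from the loop above, not verified against hclge_mbx.h):

/* ring 0 lives at msg[3..5]; ring i (i >= 1) starts at 3 * i + 3,
 * i.e. msg[6..8], msg[9..11], msg[12..14] -- matching "msg[6] ~ msg[14]".
 */
static inline unsigned int example_ring_msg_base(unsigned int i)
{
	return HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
	       HCLGE_MBX_RING_MAP_BASIC_MSG_NUM;
}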
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 9f7932e423b5..398971a062f4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
#include <linux/kernel.h>
@@ -67,16 +61,16 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
- hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
- HCLGE_MDIO_PHYID_S, phyid);
- hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
- HCLGE_MDIO_PHYREG_S, regnum);
+ hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+ HCLGE_MDIO_PHYID_S, phyid);
+ hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+ HCLGE_MDIO_PHYREG_S, regnum);
- hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
- HCLGE_MDIO_CTRL_ST_S, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
- HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
+ hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+ HCLGE_MDIO_CTRL_ST_S, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+ HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
mdio_cmd->data_wr = cpu_to_le16(data);
@@ -105,16 +99,16 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
- hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
- HCLGE_MDIO_PHYID_S, phyid);
- hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
- HCLGE_MDIO_PHYREG_S, regnum);
+ hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+ HCLGE_MDIO_PHYID_S, phyid);
+ hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+ HCLGE_MDIO_PHYREG_S, regnum);
- hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
- HCLGE_MDIO_CTRL_ST_S, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
- HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
+ hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+ HCLGE_MDIO_CTRL_ST_S, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+ HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
/* Read out phy data */
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -125,7 +119,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
return ret;
}
- if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
+ if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
dev_err(&hdev->pdev->dev, "mdio read data error\n");
return -EIO;
}
@@ -199,7 +193,7 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
netdev_err(netdev, "failed to configure flow control.\n");
}
-int hclge_mac_start_phy(struct hclge_dev *hdev)
+int hclge_mac_connect_phy(struct hclge_dev *hdev)
{
struct net_device *netdev = hdev->vport[0].nic.netdev;
struct phy_device *phydev = hdev->hw.mac.phydev;
@@ -208,6 +202,8 @@ int hclge_mac_start_phy(struct hclge_dev *hdev)
if (!phydev)
return 0;
+ phydev->supported &= ~SUPPORTED_FIBRE;
+
ret = phy_connect_direct(netdev, phydev,
hclge_mac_adjust_link,
PHY_INTERFACE_MODE_SGMII);
@@ -219,11 +215,29 @@ int hclge_mac_start_phy(struct hclge_dev *hdev)
phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES;
phydev->advertising = phydev->supported;
- phy_start(phydev);
-
return 0;
}
+void hclge_mac_disconnect_phy(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phy_disconnect(phydev);
+}
+
+void hclge_mac_start_phy(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phy_start(phydev);
+}
+
void hclge_mac_stop_phy(struct hclge_dev *hdev)
{
struct net_device *netdev = hdev->vport[0].nic.netdev;
@@ -233,5 +247,4 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev)
return;
phy_stop(phydev);
- phy_disconnect(phydev);
}
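
With phy_connect_direct() and phy_disconnect() split out of start/stop, the PHY lifecycle now pairs up as below (the init/uninit callers are the helpers added earlier in this series; the ae start/stop hooks are assumed to be the callers of start/stop):

/*
 *   hclge_init_instance_hw()   -> hclge_mac_connect_phy()    (once)
 *       ae start               -> hclge_mac_start_phy()      (per up)
 *       ae stop                -> hclge_mac_stop_phy()       (per down)
 *   hclge_uninit_instance_hw() -> hclge_mac_disconnect_phy() (once)
 */

so an ifdown/ifup cycle no longer disconnects and re-probes the phydev.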
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index c5e91cfb8f2c..5fbf7dddb5d9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -1,17 +1,13 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_MDIO_H
#define __HCLGE_MDIO_H
int hclge_mac_mdio_config(struct hclge_dev *hdev);
-int hclge_mac_start_phy(struct hclge_dev *hdev);
+int hclge_mac_connect_phy(struct hclge_dev *hdev);
+void hclge_mac_disconnect_phy(struct hclge_dev *hdev);
+void hclge_mac_start_phy(struct hclge_dev *hdev);
void hclge_mac_stop_phy(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 262c125f8137..5db70a1451c5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
@@ -1184,10 +1178,10 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
u16 qs_id = vport->qs_offset + tc;
u8 grp, sub_grp;
- grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M,
- HCLGE_BP_GRP_ID_S);
- sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
- HCLGE_BP_SUB_GRP_ID_S);
+ grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
+ HCLGE_BP_GRP_ID_S);
+ sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
+ HCLGE_BP_SUB_GRP_ID_S);
if (i == grp)
qs_bitmap |= (1 << sub_grp);
@@ -1223,6 +1217,10 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
tx_en = true;
rx_en = true;
break;
+ case HCLGE_FC_PFC:
+ tx_en = false;
+ rx_en = false;
+ break;
default:
tx_en = true;
rx_en = true;
@@ -1240,8 +1238,9 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
if (ret)
return ret;
- if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
- return hclge_mac_pause_setup_hw(hdev);
+ ret = hclge_mac_pause_setup_hw(hdev);
+ if (ret)
+ return ret;
/* Only DCB-supported dev supports qset back pressure and pfc cmd */
if (!hnae3_dev_dcb_supported(hdev))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index c2b6e8a6700f..dd4c194747c1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016~2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
#ifndef __HCLGE_TM_H
#define __HCLGE_TM_H
@@ -123,10 +117,11 @@ struct hclge_port_shapping_cmd {
};
#define hclge_tm_set_field(dest, string, val) \
- hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
- (HCLGE_TM_SHAP_##string##_LSH), val)
+ hnae3_set_field((dest), \
+ (HCLGE_TM_SHAP_##string##_MSK), \
+ (HCLGE_TM_SHAP_##string##_LSH), val)
#define hclge_tm_get_field(src, string) \
- hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
+ hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH))
int hclge_tm_schd_init(struct hclge_dev *hdev);
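
The shaper accessors rely on token pasting; a worked expansion with a hypothetical field name BS_B:

/* hclge_tm_set_field(dest, BS_B, val) expands to:
 *
 *   hnae3_set_field((dest),
 *                   (HCLGE_TM_SHAP_BS_B_MSK),
 *                   (HCLGE_TM_SHAP_BS_B_LSH), val);
 */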
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 1bbfe131b596..fb471fe2c494 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -76,32 +76,24 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclgevf_desc);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+ size, &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
-
return 0;
}
static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{
- dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- hclgevf_ring_to_dma_dir(ring));
+ int size = ring->desc_num * sizeof(struct hclgevf_desc);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(cmq_ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
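
Switching to dma_zalloc_coherent() collapses the old kzalloc() + dma_map_single() + dma_mapping_error() sequence into one call that returns zeroed, already-mapped memory. A minimal sketch of the resulting alloc/free pairing (helper names are illustrative):

static int example_alloc_ring(struct device *dev, size_t size,
			      void **desc, dma_addr_t *dma_addr)
{
	/* zeroed and DMA-mapped in one step; no mapping-error branch */
	*desc = dma_zalloc_coherent(dev, size, dma_addr, GFP_KERNEL);
	return *desc ? 0 : -ENOMEM;
}

static void example_free_ring(struct device *dev, size_t size,
			      void *desc, dma_addr_t dma_addr)
{
	dma_free_coherent(dev, size, desc, dma_addr);
}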
static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 621c6cbacf76..19b32860309c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -82,6 +82,7 @@ struct hclgevf_cmq {
enum hclgevf_opcode_type {
/* Generic command */
HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
+ HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024,
/* TQP command */
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
@@ -134,6 +135,19 @@ struct hclgevf_query_version_cmd {
__le32 firmware_rsv[5];
};
+#define HCLGEVF_MSIX_OFT_ROCEE_S 0
+#define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S)
+#define HCLGEVF_VEC_NUM_S 0
+#define HCLGEVF_VEC_NUM_M (0xff << HCLGEVF_VEC_NUM_S)
+struct hclgevf_query_res_cmd {
+ __le16 tqp_num;
+ __le16 reserved;
+ __le16 msixcap_localid_ba_nic;
+ __le16 msixcap_localid_ba_rocee;
+ __le16 vf_intr_vector_number;
+ __le16 rsv[7];
+};
+
#define HCLGEVF_RSS_HASH_KEY_OFFSET 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
struct hclgevf_rss_config_cmd {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a17872aab168..9c0091f2addf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -330,6 +330,12 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
+ if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
+ dev_warn(&hdev->pdev->dev,
+ "vector(vector_id %d) has been freed.\n", vector_id);
+ return;
+ }
+
hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
hdev->num_msi_left += 1;
hdev->num_msi_used -= 1;
@@ -444,12 +450,12 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
- (tc_valid[i] & 0x1));
- hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
- HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
- hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
- HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+ hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+ (tc_valid[i] & 0x1));
+ hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+ HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+ HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
}
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
@@ -547,24 +553,18 @@ static int hclgevf_get_tc_size(struct hnae3_handle *handle)
}
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
- int vector,
+ int vector_id,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hnae3_ring_chain_node *node;
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclgevf_desc desc;
- int i = 0, vector_id;
+ int i = 0;
int status;
u8 type;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
- vector_id = hclgevf_get_vector_index(hdev, vector);
- if (vector_id < 0) {
- dev_err(&handle->pdev->dev,
- "Get vector index fail. ret =%d\n", vector_id);
- return vector_id;
- }
for (node = ring_chain; node; node = node->next) {
int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
@@ -582,11 +582,11 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
}
req->msg[idx_offset] =
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
+ hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
req->msg[idx_offset + 1] = node->tqp_index;
- req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S);
+ req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S);
i++;
if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
@@ -617,7 +617,17 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
struct hnae3_ring_chain_node *ring_chain)
{
- return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int vector_id;
+
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}
static int hclgevf_unmap_ring_from_vector(
@@ -635,7 +645,7 @@ static int hclgevf_unmap_ring_from_vector(
return vector_id;
}
- ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
+ ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
if (ret)
dev_err(&handle->pdev->dev,
"Unmap ring from vector fail. vector=%d, ret =%d\n",
@@ -648,8 +658,17 @@ static int hclgevf_unmap_ring_from_vector(
static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int vector_id;
- hclgevf_free_vector(hdev, vector);
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "hclgevf_put_vector get vector index fail. ret =%d\n",
+ vector_id);
+ return vector_id;
+ }
+
+ hclgevf_free_vector(hdev, vector_id);
return 0;
}
@@ -990,8 +1009,8 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
/* wait to check the hardware reset completion status */
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
- (cnt < HCLGEVF_RESET_WAIT_CNT)) {
+ while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
+ (cnt < HCLGEVF_RESET_WAIT_CNT)) {
msleep(HCLGEVF_RESET_WAIT_MS);
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
cnt++;
@@ -1351,14 +1370,13 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
struct hnae3_handle *roce = &hdev->roce;
struct hnae3_handle *nic = &hdev->nic;
- roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;
+ roce->rinfo.num_vectors = hdev->num_roce_msix;
if (hdev->num_msi_left < roce->rinfo.num_vectors ||
hdev->num_msi_left == 0)
return -EINVAL;
- roce->rinfo.base_vector =
- hdev->vector_status[hdev->num_msi_used];
+ roce->rinfo.base_vector = hdev->roce_base_vector;
roce->rinfo.netdev = nic->kinfo.netdev;
roce->rinfo.roce_io_base = hdev->hw.io_base;
@@ -1501,10 +1519,15 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
if (hclgevf_dev_ongoing_reset(hdev))
return 0;
- hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;
+ if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
+ vectors = pci_alloc_irq_vectors(pdev,
+ hdev->roce_base_msix_offset + 1,
+ hdev->num_msi,
+ PCI_IRQ_MSIX);
+ else
+ vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
- PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (vectors < 0) {
dev_err(&pdev->dev,
"failed(%d) to allocate MSI/MSI-X vectors\n",
@@ -1519,6 +1542,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
hdev->base_msi_vector = pdev->irq;
+ hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
@@ -1582,9 +1606,10 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
hclgevf_free_vector(hdev, 0);
}
-static int hclgevf_init_instance(struct hclgevf_dev *hdev,
- struct hnae3_client *client)
+static int hclgevf_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
{
+ struct hclgevf_dev *hdev = ae_dev->priv;
int ret;
switch (client->type) {
@@ -1635,9 +1660,11 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev,
return 0;
}
-static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
- struct hnae3_client *client)
+static void hclgevf_uninit_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
/* un-init roce, if it exists */
if (hdev->roce_client)
hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
@@ -1648,22 +1675,6 @@ static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
client->ops->uninit_instance(&hdev->nic, 0);
}
-static int hclgevf_register_client(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev)
-{
- struct hclgevf_dev *hdev = ae_dev->priv;
-
- return hclgevf_init_instance(hdev, client);
-}
-
-static void hclgevf_unregister_client(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev)
-{
- struct hclgevf_dev *hdev = ae_dev->priv;
-
- hclgevf_uninit_instance(hdev, client);
-}
-
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -1727,6 +1738,45 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
pci_disable_device(pdev);
}
+static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_query_res_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "query vf resource failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ req = (struct hclgevf_query_res_cmd *)desc.data;
+
+ if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
+ hdev->roce_base_msix_offset =
+ hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ HCLGEVF_MSIX_OFT_ROCEE_M,
+ HCLGEVF_MSIX_OFT_ROCEE_S);
+ hdev->num_roce_msix =
+ hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+
+ /* A VF has both NIC vectors and RoCE vectors; NIC vectors
+ * are queued before RoCE vectors. The offset is fixed to 64.
+ */
+ hdev->num_msi = hdev->num_roce_msix +
+ hdev->roce_base_msix_offset;
+ } else {
+ hdev->num_msi =
+ hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+ }
+
+ return 0;
+}
+
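
hnae3_get_field() here follows the usual (value & mask) >> shift pattern. A sketch of the decode, with a hypothetical register value:

/* assume msixcap_localid_ba_rocee reads back as 0x0040 (hypothetical) */
u16 v = le16_to_cpu(req->msixcap_localid_ba_rocee);
u16 oft = (v & HCLGEVF_MSIX_OFT_ROCEE_M) >> HCLGEVF_MSIX_OFT_ROCEE_S;
/* oft == 64: NIC vectors occupy 0..63, RoCE vectors start at 64,
 * and num_msi = num_roce_msix + oft covers both ranges.
 */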
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -1744,18 +1794,26 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
return ret;
}
+ ret = hclgevf_cmd_init(hdev);
+ if (ret)
+ goto err_cmd_init;
+
+ /* Get vf resource */
+ ret = hclgevf_query_vf_resource(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query vf status error, ret = %d.\n", ret);
+ goto err_query_vf;
+ }
+
ret = hclgevf_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
- goto err_irq_init;
+ goto err_query_vf;
}
hclgevf_state_init(hdev);
- ret = hclgevf_cmd_init(hdev);
- if (ret)
- goto err_cmd_init;
-
ret = hclgevf_misc_irq_init(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
@@ -1811,11 +1869,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
err_config:
hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
- hclgevf_cmd_uninit(hdev);
-err_cmd_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
-err_irq_init:
+err_query_vf:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_init:
hclgevf_pci_uninit(hdev);
return ret;
}
@@ -1924,8 +1982,8 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
- .init_client_instance = hclgevf_register_client,
- .uninit_client_instance = hclgevf_unregister_client,
+ .init_client_instance = hclgevf_init_client_instance,
+ .uninit_client_instance = hclgevf_uninit_client_instance,
.start = hclgevf_ae_start,
.stop = hclgevf_ae_stop,
.map_ring_to_vector = hclgevf_map_ring_to_vector,
@@ -1962,7 +2020,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
static struct hnae3_ae_algo ae_algovf = {
.ops = &hclgevf_ops,
- .name = HCLGEVF_NAME,
.pdev_id_table = ae_algovf_pci_tbl,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 0656e8e5c5f0..b23ba171473c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -12,7 +12,6 @@
#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"
-#define HCLGEVF_ROCEE_VECTOR_NUM 0
#define HCLGEVF_MISC_VECTOR_NUM 0
#define HCLGEVF_INVALID_VPORT 0xffff
@@ -150,6 +149,9 @@ struct hclgevf_dev {
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
+ u16 num_roce_msix; /* Num of roce vectors for this VF */
+ u16 roce_base_msix_offset;
+ int roce_base_vector;
u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index b598c06af8e0..e9d5a4f96304 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -152,7 +152,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
+ if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n",
req->msg[0]);
@@ -208,7 +208,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
/* tail the async message in arq */
msg_q = hdev->arq.msg_q[hdev->arq.tail];
- memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE);
+ memcpy(&msg_q[0], req->msg,
+ HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
hclge_mbx_tail_ptr_move_arq(hdev->arq);
hdev->arq.count++;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 79b567447084..6b19607a4caa 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -264,7 +264,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_cmd_fw_ctxt fw_ctxt;
- struct hinic_pfhwdev *pfhwdev;
u16 out_size;
int err;
@@ -276,8 +275,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ;
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT,
&fw_ctxt, sizeof(fw_ctxt),
&fw_ctxt, &out_size);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index b9db6d649743..cb239627770f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -635,17 +635,18 @@ void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
}
/**
- * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci
+ * hinic_sq_read_wqebb - read wqe ptr in the current ci and update the ci; the
+ * wqe has only one wqebb
* @sq: send queue
* @skb: return skb that was saved
- * @wqe_size: the size of the wqe
+ * @wqe_size: pointer used to return the wqe size
* @cons_idx: consumer index of the wqe
*
* Return wqe in ci position
**/
-struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int *wqe_size, u16 *cons_idx)
+struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
+ struct sk_buff **skb,
+ unsigned int *wqe_size, u16 *cons_idx)
{
struct hinic_hw_wqe *hw_wqe;
struct hinic_sq_wqe *sq_wqe;
@@ -658,6 +659,8 @@ struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
if (IS_ERR(hw_wqe))
return NULL;
+ *skb = sq->saved_skb[*cons_idx];
+
sq_wqe = &hw_wqe->sq_wqe;
ctrl = &sq_wqe->ctrl;
ctrl_info = be32_to_cpu(ctrl->ctrl_info);
@@ -665,11 +668,28 @@ struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
*wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task);
*wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len);
+ *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size);
- *skb = sq->saved_skb[*cons_idx];
+ return &hw_wqe->sq_wqe;
+}
+
+/**
+ * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci
+ * @sq: send queue
+ * @skb: return skb that was saved
+ * @wqe_size: the size of the wqe
+ * @cons_idx: consumer index of the wqe
+ *
+ * Return wqe in ci position
+ **/
+struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
+ struct sk_buff **skb,
+ unsigned int wqe_size, u16 *cons_idx)
+{
+ struct hinic_hw_wqe *hw_wqe;
- /* using the real wqe size to read wqe again */
- hw_wqe = hinic_read_wqe(sq->wq, *wqe_size, cons_idx);
+ hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx);
+ *skb = sq->saved_skb[*cons_idx];
return &hw_wqe->sq_wqe;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index df729a1587e9..6c84f83ec283 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -165,7 +165,11 @@ void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
struct sk_buff **skb,
- unsigned int *wqe_size, u16 *cons_idx);
+ unsigned int wqe_size, u16 *cons_idx);
+
+struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
+ struct sk_buff **skb,
+ unsigned int *wqe_size, u16 *cons_idx);
void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 2353ec829c04..c5fca0356c9c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -283,7 +283,11 @@ static void free_all_tx_skbs(struct hinic_txq *txq)
int nr_sges;
u16 ci;
- while ((sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &ci))) {
+ while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
+ sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
+ if (!sq_wqe)
+ break;
+
nr_sges = skb_shinfo(skb)->nr_frags + 1;
hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);
@@ -319,11 +323,21 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
do {
hw_ci = HW_CONS_IDX(sq) & wq->mask;
- sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &sw_ci);
+ /* Read a WQEBB to get the real WQE size and consumer index. */
+ sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
if ((!sq_wqe) ||
(((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
break;
+ /* If this WQE has multiple WQEBBs, read again to get the
+ * full-size WQE.
+ */
+ if (wqe_size > wq->wqebb_size) {
+ sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
+ if (unlikely(!sq_wqe))
+ break;
+ }
+
tx_bytes += skb->len;
pkts++;
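
The pattern above is a two-stage read: a WQEBB is the fixed-size building block, so reading one WQEBB is always safe and yields the real, wqebb-aligned WQE size; only multi-WQEBB WQEs need the second read. Condensed:

sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci); /* stage 1 */
if (sq_wqe && wqe_size > wq->wqebb_size)                   /* stage 2 */
	sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);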
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index ffe7acbeaa22..dafdd4ade705 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -718,23 +718,6 @@ static int init_tx_pools(struct net_device *netdev)
return 0;
}
-static void release_error_buffers(struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
- list_del(&error_buff->list);
- dma_unmap_single(dev, error_buff->dma, error_buff->len,
- DMA_FROM_DEVICE);
- kfree(error_buff->buff);
- kfree(error_buff);
- }
- spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-}
-
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
int i;
@@ -896,7 +879,6 @@ static void release_resources(struct ibmvnic_adapter *adapter)
release_tx_pools(adapter);
release_rx_pools(adapter);
- release_error_buffers(adapter);
release_napi(adapter);
release_login_rsp_buffer(adapter);
}
@@ -3843,132 +3825,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
ibmvnic_send_crq(adapter, &crq);
}
-static void handle_error_info_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff, *tmp;
- unsigned long flags;
- bool found = false;
- int i;
-
- if (!crq->request_error_rsp.rc.code) {
- dev_info(dev, "Request Error Rsp returned with rc=%x\n",
- crq->request_error_rsp.rc.code);
- return;
- }
-
- spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
- if (error_buff->error_id == crq->request_error_rsp.error_id) {
- found = true;
- list_del(&error_buff->list);
- break;
- }
- spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-
- if (!found) {
- dev_err(dev, "Couldn't find error id %x\n",
- be32_to_cpu(crq->request_error_rsp.error_id));
- return;
- }
-
- dev_err(dev, "Detailed info for error id %x:",
- be32_to_cpu(crq->request_error_rsp.error_id));
-
- for (i = 0; i < error_buff->len; i++) {
- pr_cont("%02x", (int)error_buff->buff[i]);
- if (i % 8 == 7)
- pr_cont(" ");
- }
- pr_cont("\n");
-
- dma_unmap_single(dev, error_buff->dma, error_buff->len,
- DMA_FROM_DEVICE);
- kfree(error_buff->buff);
- kfree(error_buff);
-}
-
-static void request_error_information(struct ibmvnic_adapter *adapter,
- union ibmvnic_crq *err_crq)
-{
- struct device *dev = &adapter->vdev->dev;
- struct net_device *netdev = adapter->netdev;
- struct ibmvnic_error_buff *error_buff;
- unsigned long timeout = msecs_to_jiffies(30000);
- union ibmvnic_crq crq;
- unsigned long flags;
- int rc, detail_len;
-
- error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
- if (!error_buff)
- return;
-
- detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
- error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
- if (!error_buff->buff) {
- kfree(error_buff);
- return;
- }
-
- error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, error_buff->dma)) {
- netdev_err(netdev, "Couldn't map error buffer\n");
- kfree(error_buff->buff);
- kfree(error_buff);
- return;
- }
-
- error_buff->len = detail_len;
- error_buff->error_id = err_crq->error_indication.error_id;
-
- spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_add_tail(&error_buff->list, &adapter->errors);
- spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-
- memset(&crq, 0, sizeof(crq));
- crq.request_error_info.first = IBMVNIC_CRQ_CMD;
- crq.request_error_info.cmd = REQUEST_ERROR_INFO;
- crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
- crq.request_error_info.len = cpu_to_be32(detail_len);
- crq.request_error_info.error_id = err_crq->error_indication.error_id;
-
- rc = ibmvnic_send_crq(adapter, &crq);
- if (rc) {
- netdev_err(netdev, "failed to request error information\n");
- goto err_info_fail;
- }
-
- if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
- netdev_err(netdev, "timeout waiting for error information\n");
- goto err_info_fail;
+static const char *ibmvnic_fw_err_cause(u16 cause)
+{
+ switch (cause) {
+ case ADAPTER_PROBLEM:
+ return "adapter problem";
+ case BUS_PROBLEM:
+ return "bus problem";
+ case FW_PROBLEM:
+ return "firmware problem";
+ case DD_PROBLEM:
+ return "device driver problem";
+ case EEH_RECOVERY:
+ return "EEH recovery";
+ case FW_UPDATED:
+ return "firmware updated";
+ case LOW_MEMORY:
+ return "low Memory";
+ default:
+ return "unknown";
}
-
- return;
-
-err_info_fail:
- spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_del(&error_buff->list);
- spin_unlock_irqrestore(&adapter->error_list_lock, flags);
-
- kfree(error_buff->buff);
- kfree(error_buff);
}
static void handle_error_indication(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
+ u16 cause;
- dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
- crq->error_indication.flags
- & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
- be32_to_cpu(crq->error_indication.error_id),
- be16_to_cpu(crq->error_indication.error_cause));
+ cause = be16_to_cpu(crq->error_indication.error_cause);
- if (be32_to_cpu(crq->error_indication.error_id))
- request_error_information(adapter, crq);
+ dev_warn_ratelimited(dev,
+ "Firmware reports %serror, cause: %s. Starting recovery...\n",
+ crq->error_indication.flags
+ & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
+ ibmvnic_fw_err_cause(cause));
if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
ibmvnic_reset(adapter, VNIC_RESET_FATAL);
@@ -4468,10 +4359,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
netdev_dbg(netdev, "Got Error Indication\n");
handle_error_indication(crq, adapter);
break;
- case REQUEST_ERROR_RSP:
- netdev_dbg(netdev, "Got Error Detail Response\n");
- handle_error_info_rsp(crq, adapter);
- break;
case REQUEST_STATISTICS_RSP:
netdev_dbg(netdev, "Got Statistics Response\n");
complete(&adapter->stats_done);
@@ -4830,9 +4717,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
spin_lock_init(&adapter->stats_lock);
- INIT_LIST_HEAD(&adapter->errors);
- spin_lock_init(&adapter->error_list_lock);
-
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
mutex_init(&adapter->reset_lock);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f9fb780102ac..f06eec145ca6 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -512,24 +512,6 @@ struct ibmvnic_error_indication {
u8 reserved2[2];
} __packed __aligned(8);
-struct ibmvnic_request_error_info {
- u8 first;
- u8 cmd;
- u8 reserved[2];
- __be32 ioba;
- __be32 len;
- __be32 error_id;
-} __packed __aligned(8);
-
-struct ibmvnic_request_error_rsp {
- u8 first;
- u8 cmd;
- u8 reserved[2];
- __be32 error_id;
- __be32 len;
- struct ibmvnic_rc rc;
-} __packed __aligned(8);
-
struct ibmvnic_link_state_indication {
u8 first;
u8 cmd;
@@ -709,8 +691,6 @@ union ibmvnic_crq {
struct ibmvnic_request_debug_stats request_debug_stats;
struct ibmvnic_request_debug_stats request_debug_stats_rsp;
struct ibmvnic_error_indication error_indication;
- struct ibmvnic_request_error_info request_error_info;
- struct ibmvnic_request_error_rsp request_error_rsp;
struct ibmvnic_link_state_indication link_state_indication;
struct ibmvnic_change_mac_addr change_mac_addr;
struct ibmvnic_change_mac_addr change_mac_addr_rsp;
@@ -809,8 +789,6 @@ enum ibmvnic_commands {
SET_PHYS_PARMS = 0x07,
SET_PHYS_PARMS_RSP = 0x87,
ERROR_INDICATION = 0x08,
- REQUEST_ERROR_INFO = 0x09,
- REQUEST_ERROR_RSP = 0x89,
LOGICAL_LINK_STATE = 0x0C,
LOGICAL_LINK_STATE_RSP = 0x8C,
REQUEST_STATISTICS = 0x0D,
@@ -945,14 +923,6 @@ struct ibmvnic_rx_pool {
struct ibmvnic_long_term_buff long_term_buff;
};
-struct ibmvnic_error_buff {
- char *buff;
- dma_addr_t dma;
- int len;
- struct list_head list;
- __be32 error_id;
-};
-
struct ibmvnic_vpd {
unsigned char *buff;
dma_addr_t dma_addr;
@@ -1047,9 +1017,6 @@ struct ibmvnic_adapter {
struct completion init_done;
int init_done_rc;
- struct list_head errors;
- spinlock_t error_list_lock;
-
struct completion fw_done;
int fw_done_rc;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index ddbea79d18e5..501ee718177f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -868,6 +868,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
cmd_completed = true;
if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
status = 0;
+ else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ status = I40E_ERR_NOT_READY;
else
status = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 7d888e05f96f..80e3eec6134e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -2247,6 +2247,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
struct i40e_aqc_nvm_update {
u8 command_flags;
#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
+#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
#define I40E_AQ_NVM_FLASH_ONLY 0x80
#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index eb2d1530d331..85f75b5978fc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -3541,6 +3541,41 @@ i40e_aq_update_nvm_exit:
}
/**
+ * i40e_aq_rearrange_nvm
+ * @hw: pointer to the hw struct
+ * @rearrange_nvm: defines direction of rearrangement
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Rearrange NVM structure, available only for transition FW
+ **/
+i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_nvm_update *cmd;
+ i40e_status status;
+ struct i40e_aq_desc desc;
+
+ cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+ rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
+ I40E_AQ_NVM_REARRANGE_TO_STRUCT);
+
+ if (!rearrange_nvm) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_rearrange_nvm_exit;
+ }
+
+ cmd->command_flags |= rearrange_nvm;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_rearrange_nvm_exit:
+ return status;
+}
+
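
Because the argument is masked against the two direction flags before use, a caller passes exactly one of them; anything else fails with I40E_ERR_PARAM. A sketch of a call:

status = i40e_aq_rearrange_nvm(hw, I40E_AQ_NVM_REARRANGE_TO_FLAT, NULL);
if (status)
	/* transition firmware rejected or did not support the request */
	return status;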
+/**
* i40e_aq_get_lldp_mib
* @hw: pointer to the hw struct
* @bridge_type: type of bridge requested
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 6947a2a571cb..abcd096ede14 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -7,6 +7,11 @@
#include "i40e_diag.h"
struct i40e_stats {
+ /* The stat_string is expected to be a format string formatted using
+ * vsnprintf by i40e_add_stat_strings. Every member of a stats array
+ * should use the same format specifiers as they will be formatted
+ * using the same variadic arguments.
+ */
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
@@ -26,6 +31,8 @@ struct i40e_stats {
I40E_STAT(struct i40e_vsi, _name, _stat)
#define I40E_VEB_STAT(_name, _stat) \
I40E_STAT(struct i40e_veb, _name, _stat)
+#define I40E_PFC_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_pfc_stats, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -56,6 +63,13 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = {
I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
};
+static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
+ I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
+ I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
+ I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
+ I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
+};
+
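
Each "%u" stat string above is a template expanded once per traffic class. A sketch of that expansion, assuming a destination buffer p of ETH_GSTRING_LEN-sized slots (the real work is done by i40e_add_stat_strings(), which is not part of this hunk):

for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
	/* "veb.tc_%u_tx_packets" -> "veb.tc_0_tx_packets", ... */
	snprintf(p, ETH_GSTRING_LEN,
		 i40e_gstrings_veb_tc_stats[0].stat_string, i);
	p += ETH_GSTRING_LEN;
}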
static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
@@ -141,6 +155,22 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("port.rx_lpi_count", stats.rx_lpi_count),
};
+struct i40e_pfc_stats {
+ u64 priority_xon_rx;
+ u64 priority_xoff_rx;
+ u64 priority_xon_tx;
+ u64 priority_xoff_tx;
+ u64 priority_xon_2_xoff;
+};
+
+static const struct i40e_stats i40e_gstrings_pfc_stats[] = {
+ I40E_PFC_STAT("port.tx_priority_%u_xon_tx", priority_xon_tx),
+ I40E_PFC_STAT("port.tx_priority_%u_xoff_tx", priority_xoff_tx),
+ I40E_PFC_STAT("port.rx_priority_%u_xon_rx", priority_xon_rx),
+ I40E_PFC_STAT("port.rx_priority_%u_xoff_rx", priority_xoff_rx),
+ I40E_PFC_STAT("port.rx_priority_%u_xon_2_xoff", priority_xon_2_xoff),
+};
+
/* We use num_tx_queues here as a proxy for the maximum number of queues
* available because we always allocate queues symmetrically.
*/
@@ -155,23 +185,17 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n)))
-#define I40E_PFC_STATS_LEN ( \
- (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
- FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
- FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
- FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
- FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
- / sizeof(u64))
-#define I40E_VEB_TC_STATS_LEN ( \
- (FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \
- FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \
- FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
- FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
- / sizeof(u64))
-#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats)
-#define I40E_VEB_STATS_TOTAL (I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
+
+#define I40E_PFC_STATS_LEN (ARRAY_SIZE(i40e_gstrings_pfc_stats) * \
+ I40E_MAX_USER_PRIORITY)
+
+#define I40E_VEB_STATS_LEN (ARRAY_SIZE(i40e_gstrings_veb_stats) + \
+ (ARRAY_SIZE(i40e_gstrings_veb_tc_stats) * \
+ I40E_MAX_TRAFFIC_CLASS))
+
#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
I40E_PFC_STATS_LEN + \
+ I40E_VEB_STATS_LEN + \
I40E_VSI_STATS_LEN((n)))
enum i40e_ethtool_test_id {
@@ -1565,7 +1589,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring;
u16 unused;
/* clone ring and setup updated count */
@@ -1589,9 +1612,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
/* now allocate the Rx buffers to make sure the OS
* has enough memory, any failure here means abort
*/
- ring = &rx_rings[i];
- unused = I40E_DESC_UNUSED(ring);
- err = i40e_alloc_rx_buffers(ring, unused);
+ unused = I40E_DESC_UNUSED(&rx_rings[i]);
+ err = i40e_alloc_rx_buffers(&rx_rings[i], unused);
rx_unwind:
if (err) {
do {
@@ -1681,7 +1703,7 @@ static int i40e_get_stats_count(struct net_device *netdev)
struct i40e_pf *pf = vsi->back;
if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
- return I40E_PF_STATS_LEN(netdev) + I40E_VEB_STATS_TOTAL;
+ return I40E_PF_STATS_LEN(netdev);
else
return I40E_VSI_STATS_LEN(netdev);
}
@@ -1706,6 +1728,114 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
}
/**
+ * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer
+ * @data: location to store the stat value
+ * @pointer: basis for where to copy from
+ * @stat: the stat definition
+ *
+ * Copies the stat data defined by the pointer and stat structure pair into
+ * the memory supplied as data. Used to implement i40e_add_ethtool_stats.
+ * If the pointer is null, data will be zeroed.
+ */
+static inline void
+i40e_add_one_ethtool_stat(u64 *data, void *pointer,
+ const struct i40e_stats *stat)
+{
+ char *p;
+
+ if (!pointer) {
+ /* ensure that the ethtool data buffer is zeroed for any stats
+ * which don't have a valid pointer.
+ */
+ *data = 0;
+ return;
+ }
+
+ p = (char *)pointer + stat->stat_offset;
+ switch (stat->sizeof_stat) {
+ case sizeof(u64):
+ *data = *((u64 *)p);
+ break;
+ case sizeof(u32):
+ *data = *((u32 *)p);
+ break;
+ case sizeof(u16):
+ *data = *((u16 *)p);
+ break;
+ case sizeof(u8):
+ *data = *((u8 *)p);
+ break;
+ default:
+ WARN_ONCE(1, "unexpected stat size for %s",
+ stat->stat_string);
+ *data = 0;
+ }
+}
+
+/**
+ * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer
+ * @data: ethtool stats buffer
+ * @pointer: location to copy stats from
+ * @stats: array of stats to copy
+ * @size: the size of the stats definition
+ *
+ * Copy the stats defined by the stats array using the pointer as a base into
+ * the data buffer supplied by ethtool. Updates the data pointer to point to
+ * the next empty location for successive calls to __i40e_add_ethtool_stats.
+ * If pointer is null, set the data values to zero and update the pointer to
+ * skip these stats.
+ **/
+static inline void
+__i40e_add_ethtool_stats(u64 **data, void *pointer,
+ const struct i40e_stats stats[],
+ const unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
+}
+
+/**
+ * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer
+ * @data: ethtool stats buffer
+ * @pointer: location where stats are stored
+ * @stats: static const array of stat definitions
+ *
+ * Macro to ease the use of __i40e_add_ethtool_stats by taking a static
+ * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
+ * ensuring that we pass the size associated with the given stats array.
+ * Assumes that stats is an array.
+ **/
+#define i40e_add_ethtool_stats(data, pointer, stats) \
+ __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
+
+/**
+ * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
+ * @pf: the PF device structure
+ * @i: the priority value to copy
+ *
+ * The PFC stats are found as arrays in pf->stats, which is not easy to pass
+ * into i40e_add_ethtool_stats. Produce a formatted i40e_pfc_stats structure
+ * of the PFC stats for the given priority.
+ **/
+static inline struct i40e_pfc_stats
+i40e_get_pfc_stats(struct i40e_pf *pf, unsigned int i)
+{
+#define I40E_GET_PFC_STAT(stat, priority) \
+ .stat = pf->stats.stat[priority]
+
+ struct i40e_pfc_stats pfc = {
+ I40E_GET_PFC_STAT(priority_xon_rx, i),
+ I40E_GET_PFC_STAT(priority_xoff_rx, i),
+ I40E_GET_PFC_STAT(priority_xon_tx, i),
+ I40E_GET_PFC_STAT(priority_xoff_tx, i),
+ I40E_GET_PFC_STAT(priority_xon_2_xoff, i),
+ };
+ return pfc;
+}
+
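
Returning the PFC counters by value lets them be described once, as offsets into struct i40e_pfc_stats, instead of hand-copying each pf->stats array element as the removed code later in this patch does. A hedged sketch of a matching table entry, reusing the hypothetical I40E_STAT_SKETCH macro above (the actual i40e_gstrings_pfc_stats table lives elsewhere in this file; its "%u" is filled in with the priority by the string helpers):

    static const struct i40e_stats sketch_pfc_stats[] = {
            I40E_STAT_SKETCH(struct i40e_pfc_stats,
                             "port.tx_priority_%u_xon", priority_xon_tx),
            I40E_STAT_SKETCH(struct i40e_pfc_stats,
                             "port.rx_priority_%u_xoff", priority_xoff_rx),
    };
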
+/**
* i40e_get_ethtool_stats - copy stat values into supplied buffer
* @netdev: the netdev to collect stats for
* @stats: ethtool stats command structure
@@ -1726,23 +1856,19 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ struct i40e_veb *veb = pf->veb[pf->lan_veb];
unsigned int i;
- char *p;
- struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
unsigned int start;
+ bool veb_stats;
+ u64 *p = data;
i40e_update_stats(vsi);
- for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
- p = (char *)net_stats + i40e_gstrings_net_stats[i].stat_offset;
- *(data++) = (i40e_gstrings_net_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
- p = (char *)vsi + i40e_gstrings_misc_stats[i].stat_offset;
- *(data++) = (i40e_gstrings_misc_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
+ i40e_add_ethtool_stats(&data, i40e_get_vsi_stats_struct(vsi),
+ i40e_gstrings_net_stats);
+
+ i40e_add_ethtool_stats(&data, vsi, i40e_gstrings_misc_stats);
+
rcu_read_lock();
for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev) ; i++) {
tx_ring = READ_ONCE(vsi->tx_rings[i]);
@@ -1777,45 +1903,72 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
}
rcu_read_unlock();
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
- return;
+ goto check_data_pointer;
- if ((pf->lan_veb != I40E_NO_VEB) &&
- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
- struct i40e_veb *veb = pf->veb[pf->lan_veb];
+ veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
+ (pf->flags & I40E_FLAG_VEB_STATS_ENABLED));
+
+ /* If veb stats aren't enabled, pass NULL instead of the veb so that
+ * we initialize stats to zero and update the data pointer
+ * intelligently
+ */
+ i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
+ i40e_gstrings_veb_stats);
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
+ i40e_gstrings_veb_tc_stats);
+
+ i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
- for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
- p = (char *)veb;
- p += i40e_gstrings_veb_stats[i].stat_offset;
- *(data++) = (i40e_gstrings_veb_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- *(data++) = veb->tc_stats.tc_tx_packets[i];
- *(data++) = veb->tc_stats.tc_tx_bytes[i];
- *(data++) = veb->tc_stats.tc_rx_packets[i];
- *(data++) = veb->tc_stats.tc_rx_bytes[i];
- }
- } else {
- data += I40E_VEB_STATS_TOTAL;
- }
- for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
- p = (char *)pf + i40e_gstrings_stats[i].stat_offset;
- *(data++) = (i40e_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- *(data++) = pf->stats.priority_xon_tx[i];
- *(data++) = pf->stats.priority_xoff_tx[i];
+ struct i40e_pfc_stats pfc = i40e_get_pfc_stats(pf, i);
+
+ i40e_add_ethtool_stats(&data, &pfc, i40e_gstrings_pfc_stats);
}
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- *(data++) = pf->stats.priority_xon_rx[i];
- *(data++) = pf->stats.priority_xoff_rx[i];
+
+check_data_pointer:
+ WARN_ONCE(data - p != i40e_get_stats_count(netdev),
+ "ethtool stats count mismatch!");
+}
+
+/**
+ * __i40e_add_stat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ * @size: size of the stats array
+ *
+ * Format and copy the strings described by stats into the buffer pointed at
+ * by p.
+ **/
+static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
+ const unsigned int size, ...)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ va_list args;
+
+ va_start(args, size);
+ vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
+ *p += ETH_GSTRING_LEN;
+ va_end(args);
}
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
- *(data++) = pf->stats.priority_xon_2_xoff[i];
}
/**
+ * i40e_add_stat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ *
+ * Format and copy the strings described by the const static stats value into
+ * the buffer pointed at by p. Assumes that stats can have ARRAY_SIZE called
+ * for it.
+ **/
+#define i40e_add_stat_strings(p, stats, ...) \
+ __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
+
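
Note that __i40e_add_stat_strings() runs va_start()/va_end() inside the loop, so every table entry is formatted against the same argument list. That is what lets one table describe a whole family of stats: stat_string holds a printf-style format and the caller passes the varying index. A short usage sketch, assuming a table entry of "veb.tc_%u_tx_packets" as in the strings removed later in this patch:

    /* Each call emits one fixed-width (ETH_GSTRING_LEN) string per table
     * entry, e.g. "veb.tc_2_tx_packets" for i == 2, and advances the
     * data pointer past what it wrote.
     */
    for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
            i40e_add_stat_strings(&data, i40e_gstrings_veb_tc_stats, i);
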
+/**
* i40e_get_stat_strings - copy stat strings into supplied buffer
* @netdev: the netdev to collect strings for
* @data: supplied buffer to copy strings into
@@ -1833,16 +1986,10 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
unsigned int i;
u8 *p = data;
- for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
- snprintf(data, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_net_stats[i].stat_string);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
- snprintf(data, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_misc_stats[i].stat_string);
- data += ETH_GSTRING_LEN;
- }
+ i40e_add_stat_strings(&data, i40e_gstrings_net_stats);
+
+ i40e_add_stat_strings(&data, i40e_gstrings_misc_stats);
+
for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) {
snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
data += ETH_GSTRING_LEN;
@@ -1856,52 +2003,15 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
return;
- for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
- snprintf(data, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_veb_stats[i].stat_string);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- snprintf(data, ETH_GSTRING_LEN,
- "veb.tc_%u_tx_packets", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "veb.tc_%u_tx_bytes", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "veb.tc_%u_rx_packets", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "veb.tc_%u_rx_bytes", i);
- data += ETH_GSTRING_LEN;
- }
+ i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
- for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
- snprintf(data, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_stats[i].stat_string);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(data, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xon", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xoff", i);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(data, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xoff", i);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(data, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon_2_xoff", i);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ i40e_add_stat_strings(&data, i40e_gstrings_veb_tc_stats, i);
+
+ i40e_add_stat_strings(&data, i40e_gstrings_stats);
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
"stat strings count mismatch!");
@@ -4535,7 +4645,6 @@ flags_complete:
if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
struct i40e_dcbx_config *dcbcfg;
- int i;
i40e_aq_stop_lldp(&pf->hw, true, NULL);
i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c944bd10b03d..f2c622e78802 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1800,6 +1800,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
num_tc_qps);
break;
}
+ /* fall through */
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
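
The bare /* fall through */ comments added throughout this series are functional, not cosmetic: GCC's -Wimplicit-fallthrough warning, which the kernel was in the process of enabling tree-wide, recognizes such comments and treats the fall-through as intentional. A minimal illustration, with hypothetical helpers:

    switch (val) {
    case 1:
            do_one();       /* hypothetical helper */
            /* fall through */
    case 2:
            do_two();       /* without the comment, case 1 would warn */
            break;
    }
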
@@ -6597,6 +6598,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
err = i40e_aq_set_phy_config(hw, &config, NULL);
if (err) {
@@ -7522,7 +7525,7 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
case TC_CLSFLOWER_STATS:
return -EOPNOTSUPP;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
@@ -7554,7 +7557,7 @@ static int i40e_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
- np, np);
+ np, np, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
return 0;
@@ -11841,7 +11844,6 @@ static int i40e_xdp(struct net_device *dev,
case XDP_SETUP_PROG:
return i40e_xdp_setup(vsi, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
return 0;
default:
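
Dropping prog_attached here (and in the ixgbe/ixgbevf hunks below) follows an upstream XDP_QUERY_PROG cleanup: a nonzero prog_id already implies an attached program, so the separate flag was redundant. A hedged sketch of a caller-side check under the new convention:

    /* Sketch only: attachment is inferred from the reported id. */
    struct netdev_bpf xdp = { .command = XDP_QUERY_PROG };

    if (!dev->netdev_ops->ndo_bpf(dev, &xdp) && xdp.prog_id)
            pr_info("XDP prog %u attached\n", xdp.prog_id);
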
@@ -11978,7 +11980,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
IFNAMSIZ - 4,
pf->vsi[pf->lan_vsi]->netdev->name);
- random_ether_addr(mac_addr);
+ eth_random_addr(mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_mac_filter(vsi, mac_addr);
@@ -14354,12 +14356,6 @@ static void i40e_shutdown(struct pci_dev *pdev)
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
- rtnl_lock();
- i40e_prep_for_reset(pf, true);
- rtnl_unlock();
-
- wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
- wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 3170655cdeb9..e08d754824b1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -193,6 +193,9 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b151ae316546..b5042d1a63c0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2253,9 +2253,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
break;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping packet */
+ /* fall through -- handle aborts by dropping packet */
case XDP_DROP:
result = I40E_XDP_CONSUMED;
break;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index c355120dfdfd..21a0dbf6ccf6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -797,6 +797,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
cmd_completed = true;
if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
status = 0;
+ else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ status = I40E_ERR_NOT_READY;
else
status = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index aa81e87cd471..5fd8529465d4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -2175,6 +2175,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
struct i40e_aqc_nvm_update {
u8 command_flags;
#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
+#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
#define I40E_AQ_NVM_FLASH_ONLY 0x80
#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 9cef54971312..eea280ba411e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1021,75 +1021,6 @@ do_retry:
}
/**
- * i40evf_aq_set_phy_register
- * @hw: pointer to the hw struct
- * @phy_select: select which phy should be accessed
- * @dev_addr: PHY device address
- * @reg_addr: PHY register address
- * @reg_val: new register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Reset the external PHY.
- **/
-i40e_status i40evf_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_phy_register_access *cmd =
- (struct i40e_aqc_phy_register_access *)&desc.params.raw;
- i40e_status status;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_phy_register);
-
- cmd->phy_interface = phy_select;
- cmd->dev_address = dev_addr;
- cmd->reg_address = cpu_to_le32(reg_addr);
- cmd->reg_value = cpu_to_le32(reg_val);
-
- status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
- * i40evf_aq_get_phy_register
- * @hw: pointer to the hw struct
- * @phy_select: select which phy should be accessed
- * @dev_addr: PHY device address
- * @reg_addr: PHY register address
- * @reg_val: read register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Reset the external PHY.
- **/
-i40e_status i40evf_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_phy_register_access *cmd =
- (struct i40e_aqc_phy_register_access *)&desc.params.raw;
- i40e_status status;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_phy_register);
-
- cmd->phy_interface = phy_select;
- cmd->dev_address = dev_addr;
- cmd->reg_address = cpu_to_le32(reg_addr);
-
- status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
- if (!status)
- *reg_val = le32_to_cpu(cmd->reg_value);
-
- return status;
-}
-
-/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
* @v_opcode: opcodes for VF-PF communication
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index a7b87f935411..5906c1c1d19d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -2884,7 +2884,7 @@ static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
case TC_CLSFLOWER_STATS:
return -EOPNOTSUPP;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
@@ -2926,7 +2926,7 @@ static int i40evf_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb,
- adapter, adapter);
+ adapter, adapter, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb,
adapter);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index b13b42e5a1d9..bafdcf70a353 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -225,19 +225,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
E1000_STATUS_FUNC_SHIFT;
- /* Make sure the PHY is in a good state. Several people have reported
- * firmware leaving the PHY's page select register set to something
- * other than the default of zero, which causes the PHY ID read to
- * access something other than the intended register.
- */
- ret_val = hw->phy.ops.reset(hw);
- if (ret_val) {
- hw_dbg("Error resetting the PHY.\n");
- goto out;
- }
-
/* Set phy->phy_addr and phy->id. */
- igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
ret_val = igb_get_phy_id_82575(hw);
if (ret_val)
return ret_val;
@@ -1720,6 +1708,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
/* disable PCS autoneg and support parallel detect only */
pcs_autoneg = false;
+ /* fall through */
default:
if (hw->mac.type == e1000_82575 ||
hw->mac.type == e1000_82576) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 252440a418dc..8a28f3388f69 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1048,6 +1048,22 @@
#define E1000_TQAVCTRL_XMIT_MODE BIT(0)
#define E1000_TQAVCTRL_DATAFETCHARB BIT(4)
#define E1000_TQAVCTRL_DATATRANARB BIT(8)
+#define E1000_TQAVCTRL_DATATRANTIM BIT(9)
+#define E1000_TQAVCTRL_SP_WAIT_SR BIT(10)
+/* Fetch Time Delta - bits 31:16
+ *
+ * This field holds the value to be reduced from the launch time for
+ * fetch time decision. The FetchTimeDelta value is defined in 32 ns
+ * granularity.
+ *
+ * This field is 16 bits wide, and so the maximum value is:
+ *
+ * 65535 * 32 ns = 2097120 ns ~= 2.1 msec
+ *
+ * XXX: We are configuring the max value here since we couldn't come up
+ * with a reason for not doing so.
+ */
+#define E1000_TQAVCTRL_FETCHTIME_DELTA (0xFFFF << 16)
/* TX Qav Credit Control fields */
#define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 2be0e762ec69..ad2125e5a7f7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -659,6 +659,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
phy_data |= M88E1000_PSCR_AUTO_X_1000T;
break;
}
+ /* fall through */
case 0:
default:
phy_data |= M88E1000_PSCR_AUTO_X_MODE;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9643b5b3d444..ca54e268d157 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -262,6 +262,7 @@ struct igb_ring {
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */
+ bool launchtime_enable; /* true if LaunchTime is enabled */
bool cbs_enable; /* indicates if CBS is enabled */
s32 idleslope; /* idleSlope in kbps */
s32 sendslope; /* sendSlope in kbps */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f707709969ac..d03c2f0d7592 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -22,7 +22,6 @@
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
-#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
@@ -1654,33 +1653,65 @@ static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
wr32(E1000_I210_TQAVCC(queue), val);
}
+static bool is_any_cbs_enabled(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->tx_ring[i]->cbs_enable)
+ return true;
+ }
+
+ return false;
+}
+
+static bool is_any_txtime_enabled(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->tx_ring[i]->launchtime_enable)
+ return true;
+ }
+
+ return false;
+}
+
/**
- * igb_configure_cbs - Configure Credit-Based Shaper (CBS)
+ * igb_config_tx_modes - Configure "Qav Tx mode" features on igb
* @adapter: pointer to adapter struct
* @queue: queue number
- * @enable: true = enable CBS, false = disable CBS
- * @idleslope: idleSlope in kbps
- * @sendslope: sendSlope in kbps
- * @hicredit: hiCredit in bytes
- * @locredit: loCredit in bytes
*
- * Configure CBS for a given hardware queue. When disabling, idleslope,
- * sendslope, hicredit, locredit arguments are ignored. Returns 0 if
- * success. Negative otherwise.
+ * Configure CBS and Launchtime for a given hardware queue.
+ * Parameters are retrieved from the correct Tx ring, so
+ * igb_save_cbs_params() and igb_save_txtime_params() should be used
+ * for setting those correctly prior to this function being called.
**/
-static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
- bool enable, int idleslope, int sendslope,
- int hicredit, int locredit)
+static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
+ struct igb_ring *ring = adapter->tx_ring[queue];
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
- u32 tqavcc;
+ u32 tqavcc, tqavctrl;
u16 value;
WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1);
- if (enable || queue == 0) {
+ /* If either of the Qav features (CBS, LaunchTime) is enabled for this
+ * queue, configure it as SR with HIGH PRIO. If neither is, configure
+ * it as SP with LOW PRIO.
+ */
+ if (ring->cbs_enable || ring->launchtime_enable) {
+ set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
+ set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
+ } else {
+ set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
+ set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
+ }
+
+ /* If CBS is enabled, set DataTranARB and config its parameters. */
+ if (ring->cbs_enable || queue == 0) {
/* i210 does not allow the queue 0 to be in the Strict
* Priority mode while the Qav mode is enabled, so,
* instead of disabling strict priority mode, we give
@@ -1690,14 +1721,19 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* Queue0 QueueMode must be set to 1b when
* TransmitMode is set to Qav."
*/
- if (queue == 0 && !enable) {
+ if (queue == 0 && !ring->cbs_enable) {
/* max "linkspeed" idleslope in kbps */
- idleslope = 1000000;
- hicredit = ETH_FRAME_LEN;
+ ring->idleslope = 1000000;
+ ring->hicredit = ETH_FRAME_LEN;
}
- set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
- set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
+ /* Always set data transfer arbitration to credit-based
+ * shaper algorithm on TQAVCTRL if CBS is enabled for any of
+ * the queues.
+ */
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
/* According to i210 datasheet section 7.2.7.7, we should set
* the 'idleSlope' field from TQAVCC register following the
@@ -1756,17 +1792,16 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* calculated value, so the resulting bandwidth might
* be slightly higher for some configurations.
*/
- value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
+ value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
tqavcc = rd32(E1000_I210_TQAVCC(queue));
tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
tqavcc |= value;
wr32(E1000_I210_TQAVCC(queue), tqavcc);
- wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735);
+ wr32(E1000_I210_TQAVHC(queue),
+ 0x80000000 + ring->hicredit * 0x7735);
} else {
- set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
- set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
/* Set idleSlope to zero. */
tqavcc = rd32(E1000_I210_TQAVCC(queue));
@@ -1775,6 +1810,43 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
/* Set hiCredit to zero. */
wr32(E1000_I210_TQAVHC(queue), 0);
+
+ /* If CBS is not enabled for any queues anymore, then return to
+ * the default state of Data Transmission Arbitration on
+ * TQAVCTRL.
+ */
+ if (!is_any_cbs_enabled(adapter)) {
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
+ }
+ }
+
+ /* If LaunchTime is enabled, set DataTranTIM. */
+ if (ring->launchtime_enable) {
+ /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled
+ * for any of the SR queues, and configure fetchtime delta.
+ * XXX NOTE:
+ * - LaunchTime will be enabled for all SR queues.
+ * - A fixed offset can be added relative to the launch
+ * time of all packets if configured at reg LAUNCH_OS0.
+ * We are keeping it as 0 for now (default value).
+ */
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
+ E1000_TQAVCTRL_FETCHTIME_DELTA;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
+ } else {
+ /* If Launchtime is not enabled for any SR queues anymore,
+ * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta,
+ * effectively disabling Launchtime.
+ */
+ if (!is_any_txtime_enabled(adapter)) {
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
+ tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
+ }
}
/* XXX: In i210 controller the sendSlope and loCredit parameters from
@@ -1782,9 +1854,27 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* configuration' in respect to these parameters.
*/
- netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
- (enable) ? "enabled" : "disabled", queue,
- idleslope, sendslope, hicredit, locredit);
+ netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \
+ idleslope %d sendslope %d hiCredit %d \
+ locredit %d\n",
+ (ring->cbs_enable) ? "enabled" : "disabled",
+ (ring->launchtime_enable) ? "enabled" : "disabled", queue,
+ ring->idleslope, ring->sendslope, ring->hicredit,
+ ring->locredit);
+}
+
+static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
+ bool enable)
+{
+ struct igb_ring *ring;
+
+ if (queue < 0 || queue > adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[queue];
+ ring->launchtime_enable = enable;
+
+ return 0;
}
static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
@@ -1807,21 +1897,15 @@ static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
return 0;
}
-static bool is_any_cbs_enabled(struct igb_adapter *adapter)
-{
- struct igb_ring *ring;
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = adapter->tx_ring[i];
-
- if (ring->cbs_enable)
- return true;
- }
-
- return false;
-}
-
+/**
+ * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
+ * @adapter: pointer to adapter struct
+ *
+ * Configure the TQAVCTRL register, switching the controller's Tx mode
+ * when FQTSS mode is enabled or disabled. Additionally, issue a call
+ * to igb_config_tx_modes() per queue so any previously saved Tx
+ * parameters are applied.
+ **/
static void igb_setup_tx_mode(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1836,11 +1920,11 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
int i, max_queue;
/* Configure TQAVCTRL register: set transmit mode to 'Qav',
- * set data fetch arbitration to 'round robin' and set data
- * transfer arbitration to 'credit shaper algorithm.
+ * set data fetch arbitration to 'round robin', set SP_WAIT_SR
+ * so SP queues wait for SR ones.
*/
val = rd32(E1000_I210_TQAVCTRL);
- val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB;
+ val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
val &= ~E1000_TQAVCTRL_DATAFETCHARB;
wr32(E1000_I210_TQAVCTRL, val);
@@ -1881,11 +1965,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
adapter->num_tx_queues : I210_SR_QUEUES_NUM;
for (i = 0; i < max_queue; i++) {
- struct igb_ring *ring = adapter->tx_ring[i];
-
- igb_configure_cbs(adapter, i, ring->cbs_enable,
- ring->idleslope, ring->sendslope,
- ring->hicredit, ring->locredit);
+ igb_config_tx_modes(adapter, i);
}
} else {
wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
@@ -2459,6 +2539,19 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
+static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
+{
+ if (!is_fqtss_enabled(adapter)) {
+ enable_fqtss(adapter, true);
+ return;
+ }
+
+ igb_config_tx_modes(adapter, queue);
+
+ if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
+ enable_fqtss(adapter, false);
+}
+
static int igb_offload_cbs(struct igb_adapter *adapter,
struct tc_cbs_qopt_offload *qopt)
{
@@ -2479,17 +2572,7 @@ static int igb_offload_cbs(struct igb_adapter *adapter,
if (err)
return err;
- if (is_fqtss_enabled(adapter)) {
- igb_configure_cbs(adapter, qopt->queue, qopt->enable,
- qopt->idleslope, qopt->sendslope,
- qopt->hicredit, qopt->locredit);
-
- if (!is_any_cbs_enabled(adapter))
- enable_fqtss(adapter, false);
-
- } else {
- enable_fqtss(adapter, true);
- }
+ igb_offload_apply(adapter, qopt->queue);
return 0;
}
@@ -2698,7 +2781,7 @@ static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
case TC_CLSFLOWER_STATS:
return -EOPNOTSUPP;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
@@ -2728,7 +2811,7 @@ static int igb_setup_tc_block(struct igb_adapter *adapter,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
- adapter, adapter);
+ adapter, adapter, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
adapter);
@@ -2738,6 +2821,29 @@ static int igb_setup_tc_block(struct igb_adapter *adapter,
}
}
+static int igb_offload_txtime(struct igb_adapter *adapter,
+ struct tc_etf_qopt_offload *qopt)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ /* Launchtime offloading is only supported by i210 controller. */
+ if (hw->mac.type != e1000_i210)
+ return -EOPNOTSUPP;
+
+ /* Launchtime offloading is only supported by queues 0 and 1. */
+ if (qopt->queue < 0 || qopt->queue > 1)
+ return -EINVAL;
+
+ err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
+ if (err)
+ return err;
+
+ igb_offload_apply(adapter, qopt->queue);
+
+ return 0;
+}
+
static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -2748,6 +2854,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
return igb_offload_cbs(adapter, type_data);
case TC_SETUP_BLOCK:
return igb_setup_tc_block(adapter, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return igb_offload_txtime(adapter, type_data);
default:
return -EOPNOTSUPP;
@@ -5067,6 +5175,7 @@ bool igb_has_link(struct igb_adapter *adapter)
case e1000_media_type_copper:
if (!hw->mac.get_link_status)
return true;
+ /* fall through */
case e1000_media_type_internal_serdes:
hw->mac.ops.check_for_link(hw);
link_active = !hw->mac.get_link_status;
@@ -5568,11 +5677,14 @@ set_itr_now:
}
}
-static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
- u32 type_tucmd, u32 mss_l4len_idx)
+static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
+ struct igb_tx_buffer *first,
+ u32 vlan_macip_lens, u32 type_tucmd,
+ u32 mss_l4len_idx)
{
struct e1000_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
+ struct timespec64 ts;
context_desc = IGB_TX_CTXTDESC(tx_ring, i);
@@ -5587,9 +5699,18 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
mss_l4len_idx |= tx_ring->reg_idx << 4;
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+ /* We assume there is always a valid tx time available. Invalid times
+ * should have been handled by the upper layers.
+ */
+ if (tx_ring->launchtime_enable) {
+ ts = ns_to_timespec64(first->skb->tstamp);
+ context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
+ } else {
+ context_desc->seqnum_seed = 0;
+ }
}
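
The i210 consumes the launch time in 32 ns units, and only the within-second part of the timestamp matters, which is why ts.tv_nsec alone feeds the division above. A worked example with a hypothetical value:

    /* For skb->tstamp with tv_nsec == 1600000 (1.6 ms into the second),
     * the descriptor carries 1600000 / 32 = 50000, i.e. the launch
     * offset expressed in 32 ns ticks.
     */
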
static int igb_tso(struct igb_ring *tx_ring,
@@ -5672,7 +5793,8 @@ static int igb_tso(struct igb_ring *tx_ring,
vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
- igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
+ igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
return 1;
}
@@ -5714,6 +5836,7 @@ csum_failed:
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
+ /* fall through */
default:
skb_checksum_help(skb);
goto csum_failed;
@@ -5727,7 +5850,7 @@ no_csum:
vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
- igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
+ igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
}
#define IGB_SET_FLAG(_input, _flag, _result) \
@@ -5909,7 +6032,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
* We also need this memory barrier to make certain all of the
* status bits have been updated before next_to_watch is written.
*/
- wmb();
+ dma_wmb();
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
@@ -6015,8 +6138,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
}
}
- skb_tx_timestamp(skb);
-
if (skb_vlan_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
@@ -6032,6 +6153,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
else if (!tso)
igb_tx_csum(tx_ring, first);
+ skb_tx_timestamp(skb);
+
if (igb_tx_map(tx_ring, first, hdr_len))
goto cleanup_tx_tstamp;
@@ -8409,7 +8532,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
- wmb();
+ dma_wmb();
writel(i, rx_ring->tail);
}
}
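
Both wmb()-to-dma_wmb() conversions in this file rest on the same reasoning: the barrier only has to order the CPU's writes to the DMA-coherent descriptor ring before the device can observe them, and dma_wmb() provides exactly that at lower cost than a full wmb() on weakly ordered architectures. The canonical producer pattern, sketched with hypothetical field names:

    ring->desc[i].addr = cpu_to_le64(dma);  /* publish descriptor fields */
    dma_wmb();                              /* make them visible first */
    writel(i, ring->tail);                  /* then ring the doorbell */
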
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index f818f060e5a7..e0c989ffb2b3 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2102,6 +2102,7 @@ csum_failed:
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
+ /* fall through */
default:
skb_checksum_help(skb);
goto csum_failed;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 144d5fe6b944..4fc906c6166b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -855,7 +855,8 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
-void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
+void ixgbe_disable_rx(struct ixgbe_adapter *adapter);
+void ixgbe_disable_tx(struct ixgbe_adapter *adapter);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index bd1ba88ec1d5..e5a8461fe6a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -511,7 +511,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
static int ixgbe_get_regs_len(struct net_device *netdev)
{
-#define IXGBE_REGS_LEN 1139
+#define IXGBE_REGS_LEN 1145
return IXGBE_REGS_LEN * sizeof(u32);
}
@@ -874,6 +874,14 @@ static void ixgbe_get_regs(struct net_device *netdev,
/* X540 specific DCB registers */
regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
+
+ /* Security config registers */
+ regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+ regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
+ regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
@@ -1690,35 +1698,17 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
- struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
- struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 reg_ctl;
-
- /* shut down the DMA engines now so they can be reinitialized later */
+ /* Shut down the DMA engines now so they can be reinitialized later.
+ * Since the test rings and the normally used rings should overlap on
+ * queue 0, we can just use the standard disable Rx/Tx calls, and they
+ * will take care of disabling the test rings for us.
+ */
/* first Rx */
- hw->mac.ops.disable_rx(hw);
- ixgbe_disable_rx_queue(adapter, rx_ring);
+ ixgbe_disable_rx(adapter);
/* now Tx */
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
- reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
-
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
- reg_ctl &= ~IXGBE_DMATXCTL_TE;
- IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
- break;
- default:
- break;
- }
+ ixgbe_disable_tx(adapter);
ixgbe_reset(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 62e57b05a0ae..447098005490 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4022,38 +4022,6 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
}
}
-void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int wait_loop = IXGBE_MAX_RX_DESC_POLL;
- u32 rxdctl;
- u8 reg_idx = ring->reg_idx;
-
- if (ixgbe_removed(hw->hw_addr))
- return;
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- rxdctl &= ~IXGBE_RXDCTL_ENABLE;
-
- /* write value back with RXDCTL.ENABLE bit cleared */
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
-
- if (hw->mac.type == ixgbe_mac_82598EB &&
- !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
- return;
-
- /* the hardware may take up to 100us to really disable the rx queue */
- do {
- udelay(10);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
-
- if (!wait_loop) {
- e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
- "the polling period\n", reg_idx);
- }
-}
-
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
@@ -4063,9 +4031,13 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
- /* disable queue to avoid issues while updating state */
+ /* disable queue to avoid use of these values while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- ixgbe_disable_rx_queue(adapter, ring);
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+ IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -5275,6 +5247,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
struct ixgbe_fwd_adapter *accel)
{
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ int num_tc = netdev_get_num_tc(adapter->netdev);
struct net_device *vdev = accel->netdev;
int i, baseq, err;
@@ -5286,6 +5260,11 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
accel->rx_base_queue = baseq;
accel->tx_base_queue = baseq;
+ /* record configuration for macvlan interface in vdev */
+ for (i = 0; i < num_tc; i++)
+ netdev_bind_sb_channel_queue(adapter->netdev, vdev,
+ i, rss_i, baseq + (rss_i * i));
+
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
adapter->rx_ring[baseq + i]->netdev = vdev;
@@ -5310,6 +5289,10 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(adapter->netdev, vdev);
+ netdev_set_sb_channel(vdev, 0);
+
clear_bit(accel->pool, adapter->fwd_bitmask);
kfree(accel);
@@ -5622,6 +5605,212 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
ixgbe_up_complete(adapter);
}
+static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter)
+{
+ u16 devctl2;
+
+ pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
+
+ switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) {
+ case IXGBE_PCIDEVCTRL2_17_34s:
+ case IXGBE_PCIDEVCTRL2_4_8s:
+ /* For now we cap the upper limit on the delay at 2 seconds,
+ * as the worst-case timeout value would otherwise reach up
+ * to 34 seconds of delay.
+ */
+ case IXGBE_PCIDEVCTRL2_1_2s:
+ return 2000000ul; /* 2.0 s */
+ case IXGBE_PCIDEVCTRL2_260_520ms:
+ return 520000ul; /* 520 ms */
+ case IXGBE_PCIDEVCTRL2_65_130ms:
+ return 130000ul; /* 130 ms */
+ case IXGBE_PCIDEVCTRL2_16_32ms:
+ return 32000ul; /* 32 ms */
+ case IXGBE_PCIDEVCTRL2_1_2ms:
+ return 2000ul; /* 2 ms */
+ case IXGBE_PCIDEVCTRL2_50_100us:
+ return 100ul; /* 100 us */
+ case IXGBE_PCIDEVCTRL2_16_32ms_def:
+ return 32000ul; /* 32 ms */
+ default:
+ break;
+ }
+
+ /* We shouldn't need to hit this path, but just in case, default as
+ * though the completion timeout is not supported and use 32 ms.
+ */
+ return 32000ul;
+}
+
+void ixgbe_disable_rx(struct ixgbe_adapter *adapter)
+{
+ unsigned long wait_delay, delay_interval;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, wait_loop;
+ u32 rxdctl;
+
+ /* disable receives */
+ hw->mac.ops.disable_rx(hw);
+
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+
+ /* disable all enabled Rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ rxdctl |= IXGBE_RXDCTL_SWFLSH;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+ }
+
+ /* RXDCTL.EN may not change on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* Determine our minimum delay interval. We will increase this value
+ * with each subsequent test. This way if the device returns quickly
+ * we should spend as little time as possible waiting, however as
+ * the time increases we will wait for larger periods of time.
+ *
+ * The trick here is that we increase the interval using the
+ * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
+ * of that wait is that it totals up to 100x whatever interval we
+ * choose. Since our minimum wait is 100us we can just divide the
+ * total timeout by 100 to get our minimum delay interval.
+ */
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ wait_delay = delay_interval;
+
+ while (wait_loop--) {
+ usleep_range(wait_delay, wait_delay + 10);
+ wait_delay += delay_interval * 2;
+ rxdctl = 0;
+
+ /* OR together the reading of all the active RXDCTL registers,
+ * and then test the result. We need the disable to complete
+ * before we start freeing the memory and invalidating the
+ * DMA mappings.
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ }
+
+ if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
+ return;
+ }
+
+ e_err(drv,
+ "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
+}
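
The 1x/3x/5x... schedule works because the first ten odd numbers sum to 100, so assuming wait_loop starts at ten polls (as the 100x arithmetic in the comment implies), the loop's total sleep spans roughly one completion timeout. Worked numbers for the 2 s cap above:

    /* delay_interval = 2000000 us / 100 = 20000 us (20 ms).
     * Sleeps: 20, 60, 100, 140, 180, 220, 260, 300, 340, 380 ms
     * (1 + 3 + 5 + ... + 19 = 100 intervals), totalling ~2.0 s.
     */
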
+
+void ixgbe_disable_tx(struct ixgbe_adapter *adapter)
+{
+ unsigned long wait_delay, delay_interval;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, wait_loop;
+ u32 txdctl;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+
+ /* disable all enabled Tx queues */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ }
+
+ /* disable all enabled XDP Tx queues */
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ }
+
+ /* If the link is not up there shouldn't be much in the way of
+ * pending transactions. Those that are left will be flushed out
+ * when the reset logic goes through the flush sequence to clean out
+ * the pending Tx transactions.
+ */
+ if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ goto dma_engine_disable;
+
+ /* Determine our minimum delay interval. We will increase this value
+ * with each subsequent test. This way if the device returns quickly
+ * we should spend as little time as possible waiting, however as
+ * the time increases we will wait for larger periods of time.
+ *
+ * The trick here is that we increase the interval using the
+ * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
+ * of that wait is that it totals up to 100x whatever interval we
+ * choose. Since our minimum wait is 100us we can just divide the
+ * total timeout by 100 to get our minimum delay interval.
+ */
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ wait_delay = delay_interval;
+
+ while (wait_loop--) {
+ usleep_range(wait_delay, wait_delay + 10);
+ wait_delay += delay_interval * 2;
+ txdctl = 0;
+
+ /* OR together the reading of all the active TXDCTL registers,
+ * and then test the result. We need the disable to complete
+ * before we start freeing the memory and invalidating the
+ * DMA mappings.
+ */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ }
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ }
+
+ if (!(txdctl & IXGBE_TXDCTL_ENABLE))
+ goto dma_engine_disable;
+ }
+
+ e_err(drv,
+ "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
+
+dma_engine_disable:
+ /* Disable the Tx DMA engine on 82599 and later MAC */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
+ (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
+ ~IXGBE_DMATXCTL_TE));
+ /* fall through */
+ default:
+ break;
+ }
+}
+
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -5803,24 +5992,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
return; /* do nothing if already down */
- /* disable receives */
- hw->mac.ops.disable_rx(hw);
+ /* Shut off incoming Tx traffic */
+ netif_tx_stop_all_queues(netdev);
- /* disable all enabled rx queues */
- for (i = 0; i < adapter->num_rx_queues; i++)
- /* this call also flushes the previous write */
- ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+ /* call carrier off first to avoid false dev_watchdog timeouts */
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
- usleep_range(10000, 20000);
+ /* Disable Rx */
+ ixgbe_disable_rx(adapter);
/* synchronize_sched() needed for pending XDP buffers to drain */
if (adapter->xdp_ring[0])
synchronize_sched();
- netif_tx_stop_all_queues(netdev);
-
- /* call carrier off first to avoid false dev_watchdog timeouts */
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
ixgbe_irq_disable(adapter);
@@ -5848,30 +6032,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
}
/* disable transmits in the hardware now that interrupts are off */
- for (i = 0; i < adapter->num_tx_queues; i++) {
- u8 reg_idx = adapter->tx_ring[i]->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
- }
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
-
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
- }
-
- /* Disable the Tx DMA engine on 82599 and later MAC */
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
- (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
- ~IXGBE_DMATXCTL_TE));
- break;
- default:
- break;
- }
+ ixgbe_disable_tx(adapter);
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
@@ -6458,6 +6619,11 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (adapter->xdp_prog) {
+ e_warn(probe, "MTU cannot be changed while XDP program is loaded\n");
+ return -EPERM;
+ }
+
/*
* For 82599EB we cannot allow legacy VFs to enable their receive
* paths when MTU greater than 1500 is configured. So display a
@@ -8197,25 +8363,25 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
input, common, ring->queue_index);
}
+#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
- struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
struct ixgbe_adapter *adapter;
- int txq;
-#ifdef IXGBE_FCOE
struct ixgbe_ring_feature *f;
-#endif
+ int txq;
- if (fwd_adapter) {
- adapter = netdev_priv(dev);
- txq = reciprocal_scale(skb_get_hash(skb),
- adapter->num_rx_queues_per_pool);
+ if (sb_dev) {
+ u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+ struct net_device *vdev = sb_dev;
- return txq + fwd_adapter->tx_base_queue;
- }
+ txq = vdev->tc_to_txq[tc].offset;
+ txq += reciprocal_scale(skb_get_hash(skb),
+ vdev->tc_to_txq[tc].count);
-#ifdef IXGBE_FCOE
+ return txq;
+ }
/*
* only execute the code below if protocol is FCoE
@@ -8226,11 +8392,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
case htons(ETH_P_FIP):
adapter = netdev_priv(dev);
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
break;
/* fall through */
default:
- return fallback(dev, skb);
+ return fallback(dev, skb, sb_dev);
}
f = &adapter->ring_feature[RING_F_FCOE];
@@ -8242,11 +8408,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
txq -= f->indices;
return txq + f->offset;
-#else
- return fallback(dev, skb);
-#endif
}
+#endif
static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
struct xdp_frame *xdpf)
{
@@ -8766,6 +8930,11 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
/* if we cannot find a free pool then disable the offload */
netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
macvlan_release_l2fw_offload(vdev);
+
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(adapter->netdev, vdev);
+ netdev_set_sb_channel(vdev, 0);
+
kfree(accel);
return 0;
@@ -9329,7 +9498,7 @@ static int ixgbe_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb,
- adapter, adapter);
+ adapter, adapter, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb,
adapter);
@@ -9393,6 +9562,11 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
features &= ~NETIF_F_LRO;
+ if (adapter->xdp_prog && (features & NETIF_F_LRO)) {
+ e_dev_err("LRO is not supported with XDP\n");
+ features &= ~NETIF_F_LRO;
+ }
+
return features;
}
@@ -9769,6 +9943,13 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
if (!macvlan_supports_dest_filter(vdev))
return ERR_PTR(-EMEDIUMTYPE);
+ /* We need to lock down the macvlan to be a single queue device so that
+ * we can reuse the tc_to_txq field in the macvlan netdev to represent
+ * the queue mapping to our netdev.
+ */
+ if (netif_is_multiqueue(vdev))
+ return ERR_PTR(-ERANGE);
+
pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
if (pool == adapter->num_rx_pools) {
u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
@@ -9825,6 +10006,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
return ERR_PTR(-ENOMEM);
set_bit(pool, adapter->fwd_bitmask);
+ netdev_set_sb_channel(vdev, pool);
accel->pool = pool;
accel->netdev = vdev;
@@ -9866,6 +10048,10 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
ring->netdev = NULL;
}
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(pdev, accel->netdev);
+ netdev_set_sb_channel(accel->netdev, 0);
+
clear_bit(accel->pool, adapter->fwd_bitmask);
kfree(accel);
}
@@ -9966,7 +10152,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return ixgbe_xdp_setup(dev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!(adapter->xdp_prog);
xdp->prog_id = adapter->xdp_prog ?
adapter->xdp_prog->aux->id : 0;
return 0;
@@ -10026,7 +10211,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
- .ndo_select_queue = ixgbe_select_queue,
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
@@ -10049,6 +10233,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_poll_controller = ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
+ .ndo_select_queue = ixgbe_select_queue,
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
.ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 59416eddd840..d86446d202d5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4462,7 +4462,6 @@ static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return ixgbevf_xdp_setup(dev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!(adapter->xdp_prog);
xdp->prog_id = adapter->xdp_prog ?
adapter->xdp_prog->aux->id : 0;
return 0;
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 06ff185eb188..a5ab6f3403ae 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1911,10 +1911,10 @@ jme_wait_link(struct jme_adapter *jme)
{
u32 phylink, to = JME_WAIT_LINK_TIME;
- mdelay(1000);
+ msleep(1000);
phylink = jme_linkstat_from_phy(jme);
while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
- mdelay(10);
+ usleep_range(10000, 11000);
phylink = jme_linkstat_from_phy(jme);
}
}
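
These substitutions follow the kernel's timer guidance (Documentation/timers/timers-howto.txt): mdelay() busy-waits and is meant for atomic context, while msleep() and usleep_range() yield the CPU; usleep_range() is preferred below roughly 10 ms, where plain msleep() can overshoot considerably. A sketch of the two replacement idioms, safe here on the assumption that jme_wait_link() runs in sleepable context:

    msleep(1000);                   /* long wait: coarse jiffies sleep */
    usleep_range(10000, 11000);     /* short wait: hrtimer-backed range */
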
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index afc810069440..7a637b51c7d2 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -563,14 +563,6 @@ ltq_etop_set_multicast_list(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
}
-static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
-{
- /* we are currently only using the first queue */
- return 0;
-}
-
static int
ltq_etop_init(struct net_device *dev)
{
@@ -641,7 +633,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
.ndo_set_mac_address = ltq_etop_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = ltq_etop_set_multicast_list,
- .ndo_select_queue = ltq_etop_select_queue,
+ .ndo_select_queue = dev_pick_tx_zero,
.ndo_init = ltq_etop_init,
.ndo_tx_timeout = ltq_etop_tx_timeout,
};
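dev_pick_tx_zero() lets drivers that only use queue 0 drop their private select_queue boilerplate. Its behavior amounts to the sketch below; the exact prototype tracks ndo_select_queue and has changed across releases:

u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
		     struct net_device *sb_dev,
		     select_queue_fallback_t fallback)
{
	return 0;	/* always transmit on queue 0 */
}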
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 0ad2f3f7da85..bc80a678abc3 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -295,10 +295,10 @@
#define MVNETA_RSS_LU_TABLE_SIZE 1
/* Max number of Rx descriptors */
-#define MVNETA_MAX_RXD 128
+#define MVNETA_MAX_RXD 512
/* Max number of Tx descriptors */
-#define MVNETA_MAX_TXD 532
+#define MVNETA_MAX_TXD 1024
/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100
@@ -328,6 +328,8 @@
enum {
ETHTOOL_STAT_EEE_WAKEUP,
+ ETHTOOL_STAT_SKB_ALLOC_ERR,
+ ETHTOOL_STAT_REFILL_ERR,
ETHTOOL_MAX_STATS,
};
@@ -375,6 +377,8 @@ static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x3054, T_REG_32, "fc_sent", },
{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
+ { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
+ { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
};
struct mvneta_pcpu_stats {
@@ -479,7 +483,10 @@ struct mvneta_port {
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
-#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
+#define MVNETA_RXD_LAST_DESC BIT(26)
+#define MVNETA_RXD_FIRST_DESC BIT(27)
+#define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \
+ MVNETA_RXD_LAST_DESC)
#define MVNETA_RXD_L4_CSUM_OK BIT(30)
#if defined(__LITTLE_ENDIAN)
@@ -589,9 +596,6 @@ struct mvneta_rx_queue {
/* num of rx descriptors in the rx descriptor ring */
int size;
- /* counter of times when mvneta_refill() failed */
- int missed;
-
u32 pkts_coal;
u32 time_coal;
@@ -609,6 +613,18 @@ struct mvneta_rx_queue {
/* Index of the next RX DMA descriptor to process */
int next_desc_to_proc;
+
+ /* Index of first RX DMA descriptor to refill */
+ int first_to_refill;
+ u32 refill_num;
+
+ /* pointer to incomplete skb buffer */
+ struct sk_buff *skb;
+ int left_size;
+
+ /* error counters */
+ u32 skb_alloc_err;
+ u32 refill_err;
};
static enum cpuhp_state online_hpstate;
@@ -621,6 +637,7 @@ static int txq_number = 8;
static int rxq_def;
static int rx_copybreak __read_mostly = 256;
+static int rx_header_size __read_mostly = 128;
/* HW BM requires that each port be identified by a unique ID */
static int global_port_id;
@@ -1684,13 +1701,6 @@ static void mvneta_rx_error(struct mvneta_port *pp,
{
u32 status = rx_desc->status;
- if (!mvneta_rxq_desc_is_first_last(status)) {
- netdev_err(pp->dev,
- "bad rx status %08x (buffer oversize), size=%d\n",
- status, rx_desc->data_size);
- return;
- }
-
switch (status & MVNETA_RXD_ERR_CODE_MASK) {
case MVNETA_RXD_ERR_CRC:
netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
@@ -1715,7 +1725,8 @@ static void mvneta_rx_error(struct mvneta_port *pp,
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
struct sk_buff *skb)
{
- if ((status & MVNETA_RXD_L3_IP4) &&
+ if ((pp->dev->features & NETIF_F_RXCSUM) &&
+ (status & MVNETA_RXD_L3_IP4) &&
(status & MVNETA_RXD_L4_CSUM_OK)) {
skb->csum = 0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1790,47 +1801,30 @@ static void mvneta_txq_done(struct mvneta_port *pp,
}
}
-void *mvneta_frag_alloc(unsigned int frag_size)
-{
- if (likely(frag_size <= PAGE_SIZE))
- return netdev_alloc_frag(frag_size);
- else
- return kmalloc(frag_size, GFP_ATOMIC);
-}
-EXPORT_SYMBOL_GPL(mvneta_frag_alloc);
-
-void mvneta_frag_free(unsigned int frag_size, void *data)
-{
- if (likely(frag_size <= PAGE_SIZE))
- skb_free_frag(data);
- else
- kfree(data);
-}
-EXPORT_SYMBOL_GPL(mvneta_frag_free);
-
/* Refill processing for SW buffer management */
+/* Allocate one page per descriptor */
static int mvneta_rx_refill(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
- struct mvneta_rx_queue *rxq)
-
+ struct mvneta_rx_queue *rxq,
+ gfp_t gfp_mask)
{
dma_addr_t phys_addr;
- void *data;
+ struct page *page;
- data = mvneta_frag_alloc(pp->frag_size);
- if (!data)
+ page = __dev_alloc_page(gfp_mask);
+ if (!page)
return -ENOMEM;
- phys_addr = dma_map_single(pp->dev->dev.parent, data,
- MVNETA_RX_BUF_SIZE(pp->pkt_size),
- DMA_FROM_DEVICE);
+ /* map page for use */
+ phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
- mvneta_frag_free(pp->frag_size, data);
+ __free_page(page);
return -ENOMEM;
}
phys_addr += pp->rx_offset_correction;
- mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
+ mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
return 0;
}
@@ -1893,115 +1887,192 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
for (i = 0; i < rxq->size; i++) {
struct mvneta_rx_desc *rx_desc = rxq->descs + i;
void *data = rxq->buf_virt_addr[i];
+ if (!data || !(rx_desc->buf_phys_addr))
+ continue;
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
- mvneta_frag_free(pp->frag_size, data);
+ __free_page(data);
}
}
+static inline
+int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
+{
+ struct mvneta_rx_desc *rx_desc;
+ int curr_desc = rxq->first_to_refill;
+ int i;
+
+ for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
+ rx_desc = rxq->descs + curr_desc;
+ if (!(rx_desc->buf_phys_addr)) {
+ if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
+ pr_err("Can't refill queue %d. Done %d from %d\n",
+ rxq->id, i, rxq->refill_num);
+ rxq->refill_err++;
+ break;
+ }
+ }
+ curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
+ }
+ rxq->refill_num -= i;
+ rxq->first_to_refill = curr_desc;
+
+ return i;
+}
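The batched refill walks at most 64 descriptors per call, skipping any that still own a buffer (buf_phys_addr != 0), and remembers where it stopped in first_to_refill. The wrap-around step relies on the driver's ring-increment macro, which behaves like this sketch (paraphrased from the mvneta source):

/* advance a ring index, wrapping at the end of the descriptor ring */
#define MVNETA_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? (index) + 1 : 0)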
+
/* Main rx processing when using software buffer management */
-static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
+static int mvneta_rx_swbm(struct napi_struct *napi,
+ struct mvneta_port *pp, int budget,
struct mvneta_rx_queue *rxq)
{
- struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
struct net_device *dev = pp->dev;
- int rx_done;
+ int rx_todo, rx_proc;
+ int refill = 0;
u32 rcvd_pkts = 0;
u32 rcvd_bytes = 0;
/* Get number of received packets */
- rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
-
- if (rx_todo > rx_done)
- rx_todo = rx_done;
-
- rx_done = 0;
+ rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
+ rx_proc = 0;
/* Fairness NAPI loop */
- while (rx_done < rx_todo) {
+ while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
- struct sk_buff *skb;
unsigned char *data;
+ struct page *page;
dma_addr_t phys_addr;
- u32 rx_status, frag_size;
- int rx_bytes, err, index;
+ u32 rx_status, index;
+ int rx_bytes, skb_size, copy_size;
+ int frag_num, frag_size, frag_offset;
- rx_done++;
- rx_status = rx_desc->status;
- rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
index = rx_desc - rxq->descs;
- data = rxq->buf_virt_addr[index];
- phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
-
- if (!mvneta_rxq_desc_is_first_last(rx_status) ||
- (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
- mvneta_rx_error(pp, rx_desc);
-err_drop_frame:
- dev->stats.rx_errors++;
- /* leave the descriptor untouched */
- continue;
- }
+ page = (struct page *)rxq->buf_virt_addr[index];
+ data = page_address(page);
+ /* Prefetch header */
+ prefetch(data);
- if (rx_bytes <= rx_copybreak) {
- /* better copy a small frame and not unmap the DMA region */
- skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
- if (unlikely(!skb))
- goto err_drop_frame;
-
- dma_sync_single_range_for_cpu(dev->dev.parent,
- phys_addr,
- MVNETA_MH_SIZE + NET_SKB_PAD,
- rx_bytes,
- DMA_FROM_DEVICE);
- skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
- rx_bytes);
-
- skb->protocol = eth_type_trans(skb, dev);
- mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
-
- rcvd_pkts++;
- rcvd_bytes += rx_bytes;
+ phys_addr = rx_desc->buf_phys_addr;
+ rx_status = rx_desc->status;
+ rx_proc++;
+ rxq->refill_num++;
+
+ if (rx_status & MVNETA_RXD_FIRST_DESC) {
+ /* Check errors only for FIRST descriptor */
+ if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
+ mvneta_rx_error(pp, rx_desc);
+ dev->stats.rx_errors++;
+ /* leave the descriptor untouched */
+ continue;
+ }
+ rx_bytes = rx_desc->data_size -
+ (ETH_FCS_LEN + MVNETA_MH_SIZE);
+
+ /* Allocate small skb for each new packet */
+ skb_size = max(rx_copybreak, rx_header_size);
+ rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
+ if (unlikely(!rxq->skb)) {
+ netdev_err(dev,
+ "Can't allocate skb on queue %d\n",
+ rxq->id);
+ dev->stats.rx_dropped++;
+ rxq->skb_alloc_err++;
+ continue;
+ }
+ copy_size = min(skb_size, rx_bytes);
+
+ /* Copy data from buffer to SKB, skip Marvell header */
+ memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
+ copy_size);
+ skb_put(rxq->skb, copy_size);
+ rxq->left_size = rx_bytes - copy_size;
+
+ mvneta_rx_csum(pp, rx_status, rxq->skb);
+ if (rxq->left_size == 0) {
+ int size = copy_size + MVNETA_MH_SIZE;
+
+ dma_sync_single_range_for_cpu(dev->dev.parent,
+ phys_addr, 0,
+ size,
+ DMA_FROM_DEVICE);
+
+ /* leave the descriptor and buffer untouched */
+ } else {
+ /* refill descriptor with new buffer later */
+ rx_desc->buf_phys_addr = 0;
+
+ frag_num = 0;
+ frag_offset = copy_size + MVNETA_MH_SIZE;
+ frag_size = min(rxq->left_size,
+ (int)(PAGE_SIZE - frag_offset));
+ skb_add_rx_frag(rxq->skb, frag_num, page,
+ frag_offset, frag_size,
+ PAGE_SIZE);
+ dma_unmap_single(dev->dev.parent, phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rxq->left_size -= frag_size;
+ }
+ } else {
+ /* Middle or Last descriptor */
+ if (unlikely(!rxq->skb)) {
+ pr_debug("no skb for rx_status 0x%x\n",
+ rx_status);
+ continue;
+ }
+ if (!rxq->left_size) {
+ /* last descriptor contains only the FCS and can be discarded */
+ dma_sync_single_range_for_cpu(dev->dev.parent,
+ phys_addr, 0,
+ ETH_FCS_LEN,
+ DMA_FROM_DEVICE);
+ /* leave the descriptor and buffer untouched */
+ } else {
+ /* refill descriptor with new buffer later */
+ rx_desc->buf_phys_addr = 0;
+
+ frag_num = skb_shinfo(rxq->skb)->nr_frags;
+ frag_offset = 0;
+ frag_size = min(rxq->left_size,
+ (int)(PAGE_SIZE - frag_offset));
+ skb_add_rx_frag(rxq->skb, frag_num, page,
+ frag_offset, frag_size,
+ PAGE_SIZE);
+
+ dma_unmap_single(dev->dev.parent, phys_addr,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+
+ rxq->left_size -= frag_size;
+ }
+ } /* Middle or Last descriptor */
- /* leave the descriptor and buffer untouched */
+ if (!(rx_status & MVNETA_RXD_LAST_DESC))
+ /* no last descriptor this time */
continue;
- }
- /* Refill processing */
- err = mvneta_rx_refill(pp, rx_desc, rxq);
- if (err) {
- netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->missed++;
- goto err_drop_frame;
+ if (rxq->left_size) {
+ pr_err("get last desc, but left_size (%d) != 0\n",
+ rxq->left_size);
+ dev_kfree_skb_any(rxq->skb);
+ rxq->left_size = 0;
+ rxq->skb = NULL;
+ continue;
}
-
- frag_size = pp->frag_size;
-
- skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
-
- /* After refill old buffer has to be unmapped regardless
- * the skb is successfully built or not.
- */
- dma_unmap_single(dev->dev.parent, phys_addr,
- MVNETA_RX_BUF_SIZE(pp->pkt_size),
- DMA_FROM_DEVICE);
-
- if (!skb)
- goto err_drop_frame;
-
rcvd_pkts++;
- rcvd_bytes += rx_bytes;
+ rcvd_bytes += rxq->skb->len;
/* Linux processing */
- skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
- skb_put(skb, rx_bytes);
-
- skb->protocol = eth_type_trans(skb, dev);
+ rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
- mvneta_rx_csum(pp, rx_status, skb);
+ if (dev->features & NETIF_F_GRO)
+ napi_gro_receive(napi, rxq->skb);
+ else
+ netif_receive_skb(rxq->skb);
- napi_gro_receive(&port->napi, skb);
+ /* clear the incomplete skb pointer in the queue */
+ rxq->skb = NULL;
+ rxq->left_size = 0;
}
if (rcvd_pkts) {
@@ -2013,17 +2084,20 @@ err_drop_frame:
u64_stats_update_end(&stats->syncp);
}
+ /* return buffers to the hardware queue; refilling one at a time is too slow */
+ refill = mvneta_rx_refill_queue(pp, rxq);
+
/* Update rxq management counters */
- mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+ mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
- return rx_done;
+ return rcvd_pkts;
}
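The reworked software-buffer receive path assembles one skb across several descriptors, keeping in-progress state (rxq->skb, rxq->left_size) in the queue itself. A condensed sketch of the per-descriptor state machine, with error paths omitted and field names as in the driver:

if (rx_status & MVNETA_RXD_FIRST_DESC) {
	/* allocate a small header skb, copy up to max(rx_copybreak,
	 * rx_header_size) bytes, attach the rest of the page as frag 0
	 */
	rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
} else if (rxq->skb) {
	/* middle or last descriptor: attach the whole page as the next frag */
}

if (rx_status & MVNETA_RXD_LAST_DESC) {
	/* packet complete: hand rxq->skb to the stack and reset the state */
	rxq->skb = NULL;
	rxq->left_size = 0;
}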
/* Main rx processing when using hardware buffer management */
-static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
+static int mvneta_rx_hwbm(struct napi_struct *napi,
+ struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_queue *rxq)
{
- struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
struct net_device *dev = pp->dev;
int rx_done;
u32 rcvd_pkts = 0;
@@ -2085,7 +2159,7 @@ err_drop_frame:
skb->protocol = eth_type_trans(skb, dev);
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
+ napi_gro_receive(napi, skb);
rcvd_pkts++;
rcvd_bytes += rx_bytes;
@@ -2102,7 +2176,7 @@ err_drop_frame:
err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
if (err) {
netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->missed++;
+ rxq->refill_err++;
goto err_drop_frame_ret_pool;
}
@@ -2129,7 +2203,7 @@ err_drop_frame:
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
+ napi_gro_receive(napi, skb);
}
if (rcvd_pkts) {
@@ -2722,9 +2796,11 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
if (rx_queue) {
rx_queue = rx_queue - 1;
if (pp->bm_priv)
- rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
+ rx_done = mvneta_rx_hwbm(napi, pp, budget,
+ &pp->rxqs[rx_queue]);
else
- rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
+ rx_done = mvneta_rx_swbm(napi, pp, budget,
+ &pp->rxqs[rx_queue]);
}
if (rx_done < budget) {
@@ -2761,9 +2837,11 @@ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
for (i = 0; i < num; i++) {
memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
- if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
- netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
- __func__, rxq->id, i, num);
+ if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
+ GFP_KERNEL) != 0) {
+ netdev_err(pp->dev,
+ "%s:rxq %d, %d of %d buffs filled\n",
+ __func__, rxq->id, i, num);
break;
}
}
@@ -2821,21 +2899,23 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
- /* Set Offset */
- mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction);
-
/* Set coalescing pkts and time */
mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
if (!pp->bm_priv) {
- /* Fill RXQ with buffers from RX pool */
- mvneta_rxq_buf_size_set(pp, rxq,
- MVNETA_RX_BUF_SIZE(pp->pkt_size));
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq, 0);
+ mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
mvneta_rxq_bm_disable(pp, rxq);
mvneta_rxq_fill(pp, rxq, rxq->size);
} else {
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq,
+ NET_SKB_PAD - pp->rx_offset_correction);
+
mvneta_rxq_bm_enable(pp, rxq);
+ /* Fill RXQ with buffers from RX pool */
mvneta_rxq_long_pool_set(pp, rxq);
mvneta_rxq_short_pool_set(pp, rxq);
mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
@@ -2864,6 +2944,9 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
{
mvneta_rxq_drop_pkts(pp, rxq);
+ if (rxq->skb)
+ dev_kfree_skb_any(rxq->skb);
+
if (rxq->descs)
dma_free_coherent(pp->dev->dev.parent,
rxq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2874,6 +2957,10 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
rxq->last_desc = 0;
rxq->next_desc_to_proc = 0;
rxq->descs_phys = 0;
+ rxq->first_to_refill = 0;
+ rxq->refill_num = 0;
+ rxq->skb = NULL;
+ rxq->left_size = 0;
}
static int mvneta_txq_sw_init(struct mvneta_port *pp,
@@ -3177,8 +3264,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mvneta_bm_update_mtu(pp, mtu);
pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
- pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
ret = mvneta_setup_rxqs(pp);
if (ret) {
@@ -3194,7 +3279,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
on_each_cpu(mvneta_percpu_enable, pp, true);
mvneta_start_dev(pp);
- mvneta_port_up(pp);
netdev_update_features(dev);
@@ -3666,8 +3750,7 @@ static int mvneta_open(struct net_device *dev)
int ret;
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
- pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ pp->frag_size = PAGE_SIZE;
ret = mvneta_setup_rxqs(pp);
if (ret)
@@ -3962,6 +4045,12 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
case ETHTOOL_STAT_EEE_WAKEUP:
val = phylink_get_eee_err(pp->phylink);
break;
+ case ETHTOOL_STAT_SKB_ALLOC_ERR:
+ val = pp->rxqs[0].skb_alloc_err;
+ break;
+ case ETHTOOL_STAT_REFILL_ERR:
+ val = pp->rxqs[0].refill_err;
+ break;
}
break;
}
@@ -4018,13 +4107,18 @@ static int mvneta_config_rss(struct mvneta_port *pp)
on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
- /* We have to synchronise on the napi of each CPU */
- for_each_online_cpu(cpu) {
- struct mvneta_pcpu_port *pcpu_port =
- per_cpu_ptr(pp->ports, cpu);
+ if (!pp->neta_armada3700) {
+ /* We have to synchronise on the napi of each CPU */
+ for_each_online_cpu(cpu) {
+ struct mvneta_pcpu_port *pcpu_port =
+ per_cpu_ptr(pp->ports, cpu);
- napi_synchronize(&pcpu_port->napi);
- napi_disable(&pcpu_port->napi);
+ napi_synchronize(&pcpu_port->napi);
+ napi_disable(&pcpu_port->napi);
+ }
+ } else {
+ napi_synchronize(&pp->napi);
+ napi_disable(&pp->napi);
}
pp->rxq_def = pp->indir[0];
@@ -4041,12 +4135,16 @@ static int mvneta_config_rss(struct mvneta_port *pp)
mvneta_percpu_elect(pp);
spin_unlock(&pp->lock);
- /* We have to synchronise on the napi of each CPU */
- for_each_online_cpu(cpu) {
- struct mvneta_pcpu_port *pcpu_port =
- per_cpu_ptr(pp->ports, cpu);
+ if (!pp->neta_armada3700) {
+ /* We have to synchronise on the napi of each CPU */
+ for_each_online_cpu(cpu) {
+ struct mvneta_pcpu_port *pcpu_port =
+ per_cpu_ptr(pp->ports, cpu);
- napi_enable(&pcpu_port->napi);
+ napi_enable(&pcpu_port->napi);
+ }
+ } else {
+ napi_enable(&pp->napi);
}
netif_tx_start_all_queues(pp->dev);
@@ -4362,14 +4460,6 @@ static int mvneta_probe(struct platform_device *pdev)
pp->dn = dn;
pp->rxq_def = rxq_def;
-
- /* Set RX packet offset correction for platforms, whose
- * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit
- * platforms and 0B for 32-bit ones.
- */
- pp->rx_offset_correction =
- max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION);
-
pp->indir[0] = rxq_def;
/* Get special SoC configurations */
@@ -4457,16 +4547,28 @@ static int mvneta_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
pp->id = global_port_id++;
+ pp->rx_offset_correction = 0; /* not relevant for SW BM */
/* Obtain access to BM resources if enabled and already initialized */
bm_node = of_parse_phandle(dn, "buffer-manager", 0);
- if (bm_node && bm_node->data) {
- pp->bm_priv = bm_node->data;
- err = mvneta_bm_port_init(pdev, pp);
- if (err < 0) {
- dev_info(&pdev->dev, "use SW buffer management\n");
- pp->bm_priv = NULL;
+ if (bm_node) {
+ pp->bm_priv = mvneta_bm_get(bm_node);
+ if (pp->bm_priv) {
+ err = mvneta_bm_port_init(pdev, pp);
+ if (err < 0) {
+ dev_info(&pdev->dev,
+ "use SW buffer management\n");
+ mvneta_bm_put(pp->bm_priv);
+ pp->bm_priv = NULL;
+ }
}
+ /* Set RX packet offset correction for platforms whose
+ * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
+ * platforms and 0B for 32-bit ones.
+ */
+ pp->rx_offset_correction = max(0,
+ NET_SKB_PAD -
+ MVNETA_RX_PKT_OFFSET_CORRECTION);
}
of_node_put(bm_node);
@@ -4526,6 +4628,7 @@ err_netdev:
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
1 << pp->id);
+ mvneta_bm_put(pp->bm_priv);
}
err_free_stats:
free_percpu(pp->stats);
@@ -4563,6 +4666,7 @@ static int mvneta_remove(struct platform_device *pdev)
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
1 << pp->id);
+ mvneta_bm_put(pp->bm_priv);
}
return 0;
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 466939f8f0cf..de468e1bdba9 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
@@ -392,6 +393,20 @@ static void mvneta_bm_put_sram(struct mvneta_bm *priv)
MVNETA_BM_BPPI_SIZE);
}
+struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{
+ struct platform_device *pdev = of_find_device_by_node(node);
+
+ return pdev ? platform_get_drvdata(pdev) : NULL;
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_get);
+
+void mvneta_bm_put(struct mvneta_bm *priv)
+{
+ platform_device_put(priv->pdev);
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_put);
+
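of_find_device_by_node() takes a reference on the platform device, so every successful mvneta_bm_get() must be balanced by an mvneta_bm_put(). A minimal sketch of the intended lifetime on the caller side:

struct mvneta_bm *bm = mvneta_bm_get(bm_node);

if (bm) {
	/* ... use the shared buffer manager ... */
	mvneta_bm_put(bm);	/* drops the device reference */
}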
static int mvneta_bm_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index a32de432800c..c8425d35c049 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -130,10 +130,10 @@ struct mvneta_bm_pool {
};
/* Declarations and definitions */
-void *mvneta_frag_alloc(unsigned int frag_size);
-void mvneta_frag_free(unsigned int frag_size, void *data);
-
#if IS_ENABLED(CONFIG_MVNETA_BM)
+struct mvneta_bm *mvneta_bm_get(struct device_node *node);
+void mvneta_bm_put(struct mvneta_bm *priv);
+
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool, u8 port_map);
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
@@ -178,5 +178,7 @@ static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool)
{ return 0; }
+static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{ return NULL; }
+static inline void mvneta_bm_put(struct mvneta_bm *priv) {}
#endif /* CONFIG_MVNETA_BM */
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile
index 4d11dd9e3246..51f65a202c6e 100644
--- a/drivers/net/ethernet/marvell/mvpp2/Makefile
+++ b/drivers/net/ethernet/marvell/mvpp2/Makefile
@@ -4,4 +4,4 @@
#
obj-$(CONFIG_MVPP2) := mvpp2.o
-mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o
+mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index def00dc3eb4e..67b9e81b7c02 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1,17 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for Marvell PPv2 network controller for Armada 375 SoC.
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _MVPP2_H_
#define _MVPP2_H_
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
@@ -66,15 +64,18 @@
#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
+#define MVPP2_PRS_TCAM_HIT_IDX_REG 0x1240
+#define MVPP2_PRS_TCAM_HIT_CNT_REG 0x1244
+#define MVPP2_PRS_TCAM_HIT_CNT_MASK GENMASK(15, 0)
/* RSS Registers */
#define MVPP22_RSS_INDEX 0x1500
#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
-#define MVPP22_RSS_TABLE_ENTRY 0x1508
-#define MVPP22_RSS_TABLE 0x1510
+#define MVPP22_RXQ2RSS_TABLE 0x1504
#define MVPP22_RSS_TABLE_POINTER(p) (p)
+#define MVPP22_RSS_TABLE_ENTRY 0x1508
#define MVPP22_RSS_WIDTH 0x150c
/* Classifier Registers */
@@ -86,11 +87,28 @@
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
#define MVPP2_CLS_LKP_TBL_REG 0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
+#define MVPP2_CLS_LKP_FLOW_PTR(flow) ((flow) << 16)
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
+#define MVPP2_CLS_FLOW_TBL0_LAST BIT(0)
+#define MVPP2_CLS_FLOW_TBL0_ENG_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL0_OFFS 1
+#define MVPP2_CLS_FLOW_TBL0_ENG(x) ((x) << 1)
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID_MASK 0xff
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID(port) ((port) << 4)
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL BIT(23)
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
+#define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x)
+#define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f
+#define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9)
+#define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL1_SEQ(x) ((x) << 15)
#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
+#define MVPP2_CLS_FLOW_TBL2_FLD_MASK 0x3f
+#define MVPP2_CLS_FLOW_TBL2_FLD_OFFS(n) ((n) * 6)
+#define MVPP2_CLS_FLOW_TBL2_FLD(n, x) ((x) << ((n) * 6))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
@@ -98,6 +116,32 @@
#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
+/* Classifier C2 engine Registers */
+#define MVPP22_CLS_C2_TCAM_IDX 0x1b00
+#define MVPP22_CLS_C2_TCAM_DATA0 0x1b10
+#define MVPP22_CLS_C2_TCAM_DATA1 0x1b14
+#define MVPP22_CLS_C2_TCAM_DATA2 0x1b18
+#define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c
+#define MVPP22_CLS_C2_TCAM_DATA4 0x1b20
+#define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8)
+#define MVPP22_CLS_C2_HIT_CTR 0x1b50
+#define MVPP22_CLS_C2_ACT 0x1b60
+#define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19)
+#define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13)
+#define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11)
+#define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9)
+#define MVPP22_CLS_C2_ATTR0 0x1b64
+#define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24)
+#define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f
+#define MVPP22_CLS_C2_ATTR0_QHIGH_OFFS 24
+#define MVPP22_CLS_C2_ATTR0_QLOW(ql) (((ql) & 0x7) << 21)
+#define MVPP22_CLS_C2_ATTR0_QLOW_MASK 0x7
+#define MVPP22_CLS_C2_ATTR0_QLOW_OFFS 21
+#define MVPP22_CLS_C2_ATTR1 0x1b68
+#define MVPP22_CLS_C2_ATTR2 0x1b6c
+#define MVPP22_CLS_C2_ATTR2_RSS_EN BIT(30)
+#define MVPP22_CLS_C2_ATTR3 0x1b70
+
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
@@ -275,6 +319,11 @@
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
+/* Hit counters registers */
+#define MVPP2_CTRS_IDX 0x7040
+#define MVPP2_CLS_DEC_TBL_HIT_CTR 0x7700
+#define MVPP2_CLS_FLOW_TBL_HIT_CTR 0x7704
+
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
@@ -499,7 +548,7 @@
#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
/* Default number of RXQs in use */
-#define MVPP2_DEFAULT_RXQ 4
+#define MVPP2_DEFAULT_RXQ 1
/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD_MAX 1024
@@ -553,6 +602,11 @@
((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
+#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32)
+#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32)
+
+/* RSS constants */
+#define MVPP22_RSS_TABLE_ENTRIES 32
/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16
@@ -703,6 +757,9 @@ struct mvpp2 {
/* Workqueue to gather hardware statistics */
char queue_name[30];
struct workqueue_struct *stats_queue;
+
+ /* Debugfs root entry */
+ struct dentry *dbgfs_dir;
};
struct mvpp2_pcpu_stats {
@@ -795,6 +852,9 @@ struct mvpp2_port {
bool has_tx_irqs;
u32 tx_time_coal;
+
+ /* RSS indirection table */
+ u32 indir[MVPP22_RSS_TABLE_ENTRIES];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
@@ -831,52 +891,52 @@ struct mvpp2_port {
/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
- u32 command; /* Options used by HW for packet transmitting.*/
+ __le32 command; /* Options used by HW for packet transmitting.*/
u8 packet_offset; /* the offset from the buffer beginning */
u8 phys_txq; /* destination queue ID */
- u16 data_size; /* data size of transmitted packet in bytes */
- u32 buf_dma_addr; /* physical addr of transmitted buffer */
- u32 buf_cookie; /* cookie for access to TX buffer in tx path */
- u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
- u32 reserved2; /* reserved (for future use) */
+ __le16 data_size; /* data size of transmitted packet in bytes */
+ __le32 buf_dma_addr; /* physical addr of transmitted buffer */
+ __le32 buf_cookie; /* cookie for access to TX buffer in tx path */
+ __le32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
+ __le32 reserved2; /* reserved (for future use) */
};
/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
- u32 status; /* info about received packet */
- u16 reserved1; /* parser_info (for future use, PnC) */
- u16 data_size; /* size of received packet in bytes */
- u32 buf_dma_addr; /* physical address of the buffer */
- u32 buf_cookie; /* cookie for access to RX buffer in rx path */
- u16 reserved2; /* gem_port_id (for future use, PON) */
- u16 reserved3; /* csum_l4 (for future use, PnC) */
+ __le32 status; /* info about received packet */
+ __le16 reserved1; /* parser_info (for future use, PnC) */
+ __le16 data_size; /* size of received packet in bytes */
+ __le32 buf_dma_addr; /* physical address of the buffer */
+ __le32 buf_cookie; /* cookie for access to RX buffer in rx path */
+ __le16 reserved2; /* gem_port_id (for future use, PON) */
+ __le16 reserved3; /* csum_l4 (for future use, PnC) */
u8 reserved4; /* bm_qset (for future use, BM) */
u8 reserved5;
- u16 reserved6; /* classify_info (for future use, PnC) */
- u32 reserved7; /* flow_id (for future use, PnC) */
- u32 reserved8;
+ __le16 reserved6; /* classify_info (for future use, PnC) */
+ __le32 reserved7; /* flow_id (for future use, PnC) */
+ __le32 reserved8;
};
/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
- u32 command;
+ __le32 command;
u8 packet_offset;
u8 phys_txq;
- u16 data_size;
- u64 reserved1;
- u64 buf_dma_addr_ptp;
- u64 buf_cookie_misc;
+ __le16 data_size;
+ __le64 reserved1;
+ __le64 buf_dma_addr_ptp;
+ __le64 buf_cookie_misc;
};
/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
- u32 status;
- u16 reserved1;
- u16 data_size;
- u32 reserved2;
- u32 reserved3;
- u64 buf_dma_addr_key_hash;
- u64 buf_cookie_misc;
+ __le32 status;
+ __le16 reserved1;
+ __le16 data_size;
+ __le32 reserved2;
+ __le32 reserved3;
+ __le64 buf_dma_addr_key_hash;
+ __le64 buf_cookie_misc;
};
/* Opaque type used by the driver to manipulate the HW TX and RX
@@ -1043,4 +1103,8 @@ u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset);
void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset,
u32 data);
+void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
+
+void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+
#endif
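Retyping the descriptor fields as __le16/__le32/__le64 lets sparse flag missing endianness conversions; every access is then expected to go through cpu_to_le*()/le*_to_cpu(). A sketch of the accessor pattern this implies (function names hypothetical):

static u16 example_rxdesc_size(const struct mvpp22_rx_desc *desc)
{
	return le16_to_cpu(desc->data_size);
}

static void example_txdesc_set_size(struct mvpp22_tx_desc *desc, u16 size)
{
	desc->data_size = cpu_to_le16(size);
}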
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 8581d5b17dd5..efdb7a656835 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1,17 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RSS and Classifier helpers for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include "mvpp2.h"
#include "mvpp2_cls.h"
+#include "mvpp2_prs.h"
+
+#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask) \
+{ \
+ .flow_type = _type, \
+ .flow_id = _id, \
+ .supported_hash_opts = _opts, \
+ .prs_ri = { \
+ .ri = _ri, \
+ .ri_mask = _ri_mask \
+ } \
+}
+
+static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
+ /* TCP over IPv4 flows, Not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv4 flows, Not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv4 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv4 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv4 flows, Not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv4 flows, Not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv4 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv4 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv6 flows, not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv6 flows, not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv6 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv6 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv6 flows, not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv6 flows, not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv6 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv6 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* IPv4 flows, no vlan tag */
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv4 flows, with vlan tag */
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv6 flows, no vlan tag */
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv6 flows, with vlan tag */
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* Non IP flow, no vlan tag */
+ MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
+ 0,
+ MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK),
+ /* Non IP flow, with vlan tag */
+ MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
+ MVPP22_CLS_HEK_OPT_VLAN,
+ 0, 0),
+};
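For reference, the first entry of the table above expands via MVPP2_DEF_FLOW to roughly the following designated initializer:

{
	.flow_type = TCP_V4_FLOW,
	.flow_id = MVPP2_FL_IP4_TCP_NF_UNTAG,
	.supported_hash_opts = MVPP22_CLS_HEK_IP4_5T,
	.prs_ri = {
		.ri = MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
		      MVPP2_PRS_RI_L4_TCP,
		.ri_mask = MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK,
	},
}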
+
+u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index)
+{
+ mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+
+ return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR);
+}
+
+void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_flow_entry *fe)
+{
+ fe->index = index;
+ mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
+ fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
+ fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
+ fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
+}
/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
@@ -23,6 +349,25 @@ static void mvpp2_cls_flow_write(struct mvpp2 *priv,
mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
+u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
+{
+ mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+
+ return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR);
+}
+
+void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
+ struct mvpp2_cls_lookup_entry *le)
+{
+ u32 val;
+
+ val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
+ mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
+ le->way = way;
+ le->lkpid = lkpid;
+ le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG);
+}
+
/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
struct mvpp2_cls_lookup_entry *le)
@@ -34,6 +379,439 @@ static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
+/* Operations on flow entry */
+static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
+{
+ return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
+}
+
+static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
+ int num_of_fields)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
+}
+
+static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
+ int field_index)
+{
+ return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
+ MVPP2_CLS_FLOW_TBL2_FLD_MASK;
+}
+
+static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
+ int field_index, int field_id)
+{
+ fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
+ MVPP2_CLS_FLOW_TBL2_FLD_MASK);
+ fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
+}
+
+static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
+ int engine)
+{
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
+}
+
+int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe)
+{
+ return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) &
+ MVPP2_CLS_FLOW_TBL0_ENG_MASK;
+}
+
+static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
+ bool from_packet)
+{
+ if (from_packet)
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
+ else
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
+}
+
+static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq);
+}
+
+static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
+ bool is_last)
+{
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
+ fe->data[0] |= !!is_last;
+}
+
+static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
+}
+
+static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
+ u32 port)
+{
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
+}
+
+/* Initialize the parser entry for the given flow */
+static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
+ struct mvpp2_cls_flow *flow)
+{
+ mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
+ flow->prs_ri.ri_mask);
+}
+
+/* Initialize the Lookup Id table entry for the given flow */
+static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
+ struct mvpp2_cls_flow *flow)
+{
+ struct mvpp2_cls_lookup_entry le;
+
+ le.way = 0;
+ le.lkpid = flow->flow_id;
+
+ /* The default RxQ for this port is set in the C2 lookup */
+ le.data = 0;
+
+ /* We point to the first lookup in the sequence for this flow, which
+ * is the C2 lookup.
+ */
+ le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
+
+ /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
+ le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+ mvpp2_cls_lookup_write(priv, &le);
+}
+
+/* Initialize the flow table entries for the given flow */
+static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
+{
+ struct mvpp2_cls_flow_entry fe;
+ int i;
+
+ /* C2 lookup */
+ memset(&fe, 0, sizeof(fe));
+ fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
+
+ mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
+ mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_cls_flow_last_set(&fe, 0);
+ mvpp2_cls_flow_pri_set(&fe, 0);
+ mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);
+
+ /* Add all ports */
+ for (i = 0; i < MVPP2_MAX_PORTS; i++)
+ mvpp2_cls_flow_port_add(&fe, BIT(i));
+
+ mvpp2_cls_flow_write(priv, &fe);
+
+ /* C3Hx lookups */
+ for (i = 0; i < MVPP2_MAX_PORTS; i++) {
+ memset(&fe, 0, sizeof(fe));
+ fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);
+
+ mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_cls_flow_pri_set(&fe, i + 1);
+ mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
+ mvpp2_cls_flow_port_add(&fe, BIT(i));
+
+ mvpp2_cls_flow_write(priv, &fe);
+ }
+
+ /* Update the last entry */
+ mvpp2_cls_flow_last_set(&fe, 1);
+ mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);
+
+ mvpp2_cls_flow_write(priv, &fe);
+}
+
+/* Add a field to the Header Extracted Key generation parameters */
+static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
+ u32 field_id)
+{
+ int nb_fields = mvpp2_cls_flow_hek_num_get(fe);
+
+ if (nb_fields == MVPP2_FLOW_N_FIELDS)
+ return -EINVAL;
+
+ mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);
+
+ mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);
+
+ return 0;
+}
+
+static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
+ unsigned long hash_opts)
+{
+ u32 field_id;
+ int i;
+
+ /* Clear old fields */
+ mvpp2_cls_flow_hek_num_set(fe, 0);
+ fe->data[2] = 0;
+
+ for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
+ switch (BIT(i)) {
+ case MVPP22_CLS_HEK_OPT_VLAN:
+ field_id = MVPP22_CLS_FIELD_VLAN;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4SA:
+ field_id = MVPP22_CLS_FIELD_IP4SA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4DA:
+ field_id = MVPP22_CLS_FIELD_IP4DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP6SA:
+ field_id = MVPP22_CLS_FIELD_IP6SA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP6DA:
+ field_id = MVPP22_CLS_FIELD_IP6DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4SIP:
+ field_id = MVPP22_CLS_FIELD_L4SIP;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4DIP:
+ field_id = MVPP22_CLS_FIELD_L4DIP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (mvpp2_flow_add_hek_field(fe, field_id))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+{
+ if (flow >= MVPP2_N_FLOWS)
+ return NULL;
+
+ return &cls_flows[flow];
+}
+
+/* Set the hash generation options for the given traffic flow.
+ * One traffic flow (in the ethtool sense) has multiple classification flows
+ * to handle specific cases such as fragmentation or the presence of a
+ * VLAN / DSA tag.
+ *
+ * Each of these individual flows has different constraints; for example, we
+ * can't hash fragmented packets on L4 data (else we would risk packet
+ * re-ordering), so each classification flow masks the requested options
+ * with the ones it supports.
+ */
+static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
+ u16 requested_opts)
+{
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *flow;
+ int i, engine, flow_index;
+ u16 hash_opts;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return -EINVAL;
+
+ if (flow->flow_type != flow_type)
+ continue;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
+ flow->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts = flow->supported_hash_opts & requested_opts;
+
+ /* Use the C3HB engine to access L4 info; this adds the L4 fields
+ * to the hash parameters.
+ */
+ if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
+ engine = MVPP22_CLS_ENGINE_C3HB;
+ else
+ engine = MVPP22_CLS_ENGINE_C3HA;
+
+ if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
+ return -EINVAL;
+
+ mvpp2_cls_flow_eng_set(&fe, engine);
+
+ mvpp2_cls_flow_write(port->priv, &fe);
+ }
+
+ return 0;
+}
+
+u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
+{
+ u16 hash_opts = 0;
+ int n_fields, i, field;
+
+ n_fields = mvpp2_cls_flow_hek_num_get(fe);
+
+ for (i = 0; i < n_fields; i++) {
+ field = mvpp2_cls_flow_hek_get(fe, i);
+
+ switch (field) {
+ case MVPP22_CLS_FIELD_MAC_DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
+ break;
+ case MVPP22_CLS_FIELD_VLAN:
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
+ break;
+ case MVPP22_CLS_FIELD_L3_PROTO:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
+ break;
+ case MVPP22_CLS_FIELD_IP4SA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
+ break;
+ case MVPP22_CLS_FIELD_IP4DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
+ break;
+ case MVPP22_CLS_FIELD_IP6SA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
+ break;
+ case MVPP22_CLS_FIELD_IP6DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
+ break;
+ case MVPP22_CLS_FIELD_L4SIP:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
+ break;
+ case MVPP22_CLS_FIELD_L4DIP:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
+ break;
+ default:
+ break;
+ }
+ }
+ return hash_opts;
+}
+
+/* Returns the hash opts for this flow. There are several classifier flows
+ * for one traffic flow, this returns an aggregation of all configurations.
+ */
+static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
+{
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *flow;
+ int i, flow_index;
+ u16 hash_opts = 0;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return 0;
+
+ if (flow->flow_type != flow_type)
+ continue;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
+ flow->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts |= mvpp2_flow_get_hek_fields(&fe);
+ }
+
+ return hash_opts;
+}
+
+static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
+{
+ struct mvpp2_cls_flow *flow;
+ int i;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ break;
+
+ mvpp2_cls_flow_prs_init(priv, flow);
+ mvpp2_cls_flow_lkp_init(priv, flow);
+ mvpp2_cls_flow_init(priv, flow);
+ }
+}
+
+static void mvpp2_cls_c2_write(struct mvpp2 *priv,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
+
+ /* Write TCAM */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
+}
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
+
+ c2->index = index;
+
+ c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
+ c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
+ c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
+ c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
+ c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
+
+ c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
+
+ c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
+ c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
+ c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
+ c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
+}
+
+static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql, pmap;
+
+ memset(&c2, 0, sizeof(c2));
+
+ c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);
+
+ pmap = BIT(port->id);
+ c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
+
+ /* Update RSS status after matching this entry */
+ c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
+
+ /* Mark packet as "forwarded to software", needed for RSS */
+ c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
+
+ /* Configure the default rx queue: update Queue Low and Queue High, but
+ * don't lock, since the rx queue selection might be overridden by RSS.
+ */
+ c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
+ MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);
+
+ qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
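The physical rx queue number is split into a 3-bit low part and a 5-bit high part before being written to ATTR0. A worked example, assuming port->first_rxq == 12:

qh = (12 >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;	/* = 1 */
ql = 12 & MVPP22_CLS_C2_ATTR0_QLOW_MASK;		/* = 4 */
/* the hardware recombines them as qh * 8 + ql == 12 */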
+
/* Classifier default initialization */
void mvpp2_cls_init(struct mvpp2 *priv)
{
@@ -61,6 +839,8 @@ void mvpp2_cls_init(struct mvpp2 *priv)
le.way = 1;
mvpp2_cls_lookup_write(priv, &le);
}
+
+ mvpp2_cls_port_init_flows(priv);
}
void mvpp2_cls_port_config(struct mvpp2_port *port)
@@ -89,6 +869,47 @@ void mvpp2_cls_port_config(struct mvpp2_port *port)
/* Update lookup ID table entry */
mvpp2_cls_lookup_write(port->priv, &le);
+
+ mvpp2_port_c2_cls_init(port);
+}
+
+u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index);
+
+ return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
+}
+
+static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
+static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
+void mvpp22_rss_enable(struct mvpp2_port *port)
+{
+ mvpp2_rss_port_c2_enable(port);
+}
+
+void mvpp22_rss_disable(struct mvpp2_port *port)
+{
+ mvpp2_rss_port_c2_disable(port);
}
/* Set CPU queue number for oversize packets */
@@ -107,7 +928,116 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
-void mvpp22_init_rss(struct mvpp2_port *port)
+static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
+{
+ int nrxqs, cpu, cpus = num_possible_cpus();
+
+ /* Number of RXQs per CPU */
+ nrxqs = port->nrxqs / cpus;
+
+ /* CPU that will handle this rx queue */
+ cpu = rxq / nrxqs;
+
+ if (!cpu_online(cpu))
+ return port->first_rxq;
+
+ /* Indirection to better distribute the packets across the CPUs when
+ * configuring the RSS queues.
+ */
+ return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
+}
+
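
The formula above interleaves table entries across CPUs rather than filling queues in order. A worked example, assuming 4 possible CPUs and 16 rx queues on the port (so nrxqs = 4 queues per CPU):

    #include <stdio.h>

    int main(void)
    {
            unsigned int cpus = 4, port_nrxqs = 16, first_rxq = 0;
            unsigned int nrxqs = port_nrxqs / cpus; /* 4 RXQs per CPU */
            unsigned int rxq;

            for (rxq = 0; rxq < 8; rxq++)
                    printf("indir[%u] -> rxq %u\n", rxq,
                           first_rxq + ((rxq * nrxqs + rxq / cpus) % port_nrxqs));
            /* Prints 0 4 8 12 1 5 9 13: consecutive hash buckets land on
             * queues owned by different CPUs.
             */
            return 0;
    }
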
+void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
+{
+ struct mvpp2 *priv = port->priv;
+ int i;
+
+ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
+ u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
+ MVPP22_RSS_INDEX_TABLE_ENTRY(i);
+ mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
+
+ mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
+ mvpp22_rxfh_indir(port, port->indir[i]));
+ }
+}
+
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+{
+ u16 hash_opts = 0;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ if (info->data & RXH_L4_B_0_1)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
+ if (info->data & RXH_L4_B_2_3)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
+ /* Fallthrough */
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ if (info->data & RXH_L2DA)
+ hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
+ if (info->data & RXH_VLAN)
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
+ if (info->data & RXH_L3_PROTO)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
+ if (info->data & RXH_IP_SRC)
+ hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
+ MVPP22_CLS_HEK_OPT_IP6SA);
+ if (info->data & RXH_IP_DST)
+ hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
+ MVPP22_CLS_HEK_OPT_IP6DA);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
+}
+
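
This is the handler behind ethtool's rx-flow-hash configuration. As a sketch, a request such as "ethtool -N <iface> rx-flow-hash tcp4 sdfn" reaches it carrying roughly the following (constants from the UAPI <linux/ethtool.h>):

    #include <linux/ethtool.h>

    static const struct ethtool_rxnfc example_req = {
            .cmd       = ETHTOOL_SRXFH,
            .flow_type = TCP_V4_FLOW,
            /* s, d, f, n: IP src/dst plus both halves of the L4 ports */
            .data      = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
    };

For an IPv4 flow the resulting hash_opts should reduce to MVPP22_CLS_HEK_IP4_5T once the inapplicable IPv6 bits are filtered against the flow's supported options.
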
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+{
+ unsigned long hash_opts;
+ int i;
+
+ hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
+ info->data = 0;
+
+ for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
+ switch (BIT(i)) {
+ case MVPP22_CLS_HEK_OPT_MAC_DA:
+ info->data |= RXH_L2DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_VLAN:
+ info->data |= RXH_VLAN;
+ break;
+ case MVPP22_CLS_HEK_OPT_L3_PROTO:
+ info->data |= RXH_L3_PROTO;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4SA:
+ case MVPP22_CLS_HEK_OPT_IP6SA:
+ info->data |= RXH_IP_SRC;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4DA:
+ case MVPP22_CLS_HEK_OPT_IP6DA:
+ info->data |= RXH_IP_DST;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4SIP:
+ info->data |= RXH_L4_B_0_1;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4DIP:
+ info->data |= RXH_L4_B_2_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+void mvpp22_rss_port_init(struct mvpp2_port *port)
{
struct mvpp2 *priv = port->priv;
int i;
@@ -115,27 +1045,30 @@ void mvpp22_init_rss(struct mvpp2_port *port)
/* Set the table width: replace the whole classifier Rx queue number
* with the ones configured in RSS table entries.
*/
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
- /* Loop through the classifier Rx Queues and map them to a RSS table.
- * Map them all to the first table (0) by default.
+ /* The default RxQ is used as a key to select the RSS table to use.
+ * We use one RSS table per port.
*/
- for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
- mvpp2_write(priv, MVPP22_RSS_TABLE,
- MVPP22_RSS_TABLE_POINTER(0));
- }
+ mvpp2_write(priv, MVPP22_RSS_INDEX,
+ MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
+ mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
+ MVPP22_RSS_TABLE_POINTER(port->id));
/* Configure the first table to evenly distribute the packets across
- * real Rx Queues. The table entries map a hash to an port Rx Queue.
+ * real Rx Queues. The table entries map a hash to a port Rx Queue.
*/
- for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
- u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
- MVPP22_RSS_INDEX_TABLE_ENTRY(i);
- mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
+ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
+ port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
- mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
- }
+ mvpp22_rss_fill_table(port, port->id);
+ /* Configure default flows */
+ mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
+ mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
+ mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
+ mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 8e1d7f9ffa0b..089f05f29891 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -1,27 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* RSS and Classifier definitions for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _MVPP2_CLS_H_
#define _MVPP2_CLS_H_
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64
#define MVPP2_CLS_RX_QUEUES 256
-/* RSS constants */
-#define MVPP22_RSS_TABLE_ENTRIES 32
+/* Classifier flow constants */
+
+#define MVPP2_FLOW_N_FIELDS 4
+
+enum mvpp2_cls_engine {
+ MVPP22_CLS_ENGINE_C2 = 1,
+ MVPP22_CLS_ENGINE_C3A,
+ MVPP22_CLS_ENGINE_C3B,
+ MVPP22_CLS_ENGINE_C4,
+ MVPP22_CLS_ENGINE_C3HA = 6,
+ MVPP22_CLS_ENGINE_C3HB = 7,
+};
+
+#define MVPP22_CLS_HEK_OPT_MAC_DA BIT(0)
+#define MVPP22_CLS_HEK_OPT_VLAN BIT(1)
+#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(2)
+#define MVPP22_CLS_HEK_OPT_IP4SA BIT(3)
+#define MVPP22_CLS_HEK_OPT_IP4DA BIT(4)
+#define MVPP22_CLS_HEK_OPT_IP6SA BIT(5)
+#define MVPP22_CLS_HEK_OPT_IP6DA BIT(6)
+#define MVPP22_CLS_HEK_OPT_L4SIP BIT(7)
+#define MVPP22_CLS_HEK_OPT_L4DIP BIT(8)
+#define MVPP22_CLS_HEK_N_FIELDS 9
+
+#define MVPP22_CLS_HEK_L4_OPTS (MVPP22_CLS_HEK_OPT_L4SIP | \
+ MVPP22_CLS_HEK_OPT_L4DIP)
+
+#define MVPP22_CLS_HEK_IP4_2T (MVPP22_CLS_HEK_OPT_IP4SA | \
+ MVPP22_CLS_HEK_OPT_IP4DA)
+
+#define MVPP22_CLS_HEK_IP6_2T (MVPP22_CLS_HEK_OPT_IP6SA | \
+ MVPP22_CLS_HEK_OPT_IP6DA)
+
+/* The fifth tuple in "5T" is the L4_Info field */
+#define MVPP22_CLS_HEK_IP4_5T (MVPP22_CLS_HEK_IP4_2T | \
+ MVPP22_CLS_HEK_L4_OPTS)
+
+#define MVPP22_CLS_HEK_IP6_5T (MVPP22_CLS_HEK_IP6_2T | \
+ MVPP22_CLS_HEK_L4_OPTS)
+
+enum mvpp2_cls_field_id {
+ MVPP22_CLS_FIELD_MAC_DA = 0x03,
+ MVPP22_CLS_FIELD_VLAN = 0x06,
+ MVPP22_CLS_FIELD_L3_PROTO = 0x0f,
+ MVPP22_CLS_FIELD_IP4SA = 0x10,
+ MVPP22_CLS_FIELD_IP4DA = 0x11,
+ MVPP22_CLS_FIELD_IP6SA = 0x17,
+ MVPP22_CLS_FIELD_IP6DA = 0x1a,
+ MVPP22_CLS_FIELD_L4SIP = 0x1d,
+ MVPP22_CLS_FIELD_L4DIP = 0x1e,
+};
+
+enum mvpp2_cls_flow_seq {
+ MVPP2_CLS_FLOW_SEQ_NORMAL = 0,
+ MVPP2_CLS_FLOW_SEQ_FIRST1,
+ MVPP2_CLS_FLOW_SEQ_FIRST2,
+ MVPP2_CLS_FLOW_SEQ_LAST,
+ MVPP2_CLS_FLOW_SEQ_MIDDLE
+};
+
+/* Classifier C2 engine constants */
+#define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16)
+
+enum mvpp22_cls_c2_action {
+ MVPP22_C2_NO_UPD = 0,
+ MVPP22_C2_NO_UPD_LOCK,
+ MVPP22_C2_UPD,
+ MVPP22_C2_UPD_LOCK,
+};
+
+enum mvpp22_cls_c2_fwd_action {
+ MVPP22_C2_FWD_NO_UPD = 0,
+ MVPP22_C2_FWD_NO_UPD_LOCK,
+ MVPP22_C2_FWD_SW,
+ MVPP22_C2_FWD_SW_LOCK,
+ MVPP22_C2_FWD_HW,
+ MVPP22_C2_FWD_HW_LOCK,
+ MVPP22_C2_FWD_HW_LOW_LAT,
+ MVPP22_C2_FWD_HW_LOW_LAT_LOCK,
+};
+
+#define MVPP2_CLS_C2_TCAM_WORDS 5
+#define MVPP2_CLS_C2_ATTR_WORDS 5
+
+struct mvpp2_cls_c2_entry {
+ u32 index;
+ u32 tcam[MVPP2_CLS_C2_TCAM_WORDS];
+ u32 act;
+ u32 attr[MVPP2_CLS_C2_ATTR_WORDS];
+};
+
+/* Classifier C2 engine entries */
+#define MVPP22_CLS_C2_RSS_ENTRY(port) (port)
+#define MVPP22_CLS_C2_N_ENTRIES MVPP2_MAX_PORTS
+/* RSS flow entries in the flow table. We have 2 entries per port for RSS.
+ *
+ * The first performs a lookup using the C2 TCAM engine, to tag the
+ * packet for software forwarding (needed for RSS), enable or disable RSS, and
+ * assign the default rx queue.
+ *
+ * The second configures the hash generation, by specifying which fields of the
+ * packet header are used to generate the hash, and specifies the relevant hash
+ * engine to use.
+ */
+#define MVPP22_RSS_FLOW_C2_OFFS 0
+#define MVPP22_RSS_FLOW_HASH_OFFS 1
+#define MVPP22_RSS_FLOW_SIZE (MVPP22_RSS_FLOW_HASH_OFFS + 1)
+
+#define MVPP22_RSS_FLOW_C2(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
+ MVPP22_RSS_FLOW_C2_OFFS)
+#define MVPP22_RSS_FLOW_HASH(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
+ MVPP22_RSS_FLOW_HASH_OFFS)
+#define MVPP22_RSS_FLOW_FIRST(port) MVPP22_RSS_FLOW_C2(port)
+
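
Concretely, the two per-port RSS entries sit back to back; a tiny sketch of the index arithmetic performed by the macros above:

    #include <stdio.h>

    #define RSS_FLOW_SIZE 2 /* MVPP22_RSS_FLOW_SIZE */

    int main(void)
    {
            int port;

            for (port = 0; port < 3; port++)
                    printf("port %d: C2 entry %d, hash entry %d\n", port,
                           port * RSS_FLOW_SIZE,      /* MVPP22_RSS_FLOW_C2() */
                           port * RSS_FLOW_SIZE + 1); /* MVPP22_RSS_FLOW_HASH() */
            return 0;
    }
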
+/* Packet flow ID */
+enum mvpp2_prs_flow {
+ MVPP2_FL_START = 8,
+ MVPP2_FL_IP4_TCP_NF_UNTAG = MVPP2_FL_START,
+ MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP2_FL_IP4_UNTAG, /* non-TCP, non-UDP, same for below */
+ MVPP2_FL_IP4_TAG,
+ MVPP2_FL_IP6_UNTAG,
+ MVPP2_FL_IP6_TAG,
+ MVPP2_FL_NON_IP_UNTAG,
+ MVPP2_FL_NON_IP_TAG,
+ MVPP2_FL_LAST,
+};
+
+struct mvpp2_cls_flow {
+ /* The L2-L4 traffic flow type */
+ int flow_type;
+
+ /* The first id in the flow table for this flow */
+ u16 flow_id;
+
+ /* The supported HEK fields for this flow */
+ u16 supported_hash_opts;
+
+ /* The Header Parser result_info that matches this flow */
+ struct mvpp2_prs_result_info prs_ri;
+};
+
+#define MVPP2_N_FLOWS 52
+
+#define MVPP2_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1)
+#define MVPP2_FLOW_C2_ENTRY(id) ((id) * MVPP2_ENTRIES_PER_FLOW)
+#define MVPP2_PORT_FLOW_HASH_ENTRY(port, id) ((id) * MVPP2_ENTRIES_PER_FLOW + \
+ (port) + 1)
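
Each flow id therefore owns a contiguous block of MVPP2_MAX_PORTS + 1 flow-table entries: one C2-lookup entry followed by one hash entry per port. A small sketch of the index arithmetic, assuming MVPP2_MAX_PORTS is 4 as defined in mvpp2.h:

    #include <stdio.h>

    #define MAX_PORTS        4 /* assumed value of MVPP2_MAX_PORTS */
    #define ENTRIES_PER_FLOW (MAX_PORTS + 1)

    int main(void)
    {
            int id = 1; /* second flow */

            printf("C2 entry: %d\n", id * ENTRIES_PER_FLOW);                  /* 5 */
            printf("port 0 hash entry: %d\n", id * ENTRIES_PER_FLOW + 0 + 1); /* 6 */
            printf("port 3 hash entry: %d\n", id * ENTRIES_PER_FLOW + 3 + 1); /* 9 */
            return 0;
    }
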
struct mvpp2_cls_flow_entry {
u32 index;
u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
@@ -33,7 +193,15 @@ struct mvpp2_cls_lookup_entry {
u32 data;
};
-void mvpp22_init_rss(struct mvpp2_port *port);
+void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table);
+
+void mvpp22_rss_port_init(struct mvpp2_port *port);
+
+void mvpp22_rss_enable(struct mvpp2_port *port);
+void mvpp22_rss_disable(struct mvpp2_port *port);
+
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
void mvpp2_cls_init(struct mvpp2 *priv);
@@ -41,4 +209,25 @@ void mvpp2_cls_port_config(struct mvpp2_port *port);
void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port);
+int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe);
+
+u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe);
+
+struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
+
+u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index);
+
+void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_flow_entry *fe);
+
+u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index);
+
+void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
+ struct mvpp2_cls_lookup_entry *le);
+
+u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index);
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2);
+
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
new file mode 100644
index 000000000000..f9744a61e5dd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Marvell PPv2 network controller for Armada 375 SoC.
+ *
+ * Copyright (C) 2018 Marvell
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+#include "mvpp2_cls.h"
+
+struct mvpp2_dbgfs_prs_entry {
+ int tid;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_flow_entry {
+ int flow;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_port_flow_entry {
+ struct mvpp2_port *port;
+ struct mvpp2_dbgfs_flow_entry *dbg_fe;
+};
+
+static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ int id = MVPP2_FLOW_C2_ENTRY(entry->flow);
+
+ u32 hits = mvpp2_cls_flow_hits(entry->priv, id);
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_flt_hits);
+
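
For readers who haven't met it, DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates the usual single_open() boilerplate around a *_show() function; it expands to roughly:

    static int mvpp2_dbgfs_flow_flt_hits_open(struct inode *inode,
                                              struct file *file)
    {
            return single_open(file, mvpp2_dbgfs_flow_flt_hits_show,
                               inode->i_private);
    }

    static const struct file_operations mvpp2_dbgfs_flow_flt_hits_fops = {
            .owner   = THIS_MODULE,
            .open    = mvpp2_dbgfs_flow_flt_hits_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

This also explains why mvpp2_dbgfs_flow_type below spells its fops out by hand: it needs a custom .release that kfree()s the entry hung off i_private.
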
+static int mvpp2_dbgfs_flow_dec_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+
+ u32 hits = mvpp2_cls_lookup_hits(entry->priv, entry->flow);
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits);
+
+static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ struct mvpp2_cls_flow *f;
+ const char *flow_name;
+
+ f = mvpp2_cls_flow_get(entry->flow);
+ if (!f)
+ return -EINVAL;
+
+ switch (f->flow_type) {
+ case IPV4_FLOW:
+ flow_name = "ipv4";
+ break;
+ case IPV6_FLOW:
+ flow_name = "ipv6";
+ break;
+ case TCP_V4_FLOW:
+ flow_name = "tcp4";
+ break;
+ case TCP_V6_FLOW:
+ flow_name = "tcp6";
+ break;
+ case UDP_V4_FLOW:
+ flow_name = "udp4";
+ break;
+ case UDP_V6_FLOW:
+ flow_name = "udp6";
+ break;
+ default:
+ flow_name = "other";
+ }
+
+ seq_printf(s, "%s\n", flow_name);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private);
+}
+
+static int mvpp2_dbgfs_flow_type_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct mvpp2_dbgfs_flow_entry *flow_entry = seq->private;
+
+ kfree(flow_entry);
+ return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_flow_type_fops = {
+ .open = mvpp2_dbgfs_flow_type_open,
+ .read = seq_read,
+ .release = mvpp2_dbgfs_flow_type_release,
+};
+
+static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ struct mvpp2_cls_flow *f;
+
+ f = mvpp2_cls_flow_get(entry->flow);
+ if (!f)
+ return -EINVAL;
+
+ seq_printf(s, "%d\n", f->flow_id);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_id);
+
+static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
+ struct mvpp2_port *port = entry->port;
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *f;
+ int flow_index;
+ u16 hash_opts;
+
+ f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
+ if (!f)
+ return -EINVAL;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts = mvpp2_flow_get_hek_fields(&fe);
+
+ seq_printf(s, "0x%04x\n", hash_opts);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show,
+ inode->i_private);
+}
+
+static int mvpp2_dbgfs_port_flow_hash_opt_release(struct inode *inode,
+ struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct mvpp2_dbgfs_port_flow_entry *flow_entry = seq->private;
+
+ kfree(flow_entry);
+ return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = {
+ .open = mvpp2_dbgfs_port_flow_hash_opt_open,
+ .read = seq_read,
+ .release = mvpp2_dbgfs_port_flow_hash_opt_release,
+};
+
+static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
+ struct mvpp2_port *port = entry->port;
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *f;
+ int flow_index, engine;
+
+ f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
+ if (!f)
+ return -EINVAL;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ engine = mvpp2_cls_flow_eng_get(&fe);
+
+ seq_printf(s, "%d\n", engine);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine);
+
+static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ u32 hits;
+
+ hits = mvpp2_cls_c2_hit_count(port->priv,
+ MVPP22_CLS_C2_RSS_ENTRY(port->id));
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits);
+
+static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) &
+ MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+
+ ql = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QLOW_OFFS) &
+ MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ seq_printf(s, "%d\n", (qh << 3 | ql));
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq);
+
+static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2_cls_c2_entry c2;
+ int enabled;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN);
+
+ seq_printf(s, "%d\n", enabled);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_enable);
+
+static int mvpp2_dbgfs_port_vid_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ unsigned char byte[2], enable[2];
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ u16 rvid;
+ int tid;
+
+ for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+ tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+ if (!priv->prs_shadow[tid].valid)
+ continue;
+
+ if (!test_bit(port->id, &pmap))
+ continue;
+
+ mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
+ mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
+
+ rvid = ((byte[0] & 0xf) << 8) + byte[1];
+
+ seq_printf(s, "%u\n", rvid);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_vid);
+
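
The VID recovered above lives in TCAM data bytes 2 and 3, i.e. the VLAN TCI with the PCP/DEI nibble masked off. A small worked example:

    #include <stdio.h>

    int main(void)
    {
            /* Assume the parser matched TCI 0xa064: PCP 5, DEI 0, VID 0x064 */
            unsigned char byte[2] = { 0xa0, 0x64 };
            unsigned int rvid = ((byte[0] & 0xf) << 8) + byte[1];

            printf("vid = %u\n", rvid); /* prints 100 */
            return 0;
    }
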
+static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ int i;
+
+ for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
+ mvpp2_prs_init_from_hw(port->priv, &pe, i);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+ if (priv->prs_shadow[i].valid && test_bit(port->id, &pmap))
+ seq_printf(s, "%03d\n", i);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_parser);
+
+static int mvpp2_dbgfs_filter_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ int index, tid;
+
+ for (tid = MVPP2_PE_MAC_RANGE_START;
+ tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
+ unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
+
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC ||
+ priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+ /* We only want entries active on this port */
+ if (!test_bit(port->id, &pmap))
+ continue;
+
+ /* Read mac addr from entry */
+ for (index = 0; index < ETH_ALEN; index++)
+ mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
+ &da_mask[index]);
+
+ seq_printf(s, "%pM\n", da);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_filter);
+
+static int mvpp2_dbgfs_prs_lu_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2 *priv = entry->priv;
+
+ seq_printf(s, "%x\n", priv->prs_shadow[entry->tid].lu);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_lu);
+
+static int mvpp2_dbgfs_prs_pmap_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned int pmap;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+ pmap &= MVPP2_PRS_PORT_MASK;
+
+ seq_printf(s, "%02x\n", pmap);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_pmap);
+
+static int mvpp2_dbgfs_prs_ai_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned char ai, ai_mask;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ ai = pe.tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
+ ai_mask = (pe.tcam[MVPP2_PRS_TCAM_AI_WORD] >> 16) & MVPP2_PRS_AI_MASK;
+
+ seq_printf(s, "%02x %02x\n", ai, ai_mask);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_ai);
+
+static int mvpp2_dbgfs_prs_hdata_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned char data[8], mask[8];
+ int i;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ for (i = 0; i < 8; i++)
+ mvpp2_prs_tcam_data_byte_get(&pe, i, &data[i], &mask[i]);
+
+ seq_printf(s, "%*phN %*phN\n", 8, data, 8, mask);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hdata);
+
+static int mvpp2_dbgfs_prs_sram_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ seq_printf(s, "%*phN\n", 14, pe.sram);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_sram);
+
+static int mvpp2_dbgfs_prs_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ int val;
+
+ val = mvpp2_prs_hits(entry->priv, entry->tid);
+ if (val < 0)
+ return val;
+
+ seq_printf(s, "%d\n", val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hits);
+
+static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2 *priv = entry->priv;
+ int tid = entry->tid;
+
+ seq_printf(s, "%d\n", priv->prs_shadow[tid].valid ? 1 : 0);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private);
+}
+
+static int mvpp2_dbgfs_prs_valid_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct mvpp2_dbgfs_prs_entry *entry = seq->private;
+
+ kfree(entry);
+ return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_prs_valid_fops = {
+ .open = mvpp2_dbgfs_prs_valid_open,
+ .read = seq_read,
+ .release = mvpp2_dbgfs_prs_valid_release,
+};
+
+static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
+ struct mvpp2_port *port,
+ struct mvpp2_dbgfs_flow_entry *entry)
+{
+ struct mvpp2_dbgfs_port_flow_entry *port_entry;
+ struct dentry *port_dir;
+
+ port_dir = debugfs_create_dir(port->dev->name, parent);
+ if (IS_ERR(port_dir))
+ return PTR_ERR(port_dir);
+
+ /* This will be freed by 'hash_opts' release op */
+ port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL);
+ if (!port_entry)
+ return -ENOMEM;
+
+ port_entry->port = port;
+ port_entry->dbg_fe = entry;
+
+ debugfs_create_file("hash_opts", 0444, port_dir, port_entry,
+ &mvpp2_dbgfs_port_flow_hash_opt_fops);
+
+ debugfs_create_file("engine", 0444, port_dir, port_entry,
+ &mvpp2_dbgfs_port_flow_engine_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int flow)
+{
+ struct mvpp2_dbgfs_flow_entry *entry;
+ struct dentry *flow_entry_dir;
+ char flow_entry_name[10];
+ int i, ret;
+
+ sprintf(flow_entry_name, "%02d", flow);
+
+ flow_entry_dir = debugfs_create_dir(flow_entry_name, parent);
+ if (!flow_entry_dir)
+ return -ENOMEM;
+
+ /* This will be freed by 'type' release op */
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->flow = flow;
+ entry->priv = priv;
+
+ debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_flt_hits_fops);
+
+ debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_dec_hits_fops);
+
+ debugfs_create_file("type", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_type_fops);
+
+ debugfs_create_file("id", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_id_fops);
+
+ /* Create entry for each port */
+ for (i = 0; i < priv->port_count; i++) {
+ ret = mvpp2_dbgfs_flow_port_init(flow_entry_dir,
+ priv->port_list[i], entry);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv)
+{
+ struct dentry *flow_dir;
+ int i, ret;
+
+ flow_dir = debugfs_create_dir("flows", parent);
+ if (!flow_dir)
+ return -ENOMEM;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int tid)
+{
+ struct mvpp2_dbgfs_prs_entry *entry;
+ struct dentry *prs_entry_dir;
+ char prs_entry_name[10];
+
+ if (tid >= MVPP2_PRS_TCAM_SRAM_SIZE)
+ return -EINVAL;
+
+ sprintf(prs_entry_name, "%03d", tid);
+
+ prs_entry_dir = debugfs_create_dir(prs_entry_name, parent);
+ if (!prs_entry_dir)
+ return -ENOMEM;
+
+ /* This will be freed by the 'valid' entry's release op */
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->tid = tid;
+ entry->priv = priv;
+
+ /* Create each attr */
+ debugfs_create_file("sram", 0444, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_sram_fops);
+
+ debugfs_create_file("valid", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_valid_fops);
+
+ debugfs_create_file("lookup_id", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_lu_fops);
+
+ debugfs_create_file("ai", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_ai_fops);
+
+ debugfs_create_file("header_data", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_hdata_fops);
+
+ debugfs_create_file("hits", 0444, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_hits_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
+{
+ struct dentry *prs_dir;
+ int i, ret;
+
+ prs_dir = debugfs_create_dir("parser", parent);
+ if (!prs_dir)
+ return -ENOMEM;
+
+ for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
+ ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_port_init(struct dentry *parent,
+ struct mvpp2_port *port)
+{
+ struct dentry *port_dir;
+
+ port_dir = debugfs_create_dir(port->dev->name, parent);
+ if (IS_ERR(port_dir))
+ return PTR_ERR(port_dir);
+
+ debugfs_create_file("parser_entries", 0444, port_dir, port,
+ &mvpp2_dbgfs_port_parser_fops);
+
+ debugfs_create_file("mac_filter", 0444, port_dir, port,
+ &mvpp2_dbgfs_filter_fops);
+
+ debugfs_create_file("vid_filter", 0444, port_dir, port,
+ &mvpp2_dbgfs_port_vid_fops);
+
+ debugfs_create_file("c2_hits", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_hits_fops);
+
+ debugfs_create_file("default_rxq", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_rxq_fops);
+
+ debugfs_create_file("rss_enable", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_enable_fops);
+
+ return 0;
+}
+
+void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
+{
+ debugfs_remove_recursive(priv->dbgfs_dir);
+}
+
+void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
+{
+ struct dentry *mvpp2_dir, *mvpp2_root;
+ int ret, i;
+
+ mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
+ if (!mvpp2_root) {
+ mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
+ if (IS_ERR(mvpp2_root))
+ return;
+ }
+
+ mvpp2_dir = debugfs_create_dir(name, mvpp2_root);
+ if (IS_ERR(mvpp2_dir))
+ return;
+
+ priv->dbgfs_dir = mvpp2_dir;
+
+ ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < priv->port_count; i++) {
+ ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]);
+ if (ret)
+ goto err;
+ }
+
+ ret = mvpp2_dbgfs_flow_init(mvpp2_dir, priv);
+ if (ret)
+ goto err;
+
+ return;
+err:
+ mvpp2_dbgfs_cleanup(priv);
+}
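
Taken together, the init functions build a hierarchy along these lines (netdev names hypothetical):

    /sys/kernel/debug/mvpp2/<pdev-name>/
        parser/000 .. parser/255   sram, valid, lookup_id, ai, header_data, hits
        <netdev>/                  parser_entries, mac_filter, vid_filter,
                                   c2_hits, default_rxq, rss_enable
        flows/00 .. flows/51       flow_hits, dec_hits, type, id, plus one
                                   <netdev>/ subdir with hash_opts and engine
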
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 0319ed9ef8b8..32d785b616e1 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Marvell PPv2 network controller for Armada 375 SoC.
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/acpi.h>
@@ -66,7 +63,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE 1
-static int queue_mode = MVPP2_QDIST_SINGLE_MODE;
+static int queue_mode = MVPP2_QDIST_MULTI_MODE;
module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
@@ -151,9 +148,10 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
if (port->priv->hw_version == MVPP21)
- return tx_desc->pp21.buf_dma_addr;
+ return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
else
- return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
+ return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
+ MVPP2_DESC_DMA_MASK;
}
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
@@ -166,12 +164,12 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
offset = dma_addr & MVPP2_TX_DESC_ALIGN;
if (port->priv->hw_version == MVPP21) {
- tx_desc->pp21.buf_dma_addr = addr;
+ tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
tx_desc->pp21.packet_offset = offset;
} else {
- u64 val = (u64)addr;
+ __le64 val = cpu_to_le64(addr);
- tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
+ tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
tx_desc->pp22.buf_dma_addr_ptp |= val;
tx_desc->pp22.packet_offset = offset;
}
@@ -181,9 +179,9 @@ static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
if (port->priv->hw_version == MVPP21)
- return tx_desc->pp21.data_size;
+ return le16_to_cpu(tx_desc->pp21.data_size);
else
- return tx_desc->pp22.data_size;
+ return le16_to_cpu(tx_desc->pp22.data_size);
}
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
@@ -191,9 +189,9 @@ static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
size_t size)
{
if (port->priv->hw_version == MVPP21)
- tx_desc->pp21.data_size = size;
+ tx_desc->pp21.data_size = cpu_to_le16(size);
else
- tx_desc->pp22.data_size = size;
+ tx_desc->pp22.data_size = cpu_to_le16(size);
}
static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
@@ -211,9 +209,9 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
unsigned int command)
{
if (port->priv->hw_version == MVPP21)
- tx_desc->pp21.command = command;
+ tx_desc->pp21.command = cpu_to_le32(command);
else
- tx_desc->pp22.command = command;
+ tx_desc->pp22.command = cpu_to_le32(command);
}
static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
@@ -229,36 +227,38 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.buf_dma_addr;
+ return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
else
- return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
+ return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
+ MVPP2_DESC_DMA_MASK;
}
static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.buf_cookie;
+ return le32_to_cpu(rx_desc->pp21.buf_cookie);
else
- return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
+ return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
+ MVPP2_DESC_DMA_MASK;
}
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.data_size;
+ return le16_to_cpu(rx_desc->pp21.data_size);
else
- return rx_desc->pp22.data_size;
+ return le16_to_cpu(rx_desc->pp22.data_size);
}
static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.status;
+ return le32_to_cpu(rx_desc->pp21.status);
else
- return rx_desc->pp22.status;
+ return le32_to_cpu(rx_desc->pp22.status);
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
@@ -1735,7 +1735,7 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
command |= MVPP2_TXD_IP_CSUM_DISABLE;
- if (l3_proto == swab16(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
} else {
@@ -3273,6 +3273,11 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
}
}
+static bool mvpp22_rss_is_supported(void)
+{
+ return queue_mode == MVPP2_QDIST_MULTI_MODE;
+}
+
static int mvpp2_open(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -3365,9 +3370,6 @@ static int mvpp2_open(struct net_device *dev)
mvpp2_start_dev(port);
- if (priv->hw_version == MVPP22)
- mvpp22_init_rss(port);
-
/* Start hardware statistics gathering */
queue_delayed_work(priv->stats_queue, &port->stats_work,
MVPP2_MIB_COUNTERS_STATS_DELAY);
@@ -3626,6 +3628,13 @@ static int mvpp2_set_features(struct net_device *dev,
}
}
+ if (changed & NETIF_F_RXHASH) {
+ if (features & NETIF_F_RXHASH)
+ mvpp22_rss_enable(port);
+ else
+ mvpp22_rss_disable(port);
+ }
+
return 0;
}
@@ -3813,6 +3822,94 @@ static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(port->phylink, cmd);
}
+static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rules)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXFH:
+ ret = mvpp2_ethtool_rxfh_get(port, info);
+ break;
+ case ETHTOOL_GRXRINGS:
+ info->data = port->nrxqs;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = mvpp2_ethtool_rxfh_set(port, info);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
+{
+ return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
+}
+
+static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ if (indir)
+ memcpy(indir, port->indir,
+ ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_CRC32;
+
+ return 0;
+}
+
+static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
+ return -EOPNOTSUPP;
+
+ if (key)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ memcpy(port->indir, indir,
+ ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+ mvpp22_rss_fill_table(port, port->id);
+ }
+
+ return 0;
+}
+
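
These hooks back "ethtool -x/-X <iface>". When the core hands in a fresh default table (e.g. after "ethtool -X <iface> default"), the entries follow ethtool_rxfh_indir_default(), which is simply index % nrxqs; a sketch for nrxqs = 4:

    #include <stdio.h>

    int main(void)
    {
            unsigned int i, nrxqs = 4;

            for (i = 0; i < 32; i++)        /* MVPP22_RSS_TABLE_ENTRIES */
                    printf("%u ", i % nrxqs); /* 0 1 2 3 0 1 2 3 ... */
            printf("\n");
            return 0;
    }
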
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -3844,6 +3941,12 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.set_pauseparam = mvpp2_ethtool_set_pause_param,
.get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
.set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
+ .get_rxnfc = mvpp2_ethtool_get_rxnfc,
+ .set_rxnfc = mvpp2_ethtool_set_rxnfc,
+ .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
+ .get_rxfh = mvpp2_ethtool_get_rxfh,
+ .set_rxfh = mvpp2_ethtool_set_rxfh,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -3985,8 +4088,8 @@ static int mvpp2_port_init(struct mvpp2_port *port)
MVPP2_MAX_PORTS * priv->max_port_rxqs)
return -EINVAL;
- if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
- (port->ntxqs > MVPP2_MAX_TXQ))
+ if (port->nrxqs % MVPP2_DEFAULT_RXQ ||
+ port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
return -EINVAL;
/* Disable port */
@@ -4075,6 +4178,9 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_cls_oversize_rxq_set(port);
mvpp2_cls_port_config(port);
+ if (mvpp22_rss_is_supported())
+ mvpp22_rss_port_init(port);
+
/* Provide an initial Rx packet size */
port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
@@ -4681,6 +4787,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (mvpp22_rss_is_supported())
+ dev->hw_features |= NETIF_F_RXHASH;
+
if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
@@ -5011,6 +5120,12 @@ static int mvpp2_probe(struct platform_device *pdev)
(unsigned long)of_device_get_match_data(&pdev->dev);
}
+ /* Multi-queue mode isn't supported on PPv2.1; fall back to single
+ * mode.
+ */
+ if (priv->hw_version == MVPP21)
+ queue_mode = MVPP2_QDIST_SINGLE_MODE;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
@@ -5174,6 +5289,8 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_port_probe;
}
+ mvpp2_dbgfs_init(priv, pdev->name);
+
platform_set_drvdata(pdev, priv);
return 0;
@@ -5207,6 +5324,8 @@ static int mvpp2_remove(struct platform_device *pdev)
struct fwnode_handle *port_fwnode;
int i = 0;
+ mvpp2_dbgfs_cleanup(priv);
+
flush_workqueue(priv->stats_queue);
destroy_workqueue(priv->stats_queue);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 6bb69f086794..392fd895f278 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Header Parser helpers for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
@@ -30,24 +27,24 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
return -EINVAL;
/* Clear entry invalidation bit */
- pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+ pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
/* Write tcam index - indirect access */
mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
/* Write sram index - indirect access */
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
+ mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
return 0;
}
/* Initialize tcam entry from hw */
-static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
- struct mvpp2_prs_entry *pe, int tid)
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+ int tid)
{
int i;
@@ -60,18 +57,18 @@ static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
/* Write tcam index - indirect access */
mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
- pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
+ pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
- if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
+ if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
return MVPP2_PRS_TCAM_ENTRY_INVALID;
for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
+ pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
/* Write sram index - indirect access */
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
- pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
+ pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
return 0;
}
@@ -103,42 +100,35 @@ static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
-
- pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
- pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
}
/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
unsigned int port, bool add)
{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
if (add)
- pe->tcam.byte[enable_off] &= ~(1 << port);
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
else
- pe->tcam.byte[enable_off] |= 1 << port;
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
}
/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
unsigned int ports)
{
- unsigned char port_mask = MVPP2_PRS_PORT_MASK;
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
- pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
- pe->tcam.byte[enable_off] &= ~port_mask;
- pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
}
/* Obtain port map from tcam sw entry */
-static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
+unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
- return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
+ return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
}
/* Set byte of data and its enable bits in tcam sw entry */
@@ -146,55 +136,58 @@ static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
unsigned int offs, unsigned char byte,
unsigned char enable)
{
- pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
- pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
+ int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
+
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
}
/* Get byte of data and its enable bits from tcam sw entry */
-static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
- unsigned int offs, unsigned char *byte,
- unsigned char *enable)
+void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+ unsigned int offs, unsigned char *byte,
+ unsigned char *enable)
{
- *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
- *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
+ int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
+
+ *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
+ *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
}
/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
u16 data)
{
- int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
u16 tcam_data;
- tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
- if (tcam_data != data)
- return false;
- return true;
+ tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
+ return tcam_data == data;
}
/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
unsigned int bits, unsigned int enable)
{
- int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
+ int i;
for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
if (!(enable & BIT(i)))
continue;
if (bits & BIT(i))
- pe->tcam.byte[ai_idx] |= 1 << i;
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
else
- pe->tcam.byte[ai_idx] &= ~(1 << i);
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
}
- pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
}
/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
- return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
+ return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
}
/* Set ethertype in tcam sw entry */
@@ -215,16 +208,16 @@ static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
- int val)
+ u32 val)
{
- pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
+ pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
}
/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
- int val)
+ u32 val)
{
- pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
+ pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
}
/* Update ri bits in sram sw entry */
@@ -234,15 +227,16 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
unsigned int i;
for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
- int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
-
if (!(mask & BIT(i)))
continue;
if (bits & BIT(i))
- mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
+ 1);
else
- mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
+ mvpp2_prs_sram_bits_clear(pe,
+ MVPP2_PRS_SRAM_RI_OFFS + i,
+ 1);
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
}
@@ -251,7 +245,7 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
- return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
+ return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */
@@ -259,16 +253,18 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
unsigned int bits, unsigned int mask)
{
unsigned int i;
- int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
if (!(mask & BIT(i)))
continue;
if (bits & BIT(i))
- mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
+ 1);
else
- mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
+ mvpp2_prs_sram_bits_clear(pe,
+ MVPP2_PRS_SRAM_AI_OFFS + i,
+ 1);
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
}
@@ -278,12 +274,12 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
u8 bits;
- int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
- int ai_en_off = ai_off + 1;
- int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+ /* ai is stored in bits 90->97, so it spreads across two u32 words */
+ int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
+ int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);
- bits = (pe->sram.byte[ai_off] >> ai_shift) |
- (pe->sram.byte[ai_en_off] << (8 - ai_shift));
+ bits = (pe->sram[ai_off] >> ai_shift) |
+ (pe->sram[ai_off + 1] << (32 - ai_shift));
return bits;
}
@@ -316,8 +312,7 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
}
/* Set value */
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
- (unsigned char)shift;
+ pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK;
/* Reset and set operation */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
@@ -346,13 +341,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
/* Set value */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
MVPP2_PRS_SRAM_UDF_MASK);
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
- MVPP2_PRS_SRAM_UDF_BITS)] &=
- ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
- MVPP2_PRS_SRAM_UDF_BITS)] |=
- (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+ offset & MVPP2_PRS_SRAM_UDF_MASK);
/* Set offset type */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
@@ -362,16 +352,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
/* Set offset operation */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
-
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
- MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
- ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
- (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
-
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
- MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
- (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+ op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
/* Set base offset as current */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
@@ -662,7 +644,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
continue;
mvpp2_prs_init_from_hw(priv, &pe, tid);
- match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
if (!match)
continue;
@@ -790,8 +772,8 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
mvpp2_prs_init_from_hw(priv, &pe, tid);
- match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
- mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
+ mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
if (!match)
continue;
@@ -932,8 +914,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
pe.index = tid;
/* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
@@ -1433,17 +1415,13 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
pe.index = tid;
- /* Clear tcam data before updating */
- pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
- pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
-
mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
MVPP2_PRS_IPV4_HEAD,
MVPP2_PRS_IPV4_HEAD_MASK);
/* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK);
@@ -1644,8 +1622,8 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
MVPP2_PRS_IPV4_IHL_MASK);
/* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK);
@@ -2428,6 +2406,41 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
return 0;
}
+int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
+{
+ struct mvpp2_prs_entry pe;
+ u8 *ri_byte, *ri_byte_mask;
+ int tid, i;
+
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_tcam_first_free(priv,
+ MVPP2_PE_LAST_FREE_TID,
+ MVPP2_PE_FIRST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe.index = tid;
+
+ ri_byte = (u8 *)&ri;
+ ri_byte_mask = (u8 *)&ri_mask;
+
+ mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+ for (i = 0; i < 4; i++) {
+ mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
+ ri_byte_mask[i]);
+ }
+
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
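
A sketch of the expected call pattern, matching a parser result_info to one of the flow ids from mvpp2_cls.h (the ri/ri_mask combination below is illustrative only; the driver's actual values come from its flow definitions):

    /* Illustrative only: steer untagged IPv4/TCP results to their flow id */
    mvpp2_prs_add_flow(priv, MVPP2_FL_IP4_TCP_NF_UNTAG,
                       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK);
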
/* Set prs flow for the port */
int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
@@ -2465,3 +2478,19 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
return 0;
}
+
+int mvpp2_prs_hits(struct mvpp2 *priv, int index)
+{
+ u32 val;
+
+ if (index >= MVPP2_PRS_TCAM_SRAM_SIZE)
+ return -EINVAL;
+
+ mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
+
+ val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
+
+ val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
+
+ return val;
+}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index 22fbbc4c8b28..e22f6c85d380 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -1,22 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header Parser definitions for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
+#ifndef _MVPP2_PRS_H_
+#define _MVPP2_PRS_H_
+
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/platform_device.h>
#include "mvpp2.h"
-#ifndef _MVPP2_PRS_H_
-#define _MVPP2_PRS_H_
-
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE 256
#define MVPP2_PRS_TCAM_WORDS 6
@@ -50,17 +48,25 @@
* The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
*/
#define MVPP2_PRS_AI_BITS 8
+#define MVPP2_PRS_AI_MASK 0xff
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
-#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
- (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
-#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
- (((offs) * 2) - ((offs) % 2) + 2)
-#define MVPP2_PRS_TCAM_AI_BYTE 16
-#define MVPP2_PRS_TCAM_PORT_BYTE 17
-#define MVPP2_PRS_TCAM_LU_BYTE 20
-#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
-#define MVPP2_PRS_TCAM_INV_WORD 5
+
+/* TCAM entries in registers are accessed using 16 data bits + 16 enable bits */
+#define MVPP2_PRS_BYTE_TO_WORD(byte) ((byte) / 2)
+#define MVPP2_PRS_BYTE_IN_WORD(byte) ((byte) % 2)
+
+#define MVPP2_PRS_TCAM_EN(data) ((data) << 16)
+#define MVPP2_PRS_TCAM_AI_WORD 4
+#define MVPP2_PRS_TCAM_AI(ai) (ai)
+#define MVPP2_PRS_TCAM_AI_EN(ai) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_AI(ai))
+#define MVPP2_PRS_TCAM_PORT_WORD 4
+#define MVPP2_PRS_TCAM_PORT(p) ((p) << 8)
+#define MVPP2_PRS_TCAM_PORT_EN(p) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_PORT(p))
+#define MVPP2_PRS_TCAM_LU_WORD 5
+#define MVPP2_PRS_TCAM_LU(lu) (lu)
+#define MVPP2_PRS_TCAM_LU_EN(lu) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_LU(lu))
+#define MVPP2_PRS_TCAM_INV_WORD 5
#define MVPP2_PRS_VID_TCAM_BYTE 2
@@ -146,6 +152,7 @@
#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
+#define MVPP2_PRS_SRAM_SHIFT_MASK 0xff
#define MVPP2_PRS_SRAM_UDF_OFFS 73
#define MVPP2_PRS_SRAM_UDF_BITS 8
#define MVPP2_PRS_SRAM_UDF_MASK 0xff
@@ -214,6 +221,10 @@
#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
#define MVPP2_PRS_RI_DROP_MASK 0x80000000
+#define MVPP2_PRS_IP_MASK (MVPP2_PRS_RI_L3_PROTO_MASK | \
+ MVPP2_PRS_RI_IP_FRAG_MASK | \
+ MVPP2_PRS_RI_L4_PROTO_MASK)
+
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
@@ -255,20 +266,15 @@ enum mvpp2_prs_lookup {
MVPP2_PRS_LU_LAST,
};
-union mvpp2_prs_tcam_entry {
- u32 word[MVPP2_PRS_TCAM_WORDS];
- u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
-};
-
-union mvpp2_prs_sram_entry {
- u32 word[MVPP2_PRS_SRAM_WORDS];
- u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
-};
-
struct mvpp2_prs_entry {
u32 index;
- union mvpp2_prs_tcam_entry tcam;
- union mvpp2_prs_sram_entry sram;
+ u32 tcam[MVPP2_PRS_TCAM_WORDS];
+ u32 sram[MVPP2_PRS_SRAM_WORDS];
+};
+
+struct mvpp2_prs_result_info {
+ u32 ri;
+ u32 ri_mask;
};
struct mvpp2_prs_shadow {
@@ -288,10 +294,21 @@ struct mvpp2_prs_shadow {
int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv);
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+ int tid);
+
+unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe);
+
+void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+ unsigned int offs, unsigned char *byte,
+ unsigned char *enable);
+
int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add);
int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type);
+int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask);
+
int mvpp2_prs_def_flow(struct mvpp2_port *port);
void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port);
@@ -311,4 +328,6 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port);
int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da);
+int mvpp2_prs_hits(struct mvpp2 *priv, int index);
+
#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index d8ebf0a05e0c..6e6abdc399de 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -605,10 +605,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
dma_addr_t dma_addr;
int i;
- eth->scratch_ring = dma_alloc_coherent(eth->dev,
- cnt * sizeof(struct mtk_tx_dma),
- &eth->phy_scratch_ring,
- GFP_ATOMIC | __GFP_ZERO);
+ eth->scratch_ring = dma_zalloc_coherent(eth->dev,
+ cnt * sizeof(struct mtk_tx_dma),
+ &eth->phy_scratch_ring,
+ GFP_ATOMIC);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
@@ -623,7 +623,6 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
return -ENOMEM;
- memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
phy_ring_tail = eth->phy_scratch_ring +
(sizeof(struct mtk_tx_dma) * (cnt - 1));
@@ -1221,14 +1220,11 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (!ring->buf)
goto no_tx_mem;
- ring->dma = dma_alloc_coherent(eth->dev,
- MTK_DMA_SIZE * sz,
- &ring->phys,
- GFP_ATOMIC | __GFP_ZERO);
+ ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ &ring->phys, GFP_ATOMIC);
if (!ring->dma)
goto no_tx_mem;
- memset(ring->dma, 0, MTK_DMA_SIZE * sz);
for (i = 0; i < MTK_DMA_SIZE; i++) {
int next = (i + 1) % MTK_DMA_SIZE;
u32 next_ptr = ring->phys + next * sz;
@@ -1321,10 +1317,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return -ENOMEM;
}
- ring->dma = dma_alloc_coherent(eth->dev,
- rx_dma_size * sizeof(*ring->dma),
- &ring->phys,
- GFP_ATOMIC | __GFP_ZERO);
+ ring->dma = dma_zalloc_coherent(eth->dev,
+ rx_dma_size * sizeof(*ring->dma),
+ &ring->phys, GFP_ATOMIC);
if (!ring->dma)
return -ENOMEM;
@@ -2463,42 +2458,6 @@ free_netdev:
return err;
}
-static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id)
-{
- u32 val[2], id[4];
-
- regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]);
- regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]);
-
- id[3] = ((val[0] >> 16) & 0xff) - '0';
- id[2] = ((val[0] >> 24) & 0xff) - '0';
- id[1] = (val[1] & 0xff) - '0';
- id[0] = ((val[1] >> 8) & 0xff) - '0';
-
- *chip_id = (id[3] * 1000) + (id[2] * 100) +
- (id[1] * 10) + id[0];
-
- if (!(*chip_id)) {
- dev_err(eth->dev, "failed to get chip id\n");
- return -ENODEV;
- }
-
- dev_info(eth->dev, "chip id = %d\n", *chip_id);
-
- return 0;
-}
-
-static bool mtk_is_hwlro_supported(struct mtk_eth *eth)
-{
- switch (eth->chip_id) {
- case MT7622_ETH:
- case MT7623_ETH:
- return true;
- }
-
- return false;
-}
-
static int mtk_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2577,11 +2536,7 @@ static int mtk_probe(struct platform_device *pdev)
if (err)
return err;
- err = mtk_get_chip_id(eth, &eth->chip_id);
- if (err)
- return err;
-
- eth->hwlro = mtk_is_hwlro_supported(eth);
+ eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
for_each_child_of_node(pdev->dev.of_node, mac_np) {
if (!of_device_is_compatible(mac_np,
@@ -2670,19 +2625,19 @@ static int mtk_remove(struct platform_device *pdev)
}
static const struct mtk_soc_data mt2701_data = {
- .caps = MTK_GMAC1_TRGMII,
+ .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
static const struct mtk_soc_data mt7622_data = {
- .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW,
+ .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
};
static const struct mtk_soc_data mt7623_data = {
- .caps = MTK_GMAC1_TRGMII,
+ .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 672b8c353c47..46819297fc3e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -566,6 +566,7 @@ struct mtk_rx_ring {
#define MTK_GMAC2_SGMII (BIT(10) | MTK_SGMII)
#define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \
MTK_GMAC2_SGMII)
+#define MTK_HWLRO BIT(12)
#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
/* struct mtk_eth_data - This is the structure holding all differences
@@ -635,7 +636,6 @@ struct mtk_eth {
struct regmap *ethsys;
struct regmap *sgmiisys;
struct regmap *pctl;
- u32 chip_id;
bool hwlro;
refcount_t dma_refcnt;
struct mtk_tx_ring tx_ring;
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index 16b10d01fcf4..3f400770fcd8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \
main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
- srq.o resource_tracker.o
+ srq.o resource_tracker.o crdump.o
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index e2b6b0cac1ac..c81d15bf259c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -178,10 +178,12 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
dev = persist->dev;
mlx4_err(dev, "device is going to be reset\n");
- if (mlx4_is_slave(dev))
+ if (mlx4_is_slave(dev)) {
err = mlx4_reset_slave(dev);
- else
+ } else {
+ mlx4_crdump_collect(dev);
err = mlx4_reset_master(dev);
+ }
if (!err) {
mlx4_err(dev, "device was reset successfully\n");
@@ -212,7 +214,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
!(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
- err = mlx4_restart_one(persist->pdev);
+ err = mlx4_restart_one(persist->pdev, false, NULL);
mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n",
err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
new file mode 100644
index 000000000000..88316c743820
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "mlx4.h"
+
+#define BAD_ACCESS 0xBADACCE5
+#define HEALTH_BUFFER_SIZE 0x40
+#define CR_ENABLE_BIT swab32(BIT(6))
+#define CR_ENABLE_BIT_OFFSET 0xF3F04
+#define MAX_NUM_OF_DUMPS_TO_STORE (8)
+
+static const char *region_cr_space_str = "cr-space";
+static const char *region_fw_health_str = "fw-health";
+
+/* Records whether the CR enable bit was set before the crdump */
+static bool crdump_enable_bit_set;
+
+static void crdump_enable_crspace_access(struct mlx4_dev *dev,
+ u8 __iomem *cr_space)
+{
+ /* Get current enable bit value */
+	crdump_enable_bit_set =
+ readl(cr_space + CR_ENABLE_BIT_OFFSET) & CR_ENABLE_BIT;
+
+ /* Enable FW CR filter (set bit6 to 0) */
+	if (crdump_enable_bit_set)
+ writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) & ~CR_ENABLE_BIT,
+ cr_space + CR_ENABLE_BIT_OFFSET);
+
+ /* Enable block volatile crspace accesses */
+ writel(swab32(1), cr_space + dev->caps.health_buffer_addrs +
+ HEALTH_BUFFER_SIZE);
+}
+
+static void crdump_disable_crspace_access(struct mlx4_dev *dev,
+ u8 __iomem *cr_space)
+{
+ /* Disable block volatile crspace accesses */
+ writel(0, cr_space + dev->caps.health_buffer_addrs +
+ HEALTH_BUFFER_SIZE);
+
+ /* Restore FW CR filter value (set bit6 to original value) */
+	if (crdump_enable_bit_set)
+ writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) | CR_ENABLE_BIT,
+ cr_space + CR_ENABLE_BIT_OFFSET);
+}
+
+static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev,
+ u8 __iomem *cr_space,
+ u32 id)
+{
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ struct pci_dev *pdev = dev->persist->pdev;
+ unsigned long cr_res_size;
+ u8 *crspace_data;
+ int offset;
+ int err;
+
+ if (!crdump->region_crspace) {
+ mlx4_err(dev, "crdump: cr-space region is NULL\n");
+ return;
+ }
+
+ /* Try to collect CR space */
+ cr_res_size = pci_resource_len(pdev, 0);
+ crspace_data = kvmalloc(cr_res_size, GFP_KERNEL);
+ if (crspace_data) {
+ for (offset = 0; offset < cr_res_size; offset += 4)
+ *(u32 *)(crspace_data + offset) =
+ readl(cr_space + offset);
+
+ err = devlink_region_snapshot_create(crdump->region_crspace,
+ cr_res_size, crspace_data,
+ id, &kvfree);
+ if (err) {
+ kvfree(crspace_data);
+ mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
+ region_cr_space_str, id, err);
+ } else {
+ mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n",
+ id, region_cr_space_str);
+ }
+ } else {
+ mlx4_err(dev, "crdump: Failed to allocate crspace buffer\n");
+ }
+}
+
+static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev,
+ u8 __iomem *cr_space,
+ u32 id)
+{
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ u8 *health_data;
+ int offset;
+ int err;
+
+ if (!crdump->region_fw_health) {
+ mlx4_err(dev, "crdump: fw-health region is NULL\n");
+ return;
+ }
+
+ /* Try to collect health buffer */
+ health_data = kvmalloc(HEALTH_BUFFER_SIZE, GFP_KERNEL);
+ if (health_data) {
+ u8 __iomem *health_buf_start =
+ cr_space + dev->caps.health_buffer_addrs;
+
+ for (offset = 0; offset < HEALTH_BUFFER_SIZE; offset += 4)
+ *(u32 *)(health_data + offset) =
+ readl(health_buf_start + offset);
+
+ err = devlink_region_snapshot_create(crdump->region_fw_health,
+ HEALTH_BUFFER_SIZE,
+ health_data,
+ id, &kvfree);
+ if (err) {
+ kvfree(health_data);
+ mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
+ region_fw_health_str, id, err);
+ } else {
+ mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n",
+ id, region_fw_health_str);
+ }
+ } else {
+ mlx4_err(dev, "crdump: Failed to allocate health buffer\n");
+ }
+}
+
+int mlx4_crdump_collect(struct mlx4_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ struct pci_dev *pdev = dev->persist->pdev;
+ unsigned long cr_res_size;
+ u8 __iomem *cr_space;
+ u32 id;
+
+ if (!dev->caps.health_buffer_addrs) {
+ mlx4_info(dev, "crdump: FW doesn't support health buffer access, skipping\n");
+ return 0;
+ }
+
+ if (!crdump->snapshot_enable) {
+ mlx4_info(dev, "crdump: devlink snapshot disabled, skipping\n");
+ return 0;
+ }
+
+ cr_res_size = pci_resource_len(pdev, 0);
+
+ cr_space = ioremap(pci_resource_start(pdev, 0), cr_res_size);
+ if (!cr_space) {
+ mlx4_err(dev, "crdump: Failed to map pci cr region\n");
+ return -ENODEV;
+ }
+
+ crdump_enable_crspace_access(dev, cr_space);
+
+ /* Get the available snapshot ID for the dumps */
+ id = devlink_region_shapshot_id_get(devlink);
+
+ /* Try to capture dumps */
+ mlx4_crdump_collect_crspace(dev, cr_space, id);
+ mlx4_crdump_collect_fw_health(dev, cr_space, id);
+
+ crdump_disable_crspace_access(dev, cr_space);
+
+ iounmap(cr_space);
+ return 0;
+}
+
+int mlx4_crdump_init(struct mlx4_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ struct pci_dev *pdev = dev->persist->pdev;
+
+ crdump->snapshot_enable = false;
+
+ /* Create cr-space region */
+ crdump->region_crspace =
+ devlink_region_create(devlink,
+ region_cr_space_str,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ pci_resource_len(pdev, 0));
+ if (IS_ERR(crdump->region_crspace))
+ mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
+ region_cr_space_str,
+ PTR_ERR(crdump->region_crspace));
+
+ /* Create fw-health region */
+ crdump->region_fw_health =
+ devlink_region_create(devlink,
+ region_fw_health_str,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ HEALTH_BUFFER_SIZE);
+ if (IS_ERR(crdump->region_fw_health))
+ mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
+ region_fw_health_str,
+ PTR_ERR(crdump->region_fw_health));
+
+ return 0;
+}
+
+void mlx4_crdump_end(struct mlx4_dev *dev)
+{
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+
+ devlink_region_destroy(crdump->region_fw_health);
+ devlink_region_destroy(crdump->region_crspace);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 65eb06e017e4..6785661d1a72 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2926,7 +2926,6 @@ static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return mlx4_xdp_set(dev, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_id = mlx4_xdp_query(dev);
- xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 3360f7b9ee73..a1aeeb8094c3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -795,8 +795,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(dev, xdp_prog, act);
+ /* fall through */
case XDP_DROP:
ring->xdp_drop++;
xdp_drop_no_cnt:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 0227786308af..1857ee0f0871 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -688,15 +688,16 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
- return fallback(dev, skb) % rings_p_up;
+ return fallback(dev, skb, NULL) % rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, const void *src,
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 46dcbfbe4c5e..babcfd9c0571 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -825,7 +825,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
-
+#define QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET 0xe4
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -1082,6 +1082,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->rl_caps.min_unit = size >> 14;
}
+ MLX4_GET(dev_cap->health_buffer_addrs, outbox,
+ QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET);
+
MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
if (field32 & (1 << 16))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index cd6399c76bfd..650ae08c71de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -128,6 +128,7 @@ struct mlx4_dev_cap {
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
struct mlx4_rate_limit_caps rl_caps;
+ u32 health_buffer_addrs;
struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
bool wol_port[MLX4_MAX_PORTS + 1];
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 872014702fc1..d2d59444f562 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -159,9 +159,10 @@ static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
-int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+int log_mtts_per_seg = ilog2(1);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
-MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (0-7) (default: 0)");
static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
@@ -177,6 +178,131 @@ struct mlx4_port_config {
static atomic_t pf_loading = ATOMIC_INIT(0);
+static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ ctx->val.vbool = !!mlx4_internal_err_reset;
+ return 0;
+}
+
+static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ mlx4_internal_err_reset = ctx->val.vbool;
+ return 0;
+}
+
+static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+
+ ctx->val.vbool = dev->persist->crdump.snapshot_enable;
+ return 0;
+}
+
+static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+
+ dev->persist->crdump.snapshot_enable = ctx->val.vbool;
+ return 0;
+}
+
+static int
+mlx4_devlink_max_macs_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ u32 value = val.vu32;
+
+ if (value < 1 || value > 128)
+ return -ERANGE;
+
+ if (!is_power_of_2(value)) {
+ NL_SET_ERR_MSG_MOD(extack, "max_macs supported must be power of 2");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum mlx4_devlink_param_id {
+ MLX4_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+};
+
+static const struct devlink_param mlx4_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(INT_ERR_RESET,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ mlx4_devlink_ierr_reset_get,
+ mlx4_devlink_ierr_reset_set, NULL),
+ DEVLINK_PARAM_GENERIC(MAX_MACS,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx4_devlink_max_macs_validate),
+ DEVLINK_PARAM_GENERIC(REGION_SNAPSHOT,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ mlx4_devlink_crdump_snapshot_get,
+ mlx4_devlink_crdump_snapshot_set, NULL),
+ DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ "enable_64b_cqe_eqe", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+ DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ "enable_4k_uar", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+};
+
+static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+ int err;
+
+ err = devlink_param_driverinit_value_set(devlink, param_id, init_val);
+ if (err)
+ mlx4_warn(dev,
+ "devlink set parameter %u value failed (err = %d)",
+ param_id, err);
+}
+
+static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
+{
+ union devlink_param_value value;
+
+ value.vbool = !!mlx4_internal_err_reset;
+ mlx4_devlink_set_init_value(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ value);
+
+ value.vu32 = 1UL << log_num_mac;
+ mlx4_devlink_set_init_value(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value);
+
+ value.vbool = enable_64b_cqe_eqe;
+ mlx4_devlink_set_init_value(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ value);
+
+ value.vbool = enable_4k_uar;
+ mlx4_devlink_set_init_value(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ value);
+
+ value.vbool = false;
+ mlx4_devlink_set_init_value(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ value);
+}
+
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap)
{
@@ -428,6 +554,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
dev->caps.wol_port[1] = dev_cap->wol_port[1];
dev->caps.wol_port[2] = dev_cap->wol_port[2];
+ dev->caps.health_buffer_addrs = dev_cap->health_buffer_addrs;
/* Save uar page shift */
if (!mlx4_is_slave(dev)) {
@@ -3711,10 +3838,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
}
}
- err = mlx4_catas_init(&priv->dev);
+ err = mlx4_crdump_init(&priv->dev);
if (err)
goto err_release_regions;
+ err = mlx4_catas_init(&priv->dev);
+ if (err)
+ goto err_crdump;
+
err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
if (err)
goto err_catas;
@@ -3724,6 +3855,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
err_catas:
mlx4_catas_end(&priv->dev);
+err_crdump:
+ mlx4_crdump_end(&priv->dev);
+
err_release_regions:
pci_release_regions(pdev);
@@ -3757,8 +3891,68 @@ static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
return __set_port_type(info, mlx4_port_type);
}
+static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ union devlink_param_value saved_value;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ &saved_value);
+ if (!err && mlx4_internal_err_reset != saved_value.vbool) {
+ mlx4_internal_err_reset = saved_value.vbool;
+ /* Notify on value changed on runtime configuration mode */
+ devlink_param_value_changed(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
+ }
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &saved_value);
+ if (!err)
+ log_num_mac = order_base_2(saved_value.vu32);
+ err = devlink_param_driverinit_value_get(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ &saved_value);
+ if (!err)
+ enable_64b_cqe_eqe = saved_value.vbool;
+ err = devlink_param_driverinit_value_get(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ &saved_value);
+ if (!err)
+ enable_4k_uar = saved_value.vbool;
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ &saved_value);
+ if (!err && crdump->snapshot_enable != saved_value.vbool) {
+ crdump->snapshot_enable = saved_value.vbool;
+ devlink_param_value_changed(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
+ }
+}
+
+static int mlx4_devlink_reload(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_dev_persistent *persist = dev->persist;
+ int err;
+
+ if (persist->num_vfs)
+ mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
+ err = mlx4_restart_one(persist->pdev, true, devlink);
+ if (err)
+ mlx4_err(persist->dev, "mlx4_restart_one failed, ret=%d\n", err);
+
+ return err;
+}
+
static const struct devlink_ops mlx4_devlink_ops = {
.port_type_set = mlx4_devlink_port_type_set,
+ .reload = mlx4_devlink_reload,
};
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -3792,14 +3986,21 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ret = devlink_register(devlink, &pdev->dev);
if (ret)
goto err_persist_free;
-
- ret = __mlx4_init_one(pdev, id->driver_data, priv);
+ ret = devlink_params_register(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
if (ret)
goto err_devlink_unregister;
+ mlx4_devlink_set_params_init_values(devlink);
+ ret = __mlx4_init_one(pdev, id->driver_data, priv);
+ if (ret)
+ goto err_params_unregister;
pci_save_state(pdev);
return 0;
+err_params_unregister:
+ devlink_params_unregister(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
err_devlink_unregister:
devlink_unregister(devlink);
err_persist_free:
@@ -3929,6 +4130,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
else
mlx4_info(dev, "%s: interface is down\n", __func__);
mlx4_catas_end(dev);
+ mlx4_crdump_end(dev);
if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
mlx4_warn(dev, "Disabling SR-IOV\n");
pci_disable_sriov(pdev);
@@ -3936,6 +4138,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
mlx4_pci_disable_device(dev);
+ devlink_params_unregister(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
devlink_unregister(devlink);
kfree(dev->persist);
devlink_free(devlink);
@@ -3960,7 +4164,7 @@ static int restore_current_port_types(struct mlx4_dev *dev,
return err;
}
-int mlx4_restart_one(struct pci_dev *pdev)
+int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
@@ -3973,6 +4177,8 @@ int mlx4_restart_one(struct pci_dev *pdev)
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
mlx4_unload_one(pdev);
+ if (reload)
+ mlx4_devlink_param_load_driverinit_values(devlink);
err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
if (err) {
mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
@@ -4205,7 +4411,7 @@ static int __init mlx4_verify_params(void)
if (use_prio != 0)
pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
- if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
+ if ((log_mtts_per_seg < 0) || (log_mtts_per_seg > 7)) {
pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
log_mtts_per_seg);
return -1;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 4c5306dbcf11..ffed2d4c9403 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1412,6 +1412,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
case MLX4_STEERING_MODE_A0:
if (prot == MLX4_PROT_ETH)
return 0;
+ /* fall through */
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH)
@@ -1441,6 +1442,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
case MLX4_STEERING_MODE_A0:
if (prot == MLX4_PROT_ETH)
return 0;
+ /* fall through */
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cb9e923e8399..ebcd2778eeb3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -84,7 +84,6 @@ enum {
MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7,
MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12,
MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
- MLX4_MTT_ENTRY_PER_SEG = 8,
};
enum {
@@ -1042,7 +1041,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
int mlx4_catas_init(struct mlx4_dev *dev);
void mlx4_catas_end(struct mlx4_dev *dev);
-int mlx4_restart_one(struct pci_dev *pdev);
+int mlx4_crdump_init(struct mlx4_dev *dev);
+void mlx4_crdump_end(struct mlx4_dev *dev);
+int mlx4_restart_one(struct pci_dev *pdev, bool reload,
+ struct devlink *devlink);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
@@ -1227,6 +1229,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
int mlx4_comm_internal_err(u32 slave_read);
+int mlx4_crdump_collect(struct mlx4_dev *dev);
+
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type);
void mlx4_do_sense_ports(struct mlx4_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index ace6545f82e6..c3228b89df46 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -699,7 +699,8 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index bae8b22edbb7..ba361c5fbda3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -105,7 +105,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
request->num_mtt =
roundup_pow_of_two(max_t(unsigned, request->num_mtt,
min(1UL << (31 - log_mtts_per_seg),
- si.totalram >> (log_mtts_per_seg - 1))));
+ (si.totalram << 1) >> log_mtts_per_seg)));
+
profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz;
profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 7a84dd07ced2..37a551436e4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -7,6 +7,7 @@ config MLX5_CORE
depends on MAY_USE_DEVLINK
depends on PCI
imply PTP_1588_CLOCK
+ imply VXLAN
default n
---help---
Core driver for low level functionality of the ConnectX-4 and
@@ -35,6 +36,24 @@ config MLX5_CORE_EN
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
+config MLX5_EN_ARFS
+ bool "Mellanox MLX5 ethernet accelerated receive flow steering (ARFS) support"
+ depends on MLX5_CORE_EN && RFS_ACCEL
+ default y
+ ---help---
+	  Mellanox MLX5 ethernet hardware-accelerated receive flow steering support.
+	  Enables ethernet netdevice arfs support and ntuple filtering.
+
+config MLX5_EN_RXNFC
+ bool "Mellanox MLX5 ethernet rx nfc flow steering support"
+ depends on MLX5_CORE_EN
+ default y
+ ---help---
+	  Mellanox MLX5 ethernet rx nfc flow steering support.
+	  Enables ethtool receive network flow classification, which allows user-defined
+	  flow rules to direct traffic into an arbitrary rx queue via the ethtool
+	  set/get_rxnfc API.
+
config MLX5_MPFS
bool "Mellanox Technologies MLX5 MPFS support"
depends on MLX5_CORE_EN
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9efbf193ad5a..d324a3884462 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -1,33 +1,61 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
+#
+# Makefile for Mellanox 5th generation network adapters
+# (ConnectX series) core & netdev driver
+#
+
subdir-ccflags-y += -I$(src)
+obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
+
+#
+# mlx5 core basic
+#
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
- fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \
- diag/fs_tracepoint.o
-
-mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o
-
-mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
- fpga/ipsec.o fpga/tls.o
+ fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \
+ diag/fs_tracepoint.o diag/fw_tracer.o
+#
+# Netdev basic
+#
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
- en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o
-
-mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
-
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.o
+ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
+ en_selftest.o en/port.o
+
+#
+# Netdev extra
+#
+mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
+mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o
+
+#
+# Core extra
+#
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o
+mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
+mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
+mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
+
+#
+# Ipoib netdev
+#
+mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o
-mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
+#
+# Accelerations & FPGA
+#
+mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o
-mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o
+mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
+ fpga/ipsec.o fpga/tls.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
- en_accel/ipsec_stats.o
+ en_accel/ipsec_stats.o
-mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
+mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
CFLAGS_tracepoint.o := -I$(src)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
new file mode 100644
index 000000000000..c13260467750
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
@@ -0,0 +1,37 @@
+#ifndef __MLX5E_ACCEL_H__
+#define __MLX5E_ACCEL_H__
+
+#ifdef CONFIG_MLX5_ACCEL
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "en.h"
+
+static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
+{
+ __be16 *ethtype;
+
+ if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
+ return false;
+ ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
+ if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
+ return false;
+ return true;
+}
+
+static inline void remove_metadata_hdr(struct sk_buff *skb)
+{
+ struct ethhdr *old_eth;
+ struct ethhdr *new_eth;
+
+ /* Remove the metadata from the buffer */
+ old_eth = (struct ethhdr *)skb->data;
+ new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
+ memmove(new_eth, old_eth, 2 * ETH_ALEN);
+ /* Ethertype is already in its new place */
+ skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
+}
+
+#endif /* CONFIG_MLX5_ACCEL */
+
+#endif /* __MLX5E_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
index 77ac19f38cbe..da7bd26368f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
@@ -37,17 +37,26 @@
#include "mlx5_core.h"
#include "fpga/tls.h"
-int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid)
+int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx)
{
- return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, p_swid);
+ return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
+ start_offload_tcp_sn, p_swid,
+ direction_sx);
}
-void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid)
+void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ bool direction_sx)
{
- mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL);
+ mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
+}
+
+int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn)
+{
+ return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
}
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
index 6f9c9f446ecc..def4093ebfae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
@@ -60,10 +60,14 @@ struct mlx5_ifc_tls_flow_bits {
u8 reserved_at_2[0x1e];
};
-int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid);
-void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid);
+int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx);
+void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ bool direction_sx);
+int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn);
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
@@ -72,10 +76,14 @@ void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
#else
static inline int
-mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid) { return 0; }
-static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { }
+mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx) { return -ENOTSUPP; }
+static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ bool direction_sx) { }
+static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle,
+ u32 seq, u64 rcd_sn) { return 0; }
static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 9364015c818b..3ce14d42ddc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -211,7 +211,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
ent->ret = 0;
return;
}
- usleep_range(5000, 10000);
+ cond_resched();
} while (time_before(jiffies, poll_end));
ent->ret = -ETIMEDOUT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
new file mode 100644
index 000000000000..d4ec93bde4de
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -0,0 +1,947 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#define CREATE_TRACE_POINTS
+#include "fw_tracer.h"
+#include "fw_tracer_tracepoint.h"
+
+static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
+{
+ u32 *string_db_base_address_out = tracer->str_db.base_address_out;
+ u32 *string_db_size_out = tracer->str_db.size_out;
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+ void *mtrc_cap_sp;
+ int err, i;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTRC_CAP, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Error reading tracer caps %d\n",
+ err);
+ return err;
+ }
+
+ if (!MLX5_GET(mtrc_cap, out, trace_to_memory)) {
+ mlx5_core_dbg(dev, "FWTracer: Device does not support logging traces to memory\n");
+ return -ENOTSUPP;
+ }
+
+ tracer->trc_ver = MLX5_GET(mtrc_cap, out, trc_ver);
+ tracer->str_db.first_string_trace =
+ MLX5_GET(mtrc_cap, out, first_string_trace);
+ tracer->str_db.num_string_trace =
+ MLX5_GET(mtrc_cap, out, num_string_trace);
+ tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db);
+ tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
+
+ for (i = 0; i < tracer->str_db.num_string_db; i++) {
+ mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]);
+ string_db_base_address_out[i] = MLX5_GET(mtrc_string_db_param,
+ mtrc_cap_sp,
+ string_db_base_address);
+ string_db_size_out[i] = MLX5_GET(mtrc_string_db_param,
+ mtrc_cap_sp, string_db_size);
+ }
+
+ return err;
+}
+
+static int mlx5_set_mtrc_caps_trace_owner(struct mlx5_fw_tracer *tracer,
+ u32 *out, u32 out_size,
+ u8 trace_owner)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+
+ MLX5_SET(mtrc_cap, in, trace_owner, trace_owner);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out, out_size,
+ MLX5_REG_MTRC_CAP, 0, 1);
+}
+
+static int mlx5_fw_tracer_ownership_acquire(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+ int err;
+
+ err = mlx5_set_mtrc_caps_trace_owner(tracer, out, sizeof(out),
+ MLX5_FW_TRACER_ACQUIRE_OWNERSHIP);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Acquire tracer ownership failed %d\n",
+ err);
+ return err;
+ }
+
+ tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
+
+ if (!tracer->owner)
+ return -EBUSY;
+
+ return 0;
+}
+
+static void mlx5_fw_tracer_ownership_release(struct mlx5_fw_tracer *tracer)
+{
+ u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+
+ mlx5_set_mtrc_caps_trace_owner(tracer, out, sizeof(out),
+ MLX5_FW_TRACER_RELEASE_OWNERSHIP);
+ tracer->owner = false;
+}
+
+static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ struct device *ddev = &dev->pdev->dev;
+ dma_addr_t dma;
+ void *buff;
+ gfp_t gfp;
+ int err;
+
+ tracer->buff.size = TRACE_BUFFER_SIZE_BYTE;
+
+ gfp = GFP_KERNEL | __GFP_ZERO;
+ buff = (void *)__get_free_pages(gfp,
+ get_order(tracer->buff.size));
+ if (!buff) {
+ err = -ENOMEM;
+ mlx5_core_warn(dev, "FWTracer: Failed to allocate pages, %d\n", err);
+ return err;
+ }
+ tracer->buff.log_buf = buff;
+
+ dma = dma_map_single(ddev, buff, tracer->buff.size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ddev, dma)) {
+ mlx5_core_warn(dev, "FWTracer: Unable to map DMA: %d\n",
+ dma_mapping_error(ddev, dma));
+ err = -ENOMEM;
+ goto free_pages;
+ }
+ tracer->buff.dma = dma;
+
+ return 0;
+
+free_pages:
+ free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size));
+
+ return err;
+}
+
+static void mlx5_fw_tracer_destroy_log_buf(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ struct device *ddev = &dev->pdev->dev;
+
+ if (!tracer->buff.log_buf)
+ return;
+
+ dma_unmap_single(ddev, tracer->buff.dma, tracer->buff.size, DMA_FROM_DEVICE);
+ free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size));
+}
+
+static int mlx5_fw_tracer_create_mkey(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ int err, inlen, i;
+ __be64 *mtt;
+ void *mkc;
+ u32 *in;
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
+ sizeof(*mtt) * round_up(TRACER_BUFFER_PAGE_NUM, 2);
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2));
+	mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0 ; i < TRACER_BUFFER_PAGE_NUM ; i++)
+ mtt[i] = cpu_to_be64(tracer->buff.dma + i * PAGE_SIZE);
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, pd, tracer->buff.pdn);
+ MLX5_SET(mkc, mkc, bsf_octword_size, 0);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2));
+ MLX5_SET64(mkc, mkc, start_addr, tracer->buff.dma);
+ MLX5_SET64(mkc, mkc, len, tracer->buff.size);
+ err = mlx5_core_create_mkey(dev, &tracer->buff.mkey, in, inlen);
+ if (err)
+ mlx5_core_warn(dev, "FWTracer: Failed to create mkey, %d\n", err);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_fw_tracer_free_strings_db(struct mlx5_fw_tracer *tracer)
+{
+ u32 num_string_db = tracer->str_db.num_string_db;
+ int i;
+
+ for (i = 0; i < num_string_db; i++) {
+ kfree(tracer->str_db.buffer[i]);
+ tracer->str_db.buffer[i] = NULL;
+ }
+}
+
+static int mlx5_fw_tracer_allocate_strings_db(struct mlx5_fw_tracer *tracer)
+{
+ u32 *string_db_size_out = tracer->str_db.size_out;
+ u32 num_string_db = tracer->str_db.num_string_db;
+ int i;
+
+ for (i = 0; i < num_string_db; i++) {
+ tracer->str_db.buffer[i] = kzalloc(string_db_size_out[i], GFP_KERNEL);
+ if (!tracer->str_db.buffer[i])
+ goto free_strings_db;
+ }
+
+ return 0;
+
+free_strings_db:
+ mlx5_fw_tracer_free_strings_db(tracer);
+ return -ENOMEM;
+}
+
+static void mlx5_tracer_read_strings_db(struct work_struct *work)
+{
+ struct mlx5_fw_tracer *tracer = container_of(work, struct mlx5_fw_tracer,
+ read_fw_strings_work);
+ u32 num_of_reads, num_string_db = tracer->str_db.num_string_db;
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0};
+ u32 leftovers, offset;
+ int err = 0, i, j;
+ u32 *out, outlen;
+ void *out_value;
+
+ outlen = MLX5_ST_SZ_BYTES(mtrc_stdb) + STRINGS_DB_READ_SIZE_BYTES;
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < num_string_db; i++) {
+ offset = 0;
+ MLX5_SET(mtrc_stdb, in, string_db_index, i);
+ num_of_reads = tracer->str_db.size_out[i] /
+ STRINGS_DB_READ_SIZE_BYTES;
+ leftovers = (tracer->str_db.size_out[i] %
+ STRINGS_DB_READ_SIZE_BYTES) /
+ STRINGS_DB_LEFTOVER_SIZE_BYTES;
+
+ MLX5_SET(mtrc_stdb, in, read_size, STRINGS_DB_READ_SIZE_BYTES);
+ for (j = 0; j < num_of_reads; j++) {
+ MLX5_SET(mtrc_stdb, in, start_offset, offset);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ outlen, MLX5_REG_MTRC_STDB,
+ 0, 1);
+ if (err) {
+ mlx5_core_dbg(dev, "FWTracer: Failed to read strings DB %d\n",
+ err);
+ goto out_free;
+ }
+
+ out_value = MLX5_ADDR_OF(mtrc_stdb, out, string_db_data);
+ memcpy(tracer->str_db.buffer[i] + offset, out_value,
+ STRINGS_DB_READ_SIZE_BYTES);
+ offset += STRINGS_DB_READ_SIZE_BYTES;
+ }
+
+		/* Strings database is aligned to 64, need to read leftovers */
+ MLX5_SET(mtrc_stdb, in, read_size,
+ STRINGS_DB_LEFTOVER_SIZE_BYTES);
+ for (j = 0; j < leftovers; j++) {
+ MLX5_SET(mtrc_stdb, in, start_offset, offset);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ outlen, MLX5_REG_MTRC_STDB,
+ 0, 1);
+ if (err) {
+ mlx5_core_dbg(dev, "FWTracer: Failed to read strings DB %d\n",
+ err);
+ goto out_free;
+ }
+
+ out_value = MLX5_ADDR_OF(mtrc_stdb, out, string_db_data);
+ memcpy(tracer->str_db.buffer[i] + offset, out_value,
+ STRINGS_DB_LEFTOVER_SIZE_BYTES);
+ offset += STRINGS_DB_LEFTOVER_SIZE_BYTES;
+ }
+ }
+
+ tracer->str_db.loaded = true;
+
+out_free:
+ kfree(out);
+out:
+ return;
+}
+
+static void mlx5_fw_tracer_arm(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
+ int err;
+
+ MLX5_SET(mtrc_ctrl, in, arm_event, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTRC_CTRL, 0, 1);
+ if (err)
+ mlx5_core_warn(dev, "FWTracer: Failed to arm tracer event %d\n", err);
+}
+
+static const char *VAL_PARM = "%llx";
+static const char *REPLACE_64_VAL_PARM = "%x%x";
+static const char *PARAM_CHAR = "%";
+
+static int mlx5_tracer_message_hash(u32 message_id)
+{
+ return jhash_1word(message_id, 0) & (MESSAGE_HASH_SIZE - 1);
+}
+
+static struct tracer_string_format *mlx5_tracer_message_insert(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct hlist_head *head =
+ &tracer->hash[mlx5_tracer_message_hash(tracer_event->string_event.tmsn)];
+ struct tracer_string_format *cur_string;
+
+ cur_string = kzalloc(sizeof(*cur_string), GFP_KERNEL);
+ if (!cur_string)
+ return NULL;
+
+ hlist_add_head(&cur_string->hlist, head);
+
+ return cur_string;
+}
+
+static struct tracer_string_format *mlx5_tracer_get_string(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct tracer_string_format *cur_string;
+ u32 str_ptr, offset;
+ int i;
+
+ str_ptr = tracer_event->string_event.string_param;
+
+ for (i = 0; i < tracer->str_db.num_string_db; i++) {
+ if (str_ptr > tracer->str_db.base_address_out[i] &&
+ str_ptr < tracer->str_db.base_address_out[i] +
+ tracer->str_db.size_out[i]) {
+ offset = str_ptr - tracer->str_db.base_address_out[i];
+ /* add it to the hash */
+ cur_string = mlx5_tracer_message_insert(tracer, tracer_event);
+ if (!cur_string)
+ return NULL;
+ cur_string->string = (char *)(tracer->str_db.buffer[i] +
+ offset);
+ return cur_string;
+ }
+ }
+
+ return NULL;
+}
+
+static void mlx5_tracer_clean_message(struct tracer_string_format *str_frmt)
+{
+ hlist_del(&str_frmt->hlist);
+ kfree(str_frmt);
+}
+
+static int mlx5_tracer_get_num_of_params(char *str)
+{
+ char *substr, *pstr = str;
+ int num_of_params = 0;
+
+ /* replace %llx with %x%x */
+ substr = strstr(pstr, VAL_PARM);
+ while (substr) {
+ memcpy(substr, REPLACE_64_VAL_PARM, 4);
+ pstr = substr;
+ substr = strstr(pstr, VAL_PARM);
+ }
+
+ /* count all the % characters */
+ substr = strstr(str, PARAM_CHAR);
+ while (substr) {
+ num_of_params += 1;
+ str = substr + 1;
+ substr = strstr(str, PARAM_CHAR);
+ }
+
+ return num_of_params;
+}
+
+static struct tracer_string_format *mlx5_tracer_message_find(struct hlist_head *head,
+ u8 event_id, u32 tmsn)
+{
+ struct tracer_string_format *message;
+
+ hlist_for_each_entry(message, head, hlist)
+ if (message->event_id == event_id && message->tmsn == tmsn)
+ return message;
+
+ return NULL;
+}
+
+static struct tracer_string_format *mlx5_tracer_message_get(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct hlist_head *head =
+ &tracer->hash[mlx5_tracer_message_hash(tracer_event->string_event.tmsn)];
+
+ return mlx5_tracer_message_find(head, tracer_event->event_id, tracer_event->string_event.tmsn);
+}
+
+static void poll_trace(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event, u64 *trace)
+{
+ u32 timestamp_low, timestamp_mid, timestamp_high, urts;
+
+ tracer_event->event_id = MLX5_GET(tracer_event, trace, event_id);
+ tracer_event->lost_event = MLX5_GET(tracer_event, trace, lost);
+
+ switch (tracer_event->event_id) {
+ case TRACER_EVENT_TYPE_TIMESTAMP:
+ tracer_event->type = TRACER_EVENT_TYPE_TIMESTAMP;
+ urts = MLX5_GET(tracer_timestamp_event, trace, urts);
+ if (tracer->trc_ver == 0)
+ tracer_event->timestamp_event.unreliable = !!(urts >> 2);
+ else
+ tracer_event->timestamp_event.unreliable = !!(urts & 1);
+
+ timestamp_low = MLX5_GET(tracer_timestamp_event,
+ trace, timestamp7_0);
+ timestamp_mid = MLX5_GET(tracer_timestamp_event,
+ trace, timestamp39_8);
+ timestamp_high = MLX5_GET(tracer_timestamp_event,
+ trace, timestamp52_40);
+
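+		/* The timestamp is split across three fields; stitch the
+		 * 53-bit value back together.
+		 */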
+ tracer_event->timestamp_event.timestamp =
+ ((u64)timestamp_high << 40) |
+ ((u64)timestamp_mid << 8) |
+ (u64)timestamp_low;
+ break;
+ default:
+		if (tracer_event->event_id >= tracer->str_db.first_string_trace &&
+ tracer_event->event_id <= tracer->str_db.first_string_trace +
+ tracer->str_db.num_string_trace) {
+ tracer_event->type = TRACER_EVENT_TYPE_STRING;
+ tracer_event->string_event.timestamp =
+ MLX5_GET(tracer_string_event, trace, timestamp);
+ tracer_event->string_event.string_param =
+ MLX5_GET(tracer_string_event, trace, string_param);
+ tracer_event->string_event.tmsn =
+ MLX5_GET(tracer_string_event, trace, tmsn);
+ tracer_event->string_event.tdsn =
+ MLX5_GET(tracer_string_event, trace, tdsn);
+ } else {
+ tracer_event->type = TRACER_EVENT_TYPE_UNRECOGNIZED;
+ }
+ break;
+ }
+}
+
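+/* The last event in a block is expected to be a timestamp event; use its
+ * timestamp as the block timestamp, or 0 if the block does not end with
+ * one.
+ */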
+static u64 get_block_timestamp(struct mlx5_fw_tracer *tracer, u64 *ts_event)
+{
+ struct tracer_event tracer_event;
+ u8 event_id;
+
+ event_id = MLX5_GET(tracer_event, ts_event, event_id);
+
+ if (event_id == TRACER_EVENT_TYPE_TIMESTAMP)
+ poll_trace(tracer, &tracer_event, ts_event);
+ else
+ tracer_event.timestamp_event.timestamp = 0;
+
+ return tracer_event.timestamp_event.timestamp;
+}
+
+static void mlx5_fw_tracer_clean_print_hash(struct mlx5_fw_tracer *tracer)
+{
+ struct tracer_string_format *str_frmt;
+ struct hlist_node *n;
+ int i;
+
+ for (i = 0; i < MESSAGE_HASH_SIZE; i++) {
+ hlist_for_each_entry_safe(str_frmt, n, &tracer->hash[i], hlist)
+ mlx5_tracer_clean_message(str_frmt);
+ }
+}
+
+static void mlx5_fw_tracer_clean_ready_list(struct mlx5_fw_tracer *tracer)
+{
+ struct tracer_string_format *str_frmt, *tmp_str;
+
+ list_for_each_entry_safe(str_frmt, tmp_str, &tracer->ready_strings_list,
+ list)
+ list_del(&str_frmt->list);
+}
+
+static void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt,
+ struct mlx5_core_dev *dev,
+ u64 trace_timestamp)
+{
+ char tmp[512];
+
+ snprintf(tmp, sizeof(tmp), str_frmt->string,
+ str_frmt->params[0],
+ str_frmt->params[1],
+ str_frmt->params[2],
+ str_frmt->params[3],
+ str_frmt->params[4],
+ str_frmt->params[5],
+ str_frmt->params[6]);
+
+ trace_mlx5_fw(dev->tracer, trace_timestamp, str_frmt->lost,
+ str_frmt->event_id, tmp);
+
+ /* remove it from hash */
+ mlx5_tracer_clean_message(str_frmt);
+}
+
+static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct tracer_string_format *cur_string;
+
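+	/* tdsn == 0 marks the first event of a message; it carries the format
+	 * string pointer, while the following events carry the parameters.
+	 */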
+ if (tracer_event->string_event.tdsn == 0) {
+ cur_string = mlx5_tracer_get_string(tracer, tracer_event);
+ if (!cur_string)
+ return -1;
+
+ cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string);
+ cur_string->last_param_num = 0;
+ cur_string->event_id = tracer_event->event_id;
+ cur_string->tmsn = tracer_event->string_event.tmsn;
+ cur_string->timestamp = tracer_event->string_event.timestamp;
+ cur_string->lost = tracer_event->lost_event;
+ if (cur_string->num_of_params == 0) /* trace with no params */
+ list_add_tail(&cur_string->list, &tracer->ready_strings_list);
+ } else {
+ cur_string = mlx5_tracer_message_get(tracer, tracer_event);
+ if (!cur_string) {
+			pr_debug("%s Got string event for unknown string tmsn: %d\n",
+ __func__, tracer_event->string_event.tmsn);
+ return -1;
+ }
+ cur_string->last_param_num += 1;
+ if (cur_string->last_param_num > TRACER_MAX_PARAMS) {
+ pr_debug("%s Number of params exceeds the max (%d)\n",
+ __func__, TRACER_MAX_PARAMS);
+ list_add_tail(&cur_string->list, &tracer->ready_strings_list);
+ return 0;
+ }
+ /* keep the new parameter */
+ cur_string->params[cur_string->last_param_num - 1] =
+ tracer_event->string_event.string_param;
+ if (cur_string->last_param_num == cur_string->num_of_params)
+ list_add_tail(&cur_string->list, &tracer->ready_strings_list);
+ }
+
+ return 0;
+}
+
+static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct tracer_timestamp_event timestamp_event =
+ tracer_event->timestamp_event;
+ struct tracer_string_format *str_frmt, *tmp_str;
+ struct mlx5_core_dev *dev = tracer->dev;
+ u64 trace_timestamp;
+
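+	/* A string event carries only the low 7 timestamp bits; combine them
+	 * with the upper bits of this timestamp event, stepping the upper
+	 * part back by one unit if the low bits have already wrapped.
+	 */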
+ list_for_each_entry_safe(str_frmt, tmp_str, &tracer->ready_strings_list, list) {
+ list_del(&str_frmt->list);
+ if (str_frmt->timestamp < (timestamp_event.timestamp & MASK_6_0))
+ trace_timestamp = (timestamp_event.timestamp & MASK_52_7) |
+ (str_frmt->timestamp & MASK_6_0);
+ else
+ trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) |
+ (str_frmt->timestamp & MASK_6_0);
+
+ mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp);
+ }
+}
+
+static int mlx5_tracer_handle_trace(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ if (tracer_event->type == TRACER_EVENT_TYPE_STRING) {
+ mlx5_tracer_handle_string_trace(tracer, tracer_event);
+ } else if (tracer_event->type == TRACER_EVENT_TYPE_TIMESTAMP) {
+ if (!tracer_event->timestamp_event.unreliable)
+ mlx5_tracer_handle_timestamp_trace(tracer, tracer_event);
+ } else {
+		pr_debug("%s Got unrecognized type %d for parsing, exiting\n",
+ __func__, tracer_event->type);
+ }
+ return 0;
+}
+
+static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
+{
+ struct mlx5_fw_tracer *tracer =
+ container_of(work, struct mlx5_fw_tracer, handle_traces_work);
+ u64 block_timestamp, last_block_timestamp, tmp_trace_block[TRACES_PER_BLOCK];
+ u32 block_count, start_offset, prev_start_offset, prev_consumer_index;
+ u32 trace_event_size = MLX5_ST_SZ_BYTES(tracer_event);
+ struct mlx5_core_dev *dev = tracer->dev;
+ struct tracer_event tracer_event;
+ int i;
+
+ mlx5_core_dbg(dev, "FWTracer: Handle Trace event, owner=(%d)\n", tracer->owner);
+ if (!tracer->owner)
+ return;
+
+ block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
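+	/* The buffer holds a power-of-two number of blocks, so the consumer
+	 * index wraps with a simple mask.
+	 */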
+ start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
+
+	/* Copy the block to a local buffer to avoid HW override while it is being processed */
+ memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset,
+ TRACER_BLOCK_SIZE_BYTE);
+
+ block_timestamp =
+ get_block_timestamp(tracer, &tmp_trace_block[TRACES_PER_BLOCK - 1]);
+
+ while (block_timestamp > tracer->last_timestamp) {
+		/* Check for block override if it's not the first block */
+		if (tracer->last_timestamp) {
+ u64 *ts_event;
+			/* To avoid a block being overridden by the HW on
+			 * buffer wraparound, the timestamp of the previous
+			 * block should be compared to the last timestamp
+			 * handled by the driver.
+			 */
+ prev_consumer_index =
+ (tracer->buff.consumer_index - 1) & (block_count - 1);
+ prev_start_offset = prev_consumer_index * TRACER_BLOCK_SIZE_BYTE;
+
+ ts_event = tracer->buff.log_buf + prev_start_offset +
+ (TRACES_PER_BLOCK - 1) * trace_event_size;
+ last_block_timestamp = get_block_timestamp(tracer, ts_event);
+			/* If the previous timestamp differs from the last
+			 * stored timestamp, there is a good chance that the
+			 * current buffer was overwritten and therefore should
+			 * not be parsed.
+			 */
+ if (tracer->last_timestamp != last_block_timestamp) {
+ mlx5_core_warn(dev, "FWTracer: Events were lost\n");
+ tracer->last_timestamp = block_timestamp;
+ tracer->buff.consumer_index =
+ (tracer->buff.consumer_index + 1) & (block_count - 1);
+ break;
+ }
+ }
+
+ /* Parse events */
+		for (i = 0; i < TRACES_PER_BLOCK; i++) {
+ poll_trace(tracer, &tracer_event, &tmp_trace_block[i]);
+ mlx5_tracer_handle_trace(tracer, &tracer_event);
+ }
+
+ tracer->buff.consumer_index =
+ (tracer->buff.consumer_index + 1) & (block_count - 1);
+
+ tracer->last_timestamp = block_timestamp;
+ start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
+ memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset,
+ TRACER_BLOCK_SIZE_BYTE);
+ block_timestamp = get_block_timestamp(tracer,
+ &tmp_trace_block[TRACES_PER_BLOCK - 1]);
+ }
+
+ mlx5_fw_tracer_arm(dev);
+}
+
+static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 out[MLX5_ST_SZ_DW(mtrc_conf)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtrc_conf)] = {0};
+ int err;
+
+ MLX5_SET(mtrc_conf, in, trace_mode, TRACE_TO_MEMORY);
+ MLX5_SET(mtrc_conf, in, log_trace_buffer_size,
+ ilog2(TRACER_BUFFER_PAGE_NUM));
+ MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey.key);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTRC_CONF, 0, 1);
+ if (err)
+ mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err);
+
+ return err;
+}
+
+static int mlx5_fw_tracer_set_mtrc_ctrl(struct mlx5_fw_tracer *tracer, u8 status, u8 arm)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ u32 out[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0};
+ int err;
+
+ MLX5_SET(mtrc_ctrl, in, modify_field_select, TRACE_STATUS);
+ MLX5_SET(mtrc_ctrl, in, trace_status, status);
+ MLX5_SET(mtrc_ctrl, in, arm_event, arm);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTRC_CTRL, 0, 1);
+
+ if (!err && status)
+ tracer->last_timestamp = 0;
+
+ return err;
+}
+
+static int mlx5_fw_tracer_start(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev = tracer->dev;
+ int err;
+
+ err = mlx5_fw_tracer_ownership_acquire(tracer);
+ if (err) {
+ mlx5_core_dbg(dev, "FWTracer: Ownership was not granted %d\n", err);
+ /* Don't fail since ownership can be acquired on a later FW event */
+ return 0;
+ }
+
+ err = mlx5_fw_tracer_set_mtrc_conf(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to set tracer configuration %d\n", err);
+ goto release_ownership;
+ }
+
+ /* enable tracer & trace events */
+ err = mlx5_fw_tracer_set_mtrc_ctrl(tracer, 1, 1);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to enable tracer %d\n", err);
+ goto release_ownership;
+ }
+
+ mlx5_core_dbg(dev, "FWTracer: Ownership granted and active\n");
+ return 0;
+
+release_ownership:
+ mlx5_fw_tracer_ownership_release(tracer);
+ return err;
+}
+
+static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
+{
+ struct mlx5_fw_tracer *tracer =
+ container_of(work, struct mlx5_fw_tracer, ownership_change_work);
+
+ mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
+ if (tracer->owner) {
+ tracer->owner = false;
+ tracer->buff.consumer_index = 0;
+ return;
+ }
+
+ mlx5_fw_tracer_start(tracer);
+}
+
+/* Create software resources (buffers, etc.) */
+struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_tracer *tracer = NULL;
+ int err;
+
+ if (!MLX5_CAP_MCAM_REG(dev, tracer_registers)) {
+ mlx5_core_dbg(dev, "FWTracer: Tracer capability not present\n");
+ return NULL;
+ }
+
+ tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
+ if (!tracer)
+ return ERR_PTR(-ENOMEM);
+
+ tracer->work_queue = create_singlethread_workqueue("mlx5_fw_tracer");
+ if (!tracer->work_queue) {
+ err = -ENOMEM;
+ goto free_tracer;
+ }
+
+ tracer->dev = dev;
+
+ INIT_LIST_HEAD(&tracer->ready_strings_list);
+ INIT_WORK(&tracer->ownership_change_work, mlx5_fw_tracer_ownership_change);
+ INIT_WORK(&tracer->read_fw_strings_work, mlx5_tracer_read_strings_db);
+ INIT_WORK(&tracer->handle_traces_work, mlx5_fw_tracer_handle_traces);
+
+ err = mlx5_query_mtrc_caps(tracer);
+ if (err) {
+ mlx5_core_dbg(dev, "FWTracer: Failed to query capabilities %d\n", err);
+ goto destroy_workqueue;
+ }
+
+ err = mlx5_fw_tracer_create_log_buf(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Create log buffer failed %d\n", err);
+ goto destroy_workqueue;
+ }
+
+ err = mlx5_fw_tracer_allocate_strings_db(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Allocate strings database failed %d\n", err);
+ goto free_log_buf;
+ }
+
+ mlx5_core_dbg(dev, "FWTracer: Tracer created\n");
+
+ return tracer;
+
+free_log_buf:
+ mlx5_fw_tracer_destroy_log_buf(tracer);
+destroy_workqueue:
+ tracer->dev = NULL;
+ destroy_workqueue(tracer->work_queue);
+free_tracer:
+ kfree(tracer);
+ return ERR_PTR(err);
+}
+
+/* Create HW resources and start the tracer.
+ * Must be called before the async EQ is created.
+ */
+int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev;
+ int err;
+
+ if (IS_ERR_OR_NULL(tracer))
+ return 0;
+
+ dev = tracer->dev;
+
+ if (!tracer->str_db.loaded)
+ queue_work(tracer->work_queue, &tracer->read_fw_strings_work);
+
+ err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
+ return err;
+ }
+
+ err = mlx5_fw_tracer_create_mkey(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Failed to create mkey %d\n", err);
+ goto err_dealloc_pd;
+ }
+
+ mlx5_fw_tracer_start(tracer);
+
+ return 0;
+
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+ return err;
+}
+
+/* Stop the tracer and clean up HW resources.
+ * Must be called after the async EQ is destroyed.
+ */
+void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
+{
+ if (IS_ERR_OR_NULL(tracer))
+ return;
+
+	mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner? (%d)\n",
+ tracer->owner);
+
+ cancel_work_sync(&tracer->ownership_change_work);
+ cancel_work_sync(&tracer->handle_traces_work);
+
+ if (tracer->owner)
+ mlx5_fw_tracer_ownership_release(tracer);
+
+ mlx5_core_destroy_mkey(tracer->dev, &tracer->buff.mkey);
+ mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn);
+}
+
+/* Free software resources (buffers, etc.) */
+void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
+{
+ if (IS_ERR_OR_NULL(tracer))
+ return;
+
+ mlx5_core_dbg(tracer->dev, "FWTracer: Destroy\n");
+
+ cancel_work_sync(&tracer->read_fw_strings_work);
+ mlx5_fw_tracer_clean_ready_list(tracer);
+ mlx5_fw_tracer_clean_print_hash(tracer);
+ mlx5_fw_tracer_free_strings_db(tracer);
+ mlx5_fw_tracer_destroy_log_buf(tracer);
+ flush_workqueue(tracer->work_queue);
+ destroy_workqueue(tracer->work_queue);
+ kfree(tracer);
+}
+
+void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
+{
+ struct mlx5_fw_tracer *tracer = dev->tracer;
+
+ if (!tracer)
+ return;
+
+ switch (eqe->sub_type) {
+ case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE:
+ if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state))
+ queue_work(tracer->work_queue, &tracer->ownership_change_work);
+ break;
+ case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
+ if (likely(tracer->str_db.loaded))
+ queue_work(tracer->work_queue, &tracer->handle_traces_work);
+ break;
+ default:
+ mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
+ eqe->sub_type);
+ }
+}
+
+EXPORT_TRACEPOINT_SYMBOL(mlx5_fw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
new file mode 100644
index 000000000000..0347f2dd5cee
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __LIB_TRACER_H__
+#define __LIB_TRACER_H__
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+
+#define STRINGS_DB_SECTIONS_NUM 8
+#define STRINGS_DB_READ_SIZE_BYTES 256
+#define STRINGS_DB_LEFTOVER_SIZE_BYTES 64
+#define TRACER_BUFFER_PAGE_NUM 64
+#define TRACER_BUFFER_CHUNK 4096
+#define TRACE_BUFFER_SIZE_BYTE (TRACER_BUFFER_PAGE_NUM * TRACER_BUFFER_CHUNK)
+
+#define TRACER_BLOCK_SIZE_BYTE 256
+#define TRACES_PER_BLOCK 32
+
+#define TRACER_MAX_PARAMS 7
+#define MESSAGE_HASH_BITS 6
+#define MESSAGE_HASH_SIZE BIT(MESSAGE_HASH_BITS)
+
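+/* Split points for reassembling the trace timestamp: bits 52..7 come from
+ * timestamp events, bits 6..0 from the string event itself.
+ */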
+#define MASK_52_7 (0x1FFFFFFFFFFF80)
+#define MASK_6_0 (0x7F)
+
+struct mlx5_fw_tracer {
+ struct mlx5_core_dev *dev;
+ bool owner;
+ u8 trc_ver;
+ struct workqueue_struct *work_queue;
+ struct work_struct ownership_change_work;
+ struct work_struct read_fw_strings_work;
+
+ /* Strings DB */
+ struct {
+ u8 first_string_trace;
+ u8 num_string_trace;
+ u32 num_string_db;
+ u32 base_address_out[STRINGS_DB_SECTIONS_NUM];
+ u32 size_out[STRINGS_DB_SECTIONS_NUM];
+ void *buffer[STRINGS_DB_SECTIONS_NUM];
+ bool loaded;
+ } str_db;
+
+ /* Log Buffer */
+ struct {
+ u32 pdn;
+ void *log_buf;
+ dma_addr_t dma;
+ u32 size;
+ struct mlx5_core_mkey mkey;
+ u32 consumer_index;
+ } buff;
+
+ u64 last_timestamp;
+ struct work_struct handle_traces_work;
+ struct hlist_head hash[MESSAGE_HASH_SIZE];
+ struct list_head ready_strings_list;
+};
+
+struct tracer_string_format {
+ char *string;
+ int params[TRACER_MAX_PARAMS];
+ int num_of_params;
+ int last_param_num;
+ u8 event_id;
+ u32 tmsn;
+ struct hlist_node hlist;
+ struct list_head list;
+ u32 timestamp;
+ bool lost;
+};
+
+enum mlx5_fw_tracer_ownership_state {
+ MLX5_FW_TRACER_RELEASE_OWNERSHIP,
+ MLX5_FW_TRACER_ACQUIRE_OWNERSHIP,
+};
+
+enum tracer_ctrl_fields_select {
+ TRACE_STATUS = 1 << 0,
+};
+
+enum tracer_event_type {
+ TRACER_EVENT_TYPE_STRING,
+ TRACER_EVENT_TYPE_TIMESTAMP = 0xFF,
+ TRACER_EVENT_TYPE_UNRECOGNIZED,
+};
+
+enum tracing_mode {
+ TRACE_TO_MEMORY = 1 << 0,
+};
+
+struct tracer_timestamp_event {
+ u64 timestamp;
+ u8 unreliable;
+};
+
+struct tracer_string_event {
+ u32 timestamp;
+ u32 tmsn;
+ u32 tdsn;
+ u32 string_param;
+};
+
+struct tracer_event {
+ bool lost_event;
+ u32 type;
+ u8 event_id;
+ union {
+ struct tracer_string_event string_event;
+ struct tracer_timestamp_event timestamp_event;
+ };
+};
+
+struct mlx5_ifc_tracer_event_bits {
+ u8 lost[0x1];
+ u8 timestamp[0x7];
+ u8 event_id[0x8];
+ u8 event_data[0x30];
+};
+
+struct mlx5_ifc_tracer_string_event_bits {
+ u8 lost[0x1];
+ u8 timestamp[0x7];
+ u8 event_id[0x8];
+ u8 tmsn[0xd];
+ u8 tdsn[0x3];
+ u8 string_param[0x20];
+};
+
+struct mlx5_ifc_tracer_timestamp_event_bits {
+ u8 timestamp7_0[0x8];
+ u8 event_id[0x8];
+ u8 urts[0x3];
+ u8 timestamp52_40[0xd];
+ u8 timestamp39_8[0x20];
+};
+
+struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev);
+int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer);
+void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer);
+void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer);
+void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
new file mode 100644
index 000000000000..83f90e9aff45
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(__LIB_TRACER_TRACEPOINT_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __LIB_TRACER_TRACEPOINT_H__
+
+#include <linux/tracepoint.h>
+#include "fw_tracer.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+/* Tracepoint for FWTracer messages: */
+TRACE_EVENT(mlx5_fw,
+ TP_PROTO(const struct mlx5_fw_tracer *tracer, u64 trace_timestamp,
+ bool lost, u8 event_id, const char *msg),
+
+ TP_ARGS(tracer, trace_timestamp, lost, event_id, msg),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(&tracer->dev->pdev->dev))
+ __field(u64, trace_timestamp)
+ __field(bool, lost)
+ __field(u8, event_id)
+ __string(msg, msg)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name, dev_name(&tracer->dev->pdev->dev));
+ __entry->trace_timestamp = trace_timestamp;
+ __entry->lost = lost;
+ __entry->event_id = event_id;
+ __assign_str(msg, msg);
+ ),
+
+ TP_printk("%s [0x%llx] %d [0x%x] %s",
+ __get_str(dev_name),
+ __entry->trace_timestamp,
+ __entry->lost, __entry->event_id,
+ __get_str(msg))
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ./diag
+#define TRACE_INCLUDE_FILE fw_tracer_tracepoint
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 405236cf0b04..db2cfcd21d43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -52,6 +52,7 @@
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
+#include "en/fs.h"
struct page_pool;
@@ -137,7 +138,6 @@ struct page_pool;
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
-#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
#define MLX5E_UMR_WQE_INLINE_SZ \
@@ -148,10 +148,6 @@ struct page_pool;
(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS
-#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_DS_COUNT \
- ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
-
#define MLX5E_NUM_MAIN_GROUPS 9
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -349,6 +345,7 @@ enum {
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
+ MLX5E_SQ_STATE_REDIRECT,
};
struct mlx5e_sq_wqe_info {
@@ -369,16 +366,14 @@ struct mlx5e_txqsq {
struct mlx5e_cq cq;
- /* write@xmit, read@completion */
- struct {
- struct mlx5e_sq_dma *dma_fifo;
- struct mlx5e_tx_wqe_info *wqe_info;
- } db;
-
/* read only */
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
struct mlx5e_sq_stats *stats;
+ struct {
+ struct mlx5e_sq_dma *dma_fifo;
+ struct mlx5e_tx_wqe_info *wqe_info;
+ } db;
void __iomem *uar_map;
struct netdev_queue *txq;
u32 sqn;
@@ -400,30 +395,43 @@ struct mlx5e_txqsq {
} recover;
} ____cacheline_aligned_in_smp;
+struct mlx5e_dma_info {
+ struct page *page;
+ dma_addr_t addr;
+};
+
+struct mlx5e_xdp_info {
+ struct xdp_frame *xdpf;
+ dma_addr_t dma_addr;
+ struct mlx5e_dma_info di;
+};
+
struct mlx5e_xdpsq {
/* data path */
- /* dirtied @rx completion */
+ /* dirtied @completion */
u16 cc;
- u16 pc;
+ bool redirect_flush;
- struct mlx5e_cq cq;
+ /* dirtied @xmit */
+ u16 pc ____cacheline_aligned_in_smp;
+ bool doorbell;
- /* write@xmit, read@completion */
- struct {
- struct mlx5e_dma_info *di;
- bool doorbell;
- bool redirect_flush;
- } db;
+ struct mlx5e_cq cq;
/* read only */
struct mlx5_wq_cyc wq;
+ struct mlx5e_xdpsq_stats *stats;
+ struct {
+ struct mlx5e_xdp_info *xdpi;
+ } db;
void __iomem *uar_map;
u32 sqn;
struct device *pdev;
__be32 mkey_be;
u8 min_inline_mode;
unsigned long state;
+ unsigned int hw_mtu;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
@@ -460,11 +468,6 @@ mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
-struct mlx5e_dma_info {
- struct page *page;
- dma_addr_t addr;
-};
-
struct mlx5e_wqe_frag_info {
struct mlx5e_dma_info *di;
u32 offset;
@@ -567,7 +570,6 @@ struct mlx5e_rq {
/* XDP */
struct bpf_prog *xdp_prog;
- unsigned int hw_mtu;
struct mlx5e_xdpsq xdpsq;
DECLARE_BITMAP(flags, 8);
struct page_pool *page_pool;
@@ -596,6 +598,9 @@ struct mlx5e_channel {
__be32 mkey_be;
u8 num_tc;
+ /* XDP_REDIRECT */
+ struct mlx5e_xdpsq xdpsq;
+
/* data path - accessed per napi poll */
struct irq_desc *irq_desc;
struct mlx5e_ch_stats *stats;
@@ -618,159 +623,16 @@ struct mlx5e_channel_stats {
struct mlx5e_ch_stats ch;
struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
struct mlx5e_rq_stats rq;
+ struct mlx5e_xdpsq_stats rq_xdpsq;
+ struct mlx5e_xdpsq_stats xdpsq;
} ____cacheline_aligned_in_smp;
-enum mlx5e_traffic_types {
- MLX5E_TT_IPV4_TCP,
- MLX5E_TT_IPV6_TCP,
- MLX5E_TT_IPV4_UDP,
- MLX5E_TT_IPV6_UDP,
- MLX5E_TT_IPV4_IPSEC_AH,
- MLX5E_TT_IPV6_IPSEC_AH,
- MLX5E_TT_IPV4_IPSEC_ESP,
- MLX5E_TT_IPV6_IPSEC_ESP,
- MLX5E_TT_IPV4,
- MLX5E_TT_IPV6,
- MLX5E_TT_ANY,
- MLX5E_NUM_TT,
- MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
-};
-
-enum mlx5e_tunnel_types {
- MLX5E_TT_IPV4_GRE,
- MLX5E_TT_IPV6_GRE,
- MLX5E_NUM_TUNNEL_TT,
-};
-
enum {
MLX5E_STATE_ASYNC_EVENTS_ENABLED,
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
};
-struct mlx5e_vxlan_db {
- spinlock_t lock; /* protect vxlan table */
- struct radix_tree_root tree;
-};
-
-struct mlx5e_l2_rule {
- u8 addr[ETH_ALEN + 2];
- struct mlx5_flow_handle *rule;
-};
-
-struct mlx5e_flow_table {
- int num_groups;
- struct mlx5_flow_table *t;
- struct mlx5_flow_group **g;
-};
-
-#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
-
-struct mlx5e_tc_table {
- struct mlx5_flow_table *t;
-
- struct rhashtable ht;
-
- DECLARE_HASHTABLE(mod_hdr_tbl, 8);
- DECLARE_HASHTABLE(hairpin_tbl, 8);
-};
-
-struct mlx5e_vlan_table {
- struct mlx5e_flow_table ft;
- DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
- DECLARE_BITMAP(active_svlans, VLAN_N_VID);
- struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
- struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
- struct mlx5_flow_handle *untagged_rule;
- struct mlx5_flow_handle *any_cvlan_rule;
- struct mlx5_flow_handle *any_svlan_rule;
- bool cvlan_filter_disabled;
-};
-
-struct mlx5e_l2_table {
- struct mlx5e_flow_table ft;
- struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
- struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
- struct mlx5e_l2_rule broadcast;
- struct mlx5e_l2_rule allmulti;
- struct mlx5e_l2_rule promisc;
- bool broadcast_enabled;
- bool allmulti_enabled;
- bool promisc_enabled;
-};
-
-/* L3/L4 traffic type classifier */
-struct mlx5e_ttc_table {
- struct mlx5e_flow_table ft;
- struct mlx5_flow_handle *rules[MLX5E_NUM_TT];
- struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
-};
-
-#define ARFS_HASH_SHIFT BITS_PER_BYTE
-#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
-struct arfs_table {
- struct mlx5e_flow_table ft;
- struct mlx5_flow_handle *default_rule;
- struct hlist_head rules_hash[ARFS_HASH_SIZE];
-};
-
-enum arfs_type {
- ARFS_IPV4_TCP,
- ARFS_IPV6_TCP,
- ARFS_IPV4_UDP,
- ARFS_IPV6_UDP,
- ARFS_NUM_TYPES,
-};
-
-struct mlx5e_arfs_tables {
- struct arfs_table arfs_tables[ARFS_NUM_TYPES];
- /* Protect aRFS rules list */
- spinlock_t arfs_lock;
- struct list_head rules;
- int last_filter_id;
- struct workqueue_struct *wq;
-};
-
-/* NIC prio FTS */
-enum {
- MLX5E_VLAN_FT_LEVEL = 0,
- MLX5E_L2_FT_LEVEL,
- MLX5E_TTC_FT_LEVEL,
- MLX5E_INNER_TTC_FT_LEVEL,
- MLX5E_ARFS_FT_LEVEL
-};
-
-enum {
- MLX5E_TC_FT_LEVEL = 0,
- MLX5E_TC_TTC_FT_LEVEL,
-};
-
-struct mlx5e_ethtool_table {
- struct mlx5_flow_table *ft;
- int num_rules;
-};
-
-#define ETHTOOL_NUM_L3_L4_FTS 7
-#define ETHTOOL_NUM_L2_FTS 4
-
-struct mlx5e_ethtool_steering {
- struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
- struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
- struct list_head rules;
- int tot_num_rules;
-};
-
-struct mlx5e_flow_steering {
- struct mlx5_flow_namespace *ns;
- struct mlx5e_ethtool_steering ethtool;
- struct mlx5e_tc_table tc;
- struct mlx5e_vlan_table vlan;
- struct mlx5e_l2_table l2;
- struct mlx5e_ttc_table ttc;
- struct mlx5e_ttc_table inner_ttc;
- struct mlx5e_arfs_tables arfs;
-};
-
struct mlx5e_rqt {
u32 rqtn;
bool enabled;
@@ -810,7 +672,6 @@ struct mlx5e_priv {
u32 tx_rates[MLX5E_MAX_NUM_SQS];
struct mlx5e_flow_steering fs;
- struct mlx5e_vxlan_db vxlan;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
@@ -864,7 +725,8 @@ struct mlx5e_profile {
void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi);
@@ -874,14 +736,13 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -890,7 +751,6 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
@@ -906,23 +766,10 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
void mlx5e_update_stats(struct mlx5e_priv *priv);
-int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf);
-int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
- int location);
-int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *info, u32 *rule_locs);
-int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
- struct ethtool_rx_flow_spec *fs);
-int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
- int location);
-void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
-void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
@@ -933,8 +780,6 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
-void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
-void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
@@ -1051,32 +896,6 @@ void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif
-#ifndef CONFIG_RFS_ACCEL
-static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
-{
- return 0;
-}
-
-static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
-
-static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
-{
- return -EOPNOTSUPP;
-}
-#else
-int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
-int mlx5e_arfs_enable(struct mlx5e_priv *priv);
-int mlx5e_arfs_disable(struct mlx5e_priv *priv);
-int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
- u16 rxq_index, u32 flow_id);
-#endif
-
int mlx5e_create_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
@@ -1097,27 +916,6 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
-struct ttc_params {
- struct mlx5_flow_table_attr ft_attr;
- u32 any_tt_tirn;
- u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
- struct mlx5e_ttc_table *inner_ttc;
-};
-
-void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
-void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
-void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
-
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
- struct mlx5e_ttc_table *ttc);
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
- struct mlx5e_ttc_table *ttc);
-
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
- struct mlx5e_ttc_table *ttc);
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
- struct mlx5e_ttc_table *ttc);
-
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
u32 underlay_qpn, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
new file mode 100644
index 000000000000..bbf69e859b78
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef __MLX5E_FLOW_STEER_H__
+#define __MLX5E_FLOW_STEER_H__
+
+enum {
+ MLX5E_TC_FT_LEVEL = 0,
+ MLX5E_TC_TTC_FT_LEVEL,
+};
+
+struct mlx5e_tc_table {
+ struct mlx5_flow_table *t;
+
+ struct rhashtable ht;
+
+ DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ DECLARE_HASHTABLE(hairpin_tbl, 8);
+};
+
+struct mlx5e_flow_table {
+ int num_groups;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_group **g;
+};
+
+struct mlx5e_l2_rule {
+ u8 addr[ETH_ALEN + 2];
+ struct mlx5_flow_handle *rule;
+};
+
+#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct mlx5e_vlan_table {
+ struct mlx5e_flow_table ft;
+ DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
+ DECLARE_BITMAP(active_svlans, VLAN_N_VID);
+ struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *untagged_rule;
+ struct mlx5_flow_handle *any_cvlan_rule;
+ struct mlx5_flow_handle *any_svlan_rule;
+ bool cvlan_filter_disabled;
+};
+
+struct mlx5e_l2_table {
+ struct mlx5e_flow_table ft;
+ struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
+ struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
+ struct mlx5e_l2_rule broadcast;
+ struct mlx5e_l2_rule allmulti;
+ struct mlx5e_l2_rule promisc;
+ bool broadcast_enabled;
+ bool allmulti_enabled;
+ bool promisc_enabled;
+};
+
+enum mlx5e_traffic_types {
+ MLX5E_TT_IPV4_TCP,
+ MLX5E_TT_IPV6_TCP,
+ MLX5E_TT_IPV4_UDP,
+ MLX5E_TT_IPV6_UDP,
+ MLX5E_TT_IPV4_IPSEC_AH,
+ MLX5E_TT_IPV6_IPSEC_AH,
+ MLX5E_TT_IPV4_IPSEC_ESP,
+ MLX5E_TT_IPV6_IPSEC_ESP,
+ MLX5E_TT_IPV4,
+ MLX5E_TT_IPV6,
+ MLX5E_TT_ANY,
+ MLX5E_NUM_TT,
+ MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
+};
+
+enum mlx5e_tunnel_types {
+ MLX5E_TT_IPV4_GRE,
+ MLX5E_TT_IPV6_GRE,
+ MLX5E_NUM_TUNNEL_TT,
+};
+
+/* L3/L4 traffic type classifier */
+struct mlx5e_ttc_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_handle *rules[MLX5E_NUM_TT];
+ struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
+};
+
+/* NIC prio FTS */
+enum {
+ MLX5E_VLAN_FT_LEVEL = 0,
+ MLX5E_L2_FT_LEVEL,
+ MLX5E_TTC_FT_LEVEL,
+ MLX5E_INNER_TTC_FT_LEVEL,
+#ifdef CONFIG_MLX5_EN_ARFS
+ MLX5E_ARFS_FT_LEVEL
+#endif
+};
+
+#ifdef CONFIG_MLX5_EN_RXNFC
+
+struct mlx5e_ethtool_table {
+ struct mlx5_flow_table *ft;
+ int num_rules;
+};
+
+#define ETHTOOL_NUM_L3_L4_FTS 7
+#define ETHTOOL_NUM_L2_FTS 4
+
+struct mlx5e_ethtool_steering {
+ struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
+ struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
+ struct list_head rules;
+ int tot_num_rules;
+};
+
+void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
+int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
+int mlx5e_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
+#else
+static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
+static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
+#endif /* CONFIG_MLX5_EN_RXNFC */
+
+#ifdef CONFIG_MLX5_EN_ARFS
+#define ARFS_HASH_SHIFT BITS_PER_BYTE
+#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct arfs_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_handle *default_rule;
+ struct hlist_head rules_hash[ARFS_HASH_SIZE];
+};
+
+enum arfs_type {
+ ARFS_IPV4_TCP,
+ ARFS_IPV6_TCP,
+ ARFS_IPV4_UDP,
+ ARFS_IPV6_UDP,
+ ARFS_NUM_TYPES,
+};
+
+struct mlx5e_arfs_tables {
+ struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+ /* Protect aRFS rules list */
+ spinlock_t arfs_lock;
+ struct list_head rules;
+ int last_filter_id;
+ struct workqueue_struct *wq;
+};
+
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
+int mlx5e_arfs_enable(struct mlx5e_priv *priv);
+int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+#else
+static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
+static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
+static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
+#endif
+
+struct mlx5e_flow_steering {
+ struct mlx5_flow_namespace *ns;
+#ifdef CONFIG_MLX5_EN_RXNFC
+ struct mlx5e_ethtool_steering ethtool;
+#endif
+ struct mlx5e_tc_table tc;
+ struct mlx5e_vlan_table vlan;
+ struct mlx5e_l2_table l2;
+ struct mlx5e_ttc_table ttc;
+ struct mlx5e_ttc_table inner_ttc;
+#ifdef CONFIG_MLX5_EN_ARFS
+ struct mlx5e_arfs_tables arfs;
+#endif
+};
+
+struct ttc_params {
+ struct mlx5_flow_table_attr ft_attr;
+ u32 any_tt_tirn;
+ u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_ttc_table *inner_ttc;
+};
+
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
+
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc);
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc);
+
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
+
+void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
+
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+
+#endif /* __MLX5E_FLOW_STEER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
new file mode 100644
index 000000000000..ad6d471d00dd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bpf_trace.h>
+#include "en/xdp.h"
+
+static inline bool
+mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
+ struct xdp_buff *xdp)
+{
+ struct mlx5e_xdp_info xdpi;
+
+ xdpi.xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpi.xdpf))
+ return false;
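+	/* The xdp_frame is placed in the headroom of the same page, so the
+	 * payload DMA address is an offset from the page's DMA mapping.
+	 */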
+ xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf);
+ dma_sync_single_for_device(sq->pdev, xdpi.dma_addr,
+ xdpi.xdpf->len, PCI_DMA_TODEVICE);
+ xdpi.di = *di;
+
+ return mlx5e_xmit_xdp_frame(sq, &xdpi);
+}
+
+/* returns true if packet was consumed by xdp */
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+ void *va, u16 *rx_headroom, u32 *len)
+{
+ struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
+ struct xdp_buff xdp;
+ u32 act;
+ int err;
+
+ if (!prog)
+ return false;
+
+ xdp.data = va + *rx_headroom;
+ xdp_set_data_meta_invalid(&xdp);
+ xdp.data_end = xdp.data + *len;
+ xdp.data_hard_start = va;
+ xdp.rxq = &rq->xdp_rxq;
+
+ act = bpf_prog_run_xdp(prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ *rx_headroom = xdp.data - xdp.data_hard_start;
+ *len = xdp.data_end - xdp.data;
+ return false;
+ case XDP_TX:
+ if (unlikely(!mlx5e_xmit_xdp_buff(&rq->xdpsq, di, &xdp)))
+ goto xdp_abort;
+ __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
+ return true;
+ case XDP_REDIRECT:
+		/* When XDP is enabled, the page refcount is 1 here */
+ err = xdp_do_redirect(rq->netdev, &xdp, prog);
+ if (unlikely(err))
+ goto xdp_abort;
+ __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
+ rq->xdpsq.redirect_flush = true;
+ mlx5e_page_dma_unmap(rq, di);
+ rq->stats->xdp_redirect++;
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fall through */
+ case XDP_ABORTED:
+xdp_abort:
+ trace_xdp_exception(rq->netdev, prog, act);
+ /* fall through */
+ case XDP_DROP:
+ rq->stats->xdp_drop++;
+ return true;
+ }
+}
+
+bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg = wqe->data;
+
+ struct xdp_frame *xdpf = xdpi->xdpf;
+ dma_addr_t dma_addr = xdpi->dma_addr;
+ unsigned int dma_len = xdpf->len;
+
+ struct mlx5e_xdpsq_stats *stats = sq->stats;
+
+ prefetchw(wqe);
+
+ if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
+ stats->err++;
+ return false;
+ }
+
+ if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
+ if (sq->doorbell) {
+ /* SQ is full, ring doorbell */
+ mlx5e_xmit_xdp_doorbell(sq);
+ sq->doorbell = false;
+ }
+ stats->full++;
+ return false;
+ }
+
+ cseg->fm_ce_se = 0;
+
+ /* copy the inline part if required */
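+	/* (MLX5E_XDP_MIN_INLINE covers the Ethernet and VLAN headers) */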
+ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+ memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE);
+ eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+ dma_len -= MLX5E_XDP_MIN_INLINE;
+ dma_addr += MLX5E_XDP_MIN_INLINE;
+ dseg++;
+ }
+
+ /* write the dma part */
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->byte_count = cpu_to_be32(dma_len);
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
+
+	/* Move the page reference to the SQ's responsibility and mark it
+	 * so it is not put back into the page cache.
+	 */
+ sq->db.xdpi[pi] = *xdpi;
+ sq->pc++;
+
+ sq->doorbell = true;
+
+ stats->xmit++;
+ return true;
+}
+
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_xdpsq *sq;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_rq *rq;
+ bool is_redirect;
+ u16 sqcc;
+ int i;
+
+ sq = container_of(cq, struct mlx5e_xdpsq, cq);
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+ return false;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return false;
+
+ is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
+ rq = container_of(sq, struct mlx5e_rq, xdpsq);
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ i = 0;
+ do {
+ u16 wqe_counter;
+ bool last_wqe;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+ do {
+ u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+ struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
+
+ last_wqe = (sqcc == wqe_counter);
+ sqcc++;
+
+ if (is_redirect) {
+ xdp_return_frame(xdpi->xdpf);
+ dma_unmap_single(sq->pdev, xdpi->dma_addr,
+ xdpi->xdpf->len, DMA_TO_DEVICE);
+ } else {
+ /* Recycle RX page */
+ mlx5e_page_release(rq, &xdpi->di, true);
+ }
+ } while (!last_wqe);
+ } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+
+ sq->stats->cqes += i;
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc = sqcc;
+ return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
+
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5e_rq *rq;
+ bool is_redirect;
+
+ is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
+ rq = is_redirect ? NULL : container_of(sq, struct mlx5e_rq, xdpsq);
+
+ while (sq->cc != sq->pc) {
+ u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+ struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
+
+ sq->cc++;
+
+ if (is_redirect) {
+ xdp_return_frame(xdpi->xdpf);
+ dma_unmap_single(sq->pdev, xdpi->dma_addr,
+ xdpi->xdpf->len, DMA_TO_DEVICE);
+ } else {
+ /* Recycle RX page */
+ mlx5e_page_release(rq, &xdpi->di, false);
+ }
+ }
+}
+
+int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_xdpsq *sq;
+ int drops = 0;
+ int sq_num;
+ int i;
+
+ if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
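+	/* There is one XDP redirect SQ per channel; pick the one matching the
+	 * current CPU.
+	 */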
+ sq_num = smp_processor_id();
+
+ if (unlikely(sq_num >= priv->channels.num))
+ return -ENXIO;
+
+ sq = &priv->channels.c[sq_num]->xdpsq;
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+ return -ENETDOWN;
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ struct mlx5e_xdp_info xdpi;
+
+ xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr))) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ continue;
+ }
+
+ xdpi.xdpf = xdpf;
+
+ if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) {
+ dma_unmap_single(sq->pdev, xdpi.dma_addr,
+ xdpf->len, DMA_TO_DEVICE);
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ mlx5e_xmit_xdp_doorbell(sq);
+
+ return n - drops;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
new file mode 100644
index 000000000000..6dfab045925f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __MLX5_EN_XDP_H__
+#define __MLX5_EN_XDP_H__
+
+#include "en.h"
+
+#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
+ MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
+#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
+#define MLX5E_XDP_TX_DS_COUNT \
+ ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+ void *va, u16 *rx_headroom, u32 *len);
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
+
+bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
+int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+
+static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index f20074dbef32..1dd225380a66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -34,19 +34,26 @@
#ifndef __MLX5E_EN_ACCEL_H__
#define __MLX5E_EN_ACCEL_H__
-#ifdef CONFIG_MLX5_ACCEL
-
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "en.h"
-static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
- struct mlx5e_txqsq *sq,
- struct net_device *dev,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
+static inline void
+mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
+{
+ int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+
+ udp_hdr(skb)->len = htons(payload_len);
+}
+
+static inline struct sk_buff *
+mlx5e_accel_handle_tx(struct sk_buff *skb,
+ struct mlx5e_txqsq *sq,
+ struct net_device *dev,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi)
{
#ifdef CONFIG_MLX5_EN_TLS
if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
@@ -64,9 +71,10 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
}
#endif
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ mlx5e_udp_gso_handle_tx_skb(skb);
+
return skb;
}
-#endif /* CONFIG_MLX5_ACCEL */
-
#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index c245d8e78509..128a82b1dbfc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -37,6 +37,7 @@
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
+#include "accel/accel.h"
#include "en.h"
enum {
@@ -346,19 +347,12 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
}
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 *cqe_bcnt)
{
struct mlx5e_ipsec_metadata *mdata;
- struct ethhdr *old_eth;
- struct ethhdr *new_eth;
struct xfrm_state *xs;
- __be16 *ethtype;
- /* Detect inline metadata */
- if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)
- return skb;
- ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
- if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
+ if (!is_metadata_hdr_valid(skb))
return skb;
/* Use the metadata */
@@ -369,12 +363,8 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
return NULL;
}
- /* Remove the metadata from the buffer */
- old_eth = (struct ethhdr *)skb->data;
- new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
- memmove(new_eth, old_eth, 2 * ETH_ALEN);
- /* Ethertype is already in its new place */
- skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
+ remove_metadata_hdr(skb);
+ *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
return skb;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 2bfbbef1b054..ca47c0540904 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -41,7 +41,7 @@
#include "en.h"
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb);
+ struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_ipsec_inverse_table_init(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index d167845271c3..eddd7702680b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -110,9 +110,7 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
u32 caps = mlx5_accel_tls_device_caps(mdev);
int ret = -ENOMEM;
void *flow;
-
- if (direction != TLS_OFFLOAD_CTX_DIR_TX)
- return -EINVAL;
+ u32 swid;
flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
if (!flow)
@@ -122,18 +120,23 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
if (ret)
goto free_flow;
+ ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
+ start_offload_tcp_sn, &swid,
+ direction == TLS_OFFLOAD_CTX_DIR_TX);
+ if (ret < 0)
+ goto free_flow;
+
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- struct mlx5e_tls_offload_context *tx_ctx =
+ struct mlx5e_tls_offload_context_tx *tx_ctx =
mlx5e_get_tls_tx_context(tls_ctx);
- u32 swid;
-
- ret = mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, &swid);
- if (ret < 0)
- goto free_flow;
tx_ctx->swid = htonl(swid);
tx_ctx->expected_seq = start_offload_tcp_sn;
+ } else {
+ struct mlx5e_tls_offload_context_rx *rx_ctx =
+ mlx5e_get_tls_rx_context(tls_ctx);
+
+ rx_ctx->handle = htonl(swid);
}
return 0;
@@ -147,30 +150,60 @@ static void mlx5e_tls_del(struct net_device *netdev,
enum tls_offload_ctx_dir direction)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ unsigned int handle;
- if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid);
+ handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
+ mlx5e_get_tls_tx_context(tls_ctx)->swid :
+ mlx5e_get_tls_rx_context(tls_ctx)->handle);
- mlx5_accel_tls_del_tx_flow(priv->mdev, swid);
- } else {
- netdev_err(netdev, "unsupported direction %d\n", direction);
- }
+ mlx5_accel_tls_del_flow(priv->mdev, handle,
+ direction == TLS_OFFLOAD_CTX_DIR_TX);
+}
+
+static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk,
+ u32 seq, u64 rcd_sn)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_tls_offload_context_rx *rx_ctx;
+
+ rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
+
+ netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq,
+ be64_to_cpu(rcd_sn));
+ mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
}
static const struct tlsdev_ops mlx5e_tls_ops = {
.tls_dev_add = mlx5e_tls_add,
.tls_dev_del = mlx5e_tls_del,
+ .tls_dev_resync_rx = mlx5e_tls_resync_rx,
};
void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
+ u32 caps = mlx5_accel_tls_device_caps(priv->mdev);
struct net_device *netdev = priv->netdev;
if (!mlx5_accel_is_tls_device(priv->mdev))
return;
- netdev->features |= NETIF_F_HW_TLS_TX;
- netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ if (caps & MLX5_ACCEL_TLS_TX) {
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ }
+
+ if (caps & MLX5_ACCEL_TLS_RX) {
+ netdev->features |= NETIF_F_HW_TLS_RX;
+ netdev->hw_features |= NETIF_F_HW_TLS_RX;
+ }
+
+ if (!(caps & MLX5_ACCEL_TLS_LRO)) {
+ netdev->features &= ~NETIF_F_LRO;
+ netdev->hw_features &= ~NETIF_F_LRO;
+ }
+
netdev->tlsdev_ops = &mlx5e_tls_ops;
}
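
mlx5e_tls_build_netdev() now consults the device capability mask instead of assuming TX-only support: the TX and RX offload features are advertised independently, and LRO is masked off when the TLS engine cannot handle LRO-merged frames. A standalone model of the gating; the bit names mirror MLX5_ACCEL_TLS_* from the diff, but the values here are placeholders:

#include <stdint.h>
#include <stdio.h>

/* assumed capability bits, mirroring MLX5_ACCEL_TLS_{TX,RX,LRO} */
#define CAP_TLS_TX  (1u << 0)
#define CAP_TLS_RX  (1u << 1)
#define CAP_TLS_LRO (1u << 2)

/* assumed netdev feature bits */
#define F_HW_TLS_TX (1u << 0)
#define F_HW_TLS_RX (1u << 1)
#define F_LRO       (1u << 2)

static uint32_t tls_build_features(uint32_t caps, uint32_t features)
{
    if (caps & CAP_TLS_TX)
        features |= F_HW_TLS_TX;
    if (caps & CAP_TLS_RX)
        features |= F_HW_TLS_RX;
    if (!(caps & CAP_TLS_LRO))
        features &= ~F_LRO;  /* device can't do TLS offload on LRO-merged frames */
    return features;
}

int main(void)
{
    printf("%#x\n", (unsigned)tls_build_features(CAP_TLS_TX | CAP_TLS_RX, F_LRO));
    return 0;
}
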
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
index b6162178f621..3f5d72163b56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -43,25 +43,44 @@ struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_drop_resync_alloc;
atomic64_t tx_tls_drop_no_sync_data;
atomic64_t tx_tls_drop_bypass_required;
+ atomic64_t rx_tls_drop_resync_request;
+ atomic64_t rx_tls_resync_request;
+ atomic64_t rx_tls_resync_reply;
+ atomic64_t rx_tls_auth_fail;
};
struct mlx5e_tls {
struct mlx5e_tls_sw_stats sw_stats;
};
-struct mlx5e_tls_offload_context {
- struct tls_offload_context base;
+struct mlx5e_tls_offload_context_tx {
+ struct tls_offload_context_tx base;
u32 expected_seq;
__be32 swid;
};
-static inline struct mlx5e_tls_offload_context *
+static inline struct mlx5e_tls_offload_context_tx *
mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
{
- BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) >
- TLS_OFFLOAD_CONTEXT_SIZE);
- return container_of(tls_offload_ctx(tls_ctx),
- struct mlx5e_tls_offload_context,
+ BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) >
+ TLS_OFFLOAD_CONTEXT_SIZE_TX);
+ return container_of(tls_offload_ctx_tx(tls_ctx),
+ struct mlx5e_tls_offload_context_tx,
+ base);
+}
+
+struct mlx5e_tls_offload_context_rx {
+ struct tls_offload_context_rx base;
+ __be32 handle;
+};
+
+static inline struct mlx5e_tls_offload_context_rx *
+mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
+{
+ BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) >
+ TLS_OFFLOAD_CONTEXT_SIZE_RX);
+ return container_of(tls_offload_ctx_rx(tls_ctx),
+ struct mlx5e_tls_offload_context_rx,
base);
}
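
The TX and RX context structs share one layout trick: the TLS core's offload context is embedded as the first member, the driver's private fields follow, and a BUILD_BUG_ON proves the combined struct still fits in the area the core reserves (TLS_OFFLOAD_CONTEXT_SIZE_TX/RX), so container_of() recovers the driver view from the core's pointer without any extra allocation. A minimal userspace rendering of the pattern; all names and the reserved size are illustrative:

#include <stddef.h>
#include <assert.h>
#include <stdio.h>

struct core_ctx { int state; };  /* stands in for tls_offload_context_rx */
#define CORE_CTX_RESERVED 32     /* space the core reserves, assumed */

struct drv_ctx {
    struct core_ctx base;        /* must stay the first member */
    unsigned int handle;         /* driver-private word */
};

/* container_of() in miniature: step back from the embedded member */
#define drv_ctx_of(p) ((struct drv_ctx *)((char *)(p) - offsetof(struct drv_ctx, base)))

int main(void)
{
    /* compile-time guard, the userspace analogue of BUILD_BUG_ON */
    static_assert(sizeof(struct drv_ctx) <= CORE_CTX_RESERVED,
                  "private context overflows reserved space");

    struct drv_ctx d = { .handle = 42 };
    struct core_ctx *c = &d.base;  /* what the core hands back */
    printf("%u\n", drv_ctx_of(c)->handle);
    return 0;
}
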
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 15aef71d1957..be137d4a9169 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -33,6 +33,14 @@
#include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h"
+#include "accel/accel.h"
+
+#include <net/inet6_hashtables.h>
+#include <linux/ipv6.h>
+
+#define SYNDROM_DECRYPTED 0x30
+#define SYNDROM_RESYNC_REQUEST 0x31
+#define SYNDROM_AUTH_FAILED 0x32
#define SYNDROME_OFFLOAD_REQUIRED 32
#define SYNDROME_SYNC 33
@@ -44,10 +52,26 @@ struct sync_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
-struct mlx5e_tls_metadata {
+struct recv_metadata_content {
+ u8 syndrome;
+ u8 reserved;
+ __be32 sync_seq;
+} __packed;
+
+struct send_metadata_content {
/* One byte of syndrome followed by 3 bytes of swid */
__be32 syndrome_swid;
__be16 first_seq;
+} __packed;
+
+struct mlx5e_tls_metadata {
+ union {
+ /* from fpga to host */
+ struct recv_metadata_content recv;
+ /* from host to fpga */
+ struct send_metadata_content send;
+ unsigned char raw[6];
+ } __packed content;
/* packet type ID field */
__be16 ethertype;
} __packed;
@@ -68,12 +92,13 @@ static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
2 * ETH_ALEN);
eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
- pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
+ pet->content.send.syndrome_swid =
+ htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
return 0;
}
-static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context,
+static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context,
u32 tcp_seq, struct sync_info *info)
{
int remaining, i = 0, ret = -EINVAL;
@@ -149,7 +174,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
memcpy(pet, &syndrome, sizeof(syndrome));
- pet->first_seq = htons(tcp_seq);
+ pet->content.send.first_seq = htons(tcp_seq);
/* MLX5 devices don't care about the checksum partial start, offset
* and pseudo header
@@ -161,7 +186,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
}
static struct sk_buff *
-mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
+mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe **wqe,
u16 *pi,
@@ -239,7 +264,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
u16 *pi)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_tls_offload_context *context;
+ struct mlx5e_tls_offload_context_tx *context;
struct tls_context *tls_ctx;
u32 expected_seq;
int datalen;
@@ -276,3 +301,83 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
out:
return skb;
}
+
+static int tls_update_resync_sn(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_tls_metadata *mdata)
+{
+ struct sock *sk = NULL;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ __be32 seq;
+
+ if (mdata->ethertype != htons(ETH_P_IP))
+ return -EINVAL;
+
+ iph = (struct iphdr *)(mdata + 1);
+
+ th = ((void *)iph) + iph->ihl * 4;
+
+ if (iph->version == 4) {
+ sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ iph->saddr, th->source, iph->daddr,
+ th->dest, netdev->ifindex);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
+
+ sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ &ipv6h->saddr, th->source,
+ &ipv6h->daddr, ntohs(th->dest),
+ netdev->ifindex, 0);
+#endif
+ }
+ if (!sk || sk->sk_state == TCP_TIME_WAIT) {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);
+ goto out;
+ }
+
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+
+ memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq));
+ tls_offload_rx_resync_request(sk, seq);
+out:
+ return 0;
+}
+
+void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ u32 *cqe_bcnt)
+{
+ struct mlx5e_tls_metadata *mdata;
+ struct mlx5e_priv *priv;
+
+ if (!is_metadata_hdr_valid(skb))
+ return;
+
+ /* Use the metadata */
+ mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
+ switch (mdata->content.recv.syndrome) {
+ case SYNDROM_DECRYPTED:
+ skb->decrypted = 1;
+ break;
+ case SYNDROM_RESYNC_REQUEST:
+ tls_update_resync_sn(netdev, skb, mdata);
+ priv = netdev_priv(netdev);
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
+ break;
+ case SYNDROM_AUTH_FAILED:
+ /* Authentication failure will be observed and verified by kTLS */
+ priv = netdev_priv(netdev);
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
+ break;
+ default:
+ /* Bypass the metadata header to others */
+ return;
+ }
+
+ remove_metadata_hdr(skb);
+ *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
+}
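
On receive, the FPGA prepends the same metadata header and the driver dispatches on the syndrome byte: 0x30 marks the skb as already decrypted for the TLS layer, 0x31 asks the stack to resync the record sequence number (after a socket lookup), and 0x32 is only counted, since kTLS itself will observe the authentication failure; any other value leaves the header in place. A standalone model of that dispatch, with the syndrome values taken from the diff:

#include <stdint.h>
#include <stdio.h>

#define SYNDROM_DECRYPTED      0x30
#define SYNDROM_RESYNC_REQUEST 0x31
#define SYNDROM_AUTH_FAILED    0x32

enum rx_action { RX_MARK_DECRYPTED, RX_RESYNC, RX_COUNT_AUTH_FAIL, RX_PASS };

/* Model of the switch in mlx5e_tls_handle_rx_skb(): only the three known
 * syndromes consume the metadata header; anything else passes through
 * untouched. */
static enum rx_action classify(uint8_t syndrome)
{
    switch (syndrome) {
    case SYNDROM_DECRYPTED:      return RX_MARK_DECRYPTED;
    case SYNDROM_RESYNC_REQUEST: return RX_RESYNC;
    case SYNDROM_AUTH_FAILED:    return RX_COUNT_AUTH_FAIL;
    default:                     return RX_PASS;
    }
}

int main(void)
{
    printf("%d\n", classify(SYNDROM_RESYNC_REQUEST));
    return 0;
}
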
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
index 405dfd302225..311667ec71b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -45,6 +45,9 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
struct mlx5e_tx_wqe **wqe,
u16 *pi);
+void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ u32 *cqe_bcnt);
+
#endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index d258bb679271..45cdde694d20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -30,8 +30,6 @@
* SOFTWARE.
*/
-#ifdef CONFIG_RFS_ACCEL
-
#include <linux/hash.h>
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
@@ -738,4 +736,4 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
-#endif
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index fffe514ba855..98dd3e0ada72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -32,6 +32,7 @@
#include "en.h"
#include "en/port.h"
+#include "lib/clock.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
@@ -969,33 +970,6 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
return 0;
}
-static int mlx5e_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *info, u32 *rule_locs)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- int err = 0;
-
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = priv->channels.params.num_channels;
- break;
- case ETHTOOL_GRXCLSRLCNT:
- info->rule_cnt = priv->fs.ethtool.tot_num_rules;
- break;
- case ETHTOOL_GRXCLSRULE:
- err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
- break;
- case ETHTOOL_GRXCLSRLALL:
- err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000
#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85
@@ -1133,10 +1107,10 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
if (ret)
return ret;
- info->phc_index = mdev->clock.ptp ?
- ptp_clock_index(mdev->clock.ptp) : -1;
+ info->phc_index = mlx5_clock_get_ptp_index(mdev);
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+ if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
+ info->phc_index == -1)
return 0;
info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
@@ -1606,26 +1580,6 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
-static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- int err = 0;
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXCLSRLINS:
- err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
- break;
- case ETHTOOL_SRXCLSRLDEL:
- err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash)
{
@@ -1678,8 +1632,10 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
+#ifdef CONFIG_MLX5_EN_RXNFC
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
+#endif
.flash_device = mlx5e_flash_device,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
@@ -1696,5 +1652,4 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.self_test = mlx5e_self_test,
.get_msglevel = mlx5e_get_msglevel,
.set_msglevel = mlx5e_set_msglevel,
-
};
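
mlx5e_ethtool_get_ts_info() now obtains the PHC index through the new mlx5_clock_get_ptp_index() helper from lib/clock and stops advertising hardware timestamping when no PTP clock was registered; mlx5e_hwstamp_set() (later in this series) applies the same -1 test before accepting a config. A one-line model of the combined condition:

#include <stdbool.h>
#include <stdio.h>

/* Model: HW timestamping is advertised only when the device both reports
 * a frequency capability and actually registered a PTP clock. */
static bool advertise_hw_tstamp(bool has_freq_cap, int phc_index)
{
    return has_freq_cap && phc_index != -1;
}

int main(void)
{
    printf("%d\n", advertise_hw_tstamp(true, -1));  /* 0: no clock, no claim */
    return 0;
}
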
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index eafc59280ada..75bb981e00b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -66,11 +66,14 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
break;
case IP_USER_FLOW:
+ case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
@@ -115,29 +118,203 @@ static void mask_spec(u8 *mask, u8 *val, size_t size)
*((u8 *)val) = *((u8 *)mask) & *((u8 *)val);
}
-static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m,
- __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
+#define MLX5E_FTE_SET(header_p, fld, v) \
+ MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)
+
+#define MLX5E_FTE_ADDR_OF(header_p, fld) \
+ MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)
+
+static void
+set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
+ __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{
if (ip4src_m) {
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
- src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ip4src_v, sizeof(ip4src_v));
- memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
- src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ memset(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
0xff, sizeof(ip4src_m));
}
if (ip4dst_m) {
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ip4dst_v, sizeof(ip4dst_v));
- memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ memset(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
0xff, sizeof(ip4dst_m));
}
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
- ethertype, ETH_P_IP);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
- ethertype, 0xffff);
+
+ MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
+ MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
+}
+
+static void
+set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
+ __be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
+{
+ u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
+
+ if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
+ memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ ip6src_v, ip6_sz);
+ memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ ip6src_m, ip6_sz);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
+ memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ ip6dst_v, ip6_sz);
+ memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ ip6dst_m, ip6_sz);
+ }
+
+ MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
+ MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
+}
+
+static void
+set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
+ __be16 pdst_m, __be16 pdst_v)
+{
+ if (psrc_m) {
+ MLX5E_FTE_SET(headers_c, tcp_sport, 0xffff);
+ MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
+ }
+ if (pdst_m) {
+ MLX5E_FTE_SET(headers_c, tcp_dport, 0xffff);
+ MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
+ }
+
+ MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
+ MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
+}
+
+static void
+set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
+ __be16 pdst_m, __be16 pdst_v)
+{
+ if (psrc_m) {
+ MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
+ MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
+ }
+
+ if (pdst_m) {
+ MLX5E_FTE_SET(headers_c, udp_dport, 0xffff);
+ MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
+ }
+
+ MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
+ MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
+}
+
+static void
+parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec;
+
+ set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
+ l4_mask->ip4dst, l4_val->ip4dst);
+
+ set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+ l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
+ struct ethtool_tcpip4_spec *l4_val = &fs->h_u.udp_ip4_spec;
+
+ set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
+ l4_mask->ip4dst, l4_val->ip4dst);
+
+ set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+ l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;
+
+ set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
+ l3_mask->ip4dst, l3_val->ip4dst);
+
+ if (l3_mask->proto) {
+ MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
+ MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
+ }
+}
+
+static void
+parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec;
+
+ set_ip6(headers_c, headers_v, l3_mask->ip6src,
+ l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);
+
+ if (l3_mask->l4_proto) {
+ MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
+ MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
+ }
+}
+
+static void
+parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec;
+
+ set_ip6(headers_c, headers_v, l4_mask->ip6src,
+ l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
+
+ set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+ l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
+ struct ethtool_tcpip6_spec *l4_val = &fs->h_u.udp_ip6_spec;
+
+ set_ip6(headers_c, headers_v, l4_mask->ip6src,
+ l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
+
+ set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+ l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethhdr *eth_mask = &fs->m_u.ether_spec;
+ struct ethhdr *eth_val = &fs->h_u.ether_spec;
+
+ mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
+ MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
+ MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
+}
+
+static void
+set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
+{
+ MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
+ MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
+ MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
+ MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
+}
+
+static void
+set_dmac(void *headers_c, void *headers_v,
+ unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
+{
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
+ ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
}
static int set_flow_attrs(u32 *match_c, u32 *match_v,
@@ -148,112 +325,42 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers);
u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
- struct ethtool_tcpip4_spec *l4_mask;
- struct ethtool_tcpip4_spec *l4_val;
- struct ethtool_usrip4_spec *l3_mask;
- struct ethtool_usrip4_spec *l3_val;
- struct ethhdr *eth_val;
- struct ethhdr *eth_mask;
switch (flow_type) {
case TCP_V4_FLOW:
- l4_mask = &fs->m_u.tcp_ip4_spec;
- l4_val = &fs->h_u.tcp_ip4_spec;
- set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
- l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
-
- if (l4_mask->psrc) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
- ntohs(l4_val->psrc));
- }
- if (l4_mask->pdst) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
- ntohs(l4_val->pdst));
- }
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
- IPPROTO_TCP);
+ parse_tcp4(outer_headers_c, outer_headers_v, fs);
break;
case UDP_V4_FLOW:
- l4_mask = &fs->m_u.tcp_ip4_spec;
- l4_val = &fs->h_u.tcp_ip4_spec;
- set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
- l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
-
- if (l4_mask->psrc) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
- ntohs(l4_val->psrc));
- }
- if (l4_mask->pdst) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
- ntohs(l4_val->pdst));
- }
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
- IPPROTO_UDP);
+ parse_udp4(outer_headers_c, outer_headers_v, fs);
break;
case IP_USER_FLOW:
- l3_mask = &fs->m_u.usr_ip4_spec;
- l3_val = &fs->h_u.usr_ip4_spec;
- set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src,
- l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst);
+ parse_ip4(outer_headers_c, outer_headers_v, fs);
+ break;
+ case TCP_V6_FLOW:
+ parse_tcp6(outer_headers_c, outer_headers_v, fs);
+ break;
+ case UDP_V6_FLOW:
+ parse_udp6(outer_headers_c, outer_headers_v, fs);
+ break;
+ case IPV6_USER_FLOW:
+ parse_ip6(outer_headers_c, outer_headers_v, fs);
break;
case ETHER_FLOW:
- eth_mask = &fs->m_u.ether_spec;
- eth_val = &fs->h_u.ether_spec;
-
- mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_c, smac_47_16),
- eth_mask->h_source);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_v, smac_47_16),
- eth_val->h_source);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_c, dmac_47_16),
- eth_mask->h_dest);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_v, dmac_47_16),
- eth_val->h_dest);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype,
- ntohs(eth_mask->h_proto));
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype,
- ntohs(eth_val->h_proto));
+ parse_ether(outer_headers_c, outer_headers_v, fs);
break;
default:
return -EINVAL;
}
if ((fs->flow_type & FLOW_EXT) &&
- (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
- cvlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
- cvlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
- first_vid, 0xfff);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
- first_vid, ntohs(fs->h_ext.vlan_tci));
- }
+ (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
+ set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);
+
if (fs->flow_type & FLOW_MAC_EXT &&
!is_zero_ether_addr(fs->m_ext.h_dest)) {
mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_c, dmac_47_16),
- fs->m_ext.h_dest);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- outer_headers_v, dmac_47_16),
- fs->h_ext.h_dest);
+ set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
+ fs->h_ext.h_dest);
}
return 0;
@@ -379,16 +486,143 @@ static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
#define all_zeros_or_all_ones(field) \
((field) == 0 || (field) == (__force typeof(field))-1)
+static int validate_ethter(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethhdr *eth_mask = &fs->m_u.ether_spec;
+ int ntuples = 0;
+
+ if (!is_zero_ether_addr(eth_mask->h_dest))
+ ntuples++;
+ if (!is_zero_ether_addr(eth_mask->h_source))
+ ntuples++;
+ if (eth_mask->h_proto)
+ ntuples++;
+ return ntuples;
+}
+
+static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
+ int ntuples = 0;
+
+ if (l4_mask->tos)
+ return -EINVAL;
+
+ if (l4_mask->ip4src) {
+ if (!all_ones(l4_mask->ip4src))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l4_mask->ip4dst) {
+ if (!all_ones(l4_mask->ip4dst))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l4_mask->psrc) {
+ if (!all_ones(l4_mask->psrc))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l4_mask->pdst) {
+ if (!all_ones(l4_mask->pdst))
+ return -EINVAL;
+ ntuples++;
+ }
+ /* Flow is TCP/UDP */
+ return ++ntuples;
+}
+
+static int validate_ip4(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
+ int ntuples = 0;
+
+ if (l3_mask->l4_4_bytes || l3_mask->tos ||
+ fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+ return -EINVAL;
+ if (l3_mask->ip4src) {
+ if (!all_ones(l3_mask->ip4src))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l3_mask->ip4dst) {
+ if (!all_ones(l3_mask->ip4dst))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l3_mask->proto)
+ ntuples++;
+ /* Flow is IPv4 */
+ return ++ntuples;
+}
+
+static int validate_ip6(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
+ int ntuples = 0;
+
+ if (l3_mask->l4_4_bytes || l3_mask->tclass)
+ return -EINVAL;
+ if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
+ ntuples++;
+
+ if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
+ ntuples++;
+ if (l3_mask->l4_proto)
+ ntuples++;
+ /* Flow is IPv6 */
+ return ++ntuples;
+}
+
+static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
+ int ntuples = 0;
+
+ if (l4_mask->tclass)
+ return -EINVAL;
+
+ if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
+ ntuples++;
+
+ if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
+ ntuples++;
+
+ if (l4_mask->psrc) {
+ if (!all_ones(l4_mask->psrc))
+ return -EINVAL;
+ ntuples++;
+ }
+ if (l4_mask->pdst) {
+ if (!all_ones(l4_mask->pdst))
+ return -EINVAL;
+ ntuples++;
+ }
+ /* Flow is TCP/UDP */
+ return ++ntuples;
+}
+
+static int validate_vlan(struct ethtool_rx_flow_spec *fs)
+{
+ if (fs->m_ext.vlan_etype ||
+ fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
+ return -EINVAL;
+
+ if (fs->m_ext.vlan_tci &&
+ (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
+ return -EINVAL;
+
+ return 1;
+}
+
static int validate_flow(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs)
{
- struct ethtool_tcpip4_spec *l4_mask;
- struct ethtool_usrip4_spec *l3_mask;
- struct ethhdr *eth_mask;
int num_tuples = 0;
+ int ret = 0;
if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
- return -EINVAL;
+ return -ENOSPC;
if (fs->ring_cookie >= priv->channels.params.num_channels &&
fs->ring_cookie != RX_CLS_FLOW_DISC)
@@ -396,73 +630,42 @@ static int validate_flow(struct mlx5e_priv *priv,
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW:
- eth_mask = &fs->m_u.ether_spec;
- if (!is_zero_ether_addr(eth_mask->h_dest))
- num_tuples++;
- if (!is_zero_ether_addr(eth_mask->h_source))
- num_tuples++;
- if (eth_mask->h_proto)
- num_tuples++;
+ num_tuples += validate_ethter(fs);
break;
case TCP_V4_FLOW:
case UDP_V4_FLOW:
- if (fs->m_u.tcp_ip4_spec.tos)
- return -EINVAL;
- l4_mask = &fs->m_u.tcp_ip4_spec;
- if (l4_mask->ip4src) {
- if (!all_ones(l4_mask->ip4src))
- return -EINVAL;
- num_tuples++;
- }
- if (l4_mask->ip4dst) {
- if (!all_ones(l4_mask->ip4dst))
- return -EINVAL;
- num_tuples++;
- }
- if (l4_mask->psrc) {
- if (!all_ones(l4_mask->psrc))
- return -EINVAL;
- num_tuples++;
- }
- if (l4_mask->pdst) {
- if (!all_ones(l4_mask->pdst))
- return -EINVAL;
- num_tuples++;
- }
- /* Flow is TCP/UDP */
- num_tuples++;
+ ret = validate_tcpudp4(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
break;
case IP_USER_FLOW:
- l3_mask = &fs->m_u.usr_ip4_spec;
- if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
- fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
- return -EINVAL;
- if (l3_mask->ip4src) {
- if (!all_ones(l3_mask->ip4src))
- return -EINVAL;
- num_tuples++;
- }
- if (l3_mask->ip4dst) {
- if (!all_ones(l3_mask->ip4dst))
- return -EINVAL;
- num_tuples++;
- }
- /* Flow is IPv4 */
- num_tuples++;
+ ret = validate_ip4(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ ret = validate_tcpudp6(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
+ break;
+ case IPV6_USER_FLOW:
+ ret = validate_ip6(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
break;
default:
- return -EINVAL;
+ return -ENOTSUPP;
}
if ((fs->flow_type & FLOW_EXT)) {
- if (fs->m_ext.vlan_etype ||
- (fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)))
- return -EINVAL;
-
- if (fs->m_ext.vlan_tci) {
- if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
- return -EINVAL;
- }
- num_tuples++;
+ ret = validate_vlan(fs);
+ if (ret < 0)
+ return ret;
+ num_tuples += ret;
}
if (fs->flow_type & FLOW_MAC_EXT &&
@@ -472,8 +675,9 @@ static int validate_flow(struct mlx5e_priv *priv,
return num_tuples;
}
-int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
- struct ethtool_rx_flow_spec *fs)
+static int
+mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs)
{
struct mlx5e_ethtool_table *eth_ft;
struct mlx5e_ethtool_rule *eth_rule;
@@ -483,8 +687,9 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
num_tuples = validate_flow(priv, fs);
if (num_tuples <= 0) {
- netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__);
- return -EINVAL;
+ netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
+ __func__, num_tuples);
+ return num_tuples;
}
eth_ft = get_flow_table(priv, fs, num_tuples);
@@ -519,8 +724,8 @@ del_ethtool_rule:
return err;
}
-int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
- int location)
+static int
+mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
{
struct mlx5e_ethtool_rule *eth_rule;
int err = 0;
@@ -539,8 +744,9 @@ out:
return err;
}
-int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
- int location)
+static int
+mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, int location)
{
struct mlx5e_ethtool_rule *eth_rule;
@@ -557,8 +763,9 @@ int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
return -ENOENT;
}
-int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
- u32 *rule_locs)
+static int
+mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
{
int location = 0;
int idx = 0;
@@ -587,3 +794,51 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
INIT_LIST_HEAD(&priv->fs.ethtool.rules);
}
+
+int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+int mlx5e_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = priv->channels.params.num_channels;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
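
The refactored validators return the number of match tuples (or a negative errno), and validate_flow() sums them so get_flow_table() can derive a flow-table priority from specificity (prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples): more tuples, higher priority). Note how "the flow is TCP/UDP at all" counts as one implicit tuple via the trailing ++ntuples. A standalone model of the counting for the TCP/UDP v4 case; the field names mirror struct ethtool_tcpip4_spec:

#include <stdint.h>
#include <stdio.h>

#define ALL_ONES32 0xffffffffu
#define ALL_ONES16 0xffffu

struct tcpip4_mask { uint32_t ip4src, ip4dst; uint16_t psrc, pdst; uint8_t tos; };

/* Model of validate_tcpudp4(): partial masks are rejected (-22 == -EINVAL),
 * each exact-match field counts as a tuple, plus one for the protocol
 * being TCP/UDP in the first place. */
static int count_tuples(const struct tcpip4_mask *m)
{
    int n = 0;

    if (m->tos)
        return -22;
    if (m->ip4src) { if (m->ip4src != ALL_ONES32) return -22; n++; }
    if (m->ip4dst) { if (m->ip4dst != ALL_ONES32) return -22; n++; }
    if (m->psrc)   { if (m->psrc   != ALL_ONES16) return -22; n++; }
    if (m->pdst)   { if (m->pdst   != ALL_ONES16) return -22; n++; }
    return n + 1;
}

int main(void)
{
    struct tcpip4_mask m = { .ip4dst = ALL_ONES32, .pdst = ALL_ONES16 };
    printf("%d\n", count_tuples(&m));  /* 3: dst IP, dst port, protocol */
    return 0;
}
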
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index c592678ab5f1..5a7939e70190 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -45,8 +45,10 @@
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
-#include "vxlan.h"
+#include "lib/vxlan.h"
+#include "lib/clock.h"
#include "en/port.h"
+#include "en/xdp.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
@@ -96,14 +98,19 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
{
- if (!params->xdp_prog) {
- u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN;
+ u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 linear_rq_headroom = params->xdp_prog ?
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+ u32 frag_sz;
- return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu);
- }
+ linear_rq_headroom += NET_IP_ALIGN;
+
+ frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);
+
+ if (params->xdp_prog && frag_sz < PAGE_SIZE)
+ frag_sz = PAGE_SIZE;
- return PAGE_SIZE;
+ return frag_sz;
}
static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
@@ -222,7 +229,7 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
u8 port_state;
port_state = mlx5_query_vport_state(mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
+ MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
0);
if (port_state == VPORT_STATE_UP) {
@@ -270,12 +277,9 @@ void mlx5e_update_stats_work(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
update_stats_work);
+
mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->profile->update_stats(priv);
- queue_delayed_work(priv->wq, dwork,
- msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
- }
+ priv->profile->update_stats(priv);
mutex_unlock(&priv->state_lock);
}
@@ -352,8 +356,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
- rq->mpwqe.info = kcalloc_node(wq_sz, sizeof(*rq->mpwqe.info),
- GFP_KERNEL, cpu_to_node(c->cpu));
+ rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
+ sizeof(*rq->mpwqe.info)),
+ GFP_KERNEL, cpu_to_node(c->cpu));
if (!rq->mpwqe.info)
return -ENOMEM;
@@ -487,7 +492,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->channel = c;
rq->ix = c->ix;
rq->mdev = mdev;
- rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->stats = &c->priv->channel_stats[c->ix].rq;
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -670,7 +674,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err_free:
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- kfree(rq->mpwqe.info);
+ kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -702,7 +706,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- kfree(rq->mpwqe.info);
+ kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -879,7 +883,7 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
/* UMR WQE (if in progress) is always at wq->head */
if (rq->mpwqe.umr_in_progress)
- mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
+ rq->dealloc_wqe(rq, wq->head);
while (!mlx5_wq_ll_is_empty(wq)) {
struct mlx5e_rx_wqe_ll *wqe;
@@ -965,16 +969,16 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
- kfree(sq->db.di);
+ kvfree(sq->db.xdpi);
}
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- sq->db.di = kcalloc_node(wq_sz, sizeof(*sq->db.di),
- GFP_KERNEL, numa);
- if (!sq->db.di) {
+ sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)),
+ GFP_KERNEL, numa);
+ if (!sq->db.xdpi) {
mlx5e_free_xdpsq_db(sq);
return -ENOMEM;
}
@@ -985,7 +989,8 @@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
- struct mlx5e_xdpsq *sq)
+ struct mlx5e_xdpsq *sq,
+ bool is_redirect)
{
void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
struct mlx5_core_dev *mdev = c->mdev;
@@ -997,6 +1002,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
+ sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ sq->stats = is_redirect ?
+ &c->priv->channel_stats[c->ix].xdpsq :
+ &c->priv->channel_stats[c->ix].rq_xdpsq;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1024,15 +1033,16 @@ static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
- kfree(sq->db.ico_wqe);
+ kvfree(sq->db.ico_wqe);
}
static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- sq->db.ico_wqe = kcalloc_node(wq_sz, sizeof(*sq->db.ico_wqe),
- GFP_KERNEL, numa);
+ sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
+ sizeof(*sq->db.ico_wqe)),
+ GFP_KERNEL, numa);
if (!sq->db.ico_wqe)
return -ENOMEM;
@@ -1077,8 +1087,8 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
- kfree(sq->db.wqe_info);
- kfree(sq->db.dma_fifo);
+ kvfree(sq->db.wqe_info);
+ kvfree(sq->db.dma_fifo);
}
static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
@@ -1086,10 +1096,12 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
- sq->db.dma_fifo = kcalloc_node(df_sz, sizeof(*sq->db.dma_fifo),
- GFP_KERNEL, numa);
- sq->db.wqe_info = kcalloc_node(wq_sz, sizeof(*sq->db.wqe_info),
- GFP_KERNEL, numa);
+ sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
+ sizeof(*sq->db.dma_fifo)),
+ GFP_KERNEL, numa);
+ sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
+ sizeof(*sq->db.wqe_info)),
+ GFP_KERNEL, numa);
if (!sq->db.dma_fifo || !sq->db.wqe_info) {
mlx5e_free_txqsq_db(sq);
return -ENOMEM;
@@ -1523,7 +1535,8 @@ static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
- struct mlx5e_xdpsq *sq)
+ struct mlx5e_xdpsq *sq,
+ bool is_redirect)
{
unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
struct mlx5e_create_sq_param csp = {};
@@ -1531,7 +1544,7 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
int err;
int i;
- err = mlx5e_alloc_xdpsq(c, params, param, sq);
+ err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect);
if (err)
return err;
@@ -1540,6 +1553,8 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
+ if (is_redirect)
+ set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
if (err)
@@ -1893,7 +1908,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
int err;
int eqn;
- c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+ c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@@ -1922,10 +1937,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_icosq_cq;
- err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
+ err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq);
if (err)
goto err_close_tx_cqs;
+ err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
+ if (err)
+ goto err_close_xdp_tx_cqs;
+
/* XDP SQ CQ params are same as normal TXQ sq CQ params */
err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
&cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
@@ -1942,7 +1961,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_icosq;
- err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
+ err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq, false) : 0;
if (err)
goto err_close_sqs;
@@ -1950,9 +1969,17 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_xdp_sq;
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->xdpsq, true);
+ if (err)
+ goto err_close_rq;
+
*cp = c;
return 0;
+
+err_close_rq:
+ mlx5e_close_rq(&c->rq);
+
err_close_xdp_sq:
if (c->xdp)
mlx5e_close_xdpsq(&c->rq.xdpsq);
@@ -1971,6 +1998,9 @@ err_disable_napi:
err_close_rx_cq:
mlx5e_close_cq(&c->rq.cq);
+err_close_xdp_tx_cqs:
+ mlx5e_close_cq(&c->xdpsq.cq);
+
err_close_tx_cqs:
mlx5e_close_tx_cqs(c);
@@ -1979,7 +2009,7 @@ err_close_icosq_cq:
err_napi_del:
netif_napi_del(&c->napi);
- kfree(c);
+ kvfree(c);
return err;
}
@@ -2005,6 +2035,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
+ mlx5e_close_xdpsq(&c->xdpsq);
mlx5e_close_rq(&c->rq);
if (c->xdp)
mlx5e_close_xdpsq(&c->rq.xdpsq);
@@ -2014,11 +2045,12 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
if (c->xdp)
mlx5e_close_cq(&c->rq.xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
+ mlx5e_close_cq(&c->xdpsq.cq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
netif_napi_del(&c->napi);
- kfree(c);
+ kvfree(c);
}
#define DEFAULT_FRAG_SIZE (2048)
@@ -2276,7 +2308,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
chs->num = chs->params.num_channels;
chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
- cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+ cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
if (!chs->c || !cparam)
goto err_free;
@@ -2287,7 +2319,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
- kfree(cparam);
+ kvfree(cparam);
return 0;
err_close_channels:
@@ -2296,7 +2328,7 @@ err_close_channels:
err_free:
kfree(chs->c);
- kfree(cparam);
+ kvfree(cparam);
chs->num = 0;
return err;
}
@@ -2943,7 +2975,7 @@ int mlx5e_open(struct net_device *netdev)
mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
mutex_unlock(&priv->state_lock);
- if (mlx5e_vxlan_allowed(priv->mdev))
+ if (mlx5_vxlan_allowed(priv->mdev->vxlan))
udp_tunnel_get_rx_info(netdev);
return err;
@@ -3371,7 +3403,7 @@ static int mlx5e_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
- priv, priv);
+ priv, priv, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
priv);
@@ -3405,6 +3437,9 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ /* update HW stats in background for next time */
+ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+
if (mlx5e_is_uplink_rep(priv)) {
stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
@@ -3590,7 +3625,7 @@ unlock:
return err;
}
-#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3645,7 +3680,7 @@ static int mlx5e_set_features(struct net_device *netdev,
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
-#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_EN_ARFS
err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
#endif
@@ -3703,6 +3738,14 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
new_channels.params = *params;
new_channels.params.sw_mtu = new_mtu;
+ if (params->xdp_prog &&
+ !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
+ netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
+ new_mtu, MLX5E_XDP_MAX_MTU);
+ err = -EINVAL;
+ goto out;
+ }
+
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
@@ -3740,7 +3783,8 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+ if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
+ (mlx5_clock_get_ptp_index(priv->mdev) == -1))
return -EOPNOTSUPP;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -3874,9 +3918,9 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
static int mlx5_vport_link2ifla(u8 esw_link)
{
switch (esw_link) {
- case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
+ case MLX5_VPORT_ADMIN_STATE_DOWN:
return IFLA_VF_LINK_STATE_DISABLE;
- case MLX5_ESW_VPORT_ADMIN_STATE_UP:
+ case MLX5_VPORT_ADMIN_STATE_UP:
return IFLA_VF_LINK_STATE_ENABLE;
}
return IFLA_VF_LINK_STATE_AUTO;
@@ -3886,11 +3930,11 @@ static int mlx5_ifla_link2vport(u8 ifla_link)
{
switch (ifla_link) {
case IFLA_VF_LINK_STATE_DISABLE:
- return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
+ return MLX5_VPORT_ADMIN_STATE_DOWN;
case IFLA_VF_LINK_STATE_ENABLE:
- return MLX5_ESW_VPORT_ADMIN_STATE_UP;
+ return MLX5_VPORT_ADMIN_STATE_UP;
}
- return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
+ return MLX5_VPORT_ADMIN_STATE_AUTO;
}
static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
@@ -3928,6 +3972,57 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
}
#endif
+struct mlx5e_vxlan_work {
+ struct work_struct work;
+ struct mlx5e_priv *priv;
+ u16 port;
+};
+
+static void mlx5e_vxlan_add_work(struct work_struct *work)
+{
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
+ u16 port = vxlan_work->port;
+
+ mutex_lock(&priv->state_lock);
+ mlx5_vxlan_add_port(priv->mdev->vxlan, port);
+ mutex_unlock(&priv->state_lock);
+
+ kfree(vxlan_work);
+}
+
+static void mlx5e_vxlan_del_work(struct work_struct *work)
+{
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
+ u16 port = vxlan_work->port;
+
+ mutex_lock(&priv->state_lock);
+ mlx5_vxlan_del_port(priv->mdev->vxlan, port);
+ mutex_unlock(&priv->state_lock);
+ kfree(vxlan_work);
+}
+
+static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
+{
+ struct mlx5e_vxlan_work *vxlan_work;
+
+ vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
+ if (!vxlan_work)
+ return;
+
+ if (add)
+ INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
+ else
+ INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);
+
+ vxlan_work->priv = priv;
+ vxlan_work->port = port;
+ queue_work(priv->wq, &vxlan_work->work);
+}
+
static void mlx5e_add_vxlan_port(struct net_device *netdev,
struct udp_tunnel_info *ti)
{
@@ -3936,10 +4031,10 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (!mlx5e_vxlan_allowed(priv->mdev))
+ if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
return;
- mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
+ mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
}
static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -3950,10 +4045,10 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (!mlx5e_vxlan_allowed(priv->mdev))
+ if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
return;
- mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
+ mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
}
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
@@ -3984,7 +4079,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
port = be16_to_cpu(udph->dest);
/* Verify if UDP port is being offloaded by HW */
- if (mlx5e_vxlan_lookup_port(priv, port))
+ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
return features;
}
@@ -4091,26 +4186,47 @@ static void mlx5e_tx_timeout(struct net_device *dev)
queue_work(priv->wq, &priv->tx_timeout_work);
}
+static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
+{
+ struct net_device *netdev = priv->netdev;
+ struct mlx5e_channels new_channels = {};
+
+ if (priv->channels.params.lro_en) {
+ netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
+ return -EINVAL;
+ }
+
+ if (MLX5_IPSEC_DEV(priv->mdev)) {
+ netdev_warn(netdev, "can't set XDP with IPSec offload\n");
+ return -EINVAL;
+ }
+
+ new_channels.params = priv->channels.params;
+ new_channels.params.xdp_prog = prog;
+
+ if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
+ netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
+ new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct bpf_prog *old_prog;
- int err = 0;
bool reset, was_opened;
+ int err = 0;
int i;
mutex_lock(&priv->state_lock);
- if ((netdev->features & NETIF_F_LRO) && prog) {
- netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
- err = -EINVAL;
- goto unlock;
- }
-
- if ((netdev->features & NETIF_F_HW_ESP) && prog) {
- netdev_warn(netdev, "can't set XDP with IPSec offload\n");
- err = -EINVAL;
- goto unlock;
+ if (prog) {
+ err = mlx5e_xdp_allowed(priv, prog);
+ if (err)
+ goto unlock;
}
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
@@ -4193,7 +4309,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return mlx5e_xdp_set(dev, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_id = mlx5e_xdp_query(dev);
- xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
@@ -4235,11 +4350,12 @@ static const struct net_device_ops mlx5e_netdev_ops = {
.ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
.ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
.ndo_features_check = mlx5e_features_check,
-#ifdef CONFIG_RFS_ACCEL
- .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
-#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
+ .ndo_xdp_xmit = mlx5e_xdp_xmit,
+#ifdef CONFIG_MLX5_EN_ARFS
+ .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx5e_netpoll,
#endif
@@ -4535,8 +4651,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
- netdev->hw_features |= NETIF_F_GSO_PARTIAL;
+ if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
netdev->hw_enc_features |= NETIF_F_IP_CSUM;
netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -4544,7 +4659,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
}
- if (mlx5e_vxlan_allowed(mdev)) {
+ if (mlx5_vxlan_allowed(mdev->vxlan)) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
@@ -4561,6 +4676,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM;
}
+ netdev->hw_features |= NETIF_F_GSO_PARTIAL;
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+ netdev->features |= NETIF_F_GSO_UDP_L4;
+
mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
if (fcs_supported)
@@ -4585,7 +4705,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
netdev->hw_features |= NETIF_F_HW_TC;
-#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_EN_ARFS
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
}
@@ -4650,14 +4770,12 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_build_nic_netdev(netdev);
mlx5e_build_tc2txq_maps(priv);
- mlx5e_vxlan_init(priv);
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
- mlx5e_vxlan_cleanup(priv);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -4831,7 +4949,7 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
return NULL;
}
-#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_EN_ARFS
netdev->rx_cpu_rmap = mdev->rmap;
#endif
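
mlx5e_xdp_allowed() gathers the XDP preconditions in one place (no LRO, no IPsec offload, and an MTU small enough that every received frame still fits a linear single-page buffer), and mlx5e_change_mtu() applies the same linearity test so the MTU cannot be raised under a loaded program. A simplified standalone model of the size check; the fixed L2 overhead constant is an assumption, and the real test (mlx5e_rx_is_linear_skb()) also reserves room for struct skb_shared_info:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SZ      4096
#define XDP_HEADROOM 256   /* XDP_PACKET_HEADROOM */
#define NET_IP_ALIGN 0     /* 0 on x86 */
#define HARD_HDR     18    /* assumed L2 overhead added by SW2HW_MTU */

/* Model of the linear-RX test behind mlx5e_xdp_allowed(): with XDP the
 * headroom plus the HW frame must still fit in a single page. */
static bool xdp_mtu_ok(int sw_mtu)
{
    return XDP_HEADROOM + NET_IP_ALIGN + HARD_HDR + sw_mtu <= PAGE_SZ;
}

int main(void)
{
    printf("1500: %d, 9000: %d\n", xdp_mtu_ok(1500), xdp_mtu_ok(9000));
    return 0;
}
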
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2b8040a3cdbd..c9cc9747d21d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -698,8 +698,8 @@ static int mlx5e_rep_open(struct net_device *dev)
goto unlock;
if (!mlx5_modify_vport_admin_state(priv->mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+ rep->vport, MLX5_VPORT_ADMIN_STATE_UP))
netif_carrier_on(dev);
unlock:
@@ -716,8 +716,8 @@ static int mlx5e_rep_close(struct net_device *dev)
mutex_lock(&priv->state_lock);
mlx5_modify_vport_admin_state(priv->mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+ rep->vport, MLX5_VPORT_ADMIN_STATE_DOWN);
ret = mlx5e_close_locked(dev);
mutex_unlock(&priv->state_lock);
return ret;
@@ -797,7 +797,7 @@ static int mlx5e_rep_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
- priv, priv);
+ priv, priv, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
return 0;
@@ -897,6 +897,9 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ /* update HW stats in background for next time */
+ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+
memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
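/* --- illustrative sketch, not part of the patch ---
 * The mlx5e_rep_get_stats() change above returns the cached counters
 * immediately and queues a background refresh for the next reader,
 * instead of blocking on a firmware query. A minimal single-threaded
 * model of that "return stale, refresh async" pattern (all names
 * invented):
 */
#include <stdbool.h>
#include <stdio.h>

struct stats { unsigned long rx_packets; };

static struct stats hw_cache;     /* last snapshot of HW counters */
static bool refresh_pending;

static void schedule_refresh(void) /* stands in for queue_delayed_work() */
{
	refresh_pending = true;
}

static struct stats get_stats(void)
{
	schedule_refresh();       /* update in background for next time */
	return hw_cache;          /* never block the caller on HW access */
}

int main(void)
{
	struct stats s = get_stats();

	printf("rx=%lu pending=%d\n", s.rx_packets, refresh_pending);
	return 0;
}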
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d3a1dd20e41d..15d8ae28c040 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,7 +34,6 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
-#include <linux/bpf_trace.h>
#include <net/busy_poll.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
@@ -44,7 +43,9 @@
#include "en_rep.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/tls_rxtx.h"
#include "lib/clock.h"
+#include "en/xdp.h"
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
@@ -238,8 +239,7 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
return 0;
}
-static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
{
dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
}
@@ -276,10 +276,11 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
}
static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
- struct mlx5e_wqe_frag_info *frag)
+ struct mlx5e_wqe_frag_info *frag,
+ bool recycle)
{
if (frag->last_in_page)
- mlx5e_page_release(rq, frag->di, true);
+ mlx5e_page_release(rq, frag->di, recycle);
}
static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -307,25 +308,26 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
free_frags:
while (--i >= 0)
- mlx5e_put_rx_frag(rq, --frag);
+ mlx5e_put_rx_frag(rq, --frag, true);
return err;
}
static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
- struct mlx5e_wqe_frag_info *wi)
+ struct mlx5e_wqe_frag_info *wi,
+ bool recycle)
{
int i;
for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
- mlx5e_put_rx_frag(rq, wi);
+ mlx5e_put_rx_frag(rq, wi, recycle);
}
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, false);
}
static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
@@ -395,7 +397,8 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
}
}
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
+static void
+mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
const bool no_xdp_xmit =
bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
@@ -404,7 +407,7 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
- mlx5e_page_release(rq, &dma_info[i], true);
+ mlx5e_page_release(rq, &dma_info[i], recycle);
}
static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
@@ -487,7 +490,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
sq->pc += MLX5E_UMR_WQEBBS;
- mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
return 0;
@@ -504,8 +507,8 @@ err_unmap:
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-
- mlx5e_free_rx_mpwqe(rq, wi);
+ /* Don't recycle; this function is called on rq/netdev close */
+ mlx5e_free_rx_mpwqe(rq, wi, false);
}
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
@@ -601,6 +604,8 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (!rq->mpwqe.umr_in_progress)
mlx5e_alloc_rx_mpwqe(rq, wq->head);
+ else
+ rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2;
return false;
}
@@ -795,6 +800,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct net_device *netdev = rq->netdev;
skb->mac_len = ETH_HLEN;
+
+#ifdef CONFIG_MLX5_EN_TLS
+ mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt);
+#endif
+
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
@@ -839,135 +849,6 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
-static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_tx_wqe *wqe;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */
-
- wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
- mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
-}
-
-static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *di,
- const struct xdp_buff *xdp)
-{
- struct mlx5e_xdpsq *sq = &rq->xdpsq;
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
- struct mlx5_wqe_data_seg *dseg;
-
- ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
- dma_addr_t dma_addr = di->addr + data_offset;
- unsigned int dma_len = xdp->data_end - xdp->data;
-
- struct mlx5e_rq_stats *stats = rq->stats;
-
- prefetchw(wqe);
-
- if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) {
- stats->xdp_drop++;
- return false;
- }
-
- if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
- if (sq->db.doorbell) {
- /* SQ is full, ring doorbell */
- mlx5e_xmit_xdp_doorbell(sq);
- sq->db.doorbell = false;
- }
- stats->xdp_tx_full++;
- return false;
- }
-
- dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
-
- cseg->fm_ce_se = 0;
-
- dseg = (struct mlx5_wqe_data_seg *)eseg + 1;
-
- /* copy the inline part if required */
- if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
- eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
- dma_len -= MLX5E_XDP_MIN_INLINE;
- dma_addr += MLX5E_XDP_MIN_INLINE;
- dseg++;
- }
-
- /* write the dma part */
- dseg->addr = cpu_to_be64(dma_addr);
- dseg->byte_count = cpu_to_be32(dma_len);
-
- cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
-
- /* move page to reference to sq responsibility,
- * and mark so it's not put back in page-cache.
- */
- __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
- sq->db.di[pi] = *di;
- sq->pc++;
-
- sq->db.doorbell = true;
-
- stats->xdp_tx++;
- return true;
-}
-
-/* returns true if packet was consumed by xdp */
-static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *di,
- void *va, u16 *rx_headroom, u32 *len)
-{
- struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
- struct xdp_buff xdp;
- u32 act;
- int err;
-
- if (!prog)
- return false;
-
- xdp.data = va + *rx_headroom;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + *len;
- xdp.data_hard_start = va;
- xdp.rxq = &rq->xdp_rxq;
-
- act = bpf_prog_run_xdp(prog, &xdp);
- switch (act) {
- case XDP_PASS:
- *rx_headroom = xdp.data - xdp.data_hard_start;
- *len = xdp.data_end - xdp.data;
- return false;
- case XDP_TX:
- if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
- trace_xdp_exception(rq->netdev, prog, act);
- return true;
- case XDP_REDIRECT:
- /* When XDP enabled then page-refcnt==1 here */
- err = xdp_do_redirect(rq->netdev, &xdp, prog);
- if (!err) {
- __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
- rq->xdpsq.db.redirect_flush = true;
- mlx5e_page_dma_unmap(rq, di);
- }
- return true;
- default:
- bpf_warn_invalid_xdp_action(act);
- case XDP_ABORTED:
- trace_xdp_exception(rq->netdev, prog, act);
- case XDP_DROP:
- rq->stats->xdp_drop++;
- return true;
- }
-}
-
static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
u32 frag_size, u16 headroom,
@@ -1105,7 +986,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
free_wqe:
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
@@ -1147,7 +1028,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
free_wqe:
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
@@ -1218,6 +1099,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
frag_size, DMA_FROM_DEVICE);
+ prefetchw(va); /* xdp_frame data area */
prefetch(data);
rcu_read_lock();
@@ -1261,7 +1143,10 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
}
if (unlikely(mpwrq_is_filler_cqe(cqe))) {
- rq->stats->mpwqe_filler++;
+ struct mlx5e_rq_stats *stats = rq->stats;
+
+ stats->mpwqe_filler_cqes++;
+ stats->mpwqe_filler_strides += cstrides;
goto mpwrq_cqe_out;
}
@@ -1281,7 +1166,7 @@ mpwrq_cqe_out:
wq = &rq->mpwqe.wq;
wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
- mlx5e_free_rx_mpwqe(rq, wi);
+ mlx5e_free_rx_mpwqe(rq, wi, true);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
@@ -1317,14 +1202,14 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
rq->handle_rx_cqe(rq, cqe);
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
- if (xdpsq->db.doorbell) {
+ if (xdpsq->doorbell) {
mlx5e_xmit_xdp_doorbell(xdpsq);
- xdpsq->db.doorbell = false;
+ xdpsq->doorbell = false;
}
- if (xdpsq->db.redirect_flush) {
+ if (xdpsq->redirect_flush) {
xdp_do_flush_map();
- xdpsq->db.redirect_flush = false;
+ xdpsq->redirect_flush = false;
}
mlx5_cqwq_update_db_record(&cq->wq);
@@ -1335,78 +1220,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
return work_done;
}
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
-{
- struct mlx5e_xdpsq *sq;
- struct mlx5_cqe64 *cqe;
- struct mlx5e_rq *rq;
- u16 sqcc;
- int i;
-
- sq = container_of(cq, struct mlx5e_xdpsq, cq);
-
- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
- return false;
-
- cqe = mlx5_cqwq_get_cqe(&cq->wq);
- if (!cqe)
- return false;
-
- rq = container_of(sq, struct mlx5e_rq, xdpsq);
-
- /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
- * otherwise a cq overrun may occur
- */
- sqcc = sq->cc;
-
- i = 0;
- do {
- u16 wqe_counter;
- bool last_wqe;
-
- mlx5_cqwq_pop(&cq->wq);
-
- wqe_counter = be16_to_cpu(cqe->wqe_counter);
-
- do {
- struct mlx5e_dma_info *di;
- u16 ci;
-
- last_wqe = (sqcc == wqe_counter);
-
- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
- di = &sq->db.di[ci];
-
- sqcc++;
- /* Recycle RX page */
- mlx5e_page_release(rq, di, true);
- } while (!last_wqe);
- } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
-
- mlx5_cqwq_update_db_record(&cq->wq);
-
- /* ensure cq space is freed before enabling more cqes */
- wmb();
-
- sq->cc = sqcc;
- return (i == MLX5E_TX_CQ_POLL_BUDGET);
-}
-
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
-{
- struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq);
- struct mlx5e_dma_info *di;
- u16 ci;
-
- while (sq->cc != sq->pc) {
- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
- di = &sq->db.di[ci];
- sq->cc++;
-
- mlx5e_page_release(rq, di, false);
- }
-}
-
#ifdef CONFIG_MLX5_CORE_IPOIB
#define MLX5_IB_GRH_DGID_OFFSET 24
@@ -1508,7 +1321,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
wq_free_wqe:
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
mlx5_wq_cyc_pop(wq);
}
@@ -1531,19 +1344,19 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (unlikely(!skb)) {
/* a DROP, save the page-reuse checks */
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
goto wq_cyc_pop;
}
- skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb);
+ skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
if (unlikely(!skb)) {
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
goto wq_cyc_pop;
}
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
napi_gro_receive(rq->cq.napi, skb);
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
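/* --- illustrative sketch, not part of the patch ---
 * The en_rx.c changes above thread a `recycle` flag through every free
 * path: datapath completions free with recycle=true (the page may go
 * back to the RQ page cache), while rq/netdev teardown frees with
 * recycle=false. A tiny userspace model of that split (cache size and
 * names invented):
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_SIZE 4
static void *cache[CACHE_SIZE];
static int cache_len;

static void page_release(void *page, bool recycle)
{
	if (recycle && cache_len < CACHE_SIZE) {
		cache[cache_len++] = page; /* keep for reuse */
		return;
	}
	free(page);                        /* teardown, or cache full */
}

int main(void)
{
	page_release(malloc(4096), true);  /* datapath: recycled */
	page_release(malloc(4096), false); /* close path: freed */
	printf("cached=%d\n", cache_len);
	while (cache_len)                  /* drain the cache before exit */
		free(cache[--cache_len]);
	return 0;
}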
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 4d316cc9b008..35ded91203f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -74,7 +74,7 @@ static int mlx5e_test_link_state(struct mlx5e_priv *priv)
if (!netif_carrier_ok(priv->netdev))
return 1;
- port_state = mlx5_query_vport_state(priv->mdev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
+ port_state = mlx5_query_vport_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0);
return port_state == VPORT_STATE_UP ? 0 : 1;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1646859974ce..12fdf5c92b67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -44,6 +44,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
@@ -58,8 +59,11 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
@@ -67,10 +71,17 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@ -80,6 +91,11 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
};
@@ -118,6 +134,8 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
struct mlx5e_channel_stats *channel_stats =
&priv->channel_stats[i];
+ struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
+ struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
int j;
@@ -131,11 +149,15 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_csum_complete += rq_stats->csum_complete;
s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
- s->rx_xdp_drop += rq_stats->xdp_drop;
- s->rx_xdp_tx += rq_stats->xdp_tx;
- s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
+ s->rx_xdp_drop += rq_stats->xdp_drop;
+ s->rx_xdp_redirect += rq_stats->xdp_redirect;
+ s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
+ s->rx_xdp_tx_full += xdpsq_stats->full;
+ s->rx_xdp_tx_err += xdpsq_stats->err;
+ s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
s->rx_wqe_err += rq_stats->wqe_err;
- s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+ s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
+ s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -145,7 +167,17 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_cache_empty += rq_stats->cache_empty;
s->rx_cache_busy += rq_stats->cache_busy;
s->rx_cache_waive += rq_stats->cache_waive;
- s->ch_eq_rearm += ch_stats->eq_rearm;
+ s->rx_congst_umr += rq_stats->congst_umr;
+ s->ch_events += ch_stats->events;
+ s->ch_poll += ch_stats->poll;
+ s->ch_arm += ch_stats->arm;
+ s->ch_aff_change += ch_stats->aff_change;
+ s->ch_eq_rearm += ch_stats->eq_rearm;
+ /* xdp redirect */
+ s->tx_xdp_xmit += xdpsq_red_stats->xmit;
+ s->tx_xdp_full += xdpsq_red_stats->full;
+ s->tx_xdp_err += xdpsq_red_stats->err;
+ s->tx_xdp_cqes += xdpsq_red_stats->cqes;
for (j = 0; j < priv->max_opened_tc; j++) {
struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
@@ -157,8 +189,10 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
+ s->tx_nop += sq_stats->nop;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
+ s->tx_udp_seg_rem += sq_stats->udp_seg_rem;
s->tx_queue_dropped += sq_stats->dropped;
s->tx_cqe_err += sq_stats->cqe_err;
s->tx_recover += sq_stats->recover;
@@ -170,6 +204,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_tls_ooo += sq_stats->tls_ooo;
s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
#endif
+ s->tx_cqes += sq_stats->cqes;
}
}
@@ -1106,13 +1141,13 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
@@ -1122,6 +1157,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
};
static const struct counter_desc sq_stats_desc[] = {
@@ -1140,16 +1176,37 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
+static const struct counter_desc rq_xdpsq_stats_desc[] = {
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
+};
+
+static const struct counter_desc xdpsq_stats_desc[] = {
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
+};
+
static const struct counter_desc ch_stats_desc[] = {
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
+#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
+#define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
@@ -1158,7 +1215,9 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
return (NUM_RQ_STATS * max_nch) +
(NUM_CH_STATS * max_nch) +
- (NUM_SQ_STATS * max_nch * priv->max_opened_tc);
+ (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
+ (NUM_RQ_XDPSQ_STATS * max_nch) +
+ (NUM_XDPSQ_STATS * max_nch);
}
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
@@ -1172,9 +1231,14 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ch_stats_desc[j].format, i);
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ rq_stats_desc[j].format, i);
+ for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ rq_xdpsq_stats_desc[j].format, i);
+ }
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
@@ -1183,6 +1247,11 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
sq_stats_desc[j].format,
priv->channel_tc2txq[i][tc]);
+ for (i = 0; i < max_nch; i++)
+ for (j = 0; j < NUM_XDPSQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ xdpsq_stats_desc[j].format, i);
+
return idx;
}
@@ -1198,11 +1267,16 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
ch_stats_desc, j);
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
rq_stats_desc, j);
+ for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
+ rq_xdpsq_stats_desc, j);
+ }
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
@@ -1211,6 +1285,12 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
sq_stats_desc, j);
+ for (i = 0; i < max_nch; i++)
+ for (j = 0; j < NUM_XDPSQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
+ xdpsq_stats_desc, j);
+
return idx;
}
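/* --- illustrative sketch, not part of the patch ---
 * The stats-group changes above rely on three functions staying in
 * lockstep: get_num_stats(), fill_strings() and fill_stats() must walk
 * the same descriptor tables in the same order, or ethtool names and
 * values drift apart. Each table pairs a format string with an
 * offsetof() into the stats struct; a simplified, runnable version:
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct xdpsq_stats { uint64_t xmit, full, err, cqes; };

struct counter_desc { const char *fmt; size_t offset; };

static const struct counter_desc xdpsq_desc[] = {
	{ "tx%d_xdp_xmit", offsetof(struct xdpsq_stats, xmit) },
	{ "tx%d_xdp_full", offsetof(struct xdpsq_stats, full) },
	{ "tx%d_xdp_err",  offsetof(struct xdpsq_stats, err)  },
	{ "tx%d_xdp_cqes", offsetof(struct xdpsq_stats, cqes) },
};

int main(void)
{
	struct xdpsq_stats s = { .xmit = 7 };
	size_t j;

	for (j = 0; j < sizeof(xdpsq_desc) / sizeof(xdpsq_desc[0]); j++) {
		char name[32];

		snprintf(name, sizeof(name), xdpsq_desc[j].fmt, 0);
		printf("%s = %llu\n", name, (unsigned long long)
		       *(uint64_t *)((char *)&s + xdpsq_desc[j].offset));
	}
	return 0;
}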
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 643153bb3607..a4c035aedd46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -44,6 +44,8 @@
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)
struct counter_desc {
@@ -61,6 +63,7 @@ struct mlx5e_sw_stats {
u64 tx_tso_inner_packets;
u64 tx_tso_inner_bytes;
u64 tx_added_vlan_packets;
+ u64 tx_nop;
u64 rx_lro_packets;
u64 rx_lro_bytes;
u64 rx_removed_vlan_packets;
@@ -69,8 +72,11 @@ struct mlx5e_sw_stats {
u64 rx_csum_complete;
u64 rx_csum_unnecessary_inner;
u64 rx_xdp_drop;
- u64 rx_xdp_tx;
+ u64 rx_xdp_redirect;
+ u64 rx_xdp_tx_xmit;
u64 rx_xdp_tx_full;
+ u64 rx_xdp_tx_err;
+ u64 rx_xdp_tx_cqe;
u64 tx_csum_none;
u64 tx_csum_partial;
u64 tx_csum_partial_inner;
@@ -78,10 +84,17 @@ struct mlx5e_sw_stats {
u64 tx_queue_dropped;
u64 tx_xmit_more;
u64 tx_recover;
+ u64 tx_cqes;
u64 tx_queue_wake;
+ u64 tx_udp_seg_rem;
u64 tx_cqe_err;
+ u64 tx_xdp_xmit;
+ u64 tx_xdp_full;
+ u64 tx_xdp_err;
+ u64 tx_xdp_cqes;
u64 rx_wqe_err;
- u64 rx_mpwqe_filler;
+ u64 rx_mpwqe_filler_cqes;
+ u64 rx_mpwqe_filler_strides;
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
@@ -91,6 +104,11 @@ struct mlx5e_sw_stats {
u64 rx_cache_empty;
u64 rx_cache_busy;
u64 rx_cache_waive;
+ u64 rx_congst_umr;
+ u64 ch_events;
+ u64 ch_poll;
+ u64 ch_arm;
+ u64 ch_aff_change;
u64 ch_eq_rearm;
#ifdef CONFIG_MLX5_EN_TLS
@@ -168,10 +186,10 @@ struct mlx5e_rq_stats {
u64 lro_bytes;
u64 removed_vlan_packets;
u64 xdp_drop;
- u64 xdp_tx;
- u64 xdp_tx_full;
+ u64 xdp_redirect;
u64 wqe_err;
- u64 mpwqe_filler;
+ u64 mpwqe_filler_cqes;
+ u64 mpwqe_filler_strides;
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;
@@ -181,6 +199,7 @@ struct mlx5e_rq_stats {
u64 cache_empty;
u64 cache_busy;
u64 cache_waive;
+ u64 congst_umr;
};
struct mlx5e_sq_stats {
@@ -196,6 +215,7 @@ struct mlx5e_sq_stats {
u64 csum_partial_inner;
u64 added_vlan_packets;
u64 nop;
+ u64 udp_seg_rem;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_ooo;
u64 tls_resync_bytes;
@@ -206,11 +226,24 @@ struct mlx5e_sq_stats {
u64 dropped;
u64 recover;
/* dirtied @completion */
- u64 wake ____cacheline_aligned_in_smp;
+ u64 cqes ____cacheline_aligned_in_smp;
+ u64 wake;
u64 cqe_err;
};
+struct mlx5e_xdpsq_stats {
+ u64 xmit;
+ u64 full;
+ u64 err;
+ /* dirtied @completion */
+ u64 cqes ____cacheline_aligned_in_smp;
+};
+
struct mlx5e_ch_stats {
+ u64 events;
+ u64 poll;
+ u64 arm;
+ u64 aff_change;
u64 eq_rearm;
};
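/* --- illustrative sketch, not part of the patch ---
 * mlx5e_sq_stats and mlx5e_xdpsq_stats above separate the fields
 * dirtied on the xmit path from those dirtied at completion, with
 * ____cacheline_aligned_in_smp starting a fresh cache line for the
 * completion-side counters so the two CPUs don't false-share. A
 * portable C11 model of the same layout idea (64-byte line assumed):
 */
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CACHELINE 64 /* assumption: 64-byte cache lines */

struct sq_stats {
	/* dirtied by the transmitting CPU */
	uint64_t packets;
	uint64_t bytes;
	/* dirtied by the completion (NAPI) CPU -- own cache line */
	alignas(CACHELINE) uint64_t cqes;
	uint64_t wake;
	uint64_t cqe_err;
};

int main(void)
{
	printf("cqes offset: %zu\n", offsetof(struct sq_stats, cqes));
	return 0;
}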
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index dfbcda0d0e08..9131a1376e7d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -50,7 +50,7 @@
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
-#include "vxlan.h"
+#include "lib/vxlan.h"
#include "fs_core.h"
#include "en/port.h"
@@ -1032,10 +1032,8 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
* dst ip pair
*/
n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
- if (!n) {
- WARN(1, "The neighbour already freed\n");
+ if (!n)
return;
- }
neigh_event_send(n, NULL);
neigh_release(n);
@@ -1126,16 +1124,12 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ENC_PORTS,
f->mask);
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- struct net_device *up_dev = uplink_rpriv->netdev;
- struct mlx5e_priv *up_priv = netdev_priv(up_dev);
/* Full udp dst port must be given */
if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
goto vxlan_match_offload_err;
- if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
+ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
parse_vxlan_attr(spec, f);
else {
@@ -1213,6 +1207,26 @@ vxlan_match_offload_err:
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_dissector_key_ip *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IP,
+ f->key);
+ struct flow_dissector_key_ip *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IP,
+ f->mask);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+ }
+
/* Enforce DMAC when offloading incoming tunneled flows.
* Flow counters require a match on the DMAC.
*/
@@ -1237,6 +1251,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
+ void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
u16 addr_type = 0;
u8 ip_proto = 0;
@@ -1247,6 +1265,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
@@ -1256,7 +1275,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_TCP) |
- BIT(FLOW_DISSECTOR_KEY_IP))) {
+ BIT(FLOW_DISSECTOR_KEY_IP) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
f->dissector->used_keys);
return -EOPNOTSUPP;
@@ -1327,9 +1347,18 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_VLAN,
f->mask);
- if (mask->vlan_id || mask->vlan_priority) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+ if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
+ if (key->vlan_tpid == htons(ETH_P_8021AD)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ svlan_tag, 1);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ cvlan_tag, 1);
+ }
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
@@ -1341,6 +1370,41 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_dissector_key_vlan *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CVLAN,
+ f->key);
+ struct flow_dissector_key_vlan *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CVLAN,
+ f->mask);
+ if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
+ if (key->vlan_tpid == htons(ETH_P_8021AD)) {
+ MLX5_SET(fte_match_set_misc, misc_c,
+ outer_second_svlan_tag, 1);
+ MLX5_SET(fte_match_set_misc, misc_v,
+ outer_second_svlan_tag, 1);
+ } else {
+ MLX5_SET(fte_match_set_misc, misc_c,
+ outer_second_cvlan_tag, 1);
+ MLX5_SET(fte_match_set_misc, misc_v,
+ outer_second_cvlan_tag, 1);
+ }
+
+ MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
+ mask->vlan_id);
+ MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
+ key->vlan_id);
+ MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
+ mask->vlan_priority);
+ MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
+ key->vlan_priority);
+
+ *match_level = MLX5_MATCH_L2;
+ }
+ }
+
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_dissector_key_basic *key =
skb_flow_dissector_target(f->dissector,
@@ -2082,7 +2146,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
struct net_device **out_dev,
struct flowi4 *fl4,
struct neighbour **out_n,
- int *out_ttl)
+ u8 *out_ttl)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *uplink_rpriv;
@@ -2106,7 +2170,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
else
*out_dev = rt->dst.dev;
- *out_ttl = ip4_dst_hoplimit(&rt->dst);
+ if (!(*out_ttl))
+ *out_ttl = ip4_dst_hoplimit(&rt->dst);
n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
ip_rt_put(rt);
if (!n)
@@ -2135,7 +2200,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct net_device **out_dev,
struct flowi6 *fl6,
struct neighbour **out_n,
- int *out_ttl)
+ u8 *out_ttl)
{
struct neighbour *n = NULL;
struct dst_entry *dst;
@@ -2150,7 +2215,8 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
if (ret < 0)
return ret;
- *out_ttl = ip6_dst_hoplimit(dst);
+ if (!(*out_ttl))
+ *out_ttl = ip6_dst_hoplimit(dst);
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
/* if the egress device isn't on the same HW e-switch, we use the uplink */
@@ -2174,7 +2240,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
char buf[], int encap_size,
unsigned char h_dest[ETH_ALEN],
- int ttl,
+ u8 tos, u8 ttl,
__be32 daddr,
__be32 saddr,
__be16 udp_dst_port,
@@ -2194,6 +2260,7 @@ static void gen_vxlan_header_ipv4(struct net_device *out_dev,
ip->daddr = daddr;
ip->saddr = saddr;
+ ip->tos = tos;
ip->ttl = ttl;
ip->protocol = IPPROTO_UDP;
ip->version = 0x4;
@@ -2207,7 +2274,7 @@ static void gen_vxlan_header_ipv4(struct net_device *out_dev,
static void gen_vxlan_header_ipv6(struct net_device *out_dev,
char buf[], int encap_size,
unsigned char h_dest[ETH_ALEN],
- int ttl,
+ u8 tos, u8 ttl,
struct in6_addr *daddr,
struct in6_addr *saddr,
__be16 udp_dst_port,
@@ -2224,7 +2291,7 @@ static void gen_vxlan_header_ipv6(struct net_device *out_dev,
ether_addr_copy(eth->h_source, out_dev->dev_addr);
eth->h_proto = htons(ETH_P_IPV6);
- ip6_flow_hdr(ip6h, 0, 0);
+ ip6_flow_hdr(ip6h, tos, 0);
/* the HW fills up ipv6 payload len */
ip6h->nexthdr = IPPROTO_UDP;
ip6h->hop_limit = ttl;
@@ -2246,9 +2313,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
struct net_device *out_dev;
struct neighbour *n = NULL;
struct flowi4 fl4 = {};
+ u8 nud_state, tos, ttl;
char *encap_header;
- int ttl, err;
- u8 nud_state;
+ int err;
if (max_encap_size < ipv4_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -2269,6 +2336,10 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
err = -EOPNOTSUPP;
goto free_encap;
}
+
+ tos = tun_key->tos;
+ ttl = tun_key->ttl;
+
fl4.flowi4_tos = tun_key->tos;
fl4.daddr = tun_key->u.ipv4.dst;
fl4.saddr = tun_key->u.ipv4.src;
@@ -2303,7 +2374,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
gen_vxlan_header_ipv4(out_dev, encap_header,
- ipv4_encap_size, e->h_dest, ttl,
+ ipv4_encap_size, e->h_dest, tos, ttl,
fl4.daddr,
fl4.saddr, tun_key->tp_dst,
tunnel_id_to_key32(tun_key->tun_id));
@@ -2351,9 +2422,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
struct net_device *out_dev;
struct neighbour *n = NULL;
struct flowi6 fl6 = {};
+ u8 nud_state, tos, ttl;
char *encap_header;
- int err, ttl = 0;
- u8 nud_state;
+ int err;
if (max_encap_size < ipv6_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -2375,6 +2446,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
goto free_encap;
}
+ tos = tun_key->tos;
+ ttl = tun_key->ttl;
+
fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
fl6.daddr = tun_key->u.ipv6.dst;
fl6.saddr = tun_key->u.ipv6.src;
@@ -2409,7 +2483,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
gen_vxlan_header_ipv6(out_dev, encap_header,
- ipv6_encap_size, e->h_dest, ttl,
+ ipv6_encap_size, e->h_dest, tos, ttl,
&fl6.daddr,
&fl6.saddr, tun_key->tp_dst,
tunnel_id_to_key32(tun_key->tun_id));
@@ -2455,11 +2529,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
- REP_ETH);
- struct net_device *up_dev = uplink_rpriv->netdev;
unsigned short family = ip_tunnel_info_af(tun_info);
- struct mlx5e_priv *up_priv = netdev_priv(up_dev);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct ip_tunnel_key *key = &tun_info->key;
struct mlx5e_encap_entry *e;
@@ -2479,7 +2549,7 @@ vxlan_encap_offload_err:
return -EOPNOTSUPP;
}
- if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
+ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
tunnel_type = MLX5_HEADER_TYPE_VXLAN;
} else {
@@ -2535,6 +2605,56 @@ out_err:
return err;
}
+static int parse_tc_vlan_action(struct mlx5e_priv *priv,
+ const struct tc_action *a,
+ struct mlx5_esw_flow_attr *attr,
+ u32 *action)
+{
+ u8 vlan_idx = attr->total_vlan;
+
+ if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
+ return -EOPNOTSUPP;
+
+ if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
+ if (vlan_idx) {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
+ MLX5_FS_VLAN_DEPTH))
+ return -EOPNOTSUPP;
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
+ } else {
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ }
+ } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
+ attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a);
+ attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a);
+ attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a);
+ if (!attr->vlan_proto[vlan_idx])
+ attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
+
+ if (vlan_idx) {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
+ MLX5_FS_VLAN_DEPTH))
+ return -EOPNOTSUPP;
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+ } else {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
+ (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
+ tcf_vlan_push_prio(a)))
+ return -EOPNOTSUPP;
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ }
+ } else { /* action is TCA_VLAN_ACT_MODIFY */
+ return -EOPNOTSUPP;
+ }
+
+ attr->total_vlan = vlan_idx + 1;
+
+ return 0;
+}
+
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
@@ -2546,6 +2666,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
LIST_HEAD(actions);
bool encap = false;
u32 action = 0;
+ int err;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
@@ -2562,8 +2683,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
if (is_tcf_pedit(a)) {
- int err;
-
err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
parse_attr);
if (err)
@@ -2630,23 +2749,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
if (is_tcf_vlan(a)) {
- if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
- action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
- action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- attr->vlan_vid = tcf_vlan_push_vid(a);
- if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) {
- attr->vlan_prio = tcf_vlan_push_prio(a);
- attr->vlan_proto = tcf_vlan_push_proto(a);
- if (!attr->vlan_proto)
- attr->vlan_proto = htons(ETH_P_8021Q);
- } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
- tcf_vlan_push_prio(a)) {
- return -EOPNOTSUPP;
- }
- } else { /* action is TCA_VLAN_ACT_MODIFY */
- return -EOPNOTSUPP;
- }
+ err = parse_tc_vlan_action(priv, a, attr, &action);
+
+ if (err)
+ return err;
+
attr->mirror_count = attr->out_count;
continue;
}
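/* --- illustrative sketch, not part of the patch ---
 * The ENC_IP match added in en_tc.c above splits the 8-bit tos byte the
 * same way the IP header does: the low 2 bits are ECN and the upper 6
 * bits are DSCP, hence the `& 0x3` / `>> 2` pair written into ip_ecn
 * and ip_dscp. The arithmetic in isolation:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t tos  = 0xb8;      /* e.g. DSCP EF (46), ECN 0 */
	uint8_t ecn  = tos & 0x3; /* low two bits */
	uint8_t dscp = tos >> 2;  /* upper six bits */

	printf("tos=%#x -> dscp=%u ecn=%u\n", tos, dscp, ecn);
	return 0;
}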
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f29deb44bf3b..ae73ea992845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -66,22 +66,21 @@ static inline void mlx5e_tx_dma_unmap(struct device *pdev,
}
}
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
+{
+ return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
+}
+
static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
dma_addr_t addr,
u32 size,
enum mlx5e_dma_map_type map_type)
{
- u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
-
- sq->db.dma_fifo[i].addr = addr;
- sq->db.dma_fifo[i].size = size;
- sq->db.dma_fifo[i].type = map_type;
- sq->dma_fifo_pc++;
-}
+ struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
-static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
-{
- return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
+ dma->addr = addr;
+ dma->size = size;
+ dma->type = map_type;
}
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
@@ -111,10 +110,11 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
#endif
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int channel_ix = fallback(dev, skb);
+ int channel_ix = fallback(dev, skb, NULL);
u16 num_channels;
int up = 0;
@@ -228,7 +228,10 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
- ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
+ else
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
stats->tso_packets++;
stats->tso_bytes += skb->len - ihs;
}
@@ -443,12 +446,11 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
-#ifdef CONFIG_MLX5_ACCEL
/* might send skbs and update wqe and pi */
skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
if (unlikely(!skb))
return NETDEV_TX_OK;
-#endif
+
return mlx5e_sq_xmit(sq, skb, wqe, pi);
}
@@ -466,6 +468,7 @@ static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
+ struct mlx5e_sq_stats *stats;
struct mlx5e_txqsq *sq;
struct mlx5_cqe64 *cqe;
u32 dma_fifo_cc;
@@ -483,6 +486,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
if (!cqe)
return false;
+ stats = sq->stats;
+
npkts = 0;
nbytes = 0;
@@ -511,7 +516,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
queue_work(cq->channel->priv->wq,
&sq->recover.recover_work);
}
- sq->stats->cqe_err++;
+ stats->cqe_err++;
}
do {
@@ -556,6 +561,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+ stats->cqes += i;
+
mlx5_cqwq_update_db_record(&cq->wq);
/* ensure cq space is freed before enabling more cqes */
@@ -571,7 +578,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
MLX5E_SQ_STOP_ROOM) &&
!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
netif_tx_wake_queue(sq->txq);
- sq->stats->wake++;
+ stats->wake++;
}
return (i == MLX5E_TX_CQ_POLL_BUDGET);
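/* --- illustrative sketch, not part of the patch ---
 * The reshuffled mlx5e_dma_push() above now goes through
 * mlx5e_dma_get(), which maps a free-running producer counter onto a
 * power-of-two ring with `i & mask`, so the counter never needs to be
 * wrapped by hand. A minimal model of that indexing (sizes invented):
 */
#include <stdint.h>
#include <stdio.h>

#define FIFO_SIZE 8 /* must be a power of two */
#define FIFO_MASK (FIFO_SIZE - 1)

struct dma_entry { uint64_t addr; uint32_t size; };

static struct dma_entry fifo[FIFO_SIZE];
static uint32_t fifo_pc; /* free-running producer counter */

static struct dma_entry *dma_get(uint32_t i)
{
	return &fifo[i & FIFO_MASK]; /* wraps automatically */
}

static void dma_push(uint64_t addr, uint32_t size)
{
	struct dma_entry *e = dma_get(fifo_pc++);

	e->addr = addr;
	e->size = size;
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++) /* pushes 8..9 overwrite slots 0..1 */
		dma_push(0x1000 + i, 64);
	printf("slot 1 addr=%#llx\n", (unsigned long long)dma_get(1)->addr);
	return 0;
}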
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 1b17f682693b..85d517360157 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -32,6 +32,7 @@
#include <linux/irq.h>
#include "en.h"
+#include "en/xdp.h"
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
@@ -74,13 +75,18 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
+ struct mlx5e_ch_stats *ch_stats = c->stats;
bool busy = false;
int work_done = 0;
int i;
+ ch_stats->poll++;
+
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
+ busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
+
if (c->xdp)
busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
@@ -94,6 +100,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (busy) {
if (likely(mlx5e_channel_no_affinity_change(c)))
return budget;
+ ch_stats->aff_change++;
if (budget && work_done == budget)
work_done--;
}
@@ -101,6 +108,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (unlikely(!napi_complete_done(napi, work_done)))
return work_done;
+ ch_stats->arm++;
+
for (i = 0; i < c->num_tc; i++) {
mlx5e_handle_tx_dim(&c->sq[i]);
mlx5e_cq_arm(&c->sq[i].cq);
@@ -110,6 +119,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_cq_arm(&c->rq.cq);
mlx5e_cq_arm(&c->icosq.cq);
+ mlx5e_cq_arm(&c->xdpsq.cq);
return work_done;
}
@@ -118,8 +128,9 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
- cq->event_ctr++;
napi_schedule(cq->napi);
+ cq->event_ctr++;
+ cq->channel->stats->events++;
}
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
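/* --- illustrative sketch, not part of the patch ---
 * The en_txrx.c hunks above add per-channel accounting around the usual
 * NAPI contract: return the full budget while still busy, otherwise
 * complete and re-arm the CQs. A skeletal model of that control flow
 * (counter and helper names invented):
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long ch_events, ch_poll, ch_arm;

static bool poll_queues(int budget, int *work_done)
{
	*work_done = budget / 2; /* pretend half the budget was used */
	return false;            /* not busy anymore */
}

static int napi_poll(int budget)
{
	int work_done;

	ch_poll++;               /* counted on every invocation */
	if (poll_queues(budget, &work_done))
		return budget;   /* still busy: poll again */

	ch_arm++;                /* completing: re-arm interrupts */
	return work_done;
}

int main(void)
{
	int ret;

	ch_events++;             /* a completion event fired */
	ret = napi_poll(64);
	printf("ret=%d events=%lu poll=%lu arm=%lu\n",
	       ret, ch_events, ch_poll, ch_arm);
	return 0;
}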
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 406c23862f5f..48864f4988a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -40,6 +40,8 @@
#include "mlx5_core.h"
#include "fpga/core.h"
#include "eswitch.h"
+#include "lib/clock.h"
+#include "diag/fw_tracer.h"
enum {
MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
@@ -168,6 +170,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
case MLX5_EVENT_TYPE_GENERAL_EVENT:
return "MLX5_EVENT_TYPE_GENERAL_EVENT";
+ case MLX5_EVENT_TYPE_DEVICE_TRACER:
+ return "MLX5_EVENT_TYPE_DEVICE_TRACER";
default:
return "Unrecognized event";
}
@@ -576,6 +580,11 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
case MLX5_EVENT_TYPE_GENERAL_EVENT:
general_event_handler(dev, eqe);
break;
+
+ case MLX5_EVENT_TYPE_DEVICE_TRACER:
+ mlx5_fw_tracer_event(dev, eqe);
+ break;
+
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
@@ -853,6 +862,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, temp_warn_event))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);
+ if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 40dba9e8af92..2b252cde5cc2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -246,7 +246,7 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}
-static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
+static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
@@ -1469,7 +1469,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
return;
mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num,
vport->info.link_state);
mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
@@ -1582,9 +1582,9 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
esw_vport_disable_qos(esw, vport_num);
if (vport_num && esw->mode == SRIOV_LEGACY) {
mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num,
- MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+ MLX5_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
esw_vport_destroy_drop_counters(vport);
@@ -1618,7 +1618,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
esw->mode = mode;
if (mode == SRIOV_LEGACY) {
- err = esw_create_legacy_fdb_table(esw, nvfs + 1);
+ err = esw_create_legacy_fdb_table(esw);
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
@@ -1736,7 +1736,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
struct mlx5_vport *vport = &esw->vports[vport_num];
vport->vport = vport_num;
- vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
vport->dev = dev;
INIT_WORK(&vport->vport_change_handler,
esw_vport_change_handler);
@@ -1860,7 +1860,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
evport = &esw->vports[vport];
err = mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport, link_state);
if (err) {
mlx5_core_warn(esw->dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index b174da2884c5..c17bfcab517c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -38,6 +38,7 @@
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
+#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#ifdef CONFIG_MLX5_ESWITCH
@@ -256,9 +257,10 @@ struct mlx5_esw_flow_attr {
int out_count;
int action;
- __be16 vlan_proto;
- u16 vlan_vid;
- u8 vlan_prio;
+ __be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
+ u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
+ u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
+ u8 total_vlan;
bool vlan_handled;
u32 encap_id;
u32 mod_hdr_id;
@@ -282,10 +284,17 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos, u8 set_flags);
-static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev)
+static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
+ u8 vlan_depth)
{
- return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);
+ bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);
+
+ if (vlan_depth == 1)
+ return ret;
+
+ return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
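/* --- illustrative sketch, not part of the patch ---
 * mlx5_eswitch_vlan_actions_supported() above now takes the required
 * VLAN depth: depth 1 only needs the pop_vlan/push_vlan capabilities,
 * while depth 2 (QinQ) additionally needs the *_2 bits. The same shape
 * in isolation (capability flags invented):
 */
#include <stdbool.h>
#include <stdio.h>

struct caps { bool pop, push, pop2, push2; };

static bool vlan_actions_supported(const struct caps *c, int depth)
{
	bool ok = c->pop && c->push;

	if (depth == 1)
		return ok;
	return ok && c->pop2 && c->push2; /* double (QinQ) push/pop */
}

int main(void)
{
	struct caps c = { .pop = true, .push = true };

	printf("depth1=%d depth2=%d\n",
	       vlan_actions_supported(&c, 1), vlan_actions_supported(&c, 2));
	return 0;
}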
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f32e69170b30..f72b5c9dcfe9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -66,13 +66,18 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
flow_act.action = attr->action;
/* if per flow vlan pop/push is emulated, don't set that into the firmware */
- if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
+ if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
- flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto);
- flow_act.vlan[0].vid = attr->vlan_vid;
- flow_act.vlan[0].prio = attr->vlan_prio;
+ flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
+ flow_act.vlan[0].vid = attr->vlan_vid[0];
+ flow_act.vlan[0].prio = attr->vlan_prio[0];
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
+ flow_act.vlan[1].vid = attr->vlan_vid[1];
+ flow_act.vlan[1].prio = attr->vlan_prio[1];
+ }
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
@@ -266,7 +271,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
/* protects against (1) setting rules with different vlans to push and
* (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
*/
- if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid))
+ if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
goto out_notsupp;
return 0;
@@ -284,7 +289,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
int err = 0;
/* nop if we're on the vlan push/pop non emulation mode */
- if (mlx5_eswitch_vlan_actions_supported(esw->dev))
+ if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return 0;
push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
@@ -324,11 +329,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (vport->vlan_refcount)
goto skip_set_push;
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0,
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
SET_VLAN_INSERT | SET_VLAN_STRIP);
if (err)
goto out;
- vport->vlan = attr->vlan_vid;
+ vport->vlan = attr->vlan_vid[0];
skip_set_push:
vport->vlan_refcount++;
}
@@ -347,7 +352,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int err = 0;
/* nop if we're on the vlan push/pop non emulation mode */
- if (mlx5_eswitch_vlan_actions_supported(esw->dev))
+ if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return 0;
if (!attr->vlan_handled)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index c9736238604a..5cf5f2a9d51f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -129,6 +129,7 @@ static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
void *ptr)
{
+ unsigned long flags;
int ret;
/* TLS metadata format is 1 byte for syndrome followed
@@ -139,9 +140,9 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);
idr_preload(GFP_KERNEL);
- spin_lock_irq(idr_spinlock);
+ spin_lock_irqsave(idr_spinlock, flags);
ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
- spin_unlock_irq(idr_spinlock);
+ spin_unlock_irqrestore(idr_spinlock, flags);
idr_preload_end();
return ret;
@@ -157,6 +158,13 @@ static void mlx5_fpga_tls_release_swid(struct idr *idr,
spin_unlock_irqrestore(idr_spinlock, flags);
}
+static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_dma_buf *buf, u8 status)
+{
+ kfree(buf);
+}
+
struct mlx5_teardown_stream_context {
struct mlx5_fpga_tls_command_context cmd;
u32 swid;
@@ -178,9 +186,13 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
mlx5_fpga_err(fdev,
"Teardown stream failed with syndrome = %d",
syndrome);
- else
+ else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
- &fdev->tls->idr_spinlock,
+ &fdev->tls->tx_idr_spinlock,
+ ctx->swid);
+ else
+ mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
+ &fdev->tls->rx_idr_spinlock,
ctx->swid);
}
mlx5_fpga_tls_put_command_ctx(cmd);
@@ -196,6 +208,40 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
MLX5_GET(tls_flow, flow, direction_sx));
}
+int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn)
+{
+ struct mlx5_fpga_dma_buf *buf;
+ int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
+ void *flow;
+ void *cmd;
+ int ret;
+
+ buf = kzalloc(size, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ cmd = (buf + 1);
+
+ rcu_read_lock();
+ flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+ rcu_read_unlock();
+ mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+
+ MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
+ MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
+ MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
+ MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);
+
+ buf->sg[0].data = cmd;
+ buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
+ buf->complete = mlx_tls_kfree_complete;
+
+ ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+
+ return ret;
+}
+
static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
void *flow, u32 swid, gfp_t flags)
{
@@ -223,14 +269,18 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
mlx5_fpga_tls_teardown_completion);
}
-void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags)
+void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ gfp_t flags, bool direction_sx)
{
struct mlx5_fpga_tls *tls = mdev->fpga->tls;
void *flow;
rcu_read_lock();
- flow = idr_find(&tls->tx_idr, swid);
+ if (direction_sx)
+ flow = idr_find(&tls->tx_idr, swid);
+ else
+ flow = idr_find(&tls->rx_idr, swid);
+
rcu_read_unlock();
if (!flow) {
@@ -289,9 +339,11 @@ mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
* the command context because we might not have received
* the tx completion yet.
*/
- mlx5_fpga_tls_del_tx_flow(fdev->mdev,
- MLX5_GET(tls_cmd, tls_cmd, swid),
- GFP_ATOMIC);
+ mlx5_fpga_tls_del_flow(fdev->mdev,
+ MLX5_GET(tls_cmd, tls_cmd, swid),
+ GFP_ATOMIC,
+ MLX5_GET(tls_cmd, tls_cmd,
+ direction_sx));
}
mlx5_fpga_tls_put_command_ctx(cmd);
@@ -415,8 +467,7 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
if (err)
goto error;
- if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 |
- MLX5_ACCEL_TLS_AES_GCM128))) {
+ if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
err = -ENOTSUPP;
goto error;
}
@@ -438,7 +489,9 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
INIT_LIST_HEAD(&tls->pending_cmds);
idr_init(&tls->tx_idr);
- spin_lock_init(&tls->idr_spinlock);
+ idr_init(&tls->rx_idr);
+ spin_lock_init(&tls->tx_idr_spinlock);
+ spin_lock_init(&tls->rx_idr_spinlock);
fdev->tls = tls;
return 0;
@@ -500,9 +553,9 @@ static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
return 0;
}
-static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info, u32 swid,
- u32 tcp_sn)
+static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 swid, u32 tcp_sn)
{
u32 caps = mlx5_fpga_tls_device_caps(mdev);
struct mlx5_setup_stream_context *ctx;
@@ -533,30 +586,42 @@ out:
return ret;
}
-int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid)
+int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx)
{
struct mlx5_fpga_tls *tls = mdev->fpga->tls;
int ret = -ENOMEM;
u32 swid;
- ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow);
+ if (direction_sx)
+ ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
+ &tls->tx_idr_spinlock, flow);
+ else
+ ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
+ &tls->rx_idr_spinlock, flow);
+
if (ret < 0)
return ret;
swid = ret;
- MLX5_SET(tls_flow, flow, direction_sx, 1);
+ MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);
- ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
- start_offload_tcp_sn);
+ ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
+ start_offload_tcp_sn);
if (ret && ret != -EINTR)
goto free_swid;
*p_swid = swid;
return 0;
free_swid:
- mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid);
+ if (direction_sx)
+ mlx5_fpga_tls_release_swid(&tls->tx_idr,
+ &tls->tx_idr_spinlock, swid);
+ else
+ mlx5_fpga_tls_release_swid(&tls->rx_idr,
+ &tls->rx_idr_spinlock, swid);
return ret;
}
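
All four call sites in this file repeat the same direction_sx choice between the TX and RX IDR/lock pairs. A small helper capturing that selection, as a sketch; tls_swid_tables and swid_tables_pick are hypothetical names, not part of the driver:

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct tls_swid_tables {
	struct idr tx_idr, rx_idr;
	spinlock_t tx_lock, rx_lock;
};

static void swid_tables_pick(struct tls_swid_tables *t, bool direction_sx,
			     struct idr **idr, spinlock_t **lock)
{
	*idr  = direction_sx ? &t->tx_idr : &t->rx_idr;
	*lock = direction_sx ? &t->tx_lock : &t->rx_lock;
}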
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
index 800a214e4e49..3b2e37bf76fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
@@ -46,15 +46,18 @@ struct mlx5_fpga_tls {
struct mlx5_fpga_conn *conn;
struct idr tx_idr;
- spinlock_t idr_spinlock; /* protects the IDR */
+ struct idr rx_idr;
+ spinlock_t tx_idr_spinlock; /* protects the IDR */
+ spinlock_t rx_idr_spinlock; /* protects the IDR */
};
-int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid);
+int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx);
-void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags);
+void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ gfp_t flags, bool direction_sx);
bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
@@ -65,4 +68,7 @@ static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
return mdev->fpga->tls->caps;
}
+int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn);
+
#endif /* __MLX5_FPGA_TLS_H__ */
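
With the header rename, a caller picks the direction explicitly instead of calling a *_tx_* variant. A hedged usage fragment (hypothetical caller; mdev, flow and crypto_info assumed already set up, error handling elided):

u32 swid;
int err;

err = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
			     start_offload_tcp_sn, &swid,
			     true /* direction_sx: TX */);
if (err)
	return err;
/* ... on stream teardown ... */
mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, true /* TX */);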
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 56cee33cfb83..f418541af7cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1803,7 +1803,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
- int dest_num)
+ int num_dest)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct mlx5_flow_destination gen_dest = {};
@@ -1816,7 +1816,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!fwd_next_prio_supported(ft))
return ERR_PTR(-EOPNOTSUPP);
- if (dest_num)
+ if (num_dest)
return ERR_PTR(-EINVAL);
mutex_lock(&root->chain_lock);
next_ft = find_next_chained_ft(prio);
@@ -1824,7 +1824,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
gen_dest.ft = next_ft;
dest = &gen_dest;
- dest_num = 1;
+ num_dest = 1;
flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
} else {
mutex_unlock(&root->chain_lock);
@@ -1832,7 +1832,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
}
}
- handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num);
+ handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!IS_ERR_OR_NULL(handle) &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index a8eecedd46c2..02e2e4575e4f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -33,8 +33,15 @@
#ifndef __LIB_CLOCK_H__
#define __LIB_CLOCK_H__
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
void mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
+void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
+
+static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
+{
+ return mdev->clock.ptp ? ptp_clock_index(mdev->clock.ptp) : -1;
+}
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
u64 timestamp)
@@ -48,4 +55,21 @@ static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
return ns_to_ktime(nsec);
}
+#else
+static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) {}
+
+static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
+{
+ return -1;
+}
+
+static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
+ u64 timestamp)
+{
+ return 0;
+}
+#endif
+
#endif
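
The header now pairs the real PTP-backed declarations with empty static inline stubs, so callers compile unchanged when CONFIG_PTP_1588_CLOCK is off and need no #ifdefs of their own. A generic header-style sketch of the IS_ENABLED() stub idiom; CONFIG_FOO, struct foo_dev, foo_init() and foo_index() are made up:

#include <linux/kconfig.h>

struct foo_dev;

#if IS_ENABLED(CONFIG_FOO)
void foo_init(struct foo_dev *fdev);
int foo_index(struct foo_dev *fdev);
#else
static inline void foo_init(struct foo_dev *fdev) {}
static inline int foo_index(struct foo_dev *fdev) { return -1; }
#endif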
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
new file mode 100644
index 000000000000..9a8fd762167b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "vxlan.h"
+
+struct mlx5_vxlan {
+ struct mlx5_core_dev *mdev;
+ spinlock_t lock; /* protect vxlan table */
+ /* max_num_ports is usually 4; 16 buckets is more than enough */
+ DECLARE_HASHTABLE(htable, 4);
+ int num_ports;
+ struct mutex sync_lock; /* sync add/del port HW operations */
+};
+
+struct mlx5_vxlan_port {
+ struct hlist_node hlist;
+ atomic_t refcount;
+ u16 udp_port;
+};
+
+static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
+}
+
+static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
+{
+ u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
+
+ MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
+ MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
+ MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
+
+ MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
+ MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static struct mlx5_vxlan_port*
+mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+
+ hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) {
+ if (vxlanp->udp_port == port)
+ return vxlanp;
+ }
+
+ return NULL;
+}
+
+struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+
+ if (!mlx5_vxlan_allowed(vxlan))
+ return NULL;
+
+ spin_lock_bh(&vxlan->lock);
+ vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
+ spin_unlock_bh(&vxlan->lock);
+
+ return vxlanp;
+}
+
+int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+ int ret = -ENOSPC;
+
+ vxlanp = mlx5_vxlan_lookup_port(vxlan, port);
+ if (vxlanp) {
+ atomic_inc(&vxlanp->refcount);
+ return 0;
+ }
+
+ mutex_lock(&vxlan->sync_lock);
+ if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
+ mlx5_core_info(vxlan->mdev,
+ "UDP port (%d) not offloaded, max number of offloaded UDP ports (%d) reached\n",
+ port, mlx5_vxlan_max_udp_ports(vxlan->mdev));
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port);
+ if (ret)
+ goto unlock;
+
+ vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL);
+ if (!vxlanp) {
+ ret = -ENOMEM;
+ goto err_delete_port;
+ }
+
+ vxlanp->udp_port = port;
+ atomic_set(&vxlanp->refcount, 1);
+
+ spin_lock_bh(&vxlan->lock);
+ hash_add(vxlan->htable, &vxlanp->hlist, port);
+ spin_unlock_bh(&vxlan->lock);
+
+ vxlan->num_ports++;
+ mutex_unlock(&vxlan->sync_lock);
+ return 0;
+
+err_delete_port:
+ mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
+
+unlock:
+ mutex_unlock(&vxlan->sync_lock);
+ return ret;
+}
+
+int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+ bool remove = false;
+ int ret = 0;
+
+ mutex_lock(&vxlan->sync_lock);
+
+ spin_lock_bh(&vxlan->lock);
+ vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
+ if (!vxlanp) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ if (atomic_dec_and_test(&vxlanp->refcount)) {
+ hash_del(&vxlanp->hlist);
+ remove = true;
+ }
+
+out_unlock:
+ spin_unlock_bh(&vxlan->lock);
+
+ if (remove) {
+ mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
+ kfree(vxlanp);
+ vxlan->num_ports--;
+ }
+
+ mutex_unlock(&vxlan->sync_lock);
+
+ return ret;
+}
+
+struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_vxlan *vxlan;
+
+ if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev))
+ return ERR_PTR(-ENOTSUPP);
+
+ vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
+ if (!vxlan)
+ return ERR_PTR(-ENOMEM);
+
+ vxlan->mdev = mdev;
+ mutex_init(&vxlan->sync_lock);
+ spin_lock_init(&vxlan->lock);
+ hash_init(vxlan->htable);
+
+ /* Hardware adds 4789 by default */
+ mlx5_vxlan_add_port(vxlan, 4789);
+
+ return vxlan;
+}
+
+void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
+{
+ struct mlx5_vxlan_port *vxlanp;
+ struct hlist_node *tmp;
+ int bkt;
+
+ if (!mlx5_vxlan_allowed(vxlan))
+ return;
+
+ /* Lockless since we are the only hash table consumers */
+ hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {
+ hash_del(&vxlanp->hlist);
+ mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);
+ kfree(vxlanp);
+ }
+
+ kfree(vxlan);
+}
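
The new file replaces the old en-layer radix tree with a core-level, refcounted hashtable keyed by UDP port, with add/del serialized by sync_lock and lookups under a bh spinlock. A hedged usage fragment for the exported API (hypothetical caller; mdev is an existing mlx5_core_dev):

struct mlx5_vxlan *vxlan;

vxlan = mlx5_vxlan_create(mdev);	/* ERR_PTR() when not supported */
if (mlx5_vxlan_allowed(vxlan)) {
	if (!mlx5_vxlan_add_port(vxlan, 4790)) {	/* refcounted add */
		if (mlx5_vxlan_lookup_port(vxlan, 4790))
			; /* port is offloaded; datapath may match on it */
		mlx5_vxlan_del_port(vxlan, 4790);	/* drops the reference */
	}
}
mlx5_vxlan_destroy(vxlan);	/* no-op on ERR_PTR, checked internally */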
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
index 5ef6ae7d568a..8fb0eb08fa6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
@@ -33,31 +33,32 @@
#define __MLX5_VXLAN_H__
#include <linux/mlx5/driver.h>
-#include "en.h"
-struct mlx5e_vxlan {
- atomic_t refcount;
- u16 udp_port;
-};
+struct mlx5_vxlan;
+struct mlx5_vxlan_port;
-struct mlx5e_vxlan_work {
- struct work_struct work;
- struct mlx5e_priv *priv;
- sa_family_t sa_family;
- u16 port;
-};
-
-static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
+static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan)
{
- return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
- mlx5_core_is_pf(mdev));
+ /* the "not allowed" reason is encoded in the vxlan pointer as an
+ * error returned by mlx5_vxlan_create
+ */
+ return !IS_ERR_OR_NULL(vxlan);
}
-void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
-
-void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
- u16 port, int add);
-struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
+#if IS_ENABLED(CONFIG_VXLAN)
+struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev);
+void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
+int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
+int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
+struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
+#else
+static inline struct mlx5_vxlan*
+mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); }
+static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
+static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
+static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
+static inline struct mlx5_vxlan_port*
+mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return NULL; }
+#endif
#endif /* __MLX5_VXLAN_H__ */
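
mlx5_vxlan_allowed() folds the "not supported" state into the object pointer itself: mlx5_vxlan_create() returns an ERR_PTR when the capability is missing, so there is no separate flag to keep in sync with the pointer. A generic sketch of that pattern; the foo_* names and dev_has_foo() probe are illustrative:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>

struct device;
bool dev_has_foo(struct device *dev);	/* hypothetical capability probe */

struct foo { int dummy; };

struct foo *foo_create(struct device *dev)
{
	if (!dev_has_foo(dev))
		return ERR_PTR(-EOPNOTSUPP);
	return kzalloc(sizeof(struct foo), GFP_KERNEL);
}

static inline bool foo_allowed(struct foo *f)
{
	return !IS_ERR_OR_NULL(f);	/* NULL and ERR_PTR both mean "no" */
}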
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f9b950e1bd85..cf3e4a659052 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -62,6 +62,8 @@
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/clock.h"
+#include "lib/vxlan.h"
+#include "diag/fw_tracer.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -321,7 +323,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
struct mlx5_eq_table *table = &priv->eq_table;
- int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
+ int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+ MLX5_CAP_GEN(dev, max_num_eqs) :
+ 1 << MLX5_CAP_GEN(dev, log_max_eq);
int nvec;
int err;
@@ -960,6 +964,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_init_clock(dev);
+ dev->vxlan = mlx5_vxlan_create(dev);
+
err = mlx5_init_rl_table(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init rate limiting\n");
@@ -990,6 +996,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto err_sriov_cleanup;
}
+ dev->tracer = mlx5_fw_tracer_create(dev);
+
return 0;
err_sriov_cleanup:
@@ -1001,6 +1009,7 @@ err_mpfs_cleanup:
err_rl_cleanup:
mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
+ mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
@@ -1015,11 +1024,13 @@ out:
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
+ mlx5_fw_tracer_destroy(dev->tracer);
mlx5_fpga_cleanup(dev);
mlx5_sriov_cleanup(dev);
mlx5_eswitch_cleanup(dev->priv.eswitch);
mlx5_mpfs_cleanup(dev);
mlx5_cleanup_rl_table(dev);
+ mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
mlx5_cleanup_mkey_table(dev);
@@ -1167,10 +1178,16 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_put_uars;
}
+ err = mlx5_fw_tracer_init(dev->tracer);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init FW tracer\n");
+ goto err_fw_tracer;
+ }
+
err = alloc_comp_eqs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
- goto err_stop_eqs;
+ goto err_comp_eqs;
}
err = mlx5_irq_set_affinity_hints(dev);
@@ -1252,7 +1269,10 @@ err_fpga_start:
err_affinity_hints:
free_comp_eqs(dev);
-err_stop_eqs:
+err_comp_eqs:
+ mlx5_fw_tracer_cleanup(dev->tracer);
+
+err_fw_tracer:
mlx5_stop_eqs(dev);
err_put_uars:
@@ -1320,6 +1340,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_fpga_device_stop(dev);
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
+ mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_stop_eqs(dev);
mlx5_put_uars_page(dev, priv->uar);
mlx5_free_irq_vectors(dev);
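
The tracer hooks slot into mlx5_load_one()'s existing goto ladder, and the old err_stop_eqs label is renamed so each label still undoes exactly the steps that had succeeded before the jump. A generic sketch of that unwind invariant; struct ctx and the step_*/undo_* functions are illustrative:

struct ctx;
int step_a(struct ctx *c), step_b(struct ctx *c), step_c(struct ctx *c);
void undo_a(struct ctx *c), undo_b(struct ctx *c);

static int load_one_sketch(struct ctx *c)
{
	int err;

	err = step_a(c);
	if (err)
		return err;

	err = step_b(c);	/* e.g. FW tracer init */
	if (err)
		goto err_b;

	err = step_c(c);	/* e.g. completion EQ allocation */
	if (err)
		goto err_c;

	return 0;

err_c:
	undo_b(c);	/* falling through undoes every completed step */
err_b:
	undo_a(c);
	return err;
}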
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 49955117ae36..b4134fa0bba3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -99,7 +99,6 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
struct mlx5_pagefault *pfault);
-void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 7eecd5b07bb1..b02af317c125 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -62,17 +62,6 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
return MLX5_GET(query_vport_state_out, out, state);
}
-EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
-
-u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
-{
- u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
-
- _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
-
- return MLX5_GET(query_vport_state_out, out, admin_state);
-}
-EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state)
@@ -90,7 +79,6 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
-EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
u32 *out, int outlen)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
deleted file mode 100644
index 2f74953e4561..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include "mlx5_core.h"
-#include "vxlan.h"
-
-void mlx5e_vxlan_init(struct mlx5e_priv *priv)
-{
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
-
- spin_lock_init(&vxlan_db->lock);
- INIT_RADIX_TREE(&vxlan_db->tree, GFP_ATOMIC);
-}
-
-static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
-{
- u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
-
- MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
- MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
- MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
-{
- u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
-
- MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
- MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
- MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
-{
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- struct mlx5e_vxlan *vxlan;
-
- spin_lock_bh(&vxlan_db->lock);
- vxlan = radix_tree_lookup(&vxlan_db->tree, port);
- spin_unlock_bh(&vxlan_db->lock);
-
- return vxlan;
-}
-
-static void mlx5e_vxlan_add_port(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- u16 port = vxlan_work->port;
- struct mlx5e_vxlan *vxlan;
- int err;
-
- mutex_lock(&priv->state_lock);
- vxlan = mlx5e_vxlan_lookup_port(priv, port);
- if (vxlan) {
- atomic_inc(&vxlan->refcount);
- goto free_work;
- }
-
- if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
- goto free_work;
-
- vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
- if (!vxlan)
- goto err_delete_port;
-
- vxlan->udp_port = port;
- atomic_set(&vxlan->refcount, 1);
-
- spin_lock_bh(&vxlan_db->lock);
- err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
- spin_unlock_bh(&vxlan_db->lock);
- if (err)
- goto err_free;
-
- goto free_work;
-
-err_free:
- kfree(vxlan);
-err_delete_port:
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
-free_work:
- mutex_unlock(&priv->state_lock);
- kfree(vxlan_work);
-}
-
-static void mlx5e_vxlan_del_port(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- u16 port = vxlan_work->port;
- struct mlx5e_vxlan *vxlan;
- bool remove = false;
-
- mutex_lock(&priv->state_lock);
- spin_lock_bh(&vxlan_db->lock);
- vxlan = radix_tree_lookup(&vxlan_db->tree, port);
- if (!vxlan)
- goto out_unlock;
-
- if (atomic_dec_and_test(&vxlan->refcount)) {
- radix_tree_delete(&vxlan_db->tree, port);
- remove = true;
- }
-
-out_unlock:
- spin_unlock_bh(&vxlan_db->lock);
-
- if (remove) {
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
- kfree(vxlan);
- }
- mutex_unlock(&priv->state_lock);
- kfree(vxlan_work);
-}
-
-void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
- u16 port, int add)
-{
- struct mlx5e_vxlan_work *vxlan_work;
-
- vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
- if (!vxlan_work)
- return;
-
- if (add)
- INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
- else
- INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
-
- vxlan_work->priv = priv;
- vxlan_work->port = port;
- vxlan_work->sa_family = sa_family;
- queue_work(priv->wq, &vxlan_work->work);
-}
-
-void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
-{
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- struct mlx5e_vxlan *vxlan;
- unsigned int port = 0;
-
- /* Lockless since we are the only radix-tree consumers, wq is disabled */
- while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
- port = vxlan->udp_port;
- radix_tree_delete(&vxlan_db->tree, port);
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
- kfree(vxlan);
- }
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 0b47126815b6..2bd4c3184eba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -229,6 +229,11 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
return !wq->cur_sz;
}
+static inline int mlx5_wq_ll_missing(struct mlx5_wq_ll *wq)
+{
+ return wq->fbc.sz_m1 - wq->cur_sz;
+}
+
static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
{
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
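
mlx5_wq_ll_missing() reports how many entries can still be posted: fbc.sz_m1 is the ring size minus one, which on a linked-list WQ is the usable capacity, so missing = capacity - cur_sz. A hedged refill fragment built on it; post_one() is a hypothetical helper that allocates and posts a single WQE:

while (mlx5_wq_ll_missing(wq) > 0) {
	if (post_one(wq))
		break;	/* stop on allocation or posting failure */
}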
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 82827a8d3d67..8a291eb36c64 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -78,6 +78,7 @@ config MLXSW_SPECTRUM
depends on IPV6 || IPV6=n
depends on NET_IPGRE || NET_IPGRE=n
depends on IPV6_GRE || IPV6_GRE=n
+ select GENERIC_ALLOCATOR
select PARMAN
select MLXFW
default m
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 0cadcabfe86f..68fa44a41485 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -15,11 +15,18 @@ mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
- spectrum_kvdl.o spectrum_acl_tcam.o \
- spectrum_acl.o spectrum_flower.o \
- spectrum_cnt.o spectrum_fid.o \
- spectrum_ipip.o spectrum_acl_flex_actions.o \
- spectrum_mr.o spectrum_mr_tcam.o \
+ spectrum1_kvdl.o spectrum2_kvdl.o \
+ spectrum_kvdl.o \
+ spectrum_acl_tcam.o spectrum_acl_ctcam.o \
+ spectrum_acl_atcam.o spectrum_acl_erp.o \
+ spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
+ spectrum_acl.o \
+ spectrum_flower.o spectrum_cnt.o \
+ spectrum_fid.o spectrum_ipip.o \
+ spectrum_acl_flex_actions.o \
+ spectrum_acl_flex_keys.o \
+ spectrum1_mr_tcam.o spectrum2_mr_tcam.o \
+ spectrum_mr_tcam.o spectrum_mr.o \
spectrum_qdisc.o spectrum_span.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 2bc48054b685..0772e4339b33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/cmd.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_CMD_H
#define _MLXSW_CMD_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f9c724752a32..81533d7f395c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -759,15 +726,6 @@ static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
return mlxsw_driver;
}
-static void mlxsw_core_driver_put(const char *kind)
-{
- struct mlxsw_driver *mlxsw_driver;
-
- spin_lock(&mlxsw_core_driver_list_lock);
- mlxsw_driver = __driver_find(kind);
- spin_unlock(&mlxsw_core_driver_list_lock);
-}
-
static int mlxsw_devlink_port_split(struct devlink *devlink,
unsigned int port_index,
unsigned int count,
@@ -1115,7 +1073,6 @@ err_bus_init:
if (!reload)
devlink_free(devlink);
err_devlink_alloc:
- mlxsw_core_driver_put(device_kind);
return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
@@ -1123,7 +1080,6 @@ EXPORT_SYMBOL(mlxsw_core_bus_device_register);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
bool reload)
{
- const char *device_kind = mlxsw_core->bus_info->device_kind;
struct devlink *devlink = priv_to_devlink(mlxsw_core);
if (mlxsw_core->reload_fail)
@@ -1144,7 +1100,6 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
reload_fail:
devlink_free(devlink);
- mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 552cfa29c2f7..655ddd204ab2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_CORE_H
#define _MLXSW_CORE_H
@@ -362,6 +329,7 @@ struct mlxsw_fw_rev {
u16 major;
u16 minor;
u16 subminor;
+ u16 can_reset_minor;
};
struct mlxsw_bus_info {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index f6f6a568d66a..c51b2adfc1e1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
- * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -355,9 +324,24 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
block->first_set = mlxsw_afa_set_create(true);
if (!block->first_set)
goto err_first_set_create;
- block->cur_set = block->first_set;
+
+ /* In case the user instructs us to have a dummy first set, we leave
+ * it empty here and create another, real, set right away.
+ */
+ if (mlxsw_afa->ops->dummy_first_set) {
+ block->cur_set = mlxsw_afa_set_create(false);
+ if (!block->cur_set)
+ goto err_second_set_create;
+ block->cur_set->prev = block->first_set;
+ block->first_set->next = block->cur_set;
+ } else {
+ block->cur_set = block->first_set;
+ }
+
return block;
+err_second_set_create:
+ mlxsw_afa_set_destroy(block->first_set);
err_first_set_create:
kfree(block);
return NULL;
@@ -419,11 +403,31 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set);
-u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block)
+char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block)
{
- return block->first_set->kvdl_index;
+ return block->cur_set->ht_key.enc_actions;
}
-EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index);
+EXPORT_SYMBOL(mlxsw_afa_block_cur_set);
+
+u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block)
+{
+ /* The first set is never in KVD linear, so the first set
+ * with a valid KVD linear index is always the second one.
+ */
+ if (WARN_ON(!block->first_set->next))
+ return 0;
+ return block->first_set->next->kvdl_index;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_first_kvdl_index);
+
+int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity)
+{
+ u32 kvdl_index = mlxsw_afa_block_first_kvdl_index(block);
+
+ return block->afa->ops->kvdl_set_activity_get(block->afa->ops_priv,
+ kvdl_index, activity);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_activity_get);
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
{
@@ -724,14 +728,17 @@ mlxsw_afa_vlan_pack(char *payload,
}
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
- u16 vid, u8 pcp, u8 et)
+ u16 vid, u8 pcp, u8 et,
+ struct netlink_ext_ack *extack)
{
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_VLAN_CODE,
MLXSW_AFA_VLAN_SIZE);
- if (IS_ERR(act))
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append vlan_modify action");
return PTR_ERR(act);
+ }
mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
@@ -925,19 +932,23 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
int
mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port,
- const struct net_device *out_dev, bool ingress)
+ const struct net_device *out_dev, bool ingress,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_afa_mirror *mirror;
int err;
mirror = mlxsw_afa_mirror_create(block, local_in_port, out_dev,
ingress);
- if (IS_ERR(mirror))
+ if (IS_ERR(mirror)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create mirror action");
return PTR_ERR(mirror);
-
+ }
err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append mirror action");
goto err_append_allocated_mirror;
+ }
return 0;
@@ -987,23 +998,29 @@ mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
}
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
- u8 local_port, bool in_port)
+ u8 local_port, bool in_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
u32 kvdl_index;
char *act;
int err;
- if (in_port)
+ if (in_port) {
+ NL_SET_ERR_MSG_MOD(extack, "Forwarding to ingress port is not supported");
return -EOPNOTSUPP;
+ }
fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port);
- if (IS_ERR(fwd_entry_ref))
+ if (IS_ERR(fwd_entry_ref)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create forward action");
return PTR_ERR(fwd_entry_ref);
+ }
kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index;
act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
MLXSW_AFA_FORWARD_SIZE);
if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append forward action");
err = PTR_ERR(act);
goto err_append_action;
}
@@ -1068,21 +1085,25 @@ int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter);
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
- u32 *p_counter_index)
+ u32 *p_counter_index,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_afa_counter *counter;
u32 counter_index;
int err;
counter = mlxsw_afa_counter_create(block);
- if (IS_ERR(counter))
+ if (IS_ERR(counter)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create count action");
return PTR_ERR(counter);
+ }
counter_index = counter->counter_index;
err = mlxsw_afa_block_append_allocated_counter(block, counter_index);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append count action");
goto err_append_allocated_counter;
-
+ }
if (p_counter_index)
*p_counter_index = counter_index;
return 0;
@@ -1125,13 +1146,16 @@ static inline void mlxsw_afa_virfwd_pack(char *payload,
mlxsw_afa_virfwd_fid_set(payload, fid);
}
-int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid)
+int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
+ struct netlink_ext_ack *extack)
{
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_VIRFWD_CODE,
MLXSW_AFA_VIRFWD_SIZE);
- if (IS_ERR(act))
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append fid_set action");
return PTR_ERR(act);
+ }
mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
return 0;
}
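
Throughout this file the append helpers gain a struct netlink_ext_ack parameter so a failed hardware-action append surfaces as a readable message to the netlink caller rather than a bare errno. A generic sketch of the pattern; struct blk and blk_append() are stand-ins, and NL_SET_ERR_MSG_MOD assumes kernel module context (KBUILD_MODNAME):

#include <linux/err.h>
#include <linux/netlink.h>
#include <linux/types.h>

struct blk;
char *blk_append(struct blk *b);	/* may return ERR_PTR() */

static int append_action_sketch(struct blk *b, u16 fid,
				struct netlink_ext_ack *extack)
{
	char *act = blk_append(b);

	if (IS_ERR(act)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot append fid_set action");
		return PTR_ERR(act);	/* errno is still propagated */
	}
	/* ... pack fid into the action payload at 'act' ... */
	return 0;
}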
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 3a155d104384..0e3a59dda12e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_CORE_ACL_FLEX_ACTIONS_H
#define _MLXSW_CORE_ACL_FLEX_ACTIONS_H
@@ -45,6 +14,8 @@ struct mlxsw_afa_ops {
int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index,
char *enc_actions, bool is_first);
void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
+ int (*kvdl_set_activity_get)(void *priv, u32 kvdl_index,
+ bool *activity);
int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
@@ -54,6 +25,7 @@ struct mlxsw_afa_ops {
bool ingress, int *p_span_id);
void (*mirror_del)(void *priv, u8 local_in_port, int span_id,
bool ingress);
+ bool dummy_first_set;
};
struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
@@ -64,7 +36,9 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa);
void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block);
int mlxsw_afa_block_commit(struct mlxsw_afa_block *block);
char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
-u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
+char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block);
+u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity);
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
@@ -75,16 +49,21 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
u8 local_in_port,
const struct net_device *out_dev,
- bool ingress);
+ bool ingress,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
- u8 local_port, bool in_port);
+ u8 local_port, bool in_port,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
- u16 vid, u8 pcp, u8 et);
+ u16 vid, u8 pcp, u8 et,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
u32 counter_index);
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
- u32 *p_counter_index);
-int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid);
+ u32 *p_counter_index,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
u16 expected_irif, u16 min_mtu,
bool rmid_valid, u32 kvdl_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index b32a00972e83..785bf01fe2be 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -43,6 +12,7 @@
struct mlxsw_afk {
struct list_head key_info_list;
unsigned int max_blocks;
+ const struct mlxsw_afk_ops *ops;
const struct mlxsw_afk_block *blocks;
unsigned int blocks_count;
};
@@ -69,8 +39,7 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
}
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
- const struct mlxsw_afk_block *blocks,
- unsigned int blocks_count)
+ const struct mlxsw_afk_ops *ops)
{
struct mlxsw_afk *mlxsw_afk;
@@ -79,8 +48,9 @@ struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
return NULL;
INIT_LIST_HEAD(&mlxsw_afk->key_info_list);
mlxsw_afk->max_blocks = max_blocks;
- mlxsw_afk->blocks = blocks;
- mlxsw_afk->blocks_count = blocks_count;
+ mlxsw_afk->ops = ops;
+ mlxsw_afk->blocks = ops->blocks;
+ mlxsw_afk->blocks_count = ops->blocks_count;
WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk));
return mlxsw_afk;
}
@@ -415,61 +385,76 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
}
EXPORT_SYMBOL(mlxsw_afk_values_add_buf);
-static void mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item,
- const struct mlxsw_item *output_item,
- char *storage, char *output_indexed)
+static void mlxsw_sp_afk_encode_u32(const struct mlxsw_item *storage_item,
+ const struct mlxsw_item *output_item,
+ char *storage, char *output)
{
u32 value;
value = __mlxsw_item_get32(storage, storage_item, 0);
- __mlxsw_item_set32(output_indexed, output_item, 0, value);
+ __mlxsw_item_set32(output, output_item, 0, value);
}
-static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item,
- const struct mlxsw_item *output_item,
- char *storage, char *output_indexed)
+static void mlxsw_sp_afk_encode_buf(const struct mlxsw_item *storage_item,
+ const struct mlxsw_item *output_item,
+ char *storage, char *output)
{
char *storage_data = __mlxsw_item_data(storage, storage_item, 0);
- char *output_data = __mlxsw_item_data(output_indexed, output_item, 0);
+ char *output_data = __mlxsw_item_data(output, output_item, 0);
size_t len = output_item->size.bytes;
memcpy(output_data, storage_data, len);
}
-#define MLXSW_AFK_KEY_BLOCK_SIZE 16
-
-static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
- int block_index, char *storage, char *output)
+static void
+mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
+ char *output, char *storage)
{
- char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE;
const struct mlxsw_item *storage_item = &elinst->info->item;
const struct mlxsw_item *output_item = &elinst->item;
if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
- mlxsw_afk_encode_u32(storage_item, output_item,
- storage, output_indexed);
+ mlxsw_sp_afk_encode_u32(storage_item, output_item,
+ storage, output);
else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF)
- mlxsw_afk_encode_buf(storage_item, output_item,
- storage, output_indexed);
+ mlxsw_sp_afk_encode_buf(storage_item, output_item,
+ storage, output);
}
-void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
+#define MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE 16
+
+void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask)
+ char *key, char *mask, int block_start, int block_end)
{
+ char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
+ char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
const struct mlxsw_afk_element_inst *elinst;
enum mlxsw_afk_element element;
- int block_index;
+ int block_index, i;
+
+ for (i = block_start; i <= block_end; i++) {
+ memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
+ memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
+
+ mlxsw_afk_element_usage_for_each(element, &values->elusage) {
+ elinst = mlxsw_afk_key_info_elinst_get(key_info,
+ element,
+ &block_index);
+ if (!elinst || block_index != i)
+ continue;
+
+ mlxsw_sp_afk_encode_one(elinst, block_key,
+ values->storage.key);
+ mlxsw_sp_afk_encode_one(elinst, block_mask,
+ values->storage.mask);
+ }
- mlxsw_afk_element_usage_for_each(element, &values->elusage) {
- elinst = mlxsw_afk_key_info_elinst_get(key_info, element,
- &block_index);
- if (!elinst)
- continue;
- mlxsw_afk_encode_one(elinst, block_index,
- values->storage.key, key);
- mlxsw_afk_encode_one(elinst, block_index,
- values->storage.mask, mask);
+ if (key)
+ mlxsw_afk->ops->encode_block(block_key, i, key);
+ if (mask)
+ mlxsw_afk->ops->encode_block(block_mask, i, mask);
}
}
EXPORT_SYMBOL(mlxsw_afk_encode);
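
With the reworked mlxsw_afk_encode(), each block is first built in a zeroed scratch buffer and then handed to the ops' encode_block callback, which decides where it lands in the final key; the caller selects the block range explicitly. A plausible call site, assuming the existing mlxsw_afk_key_info_blocks_count_get() helper, would encode a whole key/mask pair as:

/* Sketch: encode every block of the key and mask in one call. */
mlxsw_afk_encode(mlxsw_afk, key_info, &values, key, mask, 0,
		 mlxsw_afk_key_info_blocks_count_get(key_info) - 1);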
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index 122506daa586..c29c045d826d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_CORE_ACL_FLEX_KEYS_H
#define _MLXSW_CORE_ACL_FLEX_KEYS_H
@@ -42,16 +11,20 @@
enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
- MLXSW_AFK_ELEMENT_DMAC,
- MLXSW_AFK_ELEMENT_SMAC,
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
MLXSW_AFK_ELEMENT_ETHERTYPE,
MLXSW_AFK_ELEMENT_IP_PROTO,
- MLXSW_AFK_ELEMENT_SRC_IP4,
- MLXSW_AFK_ELEMENT_DST_IP4,
- MLXSW_AFK_ELEMENT_SRC_IP6_HI,
- MLXSW_AFK_ELEMENT_SRC_IP6_LO,
- MLXSW_AFK_ELEMENT_DST_IP6_HI,
- MLXSW_AFK_ELEMENT_DST_IP6_LO,
+ MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
MLXSW_AFK_ELEMENT_VID,
@@ -99,9 +72,11 @@ struct mlxsw_afk_element_info {
* define an internal storage geometry.
*/
static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16),
- MLXSW_AFK_ELEMENT_INFO_BUF(DMAC, 0x04, 6),
- MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6),
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_0_31, 0x0C, 4),
MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
@@ -112,12 +87,14 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32),
- MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_0_31, 0x2C, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_96_127, 0x30, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_64_95, 0x34, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
};
#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
@@ -208,9 +185,14 @@ mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small,
struct mlxsw_afk;
+struct mlxsw_afk_ops {
+ const struct mlxsw_afk_block *blocks;
+ unsigned int blocks_count;
+ void (*encode_block)(char *block, int block_index, char *output);
+};
+
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
- const struct mlxsw_afk_block *blocks,
- unsigned int blocks_count);
+ const struct mlxsw_afk_ops *ops);
void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk);
struct mlxsw_afk_key_info;
@@ -243,8 +225,9 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
enum mlxsw_afk_element element,
const char *key_value, const char *mask_value,
unsigned int len);
-void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
+void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask);
+ char *key, char *mask, int block_start, int block_end);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 84185f8dfbae..f6cf2896d337 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index d866c98c1a97..6d29dc428608 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -1,34 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved
* Copyright (c) 2016 Ivan Vecera <cera@cera.cz>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
index 97b6bb5d9185..a33b896f4bb8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/emad.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/emad.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_EMAD_H
#define _MLXSW_EMAD_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 25f9915ebd82..798bd5aca384 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/i2c.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/err.h>
#include <linux/i2c.h>
@@ -46,8 +15,6 @@
#include "core.h"
#include "i2c.h"
-static const char mlxsw_i2c_driver_name[] = "mlxsw_i2c";
-
#define MLXSW_I2C_CIR2_BASE 0x72000
#define MLXSW_I2C_CIR_STATUS_OFF 0x18
#define MLXSW_I2C_CIR2_OFF_STATUS (MLXSW_I2C_CIR2_BASE + \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.h b/drivers/net/ethernet/mellanox/mlxsw/i2c.h
index daa24b213ea4..17e059d47fae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/i2c.h
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_I2C_H
#define _MLXSW_I2C_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/ib.h b/drivers/net/ethernet/mellanox/mlxsw/ib.h
index ce313aaa6336..2d0cb0f5eb85 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/ib.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/ib.h
@@ -1,36 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/ib.h
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
+
#ifndef _MLXSW_IB_H
#define _MLXSW_IB_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index 31c886edc791..e92cadc98128 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/item.h
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_ITEM_H
#define _MLXSW_ITEM_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 3dd16267b76c..5a6c4457fb55 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/minimal.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/i2c.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index fc4557245ff4..4d271fb3de3d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/pci.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -53,8 +22,6 @@
#include "port.h"
#include "resources.h"
-static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
-
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
@@ -1750,6 +1717,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const char *driver_name = pdev->driver->name;
struct mlxsw_pci *mlxsw_pci;
+ bool called_again = false;
int err;
mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
@@ -1806,10 +1774,18 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.dev = &pdev->dev;
mlxsw_pci->id = id;
+again:
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
&mlxsw_pci_bus, mlxsw_pci, false,
NULL);
- if (err) {
+	/* -EAGAIN is returned when the FW was updated. The FW needs
+	 * a reset, so let's try to call mlxsw_core_bus_device_register()
+	 * again.
+ */
+ if (err == -EAGAIN && !called_again) {
+ called_again = true;
+ goto again;
+ } else if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
goto err_bus_device_register;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index d65582325cd5..946339e13eb9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/pci.h
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_PCI_H
#define _MLXSW_PCI_H
@@ -39,6 +8,7 @@
#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 963155f6a17a..83f452b7ccbb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
- * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_PCI_HW_H
#define _MLXSW_PCI_HW_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index c580abba8d34..a33eeef0b00c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -1,38 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/port.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
#ifndef _MLXSW_PORT_H
#define _MLXSW_PORT_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 1877d9f8a11a..6e8b619b769b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1,44 +1,10 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/reg.h
- * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
- * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_REG_H
#define _MLXSW_REG_H
+#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
@@ -1943,6 +1909,28 @@ static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port,
mlxsw_reg_cwtpm_ntcp_r_set(payload, profile);
}
+/* PGCR - Policy-Engine General Configuration Register
+ * ---------------------------------------------------
+ * This register configures general Policy-Engine settings.
+ */
+#define MLXSW_REG_PGCR_ID 0x3001
+#define MLXSW_REG_PGCR_LEN 0x20
+
+MLXSW_REG_DEFINE(pgcr, MLXSW_REG_PGCR_ID, MLXSW_REG_PGCR_LEN);
+
+/* reg_pgcr_default_action_pointer_base
+ * Default action pointer base. Each region has a default action pointer
+ * which is equal to default_action_pointer_base + region_id.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pgcr, default_action_pointer_base, 0x1C, 0, 24);
+
+static inline void mlxsw_reg_pgcr_pack(char *payload, u32 pointer_base)
+{
+ MLXSW_REG_ZERO(pgcr, payload);
+ mlxsw_reg_pgcr_default_action_pointer_base_set(payload, pointer_base);
+}
+
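
A minimal usage sketch for the new PGCR register, following the driver's usual pack-then-write flow (the surrounding variable names are assumptions):

char pgcr_pl[MLXSW_REG_PGCR_LEN];
int err;

mlxsw_reg_pgcr_pack(pgcr_pl, pointer_base);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pgcr), pgcr_pl);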
/* PPBT - Policy-Engine Port Binding Table
* ---------------------------------------
* This register is used for configuration of the Port Binding Table.
@@ -2132,14 +2120,18 @@ MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4);
/* reg_ptar_action_set_type
* Type of action set to be used on this region.
- * For Spectrum, this is always type 2 - "flexible"
+ * For Spectrum and Spectrum-2, this is always type 2 - "flexible"
* Access: WO
*/
MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8);
+enum mlxsw_reg_ptar_key_type {
+	MLXSW_REG_PTAR_KEY_TYPE_FLEX = 0x50, /* Spectrum */
+ MLXSW_REG_PTAR_KEY_TYPE_FLEX2 = 0x51, /* Spectrum-2 */
+};
+
/* reg_ptar_key_type
* TCAM key type for the region.
- * For Spectrum, this is always type 0x50 - "FLEX_KEY"
* Access: WO
*/
MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8);
@@ -2182,13 +2174,14 @@ MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8,
MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false);
static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op,
+ enum mlxsw_reg_ptar_key_type key_type,
u16 region_size, u16 region_id,
const char *tcam_region_info)
{
MLXSW_REG_ZERO(ptar, payload);
mlxsw_reg_ptar_op_set(payload, op);
mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */
- mlxsw_reg_ptar_key_type_set(payload, 0x50); /* "FLEX_KEY" */
+ mlxsw_reg_ptar_key_type_set(payload, key_type);
mlxsw_reg_ptar_region_size_set(payload, region_size);
mlxsw_reg_ptar_region_id_set(payload, region_id);
mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info);
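
With the key type now caller-supplied, a Spectrum call site keeps the old behaviour by passing the FLEX type, while Spectrum-2 code would pass FLEX2. A sketch, assuming the existing MLXSW_REG_PTAR_OP_ALLOC opcode and placeholder region parameters:

char ptar_pl[MLXSW_REG_PTAR_LEN];

mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
		    MLXSW_REG_PTAR_KEY_TYPE_FLEX, region_size,
		    region_id, tcam_region_info);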
@@ -2327,6 +2320,23 @@ MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN);
*/
MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24);
+/* reg_pefa_a
+ * Activity. For a new entry: set if ca=0, clear if ca=1.
+ * Set if a packet lookup has hit on the specific entry.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pefa, a, 0x04, 29, 1);
+
+/* reg_pefa_ca
+ * Clear activity
+ * On write: the activity bit is set according to this field.
+ * On read: after the read, the activity bit is cleared according to ca.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pefa, ca, 0x04, 24, 1);
+
#define MLXSW_REG_FLEX_ACTION_SET_LEN 0xA8
/* reg_pefa_flex_action_set
@@ -2336,12 +2346,20 @@ MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24);
*/
MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, MLXSW_REG_FLEX_ACTION_SET_LEN);
-static inline void mlxsw_reg_pefa_pack(char *payload, u32 index,
+static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, bool ca,
const char *flex_action_set)
{
MLXSW_REG_ZERO(pefa, payload);
mlxsw_reg_pefa_index_set(payload, index);
- mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, flex_action_set);
+ mlxsw_reg_pefa_ca_set(payload, ca);
+ if (flex_action_set)
+ mlxsw_reg_pefa_flex_action_set_memcpy_to(payload,
+ flex_action_set);
+}
+
+static inline void mlxsw_reg_pefa_unpack(char *payload, bool *p_a)
+{
+ *p_a = mlxsw_reg_pefa_a_get(payload);
}
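
Together, the new a/ca bits let software poll action-set activity. A sketch of a read-and-clear cycle, using the standard mlxsw_reg_query() helper and a placeholder kvdl_index:

char pefa_pl[MLXSW_REG_PEFA_LEN];
bool activity;
int err;

/* ca=true: the activity bit is cleared as part of this read. */
mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, true, NULL);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pefa), pefa_pl);
if (!err)
	mlxsw_reg_pefa_unpack(pefa_pl, &activity);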
/* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2
@@ -2397,6 +2415,15 @@ MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3);
*/
MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16);
+/* reg_ptce2_priority
+ * Priority of the rule, higher values win. The range is 1..cap_kvd_size-1.
+ * Note: priority does not have to be unique per rule.
+ * Within a region, rules with higher priority should be placed at lower
+ * offsets; there is no such limitation between the regions of a multi-region.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce2, priority, 0x04, 0, 24);
+
/* reg_ptce2_tcam_region_info
* Opaque object that represents the TCAM region.
* Access: Index
@@ -2404,14 +2431,14 @@ MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16);
MLXSW_ITEM_BUF(reg, ptce2, tcam_region_info, 0x10,
MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
-#define MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN 96
+#define MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN 96
/* reg_ptce2_flex_key_blocks
* ACL Key.
* Access: RW
*/
MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20,
- MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
/* reg_ptce2_mask
* mask- in the same size as key. A bit that is set directs the TCAM
@@ -2420,7 +2447,7 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20,
* Access: RW
*/
MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80,
- MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
/* reg_ptce2_flex_action_set
* ACL action set.
@@ -2432,15 +2459,567 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0,
static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid,
enum mlxsw_reg_ptce2_op op,
const char *tcam_region_info,
- u16 offset)
+ u16 offset, u32 priority)
{
MLXSW_REG_ZERO(ptce2, payload);
mlxsw_reg_ptce2_v_set(payload, valid);
mlxsw_reg_ptce2_op_set(payload, op);
mlxsw_reg_ptce2_offset_set(payload, offset);
+ mlxsw_reg_ptce2_priority_set(payload, priority);
mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info);
}
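
Writing a prioritized C-TCAM entry then looks roughly like this (key, mask and region info are placeholders; the memcpy_to helpers are generated by the MLXSW_ITEM_BUF definitions above):

char ptce2_pl[MLXSW_REG_PTCE2_LEN];

mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
		     tcam_region_info, offset, priority);
mlxsw_reg_ptce2_flex_key_blocks_memcpy_to(ptce2_pl, key);
mlxsw_reg_ptce2_mask_memcpy_to(ptce2_pl, mask);
mlxsw_reg_write(mlxsw_core, MLXSW_REG(ptce2), ptce2_pl);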
+/* PERPT - Policy-Engine ERP Table Register
+ * ----------------------------------------
+ * This register adds and removes eRPs from the eRP table.
+ */
+#define MLXSW_REG_PERPT_ID 0x3021
+#define MLXSW_REG_PERPT_LEN 0x80
+
+MLXSW_REG_DEFINE(perpt, MLXSW_REG_PERPT_ID, MLXSW_REG_PERPT_LEN);
+
+/* reg_perpt_erpt_bank
+ * eRP table bank.
+ * Range 0 .. cap_max_erp_table_banks - 1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perpt, erpt_bank, 0x00, 16, 4);
+
+/* reg_perpt_erpt_index
+ * Index to eRP table within the eRP bank.
+ * Range is 0 .. cap_max_erp_table_bank_size - 1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perpt, erpt_index, 0x00, 0, 8);
+
+enum mlxsw_reg_perpt_key_size {
+ MLXSW_REG_PERPT_KEY_SIZE_2KB,
+ MLXSW_REG_PERPT_KEY_SIZE_4KB,
+ MLXSW_REG_PERPT_KEY_SIZE_8KB,
+ MLXSW_REG_PERPT_KEY_SIZE_12KB,
+};
+
+/* reg_perpt_key_size
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, key_size, 0x04, 0, 4);
+
+/* reg_perpt_bf_bypass
+ * 0 - The eRP is used only if bloom filter state is set for the given
+ * rule.
+ * 1 - The eRP is used regardless of bloom filter state.
+ * The bypass is an OR condition of region_id or eRP. See PERCR.bf_bypass
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perpt, bf_bypass, 0x08, 8, 1);
+
+/* reg_perpt_erp_id
+ * eRP ID for use by the rules.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perpt, erp_id, 0x08, 0, 4);
+
+/* reg_perpt_erpt_base_bank
+ * Base eRP table bank, points to head of erp_vector
+ * Range is 0 .. cap_max_erp_table_banks - 1
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erpt_base_bank, 0x0C, 16, 4);
+
+/* reg_perpt_erpt_base_index
+ * Base index to eRP table within the eRP bank
+ * Range is 0 .. cap_max_erp_table_bank_size - 1
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erpt_base_index, 0x0C, 0, 8);
+
+/* reg_perpt_erp_index_in_vector
+ * eRP index in the vector.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erp_index_in_vector, 0x10, 0, 4);
+
+/* reg_perpt_erp_vector
+ * eRP vector.
+ * Access: OP
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, perpt, erp_vector, 0x14, 4, 1);
+
+/* reg_perpt_mask
+ * Mask
+ * 0 - A-TCAM will ignore the bit in key
+ * 1 - A-TCAM will compare the bit in key
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, perpt, mask, 0x20, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+
+static inline void mlxsw_reg_perpt_erp_vector_pack(char *payload,
+ unsigned long *erp_vector,
+ unsigned long size)
+{
+ unsigned long bit;
+
+ for_each_set_bit(bit, erp_vector, size)
+ mlxsw_reg_perpt_erp_vector_set(payload, bit, true);
+}
+
+static inline void
+mlxsw_reg_perpt_pack(char *payload, u8 erpt_bank, u8 erpt_index,
+ enum mlxsw_reg_perpt_key_size key_size, u8 erp_id,
+ u8 erpt_base_bank, u8 erpt_base_index, u8 erp_index,
+ char *mask)
+{
+ MLXSW_REG_ZERO(perpt, payload);
+ mlxsw_reg_perpt_erpt_bank_set(payload, erpt_bank);
+ mlxsw_reg_perpt_erpt_index_set(payload, erpt_index);
+ mlxsw_reg_perpt_key_size_set(payload, key_size);
+ mlxsw_reg_perpt_bf_bypass_set(payload, true);
+ mlxsw_reg_perpt_erp_id_set(payload, erp_id);
+ mlxsw_reg_perpt_erpt_base_bank_set(payload, erpt_base_bank);
+ mlxsw_reg_perpt_erpt_base_index_set(payload, erpt_base_index);
+ mlxsw_reg_perpt_erp_index_in_vector_set(payload, erp_index);
+ mlxsw_reg_perpt_mask_memcpy_to(payload, mask);
+}
+
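
The erp_vector item above holds one bit per eRP index (4 bytes, so up to 32 entries). A sketch of adding an eRP whose vector covers indexes 0 and 2 (the bank/index variables are placeholders):

DECLARE_BITMAP(erp_vector, 32);
char perpt_pl[MLXSW_REG_PERPT_LEN];

bitmap_zero(erp_vector, 32);
__set_bit(0, erp_vector);
__set_bit(2, erp_vector);
mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index,
		     MLXSW_REG_PERPT_KEY_SIZE_4KB, erp_id,
		     erpt_base_bank, erpt_base_index, erp_index, mask);
mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_vector, 32);
mlxsw_reg_write(mlxsw_core, MLXSW_REG(perpt), perpt_pl);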
+/* PERAR - Policy-Engine Region Association Register
+ * -------------------------------------------------
+ * This register associates a HW region with a region_id. Changing the
+ * association on the fly is supported by the device.
+ */
+#define MLXSW_REG_PERAR_ID 0x3026
+#define MLXSW_REG_PERAR_LEN 0x08
+
+MLXSW_REG_DEFINE(perar, MLXSW_REG_PERAR_ID, MLXSW_REG_PERAR_LEN);
+
+/* reg_perar_region_id
+ * Region identifier
+ * Range 0 .. cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perar, region_id, 0x00, 0, 16);
+
+static inline unsigned int
+mlxsw_reg_perar_hw_regions_needed(unsigned int block_num)
+{
+ return DIV_ROUND_UP(block_num, 4);
+}
+
+/* reg_perar_hw_region
+ * HW Region
+ * Range 0 .. cap_max_regions-1
+ * Default: hw_region = region_id
+ * For an 8 key block region, 2 consecutive regions are used
+ * For a 12 key block region, 3 consecutive regions are used
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perar, hw_region, 0x04, 0, 16);
+
+static inline void mlxsw_reg_perar_pack(char *payload, u16 region_id,
+ u16 hw_region)
+{
+ MLXSW_REG_ZERO(perar, payload);
+ mlxsw_reg_perar_region_id_set(payload, region_id);
+ mlxsw_reg_perar_hw_region_set(payload, hw_region);
+}
+
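
Worked example for the helper above: with four key blocks per HW region, a 12-key-block region needs DIV_ROUND_UP(12, 4) = 3 consecutive HW regions:

char perar_pl[MLXSW_REG_PERAR_LEN];
unsigned int needed = mlxsw_reg_perar_hw_regions_needed(12); /* == 3 */

/* The caller reserves hw_region .. hw_region + needed - 1. */
mlxsw_reg_perar_pack(perar_pl, region_id, hw_region);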
+/* PTCE-V3 - Policy-Engine TCAM Entry Register Version 3
+ * -----------------------------------------------------
+ * This register is a new version of PTCE-V2, created to support the
+ * A-TCAM. It is not supported by SwitchX/-2 and Spectrum.
+ */
+#define MLXSW_REG_PTCE3_ID 0x3027
+#define MLXSW_REG_PTCE3_LEN 0xF0
+
+MLXSW_REG_DEFINE(ptce3, MLXSW_REG_PTCE3_ID, MLXSW_REG_PTCE3_LEN);
+
+/* reg_ptce3_v
+ * Valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, v, 0x00, 31, 1);
+
+enum mlxsw_reg_ptce3_op {
+ /* Write operation. Used to write a new entry to the table.
+ * All R/W fields are relevant for new entry. Activity bit is set
+ * for new entries. Write with v = 0 will delete the entry. Must
+ * not be used if an entry exists.
+ */
+ MLXSW_REG_PTCE3_OP_WRITE_WRITE = 0,
+ /* Update operation */
+ MLXSW_REG_PTCE3_OP_WRITE_UPDATE = 1,
+ /* Read operation */
+ MLXSW_REG_PTCE3_OP_QUERY_READ = 0,
+};
+
+/* reg_ptce3_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ptce3, op, 0x00, 20, 3);
+
+/* reg_ptce3_priority
+ * Priority of the rule. Higher values win.
+ * For Spectrum-2 range is 1..cap_kvd_size - 1
+ * Note: Priority does not have to be unique per rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, priority, 0x04, 0, 24);
+
+/* reg_ptce3_tcam_region_info
+ * Opaque object that represents the TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, ptce3, tcam_region_info, 0x10,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+/* reg_ptce3_flex2_key_blocks
+ * ACL key. The key must be masked according to eRP (if exists) or
+ * according to master mask.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, ptce3, flex2_key_blocks, 0x20,
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+
+/* reg_ptce3_erp_id
+ * eRP ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, erp_id, 0x80, 0, 4);
+
+/* reg_ptce3_delta_start
+ * Start point of delta_value and delta_mask, in bits. Must not exceed
+ * num_key_blocks * 36 - 8. Reserved when delta_mask = 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_start, 0x84, 0, 10);
+
+/* reg_ptce3_delta_mask
+ * Delta mask.
+ * 0 - Ignore relevant bit in delta_value
+ * 1 - Compare relevant bit in delta_value
+ * Delta mask must not be set for reserved fields in the key blocks.
+ * Note: No delta when no eRPs. Thus, for regions with
+ * PERERP.erpt_pointer_valid = 0 the delta mask must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_mask, 0x88, 16, 8);
+
+/* reg_ptce3_delta_value
+ * Delta value.
+ * Bits which are masked by delta_mask must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_value, 0x88, 0, 8);
+
+/* reg_ptce3_prune_vector
+ * Pruning vector relative to the PERPT.erp_id.
+ * Used for reducing lookups.
+ * 0 - NEED: Do a lookup using the eRP.
+ * 1 - PRUNE: Do not perform a lookup using the eRP.
+ * May be modified by PEAPBL and PEAPBM.
+ * Note: In Spectrum-2, a region of 8 key blocks must be set to either
+ * all 1's or all 0's.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, ptce3, prune_vector, 0x90, 4, 1);
+
+/* reg_ptce3_prune_ctcam
+ * Pruning on C-TCAM. Used for reducing lookups.
+ * 0 - NEED: Do a lookup in the C-TCAM.
+ * 1 - PRUNE: Do not perform a lookup in the C-TCAM.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, prune_ctcam, 0x94, 31, 1);
+
+/* reg_ptce3_large_exists
+ * Large entry key ID exists.
+ * Within the region:
+ * 0 - SINGLE: The large_entry_key_id is not currently in use.
+ * For rule insert: The MSB of the key (blocks 6..11) will be added.
+ * For rule delete: The MSB of the key will be removed.
+ * 1 - NON_SINGLE: The large_entry_key_id is currently in use.
+ * For rule insert: The MSB of the key (blocks 6..11) will not be added.
+ * For rule delete: The MSB of the key will not be removed.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptce3, large_exists, 0x98, 31, 1);
+
+/* reg_ptce3_large_entry_key_id
+ * Large entry key ID.
+ * A key for 12 key blocks rules. Reserved when region has less than 12 key
+ * blocks. Must be different for different keys which have the same common
+ * 6 key blocks (MSB, blocks 6..11) key within a region.
+ * Range is 0..cap_max_pe_large_key_id - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, large_entry_key_id, 0x98, 0, 24);
+
+/* reg_ptce3_action_pointer
+ * Pointer to action.
+ * Range is 0..cap_max_kvd_action_sets - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, action_pointer, 0xA0, 0, 24);
+
+static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
+ enum mlxsw_reg_ptce3_op op,
+ u32 priority,
+ const char *tcam_region_info,
+ const char *key, u8 erp_id,
+ bool large_exists, u32 lkey_id,
+ u32 action_pointer)
+{
+ MLXSW_REG_ZERO(ptce3, payload);
+ mlxsw_reg_ptce3_v_set(payload, valid);
+ mlxsw_reg_ptce3_op_set(payload, op);
+ mlxsw_reg_ptce3_priority_set(payload, priority);
+ mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info);
+ mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key);
+ mlxsw_reg_ptce3_erp_id_set(payload, erp_id);
+ mlxsw_reg_ptce3_large_exists_set(payload, large_exists);
+ mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id);
+ mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer);
+}
+
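
Installing an A-TCAM rule combines the fields above; a sketch in which enc_key is already masked per the eRP (or master mask) and all variables are placeholders:

char ptce3_pl[MLXSW_REG_PTCE3_LEN];

mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
		     priority, tcam_region_info, enc_key, erp_id,
		     large_exists, lkey_id, action_pointer);
mlxsw_reg_write(mlxsw_core, MLXSW_REG(ptce3), ptce3_pl);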
+/* PERCR - Policy-Engine Region Configuration Register
+ * ---------------------------------------------------
+ * This register configures the region parameters. The region_id must be
+ * allocated.
+ */
+#define MLXSW_REG_PERCR_ID 0x302A
+#define MLXSW_REG_PERCR_LEN 0x80
+
+MLXSW_REG_DEFINE(percr, MLXSW_REG_PERCR_ID, MLXSW_REG_PERCR_LEN);
+
+/* reg_percr_region_id
+ * Region identifier.
+ * Range 0..cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, percr, region_id, 0x00, 0, 16);
+
+/* reg_percr_atcam_ignore_prune
+ * Ignore the prune_vector set by other A-TCAM rules. Used, e.g., for a new rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, atcam_ignore_prune, 0x04, 25, 1);
+
+/* reg_percr_ctcam_ignore_prune
+ * Ignore the prune_ctcam set by other A-TCAM rules. Used, e.g., for a new rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, ctcam_ignore_prune, 0x04, 24, 1);
+
+/* reg_percr_bf_bypass
+ * Bloom filter bypass.
+ * 0 - Bloom filter is used (default)
+ * 1 - Bloom filter is bypassed. The bypass is an OR condition of
+ * region_id or eRP. See PERPT.bf_bypass
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, bf_bypass, 0x04, 16, 1);
+
+/* reg_percr_master_mask
+ * Master mask. Logical OR mask of all masks of all rules of a region
+ * (both A-TCAM and C-TCAM). When there are no eRPs
+ * (erpt_pointer_valid = 0), this provides the mask.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, percr, master_mask, 0x20, 96);
+
+static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id)
+{
+ MLXSW_REG_ZERO(percr, payload);
+ mlxsw_reg_percr_region_id_set(payload, region_id);
+ mlxsw_reg_percr_atcam_ignore_prune_set(payload, false);
+ mlxsw_reg_percr_ctcam_ignore_prune_set(payload, false);
+ mlxsw_reg_percr_bf_bypass_set(payload, true);
+}
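
Note that mlxsw_reg_percr_pack() leaves master_mask zeroed, so a caller fills it separately. A minimal sketch, assuming a hypothetical helper name; mlxsw_reg_percr_master_mask_memcpy_to() is generated by the MLXSW_ITEM_BUF() above:

static int mlxsw_sp_region_master_mask_write(struct mlxsw_sp *mlxsw_sp,
					     u16 region_id,
					     const char *mask /* 96 bytes */)
{
	char percr_pl[MLXSW_REG_PERCR_LEN];

	mlxsw_reg_percr_pack(percr_pl, region_id);
	/* Logical OR of all rule masks in the region */
	mlxsw_reg_percr_master_mask_memcpy_to(percr_pl, mask);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
}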
+
+/* PERERP - Policy-Engine Region eRP Register
+ * ------------------------------------------
+ * This register configures the region eRP. The region_id must be
+ * allocated.
+ */
+#define MLXSW_REG_PERERP_ID 0x302B
+#define MLXSW_REG_PERERP_LEN 0x1C
+
+MLXSW_REG_DEFINE(pererp, MLXSW_REG_PERERP_ID, MLXSW_REG_PERERP_LEN);
+
+/* reg_pererp_region_id
+ * Region identifier.
+ * Range 0..cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pererp, region_id, 0x00, 0, 16);
+
+/* reg_pererp_ctcam_le
+ * C-TCAM lookup enable. Reserved when erpt_pointer_valid = 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, ctcam_le, 0x04, 28, 1);
+
+/* reg_pererp_erpt_pointer_valid
+ * erpt_pointer is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_pointer_valid, 0x10, 31, 1);
+
+/* reg_pererp_erpt_bank_pointer
+ * Pointer to eRP table bank. May be modified at any time.
+ * Range 0..cap_max_erp_table_banks-1
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_bank_pointer, 0x10, 16, 4);
+
+/* reg_pererp_erpt_pointer
+ * Pointer to eRP table within the eRP bank. Can be changed for an
+ * existing region.
+ * Range 0..cap_max_erp_table_size-1
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_pointer, 0x10, 0, 8);
+
+/* reg_pererp_erpt_vector
+ * Vector of allowed eRP indexes starting from erpt_pointer within the
+ * erpt_bank_pointer. Subsequent entries continue in the next bank.
+ * Note that eRP index is used and not eRP ID.
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pererp, erpt_vector, 0x14, 4, 1);
+
+/* reg_pererp_master_rp_id
+ * Master RP ID. When there are no eRPs, this provides the eRP ID
+ * for the lookup. Can be changed for an existing region.
+ * Reserved when erpt_pointer_valid = 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, master_rp_id, 0x18, 0, 4);
+
+static inline void mlxsw_reg_pererp_erp_vector_pack(char *payload,
+ unsigned long *erp_vector,
+ unsigned long size)
+{
+ unsigned long bit;
+
+ for_each_set_bit(bit, erp_vector, size)
+ mlxsw_reg_pererp_erpt_vector_set(payload, bit, true);
+}
+
+static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id,
+ bool ctcam_le, bool erpt_pointer_valid,
+ u8 erpt_bank_pointer, u8 erpt_pointer,
+ u8 master_rp_id)
+{
+ MLXSW_REG_ZERO(pererp, payload);
+ mlxsw_reg_pererp_region_id_set(payload, region_id);
+ mlxsw_reg_pererp_ctcam_le_set(payload, ctcam_le);
+ mlxsw_reg_pererp_erpt_pointer_valid_set(payload, erpt_pointer_valid);
+ mlxsw_reg_pererp_erpt_bank_pointer_set(payload, erpt_bank_pointer);
+ mlxsw_reg_pererp_erpt_pointer_set(payload, erpt_pointer);
+ mlxsw_reg_pererp_master_rp_id_set(payload, master_rp_id);
+}
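
A hedged sketch of enabling an eRP table for a region (hypothetical helper; the 32-bit vector size follows from the 4-byte one-bit-per-entry array at 0x14, and the choice of eRP index 0 is illustrative):

static int mlxsw_sp_region_erp_table_enable(struct mlxsw_sp *mlxsw_sp,
					    u16 region_id, u8 bank, u8 index)
{
	DECLARE_BITMAP(erp_vector, 32); /* 4 bytes x 8 one-bit entries */
	char pererp_pl[MLXSW_REG_PERERP_LEN];

	bitmap_zero(erp_vector, 32);
	__set_bit(0, erp_vector); /* allow eRP index 0 */

	/* master_rp_id is reserved when erpt_pointer_valid = 1 */
	mlxsw_reg_pererp_pack(pererp_pl, region_id, false /* ctcam_le */,
			      true /* erpt_pointer_valid */, bank, index, 0);
	mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_vector, 32);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
}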
+
+/* IEDR - Infrastructure Entry Delete Register
+ * -------------------------------------------
+ * This register is used for deleting entries from the entry tables.
+ * It is legitimate to attempt to delete a non-existent entry (the
+ * device treats this as a successful operation).
+ */
+#define MLXSW_REG_IEDR_ID 0x3804
+#define MLXSW_REG_IEDR_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_IEDR_REC_LEN 0x8 /* record length */
+#define MLXSW_REG_IEDR_REC_MAX_COUNT 64
+#define MLXSW_REG_IEDR_LEN (MLXSW_REG_IEDR_BASE_LEN + \
+ MLXSW_REG_IEDR_REC_LEN * \
+ MLXSW_REG_IEDR_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(iedr, MLXSW_REG_IEDR_ID, MLXSW_REG_IEDR_LEN);
+
+/* reg_iedr_num_rec
+ * Number of records.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, iedr, num_rec, 0x00, 0, 8);
+
+/* reg_iedr_rec_type
+ * Resource type.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_type, MLXSW_REG_IEDR_BASE_LEN, 24, 8,
+ MLXSW_REG_IEDR_REC_LEN, 0x00, false);
+
+/* reg_iedr_rec_size
+ * Size of entries to be deleted. The unit is 1 entry, regardless of entry type.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_size, MLXSW_REG_IEDR_BASE_LEN, 0, 11,
+ MLXSW_REG_IEDR_REC_LEN, 0x00, false);
+
+/* reg_iedr_rec_index_start
+ * Resource index start.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_index_start, MLXSW_REG_IEDR_BASE_LEN, 0, 24,
+ MLXSW_REG_IEDR_REC_LEN, 0x04, false);
+
+static inline void mlxsw_reg_iedr_pack(char *payload)
+{
+ MLXSW_REG_ZERO(iedr, payload);
+}
+
+static inline void mlxsw_reg_iedr_rec_pack(char *payload, int rec_index,
+ u8 rec_type, u16 rec_size,
+ u32 rec_index_start)
+{
+ u8 num_rec = mlxsw_reg_iedr_num_rec_get(payload);
+
+ if (rec_index >= num_rec)
+ mlxsw_reg_iedr_num_rec_set(payload, rec_index + 1);
+ mlxsw_reg_iedr_rec_type_set(payload, rec_index, rec_type);
+ mlxsw_reg_iedr_rec_size_set(payload, rec_index, rec_size);
+ mlxsw_reg_iedr_rec_index_start_set(payload, rec_index, rec_index_start);
+}
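
A hedged sketch of batching deletions (one record shown; up to MLXSW_REG_IEDR_REC_MAX_COUNT records can be appended before the single write; rec_type values are device-defined, so they are left as a parameter here):

static int mlxsw_sp_entries_delete(struct mlxsw_sp *mlxsw_sp, u8 rec_type,
				   u16 count, u32 first_index)
{
	char iedr_pl[MLXSW_REG_IEDR_LEN];

	mlxsw_reg_iedr_pack(iedr_pl);
	/* rec_pack() bumps num_rec as records are appended */
	mlxsw_reg_iedr_rec_pack(iedr_pl, 0, rec_type, count, first_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(iedr), iedr_pl);
}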
+
+/* QPTS - QoS Priority Trust State Register
+ * ----------------------------------------
+ * This register controls the port policy to calculate the switch priority and
+ * packet color based on incoming packet fields.
+ */
+#define MLXSW_REG_QPTS_ID 0x4002
+#define MLXSW_REG_QPTS_LEN 0x8
+
+MLXSW_REG_DEFINE(qpts, MLXSW_REG_QPTS_ID, MLXSW_REG_QPTS_LEN);
+
+/* reg_qpts_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported.
+ */
+MLXSW_ITEM32(reg, qpts, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_qpts_trust_state {
+ MLXSW_REG_QPTS_TRUST_STATE_PCP = 1,
+ MLXSW_REG_QPTS_TRUST_STATE_DSCP = 2, /* For MPLS, trust EXP. */
+};
+
+/* reg_qpts_trust_state
+ * Trust state for a given port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpts, trust_state, 0x04, 0, 3);
+
+static inline void mlxsw_reg_qpts_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ MLXSW_REG_ZERO(qpts, payload);
+
+ mlxsw_reg_qpts_local_port_set(payload, local_port);
+ mlxsw_reg_qpts_trust_state_set(payload, ts);
+}
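
A minimal sketch of switching a port to DSCP trust (hypothetical helper name; it mirrors how other per-port registers are written elsewhere in this patch):

static int mlxsw_sp_port_trust_dscp_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	char qpts_pl[MLXSW_REG_QPTS_LEN];

	mlxsw_reg_qpts_pack(qpts_pl, mlxsw_sp_port->local_port,
			    MLXSW_REG_QPTS_TRUST_STATE_DSCP);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
			       MLXSW_REG(qpts), qpts_pl);
}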
+
/* QPCR - QoS Policer Configuration Register
* -----------------------------------------
* The QPCR register is used to create policers - that limit
@@ -2753,6 +3332,219 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
mlxsw_reg_qeec_next_element_index_set(payload, next_index);
}
+/* QRWE - QoS ReWrite Enable
+ * -------------------------
+ * This register configures the rewrite enable per receive port.
+ */
+#define MLXSW_REG_QRWE_ID 0x400F
+#define MLXSW_REG_QRWE_LEN 0x08
+
+MLXSW_REG_DEFINE(qrwe, MLXSW_REG_QRWE_ID, MLXSW_REG_QRWE_LEN);
+
+/* reg_qrwe_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported. No support for router port.
+ */
+MLXSW_ITEM32(reg, qrwe, local_port, 0x00, 16, 8);
+
+/* reg_qrwe_dscp
+ * Whether to enable DSCP rewrite (default is 0, don't rewrite).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qrwe, dscp, 0x04, 1, 1);
+
+/* reg_qrwe_pcp
+ * Whether to enable PCP and DEI rewrite (default is 0, don't rewrite).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qrwe, pcp, 0x04, 0, 1);
+
+static inline void mlxsw_reg_qrwe_pack(char *payload, u8 local_port,
+ bool rewrite_pcp, bool rewrite_dscp)
+{
+ MLXSW_REG_ZERO(qrwe, payload);
+ mlxsw_reg_qrwe_local_port_set(payload, local_port);
+ mlxsw_reg_qrwe_pcp_set(payload, rewrite_pcp);
+ mlxsw_reg_qrwe_dscp_set(payload, rewrite_dscp);
+}
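
For example (hypothetical helper), enabling DSCP rewrite while leaving PCP/DEI untouched:

static int mlxsw_sp_port_dscp_rewrite_enable(struct mlxsw_sp_port *port)
{
	char qrwe_pl[MLXSW_REG_QRWE_LEN];

	mlxsw_reg_qrwe_pack(qrwe_pl, port->local_port,
			    false /* rewrite_pcp */, true /* rewrite_dscp */);
	return mlxsw_reg_write(port->mlxsw_sp->core, MLXSW_REG(qrwe),
			       qrwe_pl);
}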
+
+/* QPDSM - QoS Priority to DSCP Mapping
+ * ------------------------------------
+ * QoS Priority to DSCP Mapping Register
+ */
+#define MLXSW_REG_QPDSM_ID 0x4011
+#define MLXSW_REG_QPDSM_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN 0x4 /* record length */
+#define MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT 16
+#define MLXSW_REG_QPDSM_LEN (MLXSW_REG_QPDSM_BASE_LEN + \
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN * \
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(qpdsm, MLXSW_REG_QPDSM_ID, MLXSW_REG_QPDSM_LEN);
+
+/* reg_qpdsm_local_port
+ * Local Port. Supported for data packets from CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpdsm, local_port, 0x00, 16, 8);
+
+/* reg_qpdsm_prio_entry_color0_e
+ * Enable update of the entry for color 0 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color0_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 31, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color0_dscp
+ * DSCP field in the outer label of the packet for color 0 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color0_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 24, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color1_e
+ * Enable update of the entry for color 1 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color1_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 23, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color1_dscp
+ * DSCP field in the outer label of the packet for color 1 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color1_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 16, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color2_e
+ * Enable update of the entry for color 2 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 15, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color2_dscp
+ * DSCP field in the outer label of the packet for color 2 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 8, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_qpdsm_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(qpdsm, payload);
+ mlxsw_reg_qpdsm_local_port_set(payload, local_port);
+}
+
+static inline void
+mlxsw_reg_qpdsm_prio_pack(char *payload, unsigned short prio, u8 dscp)
+{
+ mlxsw_reg_qpdsm_prio_entry_color0_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color0_dscp_set(payload, prio, dscp);
+ mlxsw_reg_qpdsm_prio_entry_color1_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color1_dscp_set(payload, prio, dscp);
+ mlxsw_reg_qpdsm_prio_entry_color2_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color2_dscp_set(payload, prio, dscp);
+}
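
A hedged sketch of programming a full priority-to-DSCP map in one register write (hypothetical helper; the map contents are the caller's):

static int mlxsw_sp_port_dscp_map_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      const u8 *prio_to_dscp /* 8 entries */)
{
	char qpdsm_pl[MLXSW_REG_QPDSM_LEN];
	int prio;

	mlxsw_reg_qpdsm_pack(qpdsm_pl, mlxsw_sp_port->local_port);
	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, prio, prio_to_dscp[prio]);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
			       MLXSW_REG(qpdsm), qpdsm_pl);
}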
+
+/* QPDPM - QoS Port DSCP to Priority Mapping Register
+ * --------------------------------------------------
+ * This register controls the mapping from DSCP field to
+ * Switch Priority for IP packets.
+ */
+#define MLXSW_REG_QPDPM_ID 0x4013
+#define MLXSW_REG_QPDPM_BASE_LEN 0x4 /* base length, without records */
+#define MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN 0x2 /* record length */
+#define MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT 64
+#define MLXSW_REG_QPDPM_LEN (MLXSW_REG_QPDPM_BASE_LEN + \
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN * \
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(qpdpm, MLXSW_REG_QPDPM_ID, MLXSW_REG_QPDPM_LEN);
+
+/* reg_qpdpm_local_port
+ * Local Port. Supported for data packets from CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpdpm, local_port, 0x00, 16, 8);
+
+/* reg_qpdpm_dscp_e
+ * Enable update of the specific entry. When cleared, the switch_prio and color
+ * fields are ignored and the previous switch_prio and color values are
+ * preserved.
+ * Access: WO
+ */
+MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_e, MLXSW_REG_QPDPM_BASE_LEN, 15, 1,
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdpm_dscp_prio
+ * The new Switch Priority value for the relevant DSCP value.
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_prio,
+ MLXSW_REG_QPDPM_BASE_LEN, 0, 4,
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_qpdpm_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(qpdpm, payload);
+ mlxsw_reg_qpdpm_local_port_set(payload, local_port);
+}
+
+static inline void
+mlxsw_reg_qpdpm_dscp_pack(char *payload, unsigned short dscp, u8 prio)
+{
+ mlxsw_reg_qpdpm_dscp_entry_e_set(payload, dscp, 1);
+ mlxsw_reg_qpdpm_dscp_entry_prio_set(payload, dscp, prio);
+}
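
And the reverse direction, DSCP to Switch Priority, covering all 64 code points (hypothetical helper):

static int mlxsw_sp_port_dscp_prio_map_set(struct mlxsw_sp_port *port,
					   const u8 *dscp_to_prio /* 64 */)
{
	char qpdpm_pl[MLXSW_REG_QPDPM_LEN];
	int dscp;

	mlxsw_reg_qpdpm_pack(qpdpm_pl, port->local_port);
	for (dscp = 0; dscp < MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT; dscp++)
		mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, dscp, dscp_to_prio[dscp]);
	return mlxsw_reg_write(port->mlxsw_sp->core, MLXSW_REG(qpdpm),
			       qpdpm_pl);
}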
+
+/* QTCTM - QoS Switch Traffic Class Table is Multicast-Aware Register
+ * ------------------------------------------------------------------
+ * This register configures whether the Switch Priority to Traffic Class
+ * mapping is based on Multicast packet indication. If so, multicast packets
+ * get a Traffic Class that is (cap_max_tclass_data/2) higher than the value
+ * configured by QTCT.
+ * By default, Switch Priority to Traffic Class mapping is not based on
+ * Multicast packet indication.
+ */
+#define MLXSW_REG_QTCTM_ID 0x401A
+#define MLXSW_REG_QTCTM_LEN 0x08
+
+MLXSW_REG_DEFINE(qtctm, MLXSW_REG_QTCTM_ID, MLXSW_REG_QTCTM_LEN);
+
+/* reg_qtctm_local_port
+ * Local port number.
+ * No support for CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8);
+
+/* reg_qtctm_mc
+ * Multicast Mode
+ * Whether Switch Priority to Traffic Class mapping is based on Multicast packet
+ * indication (default is 0, not based on Multicast packet indication).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qtctm, mc, 0x04, 0, 1);
+
+static inline void
+mlxsw_reg_qtctm_pack(char *payload, u8 local_port, bool mc)
+{
+ MLXSW_REG_ZERO(qtctm, payload);
+ mlxsw_reg_qtctm_local_port_set(payload, local_port);
+ mlxsw_reg_qtctm_mc_set(payload, mc);
+}
+
/* PMLP - Ports Module to Local Port Register
* ------------------------------------------
* Configures the assignment of modules to local ports.
@@ -3350,6 +4142,7 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+ MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2,
MLXSW_REG_PPCNT_EXT_CNT = 0x5,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
MLXSW_REG_PPCNT_TC_CNT = 0x11,
@@ -3508,6 +4301,68 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+/* Ethernet RFC 2819 Counter Group */
+
+/* reg_ppcnt_ether_stats_pkts64octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts64octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts65to127octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts65to127octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts128to255octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts128to255octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts256to511octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts256to511octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts512to1023octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts512to1023octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x78, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts1024to1518octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1024to1518octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x80, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts1519to2047octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1519to2047octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x88, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts2048to4095octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts2048to4095octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts4096to8191octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x98, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts8192to10239octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64);
+
/* Ethernet Extended Counter Group Counters */
/* reg_ppcnt_ecn_marked
@@ -4338,6 +5193,20 @@ MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8);
*/
MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6);
+/* reg_ritr_if_vrrp_id_ipv6
+ * VRRP ID for IPv6
+ * Note: Reserved for RIF types other than VLAN, FID and Sub-port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv6, 0x1C, 8, 8);
+
+/* reg_ritr_if_vrrp_id_ipv4
+ * VRRP ID for IPv4
+ * Note: Reserved for RIF types other than VLAN, FID and Sub-port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv4, 0x1C, 0, 8);
+
/* VLAN Interface */
/* reg_ritr_vlan_if_vid
@@ -7871,6 +8740,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvmlr),
MLXSW_REG(cwtp),
MLXSW_REG(cwtpm),
+ MLXSW_REG(pgcr),
MLXSW_REG(ppbt),
MLXSW_REG(pacl),
MLXSW_REG(pagt),
@@ -7879,9 +8749,20 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(prcr),
MLXSW_REG(pefa),
MLXSW_REG(ptce2),
+ MLXSW_REG(perpt),
+ MLXSW_REG(perar),
+ MLXSW_REG(ptce3),
+ MLXSW_REG(percr),
+ MLXSW_REG(pererp),
+ MLXSW_REG(iedr),
+ MLXSW_REG(qpts),
MLXSW_REG(qpcr),
MLXSW_REG(qtct),
MLXSW_REG(qeec),
+ MLXSW_REG(qrwe),
+ MLXSW_REG(qpdsm),
+ MLXSW_REG(qpdpm),
+ MLXSW_REG(qtctm),
MLXSW_REG(pmlp),
MLXSW_REG(pmtu),
MLXSW_REG(ptys),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index fd9299ccec72..79a31de7c825 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/resources.h
- * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016-2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_RESOURCES_H
#define _MLXSW_RESOURCES_H
@@ -42,6 +11,8 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SIZE,
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
+ MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE,
+ MLXSW_RES_ID_MAX_KVD_ACTION_SETS,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
MLXSW_RES_ID_CQE_V0,
MLXSW_RES_ID_CQE_V1,
@@ -63,6 +34,13 @@ enum mlxsw_res_id {
MLXSW_RES_ID_ACL_FLEX_KEYS,
MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE,
MLXSW_RES_ID_ACL_ACTIONS_PER_SET,
+ MLXSW_RES_ID_ACL_MAX_ERPT_BANKS,
+ MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE,
+ MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB,
MLXSW_RES_ID_MAX_CPU_POLICERS,
MLXSW_RES_ID_MAX_VRS,
MLXSW_RES_ID_MAX_RIFS,
@@ -83,6 +61,8 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SIZE] = 0x1001,
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
+ [MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005,
+ [MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
[MLXSW_RES_ID_CQE_V0] = 0x2210,
[MLXSW_RES_ID_CQE_V1] = 0x2211,
@@ -104,6 +84,13 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910,
[MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911,
[MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912,
+ [MLXSW_RES_ID_ACL_MAX_ERPT_BANKS] = 0x2940,
+ [MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE] = 0x2941,
+ [MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID] = 0x2942,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB] = 0x2950,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB] = 0x2951,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB] = 0x2952,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB] = 0x2953,
[MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
[MLXSW_RES_ID_MAX_VRS] = 0x2C01,
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 968b88af2ef5..6070d1591d1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
- * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -74,17 +41,27 @@
#include "spectrum_span.h"
#include "../mlxfw/mlxfw.h"
-#define MLXSW_FWREV_MAJOR 13
-#define MLXSW_FWREV_MINOR 1620
-#define MLXSW_FWREV_SUBMINOR 192
-#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
+#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
+
+#define MLXSW_SP1_FWREV_MAJOR 13
+#define MLXSW_SP1_FWREV_MINOR 1702
+#define MLXSW_SP1_FWREV_SUBMINOR 6
+#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
-#define MLXSW_SP_FW_FILENAME \
- "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
- "." __stringify(MLXSW_FWREV_MINOR) \
- "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
+static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
+ .major = MLXSW_SP1_FWREV_MAJOR,
+ .minor = MLXSW_SP1_FWREV_MINOR,
+ .subminor = MLXSW_SP1_FWREV_SUBMINOR,
+ .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
+};
+
+#define MLXSW_SP1_FW_FILENAME \
+ "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
+ "." __stringify(MLXSW_SP1_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
-static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
+static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
+static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp_driver_version[] = "1.0";
/* tx_hdr_version
@@ -338,35 +315,50 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
+ const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
+ const char *fw_filename = mlxsw_sp->fw_filename;
const struct firmware *firmware;
int err;
+ /* Don't check if driver does not require it */
+ if (!req_rev || !fw_filename)
+ return 0;
+
/* Validate driver & FW are compatible */
- if (rev->major != MLXSW_FWREV_MAJOR) {
+ if (rev->major != req_rev->major) {
WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
- rev->major, MLXSW_FWREV_MAJOR);
+ rev->major, req_rev->major);
return -EINVAL;
}
- if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
- MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
+ if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
+ MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor))
return 0;
dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
rev->major, rev->minor, rev->subminor);
dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
- MLXSW_SP_FW_FILENAME);
+ fw_filename);
- err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
+ err = request_firmware_direct(&firmware, fw_filename,
mlxsw_sp->bus_info->dev);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
- MLXSW_SP_FW_FILENAME);
+ fw_filename);
return err;
}
err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
release_firmware(firmware);
- return err;
+ if (err)
+ dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
+
+ /* On FW flash success, tell the caller FW reset is needed
+ * if current FW supports it.
+ */
+ if (rev->minor >= req_rev->can_reset_minor)
+ return err ? err : -EAGAIN;
+ else
+ return 0;
}
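
To make the branch rule concrete (illustrative helper, not from this patch): two minors are considered compatible exactly when they share minor / 100.

static bool mlxsw_sp_fw_rev_minor_compatible(u16 minor, u16 req_minor)
{
	/* e.g. running 1703 vs. required 1702: 17 == 17, compatible;
	 * running 1620 vs. required 1702: 16 != 17, a flash is attempted.
	 */
	return MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ==
	       MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_minor);
}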
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
@@ -1441,6 +1433,11 @@ mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
return 0;
case TC_CLSFLOWER_STATS:
return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
+ case TC_CLSFLOWER_TMPLT_CREATE:
+ return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
+ case TC_CLSFLOWER_TMPLT_DESTROY:
+ mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -1503,7 +1500,8 @@ static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tcf_block *block, bool ingress)
+ struct tcf_block *block, bool ingress,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_block *acl_block;
@@ -1518,7 +1516,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
return -ENOMEM;
block_cb = __tcf_block_cb_register(block,
mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp, acl_block);
+ mlxsw_sp, acl_block, extack);
if (IS_ERR(block_cb)) {
err = PTR_ERR(block_cb);
goto err_cb_register;
@@ -1541,7 +1539,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
err_block_bind:
if (!tcf_block_cb_decref(block_cb)) {
- __tcf_block_cb_unregister(block_cb);
+ __tcf_block_cb_unregister(block, block_cb);
err_cb_register:
mlxsw_sp_acl_block_destroy(acl_block);
}
@@ -1571,7 +1569,7 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
mlxsw_sp_port, ingress);
if (!err && !tcf_block_cb_decref(block_cb)) {
- __tcf_block_cb_unregister(block_cb);
+ __tcf_block_cb_unregister(block, block_cb);
mlxsw_sp_acl_block_destroy(acl_block);
}
}
@@ -1596,11 +1594,12 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
switch (f->command) {
case TC_BLOCK_BIND:
err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
- mlxsw_sp_port);
+ mlxsw_sp_port, f->extack);
if (err)
return err;
err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
- f->block, ingress);
+ f->block, ingress,
+ f->extack);
if (err) {
tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
return err;
@@ -1712,7 +1711,8 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
+ sizeof(drvinfo->driver));
strlcpy(drvinfo->version, mlxsw_sp_driver_version,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
@@ -1873,6 +1873,52 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
+ {
+ .str = "ether_pkts64octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
+ },
+ {
+ .str = "ether_pkts65to127octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
+ },
+ {
+ .str = "ether_pkts128to255octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
+ },
+ {
+ .str = "ether_pkts256to511octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
+ },
+ {
+ .str = "ether_pkts512to1023octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
+ },
+ {
+ .str = "ether_pkts1024to1518octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
+ },
+ {
+ .str = "ether_pkts1519to2047octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
+ },
+ {
+ .str = "ether_pkts2048to4095octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
+ },
+ {
+ .str = "ether_pkts4096to8191octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
+ },
+ {
+ .str = "ether_pkts8192to10239octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
+
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
{
.str = "rx_octets_prio",
@@ -1925,9 +1971,11 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
- (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
- MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
- IEEE_8021QAZ_MAX_TCS)
+ MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
+ (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
+ IEEE_8021QAZ_MAX_TCS) + \
+ (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
+ TC_MAX_QUEUE))
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
@@ -1964,11 +2012,16 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
mlxsw_sp_port_get_prio_strings(&p, i);
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ for (i = 0; i < TC_MAX_QUEUE; i++)
mlxsw_sp_port_get_tc_strings(&p, i);
break;
@@ -2003,10 +2056,14 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
switch (grp) {
- case MLXSW_REG_PPCNT_IEEE_8023_CNT:
+ case MLXSW_REG_PPCNT_IEEE_8023_CNT:
*p_hw_stats = mlxsw_sp_port_hw_stats;
*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
break;
+ case MLXSW_REG_PPCNT_RFC_2819_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+ break;
case MLXSW_REG_PPCNT_PRIO_CNT:
*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
@@ -2056,6 +2113,11 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
data, data_index);
data_index = MLXSW_SP_PORT_HW_STATS_LEN;
+ /* RFC 2819 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+
/* Per-Priority Counters */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
@@ -2064,7 +2126,7 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
}
/* Per-TC Counters */
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ for (i = 0; i < TC_MAX_QUEUE; i++) {
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
data, data_index);
data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
@@ -2711,9 +2773,16 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
false, 0);
if (err)
return err;
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_TC,
+ i + 8, i,
+ false, 0);
+ if (err)
+ return err;
}
- /* Make sure the max shaper is disabled in all hierarcies that
+ /* Make sure the max shaper is disabled in all hierarchies that
* support it.
*/
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
@@ -2748,6 +2817,16 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
+static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qtctm_pl[MLXSW_REG_QTCTM_LEN];
+
+ mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
+}
+
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool split, u8 module, u8 width, u8 lane)
{
@@ -2876,6 +2955,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_ets_init;
}
+ err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_tc_mc_mode;
+ }
+
/* ETS and buffers must be initialized before DCB. */
err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
if (err) {
@@ -2932,6 +3018,8 @@ err_port_qdiscs_init:
err_port_fids_init:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
+ mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
+err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
@@ -2966,6 +3054,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
+ mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
kfree(mlxsw_sp_port->sample);
@@ -3371,6 +3460,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
/* PKT Sample trap */
MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
false, SP_IP2ME, DISCARD),
@@ -3623,10 +3714,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->bus_info = mlxsw_bus_info;
err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
+ if (err)
return err;
- }
err = mlxsw_sp_base_mac_get(mlxsw_sp);
if (err) {
@@ -3757,6 +3846,36 @@ err_fids_init:
return err;
}
+static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
+ mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
+ mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
+}
+
+static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
+}
+
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -3777,7 +3896,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_kvdl_fini(mlxsw_sp);
}
-static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
+static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_flood_tables = 1,
@@ -3803,6 +3922,28 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
},
};
+static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
+ .used_max_mid = 1,
+ .max_mid = MLXSW_SP_MID_MAX,
+ .used_flood_tables = 1,
+ .used_flood_mode = 1,
+ .flood_mode = 3,
+ .max_fid_offset_flood_tables = 3,
+ .fid_offset_flood_table_size = VLAN_N_VID - 1,
+ .max_fid_flood_tables = 3,
+ .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+};
+
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
struct devlink_resource_size_params *kvd_size_params,
@@ -3839,7 +3980,7 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
DEVLINK_RESOURCE_UNIT_ENTRY);
}
-static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
+static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
struct devlink_resource_size_params hash_single_size_params;
@@ -3850,7 +3991,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
const struct mlxsw_config_profile *profile;
int err;
- profile = &mlxsw_sp_config_profile;
+ profile = &mlxsw_sp1_config_profile;
if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
return -EIO;
@@ -3876,7 +4017,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
return err;
- err = mlxsw_sp_kvdl_resources_register(mlxsw_core);
+ err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
if (err)
return err;
@@ -3905,6 +4046,16 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
return 0;
}
+static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_sp1_resources_kvd_register(mlxsw_core);
+}
+
+static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ return 0;
+}
+
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
u64 *p_single_size, u64 *p_double_size,
@@ -3960,10 +4111,10 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
return 0;
}
-static struct mlxsw_driver mlxsw_sp_driver = {
- .kind = mlxsw_sp_driver_name,
+static struct mlxsw_driver mlxsw_sp1_driver = {
+ .kind = mlxsw_sp1_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
- .init = mlxsw_sp_init,
+ .init = mlxsw_sp1_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
@@ -3979,10 +4130,35 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
- .resources_register = mlxsw_sp_resources_register,
+ .resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.txhdr_len = MLXSW_TXHDR_LEN,
- .profile = &mlxsw_sp_config_profile,
+ .profile = &mlxsw_sp1_config_profile,
+ .res_query_enabled = true,
+};
+
+static struct mlxsw_driver mlxsw_sp2_driver = {
+ .kind = mlxsw_sp2_driver_name,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp2_init,
+ .fini = mlxsw_sp_fini,
+ .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp2_resources_register,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp2_config_profile,
.res_query_enabled = true,
};
@@ -4397,7 +4573,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
if (!is_vlan_dev(upper_dev) &&
!netif_is_lag_master(upper_dev) &&
!netif_is_bridge_master(upper_dev) &&
- !netif_is_ovs_master(upper_dev)) {
+ !netif_is_ovs_master(upper_dev) &&
+ !netif_is_macvlan(upper_dev)) {
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
@@ -4423,6 +4600,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
return -EINVAL;
}
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
return -EINVAL;
@@ -4461,6 +4643,9 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
else
mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
+ } else if (netif_is_macvlan(upper_dev)) {
+ if (!info->linking)
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
}
break;
}
@@ -4545,8 +4730,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!netif_is_bridge_master(upper_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers");
+ if (!netif_is_bridge_master(upper_dev) &&
+ !netif_is_macvlan(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
if (!info->linking)
@@ -4558,6 +4744,11 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4571,6 +4762,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
vlan_dev,
upper_dev);
+ } else if (netif_is_macvlan(upper_dev)) {
+ if (!info->linking)
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
} else {
err = -EINVAL;
WARN_ON(1);
@@ -4620,6 +4814,64 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
return 0;
}
+static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+
+ if (!mlxsw_sp)
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EOPNOTSUPP;
+ }
+ if (!info->linking)
+ break;
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (info->linking)
+ break;
+ if (netif_is_macvlan(upper_dev))
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+
+ if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+
+ return -EOPNOTSUPP;
+}
+
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
struct netdev_notifier_changeupper_info *info = ptr;
@@ -4661,6 +4913,10 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
else if (is_vlan_dev(dev))
err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
+ else if (netif_is_bridge_master(dev))
+ err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
+ else if (netif_is_macvlan(dev))
+ err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
return notifier_from_errno(err);
}
@@ -4681,14 +4937,24 @@ static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
.notifier_call = mlxsw_sp_inet6addr_event,
};
-static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
+static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
{0, },
};
-static struct pci_driver mlxsw_sp_pci_driver = {
- .name = mlxsw_sp_driver_name,
- .id_table = mlxsw_sp_pci_id_table,
+static struct pci_driver mlxsw_sp1_pci_driver = {
+ .name = mlxsw_sp1_driver_name,
+ .id_table = mlxsw_sp1_pci_id_table,
+};
+
+static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sp2_pci_driver = {
+ .name = mlxsw_sp2_driver_name,
+ .id_table = mlxsw_sp2_pci_id_table,
};
static int __init mlxsw_sp_module_init(void)
@@ -4700,19 +4966,31 @@ static int __init mlxsw_sp_module_init(void)
register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
- err = mlxsw_core_driver_register(&mlxsw_sp_driver);
+ err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
+ if (err)
+ goto err_sp1_core_driver_register;
+
+ err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
+ if (err)
+ goto err_sp2_core_driver_register;
+
+ err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
if (err)
- goto err_core_driver_register;
+ goto err_sp1_pci_driver_register;
- err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
+ err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
if (err)
- goto err_pci_driver_register;
+ goto err_sp2_pci_driver_register;
return 0;
-err_pci_driver_register:
- mlxsw_core_driver_unregister(&mlxsw_sp_driver);
-err_core_driver_register:
+err_sp2_pci_driver_register:
+ mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
+err_sp1_pci_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
+err_sp2_core_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
+err_sp1_core_driver_register:
unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
@@ -4722,8 +5000,10 @@ err_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
- mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
- mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
@@ -4736,5 +5016,6 @@ module_exit(mlxsw_sp_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
-MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
-MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
+MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4a519d8edec8..3ae930196741 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H
@@ -54,6 +21,7 @@
#include "core.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
+#include "reg.h"
#define MLXSW_SP_FID_8021D_MAX 1024
@@ -145,6 +113,9 @@ struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
struct mlxsw_sp_fid_core;
struct mlxsw_sp_kvdl;
+struct mlxsw_sp_kvdl_ops;
+struct mlxsw_sp_mr_tcam_ops;
+struct mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
@@ -168,6 +139,13 @@ struct mlxsw_sp {
struct mlxsw_sp_span_entry *entries;
int entries_count;
} span;
+ const struct mlxsw_fw_rev *req_rev;
+ const char *fw_filename;
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops;
+ const struct mlxsw_afa_ops *afa_ops;
+ const struct mlxsw_afk_ops *afk_ops;
+ const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
+ const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
};
static inline struct mlxsw_sp_upper *
@@ -233,6 +211,7 @@ struct mlxsw_sp_port {
struct ieee_ets *ets;
struct ieee_maxrate *maxrate;
struct ieee_pfc *pfc;
+ enum mlxsw_reg_qpts_trust_state trust_state;
} dcb;
struct {
u8 module;
@@ -407,6 +386,8 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev);
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
unsigned long event, void *ptr);
int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
@@ -435,15 +416,62 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
/* spectrum_kvdl.c */
+enum mlxsw_sp_kvdl_entry_type {
+ MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+};
+
+static inline unsigned int
+mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
+{
+ switch (type) {
+ case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */
+ default:
+ return 1;
+ }
+}
+
+struct mlxsw_sp_kvdl_ops {
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ int (*alloc)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index);
+ void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index);
+ int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count);
+ int (*resources_register)(struct mlxsw_sp *mlxsw_sp, void *priv);
+};
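
A hedged sketch of a caller using the type-aware KVD linear API declared below (helper names are illustrative; the per-ASIC ops behind the calls decide how the index space is carved up):

static int mlxsw_sp_act_set_index_get(struct mlxsw_sp *mlxsw_sp, u32 *p_index)
{
	/* One action-set entry from the KVD linear area */
	return mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
				   1, p_index);
}

static void mlxsw_sp_act_set_index_put(struct mlxsw_sp *mlxsw_sp, u32 index)
{
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
			   1, index);
}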
+
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
- u32 *p_entry_index);
-void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
-int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
- unsigned int entry_count,
- unsigned int *p_alloc_size);
-int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index);
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index);
+int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count);
+
+/* spectrum1_kvdl.c */
+extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops;
+int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
+
+/* spectrum2_kvdl.c */
+extern const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops;
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
@@ -452,44 +480,14 @@ struct mlxsw_sp_acl_rule_info {
unsigned int counter_index;
};
-enum mlxsw_sp_acl_profile {
- MLXSW_SP_ACL_PROFILE_FLOWER,
-};
-
-struct mlxsw_sp_acl_profile_ops {
- size_t ruleset_priv_size;
- int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
- void *priv, void *ruleset_priv);
- void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
- int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress);
- void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress);
- u16 (*ruleset_group_id)(void *ruleset_priv);
- size_t rule_priv_size;
- int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv, void *rule_priv,
- struct mlxsw_sp_acl_rule_info *rulei);
- void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
- int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
- bool *activity);
-};
-
-struct mlxsw_sp_acl_ops {
- size_t priv_size;
- int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
- void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
- const struct mlxsw_sp_acl_profile_ops *
- (*profile_ops)(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_acl_profile profile);
-};
-
struct mlxsw_sp_acl_block;
struct mlxsw_sp_acl_ruleset;
/* spectrum_acl.c */
+enum mlxsw_sp_acl_profile {
+ MLXSW_SP_ACL_PROFILE_FLOWER,
+};
+
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
@@ -507,6 +505,7 @@ int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress);
+bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index,
@@ -514,7 +513,8 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index,
- enum mlxsw_sp_acl_profile profile);
+ enum mlxsw_sp_acl_profile profile,
+ struct mlxsw_afk_element_usage *tmplt_elusage);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset);
u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset);
@@ -541,25 +541,30 @@ int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_acl_block *block,
- struct net_device *out_dev);
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct net_device *out_dev);
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- u32 action, u16 vid, u16 proto, u8 prio);
+ u32 action, u16 vid, u16 proto, u8 prio,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_rule_info *rulei);
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- u16 fid);
+ u16 fid, struct netlink_ext_ack *extack);
struct mlxsw_sp_acl_rule;
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
- unsigned long cookie);
+ unsigned long cookie,
+ struct netlink_ext_ack *extack);
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
@@ -582,7 +587,52 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
/* spectrum_acl_tcam.c */
-extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
+struct mlxsw_sp_acl_tcam;
+struct mlxsw_sp_acl_tcam_region;
+
+struct mlxsw_sp_acl_tcam_ops {
+ enum mlxsw_reg_ptar_key_type key_type;
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *tcam);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ size_t region_priv_size;
+ int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
+ struct mlxsw_sp_acl_tcam_region *region);
+ void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv);
+ int (*region_associate)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region);
+ size_t chunk_priv_size;
+ void (*chunk_init)(void *region_priv, void *chunk_priv,
+ unsigned int priority);
+ void (*chunk_fini)(void *chunk_priv);
+ size_t entry_priv_size;
+ int (*entry_add)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
+ void (*entry_del)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv);
+ int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity);
+};
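
Each layer of the ops carries its own *_priv_size, letting the common TCAM code allocate variant-private state without knowing the concrete types; the sizes are set to the sp1/sp2 structs at the bottom of the per-ASIC files below. An illustrative sketch, with the helper name assumed:

static void *
acl_tcam_region_priv_alloc(const struct mlxsw_sp_acl_tcam_ops *ops)
{
	/* For Spectrum this is sizeof(struct mlxsw_sp1_acl_tcam_region),
	 * for Spectrum-2 sizeof(struct mlxsw_sp2_acl_tcam_region).
	 */
	return kzalloc(ops->region_priv_size, GFP_KERNEL);
}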
+
+/* spectrum1_acl_tcam.c */
+extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops;
+
+/* spectrum2_acl_tcam.c */
+extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops;
+
+/* spectrum_acl_flex_actions.c */
+extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops;
+extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops;
+
+/* spectrum_acl_flex_keys.c */
+extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
+extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
@@ -594,6 +644,12 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
+int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct tc_cls_flower_offload *f);
+void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct tc_cls_flower_offload *f);
/* spectrum_qdisc.c */
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -631,4 +687,40 @@ void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
+/* spectrum_mr.c */
+enum mlxsw_sp_mr_route_prio {
+ MLXSW_SP_MR_ROUTE_PRIO_SG,
+ MLXSW_SP_MR_ROUTE_PRIO_STARG,
+ MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
+ __MLXSW_SP_MR_ROUTE_PRIO_MAX
+};
+
+#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
+
+struct mlxsw_sp_mr_route_key;
+
+struct mlxsw_sp_mr_tcam_ops {
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ void (*fini)(void *priv);
+ size_t route_priv_size;
+ int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio);
+ void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key);
+ int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block);
+};
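
The three route callbacks compose into a simple lifecycle: route_create installs the route with its initial action block, route_update swaps the action block in place, and route_destroy removes it. A hedged usage sketch against the signatures above; the wrapper function is illustrative, not taken from spectrum_mr.c:

static int
mr_route_lifecycle_sketch(struct mlxsw_sp *mlxsw_sp,
			  const struct mlxsw_sp_mr_tcam_ops *ops,
			  void *priv, void *route_priv,
			  struct mlxsw_sp_mr_route_key *key,
			  struct mlxsw_afa_block *old_block,
			  struct mlxsw_afa_block *new_block)
{
	int err;

	err = ops->route_create(mlxsw_sp, priv, route_priv, key, old_block,
				MLXSW_SP_MR_ROUTE_PRIO_SG);
	if (err)
		return err;
	err = ops->route_update(mlxsw_sp, route_priv, key, new_block);
	ops->route_destroy(mlxsw_sp, priv, route_priv, key);
	return err;
}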
+
+/* spectrum1_mr_tcam.c */
+extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops;
+
+/* spectrum2_mr_tcam.c */
+extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops;
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
new file mode 100644
index 000000000000..2a9eac90002e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+struct mlxsw_sp1_acl_tcam_region {
+ struct mlxsw_sp_acl_ctcam_region cregion;
+ struct mlxsw_sp_acl_tcam_region *region;
+ struct {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+ struct mlxsw_sp_acl_ctcam_entry centry;
+ struct mlxsw_sp_acl_rule_info *rulei;
+ } catchall;
+};
+
+struct mlxsw_sp1_acl_tcam_chunk {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+};
+
+struct mlxsw_sp1_acl_tcam_entry {
+ struct mlxsw_sp_acl_ctcam_entry centry;
+};
+
+static int
+mlxsw_sp1_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp1_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+}
+
+static const struct mlxsw_sp_acl_ctcam_region_ops
+mlxsw_sp1_acl_ctcam_region_ops = {
+ .entry_insert = mlxsw_sp1_acl_ctcam_region_entry_insert,
+ .entry_remove = mlxsw_sp1_acl_ctcam_region_entry_remove,
+};
+
+static int mlxsw_sp1_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ return 0;
+}
+
+static void mlxsw_sp1_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+}
+
+static int
+mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_rule_info *rulei;
+ int err;
+
+ mlxsw_sp_acl_ctcam_chunk_init(&region->cregion,
+ &region->catchall.cchunk,
+ MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
+ rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
+ if (IS_ERR(rulei)) {
+ err = PTR_ERR(rulei);
+ goto err_rulei_create;
+ }
+ err = mlxsw_sp_acl_rulei_act_continue(rulei);
+ if (WARN_ON(err))
+ goto err_rulei_act_continue;
+ err = mlxsw_sp_acl_rulei_commit(rulei);
+ if (err)
+ goto err_rulei_commit;
+ err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
+ &region->catchall.cchunk,
+ &region->catchall.centry,
+ rulei, false);
+ if (err)
+ goto err_entry_add;
+ region->catchall.rulei = rulei;
+ return 0;
+
+err_entry_add:
+err_rulei_commit:
+err_rulei_act_continue:
+ mlxsw_sp_acl_rulei_destroy(rulei);
+err_rulei_create:
+ mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
+ return err;
+}
+
+static void
+mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;
+
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
+ &region->catchall.cchunk,
+ &region->catchall.centry);
+ mlxsw_sp_acl_rulei_destroy(rulei);
+ mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
+ struct mlxsw_sp_acl_tcam_region *_region)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ int err;
+
+ err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion,
+ _region,
+ &mlxsw_sp1_acl_ctcam_region_ops);
+ if (err)
+ return err;
+ err = mlxsw_sp1_acl_ctcam_region_catchall_add(mlxsw_sp, region);
+ if (err)
+ goto err_catchall_add;
+ region->region = _region;
+ return 0;
+
+err_catchall_add:
+ mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+ return err;
+}
+
+static void
+mlxsw_sp1_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+
+ mlxsw_sp1_acl_ctcam_region_catchall_del(mlxsw_sp, region);
+ mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ return 0;
+}
+
+static void mlxsw_sp1_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
+ unsigned int priority)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, &chunk->cchunk,
+ priority);
+}
+
+static void mlxsw_sp1_acl_tcam_chunk_fini(void *chunk_priv)
+{
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk);
+}
+
+static int mlxsw_sp1_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+
+ return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
+ &chunk->cchunk, &entry->centry,
+ rulei, false);
+}
+
+static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
+ &chunk->cchunk, &entry->centry);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *_region,
+ unsigned int offset,
+ bool *activity)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ int err;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
+ _region->tcam_region_info, offset, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ if (err)
+ return err;
+ *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
+ return 0;
+}
+
+static int
+mlxsw_sp1_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+ unsigned int offset;
+
+ offset = mlxsw_sp_acl_ctcam_entry_offset(&entry->centry);
+ return mlxsw_sp1_acl_tcam_region_entry_activity_get(mlxsw_sp,
+ region->region,
+ offset, activity);
+}
+
+const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = {
+ .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX,
+ .priv_size = 0,
+ .init = mlxsw_sp1_acl_tcam_init,
+ .fini = mlxsw_sp1_acl_tcam_fini,
+ .region_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_region),
+ .region_init = mlxsw_sp1_acl_tcam_region_init,
+ .region_fini = mlxsw_sp1_acl_tcam_region_fini,
+ .region_associate = mlxsw_sp1_acl_tcam_region_associate,
+ .chunk_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_chunk),
+ .chunk_init = mlxsw_sp1_acl_tcam_chunk_init,
+ .chunk_fini = mlxsw_sp1_acl_tcam_chunk_fini,
+ .entry_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_entry),
+ .entry_add = mlxsw_sp1_acl_tcam_entry_add,
+ .entry_del = mlxsw_sp1_acl_tcam_entry_del,
+ .entry_activity_get = mlxsw_sp1_acl_tcam_entry_activity_get,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
new file mode 100644
index 000000000000..09ee0a807747
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+
+#define MLXSW_SP1_KVDL_SINGLE_BASE 0
+#define MLXSW_SP1_KVDL_SINGLE_SIZE 16384
+#define MLXSW_SP1_KVDL_SINGLE_END \
+ (MLXSW_SP1_KVDL_SINGLE_SIZE + MLXSW_SP1_KVDL_SINGLE_BASE - 1)
+
+#define MLXSW_SP1_KVDL_CHUNKS_BASE \
+ (MLXSW_SP1_KVDL_SINGLE_BASE + MLXSW_SP1_KVDL_SINGLE_SIZE)
+#define MLXSW_SP1_KVDL_CHUNKS_SIZE 49152
+#define MLXSW_SP1_KVDL_CHUNKS_END \
+ (MLXSW_SP1_KVDL_CHUNKS_SIZE + MLXSW_SP1_KVDL_CHUNKS_BASE - 1)
+
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE \
+ (MLXSW_SP1_KVDL_CHUNKS_BASE + MLXSW_SP1_KVDL_CHUNKS_SIZE)
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE \
+ (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE)
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_END \
+ (MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE - 1)
+
+#define MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE 1
+#define MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE 32
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
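
Substituting the defines gives a fixed layout in which only the last partition's size depends on MLXSW_SP_KVD_LINEAR_SIZE; the ranges below follow purely from the arithmetic above:

/* Resolved index ranges:
 *   SINGLE:       [0, 16383],     allocation granularity 1
 *   CHUNKS:       [16384, 65535], allocation granularity 32
 *   LARGE_CHUNKS: [65536, MLXSW_SP_KVD_LINEAR_SIZE - 1],
 *                                 allocation granularity 512
 */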
+
+struct mlxsw_sp1_kvdl_part_info {
+ unsigned int part_index;
+ unsigned int start_index;
+ unsigned int end_index;
+ unsigned int alloc_size;
+ enum mlxsw_sp_resource_id resource_id;
+};
+
+enum mlxsw_sp1_kvdl_part_id {
+ MLXSW_SP1_KVDL_PART_ID_SINGLE,
+ MLXSW_SP1_KVDL_PART_ID_CHUNKS,
+ MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS,
+};
+
+#define MLXSW_SP1_KVDL_PART_INFO(id) \
+[MLXSW_SP1_KVDL_PART_ID_##id] = { \
+ .start_index = MLXSW_SP1_KVDL_##id##_BASE, \
+ .end_index = MLXSW_SP1_KVDL_##id##_END, \
+ .alloc_size = MLXSW_SP1_KVDL_##id##_ALLOC_SIZE, \
+ .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \
+}
+
+static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = {
+ MLXSW_SP1_KVDL_PART_INFO(SINGLE),
+ MLXSW_SP1_KVDL_PART_INFO(CHUNKS),
+ MLXSW_SP1_KVDL_PART_INFO(LARGE_CHUNKS),
+};
+
+#define MLXSW_SP1_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp1_kvdl_parts_info)
+
+struct mlxsw_sp1_kvdl_part {
+ struct mlxsw_sp1_kvdl_part_info info;
+ unsigned long usage[0]; /* Entries */
+};
+
+struct mlxsw_sp1_kvdl {
+ struct mlxsw_sp1_kvdl_part *parts[MLXSW_SP1_KVDL_PARTS_INFO_LEN];
+};
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_alloc_size_part(struct mlxsw_sp1_kvdl *kvdl,
+ unsigned int alloc_size)
+{
+ struct mlxsw_sp1_kvdl_part *part, *min_part = NULL;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ part = kvdl->parts[i];
+ if (alloc_size <= part->info.alloc_size &&
+ (!min_part ||
+ part->info.alloc_size <= min_part->info.alloc_size))
+ min_part = part;
+ }
+
+ return min_part ?: ERR_PTR(-ENOBUFS);
+}
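
Selection is smallest-fit: a partition qualifies when the requested count does not exceed its allocation size, and the smallest qualifying partition wins. Worked against the default granularities {1, 32, 512}:

/* entry_count 1   -> SINGLE       (alloc_size 1)
 * entry_count 20  -> CHUNKS       (alloc_size 32, smallest >= 20)
 * entry_count 100 -> LARGE_CHUNKS (alloc_size 512)
 * entry_count 600 -> no partition qualifies: ERR_PTR(-ENOBUFS)
 */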
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_index_part(struct mlxsw_sp1_kvdl *kvdl, u32 kvdl_index)
+{
+ struct mlxsw_sp1_kvdl_part *part;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ part = kvdl->parts[i];
+ if (kvdl_index >= part->info.start_index &&
+ kvdl_index <= part->info.end_index)
+ return part;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static u32
+mlxsw_sp1_kvdl_to_kvdl_index(const struct mlxsw_sp1_kvdl_part_info *info,
+ unsigned int entry_index)
+{
+ return info->start_index + entry_index * info->alloc_size;
+}
+
+static unsigned int
+mlxsw_sp1_kvdl_to_entry_index(const struct mlxsw_sp1_kvdl_part_info *info,
+ u32 kvdl_index)
+{
+ return (kvdl_index - info->start_index) / info->alloc_size;
+}
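
The two converters are inverses over aligned indexes. Worked with the CHUNKS defaults (start_index 16384, alloc_size 32):

/* entry_index 2:    kvdl_index  = 16384 + 2 * 32 = 16448
 * kvdl_index 16448: entry_index = (16448 - 16384) / 32 = 2
 */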
+
+static int mlxsw_sp1_kvdl_part_alloc(struct mlxsw_sp1_kvdl_part *part,
+ u32 *p_kvdl_index)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int entry_index, nr_entries;
+
+ nr_entries = (info->end_index - info->start_index + 1) /
+ info->alloc_size;
+ entry_index = find_first_zero_bit(part->usage, nr_entries);
+ if (entry_index == nr_entries)
+ return -ENOBUFS;
+ __set_bit(entry_index, part->usage);
+
+ *p_kvdl_index = mlxsw_sp1_kvdl_to_kvdl_index(info, entry_index);
+
+ return 0;
+}
+
+static void mlxsw_sp1_kvdl_part_free(struct mlxsw_sp1_kvdl_part *part,
+ u32 kvdl_index)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int entry_index;
+
+ entry_index = mlxsw_sp1_kvdl_to_entry_index(info, kvdl_index);
+ __clear_bit(entry_index, part->usage);
+}
+
+static int mlxsw_sp1_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ u32 *p_entry_index)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+	/* Find the partition with the smallest allocation size that
+	 * satisfies the requested size.
+	 */
+ part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
+ if (IS_ERR(part))
+ return PTR_ERR(part);
+
+ return mlxsw_sp1_kvdl_part_alloc(part, p_entry_index);
+}
+
+static void mlxsw_sp1_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = mlxsw_sp1_kvdl_index_part(kvdl, entry_index);
+ if (IS_ERR(part))
+ return;
+ mlxsw_sp1_kvdl_part_free(part, entry_index);
+}
+
+static int mlxsw_sp1_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+ void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_size)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
+ if (IS_ERR(part))
+ return PTR_ERR(part);
+
+ *p_alloc_size = part->info.alloc_size;
+
+ return 0;
+}
+
+static void mlxsw_sp1_kvdl_part_update(struct mlxsw_sp1_kvdl_part *part,
+ struct mlxsw_sp1_kvdl_part *part_prev,
+ unsigned int size)
+{
+ if (!part_prev) {
+ part->info.end_index = size - 1;
+ } else {
+ part->info.start_index = part_prev->info.end_index + 1;
+ part->info.end_index = part->info.start_index + size - 1;
+ }
+}
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp1_kvdl_part_info *info,
+ struct mlxsw_sp1_kvdl_part *part_prev)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl_part *part;
+ bool need_update = true;
+ unsigned int nr_entries;
+ size_t usage_size;
+ u64 resource_size;
+ int err;
+
+ err = devlink_resource_size_get(devlink, info->resource_id,
+ &resource_size);
+ if (err) {
+ need_update = false;
+ resource_size = info->end_index - info->start_index + 1;
+ }
+
+ nr_entries = div_u64(resource_size, info->alloc_size);
+ usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
+ part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+ if (!part)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&part->info, info, sizeof(part->info));
+
+ if (need_update)
+ mlxsw_sp1_kvdl_part_update(part, part_prev, resource_size);
+ return part;
+}
+
+static void mlxsw_sp1_kvdl_part_fini(struct mlxsw_sp1_kvdl_part *part)
+{
+ kfree(part);
+}
+
+static int mlxsw_sp1_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_kvdl *kvdl)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info;
+ struct mlxsw_sp1_kvdl_part *part_prev = NULL;
+ int err, i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ info = &mlxsw_sp1_kvdl_parts_info[i];
+ kvdl->parts[i] = mlxsw_sp1_kvdl_part_init(mlxsw_sp, info,
+ part_prev);
+ if (IS_ERR(kvdl->parts[i])) {
+ err = PTR_ERR(kvdl->parts[i]);
+ goto err_kvdl_part_init;
+ }
+ part_prev = kvdl->parts[i];
+ }
+ return 0;
+
+err_kvdl_part_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
+ return err;
+}
+
+static void mlxsw_sp1_kvdl_parts_fini(struct mlxsw_sp1_kvdl *kvdl)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
+ mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
+}
+
+static u64 mlxsw_sp1_kvdl_part_occ(struct mlxsw_sp1_kvdl_part *part)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int nr_entries;
+ int bit = -1;
+ u64 occ = 0;
+
+ nr_entries = (info->end_index -
+ info->start_index + 1) /
+ info->alloc_size;
+ while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
+ < nr_entries)
+ occ += info->alloc_size;
+ return occ;
+}
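
Occupancy is reported in entries rather than bits, so each set usage bit contributes one full allocation unit. For example, assuming three live allocations in the CHUNKS partition:

/* 3 bits set, alloc_size 32: occ = 3 * 32 = 96 entries,
 * which is what the devlink occupancy getters below report.
 */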
+
+static u64 mlxsw_sp1_kvdl_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ u64 occ = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
+ occ += mlxsw_sp1_kvdl_part_occ(kvdl->parts[i]);
+
+ return occ;
+}
+
+static u64 mlxsw_sp1_kvdl_single_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_SINGLE];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp1_kvdl_chunks_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_CHUNKS];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp1_kvdl_large_chunks_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ int err;
+
+ err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl);
+ if (err)
+ return err;
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ mlxsw_sp1_kvdl_occ_get,
+ kvdl);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ mlxsw_sp1_kvdl_single_occ_get,
+ kvdl);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ mlxsw_sp1_kvdl_chunks_occ_get,
+ kvdl);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ mlxsw_sp1_kvdl_large_chunks_occ_get,
+ kvdl);
+ return 0;
+}
+
+static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR);
+ mlxsw_sp1_kvdl_parts_fini(kvdl);
+}
+
+const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops = {
+ .priv_size = sizeof(struct mlxsw_sp1_kvdl),
+ .init = mlxsw_sp1_kvdl_init,
+ .fini = mlxsw_sp1_kvdl_fini,
+ .alloc = mlxsw_sp1_kvdl_alloc,
+ .free = mlxsw_sp1_kvdl_free,
+ .alloc_size_query = mlxsw_sp1_kvdl_alloc_size_query,
+};
+
+int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ static struct devlink_resource_size_params size_params;
+ u32 kvdl_max_size;
+ int err;
+
+ kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
+ MLXSW_SP1_KVDL_SINGLE_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
+ MLXSW_SP1_KVDL_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
new file mode 100644
index 000000000000..c8c67536917b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_actions.h"
+#include "spectrum_mr.h"
+
+struct mlxsw_sp1_mr_tcam_region {
+ struct mlxsw_sp *mlxsw_sp;
+ enum mlxsw_reg_rtar_key_type rtar_key_type;
+ struct parman *parman;
+ struct parman_prio *parman_prios;
+};
+
+struct mlxsw_sp1_mr_tcam {
+ struct mlxsw_sp1_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
+};
+
+struct mlxsw_sp1_mr_tcam_route {
+ struct parman_item parman_item;
+ struct parman_prio *parman_prio;
+};
+
+static int mlxsw_sp1_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
+ struct parman_item *parman_item,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
+ key->vrid,
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+ ntohl(key->group.addr4),
+ ntohl(key->group_mask.addr4),
+ ntohl(key->source.addr4),
+ ntohl(key->source_mask.addr4),
+ mlxsw_afa_block_first_set(afa_block));
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
+ key->vrid,
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+ key->group.addr6,
+ key->group_mask.addr6,
+ key->source.addr6,
+ key->source_mask.addr6,
+ mlxsw_afa_block_first_set(afa_block));
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static int mlxsw_sp1_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp,
+ struct parman_item *parman_item,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
+ char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
+ key->vrid, 0, 0, 0, 0, 0, 0, NULL);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
+ key->vrid, 0, 0, zero_addr, zero_addr,
+ zero_addr, zero_addr, NULL);
+ break;
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static struct mlxsw_sp1_mr_tcam_region *
+mlxsw_sp1_mr_tcam_protocol_region(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ enum mlxsw_sp_l3proto proto)
+{
+ return &mr_tcam->tcam_regions[proto];
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_parman_item_add(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ struct mlxsw_sp1_mr_tcam_route *route,
+ struct mlxsw_sp_mr_route_key *key,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ struct mlxsw_sp1_mr_tcam_region *tcam_region;
+ int err;
+
+ tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto);
+ err = parman_item_add(tcam_region->parman,
+ &tcam_region->parman_prios[prio],
+ &route->parman_item);
+ if (err)
+ return err;
+
+ route->parman_prio = &tcam_region->parman_prios[prio];
+ return 0;
+}
+
+static void
+mlxsw_sp1_mr_tcam_route_parman_item_remove(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ struct mlxsw_sp1_mr_tcam_route *route,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct mlxsw_sp1_mr_tcam_region *tcam_region;
+
+ tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto);
+ parman_item_remove(tcam_region->parman,
+ route->parman_prio, &route->parman_item);
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ int err;
+
+ err = mlxsw_sp1_mr_tcam_route_parman_item_add(mr_tcam, route,
+ key, prio);
+ if (err)
+ return err;
+
+ err = mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ key, afa_block);
+ if (err)
+ goto err_route_replace;
+ return 0;
+
+err_route_replace:
+ mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key);
+ return err;
+}
+
+static void
+mlxsw_sp1_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+
+ mlxsw_sp1_mr_tcam_route_remove(mlxsw_sp, &route->parman_item, key);
+ mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key);
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+
+ return mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ key, afa_block);
+}
+
+#define MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP 16
+
+static int
+mlxsw_sp1_mr_tcam_region_alloc(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
+ mr_tcam_region->rtar_key_type,
+ MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void
+mlxsw_sp1_mr_tcam_region_free(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
+ mr_tcam_region->rtar_key_type, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static int mlxsw_sp1_mr_tcam_region_parman_resize(void *priv,
+ unsigned long new_count)
+{
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv;
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+ u64 max_tcam_rules;
+
+ max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+ if (new_count > max_tcam_rules)
+ return -EINVAL;
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
+ mr_tcam_region->rtar_key_type, new_count);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void mlxsw_sp1_mr_tcam_region_parman_move(void *priv,
+ unsigned long from_index,
+ unsigned long to_index,
+ unsigned long count)
+{
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv;
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rrcr_pl[MLXSW_REG_RRCR_LEN];
+
+ mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
+ from_index, count,
+ mr_tcam_region->rtar_key_type, to_index);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
+}
+
+static const struct parman_ops mlxsw_sp1_mr_tcam_region_parman_ops = {
+ .base_count = MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT,
+ .resize_step = MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP,
+ .resize = mlxsw_sp1_mr_tcam_region_parman_resize,
+ .move = mlxsw_sp1_mr_tcam_region_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
+
+static int
+mlxsw_sp1_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region,
+ enum mlxsw_reg_rtar_key_type rtar_key_type)
+{
+ struct parman_prio *parman_prios;
+ struct parman *parman;
+ int err;
+ int i;
+
+ mr_tcam_region->rtar_key_type = rtar_key_type;
+ mr_tcam_region->mlxsw_sp = mlxsw_sp;
+
+ err = mlxsw_sp1_mr_tcam_region_alloc(mr_tcam_region);
+ if (err)
+ return err;
+
+ parman = parman_create(&mlxsw_sp1_mr_tcam_region_parman_ops,
+ mr_tcam_region);
+ if (!parman) {
+ err = -ENOMEM;
+ goto err_parman_create;
+ }
+ mr_tcam_region->parman = parman;
+
+ parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
+ sizeof(*parman_prios), GFP_KERNEL);
+ if (!parman_prios) {
+ err = -ENOMEM;
+ goto err_parman_prios_alloc;
+ }
+ mr_tcam_region->parman_prios = parman_prios;
+
+ for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+ parman_prio_init(mr_tcam_region->parman,
+ &mr_tcam_region->parman_prios[i], i);
+ return 0;
+
+err_parman_prios_alloc:
+ parman_destroy(parman);
+err_parman_create:
+ mlxsw_sp1_mr_tcam_region_free(mr_tcam_region);
+ return err;
+}
+
+static void
+mlxsw_sp1_mr_tcam_region_fini(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+ parman_prio_fini(&mr_tcam_region->parman_prios[i]);
+ kfree(mr_tcam_region->parman_prios);
+ parman_destroy(mr_tcam_region->parman);
+ mlxsw_sp1_mr_tcam_region_free(mr_tcam_region);
+}
+
+static int mlxsw_sp1_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
+ u32 rtar_key;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
+ return -EIO;
+
+ rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
+ err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp,
+ &region[MLXSW_SP_L3_PROTO_IPV4],
+ rtar_key);
+ if (err)
+ return err;
+
+ rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
+ err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp,
+ &region[MLXSW_SP_L3_PROTO_IPV6],
+ rtar_key);
+ if (err)
+ goto err_ipv6_region_init;
+
+ return 0;
+
+err_ipv6_region_init:
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+ return err;
+}
+
+static void mlxsw_sp1_mr_tcam_fini(void *priv)
+{
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
+
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+}
+
+const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops = {
+ .priv_size = sizeof(struct mlxsw_sp1_mr_tcam),
+ .init = mlxsw_sp1_mr_tcam_init,
+ .fini = mlxsw_sp1_mr_tcam_fini,
+ .route_priv_size = sizeof(struct mlxsw_sp1_mr_tcam_route),
+ .route_create = mlxsw_sp1_mr_tcam_route_create,
+ .route_destroy = mlxsw_sp1_mr_tcam_route_destroy,
+ .route_update = mlxsw_sp1_mr_tcam_route_update,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
new file mode 100644
index 000000000000..8ca77f3e8f27
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+#include "core_acl_flex_actions.h"
+
+struct mlxsw_sp2_acl_tcam {
+ struct mlxsw_sp_acl_atcam atcam;
+ u32 kvdl_index;
+ unsigned int kvdl_count;
+};
+
+struct mlxsw_sp2_acl_tcam_region {
+ struct mlxsw_sp_acl_atcam_region aregion;
+ struct mlxsw_sp_acl_tcam_region *region;
+};
+
+struct mlxsw_sp2_acl_tcam_chunk {
+ struct mlxsw_sp_acl_atcam_chunk achunk;
+};
+
+struct mlxsw_sp2_acl_tcam_entry {
+ struct mlxsw_sp_acl_atcam_entry aentry;
+ struct mlxsw_afa_block *act_block;
+};
+
+static int
+mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+ struct mlxsw_sp_acl_erp *erp;
+
+ aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
+ aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
+
+ erp = mlxsw_sp_acl_erp_get(aregion, mask, true);
+ if (IS_ERR(erp))
+ return PTR_ERR(erp);
+ aentry->erp = erp;
+
+ return 0;
+}
+
+static void
+mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+
+ aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
+ aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
+
+ mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+}
+
+static const struct mlxsw_sp_acl_ctcam_region_ops
+mlxsw_sp2_acl_ctcam_region_ops = {
+ .entry_insert = mlxsw_sp2_acl_ctcam_region_entry_insert,
+ .entry_remove = mlxsw_sp2_acl_ctcam_region_entry_remove,
+};
+
+static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *_tcam)
+{
+ struct mlxsw_sp2_acl_tcam *tcam = priv;
+ struct mlxsw_afa_block *afa_block;
+ char pefa_pl[MLXSW_REG_PEFA_LEN];
+ char pgcr_pl[MLXSW_REG_PGCR_LEN];
+ char *enc_actions;
+ int i;
+ int err;
+
+ tcam->kvdl_count = _tcam->max_regions;
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, &tcam->kvdl_index);
+ if (err)
+ return err;
+
+	/* Create a flex action block and set the default action (continue),
+	 * but don't commit it. We only need the current set encoding, which
+	 * is then written using the PEFA register to all indexes of all
+	 * regions.
+	 */
+ afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
+ if (!afa_block) {
+ err = -ENOMEM;
+ goto err_afa_block;
+ }
+ err = mlxsw_afa_block_continue(afa_block);
+ if (WARN_ON(err))
+ goto err_afa_block_continue;
+ enc_actions = mlxsw_afa_block_cur_set(afa_block);
+
+ for (i = 0; i < tcam->kvdl_count; i++) {
+ mlxsw_reg_pefa_pack(pefa_pl, tcam->kvdl_index + i,
+ true, enc_actions);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+ if (err)
+ goto err_pefa_write;
+ }
+ mlxsw_reg_pgcr_pack(pgcr_pl, tcam->kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pgcr), pgcr_pl);
+ if (err)
+ goto err_pgcr_write;
+
+ err = mlxsw_sp_acl_atcam_init(mlxsw_sp, &tcam->atcam);
+ if (err)
+ goto err_atcam_init;
+
+ mlxsw_afa_block_destroy(afa_block);
+ return 0;
+
+err_atcam_init:
+err_pgcr_write:
+err_pefa_write:
+err_afa_block_continue:
+ mlxsw_afa_block_destroy(afa_block);
+err_afa_block:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, tcam->kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_acl_tcam *tcam = priv;
+
+ mlxsw_sp_acl_atcam_fini(mlxsw_sp, &tcam->atcam);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, tcam->kvdl_index);
+}
+
+static int
+mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
+ struct mlxsw_sp_acl_tcam_region *_region)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam *tcam = tcam_priv;
+
+ region->region = _region;
+
+ return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, &tcam->atcam,
+ &region->aregion, _region,
+ &mlxsw_sp2_acl_ctcam_region_ops);
+}
+
+static void
+mlxsw_sp2_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+
+ mlxsw_sp_acl_atcam_region_fini(&region->aregion);
+}
+
+static int
+mlxsw_sp2_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ return mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id);
+}
+
+static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
+ unsigned int priority)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_atcam_chunk_init(&region->aregion, &chunk->achunk,
+ priority);
+}
+
+static void mlxsw_sp2_acl_tcam_chunk_fini(void *chunk_priv)
+{
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_atcam_chunk_fini(&chunk->achunk);
+}
+
+static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ entry->act_block = rulei->act_block;
+ return mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, &region->aregion,
+ &chunk->achunk, &entry->aentry,
+ rulei);
+}
+
+static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, &region->aregion, &chunk->achunk,
+ &entry->aentry);
+}
+
+static int
+mlxsw_sp2_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity)
+{
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ return mlxsw_afa_block_activity_get(entry->act_block, activity);
+}
+
+const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = {
+ .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX2,
+ .priv_size = sizeof(struct mlxsw_sp2_acl_tcam),
+ .init = mlxsw_sp2_acl_tcam_init,
+ .fini = mlxsw_sp2_acl_tcam_fini,
+ .region_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_region),
+ .region_init = mlxsw_sp2_acl_tcam_region_init,
+ .region_fini = mlxsw_sp2_acl_tcam_region_fini,
+ .region_associate = mlxsw_sp2_acl_tcam_region_associate,
+ .chunk_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_chunk),
+ .chunk_init = mlxsw_sp2_acl_tcam_chunk_init,
+ .chunk_fini = mlxsw_sp2_acl_tcam_chunk_fini,
+ .entry_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_entry),
+ .entry_add = mlxsw_sp2_acl_tcam_entry_add,
+ .entry_del = mlxsw_sp2_acl_tcam_entry_del,
+ .entry_activity_get = mlxsw_sp2_acl_tcam_entry_activity_get,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
new file mode 100644
index 000000000000..68c8b148bef2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+#include "resources.h"
+
+struct mlxsw_sp2_kvdl_part_info {
+ u8 res_type;
+	/* For each defined partition we need to know how many
+	 * usage bits we need and how many indexes are represented
+	 * by a single bit. This can be obtained by querying the
+	 * appropriate FW resources, so keep the resource ids
+	 * needed for this purpose in the partition definition.
+	 */
+ enum mlxsw_res_id usage_bit_count_res_id;
+ enum mlxsw_res_id index_range_res_id;
+};
+
+#define MLXSW_SP2_KVDL_PART_INFO(_entry_type, _res_type, \
+ _usage_bit_count_res_id, _index_range_res_id) \
+[MLXSW_SP_KVDL_ENTRY_TYPE_##_entry_type] = { \
+ .res_type = _res_type, \
+ .usage_bit_count_res_id = MLXSW_RES_ID_##_usage_bit_count_res_id, \
+ .index_range_res_id = MLXSW_RES_ID_##_index_range_res_id, \
+}
+
+static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = {
+ MLXSW_SP2_KVDL_PART_INFO(ADJ, 0x21, KVD_SIZE, MAX_KVD_LINEAR_RANGE),
+ MLXSW_SP2_KVDL_PART_INFO(ACTSET, 0x23, MAX_KVD_ACTION_SETS,
+ MAX_KVD_ACTION_SETS),
+ MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE),
+ MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE),
+};
+
+#define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info)
+
+struct mlxsw_sp2_kvdl_part {
+ const struct mlxsw_sp2_kvdl_part_info *info;
+ unsigned int usage_bit_count;
+ unsigned int indexes_per_usage_bit;
+ unsigned int last_allocated_bit;
+ unsigned long usage[0]; /* Usage bits */
+};
+
+struct mlxsw_sp2_kvdl {
+ struct mlxsw_sp2_kvdl_part *parts[MLXSW_SP2_KVDL_PARTS_INFO_LEN];
+};
+
+static int mlxsw_sp2_kvdl_part_find_zero_bits(struct mlxsw_sp2_kvdl_part *part,
+ unsigned int bit_count,
+ unsigned int *p_bit)
+{
+ unsigned int start_bit;
+ unsigned int bit;
+ unsigned int i;
+ bool wrap = false;
+
+ start_bit = part->last_allocated_bit + 1;
+ if (start_bit == part->usage_bit_count)
+ start_bit = 0;
+ bit = start_bit;
+again:
+ bit = find_next_zero_bit(part->usage, part->usage_bit_count, bit);
+ if (!wrap && bit + bit_count >= part->usage_bit_count) {
+ wrap = true;
+ bit = 0;
+ goto again;
+ }
+ if (wrap && bit + bit_count >= start_bit)
+ return -ENOBUFS;
+ for (i = 0; i < bit_count; i++) {
+ if (test_bit(bit + i, part->usage)) {
+ bit += bit_count;
+ goto again;
+ }
+ }
+ *p_bit = bit;
+ return 0;
+}
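
The search is next-fit with a single wrap: it starts one bit past the last allocation, restarts at zero when the run would cross the end of the bitmap, and gives up once a wrapped candidate reaches the original start. A hypothetical walk, assuming usage_bit_count = 8, bit_count = 3 and last_allocated_bit = 5:

/* start_bit = 6; 6 + 3 >= 8, so wrap and rescan from bit 0.
 * If bits 0..2 are clear, 0 is returned; otherwise any wrapped
 * candidate with bit + bit_count >= 6 yields -ENOBUFS.
 */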
+
+static int mlxsw_sp2_kvdl_part_alloc(struct mlxsw_sp2_kvdl_part *part,
+ unsigned int size,
+ u32 *p_kvdl_index)
+{
+ unsigned int bit_count;
+ unsigned int bit;
+ unsigned int i;
+ int err;
+
+ bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit);
+ err = mlxsw_sp2_kvdl_part_find_zero_bits(part, bit_count, &bit);
+ if (err)
+ return err;
+ for (i = 0; i < bit_count; i++)
+ __set_bit(bit + i, part->usage);
+ *p_kvdl_index = bit * part->indexes_per_usage_bit;
+ return 0;
+}
+
+static int mlxsw_sp2_kvdl_rec_del(struct mlxsw_sp *mlxsw_sp, u8 res_type,
+ u16 size, u32 kvdl_index)
+{
+ char *iedr_pl;
+ int err;
+
+ iedr_pl = kmalloc(MLXSW_REG_IEDR_LEN, GFP_KERNEL);
+ if (!iedr_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_iedr_pack(iedr_pl);
+ mlxsw_reg_iedr_rec_pack(iedr_pl, 0, res_type, size, kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(iedr), iedr_pl);
+ kfree(iedr_pl);
+ return err;
+}
+
+static void mlxsw_sp2_kvdl_part_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp2_kvdl_part *part,
+ unsigned int size, u32 kvdl_index)
+{
+ unsigned int bit_count;
+ unsigned int bit;
+ unsigned int i;
+ int err;
+
+	/* We need to ask FW to delete the previously used KVD linear index */
+ err = mlxsw_sp2_kvdl_rec_del(mlxsw_sp, part->info->res_type,
+ size, kvdl_index);
+ if (err)
+ return;
+
+ bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit);
+ bit = kvdl_index / part->indexes_per_usage_bit;
+ for (i = 0; i < bit_count; i++)
+ __clear_bit(bit + i, part->usage);
+}
+
+static int mlxsw_sp2_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ u32 *p_entry_index)
+{
+ unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type);
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+ struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type];
+
+ return mlxsw_sp2_kvdl_part_alloc(part, size, p_entry_index);
+}
+
+static void mlxsw_sp2_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ int entry_index)
+{
+ unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type);
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+ struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type];
+
+ return mlxsw_sp2_kvdl_part_free(mlxsw_sp, part, size, entry_index);
+}
+
+static int mlxsw_sp2_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+ void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count)
+{
+ *p_alloc_count = entry_count;
+ return 0;
+}
+
+static struct mlxsw_sp2_kvdl_part *
+mlxsw_sp2_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp2_kvdl_part_info *info)
+{
+ unsigned int indexes_per_usage_bit;
+ struct mlxsw_sp2_kvdl_part *part;
+ unsigned int index_range;
+ unsigned int usage_bit_count;
+ size_t usage_size;
+
+ if (!mlxsw_core_res_valid(mlxsw_sp->core,
+ info->usage_bit_count_res_id) ||
+ !mlxsw_core_res_valid(mlxsw_sp->core,
+ info->index_range_res_id))
+ return ERR_PTR(-EIO);
+ usage_bit_count = mlxsw_core_res_get(mlxsw_sp->core,
+ info->usage_bit_count_res_id);
+ index_range = mlxsw_core_res_get(mlxsw_sp->core,
+ info->index_range_res_id);
+
+	/* For some partitions, one usage bit represents a group of indexes.
+	 * That's why we compute the number of indexes per usage bit here,
+	 * based on the queried resources.
+	 */
+ indexes_per_usage_bit = index_range / usage_bit_count;
+
+ usage_size = BITS_TO_LONGS(usage_bit_count) * sizeof(unsigned long);
+ part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+ if (!part)
+ return ERR_PTR(-ENOMEM);
+ part->info = info;
+ part->usage_bit_count = usage_bit_count;
+ part->indexes_per_usage_bit = indexes_per_usage_bit;
+ part->last_allocated_bit = usage_bit_count - 1;
+ return part;
+}
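
The bit granularity falls directly out of the two queried resources. With made-up numbers for illustration:

/* Hypothetical: index_range = 0x100000, usage_bit_count = 0x8000,
 * so indexes_per_usage_bit = 32: one set bit reserves 32 consecutive
 * KVDL indexes. last_allocated_bit starts at usage_bit_count - 1,
 * so the first next-fit search above begins at bit 0.
 */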
+
+static void mlxsw_sp2_kvdl_part_fini(struct mlxsw_sp2_kvdl_part *part)
+{
+ kfree(part);
+}
+
+static int mlxsw_sp2_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp2_kvdl *kvdl)
+{
+ const struct mlxsw_sp2_kvdl_part_info *info;
+ int i;
+ int err;
+
+ for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++) {
+ info = &mlxsw_sp2_kvdl_parts_info[i];
+ kvdl->parts[i] = mlxsw_sp2_kvdl_part_init(mlxsw_sp, info);
+ if (IS_ERR(kvdl->parts[i])) {
+ err = PTR_ERR(kvdl->parts[i]);
+ goto err_kvdl_part_init;
+ }
+ }
+ return 0;
+
+err_kvdl_part_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]);
+ return err;
+}
+
+static void mlxsw_sp2_kvdl_parts_fini(struct mlxsw_sp2_kvdl *kvdl)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++)
+ mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]);
+}
+
+static int mlxsw_sp2_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+
+ return mlxsw_sp2_kvdl_parts_init(mlxsw_sp, kvdl);
+}
+
+static void mlxsw_sp2_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+
+ mlxsw_sp2_kvdl_parts_fini(kvdl);
+}
+
+const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops = {
+ .priv_size = sizeof(struct mlxsw_sp2_kvdl),
+ .init = mlxsw_sp2_kvdl_init,
+ .fini = mlxsw_sp2_kvdl_fini,
+ .alloc = mlxsw_sp2_kvdl_alloc,
+ .free = mlxsw_sp2_kvdl_free,
+ .alloc_size_query = mlxsw_sp2_kvdl_alloc_size_query,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
new file mode 100644
index 000000000000..4dd62478162e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+
+#include "core_acl_flex_actions.h"
+#include "spectrum.h"
+#include "spectrum_mr.h"
+
+static int
+mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp2_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key)
+{
+}
+
+static int
+mlxsw_sp2_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ return 0;
+}
+
+static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ return 0;
+}
+
+static void mlxsw_sp2_mr_tcam_fini(void *priv)
+{
+}
+
+const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = {
+ .init = mlxsw_sp2_mr_tcam_init,
+ .fini = mlxsw_sp2_mr_tcam_fini,
+ .route_create = mlxsw_sp2_mr_tcam_route_create,
+ .route_destroy = mlxsw_sp2_mr_tcam_route_destroy,
+ .route_update = mlxsw_sp2_mr_tcam_route_update,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 79b1fa27a9a4..c4f9238591e6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -48,13 +17,12 @@
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
-#include "spectrum_acl_flex_keys.h"
+#include "spectrum_acl_tcam.h"
struct mlxsw_sp_acl {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_afk *afk;
struct mlxsw_sp_fid *dummy_fid;
- const struct mlxsw_sp_acl_ops *ops;
struct rhashtable ruleset_ht;
struct list_head rules;
struct {
@@ -62,8 +30,7 @@ struct mlxsw_sp_acl {
unsigned long interval; /* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
} rule_activity_update;
- unsigned long priv[0];
- /* priv has to be always the last item */
+ struct mlxsw_sp_acl_tcam tcam;
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
@@ -160,6 +127,17 @@ bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
return block->disable_count;
}
+bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list) {
+ if (!binding->ingress)
+ return true;
+ }
+ return false;
+}
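
The new helper reports whether any of the block's bindings is an egress binding, by scanning the binding list for a non-ingress entry. Callers can use it to reject rule actions that the hardware only supports on ingress; a hedged usage sketch (the wrapper function is illustrative, not a call site from this patch):

/* Illustrative caller: refuse an ingress-only action on a block that
 * is bound on egress. Only the helper and NL_SET_ERR_MSG_MOD are real.
 */
static int check_ingress_only_action(struct mlxsw_sp_acl_block *block,
				     struct netlink_ext_ack *extack)
{
	if (mlxsw_sp_acl_block_is_egress_bound(block)) {
		NL_SET_ERR_MSG_MOD(extack, "Action is not supported on egress");
		return -EOPNOTSUPP;
	}
	return 0;
}
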
+
static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
@@ -319,7 +297,8 @@ int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index,
- const struct mlxsw_sp_acl_profile_ops *ops)
+ const struct mlxsw_sp_acl_profile_ops *ops,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
{
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -339,7 +318,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rhashtable_init;
- err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
+ err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
+ tmplt_elusage);
if (err)
goto err_ops_ruleset_add;
@@ -409,7 +389,7 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
- ops = acl->ops->profile_ops(mlxsw_sp, profile);
+ ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
@@ -421,13 +401,14 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index,
- enum mlxsw_sp_acl_profile profile)
+ enum mlxsw_sp_acl_profile profile,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
{
const struct mlxsw_sp_acl_profile_ops *ops;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
- ops = acl->ops->profile_ops(mlxsw_sp, profile);
+ ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
@@ -436,7 +417,8 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
return ruleset;
}
- return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops);
+ return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
+ tmplt_elusage);
}
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
@@ -487,7 +469,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
unsigned int priority)
{
- rulei->priority = priority;
+ rulei->priority = priority >> 16;
}
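
The shift makes the driver keep only the upper 16 bits of the 32-bit tc classifier priority, which, as far as the diff shows, is where tc encodes the user-visible priority (the lower bits are used internally by the stack). A tiny sketch of the assumed encoding:

/* Assumed encoding: "tc filter ... prio 10" arrives as 10 << 16, so
 * shifting right by 16 recovers the user-visible value.
 */
static inline unsigned int tc_user_prio(u32 classifier_prio)
{
	return classifier_prio >> 16;	/* (10 << 16) >> 16 == 10 */
}
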
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
@@ -536,18 +518,23 @@ int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct net_device *out_dev)
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port;
u8 local_port;
bool in_port;
if (out_dev) {
- if (!mlxsw_sp_port_dev_check(out_dev))
+ if (!mlxsw_sp_port_dev_check(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
return -EINVAL;
+ }
mlxsw_sp_port = netdev_priv(out_dev);
- if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
+ if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
return -EINVAL;
+ }
local_port = mlxsw_sp_port->local_port;
in_port = false;
} else {
@@ -558,20 +545,22 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
in_port = true;
}
return mlxsw_afa_block_append_fwd(rulei->act_block,
- local_port, in_port);
+ local_port, in_port, extack);
}
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_acl_block *block,
- struct net_device *out_dev)
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_acl_block_binding *binding;
struct mlxsw_sp_port *in_port;
- if (!list_is_singular(&block->binding_list))
+ if (!list_is_singular(&block->binding_list)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
return -EOPNOTSUPP;
-
+ }
binding = list_first_entry(&block->binding_list,
struct mlxsw_sp_acl_block_binding, list);
in_port = binding->mlxsw_sp_port;
@@ -579,12 +568,14 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
return mlxsw_afa_block_append_mirror(rulei->act_block,
in_port->local_port,
out_dev,
- binding->ingress);
+ binding->ingress,
+ extack);
}
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- u32 action, u16 vid, u16 proto, u8 prio)
+ u32 action, u16 vid, u16 proto, u8 prio,
+ struct netlink_ext_ack *extack)
{
u8 ethertype;
@@ -597,44 +588,50 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
ethertype = 1;
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
proto);
return -EINVAL;
}
return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
- vid, prio, ethertype);
+ vid, prio, ethertype,
+ extack);
} else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
return -EINVAL;
}
}
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_rule_info *rulei)
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct netlink_ext_ack *extack)
{
return mlxsw_afa_block_append_counter(rulei->act_block,
- &rulei->counter_index);
+ &rulei->counter_index, extack);
}
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- u16 fid)
+ u16 fid, struct netlink_ext_ack *extack)
{
- return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
+ return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
- unsigned long cookie)
+ unsigned long cookie,
+ struct netlink_ext_ack *extack)
{
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
struct mlxsw_sp_acl_rule *rule;
int err;
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
- rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
+ rule = kzalloc(sizeof(*rule) + ops->rule_priv_size(mlxsw_sp),
+ GFP_KERNEL);
if (!rule) {
err = -ENOMEM;
goto err_alloc;
@@ -825,20 +822,20 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
- const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp_fid *fid;
struct mlxsw_sp_acl *acl;
+ size_t alloc_size;
int err;
- acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
+ alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
+ acl = kzalloc(alloc_size, GFP_KERNEL);
if (!acl)
return -ENOMEM;
mlxsw_sp->acl = acl;
acl->mlxsw_sp = mlxsw_sp;
acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_FLEX_KEYS),
- mlxsw_sp_afk_blocks,
- MLXSW_SP_AFK_BLOCKS_COUNT);
+ mlxsw_sp->afk_ops);
if (!acl->afk) {
err = -ENOMEM;
goto err_afk_create;
@@ -857,12 +854,10 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
acl->dummy_fid = fid;
INIT_LIST_HEAD(&acl->rules);
- err = acl_ops->init(mlxsw_sp, acl->priv);
+ err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
if (err)
goto err_acl_ops_init;
- acl->ops = acl_ops;
-
/* Create the delayed work for the rule activity_update */
INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
mlxsw_sp_acl_rul_activity_update_work);
@@ -884,10 +879,9 @@ err_afk_create:
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
- acl_ops->fini(mlxsw_sp, acl->priv);
+ mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
WARN_ON(!list_empty(&acl->rules));
mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);
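
The dominant change across the spectrum_acl.c hunks is extack plumbing: every action-append helper now takes a struct netlink_ext_ack and attaches a human-readable reason to the netlink reply next to the errno, so 'tc' can print why a rule was rejected. The pattern in isolation, with a hypothetical capability check standing in for the real ones:

/* Minimal sketch of the extack pattern; NL_SET_ERR_MSG_MOD is the real
 * kernel macro, the 'supported' flag is a stand-in for the checks above.
 */
static int append_some_action(bool supported,
			      struct netlink_ext_ack *extack)
{
	if (!supported) {
		NL_SET_ERR_MSG_MOD(extack, "Action is not supported");
		return -EOPNOTSUPP;
	}
	return 0;
}
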
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
new file mode 100644
index 000000000000..2dda028f94db
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+#include "core_acl_flex_keys.h"
+
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START 6
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END 11
+
+struct mlxsw_sp_acl_atcam_lkey_id_ht_key {
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */
+ u8 erp_id;
+};
+
+struct mlxsw_sp_acl_atcam_lkey_id {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key;
+ refcount_t refcnt;
+ u32 id;
+};
+
+struct mlxsw_sp_acl_atcam_region_ops {
+ int (*init)(struct mlxsw_sp_acl_atcam_region *aregion);
+ void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion);
+ struct mlxsw_sp_acl_atcam_lkey_id *
+ (*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_rule_info *rulei, u8 erp_id);
+ void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id);
+};
+
+struct mlxsw_sp_acl_atcam_region_generic {
+ struct mlxsw_sp_acl_atcam_lkey_id dummy_lkey_id;
+};
+
+struct mlxsw_sp_acl_atcam_region_12kb {
+ struct rhashtable lkey_ht;
+ unsigned int max_lkey_id;
+ unsigned long *used_lkey_id;
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_atcam_lkey_id_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_atcam_lkey_id_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_node),
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_atcam_entry_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_node),
+};
+
+static bool
+mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ return mlxsw_sp_acl_erp_is_ctcam_erp(aentry->erp);
+}
+
+static int
+mlxsw_sp_acl_atcam_region_generic_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_atcam_region_generic *region_generic;
+
+ region_generic = kzalloc(sizeof(*region_generic), GFP_KERNEL);
+ if (!region_generic)
+ return -ENOMEM;
+
+ refcount_set(&region_generic->dummy_lkey_id.refcnt, 1);
+ aregion->priv = region_generic;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ kfree(aregion->priv);
+}
+
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u8 erp_id)
+{
+ struct mlxsw_sp_acl_atcam_region_generic *region_generic;
+
+ region_generic = aregion->priv;
+ return &region_generic->dummy_lkey_id;
+}
+
+static void
+mlxsw_sp_acl_atcam_generic_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+}
+
+static const struct mlxsw_sp_acl_atcam_region_ops
+mlxsw_sp_acl_atcam_region_generic_ops = {
+ .init = mlxsw_sp_acl_atcam_region_generic_init,
+ .fini = mlxsw_sp_acl_atcam_region_generic_fini,
+ .lkey_id_get = mlxsw_sp_acl_atcam_generic_lkey_id_get,
+ .lkey_id_put = mlxsw_sp_acl_atcam_generic_lkey_id_put,
+};
+
+static int
+mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb;
+ size_t alloc_size;
+ u64 max_lkey_id;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID))
+ return -EIO;
+
+ max_lkey_id = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID);
+ region_12kb = kzalloc(sizeof(*region_12kb), GFP_KERNEL);
+ if (!region_12kb)
+ return -ENOMEM;
+
+ alloc_size = BITS_TO_LONGS(max_lkey_id) * sizeof(unsigned long);
+ region_12kb->used_lkey_id = kzalloc(alloc_size, GFP_KERNEL);
+ if (!region_12kb->used_lkey_id) {
+ err = -ENOMEM;
+ goto err_used_lkey_id_alloc;
+ }
+
+ err = rhashtable_init(&region_12kb->lkey_ht,
+ &mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ region_12kb->max_lkey_id = max_lkey_id;
+ aregion->priv = region_12kb;
+
+ return 0;
+
+err_rhashtable_init:
+ kfree(region_12kb->used_lkey_id);
+err_used_lkey_id_alloc:
+ kfree(region_12kb);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_12kb_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+
+ rhashtable_destroy(&region_12kb->lkey_ht);
+ kfree(region_12kb->used_lkey_id);
+ kfree(region_12kb);
+}
+
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_lkey_id_create(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id_ht_key *ht_key)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ u32 id;
+ int err;
+
+ id = find_first_zero_bit(region_12kb->used_lkey_id,
+ region_12kb->max_lkey_id);
+ if (id < region_12kb->max_lkey_id)
+ __set_bit(id, region_12kb->used_lkey_id);
+ else
+ return ERR_PTR(-ENOBUFS);
+
+ lkey_id = kzalloc(sizeof(*lkey_id), GFP_KERNEL);
+ if (!lkey_id) {
+ err = -ENOMEM;
+ goto err_lkey_id_alloc;
+ }
+
+ lkey_id->id = id;
+ memcpy(&lkey_id->ht_key, ht_key, sizeof(*ht_key));
+ refcount_set(&lkey_id->refcnt, 1);
+
+ err = rhashtable_insert_fast(&region_12kb->lkey_ht,
+ &lkey_id->ht_node,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return lkey_id;
+
+err_rhashtable_insert:
+ kfree(lkey_id);
+err_lkey_id_alloc:
+ __clear_bit(id, region_12kb->used_lkey_id);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ u32 id = lkey_id->id;
+
+ rhashtable_remove_fast(&region_12kb->lkey_ht, &lkey_id->ht_node,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ kfree(lkey_id);
+ __clear_bit(id, region_12kb->used_lkey_id);
+}
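
The create/destroy pair above combines two idioms: a bitmap-based ID allocator (find_first_zero_bit() plus __set_bit()) for the large key IDs, and an rhashtable keyed by the encoded MSB blocks and eRP ID, so that identical keys share one ID through the refcount. The allocator idiom on its own, with hypothetical names:

/* Standalone sketch of the bitmap ID allocator idiom used above. */
static int id_alloc(unsigned long *used, unsigned int max, u32 *p_id)
{
	u32 id = find_first_zero_bit(used, max);

	if (id >= max)
		return -ENOBUFS;
	__set_bit(id, used);
	*p_id = id;
	return 0;
}

static void id_free(unsigned long *used, u32 id)
{
	__clear_bit(id, used);
}
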
+
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u8 erp_id)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key = {{ 0 } };
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values, ht_key.enc_key,
+ NULL, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START,
+ MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END);
+ ht_key.erp_id = erp_id;
+ lkey_id = rhashtable_lookup_fast(&region_12kb->lkey_ht, &ht_key,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (lkey_id) {
+ refcount_inc(&lkey_id->refcnt);
+ return lkey_id;
+ }
+
+ return mlxsw_sp_acl_atcam_lkey_id_create(aregion, &ht_key);
+}
+
+static void
+mlxsw_sp_acl_atcam_12kb_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+ if (refcount_dec_and_test(&lkey_id->refcnt))
+ mlxsw_sp_acl_atcam_lkey_id_destroy(aregion, lkey_id);
+}
+
+static const struct mlxsw_sp_acl_atcam_region_ops
+mlxsw_sp_acl_atcam_region_12kb_ops = {
+ .init = mlxsw_sp_acl_atcam_region_12kb_init,
+ .fini = mlxsw_sp_acl_atcam_region_12kb_fini,
+ .lkey_id_get = mlxsw_sp_acl_atcam_12kb_lkey_id_get,
+ .lkey_id_put = mlxsw_sp_acl_atcam_12kb_lkey_id_put,
+};
+
+static const struct mlxsw_sp_acl_atcam_region_ops *
+mlxsw_sp_acl_atcam_region_ops_arr[] = {
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] =
+ &mlxsw_sp_acl_atcam_region_12kb_ops,
+};
+
+int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ u16 region_id)
+{
+ char perar_pl[MLXSW_REG_PERAR_LEN];
+ /* For now, just assume that every region has 12 key blocks */
+ u16 hw_region = region_id * 3;
+ u64 max_regions;
+
+ max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
+ if (hw_region >= max_regions)
+ return -ENOBUFS;
+
+ mlxsw_reg_perar_pack(perar_pl, region_id, hw_region);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perar), perar_pl);
+}
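
Given the stated 12-key-block assumption, the factor of three suggests (though the patch does not say so) that one hardware region covers four key blocks, so a 12-block logical region occupies three consecutive hardware regions. A hypothetical generalization of the mapping:

/* Inferred, not stated by the patch: one hardware region per four
 * key blocks, hence region_id * 3 for the fixed 12-block case above.
 */
static u16 hw_region_base(u16 region_id, unsigned int key_blocks)
{
	return region_id * DIV_ROUND_UP(key_blocks, 4);
}
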
+
+static void
+mlxsw_sp_acl_atcam_region_type_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ enum mlxsw_sp_acl_atcam_region_type region_type;
+ unsigned int blocks_count;
+
+ /* We already know the blocks count cannot exceed the maximum
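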
+ * blocks count.
+ */
+ blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
+ if (blocks_count <= 2)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB;
+ else if (blocks_count <= 4)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB;
+ else if (blocks_count <= 8)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB;
+ else
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB;
+
+ aregion->type = region_type;
+ aregion->ops = mlxsw_sp_acl_atcam_region_ops_arr[region_type];
+}
+
+int
+mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops)
+{
+ int err;
+
+ aregion->region = region;
+ aregion->atcam = atcam;
+ mlxsw_sp_acl_atcam_region_type_init(aregion);
+
+ err = rhashtable_init(&aregion->entries_ht,
+ &mlxsw_sp_acl_atcam_entries_ht_params);
+ if (err)
+ return err;
+ err = aregion->ops->init(aregion);
+ if (err)
+ goto err_ops_init;
+ err = mlxsw_sp_acl_erp_region_init(aregion);
+ if (err)
+ goto err_erp_region_init;
+ err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &aregion->cregion,
+ region, ops);
+ if (err)
+ goto err_ctcam_region_init;
+
+ return 0;
+
+err_ctcam_region_init:
+ mlxsw_sp_acl_erp_region_fini(aregion);
+err_erp_region_init:
+ aregion->ops->fini(aregion);
+err_ops_init:
+ rhashtable_destroy(&aregion->entries_ht);
+ return err;
+}
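
The init function is a textbook instance of the kernel's goto-unwind error handling: each resource acquired so far gets a label that releases it, and the labels run in reverse acquisition order. The idiom in miniature, with hypothetical two-step helpers:

/* The goto-unwind idiom reduced to two steps; setup_a()/setup_b()
 * stand in for the four init calls above.
 */
static int init_two(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;
	err = setup_b();
	if (err)
		goto err_setup_b;
	return 0;

err_setup_b:
	teardown_a();
	return err;
}
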
+
+void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ mlxsw_sp_acl_ctcam_region_fini(&aregion->cregion);
+ mlxsw_sp_acl_erp_region_fini(aregion);
+ aregion->ops->fini(aregion);
+ rhashtable_destroy(&aregion->entries_ht);
+}
+
+void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ unsigned int priority)
+{
+ mlxsw_sp_acl_ctcam_chunk_init(&aregion->cregion, &achunk->cchunk,
+ priority);
+}
+
+void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk)
+{
+ mlxsw_sp_acl_ctcam_chunk_fini(&achunk->cchunk);
+}
+
+static int
+mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ char ptce3_pl[MLXSW_REG_PTCE3_LEN];
+ u32 kvdl_index, priority;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, true);
+ if (err)
+ return err;
+
+ lkey_id = aregion->ops->lkey_id_get(aregion, rulei, erp_id);
+ if (IS_ERR(lkey_id))
+ return PTR_ERR(lkey_id);
+ aentry->lkey_id = lkey_id;
+
+ kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
+ mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
+ priority, region->tcam_region_info,
+ aentry->ht_key.enc_key, erp_id,
+ refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
+ kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
+ if (err)
+ goto err_ptce3_write;
+
+ return 0;
+
+err_ptce3_write:
+ aregion->ops->lkey_id_put(aregion, lkey_id);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ char ptce3_pl[MLXSW_REG_PTCE3_LEN];
+
+ mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
+ region->tcam_region_info, aentry->ht_key.enc_key,
+ erp_id, refcount_read(&lkey_id->refcnt) != 1,
+ lkey_id->id, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
+ aregion->ops->lkey_id_put(aregion, lkey_id);
+}
+
+static int
+__mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ struct mlxsw_sp_acl_erp *erp;
+ unsigned int blocks_count;
+ int err;
+
+ blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values,
+ aentry->ht_key.enc_key, mask, 0, blocks_count - 1);
+
+ erp = mlxsw_sp_acl_erp_get(aregion, mask, false);
+ if (IS_ERR(erp))
+ return PTR_ERR(erp);
+ aentry->erp = erp;
+ aentry->ht_key.erp_id = mlxsw_sp_acl_erp_id(erp);
+
+ /* We can't insert identical rules into the A-TCAM, so fail and
+ * let the rule spill into C-TCAM
+ */
+ err = rhashtable_lookup_insert_fast(&aregion->entries_ht,
+ &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ err = mlxsw_sp_acl_atcam_region_entry_insert(mlxsw_sp, aregion, aentry,
+ rulei);
+ if (err)
+ goto err_rule_insert;
+
+ return 0;
+
+err_rule_insert:
+ rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
+err_rhashtable_insert:
+ mlxsw_sp_acl_erp_put(aregion, erp);
+ return err;
+}
+
+static void
+__mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry);
+ rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
+ mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+}
+
+int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ int err;
+
+ err = __mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, aregion, aentry, rulei);
+ if (!err)
+ return 0;
+
+ /* It is possible we failed to add the rule to the A-TCAM because
+ * the region ran out of masks. Try to spill into the C-TCAM.
+ */
+ err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &aregion->cregion,
+ &achunk->cchunk, &aentry->centry,
+ rulei, true);
+ if (!err)
+ return 0;
+
+ return err;
+}
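
Entry addition tries the A-TCAM first and spills into the C-TCAM only when that fails (duplicate key, mask exhaustion); deletion then dispatches on where the entry actually landed via mlxsw_sp_acl_atcam_is_centry(). Note that the trailing "if (!err) return 0; return err;" is equivalent to a plain "return err;". The skeleton of the fallback shape:

/* Fast-path/fallback skeleton of the function above;
 * try_atcam()/try_ctcam() are hypothetical stand-ins.
 */
static int add_entry(void)
{
	int err;

	err = try_atcam();
	if (!err)
		return 0;
	return try_ctcam();	/* spill into the C-TCAM on failure */
}
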
+
+void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ if (mlxsw_sp_acl_atcam_is_centry(aentry))
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &aregion->cregion,
+ &achunk->cchunk, &aentry->centry);
+ else
+ __mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, aregion, aentry);
+}
+
+int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ return mlxsw_sp_acl_erps_init(mlxsw_sp, atcam);
+}
+
+void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ mlxsw_sp_acl_erps_fini(mlxsw_sp, atcam);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
new file mode 100644
index 000000000000..e3c6fe8b1d40
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+static int
+mlxsw_sp_acl_ctcam_region_resize(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ u16 new_size)
+{
+ char ptar_pl[MLXSW_REG_PTAR_LEN];
+
+ mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
+ region->key_type, new_size, region->id,
+ region->tcam_region_info);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
+}
+
+static void
+mlxsw_sp_acl_ctcam_region_move(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ u16 src_offset, u16 dst_offset, u16 size)
+{
+ char prcr_pl[MLXSW_REG_PRCR_LEN];
+
+ mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
+ region->tcam_region_info, src_offset,
+ region->tcam_region_info, dst_offset, size);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
+}
+
+static int
+mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority)
+{
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ unsigned int blocks_count;
+ char *act_set;
+ u32 priority;
+ char *mask;
+ char *key;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority,
+ fillup_priority);
+ if (err)
+ return err;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+ region->tcam_region_info,
+ centry->parman_item.index, priority);
+ key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
+ mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
+ blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0,
+ blocks_count - 1);
+
+ err = cregion->ops->entry_insert(cregion, centry, mask);
+ if (err)
+ return err;
+
+ /* Only the first action set belongs here, the rest is in KVD */
+ act_set = mlxsw_afa_block_first_set(rulei->act_block);
+ mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+}
+
+static void
+mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+ cregion->region->tcam_region_info,
+ centry->parman_item.index, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ cregion->ops->entry_remove(cregion, centry);
+}
+
+static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv,
+ unsigned long new_count)
+{
+ struct mlxsw_sp_acl_ctcam_region *cregion = priv;
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ u64 max_tcam_rules;
+
+ max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+ if (new_count > max_tcam_rules)
+ return -EINVAL;
+ return mlxsw_sp_acl_ctcam_region_resize(mlxsw_sp, region, new_count);
+}
+
+static void mlxsw_sp_acl_ctcam_region_parman_move(void *priv,
+ unsigned long from_index,
+ unsigned long to_index,
+ unsigned long count)
+{
+ struct mlxsw_sp_acl_ctcam_region *cregion = priv;
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+
+ mlxsw_sp_acl_ctcam_region_move(mlxsw_sp, region,
+ from_index, to_index, count);
+}
+
+static const struct parman_ops mlxsw_sp_acl_ctcam_region_parman_ops = {
+ .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
+ .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
+ .resize = mlxsw_sp_acl_ctcam_region_parman_resize,
+ .move = mlxsw_sp_acl_ctcam_region_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
+
+int
+mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops)
+{
+ cregion->region = region;
+ cregion->ops = ops;
+ cregion->parman = parman_create(&mlxsw_sp_acl_ctcam_region_parman_ops,
+ cregion);
+ if (!cregion->parman)
+ return -ENOMEM;
+ return 0;
+}
+
+void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion)
+{
+ parman_destroy(cregion->parman);
+}
+
+void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ unsigned int priority)
+{
+ parman_prio_init(cregion->parman, &cchunk->parman_prio, priority);
+}
+
+void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk)
+{
+ parman_prio_fini(&cchunk->parman_prio);
+}
+
+int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority)
+{
+ int err;
+
+ err = parman_item_add(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion, centry,
+ rulei, fillup_priority);
+ if (err)
+ goto err_rule_insert;
+ return 0;
+
+err_rule_insert:
+ parman_item_remove(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+ return err;
+}
+
+void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion, centry);
+ parman_item_remove(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+}
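
The C-TCAM region delegates all index management to parman, the kernel's priority array manager (lib/parman.c): the LSORT algorithm keeps items ordered by priority and calls back into the driver to resize the region and move entry ranges in hardware. A condensed sketch of the parman life cycle as this file uses it:

/* Condensed parman life cycle; the ops/priv pair is the real one
 * from this file, the flow itself is abbreviated.
 */
struct parman *parman;
struct parman_prio prio;
struct parman_item item;
int err;

parman = parman_create(&mlxsw_sp_acl_ctcam_region_parman_ops, cregion);
parman_prio_init(parman, &prio, 100);	/* priority orders items in the region */
err = parman_item_add(parman, &prio, &item);
/* on success, item.index is a valid offset within the TCAM region */
parman_item_remove(parman, &prio, &item);
parman_prio_fini(&prio);
parman_destroy(parman);
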
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
new file mode 100644
index 000000000000..0a4fd3c8662a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -0,0 +1,1168 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/genalloc.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rhashtable.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "reg.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+/* gen_pool_alloc() returns 0 when allocation fails, so use an offset */
+#define MLXSW_SP_ACL_ERP_GENALLOC_OFFSET 0x100
+#define MLXSW_SP_ACL_ERP_MAX_PER_REGION 16
+
+struct mlxsw_sp_acl_erp_core {
+ unsigned int erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX + 1];
+ struct gen_pool *erp_tables;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned int num_erp_banks;
+};
+
+struct mlxsw_sp_acl_erp_key {
+ char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN];
+ bool ctcam;
+};
+
+struct mlxsw_sp_acl_erp {
+ struct mlxsw_sp_acl_erp_key key;
+ u8 id;
+ u8 index;
+ refcount_t refcnt;
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
+ struct list_head list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_acl_erp_table *erp_table;
+};
+
+struct mlxsw_sp_acl_erp_master_mask {
+ DECLARE_BITMAP(bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
+ unsigned int count[MLXSW_SP_ACL_TCAM_MASK_LEN];
+};
+
+struct mlxsw_sp_acl_erp_table {
+ struct mlxsw_sp_acl_erp_master_mask master_mask;
+ DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ struct list_head atcam_erps_list;
+ struct rhashtable erp_ht;
+ struct mlxsw_sp_acl_erp_core *erp_core;
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ const struct mlxsw_sp_acl_erp_table_ops *ops;
+ unsigned long base_index;
+ unsigned int num_atcam_erps;
+ unsigned int num_max_atcam_erps;
+ unsigned int num_ctcam_erps;
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_erp_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_erp, key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_erp, ht_node),
+};
+
+struct mlxsw_sp_acl_erp_table_ops {
+ struct mlxsw_sp_acl_erp *
+ (*erp_create)(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+ void (*erp_destroy)(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+};
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static void
+mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_multiple_masks_ops = {
+ .erp_create = mlxsw_sp_acl_erp_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_two_masks_ops = {
+ .erp_create = mlxsw_sp_acl_erp_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_second_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_single_mask_ops = {
+ .erp_create = mlxsw_sp_acl_erp_second_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_first_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = {
+ .erp_create = mlxsw_sp_acl_erp_first_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy,
+};
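
Read together, the four ops tables encode a small per-region state machine: creating an A-TCAM eRP moves the table one state to the right, destroying one moves it left, and any C-TCAM eRP forces the multiple-masks state because C-TCAM lookup requires the eRP table. One reading of the transitions:

/*
 *   no_mask  <->  single_mask  <->  two_masks  <->  multiple_masks
 *   (no eRPs)    (master mask,     (eRP table)     (eRP table,
 *                 no eRP table)                     possibly C-TCAM)
 */
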
+
+bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp)
+{
+ return erp->key.ctcam;
+}
+
+u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp)
+{
+ return erp->id;
+}
+
+static unsigned int
+mlxsw_sp_acl_erp_table_entry_size(const struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion;
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+
+ return erp_core->erpt_entries_size[aregion->type];
+}
+
+static int mlxsw_sp_acl_erp_id_get(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 *p_id)
+{
+ u8 id;
+
+ id = find_first_zero_bit(erp_table->erp_id_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ if (id < MLXSW_SP_ACL_ERP_MAX_PER_REGION) {
+ __set_bit(id, erp_table->erp_id_bitmap);
+ *p_id = id;
+ return 0;
+ }
+
+ return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_erp_id_put(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 id)
+{
+ __clear_bit(id, erp_table->erp_id_bitmap);
+}
+
+static void
+mlxsw_sp_acl_erp_master_mask_bit_set(unsigned long bit,
+ struct mlxsw_sp_acl_erp_master_mask *mask)
+{
+ if (mask->count[bit]++ == 0)
+ __set_bit(bit, mask->bitmap);
+}
+
+static void
+mlxsw_sp_acl_erp_master_mask_bit_clear(unsigned long bit,
+ struct mlxsw_sp_acl_erp_master_mask *mask)
+{
+ if (--mask->count[bit] == 0)
+ __clear_bit(bit, mask->bitmap);
+}
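
The master mask is the bitwise union of the masks of all the region's eRPs; keeping a per-bit use count lets one eRP's bits be withdrawn without rescanning the remaining eRPs. A worked example of the counting:

/* With eRP masks 0b0011 and 0b0110: count[] for bits 0..3 is
 * {1, 2, 1, 0} and the master mask is 0b0111. Clearing 0b0011
 * leaves count[] = {0, 1, 1, 0}, i.e. master mask 0b0110, with
 * no need to recompute the union from scratch.
 */
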
+
+static int
+mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ char percr_pl[MLXSW_REG_PERCR_LEN];
+ char *master_mask;
+
+ mlxsw_reg_percr_pack(percr_pl, region->id);
+ master_mask = mlxsw_reg_percr_master_mask_data(percr_pl);
+ bitmap_to_arr32((u32 *) master_mask, erp_table->master_mask.bitmap,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
+ const struct mlxsw_sp_acl_erp *erp)
+{
+ unsigned long bit;
+ int err;
+
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_set(bit,
+ &erp_table->master_mask);
+
+ err = mlxsw_sp_acl_erp_master_mask_update(erp_table);
+ if (err)
+ goto err_master_mask_update;
+
+ return 0;
+
+err_master_mask_update:
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
+ &erp_table->master_mask);
+ return err;
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
+ const struct mlxsw_sp_acl_erp *erp)
+{
+ unsigned long bit;
+ int err;
+
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
+ &erp_table->master_mask);
+
+ err = mlxsw_sp_acl_erp_master_mask_update(erp_table);
+ if (err)
+ goto err_master_mask_update;
+
+ return 0;
+
+err_master_mask_update:
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_set(bit,
+ &erp_table->master_mask);
+ return err;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ erp = kzalloc(sizeof(*erp), GFP_KERNEL);
+ if (!erp)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_acl_erp_id_get(erp_table, &erp->id);
+ if (err)
+ goto err_erp_id_get;
+
+ memcpy(&erp->key, key, sizeof(*key));
+ bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ list_add(&erp->list, &erp_table->atcam_erps_list);
+ refcount_set(&erp->refcnt, 1);
+ erp_table->num_atcam_erps++;
+ erp->erp_table = erp_table;
+
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ if (err)
+ goto err_master_mask_set;
+
+ err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return erp;
+
+err_rhashtable_insert:
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+err_master_mask_set:
+ erp_table->num_atcam_erps--;
+ list_del(&erp->list);
+ mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
+err_erp_id_get:
+ kfree(erp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+
+ rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ erp_table->num_atcam_erps--;
+ list_del(&erp->list);
+ mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
+ kfree(erp);
+}
+
+static int
+mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
+ unsigned int num_erps,
+ enum mlxsw_sp_acl_atcam_region_type region_type,
+ unsigned long *p_index)
+{
+ unsigned int num_rows, entry_size;
+
+ /* We only allow allocations of entire rows */
+ if (num_erps % erp_core->num_erp_banks != 0)
+ return -EINVAL;
+
+ entry_size = erp_core->erpt_entries_size[region_type];
+ num_rows = num_erps / erp_core->num_erp_banks;
+
+ *p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
+ if (*p_index == 0)
+ return -ENOBUFS;
+ *p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_erp_table_free(struct mlxsw_sp_acl_erp_core *erp_core,
+ unsigned int num_erps,
+ enum mlxsw_sp_acl_atcam_region_type region_type,
+ unsigned long index)
+{
+ unsigned long base_index;
+ unsigned int entry_size;
+ size_t size;
+
+ entry_size = erp_core->erpt_entries_size[region_type];
+ base_index = index + MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+ size = num_erps / erp_core->num_erp_banks * entry_size;
+ gen_pool_free(erp_core->erp_tables, base_index, size);
+}
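
Both helpers ride on a genalloc pool of eRP table space; since gen_pool_alloc() returns 0 on failure, the pool is seeded at MLXSW_SP_ACL_ERP_GENALLOC_OFFSET so that a legitimate index 0 remains distinguishable, and the offset is subtracted or re-added around each call. A hedged sketch of seeding such a pool (the real setup lives in mlxsw_sp_acl_erps_init(), outside the quoted hunks; erpt_size is illustrative):

/* Hypothetical pool seeding so that address 0 is never handed out. */
static struct gen_pool *erp_pool_create(size_t erpt_size)
{
	struct gen_pool *pool;

	pool = gen_pool_create(0, -1);	/* min alloc order 0, any NUMA node */
	if (!pool)
		return NULL;
	if (gen_pool_add(pool, MLXSW_SP_ACL_ERP_GENALLOC_OFFSET,
			 erpt_size, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}
	return pool;
}
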
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_table_master_rp(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ if (!list_is_singular(&erp_table->atcam_erps_list))
+ return NULL;
+
+ return list_first_entry(&erp_table->atcam_erps_list,
+ struct mlxsw_sp_acl_erp, list);
+}
+
+static int mlxsw_sp_acl_erp_index_get(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 *p_index)
+{
+ u8 index;
+
+ index = find_first_zero_bit(erp_table->erp_index_bitmap,
+ erp_table->num_max_atcam_erps);
+ if (index < erp_table->num_max_atcam_erps) {
+ __set_bit(index, erp_table->erp_index_bitmap);
+ *p_index = index;
+ return 0;
+ }
+
+ return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_erp_index_put(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 index)
+{
+ __clear_bit(index, erp_table->erp_index_bitmap);
+}
+
+static void
+mlxsw_sp_acl_erp_table_locate(const struct mlxsw_sp_acl_erp_table *erp_table,
+ const struct mlxsw_sp_acl_erp *erp,
+ u8 *p_erpt_bank, u8 *p_erpt_index)
+{
+ unsigned int entry_size = mlxsw_sp_acl_erp_table_entry_size(erp_table);
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ unsigned int row;
+
+ *p_erpt_bank = erp->index % erp_core->num_erp_banks;
+ row = erp->index / erp_core->num_erp_banks;
+ *p_erpt_index = erp_table->base_index + row * entry_size;
+}
+
+static int
+mlxsw_sp_acl_erp_table_erp_add(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ enum mlxsw_reg_perpt_key_size key_size;
+ char perpt_pl[MLXSW_REG_PERPT_LEN];
+ u8 erpt_bank, erpt_index;
+
+ mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index);
+ key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type;
+ mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id,
+ 0, erp_table->base_index, erp->index,
+ erp->key.mask);
+ mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, true);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl);
+}
+
+static void mlxsw_sp_acl_erp_table_erp_del(struct mlxsw_sp_acl_erp *erp)
+{
+ char empty_mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ enum mlxsw_reg_perpt_key_size key_size;
+ char perpt_pl[MLXSW_REG_PERPT_LEN];
+ u8 erpt_bank, erpt_index;
+
+ mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index);
+ key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type;
+ mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id,
+ 0, erp_table->base_index, erp->index, empty_mask);
+ mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_table_enable(struct mlxsw_sp_acl_erp_table *erp_table,
+ bool ctcam_le)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static void
+mlxsw_sp_acl_erp_table_disable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+ struct mlxsw_sp_acl_erp *master_rp;
+
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+ /* It is possible we do not have a master RP when we disable the
+ * table, e.g. when there are no rules in the A-TCAM and the last
+ * C-TCAM rule is deleted
+ */
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, false, false, 0, 0,
+ master_rp ? master_rp->id : 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_table_relocate(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ list_for_each_entry(erp, &erp_table->atcam_erps_list, list) {
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+ }
+
+ return 0;
+
+err_table_erp_add:
+ list_for_each_entry_continue_reverse(erp, &erp_table->atcam_erps_list,
+ list)
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ return err;
+}
+
+static int
+mlxsw_sp_acl_erp_table_expand(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ unsigned int num_erps, old_num_erps = erp_table->num_max_atcam_erps;
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ unsigned long old_base_index = erp_table->base_index;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ int err;
+
+ if (erp_table->num_atcam_erps < erp_table->num_max_atcam_erps)
+ return 0;
+
+ if (erp_table->num_max_atcam_erps == MLXSW_SP_ACL_ERP_MAX_PER_REGION)
+ return -ENOBUFS;
+
+ num_erps = old_num_erps + erp_core->num_erp_banks;
+ err = mlxsw_sp_acl_erp_table_alloc(erp_core, num_erps,
+ erp_table->aregion->type,
+ &erp_table->base_index);
+ if (err)
+ return err;
+ erp_table->num_max_atcam_erps = num_erps;
+
+ err = mlxsw_sp_acl_erp_table_relocate(erp_table);
+ if (err)
+ goto err_table_relocate;
+
+ err = mlxsw_sp_acl_erp_table_enable(erp_table, ctcam_le);
+ if (err)
+ goto err_table_enable;
+
+ mlxsw_sp_acl_erp_table_free(erp_core, old_num_erps,
+ erp_table->aregion->type, old_base_index);
+
+ return 0;
+
+err_table_enable:
+err_table_relocate:
+ erp_table->num_max_atcam_erps = old_num_erps;
+ mlxsw_sp_acl_erp_table_free(erp_core, num_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ erp_table->base_index = old_base_index;
+ return err;
+}
+
+static int
+mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ struct mlxsw_sp_acl_erp *master_rp;
+ int err;
+
+ /* Initially, allocate a single eRP row. Expand later as needed */
+ err = mlxsw_sp_acl_erp_table_alloc(erp_core, erp_core->num_erp_banks,
+ erp_table->aregion->type,
+ &erp_table->base_index);
+ if (err)
+ return err;
+ erp_table->num_max_atcam_erps = erp_core->num_erp_banks;
+
+ /* Transition the sole RP currently configured (the master RP)
+ * to the eRP table
+ */
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+ if (!master_rp) {
+ err = -EINVAL;
+ goto err_table_master_rp;
+ }
+
+ /* Maintain the same eRP bank for the master RP, so that we do
+ * not need to update the Bloom filter
+ */
+ master_rp->index = master_rp->index % erp_core->num_erp_banks;
+ __set_bit(master_rp->index, erp_table->erp_index_bitmap);
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, master_rp);
+ if (err)
+ goto err_table_master_rp_add;
+
+ err = mlxsw_sp_acl_erp_table_enable(erp_table, false);
+ if (err)
+ goto err_table_enable;
+
+ return 0;
+
+err_table_enable:
+ mlxsw_sp_acl_erp_table_erp_del(master_rp);
+err_table_master_rp_add:
+ __clear_bit(master_rp->index, erp_table->erp_index_bitmap);
+err_table_master_rp:
+ mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_erp_region_master_mask_trans(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ struct mlxsw_sp_acl_erp *master_rp;
+
+ mlxsw_sp_acl_erp_table_disable(erp_table);
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+ if (!master_rp)
+ return;
+ mlxsw_sp_acl_erp_table_erp_del(master_rp);
+ __clear_bit(master_rp->index, erp_table->erp_index_bitmap);
+ mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+}
+
+static int
+mlxsw_sp_acl_erp_region_erp_add(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, true);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static void mlxsw_sp_acl_erp_region_erp_del(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, false);
+
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_region_ctcam_enable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ /* No need to re-enable lookup in the C-TCAM */
+ if (erp_table->num_ctcam_erps > 1)
+ return 0;
+
+ return mlxsw_sp_acl_erp_table_enable(erp_table, true);
+}
+
+static void
+mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ /* Only disable C-TCAM lookup when the last C-TCAM eRP is deleted */
+ if (erp_table->num_ctcam_erps > 1)
+ return;
+
+ mlxsw_sp_acl_erp_table_enable(erp_table, false);
+}
+
+static void
+mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ switch (erp_table->num_atcam_erps) {
+ case 2:
+ /* Keep using the eRP table, but correctly set the
+ * operations pointer so that when an A-TCAM eRP is
+ * deleted we will transition to use the master mask
+ */
+ erp_table->ops = &erp_two_masks_ops;
+ break;
+ case 1:
+ /* We only kept the eRP table because we had C-TCAM
+ * eRPs in use. Now that the last C-TCAM eRP is gone we
+ * can stop using the table and transition to use the
+ * master mask
+ */
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+ erp_table->ops = &erp_single_mask_ops;
+ break;
+ case 0:
+ /* There are no more eRPs of any kind used by the region
+ * so free its eRP table and transition to initial state
+ */
+ mlxsw_sp_acl_erp_table_disable(erp_table);
+ mlxsw_sp_acl_erp_table_free(erp_table->erp_core,
+ erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ erp_table->ops = &erp_no_mask_ops;
+ break;
+ default:
+ break;
+ }
+}
+
+static struct mlxsw_sp_acl_erp *
+__mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ erp = kzalloc(sizeof(*erp), GFP_KERNEL);
+ if (!erp)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&erp->key, key, sizeof(*key));
+ bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ refcount_set(&erp->refcnt, 1);
+ erp_table->num_ctcam_erps++;
+ erp->erp_table = erp_table;
+
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ if (err)
+ goto err_master_mask_set;
+
+ err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table);
+ if (err)
+ goto err_erp_region_ctcam_enable;
+
+ /* When C-TCAM is used, the eRP table must be used */
+ erp_table->ops = &erp_multiple_masks_ops;
+
+ return erp;
+
+err_erp_region_ctcam_enable:
+ rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+err_rhashtable_insert:
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+err_master_mask_set:
+ erp_table->num_ctcam_erps--;
+ kfree(erp);
+ return ERR_PTR(err);
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ /* There is a special situation where we need to spill rules
+ * into the C-TCAM, yet the region is still using a master
+ * mask and thus not performing a lookup in the C-TCAM. This
+ * can happen when two rules that only differ in priority - and
+ * thus share the same key - are programmed. In this case
+ * we transition the region to use an eRP table
+ */
+ err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ erp = __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+ if (IS_ERR(erp)) {
+ err = PTR_ERR(erp);
+ goto err_erp_create;
+ }
+
+ return erp;
+
+err_erp_create:
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+
+ mlxsw_sp_acl_erp_region_ctcam_disable(erp_table);
+ rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ erp_table->num_ctcam_erps--;
+ kfree(erp);
+
+ /* Once the last C-TCAM eRP has been destroyed, the state we
+ * transition to depends on the number of A-TCAM eRPs currently
+ * in use
+ */
+ if (erp_table->num_ctcam_erps > 0)
+ return;
+ mlxsw_sp_acl_erp_ctcam_table_ops_set(erp_table);
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ if (key->ctcam)
+ return __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+
+ /* Expand the eRP table for the new eRP, if needed */
+ err = mlxsw_sp_acl_erp_table_expand(erp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp))
+ return erp;
+
+ err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index);
+ if (err)
+ goto err_erp_index_get;
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+
+ err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp);
+ if (err)
+ goto err_region_erp_add;
+
+ erp_table->ops = &erp_multiple_masks_ops;
+
+ return erp;
+
+err_region_erp_add:
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+err_table_erp_add:
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+err_erp_index_get:
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ if (erp->key.ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp);
+
+ mlxsw_sp_acl_erp_region_erp_del(erp);
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+
+ if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0)
+ erp_table->ops = &erp_two_masks_ops;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ if (key->ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+
+ /* Transition to use eRP table instead of master mask */
+ err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp)) {
+ err = PTR_ERR(erp);
+ goto err_erp_create;
+ }
+
+ err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index);
+ if (err)
+ goto err_erp_index_get;
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+
+ err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp);
+ if (err)
+ goto err_region_erp_add;
+
+ erp_table->ops = &erp_two_masks_ops;
+
+ return erp;
+
+err_region_erp_add:
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+err_table_erp_add:
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+err_erp_index_get:
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+err_erp_create:
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ if (erp->key.ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp);
+
+ mlxsw_sp_acl_erp_region_erp_del(erp);
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ /* Transition to use master mask instead of eRP table */
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+
+ erp_table->ops = &erp_single_mask_ops;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+
+ if (key->ctcam)
+ return ERR_PTR(-EINVAL);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp))
+ return erp;
+
+ erp_table->ops = &erp_single_mask_ops;
+
+ return erp;
+}
+
+static void
+mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ erp_table->ops = &erp_no_mask_ops;
+}
+
+static void
+mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ WARN_ON(1);
+}
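+
+/* Overview (illustrative, not part of the patch): the create/destroy
+ * callbacks above implement a small state machine over erp_table->ops,
+ * keyed by how many masks the region currently uses:
+ *
+ *	erp_no_mask_ops -> erp_single_mask_ops -> erp_two_masks_ops
+ *	    -> erp_multiple_masks_ops
+ *
+ * and back again on destroy; when the last C-TCAM eRP goes away, the
+ * resulting state is picked according to the number of remaining
+ * A-TCAM eRPs.
+ */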
+
+struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key key;
+ struct mlxsw_sp_acl_erp *erp;
+
+ /* eRPs are allocated from a shared resource, but currently all
+ * allocations are done under RTNL.
+ */
+ ASSERT_RTNL();
+
+ memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+ key.ctcam = ctcam;
+ erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key,
+ mlxsw_sp_acl_erp_ht_params);
+ if (erp) {
+ refcount_inc(&erp->refcnt);
+ return erp;
+ }
+
+ return erp_table->ops->erp_create(erp_table, &key);
+}
+
+void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ ASSERT_RTNL();
+
+ if (!refcount_dec_and_test(&erp->refcnt))
+ return;
+
+ erp_table->ops->erp_destroy(erp_table, erp);
+}
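+
+/* Usage sketch (illustrative): eRPs are reference counted and shared
+ * between entries with an identical mask, so callers simply pair the
+ * two functions above; the surrounding code is hypothetical:
+ *
+ *	erp = mlxsw_sp_acl_erp_get(aregion, mask, false);
+ *	if (IS_ERR(erp))
+ *		return PTR_ERR(erp);
+ *	... program the entry ...
+ *	mlxsw_sp_acl_erp_put(aregion, erp);
+ */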
+
+static struct mlxsw_sp_acl_erp_table *
+mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ int err;
+
+ erp_table = kzalloc(sizeof(*erp_table), GFP_KERNEL);
+ if (!erp_table)
+ return ERR_PTR(-ENOMEM);
+
+ err = rhashtable_init(&erp_table->erp_ht, &mlxsw_sp_acl_erp_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ erp_table->erp_core = aregion->atcam->erp_core;
+ erp_table->ops = &erp_no_mask_ops;
+ INIT_LIST_HEAD(&erp_table->atcam_erps_list);
+ erp_table->aregion = aregion;
+
+ return erp_table;
+
+err_rhashtable_init:
+ kfree(erp_table);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ WARN_ON(!list_empty(&erp_table->atcam_erps_list));
+ rhashtable_destroy(&erp_table->erp_ht);
+ kfree(erp_table);
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ char percr_pl[MLXSW_REG_PERCR_LEN];
+
+ mlxsw_reg_percr_pack(percr_pl, aregion->region->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_region_param_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, aregion->region->id, false, false, 0,
+ 0, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ int err;
+
+ erp_table = mlxsw_sp_acl_erp_table_create(aregion);
+ if (IS_ERR(erp_table))
+ return PTR_ERR(erp_table);
+ aregion->erp_table = erp_table;
+
+ /* Initialize the region's master mask to all zeroes */
+ err = mlxsw_sp_acl_erp_master_mask_init(aregion);
+ if (err)
+ goto err_erp_master_mask_init;
+
+ /* Initialize the region to not use the eRP table */
+ err = mlxsw_sp_acl_erp_region_param_init(aregion);
+ if (err)
+ goto err_erp_region_param_init;
+
+ return 0;
+
+err_erp_region_param_init:
+err_erp_master_mask_init:
+ mlxsw_sp_acl_erp_table_destroy(erp_table);
+ return err;
+}
+
+void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ mlxsw_sp_acl_erp_table_destroy(aregion->erp_table);
+}
+
+static int
+mlxsw_sp_acl_erp_tables_sizes_query(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ unsigned int size;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB))
+ return -EIO;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] = size;
+
+ return 0;
+}
+
+static int mlxsw_sp_acl_erp_tables_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ unsigned int erpt_bank_size;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANK_SIZE) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANKS))
+ return -EIO;
+ erpt_bank_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_ERPT_BANK_SIZE);
+ erp_core->num_erp_banks = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_ERPT_BANKS);
+
+ erp_core->erp_tables = gen_pool_create(0, -1);
+ if (!erp_core->erp_tables)
+ return -ENOMEM;
+ gen_pool_set_algo(erp_core->erp_tables, gen_pool_best_fit, NULL);
+
+ err = gen_pool_add(erp_core->erp_tables,
+ MLXSW_SP_ACL_ERP_GENALLOC_OFFSET, erpt_bank_size,
+ -1);
+ if (err)
+ goto err_gen_pool_add;
+
+ /* Different regions require masks of different sizes */
+ err = mlxsw_sp_acl_erp_tables_sizes_query(mlxsw_sp, erp_core);
+ if (err)
+ goto err_erp_tables_sizes_query;
+
+ return 0;
+
+err_erp_tables_sizes_query:
+err_gen_pool_add:
+ gen_pool_destroy(erp_core->erp_tables);
+ return err;
+}
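+
+/* Allocation sketch (illustrative): with the pool set up above, carving
+ * an eRP table of n entries out of the bank would look roughly like the
+ * following (the exact consumer code is an assumption):
+ *
+ *	size = n * erp_core->erpt_entries_size[region_type];
+ *	addr = gen_pool_alloc(erp_core->erp_tables, size);
+ *	if (!addr)
+ *		return -ENOBUFS;
+ *	...
+ *	gen_pool_free(erp_core->erp_tables, addr, size);
+ */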
+
+static void mlxsw_sp_acl_erp_tables_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ gen_pool_destroy(erp_core->erp_tables);
+}
+
+int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core;
+ int err;
+
+ erp_core = kzalloc(sizeof(*erp_core), GFP_KERNEL);
+ if (!erp_core)
+ return -ENOMEM;
+ erp_core->mlxsw_sp = mlxsw_sp;
+ atcam->erp_core = erp_core;
+
+ err = mlxsw_sp_acl_erp_tables_init(mlxsw_sp, erp_core);
+ if (err)
+ goto err_erp_tables_init;
+
+ return 0;
+
+err_erp_tables_init:
+ kfree(erp_core);
+ return err;
+}
+
+void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ mlxsw_sp_acl_erp_tables_fini(mlxsw_sp, atcam->erp_core);
+ kfree(atcam->erp_core);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 510ce48d87f7..e47d1d286e93 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -1,46 +1,12 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
- * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include "spectrum_acl_flex_actions.h"
#include "core_acl_flex_actions.h"
#include "spectrum_span.h"
-#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1
-
static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
- char *enc_actions, bool is_first)
+ char *enc_actions, bool is_first, bool ca)
{
struct mlxsw_sp *mlxsw_sp = priv;
char pefa_pl[MLXSW_REG_PEFA_LEN];
@@ -53,11 +19,11 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
if (is_first)
return 0;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE,
- &kvdl_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, &kvdl_index);
if (err)
return err;
- mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
+ mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, ca, enc_actions);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
if (err)
goto err_pefa_write;
@@ -65,10 +31,25 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
return 0;
err_pefa_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, kvdl_index);
return err;
}
+static int mlxsw_sp1_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first)
+{
+ return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions,
+ is_first, false);
+}
+
+static int mlxsw_sp2_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first)
+{
+ return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions,
+ is_first, true);
+}
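+
+/* Dispatch sketch (illustrative): the common helper takes an extra "ca"
+ * flag (presumably PEFA's clear-activity bit) and the two thin wrappers
+ * above pin it per ASIC generation - false on Spectrum-1, true on
+ * Spectrum-2 - so the mlxsw_afa_ops callback signature stays unchanged:
+ *
+ *	mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, ca, enc_actions);
+ */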
+
static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
bool is_first)
{
@@ -76,7 +57,29 @@ static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
if (is_first)
return;
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, kvdl_index);
+}
+
+static int mlxsw_sp1_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
+ bool *activity)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlxsw_sp2_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
+ bool *activity)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ char pefa_pl[MLXSW_REG_PEFA_LEN];
+ int err;
+
+ mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, true, NULL);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+ if (err)
+ return err;
+ mlxsw_reg_pefa_unpack(pefa_pl, activity);
+ return 0;
}
static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
@@ -87,7 +90,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
u32 kvdl_index;
int err;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, &kvdl_index);
if (err)
return err;
mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
@@ -98,7 +102,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
return 0;
err_ppbs_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, kvdl_index);
return err;
}
@@ -106,7 +111,8 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
{
struct mlxsw_sp *mlxsw_sp = priv;
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, kvdl_index);
}
static int
@@ -154,22 +160,36 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
mlxsw_sp_span_mirror_del(in_port, span_id, type, false);
}
-static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
- .kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
+const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
+ .kvdl_set_add = mlxsw_sp1_act_kvdl_set_add,
+ .kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
+ .kvdl_set_activity_get = mlxsw_sp1_act_kvdl_set_activity_get,
+ .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
+ .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
+ .counter_index_get = mlxsw_sp_act_counter_index_get,
+ .counter_index_put = mlxsw_sp_act_counter_index_put,
+ .mirror_add = mlxsw_sp_act_mirror_add,
+ .mirror_del = mlxsw_sp_act_mirror_del,
+};
+
+const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = {
+ .kvdl_set_add = mlxsw_sp2_act_kvdl_set_add,
.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
+ .kvdl_set_activity_get = mlxsw_sp2_act_kvdl_set_activity_get,
.kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
.kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
.counter_index_get = mlxsw_sp_act_counter_index_get,
.counter_index_put = mlxsw_sp_act_counter_index_put,
.mirror_add = mlxsw_sp_act_mirror_add,
.mirror_del = mlxsw_sp_act_mirror_del,
+ .dummy_first_set = true,
};
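
/* Selection sketch (illustrative): both ops tables are now exported and
 * mlxsw_sp_afa_init() below reads mlxsw_sp->afa_ops, so the per-ASIC
 * init path is expected to set it beforehand (assignment site assumed):
 *
 *	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;	// Spectrum-1
 *	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;	// Spectrum-2
 */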
int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_ACTIONS_PER_SET),
- &mlxsw_sp_act_afa_ops, mlxsw_sp);
+ mlxsw_sp->afa_ops, mlxsw_sp);
return PTR_ERR_OR_ZERO(mlxsw_sp->afa);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
index bd6d552d95b9..fe436d816d0c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
#define _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
new file mode 100644
index 000000000000..d409b09ba8df
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "spectrum.h"
+#include "item.h"
+#include "core_acl_flex_keys.h"
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x00, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x02, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
+ MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
+};
+
+static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = {
+ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac),
+ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac),
+ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
+ MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip),
+ MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip),
+ MLXSW_AFK_BLOCK(0x32, mlxsw_sp_afk_element_info_ipv4),
+ MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex),
+ MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip),
+ MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1),
+ MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip),
+ MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex),
+ MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type),
+};
+
+#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16
+
+static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
+ char *output)
+{
+ unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
+ char *output_indexed = output + offset;
+
+ memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
+}
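+
+/* Layout sketch (illustrative): Spectrum-1 keys are just an array of
+ * 16-byte blocks, so the copy above places block_index i at:
+ *
+ *	output + i * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;	// e.g. i = 2 -> offset 32
+ */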
+
+const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
+ .blocks = mlxsw_sp1_afk_blocks,
+ .blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks),
+ .encode_block = mlxsw_sp1_afk_encode_block,
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x04, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x04, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x04, 0, 8), /* RX_ACL_SYSTEM_PORT */
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x04, 0, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 6, 2),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 8, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x04, 16, 16),
+ MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x04, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */
+};
+
+static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = {
+ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0),
+ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1),
+ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2),
+ MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3),
+ MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
+ MLXSW_AFK_BLOCK(0x15, mlxsw_sp_afk_element_info_mac_5),
+ MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
+ MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
+ MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
+ MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
+ MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
+ MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2),
+ MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3),
+ MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4),
+ MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5),
+ MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0),
+ MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2),
+};
+
+#define MLXSW_SP2_AFK_BITS_PER_BLOCK 36
+
+/* A block in Spectrum-2 is of the following form:
+ *
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * | | | | | | | | | | | | | | | | | | | | | | | | | | | | |35|34|33|32|
+ * +-----------------------------------------------------------------------------------------------+
+ * |31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ */
+MLXSW_ITEM64(sp2_afk, block, value, 0x00, 0, MLXSW_SP2_AFK_BITS_PER_BLOCK);
+
+/* The key / mask block layout in Spectrum-2 is of the following form:
+ *
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * | | | | | | | | | | | | | | | | | block11_high |
+ * +-----------------------------------------------------------------------------------------------+
+ * | block11_low | block10_high |
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * ...
+ */
+
+struct mlxsw_sp2_afk_block_layout {
+ unsigned short offset;
+ struct mlxsw_item item;
+};
+
+#define MLXSW_SP2_AFK_BLOCK_LAYOUT(_block, _offset, _shift) \
+ { \
+ .offset = _offset, \
+ { \
+ .shift = _shift, \
+ .size = {.bits = MLXSW_SP2_AFK_BITS_PER_BLOCK}, \
+ .name = #_block, \
+ } \
+ } \
+
+static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = {
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block0, 0x30, 0),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block1, 0x2C, 4),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block2, 0x28, 8),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block3, 0x24, 12),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block4, 0x20, 16),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block5, 0x1C, 20),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block6, 0x18, 24),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block7, 0x14, 28),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block8, 0x0C, 0),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block9, 0x08, 4),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block10, 0x04, 8),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12),
+};
+
+static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
+ char *output)
+{
+ u64 block_value = mlxsw_sp2_afk_block_value_get(block);
+ const struct mlxsw_sp2_afk_block_layout *block_layout;
+
+ if (WARN_ON(block_index < 0 ||
+ block_index >= ARRAY_SIZE(mlxsw_sp2_afk_blocks_layout)))
+ return;
+
+ block_layout = &mlxsw_sp2_afk_blocks_layout[block_index];
+ __mlxsw_item_set64(output + block_layout->offset,
+ &block_layout->item, 0, block_value);
+}
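+
+/* Encoding sketch (illustrative): per the layout table above, block 9
+ * lives at offset 0x08 with a 4-bit shift, so its 36-bit value straddles
+ * two 32-bit words - the blockN_low/blockN_high split shown in the
+ * diagram:
+ *
+ *	layout = &mlxsw_sp2_afk_blocks_layout[9];	// .offset = 0x08, .shift = 4
+ *	__mlxsw_item_set64(output + layout->offset, &layout->item, 0, value);
+ */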
+
+const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
+ .blocks = mlxsw_sp2_afk_blocks,
+ .blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks),
+ .encode_block = mlxsw_sp2_afk_encode_block,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
deleted file mode 100644
index fb8031828454..000000000000
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
-#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
-
-#include "core_acl_flex_keys.h"
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6),
- MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
- MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6),
- MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
- MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x02, 6),
- MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
- MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
- MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
- MLXSW_AFK_ELEMENT_INST_U32(DST_IP4, 0x00, 0, 32),
- MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
- MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
- MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2),
- MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8),
- MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6),
- MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
- MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
- MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
- MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_LO, 0x00, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_HI, 0x00, 8),
- MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_LO, 0x00, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_HI, 0x00, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
- MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
-};
-
-static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = {
- MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac),
- MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac),
- MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
- MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip),
- MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip),
- MLXSW_AFK_BLOCK(0x32, mlxsw_sp_afk_element_info_ipv4),
- MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex),
- MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip),
- MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1),
- MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip),
- MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex),
- MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type),
-};
-
-#define MLXSW_SP_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp_afk_blocks)
-
-#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index ad1b548e3cac..e171513bb32a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -39,25 +8,25 @@
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
-#include <linux/parman.h>
#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"
-struct mlxsw_sp_acl_tcam {
- unsigned long *used_regions; /* bit array */
- unsigned int max_regions;
- unsigned long *used_groups; /* bit array */
- unsigned int max_groups;
- unsigned int max_group_size;
-};
+size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ return ops->priv_size;
+}
-static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
{
- struct mlxsw_sp_acl_tcam *tcam = priv;
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
u64 max_tcam_regions;
u64 max_regions;
u64 max_groups;
@@ -88,21 +57,53 @@ static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
tcam->max_groups = max_groups;
tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_GROUP_SIZE);
+
+ err = ops->init(mlxsw_sp, tcam->priv, tcam);
+ if (err)
+ goto err_tcam_init;
+
return 0;
+err_tcam_init:
+ kfree(tcam->used_groups);
err_alloc_used_groups:
kfree(tcam->used_regions);
return err;
}
-static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
{
- struct mlxsw_sp_acl_tcam *tcam = priv;
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ ops->fini(mlxsw_sp, tcam->priv);
kfree(tcam->used_groups);
kfree(tcam->used_regions);
}
+int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 *priority, bool fillup_priority)
+{
+ u64 max_priority;
+
+ if (!fillup_priority) {
+ *priority = 0;
+ return 0;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
+ return -EIO;
+
+ max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE);
+ if (rulei->priority > max_priority)
+ return -EINVAL;
+
+	/* Unlike in TC, in HW a higher number means a higher priority. */
+ *priority = max_priority - rulei->priority;
+ return 0;
+}
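+
+/* Worked example (illustrative; the KVD_SIZE value is an assumption):
+ * with max_priority = 0x40000, a TC priority of 10 becomes the HW
+ * priority 0x40000 - 10 = 0x3fff6, i.e. lower TC priorities map to
+ * higher HW priorities, matching the comment above.
+ */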
+
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
@@ -157,37 +158,25 @@ struct mlxsw_sp_acl_tcam_group {
struct mlxsw_sp_acl_tcam_group_ops *ops;
const struct mlxsw_sp_acl_tcam_pattern *patterns;
unsigned int patterns_count;
-};
-
-struct mlxsw_sp_acl_tcam_region {
- struct list_head list; /* Member of a TCAM group */
- struct list_head chunk_list; /* List of chunks under this region */
- struct parman *parman;
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_acl_tcam_group *group;
- u16 id; /* ACL ID and region ID - they are same */
- char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
- struct mlxsw_afk_key_info *key_info;
- struct {
- struct parman_prio parman_prio;
- struct parman_item parman_item;
- struct mlxsw_sp_acl_rule_info *rulei;
- } catchall;
+ bool tmplt_elusage_set;
+ struct mlxsw_afk_element_usage tmplt_elusage;
};
struct mlxsw_sp_acl_tcam_chunk {
struct list_head list; /* Member of a TCAM region */
struct rhash_head ht_node; /* Member of a chunk HT */
unsigned int priority; /* Priority within the region and group */
- struct parman_prio parman_prio;
struct mlxsw_sp_acl_tcam_group *group;
struct mlxsw_sp_acl_tcam_region *region;
unsigned int ref_count;
+ unsigned long priv[0];
+	/* priv must always be the last item */
};
struct mlxsw_sp_acl_tcam_entry {
- struct parman_item parman_item;
struct mlxsw_sp_acl_tcam_chunk *chunk;
+ unsigned long priv[0];
+	/* priv must always be the last item */
};
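
/* Allocation sketch (illustrative): the trailing priv[0] members let a
 * single kzalloc() carry both the generic struct and the per-ASIC
 * private part, as the create paths below do:
 *
 *	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
 *	ops->chunk_init(chunk->region->priv, chunk->priv, priority);
 */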
static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
@@ -216,13 +205,19 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
struct mlxsw_sp_acl_tcam_group *group,
const struct mlxsw_sp_acl_tcam_pattern *patterns,
- unsigned int patterns_count)
+ unsigned int patterns_count,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
{
int err;
group->tcam = tcam;
group->patterns = patterns;
group->patterns_count = patterns_count;
+ if (tmplt_elusage) {
+ group->tmplt_elusage_set = true;
+ memcpy(&group->tmplt_elusage, tmplt_elusage,
+ sizeof(group->tmplt_elusage));
+ }
INIT_LIST_HEAD(&group->region_list);
err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
if (err)
@@ -431,6 +426,15 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
const struct mlxsw_sp_acl_tcam_pattern *pattern;
int i;
+	/* If the template is set, we don't have to look up a pattern
+	 * and can simply use the template.
+	 */
+ if (group->tmplt_elusage_set) {
+ memcpy(out, &group->tmplt_elusage, sizeof(*out));
+ WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
+ return;
+ }
+
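+	/* Illustrative note: without a template, the loop below picks the
+	 * first pattern that covers the rule's element usage, falling back
+	 * to the rule's own usage via the memcpy further down.
+	 */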
for (i = 0; i < group->patterns_count; i++) {
pattern = &group->patterns[i];
mlxsw_afk_element_usage_fill(out, pattern->elements,
@@ -441,9 +445,6 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
memcpy(out, elusage, sizeof(*out));
}
-#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
-#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
-
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
@@ -455,6 +456,7 @@ mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
int err;
mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
+ region->key_type,
MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
region->id, region->tcam_region_info);
encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
@@ -477,24 +479,13 @@ mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
{
char ptar_pl[MLXSW_REG_PTAR_LEN];
- mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id,
+ mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
+ region->key_type, 0, region->id,
region->tcam_region_info);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
static int
-mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- u16 new_size)
-{
- char ptar_pl[MLXSW_REG_PTAR_LEN];
-
- mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
- new_size, region->id, region->tcam_region_info);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
-}
-
-static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
@@ -516,193 +507,22 @@ mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
-static int
-mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- unsigned int offset,
- struct mlxsw_sp_acl_rule_info *rulei)
-{
- char ptce2_pl[MLXSW_REG_PTCE2_LEN];
- char *act_set;
- char *mask;
- char *key;
-
- mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
- region->tcam_region_info, offset);
- key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
- mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
- mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);
-
- /* Only the first action set belongs here, the rest is in KVD */
- act_set = mlxsw_afa_block_first_set(rulei->act_block);
- mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
-}
-
-static void
-mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- unsigned int offset)
-{
- char ptce2_pl[MLXSW_REG_PTCE2_LEN];
-
- mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
- region->tcam_region_info, offset);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
-}
-
-static int
-mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- unsigned int offset,
- bool *activity)
-{
- char ptce2_pl[MLXSW_REG_PTCE2_LEN];
- int err;
-
- mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
- region->tcam_region_info, offset);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
- if (err)
- return err;
- *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
- return 0;
-}
-
-#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
-
-static int
-mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region)
-{
- struct parman_prio *parman_prio = &region->catchall.parman_prio;
- struct parman_item *parman_item = &region->catchall.parman_item;
- struct mlxsw_sp_acl_rule_info *rulei;
- int err;
-
- parman_prio_init(region->parman, parman_prio,
- MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
- err = parman_item_add(region->parman, parman_prio, parman_item);
- if (err)
- goto err_parman_item_add;
-
- rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
- if (IS_ERR(rulei)) {
- err = PTR_ERR(rulei);
- goto err_rulei_create;
- }
-
- err = mlxsw_sp_acl_rulei_act_continue(rulei);
- if (WARN_ON(err))
- goto err_rulei_act_continue;
-
- err = mlxsw_sp_acl_rulei_commit(rulei);
- if (err)
- goto err_rulei_commit;
-
- err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
- parman_item->index, rulei);
- region->catchall.rulei = rulei;
- if (err)
- goto err_rule_insert;
-
- return 0;
-
-err_rule_insert:
-err_rulei_commit:
-err_rulei_act_continue:
- mlxsw_sp_acl_rulei_destroy(rulei);
-err_rulei_create:
- parman_item_remove(region->parman, parman_prio, parman_item);
-err_parman_item_add:
- parman_prio_fini(parman_prio);
- return err;
-}
-
-static void
-mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region)
-{
- struct parman_prio *parman_prio = &region->catchall.parman_prio;
- struct parman_item *parman_item = &region->catchall.parman_item;
- struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;
-
- mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
- parman_item->index);
- mlxsw_sp_acl_rulei_destroy(rulei);
- parman_item_remove(region->parman, parman_prio, parman_item);
- parman_prio_fini(parman_prio);
-}
-
-static void
-mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- u16 src_offset, u16 dst_offset, u16 size)
-{
- char prcr_pl[MLXSW_REG_PRCR_LEN];
-
- mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
- region->tcam_region_info, src_offset,
- region->tcam_region_info, dst_offset, size);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
-}
-
-static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
- unsigned long new_count)
-{
- struct mlxsw_sp_acl_tcam_region *region = priv;
- struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
- u64 max_tcam_rules;
-
- max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
- if (new_count > max_tcam_rules)
- return -EINVAL;
- return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
-}
-
-static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
- unsigned long from_index,
- unsigned long to_index,
- unsigned long count)
-{
- struct mlxsw_sp_acl_tcam_region *region = priv;
- struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
-
- mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
- from_index, to_index, count);
-}
-
-static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
- .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
- .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
- .resize = mlxsw_sp_acl_tcam_region_parman_resize,
- .move = mlxsw_sp_acl_tcam_region_parman_move,
- .algo = PARMAN_ALGO_TYPE_LSORT,
-};
-
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
struct mlxsw_afk_element_usage *elusage)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_tcam_region *region;
int err;
- region = kzalloc(sizeof(*region), GFP_KERNEL);
+ region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
if (!region)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&region->chunk_list);
region->mlxsw_sp = mlxsw_sp;
- region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
- region);
- if (!region->parman) {
- err = -ENOMEM;
- goto err_parman_create;
- }
-
region->key_info = mlxsw_afk_key_info_get(afk, elusage);
if (IS_ERR(region->key_info)) {
err = PTR_ERR(region->key_info);
@@ -713,6 +533,11 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_region_id_get;
+ err = ops->region_associate(mlxsw_sp, region);
+ if (err)
+ goto err_tcam_region_associate;
+
+ region->key_type = ops->key_type;
err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
if (err)
goto err_tcam_region_alloc;
@@ -721,23 +546,22 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_tcam_region_enable;
- err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
+ err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, region);
if (err)
- goto err_tcam_region_catchall_add;
+ goto err_tcam_region_init;
return region;
-err_tcam_region_catchall_add:
+err_tcam_region_init:
mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
+err_tcam_region_associate:
mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
- parman_destroy(region->parman);
-err_parman_create:
kfree(region);
return ERR_PTR(err);
}
@@ -746,12 +570,13 @@ static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
- mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ ops->region_fini(mlxsw_sp, region->priv);
mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
mlxsw_afk_key_info_put(region->key_info);
- parman_destroy(region->parman);
kfree(region);
}
@@ -826,13 +651,14 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
unsigned int priority,
struct mlxsw_afk_element_usage *elusage)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk;
int err;
if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
return ERR_PTR(-EINVAL);
- chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
if (!chunk)
return ERR_PTR(-ENOMEM);
chunk->priority = priority;
@@ -844,7 +670,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_chunk_assoc;
- parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);
+ ops->chunk_init(chunk->region->priv, chunk->priv, priority);
err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
mlxsw_sp_acl_tcam_chunk_ht_params);
@@ -854,7 +680,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
return chunk;
err_rhashtable_insert:
- parman_prio_fini(&chunk->parman_prio);
+ ops->chunk_fini(chunk->priv);
mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
kfree(chunk);
@@ -865,11 +691,12 @@ static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_chunk *chunk)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_group *group = chunk->group;
rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
mlxsw_sp_acl_tcam_chunk_ht_params);
- parman_prio_fini(&chunk->parman_prio);
+ ops->chunk_fini(chunk->priv);
mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
kfree(chunk);
}
@@ -903,11 +730,19 @@ static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
}
+static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ return ops->entry_priv_size;
+}
+
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
struct mlxsw_sp_acl_tcam_entry *entry,
struct mlxsw_sp_acl_rule_info *rulei)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk;
struct mlxsw_sp_acl_tcam_region *region;
int err;
@@ -918,24 +753,16 @@ static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(chunk);
region = chunk->region;
- err = parman_item_add(region->parman, &chunk->parman_prio,
- &entry->parman_item);
- if (err)
- goto err_parman_item_add;
- err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
- entry->parman_item.index,
- rulei);
+ err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv,
+ entry->priv, rulei);
if (err)
- goto err_rule_insert;
+ goto err_entry_add;
entry->chunk = chunk;
return 0;
-err_rule_insert:
- parman_item_remove(region->parman, &chunk->parman_prio,
- &entry->parman_item);
-err_parman_item_add:
+err_entry_add:
mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
return err;
}
@@ -943,13 +770,11 @@ err_parman_item_add:
static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_entry *entry)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
struct mlxsw_sp_acl_tcam_region *region = chunk->region;
- mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
- entry->parman_item.index);
- parman_item_remove(region->parman, &chunk->parman_prio,
- &entry->parman_item);
+ ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv);
mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}
@@ -958,22 +783,24 @@ mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_entry *entry,
bool *activity)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
struct mlxsw_sp_acl_tcam_region *region = chunk->region;
- return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
- entry->parman_item.index,
- activity);
+ return ops->entry_activity_get(mlxsw_sp, region->priv,
+ entry->priv, activity);
}
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
- MLXSW_AFK_ELEMENT_DMAC,
- MLXSW_AFK_ELEMENT_SMAC,
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
MLXSW_AFK_ELEMENT_ETHERTYPE,
MLXSW_AFK_ELEMENT_IP_PROTO,
- MLXSW_AFK_ELEMENT_SRC_IP4,
- MLXSW_AFK_ELEMENT_DST_IP4,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
MLXSW_AFK_ELEMENT_VID,
@@ -987,10 +814,14 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
MLXSW_AFK_ELEMENT_ETHERTYPE,
MLXSW_AFK_ELEMENT_IP_PROTO,
- MLXSW_AFK_ELEMENT_SRC_IP6_HI,
- MLXSW_AFK_ELEMENT_SRC_IP6_LO,
- MLXSW_AFK_ELEMENT_DST_IP6_HI,
- MLXSW_AFK_ELEMENT_DST_IP6_LO,
+ MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};
@@ -1019,14 +850,16 @@ struct mlxsw_sp_acl_tcam_flower_rule {
static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
- void *priv, void *ruleset_priv)
+ struct mlxsw_sp_acl_tcam *tcam,
+ void *ruleset_priv,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
- struct mlxsw_sp_acl_tcam *tcam = priv;
return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
mlxsw_sp_acl_tcam_patterns,
- MLXSW_SP_ACL_TCAM_PATTERNS_COUNT);
+ MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
+ tmplt_elusage);
}
static void
@@ -1070,6 +903,12 @@ mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
}
+static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) +
+ mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
+}
+
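Making rule_priv_size a callback (see the ops change below) lets the ACL core
still size a single allocation for all three layers. A sketch of the intended
use, assuming the usual kzalloc pattern and the generic struct
mlxsw_sp_acl_rule defined in spectrum_acl.c:

static struct mlxsw_sp_acl_rule *
example_rule_alloc(struct mlxsw_sp *mlxsw_sp,
		   const struct mlxsw_sp_acl_profile_ops *ops)
{
	/* One allocation: the generic rule, then rule_priv_size() bytes,
	 * which per the helper above cover the flower rule plus the
	 * flavor's entry storage.
	 */
	return kzalloc(sizeof(struct mlxsw_sp_acl_rule) +
		       ops->rule_priv_size(mlxsw_sp), GFP_KERNEL);
}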
static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv, void *rule_priv,
@@ -1107,7 +946,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
.ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
- .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
+ .rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size,
.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
@@ -1118,7 +957,7 @@ mlxsw_sp_acl_tcam_profile_ops_arr[] = {
[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
};
-static const struct mlxsw_sp_acl_profile_ops *
+const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_acl_profile profile)
{
@@ -1131,10 +970,3 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
return NULL;
return ops;
}
-
-const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
- .priv_size = sizeof(struct mlxsw_sp_acl_tcam),
- .init = mlxsw_sp_acl_tcam_init,
- .fini = mlxsw_sp_acl_tcam_fini,
- .profile_ops = mlxsw_sp_acl_tcam_profile_ops,
-};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
new file mode 100644
index 000000000000..219a4e26c332
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_ACL_TCAM_H
+#define _MLXSW_SPECTRUM_ACL_TCAM_H
+
+#include <linux/list.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_keys.h"
+
+struct mlxsw_sp_acl_tcam {
+ unsigned long *used_regions; /* bit array */
+ unsigned int max_regions;
+ unsigned long *used_groups; /* bit array */
+ unsigned int max_groups;
+ unsigned int max_group_size;
+ unsigned long priv[0];
+ /* priv always has to be the last item */
+};
+
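The used_regions/used_groups bit arrays back simple ID allocators. A sketch
of the region-ID side, mirroring the allocator already present in
spectrum_acl_tcam.c (the example_ name is illustrative):

static int example_region_id_get(struct mlxsw_sp_acl_tcam *tcam, u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
	if (id < tcam->max_regions) {
		__set_bit(id, tcam->used_regions);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}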
+size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam);
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam);
+int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 *priority, bool fillup_priority);
+
+struct mlxsw_sp_acl_profile_ops {
+ size_t ruleset_priv_size;
+ int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv,
+ struct mlxsw_afk_element_usage *tmplt_elusage);
+ void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+ int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ u16 (*ruleset_group_id)(void *ruleset_priv);
+ size_t (*rule_priv_size)(struct mlxsw_sp *mlxsw_sp);
+ int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv, void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
+ void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
+ int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
+ bool *activity);
+};
+
+const struct mlxsw_sp_acl_profile_ops *
+mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_acl_profile profile);
+
+#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
+
+#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
+
+#define MLXSW_SP_ACL_TCAM_MASK_LEN \
+ (MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN * BITS_PER_BYTE)
+
+struct mlxsw_sp_acl_tcam_group;
+
+struct mlxsw_sp_acl_tcam_region {
+ struct list_head list; /* Member of a TCAM group */
+ struct list_head chunk_list; /* List of chunks under this region */
+ struct mlxsw_sp_acl_tcam_group *group;
+ enum mlxsw_reg_ptar_key_type key_type;
+ u16 id; /* ACL ID and region ID - they are the same */
+ char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
+ struct mlxsw_afk_key_info *key_info;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned long priv[0];
+ /* priv always has to be the last item */
+};
+
+struct mlxsw_sp_acl_ctcam_region {
+ struct parman *parman;
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops;
+ struct mlxsw_sp_acl_tcam_region *region;
+};
+
+struct mlxsw_sp_acl_ctcam_chunk {
+ struct parman_prio parman_prio;
+};
+
+struct mlxsw_sp_acl_ctcam_entry {
+ struct parman_item parman_item;
+};
+
+struct mlxsw_sp_acl_ctcam_region_ops {
+ int (*entry_insert)(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask);
+ void (*entry_remove)(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry);
+};
+
+int
+mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops);
+void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion);
+void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ unsigned int priority);
+void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk);
+int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority);
+void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry);
+static inline unsigned int
+mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ return centry->parman_item.index;
+}
+
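The C-TCAM structures are thin wrappers around lib/parman. The chunk init
declared above is expected to reduce to a priority-band registration,
mirroring the parman calls this patch removes from spectrum_acl_tcam.c
(sketch, example_ name illustrative):

static void
example_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
			 struct mlxsw_sp_acl_ctcam_chunk *cchunk,
			 unsigned int priority)
{
	parman_prio_init(cregion->parman, &cchunk->parman_prio, priority);
}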
+enum mlxsw_sp_acl_atcam_region_type {
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB,
+ __MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX,
+};
+
+#define MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX \
+ (__MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX - 1)
+
+struct mlxsw_sp_acl_atcam {
+ struct mlxsw_sp_acl_erp_core *erp_core;
+};
+
+struct mlxsw_sp_acl_atcam_region {
+ struct rhashtable entries_ht; /* A-TCAM only */
+ struct mlxsw_sp_acl_ctcam_region cregion;
+ const struct mlxsw_sp_acl_atcam_region_ops *ops;
+ struct mlxsw_sp_acl_tcam_region *region;
+ struct mlxsw_sp_acl_atcam *atcam;
+ enum mlxsw_sp_acl_atcam_region_type type;
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ void *priv;
+};
+
+struct mlxsw_sp_acl_atcam_entry_ht_key {
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+ u8 erp_id;
+};
+
+struct mlxsw_sp_acl_atcam_chunk {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+};
+
+struct mlxsw_sp_acl_atcam_entry {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
+ struct mlxsw_sp_acl_ctcam_entry centry;
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ struct mlxsw_sp_acl_erp *erp;
+};
+
+static inline struct mlxsw_sp_acl_atcam_region *
+mlxsw_sp_acl_tcam_cregion_aregion(struct mlxsw_sp_acl_ctcam_region *cregion)
+{
+ return container_of(cregion, struct mlxsw_sp_acl_atcam_region, cregion);
+}
+
+static inline struct mlxsw_sp_acl_atcam_entry *
+mlxsw_sp_acl_tcam_centry_aentry(struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ return container_of(centry, struct mlxsw_sp_acl_atcam_entry, centry);
+}
+
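These helpers work because cregion and centry are embedded by value in their
A-TCAM containers, so C-TCAM callbacks can recover the outer object without
back-pointers. For example (sketch, example_ name illustrative):

static struct mlxsw_sp_acl_erp *
example_centry_erp(struct mlxsw_sp_acl_ctcam_entry *centry)
{
	/* Recover the embedding A-TCAM entry, then return its ERP */
	return mlxsw_sp_acl_tcam_centry_aentry(centry)->erp;
}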
+int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ u16 region_id);
+int
+mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops);
+void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
+void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ unsigned int priority);
+void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk);
+int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei);
+void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+
+struct mlxsw_sp_acl_erp;
+
+bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp);
+u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp);
+struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam);
+void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp *erp);
+int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion);
+void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
+int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 0a9adc5962fb..4327487553c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 0f46775e0307..83c2e1e5f216 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/bitops.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
index fd34d0a01073..81465e267b10 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arkdis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_CNT_H
#define _MLXSW_SPECTRUM_CNT_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index b6ed7f7c531e..b25048c6c761 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/netdevice.h>
#include <linux/string.h>
@@ -255,6 +224,270 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
return 0;
}
+static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
+ struct dcb_app *app)
+{
+ int prio;
+
+ if (app->priority >= IEEE_8021QAZ_MAX_TCS) {
+ netdev_err(dev, "APP entry with priority value %u is invalid\n",
+ app->priority);
+ return -EINVAL;
+ }
+
+ switch (app->selector) {
+ case IEEE_8021QAZ_APP_SEL_DSCP:
+ if (app->protocol >= 64) {
+ netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
+ app->protocol);
+ return -EINVAL;
+ }
+
+ /* Warn about any DSCP APP entries with the same PID. */
+ prio = fls(dcb_ieee_getapp_mask(dev, app));
+ if (prio--) {
+ if (prio < app->priority)
+ netdev_warn(dev, "Choosing priority %d for DSCP %d in favor of previously-active value of %d\n",
+ app->priority, app->protocol, prio);
+ else if (prio > app->priority)
+ netdev_warn(dev, "Ignoring new priority %d for DSCP %d in favor of current value of %d\n",
+ app->priority, app->protocol, prio);
+ }
+ break;
+
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ if (app->protocol) {
+ netdev_err(dev, "EtherType APP entries with protocol value != 0 not supported\n");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ netdev_err(dev, "APP entries with selector %u not supported\n",
+ app->selector);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8
+mlxsw_sp_port_dcb_app_default_prio(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ u8 prio_mask;
+
+ prio_mask = dcb_ieee_getapp_default_prio_mask(mlxsw_sp_port->dev);
+ if (prio_mask)
+ /* Take the highest configured priority. */
+ return fls(prio_mask) - 1;
+
+ return 0;
+}
+
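Worked example for the fls() selection above (values assumed): with
default-priority APP entries installed for priorities 1 and 3,

	u8 prio_mask = BIT(1) | BIT(3);	/* mask is 0b1010 */
	u8 prio = fls(prio_mask) - 1;	/* fls() == 4, so prio == 3 */

i.e. the highest configured priority wins.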
+static void
+mlxsw_sp_port_dcb_app_dscp_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 default_prio,
+ struct dcb_ieee_app_dscp_map *map)
+{
+ int i;
+
+ dcb_ieee_getapp_dscp_prio_mask_map(mlxsw_sp_port->dev, map);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i) {
+ if (map->map[i])
+ map->map[i] = fls(map->map[i]) - 1;
+ else
+ map->map[i] = default_prio;
+ }
+}
+
+static bool
+mlxsw_sp_port_dcb_app_prio_dscp_map(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_prio_map *map)
+{
+ bool have_dscp = false;
+ int i;
+
+ dcb_ieee_getapp_prio_dscp_mask_map(mlxsw_sp_port->dev, map);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i) {
+ if (map->map[i]) {
+ map->map[i] = fls64(map->map[i]) - 1;
+ have_dscp = true;
+ }
+ }
+
+ return have_dscp;
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpts(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpts_pl[MLXSW_REG_QPTS_LEN];
+
+ mlxsw_reg_qpts_pack(qpts_pl, mlxsw_sp_port->local_port, ts);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpts), qpts_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qrwe(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool rewrite_dscp)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qrwe_pl[MLXSW_REG_QRWE_LEN];
+
+ mlxsw_reg_qrwe_pack(qrwe_pl, mlxsw_sp_port->local_port,
+ false, rewrite_dscp);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qrwe), qrwe_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_toggle_trust(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ bool rewrite_dscp = ts == MLXSW_REG_QPTS_TRUST_STATE_DSCP;
+ int err;
+
+ if (mlxsw_sp_port->dcb.trust_state == ts)
+ return 0;
+
+ err = mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port, ts);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update_qrwe(mlxsw_sp_port, rewrite_dscp);
+ if (err)
+ goto err_update_qrwe;
+
+ mlxsw_sp_port->dcb.trust_state = ts;
+ return 0;
+
+err_update_qrwe:
+ mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port,
+ mlxsw_sp_port->dcb.trust_state);
+ return err;
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpdpm(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_dscp_map *map)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpdpm_pl[MLXSW_REG_QPDPM_LEN];
+ short int i;
+
+ mlxsw_reg_qpdpm_pack(qpdpm_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i)
+ mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, i, map->map[i]);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdpm), qpdpm_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpdsm(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_prio_map *map)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpdsm_pl[MLXSW_REG_QPDSM_LEN];
+ short int i;
+
+ mlxsw_reg_qpdsm_pack(qpdsm_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i)
+ mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, i, map->map[i]);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdsm), qpdsm_pl);
+}
+
+static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct dcb_ieee_app_prio_map prio_map;
+ struct dcb_ieee_app_dscp_map dscp_map;
+ u8 default_prio;
+ bool have_dscp;
+ int err;
+
+ default_prio = mlxsw_sp_port_dcb_app_default_prio(mlxsw_sp_port);
+ have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
+ &prio_map);
+
+ if (!have_dscp) {
+ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+ MLXSW_REG_QPTS_TRUST_STATE_PCP);
+ if (err)
+ netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
+ return err;
+ }
+
+ mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio,
+ &dscp_map);
+ err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port,
+ &dscp_map);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Couldn't configure priority map\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_dcb_app_update_qpdsm(mlxsw_sp_port,
+ &prio_map);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Couldn't configure DSCP rewrite map\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+ MLXSW_REG_QPTS_TRUST_STATE_DSCP);
+ if (err) {
+ /* A failure to set trust DSCP means that the QPDPM and QPDSM
+ * maps installed above are not in effect. And since we are here
+ * attempting to set trust DSCP, we couldn't have attempted to
+ * switch trust to PCP. Thus no cleanup is necessary.
+ */
+ netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L3\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_setapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_dcbnl_app_validate(dev, app);
+ if (err)
+ return err;
+
+ err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port);
+ if (err)
+ goto err_update;
+
+ return 0;
+
+err_update:
+ dcb_ieee_delapp(dev, app);
+ return err;
+}
+
+static int mlxsw_sp_dcbnl_ieee_delapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = dcb_ieee_delapp(dev, app);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port);
+ if (err)
+ netdev_err(dev, "Failed to update DCB APP configuration\n");
+ return 0;
+}
+
static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev,
struct ieee_maxrate *maxrate)
{
@@ -394,6 +627,8 @@ static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
.ieee_setmaxrate = mlxsw_sp_dcbnl_ieee_setmaxrate,
.ieee_getpfc = mlxsw_sp_dcbnl_ieee_getpfc,
.ieee_setpfc = mlxsw_sp_dcbnl_ieee_setpfc,
+ .ieee_setapp = mlxsw_sp_dcbnl_ieee_setapp,
+ .ieee_delapp = mlxsw_sp_dcbnl_ieee_delapp,
.getdcbx = mlxsw_sp_dcbnl_getdcbx,
.setdcbx = mlxsw_sp_dcbnl_setdcbx,
@@ -467,6 +702,7 @@ int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
if (err)
goto err_port_pfc_init;
+ mlxsw_sp_port->dcb.trust_state = MLXSW_REG_QPTS_TRUST_STATE_PCP;
mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index f56fa18d6b26..41e607a14846 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arakdis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <net/devlink.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
index 815d543cf114..e689576231ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_PIPELINE_H_
#define _MLXSW_PIPELINE_H_
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 54262af4e98f..715d24ff937e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/bitops.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 89dbf569dff5..ebd1b24ebaa5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -48,7 +17,8 @@
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
- struct tcf_exts *exts)
+ struct tcf_exts *exts,
+ struct netlink_ext_ack *extack)
{
const struct tc_action *a;
LIST_HEAD(actions);
@@ -58,7 +28,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return 0;
/* Count action is inserted first */
- err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
+ err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
if (err)
return err;
@@ -66,16 +36,22 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_ok(a)) {
err = mlxsw_sp_acl_rulei_act_terminate(rulei);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
return err;
+ }
} else if (is_tcf_gact_shot(a)) {
err = mlxsw_sp_acl_rulei_act_drop(rulei);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
return err;
+ }
} else if (is_tcf_gact_trap(a)) {
err = mlxsw_sp_acl_rulei_act_trap(rulei);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
return err;
+ }
} else if (is_tcf_gact_goto_chain(a)) {
u32 chain_index = tcf_gact_goto_chain_index(a);
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -89,8 +65,10 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
return err;
+ }
} else if (is_tcf_mirred_egress_redirect(a)) {
struct net_device *out_dev;
struct mlxsw_sp_fid *fid;
@@ -99,20 +77,21 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
fid_index = mlxsw_sp_fid_index(fid);
err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
- fid_index);
+ fid_index, extack);
if (err)
return err;
out_dev = tcf_mirred_dev(a);
err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
- out_dev);
+ out_dev, extack);
if (err)
return err;
} else if (is_tcf_mirred_egress_mirror(a)) {
struct net_device *out_dev = tcf_mirred_dev(a);
err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
- block, out_dev);
+ block, out_dev,
+ extack);
if (err)
return err;
} else if (is_tcf_vlan(a)) {
@@ -123,8 +102,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
action, vid,
- proto, prio);
+ proto, prio, extack);
} else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
return -EOPNOTSUPP;
}
@@ -144,10 +124,12 @@ static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
FLOW_DISSECTOR_KEY_IPV4_ADDRS,
f->mask);
- mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
- ntohl(key->src), ntohl(mask->src));
- mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
- ntohl(key->dst), ntohl(mask->dst));
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ (char *) &key->src,
+ (char *) &mask->src, 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ (char *) &key->dst,
+ (char *) &mask->dst, 4);
}
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
@@ -161,24 +143,31 @@ static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS,
f->mask);
- size_t addr_half_size = sizeof(key->src) / 2;
-
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
- &key->src.s6_addr[0],
- &mask->src.s6_addr[0],
- addr_half_size);
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
- &key->src.s6_addr[addr_half_size],
- &mask->src.s6_addr[addr_half_size],
- addr_half_size);
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
- &key->dst.s6_addr[0],
- &mask->dst.s6_addr[0],
- addr_half_size);
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
- &key->dst.s6_addr[addr_half_size],
- &mask->dst.s6_addr[addr_half_size],
- addr_half_size);
+
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ &key->src.s6_addr[0x0],
+ &mask->src.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ &key->src.s6_addr[0x4],
+ &mask->src.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ &key->src.s6_addr[0x8],
+ &mask->src.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ &key->src.s6_addr[0xC],
+ &mask->src.s6_addr[0xC], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ &key->dst.s6_addr[0x0],
+ &mask->dst.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ &key->dst.s6_addr[0x4],
+ &mask->dst.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ &key->dst.s6_addr[0x8],
+ &mask->dst.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ &key->dst.s6_addr[0xC],
+ &mask->dst.s6_addr[0xC], 4);
}
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
@@ -192,6 +181,7 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
return 0;
if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
return -EINVAL;
}
@@ -220,6 +210,7 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
return 0;
if (ip_proto != IPPROTO_TCP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
return -EINVAL;
}
@@ -246,6 +237,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
return 0;
if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
return -EINVAL;
}
@@ -290,6 +282,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_VLAN))) {
dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
return -EOPNOTSUPP;
}
@@ -340,13 +333,17 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
f->mask);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
- MLXSW_AFK_ELEMENT_DMAC,
- key->dst, mask->dst,
- sizeof(key->dst));
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ key->dst, mask->dst, 2);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
- MLXSW_AFK_ELEMENT_SMAC,
- key->src, mask->src,
- sizeof(key->src));
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ key->dst + 2, mask->dst + 2, 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ key->src, mask->src, 2);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei,
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
+ key->src + 2, mask->src + 2, 4);
}
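The 48-bit MACs get the same bit-numbered split: the first two bytes of the
address are its high 16 bits, the remaining four its low 32 bits:

/*
 * key->dst[0..1] -> MLXSW_AFK_ELEMENT_DMAC_32_47 (2 bytes)
 * key->dst[2..5] -> MLXSW_AFK_ELEMENT_DMAC_0_31  (4 bytes)
 * (likewise SMAC for key->src)
 */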
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -358,6 +355,11 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_VLAN,
f->mask);
+
+ if (mlxsw_sp_acl_block_is_egress_bound(block)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
+ return -EOPNOTSUPP;
+ }
if (mask->vlan_id != 0)
mlxsw_sp_acl_rulei_keymask_u32(rulei,
MLXSW_AFK_ELEMENT_VID,
@@ -387,7 +389,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts);
+ return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts,
+ f->common.extack);
}
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
@@ -401,11 +404,12 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
- MLXSW_SP_ACL_PROFILE_FLOWER);
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
- rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
+ rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie,
+ f->common.extack);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto err_rule_create;
@@ -445,7 +449,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
- MLXSW_SP_ACL_PROFILE_FLOWER);
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
if (IS_ERR(ruleset))
return;
@@ -471,7 +475,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
- MLXSW_SP_ACL_PROFILE_FLOWER);
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
if (WARN_ON(IS_ERR(ruleset)))
return -EINVAL;
@@ -493,3 +497,40 @@ err_rule_get_stats:
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
return err;
}
+
+int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule_info rulei;
+ int err;
+
+ memset(&rulei, 0, sizeof(rulei));
+ err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
+ if (err)
+ return err;
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER,
+ &rulei.values.elusage);
+
+ /* Keep the reference to the ruleset; it is put back in
+  * mlxsw_sp_flower_tmplt_destroy().
+  */
+ return PTR_ERR_OR_ZERO(ruleset);
+}
+
+void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset;
+
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
+ if (IS_ERR(ruleset))
+ return;
+ /* Put two references: the one taken by the get above and the
+  * one kept by mlxsw_sp_flower_tmplt_create(), hence two puts.
+  */
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 98d896c14b87..00db26c96bf5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
- * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index 6909d867bb59..bb5c4d4a5872 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
- * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_IPIP_H_
#define _MLXSW_IPIP_H_
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index fe4327f547d2..1e4cdee7bcd7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -1,454 +1,75 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
-#include <linux/bitops.h>
+#include <linux/slab.h>
#include "spectrum.h"
-#define MLXSW_SP_KVDL_SINGLE_BASE 0
-#define MLXSW_SP_KVDL_SINGLE_SIZE 16384
-#define MLXSW_SP_KVDL_SINGLE_END \
- (MLXSW_SP_KVDL_SINGLE_SIZE + MLXSW_SP_KVDL_SINGLE_BASE - 1)
-
-#define MLXSW_SP_KVDL_CHUNKS_BASE \
- (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE)
-#define MLXSW_SP_KVDL_CHUNKS_SIZE 49152
-#define MLXSW_SP_KVDL_CHUNKS_END \
- (MLXSW_SP_KVDL_CHUNKS_SIZE + MLXSW_SP_KVDL_CHUNKS_BASE - 1)
-
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_BASE \
- (MLXSW_SP_KVDL_CHUNKS_BASE + MLXSW_SP_KVDL_CHUNKS_SIZE)
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE \
- (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_LARGE_CHUNKS_BASE)
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \
- (MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1)
-
-#define MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE 1
-#define MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE 32
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
-
-struct mlxsw_sp_kvdl_part_info {
- unsigned int part_index;
- unsigned int start_index;
- unsigned int end_index;
- unsigned int alloc_size;
- enum mlxsw_sp_resource_id resource_id;
-};
-
-enum mlxsw_sp_kvdl_part_id {
- MLXSW_SP_KVDL_PART_ID_SINGLE,
- MLXSW_SP_KVDL_PART_ID_CHUNKS,
- MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS,
-};
-
-#define MLXSW_SP_KVDL_PART_INFO(id) \
-[MLXSW_SP_KVDL_PART_ID_##id] = { \
- .start_index = MLXSW_SP_KVDL_##id##_BASE, \
- .end_index = MLXSW_SP_KVDL_##id##_END, \
- .alloc_size = MLXSW_SP_KVDL_##id##_ALLOC_SIZE, \
- .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \
-}
-
-static const struct mlxsw_sp_kvdl_part_info mlxsw_sp_kvdl_parts_info[] = {
- MLXSW_SP_KVDL_PART_INFO(SINGLE),
- MLXSW_SP_KVDL_PART_INFO(CHUNKS),
- MLXSW_SP_KVDL_PART_INFO(LARGE_CHUNKS),
-};
-
-#define MLXSW_SP_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp_kvdl_parts_info)
-
-struct mlxsw_sp_kvdl_part {
- struct mlxsw_sp_kvdl_part_info info;
- unsigned long usage[0]; /* Entries */
-};
-
struct mlxsw_sp_kvdl {
- struct mlxsw_sp_kvdl_part *parts[MLXSW_SP_KVDL_PARTS_INFO_LEN];
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops;
+ unsigned long priv[0];
+	/* priv has to always be the last item */
};
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl,
- unsigned int alloc_size)
-{
- struct mlxsw_sp_kvdl_part *part, *min_part = NULL;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
- part = kvdl->parts[i];
- if (alloc_size <= part->info.alloc_size &&
- (!min_part ||
- part->info.alloc_size <= min_part->info.alloc_size))
- min_part = part;
- }
-
- return min_part ?: ERR_PTR(-ENOBUFS);
-}
-
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index)
-{
- struct mlxsw_sp_kvdl_part *part;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
- part = kvdl->parts[i];
- if (kvdl_index >= part->info.start_index &&
- kvdl_index <= part->info.end_index)
- return part;
- }
-
- return ERR_PTR(-EINVAL);
-}
-
-static u32
-mlxsw_sp_entry_index_kvdl_index(const struct mlxsw_sp_kvdl_part_info *info,
- unsigned int entry_index)
-{
- return info->start_index + entry_index * info->alloc_size;
-}
-
-static unsigned int
-mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info,
- u32 kvdl_index)
-{
- return (kvdl_index - info->start_index) / info->alloc_size;
-}
-
-static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
- u32 *p_kvdl_index)
-{
- const struct mlxsw_sp_kvdl_part_info *info = &part->info;
- unsigned int entry_index, nr_entries;
-
- nr_entries = (info->end_index - info->start_index + 1) /
- info->alloc_size;
- entry_index = find_first_zero_bit(part->usage, nr_entries);
- if (entry_index == nr_entries)
- return -ENOBUFS;
- __set_bit(entry_index, part->usage);
-
- *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(info, entry_index);
-
- return 0;
-}
-
-static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part,
- u32 kvdl_index)
-{
- const struct mlxsw_sp_kvdl_part_info *info = &part->info;
- unsigned int entry_index;
-
- entry_index = mlxsw_sp_kvdl_index_entry_index(info, kvdl_index);
- __clear_bit(entry_index, part->usage);
-}
-
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
- u32 *p_entry_index)
-{
- struct mlxsw_sp_kvdl_part *part;
-
- /* Find partition with smallest allocation size satisfying the
- * requested size.
- */
- part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
- if (IS_ERR(part))
- return PTR_ERR(part);
-
- return mlxsw_sp_kvdl_part_alloc(part, p_entry_index);
-}
-
-void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
-{
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp_kvdl_index_part(mlxsw_sp->kvdl, entry_index);
- if (IS_ERR(part))
- return;
- mlxsw_sp_kvdl_part_free(part, entry_index);
-}
-
-int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
- unsigned int entry_count,
- unsigned int *p_alloc_size)
-{
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
- if (IS_ERR(part))
- return PTR_ERR(part);
-
- *p_alloc_size = part->info.alloc_size;
-
- return 0;
-}
-
-static void mlxsw_sp_kvdl_part_update(struct mlxsw_sp_kvdl_part *part,
- struct mlxsw_sp_kvdl_part *part_prev,
- unsigned int size)
-{
-
- if (!part_prev) {
- part->info.end_index = size - 1;
- } else {
- part->info.start_index = part_prev->info.end_index + 1;
- part->info.end_index = part->info.start_index + size - 1;
- }
-}
-
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_kvdl_part_info *info,
- struct mlxsw_sp_kvdl_part *part_prev)
+int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- struct mlxsw_sp_kvdl_part *part;
- bool need_update = true;
- unsigned int nr_entries;
- size_t usage_size;
- u64 resource_size;
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops = mlxsw_sp->kvdl_ops;
+ struct mlxsw_sp_kvdl *kvdl;
int err;
- err = devlink_resource_size_get(devlink, info->resource_id,
- &resource_size);
- if (err) {
- need_update = false;
- resource_size = info->end_index - info->start_index + 1;
- }
-
- nr_entries = div_u64(resource_size, info->alloc_size);
- usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
- part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
- if (!part)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&part->info, info, sizeof(part->info));
-
- if (need_update)
- mlxsw_sp_kvdl_part_update(part, part_prev, resource_size);
- return part;
-}
-
-static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp_kvdl_part *part)
-{
- kfree(part);
-}
-
-static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- const struct mlxsw_sp_kvdl_part_info *info;
- struct mlxsw_sp_kvdl_part *part_prev = NULL;
- int err, i;
+ kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl) + kvdl_ops->priv_size,
+ GFP_KERNEL);
+ if (!kvdl)
+ return -ENOMEM;
+ kvdl->kvdl_ops = kvdl_ops;
+ mlxsw_sp->kvdl = kvdl;
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
- info = &mlxsw_sp_kvdl_parts_info[i];
- kvdl->parts[i] = mlxsw_sp_kvdl_part_init(mlxsw_sp, info,
- part_prev);
- if (IS_ERR(kvdl->parts[i])) {
- err = PTR_ERR(kvdl->parts[i]);
- goto err_kvdl_part_init;
- }
- part_prev = kvdl->parts[i];
- }
+ err = kvdl_ops->init(mlxsw_sp, kvdl->priv);
+ if (err)
+ goto err_init;
return 0;
-err_kvdl_part_init:
- for (i--; i >= 0; i--)
- mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
+err_init:
+ kfree(kvdl);
return err;
}
-static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
+void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
- mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
-}
-
-static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
-{
- const struct mlxsw_sp_kvdl_part_info *info = &part->info;
- unsigned int nr_entries;
- int bit = -1;
- u64 occ = 0;
-
- nr_entries = (info->end_index -
- info->start_index + 1) /
- info->alloc_size;
- while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
- < nr_entries)
- occ += info->alloc_size;
- return occ;
-}
-
-static u64 mlxsw_sp_kvdl_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- u64 occ = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
- occ += mlxsw_sp_kvdl_part_occ(mlxsw_sp->kvdl->parts[i]);
-
- return occ;
-}
-
-static u64 mlxsw_sp_kvdl_single_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE];
- return mlxsw_sp_kvdl_part_occ(part);
-}
-
-static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS];
- return mlxsw_sp_kvdl_part_occ(part);
-}
-
-static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_kvdl_part *part;
- part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS];
- return mlxsw_sp_kvdl_part_occ(part);
+ kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv);
+ kfree(kvdl);
}
-int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- static struct devlink_resource_size_params size_params;
- u32 kvdl_max_size;
- int err;
-
- kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
- MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
- MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
-
- devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
- MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE,
- DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
- MLXSW_SP_KVDL_SINGLE_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
- if (err)
- return err;
-
- devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
- MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE,
- DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
- MLXSW_SP_KVDL_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
- if (err)
- return err;
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
- MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
- DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
- return err;
+ return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
+ entry_count, p_entry_index);
}
-int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- struct mlxsw_sp_kvdl *kvdl;
- int err;
-
- kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl), GFP_KERNEL);
- if (!kvdl)
- return -ENOMEM;
- mlxsw_sp->kvdl = kvdl;
-
- err = mlxsw_sp_kvdl_parts_init(mlxsw_sp);
- if (err)
- goto err_kvdl_parts_init;
-
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- mlxsw_sp_kvdl_occ_get,
- mlxsw_sp);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- mlxsw_sp_kvdl_single_occ_get,
- mlxsw_sp);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- mlxsw_sp_kvdl_chunks_occ_get,
- mlxsw_sp);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- mlxsw_sp_kvdl_large_chunks_occ_get,
- mlxsw_sp);
-
- return 0;
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
-err_kvdl_parts_init:
- kfree(mlxsw_sp->kvdl);
- return err;
+ kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type,
+ entry_count, entry_index);
}
-void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
+int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR);
- mlxsw_sp_kvdl_parts_fini(mlxsw_sp);
- kfree(mlxsw_sp->kvdl);
+ return kvdl->kvdl_ops->alloc_size_query(mlxsw_sp, kvdl->priv, type,
+ entry_count, p_alloc_count);
}
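
With this change spectrum_kvdl.c shrinks from a concrete three-partition allocator to a thin dispatcher: struct mlxsw_sp_kvdl holds only an ops pointer plus a trailing priv[] area, and a single kzalloc() of sizeof(*kvdl) + kvdl_ops->priv_size hands the backend its private state in the same allocation. Below is a minimal sketch of a backend plugged into this indirection; the mlxsw_sp_kvdl_ops definition lives outside this hunk, so the field list is inferred from the call sites above, and the "foo" names are hypothetical.

struct mlxsw_sp_foo_kvdl {
	unsigned int allocated;		/* per-ASIC bookkeeping goes here */
};

static int mlxsw_sp_foo_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_foo_kvdl *foo_kvdl = priv;

	foo_kvdl->allocated = 0;	/* state lives in kvdl->priv[] */
	return 0;
}

static void mlxsw_sp_foo_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
}

static const struct mlxsw_sp_kvdl_ops mlxsw_sp_foo_kvdl_ops = {
	.priv_size = sizeof(struct mlxsw_sp_foo_kvdl),
	.init = mlxsw_sp_foo_kvdl_init,
	.fini = mlxsw_sp_foo_kvdl_fini,
	/* .alloc, .free and .alloc_size_query take (mlxsw_sp, priv, type,
	 * entry_count, ...) exactly as dispatched above */
};
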
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index a82539609d49..54275624718b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/rhashtable.h>
#include <net/ipv6.h>
@@ -1075,6 +1044,6 @@ void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
cancel_delayed_work_sync(&mr->stats_update_dw);
- mr->mr_ops->fini(mr->priv);
+ mr->mr_ops->fini(mlxsw_sp, mr->priv);
kfree(mr);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
index 7c864a86811d..3cde3671fe35 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_MCROUTER_H
#define _MLXSW_SPECTRUM_MCROUTER_H
@@ -46,15 +15,6 @@ enum mlxsw_sp_mr_route_action {
MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD,
};
-enum mlxsw_sp_mr_route_prio {
- MLXSW_SP_MR_ROUTE_PRIO_SG,
- MLXSW_SP_MR_ROUTE_PRIO_STARG,
- MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
- __MLXSW_SP_MR_ROUTE_PRIO_MAX
-};
-
-#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
-
struct mlxsw_sp_mr_route_key {
int vrid;
enum mlxsw_sp_l3proto proto;
@@ -101,7 +61,7 @@ struct mlxsw_sp_mr_ops {
u16 erif_index);
void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv);
- void (*fini)(void *priv);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
};
struct mlxsw_sp_mr;
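
The one-line prototype change above widens the multicast router backend's fini callback so teardown can reach per-ASIC state hanging off mlxsw_sp; mlxsw_sp_mr_fini() now passes it through. A conforming callback, sketched under that assumption with a hypothetical name:

static void mlxsw_sp_example_mr_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	/* mlxsw_sp is now available here, e.g. for mlxsw_sp->mr_tcam_ops,
	 * as mlxsw_sp_mr_tcam_fini() does further down in this diff */
}
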
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 4f4c0d311883..346f4a5fe053 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -1,41 +1,9 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
-#include <linux/parman.h>
#include "spectrum_mr_tcam.h"
#include "reg.h"
@@ -43,15 +11,8 @@
#include "core_acl_flex_actions.h"
#include "spectrum_mr.h"
-struct mlxsw_sp_mr_tcam_region {
- struct mlxsw_sp *mlxsw_sp;
- enum mlxsw_reg_rtar_key_type rtar_key_type;
- struct parman *parman;
- struct parman_prio *parman_prios;
-};
-
struct mlxsw_sp_mr_tcam {
- struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
+ void *priv;
};
/* This struct maps to one RIGR2 register entry */
@@ -84,8 +45,6 @@ mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
INIT_LIST_HEAD(&erif_list->erif_sublists);
}
-#define MLXSW_SP_KVDL_RIGR2_SIZE 1
-
static struct mlxsw_sp_mr_erif_sublist *
mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list)
@@ -96,8 +55,8 @@ mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
if (!erif_sublist)
return ERR_PTR(-ENOMEM);
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE,
- &erif_sublist->rigr2_kvdl_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ 1, &erif_sublist->rigr2_kvdl_index);
if (err) {
kfree(erif_sublist);
return ERR_PTR(err);
@@ -112,7 +71,8 @@ mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
list_del(&erif_sublist->list);
- mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ 1, erif_sublist->rigr2_kvdl_index);
kfree(erif_sublist);
}
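
The local MLXSW_SP_KVDL_RIGR2_SIZE define is dropped because the new KVDL API has callers state both the entry type and the count, letting a type-aware backend place MC router list entries wherever it likes. The calling convention, restated as a sketch grounded in the two hunks above:

	u32 rigr2_kvdl_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
				  1, &rigr2_kvdl_index);
	if (err)
		return ERR_PTR(err);
	/* ... use the index ... */
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
			   1, rigr2_kvdl_index);
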
@@ -221,12 +181,11 @@ struct mlxsw_sp_mr_tcam_route {
struct mlxsw_sp_mr_tcam_erif_list erif_list;
struct mlxsw_afa_block *afa_block;
u32 counter_index;
- struct parman_item parman_item;
- struct parman_prio *parman_prio;
enum mlxsw_sp_mr_route_action action;
struct mlxsw_sp_mr_route_key key;
u16 irif_index;
u16 min_mtu;
+ void *priv;
};
static struct mlxsw_afa_block *
@@ -297,60 +256,6 @@ mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
mlxsw_afa_block_destroy(afa_block);
}
-static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
- struct parman_item *parman_item,
- struct mlxsw_sp_mr_route_key *key,
- struct mlxsw_afa_block *afa_block)
-{
- char rmft2_pl[MLXSW_REG_RMFT2_LEN];
-
- switch (key->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
- key->vrid,
- MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
- ntohl(key->group.addr4),
- ntohl(key->group_mask.addr4),
- ntohl(key->source.addr4),
- ntohl(key->source_mask.addr4),
- mlxsw_afa_block_first_set(afa_block));
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
- key->vrid,
- MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
- key->group.addr6,
- key->group_mask.addr6,
- key->source.addr6,
- key->source_mask.addr6,
- mlxsw_afa_block_first_set(afa_block));
- }
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
-}
-
-static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
- struct mlxsw_sp_mr_route_key *key,
- struct parman_item *parman_item)
-{
- struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
- char rmft2_pl[MLXSW_REG_RMFT2_LEN];
-
- switch (key->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
- vrid, 0, 0, 0, 0, 0, 0, NULL);
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
- vrid, 0, 0, zero_addr, zero_addr,
- zero_addr, zero_addr, NULL);
- break;
- }
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
-}
-
static int
mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list,
@@ -370,51 +275,12 @@ mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static struct mlxsw_sp_mr_tcam_region *
-mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam,
- enum mlxsw_sp_l3proto proto)
-{
- return &mr_tcam->tcam_regions[proto];
-}
-
-static int
-mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
- struct mlxsw_sp_mr_tcam_route *route,
- enum mlxsw_sp_mr_route_prio prio)
-{
- struct mlxsw_sp_mr_tcam_region *tcam_region;
- int err;
-
- tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
- route->key.proto);
- err = parman_item_add(tcam_region->parman,
- &tcam_region->parman_prios[prio],
- &route->parman_item);
- if (err)
- return err;
-
- route->parman_prio = &tcam_region->parman_prios[prio];
- return 0;
-}
-
-static void
-mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
- struct mlxsw_sp_mr_tcam_route *route)
-{
- struct mlxsw_sp_mr_tcam_region *tcam_region;
-
- tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
- route->key.proto);
-
- parman_item_remove(tcam_region->parman,
- route->parman_prio, &route->parman_item);
-}
-
static int
mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
struct mlxsw_sp_mr_route_params *route_params)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
int err;
@@ -448,22 +314,23 @@ mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
goto err_afa_block_create;
}
- /* Allocate place in the TCAM */
- err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route,
- route_params->prio);
- if (err)
- goto err_parman_item_add;
+ route->priv = kzalloc(ops->route_priv_size, GFP_KERNEL);
+ if (!route->priv) {
+ err = -ENOMEM;
+ goto err_route_priv_alloc;
+ }
/* Write the route to the TCAM */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, route->afa_block);
+ err = ops->route_create(mlxsw_sp, mr_tcam->priv, route->priv,
+ &route->key, route->afa_block,
+ route_params->prio);
if (err)
- goto err_route_replace;
+ goto err_route_create;
return 0;
-err_route_replace:
- mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
-err_parman_item_add:
+err_route_create:
+ kfree(route->priv);
+err_route_priv_alloc:
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
err_afa_block_create:
mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
@@ -476,12 +343,12 @@ err_counter_alloc:
static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
void *priv, void *route_priv)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
- mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
- &route->key, &route->parman_item);
- mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
+ ops->route_destroy(mlxsw_sp, mr_tcam->priv, route->priv, &route->key);
+ kfree(route->priv);
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
@@ -502,6 +369,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
void *route_priv,
enum mlxsw_sp_mr_route_action route_action)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_afa_block *afa_block;
int err;
@@ -516,8 +384,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(afa_block);
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err;
@@ -534,6 +401,7 @@ err:
static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u16 min_mtu)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_afa_block *afa_block;
int err;
@@ -549,8 +417,7 @@ static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(afa_block);
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err;
@@ -596,6 +463,7 @@ static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u16 erif_index)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_erif_sublist *erif_sublist;
struct mlxsw_sp_mr_tcam_erif_list erif_list;
@@ -630,8 +498,7 @@ static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
}
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err_route_write;
@@ -653,6 +520,7 @@ static int
mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
struct mlxsw_sp_mr_route_info *route_info)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam_erif_list erif_list;
struct mlxsw_afa_block *afa_block;
@@ -677,8 +545,7 @@ mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
}
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err_route_write;
@@ -699,167 +566,36 @@ err_erif_populate:
return err;
}
-#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16
-#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16
-
-static int
-mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rtar_pl[MLXSW_REG_RTAR_LEN];
-
- mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
- mr_tcam_region->rtar_key_type,
- MLXSW_SP_MR_TCAM_REGION_BASE_COUNT);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static void
-mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rtar_pl[MLXSW_REG_RTAR_LEN];
-
- mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
- mr_tcam_region->rtar_key_type, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv,
- unsigned long new_count)
-{
- struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rtar_pl[MLXSW_REG_RTAR_LEN];
- u64 max_tcam_rules;
-
- max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
- if (new_count > max_tcam_rules)
- return -EINVAL;
- mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
- mr_tcam_region->rtar_key_type, new_count);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static void mlxsw_sp_mr_tcam_region_parman_move(void *priv,
- unsigned long from_index,
- unsigned long to_index,
- unsigned long count)
-{
- struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rrcr_pl[MLXSW_REG_RRCR_LEN];
-
- mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
- from_index, count,
- mr_tcam_region->rtar_key_type, to_index);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
-}
-
-static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = {
- .base_count = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT,
- .resize_step = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP,
- .resize = mlxsw_sp_mr_tcam_region_parman_resize,
- .move = mlxsw_sp_mr_tcam_region_parman_move,
- .algo = PARMAN_ALGO_TYPE_LSORT,
-};
-
-static int
-mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
- enum mlxsw_reg_rtar_key_type rtar_key_type)
-{
- struct parman_prio *parman_prios;
- struct parman *parman;
- int err;
- int i;
-
- mr_tcam_region->rtar_key_type = rtar_key_type;
- mr_tcam_region->mlxsw_sp = mlxsw_sp;
-
- err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region);
- if (err)
- return err;
-
- parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops,
- mr_tcam_region);
- if (!parman) {
- err = -ENOMEM;
- goto err_parman_create;
- }
- mr_tcam_region->parman = parman;
-
- parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
- sizeof(*parman_prios), GFP_KERNEL);
- if (!parman_prios) {
- err = -ENOMEM;
- goto err_parman_prios_alloc;
- }
- mr_tcam_region->parman_prios = parman_prios;
-
- for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
- parman_prio_init(mr_tcam_region->parman,
- &mr_tcam_region->parman_prios[i], i);
- return 0;
-
-err_parman_prios_alloc:
- parman_destroy(parman);
-err_parman_create:
- mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
- return err;
-}
-
-static void
-mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
- int i;
-
- for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
- parman_prio_fini(&mr_tcam_region->parman_prios[i]);
- kfree(mr_tcam_region->parman_prios);
- parman_destroy(mr_tcam_region->parman);
- mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
-}
-
static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
- struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
- u32 rtar_key;
int err;
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
- !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES))
return -EIO;
- rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
- err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
- &region[MLXSW_SP_L3_PROTO_IPV4],
- rtar_key);
- if (err)
- return err;
+ mr_tcam->priv = kzalloc(ops->priv_size, GFP_KERNEL);
+ if (!mr_tcam->priv)
+ return -ENOMEM;
- rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
- err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
- &region[MLXSW_SP_L3_PROTO_IPV6],
- rtar_key);
+ err = ops->init(mlxsw_sp, mr_tcam->priv);
if (err)
- goto err_ipv6_region_init;
-
+ goto err_init;
return 0;
-err_ipv6_region_init:
- mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+err_init:
+ kfree(mr_tcam->priv);
return err;
}
-static void mlxsw_sp_mr_tcam_fini(void *priv)
+static void mlxsw_sp_mr_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
- struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
- mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
- mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+ ops->fini(mr_tcam->priv);
+ kfree(mr_tcam->priv);
}
const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
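
All parman region and priority management is gone from this common multicast TCAM file; it now lives behind mlxsw_sp->mr_tcam_ops, with the common code only allocating priv and route-priv blobs of the sizes the backend declares. A hedged sketch of such a backend's ops table follows; the field names are inferred from the call sites in this file, the real struct mlxsw_sp_mr_tcam_ops is defined elsewhere, and every "foo" identifier is hypothetical (the handlers would be the backend's implementations, e.g. rebuilding the removed parman/RTAR logic).

struct mlxsw_sp_foo_mr_tcam { /* hypothetical backend state */ };
struct mlxsw_sp_foo_mr_tcam_route { /* hypothetical per-route state */ };

static const struct mlxsw_sp_mr_tcam_ops mlxsw_sp_foo_mr_tcam_ops = {
	.priv_size = sizeof(struct mlxsw_sp_foo_mr_tcam),
	.init = mlxsw_sp_foo_mr_tcam_init,	/* (mlxsw_sp, priv) */
	.fini = mlxsw_sp_foo_mr_tcam_fini,	/* (priv) */
	.route_priv_size = sizeof(struct mlxsw_sp_foo_mr_tcam_route),
	.route_create = mlxsw_sp_foo_mr_tcam_route_create,
	.route_destroy = mlxsw_sp_foo_mr_tcam_route_destroy,
	.route_update = mlxsw_sp_foo_mr_tcam_route_update,
};
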
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
index f9b59ee25406..3c84151b4c33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_MCROUTER_TCAM_H
#define _MLXSW_SPECTRUM_MCROUTER_TCAM_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index cad603c35271..bdf53cf350f6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Nogah Frankel <nogahf@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 77b2adb29341..3a96307f51b0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1,39 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
- * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
- * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -48,6 +14,7 @@
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
+#include <linux/if_macvlan.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -60,6 +27,7 @@
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
+#include <net/switchdev.h>
#include "spectrum.h"
#include "core.h"
@@ -163,7 +131,9 @@ struct mlxsw_sp_rif_ops {
const struct mlxsw_sp_rif_params *params);
int (*configure)(struct mlxsw_sp_rif *rif);
void (*deconfigure)(struct mlxsw_sp_rif *rif);
- struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
+ struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack);
+ void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
@@ -342,10 +312,6 @@ static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
-static struct mlxsw_sp_rif *
-mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
-
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
struct mlxsw_sp_prefix_usage {
@@ -1109,7 +1075,8 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
u32 tunnel_index;
int err;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ 1, &tunnel_index);
if (err)
return err;
@@ -1125,7 +1092,8 @@ static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
/* Unlink this node from the IPIP entry that it's the decap entry of. */
fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
fib_entry->decap.ipip_entry = NULL;
- mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ 1, fib_entry->decap.tunnel_index);
}
static struct mlxsw_sp_fib_node *
@@ -2434,17 +2402,48 @@ static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
kfree(net_work);
}
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
+
+static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
+{
+ struct mlxsw_sp_netevent_work *net_work =
+ container_of(work, struct mlxsw_sp_netevent_work, work);
+ struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
+
+ __mlxsw_sp_router_init(mlxsw_sp);
+ kfree(net_work);
+}
+
+static int mlxsw_sp_router_schedule_work(struct net *net,
+ struct notifier_block *nb,
+ void (*cb)(struct work_struct *))
+{
+ struct mlxsw_sp_netevent_work *net_work;
+ struct mlxsw_sp_router *router;
+
+ if (!net_eq(net, &init_net))
+ return NOTIFY_DONE;
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (!net_work)
+ return NOTIFY_BAD;
+
+ router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
+ INIT_WORK(&net_work->work, cb);
+ net_work->mlxsw_sp = router->mlxsw_sp;
+ mlxsw_core_schedule_work(&net_work->work);
+ return NOTIFY_DONE;
+}
+
static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct mlxsw_sp_netevent_work *net_work;
struct mlxsw_sp_port *mlxsw_sp_port;
- struct mlxsw_sp_router *router;
struct mlxsw_sp *mlxsw_sp;
unsigned long interval;
struct neigh_parms *p;
struct neighbour *n;
- struct net *net;
switch (event) {
case NETEVENT_DELAY_PROBE_TIME_UPDATE:
@@ -2498,20 +2497,12 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
break;
case NETEVENT_IPV4_MPATH_HASH_UPDATE:
case NETEVENT_IPV6_MPATH_HASH_UPDATE:
- net = ptr;
-
- if (!net_eq(net, &init_net))
- return NOTIFY_DONE;
+ return mlxsw_sp_router_schedule_work(ptr, nb,
+ mlxsw_sp_router_mp_hash_event_work);
- net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
- if (!net_work)
- return NOTIFY_BAD;
-
- router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
- INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
- net_work->mlxsw_sp = router->mlxsw_sp;
- mlxsw_core_schedule_work(&net_work->work);
- break;
+ case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
+ return mlxsw_sp_router_schedule_work(ptr, nb,
+ mlxsw_sp_router_update_priority_work);
}
return NOTIFY_DONE;
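
The open-coded allocate-and-schedule sequence for multipath-hash events is hoisted into mlxsw_sp_router_schedule_work(), so each netevent case collapses to one line and the new forward-priority event reuses it; the GFP_ATOMIC allocation is kept because netevent notifiers may run in atomic context. Workers compatible with the helper follow the shape of mlxsw_sp_router_update_priority_work() above; a sketch with a hypothetical name:

static void mlxsw_sp_example_event_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);

	/* act on net_work->mlxsw_sp, then free the allocation made by
	 * mlxsw_sp_router_schedule_work() */
	kfree(net_work);
}
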
@@ -3165,8 +3156,9 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
* by the device and make sure the request can be satisfied.
*/
mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
- err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
- &alloc_size);
+ err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ *p_adj_grp_size, &alloc_size);
if (err)
return err;
/* It is possible the allocation results in more allocated
@@ -3278,7 +3270,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
/* No valid allocation size available. */
goto set_trap;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ ecmp_size, &adj_index);
if (err) {
/* We ran out of KVD linear space, just set the
* trap and let everything flow through kernel.
@@ -3313,7 +3306,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
old_adj_index, old_ecmp_size);
- mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ old_ecmp_size, old_adj_index);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
goto set_trap;
@@ -3335,7 +3329,8 @@ set_trap:
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
if (old_adj_index_valid)
- mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ nh_grp->ecmp_size, nh_grp->adj_index);
}
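
Two API shifts show up in the adjacency-group hunks above: the size query is renamed to mlxsw_sp_kvdl_alloc_count_query() and answers how many entries a request would actually consume, and mlxsw_sp_kvdl_free() now takes the entry count, since a typed backend cannot necessarily infer the extent from the index alone; note how the old ECMP size is passed when the old range is released. The pairing, sketched from those call sites:

	unsigned int alloc_count;
	u32 adj_index;
	int err;

	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
					      ecmp_size, &alloc_count);
	if (err)
		return err;
	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  alloc_count, &adj_index);
	if (err)
		return err;
	/* ... later, release with the same count ... */
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   alloc_count, adj_index);
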
static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
@@ -5967,7 +5962,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct mlxsw_sp_rif *
+struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
@@ -6024,6 +6019,12 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
!list_empty(&inet6_dev->addr_list))
addr_list_empty = false;
+	/* macvlans do not have a RIF, but rather piggyback on the
+ * RIF of their lower device.
+ */
+ if (netif_is_macvlan(dev) && addr_list_empty)
+ return true;
+
if (rif && addr_list_empty &&
!netif_is_l3_slave(rif->dev))
return true;
@@ -6125,6 +6126,11 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
return rif->dev;
}
+struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
+{
+ return rif->fid;
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
@@ -6162,7 +6168,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
rif->ops = ops;
if (ops->fid_get) {
- fid = ops->fid_get(rif);
+ fid = ops->fid_get(rif, extack);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
goto err_fid_get;
@@ -6267,7 +6273,7 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
}
/* FID was already created, just take a reference */
- fid = rif->ops->fid_get(rif);
+ fid = rif->ops->fid_get(rif, extack);
err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
if (err)
goto err_fid_port_vid_map;
@@ -6432,6 +6438,123 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
return 0;
}
+static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
+{
+ u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+ return ether_addr_equal_masked(mac, vrrp4, mask);
+}
+
+static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
+{
+ u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+ return ether_addr_equal_masked(mac, vrrp6, mask);
+}
+
+static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+ const u8 *mac, bool adding)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ u8 vrrp_id = adding ? mac[5] : 0;
+ int err;
+
+ if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
+ !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
+ return 0;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
+ mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
+ else
+ mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
+ struct mlxsw_sp_rif *rif;
+ int err;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
+ if (!rif) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
+ macvlan_dev->dev_addr, true);
+ if (err)
+ goto err_rif_vrrp_add;
+
+ /* Make sure the bridge driver does not have this MAC pointing at
+ * some other port.
+ */
+ if (rif->ops->fdb_del)
+ rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
+
+ return 0;
+
+err_rif_vrrp_add:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+ return err;
+}
+
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
+ /* If we do not have a RIF, then we already took care of
+ * removing the macvlan's MAC during RIF deletion.
+ */
+ if (!rif)
+ return;
+ mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
+ false);
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+}
+
+static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp;
+
+ mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
+ case NETDEV_DOWN:
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+ break;
+ }
+
+ return 0;
+}
+
static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
unsigned long event,
struct netlink_ext_ack *extack)
@@ -6444,6 +6567,8 @@ static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
else if (is_vlan_dev(dev))
return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
+ else if (netif_is_macvlan(dev))
+ return mlxsw_sp_inetaddr_macvlan_event(dev, event, extack);
else
return 0;
}
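
The VRRP helpers match the virtual-router MAC prefixes from RFC 5798: 00:00:5e:00:01:{VRID} for IPv4 and 00:00:5e:00:02:{VRID} for IPv6, with the mask zeroing the final byte so any VRID matches; that byte is then programmed into RITR as the VRRP ID. A worked example of the masked compare:

	/* macvlan carrying IPv4 VRRP VRID 7 */
	u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x07 };

	WARN_ON(!mlxsw_sp_rif_macvlan_is_vrrp4(mac));	/* VRID 7 matches */
	WARN_ON(mlxsw_sp_rif_macvlan_is_vrrp6(mac));	/* wrong protocol */
	/* mlxsw_sp_rif_vrrp_op() then sets vrrp_id = mac[5] = 7 on
	 * NETDEV_UP and clears it back to 0 on NETDEV_DOWN */
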
@@ -6684,7 +6809,10 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
int err = 0;
- if (!mlxsw_sp)
+ /* We do not create a RIF for a macvlan, but only use it to
+ * direct more MAC addresses to the router.
+ */
+ if (!mlxsw_sp || netif_is_macvlan(l3_dev))
return 0;
switch (event) {
@@ -6705,6 +6833,27 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
return err;
}
+static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data)
+{
+ struct mlxsw_sp_rif *rif = data;
+
+ if (!netif_is_macvlan(dev))
+ return 0;
+
+ return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+}
+
+static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
+{
+ if (!netif_is_macvlan_port(rif->dev))
+ return 0;
+
+ netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
+ return netdev_walk_all_upper_dev_rcu(rif->dev,
+ __mlxsw_sp_rif_macvlan_flush, rif);
+}
+
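
mlxsw_sp_rif_macvlan_flush() covers RIF teardown while macvlan uppers still exist: their MACs would otherwise keep FDB entries pointing at a dead router interface. netdev_walk_all_upper_dev_rcu() invokes the callback for every upper device, and a nonzero return aborts the walk and is propagated, which is how a failing FDB write surfaces to the caller here. The callback contract, sketched with a hypothetical name:

static int mlxsw_sp_example_upper_cb(struct net_device *dev, void *data)
{
	struct mlxsw_sp_rif *rif = data;

	if (!netif_is_macvlan(dev))
		return 0;		/* not interesting; keep walking */
	netdev_dbg(dev, "macvlan upper of %s\n", rif->dev->name);
	return 0;			/* nonzero would abort the walk */
}
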
static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
@@ -6771,11 +6920,13 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_rif_subport_op(rif, false);
}
static struct mlxsw_sp_fid *
-mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}
@@ -6857,6 +7008,7 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
@@ -6865,19 +7017,49 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
}
static struct mlxsw_sp_fid *
-mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
- u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
+ u16 vid;
+ int err;
+
+ if (is_vlan_dev(rif->dev)) {
+ vid = vlan_dev_vlan_id(rif->dev);
+ } else {
+ err = br_vlan_get_pvid(rif->dev, &vid);
+ if (err < 0 || !vid) {
+ NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
+ return ERR_PTR(-EINVAL);
+ }
+ }
return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}
+static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct switchdev_notifier_fdb_info info;
+ struct net_device *br_dev;
+ struct net_device *dev;
+
+ br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
+ dev = br_fdb_find_port(br_dev, mac, vid);
+ if (!dev)
+ return;
+
+ info.addr = mac;
+ info.vid = vid;
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
+}
+
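+/* SWITCHDEV_FDB_DEL_TO_BRIDGE asks the bridge layer to drop its own
+ * {mac, vid} FDB entry for the port found above, so that frames destined
+ * to the router interface's MAC are routed rather than bridged.
+ */
+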
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
.configure = mlxsw_sp_rif_vlan_configure,
.deconfigure = mlxsw_sp_rif_vlan_deconfigure,
.fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
@@ -6929,6 +7111,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
@@ -6937,17 +7120,33 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
}
static struct mlxsw_sp_fid *
-mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}
+static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
+{
+ struct switchdev_notifier_fdb_info info;
+ struct net_device *dev;
+
+ dev = br_fdb_find_port(rif->dev, mac, 0);
+ if (!dev)
+ return;
+
+ info.addr = mac;
+ info.vid = 0;
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
+}
+
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
.type = MLXSW_SP_RIF_TYPE_FID,
.rif_size = sizeof(struct mlxsw_sp_rif),
.configure = mlxsw_sp_rif_fid_configure,
.deconfigure = mlxsw_sp_rif_fid_deconfigure,
.fid_get = mlxsw_sp_rif_fid_fid_get,
+ .fdb_del = mlxsw_sp_rif_fid_fdb_del,
};
static struct mlxsw_sp_rif_ipip_lb *
@@ -7172,6 +7371,7 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
+ bool usp = init_net.ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
int err;
@@ -7182,7 +7382,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
- mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
+ mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
return err;
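/* The "update switch priority" (usp) bit now follows the new
 * net.ipv4.ip_fwd_update_priority sysctl of init_net at router init, e.g.
 *
 *	# sysctl -w net.ipv4.ip_fwd_update_priority=0
 *
 * stops forwarded packets from having their switch priority rewritten;
 * runtime flips are presumably propagated by a netevent handler elsewhere
 * in this series.
 */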
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index a01edcf56797..1a60391daafa 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_ROUTER_H_
#define _MLXSW_ROUTER_H_
@@ -66,6 +35,8 @@ struct mlxsw_sp_neigh_entry;
struct mlxsw_sp_nexthop;
struct mlxsw_sp_ipip_entry;
+struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
@@ -75,6 +46,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
+struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 3d187d88cc7c..d965fd275c90 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -1,41 +1,11 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/mlxsw_span.c
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2018 Petr Machata <petrm@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <net/arp.h>
#include <net/gre.h>
+#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>
@@ -254,7 +224,9 @@ mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
struct list_head *iter;
netdev_for_each_lower_dev(lag_dev, dev, iter)
- if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev))
+ if (netif_carrier_ok(dev) &&
+ net_lag_port_dev_txable(dev) &&
+ mlxsw_sp_port_dev_check(dev))
return dev;
return NULL;
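/* net_lag_port_dev_txable() (from the newly included <net/lag.h>) reports
 * whether the LAG master would actually transmit through this slave, e.g.
 * the active port of an active-backup bond, so the SPAN entry now binds
 * the mirror to a port that can pass traffic rather than one that is
 * merely administratively up.
 */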
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 14a6de904db1..5e04252f2a11 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -1,35 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/mlxsw_span.h
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_SPECTRUM_SPAN_H
#define _MLXSW_SPECTRUM_SPAN_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index eea5666a86b2..0d8444aaba01 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -1135,6 +1102,39 @@ err_port_vlan_set:
return err;
}
+static int
+mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_fid *fid;
+ u16 pvid;
+ u16 vid;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
+ if (!rif)
+ return 0;
+ fid = mlxsw_sp_rif_fid(rif);
+ pvid = mlxsw_sp_fid_8021q_vid(fid);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ if (vid != pvid) {
+ netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
+ return -EBUSY;
+ }
+ } else {
+ if (vid == pvid) {
+ netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
+
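+/* Example of what this veto catches (illustrative commands), assuming a
+ * RIF exists on br0 whose FID was created from PVID 1:
+ *
+ *	bridge vlan add dev br0 vid 10 pvid self  -> -EBUSY (PVID change)
+ *	bridge vlan add dev br0 vid 1 self        -> -EBUSY (PVID removal)
+ *
+ * since either change would detach the 802.1Q FID backing the router
+ * interface.
+ */
+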
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans)
@@ -1146,8 +1146,18 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid;
- if (netif_is_bridge_master(orig_dev))
- return -EOPNOTSUPP;
+ if (netif_is_bridge_master(orig_dev)) {
+ int err = 0;
+
+ if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
+ br_vlan_enabled(orig_dev) &&
+ switchdev_trans_ph_prepare(trans))
+ err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
+ orig_dev, vlan);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
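+ /* Returning -EOPNOTSUPP above preserves the old behaviour: the bridge
+ * keeps handling VLANs on the bridge device itself in software; the
+ * RIF PVID check merely gets a chance to veto the transaction first.
+ */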
if (switchdev_trans_ph_prepare(trans))
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
index bc44d5effc28..c218e10bd835 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
@@ -1,35 +1,5 @@
-/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
index c698ec4fd9d4..bcf2e79a21c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/switchib.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 3922c1cfe5f5..2d4f213e154d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1,38 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015-2016 Elad Raz <eladr@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 399e9d6993f7..53020724c2f6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -1,38 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/trap.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
+
#ifndef _MLXSW_TRAP_H
#define _MLXSW_TRAP_H
@@ -63,6 +31,7 @@ enum {
MLXSW_TRAP_ID_LBERROR = 0x54,
MLXSW_TRAP_ID_IPV4_OSPF = 0x55,
MLXSW_TRAP_ID_IPV4_PIM = 0x58,
+ MLXSW_TRAP_ID_IPV4_VRRP = 0x59,
MLXSW_TRAP_ID_RPF = 0x5C,
MLXSW_TRAP_ID_IP2ME = 0x5F,
MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60,
@@ -78,6 +47,7 @@ enum {
MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F,
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
MLXSW_TRAP_ID_IPV6_PIM = 0x79,
+ MLXSW_TRAP_ID_IPV6_VRRP = 0x7A,
MLXSW_TRAP_ID_IPV4_BGP = 0x88,
MLXSW_TRAP_ID_IPV6_BGP = 0x89,
MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
index fdf94720ca62..da51dd9d5e44 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -1,37 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/txheader.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#ifndef _MLXSW_TXHEADER_H
#define _MLXSW_TXHEADER_H
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index f3e9dd47b56f..0e9719fbc624 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -30,6 +30,7 @@
#include <linux/ethtool.h>
#include <linux/cache.h>
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -1078,7 +1079,7 @@ static void ks_stop_rx(struct ks_net *ks)
} /* ks_stop_rx */
-static unsigned long const ethernet_polynomial = 0x04c11db7U;
+static unsigned long const ethernet_polynomial = CRC32_POLY_BE;
static unsigned long ether_gen_crc(int length, u8 *data)
{
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index b72d1bd11296..ebbdfb908745 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -3373,7 +3373,6 @@ static void port_get_link_speed(struct ksz_port *port)
*/
static void port_set_link_speed(struct ksz_port *port)
{
- struct ksz_port_info *info;
struct ksz_hw *hw = port->hw;
u16 data;
u16 cfg;
@@ -3382,8 +3381,6 @@ static void port_set_link_speed(struct ksz_port *port)
int p;
for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
- info = &hw->port_info[p];
-
port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 71dca8bd51ac..16bd3f44dbe8 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -46,6 +46,7 @@ config LAN743X
tristate "LAN743x support"
depends on PCI
select PHYLIB
+ select CRC16
---help---
Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index 2e982cc249fb..538926d2b43f 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -6,4 +6,4 @@ obj-$(CONFIG_ENC28J60) += enc28j60.o
obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o
obj-$(CONFIG_LAN743X) += lan743x.o
-lan743x-objs := lan743x_main.o
+lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
new file mode 100644
index 000000000000..07c1eb63415a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -0,0 +1,723 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#include <linux/netdevice.h>
+#include "lan743x_main.h"
+#include "lan743x_ethtool.h"
+#include <linux/net_tstamp.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+
+/* eeprom */
+#define LAN743X_EEPROM_MAGIC (0x74A5)
+#define LAN743X_OTP_MAGIC (0x74F3)
+#define EEPROM_INDICATOR_1 (0xA5)
+#define EEPROM_INDICATOR_2 (0xAA)
+#define EEPROM_MAC_OFFSET (0x01)
+#define MAX_EEPROM_SIZE 512
+#define OTP_INDICATOR_1 (0xF3)
+#define OTP_INDICATOR_2 (0xF7)
+
+static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ unsigned long timeout;
+ u32 buf;
+ int i;
+
+ buf = lan743x_csr_read(adapter, OTP_PWR_DN);
+
+ if (buf & OTP_PWR_DN_PWRDN_N_) {
+ /* clear it and wait to be cleared */
+ lan743x_csr_write(adapter, OTP_PWR_DN, 0);
+
+ timeout = jiffies + HZ;
+ do {
+ udelay(1);
+ buf = lan743x_csr_read(adapter, OTP_PWR_DN);
+ if (time_after(jiffies, timeout)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "timeout on OTP_PWR_DN completion\n");
+ return -EIO;
+ }
+ } while (buf & OTP_PWR_DN_PWRDN_N_);
+ }
+
+ /* set to BYTE program mode */
+ lan743x_csr_write(adapter, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+
+ for (i = 0; i < length; i++) {
+ lan743x_csr_write(adapter, OTP_ADDR1,
+ ((offset + i) >> 8) &
+ OTP_ADDR1_15_11_MASK_);
+ lan743x_csr_write(adapter, OTP_ADDR2,
+ ((offset + i) &
+ OTP_ADDR2_10_3_MASK_));
+ lan743x_csr_write(adapter, OTP_PRGM_DATA, data[i]);
+ lan743x_csr_write(adapter, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
+ lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+ timeout = jiffies + HZ;
+ do {
+ udelay(1);
+ buf = lan743x_csr_read(adapter, OTP_STATUS);
+ if (time_after(jiffies, timeout)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Timeout on OTP_STATUS completion\n");
+ return -EIO;
+ }
+ } while (buf & OTP_STATUS_BUSY_);
+ }
+
+ return 0;
+}
+
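+/* The busy-wait loops above open-code the generic poll-with-timeout
+ * pattern; a minimal sketch of the same wait via <linux/iopoll.h>
+ * (illustration only, lan743x_csr_read_otp_status() being a hypothetical
+ * single-argument wrapper around lan743x_csr_read(adapter, OTP_STATUS)):
+ *
+ *	u32 buf;
+ *	int ret = readx_poll_timeout_atomic(lan743x_csr_read_otp_status,
+ *					    adapter, buf,
+ *					    !(buf & OTP_STATUS_BUSY_),
+ *					    1, 1000000);
+ */
+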
+static int lan743x_eeprom_wait(struct lan743x_adapter *adapter)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+
+ do {
+ val = lan743x_csr_read(adapter, E2P_CMD);
+
+ if (!(val & E2P_CMD_EPC_BUSY_) ||
+ (val & E2P_CMD_EPC_TIMEOUT_))
+ break;
+ usleep_range(40, 100);
+ } while (!time_after(jiffies, start_time + HZ));
+
+ if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "EEPROM read operation timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int lan743x_eeprom_confirm_not_busy(struct lan743x_adapter *adapter)
+{
+ unsigned long start_time = jiffies;
+ u32 val;
+
+ do {
+ val = lan743x_csr_read(adapter, E2P_CMD);
+
+ if (!(val & E2P_CMD_EPC_BUSY_))
+ return 0;
+
+ usleep_range(40, 100);
+ } while (!time_after(jiffies, start_time + HZ));
+
+ netif_warn(adapter, drv, adapter->netdev, "EEPROM is busy\n");
+ return -EIO;
+}
+
+static int lan743x_eeprom_read(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ retval = lan743x_eeprom_confirm_not_busy(adapter);
+ if (retval)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
+ val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, E2P_CMD, val);
+
+ retval = lan743x_eeprom_wait(adapter);
+ if (retval < 0)
+ return retval;
+
+ val = lan743x_csr_read(adapter, E2P_DATA);
+ data[i] = val & 0xFF;
+ offset++;
+ }
+
+ return 0;
+}
+
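+/* Usage sketch (illustration only, helper and fields as defined in this
+ * driver): fetching the stored MAC bytes behind EEPROM_MAC_OFFSET:
+ *
+ *	u8 mac[ETH_ALEN];
+ *
+ *	if (!lan743x_eeprom_read(adapter, EEPROM_MAC_OFFSET,
+ *				 ETH_ALEN, mac))
+ *		ether_addr_copy(adapter->mac_address, mac);
+ */
+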
+static int lan743x_eeprom_write(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ retval = lan743x_eeprom_confirm_not_busy(adapter);
+ if (retval)
+ return retval;
+
+ /* Issue write/erase enable command */
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
+ lan743x_csr_write(adapter, E2P_CMD, val);
+
+ retval = lan743x_eeprom_wait(adapter);
+ if (retval < 0)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ /* Fill data register */
+ val = data[i];
+ lan743x_csr_write(adapter, E2P_DATA, val);
+
+ /* Send "write" command */
+ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
+ val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, E2P_CMD, val);
+
+ retval = lan743x_eeprom_wait(adapter);
+ if (retval < 0)
+ return retval;
+
+ offset++;
+ }
+
+ return 0;
+}
+
+static void lan743x_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->bus_info,
+ pci_name(adapter->pdev), sizeof(info->bus_info));
+}
+
+static u32 lan743x_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void lan743x_ethtool_set_msglevel(struct net_device *netdev,
+ u32 msglevel)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = msglevel;
+}
+
+static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev)
+{
+ return MAX_EEPROM_SIZE;
+}
+
+static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return lan743x_eeprom_read(adapter, ee->offset, ee->len, data);
+}
+
+static int lan743x_ethtool_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret = -EINVAL;
+
+ if (ee->magic == LAN743X_EEPROM_MAGIC)
+ ret = lan743x_eeprom_write(adapter, ee->offset, ee->len,
+ data);
+ /* Beware! OTP is One Time Programmable ONLY, so perform strict
+ * sanity checks on the request before committing anything.
+ */
+ else if ((ee->magic == LAN743X_OTP_MAGIC) &&
+ (ee->offset == 0) &&
+ (ee->len == MAX_EEPROM_SIZE) &&
+ (data[0] == OTP_INDICATOR_1))
+ ret = lan743x_otp_write(adapter, ee->offset, ee->len, data);
+
+ return ret;
+}
+
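+/* From userspace the two magics keep the backends apart, e.g.
+ * (illustrative invocation):
+ *
+ *	ethtool -E eth0 magic 0x74A5 offset 0x10 value 0xAB
+ *
+ * writes one EEPROM byte, while the OTP path only accepts magic 0x74F3
+ * with a full 512-byte image whose first byte is OTP_INDICATOR_1, since
+ * OTP cells cannot be rewritten once programmed.
+ */
+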
+static const char lan743x_set0_hw_cnt_strings[][ETH_GSTRING_LEN] = {
+ "RX FCS Errors",
+ "RX Alignment Errors",
+ "Rx Fragment Errors",
+ "RX Jabber Errors",
+ "RX Undersize Frame Errors",
+ "RX Oversize Frame Errors",
+ "RX Dropped Frames",
+ "RX Unicast Byte Count",
+ "RX Broadcast Byte Count",
+ "RX Multicast Byte Count",
+ "RX Unicast Frames",
+ "RX Broadcast Frames",
+ "RX Multicast Frames",
+ "RX Pause Frames",
+ "RX 64 Byte Frames",
+ "RX 65 - 127 Byte Frames",
+ "RX 128 - 255 Byte Frames",
+ "RX 256 - 511 Bytes Frames",
+ "RX 512 - 1023 Byte Frames",
+ "RX 1024 - 1518 Byte Frames",
+ "RX Greater 1518 Byte Frames",
+};
+
+static const char lan743x_set1_sw_cnt_strings[][ETH_GSTRING_LEN] = {
+ "RX Queue 0 Frames",
+ "RX Queue 1 Frames",
+ "RX Queue 2 Frames",
+ "RX Queue 3 Frames",
+};
+
+static const char lan743x_set2_hw_cnt_strings[][ETH_GSTRING_LEN] = {
+ "RX Total Frames",
+ "EEE RX LPI Transitions",
+ "EEE RX LPI Time",
+ "RX Counter Rollover Status",
+ "TX FCS Errors",
+ "TX Excess Deferral Errors",
+ "TX Carrier Errors",
+ "TX Bad Byte Count",
+ "TX Single Collisions",
+ "TX Multiple Collisions",
+ "TX Excessive Collision",
+ "TX Late Collisions",
+ "TX Unicast Byte Count",
+ "TX Broadcast Byte Count",
+ "TX Multicast Byte Count",
+ "TX Unicast Frames",
+ "TX Broadcast Frames",
+ "TX Multicast Frames",
+ "TX Pause Frames",
+ "TX 64 Byte Frames",
+ "TX 65 - 127 Byte Frames",
+ "TX 128 - 255 Byte Frames",
+ "TX 256 - 511 Bytes Frames",
+ "TX 512 - 1023 Byte Frames",
+ "TX 1024 - 1518 Byte Frames",
+ "TX Greater 1518 Byte Frames",
+ "TX Total Frames",
+ "EEE TX LPI Transitions",
+ "EEE TX LPI Time",
+ "TX Counter Rollover Status",
+};
+
+static const u32 lan743x_set0_hw_cnt_addr[] = {
+ STAT_RX_FCS_ERRORS,
+ STAT_RX_ALIGNMENT_ERRORS,
+ STAT_RX_FRAGMENT_ERRORS,
+ STAT_RX_JABBER_ERRORS,
+ STAT_RX_UNDERSIZE_FRAME_ERRORS,
+ STAT_RX_OVERSIZE_FRAME_ERRORS,
+ STAT_RX_DROPPED_FRAMES,
+ STAT_RX_UNICAST_BYTE_COUNT,
+ STAT_RX_BROADCAST_BYTE_COUNT,
+ STAT_RX_MULTICAST_BYTE_COUNT,
+ STAT_RX_UNICAST_FRAMES,
+ STAT_RX_BROADCAST_FRAMES,
+ STAT_RX_MULTICAST_FRAMES,
+ STAT_RX_PAUSE_FRAMES,
+ STAT_RX_64_BYTE_FRAMES,
+ STAT_RX_65_127_BYTE_FRAMES,
+ STAT_RX_128_255_BYTE_FRAMES,
+ STAT_RX_256_511_BYTES_FRAMES,
+ STAT_RX_512_1023_BYTE_FRAMES,
+ STAT_RX_1024_1518_BYTE_FRAMES,
+ STAT_RX_GREATER_1518_BYTE_FRAMES,
+};
+
+static const u32 lan743x_set2_hw_cnt_addr[] = {
+ STAT_RX_TOTAL_FRAMES,
+ STAT_EEE_RX_LPI_TRANSITIONS,
+ STAT_EEE_RX_LPI_TIME,
+ STAT_RX_COUNTER_ROLLOVER_STATUS,
+ STAT_TX_FCS_ERRORS,
+ STAT_TX_EXCESS_DEFERRAL_ERRORS,
+ STAT_TX_CARRIER_ERRORS,
+ STAT_TX_BAD_BYTE_COUNT,
+ STAT_TX_SINGLE_COLLISIONS,
+ STAT_TX_MULTIPLE_COLLISIONS,
+ STAT_TX_EXCESSIVE_COLLISION,
+ STAT_TX_LATE_COLLISIONS,
+ STAT_TX_UNICAST_BYTE_COUNT,
+ STAT_TX_BROADCAST_BYTE_COUNT,
+ STAT_TX_MULTICAST_BYTE_COUNT,
+ STAT_TX_UNICAST_FRAMES,
+ STAT_TX_BROADCAST_FRAMES,
+ STAT_TX_MULTICAST_FRAMES,
+ STAT_TX_PAUSE_FRAMES,
+ STAT_TX_64_BYTE_FRAMES,
+ STAT_TX_65_127_BYTE_FRAMES,
+ STAT_TX_128_255_BYTE_FRAMES,
+ STAT_TX_256_511_BYTES_FRAMES,
+ STAT_TX_512_1023_BYTE_FRAMES,
+ STAT_TX_1024_1518_BYTE_FRAMES,
+ STAT_TX_GREATER_1518_BYTE_FRAMES,
+ STAT_TX_TOTAL_FRAMES,
+ STAT_EEE_TX_LPI_TRANSITIONS,
+ STAT_EEE_TX_LPI_TIME,
+ STAT_TX_COUNTER_ROLLOVER_STATUS
+};
+
+static void lan743x_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, lan743x_set0_hw_cnt_strings,
+ sizeof(lan743x_set0_hw_cnt_strings));
+ memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings)],
+ lan743x_set1_sw_cnt_strings,
+ sizeof(lan743x_set1_sw_cnt_strings));
+ memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings) +
+ sizeof(lan743x_set1_sw_cnt_strings)],
+ lan743x_set2_hw_cnt_strings,
+ sizeof(lan743x_set2_hw_cnt_strings));
+ break;
+ }
+}
+
+static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int data_index = 0;
+ u32 buf;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lan743x_set0_hw_cnt_addr); i++) {
+ buf = lan743x_csr_read(adapter, lan743x_set0_hw_cnt_addr[i]);
+ data[data_index++] = (u64)buf;
+ }
+ for (i = 0; i < ARRAY_SIZE(adapter->rx); i++)
+ data[data_index++] = (u64)(adapter->rx[i].frame_count);
+ for (i = 0; i < ARRAY_SIZE(lan743x_set2_hw_cnt_addr); i++) {
+ buf = lan743x_csr_read(adapter, lan743x_set2_hw_cnt_addr[i]);
+ data[data_index++] = (u64)buf;
+ }
+}
+
+static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ {
+ int ret;
+
+ ret = ARRAY_SIZE(lan743x_set0_hw_cnt_strings);
+ ret += ARRAY_SIZE(lan743x_set1_sw_cnt_strings);
+ ret += ARRAY_SIZE(lan743x_set2_hw_cnt_strings);
+ return ret;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
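+/* ethtool contract: get_sset_count(), get_strings() and
+ * get_ethtool_stats() must agree on both count and ordering, so all
+ * three walk the tables in the same set0/set1/set2 order; the result is
+ * 21 + 4 + 30 = 55 "ethtool -S" rows.
+ */
+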
+static int lan743x_ethtool_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *rxnfc,
+ u32 *rule_locs)
+{
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXFH:
+ rxnfc->data = 0;
+ switch (rxnfc->flow_type) {
+ case TCP_V4_FLOW: case UDP_V4_FLOW:
+ case TCP_V6_FLOW: case UDP_V6_FLOW:
+ rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case IPV4_FLOW: case IPV6_FLOW:
+ rxnfc->data |= RXH_IP_SRC | RXH_IP_DST;
+ return 0;
+ }
+ break;
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = LAN743X_USED_RX_CHANNELS;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+static u32 lan743x_ethtool_get_rxfh_key_size(struct net_device *netdev)
+{
+ return 40;
+}
+
+static u32 lan743x_ethtool_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return 128;
+}
+
+static int lan743x_ethtool_get_rxfh(struct net_device *netdev,
+ u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ if (indir) {
+ int dw_index;
+ int byte_index = 0;
+
+ for (dw_index = 0; dw_index < 32; dw_index++) {
+ u32 four_entries =
+ lan743x_csr_read(adapter, RFE_INDX(dw_index));
+
+ byte_index = dw_index << 2;
+ indir[byte_index + 0] =
+ ((four_entries >> 0) & 0x000000FF);
+ indir[byte_index + 1] =
+ ((four_entries >> 8) & 0x000000FF);
+ indir[byte_index + 2] =
+ ((four_entries >> 16) & 0x000000FF);
+ indir[byte_index + 3] =
+ ((four_entries >> 24) & 0x000000FF);
+ }
+ }
+ if (key) {
+ int dword_index;
+ int byte_index = 0;
+
+ for (dword_index = 0; dword_index < 10; dword_index++) {
+ u32 four_entries =
+ lan743x_csr_read(adapter,
+ RFE_HASH_KEY(dword_index));
+
+ byte_index = dword_index << 2;
+ key[byte_index + 0] =
+ ((four_entries >> 0) & 0x000000FF);
+ key[byte_index + 1] =
+ ((four_entries >> 8) & 0x000000FF);
+ key[byte_index + 2] =
+ ((four_entries >> 16) & 0x000000FF);
+ key[byte_index + 3] =
+ ((four_entries >> 24) & 0x000000FF);
+ }
+ }
+ if (hfunc)
+ (*hfunc) = ETH_RSS_HASH_TOP;
+ return 0;
+}
+
+static int lan743x_ethtool_set_rxfh(struct net_device *netdev,
+ const u32 *indir, const u8 *key,
+ const u8 hfunc)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ u32 indir_value = 0;
+ int dword_index = 0;
+ int byte_index = 0;
+
+ for (dword_index = 0; dword_index < 32; dword_index++) {
+ byte_index = dword_index << 2;
+ indir_value =
+ (((indir[byte_index + 0] & 0x000000FF) << 0) |
+ ((indir[byte_index + 1] & 0x000000FF) << 8) |
+ ((indir[byte_index + 2] & 0x000000FF) << 16) |
+ ((indir[byte_index + 3] & 0x000000FF) << 24));
+ lan743x_csr_write(adapter, RFE_INDX(dword_index),
+ indir_value);
+ }
+ }
+ if (key) {
+ int dword_index = 0;
+ int byte_index = 0;
+ u32 key_value = 0;
+
+ for (dword_index = 0; dword_index < 10; dword_index++) {
+ byte_index = dword_index << 2;
+ key_value =
+ ((((u32)(key[byte_index + 0])) << 0) |
+ (((u32)(key[byte_index + 1])) << 8) |
+ (((u32)(key[byte_index + 2])) << 16) |
+ (((u32)(key[byte_index + 3])) << 24));
+ lan743x_csr_write(adapter, RFE_HASH_KEY(dword_index),
+ key_value);
+ }
+ }
+ return 0;
+}
+
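+/* Worked example of the dword packing used above: the 128-entry
+ * indirection table occupies 32 registers, four 8-bit queue indices per
+ * dword, least-significant byte first; for indir[4..7] = {1, 2, 3, 0}
+ * the value written to RFE_INDX(1) is
+ *
+ *	(1 << 0) | (2 << 8) | (3 << 16) | (0 << 24) = 0x00030201
+ */
+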
+static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *ts_info)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp.ptp_clock)
+ ts_info->phc_index = ptp_clock_index(adapter->ptp.ptp_clock);
+ else
+ ts_info->phc_index = -1;
+
+ ts_info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ ts_info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
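+/* This is what "ethtool -T eth0" reports: the PHC index (-1 until the
+ * PTP clock registers), software plus raw hardware timestamping, TX
+ * off/on/one-step-sync modes, and HWTSTAMP_FILTER_ALL meaning the
+ * hardware stamps every received frame, not only PTP packets.
+ */
+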
+static int lan743x_ethtool_get_eee(struct net_device *netdev,
+ struct ethtool_eee *eee)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ u32 buf;
+ int ret;
+
+ if (!phydev)
+ return -EIO;
+ if (!phydev->drv) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Missing PHY Driver\n");
+ return -EIO;
+ }
+
+ ret = phy_ethtool_get_eee(phydev, eee);
+ if (ret < 0)
+ return ret;
+
+ buf = lan743x_csr_read(adapter, MAC_CR);
+ if (buf & MAC_CR_EEE_EN_) {
+ eee->eee_enabled = true;
+ eee->eee_active = !!(eee->advertised & eee->lp_advertised);
+ eee->tx_lpi_enabled = true;
+ /* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same microsecond unit */
+ buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
+ eee->tx_lpi_timer = buf;
+ } else {
+ eee->eee_enabled = false;
+ eee->eee_active = false;
+ eee->tx_lpi_enabled = false;
+ eee->tx_lpi_timer = 0;
+ }
+
+ return 0;
+}
+
+static int lan743x_ethtool_set_eee(struct net_device *netdev,
+ struct ethtool_eee *eee)
+{
+ struct lan743x_adapter *adapter;
+ struct phy_device *phydev;
+ u32 buf = 0;
+ int ret = 0;
+
+ if (!netdev)
+ return -EINVAL;
+ adapter = netdev_priv(netdev);
+ if (!adapter)
+ return -EINVAL;
+ phydev = netdev->phydev;
+ if (!phydev)
+ return -EIO;
+ if (!phydev->drv) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Missing PHY Driver\n");
+ return -EIO;
+ }
+
+ if (eee->eee_enabled) {
+ ret = phy_init_eee(phydev, 0);
+ if (ret) {
+ netif_err(adapter, drv, adapter->netdev,
+ "EEE initialization failed\n");
+ return ret;
+ }
+
+ buf = (u32)eee->tx_lpi_timer;
+ lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf);
+
+ buf = lan743x_csr_read(adapter, MAC_CR);
+ buf |= MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, buf);
+ } else {
+ buf = lan743x_csr_read(adapter, MAC_CR);
+ buf &= ~MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, buf);
+ }
+
+ return phy_ethtool_set_eee(phydev, eee);
+}
+
+#ifdef CONFIG_PM
+static void lan743x_ethtool_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+ phy_ethtool_get_wol(netdev->phydev, wol);
+
+ wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
+ WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+
+ wol->wolopts |= adapter->wolopts;
+}
+
+static int lan743x_ethtool_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ adapter->wolopts = 0;
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wolopts |= WAKE_UCAST;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wolopts |= WAKE_MCAST;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wolopts |= WAKE_BCAST;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wolopts |= WAKE_MAGIC;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wolopts |= WAKE_PHY;
+ if (wol->wolopts & WAKE_ARP)
+ adapter->wolopts |= WAKE_ARP;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
+
+ phy_ethtool_set_wol(netdev->phydev, wol);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
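+/* Illustrative userspace usage:
+ *
+ *	ethtool -s eth0 wol g	# wake on magic packet only
+ *	ethtool -s eth0 wol d	# disable wake-up
+ *
+ * device_set_wakeup_enable() above keys the PCI wake capability off
+ * whether any wolopts bit remains set.
+ */
+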
+const struct ethtool_ops lan743x_ethtool_ops = {
+ .get_drvinfo = lan743x_ethtool_get_drvinfo,
+ .get_msglevel = lan743x_ethtool_get_msglevel,
+ .set_msglevel = lan743x_ethtool_set_msglevel,
+ .get_link = ethtool_op_get_link,
+
+ .get_eeprom_len = lan743x_ethtool_get_eeprom_len,
+ .get_eeprom = lan743x_ethtool_get_eeprom,
+ .set_eeprom = lan743x_ethtool_set_eeprom,
+ .get_strings = lan743x_ethtool_get_strings,
+ .get_ethtool_stats = lan743x_ethtool_get_ethtool_stats,
+ .get_sset_count = lan743x_ethtool_get_sset_count,
+ .get_rxnfc = lan743x_ethtool_get_rxnfc,
+ .get_rxfh_key_size = lan743x_ethtool_get_rxfh_key_size,
+ .get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size,
+ .get_rxfh = lan743x_ethtool_get_rxfh,
+ .set_rxfh = lan743x_ethtool_set_rxfh,
+ .get_ts_info = lan743x_ethtool_get_ts_info,
+ .get_eee = lan743x_ethtool_get_eee,
+ .set_eee = lan743x_ethtool_set_eee,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+#ifdef CONFIG_PM
+ .get_wol = lan743x_ethtool_get_wol,
+ .set_wol = lan743x_ethtool_set_wol,
+#endif
+};
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.h b/drivers/net/ethernet/microchip/lan743x_ethtool.h
new file mode 100644
index 000000000000..d0d11a777a58
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#ifndef _LAN743X_ETHTOOL_H
+#define _LAN743X_ETHTOOL_H
+
+#include "linux/ethtool.h"
+
+extern const struct ethtool_ops lan743x_ethtool_ops;
+
+#endif /* _LAN743X_ETHTOOL_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index dd947e4dd3ce..e7dce79ff2c9 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -11,7 +11,9 @@
#include <linux/phy.h>
#include <linux/rtnetlink.h>
#include <linux/iopoll.h>
+#include <linux/crc16.h>
#include "lan743x_main.h"
+#include "lan743x_ethtool.h"
static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
{
@@ -53,13 +55,13 @@ return_error:
return ret;
}
-static u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
+u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
{
return ioread32(&adapter->csr.csr_address[offset]);
}
-static void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
- u32 data)
+void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
+ u32 data)
{
iowrite32(data, &adapter->csr.csr_address[offset]);
}
@@ -265,6 +267,10 @@ static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
lan743x_intr_software_isr(adapter);
int_sts &= ~INT_BIT_SW_GP_;
}
+ if (int_sts & INT_BIT_1588_) {
+ lan743x_ptp_isr(adapter);
+ int_sts &= ~INT_BIT_1588_;
+ }
}
if (int_sts)
lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
@@ -828,7 +834,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
}
if (!mac_address_valid)
- random_ether_addr(adapter->mac_address);
+ eth_random_addr(adapter->mac_address);
lan743x_mac_set_address(adapter, adapter->mac_address);
ether_addr_copy(netdev->dev_addr, adapter->mac_address);
return 0;
@@ -974,6 +980,7 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
ksettings.base.duplex,
local_advertisement,
remote_advertisement);
+ lan743x_ptp_update_latency(adapter, ksettings.base.speed);
}
}
@@ -1023,6 +1030,24 @@ return_error:
return ret;
}
+static void lan743x_rfe_open(struct lan743x_adapter *adapter)
+{
+ lan743x_csr_write(adapter, RFE_RSS_CFG,
+ RFE_RSS_CFG_UDP_IPV6_EX_ |
+ RFE_RSS_CFG_TCP_IPV6_EX_ |
+ RFE_RSS_CFG_IPV6_EX_ |
+ RFE_RSS_CFG_UDP_IPV6_ |
+ RFE_RSS_CFG_TCP_IPV6_ |
+ RFE_RSS_CFG_IPV6_ |
+ RFE_RSS_CFG_UDP_IPV4_ |
+ RFE_RSS_CFG_TCP_IPV4_ |
+ RFE_RSS_CFG_IPV4_ |
+ RFE_RSS_CFG_VALID_HASH_BITS_ |
+ RFE_RSS_CFG_RSS_QUEUE_ENABLE_ |
+ RFE_RSS_CFG_RSS_HASH_STORE_ |
+ RFE_RSS_CFG_RSS_ENABLE_);
+}
+
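+/* Turns on RSS hashing over the IPv4/IPv6 TCP/UDP tuples, hash storing
+ * and queue steering in the receive filtering engine; the indirection
+ * table and 40-byte Toeplitz key feeding it are the ones exposed via
+ * get/set_rxfh in lan743x_ethtool.c.
+ */
+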
static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
{
u8 *mac_addr;
@@ -1206,6 +1231,7 @@ static void lan743x_tx_release_desc(struct lan743x_tx *tx,
struct lan743x_tx_buffer_info *buffer_info = NULL;
struct lan743x_tx_descriptor *descriptor = NULL;
u32 descriptor_type = 0;
+ bool ignore_sync;
descriptor = &tx->ring_cpu_ptr[descriptor_index];
buffer_info = &tx->buffer_info[descriptor_index];
@@ -1236,11 +1262,27 @@ clean_up_data_descriptor:
buffer_info->dma_ptr = 0;
buffer_info->buffer_length = 0;
}
- if (buffer_info->skb) {
+ if (!buffer_info->skb)
+ goto clear_active;
+
+ if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
+ goto clear_skb;
}
+ if (cleanup) {
+ lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
+ dev_kfree_skb(buffer_info->skb);
+ } else {
+ ignore_sync = (buffer_info->flags &
+ TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
+ lan743x_ptp_tx_timestamp_skb(tx->adapter,
+ buffer_info->skb, ignore_sync);
+ }
+
+clear_skb:
+ buffer_info->skb = NULL;
+
clear_active:
buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;
@@ -1301,10 +1343,25 @@ static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
return last_head - last_tail - 1;
}
+void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
+ bool enable_timestamping,
+ bool enable_onestep_sync)
+{
+ if (enable_timestamping)
+ tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED;
+ else
+ tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED;
+ if (enable_onestep_sync)
+ tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC;
+ else
+ tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC;
+}
+
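+/* Presumably called from the SIOCSHWTSTAMP path (lan743x_ptp_ioctl())
+ * when the requested hwtstamp_config is applied, so the xmit hot path
+ * below only tests tx->ts_flags instead of re-parsing the config per
+ * frame.
+ */
+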
static int lan743x_tx_frame_start(struct lan743x_tx *tx,
unsigned char *first_buffer,
unsigned int first_buffer_length,
unsigned int frame_length,
+ bool time_stamp,
bool check_sum)
{
/* called only from within lan743x_tx_xmit_frame.
@@ -1342,6 +1399,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
TX_DESC_DATA0_DTYPE_DATA_ |
TX_DESC_DATA0_FS_ |
TX_DESC_DATA0_FCS_;
+ if (time_stamp)
+ tx->frame_data0 |= TX_DESC_DATA0_TSE_;
if (check_sum)
tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
@@ -1455,6 +1514,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
static void lan743x_tx_frame_end(struct lan743x_tx *tx,
struct sk_buff *skb,
+ bool time_stamp,
bool ignore_sync)
{
/* called only from within lan743x_tx_xmit_frame
@@ -1472,6 +1532,8 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
buffer_info = &tx->buffer_info[tx->frame_tail];
buffer_info->skb = skb;
+ if (time_stamp)
+ buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
if (ignore_sync)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
@@ -1500,6 +1562,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
unsigned int frame_length = 0;
unsigned int head_length = 0;
unsigned long irq_flags = 0;
+ bool do_timestamp = false;
bool ignore_sync = false;
int nr_frags = 0;
bool gso = false;
@@ -1521,6 +1584,14 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
}
/* space available, transmit skb */
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) &&
+ (lan743x_ptp_request_tx_timestamp(tx->adapter))) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ do_timestamp = true;
+ if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
+ ignore_sync = true;
+ }
head_length = skb_headlen(skb);
frame_length = skb_pagelen(skb);
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -1534,6 +1605,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
if (lan743x_tx_frame_start(tx,
skb->data, head_length,
start_frame_length,
+ do_timestamp,
skb->ip_summed == CHECKSUM_PARTIAL)) {
dev_kfree_skb(skb);
goto unlock;
@@ -1561,7 +1633,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
}
finish:
- lan743x_tx_frame_end(tx, skb, ignore_sync);
+ lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);
unlock:
spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
@@ -2390,6 +2462,8 @@ static int lan743x_netdev_close(struct net_device *netdev)
for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
lan743x_rx_close(&adapter->rx[index]);
+ lan743x_ptp_close(adapter);
+
lan743x_phy_close(adapter);
lan743x_mac_close(adapter);
@@ -2417,6 +2491,12 @@ static int lan743x_netdev_open(struct net_device *netdev)
if (ret)
goto close_mac;
+ ret = lan743x_ptp_open(adapter);
+ if (ret)
+ goto close_phy;
+
+ lan743x_rfe_open(adapter);
+
for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
ret = lan743x_rx_open(&adapter->rx[index]);
if (ret)
@@ -2434,6 +2514,9 @@ close_rx:
if (adapter->rx[index].ring_cpu_ptr)
lan743x_rx_close(&adapter->rx[index]);
}
+ lan743x_ptp_close(adapter);
+
+close_phy:
lan743x_phy_close(adapter);
close_mac:
@@ -2461,6 +2544,8 @@ static int lan743x_netdev_ioctl(struct net_device *netdev,
{
if (!netif_running(netdev))
return -EINVAL;
+ if (cmd == SIOCSHWTSTAMP)
+ return lan743x_ptp_ioctl(netdev, ifr, cmd);
return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}
@@ -2585,6 +2670,11 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
adapter->intr.irq = adapter->pdev->irq;
lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
mutex_init(&adapter->dp_lock);
+
+ ret = lan743x_gpio_init(adapter);
+ if (ret)
+ return ret;
+
ret = lan743x_mac_init(adapter);
if (ret)
return ret;
@@ -2593,6 +2683,10 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
if (ret)
return ret;
+ ret = lan743x_ptp_init(adapter);
+ if (ret)
+ return ret;
+
lan743x_rfe_update_mac_address(adapter);
ret = lan743x_dmac_init(adapter);
@@ -2689,6 +2783,7 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
goto cleanup_hardware;
adapter->netdev->netdev_ops = &lan743x_netdev_ops;
+ adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
adapter->netdev->hw_features = adapter->netdev->features;
@@ -2747,10 +2842,182 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
lan743x_netdev_close(netdev);
rtnl_unlock();
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+#endif
+
/* clean up lan743x portion */
lan743x_hardware_cleanup(adapter);
}
+#ifdef CONFIG_PM
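+/* The wakeup-frame filters match on a CRC-16 (seed 0xFFFF) of the selected
+ * pattern bytes; the result is bit-reversed here, presumably to match the
+ * bit order in which the MAC computes the CRC on received frames.
+ */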
+static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
+{
+ return bitrev16(crc16(0xFFFF, buf, len));
+}
+
+static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
+{
+ const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
+ const u8 ipv6_multicast[3] = { 0x33, 0x33 };
+ const u8 arp_type[2] = { 0x08, 0x06 };
+ int mask_index;
+ u32 pmtctl;
+ u32 wucsr;
+ u32 macrx;
+ u16 crc;
+
+ for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);
+
+ /* clear wake settings */
+ pmtctl = lan743x_csr_read(adapter, PMT_CTL);
+ pmtctl |= PMT_CTL_WUPS_MASK_;
+ pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
+ PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
+ PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
+
+ macrx = lan743x_csr_read(adapter, MAC_RX);
+
+ wucsr = 0;
+ mask_index = 0;
+
+ pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
+
+ if (adapter->wolopts & WAKE_PHY) {
+ pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
+ pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
+ }
+ if (adapter->wolopts & WAKE_MAGIC) {
+ wucsr |= MAC_WUCSR_MPEN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_UCAST) {
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_BCAST) {
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_MCAST) {
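+		/* each MAC_WUF_MASKn register is a bitmap of frame bytes to
+		 * include in the CRC: 7 covers bytes 0-2 (the IPv4 multicast
+		 * OUI), 3 covers bytes 0-1 (the IPv6 multicast prefix)
+		 */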
+ /* IPv4 multicast */
+ crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
+ MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
+ (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
+ (crc & MAC_WUF_CFG_CRC16_MASK_));
+ lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
+ lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ /* IPv6 multicast */
+ crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
+ MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
+ (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
+ (crc & MAC_WUF_CFG_CRC16_MASK_));
+ lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
+ lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+ if (adapter->wolopts & WAKE_ARP) {
+		/* set MAC_WUF_CFG & WUF_MASK
+		 * for packet type (offsets 12,13) = ARP (0x0806)
+		 */
+ crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
+ lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
+ MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
+ (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
+ (crc & MAC_WUF_CFG_CRC16_MASK_));
+ lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
+ lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
+ lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
+ mask_index++;
+
+ wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
+ macrx |= MAC_RX_RXEN_;
+ pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
+ pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
+ }
+
+ lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
+ lan743x_csr_write(adapter, PMT_CTL, pmtctl);
+ lan743x_csr_write(adapter, MAC_RX, macrx);
+}
+
+static int lan743x_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ lan743x_pcidev_shutdown(pdev);
+
+ /* clear all wakes */
+ lan743x_csr_write(adapter, MAC_WUCSR, 0);
+ lan743x_csr_write(adapter, MAC_WUCSR2, 0);
+ lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
+
+ if (adapter->wolopts)
+ lan743x_pm_set_wol(adapter);
+
+	/* host sets PME_En, then puts the device into D3hot */
+	pci_prepare_to_sleep(pdev);
+
+ return 0;
+}
+
+static int lan743x_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ ret = lan743x_hardware_init(adapter, pdev);
+ if (ret) {
+ netif_err(adapter, probe, adapter->netdev,
+ "lan743x_hardware_init returned %d\n", ret);
+ }
+
+	/* Reopen the netdev only if it was in the running state when we
+	 * suspended, e.g. after pm-suspend; it is not running when the
+	 * system wakes up after a suspend issued from the GUI menu.
+	 */
+ if (netif_running(netdev))
+ lan743x_netdev_open(netdev);
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops lan743x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
+};
+#endif /*CONFIG_PM */
+
static const struct pci_device_id lan743x_pcidev_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
{ 0, }
@@ -2761,6 +3028,9 @@ static struct pci_driver lan743x_pcidev_driver = {
.id_table = lan743x_pcidev_tbl,
.probe = lan743x_pcidev_probe,
.remove = lan743x_pcidev_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &lan743x_pm_ops,
+#endif
.shutdown = lan743x_pcidev_shutdown,
};
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 73b463a9df61..0e82b6368798 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -4,12 +4,17 @@
#ifndef _LAN743X_H
#define _LAN743X_H
+#include "lan743x_ptp.h"
+
#define DRIVER_AUTHOR "Bryan Whitehead <Bryan.Whitehead@microchip.com>"
#define DRIVER_DESC "LAN743x PCIe Gigabit Ethernet Driver"
#define DRIVER_NAME "lan743x"
/* Register Definitions */
#define ID_REV (0x00)
+#define ID_REV_ID_MASK_ (0xFFFF0000)
+#define ID_REV_ID_LAN7430_ (0x74300000)
+#define ID_REV_ID_LAN7431_ (0x74310000)
#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \
(((id_rev) & 0xFFF00000) == 0x74300000)
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
@@ -24,8 +29,18 @@
#define HW_CFG_LRST_ BIT(1)
#define PMT_CTL (0x014)
+#define PMT_CTL_ETH_PHY_D3_COLD_OVR_ BIT(27)
+#define PMT_CTL_MAC_D3_RX_CLK_OVR_ BIT(25)
+#define PMT_CTL_ETH_PHY_EDPD_PLL_CTL_ BIT(24)
+#define PMT_CTL_ETH_PHY_D3_OVR_ BIT(23)
+#define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18)
+#define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15)
+#define PMT_CTL_EEE_WAKEUP_EN_ BIT(13)
#define PMT_CTL_READY_ BIT(7)
#define PMT_CTL_ETH_PHY_RST_ BIT(4)
+#define PMT_CTL_WOL_EN_ BIT(3)
+#define PMT_CTL_ETH_PHY_WAKE_EN_ BIT(2)
+#define PMT_CTL_WUPS_MASK_ (0x00000003)
#define DP_SEL (0x024)
#define DP_SEL_DPRDY_ BIT(31)
@@ -42,6 +57,31 @@
#define DP_DATA_0 (0x030)
+#define E2P_CMD (0x040)
+#define E2P_CMD_EPC_BUSY_ BIT(31)
+#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000)
+#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000)
+#define E2P_CMD_EPC_CMD_READ_ (0x00000000)
+#define E2P_CMD_EPC_TIMEOUT_ BIT(10)
+#define E2P_CMD_EPC_ADDR_MASK_ (0x000001FF)
+
+#define E2P_DATA (0x044)
+
+#define GPIO_CFG0 (0x050)
+#define GPIO_CFG0_GPIO_DIR_BIT_(bit) BIT(16 + (bit))
+#define GPIO_CFG0_GPIO_DATA_BIT_(bit) BIT(0 + (bit))
+
+#define GPIO_CFG1 (0x054)
+#define GPIO_CFG1_GPIOEN_BIT_(bit) BIT(16 + (bit))
+#define GPIO_CFG1_GPIOBUF_BIT_(bit) BIT(0 + (bit))
+
+#define GPIO_CFG2 (0x058)
+#define GPIO_CFG2_1588_POL_BIT_(bit) BIT(0 + (bit))
+
+#define GPIO_CFG3 (0x05C)
+#define GPIO_CFG3_1588_CH_SEL_BIT_(bit) BIT(16 + (bit))
+#define GPIO_CFG3_1588_OE_BIT_(bit) BIT(0 + (bit))
+
#define FCT_RX_CTL (0xAC)
#define FCT_RX_CTL_EN_(channel) BIT(28 + (channel))
#define FCT_RX_CTL_DIS_(channel) BIT(24 + (channel))
@@ -62,6 +102,7 @@
((value << 0) & FCT_FLOW_CTL_ON_THRESHOLD_)
#define MAC_CR (0x100)
+#define MAC_CR_EEE_EN_ BIT(17)
#define MAC_CR_ADD_ BIT(12)
#define MAC_CR_ASD_ BIT(11)
#define MAC_CR_CNTR_RST_ BIT(5)
@@ -97,6 +138,40 @@
#define MAC_MII_DATA (0x124)
+#define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130)
+
+#define MAC_WUCSR (0x140)
+#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14)
+#define MAC_WUCSR_PFDA_EN_ BIT(3)
+#define MAC_WUCSR_WAKE_EN_ BIT(2)
+#define MAC_WUCSR_MPEN_ BIT(1)
+#define MAC_WUCSR_BCST_EN_ BIT(0)
+
+#define MAC_WK_SRC (0x144)
+
+#define MAC_WUF_CFG0 (0x150)
+#define MAC_NUM_OF_WUF_CFG (32)
+#define MAC_WUF_CFG_BEGIN (MAC_WUF_CFG0)
+#define MAC_WUF_CFG(index) (MAC_WUF_CFG_BEGIN + (4 * (index)))
+#define MAC_WUF_CFG_EN_ BIT(31)
+#define MAC_WUF_CFG_TYPE_MCAST_ (0x02000000)
+#define MAC_WUF_CFG_TYPE_ALL_ (0x01000000)
+#define MAC_WUF_CFG_OFFSET_SHIFT_ (16)
+#define MAC_WUF_CFG_CRC16_MASK_ (0x0000FFFF)
+
+#define MAC_WUF_MASK0_0 (0x200)
+#define MAC_WUF_MASK0_1 (0x204)
+#define MAC_WUF_MASK0_2 (0x208)
+#define MAC_WUF_MASK0_3 (0x20C)
+#define MAC_WUF_MASK0_BEGIN (MAC_WUF_MASK0_0)
+#define MAC_WUF_MASK1_BEGIN (MAC_WUF_MASK0_1)
+#define MAC_WUF_MASK2_BEGIN (MAC_WUF_MASK0_2)
+#define MAC_WUF_MASK3_BEGIN (MAC_WUF_MASK0_3)
+#define MAC_WUF_MASK0(index) (MAC_WUF_MASK0_BEGIN + (0x10 * (index)))
+#define MAC_WUF_MASK1(index) (MAC_WUF_MASK1_BEGIN + (0x10 * (index)))
+#define MAC_WUF_MASK2(index) (MAC_WUF_MASK2_BEGIN + (0x10 * (index)))
+#define MAC_WUF_MASK3(index) (MAC_WUF_MASK3_BEGIN + (0x10 * (index)))
+
/* offset 0x400 - 0x500, x may range from 0 to 32, for a total of 33 entries */
#define RFE_ADDR_FILT_HI(x) (0x400 + (8 * (x)))
#define RFE_ADDR_FILT_HI_VALID_ BIT(31)
@@ -111,13 +186,35 @@
#define RFE_CTL_MCAST_HASH_ BIT(3)
#define RFE_CTL_DA_PERFECT_ BIT(1)
+#define RFE_RSS_CFG (0x554)
+#define RFE_RSS_CFG_UDP_IPV6_EX_ BIT(16)
+#define RFE_RSS_CFG_TCP_IPV6_EX_ BIT(15)
+#define RFE_RSS_CFG_IPV6_EX_ BIT(14)
+#define RFE_RSS_CFG_UDP_IPV6_ BIT(13)
+#define RFE_RSS_CFG_TCP_IPV6_ BIT(12)
+#define RFE_RSS_CFG_IPV6_ BIT(11)
+#define RFE_RSS_CFG_UDP_IPV4_ BIT(10)
+#define RFE_RSS_CFG_TCP_IPV4_ BIT(9)
+#define RFE_RSS_CFG_IPV4_ BIT(8)
+#define RFE_RSS_CFG_VALID_HASH_BITS_ (0x000000E0)
+#define RFE_RSS_CFG_RSS_QUEUE_ENABLE_ BIT(2)
+#define RFE_RSS_CFG_RSS_HASH_STORE_ BIT(1)
+#define RFE_RSS_CFG_RSS_ENABLE_ BIT(0)
+
+#define RFE_HASH_KEY(index) (0x558 + (index << 2))
+
+#define RFE_INDX(index) (0x580 + (index << 2))
+
+#define MAC_WUCSR2 (0x600)
+
#define INT_STS (0x780)
#define INT_BIT_DMA_RX_(channel) BIT(24 + (channel))
#define INT_BIT_ALL_RX_ (0x0F000000)
#define INT_BIT_DMA_TX_(channel) BIT(16 + (channel))
#define INT_BIT_ALL_TX_ (0x000F0000)
#define INT_BIT_SW_GP_ BIT(9)
-#define INT_BIT_ALL_OTHER_ (0x00000280)
+#define INT_BIT_1588_ BIT(7)
+#define INT_BIT_ALL_OTHER_ (INT_BIT_SW_GP_ | INT_BIT_1588_)
#define INT_BIT_MAS_ BIT(0)
#define INT_SET (0x784)
@@ -158,6 +255,71 @@
#define INT_MOD_CFG6 (0x7D8)
#define INT_MOD_CFG7 (0x7DC)
+#define PTP_CMD_CTL (0x0A00)
+#define PTP_CMD_CTL_PTP_CLK_STP_NSEC_ BIT(6)
+#define PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_ BIT(5)
+#define PTP_CMD_CTL_PTP_CLOCK_LOAD_ BIT(4)
+#define PTP_CMD_CTL_PTP_CLOCK_READ_ BIT(3)
+#define PTP_CMD_CTL_PTP_ENABLE_ BIT(2)
+#define PTP_CMD_CTL_PTP_DISABLE_ BIT(1)
+#define PTP_CMD_CTL_PTP_RESET_ BIT(0)
+#define PTP_GENERAL_CONFIG (0x0A04)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(channel) \
+ (0x7 << (1 + ((channel) << 2)))
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_ (0)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_ (1)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_ (2)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (3)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (4)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (5)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \
+ (((value) & 0x7) << (1 + ((channel) << 2)))
+#define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2))
+
+#define PTP_INT_STS (0x0A08)
+#define PTP_INT_EN_SET (0x0A0C)
+#define PTP_INT_EN_CLR (0x0A10)
+#define PTP_INT_BIT_TX_SWTS_ERR_ BIT(13)
+#define PTP_INT_BIT_TX_TS_ BIT(12)
+#define PTP_INT_BIT_TIMER_B_ BIT(1)
+#define PTP_INT_BIT_TIMER_A_ BIT(0)
+
+#define PTP_CLOCK_SEC (0x0A14)
+#define PTP_CLOCK_NS (0x0A18)
+#define PTP_CLOCK_SUBNS (0x0A1C)
+#define PTP_CLOCK_RATE_ADJ (0x0A20)
+#define PTP_CLOCK_RATE_ADJ_DIR_ BIT(31)
+#define PTP_CLOCK_STEP_ADJ (0x0A2C)
+#define PTP_CLOCK_STEP_ADJ_DIR_ BIT(31)
+#define PTP_CLOCK_STEP_ADJ_VALUE_MASK_ (0x3FFFFFFF)
+#define PTP_CLOCK_TARGET_SEC_X(channel) (0x0A30 + ((channel) << 4))
+#define PTP_CLOCK_TARGET_NS_X(channel) (0x0A34 + ((channel) << 4))
+#define PTP_CLOCK_TARGET_RELOAD_SEC_X(channel) (0x0A38 + ((channel) << 4))
+#define PTP_CLOCK_TARGET_RELOAD_NS_X(channel) (0x0A3C + ((channel) << 4))
+#define PTP_LATENCY (0x0A5C)
+#define PTP_LATENCY_TX_SET_(tx_latency) (((u32)(tx_latency)) << 16)
+#define PTP_LATENCY_RX_SET_(rx_latency) \
+ (((u32)(rx_latency)) & 0x0000FFFF)
+#define PTP_CAP_INFO (0x0A60)
+#define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) (((reg_val) & 0x00000070) >> 4)
+
+#define PTP_TX_MOD (0x0AA4)
+#define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ (0x10000000)
+
+#define PTP_TX_MOD2 (0x0AA8)
+#define PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_ (0x00000001)
+
+#define PTP_TX_EGRESS_SEC (0x0AAC)
+#define PTP_TX_EGRESS_NS (0x0AB0)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_ (0xC0000000)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_ (0x00000000)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_ (0x40000000)
+#define PTP_TX_EGRESS_NS_TS_NS_MASK_ (0x3FFFFFFF)
+
+#define PTP_TX_MSG_HEADER (0x0AB4)
+#define PTP_TX_MSG_HEADER_MSG_TYPE_ (0x000F0000)
+#define PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_ (0x00000000)
+
#define DMAC_CFG (0xC00)
#define DMAC_CFG_COAL_EN_ BIT(16)
#define DMAC_CFG_CH_ARB_SEL_RX_HIGH_ (0x00000000)
@@ -288,9 +450,33 @@
#define TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_ BIT(3)
#define TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_ (0x00000007)
+#define OTP_PWR_DN (0x1000)
+#define OTP_PWR_DN_PWRDN_N_ BIT(0)
+
+#define OTP_ADDR1 (0x1004)
+#define OTP_ADDR1_15_11_MASK_ (0x1F)
+
+#define OTP_ADDR2 (0x1008)
+#define OTP_ADDR2_10_3_MASK_ (0xFF)
+
+#define OTP_PRGM_DATA (0x1010)
+
+#define OTP_PRGM_MODE (0x1014)
+#define OTP_PRGM_MODE_BYTE_ BIT(0)
+
+#define OTP_TST_CMD (0x1024)
+#define OTP_TST_CMD_PRGVRFY_ BIT(3)
+
+#define OTP_CMD_GO (0x1028)
+#define OTP_CMD_GO_GO_ BIT(0)
+
+#define OTP_STATUS (0x1030)
+#define OTP_STATUS_BUSY_ BIT(0)
+
/* MAC statistics registers */
#define STAT_RX_FCS_ERRORS (0x1200)
#define STAT_RX_ALIGNMENT_ERRORS (0x1204)
+#define STAT_RX_FRAGMENT_ERRORS (0x1208)
#define STAT_RX_JABBER_ERRORS (0x120C)
#define STAT_RX_UNDERSIZE_FRAME_ERRORS (0x1210)
#define STAT_RX_OVERSIZE_FRAME_ERRORS (0x1214)
@@ -298,12 +484,26 @@
#define STAT_RX_UNICAST_BYTE_COUNT (0x121C)
#define STAT_RX_BROADCAST_BYTE_COUNT (0x1220)
#define STAT_RX_MULTICAST_BYTE_COUNT (0x1224)
+#define STAT_RX_UNICAST_FRAMES (0x1228)
+#define STAT_RX_BROADCAST_FRAMES (0x122C)
#define STAT_RX_MULTICAST_FRAMES (0x1230)
+#define STAT_RX_PAUSE_FRAMES (0x1234)
+#define STAT_RX_64_BYTE_FRAMES (0x1238)
+#define STAT_RX_65_127_BYTE_FRAMES (0x123C)
+#define STAT_RX_128_255_BYTE_FRAMES (0x1240)
+#define STAT_RX_256_511_BYTES_FRAMES (0x1244)
+#define STAT_RX_512_1023_BYTE_FRAMES (0x1248)
+#define STAT_RX_1024_1518_BYTE_FRAMES (0x124C)
+#define STAT_RX_GREATER_1518_BYTE_FRAMES (0x1250)
#define STAT_RX_TOTAL_FRAMES (0x1254)
+#define STAT_EEE_RX_LPI_TRANSITIONS (0x1258)
+#define STAT_EEE_RX_LPI_TIME (0x125C)
+#define STAT_RX_COUNTER_ROLLOVER_STATUS (0x127C)
#define STAT_TX_FCS_ERRORS (0x1280)
#define STAT_TX_EXCESS_DEFERRAL_ERRORS (0x1284)
#define STAT_TX_CARRIER_ERRORS (0x1288)
+#define STAT_TX_BAD_BYTE_COUNT (0x128C)
#define STAT_TX_SINGLE_COLLISIONS (0x1290)
#define STAT_TX_MULTIPLE_COLLISIONS (0x1294)
#define STAT_TX_EXCESSIVE_COLLISION (0x1298)
@@ -311,8 +511,21 @@
#define STAT_TX_UNICAST_BYTE_COUNT (0x12A0)
#define STAT_TX_BROADCAST_BYTE_COUNT (0x12A4)
#define STAT_TX_MULTICAST_BYTE_COUNT (0x12A8)
+#define STAT_TX_UNICAST_FRAMES (0x12AC)
+#define STAT_TX_BROADCAST_FRAMES (0x12B0)
#define STAT_TX_MULTICAST_FRAMES (0x12B4)
+#define STAT_TX_PAUSE_FRAMES (0x12B8)
+#define STAT_TX_64_BYTE_FRAMES (0x12BC)
+#define STAT_TX_65_127_BYTE_FRAMES (0x12C0)
+#define STAT_TX_128_255_BYTE_FRAMES (0x12C4)
+#define STAT_TX_256_511_BYTES_FRAMES (0x12C8)
+#define STAT_TX_512_1023_BYTE_FRAMES (0x12CC)
+#define STAT_TX_1024_1518_BYTE_FRAMES (0x12D0)
+#define STAT_TX_GREATER_1518_BYTE_FRAMES (0x12D4)
#define STAT_TX_TOTAL_FRAMES (0x12D8)
+#define STAT_EEE_TX_LPI_TRANSITIONS (0x12DC)
+#define STAT_EEE_TX_LPI_TIME (0x12E0)
+#define STAT_TX_COUNTER_ROLLOVER_STATUS (0x12FC)
/* End of Register definitions */
@@ -415,8 +628,12 @@ struct lan743x_tx_buffer_info;
#define TX_FRAME_FLAG_IN_PROGRESS BIT(0)
+#define TX_TS_FLAG_TIMESTAMPING_ENABLED BIT(0)
+#define TX_TS_FLAG_ONE_STEP_SYNC BIT(1)
+
struct lan743x_tx {
struct lan743x_adapter *adapter;
+ u32 ts_flags;
u32 vector_flags;
int channel_number;
@@ -443,6 +660,10 @@ struct lan743x_tx {
struct sk_buff *overflow_skb;
};
+void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
+ bool enable_timestamping,
+ bool enable_onestep_sync);
+
/* RX */
struct lan743x_rx_descriptor;
struct lan743x_rx_buffer_info;
@@ -473,6 +694,9 @@ struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
int msg_enable;
+#ifdef CONFIG_PM
+ u32 wolopts;
+#endif
struct pci_dev *pdev;
struct lan743x_csr csr;
struct lan743x_intr intr;
@@ -480,6 +704,9 @@ struct lan743x_adapter {
/* lock, used to prevent concurrent access to data port */
struct mutex dp_lock;
+ struct lan743x_gpio gpio;
+ struct lan743x_ptp ptp;
+
u8 mac_address[ETH_ALEN];
struct lan743x_phy phy;
@@ -530,6 +757,7 @@ struct lan743x_adapter {
#define TX_DESC_DATA0_IPE_ (0x00200000)
#define TX_DESC_DATA0_TPE_ (0x00100000)
#define TX_DESC_DATA0_FCS_ (0x00020000)
+#define TX_DESC_DATA0_TSE_ (0x00010000)
#define TX_DESC_DATA0_BUF_LENGTH_MASK_ (0x0000FFFF)
#define TX_DESC_DATA0_EXT_LSO_ (0x00200000)
#define TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_ (0x000FFFFF)
@@ -543,6 +771,7 @@ struct lan743x_tx_descriptor {
} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING);
#define TX_BUFFER_INFO_FLAG_ACTIVE BIT(0)
+#define TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED BIT(1)
#define TX_BUFFER_INFO_FLAG_IGNORE_SYNC BIT(2)
#define TX_BUFFER_INFO_FLAG_SKB_FRAGMENT BIT(3)
struct lan743x_tx_buffer_info {
@@ -594,4 +823,7 @@ struct lan743x_rx_buffer_info {
#define RX_PROCESS_RESULT_PACKET_RECEIVED (1)
#define RX_PROCESS_RESULT_PACKET_DROPPED (2)
+u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset);
+void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data);
+
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
new file mode 100644
index 000000000000..64dba96edc79
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -0,0 +1,1160 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#include <linux/netdevice.h>
+#include "lan743x_main.h"
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/net_tstamp.h>
+
+#include "lan743x_ptp.h"
+
+#define LAN743X_NUMBER_OF_GPIO (12)
+#define LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB (31249999)
+#define LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM (2047999934)
+
+static bool lan743x_ptp_is_enabled(struct lan743x_adapter *adapter);
+static void lan743x_ptp_enable(struct lan743x_adapter *adapter);
+static void lan743x_ptp_disable(struct lan743x_adapter *adapter);
+static void lan743x_ptp_reset(struct lan743x_adapter *adapter);
+static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter,
+ u32 seconds, u32 nano_seconds,
+ u32 sub_nano_seconds);
+
+int lan743x_gpio_init(struct lan743x_adapter *adapter)
+{
+ struct lan743x_gpio *gpio = &adapter->gpio;
+
+ spin_lock_init(&gpio->gpio_lock);
+
+	gpio->gpio_cfg0 = 0; /* set all directions to input, data = 0 */
+	gpio->gpio_cfg1 = 0x0FFF0000; /* disable all gpios, set to open drain */
+	gpio->gpio_cfg2 = 0; /* set all 1588 polarities to low */
+	gpio->gpio_cfg3 = 0; /* disable all 1588 outputs */
+ lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+ lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
+ lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2);
+ lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3);
+
+ return 0;
+}
+
+static void lan743x_ptp_wait_till_cmd_done(struct lan743x_adapter *adapter,
+ u32 bit_mask)
+{
+ int timeout = 1000;
+ u32 data = 0;
+
+ while (timeout &&
+ (data = (lan743x_csr_read(adapter, PTP_CMD_CTL) &
+ bit_mask))) {
+ usleep_range(1000, 20000);
+ timeout--;
+ }
+ if (data) {
+ netif_err(adapter, drv, adapter->netdev,
+ "timeout waiting for cmd to be done, cmd = 0x%08X\n",
+ bit_mask);
+ }
+}
+
+static void lan743x_ptp_tx_ts_enqueue_ts(struct lan743x_adapter *adapter,
+ u32 seconds, u32 nano_seconds,
+ u32 header)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->tx_ts_queue_size < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
+ ptp->tx_ts_seconds_queue[ptp->tx_ts_queue_size] = seconds;
+ ptp->tx_ts_nseconds_queue[ptp->tx_ts_queue_size] = nano_seconds;
+ ptp->tx_ts_header_queue[ptp->tx_ts_queue_size] = header;
+ ptp->tx_ts_queue_size++;
+ } else {
+ netif_err(adapter, drv, adapter->netdev,
+ "tx ts queue overflow\n");
+ }
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
+
+static void lan743x_ptp_tx_ts_complete(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ struct skb_shared_hwtstamps tstamps;
+ u32 header, nseconds, seconds;
+ bool ignore_sync = false;
+ struct sk_buff *skb;
+ int c, i;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ c = ptp->tx_ts_skb_queue_size;
+
+ if (c > ptp->tx_ts_queue_size)
+ c = ptp->tx_ts_queue_size;
+ if (c <= 0)
+ goto done;
+
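+	/* timestamps are captured in transmit order, so pair each queued
+	 * skb with the captured timestamp in the same slot
+	 */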
+ for (i = 0; i < c; i++) {
+ ignore_sync = ((ptp->tx_ts_ignore_sync_queue &
+ BIT(i)) != 0);
+ skb = ptp->tx_ts_skb_queue[i];
+ nseconds = ptp->tx_ts_nseconds_queue[i];
+ seconds = ptp->tx_ts_seconds_queue[i];
+ header = ptp->tx_ts_header_queue[i];
+
+ memset(&tstamps, 0, sizeof(tstamps));
+ tstamps.hwtstamp = ktime_set(seconds, nseconds);
+ if (!ignore_sync ||
+ ((header & PTP_TX_MSG_HEADER_MSG_TYPE_) !=
+ PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_))
+ skb_tstamp_tx(skb, &tstamps);
+
+ dev_kfree_skb(skb);
+
+ ptp->tx_ts_skb_queue[i] = NULL;
+ ptp->tx_ts_seconds_queue[i] = 0;
+ ptp->tx_ts_nseconds_queue[i] = 0;
+ ptp->tx_ts_header_queue[i] = 0;
+ }
+
+ /* shift queue */
+ ptp->tx_ts_ignore_sync_queue >>= c;
+ for (i = c; i < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS; i++) {
+ ptp->tx_ts_skb_queue[i - c] = ptp->tx_ts_skb_queue[i];
+ ptp->tx_ts_seconds_queue[i - c] = ptp->tx_ts_seconds_queue[i];
+ ptp->tx_ts_nseconds_queue[i - c] = ptp->tx_ts_nseconds_queue[i];
+ ptp->tx_ts_header_queue[i - c] = ptp->tx_ts_header_queue[i];
+
+ ptp->tx_ts_skb_queue[i] = NULL;
+ ptp->tx_ts_seconds_queue[i] = 0;
+ ptp->tx_ts_nseconds_queue[i] = 0;
+ ptp->tx_ts_header_queue[i] = 0;
+ }
+ ptp->tx_ts_skb_queue_size -= c;
+ ptp->tx_ts_queue_size -= c;
+done:
+ ptp->pending_tx_timestamps -= c;
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
+
+static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int result = -ENODEV;
+ int index = 0;
+
+ mutex_lock(&ptp->command_lock);
+ for (index = 0; index < LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS; index++) {
+ if (!(test_bit(index, &ptp->used_event_ch))) {
+ ptp->used_event_ch |= BIT(index);
+ result = index;
+ break;
+ }
+ }
+ mutex_unlock(&ptp->command_lock);
+ return result;
+}
+
+static void lan743x_ptp_release_event_ch(struct lan743x_adapter *adapter,
+ int event_channel)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+ if (test_bit(event_channel, &ptp->used_event_ch)) {
+ ptp->used_event_ch &= ~BIT(event_channel);
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+			   "attempted release of unused event_channel = %d\n",
+ event_channel);
+ }
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
+ u32 *seconds, u32 *nano_seconds,
+ u32 *sub_nano_seconds);
+static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
+ s64 time_step_ns);
+
+static int lan743x_gpio_rsrv_ptp_out(struct lan743x_adapter *adapter,
+ int bit, int ptp_channel)
+{
+ struct lan743x_gpio *gpio = &adapter->gpio;
+ unsigned long irq_flags = 0;
+ int bit_mask = BIT(bit);
+ int ret = -EBUSY;
+
+ spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
+
+ if (!(gpio->used_bits & bit_mask)) {
+ gpio->used_bits |= bit_mask;
+ gpio->output_bits |= bit_mask;
+ gpio->ptp_bits |= bit_mask;
+
+ /* set as output, and zero initial value */
+ gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(bit);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+
+ /* enable gpio, and set buffer type to push pull */
+ gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOEN_BIT_(bit);
+ gpio->gpio_cfg1 |= GPIO_CFG1_GPIOBUF_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
+
+ /* set 1588 polarity to high */
+ gpio->gpio_cfg2 |= GPIO_CFG2_1588_POL_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2);
+
+ if (!ptp_channel) {
+ /* use channel A */
+ gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_CH_SEL_BIT_(bit);
+ } else {
+ /* use channel B */
+ gpio->gpio_cfg3 |= GPIO_CFG3_1588_CH_SEL_BIT_(bit);
+ }
+ gpio->gpio_cfg3 |= GPIO_CFG3_1588_OE_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3);
+
+ ret = bit;
+ }
+ spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
+ return ret;
+}
+
+static void lan743x_gpio_release(struct lan743x_adapter *adapter, int bit)
+{
+ struct lan743x_gpio *gpio = &adapter->gpio;
+ unsigned long irq_flags = 0;
+ int bit_mask = BIT(bit);
+
+ spin_lock_irqsave(&gpio->gpio_lock, irq_flags);
+ if (gpio->used_bits & bit_mask) {
+ gpio->used_bits &= ~bit_mask;
+ if (gpio->output_bits & bit_mask) {
+ gpio->output_bits &= ~bit_mask;
+
+ if (gpio->ptp_bits & bit_mask) {
+ gpio->ptp_bits &= ~bit_mask;
+ /* disable ptp output */
+ gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_OE_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG3,
+ gpio->gpio_cfg3);
+ }
+ /* release gpio output */
+
+ /* disable gpio */
+ gpio->gpio_cfg1 |= GPIO_CFG1_GPIOEN_BIT_(bit);
+ gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOBUF_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1);
+
+ /* reset back to input */
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DIR_BIT_(bit);
+ gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit);
+ lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0);
+ }
+ }
+ spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags);
+}
+
+static int lan743x_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 lan743x_rate_adj = 0;
+ bool positive = true;
+ u64 u64_delta = 0;
+
+ if ((scaled_ppm < (-LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM)) ||
+ scaled_ppm > LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM) {
+ return -EINVAL;
+ }
+ if (scaled_ppm > 0) {
+ u64_delta = (u64)scaled_ppm;
+ positive = true;
+ } else {
+ u64_delta = (u64)(-scaled_ppm);
+ positive = false;
+ }
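+	/* scaled_ppm is ppm in 16.16 fixed point; the rate register takes a
+	 * binary fraction of the nominal rate in units of 2^-35 (see adjfreq
+	 * below), so rate = scaled_ppm * 10^3 * 2^35 / (2^16 * 10^9)
+	 *              = scaled_ppm * 2^19 / 10^6
+	 */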
+ u64_delta = (u64_delta << 19);
+ lan743x_rate_adj = div_u64(u64_delta, 1000000);
+
+ if (positive)
+ lan743x_rate_adj |= PTP_CLOCK_RATE_ADJ_DIR_;
+
+ lan743x_csr_write(adapter, PTP_CLOCK_RATE_ADJ,
+ lan743x_rate_adj);
+
+ return 0;
+}
+
+static int lan743x_ptpci_adjfreq(struct ptp_clock_info *ptpci, s32 delta_ppb)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 lan743x_rate_adj = 0;
+ bool positive = true;
+ u32 u32_delta = 0;
+ u64 u64_delta = 0;
+
+ if ((delta_ppb < (-LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB)) ||
+ delta_ppb > LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB) {
+ return -EINVAL;
+ }
+ if (delta_ppb > 0) {
+ u32_delta = (u32)delta_ppb;
+ positive = true;
+ } else {
+ u32_delta = (u32)(-delta_ppb);
+ positive = false;
+ }
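+	/* the rate register holds |delta| as a binary fraction of the
+	 * nominal rate in units of 2^-35, i.e. value = ppb * 2^35 / 10^9
+	 */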
+ u64_delta = (((u64)u32_delta) << 35);
+ lan743x_rate_adj = div_u64(u64_delta, 1000000000);
+
+ if (positive)
+ lan743x_rate_adj |= PTP_CLOCK_RATE_ADJ_DIR_;
+
+ lan743x_csr_write(adapter, PTP_CLOCK_RATE_ADJ,
+ lan743x_rate_adj);
+
+ return 0;
+}
+
+static int lan743x_ptpci_adjtime(struct ptp_clock_info *ptpci, s64 delta)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+
+ lan743x_ptp_clock_step(adapter, delta);
+
+ return 0;
+}
+
+static int lan743x_ptpci_gettime64(struct ptp_clock_info *ptpci,
+ struct timespec64 *ts)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 nano_seconds = 0;
+ u32 seconds = 0;
+
+ lan743x_ptp_clock_get(adapter, &seconds, &nano_seconds, NULL);
+ ts->tv_sec = seconds;
+ ts->tv_nsec = nano_seconds;
+
+ return 0;
+}
+
+static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci,
+ const struct timespec64 *ts)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 nano_seconds = 0;
+ u32 seconds = 0;
+
+ if (ts) {
+ if (ts->tv_sec > 0xFFFFFFFFLL ||
+ ts->tv_sec < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ts->tv_sec out of range, %lld\n",
+ ts->tv_sec);
+ return -ERANGE;
+ }
+ if (ts->tv_nsec >= 1000000000L ||
+ ts->tv_nsec < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ts->tv_nsec out of range, %ld\n",
+ ts->tv_nsec);
+ return -ERANGE;
+ }
+ seconds = ts->tv_sec;
+ nano_seconds = ts->tv_nsec;
+ lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0);
+ } else {
+ netif_warn(adapter, drv, adapter->netdev, "ts == NULL\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void lan743x_ptp_perout_off(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 general_config = 0;
+
+ if (ptp->perout_gpio_bit >= 0) {
+ lan743x_gpio_release(adapter, ptp->perout_gpio_bit);
+ ptp->perout_gpio_bit = -1;
+ }
+
+ if (ptp->perout_event_ch >= 0) {
+ /* set target to far in the future, effectively disabling it */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch),
+ 0);
+
+ general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG);
+ general_config |= PTP_GENERAL_CONFIG_RELOAD_ADD_X_
+ (ptp->perout_event_ch);
+ lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config);
+ lan743x_ptp_release_event_ch(adapter, ptp->perout_event_ch);
+ ptp->perout_event_ch = -1;
+ }
+}
+
+static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
+ struct ptp_perout_request *perout)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 period_sec = 0, period_nsec = 0;
+ u32 start_sec = 0, start_nsec = 0;
+ u32 general_config = 0;
+ int pulse_width = 0;
+ int perout_bit = 0;
+
+ if (!on) {
+ lan743x_ptp_perout_off(adapter);
+ return 0;
+ }
+
+ if (ptp->perout_event_ch >= 0 ||
+ ptp->perout_gpio_bit >= 0) {
+ /* already on, turn off first */
+ lan743x_ptp_perout_off(adapter);
+ }
+
+ ptp->perout_event_ch = lan743x_ptp_reserve_event_ch(adapter);
+ if (ptp->perout_event_ch < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Failed to reserve event channel for PEROUT\n");
+ goto failed;
+ }
+
+ switch (adapter->csr.id_rev & ID_REV_ID_MASK_) {
+ case ID_REV_ID_LAN7430_:
+ perout_bit = 2;/* GPIO 2 is preferred on EVB LAN7430 */
+ break;
+ case ID_REV_ID_LAN7431_:
+ perout_bit = 4;/* GPIO 4 is preferred on EVB LAN7431 */
+ break;
+ }
+
+ ptp->perout_gpio_bit = lan743x_gpio_rsrv_ptp_out(adapter,
+ perout_bit,
+ ptp->perout_event_ch);
+
+ if (ptp->perout_gpio_bit < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Failed to reserve gpio %d for PEROUT\n",
+ perout_bit);
+ goto failed;
+ }
+
+ start_sec = perout->start.sec;
+ start_sec += perout->start.nsec / 1000000000;
+ start_nsec = perout->start.nsec % 1000000000;
+
+ period_sec = perout->period.sec;
+ period_sec += perout->period.nsec / 1000000000;
+ period_nsec = perout->period.nsec % 1000000000;
+
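+	/* pick the widest fixed hardware pulse width that still fits inside
+	 * the requested period; each event length requires a period of at
+	 * least twice its width
+	 */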
+ if (period_sec == 0) {
+ if (period_nsec >= 400000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ } else if (period_nsec >= 20000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
+ } else if (period_nsec >= 2000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
+ } else if (period_nsec >= 200000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
+ } else if (period_nsec >= 20000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
+ } else if (period_nsec >= 200) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+				   "perout period too small, minimum is 200ns\n");
+ goto failed;
+ }
+ } else {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ }
+
+ /* turn off by setting target far in future */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch), 0);
+
+ /* Configure to pulse every period */
+ general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG);
+ general_config &= ~(PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_
+ (ptp->perout_event_ch));
+ general_config |= PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_
+ (ptp->perout_event_ch, pulse_width);
+ general_config &= ~PTP_GENERAL_CONFIG_RELOAD_ADD_X_
+ (ptp->perout_event_ch);
+ lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config);
+
+ /* set the reload to one toggle cycle */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_SEC_X(ptp->perout_event_ch),
+ period_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_NS_X(ptp->perout_event_ch),
+ period_nsec);
+
+ /* set the start time */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch),
+ start_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch),
+ start_nsec);
+
+ return 0;
+
+failed:
+ lan743x_ptp_perout_off(adapter);
+ return -ENODEV;
+}
+
+static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
+ struct ptp_clock_request *request, int on)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+
+ if (request) {
+ switch (request->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return -EINVAL;
+ case PTP_CLK_REQ_PEROUT:
+ if (request->perout.index == 0)
+ return lan743x_ptp_perout(adapter, on,
+ &request->perout);
+ return -EINVAL;
+ case PTP_CLK_REQ_PPS:
+ return -EINVAL;
+ default:
+ netif_err(adapter, drv, adapter->netdev,
+ "request->type == %d, Unknown\n",
+ request->type);
+ break;
+ }
+ } else {
+ netif_err(adapter, drv, adapter->netdev, "request == NULL\n");
+ }
+ return 0;
+}
+
+static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
+{
+ struct lan743x_ptp *ptp =
+ container_of(ptpci, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(ptp, struct lan743x_adapter, ptp);
+ u32 cap_info, cause, header, nsec, seconds;
+ bool new_timestamp_available = false;
+ int count = 0;
+
+ while ((count < 100) &&
+ (lan743x_csr_read(adapter, PTP_INT_STS) & PTP_INT_BIT_TX_TS_)) {
+ count++;
+ cap_info = lan743x_csr_read(adapter, PTP_CAP_INFO);
+
+ if (PTP_CAP_INFO_TX_TS_CNT_GET_(cap_info) > 0) {
+ seconds = lan743x_csr_read(adapter,
+ PTP_TX_EGRESS_SEC);
+ nsec = lan743x_csr_read(adapter, PTP_TX_EGRESS_NS);
+ cause = (nsec &
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_);
+ header = lan743x_csr_read(adapter,
+ PTP_TX_MSG_HEADER);
+
+ if (cause == PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_) {
+ nsec &= PTP_TX_EGRESS_NS_TS_NS_MASK_;
+ lan743x_ptp_tx_ts_enqueue_ts(adapter,
+ seconds, nsec,
+ header);
+ new_timestamp_available = true;
+ } else if (cause ==
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Auto capture cause not supported\n");
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "unknown tx timestamp capture cause\n");
+ }
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "TX TS INT but no TX TS CNT\n");
+ }
+ lan743x_csr_write(adapter, PTP_INT_STS, PTP_INT_BIT_TX_TS_);
+ }
+
+ if (new_timestamp_available)
+ lan743x_ptp_tx_ts_complete(adapter);
+
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_);
+
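+	/* a negative return value means no periodic rescheduling; the ISR
+	 * schedules this worker again on the next timestamp interrupt
+	 */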
+ return -1;
+}
+
+static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
+ u32 *seconds, u32 *nano_seconds,
+ u32 *sub_nano_seconds)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_READ_);
+
+ if (seconds)
+ (*seconds) = lan743x_csr_read(adapter, PTP_CLOCK_SEC);
+
+ if (nano_seconds)
+ (*nano_seconds) = lan743x_csr_read(adapter, PTP_CLOCK_NS);
+
+ if (sub_nano_seconds)
+ (*sub_nano_seconds) =
+ lan743x_csr_read(adapter, PTP_CLOCK_SUBNS);
+
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
+ s64 time_step_ns)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 nano_seconds_step = 0;
+ u64 abs_time_step_ns = 0;
+ u32 unsigned_seconds = 0;
+ u32 nano_seconds = 0;
+ u32 remainder = 0;
+ s32 seconds = 0;
+
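+	/* the hardware step command can adjust at most 0xF seconds at a
+	 * time, so handle steps beyond +/-15 seconds as an absolute
+	 * clock set instead
+	 */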
+ if (time_step_ns > 15000000000LL) {
+ /* convert to clock set */
+ lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
+ nano_seconds += remainder;
+ if (nano_seconds >= 1000000000) {
+ unsigned_seconds++;
+ nano_seconds -= 1000000000;
+ }
+ lan743x_ptp_clock_set(adapter, unsigned_seconds,
+ nano_seconds, 0);
+ return;
+ } else if (time_step_ns < -15000000000LL) {
+ /* convert to clock set */
+ time_step_ns = -time_step_ns;
+
+ lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
+ nano_seconds_step = remainder;
+ if (nano_seconds < nano_seconds_step) {
+ unsigned_seconds--;
+ nano_seconds += 1000000000;
+ }
+ nano_seconds -= nano_seconds_step;
+ lan743x_ptp_clock_set(adapter, unsigned_seconds,
+ nano_seconds, 0);
+ return;
+ }
+
+ /* do clock step */
+ if (time_step_ns >= 0) {
+ abs_time_step_ns = (u64)(time_step_ns);
+ seconds = (s32)div_u64_rem(abs_time_step_ns, 1000000000,
+ &remainder);
+ nano_seconds = (u32)remainder;
+ } else {
+ abs_time_step_ns = (u64)(-time_step_ns);
+ seconds = -((s32)div_u64_rem(abs_time_step_ns, 1000000000,
+ &remainder));
+ nano_seconds = (u32)remainder;
+ if (nano_seconds > 0) {
+			/* a negative nanoseconds value is not allowed;
+			 * borrow one second and add the complement
+			 * to nanoseconds
+			 */
+ seconds--;
+ nano_seconds = (1000000000 - nano_seconds);
+ }
+ }
+
+ if (nano_seconds > 0) {
+ /* add 8 ns to cover the likely normal increment */
+ nano_seconds += 8;
+ }
+
+ if (nano_seconds >= 1000000000) {
+ /* carry into seconds */
+ seconds++;
+ nano_seconds -= 1000000000;
+ }
+
+ while (seconds) {
+ mutex_lock(&ptp->command_lock);
+ if (seconds > 0) {
+ u32 adjustment_value = (u32)seconds;
+
+ if (adjustment_value > 0xF)
+ adjustment_value = 0xF;
+ lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ,
+ PTP_CLOCK_STEP_ADJ_DIR_ |
+ adjustment_value);
+ seconds -= ((s32)adjustment_value);
+ } else {
+ u32 adjustment_value = (u32)(-seconds);
+
+ if (adjustment_value > 0xF)
+ adjustment_value = 0xF;
+ lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ,
+ adjustment_value);
+ seconds += ((s32)adjustment_value);
+ }
+ lan743x_csr_write(adapter, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_);
+ lan743x_ptp_wait_till_cmd_done(adapter,
+ PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_);
+ mutex_unlock(&ptp->command_lock);
+ }
+ if (nano_seconds) {
+ mutex_lock(&ptp->command_lock);
+ lan743x_csr_write(adapter, PTP_CLOCK_STEP_ADJ,
+ PTP_CLOCK_STEP_ADJ_DIR_ |
+ (nano_seconds &
+ PTP_CLOCK_STEP_ADJ_VALUE_MASK_));
+ lan743x_csr_write(adapter, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_CLK_STP_NSEC_);
+ lan743x_ptp_wait_till_cmd_done(adapter,
+ PTP_CMD_CTL_PTP_CLK_STP_NSEC_);
+ mutex_unlock(&ptp->command_lock);
+ }
+}
+
+void lan743x_ptp_isr(void *context)
+{
+ struct lan743x_adapter *adapter = (struct lan743x_adapter *)context;
+ struct lan743x_ptp *ptp = NULL;
+ int enable_flag = 1;
+ u32 ptp_int_sts = 0;
+
+ ptp = &adapter->ptp;
+
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_1588_);
+
+ ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS);
+ ptp_int_sts &= lan743x_csr_read(adapter, PTP_INT_EN_SET);
+
+ if (ptp_int_sts & PTP_INT_BIT_TX_TS_) {
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+		enable_flag = 0; /* the PTP aux worker re-enables it later */
+ }
+ if (ptp_int_sts & PTP_INT_BIT_TX_SWTS_ERR_) {
+ netif_err(adapter, drv, adapter->netdev,
+ "PTP TX Software Timestamp Error\n");
+ /* clear int status bit */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TX_SWTS_ERR_);
+ }
+ if (ptp_int_sts & PTP_INT_BIT_TIMER_B_) {
+ /* clear int status bit */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TIMER_B_);
+ }
+ if (ptp_int_sts & PTP_INT_BIT_TIMER_A_) {
+ /* clear int status bit */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TIMER_A_);
+ }
+
+ if (enable_flag) {
+ /* re-enable isr */
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_);
+ }
+}
+
+static void lan743x_ptp_tx_ts_enqueue_skb(struct lan743x_adapter *adapter,
+ struct sk_buff *skb, bool ignore_sync)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->tx_ts_skb_queue_size < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
+ ptp->tx_ts_skb_queue[ptp->tx_ts_skb_queue_size] = skb;
+ if (ignore_sync)
+ ptp->tx_ts_ignore_sync_queue |=
+ BIT(ptp->tx_ts_skb_queue_size);
+ ptp->tx_ts_skb_queue_size++;
+ } else {
+ /* this should never happen, so long as the tx channel
+ * calls and honors the result from
+ * lan743x_ptp_request_tx_timestamp
+ */
+ netif_err(adapter, drv, adapter->netdev,
+ "tx ts skb queue overflow\n");
+ dev_kfree_skb(skb);
+ }
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
+
+static void lan743x_ptp_sync_to_system_clock(struct lan743x_adapter *adapter)
+{
+ struct timespec64 ts;
+
+ memset(&ts, 0, sizeof(ts));
+ timekeeping_clocktai64(&ts);
+
+ lan743x_ptp_clock_set(adapter, ts.tv_sec, ts.tv_nsec, 0);
+}
+
+void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
+ u32 link_speed)
+{
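+	/* compensate for fixed MAC/PHY egress (TX) and ingress (RX) delays;
+	 * the values are presumably in nanoseconds and depend on link speed
+	 */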
+ switch (link_speed) {
+ case 10:
+ lan743x_csr_write(adapter, PTP_LATENCY,
+ PTP_LATENCY_TX_SET_(0) |
+ PTP_LATENCY_RX_SET_(0));
+ break;
+ case 100:
+ lan743x_csr_write(adapter, PTP_LATENCY,
+ PTP_LATENCY_TX_SET_(181) |
+ PTP_LATENCY_RX_SET_(594));
+ break;
+ case 1000:
+ lan743x_csr_write(adapter, PTP_LATENCY,
+ PTP_LATENCY_TX_SET_(30) |
+ PTP_LATENCY_RX_SET_(525));
+ break;
+ }
+}
+
+int lan743x_ptp_init(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_init(&ptp->command_lock);
+ spin_lock_init(&ptp->tx_ts_lock);
+ ptp->used_event_ch = 0;
+ ptp->perout_event_ch = -1;
+ ptp->perout_gpio_bit = -1;
+ return 0;
+}
+
+int lan743x_ptp_open(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int ret = -ENODEV;
+ u32 temp;
+
+ lan743x_ptp_reset(adapter);
+ lan743x_ptp_sync_to_system_clock(adapter);
+ temp = lan743x_csr_read(adapter, PTP_TX_MOD2);
+ temp |= PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_;
+ lan743x_csr_write(adapter, PTP_TX_MOD2, temp);
+ lan743x_ptp_enable(adapter);
+ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_);
+ lan743x_csr_write(adapter, PTP_INT_EN_SET,
+ PTP_INT_BIT_TX_SWTS_ERR_ | PTP_INT_BIT_TX_TS_);
+ ptp->flags |= PTP_FLAG_ISR_ENABLED;
+
+ if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK))
+ return 0;
+
+ snprintf(ptp->pin_config[0].name, 32, "lan743x_ptp_pin_0");
+ ptp->pin_config[0].index = 0;
+ ptp->pin_config[0].func = PTP_PF_PEROUT;
+ ptp->pin_config[0].chan = 0;
+
+ ptp->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(ptp->ptp_clock_info.name, 16, "%pm",
+ adapter->netdev->dev_addr);
+ ptp->ptp_clock_info.max_adj = LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB;
+ ptp->ptp_clock_info.n_alarm = 0;
+ ptp->ptp_clock_info.n_ext_ts = 0;
+ ptp->ptp_clock_info.n_per_out = 1;
+ ptp->ptp_clock_info.n_pins = 0;
+ ptp->ptp_clock_info.pps = 0;
+ ptp->ptp_clock_info.pin_config = NULL;
+ ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
+ ptp->ptp_clock_info.adjfreq = lan743x_ptpci_adjfreq;
+ ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime;
+ ptp->ptp_clock_info.gettime64 = lan743x_ptpci_gettime64;
+ ptp->ptp_clock_info.getcrosststamp = NULL;
+ ptp->ptp_clock_info.settime64 = lan743x_ptpci_settime64;
+ ptp->ptp_clock_info.enable = lan743x_ptpci_enable;
+ ptp->ptp_clock_info.do_aux_work = lan743x_ptpci_do_aux_work;
+ ptp->ptp_clock_info.verify = NULL;
+
+ ptp->ptp_clock = ptp_clock_register(&ptp->ptp_clock_info,
+ &adapter->pdev->dev);
+
+ if (IS_ERR(ptp->ptp_clock)) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "ptp_clock_register failed\n");
+ goto done;
+ }
+ ptp->flags |= PTP_FLAG_PTP_CLOCK_REGISTERED;
+ netif_info(adapter, ifup, adapter->netdev,
+ "successfully registered ptp clock\n");
+
+ return 0;
+done:
+ lan743x_ptp_close(adapter);
+ return ret;
+}
+
+void lan743x_ptp_close(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int index;
+
+ if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
+ ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED) {
+ ptp_clock_unregister(ptp->ptp_clock);
+ ptp->ptp_clock = NULL;
+ ptp->flags &= ~PTP_FLAG_PTP_CLOCK_REGISTERED;
+ netif_info(adapter, drv, adapter->netdev,
+ "ptp clock unregister\n");
+ }
+
+ if (ptp->flags & PTP_FLAG_ISR_ENABLED) {
+ lan743x_csr_write(adapter, PTP_INT_EN_CLR,
+ PTP_INT_BIT_TX_SWTS_ERR_ |
+ PTP_INT_BIT_TX_TS_);
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_1588_);
+ ptp->flags &= ~PTP_FLAG_ISR_ENABLED;
+ }
+
+ /* clean up pending timestamp requests */
+ lan743x_ptp_tx_ts_complete(adapter);
+ spin_lock_bh(&ptp->tx_ts_lock);
+ for (index = 0;
+ index < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS;
+ index++) {
+ struct sk_buff *skb = ptp->tx_ts_skb_queue[index];
+
+ if (skb)
+ dev_kfree_skb(skb);
+ ptp->tx_ts_skb_queue[index] = NULL;
+ ptp->tx_ts_seconds_queue[index] = 0;
+ ptp->tx_ts_nseconds_queue[index] = 0;
+ }
+ ptp->tx_ts_skb_queue_size = 0;
+ ptp->tx_ts_queue_size = 0;
+ ptp->pending_tx_timestamps = 0;
+ spin_unlock_bh(&ptp->tx_ts_lock);
+
+ lan743x_ptp_disable(adapter);
+}
+
+void lan743x_ptp_set_sync_ts_insert(struct lan743x_adapter *adapter,
+ bool ts_insert_enable)
+{
+ u32 ptp_tx_mod = lan743x_csr_read(adapter, PTP_TX_MOD);
+
+ if (ts_insert_enable)
+ ptp_tx_mod |= PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_;
+ else
+ ptp_tx_mod &= ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_;
+
+ lan743x_csr_write(adapter, PTP_TX_MOD, ptp_tx_mod);
+}
+
+static bool lan743x_ptp_is_enabled(struct lan743x_adapter *adapter)
+{
+ if (lan743x_csr_read(adapter, PTP_CMD_CTL) & PTP_CMD_CTL_PTP_ENABLE_)
+ return true;
+ return false;
+}
+
+static void lan743x_ptp_enable(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ if (lan743x_ptp_is_enabled(adapter)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "PTP already enabled\n");
+ goto done;
+ }
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_ENABLE_);
+done:
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_disable(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+ if (!lan743x_ptp_is_enabled(adapter)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "PTP already disabled\n");
+ goto done;
+ }
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_DISABLE_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_ENABLE_);
+done:
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_reset(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ if (lan743x_ptp_is_enabled(adapter)) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Attempting reset while enabled\n");
+ goto done;
+ }
+
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_RESET_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_RESET_);
+done:
+ mutex_unlock(&ptp->command_lock);
+}
+
+static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter,
+ u32 seconds, u32 nano_seconds,
+ u32 sub_nano_seconds)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+
+ lan743x_csr_write(adapter, PTP_CLOCK_SEC, seconds);
+ lan743x_csr_write(adapter, PTP_CLOCK_NS, nano_seconds);
+ lan743x_csr_write(adapter, PTP_CLOCK_SUBNS, sub_nano_seconds);
+
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
+ mutex_unlock(&ptp->command_lock);
+}
+
+bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ bool result = false;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->pending_tx_timestamps < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
+ /* request granted */
+ ptp->pending_tx_timestamps++;
+ result = true;
+ }
+ spin_unlock_bh(&ptp->tx_ts_lock);
+ return result;
+}
+
+void lan743x_ptp_unrequest_tx_timestamp(struct lan743x_adapter *adapter)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ spin_lock_bh(&ptp->tx_ts_lock);
+ if (ptp->pending_tx_timestamps > 0)
+ ptp->pending_tx_timestamps--;
+ else
+ netif_err(adapter, drv, adapter->netdev,
+ "unrequest failed, pending_tx_timestamps==0\n");
+ spin_unlock_bh(&ptp->tx_ts_lock);
+}
+
+void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter,
+ struct sk_buff *skb, bool ignore_sync)
+{
+ lan743x_ptp_tx_ts_enqueue_skb(adapter, skb, ignore_sync);
+
+ lan743x_ptp_tx_ts_complete(adapter);
+}
+
+int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int ret = 0;
+ int index;
+
+ if (!ifr) {
+ netif_err(adapter, drv, adapter->netdev,
+ "SIOCSHWTSTAMP, ifr == NULL\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "ignoring hwtstamp_config.flags == 0x%08X, expected 0\n",
+ config.flags);
+ }
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ index++)
+ lan743x_tx_set_timestamping_mode(&adapter->tx[index],
+ false, false);
+ lan743x_ptp_set_sync_ts_insert(adapter, false);
+ break;
+ case HWTSTAMP_TX_ON:
+ for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ index++)
+ lan743x_tx_set_timestamping_mode(&adapter->tx[index],
+ true, false);
+ lan743x_ptp_set_sync_ts_insert(adapter, false);
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ index++)
+ lan743x_tx_set_timestamping_mode(&adapter->tx[index],
+ true, true);
+
+ lan743x_ptp_set_sync_ts_insert(adapter, true);
+ break;
+ default:
+ netif_warn(adapter, drv, adapter->netdev,
+			   "tx_type = %d, UNKNOWN\n", config.tx_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret)
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+ return ret;
+}
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
new file mode 100644
index 000000000000..5fc1b3cd5e33
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2018 Microchip Technology Inc. */
+
+#ifndef _LAN743X_PTP_H
+#define _LAN743X_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/netdevice.h>
+
+struct lan743x_adapter;
+
+/* GPIO */
+struct lan743x_gpio {
+ /* gpio_lock: used to prevent concurrent access to gpio settings */
+ spinlock_t gpio_lock;
+
+ int used_bits;
+ int output_bits;
+ int ptp_bits;
+ u32 gpio_cfg0;
+ u32 gpio_cfg1;
+ u32 gpio_cfg2;
+ u32 gpio_cfg3;
+};
+
+int lan743x_gpio_init(struct lan743x_adapter *adapter);
+
+void lan743x_ptp_isr(void *context);
+bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter);
+void lan743x_ptp_unrequest_tx_timestamp(struct lan743x_adapter *adapter);
+void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter,
+ struct sk_buff *skb, bool ignore_sync);
+int lan743x_ptp_init(struct lan743x_adapter *adapter);
+int lan743x_ptp_open(struct lan743x_adapter *adapter);
+void lan743x_ptp_close(struct lan743x_adapter *adapter);
+void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
+ u32 link_speed);
+
+int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+
+#define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4)
+
+#define PTP_FLAG_PTP_CLOCK_REGISTERED BIT(1)
+#define PTP_FLAG_ISR_ENABLED BIT(2)
+
+struct lan743x_ptp {
+ int flags;
+
+ /* command_lock: used to prevent concurrent ptp commands */
+ struct mutex command_lock;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct ptp_pin_desc pin_config[1];
+
+#define LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS (2)
+ unsigned long used_event_ch;
+
+ int perout_event_ch;
+ int perout_gpio_bit;
+
+ /* tx_ts_lock: used to prevent concurrent access to timestamp arrays */
+ spinlock_t tx_ts_lock;
+ int pending_tx_timestamps;
+ struct sk_buff *tx_ts_skb_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ unsigned int tx_ts_ignore_sync_queue;
+ int tx_ts_skb_queue_size;
+ u32 tx_ts_seconds_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ u32 tx_ts_nseconds_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ u32 tx_ts_header_queue[LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS];
+ int tx_ts_queue_size;
+};
+
+#endif /* _LAN743X_PTP_H */
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 776a8a9be8e3..1a4f2bb48ead 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -148,12 +148,191 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
return 0;
}
+static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
+{
+ /* Select the VID to configure */
+ ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid),
+ ANA_TABLES_VLANTIDX);
+ /* Set the vlan port members mask and issue a write command */
+ ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) |
+ ANA_TABLES_VLANACCESS_CMD_WRITE,
+ ANA_TABLES_VLANACCESS);
+
+ return ocelot_vlant_wait_for_completion(ocelot);
+}
+
+static void ocelot_vlan_mode(struct ocelot_port *port,
+ netdev_features_t features)
+{
+ struct ocelot *ocelot = port->ocelot;
+ u8 p = port->chip_port;
+ u32 val;
+
+ /* Filtering */
+ val = ocelot_read(ocelot, ANA_VLANMASK);
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ val |= BIT(p);
+ else
+ val &= ~BIT(p);
+ ocelot_write(ocelot, val, ANA_VLANMASK);
+}
+
+static void ocelot_vlan_port_apply(struct ocelot *ocelot,
+ struct ocelot_port *port)
+{
+ u32 val;
+
+	/* Ingress classification (ANA_PORT_VLAN_CFG) */
+	/* Default vlan to classify untagged frames to (may be zero) */
+ val = ANA_PORT_VLAN_CFG_VLAN_VID(port->pvid);
+ if (port->vlan_aware)
+ val |= ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
+
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_VLAN_CFG_VLAN_VID_M |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
+ ANA_PORT_VLAN_CFG, port->chip_port);
+
+ /* Drop frames with multicast source address */
+ val = ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA;
+ if (port->vlan_aware && !port->vid)
+ /* If the port is vlan-aware and tagged, drop untagged and
+ * priority-tagged frames.
+ */
+ val |= ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+ ocelot_write_gix(ocelot, val, ANA_PORT_DROP_CFG, port->chip_port);
+
+ /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q. */
+ val = REW_TAG_CFG_TAG_TPID_CFG(0);
+
+ if (port->vlan_aware) {
+ if (port->vid)
+ /* Tag all frames except when VID == DEFAULT_VLAN */
+ val |= REW_TAG_CFG_TAG_CFG(1);
+ else
+ /* Tag all frames */
+ val |= REW_TAG_CFG_TAG_CFG(3);
+ }
+ ocelot_rmw_gix(ocelot, val,
+ REW_TAG_CFG_TAG_TPID_CFG_M |
+ REW_TAG_CFG_TAG_CFG_M,
+ REW_TAG_CFG, port->chip_port);
+
+ /* Set default VLAN and tag type to 8021Q. */
+ val = REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q) |
+ REW_PORT_VLAN_CFG_PORT_VID(port->vid);
+ ocelot_rmw_gix(ocelot, val,
+ REW_PORT_VLAN_CFG_PORT_TPID_M |
+ REW_PORT_VLAN_CFG_PORT_VID_M,
+ REW_PORT_VLAN_CFG, port->chip_port);
+}
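
To summarize the rewriter settings above, a minimal standalone sketch (the helper name is ours, not the driver's) of which TAG_CFG value ocelot_vlan_port_apply() ends up selecting:

/* Hypothetical helper mirroring the REW_TAG_CFG_TAG_CFG() selection in
 * ocelot_vlan_port_apply(); only the three cases the function emits.
 */
static int ocelot_egress_tag_cfg(int vlan_aware, unsigned short untagged_vid)
{
	if (!vlan_aware)
		return 0;	/* VLAN unaware: send all frames untagged */
	if (untagged_vid)
		return 1;	/* tag all frames except the native VID */
	return 3;		/* no untagged VID: tag every frame */
}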
+
+static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
+ bool untagged)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ int ret;
+
+ /* Add the port MAC address with the right VLAN information */
+ ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
+ ENTRYTYPE_LOCKED);
+
+ /* Make the port a member of the VLAN */
+ ocelot->vlan_mask[vid] |= BIT(port->chip_port);
+ ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ if (ret)
+ return ret;
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
+ /* Untagged egress vlan classification */
+ if (untagged)
+ port->vid = vid;
+
+ ocelot_vlan_port_apply(ocelot, port);
+
+ return 0;
+}
+
+static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ int ret;
+
+ /* 8021q removes VID 0 on module unload for all interfaces
+ * with the VLAN filtering feature. We need to keep it to
+ * receive untagged traffic.
+ */
+ if (vid == 0)
+ return 0;
+
+ /* Remove the port MAC address with the right VLAN information */
+ ocelot_mact_forget(ocelot, dev->dev_addr, vid);
+
+ /* Stop the port from being a member of the vlan */
+ ocelot->vlan_mask[vid] &= ~BIT(port->chip_port);
+ ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ if (ret)
+ return ret;
+
+ /* Ingress */
+ if (port->pvid == vid)
+ port->pvid = 0;
+
+ /* Egress */
+ if (port->vid == vid)
+ port->vid = 0;
+
+ ocelot_vlan_port_apply(ocelot, port);
+
+ return 0;
+}
+
static void ocelot_vlan_init(struct ocelot *ocelot)
{
+ u16 port, vid;
+
/* Clear VLAN table, by default all ports are members of all VLANs */
ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT,
ANA_TABLES_VLANACCESS);
ocelot_vlant_wait_for_completion(ocelot);
+
+ /* Configure the port VLAN memberships */
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ ocelot->vlan_mask[vid] = 0;
+ ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ }
+
+ /* Because VLAN filtering is enabled, we need VID 0 to get untagged
+ * traffic. It is added automatically if the 8021q module is loaded,
+ * but we can't rely on that since the module may not be loaded.
+ */
+ ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0);
+ ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]);
+
+ /* Configure the CPU port to be VLAN aware */
+ ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
+ ANA_PORT_VLAN_CFG, ocelot->num_phys_ports);
+
+ /* Set vlan ingress filter mask to all ports but the CPU port by
+ * default.
+ */
+ ocelot_write(ocelot, GENMASK(9, 0), ANA_VLANMASK);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port);
+ ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port);
+ }
}
/* Watermark encode
@@ -539,6 +718,20 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct ocelot_port *port = netdev_priv(dev);
struct ocelot *ocelot = port->ocelot;
+ if (!vid) {
+ if (!port->vlan_aware)
+ /* If the bridge is not VLAN aware and no VID was
+ * provided, set it to the pvid to ensure the MAC entry
+ * matches incoming untagged packets.
+ */
+ vid = port->pvid;
+ else
+ /* If the bridge is VLAN aware, a VID must be provided,
+ * as otherwise the learnt entry wouldn't match any frame.
+ */
+ return -EINVAL;
+ }
+
return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
ENTRYTYPE_NORMAL);
}
@@ -690,6 +883,30 @@ end:
return ret;
}
+static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ return ocelot_vlan_vid_add(dev, vid, false, true);
+}
+
+static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ return ocelot_vlan_vid_del(dev, vid);
+}
+
+static int ocelot_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ netdev_features_t changed = dev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
+ ocelot_vlan_mode(port, features);
+
+ return 0;
+}
+
static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_open = ocelot_port_open,
.ndo_stop = ocelot_port_stop,
@@ -701,6 +918,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_fdb_add = ocelot_fdb_add,
.ndo_fdb_del = ocelot_fdb_del,
.ndo_fdb_dump = ocelot_fdb_dump,
+ .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
+ .ndo_set_features = ocelot_set_features,
};
static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -780,6 +1000,8 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
.get_strings = ocelot_get_strings,
.get_ethtool_stats = ocelot_get_ethtool_stats,
.get_sset_count = ocelot_get_sset_count,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int ocelot_port_attr_get(struct net_device *dev,
@@ -914,6 +1136,10 @@ static int ocelot_port_attr_set(struct net_device *dev,
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ ocelot_port->vlan_aware = attr->u.vlan_filtering;
+ ocelot_vlan_port_apply(ocelot_port->ocelot, ocelot_port);
+ break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled);
break;
@@ -925,6 +1151,40 @@ static int ocelot_port_attr_set(struct net_device *dev,
return err;
}
+static int ocelot_port_obj_add_vlan(struct net_device *dev,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ int ret;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ ret = ocelot_vlan_vid_add(dev, vid,
+ vlan->flags & BRIDGE_VLAN_INFO_PVID,
+ vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ocelot_port_vlan_del_vlan(struct net_device *dev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ int ret;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ ret = ocelot_vlan_vid_del(dev, vid);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
const unsigned char *addr,
u16 vid)
@@ -951,7 +1211,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
bool new = false;
if (!vid)
- vid = 1;
+ vid = port->pvid;
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc) {
@@ -992,7 +1252,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
u16 vid = mdb->vid;
if (!vid)
- vid = 1;
+ vid = port->pvid;
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc)
@@ -1024,6 +1284,11 @@ static int ocelot_port_obj_add(struct net_device *dev,
int ret = 0;
switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ ret = ocelot_port_obj_add_vlan(dev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj),
+ trans);
+ break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
trans);
@@ -1041,6 +1306,10 @@ static int ocelot_port_obj_del(struct net_device *dev,
int ret = 0;
switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ ret = ocelot_port_vlan_del_vlan(dev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
@@ -1086,6 +1355,142 @@ static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port,
if (!ocelot->bridge_mask)
ocelot->hw_bridge_dev = NULL;
+
+ /* Clear bridge vlan settings before calling ocelot_vlan_port_apply */
+ ocelot_port->vlan_aware = 0;
+ ocelot_port->pvid = 0;
+ ocelot_port->vid = 0;
+}
+
+static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
+{
+ int i, port, lag;
+
+ /* Reset destination and aggregation PGIDS */
+ for (port = 0; port < ocelot->num_phys_ports; port++)
+ ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
+
+ for (i = PGID_AGGR; i < PGID_SRC; i++)
+ ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
+ ANA_PGID_PGID, i);
+
+ /* Now, set PGIDs for each LAG */
+ for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
+ unsigned long bond_mask;
+ int aggr_count = 0;
+ u8 aggr_idx[16];
+
+ bond_mask = ocelot->lags[lag];
+ if (!bond_mask)
+ continue;
+
+ for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
+ /* Destination mask */
+ ocelot_write_rix(ocelot, bond_mask,
+ ANA_PGID_PGID, port);
+ aggr_idx[aggr_count] = port;
+ aggr_count++;
+ }
+
+ for (i = PGID_AGGR; i < PGID_SRC; i++) {
+ u32 ac;
+
+ ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
+ ac &= ~bond_mask;
+ ac |= BIT(aggr_idx[i % aggr_count]);
+ ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i);
+ }
+ }
+}
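
To make the i % aggr_count round-robin above concrete, a small standalone sketch (assuming a two-port bond on ports 1 and 3, and only four aggregation codes for brevity) of how each aggregation PGID keeps exactly one bond member:

#include <stdio.h>

int main(void)
{
	int aggr_idx[] = { 1, 3 };	/* assumed bond members */
	int aggr_count = 2;
	int i;

	/* One member bit survives per aggregation code, cycling through
	 * the bond, so flows hashing to different codes egress different
	 * physical ports.
	 */
	for (i = 0; i < 4; i++)
		printf("aggr code %d -> port %d\n", i, aggr_idx[i % aggr_count]);
	return 0;
}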
+
+static void ocelot_setup_lag(struct ocelot *ocelot, int lag)
+{
+ unsigned long bond_mask = ocelot->lags[lag];
+ unsigned int p;
+
+ for_each_set_bit(p, &bond_mask, ocelot->num_phys_ports) {
+ u32 port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+
+ port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
+
+ /* Use lag port as logical port for port i */
+ ocelot_write_gix(ocelot, port_cfg |
+ ANA_PORT_PORT_CFG_PORTID_VAL(lag),
+ ANA_PORT_PORT_CFG, p);
+ }
+}
+
+static int ocelot_port_lag_join(struct ocelot_port *ocelot_port,
+ struct net_device *bond)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int p = ocelot_port->chip_port;
+ int lag, lp;
+ struct net_device *ndev;
+ u32 bond_mask = 0;
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(bond, ndev) {
+ struct ocelot_port *port = netdev_priv(ndev);
+
+ bond_mask |= BIT(port->chip_port);
+ }
+ rcu_read_unlock();
+
+ lp = __ffs(bond_mask);
+
+ /* If the new port is the lowest one, use it as the logical port from
+ * now on
+ */
+ if (p == lp) {
+ lag = p;
+ ocelot->lags[p] = bond_mask;
+ bond_mask &= ~BIT(p);
+ if (bond_mask) {
+ lp = __ffs(bond_mask);
+ ocelot->lags[lp] = 0;
+ }
+ } else {
+ lag = lp;
+ ocelot->lags[lp] |= BIT(p);
+ }
+
+ ocelot_setup_lag(ocelot, lag);
+ ocelot_set_aggr_pgids(ocelot);
+
+ return 0;
+}
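
A hedged standalone simulation (values and helper are ours) of the lags[] bookkeeping above: the lowest-numbered member of a bond always ends up owning the mask, and the previous owner hands it over when a lower port joins:

#include <stdio.h>

static unsigned long lags[8];	/* stand-in for ocelot->lags */

static void lag_join(int p, unsigned long bond_mask)
{
	int lp = __builtin_ffsl(bond_mask) - 1;	/* GCC builtin, 1-based */

	if (p == lp) {				/* new port is the lowest */
		lags[p] = bond_mask;
		bond_mask &= ~(1UL << p);
		if (bond_mask)			/* clear the old owner */
			lags[__builtin_ffsl(bond_mask) - 1] = 0;
	} else {
		lags[lp] |= 1UL << p;
	}
}

int main(void)
{
	lag_join(2, 0x4);	/* port 2 alone: lags[2] = 0x4 */
	lag_join(1, 0x6);	/* port 1 joins: lags[1] = 0x6, lags[2] = 0 */
	printf("lags[1]=0x%lx lags[2]=0x%lx\n", lags[1], lags[2]);
	return 0;
}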
+
+static void ocelot_port_lag_leave(struct ocelot_port *ocelot_port,
+ struct net_device *bond)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int p = ocelot_port->chip_port;
+ u32 port_cfg;
+ int i;
+
+ /* Remove port from any lag */
+ for (i = 0; i < ocelot->num_phys_ports; i++)
+ ocelot->lags[i] &= ~BIT(ocelot_port->chip_port);
+
+ /* if it was the logical port of the lag, move the lag config to the
+ * next port
+ */
+ if (ocelot->lags[p]) {
+ int n = __ffs(ocelot->lags[p]);
+
+ ocelot->lags[n] = ocelot->lags[p];
+ ocelot->lags[p] = 0;
+
+ ocelot_setup_lag(ocelot, n);
+ }
+
+ port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+ port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
+ ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(p),
+ ANA_PORT_PORT_CFG, p);
+
+ ocelot_set_aggr_pgids(ocelot);
}
/* Checks if the net_device instance given to us originate from our driver. */
@@ -1113,6 +1518,17 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
else
ocelot_port_bridge_leave(ocelot_port,
info->upper_dev);
+
+ ocelot_vlan_port_apply(ocelot_port->ocelot,
+ ocelot_port);
+ }
+ if (netif_is_lag_master(info->upper_dev)) {
+ if (info->linking)
+ err = ocelot_port_lag_join(ocelot_port,
+ info->upper_dev);
+ else
+ ocelot_port_lag_leave(ocelot_port,
+ info->upper_dev);
}
break;
default:
@@ -1129,6 +1545,20 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
int ret = 0;
+ if (event == NETDEV_PRECHANGEUPPER &&
+ netif_is_lag_master(info->upper_dev)) {
+ struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
+ struct netlink_ext_ack *extack;
+
+ if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ extack = netdev_notifier_info_to_extack(&info->info);
+ NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
+
+ ret = -EINVAL;
+ goto notify;
+ }
+ }
+
if (netif_is_lag_master(dev)) {
struct net_device *slave;
struct list_head *iter;
@@ -1176,6 +1606,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
dev->ethtool_ops = &ocelot_ethtool_ops;
dev->switchdev_ops = &ocelot_port_switchdev_ops;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
dev->dev_addr[ETH_ALEN - 1] += port;
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
@@ -1187,6 +1620,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
goto err_register_netdev;
}
+ /* Basic L2 initialization */
+ ocelot_vlan_port_apply(ocelot, ocelot_port);
+
return 0;
err_register_netdev:
@@ -1201,6 +1637,11 @@ int ocelot_init(struct ocelot *ocelot)
int i, cpu = ocelot->num_phys_ports;
char queue_name[32];
+ ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
+ sizeof(u32), GFP_KERNEL);
+ if (!ocelot->lags)
+ return -ENOMEM;
+
ocelot->stats = devm_kcalloc(ocelot->dev,
ocelot->num_phys_ports * ocelot->num_stats,
sizeof(u64), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 097bd12a10d4..616bec30dfa3 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -493,7 +493,7 @@ struct ocelot {
u8 num_cpu_ports;
struct ocelot_port **ports;
- u16 lags[16];
+ u32 *lags;
/* Keep track of the vlan port masks */
u32 vlan_mask[VLAN_N_VID];
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 18df7d934e81..26bb3b18f3be 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -29,7 +29,7 @@ static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info)
info->port = (ifh[2] & GENMASK(14, 11)) >> 11;
info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20;
- info->tag_type = (ifh[3] & GENMASK(16, 16)) >> 16;
+ info->tag_type = (ifh[3] & BIT(16)) >> 16;
info->vid = ifh[3] & GENMASK(11, 0);
return 0;
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index 71899009c468..c26e0f70c494 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -2,8 +2,8 @@
# Exar device configuration
#
-config NET_VENDOR_EXAR
- bool "Exar devices"
+config NET_VENDOR_NETERION
+ bool "Neterion (Exar) devices"
default y
depends on PCI
---help---
@@ -11,16 +11,19 @@ config NET_VENDOR_EXAR
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about Exar cards. If you say Y, you will be asked for
- your specific card in the following questions.
+ the questions about Neterion/Exar cards. If you say Y, you will be
+ asked for your specific card in the following questions.
-if NET_VENDOR_EXAR
+if NET_VENDOR_NETERION
config S2IO
- tristate "Exar Xframe 10Gb Ethernet Adapter"
+ tristate "Neterion (Exar) Xframe 10Gb Ethernet Adapter"
depends on PCI
---help---
This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+ These were originally released by S2IO, which renamed itself
+ Neterion. So, the adapters might be labeled as either one,
+ depending on their age.
More specific information on configuring the driver is in
<file:Documentation/networking/s2io.txt>.
@@ -29,11 +32,13 @@ config S2IO
will be called s2io.
config VXGE
- tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
+ tristate "Neterion (Exar) X3100 Series 10GbE PCIe Server Adapter"
depends on PCI
---help---
This driver supports Exar Corp's X3100 Series 10 GbE PCIe
- I/O Virtualized Server Adapter.
+ I/O Virtualized Server Adapter. These were originally released by
+ Neterion, which was later acquired by Exar. So, the adapters might
+ be labeled as either one, depending on their age.
More specific information on configuring the driver is in
<file:Documentation/networking/vxge.txt>.
@@ -50,4 +55,4 @@ config VXGE_DEBUG_TRACE_ALL
the vxge driver. By default only few debug trace statements are
enabled.
-endif # NET_VENDOR_EXAR
+endif # NET_VENDOR_NETERION
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 358ed6118881..398011c87643 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -14,7 +14,6 @@
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
-#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include "vxge-traffic.h"
@@ -1095,12 +1094,9 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
struct __vxge_hw_device *hldev;
struct list_head *p, *n;
- u16 ret;
- if (blockpool == NULL) {
- ret = 1;
- goto exit;
- }
+ if (!blockpool)
+ return;
hldev = blockpool->hldev;
@@ -1123,8 +1119,7 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
kfree((void *)p);
}
- ret = 0;
-exit:
+
return;
}
@@ -2260,14 +2255,11 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
struct __vxge_hw_blockpool *blockpool;
struct __vxge_hw_blockpool_entry *entry = NULL;
dma_addr_t dma_addr;
- enum vxge_hw_status status = VXGE_HW_OK;
- u32 req_out;
blockpool = &devh->block_pool;
if (block_addr == NULL) {
blockpool->req_out--;
- status = VXGE_HW_FAIL;
goto exit;
}
@@ -2277,7 +2269,6 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
blockpool->req_out--;
- status = VXGE_HW_FAIL;
goto exit;
}
@@ -2292,7 +2283,7 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
else
list_del(&entry->item);
- if (entry != NULL) {
+ if (entry) {
entry->length = length;
entry->memblock = block_addr;
entry->dma_addr = dma_addr;
@@ -2300,13 +2291,10 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
entry->dma_handle = dma_h;
list_add(&entry->item, &blockpool->free_block_list);
blockpool->pool_size++;
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ }
blockpool->req_out--;
- req_out = blockpool->req_out;
exit:
return;
}
@@ -2358,7 +2346,6 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
struct __vxge_hw_blockpool_entry *entry = NULL;
struct __vxge_hw_blockpool *blockpool;
void *memblock = NULL;
- enum vxge_hw_status status = VXGE_HW_OK;
blockpool = &devh->block_pool;
@@ -2368,10 +2355,8 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
&dma_object->handle,
&dma_object->acc_handle);
- if (memblock == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ if (!memblock)
goto exit;
- }
dma_object->addr = pci_map_single(devh->pdev, memblock, size,
PCI_DMA_BIDIRECTIONAL);
@@ -2380,7 +2365,6 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
dma_object->addr))) {
vxge_os_dma_free(devh->pdev, memblock,
&dma_object->acc_handle);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
@@ -3784,17 +3768,20 @@ vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
itable[j]);
+ /* fall through */
case 2:
*data0 |=
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
itable[j]);
+ /* fall through */
case 3:
*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
itable[j]);
+ /* fall through */
case 4:
*data1 |=
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index cb87fccb9f6a..2572a4b91c7c 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -43,8 +43,6 @@
#include "fw.h"
#include "main.h"
-#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
-
#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
@@ -441,7 +439,10 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
}
if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
- nfp_bpf_event_output(bpf, skb);
+ if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
+ dev_consume_skb_any(skb);
+ else
+ dev_kfree_skb_any(skb);
return;
}
@@ -465,3 +466,21 @@ err_unlock:
err_free:
dev_kfree_skb_any(skb);
}
+
+void
+nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+ const struct cmsg_hdr *hdr = data;
+
+ if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
+ cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
+ return;
+ }
+
+ if (hdr->type == CMSG_TYPE_BPF_EVENT)
+ nfp_bpf_event_output(bpf, data, len);
+ else
+ cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
+ hdr->type);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index 4c7972e3db63..e4f9b7ec8528 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -51,6 +51,7 @@ enum bpf_cap_tlv_type {
NFP_BPF_CAP_TYPE_MAPS = 3,
NFP_BPF_CAP_TYPE_RANDOM = 4,
NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5,
+ NFP_BPF_CAP_TYPE_ADJUST_TAIL = 6,
};
struct nfp_bpf_cap_tlv_func {
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 8a92088df0d7..eff57f7d056a 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -34,10 +34,11 @@
#define pr_fmt(fmt) "NFP net bpf: " fmt
#include <linux/bug.h>
-#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
+#include <linux/kernel.h>
#include <linux/pkt_cls.h>
+#include <linux/reciprocal_div.h>
#include <linux/unistd.h>
#include "main.h"
@@ -416,6 +417,60 @@ emit_alu(struct nfp_prog *nfp_prog, swreg dst,
}
static void
+__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
+ enum mul_type type, enum mul_step step, u16 breg, bool swap,
+ bool wr_both, bool dst_lmextn, bool src_lmextn)
+{
+ u64 insn;
+
+ insn = OP_MUL_BASE |
+ FIELD_PREP(OP_MUL_A_SRC, areg) |
+ FIELD_PREP(OP_MUL_B_SRC, breg) |
+ FIELD_PREP(OP_MUL_STEP, step) |
+ FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
+ FIELD_PREP(OP_MUL_SW, swap) |
+ FIELD_PREP(OP_MUL_TYPE, type) |
+ FIELD_PREP(OP_MUL_WR_AB, wr_both) |
+ FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
+ enum mul_step step, swreg rreg)
+{
+ struct nfp_insn_ur_regs reg;
+ u16 areg;
+ int err;
+
+ if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
+ nfp_prog->error = -EINVAL;
+ return;
+ }
+
+ if (step == MUL_LAST || step == MUL_LAST_2) {
+ /* When the type is step and the step number is LAST or
+ * LAST_2, the left source is used as the destination.
+ */
+ err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
+ areg = reg.dst;
+ } else {
+ err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
+ areg = reg.areg;
+ }
+
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
+ reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
+}
+
+static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
bool zero, bool swap, bool wr_both,
@@ -670,7 +725,7 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
xfer_num = round_up(len, 4) / 4;
if (src_40bit_addr)
- addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
+ addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
&off);
/* Setup PREV_ALU fields to override memory read length. */
@@ -1380,6 +1435,133 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
SHF_SC_R_ROT, 16);
}
+static void
+wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
+ swreg rreg, bool gen_high_half)
+{
+ emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg);
+ emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
+ if (gen_high_half)
+ emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2,
+ reg_none());
+ else
+ wrp_immed(nfp_prog, dst_hi, 0);
+}
+
+static void
+wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
+ swreg rreg)
+{
+ emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg);
+ emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none());
+}
+
+static int
+wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ bool gen_high_half, bool ropnd_from_reg)
+{
+ swreg multiplier, multiplicand, dst_hi, dst_lo;
+ const struct bpf_insn *insn = &meta->insn;
+ u32 lopnd_max, ropnd_max;
+ u8 dst_reg;
+
+ dst_reg = insn->dst_reg;
+ multiplicand = reg_a(dst_reg * 2);
+ dst_hi = reg_both(dst_reg * 2 + 1);
+ dst_lo = reg_both(dst_reg * 2);
+ lopnd_max = meta->umax_dst;
+ if (ropnd_from_reg) {
+ multiplier = reg_b(insn->src_reg * 2);
+ ropnd_max = meta->umax_src;
+ } else {
+ u32 imm = insn->imm;
+
+ multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
+ ropnd_max = imm;
+ }
+ if (lopnd_max > U16_MAX || ropnd_max > U16_MAX)
+ wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier,
+ gen_high_half);
+ else
+ wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier);
+
+ return 0;
+}
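
The range tracking above exists so the cheaper three-step 16x16 sequence can be used whenever both operands are provably u16; a hedged host-side check of the arithmetic fact being relied on:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	/* The largest possible 16x16 product still fits in 32 bits, so a
	 * 16x16 multiply already yields the full result and no high-half
	 * steps are needed.
	 */
	uint64_t max_product = (uint64_t)UINT16_MAX * UINT16_MAX;

	assert(max_product <= UINT32_MAX);
	return 0;
}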
+
+static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
+{
+ swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst);
+ struct reciprocal_value_adv rvalue;
+ u8 pre_shift, exp;
+ swreg magic;
+
+ if (imm > U32_MAX) {
+ wrp_immed(nfp_prog, dst_both, 0);
+ return 0;
+ }
+
+ /* NOTE: because we are using "reciprocal_value_adv", which doesn't
+ * support a divisor > (1u << 31), we need to JIT a separate NFP
+ * sequence for that case. The quotient then equals the result of
+ * the unsigned comparison "dst >= imm", which can be calculated
+ * using the following NFP sequence:
+ *
+ * alu[--, dst, -, imm]
+ * immed[imm, 0]
+ * alu[dst, imm, +carry, 0]
+ */
+ if (imm > 1U << 31) {
+ swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
+
+ emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b);
+ wrp_immed(nfp_prog, imm_a(nfp_prog), 0);
+ emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C,
+ reg_imm(0));
+ return 0;
+ }
+
+ rvalue = reciprocal_value_adv(imm, 32);
+ exp = rvalue.exp;
+ if (rvalue.is_wide_m && !(imm & 1)) {
+ pre_shift = fls(imm & -imm) - 1;
+ rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift);
+ } else {
+ pre_shift = 0;
+ }
+ magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog));
+ if (imm == 1U << exp) {
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+ SHF_SC_R_SHF, exp);
+ } else if (rvalue.is_wide_m) {
+ wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a,
+ magic, true);
+ emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB,
+ imm_b(nfp_prog));
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+ SHF_SC_R_SHF, 1);
+ emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD,
+ imm_b(nfp_prog));
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+ SHF_SC_R_SHF, rvalue.sh - 1);
+ } else {
+ if (pre_shift)
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
+ dst_b, SHF_SC_R_SHF, pre_shift);
+ wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true);
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
+ dst_b, SHF_SC_R_SHF, rvalue.sh);
+ }
+
+ return 0;
+}
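
A standalone check (host-side, 32-bit operands assumed) of the special case above: for a divisor larger than 2^31 the quotient can only be 0 or 1, so it equals the unsigned comparison dst >= imm, which the emitted subtract/add-with-carry pair computes. The reciprocal path's magic and shift values come from the kernel's reciprocal_value_adv() and are not reproduced here.

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t d = 0x90000000u;	/* divisor > 1U << 31 */
	uint32_t n;

	/* In the NFP sequence, the subtract sets carry iff n >= d (no
	 * borrow), then 0 + 0 + carry materializes the comparison.
	 */
	for (n = 0xfffffff0u; n != 0; n++)	/* wraps past UINT32_MAX */
		assert(n / d == (uint32_t)(n >= d));
	assert(0x80000000u / d == 0);		/* just below the divisor */
	return 0;
}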
+
static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
@@ -1460,6 +1642,51 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ u32 ret_einval, end;
+ swreg plen, delta;
+
+ BUILD_BUG_ON(plen_reg(nfp_prog) != reg_b(STATIC_REG_PKT_LEN));
+
+ plen = imm_a(nfp_prog);
+ delta = reg_a(2 * 2);
+
+ ret_einval = nfp_prog_current_offset(nfp_prog) + 9;
+ end = nfp_prog_current_offset(nfp_prog) + 11;
+
+ /* Calculate resulting length */
+ emit_alu(nfp_prog, plen, plen_reg(nfp_prog), ALU_OP_ADD, delta);
+ /* delta == 0 is not allowed by the kernel; the add must overflow
+ * to make the length smaller.
+ */
+ emit_br(nfp_prog, BR_BCC, ret_einval, 0);
+
+ /* if (new_len < 14) then -EINVAL */
+ emit_alu(nfp_prog, reg_none(), plen, ALU_OP_SUB, reg_imm(ETH_HLEN));
+ emit_br(nfp_prog, BR_BMI, ret_einval, 0);
+
+ emit_alu(nfp_prog, plen_reg(nfp_prog),
+ plen_reg(nfp_prog), ALU_OP_ADD, delta);
+ emit_alu(nfp_prog, pv_len(nfp_prog),
+ pv_len(nfp_prog), ALU_OP_ADD, delta);
+
+ emit_br(nfp_prog, BR_UNC, end, 2);
+ wrp_immed(nfp_prog, reg_both(0), 0);
+ wrp_immed(nfp_prog, reg_both(1), 0);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
+ return -EINVAL;
+
+ wrp_immed(nfp_prog, reg_both(0), -22);
+ wrp_immed(nfp_prog, reg_both(1), ~0);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, end))
+ return -EINVAL;
+
+ return 0;
+}
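
A hedged host-side sketch of the branch-on-carry test above: valid adjust_tail deltas are negative, so the 32-bit add of packet length and delta must wrap (produce a carry); a carry-clear result, as for delta == 0, takes the -EINVAL path:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t len = 128;
	uint32_t delta = (uint32_t)-10;	/* shrink by 10 bytes */
	uint32_t new_len = len + delta;

	assert(new_len < len);		/* wrapped: carry set, add allowed */
	assert(new_len == 118);
	assert(len + 0u >= len);	/* delta == 0: no carry, -EINVAL */
	return 0;
}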
+
static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
@@ -1684,6 +1911,31 @@ static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, true, true);
+}
+
+static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, true, false);
+}
+
+static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm);
+}
+
+static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ /* NOTE: the verifier hook has already rejected cases for which the
+ * verifier doesn't know whether the source operand is constant.
+ */
+ return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src);
+}
+
static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -1772,8 +2024,8 @@ static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst, src;
dst = insn->dst_reg * 2;
- umin = meta->umin;
- umax = meta->umax;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
if (umin == umax)
return __shl_imm64(nfp_prog, dst, umin);
@@ -1881,8 +2133,8 @@ static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst, src;
dst = insn->dst_reg * 2;
- umin = meta->umin;
- umax = meta->umax;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
if (umin == umax)
return __shr_imm64(nfp_prog, dst, umin);
@@ -1995,8 +2247,8 @@ static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst, src;
dst = insn->dst_reg * 2;
- umin = meta->umin;
- umax = meta->umax;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
if (umin == umax)
return __ashr_imm64(nfp_prog, dst, umin);
@@ -2097,6 +2349,26 @@ static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}
+static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, false, true);
+}
+
+static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, false, false);
+}
+
+static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return div_reg64(nfp_prog, meta);
+}
+
+static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return div_imm64(nfp_prog, meta);
+}
+
static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
u8 dst = meta->insn.dst_reg * 2;
@@ -2814,6 +3086,8 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
switch (meta->insn.imm) {
case BPF_FUNC_xdp_adjust_head:
return adjust_head(nfp_prog, meta);
+ case BPF_FUNC_xdp_adjust_tail:
+ return adjust_tail(nfp_prog, meta);
case BPF_FUNC_map_lookup_elem:
case BPF_FUNC_map_update_elem:
case BPF_FUNC_map_delete_elem:
@@ -2848,6 +3122,10 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
[BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
[BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
+ [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64,
+ [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64,
+ [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64,
+ [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64,
[BPF_ALU64 | BPF_NEG] = neg_reg64,
[BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64,
[BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
@@ -2867,6 +3145,10 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU | BPF_ADD | BPF_K] = add_imm,
[BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
[BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
+ [BPF_ALU | BPF_MUL | BPF_X] = mul_reg,
+ [BPF_ALU | BPF_MUL | BPF_K] = mul_imm,
+ [BPF_ALU | BPF_DIV | BPF_X] = div_reg,
+ [BPF_ALU | BPF_DIV | BPF_K] = div_imm,
[BPF_ALU | BPF_NEG] = neg_reg,
[BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
[BPF_ALU | BPF_END | BPF_X] = end_reg32,
@@ -3299,7 +3581,8 @@ curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
return false;
- if (ld_meta->ptr.type != PTR_TO_PACKET)
+ if (ld_meta->ptr.type != PTR_TO_PACKET &&
+ ld_meta->ptr.type != PTR_TO_MAP_VALUE)
return false;
if (st_meta->ptr.type != PTR_TO_PACKET)
@@ -3647,6 +3930,7 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
struct nfp_insn_meta *meta1, *meta2;
struct nfp_bpf_map *nfp_map;
struct bpf_map *map;
+ u32 id;
nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
if (meta1->skip || meta2->skip)
@@ -3658,11 +3942,14 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
map = (void *)(unsigned long)((u32)meta1->insn.imm |
(u64)meta2->insn.imm << 32);
- if (bpf_map_offload_neutral(map))
- continue;
- nfp_map = map_to_offmap(map)->dev_priv;
+ if (bpf_map_offload_neutral(map)) {
+ id = map->id;
+ } else {
+ nfp_map = map_to_offmap(map)->dev_priv;
+ id = nfp_map->tid;
+ }
- meta1->insn.imm = nfp_map->tid;
+ meta1->insn.imm = id;
meta2->insn.imm = 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 40216d56dddc..970af07f4656 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -45,8 +45,8 @@
const struct rhashtable_params nfp_bpf_maps_neutral_params = {
.nelem_hint = 4,
- .key_len = FIELD_SIZEOF(struct nfp_bpf_neutral_map, ptr),
- .key_offset = offsetof(struct nfp_bpf_neutral_map, ptr),
+ .key_len = FIELD_SIZEOF(struct bpf_map, id),
+ .key_offset = offsetof(struct nfp_bpf_neutral_map, map_id),
.head_offset = offsetof(struct nfp_bpf_neutral_map, l),
.automatic_shrinking = true,
};
@@ -66,26 +66,19 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
bool running, xdp_running;
- int ret;
if (!nfp_net_ebpf_capable(nn))
return -EINVAL;
running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
- xdp_running = running && nn->dp.bpf_offload_xdp;
+ xdp_running = running && nn->xdp_hw.prog;
if (!prog && !xdp_running)
return 0;
if (prog && running && !xdp_running)
return -EBUSY;
- ret = nfp_net_bpf_offload(nn, prog, running, extack);
- /* Stop offload if replace not possible */
- if (ret)
- return ret;
-
- nn->dp.bpf_offload_xdp = !!prog;
- return ret;
+ return nfp_net_bpf_offload(nn, prog, running, extack);
}
static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
@@ -202,14 +195,11 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
- if (tcf_block_shared(f->block))
- return -EOPNOTSUPP;
-
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
nfp_bpf_setup_tc_block_cb,
- nn, nn);
+ nn, nn, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block,
nfp_bpf_setup_tc_block_cb,
@@ -344,6 +334,14 @@ nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
return 0;
}
+static int
+nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value,
+ u32 length)
+{
+ bpf->adjust_tail = true;
+ return 0;
+}
+
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
struct nfp_cpp *cpp = app->pf->cpp;
@@ -390,6 +388,11 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
if (nfp_bpf_parse_cap_qsel(app->priv, value, length))
goto err_release_free;
break;
+ case NFP_BPF_CAP_TYPE_ADJUST_TAIL:
+ if (nfp_bpf_parse_cap_adjust_tail(app->priv, value,
+ length))
+ goto err_release_free;
+ break;
default:
nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
break;
@@ -411,6 +414,20 @@ err_release_free:
return -EINVAL;
}
+static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+
+ return bpf_offload_dev_netdev_register(bpf->bpf_dev, netdev);
+}
+
+static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+
+ bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev);
+}
+
static int nfp_bpf_init(struct nfp_app *app)
{
struct nfp_app_bpf *bpf;
@@ -434,6 +451,11 @@ static int nfp_bpf_init(struct nfp_app *app)
if (err)
goto err_free_neutral_maps;
+ bpf->bpf_dev = bpf_offload_dev_create();
+ err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
+ if (err)
+ goto err_free_neutral_maps;
+
return 0;
err_free_neutral_maps:
@@ -452,6 +474,7 @@ static void nfp_bpf_clean(struct nfp_app *app)
{
struct nfp_app_bpf *bpf = app->priv;
+ bpf_offload_dev_destroy(bpf->bpf_dev);
WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
WARN_ON(!list_empty(&bpf->map_list));
WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
@@ -473,10 +496,14 @@ const struct nfp_app_type app_bpf = {
.extra_cap = nfp_bpf_extra_cap,
+ .ndo_init = nfp_bpf_ndo_init,
+ .ndo_uninit = nfp_bpf_ndo_uninit,
+
.vnic_alloc = nfp_bpf_vnic_alloc,
.vnic_free = nfp_bpf_vnic_free,
.ctrl_msg_rx = nfp_bpf_ctrl_msg_rx,
+ .ctrl_msg_rx_raw = nfp_bpf_ctrl_msg_rx_raw,
.setup_tc = nfp_bpf_setup_tc,
.bpf = nfp_ndo_bpf,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 654fe7823e5e..dbd00982fd2b 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -47,6 +47,8 @@
#include "../nfp_asm.h"
#include "fw.h"
+#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
+
/* For relocation logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW!
*/
@@ -110,6 +112,8 @@ enum pkt_vec {
* struct nfp_app_bpf - bpf app priv structure
* @app: backpointer to the app
*
+ * @bpf_dev: BPF offload device handle
+ *
* @tag_allocator: bitmap of control message tags in use
* @tag_alloc_next: next tag bit to allocate
* @tag_alloc_last: next tag bit to be freed
@@ -146,10 +150,13 @@ enum pkt_vec {
*
* @pseudo_random: FW initialized the pseudo-random machinery (CSRs)
* @queue_select: BPF can set the RX queue ID in packet vector
+ * @adjust_tail: BPF can simply truncate the packet size for adjust tail
*/
struct nfp_app_bpf {
struct nfp_app *app;
+ struct bpf_offload_dev *bpf_dev;
+
DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
u16 tag_alloc_next;
u16 tag_alloc_last;
@@ -189,6 +196,7 @@ struct nfp_app_bpf {
bool pseudo_random;
bool queue_select;
+ bool adjust_tail;
};
enum nfp_bpf_map_use {
@@ -217,6 +225,7 @@ struct nfp_bpf_map {
struct nfp_bpf_neutral_map {
struct rhash_head l;
struct bpf_map *ptr;
+ u32 map_id;
u32 count;
};
@@ -263,8 +272,10 @@ struct nfp_bpf_reg_state {
* @func_id: function id for call instructions
* @arg1: arg1 for call instructions
* @arg2: arg2 for call instructions
- * @umin: copy of core verifier umin_value.
- * @umax: copy of core verifier umax_value.
+ * @umin_src: copy of core verifier umin_value for src operand.
+ * @umax_src: copy of core verifier umax_value for src operand.
+ * @umin_dst: copy of core verifier umin_value for dst operand.
+ * @umax_dst: copy of core verifier umax_value for dst operand.
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
@@ -300,12 +311,15 @@ struct nfp_insn_meta {
struct bpf_reg_state arg1;
struct nfp_bpf_reg_state arg2;
};
- /* We are interested in range info for some operands,
- * for example, the shift amount.
+ /* We are interested in range info for operands of ALU
+ * operations, for example the shift amount, multiplicand
+ * and multiplier.
+ */
struct {
- u64 umin;
- u64 umax;
+ u64 umin_src;
+ u64 umax_src;
+ u64 umin_dst;
+ u64 umax_dst;
};
};
unsigned int off;
@@ -339,6 +353,11 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
return BPF_MODE(meta->insn.code);
}
+static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta)
+{
+ return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU;
+}
+
static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
@@ -384,23 +403,14 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}
-static inline bool is_mbpf_indir_shift(const struct nfp_insn_meta *meta)
+static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
{
- u8 code = meta->insn.code;
- bool is_alu, is_shift;
- u8 opclass, opcode;
-
- opclass = BPF_CLASS(code);
- is_alu = opclass == BPF_ALU64 || opclass == BPF_ALU;
- if (!is_alu)
- return false;
-
- opcode = BPF_OP(code);
- is_shift = opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH;
- if (!is_shift)
- return false;
+ return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL;
+}
- return BPF_SRC(code) == BPF_X;
+static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
+{
+ return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}
/**
@@ -496,7 +506,11 @@ int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
void *key, void *next_key);
-int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb);
+int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
+ unsigned int len);
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
+void
+nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data,
+ unsigned int len);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 7eae4c0266f8..1ccd6371a15b 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -67,7 +67,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
ASSERT_RTNL();
/* Reuse path - other offloaded program is already tracking this map. */
- record = rhashtable_lookup_fast(&bpf->maps_neutral, &map,
+ record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
nfp_bpf_maps_neutral_params);
if (record) {
nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
@@ -89,6 +89,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
}
record->ptr = map;
+ record->map_id = map->id;
record->count = 1;
err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
@@ -190,8 +191,10 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
meta->insn = prog[i];
meta->n = i;
- if (is_mbpf_indir_shift(meta))
- meta->umin = U64_MAX;
+ if (is_mbpf_alu(meta)) {
+ meta->umin_src = U64_MAX;
+ meta->umin_dst = U64_MAX;
+ }
list_add_tail(&meta->l, &nfp_prog->insns);
}
@@ -377,11 +380,23 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
bpf->maps.max_elems - bpf->map_elems_in_use);
return -ENOMEM;
}
- if (offmap->map.key_size > bpf->maps.max_key_sz ||
- offmap->map.value_size > bpf->maps.max_val_sz ||
- round_up(offmap->map.key_size, 8) +
+
+ if (round_up(offmap->map.key_size, 8) +
round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
- pr_info("elements don't fit in device constraints\n");
+ pr_info("map elements too large: %u, FW max element size (key+value): %u\n",
+ round_up(offmap->map.key_size, 8) +
+ round_up(offmap->map.value_size, 8),
+ bpf->maps.max_elem_sz);
+ return -ENOMEM;
+ }
+ if (offmap->map.key_size > bpf->maps.max_key_sz) {
+ pr_info("map key size %u, FW max is %u\n",
+ offmap->map.key_size, bpf->maps.max_key_sz);
+ return -ENOMEM;
+ }
+ if (offmap->map.value_size > bpf->maps.max_val_sz) {
+ pr_info("map value size %u, FW max is %u\n",
+ offmap->map.value_size, bpf->maps.max_val_sz);
return -ENOMEM;
}
@@ -451,43 +466,43 @@ nfp_bpf_perf_event_copy(void *dst, const void *src,
return 0;
}
-int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb)
+int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
+ unsigned int len)
{
- struct cmsg_bpf_event *cbe = (void *)skb->data;
- u32 pkt_size, data_size;
- struct bpf_map *map;
+ struct cmsg_bpf_event *cbe = (void *)data;
+ struct nfp_bpf_neutral_map *record;
+ u32 pkt_size, data_size, map_id;
+ u64 map_id_full;
- if (skb->len < sizeof(struct cmsg_bpf_event))
- goto err_drop;
+ if (len < sizeof(struct cmsg_bpf_event))
+ return -EINVAL;
pkt_size = be32_to_cpu(cbe->pkt_size);
data_size = be32_to_cpu(cbe->data_size);
- map = (void *)(unsigned long)be64_to_cpu(cbe->map_ptr);
+ map_id_full = be64_to_cpu(cbe->map_ptr);
+ map_id = map_id_full;
- if (skb->len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
- goto err_drop;
+ if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ return -EINVAL;
if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
- goto err_drop;
+ return -EINVAL;
rcu_read_lock();
- if (!rhashtable_lookup_fast(&bpf->maps_neutral, &map,
- nfp_bpf_maps_neutral_params)) {
+ record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
+ nfp_bpf_maps_neutral_params);
+ if (!record || map_id_full > U32_MAX) {
rcu_read_unlock();
- pr_warn("perf event: dest map pointer %px not recognized, dropping event\n",
- map);
- goto err_drop;
+ cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
+ map_id_full, map_id_full);
+ return -EINVAL;
}
- bpf_event_output(map, be32_to_cpu(cbe->cpu_id),
+ bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id),
&cbe->data[round_up(pkt_size, 4)], data_size,
cbe->data, pkt_size, nfp_bpf_perf_event_copy);
rcu_read_unlock();
- dev_consume_skb_any(skb);
return 0;
-err_drop:
- dev_kfree_skb_any(skb);
- return -EINVAL;
}
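
With the lookup above keyed by the 32-bit map id that the FW now carries in place of a kernel pointer, events arriving over the raw control channel (no skb) can still be matched to a host map. A minimal sketch of that keying, with hypothetical names:

#include <stddef.h>
#include <stdint.h>

struct neutral_map_record {		/* hypothetical mirror of the record */
	void *ptr;			/* host-side struct bpf_map pointer */
	uint32_t map_id;		/* hash key, cached from map->id */
};

/* Stand-in for the rhashtable lookup: matching needs only the id from
 * the wire and never compares kernel pointers.
 */
static const struct neutral_map_record *
lookup_by_id(const struct neutral_map_record *tbl, size_t n, uint32_t id)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].map_id == id)
			return &tbl[i];
	return NULL;
}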
static int
@@ -564,14 +579,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
{
int err;
- if (prog) {
- struct bpf_prog_offload *offload = prog->aux->offload;
-
- if (!offload)
- return -EINVAL;
- if (offload->netdev != nn->dp.netdev)
- return -EINVAL;
- }
+ if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev))
+ return -EINVAL;
if (prog && old_prog) {
u8 cap;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 4bfeba7b21b2..a6e9248669e1 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -178,6 +178,13 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
break;
+ case BPF_FUNC_xdp_adjust_tail:
+ if (!bpf->adjust_tail) {
+ pr_vlog(env, "adjust_tail not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
case BPF_FUNC_map_lookup_elem:
if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
bpf->helpers.map_lookup, reg1) ||
@@ -517,6 +524,82 @@ nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
}
static int
+nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ struct bpf_verifier_env *env)
+{
+ const struct bpf_reg_state *sreg =
+ cur_regs(env) + meta->insn.src_reg;
+ const struct bpf_reg_state *dreg =
+ cur_regs(env) + meta->insn.dst_reg;
+
+ meta->umin_src = min(meta->umin_src, sreg->umin_value);
+ meta->umax_src = max(meta->umax_src, sreg->umax_value);
+ meta->umin_dst = min(meta->umin_dst, dreg->umin_value);
+ meta->umax_dst = max(meta->umax_dst, dreg->umax_value);
+
+ /* NFP supports u16 and u32 multiplication.
+ *
+ * For ALU64, if either operand is beyond u32's value range, we
+ * reject it. Note that if the source operand is BPF_K, we need to
+ * check the "imm" field directly, and we reject it if it is
+ * negative, because for ALU64 "imm" (with s32 type) is expected to
+ * be sign extended to s64, which NFP mul doesn't support.
+ *
+ * For ALU32, it is fine for "imm" to be negative, because the
+ * result is 32 bits and there is no difference in the low half of
+ * the result between signed and unsigned mul, so we will get the
+ * correct result.
+ */
+ if (is_mbpf_mul(meta)) {
+ if (meta->umax_dst > U32_MAX) {
+ pr_vlog(env, "multiplier is not within u32 value range\n");
+ return -EINVAL;
+ }
+ if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) {
+ pr_vlog(env, "multiplicand is not within u32 value range\n");
+ return -EINVAL;
+ }
+ if (mbpf_class(meta) == BPF_ALU64 &&
+ mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
+ pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n");
+ return -EINVAL;
+ }
+ }
+
+ /* NFP doesn't have divide instructions; we support divide by
+ * constant through reciprocal multiplication. Given that NFP
+ * supports multiplication no bigger than u32, we require the
+ * divisor and dividend to be no bigger than that as well.
+ *
+ * Also, eBPF doesn't support signed divide and has enforced this
+ * at the C language level by failing compilation. However, the
+ * LLVM assembler hasn't enforced this, so it is possible for a
+ * negative constant to leak in as a BPF_K operand through assembly
+ * code; we reject such cases as well.
+ */
+ if (is_mbpf_div(meta)) {
+ if (meta->umax_dst > U32_MAX) {
+ pr_vlog(env, "dividend is not within u32 value range\n");
+ return -EINVAL;
+ }
+ if (mbpf_src(meta) == BPF_X) {
+ if (meta->umin_src != meta->umax_src) {
+ pr_vlog(env, "divisor is not constant\n");
+ return -EINVAL;
+ }
+ if (meta->umax_src > U32_MAX) {
+ pr_vlog(env, "divisor is not within u32 value range\n");
+ return -EINVAL;
+ }
+ }
+ if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
+ pr_vlog(env, "divide by negative constant is not supported\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
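
As a hedged distillation of the multiply checks above (the function and its parameters are ours, not the driver's), the rules reduce to a few range tests:

#include <stdbool.h>
#include <stdint.h>

static bool nfp_mul_ok(bool alu64, bool src_is_imm, int32_t imm,
		       uint64_t umax_dst, uint64_t umax_src)
{
	if (umax_dst > UINT32_MAX)
		return false;		/* dst operand beyond u32 range */
	if (!src_is_imm && umax_src > UINT32_MAX)
		return false;		/* src operand beyond u32 range */
	if (alu64 && src_is_imm && imm < 0)
		return false;		/* imm would sign-extend to s64 */
	return true;
}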
+
+static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
@@ -551,13 +634,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
if (is_mbpf_xadd(meta))
return nfp_bpf_check_xadd(nfp_prog, meta, env);
- if (is_mbpf_indir_shift(meta)) {
- const struct bpf_reg_state *sreg =
- cur_regs(env) + meta->insn.src_reg;
-
- meta->umin = min(meta->umin, sreg->umin_value);
- meta->umax = max(meta->umax, sreg->umax_value);
- }
+ if (is_mbpf_alu(meta))
+ return nfp_bpf_check_alu(nfp_prog, meta, env);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 4a6d2db75071..0ba0356ec4e6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -32,8 +32,10 @@
*/
#include <linux/bitfield.h>
+#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
+#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
@@ -44,6 +46,16 @@
#include "main.h"
#include "../nfp_net_repr.h"
+/* The kernel versions of TUNNEL_* are not ABI and are therefore
+ * subject to change. Such changes would break our FW ABI.
+ */
+#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01)
+#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04)
+#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800)
+#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
+ NFP_FL_TUNNEL_KEY | \
+ NFP_FL_TUNNEL_GENEVE_OPT)
+
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
size_t act_size = sizeof(struct nfp_fl_pop_vlan);
@@ -226,7 +238,71 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
}
static int
-nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
+nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
+ const struct tc_action *action)
+{
+ struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ int opt_len, opt_cnt, act_start, tot_push_len;
+ u8 *src = ip_tunnel_info_opts(ip_tun);
+
+ /* We need to populate the options in reverse order for HW.
+ * Therefore we go through the options, calculating the
+ * number of options and the total size, then we populate
+ * them in reverse order in the action list.
+ */
+ opt_cnt = 0;
+ tot_push_len = 0;
+ opt_len = ip_tun->options_len;
+ while (opt_len > 0) {
+ struct geneve_opt *opt = (struct geneve_opt *)src;
+
+ opt_cnt++;
+ if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
+ return -EOPNOTSUPP;
+
+ tot_push_len += sizeof(struct nfp_fl_push_geneve) +
+ opt->length * 4;
+ if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
+ return -EOPNOTSUPP;
+
+ opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
+ src += sizeof(struct geneve_opt) + opt->length * 4;
+ }
+
+ if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ act_start = *list_len;
+ *list_len += tot_push_len;
+ src = ip_tunnel_info_opts(ip_tun);
+ while (opt_cnt) {
+ struct geneve_opt *opt = (struct geneve_opt *)src;
+ struct nfp_fl_push_geneve *push;
+ size_t act_size, len;
+
+ opt_cnt--;
+ act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
+ tot_push_len -= act_size;
+ len = act_start + tot_push_len;
+
+ push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
+ push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
+ push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+ push->reserved = 0;
+ push->class = opt->opt_class;
+ push->type = opt->type;
+ push->length = opt->length;
+ memcpy(&push->opt_data, opt->opt_data, opt->length * 4);
+
+ src += sizeof(struct geneve_opt) + opt->length * 4;
+ }
+
+ return 0;
+}
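
The reverse-order population above is a two-pass pattern: pass one totals the pushed size, pass two walks the options in source order but writes each one at the tail of the remaining space. A tiny standalone sketch (hypothetical option contents) of the same idea:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *opt[] = { "AA", "B", "CCC" };	/* source order */
	char out[7] = "";
	int remaining = 6, i;	/* pass one would compute 2 + 1 + 3 */

	for (i = 0; i < 3; i++) {	/* pass two: fill from the tail */
		remaining -= strlen(opt[i]);
		memcpy(out + remaining, opt[i], strlen(opt[i]));
	}
	printf("%s\n", out);	/* prints "CCCBAA": first option lands last */
	return 0;
}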
+
+static int
+nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
+ struct nfp_fl_set_ipv4_udp_tun *set_tun,
const struct tc_action *action,
struct nfp_fl_pre_tunnel *pre_tun,
enum nfp_flower_tun_type tun_type,
@@ -234,16 +310,19 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
{
size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ struct nfp_flower_priv *priv = app->priv;
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
- struct net *net;
- if (ip_tun->options_len)
+ BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
+ NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
+ NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
+ if (ip_tun->options_len &&
+ (tun_type != NFP_FL_TUNNEL_GENEVE ||
+ !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
return -EOPNOTSUPP;
- net = dev_net(netdev);
-
set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
@@ -254,7 +333,42 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
set_tun->tun_id = ip_tun->key.tun_id;
- set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+
+ if (ip_tun->key.ttl) {
+ set_tun->ttl = ip_tun->key.ttl;
+ } else {
+ struct net *net = dev_net(netdev);
+ struct flowi4 flow = {};
+ struct rtable *rt;
+ int err;
+
+		/* Do a route lookup to determine the TTL; if the lookup
+		 * fails, fall back to the default. Note that CONFIG_INET
+		 * is a dependency of CONFIG_NET_SWITCHDEV, so it is
+		 * guaranteed to be defined here.
+		 */
+ flow.daddr = ip_tun->key.u.ipv4.dst;
+ flow.flowi4_proto = IPPROTO_UDP;
+ rt = ip_route_output_key(net, &flow);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (!err) {
+ set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
+ ip_rt_put(rt);
+ } else {
+ set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+ }
+ }
+
+ set_tun->tos = ip_tun->key.tos;
+
+ if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
+ ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
+ return -EOPNOTSUPP;
+ set_tun->tun_flags = ip_tun->key.tun_flags;
+
+ if (tun_type == NFP_FL_TUNNEL_GENEVE) {
+ set_tun->tun_proto = htons(ETH_P_TEB);
+ set_tun->tun_len = ip_tun->options_len / 4;
+ }
/* Complete pre_tunnel action. */
pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
@@ -398,8 +512,27 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
return 0;
}
+static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
+{
+ switch (ip_proto) {
+ case 0:
+		/* The filter does not force a protocol match, so both
+		 * TCP and UDP checksums will be updated if encountered.
+		 */
+ return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
+ case IPPROTO_TCP:
+ return TCA_CSUM_UPDATE_FLAG_TCP;
+ case IPPROTO_UDP:
+ return TCA_CSUM_UPDATE_FLAG_UDP;
+ default:
+ /* All other protocols will be ignored by FW */
+ return 0;
+ }
+}
+
static int
-nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
+nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
+ char *nfp_action, int *a_len, u32 *csum_updated)
{
struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
struct nfp_fl_set_ip4_addrs set_ip_addr;
@@ -409,6 +542,7 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
int idx, nkeys, err;
size_t act_size;
u32 offset, cmd;
+ u8 ip_proto = 0;
memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
memset(&set_ip6_src, 0, sizeof(set_ip6_src));
@@ -451,6 +585,15 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
return err;
}
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *basic;
+
+ basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->key);
+ ip_proto = basic->ip_proto;
+ }
+
if (set_eth.head.len_lw) {
act_size = sizeof(set_eth);
memcpy(nfp_action, &set_eth, act_size);
@@ -459,6 +602,10 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
act_size = sizeof(set_ip_addr);
memcpy(nfp_action, &set_ip_addr, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
+ *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
+ nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action,
* the hardware requires this to be 2 separate actions.
@@ -471,18 +618,30 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_dst.head.len_lw) {
act_size = sizeof(set_ip6_dst);
memcpy(nfp_action, &set_ip6_dst, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_src.head.len_lw) {
act_size = sizeof(set_ip6_src);
memcpy(nfp_action, &set_ip6_src, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_tport.head.len_lw) {
act_size = sizeof(set_tport);
memcpy(nfp_action, &set_tport, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
}
return 0;
@@ -493,12 +652,18 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt)
+ int *out_cnt, u32 *csum_updated)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_output *output;
int err, prelag_size;
+ /* If csum_updated has not been reset by now, it means HW will
+ * incorrectly update csums when they are not requested.
+ */
+ if (*csum_updated)
+ return -EOPNOTSUPP;
+
if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
@@ -529,10 +694,11 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
static int
nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
+ struct tc_cls_flower_offload *flow,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt)
+ int *out_cnt, u32 *csum_updated)
{
struct nfp_fl_set_ipv4_udp_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
@@ -545,14 +711,14 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
} else if (is_tcf_mirred_egress_redirect(a)) {
err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
true, tun_type, tun_out_cnt,
- out_cnt);
+ out_cnt, csum_updated);
if (err)
return err;
} else if (is_tcf_mirred_egress_mirror(a)) {
err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
false, tun_type, tun_out_cnt,
- out_cnt);
+ out_cnt, csum_updated);
if (err)
return err;
@@ -592,9 +758,13 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
*a_len += sizeof(struct nfp_fl_pre_tunnel);
+ err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
+ if (err)
+ return err;
+
set_tun = (void *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type,
- netdev);
+ err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
+ *tun_type, netdev);
if (err)
return err;
*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
@@ -602,8 +772,17 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
/* Tunnel decap is handled by default so accept action. */
return 0;
} else if (is_tcf_pedit(a)) {
- if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len))
+ if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
+ a_len, csum_updated))
+ return -EOPNOTSUPP;
+ } else if (is_tcf_csum(a)) {
+ /* csum action requests recalc of something we have not fixed */
+ if (tcf_csum_update_flags(a) & ~*csum_updated)
return -EOPNOTSUPP;
+		/* If we will correctly fix the csum, we can remove it from
+		 * the csum update list, which is later used to check
+		 * support.
+		 */
+ *csum_updated &= ~tcf_csum_update_flags(a);
} else {
/* Currently we do not handle any other actions. */
return -EOPNOTSUPP;
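/* Editor's note: an illustrative sketch, not part of the patch. It
 * condenses the csum contract spread across nfp_fl_pedit(), the
 * is_tcf_csum() branch above and nfp_flower_output_action(): pedit
 * accumulates the csums HW will rewrite, a csum action may only
 * request a subset of those, and any bits still set at output time
 * mean HW would rewrite csums nobody asked for. Names here are
 * hypothetical.
 */
static int example_csum_tracking(u32 *csum_updated, u32 hw_fixed,
				 u32 requested)
{
	*csum_updated |= hw_fixed;	/* pedit: HW will fix these */

	if (requested & ~*csum_updated)	/* csum action asks for more */
		return -EOPNOTSUPP;

	*csum_updated &= ~requested;	/* consume the request */
	return 0;			/* at output, *csum_updated must be 0 */
}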
@@ -620,6 +799,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
int act_len, act_cnt, err, tun_out_cnt, out_cnt;
enum nfp_flower_tun_type tun_type;
const struct tc_action *a;
+ u32 csum_updated = 0;
LIST_HEAD(actions);
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
@@ -632,8 +812,9 @@ int nfp_flower_compile_action(struct nfp_app *app,
tcf_exts_to_list(flow->exts, &actions);
list_for_each_entry(a, &actions, list) {
- err = nfp_flower_loop_action(app, a, nfp_flow, &act_len, netdev,
- &tun_type, &tun_out_cnt, &out_cnt);
+ err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
+ netdev, &tun_type, &tun_out_cnt,
+ &out_cnt, &csum_updated);
if (err)
return err;
act_cnt++;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 4a7f3510a296..325954b829c8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -37,6 +37,7 @@
#include <linux/bitfield.h>
#include <linux/skbuff.h>
#include <linux/types.h>
+#include <net/geneve.h>
#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"
@@ -51,6 +52,7 @@
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
+#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
@@ -81,6 +83,11 @@
#define NFP_FL_MAX_A_SIZ 1216
#define NFP_FL_LW_SIZ 2
+/* Maximum allowed geneve options */
+#define NFP_FL_MAX_GENEVE_OPT_ACT 32
+#define NFP_FL_MAX_GENEVE_OPT_CNT 64
+#define NFP_FL_MAX_GENEVE_OPT_KEY 32
+
/* Action opcodes */
#define NFP_FL_ACTION_OPCODE_OUTPUT 0
#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
@@ -94,6 +101,7 @@
#define NFP_FL_ACTION_OPCODE_SET_TCP 15
#define NFP_FL_ACTION_OPCODE_PRE_LAG 16
#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
+#define NFP_FL_ACTION_OPCODE_PUSH_GENEVE 26
#define NFP_FL_ACTION_OPCODE_NUM 32
#define NFP_FL_OUT_FLAGS_LAST BIT(15)
@@ -203,10 +211,22 @@ struct nfp_fl_set_ipv4_udp_tun {
__be16 reserved;
__be64 tun_id __packed;
__be32 tun_type_index;
- __be16 reserved2;
+ __be16 tun_flags;
u8 ttl;
- u8 reserved3;
- __be32 extra[2];
+ u8 tos;
+ __be32 extra;
+ u8 tun_len;
+ u8 res2;
+ __be16 tun_proto;
+};
+
+struct nfp_fl_push_geneve {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be16 class;
+ u8 type;
+ u8 length;
+ u8 opt_data[];
};
/* Metadata with L2 (1W/4B)
@@ -346,7 +366,7 @@ struct nfp_flower_ipv6 {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ipv4_addr_dst |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | Reserved |
+ * | Reserved | tos | ttl |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -356,10 +376,17 @@ struct nfp_flower_ipv6 {
struct nfp_flower_ipv4_udp_tun {
__be32 ip_src;
__be32 ip_dst;
- __be32 reserved[2];
+ __be16 reserved1;
+ u8 tos;
+ u8 ttl;
+ __be32 reserved2;
__be32 tun_id;
};
+struct nfp_flower_geneve_options {
+ u8 data[NFP_FL_MAX_GENEVE_OPT_KEY];
+};
+
#define NFP_FL_TUN_VNI_OFFSET 8
/* The base header for a control message packet.
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 0c4c957717ea..bf10598f66ae 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -564,8 +564,9 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
if (lag_upper_info &&
lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
(lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
- (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
- lag_upper_info->hash_type != NETDEV_LAG_HASH_E34))) {
+ (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
+ lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 &&
+ lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) {
can_offload = false;
nfp_flower_cmsg_warn(priv->app,
"Unable to offload tx_type %u hash %u\n",
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index bbe5764d26cb..85f8209bf007 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -69,11 +69,12 @@ struct nfp_app;
/* Extra features bitmap. */
#define NFP_FL_FEATS_GENEVE BIT(0)
#define NFP_FL_NBI_MTU_SETTING BIT(1)
+#define NFP_FL_FEATS_GENEVE_OPT BIT(2)
#define NFP_FL_FEATS_LAG BIT(31)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
- struct timespec64 *last_used;
+ ktime_t *last_used;
u8 init_unallocated;
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 84f7a5dbea9d..a0c72f277faa 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -262,6 +262,21 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}
+static int
+nfp_flower_compile_geneve_opt(void *key_buf, struct tc_cls_flower_offload *flow,
+ bool mask_version)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_enc_opts *opts;
+
+ opts = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_OPTS,
+ target);
+ memcpy(key_buf, opts->data, opts->len);
+
+ return 0;
+}
+
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
struct tc_cls_flower_offload *flow,
@@ -270,6 +285,7 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
struct flow_dissector_key_ipv4_addrs *tun_ips;
struct flow_dissector_key_keyid *vni;
+ struct flow_dissector_key_ip *ip;
memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
@@ -293,6 +309,14 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
frame->ip_src = tun_ips->src;
frame->ip_dst = tun_ips->dst;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IP,
+ target);
+ frame->tos = ip->tos;
+ frame->ttl = ip->ttl;
+ }
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
@@ -415,6 +439,16 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
nfp_flow->nfp_tun_ipv4_addr = tun_dst;
nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
}
+
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
+ err = nfp_flower_compile_geneve_opt(ext, flow, false);
+ if (err)
+ return err;
+
+ err = nfp_flower_compile_geneve_opt(msk, flow, true);
+ if (err)
+ return err;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 93fb809f50d1..c098730544b7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -158,7 +158,6 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
struct nfp_flower_priv *priv = app->priv;
struct circ_buf *ring;
- struct timespec64 now;
ring = &priv->mask_ids.mask_id_free_list;
/* Checking if buffer is full. */
@@ -169,8 +168,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
(NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);
- getnstimeofday64(&now);
- priv->mask_ids.last_used[mask_id] = now;
+ priv->mask_ids.last_used[mask_id] = ktime_get();
return 0;
}
@@ -178,7 +176,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
struct nfp_flower_priv *priv = app->priv;
- struct timespec64 delta, now;
+ ktime_t reuse_timeout;
struct circ_buf *ring;
u8 temp_id, freed_id;
@@ -198,10 +196,10 @@ static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
*mask_id = temp_id;
- getnstimeofday64(&now);
- delta = timespec64_sub(now, priv->mask_ids.last_used[*mask_id]);
+ reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
+ NFP_FL_MASK_REUSE_TIME_NS);
- if (timespec64_to_ns(&delta) < NFP_FL_MASK_REUSE_TIME_NS)
+ if (ktime_before(ktime_get(), reuse_timeout))
goto err_not_found;
memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
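/* Editor's note: an illustrative sketch, not part of the patch. The
 * conversion above swaps wall-clock timespec64 arithmetic for the
 * monotonic ktime_t idiom shown here: stamp the entry on release and
 * refuse reuse until the quarantine period has elapsed. The helper
 * name is hypothetical; NFP_FL_MASK_REUSE_TIME_NS is the driver's own
 * constant.
 */
static bool example_id_reusable(ktime_t last_used, u64 quarantine_ns)
{
	ktime_t reuse_timeout = ktime_add_ns(last_used, quarantine_ns);

	/* ktime_get() is monotonic, so clock adjustments cannot
	 * shorten the quarantine the way wall-clock time could.
	 */
	return !ktime_before(ktime_get(), reuse_timeout);
}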
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 525057bee0ed..2edab01c3beb 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -66,6 +66,8 @@
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
BIT(FLOW_DISSECTOR_KEY_MPLS) | \
BIT(FLOW_DISSECTOR_KEY_IP))
@@ -74,7 +76,9 @@
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP))
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
@@ -139,6 +143,21 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
}
static int
+nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
+ u32 *key_layer_two, int *key_size)
+{
+ if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY)
+ return -EOPNOTSUPP;
+
+ if (enc_opts->len > 0) {
+ *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
+ *key_size += sizeof(struct nfp_flower_geneve_options);
+ }
+
+ return 0;
+}
+
+static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow,
@@ -151,6 +170,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
u32 key_layer_two;
u8 key_layer;
int key_size;
+ int err;
if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
return -EOPNOTSUPP;
@@ -176,6 +196,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
struct flow_dissector_key_ports *mask_enc_ports = NULL;
+ struct flow_dissector_key_enc_opts *enc_op = NULL;
struct flow_dissector_key_ports *enc_ports = NULL;
struct flow_dissector_key_control *mask_enc_ctl =
skb_flow_dissector_target(flow->dissector,
@@ -212,11 +233,21 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (mask_enc_ports->dst != cpu_to_be16(~0))
return -EOPNOTSUPP;
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ enc_op = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_OPTS,
+ flow->key);
+ }
+
switch (enc_ports->dst) {
case htons(NFP_FL_VXLAN_PORT):
*tun_type = NFP_FL_TUNNEL_VXLAN;
key_layer |= NFP_FLOWER_LAYER_VXLAN;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (enc_op)
+ return -EOPNOTSUPP;
break;
case htons(NFP_FL_GENEVE_PORT):
if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
@@ -226,6 +257,15 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
key_size += sizeof(struct nfp_flower_ext_meta);
key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (!enc_op)
+ break;
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
+ return -EOPNOTSUPP;
+ err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
+ &key_size);
+ if (err)
+ return err;
break;
default:
return -EOPNOTSUPP;
@@ -584,9 +624,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
return nfp_flower_del_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_STATS:
return nfp_flower_get_stats(app, netdev, flower, egress);
+ default:
+ return -EOPNOTSUPP;
}
-
- return -EOPNOTSUPP;
}
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
@@ -631,14 +671,11 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
- if (tcf_block_shared(f->block))
- return -EOPNOTSUPP;
-
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
nfp_flower_setup_tc_block_cb,
- repr, repr);
+ repr, repr, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block,
nfp_flower_setup_tc_block_cb,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index f28b244f4ee7..8607d09ab732 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -86,6 +86,23 @@ const char *nfp_app_mip_name(struct nfp_app *app)
return nfp_mip_name(app->pf->mip);
}
+int nfp_app_ndo_init(struct net_device *netdev)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+ if (!app || !app->type->ndo_init)
+ return 0;
+ return app->type->ndo_init(app, netdev);
+}
+
+void nfp_app_ndo_uninit(struct net_device *netdev)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+ if (app && app->type->ndo_uninit)
+ app->type->ndo_uninit(app, netdev);
+}
+
u64 *nfp_app_port_get_stats(struct nfp_port *port, u64 *data)
{
if (!port || !port->app || !port->app->type->port_get_stats)
@@ -155,6 +172,8 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
if (WARN_ON(!apps[id]->name || !apps[id]->vnic_alloc))
return ERR_PTR(-EINVAL);
+ if (WARN_ON(!apps[id]->ctrl_msg_rx && apps[id]->ctrl_msg_rx_raw))
+ return ERR_PTR(-EINVAL);
app = kzalloc(sizeof(*app), GFP_KERNEL);
if (!app)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index ee74caacb015..4e1eb3395648 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -78,6 +78,8 @@ extern const struct nfp_app_type app_abm;
* @init: perform basic app checks and init
* @clean: clean app state
* @extra_cap: extra capabilities string
+ * @ndo_init: vNIC and repr netdev .ndo_init
+ * @ndo_uninit: vNIC and repr netdev .ndo_uninit
* @vnic_alloc: allocate vNICs (assign port types, etc.)
* @vnic_free: free up app's vNIC state
* @vnic_init: vNIC netdev was registered
@@ -96,6 +98,7 @@ extern const struct nfp_app_type app_abm;
* @start: start application logic
* @stop: stop application logic
* @ctrl_msg_rx: control message handler
+ * @ctrl_msg_rx_raw: handler for control messages from data queues
* @setup_tc: setup TC ndo
* @bpf: BPF ndo offload-related calls
* @xdp_offload: offload an XDP program
@@ -117,6 +120,9 @@ struct nfp_app_type {
const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn);
+ int (*ndo_init)(struct nfp_app *app, struct net_device *netdev);
+ void (*ndo_uninit)(struct nfp_app *app, struct net_device *netdev);
+
int (*vnic_alloc)(struct nfp_app *app, struct nfp_net *nn,
unsigned int id);
void (*vnic_free)(struct nfp_app *app, struct nfp_net *nn);
@@ -145,6 +151,8 @@ struct nfp_app_type {
void (*stop)(struct nfp_app *app);
void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb);
+ void (*ctrl_msg_rx_raw)(struct nfp_app *app, const void *data,
+ unsigned int len);
int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
@@ -200,6 +208,9 @@ static inline void nfp_app_clean(struct nfp_app *app)
app->type->clean(app);
}
+int nfp_app_ndo_init(struct net_device *netdev);
+void nfp_app_ndo_uninit(struct net_device *netdev);
+
static inline int nfp_app_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
unsigned int id)
{
@@ -310,6 +321,11 @@ static inline bool nfp_app_ctrl_has_meta(struct nfp_app *app)
return app->type->ctrl_has_meta;
}
+static inline bool nfp_app_ctrl_uses_data_vnics(struct nfp_app *app)
+{
+ return app && app->type->ctrl_msg_rx_raw;
+}
+
static inline const char *nfp_app_extra_cap(struct nfp_app *app,
struct nfp_net *nn)
{
@@ -373,6 +389,16 @@ static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
app->type->ctrl_msg_rx(app, skb);
}
+static inline void
+nfp_app_ctrl_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
+{
+ if (!app || !app->type->ctrl_msg_rx_raw)
+ return;
+
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), true, 0, data, len);
+ app->type->ctrl_msg_rx_raw(app, data, len);
+}
+
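/* Editor's note: an illustrative sketch, not part of the patch. An app
 * opting in to control messages on data queues sets the new callback in
 * its nfp_app_type; nfp_app_ctrl_uses_data_vnics() then keys off it.
 * Note that nfp_app_alloc() rejects an app that sets ctrl_msg_rx_raw
 * without also providing ctrl_msg_rx. The app below is hypothetical.
 */
static void example_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	/* Handle a control message arriving on the control vNIC. */
	dev_consume_skb_any(skb);
}

static void example_ctrl_msg_rx_raw(struct nfp_app *app, const void *data,
				    unsigned int len)
{
	/* Handle a raw control message payload from a data queue. */
}

const struct nfp_app_type app_example = {
	.name			= "example",
	.ctrl_msg_rx		= example_ctrl_msg_rx,
	.ctrl_msg_rx_raw	= example_ctrl_msg_rx_raw,
	/* .vnic_alloc and the other mandatory callbacks omitted. */
};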
static inline int nfp_app_eswitch_mode_get(struct nfp_app *app, u16 *mode)
{
if (!app->type->eswitch_mode_get)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index f6677bc9875a..fad0e62a910c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -93,6 +93,7 @@ enum br_mask {
BR_BNE = 0x01,
BR_BMI = 0x02,
BR_BHS = 0x04,
+ BR_BCC = 0x05,
BR_BLO = 0x05,
BR_BGE = 0x08,
BR_BLT = 0x09,
@@ -426,4 +427,32 @@ static inline u32 nfp_get_ind_csr_ctx_ptr_offs(u32 read_offset)
return (read_offset & ~NFP_IND_ME_CTX_PTR_BASE_MASK) | NFP_CSR_CTX_PTR;
}
+enum mul_type {
+ MUL_TYPE_START = 0x00,
+ MUL_TYPE_STEP_24x8 = 0x01,
+ MUL_TYPE_STEP_16x16 = 0x02,
+ MUL_TYPE_STEP_32x32 = 0x03,
+};
+
+enum mul_step {
+ MUL_STEP_1 = 0x00,
+ MUL_STEP_NONE = MUL_STEP_1,
+ MUL_STEP_2 = 0x01,
+ MUL_STEP_3 = 0x02,
+ MUL_STEP_4 = 0x03,
+ MUL_LAST = 0x04,
+ MUL_LAST_2 = 0x05,
+};
+
+#define OP_MUL_BASE 0x0f800000000ULL
+#define OP_MUL_A_SRC 0x000000003ffULL
+#define OP_MUL_B_SRC 0x000000ffc00ULL
+#define OP_MUL_STEP 0x00000700000ULL
+#define OP_MUL_DST_AB 0x00000800000ULL
+#define OP_MUL_SW 0x00040000000ULL
+#define OP_MUL_TYPE 0x00180000000ULL
+#define OP_MUL_WR_AB 0x20000000000ULL
+#define OP_MUL_SRC_LMEXTN 0x40000000000ULL
+#define OP_MUL_DST_LMEXTN 0x80000000000ULL
+
#endif
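/* Editor's note: an illustrative sketch, not part of the patch. The
 * OP_MUL_* values above are bitfield masks over a 64-bit instruction
 * word, so an encoder would typically assemble one with FIELD_PREP()
 * from <linux/bitfield.h>, which the driver already includes. The
 * helper name and operand values are hypothetical.
 */
static inline u64 example_mul_start(u16 a_src, u16 b_src)
{
	return OP_MUL_BASE |
	       FIELD_PREP(OP_MUL_A_SRC, a_src) |
	       FIELD_PREP(OP_MUL_B_SRC, b_src) |
	       FIELD_PREP(OP_MUL_STEP, MUL_STEP_NONE) |
	       FIELD_PREP(OP_MUL_TYPE, MUL_TYPE_START);
}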
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 152283d7e59c..4a540c5e27fe 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -236,16 +236,20 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
int err;
pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err);
- if (!err)
- return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
+ if (err) {
+		/* For backwards compatibility, allow any setting if the
+		 * symbol is not found.
+		 */
+ pf->limit_vfs = ~0;
+ if (err == -ENOENT)
+ return 0;
- pf->limit_vfs = ~0;
- /* Allow any setting for backwards compatibility if symbol not found */
- if (err == -ENOENT)
- return 0;
+ nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
+ return err;
+ }
- nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
- return err;
+ err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
+ if (err)
+ nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err);
+ return 0;
}
static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 2a71a9ffd095..439e6ffe2f05 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -250,7 +250,7 @@ struct nfp_net_tx_ring {
struct nfp_net_tx_desc *txds;
dma_addr_t dma;
- unsigned int size;
+ size_t size;
bool is_xdp;
} ____cacheline_aligned;
@@ -350,9 +350,9 @@ struct nfp_net_rx_buf {
* @qcp_fl: Pointer to base of the QCP freelist queue
* @rxbufs: Array of transmitted FL/RX buffers
* @rxds: Virtual address of FL/RX ring in host memory
+ * @xdp_rxq: RX-ring info avail for XDP
* @dma: DMA address of the FL/RX ring
* @size: Size, in bytes, of the FL/RX ring (needed to free)
- * @xdp_rxq: RX-ring info avail for XDP
*/
struct nfp_net_rx_ring {
struct nfp_net_r_vector *r_vec;
@@ -364,14 +364,15 @@ struct nfp_net_rx_ring {
u32 idx;
int fl_qcidx;
- unsigned int size;
u8 __iomem *qcp_fl;
struct nfp_net_rx_buf *rxbufs;
struct nfp_net_rx_desc *rxds;
- dma_addr_t dma;
struct xdp_rxq_info xdp_rxq;
+
+ dma_addr_t dma;
+ size_t size;
} ____cacheline_aligned;
/**
@@ -485,7 +486,6 @@ struct nfp_stat_pair {
* @dev: Backpointer to struct device
* @netdev: Backpointer to net_device structure
* @is_vf: Is the driver attached to a VF?
- * @bpf_offload_xdp: Offloaded BPF program is XDP
* @chained_metadata_format: Firmware will use new metadata format
* @rx_dma_dir: Mapping direction for RX buffers
* @rx_dma_off: Offset at which DMA packets (for XDP headroom)
@@ -510,7 +510,6 @@ struct nfp_net_dp {
struct net_device *netdev;
u8 is_vf:1;
- u8 bpf_offload_xdp:1;
u8 chained_metadata_format:1;
u8 rx_dma_dir;
@@ -553,8 +552,8 @@ struct nfp_net_dp {
* @rss_cfg: RSS configuration
* @rss_key: RSS secret key
* @rss_itbl: RSS indirection table
- * @xdp_flags: Flags with which XDP prog was loaded
- * @xdp_prog: XDP prog (for ctrl path, both DRV and HW modes)
+ * @xdp: Information about the driver XDP program
+ * @xdp_hw: Information about the HW XDP program
* @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
@@ -610,8 +609,8 @@ struct nfp_net {
u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
- u32 xdp_flags;
- struct bpf_prog *xdp_prog;
+ struct xdp_attachment_info xdp;
+ struct xdp_attachment_info xdp_hw;
unsigned int max_tx_rings;
unsigned int max_rx_rings;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index d4c27f849f9b..a8b9fbab5f73 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -53,6 +53,8 @@
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/mm.h>
+#include <linux/overflow.h>
#include <linux/page_ref.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
@@ -945,11 +947,10 @@ err_free:
/**
* nfp_net_tx_complete() - Handle completed TX packets
- * @tx_ring: TX ring structure
- *
- * Return: Number of completed TX descriptors
+ * @tx_ring: TX ring structure
+ * @budget: NAPI budget (only used as bool to determine if in NAPI context)
*/
-static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
+static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
@@ -999,7 +1000,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
/* check for last gather fragment */
if (fidx == nr_frags - 1)
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, budget);
tx_ring->txbufs[idx].dma_addr = 0;
tx_ring->txbufs[idx].skb = NULL;
@@ -1077,7 +1078,7 @@ static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
* @dp: NFP Net data path struct
* @tx_ring: TX ring structure
*
- * Assumes that the device is stopped
+ * Assumes that the device is stopped, must be idempotent.
*/
static void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
@@ -1119,7 +1120,7 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
tx_ring->rd_p++;
}
- memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
+ memset(tx_ring->txds, 0, tx_ring->size);
tx_ring->wr_p = 0;
tx_ring->rd_p = 0;
tx_ring->qcp_rd_p = 0;
@@ -1279,13 +1280,18 @@ static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
* nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
* @rx_ring: RX ring structure
*
- * Warning: Do *not* call if ring buffers were never put on the FW freelist
- * (i.e. device was not enabled)!
+ * Assumes that the device is stopped, must be idempotent.
*/
static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
unsigned int wr_idx, last_idx;
+ /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always
+ * kept at cnt - 1 FL bufs.
+ */
+ if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
+ return;
+
/* Move the empty entry to the end of the list */
wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
last_idx = rx_ring->cnt - 1;
@@ -1294,7 +1300,7 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
rx_ring->rxbufs[last_idx].dma_addr = 0;
rx_ring->rxbufs[last_idx].frag = NULL;
- memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
+ memset(rx_ring->rxds, 0, rx_ring->size);
rx_ring->wr_p = 0;
rx_ring->rd_p = 0;
}
@@ -1709,8 +1715,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
}
}
- if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
- dp->bpf_offload_xdp) && !meta.portid) {
+ if (xdp_prog && !meta.portid) {
void *orig_data = rxbuf->frag + pkt_off;
unsigned int dma_off;
int act;
@@ -1752,6 +1757,29 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
}
}
+ if (likely(!meta.portid)) {
+ netdev = dp->netdev;
+ } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
+ pkt_len);
+ nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ } else {
+ struct nfp_net *nn;
+
+ nn = netdev_priv(dp->netdev);
+ netdev = nfp_app_repr_get(nn->app, meta.portid);
+ if (unlikely(!netdev)) {
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
+ }
+
skb = build_skb(rxbuf->frag, true_bufsz);
if (unlikely(!skb)) {
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
@@ -1767,20 +1795,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
- if (likely(!meta.portid)) {
- netdev = dp->netdev;
- } else {
- struct nfp_net *nn;
-
- nn = netdev_priv(dp->netdev);
- netdev = nfp_app_repr_get(nn->app, meta.portid);
- if (unlikely(!netdev)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
- continue;
- }
- nfp_repr_inc_rx_stats(netdev, pkt_len);
- }
-
skb_reserve(skb, pkt_off);
skb_put(skb, pkt_len);
@@ -1828,7 +1842,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
unsigned int pkts_polled = 0;
if (r_vec->tx_ring)
- nfp_net_tx_complete(r_vec->tx_ring);
+ nfp_net_tx_complete(r_vec->tx_ring, budget);
if (r_vec->rx_ring)
pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
@@ -2062,7 +2076,7 @@ static void nfp_ctrl_poll(unsigned long arg)
struct nfp_net_r_vector *r_vec = (void *)arg;
spin_lock_bh(&r_vec->lock);
- nfp_net_tx_complete(r_vec->tx_ring);
+ nfp_net_tx_complete(r_vec->tx_ring, 0);
__nfp_ctrl_tx_queued(r_vec);
spin_unlock_bh(&r_vec->lock);
@@ -2121,7 +2135,7 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
- kfree(tx_ring->txbufs);
+ kvfree(tx_ring->txbufs);
if (tx_ring->txds)
dma_free_coherent(dp->dev, tx_ring->size,
@@ -2145,18 +2159,17 @@ static int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- int sz;
tx_ring->cnt = dp->txd_cnt;
- tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
+ tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->txds)
goto err_alloc;
- sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
- tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
+ tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
+ GFP_KERNEL);
if (!tx_ring->txbufs)
goto err_alloc;
@@ -2270,7 +2283,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
if (dp->netdev)
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- kfree(rx_ring->rxbufs);
+ kvfree(rx_ring->rxbufs);
if (rx_ring->rxds)
dma_free_coherent(dp->dev, rx_ring->size,
@@ -2293,7 +2306,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
- int sz, err;
+ int err;
if (dp->netdev) {
err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
@@ -2303,14 +2316,14 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
}
rx_ring->cnt = dp->rxd_cnt;
- rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
+ rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->rxds)
goto err_alloc;
- sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
- rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
+ rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs),
+ GFP_KERNEL);
if (!rx_ring->rxbufs)
goto err_alloc;
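/* Editor's note: an illustrative sketch, not part of the patch. Both
 * ring-allocation hunks above converge on the overflow-safe pattern
 * below: array_size() from <linux/overflow.h> saturates to SIZE_MAX on
 * multiply overflow so the DMA allocation fails cleanly, and kvcalloc()
 * checks the multiplication itself and can fall back to vmalloc for
 * large rings. Names are hypothetical; pair kvcalloc() with kvfree().
 */
static void *example_ring_bufs(size_t cnt, size_t elem_size, size_t *bytes)
{
	*bytes = array_size(cnt, elem_size);	/* SIZE_MAX on overflow */
	return kvcalloc(cnt, elem_size, GFP_KERNEL);
}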
@@ -2508,6 +2521,8 @@ static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
/**
* nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
* @nn: NFP Net device to reconfigure
+ *
+ * Warning: must be fully idempotent.
*/
static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
@@ -3115,6 +3130,21 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void nfp_net_netpoll(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int i;
+
+	/* nfp_net's NAPIs are statically allocated, so even if there is a
+	 * race with the reconfig path this will simply try to schedule
+	 * some disabled NAPI instances.
+ */
+ for (i = 0; i < nn->dp.num_stack_tx_rings; i++)
+ napi_schedule_irqoff(&nn->r_vecs[i].napi);
+}
+#endif
+
static void nfp_net_stat64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -3377,14 +3407,18 @@ static void nfp_net_del_vxlan_port(struct net_device *netdev,
nfp_net_set_vxlan_port(nn, idx, 0);
}
-static int
-nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
- struct netlink_ext_ack *extack)
+static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
{
+ struct bpf_prog *prog = bpf->prog;
struct nfp_net_dp *dp;
+ int err;
+
+ if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
+ return -EBUSY;
if (!prog == !nn->dp.xdp_prog) {
WRITE_ONCE(nn->dp.xdp_prog, prog);
+ xdp_attachment_setup(&nn->xdp, bpf);
return 0;
}
@@ -3398,38 +3432,26 @@ nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
- return nfp_net_ring_reconfig(nn, dp, extack);
+ err = nfp_net_ring_reconfig(nn, dp, bpf->extack);
+ if (err)
+ return err;
+
+ xdp_attachment_setup(&nn->xdp, bpf);
+ return 0;
}
-static int
-nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
- struct netlink_ext_ack *extack)
+static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
{
- struct bpf_prog *drv_prog, *offload_prog;
int err;
- if (nn->xdp_prog && (flags ^ nn->xdp_flags) & XDP_FLAGS_MODES)
+ if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
return -EBUSY;
- /* Load both when no flags set to allow easy activation of driver path
- * when program is replaced by one which can't be offloaded.
- */
- drv_prog = flags & XDP_FLAGS_HW_MODE ? NULL : prog;
- offload_prog = flags & XDP_FLAGS_DRV_MODE ? NULL : prog;
-
- err = nfp_net_xdp_setup_drv(nn, drv_prog, extack);
+ err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
if (err)
return err;
- err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack);
- if (err && flags & XDP_FLAGS_HW_MODE)
- return err;
-
- if (nn->xdp_prog)
- bpf_prog_put(nn->xdp_prog);
- nn->xdp_prog = prog;
- nn->xdp_flags = flags;
-
+ xdp_attachment_setup(&nn->xdp_hw, bpf);
return 0;
}
@@ -3439,16 +3461,13 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
+ return nfp_net_xdp_setup_drv(nn, xdp);
case XDP_SETUP_PROG_HW:
- return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags,
- xdp->extack);
+ return nfp_net_xdp_setup_hw(nn, xdp);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!nn->xdp_prog;
- if (nn->dp.bpf_offload_xdp)
- xdp->prog_attached = XDP_ATTACHED_HW;
- xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
- xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0;
- return 0;
+ return xdp_attachment_query(&nn->xdp, xdp);
+ case XDP_QUERY_PROG_HW:
+ return xdp_attachment_query(&nn->xdp_hw, xdp);
default:
return nfp_app_bpf(nn->app, nn, xdp);
}
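/* Editor's note: an illustrative sketch, not part of the patch. The
 * xdp_attachment_* helpers from <net/xdp.h> replace the hand-rolled
 * flags/prog bookkeeping removed above. A minimal setup path under this
 * scheme might look as follows; the driver types are hypothetical.
 */
static int example_xdp_setup(struct example_priv *priv,
			     struct netdev_bpf *bpf)
{
	/* Refuse to replace a program attached with different flags. */
	if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
		return -EBUSY;

	/* Swap bpf->prog into the datapath here, reconfiguring rings
	 * if the buffer layout needs to change.
	 */

	/* Record prog, flags and id for a later XDP_QUERY_PROG. */
	xdp_attachment_setup(&priv->xdp, bpf);
	return 0;
}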
@@ -3476,12 +3495,17 @@ static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
}
const struct net_device_ops nfp_net_netdev_ops = {
+ .ndo_init = nfp_app_ndo_init,
+ .ndo_uninit = nfp_app_ndo_uninit,
.ndo_open = nfp_net_netdev_open,
.ndo_stop = nfp_net_netdev_close,
.ndo_start_xmit = nfp_net_tx,
.ndo_get_stats64 = nfp_net_stat64,
.ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = nfp_net_netpoll,
+#endif
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
@@ -3840,6 +3864,9 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.mtu = NFP_NET_DEFAULT_MTU;
nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
+ if (nfp_app_ctrl_uses_data_vnics(nn->app))
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA;
+
if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
nfp_net_rss_init(nn);
nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index bb63c115537d..44d3ea75d043 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -127,6 +127,7 @@
#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */
#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */
#define NFP_NET_CFG_CTRL_CTAG_FILTER (0x1 << 11) /* VLAN CTAG filtering */
+#define NFP_NET_CFG_CTRL_CMSG_DATA (0x1 << 12) /* RX cmsgs on data Qs */
#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */
#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 26d1cc4e2906..6a79c8e4a7a4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -233,12 +233,10 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
- struct nfp_app *app;
-
- app = nfp_app_from_netdev(netdev);
- if (!app)
- return;
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ strlcpy(drvinfo->bus_info, pci_name(app->pdev),
+ sizeof(drvinfo->bus_info));
nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}
@@ -452,7 +450,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS;
+ return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS;
}
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -460,7 +458,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
struct nfp_net *nn = netdev_priv(netdev);
int i;
- for (i = 0; i < nn->dp.num_r_vecs; i++) {
+ for (i = 0; i < nn->max_r_vecs; i++) {
data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
data = nfp_pr_et(data, "rvec_%u_tx_pkts", i);
data = nfp_pr_et(data, "rvec_%u_tx_busy", i);
@@ -486,7 +484,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
u64 tmp[NN_RVEC_GATHER_STATS];
unsigned int i, j;
- for (i = 0; i < nn->dp.num_r_vecs; i++) {
+ for (i = 0; i < nn->max_r_vecs; i++) {
unsigned int start;
do {
@@ -521,15 +519,13 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
return data;
}
-static unsigned int
-nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings)
+static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs)
{
- return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2;
+ return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4;
}
static u8 *
-nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
- unsigned int tx_rings, bool repr)
+nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
{
int swap_off, i;
@@ -549,36 +545,29 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
data = nfp_pr_et(data, nfp_net_et_stats[i].name);
- for (i = 0; i < tx_rings; i++) {
- data = nfp_pr_et(data, "txq_%u_pkts", i);
- data = nfp_pr_et(data, "txq_%u_bytes", i);
- }
-
- for (i = 0; i < rx_rings; i++) {
+ for (i = 0; i < num_vecs; i++) {
data = nfp_pr_et(data, "rxq_%u_pkts", i);
data = nfp_pr_et(data, "rxq_%u_bytes", i);
+ data = nfp_pr_et(data, "txq_%u_pkts", i);
+ data = nfp_pr_et(data, "txq_%u_bytes", i);
}
return data;
}
static u64 *
-nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem,
- unsigned int rx_rings, unsigned int tx_rings)
+nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
{
unsigned int i;
for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
*data++ = readq(mem + nfp_net_et_stats[i].off);
- for (i = 0; i < tx_rings; i++) {
- *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
- *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
- }
-
- for (i = 0; i < rx_rings; i++) {
+ for (i = 0; i < num_vecs; i++) {
*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
}
return data;
@@ -633,8 +622,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
data = nfp_vnic_get_sw_stats_strings(netdev, data);
- data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings,
- nn->dp.num_tx_rings,
+ data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs,
false);
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(nn->port, data);
@@ -649,8 +637,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
struct nfp_net *nn = netdev_priv(netdev);
data = nfp_vnic_get_sw_stats(netdev, data);
- data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
- nn->dp.num_rx_rings, nn->dp.num_tx_rings);
+ data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs);
data = nfp_mac_get_stats(netdev, data);
data = nfp_app_port_get_stats(nn->port, data);
}
@@ -662,8 +649,7 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
return nfp_vnic_get_sw_stats_count(netdev) +
- nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings,
- nn->dp.num_tx_rings) +
+ nfp_vnic_get_hw_stats_count(nn->max_r_vecs) +
nfp_mac_get_stats_count(netdev) +
nfp_app_port_get_stats_count(nn->port);
default:
@@ -679,7 +665,7 @@ static void nfp_port_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
if (nfp_port_is_vnic(port))
- data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true);
+ data = nfp_vnic_get_hw_stats_strings(data, 0, true);
else
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(port, data);
@@ -694,7 +680,7 @@ nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
struct nfp_port *port = nfp_port_from_netdev(netdev);
if (nfp_port_is_vnic(port))
- data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0);
+ data = nfp_vnic_get_hw_stats(data, port->vnic, 0);
else
data = nfp_mac_get_stats(netdev, data);
data = nfp_app_port_get_stats(port, data);
@@ -708,7 +694,7 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
if (nfp_port_is_vnic(port))
- count = nfp_vnic_get_hw_stats_count(0, 0);
+ count = nfp_vnic_get_hw_stats_count(0);
else
count = nfp_mac_get_stats_count(netdev);
count += nfp_app_port_get_stats_count(port);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index d7b712f6362f..18a09cdcd9c6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -262,6 +262,8 @@ err_port_disable:
}
const struct net_device_ops nfp_repr_netdev_ops = {
+ .ndo_init = nfp_app_ndo_init,
+ .ndo_uninit = nfp_app_ndo_uninit,
.ndo_open = nfp_repr_open,
.ndo_stop = nfp_repr_stop,
.ndo_start_xmit = nfp_repr_xmit,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 749655c329b2..c8d0b1016a64 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -1248,7 +1248,7 @@ static void nfp6000_free(struct nfp_cpp *cpp)
kfree(nfp);
}
-static void nfp6000_read_serial(struct device *dev, u8 *serial)
+static int nfp6000_read_serial(struct device *dev, u8 *serial)
{
struct pci_dev *pdev = to_pci_dev(dev);
int pos;
@@ -1256,25 +1256,29 @@ static void nfp6000_read_serial(struct device *dev, u8 *serial)
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
if (!pos) {
- memset(serial, 0, NFP_SERIAL_LEN);
- return;
+ dev_err(dev, "can't find PCIe Serial Number Capability\n");
+ return -EINVAL;
}
pci_read_config_dword(pdev, pos + 4, &reg);
put_unaligned_be16(reg >> 16, serial + 4);
pci_read_config_dword(pdev, pos + 8, &reg);
put_unaligned_be32(reg, serial);
+
+ return 0;
}
-static u16 nfp6000_get_interface(struct device *dev)
+static int nfp6000_get_interface(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int pos;
u32 reg;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- if (!pos)
- return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff);
+ if (!pos) {
+ dev_err(dev, "can't find PCIe Serial Number Capability\n");
+ return -EINVAL;
+ }
pci_read_config_dword(pdev, pos + 4, &reg);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index b0da3d436850..c338d539fa96 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -364,8 +364,8 @@ struct nfp_cpp_operations {
int (*init)(struct nfp_cpp *cpp);
void (*free)(struct nfp_cpp *cpp);
- void (*read_serial)(struct device *dev, u8 *serial);
- u16 (*get_interface)(struct device *dev);
+ int (*read_serial)(struct device *dev, u8 *serial);
+ int (*get_interface)(struct device *dev);
int (*area_init)(struct nfp_cpp_area *area,
u32 dest, unsigned long long address,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index ef30597aa319..73de57a09800 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -1163,10 +1163,10 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
{
const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0);
struct nfp_cpp *cpp;
+ int ifc, err;
u32 mask[2];
u32 xpbaddr;
size_t tgt;
- int err;
cpp = kzalloc(sizeof(*cpp), GFP_KERNEL);
if (!cpp) {
@@ -1176,9 +1176,19 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
cpp->op = ops;
cpp->priv = priv;
- cpp->interface = ops->get_interface(parent);
- if (ops->read_serial)
- ops->read_serial(parent, cpp->serial);
+
+ ifc = ops->get_interface(parent);
+ if (ifc < 0) {
+ err = ifc;
+ goto err_free_cpp;
+ }
+ cpp->interface = ifc;
+ if (ops->read_serial) {
+ err = ops->read_serial(parent, cpp->serial);
+ if (err)
+ goto err_free_cpp;
+ }
+
rwlock_init(&cpp->resource_lock);
init_waitqueue_head(&cpp->waitq);
lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
@@ -1191,7 +1201,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
err = device_register(&cpp->dev);
if (err < 0) {
put_device(&cpp->dev);
- goto err_dev;
+ goto err_free_cpp;
}
dev_set_drvdata(&cpp->dev, cpp);
@@ -1238,7 +1248,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
err_out:
device_unregister(&cpp->dev);
-err_dev:
+err_free_cpp:
kfree(cpp);
err_malloc:
return ERR_PTR(err);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
index 37a6d7822a38..40510860341b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
@@ -207,7 +207,7 @@ nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr)
* nfp_nffw_info_open() - Acquire the lock on the NFFW table
* @cpp: NFP CPP handle
*
- * Return: 0, or -ERRNO
+ * Return: pointer to nfp_nffw_info object or ERR_PTR()
*/
struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
{
@@ -253,10 +253,8 @@ err_free:
}
/**
- * nfp_nffw_info_release() - Release the lock on the NFFW table
+ * nfp_nffw_info_close() - Release the lock on the NFFW table and free state
* @state: NFP FW info state
- *
- * Return: 0, or -ERRNO
*/
void nfp_nffw_info_close(struct nfp_nffw_info *state)
{
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 09f674ec0f9e..76efed058f33 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -155,7 +155,6 @@ struct nixge_priv {
int tx_irq;
int rx_irq;
- u32 last_link;
/* Buffer descriptors */
struct nixge_hw_dma_bd *tx_bd_v;
@@ -504,7 +503,6 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_skb->skb = skb;
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
- cur_p->app4 = (unsigned long)skb;
tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
/* Start the transfer */
@@ -740,22 +738,12 @@ static void nixge_dma_err_handler(unsigned long data)
cur_p->phys = 0;
cur_p->cntrl = 0;
cur_p->status = 0;
- cur_p->app0 = 0;
- cur_p->app1 = 0;
- cur_p->app2 = 0;
- cur_p->app3 = 0;
- cur_p->app4 = 0;
cur_p->sw_id_offset = 0;
}
for (i = 0; i < RX_BD_NUM; i++) {
cur_p = &lp->rx_bd_v[i];
cur_p->status = 0;
- cur_p->app0 = 0;
- cur_p->app1 = 0;
- cur_p->app2 = 0;
- cur_p->app3 = 0;
- cur_p->app4 = 0;
}
lp->tx_bd_ci = 0;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 7cbd0174459c..1d9b0d44ddb6 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5777,7 +5777,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
(np->rx_ring_size +
np->tx_ring_size),
&np->ring_addr,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!np->rx_ring.orig)
goto out_unmap;
np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
@@ -5786,7 +5786,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
sizeof(struct ring_desc_ex) *
(np->rx_ring_size +
np->tx_ring_size),
- &np->ring_addr, GFP_ATOMIC);
+ &np->ring_addr, GFP_KERNEL);
if (!np->rx_ring.ex)
goto out_unmap;
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
index 31288d4ad248..862de0f3bc41 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_PCH_GBE) += pch_gbe.o
pch_gbe-y := pch_gbe_phy.o pch_gbe_ethtool.o pch_gbe_param.o
-pch_gbe-y += pch_gbe_api.o pch_gbe_main.o
+pch_gbe-y += pch_gbe_main.o
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 697e29dd4bd3..44c2f291e766 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -326,32 +326,6 @@ struct pch_gbe_regs {
#define PCH_GBE_FC_FULL 3
#define PCH_GBE_FC_DEFAULT PCH_GBE_FC_FULL
-
-struct pch_gbe_hw;
-/**
- * struct pch_gbe_functions - HAL APi function pointer
- * @get_bus_info: for pch_gbe_hal_get_bus_info
- * @init_hw: for pch_gbe_hal_init_hw
- * @read_phy_reg: for pch_gbe_hal_read_phy_reg
- * @write_phy_reg: for pch_gbe_hal_write_phy_reg
- * @reset_phy: for pch_gbe_hal_phy_hw_reset
- * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset
- * @power_up_phy: for pch_gbe_hal_power_up_phy
- * @power_down_phy: for pch_gbe_hal_power_down_phy
- * @read_mac_addr: for pch_gbe_hal_read_mac_addr
- */
-struct pch_gbe_functions {
- void (*get_bus_info) (struct pch_gbe_hw *);
- s32 (*init_hw) (struct pch_gbe_hw *);
- s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *);
- s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16);
- void (*reset_phy) (struct pch_gbe_hw *);
- void (*sw_reset_phy) (struct pch_gbe_hw *);
- void (*power_up_phy) (struct pch_gbe_hw *hw);
- void (*power_down_phy) (struct pch_gbe_hw *hw);
- s32 (*read_mac_addr) (struct pch_gbe_hw *);
-};
-
/**
* struct pch_gbe_mac_info - MAC information
* @addr[6]: Store the MAC address
@@ -394,17 +368,6 @@ struct pch_gbe_phy_info {
/*!
* @ingroup Gigabit Ether driver Layer
- * @struct pch_gbe_bus_info
- * @brief Bus information
- */
-struct pch_gbe_bus_info {
- u8 type;
- u8 speed;
- u8 width;
-};
-
-/*!
- * @ingroup Gigabit Ether driver Layer
* @struct pch_gbe_hw
* @brief Hardware information
*/
@@ -414,10 +377,8 @@ struct pch_gbe_hw {
struct pch_gbe_regs __iomem *reg;
spinlock_t miim_lock;
- const struct pch_gbe_functions *func;
struct pch_gbe_mac_info mac;
struct pch_gbe_phy_info phy;
- struct pch_gbe_bus_info bus;
};
/**
@@ -680,7 +641,6 @@ void pch_gbe_set_ethtool_ops(struct net_device *netdev);
/* pch_gbe_mac.c */
s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
-s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
u16 data);
#endif /* _PCH_GBE_H_ */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
deleted file mode 100644
index 51250363566b..000000000000
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
- *
- * This code was derived from the Intel e1000e Linux driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "pch_gbe.h"
-#include "pch_gbe_phy.h"
-#include "pch_gbe_api.h"
-
-/* bus type values */
-#define pch_gbe_bus_type_unknown 0
-#define pch_gbe_bus_type_pci 1
-#define pch_gbe_bus_type_pcix 2
-#define pch_gbe_bus_type_pci_express 3
-#define pch_gbe_bus_type_reserved 4
-
-/* bus speed values */
-#define pch_gbe_bus_speed_unknown 0
-#define pch_gbe_bus_speed_33 1
-#define pch_gbe_bus_speed_66 2
-#define pch_gbe_bus_speed_100 3
-#define pch_gbe_bus_speed_120 4
-#define pch_gbe_bus_speed_133 5
-#define pch_gbe_bus_speed_2500 6
-#define pch_gbe_bus_speed_reserved 7
-
-/* bus width values */
-#define pch_gbe_bus_width_unknown 0
-#define pch_gbe_bus_width_pcie_x1 1
-#define pch_gbe_bus_width_pcie_x2 2
-#define pch_gbe_bus_width_pcie_x4 4
-#define pch_gbe_bus_width_32 5
-#define pch_gbe_bus_width_64 6
-#define pch_gbe_bus_width_reserved 7
-
-/**
- * pch_gbe_plat_get_bus_info - Obtain bus information for adapter
- * @hw: Pointer to the HW structure
- */
-static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
-{
- hw->bus.type = pch_gbe_bus_type_pci_express;
- hw->bus.speed = pch_gbe_bus_speed_2500;
- hw->bus.width = pch_gbe_bus_width_pcie_x1;
-}
-
-/**
- * pch_gbe_plat_init_hw - Initialize hardware
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * Negative value: Failed-EBUSY
- */
-static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw)
-{
- s32 ret_val;
-
- ret_val = pch_gbe_phy_get_id(hw);
- if (ret_val) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
- return ret_val;
- }
- pch_gbe_phy_init_setting(hw);
- /* Setup Mac interface option RGMII */
-#ifdef PCH_GBE_MAC_IFOP_RGMII
- pch_gbe_phy_set_rgmii(hw);
-#endif
- return ret_val;
-}
-
-static const struct pch_gbe_functions pch_gbe_ops = {
- .get_bus_info = pch_gbe_plat_get_bus_info,
- .init_hw = pch_gbe_plat_init_hw,
- .read_phy_reg = pch_gbe_phy_read_reg_miic,
- .write_phy_reg = pch_gbe_phy_write_reg_miic,
- .reset_phy = pch_gbe_phy_hw_reset,
- .sw_reset_phy = pch_gbe_phy_sw_reset,
- .power_up_phy = pch_gbe_phy_power_up,
- .power_down_phy = pch_gbe_phy_power_down,
- .read_mac_addr = pch_gbe_mac_read_mac_addr
-};
-
-/**
- * pch_gbe_plat_init_function_pointers - Init func ptrs
- * @hw: Pointer to the HW structure
- */
-static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
-{
- /* Set PHY parameter */
- hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
- /* Set function pointers */
- hw->func = &pch_gbe_ops;
-}
-
-/**
- * pch_gbe_hal_setup_init_funcs - Initializes function pointers
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw)
-{
- if (!hw->reg) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: Registers not mapped\n");
- return -ENOSYS;
- }
- pch_gbe_plat_init_function_pointers(hw);
- return 0;
-}
-
-/**
- * pch_gbe_hal_get_bus_info - Obtain bus information for adapter
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
-{
- if (!hw->func->get_bus_info) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return;
- }
- hw->func->get_bus_info(hw);
-}
-
-/**
- * pch_gbe_hal_init_hw - Initialize hardware
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
-{
- if (!hw->func->init_hw) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return -ENOSYS;
- }
- return hw->func->init_hw(hw);
-}
-
-/**
- * pch_gbe_hal_read_phy_reg - Reads PHY register
- * @hw: Pointer to the HW structure
- * @offset: The register to read
- * @data: The buffer to store the 16-bit read.
- * Returns:
- * 0: Successfully
- * Negative value: Failed
- */
-s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
- u16 *data)
-{
- if (!hw->func->read_phy_reg)
- return 0;
- return hw->func->read_phy_reg(hw, offset, data);
-}
-
-/**
- * pch_gbe_hal_write_phy_reg - Writes PHY register
- * @hw: Pointer to the HW structure
- * @offset: The register to read
- * @data: The value to write.
- * Returns:
- * 0: Successfully
- * Negative value: Failed
- */
-s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset,
- u16 data)
-{
- if (!hw->func->write_phy_reg)
- return 0;
- return hw->func->write_phy_reg(hw, offset, data);
-}
-
-/**
- * pch_gbe_hal_phy_hw_reset - Hard PHY reset
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw)
-{
- if (!hw->func->reset_phy) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return;
- }
- hw->func->reset_phy(hw);
-}
-
-/**
- * pch_gbe_hal_phy_sw_reset - Soft PHY reset
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
-{
- if (!hw->func->sw_reset_phy) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return;
- }
- hw->func->sw_reset_phy(hw);
-}
-
-/**
- * pch_gbe_hal_read_mac_addr - Reads MAC address
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw)
-{
- if (!hw->func->read_mac_addr) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return -ENOSYS;
- }
- return hw->func->read_mac_addr(hw);
-}
-
-/**
- * pch_gbe_hal_power_up_phy - Power up PHY
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw)
-{
- if (hw->func->power_up_phy)
- hw->func->power_up_phy(hw);
-}
-
-/**
- * pch_gbe_hal_power_down_phy - Power down PHY
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw)
-{
- if (hw->func->power_down_phy)
- hw->func->power_down_phy(hw);
-}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
deleted file mode 100644
index 91ce07c8306c..000000000000
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
- *
- * This code was derived from the Intel e1000e Linux driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef _PCH_GBE_API_H_
-#define _PCH_GBE_API_H_
-
-#include "pch_gbe_phy.h"
-
-s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw);
-void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data);
-s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data);
-void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw);
-void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw);
-void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw);
-void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw);
-
-#endif
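
With pch_gbe_api.[ch] gone, the single-implementation pch_gbe_functions vtable is replaced by direct calls throughout the driver, which also lets several helpers become static in later hunks. A hedged sketch (hypothetical wrapper name) of what the conversion does mechanically:

/* Before: every operation bounced through a vtable with one implementation,
 *	hw->func->read_phy_reg(hw, offset, &data);
 * After: the caller names the only implementation directly.
 */
static inline s32 example_read_phy(struct pch_gbe_hw *hw, u32 offset, u16 *data)
{
	return pch_gbe_phy_read_reg_miic(hw, offset, data);
}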
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 731ce1e419e4..adaa0024adfe 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -17,7 +17,7 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
-#include "pch_gbe_api.h"
+#include "pch_gbe_phy.h"
/**
* pch_gbe_stats - Stats item information
@@ -125,7 +125,7 @@ static int pch_gbe_set_link_ksettings(struct net_device *netdev,
u32 advertising;
int ret;
- pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
+ pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET);
memcpy(&copy_ecmd, ecmd, sizeof(*ecmd));
@@ -204,7 +204,7 @@ static void pch_gbe_get_regs(struct net_device *netdev,
*regs_buff++ = ioread32(&hw->reg->INT_ST + i);
/* PHY register */
for (i = 0; i < PCH_GBE_PHY_REGS_LEN; i++) {
- pch_gbe_hal_read_phy_reg(&adapter->hw, i, &tmp);
+ pch_gbe_phy_read_reg_miic(&adapter->hw, i, &tmp);
*regs_buff++ = tmp;
}
}
@@ -349,25 +349,12 @@ static int pch_gbe_set_ringparam(struct net_device *netdev,
err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
if (err)
goto err_setup_tx;
- /* save the new, restore the old in order to free it,
- * then restore the new back again */
-#ifdef RINGFREE
- adapter->rx_ring = rx_old;
- adapter->tx_ring = tx_old;
- pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
- pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
- kfree(tx_old);
- kfree(rx_old);
- adapter->rx_ring = rxdr;
- adapter->tx_ring = txdr;
-#else
pch_gbe_free_rx_resources(adapter, rx_old);
pch_gbe_free_tx_resources(adapter, tx_old);
kfree(tx_old);
kfree(rx_old);
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
-#endif
err = pch_gbe_up(adapter);
}
return err;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 34a1581eda95..43c0c10dfeb7 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -18,7 +18,7 @@
*/
#include "pch_gbe.h"
-#include "pch_gbe_api.h"
+#include "pch_gbe_phy.h"
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
@@ -34,7 +34,6 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_DMA_ALIGN 0
#define PCH_GBE_DMA_PADDING 2
#define PCH_GBE_WATCHDOG_PERIOD (5 * HZ) /* watchdog time */
-#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1
#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
@@ -113,8 +112,6 @@ const char pch_driver_version[] = DRV_VERSION;
#define MINNOW_PHY_RESET_GPIO 13
-static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
-
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
@@ -290,7 +287,7 @@ static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
* Returns:
* 0: Successful.
*/
-s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
+static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
{
struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
u32 adr1a, adr1b;
@@ -369,9 +366,7 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
/* Read the MAC address. and store to the private data */
pch_gbe_mac_read_mac_addr(hw);
iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
-#ifdef PCH_GBE_MAC_IFOP_RGMII
iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
-#endif
pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
/* Setup the receive addresses */
pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
@@ -416,44 +411,6 @@ static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}
-
-/**
- * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
- * @hw: Pointer to the HW structure
- * @mc_addr_list: Array of multicast addresses to program
- * @mc_addr_count: Number of multicast addresses to program
- * @mar_used_count: The first MAC Address register free to program
- * @mar_total_num: Total number of supported MAC Address Registers
- */
-static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count,
- u32 mar_used_count, u32 mar_total_num)
-{
- u32 i, adrmask;
-
- /* Load the first set of multicast addresses into the exact
- * filters (RAR). If there are not enough to fill the RAR
- * array, clear the filters.
- */
- for (i = mar_used_count; i < mar_total_num; i++) {
- if (mc_addr_count) {
- pch_gbe_mac_mar_set(hw, mc_addr_list, i);
- mc_addr_count--;
- mc_addr_list += ETH_ALEN;
- } else {
- /* Clear MAC address mask */
- adrmask = ioread32(&hw->reg->ADDR_MASK);
- iowrite32((adrmask | (0x0001 << i)),
- &hw->reg->ADDR_MASK);
- /* wait busy */
- pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
- /* Clear MAC address */
- iowrite32(0, &hw->reg->mac_adr[i].high);
- iowrite32(0, &hw->reg->mac_adr[i].low);
- }
- }
-}
-
/**
* pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
* @hw: Pointer to the HW structure
@@ -763,14 +720,23 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+ s32 ret_val;
- pch_gbe_mac_reset_hw(&adapter->hw);
+ pch_gbe_mac_reset_hw(hw);
/* reprogram multicast address register after reset */
pch_gbe_set_multi(netdev);
/* Setup the receive address. */
- pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
- if (pch_gbe_hal_init_hw(&adapter->hw))
- netdev_err(netdev, "Hardware Error\n");
+ pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES);
+
+ ret_val = pch_gbe_phy_get_id(hw);
+ if (ret_val) {
+ netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
+ return;
+ }
+ pch_gbe_phy_init_setting(hw);
+ /* Setup Mac interface option RGMII */
+ pch_gbe_phy_set_rgmii(hw);
}
/**
@@ -1036,7 +1002,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
unsigned long rgmii = 0;
/* Set the RGMII control. */
-#ifdef PCH_GBE_MAC_IFOP_RGMII
switch (speed) {
case SPEED_10:
rgmii = (PCH_GBE_RGMII_RATE_2_5M |
@@ -1052,10 +1017,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
break;
}
iowrite32(rgmii, &hw->reg->RGMII_CTRL);
-#else /* GMII */
- rgmii = 0;
- iowrite32(rgmii, &hw->reg->RGMII_CTRL);
-#endif
}
static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
u16 duplex)
@@ -2029,12 +1990,8 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
- /* Initialize the hardware-specific values */
- if (pch_gbe_hal_setup_init_funcs(hw)) {
- netdev_err(netdev, "Hardware Initialization Failure\n");
- return -EIO;
- }
if (pch_gbe_alloc_queues(adapter)) {
netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
@@ -2075,7 +2032,7 @@ static int pch_gbe_open(struct net_device *netdev)
err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
if (err)
goto err_setup_rx;
- pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_phy_power_up(hw);
err = pch_gbe_up(adapter);
if (err)
goto err_up;
@@ -2084,7 +2041,7 @@ static int pch_gbe_open(struct net_device *netdev)
err_up:
if (!adapter->wake_up_evt)
- pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_phy_power_down(hw);
pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
@@ -2107,7 +2064,7 @@ static int pch_gbe_stop(struct net_device *netdev)
pch_gbe_down(adapter);
if (!adapter->wake_up_evt)
- pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_phy_power_down(hw);
pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
return 0;
@@ -2148,50 +2105,52 @@ static void pch_gbe_set_multi(struct net_device *netdev)
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_hw *hw = &adapter->hw;
struct netdev_hw_addr *ha;
- u8 *mta_list;
- u32 rctl;
- int i;
- int mc_count;
+ u32 rctl, adrmask;
+ int mc_count, i;
netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags);
- /* Check for Promiscuous and All Multicast modes */
+ /* By default enable address & multicast filtering */
rctl = ioread32(&hw->reg->RX_MODE);
+ rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN;
+
+ /* Promiscuous mode disables all hardware address filtering */
+ if (netdev->flags & IFF_PROMISC)
+ rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
+
+ /* If we want to monitor more multicast addresses than the hardware can
+ * support then disable hardware multicast filtering.
+ */
mc_count = netdev_mc_count(netdev);
- if ((netdev->flags & IFF_PROMISC)) {
- rctl &= ~PCH_GBE_ADD_FIL_EN;
- rctl &= ~PCH_GBE_MLT_FIL_EN;
- } else if ((netdev->flags & IFF_ALLMULTI)) {
- /* all the multicasting receive permissions */
- rctl |= PCH_GBE_ADD_FIL_EN;
+ if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES)
rctl &= ~PCH_GBE_MLT_FIL_EN;
- } else {
- if (mc_count >= PCH_GBE_MAR_ENTRIES) {
- /* all the multicasting receive permissions */
- rctl |= PCH_GBE_ADD_FIL_EN;
- rctl &= ~PCH_GBE_MLT_FIL_EN;
- } else {
- rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
- }
- }
+
iowrite32(rctl, &hw->reg->RX_MODE);
- if (mc_count >= PCH_GBE_MAR_ENTRIES)
- return;
- mta_list = kmalloc_array(ETH_ALEN, mc_count, GFP_ATOMIC);
- if (!mta_list)
+ /* If we're not using multicast filtering then there's no point
+ * configuring the unused MAC address registers.
+ */
+ if (!(rctl & PCH_GBE_MLT_FIL_EN))
return;
- /* The shared function expects a packed array of only addresses. */
- i = 0;
- netdev_for_each_mc_addr(ha, netdev) {
- if (i == mc_count)
- break;
- memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
+ /* Load the first set of multicast addresses into MAC address registers
+ * for use by hardware filtering.
+ */
+ i = 1;
+ netdev_for_each_mc_addr(ha, netdev)
+ pch_gbe_mac_mar_set(hw, ha->addr, i++);
+
+ /* If there are spare MAC registers, mask & clear them */
+ for (; i < PCH_GBE_MAR_ENTRIES; i++) {
+ /* Clear MAC address mask */
+ adrmask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+ /* Clear MAC address */
+ iowrite32(0, &hw->reg->mac_adr[i].high);
+ iowrite32(0, &hw->reg->mac_adr[i].low);
}
- pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
- PCH_GBE_MAR_ENTRIES);
- kfree(mta_list);
netdev_dbg(netdev,
"RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
@@ -2437,7 +2396,7 @@ static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
}
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D0, 0);
- pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_phy_power_up(hw);
pch_gbe_reset(adapter);
/* Clear wake up status */
pch_gbe_mac_set_wol_event(hw, 0);
@@ -2482,7 +2441,7 @@ static int __pch_gbe_suspend(struct pci_dev *pdev)
pch_gbe_mac_set_wol_event(hw, wufc);
pci_disable_device(pdev);
} else {
- pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_phy_power_down(hw);
pch_gbe_mac_set_wol_event(hw, wufc);
pci_disable_device(pdev);
}
@@ -2511,7 +2470,7 @@ static int pch_gbe_resume(struct device *device)
return err;
}
pci_set_master(pdev);
- pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_phy_power_up(hw);
pch_gbe_reset(adapter);
/* Clear wake on lan control and status */
pch_gbe_mac_set_wol_event(hw, 0);
@@ -2541,7 +2500,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->reset_task);
unregister_netdev(netdev);
- pch_gbe_hal_phy_hw_reset(&adapter->hw);
+ pch_gbe_phy_hw_reset(&adapter->hw);
free_netdev(netdev);
}
@@ -2627,10 +2586,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "PHY initialize error\n");
goto err_free_adapter;
}
- pch_gbe_hal_get_bus_info(&adapter->hw);
/* Read the MAC address. and store to the private data */
- ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
+ ret = pch_gbe_mac_read_mac_addr(&adapter->hw);
if (ret) {
dev_err(&pdev->dev, "MAC address Read Error\n");
goto err_free_adapter;
@@ -2677,7 +2635,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
return 0;
err_free_adapter:
- pch_gbe_hal_phy_hw_reset(&adapter->hw);
+ pch_gbe_phy_hw_reset(&adapter->hw);
err_free_netdev:
free_netdev(netdev);
return ret;
@@ -2776,32 +2734,7 @@ static struct pci_driver pch_gbe_driver = {
.shutdown = pch_gbe_shutdown,
.err_handler = &pch_gbe_err_handler
};
-
-
-static int __init pch_gbe_init_module(void)
-{
- int ret;
-
- pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n",DRV_VERSION);
- ret = pci_register_driver(&pch_gbe_driver);
- if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
- if (copybreak == 0) {
- pr_info("copybreak disabled\n");
- } else {
- pr_info("copybreak enabled for packets <= %u bytes\n",
- copybreak);
- }
- }
- return ret;
-}
-
-static void __exit pch_gbe_exit_module(void)
-{
- pci_unregister_driver(&pch_gbe_driver);
-}
-
-module_init(pch_gbe_init_module);
-module_exit(pch_gbe_exit_module);
+module_pci_driver(pch_gbe_driver);
MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
@@ -2809,8 +2742,4 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
-module_param(copybreak, uint, 0644);
-MODULE_PARM_DESC(copybreak,
- "Maximum size of packet that is copied to a new buffer on receive");
-
/* pch_gbe_main.c */
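
The module_pci_driver() conversion is pure boilerplate removal, possible only once the copybreak parameter (whose banner lived in the hand-written init) was dropped. Roughly what the single macro line expands to:

/* Approximate expansion of module_pci_driver(pch_gbe_driver): */
static int __init pch_gbe_driver_init(void)
{
	return pci_register_driver(&pch_gbe_driver);
}
module_init(pch_gbe_driver_init);

static void __exit pch_gbe_driver_exit(void)
{
	pci_unregister_driver(&pch_gbe_driver);
}
module_exit(pch_gbe_driver_exit);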
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index a5cad5ea9436..6b35b573beef 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -184,7 +184,7 @@ s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data)
* pch_gbe_phy_sw_reset - PHY software reset
* @hw: Pointer to the HW structure
*/
-void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
+static void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
{
u16 phy_ctrl;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 95ad0151ad02..23ac38711619 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -21,12 +21,10 @@
#define PCH_GBE_PHY_REGS_LEN 32
#define PCH_GBE_PHY_RESET_DELAY_US 10
-#define PCH_GBE_MAC_IFOP_RGMII
s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw);
s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data);
s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data);
-void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw);
void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw);
void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
index b5ea2a56106e..1df28f2edd1f 100644
--- a/drivers/net/ethernet/packetengines/Kconfig
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -2,7 +2,7 @@
# Packet engine device configuration
#
-config NET_PACKET_ENGINE
+config NET_VENDOR_PACKET_ENGINES
bool "Packet Engine devices"
default y
depends on PCI
@@ -14,7 +14,7 @@ config NET_PACKET_ENGINE
the questions about packet engine devices. If you say Y, you will
be asked for your specific card in the following questions.
-if NET_PACKET_ENGINE
+if NET_VENDOR_PACKET_ENGINES
config HAMACHI
tristate "Packet Engines Hamachi GNIC-II support"
@@ -40,4 +40,4 @@ config YELLOWFIN
To compile this driver as a module, choose M here: the module
will be called yellowfin. This is recommended.
-endif # NET_PACKET_ENGINE
+endif # NET_VENDOR_PACKET_ENGINES
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 3157f97dd782..3c1be87cdfa5 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -167,9 +167,9 @@ skip:
case NETXEN_BRDTYPE_P3_REF_QG:
case NETXEN_BRDTYPE_P3_4_GB:
case NETXEN_BRDTYPE_P3_4_GB_MM:
-
supported |= SUPPORTED_Autoneg;
advertising |= ADVERTISED_Autoneg;
+ /* fall through */
case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4_LP:
@@ -198,6 +198,7 @@ skip:
supported |= SUPPORTED_TP;
check_sfp_module = netif_running(dev) &&
adapter->has_link_events;
+ /* fall through */
case NETXEN_BRDTYPE_P2_SB31_10G:
case NETXEN_BRDTYPE_P3_10G_XFP:
supported |= SUPPORTED_FIBRE;
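
The two netxen hunks above add no logic; they document intentional case fall-through so GCC's -Wimplicit-fallthrough stays quiet. The comment must sit immediately before the next case label for the compiler to match it. A minimal illustration with hypothetical values:

static u32 example_supported(int board)
{
	u32 supported = 0;

	switch (board) {
	case 0:				/* hypothetical 1G copper board */
		supported |= BIT(0);	/* autoneg */
		/* fall through */
	case 1:				/* hypothetical 10G board */
		supported |= BIT(1);	/* fibre, common to both */
		break;
	}
	return supported;
}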
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 1cd39c9a0345..52ad80621335 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -566,9 +566,8 @@ static int
netxen_send_cmd_descs(struct netxen_adapter *adapter,
struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
{
- u32 i, producer, consumer;
+ u32 i, producer;
struct netxen_cmd_buffer *pbuf;
- struct cmd_desc_type0 *cmd_desc;
struct nx_host_tx_ring *tx_ring;
i = 0;
@@ -580,7 +579,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
__netif_tx_lock_bh(tx_ring->txq);
producer = tx_ring->producer;
- consumer = tx_ring->sw_consumer;
if (nr_desc >= netxen_tx_avail(tx_ring)) {
netif_tx_stop_queue(tx_ring->txq);
@@ -595,8 +593,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
}
do {
- cmd_desc = &cmd_desc_arr[i];
-
pbuf = &tx_ring->cmd_buf_arr[producer];
pbuf->skb = NULL;
pbuf->frag_count = 0;
@@ -2350,7 +2346,7 @@ static int netxen_md_entry_err_chk(struct netxen_adapter *adapter,
static int netxen_parse_md_template(struct netxen_adapter *adapter)
{
int num_of_entries, buff_level, e_cnt, esize;
- int end_cnt = 0, rv = 0, sane_start = 0, sane_end = 0;
+ int rv = 0, sane_start = 0, sane_end = 0;
char *dbuff;
void *template_buff = adapter->mdump.md_template;
char *dump_buff = adapter->mdump.md_capture_buff;
@@ -2386,8 +2382,6 @@ static int netxen_parse_md_template(struct netxen_adapter *adapter)
break;
case RDEND:
entry->hdr.driver_flags |= NX_DUMP_SKIP;
- if (!sane_end)
- end_cnt = e_cnt;
sane_end += 1;
break;
case CNTRL:
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 8259e8309320..69aa7fc392c5 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2073,7 +2073,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct skb_frag_struct *frag;
u32 producer;
- int frag_count, no_of_desc;
+ int frag_count;
u32 num_txd = tx_ring->num_desc;
frag_count = skb_shinfo(skb)->nr_frags + 1;
@@ -2093,8 +2093,6 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
frag_count = 1 + skb_shinfo(skb)->nr_frags;
}
- /* 4 fragments per cmd des */
- no_of_desc = (frag_count + 3) >> 2;
if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
netif_stop_queue(netdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 1dfaccd151f0..a60e1c8d470a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -336,6 +336,10 @@ struct qed_hw_info {
*/
u8 num_active_tc;
u8 offload_tc;
+ bool offload_tc_set;
+
+ bool multi_tc_roce_en;
+#define IS_QED_MULTI_TC_ROCE(p_hwfn) (((p_hwfn)->hw_info.multi_tc_roce_en))
u32 concrete_fid;
u16 opaque_fid;
@@ -399,8 +403,8 @@ struct qed_qm_info {
u16 start_pq;
u8 start_vport;
u16 pure_lb_pq;
- u16 offload_pq;
- u16 low_latency_pq;
+ u16 first_ofld_pq;
+ u16 first_llt_pq;
u16 pure_ack_pq;
u16 ooo_pq;
u16 first_vf_pq;
@@ -881,11 +885,14 @@ void qed_set_fw_mac_addr(__le16 *fw_msb,
#define PQ_FLAGS_OFLD (BIT(5))
#define PQ_FLAGS_VFS (BIT(6))
#define PQ_FLAGS_LLT (BIT(7))
+#define PQ_FLAGS_MTC (BIT(8))
/* physical queue index for cm context initialization */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
+u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
+u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
@@ -921,4 +928,6 @@ int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
enum qed_mfw_tlv_type type,
union qed_mfw_tlv_data *tlv_data);
+
+void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc);
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index b5b5ff725426..f1977aa440e5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1531,7 +1531,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
}
/* CM PF */
-void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+static void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
{
/* XCM pure-LB queue */
STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index e0680ce91328..6bb76e6d3c14 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -208,7 +208,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
/* QM reconf data */
if (p_info->personality == personality)
- p_info->offload_tc = tc;
+ qed_hw_info_set_offload_tc(p_info, tc);
}
/* Update app protocol data and hw_info fields with the TLV info */
@@ -221,7 +221,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
struct qed_hw_info *p_info = &p_hwfn->hw_info;
enum qed_pci_personality personality;
enum dcbx_protocol_type id;
- char *name;
int i;
for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
@@ -231,7 +230,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
continue;
personality = qed_dcbx_app_update[i].personality;
- name = qed_dcbx_app_update[i].name;
qed_dcbx_set_params(p_data, p_info, enable,
prio, tc, type, personality);
@@ -869,7 +867,7 @@ static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
return rc;
}
-void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type)
+static void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type)
{
struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
void *cookie = hwfn->cdev->ops_cookie;
@@ -991,6 +989,24 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
}
+u8 qed_dcbx_get_priority_tc(struct qed_hwfn *p_hwfn, u8 pri)
+{
+ struct qed_dcbx_get *dcbx_info = &p_hwfn->p_dcbx_info->get;
+
+ if (pri >= QED_MAX_PFC_PRIORITIES) {
+ DP_ERR(p_hwfn, "Invalid priority %d\n", pri);
+ return QED_DCBX_DEFAULT_TC;
+ }
+
+ if (!dcbx_info->operational.valid) {
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "Dcbx parameters not available\n");
+ return QED_DCBX_DEFAULT_TC;
+ }
+
+ return dcbx_info->operational.params.ets_pri_tc_tbl[pri];
+}
+
#ifdef CONFIG_DCB
static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
struct qed_dcbx_get *p_get,
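
qed_dcbx_get_priority_tc() is a guarded table lookup: an out-of-range priority and missing operational DCBX data both degrade to QED_DCBX_DEFAULT_TC rather than failing. Its core as a hedged standalone sketch (a table size of 8 is assumed from QED_MAX_PFC_PRIORITIES):

static u8 example_pri_to_tc(const u8 *pri_tc_tbl, bool dcbx_valid, u8 pri)
{
	if (pri >= 8 || !dcbx_valid)	/* assumes QED_MAX_PFC_PRIORITIES == 8 */
		return 0;		/* QED_DCBX_DEFAULT_TC */
	return pri_tc_tbl[pri];
}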
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index 5feb90e049e0..a4d688c04e18 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -123,4 +123,7 @@ void qed_dcbx_info_free(struct qed_hwfn *p_hwfn);
void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
+#define QED_DCBX_DEFAULT_TC 0
+
+u8 qed_dcbx_get_priority_tc(struct qed_hwfn *p_hwfn, u8 pri);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 4340c4c90bcb..1aa9fc1c5890 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -7838,8 +7838,8 @@ int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
}
-int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
- enum qed_nvm_images image_id, u32 *length)
+static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
+ enum qed_nvm_images image_id, u32 *length)
{
struct qed_nvm_image_att image_att;
int rc;
@@ -7854,8 +7854,9 @@ int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
- u32 *num_dumped_bytes, enum qed_nvm_images image_id)
+static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes,
+ enum qed_nvm_images image_id)
{
struct qed_hwfn *p_hwfn =
&cdev->hwfns[cdev->dbg_params.engine_for_debug];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index e5249b4741d0..016ca8a7ec8a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -215,6 +215,8 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
break;
case QED_PCI_ETH_ROCE:
flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+ if (IS_QED_MULTI_TC_ROCE(p_hwfn))
+ flags |= PQ_FLAGS_MTC;
break;
case QED_PCI_ETH_IWARP:
flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
@@ -230,20 +232,30 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
}
/* Getters for resource amounts necessary for qm initialization */
-u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
+static u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
{
return p_hwfn->hw_info.num_hw_tc;
}
-u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
{
return IS_QED_SRIOV(p_hwfn->cdev) ?
p_hwfn->cdev->p_iov_info->total_vfs : 0;
}
+static u8 qed_init_qm_get_num_mtc_tcs(struct qed_hwfn *p_hwfn)
+{
+ u32 pq_flags = qed_get_pq_flags(p_hwfn);
+
+ if (!(PQ_FLAGS_MTC & pq_flags))
+ return 1;
+
+ return qed_init_qm_get_num_tcs(p_hwfn);
+}
+
#define NUM_DEFAULT_RLS 1
-u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
{
u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
@@ -261,7 +273,7 @@ u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
return num_pf_rls;
}
-u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
{
u32 pq_flags = qed_get_pq_flags(p_hwfn);
@@ -273,7 +285,7 @@ u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
}
/* calc amount of PQs according to the requested flags */
-u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
{
u32 pq_flags = qed_get_pq_flags(p_hwfn);
@@ -282,8 +294,11 @@ u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
(!!(PQ_FLAGS_MCOS & pq_flags)) *
qed_init_qm_get_num_tcs(p_hwfn) +
(!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
- (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) +
- (!!(PQ_FLAGS_LLT & pq_flags)) +
+ (!!(PQ_FLAGS_ACK & pq_flags)) +
+ (!!(PQ_FLAGS_OFLD & pq_flags)) *
+ qed_init_qm_get_num_mtc_tcs(p_hwfn) +
+ (!!(PQ_FLAGS_LLT & pq_flags)) *
+ qed_init_qm_get_num_mtc_tcs(p_hwfn) +
(!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
}
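
The PQ accounting above leans on !!(flags & bit) evaluating to 0 or 1, so each optional group contributes either nothing or its full size; the change is that the offload and low-latency groups now scale with the multi-TC count instead of contributing one PQ each. A hedged restatement (rate-limiter PQs omitted for brevity):

static u16 example_num_pqs(u32 pq_flags, u16 num_tcs, u16 num_mtc_tcs,
			   u16 num_vfs)
{
	return (!!(pq_flags & PQ_FLAGS_MCOS)) * num_tcs +
	       (!!(pq_flags & PQ_FLAGS_LB)) +
	       (!!(pq_flags & PQ_FLAGS_OOO)) +
	       (!!(pq_flags & PQ_FLAGS_ACK)) +
	       (!!(pq_flags & PQ_FLAGS_OFLD)) * num_mtc_tcs +	/* was * 1 */
	       (!!(pq_flags & PQ_FLAGS_LLT)) * num_mtc_tcs +	/* was * 1 */
	       (!!(pq_flags & PQ_FLAGS_VFS)) * num_vfs;
}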
@@ -394,7 +409,25 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
/* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP 1
#define PQ_INIT_DEFAULT_TC 0
-#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc)
+
+void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc)
+{
+ p_info->offload_tc = tc;
+ p_info->offload_tc_set = true;
+}
+
+static bool qed_is_offload_tc_set(struct qed_hwfn *p_hwfn)
+{
+ return p_hwfn->hw_info.offload_tc_set;
+}
+
+static u32 qed_get_offload_tc(struct qed_hwfn *p_hwfn)
+{
+ if (qed_is_offload_tc_set(p_hwfn))
+ return p_hwfn->hw_info.offload_tc;
+
+ return PQ_INIT_DEFAULT_TC;
+}
static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info,
@@ -456,9 +489,9 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
case PQ_FLAGS_ACK:
return &qm_info->pure_ack_pq;
case PQ_FLAGS_OFLD:
- return &qm_info->offload_pq;
+ return &qm_info->first_ofld_pq;
case PQ_FLAGS_LLT:
- return &qm_info->low_latency_pq;
+ return &qm_info->first_llt_pq;
case PQ_FLAGS_VFS:
return &qm_info->first_vf_pq;
default:
@@ -507,14 +540,26 @@ u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
}
-u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl)
+u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
+{
+ u16 first_ofld_pq, pq_offset;
+
+ first_ofld_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
+ tc : PQ_INIT_DEFAULT_TC;
+
+ return first_ofld_pq + pq_offset;
+}
+
+u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc)
{
- u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn);
+ u16 first_llt_pq, pq_offset;
- if (rl > max_rl)
- DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+ first_llt_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);
+ pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
+ tc : PQ_INIT_DEFAULT_TC;
- return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+ return first_llt_pq + pq_offset;
}
/* Functions for creating specific types of pqs */
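
qed_get_cm_pq_idx_ofld_mtc() and its low-latency twin both resolve to base-plus-offset into a contiguous per-TC PQ block, clamping an out-of-range TC to the default. In isolation (hedged sketch):

static u16 example_pq_for_tc(u16 first_pq, u8 tc, u8 num_mtc_tcs)
{
	u16 off = (tc < num_mtc_tcs) ? tc : 0;	/* PQ_INIT_DEFAULT_TC */

	return first_pq + off;	/* per-TC PQs are laid out contiguously */
}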
@@ -548,7 +593,22 @@ static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
return;
qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
- qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+ qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
+ PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_mtc_pqs(struct qed_hwfn *p_hwfn)
+{
+ u8 num_tcs = qed_init_qm_get_num_mtc_tcs(p_hwfn);
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 tc;
+
+ /* override pq's TC if offload TC is set */
+ for (tc = 0; tc < num_tcs; tc++)
+ qed_init_qm_pq(p_hwfn, qm_info,
+ qed_is_offload_tc_set(p_hwfn) ?
+ p_hwfn->hw_info.offload_tc : tc,
+ PQ_INIT_SHARE_VPORT);
}
static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
@@ -559,7 +619,7 @@ static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
return;
qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
- qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+ qed_init_qm_mtc_pqs(p_hwfn);
}
static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
@@ -570,7 +630,7 @@ static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
return;
qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
- qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+ qed_init_qm_mtc_pqs(p_hwfn);
}
static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
@@ -611,7 +671,8 @@ static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
- qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL);
+ qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
+ PQ_INIT_PF_RL);
}
static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
@@ -652,12 +713,19 @@ static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
return -EINVAL;
}
- if (qed_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, QED_PQ)) {
- DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
- return -EINVAL;
+ if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
+ return 0;
+
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
+ p_hwfn->hw_info.multi_tc_roce_en = 0;
+ DP_NOTICE(p_hwfn,
+ "multi-tc roce was disabled to reduce requested amount of pqs\n");
+ if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
+ return 0;
}
- return 0;
+ DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+ return -EINVAL;
}
static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
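
qed_init_qm_sanity() now degrades before it fails: when the PQ budget is exceeded on a RoCE personality, it sheds the optional multi-TC RoCE PQs and re-checks, and only then errors out. The shape of that retry, as a hedged sketch:

#include <linux/errno.h>

static int example_qm_sanity(u16 num_pqs_full, u16 num_pqs_single_tc,
			     u16 budget, bool *multi_tc_roce_en)
{
	if (num_pqs_full <= budget)
		return 0;

	if (*multi_tc_roce_en) {
		/* Shedding multi-TC RoCE shrinks the OFLD/LLT groups to 1 TC. */
		*multi_tc_roce_en = false;
		if (num_pqs_single_tc <= budget)
			return 0;
	}
	return -EINVAL;
}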
@@ -671,11 +739,13 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
/* top level params */
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
- "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
+ "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, llt_pq %d, pure_ack_pq %d\n",
qm_info->start_pq,
qm_info->start_vport,
qm_info->pure_lb_pq,
- qm_info->offload_pq, qm_info->pure_ack_pq);
+ qm_info->first_ofld_pq,
+ qm_info->first_llt_pq,
+ qm_info->pure_ack_pq);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
"ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
@@ -1719,14 +1789,14 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
p_hwfn->hw_info.hw_mode);
if (rc)
break;
- /* Fall into */
+ /* Fall through */
case FW_MSG_CODE_DRV_LOAD_PORT:
rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.hw_mode);
if (rc)
break;
- /* Fall into */
+ /* Fall through */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
p_params->p_tunn,
@@ -2908,6 +2978,9 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.personality = protocol;
}
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn))
+ p_hwfn->hw_info.multi_tc_roce_en = 1;
+
p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
p_hwfn->hw_info.num_active_tc = 1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index bee10c1781fb..8faceb691657 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12444,6 +12444,8 @@ struct public_drv_mb {
#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
+
#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
@@ -12543,6 +12545,15 @@ struct public_drv_mb {
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000
+
/* Resource Allocation params - Driver version support */
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
@@ -12596,6 +12607,9 @@ struct public_drv_mb {
#define FW_MSG_CODE_PHY_OK 0x00110000
#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_CODE_ERROR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000
#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000
@@ -12687,6 +12701,8 @@ struct mcp_public_data {
struct public_func func[MCP_GLOB_FUNC_MAX];
};
+#define MAX_I2C_TRANSACTION_SIZE 16
+
/* OCBB definitions */
enum tlvs {
/* Category 1: Device Properties */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d845badf9b90..d6430dfebd83 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -1225,19 +1225,6 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
0);
}
-void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
-{
- u32 rfs_cm_hdr_event_id;
-
- /* Set RFS event ID to be awakened i Tstorm By Prs */
- rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
- rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
- PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
- rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
- PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
- qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
-}
-
void qed_gft_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 pf_id,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index c0d4a54a5edb..1135387bd99d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -873,8 +873,8 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
}
-void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_conn *p_conn)
+static void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn)
{
qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 90a2b53096e2..17f3dfa2cc94 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -377,7 +377,7 @@ qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
}
}
-const char *iwarp_state_names[] = {
+static const char *iwarp_state_names[] = {
"IDLE",
"RTS",
"TERMINATE",
@@ -942,7 +942,7 @@ qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
}
-void
+static void
qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
struct mpa_v2_hdr *mpa_v2_params;
@@ -967,7 +967,7 @@ qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
mpa_data_size;
}
-void
+static void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
struct qed_iwarp_cm_event_params params;
@@ -2500,7 +2500,7 @@ static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
/* The only slowpath for iwarp ll2 is unalign flush. When this completion
* is received, need to reset the FPDU.
*/
-void
+static void
qed_iwarp_ll2_slowpath(void *cxt,
u8 connection_handle,
u32 opaque_data_0, u32 opaque_data_1)
@@ -2803,8 +2803,9 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
}
-void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
- struct qed_iwarp_ep *ep, u8 fw_return_code)
+static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep,
+ u8 fw_return_code)
{
struct qed_iwarp_cm_event_params params;
@@ -2824,8 +2825,9 @@ void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
ep->event_cb(ep->cb_context, &params);
}
-void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
- struct qed_iwarp_ep *ep, int fw_ret_code)
+static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep,
+ int fw_ret_code)
{
struct qed_iwarp_cm_event_params params;
bool event_cb = false;
@@ -2954,7 +2956,7 @@ qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
}
}
-void
+static void
qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
struct qed_iwarp_ep *ep, u8 fw_return_code)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 5ede6408649d..82a1bd1f8a8c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -2188,16 +2188,17 @@ out:
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
struct qed_dev_eth_info *info)
{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
int i;
memset(info, 0, sizeof(*info));
- info->num_tc = 1;
-
if (IS_PF(cdev)) {
int max_vf_vlan_filters = 0;
int max_vf_mac_filters = 0;
+ info->num_tc = p_hwfn->hw_info.num_hw_tc;
+
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
u16 num_queues = 0;
@@ -2248,6 +2249,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
} else {
u16 total_cids = 0;
+ info->num_tc = 1;
+
/* Determine queues & XDP support */
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -2554,7 +2557,7 @@ static int qed_start_txq(struct qed_dev *cdev,
rc = qed_eth_tx_queue_start(p_hwfn,
p_hwfn->hw_info.opaque_fid,
- p_params, 0,
+ p_params, p_params->tc,
pbl_addr, pbl_size, ret_params);
if (rc) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 012973d75ad0..14ac9cab2653 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -158,7 +158,8 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
qed_ll2_dealloc_buffer(cdev, buffer);
}
-void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
+static void qed_ll2b_complete_rx_packet(void *cxt,
+ struct qed_ll2_comp_rx_data *data)
{
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_buffer *buffer = data->cookie;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 758a9a5127fa..2094d86a7a08 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -948,13 +948,14 @@ static void qed_update_pf_params(struct qed_dev *cdev,
params->eth_pf_params.num_arfs_filters = 0;
/* In case we might support RDMA, don't allow qede to be greedy
- * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
+ * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
+ * per hwfn.
*/
if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
u16 *num_cons;
num_cons = &params->eth_pf_params.num_cons;
- *num_cons = min_t(u16, *num_cons, 192);
+ *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
}
for (i = 0; i < cdev->num_hwfns; i++) {
@@ -2102,6 +2103,28 @@ out:
return status;
}
+static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
+ u8 dev_addr, u32 offset, u32 len)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int rc = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
+ offset, len, buf);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
@@ -2144,6 +2167,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.update_mac = &qed_update_mac,
.update_mtu = &qed_update_mtu,
.update_wol = &qed_update_wol,
+ .read_module_eeprom = &qed_read_module_eeprom,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
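
qed_read_module_eeprom() above follows the driver's standard PTT discipline: acquire a PHY transaction window, run the management-firmware call, release unconditionally, and report -EAGAIN when no window is free. The skeleton, factored out as a hedged sketch with a hypothetical callback:

static int example_with_ptt(struct qed_hwfn *hwfn,
			    int (*op)(struct qed_hwfn *, struct qed_ptt *))
{
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
	int rc;

	if (!ptt)
		return -EAGAIN;		/* no free window right now */
	rc = op(hwfn, ptt);
	qed_ptt_release(hwfn, ptt);	/* always released, even on error */
	return rc;
}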
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index cdd645024a32..d89a0e22f6e4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -570,12 +570,13 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
+static int
+qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
{
struct qed_mcp_mb_params mb_params;
int rc;
@@ -1551,7 +1552,8 @@ qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
- p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
+ qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
+ p_hwfn->ufp_info.tc);
qed_qm_reconf(p_hwfn, p_ptt);
} else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
@@ -2473,6 +2475,55 @@ out:
return rc;
}
+int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
+{
+ u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
+ u32 resp, param;
+ int rc;
+
+ nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
+ nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
+
+ addr = offset;
+ offset = 0;
+ bytes_left = len;
+ while (bytes_left > 0) {
+ bytes_to_copy = min_t(u32, bytes_left,
+ MAX_I2C_TRANSACTION_SIZE);
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((addr + offset) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
+ nvm_offset |= (bytes_to_copy <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
+ rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_READ,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)(p_buf + offset));
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send a transceiver read command to the MFW. rc = %d.\n",
+ rc);
+ return rc;
+ }
+
+ if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
+ return -ENODEV;
+ else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ return -EINVAL;
+
+ offset += buf_size;
+ bytes_left -= buf_size;
+ }
+
+ return 0;
+}
+
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 drv_mb_param = 0, rsp, param;
@@ -2959,7 +3010,7 @@ static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
return rc;
}
-int
+static int
__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_resc_lock_params *p_params)
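
The loop inside qed_mcp_phy_sfp_read() is worth a second look: each mailbox transaction moves at most MAX_I2C_TRANSACTION_SIZE (16) bytes, and the loop advances by buf_size — the count the firmware actually returned — not by the requested length. The control flow in isolation (hedged; the mailbox call is abstracted into a hypothetical callback):

static int example_chunked_read(u8 *buf, u32 len,
				int (*rd)(u32 off, u32 want, u32 *got, u8 *dst))
{
	u32 offset = 0, bytes_left = len, got;
	int rc;

	while (bytes_left > 0) {
		/* 16 == MAX_I2C_TRANSACTION_SIZE from the qed_hsi.h hunk */
		u32 want = min_t(u32, bytes_left, 16);

		rc = rd(offset, want, &got, buf + offset);
		if (rc)
			return rc;
		offset += got;		/* advance by what firmware returned */
		bytes_left -= got;
	}
	return 0;
}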
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 632a838f1fe3..047976d5c6e9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -840,6 +840,22 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);
/**
+ * @brief Read from sfp
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param port - transceiver port
+ * @param addr - I2C address
+ * @param offset - offset in sfp
+ * @param len - buffer length
+ * @param p_buf - buffer to read into
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf);
+
+/**
* @brief indicates whether the MFW objects [under mcp_info] are accessible
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 101d677114f2..be941cfaa2d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -134,7 +134,7 @@ static bool qed_bmap_is_empty(struct qed_bmap *bmap)
return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
}
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
/* First sb id for RoCE is after all the l2 sb */
return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
@@ -706,7 +706,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}
-int qed_rdma_stop(void *rdma_cxt)
+static int qed_rdma_stop(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_close_func_ramrod_data *p_ramrod;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index b5ce1581645f..7d7a64c55ff1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -44,8 +44,10 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/if_vlan.h>
#include "qed.h"
#include "qed_cxt.h"
+#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
@@ -157,7 +159,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
-void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
{
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
@@ -231,16 +233,33 @@ static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
+static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ u8 pri, tc = 0;
+
+ if (qp->vlan_id) {
+ pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "qp icid %u tc: %u (vlan priority %s)\n",
+ qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");
+
+ return tc;
+}
+
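
qed_roce_get_qp_tc() derives the traffic class from the QP's VLAN PCP, which occupies the top three bits of the 16-bit TCI (VLAN_PRIO_MASK is 0xe000 and VLAN_PRIO_SHIFT is 13 in <linux/if_vlan.h>). A self-contained illustration of the extraction:

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_MASK	0xe000	/* same values as <linux/if_vlan.h> */
#define VLAN_PRIO_SHIFT	13

int main(void)
{
	uint16_t vlan_tci = 0xa064;	/* PCP = 5, VID = 0x064 */
	uint8_t pri = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

	/* The driver then maps this priority to a TC via DCBX */
	printf("priority = %u\n", pri);	/* prints: priority = 5 */
	return 0;
}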
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp)
{
struct roce_create_qp_resp_ramrod_data *p_ramrod;
+ u16 regular_latency_queue, low_latency_queue;
struct qed_sp_init_data init_data;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
- u16 regular_latency_queue;
enum protocol_type proto;
int rc;
+ u8 tc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -324,12 +343,17 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
qp->rq_cq_id);
- regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
-
+ tc = qed_roce_get_qp_tc(p_hwfn, qp);
+ regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
+ low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "qp icid %u pqs: regular_latency %u low_latency %u\n",
+ qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
+ low_latency_queue - CM_TX_PQ_BASE);
p_ramrod->regular_latency_phy_queue =
cpu_to_le16(regular_latency_queue);
p_ramrod->low_latency_phy_queue =
- cpu_to_le16(regular_latency_queue);
+ cpu_to_le16(low_latency_queue);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
@@ -345,11 +369,6 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
qp->stats_queue;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "rc = %d regular physical queue = 0x%x\n", rc,
- regular_latency_queue);
-
if (rc)
goto err;
@@ -375,12 +394,13 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp)
{
struct roce_create_qp_req_ramrod_data *p_ramrod;
+ u16 regular_latency_queue, low_latency_queue;
struct qed_sp_init_data init_data;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
- u16 regular_latency_queue;
enum protocol_type proto;
int rc;
+ u8 tc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -453,12 +473,17 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
p_ramrod->cq_cid =
cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
- regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
-
+ tc = qed_roce_get_qp_tc(p_hwfn, qp);
+ regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
+ low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "qp icid %u pqs: regular_latency %u low_latency %u\n",
+ qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
+ low_latency_queue - CM_TX_PQ_BASE);
p_ramrod->regular_latency_phy_queue =
cpu_to_le16(regular_latency_queue);
p_ramrod->low_latency_phy_queue =
- cpu_to_le16(regular_latency_queue);
+ cpu_to_le16(low_latency_queue);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
@@ -471,9 +496,6 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
qp->stats_queue;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
-
if (rc)
goto err;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 26e918d7f2f9..9b08a9d9e151 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -672,8 +672,8 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
return 0;
}
-bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
- int vfid, bool b_fail_malicious)
+static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
+ int vfid, bool b_fail_malicious)
{
/* Check PF supports sriov */
if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
@@ -687,7 +687,7 @@ bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
return true;
}
-bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}
@@ -3979,7 +3979,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
}
}
-void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
+static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
{
int i;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index be6ddde1a104..3d4269659820 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -169,7 +169,7 @@ static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
p_qid_tlv->qid = p_cid->qid_usage_idx;
}
-int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
+static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp;
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index d7ed0d3dbf71..6a4d266fb8e2 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -52,6 +52,9 @@
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_eth_if.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 33
#define QEDE_REVISION_VERSION 0
@@ -386,6 +389,15 @@ struct qede_tx_queue {
#define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
+#define QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx) ((edev)->fp_num_rx + \
+ ((idx) % QEDE_TSS_COUNT(edev)))
+#define QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx) ((idx) / QEDE_TSS_COUNT(edev))
+#define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq) ((QEDE_TSS_COUNT(edev) * \
+ (txq)->cos) + (txq)->index)
+#define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx) \
+ (&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
+ [QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
+#define QEDE_FP_TC0_TXQ(fp) (&((fp)->txq[0]))
/* Regular Tx requires skb + metadata for release purpose,
* while XDP requires the pages and the mapped address.
@@ -399,6 +411,8 @@ struct qede_tx_queue {
/* Slowpath; Should be kept in end [unless missing padding] */
void *handle;
+ u16 cos;
+ u16 ndev_txq_id;
};
#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
@@ -458,7 +472,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
void qede_free_arfs(struct qede_dev *edev);
int qede_alloc_arfs(struct qede_dev *edev);
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
-int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
+int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie);
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd);
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
u32 *rule_locs);
@@ -524,6 +538,8 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
+int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+ struct tc_cls_flower_offload *f);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
@@ -541,5 +557,7 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define QEDE_RX_HDR_SIZE 256
#define QEDE_MAX_JUMBO_PACKET_SIZE 9600
#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
+#define for_each_cos_in_txq(edev, var) \
+ for ((var) = 0; (var) < (edev)->dev_info.num_tc; (var)++)
#endif /* _QEDE_H_ */
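
The new macros linearize a (cos, index) pair into a netdev TX queue id as cos * QEDE_TSS_COUNT + index, so idx % QEDE_TSS_COUNT recovers the fastpath and idx / QEDE_TSS_COUNT the class of service. A quick round-trip check of that arithmetic under an assumed configuration:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int tss_count = 8, num_tc = 4;	/* assumed queue/TC counts */
	int cos, index;

	for (cos = 0; cos < num_tc; cos++) {
		for (index = 0; index < tss_count; index++) {
			int ndev_id = cos * tss_count + index;

			/* mirrors QEDE_NDEV_TXQ_ID_TO_FP_ID/_TXQ_COS */
			assert(ndev_id % tss_count == index);
			assert(ndev_id / tss_count == cos);
		}
	}
	printf("%d netdev txqs cover %d fastpaths x %d TCs\n",
	       tss_count * num_tc, tss_count, num_tc);
	return 0;
}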
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index f4a0f8ff8261..19652cd27ca7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -222,7 +222,7 @@ static void qede_get_strings_stats_txq(struct qede_dev *edev,
QEDE_TXQ_XDP_TO_IDX(edev, txq),
qede_tqstats_arr[i].string);
else
- sprintf(*buf, "%d: %s", txq->index,
+ sprintf(*buf, "%d_%d: %s", txq->index, txq->cos,
qede_tqstats_arr[i].string);
*buf += ETH_GSTRING_LEN;
}
@@ -262,8 +262,13 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
if (fp->type & QEDE_FASTPATH_XDP)
qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);
- if (fp->type & QEDE_FASTPATH_TX)
- qede_get_strings_stats_txq(edev, fp->txq, &buf);
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_get_strings_stats_txq(edev,
+ &fp->txq[cos], &buf);
+ }
}
/* Account for non-queue statistics */
@@ -338,8 +343,12 @@ static void qede_get_ethtool_stats(struct net_device *dev,
if (fp->type & QEDE_FASTPATH_XDP)
qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);
- if (fp->type & QEDE_FASTPATH_TX)
- qede_get_ethtool_stats_txq(fp->txq, &buf);
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_get_ethtool_stats_txq(&fp->txq[cos], &buf);
+ }
}
for (i = 0; i < QEDE_NUM_STATS; i++) {
@@ -366,7 +375,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
num_stats--;
/* Account for the Regular Tx statistics */
- num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
+ num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS *
+ edev->dev_info.num_tc;
/* Account for the Regular Rx statistics */
num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;
@@ -741,9 +751,17 @@ static int qede_get_coalesce(struct net_device *dev,
}
for_each_queue(i) {
+ struct qede_tx_queue *txq;
+
fp = &edev->fp_array[i];
+
+ /* All TX queues of a given fastpath use the same
+ * coalescing value, so there is no need to iterate
+ * over all TCs; the TC0 txq suffices.
+ */
if (fp->type & QEDE_FASTPATH_TX) {
- tx_handle = fp->txq->handle;
+ txq = QEDE_FP_TC0_TXQ(fp);
+ tx_handle = txq->handle;
break;
}
}
@@ -801,9 +819,17 @@ static int qede_set_coalesce(struct net_device *dev,
}
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ struct qede_tx_queue *txq;
+
+ /* All TX queues of a given fastpath use the same
+ * coalescing value, so there is no need to iterate
+ * over all TCs; the TC0 txq suffices.
+ */
+ txq = QEDE_FP_TC0_TXQ(fp);
+
rc = edev->ops->common->set_coalesce(edev->cdev,
0, txc,
- fp->txq->handle);
+ txq->handle);
if (rc) {
DP_INFO(edev,
"Set TX coalesce error, rc = %d\n", rc);
@@ -1259,7 +1285,7 @@ static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
rc = qede_add_cls_rule(edev, info);
break;
case ETHTOOL_SRXCLSRLDEL:
- rc = qede_del_cls_rule(edev, info);
+ rc = qede_delete_flow_filter(edev, info->fs.location);
break;
default:
DP_INFO(edev, "Command parameters not supported\n");
@@ -1385,8 +1411,10 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
u16 val;
for_each_queue(i) {
- if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
- txq = edev->fp_array[i].txq;
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ txq = QEDE_FP_TC0_TXQ(fp);
break;
}
}
@@ -1780,6 +1808,92 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
+static int qede_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u8 buf[4];
+ int rc;
+
+ /* Read the first 4 bytes to find the SFP type */
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A0, 0, 4);
+ if (rc) {
+ DP_ERR(edev, "Failed reading EEPROM data %d\n", rc);
+ return rc;
+ }
+
+ switch (buf[0]) {
+ case 0x3: /* SFP, SFP+, SFP-28 */
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case 0xc: /* QSFP */
+ case 0xd: /* QSFP+ */
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case 0x11: /* QSFP-28 */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ default:
+ DP_ERR(edev, "Unknown transceiver type 0x%x\n", buf[0]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
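
qede_get_module_info() dispatches on byte 0 of the module EEPROM, the SFF-8024 identifier: 0x03 for SFP/SFP+/SFP28 (SFF-8472 layout), 0x0c/0x0d for QSFP/QSFP+ (SFF-8436), and 0x11 for QSFP28 (SFF-8636). The same dispatch as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* SFF-8024 identifier byte -> EEPROM layout, mirroring the switch above */
static const char *module_layout(uint8_t id)
{
	switch (id) {
	case 0x03:
		return "SFF-8472 (SFP/SFP+/SFP28)";
	case 0x0c:
	case 0x0d:
		return "SFF-8436 (QSFP/QSFP+)";
	case 0x11:
		return "SFF-8636 (QSFP28)";
	default:
		return "unknown";
	}
}

int main(void)
{
	printf("id 0x03 -> %s\n", module_layout(0x03));
	return 0;
}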
+static int qede_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 start_addr = ee->offset, size = 0;
+ u8 *buf = data;
+ int rc = 0;
+
+ /* Read A0 section */
+ if (ee->offset < ETH_MODULE_SFF_8079_LEN) {
+ /* Limit transfer size to the A0 section boundary */
+ if (ee->offset + ee->len > ETH_MODULE_SFF_8079_LEN)
+ size = ETH_MODULE_SFF_8079_LEN - ee->offset;
+ else
+ size = ee->len;
+
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A0,
+ start_addr, size);
+ if (rc) {
+ DP_ERR(edev, "Failed reading A0 section %d\n", rc);
+ return rc;
+ }
+
+ buf += size;
+ start_addr += size;
+ }
+
+ /* Read A2 section */
+ if (start_addr >= ETH_MODULE_SFF_8079_LEN &&
+ start_addr < ETH_MODULE_SFF_8472_LEN) {
+ size = ee->len - size;
+ /* Limit transfer size to the A2 section boundary */
+ if (start_addr + size > ETH_MODULE_SFF_8472_LEN)
+ size = ETH_MODULE_SFF_8472_LEN - start_addr;
+ start_addr -= ETH_MODULE_SFF_8079_LEN;
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A2,
+ start_addr, size);
+ if (rc) {
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Failed reading A2 section %d\n", rc);
+ return 0;
+ }
+ }
+
+ return rc;
+}
+
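
The A0 page covers bytes 0..255 and the A2 diagnostics page bytes 256..511 (ETH_MODULE_SFF_8079_LEN is 256 and ETH_MODULE_SFF_8472_LEN is 512), so a request straddling byte 256 is split into two reads against different I2C device addresses. The split arithmetic in isolation:

#include <stdio.h>

#define SFF_8079_LEN 256	/* A0 page */
#define SFF_8472_LEN 512	/* A0 + A2 pages */

int main(void)
{
	unsigned int offset = 240, len = 40;	/* straddles the boundary */
	unsigned int a0_size = 0, a2_off = 0, a2_size = 0;

	if (offset < SFF_8079_LEN)
		a0_size = offset + len > SFF_8079_LEN ?
			  SFF_8079_LEN - offset : len;

	if (offset + len > SFF_8079_LEN) {
		/* A2 offsets are relative to the start of the A2 page */
		a2_off = offset + a0_size - SFF_8079_LEN;
		a2_size = len - a0_size;
		if (SFF_8079_LEN + a2_off + a2_size > SFF_8472_LEN)
			a2_size = SFF_8472_LEN - SFF_8079_LEN - a2_off;
	}

	printf("A0: %u bytes @%u, A2: %u bytes @%u\n",
	       a0_size, offset, a2_size, a2_off);
	return 0;
}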
static const struct ethtool_ops qede_ethtool_ops = {
.get_link_ksettings = qede_get_link_ksettings,
.set_link_ksettings = qede_set_link_ksettings,
@@ -1813,6 +1927,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
.self_test = qede_self_test,
+ .get_module_info = qede_get_module_info,
+ .get_module_eeprom = qede_get_module_eeprom,
.get_eee = qede_get_eee,
.set_eee = qede_set_eee,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index b823bfe2ea4d..9673d19308e6 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -83,7 +83,7 @@ struct qede_arfs_fltr_node {
struct qede_arfs_tuple tuple;
u32 flow_id;
- u16 sw_id;
+ u64 sw_id;
u16 rxq_id;
u16 next_rxq_id;
u8 vfid;
@@ -138,7 +138,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
n->tuple.stringify(&n->tuple, tuple_buffer);
DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
- "%s sw_id[0x%x]: %s [vf %u queue %d]\n",
+ "%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
add_fltr ? "Adding" : "Deleting",
n->sw_id, tuple_buffer, n->vfid, rxq_id);
}
@@ -152,7 +152,10 @@ static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
kfree(fltr->data);
- clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+
+ if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
+ clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+
kfree(fltr);
}
@@ -214,7 +217,7 @@ void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
if (fw_rc) {
DP_NOTICE(edev,
- "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
+ "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
fw_rc, fltr->flow_id, fltr->sw_id,
ntohs(fltr->tuple.src_port),
ntohs(fltr->tuple.dst_port), fltr->rxq_id);
@@ -1116,7 +1119,6 @@ int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return qede_xdp_set(edev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!edev->xdp_prog;
xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
return 0;
default:
@@ -1349,7 +1351,7 @@ out:
}
static struct qede_arfs_fltr_node *
-qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location)
+qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
{
struct qede_arfs_fltr_node *fltr;
@@ -1600,6 +1602,69 @@ static int qede_flow_spec_validate_unused(struct qede_dev *edev,
return 0;
}
+static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
+ struct qede_arfs_tuple *t)
+{
+ /* The input must be either a full 4-tuple, a
+ * destination L4 port only, a source IP only, or a
+ * destination IP only.
+ */
+ if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+ } else if (!t->src_port && t->dst_port &&
+ !t->src_ipv4 && !t->dst_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+ } else if (!t->src_port && !t->dst_port &&
+ !t->dst_ipv4 && t->src_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+ } else if (!t->src_port && !t->dst_port &&
+ t->dst_ipv4 && !t->src_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
+ } else {
+ DP_INFO(edev, "Invalid N-tuple\n");
+ return -EOPNOTSUPP;
+ }
+
+ t->ip_comp = qede_flow_spec_ipv4_cmp;
+ t->build_hdr = qede_flow_build_ipv4_hdr;
+ t->stringify = qede_flow_stringify_ipv4_hdr;
+
+ return 0;
+}
+
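
The helper above accepts exactly four field combinations and rejects everything else; its IPv6 twin below applies the same rule with memcmp() against an all-zero address. The decision as a compact standalone truth-table check:

#include <stdbool.h>
#include <stdio.h>

enum mode {
	MODE_5_TUPLE, MODE_L4_PORT, MODE_IP_SRC, MODE_IP_DEST, MODE_INVALID
};

static enum mode classify(bool sport, bool dport, bool sip, bool dip)
{
	if (sport && dport && sip && dip)
		return MODE_5_TUPLE;
	if (!sport && dport && !sip && !dip)
		return MODE_L4_PORT;
	if (!sport && !dport && sip && !dip)
		return MODE_IP_SRC;
	if (!sport && !dport && !sip && dip)
		return MODE_IP_DEST;
	return MODE_INVALID;	/* -EOPNOTSUPP in the driver */
}

int main(void)
{
	printf("dst-port only -> mode %d\n",
	       classify(false, true, false, false));
	return 0;
}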
+static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
+ struct qede_arfs_tuple *t,
+ struct in6_addr *zaddr)
+{
+ /* The input must be either a full 4-tuple, a
+ * destination L4 port only, a source IP only, or a
+ * destination IP only.
+ */
+ if (t->src_port && t->dst_port &&
+ memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+ } else if (!t->src_port && t->dst_port &&
+ !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+ } else if (!t->src_port && !t->dst_port &&
+ !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+ } else if (!t->src_port && !t->dst_port &&
+ memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
+ } else {
+ DP_INFO(edev, "Invalid N-tuple\n");
+ return -EOPNOTSUPP;
+ }
+
+ t->ip_comp = qede_flow_spec_ipv6_cmp;
+ t->build_hdr = qede_flow_build_ipv6_hdr;
+
+ return 0;
+}
+
static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
struct qede_arfs_tuple *t,
struct ethtool_rx_flow_spec *fs)
@@ -1639,27 +1704,7 @@ static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
t->src_port = fs->h_u.tcp_ip4_spec.psrc;
t->dst_port = fs->h_u.tcp_ip4_spec.pdst;
- /* We must either have a valid 4-tuple or only dst port
- * or only src ip as an input
- */
- if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
- t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
- } else if (!t->src_port && t->dst_port &&
- !t->src_ipv4 && !t->dst_ipv4) {
- t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
- } else if (!t->src_port && !t->dst_port &&
- !t->dst_ipv4 && t->src_ipv4) {
- t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
- } else {
- DP_INFO(edev, "Invalid N-tuple\n");
- return -EOPNOTSUPP;
- }
-
- t->ip_comp = qede_flow_spec_ipv4_cmp;
- t->build_hdr = qede_flow_build_ipv4_hdr;
- t->stringify = qede_flow_stringify_ipv4_hdr;
-
- return 0;
+ return qede_set_v4_tuple_to_profile(edev, t);
}
static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev,
@@ -1691,10 +1736,8 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
struct ethtool_rx_flow_spec *fs)
{
struct in6_addr zero_addr;
- void *p;
- p = &zero_addr;
- memset(p, 0, sizeof(zero_addr));
+ memset(&zero_addr, 0, sizeof(zero_addr));
if ((fs->h_u.tcp_ip6_spec.psrc &
fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) {
@@ -1721,30 +1764,7 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
t->src_port = fs->h_u.tcp_ip6_spec.psrc;
t->dst_port = fs->h_u.tcp_ip6_spec.pdst;
- /* We must make sure we have a valid 4-tuple or only dest port
- * or only src ip as an input
- */
- if (t->src_port && t->dst_port &&
- memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
- memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
- t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
- } else if (!t->src_port && t->dst_port &&
- !memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
- !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
- t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
- } else if (!t->src_port && !t->dst_port &&
- !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr)) &&
- memcmp(&t->src_ipv6, p, sizeof(struct in6_addr))) {
- t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
- } else {
- DP_INFO(edev, "Invalid N-tuple\n");
- return -EOPNOTSUPP;
- }
-
- t->ip_comp = qede_flow_spec_ipv6_cmp;
- t->build_hdr = qede_flow_build_ipv6_hdr;
-
- return 0;
+ return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
}
static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev,
@@ -1942,9 +1962,8 @@ unlock:
return rc;
}
-int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
{
- struct ethtool_rx_flow_spec *fsp = &info->fs;
struct qede_arfs_fltr_node *fltr = NULL;
int rc = -EPERM;
@@ -1953,7 +1972,7 @@ int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
goto unlock;
fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
- fsp->location);
+ cookie);
if (!fltr)
goto unlock;
@@ -1983,3 +2002,293 @@ unlock:
__qede_unlock(edev);
return count;
}
+
+static int qede_parse_actions(struct qede_dev *edev,
+ struct tcf_exts *exts)
+{
+ int rc = -EINVAL, num_act = 0;
+ const struct tc_action *a;
+ bool is_drop = false;
+ LIST_HEAD(actions);
+
+ if (!tcf_exts_has_actions(exts)) {
+ DP_NOTICE(edev, "No tc actions received\n");
+ return rc;
+ }
+
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ num_act++;
+
+ if (is_tcf_gact_shot(a))
+ is_drop = true;
+ }
+
+ if (num_act == 1 && is_drop)
+ return 0;
+
+ return rc;
+}
+
+static int
+qede_tc_parse_ports(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *t)
+{
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_dissector_key_ports *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->mask);
+
+ if ((key->src && mask->src != U16_MAX) ||
+ (key->dst && mask->dst != U16_MAX)) {
+ DP_NOTICE(edev, "Do not support ports masks\n");
+ return -EINVAL;
+ }
+
+ t->src_port = key->src;
+ t->dst_port = key->dst;
+ }
+
+ return 0;
+}
+
+static int
+qede_tc_parse_v6_common(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *t)
+{
+ struct in6_addr zero_addr, addr;
+
+ memset(&zero_addr, 0, sizeof(addr));
+ memset(&addr, 0xff, sizeof(addr));
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_dissector_key_ipv6_addrs *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->mask);
+
+ if ((memcmp(&key->src, &zero_addr, sizeof(addr)) &&
+ memcmp(&mask->src, &addr, sizeof(addr))) ||
+ (memcmp(&key->dst, &zero_addr, sizeof(addr)) &&
+ memcmp(&mask->dst, &addr, sizeof(addr)))) {
+ DP_NOTICE(edev,
+ "Do not support IPv6 address prefix/mask\n");
+ return -EINVAL;
+ }
+
+ memcpy(&t->src_ipv6, &key->src, sizeof(addr));
+ memcpy(&t->dst_ipv6, &key->dst, sizeof(addr));
+ }
+
+ if (qede_tc_parse_ports(edev, f, t))
+ return -EINVAL;
+
+ return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
+}
+
+static int
+qede_tc_parse_v4_common(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *t)
+{
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_dissector_key_ipv4_addrs *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->mask);
+
+ if ((key->src && mask->src != U32_MAX) ||
+ (key->dst && mask->dst != U32_MAX)) {
+ DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
+ return -EINVAL;
+ }
+
+ t->src_ipv4 = key->src;
+ t->dst_ipv4 = key->dst;
+ }
+
+ if (qede_tc_parse_ports(edev, f, t))
+ return -EINVAL;
+
+ return qede_set_v4_tuple_to_profile(edev, t);
+}
+
+static int
+qede_tc_parse_tcp_v6(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_TCP;
+ tuple->eth_proto = htons(ETH_P_IPV6);
+
+ return qede_tc_parse_v6_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_tcp_v4(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_TCP;
+ tuple->eth_proto = htons(ETH_P_IP);
+
+ return qede_tc_parse_v4_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_udp_v6(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_UDP;
+ tuple->eth_proto = htons(ETH_P_IPV6);
+
+ return qede_tc_parse_v6_common(edev, f, tuple);
+}
+
+static int
+qede_tc_parse_udp_v4(struct qede_dev *edev,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_UDP;
+ tuple->eth_proto = htons(ETH_P_IP);
+
+ return qede_tc_parse_v4_common(edev, f, tuple);
+}
+
+static int
+qede_parse_flower_attr(struct qede_dev *edev, __be16 proto,
+ struct tc_cls_flower_offload *f,
+ struct qede_arfs_tuple *tuple)
+{
+ int rc = -EINVAL;
+ u8 ip_proto = 0;
+
+ memset(tuple, 0, sizeof(*tuple));
+
+ if (f->dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ DP_NOTICE(edev, "Unsupported key set:0x%x\n",
+ f->dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (proto != htons(ETH_P_IP) &&
+ proto != htons(ETH_P_IPV6)) {
+ DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
+ return -EPROTONOSUPPORT;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+ ip_proto = key->ip_proto;
+ }
+
+ if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
+ rc = qede_tc_parse_tcp_v4(edev, f, tuple);
+ else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
+ rc = qede_tc_parse_tcp_v6(edev, f, tuple);
+ else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
+ rc = qede_tc_parse_udp_v4(edev, f, tuple);
+ else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
+ rc = qede_tc_parse_udp_v6(edev, f, tuple);
+ else
+ DP_NOTICE(edev, "Invalid tc protocol request\n");
+
+ return rc;
+}
+
+int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+ struct tc_cls_flower_offload *f)
+{
+ struct qede_arfs_fltr_node *n;
+ int min_hlen, rc = -EINVAL;
+ struct qede_arfs_tuple t;
+
+ __qede_lock(edev);
+
+ if (!edev->arfs) {
+ rc = -EPERM;
+ goto unlock;
+ }
+
+ /* Parse the flower attributes and prepare the filter tuple */
+ if (qede_parse_flower_attr(edev, proto, f, &t))
+ goto unlock;
+
+ /* Validate profile mode and number of filters */
+ if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
+ edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
+ DP_NOTICE(edev,
+ "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
+ t.mode, edev->arfs->mode, edev->arfs->filter_count);
+ goto unlock;
+ }
+
+ /* Validate the tc actions; only a plain drop is supported */
+ if (qede_parse_actions(edev, f->exts))
+ goto unlock;
+
+ if (qede_flow_find_fltr(edev, &t)) {
+ rc = -EEXIST;
+ goto unlock;
+ }
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ min_hlen = qede_flow_get_min_header_size(&t);
+
+ n->data = kzalloc(min_hlen, GFP_KERNEL);
+ if (!n->data) {
+ kfree(n);
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ memcpy(&n->tuple, &t, sizeof(n->tuple));
+
+ n->buf_len = min_hlen;
+ n->b_is_drop = true;
+ n->sw_id = f->cookie;
+
+ n->tuple.build_hdr(&n->tuple, n->data);
+
+ rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
+ if (rc)
+ goto unlock;
+
+ qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+ rc = qede_poll_arfs_filter_config(edev, n);
+
+unlock:
+ __qede_unlock(edev);
+ return rc;
+}
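
With qede_add_tc_flower_fltr() and qede_delete_flow_filter() wired into ndo_setup_tc (see qede_main.c below), a hardware drop rule can be requested with something like 'tc qdisc add dev <if> ingress' followed by 'tc filter add dev <if> ingress protocol ip flower ip_proto tcp dst_port 80 skip_sw action drop' (an illustrative invocation; only the exact-match tuple combinations accepted by the parsers above, with a single drop action, are offloaded).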
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 6c702399b801..1a78027de071 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -408,12 +408,12 @@ static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
+ unsigned int pkts_compl = 0, bytes_compl = 0;
struct netdev_queue *netdev_txq;
u16 hw_bd_cons;
- unsigned int pkts_compl = 0, bytes_compl = 0;
int rc;
- netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
barrier();
@@ -1119,8 +1119,10 @@ static bool qede_rx_xdp(struct qede_dev *edev,
default:
bpf_warn_invalid_xdp_action(act);
+ /* Fall through */
case XDP_ABORTED:
trace_xdp_exception(edev->ndev, prog, act);
+ /* Fall through */
case XDP_DROP:
qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
}
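
The new comments above mark the missing breaks as intentional: GCC's -Wimplicit-fallthrough recognizes a /* fall through */ comment (or a fallthrough attribute) as an annotation that silences the warning. A minimal illustration of the convention:

#include <stdio.h>

/* -Wimplicit-fallthrough accepts a "fall through" comment as proof
 * that the missing break between cases is deliberate.
 */
static const char *handle(int level)
{
	switch (level) {
	case 2:
		printf("logging extra context\n");
		/* fall through */
	case 1:
		return "handled";
	default:
		return "ignored";
	}
}

int main(void)
{
	printf("%s\n", handle(2));
	return 0;
}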
@@ -1363,9 +1365,14 @@ static bool qede_poll_is_more_work(struct qede_fastpath *fp)
if (qede_txq_has_work(fp->xdp_tx))
return true;
- if (likely(fp->type & QEDE_FASTPATH_TX))
- if (qede_txq_has_work(fp->txq))
- return true;
+ if (likely(fp->type & QEDE_FASTPATH_TX)) {
+ int cos;
+
+ for_each_cos_in_txq(fp->edev, cos) {
+ if (qede_txq_has_work(&fp->txq[cos]))
+ return true;
+ }
+ }
return false;
}
@@ -1380,8 +1387,14 @@ int qede_poll(struct napi_struct *napi, int budget)
struct qede_dev *edev = fp->edev;
int rx_work_done = 0;
- if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
- qede_tx_int(edev, fp->txq);
+ if (likely(fp->type & QEDE_FASTPATH_TX)) {
+ int cos;
+
+ for_each_cos_in_txq(fp->edev, cos) {
+ if (qede_txq_has_work(&fp->txq[cos]))
+ qede_tx_int(edev, &fp->txq[cos]);
+ }
+ }
if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
qede_xdp_tx_int(edev, fp->xdp_tx);
@@ -1442,8 +1455,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Get tx-queue context and netdev index */
txq_index = skb_get_queue_mapping(skb);
- WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
- txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
+ WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc);
+ txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index);
netdev_txq = netdev_get_tx_queue(ndev, txq_index);
WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 6a796040a32c..46d0f2eaa0c0 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -536,6 +536,97 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
+static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ int cos, count, offset;
+
+ if (num_tc > edev->dev_info.num_tc)
+ return -EINVAL;
+
+ netdev_reset_tc(ndev);
+ netdev_set_num_tc(ndev, num_tc);
+
+ for_each_cos_in_txq(edev, cos) {
+ count = QEDE_TSS_COUNT(edev);
+ offset = cos * QEDE_TSS_COUNT(edev);
+ netdev_set_tc_queue(ndev, cos, count, offset);
+ }
+
+ return 0;
+}
+
+static int
+qede_set_flower(struct qede_dev *edev, struct tc_cls_flower_offload *f,
+ __be16 proto)
+{
+ switch (f->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return qede_add_tc_flower_fltr(edev, proto, f);
+ case TC_CLSFLOWER_DESTROY:
+ return qede_delete_flow_filter(edev, f->cookie);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct tc_cls_flower_offload *f;
+ struct qede_dev *edev = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ f = type_data;
+ return qede_set_flower(edev, f, f->common.protocol);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qede_setup_tc_block(struct qede_dev *edev,
+ struct tc_block_offload *f)
+{
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block,
+ qede_setup_tc_block_cb,
+ edev, edev, f->extack);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, qede_setup_tc_block_cb, edev);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct tc_mqprio_qopt *mqprio;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return qede_setup_tc_block(edev, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ mqprio = type_data;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ return qede_setup_tc(dev, mqprio->num_tc);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
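
Once .ndo_setup_tc is in place, the mqprio path can be exercised with something like 'tc qdisc add dev <if> root mqprio num_tc 4 map 0 1 2 3 0 1 2 3 hw 1' (illustrative; qede_setup_tc() rejects any num_tc above the TC count the device reports).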
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -568,6 +659,7 @@ static const struct net_device_ops qede_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
+ .ndo_setup_tc = qede_setup_tc_offload,
};
static const struct net_device_ops qede_netdev_vf_ops = {
@@ -621,7 +713,8 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
struct qede_dev *edev;
ndev = alloc_etherdev_mqs(sizeof(*edev),
- info->num_queues, info->num_queues);
+ info->num_queues * info->num_tc,
+ info->num_queues);
if (!ndev) {
pr_err("etherdev allocation failed\n");
return NULL;
@@ -688,7 +781,7 @@ static void qede_init_ndev(struct qede_dev *edev)
/* user-changeable features */
hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
hw_features |= NETIF_F_NTUPLE;
@@ -830,7 +923,8 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
}
if (fp->type & QEDE_FASTPATH_TX) {
- fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
+ fp->txq = kcalloc(edev->dev_info.num_tc,
+ sizeof(*fp->txq), GFP_KERNEL);
if (!fp->txq)
goto err;
}
@@ -879,10 +973,15 @@ static void qede_sp_task(struct work_struct *work)
static void qede_update_pf_params(struct qed_dev *cdev)
{
struct qed_pf_params pf_params;
+ u16 num_cons;
/* 64 rx + 64 tx + 64 XDP */
memset(&pf_params, 0, sizeof(struct qed_pf_params));
- pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
+
+ /* 1 rx + 1 xdp + max tx cos */
+ num_cons = QED_MIN_L2_CONS;
+
+ pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;
/* Same for VFs - make sure they'll have sufficient connections
* to support XDP Tx queues.
@@ -1363,8 +1462,12 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
if (fp->type & QEDE_FASTPATH_XDP)
qede_free_mem_txq(edev, fp->xdp_tx);
- if (fp->type & QEDE_FASTPATH_TX)
- qede_free_mem_txq(edev, fp->txq);
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_free_mem_txq(edev, &fp->txq[cos]);
+ }
}
/* This function allocates all memory needed for a single fp (i.e. an entity
@@ -1391,9 +1494,13 @@ static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
}
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_alloc_mem_txq(edev, fp->txq);
- if (rc)
- goto out;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
+ if (rc)
+ goto out;
+ }
}
out:
@@ -1466,10 +1573,23 @@ static void qede_init_fp(struct qede_dev *edev)
}
if (fp->type & QEDE_FASTPATH_TX) {
- fp->txq->index = txq_index++;
- if (edev->dev_info.is_legacy)
- fp->txq->is_legacy = 1;
- fp->txq->dev = &edev->pdev->dev;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ struct qede_tx_queue *txq = &fp->txq[cos];
+ u16 ndev_tx_id;
+
+ txq->cos = cos;
+ txq->index = txq_index;
+ ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
+ txq->ndev_txq_id = ndev_tx_id;
+
+ if (edev->dev_info.is_legacy)
+ txq->is_legacy = 1;
+ txq->dev = &edev->pdev->dev;
+ }
+
+ txq_index++;
}
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -1483,7 +1603,9 @@ static int qede_set_real_num_queues(struct qede_dev *edev)
{
int rc = 0;
- rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
+ rc = netif_set_real_num_tx_queues(edev->ndev,
+ QEDE_TSS_COUNT(edev) *
+ edev->dev_info.num_tc);
if (rc) {
DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
return rc;
@@ -1685,9 +1807,13 @@ static int qede_stop_queues(struct qede_dev *edev)
fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_drain_txq(edev, fp->txq, true);
- if (rc)
- return rc;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_drain_txq(edev, &fp->txq[cos], true);
+ if (rc)
+ return rc;
+ }
}
if (fp->type & QEDE_FASTPATH_XDP) {
@@ -1703,9 +1829,13 @@ static int qede_stop_queues(struct qede_dev *edev)
/* Stop the Tx Queue(s) */
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_stop_txq(edev, fp->txq, i);
- if (rc)
- return rc;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_stop_txq(edev, &fp->txq[cos], i);
+ if (rc)
+ return rc;
+ }
}
/* Stop the Rx Queue */
@@ -1758,6 +1888,7 @@ static int qede_start_txq(struct qede_dev *edev,
params.p_sb = fp->sb_info;
params.sb_idx = sb_idx;
+ params.tc = txq->cos;
rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
page_cnt, &ret_params);
@@ -1877,9 +2008,14 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
}
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
- if (rc)
- goto out;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
+ TX_PI(cos));
+ if (rc)
+ goto out;
+ }
}
}
@@ -1973,6 +2109,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
bool is_locked)
{
struct qed_link_params link_params;
+ u8 num_tc;
int rc;
DP_INFO(edev, "Starting qede load\n");
@@ -2019,6 +2156,10 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+ num_tc = netdev_get_num_tc(edev->ndev);
+ num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
+ qede_setup_tc(edev->ndev, num_tc);
+
/* Program un-configured VLANs */
qede_configure_vlan_filters(edev);
@@ -2143,7 +2284,7 @@ static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
- netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
if (netif_xmit_stopped(netdev_txq))
return true;
@@ -2208,9 +2349,11 @@ static void qede_get_eth_tlv_data(void *dev, void *data)
for_each_queue(i) {
fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
- if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod)
+ struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);
+
+ if (txq->sw_tx_cons != txq->sw_tx_prod)
etlv->txqs_empty = false;
- if (qede_is_txq_full(edev, fp->txq))
+ if (qede_is_txq_full(edev, txq))
etlv->num_txqs_full++;
}
if (fp->type & QEDE_FASTPATH_RX) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 7f7deeaf1cf0..3b0adda7cc9c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -351,9 +351,9 @@ skip:
case QLCNIC_BRDTYPE_P3P_REF_QG:
case QLCNIC_BRDTYPE_P3P_4_GB:
case QLCNIC_BRDTYPE_P3P_4_GB_MM:
-
supported |= SUPPORTED_Autoneg;
advertising |= ADVERTISED_Autoneg;
+ /* fall through */
case QLCNIC_BRDTYPE_P3P_10G_CX4:
case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
@@ -377,6 +377,7 @@ skip:
supported |= SUPPORTED_TP;
check_sfp_module = netif_running(adapter->netdev) &&
ahw->has_link_events;
+ /* fall through */
case QLCNIC_BRDTYPE_P3P_10G_XFP:
supported |= SUPPORTED_FIBRE;
advertising |= ADVERTISED_FIBRE;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 7848cf04b29a..822aa393c370 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -276,13 +276,6 @@ static const unsigned crb_hub_agt[64] = {
0,
};
-static const u32 msi_tgt_status[8] = {
- ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
- ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
- ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
- ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
-};
-
/* PCI Windowing for DDR regions. */
#define QLCNIC_PCIE_SEM_TIMEOUT 10000
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 0c744b9c6e0a..77e386ebff09 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -212,7 +212,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
vp->max_tx_bw = MAX_BW;
vp->min_tx_bw = MIN_BW;
vp->spoofchk = false;
- random_ether_addr(vp->mac);
+ eth_random_addr(vp->mac);
dev_info(&adapter->pdev->dev,
"MAC Address %pM is configured for VF %d\n",
vp->mac, i);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
index 4be65d6761b3..957c72985a06 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -1176,6 +1176,7 @@ void ql_mpi_idc_work(struct work_struct *work)
case MB_CMD_PORT_RESET:
case MB_CMD_STOP_FW:
ql_link_off(qdev);
+ /* Fall through */
case MB_CMD_SET_PORT_CFG:
/* Signal the resulting link up AEN
* that the frame routing and mac addr
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index b9a7548ec6a0..0afc3d335d56 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -210,7 +210,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->netdev_ops = &rmnet_vnd_ops;
rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
- random_ether_addr(rmnet_dev->dev_addr);
+ eth_random_addr(rmnet_dev->dev_addr);
rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
/* Raw IP mode */
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 7c69f4c8134d..e1cd934c2e4f 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -99,7 +99,7 @@ config R8169
depends on PCI
select FW_LOADER
select CRC32
- select MII
+ select PHYLIB
---help---
Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index eaedc11ed686..0d9c3831838f 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -15,27 +15,22 @@
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
+#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
-#include <linux/pci-aspm.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#define RTL8169_VERSION "2.3LK-NAPI"
#define MODULENAME "r8169"
-#define PFX MODULENAME ": "
#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
@@ -57,19 +52,6 @@
#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
-#ifdef RTL8169_DEBUG
-#define assert(expr) \
- if (!(expr)) { \
- printk( "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr,__FILE__,__func__,__LINE__); \
- }
-#define dprintk(fmt, args...) \
- do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
-#else
-#define assert(expr) do {} while (0)
-#define dprintk(fmt, args...) do {} while (0)
-#endif /* RTL8169_DEBUG */
-
#define R8169_MSG_DEFAULT \
(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
@@ -95,7 +77,6 @@ static const int multicast_filter_limit = 32;
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
#define RTL8169_TX_TIMEOUT (6*HZ)
-#define RTL8169_PHY_TIMEOUT (10*HZ)
/* write/read MMIO register */
#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg))
@@ -160,136 +141,70 @@ enum mac_version {
RTL_GIGA_MAC_NONE = 0xff,
};
-enum rtl_tx_desc_version {
- RTL_TD_0 = 0,
- RTL_TD_1 = 1,
-};
-
#define JUMBO_1K ETH_DATA_LEN
#define JUMBO_4K (4*1024 - ETH_HLEN - 2)
#define JUMBO_6K (6*1024 - ETH_HLEN - 2)
#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
#define JUMBO_9K (9*1024 - ETH_HLEN - 2)
-#define _R(NAME,TD,FW,SZ) { \
- .name = NAME, \
- .txd_version = TD, \
- .fw_name = FW, \
- .jumbo_max = SZ, \
-}
-
static const struct {
const char *name;
- enum rtl_tx_desc_version txd_version;
const char *fw_name;
- u16 jumbo_max;
} rtl_chip_infos[] = {
/* PCI devices. */
- [RTL_GIGA_MAC_VER_01] =
- _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_02] =
- _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_03] =
- _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_04] =
- _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_05] =
- _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K),
- [RTL_GIGA_MAC_VER_06] =
- _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K),
+ [RTL_GIGA_MAC_VER_01] = {"RTL8169" },
+ [RTL_GIGA_MAC_VER_02] = {"RTL8169s" },
+ [RTL_GIGA_MAC_VER_03] = {"RTL8110s" },
+ [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" },
+ [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" },
+ [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" },
/* PCI-E devices. */
- [RTL_GIGA_MAC_VER_07] =
- _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_08] =
- _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_09] =
- _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_10] =
- _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_11] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K),
- [RTL_GIGA_MAC_VER_12] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K),
- [RTL_GIGA_MAC_VER_13] =
- _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_14] =
- _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_15] =
- _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_16] =
- _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K),
- [RTL_GIGA_MAC_VER_17] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K),
- [RTL_GIGA_MAC_VER_18] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_19] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_20] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_21] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_22] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_23] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_24] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K),
- [RTL_GIGA_MAC_VER_25] =
- _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_26] =
- _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_27] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_28] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_29] =
- _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_30] =
- _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_31] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_32] =
- _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_33] =
- _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_34] =
- _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, JUMBO_9K),
- [RTL_GIGA_MAC_VER_35] =
- _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_36] =
- _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_37] =
- _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_38] =
- _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_39] =
- _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_40] =
- _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_41] =
- _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_42] =
- _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3, JUMBO_9K),
- [RTL_GIGA_MAC_VER_43] =
- _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, JUMBO_1K),
- [RTL_GIGA_MAC_VER_44] =
- _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_45] =
- _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1, JUMBO_9K),
- [RTL_GIGA_MAC_VER_46] =
- _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2, JUMBO_9K),
- [RTL_GIGA_MAC_VER_47] =
- _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1, JUMBO_1K),
- [RTL_GIGA_MAC_VER_48] =
- _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2, JUMBO_1K),
- [RTL_GIGA_MAC_VER_49] =
- _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_50] =
- _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K),
- [RTL_GIGA_MAC_VER_51] =
- _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K),
+ [RTL_GIGA_MAC_VER_07] = {"RTL8102e" },
+ [RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
+ [RTL_GIGA_MAC_VER_09] = {"RTL8102e" },
+ [RTL_GIGA_MAC_VER_10] = {"RTL8101e" },
+ [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" },
+ [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" },
+ [RTL_GIGA_MAC_VER_13] = {"RTL8101e" },
+ [RTL_GIGA_MAC_VER_14] = {"RTL8100e" },
+ [RTL_GIGA_MAC_VER_15] = {"RTL8100e" },
+ [RTL_GIGA_MAC_VER_16] = {"RTL8101e" },
+ [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
+ [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
+ [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" },
+ [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" },
+ [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" },
+ [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" },
+ [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" },
+ [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" },
+ [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1},
+ [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2},
+ [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" },
+ [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" },
+ [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1},
+ [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1},
+ [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" },
+ [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1},
+ [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2},
+ [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3},
+ [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1},
+ [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2},
+ [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 },
+ [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 },
+ [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1},
+ [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2},
+ [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" },
+ [RTL_GIGA_MAC_VER_42] = {"RTL8168g/8111g", FIRMWARE_8168G_3},
+ [RTL_GIGA_MAC_VER_43] = {"RTL8106e", FIRMWARE_8106E_2},
+ [RTL_GIGA_MAC_VER_44] = {"RTL8411", FIRMWARE_8411_2 },
+ [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1},
+ [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2},
+ [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1},
+ [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2},
+ [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" },
+ [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" },
+ [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
};
-#undef _R
enum cfg_version {
RTL_CFG_0 = 0x00,
@@ -399,12 +314,6 @@ enum rtl_registers {
FuncForceEvent = 0xfc,
};
-enum rtl8110_registers {
- TBICSR = 0x64,
- TBI_ANAR = 0x68,
- TBI_LPAR = 0x6a,
-};
-
enum rtl8168_8101_registers {
CSIDR = 0x64,
CSIAR = 0x68,
@@ -571,14 +480,6 @@ enum rtl_register_content {
PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
ASPM_en = (1 << 0), /* ASPM enable */
- /* TBICSR p.28 */
- TBIReset = 0x80000000,
- TBILoopback = 0x40000000,
- TBINwEnable = 0x20000000,
- TBINwRestart = 0x10000000,
- TBILinkOk = 0x02000000,
- TBINwComplete = 0x01000000,
-
/* CPlusCmd p.31 */
EnableBist = (1 << 15), // 8168 8101
Mac_dbgo_oe = (1 << 14), // 8168 8101
@@ -732,7 +633,6 @@ enum rtl_flag {
RTL_FLAG_TASK_ENABLED,
RTL_FLAG_TASK_SLOW_PENDING,
RTL_FLAG_TASK_RESET_PENDING,
- RTL_FLAG_TASK_PHY_PENDING,
RTL_FLAG_MAX
};
@@ -760,7 +660,6 @@ struct rtl8169_private {
dma_addr_t RxPhyAddr;
void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
- struct timer_list timer;
u16 cp_cmd;
u16 event_slow;
@@ -776,14 +675,7 @@ struct rtl8169_private {
void (*disable)(struct rtl8169_private *);
} jumbo_ops;
- int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
- int (*get_link_ksettings)(struct net_device *,
- struct ethtool_link_ksettings *);
- void (*phy_reset_enable)(struct rtl8169_private *tp);
void (*hw_start)(struct rtl8169_private *tp);
- unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
- unsigned int (*link_ok)(struct rtl8169_private *tp);
- int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);
struct {
@@ -792,7 +684,8 @@ struct rtl8169_private {
struct work_struct work;
} wk;
- struct mii_if_info mii;
+ unsigned supports_gmii:1;
+ struct mii_bus *mii_bus;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
@@ -822,7 +715,6 @@ MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_LICENSE("GPL");
-MODULE_VERSION(RTL8169_VERSION);
MODULE_FIRMWARE(FIRMWARE_8168D_1);
MODULE_FIRMWARE(FIRMWARE_8168D_2);
MODULE_FIRMWARE(FIRMWARE_8168E_1);
@@ -1143,21 +1035,6 @@ static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
rtl_writephy(tp, reg_addr, (val & ~m) | p);
}
-static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
- int val)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl_writephy(tp, location, val);
-}
-
-static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- return rtl_readphy(tp, location);
-}
-
DECLARE_RTL_COND(rtl_ephyar_cond)
{
return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
@@ -1478,54 +1355,22 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
RTL_R8(tp, ChipCmd);
}
-static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
-{
- return RTL_R32(tp, TBICSR) & TBIReset;
-}
-
-static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
-{
- return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
-}
-
-static unsigned int rtl8169_tbi_link_ok(struct rtl8169_private *tp)
-{
- return RTL_R32(tp, TBICSR) & TBILinkOk;
-}
-
-static unsigned int rtl8169_xmii_link_ok(struct rtl8169_private *tp)
-{
- return RTL_R8(tp, PHYstatus) & LinkStatus;
-}
-
-static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
-{
- RTL_W32(tp, TBICSR, RTL_R32(tp, TBICSR) | TBIReset);
-}
-
-static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
-{
- unsigned int val;
-
- val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
- rtl_writephy(tp, MII_BMCR, val & 0xffff);
-}
-
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
struct net_device *dev = tp->dev;
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return;
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
- if (RTL_R8(tp, PHYstatus) & _1000bpsF) {
+ if (phydev->speed == SPEED_1000) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
ERIAR_EXGMAC);
- } else if (RTL_R8(tp, PHYstatus) & _100bps) {
+ } else if (phydev->speed == SPEED_100) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
@@ -1543,7 +1388,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
ERIAR_EXGMAC);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36) {
- if (RTL_R8(tp, PHYstatus) & _1000bpsF) {
+ if (phydev->speed == SPEED_1000) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
@@ -1555,7 +1400,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
ERIAR_EXGMAC);
}
} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
- if (RTL_R8(tp, PHYstatus) & _10bps) {
+ if (phydev->speed == SPEED_10) {
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
@@ -1567,25 +1412,6 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
}
}
-static void rtl8169_check_link_status(struct net_device *dev,
- struct rtl8169_private *tp)
-{
- struct device *d = tp_to_dev(tp);
-
- if (tp->link_ok(tp)) {
- rtl_link_chg_patch(tp);
- /* This is to cancel a scheduled suspend if there's one. */
- pm_request_resume(d);
- netif_carrier_on(dev);
- if (net_ratelimit())
- netif_info(tp, ifup, dev, "link up\n");
- } else {
- netif_carrier_off(dev);
- netif_info(tp, ifdown, dev, "link down\n");
- pm_runtime_idle(d);
- }
-}
-
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -1626,21 +1452,11 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
- struct device *d = tp_to_dev(tp);
-
- pm_runtime_get_noresume(d);
rtl_lock_work(tp);
-
wol->supported = WAKE_ANY;
- if (pm_runtime_active(d))
- wol->wolopts = __rtl8169_get_wol(tp);
- else
- wol->wolopts = tp->saved_wolopts;
-
+ wol->wolopts = tp->saved_wolopts;
rtl_unlock_work(tp);
-
- pm_runtime_put_noidle(d);
}
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
@@ -1716,18 +1532,21 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct rtl8169_private *tp = netdev_priv(dev);
struct device *d = tp_to_dev(tp);
+ if (wol->wolopts & ~WAKE_ANY)
+ return -EINVAL;
+
pm_runtime_get_noresume(d);
rtl_lock_work(tp);
+ tp->saved_wolopts = wol->wolopts;
+
if (pm_runtime_active(d))
- __rtl8169_set_wol(tp, wol->wolopts);
- else
- tp->saved_wolopts = wol->wolopts;
+ __rtl8169_set_wol(tp, tp->saved_wolopts);
rtl_unlock_work(tp);
- device_set_wakeup_enable(d, wol->wolopts);
+ device_set_wakeup_enable(d, tp->saved_wolopts);
pm_runtime_put_noidle(d);
@@ -1746,7 +1565,6 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
struct rtl_fw *rtl_fw = tp->rtl_fw;
strlcpy(info->driver, MODULENAME, sizeof(info->driver));
- strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
if (!IS_ERR_OR_NULL(rtl_fw))
@@ -1759,124 +1577,6 @@ static int rtl8169_get_regs_len(struct net_device *dev)
return R8169_REGS_SIZE;
}
-static int rtl8169_set_speed_tbi(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex, u32 ignored)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int ret = 0;
- u32 reg;
-
- reg = RTL_R32(tp, TBICSR);
- if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
- (duplex == DUPLEX_FULL)) {
- RTL_W32(tp, TBICSR, reg & ~(TBINwEnable | TBINwRestart));
- } else if (autoneg == AUTONEG_ENABLE)
- RTL_W32(tp, TBICSR, reg | TBINwEnable | TBINwRestart);
- else {
- netif_warn(tp, link, dev,
- "incorrect speed setting refused in TBI mode\n");
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-static int rtl8169_set_speed_xmii(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex, u32 adv)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int giga_ctrl, bmcr;
- int rc = -EINVAL;
-
- rtl_writephy(tp, 0x1f, 0x0000);
-
- if (autoneg == AUTONEG_ENABLE) {
- int auto_nego;
-
- auto_nego = rtl_readphy(tp, MII_ADVERTISE);
- auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
- ADVERTISE_100HALF | ADVERTISE_100FULL);
-
- if (adv & ADVERTISED_10baseT_Half)
- auto_nego |= ADVERTISE_10HALF;
- if (adv & ADVERTISED_10baseT_Full)
- auto_nego |= ADVERTISE_10FULL;
- if (adv & ADVERTISED_100baseT_Half)
- auto_nego |= ADVERTISE_100HALF;
- if (adv & ADVERTISED_100baseT_Full)
- auto_nego |= ADVERTISE_100FULL;
-
- auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-
- giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
- giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
-
- /* The 8100e/8101e/8102e do Fast Ethernet only. */
- if (tp->mii.supports_gmii) {
- if (adv & ADVERTISED_1000baseT_Half)
- giga_ctrl |= ADVERTISE_1000HALF;
- if (adv & ADVERTISED_1000baseT_Full)
- giga_ctrl |= ADVERTISE_1000FULL;
- } else if (adv & (ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full)) {
- netif_info(tp, link, dev,
- "PHY does not support 1000Mbps\n");
- goto out;
- }
-
- bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
-
- rtl_writephy(tp, MII_ADVERTISE, auto_nego);
- rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
- } else {
- if (speed == SPEED_10)
- bmcr = 0;
- else if (speed == SPEED_100)
- bmcr = BMCR_SPEED100;
- else
- goto out;
-
- if (duplex == DUPLEX_FULL)
- bmcr |= BMCR_FULLDPLX;
- }
-
- rtl_writephy(tp, MII_BMCR, bmcr);
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03) {
- if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
- rtl_writephy(tp, 0x17, 0x2138);
- rtl_writephy(tp, 0x0e, 0x0260);
- } else {
- rtl_writephy(tp, 0x17, 0x2108);
- rtl_writephy(tp, 0x0e, 0x0000);
- }
- }
-
- rc = 0;
-out:
- return rc;
-}
-
-static int rtl8169_set_speed(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex, u32 advertising)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int ret;
-
- ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
- if (ret < 0)
- goto out;
-
- if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
- (advertising & ADVERTISED_1000baseT_Full) &&
- !pci_is_pcie(tp->pci_dev)) {
- mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
- }
-out:
- return ret;
-}
-
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1940,76 +1640,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
}
-static int rtl8169_get_link_ksettings_tbi(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- u32 status;
- u32 supported, advertising;
-
- supported =
- SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
- cmd->base.port = PORT_FIBRE;
-
- status = RTL_R32(tp, TBICSR);
- advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
- cmd->base.autoneg = !!(status & TBINwEnable);
-
- cmd->base.speed = SPEED_1000;
- cmd->base.duplex = DUPLEX_FULL; /* Always set */
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
-
- return 0;
-}
-
-static int rtl8169_get_link_ksettings_xmii(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- mii_ethtool_get_link_ksettings(&tp->mii, cmd);
-
- return 0;
-}
-
-static int rtl8169_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int rc;
-
- rtl_lock_work(tp);
- rc = tp->get_link_ksettings(dev, cmd);
- rtl_unlock_work(tp);
-
- return rc;
-}
-
-static int rtl8169_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int rc;
- u32 advertising;
-
- if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising))
- return -EINVAL;
-
- del_timer_sync(&tp->timer);
-
- rtl_lock_work(tp);
- rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
- cmd->base.duplex, advertising);
- rtl_unlock_work(tp);
-
- return rc;
-}
-
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
@@ -2185,13 +1815,6 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
-static int rtl8169_nway_reset(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- return mii_nway_restart(&tp->mii);
-}
-
/*
* Interrupt coalescing
*
@@ -2264,7 +1887,7 @@ static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev)
const struct rtl_coalesce_info *ci;
int rc;
- rc = rtl8169_get_link_ksettings(dev, &ecmd);
+ rc = phy_ethtool_get_link_ksettings(dev, &ecmd);
if (rc < 0)
return ERR_PTR(rc);
@@ -2422,9 +2045,9 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_sset_count = rtl8169_get_sset_count,
.get_ethtool_stats = rtl8169_get_ethtool_stats,
.get_ts_info = ethtool_op_get_ts_info,
- .nway_reset = rtl8169_nway_reset,
- .get_link_ksettings = rtl8169_get_link_ksettings,
- .set_link_ksettings = rtl8169_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
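
With the conversion to phylib, the driver-private get/set/nway callbacks collapse into the generic phy_ethtool_* helpers. One point worth noting: these helpers operate on dev->phydev, so they only work after the PHY has been attached (see r8169_phy_connect() further down). A minimal sketch of the wiring for any phylib-backed driver (names here are illustrative, not from the patch):

#include <linux/ethtool.h>
#include <linux/phy.h>

/* Sketch: generic phylib-backed ethtool link management. These
 * helpers dereference dev->phydev, so phy_connect() (or
 * phy_connect_direct()) must have succeeded before they run.
 */
static const struct ethtool_ops example_ethtool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};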
@@ -2537,15 +2160,15 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
"unknown MAC, using family default\n");
tp->mac_version = default_version;
} else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
- tp->mac_version = tp->mii.supports_gmii ?
+ tp->mac_version = tp->supports_gmii ?
RTL_GIGA_MAC_VER_42 :
RTL_GIGA_MAC_VER_43;
} else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
- tp->mac_version = tp->mii.supports_gmii ?
+ tp->mac_version = tp->supports_gmii ?
RTL_GIGA_MAC_VER_45 :
RTL_GIGA_MAC_VER_47;
} else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
- tp->mac_version = tp->mii.supports_gmii ?
+ tp->mac_version = tp->supports_gmii ?
RTL_GIGA_MAC_VER_46 :
RTL_GIGA_MAC_VER_48;
}
@@ -2553,7 +2176,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
- dprintk("mac_version = 0x%02x\n", tp->mac_version);
+ netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version);
}
struct phy_reg {
@@ -4405,62 +4028,16 @@ static void rtl_hw_phy_config(struct net_device *dev)
}
}
-static void rtl_phy_work(struct rtl8169_private *tp)
-{
- struct timer_list *timer = &tp->timer;
- unsigned long timeout = RTL8169_PHY_TIMEOUT;
-
- assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
-
- if (tp->phy_reset_pending(tp)) {
- /*
- * A busy loop could burn quite a few cycles on nowadays CPU.
- * Let's delay the execution of the timer for a few ticks.
- */
- timeout = HZ/10;
- goto out_mod_timer;
- }
-
- if (tp->link_ok(tp))
- return;
-
- netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");
-
- tp->phy_reset_enable(tp);
-
-out_mod_timer:
- mod_timer(timer, jiffies + timeout);
-}
-
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
if (!test_and_set_bit(flag, tp->wk.flags))
schedule_work(&tp->wk.work);
}
-static void rtl8169_phy_timer(struct timer_list *t)
-{
- struct rtl8169_private *tp = from_timer(tp, t, timer);
-
- rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
-}
-
-DECLARE_RTL_COND(rtl_phy_reset_cond)
-{
- return tp->phy_reset_pending(tp);
-}
-
-static void rtl8169_phy_reset(struct net_device *dev,
- struct rtl8169_private *tp)
-{
- tp->phy_reset_enable(tp);
- rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
-}
-
static bool rtl_tbi_enabled(struct rtl8169_private *tp)
{
return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
- (RTL_R8(tp, PHYstatus) & TBI_Enable);
+ (RTL_R8(tp, PHYstatus) & TBI_Enable);
}
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
@@ -4468,7 +4045,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
rtl_hw_phy_config(dev);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
- dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ netif_dbg(tp, drv, dev,
+ "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
RTL_W8(tp, 0x82, 0x01);
}
@@ -4478,23 +4056,18 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
- dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ netif_dbg(tp, drv, dev,
+ "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
RTL_W8(tp, 0x82, 0x01);
- dprintk("Set PHY Reg 0x0bh = 0x00h\n");
+ netif_dbg(tp, drv, dev,
+ "Set PHY Reg 0x0bh = 0x00h\n");
rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
}
- rtl8169_phy_reset(dev, tp);
+ /* We may have called phy_speed_down before */
+ phy_speed_up(dev->phydev);
- rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
- ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- (tp->mii.supports_gmii ?
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full : 0));
-
- if (rtl_tbi_enabled(tp))
- netif_info(tp, link, dev, "TBI auto-negotiating\n");
+ genphy_soft_reset(dev->phydev);
}
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -4539,34 +4112,10 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct rtl8169_private *tp = netdev_priv(dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
-}
-
-static int rtl_xmii_ioctl(struct rtl8169_private *tp,
- struct mii_ioctl_data *data, int cmd)
-{
- switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = 32; /* Internal PHY */
- return 0;
-
- case SIOCGMIIREG:
- data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
- return 0;
-
- case SIOCSMIIREG:
- rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
- return 0;
- }
- return -EOPNOTSUPP;
-}
+ if (!netif_running(dev))
+ return -ENODEV;
-static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
-{
- return -EOPNOTSUPP;
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
@@ -4594,30 +4143,6 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
}
}
-static void rtl_speed_down(struct rtl8169_private *tp)
-{
- u32 adv;
- int lpa;
-
- rtl_writephy(tp, 0x1f, 0x0000);
- lpa = rtl_readphy(tp, MII_LPA);
-
- if (lpa & (LPA_10HALF | LPA_10FULL))
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
- else if (lpa & (LPA_100HALF | LPA_100FULL))
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
- else
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- (tp->mii.supports_gmii ?
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full : 0);
-
- rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
- adv);
-}
-
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@@ -4639,56 +4164,15 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
- if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
+ if (!netif_running(tp->dev) || !__rtl8169_get_wol(tp))
return false;
- rtl_speed_down(tp);
+ phy_speed_down(tp->dev->phydev, false);
rtl_wol_suspend_quirk(tp);
return true;
}
-static void r8168_phy_power_up(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- rtl_writephy(tp, 0x0e, 0x0000);
- break;
- default:
- break;
- }
- rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
-
- /* give MAC/PHY some time to resume */
- msleep(20);
-}
-
-static void r8168_phy_power_down(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
- break;
-
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- rtl_writephy(tp, 0x0e, 0x0200);
- default:
- rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
- break;
- }
-}
-
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
if (r8168_check_dash(tp))
@@ -4701,8 +4185,6 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
if (rtl_wol_pll_power_down(tp))
return;
- r8168_phy_power_down(tp);
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
@@ -4754,7 +4236,9 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
break;
}
- r8168_phy_power_up(tp);
+ phy_resume(tp->dev->phydev);
+ /* give MAC/PHY some time to resume */
+ msleep(20);
}
static void rtl_pll_power_down(struct rtl8169_private *tp)
@@ -5172,8 +4656,8 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
tp->mac_version == RTL_GIGA_MAC_VER_03) {
- dprintk("Set MAC Reg C+CR Offset 0xe0. "
- "Bit-3 and bit-14 MUST be 1\n");
+ netif_dbg(tp, drv, tp->dev,
+ "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n");
tp->cp_cmd |= (1 << 14);
}
@@ -5236,12 +4720,7 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
rtl_csi_write(tp, 0x070c, csi | val << 24);
}
-static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
-{
- rtl_csi_access_enable(tp, 0x17);
-}
-
-static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
+static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
{
rtl_csi_access_enable(tp, 0x27);
}
@@ -5290,6 +4769,17 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
RTL_W8(tp, Config3, data);
}
+static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
+{
+ if (enable) {
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
+ } else {
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ }
+}
+
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
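
rtl_hw_aspm_clkreq_enable() folds the Config2/Config5 read-modify-write pairs that the hunks below previously open-coded into one helper. The calling convention those hunks converge on, as a sketch (rtl_ephy_init() and struct ephy_info are the driver's existing EPHY table machinery; the wrapper itself is illustrative):

/* Sketch: ASPM and clock request are quiesced around EPHY
 * programming, then restored. This mirrors the 8168g/8411/
 * 8168h/8168ep hw_start paths below.
 */
static void example_hw_start_with_ephy(struct rtl8169_private *tp,
				       const struct ephy_info *info,
				       int len)
{
	rtl_hw_aspm_clkreq_enable(tp, false);	/* disable ASPM + ClkReq */
	rtl_ephy_init(tp, info, len);		/* program EPHY registers */
	rtl_hw_aspm_clkreq_enable(tp, true);	/* re-enable ASPM + ClkReq */
}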
@@ -5337,7 +4827,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{ 0x07, 0, 0x2000 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
@@ -5346,7 +4836,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -5359,7 +4849,7 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -5383,7 +4873,7 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{ 0x06, 0x0080, 0x0000 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
@@ -5399,7 +4889,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{ 0x03, 0x0400, 0x0220 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
@@ -5413,14 +4903,14 @@ static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
__rtl_hw_start_8168cp(tp);
}
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_disable_clock_request(tp);
@@ -5435,7 +4925,7 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5453,7 +4943,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
{ 0x0c, 0x0100, 0x0020 }
};
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5482,7 +4972,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{ 0x0a, 0x0000, 0x0040 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
@@ -5507,7 +4997,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{ 0x19, 0x0000, 0x0224 }
};
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
@@ -5536,11 +5026,13 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5611,7 +5103,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5646,9 +5138,9 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
/* disable ASPM and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1));
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5681,9 +5173,9 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
/* disable ASPM and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
@@ -5700,8 +5192,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
};
/* disable ASPM and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
@@ -5711,7 +5202,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5780,6 +5271,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
r8168_mac_ocp_write(tp, 0xc094, 0x0000);
r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
@@ -5793,7 +5286,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5831,11 +5324,12 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
};
/* disable ASPM and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1));
rtl_hw_start_8168ep(tp);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
@@ -5847,14 +5341,15 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
};
/* disable ASPM and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2));
rtl_hw_start_8168ep(tp);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
@@ -5868,8 +5363,7 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
};
/* disable ASPM and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3));
rtl_hw_start_8168ep(tp);
@@ -5889,6 +5383,8 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
data = r8168_mac_ocp_read(tp, 0xe860);
data |= 0x0080;
r8168_mac_ocp_write(tp, 0xe860, data);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168(struct rtl8169_private *tp)
@@ -6006,8 +5502,9 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
break;
default:
- printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
- tp->dev->name, tp->mac_version);
+ netif_err(tp, drv, tp->dev,
+ "unknown chipset (mac_version = %d)\n",
+ tp->mac_version);
break;
}
}
@@ -6026,7 +5523,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
};
u8 cfg1;
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, DBG_REG, FIX_NAK_1);
@@ -6045,7 +5542,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -6100,7 +5597,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
{ 0x1e, 0, 0x4000 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
@@ -6384,7 +5881,6 @@ static void rtl_reset_work(struct rtl8169_private *tp)
napi_enable(&tp->napi);
rtl_hw_start(tp);
netif_wake_queue(dev);
- rtl8169_check_link_status(dev, tp);
}
static void rtl8169_tx_timeout(struct net_device *dev)
@@ -6958,20 +6454,15 @@ release_descriptor:
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
struct rtl8169_private *tp = dev_instance;
- int handled = 0;
- u16 status;
+ u16 status = rtl_get_events(tp);
- status = rtl_get_events(tp);
- if (status && status != 0xffff) {
- status &= RTL_EVENT_NAPI | tp->event_slow;
- if (status) {
- handled = 1;
+ if (status == 0xffff || !(status & (RTL_EVENT_NAPI | tp->event_slow)))
+ return IRQ_NONE;
- rtl_irq_disable(tp);
- napi_schedule_irqoff(&tp->napi);
- }
- }
- return IRQ_RETVAL(handled);
+ rtl_irq_disable(tp);
+ napi_schedule_irqoff(&tp->napi);
+
+ return IRQ_HANDLED;
}
/*
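
The flattened interrupt handler above is the standard shared-IRQ guard: a status of 0xffff means the register read hit a removed or powered-off device, and returning IRQ_NONE for events we don't own lets the kernel's spurious-interrupt detection do its job. The idiom in isolation (a hedged sketch; the example_* names and EXAMPLE_EVENT_MASK are hypothetical):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static irqreturn_t example_isr(int irq, void *dev_instance)
{
	struct example_priv *priv = dev_instance;
	u16 status = example_get_events(priv);	/* hypothetical MMIO read */

	/* 0xffff: all-ones read, device gone; otherwise require one of
	 * our event bits before claiming the (possibly shared) IRQ.
	 */
	if (status == 0xffff || !(status & EXAMPLE_EVENT_MASK))
		return IRQ_NONE;

	example_irq_disable(priv);		/* mask until poll completes */
	napi_schedule_irqoff(&priv->napi);	/* hand off to NAPI softirq */

	return IRQ_HANDLED;
}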
@@ -7001,7 +6492,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
rtl8169_pcierr_interrupt(dev);
if (status & LinkChg)
- rtl8169_check_link_status(dev, tp);
+ phy_mac_interrupt(dev->phydev);
rtl_irq_enable_all(tp);
}
@@ -7015,7 +6506,6 @@ static void rtl_task(struct work_struct *work)
/* XXX - keep rtl_slow_event_work() as first element. */
{ RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
{ RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
- { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
};
struct rtl8169_private *tp =
container_of(work, struct rtl8169_private, wk.work);
@@ -7084,11 +6574,51 @@ static void rtl8169_rx_missed(struct net_device *dev)
RTL_W32(tp, RxMissed, 0);
}
+static void r8169_phylink_handler(struct net_device *ndev)
+{
+ struct rtl8169_private *tp = netdev_priv(ndev);
+
+ if (netif_carrier_ok(ndev)) {
+ rtl_link_chg_patch(tp);
+ pm_request_resume(&tp->pci_dev->dev);
+ } else {
+ pm_runtime_idle(&tp->pci_dev->dev);
+ }
+
+ if (net_ratelimit())
+ phy_print_status(ndev->phydev);
+}
+
+static int r8169_phy_connect(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = mdiobus_get_phy(tp->mii_bus, 0);
+ phy_interface_t phy_mode;
+ int ret;
+
+ phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII :
+ PHY_INTERFACE_MODE_MII;
+
+ ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler,
+ phy_mode);
+ if (ret)
+ return ret;
+
+ if (!tp->supports_gmii)
+ phy_set_max_speed(phydev, SPEED_100);
+
+ /* Make sure everything, incl. pause, is advertised */
+ phydev->advertising = phydev->supported;
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
static void rtl8169_down(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- del_timer_sync(&tp->timer);
+ phy_stop(dev->phydev);
napi_disable(&tp->napi);
netif_stop_queue(dev);
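
Taken together with the rtl_open()/rtl8169_close() hunks below, the PHY now follows the canonical phylib lifecycle: connect and start on open, stop on down, disconnect on close. A compressed sketch of that pairing (error handling elided; not the literal driver code):

/* Sketch: phylib attach/start/stop/detach pairing as adopted here. */
static int example_open(struct net_device *dev)
{
	int ret = r8169_phy_connect(netdev_priv(dev));	/* attach PHY */

	if (ret)
		return ret;
	phy_start(dev->phydev);		/* start link management */
	return 0;
}

static int example_close(struct net_device *dev)
{
	phy_stop(dev->phydev);		/* quiesce link management */
	phy_disconnect(dev->phydev);	/* detach PHY from netdev */
	return 0;
}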
@@ -7129,6 +6659,8 @@ static int rtl8169_close(struct net_device *dev)
cancel_work_sync(&tp->wk.work);
+ phy_disconnect(dev->phydev);
+
pci_free_irq(pdev, 0, tp);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
@@ -7189,6 +6721,10 @@ static int rtl_open(struct net_device *dev)
if (retval < 0)
goto err_release_fw_2;
+ retval = r8169_phy_connect(tp);
+ if (retval)
+ goto err_free_irq;
+
rtl_lock_work(tp);
set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
@@ -7204,17 +6740,17 @@ static int rtl_open(struct net_device *dev)
if (!rtl8169_init_counter_offsets(tp))
netif_warn(tp, hw, dev, "counter reset/update failed\n");
+ phy_start(dev->phydev);
netif_start_queue(dev);
rtl_unlock_work(tp);
- tp->saved_wolopts = 0;
pm_runtime_put_sync(&pdev->dev);
-
- rtl8169_check_link_status(dev, tp);
out:
return retval;
+err_free_irq:
+ pci_free_irq(pdev, 0, tp);
err_release_fw_2:
rtl_release_firmware(tp);
rtl8169_rx_clear(tp);
@@ -7293,6 +6829,7 @@ static void rtl8169_net_suspend(struct net_device *dev)
if (!netif_running(dev))
return;
+ phy_stop(dev->phydev);
netif_device_detach(dev);
netif_stop_queue(dev);
@@ -7323,6 +6860,9 @@ static void __rtl8169_resume(struct net_device *dev)
netif_device_attach(dev);
rtl_pll_power_up(tp);
+ rtl8169_init_phy(dev, tp);
+
+ phy_start(tp->dev->phydev);
rtl_lock_work(tp);
napi_enable(&tp->napi);
@@ -7336,9 +6876,6 @@ static int rtl8169_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl8169_init_phy(dev, tp);
if (netif_running(dev))
__rtl8169_resume(dev);
@@ -7352,13 +6889,10 @@ static int rtl8169_runtime_suspend(struct device *device)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if (!tp->TxDescArray) {
- rtl_pll_power_down(tp);
+ if (!tp->TxDescArray)
return 0;
- }
rtl_lock_work(tp);
- tp->saved_wolopts = __rtl8169_get_wol(tp);
__rtl8169_set_wol(tp, WAKE_ANY);
rtl_unlock_work(tp);
@@ -7383,11 +6917,8 @@ static int rtl8169_runtime_resume(struct device *device)
rtl_lock_work(tp);
__rtl8169_set_wol(tp, tp->saved_wolopts);
- tp->saved_wolopts = 0;
rtl_unlock_work(tp);
- rtl8169_init_phy(dev, tp);
-
__rtl8169_resume(dev);
return 0;
@@ -7455,7 +6986,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
rtl8169_hw_reset(tp);
if (system_state == SYSTEM_POWER_OFF) {
- if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+ if (tp->saved_wolopts) {
rtl_wol_suspend_quirk(tp);
rtl_wol_shutdown_quirk(tp);
}
@@ -7476,6 +7007,7 @@ static void rtl_remove_one(struct pci_dev *pdev)
netif_napi_del(&tp->napi);
unregister_netdev(dev);
+ mdiobus_unregister(tp->mii_bus);
rtl_release_firmware(tp);
@@ -7544,6 +7076,11 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
flags = PCI_IRQ_LEGACY;
+ } else if (tp->mac_version == RTL_GIGA_MAC_VER_40) {
+ /* This version was reported to have issues with resume
+ * from suspend when using MSI-X
+ */
+ flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
} else {
flags = PCI_IRQ_ALL_TYPES;
}
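
For reference, the flags computed here feed pci_alloc_irq_vectors(), which tries the permitted interrupt types in order (MSI-X, then MSI, then legacy INTx); passing PCI_IRQ_LEGACY | PCI_IRQ_MSI is how MSI-X is skipped for the chip version with the broken resume. A sketch of the call under the same one-vector assumption as this driver:

#include <linux/pci.h>

/* Sketch: request exactly one vector, constrained by 'flags'.
 * PCI_IRQ_ALL_TYPES == PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX.
 */
static int example_alloc_irq(struct pci_dev *pdev, unsigned int flags)
{
	int ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);

	return ret < 0 ? ret : pci_irq_vector(pdev, 0);	/* Linux IRQ number */
}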
@@ -7561,6 +7098,68 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond)
return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
+static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (phyaddr > 0)
+ return -ENODEV;
+
+ return rtl_readphy(tp, phyreg);
+}
+
+static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
+ int phyreg, u16 val)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (phyaddr > 0)
+ return -ENODEV;
+
+ rtl_writephy(tp, phyreg, val);
+
+ return 0;
+}
+
+static int r8169_mdio_register(struct rtl8169_private *tp)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+ struct phy_device *phydev;
+ struct mii_bus *new_bus;
+ int ret;
+
+ new_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!new_bus)
+ return -ENOMEM;
+
+ new_bus->name = "r8169";
+ new_bus->priv = tp;
+ new_bus->parent = &pdev->dev;
+ new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x",
+ PCI_DEVID(pdev->bus->number, pdev->devfn));
+
+ new_bus->read = r8169_mdio_read_reg;
+ new_bus->write = r8169_mdio_write_reg;
+
+ ret = mdiobus_register(new_bus);
+ if (ret)
+ return ret;
+
+ phydev = mdiobus_get_phy(new_bus, 0);
+ if (!phydev) {
+ mdiobus_unregister(new_bus);
+ return -ENODEV;
+ }
+
+ /* PHY will be woken up in rtl_open() */
+ phy_suspend(phydev);
+
+ tp->mii_bus = new_bus;
+
+ return 0;
+}
+
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
u32 data;
@@ -7614,19 +7213,48 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
}
}
+/* RTL8102e and RTL8168c onwards support csum_v2 */
+static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int rtl_jumbo_max(struct rtl8169_private *tp)
+{
+ /* Non-GBit versions don't support jumbo frames */
+ if (!tp->supports_gmii)
+ return JUMBO_1K;
+
+ switch (tp->mac_version) {
+ /* RTL8169 */
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ return JUMBO_7K;
+ /* RTL8168b */
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ return JUMBO_4K;
+ /* RTL8168c */
+ case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
+ return JUMBO_6K;
+ default:
+ return JUMBO_9K;
+ }
+}
+
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
struct rtl8169_private *tp;
- struct mii_if_info *mii;
struct net_device *dev;
int chipset, region, i;
- int rc;
-
- if (netif_msg_drv(&debug)) {
- printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
- MODULENAME, RTL8169_VERSION);
- }
+ int jumbo_max, rc;
dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
if (!dev)
@@ -7638,19 +7266,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->dev = dev;
tp->pci_dev = pdev;
tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
-
- mii = &tp->mii;
- mii->dev = dev;
- mii->mdio_read = rtl_mdio_read;
- mii->mdio_write = rtl_mdio_write;
- mii->phy_id_mask = 0x1f;
- mii->reg_num_mask = 0x1f;
- mii->supports_gmii = cfg->has_gmii;
-
- /* disable ASPM completely as that cause random device stop working
- * problems as well as full system hangs for some PCIe devices users */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
+ tp->supports_gmii = cfg->has_gmii;
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
@@ -7689,6 +7305,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
rtl8169_get_mac_version(tp, cfg->default_ver);
+ if (rtl_tbi_enabled(tp)) {
+ dev_err(&pdev->dev, "TBI fiber mode not supported\n");
+ return -ENODEV;
+ }
+
tp->cp_cmd = RTL_R16(tp, CPlusCmd);
if ((sizeof(dma_addr_t) > 4) &&
@@ -7736,22 +7357,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->saved_wolopts = __rtl8169_get_wol(tp);
- if (rtl_tbi_enabled(tp)) {
- tp->set_speed = rtl8169_set_speed_tbi;
- tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi;
- tp->phy_reset_enable = rtl8169_tbi_reset_enable;
- tp->phy_reset_pending = rtl8169_tbi_reset_pending;
- tp->link_ok = rtl8169_tbi_link_ok;
- tp->do_ioctl = rtl_tbi_ioctl;
- } else {
- tp->set_speed = rtl8169_set_speed_xmii;
- tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii;
- tp->phy_reset_enable = rtl8169_xmii_reset_enable;
- tp->phy_reset_pending = rtl8169_xmii_reset_pending;
- tp->link_ok = rtl8169_xmii_link_ok;
- tp->do_ioctl = rtl_xmii_ioctl;
- }
-
mutex_init(&tp->wk.mutex);
u64_stats_init(&tp->rx_stats.syncp);
u64_stats_init(&tp->tx_stats.syncp);
@@ -7800,16 +7405,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- switch (rtl_chip_infos[chipset].txd_version) {
- case RTL_TD_0:
- tp->tso_csum = rtl8169_tso_csum_v1;
- break;
- case RTL_TD_1:
+ if (rtl_chip_supports_csum_v2(tp)) {
tp->tso_csum = rtl8169_tso_csum_v2;
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
- break;
- default:
- WARN_ON_ONCE(1);
+ } else {
+ tp->tso_csum = rtl8169_tso_csum_v1;
}
dev->hw_features |= NETIF_F_RXALL;
@@ -7817,14 +7417,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* MTU range: 60 - hw-specific max */
dev->min_mtu = ETH_ZLEN;
- dev->max_mtu = rtl_chip_infos[chipset].jumbo_max;
+ jumbo_max = rtl_jumbo_max(tp);
+ dev->max_mtu = jumbo_max;
tp->hw_start = cfg->hw_start;
tp->event_slow = cfg->event_slow;
tp->coalesce_info = cfg->coalesce_info;
- timer_setup(&tp->timer, rtl8169_phy_timer, 0);
-
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
@@ -7835,30 +7434,39 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- rc = register_netdev(dev);
- if (rc < 0)
+ rc = r8169_mdio_register(tp);
+ if (rc)
return rc;
+ /* chip gets powered up in rtl_open() */
+ rtl_pll_power_down(tp);
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_mdio_unregister;
+
netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n",
rtl_chip_infos[chipset].name, dev->dev_addr,
(u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff),
pci_irq_vector(pdev, 0));
- if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
- netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
- "tx checksumming: %s]\n",
- rtl_chip_infos[chipset].jumbo_max,
- tp->mac_version <= RTL_GIGA_MAC_VER_06 ? "ok" : "ko");
- }
+
+ if (jumbo_max > JUMBO_1K)
+ netif_info(tp, probe, dev,
+ "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
+ jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
+ "ok" : "ko");
if (r8168_check_dash(tp))
rtl8168_driver_start(tp);
- netif_carrier_off(dev);
-
if (pci_dev_run_wake(pdev))
pm_runtime_put_sync(&pdev->dev);
return 0;
+
+err_mdio_unregister:
+ mdiobus_unregister(tp->mii_bus);
+ return rc;
}
static struct pci_driver rtl8169_pci_driver = {
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 0d811c02ff34..c06f2df895c2 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1167,7 +1167,7 @@ static int ravb_get_sset_count(struct net_device *netdev, int sset)
}
static void ravb_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *estats, u64 *data)
{
struct ravb_private *priv = netdev_priv(ndev);
int i = 0;
@@ -1199,7 +1199,7 @@ static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
+ memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
break;
}
}
@@ -1564,7 +1564,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* TAG and timestamp required flag */
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
- desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
+ desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
}
skb_tx_timestamp(skb);
@@ -1597,7 +1597,8 @@ drop:
}
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
/* If skb needs TX timestamp, it is handled in network control queue */
return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5614fd231bbe..5573199c4536 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -439,10 +439,15 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
enum_index);
}
+static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index)
+{
+ return mdp->reg_offset[enum_index];
+}
+
static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
int enum_index)
{
- u16 offset = mdp->reg_offset[enum_index];
+ u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);
if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
return;
@@ -452,7 +457,7 @@ static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
{
- u16 offset = mdp->reg_offset[enum_index];
+ u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);
if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
return ~0U;
@@ -622,7 +627,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
.tpauser = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -672,7 +676,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -798,7 +801,6 @@ static struct sh_eth_cpu_data r8a77980_data = {
.hw_swap = 1,
.nbst = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -851,7 +853,6 @@ static struct sh_eth_cpu_data sh7724_data = {
.tpauser = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
static void sh_eth_set_rate_sh7757(struct net_device *ndev)
@@ -898,7 +899,6 @@ static struct sh_eth_cpu_data sh7757_data = {
.hw_swap = 1,
.no_ade = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.rtrate = 1,
.dual_port = 1,
};
@@ -978,7 +978,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -1467,7 +1466,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* Descriptor format */
sh_eth_ring_format(ndev);
if (mdp->cd->rpadir)
- sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
+ sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR);
/* all sh_eth int mask */
sh_eth_write(ndev, 0, EESIPR);
@@ -1527,9 +1526,9 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* mask reset */
if (mdp->cd->apr)
- sh_eth_write(ndev, APR_AP, APR);
+ sh_eth_write(ndev, 1, APR);
if (mdp->cd->mpr)
- sh_eth_write(ndev, MPR_MP, MPR);
+ sh_eth_write(ndev, 1, MPR);
if (mdp->cd->tpauser)
sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
@@ -2677,34 +2676,36 @@ static int sh_eth_tsu_busy(struct net_device *ndev)
return 0;
}
-static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
+static int sh_eth_tsu_write_entry(struct net_device *ndev, u16 offset,
const u8 *addr)
{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
u32 val;
val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
- iowrite32(val, reg);
+ iowrite32(val, mdp->tsu_addr + offset);
if (sh_eth_tsu_busy(ndev) < 0)
return -EBUSY;
val = addr[4] << 8 | addr[5];
- iowrite32(val, reg + 4);
+ iowrite32(val, mdp->tsu_addr + offset + 4);
if (sh_eth_tsu_busy(ndev) < 0)
return -EBUSY;
return 0;
}
-static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
+static void sh_eth_tsu_read_entry(struct net_device *ndev, u16 offset, u8 *addr)
{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
u32 val;
- val = ioread32(reg);
+ val = ioread32(mdp->tsu_addr + offset);
addr[0] = (val >> 24) & 0xff;
addr[1] = (val >> 16) & 0xff;
addr[2] = (val >> 8) & 0xff;
addr[3] = val & 0xff;
- val = ioread32(reg + 4);
+ val = ioread32(mdp->tsu_addr + offset + 4);
addr[4] = (val >> 8) & 0xff;
addr[5] = val & 0xff;
}
@@ -2713,12 +2714,12 @@ static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+ u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i;
u8 c_addr[ETH_ALEN];
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
- sh_eth_tsu_read_entry(reg_offset, c_addr);
+ sh_eth_tsu_read_entry(ndev, reg_offset, c_addr);
if (ether_addr_equal(addr, c_addr))
return i;
}
@@ -2740,7 +2741,7 @@ static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
int entry)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+ u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int ret;
u8 blank[ETH_ALEN];
@@ -2757,7 +2758,7 @@ static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
+ u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i, ret;
if (!mdp->cd->tsu)
@@ -2831,15 +2832,15 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev)
static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
u8 addr[ETH_ALEN];
- void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i;
if (!mdp->cd->tsu)
return;
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
- sh_eth_tsu_read_entry(reg_offset, addr);
+ sh_eth_tsu_read_entry(ndev, reg_offset, addr);
if (is_multicast_ether_addr(addr))
sh_eth_tsu_del_entry(ndev, addr);
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 726c55a82dd7..f94be99cf400 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -383,12 +383,12 @@ enum ECSIPR_STATUS_MASK_BIT {
/* APR */
enum APR_BIT {
- APR_AP = 0x00000001,
+ APR_AP = 0x0000ffff,
};
/* MPR */
enum MPR_BIT {
- MPR_MP = 0x00000001,
+ MPR_MP = 0x0000ffff,
};
/* TRSCER */
@@ -403,8 +403,7 @@ enum DESC_I_BIT {
/* RPADIR */
enum RPADIR_BIT {
- RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
- RPADIR_PADR = 0x0003f,
+ RPADIR_PADS = 0x1f0000, RPADIR_PADR = 0xffff,
};
/* FDR */
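
The RPADIR rework is behaviour-preserving: the padding count occupies the field starting at bit 16 (RPADIR_PADS above), so writing NET_IP_ALIGN << 16 asks the MAC to prepend NET_IP_ALIGN bytes to each received frame. With the usual NET_IP_ALIGN of 2 that is 0x00020000, exactly the per-chip rpadir_value constants that were removed. As a sketch (assumption: NET_IP_ALIGN == 2, the default on most architectures):

#include <linux/skbuff.h>	/* NET_IP_ALIGN */

/* Sketch: the value now written to RPADIR in sh_eth_dev_init().
 * NET_IP_ALIGN << 16 == 2 << 16 == 0x00020000 when NET_IP_ALIGN == 2,
 * keeping the RX IP header aligned without a per-chip magic number.
 */
static inline u32 example_rpadir_value(void)
{
	return NET_IP_ALIGN << 16;
}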
@@ -488,7 +487,6 @@ struct sh_eth_cpu_data {
u32 ecsipr_value;
u32 fdr_value;
u32 fcftr_value;
- u32 rpadir_value;
/* interrupt checking mask */
u32 tx_check;
@@ -560,10 +558,4 @@ struct sh_eth_private {
unsigned wol_enabled:1;
};
-static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
- int enum_index)
-{
- return mdp->tsu_addr + mdp->reg_offset[enum_index];
-}
-
#endif /* #ifndef __SH_ETH_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 542b67d436df..c9aad0eda57f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -319,6 +319,7 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
case TCP_V4_FLOW:
case UDP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -329,6 +330,7 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
case TCP_V6_FLOW:
case UDP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
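
The added /* Fall through */ comments document that the missing break is deliberate: TCP/UDP flows pick up the L4 port hash bits and then continue into the shared cases that add the L3 address bits. This is also the annotation GCC's -Wimplicit-fallthrough recognizes. The shape of it in isolation (a sketch, not the driver's exact code):

#include <linux/ethtool.h>

static u64 example_hash_fields(u32 flow_type)
{
	u64 data = 0;

	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;	/* hash L4 ports too */
		/* Fall through */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
		data |= RXH_IP_SRC | RXH_IP_DST;	/* hash IP addresses */
		break;
	}
	return data;
}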
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 3bac58d0f88b..c5c297e78d06 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -6,3 +6,5 @@ sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
obj-$(CONFIG_SFC) += sfc.o
+
+obj-$(CONFIG_SFC_FALCON) += falcon/
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 019cef1d3cf7..3d76fd1504c2 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -199,7 +199,7 @@ static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx)
return -ENOMEM;
for (i = 0; i < efx->vf_count; i++) {
- random_ether_addr(nic_data->vf[i].mac);
+ eth_random_addr(nic_data->vf[i].mac);
nic_data->vf[i].efx = NULL;
nic_data->vf[i].vlan = EFX_EF10_NO_VLAN;
@@ -564,7 +564,7 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct ef10_vf *vf;
- u16 old_vlan, new_vlan;
+ u16 new_vlan;
int rc = 0, rc2 = 0;
if (vf_i >= efx->vf_count)
@@ -619,7 +619,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
}
/* Do the actual vlan change */
- old_vlan = vf->vlan;
vf->vlan = new_vlan;
/* Restore everything in reverse order */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index ce3a177081a8..330233286e78 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -264,11 +264,17 @@ static int efx_check_disabled(struct efx_nic *efx)
static int efx_process_channel(struct efx_channel *channel, int budget)
{
struct efx_tx_queue *tx_queue;
+ struct list_head rx_list;
int spent;
if (unlikely(!channel->enabled))
return 0;
+ /* Prepare the batch receive list */
+ EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
+ INIT_LIST_HEAD(&rx_list);
+ channel->rx_list = &rx_list;
+
efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->pkts_compl = 0;
tx_queue->bytes_compl = 0;
@@ -291,6 +297,10 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
}
}
+ /* Receive any packets we queued up */
+ netif_receive_skb_list(channel->rx_list);
+ channel->rx_list = NULL;
+
return spent;
}
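
This adopts netif_receive_skb_list(): SKBs completed during one NAPI poll are queued on an on-stack list and handed to the stack as a batch, amortizing per-packet costs in the upper layers. Reduced to its moving parts (a sketch; example_poll_rx() is hypothetical and stands in for the RX path, which appends via the list_add_tail() shown in efx_rx_deliver() below):

#include <linux/list.h>
#include <linux/netdevice.h>

static int example_poll(struct example_channel *ch, int budget)
{
	LIST_HEAD(rx_list);		/* batch for this poll only */
	int spent;

	ch->rx_list = &rx_list;		/* producer appends SKBs here */
	spent = example_poll_rx(ch, budget);

	netif_receive_skb_list(ch->rx_list);	/* deliver the whole batch */
	ch->rx_list = NULL;

	return spent;
}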
@@ -555,6 +565,8 @@ static int efx_probe_channel(struct efx_channel *channel)
goto fail;
}
+ channel->rx_list = NULL;
+
return 0;
fail:
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 56049157a5af..1ccdb7a82e2a 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -963,6 +963,7 @@ ef4_ethtool_get_rxnfc(struct net_device *net_dev,
switch (info->flow_type) {
case TCP_V4_FLOW:
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 65568925c3ef..961b92979640 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -448,6 +448,7 @@ enum efx_sync_events_state {
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
* by __efx_rx_packet(), if @rx_pkt_n_frags != 0
+ * @rx_list: list of SKBs from current RX, awaiting processing
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
* @sync_events_state: Current state of sync events on this channel
@@ -500,6 +501,8 @@ struct efx_channel {
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
+ struct list_head *rx_list;
+
struct efx_rx_queue rx_queue;
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d2e254f2f72b..396ff01298cd 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -634,7 +634,12 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
return;
/* Pass the packet up */
- netif_receive_skb(skb);
+ if (channel->rx_list != NULL)
+ /* Add to list, will pass up later */
+ list_add_tail(&skb->list, channel->rx_list);
+ else
+ /* No list, so pass it up now */
+ netif_receive_skb(skb);
}
/* Handle a received packet. Second half: Touches packet payload. */
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 949aaef390b6..15c62c160953 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -321,7 +321,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static int card_idx = -1;
void __iomem *ioaddr;
int chip_idx = (int) ent->driver_data;
- int irq;
struct net_device *dev;
struct epic_private *ep;
int i, ret, option = 0, duplex = 0;
@@ -338,7 +337,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = pci_enable_device(pdev);
if (ret)
goto out;
- irq = pdev->irq;
if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
dev_err(&pdev->dev, "no PCI region space\n");
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index e080d3e7c582..7aa5ebb6766c 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -232,8 +232,7 @@
#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
#define NETSEC_EEPROM_PKT_ME_SIZE 0x24
-#define DESC_NUM 128
-#define NAPI_BUDGET (DESC_NUM / 2)
+#define DESC_NUM 256
#define DESC_SZ sizeof(struct netsec_de)
@@ -642,8 +641,6 @@ static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
tmp_skb = netsec_alloc_skb(priv, &td);
- dma_rmb();
-
tail = dring->tail;
if (!tmp_skb) {
@@ -657,8 +654,6 @@ static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
/* move tail ahead */
dring->tail = (dring->tail + 1) % DESC_NUM;
- dring->pkt_cnt--;
-
return skb;
}
@@ -731,25 +726,24 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
struct net_device *ndev = priv->ndev;
struct netsec_rx_pkt_info rx_info;
- int done = 0, rx_num = 0;
+ int done = 0;
struct netsec_desc desc;
struct sk_buff *skb;
u16 len;
while (done < budget) {
- if (!rx_num) {
- rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
- dring->pkt_cnt += rx_num;
+ u16 idx = dring->tail;
+ struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
- /* move head 'rx_num' */
- dring->head = (dring->head + rx_num) % DESC_NUM;
+ if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD))
+ break;
- rx_num = dring->pkt_cnt;
- if (!rx_num)
- break;
- }
+ /* This barrier is needed to keep us from reading
+ * any other fields out of the netsec_de until we have
+ * verified the descriptor has been written back
+ */
+ dma_rmb();
done++;
- rx_num--;
skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
if (unlikely(!skb) || rx_info.err_flag) {
netif_err(priv, drv, priv->ndev,
@@ -780,11 +774,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
static int netsec_napi_poll(struct napi_struct *napi, int budget)
{
struct netsec_priv *priv;
- struct net_device *ndev;
int tx, rx, done, todo;
priv = container_of(napi, struct netsec_priv, napi);
- ndev = priv->ndev;
todo = budget;
do {
@@ -1666,7 +1658,7 @@ static int netsec_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "hardware revision %d.%d\n",
hw_ver >> 16, hw_ver & 0xffff);
- netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET);
+ netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);
ndev->netdev_ops = &netsec_netdev_ops;
ndev->ethtool_ops = &netsec_ethtool_ops;
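The rework above replaces the NETSEC_REG_NRM_RX_PKTCNT bookkeeping with the standard descriptor-ownership idiom: test the OWN bit first, then order all further descriptor reads behind it with dma_rmb(). Distilled to its essentials (process_one_rx() is a hypothetical stand-in for the loop body):

    static int example_reap_rx(struct netsec_desc_ring *dring, int budget)
    {
        int done = 0;

        while (done < budget) {
            struct netsec_de *de = dring->vaddr + (DESC_SZ * dring->tail);

            /* Hardware still owns this descriptor: nothing left to reap */
            if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD))
                break;

            /* Order the OWN test before reads of the other descriptor
             * fields, so a half-written descriptor is never consumed.
             */
            dma_rmb();

            process_one_rx(de);    /* hypothetical */
            dring->tail = (dring->tail + 1) % DESC_NUM;
            done++;
        }

        return done;
    }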
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 68e9e2640c62..99967a80a8c8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -5,7 +5,8 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
- stmmac_tc.o $(stmmac-y)
+ stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
+ $(stmmac-y)
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 78fd0f8b8e81..1854f270ad66 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -36,12 +36,14 @@
#include "mmc.h"
/* Synopsys Core versions */
-#define DWMAC_CORE_3_40 0x34
-#define DWMAC_CORE_3_50 0x35
-#define DWMAC_CORE_4_00 0x40
-#define DWMAC_CORE_4_10 0x41
-#define DWMAC_CORE_5_00 0x50
-#define DWMAC_CORE_5_10 0x51
+#define DWMAC_CORE_3_40 0x34
+#define DWMAC_CORE_3_50 0x35
+#define DWMAC_CORE_4_00 0x40
+#define DWMAC_CORE_4_10 0x41
+#define DWMAC_CORE_5_00 0x50
+#define DWMAC_CORE_5_10 0x51
+#define DWXGMAC_CORE_2_10 0x21
+
#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
/* These need to be power of two, and >= 4 */
@@ -398,6 +400,8 @@ struct mac_link {
u32 speed10;
u32 speed100;
u32 speed1000;
+ u32 speed2500;
+ u32 speed10000;
u32 duplex;
};
@@ -439,6 +443,7 @@ struct stmmac_rx_routing {
int dwmac100_setup(struct stmmac_priv *priv);
int dwmac1000_setup(struct stmmac_priv *priv);
int dwmac4_setup(struct stmmac_priv *priv);
+int dwxgmac2_setup(struct stmmac_priv *priv);
void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index 3304095c934c..fad503820e04 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -78,6 +78,8 @@ static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "snps,dwmac-4.00"},
{ .compatible = "snps,dwmac-4.10a"},
{ .compatible = "snps,dwmac"},
+ { .compatible = "snps,dwxgmac-2.10"},
+ { .compatible = "snps,dwxgmac"},
{ }
};
MODULE_DEVICE_TABLE(of, dwmac_generic_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index f08625a02cea..7b923362ee55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -61,6 +61,7 @@ struct rk_priv_data {
struct clk *mac_clk_tx;
struct clk *clk_mac_ref;
struct clk *clk_mac_refout;
+ struct clk *clk_mac_speed;
struct clk *aclk_mac;
struct clk *pclk_mac;
struct clk *clk_phy;
@@ -83,6 +84,64 @@ struct rk_priv_data {
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+#define PX30_GRF_GMAC_CON1 0x0904
+
+/* PX30_GRF_GMAC_CON1 */
+#define PX30_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
+ GRF_BIT(6))
+#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2)
+#define PX30_GMAC_SPEED_100M GRF_BIT(2)
+
+static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ int ret;
+
+ if (IS_ERR(bsp_priv->clk_mac_speed)) {
+ dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_SPEED_10M);
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
+ __func__, ret);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_SPEED_100M);
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
+ __func__, ret);
+
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops px30_ops = {
+ .set_to_rmii = px30_set_to_rmii,
+ .set_rmii_speed = px30_set_rmii_speed,
+};
+
#define RK3128_GRF_MAC_CON0 0x0168
#define RK3128_GRF_MAC_CON1 0x016c
@@ -1042,6 +1101,10 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
}
}
+ bsp_priv->clk_mac_speed = devm_clk_get(dev, "clk_mac_speed");
+ if (IS_ERR(bsp_priv->clk_mac_speed))
+ dev_err(dev, "cannot get clock %s\n", "clk_mac_speed");
+
if (bsp_priv->clock_input) {
dev_info(dev, "clock input from PHY\n");
} else {
@@ -1094,6 +1157,9 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
if (!IS_ERR(bsp_priv->mac_clk_tx))
clk_prepare_enable(bsp_priv->mac_clk_tx);
+ if (!IS_ERR(bsp_priv->clk_mac_speed))
+ clk_prepare_enable(bsp_priv->clk_mac_speed);
+
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_prepare_enable(bsp_priv->clk_mac);
@@ -1118,6 +1184,8 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
clk_disable_unprepare(bsp_priv->pclk_mac);
clk_disable_unprepare(bsp_priv->mac_clk_tx);
+
+ clk_disable_unprepare(bsp_priv->clk_mac_speed);
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_disable_unprepare(bsp_priv->clk_mac);
@@ -1414,6 +1482,7 @@ static int rk_gmac_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
static const struct of_device_id rk_gmac_dwmac_match[] = {
+ { .compatible = "rockchip,px30-gmac", .data = &px30_ops },
{ .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
{ .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 65bc3556bd8f..edb6053bd980 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -407,6 +407,19 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
}
}
+static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
+{
+ u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+ mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
+ if (qmode != MTL_QUEUE_AVB)
+ mtl_tx_op |= MTL_OP_MODE_TXQEN;
+ else
+ mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
+
+ writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+}
+
static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
{
u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
@@ -441,6 +454,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
.enable_tso = dwmac4_enable_tso,
+ .qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
};
@@ -468,5 +482,6 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
.enable_tso = dwmac4_enable_tso,
+ .qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
};
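dwmac4_qmode() gives the core a way to flip an MTL TX queue between generic (DCB) and AVB operation at runtime; a hedged sketch of the caller side, going through the qmode DMA op added to hwif.h later in this diff:

    /* Illustrative: put a TX queue into AVB mode before configuring CBS */
    static void example_enable_avb(struct stmmac_priv *priv, u32 queue)
    {
        /* Dispatches to dwmac4_qmode() via priv->hw->dma->qmode */
        stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
    }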
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
new file mode 100644
index 000000000000..0a80fa25afe3
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC definitions.
+ */
+
+#ifndef __STMMAC_DWXGMAC2_H__
+#define __STMMAC_DWXGMAC2_H__
+
+#include "common.h"
+
+/* Misc */
+#define XGMAC_JUMBO_LEN 16368
+
+/* MAC Registers */
+#define XGMAC_TX_CONFIG 0x00000000
+#define XGMAC_CONFIG_SS_OFF 29
+#define XGMAC_CONFIG_SS_MASK GENMASK(30, 29)
+#define XGMAC_CONFIG_SS_10000 (0x0 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_2500 (0x2 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_1000 (0x3 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SARC GENMASK(22, 20)
+#define XGMAC_CONFIG_SARC_SHIFT 20
+#define XGMAC_CONFIG_JD BIT(16)
+#define XGMAC_CONFIG_TE BIT(0)
+#define XGMAC_CORE_INIT_TX (XGMAC_CONFIG_JD)
+#define XGMAC_RX_CONFIG 0x00000004
+#define XGMAC_CONFIG_ARPEN BIT(31)
+#define XGMAC_CONFIG_GPSL GENMASK(29, 16)
+#define XGMAC_CONFIG_GPSL_SHIFT 16
+#define XGMAC_CONFIG_S2KP BIT(11)
+#define XGMAC_CONFIG_IPC BIT(9)
+#define XGMAC_CONFIG_JE BIT(8)
+#define XGMAC_CONFIG_WD BIT(7)
+#define XGMAC_CONFIG_GPSLCE BIT(6)
+#define XGMAC_CONFIG_CST BIT(2)
+#define XGMAC_CONFIG_ACS BIT(1)
+#define XGMAC_CONFIG_RE BIT(0)
+#define XGMAC_CORE_INIT_RX 0
+#define XGMAC_PACKET_FILTER 0x00000008
+#define XGMAC_FILTER_RA BIT(31)
+#define XGMAC_FILTER_PM BIT(4)
+#define XGMAC_FILTER_HMC BIT(2)
+#define XGMAC_FILTER_PR BIT(0)
+#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
+#define XGMAC_RXQ_CTRL0 0x000000a0
+#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
+#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
+#define XGMAC_RXQ_CTRL2 0x000000a8
+#define XGMAC_RXQ_CTRL3 0x000000ac
+#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8)
+#define XGMAC_PSRQ_SHIFT(x) ((x) * 8)
+#define XGMAC_INT_STATUS 0x000000b0
+#define XGMAC_PMTIS BIT(4)
+#define XGMAC_INT_EN 0x000000b4
+#define XGMAC_TSIE BIT(12)
+#define XGMAC_LPIIE BIT(5)
+#define XGMAC_PMTIE BIT(4)
+#define XGMAC_INT_DEFAULT_EN (XGMAC_LPIIE | XGMAC_PMTIE | XGMAC_TSIE)
+#define XGMAC_Qx_TX_FLOW_CTRL(x) (0x00000070 + (x) * 4)
+#define XGMAC_PT GENMASK(31, 16)
+#define XGMAC_PT_SHIFT 16
+#define XGMAC_TFE BIT(1)
+#define XGMAC_RX_FLOW_CTRL 0x00000090
+#define XGMAC_RFE BIT(0)
+#define XGMAC_PMT 0x000000c0
+#define XGMAC_GLBLUCAST BIT(9)
+#define XGMAC_RWKPKTEN BIT(2)
+#define XGMAC_MGKPKTEN BIT(1)
+#define XGMAC_PWRDWN BIT(0)
+#define XGMAC_HW_FEATURE0 0x0000011c
+#define XGMAC_HWFEAT_SAVLANINS BIT(27)
+#define XGMAC_HWFEAT_RXCOESEL BIT(16)
+#define XGMAC_HWFEAT_TXCOESEL BIT(14)
+#define XGMAC_HWFEAT_TSSEL BIT(12)
+#define XGMAC_HWFEAT_AVSEL BIT(11)
+#define XGMAC_HWFEAT_RAVSEL BIT(10)
+#define XGMAC_HWFEAT_ARPOFFSEL BIT(9)
+#define XGMAC_HWFEAT_MGKSEL BIT(7)
+#define XGMAC_HWFEAT_RWKSEL BIT(6)
+#define XGMAC_HWFEAT_GMIISEL BIT(1)
+#define XGMAC_HW_FEATURE1 0x00000120
+#define XGMAC_HWFEAT_TSOEN BIT(18)
+#define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6)
+#define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0)
+#define XGMAC_HW_FEATURE2 0x00000124
+#define XGMAC_HWFEAT_PPSOUTNUM GENMASK(26, 24)
+#define XGMAC_HWFEAT_TXCHCNT GENMASK(21, 18)
+#define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12)
+#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6)
+#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0)
+#define XGMAC_MDIO_ADDR 0x00000200
+#define XGMAC_MDIO_DATA 0x00000204
+#define XGMAC_MDIO_C22P 0x00000220
+#define XGMAC_ADDR0_HIGH 0x00000300
+#define XGMAC_AE BIT(31)
+#define XGMAC_DCS GENMASK(19, 16)
+#define XGMAC_DCS_SHIFT 16
+#define XGMAC_ADDR0_LOW 0x00000304
+#define XGMAC_ARP_ADDR 0x00000c10
+#define XGMAC_TIMESTAMP_STATUS 0x00000d20
+#define XGMAC_TXTSC BIT(15)
+#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30
+#define XGMAC_TXTSSTSLO GENMASK(30, 0)
+#define XGMAC_TXTIMESTAMP_SEC 0x00000d34
+
+/* MTL Registers */
+#define XGMAC_MTL_OPMODE 0x00001000
+#define XGMAC_ETSALG GENMASK(6, 5)
+#define XGMAC_WRR (0x0 << 5)
+#define XGMAC_WFQ (0x1 << 5)
+#define XGMAC_DWRR (0x2 << 5)
+#define XGMAC_RAA BIT(2)
+#define XGMAC_MTL_INT_STATUS 0x00001020
+#define XGMAC_MTL_RXQ_DMA_MAP0 0x00001030
+#define XGMAC_MTL_RXQ_DMA_MAP1 0x00001034
+#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 3, (x) * 8)
+#define XGMAC_QxMDMACH_SHIFT(x) ((x) * 8)
+#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
+#define XGMAC_TQS GENMASK(25, 16)
+#define XGMAC_TQS_SHIFT 16
+#define XGMAC_TTC GENMASK(6, 4)
+#define XGMAC_TTC_SHIFT 4
+#define XGMAC_TXQEN GENMASK(3, 2)
+#define XGMAC_TXQEN_SHIFT 2
+#define XGMAC_TSF BIT(1)
+#define XGMAC_MTL_RXQ_OPMODE(x) (0x00001140 + (0x80 * (x)))
+#define XGMAC_RQS GENMASK(25, 16)
+#define XGMAC_RQS_SHIFT 16
+#define XGMAC_EHFC BIT(7)
+#define XGMAC_RSF BIT(5)
+#define XGMAC_RTC GENMASK(1, 0)
+#define XGMAC_RTC_SHIFT 0
+#define XGMAC_MTL_QINTEN(x) (0x00001170 + (0x80 * (x)))
+#define XGMAC_RXOIE BIT(16)
+#define XGMAC_MTL_QINT_STATUS(x) (0x00001174 + (0x80 * (x)))
+#define XGMAC_RXOVFIS BIT(16)
+#define XGMAC_ABPSIS BIT(1)
+#define XGMAC_TXUNFIS BIT(0)
+
+/* DMA Registers */
+#define XGMAC_DMA_MODE 0x00003000
+#define XGMAC_SWR BIT(0)
+#define XGMAC_DMA_SYSBUS_MODE 0x00003004
+#define XGMAC_WR_OSR_LMT GENMASK(29, 24)
+#define XGMAC_WR_OSR_LMT_SHIFT 24
+#define XGMAC_RD_OSR_LMT GENMASK(21, 16)
+#define XGMAC_RD_OSR_LMT_SHIFT 16
+#define XGMAC_EN_LPI BIT(15)
+#define XGMAC_LPI_XIT_PKT BIT(14)
+#define XGMAC_AAL BIT(12)
+#define XGMAC_BLEN GENMASK(7, 1)
+#define XGMAC_BLEN256 BIT(7)
+#define XGMAC_BLEN128 BIT(6)
+#define XGMAC_BLEN64 BIT(5)
+#define XGMAC_BLEN32 BIT(4)
+#define XGMAC_BLEN16 BIT(3)
+#define XGMAC_BLEN8 BIT(2)
+#define XGMAC_BLEN4 BIT(1)
+#define XGMAC_UNDEF BIT(0)
+#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
+#define XGMAC_PBLx8 BIT(16)
+#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
+#define XGMAC_TxPBL GENMASK(21, 16)
+#define XGMAC_TxPBL_SHIFT 16
+#define XGMAC_TSE BIT(12)
+#define XGMAC_OSP BIT(4)
+#define XGMAC_TXST BIT(0)
+#define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x)))
+#define XGMAC_RxPBL GENMASK(21, 16)
+#define XGMAC_RxPBL_SHIFT 16
+#define XGMAC_RXST BIT(0)
+#define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_LADDR(x) (0x0000311c + (0x80 * (x)))
+#define XGMAC_DMA_CH_TxDESC_TAIL_LPTR(x) (0x00003124 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_TAIL_LPTR(x) (0x0000312c + (0x80 * (x)))
+#define XGMAC_DMA_CH_TxDESC_RING_LEN(x) (0x00003130 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_RING_LEN(x) (0x00003134 + (0x80 * (x)))
+#define XGMAC_DMA_CH_INT_EN(x) (0x00003138 + (0x80 * (x)))
+#define XGMAC_NIE BIT(15)
+#define XGMAC_AIE BIT(14)
+#define XGMAC_RBUE BIT(7)
+#define XGMAC_RIE BIT(6)
+#define XGMAC_TIE BIT(0)
+#define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \
+ XGMAC_RIE | XGMAC_TIE)
+#define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x)))
+#define XGMAC_RWT GENMASK(7, 0)
+#define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x)))
+#define XGMAC_NIS BIT(15)
+#define XGMAC_AIS BIT(14)
+#define XGMAC_FBE BIT(12)
+#define XGMAC_RBU BIT(7)
+#define XGMAC_RI BIT(6)
+#define XGMAC_TPS BIT(1)
+#define XGMAC_TI BIT(0)
+
+/* Descriptors */
+#define XGMAC_TDES2_IOC BIT(31)
+#define XGMAC_TDES2_TTSE BIT(30)
+#define XGMAC_TDES2_B2L GENMASK(29, 16)
+#define XGMAC_TDES2_B2L_SHIFT 16
+#define XGMAC_TDES2_B1L GENMASK(13, 0)
+#define XGMAC_TDES3_OWN BIT(31)
+#define XGMAC_TDES3_CTXT BIT(30)
+#define XGMAC_TDES3_FD BIT(29)
+#define XGMAC_TDES3_LD BIT(28)
+#define XGMAC_TDES3_CPC GENMASK(27, 26)
+#define XGMAC_TDES3_CPC_SHIFT 26
+#define XGMAC_TDES3_TCMSSV BIT(26)
+#define XGMAC_TDES3_THL GENMASK(22, 19)
+#define XGMAC_TDES3_THL_SHIFT 19
+#define XGMAC_TDES3_TSE BIT(18)
+#define XGMAC_TDES3_CIC GENMASK(17, 16)
+#define XGMAC_TDES3_CIC_SHIFT 16
+#define XGMAC_TDES3_TPL GENMASK(17, 0)
+#define XGMAC_TDES3_FL GENMASK(14, 0)
+#define XGMAC_RDES3_OWN BIT(31)
+#define XGMAC_RDES3_CTXT BIT(30)
+#define XGMAC_RDES3_IOC BIT(30)
+#define XGMAC_RDES3_LD BIT(28)
+#define XGMAC_RDES3_CDA BIT(27)
+#define XGMAC_RDES3_ES BIT(15)
+#define XGMAC_RDES3_PL GENMASK(13, 0)
+#define XGMAC_RDES3_TSD BIT(6)
+#define XGMAC_RDES3_TSA BIT(4)
+
+#endif /* __STMMAC_DWXGMAC2_H__ */
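Every multi-bit field above comes as a GENMASK()/_SHIFT pair; the rest of the series manipulates them with one consistent read-modify-write idiom, sketched here for the RX queue size field (the helper name is illustrative):

    static u32 example_update_rqs(void __iomem *ioaddr, u32 chan, u32 rqs)
    {
        u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(chan));

        /* Encode: clear the field, then OR in the shifted, masked value */
        value &= ~XGMAC_RQS;
        value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;
        writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(chan));

        /* Decode: mask first, then shift back down */
        return (readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(chan)) & XGMAC_RQS) >>
               XGMAC_RQS_SHIFT;
    }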
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
new file mode 100644
index 000000000000..d182f82f7b58
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC support.
+ */
+
+#include "stmmac.h"
+#include "dwxgmac2.h"
+
+static void dwxgmac2_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int mtu = dev->mtu;
+ u32 tx, rx;
+
+ tx = readl(ioaddr + XGMAC_TX_CONFIG);
+ rx = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ tx |= XGMAC_CORE_INIT_TX;
+ rx |= XGMAC_CORE_INIT_RX;
+
+ if (mtu >= 9000) {
+ rx |= XGMAC_CONFIG_GPSLCE;
+ rx |= XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT;
+ rx |= XGMAC_CONFIG_WD;
+ } else if (mtu > 2000) {
+ rx |= XGMAC_CONFIG_JE;
+ } else if (mtu > 1500) {
+ rx |= XGMAC_CONFIG_S2KP;
+ }
+
+ if (hw->ps) {
+ tx |= XGMAC_CONFIG_TE;
+ tx &= ~hw->link.speed_mask;
+
+ switch (hw->ps) {
+ case SPEED_10000:
+ tx |= hw->link.speed10000;
+ break;
+ case SPEED_2500:
+ tx |= hw->link.speed2500;
+ break;
+ case SPEED_1000:
+ default:
+ tx |= hw->link.speed1000;
+ break;
+ }
+ }
+
+ writel(tx, ioaddr + XGMAC_TX_CONFIG);
+ writel(rx, ioaddr + XGMAC_RX_CONFIG);
+ writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
+}
+
+static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
+ u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ if (enable) {
+ tx |= XGMAC_CONFIG_TE;
+ rx |= XGMAC_CONFIG_RE;
+ } else {
+ tx &= ~XGMAC_CONFIG_TE;
+ rx &= ~XGMAC_CONFIG_RE;
+ }
+
+ writel(tx, ioaddr + XGMAC_TX_CONFIG);
+ writel(rx, ioaddr + XGMAC_RX_CONFIG);
+}
+
+static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_RX_CONFIG);
+ if (hw->rx_csum)
+ value |= XGMAC_CONFIG_IPC;
+ else
+ value &= ~XGMAC_CONFIG_IPC;
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+
+ return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
+}
+
+static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
+ if (mode == MTL_QUEUE_AVB)
+ value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
+ else if (mode == MTL_QUEUE_DCB)
+ value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
+ writel(value, ioaddr + XGMAC_RXQ_CTRL0);
+}
+
+static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_PSRQ(queue);
+ value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
+
+ writel(value, ioaddr + reg);
+}
+
+static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
+ u32 rx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MTL_OPMODE);
+ value &= ~XGMAC_RAA;
+
+ switch (rx_alg) {
+ case MTL_RX_ALGORITHM_SP:
+ break;
+ case MTL_RX_ALGORITHM_WSP:
+ value |= XGMAC_RAA;
+ break;
+ default:
+ break;
+ }
+
+ writel(value, ioaddr + XGMAC_MTL_OPMODE);
+}
+
+static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
+ u32 tx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MTL_OPMODE);
+ value &= ~XGMAC_ETSALG;
+
+ switch (tx_alg) {
+ case MTL_TX_ALGORITHM_WRR:
+ value |= XGMAC_WRR;
+ break;
+ case MTL_TX_ALGORITHM_WFQ:
+ value |= XGMAC_WFQ;
+ break;
+ case MTL_TX_ALGORITHM_DWRR:
+ value |= XGMAC_DWRR;
+ break;
+ default:
+ break;
+ }
+
+ writel(value, ioaddr + XGMAC_MTL_OPMODE);
+}
+
+static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
+ u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_QxMDMACH(queue);
+ value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);
+
+ writel(value, ioaddr + reg);
+}
+
+static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
+ struct stmmac_extra_stats *x)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 stat, en;
+
+ en = readl(ioaddr + XGMAC_INT_EN);
+ stat = readl(ioaddr + XGMAC_INT_STATUS);
+
+ stat &= en;
+
+ if (stat & XGMAC_PMTIS) {
+ x->irq_receive_pmt_irq_n++;
+ readl(ioaddr + XGMAC_PMT);
+ }
+
+ return 0;
+}
+
+static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int ret = 0;
+ u32 status;
+
+ status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
+ if (status & BIT(chan)) {
+ u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));
+
+ if (chan_status & XGMAC_RXOVFIS)
+ ret |= CORE_IRQ_MTL_RX_OVERFLOW;
+
+ writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
+ }
+
+ return ret;
+}
+
+static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 i;
+
+ if (fc & FLOW_RX)
+ writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
+ if (fc & FLOW_TX) {
+ for (i = 0; i < tx_cnt; i++) {
+ u32 value = XGMAC_TFE;
+
+ if (duplex)
+ value |= pause_time << XGMAC_PT_SHIFT;
+
+ writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
+ }
+ }
+}
+
+static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 val = 0x0;
+
+ if (mode & WAKE_MAGIC)
+ val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
+ if (mode & WAKE_UCAST)
+ val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
+ if (val) {
+ u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
+ cfg |= XGMAC_CONFIG_RE;
+ writel(cfg, ioaddr + XGMAC_RX_CONFIG);
+ }
+
+ writel(val, ioaddr + XGMAC_PMT);
+}
+
+static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = (addr[5] << 8) | addr[4];
+ writel(value | XGMAC_AE, ioaddr + XGMAC_ADDR0_HIGH);
+
+ value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(value, ioaddr + XGMAC_ADDR0_LOW);
+}
+
+static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + XGMAC_ADDR0_HIGH);
+ lo_addr = readl(ioaddr + XGMAC_ADDR0_LOW);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
+
+static void dwxgmac2_set_filter(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ u32 value = XGMAC_FILTER_RA;
+
+ if (dev->flags & IFF_PROMISC) {
+ value |= XGMAC_FILTER_PR;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
+ value |= XGMAC_FILTER_PM;
+ writel(~0x0, ioaddr + XGMAC_HASH_TABLE(0));
+ writel(~0x0, ioaddr + XGMAC_HASH_TABLE(1));
+ }
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+}
+
+const struct stmmac_ops dwxgmac210_ops = {
+ .core_init = dwxgmac2_core_init,
+ .set_mac = dwxgmac2_set_mac,
+ .rx_ipc = dwxgmac2_rx_ipc,
+ .rx_queue_enable = dwxgmac2_rx_queue_enable,
+ .rx_queue_prio = dwxgmac2_rx_queue_prio,
+ .tx_queue_prio = NULL,
+ .rx_queue_routing = NULL,
+ .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = NULL,
+ .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
+ .config_cbs = NULL,
+ .dump_regs = NULL,
+ .host_irq_status = dwxgmac2_host_irq_status,
+ .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
+ .flow_ctrl = dwxgmac2_flow_ctrl,
+ .pmt = dwxgmac2_pmt,
+ .set_umac_addr = dwxgmac2_set_umac_addr,
+ .get_umac_addr = dwxgmac2_get_umac_addr,
+ .set_eee_mode = NULL,
+ .reset_eee_mode = NULL,
+ .set_eee_timer = NULL,
+ .set_eee_pls = NULL,
+ .pcs_ctrl_ane = NULL,
+ .pcs_rane = NULL,
+ .pcs_get_adv_lp = NULL,
+ .debug = NULL,
+ .set_filter = dwxgmac2_set_filter,
+};
+
+int dwxgmac2_setup(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ dev_info(priv->device, "\tXGMAC2\n");
+
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+ mac->link.duplex = 0;
+ mac->link.speed10 = 0;
+ mac->link.speed100 = 0;
+ mac->link.speed1000 = XGMAC_CONFIG_SS_1000;
+ mac->link.speed2500 = XGMAC_CONFIG_SS_2500;
+ mac->link.speed10000 = XGMAC_CONFIG_SS_10000;
+ mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;
+
+ mac->mii.addr = XGMAC_MDIO_ADDR;
+ mac->mii.data = XGMAC_MDIO_DATA;
+ mac->mii.addr_shift = 16;
+ mac->mii.addr_mask = GENMASK(20, 16);
+ mac->mii.reg_shift = 0;
+ mac->mii.reg_mask = GENMASK(15, 0);
+ mac->mii.clk_csr_shift = 19;
+ mac->mii.clk_csr_mask = GENMASK(21, 19);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
new file mode 100644
index 000000000000..1d858fdec997
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC support.
+ */
+
+#include <linux/stmmac.h>
+#include "common.h"
+#include "dwxgmac2.h"
+
+static int dwxgmac2_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr)
+{
+ unsigned int tdes3 = le32_to_cpu(p->des3);
+ int ret = tx_done;
+
+ if (unlikely(tdes3 & XGMAC_TDES3_OWN))
+ return tx_dma_own;
+ if (likely(!(tdes3 & XGMAC_TDES3_LD)))
+ return tx_not_ls;
+
+ return ret;
+}
+
+static int dwxgmac2_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ int ret = good_frame;
+
+ if (unlikely(rdes3 & XGMAC_RDES3_OWN))
+ return dma_own;
+ if (likely(!(rdes3 & XGMAC_RDES3_LD)))
+ return discard_frame;
+ if (unlikely(rdes3 & XGMAC_RDES3_ES))
+ ret = discard_frame;
+
+ return ret;
+}
+
+static int dwxgmac2_get_tx_len(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des2) & XGMAC_TDES2_B1L);
+}
+
+static int dwxgmac2_get_tx_owner(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des3) & XGMAC_TDES3_OWN) > 0;
+}
+
+static void dwxgmac2_set_tx_owner(struct dma_desc *p)
+{
+ p->des3 |= cpu_to_le32(XGMAC_TDES3_OWN);
+}
+
+static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
+{
+ p->des3 = cpu_to_le32(XGMAC_RDES3_OWN);
+
+ if (!disable_rx_ic)
+ p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC);
+}
+
+static int dwxgmac2_get_tx_ls(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0;
+}
+
+static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe)
+{
+ return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL);
+}
+
+static void dwxgmac2_enable_tx_timestamp(struct dma_desc *p)
+{
+ p->des2 |= cpu_to_le32(XGMAC_TDES2_TTSE);
+}
+
+static int dwxgmac2_get_tx_timestamp_status(struct dma_desc *p)
+{
+ return 0; /* Not supported */
+}
+
+static inline void dwxgmac2_get_timestamp(void *desc, u32 ats, u64 *ts)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ u64 ns = 0;
+
+ ns += le32_to_cpu(p->des1) * 1000000000ULL;
+ ns += le32_to_cpu(p->des0);
+
+ *ts = ns;
+}
+
+static int dwxgmac2_rx_check_timestamp(void *desc)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ bool desc_valid, ts_valid;
+
+ desc_valid = !(rdes3 & XGMAC_RDES3_OWN) && (rdes3 & XGMAC_RDES3_CTXT);
+ ts_valid = !(rdes3 & XGMAC_RDES3_TSD) && (rdes3 & XGMAC_RDES3_TSA);
+
+ if (likely(desc_valid && ts_valid))
+ return 0;
+ return -EINVAL;
+}
+
+static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
+ u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ int ret = -EBUSY;
+
+ if (likely(rdes3 & XGMAC_RDES3_CDA)) {
+ ret = dwxgmac2_rx_check_timestamp(next_desc);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ int mode, int end)
+{
+ dwxgmac2_set_rx_owner(p, disable_rx_ic);
+}
+
+static void dwxgmac2_init_tx_desc(struct dma_desc *p, int mode, int end)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ bool csum_flag, int mode, bool tx_own,
+ bool ls, unsigned int tot_pkt_len)
+{
+ unsigned int tdes3 = le32_to_cpu(p->des3);
+
+ p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L);
+
+ tdes3 = tot_pkt_len & XGMAC_TDES3_FL;
+ if (is_fs)
+ tdes3 |= XGMAC_TDES3_FD;
+ else
+ tdes3 &= ~XGMAC_TDES3_FD;
+
+ if (csum_flag)
+ tdes3 |= 0x3 << XGMAC_TDES3_CIC_SHIFT;
+ else
+ tdes3 &= ~XGMAC_TDES3_CIC;
+
+ if (ls)
+ tdes3 |= XGMAC_TDES3_LD;
+ else
+ tdes3 &= ~XGMAC_TDES3_LD;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= XGMAC_TDES3_OWN;
+
+ if (is_fs && tx_own)
+ /* When the own bit has to be set for the first descriptor of a
+ * frame, all other descriptors for that frame have to be written
+ * first, to avoid a race condition.
+ */
+ dma_wmb();
+
+ p->des3 = cpu_to_le32(tdes3);
+}
+
+static void dwxgmac2_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
+ int len1, int len2, bool tx_own,
+ bool ls, unsigned int tcphdrlen,
+ unsigned int tcppayloadlen)
+{
+ unsigned int tdes3 = le32_to_cpu(p->des3);
+
+ if (len1)
+ p->des2 |= cpu_to_le32(len1 & XGMAC_TDES2_B1L);
+ if (len2)
+ p->des2 |= cpu_to_le32((len2 << XGMAC_TDES2_B2L_SHIFT) &
+ XGMAC_TDES2_B2L);
+ if (is_fs) {
+ tdes3 |= XGMAC_TDES3_FD | XGMAC_TDES3_TSE;
+ tdes3 |= (tcphdrlen << XGMAC_TDES3_THL_SHIFT) &
+ XGMAC_TDES3_THL;
+ tdes3 |= tcppayloadlen & XGMAC_TDES3_TPL;
+ } else {
+ tdes3 &= ~XGMAC_TDES3_FD;
+ }
+
+ if (ls)
+ tdes3 |= XGMAC_TDES3_LD;
+ else
+ tdes3 &= ~XGMAC_TDES3_LD;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= XGMAC_TDES3_OWN;
+
+ if (is_fs && tx_own)
+ /* When the own bit has to be set for the first descriptor of a
+ * frame, all other descriptors for that frame have to be written
+ * first, to avoid a race condition.
+ */
+ dma_wmb();
+
+ p->des3 = cpu_to_le32(tdes3);
+}
+
+static void dwxgmac2_release_tx_desc(struct dma_desc *p, int mode)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwxgmac2_set_tx_ic(struct dma_desc *p)
+{
+ p->des2 |= cpu_to_le32(XGMAC_TDES2_IOC);
+}
+
+static void dwxgmac2_set_mss(struct dma_desc *p, unsigned int mss)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = cpu_to_le32(mss);
+ p->des3 = cpu_to_le32(XGMAC_TDES3_CTXT | XGMAC_TDES3_TCMSSV);
+}
+
+static void dwxgmac2_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+ *addr = le32_to_cpu(p->des0);
+}
+
+static void dwxgmac2_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des0 = cpu_to_le32(addr);
+ p->des1 = 0;
+}
+
+static void dwxgmac2_clear(struct dma_desc *p)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+const struct stmmac_desc_ops dwxgmac210_desc_ops = {
+ .tx_status = dwxgmac2_get_tx_status,
+ .rx_status = dwxgmac2_get_rx_status,
+ .get_tx_len = dwxgmac2_get_tx_len,
+ .get_tx_owner = dwxgmac2_get_tx_owner,
+ .set_tx_owner = dwxgmac2_set_tx_owner,
+ .set_rx_owner = dwxgmac2_set_rx_owner,
+ .get_tx_ls = dwxgmac2_get_tx_ls,
+ .get_rx_frame_len = dwxgmac2_get_rx_frame_len,
+ .enable_tx_timestamp = dwxgmac2_enable_tx_timestamp,
+ .get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status,
+ .get_rx_timestamp_status = dwxgmac2_get_rx_timestamp_status,
+ .get_timestamp = dwxgmac2_get_timestamp,
+ .set_tx_ic = dwxgmac2_set_tx_ic,
+ .prepare_tx_desc = dwxgmac2_prepare_tx_desc,
+ .prepare_tso_tx_desc = dwxgmac2_prepare_tso_tx_desc,
+ .release_tx_desc = dwxgmac2_release_tx_desc,
+ .init_rx_desc = dwxgmac2_init_rx_desc,
+ .init_tx_desc = dwxgmac2_init_tx_desc,
+ .set_mss = dwxgmac2_set_mss,
+ .get_addr = dwxgmac2_get_addr,
+ .set_addr = dwxgmac2_set_addr,
+ .clear = dwxgmac2_clear,
+};
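dwxgmac2_prepare_tx_desc() above is the producer half of the descriptor ownership protocol (the consumer half is the OWN-test-then-dma_rmb() pattern seen in the netsec hunk earlier). The ordering rule it enforces, reduced to a minimal sketch:

    static void example_publish_tx_desc(struct dma_desc *p, unsigned int len,
                                        unsigned int tot_pkt_len)
    {
        u32 tdes3 = (tot_pkt_len & XGMAC_TDES3_FL) |
                    XGMAC_TDES3_FD | XGMAC_TDES3_LD | XGMAC_TDES3_OWN;

        /* Write every other field before publishing OWN... */
        p->des2 = cpu_to_le32(len & XGMAC_TDES2_B1L);

        /* ...and make those writes visible to the device first, so the
         * DMA engine can never fetch a half-written descriptor.
         */
        dma_wmb();

        p->des3 = cpu_to_le32(tdes3);
    }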
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
new file mode 100644
index 000000000000..20909036e002
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC support.
+ */
+
+#include <linux/iopoll.h>
+#include "stmmac.h"
+#include "dwxgmac2.h"
+
+static int dwxgmac2_dma_reset(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_MODE);
+
+ /* DMA SW reset */
+ writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE);
+
+ return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value,
+ !(value & XGMAC_SWR), 0, 100000);
+}
+
+static void dwxgmac2_dma_init(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, int atds)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
+
+ if (dma_cfg->aal)
+ value |= XGMAC_AAL;
+
+ writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+}
+
+static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+
+ if (dma_cfg->pblx8)
+ value |= XGMAC_PBLx8;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+ writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+}
+
+static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
+{
+ u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value &= ~XGMAC_RxPBL;
+ value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+
+ writel(dma_rx_phy, ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
+}
+
+static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
+{
+ u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+ value &= ~XGMAC_TxPBL;
+ value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
+ value |= XGMAC_OSP;
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ writel(dma_tx_phy, ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
+}
+
+static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
+ int i;
+
+ if (axi->axi_lpi_en)
+ value |= XGMAC_EN_LPI;
+ if (axi->axi_xit_frm)
+ value |= XGMAC_LPI_XIT_PKT;
+
+ value &= ~XGMAC_WR_OSR_LMT;
+ value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
+ XGMAC_WR_OSR_LMT;
+
+ value &= ~XGMAC_RD_OSR_LMT;
+ value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
+ XGMAC_RD_OSR_LMT;
+
+ value &= ~XGMAC_BLEN;
+ for (i = 0; i < AXI_BLEN; i++) {
+ if (axi->axi_blen[i])
+ value &= ~XGMAC_UNDEF;
+
+ switch (axi->axi_blen[i]) {
+ case 256:
+ value |= XGMAC_BLEN256;
+ break;
+ case 128:
+ value |= XGMAC_BLEN128;
+ break;
+ case 64:
+ value |= XGMAC_BLEN64;
+ break;
+ case 32:
+ value |= XGMAC_BLEN32;
+ break;
+ case 16:
+ value |= XGMAC_BLEN16;
+ break;
+ case 8:
+ value |= XGMAC_BLEN8;
+ break;
+ case 4:
+ value |= XGMAC_BLEN4;
+ break;
+ }
+ }
+
+ writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+}
+
+static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
+ unsigned int rqs = fifosz / 256 - 1;
+
+ if (mode == SF_DMA_MODE) {
+ value |= XGMAC_RSF;
+ } else {
+ value &= ~XGMAC_RSF;
+ value &= ~XGMAC_RTC;
+
+ if (mode <= 64)
+ value |= 0x0 << XGMAC_RTC_SHIFT;
+ else if (mode <= 96)
+ value |= 0x2 << XGMAC_RTC_SHIFT;
+ else
+ value |= 0x3 << XGMAC_RTC_SHIFT;
+ }
+
+ value &= ~XGMAC_RQS;
+ value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;
+
+ writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
+
+ /* Enable MTL RX overflow */
+ value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
+ writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
+}
+
+static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
+ unsigned int tqs = fifosz / 256 - 1;
+
+ if (mode == SF_DMA_MODE) {
+ value |= XGMAC_TSF;
+ } else {
+ value &= ~XGMAC_TSF;
+ value &= ~XGMAC_TTC;
+
+ if (mode <= 64)
+ value |= 0x0 << XGMAC_TTC_SHIFT;
+ else if (mode <= 96)
+ value |= 0x2 << XGMAC_TTC_SHIFT;
+ else if (mode <= 128)
+ value |= 0x3 << XGMAC_TTC_SHIFT;
+ else if (mode <= 192)
+ value |= 0x4 << XGMAC_TTC_SHIFT;
+ else if (mode <= 256)
+ value |= 0x5 << XGMAC_TTC_SHIFT;
+ else if (mode <= 384)
+ value |= 0x6 << XGMAC_TTC_SHIFT;
+ else
+ value |= 0x7 << XGMAC_TTC_SHIFT;
+ }
+
+ value &= ~XGMAC_TXQEN;
+ if (qmode != MTL_QUEUE_AVB)
+ value |= 0x2 << XGMAC_TXQEN_SHIFT;
+ else
+ value |= 0x1 << XGMAC_TXQEN_SHIFT;
+
+ value &= ~XGMAC_TQS;
+ value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;
+
+ writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
+}
+
+static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+{
+ writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+}
+
+static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+{
+ writel(0, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+}
+
+static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+ value |= XGMAC_TXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_TX_CONFIG);
+ value |= XGMAC_CONFIG_TE;
+ writel(value, ioaddr + XGMAC_TX_CONFIG);
+}
+
+static void dwxgmac2_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+ value &= ~XGMAC_TXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_TX_CONFIG);
+ value &= ~XGMAC_CONFIG_TE;
+ writel(value, ioaddr + XGMAC_TX_CONFIG);
+}
+
+static void dwxgmac2_dma_start_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value |= XGMAC_RXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_RX_CONFIG);
+ value |= XGMAC_CONFIG_RE;
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+}
+
+static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value &= ~XGMAC_RXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_RX_CONFIG);
+ value &= ~XGMAC_CONFIG_RE;
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+}
+
+static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan)
+{
+ u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
+ int ret = 0;
+
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & XGMAC_AIS)) {
+ if (unlikely(intr_status & XGMAC_TPS)) {
+ x->tx_process_stopped_irq++;
+ ret |= tx_hard_error;
+ }
+ if (unlikely(intr_status & XGMAC_FBE)) {
+ x->fatal_bus_error_irq++;
+ ret |= tx_hard_error;
+ }
+ }
+
+ /* TX/RX NORMAL interrupts */
+ if (likely(intr_status & XGMAC_NIS)) {
+ x->normal_irq_n++;
+
+ if (likely(intr_status & XGMAC_RI)) {
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+ if (likely(value & XGMAC_RIE)) {
+ x->rx_normal_irq_n++;
+ ret |= handle_rx;
+ }
+ }
+ if (likely(intr_status & XGMAC_TI)) {
+ x->tx_normal_irq_n++;
+ ret |= handle_tx;
+ }
+ }
+
+ /* Clear interrupts */
+ writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
+
+ return ret;
+}
+
+static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
+{
+ u32 hw_cap;
+
+ /* MAC HW feature 0 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
+ dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
+ dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
+ dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
+ dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
+ dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
+ dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
+ dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
+ dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
+
+ /* MAC HW feature 1 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
+ dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
+ dma_cap->tx_fifo_size =
+ 128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
+ dma_cap->rx_fifo_size =
+ 128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);
+
+ /* MAC HW feature 2 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
+ dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
+ dma_cap->number_tx_channel =
+ ((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
+ dma_cap->number_rx_channel =
+ ((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1;
+ dma_cap->number_tx_queues =
+ ((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
+ dma_cap->number_rx_queues =
+ ((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;
+}
+
+static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
+{
+ u32 i;
+
+ for (i = 0; i < nchan; i++)
+ writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(i));
+}
+
+static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+{
+ writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
+}
+
+static void dwxgmac2_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+{
+ writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
+}
+
+static void dwxgmac2_set_rx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
+{
+ writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
+}
+
+static void dwxgmac2_set_tx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
+{
+ writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
+}
+
+static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ if (en)
+ value |= XGMAC_TSE;
+ else
+ value &= ~XGMAC_TSE;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+}
+
+static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value |= bfsize << 1;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+}
+
+const struct stmmac_dma_ops dwxgmac210_dma_ops = {
+ .reset = dwxgmac2_dma_reset,
+ .init = dwxgmac2_dma_init,
+ .init_chan = dwxgmac2_dma_init_chan,
+ .init_rx_chan = dwxgmac2_dma_init_rx_chan,
+ .init_tx_chan = dwxgmac2_dma_init_tx_chan,
+ .axi = dwxgmac2_dma_axi,
+ .dump_regs = NULL,
+ .dma_rx_mode = dwxgmac2_dma_rx_mode,
+ .dma_tx_mode = dwxgmac2_dma_tx_mode,
+ .enable_dma_irq = dwxgmac2_enable_dma_irq,
+ .disable_dma_irq = dwxgmac2_disable_dma_irq,
+ .start_tx = dwxgmac2_dma_start_tx,
+ .stop_tx = dwxgmac2_dma_stop_tx,
+ .start_rx = dwxgmac2_dma_start_rx,
+ .stop_rx = dwxgmac2_dma_stop_rx,
+ .dma_interrupt = dwxgmac2_dma_interrupt,
+ .get_hw_feature = dwxgmac2_get_hw_feature,
+ .rx_watchdog = dwxgmac2_rx_watchdog,
+ .set_rx_ring_len = dwxgmac2_set_rx_ring_len,
+ .set_tx_ring_len = dwxgmac2_set_tx_ring_len,
+ .set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
+ .set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
+ .enable_tso = dwxgmac2_enable_tso,
+ .set_bfsize = dwxgmac2_set_bfsize,
+};
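dwxgmac2_dma_reset() leans on readl_poll_timeout() from <linux/iopoll.h>; open-coded, the poll above is roughly equivalent to the following (a sketch only, the real helper can also sleep between reads):

    static int example_wait_swr_clear(void __iomem *ioaddr)
    {
        unsigned long deadline = jiffies + usecs_to_jiffies(100000);
        u32 value;

        for (;;) {
            value = readl(ioaddr + XGMAC_DMA_MODE);
            if (!(value & XGMAC_SWR))
                return 0;           /* reset completed */
            if (time_after(jiffies, deadline))
                return -ETIMEDOUT;  /* 100 ms budget exhausted */
            cpu_relax();
        }
    }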
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 1f50e83cafb2..357309a6d6a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -72,6 +72,7 @@ static int stmmac_dwmac4_quirks(struct stmmac_priv *priv)
static const struct stmmac_hwif_entry {
bool gmac;
bool gmac4;
+ bool xgmac;
u32 min_id;
const struct stmmac_regs_off regs;
const void *desc;
@@ -87,6 +88,7 @@ static const struct stmmac_hwif_entry {
{
.gmac = false,
.gmac4 = false,
+ .xgmac = false,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC3_X_OFFSET,
@@ -103,6 +105,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = true,
.gmac4 = false,
+ .xgmac = false,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC3_X_OFFSET,
@@ -119,6 +122,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = false,
.gmac4 = true,
+ .xgmac = false,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -135,6 +139,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = false,
.gmac4 = true,
+ .xgmac = false,
.min_id = DWMAC_CORE_4_00,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -151,6 +156,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = false,
.gmac4 = true,
+ .xgmac = false,
.min_id = DWMAC_CORE_4_10,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -167,6 +173,7 @@ static const struct stmmac_hwif_entry {
}, {
.gmac = false,
.gmac4 = true,
+ .xgmac = false,
.min_id = DWMAC_CORE_5_10,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -180,11 +187,29 @@ static const struct stmmac_hwif_entry {
.tc = &dwmac510_tc_ops,
.setup = dwmac4_setup,
.quirks = NULL,
- }
+ }, {
+ .gmac = false,
+ .gmac4 = false,
+ .xgmac = true,
+ .min_id = DWXGMAC_CORE_2_10,
+ .regs = {
+ .ptp_off = PTP_XGMAC_OFFSET,
+ .mmc_off = 0,
+ },
+ .desc = &dwxgmac210_desc_ops,
+ .dma = &dwxgmac210_dma_ops,
+ .mac = &dwxgmac210_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = NULL,
+ .setup = dwxgmac2_setup,
+ .quirks = NULL,
+ },
};
int stmmac_hwif_init(struct stmmac_priv *priv)
{
+ bool needs_xgmac = priv->plat->has_xgmac;
bool needs_gmac4 = priv->plat->has_gmac4;
bool needs_gmac = priv->plat->has_gmac;
const struct stmmac_hwif_entry *entry;
@@ -195,7 +220,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
if (needs_gmac) {
id = stmmac_get_id(priv, GMAC_VERSION);
- } else if (needs_gmac4) {
+ } else if (needs_gmac4 || needs_xgmac) {
id = stmmac_get_id(priv, GMAC4_VERSION);
} else {
id = 0;
@@ -229,6 +254,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
continue;
if (needs_gmac4 ^ entry->gmac4)
continue;
+ if (needs_xgmac ^ entry->xgmac)
+ continue;
/* Use synopsys_id var because some setups can override this */
if (priv->synopsys_id < entry->min_id)
continue;
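The table lookup pairs each capability flag with its entry flag via XOR, so a candidate survives only when both sides agree; spelled out for the new flag:

    /* needs_xgmac ^ entry->xgmac: entry is skipped when the result is 1
     *   needs=0, entry=0 -> 0  keep (neither side wants XGMAC)
     *   needs=0, entry=1 -> 1  skip (entry is XGMAC-only)
     *   needs=1, entry=0 -> 1  skip (entry cannot drive an XGMAC)
     *   needs=1, entry=1 -> 0  keep (both agree)
     */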
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index fe8b536b13f8..92b8944f26e3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -183,6 +183,7 @@ struct stmmac_dma_ops {
void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
+ void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
};
@@ -236,6 +237,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
#define stmmac_enable_tso(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_tso, __args)
+#define stmmac_dma_qmode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, qmode, __args)
#define stmmac_set_dma_bfsize(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
@@ -444,17 +447,22 @@ struct stmmac_mode_ops {
struct stmmac_priv;
struct tc_cls_u32_offload;
+struct tc_cbs_qopt_offload;
struct stmmac_tc_ops {
int (*init)(struct stmmac_priv *priv);
int (*setup_cls_u32)(struct stmmac_priv *priv,
struct tc_cls_u32_offload *cls);
+ int (*setup_cbs)(struct stmmac_priv *priv,
+ struct tc_cbs_qopt_offload *qopt);
};
#define stmmac_tc_init(__priv, __args...) \
stmmac_do_callback(__priv, tc, init, __args)
#define stmmac_tc_setup_cls_u32(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_cls_u32, __args)
+#define stmmac_tc_setup_cbs(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_cbs, __args)
struct stmmac_regs_off {
u32 ptp_off;
@@ -471,6 +479,9 @@ extern const struct stmmac_ops dwmac410_ops;
extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
+extern const struct stmmac_ops dwxgmac210_ops;
+extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
+extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
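Like the existing wrappers, the new stmmac_dma_qmode() and stmmac_tc_setup_cbs() macros expand to NULL-guarded indirect calls; conceptually (a sketch of the expansion, not the literal macro body):

    static int example_dma_qmode(struct stmmac_priv *priv,
                                 void __iomem *ioaddr, u32 chan, u8 qmode)
    {
        /* -EINVAL when the core's ops table does not provide the hook */
        if (!priv->hw->dma || !priv->hw->dma->qmode)
            return -EINVAL;

        priv->hw->dma->qmode(ioaddr, chan, qmode);
        return 0;
    }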
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ef6a8d39db2f..ff1ffb46198a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -51,6 +51,7 @@
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
+#include "dwxgmac2.h"
#include "hwif.h"
#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
@@ -262,6 +263,21 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
else
priv->clk_csr = 0;
}
+
+ if (priv->plat->has_xgmac) {
+ if (clk_rate > 400000000)
+ priv->clk_csr = 0x5;
+ else if (clk_rate > 350000000)
+ priv->clk_csr = 0x4;
+ else if (clk_rate > 300000000)
+ priv->clk_csr = 0x3;
+ else if (clk_rate > 250000000)
+ priv->clk_csr = 0x2;
+ else if (clk_rate > 150000000)
+ priv->clk_csr = 0x1;
+ else
+ priv->clk_csr = 0x0;
+ }
}
static void print_pkt(unsigned char *buf, int len)
@@ -498,7 +514,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
if (!priv->hwts_rx_en)
return;
/* For GMAC4, the valid timestamp is from CTX next desc. */
- if (priv->plat->has_gmac4)
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
desc = np;
/* Check if timestamp is available */
@@ -540,6 +556,9 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
u32 ts_event_en = 0;
u32 value = 0;
u32 sec_inc;
+ bool xmac;
+
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
netdev_alert(priv->dev, "No support for HW time stamping\n");
@@ -575,7 +594,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
/* PTP v1, UDP, any kind of event packet */
config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* take time stamp for all event messages */
- if (priv->plat->has_gmac4)
+ if (xmac)
snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
else
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -610,7 +629,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
- if (priv->plat->has_gmac4)
+ if (xmac)
snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
else
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -647,7 +666,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
- if (priv->plat->has_gmac4)
+ if (xmac)
snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
else
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -718,7 +737,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
/* program Sub Second Increment reg */
stmmac_config_sub_second_increment(priv,
priv->ptpaddr, priv->plat->clk_ptp_rate,
- priv->plat->has_gmac4, &sec_inc);
+ xmac, &sec_inc);
temp = div_u64(1000000000ULL, sec_inc);
/* Store sub second increment and flags for later use */
@@ -755,12 +774,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
*/
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
+ bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
priv->adv_ts = 0;
- /* Check if adv_ts can be enabled for dwmac 4.x core */
- if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
+ /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
+ if (xmac && priv->dma_cap.atime_stamp)
priv->adv_ts = 1;
/* Dwmac 3.x core with extend_desc can support adv_ts */
else if (priv->extend_desc && priv->dma_cap.atime_stamp)
@@ -2173,6 +2194,12 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
return ret;
}
+ /* DMA Configuration */
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+
+ if (priv->plat->axi)
+ stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
+
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_channels_count; chan++) {
rx_q = &priv->rx_queue[chan];
@@ -2203,12 +2230,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
for (chan = 0; chan < dma_csr_ch; chan++)
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
- /* DMA Configuration */
- stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
-
- if (priv->plat->axi)
- stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
-
return ret;
}
@@ -2526,9 +2547,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
netdev_warn(priv->dev, "%s: failed debugFS registration\n",
__func__);
#endif
- /* Start the ball rolling... */
- stmmac_start_all_dma(priv);
-
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
if (priv->use_riwt) {
@@ -2549,6 +2567,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
}
+ /* Start the ball rolling... */
+ stmmac_start_all_dma(priv);
+
return 0;
}
@@ -3305,6 +3326,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
int coe = priv->hw->rx_csum;
unsigned int next_entry;
unsigned int count = 0;
+ bool xmac;
+
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -3406,7 +3430,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* in case of GMAC4 because it needs
* to refill the used descriptors, always.
*/
- if (unlikely(!priv->plat->has_gmac4 &&
+ if (unlikely(!xmac &&
((frame_len < priv->rx_copybreak) ||
stmmac_rx_threshold_count(rx_q)))) {
skb = netdev_alloc_skb_ip_align(priv->dev,
@@ -3642,7 +3666,9 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queues_count;
u32 queue;
+ bool xmac;
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
if (priv->irq_wake)
@@ -3661,7 +3687,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
/* To handle GMAC own interrupts */
- if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
+ if ((priv->plat->has_gmac) || xmac) {
int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
int mtl_status;
@@ -3778,7 +3804,7 @@ static int stmmac_setup_tc_block(struct stmmac_priv *priv,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
- priv, priv);
+ priv, priv, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
return 0;
@@ -3795,6 +3821,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
switch (type) {
case TC_SETUP_BLOCK:
return stmmac_setup_tc_block(priv, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return stmmac_tc_setup_cbs(priv, priv, type_data);
default:
return -EOPNOTSUPP;
}
@@ -4267,6 +4295,8 @@ int stmmac_dvr_probe(struct device *device,
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
ndev->max_mtu = JUMBO_LEN;
+ else if (priv->plat->has_xgmac)
+ ndev->max_mtu = XGMAC_JUMBO_LEN;
else
ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
@@ -4288,7 +4318,8 @@ int stmmac_dvr_probe(struct device *device,
* has to be disabled, and this can be done by passing the
* riwt_off field from the platform.
*/
- if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
+ if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
+ (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
priv->use_riwt = 1;
dev_info(priv->device,
"Enable RX Mitigation via HW Watchdog Timer\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 5df1a608e566..b72ef171477e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -29,6 +29,7 @@
#include <linux/phy.h>
#include <linux/slab.h>
+#include "dwxgmac2.h"
#include "stmmac.h"
#define MII_BUSY 0x00000001
@@ -39,6 +40,115 @@
#define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT)
#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT)
+/* XGMAC defines */
+#define MII_XGMAC_SADDR BIT(18)
+#define MII_XGMAC_CMD_SHIFT 16
+#define MII_XGMAC_WRITE (1 << MII_XGMAC_CMD_SHIFT)
+#define MII_XGMAC_READ (3 << MII_XGMAC_CMD_SHIFT)
+#define MII_XGMAC_BUSY BIT(22)
+#define MII_XGMAC_MAX_C22ADDR 3
+#define MII_XGMAC_C22P_MASK GENMASK(MII_XGMAC_MAX_C22ADDR, 0)
+
+static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
+ int phyreg, u32 *hw_addr)
+{
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 tmp;
+
+ /* HW does not support C22 addr >= 4 */
+ if (phyaddr > MII_XGMAC_MAX_C22ADDR)
+ return -ENODEV;
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
+ /* Set port as Clause 22 */
+ tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
+ tmp &= ~MII_XGMAC_C22P_MASK;
+ tmp |= BIT(phyaddr);
+ writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
+
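+ /* C22 address layout: PHY address starting at bit 16, register in bits 4:0 */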
+ *hw_addr = (phyaddr << 16) | (phyreg & 0x1f);
+ return 0;
+}
+
+static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 tmp, addr, value = MII_XGMAC_BUSY;
+ int ret;
+
+ if (phyreg & MII_ADDR_C45) {
+ return -EOPNOTSUPP;
+ } else {
+ ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ return ret;
+ }
+
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ value |= MII_XGMAC_SADDR | MII_XGMAC_READ;
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
+ /* Set the MII address register to read */
+ writel(addr, priv->ioaddr + mii_address);
+ writel(value, priv->ioaddr + mii_data);
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
+ /* Read the data from the MII data register */
+ return readl(priv->ioaddr + mii_data) & GENMASK(15, 0);
+}
+
+static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
+ int phyreg, u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 addr, tmp, value = MII_XGMAC_BUSY;
+ int ret;
+
+ if (phyreg & MII_ADDR_C45) {
+ return -EOPNOTSUPP;
+ } else {
+ ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ return ret;
+ }
+
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ value |= phydata | MII_XGMAC_SADDR;
+ value |= MII_XGMAC_WRITE;
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000))
+ return -EBUSY;
+
+ /* Set the MII address register to write */
+ writel(addr, priv->ioaddr + mii_address);
+ writel(value, priv->ioaddr + mii_data);
+
+ /* Wait until any existing MII operation is complete */
+ return readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000);
+}
+
/**
* stmmac_mdio_read
* @bus: points to the mii_bus structure
@@ -205,7 +315,7 @@ int stmmac_mdio_register(struct net_device *ndev)
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct device_node *mdio_node = priv->plat->mdio_node;
struct device *dev = ndev->dev.parent;
- int addr, found;
+ int addr, found, max_addr;
if (!mdio_bus_data)
return 0;
@@ -223,8 +333,23 @@ int stmmac_mdio_register(struct net_device *ndev)
#endif
new_bus->name = "stmmac";
- new_bus->read = &stmmac_mdio_read;
- new_bus->write = &stmmac_mdio_write;
+
+ if (priv->plat->has_xgmac) {
+ new_bus->read = &stmmac_xgmac2_mdio_read;
+ new_bus->write = &stmmac_xgmac2_mdio_write;
+
+ /* Right now only C22 phys are supported */
+ max_addr = MII_XGMAC_MAX_C22ADDR + 1;
+
+ /* Check if DT specified an unsupported phy addr */
+ if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR)
+ dev_err(dev, "Unsupported phy_addr (max=%d)\n",
+ MII_XGMAC_MAX_C22ADDR);
+ } else {
+ new_bus->read = &stmmac_mdio_read;
+ new_bus->write = &stmmac_mdio_write;
+ max_addr = PHY_MAX_ADDR;
+ }
new_bus->reset = &stmmac_mdio_reset;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -243,7 +368,7 @@ int stmmac_mdio_register(struct net_device *ndev)
goto bus_register_done;
found = 0;
- for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ for (addr = 0; addr < max_addr; addr++) {
struct phy_device *phydev = mdiobus_get_phy(new_bus, addr);
if (!phydev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 6a393b16a1fc..c54a50dbd5ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -303,7 +303,7 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static int stmmac_pci_suspend(struct device *dev)
+static int __maybe_unused stmmac_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int ret;
@@ -321,7 +321,7 @@ static int stmmac_pci_suspend(struct device *dev)
return 0;
}
-static int stmmac_pci_resume(struct device *dev)
+static int __maybe_unused stmmac_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 72da77b94ecd..3609c7b696c7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -486,6 +486,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->force_sf_dma_mode = 1;
}
+ if (of_device_is_compatible(np, "snps,dwxgmac")) {
+ plat->has_xgmac = 1;
+ plat->pmt = 1;
+ plat->tso_en = of_property_read_bool(np, "snps,tso");
+ }
+
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
if (!dma_cfg) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 0cb0e39a2be9..2293e21f789f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -71,6 +71,9 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
u32 sec, nsec;
u32 quotient, reminder;
int neg_adj = 0;
+ bool xmac;
+
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
if (delta < 0) {
neg_adj = 1;
@@ -82,8 +85,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
nsec = reminder;
spin_lock_irqsave(&priv->ptp_lock, flags);
- stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj,
- priv->plat->has_gmac4);
+ stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index f4b31d69f60e..ecccf895fd7e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -21,6 +21,7 @@
#ifndef __STMMAC_PTP_H__
#define __STMMAC_PTP_H__
+#define PTP_XGMAC_OFFSET 0xd00
#define PTP_GMAC4_OFFSET 0xb00
#define PTP_GMAC3_X_OFFSET 0x700
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 2258cd8cc844..1a96dd9c1091 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -289,7 +289,67 @@ static int tc_init(struct stmmac_priv *priv)
return 0;
}
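+/* Offload a CBS qdisc onto a TX queue. A typical (assumed) trigger from
+ * userspace, once an mqprio root qdisc is in place:
+ * tc qdisc replace dev eth0 parent 100:2 cbs idleslope 98688 \
+ * sendslope -901312 hicredit 153 locredit -1389 offload 1
+ */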
+static int tc_setup_cbs(struct stmmac_priv *priv,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 queue = qopt->queue;
+ u32 ptr, speed_div;
+ u32 mode_to_use;
+ u64 value;
+ int ret;
+
+ /* Queue 0 is not AVB capable */
+ if (queue <= 0 || queue >= tx_queues_count)
+ return -EINVAL;
+ if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
+ return -EOPNOTSUPP;
+
+ mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+ if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
+ ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
+ if (ret)
+ return ret;
+
+ priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+ } else if (!qopt->enable) {
+ return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
+ }
+
+ /* Port Transmit Rate and Speed Divider */
+ ptr = (priv->speed == SPEED_100) ? 4 : 8;
+ speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000;
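+ /* the qdisc passes slopes in kbit/s; scaling by 1024 * ptr / speed_div
+ * converts them to the core's per-queue credit units (assumed
+ * fixed-point encoding from the databook)
+ */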
+
+ /* Final adjustments for HW */
+ value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+ priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
+
+ value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
+ priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
+
+ value = qopt->hicredit * 1024ll * 8;
+ priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);
+
+ value = qopt->locredit * 1024ll * 8;
+ priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);
+
+ ret = stmmac_config_cbs(priv, priv->hw,
+ priv->plat->tx_queues_cfg[queue].send_slope,
+ priv->plat->tx_queues_cfg[queue].idle_slope,
+ priv->plat->tx_queues_cfg[queue].high_credit,
+ priv->plat->tx_queues_cfg[queue].low_credit,
+ queue);
+ if (ret)
+ return ret;
+
+ dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
+ queue, qopt->sendslope, qopt->idleslope,
+ qopt->hicredit, qopt->locredit);
+ return 0;
+}
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
+ .setup_cbs = tc_setup_cbs,
};
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index a5dd627fe2f9..d42f47f6c632 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -101,7 +101,8 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
}
static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct vnet_port *port = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 88c12474a0c3..9319d84bf49f 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -1225,25 +1225,9 @@ static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
bmsr = err;
if (bmsr & BMSR_LSTATUS) {
- u16 adv, lpa;
-
- err = mii_read(np, np->phy_addr, MII_ADVERTISE);
- if (err < 0)
- goto out;
- adv = err;
-
- err = mii_read(np, np->phy_addr, MII_LPA);
- if (err < 0)
- goto out;
- lpa = err;
-
- err = mii_read(np, np->phy_addr, MII_ESTATUS);
- if (err < 0)
- goto out;
link_up = 1;
current_speed = SPEED_1000;
current_duplex = DUPLEX_FULL;
-
}
lp->active_speed = current_speed;
lp->active_duplex = current_duplex;
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index a94f50442613..12539b357a78 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -234,7 +234,8 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
}
static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct vnet *vp = netdev_priv(dev);
struct vnet_port *port = __tx_port_find(vp, skb);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
index 458a7844260a..99d86e39ff54 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include <linux/dcbnl.h>
#include "dwc-xlgmac.h"
@@ -193,7 +194,6 @@ static u32 xlgmac_vid_crc32_le(__le16 vid_le)
{
unsigned char *data = (unsigned char *)&vid_le;
unsigned char data_byte = 0;
- u32 poly = 0xedb88320;
u32 crc = ~0;
u32 temp = 0;
int i, bits;
@@ -208,7 +208,7 @@ static u32 xlgmac_vid_crc32_le(__le16 vid_le)
data_byte >>= 1;
if (temp)
- crc ^= poly;
+ crc ^= CRC32_POLY_LE;
}
return crc;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 163d8d16bc24..dc966ddb6d81 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1151,7 +1151,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
struct rx_map *dm;
struct rxf_fifo *f;
struct rxdb *db;
- struct sk_buff *skb;
int delta;
ENTER;
@@ -1161,7 +1160,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
DBG("db=%p f=%p\n", db, f);
dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
DBG("dm=%p\n", dm);
- skb = dm->skb;
rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */
rxfd->va_lo = rxdd->va_lo;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 3e34cb8ac1d3..832bce07c385 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -39,12 +39,15 @@
#include <linux/sys_soc.h>
#include <linux/pinctrl/consumer.h>
+#include <net/pkt_cls.h>
#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpts.h"
#include "davinci_cpdma.h"
+#include <net/pkt_sched.h>
+
#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
NETIF_MSG_DRV | NETIF_MSG_LINK | \
NETIF_MSG_IFUP | NETIF_MSG_INTR | \
@@ -153,6 +156,12 @@ do { \
#define IRQ_NUM 2
#define CPSW_MAX_QUEUES 8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
+#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16
+#define CPSW_FIFO_SHAPE_EN_SHIFT 16
+#define CPSW_FIFO_RATE_EN_SHIFT 20
+#define CPSW_TC_NUM 4
+#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1)
+#define CPSW_PCT_MASK 0x7f
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0)
@@ -253,23 +262,24 @@ struct cpsw_ss_regs {
#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
/* Bit definitions for the CPSW2_CONTROL register */
-#define PASS_PRI_TAGGED (1<<24) /* Pass Priority Tagged */
-#define VLAN_LTYPE2_EN (1<<21) /* VLAN LTYPE 2 enable */
-#define VLAN_LTYPE1_EN (1<<20) /* VLAN LTYPE 1 enable */
-#define DSCP_PRI_EN (1<<16) /* DSCP Priority Enable */
-#define TS_320 (1<<14) /* Time Sync Dest Port 320 enable */
-#define TS_319 (1<<13) /* Time Sync Dest Port 319 enable */
-#define TS_132 (1<<12) /* Time Sync Dest IP Addr 132 enable */
-#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
-#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
-#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
-#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */
-#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */
-#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
-#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
-#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
-#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
-#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
+#define PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */
+#define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */
+#define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */
+#define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */
+#define TS_107 BIT(15) /* Time Sync Dest IP Address 107 */
+#define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */
+#define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */
+#define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */
+#define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */
+#define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */
+#define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */
+#define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */
+#define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */
+#define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */
+#define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */
+#define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */
+#define TS_RX_EN BIT(0) /* Time Sync Receive Enable */
#define CTRL_V2_TS_BITS \
(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
@@ -281,7 +291,7 @@ struct cpsw_ss_regs {
#define CTRL_V3_TS_BITS \
- (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
TS_LTYPE1_EN)
@@ -453,6 +463,9 @@ struct cpsw_priv {
u8 mac_addr[ETH_ALEN];
bool rx_pause;
bool tx_pause;
+ bool mqprio_hw;
+ int fifo_bw[CPSW_TC_NUM];
+ int shp_cfg_speed;
u32 emac_port;
struct cpsw_common *cpsw;
};
@@ -552,40 +565,28 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
(func)(slave++, ##arg); \
} while (0)
-#define cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb) \
- do { \
- if (!cpsw->data.dual_emac) \
- break; \
- if (CPDMA_RX_SOURCE_PORT(status) == 1) { \
- ndev = cpsw->slaves[0].ndev; \
- skb->dev = ndev; \
- } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \
- ndev = cpsw->slaves[1].ndev; \
- skb->dev = ndev; \
- } \
- } while (0)
-#define cpsw_add_mcast(cpsw, priv, addr) \
- do { \
- if (cpsw->data.dual_emac) { \
- struct cpsw_slave *slave = cpsw->slaves + \
- priv->emac_port; \
- int slave_port = cpsw_get_slave_port( \
- slave->slave_num); \
- cpsw_ale_add_mcast(cpsw->ale, addr, \
- 1 << slave_port | ALE_PORT_HOST, \
- ALE_VLAN, slave->port_vlan, 0); \
- } else { \
- cpsw_ale_add_mcast(cpsw->ale, addr, \
- ALE_ALL_PORTS, \
- 0, 0, 0); \
- } \
- } while (0)
-
static inline int cpsw_get_slave_port(u32 slave_num)
{
return slave_num + 1;
}
+static void cpsw_add_mcast(struct cpsw_priv *priv, u8 *addr)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ if (cpsw->data.dual_emac) {
+ struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
+ int slave_port = cpsw_get_slave_port(slave->slave_num);
+
+ cpsw_ale_add_mcast(cpsw->ale, addr,
+ 1 << slave_port | ALE_PORT_HOST,
+ ALE_VLAN, slave->port_vlan, 0);
+ return;
+ }
+
+ cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0);
+}
+
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
@@ -693,7 +694,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* program multicast address list into ALE register */
netdev_for_each_mc_addr(ha, ndev) {
- cpsw_add_mcast(cpsw, priv, (u8 *)ha->addr);
+ cpsw_add_mcast(priv, ha->addr);
}
}
}
@@ -785,10 +786,16 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct sk_buff *skb = token;
struct sk_buff *new_skb;
struct net_device *ndev = skb->dev;
- int ret = 0;
+ int ret = 0, port;
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb);
+ if (cpsw->data.dual_emac) {
+ port = CPDMA_RX_SOURCE_PORT(status);
+ if (port) {
+ ndev = cpsw->slaves[--port].ndev;
+ skb->dev = ndev;
+ }
+ }
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
/* In dual emac mode check for all interfaces */
@@ -967,8 +974,8 @@ static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
/* process every unprocessed channel */
ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
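+ /* tx vectors are mapped to cpdma channels in reverse order, so walk
+ * the channel state map from the highest bit down
+ */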
- for (ch = 0, num_tx = 0; ch_map; ch_map >>= 1, ch++) {
- if (!(ch_map & 0x01))
+ for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
+ if (!(ch_map & 0x80))
continue;
txv = &cpsw->txv[ch];
@@ -1077,6 +1084,38 @@ static void cpsw_set_slave_mac(struct cpsw_slave *slave,
slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}
+static bool cpsw_shp_is_off(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 shift, mask, val;
+
+ val = readl_relaxed(&cpsw->regs->ptype);
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
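+ /* each slave port owns three consecutive shaper-enable bits in PTYPE */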
+ shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+ mask = 7 << shift;
+ val = val & mask;
+
+ return !val;
+}
+
+static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 shift, mask, val;
+
+ val = readl_relaxed(&cpsw->regs->ptype);
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
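+ /* fifo is 1-based among the shapers: enable bit 0 corresponds to FIFO1 */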
+ mask = (1 << --fifo) << shift;
+ val = on ? val | mask : val & ~mask;
+
+ writel_relaxed(val, &cpsw->regs->ptype);
+}
+
static void _cpsw_adjust_link(struct cpsw_slave *slave,
struct cpsw_priv *priv, bool *link)
{
@@ -1116,6 +1155,12 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
mac_control |= BIT(4);
*link = true;
+
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed &&
+ !cpsw_shp_is_off(priv))
+ dev_warn(priv->dev,
+ "Speed was changed, so CBS shaper speeds changed as well!");
} else {
mac_control = 0;
/* disable forwarding */
@@ -1577,6 +1622,231 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
soft_reset_slave(slave);
}
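+/* Map a traffic class to a CPSW FIFO. The last (lowest priority) tc always
+ * lands on FIFO0, which cannot be shaped; the remaining tcs take the
+ * shaper-capable FIFOs in reverse order.
+ */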
+static int cpsw_tc_to_fifo(int tc, int num_tc)
+{
+ if (tc == num_tc - 1)
+ return 0;
+
+ return CPSW_FIFO_SHAPERS_NUM - tc;
+}
+
+static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 val = 0, send_pct, shift;
+ struct cpsw_slave *slave;
+ int pct = 0, i;
+
+ if (bw > priv->shp_cfg_speed * 1000)
+ goto err;
+
+ /* shaping has to stay enabled for the highest FIFOs linearly,
+ * and a FIFO's bandwidth can be no more than the interface allows
+ */
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ send_pct = slave_read(slave, SEND_PERCENT);
+ for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
+ if (!bw) {
+ if (i >= fifo || !priv->fifo_bw[i])
+ continue;
+
+ dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
+ continue;
+ }
+
+ if (!priv->fifo_bw[i] && i > fifo) {
+ dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
+ return -EINVAL;
+ }
+
+ shift = (i - 1) * 8;
+ if (i == fifo) {
+ send_pct &= ~(CPSW_PCT_MASK << shift);
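+ /* bw is in kbit/s and shp_cfg_speed in Mbit/s, so the percentage
+ * of line rate is bw / (speed * 10)
+ */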
+ val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
+ if (!val)
+ val = 1;
+
+ send_pct |= val << shift;
+ pct += val;
+ continue;
+ }
+
+ if (priv->fifo_bw[i])
+ pct += (send_pct >> shift) & CPSW_PCT_MASK;
+ }
+
+ if (pct >= 100)
+ goto err;
+
+ slave_write(slave, send_pct, SEND_PERCENT);
+ priv->fifo_bw[fifo] = bw;
+
+ dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
+ DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
+
+ return 0;
+err:
+ dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
+ return -EINVAL;
+}
+
+static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 tx_in_ctl_rg, val;
+ int ret;
+
+ ret = cpsw_set_fifo_bw(priv, fifo, bw);
+ if (ret)
+ return ret;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
+
+ if (!bw)
+ cpsw_fifo_shp_on(priv, fifo, bw);
+
+ val = slave_read(slave, tx_in_ctl_rg);
+ if (cpsw_shp_is_off(priv)) {
+ /* disable FIFOs rate limited queues */
+ val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
+
+ /* set type of FIFO queues to normal priority mode */
+ val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
+
+ /* set type of FIFO queues to be rate limited */
+ if (bw)
+ val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
+ else
+ priv->shp_cfg_speed = 0;
+ }
+
+ /* toggle a FIFO rate limited queue */
+ if (bw)
+ val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+ else
+ val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+ slave_write(slave, val, tx_in_ctl_rg);
+
+ /* FIFO transmit shape enable */
+ cpsw_fifo_shp_on(priv, fifo, bw);
+ return 0;
+}
+
+/* Defaults:
+ * class A - prio 3
+ * class B - prio 2
+ * shaping for class A should be set first
+ */
+static int cpsw_set_cbs(struct net_device *ndev,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int prev_speed = 0;
+ int tc, ret, fifo;
+ u32 bw = 0;
+
+ tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
+
+ /* Enable channels in backward order, as the highest FIFOs must be
+ * rate limited first and for compliance with CPDMA rate limited
+ * channels, which are also used in backward order. FIFO0 cannot be
+ * rate limited.
+ */
+ fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
+ if (!fifo) {
+ dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
+ return -EINVAL;
+ }
+
+ /* do nothing, it's disabled anyway */
+ if (!qopt->enable && !priv->fifo_bw[fifo])
+ return 0;
+
+ /* shapers can be set if link speed is known */
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ if (slave->phy && slave->phy->link) {
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed)
+ prev_speed = priv->shp_cfg_speed;
+
+ priv->shp_cfg_speed = slave->phy->speed;
+ }
+
+ if (!priv->shp_cfg_speed) {
+ dev_err(priv->dev, "Link speed is not known");
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ bw = qopt->enable ? qopt->idleslope : 0;
+ ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
+ if (ret) {
+ priv->shp_cfg_speed = prev_speed;
+ prev_speed = 0;
+ }
+
+ if (bw && prev_speed)
+ dev_warn(priv->dev,
+ "Speed was changed, so CBS shaper speeds changed as well!");
+
+ pm_runtime_put_sync(cpsw->dev);
+ return ret;
+}
+
+static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ int fifo, bw;
+
+ for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
+ bw = priv->fifo_bw[fifo];
+ if (!bw)
+ continue;
+
+ cpsw_set_fifo_rlimit(priv, fifo, bw);
+ }
+}
+
+static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 tx_prio_map = 0;
+ int i, tc, fifo;
+ u32 tx_prio_rg;
+
+ if (!priv->mqprio_hw)
+ return;
+
+ for (i = 0; i < 8; i++) {
+ tc = netdev_get_prio_tc_map(priv->ndev, i);
+ fifo = CPSW_FIFO_SHAPERS_NUM - tc;
+ tx_prio_map |= fifo << (4 * i);
+ }
+
+ tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+ slave_write(slave, tx_prio_map, tx_prio_rg);
+}
+
+/* restore resources after port reset */
+static void cpsw_restore(struct cpsw_priv *priv)
+{
+ /* restore MQPRIO offload */
+ for_each_slave(priv, cpsw_mqprio_resume, priv);
+
+ /* restore CBS offload */
+ for_each_slave(priv, cpsw_cbs_resume, priv);
+}
+
static int cpsw_ndo_open(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1656,6 +1926,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
}
+ cpsw_restore(priv);
+
/* Enable Interrupt pacing if configured */
if (cpsw->coal_intvl != 0) {
struct ethtool_coalesce coal;
@@ -2187,6 +2459,78 @@ static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
return ret;
}
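+/* Program the MQPRIO offload. A typical (assumed) configuration:
+ * tc qdisc replace dev eth0 root handle 100: mqprio num_tc 3 \
+ * map 2 2 1 0 2 2 2 2 queues 1@0 1@1 2@2 hw 1
+ */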
+static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int fifo, num_tc, count, offset;
+ struct cpsw_slave *slave;
+ u32 tx_prio_map = 0;
+ int i, tc, ret;
+
+ num_tc = mqprio->qopt.num_tc;
+ if (num_tc > CPSW_TC_NUM)
+ return -EINVAL;
+
+ if (mqprio->mode != TC_MQPRIO_MODE_DCB)
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(cpsw->dev);
+ return ret;
+ }
+
+ if (num_tc) {
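+ /* one 4-bit FIFO index per priority, eight priorities in total */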
+ for (i = 0; i < 8; i++) {
+ tc = mqprio->qopt.prio_tc_map[i];
+ fifo = cpsw_tc_to_fifo(tc, num_tc);
+ tx_prio_map |= fifo << (4 * i);
+ }
+
+ netdev_set_num_tc(ndev, num_tc);
+ for (i = 0; i < num_tc; i++) {
+ count = mqprio->qopt.count[i];
+ offset = mqprio->qopt.offset[i];
+ netdev_set_tc_queue(ndev, i, count, offset);
+ }
+ }
+
+ if (!mqprio->qopt.hw) {
+ /* restore default configuration */
+ netdev_reset_tc(ndev);
+ tx_prio_map = TX_PRIORITY_MAPPING;
+ }
+
+ priv->mqprio_hw = mqprio->qopt.hw;
+
+ offset = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ slave_write(slave, tx_prio_map, offset);
+
+ pm_runtime_put_sync(cpsw->dev);
+
+ return 0;
+}
+
+static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return cpsw_set_cbs(ndev, type_data);
+
+ case TC_SETUP_QDISC_MQPRIO:
+ return cpsw_set_mqprio(ndev, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -2202,6 +2546,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
#endif
.ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
+ .ndo_setup_tc = cpsw_ndo_setup_tc,
};
static int cpsw_get_regs_len(struct net_device *ndev)
@@ -2428,7 +2773,7 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
void (*handler)(void *, int, int);
struct netdev_queue *queue;
struct cpsw_vector *vec;
- int ret, *ch;
+ int ret, *ch, vch;
if (rx) {
ch = &cpsw->rx_ch_num;
@@ -2441,7 +2786,8 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
}
while (*ch < ch_num) {
- vec[*ch].ch = cpdma_chan_create(cpsw->dma, *ch, handler, rx);
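+ /* tx channels are taken from the top of the cpdma range down,
+ * since rate limited channels must occupy the highest numbers
+ */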
+ vch = rx ? *ch : 7 - *ch;
+ vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
queue = netdev_get_tx_queue(priv->ndev, *ch);
queue->tx_maxrate = 0;
@@ -2924,7 +3270,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
priv_sl2->mac_addr);
} else {
- random_ether_addr(priv_sl2->mac_addr);
+ eth_random_addr(priv_sl2->mac_addr);
dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
priv_sl2->mac_addr);
}
@@ -2932,7 +3278,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
priv_sl2->emac_port = 1;
cpsw->slaves[1].ndev = ndev;
- ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
@@ -2978,7 +3324,7 @@ static int cpsw_probe(struct platform_device *pdev)
u32 slave_offset, sliver_offset, slave_size;
const struct soc_device_attribute *soc;
struct cpsw_common *cpsw;
- int ret = 0, i;
+ int ret = 0, i, ch;
int irq;
cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
@@ -3153,7 +3499,8 @@ static int cpsw_probe(struct platform_device *pdev)
if (soc)
cpsw->quirk_irq = 1;
- cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
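+ /* without the irq quirk, start from cpdma channel 7 so tx channels
+ * are allocated from the top of the range
+ */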
+ ch = cpsw->quirk_irq ? 0 : 7;
+ cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
if (IS_ERR(cpsw->txv[0].ch)) {
dev_err(priv->dev, "error initializing tx dma channel\n");
ret = PTR_ERR(cpsw->txv[0].ch);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 6f63c8729afc..b96b93c686bf 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -114,7 +114,10 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
dev_consume_skb_any(skb);
dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
mtype, seqid);
- } else if (time_after(jiffies, skb_cb->tmo)) {
+ break;
+ }
+
+ if (time_after(jiffies, skb_cb->tmo)) {
/* timeout any expired skbs over 1s */
dev_dbg(cpts->dev,
"expiring tx timestamp mtype %u seqid %04x\n",
@@ -158,6 +161,7 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
*/
break;
}
+ /* fall through */
case CPTS_EV_PUSH:
case CPTS_EV_RX:
list_del_init(&event->list);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 4f1267477aa4..4236dcdd5634 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -406,37 +406,36 @@ static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
struct cpdma_chan *chan;
u32 old_rate = ch->rate;
u32 new_rmask = 0;
- int rlim = 1;
+ int rlim = 0;
int i;
- *prio_mode = 0;
for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
chan = ctlr->channels[i];
- if (!chan) {
- rlim = 0;
+ if (!chan)
continue;
- }
if (chan == ch)
chan->rate = rate;
if (chan->rate) {
- if (rlim) {
- new_rmask |= chan->mask;
- } else {
- ch->rate = old_rate;
- dev_err(ctlr->dev, "Prev channel of %dch is not rate limited\n",
- chan->chan_num);
- return -EINVAL;
- }
- } else {
- *prio_mode = 1;
- rlim = 0;
+ rlim = 1;
+ new_rmask |= chan->mask;
+ continue;
}
+
+ if (rlim)
+ goto err;
}
*rmask = new_rmask;
+ *prio_mode = rlim;
return 0;
+
+err:
+ ch->rate = old_rate;
+ dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
+ chan->chan_num);
+ return -EINVAL;
}
static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index e40aa3e31af2..a1d335a3c5e4 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1889,13 +1889,6 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
return err;
}
-static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv,
- select_queue_fallback_t fallback)
-{
- return 0;
-}
-
static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -1972,7 +1965,7 @@ static const struct net_device_ops netcp_netdev_ops = {
.ndo_vlan_rx_add_vid = netcp_rx_add_vid,
.ndo_vlan_rx_kill_vid = netcp_rx_kill_vid,
.ndo_tx_timeout = netcp_ndo_tx_timeout,
- .ndo_select_queue = netcp_select_queue,
+ .ndo_select_queue = dev_pick_tx_zero,
.ndo_setup_tc = netcp_setup_tc,
};
@@ -2052,7 +2045,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
if (is_valid_ether_addr(efuse_mac_addr))
ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
else
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
devm_iounmap(dev, efuse);
devm_release_mem_region(dev, res.start, size);
@@ -2061,7 +2054,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
if (mac_addr)
ether_addr_copy(ndev->dev_addr, mac_addr);
else
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
}
ret = of_property_read_string(node_interface, "rx-channel",
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index c769cd9d11e7..93d142867c2a 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -966,6 +966,7 @@ static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCGMIIPHY: /* get address of MII PHY in use. */
data->phy_id = phy;
+ /* fall through */
case SIOCGMIIREG: /* read MII PHY register. */
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 2a0c06e0f730..42f1f518dad6 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -70,7 +70,8 @@
#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */
#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit
* only. This is not documented
- * in the HW spec */
+ * in the HW spec
+ */
/* Define for programming the MAC address into the EmacLite */
#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
@@ -94,11 +95,11 @@
-#define TX_TIMEOUT (60*HZ) /* Tx timeout is 60 seconds. */
+#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */
#define ALIGNMENT 4
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
+#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
@@ -238,8 +239,8 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
/* Set up to output the remaining data */
align_buffer = 0;
- to_u8_ptr = (u8 *) &align_buffer;
- from_u8_ptr = (u8 *) from_u16_ptr;
+ to_u8_ptr = (u8 *)&align_buffer;
+ from_u8_ptr = (u8 *)from_u16_ptr;
/* Output the remaining data */
for (; length > 0; length--)
@@ -272,7 +273,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
u32 align_buffer;
from_u32_ptr = src_ptr;
- to_u16_ptr = (u16 *) dest_ptr;
+ to_u16_ptr = (u16 *)dest_ptr;
for (; length > 3; length -= 4) {
/* Copy each word into the temporary buffer */
@@ -288,9 +289,9 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
u8 *to_u8_ptr, *from_u8_ptr;
/* Set up to read the remaining data */
- to_u8_ptr = (u8 *) to_u16_ptr;
+ to_u8_ptr = (u8 *)to_u16_ptr;
align_buffer = *from_u32_ptr++;
- from_u8_ptr = (u8 *) &align_buffer;
+ from_u8_ptr = (u8 *)&align_buffer;
/* Read the remaining data */
for (; length > 0; length--)
@@ -336,7 +337,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
} else if (drvdata->tx_ping_pong != 0) {
/* If the expected buffer is full, try the other buffer,
- * if it is configured in HW */
+ * if it is configured in HW
+ */
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
@@ -349,7 +351,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
return -1; /* Buffer was full, return failure */
/* Write the frame to the buffer */
- xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
+ xemaclite_aligned_write(data, (u32 __force *)addr, byte_count);
xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
addr + XEL_TPLR_OFFSET);
@@ -357,7 +359,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
/* Update the Tx Status Register to indicate that there is a
* frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
* is used by the interrupt handler to check whether a frame
- * has been transmitted */
+ * has been transmitted
+ */
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
@@ -369,6 +372,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
* xemaclite_recv_data - Receive a frame
* @drvdata: Pointer to the Emaclite device private data
* @data: Address where the data is to be received
+ * @maxlen: Maximum supported ethernet packet length
*
* This function is intended to be called from the interrupt context or
* with a wrapper which waits for the receive frame to be available.
@@ -394,7 +398,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
/* The instance is out of sync, try other buffer if other
* buffer is configured, return 0 otherwise. If the instance is
* out of sync, do not update the 'next_rx_buf_to_use' since it
- * will correct on subsequent calls */
+ * will correct on subsequent calls
+ */
if (drvdata->rx_ping_pong != 0)
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
@@ -408,13 +413,15 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
return 0; /* No data was available */
}
- /* Get the protocol type of the ethernet frame that arrived */
+ /* Get the protocol type of the ethernet frame that arrived
+ */
proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
/* Check if received ethernet frame is a raw ethernet frame
- * or an IP packet or an ARP packet */
+ * or an IP packet or an ARP packet
+ */
if (proto_type > ETH_DATA_LEN) {
if (proto_type == ETH_P_IP) {
@@ -430,7 +437,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
else
/* Field contains type other than IP or ARP, use max
- * frame size and let user parse it */
+ * frame size and let user parse it
+ */
length = ETH_FRAME_LEN + ETH_FCS_LEN;
} else
/* Use the length in the frame, plus the header and trailer */
@@ -440,7 +448,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = maxlen;
/* Read from the EmacLite device */
- xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
+ xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET),
data, length);
/* Acknowledge the frame */
@@ -471,7 +479,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
/* Determine the expected Tx buffer address */
addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;
- xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
+ xemaclite_aligned_write(address_ptr, (u32 __force *)addr, ETH_ALEN);
xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
@@ -488,7 +496,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
/**
* xemaclite_set_mac_address - Set the MAC address for this device
* @dev: Pointer to the network device instance
- * @addr: Void pointer to the sockaddr structure
+ * @address: Void pointer to the sockaddr structure
*
* This function copies the HW address from the sockaddr structure to the
* net_device structure and updates the address in HW.
@@ -564,19 +572,19 @@ static void xemaclite_tx_handler(struct net_device *dev)
struct net_local *lp = netdev_priv(dev);
dev->stats.tx_packets++;
- if (lp->deferred_skb) {
- if (xemaclite_send_data(lp,
- (u8 *) lp->deferred_skb->data,
- lp->deferred_skb->len) != 0)
- return;
- else {
- dev->stats.tx_bytes += lp->deferred_skb->len;
- dev_kfree_skb_irq(lp->deferred_skb);
- lp->deferred_skb = NULL;
- netif_trans_update(dev); /* prevent tx timeout */
- netif_wake_queue(dev);
- }
- }
+
+ if (!lp->deferred_skb)
+ return;
+
+ if (xemaclite_send_data(lp, (u8 *)lp->deferred_skb->data,
+ lp->deferred_skb->len))
+ return;
+
+ dev->stats.tx_bytes += lp->deferred_skb->len;
+ dev_kfree_skb_irq(lp->deferred_skb);
+ lp->deferred_skb = NULL;
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue(dev);
}
/**
@@ -602,18 +610,18 @@ static void xemaclite_rx_handler(struct net_device *dev)
return;
}
- /*
- * A new skb should have the data halfword aligned, but this code is
+ /* A new skb should have the data halfword aligned, but this code is
* here just in case that isn't true. Calculate how many
* bytes we should reserve to get the data to start on a word
- * boundary */
+ * boundary
+ */
align = BUFFER_ALIGN(skb->data);
if (align)
skb_reserve(skb, align);
skb_reserve(skb, 2);
- len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
+ len = xemaclite_recv_data(lp, (u8 *)skb->data, len);
if (!len) {
dev->stats.rx_errors++;
@@ -639,6 +647,8 @@ static void xemaclite_rx_handler(struct net_device *dev)
* @dev_id: Void pointer to the network device instance used as callback
* reference
*
+ * Return: IRQ_HANDLED
+ *
* This function handles the Tx and Rx interrupts of the EmacLite device.
*/
static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
@@ -706,8 +716,8 @@ static int xemaclite_mdio_wait(struct net_local *lp)
unsigned long end = jiffies + 2;
/* wait for the MDIO interface to not be busy or timeout
- after some time.
- */
+ * after some time.
+ */
while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
XEL_MDIOCTRL_MDIOSTS_MASK) {
if (time_before_eq(end, jiffies)) {
@@ -757,7 +767,7 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
dev_dbg(&lp->ndev->dev,
- "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
+ "%s(phy_id=%i, reg=%x) == %x\n", __func__,
phy_id, reg, rc);
return rc;
@@ -772,6 +782,8 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
*
* This function waits till the device is ready to accept a new MDIO
* request and then writes the val to the MDIO Write Data register.
+ *
+ * Return: 0 upon success or a negative error upon failure
*/
static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
u16 val)
@@ -780,7 +792,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
u32 ctrl_reg;
dev_dbg(&lp->ndev->dev,
- "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
+ "%s(phy_id=%i, reg=%x, val=%x)\n", __func__,
phy_id, reg, val);
if (xemaclite_mdio_wait(lp))
@@ -805,7 +817,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
/**
* xemaclite_mdio_setup - Register mii_bus for the Emaclite device
* @lp: Pointer to the Emaclite device private data
- * @ofdev: Pointer to OF device structure
+ * @dev: Pointer to OF device structure
*
* This function enables MDIO bus in the Emaclite device and registers a
* mii_bus.
@@ -905,6 +917,9 @@ static void xemaclite_adjust_link(struct net_device *ndev)
* This function sets the MAC address, requests an IRQ and enables interrupts
* for the Emaclite device and starts the Tx queue.
* It also connects to the phy device, if MDIO is included in Emaclite device.
+ *
+ * Return: 0 on success. -ENODEV, if PHY cannot be connected.
+ * Non-zero error value on failure.
*/
static int xemaclite_open(struct net_device *dev)
{
@@ -975,6 +990,8 @@ static int xemaclite_open(struct net_device *dev)
* This function stops the Tx queue, disables interrupts and frees the IRQ for
* the Emaclite device.
* It also disconnects the phy device associated with the Emaclite device.
+ *
+ * Return: 0, always.
*/
static int xemaclite_close(struct net_device *dev)
{
@@ -1017,10 +1034,11 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
new_skb = orig_skb;
spin_lock_irqsave(&lp->reset_lock, flags);
- if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) {
+ if (xemaclite_send_data(lp, (u8 *)new_skb->data, len) != 0) {
/* If the Emaclite Tx buffer is busy, stop the Tx queue and
* defer the skb for transmission during the ISR, after the
- * current transmission is complete */
+ * current transmission is complete
+ */
netif_stop_queue(dev);
lp->deferred_skb = new_skb;
/* Take the time stamp now, since we can't do this in an ISR. */
@@ -1052,13 +1070,12 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
{
u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);
- if (p) {
- return (bool)*p;
- } else {
- dev_warn(&ofdev->dev, "Parameter %s not found,"
- "defaulting to false\n", s);
+ if (!p) {
+ dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to false\n", s);
return false;
}
+
+ return (bool)*p;
}
static const struct net_device_ops xemaclite_netdev_ops;
@@ -1066,7 +1083,6 @@ static const struct net_device_ops xemaclite_netdev_ops;
/**
* xemaclite_of_probe - Probe method for the Emaclite device.
* @ofdev: Pointer to OF device structure
- * @match: Pointer to the structure used for matching a device
*
* This function probes for the Emaclite device in the device tree.
* It initializes the driver data structure and the hardware, sets the MAC
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 750954be5a74..d3eae1239045 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1395,8 +1395,8 @@ static void fjes_watch_unshare_task(struct work_struct *work)
while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
(wait_time < 3000)) {
- for (epidx = 0; epidx < hw->max_epid; epidx++) {
- if (epidx == hw->my_epid)
+ for (epidx = 0; epidx < max_epid; epidx++) {
+ if (epidx == my_epid)
continue;
is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
@@ -1453,8 +1453,8 @@ static void fjes_watch_unshare_task(struct work_struct *work)
}
if (hw->hw_info.buffer_unshare_reserve_bit) {
- for (epidx = 0; epidx < hw->max_epid; epidx++) {
- if (epidx == hw->my_epid)
+ for (epidx = 0; epidx < max_epid; epidx++) {
+ if (epidx == my_epid)
continue;
if (test_bit(epidx,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index ada33c2d9ac2..6acb6b5718b9 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -236,7 +236,8 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
}
/* Update tunnel dst according to Geneve options. */
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
- gnvh->options, gnvh->opt_len * 4);
+ gnvh->options, gnvh->opt_len * 4,
+ TUNNEL_GENEVE_OPT);
} else {
/* Drop packets w/ critical options,
* since we don't support any...
@@ -418,11 +419,12 @@ static int geneve_hlen(struct genevehdr *gh)
return sizeof(*gh) + gh->opt_len * 4;
}
-static struct sk_buff **geneve_gro_receive(struct sock *sk,
- struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *geneve_gro_receive(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
{
- struct sk_buff *p, **pp = NULL;
+ struct sk_buff *pp = NULL;
+ struct sk_buff *p;
struct genevehdr *gh, *gh2;
unsigned int hlen, gh_len, off_gnv;
const struct packet_offload *ptype;
@@ -449,7 +451,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
goto out;
}
- for (p = *head; p; p = p->next) {
+ list_for_each_entry(p, head, list) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
@@ -674,7 +676,8 @@ static void geneve_build_header(struct genevehdr *geneveh,
geneveh->proto_type = htons(ETH_P_TEB);
geneveh->rsvd2 = 0;
- ip_tunnel_info_opts_get(geneveh->options, info);
+ if (info->key.tun_flags & TUNNEL_GENEVE_OPT)
+ ip_tunnel_info_opts_get(geneveh->options, info);
}
static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index ec629a730005..7a145172d503 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1255,7 +1255,7 @@ out:
return skb->len;
}
-static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
+static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
[GTPA_LINK] = { .type = NLA_U32, },
[GTPA_VERSION] = { .type = NLA_U32, },
[GTPA_TID] = { .type = NLA_U64, },
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 32f49c4ce457..d79a69dd2146 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -878,10 +878,8 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
{
- unsigned char channel;
int actual;
- channel = cmd & SIXP_CHN_MASK;
if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? */
/* RX and DCD flags can only be set in the same prio command,
@@ -933,10 +931,9 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
static void decode_std_command(struct sixpack *sp, unsigned char cmd)
{
- unsigned char checksum = 0, rest = 0, channel;
+ unsigned char checksum = 0, rest = 0;
short i;
- channel = cmd & SIXP_CHN_MASK;
switch (cmd & SIXP_CMD_MASK) { /* normal command */
case SIXP_SEOF:
if ((sp->rx_count == 0) && (sp->rx_count_cooked == 0)) {
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 4b6e308199d2..a32ded5b4f41 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -873,6 +873,17 @@ struct netvsc_ethtool_stats {
unsigned long wake_queue;
};
+struct netvsc_ethtool_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 vf_rx_packets;
+ u64 vf_rx_bytes;
+ u64 vf_tx_packets;
+ u64 vf_tx_bytes;
+};
+
struct netvsc_vf_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index dd1d6e115145..20275d1e6f9a 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -329,7 +329,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
}
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct net_device_context *ndc = netdev_priv(ndev);
@@ -343,9 +343,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
if (vf_ops->ndo_select_queue)
txq = vf_ops->ndo_select_queue(vf_netdev, skb,
- accel_priv, fallback);
+ sb_dev, fallback);
else
- txq = fallback(vf_netdev, skb);
+ txq = fallback(vf_netdev, skb, NULL);
/* Record the queue selected by VF so that it can be
* used for common case where VF has more queues than
@@ -1118,6 +1118,64 @@ static void netvsc_get_vf_stats(struct net_device *net,
}
}
+static void netvsc_get_pcpu_stats(struct net_device *net,
+ struct netvsc_ethtool_pcpu_stats *pcpu_tot)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
+ int i;
+
+ /* fetch percpu stats of vf */
+ for_each_possible_cpu(i) {
+ const struct netvsc_vf_pcpu_stats *stats =
+ per_cpu_ptr(ndev_ctx->vf_stats, i);
+ struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ this_tot->vf_rx_packets = stats->rx_packets;
+ this_tot->vf_tx_packets = stats->tx_packets;
+ this_tot->vf_rx_bytes = stats->rx_bytes;
+ this_tot->vf_tx_bytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ this_tot->rx_packets = this_tot->vf_rx_packets;
+ this_tot->tx_packets = this_tot->vf_tx_packets;
+ this_tot->rx_bytes = this_tot->vf_rx_bytes;
+ this_tot->tx_bytes = this_tot->vf_tx_bytes;
+ }
+
+ /* fetch percpu stats of netvsc */
+ for (i = 0; i < nvdev->num_chn; i++) {
+ const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
+ const struct netvsc_stats *stats;
+ struct netvsc_ethtool_pcpu_stats *this_tot =
+ &pcpu_tot[nvchan->channel->target_cpu];
+ u64 packets, bytes;
+ unsigned int start;
+
+ stats = &nvchan->tx_stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->packets;
+ bytes = stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ this_tot->tx_bytes += bytes;
+ this_tot->tx_packets += packets;
+
+ stats = &nvchan->rx_stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->packets;
+ bytes = stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ this_tot->rx_bytes += bytes;
+ this_tot->rx_packets += packets;
+ }
+}
+
static void netvsc_get_stats64(struct net_device *net,
struct rtnl_link_stats64 *t)
{
@@ -1215,6 +1273,23 @@ static const struct {
{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
+}, pcpu_stats[] = {
+ { "cpu%u_rx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
+ { "cpu%u_rx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
+ { "cpu%u_tx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
+ { "cpu%u_tx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
+ { "cpu%u_vf_rx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
+ { "cpu%u_vf_rx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
+ { "cpu%u_vf_tx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
+ { "cpu%u_vf_tx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
}, vf_stats[] = {
{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
{ "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
@@ -1226,6 +1301,9 @@ static const struct {
#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats)
+/* statistics per CPU (rx/tx packets/bytes, netvsc and VF) */
+#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
+
/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
@@ -1241,7 +1319,8 @@ static int netvsc_get_sset_count(struct net_device *dev, int string_set)
case ETH_SS_STATS:
return NETVSC_GLOBAL_STATS_LEN
+ NETVSC_VF_STATS_LEN
- + NETVSC_QUEUE_STATS_LEN(nvdev);
+ + NETVSC_QUEUE_STATS_LEN(nvdev)
+ + NETVSC_PCPU_STATS_LEN;
default:
return -EINVAL;
}
@@ -1255,9 +1334,10 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
const void *nds = &ndc->eth_stats;
const struct netvsc_stats *qstats;
struct netvsc_vf_pcpu_stats sum;
+ struct netvsc_ethtool_pcpu_stats *pcpu_sum;
unsigned int start;
u64 packets, bytes;
- int i, j;
+ int i, j, cpu;
if (!nvdev)
return;
@@ -1289,6 +1369,19 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
data[i++] = packets;
data[i++] = bytes;
}
+
+ pcpu_sum = kvmalloc_array(num_possible_cpus(),
+ sizeof(struct netvsc_ethtool_pcpu_stats),
+ GFP_KERNEL);
+ if (!pcpu_sum)
+ return;
+
+ netvsc_get_pcpu_stats(dev, pcpu_sum);
+ for_each_present_cpu(cpu) {
+ struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
+
+ for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
+ data[i++] = *(u64 *)((void *)this_sum
+ + pcpu_stats[j].offset);
+ }
+ kvfree(pcpu_sum);
}
static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -1296,7 +1389,7 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
u8 *p = data;
- int i;
+ int i, cpu;
if (!nvdev)
return;
@@ -1324,6 +1417,13 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
+ for_each_present_cpu(cpu) {
+ for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
+ sprintf(p, pcpu_stats[i].name, cpu);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+
break;
}
}
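
The netvsc change above drives both netvsc_get_strings() and netvsc_get_ethtool_stats() from the same pcpu_stats[] name/offset table, so the ethtool string list and value list cannot drift apart. A hedged, self-contained userspace sketch of that technique (the struct and stat names here are illustrative, not driver code):

/* One table drives both name generation and value extraction. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct pcpu_stats {
	uint64_t rx_packets;
	uint64_t tx_packets;
};

static const struct {
	const char *fmt;   /* printf-style name; %u is the CPU id */
	size_t offset;     /* field position inside struct pcpu_stats */
} stat_tbl[] = {
	{ "cpu%u_rx_packets", offsetof(struct pcpu_stats, rx_packets) },
	{ "cpu%u_tx_packets", offsetof(struct pcpu_stats, tx_packets) },
};

int main(void)
{
	struct pcpu_stats per_cpu[2] = { { 10, 20 }, { 30, 40 } };
	unsigned int cpu;
	size_t i;

	/* names and values must be emitted in the same nested order */
	for (cpu = 0; cpu < 2; cpu++) {
		for (i = 0; i < sizeof(stat_tbl) / sizeof(stat_tbl[0]); i++) {
			uint64_t v = *(uint64_t *)((char *)&per_cpu[cpu] +
						   stat_tbl[i].offset);
			printf(stat_tbl[i].fmt, cpu);
			printf(" = %llu\n", (unsigned long long)v);
		}
	}
	return 0;
}
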
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 408ece27131c..2a5209f23f29 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1338,7 +1338,7 @@ out:
/* setting up multiple channels failed */
net_device->max_chn = 1;
net_device->num_chn = 1;
- return 0;
+ return net_device;
err_dev_remv:
rndis_filter_device_remove(dev, net_device);
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 8782f5655e3f..0e372f392cb1 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -115,3 +115,14 @@ config IEEE802154_MCR20A
This driver can also be built as a module. To do so, say M here.
The module will be called 'mcr20a'.
+
+config IEEE802154_HWSIM
+ depends on IEEE802154_DRIVERS && MAC802154
+ tristate "Simulated radio testing tool for mac802154"
+ ---help---
+ This driver is a developer testing tool that can be used to test
+ the IEEE 802.15.4 networking stack (mac802154) functionality. It is
+ not needed for normal WPAN usage and is intended only for testing.
+
+ This driver can also be built as a module. To do so, say M here.
+ The module will be called 'mac802154_hwsim'.
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
index 104744d5a668..0c78b6298060 100644
--- a/drivers/net/ieee802154/Makefile
+++ b/drivers/net/ieee802154/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o
obj-$(CONFIG_IEEE802154_ADF7242) += adf7242.o
obj-$(CONFIG_IEEE802154_CA8210) += ca8210.o
obj-$(CONFIG_IEEE802154_MCR20A) += mcr20a.o
+obj-$(CONFIG_IEEE802154_HWSIM) += mac802154_hwsim.o
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 176395e4b7bb..3b0588d7e702 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -254,6 +254,9 @@ static __init int fakelb_init_module(void)
{
ieee802154fake_dev = platform_device_register_simple(
"ieee802154fakelb", -1, NULL, 0);
+
+ pr_warn("fakelb driver is marked as deprecated, please use mac802154_hwsim!\n");
+
return platform_driver_register(&ieee802154fake_driver);
}
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
new file mode 100644
index 000000000000..bf70ab892e69
--- /dev/null
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -0,0 +1,937 @@
+/*
+ * HWSIM IEEE 802.15.4 interface
+ *
+ * (C) 2018 Mojatau, Alexander Aring <aring@mojatau.com>
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Based on fakelb, originally written by:
+ * Sergey Lapin <slapin@ossfans.org>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
+#include <linux/netdevice.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <net/mac802154.h>
+#include <net/cfg802154.h>
+#include <net/genetlink.h>
+#include "mac802154_hwsim.h"
+
+MODULE_DESCRIPTION("Software simulator of IEEE 802.15.4 radio(s) for mac802154");
+MODULE_LICENSE("GPL");
+
+static LIST_HEAD(hwsim_phys);
+static DEFINE_MUTEX(hwsim_phys_lock);
+
+static LIST_HEAD(hwsim_ifup_phys);
+
+static struct platform_device *mac802154hwsim_dev;
+
+/* MAC802154_HWSIM netlink family */
+static struct genl_family hwsim_genl_family;
+
+static int hwsim_radio_idx;
+
+enum hwsim_multicast_groups {
+ HWSIM_MCGRP_CONFIG,
+};
+
+static const struct genl_multicast_group hwsim_mcgrps[] = {
+ [HWSIM_MCGRP_CONFIG] = { .name = "config", },
+};
+
+struct hwsim_pib {
+ u8 page;
+ u8 channel;
+
+ struct rcu_head rcu;
+};
+
+struct hwsim_edge_info {
+ u8 lqi;
+
+ struct rcu_head rcu;
+};
+
+struct hwsim_edge {
+ struct hwsim_phy *endpoint;
+ struct hwsim_edge_info __rcu *info;
+
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+struct hwsim_phy {
+ struct ieee802154_hw *hw;
+ u32 idx;
+
+ struct hwsim_pib __rcu *pib;
+
+ bool suspended;
+ struct list_head edges;
+
+ struct list_head list;
+ struct list_head list_ifup;
+};
+
+static int hwsim_add_one(struct genl_info *info, struct device *dev,
+ bool init);
+static void hwsim_del(struct hwsim_phy *phy);
+
+static int hwsim_hw_ed(struct ieee802154_hw *hw, u8 *level)
+{
+ *level = 0xbe;
+
+ return 0;
+}
+
+static int hwsim_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+{
+ struct hwsim_phy *phy = hw->priv;
+ struct hwsim_pib *pib, *pib_old;
+
+ pib = kzalloc(sizeof(*pib), GFP_KERNEL);
+ if (!pib)
+ return -ENOMEM;
+
+ pib->page = page;
+ pib->channel = channel;
+
+ pib_old = rtnl_dereference(phy->pib);
+ rcu_assign_pointer(phy->pib, pib);
+ kfree_rcu(pib_old, rcu);
+ return 0;
+}
+
+static int hwsim_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
+{
+ struct hwsim_phy *current_phy = hw->priv;
+ struct hwsim_pib *current_pib, *endpoint_pib;
+ struct hwsim_edge_info *einfo;
+ struct hwsim_edge *e;
+
+ WARN_ON(current_phy->suspended);
+
+ rcu_read_lock();
+ current_pib = rcu_dereference(current_phy->pib);
+ list_for_each_entry_rcu(e, &current_phy->edges, list) {
+ /* This can change later in rx_irqsafe, but it is only a
+ * performance tweak: the receiving radio should drop the frame
+ * in the mac802154 stack anyway, so we don't need fully
+ * accurate locking here when checking the suspended state.
+ */
+ if (e->endpoint->suspended)
+ continue;
+
+ endpoint_pib = rcu_dereference(e->endpoint->pib);
+ if (current_pib->page == endpoint_pib->page &&
+ current_pib->channel == endpoint_pib->channel) {
+ struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC);
+
+ einfo = rcu_dereference(e->info);
+ if (newskb)
+ ieee802154_rx_irqsafe(e->endpoint->hw, newskb,
+ einfo->lqi);
+ }
+ }
+ rcu_read_unlock();
+
+ ieee802154_xmit_complete(hw, skb, false);
+ return 0;
+}
+
+static int hwsim_hw_start(struct ieee802154_hw *hw)
+{
+ struct hwsim_phy *phy = hw->priv;
+
+ phy->suspended = false;
+ list_add_rcu(&phy->list_ifup, &hwsim_ifup_phys);
+ synchronize_rcu();
+
+ return 0;
+}
+
+static void hwsim_hw_stop(struct ieee802154_hw *hw)
+{
+ struct hwsim_phy *phy = hw->priv;
+
+ phy->suspended = true;
+ list_del_rcu(&phy->list_ifup);
+ synchronize_rcu();
+}
+
+static int
+hwsim_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
+{
+ return 0;
+}
+
+static const struct ieee802154_ops hwsim_ops = {
+ .owner = THIS_MODULE,
+ .xmit_async = hwsim_hw_xmit,
+ .ed = hwsim_hw_ed,
+ .set_channel = hwsim_hw_channel,
+ .start = hwsim_hw_start,
+ .stop = hwsim_hw_stop,
+ .set_promiscuous_mode = hwsim_set_promiscuous_mode,
+};
+
+static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ return hwsim_add_one(info, &mac802154hwsim_dev->dev, false);
+}
+
+static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ struct hwsim_phy *phy, *tmp;
+ s64 idx = -1;
+
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID])
+ return -EINVAL;
+
+ idx = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]);
+
+ mutex_lock(&hwsim_phys_lock);
+ list_for_each_entry_safe(phy, tmp, &hwsim_phys, list) {
+ if (idx == phy->idx) {
+ hwsim_del(phy);
+ mutex_unlock(&hwsim_phys_lock);
+ return 0;
+ }
+ }
+ mutex_unlock(&hwsim_phys_lock);
+
+ return -ENODEV;
+}
+
+static int append_radio_msg(struct sk_buff *skb, struct hwsim_phy *phy)
+{
+ struct nlattr *nl_edges, *nl_edge;
+ struct hwsim_edge_info *einfo;
+ struct hwsim_edge *e;
+ int ret;
+
+ ret = nla_put_u32(skb, MAC802154_HWSIM_ATTR_RADIO_ID, phy->idx);
+ if (ret < 0)
+ return ret;
+
+ rcu_read_lock();
+ if (list_empty(&phy->edges)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ nl_edges = nla_nest_start(skb, MAC802154_HWSIM_ATTR_RADIO_EDGES);
+ if (!nl_edges) {
+ rcu_read_unlock();
+ return -ENOBUFS;
+ }
+
+ list_for_each_entry_rcu(e, &phy->edges, list) {
+ nl_edge = nla_nest_start(skb, MAC802154_HWSIM_ATTR_RADIO_EDGE);
+ if (!nl_edge) {
+ rcu_read_unlock();
+ nla_nest_cancel(skb, nl_edges);
+ return -ENOBUFS;
+ }
+
+ ret = nla_put_u32(skb, MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID,
+ e->endpoint->idx);
+ if (ret < 0) {
+ rcu_read_unlock();
+ nla_nest_cancel(skb, nl_edge);
+ nla_nest_cancel(skb, nl_edges);
+ return ret;
+ }
+
+ einfo = rcu_dereference(e->info);
+ ret = nla_put_u8(skb, MAC802154_HWSIM_EDGE_ATTR_LQI,
+ einfo->lqi);
+ if (ret < 0) {
+ rcu_read_unlock();
+ nla_nest_cancel(skb, nl_edge);
+ nla_nest_cancel(skb, nl_edges);
+ return ret;
+ }
+
+ nla_nest_end(skb, nl_edge);
+ }
+ rcu_read_unlock();
+
+ nla_nest_end(skb, nl_edges);
+
+ return 0;
+}
+
+static int hwsim_get_radio(struct sk_buff *skb, struct hwsim_phy *phy,
+ u32 portid, u32 seq,
+ struct netlink_callback *cb, int flags)
+{
+ void *hdr;
+ int res = -EMSGSIZE;
+
+ hdr = genlmsg_put(skb, portid, seq, &hwsim_genl_family, flags,
+ MAC802154_HWSIM_CMD_GET_RADIO);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (cb)
+ genl_dump_check_consistent(cb, hdr);
+
+ res = append_radio_msg(skb, phy);
+ if (res < 0)
+ goto out_err;
+
+ genlmsg_end(skb, hdr);
+ return 0;
+
+out_err:
+ genlmsg_cancel(skb, hdr);
+ return res;
+}
+
+static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ struct hwsim_phy *phy;
+ struct sk_buff *skb;
+ int idx, res = -ENODEV;
+
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID])
+ return -EINVAL;
+ idx = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]);
+
+ mutex_lock(&hwsim_phys_lock);
+ list_for_each_entry(phy, &hwsim_phys, list) {
+ if (phy->idx != idx)
+ continue;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ res = -ENOMEM;
+ goto out_err;
+ }
+
+ res = hwsim_get_radio(skb, phy, info->snd_portid,
+ info->snd_seq, NULL, 0);
+ if (res < 0) {
+ nlmsg_free(skb);
+ goto out_err;
+ }
+
+ genlmsg_reply(skb, info);
+ break;
+ }
+
+out_err:
+ mutex_unlock(&hwsim_phys_lock);
+
+ return res;
+}
+
+static int hwsim_dump_radio_nl(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ int idx = cb->args[0];
+ struct hwsim_phy *phy;
+ int res;
+
+ mutex_lock(&hwsim_phys_lock);
+
+ if (idx == hwsim_radio_idx)
+ goto done;
+
+ list_for_each_entry(phy, &hwsim_phys, list) {
+ if (phy->idx < idx)
+ continue;
+
+ res = hwsim_get_radio(skb, phy, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
+ if (res < 0)
+ break;
+
+ idx = phy->idx + 1;
+ }
+
+ cb->args[0] = idx;
+
+done:
+ mutex_unlock(&hwsim_phys_lock);
+ return skb->len;
+}
+
+/* caller needs to hold hwsim_phys_lock */
+static struct hwsim_phy *hwsim_get_radio_by_id(uint32_t idx)
+{
+ struct hwsim_phy *phy;
+
+ list_for_each_entry(phy, &hwsim_phys, list) {
+ if (phy->idx == idx)
+ return phy;
+ }
+
+ return NULL;
+}
+
+static const struct nla_policy hwsim_edge_policy[MAC802154_HWSIM_EDGE_ATTR_MAX + 1] = {
+ [MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] = { .type = NLA_U32 },
+ [MAC802154_HWSIM_EDGE_ATTR_LQI] = { .type = NLA_U8 },
+};
+
+static struct hwsim_edge *hwsim_alloc_edge(struct hwsim_phy *endpoint, u8 lqi)
+{
+ struct hwsim_edge_info *einfo;
+ struct hwsim_edge *e;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return NULL;
+
+ einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+ if (!einfo) {
+ kfree(e);
+ return NULL;
+ }
+
+ einfo->lqi = 0xff;
+ rcu_assign_pointer(e->info, einfo);
+ e->endpoint = endpoint;
+
+ return e;
+}
+
+static void hwsim_free_edge(struct hwsim_edge *e)
+{
+ struct hwsim_edge_info *einfo;
+
+ rcu_read_lock();
+ einfo = rcu_dereference(e->info);
+ rcu_read_unlock();
+
+ kfree_rcu(einfo, rcu);
+ kfree_rcu(e, rcu);
+}
+
+static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1];
+ struct hwsim_phy *phy_v0, *phy_v1;
+ struct hwsim_edge *e;
+ u32 v0, v1;
+
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+ !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
+ return -EINVAL;
+
+ if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
+ info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
+ hwsim_edge_policy, NULL))
+ return -EINVAL;
+
+ if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID])
+ return -EINVAL;
+
+ v0 = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]);
+ v1 = nla_get_u32(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]);
+
+ if (v0 == v1)
+ return -EINVAL;
+
+ mutex_lock(&hwsim_phys_lock);
+ phy_v0 = hwsim_get_radio_by_id(v0);
+ if (!phy_v0) {
+ mutex_unlock(&hwsim_phys_lock);
+ return -ENOENT;
+ }
+
+ phy_v1 = hwsim_get_radio_by_id(v1);
+ if (!phy_v1) {
+ mutex_unlock(&hwsim_phys_lock);
+ return -ENOENT;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &phy_v0->edges, list) {
+ if (e->endpoint->idx == v1) {
+ mutex_unlock(&hwsim_phys_lock);
+ rcu_read_unlock();
+ return -EEXIST;
+ }
+ }
+ rcu_read_unlock();
+
+ e = hwsim_alloc_edge(phy_v1, 0xff);
+ if (!e) {
+ mutex_unlock(&hwsim_phys_lock);
+ return -ENOMEM;
+ }
+ list_add_rcu(&e->list, &phy_v0->edges);
+ /* Wait until the changes are done under hwsim_phys_lock;
+ * this should prevent this function from being called again
+ * while the edges list does not yet contain the changes.
+ */
+ synchronize_rcu();
+ mutex_unlock(&hwsim_phys_lock);
+
+ return 0;
+}
+
+static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1];
+ struct hwsim_phy *phy_v0;
+ struct hwsim_edge *e;
+ u32 v0, v1;
+
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+ !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
+ return -EINVAL;
+
+ if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
+ info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
+ hwsim_edge_policy, NULL))
+ return -EINVAL;
+
+ if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID])
+ return -EINVAL;
+
+ v0 = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]);
+ v1 = nla_get_u32(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]);
+
+ mutex_lock(&hwsim_phys_lock);
+ phy_v0 = hwsim_get_radio_by_id(v0);
+ if (!phy_v0) {
+ mutex_unlock(&hwsim_phys_lock);
+ return -ENOENT;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &phy_v0->edges, list) {
+ if (e->endpoint->idx == v1) {
+ rcu_read_unlock();
+ list_del_rcu(&e->list);
+ hwsim_free_edge(e);
+ /* same again - wait until list changes are done */
+ synchronize_rcu();
+ mutex_unlock(&hwsim_phys_lock);
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+
+ mutex_unlock(&hwsim_phys_lock);
+
+ return -ENOENT;
+}
+
+static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
+{
+ struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1];
+ struct hwsim_edge_info *einfo;
+ struct hwsim_phy *phy_v0;
+ struct hwsim_edge *e;
+ u32 v0, v1;
+ u8 lqi;
+
+ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+ !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
+ return -EINVAL;
+
+ if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
+ info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
+ hwsim_edge_policy, NULL))
+ return -EINVAL;
+
+ if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] &&
+ !edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI])
+ return -EINVAL;
+
+ v0 = nla_get_u32(info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID]);
+ v1 = nla_get_u32(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]);
+ lqi = nla_get_u8(edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI]);
+
+ mutex_lock(&hwsim_phys_lock);
+ phy_v0 = hwsim_get_radio_by_id(v0);
+ if (!phy_v0) {
+ mutex_unlock(&hwsim_phys_lock);
+ return -ENOENT;
+ }
+
+ einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+ if (!einfo) {
+ mutex_unlock(&hwsim_phys_lock);
+ return -ENOMEM;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &phy_v0->edges, list) {
+ if (e->endpoint->idx == v1) {
+ einfo->lqi = lqi;
+ rcu_assign_pointer(e->info, einfo);
+ rcu_read_unlock();
+ mutex_unlock(&hwsim_phys_lock);
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+
+ kfree(einfo);
+ mutex_unlock(&hwsim_phys_lock);
+
+ return -ENOENT;
+}
+
+/* MAC802154_HWSIM netlink policy */
+
+static const struct nla_policy hwsim_genl_policy[MAC802154_HWSIM_ATTR_MAX + 1] = {
+ [MAC802154_HWSIM_ATTR_RADIO_ID] = { .type = NLA_U32 },
+ [MAC802154_HWSIM_ATTR_RADIO_EDGE] = { .type = NLA_NESTED },
+ [MAC802154_HWSIM_ATTR_RADIO_EDGES] = { .type = NLA_NESTED },
+};
+
+/* Generic Netlink operations array */
+static const struct genl_ops hwsim_nl_ops[] = {
+ {
+ .cmd = MAC802154_HWSIM_CMD_NEW_RADIO,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_new_radio_nl,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MAC802154_HWSIM_CMD_DEL_RADIO,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_del_radio_nl,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MAC802154_HWSIM_CMD_GET_RADIO,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_get_radio_nl,
+ .dumpit = hwsim_dump_radio_nl,
+ },
+ {
+ .cmd = MAC802154_HWSIM_CMD_NEW_EDGE,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_new_edge_nl,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MAC802154_HWSIM_CMD_DEL_EDGE,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_del_edge_nl,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MAC802154_HWSIM_CMD_SET_EDGE,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_set_edge_lqi,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+};
+
+static struct genl_family hwsim_genl_family __ro_after_init = {
+ .name = "MAC802154_HWSIM",
+ .version = 1,
+ .maxattr = MAC802154_HWSIM_ATTR_MAX,
+ .module = THIS_MODULE,
+ .ops = hwsim_nl_ops,
+ .n_ops = ARRAY_SIZE(hwsim_nl_ops),
+ .mcgrps = hwsim_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
+};
+
+static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb,
+ struct genl_info *info)
+{
+ if (info)
+ genl_notify(&hwsim_genl_family, mcast_skb, info,
+ HWSIM_MCGRP_CONFIG, GFP_KERNEL);
+ else
+ genlmsg_multicast(&hwsim_genl_family, mcast_skb, 0,
+ HWSIM_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+static void hwsim_mcast_new_radio(struct genl_info *info, struct hwsim_phy *phy)
+{
+ struct sk_buff *mcast_skb;
+ void *data;
+
+ mcast_skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!mcast_skb)
+ return;
+
+ data = genlmsg_put(mcast_skb, 0, 0, &hwsim_genl_family, 0,
+ MAC802154_HWSIM_CMD_NEW_RADIO);
+ if (!data)
+ goto out_err;
+
+ if (append_radio_msg(mcast_skb, phy) < 0)
+ goto out_err;
+
+ genlmsg_end(mcast_skb, data);
+
+ hwsim_mcast_config_msg(mcast_skb, info);
+ return;
+
+out_err:
+ genlmsg_cancel(mcast_skb, data);
+ nlmsg_free(mcast_skb);
+}
+
+static void hwsim_edge_unsubscribe_me(struct hwsim_phy *phy)
+{
+ struct hwsim_phy *tmp;
+ struct hwsim_edge *e;
+
+ rcu_read_lock();
+ /* going to all phy edges and remove phy from it */
+ list_for_each_entry(tmp, &hwsim_phys, list) {
+ list_for_each_entry_rcu(e, &tmp->edges, list) {
+ if (e->endpoint->idx == phy->idx) {
+ list_del_rcu(&e->list);
+ hwsim_free_edge(e);
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ synchronize_rcu();
+}
+
+static int hwsim_subscribe_all_others(struct hwsim_phy *phy)
+{
+ struct hwsim_phy *sub;
+ struct hwsim_edge *e;
+
+ list_for_each_entry(sub, &hwsim_phys, list) {
+ e = hwsim_alloc_edge(sub, 0xff);
+ if (!e)
+ goto me_fail;
+
+ list_add_rcu(&e->list, &phy->edges);
+ }
+
+ list_for_each_entry(sub, &hwsim_phys, list) {
+ e = hwsim_alloc_edge(phy, 0xff);
+ if (!e)
+ goto sub_fail;
+
+ list_add_rcu(&e->list, &sub->edges);
+ }
+
+ return 0;
+
+me_fail:
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &phy->edges, list) {
+ list_del_rcu(&e->list);
+ hwsim_free_edge(e);
+ }
+ rcu_read_unlock();
+sub_fail:
+ hwsim_edge_unsubscribe_me(phy);
+ return -ENOMEM;
+}
+
+static int hwsim_add_one(struct genl_info *info, struct device *dev,
+ bool init)
+{
+ struct ieee802154_hw *hw;
+ struct hwsim_phy *phy;
+ struct hwsim_pib *pib;
+ int idx;
+ int err;
+
+ idx = hwsim_radio_idx++;
+
+ hw = ieee802154_alloc_hw(sizeof(*phy), &hwsim_ops);
+ if (!hw)
+ return -ENOMEM;
+
+ phy = hw->priv;
+ phy->hw = hw;
+
+ /* 868 MHz BPSK 802.15.4-2003 */
+ hw->phy->supported.channels[0] |= 1;
+ /* 915 MHz BPSK 802.15.4-2003 */
+ hw->phy->supported.channels[0] |= 0x7fe;
+ /* 2.4 GHz O-QPSK 802.15.4-2003 */
+ hw->phy->supported.channels[0] |= 0x7FFF800;
+ /* 868 MHz ASK 802.15.4-2006 */
+ hw->phy->supported.channels[1] |= 1;
+ /* 915 MHz ASK 802.15.4-2006 */
+ hw->phy->supported.channels[1] |= 0x7fe;
+ /* 868 MHz O-QPSK 802.15.4-2006 */
+ hw->phy->supported.channels[2] |= 1;
+ /* 915 MHz O-QPSK 802.15.4-2006 */
+ hw->phy->supported.channels[2] |= 0x7fe;
+ /* 2.4 GHz CSS 802.15.4a-2007 */
+ hw->phy->supported.channels[3] |= 0x3fff;
+ /* UWB Sub-gigahertz 802.15.4a-2007 */
+ hw->phy->supported.channels[4] |= 1;
+ /* UWB Low band 802.15.4a-2007 */
+ hw->phy->supported.channels[4] |= 0x1e;
+ /* UWB High band 802.15.4a-2007 */
+ hw->phy->supported.channels[4] |= 0xffe0;
+ /* 750 MHz O-QPSK 802.15.4c-2009 */
+ hw->phy->supported.channels[5] |= 0xf;
+ /* 750 MHz MPSK 802.15.4c-2009 */
+ hw->phy->supported.channels[5] |= 0xf0;
+ /* 950 MHz BPSK 802.15.4d-2009 */
+ hw->phy->supported.channels[6] |= 0x3ff;
+ /* 950 MHz GFSK 802.15.4d-2009 */
+ hw->phy->supported.channels[6] |= 0x3ffc00;
+
+ ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+
+ /* use channel 13 as the hwsim phy default */
+ hw->phy->current_channel = 13;
+ pib = kzalloc(sizeof(*pib), GFP_KERNEL);
+ if (!pib) {
+ err = -ENOMEM;
+ goto err_pib;
+ }
+
+ rcu_assign_pointer(phy->pib, pib);
+ phy->idx = idx;
+ INIT_LIST_HEAD(&phy->edges);
+
+ hw->flags = IEEE802154_HW_PROMISCUOUS;
+ hw->parent = dev;
+
+ err = ieee802154_register_hw(hw);
+ if (err)
+ goto err_reg;
+
+ mutex_lock(&hwsim_phys_lock);
+ if (init) {
+ err = hwsim_subscribe_all_others(phy);
+ if (err < 0) {
+ mutex_unlock(&hwsim_phys_lock);
+ goto err_subscribe;
+ }
+ }
+ list_add_tail(&phy->list, &hwsim_phys);
+ mutex_unlock(&hwsim_phys_lock);
+
+ hwsim_mcast_new_radio(info, phy);
+
+ return idx;
+
+err_subscribe:
+ ieee802154_unregister_hw(phy->hw);
+err_reg:
+ kfree(pib);
+err_pib:
+ ieee802154_free_hw(phy->hw);
+ return err;
+}
+
+static void hwsim_del(struct hwsim_phy *phy)
+{
+ struct hwsim_pib *pib;
+
+ hwsim_edge_unsubscribe_me(phy);
+
+ list_del(&phy->list);
+
+ rcu_read_lock();
+ pib = rcu_dereference(phy->pib);
+ rcu_read_unlock();
+
+ kfree_rcu(pib, rcu);
+
+ ieee802154_unregister_hw(phy->hw);
+ ieee802154_free_hw(phy->hw);
+}
+
+static int hwsim_probe(struct platform_device *pdev)
+{
+ struct hwsim_phy *phy, *tmp;
+ int err, i;
+
+ for (i = 0; i < 2; i++) {
+ err = hwsim_add_one(NULL, &pdev->dev, true);
+ if (err < 0)
+ goto err_slave;
+ }
+
+ dev_info(&pdev->dev, "Added 2 mac802154 hwsim hardware radios\n");
+ return 0;
+
+err_slave:
+ mutex_lock(&hwsim_phys_lock);
+ list_for_each_entry_safe(phy, tmp, &hwsim_phys, list)
+ hwsim_del(phy);
+ mutex_unlock(&hwsim_phys_lock);
+ return err;
+}
+
+static int hwsim_remove(struct platform_device *pdev)
+{
+ struct hwsim_phy *phy, *tmp;
+
+ mutex_lock(&hwsim_phys_lock);
+ list_for_each_entry_safe(phy, tmp, &hwsim_phys, list)
+ hwsim_del(phy);
+ mutex_unlock(&hwsim_phys_lock);
+
+ return 0;
+}
+
+static struct platform_driver mac802154hwsim_driver = {
+ .probe = hwsim_probe,
+ .remove = hwsim_remove,
+ .driver = {
+ .name = "mac802154_hwsim",
+ },
+};
+
+static __init int hwsim_init_module(void)
+{
+ int rc;
+
+ rc = genl_register_family(&hwsim_genl_family);
+ if (rc)
+ return rc;
+
+ mac802154hwsim_dev = platform_device_register_simple("mac802154_hwsim",
+ -1, NULL, 0);
+ if (IS_ERR(mac802154hwsim_dev)) {
+ rc = PTR_ERR(mac802154hwsim_dev);
+ goto platform_dev;
+ }
+
+ rc = platform_driver_register(&mac802154hwsim_driver);
+ if (rc < 0)
+ goto platform_drv;
+
+ return 0;
+
+platform_drv:
+ platform_device_unregister(mac802154hwsim_dev);
+platform_dev:
+ genl_unregister_family(&hwsim_genl_family);
+ return rc;
+}
+
+static __exit void hwsim_remove_module(void)
+{
+ genl_unregister_family(&hwsim_genl_family);
+ platform_driver_unregister(&mac802154hwsim_driver);
+ platform_device_unregister(mac802154hwsim_dev);
+}
+
+module_init(hwsim_init_module);
+module_exit(hwsim_remove_module);
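
The new driver's hwsim_hw_channel() above is an instance of the standard RCU publish/reclaim idiom. A distilled sketch of just that pattern, with illustrative names (cur_cfg and cfg_update are not driver symbols); readers would use rcu_dereference() under rcu_read_lock(), as hwsim_hw_xmit() does:

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

struct cfg {
	u8 page, channel;
	struct rcu_head rcu;
};

static struct cfg __rcu *cur_cfg;

static int cfg_update(u8 page, u8 channel)
{
	struct cfg *new, *old;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->page = page;
	new->channel = channel;

	old = rtnl_dereference(cur_cfg);   /* writer serialized by RTNL */
	rcu_assign_pointer(cur_cfg, new);  /* publish; readers see new cfg */
	kfree_rcu(old, rcu);               /* free old after a grace period */
	return 0;
}
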
diff --git a/drivers/net/ieee802154/mac802154_hwsim.h b/drivers/net/ieee802154/mac802154_hwsim.h
new file mode 100644
index 000000000000..6c6e30e3871d
--- /dev/null
+++ b/drivers/net/ieee802154/mac802154_hwsim.h
@@ -0,0 +1,73 @@
+#ifndef __MAC802154_HWSIM_H
+#define __MAC802154_HWSIM_H
+
+/* mac802154 hwsim netlink commands
+ *
+ * @MAC802154_HWSIM_CMD_UNSPEC: unspecified command to catch error
+ * @MAC802154_HWSIM_CMD_GET_RADIO: fetch information about existing radios
+ * @MAC802154_HWSIM_CMD_SET_RADIO: change radio parameters during runtime
+ * @MAC802154_HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters
+ * returns the radio ID (>= 0) or a negative error code; on success
+ * the result is also multicast
+ * @MAC802154_HWSIM_CMD_DEL_RADIO: destroy a radio, reply is multicasted
+ * @MAC802154_HWSIM_CMD_GET_EDGE: fetch information about existing edges
+ * @MAC802154_HWSIM_CMD_SET_EDGE: change edge parameters during runtime
+ * @MAC802154_HWSIM_CMD_DEL_EDGE: delete edges between radios
+ * @MAC802154_HWSIM_CMD_NEW_EDGE: create a new edge between two radios
+ * @__MAC802154_HWSIM_CMD_MAX: enum limit
+ */
+enum {
+ MAC802154_HWSIM_CMD_UNSPEC,
+
+ MAC802154_HWSIM_CMD_GET_RADIO,
+ MAC802154_HWSIM_CMD_SET_RADIO,
+ MAC802154_HWSIM_CMD_NEW_RADIO,
+ MAC802154_HWSIM_CMD_DEL_RADIO,
+
+ MAC802154_HWSIM_CMD_GET_EDGE,
+ MAC802154_HWSIM_CMD_SET_EDGE,
+ MAC802154_HWSIM_CMD_DEL_EDGE,
+ MAC802154_HWSIM_CMD_NEW_EDGE,
+
+ __MAC802154_HWSIM_CMD_MAX,
+};
+
+#define MAC802154_HWSIM_CMD_MAX (__MAC802154_HWSIM_CMD_MAX - 1)
+
+/* mac802154 hwsim netlink attributes
+ *
+ * @MAC802154_HWSIM_ATTR_UNSPEC: unspecified attribute to catch error
+ * @MAC802154_HWSIM_ATTR_RADIO_ID: u32 attribute to identify the radio
+ * @MAC802154_HWSIM_ATTR_RADIO_EDGE: nested attribute describing a single edge
+ * @MAC802154_HWSIM_ATTR_RADIO_EDGES: list of nested attributes which contains
+ * the edge information for the given radio id
+ * @__MAC802154_HWSIM_ATTR_MAX: enum limit
+ */
+enum {
+ MAC802154_HWSIM_ATTR_UNSPEC,
+ MAC802154_HWSIM_ATTR_RADIO_ID,
+ MAC802154_HWSIM_ATTR_RADIO_EDGE,
+ MAC802154_HWSIM_ATTR_RADIO_EDGES,
+ __MAC802154_HWSIM_ATTR_MAX,
+};
+
+#define MAC802154_HWSIM_ATTR_MAX (__MAC802154_HWSIM_ATTR_MAX - 1)
+
+/* mac802154 hwsim edge netlink attributes
+ *
+ * @MAC802154_HWSIM_EDGE_ATTR_UNSPEC: unspecified attribute to catch error
+ * @MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID: radio id where the edge points to
+ * @MAC802154_HWSIM_EDGE_ATTR_LQI: LQI value which the endpoint radio will
+ * receive for this edge
+ * @__MAC802154_HWSIM_EDGE_ATTR_MAX: enum limit
+ */
+enum {
+ MAC802154_HWSIM_EDGE_ATTR_UNSPEC,
+ MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID,
+ MAC802154_HWSIM_EDGE_ATTR_LQI,
+ __MAC802154_HWSIM_EDGE_ATTR_MAX,
+};
+
+#define MAC802154_HWSIM_EDGE_ATTR_MAX (__MAC802154_HWSIM_EDGE_ATTR_MAX - 1)
+
+#endif /* __MAC802154_HWSIM_H */
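
The header above is the whole generic-netlink ABI of the simulator, so a userspace tool can drive it directly. A hedged sketch using libnl-3 (error handling trimmed; assumes the family resolves as "MAC802154_HWSIM") that creates an edge from radio 0 to radio 1:

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include "mac802154_hwsim.h"

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *edge;
	int fam;

	genl_connect(sk);
	fam = genl_ctrl_resolve(sk, "MAC802154_HWSIM");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
		    MAC802154_HWSIM_CMD_NEW_EDGE, 1);
	nla_put_u32(msg, MAC802154_HWSIM_ATTR_RADIO_ID, 0);
	edge = nla_nest_start(msg, MAC802154_HWSIM_ATTR_RADIO_EDGE);
	nla_put_u32(msg, MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID, 1);
	nla_nest_end(msg, edge);

	nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}
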
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index adde8fc45588..cfda146f3b3b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -514,7 +514,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
const struct macvlan_dev *vlan = netdev_priv(dev);
const struct macvlan_port *port = vlan->port;
const struct macvlan_dev *dest;
- void *accel_priv = NULL;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
const struct ethhdr *eth = (void *)skb->data;
@@ -533,15 +532,10 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
return NET_XMIT_SUCCESS;
}
}
-
- /* For packets that are non-multicast and not bridged we will pass
- * the necessary information so that the lowerdev can distinguish
- * the source of the packets via the accel_priv value.
- */
- accel_priv = vlan->accel_priv;
xmit_world:
skb->dev = vlan->lowerdev;
- return dev_queue_xmit_accel(skb, accel_priv);
+ return dev_queue_xmit_accel(skb,
+ netdev_get_sb_channel(dev) ? dev : NULL);
}
static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
@@ -1647,6 +1641,7 @@ static int macvlan_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_UP:
+ case NETDEV_DOWN:
case NETDEV_CHANGE:
list_for_each_entry(vlan, &port->vlans, list)
netif_stacked_transfer_operstate(vlan->lowerdev,
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 4f390fa557e4..7ae1856d1f18 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -115,7 +115,8 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
}
static u16 net_failover_select_queue(struct net_device *dev,
- struct sk_buff *skb, void *accel_priv,
+ struct sk_buff *skb,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct net_failover_info *nfo_info = netdev_priv(dev);
@@ -128,9 +129,9 @@ static u16 net_failover_select_queue(struct net_device *dev,
if (ops->ndo_select_queue)
txq = ops->ndo_select_queue(primary_dev, skb,
- accel_priv, fallback);
+ sb_dev, fallback);
else
- txq = fallback(primary_dev, skb);
+ txq = fallback(primary_dev, skb, NULL);
qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
@@ -219,14 +220,14 @@ static int net_failover_change_mtu(struct net_device *dev, int new_mtu)
struct net_device *primary_dev, *standby_dev;
int ret = 0;
- primary_dev = rcu_dereference(nfo_info->primary_dev);
+ primary_dev = rtnl_dereference(nfo_info->primary_dev);
if (primary_dev) {
ret = dev_set_mtu(primary_dev, new_mtu);
if (ret)
return ret;
}
- standby_dev = rcu_dereference(nfo_info->standby_dev);
+ standby_dev = rtnl_dereference(nfo_info->standby_dev);
if (standby_dev) {
ret = dev_set_mtu(standby_dev, new_mtu);
if (ret) {
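
The net_failover hunks above follow the same conversion as netvsc earlier in this series: ndo_select_queue's opaque accel_priv becomes a struct net_device *sb_dev, and the fallback helper gains the same third argument. An illustrative post-change shape of a delegating select_queue (my_get_lower_dev() is a hypothetical helper, not a kernel API):

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
			   struct net_device *sb_dev,
			   select_queue_fallback_t fallback)
{
	struct net_device *lower = my_get_lower_dev(dev);
	const struct net_device_ops *ops = lower->netdev_ops;

	if (ops->ndo_select_queue)
		return ops->ndo_select_queue(lower, skb, sb_dev, fallback);

	return fallback(lower, skb, NULL);
}
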
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index 449b2a1a1800..0fee1d06c084 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -13,3 +13,7 @@ endif
ifneq ($(CONFIG_NET_DEVLINK),)
netdevsim-objs += devlink.o fib.o
endif
+
+ifneq ($(CONFIG_XFRM_OFFLOAD),)
+netdevsim-objs += ipsec.o
+endif
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 75c25306d234..81444208b216 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -92,7 +92,7 @@ static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
static bool nsim_xdp_offload_active(struct netdevsim *ns)
{
- return ns->xdp_prog_mode == XDP_ATTACHED_HW;
+ return ns->xdp_hw.prog;
}
static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
@@ -195,14 +195,14 @@ static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
}
-static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
+static int
+nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
+ struct xdp_attachment_info *xdp)
{
int err;
- if (ns->xdp_prog && (bpf->flags ^ ns->xdp_flags) & XDP_FLAGS_MODES) {
- NSIM_EA(bpf->extack, "program loaded with different flags");
+ if (!xdp_attachment_flags_ok(xdp, bpf))
return -EBUSY;
- }
if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
@@ -219,18 +219,7 @@ static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
return err;
}
- if (ns->xdp_prog)
- bpf_prog_put(ns->xdp_prog);
-
- ns->xdp_prog = bpf->prog;
- ns->xdp_flags = bpf->flags;
-
- if (!bpf->prog)
- ns->xdp_prog_mode = XDP_ATTACHED_NONE;
- else if (bpf->command == XDP_SETUP_PROG)
- ns->xdp_prog_mode = XDP_ATTACHED_DRV;
- else
- ns->xdp_prog_mode = XDP_ATTACHED_HW;
+ xdp_attachment_setup(xdp, bpf);
return 0;
}
@@ -249,8 +238,8 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
state->state = "verify";
/* Program id is not populated yet when we create the state. */
- sprintf(name, "%u", ns->prog_id_gen++);
- state->ddir = debugfs_create_dir(name, ns->ddir_bpf_bound_progs);
+ sprintf(name, "%u", ns->sdev->prog_id_gen++);
+ state->ddir = debugfs_create_dir(name, ns->sdev->ddir_bpf_bound_progs);
if (IS_ERR_OR_NULL(state->ddir)) {
kfree(state);
return -ENOMEM;
@@ -261,7 +250,7 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
&state->state, &nsim_bpf_string_fops);
debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);
- list_add_tail(&state->l, &ns->bpf_bound_progs);
+ list_add_tail(&state->l, &ns->sdev->bpf_bound_progs);
prog->aux->offload->dev_priv = state;
@@ -290,10 +279,6 @@ static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
return -EINVAL;
}
- if (nsim_xdp_offload_active(ns)) {
- NSIM_EA(bpf->extack, "xdp offload active, can't load drv prog");
- return -EBUSY;
- }
return 0;
}
@@ -309,7 +294,7 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
return -EINVAL;
}
- if (bpf->prog->aux->offload->netdev != ns->netdev) {
+ if (!bpf_offload_dev_match(bpf->prog, ns->netdev)) {
NSIM_EA(bpf->extack, "program bound to different dev");
return -EINVAL;
}
@@ -512,7 +497,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
}
offmap->dev_ops = &nsim_bpf_map_ops;
- list_add_tail(&nmap->l, &ns->bpf_bound_maps);
+ list_add_tail(&nmap->l, &ns->sdev->bpf_bound_maps);
return 0;
@@ -567,22 +552,21 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
nsim_bpf_destroy_prog(bpf->offload.prog);
return 0;
case XDP_QUERY_PROG:
- bpf->prog_attached = ns->xdp_prog_mode;
- bpf->prog_id = ns->xdp_prog ? ns->xdp_prog->aux->id : 0;
- bpf->prog_flags = ns->xdp_prog ? ns->xdp_flags : 0;
- return 0;
+ return xdp_attachment_query(&ns->xdp, bpf);
+ case XDP_QUERY_PROG_HW:
+ return xdp_attachment_query(&ns->xdp_hw, bpf);
case XDP_SETUP_PROG:
err = nsim_setup_prog_checks(ns, bpf);
if (err)
return err;
- return nsim_xdp_set_prog(ns, bpf);
+ return nsim_xdp_set_prog(ns, bpf, &ns->xdp);
case XDP_SETUP_PROG_HW:
err = nsim_setup_prog_hw_checks(ns, bpf);
if (err)
return err;
- return nsim_xdp_set_prog(ns, bpf);
+ return nsim_xdp_set_prog(ns, bpf, &ns->xdp_hw);
case BPF_OFFLOAD_MAP_ALLOC:
if (!ns->bpf_map_accept)
return -EOPNOTSUPP;
@@ -598,8 +582,26 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
int nsim_bpf_init(struct netdevsim *ns)
{
- INIT_LIST_HEAD(&ns->bpf_bound_progs);
- INIT_LIST_HEAD(&ns->bpf_bound_maps);
+ int err;
+
+ if (ns->sdev->refcnt == 1) {
+ INIT_LIST_HEAD(&ns->sdev->bpf_bound_progs);
+ INIT_LIST_HEAD(&ns->sdev->bpf_bound_maps);
+
+ ns->sdev->ddir_bpf_bound_progs =
+ debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir);
+ if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
+ return -ENOMEM;
+
+ ns->sdev->bpf_dev = bpf_offload_dev_create();
+ err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
+ if (err)
+ return err;
+ }
+
+ err = bpf_offload_dev_netdev_register(ns->sdev->bpf_dev, ns->netdev);
+ if (err)
+ goto err_destroy_bdev;
debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
&ns->bpf_offloaded_id);
@@ -609,10 +611,6 @@ int nsim_bpf_init(struct netdevsim *ns)
&ns->bpf_bind_accept);
debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir,
&ns->bpf_bind_verifier_delay);
- ns->ddir_bpf_bound_progs =
- debugfs_create_dir("bpf_bound_progs", ns->ddir);
- if (IS_ERR_OR_NULL(ns->ddir_bpf_bound_progs))
- return -ENOMEM;
ns->bpf_tc_accept = true;
debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir,
@@ -631,12 +629,23 @@ int nsim_bpf_init(struct netdevsim *ns)
&ns->bpf_map_accept);
return 0;
+
+err_destroy_bdev:
+ if (ns->sdev->refcnt == 1)
+ bpf_offload_dev_destroy(ns->sdev->bpf_dev);
+ return err;
}
void nsim_bpf_uninit(struct netdevsim *ns)
{
- WARN_ON(!list_empty(&ns->bpf_bound_progs));
- WARN_ON(!list_empty(&ns->bpf_bound_maps));
- WARN_ON(ns->xdp_prog);
+ WARN_ON(ns->xdp.prog);
+ WARN_ON(ns->xdp_hw.prog);
WARN_ON(ns->bpf_offloaded);
+ bpf_offload_dev_netdev_unregister(ns->sdev->bpf_dev, ns->netdev);
+
+ if (ns->sdev->refcnt == 1) {
+ WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs));
+ WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps));
+ bpf_offload_dev_destroy(ns->sdev->bpf_dev);
+ }
}
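
The bpf.c conversion above replaces netdevsim's hand-rolled XDP bookkeeping (xdp_prog/xdp_flags/xdp_prog_mode) with the shared xdp_attachment_info helpers, one instance per attachment point (driver and hardware). A minimal sketch of how the helper trio is typically wired into ndo_bpf; struct my_dev and the install step are illustrative:

#include <linux/netdevice.h>
#include <net/xdp.h>

struct my_dev {
	struct xdp_attachment_info xdp;
};

static int my_ndo_bpf(struct my_dev *md, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_QUERY_PROG:
		/* report id/flags of the saved attachment */
		return xdp_attachment_query(&md->xdp, bpf);
	case XDP_SETUP_PROG:
		/* refuse replacing a prog loaded with different flags */
		if (!xdp_attachment_flags_ok(&md->xdp, bpf))
			return -EBUSY;
		/* ... install bpf->prog in the driver/hardware here ... */
		xdp_attachment_setup(&md->xdp, bpf); /* swaps prog refs */
		return 0;
	default:
		return -EINVAL;
	}
}
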
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
new file mode 100644
index 000000000000..2dcf6cc269d0
--- /dev/null
+++ b/drivers/net/netdevsim/ipsec.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */
+
+#include <crypto/aead.h>
+#include <linux/debugfs.h>
+#include <net/xfrm.h>
+
+#include "netdevsim.h"
+
+#define NSIM_IPSEC_AUTH_BITS 128
+
+static ssize_t nsim_dbg_netdev_ops_read(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct netdevsim *ns = filp->private_data;
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+ size_t bufsize;
+ char *buf, *p;
+ int len;
+ int i;
+
+ /* the buffer needed is roughly
+ * (num SAs * 3 lines each * ~60 bytes per line), sized with one
+ * spare line per SA plus one more line
+ */
+ bufsize = (ipsec->count * 4 * 60) + 60;
+ buf = kzalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ p = buf;
+ p += snprintf(p, bufsize - (p - buf),
+ "SA count=%u tx=%u\n",
+ ipsec->count, ipsec->tx);
+
+ for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) {
+ struct nsim_sa *sap = &ipsec->sa[i];
+
+ if (!sap->used)
+ continue;
+
+ p += snprintf(p, bufsize - (p - buf),
+ "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n",
+ i, (sap->rx ? 'r' : 't'), sap->ipaddr[0],
+ sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]);
+ p += snprintf(p, bufsize - (p - buf),
+ "sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n",
+ i, be32_to_cpu(sap->xs->id.spi),
+ sap->xs->id.proto, sap->salt, sap->crypt);
+ p += snprintf(p, bufsize - (p - buf),
+ "sa[%i] key=0x%08x %08x %08x %08x\n",
+ i, sap->key[0], sap->key[1],
+ sap->key[2], sap->key[3]);
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf);
+
+ kfree(buf);
+ return len;
+}
+
+static const struct file_operations ipsec_dbg_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = nsim_dbg_netdev_ops_read,
+};
+
+static int nsim_ipsec_find_empty_idx(struct nsim_ipsec *ipsec)
+{
+ u32 i;
+
+ if (ipsec->count == NSIM_IPSEC_MAX_SA_COUNT)
+ return -ENOSPC;
+
+ /* search sa table */
+ for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) {
+ if (!ipsec->sa[i].used)
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
+ u32 *mykey, u32 *mysalt)
+{
+ const char aes_gcm_name[] = "rfc4106(gcm(aes))";
+ struct net_device *dev = xs->xso.dev;
+ unsigned char *key_data;
+ char *alg_name = NULL;
+ int key_len;
+
+ if (!xs->aead) {
+ netdev_err(dev, "Unsupported IPsec algorithm\n");
+ return -EINVAL;
+ }
+
+ if (xs->aead->alg_icv_len != NSIM_IPSEC_AUTH_BITS) {
+ netdev_err(dev, "IPsec offload requires %d bit authentication\n",
+ NSIM_IPSEC_AUTH_BITS);
+ return -EINVAL;
+ }
+
+ key_data = &xs->aead->alg_key[0];
+ key_len = xs->aead->alg_key_len;
+ alg_name = xs->aead->alg_name;
+
+ if (strcmp(alg_name, aes_gcm_name)) {
+ netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
+ aes_gcm_name);
+ return -EINVAL;
+ }
+
+ /* 160 bits accounts for the 16 byte key plus the 4 byte salt */
+ if (key_len > NSIM_IPSEC_AUTH_BITS) {
+ *mysalt = ((u32 *)key_data)[4];
+ } else if (key_len == NSIM_IPSEC_AUTH_BITS) {
+ *mysalt = 0;
+ } else {
+ netdev_err(dev, "IPsec hw offload only supports 128 bit keys with optional 32 bit salt\n");
+ return -EINVAL;
+ }
+ memcpy(mykey, key_data, 16);
+
+ return 0;
+}
+
+static int nsim_ipsec_add_sa(struct xfrm_state *xs)
+{
+ struct nsim_ipsec *ipsec;
+ struct net_device *dev;
+ struct netdevsim *ns;
+ struct nsim_sa sa;
+ u16 sa_idx;
+ int ret;
+
+ dev = xs->xso.dev;
+ ns = netdev_priv(dev);
+ ipsec = &ns->ipsec;
+
+ if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+ netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
+ xs->id.proto);
+ return -EINVAL;
+ }
+
+ if (xs->calg) {
+ netdev_err(dev, "Compression offload not supported\n");
+ return -EINVAL;
+ }
+
+ /* find the first unused index */
+ ret = nsim_ipsec_find_empty_idx(ipsec);
+ if (ret < 0) {
+ netdev_err(dev, "No space for SA in Rx table!\n");
+ return ret;
+ }
+ sa_idx = (u16)ret;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.used = true;
+ sa.xs = xs;
+
+ if (sa.xs->id.proto == IPPROTO_ESP)
+ sa.crypt = xs->ealg || xs->aead;
+
+ /* get the key and salt */
+ ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt);
+ if (ret) {
+ netdev_err(dev, "Failed to get key data for SA table\n");
+ return ret;
+ }
+
+ if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ sa.rx = true;
+
+ if (xs->props.family == AF_INET6)
+ memcpy(sa.ipaddr, &xs->id.daddr.a6, 16);
+ else
+ memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4);
+ }
+
+ /* the preparations worked, so save the info */
+ memcpy(&ipsec->sa[sa_idx], &sa, sizeof(sa));
+
+ /* the XFRM stack doesn't like offload_handle == 0,
+ * so add a bitflag in case our array index is 0
+ */
+ xs->xso.offload_handle = sa_idx | NSIM_IPSEC_VALID;
+ ipsec->count++;
+
+ return 0;
+}
+
+static void nsim_ipsec_del_sa(struct xfrm_state *xs)
+{
+ struct netdevsim *ns = netdev_priv(xs->xso.dev);
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+ u16 sa_idx;
+
+ sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID;
+ if (!ipsec->sa[sa_idx].used) {
+ netdev_err(ns->netdev, "Invalid SA for delete sa_idx=%d\n",
+ sa_idx);
+ return;
+ }
+
+ memset(&ipsec->sa[sa_idx], 0, sizeof(struct nsim_sa));
+ ipsec->count--;
+}
+
+static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+{
+ struct netdevsim *ns = netdev_priv(xs->xso.dev);
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+
+ ipsec->ok++;
+
+ return true;
+}
+
+static const struct xfrmdev_ops nsim_xfrmdev_ops = {
+ .xdo_dev_state_add = nsim_ipsec_add_sa,
+ .xdo_dev_state_delete = nsim_ipsec_del_sa,
+ .xdo_dev_offload_ok = nsim_ipsec_offload_ok,
+};
+
+bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
+{
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+ struct xfrm_state *xs;
+ struct nsim_sa *tsa;
+ u32 sa_idx;
+
+ /* do we even need to check this packet? */
+ if (!skb->sp)
+ return true;
+
+ if (unlikely(!skb->sp->len)) {
+ netdev_err(ns->netdev, "no xfrm state len = %d\n",
+ skb->sp->len);
+ return false;
+ }
+
+ xs = xfrm_input_state(skb);
+ if (unlikely(!xs)) {
+ netdev_err(ns->netdev, "no xfrm_input_state() xs = %p\n", xs);
+ return false;
+ }
+
+ sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID;
+ if (unlikely(sa_idx >= NSIM_IPSEC_MAX_SA_COUNT)) {
+ netdev_err(ns->netdev, "bad sa_idx=%d max=%d\n",
+ sa_idx, NSIM_IPSEC_MAX_SA_COUNT);
+ return false;
+ }
+
+ tsa = &ipsec->sa[sa_idx];
+ if (unlikely(!tsa->used)) {
+ netdev_err(ns->netdev, "unused sa_idx=%d\n", sa_idx);
+ return false;
+ }
+
+ if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+ netdev_err(ns->netdev, "unexpected proto=%d\n", xs->id.proto);
+ return false;
+ }
+
+ ipsec->tx++;
+
+ return true;
+}
+
+void nsim_ipsec_init(struct netdevsim *ns)
+{
+ ns->netdev->xfrmdev_ops = &nsim_xfrmdev_ops;
+
+#define NSIM_ESP_FEATURES (NETIF_F_HW_ESP | \
+ NETIF_F_HW_ESP_TX_CSUM | \
+ NETIF_F_GSO_ESP)
+
+ ns->netdev->features |= NSIM_ESP_FEATURES;
+ ns->netdev->hw_enc_features |= NSIM_ESP_FEATURES;
+
+ ns->ipsec.pfile = debugfs_create_file("ipsec", 0400, ns->ddir, ns,
+ &ipsec_dbg_fops);
+}
+
+void nsim_ipsec_teardown(struct netdevsim *ns)
+{
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+
+ if (ipsec->count)
+ netdev_err(ns->netdev, "tearing down IPsec offload with %d SAs left\n",
+ ipsec->count);
+ debugfs_remove_recursive(ipsec->pfile);
+}
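
A worked example of the offload_handle encoding used in nsim_ipsec_add_sa() and decoded in nsim_ipsec_del_sa()/nsim_ipsec_tx(): OR-ing in NSIM_IPSEC_VALID keeps table index 0 from producing a zero handle, which the XFRM stack would treat as "no offload". These helpers are hypothetical; the driver open-codes the mask:

#include <linux/bits.h>

#define VALID_FLAG BIT(31)           /* mirrors NSIM_IPSEC_VALID */

static inline u32 sa_handle_encode(u16 sa_idx)
{
	return sa_idx | VALID_FLAG;   /* idx 0 -> 0x80000000, never 0 */
}

static inline u16 sa_handle_decode(u32 handle)
{
	return handle & ~VALID_FLAG;  /* strip the flag to get the index */
}
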
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index ec68f38213d9..8d8e2b3f263e 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -22,6 +22,7 @@
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/rtnetlink.h>
+#include <net/switchdev.h>
#include "netdevsim.h"
@@ -40,6 +41,9 @@ struct nsim_vf_config {
static u32 nsim_dev_id;
+static struct dentry *nsim_ddir;
+static struct dentry *nsim_sdev_ddir;
+
static int nsim_num_vf(struct device *dev)
{
struct netdevsim *ns = to_nsim(dev);
@@ -144,8 +148,29 @@ static struct device_type nsim_dev_type = {
.release = nsim_dev_release,
};
+static int
+nsim_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(ns->sdev->switch_id);
+ memcpy(&attr->u.ppid.id, &ns->sdev->switch_id,
+ attr->u.ppid.id_len);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct switchdev_ops nsim_switchdev_ops = {
+ .switchdev_port_attr_get = nsim_port_attr_get,
+};
+
static int nsim_init(struct net_device *dev)
{
+ char sdev_ddir_name[10], sdev_link_name[32];
struct netdevsim *ns = netdev_priv(dev);
int err;
@@ -154,9 +179,32 @@ static int nsim_init(struct net_device *dev)
if (IS_ERR_OR_NULL(ns->ddir))
return -ENOMEM;
+ if (!ns->sdev) {
+ ns->sdev = kzalloc(sizeof(*ns->sdev), GFP_KERNEL);
+ if (!ns->sdev) {
+ err = -ENOMEM;
+ goto err_debugfs_destroy;
+ }
+ ns->sdev->refcnt = 1;
+ ns->sdev->switch_id = nsim_dev_id;
+ sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id);
+ ns->sdev->ddir = debugfs_create_dir(sdev_ddir_name,
+ nsim_sdev_ddir);
+ if (IS_ERR_OR_NULL(ns->sdev->ddir)) {
+ err = PTR_ERR_OR_ZERO(ns->sdev->ddir) ?: -EINVAL;
+ goto err_sdev_free;
+ }
+ } else {
+ sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id);
+ ns->sdev->refcnt++;
+ }
+
+ sprintf(sdev_link_name, "../../" DRV_NAME "_sdev/%s", sdev_ddir_name);
+ debugfs_create_symlink("sdev", ns->ddir, sdev_link_name);
+
err = nsim_bpf_init(ns);
if (err)
- goto err_debugfs_destroy;
+ goto err_sdev_destroy;
ns->dev.id = nsim_dev_id++;
ns->dev.bus = &nsim_bus;
@@ -166,17 +214,26 @@ static int nsim_init(struct net_device *dev)
goto err_bpf_uninit;
SET_NETDEV_DEV(dev, &ns->dev);
+ SWITCHDEV_SET_OPS(dev, &nsim_switchdev_ops);
err = nsim_devlink_setup(ns);
if (err)
goto err_unreg_dev;
+ nsim_ipsec_init(ns);
+
return 0;
err_unreg_dev:
device_unregister(&ns->dev);
err_bpf_uninit:
nsim_bpf_uninit(ns);
+err_sdev_destroy:
+ if (!--ns->sdev->refcnt) {
+ debugfs_remove_recursive(ns->sdev->ddir);
+err_sdev_free:
+ kfree(ns->sdev);
+ }
err_debugfs_destroy:
debugfs_remove_recursive(ns->ddir);
return err;
@@ -186,9 +243,14 @@ static void nsim_uninit(struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ nsim_ipsec_teardown(ns);
nsim_devlink_teardown(ns);
debugfs_remove_recursive(ns->ddir);
nsim_bpf_uninit(ns);
+ if (!--ns->sdev->refcnt) {
+ debugfs_remove_recursive(ns->sdev->ddir);
+ kfree(ns->sdev);
+ }
}
static void nsim_free(struct net_device *dev)
@@ -203,11 +265,15 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ if (!nsim_ipsec_tx(ns, skb))
+ goto out;
+
u64_stats_update_begin(&ns->syncp);
ns->tx_packets++;
ns->tx_bytes += skb->len;
u64_stats_update_end(&ns->syncp);
+out:
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -221,8 +287,7 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu)
{
struct netdevsim *ns = netdev_priv(dev);
- if (ns->xdp_prog_mode == XDP_ATTACHED_DRV &&
- new_mtu > NSIM_XDP_MAX_MTU)
+ if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU)
return -EBUSY;
dev->mtu = new_mtu;
@@ -260,7 +325,7 @@ nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f)
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb,
- ns, ns);
+ ns, ns, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns);
return 0;
@@ -464,15 +529,46 @@ static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
+static int nsim_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (tb[IFLA_LINK]) {
+ struct net_device *joindev;
+ struct netdevsim *joinns;
+
+ joindev = __dev_get_by_index(src_net,
+ nla_get_u32(tb[IFLA_LINK]));
+ if (!joindev)
+ return -ENODEV;
+ if (joindev->netdev_ops != &nsim_netdev_ops)
+ return -EINVAL;
+
+ joinns = netdev_priv(joindev);
+ if (!joinns->sdev || !joinns->sdev->refcnt)
+ return -EINVAL;
+ ns->sdev = joinns->sdev;
+ }
+
+ return register_netdevice(dev);
+}
+
+static void nsim_dellink(struct net_device *dev, struct list_head *head)
+{
+ unregister_netdevice_queue(dev, head);
+}
+
static struct rtnl_link_ops nsim_link_ops __read_mostly = {
.kind = DRV_NAME,
.priv_size = sizeof(struct netdevsim),
.setup = nsim_setup,
.validate = nsim_validate,
+ .newlink = nsim_newlink,
+ .dellink = nsim_dellink,
};
-struct dentry *nsim_ddir;
-
static int __init nsim_module_init(void)
{
int err;
@@ -481,9 +577,15 @@ static int __init nsim_module_init(void)
if (IS_ERR_OR_NULL(nsim_ddir))
return -ENOMEM;
+ nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL);
+ if (IS_ERR_OR_NULL(nsim_sdev_ddir)) {
+ err = -ENOMEM;
+ goto err_debugfs_destroy;
+ }
+
err = bus_register(&nsim_bus);
if (err)
- goto err_debugfs_destroy;
+ goto err_sdir_destroy;
err = nsim_devlink_init();
if (err)
@@ -499,6 +601,8 @@ err_dl_fini:
nsim_devlink_exit();
err_unreg_bus:
bus_unregister(&nsim_bus);
+err_sdir_destroy:
+ debugfs_remove_recursive(nsim_sdev_ddir);
err_debugfs_destroy:
debugfs_remove_recursive(nsim_ddir);
return err;
@@ -509,6 +613,7 @@ static void __exit nsim_module_exit(void)
rtnl_link_unregister(&nsim_link_ops);
nsim_devlink_exit();
bus_unregister(&nsim_bus);
+ debugfs_remove_recursive(nsim_sdev_ddir);
debugfs_remove_recursive(nsim_ddir);
}
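
The netdev.c changes above give every port a netdevsim_shared_dev: the first port allocates it with refcnt = 1, ports created with IFLA_LINK pointing at an existing netdevsim join it via nsim_newlink(), and the last port to go frees it in nsim_uninit(). The lifetime rule in miniature (names illustrative):

#include <linux/slab.h>

struct shared {
	unsigned int refcnt;
};

static struct shared *shared_get(struct shared *joined)
{
	if (joined) {
		joined->refcnt++;         /* join an existing switch */
		return joined;
	}
	joined = kzalloc(sizeof(*joined), GFP_KERNEL);
	if (joined)
		joined->refcnt = 1;       /* first port owns the object */
	return joined;
}

static void shared_put(struct shared *s)
{
	if (!--s->refcnt)
		kfree(s);                 /* last port out frees it */
}
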
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 8ca50b72c328..384c254fafc5 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
+#include <net/xdp.h>
#define DRV_NAME "netdevsim"
@@ -26,9 +27,46 @@
#define NSIM_EA(extack, msg) NL_SET_ERR_MSG_MOD((extack), msg)
struct bpf_prog;
+struct bpf_offload_dev;
struct dentry;
struct nsim_vf_config;
+struct netdevsim_shared_dev {
+ unsigned int refcnt;
+ u32 switch_id;
+
+ struct dentry *ddir;
+
+ struct bpf_offload_dev *bpf_dev;
+
+ struct dentry *ddir_bpf_bound_progs;
+ u32 prog_id_gen;
+
+ struct list_head bpf_bound_progs;
+ struct list_head bpf_bound_maps;
+};
+
+#define NSIM_IPSEC_MAX_SA_COUNT 33
+#define NSIM_IPSEC_VALID BIT(31)
+
+struct nsim_sa {
+ struct xfrm_state *xs;
+ __be32 ipaddr[4];
+ u32 key[4];
+ u32 salt;
+ bool used;
+ bool crypt;
+ bool rx;
+};
+
+struct nsim_ipsec {
+ struct nsim_sa sa[NSIM_IPSEC_MAX_SA_COUNT];
+ struct dentry *pfile;
+ u32 count;
+ u32 tx;
+ u32 ok;
+};
+
struct netdevsim {
struct net_device *netdev;
@@ -37,6 +75,7 @@ struct netdevsim {
struct u64_stats_sync syncp;
struct device dev;
+ struct netdevsim_shared_dev *sdev;
struct dentry *ddir;
@@ -46,16 +85,11 @@ struct netdevsim {
struct bpf_prog *bpf_offloaded;
u32 bpf_offloaded_id;
- u32 xdp_flags;
- int xdp_prog_mode;
- struct bpf_prog *xdp_prog;
-
- u32 prog_id_gen;
+ struct xdp_attachment_info xdp;
+ struct xdp_attachment_info xdp_hw;
bool bpf_bind_accept;
u32 bpf_bind_verifier_delay;
- struct dentry *ddir_bpf_bound_progs;
- struct list_head bpf_bound_progs;
bool bpf_tc_accept;
bool bpf_tc_non_bound_accept;
@@ -63,14 +97,12 @@ struct netdevsim {
bool bpf_xdpoffload_accept;
bool bpf_map_accept;
- struct list_head bpf_bound_maps;
#if IS_ENABLED(CONFIG_NET_DEVLINK)
struct devlink *devlink;
#endif
+ struct nsim_ipsec ipsec;
};
-extern struct dentry *nsim_ddir;
-
#ifdef CONFIG_BPF_SYSCALL
int nsim_bpf_init(struct netdevsim *ns);
void nsim_bpf_uninit(struct netdevsim *ns);
@@ -148,6 +180,25 @@ static inline void nsim_devlink_exit(void)
}
#endif
+#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
+void nsim_ipsec_init(struct netdevsim *ns);
+void nsim_ipsec_teardown(struct netdevsim *ns);
+bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb);
+#else
+static inline void nsim_ipsec_init(struct netdevsim *ns)
+{
+}
+
+static inline void nsim_ipsec_teardown(struct netdevsim *ns)
+{
+}
+
+static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
+{
+ return true;
+}
+#endif
+
static inline struct netdevsim *to_nsim(struct device *ptr)
{
return container_of(ptr, struct netdevsim, dev);
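
netdevsim.h above gates the IPsec hooks on CONFIG_XFRM_OFFLOAD with static-inline no-op stubs, so nsim_start_xmit() and friends compile unchanged either way. The same pattern in miniature (CONFIG_MY_FEATURE is a hypothetical option):

#if IS_ENABLED(CONFIG_MY_FEATURE)
bool my_feature_tx(struct sk_buff *skb);
#else
static inline bool my_feature_tx(struct sk_buff *skb)
{
	return true;	/* compiled out: behave as "always pass" */
}
#endif
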
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 9f6f7ccd44f7..b12023bc2cab 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -430,7 +430,7 @@ static int ntb_netdev_probe(struct device *client_dev)
ndev->hw_features = ndev->features;
ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);
- random_ether_addr(ndev->perm_addr);
+ eth_random_addr(ndev->perm_addr);
memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
ndev->netdev_ops = &ntb_netdev_ops;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 343989f9f9d9..82070792edbb 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -28,7 +28,7 @@ config MDIO_BCM_IPROC
config MDIO_BCM_UNIMAC
tristate "Broadcom UniMAC MDIO bus controller"
- depends on HAS_IOMEM && OF_MDIO
+ depends on HAS_IOMEM
help
This module provides a driver for the Broadcom UniMAC MDIO busses.
This hardware can be found in the Broadcom GENET Ethernet MAC
@@ -92,7 +92,8 @@ config MDIO_CAVIUM
config MDIO_GPIO
tristate "GPIO lib-based bitbanged MDIO buses"
- depends on MDIO_BITBANG && GPIOLIB
+ depends on MDIO_BITBANG
+ depends on GPIOLIB || COMPILE_TEST
---help---
Supports GPIO lib-based MDIO busses.
@@ -213,6 +214,7 @@ comment "MII PHY device drivers"
config SFP
tristate "SFP cage support"
depends on I2C && PHYLINK
+ depends on HWMON || HWMON=n
select MDIO_I2C
config AMD_PHY
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 01d2ff2f6241..b2b6307d64a4 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -229,6 +229,7 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
phy_read(phydev, MII_BMSR);
switch (rev) {
+ case 0xa0:
case 0xb0:
ret = bcm7xxx_28nm_b0_afe_config_init(phydev);
break;
@@ -659,6 +660,7 @@ static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
+ BCM7XXX_28NM_GPHY(PHY_ID_BCM_OMEGA, "Broadcom Omega Combo GPHY"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 79e9b103188b..29aa8d772b0c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -757,13 +757,16 @@ static int decode_evnt(struct dp83640_private *dp83640,
phy_txts = data;
- switch (words) { /* fall through in every case */
+ switch (words) {
case 3:
dp83640->edata.sec_hi = phy_txts->sec_hi;
+ /* fall through */
case 2:
dp83640->edata.sec_lo = phy_txts->sec_lo;
+ /* fall through */
case 1:
dp83640->edata.ns_hi = phy_txts->ns_hi;
+ /* fall through */
case 0:
dp83640->edata.ns_lo = phy_txts->ns_lo;
}
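The decode above depends on deliberate case fall-through: a 3-word event carries all four timestamp fields, a 1-word event only the nanosecond halves, so each case must also execute every case below it. The new /* fall through */ comments make that intent explicit for -Wimplicit-fallthrough. A standalone illustration (plain C, not driver code):

#include <stdio.h>

static void capture(int words)
{
	switch (words) {
	case 3:
		puts("sec_hi");
		/* fall through */
	case 2:
		puts("sec_lo");
		/* fall through */
	case 1:
		puts("ns_hi");
		/* fall through */
	case 0:
		puts("ns_lo");
	}
}

int main(void)
{
	capture(2);	/* prints sec_lo, ns_hi, ns_lo */
	return 0;
}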
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 49ac678eb2dc..78cad134a79e 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -21,6 +21,7 @@
#define MII_DP83811_SGMII_CTRL 0x09
#define MII_DP83811_INT_STAT1 0x12
#define MII_DP83811_INT_STAT2 0x13
+#define MII_DP83811_INT_STAT3 0x18
#define MII_DP83811_RESET_CTRL 0x1f
#define DP83811_HW_RESET BIT(15)
@@ -44,6 +45,11 @@
#define DP83811_OVERVOLTAGE_INT_EN BIT(6)
#define DP83811_UNDERVOLTAGE_INT_EN BIT(7)
+/* INT_STAT3 bits */
+#define DP83811_LPS_INT_EN BIT(0)
+#define DP83811_NO_FRAME_INT_EN BIT(3)
+#define DP83811_POR_DONE_INT_EN BIT(4)
+
#define MII_DP83811_RXSOP1 0x04a5
#define MII_DP83811_RXSOP2 0x04a6
#define MII_DP83811_RXSOP3 0x04a7
@@ -81,6 +87,10 @@ static int dp83811_ack_interrupt(struct phy_device *phydev)
if (err < 0)
return err;
+ err = phy_read(phydev, MII_DP83811_INT_STAT3);
+ if (err < 0)
+ return err;
+
return 0;
}
@@ -216,6 +226,18 @@ static int dp83811_config_intr(struct phy_device *phydev)
DP83811_UNDERVOLTAGE_INT_EN);
err = phy_write(phydev, MII_DP83811_INT_STAT2, misr_status);
+ if (err < 0)
+ return err;
+
+ misr_status = phy_read(phydev, MII_DP83811_INT_STAT3);
+ if (misr_status < 0)
+ return misr_status;
+
+ misr_status |= (DP83811_LPS_INT_EN |
+ DP83811_NO_FRAME_INT_EN |
+ DP83811_POR_DONE_INT_EN);
+
+ err = phy_write(phydev, MII_DP83811_INT_STAT3, misr_status);
} else {
err = phy_write(phydev, MII_DP83811_INT_STAT1, 0);
@@ -223,6 +245,10 @@ static int dp83811_config_intr(struct phy_device *phydev)
return err;
err = phy_write(phydev, MII_DP83811_INT_STAT2, 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_DP83811_INT_STAT3, 0);
}
return err;
@@ -258,21 +284,19 @@ static int dp83811_config_init(struct phy_device *phydev)
if (err < 0)
return err;
+ value = phy_read(phydev, MII_DP83811_SGMII_CTRL);
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- value = phy_read(phydev, MII_DP83811_SGMII_CTRL);
- if (!(value & DP83811_SGMII_EN)) {
- err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
+ err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
(DP83811_SGMII_EN | value));
- if (err < 0)
- return err;
- } else {
- err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
- (~DP83811_SGMII_EN & value));
- if (err < 0)
- return err;
- }
+ } else {
+ err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
+ (~DP83811_SGMII_EN & value));
}
+ if (err < 0)
+ return err;
+
value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN;
return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 001fe1df7557..67b260877f30 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -259,10 +259,8 @@ static int __init fixed_mdio_bus_init(void)
int ret;
pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0);
- if (IS_ERR(pdev)) {
- ret = PTR_ERR(pdev);
- goto err_pdev;
- }
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
fmb->mii_bus = mdiobus_alloc();
if (fmb->mii_bus == NULL) {
@@ -287,7 +285,6 @@ err_mdiobus_alloc:
mdiobus_free(fmb->mii_bus);
err_mdiobus_reg:
platform_device_unregister(pdev);
-err_pdev:
return ret;
}
module_init(fixed_mdio_bus_init);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 1cd439bdf608..f7c69ca34056 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -679,7 +679,7 @@ static int m88e1116r_config_init(struct phy_device *phydev)
if (err < 0)
return err;
- mdelay(500);
+ msleep(500);
err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
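The m88e1116r change swaps a 500 ms busy-wait for a sleeping wait: config_init runs in process context, so the CPU can be yielded instead of spun. The usual rule of thumb from the kernel's timers-howto document, sketched:

#include <linux/delay.h>

static void delay_examples(void)
{
	/* Process context, tens of milliseconds or more: sleep and
	 * let the scheduler run other work. */
	msleep(500);

	/* Atomic context only (IRQ handler, spinlock held): busy-wait,
	 * kept short; a 500 ms mdelay() stalls the CPU outright, which
	 * is what the patch above removes. */
	mdelay(5);
}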
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index 0c5b68e7da51..c017486e9b86 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -13,7 +13,7 @@
* You should have received a copy of the GNU General Public License
* version 2 (GPLv2) along with this source code.
*/
-
+#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/of_mdio.h>
@@ -22,7 +22,14 @@
#include <linux/mdio-mux.h>
#include <linux/delay.h>
-#define MDIO_PARAM_OFFSET 0x00
+#define MDIO_RATE_ADJ_EXT_OFFSET 0x000
+#define MDIO_RATE_ADJ_INT_OFFSET 0x004
+#define MDIO_RATE_ADJ_DIVIDENT_SHIFT 16
+
+#define MDIO_SCAN_CTRL_OFFSET 0x008
+#define MDIO_SCAN_CTRL_OVRIDE_EXT_MSTR 28
+
+#define MDIO_PARAM_OFFSET 0x23c
#define MDIO_PARAM_MIIM_CYCLE 29
#define MDIO_PARAM_INTERNAL_SEL 25
#define MDIO_PARAM_BUS_ID 22
@@ -30,27 +37,56 @@
#define MDIO_PARAM_PHY_ID 16
#define MDIO_PARAM_PHY_DATA 0
-#define MDIO_READ_OFFSET 0x04
+#define MDIO_READ_OFFSET 0x240
#define MDIO_READ_DATA_MASK 0xffff
-#define MDIO_ADDR_OFFSET 0x08
+#define MDIO_ADDR_OFFSET 0x244
-#define MDIO_CTRL_OFFSET 0x0C
+#define MDIO_CTRL_OFFSET 0x248
#define MDIO_CTRL_WRITE_OP 0x1
#define MDIO_CTRL_READ_OP 0x2
-#define MDIO_STAT_OFFSET 0x10
+#define MDIO_STAT_OFFSET 0x24c
#define MDIO_STAT_DONE 1
#define BUS_MAX_ADDR 32
#define EXT_BUS_START_ADDR 16
+#define MDIO_REG_ADDR_SPACE_SIZE 0x250
+
+#define MDIO_OPERATING_FREQUENCY 11000000
+#define MDIO_RATE_ADJ_DIVIDENT 1
+
struct iproc_mdiomux_desc {
void *mux_handle;
void __iomem *base;
struct device *dev;
struct mii_bus *mii_bus;
+ struct clk *core_clk;
};
+static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md)
+{
+ u32 divisor;
+ u32 val;
+
+ /* Disable external mdio master access */
+ val = readl(md->base + MDIO_SCAN_CTRL_OFFSET);
+ val |= BIT(MDIO_SCAN_CTRL_OVRIDE_EXT_MSTR);
+ writel(val, md->base + MDIO_SCAN_CTRL_OFFSET);
+
+ if (md->core_clk) {
+ /* use rate adjust regs to derive the mdio's operating
+ * frequency from the specified core clock
+ */
+ divisor = clk_get_rate(md->core_clk) / MDIO_OPERATING_FREQUENCY;
+ divisor = divisor / (MDIO_RATE_ADJ_DIVIDENT + 1);
+ val = divisor;
+ val |= MDIO_RATE_ADJ_DIVIDENT << MDIO_RATE_ADJ_DIVIDENT_SHIFT;
+ writel(val, md->base + MDIO_RATE_ADJ_EXT_OFFSET);
+ writel(val, md->base + MDIO_RATE_ADJ_INT_OFFSET);
+ }
+}
+
static int iproc_mdio_wait_for_idle(void __iomem *base, bool result)
{
unsigned int timeout = 1000; /* loop for 1s */
@@ -169,18 +205,39 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
md->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res->start & 0xfff) {
+ /* For backward compatibility in case the
+ * base address is specified with an offset.
+ */
+ dev_info(&pdev->dev, "fix base address in dt-blob\n");
+ res->start &= ~0xfff;
+ res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
+ }
md->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(md->base)) {
dev_err(&pdev->dev, "failed to ioremap register\n");
return PTR_ERR(md->base);
}
- md->mii_bus = mdiobus_alloc();
+ md->mii_bus = devm_mdiobus_alloc(&pdev->dev);
if (!md->mii_bus) {
dev_err(&pdev->dev, "mdiomux bus alloc failed\n");
return -ENOMEM;
}
+ md->core_clk = devm_clk_get(&pdev->dev, NULL);
+ if (md->core_clk == ERR_PTR(-ENOENT) ||
+ md->core_clk == ERR_PTR(-EINVAL))
+ md->core_clk = NULL;
+ else if (IS_ERR(md->core_clk))
+ return PTR_ERR(md->core_clk);
+
+ rc = clk_prepare_enable(md->core_clk);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to enable core clk\n");
+ return rc;
+ }
+
bus = md->mii_bus;
bus->priv = md;
bus->name = "iProc MDIO mux bus";
@@ -194,7 +251,7 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
rc = mdiobus_register(bus);
if (rc) {
dev_err(&pdev->dev, "mdiomux registration failed\n");
- goto out;
+ goto out_clk;
}
platform_set_drvdata(pdev, md);
@@ -206,13 +263,15 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
goto out_register;
}
+ mdio_mux_iproc_config(md);
+
dev_info(md->dev, "iProc mdiomux registered\n");
return 0;
out_register:
mdiobus_unregister(bus);
-out:
- mdiobus_free(bus);
+out_clk:
+ clk_disable_unprepare(md->core_clk);
return rc;
}
@@ -222,11 +281,37 @@ static int mdio_mux_iproc_remove(struct platform_device *pdev)
mdio_mux_uninit(md->mux_handle);
mdiobus_unregister(md->mii_bus);
- mdiobus_free(md->mii_bus);
+ clk_disable_unprepare(md->core_clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mdio_mux_iproc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(md->core_clk);
return 0;
}
+static int mdio_mux_iproc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
+
+ clk_prepare_enable(md->core_clk);
+ mdio_mux_iproc_config(md);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mdio_mux_iproc_pm_ops,
+ mdio_mux_iproc_suspend, mdio_mux_iproc_resume);
+
static const struct of_device_id mdio_mux_iproc_match[] = {
{
.compatible = "brcm,mdio-mux-iproc",
@@ -239,6 +324,7 @@ static struct platform_driver mdiomux_iproc_driver = {
.driver = {
.name = "mdio-mux-iproc",
.of_match_table = mdio_mux_iproc_match,
+ .pm = &mdio_mux_iproc_pm_ops,
},
.probe = mdio_mux_iproc_probe,
.remove = mdio_mux_iproc_remove,
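To make mdio_mux_iproc_config()'s rate math concrete: with the 11 MHz target frequency and MDIO_RATE_ADJ_DIVIDENT = 1, an assumed 250 MHz core clock (an illustrative figure, not from the patch) yields:

static u32 rate_adj_example(void)
{
	u32 divisor, val;

	divisor = 250000000 / MDIO_OPERATING_FREQUENCY;	/* 250 MHz / 11 MHz = 22 */
	divisor = divisor / (MDIO_RATE_ADJ_DIVIDENT + 1);	/* 22 / 2 = 11 */
	val = divisor;
	val |= MDIO_RATE_ADJ_DIVIDENT << MDIO_RATE_ADJ_DIVIDENT_SHIFT;
	return val;	/* 0x0001000b, written to both rate-adjust registers */
}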
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index 082ffef0dec4..bc90764a8b8d 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -20,23 +20,23 @@
struct mdio_mux_gpio_state {
struct gpio_descs *gpios;
void *mux_handle;
+ int values[];
};
static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
void *data)
{
struct mdio_mux_gpio_state *s = data;
- int values[s->gpios->ndescs];
unsigned int n;
if (current_child == desired_child)
return 0;
for (n = 0; n < s->gpios->ndescs; n++)
- values[n] = (desired_child >> n) & 1;
+ s->values[n] = (desired_child >> n) & 1;
gpiod_set_array_value_cansleep(s->gpios->ndescs, s->gpios->desc,
- values);
+ s->values);
return 0;
}
@@ -44,15 +44,21 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
static int mdio_mux_gpio_probe(struct platform_device *pdev)
{
struct mdio_mux_gpio_state *s;
+ struct gpio_descs *gpios;
int r;
- s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
- if (!s)
+ gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
+ if (IS_ERR(gpios))
+ return PTR_ERR(gpios);
+
+ s = devm_kzalloc(&pdev->dev, struct_size(s, values, gpios->ndescs),
+ GFP_KERNEL);
+ if (!s) {
+ gpiod_put_array(gpios);
return -ENOMEM;
+ }
- s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
- if (IS_ERR(s->gpios))
- return PTR_ERR(s->gpios);
+ s->gpios = gpios;
r = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL);
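The mdio-mux-gpio rewrite removes a variable-length array from the stack by growing the state structure with a flexible array member and sizing the single allocation with struct_size(), which also checks the multiplication for overflow. The idiom in isolation (field names illustrative, not the driver's layout):

#include <linux/overflow.h>
#include <linux/slab.h>

struct state {
	int nvals;
	int values[];	/* flexible array member, must be last */
};

static struct state *state_alloc(int n)
{
	/* sizeof(struct state) + n * sizeof(int), overflow-checked */
	struct state *s = kzalloc(struct_size(s, values, n), GFP_KERNEL);

	if (s)
		s->nvals = n;
	return s;
}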
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 650c2667d523..84ca9ff40ae0 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -123,7 +123,7 @@ static const struct vsc8531_edge_rate_table edge_table[] = {
};
#endif /* CONFIG_OF_MDIO */
-static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page)
+static int vsc85xx_phy_page_set(struct phy_device *phydev, u16 page)
{
int rc;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 6c9b24fe3148..1ee25877c4d1 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -467,6 +467,20 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_mii_ioctl);
+static int phy_config_aneg(struct phy_device *phydev)
+{
+ if (phydev->drv->config_aneg)
+ return phydev->drv->config_aneg(phydev);
+
+ /* Clause 45 PHYs that don't implement Clause 22 registers are not
+ * allowed to call genphy_config_aneg()
+ */
+ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
+ return -EOPNOTSUPP;
+
+ return genphy_config_aneg(phydev);
+}
+
/**
* phy_start_aneg_priv - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
@@ -493,10 +507,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
/* Invalidate LP advertising flags */
phydev->lp_advertising = 0;
- if (phydev->drv->config_aneg)
- err = phydev->drv->config_aneg(phydev);
- else
- err = genphy_config_aneg(phydev);
+ err = phy_config_aneg(phydev);
if (err < 0)
goto out_unlock;
@@ -514,7 +525,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
* negotiation may already be done and aneg interrupt may not be
* generated.
*/
- if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
+ if (!phy_polling_mode(phydev) && phydev->state == PHY_AN) {
err = phy_aneg_done(phydev);
if (err > 0) {
trigger = true;
@@ -546,6 +557,84 @@ int phy_start_aneg(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_start_aneg);
+static int phy_poll_aneg_done(struct phy_device *phydev)
+{
+ unsigned int retries = 100;
+ int ret;
+
+ do {
+ msleep(100);
+ ret = phy_aneg_done(phydev);
+ } while (!ret && --retries);
+
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * phy_speed_down - set speed to lowest speed supported by both link partners
+ * @phydev: the phy_device struct
+ * @sync: perform action synchronously
+ *
+ * Description: Typically used to save energy when waiting for a WoL packet
+ *
+ * WARNING: Setting sync to false may cause the system to be unable to suspend
+ * in case the PHY generates an interrupt when finishing the autonegotiation.
+ * This interrupt may wake up the system immediately after suspend.
+ * Therefore use sync = false only if you're sure it's safe with the respective
+ * network chip.
+ */
+int phy_speed_down(struct phy_device *phydev, bool sync)
+{
+ u32 adv = phydev->lp_advertising & phydev->supported;
+ u32 adv_old = phydev->advertising;
+ int ret;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return 0;
+
+ if (adv & PHY_10BT_FEATURES)
+ phydev->advertising &= ~(PHY_100BT_FEATURES |
+ PHY_1000BT_FEATURES);
+ else if (adv & PHY_100BT_FEATURES)
+ phydev->advertising &= ~PHY_1000BT_FEATURES;
+
+ if (phydev->advertising == adv_old)
+ return 0;
+
+ ret = phy_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ return sync ? phy_poll_aneg_done(phydev) : 0;
+}
+EXPORT_SYMBOL_GPL(phy_speed_down);
+
+/**
+ * phy_speed_up - (re)set advertised speeds to all supported speeds
+ * @phydev: the phy_device struct
+ *
+ * Description: Used to revert the effect of phy_speed_down
+ */
+int phy_speed_up(struct phy_device *phydev)
+{
+ u32 mask = PHY_10BT_FEATURES | PHY_100BT_FEATURES | PHY_1000BT_FEATURES;
+ u32 adv_old = phydev->advertising;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return 0;
+
+ phydev->advertising = (adv_old & ~mask) | (phydev->supported & mask);
+
+ if (phydev->advertising == adv_old)
+ return 0;
+
+ return phy_config_aneg(phydev);
+}
+EXPORT_SYMBOL_GPL(phy_speed_up);
+
/**
* phy_start_machine - start PHY state machine tracking
* @phydev: the phy_device struct
@@ -894,7 +983,7 @@ void phy_state_machine(struct work_struct *work)
needs_aneg = true;
break;
case PHY_NOLINK:
- if (phydev->irq != PHY_POLL)
+ if (!phy_polling_mode(phydev))
break;
err = phy_read_status(phydev);
@@ -935,7 +1024,7 @@ void phy_state_machine(struct work_struct *work)
/* Only register a CHANGE if we are polling and link changed
* since latest checking.
*/
- if (phydev->irq == PHY_POLL) {
+ if (phy_polling_mode(phydev)) {
old_link = phydev->link;
err = phy_read_status(phydev);
if (err)
@@ -1034,7 +1123,7 @@ void phy_state_machine(struct work_struct *work)
* PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
* between states from phy_mac_interrupt()
*/
- if (phydev->irq == PHY_POLL)
+ if (phy_polling_mode(phydev))
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
PHY_STATE_TIME * HZ);
}
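phy_speed_down() trims the advertised mask to the lowest speed both link partners support (10 Mb/s if the partner offers it, otherwise 100 Mb/s), renegotiates, and with sync=true polls up to ten seconds for completion; phy_speed_up() restores the full mask. A hedged sketch of the intended pairing in a MAC driver's wake-on-LAN suspend path; mac_suspend()/mac_resume() and the drvdata lookup are hypothetical, not from this patch:

#include <linux/phy.h>

static int mac_suspend(struct device *dev)	/* illustrative callback */
{
	struct phy_device *phydev = dev_get_drvdata(dev);	/* assumed */

	/* Lowest common speed saves power while armed for WoL; sync=true
	 * keeps a late autoneg interrupt from aborting the suspend. */
	return phy_speed_down(phydev, true);
}

static int mac_resume(struct device *dev)
{
	struct phy_device *phydev = dev_get_drvdata(dev);

	/* Re-advertise every supported speed and renegotiate. */
	return phy_speed_up(phydev);
}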
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b9f5f40a7ac1..db1172db1e7c 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1555,6 +1555,14 @@ int genphy_read_status(struct phy_device *phydev)
if (adv < 0)
return adv;
+ if (lpagb & LPA_1000MSFAIL) {
+ if (adv & CTL1000_ENABLE_MASTER)
+ phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n");
+ else
+ phydev_err(phydev, "Master/Slave resolution failed\n");
+ return -ENOLINK;
+ }
+
phydev->lp_advertising =
mii_stat1000_to_ethtool_lpa_t(lpagb);
common_adv_gb = lpagb & adv << 2;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index af4dc4425be2..3ba5cf2a8a5f 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1688,4 +1688,34 @@ static const struct sfp_upstream_ops sfp_phylink_ops = {
.disconnect_phy = phylink_sfp_disconnect_phy,
};
+/* Helpers for MAC drivers */
+
+/**
+ * phylink_helper_basex_speed() - 1000BaseX/2500BaseX helper
+ * @state: a pointer to a &struct phylink_link_state
+ *
+ * Inspect the interface mode, advertising mask or forced speed and
+ * decide whether to run at 2.5Gbit or 1Gbit appropriately, switching
+ * the interface mode to suit. @state->interface is appropriately
+ * updated, and the advertising mask has the "other" baseX_Full flag
+ * cleared.
+ */
+void phylink_helper_basex_speed(struct phylink_link_state *state)
+{
+ if (phy_interface_mode_is_8023z(state->interface)) {
+ bool want_2500 = state->an_enabled ?
+ phylink_test(state->advertising, 2500baseX_Full) :
+ state->speed == SPEED_2500;
+
+ if (want_2500) {
+ phylink_clear(state->advertising, 1000baseX_Full);
+ state->interface = PHY_INTERFACE_MODE_2500BASEX;
+ } else {
+ phylink_clear(state->advertising, 2500baseX_Full);
+ state->interface = PHY_INTERFACE_MODE_1000BASEX;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(phylink_helper_basex_speed);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 082fb40c656d..7fc8508b5231 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -37,6 +37,9 @@
#define RTL8201F_ISR 0x1e
#define RTL8201F_IER 0x13
+#define RTL8366RB_POWER_SAVE 0x15
+#define RTL8366RB_POWER_SAVE_ON BIT(12)
+
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
@@ -128,6 +131,37 @@ static int rtl8211f_config_intr(struct phy_device *phydev)
return phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
}
+static int rtl8211_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* This quirk was copied from the vendor driver. Unfortunately it
+ * includes no description of the magic numbers.
+ */
+ if (phydev->speed == SPEED_100 && phydev->autoneg == AUTONEG_DISABLE) {
+ phy_write(phydev, 0x17, 0x2138);
+ phy_write(phydev, 0x0e, 0x0260);
+ } else {
+ phy_write(phydev, 0x17, 0x2108);
+ phy_write(phydev, 0x0e, 0x0000);
+ }
+
+ return 0;
+}
+
+static int rtl8211c_config_init(struct phy_device *phydev)
+{
+ /* RTL8211C has an issue when operating in Gigabit slave mode */
+ phy_set_bits(phydev, MII_CTRL1000,
+ CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
+
+ return genphy_config_init(phydev);
+}
+
static int rtl8211f_config_init(struct phy_device *phydev)
{
int ret;
@@ -159,6 +193,24 @@ static int rtl8211b_resume(struct phy_device *phydev)
return genphy_resume(phydev);
}
+static int rtl8366rb_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_config_init(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_set_bits(phydev, RTL8366RB_POWER_SAVE,
+ RTL8366RB_POWER_SAVE_ON);
+ if (ret) {
+ dev_err(&phydev->mdio.dev,
+ "error enabling power management\n");
+ }
+
+ return ret;
+}
+
static struct phy_driver realtek_drvs[] = {
{
.phy_id = 0x00008201,
@@ -179,6 +231,14 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ .phy_id = 0x001cc910,
+ .name = "RTL8211 Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .config_aneg = rtl8211_config_aneg,
+ .read_mmd = &genphy_read_mmd_unsupported,
+ .write_mmd = &genphy_write_mmd_unsupported,
+ }, {
.phy_id = 0x001cc912,
.name = "RTL8211B Gigabit Ethernet",
.phy_id_mask = 0x001fffff,
@@ -191,6 +251,14 @@ static struct phy_driver realtek_drvs[] = {
.suspend = rtl8211b_suspend,
.resume = rtl8211b_resume,
}, {
+ .phy_id = 0x001cc913,
+ .name = "RTL8211C Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .config_init = rtl8211c_config_init,
+ .read_mmd = &genphy_read_mmd_unsupported,
+ .write_mmd = &genphy_write_mmd_unsupported,
+ }, {
.phy_id = 0x001cc914,
.name = "RTL8211DN Gigabit Ethernet",
.phy_id_mask = 0x001fffff,
@@ -223,6 +291,15 @@ static struct phy_driver realtek_drvs[] = {
.resume = genphy_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
+ }, {
+ .phy_id = 0x001cc961,
+ .name = "RTL8366RB Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &rtl8366rb_config_init,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
},
};
@@ -230,10 +307,13 @@ module_phy_driver(realtek_drvs);
static struct mdio_device_id __maybe_unused realtek_tbl[] = {
{ 0x001cc816, 0x001fffff },
+ { 0x001cc910, 0x001fffff },
{ 0x001cc912, 0x001fffff },
+ { 0x001cc913, 0x001fffff },
{ 0x001cc914, 0x001fffff },
{ 0x001cc915, 0x001fffff },
{ 0x001cc916, 0x001fffff },
+ { 0x001cc961, 0x001fffff },
{ }
};
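rtl8211c_config_init() works around the RTL8211C's gigabit-slave erratum by forcing manual master/slave configuration, as master, before the generic init. phy_set_bits() is a read-modify-write helper; roughly the following, simplified and without the MDIO bus locking the real helper performs:

static int phy_set_bits_sketch(struct phy_device *phydev, u32 regnum,
			       u16 bits)
{
	int val = phy_read(phydev, regnum);

	if (val < 0)
		return val;
	return phy_write(phydev, regnum, val | bits);
}

With CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER set in MII_CTRL1000 the PHY always resolves to master, and the genphy_read_status() change above (-ENOLINK on LPA_1000MSFAIL) will report the conflict if the link partner is also forced to master.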
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index c4c92db86dfa..4637d980310e 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1,5 +1,7 @@
+#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/hwmon.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
@@ -58,6 +60,69 @@ enum {
SFP_S_TX_DISABLE,
};
+static const char * const mod_state_strings[] = {
+ [SFP_MOD_EMPTY] = "empty",
+ [SFP_MOD_PROBE] = "probe",
+ [SFP_MOD_HPOWER] = "hpower",
+ [SFP_MOD_PRESENT] = "present",
+ [SFP_MOD_ERROR] = "error",
+};
+
+static const char *mod_state_to_str(unsigned short mod_state)
+{
+ if (mod_state >= ARRAY_SIZE(mod_state_strings))
+ return "Unknown module state";
+ return mod_state_strings[mod_state];
+}
+
+static const char * const dev_state_strings[] = {
+ [SFP_DEV_DOWN] = "down",
+ [SFP_DEV_UP] = "up",
+};
+
+static const char *dev_state_to_str(unsigned short dev_state)
+{
+ if (dev_state >= ARRAY_SIZE(dev_state_strings))
+ return "Unknown device state";
+ return dev_state_strings[dev_state];
+}
+
+static const char * const event_strings[] = {
+ [SFP_E_INSERT] = "insert",
+ [SFP_E_REMOVE] = "remove",
+ [SFP_E_DEV_DOWN] = "dev_down",
+ [SFP_E_DEV_UP] = "dev_up",
+ [SFP_E_TX_FAULT] = "tx_fault",
+ [SFP_E_TX_CLEAR] = "tx_clear",
+ [SFP_E_LOS_HIGH] = "los_high",
+ [SFP_E_LOS_LOW] = "los_low",
+ [SFP_E_TIMEOUT] = "timeout",
+};
+
+static const char *event_to_str(unsigned short event)
+{
+ if (event >= ARRAY_SIZE(event_strings))
+ return "Unknown event";
+ return event_strings[event];
+}
+
+static const char * const sm_state_strings[] = {
+ [SFP_S_DOWN] = "down",
+ [SFP_S_INIT] = "init",
+ [SFP_S_WAIT_LOS] = "wait_los",
+ [SFP_S_LINK_UP] = "link_up",
+ [SFP_S_TX_FAULT] = "tx_fault",
+ [SFP_S_REINIT] = "reinit",
+ [SFP_S_TX_DISABLE] = "tx_disable",
+};
+
+static const char *sm_state_to_str(unsigned short sm_state)
+{
+ if (sm_state >= ARRAY_SIZE(sm_state_strings))
+ return "Unknown state";
+ return sm_state_strings[sm_state];
+}
+
static const char *gpio_of_names[] = {
"mod-def0",
"los",
@@ -131,6 +196,12 @@ struct sfp {
unsigned int sm_retries;
struct sfp_eeprom_id id;
+#if IS_ENABLED(CONFIG_HWMON)
+ struct sfp_diag diag;
+ struct device *hwmon_dev;
+ char *hwmon_name;
+#endif
+
};
static bool sff_module_supported(const struct sfp_eeprom_id *id)
@@ -316,6 +387,719 @@ static unsigned int sfp_check(void *buf, size_t len)
return check;
}
+/* hwmon */
+#if IS_ENABLED(CONFIG_HWMON)
+static umode_t sfp_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct sfp *sfp = data;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_lcrit_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_lcrit:
+ case hwmon_temp_crit:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_min_alarm:
+ case hwmon_in_max_alarm:
+ case hwmon_in_lcrit_alarm:
+ case hwmon_in_crit_alarm:
+ case hwmon_in_min:
+ case hwmon_in_max:
+ case hwmon_in_lcrit:
+ case hwmon_in_crit:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_min_alarm:
+ case hwmon_curr_max_alarm:
+ case hwmon_curr_lcrit_alarm:
+ case hwmon_curr_crit_alarm:
+ case hwmon_curr_min:
+ case hwmon_curr_max:
+ case hwmon_curr_lcrit:
+ case hwmon_curr_crit:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_power:
+ /* External calibration of receive power requires
+ * floating point arithmetic. Doing that in the kernel
+ * is not easy, so just skip it. If the module does
+ * not require external calibration, we can however
+ * show receiver power, since FP is then not needed.
+ */
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_EXT_CAL &&
+ channel == 1)
+ return 0;
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_min_alarm:
+ case hwmon_power_max_alarm:
+ case hwmon_power_lcrit_alarm:
+ case hwmon_power_crit_alarm:
+ case hwmon_power_min:
+ case hwmon_power_max:
+ case hwmon_power_lcrit:
+ case hwmon_power_crit:
+ return 0444;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value)
+{
+ __be16 val;
+ int err;
+
+ err = sfp_read(sfp, true, reg, &val, sizeof(val));
+ if (err < 0)
+ return err;
+
+ *value = be16_to_cpu(val);
+
+ return 0;
+}
+
+static void sfp_hwmon_to_rx_power(long *value)
+{
+ *value = DIV_ROUND_CLOSEST(*value, 100);
+}
+
+static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset,
+ long *value)
+{
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_EXT_CAL)
+ *value = DIV_ROUND_CLOSEST(*value * slope, 256) + offset;
+}
+
+static void sfp_hwmon_calibrate_temp(struct sfp *sfp, long *value)
+{
+ sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_t_slope),
+ be16_to_cpu(sfp->diag.cal_t_offset), value);
+
+ if (*value >= 0x8000)
+ *value -= 0x10000;
+
+ *value = DIV_ROUND_CLOSEST(*value * 1000, 256);
+}
+
+static void sfp_hwmon_calibrate_vcc(struct sfp *sfp, long *value)
+{
+ sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_v_slope),
+ be16_to_cpu(sfp->diag.cal_v_offset), value);
+
+ *value = DIV_ROUND_CLOSEST(*value, 10);
+}
+
+static void sfp_hwmon_calibrate_bias(struct sfp *sfp, long *value)
+{
+ sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_txi_slope),
+ be16_to_cpu(sfp->diag.cal_txi_offset), value);
+
+ *value = DIV_ROUND_CLOSEST(*value, 500);
+}
+
+static void sfp_hwmon_calibrate_tx_power(struct sfp *sfp, long *value)
+{
+ sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_txpwr_slope),
+ be16_to_cpu(sfp->diag.cal_txpwr_offset), value);
+
+ *value = DIV_ROUND_CLOSEST(*value, 10);
+}
+
+static int sfp_hwmon_read_temp(struct sfp *sfp, int reg, long *value)
+{
+ int err;
+
+ err = sfp_hwmon_read_sensor(sfp, reg, value);
+ if (err < 0)
+ return err;
+
+ sfp_hwmon_calibrate_temp(sfp, value);
+
+ return 0;
+}
+
+static int sfp_hwmon_read_vcc(struct sfp *sfp, int reg, long *value)
+{
+ int err;
+
+ err = sfp_hwmon_read_sensor(sfp, reg, value);
+ if (err < 0)
+ return err;
+
+ sfp_hwmon_calibrate_vcc(sfp, value);
+
+ return 0;
+}
+
+static int sfp_hwmon_read_bias(struct sfp *sfp, int reg, long *value)
+{
+ int err;
+
+ err = sfp_hwmon_read_sensor(sfp, reg, value);
+ if (err < 0)
+ return err;
+
+ sfp_hwmon_calibrate_bias(sfp, value);
+
+ return 0;
+}
+
+static int sfp_hwmon_read_tx_power(struct sfp *sfp, int reg, long *value)
+{
+ int err;
+
+ err = sfp_hwmon_read_sensor(sfp, reg, value);
+ if (err < 0)
+ return err;
+
+ sfp_hwmon_calibrate_tx_power(sfp, value);
+
+ return 0;
+}
+
+static int sfp_hwmon_read_rx_power(struct sfp *sfp, int reg, long *value)
+{
+ int err;
+
+ err = sfp_hwmon_read_sensor(sfp, reg, value);
+ if (err < 0)
+ return err;
+
+ sfp_hwmon_to_rx_power(value);
+
+ return 0;
+}
+
+static int sfp_hwmon_temp(struct sfp *sfp, u32 attr, long *value)
+{
+ u8 status;
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return sfp_hwmon_read_temp(sfp, SFP_TEMP, value);
+
+ case hwmon_temp_lcrit:
+ *value = be16_to_cpu(sfp->diag.temp_low_alarm);
+ sfp_hwmon_calibrate_temp(sfp, value);
+ return 0;
+
+ case hwmon_temp_min:
+ *value = be16_to_cpu(sfp->diag.temp_low_warn);
+ sfp_hwmon_calibrate_temp(sfp, value);
+ return 0;
+ case hwmon_temp_max:
+ *value = be16_to_cpu(sfp->diag.temp_high_warn);
+ sfp_hwmon_calibrate_temp(sfp, value);
+ return 0;
+
+ case hwmon_temp_crit:
+ *value = be16_to_cpu(sfp->diag.temp_high_alarm);
+ sfp_hwmon_calibrate_temp(sfp, value);
+ return 0;
+
+ case hwmon_temp_lcrit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_TEMP_LOW);
+ return 0;
+
+ case hwmon_temp_min_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_TEMP_LOW);
+ return 0;
+
+ case hwmon_temp_max_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_TEMP_HIGH);
+ return 0;
+
+ case hwmon_temp_crit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_TEMP_HIGH);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_vcc(struct sfp *sfp, u32 attr, long *value)
+{
+ u8 status;
+ int err;
+
+ switch (attr) {
+ case hwmon_in_input:
+ return sfp_hwmon_read_vcc(sfp, SFP_VCC, value);
+
+ case hwmon_in_lcrit:
+ *value = be16_to_cpu(sfp->diag.volt_low_alarm);
+ sfp_hwmon_calibrate_vcc(sfp, value);
+ return 0;
+
+ case hwmon_in_min:
+ *value = be16_to_cpu(sfp->diag.volt_low_warn);
+ sfp_hwmon_calibrate_vcc(sfp, value);
+ return 0;
+
+ case hwmon_in_max:
+ *value = be16_to_cpu(sfp->diag.volt_high_warn);
+ sfp_hwmon_calibrate_vcc(sfp, value);
+ return 0;
+
+ case hwmon_in_crit:
+ *value = be16_to_cpu(sfp->diag.volt_high_alarm);
+ sfp_hwmon_calibrate_vcc(sfp, value);
+ return 0;
+
+ case hwmon_in_lcrit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_VCC_LOW);
+ return 0;
+
+ case hwmon_in_min_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_VCC_LOW);
+ return 0;
+
+ case hwmon_in_max_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_VCC_HIGH);
+ return 0;
+
+ case hwmon_in_crit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_VCC_HIGH);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_bias(struct sfp *sfp, u32 attr, long *value)
+{
+ u8 status;
+ int err;
+
+ switch (attr) {
+ case hwmon_curr_input:
+ return sfp_hwmon_read_bias(sfp, SFP_TX_BIAS, value);
+
+ case hwmon_curr_lcrit:
+ *value = be16_to_cpu(sfp->diag.bias_low_alarm);
+ sfp_hwmon_calibrate_bias(sfp, value);
+ return 0;
+
+ case hwmon_curr_min:
+ *value = be16_to_cpu(sfp->diag.bias_low_warn);
+ sfp_hwmon_calibrate_bias(sfp, value);
+ return 0;
+
+ case hwmon_curr_max:
+ *value = be16_to_cpu(sfp->diag.bias_high_warn);
+ sfp_hwmon_calibrate_bias(sfp, value);
+ return 0;
+
+ case hwmon_curr_crit:
+ *value = be16_to_cpu(sfp->diag.bias_high_alarm);
+ sfp_hwmon_calibrate_bias(sfp, value);
+ return 0;
+
+ case hwmon_curr_lcrit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_TX_BIAS_LOW);
+ return 0;
+
+ case hwmon_curr_min_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_TX_BIAS_LOW);
+ return 0;
+
+ case hwmon_curr_max_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_TX_BIAS_HIGH);
+ return 0;
+
+ case hwmon_curr_crit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_TX_BIAS_HIGH);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_tx_power(struct sfp *sfp, u32 attr, long *value)
+{
+ u8 status;
+ int err;
+
+ switch (attr) {
+ case hwmon_power_input:
+ return sfp_hwmon_read_tx_power(sfp, SFP_TX_POWER, value);
+
+ case hwmon_power_lcrit:
+ *value = be16_to_cpu(sfp->diag.txpwr_low_alarm);
+ sfp_hwmon_calibrate_tx_power(sfp, value);
+ return 0;
+
+ case hwmon_power_min:
+ *value = be16_to_cpu(sfp->diag.txpwr_low_warn);
+ sfp_hwmon_calibrate_tx_power(sfp, value);
+ return 0;
+
+ case hwmon_power_max:
+ *value = be16_to_cpu(sfp->diag.txpwr_high_warn);
+ sfp_hwmon_calibrate_tx_power(sfp, value);
+ return 0;
+
+ case hwmon_power_crit:
+ *value = be16_to_cpu(sfp->diag.txpwr_high_alarm);
+ sfp_hwmon_calibrate_tx_power(sfp, value);
+ return 0;
+
+ case hwmon_power_lcrit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_TXPWR_LOW);
+ return 0;
+
+ case hwmon_power_min_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_TXPWR_LOW);
+ return 0;
+
+ case hwmon_power_max_alarm:
+ err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN0_TXPWR_HIGH);
+ return 0;
+
+ case hwmon_power_crit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM0_TXPWR_HIGH);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_rx_power(struct sfp *sfp, u32 attr, long *value)
+{
+ u8 status;
+ int err;
+
+ switch (attr) {
+ case hwmon_power_input:
+ return sfp_hwmon_read_rx_power(sfp, SFP_RX_POWER, value);
+
+ case hwmon_power_lcrit:
+ *value = be16_to_cpu(sfp->diag.rxpwr_low_alarm);
+ sfp_hwmon_to_rx_power(value);
+ return 0;
+
+ case hwmon_power_min:
+ *value = be16_to_cpu(sfp->diag.rxpwr_low_warn);
+ sfp_hwmon_to_rx_power(value);
+ return 0;
+
+ case hwmon_power_max:
+ *value = be16_to_cpu(sfp->diag.rxpwr_high_warn);
+ sfp_hwmon_to_rx_power(value);
+ return 0;
+
+ case hwmon_power_crit:
+ *value = be16_to_cpu(sfp->diag.rxpwr_high_alarm);
+ sfp_hwmon_to_rx_power(value);
+ return 0;
+
+ case hwmon_power_lcrit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM1, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM1_RXPWR_LOW);
+ return 0;
+
+ case hwmon_power_min_alarm:
+ err = sfp_read(sfp, true, SFP_WARN1, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN1_RXPWR_LOW);
+ return 0;
+
+ case hwmon_power_max_alarm:
+ err = sfp_read(sfp, true, SFP_WARN1, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_WARN1_RXPWR_HIGH);
+ return 0;
+
+ case hwmon_power_crit_alarm:
+ err = sfp_read(sfp, true, SFP_ALARM1, &status, sizeof(status));
+ if (err < 0)
+ return err;
+
+ *value = !!(status & SFP_ALARM1_RXPWR_HIGH);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *value)
+{
+ struct sfp *sfp = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ return sfp_hwmon_temp(sfp, attr, value);
+ case hwmon_in:
+ return sfp_hwmon_vcc(sfp, attr, value);
+ case hwmon_curr:
+ return sfp_hwmon_bias(sfp, attr, value);
+ case hwmon_power:
+ switch (channel) {
+ case 0:
+ return sfp_hwmon_tx_power(sfp, attr, value);
+ case 1:
+ return sfp_hwmon_rx_power(sfp, attr, value);
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_ops sfp_hwmon_ops = {
+ .is_visible = sfp_hwmon_is_visible,
+ .read = sfp_hwmon_read,
+};
+
+static u32 sfp_hwmon_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_chip = {
+ .type = hwmon_chip,
+ .config = sfp_hwmon_chip_config,
+};
+
+static u32 sfp_hwmon_temp_config[] = {
+ HWMON_T_INPUT |
+ HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
+ HWMON_T_CRIT | HWMON_T_LCRIT |
+ HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM,
+ 0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_temp_channel_info = {
+ .type = hwmon_temp,
+ .config = sfp_hwmon_temp_config,
+};
+
+static u32 sfp_hwmon_vcc_config[] = {
+ HWMON_I_INPUT |
+ HWMON_I_MAX | HWMON_I_MIN |
+ HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM |
+ HWMON_I_CRIT | HWMON_I_LCRIT |
+ HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM,
+ 0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_vcc_channel_info = {
+ .type = hwmon_in,
+ .config = sfp_hwmon_vcc_config,
+};
+
+static u32 sfp_hwmon_bias_config[] = {
+ HWMON_C_INPUT |
+ HWMON_C_MAX | HWMON_C_MIN |
+ HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM |
+ HWMON_C_CRIT | HWMON_C_LCRIT |
+ HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM,
+ 0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_bias_channel_info = {
+ .type = hwmon_curr,
+ .config = sfp_hwmon_bias_config,
+};
+
+static u32 sfp_hwmon_power_config[] = {
+ /* Transmit power */
+ HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
+ HWMON_P_CRIT | HWMON_P_LCRIT |
+ HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM,
+ /* Receive power */
+ HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
+ HWMON_P_CRIT | HWMON_P_LCRIT |
+ HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM,
+ 0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_power_channel_info = {
+ .type = hwmon_power,
+ .config = sfp_hwmon_power_config,
+};
+
+static const struct hwmon_channel_info *sfp_hwmon_info[] = {
+ &sfp_hwmon_chip,
+ &sfp_hwmon_vcc_channel_info,
+ &sfp_hwmon_temp_channel_info,
+ &sfp_hwmon_bias_channel_info,
+ &sfp_hwmon_power_channel_info,
+ NULL,
+};
+
+static const struct hwmon_chip_info sfp_hwmon_chip_info = {
+ .ops = &sfp_hwmon_ops,
+ .info = sfp_hwmon_info,
+};
+
+static int sfp_hwmon_insert(struct sfp *sfp)
+{
+ int err, i;
+
+ if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE)
+ return 0;
+
+ if (!(sfp->id.ext.diagmon & SFP_DIAGMON_DDM))
+ return 0;
+
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)
+ /* This driver in general does not support address
+ * change.
+ */
+ return 0;
+
+ err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag));
+ if (err < 0)
+ return err;
+
+ sfp->hwmon_name = kstrdup(dev_name(sfp->dev), GFP_KERNEL);
+ if (!sfp->hwmon_name)
+ return -ENOMEM;
+
+ for (i = 0; sfp->hwmon_name[i]; i++)
+ if (hwmon_is_bad_char(sfp->hwmon_name[i]))
+ sfp->hwmon_name[i] = '_';
+
+ sfp->hwmon_dev = hwmon_device_register_with_info(sfp->dev,
+ sfp->hwmon_name, sfp,
+ &sfp_hwmon_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(sfp->hwmon_dev);
+}
+
+static void sfp_hwmon_remove(struct sfp *sfp)
+{
+ hwmon_device_unregister(sfp->hwmon_dev);
+ kfree(sfp->hwmon_name);
+}
+#else
+static int sfp_hwmon_insert(struct sfp *sfp)
+{
+ return 0;
+}
+
+static void sfp_hwmon_remove(struct sfp *sfp)
+{
+}
+#endif
+
/* Helpers */
static void sfp_module_tx_disable(struct sfp *sfp)
{
@@ -636,6 +1420,10 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
dev_warn(sfp->dev,
"module address swap to access page 0xA2 is not supported.\n");
+ ret = sfp_hwmon_insert(sfp);
+ if (ret < 0)
+ return ret;
+
ret = sfp_module_insert(sfp->sfp_bus, &sfp->id);
if (ret < 0)
return ret;
@@ -647,6 +1435,8 @@ static void sfp_sm_mod_remove(struct sfp *sfp)
{
sfp_module_remove(sfp->sfp_bus);
+ sfp_hwmon_remove(sfp);
+
if (sfp->mod_phy)
sfp_sm_phy_detach(sfp);
@@ -661,8 +1451,11 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
{
mutex_lock(&sfp->sm_mutex);
- dev_dbg(sfp->dev, "SM: enter %u:%u:%u event %u\n",
- sfp->sm_mod_state, sfp->sm_dev_state, sfp->sm_state, event);
+ dev_dbg(sfp->dev, "SM: enter %s:%s:%s event %s\n",
+ mod_state_to_str(sfp->sm_mod_state),
+ dev_state_to_str(sfp->sm_dev_state),
+ sm_state_to_str(sfp->sm_state),
+ event_to_str(event));
/* This state machine tracks the insert/remove state of
* the module, and handles probing the on-board EEPROM.
@@ -793,8 +1586,10 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
break;
}
- dev_dbg(sfp->dev, "SM: exit %u:%u:%u\n",
- sfp->sm_mod_state, sfp->sm_dev_state, sfp->sm_state);
+ dev_dbg(sfp->dev, "SM: exit %s:%s:%s\n",
+ mod_state_to_str(sfp->sm_mod_state),
+ dev_state_to_str(sfp->sm_dev_state),
+ sm_state_to_str(sfp->sm_state));
mutex_unlock(&sfp->sm_mutex);
}
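The sfp_hwmon_calibrate_*() helpers implement the SFF-8472 external-calibration formula, raw * slope / 256 + offset, followed by a per-sensor rescale into the units hwmon expects (millidegrees C, mV, mA, uW). A worked temperature example with assumed values: raw word 0x1A00, slope 256 (1.0 in 8.8 fixed point), offset 0:

#include <linux/kernel.h>

static long temp_cal_example(void)
{
	long value = 0x1A00;	/* 6656, assumed raw reading */

	/* External calibration: raw * slope / 256 + offset. */
	value = DIV_ROUND_CLOSEST(value * 256, 256) + 0;	/* 6656 */
	if (value >= 0x8000)
		value -= 0x10000;	/* sign-extend; unchanged here */
	return DIV_ROUND_CLOSEST(value * 1000, 256);	/* 26000 mC == 26.0 C */
}

Receive power is the exception: its external calibration is floating-point per the spec, which is why sfp_hwmon_is_visible() hides the RX power channel on externally calibrated modules instead of computing it.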
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index d9dd8fbfffc7..fbf9ad429593 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -72,6 +72,10 @@
#define PHY_ID_VSC8572 0x000704d0
#define PHY_ID_VSC8574 0x000704a0
#define PHY_ID_VSC8601 0x00070420
+#define PHY_ID_VSC7385 0x00070450
+#define PHY_ID_VSC7388 0x00070480
+#define PHY_ID_VSC7395 0x00070550
+#define PHY_ID_VSC7398 0x00070580
#define PHY_ID_VSC8662 0x00070660
#define PHY_ID_VSC8221 0x000fc550
#define PHY_ID_VSC8211 0x000fc4b0
@@ -116,6 +120,137 @@ static int vsc824x_config_init(struct phy_device *phydev)
return err;
}
+#define VSC73XX_EXT_PAGE_ACCESS 0x1f
+
+static int vsc73xx_read_page(struct phy_device *phydev)
+{
+ return __phy_read(phydev, VSC73XX_EXT_PAGE_ACCESS);
+}
+
+static int vsc73xx_write_page(struct phy_device *phydev, int page)
+{
+ return __phy_write(phydev, VSC73XX_EXT_PAGE_ACCESS, page);
+}
+
+static void vsc73xx_config_init(struct phy_device *phydev)
+{
+ /* Receiver init */
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x0c, 0x0300, 0x0200);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ /* Config LEDs 0x61 */
+ phy_modify(phydev, MII_TPISTATUS, 0xff00, 0x0061);
+}
+
+static int vsc738x_config_init(struct phy_device *phydev)
+{
+ u16 rev;
+ /* This magic sequence appears in the application note
+ * "VSC7385/7388 PHY Configuration".
+ *
+ * Maybe one day we will get to know what it all means.
+ */
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x08, 0x0200, 0x0200);
+ phy_write(phydev, 0x1f, 0x52b5);
+ phy_write(phydev, 0x10, 0xb68a);
+ phy_modify(phydev, 0x12, 0xff07, 0x0003);
+ phy_modify(phydev, 0x11, 0x00ff, 0x00a2);
+ phy_write(phydev, 0x10, 0x968a);
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x08, 0x0200, 0x0000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ /* Read revision */
+ rev = phy_read(phydev, MII_PHYSID2);
+ rev &= 0x0f;
+
+ /* Special quirk for revision 0 */
+ if (rev == 0) {
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x08, 0x0200, 0x0200);
+ phy_write(phydev, 0x1f, 0x52b5);
+ phy_write(phydev, 0x12, 0x0000);
+ phy_write(phydev, 0x11, 0x0689);
+ phy_write(phydev, 0x10, 0x8f92);
+ phy_write(phydev, 0x1f, 0x52b5);
+ phy_write(phydev, 0x12, 0x0000);
+ phy_write(phydev, 0x11, 0x0e35);
+ phy_write(phydev, 0x10, 0x9786);
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x08, 0x0200, 0x0000);
+ phy_write(phydev, 0x17, 0xff80);
+ phy_write(phydev, 0x17, 0x0000);
+ }
+
+ phy_write(phydev, 0x1f, 0x0000);
+ phy_write(phydev, 0x12, 0x0048);
+
+ if (rev == 0) {
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_write(phydev, 0x14, 0x6600);
+ phy_write(phydev, 0x1f, 0x0000);
+ phy_write(phydev, 0x18, 0xa24e);
+ } else {
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x16, 0x0fc0, 0x0240);
+ phy_modify(phydev, 0x14, 0x6000, 0x4000);
+ /* bits 14-15 in extended register 0x14 controls DACG amplitude
+ * 6 = -8%, 2 is hardware default
+ */
+ phy_write(phydev, 0x1f, 0x0001);
+ phy_modify(phydev, 0x14, 0xe000, 0x6000);
+ phy_write(phydev, 0x1f, 0x0000);
+ }
+
+ vsc73xx_config_init(phydev);
+
+ return genphy_config_init(phydev);
+}
+
+static int vsc739x_config_init(struct phy_device *phydev)
+{
+ /* This magic sequence appears in the VSC7395 SparX-G5e application
+ * note "VSC7395/VSC7398 PHY Configuration"
+ *
+ * Maybe one day we will get to know what it all means.
+ */
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x08, 0x0200, 0x0200);
+ phy_write(phydev, 0x1f, 0x52b5);
+ phy_write(phydev, 0x10, 0xb68a);
+ phy_modify(phydev, 0x12, 0xff07, 0x0003);
+ phy_modify(phydev, 0x11, 0x00ff, 0x00a2);
+ phy_write(phydev, 0x10, 0x968a);
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x08, 0x0200, 0x0000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ phy_write(phydev, 0x1f, 0x0000);
+ phy_write(phydev, 0x12, 0x0048);
+ phy_write(phydev, 0x1f, 0x2a30);
+ phy_modify(phydev, 0x16, 0x0fc0, 0x0240);
+ phy_modify(phydev, 0x14, 0x6000, 0x4000);
+ phy_write(phydev, 0x1f, 0x0001);
+ phy_modify(phydev, 0x14, 0xe000, 0x6000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ vsc73xx_config_init(phydev);
+
+ return genphy_config_init(phydev);
+}
+
+static int vsc73xx_config_aneg(struct phy_device *phydev)
+{
+ /* The VSC73xx switches do not like to be instructed to
+ * do autonegotiation in any way; they prefer that you just go
+ * with the power-on/reset defaults. Writing some registers will
+ * just make autonegotiation permanently fail.
+ */
+ return 0;
+}
+
/* This adds a skew for both TX and RX clocks, so the skew should only be
* applied to "rgmii-id" interfaces. It may not work as expected
* on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */
@@ -319,6 +454,42 @@ static struct phy_driver vsc82xx_driver[] = {
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
+ .phy_id = PHY_ID_VSC7385,
+ .name = "Vitesse VSC7385",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .config_init = vsc738x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+}, {
+ .phy_id = PHY_ID_VSC7388,
+ .name = "Vitesse VSC7388",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .config_init = vsc738x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+}, {
+ .phy_id = PHY_ID_VSC7395,
+ .name = "Vitesse VSC7395",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .config_init = vsc739x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+}, {
+ .phy_id = PHY_ID_VSC7398,
+ .name = "Vitesse VSC7398",
+ .phy_id_mask = 0x000ffff0,
+ .features = PHY_GBIT_FEATURES,
+ .config_init = vsc739x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+}, {
.phy_id = PHY_ID_VSC8662,
.name = "Vitesse VSC8662",
.phy_id_mask = 0x000ffff0,
@@ -358,6 +529,10 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8514, 0x000ffff0 },
{ PHY_ID_VSC8572, 0x000ffff0 },
{ PHY_ID_VSC8574, 0x000ffff0 },
+ { PHY_ID_VSC7385, 0x000ffff0 },
+ { PHY_ID_VSC7388, 0x000ffff0 },
+ { PHY_ID_VSC7395, 0x000ffff0 },
+ { PHY_ID_VSC7398, 0x000ffff0 },
{ PHY_ID_VSC8662, 0x000ffff0 },
{ PHY_ID_VSC8221, 0x000ffff0 },
{ PHY_ID_VSC8211, 0x000ffff0 },
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 2e5150b0b8d5..74a8782313cf 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -33,17 +33,22 @@ struct gmii2rgmii {
struct phy_device *phy_dev;
struct phy_driver *phy_drv;
struct phy_driver conv_phy_drv;
- int addr;
+ struct mdio_device *mdio;
};
static int xgmiitorgmii_read_status(struct phy_device *phydev)
{
struct gmii2rgmii *priv = phydev->priv;
+ struct mii_bus *bus = priv->mdio->bus;
+ int addr = priv->mdio->addr;
u16 val = 0;
+ int err;
- priv->phy_drv->read_status(phydev);
+ err = priv->phy_drv->read_status(phydev);
+ if (err < 0)
+ return err;
- val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
+ val = mdiobus_read(bus, addr, XILINX_GMII2RGMII_REG);
val &= ~XILINX_GMII2RGMII_SPEED_MASK;
if (phydev->speed == SPEED_1000)
@@ -53,7 +58,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
else
val |= BMCR_SPEED10;
- mdiobus_write(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG, val);
+ mdiobus_write(bus, addr, XILINX_GMII2RGMII_REG, val);
return 0;
}
@@ -81,7 +86,12 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev)
return -EPROBE_DEFER;
}
- priv->addr = mdiodev->addr;
+ if (!priv->phy_dev->drv) {
+ dev_info(dev, "Attached phy not ready\n");
+ return -EPROBE_DEFER;
+ }
+
+ priv->mdio = mdiodev;
priv->phy_drv = priv->phy_dev->drv;
memcpy(&priv->conv_phy_drv, priv->phy_dev->drv,
sizeof(struct phy_driver));
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 6c7fd98cb00a..a205750b431b 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -96,7 +96,7 @@ static inline void sha_pad_init(struct sha_pad *shapad)
*/
struct ppp_mppe_state {
struct crypto_skcipher *arc4;
- struct crypto_ahash *sha1;
+ struct shash_desc *sha1;
unsigned char *sha1_digest;
unsigned char master_key[MPPE_MAX_KEY_LEN];
unsigned char session_key[MPPE_MAX_KEY_LEN];
@@ -136,25 +136,16 @@ struct ppp_mppe_state {
*/
static void get_new_key_from_sha(struct ppp_mppe_state * state)
{
- AHASH_REQUEST_ON_STACK(req, state->sha1);
- struct scatterlist sg[4];
- unsigned int nbytes;
-
- sg_init_table(sg, 4);
-
- nbytes = setup_sg(&sg[0], state->master_key, state->keylen);
- nbytes += setup_sg(&sg[1], sha_pad->sha_pad1,
- sizeof(sha_pad->sha_pad1));
- nbytes += setup_sg(&sg[2], state->session_key, state->keylen);
- nbytes += setup_sg(&sg[3], sha_pad->sha_pad2,
- sizeof(sha_pad->sha_pad2));
-
- ahash_request_set_tfm(req, state->sha1);
- ahash_request_set_callback(req, 0, NULL, NULL);
- ahash_request_set_crypt(req, sg, state->sha1_digest, nbytes);
-
- crypto_ahash_digest(req);
- ahash_request_zero(req);
+ crypto_shash_init(state->sha1);
+ crypto_shash_update(state->sha1, state->master_key,
+ state->keylen);
+ crypto_shash_update(state->sha1, sha_pad->sha_pad1,
+ sizeof(sha_pad->sha_pad1));
+ crypto_shash_update(state->sha1, state->session_key,
+ state->keylen);
+ crypto_shash_update(state->sha1, sha_pad->sha_pad2,
+ sizeof(sha_pad->sha_pad2));
+ crypto_shash_final(state->sha1, state->sha1_digest);
}
/*
@@ -200,6 +191,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
static void *mppe_alloc(unsigned char *options, int optlen)
{
struct ppp_mppe_state *state;
+ struct crypto_shash *shash;
unsigned int digestsize;
if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
@@ -217,13 +209,21 @@ static void *mppe_alloc(unsigned char *options, int optlen)
goto out_free;
}
- state->sha1 = crypto_alloc_ahash("sha1", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(state->sha1)) {
- state->sha1 = NULL;
+ shash = crypto_alloc_shash("sha1", 0, 0);
+ if (IS_ERR(shash))
+ goto out_free;
+
+ state->sha1 = kmalloc(sizeof(*state->sha1) +
+ crypto_shash_descsize(shash),
+ GFP_KERNEL);
+ if (!state->sha1) {
+ crypto_free_shash(shash);
goto out_free;
}
+ state->sha1->tfm = shash;
+ state->sha1->flags = 0;
- digestsize = crypto_ahash_digestsize(state->sha1);
+ digestsize = crypto_shash_digestsize(shash);
if (digestsize < MPPE_MAX_KEY_LEN)
goto out_free;
@@ -246,7 +246,10 @@ static void *mppe_alloc(unsigned char *options, int optlen)
out_free:
kfree(state->sha1_digest);
- crypto_free_ahash(state->sha1);
+ if (state->sha1) {
+ crypto_free_shash(state->sha1->tfm);
+ kzfree(state->sha1);
+ }
crypto_free_skcipher(state->arc4);
kfree(state);
out:
@@ -261,7 +264,8 @@ static void mppe_free(void *arg)
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
if (state) {
kfree(state->sha1_digest);
- crypto_free_ahash(state->sha1);
+ crypto_free_shash(state->sha1->tfm);
+ kzfree(state->sha1);
crypto_free_skcipher(state->arc4);
kfree(state);
}
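The MPPE conversion replaces the asynchronous ahash interface (scatterlists, request objects) with the synchronous shash interface, which hashes flat kernel buffers directly -- a better fit for a few dozen key bytes. The driver keeps a long-lived kmalloc'd shash_desc; for a one-shot digest the stack descriptor is the usual idiom, sketched here under the assumption of this kernel generation's API (where shash_desc still has a flags field):

#include <crypto/hash.h>

static int sha1_digest_sketch(const u8 *buf, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		err = crypto_shash_digest(desc, buf, len, out);
		shash_desc_zero(desc);	/* scrub key material from stack */
	}

	crypto_free_shash(tfm);
	return err;
}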
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index b070959737ff..6a047d30e8c6 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -41,11 +41,6 @@
#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
-static struct team_port *team_port_get_rcu(const struct net_device *dev)
-{
- return rcu_dereference(dev->rx_handler_data);
-}
-
static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
struct team_port *port = rtnl_dereference(dev->rx_handler_data);
@@ -1707,7 +1702,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
}
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
/*
* This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index f5727baac84a..2bbefe828670 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -200,6 +200,7 @@ struct tun_flow_entry {
};
#define TUN_NUM_FLOW_ENTRIES 1024
+#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
struct tun_prog {
struct rcu_head rcu;
@@ -406,7 +407,7 @@ static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
static inline u32 tun_hashfn(u32 rxhash)
{
- return rxhash & 0x3ff;
+ return rxhash & TUN_MASK_FLOW_ENTRIES;
}
static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
@@ -607,7 +608,8 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
}
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct tun_struct *tun = netdev_priv(dev);
u16 ret;
@@ -1268,7 +1270,6 @@ static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return tun_xdp_set(dev, xdp->prog, xdp->extack);
case XDP_QUERY_PROG:
xdp->prog_id = tun_xdp_query(dev);
- xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
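Replacing the magic 0x3ff in tun_hashfn() with a mask derived from the table size keeps the hash correct if TUN_NUM_FLOW_ENTRIES ever changes: for a power-of-two N, hash & (N - 1) equals hash % N. Standalone check:

#include <stdio.h>

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)

int main(void)
{
	unsigned int rxhash = 0x12345678;	/* arbitrary example hash */

	/* Both print 632: masking and modulo agree for power-of-two sizes. */
	printf("%u %u\n", rxhash & TUN_MASK_FLOW_ENTRIES,
	       rxhash % TUN_NUM_FLOW_ENTRIES);
	return 0;
}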
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index b1b3d8f7e67d..b654f05b2ccd 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -693,24 +693,32 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
u32 phyid;
struct asix_common_private *priv;
- usbnet_get_endpoints(dev,intf);
+ usbnet_get_endpoints(dev, intf);
- /* Get the MAC address */
- if (dev->driver_info->data & FLAG_EEPROM_MAC) {
- for (i = 0; i < (ETH_ALEN >> 1); i++) {
- ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x04 + i,
- 0, 2, buf + i * 2, 0);
- if (ret < 0)
- break;
- }
+ /* Maybe the boot loader passed the MAC address via device tree */
+ if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) {
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address read from device tree");
} else {
- ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
- 0, 0, ETH_ALEN, buf, 0);
- }
+ /* Try getting the MAC address from EEPROM */
+ if (dev->driver_info->data & FLAG_EEPROM_MAC) {
+ for (i = 0; i < (ETH_ALEN >> 1); i++) {
+ ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM,
+ 0x04 + i, 0, 2, buf + i * 2,
+ 0);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
+ 0, 0, ETH_ALEN, buf, 0);
+ }
- if (ret < 0) {
- netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
- return ret;
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read MAC address: %d\n",
+ ret);
+ return ret;
+ }
}
asix_set_netdev_dev_addr(dev, buf);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 18d36dff97ea..424053bd8b21 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -869,6 +869,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
default:
dev_warn(&intf->dev,
"Couldn't detect memory size, assuming 32k\n");
+ /* fall through */
case 0x87654321:
catc_set_reg(catc, TxBufCount, 4);
catc_set_reg(catc, RxBufCount, 16);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 288ecd999171..78b16eb9e58c 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -99,6 +99,7 @@ static void tx_complete(struct urb *req)
struct net_device *dev = skb->dev;
struct usbpn_dev *pnd = netdev_priv(dev);
int status = req->status;
+ unsigned long flags;
switch (status) {
case 0:
@@ -109,16 +110,17 @@ static void tx_complete(struct urb *req)
case -ECONNRESET:
case -ESHUTDOWN:
dev->stats.tx_aborted_errors++;
+ /* fall through */
default:
dev->stats.tx_errors++;
dev_dbg(&dev->dev, "TX error (%d)\n", status);
}
dev->stats.tx_packets++;
- spin_lock(&pnd->tx_lock);
+ spin_lock_irqsave(&pnd->tx_lock, flags);
pnd->tx_queue--;
netif_wake_queue(dev);
- spin_unlock(&pnd->tx_lock);
+ spin_unlock_irqrestore(&pnd->tx_lock, flags);
dev_kfree_skb_any(skb);
usb_free_urb(req);
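tx_complete() is a URB completion handler, and completion handlers may run in hard-irq, softirq, or threaded-IRQ context depending on the host controller driver, so a plain spin_lock() here could execute with IRQs either on or off. The irqsave variants save and restore the caller's IRQ state, which is why this and the following hso, kaweth, r8152 and rtl8150 hunks all switch to them. A minimal kernel-style sketch of the pattern; example_dev is hypothetical:

#include <linux/spinlock.h>
#include <linux/usb.h>

/* Hypothetical stand-in for the driver's private state */
struct example_dev {
        spinlock_t tx_lock;
        unsigned int tx_queue;
};

static void example_tx_complete(struct urb *req)
{
        struct example_dev *pnd = req->context;
        unsigned long flags;

        spin_lock_irqsave(&pnd->tx_lock, flags);        /* correct in any context */
        pnd->tx_queue--;
        spin_unlock_irqrestore(&pnd->tx_lock, flags);   /* restores prior IRQ state */
}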
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index e53883ad6107..184c24baca15 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -999,6 +999,7 @@ static void read_bulk_callback(struct urb *urb)
struct hso_net *odev = urb->context;
struct net_device *net;
int result;
+ unsigned long flags;
int status = urb->status;
/* is all ok? */
@@ -1028,11 +1029,11 @@ static void read_bulk_callback(struct urb *urb)
if (urb->actual_length) {
/* Handle the IP stream, add header and push it onto network
* stack if the packet is complete. */
- spin_lock(&odev->net_lock);
+ spin_lock_irqsave(&odev->net_lock, flags);
packetizeRx(odev, urb->transfer_buffer, urb->actual_length,
(urb->transfer_buffer_length >
urb->actual_length) ? 1 : 0);
- spin_unlock(&odev->net_lock);
+ spin_unlock_irqrestore(&odev->net_lock, flags);
}
/* We are done with this URB, resubmit it. Prep the USB to wait for
@@ -1193,6 +1194,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
{
struct hso_serial *serial = urb->context;
int status = urb->status;
+ unsigned long flags;
hso_dbg(0x8, "--- Got serial_read_bulk callback %02x ---\n", status);
@@ -1216,10 +1218,10 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
/* Valid data, handle RX data */
- spin_lock(&serial->serial_lock);
+ spin_lock_irqsave(&serial->serial_lock, flags);
serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
put_rxbuf_data_and_resubmit_bulk_urb(serial);
- spin_unlock(&serial->serial_lock);
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
}
/*
@@ -1502,12 +1504,13 @@ static void tiocmget_intr_callback(struct urb *urb)
DUMP(serial_state_notification,
sizeof(struct hso_serial_state_notification));
} else {
+ unsigned long flags;
UART_state_bitmap = le16_to_cpu(serial_state_notification->
UART_state_bitmap);
prev_UART_state_bitmap = tiocmget->prev_UART_state_bitmap;
icount = &tiocmget->icount;
- spin_lock(&serial->serial_lock);
+ spin_lock_irqsave(&serial->serial_lock, flags);
if ((UART_state_bitmap & B_OVERRUN) !=
(prev_UART_state_bitmap & B_OVERRUN))
icount->parity++;
@@ -1530,7 +1533,7 @@ static void tiocmget_intr_callback(struct urb *urb)
(prev_UART_state_bitmap & B_RX_CARRIER))
icount->dcd++;
tiocmget->prev_UART_state_bitmap = UART_state_bitmap;
- spin_unlock(&serial->serial_lock);
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
tiocmget->intr_completed = 1;
wake_up_interruptible(&tiocmget->waitq);
}
@@ -1729,7 +1732,6 @@ static int hso_serial_ioctl(struct tty_struct *tty,
/* starts a transmit */
static void hso_kick_transmit(struct hso_serial *serial)
{
- u8 *temp;
unsigned long flags;
int res;
@@ -1745,14 +1747,12 @@ static void hso_kick_transmit(struct hso_serial *serial)
goto out;
/* Switch pointers around to avoid memcpy */
- temp = serial->tx_buffer;
- serial->tx_buffer = serial->tx_data;
- serial->tx_data = temp;
+ swap(serial->tx_buffer, serial->tx_data);
serial->tx_data_count = serial->tx_buffer_count;
serial->tx_buffer_count = 0;
- /* If temp is set, it means we switched buffers */
- if (temp && serial->write_data) {
+ /* If serial->tx_data is set, it means we switched buffers */
+ if (serial->tx_data && serial->write_data) {
res = serial->write_data(serial);
if (res >= 0)
serial->tx_urb_used = 1;
@@ -1852,6 +1852,7 @@ static void intr_callback(struct urb *urb)
struct hso_serial *serial;
unsigned char *port_req;
int status = urb->status;
+ unsigned long flags;
int i;
usb_mark_last_busy(urb->dev);
@@ -1879,7 +1880,7 @@ static void intr_callback(struct urb *urb)
if (serial != NULL) {
hso_dbg(0x1, "Pending read interrupt on port %d\n",
i);
- spin_lock(&serial->serial_lock);
+ spin_lock_irqsave(&serial->serial_lock, flags);
if (serial->rx_state == RX_IDLE &&
serial->port.count > 0) {
/* Setup and send a ctrl req read on
@@ -1893,7 +1894,8 @@ static void intr_callback(struct urb *urb)
hso_dbg(0x1, "Already a read pending on port %d or port not open\n",
i);
}
- spin_unlock(&serial->serial_lock);
+ spin_unlock_irqrestore(&serial->serial_lock,
+ flags);
}
}
}
@@ -1920,6 +1922,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
{
struct hso_serial *serial = urb->context;
int status = urb->status;
+ unsigned long flags;
/* sanity check */
if (!serial) {
@@ -1927,9 +1930,9 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
return;
}
- spin_lock(&serial->serial_lock);
+ spin_lock_irqsave(&serial->serial_lock, flags);
serial->tx_urb_used = 0;
- spin_unlock(&serial->serial_lock);
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
if (status) {
handle_usb_error(status, __func__, serial->parent);
return;
@@ -1971,14 +1974,15 @@ static void ctrl_callback(struct urb *urb)
struct hso_serial *serial = urb->context;
struct usb_ctrlrequest *req;
int status = urb->status;
+ unsigned long flags;
/* sanity check */
if (!serial)
return;
- spin_lock(&serial->serial_lock);
+ spin_lock_irqsave(&serial->serial_lock, flags);
serial->tx_urb_used = 0;
- spin_unlock(&serial->serial_lock);
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
if (status) {
handle_usb_error(status, __func__, serial->parent);
return;
@@ -1994,9 +1998,9 @@ static void ctrl_callback(struct urb *urb)
(USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) {
/* response to a read command */
serial->rx_urb_filled[0] = 1;
- spin_lock(&serial->serial_lock);
+ spin_lock_irqsave(&serial->serial_lock, flags);
put_rxbuf_data_and_resubmit_ctrl_urb(serial);
- spin_unlock(&serial->serial_lock);
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
} else {
hso_put_activity(serial->parent);
tty_port_tty_wakeup(&serial->port);
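In hso_kick_transmit() above, the open-coded three-assignment exchange becomes the kernel's type-generic swap() macro; because the temp variable is gone, the "did we switch buffers" test now reads serial->tx_data directly. A userspace rendition matching the macro's shape in linux/kernel.h at the time of this series:

#include <stdio.h>

/* Userspace rendition of the kernel's type-generic swap() helper */
#define swap(a, b) \
        do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
        char *tx_buffer = "pending";
        char *tx_data = NULL;

        swap(tx_buffer, tx_data);       /* no temp variable at the call site */

        /* The post-swap test from the hunk: tx_data now holds the old buffer */
        if (tx_data)
                printf("switched buffers, sending \"%s\"\n", tx_data);
        return 0;
}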
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index f1605833c5cf..913e50bab0a2 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -587,7 +587,7 @@ static void kaweth_usb_receive(struct urb *urb)
struct kaweth_device *kaweth = urb->context;
struct net_device *net = kaweth->net;
int status = urb->status;
-
+ unsigned long flags;
int count = urb->actual_length;
int count2 = urb->transfer_buffer_length;
@@ -619,12 +619,12 @@ static void kaweth_usb_receive(struct urb *urb)
net->stats.rx_errors++;
dev_dbg(dev, "Status was -EOVERFLOW.\n");
}
- spin_lock(&kaweth->device_lock);
+ spin_lock_irqsave(&kaweth->device_lock, flags);
if (IS_BLOCKED(kaweth->status)) {
- spin_unlock(&kaweth->device_lock);
+ spin_unlock_irqrestore(&kaweth->device_lock, flags);
return;
}
- spin_unlock(&kaweth->device_lock);
+ spin_unlock_irqrestore(&kaweth->device_lock, flags);
if(status && status != -EREMOTEIO && count != 1) {
dev_err(&kaweth->intf->dev,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index aeca484a75b8..a9991c5f4736 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1649,7 +1649,7 @@ lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
struct lan78xx_net *dev = netdev_priv(netdev);
/* Read Device/MAC registers */
- for (i = 0; i < (sizeof(lan78xx_regs) / sizeof(u32)); i++)
+ for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
if (!netdev->phydev)
@@ -1723,7 +1723,7 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
"MAC address read from EEPROM");
} else {
/* generate random MAC */
- random_ether_addr(addr);
+ eth_random_addr(addr);
netif_dbg(dev, ifup, dev->net,
"MAC address set to random addr");
}
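lan78xx_get_regs() now sizes its loop with ARRAY_SIZE() instead of an open-coded sizeof division, which stays correct even if the array's element type changes. A compilable sketch; the register offsets below are invented for illustration:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* Offsets are made up; the real list lives in lan78xx_regs[] */
static const unsigned int example_regs[] = { 0x014, 0x018, 0x01c, 0x024 };

int main(void)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(example_regs); i++)
                printf("reg[%zu] = 0x%03x\n", i, example_regs[i]);
        return 0;
}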
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6514c86f043e..f4247b275e09 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1067,7 +1067,7 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
set_register(pegasus, Reg1d, 0);
set_register(pegasus, Reg7b, 1);
- mdelay(100);
+ msleep(100);
if ((pegasus->features & HAS_HOME_PNA) && mii_mode)
set_register(pegasus, Reg7b, 0);
else
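setup_pegasus_II() runs in process context, so the busy-waiting mdelay(100) can become a sleeping msleep(100); the same conversion appears in the sr9700 hunk further down. A kernel-style sketch of the selection rule; example_reset_settle() is hypothetical:

#include <linux/delay.h>
#include <linux/types.h>

/* Hypothetical helper; it only illustrates which delay API fits where */
static void example_reset_settle(bool can_sleep)
{
        if (can_sleep)
                msleep(100);    /* yields the CPU; process context only */
        else
                mdelay(100);    /* busy-waits; the fallback for atomic context */
}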
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2a58607a6aea..97742708460b 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1252,6 +1252,7 @@ static void read_bulk_callback(struct urb *urb)
int status = urb->status;
struct rx_agg *agg;
struct r8152 *tp;
+ unsigned long flags;
agg = urb->context;
if (!agg)
@@ -1281,9 +1282,9 @@ static void read_bulk_callback(struct urb *urb)
if (urb->actual_length < ETH_ZLEN)
break;
- spin_lock(&tp->rx_lock);
+ spin_lock_irqsave(&tp->rx_lock, flags);
list_add_tail(&agg->list, &tp->rx_done);
- spin_unlock(&tp->rx_lock);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
napi_schedule(&tp->napi);
return;
case -ESHUTDOWN:
@@ -1311,6 +1312,7 @@ static void write_bulk_callback(struct urb *urb)
struct net_device *netdev;
struct tx_agg *agg;
struct r8152 *tp;
+ unsigned long flags;
int status = urb->status;
agg = urb->context;
@@ -1332,9 +1334,9 @@ static void write_bulk_callback(struct urb *urb)
stats->tx_bytes += agg->skb_len;
}
- spin_lock(&tp->tx_lock);
+ spin_lock_irqsave(&tp->tx_lock, flags);
list_add_tail(&agg->list, &tp->tx_free);
- spin_unlock(&tp->tx_lock);
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
usb_autopm_put_interface_async(tp->intf);
@@ -1374,6 +1376,7 @@ static void intr_callback(struct urb *urb)
case -ECONNRESET: /* unlink */
case -ESHUTDOWN:
netif_device_detach(tp->netdev);
+ /* fall through */
case -ENOENT:
case -EPROTO:
netif_info(tp, intr, tp->netdev,
@@ -2739,6 +2742,7 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
r8152_mdio_write(tp, MII_BMCR, data);
data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
+ /* fall through */
default:
if (data != PHY_STAT_LAN_ON)
@@ -4411,7 +4415,6 @@ out1:
static int rtl8152_system_suspend(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
- int ret = 0;
netif_device_detach(netdev);
@@ -4426,7 +4429,7 @@ static int rtl8152_system_suspend(struct r8152 *tp)
napi_enable(napi);
}
- return ret;
+ return 0;
}
static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 48ba80a8ca5c..80373a9171dd 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -391,6 +391,7 @@ static void read_bulk_callback(struct urb *urb)
u16 rx_stat;
int status = urb->status;
int result;
+ unsigned long flags;
dev = urb->context;
if (!dev)
@@ -432,9 +433,9 @@ static void read_bulk_callback(struct urb *urb)
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += pkt_len;
- spin_lock(&dev->rx_pool_lock);
+ spin_lock_irqsave(&dev->rx_pool_lock, flags);
skb = pull_skb(dev);
- spin_unlock(&dev->rx_pool_lock);
+ spin_unlock_irqrestore(&dev->rx_pool_lock, flags);
if (!skb)
goto resched;
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 2d316c1b851b..6ac232e52bf7 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -358,7 +358,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
/* power up and reset phy */
sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
/* at least 10ms, here 20ms for safe */
- mdelay(20);
+ msleep(20);
sr_write_reg(dev, SR_PRR, 0);
/* at least 1ms, here 2ms for reading right register */
udelay(2 * 1000);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index a69ad39ee57e..e3202af72df5 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -17,22 +17,47 @@
#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
+#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/ptr_ring.h>
+#include <linux/bpf_trace.h>
#define DRV_NAME "veth"
#define DRV_VERSION "1.0"
+#define VETH_XDP_FLAG BIT(0)
+#define VETH_RING_SIZE 256
+#define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN)
+
+/* Separating two types of XDP xmit */
+#define VETH_XDP_TX BIT(0)
+#define VETH_XDP_REDIR BIT(1)
+
struct pcpu_vstats {
u64 packets;
u64 bytes;
struct u64_stats_sync syncp;
};
+struct veth_rq {
+ struct napi_struct xdp_napi;
+ struct net_device *dev;
+ struct bpf_prog __rcu *xdp_prog;
+ struct xdp_mem_info xdp_mem;
+ bool rx_notify_masked;
+ struct ptr_ring xdp_ring;
+ struct xdp_rxq_info xdp_rxq;
+};
+
struct veth_priv {
struct net_device __rcu *peer;
atomic64_t dropped;
- unsigned requested_headroom;
+ struct bpf_prog *_xdp_prog;
+ struct veth_rq *rq;
+ unsigned int requested_headroom;
};
/*
@@ -98,11 +123,67 @@ static const struct ethtool_ops veth_ethtool_ops = {
.get_link_ksettings = veth_get_link_ksettings,
};
+/* general routines */
+
+static bool veth_is_xdp_frame(void *ptr)
+{
+ return (unsigned long)ptr & VETH_XDP_FLAG;
+}
+
+static void *veth_ptr_to_xdp(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
+}
+
+static void *veth_xdp_to_ptr(void *ptr)
+{
+ return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
+}
+
+static void veth_ptr_free(void *ptr)
+{
+ if (veth_is_xdp_frame(ptr))
+ xdp_return_frame(veth_ptr_to_xdp(ptr));
+ else
+ kfree_skb(ptr);
+}
+
+static void __veth_xdp_flush(struct veth_rq *rq)
+{
+ /* Write ptr_ring before reading rx_notify_masked */
+ smp_mb();
+ if (!rq->rx_notify_masked) {
+ rq->rx_notify_masked = true;
+ napi_schedule(&rq->xdp_napi);
+ }
+}
+
+static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
+{
+ if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+ }
+
+ return NET_RX_SUCCESS;
+}
+
+static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
+ struct veth_rq *rq, bool xdp)
+{
+ return __dev_forward_skb(dev, skb) ?: xdp ?
+ veth_xdp_rx(rq, skb) :
+ netif_rx(skb);
+}
+
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct veth_priv *priv = netdev_priv(dev);
+ struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ struct veth_rq *rq = NULL;
struct net_device *rcv;
int length = skb->len;
+ bool rcv_xdp = false;
+ int rxq;
rcu_read_lock();
rcv = rcu_dereference(priv->peer);
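The veth helpers introduced above multiplex two object types, sk_buff and xdp_frame, through one ptr_ring by tagging bit 0 of the pointer (VETH_XDP_FLAG), which is free because both allocations are at least 2-byte aligned. A userspace sketch of the same single-bit tagging:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define XDP_FLAG 0x1UL  /* bit 0 is free: allocations are >= 2-byte aligned */

static void *tag_xdp(void *p)    { return (void *)((uintptr_t)p | XDP_FLAG); }
static void *untag(void *p)      { return (void *)((uintptr_t)p & ~XDP_FLAG); }
static int is_xdp_frame(void *p) { return (uintptr_t)p & XDP_FLAG; }

int main(void)
{
        long *frame = malloc(sizeof(*frame));   /* stands in for an xdp_frame */
        void *ptr = tag_xdp(frame);

        /* A consumer can now tell the two object types apart */
        assert(is_xdp_frame(ptr) && untag(ptr) == (void *)frame);
        free(frame);
        return 0;
}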
@@ -111,7 +192,16 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
}
- if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
+ rcv_priv = netdev_priv(rcv);
+ rxq = skb_get_queue_mapping(skb);
+ if (rxq < rcv->real_num_rx_queues) {
+ rq = &rcv_priv->rq[rxq];
+ rcv_xdp = rcu_access_pointer(rq->xdp_prog);
+ if (rcv_xdp)
+ skb_record_rx_queue(skb, rxq);
+ }
+
+ if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
u64_stats_update_begin(&stats->syncp);
@@ -122,14 +212,15 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
drop:
atomic64_inc(&priv->dropped);
}
+
+ if (rcv_xdp)
+ __veth_xdp_flush(rq);
+
rcu_read_unlock();
+
return NETDEV_TX_OK;
}
-/*
- * general routines
- */
-
static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
@@ -179,18 +270,502 @@ static void veth_set_multicast_list(struct net_device *dev)
{
}
+static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
+ int buflen)
+{
+ struct sk_buff *skb;
+
+ if (!buflen) {
+ buflen = SKB_DATA_ALIGN(headroom + len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+ skb = build_skb(head, buflen);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, len);
+
+ return skb;
+}
+
+static int veth_select_rxq(struct net_device *dev)
+{
+ return smp_processor_id() % dev->real_num_rx_queues;
+}
+
+static int veth_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ struct net_device *rcv;
+ unsigned int max_len;
+ struct veth_rq *rq;
+ int i, drops = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ rcv = rcu_dereference(priv->peer);
+ if (unlikely(!rcv))
+ return -ENXIO;
+
+ rcv_priv = netdev_priv(rcv);
+ rq = &rcv_priv->rq[veth_select_rxq(rcv)];
+ /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
+ * side. This means an XDP program is loaded on the peer and the peer
+ * device is up.
+ */
+ if (!rcu_access_pointer(rq->xdp_prog))
+ return -ENXIO;
+
+ max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
+
+ spin_lock(&rq->xdp_ring.producer_lock);
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *frame = frames[i];
+ void *ptr = veth_xdp_to_ptr(frame);
+
+ if (unlikely(frame->len > max_len ||
+ __ptr_ring_produce(&rq->xdp_ring, ptr))) {
+ xdp_return_frame_rx_napi(frame);
+ drops++;
+ }
+ }
+ spin_unlock(&rq->xdp_ring.producer_lock);
+
+ if (flags & XDP_XMIT_FLUSH)
+ __veth_xdp_flush(rq);
+
+ return n - drops;
+}
+
+static void veth_xdp_flush(struct net_device *dev)
+{
+ struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ struct net_device *rcv;
+ struct veth_rq *rq;
+
+ rcu_read_lock();
+ rcv = rcu_dereference(priv->peer);
+ if (unlikely(!rcv))
+ goto out;
+
+ rcv_priv = netdev_priv(rcv);
+ rq = &rcv_priv->rq[veth_select_rxq(rcv)];
+ /* Non-NULL xdp_prog ensures xdp_ring is initialized on the receive side */
+ if (unlikely(!rcu_access_pointer(rq->xdp_prog)))
+ goto out;
+
+ __veth_xdp_flush(rq);
+out:
+ rcu_read_unlock();
+}
+
+static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
+{
+ struct xdp_frame *frame = convert_to_xdp_frame(xdp);
+
+ if (unlikely(!frame))
+ return -EOVERFLOW;
+
+ return veth_xdp_xmit(dev, 1, &frame, 0);
+}
+
+static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
+ struct xdp_frame *frame,
+ unsigned int *xdp_xmit)
+{
+ void *hard_start = frame->data - frame->headroom;
+ void *head = hard_start - sizeof(struct xdp_frame);
+ int len = frame->len, delta = 0;
+ struct xdp_frame orig_frame;
+ struct bpf_prog *xdp_prog;
+ unsigned int headroom;
+ struct sk_buff *skb;
+
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (likely(xdp_prog)) {
+ struct xdp_buff xdp;
+ u32 act;
+
+ xdp.data_hard_start = hard_start;
+ xdp.data = frame->data;
+ xdp.data_end = frame->data + frame->len;
+ xdp.data_meta = frame->data - frame->metasize;
+ xdp.rxq = &rq->xdp_rxq;
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ delta = frame->data - xdp.data;
+ len = xdp.data_end - xdp.data;
+ break;
+ case XDP_TX:
+ orig_frame = *frame;
+ xdp.data_hard_start = head;
+ xdp.rxq->mem = frame->mem;
+ if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
+ trace_xdp_exception(rq->dev, xdp_prog, act);
+ frame = &orig_frame;
+ goto err_xdp;
+ }
+ *xdp_xmit |= VETH_XDP_TX;
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_REDIRECT:
+ orig_frame = *frame;
+ xdp.data_hard_start = head;
+ xdp.rxq->mem = frame->mem;
+ if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
+ frame = &orig_frame;
+ goto err_xdp;
+ }
+ *xdp_xmit |= VETH_XDP_REDIR;
+ rcu_read_unlock();
+ goto xdp_xmit;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ trace_xdp_exception(rq->dev, xdp_prog, act);
+ case XDP_DROP:
+ goto err_xdp;
+ }
+ }
+ rcu_read_unlock();
+
+ headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
+ skb = veth_build_skb(head, headroom, len, 0);
+ if (!skb) {
+ xdp_return_frame(frame);
+ goto err;
+ }
+
+ xdp_scrub_frame(frame);
+ skb->protocol = eth_type_trans(skb, rq->dev);
+err:
+ return skb;
+err_xdp:
+ rcu_read_unlock();
+ xdp_return_frame(frame);
+xdp_xmit:
+ return NULL;
+}
+
+static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
+ unsigned int *xdp_xmit)
+{
+ u32 pktlen, headroom, act, metalen;
+ void *orig_data, *orig_data_end;
+ struct bpf_prog *xdp_prog;
+ int mac_len, delta, off;
+ struct xdp_buff xdp;
+
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (unlikely(!xdp_prog)) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ mac_len = skb->data - skb_mac_header(skb);
+ pktlen = skb->len + mac_len;
+ headroom = skb_headroom(skb) - mac_len;
+
+ if (skb_shared(skb) || skb_head_is_locked(skb) ||
+ skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
+ struct sk_buff *nskb;
+ int size, head_off;
+ void *head, *start;
+ struct page *page;
+
+ size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ if (size > PAGE_SIZE)
+ goto drop;
+
+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page)
+ goto drop;
+
+ head = page_address(page);
+ start = head + VETH_XDP_HEADROOM;
+ if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
+ page_frag_free(head);
+ goto drop;
+ }
+
+ nskb = veth_build_skb(head,
+ VETH_XDP_HEADROOM + mac_len, skb->len,
+ PAGE_SIZE);
+ if (!nskb) {
+ page_frag_free(head);
+ goto drop;
+ }
+
+ skb_copy_header(nskb, skb);
+ head_off = skb_headroom(nskb) - skb_headroom(skb);
+ skb_headers_offset_update(nskb, head_off);
+ if (skb->sk)
+ skb_set_owner_w(nskb, skb->sk);
+ consume_skb(skb);
+ skb = nskb;
+ }
+
+ xdp.data_hard_start = skb->head;
+ xdp.data = skb_mac_header(skb);
+ xdp.data_end = xdp.data + pktlen;
+ xdp.data_meta = xdp.data;
+ xdp.rxq = &rq->xdp_rxq;
+ orig_data = xdp.data;
+ orig_data_end = xdp.data_end;
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ get_page(virt_to_page(xdp.data));
+ consume_skb(skb);
+ xdp.rxq->mem = rq->xdp_mem;
+ if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
+ trace_xdp_exception(rq->dev, xdp_prog, act);
+ goto err_xdp;
+ }
+ *xdp_xmit |= VETH_XDP_TX;
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_REDIRECT:
+ get_page(virt_to_page(xdp.data));
+ consume_skb(skb);
+ xdp.rxq->mem = rq->xdp_mem;
+ if (xdp_do_redirect(rq->dev, &xdp, xdp_prog))
+ goto err_xdp;
+ *xdp_xmit |= VETH_XDP_REDIR;
+ rcu_read_unlock();
+ goto xdp_xmit;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ trace_xdp_exception(rq->dev, xdp_prog, act);
+ case XDP_DROP:
+ goto drop;
+ }
+ rcu_read_unlock();
+
+ delta = orig_data - xdp.data;
+ off = mac_len + delta;
+ if (off > 0)
+ __skb_push(skb, off);
+ else if (off < 0)
+ __skb_pull(skb, -off);
+ skb->mac_header -= delta;
+ off = xdp.data_end - orig_data_end;
+ if (off != 0)
+ __skb_put(skb, off);
+ skb->protocol = eth_type_trans(skb, rq->dev);
+
+ metalen = xdp.data - xdp.data_meta;
+ if (metalen)
+ skb_metadata_set(skb, metalen);
+out:
+ return skb;
+drop:
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return NULL;
+err_xdp:
+ rcu_read_unlock();
+ page_frag_free(xdp.data);
+xdp_xmit:
+ return NULL;
+}
+
+static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit)
+{
+ int i, done = 0;
+
+ for (i = 0; i < budget; i++) {
+ void *ptr = __ptr_ring_consume(&rq->xdp_ring);
+ struct sk_buff *skb;
+
+ if (!ptr)
+ break;
+
+ if (veth_is_xdp_frame(ptr)) {
+ skb = veth_xdp_rcv_one(rq, veth_ptr_to_xdp(ptr),
+ xdp_xmit);
+ } else {
+ skb = veth_xdp_rcv_skb(rq, ptr, xdp_xmit);
+ }
+
+ if (skb)
+ napi_gro_receive(&rq->xdp_napi, skb);
+
+ done++;
+ }
+
+ return done;
+}
+
+static int veth_poll(struct napi_struct *napi, int budget)
+{
+ struct veth_rq *rq =
+ container_of(napi, struct veth_rq, xdp_napi);
+ unsigned int xdp_xmit = 0;
+ int done;
+
+ xdp_set_return_frame_no_direct();
+ done = veth_xdp_rcv(rq, budget, &xdp_xmit);
+
+ if (done < budget && napi_complete_done(napi, done)) {
+ /* Write rx_notify_masked before reading ptr_ring */
+ smp_store_mb(rq->rx_notify_masked, false);
+ if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
+ rq->rx_notify_masked = true;
+ napi_schedule(&rq->xdp_napi);
+ }
+ }
+
+ if (xdp_xmit & VETH_XDP_TX)
+ veth_xdp_flush(rq->dev);
+ if (xdp_xmit & VETH_XDP_REDIR)
+ xdp_do_flush_map();
+ xdp_clear_return_frame_no_direct();
+
+ return done;
+}
+
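veth_poll() above pairs with __veth_xdp_flush(): the producer orders its ring write before reading rx_notify_masked with smp_mb(), and the consumer's smp_store_mb() orders clearing rx_notify_masked before re-checking the ring, so an entry queued concurrently cannot be missed by both sides. A userspace rendition using C11 fences in place of the kernel barriers:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool ring_nonempty;       /* stands in for the ptr_ring state */
static atomic_bool notify_masked;       /* stands in for rx_notify_masked */

static void producer_flush(void)
{
        atomic_store(&ring_nonempty, true);             /* queue an entry */
        atomic_thread_fence(memory_order_seq_cst);      /* the smp_mb() */
        if (!atomic_load(&notify_masked)) {
                atomic_store(&notify_masked, true);
                /* napi_schedule() would go here */
        }
}

static void consumer_poll_complete(void)
{
        /* smp_store_mb(): store, full barrier, then re-check the ring */
        atomic_store(&notify_masked, false);
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load(&ring_nonempty)) {
                atomic_store(&notify_masked, true);
                /* reschedule NAPI rather than going idle */
        }
}

int main(void)
{
        producer_flush();
        consumer_poll_complete();
        return 0;
}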
+static int veth_napi_add(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int err, i;
+
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
+ if (err)
+ goto err_xdp_ring;
+ }
+
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
+ napi_enable(&rq->xdp_napi);
+ }
+
+ return 0;
+err_xdp_ring:
+ for (i--; i >= 0; i--)
+ ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
+
+ return err;
+}
+
+static void veth_napi_del(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ napi_disable(&rq->xdp_napi);
+ napi_hash_del(&rq->xdp_napi);
+ }
+ synchronize_net();
+
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ netif_napi_del(&rq->xdp_napi);
+ rq->rx_notify_masked = false;
+ ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
+ }
+}
+
+static int veth_enable_xdp(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int err, i;
+
+ if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i);
+ if (err < 0)
+ goto err_rxq_reg;
+
+ err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err < 0)
+ goto err_reg_mem;
+
+ /* Save original mem info as it can be overwritten */
+ rq->xdp_mem = rq->xdp_rxq.mem;
+ }
+
+ err = veth_napi_add(dev);
+ if (err)
+ goto err_rxq_reg;
+ }
+
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
+
+ return 0;
+err_reg_mem:
+ xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
+err_rxq_reg:
+ for (i--; i >= 0; i--)
+ xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
+
+ return err;
+}
+
+static void veth_disable_xdp(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
+ veth_napi_del(dev);
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ rq->xdp_rxq.mem = rq->xdp_mem;
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ }
+}
+
static int veth_open(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer = rtnl_dereference(priv->peer);
+ int err;
if (!peer)
return -ENOTCONN;
+ if (priv->_xdp_prog) {
+ err = veth_enable_xdp(dev);
+ if (err)
+ return err;
+ }
+
if (peer->flags & IFF_UP) {
netif_carrier_on(dev);
netif_carrier_on(peer);
}
+
return 0;
}
@@ -203,6 +778,9 @@ static int veth_close(struct net_device *dev)
if (peer)
netif_carrier_off(peer);
+ if (priv->_xdp_prog)
+ veth_disable_xdp(dev);
+
return 0;
}
@@ -228,7 +806,7 @@ static void veth_dev_free(struct net_device *dev)
static void veth_poll_controller(struct net_device *dev)
{
/* veth only receives frames when its peer sends one
- * Since it's a synchronous operation, we are guaranteed
+ * Since its delivery does not depend on irqs being disabled, we are guaranteed
* never to have pending data when we poll for it so
* there is nothing to do here.
*
@@ -253,6 +831,23 @@ static int veth_get_iflink(const struct net_device *dev)
return iflink;
}
+static netdev_features_t veth_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer;
+
+ peer = rtnl_dereference(priv->peer);
+ if (peer) {
+ struct veth_priv *peer_priv = netdev_priv(peer);
+
+ if (peer_priv->_xdp_prog)
+ features &= ~NETIF_F_GSO_SOFTWARE;
+ }
+
+ return features;
+}
+
static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
struct veth_priv *peer_priv, *priv = netdev_priv(dev);
@@ -276,6 +871,103 @@ out:
rcu_read_unlock();
}
+static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+ struct net_device *peer;
+ unsigned int max_mtu;
+ int err;
+
+ old_prog = priv->_xdp_prog;
+ priv->_xdp_prog = prog;
+ peer = rtnl_dereference(priv->peer);
+
+ if (prog) {
+ if (!peer) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
+ err = -ENOTCONN;
+ goto err;
+ }
+
+ max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
+ peer->hard_header_len -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ if (peer->mtu > max_mtu) {
+ NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
+ err = -ERANGE;
+ goto err;
+ }
+
+ if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
+ NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
+ err = -ENOSPC;
+ goto err;
+ }
+
+ if (dev->flags & IFF_UP) {
+ err = veth_enable_xdp(dev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
+ goto err;
+ }
+ }
+
+ if (!old_prog) {
+ peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
+ peer->max_mtu = max_mtu;
+ }
+ }
+
+ if (old_prog) {
+ if (!prog) {
+ if (dev->flags & IFF_UP)
+ veth_disable_xdp(dev);
+
+ if (peer) {
+ peer->hw_features |= NETIF_F_GSO_SOFTWARE;
+ peer->max_mtu = ETH_MAX_MTU;
+ }
+ }
+ bpf_prog_put(old_prog);
+ }
+
+ if ((!!old_prog ^ !!prog) && peer)
+ netdev_update_features(peer);
+
+ return 0;
+err:
+ priv->_xdp_prog = old_prog;
+
+ return err;
+}
+
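veth_xdp_set() above rejects peers whose MTU would not leave room for the XDP headroom plus the shared-info trailer within a single page. A worked instance of the bound, assuming a 4096-byte page, the generic NET_IP_ALIGN of 2, ETH_HLEN of 14 and roughly 320 bytes for the aligned skb_shared_info (the exact value varies by kernel and architecture):

#include <stdio.h>

int main(void)
{
        int page_size = 4096;           /* PAGE_SIZE, assumed */
        int xdp_headroom = 256 + 2;     /* XDP_PACKET_HEADROOM + generic NET_IP_ALIGN */
        int hard_header_len = 14;       /* ETH_HLEN */
        int shinfo = 320;               /* aligned sizeof(struct skb_shared_info), approx. */

        printf("max_mtu = %d\n",
               page_size - xdp_headroom - hard_header_len - shinfo);   /* 3504 here */
        return 0;
}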
+static u32 veth_xdp_query(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ const struct bpf_prog *xdp_prog;
+
+ xdp_prog = priv->_xdp_prog;
+ if (xdp_prog)
+ return xdp_prog->aux->id;
+
+ return 0;
+}
+
+static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return veth_xdp_set(dev, xdp->prog, xdp->extack);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = veth_xdp_query(dev);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -288,8 +980,11 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_poll_controller = veth_poll_controller,
#endif
.ndo_get_iflink = veth_get_iflink,
+ .ndo_fix_features = veth_fix_features,
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = veth_set_rx_headroom,
+ .ndo_bpf = veth_xdp,
+ .ndo_xdp_xmit = veth_xdp_xmit,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
@@ -345,13 +1040,31 @@ static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
+static int veth_alloc_queues(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+
+ priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
+ if (!priv->rq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void veth_free_queues(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+
+ kfree(priv->rq);
+}
+
static struct rtnl_link_ops veth_link_ops;
static int veth_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- int err;
+ int err, i;
struct net_device *peer;
struct veth_priv *priv;
char ifname[IFNAMSIZ];
@@ -404,6 +1117,12 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
return PTR_ERR(peer);
}
+ err = veth_alloc_queues(peer);
+ if (err) {
+ put_net(net);
+ goto err_peer_alloc_queues;
+ }
+
if (!ifmp || !tbp[IFLA_ADDRESS])
eth_hw_addr_random(peer);
@@ -432,6 +1151,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
* should be re-allocated
*/
+ err = veth_alloc_queues(dev);
+ if (err)
+ goto err_alloc_queues;
+
if (tb[IFLA_ADDRESS] == NULL)
eth_hw_addr_random(dev);
@@ -451,19 +1174,28 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
*/
priv = netdev_priv(dev);
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ priv->rq[i].dev = dev;
rcu_assign_pointer(priv->peer, peer);
priv = netdev_priv(peer);
+ for (i = 0; i < peer->real_num_rx_queues; i++)
+ priv->rq[i].dev = peer;
rcu_assign_pointer(priv->peer, dev);
+
return 0;
err_register_dev:
+ veth_free_queues(dev);
+err_alloc_queues:
/* nothing to do */
err_configure_peer:
unregister_netdevice(peer);
return err;
err_register_peer:
+ veth_free_queues(peer);
+err_peer_alloc_queues:
free_netdev(peer);
return err;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2b6ec927809e..765920905226 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -30,7 +30,7 @@
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
-#include <linux/netdevice.h>
+#include <linux/kernel.h>
#include <linux/pci.h>
#include <net/route.h>
#include <net/xdp.h>
@@ -82,25 +82,43 @@ struct virtnet_sq_stats {
struct u64_stats_sync syncp;
u64 packets;
u64 bytes;
+ u64 xdp_tx;
+ u64 xdp_tx_drops;
+ u64 kicks;
};
struct virtnet_rq_stats {
struct u64_stats_sync syncp;
u64 packets;
u64 bytes;
+ u64 drops;
+ u64 xdp_packets;
+ u64 xdp_tx;
+ u64 xdp_redirects;
+ u64 xdp_drops;
+ u64 kicks;
};
#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
- { "packets", VIRTNET_SQ_STAT(packets) },
- { "bytes", VIRTNET_SQ_STAT(bytes) },
+ { "packets", VIRTNET_SQ_STAT(packets) },
+ { "bytes", VIRTNET_SQ_STAT(bytes) },
+ { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
+ { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
+ { "kicks", VIRTNET_SQ_STAT(kicks) },
};
static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
- { "packets", VIRTNET_RQ_STAT(packets) },
- { "bytes", VIRTNET_RQ_STAT(bytes) },
+ { "packets", VIRTNET_RQ_STAT(packets) },
+ { "bytes", VIRTNET_RQ_STAT(bytes) },
+ { "drops", VIRTNET_RQ_STAT(drops) },
+ { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) },
+ { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) },
+ { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
+ { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) },
+ { "kicks", VIRTNET_RQ_STAT(kicks) },
};
#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
@@ -447,22 +465,12 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
return 0;
}
-static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi,
- struct xdp_frame *xdpf)
+static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
{
- struct xdp_frame *xdpf_sent;
- struct send_queue *sq;
- unsigned int len;
unsigned int qp;
qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
- sq = &vi->sq[qp];
-
- /* Free up any pending old buffers before queueing new ones. */
- while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
- xdp_return_frame(xdpf_sent);
-
- return __virtnet_xdp_xmit_one(vi, sq, xdpf);
+ return &vi->sq[qp];
}
static int virtnet_xdp_xmit(struct net_device *dev,
@@ -474,23 +482,28 @@ static int virtnet_xdp_xmit(struct net_device *dev,
struct bpf_prog *xdp_prog;
struct send_queue *sq;
unsigned int len;
- unsigned int qp;
int drops = 0;
- int err;
+ int kicks = 0;
+ int ret, err;
int i;
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
- return -EINVAL;
+ sq = virtnet_xdp_sq(vi);
- qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
- sq = &vi->sq[qp];
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
+ ret = -EINVAL;
+ drops = n;
+ goto out;
+ }
/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
* indicate XDP resources have been successfully allocated.
*/
xdp_prog = rcu_dereference(rq->xdp_prog);
- if (!xdp_prog)
- return -ENXIO;
+ if (!xdp_prog) {
+ ret = -ENXIO;
+ drops = n;
+ goto out;
+ }
/* Free up any pending old buffers before queueing new ones. */
while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
@@ -505,11 +518,20 @@ static int virtnet_xdp_xmit(struct net_device *dev,
drops++;
}
}
+ ret = n - drops;
- if (flags & XDP_XMIT_FLUSH)
- virtqueue_kick(sq->vq);
+ if (flags & XDP_XMIT_FLUSH) {
+ if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
+ kicks = 1;
+ }
+out:
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.xdp_tx += n;
+ sq->stats.xdp_tx_drops += drops;
+ sq->stats.kicks += kicks;
+ u64_stats_update_end(&sq->stats.syncp);
- return n - drops;
+ return ret;
}
static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
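virtnet_xdp_xmit() above now open-codes virtqueue_kick() as virtqueue_kick_prepare() plus virtqueue_notify(), so the new kicks counter only grows when the host was actually notified, not when the notification was suppressed by the virtqueue's event logic. A kernel-style sketch isolating the accounting shape; example_kick() is hypothetical:

#include <linux/types.h>
#include <linux/virtio.h>

/* Hypothetical helper; counts only notifications that reached the host */
static bool example_kick(struct virtqueue *vq, u64 *kicks)
{
        if (virtqueue_kick_prepare(vq) && virtqueue_notify(vq)) {
                (*kicks)++;
                return true;
        }
        return false;
}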
@@ -587,7 +609,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
void *buf, void *ctx,
unsigned int len,
unsigned int *xdp_xmit,
- unsigned int *rbytes)
+ struct virtnet_rq_stats *stats)
{
struct sk_buff *skb;
struct bpf_prog *xdp_prog;
@@ -602,7 +624,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
int err;
len -= vi->hdr_len;
- *rbytes += len;
+ stats->bytes += len;
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -644,6 +666,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ stats->xdp_packets++;
switch (act) {
case XDP_PASS:
@@ -652,11 +675,12 @@ static struct sk_buff *receive_small(struct net_device *dev,
len = xdp.data_end - xdp.data;
break;
case XDP_TX:
+ stats->xdp_tx++;
xdpf = convert_to_xdp_frame(&xdp);
if (unlikely(!xdpf))
goto err_xdp;
- err = __virtnet_xdp_tx_xmit(vi, xdpf);
- if (unlikely(err)) {
+ err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
+ if (unlikely(err < 0)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
goto err_xdp;
}
@@ -664,6 +688,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
+ stats->xdp_redirects++;
err = xdp_do_redirect(dev, &xdp, xdp_prog);
if (err)
goto err_xdp;
@@ -672,6 +697,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
case XDP_DROP:
@@ -697,7 +723,8 @@ err:
err_xdp:
rcu_read_unlock();
- dev->stats.rx_dropped++;
+ stats->xdp_drops++;
+ stats->drops++;
put_page(page);
xdp_xmit:
return NULL;
@@ -708,19 +735,19 @@ static struct sk_buff *receive_big(struct net_device *dev,
struct receive_queue *rq,
void *buf,
unsigned int len,
- unsigned int *rbytes)
+ struct virtnet_rq_stats *stats)
{
struct page *page = buf;
struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
- *rbytes += len - vi->hdr_len;
+ stats->bytes += len - vi->hdr_len;
if (unlikely(!skb))
goto err;
return skb;
err:
- dev->stats.rx_dropped++;
+ stats->drops++;
give_pages(rq, page);
return NULL;
}
@@ -732,7 +759,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
void *ctx,
unsigned int len,
unsigned int *xdp_xmit,
- unsigned int *rbytes)
+ struct virtnet_rq_stats *stats)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -745,7 +772,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
int err;
head_skb = NULL;
- *rbytes += len - vi->hdr_len;
+ stats->bytes += len - vi->hdr_len;
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -794,6 +821,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ stats->xdp_packets++;
switch (act) {
case XDP_PASS:
@@ -818,11 +846,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
break;
case XDP_TX:
+ stats->xdp_tx++;
xdpf = convert_to_xdp_frame(&xdp);
if (unlikely(!xdpf))
goto err_xdp;
- err = __virtnet_xdp_tx_xmit(vi, xdpf);
- if (unlikely(err)) {
+ err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
+ if (unlikely(err < 0)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
if (unlikely(xdp_page != page))
put_page(xdp_page);
@@ -834,6 +863,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
+ stats->xdp_redirects++;
err = xdp_do_redirect(dev, &xdp, xdp_prog);
if (err) {
if (unlikely(xdp_page != page))
@@ -847,8 +877,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
+ /* fall through */
case XDP_DROP:
if (unlikely(xdp_page != page))
__free_pages(xdp_page, 0);
@@ -883,7 +915,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto err_buf;
}
- *rbytes += len;
+ stats->bytes += len;
page = virt_to_head_page(buf);
truesize = mergeable_ctx_to_truesize(ctx);
@@ -929,6 +961,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
err_xdp:
rcu_read_unlock();
+ stats->xdp_drops++;
err_skb:
put_page(page);
while (num_buf-- > 1) {
@@ -939,12 +972,12 @@ err_skb:
dev->stats.rx_length_errors++;
break;
}
- *rbytes += len;
+ stats->bytes += len;
page = virt_to_head_page(buf);
put_page(page);
}
err_buf:
- dev->stats.rx_dropped++;
+ stats->drops++;
dev_kfree_skb(head_skb);
xdp_xmit:
return NULL;
@@ -952,7 +985,8 @@ xdp_xmit:
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
void *buf, unsigned int len, void **ctx,
- unsigned int *xdp_xmit, unsigned int *rbytes)
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
{
struct net_device *dev = vi->dev;
struct sk_buff *skb;
@@ -973,11 +1007,11 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
if (vi->mergeable_rx_bufs)
skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
- rbytes);
+ stats);
else if (vi->big_packets)
- skb = receive_big(dev, vi, rq, buf, len, rbytes);
+ skb = receive_big(dev, vi, rq, buf, len, stats);
else
- skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, rbytes);
+ skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
if (unlikely(!skb))
return;
@@ -1171,7 +1205,12 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
if (err)
break;
} while (rq->vq->num_free);
- virtqueue_kick(rq->vq);
+ if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
+ u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.kicks++;
+ u64_stats_update_end(&rq->stats.syncp);
+ }
+
return !oom;
}
@@ -1246,22 +1285,24 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
unsigned int *xdp_xmit)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
- unsigned int len, received = 0, bytes = 0;
+ struct virtnet_rq_stats stats = {};
+ unsigned int len;
void *buf;
+ int i;
if (!vi->big_packets || vi->mergeable_rx_bufs) {
void *ctx;
- while (received < budget &&
+ while (stats.packets < budget &&
(buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
- receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &bytes);
- received++;
+ receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
+ stats.packets++;
}
} else {
- while (received < budget &&
+ while (stats.packets < budget &&
(buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
- receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &bytes);
- received++;
+ receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
+ stats.packets++;
}
}
@@ -1271,11 +1312,16 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
}
u64_stats_update_begin(&rq->stats.syncp);
- rq->stats.bytes += bytes;
- rq->stats.packets += received;
+ for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
+ size_t offset = virtnet_rq_stats_desc[i].offset;
+ u64 *item;
+
+ item = (u64 *)((u8 *)&rq->stats + offset);
+ *item += *(u64 *)((u8 *)&stats + offset);
+ }
u64_stats_update_end(&rq->stats.syncp);
- return received;
+ return stats.packets;
}
static void free_old_xmit_skbs(struct send_queue *sq)
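virtnet_receive() above folds a stack-local virtnet_rq_stats snapshot into the long-lived counters by walking the same offsetof()-based descriptor table used for the ethtool strings, so a new counter only needs one descriptor entry. A userspace sketch of the merge loop:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct example_rq_stats { uint64_t packets, bytes, drops; };

/* One descriptor per counter, as with virtnet_rq_stats_desc[] above */
static const size_t stats_offs[] = {
        offsetof(struct example_rq_stats, packets),
        offsetof(struct example_rq_stats, bytes),
        offsetof(struct example_rq_stats, drops),
};

int main(void)
{
        struct example_rq_stats total = { 100, 5000, 1 };
        struct example_rq_stats delta = { 7, 640, 0 };
        size_t i;

        for (i = 0; i < sizeof(stats_offs) / sizeof(stats_offs[0]); i++) {
                uint64_t *dst = (uint64_t *)((uint8_t *)&total + stats_offs[i]);

                *dst += *(uint64_t *)((uint8_t *)&delta + stats_offs[i]);
        }
        printf("packets=%llu bytes=%llu drops=%llu\n",
               (unsigned long long)total.packets,
               (unsigned long long)total.bytes,
               (unsigned long long)total.drops);
        return 0;
}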
@@ -1331,7 +1377,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
container_of(napi, struct receive_queue, napi);
struct virtnet_info *vi = rq->vq->vdev->priv;
struct send_queue *sq;
- unsigned int received, qp;
+ unsigned int received;
unsigned int xdp_xmit = 0;
virtnet_poll_cleantx(rq);
@@ -1346,10 +1392,12 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
xdp_do_flush_map();
if (xdp_xmit & VIRTIO_XDP_TX) {
- qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
- smp_processor_id();
- sq = &vi->sq[qp];
- virtqueue_kick(sq->vq);
+ sq = virtnet_xdp_sq(vi);
+ if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.kicks++;
+ u64_stats_update_end(&sq->stats.syncp);
+ }
}
return received;
@@ -1511,8 +1559,13 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (kick || netif_xmit_stopped(txq))
- virtqueue_kick(sq->vq);
+ if (kick || netif_xmit_stopped(txq)) {
+ if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.kicks++;
+ u64_stats_update_end(&sq->stats.syncp);
+ }
+ }
return NETDEV_TX_OK;
}
@@ -1616,7 +1669,7 @@ static void virtnet_stats(struct net_device *dev,
int i;
for (i = 0; i < vi->max_queue_pairs; i++) {
- u64 tpackets, tbytes, rpackets, rbytes;
+ u64 tpackets, tbytes, rpackets, rbytes, rdrops;
struct receive_queue *rq = &vi->rq[i];
struct send_queue *sq = &vi->sq[i];
@@ -1630,17 +1683,18 @@ static void virtnet_stats(struct net_device *dev,
start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
rpackets = rq->stats.packets;
rbytes = rq->stats.bytes;
+ rdrops = rq->stats.drops;
} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
tot->rx_packets += rpackets;
tot->tx_packets += tpackets;
tot->rx_bytes += rbytes;
tot->tx_bytes += tbytes;
+ tot->rx_dropped += rdrops;
}
tot->tx_dropped = dev->stats.tx_dropped;
tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
- tot->rx_dropped = dev->stats.rx_dropped;
tot->rx_length_errors = dev->stats.rx_length_errors;
tot->rx_frame_errors = dev->stats.rx_frame_errors;
}
@@ -1824,8 +1878,8 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
if (vi->affinity_hint_set) {
for (i = 0; i < vi->max_queue_pairs; i++) {
- virtqueue_set_affinity(vi->rq[i].vq, -1);
- virtqueue_set_affinity(vi->sq[i].vq, -1);
+ virtqueue_set_affinity(vi->rq[i].vq, NULL);
+ virtqueue_set_affinity(vi->sq[i].vq, NULL);
}
vi->affinity_hint_set = false;
@@ -1834,28 +1888,41 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
static void virtnet_set_affinity(struct virtnet_info *vi)
{
- int i;
- int cpu;
+ cpumask_var_t mask;
+ int stragglers;
+ int group_size;
+ int i, j, cpu;
+ int num_cpu;
+ int stride;
- /* In multiqueue mode, when the number of cpu is equal to the number of
- * queue pairs, we let the queue pairs to be private to one cpu by
- * setting the affinity hint to eliminate the contention.
- */
- if (vi->curr_queue_pairs == 1 ||
- vi->max_queue_pairs != num_online_cpus()) {
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
virtnet_clean_affinity(vi, -1);
return;
}
- i = 0;
- for_each_online_cpu(cpu) {
- virtqueue_set_affinity(vi->rq[i].vq, cpu);
- virtqueue_set_affinity(vi->sq[i].vq, cpu);
- netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
- i++;
+ num_cpu = num_online_cpus();
+ stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
+ stragglers = num_cpu >= vi->curr_queue_pairs ?
+ num_cpu % vi->curr_queue_pairs :
+ 0;
+ cpu = cpumask_next(-1, cpu_online_mask);
+
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ group_size = stride + (i < stragglers ? 1 : 0);
+
+ for (j = 0; j < group_size; j++) {
+ cpumask_set_cpu(cpu, mask);
+ cpu = cpumask_next_wrap(cpu, cpu_online_mask,
+ nr_cpu_ids, false);
+ }
+ virtqueue_set_affinity(vi->rq[i].vq, mask);
+ virtqueue_set_affinity(vi->sq[i].vq, mask);
+ __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false);
+ cpumask_clear(mask);
}
vi->affinity_hint_set = true;
+ free_cpumask_var(mask);
}
static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
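virtnet_set_affinity() above replaces the old one-CPU-per-queue-pair hint, which bailed out unless CPU and queue counts matched exactly, with contiguous CPU groups per queue: a base stride of num_cpu / queues, with the first num_cpu % queues groups absorbing one straggler CPU each. A userspace sketch of the grouping arithmetic under an assumed topology:

#include <stdio.h>

int main(void)
{
        int num_cpu = 6, queues = 4, cpu = 0;   /* assumed topology */
        int stride = num_cpu / queues ? num_cpu / queues : 1;
        int stragglers = num_cpu >= queues ? num_cpu % queues : 0;
        int i, j;

        for (i = 0; i < queues; i++) {
                int group_size = stride + (i < stragglers ? 1 : 0);

                printf("queue pair %d:", i);
                for (j = 0; j < group_size; j++, cpu = (cpu + 1) % num_cpu)
                        printf(" cpu%d", cpu);
                printf("\n");
        }
        return 0;
}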
@@ -2348,7 +2415,6 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
case XDP_QUERY_PROG:
xdp->prog_id = virtnet_xdp_query(dev);
- xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e857cb3335f6..ababba37d735 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -568,11 +568,12 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
return vh;
}
-static struct sk_buff **vxlan_gro_receive(struct sock *sk,
- struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *vxlan_gro_receive(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
{
- struct sk_buff *p, **pp = NULL;
+ struct sk_buff *pp = NULL;
+ struct sk_buff *p;
struct vxlanhdr *vh, *vh2;
unsigned int hlen, off_vx;
int flush = 1;
@@ -607,7 +608,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
- for (p = *head; p; p = p->next) {
+ list_for_each_entry(p, head, list) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
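The vxlan_gro_receive() conversion above follows the core GRO change from sk_buff ** chains to list_head-based bucket lists, so held packets are walked with list_for_each_entry() and the double-pointer return value disappears. A kernel-style sketch of the new iteration; example_walk_gro_list() is hypothetical:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper; it shows only the list-based GRO walk */
static void example_walk_gro_list(struct list_head *head)
{
        struct sk_buff *p;

        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;
                /* header comparison against the arriving skb goes here */
        }
}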
@@ -2154,7 +2155,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
vni = tunnel_id_to_key32(info->key.tun_id);
ifindex = 0;
dst_cache = &info->dst_cache;
- if (info->options_len)
+ if (info->options_len &&
+ info->key.tun_flags & TUNNEL_VXLAN_OPT)
md = ip_tunnel_info_opts(info);
ttl = info->key.ttl;
tos = info->key.tos;
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index bd46b2552980..2a3f0f1a2b0a 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2134,7 +2134,6 @@ static void
fst_openport(struct fst_port_info *port)
{
int signals;
- int txq_length;
/* Only init things if card is actually running. This allows open to
* succeed for downloads etc.
@@ -2161,7 +2160,6 @@ fst_openport(struct fst_port_info *port)
else
netif_carrier_off(port_to_dev(port));
- txq_length = port->txqe - port->txqs;
port->txqe = 0;
port->txqs = 0;
}
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 9b09c9d0d0fb..5f0366a125e2 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -192,7 +192,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
ALIGNMENT_OF_UCC_HDLC_PRAM);
- if (priv->ucc_pram_offset < 0) {
+ if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
ret = -ENOMEM;
goto free_tx_bd;
@@ -230,14 +230,14 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
/* Alloc riptr, tiptr */
riptr = qe_muram_alloc(32, 32);
- if (riptr < 0) {
+ if (IS_ERR_VALUE(riptr)) {
dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
ret = -ENOMEM;
goto free_tx_skbuff;
}
tiptr = qe_muram_alloc(32, 32);
- if (tiptr < 0) {
+ if (IS_ERR_VALUE(tiptr)) {
dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
ret = -ENOMEM;
goto free_riptr;
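The uhdlc_init() fixes above address a sign bug: qe_muram_alloc() returns an unsigned offset, so a "< 0" test can never fire and allocation failures were silently ignored. IS_ERR_VALUE() instead tests for the error-code range near ULONG_MAX. A userspace sketch of the distinction:

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
        unsigned long ok = 0x1000;                /* plausible MURAM offset */
        unsigned long bad = (unsigned long)-12;   /* -ENOMEM wrapped to unsigned */

        /* "bad < 0" would be constant-false here; the range test is not */
        printf("ok:  IS_ERR_VALUE=%d\n", (int)IS_ERR_VALUE(ok));    /* 0 */
        printf("bad: IS_ERR_VALUE=%d\n", (int)IS_ERR_VALUE(bad));   /* 1 */
        return 0;
}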
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index b3a1b6f5c406..4907453f17f5 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1491,7 +1491,6 @@ static int lmc_rx(struct net_device *dev)
lmc_softc_t *sc = dev_to_sc(dev);
int i;
int rx_work_limit = LMC_RXDESCS;
- unsigned int next_rx;
int rxIntLoopCnt; /* debug -baz */
int localLengthErrCnt = 0;
long stat;
@@ -1505,7 +1504,6 @@ static int lmc_rx(struct net_device *dev)
rxIntLoopCnt = 0; /* debug -baz */
i = sc->lmc_next_rx % LMC_RXDESCS;
- next_rx = sc->lmc_next_rx;
while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
{
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 4c417903e9be..094cea775d0c 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -566,13 +566,12 @@ static void i2400m_msg_ack_hook(struct i2400m *i2400m,
{
int result;
struct device *dev = i2400m_dev(i2400m);
- unsigned ack_type, ack_status;
+ unsigned int ack_type;
char strerr[32];
/* Chew on the message, we might need some information from
* here */
ack_type = le16_to_cpu(l3l4_hdr->type);
- ack_status = le16_to_cpu(l3l4_hdr->status);
switch (ack_type) {
case I2400M_MT_CMD_ENTER_POWERSAVE:
/* This is just left here for the sake of example, as
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index a89b5685e68b..e9fc168bb734 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -1552,7 +1552,6 @@ int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
int ret, itr;
struct device *dev = i2400m_dev(i2400m);
struct i2400m_fw *i2400m_fw;
- const struct i2400m_bcf_hdr *bcf; /* Firmware data */
const struct firmware *fw;
const char *fw_name;
@@ -1574,7 +1573,7 @@ int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
}
/* Load firmware files to memory. */
- for (itr = 0, bcf = NULL, ret = -ENOENT; ; itr++) {
+ for (itr = 0, ret = -ENOENT; ; itr++) {
fw_name = i2400m->bus_fw_names[itr];
if (fw_name == NULL) {
dev_err(dev, "Could not find a usable firmware image\n");
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index a654687b5fa2..9ab3f0fdfea4 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -535,14 +535,12 @@ void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb,
{
struct net_device *net_dev = i2400m->wimax_dev.net_dev;
struct device *dev = i2400m_dev(i2400m);
- int protocol;
d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n",
i2400m, skb, skb->len, cs);
switch(cs) {
case I2400M_CS_IPV4_0:
case I2400M_CS_IPV4:
- protocol = ETH_P_IP;
i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
skb->data - ETH_HLEN,
cpu_to_be16(ETH_P_IP));
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
index 502c346aa790..529ebca1e9e1 100644
--- a/drivers/net/wimax/i2400m/usb-fw.c
+++ b/drivers/net/wimax/i2400m/usb-fw.c
@@ -130,12 +130,12 @@ retry:
dev_err(dev, "BM-CMD: too many stalls in "
"URB; resetting device\n");
usb_queue_reset_device(i2400mu->usb_iface);
- /* fallthrough */
} else {
usb_clear_halt(i2400mu->usb_dev, pipe);
msleep(10); /* give the device some time */
goto retry;
}
+ /* fall through */
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* just ignore it */
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c
index 99ef81b3d5a5..3a0e7226768a 100644
--- a/drivers/net/wimax/i2400m/usb-tx.c
+++ b/drivers/net/wimax/i2400m/usb-tx.c
@@ -131,12 +131,12 @@ retry:
dev_err(dev, "BM-CMD: too many stalls in "
"URB; resetting device\n");
usb_queue_reset_device(i2400mu->usb_iface);
- /* fallthrough */
} else {
usb_clear_halt(i2400mu->usb_dev, usb_pipe);
msleep(10); /* give the device some time */
goto retry;
}
+ /* fall through */
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* just ignore it */
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 84f071ac0d84..54ff5930126c 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -1,15 +1,15 @@
config ATH10K
- tristate "Atheros 802.11ac wireless cards support"
- depends on MAC80211 && HAS_DMA
+ tristate "Atheros 802.11ac wireless cards support"
+ depends on MAC80211 && HAS_DMA
select ATH_COMMON
select CRC32
select WANT_DEV_COREDUMP
select ATH10K_CE
- ---help---
- This module adds support for wireless adapters based on
- Atheros IEEE 802.11ac family of chipsets.
+ ---help---
+ This module adds support for wireless adapters based on
+ Atheros IEEE 802.11ac family of chipsets.
- If you choose to build a module, it'll be called ath10k.
+ If you choose to build a module, it'll be called ath10k.
config ATH10K_CE
bool
@@ -41,12 +41,12 @@ config ATH10K_USB
work in progress and will not fully work.
config ATH10K_SNOC
- tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
- depends on ATH10K && ARCH_QCOM
- ---help---
- This module adds support for integrated WCN3990 chip connected
- to system NOC(SNOC). Currently work in progress and will not
- fully work.
+ tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
+ depends on ATH10K && ARCH_QCOM
+ ---help---
+ This module adds support for integrated WCN3990 chip connected
+ to system NOC(SNOC). Currently work in progress and will not
+ fully work.
config ATH10K_DEBUG
bool "Atheros ath10k debugging"
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index fa39ffffd34d..c9bd0e2b5db7 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -133,11 +133,8 @@ static void ath10k_ahb_clock_deinit(struct ath10k *ar)
static int ath10k_ahb_clock_enable(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
- struct device *dev;
int ret;
- dev = &ar_ahb->pdev->dev;
-
if (IS_ERR_OR_NULL(ar_ahb->cmd_clk) ||
IS_ERR_OR_NULL(ar_ahb->ref_clk) ||
IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
@@ -451,12 +448,10 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
struct platform_device *pdev;
- struct device *dev;
struct resource *res;
int ret;
pdev = ar_ahb->pdev;
- dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 3b96a43fbda4..18c709c484e7 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1512,7 +1512,7 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
if (ret) {
dma_free_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc) +
+ (nentries * sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space_unaligned,
base_addr);
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index dbeffaef6024..b8fb5382dede 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -383,4 +383,46 @@ static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar)
return CE_INTERRUPT_SUMMARY;
}
+/* Host software's Copy Engine configuration. */
+#define CE_ATTR_FLAGS 0
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+ __le32 pipenum;
+ __le32 pipedir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE 0
+#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT 3 /* bidirectional */
+
+/* Establish a mapping between a service/direction and a pipe. */
+struct service_to_pipe {
+ __le32 service_id;
+ __le32 pipedir;
+ __le32 pipenum;
+};
+
#endif /* _CE_H_ */
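The block above relocates the shared Copy Engine pipe definitions out of pci.h (removed further down) into ce.h, so non-PCI buses such as the SNOC glue in this series can use them. A hypothetical host-side entry, with illustrative values only, showing how the relocated pieces fit together; everything is little-endian because the target consumes the table:

/* uses struct ce_pipe_config / struct service_to_pipe from ce.h above */
static const struct ce_pipe_config example_pipe = {
	.pipenum    = __cpu_to_le32(3),
	.pipedir    = __cpu_to_le32(PIPEDIR_OUT),	/* host -> target */
	.nentries   = __cpu_to_le32(32),
	.nbytes_max = __cpu_to_le32(2048),
	.flags      = __cpu_to_le32(CE_ATTR_FLAGS),
	.reserved   = __cpu_to_le32(0),
};

static const struct service_to_pipe example_map = {
	.service_id = __cpu_to_le32(0x100),		/* illustrative ID */
	.pipedir    = __cpu_to_le32(PIPEDIR_OUT),
	.pipenum    = __cpu_to_le32(3),
};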
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index ad4f6e3c0737..c40cd129afe7 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -41,10 +41,8 @@ static bool uart_print;
static bool skip_otp;
static bool rawmode;
-/* Enable ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA
- * by default.
- */
-unsigned long ath10k_coredump_mask = 0x3;
+unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) |
+ BIT(ATH10K_FW_CRASH_DUMP_CE_DATA);
/* FIXME: most of these should be readonly */
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
@@ -82,6 +80,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -113,6 +112,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -145,6 +145,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -176,6 +177,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -207,6 +209,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -238,6 +241,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -272,6 +276,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.target_cpu_freq = 176000000,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -309,6 +314,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 11,
@@ -347,6 +353,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
+ .spectral_bin_offset = 8,
/* Can do only 2x2 VHT160 or 80+80. 1560Mbps is 4x4 80Mhz
* or 2x2 160Mhz, long-guard-interval.
@@ -388,6 +395,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
+ .spectral_bin_offset = 8,
/* Can do only 1x1 VHT160 or 80+80. 780Mbps is 2x2 80Mhz or
* 1x1 160Mhz, long-guard-interval.
@@ -423,6 +431,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -456,6 +465,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.target_cpu_freq = 176000000,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
@@ -494,6 +504,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
+ .spectral_bin_offset = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 11,
@@ -2084,6 +2095,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+ ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC;
break;
case ATH10K_FW_WMI_OP_VERSION_10_4:
ar->max_num_peers = TARGET_10_4_NUM_PEERS;
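The coredump_mask hunk at the top of this core.c diff is a readability change: assuming ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA occupy positions 0 and 1 of the coredump enum, the BIT()-based initializer equals the old 0x3 literal, only self-describing. A one-line check under that assumption:

#define EX_BIT(n) (1UL << (n))	/* same shape as the kernel's BIT() */

_Static_assert((EX_BIT(0) | EX_BIT(1)) == 0x3UL,
	       "the BIT-based default equals the old 0x3 mask");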
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 951dbdd1c9eb..9feea02e7d37 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -48,7 +48,8 @@
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
#define ATH10K_CONNECTION_LOSS_HZ (3 * HZ)
-#define ATH10K_NUM_CHANS 40
+#define ATH10K_NUM_CHANS 41
+#define ATH10K_MAX_5G_CHAN 173
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
@@ -185,6 +186,11 @@ struct ath10k_wmi {
const struct wmi_ops *ops;
const struct wmi_peer_flags_map *peer_flags;
+ u32 mgmt_max_num_pending_tx;
+
+ /* Protected by data_lock */
+ struct idr mgmt_pending_tx;
+
u32 num_mem_chunks;
u32 rx_decap_mode;
struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 0d98c93a3aba..0baaad90b8d1 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1727,7 +1727,9 @@ int ath10k_debug_start(struct ath10k *ar)
ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
}
- if (ar->debug.nf_cal_period) {
+ if (ar->debug.nf_cal_period &&
+ !test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
ret = ath10k_wmi_pdev_set_param(ar,
ar->wmi.pdev_param->cal_period,
ar->debug.nf_cal_period);
@@ -1744,7 +1746,9 @@ void ath10k_debug_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
- ath10k_debug_cal_data_fetch(ar);
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features))
+ ath10k_debug_cal_data_fetch(ar);
/* Must not use _sync to avoid deadlock, we do that in
* ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
@@ -2293,6 +2297,52 @@ static const struct file_operations fops_tpc_stats_final = {
.llseek = default_llseek,
};
+static ssize_t ath10k_write_warm_hw_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ bool val;
+
+ if (kstrtobool_from_user(user_buf, count, &val))
+ return -EFAULT;
+
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!(test_bit(WMI_SERVICE_RESET_CHIP, ar->wmi.svc_map)))
+ ath10k_warn(ar, "wmi service for reset chip is not available\n");
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pdev_reset,
+ WMI_RST_MODE_WARM_RESET);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to enable warm hw reset: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_warm_hw_reset = {
+ .write = ath10k_write_warm_hw_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
@@ -2367,15 +2417,18 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("fw_dbglog", 0600, ar->debug.debugfs_phy, ar,
&fops_fw_dbglog);
- debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar,
- &fops_cal_data);
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar,
+ &fops_cal_data);
+
+ debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar,
+ &fops_nf_cal_period);
+ }
debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar,
&fops_ani_enable);
- debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar,
- &fops_nf_cal_period);
-
if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy,
ar, &fops_simulate_radar);
@@ -2418,6 +2471,9 @@ int ath10k_debug_register(struct ath10k *ar)
ar->debug.debugfs_phy, ar,
&fops_tpc_stats_final);
+ debugfs_create_file("warm_hw_reset", 0600, ar->debug.debugfs_phy, ar,
+ &fops_warm_hw_reset);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 8902720b4e49..331b8d558791 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -274,7 +274,7 @@ ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
struct ath10k *ar = htc->ar;
int bundle_cnt = len / sizeof(*report);
- if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
+ if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) {
ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
bundle_cnt);
return -EINVAL;
@@ -655,7 +655,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
htc->max_msgs_per_htc_bundle =
min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
- HTC_HOST_MAX_MSG_PER_BUNDLE);
+ HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
ath10k_dbg(ar, ATH10K_DBG_HTC,
"Extended ready message. RX bundle size: %d\n",
htc->max_msgs_per_htc_bundle);
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 34877597dd6a..51fda6c23f69 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -50,7 +50,8 @@ struct ath10k;
* 4-byte aligned.
*/
-#define HTC_HOST_MAX_MSG_PER_BUNDLE 8
+#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8
+#define HTC_HOST_MAX_MSG_PER_TX_BUNDLE 16
enum ath10k_htc_tx_flags {
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
@@ -58,6 +59,7 @@ enum ath10k_htc_tx_flags {
};
enum ath10k_htc_rx_flags {
+ ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01,
ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
};
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index c72d8af122a2..4d1cd90d6d27 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -268,11 +268,12 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
htt->rx_ring.fill_cnt));
- spin_unlock_bh(&htt->rx_ring.lock);
if (ret)
ath10k_htt_rx_ring_free(htt);
+ spin_unlock_bh(&htt->rx_ring.lock);
+
return ret;
}
@@ -284,7 +285,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
skb_queue_purge(&htt->rx_in_ord_compl_q);
skb_queue_purge(&htt->tx_fetch_ind_q);
+ spin_lock_bh(&htt->rx_ring.lock);
ath10k_htt_rx_ring_free(htt);
+ spin_unlock_bh(&htt->rx_ring.lock);
dma_free_coherent(htt->ar->dev,
ath10k_htt_get_rx_ring_size(htt),
@@ -1089,7 +1092,7 @@ static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
status = IEEE80211_SKB_RXCB(skb);
*status = *rx_status;
- __skb_queue_tail(&ar->htt.rx_msdus_q, skb);
+ skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}
static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
@@ -2810,7 +2813,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
- __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
+ skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
return false;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
@@ -2874,7 +2877,7 @@ static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
if (skb_queue_empty(&ar->htt.rx_msdus_q))
break;
- skb = __skb_dequeue(&ar->htt.rx_msdus_q);
+ skb = skb_dequeue(&ar->htt.rx_msdus_q);
if (!skb)
break;
ath10k_process_rx(ar, skb);
@@ -2905,7 +2908,7 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
goto exit;
}
- while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
+ while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_in_ord_ind(ar, skb);
spin_unlock_bh(&htt->rx_ring.lock);
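The __skb_queue_tail()/__skb_dequeue() call sites above become their locked counterparts because producer and consumer now run in different contexts. The locked helpers take the queue's own spinlock internally, which is exactly what the double-underscore variants leave to the caller; a sketch of the contract using only the core skbuff API:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void ex_enqueue_locked(struct sk_buff_head *q, struct sk_buff *skb)
{
	skb_queue_tail(q, skb);			/* takes q->lock internally */
}

static void ex_enqueue_unlocked(struct sk_buff_head *q, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__skb_queue_tail(q, skb);		/* caller supplies the lock */
	spin_unlock_irqrestore(&q->lock, flags);
}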
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 5d8b97a0ccaa..7cff0d52338f 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -208,10 +208,10 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
struct ath10k *ar = htt->ar;
int ret;
- lockdep_assert_held(&htt->tx_lock);
-
+ spin_lock_bh(&htt->tx_lock);
ret = idr_alloc(&htt->pending_tx, skb, 0,
htt->max_num_pending_tx, GFP_ATOMIC);
+ spin_unlock_bh(&htt->tx_lock);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
@@ -1056,7 +1056,7 @@ static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
return HTT_DATA_TX_EXT_TID_MGMT;
else if (cb->flags & ATH10K_SKB_F_QOS)
- return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
else
return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}
@@ -1077,9 +1077,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
- spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
- spin_unlock_bh(&htt->tx_lock);
if (res < 0)
goto err;
@@ -1161,9 +1159,7 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
struct htt_msdu_ext_desc *ext_desc = NULL;
struct htt_msdu_ext_desc *ext_desc_t = NULL;
- spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
- spin_unlock_bh(&htt->tx_lock);
if (res < 0)
goto err;
@@ -1202,7 +1198,7 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- /* pass through */
+ /* fall through */
case ATH10K_HW_TXRX_ETHERNET:
if (ar->hw_params.continuous_frag_desc) {
ext_desc_t = htt->frag_desc.vaddr_desc_32;
@@ -1363,9 +1359,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
struct htt_msdu_ext_desc_64 *ext_desc = NULL;
struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
- spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
- spin_unlock_bh(&htt->tx_lock);
if (res < 0)
goto err;
@@ -1404,7 +1398,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- /* pass through */
+ /* fall through */
case ATH10K_HW_TXRX_ETHERNET:
if (ar->hw_params.continuous_frag_desc) {
ext_desc_t = htt->frag_desc.vaddr_desc_64;
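The tid fix in ath10k_htt_tx_get_tid() above swaps modulo for a bitwise AND. IEEE80211_QOS_CTL_TID_MASK is 0x000f, and taking `priority % 0xf` wraps at 15 while `priority & 0xf` does not, so the old code mapped TID 15 to TID 0. A small standalone check:

#include <assert.h>

#define QOS_TID_MASK 0x000f	/* value of IEEE80211_QOS_CTL_TID_MASK */

int main(void)
{
	assert((7 % QOS_TID_MASK) == (7 & QOS_TID_MASK));	/* agree below 15 */
	assert((15 % QOS_TID_MASK) == 0);			/* '%' wraps TID 15 */
	assert((15 & QOS_TID_MASK) == 15);			/* '&' preserves it */
	return 0;
}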
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 23467e9fefeb..977f79ebb4fd 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -586,6 +586,9 @@ struct ath10k_hw_params {
/* target supporting retention restore on ddr */
bool rri_on_ddr;
+
+ /* Number of bytes to be the offset for each FFT sample */
+ int spectral_bin_offset;
};
struct htt_rx_desc;
@@ -696,6 +699,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
#define TARGET_TLV_NUM_WOW_PATTERNS 22
+#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50)
/* Target specific defines for WMI-HL-1.0 firmware */
#define TARGET_HL_10_TLV_NUM_PEERS 14
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 836e0a47b94a..90f9372dec25 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -101,6 +101,8 @@ static struct ieee80211_rate ath10k_rates_rev2[] = {
#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
+#define ath10k_wmi_legacy_rates ath10k_rates
+
static bool ath10k_mac_bitrate_is_cck(int bitrate)
{
switch (bitrate) {
@@ -3085,6 +3087,13 @@ static int ath10k_update_channel_list(struct ath10k *ar)
passive = channel->flags & IEEE80211_CHAN_NO_IR;
ch->passive = passive;
+ /* The firmware ignores the "radar" flag of the channel and
+ * actively scans with Probe Requests on "Radar detection"/DFS
+ * channels that are not marked as "available".
+ */
+ ch->passive |= ch->chan_radar;
+
ch->freq = channel->center_freq;
ch->band_center_freq1 = channel->center_freq;
ch->min_power = 0;
@@ -4026,7 +4035,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
drv_priv);
/* Prevent aggressive sta/tid taking over tx queue */
- max = 16;
+ max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
ret = 0;
while (ath10k_mac_tx_can_push(hw, txq) && max--) {
ret = ath10k_mac_tx_push_txq(hw, txq);
@@ -4047,6 +4056,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
rcu_read_unlock();
spin_unlock_bh(&ar->txqs_lock);
}
+EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
/************/
/* Scanning */
@@ -4287,7 +4297,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
struct ieee80211_txq *f_txq;
struct ath10k_txq *f_artxq;
int ret = 0;
- int max = 16;
+ int max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
spin_lock_bh(&ar->txqs_lock);
if (list_empty(&artxq->list))
@@ -5438,8 +5448,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
- int ret = 0;
+ struct cfg80211_chan_def def;
u32 vdev_param, pdev_param, slottime, preamble;
+ u16 bitrate, hw_value;
+ u8 rate;
+ int rateidx, ret = 0;
+ enum nl80211_band band;
mutex_lock(&ar->conf_mutex);
@@ -5607,6 +5621,44 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
+ if (changed & BSS_CHANGED_MCAST_RATE &&
+ !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) {
+ band = def.chan->band;
+ rateidx = vif->bss_conf.mcast_rate[band] - 1;
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+
+ bitrate = ath10k_wmi_legacy_rates[rateidx].bitrate;
+ hw_value = ath10k_wmi_legacy_rates[rateidx].hw_value;
+ if (ath10k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ rate = ATH10K_HW_RATECODE(hw_value, 0, preamble);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d mcast_rate %x\n",
+ arvif->vdev_id, rate);
+
+ vdev_param = ar->wmi.vdev_param->mcast_data_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath10k_warn(ar,
+ "failed to set mcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ vdev_param = ar->wmi.vdev_param->bcast_data_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath10k_warn(ar,
+ "failed to set bcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -6062,13 +6114,13 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
mode = chan_to_phymode(&def);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
- sta->addr, bw, mode);
+ sta->addr, bw, mode);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
- WMI_PEER_PHYMODE, mode);
+ WMI_PEER_PHYMODE, mode);
if (err) {
ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
- sta->addr, mode, err);
+ sta->addr, mode, err);
goto exit;
}
@@ -6934,7 +6986,6 @@ ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
const struct cfg80211_bitrate_mask *mask,
u8 *rate, u8 *nss)
{
- struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
int rate_idx;
int i;
u16 bitrate;
@@ -6944,8 +6995,11 @@ ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
if (hweight32(mask->control[band].legacy) == 1) {
rate_idx = ffs(mask->control[band].legacy) - 1;
- hw_rate = sband->bitrates[rate_idx].hw_value;
- bitrate = sband->bitrates[rate_idx].bitrate;
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+
+ hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value;
+ bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate;
if (ath10k_mac_bitrate_is_cck(bitrate))
preamble = WMI_RATE_PREAMBLE_CCK;
@@ -7737,7 +7791,7 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
return;
sinfo->rx_duration = arsta->rx_duration;
- sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
if (!arsta->txrate.legacy && !arsta->txrate.nss)
return;
@@ -7750,7 +7804,7 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
sinfo->txrate.bw = arsta->txrate.bw;
}
sinfo->txrate.flags = arsta->txrate.flags;
- sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
static const struct ieee80211_ops ath10k_ops = {
@@ -7870,6 +7924,9 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
CHAN5G(161, 5805, 0),
CHAN5G(165, 5825, 0),
CHAN5G(169, 5845, 0),
+ CHAN5G(173, 5865, 0),
+ /* If you add more, you may need to change ATH10K_MAX_5G_CHAN */
+ /* And you will definitely need to change ATH10K_NUM_CHANS in core.h */
};
struct ath10k *ath10k_mac_create(size_t priv_size)
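In the new BSS_CHANGED_MCAST_RATE block above, the nl80211 multicast rate is a 1-based index into the combined legacy rate table, and 11a-capable (5 GHz) targets skip the CCK entries at the front. A compact sketch of that index handling; the value 4 for the first OFDM slot is an assumption mirroring the usual 4-CCK + 8-OFDM table layout (the authoritative constant is ATH10K_MAC_FIRST_OFDM_RATE_IDX):

#define EX_FIRST_OFDM_RATE_IDX 4	/* CCK 1/2/5.5/11 occupy slots 0..3 */

static int example_rateidx(int mcast_rate, bool is_5ghz_capable)
{
	int idx = mcast_rate - 1;	/* bss_conf.mcast_rate[] is 1-based */

	if (is_5ghz_capable)
		idx += EX_FIRST_OFDM_RATE_IDX;
	return idx;			/* index into the legacy rate table */
}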
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index e52fd83156b6..0ed436657108 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -86,48 +86,6 @@ struct pcie_state {
/* PCIE_CONFIG_FLAG definitions */
#define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001
-/* Host software's Copy Engine configuration. */
-#define CE_ATTR_FLAGS 0
-
-/*
- * Configuration information for a Copy Engine pipe.
- * Passed from Host to Target during startup (one per CE).
- *
- * NOTE: Structure is shared between Host software and Target firmware!
- */
-struct ce_pipe_config {
- __le32 pipenum;
- __le32 pipedir;
- __le32 nentries;
- __le32 nbytes_max;
- __le32 flags;
- __le32 reserved;
-};
-
-/*
- * Directions for interconnect pipe configuration.
- * These definitions may be used during configuration and are shared
- * between Host and Target.
- *
- * Pipe Directions are relative to the Host, so PIPEDIR_IN means
- * "coming IN over air through Target to Host" as with a WiFi Rx operation.
- * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
- * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
- * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
- * over the interconnect.
- */
-#define PIPEDIR_NONE 0
-#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
-#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
-#define PIPEDIR_INOUT 3 /* bidirectional */
-
-/* Establish a mapping between a service/direction and a pipe. */
-struct service_to_pipe {
- __le32 service_id;
- __le32 pipedir;
- __le32 pipenum;
-};
-
/* Per-pipe state. */
struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index d612ce8c9cff..7f61591ce0de 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -30,6 +30,7 @@
#include "debug.h"
#include "hif.h"
#include "htc.h"
+#include "mac.h"
#include "targaddrs.h"
#include "trace.h"
#include "sdio.h"
@@ -396,6 +397,7 @@ static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
int ret;
payload_len = le16_to_cpu(htc_hdr->len);
+ skb->len = payload_len + sizeof(struct ath10k_htc_hdr);
if (trailer_present) {
trailer = skb->data + sizeof(*htc_hdr) +
@@ -434,12 +436,14 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
enum ath10k_htc_ep_id id;
int ret, i, *n_lookahead_local;
u32 *lookaheads_local;
+ int lookahead_idx = 0;
for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
lookaheads_local = lookaheads;
n_lookahead_local = n_lookahead;
- id = ((struct ath10k_htc_hdr *)&lookaheads[i])->eid;
+ id = ((struct ath10k_htc_hdr *)
+ &lookaheads[lookahead_idx++])->eid;
if (id >= ATH10K_HTC_EP_COUNT) {
ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
@@ -462,6 +466,7 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
/* Only read lookahead's from RX trailers
* for the last packet in a bundle.
*/
+ lookahead_idx--;
lookaheads_local = NULL;
n_lookahead_local = NULL;
}
@@ -505,11 +510,11 @@ static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
*bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
- if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE) {
+ if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
ath10k_warn(ar,
"HTC bundle length %u exceeds maximum %u\n",
le16_to_cpu(htc_hdr->len),
- HTC_HOST_MAX_MSG_PER_BUNDLE);
+ HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
return -ENOMEM;
}
@@ -600,6 +605,9 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
* ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
* packet skb's have been allocated in the previous step.
*/
+ if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
+ full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
+
ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
act_len,
full_len,
@@ -1342,6 +1350,8 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func)
break;
} while (time_before(jiffies, timeout) && !done);
+ ath10k_mac_tx_push_pending(ar);
+
sdio_claim_host(ar_sdio->func);
if (ret && ret != -ECANCELED)
diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h
index 4ff7b545293b..453eb6263143 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.h
+++ b/drivers/net/wireless/ath/ath10k/sdio.h
@@ -96,14 +96,14 @@
* way:
*
* Let's assume that each packet in a bundle of the maximum bundle size
- * (HTC_HOST_MAX_MSG_PER_BUNDLE) has the HTC header bundle count set
- * to the maximum value (HTC_HOST_MAX_MSG_PER_BUNDLE).
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set
+ * to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
*
* In this case the driver must allocate
- * (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE) skb's.
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE) skb's.
*/
#define ATH10K_SDIO_MAX_RX_MSGS \
- (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE)
+ (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE)
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
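Spelled out, the worst case the comment above describes: each of up to 8 packets in one received bundle may itself announce a follow-up bundle of 8 in its HTC header, so the pool must hold 8 * 8 = 64 preallocated rx skbs:

#define EX_MAX_MSG_PER_RX_BUNDLE 8	/* HTC_HOST_MAX_MSG_PER_RX_BUNDLE */

_Static_assert(EX_MAX_MSG_PER_RX_BUNDLE * EX_MAX_MSG_PER_RX_BUNDLE == 64,
	       "ATH10K_SDIO_MAX_RX_MSGS works out to 64 skbs");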
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index a3a7042fe13a..fa1843a7e0fd 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -14,19 +14,20 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/module.h>
+#include <linux/clk.h>
#include <linux/kernel.h>
-#include "debug.h"
-#include "hif.h"
-#include "htc.h"
-#include "ce.h"
-#include "snoc.h"
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <linux/clk.h>
-#define WCN3990_CE_ATTR_FLAGS 0
+
+#include "ce.h"
+#include "debug.h"
+#include "hif.h"
+#include "htc.h"
+#include "snoc.h"
+
#define ATH10K_SNOC_RX_POST_RETRY_MS 50
#define CE_POLL_PIPE 4
@@ -449,7 +450,7 @@ static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
- struct ath10k_pci *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
+ struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
struct ath10k *ar = ar_snoc->ar;
ath10k_snoc_rx_post(ar);
@@ -820,7 +821,7 @@ static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
.write32 = ath10k_snoc_write32,
};
-int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
+static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
int i;
@@ -868,7 +869,7 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
return done;
}
-void ath10k_snoc_init_napi(struct ath10k *ar)
+static void ath10k_snoc_init_napi(struct ath10k *ar)
{
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
ATH10K_NAPI_BUDGET);
@@ -1303,13 +1304,13 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
ar->ce_priv = &ar_snoc->ce;
- ath10k_snoc_resource_init(ar);
+ ret = ath10k_snoc_resource_init(ar);
if (ret) {
ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
goto err_core_destroy;
}
- ath10k_snoc_setup_resource(ar);
+ ret = ath10k_snoc_setup_resource(ar);
if (ret) {
ath10k_warn(ar, "failed to setup resource: %d\n", ret);
goto err_core_destroy;
@@ -1388,25 +1389,7 @@ static struct platform_driver ath10k_snoc_driver = {
.of_match_table = ath10k_snoc_dt_match,
},
};
-
-static int __init ath10k_snoc_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&ath10k_snoc_driver);
- if (ret)
- pr_err("failed to register ath10k snoc driver: %d\n",
- ret);
-
- return ret;
-}
-module_init(ath10k_snoc_init);
-
-static void __exit ath10k_snoc_exit(void)
-{
- platform_driver_unregister(&ath10k_snoc_driver);
-}
-module_exit(ath10k_snoc_exit);
+module_platform_driver(ath10k_snoc_driver);
MODULE_AUTHOR("Qualcomm");
MODULE_LICENSE("Dual BSD/GPL");
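The module_platform_driver() conversion above drops the hand-rolled init/exit pair; the macro generates an equivalent one (and the custom error message is redundant, since the driver core already logs registration failures). Roughly what it expands to, simplified from the real module_driver()-based definition:

#define EX_MODULE_PLATFORM_DRIVER(drv)			\
static int __init drv##_init(void)			\
{							\
	return platform_driver_register(&(drv));	\
}							\
module_init(drv##_init);				\
static void __exit drv##_exit(void)			\
{							\
	platform_driver_unregister(&(drv));		\
}							\
module_exit(drv##_exit);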
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index 05dc98f46ccd..f9e530189d48 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -19,7 +19,6 @@
#include "hw.h"
#include "ce.h"
-#include "pci.h"
struct ath10k_snoc_drv_priv {
enum ath10k_hw_rev hw_rev;
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index af6995de7e00..653b6d013207 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -145,7 +145,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
fft_sample->noise = __cpu_to_be16(phyerr->nf_chains[chain_idx]);
bins = (u8 *)fftr;
- bins += sizeof(*fftr);
+ bins += sizeof(*fftr) + ar->hw_params.spectral_bin_offset;
fft_sample->tsf = __cpu_to_be64(tsf);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 5ecce04005d2..7fd63bbf8e24 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -31,6 +31,8 @@ struct wmi_ops {
struct wmi_scan_ev_arg *arg);
int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg);
+ int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_ch_info_ev_arg *arg);
int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
@@ -262,6 +264,16 @@ ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
}
static inline int
+ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_mgmt_tx_compl)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
+}
+
+static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg)
{
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 8c49a26fc571..cdc1e64d52ad 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -618,6 +618,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_TDLS_PEER_EVENTID:
ath10k_wmi_event_tdls_peer(ar, skb);
break;
+ case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
+ ath10k_wmi_event_mgmt_tx_compl(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@@ -659,6 +662,31 @@ static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
return 0;
}
+static int
+ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_mgmt_tx_compl_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
+
+ arg->desc_id = ev->desc_id;
+ arg->status = ev->status;
+ arg->pdev_id = ev->pdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg)
@@ -1076,6 +1104,8 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = reg->eeprom_rd;
+ arg->low_5ghz_chan = reg->low_5ghz_chan;
+ arg->high_5ghz_chan = reg->high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = svc_bmap;
arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
@@ -1584,6 +1614,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->keep_alive_pattern_size = __cpu_to_le32(0);
cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
+ cfg->wmi_send_separate = __cpu_to_le32(0);
+ cfg->num_ocb_vdevs = __cpu_to_le32(0);
+ cfg->num_ocb_channels = __cpu_to_le32(0);
+ cfg->num_ocb_schedules = __cpu_to_le32(0);
+ cfg->host_capab = __cpu_to_le32(0);
ath10k_wmi_put_host_mem_chunks(ar, chunks);
@@ -1614,10 +1649,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
ie_len = roundup(arg->ie_len, 4);
len = (sizeof(*tlv) + sizeof(*cmd)) +
- (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
- (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
- (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
- (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
+ sizeof(*tlv) + chan_len +
+ sizeof(*tlv) + ssid_len +
+ sizeof(*tlv) + bssid_len +
+ sizeof(*tlv) + ie_len;
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
@@ -1785,7 +1820,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
{
struct wmi_tlv_vdev_start_cmd *cmd;
struct wmi_channel *ch;
- struct wmi_p2p_noa_descriptor *noa;
struct wmi_tlv *tlv;
struct sk_buff *skb;
size_t len;
@@ -1843,7 +1877,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = 0;
- noa = (void *)tlv->value;
/* Note: This is a nested TLV containing:
* [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
@@ -2605,6 +2638,30 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
return skb;
}
+static int
+ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
+ dma_addr_t paddr)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+ int ret;
+
+ pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
+ if (!pkt_addr)
+ return -ENOMEM;
+
+ pkt_addr->vaddr = skb;
+ pkt_addr->paddr = paddr;
+
+ spin_lock_bh(&ar->data_lock);
+ ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
+ wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret);
+ return ret;
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
dma_addr_t paddr)
@@ -2616,9 +2673,9 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
u32 buf_len = msdu->len;
struct wmi_tlv *tlv;
struct sk_buff *skb;
+ int len, desc_id;
u32 vdev_id;
void *ptr;
- int len;
if (!cb->vif)
return ERR_PTR(-EINVAL);
@@ -2649,13 +2706,17 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
if (!skb)
return ERR_PTR(-ENOMEM);
+ desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
+ if (desc_id < 0)
+ goto err_free_skb;
+
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
- cmd->desc_id = 0;
+ cmd->desc_id = __cpu_to_le32(desc_id);
cmd->chanfreq = 0;
cmd->buf_len = __cpu_to_le32(buf_len);
cmd->frame_len = __cpu_to_le32(msdu->len);
@@ -2672,6 +2733,10 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
memcpy(ptr, msdu->data, buf_len);
return skb;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+ return ERR_PTR(desc_id);
}
static struct sk_buff *
@@ -2700,7 +2765,8 @@ ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
static struct sk_buff *
ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
- u32 log_level) {
+ u32 log_level)
+{
struct wmi_tlv_dbglog_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
@@ -3835,6 +3901,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
+ .pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
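ath10k_wmi_mgmt_tx_alloc_msdu_id() above pairs idr_alloc() with ar->data_lock and GFP_ATOMIC because the tx path can run in BH context, the same shape the htt_tx changes push into ath10k_htt_tx_alloc_msdu_id(). The generic pattern, sketched with only the stock IDR API (names are placeholders):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ex_lock);
static DEFINE_IDR(ex_idr);

static int ex_track(void *cookie, int max_ids)
{
	int id;

	spin_lock_bh(&ex_lock);
	id = idr_alloc(&ex_idr, cookie, 0, max_ids, GFP_ATOMIC);
	spin_unlock_bh(&ex_lock);
	return id;	/* negative errno on failure */
}

static void *ex_complete(int id)
{
	void *cookie;

	spin_lock_bh(&ex_lock);
	cookie = idr_remove(&ex_idr, id);	/* returns the tracked pointer */
	spin_unlock_bh(&ex_lock);
	return cookie;
}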
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 3e1e340cd834..4f0c20c90642 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -320,6 +320,7 @@ enum wmi_tlv_event_id {
WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_TLV_MGMT_TX_COMPLETION_EVENTID,
WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
WMI_TLV_BA_RSP_SSN_EVENTID,
@@ -1573,6 +1574,17 @@ struct wmi_tlv {
u8 value[0];
} __packed;
+struct ath10k_mgmt_tx_pkt_addr {
+ void *vaddr;
+ dma_addr_t paddr;
+};
+
+struct wmi_tlv_mgmt_tx_compl_ev {
+ __le32 desc_id;
+ __le32 status;
+ __le32 pdev_id;
+};
+
#define WMI_TLV_MGMT_RX_NUM_RSSI 4
struct wmi_tlv_mgmt_rx_ev {
@@ -1670,6 +1682,11 @@ struct wmi_tlv_resource_config {
__le32 keep_alive_pattern_size;
__le32 max_tdls_concurrent_sleep_sta;
__le32 max_tdls_concurrent_buffer_sta;
+ __le32 wmi_send_separate;
+ __le32 num_ocb_vdevs;
+ __le32 num_ocb_channels;
+ __le32 num_ocb_schedules;
+ __le32 host_capab;
} __packed;
struct wmi_tlv_init_cmd {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index f97ab795cf2e..fd612d2905b0 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1333,7 +1333,7 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
- .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET,
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
@@ -2313,6 +2313,59 @@ static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
return true;
}
+static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
+ u32 status)
+{
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *msdu;
+ int ret;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id);
+ if (!pkt_addr) {
+ ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
+ desc_id);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ msdu = pkt_addr->vaddr;
+ dma_unmap_single(ar->dev, pkt_addr->paddr,
+ msdu->len, DMA_TO_DEVICE);
+ info = IEEE80211_SKB_CB(msdu);
+ info->flags |= status;
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ ret = 0;
+
+out:
+ idr_remove(&wmi->mgmt_pending_tx, desc_id);
+ spin_unlock_bh(&ar->data_lock);
+ return ret;
+}
+
+int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
+ int ret;
+
+ ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
+ return ret;
+ }
+
+ wmi_process_mgmt_tx_comp(ar, __le32_to_cpu(arg.desc_id),
+ __le32_to_cpu(arg.status));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event mgmt tx completion\n");
+
+ return 0;
+}
+
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_ev_arg arg = {};
@@ -2366,7 +2419,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
*/
if (channel >= 1 && channel <= 14) {
status->band = NL80211_BAND_2GHZ;
- } else if (channel >= 36 && channel <= 169) {
+ } else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) {
status->band = NL80211_BAND_5GHZ;
} else {
/* Shouldn't happen unless list of advertised channels to
@@ -4602,10 +4655,6 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
ev = (struct wmi_pdev_tpc_config_event *)skb->data;
- tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
- if (!tpc_stats)
- return;
-
num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
@@ -4614,6 +4663,10 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
return;
}
+ tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+ if (!tpc_stats)
+ return;
+
ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
num_tx_chain);
@@ -5018,13 +5071,11 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
void *vaddr;
pool_size = num_units * round_up(unit_len, 4);
- vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
+ vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
if (!vaddr)
return -ENOMEM;
- memset(vaddr, 0, pool_size);
-
ar->wmi.mem_chunks[idx].vaddr = vaddr;
ar->wmi.mem_chunks[idx].paddr = paddr;
ar->wmi.mem_chunks[idx].len = pool_size;
@@ -9075,6 +9126,11 @@ int ath10k_wmi_attach(struct ath10k *ar)
INIT_WORK(&ar->radar_confirmation_work,
ath10k_radar_confirmation_work);
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features)) {
+ idr_init(&ar->wmi.mgmt_pending_tx);
+ }
+
return 0;
}
@@ -9093,8 +9149,35 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar)
ar->wmi.num_mem_chunks = 0;
}
+static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
+ void *ctx)
+{
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
+ struct ath10k *ar = ctx;
+ struct sk_buff *msdu;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "force cleanup mgmt msdu_id %hu\n", msdu_id);
+
+ msdu = pkt_addr->vaddr;
+ dma_unmap_single(ar->dev, pkt_addr->paddr,
+ msdu->len, DMA_TO_DEVICE);
+ ieee80211_free_txskb(ar->hw, msdu);
+
+ return 0;
+}
+
void ath10k_wmi_detach(struct ath10k *ar)
{
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features)) {
+ spin_lock_bh(&ar->data_lock);
+ idr_for_each(&ar->wmi.mgmt_pending_tx,
+ ath10k_wmi_mgmt_tx_clean_up_pending, ar);
+ idr_destroy(&ar->wmi.mgmt_pending_tx);
+ spin_unlock_bh(&ar->data_lock);
+ }
+
cancel_work_sync(&ar->svc_rdy_work);
if (ar->svc_rdy_skb)
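The dma_zalloc_coherent() change in ath10k_wmi_alloc_chunk() above collapses the removed alloc-then-memset pair; the wrapper is equivalent to the open-coded form below, minus the window in which the pool is uninitialized:

#include <linux/dma-mapping.h>
#include <linux/string.h>

static void *ex_zalloc_coherent(struct device *dev, size_t size,
				dma_addr_t *paddr)
{
	void *vaddr = dma_alloc_coherent(dev, size, paddr, GFP_KERNEL);

	if (vaddr)
		memset(vaddr, 0, size);	/* dma_zalloc_coherent folds this in */
	return vaddr;
}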
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index d68afb65402a..36220258e3c7 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -462,6 +462,7 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS);
SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT);
SVCSTR(WMI_SERVICE_TPC_STATS_FINAL);
+ SVCSTR(WMI_SERVICE_RESET_CHIP);
default:
return NULL;
}
@@ -3934,7 +3935,11 @@ enum wmi_10x_pdev_param {
WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
WMI_10X_PDEV_PARAM_RTS_FIXED_RATE,
- WMI_10X_PDEV_PARAM_CAL_PERIOD
+ WMI_10X_PDEV_PARAM_CAL_PERIOD,
+ WMI_10X_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_10X_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_10X_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_10X_PDEV_PARAM_PDEV_RESET
};
enum wmi_10_4_pdev_param {
@@ -6501,6 +6506,15 @@ struct wmi_force_fw_hang_cmd {
__le32 delay_ms;
} __packed;
+enum wmi_pdev_reset_mode_type {
+ WMI_RST_MODE_TX_FLUSH = 1,
+ WMI_RST_MODE_WARM_RESET,
+ WMI_RST_MODE_COLD_RESET,
+ WMI_RST_MODE_WARM_RESET_RESTORE_CAL,
+ WMI_RST_MODE_COLD_RESET_RESTORE_CAL,
+ WMI_RST_MODE_MAX,
+};
+
enum ath10k_dbglog_level {
ATH10K_DBGLOG_LEVEL_VERBOSE = 0,
ATH10K_DBGLOG_LEVEL_INFO = 1,
@@ -6600,6 +6614,12 @@ struct wmi_scan_ev_arg {
__le32 vdev_id;
};
+struct wmi_tlv_mgmt_tx_compl_ev_arg {
+ __le32 desc_id;
+ __le32 status;
+ __le32 pdev_id;
+};
+
struct wmi_mgmt_rx_ev_arg {
__le32 channel;
__le32 snr;
@@ -7071,6 +7091,7 @@ int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index f23c851765df..05140d8baa36 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -670,6 +670,7 @@ ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
break;
case NL80211_IFTYPE_ADHOC:
AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM);
+ /* fall through */
default:
/* On non-STA modes timer1 is used as next DMA
* beacon alert (DBA) timer and timer2 as next
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index b1b8bc326830..ae08572c4b58 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -483,7 +483,6 @@ static u32
ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
{
u32 mix, step;
- u32 *rf;
const struct ath5k_gain_opt *go;
const struct ath5k_gain_opt_step *g_step;
const struct ath5k_rf_reg *rf_regs;
@@ -502,7 +501,6 @@ ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
if (ah->ah_rf_banks == NULL)
return 0;
- rf = ah->ah_rf_banks;
ah->ah_gain.g_f_corr = 0;
/* No VGA (Variable Gain Amplifier) override, skip */
@@ -549,13 +547,10 @@ ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
{
const struct ath5k_rf_reg *rf_regs;
u32 step, mix_ovr, level[4];
- u32 *rf;
if (ah->ah_rf_banks == NULL)
return false;
- rf = ah->ah_rf_banks;
-
if (ah->ah_radio == AR5K_RF5111) {
rf_regs = rf_regs_5111;
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
index 334dbd834b3a..bde5a10d470c 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.c
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -534,7 +534,7 @@ int ath6kl_bmi_init(struct ath6kl *ar)
/* cmd + addr + len + data_size */
ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3);
- ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC);
+ ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_KERNEL);
if (!ar->bmi.cmd_buf)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 0687697d5e2d..e121187f371f 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1811,20 +1811,20 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
if (vif->target_stats.rx_byte) {
sinfo->rx_bytes = vif->target_stats.rx_byte;
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
sinfo->rx_packets = vif->target_stats.rx_pkt;
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
}
if (vif->target_stats.tx_byte) {
sinfo->tx_bytes = vif->target_stats.tx_byte;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
sinfo->tx_packets = vif->target_stats.tx_pkt;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
}
sinfo->signal = vif->target_stats.cs_rssi;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
rate = vif->target_stats.tx_ucast_rate;
@@ -1857,12 +1857,12 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
if (test_bit(CONNECTED, &vif->flags) &&
test_bit(DTIM_PERIOD_AVAIL, &vif->flags) &&
vif->nw_type == INFRA_NETWORK) {
- sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM);
sinfo->bss_param.flags = 0;
sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period;
sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int;
@@ -3899,16 +3899,19 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
switch (ar->hw.cap) {
case WMI_11AN_CAP:
ht = true;
+ /* fall through */
case WMI_11A_CAP:
band_5gig = true;
break;
case WMI_11GN_CAP:
ht = true;
+ /* fall through */
case WMI_11G_CAP:
band_2gig = true;
break;
case WMI_11AGN_CAP:
ht = true;
+ /* fall through */
case WMI_11AG_CAP:
band_2gig = true;
band_5gig = true;
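On the BIT_ULL() conversions in the ath6kl_get_station() hunks above: sinfo->filled is a u64 and the NL80211_STA_INFO_* enum has grown past bit 31, while plain BIT(n) expands to 1UL << n, which is undefined on 32-bit targets once n reaches 32. BIT_ULL keeps the whole computation in 64 bits:

#define EX_BIT(n)	(1UL << (n))	/* 32-bit UL: breaks for n >= 32 */
#define EX_BIT_ULL(n)	(1ULL << (n))	/* always 64-bit: 1ULL << 35 == 0x800000000ULL */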
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 546243e11737..434b66829646 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -746,10 +746,8 @@ static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
struct htc_endpoint *ep;
struct htc_packet *packet;
u8 ep_id, *netdata;
- u32 netlen;
netdata = skb->data;
- netlen = skb->len;
htc_hdr = (struct htc_frame_hdr *) netdata;
@@ -855,12 +853,8 @@ static int htc_process_trailer(struct htc_target *target, u8 *buffer,
{
struct htc_credit_report *report;
struct htc_record_hdr *record;
- u8 *record_buf, *orig_buf;
- int orig_len, status;
-
- orig_buf = buffer;
- orig_len = len;
- status = 0;
+ u8 *record_buf;
+ int status = 0;
while (len > 0) {
if (len < sizeof(struct htc_record_hdr)) {
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 808fb30be9ad..0c61dbaa62a4 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -272,7 +272,7 @@ int ath6kl_read_fwlogs(struct ath6kl *ar)
{
struct ath6kl_dbglog_hdr debug_hdr;
struct ath6kl_dbglog_buf debug_buf;
- u32 address, length, dropped, firstbuf, debug_hdr_addr;
+ u32 address, length, firstbuf, debug_hdr_addr;
int ret, loop;
u8 *buf;
@@ -303,7 +303,6 @@ int ath6kl_read_fwlogs(struct ath6kl *ar)
address = TARG_VTOP(ar->target_type,
le32_to_cpu(debug_hdr.dbuf_addr));
firstbuf = address;
- dropped = le32_to_cpu(debug_hdr.dropped);
ret = ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
if (ret)
goto out;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 2195b1b7a8a6..bb50680580f3 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1415,6 +1415,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x19))},
{},
};
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 618d12ed4b40..b22ed499f7ba 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1701,7 +1701,6 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
struct ath6kl_sta *sta;
struct aggr_info_conn *aggr_conn = NULL;
struct rxtid *rxtid;
- struct rxtid_stats *stats;
u16 hold_q_size;
u8 tid, aid;
@@ -1722,7 +1721,6 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
return;
rxtid = &aggr_conn->rx_tid[tid];
- stats = &aggr_conn->stat[tid];
if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 7922550c2159..ef2dd68d3f77 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -583,12 +583,14 @@ static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
case 0x5:
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
+ /* fall through */
case 0x3:
if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
break;
}
+ /* else: fall through */
case 0x1:
case 0x2:
case 0x7:
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 50fcd343c41a..fd9db8ca99d7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -676,10 +676,10 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
return 0;
ah->cal_list_curr = currCal = currCal->calNext;
- if (currCal->calState == CAL_WAITING) {
+ if (currCal->calState == CAL_WAITING)
ath9k_hw_reset_calibration(ah, currCal);
- return 0;
- }
+
+ return 0;
}
/* Do NF cal only at longer intervals */
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 61a9b85045d2..713291881208 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -119,6 +119,7 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
aModeRefSel = 2;
if (aModeRefSel)
break;
+ /* else: fall through */
case 1:
default:
aModeRefSel = 0;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index fe5102ca5010..98c5f524a360 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1800,6 +1800,8 @@ static void ar9003_hw_spectral_scan_config(struct ath_hw *ah,
static void ar9003_hw_spectral_scan_trigger(struct ath_hw *ah)
{
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ENABLE);
/* Activate spectral scan */
REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
AR_PHY_SPECTRAL_SCAN_ACTIVE);
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index ef0de4f1312c..21ba20981a80 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -342,7 +342,7 @@ struct ath_chanctx {
struct ath_beacon_config beacon;
struct ath9k_hw_cal_data caldata;
- struct timespec tsf_ts;
+ struct timespec64 tsf_ts;
u64 tsf_val;
u32 last_beacon;
@@ -1021,7 +1021,7 @@ struct ath_softc {
struct ath_offchannel offchannel;
struct ath_chanctx *next_chan;
struct completion go_beacon;
- struct timespec last_event_time;
+ struct timespec64 last_event_time;
#endif
unsigned long driver_data;
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 1b05b5d7a038..fd61ae4782b6 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -233,9 +233,9 @@ static const char *chanctx_state_string(enum ath_chanctx_state state)
static u32 chanctx_event_delta(struct ath_softc *sc)
{
u64 ms;
- struct timespec ts, *old;
+ struct timespec64 ts, *old;
- getrawmonotonic(&ts);
+ ktime_get_raw_ts64(&ts);
old = &sc->last_event_time;
ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
ms -= old->tv_sec * 1000 + old->tv_nsec / 1000000;
@@ -334,7 +334,7 @@ ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx)
static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
{
struct ath_chanctx *prev, *cur;
- struct timespec ts;
+ struct timespec64 ts;
u32 cur_tsf, prev_tsf, beacon_int;
s32 offset;
@@ -346,7 +346,7 @@ static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
if (!prev->switch_after_beacon)
return;
- getrawmonotonic(&ts);
+ ktime_get_raw_ts64(&ts);
cur_tsf = (u32) cur->tsf_val +
ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts);
@@ -1230,7 +1230,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_chanctx *old_ctx;
- struct timespec ts;
+ struct timespec64 ts;
bool measure_time = false;
bool send_ps = false;
bool queues_stopped = false;
@@ -1260,7 +1260,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
spin_unlock_bh(&sc->chan_lock);
if (sc->next_chan == &sc->offchannel.chan) {
- getrawmonotonic(&ts);
+ ktime_get_raw_ts64(&ts);
measure_time = true;
}
@@ -1277,7 +1277,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
spin_lock_bh(&sc->chan_lock);
if (sc->cur_chan != &sc->offchannel.chan) {
- getrawmonotonic(&sc->cur_chan->tsf_ts);
+ ktime_get_raw_ts64(&sc->cur_chan->tsf_ts);
sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah);
}
}
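
Everything in channel.c above is the mechanical y2038 conversion: struct timespec, whose time_t overflows in 2038 on 32-bit, becomes struct timespec64, and getrawmonotonic() becomes ktime_get_raw_ts64() (see Documentation/core-api/timekeeping.rst). A self-contained sketch of the new call plus the millisecond arithmetic used in chanctx_event_delta():

    #include <linux/time64.h>
    #include <linux/timekeeping.h>

    static u64 raw_monotonic_ms_sketch(void)
    {
            struct timespec64 ts;

            ktime_get_raw_ts64(&ts);        /* replaces getrawmonotonic() */
            return ts.tv_sec * MSEC_PER_SEC + ts.tv_nsec / NSEC_PER_MSEC;
    }
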
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index f685843a2ff3..0a6eb8a8c1ed 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -538,7 +538,7 @@ static int read_file_interrupt(struct seq_file *file, void *data)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
PR_IS("RXLP", rxlp);
PR_IS("RXHP", rxhp);
- PR_IS("WATHDOG", bb_watchdog);
+ PR_IS("WATCHDOG", bb_watchdog);
} else {
PR_IS("RX", rxok);
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index cb0eef13af1c..fb649d85b8fc 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -138,6 +138,7 @@ static void hif_usb_mgmt_cb(struct urb *urb)
{
struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
struct hif_device_usb *hif_dev;
+ unsigned long flags;
bool txok = true;
if (!cmd || !cmd->skb || !cmd->hif_dev)
@@ -158,14 +159,14 @@ static void hif_usb_mgmt_cb(struct urb *urb)
* If the URBs are being flushed, no need to complete
* this packet.
*/
- spin_lock(&hif_dev->tx.tx_lock);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
- spin_unlock(&hif_dev->tx.tx_lock);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
dev_kfree_skb_any(cmd->skb);
kfree(cmd);
return;
}
- spin_unlock(&hif_dev->tx.tx_lock);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
break;
default:
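
hif_usb_mgmt_cb() is a URB completion callback, so it can run in interrupt context while the same tx_lock is taken from process context elsewhere; the plain spin_lock() pair is therefore replaced with the irqsave variants, which disable local interrupts and restore their previous state. Generic sketch of the conversion:

    #include <linux/spinlock.h>

    static void locked_update_sketch(spinlock_t *lock, int *counter)
    {
            unsigned long flags;

            spin_lock_irqsave(lock, flags);         /* was spin_lock() */
            (*counter)++;                           /* critical section */
            spin_unlock_irqrestore(lock, flags);    /* was spin_unlock() */
    }
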
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 585736a837ed..799010ed04e0 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1107,25 +1107,26 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+ unsigned long flags;
- spin_lock(&priv->rx.rxbuflock);
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
if (!tmp_buf->in_process) {
rxbuf = tmp_buf;
break;
}
}
- spin_unlock(&priv->rx.rxbuflock);
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
if (rxbuf == NULL) {
ath_dbg(common, ANY, "No free RX buffer\n");
goto err;
}
- spin_lock(&priv->rx.rxbuflock);
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
rxbuf->skb = skb;
rxbuf->in_process = true;
- spin_unlock(&priv->rx.rxbuflock);
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
tasklet_schedule(&priv->rx_tasklet);
return;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index e60bea4604e4..bb319f22761f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -496,7 +496,7 @@ static void ath9k_hw_init_macaddr(struct ath_hw *ah)
ath_err(common, "eeprom contains invalid mac address: %pM\n",
common->macaddr);
- random_ether_addr(common->macaddr);
+ eth_random_addr(common->macaddr);
ath_err(common, "random mac address will be used: %pM\n",
common->macaddr);
@@ -1835,13 +1835,13 @@ fail:
return -EINVAL;
}
-u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur)
+u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur)
{
- struct timespec ts;
+ struct timespec64 ts;
s64 usec;
if (!cur) {
- getrawmonotonic(&ts);
+ ktime_get_raw_ts64(&ts);
cur = &ts;
}
@@ -1859,7 +1859,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 saveLedState;
u32 saveDefAntenna;
u32 macStaId1;
- struct timespec tsf_ts;
+ struct timespec64 tsf_ts;
u32 tsf_offset;
u64 tsf = 0;
int r;
@@ -1905,7 +1905,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
/* Save TSF before chip reset, a cold reset clears it */
- getrawmonotonic(&tsf_ts);
+ ktime_get_raw_ts64(&tsf_ts);
tsf = ath9k_hw_gettsf64(ah);
saveLedState = REG_READ(ah, AR_CFG_LED) &
@@ -2942,16 +2942,19 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
struct ieee80211_channel *channel;
int chan_pwr, new_pwr;
+ u16 ctl = NO_CTL;
if (!chan)
return;
+ if (!test)
+ ctl = ath9k_regd_get_ctl(reg, chan);
+
channel = chan->chan;
chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
new_pwr = min_t(int, chan_pwr, reg->power_limit);
- ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(reg, chan),
+ ah->eep_ops->set_txpower(ah, chan, ctl,
get_antenna_gain(ah, chan), new_pwr, test);
}
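
Two independent cleanups in hw.c above: random_ether_addr() is the legacy alias of eth_random_addr(), and the regulatory CTL lookup is now skipped for test transmissions by defaulting ctl to NO_CTL. Sketch of the fallback-MAC idiom from linux/etherdevice.h:

    #include <linux/etherdevice.h>

    static void assign_fallback_mac_sketch(u8 addr[ETH_ALEN])
    {
            if (!is_valid_ether_addr(addr))
                    eth_random_addr(addr);  /* was random_ether_addr() */
    }
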
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 9804a24a2dc0..68956cdc8c9a 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1060,7 +1060,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
u64 ath9k_hw_gettsf64(struct ath_hw *ah);
void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
-u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur);
+u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index b6663c80e7dd..1049773378f2 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1865,7 +1865,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
tsf -= le64_to_cpu(avp->tsf_adjust);
- getrawmonotonic(&avp->chanctx->tsf_ts);
+ ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
if (sc->cur_chan == avp->chanctx)
ath9k_hw_settsf64(sc->sc_ah, tsf);
avp->chanctx->tsf_val = tsf;
@@ -1881,7 +1881,7 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- getrawmonotonic(&avp->chanctx->tsf_ts);
+ ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
if (sc->cur_chan == avp->chanctx)
ath9k_hw_reset_tsf(sc->sc_ah);
avp->chanctx->tsf_val = 0;
@@ -1928,6 +1928,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
flush = true;
+ /* fall through */
case IEEE80211_AMPDU_TX_STOP_CONT:
ath9k_ps_wakeup(sc);
ath_tx_aggr_stop(sc, sta, tid);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 645f0fbd9179..92b2dd396436 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -18,7 +18,6 @@
#include <linux/nl80211.h>
#include <linux/pci.h>
-#include <linux/pci-aspm.h>
#include <linux/module.h>
#include "ath9k.h"
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index b0b5579b7560..d1f6710ca63b 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -209,6 +209,7 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
{
struct wmi *wmi = priv;
struct wmi_cmd_hdr *hdr;
+ unsigned long flags;
u16 cmd_id;
if (unlikely(wmi->stopped))
@@ -218,20 +219,20 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
cmd_id = be16_to_cpu(hdr->command_id);
if (cmd_id & 0x1000) {
- spin_lock(&wmi->wmi_lock);
+ spin_lock_irqsave(&wmi->wmi_lock, flags);
__skb_queue_tail(&wmi->wmi_event_queue, skb);
- spin_unlock(&wmi->wmi_lock);
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
tasklet_schedule(&wmi->wmi_event_tasklet);
return;
}
/* Check if there has been a timeout. */
- spin_lock(&wmi->wmi_lock);
+ spin_lock_irqsave(&wmi->wmi_lock, flags);
if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
- spin_unlock(&wmi->wmi_lock);
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
goto free_skb;
}
- spin_unlock(&wmi->wmi_lock);
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
/* WMI command response */
ath9k_wmi_rsp_callback(wmi, skb);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 7fdb152be0bb..43b6c8508e49 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -62,7 +62,7 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- int seqno);
+ struct ath_buf *bf);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
@@ -86,7 +86,8 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = info->status.status_driver_data[0];
- if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
+ IEEE80211_TX_STATUS_EOSP)) {
ieee80211_tx_status(hw, skb);
return;
}
@@ -295,7 +296,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
}
if (fi->baw_tracked) {
- ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
+ ath_tx_update_baw(sc, tid, bf);
sendbar = true;
}
@@ -311,10 +312,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
}
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- int seqno)
+ struct ath_buf *bf)
{
+ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+ u16 seqno = bf->bf_state.seqno;
int index, cindex;
+ if (!fi->baw_tracked)
+ return;
+
index = ATH_BA_INDEX(tid->seq_start, seqno);
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
@@ -335,6 +341,9 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
u16 seqno = bf->bf_state.seqno;
int index, cindex;
+ if (fi->baw_tracked)
+ return;
+
index = ATH_BA_INDEX(tid->seq_start, seqno);
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
__set_bit(cindex, tid->tx_buf);
@@ -611,7 +620,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* complete the acked-ones/xretried ones; update
* block-ack window
*/
- ath_tx_update_baw(sc, tid, seqno);
+ ath_tx_update_baw(sc, tid, bf);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -641,7 +650,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* run out of tx buf.
*/
if (!tbf) {
- ath_tx_update_baw(sc, tid, seqno);
+ ath_tx_update_baw(sc, tid, bf);
ath_tx_complete_buf(sc, bf, txq,
&bf_head, NULL, ts,
@@ -969,7 +978,8 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_lastbf = bf;
tx_info = IEEE80211_SKB_CB(skb);
- tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
+ tx_info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
+ IEEE80211_TX_STATUS_EOSP);
/*
* No aggregation session is running, but there may be frames
@@ -1009,11 +1019,14 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_head);
list_add(&bf->list, &bf_head);
- ath_tx_update_baw(sc, tid, seqno);
+ ath_tx_update_baw(sc, tid, bf);
ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
continue;
}
+ if (bf_isampdu(bf))
+ ath_tx_addto_baw(sc, tid, bf);
+
return bf;
}
@@ -1071,8 +1084,6 @@ ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_next = NULL;
/* link buffers of this frame to the aggregate */
- if (!fi->baw_tracked)
- ath_tx_addto_baw(sc, tid, bf);
bf->bf_state.ndelim = ndelim;
list_add_tail(&bf->list, bf_q);
@@ -1659,6 +1670,22 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
}
}
+
+static void
+ath9k_set_moredata(struct ath_softc *sc, struct ath_buf *bf, bool val)
+{
+ struct ieee80211_hdr *hdr;
+ u16 mask = cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ u16 mask_val = mask * val;
+
+ hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
+ if ((hdr->frame_control & mask) != mask_val) {
+ hdr->frame_control = (hdr->frame_control & ~mask) | mask_val;
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ sizeof(*hdr), DMA_TO_DEVICE);
+ }
+}
+
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
@@ -1689,12 +1716,11 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
if (!bf)
break;
+ ath9k_set_moredata(sc, bf, true);
list_add_tail(&bf->list, &bf_q);
ath_set_rates(tid->an->vif, tid->an->sta, bf);
- if (bf_isampdu(bf)) {
- ath_tx_addto_baw(sc, tid, bf);
+ if (bf_isampdu(bf))
bf->bf_state.bf_type &= ~BUF_AGGR;
- }
if (bf_tail)
bf_tail->bf_next = bf;
@@ -1712,6 +1738,9 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
if (list_empty(&bf_q))
return;
+ if (!more_data)
+ ath9k_set_moredata(sc, bf_tail, false);
+
info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
info->flags |= IEEE80211_TX_STATUS_EOSP;
@@ -2407,7 +2436,6 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
.txq = sc->beacon.cabq
};
struct ath_tx_info info = {};
- struct ieee80211_hdr *hdr;
struct ath_buf *bf_tail = NULL;
struct ath_buf *bf;
LIST_HEAD(bf_q);
@@ -2451,15 +2479,10 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (list_empty(&bf_q))
return;
- bf = list_first_entry(&bf_q, struct ath_buf, list);
- hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
-
- if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) {
- hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
- sizeof(*hdr), DMA_TO_DEVICE);
- }
+ bf = list_last_entry(&bf_q, struct ath_buf, list);
+ ath9k_set_moredata(sc, bf, false);
+ bf = list_first_entry(&bf_q, struct ath_buf, list);
ath_txq_lock(sc, txctl.txq);
ath_tx_fill_desc(sc, bf, txctl.txq, 0);
ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
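
The xmit.c changes make the block-ack-window helpers idempotent (ath_tx_update_baw() now takes the buffer and checks fi->baw_tracked itself; ath_tx_addto_baw() returns early when the frame is already tracked) and factor the MOREDATA header rewrite into ath9k_set_moredata(), which syncs the edited header back to the device with dma_sync_single_for_device(). Reduced sketch of the guard pattern, using a stand-in type rather than the driver's structs:

    #include <linux/types.h>

    struct frame_info_sketch {
            bool baw_tracked;
    };

    static void addto_baw_sketch(struct frame_info_sketch *fi)
    {
            if (fi->baw_tracked)    /* already in the window: no-op */
                    return;
            fi->baw_tracked = true;
            /* ...mark this sequence number in the window bitmap... */
    }

    static void update_baw_sketch(struct frame_info_sketch *fi)
    {
            if (!fi->baw_tracked)   /* never entered the window: no-op */
                    return;
            fi->baw_tracked = false;
            /* ...clear the bit and advance the window start... */
    }
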
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index aeb5e6e806be..79998a3ddb7a 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -493,7 +493,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
+ struct wcn36xx_sta *sta_priv = sta ? wcn36xx_sta_to_priv(sta) : NULL;
int ret = 0;
u8 key[WLAN_MAX_KEY_LEN];
@@ -512,7 +512,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
break;
case WLAN_CIPHER_SUITE_WEP104:
- vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP104;
break;
case WLAN_CIPHER_SUITE_CCMP:
vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
@@ -567,15 +567,19 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_conf->keyidx,
key_conf->keylen,
key);
+
if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
(WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
- sta_priv->is_data_encrypted = true;
- wcn36xx_smd_set_stakey(wcn,
- vif_priv->encrypt_type,
- key_conf->keyidx,
- key_conf->keylen,
- key,
- get_sta_index(vif, sta_priv));
+ list_for_each_entry(sta_priv,
+ &vif_priv->sta_list, list) {
+ sta_priv->is_data_encrypted = true;
+ wcn36xx_smd_set_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key,
+ get_sta_index(vif, sta_priv));
+ }
}
}
break;
@@ -984,6 +988,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
mutex_lock(&wcn->conf_mutex);
vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
+ INIT_LIST_HEAD(&vif_priv->sta_list);
list_add(&vif_priv->list, &wcn->vif_list);
wcn36xx_smd_add_sta_self(wcn, vif);
@@ -1005,6 +1010,8 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
spin_lock_init(&sta_priv->ampdu_lock);
sta_priv->vif = vif_priv;
+ list_add(&sta_priv->list, &vif_priv->sta_list);
+
/*
* For STA mode HW will be configured on BSS_CHANGED_ASSOC because
* at this stage AID is not available yet.
@@ -1032,6 +1039,7 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
mutex_lock(&wcn->conf_mutex);
+ list_del(&sta_priv->list);
wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
sta_priv->vif = NULL;
@@ -1153,8 +1161,6 @@ static const struct ieee80211_ops wcn36xx_ops = {
static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
{
- int ret = 0;
-
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
@@ -1201,7 +1207,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
wiphy_ext_feature_set(wcn->hw->wiphy,
NL80211_EXT_FEATURE_CQM_RSSI_LIST);
- return ret;
+ return 0;
}
static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
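
Static WEP keys must now be programmed into every associated station, so each vif gains a sta_list: stations enlist in wcn36xx_sta_add(), drop out in wcn36xx_sta_remove(), and wcn36xx_set_key() walks the list (sta can also legitimately be NULL there now, hence the guarded wcn36xx_sta_to_priv() call). Minimal sketch of the linux/list.h idiom with stand-in types:

    #include <linux/list.h>
    #include <linux/types.h>

    struct sta_sketch {
            struct list_head list;          /* links into vif_sketch.sta_list */
            bool is_data_encrypted;
    };

    struct vif_sketch {
            struct list_head sta_list;      /* INIT_LIST_HEAD() at add_interface */
    };

    static void mark_all_encrypted_sketch(struct vif_sketch *vif)
    {
            struct sta_sketch *sta;

            list_for_each_entry(sta, &vif->sta_list, list)
                    sta->is_data_encrypted = true;
    }
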
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index b4dadf75d565..00098f24116d 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -250,7 +250,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
{
- int ret = 0;
+ int ret;
unsigned long start;
struct wcn36xx_hal_msg_header *hdr =
(struct wcn36xx_hal_msg_header *)wcn->hal_buf;
@@ -446,7 +446,7 @@ static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
int wcn36xx_smd_start(struct wcn36xx *wcn)
{
struct wcn36xx_hal_mac_start_req_msg msg_body, *body;
- int ret = 0;
+ int ret;
int i;
size_t len;
@@ -493,7 +493,7 @@ out:
int wcn36xx_smd_stop(struct wcn36xx *wcn)
{
struct wcn36xx_hal_mac_stop_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
@@ -520,7 +520,7 @@ out:
int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
{
struct wcn36xx_hal_init_scan_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
@@ -549,7 +549,7 @@ out:
int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel)
{
struct wcn36xx_hal_start_scan_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
@@ -579,7 +579,7 @@ out:
int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel)
{
struct wcn36xx_hal_end_scan_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
@@ -610,7 +610,7 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
enum wcn36xx_hal_sys_mode mode)
{
struct wcn36xx_hal_finish_scan_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
@@ -732,7 +732,7 @@ out:
static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
{
struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
- int ret = 0;
+ int ret;
ret = wcn36xx_smd_rsp_status_check(buf, len);
if (ret)
@@ -747,7 +747,7 @@ int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
struct ieee80211_vif *vif, int ch)
{
struct wcn36xx_hal_switch_channel_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
@@ -860,7 +860,7 @@ int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn,
u8 *channels, size_t channel_count)
{
struct wcn36xx_hal_update_scan_params_req_ex msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
@@ -931,7 +931,7 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_add_sta_self_req msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
@@ -965,7 +965,7 @@ out:
int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
{
struct wcn36xx_hal_del_sta_self_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
@@ -993,7 +993,7 @@ out:
int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
{
struct wcn36xx_hal_delete_sta_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
@@ -1040,7 +1040,7 @@ static int wcn36xx_smd_join_rsp(void *buf, size_t len)
int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
{
struct wcn36xx_hal_join_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
@@ -1089,7 +1089,7 @@ int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
enum wcn36xx_hal_link_state state)
{
struct wcn36xx_hal_set_link_state_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
@@ -1215,7 +1215,7 @@ int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
{
struct wcn36xx_hal_config_sta_req_msg msg;
struct wcn36xx_hal_config_sta_params *sta_params;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
@@ -1414,7 +1414,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct wcn36xx_hal_config_bss_params *bss;
struct wcn36xx_hal_config_sta_params *sta_params;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
@@ -1579,7 +1579,7 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
u16 p2p_off)
{
struct wcn36xx_hal_send_beacon_req_msg msg_body;
- int ret = 0, pad, pvm_len;
+ int ret, pad, pvm_len;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
@@ -1653,7 +1653,7 @@ int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
struct sk_buff *skb)
{
struct wcn36xx_hal_send_probe_resp_req_msg msg;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
@@ -1700,7 +1700,7 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
u8 sta_index)
{
struct wcn36xx_hal_set_sta_key_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
@@ -1708,12 +1708,20 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
msg_body.set_sta_key_params.sta_index = sta_index;
msg_body.set_sta_key_params.enc_type = enc_type;
- msg_body.set_sta_key_params.key[0].id = keyidx;
- msg_body.set_sta_key_params.key[0].unicast = 1;
- msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
- msg_body.set_sta_key_params.key[0].pae_role = 0;
- msg_body.set_sta_key_params.key[0].length = keylen;
- memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+ if (enc_type == WCN36XX_HAL_ED_WEP104 ||
+ enc_type == WCN36XX_HAL_ED_WEP40) {
+ /* Use bss key for wep (static) */
+ msg_body.set_sta_key_params.def_wep_idx = keyidx;
+ msg_body.set_sta_key_params.wep_type = 0;
+ } else {
+ msg_body.set_sta_key_params.key[0].id = keyidx;
+ msg_body.set_sta_key_params.key[0].unicast = 1;
+ msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
+ msg_body.set_sta_key_params.key[0].pae_role = 0;
+ msg_body.set_sta_key_params.key[0].length = keylen;
+ memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+ }
+
msg_body.set_sta_key_params.single_tid_rc = 1;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -1741,7 +1749,7 @@ int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
u8 *key)
{
struct wcn36xx_hal_set_bss_key_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
@@ -1778,7 +1786,7 @@ int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
u8 sta_index)
{
struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
@@ -1810,7 +1818,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
u8 keyidx)
{
struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
@@ -1839,7 +1847,7 @@ int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_enter_bmps_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
@@ -1869,7 +1877,7 @@ int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_exit_bmps_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
@@ -1895,7 +1903,7 @@ out:
int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
{
struct wcn36xx_hal_set_power_params_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
@@ -1930,7 +1938,7 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
{
struct wcn36xx_hal_keep_alive_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
@@ -1968,7 +1976,7 @@ int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 arg5)
{
struct wcn36xx_hal_dump_cmd_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
@@ -2013,7 +2021,6 @@ void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
- int ret = 0;
if (cap < 0 || cap > 127) {
wcn36xx_warn("error cap idx %d\n", cap);
@@ -2022,8 +2029,8 @@ int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
arr_idx = cap / 32;
bit_idx = cap % 32;
- ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
- return ret;
+
+ return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
}
void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
@@ -2043,7 +2050,7 @@ void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
{
struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
- int ret = 0, i;
+ int ret, i;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
@@ -2079,7 +2086,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
u8 sta_index)
{
struct wcn36xx_hal_add_ba_session_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
@@ -2117,7 +2124,7 @@ out:
int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
{
struct wcn36xx_hal_add_ba_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
@@ -2145,7 +2152,7 @@ out:
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
{
struct wcn36xx_hal_del_ba_req_msg msg_body;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
@@ -2185,7 +2192,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
{
struct wcn36xx_hal_trigger_ba_req_msg msg_body;
struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
@@ -2364,7 +2371,7 @@ int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
{
struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
size_t len;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
@@ -2399,7 +2406,7 @@ int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg_body = NULL;
- int ret = 0;
+ int ret;
mutex_lock(&wcn->hal_mutex);
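
The remainder of smd.c repeats a single cleanup: the '= 0' initializer is dropped from every 'ret' that is unconditionally assigned before first use, so the compiler can again warn if a future code path forgets to set it. Illustration with hypothetical helpers:

    static int send_and_wait_sketch(void)
    {
            int ret;        /* no '= 0': first use is the assignment below */

            ret = do_send_sketch();         /* hypothetical helper */
            if (ret)
                    return ret;
            return do_wait_sketch();        /* hypothetical helper */
    }
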
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 11e74015c79a..a58f313983b9 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -129,6 +129,8 @@ struct wcn36xx_vif {
u8 self_sta_index;
u8 self_dpu_desc_index;
u8 self_ucast_dpu_sign;
+
+ struct list_head sta_list;
};
/**
@@ -154,6 +156,7 @@ struct wcn36xx_vif {
* |______________|_____________|_______________|
*/
struct wcn36xx_sta {
+ struct list_head list;
struct wcn36xx_vif *vif;
u16 aid;
u16 tid;
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 398edd2a7f2b..d3d61ae459e2 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -9,6 +9,7 @@ wil6210-$(CONFIG_WIL6210_DEBUGFS) += debugfs.o
wil6210-y += wmi.o
wil6210-y += interrupt.o
wil6210-y += txrx.o
+wil6210-y += txrx_edma.o
wil6210-y += debug.o
wil6210-y += rx_reorder.o
wil6210-y += fw.o
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 78946f28d0c7..f79c337105cb 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -302,14 +302,14 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
sinfo->generation = wil->sinfo_gen;
- sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) |
- BIT(NL80211_STA_INFO_TX_BYTES) |
- BIT(NL80211_STA_INFO_RX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_RX_BITRATE) |
- BIT(NL80211_STA_INFO_TX_BITRATE) |
- BIT(NL80211_STA_INFO_RX_DROP_MISC) |
- BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled = BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_RX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_RX_BITRATE) |
+ BIT_ULL(NL80211_STA_INFO_TX_BITRATE) |
+ BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
+ BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->txrate.flags = RATE_INFO_FLAGS_60G;
sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
@@ -322,7 +322,7 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
sinfo->tx_failed = stats->tx_errors;
if (test_bit(wil_vif_fwconnected, vif->status)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING,
wil->fw_capabilities))
sinfo->signal = reply.evt.rssi;
@@ -689,11 +689,12 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
wil_dbg_misc(wil, "scan: wdev=0x%p iftype=%d\n", wdev, wdev->iftype);
- /* check we are client side */
+	/* scan is supported on client interfaces and on the AP interface */
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_AP:
break;
default:
return -EOPNOTSUPP;
@@ -1089,18 +1090,51 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
int rc;
bool tx_status;
- /* Note, currently we do not support the "wait" parameter, user-space
- * must call remain_on_channel before mgmt_tx or listen on a channel
- * another way (AP/PCP or connected station)
- * in addition we need to check if specified "chan" argument is
- * different from currently "listened" channel and fail if it is.
+ wil_dbg_misc(wil, "mgmt_tx: channel %d offchan %d, wait %d\n",
+ params->chan ? params->chan->hw_value : -1,
+ params->offchan,
+ params->wait);
+
+	/* Note, currently we support the "wait" parameter only in AP mode.
+	 * In other modes, user-space must call remain_on_channel before
+	 * mgmt_tx, or listen on a channel other than the active one.
+	 */
- rc = wmi_mgmt_tx(vif, buf, len);
- tx_status = (rc == 0);
+ if (params->chan && params->chan->hw_value == 0) {
+ wil_err(wil, "invalid channel\n");
+ return -EINVAL;
+ }
+ if (wdev->iftype != NL80211_IFTYPE_AP) {
+ wil_dbg_misc(wil,
+ "send WMI_SW_TX_REQ_CMDID on non-AP interfaces\n");
+ rc = wmi_mgmt_tx(vif, buf, len);
+ goto out;
+ }
+
+ if (!params->chan || params->chan->hw_value == vif->channel) {
+ wil_dbg_misc(wil,
+ "send WMI_SW_TX_REQ_CMDID for on-channel\n");
+ rc = wmi_mgmt_tx(vif, buf, len);
+ goto out;
+ }
+
+ if (params->offchan == 0) {
+ wil_err(wil,
+ "invalid channel params: current %d requested %d, off-channel not allowed\n",
+ vif->channel, params->chan->hw_value);
+ return -EBUSY;
+ }
+
+	/* use wmi_mgmt_tx_ext only in AP mode and off-channel */
+ rc = wmi_mgmt_tx_ext(vif, buf, len, params->chan->hw_value,
+ params->wait);
+
+out:
+ tx_status = (rc == 0);
cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
tx_status, GFP_KERNEL);
+
return rc;
}
@@ -1726,7 +1760,7 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int authorize;
int cid, i;
- struct vring_tx_data *txdata = NULL;
+ struct wil_ring_tx_data *txdata = NULL;
wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x mid %d\n",
mac, params->sta_flags_mask, params->sta_flags_set,
@@ -1746,20 +1780,20 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy,
return -ENOLINK;
}
- for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++)
- if (wil->vring2cid_tid[i][0] == cid) {
- txdata = &wil->vring_tx_data[i];
+ for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++)
+ if (wil->ring2cid_tid[i][0] == cid) {
+ txdata = &wil->ring_tx_data[i];
break;
}
if (!txdata) {
- wil_err(wil, "vring data not found\n");
+ wil_err(wil, "ring data not found\n");
return -ENOLINK;
}
authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED);
txdata->dot1x_open = authorize ? 1 : 0;
- wil_dbg_misc(wil, "cid %d vring %d authorize %d\n", cid, i,
+ wil_dbg_misc(wil, "cid %d ring %d authorize %d\n", cid, i,
txdata->dot1x_open);
return 0;
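
sinfo->filled is a u64 bitmap and enum nl80211_sta_info has outgrown 32 entries, so BIT(), which expands to an unsigned long shift (32 bits on many targets), is replaced with BIT_ULL(), which always yields a 64-bit constant. Sketch:

    #include <linux/bitops.h>
    #include <linux/nl80211.h>

    static void fill_sketch(u64 *filled)
    {
            /* BIT(n) would be undefined for n >= 32 with a 32-bit long */
            *filled = BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
                      BIT_ULL(NL80211_STA_INFO_TX_BYTES);
    }
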
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index ebfdff4d328c..51c3330bc316 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -29,7 +29,10 @@
/* Nasty hack. Better have per device instances */
static u32 mem_addr;
static u32 dbg_txdesc_index;
-static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */
+static u32 dbg_ring_index; /* 24+ for Rx, 0..23 for Tx */
+static u32 dbg_status_msg_index;
+/* 0..wil->num_rx_status_rings-1 for Rx, wil->tx_sring_idx for Tx */
+static u32 dbg_sring_index;
enum dbg_off_type {
doff_u32 = 0,
@@ -47,20 +50,53 @@ struct dbg_off {
enum dbg_off_type type;
};
-static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
- const char *name, struct vring *vring,
- char _s, char _h)
+static void wil_print_desc_edma(struct seq_file *s, struct wil6210_priv *wil,
+ struct wil_ring *ring,
+ char _s, char _h, int idx)
{
- void __iomem *x = wmi_addr(wil, vring->hwtail);
+ u8 num_of_descs;
+ bool has_skb = false;
+
+ if (ring->is_rx) {
+ struct wil_rx_enhanced_desc *rx_d =
+ (struct wil_rx_enhanced_desc *)
+ &ring->va[idx].rx.enhanced;
+ u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
+
+ has_skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+ seq_printf(s, "%c", (has_skb) ? _h : _s);
+ } else {
+ struct wil_tx_enhanced_desc *d =
+ (struct wil_tx_enhanced_desc *)
+ &ring->va[idx].tx.enhanced;
+
+ num_of_descs = (u8)d->mac.d[2];
+ has_skb = ring->ctx[idx].skb;
+ if (num_of_descs >= 1)
+ seq_printf(s, "%c", ring->ctx[idx].skb ? _h : _s);
+ else
+ /* num_of_descs == 0, it's a frag in a list of descs */
+ seq_printf(s, "%c", has_skb ? 'h' : _s);
+ }
+}
+
+static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil,
+ const char *name, struct wil_ring *ring,
+ char _s, char _h)
+{
+ void __iomem *x = wmi_addr(wil, ring->hwtail);
u32 v;
- seq_printf(s, "VRING %s = {\n", name);
- seq_printf(s, " pa = %pad\n", &vring->pa);
- seq_printf(s, " va = 0x%p\n", vring->va);
- seq_printf(s, " size = %d\n", vring->size);
- seq_printf(s, " swtail = %d\n", vring->swtail);
- seq_printf(s, " swhead = %d\n", vring->swhead);
- seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
+ seq_printf(s, "RING %s = {\n", name);
+ seq_printf(s, " pa = %pad\n", &ring->pa);
+ seq_printf(s, " va = 0x%p\n", ring->va);
+ seq_printf(s, " size = %d\n", ring->size);
+ if (wil->use_enhanced_dma_hw && ring->is_rx)
+ seq_printf(s, " swtail = %u\n", *ring->edma_rx_swtail.va);
+ else
+ seq_printf(s, " swtail = %d\n", ring->swtail);
+ seq_printf(s, " swhead = %d\n", ring->swhead);
+ seq_printf(s, " hwtail = [0x%08x] -> ", ring->hwtail);
if (x) {
v = readl(x);
seq_printf(s, "0x%08x = %d\n", v, v);
@@ -68,41 +104,45 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
seq_puts(s, "???\n");
}
- if (vring->va && (vring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
+ if (ring->va && (ring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
uint i;
- for (i = 0; i < vring->size; i++) {
- volatile struct vring_tx_desc *d = &vring->va[i].tx;
-
- if ((i % 128) == 0 && (i != 0))
+ for (i = 0; i < ring->size; i++) {
+ if ((i % 128) == 0 && i != 0)
seq_puts(s, "\n");
- seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
- _s : (vring->ctx[i].skb ? _h : 'h'));
+ if (wil->use_enhanced_dma_hw) {
+ wil_print_desc_edma(s, wil, ring, _s, _h, i);
+ } else {
+ volatile struct vring_tx_desc *d =
+ &ring->va[i].tx.legacy;
+ seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
+ _s : (ring->ctx[i].skb ? _h : 'h'));
+ }
}
seq_puts(s, "\n");
}
seq_puts(s, "}\n");
}
-static int wil_vring_debugfs_show(struct seq_file *s, void *data)
+static int wil_ring_debugfs_show(struct seq_file *s, void *data)
{
uint i;
struct wil6210_priv *wil = s->private;
- wil_print_vring(s, wil, "rx", &wil->vring_rx, 'S', '_');
+ wil_print_ring(s, wil, "rx", &wil->ring_rx, 'S', '_');
- for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
- struct vring *vring = &wil->vring_tx[i];
- struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+ for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
+ struct wil_ring *ring = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
- if (vring->va) {
- int cid = wil->vring2cid_tid[i][0];
- int tid = wil->vring2cid_tid[i][1];
- u32 swhead = vring->swhead;
- u32 swtail = vring->swtail;
- int used = (vring->size + swhead - swtail)
- % vring->size;
- int avail = vring->size - used - 1;
+ if (ring->va) {
+ int cid = wil->ring2cid_tid[i][0];
+ int tid = wil->ring2cid_tid[i][1];
+ u32 swhead = ring->swhead;
+ u32 swtail = ring->swtail;
+ int used = (ring->size + swhead - swtail)
+ % ring->size;
+ int avail = ring->size - used - 1;
char name[10];
char sidle[10];
/* performance monitoring */
@@ -137,20 +177,88 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
txdata->dot1x_open ? "+" : "-",
used, avail, sidle);
- wil_print_vring(s, wil, name, vring, '_', 'H');
+ wil_print_ring(s, wil, name, ring, '_', 'H');
+ }
+ }
+
+ return 0;
+}
+
+static int wil_ring_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_ring_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_ring = {
+ .open = wil_ring_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ void __iomem *x = wmi_addr(wil, sring->hwtail);
+ int sring_idx = sring - wil->srings;
+ u32 v;
+
+ seq_printf(s, "Status Ring %s [ %d ] = {\n",
+ sring->is_rx ? "RX" : "TX", sring_idx);
+ seq_printf(s, " pa = %pad\n", &sring->pa);
+ seq_printf(s, " va = 0x%pK\n", sring->va);
+ seq_printf(s, " size = %d\n", sring->size);
+ seq_printf(s, " elem_size = %zu\n", sring->elem_size);
+ seq_printf(s, " swhead = %d\n", sring->swhead);
+ seq_printf(s, " hwtail = [0x%08x] -> ", sring->hwtail);
+ if (x) {
+ v = readl_relaxed(x);
+ seq_printf(s, "0x%08x = %d\n", v, v);
+ } else {
+ seq_puts(s, "???\n");
+ }
+ seq_printf(s, " desc_rdy_pol = %d\n", sring->desc_rdy_pol);
+
+ if (sring->va && (sring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
+ uint i;
+
+ for (i = 0; i < sring->size; i++) {
+ u32 *sdword_0 =
+ (u32 *)(sring->va + (sring->elem_size * i));
+
+ if ((i % 128) == 0 && i != 0)
+ seq_puts(s, "\n");
+ if (i == sring->swhead)
+ seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
+ 'X' : 'x');
+ else
+ seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
+ '1' : '0');
}
+ seq_puts(s, "\n");
}
+ seq_puts(s, "}\n");
+}
+
+static int wil_srings_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ int i = 0;
+
+ for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++)
+ if (wil->srings[i].va)
+ wil_print_sring(s, wil, &wil->srings[i]);
return 0;
}
-static int wil_vring_seq_open(struct inode *inode, struct file *file)
+static int wil_srings_seq_open(struct inode *inode, struct file *file)
{
- return single_open(file, wil_vring_debugfs_show, inode->i_private);
+ return single_open(file, wil_srings_debugfs_show, inode->i_private);
}
-static const struct file_operations fops_vring = {
- .open = wil_vring_seq_open,
+static const struct file_operations fops_srings = {
+ .open = wil_srings_seq_open,
.release = single_release,
.read = seq_read,
.llseek = seq_lseek,
@@ -162,8 +270,8 @@ static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
seq_hex_dump(s, prefix, DUMP_PREFIX_NONE, 16, 1, p, len, false);
}
-static void wil_print_ring(struct seq_file *s, const char *prefix,
- void __iomem *off)
+static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,
+ void __iomem *off)
{
struct wil6210_priv *wil = s->private;
struct wil6210_mbox_ring r;
@@ -249,9 +357,9 @@ static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
if (ret < 0)
return ret;
- wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
+ wil_print_mbox_ring(s, "tx", wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, tx));
- wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
+ wil_print_mbox_ring(s, "rx", wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, rx));
wil_pm_runtime_put(wil);
@@ -719,13 +827,13 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf,
if ((strcmp(cmd, "add") == 0) ||
(strcmp(cmd, "del_tx") == 0)) {
- struct vring_tx_data *txdata;
+ struct wil_ring_tx_data *txdata;
if (p1 < 0 || p1 >= WIL6210_MAX_TX_RINGS) {
wil_err(wil, "BACK: invalid ring id %d\n", p1);
return -EINVAL;
}
- txdata = &wil->vring_tx_data[p1];
+ txdata = &wil->ring_tx_data[p1];
if (strcmp(cmd, "add") == 0) {
if (rc < 3) {
wil_err(wil, "BACK: add require at least 2 params\n");
@@ -972,54 +1080,93 @@ static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
- struct vring *vring;
- bool tx = (dbg_vring_index < WIL6210_MAX_TX_RINGS);
+ struct wil_ring *ring;
+ bool tx;
+ int ring_idx = dbg_ring_index;
+ int txdesc_idx = dbg_txdesc_index;
+ volatile struct vring_tx_desc *d;
+ volatile u32 *u;
+ struct sk_buff *skb;
+
+ if (wil->use_enhanced_dma_hw) {
+ /* RX ring index == 0 */
+ if (ring_idx >= WIL6210_MAX_TX_RINGS) {
+ seq_printf(s, "invalid ring index %d\n", ring_idx);
+ return 0;
+ }
+ tx = ring_idx > 0; /* desc ring 0 is reserved for RX */
+ } else {
+ /* RX ring index == WIL6210_MAX_TX_RINGS */
+ if (ring_idx > WIL6210_MAX_TX_RINGS) {
+ seq_printf(s, "invalid ring index %d\n", ring_idx);
+ return 0;
+ }
+ tx = (ring_idx < WIL6210_MAX_TX_RINGS);
+ }
- vring = tx ? &wil->vring_tx[dbg_vring_index] : &wil->vring_rx;
+ ring = tx ? &wil->ring_tx[ring_idx] : &wil->ring_rx;
- if (!vring->va) {
+ if (!ring->va) {
if (tx)
- seq_printf(s, "No Tx[%2d] VRING\n", dbg_vring_index);
+ seq_printf(s, "No Tx[%2d] RING\n", ring_idx);
else
- seq_puts(s, "No Rx VRING\n");
+ seq_puts(s, "No Rx RING\n");
return 0;
}
- if (dbg_txdesc_index < vring->size) {
- /* use struct vring_tx_desc for Rx as well,
- * only field used, .dma.length, is the same
- */
- volatile struct vring_tx_desc *d =
- &vring->va[dbg_txdesc_index].tx;
- volatile u32 *u = (volatile u32 *)d;
- struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
-
+ if (txdesc_idx >= ring->size) {
if (tx)
- seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_vring_index,
- dbg_txdesc_index);
+ seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
+ ring_idx, txdesc_idx, ring->size);
else
- seq_printf(s, "Rx[%3d] = {\n", dbg_txdesc_index);
- seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
- u[0], u[1], u[2], u[3]);
- seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
- u[4], u[5], u[6], u[7]);
- seq_printf(s, " SKB = 0x%p\n", skb);
+ seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
+ txdesc_idx, ring->size);
+ return 0;
+ }
- if (skb) {
- skb_get(skb);
- wil_seq_print_skb(s, skb);
- kfree_skb(skb);
+ /* use struct vring_tx_desc for Rx as well,
+ * only field used, .dma.length, is the same
+ */
+ d = &ring->va[txdesc_idx].tx.legacy;
+ u = (volatile u32 *)d;
+ skb = NULL;
+
+ if (wil->use_enhanced_dma_hw) {
+ if (tx) {
+ skb = ring->ctx[txdesc_idx].skb;
+ } else {
+ struct wil_rx_enhanced_desc *rx_d =
+ (struct wil_rx_enhanced_desc *)
+ &ring->va[txdesc_idx].rx.enhanced;
+ u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
+
+ if (!wil_val_in_range(buff_id, 0,
+ wil->rx_buff_mgmt.size)) {
+ seq_printf(s, "invalid buff_id %d\n", buff_id);
+ return 0;
+ }
+ skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
}
- seq_puts(s, "}\n");
} else {
- if (tx)
- seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
- dbg_vring_index, dbg_txdesc_index,
- vring->size);
- else
- seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
- dbg_txdesc_index, vring->size);
+ skb = ring->ctx[txdesc_idx].skb;
}
+ if (tx)
+ seq_printf(s, "Tx[%2d][%3d] = {\n", ring_idx,
+ txdesc_idx);
+ else
+ seq_printf(s, "Rx[%3d] = {\n", txdesc_idx);
+ seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[0], u[1], u[2], u[3]);
+ seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[4], u[5], u[6], u[7]);
+ seq_printf(s, " SKB = 0x%p\n", skb);
+
+ if (skb) {
+ skb_get(skb);
+ wil_seq_print_skb(s, skb);
+ kfree_skb(skb);
+ }
+ seq_puts(s, "}\n");
return 0;
}
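
Every debugfs entry added in this file repeats the single_open() seq_file boilerplate (fops_ring above, fops_status_msg and fops_rx_buff_mgmt below). Minimal self-contained sketch of the pattern, with hypothetical names:

    #include <linux/fs.h>
    #include <linux/seq_file.h>

    static int my_debugfs_show(struct seq_file *s, void *data)
    {
            /* s->private is the pointer handed to single_open() below */
            seq_puts(s, "example\n");
            return 0;
    }

    static int my_seq_open(struct inode *inode, struct file *file)
    {
            /* inode->i_private carries the per-device context */
            return single_open(file, my_debugfs_show, inode->i_private);
    }

    static const struct file_operations fops_my = {
            .open = my_seq_open,
            .release = single_release,
            .read = seq_read,
            .llseek = seq_lseek,
    };
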
@@ -1036,6 +1183,115 @@ static const struct file_operations fops_txdesc = {
.llseek = seq_lseek,
};
+/*---------Tx/Rx status message------------*/
+static int wil_status_msg_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ int sring_idx = dbg_sring_index;
+ struct wil_status_ring *sring;
+	bool tx = (sring_idx == wil->tx_sring_idx);
+ u32 status_msg_idx = dbg_status_msg_index;
+ u32 *u;
+
+ if (sring_idx >= WIL6210_MAX_STATUS_RINGS) {
+ seq_printf(s, "invalid status ring index %d\n", sring_idx);
+ return 0;
+ }
+
+ sring = &wil->srings[sring_idx];
+
+ if (!sring->va) {
+ seq_printf(s, "No %cX status ring\n", tx ? 'T' : 'R');
+ return 0;
+ }
+
+ if (status_msg_idx >= sring->size) {
+ seq_printf(s, "%cxDesc index (%d) >= size (%d)\n",
+ tx ? 'T' : 'R', status_msg_idx, sring->size);
+ return 0;
+ }
+
+ u = sring->va + (sring->elem_size * status_msg_idx);
+
+ seq_printf(s, "%cx[%d][%3d] = {\n",
+ tx ? 'T' : 'R', sring_idx, status_msg_idx);
+
+ seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[0], u[1], u[2], u[3]);
+ if (!tx && !wil->use_compressed_rx_status)
+ seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[4], u[5], u[6], u[7]);
+
+ seq_puts(s, "}\n");
+
+ return 0;
+}
+
+static int wil_status_msg_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_status_msg_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_status_msg = {
+ .open = wil_status_msg_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static int wil_print_rx_buff(struct seq_file *s, struct list_head *lh)
+{
+ struct wil_rx_buff *it;
+ int i = 0;
+
+ list_for_each_entry(it, lh, list) {
+ if ((i % 16) == 0 && i != 0)
+ seq_puts(s, "\n ");
+ seq_printf(s, "[%4d] ", it->id);
+ i++;
+ }
+ seq_printf(s, "\nNumber of buffers: %u\n", i);
+
+ return i;
+}
+
+static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil_rx_buff_mgmt *rbm = &wil->rx_buff_mgmt;
+ int num_active;
+ int num_free;
+
+ seq_printf(s, " size = %zu\n", rbm->size);
+ seq_printf(s, " free_list_empty_cnt = %lu\n",
+ rbm->free_list_empty_cnt);
+
+ /* Print active list */
+ seq_puts(s, " Active list:\n");
+ num_active = wil_print_rx_buff(s, &rbm->active);
+ seq_puts(s, "\n Free list:\n");
+ num_free = wil_print_rx_buff(s, &rbm->free);
+
+ seq_printf(s, " Total number of buffers: %u\n",
+ num_active + num_free);
+
+ return 0;
+}
+
+static int wil_rx_buff_mgmt_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_rx_buff_mgmt_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_rx_buff_mgmt = {
+ .open = wil_rx_buff_mgmt_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
/*---------beamforming------------*/
static char *wil_bfstatus_str(u32 status)
{
@@ -1132,7 +1388,7 @@ static const struct file_operations fops_bf = {
};
/*---------temp------------*/
-static void print_temp(struct seq_file *s, const char *prefix, u32 t)
+static void print_temp(struct seq_file *s, const char *prefix, s32 t)
{
switch (t) {
case 0:
@@ -1140,7 +1396,8 @@ static void print_temp(struct seq_file *s, const char *prefix, u32 t)
seq_printf(s, "%s N/A\n", prefix);
break;
default:
- seq_printf(s, "%s %d.%03d\n", prefix, t / 1000, t % 1000);
+ seq_printf(s, "%s %s%d.%03d\n", prefix, (t < 0 ? "-" : ""),
+ abs(t / 1000), abs(t % 1000));
break;
}
}
@@ -1148,7 +1405,7 @@ static void print_temp(struct seq_file *s, const char *prefix, u32 t)
static int wil_temp_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
- u32 t_m, t_r;
+ s32 t_m, t_r;
int rc = wmi_get_temperature(wil, &t_m, &t_r);
if (rc) {
@@ -1384,6 +1641,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
int i;
u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old;
+ unsigned long long drop_dup_mcast = r->drop_dup_mcast;
seq_printf(s, "([%2d]) 0x%03x [", r->buf_size, r->head_seq_num);
for (i = 0; i < r->buf_size; i++) {
@@ -1393,9 +1651,9 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
}
seq_printf(s,
- "] total %llu drop %llu (dup %llu + old %llu) last 0x%03x\n",
- r->total, drop_dup + drop_old, drop_dup, drop_old,
- r->ssn_last_drop);
+ "] total %llu drop %llu (dup %llu + old %llu + dup mcast %llu) last 0x%03x\n",
+ r->total, drop_dup + drop_old + drop_dup_mcast, drop_dup,
+ drop_old, drop_dup_mcast, r->ssn_last_drop);
}
static void wil_print_rxtid_crypto(struct seq_file *s, int tid,
@@ -1477,6 +1735,12 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
p->stats.rx_short_frame,
p->stats.rx_large_frame,
p->stats.rx_replay);
+ seq_printf(s,
+ "mic error %lu, key error %lu, amsdu error %lu, csum error %lu\n",
+ p->stats.rx_mic_error,
+ p->stats.rx_key_error,
+ p->stats.rx_amsdu_error,
+ p->stats.rx_csum_err);
seq_puts(s, "Rx/MCS:");
for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
@@ -1538,6 +1802,343 @@ static const struct file_operations fops_mids = {
.llseek = seq_lseek,
};
+static int wil_tx_latency_debugfs_show(struct seq_file *s, void *data)
+__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
+{
+ struct wil6210_priv *wil = s->private;
+ int i, bin;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ struct wil_sta_info *p = &wil->sta[i];
+ char *status = "unknown";
+ u8 aid = 0;
+ u8 mid;
+
+ if (!p->tx_latency_bins)
+ continue;
+
+ switch (p->status) {
+ case wil_sta_unused:
+ status = "unused ";
+ break;
+ case wil_sta_conn_pending:
+ status = "pending ";
+ break;
+ case wil_sta_connected:
+ status = "connected";
+ aid = p->aid;
+ break;
+ }
+ mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
+ seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i, p->addr, status,
+ mid, aid);
+
+ if (p->status == wil_sta_connected) {
+ u64 num_packets = 0;
+ u64 tx_latency_avg = p->stats.tx_latency_total_us;
+
+ seq_puts(s, "Tx/Latency bin:");
+ for (bin = 0; bin < WIL_NUM_LATENCY_BINS; bin++) {
+ seq_printf(s, " %lld",
+ p->tx_latency_bins[bin]);
+ num_packets += p->tx_latency_bins[bin];
+ }
+ seq_puts(s, "\n");
+ if (!num_packets)
+ continue;
+ do_div(tx_latency_avg, num_packets);
+ seq_printf(s, "Tx/Latency min/avg/max (us): %d/%lld/%d",
+ p->stats.tx_latency_min_us,
+ tx_latency_avg,
+ p->stats.tx_latency_max_us);
+
+ seq_puts(s, "\n");
+ }
+ }
+
+ return 0;
+}
+
+static int wil_tx_latency_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_tx_latency_debugfs_show,
+ inode->i_private);
+}
+
+static ssize_t wil_tx_latency_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int val, rc, i;
+ bool enable;
+
+ rc = kstrtoint_from_user(buf, len, 0, &val);
+ if (rc) {
+ wil_err(wil, "Invalid argument\n");
+ return rc;
+ }
+ if (val == 1)
+ /* default resolution */
+ val = 500;
+ if (val && (val < 50 || val > 1000)) {
+ wil_err(wil, "Invalid resolution %d\n", val);
+ return -EINVAL;
+ }
+
+ enable = !!val;
+ if (wil->tx_latency == enable)
+ return len;
+
+ wil_info(wil, "%s TX latency measurements (resolution %d usec)\n",
+ enable ? "Enabling" : "Disabling", val);
+
+ if (enable) {
+ size_t sz = sizeof(u64) * WIL_NUM_LATENCY_BINS;
+
+ wil->tx_latency_res = val;
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ struct wil_sta_info *sta = &wil->sta[i];
+
+ kfree(sta->tx_latency_bins);
+ sta->tx_latency_bins = kzalloc(sz, GFP_KERNEL);
+ if (!sta->tx_latency_bins)
+ return -ENOMEM;
+ sta->stats.tx_latency_min_us = U32_MAX;
+ sta->stats.tx_latency_max_us = 0;
+ sta->stats.tx_latency_total_us = 0;
+ }
+ }
+ wil->tx_latency = enable;
+
+ return len;
+}
+
+static const struct file_operations fops_tx_latency = {
+ .open = wil_tx_latency_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_tx_latency_write,
+ .llseek = seq_lseek,
+};
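
The consumer side of these bins lives on the Tx completion path and is outside this hunk; a sketch of the binning that the resolution chosen here implies (the helper name and exact bookkeeping are assumptions; only the bin array and the min/max/total fields are taken from this patch):

	static void wil_tx_latency_sample(struct wil6210_priv *wil,
					  struct wil_sta_info *sta,
					  u32 latency_us)
	{
		u32 bin = latency_us / wil->tx_latency_res;

		/* the last bin absorbs everything above the measurable range */
		if (bin >= WIL_NUM_LATENCY_BINS)
			bin = WIL_NUM_LATENCY_BINS - 1;
		sta->tx_latency_bins[bin]++;
		sta->stats.tx_latency_total_us += latency_us;
		if (latency_us < sta->stats.tx_latency_min_us)
			sta->stats.tx_latency_min_us = latency_us;
		if (latency_us > sta->stats.tx_latency_max_us)
			sta->stats.tx_latency_max_us = latency_us;
	}
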
+
+static void wil_link_stats_print_basic(struct wil6210_vif *vif,
+ struct seq_file *s,
+ struct wmi_link_stats_basic *basic)
+{
+ char per[5] = "?";
+
+ if (basic->per_average != 0xff)
+ snprintf(per, sizeof(per), "%d%%", basic->per_average);
+
+ seq_printf(s, "CID %d {\n"
+ "\tTxMCS %d TxTpt %d\n"
+ "\tGoodput(rx:tx) %d:%d\n"
+ "\tRxBcastFrames %d\n"
+ "\tRSSI %d SQI %d SNR %d PER %s\n"
+ "\tRx RFC %d Ant num %d\n"
+ "\tSectors(rx:tx) my %d:%d peer %d:%d\n"
+ "}\n",
+ basic->cid,
+ basic->bf_mcs, le32_to_cpu(basic->tx_tpt),
+ le32_to_cpu(basic->rx_goodput),
+ le32_to_cpu(basic->tx_goodput),
+ le32_to_cpu(basic->rx_bcast_frames),
+ basic->rssi, basic->sqi, basic->snr, per,
+ basic->selected_rfc, basic->rx_effective_ant_num,
+ basic->my_rx_sector, basic->my_tx_sector,
+ basic->other_rx_sector, basic->other_tx_sector);
+}
+
+static void wil_link_stats_print_global(struct wil6210_priv *wil,
+ struct seq_file *s,
+ struct wmi_link_stats_global *global)
+{
+ seq_printf(s, "Frames(rx:tx) %d:%d\n"
+ "BA Frames(rx:tx) %d:%d\n"
+ "Beacons %d\n"
+ "Rx Errors (MIC:CRC) %d:%d\n"
+ "Tx Errors (no ack) %d\n",
+ le32_to_cpu(global->rx_frames),
+ le32_to_cpu(global->tx_frames),
+ le32_to_cpu(global->rx_ba_frames),
+ le32_to_cpu(global->tx_ba_frames),
+ le32_to_cpu(global->tx_beacons),
+ le32_to_cpu(global->rx_mic_errors),
+ le32_to_cpu(global->rx_crc_errors),
+ le32_to_cpu(global->tx_fail_no_ack));
+}
+
+static void wil_link_stats_debugfs_show_vif(struct wil6210_vif *vif,
+ struct seq_file *s)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_link_stats_basic *stats;
+ int i;
+
+ if (!vif->fw_stats_ready) {
+ seq_puts(s, "no statistics\n");
+ return;
+ }
+
+ seq_printf(s, "TSF %lld\n", vif->fw_stats_tsf);
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ if (wil->sta[i].status == wil_sta_unused)
+ continue;
+ if (wil->sta[i].mid != vif->mid)
+ continue;
+
+ stats = &wil->sta[i].fw_stats_basic;
+ wil_link_stats_print_basic(vif, s, stats);
+ }
+}
+
+static int wil_link_stats_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil6210_vif *vif;
+ int i, rc;
+
+ rc = mutex_lock_interruptible(&wil->vif_mutex);
+ if (rc)
+ return rc;
+
+ /* iterate over all MIDs and show per-cid statistics; the global
+ * statistics are reported separately via the link_stats_global entry
+ */
+ for (i = 0; i < wil->max_vifs; i++) {
+ vif = wil->vifs[i];
+
+ seq_printf(s, "MID %d ", i);
+ if (!vif) {
+ seq_puts(s, "unused\n");
+ continue;
+ }
+
+ wil_link_stats_debugfs_show_vif(vif, s);
+ }
+
+ mutex_unlock(&wil->vif_mutex);
+
+ return 0;
+}
+
+static int wil_link_stats_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_link_stats_debugfs_show, inode->i_private);
+}
+
+static ssize_t wil_link_stats_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int cid, interval, rc, i;
+ struct wil6210_vif *vif;
+ char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+ if (rc != len) {
+ kfree(kbuf);
+ return rc >= 0 ? -EIO : rc;
+ }
+
+ kbuf[len] = '\0';
+ /* specify cid (use -1 for all cids) and snapshot interval in ms */
+ rc = sscanf(kbuf, "%d %d", &cid, &interval);
+ kfree(kbuf);
+ if (rc < 0)
+ return rc;
+ if (rc < 2 || interval < 0)
+ return -EINVAL;
+
+ wil_info(wil, "request link statistics, cid %d interval %d\n",
+ cid, interval);
+
+ rc = mutex_lock_interruptible(&wil->vif_mutex);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < wil->max_vifs; i++) {
+ vif = wil->vifs[i];
+ if (!vif)
+ continue;
+
+ rc = wmi_link_stats_cfg(vif, WMI_LINK_STATS_TYPE_BASIC,
+ (cid == -1 ? 0xff : cid), interval);
+ if (rc)
+ wil_err(wil, "link statistics failed for mid %d\n", i);
+ }
+ mutex_unlock(&wil->vif_mutex);
+
+ return len;
+}
+
+static const struct file_operations fops_link_stats = {
+ .open = wil_link_stats_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_link_stats_write,
+ .llseek = seq_lseek,
+};
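
From userspace the file accepts "<cid> <interval_ms>" as plain text, with cid -1 selecting all CIDs. A hypothetical example (the debugfs path depends on the phy name and is an assumption here):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* hypothetical path - substitute the actual phy */
		const char *path =
			"/sys/kernel/debug/ieee80211/phy0/wil6210/link_stats";
		const char *req = "-1 500";	/* all CIDs, 500 ms interval */
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, req, strlen(req)) < 0)
			perror("write");
		close(fd);
		return 0;
	}
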
+
+static int
+wil_link_stats_global_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ if (!wil->fw_stats_global.ready)
+ return 0;
+
+ seq_printf(s, "TSF %lld\n", wil->fw_stats_global.tsf);
+ wil_link_stats_print_global(wil, s, &wil->fw_stats_global.stats);
+
+ return 0;
+}
+
+static int
+wil_link_stats_global_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_link_stats_global_debugfs_show,
+ inode->i_private);
+}
+
+static ssize_t
+wil_link_stats_global_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int interval, rc;
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+
+ /* specify snapshot interval in ms */
+ rc = kstrtoint_from_user(buf, len, 0, &interval);
+ if (rc || interval < 0) {
+ wil_err(wil, "Invalid argument\n");
+ return -EINVAL;
+ }
+
+ wil_info(wil, "request global link stats, interval %d\n", interval);
+
+ rc = wmi_link_stats_cfg(vif, WMI_LINK_STATS_TYPE_GLOBAL, 0, interval);
+ if (rc)
+ wil_err(wil, "global link stats failed %d\n", rc);
+
+ return rc ? rc : len;
+}
+
+static const struct file_operations fops_link_stats_global = {
+ .open = wil_link_stats_global_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_link_stats_global_write,
+ .llseek = seq_lseek,
+};
+
static ssize_t wil_read_file_led_cfg(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1760,6 +2361,60 @@ static const struct file_operations fops_suspend_stats = {
.open = simple_open,
};
+/*---------compressed_rx_status---------*/
+static ssize_t wil_compressed_rx_status_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int compressed_rx_status;
+ int rc;
+
+ rc = kstrtoint_from_user(buf, len, 0, &compressed_rx_status);
+ if (rc) {
+ wil_err(wil, "Invalid argument\n");
+ return rc;
+ }
+
+ if (wil_has_active_ifaces(wil, true, false)) {
+ wil_err(wil, "cannot change edma config after iface is up\n");
+ return -EPERM;
+ }
+
+ wil_info(wil, "%sable compressed_rx_status\n",
+ compressed_rx_status ? "En" : "Dis");
+
+ wil->use_compressed_rx_status = compressed_rx_status;
+
+ return len;
+}
+
+static int
+wil_compressed_rx_status_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ seq_printf(s, "%d\n", wil->use_compressed_rx_status);
+
+ return 0;
+}
+
+static int
+wil_compressed_rx_status_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_compressed_rx_status_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_compressed_rx_status = {
+ .open = wil_compressed_rx_status_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_compressed_rx_status_write,
+ .llseek = seq_lseek,
+};
+
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@@ -1790,7 +2445,7 @@ static const struct {
const struct file_operations *fops;
} dbg_files[] = {
{"mbox", 0444, &fops_mbox},
- {"vrings", 0444, &fops_vring},
+ {"rings", 0444, &fops_ring},
{"stations", 0444, &fops_sta},
{"mids", 0444, &fops_mids},
{"desc", 0444, &fops_txdesc},
@@ -1813,6 +2468,13 @@ static const struct {
{"fw_capabilities", 0444, &fops_fw_capabilities},
{"fw_version", 0444, &fops_fw_version},
{"suspend_stats", 0644, &fops_suspend_stats},
+ {"compressed_rx_status", 0644, &fops_compressed_rx_status},
+ {"srings", 0444, &fops_srings},
+ {"status_msg", 0444, &fops_status_msg},
+ {"rx_buff_mgmt", 0444, &fops_rx_buff_mgmt},
+ {"tx_latency", 0644, &fops_tx_latency},
+ {"link_stats", 0644, &fops_link_stats},
+ {"link_stats_global", 0644, &fops_link_stats_global},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1858,7 +2520,12 @@ static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(chip_revision, 0444, doff_u8),
WIL_FIELD(abft_len, 0644, doff_u8),
WIL_FIELD(wakeup_trigger, 0644, doff_u8),
- WIL_FIELD(vring_idle_trsh, 0644, doff_u32),
+ WIL_FIELD(ring_idle_trsh, 0644, doff_u32),
+ WIL_FIELD(num_rx_status_rings, 0644, doff_u8),
+ WIL_FIELD(rx_status_ring_order, 0644, doff_u32),
+ WIL_FIELD(tx_status_ring_order, 0644, doff_u32),
+ WIL_FIELD(rx_buff_id_count, 0644, doff_u32),
+ WIL_FIELD(amsdu_en, 0644, doff_u8),
{},
};
@@ -1872,9 +2539,11 @@ static const struct dbg_off dbg_wil_regs[] = {
/* static parameters */
static const struct dbg_off dbg_statics[] = {
{"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32},
- {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32},
+ {"ring_index", 0644, (ulong)&dbg_ring_index, doff_u32},
{"mem_addr", 0644, (ulong)&mem_addr, doff_u32},
{"led_polarity", 0644, (ulong)&led_polarity, doff_u8},
+ {"status_index", 0644, (ulong)&dbg_status_msg_index, doff_u32},
+ {"sring_index", 0644, (ulong)&dbg_sring_index, doff_u32},
{},
};
@@ -1922,10 +2591,14 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
void wil6210_debugfs_remove(struct wil6210_priv *wil)
{
+ int i;
+
debugfs_remove_recursive(wil->debug);
wil->debug = NULL;
kfree(wil->dbg_data.data_arr);
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++)
+ kfree(wil->sta[i].tx_latency_bins);
/* free pmc memory without sending command to fw, as it will
* be reset on the way down anyway
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index e7ff41e623d2..a04c87ffd37b 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -101,7 +101,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
if (ret < 0)
return ret;
- wil_configure_interrupt_moderation(wil);
+ wil->txrx_ops.configure_interrupt_moderation(wil);
wil_pm_runtime_put(wil);
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
index 540fc20984d8..3e2bbbceca06 100644
--- a/drivers/net/wireless/ath/wil6210/fw.c
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -22,6 +23,8 @@
MODULE_FIRMWARE(WIL_FW_NAME_DEFAULT);
MODULE_FIRMWARE(WIL_FW_NAME_SPARROW_PLUS);
MODULE_FIRMWARE(WIL_BOARD_FILE_NAME);
+MODULE_FIRMWARE(WIL_FW_NAME_TALYN);
+MODULE_FIRMWARE(WIL_BRD_NAME_TALYN);
static
void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 718161b829c2..388b3d4717ca 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -145,7 +145,7 @@ fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
capabilities);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
memcpy(wil->fw_capabilities, rec->capabilities,
- min(sizeof(wil->fw_capabilities), capa_size));
+ min_t(size_t, sizeof(wil->fw_capabilities), capa_size));
wil_hex_dump_fw("CAPA", DUMP_PREFIX_OFFSET, 16, 1,
rec->capabilities, capa_size, false);
return 0;
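
The min() to min_t() change above works around the kernel's type-strict min(): when its two operands have different types it triggers a compile-time type-mismatch warning, so min_t(size_t, ...) casts both sides first. In miniature:

	/* min(a, b) requires one common type; min_t() forces it:
	 *   min_t(size_t, sizeof(dst), len)
	 * compares both values as size_t whatever len's declared type is.
	 */
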
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 84e9840c1752..5d287a8e1b45 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -44,6 +44,8 @@
(~(BIT_DMA_EP_RX_ICR_RX_HTRSH)))
#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
+#define WIL6210_IMC_TX_EDMA BIT_TX_STATUS_IRQ
+#define WIL6210_IMC_RX_EDMA BIT_RX_STATUS_IRQ
#define WIL6210_IMC_MISC_NO_HALP (ISR_MISC_FW_READY | \
ISR_MISC_MBOX_EVT | \
ISR_MISC_FW_ERROR)
@@ -87,12 +89,24 @@ static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
WIL6210_IRQ_DISABLE);
}
+static void wil6210_mask_irq_tx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
+}
+
static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
{
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
WIL6210_IRQ_DISABLE);
}
+static void wil6210_mask_irq_rx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
+}
+
static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
{
wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n",
@@ -125,6 +139,12 @@ void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
WIL6210_IMC_TX);
}
+void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_TX_EDMA);
+}
+
void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
{
bool unmask_rx_htrsh = atomic_read(&wil->connected_vifs) > 0;
@@ -133,6 +153,12 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
unmask_rx_htrsh ? WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH);
}
+void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_RX_EDMA);
+}
+
static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
{
wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n",
@@ -164,7 +190,9 @@ void wil_mask_irq(struct wil6210_priv *wil)
wil_dbg_irq(wil, "mask_irq\n");
wil6210_mask_irq_tx(wil);
+ wil6210_mask_irq_tx_edma(wil);
wil6210_mask_irq_rx(wil);
+ wil6210_mask_irq_rx_edma(wil);
wil6210_mask_irq_misc(wil, true);
wil6210_mask_irq_pseudo(wil);
}
@@ -179,13 +207,43 @@ void wil_unmask_irq(struct wil6210_priv *wil)
WIL_ICR_ICC_VALUE);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_MISC_VALUE);
+ wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
wil6210_unmask_irq_pseudo(wil);
- wil6210_unmask_irq_tx(wil);
- wil6210_unmask_irq_rx(wil);
+ if (wil->use_enhanced_dma_hw) {
+ wil6210_unmask_irq_tx_edma(wil);
+ wil6210_unmask_irq_rx_edma(wil);
+ } else {
+ wil6210_unmask_irq_tx(wil);
+ wil6210_unmask_irq_rx(wil);
+ }
wil6210_unmask_irq_misc(wil, true);
}
+void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil)
+{
+ u32 moderation;
+
+ wil_s(wil, RGF_INT_GEN_IDLE_TIME_LIMIT, WIL_EDMA_IDLE_TIME_LIMIT_USEC);
+
+ wil_s(wil, RGF_INT_GEN_TIME_UNIT_LIMIT, WIL_EDMA_TIME_UNIT_CLK_CYCLES);
+
+ /* Update RX and TX moderation */
+ moderation = wil->rx_max_burst_duration |
+ (WIL_EDMA_AGG_WATERMARK << WIL_EDMA_AGG_WATERMARK_POS);
+ wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_0, moderation);
+ wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_1, moderation);
+
+ /* Treat special events as regular
+ * (set bit 0 to 0x1 and clear bits 1-8)
+ */
+ wil_c(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1FE);
+ wil_s(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1);
+}
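
Both INT_GEN_CFG registers are programmed with the same word: the Rx burst duration in the low bits and the aggregation watermark shifted above it. Restated as a helper purely for readability (a sketch, not driver code):

	static inline u32 wil_edma_moderation_word(struct wil6210_priv *wil)
	{
		return wil->rx_max_burst_duration |
		       (WIL_EDMA_AGG_WATERMARK << WIL_EDMA_AGG_WATERMARK_POS);
	}
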
+
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
@@ -294,6 +352,97 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
return IRQ_HANDLED;
}
+static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ bool need_unmask = true;
+
+ trace_wil6210_irq_rx(isr);
+ wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err(wil, "spurious IRQ: RX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_rx_edma(wil);
+
+ if (likely(isr & BIT_RX_STATUS_IRQ)) {
+ wil_dbg_irq(wil, "RX status ring\n");
+ isr &= ~BIT_RX_STATUS_IRQ;
+ if (likely(test_bit(wil_status_fwready, wil->status))) {
+ if (likely(test_bit(wil_status_napi_en, wil->status))) {
+ wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
+ need_unmask = false;
+ napi_schedule(&wil->napi_rx);
+ } else {
+ wil_err(wil,
+ "Got Rx interrupt while stopping interface\n");
+ }
+ } else {
+ wil_err(wil, "Got Rx interrupt while in reset\n");
+ }
+ }
+
+ if (unlikely(isr))
+ wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
+
+ /* Rx IRQ will be enabled when NAPI processing finished */
+
+ atomic_inc(&wil->isr_count_rx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_rx_edma(wil);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ bool need_unmask = true;
+
+ trace_wil6210_irq_tx(isr);
+ wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err(wil, "spurious IRQ: TX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_tx_edma(wil);
+
+ if (likely(isr & BIT_TX_STATUS_IRQ)) {
+ wil_dbg_irq(wil, "TX status ring\n");
+ isr &= ~BIT_TX_STATUS_IRQ;
+ if (likely(test_bit(wil_status_fwready, wil->status))) {
+ wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
+ need_unmask = false;
+ napi_schedule(&wil->napi_tx);
+ } else {
+ wil_err(wil, "Got Tx status ring IRQ while in reset\n");
+ }
+ }
+
+ if (unlikely(isr))
+ wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
+
+ /* Tx IRQ will be enabled when NAPI processing finished */
+
+ atomic_inc(&wil->isr_count_tx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_tx_edma(wil);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@@ -476,6 +625,15 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
wil6210_unmask_irq_misc(wil, false);
+ /* in the non-triple MSI case, this is done inside wil6210_thread_irq
+ * because it has to be done after unmasking the pseudo interrupt
+ */
+ if (wil->n_msi == 3 && wil->suspend_resp_rcvd) {
+ wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
+ wil->suspend_resp_comp = true;
+ wake_up_interruptible(&wil->wq);
+ }
+
return IRQ_HANDLED;
}
@@ -510,30 +668,53 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
*/
static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
{
+ u32 icm_rx, icr_rx, imv_rx;
+ u32 icm_tx, icr_tx, imv_tx;
+ u32 icm_misc, icr_misc, imv_misc;
+
if (!test_bit(wil_status_irqen, wil->status)) {
- u32 icm_rx = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, ICM));
- u32 icr_rx = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, ICR));
- u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
+ if (wil->use_enhanced_dma_hw) {
+ icm_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_rx = wil_r(wil, RGF_INT_GEN_RX_ICR +
offsetof(struct RGF_ICR, IMV));
- u32 icm_tx = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, ICM));
- u32 icr_tx = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, ICR));
- u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+ icm_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_tx = wil_r(wil, RGF_INT_GEN_TX_ICR +
+ offsetof(struct RGF_ICR, IMV));
+ } else {
+ icm_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
offsetof(struct RGF_ICR, IMV));
- u32 icm_misc = wil_ioread32_and_clear(wil->csr +
+ icm_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+ offsetof(struct RGF_ICR, IMV));
+ }
+ icm_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICM));
- u32 icr_misc = wil_ioread32_and_clear(wil->csr +
+ icr_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
- u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
+ imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
offsetof(struct RGF_ICR, IMV));
/* HALP interrupt can be unmasked when misc interrupts are
@@ -592,11 +773,11 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
* voting for wake thread - need at least 1 vote
*/
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
- (wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
+ (wil->txrx_ops.irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
rc = IRQ_WAKE_THREAD;
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
- (wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
+ (wil->txrx_ops.irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
rc = IRQ_WAKE_THREAD;
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
@@ -610,6 +791,40 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
return rc;
}
+static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+
+ /* IRQ's are in the following order:
+ * - Tx
+ * - Rx
+ * - Misc
+ */
+ rc = request_irq(irq, wil->txrx_ops.irq_tx, IRQF_SHARED,
+ WIL_NAME "_tx", wil);
+ if (rc)
+ return rc;
+
+ rc = request_irq(irq + 1, wil->txrx_ops.irq_rx, IRQF_SHARED,
+ WIL_NAME "_rx", wil);
+ if (rc)
+ goto free0;
+
+ rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
+ wil6210_irq_misc_thread,
+ IRQF_SHARED, WIL_NAME "_misc", wil);
+ if (rc)
+ goto free1;
+
+ return 0;
+free1:
+ free_irq(irq + 1, wil);
+free0:
+ free_irq(irq, wil);
+
+ return rc;
+}
+
/* can't use wil_ioread32_and_clear because ICC value is not set yet */
static inline void wil_clear32(void __iomem *addr)
{
@@ -624,6 +839,10 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
wmb(); /* make sure write completed */
@@ -646,16 +865,28 @@ void wil6210_clear_halp(struct wil6210_priv *wil)
wil6210_unmask_halp(wil);
}
-int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
+int wil6210_init_irq(struct wil6210_priv *wil, int irq)
{
int rc;
- wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx");
+ wil_dbg_misc(wil, "init_irq: %s, n_msi=%d\n",
+ wil->n_msi ? "MSI" : "INTx", wil->n_msi);
- rc = request_threaded_irq(irq, wil6210_hardirq,
- wil6210_thread_irq,
- use_msi ? 0 : IRQF_SHARED,
- WIL_NAME, wil);
+ if (wil->use_enhanced_dma_hw) {
+ wil->txrx_ops.irq_tx = wil6210_irq_tx_edma;
+ wil->txrx_ops.irq_rx = wil6210_irq_rx_edma;
+ } else {
+ wil->txrx_ops.irq_tx = wil6210_irq_tx;
+ wil->txrx_ops.irq_rx = wil6210_irq_rx;
+ }
+
+ if (wil->n_msi == 3)
+ rc = wil6210_request_3msi(wil, irq);
+ else
+ rc = request_threaded_irq(irq, wil6210_hardirq,
+ wil6210_thread_irq,
+ wil->n_msi ? 0 : IRQF_SHARED,
+ WIL_NAME, wil);
return rc;
}
@@ -665,4 +896,8 @@ void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
wil_mask_irq(wil);
free_irq(irq, wil);
+ if (wil->n_msi == 3) {
+ free_irq(irq + 1, wil);
+ free_irq(irq + 2, wil);
+ }
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index e7006c2428a0..7debed6bec06 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -21,11 +21,14 @@
#include "wil6210.h"
#include "txrx.h"
+#include "txrx_edma.h"
#include "wmi.h"
#include "boot_loader.h"
#define WAIT_FOR_HALP_VOTE_MS 100
#define WAIT_FOR_SCAN_ABORT_MS 1000
+#define WIL_DEFAULT_NUM_RX_STATUS_RINGS 1
+#define WIL_BOARD_FILE_MAX_NAMELEN 128
bool debug_fw; /* = false; */
module_param(debug_fw, bool, 0444);
@@ -110,9 +113,29 @@ MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444);
MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order");
-#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */
+enum {
+ WIL_BOOT_ERR,
+ WIL_BOOT_VANILLA,
+ WIL_BOOT_PRODUCTION,
+ WIL_BOOT_DEVELOPMENT,
+};
+
+enum {
+ WIL_SIG_STATUS_VANILLA = 0x0,
+ WIL_SIG_STATUS_DEVELOPMENT = 0x1,
+ WIL_SIG_STATUS_PRODUCTION = 0x2,
+ WIL_SIG_STATUS_CORRUPTED_PRODUCTION = 0x3,
+};
+
+#define RST_DELAY (20) /* msec, for loop in @wil_wait_device_ready */
#define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */
+#define PMU_READY_DELAY_MS (4) /* ms, for sleep in @wil_wait_device_ready */
+
+#define OTP_HW_DELAY (200) /* usec, loop in @wil_wait_device_ready_talyn_mb */
+/* round up to be above 2 ms total */
+#define OTP_HW_COUNT (1 + 2000 / OTP_HW_DELAY)
+
/*
* Due to a hardware issue,
* one has to read/write to/from NIC in 32-bit chunks;
@@ -160,6 +183,37 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
}
}
+static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
+{
+ struct wil_ring *ring = &wil->ring_tx[id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
+
+ lockdep_assert_held(&wil->mutex);
+
+ if (!ring->va)
+ return;
+
+ wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
+
+ spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = false;
+ txdata->mid = U8_MAX;
+ txdata->enabled = 0; /* no Tx can be in progress or start anew */
+ spin_unlock_bh(&txdata->lock);
+ /* napi_synchronize waits for completion of the current NAPI run but
+ * will not prevent the next one from starting.
+ * Add a memory barrier to guarantee that txdata->enabled is zeroed
+ * before napi_synchronize so that the next scheduled NAPI will not
+ * handle this ring
+ */
+ wmb();
+ /* make sure NAPI won't touch this vring */
+ if (test_bit(wil_status_napi_en, wil->status))
+ napi_synchronize(&wil->napi_tx);
+
+ wil->txrx_ops.ring_fini_tx(wil, ring);
+}
+
static void wil_disconnect_cid(struct wil6210_vif *vif, int cid,
u16 reason_code, bool from_event)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
@@ -219,12 +273,13 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
/* release vrings */
- for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
- if (wil->vring2cid_tid[i][0] == cid)
- wil_vring_fini_tx(wil, i);
+ for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
+ if (wil->ring2cid_tid[i][0] == cid)
+ wil_ring_fini_tx(wil, i);
}
/* statistics */
memset(&sta->stats, 0, sizeof(sta->stats));
+ sta->stats.tx_latency_min_us = U32_MAX;
}
static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid)
@@ -453,18 +508,19 @@ static void wil_fw_error_worker(struct work_struct *work)
mutex_unlock(&wil->mutex);
}
-static int wil_find_free_vring(struct wil6210_priv *wil)
+static int wil_find_free_ring(struct wil6210_priv *wil)
{
int i;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
- for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- if (!wil->vring_tx[i].va)
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ if (!wil->ring_tx[i].va)
return i;
}
return -EINVAL;
}
-int wil_tx_init(struct wil6210_vif *vif, int cid)
+int wil_ring_init_tx(struct wil6210_vif *vif, int cid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc = -EINVAL, ringid;
@@ -473,16 +529,17 @@ int wil_tx_init(struct wil6210_vif *vif, int cid)
wil_err(wil, "No connection pending\n");
goto out;
}
- ringid = wil_find_free_vring(wil);
+ ringid = wil_find_free_ring(wil);
if (ringid < 0) {
wil_err(wil, "No free vring found\n");
goto out;
}
- wil_dbg_wmi(wil, "Configure for connection CID %d MID %d vring %d\n",
+ wil_dbg_wmi(wil, "Configure for connection CID %d MID %d ring %d\n",
cid, vif->mid, ringid);
- rc = wil_vring_init_tx(vif, ringid, 1 << tx_ring_order, cid, 0);
+ rc = wil->txrx_ops.ring_init_tx(vif, ringid, 1 << tx_ring_order,
+ cid, 0);
if (rc)
wil_err(wil, "init TX for CID %d MID %d vring %d failed\n",
cid, vif->mid, ringid);
@@ -494,19 +551,19 @@ out:
int wil_bcast_init(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
- int ri = vif->bcast_vring, rc;
+ int ri = vif->bcast_ring, rc;
- if ((ri >= 0) && wil->vring_tx[ri].va)
+ if (ri >= 0 && wil->ring_tx[ri].va)
return 0;
- ri = wil_find_free_vring(wil);
+ ri = wil_find_free_ring(wil);
if (ri < 0)
return ri;
- vif->bcast_vring = ri;
- rc = wil_vring_init_bcast(vif, ri, 1 << bcast_ring_order);
+ vif->bcast_ring = ri;
+ rc = wil->txrx_ops.ring_init_bcast(vif, ri, 1 << bcast_ring_order);
if (rc)
- vif->bcast_vring = -1;
+ vif->bcast_ring = -1;
return rc;
}
@@ -514,13 +571,13 @@ int wil_bcast_init(struct wil6210_vif *vif)
void wil_bcast_fini(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
- int ri = vif->bcast_vring;
+ int ri = vif->bcast_ring;
if (ri < 0)
return;
- vif->bcast_vring = -1;
- wil_vring_fini_tx(wil, ri);
+ vif->bcast_ring = -1;
+ wil_ring_fini_tx(wil, ri);
}
void wil_bcast_fini_all(struct wil6210_priv *wil)
@@ -548,7 +605,7 @@ int wil_priv_init(struct wil6210_priv *wil)
}
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++)
- spin_lock_init(&wil->vring_tx_data[i].lock);
+ spin_lock_init(&wil->ring_tx_data[i].lock);
mutex_init(&wil->mutex);
mutex_init(&wil->vif_mutex);
@@ -589,11 +646,30 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
WMI_WAKEUP_TRIGGER_BCAST;
memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
- wil->vring_idle_trsh = 16;
+ wil->ring_idle_trsh = 16;
wil->reply_mid = U8_MAX;
wil->max_vifs = 1;
+ /* edma configuration can be updated via debugfs before allocation */
+ wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS;
+ wil->use_compressed_rx_status = true;
+ wil->use_rx_hw_reordering = true;
+ wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
+
+ /* Rx status ring size should be bigger than the number of RX buffers
+ * in order to prevent backpressure on the status ring, which may
+ * cause HW freeze.
+ */
+ wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
+ /* Number of RX buffer IDs should be bigger than the RX descriptor
+ * ring size as in HW reorder flow, the HW can consume additional
+ * buffers before releasing the previous ones.
+ */
+ wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT;
+
+ wil->amsdu_en = 1;
+
return 0;
out_wmi_wq:
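
The two sizing comments above boil down to a pair of inequalities; stated together (a restatement of the constraints in this hunk, not additional requirements):

	/* 1 << rx_status_ring_order  >  number of posted Rx buffers
	 *     (otherwise status-ring backpressure may freeze the HW)
	 * rx_buff_id_count  >  Rx descriptor ring size
	 *     (HW reorder can hold extra buffers before releasing old ones)
	 */
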
@@ -736,14 +812,24 @@ static void wil_bl_prepare_halt(struct wil6210_priv *wil)
static inline void wil_halt_cpu(struct wil6210_priv *wil)
{
- wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
- wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
+ if (wil->hw_version >= HW_VER_TALYN_MB) {
+ wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB,
+ BIT_USER_USER_CPU_MAN_RST);
+ wil_w(wil, RGF_USER_MAC_CPU_0_TALYN_MB,
+ BIT_USER_MAC_CPU_MAN_RST);
+ } else {
+ wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
+ wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
+ }
}
static inline void wil_release_cpu(struct wil6210_priv *wil)
{
/* Start CPU */
- wil_w(wil, RGF_USER_USER_CPU_0, 1);
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB, 1);
+ else
+ wil_w(wil, RGF_USER_USER_CPU_0, 1);
}
static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
@@ -767,11 +853,146 @@ static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
}
}
-static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
+static int wil_wait_device_ready(struct wil6210_priv *wil, int no_flash)
{
int delay = 0;
u32 x, x1 = 0;
+ /* wait until device ready. */
+ if (no_flash) {
+ msleep(PMU_READY_DELAY_MS);
+
+ wil_dbg_misc(wil, "Reset completed\n");
+ } else {
+ do {
+ msleep(RST_DELAY);
+ x = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_ready));
+ if (x1 != x) {
+ wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
+ x1, x);
+ x1 = x;
+ }
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
+ x);
+ return -ETIME;
+ }
+ } while (x != BL_READY);
+
+ wil_dbg_misc(wil, "Reset completed in %d ms\n",
+ delay * RST_DELAY);
+ }
+
+ return 0;
+}
+
+static int wil_wait_device_ready_talyn_mb(struct wil6210_priv *wil)
+{
+ u32 otp_hw;
+ u8 signature_status;
+ bool otp_signature_err;
+ bool hw_section_done;
+ u32 otp_qc_secured;
+ int delay = 0;
+
+ /* Wait for OTP signature test to complete */
+ usleep_range(2000, 2200);
+
+ wil->boot_config = WIL_BOOT_ERR;
+
+ /* Poll until OTP signature status is valid.
+ * In vanilla and development modes, when signature test is complete
+ * HW sets BIT_OTP_SIGNATURE_ERR_TALYN_MB.
+ * In production mode BIT_OTP_SIGNATURE_ERR_TALYN_MB remains 0, poll
+ * for signature status change to 2 or 3.
+ */
+ do {
+ otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
+ signature_status = WIL_GET_BITS(otp_hw, 8, 9);
+ otp_signature_err = otp_hw & BIT_OTP_SIGNATURE_ERR_TALYN_MB;
+
+ if (otp_signature_err &&
+ signature_status == WIL_SIG_STATUS_VANILLA) {
+ wil->boot_config = WIL_BOOT_VANILLA;
+ break;
+ }
+ if (otp_signature_err &&
+ signature_status == WIL_SIG_STATUS_DEVELOPMENT) {
+ wil->boot_config = WIL_BOOT_DEVELOPMENT;
+ break;
+ }
+ if (!otp_signature_err &&
+ signature_status == WIL_SIG_STATUS_PRODUCTION) {
+ wil->boot_config = WIL_BOOT_PRODUCTION;
+ break;
+ }
+ if (!otp_signature_err &&
+ signature_status ==
+ WIL_SIG_STATUS_CORRUPTED_PRODUCTION) {
+ /* Unrecognized OTP signature found. Possibly a
+ * corrupted production signature; access control
+ * is applied as in production mode, therefore
+ * do not fail
+ */
+ wil->boot_config = WIL_BOOT_PRODUCTION;
+ break;
+ }
+ if (delay++ > OTP_HW_COUNT)
+ break;
+
+ usleep_range(OTP_HW_DELAY, OTP_HW_DELAY + 10);
+ } while (!otp_signature_err && signature_status == 0);
+
+ if (wil->boot_config == WIL_BOOT_ERR) {
+ wil_err(wil,
+ "invalid boot config, signature_status %d otp_signature_err %d\n",
+ signature_status, otp_signature_err);
+ return -ETIME;
+ }
+
+ wil_dbg_misc(wil,
+ "signature test done in %d usec, otp_hw 0x%x, boot_config %d\n",
+ delay * OTP_HW_DELAY, otp_hw, wil->boot_config);
+
+ if (wil->boot_config == WIL_BOOT_VANILLA)
+ /* Assuming not SPI boot (currently not supported) */
+ goto out;
+
+ hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
+ delay = 0;
+
+ while (!hw_section_done) {
+ msleep(RST_DELAY);
+
+ otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
+ hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
+
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "TO waiting for hw_section_done\n");
+ return -ETIME;
+ }
+ }
+
+ wil_dbg_misc(wil, "HW section done in %d ms\n", delay * RST_DELAY);
+
+ otp_qc_secured = wil_r(wil, RGF_OTP_QC_SECURED);
+ wil->secured_boot = otp_qc_secured & BIT_BOOT_FROM_ROM ? 1 : 0;
+ wil_dbg_misc(wil, "secured boot is %sabled\n",
+ wil->secured_boot ? "en" : "dis");
+
+out:
+ wil_dbg_misc(wil, "Reset completed\n");
+
+ return 0;
+}
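
The polling loop above admits exactly four valid outcomes; summarized as a table (derived from the code itself, not from external documentation):

	/*
	 * otp_signature_err  signature_status           boot_config
	 * -----------------  -------------------------  --------------------
	 * set                0 (VANILLA)                WIL_BOOT_VANILLA
	 * set                1 (DEVELOPMENT)            WIL_BOOT_DEVELOPMENT
	 * clear              2 (PRODUCTION)             WIL_BOOT_PRODUCTION
	 * clear              3 (CORRUPTED_PRODUCTION)   WIL_BOOT_PRODUCTION
	 *
	 * Anything else - including a poll timeout - leaves WIL_BOOT_ERR
	 * and the function fails with -ETIME.
	 */
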
+
+static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
+{
+ u32 x;
+ int rc;
+
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
/* Clear MAC link up */
@@ -811,10 +1032,17 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
- wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
- wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
- wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
- wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
+ if (wil->hw_version >= HW_VER_TALYN_MB) {
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x7e000000);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0xc00000f0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
+ } else {
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xfe000000);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
+ }
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
@@ -830,34 +1058,12 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
- /* wait until device ready. typical time is 20..80 msec */
- if (no_flash)
- do {
- msleep(RST_DELAY);
- x = wil_r(wil, USER_EXT_USER_PMU_3);
- if (delay++ > RST_COUNT) {
- wil_err(wil, "Reset not completed, PMU_3 0x%08x\n",
- x);
- return -ETIME;
- }
- } while ((x & BIT_PMU_DEVICE_RDY) == 0);
+ if (wil->hw_version == HW_VER_TALYN_MB)
+ rc = wil_wait_device_ready_talyn_mb(wil);
else
- do {
- msleep(RST_DELAY);
- x = wil_r(wil, RGF_USER_BL +
- offsetof(struct bl_dedicated_registers_v0,
- boot_loader_ready));
- if (x1 != x) {
- wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
- x1, x);
- x1 = x;
- }
- if (delay++ > RST_COUNT) {
- wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
- x);
- return -ETIME;
- }
- } while (x != BL_READY);
+ rc = wil_wait_device_ready(wil, no_flash);
+ if (rc)
+ return rc;
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
@@ -865,7 +1071,7 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
- if (no_flash) {
+ if (wil->hw_version < HW_VER_TALYN_MB && no_flash) {
/* Reset OTP HW vectors to fit 40MHz */
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001);
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027);
@@ -880,7 +1086,6 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57);
}
- wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
return 0;
}
@@ -925,6 +1130,9 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM;
}
+ if (test_bit(WMI_FW_CAPABILITY_TX_REQ_EXT, wil->fw_capabilities))
+ wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX;
+
if (wil->platform_ops.set_features) {
features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
wil->fw_capabilities) &&
@@ -932,8 +1140,20 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
wil->platform_capa)) ?
BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0;
+ if (wil->n_msi == 3)
+ features |= BIT(WIL_PLATFORM_FEATURE_TRIPLE_MSI);
+
wil->platform_ops.set_features(wil->platform_handle, features);
}
+
+ if (test_bit(WMI_FW_CAPABILITY_BACK_WIN_SIZE_64,
+ wil->fw_capabilities)) {
+ wil->max_agg_wsize = WIL_MAX_AGG_WSIZE_64;
+ wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE_128;
+ } else {
+ wil->max_agg_wsize = WIL_MAX_AGG_WSIZE;
+ wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE;
+ }
}
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@@ -945,6 +1165,28 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
le32_to_cpus(&r->head);
}
+/* construct actual board file name to use */
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len)
+{
+ const char *board_file;
+ const char *wil_talyn_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
+ WIL_FW_NAME_TALYN;
+
+ if (wil->board_file) {
+ board_file = wil->board_file;
+ } else {
+ /* If specific FW file is used for Talyn,
+ * use specific board file
+ */
+ if (strcmp(wil->wil_fw_name, wil_talyn_fw_name) == 0)
+ board_file = WIL_BRD_NAME_TALYN;
+ else
+ board_file = WIL_BOARD_FILE_NAME;
+ }
+
+ strlcpy(buf, board_file, len);
+}
+
static int wil_get_bl_info(struct wil6210_priv *wil)
{
struct net_device *ndev = wil->main_ndev;
@@ -1042,8 +1284,14 @@ static int wil_get_otp_info(struct wil6210_priv *wil)
struct net_device *ndev = wil->main_ndev;
struct wiphy *wiphy = wil_to_wiphy(wil);
u8 mac[8];
+ int mac_addr;
- wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(RGF_OTP_MAC),
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ mac_addr = RGF_OTP_MAC_TALYN_MB;
+ else
+ mac_addr = RGF_OTP_MAC;
+
+ wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
sizeof(mac));
if (!is_valid_ether_addr(mac)) {
wil_err(wil, "Invalid MAC %pM\n", mac);
@@ -1060,7 +1308,7 @@ static int wil_get_otp_info(struct wil6210_priv *wil)
static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
{
- ulong to = msecs_to_jiffies(1000);
+ ulong to = msecs_to_jiffies(2000);
ulong left = wait_for_completion_timeout(&wil->wmi_ready, to);
if (0 == left) {
@@ -1147,8 +1395,13 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
/* it is W1C, clear by writing back same value */
wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
- /* clear PAL_UNIT_ICR (potential D0->D3 leftover) */
- wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR), 0);
+ /* clear PAL_UNIT_ICR (potential D0->D3 leftover)
+ * In Talyn-MB host cannot access this register due to
+ * access control, hence PAL_UNIT_ICR is cleared by the FW
+ */
+ if (wil->hw_version < HW_VER_TALYN_MB)
+ wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR),
+ 0);
if (wil->fw_calib_result > 0) {
__le32 val = cpu_to_le32(wil->fw_calib_result |
@@ -1284,7 +1537,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
rc = wil_target_reset(wil, no_flash);
wil6210_clear_irq(wil);
wil_enable_irq(wil);
- wil_rx_fini(wil);
+ wil->txrx_ops.rx_fini(wil);
+ wil->txrx_ops.tx_fini(wil);
if (rc) {
if (!no_flash)
wil_bl_crash_info(wil, true);
@@ -1304,8 +1558,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil_set_oob_mode(wil, oob_mode);
if (load_fw) {
+ char board_file[WIL_BOARD_FILE_MAX_NAMELEN];
+
+ if (wil->secured_boot) {
+ wil_err(wil, "secured boot is not supported\n");
+ return -ENOTSUPP;
+ }
+
+ board_file[0] = '\0';
+ wil_get_board_file(wil, board_file, sizeof(board_file));
wil_info(wil, "Use firmware <%s> + board <%s>\n",
- wil->wil_fw_name, WIL_BOARD_FILE_NAME);
+ wil->wil_fw_name, board_file);
if (!no_flash)
wil_bl_prepare_halt(wil);
@@ -1317,11 +1580,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (rc)
goto out;
if (wil->brd_file_addr)
- rc = wil_request_board(wil, WIL_BOARD_FILE_NAME);
+ rc = wil_request_board(wil, board_file);
else
- rc = wil_request_firmware(wil,
- WIL_BOARD_FILE_NAME,
- true);
+ rc = wil_request_firmware(wil, board_file, true);
if (rc)
goto out;
@@ -1337,7 +1598,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
clear_bit(wil_status_resetting, wil->status);
if (load_fw) {
- wil_configure_interrupt_moderation(wil);
wil_unmask_irq(wil);
/* we just started MAC, wait for FW ready */
@@ -1352,6 +1612,15 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
}
+ wil->txrx_ops.configure_interrupt_moderation(wil);
+
+ /* Enable OFU rdy valid bug fix, to prevent hang in oful34_rx
+ * while there is back-pressure from Host during RX
+ */
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ wil_s(wil, RGF_DMA_MISC_CTL,
+ BIT_OFUL34_RDY_VALID_BUG_FIX_EN);
+
rc = wil_restore_vifs(wil);
if (rc) {
wil_err(wil, "failed to restore vifs, rc %d\n", rc);
@@ -1406,8 +1675,12 @@ int __wil_up(struct wil6210_priv *wil)
if (rc)
return rc;
- /* Rx VRING. After MAC and beacon */
- rc = wil_rx_init(wil, 1 << rx_ring_order);
+ /* Rx RING. After MAC and beacon */
+ rc = wil->txrx_ops.rx_init(wil, 1 << rx_ring_order);
+ if (rc)
+ return rc;
+
+ rc = wil->txrx_ops.tx_init(wil);
if (rc)
return rc;
@@ -1568,3 +1841,11 @@ void wil_halp_unvote(struct wil6210_priv *wil)
mutex_unlock(&wil->halp.lock);
}
+
+void wil_init_txrx_ops(struct wil6210_priv *wil)
+{
+ if (wil->use_enhanced_dma_hw)
+ wil_init_txrx_ops_edma(wil);
+ else
+ wil_init_txrx_ops_legacy_dma(wil);
+}
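
wil_init_txrx_ops() selects between two fills of one dispatch table so legacy DMA and enhanced DMA (edma) share a single datapath. The authoritative definition lives in wil6210.h, which this diff does not show; reconstructed from the call sites exercised in this patch (member order and exact signatures inferred from usage, and the real struct has additional members):

	struct wil_txrx_ops {
		void (*configure_interrupt_moderation)(struct wil6210_priv *wil);
		/* Tx */
		int (*ring_init_tx)(struct wil6210_vif *vif, int ring_id,
				    int size, int cid, int tid);
		void (*ring_fini_tx)(struct wil6210_priv *wil,
				     struct wil_ring *ring);
		int (*ring_init_bcast)(struct wil6210_vif *vif, int id, int size);
		int (*tx_init)(struct wil6210_priv *wil);
		void (*tx_fini)(struct wil6210_priv *wil);
		irqreturn_t (*irq_tx)(int irq, void *cookie);
		/* Rx */
		int (*rx_init)(struct wil6210_priv *wil, u16 ring_size);
		void (*rx_fini)(struct wil6210_priv *wil);
		void (*get_reorder_params)(struct wil6210_priv *wil,
					   struct sk_buff *skb, int *tid,
					   int *cid, int *mid, u16 *seq,
					   int *mcast, int *retry);
		int (*wmi_addba_rx_resp)(struct wil6210_priv *wil, u8 mid,
					 u8 cid, u8 tid, u8 token, u16 status,
					 bool amsdu, u16 agg_wsize,
					 u16 timeout);
		bool (*is_rx_idle)(struct wil6210_priv *wil);
		irqreturn_t (*irq_rx)(int irq, void *cookie);
	};
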
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index eb6c14ed65a4..7a78a06bd356 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -120,6 +120,27 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
return done;
}
+static int wil6210_netdev_poll_rx_edma(struct napi_struct *napi, int budget)
+{
+ struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+ napi_rx);
+ int quota = budget;
+ int done;
+
+ wil_rx_handle_edma(wil, &quota);
+ done = budget - quota;
+
+ if (done < budget) {
+ napi_complete_done(napi, done);
+ wil6210_unmask_irq_rx_edma(wil);
+ wil_dbg_txrx(wil, "NAPI RX complete\n");
+ }
+
+ wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
+
+ return done;
+}
+
static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
{
struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
@@ -129,11 +150,11 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
/* always process ALL Tx complete, regardless budget - it is fast */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- struct vring *vring = &wil->vring_tx[i];
- struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+ struct wil_ring *ring = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
struct wil6210_vif *vif;
- if (!vring->va || !txdata->enabled ||
+ if (!ring->va || !txdata->enabled ||
txdata->mid >= wil->max_vifs)
continue;
@@ -157,6 +178,30 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
return min(tx_done, budget);
}
+static int wil6210_netdev_poll_tx_edma(struct napi_struct *napi, int budget)
+{
+ struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+ napi_tx);
+ int tx_done;
+ /* There is only one status TX ring */
+ struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
+
+ if (!sring->va)
+ return 0;
+
+ tx_done = wil_tx_sring_handler(wil, sring);
+
+ if (tx_done < budget) {
+ napi_complete(napi);
+ wil6210_unmask_irq_tx_edma(wil);
+ wil_dbg_txrx(wil, "NAPI TX complete\n");
+ }
+
+ wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
+
+ return min(tx_done, budget);
+}
+
static void wil_dev_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -228,7 +273,7 @@ static void wil_p2p_discovery_timer_fn(struct timer_list *t)
static void wil_vif_init(struct wil6210_vif *vif)
{
- vif->bcast_vring = -1;
+ vif->bcast_ring = -1;
mutex_init(&vif->probe_client_mutex);
@@ -418,11 +463,21 @@ int wil_if_add(struct wil6210_priv *wil)
}
init_dummy_netdev(&wil->napi_ndev);
- netif_napi_add(&wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
- WIL6210_NAPI_BUDGET);
- netif_tx_napi_add(&wil->napi_ndev,
- &wil->napi_tx, wil6210_netdev_poll_tx,
- WIL6210_NAPI_BUDGET);
+ if (wil->use_enhanced_dma_hw) {
+ netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+ wil6210_netdev_poll_rx_edma,
+ WIL6210_NAPI_BUDGET);
+ netif_tx_napi_add(&wil->napi_ndev,
+ &wil->napi_tx, wil6210_netdev_poll_tx_edma,
+ WIL6210_NAPI_BUDGET);
+ } else {
+ netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+ wil6210_netdev_poll_rx,
+ WIL6210_NAPI_BUDGET);
+ netif_tx_napi_add(&wil->napi_ndev,
+ &wil->napi_tx, wil6210_netdev_poll_tx,
+ WIL6210_NAPI_BUDGET);
+ }
wil_update_net_queues_bh(wil, vif, NULL, true);
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 19cbc6add637..89119e7facd0 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -24,11 +24,11 @@
#include <linux/rtnetlink.h>
#include <linux/pm_runtime.h>
-static bool use_msi = true;
-module_param(use_msi, bool, 0444);
-MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
+static int n_msi = 3;
+module_param(n_msi, int, 0444);
+MODULE_PARM_DESC(n_msi, " Use MSI interrupts: 0 - use INTx, 1 - single MSI, 3 - three MSIs (default)");
-static bool ftm_mode;
+bool ftm_mode;
module_param(ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
@@ -85,7 +85,7 @@ int wil_set_capabilities(struct wil6210_priv *wil)
wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE;
break;
case JTAG_DEV_ID_TALYN:
- wil->hw_name = "Talyn";
+ wil->hw_name = "Talyn-MA";
wil->hw_version = HW_VER_TALYN;
memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping));
wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
@@ -93,6 +93,25 @@ int wil_set_capabilities(struct wil6210_priv *wil)
if (wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1) &
BIT_NO_FLASH_INDICATION)
set_bit(hw_capa_no_flash, wil->hw_capa);
+ wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
+ WIL_FW_NAME_TALYN;
+ if (wil_fw_verify_file_exists(wil, wil_fw_name))
+ wil->wil_fw_name = wil_fw_name;
+ break;
+ case JTAG_DEV_ID_TALYN_MB:
+ wil->hw_name = "Talyn-MB";
+ wil->hw_version = HW_VER_TALYN_MB;
+ memcpy(fw_mapping, talyn_mb_fw_mapping,
+ sizeof(talyn_mb_fw_mapping));
+ wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
+ wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
+ set_bit(hw_capa_no_flash, wil->hw_capa);
+ wil->use_enhanced_dma_hw = true;
+ wil->use_rx_hw_reordering = true;
+ wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
+ WIL_FW_NAME_TALYN;
+ if (wil_fw_verify_file_exists(wil, wil_fw_name))
+ wil->wil_fw_name = wil_fw_name;
break;
default:
wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
@@ -102,6 +121,8 @@ int wil_set_capabilities(struct wil6210_priv *wil)
return -EINVAL;
}
+ wil_init_txrx_ops(wil);
+
iccm_section = wil_find_fw_mapping("fw_code");
if (!iccm_section) {
wil_err(wil, "fw_code section not found in fw_mapping\n");
@@ -129,12 +150,24 @@ int wil_set_capabilities(struct wil6210_priv *wil)
void wil_disable_irq(struct wil6210_priv *wil)
{
- disable_irq(wil->pdev->irq);
+ int irq = wil->pdev->irq;
+
+ disable_irq(irq);
+ if (wil->n_msi == 3) {
+ disable_irq(irq + 1);
+ disable_irq(irq + 2);
+ }
}
void wil_enable_irq(struct wil6210_priv *wil)
{
- enable_irq(wil->pdev->irq);
+ int irq = wil->pdev->irq;
+
+ enable_irq(irq);
+ if (wil->n_msi == 3) {
+ enable_irq(irq + 1);
+ enable_irq(irq + 2);
+ }
}
static void wil_remove_all_additional_vifs(struct wil6210_priv *wil)
@@ -161,28 +194,47 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
* and only MSI should be used
*/
int msi_only = pdev->msi_enabled;
- bool _use_msi = use_msi;
wil_dbg_misc(wil, "if_pcie_enable\n");
pci_set_master(pdev);
- wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
+ /* how many MSI interrupts to request? */
+ switch (n_msi) {
+ case 3:
+ case 1:
+ wil_dbg_misc(wil, "Setup %d MSI interrupts\n", n_msi);
+ break;
+ case 0:
+ wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
+ break;
+ default:
+ wil_err(wil, "Invalid n_msi=%d, default to 1\n", n_msi);
+ n_msi = 1;
+ }
+
+ if (n_msi == 3 &&
+ pci_alloc_irq_vectors(pdev, n_msi, n_msi, PCI_IRQ_MSI) < n_msi) {
+ wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+ n_msi = 1;
+ }
- if (use_msi && pci_enable_msi(pdev)) {
+ if (n_msi == 1 && pci_enable_msi(pdev)) {
wil_err(wil, "pci_enable_msi failed, use INTx\n");
- _use_msi = false;
+ n_msi = 0;
}
- if (!_use_msi && msi_only) {
+ wil->n_msi = n_msi;
+
+ if (wil->n_msi == 0 && msi_only) {
wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
rc = -ENODEV;
goto stop_master;
}
- rc = wil6210_init_irq(wil, pdev->irq, _use_msi);
+ rc = wil6210_init_irq(wil, pdev->irq);
if (rc)
- goto stop_master;
+ goto release_vectors;
/* need reset here to obtain MAC */
mutex_lock(&wil->mutex);
@@ -195,8 +247,9 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
release_irq:
wil6210_fini_irq(wil, pdev->irq);
- /* safe to call if no MSI */
- pci_disable_msi(pdev);
+ release_vectors:
+ /* safe to call if no allocation */
+ pci_free_irq_vectors(pdev);
stop_master:
pci_clear_master(pdev);
return rc;
@@ -257,8 +310,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
.fw_recovery = wil_platform_rop_fw_recovery,
};
u32 bar_size = pci_resource_len(pdev, 0);
- int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
- int i;
+ int dma_addr_size[] = {64, 48, 40, 32}; /* keep descending order */
+ int i, start_idx;
/* check HW */
dev_info(&pdev->dev, WIL_NAME
@@ -293,24 +346,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto if_free;
}
/* rollback to err_plat */
-
- /* device supports >32bit addresses */
- for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
- rc = dma_set_mask_and_coherent(dev,
- DMA_BIT_MASK(dma_addr_size[i]));
- if (rc) {
- dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
- dma_addr_size[i], rc);
- continue;
- }
- dev_info(dev, "using dma mask %d", dma_addr_size[i]);
- wil->dma_addr_size = dma_addr_size[i];
- break;
- }
-
- if (wil->dma_addr_size == 0)
- goto err_plat;
-
rc = pci_enable_device(pdev);
if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
@@ -350,6 +385,28 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc);
goto err_iounmap;
}
+
+ /* device supports >32-bit addresses;
+ * for legacy DMA, start from the 48-bit mask
+ */
+ start_idx = wil->use_enhanced_dma_hw ? 0 : 1;
+
+ for (i = start_idx; i < ARRAY_SIZE(dma_addr_size); i++) {
+ rc = dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(dma_addr_size[i]));
+ if (rc) {
+ dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
+ dma_addr_size[i], rc);
+ continue;
+ }
+ dev_info(dev, "using dma mask %d", dma_addr_size[i]);
+ wil->dma_addr_size = dma_addr_size[i];
+ break;
+ }
+
+ if (wil->dma_addr_size == 0)
+ goto err_iounmap;
+
wil6210_clear_irq(wil);
/* FW should raise IRQ when ready */
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index ba81fb3ac96f..3a4194779ddf 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -211,7 +211,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
goto reject_suspend;
}
- if (!wil_is_rx_idle(wil)) {
+ if (!wil->txrx_ops.is_rx_idle(wil)) {
wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
wil->suspend_stats.rejected_by_host++;
goto reject_suspend;
@@ -235,9 +235,9 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
start = jiffies;
data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
- while (!wil_is_rx_idle(wil)) {
+ while (!wil->txrx_ops.is_rx_idle(wil)) {
if (time_after(jiffies, data_comp_to)) {
- if (wil_is_rx_idle(wil))
+ if (wil->txrx_ops.is_rx_idle(wil))
break;
wil_err(wil,
"TO waiting for idle RX, suspend failed\n");
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 76f8084c1fd8..b608aa16b4f1 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -95,17 +95,17 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
struct wil6210_vif *vif;
struct net_device *ndev;
- struct vring_rx_desc *d = wil_skb_rxdesc(skb);
- int tid = wil_rxdesc_tid(d);
- int cid = wil_rxdesc_cid(d);
- int mid = wil_rxdesc_mid(d);
- u16 seq = wil_rxdesc_seq(d);
- int mcast = wil_rxdesc_mcast(d);
- struct wil_sta_info *sta = &wil->sta[cid];
+ int tid, cid, mid, mcast, retry;
+ u16 seq;
+ struct wil_sta_info *sta;
struct wil_tid_ampdu_rx *r;
u16 hseq;
int index;
+ wil->txrx_ops.get_reorder_params(wil, skb, &tid, &cid, &mid, &seq,
+ &mcast, &retry);
+ sta = &wil->sta[cid];
+
wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
mid, cid, tid, seq, mcast);
@@ -117,11 +117,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
}
ndev = vif_to_ndev(vif);
- if (unlikely(mcast)) {
- wil_netif_rx_any(skb, ndev);
- return;
- }
-
spin_lock(&sta->tid_rx_lock);
r = sta->tid_rx[tid];
@@ -130,6 +125,19 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
goto out;
}
+ if (unlikely(mcast)) {
+ if (retry && seq == r->mcast_last_seq) {
+ r->drop_dup_mcast++;
+ wil_dbg_txrx(wil, "Rx drop: dup mcast seq 0x%03x\n",
+ seq);
+ dev_kfree_skb(skb);
+ goto out;
+ }
+ r->mcast_last_seq = seq;
+ wil_netif_rx_any(skb, ndev);
+ goto out;
+ }
+
r->total++;
hseq = r->head_seq_num;
@@ -262,6 +270,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
r->buf_size = size;
r->stored_mpdu_num = 0;
r->first_time = true;
+ r->mcast_last_seq = U16_MAX;
return r;
}
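Two details make the relocated multicast handling safe. First, the duplicate test only fires inside the reorder context: a retransmitted multicast frame (retry bit set) carrying the same sequence number as the last accepted one is dropped and counted in drop_dup_mcast. Second, mcast_last_seq starts at U16_MAX, which no 12-bit 802.11 sequence number can equal, so the very first frame is never misclassified as a duplicate. A condensed sketch using the names from the hunks above:

	if (mcast) {
		if (retry && seq == r->mcast_last_seq) {
			r->drop_dup_mcast++;	/* retransmitted duplicate */
			dev_kfree_skb(skb);
		} else {
			r->mcast_last_seq = seq; /* remember last accepted SN */
			wil_netif_rx_any(skb, ndev);
		}
	}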
@@ -288,7 +297,7 @@ void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
/* ADDBA processing */
static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
{
- u16 max_agg_size = min_t(u16, WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE /
+ u16 max_agg_size = min_t(u16, wil->max_agg_wsize, wil->max_ampdu_size /
(mtu_max + WIL_MAX_MPDU_OVERHEAD));
if (!req_agg_wsize)
@@ -315,7 +324,10 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
* bits 6..15: buffer size
*/
u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
- bool agg_amsdu = !!(param_set & BIT(0));
+ bool agg_amsdu = wil->use_enhanced_dma_hw &&
+ wil->use_rx_hw_reordering &&
+ test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
+ wil->amsdu_en && (param_set & BIT(0));
int ba_policy = param_set & BIT(1);
u16 status = WLAN_STATUS_SUCCESS;
u16 ssn = seq_ctrl >> 4;
@@ -352,16 +364,17 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
if (status == WLAN_STATUS_SUCCESS) {
if (req_agg_wsize == 0) {
wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
- WIL_MAX_AGG_WSIZE);
- agg_wsize = WIL_MAX_AGG_WSIZE;
+ wil->max_agg_wsize);
+ agg_wsize = wil->max_agg_wsize;
} else {
agg_wsize = min_t(u16,
- WIL_MAX_AGG_WSIZE, req_agg_wsize);
+ wil->max_agg_wsize, req_agg_wsize);
}
}
- rc = wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token, status,
- agg_amsdu, agg_wsize, agg_timeout);
+ rc = wil->txrx_ops.wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token,
+ status, agg_amsdu, agg_wsize,
+ agg_timeout);
if (rc || (status != WLAN_STATUS_SUCCESS)) {
wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
status);
@@ -384,7 +397,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
{
u8 agg_wsize = wil_agg_size(wil, wsize);
u16 agg_timeout = 0;
- struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
int rc = 0;
if (txdata->addba_in_progress) {
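With the switch to per-device limits, the ADDBA window is bounded by both the device's maximum window size and by how many full-size MPDUs fit into one A-MPDU. A worked example with purely illustrative numbers (a 64 KB A-MPDU budget and ~2 KB per MPDU):

	/* min(window cap, AMPDU budget / MPDU size) */
	u16 max_agg = min_t(u16, 16, 65536 / 2048);	/* min(16, 32) = 16 */

Here the window cap, not the byte budget, is the binding constraint.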
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index c4db2a9d9f7f..853abc3a73e4 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -187,6 +187,40 @@ TRACE_EVENT(wil6210_rx,
__entry->seq, __entry->type, __entry->subtype)
);
+TRACE_EVENT(wil6210_rx_status,
+ TP_PROTO(struct wil6210_priv *wil, u8 use_compressed, u16 buff_id,
+ void *msg),
+ TP_ARGS(wil, use_compressed, buff_id, msg),
+ TP_STRUCT__entry(__field(u8, use_compressed)
+ __field(u16, buff_id)
+ __field(unsigned int, len)
+ __field(u8, mid)
+ __field(u8, cid)
+ __field(u8, tid)
+ __field(u8, type)
+ __field(u8, subtype)
+ __field(u16, seq)
+ __field(u8, mcs)
+ ),
+ TP_fast_assign(__entry->use_compressed = use_compressed;
+ __entry->buff_id = buff_id;
+ __entry->len = wil_rx_status_get_length(msg);
+ __entry->mid = wil_rx_status_get_mid(msg);
+ __entry->cid = wil_rx_status_get_cid(msg);
+ __entry->tid = wil_rx_status_get_tid(msg);
+ __entry->type = wil_rx_status_get_frame_type(wil,
+ msg);
+ __entry->subtype = wil_rx_status_get_fc1(wil, msg);
+ __entry->seq = wil_rx_status_get_seq(wil, msg);
+ __entry->mcs = wil_rx_status_get_mcs(msg);
+ ),
+ TP_printk(
+ "compressed %d buff_id %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x type 0x%1x subtype 0x%1x",
+ __entry->use_compressed, __entry->buff_id, __entry->len,
+ __entry->mid, __entry->cid, __entry->tid, __entry->mcs,
+ __entry->seq, __entry->type, __entry->subtype)
+);
+
TRACE_EVENT(wil6210_tx,
TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags),
TP_ARGS(vring, index, len, frags),
@@ -226,6 +260,31 @@ TRACE_EVENT(wil6210_tx_done,
__entry->err)
);
+TRACE_EVENT(wil6210_tx_status,
+ TP_PROTO(struct wil_ring_tx_status *msg, u16 index,
+ unsigned int len),
+ TP_ARGS(msg, index, len),
+ TP_STRUCT__entry(__field(u16, index)
+ __field(unsigned int, len)
+ __field(u8, num_descs)
+ __field(u8, ring_id)
+ __field(u8, status)
+ __field(u8, mcs)
+
+ ),
+ TP_fast_assign(__entry->index = index;
+ __entry->len = len;
+ __entry->num_descs = msg->num_descriptors;
+ __entry->ring_id = msg->ring_id;
+ __entry->status = msg->status;
+ __entry->mcs = wil_tx_status_get_mcs(msg);
+ ),
+ TP_printk(
+ "ring_id %d swtail 0x%x len %d num_descs %d status 0x%x mcs %d",
+ __entry->ring_id, __entry->index, __entry->len,
+ __entry->num_descs, __entry->status, __entry->mcs)
+);
+
#endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ*/
#if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__)
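The two new tracepoints mirror wil6210_rx/wil6210_tx for the status-ring completion path. TRACE_EVENT generates a matching trace_ call; given the TP_PROTO above, a call site would look like this (variable names are illustrative):

	/* from the TX status-ring reaping loop */
	trace_wil6210_tx_status(msg, sring->swhead, dmalen);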
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index b9a9fa828961..6a7943e487fb 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -28,6 +28,7 @@
#include "wmi.h"
#include "txrx.h"
#include "trace.h"
+#include "txrx_edma.h"
static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, 0444);
@@ -47,62 +48,28 @@ static inline uint wil_rx_snaplen(void)
return rx_align_2 ? 6 : 0;
}
-static inline int wil_vring_is_empty(struct vring *vring)
+/* wil_ring_wmark_low - low watermark for available descriptor space */
+static inline int wil_ring_wmark_low(struct wil_ring *ring)
{
- return vring->swhead == vring->swtail;
+ return ring->size / 8;
}
-static inline u32 wil_vring_next_tail(struct vring *vring)
+/* wil_ring_wmark_high - high watermark for available descriptor space */
+static inline int wil_ring_wmark_high(struct wil_ring *ring)
{
- return (vring->swtail + 1) % vring->size;
-}
-
-static inline void wil_vring_advance_head(struct vring *vring, int n)
-{
- vring->swhead = (vring->swhead + n) % vring->size;
-}
-
-static inline int wil_vring_is_full(struct vring *vring)
-{
- return wil_vring_next_tail(vring) == vring->swhead;
-}
-
-/* Used space in Tx Vring */
-static inline int wil_vring_used_tx(struct vring *vring)
-{
- u32 swhead = vring->swhead;
- u32 swtail = vring->swtail;
- return (vring->size + swhead - swtail) % vring->size;
-}
-
-/* Available space in Tx Vring */
-static inline int wil_vring_avail_tx(struct vring *vring)
-{
- return vring->size - wil_vring_used_tx(vring) - 1;
-}
-
-/* wil_vring_wmark_low - low watermark for available descriptor space */
-static inline int wil_vring_wmark_low(struct vring *vring)
-{
- return vring->size/8;
-}
-
-/* wil_vring_wmark_high - high watermark for available descriptor space */
-static inline int wil_vring_wmark_high(struct vring *vring)
-{
- return vring->size/4;
+ return ring->size / 4;
}
/* returns true if num avail descriptors is lower than wmark_low */
-static inline int wil_vring_avail_low(struct vring *vring)
+static inline int wil_ring_avail_low(struct wil_ring *ring)
{
- return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring);
+ return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
}
/* returns true if num avail descriptors is higher than wmark_high */
-static inline int wil_vring_avail_high(struct vring *vring)
+static inline int wil_ring_avail_high(struct wil_ring *ring)
{
- return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
+ return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
}
/* returns true when all tx vrings are empty */
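The renamed watermark helpers keep the same arithmetic: queues are stopped when free descriptors drop below size/8 and woken only once they climb back above size/4, and the gap between the thresholds provides hysteresis against rapid stop/wake flapping. For example:

	struct wil_ring r = { .size = 1024 };	/* size is illustrative */
	int low  = wil_ring_wmark_low(&r);	/* 1024 / 8 = 128 */
	int high = wil_ring_wmark_high(&r);	/* 1024 / 4 = 256 */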
@@ -112,9 +79,10 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
unsigned long data_comp_to;
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- struct vring *vring = &wil->vring_tx[i];
- int vring_index = vring - wil->vring_tx;
- struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ struct wil_ring *vring = &wil->ring_tx[i];
+ int vring_index = vring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata =
+ &wil->ring_tx_data[vring_index];
spin_lock(&txdata->lock);
@@ -126,7 +94,7 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
data_comp_to = jiffies + msecs_to_jiffies(
WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
- while (!wil_vring_is_empty(vring)) {
+ while (!wil_ring_is_empty(vring)) {
if (time_after(jiffies, data_comp_to)) {
wil_dbg_pm(wil,
"TO waiting for idle tx\n");
@@ -150,13 +118,7 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
return true;
}
-/* wil_val_in_range - check if value in [min,max) */
-static inline bool wil_val_in_range(int val, int min, int max)
-{
- return val >= min && val < max;
-}
-
-static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
+static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
{
struct device *dev = wil_to_dev(wil);
size_t sz = vring->size * sizeof(vring->va[0]);
@@ -205,7 +167,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
* we can use any
*/
for (i = 0; i < vring->size; i++) {
- volatile struct vring_tx_desc *_d = &vring->va[i].tx;
+ volatile struct vring_tx_desc *_d =
+ &vring->va[i].tx.legacy;
_d->dma.status = TX_DMA_STATUS_DU;
}
@@ -216,9 +179,10 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
return 0;
}
-static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
+static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc,
struct wil_ctx *ctx)
{
+ struct vring_tx_desc *d = &desc->legacy;
dma_addr_t pa = wil_desc_addr(&d->dma.addr);
u16 dmalen = le16_to_cpu(d->dma.length);
@@ -234,15 +198,14 @@ static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
}
}
-static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
- int tx)
+static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
{
struct device *dev = wil_to_dev(wil);
size_t sz = vring->size * sizeof(vring->va[0]);
lockdep_assert_held(&wil->mutex);
- if (tx) {
- int vring_index = vring - wil->vring_tx;
+ if (!vring->is_rx) {
+ int vring_index = vring - wil->ring_tx;
wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
vring_index, vring->size, vring->va,
@@ -253,33 +216,33 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
&vring->pa, vring->ctx);
}
- while (!wil_vring_is_empty(vring)) {
+ while (!wil_ring_is_empty(vring)) {
dma_addr_t pa;
u16 dmalen;
struct wil_ctx *ctx;
- if (tx) {
+ if (!vring->is_rx) {
struct vring_tx_desc dd, *d = &dd;
volatile struct vring_tx_desc *_d =
- &vring->va[vring->swtail].tx;
+ &vring->va[vring->swtail].tx.legacy;
ctx = &vring->ctx[vring->swtail];
if (!ctx) {
wil_dbg_txrx(wil,
"ctx(%d) was already completed\n",
vring->swtail);
- vring->swtail = wil_vring_next_tail(vring);
+ vring->swtail = wil_ring_next_tail(vring);
continue;
}
*d = *_d;
- wil_txdesc_unmap(dev, d, ctx);
+ wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
- vring->swtail = wil_vring_next_tail(vring);
+ vring->swtail = wil_ring_next_tail(vring);
} else { /* rx */
struct vring_rx_desc dd, *d = &dd;
volatile struct vring_rx_desc *_d =
- &vring->va[vring->swhead].rx;
+ &vring->va[vring->swhead].rx.legacy;
ctx = &vring->ctx[vring->swhead];
*d = *_d;
@@ -287,7 +250,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
dmalen = le16_to_cpu(d->dma.length);
dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
kfree_skb(ctx->skb);
- wil_vring_advance_head(vring, 1);
+ wil_ring_advance_head(vring, 1);
}
}
dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
@@ -302,13 +265,13 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
*
* Safe to call from IRQ
*/
-static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
+static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
u32 i, int headroom)
{
struct device *dev = wil_to_dev(wil);
unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
struct vring_rx_desc dd, *d = &dd;
- volatile struct vring_rx_desc *_d = &vring->va[i].rx;
+ volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
dma_addr_t pa;
struct sk_buff *skb = dev_alloc_skb(sz + headroom);
@@ -318,6 +281,12 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
skb_reserve(skb, headroom);
skb_put(skb, sz);
+ /**
+ * Make sure that the network stack calculates checksum for packets
+ * which failed the HW checksum calculation
+ */
+ skb->ip_summed = CHECKSUM_NONE;
+
pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, pa))) {
kfree_skb(skb);
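Marking each freshly mapped RX buffer CHECKSUM_NONE makes the conservative case the default: the stack verifies the checksum in software unless the completion path later upgrades the skb. A sketch of the resulting split (hw_csum_ok stands in for the driver's actual status check):

	skb->ip_summed = CHECKSUM_NONE;		/* default: stack verifies */
	...
	if (hw_csum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		stats->rx_csum_err++;		/* counted; stack re-checks */

This pairs with the stats->rx_csum_err hunk further down in this file.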
@@ -445,19 +414,12 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
}
}
-/* similar to ieee80211_ version, but FC contain only 1-st byte */
-static inline int wil_is_back_req(u8 fc)
-{
- return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
- (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
-}
-
-bool wil_is_rx_idle(struct wil6210_priv *wil)
+static bool wil_is_rx_idle(struct wil6210_priv *wil)
{
struct vring_rx_desc *_d;
- struct vring *vring = &wil->vring_rx;
+ struct wil_ring *ring = &wil->ring_rx;
- _d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx;
+ _d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
if (_d->dma.status & RX_DMA_STATUS_DU)
return false;
@@ -472,7 +434,7 @@ bool wil_is_rx_idle(struct wil6210_priv *wil)
* Safe to call from IRQ
*/
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
- struct vring *vring)
+ struct wil_ring *vring)
{
struct device *dev = wil_to_dev(wil);
struct wil6210_vif *vif;
@@ -492,11 +454,11 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
again:
- if (unlikely(wil_vring_is_empty(vring)))
+ if (unlikely(wil_ring_is_empty(vring)))
return NULL;
i = (int)vring->swhead;
- _d = &vring->va[i].rx;
+ _d = &vring->va[i].rx.legacy;
if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
/* it is not error, we just reached end of Rx done area */
return NULL;
@@ -504,7 +466,7 @@ again:
skb = vring->ctx[i].skb;
vring->ctx[i].skb = NULL;
- wil_vring_advance_head(vring, 1);
+ wil_ring_advance_head(vring, 1);
if (!skb) {
wil_err(wil, "No Rx skb at [%d]\n", i);
goto again;
@@ -613,6 +575,8 @@ again:
* mis-calculates TCP checksum - if it should be 0x0,
* it writes 0xffff in violation of RFC 1624
*/
+ else
+ stats->rx_csum_err++;
}
if (snaplen) {
@@ -641,15 +605,15 @@ again:
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
struct net_device *ndev = wil->main_ndev;
- struct vring *v = &wil->vring_rx;
+ struct wil_ring *v = &wil->ring_rx;
u32 next_tail;
int rc = 0;
int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
WIL6210_RTAP_SIZE : 0;
- for (; next_tail = wil_vring_next_tail(v),
- (next_tail != v->swhead) && (count-- > 0);
- v->swtail = next_tail) {
+ for (; next_tail = wil_ring_next_tail(v),
+ (next_tail != v->swhead) && (count-- > 0);
+ v->swtail = next_tail) {
rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
if (unlikely(rc)) {
wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
@@ -677,7 +641,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
* Cut'n'paste from original memcmp (see lib/string.c)
* with minimal modifications
*/
-static int reverse_memcmp(const void *cs, const void *ct, size_t count)
+int reverse_memcmp(const void *cs, const void *ct, size_t count)
{
const unsigned char *su1, *su2;
int res = 0;
@@ -722,6 +686,30 @@ static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
return 0;
}
+static int wil_rx_error_check(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_net_stats *stats)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+
+ if ((d->dma.status & RX_DMA_STATUS_ERROR) &&
+ (d->dma.error & RX_DMA_ERROR_MIC)) {
+ stats->rx_mic_error++;
+ wil_dbg_txrx(wil, "MIC error, dropping packet\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
+ int *security)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+
+ *cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
+ *security = wil_rxdesc_security(d);
+}
+
/*
* Pass Rx packet to the netif. Update statistics.
* Called in softirq context (NAPI poll).
@@ -733,15 +721,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
struct wil6210_priv *wil = ndev_to_wil(ndev);
struct wireless_dev *wdev = vif_to_wdev(vif);
unsigned int len = skb->len;
- struct vring_rx_desc *d = wil_skb_rxdesc(skb);
- int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
- int security = wil_rxdesc_security(d);
+ int cid;
+ int security;
struct ethhdr *eth = (void *)skb->data;
/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
* is not suitable, need to look at data
*/
int mcast = is_multicast_ether_addr(eth->h_dest);
- struct wil_net_stats *stats = &wil->sta[cid].stats;
+ struct wil_net_stats *stats;
struct sk_buff *xmit_skb = NULL;
static const char * const gro_res_str[] = {
[GRO_MERGED] = "GRO_MERGED",
@@ -751,6 +738,10 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
[GRO_DROP] = "GRO_DROP",
};
+ wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
+
+ stats = &wil->sta[cid].stats;
+
if (ndev->features & NETIF_F_RXHASH)
/* fake L4 to ensure it won't be re-calculated later
* set hash to any non-zero value to activate rps
@@ -761,13 +752,19 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
skb_orphan(skb);
- if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
+ if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
rc = GRO_DROP;
dev_kfree_skb(skb);
stats->rx_replay++;
goto stats;
}
+ /* check errors reported by HW and update statistics */
+ if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
if (mcast) {
/* send multicast frames both to higher layers in
@@ -835,7 +832,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
struct net_device *ndev = wil->main_ndev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
- struct vring *v = &wil->vring_rx;
+ struct wil_ring *v = &wil->ring_rx;
struct sk_buff *skb;
if (unlikely(!v->va)) {
@@ -875,9 +872,9 @@ static void wil_rx_buf_len_init(struct wil6210_priv *wil)
}
}
-int wil_rx_init(struct wil6210_priv *wil, u16 size)
+static int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
- struct vring *vring = &wil->vring_rx;
+ struct wil_ring *vring = &wil->ring_rx;
int rc;
wil_dbg_misc(wil, "rx_init\n");
@@ -890,6 +887,7 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
wil_rx_buf_len_init(wil);
vring->size = size;
+ vring->is_rx = true;
rc = wil_vring_alloc(wil, vring);
if (rc)
return rc;
@@ -904,22 +902,46 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
return 0;
err_free:
- wil_vring_free(wil, vring, 0);
+ wil_vring_free(wil, vring);
return rc;
}
-void wil_rx_fini(struct wil6210_priv *wil)
+static void wil_rx_fini(struct wil6210_priv *wil)
{
- struct vring *vring = &wil->vring_rx;
+ struct wil_ring *vring = &wil->ring_rx;
wil_dbg_misc(wil, "rx_fini\n");
if (vring->va)
- wil_vring_free(wil, vring, 0);
+ wil_vring_free(wil, vring);
+}
+
+static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
+ u32 len, int vring_index)
+{
+ struct vring_tx_desc *d = &desc->legacy;
+
+ wil_desc_addr_set(&d->dma.addr, pa);
+ d->dma.ip_length = 0;
+ /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
+ d->dma.b11 = 0/*14 | BIT(7)*/;
+ d->dma.error = 0;
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = cpu_to_le16((u16)len);
+ d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
+ d->mac.d[0] = 0;
+ d->mac.d[1] = 0;
+ d->mac.d[2] = 0;
+ d->mac.ucode_cmd = 0;
+ /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
+ d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+ (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+ return 0;
}
-static inline void wil_tx_data_init(struct vring_tx_data *txdata)
+void wil_tx_data_init(struct wil_ring_tx_data *txdata)
{
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = 0;
@@ -935,8 +957,8 @@ static inline void wil_tx_data_init(struct vring_tx_data *txdata)
spin_unlock_bh(&txdata->lock);
}
-int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
- int cid, int tid)
+static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
+ int cid, int tid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
@@ -966,8 +988,8 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
} __packed reply = {
.cmd = {.status = WMI_FW_STATUS_FAILURE},
};
- struct vring *vring = &wil->vring_tx[id];
- struct vring_tx_data *txdata = &wil->vring_tx_data[id];
+ struct wil_ring *vring = &wil->ring_tx[id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
@@ -980,13 +1002,14 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
}
wil_tx_data_init(txdata);
+ vring->is_rx = false;
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
goto out;
- wil->vring2cid_tid[id][0] = cid;
- wil->vring2cid_tid[id][1] = tid;
+ wil->ring2cid_tid[id][0] = cid;
+ wil->ring2cid_tid[id][1] = tid;
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
@@ -1019,9 +1042,9 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
txdata->dot1x_open = false;
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
- wil_vring_free(wil, vring, 1);
- wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
- wil->vring2cid_tid[id][1] = 0;
+ wil_vring_free(wil, vring);
+ wil->ring2cid_tid[id][0] = WIL6210_MAX_CID;
+ wil->ring2cid_tid[id][1] = 0;
out:
@@ -1050,8 +1073,8 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
} __packed reply = {
.cmd = {.status = WMI_FW_STATUS_FAILURE},
};
- struct vring *vring = &wil->vring_tx[id];
- struct vring_tx_data *txdata = &wil->vring_tx_data[id];
+ struct wil_ring *vring = &wil->ring_tx[id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
@@ -1064,13 +1087,14 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
}
wil_tx_data_init(txdata);
+ vring->is_rx = false;
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
goto out;
- wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
- wil->vring2cid_tid[id][1] = 0; /* TID */
+ wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
+ wil->ring2cid_tid[id][1] = 0; /* TID */
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
@@ -1101,62 +1125,32 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
txdata->enabled = 0;
txdata->dot1x_open = false;
spin_unlock_bh(&txdata->lock);
- wil_vring_free(wil, vring, 1);
+ wil_vring_free(wil, vring);
out:
return rc;
}
-void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
-{
- struct vring *vring = &wil->vring_tx[id];
- struct vring_tx_data *txdata = &wil->vring_tx_data[id];
-
- lockdep_assert_held(&wil->mutex);
-
- if (!vring->va)
- return;
-
- wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
-
- spin_lock_bh(&txdata->lock);
- txdata->dot1x_open = false;
- txdata->mid = U8_MAX;
- txdata->enabled = 0; /* no Tx can be in progress or start anew */
- spin_unlock_bh(&txdata->lock);
- /* napi_synchronize waits for completion of the current NAPI but will
- * not prevent the next NAPI run.
- * Add a memory barrier to guarantee that txdata->enabled is zeroed
- * before napi_synchronize so that the next scheduled NAPI will not
- * handle this vring
- */
- wmb();
- /* make sure NAPI won't touch this vring */
- if (test_bit(wil_status_napi_en, wil->status))
- napi_synchronize(&wil->napi_tx);
-
- wil_vring_free(wil, vring, 1);
-}
-
-static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
- struct wil6210_vif *vif,
- struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
{
int i;
struct ethhdr *eth = (void *)skb->data;
int cid = wil_find_cid(wil, vif->mid, eth->h_dest);
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
if (cid < 0)
return NULL;
/* TODO: fix for multiple TID */
- for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
- if (wil->vring2cid_tid[i][0] == cid) {
- struct vring *v = &wil->vring_tx[i];
- struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+ if (wil->ring2cid_tid[i][0] == cid) {
+ struct wil_ring *v = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
eth->h_dest, i);
@@ -1174,42 +1168,43 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
return NULL;
}
-static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, struct sk_buff *skb);
+static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb);
-static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
- struct wil6210_vif *vif,
- struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
{
- struct vring *v;
+ struct wil_ring *ring;
int i;
u8 cid;
- struct vring_tx_data *txdata;
+ struct wil_ring_tx_data *txdata;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
/* In the STA mode, it is expected to have only 1 VRING
* for the AP we connected to.
* find 1-st vring eligible for this skb and use it.
*/
- for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- v = &wil->vring_tx[i];
- txdata = &wil->vring_tx_data[i];
- if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ ring = &wil->ring_tx[i];
+ txdata = &wil->ring_tx_data[i];
+ if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
continue;
- cid = wil->vring2cid_tid[i][0];
+ cid = wil->ring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
- return v;
+ return ring;
}
- wil_dbg_txrx(wil, "Tx while no vrings active?\n");
+ wil_dbg_txrx(wil, "Tx while no rings active?\n");
return NULL;
}
@@ -1225,22 +1220,22 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
* Use old strategy when new is not supported yet:
* - for PBSS
*/
-static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
- struct wil6210_vif *vif,
- struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
{
- struct vring *v;
- struct vring_tx_data *txdata;
- int i = vif->bcast_vring;
+ struct wil_ring *v;
+ struct wil_ring_tx_data *txdata;
+ int i = vif->bcast_ring;
if (i < 0)
return NULL;
- v = &wil->vring_tx[i];
- txdata = &wil->vring_tx_data[i];
+ v = &wil->ring_tx[i];
+ txdata = &wil->ring_tx_data[i];
if (!v->va || !txdata->enabled)
return NULL;
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
return NULL;
return v;
@@ -1250,35 +1245,36 @@ static void wil_set_da_for_vring(struct wil6210_priv *wil,
struct sk_buff *skb, int vring_index)
{
struct ethhdr *eth = (void *)skb->data;
- int cid = wil->vring2cid_tid[vring_index][0];
+ int cid = wil->ring2cid_tid[vring_index][0];
ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
}
-static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
- struct wil6210_vif *vif,
- struct sk_buff *skb)
+static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
{
- struct vring *v, *v2;
+ struct wil_ring *v, *v2;
struct sk_buff *skb2;
int i;
u8 cid;
struct ethhdr *eth = (void *)skb->data;
char *src = eth->h_source;
- struct vring_tx_data *txdata, *txdata2;
+ struct wil_ring_tx_data *txdata, *txdata2;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
/* find 1-st vring eligible for data */
- for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- v = &wil->vring_tx[i];
- txdata = &wil->vring_tx_data[i];
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ v = &wil->ring_tx[i];
+ txdata = &wil->ring_tx_data[i];
if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
continue;
- cid = wil->vring2cid_tid[i][0];
+ cid = wil->ring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
/* don't Tx back to source when re-routing Rx->Tx at the AP */
@@ -1298,15 +1294,15 @@ found:
/* find other active vrings and duplicate skb for each */
for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
- v2 = &wil->vring_tx[i];
- txdata2 = &wil->vring_tx_data[i];
+ v2 = &wil->ring_tx[i];
+ txdata2 = &wil->ring_tx_data[i];
if (!v2->va || txdata2->mid != vif->mid)
continue;
- cid = wil->vring2cid_tid[i][0];
+ cid = wil->ring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->vring_tx_data[i].dot1x_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
@@ -1316,7 +1312,7 @@ found:
if (skb2) {
wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
wil_set_da_for_vring(wil, skb2, i);
- wil_tx_vring(wil, vif, v2, skb2);
+ wil_tx_ring(wil, vif, v2, skb2);
} else {
wil_err(wil, "skb_copy failed\n");
}
@@ -1325,28 +1321,6 @@ found:
return v;
}
-static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
- int vring_index)
-{
- wil_desc_addr_set(&d->dma.addr, pa);
- d->dma.ip_length = 0;
- /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
- d->dma.b11 = 0/*14 | BIT(7)*/;
- d->dma.error = 0;
- d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
- d->dma.length = cpu_to_le16((u16)len);
- d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
- d->mac.d[0] = 0;
- d->mac.d[1] = 0;
- d->mac.d[2] = 0;
- d->mac.ucode_cmd = 0;
- /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
- d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
- (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
-
- return 0;
-}
-
static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
@@ -1454,7 +1428,7 @@ static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
}
static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, struct sk_buff *skb)
+ struct wil_ring *vring, struct sk_buff *skb)
{
struct device *dev = wil_to_dev(wil);
@@ -1474,13 +1448,13 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
int sg_desc_cnt = 0; /* number of descriptors for current mss*/
u32 swhead = vring->swhead;
- int used, avail = wil_vring_avail_tx(vring);
+ int used, avail = wil_ring_avail_tx(vring);
int nr_frags = skb_shinfo(skb)->nr_frags;
int min_desc_required = nr_frags + 1;
int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
int f, len, hdrlen, headlen;
- int vring_index = vring - wil->vring_tx;
- struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ int vring_index = vring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
uint i = swhead;
dma_addr_t pa;
const skb_frag_t *frag = NULL;
@@ -1548,7 +1522,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
tcp_hdr_len = tcp_hdrlen(skb);
skb_net_hdr_len = skb_network_header_len(skb);
- _hdr_desc = &vring->va[i].tx;
+ _hdr_desc = &vring->va[i].tx.legacy;
pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa))) {
@@ -1556,7 +1530,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
goto err_exit;
}
- wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa,
+ hdrlen, vring_index);
wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
tcp_hdr_len, skb_net_hdr_len);
wil_tx_last_desc(hdr_desc);
@@ -1613,7 +1588,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
goto mem_error;
}
- _desc = &vring->va[i].tx;
+ _desc = &vring->va[i].tx.legacy;
if (!_first_desc) {
_first_desc = _desc;
@@ -1623,7 +1598,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
d = &desc_mem;
}
- wil_tx_desc_map(d, pa, lenmss, vring_index);
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
+ pa, lenmss, vring_index);
wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
is_ipv4, tcp_hdr_len,
skb_net_hdr_len);
@@ -1701,8 +1677,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
vring->ctx[i].skb = skb_get(skb);
/* performance monitoring */
- used = wil_vring_used_tx(vring);
- if (wil_val_in_range(wil->vring_idle_trsh,
+ used = wil_ring_used_tx(vring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
used, used + descs_used)) {
txdata->idle += get_cycles() - txdata->last_idle;
wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
@@ -1717,7 +1693,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
wmb();
/* advance swhead */
- wil_vring_advance_head(vring, descs_used);
+ wil_ring_advance_head(vring, descs_used);
wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
/* make sure all writes to descriptors (shared memory) are done before
@@ -1725,6 +1701,11 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
*/
wmb();
+ if (wil->tx_latency)
+ *(ktime_t *)&skb->cb = ktime_get();
+ else
+ memset(skb->cb, 0, sizeof(ktime_t));
+
wil_w(wil, vring->hwtail, vring->swhead);
return 0;
@@ -1733,12 +1714,12 @@ mem_error:
struct wil_ctx *ctx;
i = (swhead + descs_used - 1) % vring->size;
- d = (struct vring_tx_desc *)&vring->va[i].tx;
- _desc = &vring->va[i].tx;
+ d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
+ _desc = &vring->va[i].tx.legacy;
*d = *_desc;
_desc->dma.status = TX_DMA_STATUS_DU;
ctx = &vring->ctx[i];
- wil_txdesc_unmap(dev, d, ctx);
+ wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
memset(ctx, 0, sizeof(*ctx));
descs_used--;
}
@@ -1746,26 +1727,26 @@ err_exit:
return rc;
}
-static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, struct sk_buff *skb)
+static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb)
{
struct device *dev = wil_to_dev(wil);
struct vring_tx_desc dd, *d = &dd;
volatile struct vring_tx_desc *_d;
- u32 swhead = vring->swhead;
- int avail = wil_vring_avail_tx(vring);
+ u32 swhead = ring->swhead;
+ int avail = wil_ring_avail_tx(ring);
int nr_frags = skb_shinfo(skb)->nr_frags;
uint f = 0;
- int vring_index = vring - wil->vring_tx;
- struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ int ring_index = ring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
uint i = swhead;
dma_addr_t pa;
int used;
- bool mcast = (vring_index == vif->bcast_vring);
+ bool mcast = (ring_index == vif->bcast_ring);
uint len = skb_headlen(skb);
- wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len,
- vring_index);
+ wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
+ skb->len, ring_index, nr_frags);
if (unlikely(!txdata->enabled))
return -EINVAL;
@@ -1773,23 +1754,24 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
if (unlikely(avail < 1 + nr_frags)) {
wil_err_ratelimited(wil,
"Tx ring[%2d] full. No space for %d fragments\n",
- vring_index, 1 + nr_frags);
+ ring_index, 1 + nr_frags);
return -ENOMEM;
}
- _d = &vring->va[i].tx;
+ _d = &ring->va[i].tx.legacy;
pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
- wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
+ wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index,
skb_headlen(skb), skb->data, &pa);
wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
if (unlikely(dma_mapping_error(dev, pa)))
return -EINVAL;
- vring->ctx[i].mapped_as = wil_mapped_as_single;
+ ring->ctx[i].mapped_as = wil_mapped_as_single;
/* 1-st segment */
- wil_tx_desc_map(d, pa, len, vring_index);
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
+ ring_index);
if (unlikely(mcast)) {
d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
@@ -1798,11 +1780,11 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
/* Process TCP/UDP checksum offloading */
if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
- vring_index);
+ ring_index);
goto dma_error;
}
- vring->ctx[i].nr_frags = nr_frags;
+ ring->ctx[i].nr_frags = nr_frags;
wil_tx_desc_set_nr_frags(d, nr_frags + 1);
/* middle segments */
@@ -1812,20 +1794,21 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
int len = skb_frag_size(frag);
*_d = *d;
- wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
+ wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
- i = (swhead + f + 1) % vring->size;
- _d = &vring->va[i].tx;
+ i = (swhead + f + 1) % ring->size;
+ _d = &ring->va[i].tx.legacy;
pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa))) {
wil_err(wil, "Tx[%2d] failed to map fragment\n",
- vring_index);
+ ring_index);
goto dma_error;
}
- vring->ctx[i].mapped_as = wil_mapped_as_page;
- wil_tx_desc_map(d, pa, len, vring_index);
+ ring->ctx[i].mapped_as = wil_mapped_as_page;
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
+ pa, len, ring_index);
/* no need to check return code -
* if it succeeded for 1-st descriptor,
* it will succeed here too
@@ -1837,7 +1820,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
*_d = *d;
- wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
+ wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
@@ -1845,15 +1828,15 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
* to prevent skb release before accounting
* in case of immediate "tx done"
*/
- vring->ctx[i].skb = skb_get(skb);
+ ring->ctx[i].skb = skb_get(skb);
/* performance monitoring */
- used = wil_vring_used_tx(vring);
- if (wil_val_in_range(wil->vring_idle_trsh,
+ used = wil_ring_used_tx(ring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
used, used + nr_frags + 1)) {
txdata->idle += get_cycles() - txdata->last_idle;
wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
- vring_index, used, used + nr_frags + 1);
+ ring_index, used, used + nr_frags + 1);
}
/* Make sure to advance the head only after descriptor update is done.
@@ -1864,17 +1847,22 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
wmb();
/* advance swhead */
- wil_vring_advance_head(vring, nr_frags + 1);
- wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
- vring->swhead);
- trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
+ wil_ring_advance_head(ring, nr_frags + 1);
+ wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead,
+ ring->swhead);
+ trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);
/* make sure all writes to descriptors (shared memory) are done before
* committing them to HW
*/
wmb();
- wil_w(wil, vring->hwtail, vring->swhead);
+ if (wil->tx_latency)
+ *(ktime_t *)&skb->cb = ktime_get();
+ else
+ memset(skb->cb, 0, sizeof(ktime_t));
+
+ wil_w(wil, ring->hwtail, ring->swhead);
return 0;
dma_error:
@@ -1883,12 +1871,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
for (f = 0; f < nr_frags; f++) {
struct wil_ctx *ctx;
- i = (swhead + f) % vring->size;
- ctx = &vring->ctx[i];
- _d = &vring->va[i].tx;
+ i = (swhead + f) % ring->size;
+ ctx = &ring->ctx[i];
+ _d = &ring->va[i].tx.legacy;
*d = *_d;
_d->dma.status = TX_DMA_STATUS_DU;
- wil_txdesc_unmap(dev, d, ctx);
+ wil->txrx_ops.tx_desc_unmap(dev,
+ (union wil_tx_desc *)d,
+ ctx);
memset(ctx, 0, sizeof(*ctx));
}
@@ -1896,11 +1886,11 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
return -EINVAL;
}
-static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, struct sk_buff *skb)
+static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb)
{
- int vring_index = vring - wil->vring_tx;
- struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ int ring_index = ring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
int rc;
spin_lock(&txdata->lock);
@@ -1914,8 +1904,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
return -EINVAL;
}
- rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
- (wil, vif, vring, skb);
+ rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
+ (wil, vif, ring, skb);
spin_unlock(&txdata->lock);
@@ -1941,7 +1931,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
*/
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
struct wil6210_vif *vif,
- struct vring *vring,
+ struct wil_ring *ring,
bool check_stop)
{
int i;
@@ -1949,9 +1939,9 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
if (unlikely(!vif))
return;
- if (vring)
+ if (ring)
wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
- (int)(vring - wil->vring_tx), vif->mid, check_stop,
+ (int)(ring - wil->ring_tx), vif->mid, check_stop,
vif->net_queue_stopped);
else
wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
@@ -1962,7 +1952,7 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
return;
if (check_stop) {
- if (!vring || unlikely(wil_vring_avail_low(vring))) {
+ if (!ring || unlikely(wil_ring_avail_low(ring))) {
/* not enough room in the vring */
netif_tx_stop_all_queues(vif_to_ndev(vif));
vif->net_queue_stopped = true;
@@ -1978,22 +1968,22 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
/* check wake */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- struct vring *cur_vring = &wil->vring_tx[i];
- struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+ struct wil_ring *cur_ring = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
- if (txdata->mid != vif->mid || !cur_vring->va ||
- !txdata->enabled || cur_vring == vring)
+ if (txdata->mid != vif->mid || !cur_ring->va ||
+ !txdata->enabled || cur_ring == ring)
continue;
- if (wil_vring_avail_low(cur_vring)) {
- wil_dbg_txrx(wil, "vring %d full, can't wake\n",
- (int)(cur_vring - wil->vring_tx));
+ if (wil_ring_avail_low(cur_ring)) {
+ wil_dbg_txrx(wil, "ring %d full, can't wake\n",
+ (int)(cur_ring - wil->ring_tx));
return;
}
}
- if (!vring || wil_vring_avail_high(vring)) {
- /* enough room in the vring */
+ if (!ring || wil_ring_avail_high(ring)) {
+ /* enough room in the ring */
wil_dbg_txrx(wil, "calling netif_tx_wake\n");
netif_tx_wake_all_queues(vif_to_ndev(vif));
vif->net_queue_stopped = false;
@@ -2001,18 +1991,18 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
}
void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, bool check_stop)
+ struct wil_ring *ring, bool check_stop)
{
spin_lock(&wil->net_queue_lock);
- __wil_update_net_queues(wil, vif, vring, check_stop);
+ __wil_update_net_queues(wil, vif, ring, check_stop);
spin_unlock(&wil->net_queue_lock);
}
void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, bool check_stop)
+ struct wil_ring *ring, bool check_stop)
{
spin_lock_bh(&wil->net_queue_lock);
- __wil_update_net_queues(wil, vif, vring, check_stop);
+ __wil_update_net_queues(wil, vif, ring, check_stop);
spin_unlock_bh(&wil->net_queue_lock);
}
@@ -2022,7 +2012,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct wil6210_priv *wil = vif_to_wil(vif);
struct ethhdr *eth = (void *)skb->data;
bool bcast = is_multicast_ether_addr(eth->h_dest);
- struct vring *vring;
+ struct wil_ring *ring;
static bool pr_once_fw;
int rc;
@@ -2048,36 +2038,36 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* find vring */
if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
/* in STA mode (ESS), all to same VRING (to AP) */
- vring = wil_find_tx_vring_sta(wil, vif, skb);
+ ring = wil_find_tx_ring_sta(wil, vif, skb);
} else if (bcast) {
if (vif->pbss)
/* in pbss, no bcast VRING - duplicate skb in
* all stations VRINGs
*/
- vring = wil_find_tx_bcast_2(wil, vif, skb);
+ ring = wil_find_tx_bcast_2(wil, vif, skb);
else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
/* AP has a dedicated bcast VRING */
- vring = wil_find_tx_bcast_1(wil, vif, skb);
+ ring = wil_find_tx_bcast_1(wil, vif, skb);
else
/* unexpected combination, fallback to duplicating
* the skb in all stations VRINGs
*/
- vring = wil_find_tx_bcast_2(wil, vif, skb);
+ ring = wil_find_tx_bcast_2(wil, vif, skb);
} else {
/* unicast, find specific VRING by dest. address */
- vring = wil_find_tx_ucast(wil, vif, skb);
+ ring = wil_find_tx_ucast(wil, vif, skb);
}
- if (unlikely(!vring)) {
- wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
+ if (unlikely(!ring)) {
+ wil_dbg_txrx(wil, "No Tx RING found for %pM\n", eth->h_dest);
goto drop;
}
/* set up vring entry */
- rc = wil_tx_vring(wil, vif, vring, skb);
+ rc = wil_tx_ring(wil, vif, ring, skb);
switch (rc) {
case 0:
/* shall we stop net queues? */
- wil_update_net_queues_bh(wil, vif, vring, true);
+ wil_update_net_queues_bh(wil, vif, ring, true);
/* statistics will be updated on the tx_complete */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2093,20 +2083,29 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NET_XMIT_DROP;
}
-static inline bool wil_need_txstat(struct sk_buff *skb)
+void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_sta_info *sta)
{
- struct ethhdr *eth = (void *)skb->data;
+ int skb_time_us;
+ int bin;
- return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
- (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
-}
+ if (!wil->tx_latency)
+ return;
-static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
-{
- if (unlikely(wil_need_txstat(skb)))
- skb_complete_wifi_ack(skb, acked);
- else
- acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
+ if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0)
+ return;
+
+ skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb);
+ bin = skb_time_us / wil->tx_latency_res;
+ bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1);
+
+ wil_dbg_txrx(wil, "skb time %dus => bin %d\n", skb_time_us, bin);
+ sta->tx_latency_bins[bin]++;
+ sta->stats.tx_latency_total_us += skb_time_us;
+ if (skb_time_us < sta->stats.tx_latency_min_us)
+ sta->stats.tx_latency_min_us = skb_time_us;
+ if (skb_time_us > sta->stats.tx_latency_max_us)
+ sta->stats.tx_latency_max_us = skb_time_us;
}
/**
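wil_tx_latency_calc() converts each skb's lifetime into a histogram bin: the TX path stamps skb->cb with ktime_get() only when the feature is enabled (and zeroes it otherwise, so completion skips stale stamps), and the completion path divides the elapsed microseconds by the configured resolution, clamping to the last bin. With illustrative numbers:

	int res = 500, nbins = 12;	/* resolution and bin count assumed */
	int t_us = 1300;		/* measured skb lifetime in us */
	int bin = min_t(int, t_us / res, nbins - 1);	/* 1300/500 = 2 */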
@@ -2121,10 +2120,10 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
struct wil6210_priv *wil = vif_to_wil(vif);
struct net_device *ndev = vif_to_ndev(vif);
struct device *dev = wil_to_dev(wil);
- struct vring *vring = &wil->vring_tx[ringid];
- struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
+ struct wil_ring *vring = &wil->ring_tx[ringid];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
int done = 0;
- int cid = wil->vring2cid_tid[ringid][0];
+ int cid = wil->ring2cid_tid[ringid][0];
struct wil_net_stats *stats = NULL;
volatile struct vring_tx_desc *_d;
int used_before_complete;
@@ -2142,12 +2141,12 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
- used_before_complete = wil_vring_used_tx(vring);
+ used_before_complete = wil_ring_used_tx(vring);
if (cid < WIL6210_MAX_CID)
stats = &wil->sta[cid].stats;
- while (!wil_vring_is_empty(vring)) {
+ while (!wil_ring_is_empty(vring)) {
int new_swtail;
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
/**
@@ -2158,7 +2157,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
int lf = (vring->swtail + ctx->nr_frags) % vring->size;
/* TODO: check we are not past head */
- _d = &vring->va[lf].tx;
+ _d = &vring->va[lf].tx.legacy;
if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
break;
@@ -2170,7 +2169,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
ctx = &vring->ctx[vring->swtail];
skb = ctx->skb;
- _d = &vring->va[vring->swtail].tx;
+ _d = &vring->va[vring->swtail].tx.legacy;
*d = *_d;
@@ -2184,7 +2183,9 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
- wil_txdesc_unmap(dev, d, ctx);
+ wil->txrx_ops.tx_desc_unmap(dev,
+ (union wil_tx_desc *)d,
+ ctx);
if (skb) {
if (likely(d->dma.error == 0)) {
@@ -2193,6 +2194,9 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
if (stats) {
stats->tx_packets++;
stats->tx_bytes += skb->len;
+
+ wil_tx_latency_calc(wil, skb,
+ &wil->sta[cid]);
}
} else {
ndev->stats.tx_errors++;
@@ -2203,7 +2207,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
}
memset(ctx, 0, sizeof(*ctx));
/* Make sure the ctx is zeroed before updating the tail
- * to prevent a case where wil_tx_vring will see
+ * to prevent a case where wil_tx_ring will see
* this descriptor as used and handle it before ctx zero
* is completed.
*/
@@ -2213,14 +2217,14 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
* so hardware will not try to process this desc.,
* - rest of descriptor will be initialized on Tx.
*/
- vring->swtail = wil_vring_next_tail(vring);
+ vring->swtail = wil_ring_next_tail(vring);
done++;
}
}
/* performance monitoring */
- used_new = wil_vring_used_tx(vring);
- if (wil_val_in_range(wil->vring_idle_trsh,
+ used_new = wil_ring_used_tx(vring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
used_new, used_before_complete)) {
wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
ringid, used_before_complete, used_new);
@@ -2233,3 +2237,49 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
return done;
}
+
+static inline int wil_tx_init(struct wil6210_priv *wil)
+{
+ return 0;
+}
+
+static inline void wil_tx_fini(struct wil6210_priv *wil) {}
+
+static void wil_get_reorder_params(struct wil6210_priv *wil,
+ struct sk_buff *skb, int *tid, int *cid,
+ int *mid, u16 *seq, int *mcast, int *retry)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+
+ *tid = wil_rxdesc_tid(d);
+ *cid = wil_rxdesc_cid(d);
+ *mid = wil_rxdesc_mid(d);
+ *seq = wil_rxdesc_seq(d);
+ *mcast = wil_rxdesc_mcast(d);
+ *retry = wil_rxdesc_retry(d);
+}
+
+void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
+{
+ wil->txrx_ops.configure_interrupt_moderation =
+ wil_configure_interrupt_moderation;
+ /* TX ops */
+ wil->txrx_ops.tx_desc_map = wil_tx_desc_map;
+ wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap;
+ wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso;
+ wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
+ wil->txrx_ops.ring_fini_tx = wil_vring_free;
+ wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
+ wil->txrx_ops.tx_init = wil_tx_init;
+ wil->txrx_ops.tx_fini = wil_tx_fini;
+ /* RX ops */
+ wil->txrx_ops.rx_init = wil_rx_init;
+ wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
+ wil->txrx_ops.get_reorder_params = wil_get_reorder_params;
+ wil->txrx_ops.get_netif_rx_params =
+ wil_get_netif_rx_params;
+ wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check;
+ wil->txrx_ops.rx_error_check = wil_rx_error_check;
+ wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
+ wil->txrx_ops.rx_fini = wil_rx_fini;
+}
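wil_init_txrx_ops_legacy_dma() is the legacy half of the dispatch introduced throughout this patch: every DMA-flavor-specific operation is reached through wil->txrx_ops, bound exactly once at init, so the hot paths stay branch-free. An abridged sketch of such an ops table (the real struct lives in wil6210.h, outside this diff, and has more fields):

	struct wil_txrx_ops {
		int (*tx_desc_map)(union wil_tx_desc *desc, dma_addr_t pa,
				   u32 len, int ring_index);
		void (*tx_desc_unmap)(struct device *dev,
				      union wil_tx_desc *desc,
				      struct wil_ctx *ctx);
		bool (*is_rx_idle)(struct wil6210_priv *wil);
		/* ... RX init/fini, reorder, crypto/error checks, ... */
	};

An enhanced-DMA counterpart (presumably in txrx_edma.c, added below) binds the same slots to _edma implementations.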
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 5f07717acc2c..9d83be481839 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -18,6 +18,9 @@
#ifndef WIL6210_TXRX_H
#define WIL6210_TXRX_H
+#include "wil6210.h"
+#include "txrx_edma.h"
+
#define BUF_SW_OWNED (1)
#define BUF_HW_OWNED (0)
@@ -29,19 +32,13 @@
/* Tx/Rx path */
-/* Common representation of physical address in Vring */
-struct vring_dma_addr {
- __le32 addr_low;
- __le16 addr_high;
-} __packed;
-
-static inline dma_addr_t wil_desc_addr(struct vring_dma_addr *addr)
+static inline dma_addr_t wil_desc_addr(struct wil_ring_dma_addr *addr)
{
return le32_to_cpu(addr->addr_low) |
((u64)le16_to_cpu(addr->addr_high) << 32);
}
-static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
+static inline void wil_desc_addr_set(struct wil_ring_dma_addr *addr,
dma_addr_t pa)
{
addr->addr_low = cpu_to_le32(lower_32_bits(pa));
@@ -294,7 +291,7 @@ struct vring_tx_mac {
*/
struct vring_tx_dma {
u32 d0;
- struct vring_dma_addr addr;
+ struct wil_ring_dma_addr addr;
u8 ip_length;
u8 b11; /* 0..6: mac_length; 7:ip_version */
u8 error; /* 0..2: err; 3..7: reserved; */
@@ -428,7 +425,7 @@ struct vring_rx_mac {
struct vring_rx_dma {
u32 d0;
- struct vring_dma_addr addr;
+ struct wil_ring_dma_addr addr;
u8 ip_length;
u8 b11;
u8 error;
@@ -441,14 +438,24 @@ struct vring_tx_desc {
struct vring_tx_dma dma;
} __packed;
+union wil_tx_desc {
+ struct vring_tx_desc legacy;
+ struct wil_tx_enhanced_desc enhanced;
+} __packed;
+
struct vring_rx_desc {
struct vring_rx_mac mac;
struct vring_rx_dma dma;
} __packed;
-union vring_desc {
- struct vring_tx_desc tx;
- struct vring_rx_desc rx;
+union wil_rx_desc {
+ struct vring_rx_desc legacy;
+ struct wil_rx_enhanced_desc enhanced;
+} __packed;
+
+union wil_ring_desc {
+ union wil_tx_desc tx;
+ union wil_rx_desc rx;
} __packed;
static inline int wil_rxdesc_tid(struct vring_rx_desc *d)
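This union layering is what makes the .tx.legacy rewrites earlier in the diff necessary: a ring element must be wide enough for either descriptor flavor, so legacy code now selects its member explicitly. For example (the enhanced access pattern is assumed by symmetry):

	volatile struct vring_tx_desc *d_leg = &ring->va[i].tx.legacy;
	/* enhanced DMA would use &ring->va[i].tx.enhanced instead */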
@@ -493,6 +500,11 @@ static inline int wil_rxdesc_ext_subtype(struct vring_rx_desc *d)
return WIL_GET_BITS(d->mac.d0, 28, 31);
}
+static inline int wil_rxdesc_retry(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 31, 31);
+}
+
static inline int wil_rxdesc_key_id(struct vring_rx_desc *d)
{
return WIL_GET_BITS(d->mac.d1, 4, 5);
@@ -528,6 +540,76 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
return (void *)skb->cb;
}
+static inline int wil_ring_is_empty(struct wil_ring *ring)
+{
+ return ring->swhead == ring->swtail;
+}
+
+static inline u32 wil_ring_next_tail(struct wil_ring *ring)
+{
+ return (ring->swtail + 1) % ring->size;
+}
+
+static inline void wil_ring_advance_head(struct wil_ring *ring, int n)
+{
+ ring->swhead = (ring->swhead + n) % ring->size;
+}
+
+static inline int wil_ring_is_full(struct wil_ring *ring)
+{
+ return wil_ring_next_tail(ring) == ring->swhead;
+}
+
+static inline bool wil_need_txstat(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (void *)skb->data;
+
+ return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
+}
+
+static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
+{
+ if (unlikely(wil_need_txstat(skb)))
+ skb_complete_wifi_ack(skb, acked);
+ else
+ acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
+}
+
+/* Used space in Tx ring */
+static inline int wil_ring_used_tx(struct wil_ring *ring)
+{
+ u32 swhead = ring->swhead;
+ u32 swtail = ring->swtail;
+
+ return (ring->size + swhead - swtail) % ring->size;
+}
+
+/* Available space in Tx ring */
+static inline int wil_ring_avail_tx(struct wil_ring *ring)
+{
+ return ring->size - wil_ring_used_tx(ring) - 1;
+}
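+
+/* Example: for a ring of size 8 with swhead = 2 and swtail = 6,
+ * wil_ring_used_tx() returns (8 + 2 - 6) % 8 = 4 and wil_ring_avail_tx()
+ * returns 8 - 4 - 1 = 3. One slot is always kept unused, so a full ring
+ * (wil_ring_next_tail() == swhead) is distinguishable from an empty one
+ * (swhead == swtail).
+ */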
+
+static inline int wil_get_min_tx_ring_id(struct wil6210_priv *wil)
+{
+ /* In Enhanced DMA ring 0 is reserved for RX */
+ return wil->use_enhanced_dma_hw ? 1 : 0;
+}
+
+/* similar to the ieee80211_ version, but FC contains only the 1st byte */
+static inline int wil_is_back_req(u8 fc)
+{
+ return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
+}
+
+/* wil_val_in_range - check if value in [min,max) */
+static inline bool wil_val_in_range(int val, int min, int max)
+{
+ return val >= min && val < max;
+}
+
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
@@ -536,5 +618,9 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
int size, u16 ssn);
void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
struct wil_tid_ampdu_rx *r);
+void wil_tx_data_init(struct wil_ring_tx_data *txdata);
+void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil);
+void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_sta_info *sta);
#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
new file mode 100644
index 000000000000..bca61cb44c37
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -0,0 +1,1608 @@
+/*
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/moduleparam.h>
+#include <linux/prefetch.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "wil6210.h"
+#include "txrx_edma.h"
+#include "txrx.h"
+#include "trace.h"
+
+#define WIL_EDMA_MAX_DATA_OFFSET (2)
+/* RX buffer size must be aligned to 4 bytes */
+#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)
+
+static void wil_tx_desc_unmap_edma(struct device *dev,
+ union wil_tx_desc *desc,
+ struct wil_ctx *ctx)
+{
+ struct wil_tx_enhanced_desc *d = (struct wil_tx_enhanced_desc *)desc;
+ dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma);
+ u16 dmalen = le16_to_cpu(d->dma.length);
+
+ switch (ctx->mapped_as) {
+ case wil_mapped_as_single:
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ case wil_mapped_as_page:
+ dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+}
+
+static int wil_find_free_sring(struct wil6210_priv *wil)
+{
+ int i;
+
+ for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) {
+ if (!wil->srings[i].va)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static void wil_sring_free(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz;
+
+ if (!sring || !sring->va)
+ return;
+
+ sz = sring->elem_size * sring->size;
+
+ wil_dbg_misc(wil, "status_ring_free, size(bytes)=%zu, 0x%p:%pad\n",
+ sz, sring->va, &sring->pa);
+
+ dma_free_coherent(dev, sz, (void *)sring->va, sring->pa);
+ sring->pa = 0;
+ sring->va = NULL;
+}
+
+static int wil_sring_alloc(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = sring->elem_size * sring->size;
+
+ wil_dbg_misc(wil, "status_ring_alloc: size=%zu\n", sz);
+
+ if (sz == 0) {
+ wil_err(wil, "Cannot allocate a zero size status ring\n");
+ return -EINVAL;
+ }
+
+ sring->swhead = 0;
+
+ /* Status messages are allocated and initialized to 0. This is
+ * necessary since the DR bit must start out as 0.
+ */
+ sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
+ if (!sring->va)
+ return -ENOMEM;
+
+ wil_dbg_misc(wil, "status_ring[%d] 0x%p:%pad\n", sring->size, sring->va,
+ &sring->pa);
+
+ return 0;
+}
+
+static int wil_tx_init_edma(struct wil6210_priv *wil)
+{
+ int ring_id = wil_find_free_sring(wil);
+ struct wil_status_ring *sring;
+ int rc;
+ u16 status_ring_size;
+
+ if (wil->tx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
+ wil->tx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
+ wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
+
+ status_ring_size = 1 << wil->tx_status_ring_order;
+
+ wil_dbg_misc(wil, "init TX sring: size=%u, ring_id=%u\n",
+ status_ring_size, ring_id);
+
+ if (ring_id < 0)
+ return ring_id;
+
+ /* Allocate Tx status ring. Tx descriptor rings will be
+ * allocated on WMI connect event
+ */
+ sring = &wil->srings[ring_id];
+
+ sring->is_rx = false;
+ sring->size = status_ring_size;
+ sring->elem_size = sizeof(struct wil_ring_tx_status);
+ rc = wil_sring_alloc(wil, sring);
+ if (rc)
+ return rc;
+
+ rc = wil_wmi_tx_sring_cfg(wil, ring_id);
+ if (rc)
+ goto out_free;
+
+ sring->desc_rdy_pol = 1;
+ wil->tx_sring_idx = ring_id;
+
+ return 0;
+out_free:
+ wil_sring_free(wil, sring);
+ return rc;
+}
+
+/**
+ * Allocate one skb for Rx descriptor RING
+ */
+static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
+ struct wil_ring *ring, u32 i)
+{
+ struct device *dev = wil_to_dev(wil);
+ unsigned int sz = ALIGN(wil->rx_buf_len, 4);
+ dma_addr_t pa;
+ u16 buff_id;
+ struct list_head *active = &wil->rx_buff_mgmt.active;
+ struct list_head *free = &wil->rx_buff_mgmt.free;
+ struct wil_rx_buff *rx_buff;
+ struct wil_rx_buff *buff_arr = wil->rx_buff_mgmt.buff_arr;
+ struct sk_buff *skb;
+ struct wil_rx_enhanced_desc dd, *d = &dd;
+ struct wil_rx_enhanced_desc *_d = (struct wil_rx_enhanced_desc *)
+ &ring->va[i].rx.enhanced;
+
+ if (unlikely(list_empty(free))) {
+ wil->rx_buff_mgmt.free_list_empty_cnt++;
+ return -EAGAIN;
+ }
+
+ skb = dev_alloc_skb(sz);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_put(skb, sz);
+
+ /* Make sure that the network stack calculates the checksum for
+ * packets which failed the HW checksum calculation
+ */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ /* Get the buffer ID - the index of the rx buffer in the buff_arr */
+ rx_buff = list_first_entry(free, struct wil_rx_buff, list);
+ buff_id = rx_buff->id;
+
+ /* Move a buffer from the free list to the active list */
+ list_move(&rx_buff->list, active);
+
+ buff_arr[buff_id].skb = skb;
+
+ wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
+ d->dma.length = cpu_to_le16(sz);
+ d->mac.buff_id = cpu_to_le16(buff_id);
+ *_d = *d;
+
+ /* Save the physical address in skb->cb for later use in dma_unmap */
+ memcpy(skb->cb, &pa, sizeof(pa));
+
+ return 0;
+}
+
+static inline
+void wil_get_next_rx_status_msg(struct wil_status_ring *sring, void *msg)
+{
+ memcpy(msg, (void *)(sring->va + (sring->elem_size * sring->swhead)),
+ sring->elem_size);
+}
+
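+/* Advance the status ring head; the HW flips the DR bit value on every
+ * wrap around of the ring, so the expected polarity is toggled whenever
+ * swhead wraps back to 0
+ */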
+static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
+{
+ sring->swhead = (sring->swhead + 1) % sring->size;
+ if (sring->swhead == 0)
+ sring->desc_rdy_pol = 1 - sring->desc_rdy_pol;
+}
+
+static int wil_rx_refill_edma(struct wil6210_priv *wil)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+ u32 next_head;
+ int rc = 0;
+ u32 swtail = *ring->edma_rx_swtail.va;
+
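+ /* Attach an skb to every free descriptor, stopping once the next
+ * head would reach the HW-updated software tail
+ */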
+ for (; next_head = wil_ring_next_head(ring), (next_head != swtail);
+ ring->swhead = next_head) {
+ rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
+ if (unlikely(rc)) {
+ if (rc == -EAGAIN)
+ wil_dbg_txrx(wil, "No free buffer ID found\n");
+ else
+ wil_err_ratelimited(wil,
+ "Error %d in refill desc[%d]\n",
+ rc, ring->swhead);
+ break;
+ }
+ }
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ wil_w(wil, ring->hwtail, ring->swhead);
+
+ return rc;
+}
+
+static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
+ struct wil_ring *ring)
+{
+ struct device *dev = wil_to_dev(wil);
+ u32 next_tail;
+ u32 swhead = (ring->swhead + 1) % ring->size;
+ dma_addr_t pa;
+ u16 dmalen;
+
+ for (; next_tail = wil_ring_next_tail(ring), (next_tail != swhead);
+ ring->swtail = next_tail) {
+ struct wil_rx_enhanced_desc dd, *d = &dd;
+ struct wil_rx_enhanced_desc *_d =
+ (struct wil_rx_enhanced_desc *)
+ &ring->va[ring->swtail].rx.enhanced;
+ struct sk_buff *skb;
+ u16 buff_id;
+
+ *d = *_d;
+ pa = wil_rx_desc_get_addr_edma(&d->dma);
+ dmalen = le16_to_cpu(d->dma.length);
+ dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
+
+ /* Extract the SKB from the rx_buff management array */
+ buff_id = __le16_to_cpu(d->mac.buff_id);
+ if (buff_id >= wil->rx_buff_mgmt.size) {
+ wil_err(wil, "invalid buff_id %d\n", buff_id);
+ continue;
+ }
+ skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+ wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
+ if (unlikely(!skb))
+ wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+ else
+ kfree_skb(skb);
+
+ /* Move the buffer from the active to the free list */
+ list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+ &wil->rx_buff_mgmt.free);
+ }
+}
+
+static void wil_free_rx_buff_arr(struct wil6210_priv *wil)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+
+ if (!wil->rx_buff_mgmt.buff_arr)
+ return;
+
+ /* Move all the buffers to the free list, in case the active list
+ * is not empty, so that all SKBs are released before the array is
+ * deleted
+ */
+ wil_move_all_rx_buff_to_free_list(wil, ring);
+
+ kfree(wil->rx_buff_mgmt.buff_arr);
+ wil->rx_buff_mgmt.buff_arr = NULL;
+}
+
+static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
+ size_t size)
+{
+ struct wil_rx_buff *buff_arr;
+ struct list_head *active = &wil->rx_buff_mgmt.active;
+ struct list_head *free = &wil->rx_buff_mgmt.free;
+ int i;
+
+ wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff),
+ GFP_KERNEL);
+ if (!wil->rx_buff_mgmt.buff_arr)
+ return -ENOMEM;
+
+ /* Set list heads */
+ INIT_LIST_HEAD(active);
+ INIT_LIST_HEAD(free);
+
+ /* Link all buffers into the free list and assign their IDs */
+ buff_arr = wil->rx_buff_mgmt.buff_arr;
+ for (i = 0; i < size; i++) {
+ list_add(&buff_arr[i].list, free);
+ buff_arr[i].id = i;
+ }
+
+ wil->rx_buff_mgmt.size = size;
+
+ return 0;
+}
+
+static int wil_init_rx_sring(struct wil6210_priv *wil,
+ u16 status_ring_size,
+ size_t elem_size,
+ u16 ring_id)
+{
+ struct wil_status_ring *sring = &wil->srings[ring_id];
+ int rc;
+
+ wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n", sring->size,
+ ring_id);
+
+ memset(&sring->rx_data, 0, sizeof(sring->rx_data));
+
+ sring->is_rx = true;
+ sring->size = status_ring_size;
+ sring->elem_size = elem_size;
+ rc = wil_sring_alloc(wil, sring);
+ if (rc)
+ return rc;
+
+ rc = wil_wmi_rx_sring_add(wil, ring_id);
+ if (rc)
+ goto out_free;
+
+ sring->desc_rdy_pol = 1;
+
+ return 0;
+out_free:
+ wil_sring_free(wil, sring);
+ return rc;
+}
+
+static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
+ struct wil_ring *ring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = ring->size * sizeof(ring->va[0]);
+
+ wil_dbg_misc(wil, "alloc_desc_ring:\n");
+
+ BUILD_BUG_ON(sizeof(ring->va[0]) != 32);
+
+ ring->swhead = 0;
+ ring->swtail = 0;
+ ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
+ if (!ring->ctx)
+ goto err;
+
+ ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
+ if (!ring->va)
+ goto err_free_ctx;
+
+ if (ring->is_rx) {
+ sz = sizeof(*ring->edma_rx_swtail.va);
+ ring->edma_rx_swtail.va =
+ dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
+ GFP_KERNEL);
+ if (!ring->edma_rx_swtail.va)
+ goto err_free_va;
+ }
+
+ wil_dbg_misc(wil, "%s ring[%d] 0x%p:%pad 0x%p\n",
+ ring->is_rx ? "RX" : "TX",
+ ring->size, ring->va, &ring->pa, ring->ctx);
+
+ return 0;
+err_free_va:
+ dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
+ (void *)ring->va, ring->pa);
+ ring->va = NULL;
+err_free_ctx:
+ kfree(ring->ctx);
+ ring->ctx = NULL;
+err:
+ return -ENOMEM;
+}
+
+static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz;
+ int ring_index = 0;
+
+ if (!ring->va)
+ return;
+
+ sz = ring->size * sizeof(ring->va[0]);
+
+ lockdep_assert_held(&wil->mutex);
+ if (ring->is_rx) {
+ wil_dbg_misc(wil, "free Rx ring [%d] 0x%p:%pad 0x%p\n",
+ ring->size, ring->va,
+ &ring->pa, ring->ctx);
+
+ wil_move_all_rx_buff_to_free_list(wil, ring);
+ goto out;
+ }
+
+ /* TX ring */
+ ring_index = ring - wil->ring_tx;
+
+ wil_dbg_misc(wil, "free Tx ring %d [%d] 0x%p:%pad 0x%p\n",
+ ring_index, ring->size, ring->va,
+ &ring->pa, ring->ctx);
+
+ while (!wil_ring_is_empty(ring)) {
+ struct wil_ctx *ctx;
+
+ struct wil_tx_enhanced_desc dd, *d = &dd;
+ struct wil_tx_enhanced_desc *_d =
+ (struct wil_tx_enhanced_desc *)
+ &ring->va[ring->swtail].tx.enhanced;
+
+ ctx = &ring->ctx[ring->swtail];
+ if (!ctx) {
+ wil_dbg_txrx(wil,
+ "ctx(%d) was already completed\n",
+ ring->swtail);
+ ring->swtail = wil_ring_next_tail(ring);
+ continue;
+ }
+ *d = *_d;
+ wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
+ ring->swtail = wil_ring_next_tail(ring);
+ }
+
+out:
+ dma_free_coherent(dev, sz, (void *)ring->va, ring->pa);
+ kfree(ring->ctx);
+ ring->pa = 0;
+ ring->va = NULL;
+ ring->ctx = NULL;
+}
+
+static int wil_init_rx_desc_ring(struct wil6210_priv *wil, u16 desc_ring_size,
+ int status_ring_id)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+ int rc;
+
+ wil_dbg_misc(wil, "init RX desc ring\n");
+
+ ring->size = desc_ring_size;
+ ring->is_rx = true;
+ rc = wil_ring_alloc_desc_ring(wil, ring);
+ if (rc)
+ return rc;
+
+ rc = wil_wmi_rx_desc_ring_add(wil, status_ring_id);
+ if (rc)
+ goto out_free;
+
+ return 0;
+out_free:
+ wil_ring_free_edma(wil, ring);
+ return rc;
+}
+
+static void wil_get_reorder_params_edma(struct wil6210_priv *wil,
+ struct sk_buff *skb, int *tid,
+ int *cid, int *mid, u16 *seq,
+ int *mcast, int *retry)
+{
+ struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
+
+ *tid = wil_rx_status_get_tid(s);
+ *cid = wil_rx_status_get_cid(s);
+ *mid = wil_rx_status_get_mid(s);
+ *seq = le16_to_cpu(wil_rx_status_get_seq(wil, s));
+ *mcast = wil_rx_status_get_mcast(s);
+ *retry = wil_rx_status_get_retry(s);
+}
+
+static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid,
+ int *security)
+{
+ struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
+
+ *cid = wil_rx_status_get_cid(s);
+ *security = wil_rx_status_get_security(s);
+}
+
+static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct wil_rx_status_extended *st;
+ int cid, tid, key_id, mc;
+ struct wil_sta_info *s;
+ struct wil_tid_crypto_rx *c;
+ struct wil_tid_crypto_rx_single *cc;
+ const u8 *pn;
+
+ /* In HW reorder, HW is responsible for crypto check */
+ if (wil->use_rx_hw_reordering)
+ return 0;
+
+ st = wil_skb_rxstatus(skb);
+
+ cid = wil_rx_status_get_cid(st);
+ tid = wil_rx_status_get_tid(st);
+ key_id = wil_rx_status_get_key_id(st);
+ mc = wil_rx_status_get_mcast(st);
+ s = &wil->sta[cid];
+ c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
+ cc = &c->key_id[key_id];
+ pn = (u8 *)&st->ext.pn_15_0;
+
+ if (!cc->key_set) {
+ wil_err_ratelimited(wil,
+ "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
+ cid, tid, mc, key_id);
+ return -EINVAL;
+ }
+
+ if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
+ wil_err_ratelimited(wil,
+ "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
+ cid, tid, mc, key_id, pn, cc->pn);
+ return -EINVAL;
+ }
+ memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
+
+ return 0;
+}
+
+static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
+{
+ struct wil_status_ring *sring;
+ struct wil_rx_status_extended msg1;
+ void *msg = &msg1;
+ u8 dr_bit;
+ int i;
+
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ sring = &wil->srings[i];
+ if (!sring->va)
+ continue;
+
+ wil_get_next_rx_status_msg(sring, msg);
+ dr_bit = wil_rx_status_get_desc_rdy_bit(msg);
+
+ /* Check if there are unhandled RX status messages */
+ if (dr_bit == sring->desc_rdy_pol)
+ return false;
+ }
+
+ return true;
+}
+
+static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
+{
+ wil->rx_buf_len = rx_large_buf ?
+ WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT;
+}
+
+static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
+{
+ u16 status_ring_size;
+ struct wil_ring *ring = &wil->ring_rx;
+ int rc;
+ size_t elem_size = wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended);
+ int i;
+ u16 max_rx_pl_per_desc;
+
+ /* When SW reorder is used, extended status messages are required */
+ if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
+ wil_err(wil,
+ "compressed RX status cannot be used with SW reorder\n");
+ return -EINVAL;
+ }
+
+ if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
+ wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
+ wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
+
+ status_ring_size = 1 << wil->rx_status_ring_order;
+
+ wil_dbg_misc(wil,
+ "rx_init, desc_ring_size=%u, status_ring_size=%u, elem_size=%zu\n",
+ desc_ring_size, status_ring_size, elem_size);
+
+ wil_rx_buf_len_init_edma(wil);
+
+ max_rx_pl_per_desc = ALIGN(wil->rx_buf_len, 4);
+
+ /* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
+ if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
+ wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;
+
+ wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
+ wil->num_rx_status_rings);
+
+ rc = wil_wmi_cfg_def_rx_offload(wil, max_rx_pl_per_desc);
+ if (rc)
+ return rc;
+
+ /* Allocate status ring */
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ int sring_id = wil_find_free_sring(wil);
+
+ if (sring_id < 0) {
+ rc = -EFAULT;
+ goto err_free_status;
+ }
+ rc = wil_init_rx_sring(wil, status_ring_size, elem_size,
+ sring_id);
+ if (rc)
+ goto err_free_status;
+ }
+
+ /* Allocate descriptor ring */
+ rc = wil_init_rx_desc_ring(wil, desc_ring_size,
+ WIL_DEFAULT_RX_STATUS_RING_ID);
+ if (rc)
+ goto err_free_status;
+
+ if (wil->rx_buff_id_count >= status_ring_size) {
+ wil_info(wil,
+ "rx_buff_id_count %d exceeds sring_size %d. set it to %d\n",
+ wil->rx_buff_id_count, status_ring_size,
+ status_ring_size - 1);
+ wil->rx_buff_id_count = status_ring_size - 1;
+ }
+
+ /* Allocate Rx buffer array */
+ rc = wil_init_rx_buff_arr(wil, wil->rx_buff_id_count);
+ if (rc)
+ goto err_free_desc;
+
+ /* Fill descriptor ring with credits */
+ rc = wil_rx_refill_edma(wil);
+ if (rc)
+ goto err_free_rx_buff_arr;
+
+ return 0;
+err_free_rx_buff_arr:
+ wil_free_rx_buff_arr(wil);
+err_free_desc:
+ wil_ring_free_edma(wil, ring);
+err_free_status:
+ for (i = 0; i < wil->num_rx_status_rings; i++)
+ wil_sring_free(wil, &wil->srings[i]);
+
+ return rc;
+}
+
+static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
+ int size, int cid, int tid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+ lockdep_assert_held(&wil->mutex);
+
+ wil_dbg_misc(wil,
+ "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
+ ring_id, cid, tid, wil->tx_sring_idx);
+
+ wil_tx_data_init(txdata);
+ ring->size = size;
+ rc = wil_ring_alloc_desc_ring(wil, ring);
+ if (rc)
+ goto out;
+
+ wil->ring2cid_tid[ring_id][0] = cid;
+ wil->ring2cid_tid[ring_id][1] = tid;
+ if (!vif->privacy)
+ txdata->dot1x_open = true;
+
+ rc = wil_wmi_tx_desc_ring_add(vif, ring_id, cid, tid);
+ if (rc) {
+ wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed\n");
+ goto out_free;
+ }
+
+ if (txdata->dot1x_open && agg_wsize >= 0)
+ wil_addba_tx_request(wil, ring_id, agg_wsize);
+
+ return 0;
+ out_free:
+ spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = false;
+ txdata->enabled = 0;
+ spin_unlock_bh(&txdata->lock);
+ wil_ring_free_edma(wil, ring);
+ wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
+ wil->ring2cid_tid[ring_id][1] = 0;
+
+ out:
+ return rc;
+}
+
+/* This function is used only for RX SW reorder */
+static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
+ struct sk_buff *skb, struct wil_net_stats *stats)
+{
+ u8 ftype;
+ u8 fc1;
+ int mid;
+ int tid;
+ u16 seq;
+ struct wil6210_vif *vif;
+
+ ftype = wil_rx_status_get_frame_type(wil, msg);
+ if (ftype == IEEE80211_FTYPE_DATA)
+ return 0;
+
+ fc1 = wil_rx_status_get_fc1(wil, msg);
+ mid = wil_rx_status_get_mid(msg);
+ tid = wil_rx_status_get_tid(msg);
+ seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg));
+ vif = wil->vifs[mid];
+
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil, "RX descriptor with invalid mid %d", mid);
+ return -EAGAIN;
+ }
+
+ wil_dbg_txrx(wil,
+ "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+ fc1, mid, cid, tid, seq);
+ if (stats)
+ stats->rx_non_data_frame++;
+ if (wil_is_back_req(fc1)) {
+ wil_dbg_txrx(wil,
+ "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
+ mid, cid, tid, seq);
+ wil_rx_bar(wil, vif, cid, tid, seq);
+ } else {
+ u32 sz = wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended);
+
+ /* Print all the info again. This allows enabling only this
+ * print, without the overhead of printing every Rx frame
+ */
+ wil_dbg_txrx(wil,
+ "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+ fc1, mid, cid, tid, seq);
+ wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)msg, sz, false);
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+ }
+
+ return -EAGAIN;
+}
+
+static int wil_rx_error_check_edma(struct wil6210_priv *wil,
+ struct sk_buff *skb,
+ struct wil_net_stats *stats)
+{
+ int error;
+ int l2_rx_status;
+ int l3_rx_status;
+ int l4_rx_status;
+ void *msg = wil_skb_rxstatus(skb);
+
+ error = wil_rx_status_get_error(msg);
+ if (!error) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return 0;
+ }
+
+ l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
+ if (l2_rx_status != 0) {
+ wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
+ l2_rx_status);
+ /* Due to a HW issue, a KEY error will also trigger a MIC error */
+ if (l2_rx_status & WIL_RX_EDMA_ERROR_MIC) {
+ wil_dbg_txrx(wil,
+ "L2 MIC/KEY error, dropping packet\n");
+ stats->rx_mic_error++;
+ }
+ if (l2_rx_status & WIL_RX_EDMA_ERROR_KEY) {
+ wil_dbg_txrx(wil, "L2 KEY error, dropping packet\n");
+ stats->rx_key_error++;
+ }
+ if (l2_rx_status & WIL_RX_EDMA_ERROR_REPLAY) {
+ wil_dbg_txrx(wil,
+ "L2 REPLAY error, dropping packet\n");
+ stats->rx_replay++;
+ }
+ if (l2_rx_status & WIL_RX_EDMA_ERROR_AMSDU) {
+ wil_dbg_txrx(wil,
+ "L2 AMSDU error, dropping packet\n");
+ stats->rx_amsdu_error++;
+ }
+ return -EFAULT;
+ }
+
+ l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
+ l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
+ if (!l3_rx_status && !l4_rx_status)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* If HW reports a bad checksum, let the IP stack re-check it.
+ * For example, HW doesn't understand the Microsoft IP stack,
+ * which mis-calculates the TCP checksum - when it should be 0x0,
+ * it writes 0xffff, in violation of RFC 1624
+ */
+ else
+ stats->rx_csum_err++;
+
+ return 0;
+}
+
+static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct wil_rx_status_extended msg1;
+ void *msg = &msg1;
+ u16 buff_id;
+ struct sk_buff *skb;
+ dma_addr_t pa;
+ struct wil_ring_rx_data *rxdata = &sring->rx_data;
+ unsigned int sz = ALIGN(wil->rx_buf_len, 4);
+ struct wil_net_stats *stats = NULL;
+ u16 dmalen;
+ int cid;
+ bool eop, headstolen;
+ int delta;
+ u8 dr_bit;
+ u8 data_offset;
+ struct wil_rx_status_extended *s;
+ u16 sring_idx = sring - wil->srings;
+
+ BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
+
+again:
+ wil_get_next_rx_status_msg(sring, msg);
+ dr_bit = wil_rx_status_get_desc_rdy_bit(msg);
+
+ /* Completed handling all the ready status messages */
+ if (dr_bit != sring->desc_rdy_pol)
+ return NULL;
+
+ /* Extract the buffer ID from the status message */
+ buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
+ if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) {
+ wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
+ buff_id, sring->swhead);
+ wil_sring_advance_swhead(sring);
+ goto again;
+ }
+
+ wil_sring_advance_swhead(sring);
+
+ /* Extract the SKB from the rx_buff management array */
+ skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+ wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
+ if (!skb) {
+ wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+ goto again;
+ }
+
+ memcpy(&pa, skb->cb, sizeof(pa));
+ dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
+ dmalen = le16_to_cpu(wil_rx_status_get_length(msg));
+
+ trace_wil6210_rx_status(wil, wil->use_compressed_rx_status, buff_id,
+ msg);
+ wil_dbg_txrx(wil, "Rx, buff_id=%u, sring_idx=%u, dmalen=%u bytes\n",
+ buff_id, sring_idx, dmalen);
+ wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)msg, wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended), false);
+
+ /* Move the buffer from the active list to the free list */
+ list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+ &wil->rx_buff_mgmt.free);
+
+ eop = wil_rx_status_get_eop(msg);
+
+ cid = wil_rx_status_get_cid(msg);
+ if (unlikely(!wil_val_in_range(cid, 0, WIL6210_MAX_CID))) {
+ wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
+ cid, sring->swhead);
+ rxdata->skipping = true;
+ goto skipping;
+ }
+ stats = &wil->sta[cid].stats;
+
+ if (unlikely(skb->len < ETH_HLEN)) {
+ wil_dbg_txrx(wil, "Short frame, len = %d\n", skb->len);
+ stats->rx_short_frame++;
+ rxdata->skipping = true;
+ goto skipping;
+ }
+
+ if (unlikely(dmalen > sz)) {
+ wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+ stats->rx_large_frame++;
+ rxdata->skipping = true;
+ }
+
+skipping:
+ /* skipping indicates whether the current SKB should be dropped.
+ * It is set when there is an error on the current SKB, or during
+ * RX chaining: as long as we manage to merge the SKBs it stays
+ * false. Once we hit a bad SKB, or fail to merge SKBs, it is set
+ * to the !EOP value of the current SKB. This guarantees that all
+ * the following SKBs, up to and including EOP, also get dropped.
+ */
+ if (unlikely(rxdata->skipping)) {
+ kfree_skb(skb);
+ if (rxdata->skb) {
+ kfree_skb(rxdata->skb);
+ rxdata->skb = NULL;
+ }
+ rxdata->skipping = !eop;
+ goto again;
+ }
+
+ skb_trim(skb, dmalen);
+
+ prefetch(skb->data);
+
+ if (!rxdata->skb) {
+ rxdata->skb = skb;
+ } else {
+ if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen,
+ &delta))) {
+ kfree_skb_partial(skb, headstolen);
+ } else {
+ wil_err(wil, "failed to merge skbs!\n");
+ kfree_skb(skb);
+ kfree_skb(rxdata->skb);
+ rxdata->skb = NULL;
+ rxdata->skipping = !eop;
+ goto again;
+ }
+ }
+
+ if (!eop)
+ goto again;
+
+ /* When we reach this point, rxdata->skb always contains a full packet */
+ skb = rxdata->skb;
+ rxdata->skb = NULL;
+ rxdata->skipping = false;
+
+ if (stats) {
+ stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
+ if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
+ stats->rx_per_mcs[stats->last_mcs_rx]++;
+ }
+
+ if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
+ wil_check_bar(wil, msg, cid, skb, stats) == -EAGAIN) {
+ kfree_skb(skb);
+ goto again;
+ }
+
+ /* Compensate for the HW data alignment according to the status
+ * message
+ */
+ data_offset = wil_rx_status_get_data_offset(msg);
+ if (data_offset == 0xFF ||
+ data_offset > WIL_EDMA_MAX_DATA_OFFSET) {
+ wil_err(wil, "Unexpected data offset %d\n", data_offset);
+ kfree_skb(skb);
+ goto again;
+ }
+
+ skb_pull(skb, data_offset);
+
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ /* This must be done after dma_unmap_single(), as skb->cb is
+ * also used for holding the physical address (pa)
+ */
+ s = wil_skb_rxstatus(skb);
+ memcpy(s, msg, sring->elem_size);
+
+ return skb;
+}
+
+void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota)
+{
+ struct net_device *ndev;
+ struct wil_ring *ring = &wil->ring_rx;
+ struct wil_status_ring *sring;
+ struct sk_buff *skb;
+ int i;
+
+ if (unlikely(!ring->va)) {
+ wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
+ return;
+ }
+ wil_dbg_txrx(wil, "rx_handle\n");
+
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ sring = &wil->srings[i];
+ if (unlikely(!sring->va)) {
+ wil_err(wil,
+ "Rx IRQ while Rx status ring %d not yet initialized\n",
+ i);
+ continue;
+ }
+
+ while ((*quota > 0) &&
+ (NULL != (skb =
+ wil_sring_reap_rx_edma(wil, sring)))) {
+ (*quota)--;
+ if (wil->use_rx_hw_reordering) {
+ void *msg = wil_skb_rxstatus(skb);
+ int mid = wil_rx_status_get_mid(msg);
+ struct wil6210_vif *vif = wil->vifs[mid];
+
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil,
+ "RX desc invalid mid %d",
+ mid);
+ kfree_skb(skb);
+ continue;
+ }
+ ndev = vif_to_ndev(vif);
+ wil_netif_rx_any(skb, ndev);
+ } else {
+ wil_rx_reorder(wil, skb);
+ }
+ }
+
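+ /* Point the HW read pointer at the last status entry that was
+ * already consumed
+ */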
+ wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
+ }
+
+ wil_rx_refill_edma(wil);
+}
+
+static int wil_tx_desc_map_edma(union wil_tx_desc *desc,
+ dma_addr_t pa,
+ u32 len,
+ int ring_index)
+{
+ struct wil_tx_enhanced_desc *d =
+ (struct wil_tx_enhanced_desc *)&desc->enhanced;
+
+ memset(d, 0, sizeof(struct wil_tx_enhanced_desc));
+
+ wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
+
+ /* 0..6: mac_length; 7: ip_version (0 - IPv6, 1 - IPv4) */
+ d->dma.length = cpu_to_le16((u16)len);
+ d->mac.d[0] = (ring_index << WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS);
+ /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi;
+ * 3 - eth mode
+ */
+ d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+ (0x3 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+ return 0;
+}
+
+static inline void
+wil_get_next_tx_status_msg(struct wil_status_ring *sring,
+ struct wil_ring_tx_status *msg)
+{
+ struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
+ (sring->va + (sring->elem_size * sring->swhead));
+
+ *msg = *_msg;
+}
+
+/**
+ * Clean up transmitted skbs from the Tx descriptor RING.
+ * Returns the number of descriptors cleared.
+ */
+int wil_tx_sring_handler(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct net_device *ndev;
+ struct device *dev = wil_to_dev(wil);
+ struct wil_ring *ring = NULL;
+ struct wil_ring_tx_data *txdata;
+ /* Total number of completed descriptors in all descriptor rings */
+ int desc_cnt = 0;
+ int cid;
+ struct wil_net_stats *stats = NULL;
+ struct wil_tx_enhanced_desc *_d;
+ unsigned int ring_id;
+ unsigned int num_descs;
+ int i;
+ u8 dr_bit; /* Descriptor Ready bit */
+ struct wil_ring_tx_status msg;
+ struct wil6210_vif *vif;
+ int used_before_complete;
+ int used_new;
+
+ wil_get_next_tx_status_msg(sring, &msg);
+ dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
+
+ /* Process completion messages while DR bit has the expected polarity */
+ while (dr_bit == sring->desc_rdy_pol) {
+ num_descs = msg.num_descriptors;
+ if (!num_descs) {
+ wil_err(wil, "invalid num_descs 0\n");
+ goto again;
+ }
+
+ /* Find the corresponding descriptor ring */
+ ring_id = msg.ring_id;
+
+ if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) {
+ wil_err(wil, "invalid ring id %d\n", ring_id);
+ goto again;
+ }
+ ring = &wil->ring_tx[ring_id];
+ if (unlikely(!ring->va)) {
+ wil_err(wil, "Tx irq[%d]: ring not initialized\n",
+ ring_id);
+ goto again;
+ }
+ txdata = &wil->ring_tx_data[ring_id];
+ if (unlikely(!txdata->enabled)) {
+ wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id);
+ goto again;
+ }
+ vif = wil->vifs[txdata->mid];
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil, "invalid MID %d for ring %d\n",
+ txdata->mid, ring_id);
+ goto again;
+ }
+
+ ndev = vif_to_ndev(vif);
+
+ cid = wil->ring2cid_tid[ring_id][0];
+ if (cid < WIL6210_MAX_CID)
+ stats = &wil->sta[cid].stats;
+
+ wil_dbg_txrx(wil,
+ "tx_status: completed desc_ring (%d), num_descs (%d)\n",
+ ring_id, num_descs);
+
+ used_before_complete = wil_ring_used_tx(ring);
+
+ for (i = 0 ; i < num_descs; ++i) {
+ struct wil_ctx *ctx = &ring->ctx[ring->swtail];
+ struct wil_tx_enhanced_desc dd, *d = &dd;
+ u16 dmalen;
+ struct sk_buff *skb = ctx->skb;
+
+ _d = (struct wil_tx_enhanced_desc *)
+ &ring->va[ring->swtail].tx.enhanced;
+ *d = *_d;
+
+ dmalen = le16_to_cpu(d->dma.length);
+ trace_wil6210_tx_status(&msg, ring->swtail, dmalen);
+ wil_dbg_txrx(wil,
+ "TxC[%2d][%3d] : %d bytes, status 0x%02x\n",
+ ring_id, ring->swtail, dmalen,
+ msg.status);
+ wil_hex_dump_txrx("TxS ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)&msg, sizeof(msg),
+ false);
+
+ wil_tx_desc_unmap_edma(dev,
+ (union wil_tx_desc *)d,
+ ctx);
+
+ if (skb) {
+ if (likely(msg.status == 0)) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ if (stats) {
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+
+ wil_tx_latency_calc(wil, skb,
+ &wil->sta[cid]);
+ }
+ } else {
+ ndev->stats.tx_errors++;
+ if (stats)
+ stats->tx_errors++;
+ }
+ wil_consume_skb(skb, msg.status == 0);
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ /* Make sure the ctx is zeroed before updating the tail,
+ * to prevent wil_tx_ring() from seeing this descriptor
+ * as free and reusing it before the ctx zeroing is
+ * completed.
+ */
+ wmb();
+
+ ring->swtail = wil_ring_next_tail(ring);
+
+ desc_cnt++;
+ }
+
+ /* performance monitoring */
+ used_new = wil_ring_used_tx(ring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
+ used_new, used_before_complete)) {
+ wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
+ ring_id, used_before_complete, used_new);
+ txdata->last_idle = get_cycles();
+ }
+
+again:
+ wil_sring_advance_swhead(sring);
+
+ wil_get_next_tx_status_msg(sring, &msg);
+ dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
+ }
+
+ /* shall we wake net queues? */
+ if (desc_cnt)
+ wil_update_net_queues(wil, vif, NULL, false);
+
+ /* Update the HW tail ptr (RD ptr) */
+ wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
+
+ return desc_cnt;
+}
+
+/**
+ * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+ * @skb is used to obtain the protocol and the header lengths.
+ * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
+ * 2 - middle, 3 - last descriptor.
+ */
+static void wil_tx_desc_offload_setup_tso_edma(struct wil_tx_enhanced_desc *d,
+ int tso_desc_type, bool is_ipv4,
+ int tcp_hdr_len,
+ int skb_net_hdr_len,
+ int mss)
+{
+ /* Number of descriptors */
+ d->mac.d[2] |= 1;
+ /* Maximum Segment Size */
+ d->mac.tso_mss |= cpu_to_le16(mss >> 2);
+ /* L4 header len: TCP header length */
+ d->dma.l4_hdr_len |= tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK;
+ /* EOP, TSO desc type, Segmentation enable,
+ * Insert IPv4 and TCP / UDP Checksum
+ */
+ d->dma.cmd |= BIT(WIL_EDMA_DESC_TX_CFG_EOP_POS) |
+ tso_desc_type << WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS |
+ BIT(WIL_EDMA_DESC_TX_CFG_SEG_EN_POS) |
+ BIT(WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS) |
+ BIT(WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS);
+ /* Calculate pseudo-header */
+ d->dma.w1 |= BIT(WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS) |
+ BIT(WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS);
+ /* IP Header Length */
+ d->dma.ip_length |= skb_net_hdr_len;
+ /* MAC header length and IP address family */
+ d->dma.b11 |= ETH_HLEN |
+ is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
+}
+
+static int wil_tx_tso_gen_desc(struct wil6210_priv *wil, void *buff_addr,
+ int len, uint i, int tso_desc_type,
+ skb_frag_t *frag, struct wil_ring *ring,
+ struct sk_buff *skb, bool is_ipv4,
+ int tcp_hdr_len, int skb_net_hdr_len,
+ int mss, int *descs_used)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct wil_tx_enhanced_desc *_desc = (struct wil_tx_enhanced_desc *)
+ &ring->va[i].tx.enhanced;
+ struct wil_tx_enhanced_desc desc_mem, *d = &desc_mem;
+ int ring_index = ring - wil->ring_tx;
+ dma_addr_t pa;
+
+ if (len == 0)
+ return 0;
+
+ if (!frag) {
+ pa = dma_map_single(dev, buff_addr, len, DMA_TO_DEVICE);
+ ring->ctx[i].mapped_as = wil_mapped_as_single;
+ } else {
+ pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
+ ring->ctx[i].mapped_as = wil_mapped_as_page;
+ }
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ wil_err(wil, "TSO: Skb DMA map error\n");
+ return -EINVAL;
+ }
+
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa,
+ len, ring_index);
+ wil_tx_desc_offload_setup_tso_edma(d, tso_desc_type, is_ipv4,
+ tcp_hdr_len,
+ skb_net_hdr_len, mss);
+
+ /* Hold a reference to the skb to prevent it from being
+ * released before accounting, in case of an immediate
+ * "tx done"
+ */
+ if (tso_desc_type == wil_tso_type_lst)
+ ring->ctx[i].skb = skb_get(skb);
+
+ wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ *_desc = *d;
+ (*descs_used)++;
+
+ return 0;
+}
+
+static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct wil_ring *ring,
+ struct sk_buff *skb)
+{
+ int ring_index = ring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
+ int used, avail = wil_ring_avail_tx(ring);
+ int f, hdrlen, headlen;
+ int gso_type;
+ bool is_ipv4;
+ u32 swhead = ring->swhead;
+ int descs_used = 0; /* total number of used descriptors */
+ int rc = -EINVAL;
+ int tcp_hdr_len;
+ int skb_net_hdr_len;
+ int mss = skb_shinfo(skb)->gso_size;
+
+ wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
+ ring_index);
+
+ if (unlikely(!txdata->enabled))
+ return -EINVAL;
+
+ if (unlikely(avail < min_desc_required)) {
+ wil_err_ratelimited(wil,
+ "TSO: Tx ring[%2d] full. No space for %d fragments\n",
+ ring_index, min_desc_required);
+ return -ENOMEM;
+ }
+
+ gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
+ switch (gso_type) {
+ case SKB_GSO_TCPV4:
+ is_ipv4 = true;
+ break;
+ case SKB_GSO_TCPV6:
+ is_ipv4 = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return -EINVAL;
+
+ /* The TCP header length and the skb network header length are fixed
+ * for all of the packet's descriptors - read them once here
+ */
+ tcp_hdr_len = tcp_hdrlen(skb);
+ skb_net_hdr_len = skb_network_header_len(skb);
+
+ /* First descriptor must contain the header only
+ * Header Length = MAC header len + IP header len + TCP header len
+ */
+ hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len;
+ wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n",
+ hdrlen);
+ rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
+ wil_tso_type_hdr, NULL, ring, skb,
+ is_ipv4, tcp_hdr_len, skb_net_hdr_len,
+ mss, &descs_used);
+ if (rc)
+ return -EINVAL;
+
+ /* Second descriptor contains the head */
+ headlen = skb_headlen(skb) - hdrlen;
+ wil_dbg_txrx(wil, "TSO: process skb head, headlen %u\n", headlen);
+ rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen,
+ (swhead + descs_used) % ring->size,
+ (nr_frags != 0) ? wil_tso_type_first :
+ wil_tso_type_lst, NULL, ring, skb,
+ is_ipv4, tcp_hdr_len, skb_net_hdr_len,
+ mss, &descs_used);
+ if (rc)
+ goto mem_error;
+
+ /* Rest of the descriptors are from the SKB fragments */
+ for (f = 0; f < nr_frags; f++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+ int len = frag->size;
+
+ wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
+ len, descs_used);
+
+ rc = wil_tx_tso_gen_desc(wil, NULL, len,
+ (swhead + descs_used) % ring->size,
+ (f != nr_frags - 1) ?
+ wil_tso_type_mid : wil_tso_type_lst,
+ frag, ring, skb, is_ipv4,
+ tcp_hdr_len, skb_net_hdr_len,
+ mss, &descs_used);
+ if (rc)
+ goto mem_error;
+ }
+
+ /* performance monitoring */
+ used = wil_ring_used_tx(ring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
+ used, used + descs_used)) {
+ txdata->idle += get_cycles() - txdata->last_idle;
+ wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
+ ring_index, used, used + descs_used);
+ }
+
+ /* advance swhead */
+ wil_ring_advance_head(ring, descs_used);
+ wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, ring->swhead);
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ if (wil->tx_latency)
+ *(ktime_t *)&skb->cb = ktime_get();
+ else
+ memset(skb->cb, 0, sizeof(ktime_t));
+
+ wil_w(wil, ring->hwtail, ring->swhead);
+
+ return 0;
+
+mem_error:
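+ /* Unwind in reverse order: unmap and clear every descriptor that
+ * was already written
+ */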
+ while (descs_used > 0) {
+ struct device *dev = wil_to_dev(wil);
+ struct wil_ctx *ctx;
+ int i = (swhead + descs_used - 1) % ring->size;
+ struct wil_tx_enhanced_desc dd, *d = &dd;
+ struct wil_tx_enhanced_desc *_desc =
+ (struct wil_tx_enhanced_desc *)
+ &ring->va[i].tx.enhanced;
+
+ *d = *_desc;
+ ctx = &ring->ctx[i];
+ wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
+ memset(ctx, 0, sizeof(*ctx));
+ descs_used--;
+ }
+ return rc;
+}
+
+static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
+ int size)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ int rc;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+ wil_dbg_misc(wil, "init bcast: ring_id=%d, sring_id=%d\n",
+ ring_id, wil->tx_sring_idx);
+
+ lockdep_assert_held(&wil->mutex);
+
+ wil_tx_data_init(txdata);
+ ring->size = size;
+ ring->is_rx = false;
+ rc = wil_ring_alloc_desc_ring(wil, ring);
+ if (rc)
+ goto out;
+
+ wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; /* CID */
+ wil->ring2cid_tid[ring_id][1] = 0; /* TID */
+ if (!vif->privacy)
+ txdata->dot1x_open = true;
+
+ rc = wil_wmi_bcast_desc_ring_add(vif, ring_id);
+ if (rc)
+ goto out_free;
+
+ return 0;
+
+ out_free:
+ spin_lock_bh(&txdata->lock);
+ txdata->enabled = 0;
+ txdata->dot1x_open = false;
+ spin_unlock_bh(&txdata->lock);
+ wil_ring_free_edma(wil, ring);
+
+out:
+ return rc;
+}
+
+static void wil_tx_fini_edma(struct wil6210_priv *wil)
+{
+ struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
+
+ wil_dbg_misc(wil, "free TX sring\n");
+
+ wil_sring_free(wil, sring);
+}
+
+static void wil_rx_data_free(struct wil_status_ring *sring)
+{
+ if (!sring)
+ return;
+
+ kfree_skb(sring->rx_data.skb);
+ sring->rx_data.skb = NULL;
+}
+
+static void wil_rx_fini_edma(struct wil6210_priv *wil)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+ int i;
+
+ wil_dbg_misc(wil, "rx_fini_edma\n");
+
+ wil_ring_free_edma(wil, ring);
+
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ wil_rx_data_free(&wil->srings[i]);
+ wil_sring_free(wil, &wil->srings[i]);
+ }
+
+ wil_free_rx_buff_arr(wil);
+}
+
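+/* Install the enhanced-DMA implementations of the txrx ops; the legacy
+ * variants are installed by wil_init_txrx_ops_legacy_dma() (see txrx.h)
+ */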
+void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
+{
+ wil->txrx_ops.configure_interrupt_moderation =
+ wil_configure_interrupt_moderation_edma;
+ /* TX ops */
+ wil->txrx_ops.ring_init_tx = wil_ring_init_tx_edma;
+ wil->txrx_ops.ring_fini_tx = wil_ring_free_edma;
+ wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma;
+ wil->txrx_ops.tx_init = wil_tx_init_edma;
+ wil->txrx_ops.tx_fini = wil_tx_fini_edma;
+ wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
+ wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
+ wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
+ /* RX ops */
+ wil->txrx_ops.rx_init = wil_rx_init_edma;
+ wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
+ wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma;
+ wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma;
+ wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma;
+ wil->txrx_ops.rx_error_check = wil_rx_error_check_edma;
+ wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma;
+ wil->txrx_ops.rx_fini = wil_rx_fini_edma;
+}
+
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
new file mode 100644
index 000000000000..a7fe9292fda3
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2012-2016,2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WIL6210_TXRX_EDMA_H
+#define WIL6210_TXRX_EDMA_H
+
+#include "wil6210.h"
+
+/* limit the status ring size to the range [min ring size..max ring size] */
+#define WIL_SRING_SIZE_ORDER_MIN (WIL_RING_SIZE_ORDER_MIN)
+#define WIL_SRING_SIZE_ORDER_MAX (WIL_RING_SIZE_ORDER_MAX)
+/* RX sring order should be bigger than RX ring order */
+#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (11)
+#define WIL_TX_SRING_SIZE_ORDER_DEFAULT (12)
+#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (1536)
+
+#define WIL_DEFAULT_RX_STATUS_RING_ID 0
+#define WIL_RX_DESC_RING_ID 0
+#define WIL_RX_STATUS_IRQ_IDX 0
+#define WIL_TX_STATUS_IRQ_IDX 1
+
+#define WIL_EDMA_AGG_WATERMARK (0xffff)
+#define WIL_EDMA_AGG_WATERMARK_POS (16)
+
+#define WIL_EDMA_IDLE_TIME_LIMIT_USEC (50)
+#define WIL_EDMA_TIME_UNIT_CLK_CYCLES (330) /* fits 1 usec */
+
+/* Error field */
+#define WIL_RX_EDMA_ERROR_MIC (1)
+#define WIL_RX_EDMA_ERROR_KEY (2) /* Key missing */
+#define WIL_RX_EDMA_ERROR_REPLAY (3)
+#define WIL_RX_EDMA_ERROR_AMSDU (4)
+#define WIL_RX_EDMA_ERROR_FCS (7)
+
+#define WIL_RX_EDMA_ERROR_L3_ERR (BIT(0) | BIT(1))
+#define WIL_RX_EDMA_ERROR_L4_ERR (BIT(0) | BIT(1))
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_BIT BIT(11)
+#define WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK 0x7
+#define WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK 0xf
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_CID_POS 2
+#define WIL_RX_EDMA_DLPF_LU_HIT_CID_POS 4
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_TID_POS 5
+
+#define WIL_RX_EDMA_MID_VALID_BIT BIT(22)
+
+#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS 16
+#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_LEN 6
+
+#define WIL_EDMA_DESC_TX_CFG_EOP_POS 0
+#define WIL_EDMA_DESC_TX_CFG_EOP_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS 3
+#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_LEN 2
+
+#define WIL_EDMA_DESC_TX_CFG_SEG_EN_POS 5
+#define WIL_EDMA_DESC_TX_CFG_SEG_EN_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS 6
+#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS 7
+#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS 15
+#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS 5
+#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_LEN 1
+
+/* Enhanced Rx descriptor - MAC part
+ * [dword 0] : Reserved
+ * [dword 1] : Reserved
+ * [dword 2] : Reserved
+ * [dword 3]
+ * bit 0..15 : Buffer ID
+ * bit 16..31 : Reserved
+ */
+struct wil_ring_rx_enhanced_mac {
+ u32 d[3];
+ __le16 buff_id;
+ u16 reserved;
+} __packed;
+
+/* Enhanced Rx descriptor - DMA part
+ * [dword 0] - Reserved
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer address, bits 0-31
+ * [dword 2]
+ * bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47
+ * bit 16..31 : Reserved
+ * [dword 3]
+ * bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63
+ * bit 16..31 : length
+ */
+struct wil_ring_rx_enhanced_dma {
+ u32 d0;
+ struct wil_ring_dma_addr addr;
+ u16 w5;
+ __le16 addr_high_high;
+ __le16 length;
+} __packed;
+
+struct wil_rx_enhanced_desc {
+ struct wil_ring_rx_enhanced_mac mac;
+ struct wil_ring_rx_enhanced_dma dma;
+} __packed;
+
+/* Enhanced Tx descriptor - DMA part
+ * [dword 0]
+ * Same as legacy
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer address, bits 0-31
+ * [dword 2]
+ * bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47
+ * bit 16..23 : ip_length:8 The IP header length for the TX IP checksum
+ * offload feature
+ * bit 24..30 : mac_length:7
+ * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6
+ * [dword 3]
+ * bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63
+ * bit 16..31 : length
+ */
+struct wil_ring_tx_enhanced_dma {
+ u8 l4_hdr_len;
+ u8 cmd;
+ u16 w1;
+ struct wil_ring_dma_addr addr;
+ u8 ip_length;
+ u8 b11; /* 0..6: mac_length; 7:ip_version */
+ __le16 addr_high_high;
+ __le16 length;
+} __packed;
+
+/* Enhanced Tx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 9 : lifetime_expiry_value:10
+ * bit 10 : interrupt_en:1
+ * bit 11 : status_en:1
+ * bit 12..13 : txss_override:2
+ * bit 14 : timestamp_insertion:1
+ * bit 15 : duration_preserve:1
+ * bit 16..21 : reserved0:6
+ * bit 22..26 : mcs_index:5
+ * bit 27 : mcs_en:1
+ * bit 28..30 : reserved1:3
+ * bit 31 : sn_preserved:1
+ * [dword 1]
+ * bit 0.. 3 : pkt_mode:4
+ * bit 4 : pkt_mode_en:1
+ * bit 5..14 : reserved0:10
+ * bit 15 : ack_policy_en:1
+ * bit 16..19 : dst_index:4
+ * bit 20 : dst_index_en:1
+ * bit 21..22 : ack_policy:2
+ * bit 23 : lifetime_en:1
+ * bit 24..30 : max_retry:7
+ * bit 31 : max_retry_en:1
+ * [dword 2]
+ * bit 0.. 7 : num_of_descriptors:8
+ * bit 8..17 : reserved:10
+ * bit 18..19 : l2_translation_type:2 00 - bypass, 01 - 802.3, 10 - 802.11
+ * bit 20 : snap_hdr_insertion_en:1
+ * bit 21 : vlan_removal_en:1
+ * bit 22..23 : reserved0:2
+ * bit 24 : Dest ID extension:1
+ * bit 25..31 : reserved0:7
+ * [dword 3]
+ * bit 0..15 : tso_mss:16
+ * bit 16..31 : descriptor_scratchpad:16 - mailbox between driver and ucode
+ */
+struct wil_ring_tx_enhanced_mac {
+ u32 d[3];
+ __le16 tso_mss;
+ u16 scratchpad;
+} __packed;
+
+struct wil_tx_enhanced_desc {
+ struct wil_ring_tx_enhanced_mac mac;
+ struct wil_ring_tx_enhanced_dma dma;
+} __packed;
+
+#define TX_STATUS_DESC_READY_POS 7
+
+/* Enhanced TX status message
+ * [dword 0]
+ * bit 0.. 7 : Number of Descriptors:8 - The number of descriptors that
+ * are used to form the packet. It is needed for WB when
+ * releasing the packet
+ * bit 8..15 : tx_ring_id:8 The transmission ring ID that is related to
+ * the message
+ * bit 16..23 : Status:8 - The TX status Code
+ * 0x0 - A successful transmission
+ * 0x1 - Retry expired
+ * 0x2 - Lifetime Expired
+ * 0x3 - Released
+ * 0x4-0xFF - Reserved
+ * bit 24..30 : Reserved:7
+ * bit 31 : Descriptor Ready bit:1 - It is initialized to
+ * zero by the driver when the ring is created. It is set by the HW
+ * to one for each completed status message. On each wrap around,
+ * the DR bit value is flipped.
+ * [dword 1]
+ * bit 0..31 : timestamp:32 - Set when MPDU is transmitted.
+ * [dword 2]
+ * bit 0.. 4 : MCS:5 - The transmitted MCS value
+ * bit 5 : Reserved:1
+ * bit 6.. 7 : CB mode:2 - 0-DMG 1-EDMG 2-Wide
+ * bit 8..12 : QID:5 - The QID that was used for the transmission
+ * bit 13..15 : Reserved:3
+ * bit 16..20 : Num of MSDUs:5 - Number of MSDUs in the aggregation
+ * bit 21..22 : Reserved:2
+ * bit 23 : Retry:1 - An indication that the transmission was retried
+ * bit 24..31 : TX-Sector:8 - the antenna sector that was used for
+ * transmission
+ * [dword 3]
+ * bit 0..11 : Sequence number:12 - The Sequence Number that was used
+ * for the MPDU transmission
+ * bit 12..31 : Reserved:20
+ */
+struct wil_ring_tx_status {
+ u8 num_descriptors;
+ u8 ring_id;
+ u8 status;
+ u8 desc_ready; /* Only the last bit should be set */
+ u32 timestamp;
+ u32 d2;
+ u16 seq_number; /* Only the first 12 bits */
+ u16 w7;
+} __packed;
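+
+/* The DR bit of a TX status message is extracted as
+ * msg.desc_ready >> TX_STATUS_DESC_READY_POS and compared against the
+ * ring's expected polarity; see wil_tx_sring_handler() in txrx_edma.c
+ */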
+
+/* Enhanced Rx status message - compressed part
+ * [dword 0]
+ * bit 0.. 2 : L2 Rx Status:3 - The L2 packet reception Status
+ * 0-Success, 1-MIC Error, 2-Key Error, 3-Replay Error,
+ * 4-A-MSDU Error, 5-Reserved, 6-Reserved, 7-FCS Error
+ * bit 3.. 4 : L3 Rx Status:2 - Bit0 - L3I - L3 identified and checksum
+ * calculated, Bit1- L3Err - IPv4 Checksum Error
+ * bit 5.. 6 : L4 Rx Status:2 - Bit0 - L4I - L4 identified and checksum
+ * calculated, Bit1- L4Err - TCP/UDP Checksum Error
+ * bit 7 : Reserved:1
+ * bit 8..19 : Flow ID:12 - MSDU flow ID
+ * bit 20..21 : MID:2 - The MAC ID
+ * bit 22 : MID_V:1 - The MAC ID field is valid
+ * bit 23 : L3T:1 - IP types: 0-IPv6, 1-IPv4
+ * bit 24 : L4T:1 - Layer 4 Type: 0-UDP, 1-TCP
+ * bit 25 : BC:1 - The received MPDU is broadcast
+ * bit 26 : MC:1 - The received MPDU is multicast
+ * bit 27 : Raw:1 - The MPDU received with no translation
+ * bit 28 : Sec:1 - The FC control (b14) - Frame Protected
+ * bit 29 : Error:1 - An error is set when (L2 status != 0) ||
+ * (L3 status == 3) || (L4 status == 3)
+ * bit 30 : EOP:1 - End of MSDU signaling. It is set to mark the end
+ * of the transfer, otherwise the status indicates buffer
+ * only completion.
+ * bit 31 : Descriptor Ready bit:1 - It is initialized to
+ * zero by the driver when the ring is created. It is set
+ * by the HW to one for each completed status message.
+ * On each wrap around, the DR bit value is flipped.
+ * [dword 1]
+ * bit 0.. 5 : MAC Len:6 - The number of bytes that are used for L2 header
+ * bit 6..11 : IPLEN:6 - The number of DW that are used for L3 header
+ * bit 12..15 : I4Len:4 - The number of DW that are used for L4 header
+ * bit 16..21 : MCS:6 - The received MCS field from the PLCP Header
+ * bit 22..23 : CB mode:2 - The CB Mode: 0-DMG, 1-EDMG, 2-Wide
+ * bit 24..27 : Data Offset:4 - The data offset, a code that describes the
+ * payload shift from the beginning of the buffer:
+ * 0 - 0 Bytes, 3 - 2 Bytes
+ * bit 28 : A-MSDU Present:1 - The QoS (b7) A-MSDU present field
+ * bit 29 : A-MSDU Type:1 The QoS (b8) A-MSDU Type field
+ * bit 30 : A-MPDU:1 - Packet is part of aggregated MPDU
+ * bit 31 : Key ID:1 - The extracted Key ID from the encryption header
+ * [dword 2]
+ * bit 0..15 : Buffer ID:16 - The Buffer Identifier
+ * bit 16..31 : Length:16 - It indicates the valid bytes that are stored
+ * in the current descriptor buffer. For a multi-buffer
+ * descriptor, SW needs to sum the descriptor lengths of
+ * all buffers to produce the packet length
+ * [dword 3]
+ * bit 0..31 : timestamp:32 - The MPDU Timestamp.
+ */
+struct wil_rx_status_compressed {
+ u32 d0;
+ u32 d1;
+ __le16 buff_id;
+ __le16 length;
+ u32 timestamp;
+} __packed;
+
+/* Enhanced Rx status message - extension part
+ * [dword 0]
+ * bit 0.. 4 : QID:5 - The Queue Identifier that the packet is received
+ * from
+ * bit 5.. 7 : Reserved:3
+ * bit 8..11 : TID:4 - The QoS (b3-0) TID Field
+ * bit 12..15 : Source index:4 - The Source index that was found
+ * during parsing of the TA. This field is used to define
+ * the source of the packet
+ * bit 16..18 : Destination index:3 - The Destination index that
+ * was found during parsing of the RA.
+ * bit 19..20 : DS Type:2 - The FC Control (b9-8) - From / To DS
+ * bit 21..22 : MIC ICR:2 - this signal tells the DMA to assert an
+ * interrupt after it writes the packet
+ * bit 23 : ESOP:1 - The QoS (b4) ESOP field
+ * bit 24 : RDG:1
+ * bit 25..31 : Reserved:7
+ * [dword 1]
+ * bit 0.. 1 : Frame Type:2 - The FC Control (b3-2) - MPDU Type
+ * (management, data, control and extension)
+ * bit 2.. 5 : Sub type:4 - The FC Control (b7-4) - Frame Subtype
+ * bit 6..11 : Ext sub type:6 - The FC Control (b11-8) - Frame Extended
+ * Subtype
+ * bit 12..13 : ACK Policy:2 - The QoS (b6-5) ACK Policy fields
+ * bit 14 : DECRYPT_BYP:1 - The MPDU bypassed the decryption unit
+ * bit 15..23 : Reserved:9
+ * bit 24..31 : RSSI/SNR:8 - The RSSI / SNR measurement for the received
+ * MPDU
+ * [dword 2]
+ * bit 0..11 : SN:12 - The received Sequence number field
+ * bit 12..15 : Reserved:4
+ * bit 16..31 : PN bits [15:0]:16
+ * [dword 3]
+ * bit 0..31 : PN bits [47:16]:32
+ */
+struct wil_rx_status_extension {
+ u32 d0;
+ u32 d1;
+ __le16 seq_num; /* only lower 12 bits */
+ u16 pn_15_0;
+ u32 pn_47_16;
+} __packed;
+
+struct wil_rx_status_extended {
+ struct wil_rx_status_compressed comp;
+ struct wil_rx_status_extension ext;
+} __packed;
+
+static inline void *wil_skb_rxstatus(struct sk_buff *skb)
+{
+ return (void *)skb->cb;
+}
+
+static inline __le16 wil_rx_status_get_length(void *msg)
+{
+ return ((struct wil_rx_status_compressed *)msg)->length;
+}
+
+static inline u8 wil_rx_status_get_mcs(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+ 16, 21);
+}
+
+static inline u16 wil_rx_status_get_flow_id(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 8, 19);
+}
+
+static inline u8 wil_rx_status_get_mcast(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 26, 26);
+}
+
+/**
+ * In case of a DLPF miss, the flow ID is parsed as follows:
+ * dest_id:2
+ * src_id:3 - cid
+ * tid:3
+ * Otherwise (DLPF hit):
+ * tid:4
+ * cid:4
+ */
+
+static inline u8 wil_rx_status_get_cid(void *msg)
+{
+ u16 val = wil_rx_status_get_flow_id(msg);
+
+ if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT)
+ /* CID is in bits 2..4 */
+ return (val >> WIL_RX_EDMA_DLPF_LU_MISS_CID_POS) &
+ WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
+ else
+ /* CID is in bits 4..7 */
+ return (val >> WIL_RX_EDMA_DLPF_LU_HIT_CID_POS) &
+ WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK;
+}
+
+static inline u8 wil_rx_status_get_tid(void *msg)
+{
+ u16 val = wil_rx_status_get_flow_id(msg);
+
+ if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT)
+ /* TID is in bits 5..7 */
+ return (val >> WIL_RX_EDMA_DLPF_LU_MISS_TID_POS) &
+ WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
+ else
+ /* TID is in bits 0..3 */
+ return val & WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK;
+}
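+
+/* Worked example (illustrative; assumes the CID/TID subfields do not
+ * overlap the miss bit): for
+ *	val = WIL_RX_EDMA_DLPF_LU_MISS_BIT |
+ *	      (5 << WIL_RX_EDMA_DLPF_LU_MISS_TID_POS) |
+ *	      (3 << WIL_RX_EDMA_DLPF_LU_MISS_CID_POS)
+ * wil_rx_status_get_cid() returns 3 and wil_rx_status_get_tid()
+ * returns 5. Without the miss bit, the LU-hit positions and masks
+ * apply instead.
+ */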
+
+static inline int wil_rx_status_get_desc_rdy_bit(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 31, 31);
+}
+
+static inline int wil_rx_status_get_eop(void *msg) /* EoP = End of Packet */
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 30, 30);
+}
+
+static inline __le16 wil_rx_status_get_buff_id(void *msg)
+{
+ return ((struct wil_rx_status_compressed *)msg)->buff_id;
+}
+
+static inline u8 wil_rx_status_get_data_offset(void *msg)
+{
+ u8 val = WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+ 24, 27);
+
+ switch (val) {
+ case 0: return 0;
+ case 3: return 2;
+ default: return 0xFF;
+ }
+}
+
+static inline int wil_rx_status_get_frame_type(struct wil6210_priv *wil,
+ void *msg)
+{
+ if (wil->use_compressed_rx_status)
+ return IEEE80211_FTYPE_DATA;
+
+ return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1,
+ 0, 1) << 2;
+}
+
+static inline int wil_rx_status_get_fc1(struct wil6210_priv *wil, void *msg)
+{
+ if (wil->use_compressed_rx_status)
+ return 0;
+
+ return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1,
+ 0, 5) << 2;
+}
+
+static inline __le16 wil_rx_status_get_seq(struct wil6210_priv *wil, void *msg)
+{
+ if (wil->use_compressed_rx_status)
+ return 0;
+
+ return ((struct wil_rx_status_extended *)msg)->ext.seq_num;
+}
+
+static inline u8 wil_rx_status_get_retry(void *msg)
+{
+ /* The retry bit is missing in the EDMA HW; return 1 to be on the safe side */
+ return 1;
+}
+
+static inline int wil_rx_status_get_mid(void *msg)
+{
+ if (!(((struct wil_rx_status_compressed *)msg)->d0 &
+ WIL_RX_EDMA_MID_VALID_BIT))
+ return 0; /* use the default MID */
+
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 20, 21);
+}
+
+static inline int wil_rx_status_get_error(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 29, 29);
+}
+
+static inline int wil_rx_status_get_l2_rx_status(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 0, 2);
+}
+
+static inline int wil_rx_status_get_l3_rx_status(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 3, 4);
+}
+
+static inline int wil_rx_status_get_l4_rx_status(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 5, 6);
+}
+
+static inline int wil_rx_status_get_security(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 28, 28);
+}
+
+static inline u8 wil_rx_status_get_key_id(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+ 31, 31);
+}
+
+static inline u8 wil_tx_status_get_mcs(struct wil_ring_tx_status *msg)
+{
+ return WIL_GET_BITS(msg->d2, 0, 4);
+}
+
+static inline u32 wil_ring_next_head(struct wil_ring *ring)
+{
+ return (ring->swhead + 1) % ring->size;
+}
+
+static inline void wil_desc_set_addr_edma(struct wil_ring_dma_addr *addr,
+ __le16 *addr_high_high,
+ dma_addr_t pa)
+{
+ addr->addr_low = cpu_to_le32(lower_32_bits(pa));
+ addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
+ *addr_high_high = cpu_to_le16((u16)(upper_32_bits(pa) >> 16));
+}
+
+static inline
+dma_addr_t wil_tx_desc_get_addr_edma(struct wil_ring_tx_enhanced_dma *dma)
+{
+ return le32_to_cpu(dma->addr.addr_low) |
+ ((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
+ ((u64)le16_to_cpu(dma->addr_high_high) << 48);
+}
+
+static inline
+dma_addr_t wil_rx_desc_get_addr_edma(struct wil_ring_rx_enhanced_dma *dma)
+{
+ return le32_to_cpu(dma->addr.addr_low) |
+ ((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
+ ((u64)le16_to_cpu(dma->addr_high_high) << 48);
+}
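+
+/* Round-trip example (illustrative only): for pa = 0x123456789abc,
+ * wil_desc_set_addr_edma() stores addr_low = 0x56789abc,
+ * addr_high = 0x1234 and addr_high_high = 0x0000;
+ * wil_{tx,rx}_desc_get_addr_edma() then reassembles the same 48-bit
+ * address from the three fields.
+ */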
+
+void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil);
+int wil_tx_sring_handler(struct wil6210_priv *wil,
+ struct wil_status_ring *sring);
+void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota);
+void wil_init_txrx_ops_edma(struct wil6210_priv *wil);
+
+#endif /* WIL6210_TXRX_EDMA_H */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index b623510c6f6c..17c294b1ead1 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -24,6 +24,7 @@
#include <net/cfg80211.h>
#include <linux/timex.h>
#include <linux/types.h>
+#include <linux/irqreturn.h>
#include "wmi.h"
#include "wil_platform.h"
#include "fw.h"
@@ -36,6 +37,11 @@ extern bool rx_align_2;
extern bool rx_large_buf;
extern bool debug_fw;
extern bool disable_ap_sme;
+extern bool ftm_mode;
+
+struct wil6210_priv;
+struct wil6210_vif;
+union wil_tx_desc;
#define WIL_NAME "wil6210"
@@ -45,11 +51,17 @@ extern bool disable_ap_sme;
#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw"
#define WIL_FW_NAME_FTM_SPARROW_PLUS "wil6210_sparrow_plus_ftm.fw"
+#define WIL_FW_NAME_TALYN "wil6436.fw"
+#define WIL_FW_NAME_FTM_TALYN "wil6436_ftm.fw"
+#define WIL_BRD_NAME_TALYN "wil6436.brd"
+
#define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
#define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
+#define WIL_NUM_LATENCY_BINS 200
+
/* maximum number of virtual interfaces the driver supports
* (including the main interface)
*/
@@ -80,6 +92,10 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
#define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */
#define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */
+#define WIL_MAX_AMPDU_SIZE_128 (128 * 1024) /* FW/HW limit */
+#define WIL_MAX_AGG_WSIZE_64 (64) /* FW/HW limit */
+#define WIL6210_MAX_STATUS_RINGS (8)
+
/* Hardware offload block adds the following:
* 26 bytes - 3-address QoS data header
* 8 bytes - IV + EIV (for GCMP)
@@ -203,7 +219,9 @@ struct RGF_ICR {
#define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */
#define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2)
#define RGF_USER_OTP_HW_RD_MACHINE_1 (0x880ce0)
- #define BIT_NO_FLASH_INDICATION BIT(8)
+ #define BIT_OTP_SIGNATURE_ERR_TALYN_MB BIT(0)
+ #define BIT_OTP_HW_SECTION_DONE_TALYN_MB BIT(2)
+ #define BIT_NO_FLASH_INDICATION BIT(8)
#define RGF_USER_XPM_IFC_RD_TIME1 (0x880cec)
#define RGF_USER_XPM_IFC_RD_TIME2 (0x880cf0)
#define RGF_USER_XPM_IFC_RD_TIME3 (0x880cf4)
@@ -284,6 +302,8 @@ struct RGF_ICR {
#define BIT_DMA_ITR_RX_IDL_CNT_CTL_FOREVER BIT(2)
#define BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR BIT(3)
#define BIT_DMA_ITR_RX_IDL_CNT_CTL_REACHED_TRESH BIT(4)
+#define RGF_DMA_MISC_CTL (0x881d6c)
+ #define BIT_OFUL34_RDY_VALID_BUG_FIX_EN BIT(7)
#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
@@ -305,20 +325,49 @@ struct RGF_ICR {
#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
#define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
+#define RGF_OTP_QC_SECURED (0x8a0038)
+ #define BIT_BOOT_FROM_ROM BIT(31)
+
+/* eDMA */
+#define RGF_INT_COUNT_ON_SPECIAL_EVT (0x8b62d8)
+
+#define RGF_INT_CTRL_INT_GEN_CFG_0 (0x8bc000)
+#define RGF_INT_CTRL_INT_GEN_CFG_1 (0x8bc004)
+#define RGF_INT_GEN_TIME_UNIT_LIMIT (0x8bc0c8)
+
+#define RGF_INT_GEN_CTRL (0x8bc0ec)
+ #define BIT_CONTROL_0 BIT(0)
+
+/* eDMA status interrupts */
+#define RGF_INT_GEN_RX_ICR (0x8bc0f4)
+ #define BIT_RX_STATUS_IRQ BIT(WIL_RX_STATUS_IRQ_IDX)
+#define RGF_INT_GEN_TX_ICR (0x8bc110)
+ #define BIT_TX_STATUS_IRQ BIT(WIL_TX_STATUS_IRQ_IDX)
+#define RGF_INT_CTRL_RX_INT_MASK (0x8bc12c)
+#define RGF_INT_CTRL_TX_INT_MASK (0x8bc130)
+
+#define RGF_INT_GEN_IDLE_TIME_LIMIT (0x8bc134)
+
#define USER_EXT_USER_PMU_3 (0x88d00c)
#define BIT_PMU_DEVICE_RDY BIT(0)
#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
#define JTAG_DEV_ID_SPARROW (0x2632072f)
#define JTAG_DEV_ID_TALYN (0x7e0e1)
+ #define JTAG_DEV_ID_TALYN_MB (0x1007e0e1)
#define RGF_USER_REVISION_ID (0x88afe4)
#define RGF_USER_REVISION_ID_MASK (3)
#define REVISION_ID_SPARROW_B0 (0x0)
#define REVISION_ID_SPARROW_D0 (0x3)
+#define RGF_OTP_MAC_TALYN_MB (0x8a0304)
#define RGF_OTP_MAC (0x8a0620)
+/* Talyn-MB */
+#define RGF_USER_USER_CPU_0_TALYN_MB (0x8c0138)
+#define RGF_USER_MAC_CPU_0_TALYN_MB (0x8c0154)
+
/* crash codes for FW/Ucode stored here */
/* ASSERT RGFs */
@@ -332,6 +381,7 @@ enum {
HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */
HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */
HW_VER_TALYN, /* JTAG_DEV_ID_TALYN */
+ HW_VER_TALYN_MB /* JTAG_DEV_ID_TALYN_MB */
};
/* popular locations */
@@ -349,7 +399,14 @@ enum {
/* Hardware definitions end */
#define SPARROW_FW_MAPPING_TABLE_SIZE 10
#define TALYN_FW_MAPPING_TABLE_SIZE 13
-#define MAX_FW_MAPPING_TABLE_SIZE 13
+#define TALYN_MB_FW_MAPPING_TABLE_SIZE 19
+#define MAX_FW_MAPPING_TABLE_SIZE 19
+
+/* Common representation of physical address in wil ring */
+struct wil_ring_dma_addr {
+ __le32 addr_low;
+ __le16 addr_high;
+} __packed;
struct fw_map {
u32 from; /* linker address - from, inclusive */
@@ -357,12 +414,14 @@ struct fw_map {
u32 host; /* PCI/Host address - BAR0 + 0x880000 */
const char *name; /* for debugfs */
bool fw; /* true if FW mapping, false if UCODE mapping */
+ bool crash_dump; /* true if should be dumped during crash dump */
};
/* array size should be in sync with actual definition in the wmi.c */
extern const struct fw_map sparrow_fw_mapping[SPARROW_FW_MAPPING_TABLE_SIZE];
extern const struct fw_map sparrow_d0_mac_rgf_ext;
extern const struct fw_map talyn_fw_mapping[TALYN_FW_MAPPING_TABLE_SIZE];
+extern const struct fw_map talyn_mb_fw_mapping[TALYN_MB_FW_MAPPING_TABLE_SIZE];
extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
/**
@@ -438,7 +497,7 @@ enum { /* for wil_ctx.mapped_as */
};
/**
- * struct wil_ctx - software context for Vring descriptor
+ * struct wil_ctx - software context for ring descriptor
*/
struct wil_ctx {
struct sk_buff *skb;
@@ -446,22 +505,123 @@ struct wil_ctx {
u8 mapped_as;
};
-union vring_desc;
+struct wil_desc_ring_rx_swtail { /* relevant for enhanced DMA only */
+ u32 *va;
+ dma_addr_t pa;
+};
-struct vring {
+/**
+ * A general ring structure, used for RX and TX.
+ * In legacy DMA it represents the vring;
+ * in enhanced DMA it represents the descriptor ring (vrings are handled by FW)
+ */
+struct wil_ring {
dma_addr_t pa;
- volatile union vring_desc *va; /* vring_desc[size], WriteBack by DMA */
- u16 size; /* number of vring_desc elements */
+ volatile union wil_ring_desc *va;
+ u16 size; /* number of wil_ring_desc elements */
u32 swtail;
u32 swhead;
u32 hwtail; /* write here to inform hw */
struct wil_ctx *ctx; /* ctx[size] - software context */
+ struct wil_desc_ring_rx_swtail edma_rx_swtail;
+ bool is_rx;
+};
+
+/**
+ * Additional data for Rx ring.
+ * Used for enhanced DMA RX chaining.
+ */
+struct wil_ring_rx_data {
+ /* the skb being assembled */
+ struct sk_buff *skb;
+ /* true if we are skipping a bad fragmented packet */
+ bool skipping;
+ u16 buff_size;
+};
+
+/**
+ * Status ring structure, used for enhanced DMA completions for RX and TX.
+ */
+struct wil_status_ring {
+ dma_addr_t pa;
+ void *va; /* pointer to ring_[tr]x_status elements */
+ u16 size; /* number of status elements */
+ size_t elem_size; /* status element size in bytes */
+ u32 swhead;
+ u32 hwtail; /* write here to inform hw */
+ bool is_rx;
+ u8 desc_rdy_pol; /* Expected descriptor ready bit polarity */
+ struct wil_ring_rx_data rx_data;
+};
+
+#define WIL_STA_TID_NUM (16)
+#define WIL_MCS_MAX (12) /* Maximum MCS supported */
+
+struct wil_net_stats {
+ unsigned long rx_packets;
+ unsigned long tx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_bytes;
+ unsigned long tx_errors;
+ u32 tx_latency_min_us;
+ u32 tx_latency_max_us;
+ u64 tx_latency_total_us;
+ unsigned long rx_dropped;
+ unsigned long rx_non_data_frame;
+ unsigned long rx_short_frame;
+ unsigned long rx_large_frame;
+ unsigned long rx_replay;
+ unsigned long rx_mic_error;
+ unsigned long rx_key_error; /* eDMA specific */
+ unsigned long rx_amsdu_error; /* eDMA specific */
+ unsigned long rx_csum_err;
+ u16 last_mcs_rx;
+ u64 rx_per_mcs[WIL_MCS_MAX + 1];
};
/**
- * Additional data for Tx Vring
+ * struct wil_txrx_ops - different TX/RX ops for legacy and enhanced
+ * DMA flows
*/
-struct vring_tx_data {
+struct wil_txrx_ops {
+ void (*configure_interrupt_moderation)(struct wil6210_priv *wil);
+ /* TX ops */
+ int (*ring_init_tx)(struct wil6210_vif *vif, int ring_id,
+ int size, int cid, int tid);
+ void (*ring_fini_tx)(struct wil6210_priv *wil, struct wil_ring *ring);
+ int (*ring_init_bcast)(struct wil6210_vif *vif, int id, int size);
+ int (*tx_init)(struct wil6210_priv *wil);
+ void (*tx_fini)(struct wil6210_priv *wil);
+ int (*tx_desc_map)(union wil_tx_desc *desc, dma_addr_t pa,
+ u32 len, int ring_index);
+ void (*tx_desc_unmap)(struct device *dev,
+ union wil_tx_desc *desc,
+ struct wil_ctx *ctx);
+ int (*tx_ring_tso)(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb);
+ irqreturn_t (*irq_tx)(int irq, void *cookie);
+ /* RX ops */
+ int (*rx_init)(struct wil6210_priv *wil, u16 ring_size);
+ void (*rx_fini)(struct wil6210_priv *wil);
+ int (*wmi_addba_rx_resp)(struct wil6210_priv *wil, u8 mid, u8 cid,
+ u8 tid, u8 token, u16 status, bool amsdu,
+ u16 agg_wsize, u16 timeout);
+ void (*get_reorder_params)(struct wil6210_priv *wil,
+ struct sk_buff *skb, int *tid, int *cid,
+ int *mid, u16 *seq, int *mcast, int *retry);
+ void (*get_netif_rx_params)(struct sk_buff *skb,
+ int *cid, int *security);
+ int (*rx_crypto_check)(struct wil6210_priv *wil, struct sk_buff *skb);
+ int (*rx_error_check)(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_net_stats *stats);
+ bool (*is_rx_idle)(struct wil6210_priv *wil);
+ irqreturn_t (*irq_rx)(int irq, void *cookie);
+};
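+
+/* Usage sketch (illustrative; the legacy init helper name below is an
+ * assumption, only wil_init_txrx_ops_edma() is declared in this patch):
+ * wil_init_txrx_ops() is expected to populate this table once, based
+ * on the detected hardware, e.g.
+ *
+ *	if (wil->use_enhanced_dma_hw)
+ *		wil_init_txrx_ops_edma(wil);
+ *	else
+ *		wil_init_txrx_ops_legacy_dma(wil);
+ *
+ * after which callers dispatch through wil->txrx_ops exclusively.
+ */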
+
+/**
+ * Additional data for Tx ring
+ */
+struct wil_ring_tx_data {
bool dot1x_open;
int enabled;
cycles_t idle, last_idle, begin;
@@ -503,6 +663,8 @@ struct pci_dev;
* @drop_dup: duplicate frames dropped for this reorder buffer
* @drop_old: old frames dropped for this reorder buffer
* @first_time: true when this buffer used 1-st time
+ * @mcast_last_seq: sequence number (SN) of last received multicast packet
+ * @drop_dup_mcast: duplicate multicast frames dropped for this reorder buffer
*/
struct wil_tid_ampdu_rx {
struct sk_buff **reorder_buf;
@@ -516,6 +678,8 @@ struct wil_tid_ampdu_rx {
unsigned long long drop_dup;
unsigned long long drop_old;
bool first_time; /* is it 1-st time this buffer used? */
+ u16 mcast_last_seq; /* multicast dup detection */
+ unsigned long long drop_dup_mcast;
};
/**
@@ -550,24 +714,6 @@ enum wil_sta_status {
wil_sta_connected = 2,
};
-#define WIL_STA_TID_NUM (16)
-#define WIL_MCS_MAX (12) /* Maximum MCS supported */
-
-struct wil_net_stats {
- unsigned long rx_packets;
- unsigned long tx_packets;
- unsigned long rx_bytes;
- unsigned long tx_bytes;
- unsigned long tx_errors;
- unsigned long rx_dropped;
- unsigned long rx_non_data_frame;
- unsigned long rx_short_frame;
- unsigned long rx_large_frame;
- unsigned long rx_replay;
- u16 last_mcs_rx;
- u64 rx_per_mcs[WIL_MCS_MAX + 1];
-};
-
/**
* struct wil_sta_info - data for peer
*
@@ -581,6 +727,14 @@ struct wil_sta_info {
u8 mid;
enum wil_sta_status status;
struct wil_net_stats stats;
+ /**
+ * 200 latency bins. The 1st bin counts packets with latency
+ * of 0..tx_latency_res; the last bin counts packets with latency
+ * of 199*tx_latency_res and above (see WIL_NUM_LATENCY_BINS).
+ * tx_latency_res is configured from the "tx_latency" debug-fs.
+ */
+ u64 *tx_latency_bins;
+ struct wmi_link_stats_basic fw_stats_basic;
/* Rx BACK */
struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
spinlock_t tid_rx_lock; /* guarding tid_rx array */
@@ -681,7 +835,7 @@ struct wil6210_vif {
u8 hidden_ssid; /* relevant in AP mode */
u32 ap_isolate; /* no intra-BSS communication */
bool pbss;
- int bcast_vring;
+ int bcast_ring;
struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */
int locally_generated_disc; /* relevant in STA mode */
struct timer_list connect_timer;
@@ -695,6 +849,39 @@ struct wil6210_vif {
struct mutex probe_client_mutex; /* protect @probe_client_pending */
struct work_struct probe_client_worker;
int net_queue_stopped; /* netif_tx_stop_all_queues invoked */
+ bool fw_stats_ready; /* per-cid statistics are ready inside sta_info */
+ u64 fw_stats_tsf; /* measurement timestamp */
+};
+
+/**
+ * RX buffer allocated for enhanced DMA RX descriptors
+ */
+struct wil_rx_buff {
+ struct sk_buff *skb;
+ struct list_head list;
+ int id;
+};
+
+/**
+ * During Rx completion processing, the driver extracts a buffer ID that
+ * is used as an index into the rx_buff_mgmt.buff_arr array; the SKB is
+ * then handed to the network stack and the buffer is moved from the
+ * 'active' list to the 'free' list.
+ * During Rx refill, SKBs are attached to free buffers, which are moved
+ * to the 'active' list.
+ */
+struct wil_rx_buff_mgmt {
+ struct wil_rx_buff *buff_arr;
+ size_t size; /* number of items in buff_arr */
+ struct list_head active;
+ struct list_head free;
+ unsigned long free_list_empty_cnt; /* statistics */
+};
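+
+/* Refill-side sketch (illustrative only; any locking is the caller's
+ * responsibility): attach an SKB to a free buffer and activate it:
+ *
+ *	struct wil_rx_buff *buff =
+ *		list_first_entry_or_null(&mgmt->free,
+ *					 struct wil_rx_buff, list);
+ *	if (!buff) {
+ *		mgmt->free_list_empty_cnt++;
+ *		return -EAGAIN;
+ *	}
+ *	buff->skb = skb;
+ *	list_move_tail(&buff->list, &mgmt->active);
+ */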
+
+struct wil_fw_stats_global {
+ bool ready;
+ u64 tsf; /* measurement timestamp */
+ struct wmi_link_stats_global stats;
};
struct wil6210_priv {
@@ -702,6 +889,7 @@ struct wil6210_priv {
u32 bar_size;
struct wiphy *wiphy;
struct net_device *main_ndev;
+ int n_msi;
void __iomem *csr;
DECLARE_BITMAP(status, wil_status_last);
u8 fw_version[ETHTOOL_FWVERS_LEN];
@@ -761,14 +949,20 @@ struct wil6210_priv {
struct net_device napi_ndev; /* dummy net_device serving all VIFs */
/* DMA related */
- struct vring vring_rx;
+ struct wil_ring ring_rx;
unsigned int rx_buf_len;
- struct vring vring_tx[WIL6210_MAX_TX_RINGS];
- struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
- u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
+ struct wil_ring ring_tx[WIL6210_MAX_TX_RINGS];
+ struct wil_ring_tx_data ring_tx_data[WIL6210_MAX_TX_RINGS];
+ struct wil_status_ring srings[WIL6210_MAX_STATUS_RINGS];
+ u8 num_rx_status_rings;
+ int tx_sring_idx;
+ u8 ring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
struct wil_sta_info sta[WIL6210_MAX_CID];
- u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */
+ u32 ring_idle_trsh; /* HW fetches up to 16 descriptors at once */
u32 dma_addr_size; /* indicates dma addr size */
+ struct wil_rx_buff_mgmt rx_buff_mgmt;
+ bool use_enhanced_dma_hw;
+ struct wil_txrx_ops txrx_ops;
struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
/* statistics */
@@ -781,6 +975,8 @@ struct wil6210_priv {
u8 wakeup_trigger;
struct wil_suspend_stats suspend_stats;
struct wil_debugfs_data dbg_data;
+ bool tx_latency; /* collect TX latency measurements */
+ size_t tx_latency_res; /* bin resolution in usec */
void *platform_handle;
struct wil_platform_ops platform_ops;
@@ -811,6 +1007,21 @@ struct wil6210_priv {
u32 rgf_fw_assert_code_addr;
u32 rgf_ucode_assert_code_addr;
u32 iccm_base;
+
+ /* relevant only for eDMA */
+ bool use_compressed_rx_status;
+ u32 rx_status_ring_order;
+ u32 tx_status_ring_order;
+ u32 rx_buff_id_count;
+ bool amsdu_en;
+ bool use_rx_hw_reordering;
+ bool secured_boot;
+ u8 boot_config;
+
+ struct wil_fw_stats_global fw_stats_global;
+
+ u32 max_agg_wsize;
+ u32 max_ampdu_size;
};
#define wil_to_wiphy(i) (i->wiphy)
@@ -894,6 +1105,8 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
wil_w(wil, reg, wil_r(wil, reg) & ~val);
}
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len);
+
#if defined(CONFIG_DYNAMIC_DEBUG)
#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
@@ -990,7 +1203,7 @@ int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index,
int key_usage);
int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie);
-int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring);
int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
@@ -1010,13 +1223,14 @@ int wmi_new_sta(struct wil6210_vif *vif, const u8 *mac, u8 aid);
int wmi_port_allocate(struct wil6210_priv *wil, u8 mid,
const u8 *mac, enum nl80211_iftype iftype);
int wmi_port_delete(struct wil6210_priv *wil, u8 mid);
+int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval);
int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid,
u8 cidxtid, u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl);
int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize);
void wil6210_clear_irq(struct wil6210_priv *wil);
-int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
+int wil6210_init_irq(struct wil6210_priv *wil, int irq);
void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
void wil_mask_irq(struct wil6210_priv *wil);
void wil_unmask_irq(struct wil6210_priv *wil);
@@ -1083,30 +1297,28 @@ void wil_probe_client_flush(struct wil6210_vif *vif);
void wil_probe_client_worker(struct work_struct *work);
void wil_disconnect_worker(struct work_struct *work);
-int wil_rx_init(struct wil6210_priv *wil, u16 size);
-void wil_rx_fini(struct wil6210_priv *wil);
+void wil_init_txrx_ops(struct wil6210_priv *wil);
/* TX API */
-int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
- int cid, int tid);
-void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
-int wil_tx_init(struct wil6210_vif *vif, int cid);
+int wil_ring_init_tx(struct wil6210_vif *vif, int cid);
int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size);
int wil_bcast_init(struct wil6210_vif *vif);
void wil_bcast_fini(struct wil6210_vif *vif);
void wil_bcast_fini_all(struct wil6210_priv *wil);
void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, bool should_stop);
+ struct wil_ring *ring, bool should_stop);
void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
- struct vring *vring, bool check_stop);
+ struct wil_ring *ring, bool check_stop);
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int wil_tx_complete(struct wil6210_vif *vif, int ringid);
void wil6210_unmask_irq_tx(struct wil6210_priv *wil);
+void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil);
/* RX API */
void wil_rx_handle(struct wil6210_priv *wil, int *quota);
void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
+void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil);
int wil_iftype_nl2wmi(enum nl80211_iftype type);
@@ -1127,7 +1339,6 @@ bool wil_is_wmi_idle(struct wil6210_priv *wil);
int wmi_resume(struct wil6210_priv *wil);
int wmi_suspend(struct wil6210_priv *wil);
bool wil_is_tx_idle(struct wil6210_priv *wil);
-bool wil_is_rx_idle(struct wil6210_priv *wil);
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
void wil_fw_core_dump(struct wil6210_priv *wil);
@@ -1141,5 +1352,22 @@ int wmi_start_sched_scan(struct wil6210_priv *wil,
struct cfg80211_sched_scan_request *request);
int wmi_stop_sched_scan(struct wil6210_priv *wil);
int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len);
+int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
+ u8 channel, u16 duration_ms);
+
+int reverse_memcmp(const void *cs, const void *ct, size_t count);
+
+/* WMI for enhanced DMA */
+int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id);
+int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil,
+ u16 max_rx_pl_per_desc);
+int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id);
+int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id);
+int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
+ int tid);
+int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id);
+int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid,
+ u8 tid, u8 token, u16 status, bool amsdu,
+ u16 agg_wsize, u16 timeout);
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index 1ed330674d9b..dc33a0b4c3fa 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -36,7 +37,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
- if (!map->fw)
+ if (!map->crash_dump)
continue;
if (map->host < host_min)
@@ -85,7 +86,7 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
- if (!map->fw)
+ if (!map->crash_dump)
continue;
data = (void * __force)wil->csr + HOSTADDR(map->host);
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 177026e5323b..bca090611477 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -29,6 +29,7 @@ enum wil_platform_event {
enum wil_platform_features {
WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0,
+ WIL_PLATFORM_FEATURE_TRIPLE_MSI = 1,
WIL_PLATFORM_FEATURE_MAX,
};
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 5d991243cdb5..42c02a20ec97 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -89,28 +89,28 @@ MODULE_PARM_DESC(led_id,
*/
const struct fw_map sparrow_fw_mapping[] = {
/* FW code RAM 256k */
- {0x000000, 0x040000, 0x8c0000, "fw_code", true},
+ {0x000000, 0x040000, 0x8c0000, "fw_code", true, true},
/* FW data RAM 32k */
- {0x800000, 0x808000, 0x900000, "fw_data", true},
+ {0x800000, 0x808000, 0x900000, "fw_data", true, true},
/* periph data 128k */
- {0x840000, 0x860000, 0x908000, "fw_peri", true},
+ {0x840000, 0x860000, 0x908000, "fw_peri", true, true},
/* various RGF 40k */
- {0x880000, 0x88a000, 0x880000, "rgf", true},
+ {0x880000, 0x88a000, 0x880000, "rgf", true, true},
/* AGC table 4k */
- {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
/* Pcie_ext_rgf 4k */
- {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
/* mac_ext_rgf 512b */
- {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true},
+ {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true, true},
/* upper area 548k */
- {0x8c0000, 0x949000, 0x8c0000, "upper", true},
+ {0x8c0000, 0x949000, 0x8c0000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
* wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
*/
/* ucode code RAM 128k */
- {0x000000, 0x020000, 0x920000, "uc_code", false},
+ {0x000000, 0x020000, 0x920000, "uc_code", false, false},
/* ucode data RAM 16k */
- {0x800000, 0x804000, 0x940000, "uc_data", false},
+ {0x800000, 0x804000, 0x940000, "uc_data", false, false},
};
/**
@@ -118,7 +118,7 @@ const struct fw_map sparrow_fw_mapping[] = {
* it is a bit larger to support extra features
*/
const struct fw_map sparrow_d0_mac_rgf_ext = {
- 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true
+ 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true, true
};
/**
@@ -134,34 +134,89 @@ const struct fw_map sparrow_d0_mac_rgf_ext = {
*/
const struct fw_map talyn_fw_mapping[] = {
/* FW code RAM 1M */
- {0x000000, 0x100000, 0x900000, "fw_code", true},
+ {0x000000, 0x100000, 0x900000, "fw_code", true, true},
/* FW data RAM 128k */
- {0x800000, 0x820000, 0xa00000, "fw_data", true},
+ {0x800000, 0x820000, 0xa00000, "fw_data", true, true},
/* periph. data RAM 96k */
- {0x840000, 0x858000, 0xa20000, "fw_peri", true},
+ {0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
/* various RGF 40k */
- {0x880000, 0x88a000, 0x880000, "rgf", true},
+ {0x880000, 0x88a000, 0x880000, "rgf", true, true},
/* AGC table 4k */
- {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
/* Pcie_ext_rgf 4k */
- {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
/* mac_ext_rgf 1344b */
- {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true},
+ {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true, true},
/* ext USER RGF 4k */
- {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true},
+ {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
/* OTP 4k */
- {0x8a0000, 0x8a1000, 0x8a0000, "otp", true},
+ {0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
/* DMA EXT RGF 64k */
- {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true},
+ {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
/* upper area 1536k */
- {0x900000, 0xa80000, 0x900000, "upper", true},
+ {0x900000, 0xa80000, 0x900000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
* wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
*/
/* ucode code RAM 256k */
- {0x000000, 0x040000, 0xa38000, "uc_code", false},
+ {0x000000, 0x040000, 0xa38000, "uc_code", false, false},
/* ucode data RAM 32k */
- {0x800000, 0x808000, 0xa78000, "uc_data", false},
+ {0x800000, 0x808000, 0xa78000, "uc_data", false, false},
+};
+
+/**
+ * @talyn_mb_fw_mapping provides the memory remapping table for Talyn-MB
+ *
+ * The array size should be in sync with the declaration in wil6210.h
+ *
+ * Talyn MB memory mapping:
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xc80000 4Mb BAR0
+ * 0x800000 .. 0x820000 0xa00000 .. 0xa20000 128k DCCM
+ * 0x840000 .. 0x858000 0xa20000 .. 0xa38000 96k PERIPH
+ */
+const struct fw_map talyn_mb_fw_mapping[] = {
+ /* FW code RAM 768k */
+ {0x000000, 0x0c0000, 0x900000, "fw_code", true, true},
+ /* FW data RAM 128k */
+ {0x800000, 0x820000, 0xa00000, "fw_data", true, true},
+ /* periph. data RAM 96k */
+ {0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
+ /* various RGF 40k */
+ {0x880000, 0x88a000, 0x880000, "rgf", true, true},
+ /* AGC table 4k */
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
+ /* Pcie_ext_rgf 4k */
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
+ /* mac_ext_rgf 2256b */
+ {0x88c000, 0x88c8d0, 0x88c000, "mac_rgf_ext", true, true},
+ /* ext USER RGF 4k */
+ {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
+ /* SEC PKA 16k */
+ {0x890000, 0x894000, 0x890000, "sec_pka", true, true},
+ /* SEC KDF RGF 3096b */
+ {0x898000, 0x898c18, 0x898000, "sec_kdf_rgf", true, true},
+ /* SEC MAIN 2124b */
+ {0x89a000, 0x89a84c, 0x89a000, "sec_main", true, true},
+ /* OTP 4k */
+ {0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
+ /* DMA EXT RGF 64k */
+ {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
+ /* DUM USER RGF 528b */
+ {0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true, true},
+ /* DMA OFU 296b */
+ {0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true, true},
+ /* ucode debug 4k */
+ {0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true, true},
+ /* upper area 1536k */
+ {0x900000, 0xa80000, 0x900000, "upper", true, true},
+ /* UCODE areas - accessible by debugfs blobs but not by
+ * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
+ */
+ /* ucode code RAM 256k */
+ {0x000000, 0x040000, 0xa38000, "uc_code", false, false},
+ /* ucode data RAM 32k */
+ {0x800000, 0x808000, 0xa78000, "uc_data", false, false},
};
struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
@@ -365,14 +420,16 @@ static const char *cmdid2name(u16 cmdid)
return "WMI_DEL_STA_CMD";
case WMI_DISCONNECT_STA_CMDID:
return "WMI_DISCONNECT_STA_CMD";
- case WMI_VRING_BA_EN_CMDID:
- return "WMI_VRING_BA_EN_CMD";
- case WMI_VRING_BA_DIS_CMDID:
- return "WMI_VRING_BA_DIS_CMD";
+ case WMI_RING_BA_EN_CMDID:
+ return "WMI_RING_BA_EN_CMD";
+ case WMI_RING_BA_DIS_CMDID:
+ return "WMI_RING_BA_DIS_CMD";
case WMI_RCP_DELBA_CMDID:
return "WMI_RCP_DELBA_CMD";
case WMI_RCP_ADDBA_RESP_CMDID:
return "WMI_RCP_ADDBA_RESP_CMD";
+ case WMI_RCP_ADDBA_RESP_EDMA_CMDID:
+ return "WMI_RCP_ADDBA_RESP_EDMA_CMD";
case WMI_PS_DEV_PROFILE_CFG_CMDID:
return "WMI_PS_DEV_PROFILE_CFG_CMD";
case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
@@ -395,6 +452,22 @@ static const char *cmdid2name(u16 cmdid)
return "WMI_START_SCHED_SCAN_CMD";
case WMI_STOP_SCHED_SCAN_CMDID:
return "WMI_STOP_SCHED_SCAN_CMD";
+ case WMI_TX_STATUS_RING_ADD_CMDID:
+ return "WMI_TX_STATUS_RING_ADD_CMD";
+ case WMI_RX_STATUS_RING_ADD_CMDID:
+ return "WMI_RX_STATUS_RING_ADD_CMD";
+ case WMI_TX_DESC_RING_ADD_CMDID:
+ return "WMI_TX_DESC_RING_ADD_CMD";
+ case WMI_RX_DESC_RING_ADD_CMDID:
+ return "WMI_RX_DESC_RING_ADD_CMD";
+ case WMI_BCAST_DESC_RING_ADD_CMDID:
+ return "WMI_BCAST_DESC_RING_ADD_CMD";
+ case WMI_CFG_DEF_RX_OFFLOAD_CMDID:
+ return "WMI_CFG_DEF_RX_OFFLOAD_CMD";
+ case WMI_LINK_STATS_CMDID:
+ return "WMI_LINK_STATS_CMD";
+ case WMI_SW_TX_REQ_EXT_CMDID:
+ return "WMI_SW_TX_REQ_EXT_CMDID";
default:
return "Untracked CMD";
}
@@ -449,8 +522,8 @@ static const char *eventid2name(u16 eventid)
return "WMI_RCP_ADDBA_REQ_EVENT";
case WMI_DELBA_EVENTID:
return "WMI_DELBA_EVENT";
- case WMI_VRING_EN_EVENTID:
- return "WMI_VRING_EN_EVENT";
+ case WMI_RING_EN_EVENTID:
+ return "WMI_RING_EN_EVENT";
case WMI_DATA_PORT_OPEN_EVENTID:
return "WMI_DATA_PORT_OPEN_EVENT";
case WMI_AOA_MEAS_EVENTID:
@@ -519,6 +592,20 @@ static const char *eventid2name(u16 eventid)
return "WMI_STOP_SCHED_SCAN_EVENT";
case WMI_SCHED_SCAN_RESULT_EVENTID:
return "WMI_SCHED_SCAN_RESULT_EVENT";
+ case WMI_TX_STATUS_RING_CFG_DONE_EVENTID:
+ return "WMI_TX_STATUS_RING_CFG_DONE_EVENT";
+ case WMI_RX_STATUS_RING_CFG_DONE_EVENTID:
+ return "WMI_RX_STATUS_RING_CFG_DONE_EVENT";
+ case WMI_TX_DESC_RING_CFG_DONE_EVENTID:
+ return "WMI_TX_DESC_RING_CFG_DONE_EVENT";
+ case WMI_RX_DESC_RING_CFG_DONE_EVENTID:
+ return "WMI_RX_DESC_RING_CFG_DONE_EVENT";
+ case WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID:
+ return "WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENT";
+ case WMI_LINK_STATS_CONFIG_DONE_EVENTID:
+ return "WMI_LINK_STATS_CONFIG_DONE_EVENT";
+ case WMI_LINK_STATS_EVENTID:
+ return "WMI_LINK_STATS_EVENT";
default:
return "Untracked EVENT";
}
@@ -906,7 +993,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
wil->sta[evt->cid].mid = vif->mid;
wil->sta[evt->cid].status = wil_sta_conn_pending;
- rc = wil_tx_init(vif, evt->cid);
+ rc = wil_ring_init_tx(vif, evt->cid);
if (rc) {
wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n",
evt->cid, rc);
@@ -1063,16 +1150,16 @@ static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len)
}
}
-static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len)
+static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
- struct wmi_vring_en_event *evt = d;
- u8 vri = evt->vring_index;
+ struct wmi_ring_en_event *evt = d;
+ u8 vri = evt->ring_index;
struct wireless_dev *wdev = vif_to_wdev(vif);
wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid);
- if (vri >= ARRAY_SIZE(wil->vring_tx)) {
+ if (vri >= ARRAY_SIZE(wil->ring_tx)) {
wil_err(wil, "Enable for invalid vring %d\n", vri);
return;
}
@@ -1081,8 +1168,8 @@ static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len)
/* in AP mode with disable_ap_sme, this is done by
* wil_cfg80211_change_station()
*/
- wil->vring_tx_data[vri].dot1x_open = true;
- if (vri == vif->bcast_vring) /* no BA for bcast */
+ wil->ring_tx_data[vri].dot1x_open = true;
+ if (vri == vif->bcast_ring) /* no BA for bcast */
return;
if (agg_wsize >= 0)
wil_addba_tx_request(wil, vri, agg_wsize);
@@ -1093,7 +1180,7 @@ static void wmi_evt_ba_status(struct wil6210_vif *vif, int id,
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_ba_status_event *evt = d;
- struct vring_tx_data *txdata;
+ struct wil_ring_tx_data *txdata;
wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
evt->ringid,
@@ -1112,7 +1199,7 @@ static void wmi_evt_ba_status(struct wil6210_vif *vif, int id,
evt->amsdu = 0;
}
- txdata = &wil->vring_tx_data[evt->ringid];
+ txdata = &wil->ring_tx_data[evt->ringid];
txdata->agg_timeout = le16_to_cpu(evt->ba_timeout);
txdata->agg_wsize = evt->agg_wsize;
@@ -1150,11 +1237,11 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
if (!evt->from_initiator) {
int i;
/* find Tx vring it belongs to */
- for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
- if ((wil->vring2cid_tid[i][0] == cid) &&
- (wil->vring2cid_tid[i][1] == tid)) {
- struct vring_tx_data *txdata =
- &wil->vring_tx_data[i];
+ for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
+ if (wil->ring2cid_tid[i][0] == cid &&
+ wil->ring2cid_tid[i][1] == tid) {
+ struct wil_ring_tx_data *txdata =
+ &wil->ring_tx_data[i];
wil_dbg_wmi(wil, "DELBA Tx vring %d\n", i);
txdata->agg_timeout = 0;
@@ -1164,7 +1251,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
break; /* max. 1 matching ring */
}
}
- if (i >= ARRAY_SIZE(wil->vring2cid_tid))
+ if (i >= ARRAY_SIZE(wil->ring2cid_tid))
wil_err(wil, "DELBA: unable to find Tx vring\n");
return;
}
@@ -1250,6 +1337,130 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len)
cfg80211_sched_scan_results(wiphy, 0);
}
+static void wil_link_stats_store_basic(struct wil6210_vif *vif,
+ struct wmi_link_stats_basic *basic)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ u8 cid = basic->cid;
+ struct wil_sta_info *sta;
+
+ if (cid >= WIL6210_MAX_CID) {
+ wil_err(wil, "invalid cid %d\n", cid);
+ return;
+ }
+
+ sta = &wil->sta[cid];
+ sta->fw_stats_basic = *basic;
+}
+
+static void wil_link_stats_store_global(struct wil6210_vif *vif,
+ struct wmi_link_stats_global *global)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+
+ wil->fw_stats_global.stats = *global;
+}
+
+static void wmi_link_stats_parse(struct wil6210_vif *vif, u64 tsf,
+ bool has_next, void *payload,
+ size_t payload_size)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ size_t hdr_size = sizeof(struct wmi_link_stats_record);
+ size_t stats_size, record_size, expected_size;
+ struct wmi_link_stats_record *hdr;
+
+ if (payload_size < hdr_size) {
+ wil_err(wil, "link stats wrong event size %zu\n", payload_size);
+ return;
+ }
+
+ while (payload_size >= hdr_size) {
+ hdr = payload;
+ stats_size = le16_to_cpu(hdr->record_size);
+ record_size = hdr_size + stats_size;
+
+ if (payload_size < record_size) {
+ wil_err(wil, "link stats payload ended unexpectedly, size %zu < %zu\n",
+ payload_size, record_size);
+ return;
+ }
+
+ switch (hdr->record_type_id) {
+ case WMI_LINK_STATS_TYPE_BASIC:
+ expected_size = sizeof(struct wmi_link_stats_basic);
+ if (stats_size < expected_size) {
+ wil_err(wil, "link stats invalid basic record size %zu < %zu\n",
+ stats_size, expected_size);
+ return;
+ }
+ if (vif->fw_stats_ready) {
+ /* clean old statistics */
+ vif->fw_stats_tsf = 0;
+ vif->fw_stats_ready = 0;
+ }
+
+ wil_link_stats_store_basic(vif, payload + hdr_size);
+
+ if (!has_next) {
+ vif->fw_stats_tsf = tsf;
+ vif->fw_stats_ready = 1;
+ }
+
+ break;
+ case WMI_LINK_STATS_TYPE_GLOBAL:
+ expected_size = sizeof(struct wmi_link_stats_global);
+ if (stats_size < expected_size) {
+ wil_err(wil, "link stats invalid global record size %zu < %zu\n",
+ stats_size, expected_size);
+ return;
+ }
+
+ if (wil->fw_stats_global.ready) {
+ /* clean old statistics */
+ wil->fw_stats_global.tsf = 0;
+ wil->fw_stats_global.ready = 0;
+ }
+
+ wil_link_stats_store_global(vif, payload + hdr_size);
+
+ if (!has_next) {
+ wil->fw_stats_global.tsf = tsf;
+ wil->fw_stats_global.ready = 1;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ /* skip to next record */
+ payload += record_size;
+ payload_size -= record_size;
+ }
+}
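+
+/* Payload layout sketch (illustrative): the event payload is a
+ * sequence of TLV-like records, each a wmi_link_stats_record header
+ * followed by record_size bytes of type-specific stats:
+ *
+ *	| hdr(BASIC, size) | wmi_link_stats_basic | hdr(GLOBAL, size) | ...
+ */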
+
+static void
+wmi_evt_link_stats(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_link_stats_event *evt = d;
+ size_t payload_size;
+
+ if (len < offsetof(struct wmi_link_stats_event, payload)) {
+ wil_err(wil, "stats event way too short %d\n", len);
+ return;
+ }
+ payload_size = le16_to_cpu(evt->payload_size);
+ if (len < sizeof(struct wmi_link_stats_event) + payload_size) {
+ wil_err(wil, "stats event too short %d\n", len);
+ return;
+ }
+
+ wmi_link_stats_parse(vif, le64_to_cpu(evt->tsf), evt->has_next,
+ evt->payload, payload_size);
+}
+
/**
* Some events are ignored for purpose; and need not be interpreted as
* "unhandled events"
@@ -1277,9 +1488,10 @@ static const struct {
{WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
{WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
{WMI_DELBA_EVENTID, wmi_evt_delba},
- {WMI_VRING_EN_EVENTID, wmi_evt_vring_en},
+ {WMI_RING_EN_EVENTID, wmi_evt_ring_en},
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore},
{WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result},
+ {WMI_LINK_STATS_EVENTID, wmi_evt_link_stats},
};
/*
@@ -1909,7 +2121,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
return rc;
}
-int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring)
{
struct net_device *ndev = wil->main_ndev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
@@ -2063,29 +2275,32 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
int wmi_addba(struct wil6210_priv *wil, u8 mid,
u8 ringid, u8 size, u16 timeout)
{
- struct wmi_vring_ba_en_cmd cmd = {
- .ringid = ringid,
+ u8 amsdu = wil->use_enhanced_dma_hw && wil->use_rx_hw_reordering &&
+ test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
+ wil->amsdu_en;
+ struct wmi_ring_ba_en_cmd cmd = {
+ .ring_id = ringid,
.agg_max_wsize = size,
.ba_timeout = cpu_to_le16(timeout),
- .amsdu = 0,
+ .amsdu = amsdu,
};
- wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size,
- timeout);
+ wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d amsdu %d)\n",
+ ringid, size, timeout, amsdu);
- return wmi_send(wil, WMI_VRING_BA_EN_CMDID, mid, &cmd, sizeof(cmd));
+ return wmi_send(wil, WMI_RING_BA_EN_CMDID, mid, &cmd, sizeof(cmd));
}
int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason)
{
- struct wmi_vring_ba_dis_cmd cmd = {
- .ringid = ringid,
+ struct wmi_ring_ba_dis_cmd cmd = {
+ .ring_id = ringid,
.reason = cpu_to_le16(reason),
};
wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason);
- return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
+ return wmi_send(wil, WMI_RING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
}
int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason)
@@ -2146,6 +2361,54 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil,
return rc;
}
+int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
+ u8 token, u16 status, bool amsdu, u16 agg_wsize,
+ u16 timeout)
+{
+ int rc;
+ struct wmi_rcp_addba_resp_edma_cmd cmd = {
+ .cid = cid,
+ .tid = tid,
+ .dialog_token = token,
+ .status_code = cpu_to_le16(status),
+ /* bit 0: A-MSDU supported
+ * bit 1: policy (should be 0 for us)
+ * bits 2..5: TID
+ * bits 6..15: buffer size
+ */
+ .ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) |
+ (agg_wsize << 6)),
+ .ba_timeout = cpu_to_le16(timeout),
+ /* route all the connections to status ring 0 */
+ .status_ring_id = WIL_DEFAULT_RX_STATUS_RING_ID,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_rcp_addba_resp_sent_event evt;
+ } __packed reply = {
+ .evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)},
+ };
+
+ wil_dbg_wmi(wil,
+ "ADDBA response for CID %d TID %d size %d timeout %d status %d AMSDU%s, sring_id %d\n",
+ cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-",
+ WIL_DEFAULT_RX_STATUS_RING_ID);
+
+ rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_EDMA_CMDID, mid, &cmd,
+ sizeof(cmd), WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status) {
+ wil_err(wil, "ADDBA response failed with status %d\n",
+ le16_to_cpu(reply.evt.status));
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
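+
+/* Worked example (illustrative): for amsdu = true, tid = 3 and
+ * agg_wsize = 64, ba_param_set above packs to
+ * 1 | (3 << 2) | (64 << 6) = 0x100d.
+ */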
+
int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
enum wmi_ps_profile_type ps_profile)
{
@@ -2852,3 +3115,351 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len)
return rc;
}
+
+int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
+ u8 channel, u16 duration_ms)
+{
+ size_t total;
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct ieee80211_mgmt *mgmt_frame = (void *)buf;
+ struct wmi_sw_tx_req_ext_cmd *cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_sw_tx_complete_event evt;
+ } __packed evt = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ int rc;
+
+ wil_dbg_wmi(wil, "mgmt_tx_ext mid %d channel %d duration %d\n",
+ vif->mid, channel, duration_ms);
+ wil_hex_dump_wmi("mgmt_tx_ext frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
+
+ if (len < sizeof(struct ieee80211_hdr_3addr)) {
+ wil_err(wil, "short frame. len %zu\n", len);
+ return -EINVAL;
+ }
+
+ total = sizeof(*cmd) + len;
+ if (total < len) {
+ wil_err(wil, "mgmt_tx_ext invalid len %zu\n", len);
+ return -EINVAL;
+ }
+
+ cmd = kzalloc(total, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN);
+ cmd->len = cpu_to_le16(len);
+ memcpy(cmd->payload, buf, len);
+ cmd->channel = channel - 1;
+ cmd->duration_ms = cpu_to_le16(duration_ms);
+
+ rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total,
+ WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
+ if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "mgmt_tx_ext failed with status %d\n",
+ evt.evt.status);
+ rc = -EINVAL;
+ }
+
+ kfree(cmd);
+
+ return rc;
+}
+
+int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id)
+{
+ int rc;
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ struct wil_status_ring *sring = &wil->srings[ring_id];
+ struct wmi_tx_status_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(sring->size),
+ },
+ .irq_index = WIL_TX_STATUS_IRQ_IDX
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_tx_status_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_id = ring_id;
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
+ rc = wmi_call(wil, WMI_TX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_TX_STATUS_RING_CFG_DONE_EVENTID,
+ &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+ return 0;
+}
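+
+/* The eDMA ring-add helpers in this file follow the same pattern: fill
+ * a *_ring_add_cmd with the ring's DMA base address and size, issue
+ * wmi_call() and wait for the matching *_CFG_DONE event, then latch
+ * reply.evt.ring_tail_ptr as the ring's hwtail.
+ */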
+
+int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil, u16 max_rx_pl_per_desc)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ int rc;
+ struct wmi_cfg_def_rx_offload_cmd cmd = {
+ .max_msdu_size = cpu_to_le16(wil_mtu2macbuf(WIL_MAX_ETH_MTU)),
+ .max_rx_pl_per_desc = cpu_to_le16(max_rx_pl_per_desc),
+ .decap_trans_type = WMI_DECAP_TYPE_802_3,
+ .l2_802_3_offload_ctrl = 0,
+ .l3_l4_ctrl = 1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_cfg_def_rx_offload_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ rc = wmi_call(wil, WMI_CFG_DEF_RX_OFFLOAD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wil_status_ring *sring = &wil->srings[ring_id];
+ int rc;
+ struct wmi_rx_status_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(sring->size),
+ .ring_id = ring_id,
+ },
+ .rx_msg_type = wil->use_compressed_rx_status ?
+ WMI_RX_MSG_TYPE_COMPRESSED :
+ WMI_RX_MSG_TYPE_EXTENDED,
+ .irq_index = WIL_RX_STATUS_IRQ_IDX,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_rx_status_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
+ rc = wmi_call(wil, WMI_RX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_RX_STATUS_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+ return 0;
+}
+
+int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wil_ring *ring = &wil->ring_rx;
+ int rc;
+ struct wmi_rx_desc_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(ring->size),
+ .ring_id = WIL_RX_DESC_RING_ID,
+ },
+ .status_ring_id = status_ring_id,
+ .irq_index = WIL_RX_STATUS_IRQ_IDX,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_rx_desc_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+ cmd.sw_tail_host_addr = cpu_to_le64(ring->edma_rx_swtail.pa);
+ rc = wmi_call(wil, WMI_RX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_RX_DESC_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+ return 0;
+}
+
+int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
+ int tid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int sring_id = wil->tx_sring_idx; /* there is only one TX sring */
+ int rc;
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+ struct wmi_tx_desc_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(ring->size),
+ .ring_id = ring_id,
+ },
+ .status_ring_id = sring_id,
+ .cid = cid,
+ .tid = tid,
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ .max_msdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .schd_params = {
+ .priority = cpu_to_le16(0),
+ .timeslot_us = cpu_to_le16(0xfff),
+ }
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_tx_desc_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+ rc = wmi_call(wil, WMI_TX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&txdata->lock);
+ ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+ txdata->mid = vif->mid;
+ txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
+
+ return 0;
+}
+
+int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ int rc;
+ struct wmi_bcast_desc_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(ring->size),
+ .ring_id = ring_id,
+ },
+ .status_ring_id = wil->tx_sring_idx,
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_rx_desc_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+ rc = wmi_call(wil, WMI_BCAST_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_BCAST_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Broadcast Tx config failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&txdata->lock);
+ ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+ txdata->mid = vif->mid;
+ txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
+
+ return 0;
+}
+
+int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_link_stats_cmd cmd = {
+ .record_type_mask = cpu_to_le32(type),
+ .cid = cid,
+ .action = WMI_LINK_STATS_SNAPSHOT,
+ .interval_msec = cpu_to_le32(interval),
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_link_stats_config_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ int rc;
+
+ rc = wmi_call(wil, WMI_LINK_STATS_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_LINK_STATS_CONFIG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_LINK_STATS_CMDID failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Link statistics config failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index dc503d903786..139acb2caf92 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -53,6 +53,17 @@
* must always be kept equal to (WMI_RF_RX2TX_LENGTH+1)
*/
#define WMI_RF_RX2TX_CONF_LENGTH (4)
+/* QoS configuration */
+#define WMI_QOS_NUM_OF_PRIORITY (4)
+#define WMI_QOS_MIN_DEFAULT_WEIGHT (10)
+#define WMI_QOS_VRING_SLOT_MIN_MS (2)
+#define WMI_QOS_VRING_SLOT_MAX_MS (10)
+/* (WMI_QOS_MIN_DEFAULT_WEIGHT * WMI_QOS_VRING_SLOT_MAX_MS /
+ * WMI_QOS_VRING_SLOT_MIN_MS)
+ */
+#define WMI_QOS_MAX_WEIGHT 50
+#define WMI_QOS_SET_VIF_PRIORITY (0xFF)
+#define WMI_QOS_DEFAULT_PRIORITY (WMI_QOS_NUM_OF_PRIORITY)
/* Mailbox interface
* used for commands and events
@@ -86,6 +97,12 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_PNO = 15,
WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18,
WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE = 19,
+ WMI_FW_CAPABILITY_MULTI_VIFS = 20,
+ WMI_FW_CAPABILITY_FT_ROAMING = 21,
+ WMI_FW_CAPABILITY_BACK_WIN_SIZE_64 = 22,
+ WMI_FW_CAPABILITY_AMSDU = 23,
+ WMI_FW_CAPABILITY_RAW_MODE = 24,
+ WMI_FW_CAPABILITY_TX_REQ_EXT = 25,
WMI_FW_CAPABILITY_MAX,
};
@@ -109,6 +126,9 @@ enum wmi_command_id {
WMI_SET_PROBED_SSID_CMDID = 0x0A,
/* deprecated */
WMI_SET_LISTEN_INT_CMDID = 0x0B,
+ WMI_FT_AUTH_CMDID = 0x0C,
+ WMI_FT_REASSOC_CMDID = 0x0D,
+ WMI_UPDATE_FT_IES_CMDID = 0x0E,
WMI_BCON_CTRL_CMDID = 0x0F,
WMI_ADD_CIPHER_KEY_CMDID = 0x16,
WMI_DELETE_CIPHER_KEY_CMDID = 0x17,
@@ -117,6 +137,12 @@ enum wmi_command_id {
WMI_SET_WSC_STATUS_CMDID = 0x41,
WMI_PXMT_RANGE_CFG_CMDID = 0x42,
WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x43,
+ WMI_RADAR_GENERAL_CONFIG_CMDID = 0x100,
+ WMI_RADAR_CONFIG_SELECT_CMDID = 0x101,
+ WMI_RADAR_PARAMS_CONFIG_CMDID = 0x102,
+ WMI_RADAR_SET_MODE_CMDID = 0x103,
+ WMI_RADAR_CONTROL_CMDID = 0x104,
+ WMI_RADAR_PCI_CONTROL_CMDID = 0x105,
WMI_MEM_READ_CMDID = 0x800,
WMI_MEM_WR_CMDID = 0x801,
WMI_ECHO_CMDID = 0x803,
@@ -148,8 +174,8 @@ enum wmi_command_id {
WMI_CFG_RX_CHAIN_CMDID = 0x820,
WMI_VRING_CFG_CMDID = 0x821,
WMI_BCAST_VRING_CFG_CMDID = 0x822,
- WMI_VRING_BA_EN_CMDID = 0x823,
- WMI_VRING_BA_DIS_CMDID = 0x824,
+ WMI_RING_BA_EN_CMDID = 0x823,
+ WMI_RING_BA_DIS_CMDID = 0x824,
WMI_RCP_ADDBA_RESP_CMDID = 0x825,
WMI_RCP_DELBA_CMDID = 0x826,
WMI_SET_SSID_CMDID = 0x827,
@@ -157,12 +183,17 @@ enum wmi_command_id {
WMI_SET_PCP_CHANNEL_CMDID = 0x829,
WMI_GET_PCP_CHANNEL_CMDID = 0x82A,
WMI_SW_TX_REQ_CMDID = 0x82B,
+ /* Event is shared between WMI_SW_TX_REQ_CMDID and
+ * WMI_SW_TX_REQ_EXT_CMDID
+ */
+ WMI_SW_TX_REQ_EXT_CMDID = 0x82C,
WMI_MLME_PUSH_CMDID = 0x835,
WMI_BEAMFORMING_MGMT_CMDID = 0x836,
WMI_BF_TXSS_MGMT_CMDID = 0x837,
WMI_BF_SM_MGMT_CMDID = 0x838,
WMI_BF_RXSS_MGMT_CMDID = 0x839,
WMI_BF_TRIG_CMDID = 0x83A,
+ WMI_RCP_ADDBA_RESP_EDMA_CMDID = 0x83B,
WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
WMI_SET_SECTORS_CMDID = 0x849,
@@ -205,7 +236,12 @@ enum wmi_command_id {
WMI_GET_PCP_FACTOR_CMDID = 0x91B,
/* Power Save Configuration Commands */
WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C,
+ WMI_RS_ENABLE_CMDID = 0x91E,
+ WMI_RS_CFG_EX_CMDID = 0x91F,
+ WMI_GET_DETAILED_RS_RES_EX_CMDID = 0x920,
+ /* deprecated */
WMI_RS_CFG_CMDID = 0x921,
+ /* deprecated */
WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
WMI_AOA_MEAS_CMDID = 0x923,
WMI_BRP_SET_ANT_LIMIT_CMDID = 0x924,
@@ -234,7 +270,15 @@ enum wmi_command_id {
WMI_PRIO_TX_SECTORS_ORDER_CMDID = 0x9A5,
WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6,
WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7,
+ /* deprecated */
WMI_BF_CONTROL_CMDID = 0x9AA,
+ WMI_BF_CONTROL_EX_CMDID = 0x9AB,
+ WMI_TX_STATUS_RING_ADD_CMDID = 0x9C0,
+ WMI_RX_STATUS_RING_ADD_CMDID = 0x9C1,
+ WMI_TX_DESC_RING_ADD_CMDID = 0x9C2,
+ WMI_RX_DESC_RING_ADD_CMDID = 0x9C3,
+ WMI_BCAST_DESC_RING_ADD_CMDID = 0x9C4,
+ WMI_CFG_DEF_RX_OFFLOAD_CMDID = 0x9C5,
WMI_SCHEDULING_SCHEME_CMDID = 0xA01,
WMI_FIXED_SCHEDULING_CONFIG_CMDID = 0xA02,
WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03,
@@ -244,6 +288,11 @@ enum wmi_command_id {
WMI_GET_CCA_INDICATIONS_CMDID = 0xA07,
WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_CMDID = 0xA08,
WMI_INTERNAL_FW_IOCTL_CMDID = 0xA0B,
+ WMI_LINK_STATS_CMDID = 0xA0C,
+ WMI_SET_GRANT_MCS_CMDID = 0xA0E,
+ WMI_SET_AP_SLOT_SIZE_CMDID = 0xA0F,
+ WMI_SET_VRING_PRIORITY_WEIGHT_CMDID = 0xA10,
+ WMI_SET_VRING_PRIORITY_CMDID = 0xA11,
WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
WMI_ABORT_SCAN_CMDID = 0xF007,
WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
@@ -442,6 +491,30 @@ struct wmi_start_sched_scan_cmd {
struct wmi_sched_scan_plan scan_plans[WMI_MAX_PLANS_NUM];
} __packed;
+/* WMI_FT_AUTH_CMDID */
+struct wmi_ft_auth_cmd {
+ u8 bssid[WMI_MAC_LEN];
+ /* enum wmi_channel */
+ u8 channel;
+ /* enum wmi_channel */
+ u8 edmg_channel;
+ u8 reserved[4];
+} __packed;
+
+/* WMI_FT_REASSOC_CMDID */
+struct wmi_ft_reassoc_cmd {
+ u8 bssid[WMI_MAC_LEN];
+ u8 reserved[2];
+} __packed;
+
+/* WMI_UPDATE_FT_IES_CMDID */
+struct wmi_update_ft_ies_cmd {
+ /* Length of the FT IEs */
+ __le16 ie_len;
+ u8 reserved[2];
+ u8 ie_info[0];
+} __packed;
+
/* WMI_SET_PROBED_SSID_CMDID */
#define MAX_PROBED_SSID_INDEX (3)
@@ -498,6 +571,109 @@ struct wmi_pxmt_snr2_range_cfg_cmd {
s8 snr2range_arr[2];
} __packed;
+/* WMI_RADAR_GENERAL_CONFIG_CMDID */
+struct wmi_radar_general_config_cmd {
+ /* Number of pulses (CIRs) in FW FIFO needed to initiate pulse
+ * transfer from FW to host
+ */
+ __le32 fifo_watermark;
+ /* In unit of us, in the range [100, 1000000] */
+ __le32 t_burst;
+ /* Valid in the range [1, 32768], 0xFFFF means infinite */
+ __le32 n_bursts;
+ /* In unit of 330MHz clk, in the range [4, 2000]*330 */
+ __le32 t_pulse;
+ /* In the range of [1,4096] */
+ __le16 n_pulses;
+ /* Number of taps after cTap per CIR */
+ __le16 n_samples;
+ /* Offset from the main tap (0 = zero-distance). In the range of [0,
+ * 255]
+ */
+ u8 first_sample_offset;
+ /* Number of Pulses to average, 1, 2, 4, 8 */
+ u8 pulses_to_avg;
+ /* Number of adjacent taps to average, 1, 2, 4, 8 */
+ u8 samples_to_avg;
+ /* The index to config general params */
+ u8 general_index;
+ u8 reserved[4];
+} __packed;
+
+/* WMI_RADAR_CONFIG_SELECT_CMDID */
+struct wmi_radar_config_select_cmd {
+ /* Select the general params index to use */
+ u8 general_index;
+ u8 reserved[3];
+ /* 0 means don't update burst_active_vector */
+ __le32 burst_active_vector;
+ /* 0 means don't update pulse_active_vector */
+ __le32 pulse_active_vector;
+} __packed;
+
+/* WMI_RADAR_PARAMS_CONFIG_CMDID */
+struct wmi_radar_params_config_cmd {
+ /* The burst index selected to config */
+ u8 burst_index;
+ /* 0-not active, 1-active */
+ u8 burst_en;
+ /* The pulse index selected to config */
+ u8 pulse_index;
+ /* 0-not active, 1-active */
+ u8 pulse_en;
+ /* TX RF to use on current pulse */
+ u8 tx_rfc_idx;
+ u8 tx_sector;
+ /* Offset from calibrated value (expected to be 0; value is a row in
+ * Gain-LUT, not dB)
+ */
+ s8 tx_rf_gain_comp;
+ /* expected to be 0 */
+ s8 tx_bb_gain_comp;
+ /* RX RF to use on current pulse */
+ u8 rx_rfc_idx;
+ u8 rx_sector;
+ /* Offset from calibrated value (expected to be 0; value is a row in
+ * Gain-LUT, not dB)
+ */
+ s8 rx_rf_gain_comp;
+ /* Value in dB (expected to be 0) */
+ s8 rx_bb_gain_comp;
+ /* Offset from calibrated value (expected to be 0) */
+ s8 rx_timing_offset;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_SET_MODE_CMDID */
+struct wmi_radar_set_mode_cmd {
+ /* 0-disable/1-enable */
+ u8 enable;
+ /* enum wmi_channel */
+ u8 channel;
+ /* In the range of [0,7], 0xff means use default */
+ u8 tx_rfc_idx;
+ /* In the range of [0,7], 0xff means use default */
+ u8 rx_rfc_idx;
+} __packed;
+
+/* WMI_RADAR_CONTROL_CMDID */
+struct wmi_radar_control_cmd {
+ /* 0-stop/1-start */
+ u8 start;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_PCI_CONTROL_CMDID */
+struct wmi_radar_pci_control_cmd {
+ /* pcie host buffer start address */
+ __le64 base_addr;
+ /* pcie host control block address */
+ __le64 control_block_addr;
+ /* pcie host buffer size */
+ __le32 buffer_size;
+ __le32 reserved;
+} __packed;
+
/* WMI_RF_MGMT_CMDID */
enum wmi_rf_mgmt_type {
WMI_RF_MGMT_W_DISABLE = 0x00,
@@ -635,12 +811,18 @@ struct wmi_pcp_start_cmd {
u8 pcp_max_assoc_sta;
u8 hidden_ssid;
u8 is_go;
- u8 reserved0[5];
+ /* enum wmi_channel WMI_CHANNEL_9..WMI_CHANNEL_12 */
+ u8 edmg_channel;
+ u8 raw_mode;
+ u8 reserved[3];
/* A-BFT length override if non-0 */
u8 abft_len;
/* enum wmi_ap_sme_offload_mode_e */
u8 ap_sme_offload_mode;
u8 network_type;
+ /* enum wmi_channel WMI_CHANNEL_1..WMI_CHANNEL_6; for EDMG this is
+ * the primary channel number
+ */
u8 channel;
u8 disable_sec_offload;
u8 disable_sec;
@@ -653,6 +835,17 @@ struct wmi_sw_tx_req_cmd {
u8 payload[0];
} __packed;
+/* WMI_SW_TX_REQ_EXT_CMDID */
+struct wmi_sw_tx_req_ext_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 len;
+ __le16 duration_ms;
+ /* Channel to use, 0xFF for currently active channel */
+ u8 channel;
+ u8 reserved[5];
+ u8 payload[0];
+} __packed;
+
/* WMI_VRING_SWITCH_TIMING_CONFIG_CMDID */
struct wmi_vring_switch_timing_config_cmd {
/* Set vring timing configuration:
@@ -679,6 +872,7 @@ struct wmi_vring_cfg_schd {
enum wmi_vring_cfg_encap_trans_type {
WMI_VRING_ENC_TYPE_802_3 = 0x00,
WMI_VRING_ENC_TYPE_NATIVE_WIFI = 0x01,
+ WMI_VRING_ENC_TYPE_NONE = 0x02,
};
enum wmi_vring_cfg_ds_cfg {
@@ -736,7 +930,11 @@ struct wmi_vring_cfg {
u8 cid;
/* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
u8 tid;
- u8 reserved[2];
+ /* Update the vring's priority for QoS purposes. Set to
+ * WMI_QOS_DEFAULT_PRIORITY to use MID's QoS priority
+ */
+ u8 qos_priority;
+ u8 reserved;
} __packed;
enum wmi_vring_cfg_cmd_action {
@@ -767,6 +965,78 @@ struct wmi_bcast_vring_cfg_cmd {
struct wmi_bcast_vring_cfg vring_cfg;
} __packed;
+struct wmi_edma_ring_cfg {
+ __le64 ring_mem_base;
+ /* size in number of items */
+ __le16 ring_size;
+ u8 ring_id;
+ u8 reserved;
+} __packed;
+
+enum wmi_rx_msg_type {
+ WMI_RX_MSG_TYPE_COMPRESSED = 0x00,
+ WMI_RX_MSG_TYPE_EXTENDED = 0x01,
+};
+
+struct wmi_tx_status_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ u8 irq_index;
+ u8 reserved[3];
+} __packed;
+
+struct wmi_rx_status_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ u8 irq_index;
+ /* wmi_rx_msg_type */
+ u8 rx_msg_type;
+ u8 reserved[2];
+} __packed;
+
+struct wmi_cfg_def_rx_offload_cmd {
+ __le16 max_msdu_size;
+ __le16 max_rx_pl_per_desc;
+ u8 decap_trans_type;
+ u8 l2_802_3_offload_ctrl;
+ u8 l2_nwifi_offload_ctrl;
+ u8 vlan_id;
+ u8 nwifi_ds_trans_type;
+ u8 l3_l4_ctrl;
+ u8 reserved[6];
+} __packed;
+
+struct wmi_tx_desc_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ __le16 max_msdu_size;
+ /* Correlated status ring (0-63) */
+ u8 status_ring_id;
+ u8 cid;
+ u8 tid;
+ u8 encap_trans_type;
+ u8 mac_ctrl;
+ u8 to_resolution;
+ u8 agg_max_wsize;
+ u8 reserved[3];
+ struct wmi_vring_cfg_schd schd_params;
+} __packed;
+
+struct wmi_rx_desc_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ u8 irq_index;
+ /* 0-63 status rings */
+ u8 status_ring_id;
+ u8 reserved[2];
+ __le64 sw_tail_host_addr;
+} __packed;
+
+struct wmi_bcast_desc_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ __le16 max_msdu_size;
+ /* Correlated status ring (0-63) */
+ u8 status_ring_id;
+ u8 encap_trans_type;
+ u8 reserved[4];
+} __packed;
+
/* WMI_LO_POWER_CALIB_FROM_OTP_CMDID */
struct wmi_lo_power_calib_from_otp_cmd {
/* index to read from OTP. zero based */
@@ -781,18 +1051,18 @@ struct wmi_lo_power_calib_from_otp_event {
u8 reserved[3];
} __packed;
-/* WMI_VRING_BA_EN_CMDID */
-struct wmi_vring_ba_en_cmd {
- u8 ringid;
+/* WMI_RING_BA_EN_CMDID */
+struct wmi_ring_ba_en_cmd {
+ u8 ring_id;
u8 agg_max_wsize;
__le16 ba_timeout;
u8 amsdu;
u8 reserved[3];
} __packed;
-/* WMI_VRING_BA_DIS_CMDID */
-struct wmi_vring_ba_dis_cmd {
- u8 ringid;
+/* WMI_RING_BA_DIS_CMDID */
+struct wmi_ring_ba_dis_cmd {
+ u8 ring_id;
u8 reserved;
__le16 reason;
} __packed;
@@ -950,6 +1220,21 @@ struct wmi_rcp_addba_resp_cmd {
u8 reserved[2];
} __packed;
+/* WMI_RCP_ADDBA_RESP_EDMA_CMDID */
+struct wmi_rcp_addba_resp_edma_cmd {
+ u8 cid;
+ u8 tid;
+ u8 dialog_token;
+ u8 reserved;
+ __le16 status_code;
+ /* ieee80211_ba_parameterset field to send */
+ __le16 ba_param_set;
+ __le16 ba_timeout;
+ u8 status_ring_id;
+ /* wmi_cfg_rx_chain_cmd_reorder_type */
+ u8 reorder_type;
+} __packed;
+
/* WMI_RCP_DELBA_CMDID */
struct wmi_rcp_delba_cmd {
/* Used for cid less than 8. For higher cid set
@@ -999,8 +1284,8 @@ struct wmi_echo_cmd {
} __packed;
/* WMI_DEEP_ECHO_CMDID
- * Check FW and ucode are alive
- * Returned event: WMI_ECHO_RSP_EVENTID
+ * Check FW and uCode are alive
+ * Returned event: WMI_DEEP_ECHO_RSP_EVENTID
*/
struct wmi_deep_echo_cmd {
__le32 value;
@@ -1324,6 +1609,10 @@ struct wmi_fixed_scheduling_config_complete_event {
u8 reserved[3];
} __packed;
+/* This value exists for backwards compatibility only.
+ * Do not use it in new commands.
+ * Use dynamic arrays where possible.
+ */
#define WMI_NUM_MCS (13)
/* WMI_FIXED_SCHEDULING_CONFIG_CMDID */
@@ -1371,6 +1660,52 @@ struct wmi_set_multi_directed_omnis_config_event {
u8 reserved[3];
} __packed;
+/* WMI_RADAR_GENERAL_CONFIG_EVENTID */
+struct wmi_radar_general_config_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_CONFIG_SELECT_EVENTID */
+struct wmi_radar_config_select_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+ /* In unit of bytes */
+ __le32 fifo_size;
+ /* In unit of bytes */
+ __le32 pulse_size;
+} __packed;
+
+/* WMI_RADAR_PARAMS_CONFIG_EVENTID */
+struct wmi_radar_params_config_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_SET_MODE_EVENTID */
+struct wmi_radar_set_mode_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_CONTROL_EVENTID */
+struct wmi_radar_control_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_PCI_CONTROL_EVENTID */
+struct wmi_radar_pci_control_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
/* WMI_SET_LONG_RANGE_CONFIG_CMDID */
struct wmi_set_long_range_config_cmd {
__le32 reserved;
@@ -1383,12 +1718,12 @@ struct wmi_set_long_range_config_complete_event {
u8 reserved[3];
} __packed;
-/* payload max size is 236 bytes: max event buffer size (256) - WMI headers
+/* payload max size is 1024 bytes: max event buffer size (1044) - WMI headers
* (16) - prev struct field size (4)
*/
-#define WMI_MAX_IOCTL_PAYLOAD_SIZE (236)
-#define WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE (236)
-#define WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE (236)
+#define WMI_MAX_IOCTL_PAYLOAD_SIZE (1024)
+#define WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE (1024)
+#define WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE (1024)
enum wmi_internal_fw_ioctl_code {
WMI_INTERNAL_FW_CODE_NONE = 0x0,
@@ -1428,7 +1763,37 @@ struct wmi_internal_fw_event_event {
__le32 payload[0];
} __packed;
-/* WMI_BF_CONTROL_CMDID */
+/* WMI_SET_VRING_PRIORITY_WEIGHT_CMDID */
+struct wmi_set_vring_priority_weight_cmd {
+ /* Array of weights. Valid values are
+ * WMI_QOS_MIN_DEFAULT_WEIGHT...WMI_QOS_MAX_WEIGHT. Weight #0 is
+ * hard-coded WMI_QOS_MIN_WEIGHT. This array provides the weights
+ * #1..#3
+ */
+ u8 weight[3];
+ u8 reserved;
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_CMDID */
+struct wmi_vring_priority {
+ u8 vring_idx;
+ /* Weight index. Valid value is 0-3 */
+ u8 priority;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_CMDID */
+struct wmi_set_vring_priority_cmd {
+ /* number of entries in vring_priority. Set to
+ * WMI_QOS_SET_VIF_PRIORITY to update the VIF's priority, and there
+ * will be only one entry in vring_priority
+ */
+ u8 num_of_vrings;
+ u8 reserved[3];
+ struct wmi_vring_priority vring_priority[0];
+} __packed;
+
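Because wmi_set_vring_priority_cmd ends in a zero-length array, its wire
size depends on num_of_vrings. A minimal userspace sketch of the usual
build-and-size pattern for such a command (simplified struct mirrors; the
real driver would use kzalloc and the __le/__packed types above):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct vring_priority {
		uint8_t vring_idx;
		uint8_t priority;	/* weight index, 0-3 */
		uint8_t reserved[2];
	};

	struct set_vring_priority_cmd {
		uint8_t num_of_vrings;
		uint8_t reserved[3];
		struct vring_priority vring_priority[];
	};

	int main(void)
	{
		int n = 2;
		size_t len = sizeof(struct set_vring_priority_cmd) +
			     n * sizeof(struct vring_priority);
		struct set_vring_priority_cmd *cmd = calloc(1, len);

		if (!cmd)
			return 1;
		cmd->num_of_vrings = n;
		cmd->vring_priority[0].vring_idx = 0;
		cmd->vring_priority[0].priority = 1;
		cmd->vring_priority[1].vring_idx = 3;
		cmd->vring_priority[1].priority = 2;
		printf("command payload: %zu bytes\n", len);	/* 4 + 2*4 = 12 */
		free(cmd);
		return 0;
	}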
+/* WMI_BF_CONTROL_CMDID - deprecated */
struct wmi_bf_control_cmd {
/* wmi_bf_triggers */
__le32 triggers;
@@ -1470,6 +1835,95 @@ struct wmi_bf_control_cmd {
u8 reserved2[2];
} __packed;
+/* BF configuration for each MCS */
+struct wmi_bf_control_ex_mcs {
+ /* Long term throughput threshold [Mbps] */
+ u8 long_term_mbps_th_tbl;
+ u8 reserved;
+ /* Long term timeout threshold table [msec] */
+ __le16 long_term_trig_timeout_per_mcs;
+} __packed;
+
+/* WMI_BF_CONTROL_EX_CMDID */
+struct wmi_bf_control_ex_cmd {
+ /* wmi_bf_triggers */
+ __le32 triggers;
+ /* enum wmi_edmg_tx_mode */
+ u8 tx_mode;
+ /* DISABLED = 0, ENABLED = 1, DRY_RUN = 2 */
+ u8 txss_mode;
+ /* DISABLED = 0, ENABLED = 1, DRY_RUN = 2 */
+ u8 brp_mode;
+ /* Max cts threshold (correspond to
+ * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP)
+ */
+ u8 bf_trigger_max_cts_failure_thr;
+ /* Max cts threshold in dense (correspond to
+ * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP)
+ */
+ u8 bf_trigger_max_cts_failure_dense_thr;
+ /* Max b-ack threshold (correspond to
+ * WMI_BF_TRIGGER_MAX_BACK_FAILURE)
+ */
+ u8 bf_trigger_max_back_failure_thr;
+ /* Max b-ack threshold in dense (correspond to
+ * WMI_BF_TRIGGER_MAX_BACK_FAILURE)
+ */
+ u8 bf_trigger_max_back_failure_dense_thr;
+ u8 reserved0;
+ /* Wrong sectors threshold */
+ __le32 wrong_sector_bis_thr;
+ /* BOOL to enable/disable long term trigger */
+ u8 long_term_enable;
+ /* 1 = Update long term thresholds from the long_term_mbps_th_tbl and
+ * long_term_trig_timeout_per_mcs arrays, 0 = Ignore
+ */
+ u8 long_term_update_thr;
+ u8 each_mcs_cfg_size;
+ u8 reserved1;
+ /* Configuration for each MCS */
+ struct wmi_bf_control_ex_mcs each_mcs_cfg[0];
+} __packed;
+
+/* WMI_LINK_STATS_CMD */
+enum wmi_link_stats_action {
+ WMI_LINK_STATS_SNAPSHOT = 0x00,
+ WMI_LINK_STATS_PERIODIC = 0x01,
+ WMI_LINK_STATS_STOP_PERIODIC = 0x02,
+};
+
+/* WMI_LINK_STATS_EVENT record identifiers */
+enum wmi_link_stats_record_type {
+ WMI_LINK_STATS_TYPE_BASIC = 0x01,
+ WMI_LINK_STATS_TYPE_GLOBAL = 0x02,
+};
+
+/* WMI_LINK_STATS_CMDID */
+struct wmi_link_stats_cmd {
+ /* bitmask of required record types
+ * (wmi_link_stats_record_type_e)
+ */
+ __le32 record_type_mask;
+ /* 0xff for all cids */
+ u8 cid;
+ /* wmi_link_stats_action_e */
+ u8 action;
+ u8 reserved[6];
+ /* range = 100 - 10000 */
+ __le32 interval_msec;
+} __packed;
+
+/* WMI_SET_GRANT_MCS_CMDID */
+struct wmi_set_grant_mcs_cmd {
+ u8 mcs;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_AP_SLOT_SIZE_CMDID */
+struct wmi_set_ap_slot_size_cmd {
+ __le32 slot_size;
+} __packed;
+
/* WMI Events
* List of Events (target to host)
*/
@@ -1482,10 +1936,19 @@ enum wmi_event_id {
WMI_SCHED_SCAN_RESULT_EVENTID = 0x1007,
WMI_SCAN_COMPLETE_EVENTID = 0x100A,
WMI_REPORT_STATISTICS_EVENTID = 0x100B,
+ WMI_FT_AUTH_STATUS_EVENTID = 0x100C,
+ WMI_FT_REASSOC_STATUS_EVENTID = 0x100D,
+ WMI_RADAR_GENERAL_CONFIG_EVENTID = 0x1100,
+ WMI_RADAR_CONFIG_SELECT_EVENTID = 0x1101,
+ WMI_RADAR_PARAMS_CONFIG_EVENTID = 0x1102,
+ WMI_RADAR_SET_MODE_EVENTID = 0x1103,
+ WMI_RADAR_CONTROL_EVENTID = 0x1104,
+ WMI_RADAR_PCI_CONTROL_EVENTID = 0x1105,
WMI_RD_MEM_RSP_EVENTID = 0x1800,
WMI_FW_READY_EVENTID = 0x1801,
WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
WMI_ECHO_RSP_EVENTID = 0x1803,
+ WMI_DEEP_ECHO_RSP_EVENTID = 0x1804,
/* deprecated */
WMI_FS_TUNE_DONE_EVENTID = 0x180A,
/* deprecated */
@@ -1511,6 +1974,9 @@ enum wmi_event_id {
WMI_DELBA_EVENTID = 0x1826,
WMI_GET_SSID_EVENTID = 0x1828,
WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
+ /* Event is shared between WMI_SW_TX_REQ_CMDID and
+ * WMI_SW_TX_REQ_EXT_CMDID
+ */
WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
@@ -1535,7 +2001,7 @@ enum wmi_event_id {
WMI_BF_CTRL_DONE_EVENTID = 0x1862,
WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
WMI_GET_STATUS_DONE_EVENTID = 0x1864,
- WMI_VRING_EN_EVENTID = 0x1865,
+ WMI_RING_EN_EVENTID = 0x1865,
WMI_GET_RF_STATUS_EVENTID = 0x1866,
WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867,
WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID = 0x1868,
@@ -1558,7 +2024,12 @@ enum wmi_event_id {
WMI_PCP_FACTOR_EVENTID = 0x191A,
/* Power Save Configuration Events */
WMI_PS_DEV_PROFILE_CFG_EVENTID = 0x191C,
+ WMI_RS_ENABLE_EVENTID = 0x191E,
+ WMI_RS_CFG_EX_EVENTID = 0x191F,
+ WMI_GET_DETAILED_RS_RES_EX_EVENTID = 0x1920,
+ /* deprecated */
WMI_RS_CFG_DONE_EVENTID = 0x1921,
+ /* deprecated */
WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922,
WMI_AOA_MEAS_EVENTID = 0x1923,
WMI_BRP_SET_ANT_LIMIT_EVENTID = 0x1924,
@@ -1586,7 +2057,14 @@ enum wmi_event_id {
WMI_PRIO_TX_SECTORS_ORDER_EVENTID = 0x19A5,
WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6,
WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7,
+ /* deprecated */
WMI_BF_CONTROL_EVENTID = 0x19AA,
+ WMI_BF_CONTROL_EX_EVENTID = 0x19AB,
+ WMI_TX_STATUS_RING_CFG_DONE_EVENTID = 0x19C0,
+ WMI_RX_STATUS_RING_CFG_DONE_EVENTID = 0x19C1,
+ WMI_TX_DESC_RING_CFG_DONE_EVENTID = 0x19C2,
+ WMI_RX_DESC_RING_CFG_DONE_EVENTID = 0x19C3,
+ WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID = 0x19C5,
WMI_SCHEDULING_SCHEME_EVENTID = 0x1A01,
WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID = 0x1A02,
WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03,
@@ -1597,6 +2075,12 @@ enum wmi_event_id {
WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_EVENTID = 0x1A08,
WMI_INTERNAL_FW_EVENT_EVENTID = 0x1A0A,
WMI_INTERNAL_FW_IOCTL_EVENTID = 0x1A0B,
+ WMI_LINK_STATS_CONFIG_DONE_EVENTID = 0x1A0C,
+ WMI_LINK_STATS_EVENTID = 0x1A0D,
+ WMI_SET_GRANT_MCS_EVENTID = 0x1A0E,
+ WMI_SET_AP_SLOT_SIZE_EVENTID = 0x1A0F,
+ WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID = 0x1A10,
+ WMI_SET_VRING_PRIORITY_EVENTID = 0x1A11,
WMI_SET_CHANNEL_EVENTID = 0x9000,
WMI_ASSOC_REQ_EVENTID = 0x9001,
WMI_EAPOL_RX_EVENTID = 0x9002,
@@ -1861,6 +2345,33 @@ struct wmi_scan_complete_event {
__le32 status;
} __packed;
+/* WMI_FT_AUTH_STATUS_EVENTID */
+struct wmi_ft_auth_status_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+ u8 mac_addr[WMI_MAC_LEN];
+ __le16 ie_len;
+ u8 ie_info[0];
+} __packed;
+
+/* WMI_FT_REASSOC_STATUS_EVENTID */
+struct wmi_ft_reassoc_status_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ /* association id received from new AP */
+ u8 aid;
+ /* enum wmi_channel */
+ u8 channel;
+ /* enum wmi_channel */
+ u8 edmg_channel;
+ u8 mac_addr[WMI_MAC_LEN];
+ __le16 beacon_ie_len;
+ __le16 reassoc_req_ie_len;
+ __le16 reassoc_resp_ie_len;
+ u8 ie_info[0];
+} __packed;
+
/* wmi_rx_mgmt_info */
struct wmi_rx_mgmt_info {
u8 mcs;
@@ -1997,6 +2508,49 @@ struct wmi_rcp_addba_resp_sent_event {
u8 reserved2[2];
} __packed;
+/* WMI_TX_STATUS_RING_CFG_DONE_EVENTID */
+struct wmi_tx_status_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_RX_STATUS_RING_CFG_DONE_EVENTID */
+struct wmi_rx_status_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID */
+struct wmi_cfg_def_rx_offload_done_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_TX_DESC_RING_CFG_DONE_EVENTID */
+struct wmi_tx_desc_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_RX_DESC_RING_CFG_DONE_EVENTID */
+struct wmi_rx_desc_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
/* WMI_RCP_ADDBA_REQ_EVENTID */
struct wmi_rcp_addba_req_event {
/* Used for cid less than 8. For higher cid set
@@ -2047,9 +2601,9 @@ struct wmi_data_port_open_event {
u8 reserved[3];
} __packed;
-/* WMI_VRING_EN_EVENTID */
-struct wmi_vring_en_event {
- u8 vring_index;
+/* WMI_RING_EN_EVENTID */
+struct wmi_ring_en_event {
+ u8 ring_index;
u8 reserved[3];
} __packed;
@@ -2174,6 +2728,11 @@ struct wmi_echo_rsp_event {
__le32 echoed_value;
} __packed;
+/* WMI_DEEP_ECHO_RSP_EVENTID */
+struct wmi_deep_echo_rsp_event {
+ __le32 echoed_value;
+} __packed;
+
/* WMI_RF_PWR_ON_DELAY_RSP_EVENTID */
struct wmi_rf_pwr_on_delay_rsp_event {
/* wmi_fw_status */
@@ -2312,6 +2871,81 @@ struct wmi_rs_cfg {
__le32 mcs_en_vec;
} __packed;
+enum wmi_edmg_tx_mode {
+ WMI_TX_MODE_DMG = 0x0,
+ WMI_TX_MODE_EDMG_CB1 = 0x1,
+ WMI_TX_MODE_EDMG_CB2 = 0x2,
+ WMI_TX_MODE_EDMG_CB1_LONG_LDPC = 0x3,
+ WMI_TX_MODE_EDMG_CB2_LONG_LDPC = 0x4,
+ WMI_TX_MODE_MAX,
+};
+
+/* Rate search parameters common configuration */
+struct wmi_rs_cfg_ex_common {
+ /* enum wmi_edmg_tx_mode */
+ u8 mode;
+ /* stop threshold [0-100] */
+ u8 stop_th;
+ /* MCS1 stop threshold [0-100] */
+ u8 mcs1_fail_th;
+ u8 max_back_failure_th;
+ /* Debug feature for disabling internal RS trigger (which is
+ * currently triggered by BF Done)
+ */
+ u8 dbg_disable_internal_trigger;
+ u8 reserved[3];
+ __le32 back_failure_mask;
+} __packed;
+
+/* Rate search parameters configuration per MCS */
+struct wmi_rs_cfg_ex_mcs {
+ /* The maximal allowed PER for each MCS
+ * an MCS is considered failed if its PER during RS is higher
+ */
+ u8 per_threshold;
+ /* Number of MPDUs for each MCS
+ * this is the minimal statistic required to make an educated
+ * decision
+ */
+ u8 min_frame_cnt;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_RS_CFG_EX_CMDID */
+struct wmi_rs_cfg_ex_cmd {
+ /* Configuration for all MCSs */
+ struct wmi_rs_cfg_ex_common common_cfg;
+ u8 each_mcs_cfg_size;
+ u8 reserved[3];
+ /* Configuration for each MCS */
+ struct wmi_rs_cfg_ex_mcs each_mcs_cfg[0];
+} __packed;
+
+/* WMI_RS_CFG_EX_EVENTID */
+struct wmi_rs_cfg_ex_event {
+ /* enum wmi_edmg_tx_mode */
+ u8 mode;
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_RS_ENABLE_CMDID */
+struct wmi_rs_enable_cmd {
+ u8 cid;
+ /* enable or disable rate search */
+ u8 rs_enable;
+ u8 reserved[2];
+ __le32 mcs_en_vec;
+} __packed;
+
+/* WMI_RS_ENABLE_EVENTID */
+struct wmi_rs_enable_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
/* Slot types */
enum wmi_sched_scheme_slot_type {
WMI_SCHED_SLOT_SP = 0x0,
@@ -2404,7 +3038,7 @@ struct wmi_scheduling_scheme_event {
u8 reserved[1];
} __packed;
-/* WMI_RS_CFG_CMDID */
+/* WMI_RS_CFG_CMDID - deprecated */
struct wmi_rs_cfg_cmd {
/* connection id */
u8 cid;
@@ -2414,7 +3048,7 @@ struct wmi_rs_cfg_cmd {
struct wmi_rs_cfg rs_cfg;
} __packed;
-/* WMI_RS_CFG_DONE_EVENTID */
+/* WMI_RS_CFG_DONE_EVENTID - deprecated */
struct wmi_rs_cfg_done_event {
u8 cid;
/* enum wmi_fw_status */
@@ -2422,7 +3056,7 @@ struct wmi_rs_cfg_done_event {
u8 reserved[2];
} __packed;
-/* WMI_GET_DETAILED_RS_RES_CMDID */
+/* WMI_GET_DETAILED_RS_RES_CMDID - deprecated */
struct wmi_get_detailed_rs_res_cmd {
/* connection id */
u8 cid;
@@ -2447,7 +3081,7 @@ struct wmi_rs_results {
u8 mcs;
} __packed;
-/* WMI_GET_DETAILED_RS_RES_EVENTID */
+/* WMI_GET_DETAILED_RS_RES_EVENTID - deprecated */
struct wmi_get_detailed_rs_res_event {
u8 cid;
/* enum wmi_rs_results_status */
@@ -2457,6 +3091,45 @@ struct wmi_get_detailed_rs_res_event {
u8 reserved[3];
} __packed;
+/* WMI_GET_DETAILED_RS_RES_EX_CMDID */
+struct wmi_get_detailed_rs_res_ex_cmd {
+ u8 cid;
+ u8 reserved[3];
+} __packed;
+
+/* Rate search results */
+struct wmi_rs_results_ex_common {
+ /* RS timestamp */
+ __le32 tsf;
+ /* RS selected MCS */
+ u8 mcs;
+ /* enum wmi_edmg_tx_mode */
+ u8 mode;
+ u8 reserved[2];
+} __packed;
+
+/* Rate search results */
+struct wmi_rs_results_ex_mcs {
+ /* number of sent MPDUs */
+ u8 num_of_tx_pkt;
+ /* number of non-acked MPDUs */
+ u8 num_of_non_acked_pkt;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_EX_EVENTID */
+struct wmi_get_detailed_rs_res_ex_event {
+ u8 cid;
+ /* enum wmi_rs_results_status */
+ u8 status;
+ u8 reserved0[2];
+ struct wmi_rs_results_ex_common common_rs_results;
+ u8 each_mcs_results_size;
+ u8 reserved1[3];
+ /* Results for each MCS */
+ struct wmi_rs_results_ex_mcs each_mcs_results[0];
+} __packed;
+
/* BRP antenna limit mode */
enum wmi_brp_ant_limit_mode {
/* Disable BRP force antenna limit */
@@ -3207,13 +3880,20 @@ struct wmi_get_assoc_list_res_event {
u8 reserved[3];
} __packed;
-/* WMI_BF_CONTROL_EVENTID */
+/* WMI_BF_CONTROL_EVENTID - deprecated */
struct wmi_bf_control_event {
/* wmi_fw_status */
u8 status;
u8 reserved[3];
} __packed;
+/* WMI_BF_CONTROL_EX_EVENTID */
+struct wmi_bf_control_ex_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
/* WMI_COMMAND_NOT_SUPPORTED_EVENTID */
struct wmi_command_not_supported_event {
/* device id */
@@ -3283,4 +3963,96 @@ struct wmi_internal_fw_set_channel_event {
u8 reserved[3];
} __packed;
+/* WMI_LINK_STATS_CONFIG_DONE_EVENTID */
+struct wmi_link_stats_config_done_event {
+ /* wmi_fw_status_e */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_LINK_STATS_EVENTID */
+struct wmi_link_stats_event {
+ __le64 tsf;
+ __le16 payload_size;
+ u8 has_next;
+ u8 reserved[5];
+ /* a stream of wmi_link_stats_record_s */
+ u8 payload[0];
+} __packed;
+
+/* WMI_LINK_STATS_EVENT */
+struct wmi_link_stats_record {
+ /* wmi_link_stats_record_type_e */
+ u8 record_type_id;
+ u8 reserved;
+ __le16 record_size;
+ u8 record[0];
+} __packed;
+
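wmi_link_stats_event carries a stream of these variable-size records, so a
consumer walks the payload record by record using record_size. A minimal
userspace sketch of that walk (simplified mirrors; little-endian host
assumed, and record_size is assumed to count only the record[] body):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct stats_record_hdr {
		uint8_t record_type_id;
		uint8_t reserved;
		uint16_t record_size;	/* bytes of data after the header */
	};

	static void walk_records(const uint8_t *payload, size_t payload_size)
	{
		size_t off = 0;

		while (off + sizeof(struct stats_record_hdr) <= payload_size) {
			struct stats_record_hdr hdr;

			memcpy(&hdr, payload + off, sizeof(hdr)); /* no unaligned loads */
			if (off + sizeof(hdr) + hdr.record_size > payload_size)
				break;	/* truncated record */
			printf("record type %u, %u payload bytes\n",
			       hdr.record_type_id, hdr.record_size);
			off += sizeof(hdr) + hdr.record_size;
		}
	}

	int main(void)
	{
		/* Two fabricated records: type 1 / 4 bytes, type 2 / 2 bytes */
		uint8_t buf[] = { 1, 0, 4, 0, 0xaa, 0xbb, 0xcc, 0xdd,
				  2, 0, 2, 0, 0x11, 0x22 };

		walk_records(buf, sizeof(buf));
		return 0;
	}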
+/* WMI_LINK_STATS_TYPE_BASIC */
+struct wmi_link_stats_basic {
+ u8 cid;
+ s8 rssi;
+ u8 sqi;
+ u8 bf_mcs;
+ u8 per_average;
+ u8 selected_rfc;
+ u8 rx_effective_ant_num;
+ u8 my_rx_sector;
+ u8 my_tx_sector;
+ u8 other_rx_sector;
+ u8 other_tx_sector;
+ u8 reserved[7];
+ /* 1/4 dB units */
+ __le16 snr;
+ __le32 tx_tpt;
+ __le32 tx_goodput;
+ __le32 rx_goodput;
+ __le32 bf_count;
+ __le32 rx_bcast_frames;
+} __packed;
+
+/* WMI_LINK_STATS_TYPE_GLOBAL */
+struct wmi_link_stats_global {
+ /* all ack-able frames */
+ __le32 rx_frames;
+ /* all ack-able frames */
+ __le32 tx_frames;
+ __le32 rx_ba_frames;
+ __le32 tx_ba_frames;
+ __le32 tx_beacons;
+ __le32 rx_mic_errors;
+ __le32 rx_crc_errors;
+ __le32 tx_fail_no_ack;
+ u8 reserved[8];
+} __packed;
+
+/* WMI_SET_GRANT_MCS_EVENTID */
+struct wmi_set_grant_mcs_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_AP_SLOT_SIZE_EVENTID */
+struct wmi_set_ap_slot_size_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID */
+struct wmi_set_vring_priority_weight_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_EVENTID */
+struct wmi_set_vring_priority_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index b01dc34d55af..74538085cfb7 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -1399,6 +1399,7 @@ static int atmel_validate_channel(struct atmel_private *priv, int channel)
return 0;
}
+#ifdef CONFIG_PROC_FS
static int atmel_proc_show(struct seq_file *m, void *v)
{
struct atmel_private *priv = m->private;
@@ -1481,6 +1482,7 @@ static int atmel_proc_show(struct seq_file *m, void *v)
seq_printf(m, "Current state:\t\t%s\n", s);
return 0;
}
+#endif
static const struct net_device_ops atmel_netdev_ops = {
.ndo_open = atmel_open,
@@ -1516,10 +1518,9 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
priv->present_callback = card_present;
priv->card = card;
priv->firmware = NULL;
- priv->firmware_id[0] = '\0';
priv->firmware_type = fw_type;
if (firmware) /* module parameter */
- strcpy(priv->firmware_id, firmware);
+ strlcpy(priv->firmware_id, firmware, sizeof(priv->firmware_id));
priv->bus_type = card_present ? BUS_TYPE_PCCARD : BUS_TYPE_PCI;
priv->station_state = STATION_STATE_DOWN;
priv->do_rx_crc = 0;
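
The strcpy()-to-strlcpy() change above (like the strncpy()-to-strlcpy()
changes in the b43/b43legacy hunks further down) both bounds the copy and
guarantees NUL termination: strncpy() leaves the destination unterminated
whenever the source fills it. A standalone illustration, with my_strlcpy()
as a portable stand-in for the kernel helper:

	#include <stdio.h>
	#include <string.h>

	static size_t my_strlcpy(char *dst, const char *src, size_t size)
	{
		size_t len = strlen(src);

		if (size) {
			size_t n = len < size - 1 ? len : size - 1;

			memcpy(dst, src, n);
			dst[n] = '\0';	/* always terminated */
		}
		return len;	/* would-be length, for truncation checks */
	}

	int main(void)
	{
		char a[4], b[4];

		strncpy(a, "wifi", sizeof(a));	  /* {'w','i','f','i'}: no NUL */
		my_strlcpy(b, "wifi", sizeof(b)); /* "wif", terminated */
		printf("strncpy: %.4s (unterminated), strlcpy: %s\n", a, b);
		return 0;
	}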
@@ -2646,14 +2647,9 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
break;
}
- if (!(new_firmware = kmalloc(com.len, GFP_KERNEL))) {
- rc = -ENOMEM;
- break;
- }
-
- if (copy_from_user(new_firmware, com.data, com.len)) {
- kfree(new_firmware);
- rc = -EFAULT;
+ new_firmware = memdup_user(com.data, com.len);
+ if (IS_ERR(new_firmware)) {
+ rc = PTR_ERR(new_firmware);
break;
}
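
memdup_user() folds the open-coded kmalloc() + copy_from_user() pair that
the removed lines used into a single call returning an ERR_PTR on failure,
which is why the error path collapses to one PTR_ERR(). Roughly what it
does internally (a simplified sketch, not the actual mm/util.c code):

	static void *memdup_user_sketch(const void __user *src, size_t len)
	{
		void *p = kmalloc(len, GFP_USER);

		if (!p)
			return ERR_PTR(-ENOMEM);
		if (copy_from_user(p, src, len)) {
			kfree(p);
			return ERR_PTR(-EFAULT);
		}
		return p;
	}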
@@ -3681,7 +3677,7 @@ static int probe_atmel_card(struct net_device *dev)
atmel_write16(dev, GCR, 0x0060);
atmel_write16(dev, GCR, 0x0040);
- mdelay(500);
+ msleep(500);
if (atmel_read16(dev, MR2) == 0) {
/* No stored firmware so load a small stub which just
diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c
index cb987c2ecc6b..87131f663292 100644
--- a/drivers/net/wireless/broadcom/b43/leds.c
+++ b/drivers/net/wireless/broadcom/b43/leds.c
@@ -131,7 +131,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
led->wl = dev->wl;
led->index = led_index;
led->activelow = activelow;
- strncpy(led->name, name, sizeof(led->name));
+ strlcpy(led->name, name, sizeof(led->name));
atomic_set(&led->state, 0);
led->led_dev.name = led->name;
diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c
index fd4565389c77..bc922118b6ac 100644
--- a/drivers/net/wireless/broadcom/b43legacy/leds.c
+++ b/drivers/net/wireless/broadcom/b43legacy/leds.c
@@ -101,7 +101,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev,
led->dev = dev;
led->index = led_index;
led->activelow = activelow;
- strncpy(led->name, name, sizeof(led->name));
+ strlcpy(led->name, name, sizeof(led->name));
led->led_dev.name = led->name;
led->led_dev.default_trigger = default_trigger;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b6122aad639e..5444e6213d45 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2434,7 +2434,7 @@ static void brcmf_convert_sta_flags(u32 fw_sta_flags, struct station_info *si)
struct nl80211_sta_flag_update *sfu;
brcmf_dbg(TRACE, "flags %08x\n", fw_sta_flags);
- si->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
+ si->filled |= BIT_ULL(NL80211_STA_INFO_STA_FLAGS);
sfu = &si->sta_flags;
sfu->mask = BIT(NL80211_STA_FLAG_WME) |
BIT(NL80211_STA_FLAG_AUTHENTICATED) |
@@ -2470,7 +2470,7 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
brcmf_err("Failed to get bss info (%d)\n", err);
goto out_kfree;
}
- si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
+ si->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM);
si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period);
si->bss_param.dtim_period = buf->bss_le.dtim_period;
capability = le16_to_cpu(buf->bss_le.capability);
@@ -2501,7 +2501,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
brcmf_err("BRCMF_C_GET_RATE error (%d)\n", err);
return err;
}
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
sinfo->txrate.legacy = rate * 5;
memset(&scbval, 0, sizeof(scbval));
@@ -2512,7 +2512,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
return err;
}
rssi = le32_to_cpu(scbval.val);
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = rssi;
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_GET_PKTCNTS, &pktcnt,
@@ -2521,10 +2521,10 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
brcmf_err("BRCMF_C_GET_GET_PKTCNTS error (%d)\n", err);
return err;
}
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS) |
- BIT(NL80211_STA_INFO_RX_DROP_MISC) |
- BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
+ BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->rx_packets = le32_to_cpu(pktcnt.rx_good_pkt);
sinfo->rx_dropped_misc = le32_to_cpu(pktcnt.rx_bad_pkt);
sinfo->tx_packets = le32_to_cpu(pktcnt.tx_good_pkt);
@@ -2571,7 +2571,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
}
}
brcmf_dbg(TRACE, "version %d\n", le16_to_cpu(sta_info_le.ver));
- sinfo->filled = BIT(NL80211_STA_INFO_INACTIVE_TIME);
+ sinfo->filled = BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME);
sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000;
sta_flags = le32_to_cpu(sta_info_le.flags);
brcmf_convert_sta_flags(sta_flags, sinfo);
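
The BIT()-to-BIT_ULL() conversions throughout this file matter because
sinfo->filled is a 64-bit bitmap while BIT(n) expands to 1UL << n: on
32-bit targets unsigned long is 32 bits, so any NL80211_STA_INFO_* index
of 32 or above would shift out of range. A standalone illustration (the
macro names here are local stand-ins, not the kernel definitions):

	#include <stdint.h>
	#include <stdio.h>

	#define BIT_UL(n)	(1UL << (n))	/* 32 bits on ILP32: unsafe for n >= 32 */
	#define BIT_ULL(n)	(1ULL << (n))	/* at least 64 bits: always safe */

	int main(void)
	{
		uint64_t filled = 0;

		filled |= BIT_ULL(40);	/* well defined even on 32-bit hosts */
		printf("bit 40 set: %d\n", (int)((filled >> 40) & 1));
		return 0;
	}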
@@ -2581,33 +2581,33 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
else
sinfo->sta_flags.set &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
if (sta_flags & BRCMF_STA_ASSOC) {
- sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME);
sinfo->connected_time = le32_to_cpu(sta_info_le.in);
brcmf_fill_bss_param(ifp, sinfo);
}
if (sta_flags & BRCMF_STA_SCBSTATS) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->tx_failed = le32_to_cpu(sta_info_le.tx_failures);
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
sinfo->tx_packets = le32_to_cpu(sta_info_le.tx_pkts);
sinfo->tx_packets += le32_to_cpu(sta_info_le.tx_mcast_pkts);
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
sinfo->rx_packets = le32_to_cpu(sta_info_le.rx_ucast_pkts);
sinfo->rx_packets += le32_to_cpu(sta_info_le.rx_mcast_pkts);
if (sinfo->tx_packets) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
sinfo->txrate.legacy =
le32_to_cpu(sta_info_le.tx_rate) / 100;
}
if (sinfo->rx_packets) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
sinfo->rxrate.legacy =
le32_to_cpu(sta_info_le.rx_rate) / 100;
}
if (le16_to_cpu(sta_info_le.ver) >= 4) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES);
sinfo->tx_bytes = le64_to_cpu(sta_info_le.tx_tot_bytes);
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES);
sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
}
total_rssi = 0;
@@ -2623,10 +2623,10 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
}
}
if (count_rssi) {
- sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
sinfo->chains = count_rssi;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
total_rssi /= count_rssi;
sinfo->signal = total_rssi;
} else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
@@ -2639,7 +2639,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
goto done;
} else {
rssi = le32_to_cpu(scb_val.val);
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = rssi;
brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
}
@@ -6926,15 +6926,15 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
cfg->d11inf.io_type = (u8)io_type;
brcmu_d11_attach(&cfg->d11inf);
- err = brcmf_setup_wiphy(wiphy, ifp);
- if (err < 0)
- goto priv_out;
-
/* regulatory notifier below needs access to cfg so
* assign it now.
*/
drvr->config = cfg;
+ err = brcmf_setup_wiphy(wiphy, ifp);
+ if (err < 0)
+ goto priv_out;
+
brcmf_dbg(INFO, "Registering custom regulatory\n");
wiphy->reg_notifier = brcmf_cfg80211_reg_notifier;
wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 72954fd6df3b..b1f702faff4f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -21,6 +21,7 @@
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <net/addrconf.h>
+#include <net/ieee80211_radiotap.h>
#include <net/ipv6.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
@@ -404,6 +405,30 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
netif_rx_ni(skb);
}
+void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MONITOR_FMT_RADIOTAP)) {
+ /* Do nothing */
+ } else {
+ struct ieee80211_radiotap_header *radiotap;
+
+ /* TODO: use RX status to fill some radiotap data */
+ radiotap = skb_push(skb, sizeof(*radiotap));
+ memset(radiotap, 0, sizeof(*radiotap));
+ radiotap->it_len = cpu_to_le16(sizeof(*radiotap));
+
+ /* TODO: 4 bytes with receive status? */
+ skb->len -= 4;
+ }
+
+ skb->dev = ifp->ndev;
+ skb_reset_mac_header(skb);
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = htons(ETH_P_802_2);
+
+ brcmf_netif_rx(ifp, skb);
+}
+
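In the non-radiotap branch above, the code prepends the smallest legal
radiotap header: version 0, no present-field bits, and it_len covering the
whole 8-byte header in little-endian. A userspace mirror of that layout
(an illustration of the radiotap framing, not the kernel struct itself):

	#include <stdint.h>
	#include <stdio.h>

	struct radiotap_header {
		uint8_t  it_version;	/* always 0 */
		uint8_t  it_pad;
		uint16_t it_len;	/* little-endian total radiotap length */
		uint32_t it_present;	/* bitmap of present fields; 0 here */
	} __attribute__((packed));

	int main(void)
	{
		struct radiotap_header hdr = { .it_len = sizeof(hdr) };

		printf("minimal radiotap header: %zu bytes\n", sizeof(hdr));
		return 0;
	}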
static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
struct brcmf_if **ifp)
{
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 401f50458686..dcf6e27cc16f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -121,6 +121,7 @@ struct brcmf_pub {
struct brcmf_if *iflist[BRCMF_MAX_IFS];
s32 if2bss[BRCMF_MAX_IFS];
+ struct brcmf_if *mon_if;
struct mutex proto_block;
unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
@@ -216,6 +217,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
+void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
int __init brcmf_core_init(void);
void __exit brcmf_core_exit(void);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 800a423c7bc2..8347da632a5b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -48,6 +48,8 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_MBSS, "mbss" },
{ BRCMF_FEAT_MCHAN, "mchan" },
{ BRCMF_FEAT_P2P, "p2p" },
+ { BRCMF_FEAT_MONITOR, "monitor" },
+ { BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
};
#ifdef DEBUG
@@ -91,6 +93,42 @@ static int brcmf_feat_debugfs_read(struct seq_file *seq, void *data)
}
#endif /* DEBUG */
+struct brcmf_feat_fwfeat {
+ const char * const fwid;
+ u32 feat_flags;
+};
+
+static const struct brcmf_feat_fwfeat brcmf_feat_fwfeat_map[] = {
+ /* brcmfmac43602-pcie.ap.bin from linux-firmware.git commit ea1178515b88 */
+ { "01-6cb8e269", BIT(BRCMF_FEAT_MONITOR) },
+ /* brcmfmac4366b-pcie.bin from linux-firmware.git commit 52442afee990 */
+ { "01-c47a91a4", BIT(BRCMF_FEAT_MONITOR) },
+};
+
+static void brcmf_feat_firmware_overrides(struct brcmf_pub *drv)
+{
+ const struct brcmf_feat_fwfeat *e;
+ u32 feat_flags = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(brcmf_feat_fwfeat_map); i++) {
+ e = &brcmf_feat_fwfeat_map[i];
+ if (!strcmp(e->fwid, drv->fwver)) {
+ feat_flags = e->feat_flags;
+ break;
+ }
+ }
+
+ if (!feat_flags)
+ return;
+
+ for (i = 0; i < BRCMF_FEAT_LAST; i++)
+ if (feat_flags & BIT(i))
+ brcmf_dbg(INFO, "enabling firmware feature: %s\n",
+ brcmf_feat_names[i]);
+ drv->feat_flags |= feat_flags;
+}
+
/**
* brcmf_feat_iovar_int_get() - determine feature through iovar query.
*
@@ -251,6 +289,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
}
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
+ brcmf_feat_firmware_overrides(drvr);
+
/* set chip related quirks */
switch (drvr->bus_if->chip) {
case BRCM_CC_43236_CHIP_ID:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index d1193825e559..0b4974df353a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -33,6 +33,8 @@
* MFP: 802.11w Management Frame Protection.
* GSCAN: enhanced scan offload feature.
* FWSUP: Firmware supplicant.
+ * MONITOR: firmware can pass monitor packets to host.
+ * MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header.
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -48,7 +50,9 @@
BRCMF_FEAT_DEF(WOWL_ARP_ND) \
BRCMF_FEAT_DEF(MFP) \
BRCMF_FEAT_DEF(GSCAN) \
- BRCMF_FEAT_DEF(FWSUP)
+ BRCMF_FEAT_DEF(FWSUP) \
+ BRCMF_FEAT_DEF(MONITOR) \
+ BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 4b290705e3e6..d5bb81e88762 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -32,11 +32,30 @@
#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002
-#define BRCMF_STA_WME 0x00000002 /* WMM association */
-#define BRCMF_STA_AUTHE 0x00000008 /* Authenticated */
-#define BRCMF_STA_ASSOC 0x00000010 /* Associated */
-#define BRCMF_STA_AUTHO 0x00000020 /* Authorized */
-#define BRCMF_STA_SCBSTATS 0x00004000 /* Per STA debug stats */
+#define BRCMF_STA_BRCM 0x00000001 /* Running a Broadcom driver */
+#define BRCMF_STA_WME 0x00000002 /* WMM association */
+#define BRCMF_STA_NONERP 0x00000004 /* No ERP */
+#define BRCMF_STA_AUTHE 0x00000008 /* Authenticated */
+#define BRCMF_STA_ASSOC 0x00000010 /* Associated */
+#define BRCMF_STA_AUTHO 0x00000020 /* Authorized */
+#define BRCMF_STA_WDS 0x00000040 /* Wireless Distribution System */
+#define BRCMF_STA_WDS_LINKUP 0x00000080 /* WDS traffic/probes flowing properly */
+#define BRCMF_STA_PS 0x00000100 /* STA is in power save mode from AP's viewpoint */
+#define BRCMF_STA_APSD_BE 0x00000200 /* APSD delv/trigger for AC_BE is default enabled */
+#define BRCMF_STA_APSD_BK 0x00000400 /* APSD delv/trigger for AC_BK is default enabled */
+#define BRCMF_STA_APSD_VI 0x00000800 /* APSD delv/trigger for AC_VI is default enabled */
+#define BRCMF_STA_APSD_VO 0x00001000 /* APSD delv/trigger for AC_VO is default enabled */
+#define BRCMF_STA_N_CAP 0x00002000 /* STA 802.11n capable */
+#define BRCMF_STA_SCBSTATS 0x00004000 /* Per STA debug stats */
+#define BRCMF_STA_AMPDU_CAP 0x00008000 /* STA AMPDU capable */
+#define BRCMF_STA_AMSDU_CAP 0x00010000 /* STA AMSDU capable */
+#define BRCMF_STA_MIMO_PS 0x00020000 /* mimo ps mode is enabled */
+#define BRCMF_STA_MIMO_RTS 0x00040000 /* send rts in mimo ps mode */
+#define BRCMF_STA_RIFS_CAP 0x00080000 /* rifs enabled */
+#define BRCMF_STA_VHT_CAP 0x00100000 /* STA VHT(11ac) capable */
+#define BRCMF_STA_WPS 0x00200000 /* WPS state */
+#define BRCMF_STA_DWDS_CAP 0x01000000 /* DWDS CAP */
+#define BRCMF_STA_DWDS 0x02000000 /* DWDS active */
/* size of brcmf_scan_params not including variable length array */
#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
@@ -155,6 +174,8 @@
#define BRCMF_MFP_CAPABLE 1
#define BRCMF_MFP_REQUIRED 2
+#define BRCMF_VHT_CAP_MCS_MAP_NSS_MAX 8
+
/* MAX_CHUNK_LEN is the maximum length for data passing to firmware in each
* ioctl. It is relatively small because firmware has a small maximum input
* payload restriction for ioctls.
@@ -531,6 +552,8 @@ struct brcmf_sta_info_le {
/* w/hi bit set if basic */
__le32 in; /* seconds elapsed since associated */
__le32 listen_interval_inms; /* Min Listen interval in ms for STA */
+
+ /* Fields valid for ver >= 3 */
__le32 tx_pkts; /* # of packets transmitted */
__le32 tx_failures; /* # of packets failed */
__le32 rx_ucast_pkts; /* # of unicast packets received */
@@ -539,6 +562,8 @@ struct brcmf_sta_info_le {
__le32 rx_rate; /* Rate of last successful rx frame */
__le32 rx_decrypt_succeeds; /* # of packet decrypted successfully */
__le32 rx_decrypt_failures; /* # of packet decrypted failed */
+
+ /* Fields valid for ver >= 4 */
__le32 tx_tot_pkts; /* # of tx pkts (ucast + mcast) */
__le32 rx_tot_pkts; /* # of data packets recvd (uni + mcast) */
__le32 tx_mcast_pkts; /* # of mcast pkts txed */
@@ -575,6 +600,14 @@ struct brcmf_sta_info_le {
*/
__le32 rx_pkts_retried; /* # rx with retry bit set */
__le32 tx_rate_fallback; /* lowest fallback TX rate */
+
+ /* Fields valid for ver >= 5 */
+ struct {
+ __le32 count; /* # rates in this set */
+ u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */
+ u8 mcs[BRCMF_MCSSET_LEN]; /* supported mcs index bit map */
+ __le16 vht_mcs[BRCMF_VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */
+ } rateset_adv;
};
struct brcmf_chanspec_list {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index c40ba8855cd5..4e8397a0cbc8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -69,6 +69,8 @@
#define BRCMF_MSGBUF_MAX_EVENTBUF_POST 8
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3 0x01
+#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11 0x02
+#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK 0x07
#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT 5
#define BRCMF_MSGBUF_TX_FLUSH_CNT1 32
@@ -1128,6 +1130,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
struct sk_buff *skb;
u16 data_offset;
u16 buflen;
+ u16 flags;
u32 idx;
struct brcmf_if *ifp;
@@ -1137,6 +1140,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
data_offset = le16_to_cpu(rx_complete->data_offset);
buflen = le16_to_cpu(rx_complete->data_len);
idx = le32_to_cpu(rx_complete->msg.request_id);
+ flags = le16_to_cpu(rx_complete->flags);
skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
msgbuf->rx_pktids, idx);
@@ -1150,6 +1154,20 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
skb_trim(skb, buflen);
+ if ((flags & BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK) ==
+ BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11) {
+ ifp = msgbuf->drvr->mon_if;
+
+ if (!ifp) {
+ brcmf_err("Received unexpected monitor pkt\n");
+ brcmu_pkt_buf_free_skb(skb);
+ return;
+ }
+
+ brcmf_netif_mon_rx(ifp, skb);
+ return;
+ }
+
ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
if (!ifp || !ifp->ndev) {
brcmf_err("Received pkt for invalid ifidx %d\n",
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index 3a13d176b221..35e3b101e5cf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -159,7 +159,7 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
{
u16 data;
- if ((addr == RADIO_IDCODE))
+ if (addr == RADIO_IDCODE)
return 0xffff;
switch (pi->pubpi.phy_type) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index 1a187557982e..bedec1606caa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -16904,7 +16904,7 @@ static void wlc_phy_workarounds_nphy_rev3(struct brcms_phy *pi)
}
}
-void wlc_phy_workarounds_nphy_rev1(struct brcms_phy *pi)
+static void wlc_phy_workarounds_nphy_rev1(struct brcms_phy *pi)
{
static const u8 rfseq_rx2tx_events[] = {
NPHY_RFSEQ_CMD_NOP,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
index b9672da24a9d..b24bc57ca91b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
@@ -213,7 +213,7 @@ static const s16 log_table[] = {
30498,
31267,
32024,
- 32768
+ 32767
};
#define LOG_TABLE_SIZE 32 /* log_table size */
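
The table fix above is a signed-overflow repair: log_table entries are s16,
whose maximum is 32767, so the old final entry of 32768 could not be
represented and would wrap to -32768 on common ABIs. A standalone
demonstration of the wrap:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int16_t old_entry = (int16_t)32768;	/* wraps: prints -32768 */
		int16_t new_entry = 32767;		/* INT16_MAX: fits */

		printf("old: %d, new: %d\n", old_entry, new_entry);
		return 0;
	}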
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 72046e182745..04dd7a936593 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -3419,7 +3419,7 @@ done:
static void airo_handle_tx(struct airo_info *ai, u16 status)
{
- int i, len = 0, index = -1;
+ int i, index = -1;
u16 fid;
if (test_bit(FLAG_MPI, &ai->flags)) {
@@ -3443,11 +3443,9 @@ static void airo_handle_tx(struct airo_info *ai, u16 status)
fid = IN4500(ai, TXCOMPLFID);
- for(i = 0; i < MAX_FIDS; i++) {
- if ((ai->fids[i] & 0xffff) == fid) {
- len = ai->fids[i] >> 16;
+ for (i = 0; i < MAX_FIDS; i++) {
+ if ((ai->fids[i] & 0xffff) == fid)
index = i;
- }
}
if (index != -1) {
diff --git a/drivers/net/wireless/cisco/airo_cs.c b/drivers/net/wireless/cisco/airo_cs.c
index d9ed22b4cc6b..3718f958c0fc 100644
--- a/drivers/net/wireless/cisco/airo_cs.c
+++ b/drivers/net/wireless/cisco/airo_cs.c
@@ -102,11 +102,8 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
static int airo_config(struct pcmcia_device *link)
{
- struct local_info *dev;
int ret;
- dev = link->priv;
-
dev_dbg(&link->dev, "airo_config\n");
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index b8fd3cc90634..910db46db6a1 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -692,7 +692,7 @@ static void printk_buf(int level, const u8 * data, u32 len)
static void schedule_reset(struct ipw2100_priv *priv)
{
- unsigned long now = get_seconds();
+ time64_t now = ktime_get_boottime_seconds();
/* If we haven't received a reset request within the backoff period,
* then we can reset the backoff interval so this reset occurs
@@ -701,10 +701,10 @@ static void schedule_reset(struct ipw2100_priv *priv)
(now - priv->last_reset > priv->reset_backoff))
priv->reset_backoff = 0;
- priv->last_reset = get_seconds();
+ priv->last_reset = now;
if (!(priv->status & STATUS_RESET_PENDING)) {
- IPW_DEBUG_INFO("%s: Scheduling firmware restart (%ds).\n",
+ IPW_DEBUG_INFO("%s: Scheduling firmware restart (%llds).\n",
priv->net_dev->name, priv->reset_backoff);
netif_carrier_off(priv->net_dev);
netif_stop_queue(priv->net_dev);
@@ -2079,7 +2079,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
memcpy(priv->bssid, bssid, ETH_ALEN);
priv->status |= STATUS_ASSOCIATING;
- priv->connect_start = get_seconds();
+ priv->connect_start = ktime_get_boottime_seconds();
schedule_delayed_work(&priv->wx_event_work, HZ / 10);
}
@@ -4070,8 +4070,8 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr,
#define DUMP_VAR(x,y) len += sprintf(buf + len, # x ": %" y "\n", priv-> x)
if (priv->status & STATUS_ASSOCIATED)
- len += sprintf(buf + len, "connected: %lu\n",
- get_seconds() - priv->connect_start);
+ len += sprintf(buf + len, "connected: %llu\n",
+ ktime_get_boottime_seconds() - priv->connect_start);
else
len += sprintf(buf + len, "not connected\n");
@@ -4108,7 +4108,7 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr,
DUMP_VAR(txq_stat.lo, "d");
DUMP_VAR(ieee->scans, "d");
- DUMP_VAR(reset_backoff, "d");
+ DUMP_VAR(reset_backoff, "lld");
return len;
}
@@ -5112,11 +5112,9 @@ static int ipw2100_disassociate_bssid(struct ipw2100_priv *priv)
.host_command_length = ETH_ALEN
};
int err;
- int len;
IPW_DEBUG_HC("DISASSOCIATION_BSSID\n");
- len = ETH_ALEN;
/* The Firmware currently ignores the BSSID and just disassociates from
* the currently associated AP -- but in the off chance that a future
* firmware does use the BSSID provided here, we go ahead and try and
@@ -6437,7 +6435,7 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state)
pci_disable_device(pci_dev);
pci_set_power_state(pci_dev, PCI_D3hot);
- priv->suspend_at = get_seconds();
+ priv->suspend_at = ktime_get_boottime_seconds();
mutex_unlock(&priv->action_mutex);
@@ -6482,7 +6480,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev)
* the queue of needed */
netif_device_attach(dev);
- priv->suspend_time = get_seconds() - priv->suspend_at;
+ priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at;
/* Bring the device back up */
if (!(priv->status & STATUS_RF_KILL_SW))
@@ -7723,7 +7721,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev,
struct libipw_device *ieee = priv->ieee;
struct lib80211_crypt_data *crypt;
struct iw_param *param = &wrqu->param;
- int ret = 0;
switch (param->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
@@ -7733,7 +7730,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev,
/*
* wpa_supplicant will control these internally
*/
- ret = -EOPNOTSUPP;
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
@@ -7801,9 +7797,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev,
{
struct ipw2100_priv *priv = libipw_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
- __le16 reason;
-
- reason = cpu_to_le16(mlme->reason_code);
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.h b/drivers/net/wireless/intel/ipw2x00/ipw2100.h
index ce3e35f6b60f..8c11c7fa2eef 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.h
@@ -491,7 +491,7 @@ struct ipw2100_priv {
/* Statistics */
int resets;
- int reset_backoff;
+ time64_t reset_backoff;
/* Context */
u8 essid[IW_ESSID_MAX_SIZE];
@@ -500,8 +500,8 @@ struct ipw2100_priv {
u8 channel;
int last_mode;
- unsigned long connect_start;
- unsigned long last_reset;
+ time64_t connect_start;
+ time64_t last_reset;
u32 channel_mask;
u32 fatal_error;
@@ -581,9 +581,9 @@ struct ipw2100_priv {
int user_requested_scan;
- /* Track time in suspend */
- unsigned long suspend_at;
- unsigned long suspend_time;
+ /* Track time in suspend, using CLOCK_BOOTTIME */
+ time64_t suspend_at;
+ time64_t suspend_time;
u32 interrupts;
int tx_interrupts;
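
The get_seconds()-to-ktime_get_boottime_seconds() conversion in this driver
does two things at once: time64_t avoids the 32-bit wall-clock overflow in
2038, and the boottime clock keeps counting across suspend without jumping
when the wall clock is set, which is exactly what the suspend_at /
suspend_time bookkeeping needs. A userspace analogue of the new source:

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		/* CLOCK_BOOTTIME: monotonic, includes time spent suspended */
		if (clock_gettime(CLOCK_BOOTTIME, &ts) == 0)
			printf("seconds since boot (incl. suspend): %lld\n",
			       (long long)ts.tv_sec);
		return 0;
	}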
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 8a858f7e36f4..9644e7b93645 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -7112,7 +7112,7 @@ static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
{
u32 ret = 0;
- if ((priv == NULL))
+ if (!priv)
return 0;
if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
@@ -11888,7 +11888,7 @@ static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
- priv->suspend_at = get_seconds();
+ priv->suspend_at = ktime_get_boottime_seconds();
return 0;
}
@@ -11925,7 +11925,7 @@ static int ipw_pci_resume(struct pci_dev *pdev)
* the queue of needed */
netif_device_attach(dev);
- priv->suspend_time = get_seconds() - priv->suspend_at;
+ priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at;
/* Bring the device back up */
schedule_work(&priv->up);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
index aa301d1eee3c..f98ab1f71edd 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
@@ -1343,9 +1343,9 @@ struct ipw_priv {
s8 tx_power;
- /* Track time in suspend */
- unsigned long suspend_at;
- unsigned long suspend_time;
+ /* Track time in suspend using CLOCK_BOOTTIME */
+ time64_t suspend_at;
+ time64_t suspend_time;
#ifdef CONFIG_PM
u32 pm_state[16];
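
The hunks above replace get_seconds(), a deprecated 32-bit wall-clock
accessor, with ktime_get_boottime_seconds(), which returns a time64_t on
CLOCK_BOOTTIME: it keeps counting across suspend and does not wrap in 2038,
so the suspend_time delta stays meaningful. A minimal sketch of the pattern
(the demo_* names are hypothetical, standing in for the drivers' private
structs):

    #include <linux/timekeeping.h>  /* ktime_get_boottime_seconds() */

    struct demo_priv {
            time64_t suspend_at;
            time64_t suspend_time;
    };

    static void demo_suspend(struct demo_priv *priv)
    {
            priv->suspend_at = ktime_get_boottime_seconds();
    }

    static void demo_resume(struct demo_priv *priv)
    {
            /* CLOCK_BOOTTIME advanced while suspended, so this is the
             * real time spent in suspend. time64_t is a long long,
             * hence the "d" -> "lld" format switch in show_internals()
             * in the ipw2100.c hunk above.
             */
            priv->suspend_time = ktime_get_boottime_seconds() -
                                 priv->suspend_at;
    }
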
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
index dd29f46d086b..d32d39fa2686 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
@@ -479,7 +479,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee,
{
struct iw_point *erq = &(wrqu->encoding);
int len, key;
- struct lib80211_crypt_data *crypt;
struct libipw_security *sec = &ieee->sec;
LIBIPW_DEBUG_WX("GET_ENCODE\n");
@@ -492,7 +491,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee,
} else
key = ieee->crypt_info.tx_keyidx;
- crypt = ieee->crypt_info.crypt[key];
erq->flags = key + 1;
if (!sec->enabled) {
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-debug.c b/drivers/net/wireless/intel/iwlegacy/3945-debug.c
index c1b4441fb8b2..a2960032be81 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-debug.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-debug.c
@@ -95,7 +95,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
- "acumulative delta max\n",
+ "accumulative delta max\n",
"Statistics_Rx - OFDM:");
pos +=
scnprintf(buf + pos, bufsz - pos,
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 62a9794f952b..57e3b6cca234 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -476,8 +476,6 @@ il3945_tx_skb(struct il_priv *il,
int txq_id = skb_get_queue_mapping(skb);
u16 len, idx, hdr_len;
u16 firstlen, secondlen;
- u8 id;
- u8 unicast;
u8 sta_id;
u8 tid = 0;
__le16 fc;
@@ -496,9 +494,6 @@ il3945_tx_skb(struct il_priv *il,
goto drop_unlock;
}
- unicast = !is_multicast_ether_addr(hdr->addr1);
- id = 0;
-
fc = hdr->frame_control;
#ifdef CONFIG_IWLEGACY_DEBUG
@@ -957,10 +952,8 @@ il3945_rx_queue_restock(struct il_priv *il)
struct list_head *element;
struct il_rx_buf *rxb;
unsigned long flags;
- int write;
spin_lock_irqsave(&rxq->lock, flags);
- write = rxq->write & ~0x7;
while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
/* Get next free Rx buffer, remove from free list */
element = rxq->rx_free.next;
@@ -2725,7 +2718,6 @@ void
il3945_post_associate(struct il_priv *il)
{
int rc = 0;
- struct ieee80211_conf *conf = NULL;
if (!il->vif || !il->is_open)
return;
@@ -2738,8 +2730,6 @@ il3945_post_associate(struct il_priv *il)
il_scan_cancel_timeout(il, 200);
- conf = &il->hw->conf;
-
il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
il3945_commit_rxon(il);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index dbf164d48ed3..3e568ce2fb20 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -1634,7 +1634,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
{
struct il_channel_info *ch_info;
s8 max_power;
- u8 a_band;
u8 i;
if (il->tx_power_user_lmt == power) {
@@ -1650,7 +1649,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
for (i = 0; i < il->channel_count; i++) {
ch_info = &il->channel_info[i];
- a_band = il_is_channel_a_band(ch_info);
/* find minimum power of all user and regulatory constraints
* (does not consider h/w clipping limitations) */
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 562e94870a9c..280cd8ae1696 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -1338,15 +1338,12 @@ il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
u32 *accum_stats;
u32 *delta, *max_delta;
struct stats_general_common *general, *accum_general;
- struct stats_tx *tx, *accum_tx;
prev_stats = (__le32 *) &il->_4965.stats;
accum_stats = (u32 *) &il->_4965.accum_stats;
size = sizeof(struct il_notif_stats);
general = &il->_4965.stats.general.common;
accum_general = &il->_4965.accum_stats.general.common;
- tx = &il->_4965.stats.tx;
- accum_tx = &il->_4965.accum_stats.tx;
delta = (u32 *) &il->_4965.delta_stats;
max_delta = (u32 *) &il->_4965.max_delta;
@@ -4784,7 +4781,6 @@ static void
il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
struct il_priv *il = context;
- struct il_ucode_header *ucode;
int err;
struct il4965_firmware_pieces pieces;
const unsigned int api_max = il->cfg->ucode_api_max;
@@ -4814,8 +4810,6 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
}
/* Data from ucode file: header followed by uCode images */
- ucode = (struct il_ucode_header *)ucode_raw->data;
-
err = il4965_load_firmware(il, ucode_raw, &pieces);
if (err)
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 4d08d78c6b71..04e376cc898c 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -7,13 +7,13 @@ iwlwifi-objs += iwl-debug.o
iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
-iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
+iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
+iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
-iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index a63ca8820568..fedb108db68f 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -63,6 +63,7 @@
static const struct iwl_base_params iwl2000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -76,6 +77,7 @@ static const struct iwl_base_params iwl2000_base_params = {
static const struct iwl_base_params iwl2030_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 57,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index d4ba66aecdc9..91ca77c7571c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -59,7 +59,7 @@
#define IWL_22000_UCODE_API_MAX 38
/* Lowest firmware API version supported */
-#define IWL_22000_UCODE_API_MIN 24
+#define IWL_22000_UCODE_API_MIN 39
/* NVM versions */
#define IWL_22000_NVM_VERSION 0x0a1d
@@ -73,29 +73,48 @@
#define IWL_22000_SMEM_OFFSET 0x400000
#define IWL_22000_SMEM_LEN 0xD0000
-#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
-#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
-#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
-#define IWL_22000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
-#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
-#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
+#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
+#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
+#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
+#define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
+#define IWL_22000_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
+#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
+#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_JF_MODULE_FIRMWARE(api) \
IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
- IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
+ IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_22000 10
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
.num_of_queues = 512,
+ .max_tfd_queue_size = 256,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
+};
+
+static const struct iwl_base_params iwl_22560_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+ .num_of_queues = 512,
+ .max_tfd_queue_size = 65536,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -110,11 +129,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
-#define IWL_DEVICE_22000 \
+#define IWL_DEVICE_22000_COMMON \
.ucode_api_max = IWL_22000_UCODE_API_MAX, \
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_22000, \
- .base_params = &iwl_22000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \
.non_shared_ant = ANT_A, \
@@ -129,6 +146,10 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
+ .ht_params = &iwl_22000_ht_params, \
+ .nvm_ver = IWL_22000_NVM_VERSION, \
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.use_tfh = true, \
.rf_id = true, \
.gen2 = true, \
@@ -136,86 +157,114 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.dbgc_supported = true, \
.min_umac_error_event_table = 0x400000
+#define IWL_DEVICE_22500 \
+ IWL_DEVICE_22000_COMMON, \
+ .device_family = IWL_DEVICE_FAMILY_22000, \
+ .base_params = &iwl_22000_base_params, \
+ .csr = &iwl_csr_v1
+
+#define IWL_DEVICE_22560 \
+ IWL_DEVICE_22000_COMMON, \
+ .device_family = IWL_DEVICE_FAMILY_22560, \
+ .base_params = &iwl_22560_base_params, \
+ .csr = &iwl_csr_v2
+
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_HR_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
};
const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
.cdb = true,
};
const struct iwl_cfg iwl22000_2ac_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_JF_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
};
const struct iwl_cfg iwl22000_2ax_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
-const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = {
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_22000_HR_F0_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
+ .name = "Intel(R) Dual Band Wireless AX 22000",
+ .fw_name_pre = IWL_22000_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_JF_B0_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_A0_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
+ .name = "Intel(R) Dual Band Wireless AX 22560",
+ .fw_name_pre = IWL_22000_SU_Z0_FW_PRE,
+ IWL_DEVICE_22560,
+ .cdb = true,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
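
The MODULE_FIRMWARE() lines above are assembled purely by the preprocessor:
each per-device prefix is pasted to the stringified API version and a
".ucode" suffix as adjacent string literals. A sketch of one expansion,
using values taken from the hunks above:

    #include <linux/stringify.h>

    #define IWL_22000_HR_B_FW_PRE   "iwlwifi-Qu-b0-hr-b0-"
    #define IWL_22000_UCODE_API_MAX 38

    #define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
            IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"

    /*
     * IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)
     * expands to the adjacent literals
     *     "iwlwifi-Qu-b0-hr-b0-" "38" ".ucode"
     * which the compiler concatenates into "iwlwifi-Qu-b0-hr-b0-38.ucode",
     * the file requested from userspace at load time.
     */
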
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index a224f1be1ec2..36151e61a26f 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -53,6 +53,7 @@
static const struct iwl_base_params iwl5000_base_params = {
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.pll_cfg = true,
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index dbcec7ce7863..b5d8274761d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -72,6 +72,7 @@
static const struct iwl_base_params iwl6000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -84,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
static const struct iwl_base_params iwl6050_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -96,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
static const struct iwl_base_params iwl6000_g2_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 57,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index 69bfa827e82a..a62c8346f13a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -123,6 +123,7 @@
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
.num_of_queues = 31,
+ .max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 7262e973e0d6..c46fa712985b 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -104,6 +104,7 @@
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = 31,
+ .max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index c8ea63d02619..24b2f7cbb308 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -95,6 +95,7 @@
static const struct iwl_base_params iwl9000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000,
.num_of_queues = 31,
+ .max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
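
The repeated .max_tfd_queue_size = 256 additions make explicit the TFD ring
size the older families had effectively hard-coded, while the new
iwl_22560_base_params above allows 65536 entries. Queue read/write pointers
wrap modulo this size, which is why it is kept a power of two; a hedged
sketch of the wrap arithmetic such a parameter implies (helper name
hypothetical):

    /*
     * Sketch: advance a TFD queue index with a per-family ring size.
     * For power-of-two sizes the modulo reduces to a mask.
     */
    static inline unsigned int demo_queue_inc_wrap(unsigned int idx,
                                                   unsigned int max_tfd_queue_size)
    {
            return (idx + 1) & (max_tfd_queue_size - 1);
    }
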
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index 007bfe7656a4..08d3d8a190f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -187,20 +189,4 @@ struct iwl_card_state_notif {
__le32 flags;
} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
-/**
- * struct iwl_fseq_ver_mismatch_nty - Notification about version
- *
- * This notification does not have a direct impact on the init flow.
- * It means that another core (not WiFi) has initiated the FSEQ flow
- * and updated the FSEQ version. The driver only prints an error when
- * this occurs.
- *
- * @aux_read_fseq_ver: auxiliary read FSEQ version
- * @wifi_fseq_ver: FSEQ version (embedded in WiFi)
- */
-struct iwl_fseq_ver_mismatch_ntf {
- __le32 aux_read_fseq_ver;
- __le32 wifi_fseq_ver;
-} __packed; /* FSEQ_VER_MISMATCH_NTFY_API_S_VER_1 */
-
#endif /* __iwl_fw_api_alive_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index f285bacc8726..6dad748e5cdc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -193,7 +193,8 @@ enum iwl_legacy_cmds {
FW_GET_ITEM_CMD = 0x1a,
/**
- * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2,
+ * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
+ * &struct iwl_tx_cmd_gen3,
* response in &struct iwl_mvm_tx_resp or
* &struct iwl_mvm_tx_resp_v3
*/
@@ -646,13 +647,6 @@ enum iwl_system_subcmd_ids {
* @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
*/
INIT_EXTENDED_CFG_CMD = 0x03,
-
- /**
- * @FSEQ_VER_MISMATCH_NTF: Notification about fseq version
- * mismatch during init. The format is specified in
- * &struct iwl_fseq_ver_mismatch_ntf.
- */
- FSEQ_VER_MISMATCH_NTF = 0xFF,
};
#endif /* __iwl_fw_api_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 5f6e855006dd..59b3c6e8f37b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -83,6 +85,16 @@ enum iwl_data_path_subcmd_ids {
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
/**
+ * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd
+ */
+ STA_HE_CTXT_CMD = 0x7,
+
+ /**
+ * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config
+ */
+ RFH_QUEUE_CONFIG_CMD = 0xD,
+
+ /**
* @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
*/
TLC_MNG_CONFIG_CMD = 0xF,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index f2e31e040a7b..55594c93b014 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -279,6 +281,10 @@ enum iwl_mac_filter_flags {
MAC_FILTER_OUT_BCAST = BIT(8),
MAC_FILTER_IN_CRC32 = BIT(11),
MAC_FILTER_IN_PROBE_REQUEST = BIT(12),
+ /**
+ * @MAC_FILTER_IN_11AX: mark BSS as supporting 802.11ax
+ */
+ MAC_FILTER_IN_11AX = BIT(14),
};
/**
@@ -406,4 +412,170 @@ struct iwl_missed_beacons_notif {
__le32 num_recvd_beacons;
} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
+/**
+ * struct iwl_he_backoff_conf - used for backoff configuration
+ * Holds, per trigger-based AC (as set by the MU EDCA Parameter Set info
+ * element), the backoff configuration of the TXF5..TXF8 trigger-based queues.
+ * The MU-TIMER is reloaded w/ MU_TIME each time a frame from the AC is sent via
+ * trigger-based TX.
+ * @cwmin: CW min
+ * @cwmax: CW max
+ * @aifsn: AIFSN
+ * AIFSN=0 means that no backoff from the specified trigger-based AC is
+ * allowed until the MU-TIMER reaches 0
+ * @mu_time: MU time in 8TU units
+ */
+struct iwl_he_backoff_conf {
+ __le16 cwmin;
+ __le16 cwmax;
+ __le16 aifsn;
+ __le16 mu_time;
+} __packed; /* AC_QOS_DOT11AX_API_S */
+
+#define MAX_HE_SUPP_NSS 2
+#define MAX_HE_CHANNEL_BW_INDX 4
+
+/**
+ * struct iwl_he_pkt_ext - QAM thresholds
+ * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
+ * The IE is organized in the following way:
+ * Support for Nss x BW (or RU) matrix:
+ * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
+ * Each entry contains 2 QAM thresholds for 8us and 16us:
+ * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES
+ * i.e. QAM_th1 < QAM_th2, such that if TX uses QAM_tx:
+ * QAM_tx < QAM_th1 --> PPE=0us
+ * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
+ * QAM_th2 <= QAM_tx --> PPE=16us
+ * @pkt_ext_qam_th: QAM thresholds
+ * For each Nss/BW, define 2 QAM thresholds (0..5)
+ * For rates below the low_th, no need for PPE
+ * For rates between low_th and high_th, need 8us PPE
+ * For rates equal to or higher than high_th, need 16us PPE
+ * Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
+ * (0-low_th, 1-high_th)
+ */
+struct iwl_he_pkt_ext {
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S */
+
+/**
+ * enum iwl_he_sta_ctxt_flags - HE STA context flags
+ * @STA_CTXT_HE_REF_BSSID_VALID: ref bssid addr valid (for receiving specific
+ * control frames such as TRIG, NDPA, BACK)
+ * @STA_CTXT_HE_BSS_COLOR_DIS: BSS color disable, don't use the BSS
+ * color for RX filter but use MAC header
+ * @STA_CTXT_HE_PARTIAL_BSS_COLOR: partial BSS color allocation
+ * @STA_CTXT_HE_32BIT_BA_BITMAP: indicates the receiver supports BA bitmap
+ * of 32-bits
+ * @STA_CTXT_HE_PACKET_EXT: indicates that the packet-extension info is valid
+ * and should be used
+ * @STA_CTXT_HE_TRIG_RND_ALLOC: indicates that trigger based random allocation
+ * is enabled according to UORA element existence
+ * @STA_CTXT_HE_CONST_TRIG_RND_ALLOC: used for AV testing
+ * @STA_CTXT_HE_ACK_ENABLED: indicates that the AP supports receiving ACK-
+ * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG
+ * @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA
+ * parameter set, i.e. the backoff counters for trig-based ACs
+ */
+enum iwl_he_sta_ctxt_flags {
+ STA_CTXT_HE_REF_BSSID_VALID = BIT(4),
+ STA_CTXT_HE_BSS_COLOR_DIS = BIT(5),
+ STA_CTXT_HE_PARTIAL_BSS_COLOR = BIT(6),
+ STA_CTXT_HE_32BIT_BA_BITMAP = BIT(7),
+ STA_CTXT_HE_PACKET_EXT = BIT(8),
+ STA_CTXT_HE_TRIG_RND_ALLOC = BIT(9),
+ STA_CTXT_HE_CONST_TRIG_RND_ALLOC = BIT(10),
+ STA_CTXT_HE_ACK_ENABLED = BIT(11),
+ STA_CTXT_HE_MU_EDCA_CW = BIT(12),
+};
+
+/**
+ * enum iwl_he_htc_flags - HE HTC support flags
+ * @IWL_HE_HTC_SUPPORT: HE-HTC support
+ * @IWL_HE_HTC_UL_MU_RESP_SCHED: HE UL MU response schedule
+ * support via A-control field
+ * @IWL_HE_HTC_BSR_SUPP: BSR support in A-control field
+ * @IWL_HE_HTC_OMI_SUPP: A-OMI support in A-control field
+ * @IWL_HE_HTC_BQR_SUPP: A-BQR support in A-control field
+ */
+enum iwl_he_htc_flags {
+ IWL_HE_HTC_SUPPORT = BIT(0),
+ IWL_HE_HTC_UL_MU_RESP_SCHED = BIT(3),
+ IWL_HE_HTC_BSR_SUPP = BIT(4),
+ IWL_HE_HTC_OMI_SUPP = BIT(5),
+ IWL_HE_HTC_BQR_SUPP = BIT(6),
+};
+
+/*
+ * @IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK: the STA does not provide HE MFB
+ * @IWL_HE_HTC_LINK_ADAP_UNSOLICITED: the STA provides only unsolicited HE MFB
+ * @IWL_HE_HTC_LINK_ADAP_BOTH: the STA is capable of providing HE MFB in
+ * response to HE MRQ and if the STA provides unsolicited HE MFB
+ */
+#define IWL_HE_HTC_LINK_ADAP_POS (1)
+#define IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK (0)
+#define IWL_HE_HTC_LINK_ADAP_UNSOLICITED (2 << IWL_HE_HTC_LINK_ADAP_POS)
+#define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS)
+
+/**
+ * struct iwl_he_sta_context_cmd - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see &enum iwl_he_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max num of "open" MSDUs in the receiver (as a power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @reserved3: reserved byte for future use
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ */
+struct iwl_he_sta_context_cmd {
+ u8 sta_id;
+ u8 tid_limit;
+ u8 reserved1;
+ u8 reserved2;
+ __le32 flags;
+
+ /* The below fields are set via Multiple BSSID IE */
+ u8 ref_bssid_addr[6];
+ __le16 reserved0;
+
+ /* The below fields are set via HE-capabilities IE */
+ __le32 htc_flags;
+
+ u8 frag_flags;
+ u8 frag_level;
+ u8 frag_max_num;
+ u8 frag_min_size;
+
+ /* The below fields are set via PPE thresholds element */
+ struct iwl_he_pkt_ext pkt_ext;
+
+ /* The below fields are set via HE-Operation IE */
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ __le16 frame_time_rts_th;
+
+ /* Random access parameter set (i.e. RAPS) */
+ u8 rand_alloc_ecwmin;
+ u8 rand_alloc_ecwmax;
+ __le16 reserved3;
+
+ /* The below fields are set via MU EDCA parameter set element */
+ struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S */
+
#endif /* __iwl_fw_api_mac_h__ */
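
The iwl_he_pkt_ext kernel-doc above defines a two-threshold mapping from the
QAM order used on TX to the required packet-extension (PPE) duration. A small
sketch of that mapping, following the comment's own encoding (0=BPSK ...
5=1024QAM, result in microseconds); the helper name is hypothetical:

    /* Sketch only; mirrors the rule documented above:
     * QAM_tx < low_th            -> 0us PPE
     * low_th <= QAM_tx < high_th -> 8us PPE
     * high_th <= QAM_tx          -> 16us PPE
     */
    static u8 demo_he_ppe_us(const struct iwl_he_pkt_ext *ext,
                             u8 nss, u8 bw, u8 qam_tx)
    {
            u8 low_th = ext->pkt_ext_qam_th[nss][bw][0];
            u8 high_th = ext->pkt_ext_qam_th[nss][bw][1];

            if (qam_tx < low_th)
                    return 0;
            if (qam_tx < high_th)
                    return 8;
            return 16;
    }
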
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 8d6dc9189985..6c5338364794 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -195,7 +195,6 @@ struct iwl_nvm_get_info_general {
* @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled
* @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled
* @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled
- * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
* @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled
* @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled
* @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled
@@ -206,6 +205,9 @@ enum iwl_nvm_mac_sku_flags {
NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1),
NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2),
NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3),
+ /**
+ * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
+ */
NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4),
NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5),
NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 21e13a315421..087fae91baef 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -314,8 +314,11 @@ enum {
IWL_RATE_MCS_8_INDEX,
IWL_RATE_MCS_9_INDEX,
IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
+ IWL_RATE_MCS_10_INDEX,
+ IWL_RATE_MCS_11_INDEX,
+ IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX,
IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
- IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1,
+ IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1,
};
#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
@@ -440,8 +443,8 @@ enum {
#define RATE_LEGACY_RATE_MSK 0xff
/* Bit 10 - OFDM HE */
-#define RATE_MCS_OFDM_HE_POS 10
-#define RATE_MCS_OFDM_HE_MSK BIT(RATE_MCS_OFDM_HE_POS)
+#define RATE_MCS_HE_POS 10
+#define RATE_MCS_HE_MSK BIT(RATE_MCS_HE_POS)
/*
* Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
@@ -482,15 +485,33 @@ enum {
#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS)
/*
- * Bit 20-21: HE guard interval and LTF type.
- * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us,
- * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us
+ * Bit 20-21: HE LTF type and guard interval
+ * HE (ext) SU:
+ * 0 1xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 & SGI (bit 13) clear 4xLTF+3.2us
+ * 3 & SGI (bit 13) set 4xLTF+0.8us
+ * HE MU:
+ * 0 4xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * HE TRIG:
+ * 0 1xLTF+1.6us
+ * 1 2xLTF+1.6us
+ * 2 4xLTF+3.2us
+ * 3 (does not occur)
*/
#define RATE_MCS_HE_GI_LTF_POS 20
#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS)
/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
#define RATE_MCS_HE_TYPE_POS 22
+#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS)
/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */
@@ -501,6 +522,9 @@ enum {
#define RATE_MCS_LDPC_POS 27
#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS)
+/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
+#define RATE_MCS_HE_106T_POS 28
+#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS)
/* Link Quality definitions */
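
With the additions above, a rate_n_flags word flags an HE rate in bit 10,
carries the HE type in bits 22-23, and holds a combined LTF/GI code in bits
20-21 whose meaning depends on that type. A sketch of decoding the SU case,
using only masks defined in the hunks above (returns the raw 2-bit code, or
-1 for a non-HE or non-SU rate; function name hypothetical):

    static int demo_he_su_ltf_gi(u32 rate_n_flags)
    {
            if (!(rate_n_flags & RATE_MCS_HE_MSK))
                    return -1;      /* not an HE rate */

            if ((rate_n_flags & RATE_MCS_HE_TYPE_MSK) != RATE_MCS_HE_TYPE_SU)
                    return -1;      /* EXT_SU/MU/TRIG use other tables */

            /* Per the comment above: 0 = 1xLTF+0.8us, 1 = 2xLTF+0.8us,
             * 2 = 2xLTF+1.6us, 3 = 4xLTF+3.2us (4xLTF+0.8us if the SGI
             * bit 13 is also set).
             */
            return (rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
                   RATE_MCS_HE_GI_LTF_POS;
    }
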
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 7e570c4a9df0..2f599353c885 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -343,6 +345,169 @@ enum iwl_rx_mpdu_mac_info {
IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0,
};
+/*
+ * enum iwl_rx_he_phy - HE PHY data
+ */
+enum iwl_rx_he_phy {
+ IWL_RX_HE_PHY_BEAM_CHNG = BIT(0),
+ IWL_RX_HE_PHY_UPLINK = BIT(1),
+ IWL_RX_HE_PHY_BSS_COLOR_MASK = 0xfc,
+ IWL_RX_HE_PHY_SPATIAL_REUSE_MASK = 0xf00,
+ IWL_RX_HE_PHY_SU_EXT_BW10 = BIT(12),
+ IWL_RX_HE_PHY_TXOP_DUR_MASK = 0xfe000,
+ IWL_RX_HE_PHY_LDPC_EXT_SYM = BIT(20),
+ IWL_RX_HE_PHY_PRE_FEC_PAD_MASK = 0x600000,
+ IWL_RX_HE_PHY_PE_DISAMBIG = BIT(23),
+ IWL_RX_HE_PHY_DOPPLER = BIT(24),
+ /* 6 bits reserved */
+ IWL_RX_HE_PHY_DELIM_EOF = BIT(31),
+
+ /* second dword - MU data */
+ IWL_RX_HE_PHY_SIGB_COMPRESSION = BIT_ULL(32 + 0),
+ IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL,
+ IWL_RX_HE_PHY_HE_LTF_NUM_MASK = 0xe000000000ULL,
+ IWL_RX_HE_PHY_RU_ALLOC_SEC80 = BIT_ULL(32 + 8),
+ /* trigger encoded */
+ IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL,
+ IWL_RX_HE_PHY_SIGB_MCS_MASK = 0xf000000000000ULL,
+ /* 1 bit reserved */
+ IWL_RX_HE_PHY_SIGB_DCM = BIT_ULL(32 + 21),
+ IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK = 0xc0000000000000ULL,
+ /* 8 bits reserved */
+};
+
+/**
+ * struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc_v1 {
+ /* DW7 - carries rss_hash only when rpa_en == 1 */
+ /**
+ * @rss_hash: RSS hash value
+ */
+ __le32 rss_hash;
+ /* DW8 - carries filter_match only when rpa_en == 1 */
+ /**
+ * @filter_match: filter match value
+ */
+ __le32 filter_match;
+ /* DW9 */
+ /**
+ * @rate_n_flags: RX rate/flags encoding
+ */
+ __le32 rate_n_flags;
+ /* DW10 */
+ /**
+ * @energy_a: energy chain A
+ */
+ u8 energy_a;
+ /**
+ * @energy_b: energy chain B
+ */
+ u8 energy_b;
+ /**
+ * @channel: channel number
+ */
+ u8 channel;
+ /**
+ * @mac_context: MAC context mask
+ */
+ u8 mac_context;
+ /* DW11 */
+ /**
+ * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+ */
+ __le32 gp2_on_air_rise;
+ /* DW12 & DW13 */
+ union {
+ /**
+ * @tsf_on_air_rise:
+ * TSF value on air rise (INA), only valid if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+ */
+ __le64 tsf_on_air_rise;
+ /**
+ * @he_phy_data:
+ * HE PHY data, see &enum iwl_rx_he_phy, valid
+ * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
+ */
+ __le64 he_phy_data;
+ };
+} __packed;
+
+/**
+ * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc_v3 {
+ /* DW7 - carries filter_match only when rpa_en == 1 */
+ /**
+ * @filter_match: filter match value
+ */
+ __le32 filter_match;
+ /* DW8 - carries rss_hash only when rpa_en == 1 */
+ /**
+ * @rss_hash: RSS hash value
+ */
+ __le32 rss_hash;
+ /* DW9 */
+ /**
+ * @partial_hash: 31:0 ip/tcp header hash
+ * w/o some fields (such as IP SRC addr)
+ */
+ __le32 partial_hash;
+ /* DW10 */
+ /**
+ * @raw_xsum: raw xsum value
+ */
+ __le32 raw_xsum;
+ /* DW11 */
+ /**
+ * @rate_n_flags: RX rate/flags encoding
+ */
+ __le32 rate_n_flags;
+ /* DW12 */
+ /**
+ * @energy_a: energy chain A
+ */
+ u8 energy_a;
+ /**
+ * @energy_b: energy chain B
+ */
+ u8 energy_b;
+ /**
+ * @channel: channel number
+ */
+ u8 channel;
+ /**
+ * @mac_context: MAC context mask
+ */
+ u8 mac_context;
+ /* DW13 */
+ /**
+ * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+ */
+ __le32 gp2_on_air_rise;
+ /* DW14 & DW15 */
+ union {
+ /**
+ * @tsf_on_air_rise:
+ * TSF value on air rise (INA), only valid if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+ */
+ __le64 tsf_on_air_rise;
+ /**
+ * @he_phy_data:
+ * HE PHY data, see &enum iwl_rx_he_phy, valid
+ * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
+ */
+ __le64 he_phy_data;
+ };
+ /* DW16 & DW17 */
+ /**
+ * @reserved: reserved
+ */
+ __le32 reserved[2];
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+
/**
* struct iwl_rx_mpdu_desc - RX MPDU descriptor
*/
@@ -400,51 +565,14 @@ struct iwl_rx_mpdu_desc {
* @reorder_data: &enum iwl_rx_mpdu_reorder_data
*/
__le32 reorder_data;
- /* DW7 - carries rss_hash only when rpa_en == 1 */
- /**
- * @rss_hash: RSS hash value
- */
- __le32 rss_hash;
- /* DW8 - carries filter_match only when rpa_en == 1 */
- /**
- * @filter_match: filter match value
- */
- __le32 filter_match;
- /* DW9 */
- /**
- * @rate_n_flags: RX rate/flags encoding
- */
- __le32 rate_n_flags;
- /* DW10 */
- /**
- * @energy_a: energy chain A
- */
- u8 energy_a;
- /**
- * @energy_b: energy chain B
- */
- u8 energy_b;
- /**
- * @channel: channel number
- */
- u8 channel;
- /**
- * @mac_context: MAC context mask
- */
- u8 mac_context;
- /* DW11 */
- /**
- * @gp2_on_air_rise: GP2 timer value on air rise (INA)
- */
- __le32 gp2_on_air_rise;
- /* DW12 & DW13 */
- /**
- * @tsf_on_air_rise:
- * TSF value on air rise (INA), only valid if
- * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
- */
- __le64 tsf_on_air_rise;
-} __packed;
+
+ union {
+ struct iwl_rx_mpdu_desc_v1 v1;
+ struct iwl_rx_mpdu_desc_v3 v3;
+ };
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+
+#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
struct iwl_frame_release {
u8 baid;
@@ -587,4 +715,36 @@ struct iwl_ba_window_status_notif {
__le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
+/**
+ * struct iwl_rfh_queue_data - per-queue RX queue configuration data
+ * @q_num: Q num
+ * @enable: enable queue
+ * @reserved: alignment
+ * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
+ * @fr_bd_cb: DMA address of freeRB table
+ * @ur_bd_cb: DMA address of used RB table
+ * @fr_bd_wid: Initial index of the free table
+ */
+struct iwl_rfh_queue_data {
+ u8 q_num;
+ u8 enable;
+ __le16 reserved;
+ __le64 urbd_stts_wrptr;
+ __le64 fr_bd_cb;
+ __le64 ur_bd_cb;
+ __le32 fr_bd_wid;
+} __packed; /* RFH_QUEUE_CONFIG_S_VER_1 */
+
+/**
+ * struct iwl_rfh_queue_config - RX queue configuration
+ * @num_queues: number of queues configured
+ * @reserved: alignment
+ * @data: DMA addresses per-queue
+ */
+struct iwl_rfh_queue_config {
+ u8 num_queues;
+ u8 reserved[3];
+ struct iwl_rfh_queue_data data[];
+} __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */
+
#endif /* __iwl_fw_api_rx_h__ */
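
Two patterns above deserve unpacking. IWL_RX_DESC_SIZE_V1 uses offsetofend()
so the driver can treat only the v1 portion of the now-unionized descriptor
as the DMA'd size on devices that predate the v3 layout. And
iwl_rfh_queue_config ends in a flexible array member sized at allocation
time, one iwl_rfh_queue_data entry per RX queue. A hedged sketch of the
allocation arithmetic (function name hypothetical; the actual command send
is omitted):

    #include <linux/slab.h>

    static struct iwl_rfh_queue_config *demo_alloc_rfh_cmd(int num_queues)
    {
            struct iwl_rfh_queue_config *cmd;

            /* header plus one data[] entry per queue */
            cmd = kzalloc(sizeof(*cmd) +
                          num_queues * sizeof(cmd->data[0]), GFP_KERNEL);
            if (cmd)
                    cmd->num_queues = num_queues;
            return cmd;
    }
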
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index a2a40b515a3c..514b86123d3d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -320,6 +322,29 @@ struct iwl_tx_cmd_gen2 {
struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_7 */
+/**
+ * struct iwl_tx_cmd_gen3 - TX command struct to FW for 22560 devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @flags: combination of &enum iwl_tx_cmd_flags
+ * @offload_assist: TX offload configuration
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @ttl: time to live - packet lifetime limit; the FW drops the packet
+ * once this time has passed.
+ * @hdr: 802.11 header
+ */
+struct iwl_tx_cmd_gen3 {
+ __le16 len;
+ __le16 flags;
+ __le32 offload_assist;
+ struct iwl_dram_sec_info dram_info;
+ __le32 rate_n_flags;
+ __le64 ttl;
+ struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_8 */
+
/*
* TX response related data
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c b/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c
deleted file mode 100644
index 6f75985eea66..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#include "iwl-drv.h"
-#include "runtime.h"
-#include "fw/api/commands.h"
-#include "fw/api/alive.h"
-
-static void iwl_fwrt_fseq_ver_mismatch(struct iwl_fw_runtime *fwrt,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_fseq_ver_mismatch_ntf *fseq = (void *)pkt->data;
-
- IWL_ERR(fwrt, "FSEQ version mismatch (aux: %d, wifi: %d)\n",
- __le32_to_cpu(fseq->aux_read_fseq_ver),
- __le32_to_cpu(fseq->wifi_fseq_ver));
-}
-
-void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
-
- switch (cmd) {
- case WIDE_ID(SYSTEM_GROUP, FSEQ_VER_MISMATCH_NTF):
- iwl_fwrt_fseq_ver_mismatch(fwrt, rxb);
- break;
- default:
- break;
- }
-}
-IWL_EXPORT_SYMBOL(iwl_fwrt_handle_notification);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index fa283285fcbe..a31a42e673c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -243,39 +243,47 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return;
- /* Pull RXF1 */
- iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
- /* Pull RXF2 */
- iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
- RXF_DIFF_FROM_PREV, 1);
- /* Pull LMAC2 RXF1 */
- if (fwrt->smem_cfg.num_lmacs > 1)
- iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size,
- LMAC2_PRPH_OFFSET, 2);
-
- /* Pull TXF data from LMAC1 */
- for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
- /* Mark the number of TXF we're pulling now */
- iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
- iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i],
- 0, i);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+ /* Pull RXF1 */
+ iwl_fwrt_dump_rxf(fwrt, dump_data,
+ cfg->lmac[0].rxfifo1_size, 0, 0);
+ /* Pull RXF2 */
+ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
+ RXF_DIFF_FROM_PREV, 1);
+ /* Pull LMAC2 RXF1 */
+ if (fwrt->smem_cfg.num_lmacs > 1)
+ iwl_fwrt_dump_rxf(fwrt, dump_data,
+ cfg->lmac[1].rxfifo1_size,
+ LMAC2_PRPH_OFFSET, 2);
}
- /* Pull TXF data from LMAC2 */
- if (fwrt->smem_cfg.num_lmacs > 1) {
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+ /* Pull TXF data from LMAC1 */
for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
- iwl_trans_write_prph(fwrt->trans,
- TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
- i);
+ iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
iwl_fwrt_dump_txf(fwrt, dump_data,
- cfg->lmac[1].txfifo_size[i],
- LMAC2_PRPH_OFFSET,
- i + cfg->num_txfifo_entries);
+ cfg->lmac[0].txfifo_size[i], 0, i);
+ }
+
+ /* Pull TXF data from LMAC2 */
+ if (fwrt->smem_cfg.num_lmacs > 1) {
+ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries;
+ i++) {
+ /* Mark the number of TXF we're pulling now */
+ iwl_trans_write_prph(fwrt->trans,
+ TXF_LARC_NUM +
+ LMAC2_PRPH_OFFSET, i);
+ iwl_fwrt_dump_txf(fwrt, dump_data,
+ cfg->lmac[1].txfifo_size[i],
+ LMAC2_PRPH_OFFSET,
+ i + cfg->num_txfifo_entries);
+ }
}
}
- if (fw_has_capa(&fwrt->fw->ucode_capa,
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+ fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
/* Pull UMAC internal TXF data from all TXFs */
for (i = 0;
@@ -600,42 +608,54 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
fifo_data_len = 0;
- /* Count RXF2 size */
- if (mem_cfg->rxfifo2_size) {
- /* Add header info */
- fifo_data_len += mem_cfg->rxfifo2_size +
- sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
- }
-
- /* Count RXF1 sizes */
- for (i = 0; i < mem_cfg->num_lmacs; i++) {
- if (!mem_cfg->lmac[i].rxfifo1_size)
- continue;
-
- /* Add header info */
- fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
- sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
- }
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
- /* Count TXF sizes */
- for (i = 0; i < mem_cfg->num_lmacs; i++) {
- int j;
+ /* Count RXF2 size */
+ if (mem_cfg->rxfifo2_size) {
+ /* Add header info */
+ fifo_data_len +=
+ mem_cfg->rxfifo2_size +
+ sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ }
- for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
- if (!mem_cfg->lmac[i].txfifo_size[j])
+ /* Count RXF1 sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ if (!mem_cfg->lmac[i].rxfifo1_size)
continue;
/* Add header info */
fifo_data_len +=
- mem_cfg->lmac[i].txfifo_size[j] +
+ mem_cfg->lmac[i].rxfifo1_size +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
}
- if (fw_has_capa(&fwrt->fw->ucode_capa,
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+ size_t fifo_const_len = sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+
+ /* Count TXF sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ int j;
+
+ for (j = 0; j < mem_cfg->num_txfifo_entries;
+ j++) {
+ if (!mem_cfg->lmac[i].txfifo_size[j])
+ continue;
+
+ /* Add header info */
+ fifo_data_len +=
+ fifo_const_len +
+ mem_cfg->lmac[i].txfifo_size[j];
+ }
+ }
+ }
+
+ if ((fwrt->fw->dbg_dump_mask &
+ BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
+ fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
for (i = 0;
i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
@@ -652,7 +672,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
/* Make room for PRPH registers */
- if (!fwrt->trans->cfg->gen2) {
+ if (!fwrt->trans->cfg->gen2 &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
i++) {
/* The range includes both boundaries */
@@ -667,7 +688,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
if (!fwrt->trans->cfg->gen2 &&
- fwrt->trans->cfg->mq_rx_supported) {
+ fwrt->trans->cfg->mq_rx_supported &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
for (i = 0; i <
ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
/* The range includes both boundaries */
@@ -681,34 +703,42 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
}
- if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
}
file_len = sizeof(*dump_file) +
- sizeof(*dump_data) * 3 +
- sizeof(*dump_smem_cfg) +
fifo_data_len +
prph_len +
- radio_len +
- sizeof(*dump_info);
-
- /* Make room for the SMEM, if it exists */
- if (smem_len)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
- /* Make room for the secondary SRAM, if it exists */
- if (sram2_len)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
- /* Make room for MEM segments */
- for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
- le32_to_cpu(fw_dbg_mem[i].len);
+ radio_len;
+
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
+ file_len += sizeof(*dump_data) + sizeof(*dump_info);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
+ file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
+
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+ /* Make room for the SMEM, if it exists */
+ if (smem_len)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ smem_len;
+
+ /* Make room for the secondary SRAM, if it exists */
+ if (sram2_len)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ sram2_len;
+
+ /* Make room for MEM segments */
+ for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ le32_to_cpu(fw_dbg_mem[i].len);
+ }
}
/* Make room for fw's virtual image pages, if it exists */
- if (!fwrt->trans->cfg->gen2 &&
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+ !fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block)
file_len += fwrt->num_of_paging_blk *
@@ -722,12 +752,14 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
sizeof(*dump_info) + sizeof(*dump_smem_cfg);
}
- if (fwrt->dump.desc)
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ fwrt->dump.desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
fwrt->dump.desc->len;
- if (!fwrt->fw->n_dbg_mem_tlv)
- file_len += sram_len + sizeof(*dump_mem);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
+ !fwrt->fw->n_dbg_mem_tlv)
+ file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem);
dump_file = vzalloc(file_len);
if (!dump_file) {
@@ -740,48 +772,56 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_data = (void *)dump_file->data;
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
- dump_data->len = cpu_to_le32(sizeof(*dump_info));
- dump_info = (void *)dump_data->data;
- dump_info->device_family =
- fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
- cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
- cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
- dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
- memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
- sizeof(dump_info->fw_human_readable));
- strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
- sizeof(dump_info->dev_human_readable));
- strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
- sizeof(dump_info->bus_human_readable));
-
- dump_data = iwl_fw_error_next_data(dump_data);
-
- /* Dump shared memory configuration */
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
- dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
- dump_smem_cfg = (void *)dump_data->data;
- dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
- dump_smem_cfg->num_txfifo_entries =
- cpu_to_le32(mem_cfg->num_txfifo_entries);
- for (i = 0; i < MAX_NUM_LMAC; i++) {
- int j;
-
- for (j = 0; j < TX_FIFO_MAX_NUM; j++)
- dump_smem_cfg->lmac[i].txfifo_size[j] =
- cpu_to_le32(mem_cfg->lmac[i].txfifo_size[j]);
- dump_smem_cfg->lmac[i].rxfifo1_size =
- cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
- }
- dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size);
- dump_smem_cfg->internal_txfifo_addr =
- cpu_to_le32(mem_cfg->internal_txfifo_addr);
- for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
- dump_smem_cfg->internal_txfifo_size[i] =
- cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
+ dump_data->len = cpu_to_le32(sizeof(*dump_info));
+ dump_info = (void *)dump_data->data;
+ dump_info->device_family =
+ fwrt->trans->cfg->device_family ==
+ IWL_DEVICE_FAMILY_7000 ?
+ cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
+ cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
+ dump_info->hw_step =
+ cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
+ memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
+ sizeof(dump_info->fw_human_readable));
+ strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
+ sizeof(dump_info->dev_human_readable) - 1);
+ strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
+ sizeof(dump_info->bus_human_readable) - 1);
+
+ dump_data = iwl_fw_error_next_data(dump_data);
}
- dump_data = iwl_fw_error_next_data(dump_data);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
+ /* Dump shared memory configuration */
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
+ dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
+ dump_smem_cfg = (void *)dump_data->data;
+ dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
+ dump_smem_cfg->num_txfifo_entries =
+ cpu_to_le32(mem_cfg->num_txfifo_entries);
+ for (i = 0; i < MAX_NUM_LMAC; i++) {
+ int j;
+ u32 *txf_size = mem_cfg->lmac[i].txfifo_size;
+
+ for (j = 0; j < TX_FIFO_MAX_NUM; j++)
+ dump_smem_cfg->lmac[i].txfifo_size[j] =
+ cpu_to_le32(txf_size[j]);
+ dump_smem_cfg->lmac[i].rxfifo1_size =
+ cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
+ }
+ dump_smem_cfg->rxfifo2_size =
+ cpu_to_le32(mem_cfg->rxfifo2_size);
+ dump_smem_cfg->internal_txfifo_addr =
+ cpu_to_le32(mem_cfg->internal_txfifo_addr);
+ for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
+ dump_smem_cfg->internal_txfifo_size[i] =
+ cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
+ }
+
+ dump_data = iwl_fw_error_next_data(dump_data);
+ }
/* We only dump the FIFOs if the FW is in error state */
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
@@ -790,7 +830,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
iwl_read_radio_regs(fwrt, &dump_data);
}
- if (fwrt->dump.desc) {
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ fwrt->dump.desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
fwrt->dump.desc->len);
@@ -805,7 +846,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
if (monitor_dump_only)
goto dump_trans_data;
- if (!fwrt->fw->n_dbg_mem_tlv) {
+ if (!fwrt->fw->n_dbg_mem_tlv &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -821,6 +863,9 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
bool success;
+ if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)))
+ break;
+
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -854,7 +899,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_data = iwl_fw_error_next_data(dump_data);
}
- if (smem_len) {
+ if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
@@ -867,7 +912,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_data = iwl_fw_error_next_data(dump_data);
}
- if (sram2_len) {
+ if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
@@ -881,7 +926,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
/* Dump fw's virtual image */
- if (!fwrt->trans->cfg->gen2 &&
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+ !fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block) {
IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
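
Note on the dbg.c hunks above: they all apply one pattern — every dump section is gated on fwrt->fw->dbg_dump_mask twice, once while computing file_len and once while writing the section into the vzalloc'd buffer, so both passes must use identical conditions or the layout drifts. A minimal userspace sketch of that size-then-fill pattern, with made-up section types and sizes (nothing here is iwlwifi API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DUMP_BIT(type)	(1u << (type))

enum section { SEC_INFO, SEC_MEM, SEC_PAGING, SEC_MAX };

static const size_t sec_len[SEC_MAX] = { 16, 64, 32 };

/* Pass 1: size only the sections enabled in the mask. */
static size_t dump_size(uint32_t mask)
{
	size_t len = 0;

	for (int s = 0; s < SEC_MAX; s++)
		if (mask & DUMP_BIT(s))
			len += sec_len[s];
	return len;
}

/* Pass 2: fill with the *same* conditions, or the offsets drift. */
static void dump_fill(uint32_t mask, uint8_t *buf)
{
	for (int s = 0; s < SEC_MAX; s++) {
		if (!(mask & DUMP_BIT(s)))
			continue;
		memset(buf, s, sec_len[s]);	/* stand-in for real data */
		buf += sec_len[s];
	}
}

int main(void)
{
	uint32_t mask = DUMP_BIT(SEC_INFO) | DUMP_BIT(SEC_MEM);
	size_t len = dump_size(mask);
	uint8_t *buf = calloc(1, len);

	if (!buf)
		return 1;
	dump_fill(mask, buf);
	printf("dump is %zu bytes\n", len);
	free(buf);
	return 0;
}

If a mask check were added to only one of the two passes, the fill step would either overrun the buffer or leave a gap, which is why the patch mirrors every condition in both places.
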
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 9d939cbaf6c6..bbf2b265a06a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -146,6 +146,9 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
IWL_UCODE_TLV_FW_MEM_SEG = 51,
IWL_UCODE_TLV_IML = 52,
+
+ /* TLVs 0x1000-0x2000 are for internal driver usage */
+ IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
};
struct iwl_ucode_tlv {
@@ -318,7 +321,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
* is supported.
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
- * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used)
* @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
* @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
* @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
@@ -889,39 +892,4 @@ struct iwl_fw_dbg_conf_tlv {
struct iwl_fw_dbg_conf_hcmd hcmd;
} __packed;
-/**
- * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
- * @max_scan_cache_size: total space allocated for scan results (in bytes).
- * @max_scan_buckets: maximum number of channel buckets.
- * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
- * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
- * @max_scan_reporting_threshold: max possible report threshold. in percentage.
- * @max_hotlist_aps: maximum number of entries for hotlist APs.
- * @max_significant_change_aps: maximum number of entries for significant
- * change APs.
- * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
- * hold.
- * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
- * @max_number_epno_networks: max number of epno entries.
- * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
- * specified.
- * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
- * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
- */
-struct iwl_fw_gscan_capabilities {
- __le32 max_scan_cache_size;
- __le32 max_scan_buckets;
- __le32 max_ap_cache_per_scan;
- __le32 max_rssi_sample_size;
- __le32 max_scan_reporting_threshold;
- __le32 max_hotlist_aps;
- __le32 max_significant_change_aps;
- __le32 max_bssid_history_entries;
- __le32 max_hotlist_ssids;
- __le32 max_number_epno_networks;
- __le32 max_number_epno_networks_by_ssid;
- __le32 max_number_of_white_listed_ssid;
- __le32 max_number_of_black_listed_ssid;
-} __packed;
-
#endif /* __iwl_fw_file_h__ */
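
IWL_UCODE_TLV_FW_DBG_DUMP_LST is deliberately placed at 0x1000, far above the firmware-assigned TLV numbers (which stop at 52 here), so driver-internal TLVs can never collide with IDs a future firmware build hands out. A hedged sketch of that convention — the range-check helper is hypothetical, not an iwlwifi function:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum tlv_type {
	TLV_FW_MEM_SEG		= 51,	/* firmware-assigned IDs end here */
	TLV_IML			= 52,
	/* 0x1000-0x2000 reserved for driver-internal usage */
	TLV_FW_DBG_DUMP_LST	= 0x1000,
};

/* Hypothetical helper: checks whether a TLV ID falls in the
 * driver-internal window rather than the firmware-assigned range. */
static bool tlv_is_driver_internal(uint32_t type)
{
	return type >= 0x1000 && type < 0x2000;
}

int main(void)
{
	printf("%d %d\n", tlv_is_driver_internal(TLV_IML),
	       tlv_is_driver_internal(TLV_FW_DBG_DUMP_LST));	/* 0 1 */
	return 0;
}
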
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index f4912382b6af..0861b97c4233 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -193,41 +193,6 @@ struct iwl_fw_cscheme_list {
} __packed;
/**
- * struct iwl_gscan_capabilities - gscan capabilities supported by FW
- * @max_scan_cache_size: total space allocated for scan results (in bytes).
- * @max_scan_buckets: maximum number of channel buckets.
- * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
- * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
- * @max_scan_reporting_threshold: max possible report threshold. in percentage.
- * @max_hotlist_aps: maximum number of entries for hotlist APs.
- * @max_significant_change_aps: maximum number of entries for significant
- * change APs.
- * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
- * hold.
- * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
- * @max_number_epno_networks: max number of epno entries.
- * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
- * specified.
- * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
- * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
- */
-struct iwl_gscan_capabilities {
- u32 max_scan_cache_size;
- u32 max_scan_buckets;
- u32 max_ap_cache_per_scan;
- u32 max_rssi_sample_size;
- u32 max_scan_reporting_threshold;
- u32 max_hotlist_aps;
- u32 max_significant_change_aps;
- u32 max_bssid_history_entries;
- u32 max_hotlist_ssids;
- u32 max_number_epno_networks;
- u32 max_number_epno_networks_by_ssid;
- u32 max_number_of_white_listed_ssid;
- u32 max_number_of_black_listed_ssid;
-};
-
-/**
* enum iwl_fw_type - iwlwifi firmware type
* @IWL_FW_DVM: DVM firmware
* @IWL_FW_MVM: MVM firmware
@@ -298,7 +263,7 @@ struct iwl_fw {
size_t n_dbg_mem_tlv;
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
u8 dbg_dest_reg_num;
- struct iwl_gscan_capabilities gscan_capa;
+ u32 dbg_dump_mask;
};
static inline const char *get_fw_dbg_mode_string(int mode)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index d8db1dd100b0..ed23367f7088 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -168,7 +168,4 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt);
void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
-void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
- struct iwl_rx_cmd_buffer *rxb);
-
#endif /* __iwl_fw_runtime_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index fb4b6442b4d7..ff85d69c2a8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -143,7 +145,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
return;
pkt = cmd.resp_pkt;
- if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+ if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
iwl_parse_shared_mem_22000(fwrt, pkt);
else
iwl_parse_shared_mem(fwrt, pkt);
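
Switching "==" to ">=" here works because enum iwl_device_family is declared in ascending hardware order, so one ordered comparison covers 22000 and every newer family, including the 22560 entry added below. A small sketch of why the enum ordering matters (names abbreviated, not the driver's types):

#include <stdio.h>

enum family { FAM_7000, FAM_8000, FAM_9000, FAM_22000, FAM_22560 };

static const char *mem_parser(enum family f)
{
	/* ">=" picks the new parser for 22000 and anything newer,
	 * with no per-family special case to forget. */
	return f >= FAM_22000 ? "parse_shared_mem_22000" : "parse_shared_mem";
}

int main(void)
{
	printf("%s\n", mem_parser(FAM_22560));	/* parse_shared_mem_22000 */
	return 0;
}
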
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 84a816809723..12fddcf15bab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -93,6 +93,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_8000,
IWL_DEVICE_FAMILY_9000,
IWL_DEVICE_FAMILY_22000,
+ IWL_DEVICE_FAMILY_22560,
};
/*
@@ -176,6 +177,7 @@ static inline u8 num_of_ant(u8 mask)
* @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
* is in flight. This is due to a HW bug in 7260, 3160 and 7265.
* @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
+ * @max_tfd_queue_size: max number of entries in tfd queue.
*/
struct iwl_base_params {
unsigned int wd_timeout;
@@ -191,6 +193,7 @@ struct iwl_base_params {
scd_chain_ext_wa:1;
u16 num_of_queues; /* def: HW dependent */
+ u32 max_tfd_queue_size; /* def: HW dependent */
u8 max_ll_items;
u8 led_compensation;
@@ -571,9 +574,11 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
-extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
+extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
new file mode 100644
index 000000000000..ebea99189ca9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -0,0 +1,286 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_context_info_file_gen3_h__
+#define __iwl_context_info_file_gen3_h__
+
+#include "iwl-context-info.h"
+
+#define CSR_CTXT_INFO_BOOT_CTRL 0x0
+#define CSR_CTXT_INFO_ADDR 0x118
+#define CSR_IML_DATA_ADDR 0x120
+#define CSR_IML_SIZE_ADDR 0x128
+#define CSR_IML_RESP_ADDR 0x12c
+
+/* Set bit for enabling automatic function boot */
+#define CSR_AUTO_FUNC_BOOT_ENA BIT(1)
+/* Set bit for initiating function boot */
+#define CSR_AUTO_FUNC_INIT BIT(7)
+
+/**
+ * enum iwl_prph_scratch_mtr_format - tfd size configuration
+ * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd
+ */
+enum iwl_prph_scratch_mtr_format {
+ IWL_PRPH_MTR_FORMAT_16B = 0x0,
+ IWL_PRPH_MTR_FORMAT_32B = 0x40000,
+ IWL_PRPH_MTR_FORMAT_64B = 0x80000,
+ IWL_PRPH_MTR_FORMAT_256B = 0xC0000,
+};
+
+/**
+ * enum iwl_prph_scratch_flags - PRPH scratch control flags
+ * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
+ * in hwm config.
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for
+ * multicomm.
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW
+ * @IWL_PRPH_SCRATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for
+ * completion descriptor, 1: for responses (legacy)
+ * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
+ * There are 4 possible values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
+ * 3: 256 bit.
+ */
+enum iwl_prph_scratch_flags {
+ IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
+ IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8),
+ IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9),
+ IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10),
+ IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11),
+ IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16),
+ IWL_PRPH_SCRATCH_MTR_MODE = BIT(17),
+ IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19),
+};
+
+/*
+ * struct iwl_prph_scratch_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: prph scratch information version id
+ * @size: the size of the context information in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_version {
+ __le16 mac_id;
+ __le16 version;
+ __le16 size;
+ __le16 reserved;
+} __packed; /* PERIPH_SCRATCH_VERSION_S */
+
+/*
+ * struct iwl_prph_scratch_control - control structure
+ * @control_flags: context information flags see &enum iwl_prph_scratch_flags
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_control {
+ __le32 control_flags;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_CONTROL_S */
+
+/*
+ * struct iwl_prph_scratch_ror_cfg - ror config
+ * @ror_base_addr: ror start address
+ * @ror_size: ror size in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_ror_cfg {
+ __le64 ror_base_addr;
+ __le32 ror_size;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_ROR_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_hwm_cfg - hwm config
+ * @hwm_base_addr: hwm start address
+ * @hwm_size: hwm size in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_hwm_cfg {
+ __le64 hwm_base_addr;
+ __le32 hwm_size;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_rbd_cfg {
+ __le64 free_rbd_addr;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @ror_cfg: ror configuration
+ * @hwm_cfg: hwm configuration
+ * @rbd_cfg: default RX queue configuration
+ */
+struct iwl_prph_scratch_ctrl_cfg {
+ struct iwl_prph_scratch_version version;
+ struct iwl_prph_scratch_control control;
+ struct iwl_prph_scratch_ror_cfg ror_cfg;
+ struct iwl_prph_scratch_hwm_cfg hwm_cfg;
+ struct iwl_prph_scratch_rbd_cfg rbd_cfg;
+} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
+
+/*
+ * struct iwl_prph_scratch - peripheral scratch mapping
+ * @ctrl_cfg: control and configuration of prph scratch
+ * @dram: firmware images addresses in DRAM
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch {
+ struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
+ __le32 reserved[16];
+ struct iwl_context_info_dram dram;
+} __packed; /* PERIPH_SCRATCH_S */
+
+/*
+ * struct iwl_prph_info - peripheral information
+ * @boot_stage_mirror: reflects the value in the Boot Stage CSR register
+ * @ipc_status_mirror: reflects the value in the IPC Status CSR register
+ * @sleep_notif: indicates the peripheral sleep status
+ * @reserved: reserved
+ */
+struct iwl_prph_info {
+ __le32 boot_stage_mirror;
+ __le32 ipc_status_mirror;
+ __le32 sleep_notif;
+ __le32 reserved;
+} __packed; /* PERIPH_INFO_S */
+
+/*
+ * struct iwl_context_info_gen3 - device INIT configuration
+ * @version: version of the context information
+ * @size: size of context information in DWs
+ * @config: context in which the peripheral would execute - a subset of
+ * capability csr register published by the peripheral
+ * @prph_info_base_addr: the peripheral information structure start address
+ * @cr_head_idx_arr_base_addr: the completion ring head index array
+ * start address
+ * @tr_tail_idx_arr_base_addr: the transfer ring tail index array
+ * start address
+ * @cr_tail_idx_arr_base_addr: the completion ring tail index array
+ * start address
+ * @tr_head_idx_arr_base_addr: the transfer ring head index array
+ * start address
+ * @cr_idx_arr_size: number of entries in the completion ring index array
+ * @tr_idx_arr_size: number of entries in the transfer ring index array
+ * @mtr_base_addr: the message transfer ring start address
+ * @mcr_base_addr: the message completion ring start address
+ * @mtr_size: number of entries which the message transfer ring can hold
+ * @mcr_size: number of entries which the message completion ring can hold
+ * @mtr_doorbell_vec: the doorbell vector associated with the message
+ * transfer ring
+ * @mcr_doorbell_vec: the doorbell vector associated with the message
+ * completion ring
+ * @mtr_msi_vec: the MSI which shall be generated by the peripheral after
+ * completing a transfer descriptor in the message transfer ring
+ * @mcr_msi_vec: the MSI which shall be generated by the peripheral after
+ * completing a completion descriptor in the message completion ring
+ * @mtr_opt_header_size: the size of the optional header in the transfer
+ * descriptor associated with the message transfer ring in DWs
+ * @mtr_opt_footer_size: the size of the optional footer in the transfer
+ * descriptor associated with the message transfer ring in DWs
+ * @mcr_opt_header_size: the size of the optional header in the completion
+ * descriptor associated with the message completion ring in DWs
+ * @mcr_opt_footer_size: the size of the optional footer in the completion
+ * descriptor associated with the message completion ring in DWs
+ * @msg_rings_ctrl_flags: message rings control flags
+ * @prph_info_msi_vec: the MSI which shall be generated by the peripheral
+ * after updating the Peripheral Information structure
+ * @prph_scratch_base_addr: the peripheral scratch structure start address
+ * @prph_scratch_size: the size of the peripheral scratch structure in DWs
+ * @reserved: reserved
+ */
+struct iwl_context_info_gen3 {
+ __le16 version;
+ __le16 size;
+ __le32 config;
+ __le64 prph_info_base_addr;
+ __le64 cr_head_idx_arr_base_addr;
+ __le64 tr_tail_idx_arr_base_addr;
+ __le64 cr_tail_idx_arr_base_addr;
+ __le64 tr_head_idx_arr_base_addr;
+ __le16 cr_idx_arr_size;
+ __le16 tr_idx_arr_size;
+ __le64 mtr_base_addr;
+ __le64 mcr_base_addr;
+ __le16 mtr_size;
+ __le16 mcr_size;
+ __le16 mtr_doorbell_vec;
+ __le16 mcr_doorbell_vec;
+ __le16 mtr_msi_vec;
+ __le16 mcr_msi_vec;
+ u8 mtr_opt_header_size;
+ u8 mtr_opt_footer_size;
+ u8 mcr_opt_header_size;
+ u8 mcr_opt_footer_size;
+ __le16 msg_rings_ctrl_flags;
+ __le16 prph_info_msi_vec;
+ __le64 prph_scratch_base_addr;
+ __le32 prph_scratch_size;
+ __le32 reserved;
+} __packed; /* IPC_CONTEXT_INFO_S */
+
+int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ const struct fw_img *fw);
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
+
+#endif /* __iwl_context_info_file_gen3_h__ */
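
The IWL_PRPH_MTR_FORMAT_* values above are not arbitrary: they are the four codes 0..3 pre-shifted into the 2-bit field that IWL_PRPH_SCRATCH_MTR_FORMAT (BIT(18) | BIT(19)) masks. A hedged sketch of packing and unpacking that field with plain shifts, since <linux/bitfield.h> is unavailable outside the kernel:

#include <assert.h>
#include <stdint.h>

#define MTR_FORMAT_SHIFT	18
#define MTR_FORMAT_MASK		(0x3u << MTR_FORMAT_SHIFT)	/* bits 18-19 */

#define MTR_FORMAT_16B		0x00000u
#define MTR_FORMAT_32B		0x40000u
#define MTR_FORMAT_64B		0x80000u
#define MTR_FORMAT_256B		0xC0000u

static uint32_t set_mtr_format(uint32_t flags, uint32_t fmt)
{
	/* clear the old field, then OR in the pre-shifted value */
	return (flags & ~MTR_FORMAT_MASK) | (fmt & MTR_FORMAT_MASK);
}

static unsigned int get_mtr_format_code(uint32_t flags)
{
	return (flags & MTR_FORMAT_MASK) >> MTR_FORMAT_SHIFT;
}

int main(void)
{
	uint32_t flags = set_mtr_format(0, MTR_FORMAT_256B);

	assert(get_mtr_format_code(flags) == 3);	/* code 3 = 256 bit */
	return 0;
}
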
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index b870c0986744..4b6fdf3b15fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -199,5 +201,8 @@ struct iwl_context_info {
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
+int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
+ const struct fw_img *fw,
+ struct iwl_context_info_dram *ctxt_dram);
#endif /* __iwl_context_info_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index ba971d3946e2..9019de99f077 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -339,6 +339,9 @@ enum {
/* HW_RF CHIP ID */
#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
+/* HW_RF CHIP STEP */
+#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)
+
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -592,6 +595,8 @@ enum msix_fh_int_causes {
enum msix_hw_int_causes {
MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
+ MSIX_HW_INT_CAUSES_REG_IPC = BIT(1),
+ MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5),
MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index c59ce4f8a5ed..c0631255aee7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -402,35 +402,6 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
return 0;
}
-static void iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
- const u32 len)
-{
- struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
- struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
-
- capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
- capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
- capa->max_ap_cache_per_scan =
- le32_to_cpu(fw_capa->max_ap_cache_per_scan);
- capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
- capa->max_scan_reporting_threshold =
- le32_to_cpu(fw_capa->max_scan_reporting_threshold);
- capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
- capa->max_significant_change_aps =
- le32_to_cpu(fw_capa->max_significant_change_aps);
- capa->max_bssid_history_entries =
- le32_to_cpu(fw_capa->max_bssid_history_entries);
- capa->max_hotlist_ssids = le32_to_cpu(fw_capa->max_hotlist_ssids);
- capa->max_number_epno_networks =
- le32_to_cpu(fw_capa->max_number_epno_networks);
- capa->max_number_epno_networks_by_ssid =
- le32_to_cpu(fw_capa->max_number_epno_networks_by_ssid);
- capa->max_number_of_white_listed_ssid =
- le32_to_cpu(fw_capa->max_number_of_white_listed_ssid);
- capa->max_number_of_black_listed_ssid =
- le32_to_cpu(fw_capa->max_number_of_black_listed_ssid);
-}
-
/*
* Gets uCode section from tlv.
*/
@@ -644,7 +615,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
u32 build, paging_mem_size;
int num_of_cpus;
bool usniffer_req = false;
- bool gscan_capa = false;
if (len < sizeof(*ucode)) {
IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -1043,6 +1013,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
break;
}
+ case IWL_UCODE_TLV_FW_DBG_DUMP_LST: {
+ if (tlv_len != sizeof(u32)) {
+ IWL_ERR(drv,
+ "dbg lst mask size incorrect, skip\n");
+ break;
+ }
+
+ drv->fw.dbg_dump_mask =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ }
case IWL_UCODE_TLV_SEC_RT_USNIFFER:
*usniffer_images = true;
iwl_store_ucode_sec(pieces, tlv_data,
@@ -1079,16 +1060,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
paging_mem_size;
break;
case IWL_UCODE_TLV_FW_GSCAN_CAPA:
- /*
- * Don't return an error in case of a shorter tlv_len
- * to enable loading of FW that has an old format
- * of GSCAN capabilities TLV.
- */
- if (tlv_len < sizeof(struct iwl_fw_gscan_capabilities))
- break;
-
- iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
- gscan_capa = true;
+ /* ignored */
break;
case IWL_UCODE_TLV_FW_MEM_SEG: {
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
@@ -1153,19 +1125,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
- /*
- * If ucode advertises that it supports GSCAN but GSCAN
- * capabilities TLV is not present, or if it has an old format,
- * warn and continue without GSCAN.
- */
- if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
- !gscan_capa) {
- IWL_DEBUG_INFO(drv,
- "GSCAN is supported but capabilities TLV is unavailable\n");
- __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
- capa->_capa);
- }
-
return 0;
invalid_tlv_len:
@@ -1316,6 +1275,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
+ /* dump all fw memory areas by default */
+ fw->dbg_dump_mask = 0xffffffff;
pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
if (!pieces)
@@ -1787,7 +1748,8 @@ MODULE_PARM_DESC(11n_disable,
"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444);
MODULE_PARM_DESC(amsdu_size,
- "amsdu size 0: 12K for multi Rx queue devices, 4K for other devices 1:4K 2:8K 3:12K (default 0)");
+ "amsdu size 0: 12K for multi Rx queue devices, 2K for 22560 devices, "
+ "4K for other devices 1:4K 2:8K 3:12K 4: 2K (default 0)");
module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
@@ -1856,3 +1818,7 @@ module_param_named(remove_when_gone,
0444);
MODULE_PARM_DESC(remove_when_gone,
"Remove dev from PCIe bus if it is deemed inaccessible (default: false)");
+
+module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool,
+ S_IRUGO);
+MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)");
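
The dump-mask plumbing in iwl-drv.c has a deliberate default: dbg_dump_mask starts at 0xffffffff ("dump everything"), and only a well-formed 4-byte IWL_UCODE_TLV_FW_DBG_DUMP_LST TLV narrows it — a malformed TLV is skipped rather than failing the load. A hedged sketch of that defaulting pattern (endianness handling simplified, not the driver's parser):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fw { uint32_t dbg_dump_mask; };

/* Returns 0 even for a bad TLV: a wrong length only loses the
 * narrowing; it must not abort firmware load. */
static int parse_dump_lst_tlv(struct fw *fw, const uint8_t *data,
			      uint32_t len)
{
	uint32_t mask;

	if (len != sizeof(uint32_t)) {
		fprintf(stderr, "dbg lst mask size incorrect, skip\n");
		return 0;
	}
	memcpy(&mask, data, sizeof(mask));	/* assumes an LE host for brevity */
	fw->dbg_dump_mask = mask;
	return 0;
}

int main(void)
{
	struct fw fw = { .dbg_dump_mask = 0xffffffff };	/* default: dump all */
	const uint8_t tlv[4] = { 0x05, 0x00, 0x00, 0x00 };

	parse_dump_lst_tlv(&fw, tlv, sizeof(tlv));
	printf("mask = 0x%x\n", fw.dbg_dump_mask);	/* 0x5 */
	return 0;
}
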
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index 777f5df8a0c6..a4c96215933b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@@ -33,6 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -767,7 +767,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
if ((cfg->mq_rx_supported &&
- iwlwifi_mod_params.amsdu_size != IWL_AMSDU_4K) ||
+ iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) ||
iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 11789ffb6512..df0e9ffff706 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@@ -33,6 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -434,13 +434,15 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* RXF to DRAM.
* Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
*/
-#define RFH_GEN_STATUS 0xA09808
+#define RFH_GEN_STATUS 0xA09808
+#define RFH_GEN_STATUS_GEN3 0xA07824
#define RBD_FETCH_IDLE BIT(29)
#define SRAM_DMA_IDLE BIT(30)
#define RXF_DMA_IDLE BIT(31)
/* DMA configuration */
-#define RFH_RXF_DMA_CFG 0xA09820
+#define RFH_RXF_DMA_CFG 0xA09820
+#define RFH_RXF_DMA_CFG_GEN3 0xA07880
/* RB size */
#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
#define RFH_RXF_DMA_RB_SIZE_POS 16
@@ -643,10 +645,13 @@ struct iwl_rb_status {
#define TFD_QUEUE_SIZE_MAX (256)
+#define TFD_QUEUE_SIZE_MAX_GEN3 (65536)
/* cb size is the exponent - 3 */
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define TFD_QUEUE_BC_SIZE_GEN3 (TFD_QUEUE_SIZE_MAX_GEN3 + \
+ TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
@@ -753,7 +758,7 @@ struct iwl_tfh_tfd {
* For devices up to 22000:
* @tfd_offset 0-12 - tx command byte count
* 12-16 - station index
- * For 22000 and on:
+ * For 22000:
* @tfd_offset 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
@@ -762,4 +767,15 @@ struct iwlagn_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
} __packed;
+/**
+ * struct iwl_gen3_bc_tbl - scheduler byte count table gen3
+ * For 22560 and on:
+ * @tfd_offset: 0-12 - tx command byte count
+ * 12-13 - number of 64 byte chunks
+ * 14-16 - reserved
+ */
+struct iwl_gen3_bc_tbl {
+ __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
+} __packed;
+
#endif /* !__iwl_fh_h__ */
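
TFD_QUEUE_CB_SIZE encodes a power-of-two queue length as "exponent minus 3" for the hardware register, so the jump from 256 to 65536 entries for gen3 only changes the encoded value from 5 to 13. A worked check of that arithmetic, with a portable ilog2 stand-in (assumes power-of-two sizes, as the hardware does):

#include <assert.h>

/* ilog2 for power-of-two values, mirroring the kernel macro's intent */
static unsigned int ilog2_pow2(unsigned int v)
{
	unsigned int log = 0;

	while (v >>= 1)
		log++;
	return log;
}

#define TFD_QUEUE_CB_SIZE(x)	(ilog2_pow2(x) - 3)

int main(void)
{
	assert(TFD_QUEUE_CB_SIZE(256) == 5);	/* legacy max */
	assert(TFD_QUEUE_CB_SIZE(65536) == 13);	/* gen3 max */
	return 0;
}
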
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index a7dd8a8cddf9..97072cf75bca 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -17,9 +18,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@@ -31,6 +30,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -90,6 +90,8 @@ enum iwl_amsdu_size {
IWL_AMSDU_4K = 1,
IWL_AMSDU_8K = 2,
IWL_AMSDU_12K = 3,
+ /* Add 2K at the end to avoid breaking current API */
+ IWL_AMSDU_2K = 4,
};
enum iwl_uapsd_disable {
@@ -144,6 +146,10 @@ struct iwl_mod_params {
bool lar_disable;
bool fw_monitor;
bool disable_11ac;
+ /**
+ * @disable_11ax: disable HE capabilities, default = false
+ */
+ bool disable_11ax;
bool remove_when_gone;
};
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index b815ba38dbdb..b4c3a957c102 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -430,6 +430,13 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
else
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
break;
+ case IWL_AMSDU_2K:
+ if (cfg->mq_rx_supported)
+ vht_cap->cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+ else
+ WARN(1, "RB size of 2K is not supported by this device\n");
+ break;
case IWL_AMSDU_4K:
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
break;
@@ -463,6 +470,101 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
}
+static struct ieee80211_sband_iftype_data iwl_he_capa = {
+ .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
+ IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_DUAL_BAND |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
+ .phy_cap_info[3] =
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
+ .phy_cap_info[4] =
+ IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
+ .phy_cap_info[5] =
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
+ .phy_cap_info[6] =
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
+ .phy_cap_info[7] =
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP7_MAX_NC_7,
+ .phy_cap_info[8] =
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU,
+ },
+ /*
+ * Set default Tx/Rx HE MCS NSS Support field. Indicate support
+ * for up to 2 spatial streams and all MCS, without any special
+ * cases
+ */
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xffff),
+ .tx_mcs_80p80 = cpu_to_le16(0xffff),
+ },
+ /*
+ * Set default PPE thresholds, with PPET16 set to 0, PPET8 set
+ * to 7
+ */
+ .ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
+ },
+};
+
+static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
+ u8 tx_chains, u8 rx_chains)
+{
+ if (sband->band == NL80211_BAND_2GHZ ||
+ sband->band == NL80211_BAND_5GHZ)
+ sband->iftype_data = &iwl_he_capa;
+ else
+ return;
+
+ sband->n_iftype_data = 1;
+
+ /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
+ if ((tx_chains & rx_chains) != ANT_AB) {
+ iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &=
+ ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS;
+ iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS;
+ }
+}
+
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *nvm_ch_flags, u8 tx_chains,
@@ -483,6 +585,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
tx_chains, rx_chains);
+ if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+ iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+
sband = &data->bands[NL80211_BAND_5GHZ];
sband->band = NL80211_BAND_5GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
@@ -495,6 +600,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
tx_chains, rx_chains);
+ if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+ iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+
if (n_channels != n_used)
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
n_used, n_channels);
@@ -1293,6 +1401,8 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
nvm->sku_cap_11n_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
+ nvm->sku_cap_11ax_enable =
+ !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
nvm->sku_cap_band_24ghz_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
nvm->sku_cap_band_52ghz_enable =
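
The 0xfffa constants in iwl_he_capa above encode the HE "Tx/Rx MCS NSS Support" field: eight 2-bit subfields, one per spatial stream, where 0 means MCS 0-7, 1 means MCS 0-9, 2 means MCS 0-11 and 3 means not supported. 0xfffa therefore advertises MCS 0-11 on streams 1-2 and disables streams 3-8, exactly matching the "2 spatial streams and all MCS" comment. A small decoder to verify (names local to the sketch):

#include <stdint.h>
#include <stdio.h>

static const char *mcs_str[4] = {
	"MCS 0-7", "MCS 0-9", "MCS 0-11", "not supported",
};

int main(void)
{
	uint16_t mcs_nss = 0xfffa;	/* value used for rx/tx_mcs_80 */

	for (int nss = 0; nss < 8; nss++) {
		unsigned int code = (mcs_nss >> (2 * nss)) & 0x3;

		printf("NSS %d: %s\n", nss + 1, mcs_str[code]);
	}
	return 0;	/* NSS 1-2: MCS 0-11, NSS 3-8: not supported */
}
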
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 1b9c627ee34d..279dd7b7a3fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -350,6 +350,8 @@ static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
switch (rb_size) {
+ case IWL_AMSDU_2K:
+ return get_order(2 * 1024);
case IWL_AMSDU_4K:
return get_order(4 * 1024);
case IWL_AMSDU_8K:
@@ -438,6 +440,20 @@ struct iwl_trans_txq_scd_cfg {
};
/**
+ * struct iwl_trans_rxq_dma_data - RX queue DMA data
+ * @fr_bd_cb: DMA address of free BD cyclic buffer
+ * @fr_bd_wid: Initial write index of the free BD cyclic buffer
+ * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
+ * @ur_bd_cb: DMA address of used BD cyclic buffer
+ */
+struct iwl_trans_rxq_dma_data {
+ u64 fr_bd_cb;
+ u32 fr_bd_wid;
+ u64 urbd_stts_wrptr;
+ u64 ur_bd_cb;
+};
+
+/**
* struct iwl_trans_ops - transport specific operations
*
* All the handlers MUST be implemented
@@ -557,6 +573,8 @@ struct iwl_trans_ops {
int cmd_id, int size,
unsigned int queue_wdg_timeout);
void (*txq_free)(struct iwl_trans *trans, int queue);
+ int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data);
void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
bool shared);
@@ -753,6 +771,7 @@ struct iwl_trans {
const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
+ u32 dbg_dump_mask;
u8 dbg_dest_reg_num;
enum iwl_plat_pm_mode system_pm_mode;
@@ -945,6 +964,16 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
cfg, queue_wdg_timeout);
}
+static inline int
+iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
+{
+ if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
+ return -ENOTSUPP;
+
+ return trans->ops->rxq_dma_data(trans, queue, data);
+}
+
static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
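
iwl_trans_get_rxq_dma_data follows the transport layer's usual vtable pattern: the inline wrapper probes the optional op and returns -ENOTSUPP (with a one-shot warning) on transports that never implement it, so op-mode code can call it unconditionally. A reduced userspace sketch of that guard — WARN_ON_ONCE replaced by fprintf, and ENOTSUP standing in for the kernel-internal ENOTSUPP:

#include <errno.h>
#include <stdio.h>

struct trans;
struct trans_ops {
	int (*rxq_dma_data)(struct trans *t, int queue);	/* optional op */
};
struct trans { const struct trans_ops *ops; };

static int trans_get_rxq_dma_data(struct trans *t, int queue)
{
	if (!t->ops->rxq_dma_data) {
		fprintf(stderr, "rxq_dma_data not implemented\n");
		return -ENOTSUP;	/* caller sees a clean error code */
	}
	return t->ops->rxq_dma_data(t, queue);
}

int main(void)
{
	const struct trans_ops legacy_ops = { .rxq_dma_data = NULL };
	struct trans t = { .ops = &legacy_ops };

	return trans_get_rxq_dma_data(&t, 1) == -ENOTSUP ? 0 : 1;
}
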
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 3fcf489f3120..79bdae994822 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1037,6 +1037,13 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
#endif
+ /*
+ * TODO: this is needed because the firmware is not stopping
+ * the recording automatically before entering D3. This can
+ * be removed once the FW starts doing that.
+ */
+ iwl_fw_dbg_stop_recording(&mvm->fwrt);
+
/* must be last -- this switches firmware state */
ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 1c4178f20441..05b77419953c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1150,6 +1150,10 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
struct iwl_rx_mpdu_desc *desc;
int bin_len = count / 2;
int ret = -EINVAL;
+ size_t mpdu_cmd_hdr_size =
+ (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_rx_mpdu_desc) :
+ IWL_RX_DESC_SIZE_V1;
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
@@ -1168,7 +1172,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
goto out;
/* avoid invalid memory access */
- if (bin_len < sizeof(*pkt) + sizeof(*desc))
+ if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size)
goto out;
/* check this is RX packet */
@@ -1179,7 +1183,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
/* check the length in metadata matches actual received length */
desc = (void *)pkt->data;
if (le16_to_cpu(desc->mpdu_len) !=
- (bin_len - sizeof(*desc) - sizeof(*pkt)))
+ (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt)))
goto out;
local_bh_disable();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 866c91c923be..6bb1a99a197a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -130,6 +130,41 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
+static int iwl_configure_rxq(struct iwl_mvm *mvm)
+{
+ int i, num_queues, size, ret;
+ struct iwl_rfh_queue_config *cmd;
+
+ /* Do not configure default queue, it is configured via context info */
+ num_queues = mvm->trans->num_rx_queues - 1;
+
+ size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data);
+
+ cmd = kzalloc(size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->num_queues = num_queues;
+
+ for (i = 0; i < num_queues; i++) {
+ struct iwl_trans_rxq_dma_data data;
+
+ cmd->data[i].q_num = i + 1;
+ iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);
+
+ cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
+ cmd->data[i].urbd_stts_wrptr =
+ cpu_to_le64(data.urbd_stts_wrptr);
+ cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
+ cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(DATA_PATH_GROUP,
+ RFH_QUEUE_CONFIG_CMD),
+ 0, size, cmd);
+ /* the transport copies the payload, so free our buffer */
+ kfree(cmd);
+
+ return ret;
+}
+
static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
struct iwl_dqa_enable_cmd dqa_cmd = {
@@ -301,7 +336,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (ret) {
struct iwl_trans *trans = mvm->trans;
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
@@ -1007,9 +1042,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
/* Init RSS configuration */
- /* TODO - remove 22000 disablement when we have RXQ config API */
- if (iwl_mvm_has_new_rx_api(mvm) &&
- mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ ret = iwl_configure_rxq(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
+ ret);
+ goto error;
+ }
+ }
+
+ if (iwl_mvm_has_new_rx_api(mvm)) {
ret = iwl_send_rss_cfg_cmd(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 8ba16fc24e3a..b3fd20502abb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -780,6 +780,10 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+ if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax)
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
+
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index a6e072234398..b15b0d84bb7e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -36,6 +36,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -914,7 +915,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
- u8 buf_size = params->buf_size;
+ u16 buf_size = params->buf_size;
bool amsdu = params->amsdu;
u16 timeout = params->timeout;
@@ -1897,6 +1898,194 @@ void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
iwl_mvm_mu_mimo_iface_iterator, notif);
}
+static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
+{
+ u8 byte_num = ppe_pos_bit / 8;
+ u8 bit_num = ppe_pos_bit % 8;
+ u8 residue_bits;
+ u8 res;
+
+ if (bit_num <= 5)
+ return (ppe[byte_num] >> bit_num) &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
+
+ /*
+ * If bit_num > 5, we have to combine bits with next byte.
+ * Calculate how many bits we need to take from current byte (called
+ * here "residue_bits"), and add them to bits from next byte.
+ */
+
+ residue_bits = 8 - bit_num;
+
+ res = (ppe[byte_num + 1] &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
+ residue_bits;
+ res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
+
+ return res;
+}
+
+static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, u8 sta_id)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
+ .sta_id = sta_id,
+ .tid_limit = IWL_MAX_TID_COUNT,
+ .bss_color = vif->bss_conf.bss_color,
+ .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
+ .frame_time_rts_th =
+ cpu_to_le16(vif->bss_conf.frame_time_rts_th),
+ };
+ struct ieee80211_sta *sta;
+ u32 flags;
+ int i;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
+ if (IS_ERR(sta)) {
+ rcu_read_unlock();
+ WARN(1, "Can't find STA to configure HE\n");
+ return;
+ }
+
+ if (!sta->he_cap.has_he) {
+ rcu_read_unlock();
+ return;
+ }
+
+ flags = 0;
+
+ /* HTC flags */
+ if (sta->he_cap.he_cap_elem.mac_cap_info[0] &
+ IEEE80211_HE_MAC_CAP0_HTC_HE)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
+ if ((sta->he_cap.he_cap_elem.mac_cap_info[1] &
+ IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
+ (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
+ u8 link_adap =
+ ((sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
+ (sta->he_cap.he_cap_elem.mac_cap_info[1] &
+ IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
+
+ if (link_adap == 2)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED);
+ else if (link_adap == 3)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
+ }
+ if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
+
+ /* If PPE Thresholds exist, parse them into a FW-familiar format */
+ if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ u8 nss = (sta->he_cap.ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK) + 1;
+ u8 ru_index_bitmap =
+ (sta->he_cap.ppe_thres[0] &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
+ u8 *ppe = &sta->he_cap.ppe_thres[0];
+ u8 ppe_pos_bit = 7; /* Starting after PPE header */
+
+ /*
+ * FW currently supports only nss == MAX_HE_SUPP_NSS
+ *
+ * If nss > MAX: we can ignore values we don't support
+ * If nss < MAX: we can set zeros in other streams
+ */
+ if (nss > MAX_HE_SUPP_NSS) {
+ IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
+ nss = MAX_HE_SUPP_NSS;
+ }
+
+ for (i = 0; i < nss; i++) {
+ u8 ru_index_tmp = ru_index_bitmap << 1;
+ u8 bw;
+
+ for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
+ ru_index_tmp >>= 1;
+ if (!(ru_index_tmp & 1))
+ continue;
+
+ sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
+ iwl_mvm_he_get_ppe_val(ppe,
+ ppe_pos_bit);
+ ppe_pos_bit +=
+ IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
+ iwl_mvm_he_get_ppe_val(ppe,
+ ppe_pos_bit);
+ ppe_pos_bit +=
+ IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ }
+ }
+
+ flags |= STA_CTXT_HE_PACKET_EXT;
+ }
+ rcu_read_unlock();
+
+ /* Mark MU EDCA as enabled, unless the params are missing on some AC */
+ flags |= STA_CTXT_HE_MU_EDCA_CW;
+ for (i = 0; i < AC_NUM; i++) {
+ struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
+ &mvmvif->queue_params[i].mu_edca_param_rec;
+
+ if (!mvmvif->queue_params[i].mu_edca) {
+ flags &= ~STA_CTXT_HE_MU_EDCA_CW;
+ break;
+ }
+
+ sta_ctxt_cmd.trig_based_txf[i].cwmin =
+ cpu_to_le16(mu_edca->ecw_min_max & 0xf);
+ sta_ctxt_cmd.trig_based_txf[i].cwmax =
+ cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
+ sta_ctxt_cmd.trig_based_txf[i].aifsn =
+ cpu_to_le16(mu_edca->aifsn);
+ sta_ctxt_cmd.trig_based_txf[i].mu_time =
+ cpu_to_le16(mu_edca->mu_edca_timer);
+ }
+
+ if (vif->bss_conf.multi_sta_back_32bit)
+ flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
+
+ if (vif->bss_conf.ack_enabled)
+ flags |= STA_CTXT_HE_ACK_ENABLED;
+
+ if (vif->bss_conf.uora_exists) {
+ flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
+
+ sta_ctxt_cmd.rand_alloc_ecwmin =
+ vif->bss_conf.uora_ocw_range & 0x7;
+ sta_ctxt_cmd.rand_alloc_ecwmax =
+ (vif->bss_conf.uora_ocw_range >> 3) & 0x7;
+ }
+
+ /* TODO: support Multi BSSID IE */
+
+ sta_ctxt_cmd.flags = cpu_to_le32(flags);
+
+ if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
+ DATA_PATH_GROUP, 0),
+ 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
+ IWL_ERR(mvm, "Failed to config FW to work HE!\n");
+}
+
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -1910,8 +2099,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
* beacon interval, which was not known when the station interface was
* added.
*/
- if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
+ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
+ if (vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax)
+ iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
+
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+ }
/*
* If we're not associated yet, take the (new) BSSID before associating
@@ -4216,7 +4410,7 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
if (mvmsta->avg_energy) {
sinfo->signal_avg = mvmsta->avg_energy;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
if (!fw_has_capa(&mvm->fw->ucode_capa,
@@ -4240,11 +4434,11 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
mvmvif->beacon_stats.accu_num_beacons;
- sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
if (mvmvif->beacon_stats.avg_signal) {
/* firmware only reports a value after RXing a few beacons */
sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
- sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
}
unlock:
mutex_unlock(&mvm->mutex);
@@ -4364,13 +4558,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
atomic_set(&mvm->queue_sync_counter,
mvm->trans->num_rx_queues);
- /* TODO - remove this when we have RXQ config API */
- if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) {
- qmask = BIT(0);
- if (notif->sync)
- atomic_set(&mvm->queue_sync_counter, 1);
- }
-
ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
if (ret) {
IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 6a4ba160c59e..b3987a0a7018 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -654,7 +654,7 @@ struct iwl_mvm_tcm {
struct iwl_mvm_reorder_buffer {
u16 head_sn;
u16 num_stored;
- u8 buf_size;
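+	/* HE allows block-ack windows of up to 256 frames, which no longer
+	 * fits in a u8, hence the change to u16
+	 */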
+ u16 buf_size;
int queue;
u16 last_amsdu;
u8 last_sub_index;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index ff1e518096c5..0e26619fb330 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -448,6 +448,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(DQA_ENABLE_CMD),
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+ HCMD_NAME(STA_HE_CTXT_CMD),
+ HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
HCMD_NAME(STA_PM_NOTIF),
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
HCMD_NAME(RX_QUEUES_NOTIFICATION),
@@ -620,7 +622,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (iwl_mvm_has_new_rx_api(mvm)) {
op_mode->ops = &iwl_mvm_ops_mq;
- trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
+ trans->rx_mpdu_cmd_hdr_size =
+ (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_rx_mpdu_desc) :
+ IWL_RX_DESC_SIZE_V1;
} else {
op_mode->ops = &iwl_mvm_ops;
trans->rx_mpdu_cmd_hdr_size =
@@ -703,11 +709,17 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
/* the hardware splits the A-MSDU */
- if (mvm->cfg->mq_rx_supported)
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ trans_cfg.rx_buf_size = IWL_AMSDU_2K;
+		/* TODO: remove when balanced power mode is supported by the fw */
+ iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM;
+ } else if (mvm->cfg->mq_rx_supported) {
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+ }
trans->wide_cmd_header = true;
- trans_cfg.bc_table_dword = true;
+ trans_cfg.bc_table_dword =
+ mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
@@ -738,6 +750,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
sizeof(trans->dbg_conf_tlv));
trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
+ trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
trans->iml = mvm->fw->iml;
trans->iml_len = mvm->fw->iml_len;
@@ -1003,10 +1016,8 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
list_add_tail(&entry->list, &mvm->async_handlers_list);
spin_unlock(&mvm->async_handlers_lock);
schedule_work(&mvm->async_handlers_wk);
- return;
+ break;
}
-
- iwl_fwrt_handle_notification(&mvm->fwrt, rxb);
}
static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index b8b2b819e8e7..8169d1450b3b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -183,6 +183,43 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
}
}
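+
+/*
+ * The HE MCS support fields use a 2-bit "highest supported MCS" encoding;
+ * BIT(max + 1) - 1 expands that into a bitmap with every MCS from 0 through
+ * max set (e.g. 0-7 -> 0x00ff), assuming the IWL_TLC_MNG_HT_RATE_MCS* values
+ * equal their MCS indices.
+ */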
+static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
+{
+ switch (mcs) {
+ case IEEE80211_HE_MCS_SUPPORT_0_7:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_9:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_11:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
+ case IEEE80211_HE_MCS_NOT_SUPPORTED:
+ return 0;
+ }
+
+ WARN(1, "invalid HE MCS %d\n", mcs);
+ return 0;
+}
+
+static void
+rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
+ const struct ieee80211_sta_he_cap *he_cap,
+ struct iwl_tlc_config_cmd *cmd)
+{
+ u16 mcs_160 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
+ u16 mcs_80 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
+ int i;
+
+ for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
+ u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
+ u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
+
+ cmd->ht_rates[i][0] =
+ cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
+ cmd->ht_rates[i][1] =
+ cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
+ }
+}
+
static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd *cmd)
@@ -192,6 +229,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
unsigned long supp; /* must be unsigned long for for_each_set_bit */
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
/* non HT rates */
supp = 0;
@@ -202,7 +240,11 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
cmd->non_ht_rates = cpu_to_le16(supp);
cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
- if (vht_cap && vht_cap->vht_supported) {
+ /* HT/VHT rates */
+ if (he_cap && he_cap->has_he) {
+ cmd->mode = IWL_TLC_MNG_MODE_HE;
+ rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
+ } else if (vht_cap && vht_cap->vht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_VHT;
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
} else if (ht_cap && ht_cap->ht_supported) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 642da10b0b7f..30cfd7d50bc9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -363,7 +363,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
idx += 1;
if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
return idx;
- } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK ||
+ rate_n_flags & RATE_MCS_HE_MSK) {
idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
idx += IWL_RATE_MCS_0_INDEX;
@@ -372,6 +373,9 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
idx++;
if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
return idx;
+ if ((rate_n_flags & RATE_MCS_HE_MSK) &&
+ (idx <= IWL_LAST_HE_RATE))
+ return idx;
} else {
/* legacy rate format, search for match in table */
@@ -516,6 +520,8 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type)
[LQ_HT_MIMO2] = "HT MIMO",
[LQ_VHT_SISO] = "VHT SISO",
[LQ_VHT_MIMO2] = "VHT MIMO",
+ [LQ_HE_SISO] = "HE SISO",
+ [LQ_HE_MIMO2] = "HE MIMO",
};
if (type < LQ_NONE || type >= LQ_MAX)
@@ -900,7 +906,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
/* Legacy */
if (!(ucode_rate & RATE_MCS_HT_MSK) &&
- !(ucode_rate & RATE_MCS_VHT_MSK)) {
+ !(ucode_rate & RATE_MCS_VHT_MSK) &&
+ !(ucode_rate & RATE_MCS_HE_MSK)) {
if (num_of_ant == 1) {
if (band == NL80211_BAND_5GHZ)
rate->type = LQ_LEGACY_A;
@@ -911,7 +918,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
return 0;
}
- /* HT or VHT */
+ /* HT, VHT or HE */
if (ucode_rate & RATE_MCS_SGI_MSK)
rate->sgi = true;
if (ucode_rate & RATE_MCS_LDPC_MSK)
@@ -953,10 +960,24 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
} else {
WARN_ON_ONCE(1);
}
+ } else if (ucode_rate & RATE_MCS_HE_MSK) {
+ nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+
+ if (nss == 1) {
+ rate->type = LQ_HE_SISO;
+ WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+ "stbc %d bfer %d", rate->stbc, rate->bfer);
+ } else if (nss == 2) {
+ rate->type = LQ_HE_MIMO2;
+ WARN_ON_ONCE(num_of_ant != 2);
+ } else {
+ WARN_ON_ONCE(1);
+ }
}
WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
- !is_vht(rate));
+ !is_he(rate) && !is_vht(rate));
return 0;
}
@@ -3606,7 +3627,8 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
if (!(rate & RATE_MCS_HT_MSK) &&
- !(rate & RATE_MCS_VHT_MSK)) {
+ !(rate & RATE_MCS_VHT_MSK) &&
+ !(rate & RATE_MCS_HE_MSK)) {
int index = iwl_hwrate_to_plcp_idx(rate);
return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
@@ -3625,6 +3647,11 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
mcs = rate & RATE_HT_MCS_INDEX_MSK;
nss = ((rate & RATE_HT_MCS_NSS_MSK)
>> RATE_HT_MCS_NSS_POS) + 1;
+ } else if (rate & RATE_MCS_HE_MSK) {
+ type = "HE";
+ mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+ nss = ((rate & RATE_VHT_MCS_NSS_MSK)
+ >> RATE_VHT_MCS_NSS_POS) + 1;
} else {
type = "Unknown"; /* shouldn't happen */
}
@@ -3886,6 +3913,8 @@ static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
[IWL_RATE_MCS_7_INDEX] = "MCS7",
[IWL_RATE_MCS_8_INDEX] = "MCS8",
[IWL_RATE_MCS_9_INDEX] = "MCS9",
+ [IWL_RATE_MCS_10_INDEX] = "MCS10",
+ [IWL_RATE_MCS_11_INDEX] = "MCS11",
};
char *buff, *pos, *endpos;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index cffb8c852934..d2cf484e2b73 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -144,8 +144,13 @@ enum {
#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64)
+/*
+ * FIXME - various places in firmware API still use u8,
+ * e.g. LQ command and SCD config command.
+ * This should be 256 instead.
+ */
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255)
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
@@ -162,6 +167,8 @@ enum iwl_table_type {
LQ_HT_MIMO2,
LQ_VHT_SISO, /* VHT types */
LQ_VHT_MIMO2,
+ LQ_HE_SISO, /* HE types */
+ LQ_HE_MIMO2,
LQ_MAX,
};
@@ -183,11 +190,16 @@ struct rs_rate {
#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2)
#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO)
#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2)
-#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type))
-#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type))
+#define is_type_he_siso(type) ((type) == LQ_HE_SISO)
+#define is_type_he_mimo2(type) ((type) == LQ_HE_MIMO2)
+#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type) || \
+ is_type_he_siso(type))
+#define is_type_mimo2(type) (is_type_ht_mimo2(type) || \
+ is_type_vht_mimo2(type) || is_type_he_mimo2(type))
#define is_type_mimo(type) (is_type_mimo2(type))
#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type))
#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type))
+#define is_type_he(type) (is_type_he_siso(type) || is_type_he_mimo2(type))
#define is_type_a_band(type) ((type) == LQ_LEGACY_A)
#define is_type_g_band(type) ((type) == LQ_LEGACY_G)
@@ -201,6 +213,7 @@ struct rs_rate {
#define is_mimo(rate) is_type_mimo((rate)->type)
#define is_ht(rate) is_type_ht((rate)->type)
#define is_vht(rate) is_type_vht((rate)->type)
+#define is_he(rate) is_type_he((rate)->type)
#define is_a_band(rate) is_type_a_band((rate)->type)
#define is_g_band(rate) is_type_g_band((rate)->type)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 129c4c09648d..b53148f972a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -196,22 +198,31 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct sk_buff *skb, int queue,
struct ieee80211_sta *sta)
{
- if (iwl_mvm_check_pn(mvm, skb, queue, sta))
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+
+ if (iwl_mvm_check_pn(mvm, skb, queue, sta)) {
kfree_skb(skb);
- else
+ } else {
+ unsigned int radiotap_len = 0;
+
+ if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
+ radiotap_len += sizeof(struct ieee80211_radiotap_he);
+ if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
+ radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
+ __skb_push(skb, radiotap_len);
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
+ }
}
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
- struct iwl_rx_mpdu_desc *desc,
- struct ieee80211_rx_status *rx_status)
+ struct ieee80211_rx_status *rx_status,
+ u32 rate_n_flags, int energy_a,
+ int energy_b)
{
- int energy_a, energy_b, max_energy;
- u32 rate_flags = le32_to_cpu(desc->rate_n_flags);
+ int max_energy;
+ u32 rate_flags = rate_n_flags;
- energy_a = desc->energy_a;
energy_a = energy_a ? -energy_a : S8_MIN;
- energy_b = desc->energy_b;
energy_b = energy_b ? -energy_b : S8_MIN;
max_energy = max(energy_a, energy_b);
@@ -356,7 +367,8 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
tid = IWL_MAX_TID_COUNT;
/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
- sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+ sub_frame_idx = desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
dup_data->last_seq[tid] == hdr->seq_ctrl &&
@@ -850,17 +862,41 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
- struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
+ struct ieee80211_hdr *hdr;
u32 len = le16_to_cpu(desc->mpdu_len);
- u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
+ u32 rate_n_flags, gp2_on_air_rise;
u16 phy_info = le16_to_cpu(desc->phy_info);
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
- u8 crypt_len = 0;
+ u8 crypt_len = 0, channel, energy_a, energy_b;
+ struct ieee80211_radiotap_he *he = NULL;
+ struct ieee80211_radiotap_he_mu *he_mu = NULL;
+ u32 he_type = 0xffffffff;
+ /* this is invalid e.g. because puncture type doesn't allow 0b11 */
+#define HE_PHY_DATA_INVAL ((u64)-1)
+ u64 he_phy_data = HE_PHY_DATA_INVAL;
+ size_t desc_size;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+ channel = desc->v3.channel;
+ gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+ energy_a = desc->v3.energy_a;
+ energy_b = desc->v3.energy_b;
+ desc_size = sizeof(*desc);
+ } else {
+ rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
+ channel = desc->v1.channel;
+ gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
+ energy_a = desc->v1.energy_a;
+ energy_b = desc->v1.energy_b;
+ desc_size = IWL_RX_DESC_SIZE_V1;
+ }
+
+ hdr = (void *)(pkt->data + desc_size);
 	/* Don't use dev_alloc_skb(); we'll have enough headroom once
 	 * the ieee80211_hdr is pulled.
*/
@@ -882,6 +918,51 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status = IEEE80211_SKB_RXCB(skb);
+ if (rate_n_flags & RATE_MCS_HE_MSK) {
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
+ };
+ static const struct ieee80211_radiotap_he_mu mu_known = {
+ .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
+ .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
+ };
+ unsigned int radiotap_len = 0;
+
+ he = skb_put_data(skb, &known, sizeof(known));
+ radiotap_len += sizeof(known);
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE;
+
+ he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ if (he_type == RATE_MCS_HE_TYPE_MU) {
+ he_mu = skb_put_data(skb, &mu_known,
+ sizeof(mu_known));
+ radiotap_len += sizeof(mu_known);
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ }
+ }
+
+ /* temporarily hide the radiotap data */
+ __skb_pull(skb, radiotap_len);
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
le32_to_cpu(pkt->len_n_flags), queue,
&crypt_len)) {
@@ -904,20 +985,80 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
- rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
+ u64 tsf_on_air_rise;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
+ else
+ tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
+
+ rx_status->mactime = tsf_on_air_rise;
/* TSF as indicated by the firmware is at INA time */
rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+ } else if (he_type == RATE_MCS_HE_TYPE_SU) {
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
+ if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
+ he_phy_data))
+ he->data3 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ rx_status->ampdu_reference = mvm->ampdu_ref;
+ mvm->ampdu_ref++;
+
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+ he_phy_data))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+ } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
+		he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
}
- rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
- rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
- NL80211_BAND_2GHZ;
- rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
+ rx_status->device_timestamp = gp2_on_air_rise;
+ rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
+ rx_status->freq = ieee80211_channel_to_frequency(channel,
rx_status->band);
- iwl_mvm_get_signal_strength(mvm, desc, rx_status);
+ iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
+ energy_b);
/* update aggregation data for monitor sake on default queue */
if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
rx_status->ampdu_reference = mvm->ampdu_ref;
@@ -925,6 +1066,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (toggle_bit != mvm->ampdu_toggle) {
mvm->ampdu_ref++;
mvm->ampdu_toggle = toggle_bit;
+
+ if (he_phy_data != HE_PHY_DATA_INVAL &&
+ he_type == RATE_MCS_HE_TYPE_MU) {
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+ he_phy_data))
+ rx_status->flag |=
+ RX_FLAG_AMPDU_EOF_BIT;
+ }
}
}
@@ -1033,7 +1183,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
}
- /* Set up the HT phy flags */
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
case RATE_MCS_CHAN_WIDTH_20:
break;
@@ -1048,6 +1197,70 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
break;
}
+ if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
+ rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+ if (rate_n_flags & RATE_MCS_HE_MSK &&
+ phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD &&
+ he_type == RATE_MCS_HE_TYPE_MU) {
+ /*
+ * Unfortunately, we have to leave the mac80211 data
+ * incorrect for the case that we receive an HE-MU
+ * transmission and *don't* have the he_mu pointer,
+ * i.e. we don't have the phy data (due to the bits
+ * being used for TSF). This shouldn't happen though
+ * as management frames where we need the TSF/timers
+	 * are not transmitted in HE-MU, I think.
+ */
+ u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
+ u8 offs = 0;
+
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+
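+		/*
+		 * The RU allocation index enumerates all RU size classes back
+		 * to back: 26-tone RUs (0-36), then 52-tone (37-52), 106-tone
+		 * (53-60), 242-tone (61-64), 484-tone (65-66), 996-tone (67)
+		 * and 2x996-tone (68); offs is the position within the class.
+		 */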
+ switch (ru) {
+ case 0 ... 36:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+ he->data2 |=
+ le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
+ if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+ } else if (he) {
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+ }
+
if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
rate_n_flags & RATE_MCS_SGI_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
@@ -1072,6 +1285,119 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_BF;
+ } else if (he) {
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
+ rx_status->nss =
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
+
+ rx_status->he_dcm =
+ !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+
+#define CHECK_TYPE(F) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
+ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+ CHECK_TYPE(SU);
+ CHECK_TYPE(EXT_SU);
+ CHECK_TYPE(MU);
+ CHECK_TYPE(TRIG);
+
+ he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
+
+		if (rate_n_flags & RATE_MCS_BF_MSK)
+ he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
+
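+		/*
+		 * The GI/LTF field encodes combined guard interval and LTF
+		 * size settings; values 0 and 1 differ only in LTF size and
+		 * thus both map to a 0.8us GI, 2 maps to 1.6us, and 3 maps to
+		 * 0.8us (with the SGI flag set) or 3.2us otherwise.
+		 */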
+ switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
+ RATE_MCS_HE_GI_LTF_POS) {
+ case 0:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case 1:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case 2:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ break;
+ case 3:
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ switch (he_type) {
+ case RATE_MCS_HE_TYPE_SU: {
+ u16 val;
+
+ /* LTF syms correspond to streams */
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ switch (rx_status->nss) {
+ case 1:
+ val = 0;
+ break;
+ case 2:
+ val = 1;
+ break;
+ case 3:
+ case 4:
+ val = 2;
+ break;
+ case 5:
+ case 6:
+ val = 3;
+ break;
+ case 7:
+ case 8:
+ val = 4;
+ break;
+ default:
+ WARN_ONCE(1, "invalid nss: %d\n",
+ rx_status->nss);
+ val = 0;
+ }
+ he->data5 |=
+ le16_encode_bits(val,
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+ }
+ break;
+ case RATE_MCS_HE_TYPE_MU: {
+ u16 val;
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ if (he_phy_data == HE_PHY_DATA_INVAL)
+ break;
+
+ val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
+ he_phy_data);
+
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ he->data5 |=
+ cpu_to_le16(FIELD_PREP(
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
+ val));
+ }
+ break;
+ case RATE_MCS_HE_TYPE_EXT_SU:
+ case RATE_MCS_HE_TYPE_TRIG:
+ /* not supported yet */
+ break;
+ }
} else {
int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 9263b9aa8b72..18db1ed92d9b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2184,7 +2184,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
struct iwl_mvm_baid_data *data,
- u16 ssn, u8 buf_size)
+ u16 ssn, u16 buf_size)
{
int i;
@@ -2211,7 +2211,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
}
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
+ int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
@@ -2273,7 +2273,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (start) {
cmd.add_immediate_ba_tid = (u8) tid;
cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
- cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
+ cmd.rx_ba_window = cpu_to_le16(buf_size);
} else {
cmd.remove_immediate_ba_tid = (u8) tid;
}
@@ -2559,7 +2559,7 @@ out:
}
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u8 buf_size,
+ struct ieee80211_sta *sta, u16 tid, u16 buf_size,
bool amsdu)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 1c43ea8dd8cc..0fc211108149 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -412,7 +412,7 @@ struct iwl_mvm_sta {
u32 tfd_queue_msk;
u32 mac_id_n_color;
u16 tid_disable_agg;
- u8 max_agg_bufsize;
+ u16 max_agg_bufsize;
enum iwl_sta_type sta_type;
enum ieee80211_sta_state sta_state;
bool bt_reduced_txpower;
@@ -518,11 +518,11 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u16 ssn, bool start, u8 buf_size, u16 timeout);
+ int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u8 buf_size,
+ struct ieee80211_sta *sta, u16 tid, u16 buf_size,
bool amsdu);
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index cf2591f2ac23..ff193dca2020 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -484,13 +484,15 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
/* Make sure we zero enough of dev_cmd */
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
- struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
u16 offload_assist = 0;
+ u32 rate_n_flags = 0;
+ u16 flags = 0;
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
@@ -507,25 +509,43 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
!(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
- cmd->offload_assist |= cpu_to_le16(offload_assist);
+ if (!info->control.hw_key)
+ flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
- /* Total # bytes to be transmitted */
- cmd->len = cpu_to_le16((u16)skb->len);
+ /* For data packets rate info comes from the fw */
+ if (!(ieee80211_is_data(hdr->frame_control) && sta)) {
+ flags |= IWL_TX_FLAGS_CMD_RATE;
+ rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta);
+ }
- /* Copy MAC header from skb into command buffer */
- memcpy(cmd->hdr, hdr, hdrlen);
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560) {
+ struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
- if (!info->control.hw_key)
- cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);
+ cmd->offload_assist |= cpu_to_le32(offload_assist);
- /* For data packets rate info comes from the fw */
- if (ieee80211_is_data(hdr->frame_control) && sta)
- goto out;
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
- cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
- cmd->rate_n_flags =
- cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
+ /* Copy MAC header from skb into command buffer */
+ memcpy(cmd->hdr, hdr, hdrlen);
+ cmd->flags = cpu_to_le16(flags);
+ cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ } else {
+ struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
+
+ cmd->offload_assist |= cpu_to_le16(offload_assist);
+
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(cmd->hdr, hdr, hdrlen);
+
+ cmd->flags = cpu_to_le32(flags);
+ cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ }
goto out;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
new file mode 100644
index 000000000000..2146fda8da2f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -0,0 +1,207 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "iwl-trans.h"
+#include "iwl-fh.h"
+#include "iwl-context-info-gen3.h"
+#include "internal.h"
+#include "iwl-prph.h"
+
+int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ const struct fw_img *fw)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_context_info_gen3 *ctxt_info_gen3;
+ struct iwl_prph_scratch *prph_scratch;
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
+ struct iwl_prph_info *prph_info;
+ void *iml_img;
+ u32 control_flags = 0;
+ int ret;
+
+ /* Allocate prph scratch */
+ prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
+ &trans_pcie->prph_scratch_dma_addr,
+ GFP_KERNEL);
+ if (!prph_scratch)
+ return -ENOMEM;
+
+ prph_sc_ctrl = &prph_scratch->ctrl_cfg;
+
+ prph_sc_ctrl->version.version = 0;
+ prph_sc_ctrl->version.mac_id =
+ cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
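+	/* the size field is expressed in dwords, hence the division by 4 */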
+
+ control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
+ IWL_PRPH_SCRATCH_MTR_MODE |
+ (IWL_PRPH_MTR_FORMAT_256B &
+ IWL_PRPH_SCRATCH_MTR_FORMAT) |
+ IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
+ IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+ prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+
+ /* initialize RX default queue */
+ prph_sc_ctrl->rbd_cfg.free_rbd_addr =
+ cpu_to_le64(trans_pcie->rxq->bd_dma);
+
+	/* Configure debug for integration */
+ iwl_pcie_alloc_fw_monitor(trans, 0);
+ prph_sc_ctrl->hwm_cfg.hwm_base_addr =
+ cpu_to_le64(trans_pcie->fw_mon_phys);
+ prph_sc_ctrl->hwm_cfg.hwm_size =
+ cpu_to_le32(trans_pcie->fw_mon_size);
+
+ /* allocate ucode sections in dram and set addresses */
+ ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
+ if (ret) {
+ dma_free_coherent(trans->dev,
+ sizeof(*prph_scratch),
+ prph_scratch,
+ trans_pcie->prph_scratch_dma_addr);
+ return ret;
+ }
+
+	/* Allocate prph information
+	 * (nothing is written to it for now, but it will be filled in later)
+	 */
+ prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
+ &trans_pcie->prph_info_dma_addr,
+ GFP_KERNEL);
+ if (!prph_info)
+ return -ENOMEM;
+
+ /* Allocate context info */
+ ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
+ sizeof(*ctxt_info_gen3),
+ &trans_pcie->ctxt_info_dma_addr,
+ GFP_KERNEL);
+ if (!ctxt_info_gen3)
+ return -ENOMEM;
+
+ ctxt_info_gen3->prph_info_base_addr =
+ cpu_to_le64(trans_pcie->prph_info_dma_addr);
+ ctxt_info_gen3->prph_scratch_base_addr =
+ cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
+ ctxt_info_gen3->prph_scratch_size =
+ cpu_to_le32(sizeof(*prph_scratch));
+ ctxt_info_gen3->cr_head_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
+ ctxt_info_gen3->tr_tail_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
+ ctxt_info_gen3->cr_tail_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
+ ctxt_info_gen3->cr_idx_arr_size =
+ cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
+ ctxt_info_gen3->tr_idx_arr_size =
+ cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
+ ctxt_info_gen3->mtr_base_addr =
+ cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+ ctxt_info_gen3->mcr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->used_bd_dma);
+ ctxt_info_gen3->mtr_size =
+ cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS));
+ ctxt_info_gen3->mcr_size =
+ cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
+
+ trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
+ trans_pcie->prph_info = prph_info;
+ trans_pcie->prph_scratch = prph_scratch;
+
+ /* Allocate IML */
+ iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
+ &trans_pcie->iml_dma_addr, GFP_KERNEL);
+ if (!iml_img)
+ return -ENOMEM;
+
+ memcpy(iml_img, trans->iml, trans->iml_len);
+
+ iwl_enable_interrupts(trans);
+
+ /* kick FW self load */
+ iwl_write64(trans, CSR_CTXT_INFO_ADDR,
+ trans_pcie->ctxt_info_dma_addr);
+ iwl_write64(trans, CSR_IML_DATA_ADDR,
+ trans_pcie->iml_dma_addr);
+ iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
+ iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA);
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
+
+ return 0;
+}
+
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans_pcie->ctxt_info_gen3)
+ return;
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
+ trans_pcie->ctxt_info_gen3,
+ trans_pcie->ctxt_info_dma_addr);
+ trans_pcie->ctxt_info_dma_addr = 0;
+ trans_pcie->ctxt_info_gen3 = NULL;
+
+ iwl_pcie_ctxt_info_free_fw_img(trans);
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
+ trans_pcie->prph_scratch,
+ trans_pcie->prph_scratch_dma_addr);
+ trans_pcie->prph_scratch_dma_addr = 0;
+ trans_pcie->prph_scratch = NULL;
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info),
+ trans_pcie->prph_info,
+ trans_pcie->prph_info_dma_addr);
+ trans_pcie->prph_info_dma_addr = 0;
+ trans_pcie->prph_info = NULL;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 3fc4343581ee..b2cd7ef5fc3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,57 +57,6 @@
#include "internal.h"
#include "iwl-prph.h"
-static int iwl_pcie_get_num_sections(const struct fw_img *fw,
- int start)
-{
- int i = 0;
-
- while (start < fw->num_sec &&
- fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
- fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
- start++;
- i++;
- }
-
- return i;
-}
-
-static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
- const struct fw_desc *sec,
- struct iwl_dram_data *dram)
-{
- dram->block = dma_alloc_coherent(trans->dev, sec->len,
- &dram->physical,
- GFP_KERNEL);
- if (!dram->block)
- return -ENOMEM;
-
- dram->size = sec->len;
- memcpy(dram->block, sec->data, sec->len);
-
- return 0;
-}
-
-static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
- int i;
-
- if (!dram->fw) {
- WARN_ON(dram->fw_cnt);
- return;
- }
-
- for (i = 0; i < dram->fw_cnt; i++)
- dma_free_coherent(trans->dev, dram->fw[i].size,
- dram->fw[i].block, dram->fw[i].physical);
-
- kfree(dram->fw);
- dram->fw_cnt = 0;
- dram->fw = NULL;
-}
-
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -128,13 +79,12 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
dram->paging = NULL;
}
-static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans,
- const struct fw_img *fw,
- struct iwl_context_info *ctxt_info)
+int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
+ const struct fw_img *fw,
+ struct iwl_context_info_dram *ctxt_dram)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
- struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram;
int i, ret, lmac_cnt, umac_cnt, paging_cnt;
if (WARN(dram->paging,
@@ -247,7 +197,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
/* allocate ucode sections in dram and set addresses */
- ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
+ ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
if (ret) {
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
ctxt_info, trans_pcie->ctxt_info_dma_addr);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 8520523b91b4..b150da4c6721 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -72,7 +72,6 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
-#include <linux/pci-aspm.h>
#include <linux/acpi.h>
#include "fw/acpi.h"
@@ -828,19 +827,32 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
/* 22000 Series */
- {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
{IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)},
#endif /* CONFIG_IWLMVM */
@@ -1003,6 +1015,10 @@ static int iwl_pci_resume(struct device *device)
if (!trans->op_mode)
return 0;
+ /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ return 0;
+
/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
iwl_pcie_conf_msix_hw(trans_pcie);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 45ea32796cda..b63d44b7cd7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -3,6 +3,7 @@
* Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -17,8 +18,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ * this program.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
@@ -45,6 +45,7 @@
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
+#include "iwl-drv.h"
/* We need 2 entries for the TX command and header, and another one might
* be needed for potential data in the SKB's head. The remaining ones can
@@ -59,6 +60,7 @@
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
+#define FIRST_RX_QUEUE 512
struct iwl_host_cmd;
@@ -71,6 +73,7 @@ struct iwl_host_cmd;
* @page: driver's pointer to the rxb page
* @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table
+ * @size: size used from the buffer
*/
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
@@ -78,6 +81,7 @@ struct iwl_rx_mem_buffer {
u16 vid;
bool invalid;
struct list_head list;
+ u32 size;
};
/**
@@ -98,14 +102,121 @@ struct isr_statistics {
u32 unhandled;
};
+#define IWL_CD_STTS_OPTIMIZED_POS 0
+#define IWL_CD_STTS_OPTIMIZED_MSK 0x01
+#define IWL_CD_STTS_TRANSFER_STATUS_POS 1
+#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E
+#define IWL_CD_STTS_WIFI_STATUS_POS 4
+#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0
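+
+/*
+ * Usage sketch (hypothetical, not part of the driver): the status byte of a
+ * completion descriptor is decoded with these masks, e.g.
+ *	u8 wifi_status = (cd->status & IWL_CD_STTS_WIFI_STATUS_MSK) >>
+ *			 IWL_CD_STTS_WIFI_STATUS_POS;
+ */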
+
+/**
+ * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
+ * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
+ * In sniffer mode, when split is used, set in last CD completion. (RX)
+ * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when split is used, set in
+ *	all CD completions. (RX)
+ * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
+ */
+enum iwl_completion_desc_transfer_status {
+ IWL_CD_STTS_UNUSED,
+ IWL_CD_STTS_UNUSED_2,
+ IWL_CD_STTS_END_TRANSFER,
+ IWL_CD_STTS_OVERFLOW,
+ IWL_CD_STTS_ABORTED,
+ IWL_CD_STTS_ERROR,
+};
+
+/**
+ * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
+ * @IWL_CD_STTS_VALID: the packet is valid (RX)
+ * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
+ * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
+ * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
+ * @IWL_CD_STTS_DUP: duplicate packet (RX)
+ * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
+ * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
+ * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
+ * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
+ * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
+ * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
+ * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
+ * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
+ * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
+ * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
+ */
+enum iwl_completion_desc_wifi_status {
+ IWL_CD_STTS_VALID,
+ IWL_CD_STTS_FCS_ERR,
+ IWL_CD_STTS_SEC_KEY_ERR,
+ IWL_CD_STTS_DECRYPTION_ERR,
+ IWL_CD_STTS_DUP,
+ IWL_CD_STTS_ICV_MIC_ERR,
+ IWL_CD_STTS_INTERNAL_SNAP_ERR,
+ IWL_CD_STTS_SEC_PORT_FAIL,
+ IWL_CD_STTS_BA_OLD_SN,
+ IWL_CD_STTS_QOS_NULL,
+ IWL_CD_STTS_MAC_HDR_ERR,
+ IWL_CD_STTS_MAX_RETRANS,
+ IWL_CD_STTS_EX_LIFETIME,
+ IWL_CD_STTS_NOT_USED,
+ IWL_CD_STTS_REPLAY_ERR,
+};
+
+#define IWL_RX_TD_TYPE_MSK 0xff000000
+#define IWL_RX_TD_SIZE_MSK 0x00ffffff
+#define IWL_RX_TD_SIZE_2K BIT(11)
+#define IWL_RX_TD_TYPE 0
+
+/**
+ * struct iwl_rx_transfer_desc - transfer descriptor
+ * @type_n_size: buffer type (bit 0: external buff valid,
+ * bit 1: optional footer valid, bit 2-7: reserved)
+ * and buffer size
+ * @addr: ptr to free buffer start address
+ * @rbid: unique tag of the buffer
+ * @reserved: reserved
+ */
+struct iwl_rx_transfer_desc {
+ __le32 type_n_size;
+ __le64 addr;
+ __le16 rbid;
+ __le16 reserved;
+} __packed;
+
+#define IWL_RX_CD_SIZE 0xffffff00
+
+/**
+ * struct iwl_rx_completion_desc - completion descriptor
+ * @type: buffer type (bit 0: external buff valid,
+ * bit 1: optional footer valid, bit 2-7: reserved)
+ * @status: status of the completion
+ * @reserved1: reserved
+ * @rbid: unique tag of the received buffer
+ * @size: buffer size, masked by IWL_RX_CD_SIZE
+ * @reserved2: reserved
+ */
+struct iwl_rx_completion_desc {
+ u8 type;
+ u8 status;
+ __le16 reserved1;
+ __le16 rbid;
+ __le32 size;
+ u8 reserved2[22];
+} __packed;
+
/**
* struct iwl_rxq - Rx queue
* @id: queue index
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
* Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
+ * In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
* @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
+ * @tr_tail: driver's pointer to the transmission ring tail buffer
+ * @tr_tail_dma: physical address of the buffer for the transmission ring tail
+ * @cr_tail: driver's pointer to the completion ring tail buffer
+ * @cr_tail_dma: physical address of the buffer for the completion ring tail
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
@@ -125,8 +236,16 @@ struct iwl_rxq {
int id;
void *bd;
dma_addr_t bd_dma;
- __le32 *used_bd;
+ union {
+ void *used_bd;
+ __le32 *bd_32;
+ struct iwl_rx_completion_desc *cd;
+ };
dma_addr_t used_bd_dma;
+ __le16 *tr_tail;
+ dma_addr_t tr_tail_dma;
+ __le16 *cr_tail;
+ dma_addr_t cr_tail_dma;
u32 read;
u32 write;
u32 free_count;
@@ -136,7 +255,7 @@ struct iwl_rxq {
struct list_head rx_free;
struct list_head rx_used;
bool need_update;
- struct iwl_rb_status *rb_stts;
+ void *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
struct napi_struct napi;
@@ -175,18 +294,36 @@ struct iwl_dma_ptr {
* iwl_queue_inc_wrap - increment queue index, wrap back to beginning
* @index -- current index
*/
-static inline int iwl_queue_inc_wrap(int index)
+static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
- return ++index & (TFD_QUEUE_SIZE_MAX - 1);
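+	/* the AND works as a wrap only because max_tfd_queue_size is a
+	 * power of two
+	 */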
+ return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
+}
+
+/**
+ * iwl_get_closed_rb_stts - get the closed RB count, whose layout differs
+ *	by device family (a bare __le16 on 22560+, struct iwl_rb_status
+ *	before that)
+ * @trans - the transport, used to tell the device family apart
+ * @rxq - the rxq to get the rb stts from
+ */
+static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ __le16 *rb_stts = rxq->rb_stts;
+
+ return READ_ONCE(*rb_stts);
+ } else {
+ struct iwl_rb_status *rb_stts = rxq->rb_stts;
+
+ return READ_ONCE(rb_stts->closed_rb_num);
+ }
}
/**
* iwl_queue_dec_wrap - decrement queue index, wrap back to end
* @index -- current index
*/
-static inline int iwl_queue_dec_wrap(int index)
+static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
- return --index & (TFD_QUEUE_SIZE_MAX - 1);
+ return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
struct iwl_cmd_meta {
@@ -315,6 +452,18 @@ enum iwl_shared_irq_flags {
};
/**
+ * enum iwl_image_response_code - image response values
+ * @IWL_IMAGE_RESP_DEF: the default value of the register
+ * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
+ * @IWL_IMAGE_RESP_FAIL: iml reading failed
+ */
+enum iwl_image_response_code {
+ IWL_IMAGE_RESP_DEF = 0,
+ IWL_IMAGE_RESP_SUCCESS = 1,
+ IWL_IMAGE_RESP_FAIL = 2,
+};
+
+/**
* struct iwl_dram_data
* @physical: page phy pointer
* @block: pointer to the allocated block/page
@@ -347,6 +496,12 @@ struct iwl_self_init_dram {
* @global_table: table mapping received VID from hw to rxb
* @rba: allocator for RX replenishing
* @ctxt_info: context information for FW self init
+ * @ctxt_info_gen3: context information for gen3 devices
+ * @prph_info: prph info for self init
+ * @prph_scratch: prph scratch for self init
+ * @prph_info_dma_addr: dma addr of prph info
+ * @prph_scratch_dma_addr: dma addr of prph scratch
* @ctxt_info_dma_addr: dma addr of context information
* @init_dram: DRAM data of firmware image (including paging).
* Context information addresses will be taken from here.
@@ -391,8 +546,16 @@ struct iwl_trans_pcie {
struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
struct iwl_rb_allocator rba;
- struct iwl_context_info *ctxt_info;
+ union {
+ struct iwl_context_info *ctxt_info;
+ struct iwl_context_info_gen3 *ctxt_info_gen3;
+ };
+ struct iwl_prph_info *prph_info;
+ struct iwl_prph_scratch *prph_scratch;
dma_addr_t ctxt_info_dma_addr;
+ dma_addr_t prph_info_dma_addr;
+ dma_addr_t prph_scratch_dma_addr;
+ dma_addr_t iml_dma_addr;
struct iwl_self_init_dram init_dram;
struct iwl_trans *trans;
@@ -477,6 +640,20 @@ IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
return (void *)trans->trans_specific;
}
+static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
+ struct msix_entry *entry)
+{
+ /*
+ * Before sending the interrupt the HW disables it to prevent
+ * a nested interrupt. This is done by writing 1 to the corresponding
+ * bit in the mask register. After handling the interrupt, it should be
+	 * re-enabled by clearing this bit. The register is defined as a
+	 * write-1-clear (W1C) register, meaning that a bit is cleared by
+	 * writing 1 to it.
+ */
+ iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+}
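The W1C semantics described above can be modelled in two lines; this is an illustrative sketch of the register behaviour, not driver code:

	/* W1C: every bit written as 1 is cleared; bits written as 0 are untouched */
	static inline u32 w1c_write(u32 reg_val, u32 written)
	{
		return reg_val & ~written;
	}

so writing BIT(entry->entry) clears exactly the automask bit of this vector and leaves the other vectors' bits as they were.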
+
static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
@@ -504,6 +681,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
+int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+ struct iwl_rxq *rxq);
/*****************************************************
* ICT - interrupt handling
@@ -588,6 +770,60 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
+#define IWL_NUM_OF_COMPLETION_RINGS 31
+#define IWL_NUM_OF_TRANSFER_RINGS 527
+
+static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
+ int start)
+{
+ int i = 0;
+
+ while (start < fw->num_sec &&
+ fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
+ fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
+ start++;
+ i++;
+ }
+
+ return i;
+}
+
+static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+ const struct fw_desc *sec,
+ struct iwl_dram_data *dram)
+{
+ dram->block = dma_alloc_coherent(trans->dev, sec->len,
+ &dram->physical,
+ GFP_KERNEL);
+ if (!dram->block)
+ return -ENOMEM;
+
+ dram->size = sec->len;
+ memcpy(dram->block, sec->data, sec->len);
+
+ return 0;
+}
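A plausible call pattern for the helper above, modelled on the bookkeeping that iwl_pcie_ctxt_info_free_fw_img() below undoes (the loop itself, and the names num_sections and first, are assumptions, not part of this patch):

	for (i = 0; i < num_sections; i++) {
		ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[first + i],
						   &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		dram->fw_cnt++;
	}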
+
+static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+ int i;
+
+ if (!dram->fw) {
+ WARN_ON(dram->fw_cnt);
+ return;
+ }
+
+ for (i = 0; i < dram->fw_cnt; i++)
+ dma_free_coherent(trans->dev, dram->fw[i].size,
+ dram->fw[i].block, dram->fw[i].physical);
+
+ kfree(dram->fw);
+ dram->fw_cnt = 0;
+ dram->fw = NULL;
+}
+
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -660,7 +896,7 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
-static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
+static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
@@ -676,6 +912,29 @@ static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
return txq->tfds + trans_pcie->tfd_size * idx;
}
+static inline const char *queue_name(struct device *dev,
+ struct iwl_trans_pcie *trans_p, int i)
+{
+ if (trans_p->shared_vec_mask) {
+ int vec = trans_p->shared_vec_mask &
+ IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+
+ if (i == 0)
+ return DRV_NAME ": shared IRQ";
+
+ return devm_kasprintf(dev, GFP_KERNEL,
+ DRV_NAME ": queue %d", i + vec);
+ }
+ if (i == 0)
+ return DRV_NAME ": default queue";
+
+ if (i == trans_p->alloc_vecs - 1)
+ return DRV_NAME ": exception";
+
+ return devm_kasprintf(dev, GFP_KERNEL,
+ DRV_NAME ": queue %d", i);
+}
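Assuming DRV_NAME expands to "iwlwifi" and four allocated vectors with no shared IRQ, the helper yields names along these lines:

	i == 0                 -> "iwlwifi: default queue"
	i == 1, 2              -> "iwlwifi: queue 1", "iwlwifi: queue 2"
	i == alloc_vecs - 1    -> "iwlwifi: exception"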
+
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -730,9 +989,13 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
- return q->write_ptr >= q->read_ptr ?
- (i >= q->read_ptr && i < q->write_ptr) :
- !(i < q->read_ptr && i >= q->write_ptr);
+ int index = iwl_pcie_get_cmd_index(q, i);
+ int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
+ int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
+
+ return w >= r ?
+ (index >= r && index < w) :
+ !(index < r && index >= w);
}
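A worked example of the wrapped branch (numbers illustrative): with n_window = 8, r = 6 and w = 2, entries 6, 7, 0 and 1 are in flight. Index 0 yields !(0 < 6 && 0 >= 2) == true (used), while index 3 yields !(3 < 6 && 3 >= 2) == false (free).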
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
@@ -801,7 +1064,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-int iwl_queue_space(const struct iwl_txq *q);
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
@@ -818,6 +1081,9 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif
+/* common functions that are used by gen3 transport */
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
+
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index d15f5ba2dc77..d017aa2a0a8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -18,8 +18,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ * this program.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
@@ -37,6 +36,7 @@
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
+#include "iwl-context-info-gen3.h"
/******************************************************************************
*
@@ -167,7 +167,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
*/
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
- if (trans->cfg->mq_rx_supported) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ /* TODO: remove this for 22560 once fw does it */
+ iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
+ return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
+ RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+ } else if (trans->cfg->mq_rx_supported) {
iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -209,7 +214,11 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
- if (trans->cfg->mq_rx_supported)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ iwl_write32(trans, HBUS_TARG_WRPTR,
+ (rxq->write_actual |
+ ((FIRST_RX_QUEUE + rxq->id) << 16)));
+ else if (trans->cfg->mq_rx_supported)
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
else
@@ -233,6 +242,25 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
}
}
+static void iwl_pcie_restock_bd(struct iwl_trans *trans,
+ struct iwl_rxq *rxq,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ struct iwl_rx_transfer_desc *bd = rxq->bd;
+
+ bd[rxq->write].type_n_size =
+ cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
+ ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
+ bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
+ bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
+ } else {
+ __le64 *bd = rxq->bd;
+
+ bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+ }
+}
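In the legacy branch the OR-packing is safe because receive pages are at least 4 KiB aligned, so the low 12 bits of page_dma are zero and free to carry the vid; the WARN_ON in iwl_pcie_rxmq_restock() below checks exactly that. An illustrative decode of such a descriptor (the 12-bit split is an assumption drawn from that WARN_ON and the vid masks used later):

	u64 bd_val = le64_to_cpu(bd[i]);
	dma_addr_t page_dma = bd_val & ~0xFFFULL;	/* page address */
	u16 vid = bd_val & 0xFFF;			/* buffer id */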
+
/*
* iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
*/
@@ -254,8 +282,6 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
spin_lock(&rxq->lock);
while (rxq->free_count) {
- __le64 *bd = (__le64 *)rxq->bd;
-
/* Get next free Rx buffer, remove from free list */
rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
list);
@@ -264,7 +290,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
/* first 12 bits are expected to be empty */
WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
/* Point to Rx buffer via next RBD in circular buffer */
- bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+ iwl_pcie_restock_bd(trans, rxq, rxb);
rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
rxq->free_count--;
}
@@ -391,8 +417,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
* iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
- struct iwl_rxq *rxq)
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+ struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_mem_buffer *rxb;
@@ -448,7 +474,7 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
}
}
-static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
@@ -608,89 +634,174 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
iwl_pcie_rx_allocator(trans_pcie->trans);
}
-static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- struct device *dev = trans->dev;
- int i;
- int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
- sizeof(__le32);
+ struct iwl_rx_transfer_desc *rx_td;
- if (WARN_ON(trans_pcie->rxq))
- return -EINVAL;
+ if (use_rx_td)
+ return sizeof(*rx_td);
+ else
+ return trans->cfg->mq_rx_supported ? sizeof(__le64) :
+ sizeof(__le32);
+}
- trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
- GFP_KERNEL);
- if (!trans_pcie->rxq)
- return -EINVAL;
+static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ struct device *dev = trans->dev;
+ bool use_rx_td = (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560);
+ int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+
+ if (rxq->bd)
+ dma_free_coherent(trans->dev,
+ free_size * rxq->queue_size,
+ rxq->bd, rxq->bd_dma);
+ rxq->bd_dma = 0;
+ rxq->bd = NULL;
+
+ if (rxq->rb_stts)
+ dma_free_coherent(trans->dev,
+ use_rx_td ? sizeof(__le16) :
+ sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->rb_stts_dma = 0;
+ rxq->rb_stts = NULL;
+
+ if (rxq->used_bd)
+ dma_free_coherent(trans->dev,
+ (use_rx_td ? sizeof(*rxq->cd) :
+ sizeof(__le32)) * rxq->queue_size,
+ rxq->used_bd, rxq->used_bd_dma);
+ rxq->used_bd_dma = 0;
+ rxq->used_bd = NULL;
+
+ if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ return;
- spin_lock_init(&rba->lock);
+ if (rxq->tr_tail)
+ dma_free_coherent(dev, sizeof(__le16),
+ rxq->tr_tail, rxq->tr_tail_dma);
+ rxq->tr_tail_dma = 0;
+ rxq->tr_tail = NULL;
+
+ if (rxq->cr_tail)
+ dma_free_coherent(dev, sizeof(__le16),
+ rxq->cr_tail, rxq->cr_tail_dma);
+ rxq->cr_tail_dma = 0;
+ rxq->cr_tail = NULL;
+}
- for (i = 0; i < trans->num_rx_queues; i++) {
- struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct device *dev = trans->dev;
+ int i;
+ int free_size;
+ bool use_rx_td = (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560);
- spin_lock_init(&rxq->lock);
- if (trans->cfg->mq_rx_supported)
- rxq->queue_size = MQ_RX_TABLE_SIZE;
- else
- rxq->queue_size = RX_QUEUE_SIZE;
+ spin_lock_init(&rxq->lock);
+ if (trans->cfg->mq_rx_supported)
+ rxq->queue_size = MQ_RX_TABLE_SIZE;
+ else
+ rxq->queue_size = RX_QUEUE_SIZE;
- /*
- * Allocate the circular buffer of Read Buffer Descriptors
- * (RBDs)
- */
- rxq->bd = dma_zalloc_coherent(dev,
- free_size * rxq->queue_size,
- &rxq->bd_dma, GFP_KERNEL);
- if (!rxq->bd)
- goto err;
+ free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
- if (trans->cfg->mq_rx_supported) {
- rxq->used_bd = dma_zalloc_coherent(dev,
- sizeof(__le32) *
- rxq->queue_size,
- &rxq->used_bd_dma,
- GFP_KERNEL);
- if (!rxq->used_bd)
- goto err;
- }
+ /*
+ * Allocate the circular buffer of Read Buffer Descriptors
+ * (RBDs)
+ */
+ rxq->bd = dma_zalloc_coherent(dev,
+ free_size * rxq->queue_size,
+ &rxq->bd_dma, GFP_KERNEL);
+ if (!rxq->bd)
+ goto err;
- /*Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
- &rxq->rb_stts_dma,
+ if (trans->cfg->mq_rx_supported) {
+ rxq->used_bd = dma_zalloc_coherent(dev,
+ (use_rx_td ?
+ sizeof(*rxq->cd) :
+ sizeof(__le32)) *
+ rxq->queue_size,
+ &rxq->used_bd_dma,
GFP_KERNEL);
- if (!rxq->rb_stts)
+ if (!rxq->used_bd)
goto err;
}
+
+ /* Allocate the driver's pointer to receive buffer status */
+ rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
+ sizeof(__le16) :
+ sizeof(struct iwl_rb_status),
+ &rxq->rb_stts_dma,
+ GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err;
+
+ if (!use_rx_td)
+ return 0;
+
+ /* Allocate the driver's pointer to TR tail */
+ rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
+ &rxq->tr_tail_dma,
+ GFP_KERNEL);
+ if (!rxq->tr_tail)
+ goto err;
+
+ /* Allocate the driver's pointer to CR tail */
+ rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
+ &rxq->cr_tail_dma,
+ GFP_KERNEL);
+ if (!rxq->cr_tail)
+ goto err;
+ /*
+ * W/A for a 22560 device step Z0 bug: the CR tail must be non-zero
+ * TODO: remove this when we stop supporting step Z0
+ */
+ *rxq->cr_tail = cpu_to_le16(500);
+
return 0;
err:
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
- if (rxq->bd)
- dma_free_coherent(dev, free_size * rxq->queue_size,
- rxq->bd, rxq->bd_dma);
- rxq->bd_dma = 0;
- rxq->bd = NULL;
-
- if (rxq->rb_stts)
- dma_free_coherent(trans->dev,
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
-
- if (rxq->used_bd)
- dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
- rxq->used_bd, rxq->used_bd_dma);
- rxq->used_bd_dma = 0;
- rxq->used_bd = NULL;
+ iwl_pcie_free_rxq_dma(trans, rxq);
}
kfree(trans_pcie->rxq);
return -ENOMEM;
}
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int i, ret;
+
+ if (WARN_ON(trans_pcie->rxq))
+ return -EINVAL;
+
+ trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
+ GFP_KERNEL);
+ if (!trans_pcie->rxq)
+ return -EINVAL;
+
+ spin_lock_init(&rba->lock);
+
+ for (i = 0; i < trans->num_rx_queues; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -792,6 +903,9 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
int i;
switch (trans_pcie->rx_buf_size) {
+ case IWL_AMSDU_2K:
+ rb_size = RFH_RXF_DMA_RB_SIZE_2K;
+ break;
case IWL_AMSDU_4K:
rb_size = RFH_RXF_DMA_RB_SIZE_4K;
break;
@@ -872,7 +986,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
iwl_pcie_enable_rx_wake(trans, true);
}
-static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
lockdep_assert_held(&rxq->lock);
@@ -882,7 +996,7 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
rxq->used_count = 0;
}
-static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
WARN_ON(1);
return 0;
@@ -931,7 +1045,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->read = 0;
rxq->write = 0;
rxq->write_actual = 0;
- memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
+ memset(rxq->rb_stts, 0,
+ (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ sizeof(__le16) : sizeof(struct iwl_rb_status));
iwl_pcie_rx_init_rxb_lists(rxq);
@@ -1002,8 +1118,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
- int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
- sizeof(__le32);
int i;
/*
@@ -1022,27 +1136,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
- if (rxq->bd)
- dma_free_coherent(trans->dev,
- free_size * rxq->queue_size,
- rxq->bd, rxq->bd_dma);
- rxq->bd_dma = 0;
- rxq->bd = NULL;
-
- if (rxq->rb_stts)
- dma_free_coherent(trans->dev,
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- else
- IWL_DEBUG_INFO(trans,
- "Free rxq->rb_stts which is NULL\n");
-
- if (rxq->used_bd)
- dma_free_coherent(trans->dev,
- sizeof(__le32) * rxq->queue_size,
- rxq->used_bd, rxq->used_bd_dma);
- rxq->used_bd_dma = 0;
- rxq->used_bd = NULL;
+ iwl_pcie_free_rxq_dma(trans, rxq);
if (rxq->napi.poll)
netif_napi_del(&rxq->napi);
@@ -1202,6 +1296,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
page_stolen |= rxcb._page_stolen;
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ break;
offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
}
@@ -1236,6 +1332,45 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}
+static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
+ struct iwl_rxq *rxq, int i)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_mem_buffer *rxb;
+ u16 vid;
+
+ if (!trans->cfg->mq_rx_supported) {
+ rxb = rxq->queue[i];
+ rxq->queue[i] = NULL;
+ return rxb;
+ }
+
+ /* used_bd entries are 32/16 bits wide, but only 12 bits carry the vid */
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
+ else
+ vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+
+ if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+ goto out_err;
+
+ rxb = trans_pcie->global_table[vid - 1];
+ if (rxb->invalid)
+ goto out_err;
+
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
+
+ rxb->invalid = true;
+
+ return rxb;
+
+out_err:
+ WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
+ iwl_force_nmi(trans);
+ return NULL;
+}
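The vid reported by the HW is 1-based, which is why a vid of 0 is rejected and the global_table lookup uses vid - 1; a sketch of the valid range (reading 0 as "no buffer" is an inference from the !vid check above):

	/* valid vids: 1 .. ARRAY_SIZE(global_table); table index: vid - 1 */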
+
/*
* iwl_pcie_rx_handle - Main entry function for receiving responses from fw
*/
@@ -1250,7 +1385,7 @@ restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
i = rxq->read;
/* W/A 9000 device step A0 wrap-around bug */
@@ -1266,30 +1401,9 @@ restart:
if (unlikely(rxq->used_count == rxq->queue_size / 2))
emergency = true;
- if (trans->cfg->mq_rx_supported) {
- /*
- * used_bd is a 32 bit but only 12 are used to retrieve
- * the vid
- */
- u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
-
- if (WARN(!vid ||
- vid > ARRAY_SIZE(trans_pcie->global_table),
- "Invalid rxb index from HW %u\n", (u32)vid)) {
- iwl_force_nmi(trans);
- goto out;
- }
- rxb = trans_pcie->global_table[vid - 1];
- if (WARN(rxb->invalid,
- "Invalid rxb from HW %u\n", (u32)vid)) {
- iwl_force_nmi(trans);
- goto out;
- }
- rxb->invalid = true;
- } else {
- rxb = rxq->queue[i];
- rxq->queue[i] = NULL;
- }
+ rxb = iwl_pcie_get_rxb(trans, rxq, i);
+ if (!rxb)
+ goto out;
IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
@@ -1331,6 +1445,9 @@ restart:
out:
/* Backtrack one entry */
rxq->read = i;
+ /* update cr tail with the rxq read pointer */
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ *rxq->cr_tail = cpu_to_le16(r);
spin_unlock(&rxq->lock);
/*
@@ -1362,20 +1479,6 @@ static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
}
-static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
- struct msix_entry *entry)
-{
- /*
- * Before sending the interrupt the HW disables it to prevent
- * a nested interrupt. This is done by writing 1 to the corresponding
- * bit in the mask register. After handling the interrupt, it should be
- * re-enabled by clearing this bit. This register is defined as
- * write 1 clear (W1C) register, meaning that it's being clear
- * by writing 1 to the bit.
- */
- iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
-}
-
/*
* iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw
* This interrupt handler should be used with RSS queue only.
@@ -1970,7 +2073,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/* Error detected by uCode */
if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
+ (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
+ (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
IWL_ERR(trans,
"Microcode SW error detected. Restarting 0x%X.\n",
inta_fh);
@@ -1995,8 +2099,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
}
- /* uCode wakes up after power-down sleep */
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
+ inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
+ /* Reflect IML transfer status */
+ int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
+
+ IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
+ if (res == IWL_IMAGE_RESP_FAIL) {
+ isr_stats->sw++;
+ iwl_pcie_irq_handle_error(trans);
+ }
+ } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+ /* uCode wakes up after power-down sleep */
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
iwl_pcie_rxq_check_wrptr(trans);
iwl_pcie_txq_check_wrptrs(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index b8e8dac2895d..2bc67219ed3e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -53,6 +53,7 @@
#include "iwl-trans.h"
#include "iwl-prph.h"
#include "iwl-context-info.h"
+#include "iwl-context-info-gen3.h"
#include "internal.h"
/*
@@ -188,7 +189,10 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
}
iwl_pcie_ctxt_info_free_paging(trans);
- iwl_pcie_ctxt_info_free(trans);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+ iwl_pcie_ctxt_info_gen3_free(trans);
+ else
+ iwl_pcie_ctxt_info_free(trans);
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
@@ -346,7 +350,10 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
}
- ret = iwl_pcie_ctxt_info_init(trans, fw);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+ ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
+ else
+ ret = iwl_pcie_ctxt_info_init(trans, fw);
if (ret)
goto out;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 7229991ae70d..7d319b6863fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -84,6 +84,7 @@
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
+#include "fw/dbg.h"
#include "internal.h"
#include "iwl-fh.h"
@@ -203,7 +204,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
trans_pcie->fw_mon_size = 0;
}
-static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct page *page = NULL;
@@ -1132,21 +1133,44 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
+static struct iwl_causes_list causes_list_v2[] = {
+ {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
+ {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
+ {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
+ {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
+ {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
+ {MSIX_HW_INT_CAUSES_REG_IPC, CSR_MSIX_HW_INT_MASK_AD, 0x11},
+ {MSIX_HW_INT_CAUSES_REG_SW_ERR_V2, CSR_MSIX_HW_INT_MASK_AD, 0x15},
+ {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
+ {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
+ {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
+ {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
+ {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
+ {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
+ {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+};
+
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
- int i;
+ int i, arr_size =
+ (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+ ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
/*
* Access all non-RX causes and map them to the default irq.
* In case we are missing at least one interrupt vector,
* the first interrupt vector will serve non-RX and FBQ causes.
*/
- for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
- iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
- iwl_clear_bit(trans, causes_list[i].mask_reg,
- causes_list[i].cause_num);
+ for (i = 0; i < arr_size; i++) {
+ struct iwl_causes_list *causes =
+ (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+ causes_list : causes_list_v2;
+
+ iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
+ iwl_clear_bit(trans, causes[i].mask_reg,
+ causes[i].cause_num);
}
}
@@ -1539,18 +1563,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_pcie_enable_rx_wake(trans, true);
- /*
- * Reconfigure IVAR table in case of MSIX or reset ict table in
- * MSI mode since HW reset erased it.
- * Also enables interrupts - none will happen as
- * the device doesn't know we're waking it up, only when
- * the opmode actually tells it after this call.
- */
- iwl_pcie_conf_msix_hw(trans_pcie);
- if (!trans_pcie->msix_enabled)
- iwl_pcie_reset_ict(trans);
- iwl_enable_interrupts(trans);
-
iwl_set_bit(trans, CSR_GP_CNTRL,
BIT(trans->cfg->csr->flag_mac_access_req));
iwl_set_bit(trans, CSR_GP_CNTRL,
@@ -1568,6 +1580,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return ret;
}
+ /*
+ * Reconfigure IVAR table in case of MSIX or reset ict table in
+ * MSI mode since HW reset erased it.
+ * Also enables interrupts - none will happen as
+ * the device doesn't know we're waking it up, only when
+ * the opmode actually tells it after this call.
+ */
+ iwl_pcie_conf_msix_hw(trans_pcie);
+ if (!trans_pcie->msix_enabled)
+ iwl_pcie_reset_ict(trans);
+ iwl_enable_interrupts(trans);
+
iwl_pcie_set_pwr(trans, false);
if (!reset) {
@@ -1685,29 +1709,6 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
}
}
-static const char *queue_name(struct device *dev,
- struct iwl_trans_pcie *trans_p, int i)
-{
- if (trans_p->shared_vec_mask) {
- int vec = trans_p->shared_vec_mask &
- IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
-
- if (i == 0)
- return DRV_NAME ": shared IRQ";
-
- return devm_kasprintf(dev, GFP_KERNEL,
- DRV_NAME ": queue %d", i + vec);
- }
- if (i == 0)
- return DRV_NAME ": default queue";
-
- if (i == trans_p->alloc_vecs - 1)
- return DRV_NAME ": exception";
-
- return devm_kasprintf(dev, GFP_KERNEL,
- DRV_NAME ": queue %d", i);
-}
-
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie)
{
@@ -2236,12 +2237,28 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
jiffies_to_msecs(txq->wd_timeout),
txq->read_ptr, txq->write_ptr,
iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
- (TFD_QUEUE_SIZE_MAX - 1),
+ (trans->cfg->base_params->max_tfd_queue_size - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
- (TFD_QUEUE_SIZE_MAX - 1),
+ (trans->cfg->base_params->max_tfd_queue_size - 1),
iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
+static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
+ return -EINVAL;
+
+ data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
+ data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
+ data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
+ data->fr_bd_wid = 0;
+
+ return 0;
+}
+
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2522,10 +2539,11 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
rxq->free_count);
if (rxq->rb_stts) {
+ u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
+ rxq));
pos += scnprintf(buf + pos, bufsz - pos,
"\tclosed_rb_num: %u\n",
- le16_to_cpu(rxq->rb_stts->closed_rb_num) &
- 0x0FFF);
+ r & 0x0FFF);
} else {
pos += scnprintf(buf + pos, bufsz - pos,
"\tclosed_rb_num: Not Allocated\n");
@@ -2731,7 +2749,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
spin_lock(&rxq->lock);
- r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
for (i = rxq->read, j = 0;
i != r && j < allocated_rb_nums;
@@ -2934,11 +2952,12 @@ static struct iwl_trans_dump_data
struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
- u32 len, num_rbs;
+ u32 len, num_rbs = 0;
u32 monitor_len;
int i, ptr;
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
- !trans->cfg->mq_rx_supported;
+ !trans->cfg->mq_rx_supported &&
+ trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
/* transport dump header */
len = sizeof(*dump_data);
@@ -2990,6 +3009,10 @@ static struct iwl_trans_dump_data
}
if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+ if (!(trans->dbg_dump_mask &
+ BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)))
+ return NULL;
+
dump_data = vzalloc(len);
if (!dump_data)
return NULL;
@@ -3002,22 +3025,28 @@ static struct iwl_trans_dump_data
}
/* CSR registers */
- len += sizeof(*data) + IWL_CSR_TO_DUMP;
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+ len += sizeof(*data) + IWL_CSR_TO_DUMP;
/* FH registers */
- if (trans->cfg->gen2)
- len += sizeof(*data) +
- (FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2);
- else
- len += sizeof(*data) +
- (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
+ if (trans->cfg->gen2)
+ len += sizeof(*data) +
+ (FH_MEM_UPPER_BOUND_GEN2 -
+ FH_MEM_LOWER_BOUND_GEN2);
+ else
+ len += sizeof(*data) +
+ (FH_MEM_UPPER_BOUND -
+ FH_MEM_LOWER_BOUND);
+ }
if (dump_rbs) {
/* Dumping RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
/* RBs */
- num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
- & 0x0FFF;
+ num_rbs =
+ le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
+ & 0x0FFF;
num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
len += num_rbs * (sizeof(*data) +
sizeof(struct iwl_fw_error_dump_rb) +
@@ -3025,7 +3054,8 @@ static struct iwl_trans_dump_data
}
/* Paged memory for gen2 HW */
- if (trans->cfg->gen2)
+ if (trans->cfg->gen2 &&
+ trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
len += sizeof(*data) +
sizeof(struct iwl_fw_error_dump_paging) +
@@ -3037,41 +3067,51 @@ static struct iwl_trans_dump_data
len = 0;
data = (void *)dump_data->data;
- data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
- txcmd = (void *)data->data;
- spin_lock_bh(&cmdq->lock);
- ptr = cmdq->write_ptr;
- for (i = 0; i < cmdq->n_window; i++) {
- u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
- u32 caplen, cmdlen;
-
- cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
- trans_pcie->tfd_size * ptr);
- caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
-
- if (cmdlen) {
- len += sizeof(*txcmd) + caplen;
- txcmd->cmdlen = cpu_to_le32(cmdlen);
- txcmd->caplen = cpu_to_le32(caplen);
- memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
- txcmd = (void *)((u8 *)txcmd->data + caplen);
+
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
+ u16 tfd_size = trans_pcie->tfd_size;
+
+ data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+ txcmd = (void *)data->data;
+ spin_lock_bh(&cmdq->lock);
+ ptr = cmdq->write_ptr;
+ for (i = 0; i < cmdq->n_window; i++) {
+ u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+ u32 caplen, cmdlen;
+
+ cmdlen = iwl_trans_pcie_get_cmdlen(trans,
+ cmdq->tfds +
+ tfd_size * ptr);
+ caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+ if (cmdlen) {
+ len += sizeof(*txcmd) + caplen;
+ txcmd->cmdlen = cpu_to_le32(cmdlen);
+ txcmd->caplen = cpu_to_le32(caplen);
+ memcpy(txcmd->data, cmdq->entries[idx].cmd,
+ caplen);
+ txcmd = (void *)((u8 *)txcmd->data + caplen);
+ }
+
+ ptr = iwl_queue_dec_wrap(trans, ptr);
}
+ spin_unlock_bh(&cmdq->lock);
- ptr = iwl_queue_dec_wrap(ptr);
+ data->len = cpu_to_le32(len);
+ len += sizeof(*data);
+ data = iwl_fw_error_next_data(data);
}
- spin_unlock_bh(&cmdq->lock);
- data->len = cpu_to_le32(len);
- len += sizeof(*data);
- data = iwl_fw_error_next_data(data);
-
- len += iwl_trans_pcie_dump_csr(trans, &data);
- len += iwl_trans_pcie_fh_regs_dump(trans, &data);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+ len += iwl_trans_pcie_dump_csr(trans, &data);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
+ len += iwl_trans_pcie_fh_regs_dump(trans, &data);
if (dump_rbs)
len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
/* Paged memory for gen2 HW */
- if (trans->cfg->gen2) {
+ if (trans->cfg->gen2 &&
+ trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
struct iwl_fw_error_dump_paging *paging;
dma_addr_t addr =
@@ -3091,8 +3131,8 @@ static struct iwl_trans_dump_data
len += sizeof(*data) + sizeof(*paging) + page_len;
}
}
-
- len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
+ len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
dump_data->len = len;
@@ -3187,6 +3227,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
.txq_free = iwl_trans_pcie_dyn_txq_free,
.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
+ .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -3349,14 +3390,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
#if IS_ENABLED(CONFIG_IWLMVM)
trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
- if (trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR) {
+
+ if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
u32 hw_status;
hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
- if (hw_status & UMAG_GEN_HW_IS_FPGA)
- trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0;
- else
+ if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP)
+ /*
+ * B step FW is the same for the physical card and the FPGA
+ */
+ trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
+ CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) {
+ trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
+ } else {
+ /*
+ * A step, no FPGA
+ */
trans->cfg = &iwl22000_2ac_cfg_hr;
+ }
}
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 48890a1c825f..b99f33ff9123 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,6 +52,7 @@
*****************************************************************************/
#include <linux/pm_runtime.h>
#include <net/tso.h>
+#include <linux/tcp.h>
#include "iwl-debug.h"
#include "iwl-csr.h"
@@ -84,16 +87,20 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
*/
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, u16 byte_cnt,
int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+ struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+ struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
__le16 bc_ent;
- len = DIV_ROUND_UP(len, 4);
+ if (trans_pcie->bc_table_dword)
+ len = DIV_ROUND_UP(len, 4);
if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
return;
@@ -111,7 +118,10 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- scd_bc_tbl->tfd_offset[idx] = bc_ent;
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+ else
+ scd_bc_tbl->tfd_offset[idx] = bc_ent;
}
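A worked packing example (values chosen for illustration): with bc_table_dword set and byte_cnt = 1200, len becomes DIV_ROUND_UP(1200, 4) = 300; with num_fetch_chunks = 2 the table entry is:

	bc_ent = cpu_to_le16(300 | (2 << 12));	/* 0x212C: 12-bit length, chunk count above it */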
/*
@@ -355,52 +365,89 @@ out_err:
return -EINVAL;
}
-static
-struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta)
+static struct iwl_tfh_tfd *
+iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
dma_addr_t tb_phys;
- bool amsdu;
- int i, len, tb1_len, tb2_len, hdr_len;
+ int len;
void *tb1_addr;
- memset(tfd, 0, sizeof(*tfd));
+ tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
- amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
- (*ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
+
+ if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
+ len + IWL_FIRST_TB_SIZE,
+ hdr_len, dev_cmd))
+ goto out_err;
+
+ /* building the A-MSDU might have changed this data, memcpy it now */
+ memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
+ return tfd;
+
+out_err:
+ iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static struct iwl_tfh_tfd *
+iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
+{
+ int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int i, len, tb1_len, tb2_len;
+ void *tb1_addr;
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
+
/* The first TB points to bi-directional DMA data */
- if (!amsdu)
- memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
- /* there must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
-
/*
* The second TB (tb1) points to the remainder of the TX command
* and the 802.11 header - dword aligned size
* (This calculation modifies the TX command, so do it before the
* setup of the first TB)
*/
- len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
- ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
- /* do not align A-MSDU to dword as the subframe header aligns it */
- if (amsdu)
- tb1_len = len;
- else
- tb1_len = ALIGN(len, 4);
+ tb1_len = ALIGN(len, 4);
/* map the data for TB1 */
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@@ -409,23 +456,6 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
- if (amsdu) {
- if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
- tb1_len + IWL_FIRST_TB_SIZE,
- hdr_len, dev_cmd))
- goto out_err;
-
- /*
- * building the A-MSDU might have changed this data, so memcpy
- * it now
- */
- memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE);
- return tfd;
- }
-
/* set up TFD's third entry to point to remainder of skb's head */
tb2_len = skb_headlen(skb) - hdr_len;
@@ -467,13 +497,50 @@ out_err:
return NULL;
}
+static struct iwl_tfh_tfd *
+iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+ int len, hdr_len;
+ bool amsdu;
+
+ /* There must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+
+ memset(tfd, 0, sizeof(*tfd));
+
+ if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ len = sizeof(struct iwl_tx_cmd_gen2);
+ else
+ len = sizeof(struct iwl_tx_cmd_gen3);
+
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (amsdu)
+ return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
+ out_meta, hdr_len, len);
+
+ return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
+ hdr_len, len);
+}
+
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ u16 cmd_len;
int idx;
void *tfd;
@@ -488,11 +555,23 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
- if (iwl_queue_space(txq) < txq->high_mark) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+ } else {
+ struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+ }
+
+ if (iwl_queue_space(trans, txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_queue_space(txq) < 3)) {
+ if (unlikely(iwl_queue_space(trans, txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -526,7 +605,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
+ iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
iwl_pcie_gen2_get_num_tbs(trans, tfd));
/* start timer if queue currently empty */
@@ -538,7 +617,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
/*
* At this point the frame is "transmitted" successfully
@@ -650,7 +729,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
- if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -787,7 +866,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
iwl_trans_ref(trans);
}
/* Increment and update queue's write index */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -954,7 +1033,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_gen2_free_tfd(trans, txq);
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
@@ -1062,6 +1141,9 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
if (!txq)
return -ENOMEM;
ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
+ (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl));
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
@@ -1113,7 +1195,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
txq->id = qid;
trans_pcie->txq[qid] = txq;
- wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1);
+ wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */
txq->read_ptr = wr_ptr;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 473fe7ccb07c..93f0d387688a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -71,27 +71,28 @@
*
***************************************************/
-int iwl_queue_space(const struct iwl_txq *q)
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
unsigned int max;
unsigned int used;
/*
* To avoid ambiguity between empty and completely full queues, there
- * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
- * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+ * should always be less than max_tfd_queue_size elements in the queue.
+ * If q->n_window is smaller than max_tfd_queue_size, there is no need
* to reserve any queue entries for this purpose.
*/
- if (q->n_window < TFD_QUEUE_SIZE_MAX)
+ if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
max = q->n_window;
else
- max = TFD_QUEUE_SIZE_MAX - 1;
+ max = trans->cfg->base_params->max_tfd_queue_size - 1;
/*
- * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
- * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
+ * max_tfd_queue_size is a power of 2, so the following is equivalent to
+ * modulo by max_tfd_queue_size and is well defined.
*/
- used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
+ used = (q->write_ptr - q->read_ptr) &
+ (trans->cfg->base_params->max_tfd_queue_size - 1);
if (WARN_ON(used > max))
return 0;
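A worked wrap-around example for the masked subtraction above (assuming max_tfd_queue_size = 512): write_ptr = 2 and read_ptr = 510 give (2 - 510) & 511 = 4, i.e. four entries in use across the wrap.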
@@ -489,7 +490,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
+ size_t tfd_sz = trans_pcie->tfd_size *
+ trans->cfg->base_params->max_tfd_queue_size;
size_t tb0_buf_sz;
int i;
@@ -555,12 +557,16 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
int ret;
+ u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;
txq->need_update = false;
- /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ /* max_tfd_queue_size must be power-of-two size, otherwise
* iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
- BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+ if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
+ "Max tfd queue size must be a power of two, but is %d",
+ tfd_queue_max_size))
+ return -EINVAL;
/* Initialize queue's high/low-water marks, and head/tail indexes */
ret = iwl_queue_init(txq, slots_num);
@@ -637,7 +643,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
@@ -696,7 +702,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+ trans_pcie->tfd_size *
+ trans->cfg->base_params->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
txq->tfds = NULL;
@@ -916,9 +923,11 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
int ret;
int txq_id, slots_num;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;
- u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
+ bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_gen3_bc_tbl) :
+ sizeof(struct iwlagn_scd_bc_tbl);
/* It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail */
@@ -928,7 +937,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
}
ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
- scd_bc_tbls_size);
+ bc_tbls_size);
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
goto error;
@@ -1064,7 +1073,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id];
- int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
+ int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
+ int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
int last_to_free;
/* This function is not meant to release cmd queue*/
@@ -1079,7 +1089,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
}
- if (txq->read_ptr == tfd_num)
+ if (read_ptr == tfd_num)
goto out;
IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
@@ -1087,12 +1097,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
/* Since we free up to index, _not_ inclusive, the entry before index
* is the last one we will free. That entry must be in use */
- last_to_free = iwl_queue_dec_wrap(tfd_num);
+ last_to_free = iwl_queue_dec_wrap(trans, tfd_num);
if (!iwl_queue_used(txq, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
+ __func__, txq_id, last_to_free,
+ trans->cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
goto out;
}
@@ -1101,10 +1112,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
for (;
- txq->read_ptr != tfd_num;
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
- int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
- struct sk_buff *skb = txq->entries[idx].skb;
+ read_ptr != tfd_num;
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
+ read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
+ struct sk_buff *skb = txq->entries[read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
@@ -1113,7 +1124,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
__skb_queue_tail(skbs, skb);
- txq->entries[idx].skb = NULL;
+ txq->entries[read_ptr].skb = NULL;
if (!trans->cfg->use_tfh)
iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
@@ -1123,7 +1134,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
iwl_pcie_txq_progress(txq);
- if (iwl_queue_space(txq) > txq->low_mark &&
+ if (iwl_queue_space(trans, txq) > txq->low_mark &&
test_bit(txq_id, trans_pcie->queue_stopped)) {
struct sk_buff_head overflow_skbs;
@@ -1155,7 +1166,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
}
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(txq) > txq->low_mark)
+ if (iwl_queue_space(trans, txq) > txq->low_mark)
iwl_wake_queue(trans, txq);
}
@@ -1225,23 +1236,30 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
struct iwl_txq *txq = trans_pcie->txq[txq_id];
unsigned long flags;
int nfreed = 0;
+ u16 r;
lockdep_assert_held(&txq->lock);
- if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
+ idx = iwl_pcie_get_cmd_index(txq, idx);
+ r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+
+ if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
+ (!iwl_queue_used(txq, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
+ __func__, txq_id, idx,
+ trans->cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
return;
}
- for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
+ for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
+ r = iwl_queue_inc_wrap(trans, r)) {
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
- idx, txq->write_ptr, txq->read_ptr);
+ idx, txq->write_ptr, r);
iwl_force_nmi(trans);
}
}
@@ -1555,7 +1573,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -1711,7 +1729,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/* Increment and update queue's write index */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -2311,11 +2329,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
- if (iwl_queue_space(txq) < txq->high_mark) {
+ if (iwl_queue_space(trans, txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_queue_space(txq) < 3)) {
+ if (unlikely(iwl_queue_space(trans, txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -2444,7 +2462,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
if (!wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index d1884b8913e7..0094b1d2b577 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -66,7 +66,7 @@ static void prism2_send_mgmt(struct net_device *dev,
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
static int ap_debug_proc_show(struct seq_file *m, void *v)
{
struct ap_data *ap = PDE_DATA(file_inode(m->file));
@@ -81,8 +81,7 @@ static int ap_debug_proc_show(struct seq_file *m, void *v)
seq_printf(m, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc);
return 0;
}
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta)
{
@@ -990,7 +989,7 @@ static void prism2_send_mgmt(struct net_device *dev,
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
+#ifdef CONFIG_PROC_FS
static int prism2_sta_proc_show(struct seq_file *m, void *v)
{
struct sta_info *sta = m->private;
@@ -1059,6 +1058,7 @@ static int prism2_sta_proc_show(struct seq_file *m, void *v)
return 0;
}
+#endif
static void handle_add_proc_queue(struct work_struct *work)
{
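
The same guard change repeats across the hostap files below: proc show routines are now compiled only when procfs support is actually available, rather than being built and left unreferenced. The pattern in isolation, with a hypothetical show function standing in for the real ones:

	#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
	/* Built only when procfs exists and debug proc files are wanted,
	 * so there are no unused-function warnings with CONFIG_PROC_FS=n.
	 */
	static int example_debug_proc_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "example=1\n");
		return 0;
	}
	#endif
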
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 2720aa39f530..ad1aa65fee7f 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -151,13 +151,6 @@ static int prism2_get_ram_size(local_info_t *local);
#define HFA384X_MAGIC 0x8A32
#endif
-
-static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
-{
- return HFA384X_INW(reg);
-}
-
-
static void hfa384x_read_regs(struct net_device *dev,
struct hfa384x_regs *regs)
{
@@ -2897,7 +2890,12 @@ static void hostap_tick_timer(struct timer_list *t)
}
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
+static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
+{
+ return HFA384X_INW(reg);
+}
+
static int prism2_registers_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -2951,8 +2949,7 @@ static int prism2_registers_proc_show(struct seq_file *m, void *v)
return 0;
}
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
struct set_tim_data {
struct list_head list;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index 5b33ccab9188..703d74cea3c2 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -11,8 +11,7 @@
#define PROC_LIMIT (PAGE_SIZE - 80)
-
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
static int prism2_debug_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -43,9 +42,9 @@ static int prism2_debug_proc_show(struct seq_file *m, void *v)
return 0;
}
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
+#ifdef CONFIG_PROC_FS
static int prism2_stats_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -82,6 +81,7 @@ static int prism2_stats_proc_show(struct seq_file *m, void *v)
return 0;
}
+#endif
static int prism2_wds_proc_show(struct seq_file *m, void *v)
{
@@ -174,6 +174,7 @@ static const struct seq_operations prism2_bss_list_proc_seqops = {
.show = prism2_bss_list_proc_show,
};
+#ifdef CONFIG_PROC_FS
static int prism2_crypt_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -190,6 +191,7 @@ static int prism2_crypt_proc_show(struct seq_file *m, void *v)
}
return 0;
}
+#endif
static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
size_t count, loff_t *_pos)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 18e819d964f1..998dfac0fcff 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2,6 +2,7 @@
* mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -2517,6 +2518,123 @@ out_err:
nlmsg_free(mcast_skb);
}
+static const struct ieee80211_sband_iftype_data he_capa_2ghz = {
+	/* TODO: should we support other types, e.g., P2P? */
+ .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_DUAL_BAND,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes unset, as
+ * DCM, beam forming, RU and PPE threshold information
+ * are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xffff),
+ .tx_mcs_160 = cpu_to_le16(0xffff),
+ .rx_mcs_80p80 = cpu_to_le16(0xffff),
+ .tx_mcs_80p80 = cpu_to_le16(0xffff),
+ },
+ },
+};
+
+static const struct ieee80211_sband_iftype_data he_capa_5ghz = {
+	/* TODO: should we support other types, e.g., P2P? */
+ .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_DUAL_BAND |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes unset, as
+ * DCM, beam forming, RU and PPE threshold information
+ * are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80p80 = cpu_to_le16(0xfffa),
+ },
+ },
+};
+
+static void mac80211_hswim_he_capab(struct ieee80211_supported_band *sband)
+{
+ if (sband->band == NL80211_BAND_2GHZ)
+ sband->iftype_data =
+ (struct ieee80211_sband_iftype_data *)&he_capa_2ghz;
+ else if (sband->band == NL80211_BAND_5GHZ)
+ sband->iftype_data =
+ (struct ieee80211_sband_iftype_data *)&he_capa_5ghz;
+ else
+ return;
+
+ sband->n_iftype_data = 1;
+}
+
static int mac80211_hwsim_new_radio(struct genl_info *info,
struct hwsim_new_radio_params *param)
{
@@ -2678,6 +2796,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband = &data->bands[band];
+
+ sband->band = band;
+
switch (band) {
case NL80211_BAND_2GHZ:
sband->channels = data->channels_2ghz;
@@ -2734,6 +2855,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->ht_cap.mcs.rx_mask[1] = 0xff;
sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ mac80211_hswim_he_capab(sband);
+
hw->wiphy->bands[band] = sband;
}
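
With the two tables above attached via sband->iftype_data and n_iftype_data = 1, a consumer recovers the HE capabilities for a given interface type by scanning the per-iftype entries. A sketch of such a lookup (find_he_cap is a hypothetical helper; mac80211's real accessor may differ):

	static const struct ieee80211_sta_he_cap *
	find_he_cap(const struct ieee80211_supported_band *sband,
		    enum nl80211_iftype iftype)
	{
		int i;

		for (i = 0; i < sband->n_iftype_data; i++) {
			const struct ieee80211_sband_iftype_data *d =
				&sband->iftype_data[i];

			/* an entry applies if its mask covers this iftype */
			if ((d->types_mask & BIT(iftype)) && d->he_cap.has_he)
				return &d->he_cap;
		}
		return NULL;
	}
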
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index f99031cfdf86..57edfada0665 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -1559,10 +1559,10 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
int ret;
size_t i;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES) |
- BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_RX_BYTES) |
- BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
sinfo->tx_bytes = priv->dev->stats.tx_bytes;
sinfo->tx_packets = priv->dev->stats.tx_packets;
sinfo->rx_bytes = priv->dev->stats.rx_bytes;
@@ -1572,14 +1572,14 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
ret = lbs_get_rssi(priv, &signal, &noise);
if (ret == 0) {
sinfo->signal = signal;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
/* Convert priv->cur_rate from hw_value to NL80211 value */
for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) {
if (priv->cur_rate == lbs_rates[i].hw_value) {
sinfo->txrate.legacy = lbs_rates[i].bitrate;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
break;
}
}
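
The BIT() to BIT_ULL() conversion here (and in the mwifiex hunks further down) is needed because sinfo->filled is a u64 bitmap: BIT(n) expands to (1UL << n), which is only 32 bits wide on 32-bit architectures, so any NL80211_STA_INFO_* flag numbered 32 or higher would be shifted away. A sketch of the safe form (mark_signal is a hypothetical helper):

	static void mark_signal(struct station_info *sinfo, s8 signal)
	{
		/* BIT_ULL(n) is (1ULL << n): 64-bit on every architecture */
		sinfo->signal = signal;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
	}
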
diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h
index dd1ee1f0af48..469134930026 100644
--- a/drivers/net/wireless/marvell/libertas/dev.h
+++ b/drivers/net/wireless/marvell/libertas/dev.h
@@ -104,6 +104,7 @@ struct lbs_private {
u8 fw_ready;
u8 surpriseremoved;
u8 setup_fw_on_resume;
+ u8 power_up_on_resume;
int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
void (*reset_card) (struct lbs_private *priv);
int (*power_save) (struct lbs_private *priv);
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 2300e796c6ab..43743c26c071 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1290,15 +1290,23 @@ static void if_sdio_remove(struct sdio_func *func)
static int if_sdio_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- int ret;
struct if_sdio_card *card = sdio_get_drvdata(func);
+ struct lbs_private *priv = card->priv;
+ int ret;
mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
+ priv->power_up_on_resume = false;
/* If we're powered off anyway, just let the mmc layer remove the
* card. */
- if (!lbs_iface_active(card->priv))
- return -ENOSYS;
+ if (!lbs_iface_active(priv)) {
+ if (priv->fw_ready) {
+ priv->power_up_on_resume = true;
+ if_sdio_power_off(card);
+ }
+
+ return 0;
+ }
dev_info(dev, "%s: suspend: PM flags = 0x%x\n",
sdio_func_id(func), flags);
@@ -1306,9 +1314,14 @@ static int if_sdio_suspend(struct device *dev)
/* If we aren't being asked to wake on anything, we should bail out
* and let the SD stack power down the card.
*/
- if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) {
+ if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
dev_info(dev, "Suspend without wake params -- powering down card\n");
- return -ENOSYS;
+ if (priv->fw_ready) {
+ priv->power_up_on_resume = true;
+ if_sdio_power_off(card);
+ }
+
+ return 0;
}
if (!(flags & MMC_PM_KEEP_POWER)) {
@@ -1321,7 +1334,7 @@ static int if_sdio_suspend(struct device *dev)
if (ret)
return ret;
- ret = lbs_suspend(card->priv);
+ ret = lbs_suspend(priv);
if (ret)
return ret;
@@ -1336,6 +1349,11 @@ static int if_sdio_resume(struct device *dev)
dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func));
+ if (card->priv->power_up_on_resume) {
+ if_sdio_power_on(card);
+ wait_event(card->pwron_waitq, card->priv->fw_ready);
+ }
+
ret = lbs_resume(card->priv);
return ret;
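
Both early-return paths in the suspend hunk above perform the same power-down step before reporting success instead of -ENOSYS. A sketch of that step factored into one helper (hypothetical; the patch itself open-codes it twice):

	static int if_sdio_suspend_power_off(struct if_sdio_card *card)
	{
		struct lbs_private *priv = card->priv;

		/* Remember to re-power the card in resume, then cut power
		 * now; returning 0 lets the PM core finish the suspend.
		 */
		if (priv->fw_ready) {
			priv->power_up_on_resume = true;
			if_sdio_power_off(card);
		}
		return 0;
	}
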
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index ffea610f67e2..c67a8e7be310 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -614,6 +614,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
struct if_usb_card *cardp,
struct lbs_private *priv)
{
+ unsigned long flags;
u8 i;
if (recvlength > LBS_CMD_BUFFER_SIZE) {
@@ -623,9 +624,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
return;
}
- BUG_ON(!in_interrupt());
-
- spin_lock(&priv->driver_lock);
+ spin_lock_irqsave(&priv->driver_lock, flags);
i = (priv->resp_idx == 0) ? 1 : 0;
BUG_ON(priv->resp_len[i]);
@@ -635,7 +634,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
kfree_skb(skb);
lbs_notify_command_response(priv, i);
- spin_unlock(&priv->driver_lock);
+ spin_unlock_irqrestore(&priv->driver_lock, flags);
lbs_deb_usbd(&cardp->udev->dev,
"Wake up main thread to handle cmd response\n");
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index 5153922e7ce1..e92fc5001171 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -603,6 +603,8 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
struct if_usb_card *cardp,
struct lbtf_private *priv)
{
+ unsigned long flags;
+
if (recvlength > LBS_CMD_BUFFER_SIZE) {
lbtf_deb_usbd(&cardp->udev->dev,
"The receive buffer is too large\n");
@@ -610,14 +612,12 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
return;
}
- BUG_ON(!in_interrupt());
-
- spin_lock(&priv->driver_lock);
+ spin_lock_irqsave(&priv->driver_lock, flags);
memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN,
recvlength - MESSAGE_HEADER_LEN);
kfree_skb(skb);
lbtf_cmd_response_rx(priv);
- spin_unlock(&priv->driver_lock);
+ spin_unlock_irqrestore(&priv->driver_lock, flags);
}
/**
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 5d75c971004b..e2addd8b878b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -696,10 +696,11 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
"Send delba to tid=%d, %pM\n",
tid, rx_reor_tbl_ptr->ta);
mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
- goto exit;
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
+ return;
}
}
-exit:
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 7ab44cd32a9d..8e63d14c1e1c 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -103,6 +103,8 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
* There could be holes in the buffer, which are skipped by the function.
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -111,25 +113,21 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
{
int pkt_to_send, i;
void *rx_tmp_ptr;
- unsigned long flags;
pkt_to_send = (start_win > tbl->start_win) ?
min((start_win - tbl->start_win), tbl->win_size) :
tbl->win_size;
for (i = 0; i < pkt_to_send; ++i) {
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
rx_tmp_ptr = NULL;
if (tbl->rx_reorder_ptr[i]) {
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
}
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
if (rx_tmp_ptr)
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
/*
* We don't have a circular buffer, hence use rotation to simulate
* circular buffer
@@ -140,7 +138,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
}
tbl->start_win = start_win;
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
/*
@@ -150,6 +147,8 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
* The start window is adjusted automatically when a hole is located.
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -157,21 +156,15 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
{
int i, j, xchg;
void *rx_tmp_ptr;
- unsigned long flags;
for (i = 0; i < tbl->win_size; ++i) {
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
- if (!tbl->rx_reorder_ptr[i]) {
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
+ if (!tbl->rx_reorder_ptr[i])
break;
- }
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
/*
* We don't have a circular buffer, hence use rotation to simulate
* circular buffer
@@ -184,7 +177,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
}
}
tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
/*
@@ -192,6 +184,8 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
*
* The function stops the associated timer and dispatches all the
* pending packets in the Rx reorder table before deletion.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -217,11 +211,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
del_timer_sync(&tbl->timer_context.timer);
tbl->timer_context.timer_is_set = false;
-
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_del(&tbl->list);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
-
kfree(tbl->rx_reorder_ptr);
kfree(tbl);
@@ -234,22 +224,17 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
/*
* This function returns the pointer to an entry in Rx reordering
* table which matches the given TA/TID pair.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
struct mwifiex_rx_reorder_tbl *tbl;
- unsigned long flags;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
- if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
+ if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
return tbl;
- }
- }
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return NULL;
}
@@ -266,14 +251,9 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
return;
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
- if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
+ if (!memcmp(tbl->ta, ta, ETH_ALEN))
mwifiex_del_rx_reorder_entry(priv, tbl);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- }
- }
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return;
@@ -282,24 +262,18 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
/*
* This function finds the last sequence number used in the packets
* buffered in Rx reordering table.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static int
mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
{
struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
- struct mwifiex_private *priv = ctx->priv;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
- if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
+ if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
return i;
- }
- }
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return -1;
}
@@ -317,17 +291,22 @@ mwifiex_flush_data(struct timer_list *t)
struct reorder_tmr_cnxt *ctx =
from_timer(ctx, t, timer);
int start_win, seq_num;
+ unsigned long flags;
ctx->timer_is_set = false;
+ spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
seq_num = mwifiex_11n_find_last_seq_num(ctx);
- if (seq_num < 0)
+ if (seq_num < 0) {
+ spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
return;
+ }
mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
start_win);
+ spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
}
/*
@@ -354,11 +333,14 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
	 * If we get a TID/TA pair which is already present, dispatch all
	 * the packets and advance the window to the ssn
*/
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (tbl) {
mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
/* if !tbl then create one */
new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
if (!new_node)
@@ -569,16 +551,20 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
int prev_start_win, start_win, end_win, win_size;
u16 pkt_index;
bool init_window_shift = false;
+ unsigned long flags;
int ret = 0;
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (!tbl) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
if (pkt_type != PKT_TYPE_BAR)
mwifiex_11n_dispatch_pkt(priv, payload);
return ret;
}
if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_11n_dispatch_pkt(priv, payload);
return ret;
}
@@ -665,6 +651,8 @@ done:
if (!tbl->timer_context.timer_is_set ||
prev_start_win != tbl->start_win)
mwifiex_11n_rxreorder_timer_restart(tbl);
+
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return ret;
}
@@ -693,14 +681,18 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
peer_mac, tid, initiator);
if (cleanup_rx_reorder_tbl) {
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
peer_mac);
if (!tbl) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
mwifiex_dbg(priv->adapter, EVENT,
"event: TID, TA not found in table\n");
return;
}
mwifiex_del_rx_reorder_entry(priv, tbl);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
} else {
ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
if (!ptx_tbl) {
@@ -734,6 +726,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
int tid, win_size;
struct mwifiex_rx_reorder_tbl *tbl;
uint16_t block_ack_param_set;
+ unsigned long flags;
block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
@@ -747,17 +740,20 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
add_ba_rsp->peer_mac_addr, tid);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tbl)
mwifiex_del_rx_reorder_entry(priv, tbl);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return 0;
}
win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
>> BLOCKACKPARAM_WINSIZE_POS;
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tbl) {
@@ -768,6 +764,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
else
tbl->amsdu = false;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_dbg(priv->adapter, CMD,
"cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -807,11 +804,8 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_for_each_entry_safe(del_tbl_ptr, tmp_node,
- &priv->rx_reorder_tbl_ptr, list) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ &priv->rx_reorder_tbl_ptr, list)
mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- }
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
@@ -935,6 +929,7 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
int tlv_buf_left = len;
int ret;
u8 *tmp;
+ unsigned long flags;
mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
event_buf, len);
@@ -954,14 +949,18 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
tlv_bitmap_len);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
rx_reor_tbl_ptr =
mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
tlv_rxba->mac);
if (!rx_reor_tbl_ptr) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
mwifiex_dbg(priv->adapter, ERROR,
"Can not find rx_reorder_tbl!");
return;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
for (i = 0; i < tlv_bitmap_len; i++) {
for (j = 0 ; j < 8; j++) {
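
Throughout this file the locking moves from the leaf helpers into their callers: lookup, dispatch, and deletion now all run inside a single rx_reorder_tbl_lock critical section, which is what the new "caller must hold" comments document and what removes the window in which an entry could be freed between an unlock and a re-lock. The resulting call pattern, in isolation:

	unsigned long flags;
	struct mwifiex_rx_reorder_tbl *tbl;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);	/* lock held */
	if (tbl)
		mwifiex_del_rx_reorder_entry(priv, tbl);	/* lock held */
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
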
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 4b5ae9098504..adc88433faa8 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1353,17 +1353,17 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
{
u32 rate;
- sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) | BIT(NL80211_STA_INFO_TX_BYTES) |
- BIT(NL80211_STA_INFO_RX_PACKETS) | BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_BITRATE) |
- BIT(NL80211_STA_INFO_SIGNAL) | BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ sinfo->filled = BIT_ULL(NL80211_STA_INFO_RX_BYTES) | BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_RX_PACKETS) | BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_TX_BITRATE) |
+ BIT_ULL(NL80211_STA_INFO_SIGNAL) | BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
if (!node)
return -ENOENT;
- sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME) |
- BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
+ BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->inactive_time =
jiffies_to_msecs(jiffies - node->stats.last_rx);
@@ -1413,7 +1413,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
sinfo->txrate.legacy = rate * 5;
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
- sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM);
sinfo->bss_param.flags = 0;
if (priv->curr_bss_params.bss_descriptor.cap_info_bitmap &
WLAN_CAPABILITY_SHORT_PREAMBLE)
@@ -2322,7 +2322,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
if (priv->scan_block)
priv->scan_block = false;
- if (adapter->surprise_removed || adapter->is_cmd_timedout) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) ||
+ test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"%s: Ignore connection.\t"
"Card removed or FW in bad state\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 9cfcdf6bec52..60db2b969e20 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -372,7 +372,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
adapter->ps_state = PS_STATE_SLEEP_CFM;
if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) &&
- (adapter->is_hs_configured &&
+ (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags) &&
!adapter->sleep_period.period)) {
adapter->pm_wakeup_card_req = true;
mwifiex_hs_activated_event(mwifiex_get_priv
@@ -564,25 +564,26 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
return -1;
}
- if (adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: device in suspended state\n");
return -1;
}
- if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
+ if (test_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags) &&
+ cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: host entering sleep state\n");
return -1;
}
- if (adapter->surprise_removed) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: card is removed\n");
return -1;
}
- if (adapter->is_cmd_timedout) {
+ if (test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: FW is in bad state\n");
return -1;
@@ -789,7 +790,8 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
if (priv && (host_cmd->command !=
cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) {
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event(priv, false);
}
}
@@ -825,7 +827,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
return -1;
}
- adapter->is_cmd_timedout = 0;
+ clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
@@ -927,7 +929,7 @@ mwifiex_cmd_timeout_func(struct timer_list *t)
struct mwifiex_adapter *adapter = from_timer(adapter, t, cmd_timer);
struct cmd_ctrl_node *cmd_node;
- adapter->is_cmd_timedout = 1;
+ set_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
if (!adapter->curr_cmd) {
mwifiex_dbg(adapter, ERROR,
"cmd: empty curr_cmd\n");
@@ -953,7 +955,8 @@ mwifiex_cmd_timeout_func(struct timer_list *t)
mwifiex_dbg(adapter, MSG,
"is_cmd_timedout = %d\n",
- adapter->is_cmd_timedout);
+ test_bit(MWIFIEX_IS_CMD_TIMEDOUT,
+ &adapter->work_flags));
mwifiex_dbg(adapter, MSG,
"num_tx_timeout = %d\n",
adapter->dbg.num_tx_timeout);
@@ -1135,7 +1138,8 @@ void
mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
{
if (activated) {
- if (priv->adapter->is_hs_configured) {
+ if (test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &priv->adapter->work_flags)) {
priv->adapter->hs_activated = true;
mwifiex_update_rxreor_flags(priv->adapter,
RXREOR_FORCE_NO_DROP);
@@ -1186,11 +1190,11 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
phs_cfg->params.hs_config.gap);
}
if (conditions != HS_CFG_CANCEL) {
- adapter->is_hs_configured = true;
+ set_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
if (adapter->iface_type == MWIFIEX_USB)
mwifiex_hs_activated_event(priv, true);
} else {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
if (adapter->hs_activated)
mwifiex_hs_activated_event(priv, false);
}
@@ -1212,8 +1216,8 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
adapter->if_ops.wakeup(adapter);
adapter->hs_activated = false;
- adapter->is_hs_configured = false;
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
MWIFIEX_BSS_ROLE_ANY),
false);
@@ -1273,7 +1277,7 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
return;
}
adapter->pm_wakeup_card_req = true;
- if (adapter->is_hs_configured)
+ if (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags))
mwifiex_hs_activated_event(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
true);
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 07453932f703..cce70252fd96 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -813,7 +813,7 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf,
MWIFIEX_SYNC_CMD, &hscfg);
mwifiex_enable_hs(priv->adapter);
- priv->adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &priv->adapter->work_flags);
ret = count;
done:
kfree(buf);
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index b10baacb51c9..75cbd609d606 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -355,8 +355,14 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
case WLAN_EID_HT_OPERATION:
case WLAN_EID_VHT_CAPABILITY:
case WLAN_EID_VHT_OPERATION:
- case WLAN_EID_VENDOR_SPECIFIC:
break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ /* Skip only Microsoft WMM IE */
+ if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WMM,
+ (const u8 *)hdr,
+ hdr->len + sizeof(struct ieee_types_header)))
+ break;
default:
memcpy(gen_ie->ie_buffer + ie_len, hdr,
hdr->len + sizeof(struct ieee_types_header));
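
The new case copies every vendor-specific element into the driver-managed tail except Microsoft's WMM IE, which the firmware generates itself; running cfg80211_find_vendor_ie() over just the one element returns non-NULL exactly when that element is the WMM IE. The test as a standalone predicate (ie_is_wmm is a hypothetical helper):

	static bool ie_is_wmm(const struct ieee_types_header *hdr)
	{
		/* scan only this element's bytes for the Microsoft WMM OUI */
		return cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
					       WLAN_OUI_TYPE_MICROSOFT_WMM,
					       (const u8 *)hdr,
					       hdr->len + sizeof(*hdr)) != NULL;
	}
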
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index d239e9248c05..673e89dff0b5 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -233,7 +233,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->event_received = false;
adapter->data_received = false;
- adapter->surprise_removed = false;
+ clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
@@ -270,7 +270,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
adapter->hs_cfg.conditions = cpu_to_le32(HS_CFG_COND_DEF);
adapter->hs_cfg.gpio = HS_CFG_GPIO_DEF;
adapter->hs_cfg.gap = HS_CFG_GAP_DEF;
@@ -439,7 +439,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
priv = adapter->priv[i];
- spin_lock_init(&priv->rx_pkt_lock);
spin_lock_init(&priv->wmm.ra_list_spinlock);
spin_lock_init(&priv->curr_bcn_buf_lock);
spin_lock_init(&priv->sta_list_spinlock);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 510f6b8e717d..20cee5c397fb 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -404,7 +404,8 @@ process_start:
!skb_queue_empty(&adapter->tx_data_q)) {
mwifiex_process_tx_queue(adapter);
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event
(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -420,7 +421,8 @@ process_start:
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
mwifiex_process_bypass_tx(adapter);
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event
(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -435,7 +437,8 @@ process_start:
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
mwifiex_wmm_process_tx(adapter);
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event
(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -647,7 +650,7 @@ err_dnld_fw:
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
@@ -870,7 +873,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
"data: %lu BSS(%d-%d): Data <= kernel\n",
jiffies, priv->bss_type, priv->bss_num);
- if (priv->adapter->surprise_removed) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags)) {
kfree_skb(skb);
priv->stats.tx_dropped++;
return 0;
@@ -1279,7 +1282,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
static u16
mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
skb->priority = cfg80211_classify8021d(skb, NULL);
return mwifiex_1d_to_wmm_queue[skb->priority];
@@ -1371,7 +1375,7 @@ static void mwifiex_rx_work_queue(struct work_struct *work)
struct mwifiex_adapter *adapter =
container_of(work, struct mwifiex_adapter, rx_work);
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
return;
mwifiex_process_rx(adapter);
}
@@ -1387,7 +1391,7 @@ static void mwifiex_main_work_queue(struct work_struct *work)
struct mwifiex_adapter *adapter =
container_of(work, struct mwifiex_adapter, main_work);
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
return;
mwifiex_main_process(adapter);
}
@@ -1404,7 +1408,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
if (adapter->if_ops.disable_int)
adapter->if_ops.disable_int(adapter);
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
adapter->int_status = 0;
@@ -1492,11 +1496,11 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
adapter->if_ops.up_dev(adapter);
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
- adapter->surprise_removed = false;
+ clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
init_waitqueue_head(&adapter->init_wait_q);
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
adapter->hs_activated = false;
- adapter->is_cmd_timedout = 0;
+ clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
init_waitqueue_head(&adapter->hs_activate_wait_q);
init_waitqueue_head(&adapter->cmd_wait_q.wait);
adapter->cmd_wait_q.status = 0;
@@ -1551,7 +1555,7 @@ err_init_fw:
adapter->if_ops.unregister_dev(adapter);
err_kmalloc:
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
mwifiex_dbg(adapter, ERROR,
@@ -1648,9 +1652,9 @@ mwifiex_add_card(void *card, struct completion *fw_done,
adapter->fw_done = fw_done;
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
- adapter->surprise_removed = false;
+ clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
init_waitqueue_head(&adapter->init_wait_q);
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
adapter->hs_activated = false;
init_waitqueue_head(&adapter->hs_activate_wait_q);
init_waitqueue_head(&adapter->cmd_wait_q.wait);
@@ -1698,7 +1702,7 @@ err_init_fw:
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
err_registerdev:
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
pr_debug("info: %s: shutdown mwifiex\n", __func__);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 69ac0a22c28c..b025ba164412 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -517,6 +517,14 @@ enum mwifiex_iface_work_flags {
MWIFIEX_IFACE_WORK_CARD_RESET,
};
+enum mwifiex_adapter_work_flags {
+ MWIFIEX_SURPRISE_REMOVED,
+ MWIFIEX_IS_CMD_TIMEDOUT,
+ MWIFIEX_IS_SUSPENDED,
+ MWIFIEX_IS_HS_CONFIGURED,
+ MWIFIEX_IS_HS_ENABLING,
+};
+
struct mwifiex_band_config {
u8 chan_band:2;
u8 chan_width:2;
@@ -616,9 +624,6 @@ struct mwifiex_private {
struct list_head rx_reorder_tbl_ptr;
/* spin lock for rx_reorder_tbl_ptr queue */
spinlock_t rx_reorder_tbl_lock;
- /* spin lock for Rx packets */
- spinlock_t rx_pkt_lock;
-
#define MWIFIEX_ASSOC_RSP_BUF_SIZE 500
u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE];
u32 assoc_rsp_size;
@@ -875,7 +880,7 @@ struct mwifiex_adapter {
struct device *dev;
struct wiphy *wiphy;
u8 perm_addr[ETH_ALEN];
- bool surprise_removed;
+ unsigned long work_flags;
u32 fw_release_number;
u8 intf_hdr_len;
u16 init_wait_q_woken;
@@ -929,7 +934,6 @@ struct mwifiex_adapter {
struct cmd_ctrl_node *curr_cmd;
/* spin lock for command */
spinlock_t mwifiex_cmd_lock;
- u8 is_cmd_timedout;
u16 last_init_cmd;
struct timer_list cmd_timer;
struct list_head cmd_free_q;
@@ -979,13 +983,10 @@ struct mwifiex_adapter {
u16 pps_uapsd_mode;
u32 pm_wakeup_fw_try;
struct timer_list wakeup_timer;
- u8 is_hs_configured;
struct mwifiex_hs_config_param hs_cfg;
u8 hs_activated;
u16 hs_activate_wait_q_woken;
wait_queue_head_t hs_activate_wait_q;
- bool is_suspended;
- bool hs_enabling;
u8 event_body[MAX_EVENT_SIZE];
u32 hw_dot_11n_dev_cap;
u8 hw_dev_mcs_support;
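
Folding five independent bool/u8 fields into the single unsigned long work_flags lets every path use the atomic set_bit()/clear_bit()/test_bit() helpers, so suspend, command-timeout, and removal state can be flipped from irq handlers, workqueues, and process context without racing on non-atomic read-modify-write of adjacent struct members. The pattern in isolation (adapter_can_send is a hypothetical predicate combining two of the new flags):

	static bool adapter_can_send(struct mwifiex_adapter *adapter)
	{
		/* each test_bit() is an atomic read of one flag bit */
		return !test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags) &&
		       !test_bit(MWIFIEX_SURPRISE_REMOVED,
				 &adapter->work_flags);
	}
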
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 0c42b7296ddd..3fe81b2a929a 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -170,7 +170,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
- adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
mwifiex_disable_wake(adapter);
return -EFAULT;
}
@@ -178,8 +178,8 @@ static int mwifiex_pcie_suspend(struct device *dev)
flush_workqueue(adapter->workqueue);
/* Indicate device suspended */
- adapter->is_suspended = true;
- adapter->hs_enabling = false;
+ set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
return 0;
}
@@ -207,13 +207,13 @@ static int mwifiex_pcie_resume(struct device *dev)
adapter = card->adapter;
- if (!adapter->is_suspended) {
+ if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, WARN,
"Device already resumed\n");
return 0;
}
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_ASYNC_CMD);
@@ -2430,7 +2430,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
}
adapter = card->adapter;
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
goto exit;
if (card->msix_enable)
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 895b806cdb03..8e483b0bc3b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1495,7 +1495,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
return -EBUSY;
}
- if (adapter->surprise_removed || adapter->is_cmd_timedout) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) ||
+ test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"Ignore scan. Card removed or firmware in bad state\n");
return -EFAULT;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index dfdcbc4f141a..d49fbd58afa7 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -181,13 +181,13 @@ static int mwifiex_sdio_resume(struct device *dev)
adapter = card->adapter;
- if (!adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, WARN,
"device already resumed\n");
return 0;
}
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
/* Disable Host Sleep */
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
@@ -260,7 +260,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len;
u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
- if (adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"%s: not allowed while suspended\n", __func__);
return -1;
@@ -450,7 +450,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
- adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
mwifiex_disable_wake(adapter);
return -EFAULT;
}
@@ -460,8 +460,8 @@ static int mwifiex_sdio_suspend(struct device *dev)
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
/* Indicate device suspended */
- adapter->is_suspended = true;
- adapter->hs_enabling = false;
+ set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
return ret;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 03a6492662ca..a327fc5b36e3 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -224,7 +224,8 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code,
adapter->tx_lock_flag = false;
adapter->pps_uapsd_mode = false;
- if (adapter->is_cmd_timedout && adapter->curr_cmd)
+ if (test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags) &&
+ adapter->curr_cmd)
return;
priv->media_connected = false;
mwifiex_dbg(adapter, MSG,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 5414b755cf82..b454b5f85503 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -419,7 +419,8 @@ int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
}
if (hs_cfg->is_invoke_hostcmd) {
if (hs_cfg->conditions == HS_CFG_CANCEL) {
- if (!adapter->is_hs_configured)
+ if (!test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags))
/* Already cancelled */
break;
/* Save previous condition */
@@ -535,7 +536,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
memset(&hscfg, 0, sizeof(hscfg));
hscfg.is_invoke_hostcmd = true;
- adapter->hs_enabling = true;
+ set_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
mwifiex_cancel_all_pending_cmd(adapter);
if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
@@ -601,7 +602,8 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
else
info->wep_status = false;
- info->is_hs_configured = adapter->is_hs_configured;
+ info->is_hs_configured = test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
info->is_deep_sleep = adapter->is_deep_sleep;
return 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index 620f8650a742..37c24b95e642 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -143,7 +143,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
int ret;
struct mwifiex_txinfo *tx_info = NULL;
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
return -1;
if (!priv->media_connected)
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 1e6a62c69ac5..a83c5afc256a 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -289,32 +289,6 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
src_node->stats.rx_packets++;
}
- skb->dev = priv->netdev;
- skb->protocol = eth_type_trans(skb, priv->netdev);
- skb->ip_summed = CHECKSUM_NONE;
-
-	/* This is required only for 11n over USB/PCIE, as we allocate
-	 * a 4K buffer only for 11N (to be able to receive 4K AMSDU
-	 * packets). For SD we allocate buffers based on the packet
-	 * size, so this is not needed.
-	 *
-	 * We modify the truesize here because our allocation for each
-	 * skb is 4K while we only receive 2K packets, which causes the
-	 * kernel to start dropping packets when an application has
-	 * sized its receive buffer for 2K packets: if a 64K packet
-	 * arrives (in IP fragments) and the application allocates 64K
-	 * for it, the accounted size almost doubles because we pass up
-	 * each 1.5K fragment in a 4K allocation. As soon as the 64K
-	 * limit is hit, the kernel drops the remaining fragments.
-	 * Currently the Filesndl-ht.scr script fails for UDP because
-	 * of this, hence this fix.
-	 */
- if ((adapter->iface_type == MWIFIEX_USB ||
- adapter->iface_type == MWIFIEX_PCIE) &&
- (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
- skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
-
if (is_multicast_ether_addr(p_ethhdr->h_dest) ||
mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) {
if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)
@@ -350,6 +324,32 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
return 0;
}
+ skb->dev = priv->netdev;
+ skb->protocol = eth_type_trans(skb, priv->netdev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+	/* This is required only for 11n over USB/PCIE, as we allocate
+	 * a 4K buffer only for 11N (to be able to receive 4K AMSDU
+	 * packets). For SD we allocate buffers based on the packet
+	 * size, so this is not needed.
+	 *
+	 * We modify the truesize here because our allocation for each
+	 * skb is 4K while we only receive 2K packets, which causes the
+	 * kernel to start dropping packets when an application has
+	 * sized its receive buffer for 2K packets: if a 64K packet
+	 * arrives (in IP fragments) and the application allocates 64K
+	 * for it, the accounted size almost doubles because we pass up
+	 * each 1.5K fragment in a 4K allocation. As soon as the 64K
+	 * limit is hit, the kernel drops the remaining fragments.
+	 * Currently the Filesndl-ht.scr script fails for UDP because
+	 * of this, hence this fix.
+	 */
+ if ((adapter->iface_type == MWIFIEX_USB ||
+ adapter->iface_type == MWIFIEX_PCIE) &&
+ skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)
+ skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
+
/* Forward multicast/broadcast packet to upper layer*/
if (in_interrupt())
netif_rx(skb);
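
Relocating the skb->dev/protocol/truesize fixups below the intra-BSS forwarding branch means, as far as this hunk shows, that they now run only for frames actually handed up to the local stack; frames cloned back onto the TX path keep their original fields. The truesize adjustment itself, in isolation:

	/* Sketch of the hunk above; see the in-tree comment for the full
	 * rationale. Only adjust when this skb uses an oversized buffer.
	 */
	if (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)
		skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
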
@@ -421,12 +421,15 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
if (!priv->ap_11n_enabled ||
(!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
(le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
ret = mwifiex_handle_uap_rx_forward(priv, skb);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return ret;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
/* Reorder and send to kernel */
pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 88f4c89f89ba..433c6a16870b 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -181,7 +181,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
atomic_dec(&card->rx_data_urb_pending);
if (recv_length) {
- if (urb->status || (adapter->surprise_removed)) {
+ if (urb->status ||
+ test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"URB status is failed: %d\n", urb->status);
/* Do not free skb in case of command ep */
@@ -218,10 +219,10 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
dev_kfree_skb_any(skb);
}
} else if (urb->status) {
- if (!adapter->is_suspended) {
+ if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, FATAL,
"Card is removed: %d\n", urb->status);
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
}
dev_kfree_skb_any(skb);
return;
@@ -529,7 +530,7 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
return 0;
}
- if (unlikely(adapter->is_suspended))
+ if (unlikely(test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)))
mwifiex_dbg(adapter, WARN,
"Device already suspended\n");
@@ -537,19 +538,19 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
- adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
return -EFAULT;
}
- /* 'is_suspended' flag indicates device is suspended.
+ /* 'MWIFIEX_IS_SUSPENDED' bit indicates device is suspended.
* It must be set here before the usb_kill_urb() calls. Reason
* is in the complete handlers, urb->status(= -ENOENT) and
* this flag is used in combination to distinguish between a
* 'suspended' state and a 'disconnect' one.
*/
- adapter->is_suspended = true;
- adapter->hs_enabling = false;
+ set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
usb_kill_urb(card->rx_cmd.urb);
@@ -593,7 +594,7 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
}
adapter = card->adapter;
- if (unlikely(!adapter->is_suspended)) {
+ if (unlikely(!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags))) {
mwifiex_dbg(adapter, WARN,
"Device already resumed\n");
return 0;
@@ -602,7 +603,7 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
/* Indicate device resumed. The netdev queue will be resumed only
* after the urbs have been re-submitted
*/
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
if (!atomic_read(&card->rx_data_urb_pending))
for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
@@ -1158,13 +1159,13 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
unsigned long flags;
int idx, ret;
- if (adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"%s: not allowed while suspended\n", __func__);
return -1;
}
- if (adapter->surprise_removed) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__);
return -1;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 6dd212898117..f9b71539d33e 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -197,9 +197,11 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
info->is_deep_sleep = adapter->is_deep_sleep;
info->pm_wakeup_card_req = adapter->pm_wakeup_card_req;
info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try;
- info->is_hs_configured = adapter->is_hs_configured;
+ info->is_hs_configured = test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
info->hs_activated = adapter->hs_activated;
- info->is_cmd_timedout = adapter->is_cmd_timedout;
+ info->is_cmd_timedout = test_bit(MWIFIEX_IS_CMD_TIMEDOUT,
+ &adapter->work_flags);
info->num_cmd_host_to_card_failure
= adapter->dbg.num_cmd_host_to_card_failure;
info->num_cmd_sleep_cfm_host_to_card_failure
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 936a0a841af8..407b9932ca4d 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -599,7 +599,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
if (priv->adapter->if_ops.clean_pcie_ring &&
- !priv->adapter->surprise_removed)
+ !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index fc05d79c80d0..b6c5f17dca30 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -1,10 +1,37 @@
config MT76_CORE
tristate
+config MT76_USB
+ tristate
+ depends on MT76_CORE
+
+config MT76x2_COMMON
+ tristate
+ depends on MT76_CORE
+
+config MT76x0U
+ tristate "MediaTek MT76x0U (USB) support"
+ select MT76_CORE
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7610U-based wireless USB dongles.
+
config MT76x2E
tristate "MediaTek MT76x2E (PCIe) support"
select MT76_CORE
+ select MT76x2_COMMON
depends on MAC80211
depends on PCI
---help---
This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
+
+config MT76x2U
+ tristate "MediaTek MT76x2U (USB) support"
+ select MT76_CORE
+ select MT76_USB
+ select MT76x2_COMMON
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7612U-based wireless USB dongles.
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index a0156bc01dea..158d10d2716c 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -1,15 +1,31 @@
obj-$(CONFIG_MT76_CORE) += mt76.o
+obj-$(CONFIG_MT76_USB) += mt76-usb.o
+obj-$(CONFIG_MT76x0U) += mt76x0/
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
obj-$(CONFIG_MT76x2E) += mt76x2e.o
+obj-$(CONFIG_MT76x2U) += mt76x2u.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
+mt76-usb-y := usb.o usb_trace.o usb_mcu.o
+
CFLAGS_trace.o := -I$(src)
+CFLAGS_usb_trace.o := -I$(src)
+
+mt76x2-common-y := \
+ mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \
+ mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o \
+ mt76x2_debugfs.o
mt76x2e-y := \
mt76x2_pci.o mt76x2_dma.o \
- mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \
- mt76x2_core.o mt76x2_mac.o mt76x2_eeprom.o mt76x2_mcu.o mt76x2_phy.o \
+ mt76x2_main.o mt76x2_init.o mt76x2_tx.o \
+ mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \
mt76x2_dfs.o mt76x2_trace.o
+mt76x2u-y := \
+ mt76x2_usb.o mt76x2u_init.o mt76x2u_main.o mt76x2u_mac.o \
+ mt76x2u_mcu.o mt76x2u_phy.o mt76x2u_core.o
+
CFLAGS_mt76x2_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 1e8cdce919d9..73c8b2805c97 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -113,7 +113,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
if (nframes)
ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
REORDER_TIMEOUT);
- mt76_rx_complete(dev, &frames, -1);
+ mt76_rx_complete(dev, &frames, NULL);
rcu_read_unlock();
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 3dbedcedc2c4..c51da2205b93 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -239,6 +239,80 @@ mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
iowrite32(q->head, &q->regs->cpu_idx);
}
+int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_queue_entry e;
+ struct mt76_txwi_cache *t;
+ struct mt76_queue_buf buf[32];
+ struct sk_buff *iter;
+ dma_addr_t addr;
+ int len;
+ u32 tx_info = 0;
+ int n, ret;
+
+ t = mt76_get_txwi(dev);
+ if (!t) {
+ ieee80211_free_txskb(dev->hw, skb);
+ return -ENOMEM;
+ }
+
+ dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+ ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
+ &tx_info);
+ dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+ if (ret < 0)
+ goto free;
+
+ len = skb->len - skb->data_len;
+ addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, addr)) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
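+ /* slot 0 carries the TXWI descriptor, followed by the skb head
+ * and one slot per paged fragment
+ */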
+ n = 0;
+ buf[n].addr = t->dma_addr;
+ buf[n++].len = dev->drv->txwi_size;
+ buf[n].addr = addr;
+ buf[n++].len = len;
+
+ skb_walk_frags(skb, iter) {
+ if (n == ARRAY_SIZE(buf))
+ goto unmap;
+
+ addr = dma_map_single(dev->dev, iter->data, iter->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, addr))
+ goto unmap;
+
+ buf[n].addr = addr;
+ buf[n++].len = iter->len;
+ }
+
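+ /* each hardware descriptor holds two buffers, so n entries
+ * consume (n + 1) / 2 descriptor slots
+ */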
+ if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
+ goto unmap;
+
+ return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
+
+unmap:
+ ret = -ENOMEM;
+ for (n--; n > 0; n--)
+ dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
+ DMA_TO_DEVICE);
+
+free:
+ e.skb = skb;
+ e.txwi = t;
+ dev->drv->tx_complete_skb(dev, q, &e, true);
+ mt76_put_txwi(dev, t);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
+
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
{
@@ -400,7 +474,7 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
do {
cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
- mt76_rx_poll_complete(dev, qid);
+ mt76_rx_poll_complete(dev, qid, napi);
done += cur;
} while (cur && done < budget);
@@ -436,6 +510,7 @@ static const struct mt76_queue_ops mt76_dma_ops = {
.init = mt76_dma_init,
.alloc = mt76_dma_alloc_queue,
.add_buf = mt76_dma_add_buf,
+ .tx_queue_skb = mt76_dma_tx_queue_skb,
.tx_cleanup = mt76_dma_tx_cleanup,
.rx_reset = mt76_dma_rx_reset,
.kick = mt76_dma_kick_queue,
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index 1dad39697929..27248e24a19b 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -25,6 +25,39 @@
#define MT_DMA_CTL_LAST_SEC0 BIT(30)
#define MT_DMA_CTL_DMA_DONE BIT(31)
+#define MT_TXD_INFO_LEN GENMASK(15, 0)
+#define MT_TXD_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_INFO_TX_BURST BIT(17)
+#define MT_TXD_INFO_80211 BIT(19)
+#define MT_TXD_INFO_TSO BIT(20)
+#define MT_TXD_INFO_CSO BIT(21)
+#define MT_TXD_INFO_WIV BIT(24)
+#define MT_TXD_INFO_QSEL GENMASK(26, 25)
+#define MT_TXD_INFO_DPORT GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE GENMASK(31, 30)
+
+#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
+#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
+#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
+#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
+#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
+#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
+#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
+
+/* MCU request message header */
+#define MT_MCU_MSG_LEN GENMASK(15, 0)
+#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
+#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
+#define MT_MCU_MSG_PORT GENMASK(29, 27)
+#define MT_MCU_MSG_TYPE GENMASK(31, 30)
+#define MT_MCU_MSG_TYPE_CMD BIT(30)
+
+#define MT_DMA_HDR_LEN 4
+#define MT_RX_INFO_LEN 4
+#define MT_FCE_INFO_LEN 4
+#define MT_RX_RXWI_LEN 32
+
struct mt76_desc {
__le32 buf0;
__le32 ctrl;
@@ -32,6 +65,16 @@ struct mt76_desc {
__le32 info;
} __packed __aligned(4);
+enum dma_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
int mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index d62e34e7eadf..029d54bce9e8 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -303,14 +303,6 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
SET_IEEE80211_DEV(hw, dev->dev);
SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
- wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_ADHOC);
-
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
wiphy->available_antennas_tx = dev->antenna_mask;
@@ -591,15 +583,11 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
}
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
- int queue)
+ struct napi_struct *napi)
{
- struct napi_struct *napi = NULL;
struct ieee80211_sta *sta;
struct sk_buff *skb;
- if (queue >= 0)
- napi = &dev->napi[queue];
-
spin_lock(&dev->rx_lock);
while ((skb = __skb_dequeue(frames)) != NULL) {
if (mt76_check_ccmp_pn(skb)) {
@@ -613,7 +601,8 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
spin_unlock(&dev->rx_lock);
}
-void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+ struct napi_struct *napi)
{
struct sk_buff_head frames;
struct sk_buff *skb;
@@ -625,5 +614,6 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
mt76_rx_aggr_reorder(skb, &frames);
}
- mt76_rx_complete(dev, &frames, q);
+ mt76_rx_complete(dev, &frames, napi);
}
+EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index d2166fbf50ff..2eab35879163 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
+#include <linux/usb.h>
#include <net/mac80211.h>
#include "util.h"
@@ -30,6 +31,7 @@
#define MT_RX_BUF_SIZE 2048
struct mt76_dev;
+struct mt76_wcid;
struct mt76_bus_ops {
u32 (*rr)(struct mt76_dev *dev, u32 offset);
@@ -62,12 +64,22 @@ struct mt76_queue_buf {
int len;
};
+struct mt76u_buf {
+ struct mt76_dev *dev;
+ struct urb *urb;
+ size_t len;
+ bool done;
+};
+
struct mt76_queue_entry {
union {
void *buf;
struct sk_buff *skb;
};
- struct mt76_txwi_cache *txwi;
+ union {
+ struct mt76_txwi_cache *txwi;
+ struct mt76u_buf ubuf;
+ };
bool schedule;
};
@@ -88,6 +100,7 @@ struct mt76_queue {
struct list_head swq;
int swq_queued;
+ u16 first;
u16 head;
u16 tail;
int ndesc;
@@ -110,6 +123,10 @@ struct mt76_queue_ops {
struct mt76_queue_buf *buf, int nbufs, u32 info,
struct sk_buff *skb, void *txwi);
+ int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
+
void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
int *len, u32 *info, bool *more);
@@ -187,9 +204,13 @@ struct mt76_rx_tid {
enum {
MT76_STATE_INITIALIZED,
MT76_STATE_RUNNING,
+ MT76_STATE_MCU_RUNNING,
MT76_SCANNING,
MT76_RESET,
MT76_OFFCHANNEL,
+ MT76_REMOVED,
+ MT76_READING_STATS,
+ MT76_MORE_STATS,
};
struct mt76_hw_cap {
@@ -210,6 +231,8 @@ struct mt76_driver_ops {
void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush);
+ bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
+
void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
struct sk_buff *skb);
@@ -229,6 +252,64 @@ struct mt76_sband {
struct mt76_channel_state *chan;
};
+/* addr req mask */
+#define MT_VEND_TYPE_EEPROM BIT(31)
+#define MT_VEND_TYPE_CFG BIT(30)
+#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
+
+#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
+enum mt_vendor_req {
+ MT_VEND_DEV_MODE = 0x1,
+ MT_VEND_WRITE = 0x2,
+ MT_VEND_MULTI_WRITE = 0x6,
+ MT_VEND_MULTI_READ = 0x7,
+ MT_VEND_READ_EEPROM = 0x9,
+ MT_VEND_WRITE_FCE = 0x42,
+ MT_VEND_WRITE_CFG = 0x46,
+ MT_VEND_READ_CFG = 0x47,
+};
+
+enum mt76u_in_ep {
+ MT_EP_IN_PKT_RX,
+ MT_EP_IN_CMD_RESP,
+ __MT_EP_IN_MAX,
+};
+
+enum mt76u_out_ep {
+ MT_EP_OUT_INBAND_CMD,
+ MT_EP_OUT_AC_BK,
+ MT_EP_OUT_AC_BE,
+ MT_EP_OUT_AC_VI,
+ MT_EP_OUT_AC_VO,
+ MT_EP_OUT_HCCA,
+ __MT_EP_OUT_MAX,
+};
+
+#define MT_SG_MAX_SIZE 8
+#define MT_NUM_TX_ENTRIES 256
+#define MT_NUM_RX_ENTRIES 128
+#define MCU_RESP_URB_SIZE 1024
+struct mt76_usb {
+ struct mutex usb_ctrl_mtx;
+ u8 data[32];
+
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+ struct delayed_work stat_work;
+
+ u8 out_ep[__MT_EP_OUT_MAX];
+ u16 out_max_packet;
+ u8 in_ep[__MT_EP_IN_MAX];
+ u16 in_max_packet;
+
+ struct mt76u_mcu {
+ struct mutex mutex;
+ struct completion cmpl;
+ struct mt76u_buf res;
+ u32 msg_seq;
+ } mcu;
+};
+
struct mt76_dev {
struct ieee80211_hw *hw;
struct cfg80211_chan_def chandef;
@@ -271,6 +352,8 @@ struct mt76_dev {
char led_name[32];
bool led_al;
u8 led_pin;
+
+ struct mt76_usb usb;
};
enum mt76_phy_type {
@@ -390,6 +473,26 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
+/* increment with wrap-around (size must be a power of two) */
+static inline int mt76_incr(int val, int size)
+{
+ return (val + 1) & (size - 1);
+}
+
+/* decrement with wrap-around (size must be a power of two) */
+static inline int mt76_decr(int val, int size)
+{
+ return (val - 1) & (size - 1);
+}
+
+/* Hardware uses mirrored order of queues with Q3
+ * having the highest priority
+ */
+static inline u8 q2hwq(u8 q)
+{
+ return q ^ 0x3;
+}
+
static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
@@ -409,9 +512,9 @@ wcid_to_sta(struct mt76_wcid *wcid)
return container_of(ptr, struct ieee80211_sta, drv_priv);
}
-int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta);
+int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
@@ -442,10 +545,69 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
/* internal */
void mt76_tx_free(struct mt76_dev *dev);
+struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
- int queue);
-void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
+ struct napi_struct *napi);
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+ struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
+/* usb */
+static inline bool mt76u_urb_error(struct urb *urb)
+{
+ return urb->status &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN &&
+ urb->status != -ENOENT;
+}
+
+/* Map hardware queues to usb endpoints */
+static inline u8 q2ep(u8 qid)
+{
+ /* TODO: take management packets to queue 5 */
+ return qid + 1;
+}
+
+static inline bool mt76u_check_sg(struct mt76_dev *dev)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+
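+ /* scatter-gather is usable only when the HCD supports it and
+ * either imposes no sg length constraint or runs a wireless link
+ */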
+ return (udev->bus->sg_tablesize > 0 &&
+ (udev->bus->no_sg_constraint ||
+ udev->speed == USB_SPEED_WIRELESS));
+}
+
+int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len);
+void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
+ const u16 offset, const u32 val);
+u32 mt76u_rr(struct mt76_dev *dev, u32 addr);
+void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
+void mt76u_deinit(struct mt76_dev *dev);
+int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
+ int nsgs, int len, int sglen, gfp_t gfp);
+void mt76u_buf_free(struct mt76u_buf *buf);
+int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
+ struct mt76u_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context);
+int mt76u_submit_rx_buffers(struct mt76_dev *dev);
+int mt76u_alloc_queues(struct mt76_dev *dev);
+void mt76u_stop_queues(struct mt76_dev *dev);
+void mt76u_stop_stat_wk(struct mt76_dev *dev);
+void mt76u_queues_deinit(struct mt76_dev *dev);
+int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
+
+int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+ int data_len, u32 max_payload, u32 offset);
+void mt76u_mcu_complete_urb(struct urb *urb);
+struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len);
+int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp);
+void mt76u_mcu_fw_reset(struct mt76_dev *dev);
+int mt76u_mcu_init_rx(struct mt76_dev *dev);
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
new file mode 100644
index 000000000000..7843908261ba
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_MT76x0U) += mt76x0.o
+
+mt76x0-objs = \
+ usb.o init.o main.o mcu.o trace.o dma.o eeprom.o phy.o \
+ mac.o util.o debugfs.o tx.o core.o
+# ccflags-y := -DDEBUG
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/core.c b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
new file mode 100644
index 000000000000..892803fce842
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+
+int mt76x0_wait_asic_ready(struct mt76x0_dev *dev)
+{
+ int i = 100;
+ u32 val;
+
+ do {
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -EIO;
+
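+ /* MT_MAC_CSR0 reads as 0 or all-ones until the ASIC is ready */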
+ val = mt76_rr(dev, MT_MAC_CSR0);
+ if (val && ~val)
+ return 0;
+
+ udelay(10);
+ } while (i--);
+
+ return -EIO;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
new file mode 100644
index 000000000000..e7a77a886068
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+
+#include "mt76x0.h"
+#include "eeprom.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+ struct mt76x0_dev *dev = data;
+
+ mt76_wr(dev, dev->debugfs_reg, val);
+ return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+ struct mt76x0_dev *dev = data;
+
+ *val = mt76_rr(dev, dev->debugfs_reg);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+
+static int
+mt76x0_ampdu_stat_read(struct seq_file *file, void *data)
+{
+ struct mt76x0_dev *dev = file->private;
+ int i, j;
+
+#define stat_printf(grp, off, name) \
+ seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
+
+ stat_printf(rx_stat, 0, rx_crc_err);
+ stat_printf(rx_stat, 1, rx_phy_err);
+ stat_printf(rx_stat, 2, rx_false_cca);
+ stat_printf(rx_stat, 3, rx_plcp_err);
+ stat_printf(rx_stat, 4, rx_fifo_overflow);
+ stat_printf(rx_stat, 5, rx_duplicate);
+
+ stat_printf(tx_stat, 0, tx_fail_cnt);
+ stat_printf(tx_stat, 1, tx_bcn_cnt);
+ stat_printf(tx_stat, 2, tx_success);
+ stat_printf(tx_stat, 3, tx_retransmit);
+ stat_printf(tx_stat, 4, tx_zero_len);
+ stat_printf(tx_stat, 5, tx_underflow);
+
+ stat_printf(aggr_stat, 0, non_aggr_tx);
+ stat_printf(aggr_stat, 1, aggr_tx);
+
+ stat_printf(zero_len_del, 0, tx_zero_len_del);
+ stat_printf(zero_len_del, 1, rx_zero_len_del);
+#undef stat_printf
+
+ seq_puts(file, "Aggregations stats:\n");
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8; j++)
+ seq_printf(file, "%08llx ",
+ dev->stats.aggr_n[i * 8 + j]);
+ seq_putc(file, '\n');
+ }
+
+ seq_printf(file, "recent average AMPDU len: %d\n",
+ atomic_read(&dev->avg_ampdu_len));
+
+ return 0;
+}
+
+static int
+mt76x0_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt76x0_ampdu_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_ampdu_stat = {
+ .open = mt76x0_ampdu_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
+mt76x0_eeprom_param_read(struct seq_file *file, void *data)
+{
+ struct mt76x0_dev *dev = file->private;
+ int i;
+
+ seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
+ seq_printf(file, "RSSI offset 2GHz: %hhx %hhx\n",
+ dev->ee->rssi_offset_2ghz[0], dev->ee->rssi_offset_2ghz[1]);
+ seq_printf(file, "RSSI offset 5GHz: %hhx %hhx %hhx\n",
+ dev->ee->rssi_offset_5ghz[0], dev->ee->rssi_offset_5ghz[1],
+ dev->ee->rssi_offset_5ghz[2]);
+ seq_printf(file, "Temperature offset: %hhx\n", dev->ee->temp_off);
+ seq_printf(file, "LNA gain 2Ghz: %hhx\n", dev->ee->lna_gain_2ghz);
+ seq_printf(file, "LNA gain 5Ghz: %hhx %hhx %hhx\n",
+ dev->ee->lna_gain_5ghz[0], dev->ee->lna_gain_5ghz[1],
+ dev->ee->lna_gain_5ghz[2]);
+ seq_printf(file, "Power Amplifier type %hhx\n", dev->ee->pa_type);
+ seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
+ dev->ee->reg.start + dev->ee->reg.num - 1);
+
+ seq_puts(file, "Per channel power:\n");
+ for (i = 0; i < 58; i++)
+ seq_printf(file, "\t%d chan:%d pwr:%d\n", i, i,
+ dev->ee->tx_pwr_per_chan[i]);
+
+ seq_puts(file, "Per rate power 2GHz:\n");
+ for (i = 0; i < 5; i++)
+ seq_printf(file, "\t %d bw20:%d bw40:%d\n",
+ i, dev->ee->tx_pwr_cfg_2g[i][0],
+ dev->ee->tx_pwr_cfg_2g[i][1]);
+
+ seq_puts(file, "Per rate power 5GHz:\n");
+ for (i = 0; i < 5; i++)
+ seq_printf(file, "\t %d bw20:%d bw40:%d\n",
+ i, dev->ee->tx_pwr_cfg_5g[i][0],
+ dev->ee->tx_pwr_cfg_5g[i][1]);
+
+ return 0;
+}
+
+static int
+mt76x0_eeprom_param_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt76x0_eeprom_param_read, inode->i_private);
+}
+
+static const struct file_operations fops_eeprom_param = {
+ .open = mt76x0_eeprom_param_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void mt76x0_init_debugfs(struct mt76x0_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("mt76x0", dev->mt76.hw->wiphy->debugfsdir);
+ if (!dir)
+ return;
+
+ debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
+ debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
+ &fops_regval);
+ debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
+ debugfs_create_file("eeprom_param", S_IRUSR, dir, dev,
+ &fops_eeprom_param);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
new file mode 100644
index 000000000000..e2efb430419b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "dma.h"
+#include "usb.h"
+#include "trace.h"
+
+static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
+ struct mt76x0_dma_buf_rx *e, gfp_t gfp);
+
+static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
+{
+ const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
+ unsigned int hdrlen;
+
+ if (unlikely(len < 10))
+ return 0;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ if (unlikely(hdrlen > len))
+ return 0;
+ return hdrlen;
+}
+
+static struct sk_buff *
+mt76x0_rx_skb_from_seg(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
+ void *data, u32 seg_len, u32 truesize, struct page *p)
+{
+ struct sk_buff *skb;
+ u32 true_len, hdr_len = 0, copy, frag;
+
+ skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ true_len = mt76x0_mac_process_rx(dev, skb, data, rxwi);
+ if (!true_len || true_len > seg_len)
+ goto bad_frame;
+
+ hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
+ if (!hdr_len)
+ goto bad_frame;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
+ memcpy(skb_put(skb, hdr_len), data, hdr_len);
+
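+ /* skip the 2-byte L2 pad between the 802.11 header and payload */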
+ data += hdr_len + 2;
+ true_len -= hdr_len;
+ hdr_len = 0;
+ }
+
+ /* If not doing paged RX, the allocated skb always has enough space */
+ copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
+ frag = true_len - copy;
+
+ memcpy(skb_put(skb, copy), data, copy);
+ data += copy;
+
+ if (frag) {
+ skb_add_rx_frag(skb, 0, p, data - page_address(p),
+ frag, truesize);
+ get_page(p);
+ }
+
+ return skb;
+
+bad_frame:
+ dev_err_ratelimited(dev->mt76.dev, "Error: incorrect frame len:%u hdr:%u\n",
+ true_len, hdr_len);
+ dev_kfree_skb(skb);
+ return NULL;
+}
+
+static void mt76x0_rx_process_seg(struct mt76x0_dev *dev, u8 *data,
+ u32 seg_len, struct page *p)
+{
+ struct sk_buff *skb;
+ struct mt76x0_rxwi *rxwi;
+ u32 fce_info, truesize = seg_len;
+
+ /* The DMA_INFO field at the beginning of the segment carries only
+ * part of the information; the FCE descriptor must be read from
+ * the end.
+ */
+ fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
+ seg_len -= MT_FCE_INFO_LEN;
+
+ data += MT_DMA_HDR_LEN;
+ seg_len -= MT_DMA_HDR_LEN;
+
+ rxwi = (struct mt76x0_rxwi *) data;
+ data += sizeof(struct mt76x0_rxwi);
+ seg_len -= sizeof(struct mt76x0_rxwi);
+
+ if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
+ dev_err_once(dev->mt76.dev, "Error: RX path seen a non-pkt urb\n");
+
+ trace_mt76x0_rx(&dev->mt76, rxwi, fce_info);
+
+ skb = mt76x0_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
+ if (!skb)
+ return;
+
+ spin_lock(&dev->mac_lock);
+ ieee80211_rx(dev->mt76.hw, skb);
+ spin_unlock(&dev->mac_lock);
+}
+
+static u16 mt76x0_rx_next_seg_len(u8 *data, u32 data_len)
+{
+ u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
+ sizeof(struct mt76x0_rxwi) + MT_FCE_INFO_LEN;
+ u16 dma_len = get_unaligned_le16(data);
+
+ if (data_len < min_seg_len ||
+ WARN_ON(!dma_len) ||
+ WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
+ WARN_ON(dma_len & 0x3))
+ return 0;
+
+ return MT_DMA_HDRS + dma_len;
+}
+
+static void
+mt76x0_rx_process_entry(struct mt76x0_dev *dev, struct mt76x0_dma_buf_rx *e)
+{
+ u32 seg_len, data_len = e->urb->actual_length;
+ u8 *data = page_address(e->p);
+ struct page *new_p = NULL;
+ int cnt = 0;
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return;
+
+ /* Copy when the buffer holds little data; for larger buffers, hand
+ * the filled page off via paged RX and allocate a fresh one.
+ */
+ if (data_len > 512)
+ new_p = dev_alloc_pages(MT_RX_ORDER);
+
+ while ((seg_len = mt76x0_rx_next_seg_len(data, data_len))) {
+ mt76x0_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
+
+ data_len -= seg_len;
+ data += seg_len;
+ cnt++;
+ }
+
+ if (cnt > 1)
+ trace_mt76x0_rx_dma_aggr(&dev->mt76, cnt, !!new_p);
+
+ if (new_p) {
+ /* we have one extra ref from the allocator */
+ __free_pages(e->p, MT_RX_ORDER);
+
+ e->p = new_p;
+ }
+}
+
+static struct mt76x0_dma_buf_rx *
+mt76x0_rx_get_pending_entry(struct mt76x0_dev *dev)
+{
+ struct mt76x0_rx_queue *q = &dev->rx_q;
+ struct mt76x0_dma_buf_rx *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ if (!q->pending)
+ goto out;
+
+ buf = &q->e[q->start];
+ q->pending--;
+ q->start = (q->start + 1) % q->entries;
+out:
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+
+ return buf;
+}
+
+static void mt76x0_complete_rx(struct urb *urb)
+{
+ struct mt76x0_dev *dev = urb->context;
+ struct mt76x0_rx_queue *q = &dev->rx_q;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ if (mt76x0_urb_has_error(urb))
+ dev_err(dev->mt76.dev, "Error: RX urb failed:%d\n", urb->status);
+ if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
+ goto out;
+
+ q->end = (q->end + 1) % q->entries;
+ q->pending++;
+ tasklet_schedule(&dev->rx_tasklet);
+out:
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static void mt76x0_rx_tasklet(unsigned long data)
+{
+ struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
+ struct mt76x0_dma_buf_rx *e;
+
+ while ((e = mt76x0_rx_get_pending_entry(dev))) {
+ if (e->urb->status)
+ continue;
+
+ mt76x0_rx_process_entry(dev, e);
+ mt76x0_submit_rx_buf(dev, e, GFP_ATOMIC);
+ }
+}
+
+static void mt76x0_complete_tx(struct urb *urb)
+{
+ struct mt76x0_tx_queue *q = urb->context;
+ struct mt76x0_dev *dev = q->dev;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ if (mt76x0_urb_has_error(urb))
+ dev_err(dev->mt76.dev, "Error: TX urb failed:%d\n", urb->status);
+ if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
+ goto out;
+
+ skb = q->e[q->start].skb;
+ trace_mt76x0_tx_dma_done(&dev->mt76, skb);
+
+ __skb_queue_tail(&dev->tx_skb_done, skb);
+ tasklet_schedule(&dev->tx_tasklet);
+
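+ /* wake the queue once it drains to 7/8 of capacity */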
+ if (q->used == q->entries - q->entries / 8)
+ ieee80211_wake_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
+
+ q->start = (q->start + 1) % q->entries;
+ q->used--;
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+static void mt76x0_tx_tasklet(unsigned long data)
+{
+ struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
+ struct sk_buff_head skbs;
+ unsigned long flags;
+
+ __skb_queue_head_init(&skbs);
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ set_bit(MT76_MORE_STATS, &dev->mt76.state);
+ if (!test_and_set_bit(MT76_READING_STATS, &dev->mt76.state))
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(10));
+
+ skb_queue_splice_init(&dev->tx_skb_done, &skbs);
+
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+
+ mt76x0_tx_status(dev, skb);
+ }
+}
+
+static int mt76x0_dma_submit_tx(struct mt76x0_dev *dev,
+ struct sk_buff *skb, u8 ep)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep]);
+ struct mt76x0_dma_buf_tx *e;
+ struct mt76x0_tx_queue *q = &dev->tx_q[ep];
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ if (WARN_ON_ONCE(q->entries <= q->used)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ e = &q->e[q->end];
+ e->skb = skb;
+ usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
+ mt76x0_complete_tx, q);
+ ret = usb_submit_urb(e->urb, GFP_ATOMIC);
+ if (ret) {
+ /* Special-handle ENODEV from TX urb submission because it will
+ * often be the first ENODEV we see after device is removed.
+ */
+ if (ret == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->mt76.state);
+ else
+ dev_err(dev->mt76.dev, "Error: TX urb submit failed:%d\n",
+ ret);
+ goto out;
+ }
+
+ q->end = (q->end + 1) % q->entries;
+ q->used++;
+
+ if (q->used >= q->entries)
+ ieee80211_stop_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ return ret;
+}
+
+/* Map USB endpoint number to Q id in the DMA engine */
+static enum mt76_qsel ep2dmaq(u8 ep)
+{
+ if (ep == 5)
+ return MT_QSEL_MGMT;
+ return MT_QSEL_EDCA;
+}
+
+int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ struct mt76_wcid *wcid, int hw_q)
+{
+ u8 ep = q2ep(hw_q);
+ u32 dma_flags;
+ int ret;
+
+ dma_flags = MT_TXD_PKT_INFO_80211;
+ if (wcid->hw_key_idx == 0xff)
+ dma_flags |= MT_TXD_PKT_INFO_WIV;
+
+ ret = mt76x0_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
+ if (ret)
+ return ret;
+
+ ret = mt76x0_dma_submit_tx(dev, skb, ep);
+
+ if (ret) {
+ ieee80211_free_txskb(dev->mt76.hw, skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mt76x0_kill_rx(struct mt76x0_dev *dev)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
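+ /* usb_poison_urb() waits for the urb to complete; the completion
+ * handler takes rx_lock and advances rx_q.end, hence the lock is
+ * dropped around each call
+ */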
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ int next = dev->rx_q.end;
+
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+ usb_poison_urb(dev->rx_q.e[next].urb);
+ spin_lock_irqsave(&dev->rx_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
+ struct mt76x0_dma_buf_rx *e, gfp_t gfp)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ u8 *buf = page_address(e->p);
+ unsigned pipe;
+ int ret;
+
+ pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[MT_EP_IN_PKT_RX]);
+
+ usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
+ mt76x0_complete_rx, dev);
+
+ trace_mt76x0_submit_urb(&dev->mt76, e->urb);
+ ret = usb_submit_urb(e->urb, gfp);
+ if (ret)
+ dev_err(dev->mt76.dev, "Error: submit RX URB failed:%d\n", ret);
+
+ return ret;
+}
+
+static int mt76x0_submit_rx(struct mt76x0_dev *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ ret = mt76x0_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mt76x0_free_rx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
+ usb_free_urb(dev->rx_q.e[i].urb);
+ }
+}
+
+static int mt76x0_alloc_rx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ memset(&dev->rx_q, 0, sizeof(dev->rx_q));
+ dev->rx_q.dev = dev;
+ dev->rx_q.entries = N_RX_ENTRIES;
+
+ for (i = 0; i < N_RX_ENTRIES; i++) {
+ dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
+
+ if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mt76x0_free_tx_queue(struct mt76x0_tx_queue *q)
+{
+ int i;
+
+ WARN_ON(q->used);
+
+ for (i = 0; i < q->entries; i++) {
+ usb_poison_urb(q->e[i].urb);
+ usb_free_urb(q->e[i].urb);
+ }
+}
+
+static void mt76x0_free_tx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < __MT_EP_OUT_MAX; i++)
+ mt76x0_free_tx_queue(&dev->tx_q[i]);
+}
+
+static int mt76x0_alloc_tx_queue(struct mt76x0_dev *dev,
+ struct mt76x0_tx_queue *q)
+{
+ int i;
+
+ q->dev = dev;
+ q->entries = N_TX_ENTRIES;
+
+ for (i = 0; i < N_TX_ENTRIES; i++) {
+ q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!q->e[i].urb)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int mt76x0_alloc_tx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ dev->tx_q = devm_kcalloc(dev->mt76.dev, __MT_EP_OUT_MAX,
+ sizeof(*dev->tx_q), GFP_KERNEL);
+ if (!dev->tx_q)
+ return -ENOMEM;
+
+ for (i = 0; i < __MT_EP_OUT_MAX; i++)
+ if (mt76x0_alloc_tx_queue(dev, &dev->tx_q[i]))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mt76x0_dma_init(struct mt76x0_dev *dev)
+{
+ int ret = -ENOMEM;
+
+ tasklet_init(&dev->tx_tasklet, mt76x0_tx_tasklet, (unsigned long) dev);
+ tasklet_init(&dev->rx_tasklet, mt76x0_rx_tasklet, (unsigned long) dev);
+
+ ret = mt76x0_alloc_tx(dev);
+ if (ret)
+ goto err;
+ ret = mt76x0_alloc_rx(dev);
+ if (ret)
+ goto err;
+
+ ret = mt76x0_submit_rx(dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ mt76x0_dma_cleanup(dev);
+ return ret;
+}
+
+void mt76x0_dma_cleanup(struct mt76x0_dev *dev)
+{
+ mt76x0_kill_rx(dev);
+
+ tasklet_kill(&dev->rx_tasklet);
+
+ mt76x0_free_rx(dev);
+ mt76x0_free_tx(dev);
+
+ tasklet_kill(&dev->tx_tasklet);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
new file mode 100644
index 000000000000..891ce1c3461f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_DMA_H
+#define __MT76X0U_DMA_H
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+
+#define MT_DMA_HDR_LEN 4
+#define MT_RX_INFO_LEN 4
+#define MT_FCE_INFO_LEN 4
+#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
+
+/* Common Tx DMA descriptor fields */
+#define MT_TXD_INFO_LEN GENMASK(15, 0)
+#define MT_TXD_INFO_D_PORT GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE GENMASK(31, 30)
+
+/* Tx DMA MCU command specific flags */
+#define MT_TXD_CMD_SEQ GENMASK(19, 16)
+#define MT_TXD_CMD_TYPE GENMASK(26, 20)
+
+enum mt76_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
+enum mt76_info_type {
+ DMA_PACKET,
+ DMA_COMMAND,
+};
+
+/* Tx DMA packet specific flags */
+#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_PKT_INFO_TX_BURST BIT(17)
+#define MT_TXD_PKT_INFO_80211 BIT(19)
+#define MT_TXD_PKT_INFO_TSO BIT(20)
+#define MT_TXD_PKT_INFO_CSO BIT(21)
+#define MT_TXD_PKT_INFO_WIV BIT(24)
+#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25)
+
+enum mt76_qsel {
+ MT_QSEL_MGMT,
+ MT_QSEL_HCCA,
+ MT_QSEL_EDCA,
+ MT_QSEL_EDCA_2,
+};
+
+static inline int mt76x0_dma_skb_wrap(struct sk_buff *skb,
+ enum mt76_msg_port d_port,
+ enum mt76_info_type type, u32 flags)
+{
+ u32 info;
+
+ /* Buffer layout:
+ * | 4B | xfer len | pad | 4B |
+ * | TXINFO | pkt/cmd | zero pad to 4B | zero |
+ *
+ * length field of TXINFO should be set to 'xfer len'.
+ */
+
+ info = flags |
+ FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
+ FIELD_PREP(MT_TXD_INFO_TYPE, type);
+
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+ return skb_put_padto(skb, round_up(skb->len, 4) + 4);
+}
+
+static inline int
+mt76x0_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
+{
+ flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
+ return mt76x0_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
+}
+
+/* Common Rx DMA descriptor fields */
+#define MT_RXD_INFO_LEN GENMASK(13, 0)
+#define MT_RXD_INFO_PCIE_INTR BIT(24)
+#define MT_RXD_INFO_QSEL GENMASK(26, 25)
+#define MT_RXD_INFO_PORT GENMASK(29, 27)
+#define MT_RXD_INFO_TYPE GENMASK(31, 30)
+
+/* Rx DMA packet specific flags */
+#define MT_RXD_PKT_INFO_UDP_ERR BIT(16)
+#define MT_RXD_PKT_INFO_TCP_ERR BIT(17)
+#define MT_RXD_PKT_INFO_IP_ERR BIT(18)
+#define MT_RXD_PKT_INFO_PKT_80211 BIT(19)
+#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20)
+#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21)
+
+/* Rx DMA MCU command specific flags */
+#define MT_RXD_CMD_INFO_SELF_GEN BIT(15)
+#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20)
+
+enum mt76_evt_type {
+ CMD_DONE,
+ CMD_ERROR,
+ CMD_RETRY,
+ EVENT_PWR_RSP,
+ EVENT_WOW_RSP,
+ EVENT_CARRIER_DETECT_RSP,
+ EVENT_DFS_DETECT_RSP,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
new file mode 100644
index 000000000000..36da1e6bc21a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "mt76x0.h"
+#include "eeprom.h"
+
+static bool
+field_valid(u8 val)
+{
+ return val != 0xff;
+}
+
+static s8
+field_validate(u8 val)
+{
+ if (!field_valid(val))
+ return 0;
+
+ return val;
+}
+
+static inline int
+sign_extend(u32 val, unsigned int size)
+{
+ bool sign = val & BIT(size - 1);
+
+ val &= BIT(size - 1) - 1;
+
+ return sign ? val : -val;
+}
+
+static int
+mt76x0_efuse_read(struct mt76x0_dev *dev, u16 addr, u8 *data,
+ enum mt76x0_eeprom_access_modes mode)
+{
+ u32 val;
+ int i;
+
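+ /* each efuse access reads one 16-byte block via MT_EFUSE_DATA(0..3) */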
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN |
+ MT_EFUSE_CTRL_MODE);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+ FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) |
+ MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+ /* Parts of eeprom not in the usage map (0x80-0xc0,0xf0)
+ * will not return valid data but it's ok.
+ */
+ memset(data, 0xff, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, MT_EFUSE_DATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+#define MT_MAP_READS DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16)
+static int
+mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev)
+{
+ u8 data[MT_MAP_READS * 16];
+ int ret, i;
+ u32 start = 0, end = 0, cnt_free;
+
+ for (i = 0; i < MT_MAP_READS; i++) {
+ ret = mt76x0_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
+ data + i * 16, MT_EE_PHYSICAL_READ);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
+ if (!data[i]) {
+ if (!start)
+ start = MT_EE_USAGE_MAP_START + i;
+ end = MT_EE_USAGE_MAP_START + i;
+ }
+ cnt_free = end - start + 1;
+
+ if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
+ dev_err(dev->mt76.dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+mt76x0_set_chip_cap(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ enum mt76x2_board_type { BOARD_TYPE_2GHZ = 1, BOARD_TYPE_5GHZ = 2 };
+ u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
+ u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+ dev_dbg(dev->mt76.dev, "NIC_CONF0: %04x NIC_CONF1: %04x\n", nic_conf0, nic_conf1);
+
+ switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, nic_conf0)) {
+ case BOARD_TYPE_5GHZ:
+ dev->ee->has_5ghz = true;
+ break;
+ case BOARD_TYPE_2GHZ:
+ dev->ee->has_2ghz = true;
+ break;
+ default:
+ dev->ee->has_2ghz = true;
+ dev->ee->has_5ghz = true;
+ break;
+ }
+
+ dev_dbg(dev->mt76.dev, "Has 2GHZ %d 5GHZ %d\n", dev->ee->has_2ghz, dev->ee->has_5ghz);
+
+ if (!field_valid(nic_conf1 & 0xff))
+ nic_conf1 &= 0xff00;
+
+ if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
+ dev_err(dev->mt76.dev,
+ "Error: this driver does not support HW RF ctrl\n");
+
+ if (!field_valid(nic_conf0 >> 8))
+ return;
+
+ if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+ FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+ dev_err(dev->mt76.dev,
+ "Error: device has more than 1 RX/TX stream!\n");
+
+ dev->ee->pa_type = FIELD_GET(MT_EE_NIC_CONF_0_PA_TYPE, nic_conf0);
+ dev_dbg(dev->mt76.dev, "PA Type %d\n", dev->ee->pa_type);
+}
+
+static int
+mt76x0_set_macaddr(struct mt76x0_dev *dev, const u8 *eeprom)
+{
+ const void *src = eeprom + MT_EE_MAC_ADDR;
+
+ ether_addr_copy(dev->macaddr, src);
+
+ if (!is_valid_ether_addr(dev->macaddr)) {
+ eth_random_addr(dev->macaddr);
+ dev_info(dev->mt76.dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->macaddr);
+ }
+
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+
+ return 0;
+}
+
+static void
+mt76x0_set_temp_offset(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ u8 temp = eeprom[MT_EE_TEMP_OFFSET];
+
+ if (field_valid(temp))
+ dev->ee->temp_off = sign_extend(temp, 8);
+ else
+ dev->ee->temp_off = -10;
+}
+
+static void
+mt76x0_set_country_reg(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ /* Note: - region 31 is not valid for mt76x0 (see rtmp_init.c)
+ * - comments in rtmp_def.h are incorrect (see rt_channel.c)
+ */
+ static const struct reg_channel_bounds chan_bounds[] = {
+ /* EEPROM country regions 0 - 7 */
+ { 1, 11 }, { 1, 13 }, { 10, 2 }, { 10, 4 },
+ { 14, 1 }, { 1, 14 }, { 3, 7 }, { 5, 9 },
+ /* EEPROM country regions 32 - 33 */
+ { 1, 11 }, { 1, 14 }
+ };
+ u8 val = eeprom[MT_EE_COUNTRY_REGION_2GHZ];
+ int idx = -1;
+
+ dev_dbg(dev->mt76.dev, "REG 2GHZ %u REG 5GHZ %u\n", val, eeprom[MT_EE_COUNTRY_REGION_5GHZ]);
+ if (val < 8)
+ idx = val;
+ if (val > 31 && val < 34)
+ idx = val - 32 + 8;
+
+ if (idx != -1)
+ dev_info(dev->mt76.dev,
+ "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
+ val, chan_bounds[idx].start,
+ chan_bounds[idx].start + chan_bounds[idx].num - 1);
+ else
+ idx = 5; /* channels 1 - 14 */
+
+ dev->ee->reg = chan_bounds[idx];
+
+ /* TODO: country region 33 is special - phy should be set to B-mode
+ * before entering channel 14 (see sta/connect.c)
+ */
+}
+
+static void
+mt76x0_set_rf_freq_off(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ u8 comp;
+
+ dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
+ comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
+
+ if (comp & BIT(7))
+ dev->ee->rf_freq_off -= comp & 0x7f;
+ else
+ dev->ee->rf_freq_off += comp;
+}
+
+static void
+mt76x0_set_lna_gain(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ u8 gain;
+
+ dev->ee->lna_gain_2ghz = eeprom[MT_EE_LNA_GAIN_2GHZ];
+ dev->ee->lna_gain_5ghz[0] = eeprom[MT_EE_LNA_GAIN_5GHZ_0];
+
+ gain = eeprom[MT_EE_LNA_GAIN_5GHZ_1];
+ if (gain == 0xff || gain == 0)
+ dev->ee->lna_gain_5ghz[1] = dev->ee->lna_gain_5ghz[0];
+ else
+ dev->ee->lna_gain_5ghz[1] = gain;
+
+ gain = eeprom[MT_EE_LNA_GAIN_5GHZ_2];
+ if (gain == 0xff || gain == 0)
+ dev->ee->lna_gain_5ghz[2] = dev->ee->lna_gain_5ghz[0];
+ else
+ dev->ee->lna_gain_5ghz[2] = gain;
+}
+
+static void
+mt76x0_set_rssi_offset(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ int i;
+ s8 *rssi_offset = dev->ee->rssi_offset_2ghz;
+
+ for (i = 0; i < 2; i++) {
+ rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
+
+ if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+ dev_warn(dev->mt76.dev,
+ "Warning: EEPROM RSSI is invalid %02hhx\n",
+ rssi_offset[i]);
+ rssi_offset[i] = 0;
+ }
+ }
+
+ rssi_offset = dev->ee->rssi_offset_5ghz;
+
+ for (i = 0; i < 3; i++) {
+ rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET_5GHZ + i];
+
+ if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+ dev_warn(dev->mt76.dev,
+ "Warning: EEPROM RSSI is invalid %02hhx\n",
+ rssi_offset[i]);
+ rssi_offset[i] = 0;
+ }
+ }
+}
+
+static u32
+calc_bw40_power_rate(u32 value, int delta)
+{
+ u32 ret = 0;
+ int i, tmp;
+
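+ /* 'value' packs four signed 6-bit per-rate powers, one per byte */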
+ for (i = 0; i < 4; i++) {
+ tmp = s6_to_int((value >> i*8) & 0xff) + delta;
+ ret |= (u32)(int_to_s6(tmp)) << i*8;
+ }
+
+ return ret;
+}
+
+static s8
+get_delta(u8 val)
+{
+ s8 ret;
+
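+ /* BIT(7) marks the delta as valid, BIT(6) selects its sign,
+ * and the low bits give the magnitude, capped at 8
+ */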
+ if (!field_valid(val) || !(val & BIT(7)))
+ return 0;
+
+ ret = val & 0x1f;
+ if (ret > 8)
+ ret = 8;
+ if (val & BIT(6))
+ ret = -ret;
+
+ return ret;
+}
+
+static void
+mt76x0_set_tx_power_per_rate(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ s8 bw40_delta_2g, bw40_delta_5g;
+ u32 val;
+ int i;
+
+ bw40_delta_2g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
+ bw40_delta_5g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40 + 1]);
+
+ for (i = 0; i < 5; i++) {
+ val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
+
+ /* Skip last 16 bits. */
+ if (i == 4)
+ val &= 0x0000ffff;
+
+ dev->ee->tx_pwr_cfg_2g[i][0] = val;
+ dev->ee->tx_pwr_cfg_2g[i][1] = calc_bw40_power_rate(val, bw40_delta_2g);
+ }
+
+ /* Reading per-rate tx power for the 5 GHz band is a bit more
+ * complex: 16-bit and 32-bit reads are mixed, with occasional shifts.
+ */
+ val = get_unaligned_le16(eeprom + 0x120);
+ val <<= 16;
+ dev->ee->tx_pwr_cfg_5g[0][0] = val;
+ dev->ee->tx_pwr_cfg_5g[0][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le32(eeprom + 0x122);
+ dev->ee->tx_pwr_cfg_5g[1][0] = val;
+ dev->ee->tx_pwr_cfg_5g[1][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le16(eeprom + 0x126);
+ dev->ee->tx_pwr_cfg_5g[2][0] = val;
+ dev->ee->tx_pwr_cfg_5g[2][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le16(eeprom + 0xec);
+ val <<= 16;
+ dev->ee->tx_pwr_cfg_5g[3][0] = val;
+ dev->ee->tx_pwr_cfg_5g[3][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le16(eeprom + 0xee);
+ dev->ee->tx_pwr_cfg_5g[4][0] = val;
+ dev->ee->tx_pwr_cfg_5g[4][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+}
+
+static void
+mt76x0_set_tx_power_per_chan(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ int i;
+ u8 tx_pwr;
+
+ for (i = 0; i < 14; i++) {
+ tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_2GHZ + i];
+ if (tx_pwr <= 0x3f && tx_pwr > 0)
+ dev->ee->tx_pwr_per_chan[i] = tx_pwr;
+ else
+ dev->ee->tx_pwr_per_chan[i] = 5;
+ }
+
+ for (i = 0; i < 40; i++) {
+ tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_5GHZ + i];
+ if (tx_pwr <= 0x3f && tx_pwr > 0)
+ dev->ee->tx_pwr_per_chan[14 + i] = tx_pwr;
+ else
+ dev->ee->tx_pwr_per_chan[14 + i] = 5;
+ }
+
+ dev->ee->tx_pwr_per_chan[54] = dev->ee->tx_pwr_per_chan[22];
+ dev->ee->tx_pwr_per_chan[55] = dev->ee->tx_pwr_per_chan[28];
+ dev->ee->tx_pwr_per_chan[56] = dev->ee->tx_pwr_per_chan[34];
+ dev->ee->tx_pwr_per_chan[57] = dev->ee->tx_pwr_per_chan[44];
+}
+
+int
+mt76x0_eeprom_init(struct mt76x0_dev *dev)
+{
+ u8 *eeprom;
+ int i, ret;
+
+ ret = mt76x0_efuse_physical_size_check(dev);
+ if (ret)
+ return ret;
+
+ dev->ee = devm_kzalloc(dev->mt76.dev, sizeof(*dev->ee), GFP_KERNEL);
+ if (!dev->ee)
+ return -ENOMEM;
+
+ eeprom = kmalloc(MT76X0_EEPROM_SIZE, GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ for (i = 0; i + 16 <= MT76X0_EEPROM_SIZE; i += 16) {
+ ret = mt76x0_efuse_read(dev, i, eeprom + i, MT_EE_READ);
+ if (ret)
+ goto out;
+ }
+
+ if (eeprom[MT_EE_VERSION_EE] > MT76X0U_EE_MAX_VER)
+ dev_warn(dev->mt76.dev,
+ "Warning: unsupported EEPROM version %02hhx\n",
+ eeprom[MT_EE_VERSION_EE]);
+ dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
+ eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
+
+ mt76x0_set_macaddr(dev, eeprom);
+ mt76x0_set_chip_cap(dev, eeprom);
+ mt76x0_set_country_reg(dev, eeprom);
+ mt76x0_set_rf_freq_off(dev, eeprom);
+ mt76x0_set_temp_offset(dev, eeprom);
+ mt76x0_set_lna_gain(dev, eeprom);
+ mt76x0_set_rssi_offset(dev, eeprom);
+ dev->chainmask = 0x0101;
+
+ mt76x0_set_tx_power_per_rate(dev, eeprom);
+ mt76x0_set_tx_power_per_chan(dev, eeprom);
+
+out:
+ kfree(eeprom);
+ return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
new file mode 100644
index 000000000000..e37b573aed7b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_EEPROM_H
+#define __MT76X0U_EEPROM_H
+
+struct mt76x0_dev;
+
+#define MT76X0U_EE_MAX_VER 0x0c
+#define MT76X0_EEPROM_SIZE 512
+
+#define MT76X0U_DEFAULT_TX_POWER 6
+
+enum mt76_eeprom_field {
+ MT_EE_CHIP_ID = 0x00,
+ MT_EE_VERSION_FAE = 0x02,
+ MT_EE_VERSION_EE = 0x03,
+ MT_EE_MAC_ADDR = 0x04,
+ MT_EE_NIC_CONF_0 = 0x34,
+ MT_EE_NIC_CONF_1 = 0x36,
+ MT_EE_COUNTRY_REGION_5GHZ = 0x38,
+ MT_EE_COUNTRY_REGION_2GHZ = 0x39,
+ MT_EE_FREQ_OFFSET = 0x3a,
+ MT_EE_NIC_CONF_2 = 0x42,
+
+ MT_EE_LNA_GAIN_2GHZ = 0x44,
+ MT_EE_LNA_GAIN_5GHZ_0 = 0x45,
+ MT_EE_RSSI_OFFSET = 0x46,
+ MT_EE_RSSI_OFFSET_5GHZ = 0x4a,
+ MT_EE_LNA_GAIN_5GHZ_1 = 0x49,
+ MT_EE_LNA_GAIN_5GHZ_2 = 0x4d,
+
+ MT_EE_TX_POWER_DELTA_BW40 = 0x50,
+
+ MT_EE_TX_POWER_OFFSET_2GHZ = 0x52,
+
+ MT_EE_TX_TSSI_SLOPE = 0x6e,
+ MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f,
+ MT_EE_TX_TSSI_OFFSET = 0x76,
+
+ MT_EE_TX_POWER_OFFSET_5GHZ = 0x78,
+
+ MT_EE_TEMP_OFFSET = 0xd1,
+ MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb,
+ MT_EE_TX_POWER_BYRATE_BASE = 0xde,
+
+ MT_EE_TX_POWER_BYRATE_BASE_5GHZ = 0x120,
+
+ MT_EE_USAGE_MAP_START = 0x1e0,
+ MT_EE_USAGE_MAP_END = 0x1fc,
+};
+
+#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0)
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
+
+#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \
+ (i) * 4)
+
+#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
+ MT_EE_USAGE_MAP_START + 1)
+
+enum mt76x0_eeprom_access_modes {
+ MT_EE_READ = 0,
+ MT_EE_PHYSICAL_READ = 1,
+};
+
+struct reg_channel_bounds {
+ u8 start;
+ u8 num;
+};
+
+struct mt76x0_eeprom_params {
+ u8 rf_freq_off;
+ s16 temp_off;
+ s8 rssi_offset_2ghz[2];
+ s8 rssi_offset_5ghz[3];
+ s8 lna_gain_2ghz;
+ s8 lna_gain_5ghz[3];
+ u8 pa_type;
+
+ /* TX_PWR_CFG_* values from EEPROM for 20 and 40 Mhz bandwidths. */
+ u32 tx_pwr_cfg_2g[5][2];
+ u32 tx_pwr_cfg_5g[5][2];
+
+ u8 tx_pwr_per_chan[58];
+
+ struct reg_channel_bounds reg;
+
+ bool has_2ghz;
+ bool has_5ghz;
+};
+
+int mt76x0_eeprom_init(struct mt76x0_dev *dev);
+
+static inline u32 s6_validate(u32 reg)
+{
+ WARN_ON(reg & ~GENMASK(5, 0));
+ return reg & GENMASK(5, 0);
+}
+
+static inline int s6_to_int(u32 reg)
+{
+ int s6;
+
+ s6 = s6_validate(reg);
+ if (s6 & BIT(5))
+ s6 -= BIT(6);
+
+ return s6;
+}
+
+static inline u32 int_to_s6(int val)
+{
+ if (val < -0x20)
+ return 0x20;
+ if (val > 0x1f)
+ return 0x1f;
+
+ return val & 0x3f;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
new file mode 100644
index 000000000000..7cdb3e740522
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -0,0 +1,720 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "mcu.h"
+#include "usb.h"
+
+#include "initvals.h"
+
+static void
+mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable)
+{
+ int i;
+
+ /* Note: we don't turn off WLAN_CLK because that makes the device
+ * not respond properly on the probe path.
+ * In case anyone (PSM?) wants to use this function we can
+ * bring the clock stuff back and fixup the probe path.
+ */
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ if (!enable)
+ return;
+
+ for (i = 200; i; i--) {
+ val = mt76_rr(dev, MT_CMB_CTRL);
+
+ if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
+ break;
+
+ udelay(20);
+ }
+
+ /* Note: the vendor driver tries to disable/enable wlan here and retry,
+ * but the code doing so is so buggy it can never have triggered,
+ * so don't bother.
+ */
+ if (!i)
+ dev_err(dev->mt76.dev, "Error: PLL and XTAL check failed!\n");
+}
+
+void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset)
+{
+ u32 val;
+
+ mutex_lock(&dev->hw_atomic_mutex);
+
+ val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (reset) {
+ val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ }
+ }
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ mt76x0_set_wlan_state(dev, val, enable);
+
+ mutex_unlock(&dev->hw_atomic_mutex);
+}
+
+static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_PBF_SYS_CTRL);
+ val &= ~0x2000;
+ mt76_wr(dev, MT_PBF_SYS_CTRL, val);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
+
+ msleep(200);
+}
+
+static void mt76x0_init_usb_dma(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+
+ val |= FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
+ MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN;
+ if (dev->in_max_packet == 512)
+ val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+ val = mt76_rr(dev, MT_COM_REG0);
+ if (val & 1)
+ dev_dbg(dev->mt76.dev, "MCU not ready\n");
+
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+
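+ /* Pulse the RX drop-or-padding bit: set it, write it out, then
+ * clear it again.
+ */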
+ val |= MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+}
+
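+/* Burst-write a table of register/value pairs in one go via
+ * mt76x0_write_reg_pairs().
+ */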
+#define RANDOM_WRITE(dev, tab) \
+ mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, tab, ARRAY_SIZE(tab));
+
+static int mt76x0_init_bbp(struct mt76x0_dev *dev)
+{
+ int ret, i;
+
+ ret = mt76x0_wait_bbp_ready(dev);
+ if (ret)
+ return ret;
+
+ RANDOM_WRITE(dev, mt76x0_bbp_init_tab);
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
+ const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
+ const struct mt76_reg_pair *pair = &item->reg_pair;
+
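+		/* Apply only the entries valid for 2.4 GHz at 20 MHz
+		 * bandwidth here.
+		 */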
+ if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
+ mt76_wr(dev, pair->reg, pair->value);
+ }
+
+ RANDOM_WRITE(dev, mt76x0_dcoc_tab);
+
+ return 0;
+}
+
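+/* Pack the 16 beacon slot offsets into the four MT_BCN_OFFSET registers,
+ * one byte per slot, expressed in 64-byte units relative to
+ * MT_BEACON_BASE.
+ */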
+static void
+mt76_init_beacon_offsets(struct mt76x0_dev *dev)
+{
+ u16 base = MT_BEACON_BASE;
+ u32 regs[4] = {};
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ u16 addr = dev->beacon_offsets[i];
+
+ regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+ }
+
+ for (i = 0; i < 4; i++)
+ mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
+{
+ u32 reg;
+
+ RANDOM_WRITE(dev, common_mac_reg_table);
+
+ mt76_init_beacon_offsets(dev);
+
+ /* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
+ RANDOM_WRITE(dev, mt76x0_mac_reg_table);
+
+ /* Release BBP and MAC reset MAC_SYS_CTRL[1:0] = 0x0 */
+ reg = mt76_rr(dev, MT_MAC_SYS_CTRL);
+ reg &= ~0x3;
+ mt76_wr(dev, MT_MAC_SYS_CTRL, reg);
+
+ if (is_mt7610e(dev)) {
+ /* Disable COEX_EN */
+ reg = mt76_rr(dev, MT_COEXCFG0);
+ reg &= 0xFFFFFFFE;
+ mt76_wr(dev, MT_COEXCFG0, reg);
+ }
+
+ /* Set 0x141C[15:12]=0xF */
+ reg = mt76_rr(dev, MT_EXT_CCA_CFG);
+ reg |= 0x0000F000;
+ mt76_wr(dev, MT_EXT_CCA_CFG, reg);
+
+ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+ /* TxRing 9 is for management frames.
+ * TxRing 8 is for in-band command frames.
+ * WMM_RG0_TXQMA: FCE rule for TxRing 9.
+ * WMM_RG1_TXQMA: FCE rule for TxRing 8.
+ */
+ reg = mt76_rr(dev, MT_WMM_CTRL);
+ reg &= ~0x000003FF;
+ reg |= 0x00000201;
+ mt76_wr(dev, MT_WMM_CTRL, reg);
+
+ /* TODO: Probably not needed */
+ mt76_wr(dev, 0x7028, 0);
+ mt76_wr(dev, 0x7010, 0);
+ mt76_wr(dev, 0x7024, 0);
+ msleep(10);
+}
+
+static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev)
+{
+ u32 *vals;
+ int i, ret;
+
+ vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
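+	/* Reset every WCID lookup entry to the all-ones (invalid) MAC
+	 * address.
+	 */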
+ for (i = 0; i < N_WCIDS; i++) {
+ vals[i * 2] = 0xffffffff;
+ vals[i * 2 + 1] = 0x00ffffff;
+ }
+
+ ret = mt76x0_burst_write_regs(dev, MT_WCID_ADDR_BASE,
+ vals, N_WCIDS * 2);
+ kfree(vals);
+
+ return ret;
+}
+
+static int mt76x0_init_key_mem(struct mt76x0_dev *dev)
+{
+ u32 vals[4] = {};
+
+ return mt76x0_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
+ vals, ARRAY_SIZE(vals));
+}
+
+static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev)
+{
+ u32 *vals;
+ int i, ret;
+
+ vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
+ for (i = 0; i < N_WCIDS * 2; i++)
+ vals[i] = 1;
+
+ ret = mt76x0_burst_write_regs(dev, MT_WCID_ATTR_BASE,
+ vals, N_WCIDS * 2);
+ kfree(vals);
+
+ return ret;
+}
+
+static void mt76x0_reset_counters(struct mt76x0_dev *dev)
+{
+ mt76_rr(dev, MT_RX_STA_CNT0);
+ mt76_rr(dev, MT_RX_STA_CNT1);
+ mt76_rr(dev, MT_RX_STA_CNT2);
+ mt76_rr(dev, MT_TX_STA_CNT0);
+ mt76_rr(dev, MT_TX_STA_CNT1);
+ mt76_rr(dev, MT_TX_STA_CNT2);
+}
+
+int mt76x0_mac_start(struct mt76x0_dev *dev)
+{
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
+ return -ETIMEDOUT;
+
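+	/* Filter out errored frames and control traffic (ACK, CTS, RTS,
+	 * PS-Poll, BA, ...) that the stack does not need to see.
+	 */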
+ dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
+ MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
+ MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
+ MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
+ MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
+{
+ int i, ok;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: TX DMA did not stop!\n");
+
+ /* Page count on TxQ */
+ i = 200;
+ while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
+ (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
+ (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
+ msleep(10);
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: MAC TX did not stop!\n");
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
+ MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ /* Page count on RxQ */
+ ok = 0;
+ i = 200;
+ while (i--) {
+ if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
+ !mt76_rr(dev, 0x0a30) &&
+ !mt76_rr(dev, 0x0a34)) {
+ if (ok++ > 5)
+ break;
+ continue;
+ }
+ msleep(1);
+ }
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: MAC RX did not stop!\n");
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: RX DMA did not stop!\n");
+}
+
+void mt76x0_mac_stop(struct mt76x0_dev *dev)
+{
+ mt76x0_mac_stop_hw(dev);
+ flush_delayed_work(&dev->stat_work);
+ cancel_delayed_work_sync(&dev->stat_work);
+}
+
+static void mt76x0_stop_hardware(struct mt76x0_dev *dev)
+{
+ mt76x0_chip_onoff(dev, false, false);
+}
+
+int mt76x0_init_hardware(struct mt76x0_dev *dev)
+{
+ static const u16 beacon_offsets[16] = {
+ /* 512 bytes per beacon */
+ 0xc000, 0xc200, 0xc400, 0xc600,
+ 0xc800, 0xca00, 0xcc00, 0xce00,
+ 0xd000, 0xd200, 0xd400, 0xd600,
+ 0xd800, 0xda00, 0xdc00, 0xde00
+ };
+ int ret;
+
+ dev->beacon_offsets = beacon_offsets;
+
+ mt76x0_chip_onoff(dev, true, true);
+
+ ret = mt76x0_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+ ret = mt76x0_mcu_init(dev);
+ if (ret)
+ goto err;
+
+ if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ /* Wait for ASIC ready after FW load. */
+ ret = mt76x0_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+
+ mt76x0_reset_csr_bbp(dev);
+ mt76x0_init_usb_dma(dev);
+
+ mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0x0);
+ mt76_wr(dev, MT_TSO_CTRL, 0x0);
+
+ ret = mt76x0_mcu_cmd_init(dev);
+ if (ret)
+ goto err;
+ ret = mt76x0_dma_init(dev);
+ if (ret)
+ goto err_mcu;
+
+ mt76x0_init_mac_registers(dev);
+
+ if (!mt76_poll_msec(dev, MT_MAC_STATUS,
+ MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 1000)) {
+ ret = -EIO;
+ goto err_rx;
+ }
+
+ ret = mt76x0_init_bbp(dev);
+ if (ret)
+ goto err_rx;
+
+ ret = mt76x0_init_wcid_mem(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt76x0_init_key_mem(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt76x0_init_wcid_attr_mem(dev);
+ if (ret)
+ goto err_rx;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX));
+
+ mt76x0_reset_counters(dev);
+
+ mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+
+ mt76_wr(dev, MT_TXOP_CTRL_CFG,
+ FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+ FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+ ret = mt76x0_eeprom_init(dev);
+ if (ret)
+ goto err_rx;
+
+ mt76x0_phy_init(dev);
+ return 0;
+
+err_rx:
+ mt76x0_dma_cleanup(dev);
+err_mcu:
+ mt76x0_mcu_cmd_deinit(dev);
+err:
+ mt76x0_chip_onoff(dev, false, false);
+ return ret;
+}
+
+void mt76x0_cleanup(struct mt76x0_dev *dev)
+{
+ if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return;
+
+ mt76x0_stop_hardware(dev);
+ mt76x0_dma_cleanup(dev);
+ mt76x0_mcu_cmd_deinit(dev);
+}
+
+struct mt76x0_dev *mt76x0_alloc_device(struct device *pdev)
+{
+ struct ieee80211_hw *hw;
+ struct mt76x0_dev *dev;
+
+ hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x0_ops);
+ if (!hw)
+ return NULL;
+
+ dev = hw->priv;
+ dev->mt76.dev = pdev;
+ dev->mt76.hw = hw;
+ mutex_init(&dev->usb_ctrl_mtx);
+ mutex_init(&dev->reg_atomic_mutex);
+ mutex_init(&dev->hw_atomic_mutex);
+ mutex_init(&dev->mutex);
+ spin_lock_init(&dev->tx_lock);
+ spin_lock_init(&dev->rx_lock);
+ spin_lock_init(&dev->mt76.lock);
+ spin_lock_init(&dev->mac_lock);
+ spin_lock_init(&dev->con_mon_lock);
+ atomic_set(&dev->avg_ampdu_len, 1);
+ skb_queue_head_init(&dev->tx_skb_done);
+
+ dev->stat_wq = alloc_workqueue("mt76x0", WQ_UNBOUND, 0);
+ if (!dev->stat_wq) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+
+ return dev;
+}
+
+#define CHAN2G(_idx, _freq) { \
+ .band = NL80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+ CHAN2G(1, 2412),
+ CHAN2G(2, 2417),
+ CHAN2G(3, 2422),
+ CHAN2G(4, 2427),
+ CHAN2G(5, 2432),
+ CHAN2G(6, 2437),
+ CHAN2G(7, 2442),
+ CHAN2G(8, 2447),
+ CHAN2G(9, 2452),
+ CHAN2G(10, 2457),
+ CHAN2G(11, 2462),
+ CHAN2G(12, 2467),
+ CHAN2G(13, 2472),
+ CHAN2G(14, 2484),
+};
+
+#define CHAN5G(_idx, _freq) { \
+ .band = NL80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel mt76_channels_5ghz[] = {
+ CHAN5G(36, 5180),
+ CHAN5G(40, 5200),
+ CHAN5G(44, 5220),
+ CHAN5G(46, 5230),
+ CHAN5G(48, 5240),
+ CHAN5G(52, 5260),
+ CHAN5G(56, 5280),
+ CHAN5G(60, 5300),
+ CHAN5G(64, 5320),
+
+ CHAN5G(100, 5500),
+ CHAN5G(104, 5520),
+ CHAN5G(108, 5540),
+ CHAN5G(112, 5560),
+ CHAN5G(116, 5580),
+ CHAN5G(120, 5600),
+ CHAN5G(124, 5620),
+ CHAN5G(128, 5640),
+ CHAN5G(132, 5660),
+ CHAN5G(136, 5680),
+ CHAN5G(140, 5700),
+};
+
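+/* hw_value encodes the PHY type in the high byte and the rate index in
+ * the low byte; short-preamble CCK entries use index + 8.
+ */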
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+}
+
+static struct ieee80211_rate mt76_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+
+static int
+mt76_init_sband(struct mt76x0_dev *dev, struct ieee80211_supported_band *sband,
+ const struct ieee80211_channel *chan, int n_chan,
+ struct ieee80211_rate *rates, int n_rates)
+{
+ struct ieee80211_sta_ht_cap *ht_cap;
+ void *chanlist;
+ int size;
+
+ size = n_chan * sizeof(*chan);
+ chanlist = devm_kmemdup(dev->mt76.dev, chan, size, GFP_KERNEL);
+ if (!chanlist)
+ return -ENOMEM;
+
+ sband->channels = chanlist;
+ sband->n_channels = n_chan;
+ sband->bitrates = rates;
+ sband->n_bitrates = n_rates;
+
+ ht_cap = &sband->ht_cap;
+ ht_cap->ht_supported = true;
+ ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
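+	/* Single spatial stream: MCS 0-7, plus MCS 32 via rx_mask[4] bit 0. */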
+ ht_cap->mcs.rx_mask[0] = 0xff;
+ ht_cap->mcs.rx_mask[4] = 0x1;
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
+
+ return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt76x0_dev *dev)
+{
+ dev->mt76.hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->mt76.sband_2g.sband;
+
+ WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
+ ARRAY_SIZE(mt76_channels_2ghz));
+
+ return mt76_init_sband(dev, &dev->mt76.sband_2g.sband,
+ mt76_channels_2ghz, ARRAY_SIZE(mt76_channels_2ghz),
+ mt76_rates, ARRAY_SIZE(mt76_rates));
+}
+
+static int
+mt76_init_sband_5g(struct mt76x0_dev *dev)
+{
+ dev->mt76.hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->mt76.sband_5g.sband;
+
+ return mt76_init_sband(dev, &dev->mt76.sband_5g.sband,
+ mt76_channels_5ghz, ARRAY_SIZE(mt76_channels_5ghz),
+ mt76_rates + 4, ARRAY_SIZE(mt76_rates) - 4);
+}
+
+int mt76x0_register_device(struct mt76x0_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->mt76.hw;
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+ /* Reserve WCID 0 for mcast - thanks to this APs WCID will go to
+ * entry no. 1 like it does in the vendor driver.
+ */
+ dev->wcid_mask[0] |= 1;
+
+ /* init fake wcid for monitor interfaces */
+ dev->mon_wcid = devm_kmalloc(dev->mt76.dev, sizeof(*dev->mon_wcid),
+ GFP_KERNEL);
+ if (!dev->mon_wcid)
+ return -ENOMEM;
+ dev->mon_wcid->idx = 0xff;
+ dev->mon_wcid->hw_key_idx = -1;
+
+ SET_IEEE80211_DEV(hw, dev->mt76.dev);
+
+ hw->queues = 4;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+
+ hw->sta_data_size = sizeof(struct mt76_sta);
+ hw->vif_data_size = sizeof(struct mt76_vif);
+
+ SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+ wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+ if (dev->ee->has_2ghz) {
+ ret = mt76_init_sband_2g(dev);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->ee->has_5ghz) {
+ ret = mt76_init_sband_5g(dev);
+ if (ret)
+ return ret;
+ }
+
+ dev->mt76.chandef.chan = &dev->mt76.sband_2g.sband.channels[0];
+
+ INIT_DELAYED_WORK(&dev->mac_work, mt76x0_mac_work);
+ INIT_DELAYED_WORK(&dev->stat_work, mt76x0_tx_stat);
+
+ ret = ieee80211_register_hw(hw);
+ if (ret)
+ return ret;
+
+ mt76x0_init_debugfs(dev);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
new file mode 100644
index 000000000000..24afcfd94b4e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -0,0 +1,282 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_INITVALS_H
+#define __MT76X0U_INITVALS_H
+
+#include "phy.h"
+
+static const struct mt76_reg_pair common_mac_reg_table[] = {
+#if 1
+ {MT_BCN_OFFSET(0), 0xf8f0e8e0}, /* 0x3800(e0), 0x3A00(e8), 0x3C00(f0), 0x3E00(f8), 512B for each beacon */
+ {MT_BCN_OFFSET(1), 0x6f77d0c8}, /* 0x3200(c8), 0x3400(d0), 0x1DC0(77), 0x1BC0(6f), 512B for each beacon */
+#endif
+
+ {MT_LEGACY_BASIC_RATE, 0x0000013f}, /* Basic rate set bitmap */
+ {MT_HT_BASIC_RATE, 0x00008003}, /* Basic HT rate set, 20M, MCS=3, MM. Format is the same as in TXWI. */
+ {MT_MAC_SYS_CTRL, 0x00}, /* 0x1004, default: disable RX */
+ {MT_RX_FILTR_CFG, 0x17f97}, /* 0x1400, RX filter control */
+ {MT_BKOFF_SLOT_CFG, 0x209}, /* default: set short slot time, CC_DELAY_TIME should be 2 */
+ /*{TX_SW_CFG0, 0x40a06}, Gary, 2006-08-23 */
+ {MT_TX_SW_CFG0, 0x0}, /* Gary, 2008-05-21 for CWC test */
+ {MT_TX_SW_CFG1, 0x80606}, /* Gary, 2006-08-23 */
+ {MT_TX_LINK_CFG, 0x1020}, /* Gary, 2006-08-23 */
+ /*{TX_TIMEOUT_CFG, 0x00182090}, CCK has some problem, so increase the timeout value. 2006-10-09 MArvek RT */
+ {MT_TX_TIMEOUT_CFG, 0x000a2090}, /* CCK has some problem, so increase the timeout value. 2006-10-09 MArvek RT, modified for 2860E, 2007-08-01 */
+ {MT_MAX_LEN_CFG, 0xa0fff | 0x00001000}, /* 0x3018, MAX frame length. Max PSDU = 16 kbytes. */
+ {MT_LED_CFG, 0x7f031e46}, /* Gary, 2006-08-23 */
+
+ {MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f /*0xbfbf3f1f*/},
+ {MT_PBF_RX_MAX_PCNT, 0x9f},
+
+ /*{TX_RTY_CFG, 0x6bb80408}, Jan, 2006/11/16*/
+/* WMM_ACM_SUPPORT */
+/* {TX_RTY_CFG, 0x6bb80101}, sample*/
+ {MT_TX_RETRY_CFG, 0x47d01f0f}, /* Jan, 2006/11/16: set TxWI->ACK = 0 in Probe Rsp; modified for 2860E, 2007-08-03 */
+
+ {MT_AUTO_RSP_CFG, 0x00000013}, /* Initial Auto_Responder, because QA will turn off Auto-Responder */
+ {MT_CCK_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder; RTS threshold is enabled. */
+ {MT_OFDM_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder; RTS threshold is enabled. */
+ {MT_PBF_CFG, 0xf40006}, /* Only enable Queue 2 */
+ {MT_MM40_PROT_CFG, 0x3F44084}, /* Initial Auto_Responder, because QA will turn off Auto-Responder */
+ {MT_WPDMA_GLO_CFG, 0x00000030},
+ {MT_GF20_PROT_CFG, 0x01744004}, /* set 19:18 --> Short NAV for MIMO PS*/
+ {MT_GF40_PROT_CFG, 0x03F44084},
+ {MT_MM20_PROT_CFG, 0x01744004},
+ {MT_TXOP_CTRL_CFG, 0x0000583f /*0x0000243f*/ /*0x000024bf*/}, /* Extension channel backoff. */
+ {MT_TX_RTS_CFG, 0x00092b20},
+
+ {MT_EXP_ACK_TIME, 0x002400ca}, /* default value */
+ {MT_TXOP_HLDR_ET, 0x00000002},
+
+ /* Jerry comments 2008/01/16: we use SIFS = 10us for CCK by default, but
+ * 10us seems too small for the Intel 2200bg card. In MBSS mode the delta
+ * time between beacon0 and beacon1 is SIFS (10us), so if an Intel 2200bg
+ * card connects to BSS0, pings are always lost. So change the CCK SIFS
+ * from 10us to 16us. */
+ {MT_XIFS_TIME_CFG, 0x33a41010},
+ {MT_PWR_PIN_CFG, 0x00000000},
+};
+
+static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
+ /* {MT_IOCFG_6, 0xA0040080 }, */
+ {MT_PBF_SYS_CTRL, 0x00080c00 },
+ {MT_PBF_CFG, 0x77723c1f },
+ {MT_FCE_PSE_CTRL, 0x00000001 },
+
+ {MT_AMPDU_MAX_LEN_20M1S, 0xBAA99887 },
+
+ /* Delay bb_tx_pe for proper tx_mcs_pwr update */
+ {MT_TX_SW_CFG0, 0x00000601 },
+
+ /* Set rf_tx_pe deassert time to 1us by Chee's comment @MT7650_CR_setting_1018.xlsx */
+ {MT_TX_SW_CFG1, 0x00040000 },
+ {MT_TX_SW_CFG2, 0x00000000 },
+
+ /* disable Tx info report */
+ {0xa44, 0x0000000 },
+
+ {MT_HEADER_TRANS_CTRL_REG, 0x0},
+ {MT_TSO_CTRL, 0x0},
+
+ /* BB_PA_MODE_CFG0(0x1214) Keep default value @20120903 */
+ {MT_BB_PA_MODE_CFG1, 0x00500055},
+
+ /* RF_PA_MODE_CFG0(0x121C) Keep default value @20120903 */
+ {MT_RF_PA_MODE_CFG1, 0x00500055},
+
+ {MT_TX_ALC_CFG_0, 0x2F2F000C},
+ {MT_TX0_BB_GAIN_ATTEN, 0x00000000}, /* set BBP atten gain = 0 */
+
+ {MT_TX_PWR_CFG_0, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_1, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_2, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_3, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_4, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_7, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_8, 0x3A},
+ {MT_TX_PWR_CFG_9, 0x3A},
+ /* Enable Tx length > 4095 byte */
+ {0x150C, 0x00000002},
+
+ /* Disable bt_abort_tx_en(0x1238[21] = 0) which is not used at MT7650 */
+ {0x1238, 0x001700C8},
+ /* PMU_OCLEVEL<5:1> from default <5'b10010> to <5'b11011> for normal driver */
+ /* {MT_LDO_CTRL_0, 0x00A647B6}, */
+
+ /* Default LDO_DIG supply 1.26V, change to 1.2V */
+ {MT_LDO_CTRL_1, 0x6B006464 },
+/*
+ {MT_HT_BASIC_RATE, 0x00004003 },
+ {MT_HT_CTRL_CFG, 0x000001FF },
+*/
+};
+
+static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
+ {MT_BBP(CORE, 1), 0x00000002},
+ {MT_BBP(CORE, 4), 0x00000000},
+ {MT_BBP(CORE, 24), 0x00000000},
+ {MT_BBP(CORE, 32), 0x4003000a},
+ {MT_BBP(CORE, 42), 0x00000000},
+ {MT_BBP(CORE, 44), 0x00000000},
+
+ {MT_BBP(IBI, 11), 0x00000080},
+
+ /* 0x2300[5] Default Antenna:
+ * 0 for WIFI main antenna
+ * 1 for WIFI aux antenna
+ */
+ {MT_BBP(AGC, 0), 0x00021400},
+ {MT_BBP(AGC, 1), 0x00000003},
+ {MT_BBP(AGC, 2), 0x003A6464},
+ {MT_BBP(AGC, 15), 0x88A28CB8},
+ {MT_BBP(AGC, 22), 0x00001E21},
+ {MT_BBP(AGC, 23), 0x0000272C},
+ {MT_BBP(AGC, 24), 0x00002F3A},
+ {MT_BBP(AGC, 25), 0x8000005A},
+ {MT_BBP(AGC, 26), 0x007C2005},
+ {MT_BBP(AGC, 34), 0x000A0C0C},
+ {MT_BBP(AGC, 37), 0x2121262C},
+ {MT_BBP(AGC, 41), 0x38383E45},
+ {MT_BBP(AGC, 57), 0x00001010},
+ {MT_BBP(AGC, 59), 0xBAA20E96},
+ {MT_BBP(AGC, 63), 0x00000001},
+
+ {MT_BBP(TXC, 0), 0x00280403},
+ {MT_BBP(TXC, 1), 0x00000000},
+
+ {MT_BBP(RXC, 1), 0x00000012},
+ {MT_BBP(RXC, 2), 0x00000011},
+ {MT_BBP(RXC, 3), 0x00000005},
+ {MT_BBP(RXC, 4), 0x00000000},
+ {MT_BBP(RXC, 5), 0xF977C4EC},
+ {MT_BBP(RXC, 7), 0x00000090},
+
+ {MT_BBP(TXO, 8), 0x00000000},
+
+ {MT_BBP(TXBE, 0), 0x00000000},
+ {MT_BBP(TXBE, 4), 0x00000004},
+ {MT_BBP(TXBE, 6), 0x00000000},
+ {MT_BBP(TXBE, 8), 0x00000014},
+ {MT_BBP(TXBE, 9), 0x20000000},
+ {MT_BBP(TXBE, 10), 0x00000000},
+ {MT_BBP(TXBE, 12), 0x00000000},
+ {MT_BBP(TXBE, 13), 0x00000000},
+ {MT_BBP(TXBE, 14), 0x00000000},
+ {MT_BBP(TXBE, 15), 0x00000000},
+ {MT_BBP(TXBE, 16), 0x00000000},
+ {MT_BBP(TXBE, 17), 0x00000000},
+
+ {MT_BBP(RXFE, 1), 0x00008800}, /* Add for E3 */
+ {MT_BBP(RXFE, 3), 0x00000000},
+ {MT_BBP(RXFE, 4), 0x00000000},
+
+ {MT_BBP(RXO, 13), 0x00000092},
+ {MT_BBP(RXO, 14), 0x00060612},
+ {MT_BBP(RXO, 15), 0xC8321B18},
+ {MT_BBP(RXO, 16), 0x0000001E},
+ {MT_BBP(RXO, 17), 0x00000000},
+ {MT_BBP(RXO, 18), 0xCC00A993},
+ {MT_BBP(RXO, 19), 0xB9CB9CB9},
+ {MT_BBP(RXO, 20), 0x26c00057},
+ {MT_BBP(RXO, 21), 0x00000001},
+ {MT_BBP(RXO, 24), 0x00000006},
+};
+
+static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 8), 0x0E344EF0}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 8), 0x122C54F2}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 14), 0x310F2E39}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 14), 0x310F2A3F}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 32), 0x00003230}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 32), 0x0000181C}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 33), 0x00003240}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 33), 0x00003218}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 35), 0x11112016}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 35), 0x11112016}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXO, 28), 0x0000008A}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXO, 28), 0x0000008A}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 4), 0x1FEDA049}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 4), 0x1FECA054}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 6), 0x00000045}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 6), 0x0000000A}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 12), 0x05052879}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 12), 0x050528F9}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 12), 0x050528F9}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 13), 0x35050004}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 13), 0x2C3A0406}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 27), 0x000000E1}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 27), 0x000000EC}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 28), 0x00060806}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00050806}},
+ {RF_A_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00060801}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_80, {MT_BBP(AGC, 28), 0x00060806}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 31), 0x00000F23}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 31), 0x00000F13}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 39), 0x2A2A3036}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A2C36}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A3036}},
+ {RF_A_BAND | RF_BW_80, {MT_BBP(AGC, 39), 0x2A2A2A36}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 43), 0x27273438}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 43), 0x27272D38}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 43), 0x27272B30}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 51), 0x17171C1C}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 51), 0xFFFFFFFF}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 53), 0x26262A2F}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 53), 0x2626322F}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 53), 0xFFFFFFFF}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 55), 0x40404E58}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 55), 0x40405858}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 55), 0xFFFFFFFF}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 58), 0x00001010}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 58), 0x00000000}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXFE, 0), 0x3D5000E0}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXFE, 0), 0x895000E0}},
+};
+
+static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
+ {MT_BBP(CAL, 47), 0x000010F0 },
+ {MT_BBP(CAL, 48), 0x00008080 },
+ {MT_BBP(CAL, 49), 0x00000F07 },
+ {MT_BBP(CAL, 50), 0x00000040 },
+ {MT_BBP(CAL, 51), 0x00000404 },
+ {MT_BBP(CAL, 52), 0x00080803 },
+ {MT_BBP(CAL, 53), 0x00000704 },
+ {MT_BBP(CAL, 54), 0x00002828 },
+ {MT_BBP(CAL, 55), 0x00005050 },
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
new file mode 100644
index 000000000000..95d43efc1f3d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
@@ -0,0 +1,772 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_PHY_INITVALS_H
+#define __MT76X0U_PHY_INITVALS_H
+
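+/* RF registers are addressed as (bank << 16 | reg), the same encoding
+ * the MT_RF() macro produces.
+ */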
+#define RF_REG_PAIR(bank, reg, value) \
+ { (bank) << 16 | (reg), value }
+
+static const struct mt76_reg_pair mt76x0_rf_central_tab[] = {
+/* Bank 0 - For central blocks: BG, PLL, XTAL, LO, ADC/DAC */
+ { MT_RF(0, 1), 0x01},
+ { MT_RF(0, 2), 0x11},
+
+ /*
+ R3 ~ R7: VCO Cal.
+ */
+ { MT_RF(0, 3), 0x73}, /* VCO Freq Cal - No Bypass, VCO Amp Cal - No Bypass */
+ { MT_RF(0, 4), 0x30}, /* R4 b<7>=1, VCO cal */
+ { MT_RF(0, 5), 0x00},
+ { MT_RF(0, 6), 0x41}, /* Set the open loop amplitude to middle since bypassing amplitude calibration */
+ { MT_RF(0, 7), 0x00},
+
+ /*
+ XO
+ */
+ { MT_RF(0, 8), 0x00},
+ { MT_RF(0, 9), 0x00},
+ { MT_RF(0, 10), 0x0C},
+ { MT_RF(0, 11), 0x00},
+ { MT_RF(0, 12), 0x00},
+
+ /*
+ BG
+ */
+ { MT_RF(0, 13), 0x00},
+ { MT_RF(0, 14), 0x00},
+ { MT_RF(0, 15), 0x00},
+
+ /*
+ LDO
+ */
+ { MT_RF(0, 19), 0x20},
+ /*
+ XO
+ */
+ { MT_RF(0, 20), 0x22},
+ { MT_RF(0, 21), 0x12},
+ { MT_RF(0, 23), 0x00},
+ { MT_RF(0, 24), 0x33}, /* See band selection for R24<1:0> */
+ { MT_RF(0, 25), 0x00},
+
+ /*
+ PLL, See Freq Selection
+ */
+ { MT_RF(0, 26), 0x00},
+ { MT_RF(0, 27), 0x00},
+ { MT_RF(0, 28), 0x00},
+ { MT_RF(0, 29), 0x00},
+ { MT_RF(0, 30), 0x00},
+ { MT_RF(0, 31), 0x00},
+ { MT_RF(0, 32), 0x00},
+ { MT_RF(0, 33), 0x00},
+ { MT_RF(0, 34), 0x00},
+ { MT_RF(0, 35), 0x00},
+ { MT_RF(0, 36), 0x00},
+ { MT_RF(0, 37), 0x00},
+
+ /*
+ LO Buffer
+ */
+ { MT_RF(0, 38), 0x2F},
+
+ /*
+ Test Ports
+ */
+ { MT_RF(0, 64), 0x00},
+ { MT_RF(0, 65), 0x80},
+ { MT_RF(0, 66), 0x01},
+ { MT_RF(0, 67), 0x04},
+
+ /*
+ ADC/DAC
+ */
+ { MT_RF(0, 68), 0x00},
+ { MT_RF(0, 69), 0x08},
+ { MT_RF(0, 70), 0x08},
+ { MT_RF(0, 71), 0x40},
+ { MT_RF(0, 72), 0xD0},
+ { MT_RF(0, 73), 0x93},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_2g_channel_0_tab[] = {
+/* Bank 5 - Channel 0 2G RF registers */
+ /*
+ RX logic operation
+ */
+ /* RF_R00 Change in SelectBand6590 */
+
+ { MT_RF(5, 2), 0x0C}, /* 5G+2G (MT7610U) */
+ { MT_RF(5, 3), 0x00},
+
+ /*
+ TX logic operation
+ */
+ { MT_RF(5, 4), 0x00},
+ { MT_RF(5, 5), 0x84},
+ { MT_RF(5, 6), 0x02},
+
+ /*
+ LDO
+ */
+ { MT_RF(5, 7), 0x00},
+ { MT_RF(5, 8), 0x00},
+ { MT_RF(5, 9), 0x00},
+
+ /*
+ RX
+ */
+ { MT_RF(5, 10), 0x51},
+ { MT_RF(5, 11), 0x22},
+ { MT_RF(5, 12), 0x22},
+ { MT_RF(5, 13), 0x0F},
+ { MT_RF(5, 14), 0x47}, /* Increase mixer current for more gain */
+ { MT_RF(5, 15), 0x25},
+ { MT_RF(5, 16), 0xC7}, /* Tune LNA2 tank */
+ { MT_RF(5, 17), 0x00},
+ { MT_RF(5, 18), 0x00},
+ { MT_RF(5, 19), 0x30}, /* Improve max Pin */
+ { MT_RF(5, 20), 0x33},
+ { MT_RF(5, 21), 0x02},
+ { MT_RF(5, 22), 0x32}, /* Tune LNA1 tank */
+ { MT_RF(5, 23), 0x00},
+ { MT_RF(5, 24), 0x25},
+ { MT_RF(5, 26), 0x00},
+ { MT_RF(5, 27), 0x12},
+ { MT_RF(5, 28), 0x0F},
+ { MT_RF(5, 29), 0x00},
+
+ /*
+ LOGEN
+ */
+ { MT_RF(5, 30), 0x51}, /* Tune LOGEN tank */
+ { MT_RF(5, 31), 0x35},
+ { MT_RF(5, 32), 0x31},
+ { MT_RF(5, 33), 0x31},
+ { MT_RF(5, 34), 0x34},
+ { MT_RF(5, 35), 0x03},
+ { MT_RF(5, 36), 0x00},
+
+ /*
+ TX
+ */
+ { MT_RF(5, 37), 0xDD}, /* Improve 3.2GHz spur */
+ { MT_RF(5, 38), 0xB3},
+ { MT_RF(5, 39), 0x33},
+ { MT_RF(5, 40), 0xB1},
+ { MT_RF(5, 41), 0x71},
+ { MT_RF(5, 42), 0xF2},
+ { MT_RF(5, 43), 0x47},
+ { MT_RF(5, 44), 0x77},
+ { MT_RF(5, 45), 0x0E},
+ { MT_RF(5, 46), 0x10},
+ { MT_RF(5, 47), 0x00},
+ { MT_RF(5, 48), 0x53},
+ { MT_RF(5, 49), 0x03},
+ { MT_RF(5, 50), 0xEF},
+ { MT_RF(5, 51), 0xC7},
+ { MT_RF(5, 52), 0x62},
+ { MT_RF(5, 53), 0x62},
+ { MT_RF(5, 54), 0x00},
+ { MT_RF(5, 55), 0x00},
+ { MT_RF(5, 56), 0x0F},
+ { MT_RF(5, 57), 0x0F},
+ { MT_RF(5, 58), 0x16},
+ { MT_RF(5, 59), 0x16},
+ { MT_RF(5, 60), 0x10},
+ { MT_RF(5, 61), 0x10},
+ { MT_RF(5, 62), 0xD0},
+ { MT_RF(5, 63), 0x6C},
+ { MT_RF(5, 64), 0x58},
+ { MT_RF(5, 65), 0x58},
+ { MT_RF(5, 66), 0xF2},
+ { MT_RF(5, 67), 0xE8},
+ { MT_RF(5, 68), 0xF0},
+ { MT_RF(5, 69), 0xF0},
+ { MT_RF(5, 127), 0x04},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_5g_channel_0_tab[] = {
+/* Bank 6 - Channel 0 5G RF registers */
+ /*
+ RX logic operation
+ */
+ /* RF_R00 Change in SelectBandmt76x0 */
+
+ { MT_RF(6, 2), 0x0C},
+ { MT_RF(6, 3), 0x00},
+
+ /*
+ TX logic operation
+ */
+ { MT_RF(6, 4), 0x00},
+ { MT_RF(6, 5), 0x84},
+ { MT_RF(6, 6), 0x02},
+
+ /*
+ LDO
+ */
+ { MT_RF(6, 7), 0x00},
+ { MT_RF(6, 8), 0x00},
+ { MT_RF(6, 9), 0x00},
+
+ /*
+ RX
+ */
+ { MT_RF(6, 10), 0x00},
+ { MT_RF(6, 11), 0x01},
+
+ { MT_RF(6, 13), 0x23},
+ { MT_RF(6, 14), 0x00},
+ { MT_RF(6, 15), 0x04},
+ { MT_RF(6, 16), 0x22},
+
+ { MT_RF(6, 18), 0x08},
+ { MT_RF(6, 19), 0x00},
+ { MT_RF(6, 20), 0x00},
+ { MT_RF(6, 21), 0x00},
+ { MT_RF(6, 22), 0xFB},
+
+ /*
+ LOGEN5G
+ */
+ { MT_RF(6, 25), 0x76},
+ { MT_RF(6, 26), 0x24},
+ { MT_RF(6, 27), 0x04},
+ { MT_RF(6, 28), 0x00},
+ { MT_RF(6, 29), 0x00},
+
+ /*
+ TX
+ */
+ { MT_RF(6, 37), 0xBB},
+ { MT_RF(6, 38), 0xB3},
+
+ { MT_RF(6, 40), 0x33},
+ { MT_RF(6, 41), 0x33},
+
+ { MT_RF(6, 43), 0x03},
+ { MT_RF(6, 44), 0xB3},
+
+ { MT_RF(6, 46), 0x17},
+ { MT_RF(6, 47), 0x0E},
+ { MT_RF(6, 48), 0x10},
+ { MT_RF(6, 49), 0x07},
+
+ { MT_RF(6, 62), 0x00},
+ { MT_RF(6, 63), 0x00},
+ { MT_RF(6, 64), 0xF1},
+ { MT_RF(6, 65), 0x0F},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_vga_channel_0_tab[] = {
+/* Bank 7 - Channel 0 VGA RF registers */
+ /* E3 CR */
+ { MT_RF(7, 0), 0x47}, /* Allow BBP/MAC to do calibration */
+ { MT_RF(7, 1), 0x00},
+ { MT_RF(7, 2), 0x00},
+ { MT_RF(7, 3), 0x00},
+ { MT_RF(7, 4), 0x00},
+
+ { MT_RF(7, 10), 0x13},
+ { MT_RF(7, 11), 0x0F},
+ { MT_RF(7, 12), 0x13}, /* For dcoc */
+ { MT_RF(7, 13), 0x13}, /* For dcoc */
+ { MT_RF(7, 14), 0x13}, /* For dcoc */
+ { MT_RF(7, 15), 0x20}, /* For dcoc */
+ { MT_RF(7, 16), 0x22}, /* For dcoc */
+
+ { MT_RF(7, 17), 0x7C},
+
+ { MT_RF(7, 18), 0x00},
+ { MT_RF(7, 19), 0x00},
+ { MT_RF(7, 20), 0x00},
+ { MT_RF(7, 21), 0xF1},
+ { MT_RF(7, 22), 0x11},
+ { MT_RF(7, 23), 0xC2},
+ { MT_RF(7, 24), 0x41},
+ { MT_RF(7, 25), 0x20},
+ { MT_RF(7, 26), 0x40},
+ { MT_RF(7, 27), 0xD7},
+ { MT_RF(7, 28), 0xA2},
+ { MT_RF(7, 29), 0x60},
+ { MT_RF(7, 30), 0x49},
+ { MT_RF(7, 31), 0x20},
+ { MT_RF(7, 32), 0x44},
+ { MT_RF(7, 33), 0xC1},
+ { MT_RF(7, 34), 0x60},
+ { MT_RF(7, 35), 0xC0},
+
+ { MT_RF(7, 61), 0x01},
+
+ { MT_RF(7, 72), 0x3C},
+ { MT_RF(7, 73), 0x34},
+ { MT_RF(7, 74), 0x00},
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_bw_switch_tab[] = {
+ /* Bank, Register, Bw/Band, Value */
+ { MT_RF(0, 17), RF_G_BAND | RF_BW_20, 0x00},
+ { MT_RF(0, 17), RF_G_BAND | RF_BW_40, 0x00},
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_20, 0x00},
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_40, 0x00},
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_80, 0x00},
+
+ /* TODO: need to check B7.R6 & B7.R7 setting for 2.4G again @20121112 */
+ { MT_RF(7, 6), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 6), RF_G_BAND | RF_BW_40, 0x1C},
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_40, 0x20},
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 7), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 7), RF_G_BAND | RF_BW_40, 0x20},
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_40, 0x20},
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 8), RF_G_BAND | RF_BW_20, 0x03},
+ { MT_RF(7, 8), RF_G_BAND | RF_BW_40, 0x01},
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_20, 0x03},
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_40, 0x01},
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_80, 0x00},
+
+ /* TODO: need to check B7.R58 & B7.R59 setting for 2.4G again @20121112 */
+ { MT_RF(7, 58), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 58), RF_G_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 59), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 59), RF_G_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 60), RF_G_BAND | RF_BW_20, 0xAA},
+ { MT_RF(7, 60), RF_G_BAND | RF_BW_40, 0xAA},
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_20, 0xAA},
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_40, 0xAA},
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_80, 0xAA},
+
+ { MT_RF(7, 76), RF_BW_20, 0x40},
+ { MT_RF(7, 76), RF_BW_40, 0x40},
+ { MT_RF(7, 76), RF_BW_80, 0x10},
+
+ { MT_RF(7, 77), RF_BW_20, 0x40},
+ { MT_RF(7, 77), RF_BW_40, 0x40},
+ { MT_RF(7, 77), RF_BW_80, 0x10},
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_band_switch_tab[] = {
+ /* Bank, Register, Bw/Band, Value */
+ { MT_RF(0, 16), RF_G_BAND, 0x20},
+ { MT_RF(0, 16), RF_A_BAND, 0x20},
+
+ { MT_RF(0, 18), RF_G_BAND, 0x00},
+ { MT_RF(0, 18), RF_A_BAND, 0x00},
+
+ { MT_RF(0, 39), RF_G_BAND, 0x36},
+ { MT_RF(0, 39), RF_A_BAND_LB, 0x34},
+ { MT_RF(0, 39), RF_A_BAND_MB, 0x33},
+ { MT_RF(0, 39), RF_A_BAND_HB, 0x31},
+ { MT_RF(0, 39), RF_A_BAND_11J, 0x36},
+
+ { MT_RF(6, 12), RF_A_BAND_LB, 0x44},
+ { MT_RF(6, 12), RF_A_BAND_MB, 0x44},
+ { MT_RF(6, 12), RF_A_BAND_HB, 0x55},
+ { MT_RF(6, 12), RF_A_BAND_11J, 0x44},
+
+ { MT_RF(6, 17), RF_A_BAND_LB, 0x02},
+ { MT_RF(6, 17), RF_A_BAND_MB, 0x00},
+ { MT_RF(6, 17), RF_A_BAND_HB, 0x00},
+ { MT_RF(6, 17), RF_A_BAND_11J, 0x05},
+
+ { MT_RF(6, 24), RF_A_BAND_LB, 0xA1},
+ { MT_RF(6, 24), RF_A_BAND_MB, 0x41},
+ { MT_RF(6, 24), RF_A_BAND_HB, 0x21},
+ { MT_RF(6, 24), RF_A_BAND_11J, 0xE1},
+
+ { MT_RF(6, 39), RF_A_BAND_LB, 0x36},
+ { MT_RF(6, 39), RF_A_BAND_MB, 0x34},
+ { MT_RF(6, 39), RF_A_BAND_HB, 0x32},
+ { MT_RF(6, 39), RF_A_BAND_11J, 0x37},
+
+ { MT_RF(6, 42), RF_A_BAND_LB, 0xFB},
+ { MT_RF(6, 42), RF_A_BAND_MB, 0xF3},
+ { MT_RF(6, 42), RF_A_BAND_HB, 0xEB},
+ { MT_RF(6, 42), RF_A_BAND_11J, 0xEB},
+
+ /* Move R6-R45, R50~R59 to mt76x0_RF_INT_PA_5G_Channel_0_RegTb/mt76x0_RF_EXT_PA_5G_Channel_0_RegTb */
+
+ { MT_RF(6, 127), RF_G_BAND, 0x84},
+ { MT_RF(6, 127), RF_A_BAND, 0x04},
+
+ { MT_RF(7, 5), RF_G_BAND, 0x40},
+ { MT_RF(7, 5), RF_A_BAND, 0x00},
+
+ { MT_RF(7, 9), RF_G_BAND, 0x00},
+ { MT_RF(7, 9), RF_A_BAND, 0x00},
+
+ { MT_RF(7, 70), RF_G_BAND, 0x00},
+ { MT_RF(7, 70), RF_A_BAND, 0x6D},
+
+ { MT_RF(7, 71), RF_G_BAND, 0x00},
+ { MT_RF(7, 71), RF_A_BAND, 0xB0},
+
+ { MT_RF(7, 78), RF_G_BAND, 0x00},
+ { MT_RF(7, 78), RF_A_BAND, 0x55},
+
+ { MT_RF(7, 79), RF_G_BAND, 0x00},
+ { MT_RF(7, 79), RF_A_BAND, 0x55},
+};
+
+static const struct mt76x0_freq_item mt76x0_frequency_plan[] = {
+ {1, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2412 */
+ {2, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA1, 0, 0x30, 0, 0, 0x1}, /* Freq 2417 */
+ {3, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE2, 0x40, 0x07, 0x40, 0x0B, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2422 */
+ {4, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2427 */
+ {5, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2432 */
+ {6, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2437 */
+ {7, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x07, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2442 */
+ {8, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA3, 0, 0x30, 0, 0, 0x1}, /* Freq 2447 */
+ {9, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xF2, 0x40, 0x07, 0x40, 0x0D, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2452 */
+ {10, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x51, 0, 0x30, 0, 0, 0x0}, /* Freq 2457 */
+ {11, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2462 */
+ {12, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2467 */
+ {13, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2472 */
+ {14, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2484 */
+
+ {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 4915 */
+ {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x00, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4920 */
+ {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4925 */
+ {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4935 */
+ {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4940 */
+ {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4945 */
+ {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4960 */
+ {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4980 */
+
+ {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5180 */
+ {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5185 */
+ {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5190 */
+ {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5195 */
+ {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5200 */
+ {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5205 */
+ {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5210 */
+ {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5215 */
+ {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5220 */
+ {45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5225 */
+ {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5230 */
+ {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5235 */
+ {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5240 */
+ {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5245 */
+ {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5250 */
+ {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5255 */
+ {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5260 */
+ {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5265 */
+ {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5270 */
+ {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5275 */
+ {56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5280 */
+ {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5285 */
+ {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5290 */
+ {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5295 */
+ {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5300 */
+ {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5305 */
+ {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5310 */
+ {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5315 */
+ {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5320 */
+
+ {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5500 */
+ {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5505 */
+ {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5510 */
+ {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5515 */
+ {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5520 */
+ {105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5525 */
+ {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5530 */
+ {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5535 */
+ {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5540 */
+ {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5545 */
+ {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5550 */
+ {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5555 */
+ {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5560 */
+ {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5565 */
+ {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5570 */
+ {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5575 */
+ {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5580 */
+ {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5585 */
+ {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5590 */
+ {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5595 */
+ {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5600 */
+ {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5605 */
+ {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5610 */
+ {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5615 */
+ {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5620 */
+ {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5625 */
+ {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5630 */
+ {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5635 */
+ {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5640 */
+ {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5645 */
+ {130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5650 */
+ {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5655 */
+ {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5660 */
+ {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5665 */
+ {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5670 */
+ {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5675 */
+ {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5680 */
+
+ {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5685 */
+ {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5690 */
+ {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5695 */
+ {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5700 */
+ {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5705 */
+ {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5710 */
+ {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5715 */
+ {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5720 */
+ {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5725 */
+ {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5730 */
+ {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5735 */
+ {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5740 */
+ {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5745 */
+ {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5750 */
+ {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5755 */
+ {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5760 */
+ {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5765 */
+ {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5770 */
+ {155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5775 */
+ {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5780 */
+ {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5785 */
+ {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5790 */
+ {159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5795 */
+ {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5800 */
+ {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5805 */
+ {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5810 */
+ {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5815 */
+ {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5820 */
+ {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5825 */
+ {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5830 */
+ {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5835 */
+ {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5840 */
+ {169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5845 */
+ {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5850 */
+ {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5855 */
+ {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5860 */
+ {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5865 */
+};
+
+static const struct mt76x0_freq_item mt76x0_sdm_frequency_plan[] = {
+ {1, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2412 */
+ {2, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x12222, 0x3}, /* Freq 2417 */
+ {3, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x17777, 0x3}, /* Freq 2422 */
+ {4, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x1CCCC, 0x3}, /* Freq 2427 */
+ {5, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x22222, 0x3}, /* Freq 2432 */
+ {6, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x27777, 0x3}, /* Freq 2437 */
+ {7, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x2CCCC, 0x3}, /* Freq 2442 */
+ {8, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x32222, 0x3}, /* Freq 2447 */
+ {9, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x37777, 0x3}, /* Freq 2452 */
+ {10, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3CCCC, 0x3}, /* Freq 2457 */
+ {11, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2222, 0x3}, /* Freq 2462 */
+ {12, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x7777, 0x3}, /* Freq 2467 */
+ {13, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2472 */
+ {14, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x19999, 0x3}, /* Freq 2484 */
+
+ {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 4915 */
+ {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x0, 0x3}, /* Freq 4920 */
+ {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2AAA, 0x3}, /* Freq 4925 */
+ {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x8000, 0x3}, /* Freq 4935 */
+ {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 4940 */
+ {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 4945 */
+ {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 4960 */
+ {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 4980 */
+
+ {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 5180 */
+ {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 5185 */
+ {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5190 */
+ {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5195 */
+ {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5200 */
+ {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5205 */
+ {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5210 */
+ {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5215 */
+ {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5220 */
+ {45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5225 */
+ {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5230 */
+ {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5235 */
+ {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5240 */
+ {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5245 */
+ {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5250 */
+ {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5255 */
+ {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5260 */
+ {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5265 */
+ {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5270 */
+ {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5275 */
+ {56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5280 */
+ {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5285 */
+ {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5290 */
+ {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5295 */
+ {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5300 */
+ {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5305 */
+ {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5310 */
+ {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5315 */
+ {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5320 */
+
+ {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5500 */
+ {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5505 */
+ {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5510 */
+ {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5515 */
+ {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5520 */
+ {105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5525 */
+ {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5530 */
+ {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5535 */
+ {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5540 */
+ {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5545 */
+ {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5550 */
+ {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5555 */
+ {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5560 */
+ {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5565 */
+ {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5570 */
+ {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5575 */
+ {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5580 */
+ {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5585 */
+ {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5590 */
+ {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5595 */
+ {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5600 */
+ {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5605 */
+ {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5610 */
+ {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5615 */
+ {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5620 */
+ {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5625 */
+ {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5630 */
+ {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5635 */
+ {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5640 */
+ {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5645 */
+ {130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5650 */
+ {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5655 */
+ {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5660 */
+ {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5665 */
+ {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5670 */
+ {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5675 */
+ {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5680 */
+
+ {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5685 */
+ {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5690 */
+ {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5695 */
+ {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5700 */
+ {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5705 */
+ {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5710 */
+ {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5715 */
+ {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5720 */
+ {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5725 */
+ {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5730 */
+ {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5735 */
+ {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5740 */
+ {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5745 */
+ {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5750 */
+ {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5755 */
+ {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5760 */
+ {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5765 */
+ {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5770 */
+ {155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5775 */
+ {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5780 */
+ {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5785 */
+ {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5790 */
+ {159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5795 */
+ {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5800 */
+ {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5805 */
+ {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5810 */
+ {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5815 */
+ {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5820 */
+ {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5825 */
+ {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5830 */
+ {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5835 */
+ {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5840 */
+ {169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5845 */
+ {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5850 */
+ {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5855 */
+ {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5860 */
+ {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5865 */
+};
+
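+/* Channels that must be tuned with the fractional (sigma-delta modulator)
+ * frequency plan above instead of the integer plan; the channel-selection
+ * code checks this list when picking a plan.
+ */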
+static const u8 mt76x0_sdm_channel[] = {
+ 183, 185, 43, 45, 54, 55, 57, 58, 102, 103, 105, 106, 115, 117, 126, 127, 129, 130, 139, 141, 150, 151, 153, 154, 163, 165
+};
+
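+/* Per-band RF register overrides applied when an external PA is used;
+ * each entry is { RF bank/register, 5 GHz sub-band mask, value }.
+ */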
+static const struct mt76x0_rf_switch_item mt76x0_rf_ext_pa_tab[] = {
+ { MT_RF(6, 45), RF_A_BAND_LB, 0x63},
+ { MT_RF(6, 45), RF_A_BAND_MB, 0x43},
+ { MT_RF(6, 45), RF_A_BAND_HB, 0x33},
+ { MT_RF(6, 45), RF_A_BAND_11J, 0x73},
+
+ { MT_RF(6, 50), RF_A_BAND_LB, 0x02},
+ { MT_RF(6, 50), RF_A_BAND_MB, 0x02},
+ { MT_RF(6, 50), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 50), RF_A_BAND_11J, 0x02},
+
+ { MT_RF(6, 51), RF_A_BAND_LB, 0x02},
+ { MT_RF(6, 51), RF_A_BAND_MB, 0x02},
+ { MT_RF(6, 51), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 51), RF_A_BAND_11J, 0x02},
+
+ { MT_RF(6, 52), RF_A_BAND_LB, 0x08},
+ { MT_RF(6, 52), RF_A_BAND_MB, 0x08},
+ { MT_RF(6, 52), RF_A_BAND_HB, 0x08},
+ { MT_RF(6, 52), RF_A_BAND_11J, 0x08},
+
+ { MT_RF(6, 53), RF_A_BAND_LB, 0x08},
+ { MT_RF(6, 53), RF_A_BAND_MB, 0x08},
+ { MT_RF(6, 53), RF_A_BAND_HB, 0x08},
+ { MT_RF(6, 53), RF_A_BAND_11J, 0x08},
+
+ { MT_RF(6, 54), RF_A_BAND_LB, 0x0A},
+ { MT_RF(6, 54), RF_A_BAND_MB, 0x0A},
+ { MT_RF(6, 54), RF_A_BAND_HB, 0x0A},
+ { MT_RF(6, 54), RF_A_BAND_11J, 0x0A},
+
+ { MT_RF(6, 55), RF_A_BAND_LB, 0x0A},
+ { MT_RF(6, 55), RF_A_BAND_MB, 0x0A},
+ { MT_RF(6, 55), RF_A_BAND_HB, 0x0A},
+ { MT_RF(6, 55), RF_A_BAND_11J, 0x0A},
+
+ { MT_RF(6, 56), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 56), RF_A_BAND_MB, 0x05},
+ { MT_RF(6, 56), RF_A_BAND_HB, 0x05},
+ { MT_RF(6, 56), RF_A_BAND_11J, 0x05},
+
+ { MT_RF(6, 57), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 57), RF_A_BAND_MB, 0x05},
+ { MT_RF(6, 57), RF_A_BAND_HB, 0x05},
+ { MT_RF(6, 57), RF_A_BAND_11J, 0x05},
+
+ { MT_RF(6, 58), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 58), RF_A_BAND_MB, 0x03},
+ { MT_RF(6, 58), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 58), RF_A_BAND_11J, 0x07},
+
+ { MT_RF(6, 59), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 59), RF_A_BAND_MB, 0x03},
+ { MT_RF(6, 59), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 59), RF_A_BAND_11J, 0x07},
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
new file mode 100644
index 000000000000..91a84be36d3b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "trace.h"
+#include <linux/etherdevice.h>
+
+static void
+mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+ enum nl80211_band band)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ txrate->idx = idx;
+ return;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case MT_PHY_BW_80:
+ txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+}
+
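+/* The hardware reports only the final TX rate and the total retry count.
+ * Reconstruct a rate-fallback chain for mac80211 by putting the reported
+ * rate at slot 'retry' (capped at the last slot) and filling the earlier
+ * slots with successively higher rate indices.
+ */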
+static void
+mt76_mac_fill_tx_status(struct mt76x0_dev *dev, struct ieee80211_tx_info *info,
+ struct mt76_tx_status *st, int n_frames)
+{
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ int cur_idx, last_rate;
+ int i;
+
+ if (!n_frames)
+ return;
+
+ last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+ mt76_mac_process_tx_rate(&rate[last_rate], st->rate,
+ dev->mt76.chandef.chan->band);
+ if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+ rate[last_rate + 1].idx = -1;
+
+ cur_idx = rate[last_rate].idx + last_rate;
+ for (i = 0; i <= last_rate; i++) {
+ rate[i].flags = rate[last_rate].flags;
+ rate[i].idx = max_t(int, 0, cur_idx - i);
+ rate[i].count = 1;
+ }
+
+ if (last_rate > 0)
+ rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+ info->status.ampdu_len = n_frames;
+ info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+ if (st->pktid & MT_TXWI_PKTID_PROBE)
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
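+/* Pack an ieee80211_tx_rate into the 16-bit hardware rate value (the
+ * MT_RXWI_RATE_* fields: index, PHY mode, bandwidth, SGI), shared by TXWI
+ * and RXWI. The stream count is recovered from the MCS index (idx >> 4 for
+ * VHT, idx >> 3 for HT) and returned through nss_val. Note the value is
+ * returned in little-endian (hardware) byte order despite the u16 return
+ * type.
+ */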
+u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+ u16 rateval;
+ u8 phy, rate_idx;
+ u8 nss = 1;
+ u8 bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 4);
+ phy = MT_PHY_TYPE_VHT;
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = 2;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mt76.chandef.chan->band;
+ u16 val;
+
+ r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ bw = 0;
+ }
+
+ rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rateval |= MT_RXWI_RATE_SGI;
+
+ *nss_val = nss;
+ return cpu_to_le16(rateval);
+}
+
+void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->mt76.lock, flags);
+ wcid->tx_rate = mt76x0_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+ wcid->tx_rate_set = true;
+ spin_unlock_irqrestore(&dev->mt76.lock, flags);
+}
+
+struct mt76_tx_status mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev)
+{
+ struct mt76_tx_status stat = {};
+ u32 stat2, stat1;
+
+ stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+ stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ stat.valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
+ stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+ stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+ stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+ stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+ stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+
+ stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+ stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+
+ return stat;
+}
+
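+/* Report a TX status event to mac80211. Events for frames that belong to
+ * the same A-MPDU are batched: while consecutive events carry the same
+ * rate, retry count and WCID only a frame counter is incremented (up to
+ * 32 frames), and one aggregated report is sent once the parameters
+ * change.
+ */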
+void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ struct mt76_sta *msta = NULL;
+
+ rcu_read_lock();
+ if (stat->wcid < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+ if (wcid) {
+ void *priv;
+
+ priv = msta = container_of(wcid, struct mt76_sta, wcid);
+ sta = container_of(priv, struct ieee80211_sta, drv_priv);
+ }
+
+ if (msta && stat->aggr) {
+ u32 stat_val, stat_cache;
+
+ stat_val = stat->rate;
+ stat_val |= ((u32) stat->retry) << 16;
+ stat_cache = msta->status.rate;
+ stat_cache |= ((u32) msta->status.retry) << 16;
+
+ if (*update == 0 && stat_val == stat_cache &&
+ stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+ msta->n_frames++;
+ goto out;
+ }
+
+ mt76_mac_fill_tx_status(dev, &info, &msta->status,
+ msta->n_frames);
+ msta->status = *stat;
+ msta->n_frames = 1;
+ *update = 0;
+ } else {
+ mt76_mac_fill_tx_status(dev, &info, stat, 1);
+ *update = 1;
+ }
+
+ spin_lock_bh(&dev->mac_lock);
+ ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
+ spin_unlock_bh(&dev->mac_lock);
+out:
+ rcu_read_unlock();
+}
+
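+/* Program the six consecutive protection registers starting at
+ * MT_CCK_PROT_CFG: prot[0]/prot[1] cover legacy CCK/OFDM frames,
+ * prot[2..5] the 20/40 MHz mixed-mode and greenfield HT modes; ht_rts[]
+ * selects which of the HT modes use RTS/CTS based on the HT operation
+ * mode and the presence of non-greenfield STAs.
+ */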
+void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
+ int ht_mode)
+{
+ int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ u32 prot[6];
+ bool ht_rts[4] = {};
+ int i;
+
+ prot[0] = MT_PROT_NAV_SHORT |
+ MT_PROT_TXOP_ALLOW_ALL |
+ MT_PROT_RTS_THR_EN;
+ prot[1] = prot[0];
+ if (legacy_prot)
+ prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+ prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
+ prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
+
+ if (legacy_prot) {
+ prot[2] |= MT_PROT_RATE_CCK_11;
+ prot[3] |= MT_PROT_RATE_CCK_11;
+ prot[4] |= MT_PROT_RATE_CCK_11;
+ prot[5] |= MT_PROT_RATE_CCK_11;
+ } else {
+ prot[2] |= MT_PROT_RATE_OFDM_24;
+ prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+ prot[4] |= MT_PROT_RATE_OFDM_24;
+ prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+ }
+
+ switch (mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ ht_rts[1] = ht_rts[3] = true;
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+ break;
+ }
+
+ if (non_gf)
+ ht_rts[2] = ht_rts[3] = true;
+
+ for (i = 0; i < 4; i++)
+ if (ht_rts[i])
+ prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
+
+ for (i = 0; i < 6; i++)
+ mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+}
+
+void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb)
+{
+ if (short_preamb)
+ mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+ else
+ mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+}
+
+void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval)
+{
+ u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG);
+
+ val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN);
+
+ if (!enable) {
+ mt76_wr(dev, MT_BEACON_TIME_CFG, val);
+ return;
+ }
+
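+ /* interval is in TU (1024 us); the INTVAL field counts 1/16 TU
+ * (64 us) units, hence the << 4.
+ */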
+ val &= ~MT_BEACON_TIME_CFG_INTVAL;
+ val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
+ MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN;
+
+ mt76_wr(dev, MT_BEACON_TIME_CFG, val);
+}
+
+static void mt76x0_check_mac_err(struct mt76x0_dev *dev)
+{
+ u32 val = mt76_rr(dev, 0x10f4);
+
+ if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+ return;
+
+ dev_err(dev->mt76.dev, "Error: MAC specific condition occurred\n");
+
+ mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+ udelay(10);
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+}
+
+void mt76x0_mac_work(struct work_struct *work)
+{
+ struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+ mac_work.work);
+ struct {
+ u32 addr_base;
+ u32 span;
+ u64 *stat_base;
+ } spans[] = {
+ { MT_RX_STA_CNT0, 3, dev->stats.rx_stat },
+ { MT_TX_STA_CNT0, 3, dev->stats.tx_stat },
+ { MT_TX_AGG_STAT, 1, dev->stats.aggr_stat },
+ { MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del },
+ { MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] },
+ { MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] },
+ };
+ u32 sum, n;
+ int i, j, k;
+
+ /* Note: using MCU_RANDOM_READ is actually slower than reading all the
+ * registers by hand. The MCU takes about 20 ms to complete a read of 24
+ * registers, while reading them one by one takes roughly
+ * 24 * 200 us =~ 5 ms.
+ */
+
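+ /* Each MT_TX_AGG_CNT register packs two 16-bit counters for
+ * consecutive A-MPDU sizes: register k counts aggregates of
+ * 2k+1 (low word) and 2k+2 (high word) MPDUs, so sum/n below
+ * yields the average A-MPDU length.
+ */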
+ k = 0;
+ n = 0;
+ sum = 0;
+ for (i = 0; i < ARRAY_SIZE(spans); i++)
+ for (j = 0; j < spans[i].span; j++) {
+ u32 val = mt76_rr(dev, spans[i].addr_base + j * 4);
+
+ spans[i].stat_base[j * 2] += val & 0xffff;
+ spans[i].stat_base[j * 2 + 1] += val >> 16;
+
+ /* Calculate average AMPDU length */
+ if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
+ spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
+ continue;
+
+ n += (val >> 16) + (val & 0xffff);
+ sum += (val & 0xffff) * (1 + k * 2) +
+ (val >> 16) * (2 + k * 2);
+ k++;
+ }
+
+ atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
+
+ mt76x0_check_mac_err(dev);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ);
+}
+
+void
+mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+ u8 zmac[ETH_ALEN] = {};
+ u32 attr;
+
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ if (mac)
+ memcpy(zmac, mac, sizeof(zmac));
+
+ mt76x0_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
+}
+
+void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
+{
+ struct ieee80211_sta *sta;
+ struct mt76_wcid *wcid;
+ void *msta;
+ u8 min_factor = 3;
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
+ wcid = rcu_dereference(dev->wcid[i]);
+ if (!wcid)
+ continue;
+
+ msta = container_of(wcid, struct mt76_sta, wcid);
+ sta = container_of(msta, struct ieee80211_sta, drv_priv);
+
+ min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
+ }
+ rcu_read_unlock();
+
+ mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
+ FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
+}
+
+static void
+mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (idx >= 8)
+ idx = 0;
+
+ if (status->band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ status->rate_idx = idx;
+ return;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8) {
+ idx -= 8;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ }
+
+ if (idx >= 4)
+ idx = 0;
+
+ status->rate_idx = idx;
+ return;
+ case MT_PHY_TYPE_HT_GF:
+ status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ status->rate_idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->encoding = RX_ENC_VHT;
+ status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+ status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (rate & MT_RXWI_RATE_LDPC)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ if (rate & MT_RXWI_RATE_SGI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate & MT_RXWI_RATE_STBC)
+ status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case MT_PHY_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void
+mt76x0_rx_monitor_beacon(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
+ u16 rate, int rssi)
+{
+ dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
+ dev->avg_rssi = ((dev->avg_rssi * 15) / 16 + (rssi << 8)) / 256;
+}
+
+static int
+mt76x0_rx_is_our_beacon(struct mt76x0_dev *dev, u8 *data)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
+
+ return ieee80211_is_beacon(hdr->frame_control) &&
+ ether_addr_equal(hdr->addr2, dev->ap_bssid);
+}
+
+u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ u8 *data, void *rxi)
+{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct mt76x0_rxwi *rxwi = rxi;
+ u32 len, ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ int rssi;
+
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ if (WARN_ON(len < 10))
+ return 0;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
+ }
+
+ status->chains = BIT(0);
+ rssi = mt76x0_phy_get_rssi(dev, rxwi);
+ status->chain_signal[0] = status->signal = rssi;
+ status->freq = dev->mt76.chandef.chan->center_freq;
+ status->band = dev->mt76.chandef.chan->band;
+
+ mt76_mac_process_rate(status, rate);
+
+ spin_lock_bh(&dev->con_mon_lock);
+ if (mt76x0_rx_is_our_beacon(dev, data)) {
+ mt76x0_rx_monitor_beacon(dev, rxwi, rate, rssi);
+ } else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) {
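+ /* Unicast-to-us frame: track RSSI with an exponential
+ * moving average of weight 1/16.
+ */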
+ if (dev->avg_rssi == 0)
+ dev->avg_rssi = rssi;
+ else
+ dev->avg_rssi = (dev->avg_rssi * 15) / 16 + rssi / 16;
+ }
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ return len;
+}
+
+static enum mt76_cipher_type
+mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76_cipher_type cipher;
+ u8 key_data[32];
+ u8 iv_data[8];
+ u32 val;
+
+ cipher = mt76_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EINVAL;
+
+ trace_mt76x0_set_key(&dev->mt76, idx);
+
+ mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+ memset(iv_data, 0, sizeof(iv_data));
+ if (key) {
+ iv_data[3] = key->keyidx << 6;
+ if (cipher >= MT_CIPHER_TKIP) {
+ /* Note: start with 1 to comply with spec,
+ * (see comment on common/cmm_wpa.c:4291).
+ */
+ iv_data[0] |= 1;
+ iv_data[3] |= 0x20;
+ }
+ }
+ mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+ val = mt76_rr(dev, MT_WCID_ATTR(idx));
+ val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
+ val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
+ FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
+ val &= ~MT_WCID_ATTR_PAIRWISE;
+ val |= MT_WCID_ATTR_PAIRWISE *
+ !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ mt76_wr(dev, MT_WCID_ATTR(idx), val);
+
+ return 0;
+}
+
+int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76_cipher_type cipher;
+ u8 key_data[32];
+ u32 val;
+
+ cipher = mt76_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EINVAL;
+
+ trace_mt76x0_set_shared_key(&dev->mt76, vif_idx, key_idx);
+
+ mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
+ key_data, sizeof(key_data));
+
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+ val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
new file mode 100644
index 000000000000..bea067b71c13
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_MAC_H
+#define __MT76_MAC_H
+
+/* Note: on MT76x0U the original "RSSI" and "SNR" fields do not hold what
+ * their names suggest; the names used by this driver are educated guesses
+ * (see vendor mac/ral_omac.c).
+ */
+struct mt76x0_rxwi {
+ __le32 rxinfo;
+
+ __le32 ctl;
+
+ __le16 tid_sn;
+ __le16 rate;
+
+ s8 rssi[4];
+
+ __le32 bbp_rxinfo[4];
+} __packed __aligned(4);
+
+#define MT_RXINFO_BA BIT(0)
+#define MT_RXINFO_DATA BIT(1)
+#define MT_RXINFO_NULL BIT(2)
+#define MT_RXINFO_FRAG BIT(3)
+#define MT_RXINFO_U2M BIT(4)
+#define MT_RXINFO_MULTICAST BIT(5)
+#define MT_RXINFO_BROADCAST BIT(6)
+#define MT_RXINFO_MYBSS BIT(7)
+#define MT_RXINFO_CRCERR BIT(8)
+#define MT_RXINFO_ICVERR BIT(9)
+#define MT_RXINFO_MICERR BIT(10)
+#define MT_RXINFO_AMSDU BIT(11)
+#define MT_RXINFO_HTC BIT(12)
+#define MT_RXINFO_RSSI BIT(13)
+#define MT_RXINFO_L2PAD BIT(14)
+#define MT_RXINFO_AMPDU BIT(15)
+#define MT_RXINFO_DECRYPT BIT(16)
+#define MT_RXINFO_BSSIDX3 BIT(17)
+#define MT_RXINFO_WAPI_KEY BIT(18)
+#define MT_RXINFO_PN_LEN GENMASK(21, 19)
+#define MT_RXINFO_SW_PKT_80211 BIT(22)
+#define MT_RXINFO_TCP_SUM_BYPASS BIT(28)
+#define MT_RXINFO_IP_SUM_BYPASS BIT(29)
+#define MT_RXINFO_TCP_SUM_ERR BIT(30)
+#define MT_RXINFO_IP_SUM_ERR BIT(31)
+
+#define MT_RXWI_CTL_WCID GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN GENMASK(27, 16)
+#define MT_RXWI_CTL_TID GENMASK(31, 28)
+
+#define MT_RXWI_FRAG GENMASK(3, 0)
+#define MT_RXWI_SN GENMASK(15, 4)
+
+#define MT_RXWI_RATE_INDEX GENMASK(5, 0)
+#define MT_RXWI_RATE_LDPC BIT(6)
+#define MT_RXWI_RATE_BW GENMASK(8, 7)
+#define MT_RXWI_RATE_SGI BIT(9)
+#define MT_RXWI_RATE_STBC BIT(10)
+#define MT_RXWI_RATE_LDPC_ETXBF BIT(11)
+#define MT_RXWI_RATE_SND BIT(12)
+#define MT_RXWI_RATE_PHY GENMASK(15, 13)
+
+#define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0)
+#define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4)
+
+#define MT_RXWI_GAIN_RSSI_VAL GENMASK(5, 0)
+#define MT_RXWI_GAIN_RSSI_LNA_ID GENMASK(7, 6)
+#define MT_RXWI_ANT_AUX_LNA BIT(7)
+
+#define MT_RXWI_EANT_ENC_ANT_ID GENMASK(7, 0)
+
+enum mt76_phy_bandwidth {
+ MT_PHY_BW_20,
+ MT_PHY_BW_40,
+ MT_PHY_BW_80,
+};
+
+struct mt76_txwi {
+ __le16 flags;
+ __le16 rate_ctl;
+ u8 ack_ctl;
+ u8 wcid;
+ __le16 len_ctl;
+ __le32 iv;
+ __le32 eiv;
+ u8 aid;
+ u8 txstream;
+ u8 ctl2;
+ u8 pktid;
+} __packed __aligned(4);
+
+#define MT_TXWI_FLAGS_FRAG BIT(0)
+#define MT_TXWI_FLAGS_MMPS BIT(1)
+#define MT_TXWI_FLAGS_CFACK BIT(2)
+#define MT_TXWI_FLAGS_TS BIT(3)
+#define MT_TXWI_FLAGS_AMPDU BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8)
+#define MT_TXWI_FLAGS_CWMIN GENMASK(12, 10)
+#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13)
+#define MT_TXWI_FLAGS_TX_RPT BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15)
+
+#define MT_TXWI_RATE_MCS GENMASK(6, 0)
+#define MT_TXWI_RATE_BW BIT(7)
+#define MT_TXWI_RATE_SGI BIT(8)
+#define MT_TXWI_RATE_STBC GENMASK(10, 9)
+#define MT_TXWI_RATE_PHY_MODE GENMASK(15, 14)
+
+#define MT_TXWI_ACK_CTL_REQ BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
+
+#define MT_TXWI_LEN_BYTE_CNT GENMASK(11, 0)
+
+#define MT_TXWI_CTL_TX_POWER_ADJ GENMASK(3, 0)
+#define MT_TXWI_CTL_CHAN_CHECK_PKT BIT(4)
+#define MT_TXWI_CTL_PIFS_REV BIT(6)
+
+#define MT_TXWI_PKTID_PROBE BIT(7)
+
+u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ u8 *data, void *rxi);
+int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
+void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate);
+
+int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key);
+u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val);
+struct mt76_tx_status
+mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev);
+void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
new file mode 100644
index 000000000000..cf6ffb1ba4a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "mac.h"
+#include <linux/etherdevice.h>
+
+static int mt76x0_start(struct ieee80211_hw *hw)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ ret = mt76x0_mac_start(dev);
+ if (ret)
+ goto out;
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void mt76x0_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mac_work);
+ mt76x0_mac_stop(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int mt76x0_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ unsigned int idx;
+
+ idx = ffs(~dev->vif_mask);
+ if (!idx || idx > 8)
+ return -ENOSPC;
+
+ idx--;
+ dev->vif_mask |= BIT(idx);
+
+ mvif->idx = idx;
+ mvif->group_wcid.idx = GROUP_WCID(idx);
+ mvif->group_wcid.hw_key_idx = -1;
+
+ return 0;
+}
+
+static void mt76x0_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ unsigned int wcid = mvif->group_wcid.idx;
+
+ dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
+}
+
+static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ else
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static void
+mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ u32 flags = 0;
+
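+ /* The hardware filter bits mean "drop this frame class", so a
+ * hardware bit is set whenever the corresponding FIF_* flag is
+ * not requested.
+ */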
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mutex_lock(&dev->mutex);
+
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static void
+mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt76x0_phy_con_cal_onoff(dev, info);
+
+ if (changed & BSS_CHANGED_BSSID) {
+ mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
+
+ /* Note: this is a hack; beacon_int is not reset when we
+ * leave the BSS and no more suitable event is generated.
+ * rt2x00 doesn't seem to be bothered by this either.
+ */
+ if (is_zero_ether_addr(info->bssid))
+ mt76x0_mac_config_tsf(dev, false, 0);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ mt76_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
+ mt76_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
+ mt76_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
+ mt76_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
+ mt76_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INT)
+ mt76x0_mac_config_tsf(dev, true, info->beacon_int);
+
+ if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
+ mt76x0_mac_set_protection(dev, info->use_cts_prot,
+ info->ht_operation_mode);
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE)
+ mt76x0_mac_set_short_preamble(dev, info->use_short_preamble);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+ MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt76x0_phy_recalibrate_after_assoc(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76x0_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ int ret = 0;
+ int idx = 0;
+
+ mutex_lock(&dev->mutex);
+
+ idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt76x0_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+ rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+ mt76x0_mac_set_ampdu_factor(dev);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static int
+mt76x0_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ int idx = msta->wcid.idx;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[idx], NULL);
+ mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+ dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+ mt76x0_mac_wcid_setup(dev, idx, 0, NULL);
+ mt76x0_mac_set_ampdu_factor(dev);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
+static void
+mt76x0_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+}
+
+static void
+mt76x0_sw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ mt76x0_agc_save(dev);
+ set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mt76x0_agc_restore(dev);
+ clear_bit(MT76_SCANNING, &dev->mt76.state);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+static int
+mt76x0_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
+ struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else {
+ if (idx == wcid->hw_key_idx)
+ wcid->hw_key_idx = -1;
+
+ key = NULL;
+ }
+
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76x0_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76x0_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76x0_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
+
+ return 0;
+}
+
+static int
+mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+
+ WARN_ON(msta->wcid.idx > N_WCIDS);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ break;
+ case IEEE80211_AMPDU_TX_START:
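+ /* Store the SSN in sequence-control format (sequence number
+ * in bits 15:4), which is what ieee80211_send_bar() expects.
+ */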
+ msta->agg_ssn[tid] = *ssn << 4;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+
+static void
+mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ struct ieee80211_sta_rates *rates;
+ struct ieee80211_tx_rate rate = {};
+
+ rcu_read_lock();
+ rates = rcu_dereference(sta->rates);
+
+ if (!rates)
+ goto out;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76x0_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+
+out:
+ rcu_read_unlock();
+}
+
+const struct ieee80211_ops mt76x0_ops = {
+ .tx = mt76x0_tx,
+ .start = mt76x0_start,
+ .stop = mt76x0_stop,
+ .add_interface = mt76x0_add_interface,
+ .remove_interface = mt76x0_remove_interface,
+ .config = mt76x0_config,
+ .configure_filter = mt76_configure_filter,
+ .bss_info_changed = mt76x0_bss_info_changed,
+ .sta_add = mt76x0_sta_add,
+ .sta_remove = mt76x0_sta_remove,
+ .sta_notify = mt76x0_sta_notify,
+ .set_key = mt76x0_set_key,
+ .conf_tx = mt76x0_conf_tx,
+ .sw_scan_start = mt76x0_sw_scan,
+ .sw_scan_complete = mt76x0_sw_scan_complete,
+ .ampdu_action = mt76_ampdu_action,
+ .sta_rate_tbl_update = mt76_sta_rate_tbl_update,
+ .set_rts_threshold = mt76x0_set_rts_threshold,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
new file mode 100644
index 000000000000..8affacbab90a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
@@ -0,0 +1,656 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include "mt76x0.h"
+#include "dma.h"
+#include "mcu.h"
+#include "usb.h"
+#include "trace.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD 0x38f8
+#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12)
+#define MCU_RESP_URB_SIZE 1024
+
+static inline int firmware_running(struct mt76x0_dev *dev)
+{
+ return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+ put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static inline void mt76x0_dma_skb_wrap_cmd(struct sk_buff *skb,
+ u8 seq, enum mcu_cmd cmd)
+{
+ WARN_ON(mt76x0_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
+ FIELD_PREP(MT_TXD_CMD_SEQ, seq) |
+ FIELD_PREP(MT_TXD_CMD_TYPE, cmd)));
+}
+
+static inline void trace_mt76x0_mcu_msg_send_cs(struct mt76_dev *dev,
+ struct sk_buff *skb, bool need_resp)
+{
+ u32 i, csum = 0;
+
+ for (i = 0; i < skb->len / 4; i++)
+ csum ^= get_unaligned_le32(skb->data + i * 4);
+
+ trace_mt76x0_mcu_msg_send(dev, skb, csum, need_resp);
+}
+
+static struct sk_buff *
+mt76x0_mcu_msg_alloc(struct mt76x0_dev *dev, const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
+
+ skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (skb) {
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ memcpy(skb_put(skb, len), data, len);
+ }
+ return skb;
+}
+
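+/* Parse a register read response. The first 4 bytes of the buffer are the
+ * RXFCE info header and the last 4 bytes a trailer, hence buf += 4 and
+ * len -= 8. In burst mode the payload carries bare values for consecutive
+ * registers starting at reg_pairs[0]; otherwise it carries explicit
+ * (register, value) pairs.
+ */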
+static void mt76x0_read_resp_regs(struct mt76x0_dev *dev, int len)
+{
+ int i;
+ int n = dev->mcu.reg_pairs_len;
+ u8 *buf = dev->mcu.resp.buf;
+
+ buf += 4;
+ len -= 8;
+
+ if (dev->mcu.burst_read) {
+ u32 reg = dev->mcu.reg_pairs[0].reg - dev->mcu.reg_base;
+
+ WARN_ON_ONCE(len / 4 != n);
+ for (i = 0; i < n; i++) {
+ u32 val = get_unaligned_le32(buf + 4 * i);
+
+ dev->mcu.reg_pairs[i].reg = reg++;
+ dev->mcu.reg_pairs[i].value = val;
+ }
+ } else {
+ WARN_ON_ONCE(len / 8 != n);
+ for (i = 0; i < n; i++) {
+ u32 reg = get_unaligned_le32(buf + 8 * i) - dev->mcu.reg_base;
+ u32 val = get_unaligned_le32(buf + 8 * i + 4);
+
+ WARN_ON_ONCE(dev->mcu.reg_pairs[i].reg != reg);
+ dev->mcu.reg_pairs[i].value = val;
+ }
+ }
+}
+
+static int mt76x0_mcu_wait_resp(struct mt76x0_dev *dev, u8 seq)
+{
+ struct urb *urb = dev->mcu.resp.urb;
+ u32 rxfce;
+ int urb_status, ret, try = 5;
+
+ while (try--) {
+ if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
+ msecs_to_jiffies(300))) {
+ dev_warn(dev->mt76.dev, "Warning: %s retrying\n", __func__);
+ continue;
+ }
+
+ /* Make copies of important data before reusing the urb */
+ rxfce = get_unaligned_le32(dev->mcu.resp.buf);
+ urb_status = mt76x0_urb_has_error(urb) ? urb->status : 0;
+
+ if (urb_status == 0 && dev->mcu.reg_pairs)
+ mt76x0_read_resp_regs(dev, urb->actual_length);
+
+ ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &dev->mcu.resp, GFP_KERNEL,
+ mt76x0_complete_urb,
+ &dev->mcu.resp_cmpl);
+ if (ret)
+ return ret;
+
+ if (urb_status)
+ dev_err(dev->mt76.dev, "Error: MCU resp urb failed:%d\n",
+ urb_status);
+
+ if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
+ FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
+ return 0;
+
+ dev_err(dev->mt76.dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
+ FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
+ seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
+ }
+
+ dev_err(dev->mt76.dev, "Error: %s timed out\n", __func__);
+ return -ETIMEDOUT;
+}
+
+static int
+__mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
+ enum mcu_cmd cmd, bool wait_resp)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ unsigned cmd_pipe = usb_sndbulkpipe(usb_dev,
+ dev->out_ep[MT_EP_OUT_INBAND_CMD]);
+ int sent, ret;
+ u8 seq = 0;
+
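+ /* Seq 0 is reserved for commands that don't expect a response. */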
+ if (wait_resp)
+ while (!seq)
+ seq = ++dev->mcu.msg_seq & 0xf;
+
+ mt76x0_dma_skb_wrap_cmd(skb, seq, cmd);
+
+ if (dev->mcu.resp_cmpl.done)
+ dev_err(dev->mt76.dev, "Error: MCU response pre-completed!\n");
+
+ trace_mt76x0_mcu_msg_send_cs(&dev->mt76, skb, wait_resp);
+ trace_mt76x0_submit_urb_sync(&dev->mt76, cmd_pipe, skb->len);
+
+ ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Error: send MCU cmd failed:%d\n", ret);
+ goto out;
+ }
+ if (sent != skb->len)
+ dev_err(dev->mt76.dev, "Error: %s sent != skb->len\n", __func__);
+
+ if (wait_resp)
+ ret = mt76x0_mcu_wait_resp(dev, seq);
+
+out:
+ return ret;
+}
+
+static int
+mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
+ enum mcu_cmd cmd, bool wait_resp)
+{
+ int ret;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return 0;
+
+ mutex_lock(&dev->mcu.mutex);
+ ret = __mt76x0_mcu_msg_send(dev, skb, cmd, wait_resp);
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+
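+/*
+ * The two helpers below send the common two-word payload: a function or
+ * calibration id followed by one 32-bit argument, both little endian.
+ */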
+int mt76x0_mcu_function_select(struct mt76x0_dev *dev,
+ enum mcu_function func, u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76x0_mcu_msg_send(dev, skb, CMD_FUN_SET_OP,
+ func == ATOMIC_TSSI_SETTING);
+}
+
+int
+mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(cal),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76x0_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
+}
+
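+/*
+ * Send { address, value } pairs to the MCU; requests larger than one
+ * inband packet are split into recursive chunks, and only the final
+ * chunk waits for a response.
+ */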
+int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
+ ret = mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
+ return mt76x0_write_reg_pairs(dev, base, data + cnt, n - cnt);
+}
+
+int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+ if (cnt != n)
+ return -EINVAL;
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
+ mutex_lock(&dev->mcu.mutex);
+
+ dev->mcu.reg_pairs = data;
+ dev->mcu.reg_pairs_len = n;
+ dev->mcu.reg_base = base;
+ dev->mcu.burst_read = false;
+
+ ret = __mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_READ, true);
+
+ dev->mcu.reg_pairs = NULL;
+
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+
+int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
+ const u32 *data, int n)
+{
+ const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_regs_per_cmd, n);
+
+ skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
+ for (i = 0; i < cnt; i++)
+ skb_put_le32(skb, data[i]);
+
+ ret = mt76x0_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
+ return mt76x0_burst_write_regs(dev, offset + cnt * 4,
+ data + cnt, n - cnt);
+}
+
+#if 0
+static int mt76x0_burst_read_regs(struct mt76x0_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+ struct sk_buff *skb;
+ int cnt, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+ if (cnt != n)
+ return -EINVAL;
+
+ skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ skb_put_le32(skb, base + data[0].reg);
+ skb_put_le32(skb, n);
+
+ mutex_lock(&dev->mcu.mutex);
+
+ dev->mcu.reg_pairs = data;
+ dev->mcu.reg_pairs_len = n;
+ dev->mcu.reg_base = base;
+ dev->mcu.burst_read = true;
+
+ ret = __mt76x0_mcu_msg_send(dev, skb, CMD_BURST_READ, true);
+
+ dev->mcu.reg_pairs = NULL;
+
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+#endif
+
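+/*
+ * Firmware image layout: the header below, then the ILM (whose first
+ * MT_MCU_IVB_SIZE bytes form the interrupt vector block), then the DLM.
+ */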
+struct mt76_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
+struct mt76_fw {
+ struct mt76_fw_header hdr;
+ u8 ivb[MT_MCU_IVB_SIZE];
+ u8 ilm[];
+};
+
+static int __mt76x0_dma_fw(struct mt76x0_dev *dev,
+ const struct mt76x0_dma_buf *dma_buf,
+ const void *data, u32 len, u32 dst_addr)
+{
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ struct mt76x0_dma_buf buf = *dma_buf; /* local copy, we override the length */
+ __le32 reg;
+ u32 val;
+ int ret;
+
+ reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_COMMAND) |
+ FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_TXD_INFO_LEN, len));
+ memcpy(buf.buf, &reg, sizeof(reg));
+ memcpy(buf.buf + sizeof(reg), data, len);
+ memset(buf.buf + sizeof(reg) + len, 0, 8);
+
+ ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_ADDR, dst_addr);
+ if (ret)
+ return ret;
+ len = roundup(len, 4);
+ ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_LEN, len << 16);
+ if (ret)
+ return ret;
+
+ buf.len = MT_DMA_HDR_LEN + len + 4;
+ ret = mt76x0_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
+ &buf, GFP_KERNEL,
+ mt76x0_complete_urb, &cmpl);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
+ dev_err(dev->mt76.dev, "Error: firmware upload timed out\n");
+ usb_kill_urb(buf.urb);
+ return -ETIMEDOUT;
+ }
+ if (mt76x0_urb_has_error(buf.urb)) {
+ dev_err(dev->mt76.dev, "Error: firmware upload urb failed:%d\n",
+ buf.urb->status);
+ return buf.urb->status;
+ }
+
+ val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val++;
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+ msleep(5);
+
+ return 0;
+}
+
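+/* Upload a firmware region in MCU_FW_URB_MAX_PAYLOAD-sized chunks. */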
+static int
+mt76x0_dma_fw(struct mt76x0_dev *dev, struct mt76x0_dma_buf *dma_buf,
+ const void *data, int len, u32 dst_addr)
+{
+ int n, ret;
+
+ if (len == 0)
+ return 0;
+
+ n = min(MCU_FW_URB_MAX_PAYLOAD, len);
+ ret = __mt76x0_dma_fw(dev, dma_buf, data, n, dst_addr);
+ if (ret)
+ return ret;
+
+#if 0
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
+ return -ETIMEDOUT;
+#endif
+
+ return mt76x0_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
+}
+
+static int
+mt76x0_upload_firmware(struct mt76x0_dev *dev, const struct mt76_fw *fw)
+{
+ struct mt76x0_dma_buf dma_buf;
+ void *ivb;
+ u32 ilm_len, dlm_len;
+ int i, ret;
+
+ ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
+ if (!ivb)
+ return -ENOMEM;
+ if (mt76x0_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
+ dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %zu\n",
+ ilm_len, sizeof(fw->ivb));
+ ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
+ if (ret)
+ goto error;
+
+ dlm_len = le32_to_cpu(fw->hdr.dlm_len);
+ dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
+ ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
+ dlm_len, MT_MCU_DLM_OFFSET);
+ if (ret)
+ goto error;
+
+ ret = mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+ 0x12, 0, ivb, sizeof(fw->ivb));
+ if (ret < 0)
+ goto error;
+ ret = 0;
+
+ for (i = 100; i && !firmware_running(dev); i--)
+ msleep(10);
+ if (!i) {
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ dev_dbg(dev->mt76.dev, "Firmware running!\n");
+error:
+ kfree(ivb);
+ mt76x0_usb_free_buf(dev, &dma_buf);
+
+ return ret;
+}
+
+static int mt76x0_load_firmware(struct mt76x0_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt76_fw_header *hdr;
+ int len, ret;
+ u32 val;
+
+ mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN));
+
+ if (firmware_running(dev))
+ return 0;
+
+ ret = request_firmware(&fw, MT7610_FIRMWARE, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr))
+ goto err_inv_fw;
+
+ hdr = (const struct mt76_fw_header *) fw->data;
+
+ if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+ goto err_inv_fw;
+
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len)
+ goto err_inv_fw;
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_dbg(dev->mt76.dev,
+ "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+ le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+ len = le32_to_cpu(hdr->ilm_len);
+
+ mt76_wr(dev, 0x1004, 0x2c);
+
+ mt76_set(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN) |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
+ mt76x0_vendor_reset(dev);
+ msleep(5);
+/*
+ mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
+ MT_PBF_CFG_TX1Q_EN |
+ MT_PBF_CFG_TX2Q_EN |
+ MT_PBF_CFG_TX3Q_EN));
+*/
+
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 3);
+
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+ val |= MT_USB_DMA_CFG_TX_WL_DROP;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_TX_WL_DROP;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+ ret = mt76x0_upload_firmware(dev, (const struct mt76_fw *)fw->data);
+ release_firmware(fw);
+
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ return ret;
+
+err_inv_fw:
+ dev_err(dev->mt76.dev, "Invalid firmware image\n");
+ release_firmware(fw);
+ return -ENOENT;
+}
+
+int mt76x0_mcu_init(struct mt76x0_dev *dev)
+{
+ int ret;
+
+ mutex_init(&dev->mcu.mutex);
+
+ ret = mt76x0_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+
+ return 0;
+}
+
+int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev)
+{
+ int ret;
+
+ ret = mt76x0_mcu_function_select(dev, Q_SELECT, 1);
+ if (ret)
+ return ret;
+
+ init_completion(&dev->mcu.resp_cmpl);
+ if (mt76x0_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
+ mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+ return -ENOMEM;
+ }
+
+ ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &dev->mcu.resp, GFP_KERNEL,
+ mt76x0_complete_urb, &dev->mcu.resp_cmpl);
+ if (ret) {
+ mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+ return ret;
+ }
+
+ return 0;
+}
+
+void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev)
+{
+ usb_kill_urb(dev->mcu.resp.urb);
+ mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
new file mode 100644
index 000000000000..8c2f77f4c3f5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_MCU_H
+#define __MT76X0U_MCU_H
+
+struct mt76x0_dev;
+
+/* Register definitions */
+#define MT_MCU_RESET_CTL 0x070C
+#define MT_MCU_INT_LEVEL 0x0718
+#define MT_MCU_COM_REG0 0x0730
+#define MT_MCU_COM_REG1 0x0734
+#define MT_MCU_COM_REG2 0x0738
+#define MT_MCU_COM_REG3 0x073C
+
+#define MT_MCU_IVB_SIZE 0x40
+#define MT_MCU_DLM_OFFSET 0x80000
+
+#define MT_MCU_MEMMAP_WLAN 0x00410000
+/* We use the same address space for BBP as for MAC regs
+ * #define MT_MCU_MEMMAP_BBP 0x40000000
+ */
+#define MT_MCU_MEMMAP_RF 0x80000000
+
+#define INBAND_PACKET_MAX_LEN 192
+
+enum mcu_cmd {
+ CMD_FUN_SET_OP = 1,
+ CMD_LOAD_CR = 2,
+ CMD_INIT_GAIN_OP = 3,
+ CMD_DYNC_VGA_OP = 6,
+ CMD_TDLS_CH_SW = 7,
+ CMD_BURST_WRITE = 8,
+ CMD_READ_MODIFY_WRITE = 9,
+ CMD_RANDOM_READ = 10,
+ CMD_BURST_READ = 11,
+ CMD_RANDOM_WRITE = 12,
+ CMD_LED_MODE_OP = 16,
+ CMD_POWER_SAVING_OP = 20,
+ CMD_WOW_CONFIG = 21,
+ CMD_WOW_QUERY = 22,
+ CMD_WOW_FEATURE = 24,
+ CMD_CARRIER_DETECT_OP = 28,
+ CMD_RADOR_DETECT_OP = 29,
+ CMD_SWITCH_CHANNEL_OP = 30,
+ CMD_CALIBRATION_OP = 31,
+ CMD_BEACON_OP = 32,
+ CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+ Q_SELECT = 1,
+ BW_SETTING = 2,
+ ATOMIC_TSSI_SETTING = 5,
+};
+
+enum mcu_power_mode {
+ RADIO_OFF = 0x30,
+ RADIO_ON = 0x31,
+ RADIO_OFF_AUTO_WAKEUP = 0x32,
+ RADIO_OFF_ADVANCE = 0x33,
+ RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibrate {
+ MCU_CAL_R = 1,
+ MCU_CAL_RXDCOC,
+ MCU_CAL_LC,
+ MCU_CAL_LOFT,
+ MCU_CAL_TXIQ,
+ MCU_CAL_BW,
+ MCU_CAL_DPD,
+ MCU_CAL_RXIQ,
+ MCU_CAL_TXDCOC,
+ MCU_CAL_RX_GROUP_DELAY,
+ MCU_CAL_TX_GROUP_DELAY,
+};
+
+int mt76x0_mcu_init(struct mt76x0_dev *dev);
+int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev);
+void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev);
+
+int
+mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val);
+
+int
+mt76x0_mcu_function_select(struct mt76x0_dev *dev, enum mcu_function func, u32 val);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
new file mode 100644
index 000000000000..fc9857f61771
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MT76X0U_H
+#define MT76X0U_H
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/completion.h>
+#include <net/mac80211.h>
+#include <linux/debugfs.h>
+
+#include "../mt76.h"
+#include "regs.h"
+
+#define MT_CALIBRATE_INTERVAL (4 * HZ)
+
+#define MT_FREQ_CAL_INIT_DELAY (30 * HZ)
+#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ)
+#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2)
+
+#define MT_BBP_REG_VERSION 0x00
+
+#define MT_USB_AGGR_SIZE_LIMIT 21 /* in units of 1024 B */
+#define MT_USB_AGGR_TIMEOUT 0x80 /* in units of 33 ns */
+#define MT_RX_ORDER 3
+#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER)
+
+struct mt76x0_dma_buf {
+ struct urb *urb;
+ void *buf;
+ dma_addr_t dma;
+ size_t len;
+};
+
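+/* MCU state: response buffer/completion and any pending register reads. */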
+struct mt76x0_mcu {
+ struct mutex mutex;
+
+ u8 msg_seq;
+
+ struct mt76x0_dma_buf resp;
+ struct completion resp_cmpl;
+
+ struct mt76_reg_pair *reg_pairs;
+ unsigned int reg_pairs_len;
+ u32 reg_base;
+ bool burst_read;
+};
+
+struct mac_stats {
+ u64 rx_stat[6];
+ u64 tx_stat[6];
+ u64 aggr_stat[2];
+ u64 aggr_n[32];
+ u64 zero_len_del[2];
+};
+
+#define N_RX_ENTRIES 16
+struct mt76x0_rx_queue {
+ struct mt76x0_dev *dev;
+
+ struct mt76x0_dma_buf_rx {
+ struct urb *urb;
+ struct page *p;
+ } e[N_RX_ENTRIES];
+
+ unsigned int start;
+ unsigned int end;
+ unsigned int entries;
+ unsigned int pending;
+};
+
+#define N_TX_ENTRIES 64
+
+struct mt76x0_tx_queue {
+ struct mt76x0_dev *dev;
+
+ struct mt76x0_dma_buf_tx {
+ struct urb *urb;
+ struct sk_buff *skb;
+ } e[N_TX_ENTRIES];
+
+ unsigned int start;
+ unsigned int end;
+ unsigned int entries;
+ unsigned int used;
+ unsigned int fifo_seq;
+};
+
+/* WCID allocation:
+ * 0: mcast wcid
+ * 1: bssid wcid
+ * 1...: STAs
+ * ...7e: group wcids
+ * 7f: reserved
+ */
+#define N_WCIDS 128
+#define GROUP_WCID(idx) (254 - (idx))
+
+struct mt76x0_eeprom_params;
+
+#define MT_EE_TEMPERATURE_SLOPE 39
+#define MT_FREQ_OFFSET_INVALID (-128)
+
+/* addr req mask */
+#define MT_VEND_TYPE_EEPROM BIT(31)
+#define MT_VEND_TYPE_CFG BIT(30)
+#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
+
+#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
+
+enum mt_bw {
+ MT_BW_20,
+ MT_BW_40,
+};
+
+/**
+ * struct mt76x0_dev - adapter structure
+ * @lock: protects @wcid->tx_rate.
+ * @mac_lock: locks out mac80211's tx status and rx paths.
+ * @tx_lock: protects @tx_q and changes of MT76_STATE_*_STATS
+ * flags in @state.
+ * @rx_lock: protects @rx_q.
+ * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi.
+ * @mutex: ensures exclusive access from mac80211 callbacks.
+ * @reg_atomic_mutex: ensures atomicity of indirect register accesses
+ * (accesses to RF and BBP).
+ * @hw_atomic_mutex: ensures exclusive access to HW during critical
+ * operations (power management, channel switch).
+ */
+struct mt76x0_dev {
+ struct mt76_dev mt76; /* must be first */
+
+ struct mutex mutex;
+
+ struct mutex usb_ctrl_mtx;
+ u8 data[32];
+
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+
+ u8 out_ep[__MT_EP_OUT_MAX];
+ u16 out_max_packet;
+ u8 in_ep[__MT_EP_IN_MAX];
+ u16 in_max_packet;
+
+ unsigned long wcid_mask[DIV_ROUND_UP(N_WCIDS, BITS_PER_LONG)];
+ unsigned long vif_mask;
+
+ struct mt76x0_mcu mcu;
+
+ struct delayed_work cal_work;
+ struct delayed_work mac_work;
+
+ struct workqueue_struct *stat_wq;
+ struct delayed_work stat_work;
+
+ struct mt76_wcid *mon_wcid;
+ struct mt76_wcid __rcu *wcid[N_WCIDS];
+
+ spinlock_t mac_lock;
+
+ const u16 *beacon_offsets;
+
+ u8 macaddr[ETH_ALEN];
+ struct mt76x0_eeprom_params *ee;
+
+ struct mutex reg_atomic_mutex;
+ struct mutex hw_atomic_mutex;
+
+ u32 rxfilter;
+ u32 debugfs_reg;
+
+ /* TX */
+ spinlock_t tx_lock;
+ struct mt76x0_tx_queue *tx_q;
+ struct sk_buff_head tx_skb_done;
+
+ atomic_t avg_ampdu_len;
+
+ /* RX */
+ spinlock_t rx_lock;
+ struct mt76x0_rx_queue rx_q;
+
+ /* Connection monitoring things */
+ spinlock_t con_mon_lock;
+ u8 ap_bssid[ETH_ALEN];
+
+ s8 bcn_freq_off;
+ u8 bcn_phy_mode;
+
+ int avg_rssi; /* starts at 0 and converges */
+
+ u8 agc_save;
+ u16 chainmask;
+
+ struct mac_stats stats;
+};
+
+struct mt76x0_wcid {
+ u8 idx;
+ u8 hw_key_idx;
+
+ u16 tx_rate;
+ bool tx_rate_set;
+ u8 tx_rate_nss;
+};
+
+struct mt76_vif {
+ u8 idx;
+
+ struct mt76_wcid group_wcid;
+};
+
+struct mt76_tx_status {
+ u8 valid:1;
+ u8 success:1;
+ u8 aggr:1;
+ u8 ack_req:1;
+ u8 is_probe:1;
+ u8 wcid;
+ u8 pktid;
+ u8 retry;
+ u16 rate;
+} __packed __aligned(2);
+
+struct mt76_sta {
+ struct mt76_wcid wcid;
+ struct mt76_tx_status status;
+ int n_frames;
+ u16 agg_ssn[IEEE80211_NUM_TIDS];
+};
+
+struct mt76_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
+struct mt76x0_rxwi;
+
+extern const struct ieee80211_ops mt76x0_ops;
+
+static inline bool is_mt7610e(struct mt76x0_dev *dev)
+{
+ /* TODO */
+ return false;
+}
+
+void mt76x0_init_debugfs(struct mt76x0_dev *dev);
+
+int mt76x0_wait_asic_ready(struct mt76x0_dev *dev);
+
+/* Compatibility with mt76 */
+#define mt76_rmw_field(_dev, _reg, _field, _val) \
+ mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int len);
+int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int len);
+int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
+ const u32 *data, int n);
+void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr);
+
+/* Init */
+struct mt76x0_dev *mt76x0_alloc_device(struct device *dev);
+int mt76x0_init_hardware(struct mt76x0_dev *dev);
+int mt76x0_register_device(struct mt76x0_dev *dev);
+void mt76x0_cleanup(struct mt76x0_dev *dev);
+void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset);
+
+int mt76x0_mac_start(struct mt76x0_dev *dev);
+void mt76x0_mac_stop(struct mt76x0_dev *dev);
+
+/* PHY */
+void mt76x0_phy_init(struct mt76x0_dev *dev);
+int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev);
+void mt76x0_agc_save(struct mt76x0_dev *dev);
+void mt76x0_agc_restore(struct mt76x0_dev *dev);
+int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev);
+int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi);
+void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
+ struct ieee80211_bss_conf *info);
+
+/* MAC */
+void mt76x0_mac_work(struct work_struct *work);
+void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
+ int ht_mode);
+void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb);
+void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval);
+void
+mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev);
+
+/* TX */
+void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb);
+void mt76x0_tx_stat(struct work_struct *work);
+
+/* util */
+void mt76x0_remove_hdr_pad(struct sk_buff *skb);
+int mt76x0_insert_hdr_pad(struct sk_buff *skb);
+
+int mt76x0_dma_init(struct mt76x0_dev *dev);
+void mt76x0_dma_cleanup(struct mt76x0_dev *dev);
+
+int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ struct mt76_wcid *wcid, int hw_q);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
new file mode 100644
index 000000000000..5da7bfbe907f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -0,0 +1,1008 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "phy.h"
+#include "initvals.h"
+#include "initvals_phy.h"
+
+#include <linux/etherdevice.h>
+
+static int
+mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
+{
+ int ret = 0;
+ u8 bank, reg;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -ENODEV;
+
+ bank = MT_RF_BANK(offset);
+ reg = MT_RF_REG(offset);
+
+ if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank > 8))
+ return -EINVAL;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt76_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+ MT_RF_CSR_CFG_WR |
+ MT_RF_CSR_CFG_KICK);
+ trace_mt76x0_rf_write(&dev->mt76, bank, offset, value);
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n",
+ bank, reg, ret);
+
+ return ret;
+}
+
+static int
+mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
+{
+ int ret = -ETIMEDOUT;
+ u32 val;
+ u8 bank, reg;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -ENODEV;
+
+ bank = MT_RF_BANK(offset);
+ reg = MT_RF_REG(offset);
+
+ if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank > 8))
+ return -EINVAL;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ mt76_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+ MT_RF_CSR_CFG_KICK);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ val = mt76_rr(dev, MT_RF_CSR_CFG);
+ if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == reg &&
+ FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+ ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val);
+ trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret);
+ }
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n",
+ bank, reg, ret);
+
+ return ret;
+}
+
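+/*
+ * RF register accessors: once the MCU firmware is running, RF registers
+ * are accessed through MCU register-pair commands; the direct CSR path
+ * below is only a (warned-about) fallback.
+ */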
+static int
+rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val)
+{
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+ struct mt76_reg_pair pair = {
+ .reg = offset,
+ .value = val,
+ };
+
+ return mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+ } else {
+ WARN_ON_ONCE(1);
+ return mt76x0_rf_csr_wr(dev, offset, val);
+ }
+}
+
+static int
+rf_rr(struct mt76x0_dev *dev, u32 offset)
+{
+ int ret;
+ u32 val;
+
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+ struct mt76_reg_pair pair = {
+ .reg = offset,
+ };
+
+ ret = mt76x0_read_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+ val = pair.value;
+ } else {
+ WARN_ON_ONCE(1);
+ ret = val = mt76x0_rf_csr_rr(dev, offset);
+ }
+
+ return (ret < 0) ? ret : val;
+}
+
+static int
+rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = rf_rr(dev, offset);
+ if (ret < 0)
+ return ret;
+ val |= ret & ~mask;
+ ret = rf_wr(dev, offset, val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static int
+rf_set(struct mt76x0_dev *dev, u32 offset, u8 val)
+{
+ return rf_rmw(dev, offset, 0, val);
+}
+
+#if 0
+static int
+rf_clear(struct mt76x0_dev *dev, u32 offset, u8 mask)
+{
+ return rf_rmw(dev, offset, mask, 0);
+}
+#endif
+
+#define RF_RANDOM_WRITE(dev, tab) \
+ mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab))
+
+int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
+{
+ int i = 20;
+ u32 val;
+
+ do {
+ val = mt76_rr(dev, MT_BBP(CORE, 0));
+ dev_dbg(dev->mt76.dev, "BBP version %08x\n", val);
+ if (val && ~val)
+ break;
+ } while (--i);
+
+ if (!i) {
+ dev_err(dev->mt76.dev, "Error: BBP is not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void
+mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width,
+ u8 ctrl)
+{
+ int core_val, agc_val;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_80:
+ core_val = 3;
+ agc_val = 7;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ core_val = 2;
+ agc_val = 3;
+ break;
+ default:
+ core_val = 0;
+ agc_val = 1;
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+ mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+
+int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi)
+{
+ s8 lna_gain, rssi_offset;
+ int val;
+
+ if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) {
+ lna_gain = dev->ee->lna_gain_2ghz;
+ rssi_offset = dev->ee->rssi_offset_2ghz[0];
+ } else {
+ lna_gain = dev->ee->lna_gain_5ghz[0];
+ rssi_offset = dev->ee->rssi_offset_5ghz[0];
+ }
+
+ val = rxwi->rssi[0] + rssi_offset - lna_gain;
+
+ return val;
+}
+
+static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel)
+{
+ u8 val;
+
+ val = rf_rr(dev, MT_RF(0, 4));
+ if ((val & 0x70) != 0x30)
+ return;
+
+ /*
+ * Calibration Mode - Open loop, closed loop, and amplitude:
+ * B0.R06.[0]: 1
+ * B0.R06.[3:1] bp_close_code: 100
+ * B0.R05.[7:0] bp_open_code: 0x0
+ * B0.R04.[2:0] cal_bits: 000
+ * B0.R03.[2:0] startup_time: 011
+ * B0.R03.[6:4] settle_time:
+ * 80MHz channel: 110
+ * 40MHz channel: 101
+ * 20MHz channel: 100
+ */
+ val = rf_rr(dev, MT_RF(0, 6));
+ val &= ~0xf;
+ val |= 0x09;
+ rf_wr(dev, MT_RF(0, 6), val);
+
+ val = rf_rr(dev, MT_RF(0, 5));
+ if (val != 0)
+ rf_wr(dev, MT_RF(0, 5), 0x0);
+
+ val = rf_rr(dev, MT_RF(0, 4));
+ val &= ~0x07;
+ rf_wr(dev, MT_RF(0, 4), val);
+
+ val = rf_rr(dev, MT_RF(0, 3));
+ val &= ~0x77;
+ if (channel == 1 || channel == 7 || channel == 9 || channel >= 13) {
+ val |= 0x63;
+ } else if (channel == 3 || channel == 4 || channel == 10) {
+ val |= 0x53;
+ } else if (channel == 2 || channel == 5 || channel == 6 ||
+ channel == 8 || channel == 11 || channel == 12) {
+ val |= 0x43;
+ } else {
+ WARN(1, "Unknown channel %u\n", channel);
+ return;
+ }
+ rf_wr(dev, MT_RF(0, 3), val);
+
+ /* TODO replace by mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); */
+ val = rf_rr(dev, MT_RF(0, 4));
+ val = ((val & ~(0x80)) | 0x80);
+ rf_wr(dev, MT_RF(0, 4), val);
+
+ msleep(2);
+}
+
+static void
+mt76x0_mac_set_ctrlch(struct mt76x0_dev *dev, bool primary_upper)
+{
+ mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+ primary_upper);
+}
+
+static void
+mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
+
+ rf_wr(dev, MT_RF(5, 0), 0x45);
+ rf_wr(dev, MT_RF(6, 0), 0x44);
+
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+
+ mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007);
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002);
+ break;
+ case NL80211_BAND_5GHZ:
+ RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
+
+ rf_wr(dev, MT_RF(5, 0), 0x44);
+ rf_wr(dev, MT_RF(6, 0), 0x45);
+
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+
+ mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005);
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102);
+ break;
+ default:
+ break;
+ }
+}
+
+#define EXT_PA_2G_5G 0x0
+#define EXT_PA_5G_ONLY 0x1
+#define EXT_PA_2G_ONLY 0x2
+#define INT_PA_2G_5G 0x3
+
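+/*
+ * Program the RF PLL from the per-channel frequency plan (using the SDM
+ * variant for channels listed in mt76x0_sdm_channel), then apply the
+ * bandwidth- and band-dependent switch tables and the PA configuration.
+ */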
+static void
+mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+{
+ u16 rf_band = rf_bw_band & 0xff00;
+ u16 rf_bw = rf_bw_band & 0x00ff;
+ u32 mac_reg;
+ u8 rf_val;
+ int i;
+ bool bSDM = false;
+ const struct mt76x0_freq_item *freq_item;
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_sdm_channel); i++) {
+ if (channel == mt76x0_sdm_channel[i]) {
+ bSDM = true;
+ break;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_frequency_plan); i++) {
+ if (channel == mt76x0_frequency_plan[i].channel) {
+ rf_band = mt76x0_frequency_plan[i].band;
+
+ if (bSDM)
+ freq_item = &(mt76x0_sdm_frequency_plan[i]);
+ else
+ freq_item = &(mt76x0_frequency_plan[i]);
+
+ rf_wr(dev, MT_RF(0, 37), freq_item->pllR37);
+ rf_wr(dev, MT_RF(0, 36), freq_item->pllR36);
+ rf_wr(dev, MT_RF(0, 35), freq_item->pllR35);
+ rf_wr(dev, MT_RF(0, 34), freq_item->pllR34);
+ rf_wr(dev, MT_RF(0, 33), freq_item->pllR33);
+
+ rf_val = rf_rr(dev, MT_RF(0, 32));
+ rf_val &= ~0xE0;
+ rf_val |= freq_item->pllR32_b7b5;
+ rf_wr(dev, MT_RF(0, 32), rf_val);
+
+ /* R32<4:0> pll_den (denominator - 8) */
+ rf_val = rf_rr(dev, MT_RF(0, 32));
+ rf_val &= ~0x1F;
+ rf_val |= freq_item->pllR32_b4b0;
+ rf_wr(dev, MT_RF(0, 32), rf_val);
+
+ /* R31<7:5> */
+ rf_val = rf_rr(dev, MT_RF(0, 31));
+ rf_val &= ~0xE0;
+ rf_val |= freq_item->pllR31_b7b5;
+ rf_wr(dev, MT_RF(0, 31), rf_val);
+
+ /* R31<4:0> pll_k (numerator) */
+ rf_val = rf_rr(dev, MT_RF(0, 31));
+ rf_val &= ~0x1F;
+ rf_val |= freq_item->pllR31_b4b0;
+ rf_wr(dev, MT_RF(0, 31), rf_val);
+
+ /* R30<7> sdm_reset_n */
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x80;
+ if (bSDM) {
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+ rf_val |= 0x80;
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+ } else {
+ rf_val |= freq_item->pllR30_b7;
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+ }
+
+ /* R30<6:2> sdmmash_prbs,sin */
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x7C;
+ rf_val |= freq_item->pllR30_b6b2;
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+
+ /* R30<1> sdm_bp */
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x02;
+ rf_val |= (freq_item->pllR30_b1 << 1);
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+
+ /* R30<0> R29<7:0> (hex) pll_n */
+ rf_val = freq_item->pll_n & 0x00FF;
+ rf_wr(dev, MT_RF(0, 29), rf_val);
+
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x1;
+ rf_val |= ((freq_item->pll_n >> 8) & 0x0001);
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+
+ /* R28<7:6> isi_iso */
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0xC0;
+ rf_val |= freq_item->pllR28_b7b6;
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R28<5:4> pfd_dly */
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0x30;
+ rf_val |= freq_item->pllR28_b5b4;
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R28<3:2> clksel option */
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0x0C;
+ rf_val |= freq_item->pllR28_b3b2;
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R28<1:0> R27<7:0> R26<7:0> (hex) sdm_k */
+ rf_val = freq_item->pll_sdm_k & 0x000000FF;
+ rf_wr(dev, MT_RF(0, 26), rf_val);
+
+ rf_val = ((freq_item->pll_sdm_k >> 8) & 0x000000FF);
+ rf_wr(dev, MT_RF(0, 27), rf_val);
+
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0x3;
+ rf_val |= ((freq_item->pll_sdm_k >> 16) & 0x0003);
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R24<1:0> xo_div */
+ rf_val = rf_rr(dev, MT_RF(0, 24));
+ rf_val &= ~0x3;
+ rf_val |= freq_item->pllR24_b1b0;
+ rf_wr(dev, MT_RF(0, 24), rf_val);
+
+ break;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
+ if (rf_bw == mt76x0_rf_bw_switch_tab[i].bw_band) {
+ rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_bw_switch_tab[i].value);
+ } else if ((rf_bw == (mt76x0_rf_bw_switch_tab[i].bw_band & 0xFF)) &&
+ (rf_band & mt76x0_rf_bw_switch_tab[i].bw_band)) {
+ rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_bw_switch_tab[i].value);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
+ if (mt76x0_rf_band_switch_tab[i].bw_band & rf_band) {
+ rf_wr(dev, mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_band_switch_tab[i].value);
+ }
+ }
+
+ mac_reg = mt76_rr(dev, MT_RF_MISC);
+ mac_reg &= ~0xC; /* Clear 0x518[3:2] */
+ mt76_wr(dev, MT_RF_MISC, mac_reg);
+
+ if (dev->ee->pa_type == INT_PA_2G_5G ||
+ (dev->ee->pa_type == EXT_PA_5G_ONLY && (rf_band & RF_G_BAND)) ||
+ (dev->ee->pa_type == EXT_PA_2G_ONLY && (rf_band & RF_A_BAND))) {
+ ; /* Internal PA - nothing to do. */
+ } else {
+ /*
+ * MT_RF_MISC (offset 0x0518):
+ * bit [2]: 1'b1 enables the external A band PA, 1'b0 disables it
+ * bit [3]: 1'b1 enables the external G band PA, 1'b0 disables it
+ */
+ if (rf_band & RF_A_BAND) {
+ mac_reg = mt76_rr(dev, MT_RF_MISC);
+ mac_reg |= 0x4;
+ mt76_wr(dev, MT_RF_MISC, mac_reg);
+ } else {
+ mac_reg = mt76_rr(dev, MT_RF_MISC);
+ mac_reg |= 0x8;
+ mt76_wr(dev, MT_RF_MISC, mac_reg);
+ }
+
+ /* External PA */
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_ext_pa_tab); i++)
+ if (mt76x0_rf_ext_pa_tab[i].bw_band & rf_band)
+ rf_wr(dev, mt76x0_rf_ext_pa_tab[i].rf_bank_reg,
+ mt76x0_rf_ext_pa_tab[i].value);
+ }
+
+ if (rf_band & RF_G_BAND) {
+ mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x63707400);
+ /* Set Atten mode = 2 For G band, Disable Tx Inc dcoc. */
+ mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
+ mac_reg &= 0x896400FF;
+ mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
+ } else {
+ mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x686A7800);
+ /* Set Atten mode = 0 For Ext A band, Disable Tx Inc dcoc Cal. */
+ mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
+ mac_reg &= 0x890400FF;
+ mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
+ }
+}
+
+static void
+mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
+ const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
+ const struct mt76_reg_pair *pair = &item->reg_pair;
+
+ if ((rf_bw_band & item->bw_band) != rf_bw_band)
+ continue;
+
+ if (pair->reg == MT_BBP(AGC, 8)) {
+ u32 val = pair->value;
+ u8 gain = FIELD_GET(MT_BBP_AGC_GAIN, val);
+
+ if (channel > 14) {
+ if (channel < 100)
+ gain -= dev->ee->lna_gain_5ghz[0] * 2;
+ else if (channel < 137)
+ gain -= dev->ee->lna_gain_5ghz[1] * 2;
+ else
+ gain -= dev->ee->lna_gain_5ghz[2] * 2;
+ } else {
+ gain -= dev->ee->lna_gain_2ghz * 2;
+ }
+
+ val &= ~MT_BBP_AGC_GAIN;
+ val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain);
+ mt76_wr(dev, pair->reg, val);
+ } else {
+ mt76_wr(dev, pair->reg, pair->value);
+ }
+ }
+}
+
+#if 0
+static void
+mt76x0_extra_power_over_mac(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = ((mt76_rr(dev, MT_TX_PWR_CFG_1) & 0x00003f00) >> 8);
+ val |= ((mt76_rr(dev, MT_TX_PWR_CFG_2) & 0x00003f00) << 8);
+ mt76_wr(dev, MT_TX_PWR_CFG_7, val);
+
+ /* TODO: fix VHT */
+ val = ((mt76_rr(dev, MT_TX_PWR_CFG_3) & 0x0000ff00) >> 8);
+ mt76_wr(dev, MT_TX_PWR_CFG_8, val);
+
+ val = ((mt76_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
+ mt76_wr(dev, MT_TX_PWR_CFG_9, val);
+}
+
+static void
+mt76x0_phy_set_tx_power(struct mt76x0_dev *dev, u8 channel, u8 rf_bw_band)
+{
+ u32 val;
+ int i;
+ int bw = (rf_bw_band & RF_BW_20) ? 0 : 1;
+
+ for (i = 0; i < 4; i++) {
+ if (channel <= 14)
+ val = dev->ee->tx_pwr_cfg_2g[i][bw];
+ else
+ val = dev->ee->tx_pwr_cfg_5g[i][bw];
+
+ mt76_wr(dev, MT_TX_PWR_CFG_0 + 4*i, val);
+ }
+
+ mt76x0_extra_power_over_mac(dev);
+}
+#endif
+
+static void
+mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
+{
+ enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4 };
+ int bw;
+
+ switch (width) {
+ default:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ bw = BW_20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ bw = BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = BW_80;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ bw = BW_10;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_5:
+ /* TODO error */
+ return;
+ }
+
+ mt76x0_mcu_function_select(dev, BW_SETTING, bw);
+}
+
+static void
+mt76x0_phy_set_chan_pwr(struct mt76x0_dev *dev, u8 channel)
+{
+ static const int mt76x0_tx_pwr_ch_list[] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 36, 38, 40, 44, 46, 48, 52, 54, 56, 60, 62, 64,
+ 100, 102, 104, 108, 110, 112, 116, 118, 120, 124, 126, 128,
+ 132, 134, 136, 140,
+ 149, 151, 153, 157, 159, 161, 165, 167, 169, 171, 173,
+ 42, 58, 106, 122, 155
+ };
+ int i;
+ u32 val;
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_tx_pwr_ch_list); i++)
+ if (mt76x0_tx_pwr_ch_list[i] == channel)
+ break;
+
+ if (WARN_ON(i == ARRAY_SIZE(mt76x0_tx_pwr_ch_list)))
+ return;
+
+ val = mt76_rr(dev, MT_TX_ALC_CFG_0);
+ val &= ~0x3f3f;
+ val |= dev->ee->tx_pwr_per_chan[i];
+ val |= 0x2f2f << 16;
+ mt76_wr(dev, MT_TX_ALC_CFG_0, val);
+}
+
+static int
+__mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 ext_cca_chan[4] = {
+ [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+ [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+ [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+ [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+ };
+ bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ int ch_group_index, freq, freq1;
+ u8 channel;
+ u32 val;
+ u16 rf_bw_band;
+
+ freq = chandef->chan->center_freq;
+ freq1 = chandef->center_freq1;
+ channel = chandef->chan->hw_value;
+ rf_bw_band = (channel <= 14) ? RF_G_BAND : RF_A_BAND;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ if (freq1 > freq)
+ ch_group_index = 0;
+ else
+ ch_group_index = 1;
+ channel += 2 - ch_group_index * 4;
+ rf_bw_band |= RF_BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ channel += 6 - ch_group_index * 4;
+ rf_bw_band |= RF_BW_80;
+ break;
+ default:
+ ch_group_index = 0;
+ rf_bw_band |= RF_BW_20;
+ break;
+ }
+
+ mt76x0_bbp_set_bw(dev, chandef->width);
+ mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index);
+ mt76x0_mac_set_ctrlch(dev, ch_group_index & 1);
+
+ mt76_rmw(dev, MT_EXT_CCA_CFG,
+ (MT_EXT_CCA_CFG_CCA0 |
+ MT_EXT_CCA_CFG_CCA1 |
+ MT_EXT_CCA_CFG_CCA2 |
+ MT_EXT_CCA_CFG_CCA3 |
+ MT_EXT_CCA_CFG_CCA_MASK),
+ ext_cca_chan[ch_group_index]);
+
+ mt76x0_phy_set_band(dev, chandef->chan->band);
+ mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);
+
+ /* set Japan Tx filter at channel 14 */
+ val = mt76_rr(dev, MT_BBP(CORE, 1));
+ if (channel == 14)
+ val |= 0x20;
+ else
+ val &= ~0x20;
+ mt76_wr(dev, MT_BBP(CORE, 1), val);
+
+ mt76x0_phy_set_chan_bbp_params(dev, channel, rf_bw_band);
+
+ /* The vendor driver doesn't do this */
+ /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */
+
+ if (scan)
+ mt76x0_vco_cal(dev, channel);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
+ mt76x0_phy_set_chan_pwr(dev, channel);
+
+ dev->mt76.chandef = *chandef;
+ return 0;
+}
+
+int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ int ret;
+
+ mutex_lock(&dev->hw_atomic_mutex);
+ ret = __mt76x0_phy_set_channel(dev, chandef);
+ mutex_unlock(&dev->hw_atomic_mutex);
+
+ return ret;
+}
+
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
+{
+ u32 tx_alc, reg_val;
+ u8 channel = dev->mt76.chandef.chan->hw_value;
+ int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_R, 0);
+
+ mt76x0_vco_cal(dev, channel);
+
+ tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
+ usleep_range(500, 700);
+
+ reg_val = mt76_rr(dev, 0x2124);
+ reg_val &= 0xffffff7e;
+ mt76_wr(dev, 0x2124, reg_val);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz);
+
+ mt76_wr(dev, 0x2124, reg_val);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
+ msleep(100);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
+}
+
+void mt76x0_agc_save(struct mt76x0_dev *dev)
+{
+ /* Only one RX path */
+ dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8)));
+}
+
+void mt76x0_agc_restore(struct mt76x0_dev *dev)
+{
+ mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save);
+}
+
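+/*
+ * Sample the temperature sensor through BBP(CORE, 34/35), saving and
+ * restoring the RF registers the measurement borrows.
+ */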
+static void mt76x0_temp_sensor(struct mt76x0_dev *dev)
+{
+ u8 rf_b7_73, rf_b0_66, rf_b0_73;
+ int cycle, temp;
+ u32 val;
+ s32 sval;
+
+ rf_b7_73 = rf_rr(dev, MT_RF(7, 73));
+ rf_b0_66 = rf_rr(dev, MT_RF(0, 66));
+ rf_b0_73 = rf_rr(dev, MT_RF(0, 73));
+
+ rf_wr(dev, MT_RF(7, 73), 0x02);
+ rf_wr(dev, MT_RF(0, 66), 0x23);
+ rf_wr(dev, MT_RF(0, 73), 0x01);
+
+ mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055);
+
+ for (cycle = 0; cycle < 2000; cycle++) {
+ val = mt76_rr(dev, MT_BBP(CORE, 34));
+ if (!(val & 0x10))
+ break;
+ udelay(3);
+ }
+
+ if (cycle >= 2000) {
+ val &= 0x10;
+ mt76_wr(dev, MT_BBP(CORE, 34), val);
+ goto done;
+ }
+
+ sval = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+ if (!(sval & 0x80))
+ sval &= 0x7f; /* Positive */
+ else
+ sval |= 0xffffff00; /* Negative */
+
+ temp = (35 * (sval - dev->ee->temp_off)) / 10 + 25;
+
+done:
+ rf_wr(dev, MT_RF(7, 73), rf_b7_73);
+ rf_wr(dev, MT_RF(0, 66), rf_b0_66);
+ rf_wr(dev, MT_RF(0, 73), rf_b0_73);
+}
+
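+/* Lower the initial VGA gain as the average RSSI grows stronger. */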
+static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev)
+{
+ u32 val, init_vga;
+
+ init_vga = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 0x54 : 0x4E;
+ if (dev->avg_rssi > -60)
+ init_vga -= 0x20;
+ else if (dev->avg_rssi > -70)
+ init_vga -= 0x10;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 8));
+ val &= 0xFFFF80FF;
+ val |= init_vga << 8;
+ mt76_wr(dev, MT_BBP(AGC, 8), val);
+}
+
+static void mt76x0_phy_calibrate(struct work_struct *work)
+{
+ struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+ cal_work.work);
+
+ mt76x0_dynamic_vga_tuning(dev);
+ mt76x0_temp_sensor(dev);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
+ struct ieee80211_bss_conf *info)
+{
+ /* Start/stop collecting beacon data */
+ spin_lock_bh(&dev->con_mon_lock);
+ ether_addr_copy(dev->ap_bssid, info->bssid);
+ dev->avg_rssi = 0;
+ dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+ spin_unlock_bh(&dev->con_mon_lock);
+}
+
+static void
+mt76x0_set_rx_chains(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+ val &= ~(BIT(3) | BIT(4));
+
+ if (dev->chainmask & BIT(1))
+ val |= BIT(3);
+
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
+
+ mb();
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+}
+
+static void
+mt76x0_set_tx_dac(struct mt76x0_dev *dev)
+{
+ if (dev->chainmask & BIT(1))
+ mt76_set(dev, MT_BBP(TXBE, 5), 3);
+ else
+ mt76_clear(dev, MT_BBP(TXBE, 5), 3);
+}
+
+static void
+mt76x0_rf_init(struct mt76x0_dev *dev)
+{
+ int i;
+ u8 val;
+
+ RF_RANDOM_WRITE(dev, mt76x0_rf_central_tab);
+ RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
+ RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
+ RF_RANDOM_WRITE(dev, mt76x0_rf_vga_channel_0_tab);
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
+ const struct mt76x0_rf_switch_item *item = &mt76x0_rf_bw_switch_tab[i];
+
+ if (item->bw_band == RF_BW_20)
+ rf_wr(dev, item->rf_bank_reg, item->value);
+ else if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
+ rf_wr(dev, item->rf_bank_reg, item->value);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
+ if (mt76x0_rf_band_switch_tab[i].bw_band & RF_G_BAND) {
+ rf_wr(dev,
+ mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_band_switch_tab[i].value);
+ }
+ }
+
+ /*
+ * Frequency calibration:
+ * E1: B0.R22<6:0>: xo_cxo<6:0>
+ * E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1>
+ */
+ rf_wr(dev, MT_RF(0, 22), min_t(u8, dev->ee->rf_freq_off, 0xBF));
+ val = rf_rr(dev, MT_RF(0, 22));
+
+ /*
+ * Reset the DAC during power up: set B0.R73<7> to 1, clear it to 0,
+ * then set it to 1 again.
+ */
+ val = rf_rr(dev, MT_RF(0, 73));
+ val |= 0x80;
+ rf_wr(dev, MT_RF(0, 73), val);
+ val &= ~0x80;
+ rf_wr(dev, MT_RF(0, 73), val);
+ val |= 0x80;
+ rf_wr(dev, MT_RF(0, 73), val);
+
+ /*
+ * vcocal_en: initiate VCO calibration (resets itself after completion).
+ * This must be the last step of the RF configuration.
+ */
+ rf_set(dev, MT_RF(0, 4), 0x80);
+}
+
+static void mt76x0_ant_select(struct mt76x0_dev *dev)
+{
+ /* Single antenna mode. */
+ mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
+ mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
+ mt76_clear(dev, MT_COEXCFG0, BIT(2));
+ mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
+}
+
+void mt76x0_phy_init(struct mt76x0_dev *dev)
+{
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate);
+
+ mt76x0_ant_select(dev);
+
+ mt76x0_rf_init(dev);
+
+ mt76x0_set_rx_chains(dev);
+ mt76x0_set_tx_dac(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
new file mode 100644
index 000000000000..2880a43c3cb0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
@@ -0,0 +1,81 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MT76X0_PHY_H_
+#define _MT76X0_PHY_H_
+
+#define RF_G_BAND 0x0100
+#define RF_A_BAND 0x0200
+#define RF_A_BAND_LB 0x0400
+#define RF_A_BAND_MB 0x0800
+#define RF_A_BAND_HB 0x1000
+#define RF_A_BAND_11J 0x2000
+
+#define RF_BW_20 1
+#define RF_BW_40 2
+#define RF_BW_10 4
+#define RF_BW_80 8
+
+#define MT_RF(bank, reg) ((bank) << 16 | (reg))
+#define MT_RF_BANK(offset) ((offset) >> 16)
+#define MT_RF_REG(offset) ((offset) & 0xff)
+
+struct mt76x0_bbp_switch_item {
+ u16 bw_band;
+ struct mt76_reg_pair reg_pair;
+};
+
+struct mt76x0_rf_switch_item {
+ u32 rf_bank_reg;
+ u16 bw_band;
+ u8 value;
+};
+
+struct mt76x0_freq_item {
+ u8 channel;
+ u32 band;
+ u8 pllR37;
+ u8 pllR36;
+ u8 pllR35;
+ u8 pllR34;
+ u8 pllR33;
+ u8 pllR32_b7b5;
+ u8 pllR32_b4b0; /* PLL_DEN (denominator - 8) */
+ u8 pllR31_b7b5;
+ u8 pllR31_b4b0; /* PLL_K (numerator) */
+ u8 pllR30_b7; /* sdm_reset_n */
+ u8 pllR30_b6b2; /* sdmmash_prbs,sin */
+ u8 pllR30_b1; /* sdm_bp */
+ u16 pll_n; /* R30<0>, R29<7:0> (hex) */
+ u8 pllR28_b7b6; /* isi,iso */
+ u8 pllR28_b5b4; /* pfd_dly */
+ u8 pllR28_b3b2; /* clksel option */
+ u32 pll_sdm_k; /* R28<1:0>, R27<7:0>, R26<7:0> (hex) SDM_k */
+ u8 pllR24_b1b0; /* xo_div */
+};
+
+struct mt76x0_rate_pwr_item {
+ s8 mcs_power;
+ u8 rf_pa_mode;
+};
+
+struct mt76x0_rate_pwr_tab {
+ struct mt76x0_rate_pwr_item cck[4];
+ struct mt76x0_rate_pwr_item ofdm[8];
+ struct mt76x0_rate_pwr_item ht[8];
+ struct mt76x0_rate_pwr_item vht[10];
+ struct mt76x0_rate_pwr_item stbc[8];
+ struct mt76x0_rate_pwr_item mcs32;
+};
+
+#endif /* _MT76X0_PHY_H_ */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
new file mode 100644
index 000000000000..16bed4aaa242
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
@@ -0,0 +1,651 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_REGS_H
+#define __MT76_REGS_H
+
+#include <linux/bitops.h>
+
+#define MT_ASIC_VERSION 0x0000
+
+#define MT76XX_REV_E3 0x22
+#define MT76XX_REV_E4 0x33
+
+#define MT_CMB_CTRL 0x0020
+#define MT_CMB_CTRL_XTAL_RDY BIT(22)
+#define MT_CMB_CTRL_PLL_LD BIT(23)
+
+#define MT_EFUSE_CTRL 0x0024
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_DATA_BASE 0x0028
+#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0 0x0040
+#define MT_COEXCFG0_COEX_EN BIT(0)
+
+#define MT_COEXCFG3 0x004c
+
+#define MT_LDO_CTRL_0 0x006c
+#define MT_LDO_CTRL_1 0x0070
+
+#define MT_WLAN_FUN_CTRL 0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0 0x0100
+#define MT_XO_CTRL1 0x0104
+#define MT_XO_CTRL2 0x0108
+#define MT_XO_CTRL3 0x010c
+#define MT_XO_CTRL4 0x0110
+
+#define MT_XO_CTRL5 0x0114
+#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8)
+
+#define MT_XO_CTRL6 0x0118
+#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8)
+
+#define MT_XO_CTRL7 0x011c
+
+#define MT_IOCFG_6 0x0124
+#define MT_WLAN_MTC_CTRL 0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28)
+
+#define MT_INT_SOURCE_CSR 0x0200
+#define MT_INT_MASK_CSR 0x0204
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+#define MT_INT_RX_COHERENT BIT(16)
+#define MT_INT_TX_COHERENT BIT(17)
+#define MT_INT_ANY_COHERENT BIT(18)
+#define MT_INT_MCU_CMD BIT(19)
+#define MT_INT_TBTT BIT(20)
+#define MT_INT_PRE_TBTT BIT(21)
+#define MT_INT_TX_STAT BIT(22)
+#define MT_INT_AUTO_WAKEUP BIT(23)
+#define MT_INT_GPTIMER BIT(24)
+#define MT_INT_RXDELAYINT BIT(26)
+#define MT_INT_TXDELAYINT BIT(27)
+
+#define MT_WPDMA_GLO_CFG 0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MT_WPDMA_RST_IDX 0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG 0x0210
+
+#define MT_WMM_AIFSN 0x0214
+#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMIN 0x0218
+#define MT_WMM_CWMIN_MASK GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMAX 0x021c
+#define MT_WMM_CWMAX_MASK GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_TXOP_BASE 0x0220
+#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n) (((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK GENMASK(15, 0)
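+/* The four ACs' TXOP values are packed two per 32-bit register: e.g.
+ * MT_WMM_TXOP(0) and MT_WMM_TXOP(1) both resolve to 0x0220 with shifts
+ * 0 and 16, while MT_WMM_TXOP(2) and MT_WMM_TXOP(3) resolve to 0x0224.
+ */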
+
+#define MT_WMM_CTRL 0x0230 /* MT76x0 */
+
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+
+#define MT_USB_DMA_CFG 0x0238
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
+#define MT_USB_DMA_CFG_TX_WL_DROP BIT(16)
+#define MT_USB_DMA_CFG_WAKEUP_EN BIT(17)
+#define MT_USB_DMA_CFG_RX_DROP_OR_PADDING BIT(18)
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_WL_LPK_EN BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24)
+#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
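+/* The #if 0 block below preserves an alternative layout of bits 19-25
+ * (apparently from the vendor driver); it is compiled out and partly
+ * conflicts with the definitions above.
+ */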
+#if 0
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25)
+#endif
+
+#define MT_TSO_CTRL 0x0250
+#define MT_HEADER_TRANS_CTRL_REG 0x0260
+
+#define MT_US_CYC_CFG 0x02a4
+#define MT_US_CYC_CNT GENMASK(7, 0)
+
+#define MT_TX_RING_BASE 0x0300
+#define MT_RX_RING_BASE 0x03c0
+#define MT_RING_SIZE 0x10
+
+#define MT_TX_HW_QUEUE_MCU 8
+#define MT_TX_HW_QUEUE_MGMT 9
+
+#define MT_PBF_SYS_CTRL 0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4)
+
+#define MT_PBF_CFG 0x0404
+#define MT_PBF_CFG_TX0Q_EN BIT(0)
+#define MT_PBF_CFG_TX1Q_EN BIT(1)
+#define MT_PBF_CFG_TX2Q_EN BIT(2)
+#define MT_PBF_CFG_TX3Q_EN BIT(3)
+#define MT_PBF_CFG_RX0Q_EN BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT 0x0408
+#define MT_PBF_RX_MAX_PCNT 0x040c
+
+#define MT_BCN_OFFSET_BASE 0x041c
+#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RXQ_STA 0x0430
+#define MT_TXQ_STA 0x0434
+#define MT_RF_CSR_CFG 0x0500
+#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)
+#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14)
+#define MT_RF_CSR_CFG_WR BIT(30)
+#define MT_RF_CSR_CFG_KICK BIT(31)
+
+#define MT_RF_BYPASS_0 0x0504
+#define MT_RF_BYPASS_1 0x0508
+#define MT_RF_SETTING_0 0x050c
+
+#define MT_RF_MISC 0x0518
+#define MT_RF_DATA_WRITE 0x0524
+
+#define MT_RF_CTRL 0x0528
+#define MT_RF_CTRL_ADDR GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE BIT(12)
+#define MT_RF_CTRL_BUSY BIT(13)
+#define MT_RF_CTRL_IDX BIT(16)
+
+#define MT_RF_DATA_READ 0x052c
+
+#define MT_COM_REG0 0x0730
+#define MT_COM_REG1 0x0734
+#define MT_COM_REG2 0x0738
+#define MT_COM_REG3 0x073c
+
+#define MT_FCE_PSE_CTRL 0x0800
+#define MT_FCE_PARAMETERS 0x0804
+#define MT_FCE_CSO 0x0808
+
+#define MT_FCE_L2_STUFF 0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
+
+#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
+
+#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
+
+#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
+
+#define MT_FCE_SKIP_FS 0x0a6c
+
+#define MT_MAC_CSR0 0x1000
+#define MT_MAC_SYS_CTRL 0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3)
+
+#define MT_MAC_ADDR_DW0 0x1008
+#define MT_MAC_ADDR_DW1 0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
+
+#define MT_MAC_BSSID_DW0 0x1010
+#define MT_MAC_BSSID_DW1 0x1014
+#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG 0x1018
+#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12)
+
+#define MT_LED_CFG 0x102c
+
+#define MT_AMPDU_MAX_LEN_20M1S 0x1030
+#define MT_AMPDU_MAX_LEN_20M2S 0x1034
+#define MT_AMPDU_MAX_LEN_40M1S 0x1038
+#define MT_AMPDU_MAX_LEN_40M2S 0x103c
+#define MT_AMPDU_MAX_LEN 0x1040
+
+#define MT_WCID_DROP_BASE 0x106c
+#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32)
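+/* One drop bit per WCID, 32 WCIDs per register: e.g. WCID 40 maps to
+ * MT_WCID_DROP_BASE + 4 with MT_WCID_DROP_MASK(40) == BIT(8).
+ */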
+
+#define MT_BCN_BYPASS_MASK 0x108c
+
+#define MT_MAC_APC_BSSID_BASE 0x1090
+#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN BIT(16)
+
+#define MT_XIFS_TIME_CFG 0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29)
+
+#define MT_BKOFF_SLOT_CFG 0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8)
+
+#define MT_BEACON_TIME_CFG 0x1114
+#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG 0x1118
+#define MT_TBTT_TIMER_CFG 0x1124
+
+#define MT_INT_TIMER_CFG 0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN 0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1)
+
+#define MT_MAC_STATUS 0x1200
+#define MT_MAC_STATUS_TX BIT(0)
+#define MT_MAC_STATUS_RX BIT(1)
+
+#define MT_PWR_PIN_CFG 0x1204
+#define MT_AUX_CLK_CFG 0x120c
+
+#define MT_BB_PA_MODE_CFG0 0x1214
+#define MT_BB_PA_MODE_CFG1 0x1218
+#define MT_RF_PA_MODE_CFG0 0x121c
+#define MT_RF_PA_MODE_CFG1 0x1220
+
+#define MT_RF_PA_MODE_ADJ0 0x1228
+#define MT_RF_PA_MODE_ADJ1 0x122c
+
+#define MT_DACCLK_EN_DLY_CFG 0x1264
+
+#define MT_EDCA_CFG_BASE 0x1300
+#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0 0x1314
+#define MT_TX_PWR_CFG_1 0x1318
+#define MT_TX_PWR_CFG_2 0x131c
+#define MT_TX_PWR_CFG_3 0x1320
+#define MT_TX_PWR_CFG_4 0x1324
+
+#define MT_TX_BAND_CFG 0x132c
+#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
+#define MT_TX_BAND_CFG_5G BIT(1)
+#define MT_TX_BAND_CFG_2G BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY 0x1384
+#define MT_TX_MPDU_ADJ_INT 0x1388
+
+#define MT_TX_PWR_CFG_7 0x13d4
+#define MT_TX_PWR_CFG_8 0x13d8
+#define MT_TX_PWR_CFG_9 0x13dc
+
+#define MT_TX_SW_CFG0 0x1330
+#define MT_TX_SW_CFG1 0x1334
+#define MT_TX_SW_CFG2 0x1338
+
+#define MT_TXOP_CTRL_CFG 0x1340
+#define MT_TXOP_TRUN_EN GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8)
+#define MT_TXOP_CTRL
+
+#define MT_TX_RTS_CFG 0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK BIT(24)
+
+#define MT_TX_TIMEOUT_CFG 0x1348
+#define MT_TX_RETRY_CFG 0x134c
+#define MT_TX_LINK_CFG 0x1350
+#define MT_HT_FBK_CFG0 0x1354
+#define MT_HT_FBK_CFG1 0x1358
+#define MT_LG_FBK_CFG0 0x135c
+#define MT_LG_FBK_CFG1 0x1360
+
+#define MT_CCK_PROT_CFG 0x1364
+#define MT_OFDM_PROT_CFG 0x1368
+#define MT_MM20_PROT_CFG 0x136c
+#define MT_MM40_PROT_CFG 0x1370
+#define MT_GF20_PROT_CFG 0x1374
+#define MT_GF40_PROT_CFG 0x1378
+
+#define MT_PROT_RATE GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS BIT(16)
+#define MT_PROT_CTRL_CTS2SELF BIT(17)
+#define MT_PROT_NAV_SHORT BIT(18)
+#define MT_PROT_NAV_LONG BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20 BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40 BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20 BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40 BIT(25)
+#define MT_PROT_RTS_THR_EN BIT(26)
+#define MT_PROT_RATE_CCK_11 0x0003
+#define MT_PROT_RATE_OFDM_6 0x4000
+#define MT_PROT_RATE_OFDM_24 0x4004
+#define MT_PROT_RATE_DUP_OFDM_24 0x4084
+#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \
+ ~MT_PROT_TXOP_ALLOW_MM40 & \
+ ~MT_PROT_TXOP_ALLOW_GF40)
+
+#define MT_EXP_ACK_TIME 0x1380
+
+#define MT_TX_PWR_CFG_0_EXT 0x1390
+#define MT_TX_PWR_CFG_1_EXT 0x1394
+
+#define MT_TX_FBK_LIMIT 0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR 0x13a0
+#define MT_TX1_RF_GAIN_CORR 0x13a4
+#define MT_TX0_RF_GAIN_ATTEN 0x13a8
+
+#define MT_TX_ALC_CFG_0 0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1 0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2 0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX0_BB_GAIN_ATTEN 0x13c0
+
+#define MT_TX_ALC_VGA3 0x13c8
+
+#define MT_TX_PROT_CFG6 0x13e0
+#define MT_TX_PROT_CFG7 0x13e4
+#define MT_TX_PROT_CFG8 0x13e8
+
+#define MT_PIFS_TX_CFG 0x13ec
+
+#define MT_RX_FILTR_CFG 0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR BIT(4)
+#define MT_RX_FILTR_CFG_MCAST BIT(5)
+#define MT_RX_FILTR_CFG_BCAST BIT(6)
+#define MT_RX_FILTR_CFG_DUP BIT(7)
+#define MT_RX_FILTR_CFG_CFACK BIT(8)
+#define MT_RX_FILTR_CFG_CFEND BIT(9)
+#define MT_RX_FILTR_CFG_ACK BIT(10)
+#define MT_RX_FILTR_CFG_CTS BIT(11)
+#define MT_RX_FILTR_CFG_RTS BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL BIT(13)
+#define MT_RX_FILTR_CFG_BA BIT(14)
+#define MT_RX_FILTR_CFG_BAR BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
+
+#define MT_AUTO_RSP_CFG 0x1404
+
+#define MT_AUTO_RSP_PREAMB_SHORT BIT(4)
+
+#define MT_LEGACY_BASIC_RATE 0x1408
+#define MT_HT_BASIC_RATE 0x140c
+#define MT_HT_CTRL_CFG 0x1410
+#define MT_RX_PARSER_CFG 0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0)
+
+#define MT_EXT_CCA_CFG 0x141c
+#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3 0x1478
+
+#define MT_PN_PAD_MODE 0x150c
+
+#define MT_TXOP_HLDR_ET 0x1608
+
+#define MT_PROT_AUTO_TX_CFG 0x1648
+
+#define MT_RX_STA_CNT0 0x1700
+#define MT_RX_STA_CNT1 0x1704
+#define MT_RX_STA_CNT2 0x1708
+#define MT_TX_STA_CNT0 0x170c
+#define MT_TX_STA_CNT1 0x1710
+#define MT_TX_STA_CNT2 0x1714
+
+/* The vendor driver defines the content of the second word of STAT_FIFO
+ * as follows:
+ * MT_TX_STAT_FIFO_RATE GENMASK(26, 16)
+ * MT_TX_STAT_FIFO_ETXBF BIT(27)
+ * MT_TX_STAT_FIFO_SND BIT(28)
+ * MT_TX_STAT_FIFO_ITXBF BIT(29)
+ * However, tests show that bits 16-31 have the same layout as the TXWI
+ * rate_ctl field, with the rate set to the rate at which the frame was
+ * acked.
+ */
+#define MT_TX_STAT_FIFO 0x1718
+#define MT_TX_STAT_FIFO_VALID BIT(0)
+#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
+#define MT_TX_STAT_FIFO_AGGR BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ BIT(7)
+#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16)
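+/* Illustrative decode of the first STAT_FIFO word (not driver code;
+ * FIELD_GET() is from <linux/bitfield.h>):
+ *   u8 wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+ *   u16 rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+ */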
+
+#define MT_TX_AGG_STAT 0x171c
+
+#define MT_TX_AGG_CNT_BASE0 0x1720
+
+#define MT_MPDU_DENSITY_CNT 0x1740
+
+#define MT_TX_AGG_CNT_BASE1 0x174c
+
+#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \
+ MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+ MT_TX_AGG_CNT_BASE1 + (((_id) - 8) << 2))
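+/* Example: the aggregation counters span two banks, so MT_TX_AGG_CNT(3)
+ * is 0x172c while MT_TX_AGG_CNT(10) is 0x1754.
+ */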
+
+#define MT_TX_STAT_FIFO_EXT 0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0)
+#define MT_TX_STAT_FIFO_EXT_PKTID GENMASK(15, 8)
+
+#define MT_BBP_CORE_BASE 0x2000
+#define MT_BBP_IBI_BASE 0x2100
+#define MT_BBP_AGC_BASE 0x2300
+#define MT_BBP_TXC_BASE 0x2400
+#define MT_BBP_RXC_BASE 0x2500
+#define MT_BBP_TXO_BASE 0x2600
+#define MT_BBP_TXBE_BASE 0x2700
+#define MT_BBP_RXFE_BASE 0x2800
+#define MT_BBP_RXO_BASE 0x2900
+#define MT_BBP_DFS_BASE 0x2a00
+#define MT_BBP_TR_BASE 0x2b00
+#define MT_BBP_CAL_BASE 0x2c00
+#define MT_BBP_DSC_BASE 0x2e00
+#define MT_BBP_PFMU_BASE 0x2f00
+
+#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2))
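+/* Example expansion: MT_BBP(AGC, 8) == MT_BBP_AGC_BASE + (8 << 2) == 0x2320 */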
+
+#define MT_BBP_CORE_R1_BW GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_GAIN GENMASK(21, 16)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_GAIN GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE 0x1800
+#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE 0x4000
+
+#define MT_WCID_KEY_BASE 0x8000
+#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE 0xa000
+#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE 0xa800
+#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0 0xac00
+#define MT_SKEY_BASE_1 0xb400
+#define MT_SKEY_0(_bss, _idx) \
+ (MT_SKEY_BASE_0 + (4 * (_bss) + (_idx)) * 32)
+#define MT_SKEY_1(_bss, _idx) \
+ (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + (_idx)) * 32)
+#define MT_SKEY(_bss, _idx) \
+ (((_bss) & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0 0xb000
+#define MT_SKEY_MODE_BASE_1 0xb3f0
+#define MT_SKEY_MODE_0(_bss) \
+ (MT_SKEY_MODE_BASE_0 + (((_bss) / 2) << 2))
+#define MT_SKEY_MODE_1(_bss) \
+ (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss) \
+ (((_bss) & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * ((_bss) & 1)))
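+/* Shared-key layout example: keys are 32 bytes each, four per BSS, with
+ * BSS 8-15 in the second bank, so MT_SKEY(9, 2) == 0xb400 + (4 * 1 + 2) * 32
+ * == 0xb4c0. Key modes are 4-bit fields packed two BSSes per 32-bit word.
+ */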
+
+#define MT_BEACON_BASE 0xc000
+
+#define MT_TEMP_SENSOR 0x1d000
+#define MT_TEMP_SENSOR_VAL GENMASK(6, 0)
+
+enum mt76_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_CKIP40,
+ MT_CIPHER_CKIP104,
+ MT_CIPHER_CKIP128,
+ MT_CIPHER_WAPI,
+};
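+/* These cipher values presumably feed the 3-bit MT_WCID_ATTR_PKEY_MODE
+ * field, with MT_WCID_ATTR_PKEY_MODE_EXT carrying a fourth bit for the
+ * values above 7 (an assumption based on the field widths above).
+ */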
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c
new file mode 100644
index 000000000000..8abdd3cd546d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
new file mode 100644
index 000000000000..8a752a09f2dc
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(__MT76X0U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76X0U_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76x0.h"
+#include "mac.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76x0
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s "
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT "%04x=%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evt, mt76x0_reg_read,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, mt76x0_reg_write,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+TRACE_EVENT(mt76x0_submit_urb,
+ TP_PROTO(struct mt76_dev *dev, struct urb *u),
+ TP_ARGS(dev, u),
+ TP_STRUCT__entry(
+ DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = u->pipe;
+ __entry->len = u->transfer_buffer_length;
+ ),
+ TP_printk(DEV_PR_FMT "p:%08x len:%u",
+ DEV_PR_ARG, __entry->pipe, __entry->len)
+);
+
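+/* Fabricate a minimal urb on the stack so synchronous transfers can
+ * reuse the tracepoint above; only the pipe and transfer_buffer_length
+ * fields are initialized, and only those are consumed.
+ */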
+#define trace_mt76x0_submit_urb_sync(__dev, __pipe, __len) ({ \
+ struct urb u; \
+ u.pipe = __pipe; \
+ u.transfer_buffer_length = __len; \
+ trace_mt76x0_submit_urb(__dev, &u); \
+})
+
+TRACE_EVENT(mt76x0_mcu_msg_send,
+ TP_PROTO(struct mt76_dev *dev,
+ struct sk_buff *skb, u32 csum, bool resp),
+ TP_ARGS(dev, skb, csum, resp),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, info)
+ __field(u32, csum)
+ __field(bool, resp)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->info = *(u32 *)skb->data;
+ __entry->csum = csum;
+ __entry->resp = resp;
+ ),
+ TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
+ DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
+);
+
+TRACE_EVENT(mt76x0_vend_req,
+ TP_PROTO(struct mt76_dev *dev, unsigned pipe, u8 req, u8 req_type,
+ u16 val, u16 offset, void *buf, size_t buflen, int ret),
+ TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
+ __field(u16, val) __field(u16, offset) __field(void*, buf)
+ __field(int, buflen) __field(int, ret)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = pipe;
+ __entry->req = req;
+ __entry->req_type = req_type;
+ __entry->val = val;
+ __entry->offset = offset;
+ __entry->buf = buf;
+ __entry->buflen = buflen;
+ __entry->ret = ret;
+ ),
+ TP_printk(DEV_PR_FMT
+ "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
+ DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
+ __entry->req_type, __entry->val, __entry->offset,
+ !!__entry->buf, __entry->buflen)
+);
+
+DECLARE_EVENT_CLASS(dev_rf_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, bank)
+ __field(u8, reg)
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ __entry->bank = bank;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
+ DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
+ )
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_read,
+ TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val)
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_write,
+ TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_simple_evt,
+ TP_PROTO(struct mt76_dev *dev, u8 val),
+ TP_ARGS(dev, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->val = val;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
+ )
+);
+
+TRACE_EVENT(mt76x0_rx,
+ TP_PROTO(struct mt76_dev *dev, struct mt76x0_rxwi *rxwi, u32 f),
+ TP_ARGS(dev, rxwi, f),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field_struct(struct mt76x0_rxwi, rxwi)
+ __field(u32, fce_info)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->rxwi = *rxwi;
+ __entry->fce_info = f;
+ ),
+ TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x", DEV_PR_ARG,
+ le32_to_cpu(__entry->rxwi.rxinfo),
+ le32_to_cpu(__entry->rxwi.ctl))
+);
+
+TRACE_EVENT(mt76x0_tx,
+ TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb,
+ struct mt76_sta *sta, struct mt76_txwi *h),
+ TP_ARGS(dev, skb, sta, h),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field_struct(struct mt76_txwi, h)
+ __field(struct sk_buff *, skb)
+ __field(struct mt76_sta *, sta)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->h = *h;
+ __entry->skb = skb;
+ __entry->sta = sta;
+ ),
+ TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate_ctl:%04hx "
+ "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
+ __entry->skb, __entry->sta,
+ le16_to_cpu(__entry->h.flags),
+ le16_to_cpu(__entry->h.rate_ctl),
+ __entry->h.ack_ctl, __entry->h.wcid,
+ le16_to_cpu(__entry->h.len_ctl))
+);
+
+TRACE_EVENT(mt76x0_tx_dma_done,
+ TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb),
+ TP_ARGS(dev, skb),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(struct sk_buff *, skb)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->skb = skb;
+ ),
+ TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
+);
+
+TRACE_EVENT(mt76x0_tx_status_cleaned,
+ TP_PROTO(struct mt76_dev *dev, int cleaned),
+ TP_ARGS(dev, cleaned),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(int, cleaned)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->cleaned = cleaned;
+ ),
+ TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
+);
+
+TRACE_EVENT(mt76x0_tx_status,
+ TP_PROTO(struct mt76_dev *dev, u32 stat1, u32 stat2),
+ TP_ARGS(dev, stat1, stat2),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, stat1) __field(u32, stat2)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->stat1 = stat1;
+ __entry->stat2 = stat2;
+ ),
+ TP_printk(DEV_PR_FMT "%08x %08x",
+ DEV_PR_ARG, __entry->stat1, __entry->stat2)
+);
+
+TRACE_EVENT(mt76x0_rx_dma_aggr,
+ TP_PROTO(struct mt76_dev *dev, int cnt, bool paged),
+ TP_ARGS(dev, cnt, paged),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, cnt)
+ __field(bool, paged)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->cnt = cnt;
+ __entry->paged = paged;
+ ),
+ TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
+ DEV_PR_ARG, __entry->cnt, __entry->paged)
+);
+
+DEFINE_EVENT(dev_simple_evt, mt76x0_set_key,
+ TP_PROTO(struct mt76_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(mt76x0_set_shared_key,
+ TP_PROTO(struct mt76_dev *dev, u8 vid, u8 key),
+ TP_ARGS(dev, vid, key),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, vid)
+ __field(u8, key)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->vid = vid;
+ __entry->key = key;
+ ),
+ TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
+ DEV_PR_ARG, __entry->vid, __entry->key)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
new file mode 100644
index 000000000000..751b49c28ae5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "trace.h"
+
+/* Take the mac80211 queue id from the skb and translate it to a hardware queue id */
+static u8 skb2q(struct sk_buff *skb)
+{
+ int qid = skb_get_queue_mapping(skb);
+
+ if (WARN_ON(qid >= MT_TXQ_PSD)) {
+ qid = MT_TXQ_BE;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ return q2hwq(qid);
+}
+
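+/* On TX the driver prepends a TXWI plus a 4-byte DMA header and pads the
+ * 802.11 header out to a 4-byte boundary; strip all of that and trim back
+ * to the original packet length (stashed in status_driver_data[0]) before
+ * handing the skb back to mac80211.
+ */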
+static void mt76x0_tx_skb_remove_dma_overhead(struct sk_buff *skb,
+ struct ieee80211_tx_info *info)
+{
+ int pkt_len = (unsigned long)info->status.status_driver_data[0];
+
+ skb_pull(skb, sizeof(struct mt76_txwi) + 4);
+ if (ieee80211_get_hdrlen_from_skb(skb) % 4)
+ mt76x0_remove_hdr_pad(skb);
+
+ skb_trim(skb, pkt_len);
+}
+
+void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ mt76x0_tx_skb_remove_dma_overhead(skb, info);
+
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ spin_lock(&dev->mac_lock);
+ ieee80211_tx_status(dev->mt76.hw, skb);
+ spin_unlock(&dev->mac_lock);
+}
+
+static int mt76x0_skb_rooms(struct mt76x0_dev *dev, struct sk_buff *skb)
+{
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u32 need_head;
+
+ need_head = sizeof(struct mt76_txwi) + 4;
+ if (hdr_len % 4)
+ need_head += 2;
+
+ return skb_cow(skb, need_head);
+}
+
+static struct mt76_txwi *
+mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, struct mt76_wcid *wcid,
+ int pkt_len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct mt76_txwi *txwi;
+ unsigned long flags;
+ u16 txwi_flags = 0;
+ u32 pkt_id;
+ u16 rate_ctl;
+ u8 nss;
+
+ txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
+ memset(txwi, 0, sizeof(*txwi));
+
+ if (!wcid->tx_rate_set)
+ ieee80211_get_tx_rates(info->control.vif, sta, skb,
+ info->control.rates, 1);
+
+ spin_lock_irqsave(&dev->mt76.lock, flags);
+ if (rate->idx < 0 || !rate->count) {
+ rate_ctl = wcid->tx_rate;
+ nss = wcid->tx_rate_nss;
+ } else {
+ rate_ctl = mt76x0_mac_tx_rate_val(dev, rate, &nss);
+ }
+ spin_unlock_irqrestore(&dev->mt76.lock, flags);
+
+ txwi->rate_ctl = cpu_to_le16(rate_ctl);
+
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+ if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+ txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_STBC);
+ if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ txwi_flags |= MT_TXWI_FLAGS_MMPS;
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ pkt_id = 1;
+ } else {
+ pkt_id = 0;
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ pkt_id |= MT_TXWI_PKTID_PROBE;
+
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 7, ba_size - 1);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ ba_size = 0;
+ } else {
+ txwi_flags |= MT_TXWI_FLAGS_AMPDU;
+ txwi_flags |= FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density);
+ }
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+ }
+
+ txwi->wcid = wcid->idx;
+ txwi->flags |= cpu_to_le16(txwi_flags);
+ txwi->len_ctl = cpu_to_le16(pkt_len);
+ txwi->pktid = pkt_id;
+
+ return txwi;
+}
+
+void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76x0_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_sta *sta = control->sta;
+ struct mt76_sta *msta = NULL;
+ struct mt76_wcid *wcid = dev->mon_wcid;
+ struct mt76_txwi *txwi;
+ int pkt_len = skb->len;
+ int hw_q = skb2q(skb);
+
+ BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+ info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
+
+ if (mt76x0_skb_rooms(dev, skb) || mt76x0_insert_hdr_pad(skb)) {
+ ieee80211_free_txskb(dev->mt76.hw, skb);
+ return;
+ }
+
+ if (sta) {
+ msta = (struct mt76_sta *) sta->drv_priv;
+ wcid = &msta->wcid;
+ } else if (vif && (!info->control.hw_key && wcid->hw_key_idx != -1)) {
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+
+ wcid = &mvif->group_wcid;
+ }
+
+ txwi = mt76x0_push_txwi(dev, skb, sta, wcid, pkt_len);
+
+ if (mt76x0_dma_enqueue_tx(dev, skb, wcid, hw_q))
+ return;
+
+ trace_mt76x0_tx(&dev->mt76, skb, msta, txwi);
+}
+
+void mt76x0_tx_stat(struct work_struct *work)
+{
+ struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+ stat_work.work);
+ struct mt76_tx_status stat;
+ unsigned long flags;
+ int cleaned = 0;
+ u8 update = 1;
+
+ while (!test_bit(MT76_REMOVED, &dev->mt76.state)) {
+ stat = mt76x0_mac_fetch_tx_status(dev);
+ if (!stat.valid)
+ break;
+
+ mt76x0_send_tx_status(dev, &stat, &update);
+
+ cleaned++;
+ }
+ trace_mt76x0_tx_status_cleaned(&dev->mt76, cleaned);
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+ if (cleaned)
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(10));
+ else if (test_and_clear_bit(MT76_MORE_STATS, &dev->mt76.state))
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(20));
+ else
+ clear_bit(MT76_READING_STATS, &dev->mt76.state);
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
+ u32 val;
+
+ /* TODO: should we do funny things with the parameters?
+ * See what mt76x0_set_default_edca() used to do in init.c.
+ */
+
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
+
+ WARN_ON(params->txop > 0xff);
+ WARN_ON(params->aifs > 0xf);
+ WARN_ON(cw_min > 0xf);
+ WARN_ON(cw_max > 0xf);
+
+ val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+ /* TODO: based on the user-controlled EnableTxBurst variable the
+ * vendor driver sets a really long TXOP on AC0 (see
+ * connect.c:2009), but apparently only while connected? When not
+ * connected it should be 0.
+ */
+ if (!hw_q)
+ val |= 0x60;
+ else
+ val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
+ mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
+
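+ /* Besides the per-AC EDCA_CFG register, the same parameters are
+ * mirrored into the shared WMM registers, packed per hardware
+ * queue: 16 bits of TXOP and 4-bit AIFSN/CWMIN/CWMAX fields.
+ */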
+ val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
new file mode 100644
index 000000000000..54ae1f113be2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt76x0.h"
+#include "usb.h"
+#include "trace.h"
+
+static const struct usb_device_id mt76x0_device_table[] = {
+ { USB_DEVICE(0x148F, 0x7610) }, /* MT7610U */
+ { USB_DEVICE(0x13B1, 0x003E) }, /* Linksys AE6000 */
+ { USB_DEVICE(0x0E8D, 0x7610) }, /* Sabrent NTWLAC */
+ { USB_DEVICE(0x7392, 0xa711) }, /* Edimax 7711mac */
+ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax / Elecom */
+ { USB_DEVICE(0x148f, 0x761a) }, /* TP-Link TL-WDN5200 */
+ { USB_DEVICE(0x148f, 0x760a) }, /* TP-Link unknown */
+ { USB_DEVICE(0x0b05, 0x17d1) }, /* Asus USB-AC51 */
+ { USB_DEVICE(0x0b05, 0x17db) }, /* Asus USB-AC50 */
+ { USB_DEVICE(0x0df6, 0x0075) }, /* Sitecom WLA-3100 */
+ { USB_DEVICE(0x2019, 0xab31) }, /* Planex GW-450D */
+ { USB_DEVICE(0x2001, 0x3d02) }, /* D-LINK DWA-171 rev B1 */
+ { USB_DEVICE(0x0586, 0x3425) }, /* Zyxel NWD6505 */
+ { USB_DEVICE(0x07b8, 0x7610) }, /* AboCom AU7212 */
+ { USB_DEVICE(0x04bb, 0x0951) }, /* I-O DATA WN-AC433UK */
+ { USB_DEVICE(0x057c, 0x8502) }, /* AVM FRITZ!WLAN USB Stick AC 430 */
+ { USB_DEVICE(0x293c, 0x5702) }, /* Comcast Xfinity KXW02AAA */
+ { USB_DEVICE(0x20f4, 0x806b) }, /* TRENDnet TEW-806UBH */
+ { USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */
+ { USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. ac Stick */
+ { USB_DEVICE(0x2357, 0x0105) }, /* TP-LINK Archer T1U */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff)}, /* MT7630U */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff)}, /* MT7650U */
+ { 0, }
+};
+
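+/* Note: returns true on failure, i.e. when either the urb or the
+ * coherent buffer could not be allocated.
+ */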
+bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
+ struct mt76x0_dma_buf *buf)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+
+ buf->len = len;
+ buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma);
+
+ return !buf->urb || !buf->buf;
+}
+
+void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+
+ usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma);
+ usb_free_urb(buf->urb);
+}
+
+int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
+ struct mt76x0_dma_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ unsigned pipe;
+ int ret;
+
+ if (dir == USB_DIR_IN)
+ pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[ep_idx]);
+ else
+ pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep_idx]);
+
+ usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len,
+ complete_fn, context);
+ buf->urb->transfer_dma = buf->dma;
+ buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ trace_mt76x0_submit_urb(&dev->mt76, buf->urb);
+ ret = usb_submit_urb(buf->urb, gfp);
+ if (ret)
+ dev_err(dev->mt76.dev, "Error: submit URB dir:%d ep:%d failed:%d\n",
+ dir, ep_idx, ret);
+ return ret;
+}
+
+void mt76x0_complete_urb(struct urb *urb)
+{
+ struct completion *cmpl = urb->context;
+
+ complete(cmpl);
+}
+
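+/* Issue a vendor control request, retrying up to MT_VEND_REQ_MAX_RETRY
+ * times with a 5 ms sleep between attempts; -ENODEV short-circuits the
+ * retries and marks the device as removed.
+ */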
+int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen)
+{
+ int i, ret;
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ const unsigned int pipe = (direction == USB_DIR_IN) ?
+ usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
+
+ for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+ ret = usb_control_msg(usb_dev, pipe, req, req_type,
+ val, offset, buf, buflen,
+ MT_VEND_REQ_TOUT_MS);
+ trace_mt76x0_vend_req(&dev->mt76, pipe, req, req_type, val, offset,
+ buf, buflen, ret);
+
+ if (ret == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->mt76.state);
+ if (ret >= 0 || ret == -ENODEV)
+ return ret;
+
+ msleep(5);
+ }
+
+ dev_err(dev->mt76.dev, "Vendor request req:%02x off:%04x failed:%d\n",
+ req, offset, ret);
+
+ return ret;
+}
+
+void mt76x0_vendor_reset(struct mt76x0_dev *dev)
+{
+ mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+ MT_VEND_DEV_MODE_RESET, 0, NULL, 0);
+}
+
+static u32 mt76x0_rr(struct mt76_dev *dev, u32 offset)
+{
+ struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
+ int ret;
+ u32 val = ~0;
+
+ WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+
+ mutex_lock(&mdev->usb_ctrl_mtx);
+
+ ret = mt76x0_vendor_request(mdev, MT_VEND_MULTI_READ, USB_DIR_IN,
+ 0, offset, mdev->data, MT_VEND_BUF);
+ if (ret == MT_VEND_BUF)
+ val = get_unaligned_le32(mdev->data);
+ else if (ret > 0)
+ dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
+ ret, offset);
+
+ mutex_unlock(&mdev->usb_ctrl_mtx);
+
+ trace_mt76x0_reg_read(dev, offset, val);
+ return val;
+}
+
+int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
+ const u16 offset, const u32 val)
+{
+ struct mt76x0_dev *mdev = dev;
+ int ret;
+
+ mutex_lock(&mdev->usb_ctrl_mtx);
+
+ ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
+ val & 0xffff, offset, NULL, 0);
+ if (!ret)
+ ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
+ val >> 16, offset + 2, NULL, 0);
+
+ mutex_unlock(&mdev->usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void mt76x0_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+ struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
+ int ret;
+
+ WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset);
+
+ mutex_lock(&mdev->usb_ctrl_mtx);
+
+ put_unaligned_le32(val, mdev->data);
+ ret = mt76x0_vendor_request(mdev, MT_VEND_MULTI_WRITE, USB_DIR_OUT,
+ 0, offset, mdev->data, MT_VEND_BUF);
+ trace_mt76x0_reg_write(dev, offset, val);
+
+ mutex_unlock(&mdev->usb_ctrl_mtx);
+}
+
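+/* Read-modify-write helper. Note that the read and the write each take
+ * usb_ctrl_mtx separately, so the sequence as a whole is not atomic
+ * with respect to other register accesses.
+ */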
+static u32 mt76x0_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ val |= mt76x0_rr(dev, offset) & ~mask;
+ mt76x0_wr(dev, offset, val);
+ return val;
+}
+
+static void mt76x0_wr_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset);
+ WARN_ONCE(len & 3, "short write copy off:%08x", offset);
+
+ mt76x0_burst_write_regs((struct mt76x0_dev *) dev, offset, data, len / 4);
+}
+
+void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr)
+{
+ mt76_wr(dev, offset, get_unaligned_le32(addr));
+ mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8);
+}
+
+static int mt76x0_assign_pipes(struct usb_interface *usb_intf,
+ struct mt76x0_dev *dev)
+{
+ struct usb_endpoint_descriptor *ep_desc;
+ struct usb_host_interface *intf_desc = usb_intf->cur_altsetting;
+ unsigned i, ep_i = 0, ep_o = 0;
+
+ BUILD_BUG_ON(sizeof(dev->in_ep) < __MT_EP_IN_MAX);
+ BUILD_BUG_ON(sizeof(dev->out_ep) < __MT_EP_OUT_MAX);
+
+ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &intf_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(ep_desc) &&
+ ep_i++ < __MT_EP_IN_MAX) {
+ dev->in_ep[ep_i - 1] = usb_endpoint_num(ep_desc);
+ dev->in_max_packet = usb_endpoint_maxp(ep_desc);
+ /* Note: the IN direction bit is ignored by the USB
+ * subsystem, but the vendor code sets it. We can
+ * drop this at some point.
+ */
+ dev->in_ep[ep_i - 1] |= USB_DIR_IN;
+ } else if (usb_endpoint_is_bulk_out(ep_desc) &&
+ ep_o++ < __MT_EP_OUT_MAX) {
+ dev->out_ep[ep_o - 1] = usb_endpoint_num(ep_desc);
+ dev->out_max_packet = usb_endpoint_maxp(ep_desc);
+ }
+ }
+
+ if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) {
+ dev_err(dev->mt76.dev, "Error: wrong pipe number in:%d out:%d\n",
+ ep_i, ep_o);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mt76x0_probe(struct usb_interface *usb_intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
+ struct mt76x0_dev *dev;
+ u32 asic_rev, mac_rev;
+ int ret;
+ static const struct mt76_bus_ops usb_ops = {
+ .rr = mt76x0_rr,
+ .wr = mt76x0_wr,
+ .rmw = mt76x0_rmw,
+ .copy = mt76x0_wr_copy,
+ };
+
+ dev = mt76x0_alloc_device(&usb_intf->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ usb_dev = usb_get_dev(usb_dev);
+ usb_reset_device(usb_dev);
+
+ usb_set_intfdata(usb_intf, dev);
+
+ dev->mt76.bus = &usb_ops;
+
+ ret = mt76x0_assign_pipes(usb_intf, dev);
+ if (ret)
+ goto err;
+
+ /* Disable the HW; otherwise the MCU fails to initialize on hot reboot */
+ mt76x0_chip_onoff(dev, false, false);
+
+ ret = mt76x0_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+
+ asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
+ mac_rev = mt76_rr(dev, MT_MAC_CSR0);
+ dev_info(dev->mt76.dev, "ASIC revision: %08x MAC revision: %08x\n",
+ asic_rev, mac_rev);
+
+ /* Note: vendor driver skips this check for MT76X0U */
+ if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
+ dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n");
+
+ ret = mt76x0_init_hardware(dev);
+ if (ret)
+ goto err;
+
+ ret = mt76x0_register_device(dev);
+ if (ret)
+ goto err_hw;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ return 0;
+err_hw:
+ mt76x0_cleanup(dev);
+err:
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ destroy_workqueue(dev->stat_wq);
+ ieee80211_free_hw(dev->mt76.hw);
+ return ret;
+}
+
+static void mt76x0_disconnect(struct usb_interface *usb_intf)
+{
+ struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+ bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ if (!initialized)
+ return;
+
+ ieee80211_unregister_hw(dev->mt76.hw);
+ mt76x0_cleanup(dev);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ destroy_workqueue(dev->stat_wq);
+ ieee80211_free_hw(dev->mt76.hw);
+}
+
+static int mt76x0_suspend(struct usb_interface *usb_intf, pm_message_t state)
+{
+ struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+
+ mt76x0_cleanup(dev);
+
+ return 0;
+}
+
+static int mt76x0_resume(struct usb_interface *usb_intf)
+{
+ struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+ int ret;
+
+ ret = mt76x0_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
+MODULE_FIRMWARE(MT7610_FIRMWARE);
+MODULE_LICENSE("GPL");
+
+static struct usb_driver mt76x0_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76x0_device_table,
+ .probe = mt76x0_probe,
+ .disconnect = mt76x0_disconnect,
+ .suspend = mt76x0_suspend,
+ .resume = mt76x0_resume,
+ .reset_resume = mt76x0_resume,
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x0_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
new file mode 100644
index 000000000000..492e431390a8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_USB_H
+#define __MT76X0U_USB_H
+
+#include "mt76x0.h"
+
+#define MT7610_FIRMWARE "mediatek/mt7610u.bin"
+
+#define MT_VEND_REQ_MAX_RETRY 10
+#define MT_VEND_REQ_TOUT_MS 300
+
+#define MT_VEND_DEV_MODE_RESET 1
+
+#define MT_VEND_BUF sizeof(__le32)
+
+static inline struct usb_device *mt76x0_to_usb_dev(struct mt76x0_dev *mt76x0)
+{
+ return interface_to_usbdev(to_usb_interface(mt76x0->mt76.dev));
+}
+
+static inline struct usb_device *mt76_to_usb_dev(struct mt76_dev *mt76)
+{
+ return interface_to_usbdev(to_usb_interface(mt76->dev));
+}
+
+static inline bool mt76x0_urb_has_error(struct urb *urb)
+{
+ return urb->status &&
+ urb->status != -ENOENT &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN;
+}
+
+bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
+ struct mt76x0_dma_buf *buf);
+void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf);
+int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
+ struct mt76x0_dma_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context);
+void mt76x0_complete_urb(struct urb *urb);
+
+int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen);
+void mt76x0_vendor_reset(struct mt76x0_dev *dev);
+int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
+ const u16 offset, const u32 val);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/util.c b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
new file mode 100644
index 000000000000..7856dd760419
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+
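+/* The hardware expects the 802.11 header to be padded out to a 4-byte
+ * boundary; these helpers insert or remove the 2-byte pad between the
+ * header and the payload accordingly.
+ */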
+void mt76x0_remove_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ memmove(skb->data + 2, skb->data, len);
+ skb_pull(skb, 2);
+}
+
+int mt76x0_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+ int ret;
+
+ if (len % 4 == 0)
+ return 0;
+
+ ret = skb_cow(skb, 2);
+ if (ret)
+ return ret;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
index dc12bbdbb2ee..dca3209bf5f1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h
@@ -27,11 +27,15 @@
#include <linux/mutex.h>
#include <linux/bitops.h>
#include <linux/kfifo.h>
+#include <linux/average.h>
#define MT7662_FIRMWARE "mt7662.bin"
#define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
#define MT7662_EEPROM_SIZE 512
+#define MT7662U_FIRMWARE "mediatek/mt7662u.bin"
+#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin"
+
#define MT76x2_RX_RING_SIZE 256
#define MT_RX_HEADROOM 32
@@ -47,11 +51,14 @@
#include "mt76x2_mac.h"
#include "mt76x2_dfs.h"
+DECLARE_EWMA(signal, 10, 8)
+
struct mt76x2_mcu {
struct mutex mutex;
wait_queue_head_t wait;
struct sk_buff_head res_q;
+ struct mt76u_buf res_u;
u32 msg_seq;
};
@@ -69,9 +76,8 @@ struct mt76x2_calibration {
u8 agc_gain_init[MT_MAX_CHAINS];
u8 agc_gain_cur[MT_MAX_CHAINS];
- int avg_rssi[MT_MAX_CHAINS];
- int avg_rssi_all;
-
+ u16 false_cca;
+ s8 avg_rssi_all;
s8 agc_gain_adjust;
s8 low_gain;
@@ -120,10 +126,13 @@ struct mt76x2_dev {
u8 beacon_mask;
u8 beacon_data_mask;
- u32 rxfilter;
+ u8 tbtt_count;
+ u16 beacon_int;
u16 chainmask;
+ u32 rxfilter;
+
struct mt76x2_calibration cal;
s8 target_power;
@@ -149,8 +158,28 @@ struct mt76x2_sta {
struct mt76x2_vif *vif;
struct mt76x2_tx_status status;
int n_frames;
+
+ struct ewma_signal rssi;
+ int inactive_count;
};
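+/* Poll MT_MAC_CSR0 until the bus responds with something other than
+ * 0 or ~0, sleeping 5-10 ms between reads for up to 500 iterations.
+ */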
+static inline bool mt76x2_wait_for_mac(struct mt76x2_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 500; i++) {
+ switch (mt76_rr(dev, MT_MAC_CSR0)) {
+ case 0:
+ case ~0:
+ break;
+ default:
+ return true;
+ }
+ usleep_range(5000, 10000);
+ }
+ return false;
+}
+
static inline bool is_mt7612(struct mt76x2_dev *dev)
{
return mt76_chip(&dev->mt76) == 0x7612;
@@ -158,6 +187,14 @@ static inline bool is_mt7612(struct mt76x2_dev *dev)
void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
+static inline bool mt76x2_channel_silent(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+
+ return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+ chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask)
{
mt76x2_set_irq_mask(dev, 0, mask);
@@ -168,11 +205,29 @@ static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask)
mt76x2_set_irq_mask(dev, mask, 0);
}
+static inline bool mt76x2_wait_for_bbp(struct mt76x2_dev *dev)
+{
+ return mt76_poll_msec(dev, MT_MAC_STATUS,
+ MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+ 0, 100);
+}
+
+static inline bool wait_for_wpdma(struct mt76x2_dev *dev)
+{
+ return mt76_poll(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+ 0, 1000);
+}
+
extern const struct ieee80211_ops mt76x2_ops;
+extern struct ieee80211_rate mt76x2_rates[12];
+
struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
int mt76x2_register_device(struct mt76x2_dev *dev);
void mt76x2_init_debugfs(struct mt76x2_dev *dev);
+void mt76x2_init_device(struct mt76x2_dev *dev);
irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
void mt76x2_phy_power_on(struct mt76x2_dev *dev);
@@ -186,7 +241,7 @@ void mt76x2_phy_set_antenna(struct mt76x2_dev *dev);
int mt76x2_phy_start(struct mt76x2_dev *dev);
int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
struct cfg80211_chan_def *chandef);
-int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
+int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
void mt76x2_phy_calibrate(struct work_struct *work);
void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
@@ -214,6 +269,7 @@ int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
u32 *tx_info);
void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush);
+void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val);
void mt76x2_pre_tbtt_tasklet(unsigned long arg);
@@ -230,4 +286,45 @@ s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
+int mt76x2_insert_hdr_pad(struct sk_buff *skb);
+
+bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat);
+void mt76x2_send_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat, u8 *update);
+void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable);
+void mt76x2_init_txpower(struct mt76x2_dev *dev,
+ struct ieee80211_supported_band *sband);
+void mt76_write_mac_initvals(struct mt76x2_dev *dev);
+
+int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params);
+int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt76x2_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x2_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast);
+void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq);
+void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
+ enum nl80211_band band);
+void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
+ enum nl80211_band band, u8 bw);
+void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl);
+void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper);
+int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev);
+void mt76x2_apply_gain_adj(struct mt76x2_dev *dev);
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
new file mode 100644
index 000000000000..a2338ba139b4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
+{
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return;
+
+ mtxq = (struct mt76_txq *) txq->drv_priv;
+ if (txq->sta) {
+ struct mt76x2_sta *sta;
+
+ sta = (struct mt76x2_sta *) txq->sta->drv_priv;
+ mtxq->wcid = &sta->wcid;
+ } else {
+ struct mt76x2_vif *mvif;
+
+ mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
+ mtxq->wcid = &mvif->group_wcid;
+ }
+
+ mt76_txq_init(&dev->mt76, txq);
+}
+EXPORT_SYMBOL_GPL(mt76x2_txq_init);
+
+int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct ieee80211_sta *sta = params->sta;
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+ BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ mtxq->agg_ssn = *ssn << 4;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_ampdu_action);
+
+int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ int ret = 0;
+ int idx = 0;
+ int i;
+
+ mutex_lock(&dev->mutex);
+
+ idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76x2_mac_wcid_set_drop(dev, idx, false);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76x2_txq_init(dev, sta->txq[i]);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
+
+ ewma_signal_init(&msta->rssi);
+
+ rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_add);
+
+int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ int idx = msta->wcid.idx;
+ int i;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[idx], NULL);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76_txq_remove(&dev->mt76, sta->txq[i]);
+ mt76x2_mac_wcid_set_drop(dev, idx, true);
+ mt76_wcid_free(dev->wcid_mask, idx);
+ mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_remove);
+
+void mt76x2_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mt76_txq_remove(&dev->mt76, vif->txq);
+}
+EXPORT_SYMBOL_GPL(mt76x2_remove_interface);
+
+int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ struct mt76x2_sta *msta;
+ struct mt76_wcid *wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * The hardware does not support per-STA RX GTK; fall back
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
+ wcid = msta ? &msta->wcid : &mvif->group_wcid;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ wcid->sw_iv = true;
+ }
+ } else {
+ if (idx == wcid->hw_key_idx) {
+ wcid->hw_key_idx = -1;
+ wcid->sw_iv = true;
+ }
+
+ key = NULL;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+EXPORT_SYMBOL_GPL(mt76x2_set_key);
+
+int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, qid;
+ u32 val;
+
+ qid = dev->mt76.q_tx[queue].hw_idx;
+
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
+
+ val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
+ FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+ mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(qid));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_TXOP(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_conf_tx);
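A standalone sketch of the fls()-based encoding in mt76x2_conf_tx() above:
mac80211 hands cw_min/cw_max over as (2^n - 1) values, while the EDCA
registers store only the exponent n, and fls(2^n - 1) == n. The fls()
stand-in below mirrors the kernel semantics ("find last set", 1-based);
the test values are illustrative:

#include <assert.h>

static int fls_sketch(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	assert(fls_sketch(15) == 4);	/* cw = 15   -> exponent 4  */
	assert(fls_sketch(1023) == 10);	/* cw = 1023 -> exponent 10 */
	return 0;
}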
+
+void mt76x2_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mutex_lock(&dev->mutex);
+
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x2_configure_filter);
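The MT76_FILTER() macro above relies on "!(cond) * bits" evaluating to either
0 or bits: the register holds *drop* bits, so a hardware bit is set exactly
when mac80211 did not request that frame class. A plain-C sketch of the same
trick, with made-up flag and bit values (FIF_DEMO/HW_DROP_DEMO are not real
driver symbols):

#include <assert.h>
#include <stdint.h>

#define FIF_DEMO	0x1	/* illustrative stack filter flag */
#define HW_DROP_DEMO	0x80	/* illustrative hardware drop bit */

static uint32_t apply_filter(uint32_t rxfilter, unsigned int total_flags)
{
	uint32_t flags = total_flags & FIF_DEMO;

	rxfilter &= ~HW_DROP_DEMO;
	rxfilter |= !(flags & FIF_DEMO) * HW_DROP_DEMO;
	return rxfilter;
}

int main(void)
{
	assert(apply_filter(0, FIF_DEMO) == 0);		/* requested: pass */
	assert(apply_filter(0, 0) == HW_DROP_DEMO);	/* otherwise: drop */
	return 0;
}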
+
+void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
+ struct ieee80211_tx_rate rate = {};
+
+ if (!rates)
+ return;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+ msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_rate_tbl_update);
+
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ void *rxwi = skb->data;
+
+ if (q == MT_RXQ_MCU) {
+ skb_queue_tail(&dev->mcu.res_q, skb);
+ wake_up(&dev->mcu.wait);
+ return;
+ }
+
+ skb_pull(skb, sizeof(struct mt76x2_rxwi));
+ if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ mt76_rx(&dev->mt76, q, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x2_queue_rx_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
index 955ea3e692dd..77b5ff1be05f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
@@ -91,12 +91,20 @@ mt76x2_dfs_stat_read(struct seq_file *file, void *data)
struct mt76x2_dev *dev = file->private;
struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ seq_printf(file, "allocated sequences:\t%d\n",
+ dfs_pd->seq_stats.seq_pool_len);
+ seq_printf(file, "used sequences:\t\t%d\n",
+ dfs_pd->seq_stats.seq_len);
+ seq_puts(file, "\n");
+
for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
seq_printf(file, "engine: %d\n", i);
seq_printf(file, " hw pattern detected:\t%d\n",
dfs_pd->stats[i].hw_pattern);
seq_printf(file, " hw pulse discarded:\t%d\n",
dfs_pd->stats[i].hw_pulse_discarded);
+ seq_printf(file, " sw pattern detected:\t%d\n",
+ dfs_pd->stats[i].sw_pattern);
}
return 0;
@@ -115,6 +123,18 @@ static const struct file_operations fops_dfs_stat = {
.release = single_release,
};
+static int read_agc(struct seq_file *file, void *data)
+{
+ struct mt76x2_dev *dev = dev_get_drvdata(file->private);
+
+ seq_printf(file, "avg_rssi: %d\n", dev->cal.avg_rssi_all);
+ seq_printf(file, "low_gain: %d\n", dev->cal.low_gain);
+ seq_printf(file, "false_cca: %d\n", dev->cal.false_cca);
+ seq_printf(file, "agc_gain_adjust: %d\n", dev->cal.agc_gain_adjust);
+
+ return 0;
+}
+
void mt76x2_init_debugfs(struct mt76x2_dev *dev)
{
struct dentry *dir;
@@ -130,4 +150,7 @@ void mt76x2_init_debugfs(struct mt76x2_dev *dev)
debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
read_txpower);
+
+ debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
}
+EXPORT_SYMBOL_GPL(mt76x2_init_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
index f936dc9a5476..374cc655c11d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
@@ -159,6 +159,81 @@ static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev,
mt76_wr(dev, MT_BBP(DFS, 36), data);
}
+static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_sequence *seq)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ list_add(&seq->head, &dfs_pd->seq_pool);
+
+ dfs_pd->seq_stats.seq_pool_len++;
+ dfs_pd->seq_stats.seq_len--;
+}
+
+static
+struct mt76x2_dfs_sequence *mt76x2_dfs_seq_pool_get(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sequence *seq;
+
+ if (list_empty(&dfs_pd->seq_pool)) {
+ seq = devm_kzalloc(dev->mt76.dev, sizeof(*seq), GFP_ATOMIC);
+ } else {
+ seq = list_first_entry(&dfs_pd->seq_pool,
+ struct mt76x2_dfs_sequence,
+ head);
+ list_del(&seq->head);
+ dfs_pd->seq_stats.seq_pool_len--;
+ }
+ if (seq)
+ dfs_pd->seq_stats.seq_len++;
+
+ return seq;
+}
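mt76x2_dfs_seq_pool_get()/_put() above implement a simple free-list object
pool: freed sequences are parked on seq_pool and reused before falling back
to the allocator, which matters here because allocation happens with
GFP_ATOMIC from tasklet context. A userspace sketch of the same pattern,
using malloc-family calls in place of devm_kzalloc():

#include <assert.h>
#include <stdlib.h>

struct seq_demo {
	struct seq_demo *next;	/* stands in for the list_head */
	int data;
};

static struct seq_demo *pool;

static struct seq_demo *pool_get(void)
{
	struct seq_demo *s = pool;

	if (s)
		pool = s->next;			/* fast path: recycle */
	else
		s = calloc(1, sizeof(*s));	/* slow path: allocate */
	return s;
}

static void pool_put(struct seq_demo *s)
{
	s->next = pool;
	pool = s;
}

int main(void)
{
	struct seq_demo *a = pool_get();

	pool_put(a);
	assert(pool_get() == a);	/* recycled, not reallocated */
	return 0;
}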
+
+static int mt76x2_dfs_get_multiple(int val, int frac, int margin)
+{
+ int remainder, factor;
+
+ if (!frac)
+ return 0;
+
+ if (abs(val - frac) <= margin)
+ return 1;
+
+ factor = val / frac;
+ remainder = val % frac;
+
+ if (remainder > margin) {
+ if ((frac - remainder) <= margin)
+ factor++;
+ else
+ factor = 0;
+ }
+ return factor;
+}
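A worked example of mt76x2_dfs_get_multiple() above: it answers "is val an
integer multiple of frac, to within margin ticks", returning the multiple
(0 means no match). The standalone copy below uses the same arithmetic as
the driver function; the test values are illustrative:

#include <assert.h>
#include <stdlib.h>

static int get_multiple(int val, int frac, int margin)
{
	int remainder, factor;

	if (!frac)
		return 0;
	if (abs(val - frac) <= margin)
		return 1;

	factor = val / frac;
	remainder = val % frac;
	if (remainder > margin) {
		if ((frac - remainder) <= margin)
			factor++;	/* just under the next multiple */
		else
			factor = 0;	/* off the grid entirely */
	}
	return factor;
}

int main(void)
{
	assert(get_multiple(3010, 1000, 20) == 3);	/* 3 * 1000 + 10 */
	assert(get_multiple(2990, 1000, 20) == 3);	/* 3 * 1000 - 10 */
	assert(get_multiple(3500, 1000, 20) == 0);	/* no clean multiple */
	return 0;
}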
+
+static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sequence *seq, *tmp_seq;
+ int i;
+
+ /* reset hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+ /* reset sw detector */
+ for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
+ dfs_pd->event_rb[i].h_rb = 0;
+ dfs_pd->event_rb[i].t_rb = 0;
+ }
+
+ list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
+ list_del_init(&seq->head);
+ mt76x2_dfs_seq_pool_put(dev, seq);
+ }
+}
+
static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev)
{
bool ret = false;
@@ -295,6 +370,256 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev,
return ret;
}
+static bool mt76x2_dfs_fetch_event(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event)
+{
+ u32 data;
+
+ /* 1st: DFS_R37[31]: 0 (engine 0) - 1 (engine 2)
+ * 2nd: DFS_R37[21:0]: pulse time
+ * 3rd: DFS_R37[11:0]: pulse width
+ * 3rd: DFS_R37[25:16]: phase
+ * 4th: DFS_R37[12:0]: current pwr
+ * 4th: DFS_R37[21:16]: pwr stable counter
+ *
+ * 1st: DFS_R37[31:0] set to 0xffffffff means no event detected
+ */
+ data = mt76_rr(dev, MT_BBP(DFS, 37));
+ if (!MT_DFS_CHECK_EVENT(data))
+ return false;
+
+ event->engine = MT_DFS_EVENT_ENGINE(data);
+ data = mt76_rr(dev, MT_BBP(DFS, 37));
+ event->ts = MT_DFS_EVENT_TIMESTAMP(data);
+ data = mt76_rr(dev, MT_BBP(DFS, 37));
+ event->width = MT_DFS_EVENT_WIDTH(data);
+
+ return true;
+}
+
+static bool mt76x2_dfs_check_event(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event)
+{
+ if (event->engine == 2) {
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_event_rb *event_buff = &dfs_pd->event_rb[1];
+ u16 last_event_idx;
+ u32 delta_ts;
+
+ last_event_idx = mt76_decr(event_buff->t_rb,
+ MT_DFS_EVENT_BUFLEN);
+ delta_ts = event->ts - event_buff->data[last_event_idx].ts;
+ if (delta_ts < MT_DFS_EVENT_TIME_MARGIN &&
+ event_buff->data[last_event_idx].width >= 200)
+ return false;
+ }
+ return true;
+}
+
+static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_event_rb *event_buff;
+
+ /* add radar event to ring buffer */
+ event_buff = event->engine == 2 ? &dfs_pd->event_rb[1]
+ : &dfs_pd->event_rb[0];
+ event_buff->data[event_buff->t_rb] = *event;
+ event_buff->data[event_buff->t_rb].fetch_ts = jiffies;
+
+ event_buff->t_rb = mt76_incr(event_buff->t_rb, MT_DFS_EVENT_BUFLEN);
+ if (event_buff->t_rb == event_buff->h_rb)
+ event_buff->h_rb = mt76_incr(event_buff->h_rb,
+ MT_DFS_EVENT_BUFLEN);
+}
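The event ring above advances the tail on every insert and, when the buffer
fills, bumps the head as well, silently discarding the oldest event. A
standalone sketch of that index discipline, assuming mt76_incr() wraps an
index modulo the buffer size (the tiny BUFLEN is illustrative; the driver
uses a power-of-two length of 256):

#include <assert.h>

#define BUFLEN 4

static int rb_incr(int v)
{
	return (v + 1) % BUFLEN;
}

int main(void)
{
	int data[BUFLEN], h = 0, t = 0, i;

	for (i = 0; i < 5; i++) {	/* one more than capacity */
		data[t] = i;
		t = rb_incr(t);
		if (t == h)
			h = rb_incr(h);	/* full: drop the oldest entry */
	}
	assert(data[h] == 2);	/* events 0 and 1 were dropped */
	return 0;
}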
+
+static int mt76x2_dfs_create_sequence(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event,
+ u16 cur_len)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sw_detector_params *sw_params;
+ u32 width_delta, width_sum, factor, cur_pri;
+ struct mt76x2_dfs_sequence seq, *seq_p;
+ struct mt76x2_dfs_event_rb *event_rb;
+ struct mt76x2_dfs_event *cur_event;
+ int i, j, end, pri;
+
+ event_rb = event->engine == 2 ? &dfs_pd->event_rb[1]
+ : &dfs_pd->event_rb[0];
+
+ i = mt76_decr(event_rb->t_rb, MT_DFS_EVENT_BUFLEN);
+ end = mt76_decr(event_rb->h_rb, MT_DFS_EVENT_BUFLEN);
+
+ while (i != end) {
+ cur_event = &event_rb->data[i];
+ width_sum = event->width + cur_event->width;
+
+ sw_params = &dfs_pd->sw_dpd_params;
+ switch (dev->dfs_pd.region) {
+ case NL80211_DFS_FCC:
+ case NL80211_DFS_JP:
+ if (width_sum < 600)
+ width_delta = 8;
+ else
+ width_delta = width_sum >> 3;
+ break;
+ case NL80211_DFS_ETSI:
+ if (event->engine == 2)
+ width_delta = width_sum >> 6;
+ else if (width_sum < 620)
+ width_delta = 24;
+ else
+ width_delta = 8;
+ break;
+ case NL80211_DFS_UNSET:
+ default:
+ return -EINVAL;
+ }
+
+ pri = event->ts - cur_event->ts;
+ if (abs(event->width - cur_event->width) > width_delta ||
+ pri < sw_params->min_pri)
+ goto next;
+
+ if (pri > sw_params->max_pri)
+ break;
+
+ seq.pri = event->ts - cur_event->ts;
+ seq.first_ts = cur_event->ts;
+ seq.last_ts = event->ts;
+ seq.engine = event->engine;
+ seq.count = 2;
+
+ j = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
+ while (j != end) {
+ cur_event = &event_rb->data[j];
+ cur_pri = event->ts - cur_event->ts;
+ factor = mt76x2_dfs_get_multiple(cur_pri, seq.pri,
+ sw_params->pri_margin);
+ if (factor > 0) {
+ seq.first_ts = cur_event->ts;
+ seq.count++;
+ }
+
+ j = mt76_decr(j, MT_DFS_EVENT_BUFLEN);
+ }
+ if (seq.count <= cur_len)
+ goto next;
+
+ seq_p = mt76x2_dfs_seq_pool_get(dev);
+ if (!seq_p)
+ return -ENOMEM;
+
+ *seq_p = seq;
+ INIT_LIST_HEAD(&seq_p->head);
+ list_add(&seq_p->head, &dfs_pd->sequences);
+next:
+ i = mt76_decr(i, MT_DFS_EVENT_BUFLEN);
+ }
+ return 0;
+}
+
+static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_event *event)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sw_detector_params *sw_params;
+ struct mt76x2_dfs_sequence *seq, *tmp_seq;
+ u16 max_seq_len = 0;
+ u32 factor, pri;
+
+ sw_params = &dfs_pd->sw_dpd_params;
+ list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
+ if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) {
+ list_del_init(&seq->head);
+ mt76x2_dfs_seq_pool_put(dev, seq);
+ continue;
+ }
+
+ if (event->engine != seq->engine)
+ continue;
+
+ pri = event->ts - seq->last_ts;
+ factor = mt76x2_dfs_get_multiple(pri, seq->pri,
+ sw_params->pri_margin);
+ if (factor > 0) {
+ seq->last_ts = event->ts;
+ seq->count++;
+ max_seq_len = max_t(u16, max_seq_len, seq->count);
+ }
+ }
+ return max_seq_len;
+}
+
+static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_sequence *seq;
+
+ if (list_empty(&dfs_pd->sequences))
+ return false;
+
+ list_for_each_entry(seq, &dfs_pd->sequences, head) {
+ if (seq->count > MT_DFS_SEQUENCE_TH) {
+ dfs_pd->stats[seq->engine].sw_pattern++;
+ return true;
+ }
+ }
+ return false;
+}
+
+static void mt76x2_dfs_add_events(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_event event;
+ int i, seq_len;
+
+ /* disable debug mode */
+ mt76x2_dfs_set_capture_mode_ctrl(dev, false);
+ for (i = 0; i < MT_DFS_EVENT_LOOP; i++) {
+ if (!mt76x2_dfs_fetch_event(dev, &event))
+ break;
+
+ if (dfs_pd->last_event_ts > event.ts)
+ mt76x2_dfs_detector_reset(dev);
+ dfs_pd->last_event_ts = event.ts;
+
+ if (!mt76x2_dfs_check_event(dev, &event))
+ continue;
+
+ seq_len = mt76x2_dfs_add_event_to_sequence(dev, &event);
+ mt76x2_dfs_create_sequence(dev, &event, seq_len);
+
+ mt76x2_dfs_queue_event(dev, &event);
+ }
+ mt76x2_dfs_set_capture_mode_ctrl(dev, true);
+}
+
+static void mt76x2_dfs_check_event_window(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x2_dfs_event_rb *event_buff;
+ struct mt76x2_dfs_event *event;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
+ event_buff = &dfs_pd->event_rb[i];
+
+ while (event_buff->h_rb != event_buff->t_rb) {
+ event = &event_buff->data[event_buff->h_rb];
+
+ /* entries are in fetch order; stop at the first one still in the window */
+ if (time_is_after_jiffies(event->fetch_ts +
+ MT_DFS_EVENT_WINDOW))
+ break;
+ event_buff->h_rb = mt76_incr(event_buff->h_rb,
+ MT_DFS_EVENT_BUFLEN);
+ }
+ }
+}
+
static void mt76x2_dfs_tasklet(unsigned long arg)
{
struct mt76x2_dev *dev = (struct mt76x2_dev *)arg;
@@ -305,6 +630,24 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
if (test_bit(MT76_SCANNING, &dev->mt76.state))
goto out;
+ if (time_is_before_jiffies(dfs_pd->last_sw_check +
+ MT_DFS_SW_TIMEOUT)) {
+ bool radar_detected;
+
+ dfs_pd->last_sw_check = jiffies;
+
+ mt76x2_dfs_add_events(dev);
+ radar_detected = mt76x2_dfs_check_detection(dev);
+ if (radar_detected) {
+ /* sw detector found a radar pattern */
+ ieee80211_radar_detected(dev->mt76.hw);
+ mt76x2_dfs_detector_reset(dev);
+
+ return;
+ }
+ mt76x2_dfs_check_event_window(dev);
+ }
+
engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
if (!(engine_mask & 0xf))
goto out;
@@ -326,9 +669,7 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
/* hw detector rx radar pattern */
dfs_pd->stats[i].hw_pattern++;
ieee80211_radar_detected(dev->mt76.hw);
-
- /* reset hw detector */
- mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+ mt76x2_dfs_detector_reset(dev);
return;
}
@@ -340,6 +681,32 @@ out:
mt76x2_irq_enable(dev, MT_INT_GPTIMER);
}
+static void mt76x2_dfs_init_sw_detector(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ switch (dev->dfs_pd.region) {
+ case NL80211_DFS_FCC:
+ dfs_pd->sw_dpd_params.max_pri = MT_DFS_FCC_MAX_PRI;
+ dfs_pd->sw_dpd_params.min_pri = MT_DFS_FCC_MIN_PRI;
+ dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
+ break;
+ case NL80211_DFS_ETSI:
+ dfs_pd->sw_dpd_params.max_pri = MT_DFS_ETSI_MAX_PRI;
+ dfs_pd->sw_dpd_params.min_pri = MT_DFS_ETSI_MIN_PRI;
+ dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN << 2;
+ break;
+ case NL80211_DFS_JP:
+ dfs_pd->sw_dpd_params.max_pri = MT_DFS_JP_MAX_PRI;
+ dfs_pd->sw_dpd_params.min_pri = MT_DFS_JP_MIN_PRI;
+ dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN;
+ break;
+ case NL80211_DFS_UNSET:
+ default:
+ break;
+ }
+}
+
static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
{
u32 data;
@@ -462,6 +829,7 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
dev->dfs_pd.region != NL80211_DFS_UNSET) {
+ mt76x2_dfs_init_sw_detector(dev);
mt76x2_dfs_set_bbp_params(dev);
/* enable debug mode */
mt76x2_dfs_set_capture_mode_ctrl(dev, true);
@@ -486,7 +854,10 @@ void mt76x2_dfs_init_detector(struct mt76x2_dev *dev)
{
struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ INIT_LIST_HEAD(&dfs_pd->sequences);
+ INIT_LIST_HEAD(&dfs_pd->seq_pool);
dfs_pd->region = NL80211_DFS_UNSET;
+ dfs_pd->last_sw_check = jiffies;
tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet,
(unsigned long)dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
index 8dbc783cc6bc..693f421bf096 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
@@ -33,6 +33,22 @@
#define MT_DFS_PKT_END_MASK 0
#define MT_DFS_CH_EN 0xf
+/* sw detector params */
+#define MT_DFS_EVENT_LOOP 64
+#define MT_DFS_SW_TIMEOUT (HZ / 20)
+#define MT_DFS_EVENT_WINDOW (HZ / 5)
+#define MT_DFS_SEQUENCE_WINDOW (200 * (1 << 20))
+#define MT_DFS_EVENT_TIME_MARGIN 2000
+#define MT_DFS_PRI_MARGIN 4
+#define MT_DFS_SEQUENCE_TH 6
+
+#define MT_DFS_FCC_MAX_PRI ((28570 << 1) + 1000)
+#define MT_DFS_FCC_MIN_PRI (3000 - 2)
+#define MT_DFS_JP_MAX_PRI ((80000 << 1) + 1000)
+#define MT_DFS_JP_MIN_PRI (28500 - 2)
+#define MT_DFS_ETSI_MAX_PRI (133333 + 125000 + 117647 + 1000)
+#define MT_DFS_ETSI_MIN_PRI (4500 - 20)
+
struct mt76x2_radar_specs {
u8 mode;
u16 avg_len;
@@ -50,6 +66,32 @@ struct mt76x2_radar_specs {
u16 pwr_jmp;
};
+#define MT_DFS_CHECK_EVENT(x) ((x) != GENMASK(31, 0))
+#define MT_DFS_EVENT_ENGINE(x) (((x) & BIT(31)) ? 2 : 0)
+#define MT_DFS_EVENT_TIMESTAMP(x) ((x) & GENMASK(21, 0))
+#define MT_DFS_EVENT_WIDTH(x) ((x) & GENMASK(11, 0))
+struct mt76x2_dfs_event {
+ unsigned long fetch_ts;
+ u32 ts;
+ u16 width;
+ u8 engine;
+};
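A standalone decode of the MT_DFS_EVENT_* field macros just above, with
GENMASK()/BIT() expanded by hand. Note that in the driver the timestamp and
width come from successive reads of DFS_R37, so masking one word for both
fields here is only an illustration of the masks; the sample word is made up:

#include <assert.h>
#include <stdint.h>

#define EVT_ENGINE(x)		(((x) & 0x80000000u) ? 2 : 0)	/* BIT(31) */
#define EVT_TIMESTAMP(x)	((x) & 0x003fffffu)	/* GENMASK(21, 0) */
#define EVT_WIDTH(x)		((x) & 0x00000fffu)	/* GENMASK(11, 0) */

int main(void)
{
	uint32_t w = 0x80000123u;	/* engine 2, low bits = 0x123 */

	assert(w != 0xffffffffu);	/* not the "no event" marker */
	assert(EVT_ENGINE(w) == 2);
	assert(EVT_TIMESTAMP(w) == 0x123);
	assert(EVT_WIDTH(w) == 0x123);
	return 0;
}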
+
+#define MT_DFS_EVENT_BUFLEN 256
+struct mt76x2_dfs_event_rb {
+ struct mt76x2_dfs_event data[MT_DFS_EVENT_BUFLEN];
+ int h_rb, t_rb;
+};
+
+struct mt76x2_dfs_sequence {
+ struct list_head head;
+ u32 first_ts;
+ u32 last_ts;
+ u32 pri;
+ u16 count;
+ u8 engine;
+};
+
struct mt76x2_dfs_hw_pulse {
u8 engine;
u32 period;
@@ -58,9 +100,21 @@ struct mt76x2_dfs_hw_pulse {
u32 burst;
};
+struct mt76x2_dfs_sw_detector_params {
+ u32 min_pri;
+ u32 max_pri;
+ u32 pri_margin;
+};
+
struct mt76x2_dfs_engine_stats {
u32 hw_pattern;
u32 hw_pulse_discarded;
+ u32 sw_pattern;
+};
+
+struct mt76x2_dfs_seq_stats {
+ u32 seq_pool_len;
+ u32 seq_len;
};
struct mt76x2_dfs_pattern_detector {
@@ -69,6 +123,16 @@ struct mt76x2_dfs_pattern_detector {
u8 chirp_pulse_cnt;
u32 chirp_pulse_ts;
+ struct mt76x2_dfs_sw_detector_params sw_dpd_params;
+ struct mt76x2_dfs_event_rb event_rb[2];
+
+ struct list_head sequences;
+ struct list_head seq_pool;
+ struct mt76x2_dfs_seq_stats seq_stats;
+
+ unsigned long last_sw_check;
+ u32 last_event_ts;
+
struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
struct tasklet_struct dfs_tasklet;
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
index fd1ec4743e0b..6720a6a1313f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
@@ -66,27 +66,6 @@ mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
return 0;
}
-void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
-{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
- void *rxwi = skb->data;
-
- if (q == MT_RXQ_MCU) {
- skb_queue_tail(&dev->mcu.res_q, skb);
- wake_up(&dev->mcu.wait);
- return;
- }
-
- skb_pull(skb, sizeof(struct mt76x2_rxwi));
- if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
- dev_kfree_skb(skb);
- return;
- }
-
- mt76_rx(&dev->mt76, q, skb);
-}
-
static int
mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
index e9d426bbf91a..da294558c268 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
@@ -19,34 +19,6 @@
#include "dma.h"
-#define MT_TXD_INFO_LEN GENMASK(15, 0)
-#define MT_TXD_INFO_NEXT_VLD BIT(16)
-#define MT_TXD_INFO_TX_BURST BIT(17)
-#define MT_TXD_INFO_80211 BIT(19)
-#define MT_TXD_INFO_TSO BIT(20)
-#define MT_TXD_INFO_CSO BIT(21)
-#define MT_TXD_INFO_WIV BIT(24)
-#define MT_TXD_INFO_QSEL GENMASK(26, 25)
-#define MT_TXD_INFO_DPORT GENMASK(29, 27)
-#define MT_TXD_INFO_TYPE GENMASK(31, 30)
-
-#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
-#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
-#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
-#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
-#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
-#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
-#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
-#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
-
-/* MCU request message header */
-#define MT_MCU_MSG_LEN GENMASK(15, 0)
-#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
-#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
-#define MT_MCU_MSG_PORT GENMASK(29, 27)
-#define MT_MCU_MSG_TYPE GENMASK(31, 30)
-#define MT_MCU_MSG_TYPE_CMD BIT(30)
-
enum mt76x2_qsel {
MT_QSEL_MGMT,
MT_QSEL_HCCA,
@@ -54,14 +26,4 @@ enum mt76x2_qsel {
MT_QSEL_EDCA_2,
};
-enum dma_msg_port {
- WLAN_PORT,
- CPU_RX_PORT,
- CPU_TX_PORT,
- HOST_PORT,
- VIRTUAL_CPU_RX_PORT,
- VIRTUAL_CPU_TX_PORT,
- DISCARD,
-};
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
index 95d5f7d888f0..1753bcb36356 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
@@ -40,8 +40,7 @@ mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
return 0;
}
-static void
-mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
+void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
{
u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
@@ -58,6 +57,7 @@ mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
break;
}
}
+EXPORT_SYMBOL_GPL(mt76x2_eeprom_parse_hw_cap);
static int
mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
@@ -415,6 +415,7 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8);
}
+EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
static s8
mt76x2_rate_power_val(u8 val)
@@ -482,6 +483,7 @@ void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
val >>= 8;
t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8);
}
+EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);
int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
{
@@ -493,6 +495,7 @@ int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
return ret;
}
+EXPORT_SYMBOL_GPL(mt76x2_get_max_rate_power);
static void
mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
@@ -600,6 +603,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
t->delta_bw40 = mt76x2_rate_power_val(bw40);
t->delta_bw80 = mt76x2_rate_power_val(bw80);
}
+EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
{
@@ -632,6 +636,7 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
return 0;
}
+EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp);
bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
{
@@ -642,6 +647,7 @@ bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
else
return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
}
+EXPORT_SYMBOL_GPL(mt76x2_ext_pa_enabled);
int mt76x2_eeprom_init(struct mt76x2_dev *dev)
{
@@ -658,3 +664,6 @@ int mt76x2_eeprom_init(struct mt76x2_dev *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
index aa0b0c040375..0f3e4d2f4fee 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
@@ -155,6 +155,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
+void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev);
static inline bool
mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
index 79ab93613e06..b814391f79ac 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
@@ -19,39 +19,6 @@
#include "mt76x2_eeprom.h"
#include "mt76x2_mcu.h"
-struct mt76x2_reg_pair {
- u32 reg;
- u32 value;
-};
-
-static bool
-mt76x2_wait_for_mac(struct mt76x2_dev *dev)
-{
- int i;
-
- for (i = 0; i < 500; i++) {
- switch (mt76_rr(dev, MT_MAC_CSR0)) {
- case 0:
- case ~0:
- break;
- default:
- return true;
- }
- usleep_range(5000, 10000);
- }
-
- return false;
-}
-
-static bool
-wait_for_wpdma(struct mt76x2_dev *dev)
-{
- return mt76_poll(dev, MT_WPDMA_GLO_CFG,
- MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
- MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
- 0, 1000);
-}
-
static void
mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
{
@@ -71,107 +38,6 @@ mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
}
static void
-mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
- const struct mt76x2_reg_pair *data, int len)
-{
- while (len > 0) {
- mt76_wr(dev, data->reg, data->value);
- len--;
- data++;
- }
-}
-
-static void
-mt76_write_mac_initvals(struct mt76x2_dev *dev)
-{
-#define DEFAULT_PROT_CFG \
- (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
- FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
- FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
- MT_PROT_CFG_RTS_THRESH)
-
-#define DEFAULT_PROT_CFG_20 \
- (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
- FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
- FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
- FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
-
-#define DEFAULT_PROT_CFG_40 \
- (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \
- FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
- FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
- FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
-
- static const struct mt76x2_reg_pair vals[] = {
- /* Copied from MediaTek reference source */
- { MT_PBF_SYS_CTRL, 0x00080c00 },
- { MT_PBF_CFG, 0x1efebcff },
- { MT_FCE_PSE_CTRL, 0x00000001 },
- { MT_MAC_SYS_CTRL, 0x0000000c },
- { MT_MAX_LEN_CFG, 0x003e3f00 },
- { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
- { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
- { MT_XIFS_TIME_CFG, 0x33a40d0a },
- { MT_BKOFF_SLOT_CFG, 0x00000209 },
- { MT_TBTT_SYNC_CFG, 0x00422010 },
- { MT_PWR_PIN_CFG, 0x00000000 },
- { 0x1238, 0x001700c8 },
- { MT_TX_SW_CFG0, 0x00101001 },
- { MT_TX_SW_CFG1, 0x00010000 },
- { MT_TX_SW_CFG2, 0x00000000 },
- { MT_TXOP_CTRL_CFG, 0x0400583f },
- { MT_TX_RTS_CFG, 0x00100020 },
- { MT_TX_TIMEOUT_CFG, 0x000a2290 },
- { MT_TX_RETRY_CFG, 0x47f01f0f },
- { MT_EXP_ACK_TIME, 0x002c00dc },
- { MT_TX_PROT_CFG6, 0xe3f42004 },
- { MT_TX_PROT_CFG7, 0xe3f42084 },
- { MT_TX_PROT_CFG8, 0xe3f42104 },
- { MT_PIFS_TX_CFG, 0x00060fff },
- { MT_RX_FILTR_CFG, 0x00015f97 },
- { MT_LEGACY_BASIC_RATE, 0x0000017f },
- { MT_HT_BASIC_RATE, 0x00004003 },
- { MT_PN_PAD_MODE, 0x00000003 },
- { MT_TXOP_HLDR_ET, 0x00000002 },
- { 0xa44, 0x00000000 },
- { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
- { MT_TSO_CTRL, 0x00000000 },
- { MT_AUX_CLK_CFG, 0x00000000 },
- { MT_DACCLK_EN_DLY_CFG, 0x00000000 },
- { MT_TX_ALC_CFG_4, 0x00000000 },
- { MT_TX_ALC_VGA3, 0x00000000 },
- { MT_TX_PWR_CFG_0, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_1, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_2, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_3, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_4, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_7, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_8, 0x0000003a },
- { MT_TX_PWR_CFG_9, 0x0000003a },
- { MT_EFUSE_CTRL, 0x0000d000 },
- { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a },
- { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 },
- { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 },
- { MT_TX_SW_CFG3, 0x00000004 },
- { MT_HT_FBK_TO_LEGACY, 0x00001818 },
- { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
- { MT_PROT_AUTO_TX_CFG, 0x00830083 },
- { MT_HT_CTRL_CFG, 0x000001ff },
- };
- struct mt76x2_reg_pair prot_vals[] = {
- { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG },
- { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG },
- { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 },
- { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 },
- { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 },
- { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 },
- };
-
- mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
- mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
-}
-
-static void
mt76x2_fixup_xtal(struct mt76x2_dev *dev)
{
u16 eep_val;
@@ -360,41 +226,6 @@ int mt76x2_mac_start(struct mt76x2_dev *dev)
return 0;
}
-void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
-{
- bool stopped = false;
- u32 rts_cfg;
- int i;
-
- mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
-
- rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
- mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
-
- /* Wait for MAC to become idle */
- for (i = 0; i < 300; i++) {
- if ((mt76_rr(dev, MT_MAC_STATUS) &
- (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
- mt76_rr(dev, MT_BBP(IBI, 12))) {
- udelay(1);
- continue;
- }
-
- stopped = true;
- break;
- }
-
- if (force && !stopped) {
- mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
- mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
-
- mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
- mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
- }
-
- mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
-}
-
void mt76x2_mac_resume(struct mt76x2_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL,
@@ -498,45 +329,6 @@ void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
MT_TX_TIMEOUT_CFG_ACKTO, ackto);
}
-static void
-mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
-{
- u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
-
- if (enable)
- val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
- MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
- else
- val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
- MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
-
- mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
- udelay(20);
-}
-
-static void
-mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
-{
- u32 val;
-
- val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
-
- val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
-
- if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
- val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
- mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
- udelay(20);
-
- val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
- }
-
- mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
- udelay(20);
-
- mt76x2_set_wlan_state(dev, enable);
-}
-
int mt76x2_init_hardware(struct mt76x2_dev *dev)
{
static const u16 beacon_offsets[16] = {
@@ -567,11 +359,6 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev)
tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
(unsigned long) dev);
- dev->chainmask = 0x202;
- dev->global_wcid.idx = 255;
- dev->global_wcid.hw_key_idx = -1;
- dev->slottime = 9;
-
val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
MT_WPDMA_GLO_CFG_BIG_ENDIAN |
@@ -663,34 +450,6 @@ static void mt76x2_regd_notifier(struct wiphy *wiphy,
mt76x2_dfs_set_domain(dev, request->dfs_region);
}
-#define CCK_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
- .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
- .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
-}
-
-#define OFDM_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
- .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
-}
-
-static struct ieee80211_rate mt76x2_rates[] = {
- CCK_RATE(0, 10),
- CCK_RATE(1, 20),
- CCK_RATE(2, 55),
- CCK_RATE(3, 110),
- OFDM_RATE(0, 60),
- OFDM_RATE(1, 90),
- OFDM_RATE(2, 120),
- OFDM_RATE(3, 180),
- OFDM_RATE(4, 240),
- OFDM_RATE(5, 360),
- OFDM_RATE(6, 480),
- OFDM_RATE(7, 540),
-};
-
static const struct ieee80211_iface_limit if_limits[] = {
{
.max = 1,
@@ -767,37 +526,6 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
mt76x2_led_set_config(mt76, 0xff, 0);
}
-static void
-mt76x2_init_txpower(struct mt76x2_dev *dev,
- struct ieee80211_supported_band *sband)
-{
- struct ieee80211_channel *chan;
- struct mt76x2_tx_power_info txp;
- struct mt76_rate_power t = {};
- int target_power;
- int i;
-
- for (i = 0; i < sband->n_channels; i++) {
- chan = &sband->channels[i];
-
- mt76x2_get_power_info(dev, &txp, chan);
-
- target_power = max_t(int, (txp.chain[0].target_power +
- txp.chain[0].delta),
- (txp.chain[1].target_power +
- txp.chain[1].delta));
-
- mt76x2_get_rate_power(dev, &t, chan);
-
- chan->max_power = mt76x2_get_max_rate_power(&t) +
- target_power;
- chan->max_power /= 2;
-
- /* convert to combined output power on 2x2 devices */
- chan->max_power += 3;
- }
-}
-
int mt76x2_register_device(struct mt76x2_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
@@ -812,20 +540,15 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
return -ENOMEM;
kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
+ INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+
+ mt76x2_init_device(dev);
ret = mt76x2_init_hardware(dev);
if (ret)
return ret;
- hw->queues = 4;
- hw->max_rates = 1;
- hw->max_report_rates = 7;
- hw->max_rate_tries = 1;
- hw->extra_tx_headroom = 2;
-
- hw->sta_data_size = sizeof(struct mt76x2_sta);
- hw->vif_data_size = sizeof(struct mt76x2_vif);
-
for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
u8 *addr = dev->macaddr_list[i].addr;
@@ -845,16 +568,15 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
wiphy->reg_notifier = mt76x2_regd_notifier;
- wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
-
- ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
- ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
-
- INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
- INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC);
- dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
mt76x2_dfs_init_detector(dev);
@@ -862,9 +584,6 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
- /* init antenna configuration */
- dev->mt76.antenna_mask = 3;
-
ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
ARRAY_SIZE(mt76x2_rates));
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
new file mode 100644
index 000000000000..324b2a4b8b67
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+}
+
+struct ieee80211_rate mt76x2_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+EXPORT_SYMBOL_GPL(mt76x2_rates);
+
+struct mt76x2_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
+static void
+mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
+{
+ u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+}
+
+void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+ }
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ mt76x2_set_wlan_state(dev, enable);
+}
+EXPORT_SYMBOL_GPL(mt76x2_reset_wlan);
+
+static void
+mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
+ const struct mt76x2_reg_pair *data, int len)
+{
+ while (len > 0) {
+ mt76_wr(dev, data->reg, data->value);
+ len--;
+ data++;
+ }
+}
+
+void mt76_write_mac_initvals(struct mt76x2_dev *dev)
+{
+#define DEFAULT_PROT_CFG_CCK \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x3) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
+ MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_OFDM \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
+ MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_20 \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
+ FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
+
+#define DEFAULT_PROT_CFG_40 \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \
+ FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
+
+ static const struct mt76x2_reg_pair vals[] = {
+ /* Copied from MediaTek reference source */
+ { MT_PBF_SYS_CTRL, 0x00080c00 },
+ { MT_PBF_CFG, 0x1efebcff },
+ { MT_FCE_PSE_CTRL, 0x00000001 },
+ { MT_MAC_SYS_CTRL, 0x0000000c },
+ { MT_MAX_LEN_CFG, 0x003e3f00 },
+ { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
+ { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
+ { MT_XIFS_TIME_CFG, 0x33a40d0a },
+ { MT_BKOFF_SLOT_CFG, 0x00000209 },
+ { MT_TBTT_SYNC_CFG, 0x00422010 },
+ { MT_PWR_PIN_CFG, 0x00000000 },
+ { 0x1238, 0x001700c8 },
+ { MT_TX_SW_CFG0, 0x00101001 },
+ { MT_TX_SW_CFG1, 0x00010000 },
+ { MT_TX_SW_CFG2, 0x00000000 },
+ { MT_TXOP_CTRL_CFG, 0x0400583f },
+ { MT_TX_RTS_CFG, 0x00100020 },
+ { MT_TX_TIMEOUT_CFG, 0x000a2290 },
+ { MT_TX_RETRY_CFG, 0x47f01f0f },
+ { MT_EXP_ACK_TIME, 0x002c00dc },
+ { MT_TX_PROT_CFG6, 0xe3f42004 },
+ { MT_TX_PROT_CFG7, 0xe3f42084 },
+ { MT_TX_PROT_CFG8, 0xe3f42104 },
+ { MT_PIFS_TX_CFG, 0x00060fff },
+ { MT_RX_FILTR_CFG, 0x00015f97 },
+ { MT_LEGACY_BASIC_RATE, 0x0000017f },
+ { MT_HT_BASIC_RATE, 0x00004003 },
+ { MT_PN_PAD_MODE, 0x00000003 },
+ { MT_TXOP_HLDR_ET, 0x00000002 },
+ { 0xa44, 0x00000000 },
+ { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
+ { MT_TSO_CTRL, 0x00000000 },
+ { MT_AUX_CLK_CFG, 0x00000000 },
+ { MT_DACCLK_EN_DLY_CFG, 0x00000000 },
+ { MT_TX_ALC_CFG_4, 0x00000000 },
+ { MT_TX_ALC_VGA3, 0x00000000 },
+ { MT_TX_PWR_CFG_0, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_1, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_2, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_3, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_4, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_7, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_8, 0x0000003a },
+ { MT_TX_PWR_CFG_9, 0x0000003a },
+ { MT_EFUSE_CTRL, 0x0000d000 },
+ { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a },
+ { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 },
+ { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 },
+ { MT_TX_SW_CFG3, 0x00000004 },
+ { MT_HT_FBK_TO_LEGACY, 0x00001818 },
+ { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
+ { MT_PROT_AUTO_TX_CFG, 0x00830083 },
+ { MT_HT_CTRL_CFG, 0x000001ff },
+ };
+ struct mt76x2_reg_pair prot_vals[] = {
+ { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK },
+ { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG_OFDM },
+ { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 },
+ { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 },
+ { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 },
+ { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 },
+ };
+
+ mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
+ mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
+}
+EXPORT_SYMBOL_GPL(mt76_write_mac_initvals);
+
+void mt76x2_init_device(struct mt76x2_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ hw->queues = 4;
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+ hw->extra_tx_headroom = 2;
+
+ hw->sta_data_size = sizeof(struct mt76x2_sta);
+ hw->vif_data_size = sizeof(struct mt76x2_vif);
+
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+ dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ dev->chainmask = 0x202;
+ dev->global_wcid.idx = 255;
+ dev->global_wcid.hw_key_idx = -1;
+ dev->slottime = 9;
+
+ /* init antenna configuration */
+ dev->mt76.antenna_mask = 3;
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_device);
+
+void mt76x2_init_txpower(struct mt76x2_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_channel *chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76_rate_power t = {};
+ int target_power;
+ int i;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ target_power = max_t(int, (txp.chain[0].target_power +
+ txp.chain[0].delta),
+ (txp.chain[1].target_power +
+ txp.chain[1].delta));
+
+ mt76x2_get_rate_power(dev, &t, chan);
+
+ chan->max_power = mt76x2_get_max_rate_power(&t) +
+ target_power;
+ chan->max_power /= 2;
+
+ /* convert to combined output power on 2x2 devices */
+ chan->max_power += 3;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_txpower);
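The max_power arithmetic above, worked through once: the per-rate and target
powers come from the EEPROM in 0.5 dB steps (an assumption consistent with
the final divide by two), so the sum is halved to get whole dB, then 3 dB is
added to express the combined output of both TX chains. A sketch with
illustrative EEPROM values:

#include <assert.h>

int main(void)
{
	int target_power = 30;	/* 15.0 dBm in 0.5 dB units (example) */
	int max_rate_power = 8;	/* +4.0 dB best-rate offset (example) */
	int max_power;

	max_power = (max_rate_power + target_power) / 2;	/* -> 19 dBm */
	max_power += 3;		/* 2x2 combining gain -> 22 dBm */
	assert(max_power == 22);
	return 0;
}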
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
index b49aea4da2d6..23cf437d14f9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
@@ -28,500 +28,12 @@ void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
get_unaligned_le16(addr + 4));
}
-static int
-mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
-{
- u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
- switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
- case MT_PHY_TYPE_OFDM:
- if (idx >= 8)
- idx = 0;
-
- if (status->band == NL80211_BAND_2GHZ)
- idx += 4;
-
- status->rate_idx = idx;
- return 0;
- case MT_PHY_TYPE_CCK:
- if (idx >= 8) {
- idx -= 8;
- status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
- }
-
- if (idx >= 4)
- idx = 0;
-
- status->rate_idx = idx;
- return 0;
- case MT_PHY_TYPE_HT_GF:
- status->enc_flags |= RX_ENC_FLAG_HT_GF;
- /* fall through */
- case MT_PHY_TYPE_HT:
- status->encoding = RX_ENC_HT;
- status->rate_idx = idx;
- break;
- case MT_PHY_TYPE_VHT:
- status->encoding = RX_ENC_VHT;
- status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
- status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
- break;
- default:
- return -EINVAL;
- }
-
- if (rate & MT_RXWI_RATE_LDPC)
- status->enc_flags |= RX_ENC_FLAG_LDPC;
-
- if (rate & MT_RXWI_RATE_SGI)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
-
- if (rate & MT_RXWI_RATE_STBC)
- status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
-
- switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
- case MT_PHY_BW_20:
- break;
- case MT_PHY_BW_40:
- status->bw = RATE_INFO_BW_40;
- break;
- case MT_PHY_BW_80:
- status->bw = RATE_INFO_BW_80;
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static __le16
-mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
- const struct ieee80211_tx_rate *rate, u8 *nss_val)
-{
- u16 rateval;
- u8 phy, rate_idx;
- u8 nss = 1;
- u8 bw = 0;
-
- if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- rate_idx = rate->idx;
- nss = 1 + (rate->idx >> 4);
- phy = MT_PHY_TYPE_VHT;
- if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- bw = 2;
- else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- bw = 1;
- } else if (rate->flags & IEEE80211_TX_RC_MCS) {
- rate_idx = rate->idx;
- nss = 1 + (rate->idx >> 3);
- phy = MT_PHY_TYPE_HT;
- if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
- phy = MT_PHY_TYPE_HT_GF;
- if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- bw = 1;
- } else {
- const struct ieee80211_rate *r;
- int band = dev->mt76.chandef.chan->band;
- u16 val;
-
- r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
- if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- val = r->hw_value_short;
- else
- val = r->hw_value;
-
- phy = val >> 8;
- rate_idx = val & 0xff;
- bw = 0;
- }
-
- rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
- rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
- rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- rateval |= MT_RXWI_RATE_SGI;
-
- *nss_val = nss;
- return cpu_to_le16(rateval);
-}
-
-void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
-{
- u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
- u32 bit = MT_WCID_DROP_MASK(idx);
-
- /* prevent unnecessary writes */
- if ((val & bit) != (bit * drop))
- mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
-}
-
-void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
- const struct ieee80211_tx_rate *rate)
-{
- spin_lock_bh(&dev->mt76.lock);
- wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
- wcid->tx_rate_set = true;
- spin_unlock_bh(&dev->mt76.lock);
-}
-
-void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rate = &info->control.rates[0];
- struct ieee80211_key_conf *key = info->control.hw_key;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
- u16 txwi_flags = 0;
- u8 nss;
- s8 txpwr_adj, max_txpwr_adj;
- u8 ccmp_pn[8];
-
- memset(txwi, 0, sizeof(*txwi));
-
- if (wcid)
- txwi->wcid = wcid->idx;
- else
- txwi->wcid = 0xff;
-
- txwi->pktid = 1;
-
- if (wcid && wcid->sw_iv && key) {
- u64 pn = atomic64_inc_return(&key->tx_pn);
- ccmp_pn[0] = pn;
- ccmp_pn[1] = pn >> 8;
- ccmp_pn[2] = 0;
- ccmp_pn[3] = 0x20 | (key->keyidx << 6);
- ccmp_pn[4] = pn >> 16;
- ccmp_pn[5] = pn >> 24;
- ccmp_pn[6] = pn >> 32;
- ccmp_pn[7] = pn >> 40;
- txwi->iv = *((__le32 *)&ccmp_pn[0]);
- txwi->eiv = *((__le32 *)&ccmp_pn[1]);
- }
-
- spin_lock_bh(&dev->mt76.lock);
- if (wcid && (rate->idx < 0 || !rate->count)) {
- txwi->rate = wcid->tx_rate;
- max_txpwr_adj = wcid->max_txpwr_adj;
- nss = wcid->tx_rate_nss;
- } else {
- txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
- max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
- }
- spin_unlock_bh(&dev->mt76.lock);
-
- txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
- max_txpwr_adj);
- txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
-
- if (mt76xx_rev(dev) >= MT76XX_REV_E4)
- txwi->txstream = 0x13;
- else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
- !(txwi->rate & cpu_to_le16(rate_ht_mask)))
- txwi->txstream = 0x93;
-
- if (info->flags & IEEE80211_TX_CTL_LDPC)
- txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
- if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
- txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
- if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
- txwi_flags |= MT_TXWI_FLAGS_MMPS;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
- txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
- txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- txwi->pktid |= MT_TXWI_PKTID_PROBE;
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
- u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
-
- ba_size <<= sta->ht_cap.ampdu_factor;
- ba_size = min_t(int, 63, ba_size - 1);
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- ba_size = 0;
- txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
-
- txwi_flags |= MT_TXWI_FLAGS_AMPDU |
- FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
- sta->ht_cap.ampdu_density);
- }
-
- if (ieee80211_is_probe_resp(hdr->frame_control) ||
- ieee80211_is_beacon(hdr->frame_control))
- txwi_flags |= MT_TXWI_FLAGS_TS;
-
- txwi->flags |= cpu_to_le16(txwi_flags);
- txwi->len_ctl = cpu_to_le16(skb->len);
-}
-
-static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
-{
- int hdrlen;
-
- if (!len)
- return;
-
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- memmove(skb->data + len, skb->data, hdrlen);
- skb_pull(skb, len);
-}
-
-static struct mt76_wcid *
-mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, u8 idx, bool unicast)
-{
- struct mt76x2_sta *sta;
- struct mt76_wcid *wcid;
-
- if (idx >= ARRAY_SIZE(dev->wcid))
- return NULL;
-
- wcid = rcu_dereference(dev->wcid[idx]);
- if (unicast || !wcid)
- return wcid;
-
- sta = container_of(wcid, struct mt76x2_sta, wcid);
- return &sta->vif->group_wcid;
-}
-
-int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
- void *rxi)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
- struct mt76x2_rxwi *rxwi = rxi;
- u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
- u32 ctl = le32_to_cpu(rxwi->ctl);
- u16 rate = le16_to_cpu(rxwi->rate);
- u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
- bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
- int pad_len = 0;
- u8 pn_len;
- u8 wcid;
- int len;
-
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
- return -EINVAL;
-
- if (rxinfo & MT_RXINFO_L2PAD)
- pad_len += 2;
-
- if (rxinfo & MT_RXINFO_DECRYPT) {
- status->flag |= RX_FLAG_DECRYPTED;
- status->flag |= RX_FLAG_MMIC_STRIPPED;
- status->flag |= RX_FLAG_MIC_STRIPPED;
- status->flag |= RX_FLAG_IV_STRIPPED;
- }
-
- wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
- status->wcid = mt76x2_rx_get_sta_wcid(dev, wcid, unicast);
-
- len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
- pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
- if (pn_len) {
- int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
- u8 *data = skb->data + offset;
-
- status->iv[0] = data[7];
- status->iv[1] = data[6];
- status->iv[2] = data[5];
- status->iv[3] = data[4];
- status->iv[4] = data[1];
- status->iv[5] = data[0];
-
- /*
- * Driver CCMP validation can't deal with fragments.
- * Let mac80211 take care of it.
- */
- if (rxinfo & MT_RXINFO_FRAG) {
- status->flag &= ~RX_FLAG_IV_STRIPPED;
- } else {
- pad_len += pn_len << 2;
- len -= pn_len << 2;
- }
- }
-
- mt76x2_remove_hdr_pad(skb, pad_len);
-
- if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
- status->aggr = true;
-
- if (WARN_ON_ONCE(len > skb->len))
- return -EINVAL;
-
- pskb_trim(skb, len);
- status->chains = BIT(0) | BIT(1);
- status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0);
- status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1);
- status->signal = max(status->chain_signal[0], status->chain_signal[1]);
- status->freq = dev->mt76.chandef.chan->center_freq;
- status->band = dev->mt76.chandef.chan->band;
-
- status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
- status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
-
- return mt76x2_mac_process_rate(status, rate);
-}
-
-static int
-mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
- enum nl80211_band band)
-{
- u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
- txrate->idx = 0;
- txrate->flags = 0;
- txrate->count = 1;
-
- switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
- case MT_PHY_TYPE_OFDM:
- if (band == NL80211_BAND_2GHZ)
- idx += 4;
-
- txrate->idx = idx;
- return 0;
- case MT_PHY_TYPE_CCK:
- if (idx >= 8)
- idx -= 8;
-
- txrate->idx = idx;
- return 0;
- case MT_PHY_TYPE_HT_GF:
- txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- /* fall through */
- case MT_PHY_TYPE_HT:
- txrate->flags |= IEEE80211_TX_RC_MCS;
- txrate->idx = idx;
- break;
- case MT_PHY_TYPE_VHT:
- txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
- txrate->idx = idx;
- break;
- default:
- return -EINVAL;
- }
-
- switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
- case MT_PHY_BW_20:
- break;
- case MT_PHY_BW_40:
- txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- break;
- case MT_PHY_BW_80:
- txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
- break;
- default:
- return -EINVAL;
- }
-
- if (rate & MT_RXWI_RATE_SGI)
- txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
-
- return 0;
-}
-
-static void
-mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
- struct ieee80211_tx_info *info,
- struct mt76x2_tx_status *st, int n_frames)
-{
- struct ieee80211_tx_rate *rate = info->status.rates;
- int cur_idx, last_rate;
- int i;
-
- if (!n_frames)
- return;
-
- last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
- mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
- dev->mt76.chandef.chan->band);
- if (last_rate < IEEE80211_TX_MAX_RATES - 1)
- rate[last_rate + 1].idx = -1;
-
- cur_idx = rate[last_rate].idx + st->retry;
- for (i = 0; i <= last_rate; i++) {
- rate[i].flags = rate[last_rate].flags;
- rate[i].idx = max_t(int, 0, cur_idx - i);
- rate[i].count = 1;
- }
-
- if (last_rate > 0)
- rate[last_rate - 1].count = st->retry + 1 - last_rate;
-
- info->status.ampdu_len = n_frames;
- info->status.ampdu_ack_len = st->success ? n_frames : 0;
-
- if (st->pktid & MT_TXWI_PKTID_PROBE)
- info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
-
- if (st->aggr)
- info->flags |= IEEE80211_TX_CTL_AMPDU |
- IEEE80211_TX_STAT_AMPDU;
-
- if (!st->ack_req)
- info->flags |= IEEE80211_TX_CTL_NO_ACK;
- else if (st->success)
- info->flags |= IEEE80211_TX_STAT_ACK;
-}
-
-static void
-mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
- u8 *update)
-{
- struct ieee80211_tx_info info = {};
- struct ieee80211_sta *sta = NULL;
- struct mt76_wcid *wcid = NULL;
- struct mt76x2_sta *msta = NULL;
-
- rcu_read_lock();
- if (stat->wcid < ARRAY_SIZE(dev->wcid))
- wcid = rcu_dereference(dev->wcid[stat->wcid]);
-
- if (wcid) {
- void *priv;
-
- priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
- sta = container_of(priv, struct ieee80211_sta,
- drv_priv);
- }
-
- if (msta && stat->aggr) {
- u32 stat_val, stat_cache;
-
- stat_val = stat->rate;
- stat_val |= ((u32) stat->retry) << 16;
- stat_cache = msta->status.rate;
- stat_cache |= ((u32) msta->status.retry) << 16;
-
- if (*update == 0 && stat_val == stat_cache &&
- stat->wcid == msta->status.wcid && msta->n_frames < 32) {
- msta->n_frames++;
- goto out;
- }
-
- mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
- msta->n_frames);
-
- msta->status = *stat;
- msta->n_frames = 1;
- *update = 0;
- } else {
- mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
- *update = 1;
- }
-
- ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
-
-out:
- rcu_read_unlock();
-}
-
void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
{
struct mt76x2_tx_status stat = {};
unsigned long flags;
u8 update = 1;
+ bool ret;
if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
return;
@@ -529,26 +41,13 @@ void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
trace_mac_txstat_poll(dev);
while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
- u32 stat1, stat2;
-
spin_lock_irqsave(&dev->irq_lock, flags);
- stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
- stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
- if (!(stat1 & MT_TX_STAT_FIFO_VALID)) {
- spin_unlock_irqrestore(&dev->irq_lock, flags);
- break;
- }
-
+ ret = mt76x2_mac_load_tx_status(dev, &stat);
spin_unlock_irqrestore(&dev->irq_lock, flags);
- stat.valid = 1;
- stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
- stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
- stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
- stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
- stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
- stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
- stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+ if (!ret)
+ break;
+
trace_mac_txstat_fetch(dev, &stat);
if (!irq) {
@@ -597,104 +96,6 @@ void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
dev_kfree_skb_any(e->skb);
}
-static enum mt76x2_cipher_type
-mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
-{
- memset(key_data, 0, 32);
- if (!key)
- return MT_CIPHER_NONE;
-
- if (key->keylen > 32)
- return MT_CIPHER_NONE;
-
- memcpy(key_data, key->key, key->keylen);
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MT_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MT_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MT_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_CCMP:
- return MT_CIPHER_AES_CCMP;
- default:
- return MT_CIPHER_NONE;
- }
-}
-
-void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
-{
- struct mt76_wcid_addr addr = {};
- u32 attr;
-
- attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
- FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
-
- mt76_wr(dev, MT_WCID_ATTR(idx), attr);
-
- mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
- mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
-
- if (idx >= 128)
- return;
-
- if (mac)
- memcpy(addr.macaddr, mac, ETH_ALEN);
-
- mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
-}
-
-int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
- struct ieee80211_key_conf *key)
-{
- enum mt76x2_cipher_type cipher;
- u8 key_data[32];
- u8 iv_data[8];
-
- cipher = mt76x2_mac_get_key_info(key, key_data);
- if (cipher == MT_CIPHER_NONE && key)
- return -EOPNOTSUPP;
-
- mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
- mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
-
- memset(iv_data, 0, sizeof(iv_data));
- if (key) {
- mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
- !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
- iv_data[3] = key->keyidx << 6;
- if (cipher >= MT_CIPHER_TKIP)
- iv_data[3] |= 0x20;
- }
-
- mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
-
- return 0;
-}
-
-int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
- struct ieee80211_key_conf *key)
-{
- enum mt76x2_cipher_type cipher;
- u8 key_data[32];
- u32 val;
-
- cipher = mt76x2_mac_get_key_info(key, key_data);
- if (cipher == MT_CIPHER_NONE && key)
- return -EOPNOTSUPP;
-
- val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
- val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
- val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
- mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
-
- mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
- sizeof(key_data));
-
- return 0;
-}
-
static int
mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
{
@@ -704,7 +105,7 @@ mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
return -ENOSPC;
- mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL);
+ mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
offset += sizeof(txwi);
@@ -839,3 +240,33 @@ void mt76x2_mac_work(struct work_struct *work)
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT_CALIBRATE_INTERVAL);
}
+
+void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val)
+{
+ u32 data = 0;
+
+ if (val != ~0)
+ data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
+ MT_PROT_CFG_RTS_THRESH;
+
+ mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
+
+ mt76_rmw(dev, MT_CCK_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_OFDM_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_MM20_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_MM40_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_GF20_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_GF40_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG6,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG7,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG8,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+}
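
The new helper applies one decision to all of the per-PHY-mode protection registers: a real threshold enables protection control plus the RTS trigger everywhere, while ~0 (the "RTS off" sentinel) clears both. A minimal userspace sketch of the read-modify-write pattern, using made-up bit positions (the real MT_PROT_CFG_* fields live in mt76x2_regs.h):

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in bit positions, illustrative only */
	#define PROT_CFG_CTRL		(1u << 29)
	#define PROT_CFG_RTS_THRESH	(1u << 26)

	static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t data)
	{
		/* what mt76_rmw() does over the bus: clear the field, set the new value */
		return (reg & ~mask) | data;
	}

	int main(void)
	{
		uint32_t reg = 0xffffffffu;
		uint32_t val = 2346;	/* ~0 would mean RTS/CTS disabled */
		uint32_t data = 0;

		if (val != ~0u)
			data = PROT_CFG_CTRL | PROT_CFG_RTS_THRESH;

		printf("prot cfg: %08x\n",
		       rmw(reg, PROT_CFG_CTRL | PROT_CFG_RTS_THRESH, data));
		return 0;
	}
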
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
index c048cd06df6b..5af0107ba748 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
@@ -166,7 +166,7 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
void *rxi);
void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta);
+ struct ieee80211_sta *sta, int len);
void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
struct ieee80211_key_conf *key);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
new file mode 100644
index 000000000000..6542644bc325
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
@@ -0,0 +1,699 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
+{
+ bool stopped = false;
+ u32 rts_cfg;
+ int i;
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+ /* Wait for MAC to become idle */
+ for (i = 0; i < 300; i++) {
+ if ((mt76_rr(dev, MT_MAC_STATUS) &
+ (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
+ mt76_rr(dev, MT_BBP(IBI, 12))) {
+ udelay(1);
+ continue;
+ }
+
+ stopped = true;
+ break;
+ }
+
+ if (force && !stopped) {
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+ }
+
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
+
+bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat)
+{
+ u32 stat1, stat2;
+
+ stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+ stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
+ if (!stat->valid)
+ return false;
+
+ stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+ stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+ stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+ stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+ stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+
+ stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+ stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_load_tx_status);
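
The valid bit doubles as the FIFO-empty signal, which is why the poll loop above can simply break on a false return. A self-contained sketch of the bitfield decode, with illustrative field positions rather than the real MT_TX_STAT_FIFO_* layout:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* stand-in positions, not the hardware layout */
	#define STAT_FIFO_VALID		(1u << 0)
	#define STAT_FIFO_SUCCESS	(1u << 1)
	#define STAT_FIFO_WCID(v)	(((v) >> 8) & 0xffu)
	#define STAT_FIFO_RATE(v)	(((v) >> 16) & 0xffffu)

	struct tx_status {
		bool valid, success;
		uint8_t wcid;
		uint16_t rate;
	};

	static bool load_tx_status(uint32_t stat1, struct tx_status *st)
	{
		st->valid = stat1 & STAT_FIFO_VALID;
		if (!st->valid)
			return false;	/* FIFO drained, caller stops polling */

		st->success = stat1 & STAT_FIFO_SUCCESS;
		st->wcid = STAT_FIFO_WCID(stat1);
		st->rate = STAT_FIFO_RATE(stat1);
		return true;
	}

	int main(void)
	{
		struct tx_status st;

		if (load_tx_status(0x00420103u, &st))
			printf("wcid %u rate 0x%04x ok=%d\n",
			       st.wcid, st.rate, st.success);
		return 0;
	}
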
+
+static int
+mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+ enum nl80211_band band)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case MT_PHY_BW_80:
+ txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ return 0;
+}
+
+static void
+mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
+ struct ieee80211_tx_info *info,
+ struct mt76x2_tx_status *st, int n_frames)
+{
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ int cur_idx, last_rate;
+ int i;
+
+ if (!n_frames)
+ return;
+
+ last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+ mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
+ dev->mt76.chandef.chan->band);
+ if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+ rate[last_rate + 1].idx = -1;
+
+ cur_idx = rate[last_rate].idx + last_rate;
+ for (i = 0; i <= last_rate; i++) {
+ rate[i].flags = rate[last_rate].flags;
+ rate[i].idx = max_t(int, 0, cur_idx - i);
+ rate[i].count = 1;
+ }
+ rate[last_rate].count = st->retry + 1 - last_rate;
+
+ info->status.ampdu_len = n_frames;
+ info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+ if (st->pktid & MT_TXWI_PKTID_PROBE)
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+}
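
The status FIFO reports only the final rate and the total retry count, so the function rebuilds the per-slot rate table assuming rate control stepped down one index per attempt; note the refactored version seeds cur_idx with last_rate rather than st->retry and always fixes up the final slot's count. A standalone sketch of that reconstruction:

	#include <stdio.h>

	#define TX_MAX_RATES 4	/* IEEE80211_TX_MAX_RATES in mac80211 */

	int main(void)
	{
		int retry = 5, final_idx = 3;	/* example hardware report */
		int last = retry < TX_MAX_RATES - 1 ? retry : TX_MAX_RATES - 1;
		int idx[TX_MAX_RATES], count[TX_MAX_RATES];
		int cur = final_idx + last, i;

		for (i = 0; i <= last; i++) {
			idx[i] = cur - i > 0 ? cur - i : 0;	/* one step down per slot */
			count[i] = 1;
		}
		count[last] = retry + 1 - last;	/* remaining attempts on the final rate */

		for (i = 0; i <= last; i++)
			printf("slot %d: idx %d x%d\n", i, idx[i], count[i]);
		return 0;
	}
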
+
+void mt76x2_send_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat, u8 *update)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ struct mt76x2_sta *msta = NULL;
+
+ rcu_read_lock();
+ if (stat->wcid < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+ if (wcid) {
+ void *priv;
+
+ priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
+ sta = container_of(priv, struct ieee80211_sta,
+ drv_priv);
+ }
+
+ if (msta && stat->aggr) {
+ u32 stat_val, stat_cache;
+
+ stat_val = stat->rate;
+ stat_val |= ((u32) stat->retry) << 16;
+ stat_cache = msta->status.rate;
+ stat_cache |= ((u32) msta->status.retry) << 16;
+
+ if (*update == 0 && stat_val == stat_cache &&
+ stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+ msta->n_frames++;
+ goto out;
+ }
+
+ mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
+ msta->n_frames);
+
+ msta->status = *stat;
+ msta->n_frames = 1;
+ *update = 0;
+ } else {
+ mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
+ *update = 1;
+ }
+
+ ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(mt76x2_send_tx_status);
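
Status reports for frames of the same A-MPDU are usually identical, so the function packs rate and retry into a single u32 and batches matching reports into n_frames instead of calling ieee80211_tx_status_noskb() per frame. The packing compare in isolation:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t rate = 0x4203;
		uint8_t retry = 2;
		uint32_t cur = rate | ((uint32_t)retry << 16);
		uint32_t cache = 0x00024203;	/* previously seen rate/retry pair */

		/* one compare covers both fields */
		printf(cur == cache ? "coalesce\n" : "flush\n");
		return 0;
	}
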
+
+static enum mt76x2_cipher_type
+mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x2_cipher_type cipher;
+ u8 key_data[32];
+ u32 val;
+
+ cipher = mt76x2_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+ val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+ mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+ sizeof(key_data));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_shared_key_setup);
+
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x2_cipher_type cipher;
+ u8 key_data[32];
+ u8 iv_data[8];
+
+ cipher = mt76x2_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+ mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+ memset(iv_data, 0, sizeof(iv_data));
+ if (key) {
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+ !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+ iv_data[3] = key->keyidx << 6;
+ if (cipher >= MT_CIPHER_TKIP)
+ iv_data[3] |= 0x20;
+ }
+
+ mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_key);
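
The per-WCID IV template written above encodes the key index in bits 7:6 of IV byte 3 and, for TKIP and CCMP, sets the extended-IV bit (0x20) so the hardware builds a matching security header. A two-line illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t iv3 = 0;
		int keyidx = 1, is_tkip_or_ccmp = 1;

		iv3 = keyidx << 6;		/* key ID in bits 7:6 of IV byte 3 */
		if (is_tkip_or_ccmp)
			iv3 |= 0x20;		/* extended IV present */
		printf("iv[3] = 0x%02x\n", iv3);
		return 0;
	}
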
+
+static __le16
+mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+ u16 rateval;
+ u8 phy, rate_idx;
+ u8 nss = 1;
+ u8 bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 4);
+ phy = MT_PHY_TYPE_VHT;
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = 2;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mt76.chandef.chan->band;
+ u16 val;
+
+ r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ bw = 0;
+ }
+
+ rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rateval |= MT_RXWI_RATE_SGI;
+
+ *nss_val = nss;
+ return cpu_to_le16(rateval);
+}
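
mac80211 folds the stream count into the MCS index, so the helper recovers nss arithmetically: VHT carries (nss - 1) above the low 4 MCS bits, HT above 3 bits per stream. For example:

	#include <stdio.h>

	int main(void)
	{
		int vht_idx = 0x13;	/* VHT: nss 2, mcs 3 */
		int ht_idx = 12;	/* HT: mcs 12 implies 2 streams */

		printf("vht nss %d mcs %d\n", 1 + (vht_idx >> 4), vht_idx & 0xf);
		printf("ht  nss %d mcs %d\n", 1 + (ht_idx >> 3), ht_idx);
		return 0;
	}
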
+
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
+{
+ spin_lock_bh(&dev->mt76.lock);
+ wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+ wcid->tx_rate_set = true;
+ spin_unlock_bh(&dev->mt76.lock);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_rate);
+
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+ u16 txwi_flags = 0;
+ u8 nss;
+ s8 txpwr_adj, max_txpwr_adj;
+ u8 ccmp_pn[8];
+
+ memset(txwi, 0, sizeof(*txwi));
+
+ if (wcid)
+ txwi->wcid = wcid->idx;
+ else
+ txwi->wcid = 0xff;
+
+ txwi->pktid = 1;
+
+ if (wcid && wcid->sw_iv && key) {
+ u64 pn = atomic64_inc_return(&key->tx_pn);
+ ccmp_pn[0] = pn;
+ ccmp_pn[1] = pn >> 8;
+ ccmp_pn[2] = 0;
+ ccmp_pn[3] = 0x20 | (key->keyidx << 6);
+ ccmp_pn[4] = pn >> 16;
+ ccmp_pn[5] = pn >> 24;
+ ccmp_pn[6] = pn >> 32;
+ ccmp_pn[7] = pn >> 40;
+ txwi->iv = *((__le32 *)&ccmp_pn[0]);
+ txwi->eiv = *((__le32 *)&ccmp_pn[1]);
+ }
+
+ spin_lock_bh(&dev->mt76.lock);
+ if (wcid && (rate->idx < 0 || !rate->count)) {
+ txwi->rate = wcid->tx_rate;
+ max_txpwr_adj = wcid->max_txpwr_adj;
+ nss = wcid->tx_rate_nss;
+ } else {
+ txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
+ max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
+ }
+ spin_unlock_bh(&dev->mt76.lock);
+
+ txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
+ max_txpwr_adj);
+ txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E4)
+ txwi->txstream = 0x13;
+ else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
+ !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+ txwi->txstream = 0x93;
+
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+ if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+ if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ txwi_flags |= MT_TXWI_FLAGS_MMPS;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ txwi->pktid |= MT_TXWI_PKTID_PROBE;
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 63, ba_size - 1);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ ba_size = 0;
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+ txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+ FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density);
+ }
+
+ if (ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_beacon(hdr->frame_control))
+ txwi_flags |= MT_TXWI_FLAGS_TS;
+
+ txwi->flags |= cpu_to_le16(txwi_flags);
+ txwi->len_ctl = cpu_to_le16(len);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_write_txwi);
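
Inside mt76x2_mac_write_txwi() the block-ack window is derived from the peer's HT A-MPDU factor: IEEE80211_MIN_AMPDU_BUF (8 frames) scaled up, then clamped to the 6-bit txwi field. Worked values:

	#include <stdio.h>

	#define MIN_AMPDU_BUF 8	/* IEEE80211_MIN_AMPDU_BUF */

	int main(void)
	{
		int factor;

		/* BA window per A-MPDU factor, capped at the 6-bit field */
		for (factor = 0; factor <= 3; factor++) {
			int ba = MIN_AMPDU_BUF << factor;

			ba = ba - 1 < 63 ? ba - 1 : 63;
			printf("factor %d -> ba window %d\n", factor, ba);
		}
		return 0;
	}
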
+
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
+{
+ u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
+ u32 bit = MT_WCID_DROP_MASK(idx);
+
+ /* prevent unnecessary writes */
+ if ((val & bit) != (bit * drop))
+ mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_drop);
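
The bit * drop idiom selects between 0 and the mask without a branch, and the compare skips the register write when the stored state already matches, since an MMIO write costs far more than the read. The same logic in plain C:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t set_drop(uint32_t reg, uint32_t bit, bool drop)
	{
		if ((reg & bit) == bit * drop)		/* already in requested state */
			return reg;			/* skip the expensive write */
		return (reg & ~bit) | bit * drop;	/* bool * mask: 0 or the mask */
	}

	int main(void)
	{
		printf("%08x\n", set_drop(0x00, 1u << 5, true));
		printf("%08x\n", set_drop(0x20, 1u << 5, true));	/* no-op */
		return 0;
	}
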
+
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+ struct mt76_wcid_addr addr = {};
+ u32 attr;
+
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
+ mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+
+ if (idx >= 128)
+ return;
+
+ if (mac)
+ memcpy(addr.macaddr, mac, ETH_ALEN);
+
+ mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_setup);
+
+static int
+mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (idx >= 8)
+ idx = 0;
+
+ if (status->band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ status->rate_idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8) {
+ idx -= 8;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ }
+
+ if (idx >= 4)
+ idx = 0;
+
+ status->rate_idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ status->rate_idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->encoding = RX_ENC_VHT;
+ status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+ status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_LDPC)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ if (rate & MT_RXWI_RATE_SGI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate & MT_RXWI_RATE_STBC)
+ status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case MT_PHY_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
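
On the RX side the same rate word is unpacked for mac80211: CCK indices 8..11 are the short-preamble duplicates of 0..3, and OFDM indices shift by 4 on 2.4 GHz because that band's bitrate table starts with the four CCK rates. A compact decode of the CCK case:

	#include <stdio.h>

	int main(void)
	{
		int idx = 9, short_preamble = 0;

		if (idx >= 8) {		/* 8..11: short-preamble CCK variants */
			idx -= 8;
			short_preamble = 1;
		}
		printf("cck rate_idx %d short_pre %d\n", idx, short_preamble);
		return 0;
	}
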
+
+static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
+{
+ int hdrlen;
+
+ if (!len)
+ return;
+
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ memmove(skb->data + len, skb->data, hdrlen);
+ skb_pull(skb, len);
+}
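
When the hardware flags L2PAD it has inserted two alignment bytes between the 802.11 header and the payload; the helper slides the header forward over the gap and then pulls the leading bytes off the skb. The same move on a plain buffer:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* 4-byte "header", 2 pad bytes, then payload */
		char buf[] = "HDR!..payload";
		int hdrlen = 4, pad = 2;

		memmove(buf + pad, buf, hdrlen);	/* slide header up to payload */
		printf("%s\n", buf + pad);		/* skb_pull(skb, pad) equivalent */
		return 0;
	}
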
+
+int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
+{
+ struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
+
+ rssi += cal->rssi_offset[chain];
+ rssi -= cal->lna_gain;
+
+ return rssi;
+}
+
+static struct mt76x2_sta *
+mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx)
+{
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->wcid[idx]);
+ if (!wcid)
+ return NULL;
+
+ return container_of(wcid, struct mt76x2_sta, wcid);
+}
+
+static struct mt76_wcid *
+mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta,
+ bool unicast)
+{
+ if (!sta)
+ return NULL;
+
+ if (unicast)
+ return &sta->wcid;
+ else
+ return &sta->vif->group_wcid;
+}
+
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+ void *rxi)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+ struct mt76x2_rxwi *rxwi = rxi;
+ struct mt76x2_sta *sta;
+ u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
+ u32 ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
+ bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
+ int pad_len = 0;
+ u8 pn_len;
+ u8 wcid;
+ int len;
+
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ return -EINVAL;
+
+ if (rxinfo & MT_RXINFO_L2PAD)
+ pad_len += 2;
+
+ if (rxinfo & MT_RXINFO_DECRYPT) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED;
+ status->flag |= RX_FLAG_MIC_STRIPPED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ }
+
+ wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
+ sta = mt76x2_rx_get_sta(dev, wcid);
+ status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast);
+
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
+ if (pn_len) {
+ int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
+ u8 *data = skb->data + offset;
+
+ status->iv[0] = data[7];
+ status->iv[1] = data[6];
+ status->iv[2] = data[5];
+ status->iv[3] = data[4];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ /*
+ * Driver CCMP validation can't deal with fragments.
+ * Let mac80211 take care of it.
+ */
+ if (rxinfo & MT_RXINFO_FRAG) {
+ status->flag &= ~RX_FLAG_IV_STRIPPED;
+ } else {
+ pad_len += pn_len << 2;
+ len -= pn_len << 2;
+ }
+ }
+
+ mt76x2_remove_hdr_pad(skb, pad_len);
+
+ if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
+ status->aggr = true;
+
+ if (WARN_ON_ONCE(len > skb->len))
+ return -EINVAL;
+
+ pskb_trim(skb, len);
+ status->chains = BIT(0) | BIT(1);
+ status->chain_signal[0] = mt76x2_mac_get_rssi(dev, rxwi->rssi[0], 0);
+ status->chain_signal[1] = mt76x2_mac_get_rssi(dev, rxwi->rssi[1], 1);
+ status->signal = max(status->chain_signal[0], status->chain_signal[1]);
+ status->freq = dev->mt76.chandef.chan->center_freq;
+ status->band = dev->mt76.chandef.chan->band;
+
+ status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
+ status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
+
+ if (sta) {
+ ewma_signal_add(&sta->rssi, status->signal);
+ sta->inactive_count = 0;
+ }
+
+ return mt76x2_mac_process_rate(status, rate);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_process_rx);
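
The IV byte shuffle above takes the on-air CCMP header (PN0, PN1, reserved, key-ID byte, PN2..PN5) and stores the packet number apparently most-significant byte first, the order the driver's own CCMP PN check expects. A sketch, assuming that header layout:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* assumed on-air order: PN0 PN1 rsvd keyid PN2 PN3 PN4 PN5 */
		uint8_t data[8] = { 0x01, 0x02, 0x00, 0x60, 0x03, 0x04, 0x05, 0x06 };
		uint8_t iv[6];
		int i;

		iv[0] = data[7]; iv[1] = data[6]; iv[2] = data[5];
		iv[3] = data[4]; iv[4] = data[1]; iv[5] = data[0];

		/* PN comes out 06 05 04 03 02 01 */
		for (i = 0; i < 6; i++)
			printf("%02x ", iv[i]);
		printf("\n");
		return 0;
	}
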
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
index ce90ff999b49..680a89f8aa87 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
@@ -53,30 +53,6 @@ mt76x2_stop(struct ieee80211_hw *hw)
mutex_unlock(&dev->mutex);
}
-static void
-mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
-{
- struct mt76_txq *mtxq;
-
- if (!txq)
- return;
-
- mtxq = (struct mt76_txq *) txq->drv_priv;
- if (txq->sta) {
- struct mt76x2_sta *sta;
-
- sta = (struct mt76x2_sta *) txq->sta->drv_priv;
- mtxq->wcid = &sta->wcid;
- } else {
- struct mt76x2_vif *mvif;
-
- mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
- mtxq->wcid = &mvif->group_wcid;
- }
-
- mt76_txq_init(&dev->mt76, txq);
-}
-
static int
mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
@@ -111,14 +87,6 @@ mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return 0;
}
-static void
-mt76x2_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct mt76x2_dev *dev = hw->priv;
-
- mt76_txq_remove(&dev->mt76, vif->txq);
-}
-
static int
mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
{
@@ -194,39 +162,6 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
}
static void
-mt76x2_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
- unsigned int *total_flags, u64 multicast)
-{
- struct mt76x2_dev *dev = hw->priv;
- u32 flags = 0;
-
-#define MT76_FILTER(_flag, _hw) do { \
- flags |= *total_flags & FIF_##_flag; \
- dev->rxfilter &= ~(_hw); \
- dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
- } while (0)
-
- mutex_lock(&dev->mutex);
-
- dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
-
- MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
- MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
- MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
- MT_RX_FILTR_CFG_CTS |
- MT_RX_FILTR_CFG_CFEND |
- MT_RX_FILTR_CFG_CFACK |
- MT_RX_FILTR_CFG_BA |
- MT_RX_FILTR_CFG_CTRL_RSV);
- MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
-
- *total_flags = flags;
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-
- mutex_unlock(&dev->mutex);
-}
-
-static void
mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
@@ -238,10 +173,13 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changed & BSS_CHANGED_BSSID)
mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid);
- if (changed & BSS_CHANGED_BEACON_INT)
+ if (changed & BSS_CHANGED_BEACON_INT) {
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL,
info->beacon_int << 4);
+ dev->beacon_int = info->beacon_int;
+ dev->tbtt_count = 0;
+ }
if (changed & BSS_CHANGED_BEACON_ENABLED) {
tasklet_disable(&dev->pre_tbtt_tasklet);
@@ -260,66 +198,6 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_unlock(&dev->mutex);
}
-static int
-mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- int ret = 0;
- int idx = 0;
- int i;
-
- mutex_lock(&dev->mutex);
-
- idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
- if (idx < 0) {
- ret = -ENOSPC;
- goto out;
- }
-
- msta->vif = mvif;
- msta->wcid.sta = 1;
- msta->wcid.idx = idx;
- msta->wcid.hw_key_idx = -1;
- mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
- mt76x2_mac_wcid_set_drop(dev, idx, false);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76x2_txq_init(dev, sta->txq[i]);
-
- if (vif->type == NL80211_IFTYPE_AP)
- set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
-
- rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
-
-out:
- mutex_unlock(&dev->mutex);
-
- return ret;
-}
-
-static int
-mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- int idx = msta->wcid.idx;
- int i;
-
- mutex_lock(&dev->mutex);
- rcu_assign_pointer(dev->wcid[idx], NULL);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76_txq_remove(&dev->mt76, sta->txq[i]);
- mt76x2_mac_wcid_set_drop(dev, idx, true);
- mt76_wcid_free(dev->wcid_mask, idx);
- mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
- mutex_unlock(&dev->mutex);
-
- return 0;
-}
-
void
mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
@@ -331,117 +209,6 @@ mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
mt76x2_mac_wcid_set_drop(dev, idx, ps);
}
-static int
-mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- struct mt76x2_sta *msta;
- struct mt76_wcid *wcid;
- int idx = key->keyidx;
- int ret;
-
- /* fall back to sw encryption for unsupported ciphers */
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- case WLAN_CIPHER_SUITE_TKIP:
- case WLAN_CIPHER_SUITE_CCMP:
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- /*
- * The hardware does not support per-STA RX GTK, fall back
- * to software mode for these.
- */
- if ((vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MESH_POINT) &&
- (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
- key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -EOPNOTSUPP;
-
- msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
- wcid = msta ? &msta->wcid : &mvif->group_wcid;
-
- if (cmd == SET_KEY) {
- key->hw_key_idx = wcid->idx;
- wcid->hw_key_idx = idx;
- if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
- wcid->sw_iv = true;
- }
- } else {
- if (idx == wcid->hw_key_idx) {
- wcid->hw_key_idx = -1;
- wcid->sw_iv = true;
- }
-
- key = NULL;
- }
- mt76_wcid_key_setup(&dev->mt76, wcid, key);
-
- if (!msta) {
- if (key || wcid->hw_key_idx == idx) {
- ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
- if (ret)
- return ret;
- }
-
- return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
- }
-
- return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
-}
-
-static int
-mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct mt76x2_dev *dev = hw->priv;
- u8 cw_min = 5, cw_max = 10, qid;
- u32 val;
-
- qid = dev->mt76.q_tx[queue].hw_idx;
-
- if (params->cw_min)
- cw_min = fls(params->cw_min);
- if (params->cw_max)
- cw_max = fls(params->cw_max);
-
- val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
- FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
- FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
- FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
- mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
-
- val = mt76_rr(dev, MT_WMM_TXOP(qid));
- val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
- val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
- mt76_wr(dev, MT_WMM_TXOP(qid), val);
-
- val = mt76_rr(dev, MT_WMM_AIFSN);
- val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
- val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
- mt76_wr(dev, MT_WMM_AIFSN, val);
-
- val = mt76_rr(dev, MT_WMM_CWMIN);
- val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
- val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
- mt76_wr(dev, MT_WMM_CWMIN, val);
-
- val = mt76_rr(dev, MT_WMM_CWMAX);
- val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
- val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
- mt76_wr(dev, MT_WMM_CWMAX, val);
-
- return 0;
-}
-
static void
mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
@@ -480,75 +247,6 @@ mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
return 0;
}
-static int
-mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params)
-{
- enum ieee80211_ampdu_mlme_action action = params->action;
- struct ieee80211_sta *sta = params->sta;
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct ieee80211_txq *txq = sta->txq[params->tid];
- u16 tid = params->tid;
- u16 *ssn = &params->ssn;
- struct mt76_txq *mtxq;
-
- if (!txq)
- return -EINVAL;
-
- mtxq = (struct mt76_txq *)txq->drv_priv;
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
- mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
- break;
- case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
- mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
- BIT(16 + tid));
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- mtxq->aggr = true;
- mtxq->send_bar = false;
- ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
- break;
- case IEEE80211_AMPDU_TX_STOP_FLUSH:
- case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- mtxq->aggr = false;
- ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
- break;
- case IEEE80211_AMPDU_TX_START:
- mtxq->agg_ssn = *ssn << 4;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
- case IEEE80211_AMPDU_TX_STOP_CONT:
- mtxq->aggr = false;
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
- }
-
- return 0;
-}
-
-static void
-mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
- struct ieee80211_tx_rate rate = {};
-
- if (!rates)
- return;
-
- rate.idx = rates->rate[0].idx;
- rate.flags = rates->rate[0].flags;
- mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
- msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
-}
-
static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
s16 coverage_class)
{
@@ -600,6 +298,21 @@ static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
return 0;
}
+static int
+mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ if (val != ~0 && val > 0xffff)
+ return -EINVAL;
+
+ mutex_lock(&dev->mutex);
+ mt76x2_mac_set_tx_protection(dev, val);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
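
mac80211 passes (u32)-1 when RTS/CTS is switched off, and the hardware threshold field is 16 bits wide, which explains the two-sided check in mt76x2_set_rts_threshold(). Just the validation, as a sketch:

	#include <stdint.h>
	#include <stdio.h>

	static int rts_threshold_valid(uint32_t val)
	{
		/* ~0 is the "disabled" sentinel; anything else must fit in 16 bits */
		return val == ~0u || val <= 0xffffu;
	}

	int main(void)
	{
		printf("%d %d %d\n", rts_threshold_valid(~0u),
		       rts_threshold_valid(2346),
		       rts_threshold_valid(0x10000));	/* 1 1 0 */
		return 0;
	}
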
const struct ieee80211_ops mt76x2_ops = {
.tx = mt76x2_tx,
.start = mt76x2_start,
@@ -626,5 +339,6 @@ const struct ieee80211_ops mt76x2_ops = {
.set_tim = mt76x2_set_tim,
.set_antenna = mt76x2_set_antenna,
.get_antenna = mt76x2_get_antenna,
+ .set_rts_threshold = mt76x2_set_rts_threshold,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
index dfd36d736b06..743da57760dc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
@@ -23,23 +23,6 @@
#include "mt76x2_dma.h"
#include "mt76x2_eeprom.h"
-struct mt76x2_fw_header {
- __le32 ilm_len;
- __le32 dlm_len;
- __le16 build_ver;
- __le16 fw_ver;
- u8 pad[4];
- char build_time[16];
-};
-
-struct mt76x2_patch_header {
- char build_time[16];
- char platform[4];
- char hw_version[4];
- char patch_version[4];
- u8 pad[2];
-};
-
static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
{
struct sk_buff *skb;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
index d7a7e83262ce..e40293f21417 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
@@ -146,6 +146,23 @@ struct mt76x2_tssi_comp {
u8 offset1;
} __packed __aligned(4);
+struct mt76x2_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
+struct mt76x2_patch_header {
+ char build_time[16];
+ char platform[4];
+ char hw_version[4];
+ char patch_version[4];
+ u8 pad[2];
+};
+
int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
u32 param);
int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
index c1c38ca3330a..84c96c0415b6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
@@ -19,172 +19,6 @@
#include "mt76x2_mcu.h"
#include "mt76x2_eeprom.h"
-static void
-mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
-{
- s8 gain;
-
- gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
- gain -= offset / 2;
- mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
-}
-
-static void
-mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
-{
- s8 gain;
-
- gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
- gain += offset;
- mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
-}
-
-static void
-mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
-{
- s8 *gain_adj = dev->cal.rx.high_gain;
-
- mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
- mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
-
- mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
- mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
-}
-
-static u32
-mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
-{
- u32 val = 0;
-
- val |= (v1 & (BIT(6) - 1)) << 0;
- val |= (v2 & (BIT(6) - 1)) << 8;
- val |= (v3 & (BIT(6) - 1)) << 16;
- val |= (v4 & (BIT(6) - 1)) << 24;
- return val;
-}
-
-int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
-{
- struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
-
- rssi += cal->rssi_offset[chain];
- rssi -= cal->lna_gain;
-
- return rssi;
-}
-
-static void
-mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
-{
- int i;
-
- for (i = 0; i < sizeof(r->all); i++)
- r->all[i] += offset;
-}
-
-static void
-mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
-{
- int i;
-
- for (i = 0; i < sizeof(r->all); i++)
- if (r->all[i] > limit)
- r->all[i] = limit;
-}
-
-static int
-mt76x2_get_min_rate_power(struct mt76_rate_power *r)
-{
- int i;
- s8 ret = 0;
-
- for (i = 0; i < sizeof(r->all); i++) {
- if (!r->all[i])
- continue;
-
- if (ret)
- ret = min(ret, r->all[i]);
- else
- ret = r->all[i];
- }
-
- return ret;
-}
-
-
-void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
-{
- enum nl80211_chan_width width = dev->mt76.chandef.width;
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
- struct mt76x2_tx_power_info txp;
- int txp_0, txp_1, delta = 0;
- struct mt76_rate_power t = {};
- int base_power, gain;
-
- mt76x2_get_power_info(dev, &txp, chan);
-
- if (width == NL80211_CHAN_WIDTH_40)
- delta = txp.delta_bw40;
- else if (width == NL80211_CHAN_WIDTH_80)
- delta = txp.delta_bw80;
-
- mt76x2_get_rate_power(dev, &t, chan);
- mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
- mt76x2_limit_rate_power(&t, dev->txpower_conf);
- dev->txpower_cur = mt76x2_get_max_rate_power(&t);
-
- base_power = mt76x2_get_min_rate_power(&t);
- delta += base_power - txp.chain[0].target_power;
- txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
- txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
-
- gain = min(txp_0, txp_1);
- if (gain < 0) {
- base_power -= gain;
- txp_0 -= gain;
- txp_1 -= gain;
- } else if (gain > 0x2f) {
- base_power -= gain - 0x2f;
- txp_0 = 0x2f;
- txp_1 = 0x2f;
- }
-
- mt76x2_add_rate_power_offset(&t, -base_power);
- dev->target_power = txp.chain[0].target_power;
- dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
- dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
- dev->rate_power = t;
-
- mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
- mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
-
- mt76_wr(dev, MT_TX_PWR_CFG_0,
- mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_1,
- mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_2,
- mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
- mt76_wr(dev, MT_TX_PWR_CFG_3,
- mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_4,
- mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
- mt76_wr(dev, MT_TX_PWR_CFG_7,
- mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
- mt76_wr(dev, MT_TX_PWR_CFG_8,
- mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
- mt76_wr(dev, MT_TX_PWR_CFG_9,
- mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
-}
-
-static bool
-mt76x2_channel_silent(struct mt76x2_dev *dev)
-{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-
- return ((chan->flags & IEEE80211_CHAN_RADAR) &&
- chan->dfs_state != NL80211_DFS_AVAILABLE);
-}
-
static bool
mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
{
@@ -243,140 +77,6 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
dev->cal.channel_cal_done = true;
}
-static void
-mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band)
-{
- u32 pa_mode[2];
- u32 pa_mode_adj;
-
- if (band == NL80211_BAND_2GHZ) {
- pa_mode[0] = 0x010055ff;
- pa_mode[1] = 0x00550055;
-
- mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
- mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
- mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
- } else {
- mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
- mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
- }
- } else {
- pa_mode[0] = 0x0000ffff;
- pa_mode[1] = 0x00ff00ff;
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
- mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
- } else {
- mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
- mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
- }
-
- if (mt76x2_ext_pa_enabled(dev, band))
- pa_mode_adj = 0x04000000;
- else
- pa_mode_adj = 0;
-
- mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
- mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
- }
-
- mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
- mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
- mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
- mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- u32 val;
-
- if (band == NL80211_BAND_2GHZ)
- val = 0x3c3c023c;
- else
- val = 0x363c023c;
-
- mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
- } else {
- if (band == NL80211_BAND_2GHZ) {
- u32 val = 0x0f3c3c3c;
-
- mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
- } else {
- mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
- mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
- mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
- }
- }
-}
-
-static void
-mt76x2_configure_tx_delay(struct mt76x2_dev *dev, enum nl80211_band band, u8 bw)
-{
- u32 cfg0, cfg1;
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- cfg0 = bw ? 0x000b0c01 : 0x00101101;
- cfg1 = 0x00011414;
- } else {
- cfg0 = bw ? 0x000b0b01 : 0x00101001;
- cfg1 = 0x00021414;
- }
- mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
- mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
-
- mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
-}
-
-static void
-mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
-{
- int core_val, agc_val;
-
- switch (width) {
- case NL80211_CHAN_WIDTH_80:
- core_val = 3;
- agc_val = 7;
- break;
- case NL80211_CHAN_WIDTH_40:
- core_val = 2;
- agc_val = 3;
- break;
- default:
- core_val = 0;
- agc_val = 1;
- break;
- }
-
- mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
- mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
-}
-
-static void
-mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
-{
- switch (band) {
- case NL80211_BAND_2GHZ:
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
- break;
- case NL80211_BAND_5GHZ:
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
- break;
- }
-
- mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
- primary_upper);
-}
-
void mt76x2_phy_set_antenna(struct mt76x2_dev *dev)
{
u32 val;
@@ -485,12 +185,14 @@ static void
mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
{
u32 false_cca;
- u8 limit = dev->cal.low_gain > 1 ? 4 : 16;
+ u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
+ dev->cal.false_cca = false_cca;
if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
dev->cal.agc_gain_adjust += 2;
- else if (false_cca < 10 && dev->cal.agc_gain_adjust > 0)
+ else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
+ (dev->cal.agc_gain_adjust >= limit && false_cca < 500))
dev->cal.agc_gain_adjust -= 2;
else
return;
@@ -501,57 +203,65 @@ mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
static void
mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
{
- u32 val = mt76_rr(dev, MT_BBP(AGC, 20));
- int rssi0 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI0, val);
- int rssi1 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI1, val);
u8 *gain = dev->cal.agc_gain_init;
- u8 gain_delta;
+ u8 low_gain_delta, gain_delta;
+ bool gain_change;
int low_gain;
+ u32 val;
- dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 +
- (rssi0 << 8) / 16;
- dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 +
- (rssi1 << 8) / 16;
- dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] +
- dev->cal.avg_rssi[1]) / 512;
+ dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
(dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
- if (dev->cal.low_gain == low_gain) {
+ gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
+ dev->cal.low_gain = low_gain;
+
+ if (!gain_change) {
mt76x2_phy_adjust_vga_gain(dev);
return;
}
- dev->cal.low_gain = low_gain;
-
- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+ if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) {
mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
- else
+ val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
+ if (low_gain == 2)
+ val |= 0x3;
+ else
+ val |= 0x5;
+ mt76_wr(dev, MT_BBP(AGC, 26), val);
+ } else {
mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
+ }
- if (low_gain) {
- mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
+ if (mt76x2_has_ext_lna(dev))
+ low_gain_delta = 10;
+ else
+ low_gain_delta = 14;
+
+ if (low_gain == 2) {
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
- if (mt76x2_has_ext_lna(dev))
- gain_delta = 10;
- else
- gain_delta = 14;
+ gain_delta = low_gain_delta;
+ dev->cal.agc_gain_adjust = 0;
} else {
- mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
else
mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
gain_delta = 0;
+ dev->cal.agc_gain_adjust = low_gain_delta;
}
dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
- dev->cal.agc_gain_adjust = 0;
mt76x2_phy_set_gain_val(dev);
+
+ /* clear false CCA counters */
+ mt76_rr(dev, MT_RX_STAT_1);
}
int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
new file mode 100644
index 000000000000..9fd6ab4cbb94
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+static void
+mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+ s8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+ gain -= offset / 2;
+ mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
+}
+
+static void
+mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+ s8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+ gain += offset;
+ mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
+}
+
+void mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
+{
+ s8 *gain_adj = dev->cal.rx.high_gain;
+
+ mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
+ mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
+
+ mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
+ mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
+}
+EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj);
+
+void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
+ enum nl80211_band band)
+{
+ u32 pa_mode[2];
+ u32 pa_mode_adj;
+
+ if (band == NL80211_BAND_2GHZ) {
+ pa_mode[0] = 0x010055ff;
+ pa_mode[1] = 0x00550055;
+
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
+ } else {
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
+ }
+ } else {
+ pa_mode[0] = 0x0000ffff;
+ pa_mode[1] = 0x00ff00ff;
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
+ } else {
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
+ }
+
+ if (mt76x2_ext_pa_enabled(dev, band))
+ pa_mode_adj = 0x04000000;
+ else
+ pa_mode_adj = 0;
+
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
+ }
+
+ mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
+ mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
+ mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
+ mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ u32 val;
+
+ if (band == NL80211_BAND_2GHZ)
+ val = 0x3c3c023c;
+ else
+ val = 0x363c023c;
+
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
+ } else {
+ if (band == NL80211_BAND_2GHZ) {
+ u32 val = 0x0f3c3c3c;
+
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
+ } else {
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
+
+static void
+mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ if (r->all[i] > limit)
+ r->all[i] = limit;
+}
+
+static u32
+mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+ u32 val = 0;
+
+ val |= (v1 & (BIT(6) - 1)) << 0;
+ val |= (v2 & (BIT(6) - 1)) << 8;
+ val |= (v3 & (BIT(6) - 1)) << 16;
+ val |= (v4 & (BIT(6) - 1)) << 24;
+ return val;
+}
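
Each TX_PWR_CFG register carries four 6-bit power values, one per byte lane; mt76x2_tx_power_mask() masks each value to 6 bits and packs them at 8-bit offsets. A runnable equivalent:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t power_mask(uint8_t v1, uint8_t v2, uint8_t v3, uint8_t v4)
	{
		uint32_t m = (1u << 6) - 1;	/* power values are 6 bits wide */

		return (v1 & m) | (v2 & m) << 8 | (v3 & m) << 16 | (v4 & m) << 24;
	}

	int main(void)
	{
		printf("%08x\n", power_mask(0x12, 0x34, 0x3f, 0x05));
		return 0;
	}
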
+
+static void
+mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ r->all[i] += offset;
+}
+
+static int
+mt76x2_get_min_rate_power(struct mt76_rate_power *r)
+{
+ int i;
+ s8 ret = 0;
+
+ for (i = 0; i < sizeof(r->all); i++) {
+ if (!r->all[i])
+ continue;
+
+ if (ret)
+ ret = min(ret, r->all[i]);
+ else
+ ret = r->all[i];
+ }
+
+ return ret;
+}
+
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
+{
+ enum nl80211_chan_width width = dev->mt76.chandef.width;
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ int txp_0, txp_1, delta = 0;
+ struct mt76_rate_power t = {};
+ int base_power, gain;
+
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ if (width == NL80211_CHAN_WIDTH_40)
+ delta = txp.delta_bw40;
+ else if (width == NL80211_CHAN_WIDTH_80)
+ delta = txp.delta_bw80;
+
+ mt76x2_get_rate_power(dev, &t, chan);
+ mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
+ mt76x2_limit_rate_power(&t, dev->txpower_conf);
+ dev->txpower_cur = mt76x2_get_max_rate_power(&t);
+
+ base_power = mt76x2_get_min_rate_power(&t);
+ delta += base_power - txp.chain[0].target_power;
+ txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
+ txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
+
+ gain = min(txp_0, txp_1);
+ if (gain < 0) {
+ base_power -= gain;
+ txp_0 -= gain;
+ txp_1 -= gain;
+ } else if (gain > 0x2f) {
+ base_power -= gain - 0x2f;
+ txp_0 = 0x2f;
+ txp_1 = 0x2f;
+ }
+
+ mt76x2_add_rate_power_offset(&t, -base_power);
+ dev->target_power = txp.chain[0].target_power;
+ dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
+ dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
+ dev->rate_power = t;
+
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+ mt76_wr(dev, MT_TX_PWR_CFG_0,
+ mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_1,
+ mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_2,
+ mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
+ mt76_wr(dev, MT_TX_PWR_CFG_3,
+ mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_4,
+ mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_7,
+ mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
+ mt76_wr(dev, MT_TX_PWR_CFG_8,
+ mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_9,
+ mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
+
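
The gain window in mt76x2_phy_set_txpower() above keeps both per-chain channel-init values inside 0..0x2f by shifting any excess into base_power. A worked sketch of just that clamp (plain C; the sample inputs are invented to hit the negative branch):

#include <stdio.h>

/* mirror of the clamp in mt76x2_phy_set_txpower() */
static void clamp_gain(int *base_power, int *txp_0, int *txp_1)
{
	int gain = *txp_0 < *txp_1 ? *txp_0 : *txp_1;

	if (gain < 0) {
		*base_power -= gain;
		*txp_0 -= gain;
		*txp_1 -= gain;
	} else if (gain > 0x2f) {
		*base_power -= gain - 0x2f;
		*txp_0 = 0x2f;
		*txp_1 = 0x2f;
	}
}

int main(void)
{
	int base = 20, t0 = -3, t1 = 2;

	clamp_gain(&base, &t0, &t1);
	printf("base=%d txp_0=%d txp_1=%d\n", base, t0, t1); /* 23 0 5 */
	return 0;
}
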
+void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
+ enum nl80211_band band, u8 bw)
+{
+ u32 cfg0, cfg1;
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ cfg0 = bw ? 0x000b0c01 : 0x00101101;
+ cfg1 = 0x00011414;
+ } else {
+ cfg0 = bw ? 0x000b0b01 : 0x00101001;
+ cfg1 = 0x00021414;
+ }
+ mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
+ mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
+
+ mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
+}
+EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
+
+void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
+{
+ int core_val, agc_val;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_80:
+ core_val = 3;
+ agc_val = 7;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ core_val = 2;
+ agc_val = 3;
+ break;
+ default:
+ core_val = 0;
+ agc_val = 1;
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+ mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
+
+void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ case NL80211_BAND_5GHZ:
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+ primary_upper);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
+
+int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev)
+{
+ struct mt76x2_sta *sta;
+ struct mt76_wcid *wcid;
+ int i, j, min_rssi = 0;
+ s8 cur_rssi;
+
+ local_bh_disable();
+ rcu_read_lock();
+
+ for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+ unsigned long mask = dev->wcid_mask[i];
+
+ if (!mask)
+ continue;
+
+ for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ wcid = rcu_dereference(dev->wcid[j]);
+ if (!wcid)
+ continue;
+
+ sta = container_of(wcid, struct mt76x2_sta, wcid);
+ spin_lock(&dev->mt76.rx_lock);
+ if (sta->inactive_count++ < 5)
+ cur_rssi = ewma_signal_read(&sta->rssi);
+ else
+ cur_rssi = 0;
+ spin_unlock(&dev->mt76.rx_lock);
+
+ if (cur_rssi < min_rssi)
+ min_rssi = cur_rssi;
+ }
+ }
+
+ rcu_read_unlock();
+ local_bh_enable();
+
+ if (!min_rssi)
+ return -75;
+
+ return min_rssi;
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_get_min_avg_rssi);
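
The RSSI scan above walks dev->wcid_mask one word at a time and peels set bits off with a shift, so empty words cost a single test. A freestanding illustration of the same idiom (plain C, assuming 64-bit longs; the 2-word table and occupied slots are made up):

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

int main(void)
{
	/* pretend slots 1, 5 and 65 are occupied */
	unsigned long wcid_mask[2] = { (1UL << 1) | (1UL << 5), 1UL << 1 };
	int i, j;

	for (i = 0; i < 2; i++) {
		unsigned long mask = wcid_mask[i];

		if (!mask)
			continue;

		for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
			if (!(mask & 1))
				continue;
			printf("slot %d is occupied\n", j);
		}
	}
	return 0;
}
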
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
index b9c334d9e5b8..1551ea453180 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
@@ -75,6 +75,21 @@
#define MT_XO_CTRL7 0x011c
+#define MT_USB_U3DMA_CFG 0x9018
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
+#define MT_USB_DMA_CFG_UDMA_TX_WL_DROP BIT(16)
+#define MT_USB_DMA_CFG_WAKE_UP_EN BIT(17)
+#define MT_USB_DMA_CFG_RX_DROP_OR_PAD BIT(18)
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24)
+#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
+
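
For readers without the kernel headers handy: GENMASK(h, l) above builds a contiguous bit mask from bit l up to bit h, and FIELD_PREP() (used elsewhere in this patch) shifts a value into such a mask. Rough 32-bit lookalikes (plain C with a GCC/Clang builtin; these are not the kernel's actual macros):

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the kernel's GENMASK()/FIELD_PREP() */
#define GENMASK32(h, l) \
	((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))
#define FIELD_PREP32(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint32_t tout = GENMASK32(7, 0);  /* like ..._RX_BULK_AGG_TOUT */
	uint32_t lmt = GENMASK32(15, 8);  /* like ..._RX_BULK_AGG_LMT */

	/* compose a register value from two fields and one flag bit */
	uint32_t val = FIELD_PREP32(tout, 0x20) |
		       FIELD_PREP32(lmt, 0x04) |
		       (1u << 22);              /* like ..._RX_BULK_EN */

	printf("0x%08x\n", val); /* 0x00400420 */
	return 0;
}
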
#define MT_WLAN_MTC_CTRL 0x10148
#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
@@ -150,6 +165,9 @@
#define MT_TX_HW_QUEUE_MCU 8
#define MT_TX_HW_QUEUE_MGMT 9
+#define MT_US_CYC_CFG 0x02a4
+#define MT_US_CYC_CNT GENMASK(7, 0)
+
#define MT_PBF_SYS_CTRL 0x0400
#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
@@ -202,6 +220,11 @@
#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
+#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
+#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
+#define MT_FCE_SKIP_FS 0x0a6c
+
#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
#define MT_MAC_CSR0 0x1000
@@ -214,6 +237,7 @@
#define MT_MAC_ADDR_DW0 0x1008
#define MT_MAC_ADDR_DW1 0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
#define MT_MAC_BSSID_DW0 0x1010
#define MT_MAC_BSSID_DW1 0x1014
@@ -351,6 +375,7 @@
#define MT_TX_TIMEOUT_CFG_ACKTO GENMASK(15, 8)
#define MT_TX_RETRY_CFG 0x134c
+#define MT_TX_LINK_CFG 0x1350
#define MT_VHT_HT_FBK_CFG1 0x1358
#define MT_PROT_CFG_RATE GENMASK(15, 0)
@@ -425,6 +450,7 @@
#define MT_RX_FILTR_CFG_BAR BIT(15)
#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
+#define MT_AUTO_RSP_CFG 0x1404
#define MT_LEGACY_BASIC_RATE 0x1408
#define MT_HT_BASIC_RATE 0x140c
@@ -460,6 +486,10 @@
#define MT_RX_STAT_2_DUP_ERRORS GENMASK(15, 0)
#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16)
+#define MT_TX_STA_0 0x170c
+#define MT_TX_STA_1 0x1710
+#define MT_TX_STA_2 0x1714
+
#define MT_TX_STAT_FIFO 0x1718
#define MT_TX_STAT_FIFO_VALID BIT(0)
#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
index e46eafc4c436..4c907882e8b0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
@@ -23,129 +23,6 @@ struct beacon_bc_data {
struct sk_buff *tail[8];
};
-void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct mt76x2_dev *dev = hw->priv;
- struct ieee80211_vif *vif = info->control.vif;
- struct mt76_wcid *wcid = &dev->global_wcid;
-
- if (control->sta) {
- struct mt76x2_sta *msta;
-
- msta = (struct mt76x2_sta *) control->sta->drv_priv;
- wcid = &msta->wcid;
- /* sw encrypted frames */
- if (!info->control.hw_key && wcid->hw_key_idx != -1)
- control->sta = NULL;
- }
-
- if (vif && !control->sta) {
- struct mt76x2_vif *mvif;
-
- mvif = (struct mt76x2_vif *) vif->drv_priv;
- wcid = &mvif->group_wcid;
- }
-
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
-}
-
-void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- ieee80211_free_txskb(mt76_hw(dev), skb);
- } else {
- ieee80211_tx_info_clear_status(info);
- info->status.rates[0].idx = -1;
- info->flags |= IEEE80211_TX_STAT_ACK;
- ieee80211_tx_status(mt76_hw(dev), skb);
- }
-}
-
-s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
- const struct ieee80211_tx_rate *rate)
-{
- s8 max_txpwr;
-
- if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- u8 mcs = ieee80211_rate_get_vht_mcs(rate);
-
- if (mcs == 8 || mcs == 9) {
- max_txpwr = dev->rate_power.vht[8];
- } else {
- u8 nss, idx;
-
- nss = ieee80211_rate_get_vht_nss(rate);
- idx = ((nss - 1) << 3) + mcs;
- max_txpwr = dev->rate_power.ht[idx & 0xf];
- }
- } else if (rate->flags & IEEE80211_TX_RC_MCS) {
- max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
- } else {
- enum nl80211_band band = dev->mt76.chandef.chan->band;
-
- if (band == NL80211_BAND_2GHZ) {
- const struct ieee80211_rate *r;
- struct wiphy *wiphy = mt76_hw(dev)->wiphy;
- struct mt76_rate_power *rp = &dev->rate_power;
-
- r = &wiphy->bands[band]->bitrates[rate->idx];
- if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
- max_txpwr = rp->cck[r->hw_value & 0x3];
- else
- max_txpwr = rp->ofdm[r->hw_value & 0x7];
- } else {
- max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
- }
- }
-
- return max_txpwr;
-}
-
-s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
-{
- txpwr = min_t(s8, txpwr, dev->txpower_conf);
- txpwr -= (dev->target_power + dev->target_power_delta[0]);
- txpwr = min_t(s8, txpwr, max_txpwr_adj);
-
- if (!dev->enable_tpc)
- return 0;
- else if (txpwr >= 0)
- return min_t(s8, txpwr, 7);
- else
- return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
-}
-
-void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
-{
- s8 txpwr_adj;
-
- txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
- dev->rate_power.ofdm[4]);
- mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
- MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
- mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
- MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
-}
-
-static int mt76x2_insert_hdr_pad(struct sk_buff *skb)
-{
- int len = ieee80211_get_hdrlen_from_skb(skb);
-
- if (len % 4 == 0)
- return 0;
-
- skb_push(skb, 2);
- memmove(skb->data, skb->data + 2, len);
-
- skb->data[len] = 0;
- skb->data[len + 1] = 0;
- return 2;
-}
-
int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
@@ -159,7 +36,7 @@ int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);
- mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta);
+ mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
ret = mt76x2_insert_hdr_pad(skb);
if (ret < 0)
@@ -218,6 +95,37 @@ mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
data->tail[mvif->idx] = skb;
}
+static void
+mt76x2_resync_beacon_timer(struct mt76x2_dev *dev)
+{
+ u32 timer_val = dev->beacon_int << 4;
+
+ dev->tbtt_count++;
+
+ /*
+ * Beacon timer drifts by 1us every tick; the timer is configured
+ * in 1/16 TU (64us) units.
+ */
+ if (dev->tbtt_count < 62)
+ return;
+
+ if (dev->tbtt_count >= 64) {
+ dev->tbtt_count = 0;
+ return;
+ }
+
+ /*
+ * The updated beacon interval takes effect two TBTTs later, because
+ * at that point the original interval has already been loaded into
+ * the next TBTT_TIMER value.
+ */
+ if (dev->tbtt_count == 62)
+ timer_val -= 1;
+
+ mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_INTVAL, timer_val);
+}
+
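
To make the resync arithmetic above concrete: the interval register counts 1/16 TU (64us) units, and at roughly 1us of drift per beacon, 64 beacons accumulate exactly one unit, so one shortened TBTT per 64 cancels the drift. A worked check (plain C; the 100 TU beacon interval is only an example):

#include <stdio.h>

int main(void)
{
	unsigned int beacon_int = 100;            /* TU, example value */
	unsigned int timer_val = beacon_int << 4; /* 1/16 TU units: 1600 */
	unsigned int drift_us = 64 * 1;           /* ~1us/beacon over 64 TBTTs */

	/* 64us is one 1/16 TU unit, so programming timer_val - 1 for a
	 * single TBTT out of every 64 cancels the accumulated drift */
	printf("timer=%u units, drift=%uus = %u unit\n",
	       timer_val, drift_us, drift_us / 64);
	return 0;
}
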
void mt76x2_pre_tbtt_tasklet(unsigned long arg)
{
struct mt76x2_dev *dev = (struct mt76x2_dev *) arg;
@@ -226,6 +134,8 @@ void mt76x2_pre_tbtt_tasklet(unsigned long arg)
struct sk_buff *skb;
int i, nframes;
+ mt76x2_resync_beacon_timer(dev);
+
data.dev = dev;
__skb_queue_head_init(&data.q);
@@ -256,7 +166,8 @@ void mt76x2_pre_tbtt_tasklet(unsigned long arg)
struct ieee80211_vif *vif = info->control.vif;
struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL);
+ mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
+ NULL);
}
spin_unlock_bh(&q->lock);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
new file mode 100644
index 000000000000..36afb166fa3f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76x2_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->global_wcid;
+
+ if (control->sta) {
+ struct mt76x2_sta *msta;
+
+ msta = (struct mt76x2_sta *)control->sta->drv_priv;
+ wcid = &msta->wcid;
+ /* sw encrypted frames */
+ if (!info->control.hw_key && wcid->hw_key_idx != -1)
+ control->sta = NULL;
+ }
+
+ if (vif && !control->sta) {
+ struct mt76x2_vif *mvif;
+
+ mvif = (struct mt76x2_vif *)vif->drv_priv;
+ wcid = &mvif->group_wcid;
+ }
+
+ mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx);
+
+int mt76x2_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ if (len % 4 == 0)
+ return 0;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+ return 2;
+}
+EXPORT_SYMBOL_GPL(mt76x2_insert_hdr_pad);
+
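
mt76x2_insert_hdr_pad() above pushes 2 bytes of headroom and slides the 802.11 header down whenever its length is not a multiple of 4 (a QoS data header is 26 bytes, for instance), so the payload that follows stays 4-byte aligned. A self-contained imitation on a flat buffer (plain C; no skb involved):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[64];
	char *data = buf + 2;  /* 2 bytes of headroom, like an skb */
	int hdr_len = 26;      /* e.g. an 802.11 QoS data header */

	memset(buf, 0, sizeof(buf));
	memcpy(data, "HHHHHHHHHHHHHHHHHHHHHHHHHHpayload", 34);

	if (hdr_len % 4) {
		data -= 2;                        /* skb_push(skb, 2) */
		memmove(data, data + 2, hdr_len); /* slide header down */
		data[hdr_len] = 0;                /* zero the pad bytes */
		data[hdr_len + 1] = 0;
	}

	/* header moved 2 bytes down; payload kept its 4-byte alignment */
	printf("%.26s | pad | %s\n", data, data + hdr_len + 2);
	return 0;
}
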
+s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
+ const struct ieee80211_tx_rate *rate)
+{
+ s8 max_txpwr;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+
+ if (mcs == 8 || mcs == 9) {
+ max_txpwr = dev->rate_power.vht[8];
+ } else {
+ u8 nss, idx;
+
+ nss = ieee80211_rate_get_vht_nss(rate);
+ idx = ((nss - 1) << 3) + mcs;
+ max_txpwr = dev->rate_power.ht[idx & 0xf];
+ }
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
+ } else {
+ enum nl80211_band band = dev->mt76.chandef.chan->band;
+
+ if (band == NL80211_BAND_2GHZ) {
+ const struct ieee80211_rate *r;
+ struct wiphy *wiphy = mt76_hw(dev)->wiphy;
+ struct mt76_rate_power *rp = &dev->rate_power;
+
+ r = &wiphy->bands[band]->bitrates[rate->idx];
+ if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
+ max_txpwr = rp->cck[r->hw_value & 0x3];
+ else
+ max_txpwr = rp->ofdm[r->hw_value & 0x7];
+ } else {
+ max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
+ }
+ }
+
+ return max_txpwr;
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_get_max_txpwr_adj);
+
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
+{
+ txpwr = min_t(s8, txpwr, dev->txpower_conf);
+ txpwr -= (dev->target_power + dev->target_power_delta[0]);
+ txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
+ if (!dev->enable_tpc)
+ return 0;
+ else if (txpwr >= 0)
+ return min_t(s8, txpwr, 7);
+ else
+ return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_get_txpwr_adj);
+
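
After the clamping in mt76x2_tx_get_txpwr_adj() above, the returned TPC code is 0 when TPC is disabled, the delta itself (capped at 7) when non-negative, a folded (txpwr + 32) / 2 encoding down to -16 dB, and 8 below that. A sample table (plain C, mirroring only the final mapping):

#include <stdio.h>

/* the tail mapping of mt76x2_tx_get_txpwr_adj(), TPC enabled */
static int tpc_encode(int txpwr)
{
	if (txpwr >= 0)
		return txpwr > 7 ? 7 : txpwr;
	return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
}

int main(void)
{
	int samples[] = { 9, 5, 0, -2, -16, -20 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%4d -> %d\n", samples[i], tpc_encode(samples[i]));
	return 0;
}
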
+void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
+{
+ s8 txpwr_adj;
+
+ txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
+ dev->rate_power.ofdm[4]);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_set_txpwr_auto);
+
+void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ ieee80211_free_txskb(mt76_hw(dev), skb);
+ } else {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status(mt76_hw(dev), skb);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_complete);
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
new file mode 100644
index 000000000000..1428cfdee579
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "mt76x2u.h"
+
+static const struct usb_device_id mt76x2u_device_table[] = {
+ { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
+ { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
+ { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
+ { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */
+	{ USB_DEVICE(0x057c, 0x8503) }, /* AVM FRITZ!WLAN AC860 */
+ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
+ { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
+	{ USB_DEVICE(0x045e, 0x02e6) }, /* Xbox One Wireless Adapter */
+ { },
+};
+
+static int mt76x2u_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76x2_dev *dev;
+ int err;
+
+ dev = mt76x2u_alloc_device(&intf->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ udev = usb_get_dev(udev);
+ usb_reset_device(udev);
+
+ err = mt76u_init(&dev->mt76, intf);
+ if (err < 0)
+ goto err;
+
+ dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+ dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+
+ err = mt76x2u_register_device(dev);
+ if (err < 0)
+ goto err;
+
+ return 0;
+
+err:
+ ieee80211_free_hw(mt76_hw(dev));
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(udev);
+
+ return err;
+}
+
+static void mt76x2u_disconnect(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ set_bit(MT76_REMOVED, &dev->mt76.state);
+ ieee80211_unregister_hw(hw);
+ mt76x2u_cleanup(dev);
+
+ ieee80211_free_hw(hw);
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(udev);
+}
+
+static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
+ pm_message_t state)
+{
+ struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct mt76_usb *usb = &dev->mt76.usb;
+
+ mt76u_stop_queues(&dev->mt76);
+ mt76x2u_stop_hw(dev);
+ usb_kill_urb(usb->mcu.res.urb);
+
+ return 0;
+}
+
+static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
+{
+ struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct mt76_usb *usb = &dev->mt76.usb;
+ int err;
+
+ reinit_completion(&usb->mcu.cmpl);
+ err = mt76u_submit_buf(&dev->mt76, USB_DIR_IN,
+ MT_EP_IN_CMD_RESP,
+ &usb->mcu.res, GFP_KERNEL,
+ mt76u_mcu_complete_urb,
+ &usb->mcu.cmpl);
+ if (err < 0)
+ return err;
+
+ err = mt76u_submit_rx_buffers(&dev->mt76);
+ if (err < 0)
+ return err;
+
+ tasklet_enable(&usb->rx_tasklet);
+ tasklet_enable(&usb->tx_tasklet);
+
+ return mt76x2u_init_hardware(dev);
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x2u_device_table);
+MODULE_FIRMWARE(MT7662U_FIRMWARE);
+MODULE_FIRMWARE(MT7662U_ROM_PATCH);
+
+static struct usb_driver mt76x2u_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76x2u_device_table,
+ .probe = mt76x2u_probe,
+ .disconnect = mt76x2u_disconnect,
+#ifdef CONFIG_PM
+ .suspend = mt76x2u_suspend,
+ .resume = mt76x2u_resume,
+ .reset_resume = mt76x2u_resume,
+#endif /* CONFIG_PM */
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x2u_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2u.h
new file mode 100644
index 000000000000..008092f0cd8a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2U_H
+#define __MT76x2U_H
+
+#include <linux/device.h>
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+#include "mt76x2_mcu.h"
+
+#define MT7612U_EEPROM_SIZE 512
+
+#define MT_USB_AGGR_SIZE_LIMIT 21 /* 1024B unit */
+#define MT_USB_AGGR_TIMEOUT 0x80 /* 33ns unit */
+
+extern const struct ieee80211_ops mt76x2u_ops;
+
+struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev);
+int mt76x2u_register_device(struct mt76x2_dev *dev);
+int mt76x2u_init_hardware(struct mt76x2_dev *dev);
+void mt76x2u_cleanup(struct mt76x2_dev *dev);
+void mt76x2u_stop_hw(struct mt76x2_dev *dev);
+
+void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr);
+int mt76x2u_mac_reset(struct mt76x2_dev *dev);
+void mt76x2u_mac_resume(struct mt76x2_dev *dev);
+int mt76x2u_mac_start(struct mt76x2_dev *dev);
+int mt76x2u_mac_stop(struct mt76x2_dev *dev);
+
+int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt76x2u_phy_calibrate(struct work_struct *work);
+void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev);
+void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev);
+void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev);
+
+void mt76x2u_mcu_complete_urb(struct urb *urb);
+int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan);
+int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+ u32 val);
+int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
+ struct mt76x2_tssi_comp *tssi_data);
+int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+ bool force);
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
+ bool ext, int rssi, u32 false_cca);
+int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val);
+int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type,
+ u8 temp_level, u8 channel);
+int mt76x2u_mcu_init(struct mt76x2_dev *dev);
+int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev);
+void mt76x2u_mcu_deinit(struct mt76x2_dev *dev);
+
+int mt76x2u_alloc_queues(struct mt76x2_dev *dev);
+void mt76x2u_queues_deinit(struct mt76x2_dev *dev);
+void mt76x2u_stop_queues(struct mt76x2_dev *dev);
+bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update);
+int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info);
+void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush);
+int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port,
+ u32 flags);
+
+#endif /* __MT76x2U_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
new file mode 100644
index 000000000000..1ca5dd05b265
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "dma.h"
+
+static void mt76x2u_remove_dma_hdr(struct sk_buff *skb)
+{
+ int hdr_len;
+
+ skb_pull(skb, sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN);
+ hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ if (hdr_len % 4) {
+ memmove(skb->data + 2, skb->data, hdr_len);
+ skb_pull(skb, 2);
+ }
+}
+
+static int
+mt76x2u_check_skb_rooms(struct sk_buff *skb)
+{
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u32 need_head;
+
+ need_head = sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN;
+ if (hdr_len % 4)
+ need_head += 2;
+ return skb_cow(skb, need_head);
+}
+
+static int
+mt76x2u_set_txinfo(struct sk_buff *skb,
+ struct mt76_wcid *wcid, u8 ep)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ enum mt76x2_qsel qsel;
+ u32 flags;
+
+ if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+ ep == MT_EP_OUT_HCCA)
+ qsel = MT_QSEL_MGMT;
+ else
+ qsel = MT_QSEL_EDCA;
+
+ flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+ MT_TXD_INFO_80211;
+ if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+ flags |= MT_TXD_INFO_WIV;
+
+ return mt76u_skb_dma_info(skb, WLAN_PORT, flags);
+}
+
+bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76x2_tx_status stat;
+
+ if (!mt76x2_mac_load_tx_status(dev, &stat))
+ return false;
+
+ mt76x2_send_tx_status(dev, &stat, update);
+
+ return true;
+}
+
+int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76x2_txwi *txwi;
+ int err, len = skb->len;
+
+ err = mt76x2u_check_skb_rooms(skb);
+ if (err < 0)
+ return -ENOMEM;
+
+ mt76x2_insert_hdr_pad(skb);
+
+ txwi = skb_push(skb, sizeof(struct mt76x2_txwi));
+ mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+
+ return mt76x2u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
+}
+
+void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+ mt76x2u_remove_dma_hdr(e->skb);
+ mt76x2_tx_complete(dev, e->skb);
+}
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
new file mode 100644
index 000000000000..9b81e7641c06
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+static void mt76x2u_init_dma(struct mt76x2_dev *dev)
+{
+ u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+
+ val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD |
+ MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN;
+
+ /* disable AGGR_BULK_RX in order to receive one
+ * frame in each rx urb and avoid copies
+ */
+ val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+}
+
+static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev)
+{
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
+ udelay(1);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1c), 0xff);
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x1c), 0x30);
+
+ mt76_wr(dev, MT_VEND_ADDR(CFG, 0x14), 0x484f);
+ udelay(1);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(17));
+ usleep_range(150, 200);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(16));
+ usleep_range(50, 100);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
+}
+
+static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit)
+{
+ int shift = unit ? 8 : 0;
+ u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;
+
+ /* Enable RF BG */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) << shift);
+ usleep_range(10, 20);
+
+ /* Enable RFDIG LDO/AFE/ABB/ADDA */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), val);
+ usleep_range(10, 20);
+
+ /* Switch RFDIG power to internal LDO */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(2) << shift);
+ usleep_range(10, 20);
+
+ mt76x2u_power_on_rf_patch(dev);
+
+ mt76_set(dev, 0x530, 0xf);
+}
+
+static void mt76x2u_power_on(struct mt76x2_dev *dev)
+{
+ u32 val;
+
+ /* Turn on WL MTCMOS */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x148),
+ MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+ val = MT_WLAN_MTC_CTRL_STATE_UP |
+ MT_WLAN_MTC_CTRL_PWR_ACK |
+ MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+ mt76_poll(dev, MT_VEND_ADDR(CFG, 0x148), val, val, 1000);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0x7f << 16);
+ usleep_range(10, 20);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+ usleep_range(10, 20);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xfff);
+
+ /* Release AD/DA power down */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1204), BIT(3));
+
+ /* WLAN function enable */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x80), BIT(0));
+
+ /* Release BBP software reset */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x64), BIT(18));
+
+ mt76x2u_power_on_rf(dev, 0);
+ mt76x2u_power_on_rf(dev, 1);
+}
+
+static int mt76x2u_init_eeprom(struct mt76x2_dev *dev)
+{
+ u32 val, i;
+
+ dev->mt76.eeprom.data = devm_kzalloc(dev->mt76.dev,
+ MT7612U_EEPROM_SIZE,
+ GFP_KERNEL);
+ dev->mt76.eeprom.size = MT7612U_EEPROM_SIZE;
+ if (!dev->mt76.eeprom.data)
+ return -ENOMEM;
+
+ for (i = 0; i + 4 <= MT7612U_EEPROM_SIZE; i += 4) {
+ val = mt76_rr(dev, MT_VEND_ADDR(EEPROM, i));
+ put_unaligned_le32(val, dev->mt76.eeprom.data + i);
+ }
+
+ mt76x2_eeprom_parse_hw_cap(dev);
+ return 0;
+}
+
+struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .tx_prepare_skb = mt76x2u_tx_prepare_skb,
+ .tx_complete_skb = mt76x2u_tx_complete_skb,
+ .tx_status_data = mt76x2u_tx_status_data,
+ .rx_skb = mt76x2_queue_rx_skb,
+ };
+ struct mt76x2_dev *dev;
+ struct mt76_dev *mdev;
+
+ mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops);
+ if (!mdev)
+ return NULL;
+
+ dev = container_of(mdev, struct mt76x2_dev, mt76);
+ mdev->dev = pdev;
+ mdev->drv = &drv_ops;
+
+ mutex_init(&dev->mutex);
+
+ return dev;
+}
+
+static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev)
+{
+ mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800);
+ mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820);
+ mt76_wr(dev, MT_BCN_OFFSET(2), 0x58504840);
+ mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860);
+}
+
+int mt76x2u_init_hardware(struct mt76x2_dev *dev)
+{
+ static const u16 beacon_offsets[] = {
+ /* 512 bytes per beacon */
+ 0xc000, 0xc200, 0xc400, 0xc600,
+ 0xc800, 0xca00, 0xcc00, 0xce00,
+ 0xd000, 0xd200, 0xd400, 0xd600,
+ 0xd800, 0xda00, 0xdc00, 0xde00
+ };
+ const struct mt76_wcid_addr addr = {
+ .macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ .ba_mask = 0,
+ };
+ int i, err;
+
+ dev->beacon_offsets = beacon_offsets;
+
+ mt76x2_reset_wlan(dev, true);
+ mt76x2u_power_on(dev);
+
+ if (!mt76x2_wait_for_mac(dev))
+ return -ETIMEDOUT;
+
+ err = mt76x2u_mcu_fw_init(dev);
+ if (err < 0)
+ return err;
+
+ if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100))
+ return -EIO;
+
+ /* wait for the ASIC to become ready after firmware load */
+ if (!mt76x2_wait_for_mac(dev))
+ return -ETIMEDOUT;
+
+ mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0);
+ mt76_wr(dev, MT_TSO_CTRL, 0);
+
+ mt76x2u_init_dma(dev);
+
+ err = mt76x2u_mcu_init(dev);
+ if (err < 0)
+ return err;
+
+ err = mt76x2u_mac_reset(dev);
+ if (err < 0)
+ return err;
+
+ mt76x2u_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+ dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+ mt76x2u_init_beacon_offsets(dev);
+
+ if (!mt76x2_wait_for_bbp(dev))
+ return -ETIMEDOUT;
+
+ /* reset wcid table */
+ for (i = 0; i < 254; i++)
+ mt76_wr_copy(dev, MT_WCID_ADDR(i), &addr,
+ sizeof(struct mt76_wcid_addr));
+
+ /* reset shared key table and pairwise key table */
+ for (i = 0; i < 4; i++)
+ mt76_wr(dev, MT_SKEY_MODE_BASE_0 + 4 * i, 0);
+ for (i = 0; i < 256; i++)
+ mt76_wr(dev, MT_WCID_ATTR(i), 1);
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
+
+ mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
+
+ err = mt76x2u_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+ if (err < 0)
+ return err;
+
+ mt76x2u_phy_set_rxpath(dev);
+ mt76x2u_phy_set_txdac(dev);
+
+ return mt76x2u_mac_stop(dev);
+}
+
+int mt76x2u_register_device(struct mt76x2_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ int err;
+
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
+ mt76x2_init_device(dev);
+
+ err = mt76x2u_init_eeprom(dev);
+ if (err < 0)
+ return err;
+
+ err = mt76u_mcu_init_rx(&dev->mt76);
+ if (err < 0)
+ return err;
+
+ err = mt76u_alloc_queues(&dev->mt76);
+ if (err < 0)
+ goto fail;
+
+ err = mt76x2u_init_hardware(dev);
+ if (err < 0)
+ goto fail;
+
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+ err = mt76_register_device(&dev->mt76, true, mt76x2_rates,
+ ARRAY_SIZE(mt76x2_rates));
+ if (err)
+ goto fail;
+
+ /* check hw sg support in order to enable AMSDU */
+ if (mt76u_check_sg(&dev->mt76))
+ hw->max_tx_fragments = MT_SG_MAX_SIZE;
+ else
+ hw->max_tx_fragments = 1;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ mt76x2_init_debugfs(dev);
+ mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
+ mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+
+ return 0;
+
+fail:
+ mt76x2u_cleanup(dev);
+ return err;
+}
+
+void mt76x2u_stop_hw(struct mt76x2_dev *dev)
+{
+ mt76u_stop_stat_wk(&dev->mt76);
+ cancel_delayed_work_sync(&dev->cal_work);
+ mt76x2u_mac_stop(dev);
+}
+
+void mt76x2u_cleanup(struct mt76x2_dev *dev)
+{
+ mt76x2u_mcu_set_radio_state(dev, false);
+ mt76x2u_stop_hw(dev);
+ mt76u_queues_deinit(&dev->mt76);
+ mt76x2u_mcu_deinit(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
new file mode 100644
index 000000000000..eab7ab297aa6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev)
+{
+ mt76_rr(dev, MT_RX_STAT_0);
+ mt76_rr(dev, MT_RX_STAT_1);
+ mt76_rr(dev, MT_RX_STAT_2);
+ mt76_rr(dev, MT_TX_STA_0);
+ mt76_rr(dev, MT_TX_STA_1);
+ mt76_rr(dev, MT_TX_STA_2);
+}
+
+static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
+{
+ s8 offset = 0;
+ u16 eep_val;
+
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+
+ offset = eep_val & 0x7f;
+ if ((eep_val & 0xff) == 0xff)
+ offset = 0;
+ else if (eep_val & 0x80)
+ offset = 0 - offset;
+
+ eep_val >>= 8;
+ if (eep_val == 0x00 || eep_val == 0xff) {
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+ eep_val &= 0xff;
+
+ if (eep_val == 0x00 || eep_val == 0xff)
+ eep_val = 0x14;
+ }
+
+ eep_val &= 0x7f;
+ mt76_rmw_field(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL5),
+ MT_XO_CTRL5_C2_VAL, eep_val + offset);
+ mt76_set(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL6), MT_XO_CTRL6_C2_CTRL);
+
+ mt76_wr(dev, 0x504, 0x06000000);
+ mt76_wr(dev, 0x50c, 0x08800000);
+ mdelay(5);
+ mt76_wr(dev, 0x504, 0x0);
+
+ /* decrease SIFS from 16us to 13us */
+ mt76_rmw_field(dev, MT_XIFS_TIME_CFG,
+ MT_XIFS_TIME_CFG_OFDM_SIFS, 0xd);
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, MT_BKOFF_SLOT_CFG_CC_DELAY, 1);
+
+ /* init fce */
+ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+ case 0:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+ break;
+ case 1:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+ break;
+ default:
+ break;
+ }
+}
+
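
The EEPROM decode in mt76x2u_mac_fixup_xtal() above treats the low byte of MT_EE_XTAL_TRIM_2 as sign-magnitude: bit 7 carries the sign and 0xff means the field was never programmed. A small decode sketch (plain C; the sample words are invented):

#include <stdint.h>
#include <stdio.h>

/* sign-magnitude trim offset, as decoded above */
static int xtal_offset(uint16_t eep_val)
{
	int offset = eep_val & 0x7f;

	if ((eep_val & 0xff) == 0xff)
		return 0; /* unprogrammed */
	return (eep_val & 0x80) ? -offset : offset;
}

int main(void)
{
	printf("%d %d %d\n",
	       xtal_offset(0x0005),  /* +5 */
	       xtal_offset(0x0085),  /* -5 */
	       xtal_offset(0x00ff)); /* unset -> 0 */
	return 0;
}
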
+int mt76x2u_mac_reset(struct mt76x2_dev *dev)
+{
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5));
+
+ /* init pbf regs */
+ mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+ mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+
+ mt76_write_mac_initvals(dev);
+
+ mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
+ mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
+ mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
+ mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
+
+ mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
+ mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
+ mt76_wr(dev, MT_WMM_CWMAX, 0x34aa);
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
+
+ if (is_mt7612(dev))
+ mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+ mt76_set(dev, MT_EXT_CCA_CFG, 0xf000);
+ mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+ mt76x2u_mac_fixup_xtal(dev);
+
+ return 0;
+}
+
+int mt76x2u_mac_start(struct mt76x2_dev *dev)
+{
+ mt76x2u_mac_reset_counters(dev);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ wait_for_wpdma(dev);
+ usleep_range(50, 100);
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ return 0;
+}
+
+int mt76x2u_mac_stop(struct mt76x2_dev *dev)
+{
+ int i, count = 0, val;
+ bool stopped = false;
+ u32 rts_cfg;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -EIO;
+
+ rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
+ mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
+
+ /* wait for tx dma to stop */
+ for (i = 0; i < 2000; i++) {
+ val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+ if (!(val & MT_USB_DMA_CFG_TX_BUSY) && i > 10)
+ break;
+ usleep_range(50, 100);
+ }
+
+ /* page count on TxQ */
+ for (i = 0; i < 200; i++) {
+ if (!(mt76_rr(dev, 0x0438) & 0xffffffff) &&
+ !(mt76_rr(dev, 0x0a30) & 0x000000ff) &&
+ !(mt76_rr(dev, 0x0a34) & 0xff00ff00))
+ break;
+ usleep_range(10, 20);
+ }
+
+ /* disable tx-rx */
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_RX |
+ MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ /* Wait for MAC to become idle */
+ for (i = 0; i < 1000; i++) {
+ if (!(mt76_rr(dev, MT_MAC_STATUS) & MT_MAC_STATUS_TX) &&
+ !mt76_rr(dev, MT_BBP(IBI, 12))) {
+ stopped = true;
+ break;
+ }
+ usleep_range(10, 20);
+ }
+
+ if (!stopped) {
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+ }
+
+ /* page count on RxQ */
+ for (i = 0; i < 200; i++) {
+ if (!(mt76_rr(dev, 0x0430) & 0x00ff0000) &&
+ !(mt76_rr(dev, 0x0a30) & 0xffffffff) &&
+ !(mt76_rr(dev, 0x0a34) & 0xffffffff) &&
+ ++count > 10)
+ break;
+ msleep(50);
+ }
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 2000))
+ dev_warn(dev->mt76.dev, "MAC RX failed to stop\n");
+
+ /* wait for rx dma to stop */
+ for (i = 0; i < 2000; i++) {
+ val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+ if (!(val & MT_USB_DMA_CFG_RX_BUSY) && i > 10)
+ break;
+ usleep_range(50, 100);
+ }
+
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+
+ return 0;
+}
+
+void mt76x2u_mac_resume(struct mt76x2_dev *dev)
+{
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20));
+ mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1));
+}
+
+void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr)
+{
+ ether_addr_copy(dev->mt76.macaddr, addr);
+
+ if (!is_valid_ether_addr(dev->mt76.macaddr)) {
+ eth_random_addr(dev->mt76.macaddr);
+ dev_info(dev->mt76.dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->mt76.macaddr);
+ }
+
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1,
+ get_unaligned_le16(dev->mt76.macaddr + 4) |
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+}
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
new file mode 100644
index 000000000000..7367ba111119
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+
+static int mt76x2u_start(struct ieee80211_hw *hw)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ ret = mt76x2u_mac_start(dev);
+ if (ret)
+ goto out;
+
+ set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void mt76x2u_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+ clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ mt76x2u_stop_hw(dev);
+ mutex_unlock(&dev->mutex);
+}
+
+static int mt76x2u_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *)vif->drv_priv;
+ unsigned int idx = 0;
+
+ if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
+ mt76x2u_mac_setaddr(dev, vif->addr);
+
+ mvif->idx = idx;
+ mvif->group_wcid.idx = MT_VIF_WCID(idx);
+ mvif->group_wcid.hw_key_idx = -1;
+ mt76x2_txq_init(dev, vif->txq);
+
+ return 0;
+}
+
+static int
+mt76x2u_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ int err;
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ set_bit(MT76_RESET, &dev->mt76.state);
+
+ mt76_set_channel(&dev->mt76);
+
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
+ mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
+ mt76x2_mac_stop(dev, false);
+
+ err = mt76x2u_phy_set_channel(dev, chandef);
+
+ mt76x2u_mac_resume(dev);
+
+ clear_bit(MT76_RESET, &dev->mt76.state);
+ mt76_txq_schedule_all(&dev->mt76);
+
+ return err;
+}
+
+static void
+mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ mt76x2u_phy_channel_calibrate(dev);
+ mt76x2_apply_gain_adj(dev);
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ mt76_wr(dev, MT_MAC_BSSID_DW0,
+ get_unaligned_le32(info->bssid));
+ mt76_wr(dev, MT_MAC_BSSID_DW1,
+ get_unaligned_le16(info->bssid + 4));
+ }
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ int err = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ else
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ err = mt76x2u_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ dev->txpower_conf = hw->conf.power_level * 2;
+
+ /* convert to per-chain power for 2x2 devices */
+ dev->txpower_conf -= 6;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ mt76x2_phy_set_txpower(dev);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return err;
+}
+
+static void
+mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ clear_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+const struct ieee80211_ops mt76x2u_ops = {
+ .tx = mt76x2_tx,
+ .start = mt76x2u_start,
+ .stop = mt76x2u_stop,
+ .add_interface = mt76x2u_add_interface,
+ .remove_interface = mt76x2_remove_interface,
+ .sta_add = mt76x2_sta_add,
+ .sta_remove = mt76x2_sta_remove,
+ .set_key = mt76x2_set_key,
+ .ampdu_action = mt76x2_ampdu_action,
+ .config = mt76x2u_config,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .bss_info_changed = mt76x2u_bss_info_changed,
+ .configure_filter = mt76x2_configure_filter,
+ .conf_tx = mt76x2_conf_tx,
+ .sw_scan_start = mt76x2u_sw_scan,
+ .sw_scan_complete = mt76x2u_sw_scan_complete,
+ .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
new file mode 100644
index 000000000000..22c16d638baa
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+#define MT_CMD_HDR_LEN 4
+#define MT_INBAND_PACKET_MAX_LEN 192
+#define MT_MCU_MEMMAP_WLAN 0x410000
+
+#define MCU_FW_URB_MAX_PAYLOAD 0x3900
+#define MCU_ROM_PATCH_MAX_PAYLOAD 2048
+
+#define MT76U_MCU_ILM_OFFSET 0x80000
+#define MT76U_MCU_DLM_OFFSET 0x110000
+#define MT76U_MCU_ROM_PATCH_OFFSET 0x90000
+
+static int
+mt76x2u_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
+ u32 val)
+{
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_FUN_SET_OP,
+ func != Q_SELECT);
+}
+
+int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val)
+{
+ struct {
+ __le32 mode;
+ __le32 level;
+ } __packed __aligned(4) msg = {
+ .mode = cpu_to_le32(val ? RADIO_ON : RADIO_OFF),
+ .level = cpu_to_le32(0),
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_POWER_SAVING_OP,
+ false);
+}
+
+int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+ u8 channel)
+{
+ struct {
+ u8 cr_mode;
+ u8 temp;
+ u8 ch;
+ u8 _pad0;
+ __le32 cfg;
+ } __packed __aligned(4) msg = {
+ .cr_mode = type,
+ .temp = temp_level,
+ .ch = channel,
+ };
+ struct sk_buff *skb;
+ u32 val;
+
+ val = BIT(31);
+ val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+ val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+ msg.cfg = cpu_to_le32(val);
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_LOAD_CR, true);
+}
+
+int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan)
+{
+ struct {
+ u8 idx;
+ u8 scan;
+ u8 bw;
+ u8 _pad0;
+
+ __le16 chainmask;
+ u8 ext_chan;
+ u8 _pad1;
+
+ } __packed __aligned(4) msg = {
+ .idx = channel,
+ .scan = scan,
+ .bw = bw,
+ .chainmask = cpu_to_le16(dev->chainmask),
+ };
+ struct sk_buff *skb;
+
+ /* first set the channel without the extension channel info */
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+
+ mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
+
+ usleep_range(5000, 10000);
+
+ msg.ext_chan = 0xe0 + bw_index;
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
+}
+
+int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+ u32 val)
+{
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(type),
+ .value = cpu_to_le32(val),
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
+}
+
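
All of these MCU commands are fixed-layout little-endian structs, which is why every field goes through cpu_to_le32() before mt76u_mcu_msg_alloc(). A host-side sketch of what that buys (plain C with a GCC/Clang attribute; to_le32() is a portable stand-in, not the kernel helper):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* store a host-order value as little-endian bytes, like cpu_to_le32() */
static uint32_t to_le32(uint32_t v)
{
	uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff,
			 (v >> 16) & 0xff, (v >> 24) & 0xff };
	uint32_t out;

	memcpy(&out, b, sizeof(out));
	return out;
}

struct mcu_msg { /* same shape as the calibrate message above */
	uint32_t id;
	uint32_t value;
} __attribute__((packed, aligned(4)));

int main(void)
{
	struct mcu_msg msg = { .id = to_le32(1), .value = to_le32(0x20) };
	const uint8_t *p = (const uint8_t *)&msg;
	int i;

	for (i = 0; i < (int)sizeof(msg); i++)
		printf("%02x ", p[i]); /* 01 00 00 00 20 00 00 00 */
	printf("\n");
	return 0;
}
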
+int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+ bool force)
+{
+ struct {
+ __le32 channel;
+ __le32 gain_val;
+ } __packed __aligned(4) msg = {
+ .channel = cpu_to_le32(channel),
+ .gain_val = cpu_to_le32(gain),
+ };
+ struct sk_buff *skb;
+
+ if (force)
+ msg.channel |= cpu_to_le32(BIT(31));
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_INIT_GAIN_OP, true);
+}
+
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
+ bool ext, int rssi, u32 false_cca)
+{
+ struct {
+ __le32 channel;
+ __le32 rssi_val;
+ __le32 false_cca_val;
+ } __packed __aligned(4) msg = {
+ .rssi_val = cpu_to_le32(rssi),
+ .false_cca_val = cpu_to_le32(false_cca),
+ };
+ struct sk_buff *skb;
+ u32 val = channel;
+
+ if (ap)
+ val |= BIT(31);
+ if (ext)
+ val |= BIT(30);
+ msg.channel = cpu_to_le32(val);
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_DYNC_VGA_OP, true);
+}
+
+int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
+ struct mt76x2_tssi_comp *tssi_data)
+{
+ struct {
+ __le32 id;
+ struct mt76x2_tssi_comp data;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+ .data = *tssi_data,
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
+}
+
+static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev)
+{
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x12, 0, NULL, 0);
+}
+
+static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev)
+{
+ struct mt76_usb *usb = &dev->mt76.usb;
+ const u8 data[] = {
+ 0x6f, 0xfc, 0x08, 0x01,
+ 0x20, 0x04, 0x00, 0x00,
+ 0x00, 0x09, 0x00,
+ };
+
+ memcpy(usb->data, data, sizeof(data));
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_CLASS,
+ 0x12, 0, usb->data, sizeof(data));
+}
+
+static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev)
+{
+ struct mt76_usb *usb = &dev->mt76.usb;
+ u8 data[] = {
+ 0x6f, 0xfc, 0x05, 0x01,
+ 0x07, 0x01, 0x00, 0x04
+ };
+
+ memcpy(usb->data, data, sizeof(data));
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_CLASS,
+ 0x12, 0, usb->data, sizeof(data));
+}
+
+static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
+{
+ bool rom_protect = !is_mt7612(dev);
+ struct mt76x2_patch_header *hdr;
+ u32 val, patch_mask, patch_reg;
+ const struct firmware *fw;
+ int err;
+
+ if (rom_protect &&
+ !mt76_poll_msec(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+ dev_err(dev->mt76.dev,
+ "could not get hardware semaphore for ROM PATCH\n");
+ return -ETIMEDOUT;
+ }
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+ patch_mask = BIT(0);
+ patch_reg = MT_MCU_CLOCK_CTL;
+ } else {
+ patch_mask = BIT(1);
+ patch_reg = MT_MCU_COM_REG0;
+ }
+
+ if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+ dev_info(dev->mt76.dev, "ROM patch already applied\n");
+ return 0;
+ }
+
+ err = request_firmware(&fw, MT7662U_ROM_PATCH, dev->mt76.dev);
+ if (err < 0)
+ return err;
+
+ if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "failed to load firmware\n");
+ err = -EIO;
+ goto out;
+ }
+
+ hdr = (struct mt76x2_patch_header *)fw->data;
+ dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+ /* enable USB_DMA_CFG */
+ val = MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+
+ /* vendor reset */
+ mt76u_mcu_fw_reset(&dev->mt76);
+ usleep_range(5000, 10000);
+
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+ err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+ fw->size - sizeof(*hdr),
+ MCU_ROM_PATCH_MAX_PAYLOAD,
+ MT76U_MCU_ROM_PATCH_OFFSET);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ mt76x2u_mcu_enable_patch(dev);
+ mt76x2u_mcu_reset_wmt(dev);
+ mdelay(20);
+
+ if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 100)) {
+ dev_err(dev->mt76.dev, "failed to load ROM patch\n");
+ err = -ETIMEDOUT;
+ }
+
+out:
+ if (rom_protect)
+ mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+ release_firmware(fw);
+ return err;
+}
+
+static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
+{
+ u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET;
+ const struct mt76x2_fw_header *hdr;
+ int err, len, ilm_len, dlm_len;
+ const struct firmware *fw;
+
+ err = request_firmware(&fw, MT7662U_FIRMWARE, dev->mt76.dev);
+ if (err < 0)
+ return err;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt76x2_fw_header *)fw->data;
+ ilm_len = le32_to_cpu(hdr->ilm_len);
+ dlm_len = le32_to_cpu(hdr->dlm_len);
+ len = sizeof(*hdr) + ilm_len + dlm_len;
+ if (fw->size != len) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+ val = le16_to_cpu(hdr->build_ver);
+ dev_info(dev->mt76.dev, "Build: %x\n", val);
+ dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+ /* vendor reset */
+ mt76u_mcu_fw_reset(&dev->mt76);
+ usleep_range(5000, 10000);
+
+ /* enable USB_DMA_CFG */
+ val = MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+ /* load ILM */
+ err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+ ilm_len, MCU_FW_URB_MAX_PAYLOAD,
+ MT76U_MCU_ILM_OFFSET);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ /* load DLM */
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ dlm_offset += 0x800;
+ err = mt76u_mcu_fw_send_data(&dev->mt76,
+ fw->data + sizeof(*hdr) + ilm_len,
+ dlm_len, MCU_FW_URB_MAX_PAYLOAD,
+ dlm_offset);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ mt76x2u_mcu_load_ivb(dev);
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 100)) {
+ dev_err(dev->mt76.dev, "firmware failed to start\n");
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ dev_dbg(dev->mt76.dev, "firmware running\n");
+
+out:
+ release_firmware(fw);
+ return err;
+}
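
mt76x2u_mcu_load_firmware() only accepts an image whose size is exactly header + ILM + DLM, and it unpacks the version word nibble by nibble. A userspace-checkable sketch of both steps; the header layout here is an illustrative stand-in, not the real struct mt76x2_fw_header definition:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fw_hdr {                 /* illustrative stand-in only */
            uint32_t ilm_len;       /* little-endian in the real image */
            uint32_t dlm_len;
            uint16_t fw_ver;
            uint16_t build_ver;
            uint8_t  build_time[16];
    };

    static int fw_layout_ok(size_t size, uint32_t ilm, uint32_t dlm)
    {
            return size >= sizeof(struct fw_hdr) &&
                   size == sizeof(struct fw_hdr) + ilm + dlm;
    }

    int main(void)
    {
            uint16_t ver = 0x1309;  /* packs major.minor.patch in nibbles */

            assert(fw_layout_ok(sizeof(struct fw_hdr) + 100 + 50, 100, 50));
            /* prints "1.3.09", matching the driver's "%d.%d.%02d" */
            printf("%d.%d.%02d\n", (ver >> 12) & 0xf, (ver >> 8) & 0xf,
                   ver & 0xf);
            return 0;
    }
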
+
+int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev)
+{
+ int err;
+
+ err = mt76x2u_mcu_load_rom_patch(dev);
+ if (err < 0)
+ return err;
+
+ return mt76x2u_mcu_load_firmware(dev);
+}
+
+int mt76x2u_mcu_init(struct mt76x2_dev *dev)
+{
+ int err;
+
+ err = mt76x2u_mcu_function_select(dev, Q_SELECT, 1);
+ if (err < 0)
+ return err;
+
+ return mt76x2u_mcu_set_radio_state(dev, true);
+}
+
+void mt76x2u_mcu_deinit(struct mt76x2_dev *dev)
+{
+ struct mt76_usb *usb = &dev->mt76.usb;
+
+ usb_kill_urb(usb->mcu.res.urb);
+ mt76u_buf_free(&usb->mcu.res);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
new file mode 100644
index 000000000000..5158063d0c2e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+ val &= ~BIT(4);
+
+ switch (dev->chainmask & 0xf) {
+ case 2:
+ val |= BIT(3);
+ break;
+ default:
+ val &= ~BIT(3);
+ break;
+ }
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
+}
+
+void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev)
+{
+ int txpath;
+
+ txpath = (dev->chainmask >> 8) & 0xf;
+ switch (txpath) {
+ case 2:
+ mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
+ break;
+ default:
+ mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
+ break;
+ }
+}
+
+void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+ if (mt76x2_channel_silent(dev))
+ return;
+
+ mt76x2u_mac_stop(dev);
+
+ if (is_5ghz)
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_LC, 0);
+
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+
+ mt76x2u_mac_resume(dev);
+}
+
+static void
+mt76x2u_phy_tssi_compensate(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76x2_tssi_comp t = {};
+
+ if (!dev->cal.tssi_cal_done)
+ return;
+
+ if (!dev->cal.tssi_comp_pending) {
+ /* TSSI trigger */
+ t.cal_mode = BIT(0);
+ mt76x2u_mcu_tssi_comp(dev, &t);
+ dev->cal.tssi_comp_pending = true;
+ } else {
+ if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
+ return;
+
+ dev->cal.tssi_comp_pending = false;
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ if (mt76x2_ext_pa_enabled(dev, chan->band))
+ t.pa_mode = 1;
+
+ t.cal_mode = BIT(1);
+ t.slope0 = txp.chain[0].tssi_slope;
+ t.offset0 = txp.chain[0].tssi_offset;
+ t.slope1 = txp.chain[1].tssi_slope;
+ t.offset1 = txp.chain[1].tssi_offset;
+ mt76x2u_mcu_tssi_comp(dev, &t);
+
+ if (t.pa_mode || dev->cal.dpd_cal_done)
+ return;
+
+ usleep_range(10000, 20000);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
+ dev->cal.dpd_cal_done = true;
+ }
+}
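
The compensation above is a two-phase state machine spread across invocations of the calibration work: the first pass arms a TSSI measurement (cal_mode BIT(0)), and a later pass, once the busy bit in MT_BBP(CORE, 34) clears, programs the per-chain slopes and offsets (cal_mode BIT(1)). A standalone sketch of the flow, with the hardware poll reduced to a flag:

    #include <stdbool.h>
    #include <stdio.h>

    struct tssi_state {
            bool pending;
    };

    static void tssi_step(struct tssi_state *st, bool hw_busy)
    {
            if (!st->pending) {
                    printf("trigger measurement\n");
                    st->pending = true;
            } else if (!hw_busy) {
                    printf("program slope/offset compensation\n");
                    st->pending = false;
            }
    }

    int main(void)
    {
            struct tssi_state st = { .pending = false };

            tssi_step(&st, false);  /* arm the measurement */
            tssi_step(&st, true);   /* still measuring: nothing to do */
            tssi_step(&st, false);  /* apply the result */
            return 0;
    }
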
+
+static void
+mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev)
+{
+ u8 channel = dev->mt76.chandef.chan->hw_value;
+ int freq, freq1;
+ u32 false_cca;
+
+ freq = dev->mt76.chandef.chan->center_freq;
+ freq1 = dev->mt76.chandef.center_freq1;
+
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_80: {
+ int ch_group_index;
+
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ channel += 6 - ch_group_index * 4;
+ break;
+ }
+ case NL80211_CHAN_WIDTH_40:
+ if (freq1 > freq)
+ channel += 2;
+ else
+ channel -= 2;
+ break;
+ default:
+ break;
+ }
+
+ dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
+ false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
+ mt76_rr(dev, MT_RX_STAT_1));
+
+ mt76x2u_mcu_set_dynamic_vga(dev, channel, false, false,
+ dev->cal.avg_rssi_all, false_cca);
+}
+
+void mt76x2u_phy_calibrate(struct work_struct *work)
+{
+ struct mt76x2_dev *dev;
+
+ dev = container_of(work, struct mt76x2_dev, cal_work.work);
+ mt76x2u_phy_tssi_compensate(dev);
+ mt76x2u_phy_update_channel_gain(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 ext_cca_chan[4] = {
+ [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+ [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+ [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+ [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+ };
+ bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ struct ieee80211_channel *chan = chandef->chan;
+ u8 channel = chan->hw_value, bw, bw_index;
+ int ch_group_index, freq, freq1, ret;
+
+ dev->cal.channel_cal_done = false;
+ freq = chandef->chan->center_freq;
+ freq1 = chandef->center_freq1;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = 1;
+ if (freq1 > freq) {
+ bw_index = 1;
+ ch_group_index = 0;
+ } else {
+ bw_index = 3;
+ ch_group_index = 1;
+ }
+ channel += 2 - ch_group_index * 4;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ bw = 2;
+ bw_index = ch_group_index;
+ channel += 6 - ch_group_index * 4;
+ break;
+ default:
+ bw = 0;
+ bw_index = 0;
+ ch_group_index = 0;
+ break;
+ }
+
+ mt76x2_read_rx_gain(dev);
+ mt76x2_phy_set_txpower_regs(dev, chan->band);
+ mt76x2_configure_tx_delay(dev, chan->band, bw);
+ mt76x2_phy_set_txpower(dev);
+
+ mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
+ mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+
+ mt76_rmw(dev, MT_EXT_CCA_CFG,
+ (MT_EXT_CCA_CFG_CCA0 |
+ MT_EXT_CCA_CFG_CCA1 |
+ MT_EXT_CCA_CFG_CCA2 |
+ MT_EXT_CCA_CFG_CCA3 |
+ MT_EXT_CCA_CFG_CCA_MASK),
+ ext_cca_chan[ch_group_index]);
+
+ ret = mt76x2u_mcu_set_channel(dev, channel, bw, bw_index, scan);
+ if (ret)
+ return ret;
+
+ mt76x2u_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+ /* Enable LDPC Rx */
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
+ if (!dev->cal.init_cal_done) {
+ u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+
+ if (val != 0xff)
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_R, 0);
+ }
+
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+
+ /* Rx LPF calibration */
+ if (!dev->cal.init_cal_done)
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_RC, 0);
+ dev->cal.init_cal_done = true;
+
+ mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
+ mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+ mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+ mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101b3f);
+
+ mt76_set(dev, MT_BBP(TXO, 4), BIT(25));
+ mt76_set(dev, MT_BBP(RXO, 13), BIT(8));
+
+ if (scan)
+ return 0;
+
+ if (mt76x2_tssi_enabled(dev)) {
+ /* init default values for temp compensation */
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+ 0x38);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+ 0x38);
+
+ /* init tssi calibration */
+ if (!mt76x2_channel_silent(dev)) {
+ struct ieee80211_channel *chan;
+ u32 flag = 0;
+
+ chan = dev->mt76.chandef.chan;
+ if (chan->band == NL80211_BAND_5GHZ)
+ flag |= BIT(0);
+ if (mt76x2_ext_pa_enabled(dev, chan->band))
+ flag |= BIT(8);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+ dev->cal.tssi_cal_done = true;
+ }
+ }
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+ return 0;
+}
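
Both mt76x2u_phy_update_channel_gain() and mt76x2u_phy_set_channel() map the primary channel onto the centre of its 40/80 MHz block with the same arithmetic: for VHT80 the primary sits at freq1 - 30, -10, +10 or +30 MHz, giving group index 0..3, and the channel handed to the MCU is primary + 6 - index * 4. A compilable check of the 80 MHz case:

    #include <assert.h>

    static int ch_group_index(int freq, int freq1)
    {
            return (freq - freq1 + 30) / 20;
    }

    int main(void)
    {
            /* channel 36 (5180 MHz) in the 80 MHz block centred at 5210 */
            assert(ch_group_index(5180, 5210) == 0);  /* 36 + 6 - 0*4 = 42 */
            /* channel 48 (5240 MHz), same block */
            assert(ch_group_index(5240, 5210) == 3);  /* 48 + 6 - 3*4 = 42 */
            return 0;
    }
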
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index e96956710fb2..af48d43bb7dc 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -51,7 +51,7 @@ __mt76_get_txwi(struct mt76_dev *dev)
return t;
}
-static struct mt76_txwi_cache *
+struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
@@ -91,80 +91,6 @@ mt76_txq_get_qid(struct ieee80211_txq *txq)
return txq->ac;
}
-int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
-{
- struct mt76_queue_entry e;
- struct mt76_txwi_cache *t;
- struct mt76_queue_buf buf[32];
- struct sk_buff *iter;
- dma_addr_t addr;
- int len;
- u32 tx_info = 0;
- int n, ret;
-
- t = mt76_get_txwi(dev);
- if (!t) {
- ieee80211_free_txskb(dev->hw, skb);
- return -ENOMEM;
- }
-
- dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
- &tx_info);
- dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- if (ret < 0)
- goto free;
-
- len = skb->len - skb->data_len;
- addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr)) {
- ret = -ENOMEM;
- goto free;
- }
-
- n = 0;
- buf[n].addr = t->dma_addr;
- buf[n++].len = dev->drv->txwi_size;
- buf[n].addr = addr;
- buf[n++].len = len;
-
- skb_walk_frags(skb, iter) {
- if (n == ARRAY_SIZE(buf))
- goto unmap;
-
- addr = dma_map_single(dev->dev, iter->data, iter->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
- goto unmap;
-
- buf[n].addr = addr;
- buf[n++].len = iter->len;
- }
-
- if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
- goto unmap;
-
- return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
-
-unmap:
- ret = -ENOMEM;
- for (n--; n > 0; n--)
- dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
- DMA_TO_DEVICE);
-
-free:
- e.skb = skb;
- e.txwi = t;
- dev->drv->tx_complete_skb(dev, q, &e, true);
- mt76_put_txwi(dev, t);
- return ret;
-}
-EXPORT_SYMBOL_GPL(mt76_tx_queue_skb);
-
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
@@ -185,7 +111,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
q = &dev->q_tx[qid];
spin_lock_bh(&q->lock);
- mt76_tx_queue_skb(dev, q, skb, wcid, sta);
+ dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
dev->queue_ops->kick(dev, q);
if (q->queued > q->ndesc - 8)
@@ -241,7 +167,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
info->flags |= IEEE80211_TX_STATUS_EOSP;
mt76_skb_set_moredata(skb, !last);
- mt76_tx_queue_skb(dev, hwq, skb, wcid, sta);
+ dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
}
void
@@ -321,7 +247,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
if (ampdu)
mt76_check_agg_ssn(mtxq, skb);
- idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+ idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
if (idx < 0)
return idx;
@@ -356,7 +282,8 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
if (cur_ampdu)
mt76_check_agg_ssn(mtxq, skb);
- idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+ idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
+ txq->sta);
if (idx < 0)
return idx;
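
The tx.c hunks above replace the direct mt76_tx_queue_skb() calls with an indirection through dev->queue_ops, so the DMA and USB back-ends can each supply their own enqueue and kick implementations. A standalone sketch of the dispatch pattern; the names are illustrative, not the real mt76 definitions:

    #include <stdio.h>

    struct dev;

    struct queue_ops {
            int (*tx_queue_skb)(struct dev *dev, int qid);
            void (*kick)(struct dev *dev, int qid);
    };

    struct dev {
            const struct queue_ops *queue_ops;
    };

    static int usb_tx_queue_skb(struct dev *dev, int qid)
    {
            printf("usb backend queues a frame on %d\n", qid);
            return 0;
    }

    static void usb_tx_kick(struct dev *dev, int qid)
    {
            printf("usb backend kicks queue %d\n", qid);
    }

    static const struct queue_ops usb_ops = {
            .tx_queue_skb = usb_tx_queue_skb,
            .kick = usb_tx_kick,
    };

    int main(void)
    {
            struct dev d = { .queue_ops = &usb_ops };

            d.queue_ops->tx_queue_skb(&d, 0);   /* as in mt76_tx() */
            d.queue_ops->kick(&d, 0);
            return 0;
    }
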
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
new file mode 100644
index 000000000000..7780b07543bb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+#include "usb_trace.h"
+#include "dma.h"
+
+#define MT_VEND_REQ_MAX_RETRY 10
+#define MT_VEND_REQ_TOUT_MS 300
+
+/* should be called with usb_ctrl_mtx locked */
+static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned int pipe;
+ int i, ret;
+
+ pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
+ : usb_sndctrlpipe(udev, 0);
+ for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+ if (test_bit(MT76_REMOVED, &dev->state))
+ return -EIO;
+
+ ret = usb_control_msg(udev, pipe, req, req_type, val,
+ offset, buf, len, MT_VEND_REQ_TOUT_MS);
+ if (ret == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->state);
+ if (ret >= 0 || ret == -ENODEV)
+ return ret;
+ usleep_range(5000, 10000);
+ }
+
+ dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
+ req, offset, ret);
+ return ret;
+}
+
+int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len)
+{
+ int ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = __mt76u_vendor_request(dev, req, req_type,
+ val, offset, buf, len);
+ trace_usb_reg_wr(dev, offset, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76u_vendor_request);
+
+/* should be called with usb_ctrl_mtx locked */
+static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
+ struct mt76_usb *usb = &dev->usb;
+ u32 data = ~0;
+ u16 offset;
+ int ret;
+ u8 req;
+
+ switch (addr & MT_VEND_TYPE_MASK) {
+ case MT_VEND_TYPE_EEPROM:
+ req = MT_VEND_READ_EEPROM;
+ break;
+ case MT_VEND_TYPE_CFG:
+ req = MT_VEND_READ_CFG;
+ break;
+ default:
+ req = MT_VEND_MULTI_READ;
+ break;
+ }
+ offset = addr & ~MT_VEND_TYPE_MASK;
+
+ ret = __mt76u_vendor_request(dev, req,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ 0, offset, usb->data, sizeof(__le32));
+ if (ret == sizeof(__le32))
+ data = get_unaligned_le32(usb->data);
+ trace_usb_reg_rr(dev, addr, data);
+
+ return data;
+}
+
+u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = __mt76u_rr(dev, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+/* should be called with usb_ctrl_mtx locked */
+static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ struct mt76_usb *usb = &dev->usb;
+ u16 offset;
+ u8 req;
+
+ switch (addr & MT_VEND_TYPE_MASK) {
+ case MT_VEND_TYPE_CFG:
+ req = MT_VEND_WRITE_CFG;
+ break;
+ default:
+ req = MT_VEND_MULTI_WRITE;
+ break;
+ }
+ offset = addr & ~MT_VEND_TYPE_MASK;
+
+ put_unaligned_le32(val, usb->data);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR, 0,
+ offset, usb->data, sizeof(__le32));
+ trace_usb_reg_wr(dev, addr, val);
+}
+
+void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ __mt76u_wr(dev, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= __mt76u_rr(dev, addr) & ~mask;
+ __mt76u_wr(dev, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
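
mt76u_rmw() keeps the bits outside mask and overlays val on top, holding usb_ctrl_mtx across both transfers so the read and write cannot interleave with another control message. A standalone check of the arithmetic; as in the driver, the caller is assumed to pass val already confined to mask:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t rmw(uint32_t old, uint32_t mask, uint32_t val)
    {
            return (old & ~mask) | val;
    }

    int main(void)
    {
            assert(rmw(0xffff0000, 0x000000ff, 0x000000ab) == 0xffff00ab);
            assert(rmw(0x12345678, 0x0000ff00, 0x00000000) == 0x12340078);
            return 0;
    }
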
+
+static void mt76u_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ const u32 *val = data;
+ int i, ret;
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ for (i = 0; i < (len / 4); i++) {
+ put_unaligned_le32(val[i], usb->data);
+ ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0, offset + i * 4, usb->data,
+ sizeof(__le32));
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
+ const u16 offset, const u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ val & 0xffff, offset, NULL, 0);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ val >> 16, offset + 2, NULL, 0);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+EXPORT_SYMBOL_GPL(mt76u_single_wr);
+
+static int
+mt76u_set_endpoints(struct usb_interface *intf,
+ struct mt76_usb *usb)
+{
+ struct usb_host_interface *intf_desc = intf->cur_altsetting;
+ struct usb_endpoint_descriptor *ep_desc;
+ int i, in_ep = 0, out_ep = 0;
+
+ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &intf_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(ep_desc) &&
+ in_ep < __MT_EP_IN_MAX) {
+ usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
+ usb->in_max_packet = usb_endpoint_maxp(ep_desc);
+ in_ep++;
+ } else if (usb_endpoint_is_bulk_out(ep_desc) &&
+ out_ep < __MT_EP_OUT_MAX) {
+ usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
+ usb->out_max_packet = usb_endpoint_maxp(ep_desc);
+ out_ep++;
+ }
+ }
+
+ if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
+ return -EINVAL;
+ return 0;
+}
+
+static int
+mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
+ int nsgs, int len, int sglen)
+{
+ struct urb *urb = buf->urb;
+ int i;
+
+ for (i = 0; i < nsgs; i++) {
+ struct page *page;
+ void *data;
+ int offset;
+
+ data = netdev_alloc_frag(len);
+ if (!data)
+ break;
+
+ page = virt_to_head_page(data);
+ offset = data - page_address(page);
+ sg_set_page(&urb->sg[i], page, sglen, offset);
+ }
+
+ if (i < nsgs) {
+ int j;
+
+ for (j = nsgs; j < urb->num_sgs; j++)
+ skb_free_frag(sg_virt(&urb->sg[j]));
+ urb->num_sgs = i;
+ }
+
+ urb->num_sgs = max_t(int, i, urb->num_sgs);
+ buf->len = urb->num_sgs * sglen;
+ sg_init_marker(urb->sg, urb->num_sgs);
+
+ return i ? : -ENOMEM;
+}
+
+int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
+ int nsgs, int len, int sglen, gfp_t gfp)
+{
+ buf->urb = usb_alloc_urb(0, gfp);
+ if (!buf->urb)
+ return -ENOMEM;
+
+ buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg),
+ gfp);
+ if (!buf->urb->sg)
+ return -ENOMEM;
+
+ sg_init_table(buf->urb->sg, nsgs);
+ buf->dev = dev;
+
+ return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
+}
+EXPORT_SYMBOL_GPL(mt76u_buf_alloc);
+
+void mt76u_buf_free(struct mt76u_buf *buf)
+{
+ struct urb *urb = buf->urb;
+ int i;
+
+ for (i = 0; i < urb->num_sgs; i++)
+ skb_free_frag(sg_virt(&urb->sg[i]));
+ usb_free_urb(buf->urb);
+}
+EXPORT_SYMBOL_GPL(mt76u_buf_free);
+
+int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
+ struct mt76u_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned int pipe;
+
+ if (dir == USB_DIR_IN)
+ pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
+ else
+ pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
+
+ usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
+ complete_fn, context);
+
+ return usb_submit_urb(buf->urb, gfp);
+}
+EXPORT_SYMBOL_GPL(mt76u_submit_buf);
+
+static inline struct mt76u_buf
+*mt76u_get_next_rx_entry(struct mt76_queue *q)
+{
+ struct mt76u_buf *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (q->queued > 0) {
+ buf = &q->entry[q->head].ubuf;
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued--;
+ }
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return buf;
+}
+
+static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
+{
+ u16 dma_len, min_len;
+
+ dma_len = get_unaligned_le16(data);
+ min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
+ MT_FCE_INFO_LEN;
+
+ if (data_len < min_len || WARN_ON(!dma_len) ||
+ WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
+ WARN_ON(dma_len & 0x3))
+ return -EINVAL;
+ return dma_len;
+}
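
mt76u_get_rx_entry_len() trusts the 16-bit DMA length at the head of the transfer only after four sanity checks: minimum frame size, non-zero length, length plus DMA header fitting inside the URB, and 4-byte alignment. A standalone restatement, with the header sizes as assumed constants and -1 standing in for -EINVAL:

    #include <assert.h>
    #include <stdint.h>

    #define DMA_HDR_LEN   4         /* assumed MT_DMA_HDR_LEN */
    #define RXWI_LEN      32        /* assumed MT_RX_RXWI_LEN */
    #define FCE_INFO_LEN  4         /* assumed MT_FCE_INFO_LEN */

    static int rx_entry_len(uint16_t dma_len, uint32_t data_len)
    {
            uint32_t min_len = DMA_HDR_LEN + RXWI_LEN + FCE_INFO_LEN;

            if (data_len < min_len || !dma_len ||
                dma_len + DMA_HDR_LEN > data_len || (dma_len & 0x3))
                    return -1;
            return dma_len;
    }

    int main(void)
    {
            assert(rx_entry_len(60, 100) == 60);
            assert(rx_entry_len(62, 100) == -1);    /* not 4-byte aligned */
            assert(rx_entry_len(100, 100) == -1);   /* overruns the URB */
            return 0;
    }
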
+
+static int
+mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ u8 *data = sg_virt(&urb->sg[0]);
+ int data_len, len, nsgs = 1;
+ struct sk_buff *skb;
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
+ return 0;
+
+ len = mt76u_get_rx_entry_len(data, urb->actual_length);
+ if (len < 0)
+ return 0;
+
+ skb = build_skb(data, q->buf_size);
+ if (!skb)
+ return 0;
+
+ data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ if (skb->tail + data_len > skb->end) {
+ dev_kfree_skb(skb);
+ return 1;
+ }
+
+ __skb_put(skb, data_len);
+ len -= data_len;
+
+ while (len > 0) {
+ data_len = min_t(int, len, urb->sg[nsgs].length);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ sg_page(&urb->sg[nsgs]),
+ urb->sg[nsgs].offset,
+ data_len, q->buf_size);
+ len -= data_len;
+ nsgs++;
+ }
+ dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
+
+ return nsgs;
+}
+
+static void mt76u_complete_rx(struct urb *urb)
+{
+ struct mt76_dev *dev = urb->context;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ unsigned long flags;
+
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -ENOENT:
+ return;
+ default:
+ dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
+ /* fall through */
+ case 0:
+ break;
+ }
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
+ goto out;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued++;
+ tasklet_schedule(&dev->usb.rx_tasklet);
+out:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static void mt76u_rx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int err, nsgs, buf_len = q->buf_size;
+ struct mt76u_buf *buf;
+
+ rcu_read_lock();
+
+ while (true) {
+ buf = mt76u_get_next_rx_entry(q);
+ if (!buf)
+ break;
+
+ nsgs = mt76u_process_rx_entry(dev, buf->urb);
+ if (nsgs > 0) {
+ err = mt76u_fill_rx_sg(dev, buf, nsgs,
+ buf_len,
+ SKB_WITH_OVERHEAD(buf_len));
+ if (err < 0)
+ break;
+ }
+ mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
+ buf, GFP_ATOMIC,
+ mt76u_complete_rx, dev);
+ }
+ mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+
+ rcu_read_unlock();
+}
+
+int mt76u_submit_rx_buffers(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ unsigned long flags;
+ int i, err = 0;
+
+ spin_lock_irqsave(&q->lock, flags);
+ for (i = 0; i < q->ndesc; i++) {
+ err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
+ &q->entry[i].ubuf, GFP_ATOMIC,
+ mt76u_complete_rx, dev);
+ if (err < 0)
+ break;
+ }
+ q->head = q->tail = 0;
+ q->queued = 0;
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
+
+static int mt76u_alloc_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i, err, nsgs;
+
+ spin_lock_init(&q->lock);
+ q->entry = devm_kzalloc(dev->dev,
+ MT_NUM_RX_ENTRIES * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ if (mt76u_check_sg(dev)) {
+ q->buf_size = MT_RX_BUF_SIZE;
+ nsgs = MT_SG_MAX_SIZE;
+ } else {
+ q->buf_size = PAGE_SIZE;
+ nsgs = 1;
+ }
+
+ for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
+ err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
+ nsgs, q->buf_size,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+ }
+ q->ndesc = MT_NUM_RX_ENTRIES;
+
+ return mt76u_submit_rx_buffers(dev);
+}
+
+static void mt76u_free_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ mt76u_buf_free(&q->entry[i].ubuf);
+}
+
+static void mt76u_stop_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ usb_kill_urb(q->entry[i].ubuf.urb);
+}
+
+int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
+{
+ struct sk_buff *iter, *last = skb;
+ u32 info, pad;
+
+ /* Buffer layout:
+ * | 4B | xfer len | pad | 4B |
+ * | TXINFO | pkt/cmd | zero pad to 4B | zero |
+ *
+ * length field of TXINFO should be set to 'xfer len'.
+ */
+ info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ skb_walk_frags(skb, iter) {
+ last = iter;
+ if (!iter->next) {
+ skb->data_len += pad;
+ skb->len += pad;
+ break;
+ }
+ }
+
+ if (unlikely(pad)) {
+ if (__skb_pad(last, pad, true))
+ return -ENOMEM;
+ __skb_put(last, pad);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
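
Per the buffer-layout comment above, once the 4-byte TXINFO word has been pushed the frame is zero-padded to a 4-byte boundary and then followed by four more zero bytes, so pad = round_up(len, 4) + 4 - len. A compilable check of the aligned and unaligned cases:

    #include <assert.h>

    static unsigned int round_up4(unsigned int x)
    {
            return (x + 3) & ~3u;
    }

    int main(void)
    {
            /* len already includes the TXINFO header, as in the driver */
            assert(round_up4(60) + 4 - 60 == 4);    /* aligned: trailer only */
            assert(round_up4(61) + 4 - 61 == 7);    /* 3 pad + 4 trailer */
            return 0;
    }
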
+
+static void mt76u_tx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76u_buf *buf;
+ struct mt76_queue *q;
+ bool wake;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+
+ spin_lock_bh(&q->lock);
+ while (true) {
+ buf = &q->entry[q->head].ubuf;
+ if (!buf->done || !q->queued)
+ break;
+
+ dev->drv->tx_complete_skb(dev, q,
+ &q->entry[q->head],
+ false);
+
+ if (q->entry[q->head].schedule) {
+ q->entry[q->head].schedule = false;
+ q->swq_queued--;
+ }
+
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued--;
+ }
+ mt76_txq_schedule(dev, q);
+ wake = q->queued < q->ndesc - 8;
+ if (!q->queued)
+ wake_up(&dev->tx_wait);
+
+ spin_unlock_bh(&q->lock);
+
+ if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
+ ieee80211_queue_delayed_work(dev->hw,
+ &dev->usb.stat_work,
+ msecs_to_jiffies(10));
+
+ if (wake)
+ ieee80211_wake_queue(dev->hw, i);
+ }
+}
+
+static void mt76u_tx_status_data(struct work_struct *work)
+{
+ struct mt76_usb *usb;
+ struct mt76_dev *dev;
+ u8 update = 1;
+ u16 count = 0;
+
+ usb = container_of(work, struct mt76_usb, stat_work.work);
+ dev = container_of(usb, struct mt76_dev, usb);
+
+ while (true) {
+ if (test_bit(MT76_REMOVED, &dev->state))
+ break;
+
+ if (!dev->drv->tx_status_data(dev, &update))
+ break;
+ count++;
+ }
+
+ if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
+ ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
+ msecs_to_jiffies(10));
+ else
+ clear_bit(MT76_READING_STATS, &dev->state);
+}
+
+static void mt76u_complete_tx(struct urb *urb)
+{
+ struct mt76u_buf *buf = urb->context;
+ struct mt76_dev *dev = buf->dev;
+
+ if (mt76u_urb_error(urb))
+ dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
+ buf->done = true;
+
+ tasklet_schedule(&dev->usb.tx_tasklet);
+}
+
+static int
+mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
+{
+ int nsgs = 1 + skb_shinfo(skb)->nr_frags;
+ struct sk_buff *iter;
+
+ skb_walk_frags(skb, iter)
+ nsgs += 1 + skb_shinfo(iter)->nr_frags;
+
+ memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);
+
+ nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
+ sg_init_marker(urb->sg, nsgs);
+ urb->num_sgs = nsgs;
+
+ return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
+}
+
+static int
+mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ u8 ep = q2ep(q->hw_idx);
+ struct mt76u_buf *buf;
+ u16 idx = q->tail;
+ unsigned int pipe;
+ int err;
+
+ if (q->queued == q->ndesc)
+ return -ENOSPC;
+
+ err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
+ if (err < 0)
+ return err;
+
+ buf = &q->entry[idx].ubuf;
+ buf->done = false;
+
+ err = mt76u_tx_build_sg(skb, buf->urb);
+ if (err < 0)
+ return err;
+
+ pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
+ usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
+ mt76u_complete_tx, buf);
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->entry[idx].skb = skb;
+ q->queued++;
+
+ return idx;
+}
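
The USB tx path drives one ring with three indices: tail advances in mt76u_tx_queue_skb() when a frame is staged, first advances in mt76u_tx_kick() as URBs are handed to the USB core, and head advances in the tx tasklet as completions are reaped. A standalone sketch of that accounting with the URB handling stubbed out:

    #include <assert.h>

    #define NDESC 8

    struct ring {
            int head, tail, first, queued;
    };

    static int ring_queue(struct ring *r)
    {
            if (r->queued == NDESC)
                    return -1;              /* -ENOSPC in the driver */
            r->tail = (r->tail + 1) % NDESC;
            r->queued++;
            return 0;
    }

    static void ring_kick(struct ring *r)
    {
            while (r->first != r->tail)     /* usb_submit_urb() per slot */
                    r->first = (r->first + 1) % NDESC;
    }

    static void ring_complete(struct ring *r)
    {
            r->head = (r->head + 1) % NDESC;
            r->queued--;
    }

    int main(void)
    {
            struct ring r = { 0 };

            assert(ring_queue(&r) == 0);
            ring_kick(&r);
            ring_complete(&r);
            assert(r.queued == 0 && r.head == r.tail);
            return 0;
    }
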
+
+static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ struct mt76u_buf *buf;
+ int err;
+
+ while (q->first != q->tail) {
+ buf = &q->entry[q->first].ubuf;
+ err = usb_submit_urb(buf->urb, GFP_ATOMIC);
+ if (err < 0) {
+ if (err == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->state);
+ else
+ dev_err(dev->dev, "tx urb submit failed:%d\n",
+ err);
+ break;
+ }
+ q->first = (q->first + 1) % q->ndesc;
+ }
+}
+
+static int mt76u_alloc_tx(struct mt76_dev *dev)
+{
+ struct mt76u_buf *buf;
+ struct mt76_queue *q;
+ size_t size;
+ int i, j;
+
+ size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ spin_lock_init(&q->lock);
+ INIT_LIST_HEAD(&q->swq);
+ q->hw_idx = q2hwq(i);
+
+ q->entry = devm_kzalloc(dev->dev,
+ MT_NUM_TX_ENTRIES * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->ndesc = MT_NUM_TX_ENTRIES;
+ for (j = 0; j < q->ndesc; j++) {
+ buf = &q->entry[j].ubuf;
+ buf->dev = dev;
+
+ buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!buf->urb)
+ return -ENOMEM;
+
+ buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+ if (!buf->urb->sg)
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static void mt76u_free_tx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i, j;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ for (j = 0; j < q->ndesc; j++)
+ usb_free_urb(q->entry[j].ubuf.urb);
+ }
+}
+
+static void mt76u_stop_tx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i, j;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ for (j = 0; j < q->ndesc; j++)
+ usb_kill_urb(q->entry[j].ubuf.urb);
+ }
+}
+
+void mt76u_stop_queues(struct mt76_dev *dev)
+{
+ tasklet_disable(&dev->usb.rx_tasklet);
+ tasklet_disable(&dev->usb.tx_tasklet);
+
+ mt76u_stop_rx(dev);
+ mt76u_stop_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_stop_queues);
+
+void mt76u_stop_stat_wk(struct mt76_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->usb.stat_work);
+ clear_bit(MT76_READING_STATS, &dev->state);
+}
+EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);
+
+void mt76u_queues_deinit(struct mt76_dev *dev)
+{
+ mt76u_stop_queues(dev);
+
+ mt76u_free_rx(dev);
+ mt76u_free_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
+
+int mt76u_alloc_queues(struct mt76_dev *dev)
+{
+ int err;
+
+ err = mt76u_alloc_rx(dev);
+ if (err < 0)
+ goto err;
+
+ err = mt76u_alloc_tx(dev);
+ if (err < 0)
+ goto err;
+
+ return 0;
+err:
+ mt76u_queues_deinit(dev);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
+
+static const struct mt76_queue_ops usb_queue_ops = {
+ .tx_queue_skb = mt76u_tx_queue_skb,
+ .kick = mt76u_tx_kick,
+};
+
+int mt76u_init(struct mt76_dev *dev,
+ struct usb_interface *intf)
+{
+ static const struct mt76_bus_ops mt76u_ops = {
+ .rr = mt76u_rr,
+ .wr = mt76u_wr,
+ .rmw = mt76u_rmw,
+ .copy = mt76u_copy,
+ };
+ struct mt76_usb *usb = &dev->usb;
+
+ tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
+ tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
+ INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
+ skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
+
+ init_completion(&usb->mcu.cmpl);
+ mutex_init(&usb->mcu.mutex);
+
+ mutex_init(&usb->usb_ctrl_mtx);
+ dev->bus = &mt76u_ops;
+ dev->queue_ops = &usb_queue_ops;
+
+ return mt76u_set_endpoints(intf, usb);
+}
+EXPORT_SYMBOL_GPL(mt76u_init);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
new file mode 100644
index 000000000000..070be803d463
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+
+#include "mt76.h"
+#include "dma.h"
+
+#define MT_CMD_HDR_LEN 4
+
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
+
+struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, MT_CMD_HDR_LEN);
+ skb_put_data(skb, data, len);
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_msg_alloc);
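
The allocation above budgets MT_CMD_HDR_LEN bytes of headroom for the TXINFO word that mt76u_skb_dma_info() pushes later, plus 8 bytes of tailroom covering worst-case 4-byte alignment padding and the 4-byte zero trailer. A compilable restatement of the budget:

    #include <assert.h>

    int main(void)
    {
            int hdr = 4, extra = 8, len = 13;   /* MT_CMD_HDR_LEN, tail, payload */
            int alloc = hdr + len + extra;
            int headroom = hdr;                 /* consumed by skb_reserve() */
            int tailroom = alloc - headroom - len;

            /* worst case needed later: 3 bytes of pad + 4 zero bytes */
            assert(headroom == 4 && tailroom == 8);
            assert(tailroom >= 3 + 4);
            return 0;
    }
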
+
+void mt76u_mcu_complete_urb(struct urb *urb)
+{
+ struct completion *cmpl = urb->context;
+
+ complete(cmpl);
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_complete_urb);
+
+static int mt76u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
+{
+ struct mt76_usb *usb = &dev->usb;
+ struct mt76u_buf *buf = &usb->mcu.res;
+ int i, ret;
+ u32 rxfce;
+
+ for (i = 0; i < 5; i++) {
+ if (!wait_for_completion_timeout(&usb->mcu.cmpl,
+ msecs_to_jiffies(300)))
+ continue;
+
+ if (buf->urb->status)
+ return -EIO;
+
+ rxfce = get_unaligned_le32(sg_virt(&buf->urb->sg[0]));
+ ret = mt76u_submit_buf(dev, USB_DIR_IN,
+ MT_EP_IN_CMD_RESP,
+ buf, GFP_KERNEL,
+ mt76u_mcu_complete_urb,
+ &usb->mcu.cmpl);
+ if (ret)
+ return ret;
+
+ if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce))
+ return 0;
+
+ dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
+ FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
+ seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
+ }
+
+ dev_err(dev->dev, "error: %s timed out\n", __func__);
+ return -ETIMEDOUT;
+}
+
+int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76_usb *usb = &dev->usb;
+ unsigned int pipe;
+ int ret, sent;
+ u8 seq = 0;
+ u32 info;
+
+ if (test_bit(MT76_REMOVED, &dev->state))
+ return 0;
+
+ mutex_lock(&usb->mcu.mutex);
+
+ pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
+ if (wait_resp) {
+ seq = ++usb->mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++usb->mcu.msg_seq & 0xf;
+ }
+
+ info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+ FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+ MT_MCU_MSG_TYPE_CMD;
+ ret = mt76u_skb_dma_info(skb, CPU_TX_PORT, info);
+ if (ret)
+ goto out;
+
+ ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
+ if (ret)
+ goto out;
+
+ if (wait_resp)
+ ret = mt76u_mcu_wait_resp(dev, seq);
+
+out:
+ mutex_unlock(&usb->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_send_msg);
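
The sequence counter in mt76u_mcu_send_msg() is 4 bits wide and deliberately skips zero on wrap-around, since seq 0 is reserved for fire-and-forget commands that expect no response. A standalone equivalent of the generator:

    #include <assert.h>
    #include <stdint.h>

    static uint8_t next_seq(uint8_t *counter)
    {
            uint8_t seq = ++(*counter) & 0xf;

            if (!seq)
                    seq = ++(*counter) & 0xf;
            return seq;
    }

    int main(void)
    {
            uint8_t c = 14;

            assert(next_seq(&c) == 15);
            assert(next_seq(&c) == 1);  /* 0 is skipped on wrap-around */
            return 0;
    }
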
+
+void mt76u_mcu_fw_reset(struct mt76_dev *dev)
+{
+ mt76u_vendor_request(dev, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x1, 0, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_fw_reset);
+
+static int
+__mt76u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
+ const void *fw_data, int len, u32 dst_addr)
+{
+ u8 *data = sg_virt(&buf->urb->sg[0]);
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ __le32 info;
+ u32 val;
+ int err;
+
+ info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_MCU_MSG_LEN, len) |
+ MT_MCU_MSG_TYPE_CMD);
+
+ memcpy(data, &info, sizeof(info));
+ memcpy(data + sizeof(info), fw_data, len);
+ memset(data + sizeof(info) + len, 0, 4);
+
+ mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_ADDR, dst_addr);
+ len = roundup(len, 4);
+ mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_LEN, len << 16);
+
+ buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
+ err = mt76u_submit_buf(dev, USB_DIR_OUT,
+ MT_EP_OUT_INBAND_CMD,
+ buf, GFP_KERNEL,
+ mt76u_mcu_complete_urb, &cmpl);
+ if (err < 0)
+ return err;
+
+ if (!wait_for_completion_timeout(&cmpl,
+ msecs_to_jiffies(1000))) {
+ dev_err(dev->dev, "firmware upload timed out\n");
+ usb_kill_urb(buf->urb);
+ return -ETIMEDOUT;
+ }
+
+ if (mt76u_urb_error(buf->urb)) {
+ dev_err(dev->dev, "firmware upload failed: %d\n",
+ buf->urb->status);
+ return buf->urb->status;
+ }
+
+ val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val++;
+ mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+ return 0;
+}
+
+int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+ int data_len, u32 max_payload, u32 offset)
+{
+ int err, len, pos = 0, max_len = max_payload - 8;
+ struct mt76u_buf buf;
+
+ err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ while (data_len > 0) {
+ len = min_t(int, data_len, max_len);
+ err = __mt76u_mcu_fw_send_data(dev, &buf, data + pos,
+ len, offset + pos);
+ if (err < 0)
+ break;
+
+ data_len -= len;
+ pos += len;
+ usleep_range(5000, 10000);
+ }
+ mt76u_buf_free(&buf);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_fw_send_data);
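
Each URB in the upload loop above carries at most max_payload - 8 bytes of firmware, leaving room for the 4-byte info word in front and the 4-byte zero trailer behind, while the destination offset advances in lock-step with the source position. A compilable sketch of the chunk accounting, with an assumed URB size:

    #include <stdio.h>

    int main(void)
    {
            int data_len = 90112;           /* e.g. an ILM segment */
            int max_payload = 4096;         /* hypothetical URB size */
            int max_len = max_payload - 8;
            int pos = 0, chunks = 0;

            while (data_len > 0) {
                    int len = data_len < max_len ? data_len : max_len;

                    data_len -= len;
                    pos += len;             /* dst_addr = offset + pos */
                    chunks++;
            }
            printf("%d chunks, %d bytes\n", chunks, pos);   /* 23 chunks */
            return 0;
    }
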
+
+int mt76u_mcu_init_rx(struct mt76_dev *dev)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int err;
+
+ err = mt76u_buf_alloc(dev, &usb->mcu.res, 1,
+ MCU_RESP_URB_SIZE, MCU_RESP_URB_SIZE,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &usb->mcu.res, GFP_KERNEL,
+ mt76u_mcu_complete_urb,
+ &usb->mcu.cmpl);
+ if (err < 0)
+ mt76u_buf_free(&usb->mcu.res);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx);
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.c b/drivers/net/wireless/mediatek/mt76/usb_trace.c
new file mode 100644
index 000000000000..7e1f540f0b7a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "usb_trace.h"
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.h b/drivers/net/wireless/mediatek/mt76/usb_trace.h
new file mode 100644
index 000000000000..52db7012304a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76_USB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76_USB_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76_usb
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT " %04x=%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evt, usb_reg_rr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, usb_reg_wr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE usb_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index d3b611aaf061..faea99b7a445 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -603,6 +603,7 @@ int mt7601u_register_device(struct mt7601u_dev *dev)
ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
hw->max_rates = 1;
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 7b21016012c3..0f1789020960 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -308,6 +308,17 @@ mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int idx = key->keyidx;
int ret;
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (cmd == SET_KEY) {
key->hw_key_idx = wcid->idx;
wcid->hw_key_idx = idx;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index ae0ca8006849..4aa332f4646b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -843,6 +843,88 @@ static int qtnf_set_mac_acl(struct wiphy *wiphy,
return ret;
}
+static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, int timeout)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ ret = qtnf_cmd_send_pm_set(vif, enabled ? QLINK_PM_AUTO_STANDBY :
+ QLINK_PM_OFF, timeout);
+ if (ret)
+ pr_err("%s: failed to set PM mode ret=%d\n", dev->name, ret);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int qtnf_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_vif *vif;
+ int ret = 0;
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (!wowlan) {
+ pr_debug("WoWLAN triggers are not enabled\n");
+ qtnf_virtual_intf_cleanup(vif->netdev);
+ goto exit;
+ }
+
+ qtnf_scan_done(vif->mac, true);
+
+ ret = qtnf_cmd_send_wowlan_set(vif, wowlan);
+ if (ret) {
+ pr_err("MAC%u: failed to set WoWLAN triggers\n",
+ mac->macid);
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int qtnf_resume(struct wiphy *wiphy)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_vif *vif;
+ int ret = 0;
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ ret = qtnf_cmd_send_wowlan_set(vif, NULL);
+ if (ret) {
+ pr_err("MAC%u: failed to reset WoWLAN triggers\n",
+ mac->macid);
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static void qtnf_set_wakeup(struct wiphy *wiphy, bool enabled)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_bus *bus = mac->bus;
+
+ device_set_wakeup_enable(bus->dev, enabled);
+}
+#endif
+
static struct cfg80211_ops qtn_cfg80211_ops = {
.add_virtual_intf = qtnf_add_virtual_intf,
.change_virtual_intf = qtnf_change_virtual_intf,
@@ -869,6 +951,12 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
.channel_switch = qtnf_channel_switch,
.start_radar_detection = qtnf_start_radar_detection,
.set_mac_acl = qtnf_set_mac_acl,
+ .set_power_mgmt = qtnf_set_power_mgmt,
+#ifdef CONFIG_PM
+ .suspend = qtnf_suspend,
+ .resume = qtnf_resume,
+ .set_wakeup = qtnf_set_wakeup,
+#endif
};
static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
@@ -921,6 +1009,9 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
qtn_cfg80211_ops.start_radar_detection = NULL;
+ if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_PWR_MGMT))
+ qtn_cfg80211_ops.set_power_mgmt = NULL;
+
wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac));
if (!wiphy)
return NULL;
@@ -975,7 +1066,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->retry_long = macinfo->lretry_limit;
wiphy->coverage_class = macinfo->coverage_class;
- wiphy->max_scan_ssids = QTNF_MAX_SSID_LIST_LENGTH;
+ wiphy->max_scan_ssids =
+ (hw_info->max_scan_ssids) ? hw_info->max_scan_ssids : 1;
wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
wiphy->mgmt_stypes = qtnf_mgmt_stypes;
wiphy->max_remain_on_channel_duration = 5000;
@@ -994,6 +1086,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
@@ -1013,6 +1106,14 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
if (hw_info->hw_capab & QLINK_HW_CAPAB_STA_INACT_TIMEOUT)
wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
+ if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR)
+ wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+
+#ifdef CONFIG_PM
+ if (macinfo->wowlan)
+ wiphy->wowlan = macinfo->wowlan;
+#endif
+
if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) {
wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
REGULATORY_CUSTOM_REG;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index c5d94a95e21a..ae9e77300533 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -640,83 +640,83 @@ qtnf_cmd_sta_info_parse(struct station_info *sinfo,
return;
if (qtnf_sta_stat_avail(inactive_time, QLINK_STA_INFO_INACTIVE_TIME)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME);
sinfo->inactive_time = le32_to_cpu(stats->inactive_time);
}
if (qtnf_sta_stat_avail(connected_time,
QLINK_STA_INFO_CONNECTED_TIME)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME);
sinfo->connected_time = le32_to_cpu(stats->connected_time);
}
if (qtnf_sta_stat_avail(signal, QLINK_STA_INFO_SIGNAL)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = stats->signal - QLINK_RSSI_OFFSET;
}
if (qtnf_sta_stat_avail(signal_avg, QLINK_STA_INFO_SIGNAL_AVG)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
sinfo->signal_avg = stats->signal_avg - QLINK_RSSI_OFFSET;
}
if (qtnf_sta_stat_avail(rxrate, QLINK_STA_INFO_RX_BITRATE)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
qtnf_sta_info_parse_rate(&sinfo->rxrate, &stats->rxrate);
}
if (qtnf_sta_stat_avail(txrate, QLINK_STA_INFO_TX_BITRATE)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
qtnf_sta_info_parse_rate(&sinfo->txrate, &stats->txrate);
}
if (qtnf_sta_stat_avail(sta_flags, QLINK_STA_INFO_STA_FLAGS)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_STA_FLAGS);
qtnf_sta_info_parse_flags(&sinfo->sta_flags, &stats->sta_flags);
}
if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES);
sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes);
}
if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES);
sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes);
}
if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES64)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes);
}
if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES64)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes);
}
if (qtnf_sta_stat_avail(rx_packets, QLINK_STA_INFO_RX_PACKETS)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
sinfo->rx_packets = le32_to_cpu(stats->rx_packets);
}
if (qtnf_sta_stat_avail(tx_packets, QLINK_STA_INFO_TX_PACKETS)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
sinfo->tx_packets = le32_to_cpu(stats->tx_packets);
}
if (qtnf_sta_stat_avail(rx_beacon, QLINK_STA_INFO_BEACON_RX)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
sinfo->rx_beacon = le64_to_cpu(stats->rx_beacon);
}
if (qtnf_sta_stat_avail(rx_dropped_misc, QLINK_STA_INFO_RX_DROP_MISC)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);
sinfo->rx_dropped_misc = le32_to_cpu(stats->rx_dropped_misc);
}
if (qtnf_sta_stat_avail(tx_failed, QLINK_STA_INFO_TX_FAILED)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->tx_failed = le32_to_cpu(stats->tx_failed);
}
@@ -1092,6 +1092,9 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
case QTN_TLV_ID_UBOOT_VER:
uboot_ver = (const void *)tlv->val;
break;
+ case QTN_TLV_ID_MAX_SCAN_SSIDS:
+ hwinfo->max_scan_ssids = *tlv->val;
+ break;
default:
break;
}
@@ -1135,6 +1138,37 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
return 0;
}
+static void
+qtnf_parse_wowlan_info(struct qtnf_wmac *mac,
+ const struct qlink_wowlan_capab_data *wowlan)
+{
+ struct qtnf_mac_info *mac_info = &mac->macinfo;
+ const struct qlink_wowlan_support *data1;
+ struct wiphy_wowlan_support *supp;
+
+ supp = kzalloc(sizeof(*supp), GFP_KERNEL);
+ if (!supp)
+ return;
+
+ switch (le16_to_cpu(wowlan->version)) {
+ case 0x1:
+ data1 = (struct qlink_wowlan_support *)wowlan->data;
+
+ supp->flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT;
+ supp->n_patterns = le32_to_cpu(data1->n_patterns);
+ supp->pattern_max_len = le32_to_cpu(data1->pattern_max_len);
+ supp->pattern_min_len = le32_to_cpu(data1->pattern_min_len);
+
+ mac_info->wowlan = supp;
+ break;
+ default:
+ pr_warn("MAC%u: unsupported WoWLAN version 0x%x\n",
+ mac->macid, le16_to_cpu(wowlan->version));
+ kfree(supp);
+ break;
+ }
+}
+
static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
const u8 *tlv_buf, size_t tlv_buf_size)
{
@@ -1144,6 +1178,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
const struct qlink_iface_comb_num *comb_num;
const struct qlink_iface_limit_record *rec;
const struct qlink_iface_limit *lim;
+ const struct qlink_wowlan_capab_data *wowlan;
u16 rec_len;
u16 tlv_type;
u16 tlv_value_len;
@@ -1252,7 +1287,31 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
ext_capa_mask = (u8 *)tlv->val;
ext_capa_mask_len = tlv_value_len;
break;
+ case QTN_TLV_ID_WOWLAN_CAPAB:
+ if (tlv_value_len < sizeof(*wowlan))
+ return -EINVAL;
+
+ wowlan = (void *)tlv->val;
+ if (!le16_to_cpu(wowlan->len)) {
+ pr_warn("MAC%u: skip empty WoWLAN data\n",
+ mac->macid);
+ break;
+ }
+
+ rec_len = sizeof(*wowlan) + le16_to_cpu(wowlan->len);
+ if (unlikely(tlv_value_len != rec_len)) {
+ pr_warn("MAC%u: WoWLAN data size mismatch\n",
+ mac->macid);
+ return -EINVAL;
+ }
+
+ kfree(mac->macinfo.wowlan);
+ mac->macinfo.wowlan = NULL;
+ qtnf_parse_wowlan_info(mac, wowlan);
+ break;
default:
+ pr_warn("MAC%u: unknown TLV type %u\n",
+ mac->macid, tlv_type);
break;
}
@@ -2234,6 +2293,22 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
qchan->chan.flags = cpu_to_le32(flags);
}
+static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
+ const u8 *mac_addr,
+ const u8 *mac_addr_mask)
+{
+ struct qlink_random_mac_addr *randmac;
+ struct qlink_tlv_hdr *hdr =
+ skb_put(cmd_skb, sizeof(*hdr) + sizeof(*randmac));
+
+ hdr->type = cpu_to_le16(QTN_TLV_ID_RANDOM_MAC_ADDR);
+ hdr->len = cpu_to_le16(sizeof(*randmac));
+ randmac = (struct qlink_random_mac_addr *)hdr->val;
+
+ memcpy(randmac->mac_addr, mac_addr, ETH_ALEN);
+ memcpy(randmac->mac_addr_mask, mac_addr_mask, ETH_ALEN);
+}
+
int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb;
@@ -2244,11 +2319,6 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
int count = 0;
int ret;
- if (scan_req->n_ssids > QTNF_MAX_SSID_LIST_LENGTH) {
- pr_err("MAC%u: too many SSIDs in scan request\n", mac->macid);
- return -EINVAL;
- }
-
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
QLINK_CMD_SCAN,
sizeof(struct qlink_cmd));
@@ -2291,6 +2361,15 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
}
}
+ if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ pr_debug("MAC%u: scan with random addr=%pM, mask=%pM\n",
+ mac->macid,
+ scan_req->mac_addr, scan_req->mac_addr_mask);
+
+ qtnf_cmd_randmac_tlv_add(cmd_skb, scan_req->mac_addr,
+ scan_req->mac_addr_mask);
+ }
+
ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
if (unlikely(ret))
@@ -2774,3 +2853,93 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
return ret;
}
+
+int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ struct sk_buff *cmd_skb;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ struct qlink_cmd_pm_set *cmd;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_PM_SET, sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_pm_set *)cmd_skb->data;
+ cmd->pm_mode = pm_mode;
+ cmd->pm_standby_timer = cpu_to_le32(timeout);
+
+ qtnf_bus_lock(bus);
+
+ ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("cmd exec failed: 0x%.4X\n", res_code);
+ ret = -EFAULT;
+ }
+
+out:
+ qtnf_bus_unlock(bus);
+ return ret;
+}
+
+int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
+ const struct cfg80211_wowlan *wowl)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ struct sk_buff *cmd_skb;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ struct qlink_cmd_wowlan_set *cmd;
+ u32 triggers = 0;
+ int count = 0;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_WOWLAN_SET, sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ qtnf_bus_lock(bus);
+
+ cmd = (struct qlink_cmd_wowlan_set *)cmd_skb->data;
+
+ if (wowl) {
+ if (wowl->disconnect)
+ triggers |= QLINK_WOWLAN_TRIG_DISCONNECT;
+
+ if (wowl->magic_pkt)
+ triggers |= QLINK_WOWLAN_TRIG_MAGIC_PKT;
+
+ if (wowl->n_patterns && wowl->patterns) {
+ triggers |= QLINK_WOWLAN_TRIG_PATTERN_PKT;
+ while (count < wowl->n_patterns) {
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb,
+ QTN_TLV_ID_WOWLAN_PATTERN,
+ wowl->patterns[count].pattern,
+ wowl->patterns[count].pattern_len);
+ count++;
+ }
+ }
+ }
+
+ cmd->triggers = cpu_to_le32(triggers);
+
+ ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("cmd exec failed: 0x%.4X\n", res_code);
+ ret = -EFAULT;
+ }
+
+out:
+ qtnf_bus_unlock(bus);
+ return ret;
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index cf9274add26d..1ac41156c192 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -76,5 +76,8 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
u32 cac_time_ms);
int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
const struct cfg80211_acl_data *params);
+int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout);
+int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
+ const struct cfg80211_wowlan *wowl);
#endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index a6a450984f9a..19abbc4e23e0 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -179,6 +179,30 @@ static void qtnf_netdev_tx_timeout(struct net_device *ndev)
}
}
+static int qtnf_netdev_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ struct sockaddr *sa = addr;
+ int ret;
+ unsigned char old_addr[ETH_ALEN];
+
+ memcpy(old_addr, sa->sa_data, sizeof(old_addr));
+
+ ret = eth_mac_addr(ndev, sa);
+ if (ret)
+ return ret;
+
+ qtnf_scan_done(vif->mac, true);
+
+ ret = qtnf_cmd_send_change_intf_type(vif, vif->wdev.iftype,
+ sa->sa_data);
+
+ if (ret)
+ memcpy(ndev->dev_addr, old_addr, ETH_ALEN);
+
+ return ret;
+}
+
/* Network device ops handlers */
const struct net_device_ops qtnf_netdev_ops = {
.ndo_open = qtnf_netdev_open,
@@ -186,6 +210,7 @@ const struct net_device_ops qtnf_netdev_ops = {
.ndo_start_xmit = qtnf_netdev_hard_start_xmit,
.ndo_tx_timeout = qtnf_netdev_tx_timeout,
.ndo_get_stats64 = qtnf_netdev_get_stats64,
+ .ndo_set_mac_address = qtnf_netdev_set_mac_address,
};
static int qtnf_mac_init_single_band(struct wiphy *wiphy,
@@ -470,6 +495,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
qtnf_mac_iface_comb_free(mac);
kfree(mac->macinfo.extended_capabilities);
kfree(mac->macinfo.extended_capabilities_mask);
+ kfree(mac->macinfo.wowlan);
wiphy_free(wiphy);
bus->mac[macid] = NULL;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 214435448335..a1e338a1f055 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -40,7 +40,6 @@
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
-#define QTNF_MAX_SSID_LIST_LENGTH 2
#define QTNF_MAX_VSIE_LEN 255
#define QTNF_MAX_INTF 8
#define QTNF_MAX_EVENT_QUEUE_LEN 255
@@ -111,6 +110,7 @@ struct qtnf_mac_info {
u8 *extended_capabilities;
u8 *extended_capabilities_mask;
u8 extended_capabilities_len;
+ struct wiphy_wowlan_support *wowlan;
};
struct qtnf_chan_stats {
@@ -145,6 +145,7 @@ struct qtnf_hw_info {
u8 total_rx_chain;
char fw_version[ETHTOOL_FWVERS_LEN];
u32 hw_version;
+ u8 max_scan_ssids;
};
struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index f85deda703fb..99d37e3efba6 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -69,11 +69,15 @@ struct qlink_msg_header {
* associated STAs due to inactivity. Inactivity timeout period is taken
* from QLINK_CMD_START_AP parameters.
* @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality
+ * @QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR: device supports MAC Address
+ * Randomization in probe requests.
*/
enum qlink_hw_capab {
QLINK_HW_CAPAB_REG_UPDATE = BIT(0),
QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1),
QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2),
+ QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3),
+ QLINK_HW_CAPAB_PWR_MGMT = BIT(4),
};
enum qlink_iface_type {
@@ -253,6 +257,8 @@ enum qlink_cmd_type {
QLINK_CMD_CHAN_STATS = 0x0054,
QLINK_CMD_CONNECT = 0x0060,
QLINK_CMD_DISCONNECT = 0x0061,
+ QLINK_CMD_PM_SET = 0x0062,
+ QLINK_CMD_WOWLAN_SET = 0x0063,
};
/**
@@ -665,6 +671,54 @@ struct qlink_acl_data {
struct qlink_mac_address mac_addrs[0];
} __packed;
+/**
+ * enum qlink_pm_mode - Power Management mode
+ *
+ * @QLINK_PM_OFF: normal mode, no power saving enabled
+ * @QLINK_PM_AUTO_STANDBY: enable auto power save mode
+ */
+enum qlink_pm_mode {
+ QLINK_PM_OFF = 0,
+ QLINK_PM_AUTO_STANDBY = 1,
+};
+
+/**
+ * struct qlink_cmd_pm_set - data for QLINK_CMD_PM_SET command
+ *
+ * @pm_standby_timer: period of network inactivity in seconds before
+ * putting a radio in power save mode
+ * @pm_mode: power management mode
+ */
+struct qlink_cmd_pm_set {
+ struct qlink_cmd chdr;
+ __le32 pm_standby_timer;
+ u8 pm_mode;
+} __packed;
+
+/**
+ * enum qlink_wowlan_trigger - WoWLAN wakeup triggers
+ *
+ * @QLINK_WOWLAN_TRIG_DISCONNECT: wakeup on disconnect
+ * @QLINK_WOWLAN_TRIG_MAGIC_PKT: wakeup on magic packet
+ * @QLINK_WOWLAN_TRIG_PATTERN_PKT: wakeup on user-defined packet
+ */
+enum qlink_wowlan_trigger {
+ QLINK_WOWLAN_TRIG_DISCONNECT = BIT(0),
+ QLINK_WOWLAN_TRIG_MAGIC_PKT = BIT(1),
+ QLINK_WOWLAN_TRIG_PATTERN_PKT = BIT(2),
+};
+
+/**
+ * struct qlink_cmd_wowlan_set - data for QLINK_CMD_WOWLAN_SET command
+ *
+ * @triggers: requested bitmask of WoWLAN triggers
+ */
+struct qlink_cmd_wowlan_set {
+ struct qlink_cmd chdr;
+ __le32 triggers;
+ u8 data[0];
+} __packed;
+
/* QLINK Command Responses messages related definitions
*/
@@ -1062,6 +1116,8 @@ struct qlink_event_radar {
* @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by
* &struct qlink_sta_stats. Valid values are marked as such in a bitmap
* carried by QTN_TLV_ID_STA_STATS_MAP.
+ * @QTN_TLV_ID_MAX_SCAN_SSIDS: maximum number of SSIDs the device can scan
+ * for in any given scan.
*/
enum qlink_tlv_id {
QTN_TLV_ID_FRAG_THRESH = 0x0201,
@@ -1089,6 +1145,10 @@ enum qlink_tlv_id {
QTN_TLV_ID_HW_ID = 0x0405,
QTN_TLV_ID_CALIBRATION_VER = 0x0406,
QTN_TLV_ID_UBOOT_VER = 0x0407,
+ QTN_TLV_ID_RANDOM_MAC_ADDR = 0x0408,
+ QTN_TLV_ID_MAX_SCAN_SSIDS = 0x0409,
+ QTN_TLV_ID_WOWLAN_CAPAB = 0x0410,
+ QTN_TLV_ID_WOWLAN_PATTERN = 0x0411,
};
struct qlink_tlv_hdr {
@@ -1360,4 +1420,49 @@ struct qlink_sta_stats {
u8 rsvd[1];
};
+/**
+ * struct qlink_random_mac_addr - data for QTN_TLV_ID_RANDOM_MAC_ADDR TLV
+ *
+ * Specifies the MAC address mask/value used to generate a random
+ * MAC address during scan.
+ *
+ * @mac_addr: MAC address used with randomisation
+ * @mac_addr_mask: MAC address mask used with randomisation; bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from @mac_addr
+ */
+struct qlink_random_mac_addr {
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+} __packed;
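
A userspace sketch of the randomisation rule described above, assuming mask bits set to 1 keep the corresponding bit of @mac_addr and mask bits set to 0 are replaced by random bits; rand() stands in for whatever entropy source the firmware actually uses:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

static void randomise_addr(uint8_t out[ETH_ALEN],
                           const uint8_t addr[ETH_ALEN],
                           const uint8_t mask[ETH_ALEN])
{
        for (int i = 0; i < ETH_ALEN; i++) {
                uint8_t r = (uint8_t)rand();

                /* keep masked bits of addr, randomise the rest */
                out[i] = (addr[i] & mask[i]) | (r & ~mask[i]);
        }
}

int main(void)
{
        const uint8_t addr[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x00, 0x00, 0x00 };
        const uint8_t mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };
        uint8_t out[ETH_ALEN];

        randomise_addr(out, addr, mask);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               out[0], out[1], out[2], out[3], out[4], out[5]);
        return 0;
}
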
+
+/**
+ * struct qlink_wowlan_capab_data - data for QTN_TLV_ID_WOWLAN_CAPAB TLV
+ *
+ * WoWLAN capabilities supported by the card.
+ *
+ * @version: version of WoWLAN data structure, to ensure backward
+ * compatibility with firmware versions that have limited WoWLAN support
+ * @len: Total length of WoWLAN data
+ * @data: supported WoWLAN features
+ */
+struct qlink_wowlan_capab_data {
+ __le16 version;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+/**
+ * struct qlink_wowlan_support - supported WoWLAN capabilities
+ *
+ * @n_patterns: number of supported wakeup patterns
+ * @pattern_max_len: maximum length of each pattern
+ * @pattern_min_len: minimum length of each pattern
+ */
+struct qlink_wowlan_support {
+ __le32 n_patterns;
+ __le32 pattern_max_len;
+ __le32 pattern_min_len;
+} __packed;
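
For version 1 of the capability TLV, the wire format is the 4-byte qlink_wowlan_capab_data header followed directly by the 12-byte qlink_wowlan_support block. A compile-time sketch of those sizes, using packed userspace mirrors of the structures above:

#include <stdint.h>

#pragma pack(push, 1)
struct capab_hdr { uint16_t version; uint16_t len; };
struct wowlan_support {
        uint32_t n_patterns;
        uint32_t pattern_max_len;
        uint32_t pattern_min_len;
};
#pragma pack(pop)

_Static_assert(sizeof(struct capab_hdr) == 4, "header is 4 bytes");
_Static_assert(sizeof(struct wowlan_support) == 12, "v1 payload is 12 bytes");

int main(void) { return 0; }
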
+
#endif /* _QTN_QLINK_H_ */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index c380c1f56ba6..fa2fd64084ac 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -527,24 +527,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
EXPORT_SYMBOL_GPL(rt2x00mac_set_key);
#endif /* CONFIG_RT2X00_LIB_CRYPTO */
-int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct rt2x00_dev *rt2x00dev = hw->priv;
-
- return rt2x00dev->ops->lib->sta_add(rt2x00dev, vif, sta);
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_sta_add);
-
-int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct rt2x00_dev *rt2x00dev = hw->priv;
-
- return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta);
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove);
-
void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const u8 *mac_addr)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00pci.h b/drivers/net/wireless/ralink/rt2x00/rt2x00pci.h
index bc0ca5f58f38..283e2e607bba 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00pci.h
@@ -28,12 +28,6 @@
#include <linux/pci.h>
/*
- * This variable should be used with the
- * pci_driver structure initialization.
- */
-#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops)
-
-/*
* PCI driver handlers.
*/
int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index a7e0a17aa7e8..08c607c031bc 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -470,7 +470,6 @@ static inline struct rcs __iomem *rcs_base(ray_dev_t *dev)
static int ray_init(struct net_device *dev)
{
int i;
- UCHAR *p;
struct ccs __iomem *pccs;
ray_dev_t *local = netdev_priv(dev);
struct pcmcia_device *link = local->finder;
@@ -513,12 +512,9 @@ static int ray_init(struct net_device *dev)
init_startup_params(local);
/* copy mac address to startup parameters */
- if (parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) {
- p = local->sparm.b4.a_mac_addr;
- } else {
+ if (!parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) {
memcpy(&local->sparm.b4.a_mac_addr,
&local->startup_res.station_addr, ADDRLEN);
- p = local->sparm.b4.a_mac_addr;
}
clear_interrupt(local); /* Clear any interrupt from the card */
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
index fde89866fa8d..51e32df6120b 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
@@ -363,7 +363,7 @@ void rtl8225se_rf_init(struct ieee80211_hw *dev)
rtl8187se_rf_writereg(dev, 0x00, 0x0037); mdelay(11);
rtl8187se_rf_writereg(dev, 0x04, 0x0160); mdelay(11);
rtl8187se_rf_writereg(dev, 0x07, 0x0080); mdelay(11);
- rtl8187se_rf_writereg(dev, 0x02, 0x088D); mdelay(221);
+ rtl8187se_rf_writereg(dev, 0x02, 0x088D); msleep(221);
rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11);
rtl8187se_rf_writereg(dev, 0x07, 0x0000); mdelay(1);
rtl8187se_rf_writereg(dev, 0x07, 0x0180); mdelay(1);
@@ -386,7 +386,7 @@ void rtl8225se_rf_init(struct ieee80211_hw *dev)
rtl8187se_rf_writereg(dev, 0x00, 0x00BF); mdelay(1);
rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1);
rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1);
- rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(31);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0975); msleep(31);
rtl8187se_rf_writereg(dev, 0x00, 0x0197); mdelay(1);
rtl8187se_rf_writereg(dev, 0x05, 0x05AB); mdelay(1);
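
The mdelay()-to-msleep() changes follow the usual kernel guideline: mdelay() busy-waits, so it is only appropriate for very short delays or atomic context, while delays in the tens of milliseconds in sleepable context should sleep. A condensed sketch (kernel context assumed, not a standalone program):

#include <linux/delay.h>

static void rf_settle_example(void)
{
        mdelay(1);    /* ~1 ms: too short for the scheduler to help */
        msleep(221);  /* long delay: sleep and yield the CPU instead */
}
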
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 54c9f6ab0c8c..f4122c8fdd97 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -1907,7 +1907,7 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
reject_agg, ctrl_agg_size, agg_size);
rtlpriv->hw->max_rx_aggregation_subframes =
- (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF);
+ (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT);
}
EXPORT_SYMBOL(rtl_rx_ampdu_apply);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index df3facc8e5a4..6597f7cb3411 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -2818,23 +2818,11 @@ static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
@@ -2949,23 +2937,11 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
/* sw mechanism */
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3008,23 +2984,11 @@ static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
/* sw mechanism */
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3071,23 +3035,11 @@ static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
/* sw mechanism */
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3121,23 +3073,11 @@ static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3189,23 +3129,11 @@ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, false,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
@@ -3264,23 +3192,11 @@ static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
@@ -3336,23 +3252,11 @@ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
@@ -3436,23 +3340,11 @@ static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, true, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- } else {
- btc8723b2ant_sw_mechanism(btcoexist, false, true,
- false, false);
- }
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 9935bd09db1f..51e4e92d95a0 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2480,7 +2480,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
ret = rndis_query_oid(usbdev, RNDIS_OID_GEN_LINK_SPEED, &linkspeed, &len);
if (ret == 0) {
sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
len = sizeof(rssi);
@@ -2488,7 +2488,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
&rssi, &len);
if (ret == 0) {
sinfo->signal = level_to_qual(le32_to_cpu(rssi));
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
}
@@ -2928,6 +2928,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev,
while (buflen >= sizeof(*auth_req)) {
auth_req = (void *)buf;
+ if (buflen < le32_to_cpu(auth_req->length))
+ return;
type = "unknown";
flags = le32_to_cpu(auth_req->flags);
pairwise_error = false;
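
The added length check keeps the loop from trusting a record's self-declared size beyond the bytes actually received. A userspace sketch of that walk over length-prefixed records, with stand-in field names and host-endian lengths where the driver converts from little-endian:

#include <stddef.h>
#include <stdint.h>

struct rec {            /* stand-in for the auth request record */
        uint32_t length;    /* total record length, header included */
        /* payload follows */
};

static void walk_records(const uint8_t *buf, size_t buflen)
{
        while (buflen >= sizeof(struct rec)) {
                const struct rec *r = (const struct rec *)buf;

                if (r->length < sizeof(*r) || buflen < r->length)
                        return;     /* malformed or truncated: stop */

                /* ... process one record ... */

                buf += r->length;
                buflen -= r->length;
        }
}

int main(void)
{
        uint8_t buf[8] = { 8, 0, 0, 0, 0, 0, 0, 0 };  /* one 8-byte record */

        walk_records(buf, sizeof(buf));
        return 0;
}
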
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 0761e61591bd..01edf960ff3c 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -26,6 +26,9 @@ static struct ta_metadata metadata_flash_content[] = {
{"flash_content", 0x00010000},
{"rsi/rs9113_wlan_qspi.rps", 0x00010000},
{"rsi/rs9113_wlan_bt_dual_mode.rps", 0x00010000},
+ {"flash_content", 0x00010000},
+ {"rsi/rs9113_ap_bt_dual_mode.rps", 0x00010000},
+
};
int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb)
@@ -54,7 +57,6 @@ int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
struct ieee80211_vif *vif;
struct rsi_mgmt_desc *mgmt_desc;
struct skb_info *tx_params;
- struct ieee80211_bss_conf *bss = NULL;
struct rsi_xtended_desc *xtend_desc = NULL;
u8 header_size;
u32 dword_align_bytes = 0;
@@ -88,7 +90,6 @@ int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
tx_params->internal_hdr_size = header_size;
memset(&skb->data[0], 0, header_size);
- bss = &vif->bss_conf;
wh = (struct ieee80211_hdr *)&skb->data[header_size];
mgmt_desc = (struct rsi_mgmt_desc *)skb->data;
@@ -145,7 +146,6 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
struct ieee80211_hdr *wh = NULL;
struct ieee80211_tx_info *info;
struct skb_info *tx_params;
- struct ieee80211_bss_conf *bss;
struct rsi_data_desc *data_desc;
struct rsi_xtended_desc *xtend_desc;
u8 ieee80211_size = MIN_802_11_HDR_LEN;
@@ -156,7 +156,6 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
info = IEEE80211_SKB_CB(skb);
vif = info->control.vif;
- bss = &vif->bss_conf;
tx_params = (struct skb_info *)info->driver_data;
header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc);
@@ -246,7 +245,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
}
}
- data_desc->mac_flags = cpu_to_le16(seq_num & 0xfff);
+ data_desc->mac_flags |= cpu_to_le16(seq_num & 0xfff);
data_desc->qid_tid = ((skb->priority & 0xf) |
((tx_params->tid & 0xf) << 4));
data_desc->sta_id = tx_params->sta_id;
@@ -285,7 +284,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
struct ieee80211_tx_info *info;
struct skb_info *tx_params;
struct ieee80211_bss_conf *bss;
- struct ieee80211_hdr *wh;
int status = -EINVAL;
u8 header_size;
@@ -301,7 +299,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
bss = &vif->bss_conf;
tx_params = (struct skb_info *)info->driver_data;
header_size = tx_params->internal_hdr_size;
- wh = (struct ieee80211_hdr *)&skb->data[header_size];
if (((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_P2P_CLIENT)) &&
@@ -744,13 +741,11 @@ static int ping_pong_write(struct rsi_hw *adapter, u8 cmd, u8 *addr, u32 size)
static int auto_fw_upgrade(struct rsi_hw *adapter, u8 *flash_content,
u32 content_size)
{
- u8 cmd, *temp_flash_content;
+ u8 cmd;
u32 temp_content_size, num_flash, index;
u32 flash_start_address;
int status;
- temp_flash_content = flash_content;
-
if (content_size > MAX_FLASH_FILE_SIZE) {
rsi_dbg(ERR_ZONE,
"%s: Flash Content size is more than 400K %u\n",
@@ -842,7 +837,6 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
const struct firmware *fw_entry = NULL;
u32 regout_val = 0, content_size;
u16 tmp_regout_val = 0;
- u8 *flash_content = NULL;
struct ta_metadata *metadata_p;
int status;
@@ -904,28 +898,22 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
__func__, metadata_p->name);
return status;
}
- flash_content = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
- if (!flash_content) {
- rsi_dbg(ERR_ZONE, "%s: Failed to copy firmware\n", __func__);
- status = -EIO;
- goto fail;
- }
content_size = fw_entry->size;
rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", content_size);
/* Get the firmware version */
common->lmac_ver.ver.info.fw_ver[0] =
- flash_content[LMAC_VER_OFFSET] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET] & 0xFF;
common->lmac_ver.ver.info.fw_ver[1] =
- flash_content[LMAC_VER_OFFSET + 1] & 0xFF;
- common->lmac_ver.major = flash_content[LMAC_VER_OFFSET + 2] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET + 1] & 0xFF;
+ common->lmac_ver.major = fw_entry->data[LMAC_VER_OFFSET + 2] & 0xFF;
common->lmac_ver.release_num =
- flash_content[LMAC_VER_OFFSET + 3] & 0xFF;
- common->lmac_ver.minor = flash_content[LMAC_VER_OFFSET + 4] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET + 3] & 0xFF;
+ common->lmac_ver.minor = fw_entry->data[LMAC_VER_OFFSET + 4] & 0xFF;
common->lmac_ver.patch_num = 0;
rsi_print_version(common);
- status = bl_write_header(adapter, flash_content, content_size);
+ status = bl_write_header(adapter, (u8 *)fw_entry->data, content_size);
if (status) {
rsi_dbg(ERR_ZONE,
"%s: RPS Image header loading failed\n",
@@ -967,7 +955,7 @@ fw_upgrade:
rsi_dbg(INFO_ZONE, "Burn Command Pass.. Upgrading the firmware\n");
- status = auto_fw_upgrade(adapter, flash_content, content_size);
+ status = auto_fw_upgrade(adapter, (u8 *)fw_entry->data, content_size);
if (status == 0) {
rsi_dbg(ERR_ZONE, "Firmware upgradation Done\n");
goto load_image_cmd;
@@ -981,13 +969,11 @@ fw_upgrade:
success:
rsi_dbg(ERR_ZONE, "***** Firmware Loading successful *****\n");
- kfree(flash_content);
release_firmware(fw_entry);
return 0;
fail:
rsi_dbg(ERR_ZONE, "##### Firmware loading failed #####\n");
- kfree(flash_content);
release_firmware(fw_entry);
return status;
}
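
The net effect of these hunks is dropping a redundant kmemdup() of the firmware image: the blob returned by request_firmware() is only read, so it can be parsed in place. A sketch of the resulting pattern, kernel context assumed and the offset name hypothetical:

#include <linux/firmware.h>

#define VER_OFFSET 0x10   /* hypothetical version byte offset */

static int load_fw_example(struct device *dev, const char *name)
{
        const struct firmware *fw;
        u8 version;
        int ret;

        ret = request_firmware(&fw, name, dev);
        if (ret)
                return ret;

        /* fw->data is const u8 *: read it directly, no kmemdup() copy */
        version = fw->data[VER_OFFSET];
        (void)version;

        release_firmware(fw);
        return 0;
}
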
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 2ca7464b7fa3..4e510cbe0a89 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -416,7 +416,8 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
/* Get free vap index */
for (i = 0; i < RSI_MAX_VIFS; i++) {
- if (!adapter->vifs[i]) {
+ if (!adapter->vifs[i] ||
+ !memcmp(vif->addr, adapter->vifs[i]->addr, ETH_ALEN)) {
vap_idx = i;
break;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 1485a0c89df2..01d99ed985ee 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -122,7 +122,6 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
u8 extended_desc)
{
struct ieee80211_tx_info *info;
- struct skb_info *rx_params;
struct sk_buff *skb = NULL;
u8 payload_offset;
struct ieee80211_vif *vif;
@@ -149,10 +148,6 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
vif = rsi_get_vif(common->priv, wh->addr1);
info = IEEE80211_SKB_CB(skb);
- rx_params = (struct skb_info *)info->driver_data;
- rx_params->rssi = rsi_get_rssi(buffer);
- rx_params->channel = rsi_get_connected_channel(vif);
-
return skb;
}
@@ -336,7 +331,6 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
spin_lock_init(&adapter->ps_lock);
timer_setup(&common->roc_timer, rsi_roc_timeout, 0);
init_completion(&common->wlan_init_completion);
- common->init_done = true;
adapter->device_model = RSI_DEV_9113;
common->oper_mode = oper_mode;
@@ -374,6 +368,7 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
}
#endif
+ common->init_done = true;
return adapter;
err:
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index d0e5937cad6d..1095df7d9573 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -334,20 +334,17 @@ static int rsi_load_radio_caps(struct rsi_common *common)
struct ieee80211_conf *conf = &hw->conf;
if (conf_is_ht40_plus(conf)) {
- radio_caps->radio_cfg_info =
- RSI_CMDDESC_LOWER_20_ENABLE;
- radio_caps->radio_info =
- RSI_CMDDESC_LOWER_20_ENABLE;
+ radio_caps->ppe_ack_rate =
+ cpu_to_le16(LOWER_20_ENABLE |
+ (LOWER_20_ENABLE >> 12));
} else if (conf_is_ht40_minus(conf)) {
- radio_caps->radio_cfg_info =
- RSI_CMDDESC_UPPER_20_ENABLE;
- radio_caps->radio_info =
- RSI_CMDDESC_UPPER_20_ENABLE;
+ radio_caps->ppe_ack_rate =
+ cpu_to_le16(UPPER_20_ENABLE |
+ (UPPER_20_ENABLE >> 12));
} else {
- radio_caps->radio_cfg_info =
- RSI_CMDDESC_40MHZ;
- radio_caps->radio_info =
- RSI_CMDDESC_FULL_40_ENABLE;
+ radio_caps->ppe_ack_rate =
+ cpu_to_le16((BW_40MHZ << 12) |
+ FULL40M_ENABLE);
}
}
}
@@ -749,7 +746,7 @@ int rsi_hal_load_key(struct rsi_common *common,
key_descriptor |= RSI_CIPHER_TKIP;
}
key_descriptor |= RSI_PROTECT_DATA_FRAMES;
- key_descriptor |= ((key_id << RSI_KEY_ID_OFFSET) & RSI_KEY_ID_MASK);
+ key_descriptor |= (key_id << RSI_KEY_ID_OFFSET);
rsi_set_len_qno(&set_key->desc_dword0.len_qno,
(frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q);
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 416981d99229..5733e440ecaf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1394,10 +1394,7 @@ static const struct dev_pm_ops rsi_pm_ops = {
#endif
static const struct sdio_device_id rsi_dev_table[] = {
- { SDIO_DEVICE(0x303, 0x100) },
- { SDIO_DEVICE(0x041B, 0x0301) },
- { SDIO_DEVICE(0x041B, 0x0201) },
- { SDIO_DEVICE(0x041B, 0x9330) },
+ { SDIO_DEVICE(RSI_SDIO_VID_9113, RSI_SDIO_PID_9113) },
{ /* Blank */},
};
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 6ce6b754df12..c0a163e40402 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -835,11 +835,7 @@ static int rsi_resume(struct usb_interface *intf)
#endif
static const struct usb_device_id rsi_dev_table[] = {
- { USB_DEVICE(0x0303, 0x0100) },
- { USB_DEVICE(0x041B, 0x0301) },
- { USB_DEVICE(0x041B, 0x0201) },
- { USB_DEVICE(0x041B, 0x9330) },
- { USB_DEVICE(0x1618, 0x9113) },
+ { USB_DEVICE(RSI_USB_VID_9113, RSI_USB_PID_9113) },
{ /* Blank */},
};
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index 14620935c925..359fbdf85739 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -22,7 +22,7 @@
#include "rsi_main.h"
#define MAX_MGMT_PKT_SIZE 512
-#define RSI_NEEDED_HEADROOM 80
+#define RSI_NEEDED_HEADROOM 84
#define RSI_RCV_BUFFER_LEN 2000
#define RSI_11B_MODE 0
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
index 353dbdf31e75..66dcd2ec9051 100644
--- a/drivers/net/wireless/rsi/rsi_sdio.h
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -28,6 +28,9 @@
#include <linux/mmc/sdio_ids.h>
#include "rsi_main.h"
+#define RSI_SDIO_VID_9113 0x041B
+#define RSI_SDIO_PID_9113 0x9330
+
enum sdio_interrupt_type {
BUFFER_FULL = 0x0,
BUFFER_AVAILABLE = 0x2,
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
index b6fe79f0a513..5b2eddd1a2ee 100644
--- a/drivers/net/wireless/rsi/rsi_usb.h
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -22,6 +22,9 @@
#include "rsi_main.h"
#include "rsi_common.h"
+#define RSI_USB_VID_9113 0x1618
+#define RSI_USB_PID_9113 0x9113
+
#define USB_INTERNAL_REG_1 0x25000
#define RSI_USB_READY_MAGIC_NUM 0xab
#define FW_STATUS_REG 0x41050012
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 86ccf84ea0c6..597e934c4630 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -20,6 +20,8 @@
*
*/
+#include <linux/pm_runtime.h>
+
#include "../wlcore/debugfs.h"
#include "../wlcore/wlcore.h"
#include "../wlcore/debug.h"
@@ -276,15 +278,18 @@ static ssize_t radar_detection_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl18xx_cmd_radar_detection_debug(wl, channel);
if (ret < 0)
count = ret;
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
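
Each wl1271_ps_elp_wakeup()/wl1271_ps_elp_sleep() pair in this conversion becomes the same runtime-PM idiom: pm_runtime_get_sync() with pm_runtime_put_noidle() on failure (get_sync raises the usage count even when it fails), and pm_runtime_mark_last_busy() plus pm_runtime_put_autosuspend() on the success path. A condensed sketch, kernel context assumed:

#include <linux/pm_runtime.h>

static int wlcore_pm_pattern_example(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                /* drop the reference taken by get_sync without idling */
                pm_runtime_put_noidle(dev);
                return ret;
        }

        /* ... talk to the firmware here ... */

        /* reset the autosuspend timer, then allow autosuspend */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}
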
@@ -315,15 +320,18 @@ static ssize_t dynamic_fw_traces_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl18xx_acx_dynamic_fw_traces(wl);
if (ret < 0)
count = ret;
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -374,9 +382,11 @@ static ssize_t radar_debug_mode_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif_ap(wl, wlvif) {
wlcore_cmd_generic_cfg(wl, wlvif,
@@ -384,7 +394,8 @@ static ssize_t radar_debug_mode_write(struct file *file,
wl->radar_debug_mode, 0);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 3ca9167d6146..7c83915a7c5e 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -31,7 +31,6 @@
#include "wlcore.h"
#include "debug.h"
#include "wl12xx_80211.h"
-#include "ps.h"
#include "hw_ops.h"
int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif,
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 761cf8573a80..903968735a74 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
@@ -191,6 +192,12 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
+ goto free_vector;
+ }
+
do {
if (time_after(jiffies, timeout_time)) {
wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
@@ -222,6 +229,9 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
} while (!event);
out:
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
+free_vector:
kfree(events_vector);
return ret;
}
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index a2cb408be8aa..aeb74e74698e 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -26,6 +26,7 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include "wlcore.h"
#include "debug.h"
@@ -65,9 +66,11 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (!wl->plt &&
time_after(jiffies, wl->stats.fw_stats_update +
@@ -76,7 +79,8 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl)
wl->stats.fw_stats_update = jiffies;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -118,14 +122,18 @@ static void chip_op_handler(struct wl1271 *wl, unsigned long value,
return;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
+
return;
+ }
chip_op = arg;
chip_op(wl);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
}
@@ -292,9 +300,11 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* In case we're already in PSM, trigger it again to set new timeout
* immediately without waiting for re-association
@@ -305,7 +315,8 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
wl1271_ps_set_mode(wl, wlvif, STATION_AUTO_PS_MODE);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -359,9 +370,11 @@ static ssize_t forced_ps_write(struct file *file,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* In case we're already in PSM, trigger it again to switch mode
* immediately without waiting for re-association
@@ -374,7 +387,8 @@ static ssize_t forced_ps_write(struct file *file,
wl1271_ps_set_mode(wl, wlvif, ps_mode);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -838,15 +852,18 @@ static ssize_t rx_streaming_interval_write(struct file *file,
wl->conf.rx_streaming.interval = value;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_recalc_rx_streaming(wl, wlvif);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -893,15 +910,18 @@ static ssize_t rx_streaming_always_write(struct file *file,
wl->conf.rx_streaming.always = value;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_recalc_rx_streaming(wl, wlvif);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -940,15 +960,18 @@ static ssize_t beacon_filtering_write(struct file *file,
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -1019,16 +1042,19 @@ static ssize_t sleep_auth_write(struct file *file,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_acx_sleep_auth(wl, value);
if (ret < 0)
goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
@@ -1083,7 +1109,7 @@ static ssize_t dev_mem_read(struct file *file,
* Don't fail if elp_wakeup returns an error, so the device's memory
* could be read even if the FW crashed
*/
- wl1271_ps_elp_wakeup(wl);
+ pm_runtime_get_sync(wl->dev);
/* store current partition and switch partition */
memcpy(&old_part, &wl->curr_part, sizeof(old_part));
@@ -1102,7 +1128,8 @@ read_err:
goto part_err;
part_err:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
skip_read:
mutex_unlock(&wl->mutex);
@@ -1164,7 +1191,7 @@ static ssize_t dev_mem_write(struct file *file, const char __user *user_buf,
* Don't fail if elp_wakeup returns an error, so the device's memory
* could be read even if the FW crashed
*/
- wl1271_ps_elp_wakeup(wl);
+ pm_runtime_get_sync(wl->dev);
/* store current partition and switch partition */
memcpy(&old_part, &wl->curr_part, sizeof(old_part));
@@ -1183,7 +1210,8 @@ write_err:
goto part_err;
part_err:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
skip_write:
mutex_unlock(&wl->mutex);
@@ -1247,8 +1275,9 @@ static ssize_t fw_logger_write(struct file *file,
}
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
+ ret = pm_runtime_get_sync(wl->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
count = ret;
goto out;
}
@@ -1257,7 +1286,8 @@ static ssize_t fw_logger_write(struct file *file,
ret = wl12xx_cmd_config_fwlog(wl);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 3a51ab116e79..89b0d0fade9f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -26,6 +26,7 @@
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/pm_runtime.h>
#include "wlcore.h"
#include "debug.h"
@@ -43,6 +44,7 @@
#define WL1271_BOOT_RETRIES 3
#define WL1271_SUSPEND_SLEEP 100
+#define WL1271_WAKEUP_TIMEOUT 500
static char *fwlog_param;
static int fwlog_mem_blocks = -1;
@@ -153,9 +155,11 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
if (!wl->conf.rx_streaming.interval)
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_set_rx_streaming(wl, wlvif, true);
if (ret < 0)
@@ -166,7 +170,8 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -183,16 +188,19 @@ static void wl1271_rx_streaming_disable_work(struct work_struct *work)
if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_set_rx_streaming(wl, wlvif, false);
if (ret)
goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -229,9 +237,11 @@ static void wlcore_rc_update_work(struct work_struct *work)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (ieee80211_vif_is_mesh(vif)) {
ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
@@ -243,7 +253,8 @@ static void wlcore_rc_update_work(struct work_struct *work)
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -539,15 +550,16 @@ static int wlcore_irq_locked(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
while (!done && loopcount--) {
/*
* In order to avoid a race with the hardirq, clear the flag
- * before acknowledging the chip. Since the mutex is held,
- * wl1271_ps_elp_wakeup cannot be called concurrently.
+ * before acknowledging the chip.
*/
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_atomic();
@@ -641,7 +653,8 @@ static int wlcore_irq_locked(struct wl1271 *wl)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
return ret;
@@ -796,8 +809,6 @@ void wl12xx_queue_recovery_work(struct wl1271 *wl)
wl->state = WLCORE_STATE_RESTARTING;
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
- wl1271_ps_elp_wakeup(wl);
- wlcore_disable_interrupts_nosync(wl);
ieee80211_queue_work(wl->hw, &wl->recovery_work);
}
}
@@ -819,6 +830,7 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
u32 end_of_log = 0;
+ int error;
if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
return;
@@ -830,8 +842,11 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
* Do not send a stop fwlog command if the fw is hanged or if
* dbgpins are used (due to some fw bug).
*/
- if (wl1271_ps_elp_wakeup(wl))
+ error = pm_runtime_get_sync(wl->dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(wl->dev);
return;
+ }
if (!wl->watchdog_recovery &&
wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
wl12xx_cmd_stop_fwlog(wl);
@@ -919,12 +934,20 @@ static void wl1271_recovery_work(struct work_struct *work)
container_of(work, struct wl1271, recovery_work);
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
+ int error;
mutex_lock(&wl->mutex);
if (wl->state == WLCORE_STATE_OFF || wl->plt)
goto out_unlock;
+ error = pm_runtime_get_sync(wl->dev);
+ if (error < 0) {
+ wl1271_warning("Enable for recovery failed");
+ pm_runtime_put_noidle(wl->dev);
+ }
+ wlcore_disable_interrupts_nosync(wl);
+
if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
wl12xx_read_fwlog_panic(wl);
@@ -958,6 +981,8 @@ static void wl1271_recovery_work(struct work_struct *work)
}
wlcore_op_stop_locked(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
ieee80211_restart_hw(wl->hw);
@@ -978,24 +1003,6 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
-static int wlcore_fw_sleep(struct wl1271 *wl)
-{
- int ret;
-
- mutex_lock(&wl->mutex);
- ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
- goto out;
- }
- set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
-out:
- mutex_unlock(&wl->mutex);
- mdelay(WL1271_SUSPEND_SLEEP);
-
- return 0;
-}
-
static int wl1271_setup(struct wl1271 *wl)
{
wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
@@ -1184,7 +1191,6 @@ int wl1271_plt_stop(struct wl1271 *wl)
wl1271_flush_deferred_work(wl);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->recovery_work);
- cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
mutex_lock(&wl->mutex);
@@ -1719,6 +1725,7 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
+ unsigned long flags;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
@@ -1734,8 +1741,9 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
+ ret = pm_runtime_get_sync(wl->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
mutex_unlock(&wl->mutex);
return ret;
}
@@ -1765,6 +1773,7 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
goto out_sleep;
out_sleep:
+ pm_runtime_put_noidle(wl->dev);
mutex_unlock(&wl->mutex);
if (ret < 0) {
@@ -1775,21 +1784,7 @@ out_sleep:
/* flush any remaining work */
wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
- /*
- * disable and re-enable interrupts in order to flush
- * the threaded_irq
- */
- wlcore_disable_interrupts(wl);
-
- /*
- * set suspended flag to avoid triggering a new threaded_irq
- * work. no need for spinlock as interrupts are disabled.
- */
- set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
-
- wlcore_enable_interrupts(wl);
flush_work(&wl->tx_work);
- flush_delayed_work(&wl->elp_work);
/*
* Cancel the watchdog even if above tx_flush failed. We will detect
@@ -1798,15 +1793,14 @@ out_sleep:
cancel_delayed_work(&wl->tx_watchdog_work);
/*
- * Use an immediate call for allowing the firmware to go into power
- * save during suspend.
- * Using a workque for this last write was only hapenning on resume
- * leaving the firmware with power save disabled during suspend,
- * while consuming full power during wowlan suspend.
+ * set suspended flag to avoid triggering a new threaded_irq
+ * work.
*/
- wlcore_fw_sleep(wl);
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
- return 0;
+ return pm_runtime_force_suspend(wl->dev);
}
static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
@@ -1821,6 +1815,12 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
wl->wow_enabled);
WARN_ON(!wl->wow_enabled);
+ ret = pm_runtime_force_resume(wl->dev);
+ if (ret < 0) {
+ wl1271_error("ELP wakeup failure!");
+ goto out_sleep;
+ }
+
/*
* re-enable irq_work enqueuing, and call irq_work directly if
* there is a pending work.
@@ -1857,9 +1857,11 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
goto out_sleep;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlcore_is_p2p_mgmt(wlvif))
@@ -1878,7 +1880,8 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
wl->wow_enabled = false;
@@ -1945,7 +1948,6 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
cancel_delayed_work_sync(&wl->scan_complete_work);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->tx_work);
- cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
/* let's notify MAC80211 about the remaining pending TX frames */
@@ -2060,13 +2062,16 @@ static void wlcore_channel_switch_work(struct work_struct *work)
vif = wl12xx_wlvif_to_vif(wlvif);
ieee80211_chswitch_done(vif, false);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_cmd_stop_channel_switch(wl, wlvif);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -2128,14 +2133,17 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work)
if (!time_after(time_spare, wlvif->pending_auth_reply_time))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* cancel the ROC if active */
wlcore_update_inconn_sta(wl, wlvif, NULL, false);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -2537,9 +2545,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
wl12xx_get_vif_count(hw, vif, &vif_count);
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out_unlock;
/*
* in some very corner case HW recovery scenarios its possible to
@@ -2568,14 +2573,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- if (wl12xx_need_fw_change(wl, vif_count, true)) {
- wl12xx_force_active_psm(wl);
- set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
- mutex_unlock(&wl->mutex);
- wl1271_recovery_work(&wl->recovery_work);
- return 0;
- }
-
/*
* TODO: after the nvs issue will be solved, move this block
* to start(), and make sure here the driver is ON.
@@ -2592,6 +2589,24 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
+ /*
+ * Call runtime PM only after possible wl12xx_init_fw() above
+ * is done. Otherwise we do not have interrupts enabled.
+ */
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
+ goto out_unlock;
+ }
+
+ if (wl12xx_need_fw_change(wl, vif_count, true)) {
+ wl12xx_force_active_psm(wl);
+ set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
+ mutex_unlock(&wl->mutex);
+ wl1271_recovery_work(&wl->recovery_work);
+ return 0;
+ }
+
if (!wlcore_is_p2p_mgmt(wlvif)) {
ret = wl12xx_cmd_role_enable(wl, vif->addr,
role_type, &wlvif->role_id);
@@ -2622,7 +2637,8 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
else
wl->sta_count++;
out:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out_unlock:
mutex_unlock(&wl->mutex);
@@ -2677,9 +2693,11 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
/* disable active roles */
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto deinit;
+ }
if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
wlvif->bss_type == BSS_TYPE_IBSS) {
@@ -2697,7 +2715,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
goto deinit;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
}
deinit:
wl12xx_tx_reset_wlvif(wl, wlvif);
@@ -3121,9 +3140,11 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* configure each interface */
wl12xx_for_each_wlvif(wl, wlvif) {
@@ -3133,7 +3154,8 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3202,9 +3224,11 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlcore_is_p2p_mgmt(wlvif))
@@ -3247,7 +3271,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
*/
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3454,13 +3479,16 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto out_wake_queues;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out_wake_queues;
+ }
ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out_wake_queues:
if (might_change_spare)
@@ -3600,9 +3628,11 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
goto out_unlock;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out_unlock;
+ }
wlvif->default_key = key_idx;
@@ -3616,7 +3646,8 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out_unlock:
mutex_unlock(&wl->mutex);
@@ -3634,7 +3665,7 @@ void wlcore_regdomain_config(struct wl1271 *wl)
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
+ ret = pm_runtime_get_sync(wl->dev);
if (ret < 0)
goto out;
@@ -3644,7 +3675,8 @@ void wlcore_regdomain_config(struct wl1271 *wl)
goto out;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -3678,9 +3710,11 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* fail if there is any role in ROC */
if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
@@ -3691,7 +3725,8 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
ret = wlcore_scan(hw->priv, vif, ssid, len, req);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3718,9 +3753,11 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
ret = wl->ops->scan_stop(wl, wlvif);
@@ -3741,7 +3778,8 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
ieee80211_scan_completed(wl->hw, &info);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3766,9 +3804,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
if (ret < 0)
@@ -3777,7 +3817,8 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
wl->sched_vif = wlvif;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
@@ -3797,13 +3838,16 @@ static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl->ops->sched_scan_stop(wl, wlvif);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3822,15 +3866,18 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_acx_frag_threshold(wl, value);
if (ret < 0)
wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -3851,16 +3898,19 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_acx_rts_threshold(wl, wlvif, value);
if (ret < 0)
wl1271_warning("set rts threshold failed: %d", ret);
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4607,9 +4657,11 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if ((changed & BSS_CHANGED_TXPOWER) &&
bss_conf->txpower != wlvif->power_level) {
@@ -4626,7 +4678,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
else
wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4665,9 +4718,11 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl12xx_for_each_wlvif(wl, wlvif) {
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
@@ -4690,7 +4745,8 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
}
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -4719,9 +4775,11 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wlvif->band = ctx->def.chan->band;
wlvif->channel = channel;
@@ -4737,7 +4795,8 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
wlvif->radar_enabled = true;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4768,9 +4827,11 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (wlvif->radar_enabled) {
wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
@@ -4778,7 +4839,8 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
wlvif->radar_enabled = false;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -4835,9 +4897,11 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
for (i = 0; i < n_vifs; i++) {
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
@@ -4847,7 +4911,8 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
goto out_sleep;
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4878,9 +4943,11 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/*
	 * the txop is configured in units of 32us by mac80211,
@@ -4899,7 +4966,8 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
0, 0);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -4923,16 +4991,19 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
if (ret < 0)
goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -5238,13 +5309,16 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
if (new_state < old_state)
@@ -5293,9 +5367,11 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
ba_bitmap = &wl->links[hlid].ba_bitmap;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
tid, action);
@@ -5368,7 +5444,8 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
ret = -EINVAL;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -5402,16 +5479,19 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl1271_set_band_rate(wl, wlvif);
wlvif->basic_rate =
wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_acx_sta_rate_policies(wl, wlvif);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
}
out:
mutex_unlock(&wl->mutex);
@@ -5441,9 +5521,11 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
/* TODO: change mac80211 to pass vif as param */
@@ -5465,7 +5547,8 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -5532,9 +5615,11 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
if (ret)
@@ -5543,7 +5628,8 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
@@ -5584,9 +5670,11 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
if (ret < 0)
@@ -5596,7 +5684,8 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
msecs_to_jiffies(duration));
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
@@ -5638,13 +5727,16 @@ static int wlcore_roc_completed(struct wl1271 *wl)
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = __wlcore_roc_completed(wl);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -5719,19 +5811,22 @@ static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out_sleep;
+ }
ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
if (ret < 0)
goto out_sleep;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = rssi_dbm;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -6065,16 +6160,16 @@ static int wl1271_register_hw(struct wl1271 *wl)
}
if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
- wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.\n");
+ wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
if (!strcmp(pdev_data->family->name, "wl18xx")) {
- wl1271_warning("This default nvs file can be removed from the file system\n");
+ wl1271_warning("This default nvs file can be removed from the file system");
} else {
- wl1271_warning("Your device performance is not optimized.\n");
- wl1271_warning("Please use the calibrator tool to configure your device.\n");
+ wl1271_warning("Your device performance is not optimized.");
+ wl1271_warning("Please use the calibrator tool to configure your device.");
}
if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
- wl1271_warning("Fuse mac address is zero. using random mac\n");
+ wl1271_warning("Fuse mac address is zero. using random mac");
/* Use TI oui and a random nic */
oui_addr = WLCORE_TI_OUI_ADDRESS;
nic_addr = get_random_int();
@@ -6300,7 +6395,6 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
skb_queue_head_init(&wl->deferred_rx_queue);
skb_queue_head_init(&wl->deferred_tx_queue);
- INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
@@ -6575,6 +6669,99 @@ out:
complete_all(&wl->nvs_loading_complete);
}
+static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ struct wl12xx_vif *wlvif;
+ int error;
+
+ /* We do not enter elp sleep in PLT mode */
+ if (wl->plt)
+ return 0;
+
+ /* Nothing to do if no ELP mode requested */
+ if (wl->sleep_auth != WL1271_PSM_ELP)
+ return 0;
+
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+ return -EBUSY;
+ }
+
+ wl1271_debug(DEBUG_PSM, "chip to elp");
+ error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
+ if (error < 0) {
+ wl12xx_queue_recovery_work(wl);
+
+ return error;
+ }
+
+ set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
+
+ return 0;
+}
+
+static int __maybe_unused wlcore_runtime_resume(struct device *dev)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ DECLARE_COMPLETION_ONSTACK(compl);
+ unsigned long flags;
+ int ret;
+ unsigned long start_time = jiffies;
+ bool pending = false;
+
+ /* Nothing to do if no ELP mode requested */
+ if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
+ return 0;
+
+ wl1271_debug(DEBUG_PSM, "waking up chip from elp");
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
+ pending = true;
+ else
+ wl->elp_compl = &compl;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
+ if (ret < 0) {
+ wl12xx_queue_recovery_work(wl);
+ goto err;
+ }
+
+ if (!pending) {
+ ret = wait_for_completion_timeout(&compl,
+ msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
+ if (ret == 0) {
+ wl1271_error("ELP wakeup timeout!");
+ wl12xx_queue_recovery_work(wl);
+
+			/* Return no error to runtime PM so the recovery work can run */
+ return 0;
+ }
+ }
+
+ clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
+
+ wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
+ jiffies_to_msecs(jiffies - start_time));
+
+ return 0;
+
+err:
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->elp_compl = NULL;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ return ret;
+}
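[editor's note: wlcore_runtime_resume() parks a completion in wl->elp_compl for the interrupt path to signal once the chip acknowledges the wakeup write. The IRQ-side half is not in this hunk; a hedged sketch of what it is assumed to look like, modeled on wlcore's existing hardirq handler, names illustrative:]

	static irqreturn_t example_hardirq(int irq, void *cookie)
	{
		struct wl1271 *wl = cookie;
		unsigned long flags;

		/* same lock wlcore_runtime_resume() takes around elp_compl,
		 * so waiter setup and completion stay atomic */
		spin_lock_irqsave(&wl->wl_lock, flags);
		if (wl->elp_compl) {
			complete(wl->elp_compl);
			wl->elp_compl = NULL;
		}
		spin_unlock_irqrestore(&wl->wl_lock, flags);

		return IRQ_WAKE_THREAD;
	}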
+
+static const struct dev_pm_ops wlcore_pm_ops = {
+ SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
+ wlcore_runtime_resume,
+ NULL)
+};
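[editor's note: the two callbacks above carry __maybe_unused because SET_RUNTIME_PM_OPS() expands to nothing when CONFIG_PM is disabled, leaving them unreferenced. Roughly, from include/linux/pm.h:]

	#ifdef CONFIG_PM
	#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
		.runtime_suspend = suspend_fn, \
		.runtime_resume = resume_fn, \
		.runtime_idle = idle_fn,
	#else
	#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
	#endif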
+
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
@@ -6602,6 +6789,11 @@ int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
wlcore_nvs_cb(NULL, wl);
}
+ wl->dev->driver->pm = &wlcore_pm_ops;
+ pm_runtime_set_autosuspend_delay(wl->dev, 50);
+ pm_runtime_use_autosuspend(wl->dev);
+ pm_runtime_enable(wl->dev);
+
return ret;
}
EXPORT_SYMBOL_GPL(wlcore_probe);
@@ -6610,6 +6802,13 @@ int wlcore_remove(struct platform_device *pdev)
{
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
struct wl1271 *wl = platform_get_drvdata(pdev);
+ int error;
+
+ error = pm_runtime_get_sync(wl->dev);
+ if (error < 0)
+ dev_warn(wl->dev, "PM runtime failed: %i\n", error);
+
+ wl->dev->driver->pm = NULL;
if (pdev_data->family && pdev_data->family->nvs_name)
wait_for_completion(&wl->nvs_loading_complete);
@@ -6621,6 +6820,11 @@ int wlcore_remove(struct platform_device *pdev)
disable_irq_wake(wl->irq);
}
wl1271_unregister_hw(wl);
+
+ pm_runtime_put_sync(wl->dev);
+ pm_runtime_dont_use_autosuspend(wl->dev);
+ pm_runtime_disable(wl->dev);
+
free_irq(wl->irq, wl);
wlcore_free_hw(wl);
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index b36133b739cb..9de843d1984b 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -26,152 +26,6 @@
#include "tx.h"
#include "debug.h"
-#define WL1271_WAKEUP_TIMEOUT 500
-
-#define ELP_ENTRY_DELAY 30
-#define ELP_ENTRY_DELAY_FORCE_PS 5
-
-void wl1271_elp_work(struct work_struct *work)
-{
- struct delayed_work *dwork;
- struct wl1271 *wl;
- struct wl12xx_vif *wlvif;
- int ret;
-
- dwork = to_delayed_work(work);
- wl = container_of(dwork, struct wl1271, elp_work);
-
- wl1271_debug(DEBUG_PSM, "elp work");
-
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state != WLCORE_STATE_ON))
- goto out;
-
- /* our work might have been already cancelled */
- if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
- goto out;
-
- if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
- goto out;
-
- wl12xx_for_each_wlvif(wl, wlvif) {
- if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
- test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
- goto out;
- }
-
- wl1271_debug(DEBUG_PSM, "chip to elp");
- ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
- goto out;
- }
-
- set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
-
-out:
- mutex_unlock(&wl->mutex);
-}
-
-/* Routines to toggle sleep mode while in ELP */
-void wl1271_ps_elp_sleep(struct wl1271 *wl)
-{
- struct wl12xx_vif *wlvif;
- u32 timeout;
-
- /* We do not enter elp sleep in PLT mode */
- if (wl->plt)
- return;
-
- if (wl->sleep_auth != WL1271_PSM_ELP)
- return;
-
- /* we shouldn't get consecutive sleep requests */
- if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
- return;
-
- wl12xx_for_each_wlvif(wl, wlvif) {
- if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
- test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
- return;
- }
-
- timeout = wl->conf.conn.forced_ps ?
- ELP_ENTRY_DELAY_FORCE_PS : ELP_ENTRY_DELAY;
- ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
- msecs_to_jiffies(timeout));
-}
-EXPORT_SYMBOL_GPL(wl1271_ps_elp_sleep);
-
-int wl1271_ps_elp_wakeup(struct wl1271 *wl)
-{
- DECLARE_COMPLETION_ONSTACK(compl);
- unsigned long flags;
- int ret;
- unsigned long start_time = jiffies;
- bool pending = false;
-
- /*
- * we might try to wake up even if we didn't go to sleep
- * before (e.g. on boot)
- */
- if (!test_and_clear_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))
- return 0;
-
- /* don't cancel_sync as it might contend for a mutex and deadlock */
- cancel_delayed_work(&wl->elp_work);
-
- if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
- return 0;
-
- wl1271_debug(DEBUG_PSM, "waking up chip from elp");
-
- /*
- * The spinlock is required here to synchronize both the work and
- * the completion variable in one entity.
- */
- spin_lock_irqsave(&wl->wl_lock, flags);
- if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
- pending = true;
- else
- wl->elp_compl = &compl;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
- goto err;
- }
-
- if (!pending) {
- ret = wait_for_completion_timeout(
- &compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
- if (ret == 0) {
- wl1271_error("ELP wakeup timeout!");
- wl12xx_queue_recovery_work(wl);
- ret = -ETIMEDOUT;
- goto err;
- }
- }
-
- clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
-
- wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
- jiffies_to_msecs(jiffies - start_time));
- goto out;
-
-err:
- spin_lock_irqsave(&wl->wl_lock, flags);
- wl->elp_compl = NULL;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- return ret;
-
-out:
- return 0;
-}
-EXPORT_SYMBOL_GPL(wl1271_ps_elp_wakeup);
-
int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum wl1271_cmd_ps_mode mode)
{
diff --git a/drivers/net/wireless/ti/wlcore/ps.h b/drivers/net/wireless/ti/wlcore/ps.h
index de4f9da8ed26..411727587f95 100644
--- a/drivers/net/wireless/ti/wlcore/ps.h
+++ b/drivers/net/wireless/ti/wlcore/ps.h
@@ -29,9 +29,6 @@
int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum wl1271_cmd_ps_mode mode);
-void wl1271_ps_elp_sleep(struct wl1271 *wl);
-int wl1271_ps_elp_wakeup(struct wl1271 *wl);
-void wl1271_elp_work(struct work_struct *work);
void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 hlid, bool clean_queues);
void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 0f15696195f8..078a4940bc5c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
static void wl1271_rx_status(struct wl1271 *wl,
struct wl1271_rx_descriptor *desc,
struct ieee80211_rx_status *status,
- u8 beacon)
+ u8 beacon, u8 probe_rsp)
{
memset(status, 0, sizeof(struct ieee80211_rx_status));
@@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
}
}
+ if (beacon || probe_rsp)
+ status->boottime_ns = ktime_get_boot_ns();
+
if (beacon)
wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
status->band);
@@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
if (ieee80211_is_data_present(hdr->frame_control))
is_data = 1;
- wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
+ wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
+ ieee80211_is_probe_resp(hdr->frame_control));
wlcore_hw_set_rx_csum(wl, desc, skb);
seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 5612f5916b4e..764e723e4ef9 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -22,13 +22,13 @@
*/
#include <linux/ieee80211.h>
+#include <linux/pm_runtime.h>
#include "wlcore.h"
#include "debug.h"
#include "cmd.h"
#include "scan.h"
#include "acx.h"
-#include "ps.h"
#include "tx.h"
void wl1271_scan_complete_work(struct work_struct *work)
@@ -67,17 +67,17 @@ void wl1271_scan_complete_work(struct work_struct *work)
wl->scan.req = NULL;
wl->scan_wlvif = NULL;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
/* restore hardware connection monitoring template */
wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq);
}
- wl1271_ps_elp_sleep(wl);
-
if (wl->scan.failed) {
wl1271_info("Scan completed due to error.");
wl12xx_queue_recovery_work(wl);
@@ -85,6 +85,9 @@ void wl1271_scan_complete_work(struct work_struct *work)
wlcore_cmd_regdomain_config_locked(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
+
ieee80211_scan_completed(wl->hw, &info);
out:
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index d31eb775e023..7425ba9471d0 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -19,9 +19,11 @@
*
*/
+#include <linux/pm_runtime.h>
+
+#include "acx.h"
#include "wlcore.h"
#include "debug.h"
-#include "ps.h"
#include "sysfs.h"
static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
@@ -68,12 +70,15 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
wl1271_acx_sg_enable(wl, wl->sg_enabled);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 009ec07c4cec..dcb2c8b0feb6 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -22,13 +22,13 @@
*/
#include "testmode.h"
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <net/genetlink.h>
#include "wlcore.h"
#include "debug.h"
#include "acx.h"
-#include "ps.h"
#include "io.h"
#define WL1271_TM_MAX_DATA_LENGTH 1024
@@ -97,9 +97,11 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wl1271_cmd_test(wl, buf, buf_len, answer);
if (ret < 0) {
@@ -141,7 +143,8 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
}
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -169,9 +172,11 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
@@ -205,7 +210,8 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
out_free:
kfree(cmd);
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 00e9b4624dcf..b6e19c2d66b0 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
+#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include "wlcore.h"
@@ -868,9 +869,11 @@ void wl1271_tx_work(struct work_struct *work)
int ret;
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wlcore_tx_work_locked(wl);
if (ret < 0) {
@@ -878,7 +881,8 @@ void wl1271_tx_work(struct work_struct *work)
goto out;
}
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index 5c0bcb1fe1a1..dbe78d8491ef 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -8,12 +8,13 @@
* version 2 as published by the Free Software Foundation.
*/
+#include <linux/pm_runtime.h>
+
#include <net/mac80211.h>
#include <net/netlink.h>
#include "wlcore.h"
#include "debug.h"
-#include "ps.h"
#include "hw_ops.h"
#include "vendor_cmd.h"
@@ -55,14 +56,17 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wlcore_smart_config_start(wl,
nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]));
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -87,13 +91,16 @@ wlcore_vendor_cmd_smart_config_stop(struct wiphy *wiphy,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wlcore_smart_config_stop(wl);
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
@@ -131,16 +138,19 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
+ ret = pm_runtime_get_sync(wl->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wl->dev);
goto out;
+ }
ret = wlcore_smart_config_set_group_key(wl,
nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]),
nla_len(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]),
nla_data(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]));
- wl1271_ps_elp_sleep(wl);
+ pm_runtime_mark_last_busy(wl->dev);
+ pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 95fbedc8ea34..d4b1f66ef457 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -348,7 +348,6 @@ struct wl1271 {
enum nl80211_band band;
struct completion *elp_compl;
- struct delayed_work elp_work;
/* in dBm */
int power_level;
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index e840985385fc..32ec121ccac2 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -233,7 +233,6 @@ enum wl12xx_flags {
WL1271_FLAG_TX_QUEUE_STOPPED,
WL1271_FLAG_TX_PENDING,
WL1271_FLAG_IN_ELP,
- WL1271_FLAG_ELP_REQUESTED,
WL1271_FLAG_IRQ_RUNNING,
WL1271_FLAG_FW_TX_BUSY,
WL1271_FLAG_DUMMY_PACKET_PENDING,
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
index 07b94eda9604..dd6a86b899eb 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
@@ -1341,7 +1341,7 @@ int zd_chip_control_leds(struct zd_chip *chip, enum led_status status)
case ZD_LED_SCANNING:
ioreqs[0].value = FW_LINK_OFF;
ioreqs[1].value = v[1] & ~other_led;
- if (get_seconds() % 3 == 0) {
+ if ((u32)ktime_get_seconds() % 3 == 0) {
ioreqs[1].value &= ~chip->link_led;
} else {
ioreqs[1].value |= chip->link_led;
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index c30bf118c67d..c2cda3acd4af 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -371,25 +371,27 @@ static inline void handle_regs_int_override(struct urb *urb)
{
struct zd_usb *usb = urb->context;
struct zd_usb_interrupt *intr = &usb->intr;
+ unsigned long flags;
- spin_lock(&intr->lock);
+ spin_lock_irqsave(&intr->lock, flags);
if (atomic_read(&intr->read_regs_enabled)) {
atomic_set(&intr->read_regs_enabled, 0);
intr->read_regs_int_overridden = 1;
complete(&intr->read_regs.completion);
}
- spin_unlock(&intr->lock);
+ spin_unlock_irqrestore(&intr->lock, flags);
}
static inline void handle_regs_int(struct urb *urb)
{
struct zd_usb *usb = urb->context;
struct zd_usb_interrupt *intr = &usb->intr;
+ unsigned long flags;
int len;
u16 int_num;
ZD_ASSERT(in_interrupt());
- spin_lock(&intr->lock);
+ spin_lock_irqsave(&intr->lock, flags);
int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
if (int_num == CR_INTERRUPT) {
@@ -425,7 +427,7 @@ static inline void handle_regs_int(struct urb *urb)
}
out:
- spin_unlock(&intr->lock);
+ spin_unlock_irqrestore(&intr->lock, flags);
/* CR_INTERRUPT might override read_reg too. */
if (int_num == CR_INTERRUPT && atomic_read(&intr->read_regs_enabled))
@@ -665,6 +667,7 @@ static void rx_urb_complete(struct urb *urb)
struct zd_usb_rx *rx;
const u8 *buffer;
unsigned int length;
+ unsigned long flags;
switch (urb->status) {
case 0:
@@ -693,14 +696,14 @@ static void rx_urb_complete(struct urb *urb)
/* If there is an old first fragment, we don't care. */
dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
ZD_ASSERT(length <= ARRAY_SIZE(rx->fragment));
- spin_lock(&rx->lock);
+ spin_lock_irqsave(&rx->lock, flags);
memcpy(rx->fragment, buffer, length);
rx->fragment_length = length;
- spin_unlock(&rx->lock);
+ spin_unlock_irqrestore(&rx->lock, flags);
goto resubmit;
}
- spin_lock(&rx->lock);
+ spin_lock_irqsave(&rx->lock, flags);
if (rx->fragment_length > 0) {
/* We are on a second fragment, we believe */
ZD_ASSERT(length + rx->fragment_length <=
@@ -710,9 +713,9 @@ static void rx_urb_complete(struct urb *urb)
handle_rx_packet(usb, rx->fragment,
rx->fragment_length + length);
rx->fragment_length = 0;
- spin_unlock(&rx->lock);
+ spin_unlock_irqrestore(&rx->lock, flags);
} else {
- spin_unlock(&rx->lock);
+ spin_unlock_irqrestore(&rx->lock, flags);
handle_rx_packet(usb, buffer, length);
}
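[editor's note: all of the zd1211rw hunks above make the same substitution. A short sketch of the reasoning, an assumption consistent with the change since URB completion handlers are no longer guaranteed to run in hard-IRQ context:]

	static void example_handler(struct zd_usb *usb)
	{
		unsigned long flags;

		/* plain spin_lock() is only safe if every acquirer runs
		 * with IRQs disabled; these handlers may run in softirq or
		 * process context, so the IRQ state must be saved and
		 * restored explicitly to avoid a deadlock against the
		 * interrupt path taking the same lock on this CPU */
		spin_lock_irqsave(&usb->intr.lock, flags);
		/* ... touch state shared with interrupt context ... */
		spin_unlock_irqrestore(&usb->intr.lock, flags);
	}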
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 78ebe494fef0..92274c237200 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -148,14 +148,14 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
}
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct xenvif *vif = netdev_priv(dev);
unsigned int size = vif->hash.size;
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
- return fallback(dev, skb) % dev->real_num_tx_queues;
+ return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
xenvif_set_skb_hash(vif, skb);
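[editor's note: this hunk and the xen-netfront one further down track the same net core API change assumed by this series: ndo_select_queue() takes a subordinate device instead of the opaque accel_priv, and the fallback helper grows a matching third argument. A minimal sketch of the new shape:]

	static u16 example_select_queue(struct net_device *dev,
					struct sk_buff *skb,
					struct net_device *sb_dev,
					select_queue_fallback_t fallback)
	{
		/* fallback() now takes the subordinate device as well */
		return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
	}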
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index a27daa23c9dc..3621e05a7494 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1603,9 +1603,9 @@ static void xenvif_ctrl_action(struct xenvif *vif)
static bool xenvif_ctrl_work_todo(struct xenvif *vif)
{
if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
- return 1;
+ return true;
- return 0;
+ return false;
}
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 9dd2ca62d84a..73f596a90c69 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -546,7 +546,8 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
}
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
unsigned int num_queues = dev->real_num_tx_queues;
u32 hash;
@@ -1603,14 +1604,16 @@ static int xennet_init_queue(struct netfront_queue *queue)
{
unsigned short i;
int err = 0;
+ char *devid;
spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
- snprintf(queue->name, sizeof(queue->name), "%s-q%u",
- queue->info->netdev->name, queue->id);
+ devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
+ snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
+ devid, queue->id);
/* Initialise tx_skbs as a free chain containing every entry. */
queue->tx_skb_freelist = 0;
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index f624ae27eabe..5ee5f40b4dfc 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -19,6 +19,7 @@
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ntb.h>
+#include <linux/pci.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
MODULE_VERSION("0.1");
@@ -1487,7 +1488,7 @@ static int switchtec_ntb_add(struct device *dev,
stdev->sndev = NULL;
- if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
+ if (stdev->pdev->class != (PCI_CLASS_BRIDGE_OTHER << 8))
return -ENODEV;
sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
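[editor's note: on the class check just above: pdev->class packs base class, subclass, and programming interface into 24 bits, while PCI_CLASS_BRIDGE_OTHER (0x0680) covers only base class and subclass, hence the shift past the prog-if byte. A hedged sketch of the comparison in isolation:]

	#include <linux/pci.h>

	static bool example_is_ntb_class(struct pci_dev *pdev)
	{
		/* pdev->class: base << 16 | subclass << 8 | prog-if;
		 * PCI_CLASS_BRIDGE_OTHER == 0x0680, so shift past prog-if */
		return pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8);
	}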
diff --git a/drivers/nubus/bus.c b/drivers/nubus/bus.c
index a59b6c4bb5b8..ad3d17c42e23 100644
--- a/drivers/nubus/bus.c
+++ b/drivers/nubus/bus.c
@@ -5,6 +5,7 @@
// Copyright (C) 2017 Finn Thain
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/nubus.h>
#include <linux/seq_file.h>
@@ -93,6 +94,8 @@ int nubus_device_register(struct nubus_board *board)
board->dev.release = nubus_device_release;
board->dev.bus = &nubus_bus_type;
dev_set_name(&board->dev, "slot.%X", board->slot);
+ board->dev.dma_mask = &board->dev.coherent_dma_mask;
+ dma_set_mask(&board->dev, DMA_BIT_MASK(32));
return device_register(&board->dev);
}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 85de8053aa34..0360c015f658 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1423,11 +1423,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
struct page *page, unsigned int len, unsigned int off,
- bool is_write, sector_t sector)
+ unsigned int op, sector_t sector)
{
int ret;
- if (!is_write) {
+ if (!op_is_write(op)) {
ret = btt_read_pg(btt, bip, page, off, sector, len);
flush_dcache_page(page);
} else {
@@ -1464,7 +1464,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
}
err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
- op_is_write(bio_op(bio)), iter.bi_sector);
+ bio_op(bio), iter.bi_sector);
if (err) {
dev_err(&btt->nd_btt->dev,
"io error in %s sector %lld, len %d,\n",
@@ -1483,16 +1483,16 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, bool is_write)
+ struct page *page, unsigned int op)
{
struct btt *btt = bdev->bd_disk->private_data;
int rc;
unsigned int len;
len = hpage_nr_pages(page) * PAGE_SIZE;
- rc = btt_do_bvec(btt, NULL, page, len, 0, is_write, sector);
+ rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector);
if (rc == 0)
- page_endio(page, is_write, 0);
+ page_endio(page, op_is_write(op), 0);
return rc;
}
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 32e0364b48b9..6ee7fd7e4bbd 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -396,16 +396,15 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
return false;
*start = jiffies;
- generic_start_io_acct(disk->queue, bio_data_dir(bio),
- bio_sectors(bio), &disk->part0);
+ generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),
+ &disk->part0);
return true;
}
static inline void nd_iostat_end(struct bio *bio, unsigned long start)
{
struct gendisk *disk = bio->bi_disk;
- generic_end_io_acct(disk->queue, bio_data_dir(bio), &disk->part0,
- start);
+ generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start);
}
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
unsigned int len)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 8b1fd7f1a224..dd17acd8fe68 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -120,7 +120,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
}
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
- unsigned int len, unsigned int off, bool is_write,
+ unsigned int len, unsigned int off, unsigned int op,
sector_t sector)
{
blk_status_t rc = BLK_STS_OK;
@@ -131,7 +131,7 @@ static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
bad_pmem = true;
- if (!is_write) {
+ if (!op_is_write(op)) {
if (unlikely(bad_pmem))
rc = BLK_STS_IOERR;
else {
@@ -180,8 +180,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
- bvec.bv_offset, op_is_write(bio_op(bio)),
- iter.bi_sector);
+ bvec.bv_offset, bio_op(bio), iter.bi_sector);
if (rc) {
bio->bi_status = rc;
break;
@@ -198,13 +197,13 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, bool is_write)
+ struct page *page, unsigned int op)
{
struct pmem_device *pmem = bdev->bd_queue->queuedata;
blk_status_t rc;
rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
- 0, is_write, sector);
+ 0, op, sector);
/*
* The ->rw_page interface is subtle and tricky. The core
@@ -213,7 +212,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
* caused by double completion.
*/
if (rc == 0)
- page_endio(page, is_write, 0);
+ page_endio(page, op_is_write(op), 0);
return blk_status_to_errno(rc);
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index bf65501e6ed6..dd8ec1dd9219 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -252,7 +252,8 @@ void nvme_complete_rq(struct request *req)
trace_nvme_complete_rq(req);
if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
- if (nvme_req_needs_failover(req, status)) {
+ if ((req->cmd_flags & REQ_NVME_MPATH) &&
+ blk_path_error(status)) {
nvme_failover_req(req);
return;
}
@@ -617,6 +618,8 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
return BLK_STS_NOTSUPP;
control |= NVME_RW_PRINFO_PRACT;
+ } else if (req_op(req) == REQ_OP_WRITE) {
+ t10_pi_prepare(req, ns->pi_type);
}
switch (ns->pi_type) {
@@ -627,8 +630,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
case NVME_NS_DPS_PI_TYPE2:
control |= NVME_RW_PRINFO_PRCHK_GUARD |
NVME_RW_PRINFO_PRCHK_REF;
- cmnd->rw.reftag = cpu_to_le32(
- nvme_block_nr(ns, blk_rq_pos(req)));
+ cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
break;
}
}
@@ -638,6 +640,22 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
return 0;
}
+void nvme_cleanup_cmd(struct request *req)
+{
+ if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
+ nvme_req(req)->status == 0) {
+ struct nvme_ns *ns = req->rq_disk->private_data;
+
+ t10_pi_complete(req, ns->pi_type,
+ blk_rq_bytes(req) >> ns->lba_shift);
+ }
+ if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+ kfree(page_address(req->special_vec.bv_page) +
+ req->special_vec.bv_offset);
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
+
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd)
{
@@ -668,10 +686,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
}
cmd->common.command_id = req->tag;
- if (ns)
- trace_nvme_setup_nvm_cmd(req->q->id, cmd);
- else
- trace_nvme_setup_admin_cmd(cmd);
+ trace_nvme_setup_cmd(req, cmd);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -864,9 +879,6 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
if (unlikely(ctrl->kato == 0))
return;
- INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
- memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
- ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
@@ -1056,7 +1068,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
#define NVME_AEN_SUPPORTED \
- (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT)
+ (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | NVME_AEN_CFG_ANA_CHANGE)
static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
@@ -1472,6 +1484,12 @@ static void nvme_update_disk_info(struct gendisk *disk,
set_capacity(disk, capacity);
nvme_config_discard(ns);
+
+ if (id->nsattr & (1 << 0))
+ set_disk_ro(disk, true);
+ else
+ set_disk_ro(disk, false);
+
blk_mq_unfreeze_queue(disk->queue);
}
@@ -2270,21 +2288,16 @@ out_unlock:
return ret;
}
-int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
- u8 log_page, void *log,
- size_t size, u64 offset)
+int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
+ void *log, size_t size, u64 offset)
{
struct nvme_command c = { };
unsigned long dwlen = size / 4 - 1;
c.get_log_page.opcode = nvme_admin_get_log_page;
-
- if (ns)
- c.get_log_page.nsid = cpu_to_le32(ns->head->ns_id);
- else
- c.get_log_page.nsid = cpu_to_le32(NVME_NSID_ALL);
-
+ c.get_log_page.nsid = cpu_to_le32(nsid);
c.get_log_page.lid = log_page;
+ c.get_log_page.lsp = lsp;
c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
@@ -2293,12 +2306,6 @@ int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
-static int nvme_get_log(struct nvme_ctrl *ctrl, u8 log_page, void *log,
- size_t size)
-{
- return nvme_get_log_ext(ctrl, NULL, log_page, log, size, 0);
-}
-
static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
{
int ret;
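[editor's note: the consolidated helper makes nsid and the log specific field explicit instead of deriving them from a namespace pointer. A hedged example call, mirroring the changed-ns usage later in this patch:]

	static int example_read_changed_ns_log(struct nvme_ctrl *ctrl,
					       void *log, size_t size)
	{
		/* NVME_NSID_ALL selects the controller-global page;
		 * lsp 0 means no log specific field */
		return nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS,
				    0, log, size, 0);
	}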
@@ -2309,8 +2316,8 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
if (!ctrl->effects)
return 0;
- ret = nvme_get_log(ctrl, NVME_LOG_CMD_EFFECTS, ctrl->effects,
- sizeof(*ctrl->effects));
+ ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
+ ctrl->effects, sizeof(*ctrl->effects), 0);
if (ret) {
kfree(ctrl->effects);
ctrl->effects = NULL;
@@ -2401,6 +2408,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
nvme_set_queue_limits(ctrl, ctrl->admin_q);
ctrl->sgls = le32_to_cpu(id->sgls);
ctrl->kas = le16_to_cpu(id->kas);
+ ctrl->max_namespaces = le32_to_cpu(id->mnan);
if (id->rtd3e) {
/* us -> s */
@@ -2460,8 +2468,12 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
}
+ ret = nvme_mpath_init(ctrl, id);
kfree(id);
+ if (ret < 0)
+ return ret;
+
if (ctrl->apst_enabled && !prev_apst_enabled)
dev_pm_qos_expose_latency_tolerance(ctrl->device);
else if (!ctrl->apst_enabled && prev_apst_enabled)
@@ -2680,6 +2692,10 @@ static struct attribute *nvme_ns_id_attrs[] = {
&dev_attr_nguid.attr,
&dev_attr_eui.attr,
&dev_attr_nsid.attr,
+#ifdef CONFIG_NVME_MULTIPATH
+ &dev_attr_ana_grpid.attr,
+ &dev_attr_ana_state.attr,
+#endif
NULL,
};
@@ -2702,6 +2718,14 @@ static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
return 0;
}
+#ifdef CONFIG_NVME_MULTIPATH
+ if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
+ if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
+ return 0;
+ if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
+ return 0;
+ }
+#endif
return a->mode;
}
@@ -3075,8 +3099,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
nvme_get_ctrl(ctrl);
- kfree(id);
-
device_add_disk(ctrl->device, ns->disk);
if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_id_attr_group))
@@ -3086,8 +3108,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
ns->disk->disk_name);
- nvme_mpath_add_disk(ns->head);
+ nvme_mpath_add_disk(ns, id);
nvme_fault_inject_init(ns);
+ kfree(id);
+
return;
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
@@ -3229,7 +3253,8 @@ static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
* raced with us in reading the log page, which could cause us to miss
* updates.
*/
- error = nvme_get_log(ctrl, NVME_LOG_CHANGED_NS, log, log_size);
+ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
+ log_size, 0);
if (error)
dev_warn(ctrl->device,
"reading changed ns log failed: %d\n", error);
@@ -3346,9 +3371,9 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
if (!log)
return;
- if (nvme_get_log(ctrl, NVME_LOG_FW_SLOT, log, sizeof(*log)))
- dev_warn(ctrl->device,
- "Get FW SLOT INFO log error\n");
+	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
+ sizeof(*log), 0))
+ dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
kfree(log);
}
@@ -3394,6 +3419,13 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
case NVME_AER_NOTICE_FW_ACT_STARTING:
queue_work(nvme_wq, &ctrl->fw_act_work);
break;
+#ifdef CONFIG_NVME_MULTIPATH
+ case NVME_AER_NOTICE_ANA:
+ if (!ctrl->ana_log_buf)
+ break;
+ queue_work(nvme_wq, &ctrl->ana_work);
+ break;
+#endif
default:
dev_warn(ctrl->device, "async event result %08x\n", result);
}
@@ -3426,6 +3458,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
+ nvme_mpath_stop(ctrl);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
flush_work(&ctrl->scan_work);
@@ -3463,6 +3496,7 @@ static void nvme_free_ctrl(struct device *dev)
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
kfree(ctrl->effects);
+ nvme_mpath_uninit(ctrl);
if (subsys) {
mutex_lock(&subsys->lock);
@@ -3499,6 +3533,10 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
+ INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+ memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
+ ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
+
ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
if (ret < 0)
goto out;
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index f7efe5a58cc7..206d63cb1afc 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -474,7 +474,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
{
- if (ctrl->opts->max_reconnects != -1 &&
+ if (ctrl->opts->max_reconnects == -1 ||
ctrl->nr_reconnects < ctrl->opts->max_reconnects)
return true;
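[editor's note: the operator change above is a semantic fix, on the reading that max_reconnects == -1 is the unlimited-retries sentinel: under the old &&, -1 disabled reconnects entirely. Distilled:]

	static bool example_should_reconnect(int max_reconnects,
					     int nr_reconnects)
	{
		/* -1 means unlimited retries and must short-circuit true */
		return max_reconnects == -1 ||
		       nr_reconnects < max_reconnects;
	}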
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9bac912173ba..611e70cae754 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1737,6 +1737,7 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
+ nvme_req(rq)->ctrl = &ctrl->ctrl;
return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 41279da799ed..6fe5923c95d4 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -414,12 +414,6 @@ static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id,
/* Set compacted version for upper layers */
geo->version = NVM_OCSSD_SPEC_20;
- if (!(geo->major_ver_id == 2 && geo->minor_ver_id == 0)) {
- pr_err("nvm: OCSSD version not supported (v%d.%d)\n",
- geo->major_ver_id, geo->minor_ver_id);
- return -EINVAL;
- }
-
geo->num_ch = le16_to_cpu(id->num_grp);
geo->num_lun = le16_to_cpu(id->num_pu);
geo->all_luns = geo->num_ch * geo->num_lun;
@@ -583,7 +577,13 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
struct ppa_addr ppa;
size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
size_t log_pos, offset, len;
- int ret, i;
+ int ret, i, max_len;
+
+ /*
+	 * Limit requests to a maximum of 256K to avoid issuing arbitrarily
+	 * large requests when the device does not specify a maximum
+	 * transfer size.
+ */
+ max_len = min_t(unsigned int, ctrl->max_hw_sectors << 9, 256 * 1024);
/* Normalize lba address space to obtain log offset */
ppa.ppa = slba;
@@ -596,10 +596,11 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
offset = log_pos * sizeof(struct nvme_nvm_chk_meta);
while (left) {
- len = min_t(unsigned int, left, ctrl->max_hw_sectors << 9);
+ len = min_t(unsigned int, left, max_len);
- ret = nvme_get_log_ext(ctrl, ns, NVME_NVM_LOG_REPORT_CHUNK,
- dev_meta, len, offset);
+ ret = nvme_get_log(ctrl, ns->head->ns_id,
+ NVME_NVM_LOG_REPORT_CHUNK, 0, dev_meta, len,
+ offset);
if (ret) {
dev_err(ctrl->device, "Get REPORT CHUNK log error\n");
break;
@@ -662,12 +663,10 @@ static struct request *nvme_nvm_alloc_request(struct request_queue *q,
rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
- if (rqd->bio) {
+ if (rqd->bio)
blk_init_request_from_bio(rq, rqd->bio);
- } else {
+ else
rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
- rq->__data_len = 0;
- }
return rq;
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 1ffd3e8b13a1..5a9562881d4e 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 Christoph Hellwig.
+ * Copyright (c) 2017-2018 Christoph Hellwig.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,11 @@ module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
+inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+{
+ return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
+}
+
/*
* If multipathing is enabled we need to always use the subsystem instance
* number for numbering our devices to avoid conflicts between subsystems that
@@ -45,6 +50,7 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
void nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
+ u16 status = nvme_req(req)->status;
unsigned long flags;
spin_lock_irqsave(&ns->head->requeue_lock, flags);
@@ -52,15 +58,35 @@ void nvme_failover_req(struct request *req)
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
blk_mq_end_request(req, 0);
- nvme_reset_ctrl(ns->ctrl);
- kblockd_schedule_work(&ns->head->requeue_work);
-}
+ switch (status & 0x7ff) {
+ case NVME_SC_ANA_TRANSITION:
+ case NVME_SC_ANA_INACCESSIBLE:
+ case NVME_SC_ANA_PERSISTENT_LOSS:
+ /*
+ * If we got back an ANA error we know the controller is alive,
+ * but not ready to serve this namespace. The spec suggests
+ * we should update our general state here, but due to the fact
+ * that the admin and I/O queues are not serialized that is
+ * fundamentally racy. So instead just clear the current path,
+ * mark the path as pending and kick off a re-read of the ANA
+ * log page ASAP.
+ */
+ nvme_mpath_clear_current_path(ns);
+ if (ns->ctrl->ana_log_buf) {
+ set_bit(NVME_NS_ANA_PENDING, &ns->flags);
+ queue_work(nvme_wq, &ns->ctrl->ana_work);
+ }
+ break;
+ default:
+ /*
+ * Reset the controller for any non-ANA error as we don't know
+ * what caused the error.
+ */
+ nvme_reset_ctrl(ns->ctrl);
+ break;
+ }
-bool nvme_req_needs_failover(struct request *req, blk_status_t error)
-{
- if (!(req->cmd_flags & REQ_NVME_MPATH))
- return false;
- return blk_path_error(error);
+ kblockd_schedule_work(&ns->head->requeue_work);
}
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
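The switch in nvme_failover_req() above keys off the NVMe status with the DNR and More bits masked away. A minimal sketch of that classification, assuming the path-related status code values from include/linux/nvme.h (shown here as assumed literals for illustration):

	#include <stdbool.h>
	#include <stdint.h>

	/* Assumed values; the authoritative definitions live in
	 * include/linux/nvme.h. */
	enum {
		NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
		NVME_SC_ANA_INACCESSIBLE    = 0x302,
		NVME_SC_ANA_TRANSITION      = 0x303,
	};

	static bool status_selects_ana_handling(uint16_t status)
	{
		/* 0x7ff keeps the status code and status code type;
		 * the DNR and More bits are deliberately ignored. */
		switch (status & 0x7ff) {
		case NVME_SC_ANA_TRANSITION:
		case NVME_SC_ANA_INACCESSIBLE:
		case NVME_SC_ANA_PERSISTENT_LOSS:
			return true;
		default:
			return false;
		}
	}

Anything that classifies as ANA clears the current path and re-reads the ANA log; everything else still resets the controller as before.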
@@ -75,25 +101,51 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
up_read(&ctrl->namespaces_rwsem);
}
+static const char *nvme_ana_state_names[] = {
+ [0] = "invalid state",
+ [NVME_ANA_OPTIMIZED] = "optimized",
+ [NVME_ANA_NONOPTIMIZED] = "non-optimized",
+ [NVME_ANA_INACCESSIBLE] = "inaccessible",
+ [NVME_ANA_PERSISTENT_LOSS] = "persistent-loss",
+ [NVME_ANA_CHANGE] = "change",
+};
+
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head)
{
- struct nvme_ns *ns;
+ struct nvme_ns *ns, *fallback = NULL;
list_for_each_entry_rcu(ns, &head->list, siblings) {
- if (ns->ctrl->state == NVME_CTRL_LIVE) {
+ if (ns->ctrl->state != NVME_CTRL_LIVE ||
+ test_bit(NVME_NS_ANA_PENDING, &ns->flags))
+ continue;
+ switch (ns->ana_state) {
+ case NVME_ANA_OPTIMIZED:
rcu_assign_pointer(head->current_path, ns);
return ns;
+ case NVME_ANA_NONOPTIMIZED:
+ fallback = ns;
+ break;
+ default:
+ break;
}
}
- return NULL;
+ if (fallback)
+ rcu_assign_pointer(head->current_path, fallback);
+ return fallback;
+}
+
+static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
+{
+ return ns->ctrl->state == NVME_CTRL_LIVE &&
+ ns->ana_state == NVME_ANA_OPTIMIZED;
}
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
struct nvme_ns *ns = srcu_dereference(head->current_path, &head->srcu);
- if (unlikely(!ns || ns->ctrl->state != NVME_CTRL_LIVE))
+ if (unlikely(!ns || !nvme_path_is_optimized(ns)))
ns = __nvme_find_path(head);
return ns;
}
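nvme_find_path() now prefers an optimized path and falls back to a non-optimized one; inaccessible, persistent-loss and change states are skipped entirely. A sketch of the caller contract, assuming the usual SRCU pattern used by the multipath make_request path (not a verbatim copy of it):

	int srcu_idx;
	struct nvme_ns *ns;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		/* the returned ns is only guaranteed stable inside
		 * this SRCU read-side critical section */
		/* ... remap and submit the bio to ns->queue ... */
	}
	srcu_read_unlock(&head->srcu, srcu_idx);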
@@ -142,7 +194,7 @@ static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
srcu_idx = srcu_read_lock(&head->srcu);
ns = srcu_dereference(head->current_path, &head->srcu);
- if (likely(ns && ns->ctrl->state == NVME_CTRL_LIVE))
+ if (likely(ns && nvme_path_is_optimized(ns)))
found = ns->queue->poll_fn(q, qc);
srcu_read_unlock(&head->srcu, srcu_idx);
return found;
@@ -176,6 +228,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
struct request_queue *q;
bool vwc = false;
+ mutex_init(&head->lock);
bio_list_init(&head->requeue_list);
spin_lock_init(&head->requeue_lock);
INIT_WORK(&head->requeue_work, nvme_requeue_work);
@@ -220,29 +273,232 @@ out:
return -ENOMEM;
}
-void nvme_mpath_add_disk(struct nvme_ns_head *head)
+static void nvme_mpath_set_live(struct nvme_ns *ns)
{
+ struct nvme_ns_head *head = ns->head;
+
+ lockdep_assert_held(&ns->head->lock);
+
if (!head->disk)
return;
- mutex_lock(&head->subsys->lock);
if (!(head->disk->flags & GENHD_FL_UP)) {
device_add_disk(&head->subsys->dev, head->disk);
if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
&nvme_ns_id_attr_group))
- pr_warn("%s: failed to create sysfs group for identification\n",
- head->disk->disk_name);
+ dev_warn(&head->subsys->dev,
+ "failed to create id group.\n");
+ }
+
+ kblockd_schedule_work(&ns->head->requeue_work);
+}
+
+static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
+ int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
+ void *))
+{
+ void *base = ctrl->ana_log_buf;
+ size_t offset = sizeof(struct nvme_ana_rsp_hdr);
+ int error, i;
+
+ lockdep_assert_held(&ctrl->ana_lock);
+
+ for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
+ struct nvme_ana_group_desc *desc = base + offset;
+ u32 nr_nsids = le32_to_cpu(desc->nnsids);
+ size_t nsid_buf_size = nr_nsids * sizeof(__le32);
+
+ if (WARN_ON_ONCE(desc->grpid == 0))
+ return -EINVAL;
+ if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
+ return -EINVAL;
+ if (WARN_ON_ONCE(desc->state == 0))
+ return -EINVAL;
+ if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
+ return -EINVAL;
+
+ offset += sizeof(*desc);
+ if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
+ return -EINVAL;
+
+ error = cb(ctrl, desc, data);
+ if (error)
+ return error;
+
+ offset += nsid_buf_size;
+ if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline bool nvme_state_is_live(enum nvme_ana_state state)
+{
+ return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
+}
+
+static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
+ struct nvme_ns *ns)
+{
+ enum nvme_ana_state old;
+
+ mutex_lock(&ns->head->lock);
+ old = ns->ana_state;
+ ns->ana_grpid = le32_to_cpu(desc->grpid);
+ ns->ana_state = desc->state;
+ clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
+
+ if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
+ nvme_mpath_set_live(ns);
+ mutex_unlock(&ns->head->lock);
+}
+
+static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
+ struct nvme_ana_group_desc *desc, void *data)
+{
+ u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
+ unsigned *nr_change_groups = data;
+ struct nvme_ns *ns;
+
+ dev_info(ctrl->device, "ANA group %d: %s.\n",
+ le32_to_cpu(desc->grpid),
+ nvme_ana_state_names[desc->state]);
+
+ if (desc->state == NVME_ANA_CHANGE)
+ (*nr_change_groups)++;
+
+ if (!nr_nsids)
+ return 0;
+
+ down_write(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ if (ns->head->ns_id != le32_to_cpu(desc->nsids[n]))
+ continue;
+ nvme_update_ns_ana_state(desc, ns);
+ if (++n == nr_nsids)
+ break;
+ }
+ up_write(&ctrl->namespaces_rwsem);
+ WARN_ON_ONCE(n < nr_nsids);
+ return 0;
+}
+
+static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
+{
+ u32 nr_change_groups = 0;
+ int error;
+
+ mutex_lock(&ctrl->ana_lock);
+ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
+ groups_only ? NVME_ANA_LOG_RGO : 0,
+ ctrl->ana_log_buf, ctrl->ana_log_size, 0);
+ if (error) {
+ dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
+ goto out_unlock;
+ }
+
+ error = nvme_parse_ana_log(ctrl, &nr_change_groups,
+ nvme_update_ana_state);
+ if (error)
+ goto out_unlock;
+
+ /*
+ * In theory we should have an ANATT timer per group as they might enter
+ * the change state at different times, but that is a lot of overhead
+ * just to protect against a target that keeps entering new change
+ * states while never finishing previous ones. We'll still eventually
+ * time out once all groups are in the change state, so this isn't a
+ * big deal.
+ *
+ * We also double the ANATT value to provide some slack for transports
+ * or AEN processing overhead.
+ */
+ if (nr_change_groups)
+ mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
+ else
+ del_timer_sync(&ctrl->anatt_timer);
+out_unlock:
+ mutex_unlock(&ctrl->ana_lock);
+ return error;
+}
+
+static void nvme_ana_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
+
+ nvme_read_ana_log(ctrl, false);
+}
+
+static void nvme_anatt_timeout(struct timer_list *t)
+{
+ struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
+
+ dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
+ nvme_reset_ctrl(ctrl);
+}
+
+void nvme_mpath_stop(struct nvme_ctrl *ctrl)
+{
+ if (!nvme_ctrl_use_ana(ctrl))
+ return;
+ del_timer_sync(&ctrl->anatt_timer);
+ cancel_work_sync(&ctrl->ana_work);
+}
+
+static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
+}
+DEVICE_ATTR_RO(ana_grpid);
+
+static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+
+ return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
+}
+DEVICE_ATTR_RO(ana_state);
+
+static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
+ struct nvme_ana_group_desc *desc, void *data)
+{
+ struct nvme_ns *ns = data;
+
+ if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
+ nvme_update_ns_ana_state(desc, ns);
+ return -ENXIO; /* just break out of the loop */
+ }
+
+ return 0;
+}
+
+void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+ if (nvme_ctrl_use_ana(ns->ctrl)) {
+ mutex_lock(&ns->ctrl->ana_lock);
+ ns->ana_grpid = le32_to_cpu(id->anagrpid);
+ nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
+ mutex_unlock(&ns->ctrl->ana_lock);
+ } else {
+ mutex_lock(&ns->head->lock);
+ ns->ana_state = NVME_ANA_OPTIMIZED;
+ nvme_mpath_set_live(ns);
+ mutex_unlock(&ns->head->lock);
}
- mutex_unlock(&head->subsys->lock);
}
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
- &nvme_ns_id_attr_group);
- del_gendisk(head->disk);
+ if (head->disk->flags & GENHD_FL_UP) {
+ sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
+ &nvme_ns_id_attr_group);
+ del_gendisk(head->disk);
+ }
blk_set_queue_dying(head->disk->queue);
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
@@ -250,3 +506,52 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
blk_cleanup_queue(head->disk->queue);
put_disk(head->disk);
}
+
+int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ int error;
+
+ if (!nvme_ctrl_use_ana(ctrl))
+ return 0;
+
+ ctrl->anacap = id->anacap;
+ ctrl->anatt = id->anatt;
+ ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
+ ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
+
+ mutex_init(&ctrl->ana_lock);
+ timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
+ ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
+ ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
+ if (!(ctrl->anacap & (1 << 6)))
+ ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
+
+ if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
+ dev_err(ctrl->device,
+ "ANA log page size (%zd) larger than MDTS (%d).\n",
+ ctrl->ana_log_size,
+ ctrl->max_hw_sectors << SECTOR_SHIFT);
+ dev_err(ctrl->device, "disabling ANA support.\n");
+ return 0;
+ }
+
+ INIT_WORK(&ctrl->ana_work, nvme_ana_work);
+ ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
+ if (!ctrl->ana_log_buf)
+ goto out;
+
+ error = nvme_read_ana_log(ctrl, true);
+ if (error)
+ goto out_free_ana_log_buf;
+ return 0;
+out_free_ana_log_buf:
+ kfree(ctrl->ana_log_buf);
+out:
+ return -ENOMEM;
+}
+
+void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
+{
+ kfree(ctrl->ana_log_buf);
+}
+
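For reference, nvme_parse_ana_log() above walks a log page that is a fixed header followed by variable-length group descriptors; roughly (layout per the ANA TP, sizes as used by the bounds checks):

	/*
	 * struct nvme_ana_rsp_hdr          chgcnt, ngrps, ...
	 * struct nvme_ana_group_desc [0]   grpid, nnsids, state, ...
	 *   __le32 nsids[nnsids]
	 * struct nvme_ana_group_desc [1]
	 *   __le32 nsids[nnsids]
	 * ...
	 */

which is why the parser advances offset by sizeof(*desc) plus nnsids * sizeof(__le32) per group, validating both steps against ana_log_size. nvme_mpath_init() sizes the buffer for the worst case accordingly: header plus one descriptor per reported group, plus room for every namespace ID when ANACAP bit 6 is clear.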
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 0c4a33df3b2f..bb4a2003c097 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -102,6 +102,7 @@ struct nvme_request {
u8 retries;
u8 flags;
u16 status;
+ struct nvme_ctrl *ctrl;
};
/*
@@ -119,6 +120,13 @@ static inline struct nvme_request *nvme_req(struct request *req)
return blk_mq_rq_to_pdu(req);
}
+static inline u16 nvme_req_qid(struct request *req)
+{
+ if (!req->rq_disk)
+ return 0;
+ return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
+}
+
/* The below value is the specific amount of delay needed before checking
* readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
* NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
@@ -175,6 +183,7 @@ struct nvme_ctrl {
u16 oacs;
u16 nssa;
u16 nr_streams;
+ u32 max_namespaces;
atomic_t abort_limit;
u8 vwc;
u32 vs;
@@ -197,6 +206,19 @@ struct nvme_ctrl {
struct work_struct fw_act_work;
unsigned long events;
+#ifdef CONFIG_NVME_MULTIPATH
+ /* asymmetric namespace access: */
+ u8 anacap;
+ u8 anatt;
+ u32 anagrpmax;
+ u32 nanagrpid;
+ struct mutex ana_lock;
+ struct nvme_ana_rsp_hdr *ana_log_buf;
+ size_t ana_log_size;
+ struct timer_list anatt_timer;
+ struct work_struct ana_work;
+#endif
+
/* Power saving configuration */
u64 ps_max_latency_us;
bool apst_enabled;
@@ -261,6 +283,7 @@ struct nvme_ns_head {
struct bio_list requeue_list;
spinlock_t requeue_lock;
struct work_struct requeue_work;
+ struct mutex lock;
#endif
struct list_head list;
struct srcu_struct srcu;
@@ -287,6 +310,10 @@ struct nvme_ns {
struct nvme_ctrl *ctrl;
struct request_queue *queue;
struct gendisk *disk;
+#ifdef CONFIG_NVME_MULTIPATH
+ enum nvme_ana_state ana_state;
+ u32 ana_grpid;
+#endif
struct list_head siblings;
struct nvm_dev *ndev;
struct kref kref;
@@ -299,8 +326,9 @@ struct nvme_ns {
bool ext;
u8 pi_type;
unsigned long flags;
-#define NVME_NS_REMOVING 0
-#define NVME_NS_DEAD 1
+#define NVME_NS_REMOVING 0
+#define NVME_NS_DEAD 1
+#define NVME_NS_ANA_PENDING 2
u16 noiob;
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
@@ -356,14 +384,6 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
return (sector >> (ns->lba_shift - 9));
}
-static inline void nvme_cleanup_cmd(struct request *req)
-{
- if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
- kfree(page_address(req->special_vec.bv_page) +
- req->special_vec.bv_offset);
- }
-}
-
static inline void nvme_end_request(struct request *req, __le16 status,
union nvme_result result)
{
@@ -420,6 +440,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
+void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
@@ -435,21 +456,24 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
-int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
- u8 log_page, void *log, size_t size, u64 offset);
+int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
+ void *log, size_t size, u64 offset);
extern const struct attribute_group nvme_ns_id_attr_group;
extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
+bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
-bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
-void nvme_mpath_add_disk(struct nvme_ns_head *head);
+void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
+int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
+void nvme_mpath_stop(struct nvme_ctrl *ctrl);
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
@@ -468,7 +492,14 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
kblockd_schedule_work(&head->requeue_work);
}
+extern struct device_attribute dev_attr_ana_grpid;
+extern struct device_attribute dev_attr_ana_state;
+
#else
+static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+{
+ return false;
+}
/*
 * Without the multipath code enabled, multiple controllers per subsystem are
* visible as devices and thus we cannot use the subsystem instance.
@@ -482,11 +513,6 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
static inline void nvme_failover_req(struct request *req)
{
}
-static inline bool nvme_req_needs_failover(struct request *req,
- blk_status_t error)
-{
- return false;
-}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
@@ -495,7 +521,8 @@ static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
{
return 0;
}
-static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
+static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
+ struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -507,6 +534,17 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
+static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
+ struct nvme_id_ctrl *id)
+{
+ return 0;
+}
+static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
+{
+}
+static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
+{
+}
#endif /* CONFIG_NVME_MULTIPATH */
#ifdef CONFIG_NVM
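The new nvme_req_qid() helper gives the tracepoints an NVMe-style queue ID: admin commands (no rq_disk) report 0, and I/O commands report the blk-mq hardware queue index plus one. A sketch of the decoding it relies on, assuming blk-mq's unique-tag packing (hwq in the high 16 bits):

	/* Assumed encoding behind blk_mq_unique_tag(); see
	 * include/linux/blk-mq.h for the authoritative helpers. */
	static inline unsigned int hwq_from_unique_tag(uint32_t unique_tag)
	{
		return unique_tag >> 16;	/* BLK_MQ_UNIQUE_TAG_BITS */
	}

	/* qid = hwq_from_unique_tag(tag) + 1 for I/O, 0 for admin. */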
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ddd441b1516a..1b9951d2067e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -418,6 +418,8 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
BUG_ON(!nvmeq);
iod->nvmeq = nvmeq;
+
+ nvme_req(req)->ctrl = &dev->ctrl;
return 0;
}
@@ -535,73 +537,6 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
mempool_free(iod->sg, dev->iod_mempool);
}
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
- if (be32_to_cpu(pi->ref_tag) == v)
- pi->ref_tag = cpu_to_be32(p);
-}
-
-static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
- if (be32_to_cpu(pi->ref_tag) == p)
- pi->ref_tag = cpu_to_be32(v);
-}
-
-/**
- * nvme_dif_remap - remaps ref tags to bip seed and physical lba
- *
- * The virtual start sector is the one that was originally submitted by the
- * block layer. Due to partitioning, MD/DM cloning, etc. the actual physical
- * start sector may be different. Remap protection information to match the
- * physical LBA on writes, and back to the original seed on reads.
- *
- * Type 0 and 3 do not have a ref tag, so no remapping required.
- */
-static void nvme_dif_remap(struct request *req,
- void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
-{
- struct nvme_ns *ns = req->rq_disk->private_data;
- struct bio_integrity_payload *bip;
- struct t10_pi_tuple *pi;
- void *p, *pmap;
- u32 i, nlb, ts, phys, virt;
-
- if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
- return;
-
- bip = bio_integrity(req->bio);
- if (!bip)
- return;
-
- pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
-
- p = pmap;
- virt = bip_get_seed(bip);
- phys = nvme_block_nr(ns, blk_rq_pos(req));
- nlb = (blk_rq_bytes(req) >> ns->lba_shift);
- ts = ns->disk->queue->integrity.tuple_size;
-
- for (i = 0; i < nlb; i++, virt++, phys++) {
- pi = (struct t10_pi_tuple *)p;
- dif_swap(phys, virt, pi);
- p += ts;
- }
- kunmap_atomic(pmap);
-}
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-static void nvme_dif_remap(struct request *req,
- void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
-{
-}
-static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-}
-static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
-{
-}
-#endif
-
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
int i;
@@ -827,9 +762,6 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
goto out_unmap;
- if (req_op(req) == REQ_OP_WRITE)
- nvme_dif_remap(req, nvme_dif_prep);
-
if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
goto out_unmap;
}
@@ -852,11 +784,8 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
if (iod->nents) {
dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
- if (blk_integrity_rq(req)) {
- if (req_op(req) == REQ_OP_READ)
- nvme_dif_remap(req, nvme_dif_complete);
+ if (blk_integrity_rq(req))
dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
- }
}
nvme_cleanup_cmd(req);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 72e8e8e7d2d7..dc042017c293 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -40,13 +40,14 @@
#define NVME_RDMA_MAX_SEGMENTS 256
-#define NVME_RDMA_MAX_INLINE_SEGMENTS 1
+#define NVME_RDMA_MAX_INLINE_SEGMENTS 4
struct nvme_rdma_device {
struct ib_device *dev;
struct ib_pd *pd;
struct kref ref;
struct list_head entry;
+ unsigned int num_inline_segments;
};
struct nvme_rdma_qe {
@@ -117,6 +118,7 @@ struct nvme_rdma_ctrl {
struct sockaddr_storage src_addr;
struct nvme_ctrl ctrl;
+ bool use_inline_data;
};
static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
@@ -249,7 +251,7 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
/* +1 for drain */
init_attr.cap.max_recv_wr = queue->queue_size + 1;
init_attr.cap.max_recv_sge = 1;
- init_attr.cap.max_send_sge = 1 + NVME_RDMA_MAX_INLINE_SEGMENTS;
+ init_attr.cap.max_send_sge = 1 + dev->num_inline_segments;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr.qp_type = IB_QPT_RC;
init_attr.send_cq = queue->ib_cq;
@@ -286,6 +288,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
struct ib_device *ibdev = dev->dev;
int ret;
+ nvme_req(rq)->ctrl = &ctrl->ctrl;
ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
DMA_TO_DEVICE);
if (ret)
@@ -374,6 +377,8 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
goto out_free_pd;
}
+ ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS,
+ ndev->dev->attrs.max_send_sge - 1);
list_add(&ndev->entry, &device_list);
out_unlock:
mutex_unlock(&device_list_mutex);
@@ -868,6 +873,31 @@ out_free_io_queues:
return ret;
}
+static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
+ bool remove)
+{
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request,
+ &ctrl->ctrl);
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_rdma_destroy_admin_queue(ctrl, remove);
+}
+
+static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
+ bool remove)
+{
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request,
+ &ctrl->ctrl);
+ if (remove)
+ nvme_start_queues(&ctrl->ctrl);
+ nvme_rdma_destroy_io_queues(ctrl, remove);
+ }
+}
+
static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
@@ -912,21 +942,44 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
}
}
-static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
+static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
{
- struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
- struct nvme_rdma_ctrl, reconnect_work);
+ int ret = -EINVAL;
bool changed;
- int ret;
- ++ctrl->ctrl.nr_reconnects;
-
- ret = nvme_rdma_configure_admin_queue(ctrl, false);
+ ret = nvme_rdma_configure_admin_queue(ctrl, new);
if (ret)
- goto requeue;
+ return ret;
+
+ if (ctrl->ctrl.icdoff) {
+ dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
+ goto destroy_admin;
+ }
+
+ if (!(ctrl->ctrl.sgls & (1 << 2))) {
+ dev_err(ctrl->ctrl.device,
+ "Mandatory keyed sgls are not supported!\n");
+ goto destroy_admin;
+ }
+
+ if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl sqsize %u, clamping down\n",
+ ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
+ }
+
+ if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
+ dev_warn(ctrl->ctrl.device,
+ "sqsize %u > ctrl maxcmd %u, clamping down\n",
+ ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
+ ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
+ }
+
+ if (ctrl->ctrl.sgls & (1 << 20))
+ ctrl->use_inline_data = true;
if (ctrl->ctrl.queue_count > 1) {
- ret = nvme_rdma_configure_io_queues(ctrl, false);
+ ret = nvme_rdma_configure_io_queues(ctrl, new);
if (ret)
goto destroy_admin;
}
@@ -935,10 +988,31 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
if (!changed) {
/* state change failure is ok if we're in DELETING state */
WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
- return;
+ ret = -EINVAL;
+ goto destroy_io;
}
nvme_start_ctrl(&ctrl->ctrl);
+ return 0;
+
+destroy_io:
+ if (ctrl->ctrl.queue_count > 1)
+ nvme_rdma_destroy_io_queues(ctrl, new);
+destroy_admin:
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
+ nvme_rdma_destroy_admin_queue(ctrl, new);
+ return ret;
+}
+
+static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_rdma_ctrl, reconnect_work);
+
+ ++ctrl->ctrl.nr_reconnects;
+
+ if (nvme_rdma_setup_ctrl(ctrl, false))
+ goto requeue;
dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
ctrl->ctrl.nr_reconnects);
@@ -947,9 +1021,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
return;
-destroy_admin:
- nvme_rdma_stop_queue(&ctrl->queues[0]);
- nvme_rdma_destroy_admin_queue(ctrl, false);
requeue:
dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
ctrl->ctrl.nr_reconnects);
@@ -962,27 +1033,9 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
struct nvme_rdma_ctrl, err_work);
nvme_stop_keep_alive(&ctrl->ctrl);
-
- if (ctrl->ctrl.queue_count > 1) {
- nvme_stop_queues(&ctrl->ctrl);
- nvme_rdma_stop_io_queues(ctrl);
- blk_mq_tagset_busy_iter(&ctrl->tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- nvme_rdma_destroy_io_queues(ctrl, false);
- }
-
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
- nvme_rdma_stop_queue(&ctrl->queues[0]);
- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- nvme_rdma_destroy_admin_queue(ctrl, false);
-
- /*
- * queues are not a live anymore, so restart the queues to fail fast
- * new IO
- */
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_rdma_teardown_io_queues(ctrl, false);
nvme_start_queues(&ctrl->ctrl);
+ nvme_rdma_teardown_admin_queue(ctrl, false);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we're in DELETING state */
@@ -1089,19 +1142,27 @@ static int nvme_rdma_set_sg_null(struct nvme_command *c)
}
static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
- struct nvme_rdma_request *req, struct nvme_command *c)
+ struct nvme_rdma_request *req, struct nvme_command *c,
+ int count)
{
struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+ struct scatterlist *sgl = req->sg_table.sgl;
+ struct ib_sge *sge = &req->sge[1];
+ u32 len = 0;
+ int i;
- req->sge[1].addr = sg_dma_address(req->sg_table.sgl);
- req->sge[1].length = sg_dma_len(req->sg_table.sgl);
- req->sge[1].lkey = queue->device->pd->local_dma_lkey;
+ for (i = 0; i < count; i++, sgl++, sge++) {
+ sge->addr = sg_dma_address(sgl);
+ sge->length = sg_dma_len(sgl);
+ sge->lkey = queue->device->pd->local_dma_lkey;
+ len += sge->length;
+ }
sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
- sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
+ sg->length = cpu_to_le32(len);
sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
- req->num_sge++;
+ req->num_sge += count;
return 0;
}
@@ -1194,15 +1255,16 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
goto out_free_table;
}
- if (count == 1) {
+ if (count <= dev->num_inline_segments) {
if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
+ queue->ctrl->use_inline_data &&
blk_rq_payload_bytes(rq) <=
nvme_rdma_inline_data_size(queue)) {
- ret = nvme_rdma_map_sg_inline(queue, req, c);
+ ret = nvme_rdma_map_sg_inline(queue, req, c, count);
goto out;
}
- if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+ if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
ret = nvme_rdma_map_sg_single(queue, req, c);
goto out;
}
@@ -1573,6 +1635,7 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
nvme_rdma_destroy_queue_ib(queue);
+ /* fall through */
case RDMA_CM_EVENT_ADDR_ERROR:
dev_dbg(queue->ctrl->ctrl.device,
"CM error event %d\n", ev->event);
@@ -1735,25 +1798,12 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
- if (ctrl->ctrl.queue_count > 1) {
- nvme_stop_queues(&ctrl->ctrl);
- nvme_rdma_stop_io_queues(ctrl);
- blk_mq_tagset_busy_iter(&ctrl->tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- nvme_rdma_destroy_io_queues(ctrl, shutdown);
- }
-
+ nvme_rdma_teardown_io_queues(ctrl, shutdown);
if (shutdown)
nvme_shutdown_ctrl(&ctrl->ctrl);
else
nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
-
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
- nvme_rdma_stop_queue(&ctrl->queues[0]);
- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
- nvme_rdma_destroy_admin_queue(ctrl, shutdown);
+ nvme_rdma_teardown_admin_queue(ctrl, shutdown);
}
static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
@@ -1765,8 +1815,6 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
{
struct nvme_rdma_ctrl *ctrl =
container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
- int ret;
- bool changed;
nvme_stop_ctrl(&ctrl->ctrl);
nvme_rdma_shutdown_ctrl(ctrl, false);
@@ -1777,25 +1825,9 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
return;
}
- ret = nvme_rdma_configure_admin_queue(ctrl, false);
- if (ret)
+ if (nvme_rdma_setup_ctrl(ctrl, false))
goto out_fail;
- if (ctrl->ctrl.queue_count > 1) {
- ret = nvme_rdma_configure_io_queues(ctrl, false);
- if (ret)
- goto out_fail;
- }
-
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- if (!changed) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
- return;
- }
-
- nvme_start_ctrl(&ctrl->ctrl);
-
return;
out_fail:
@@ -1958,49 +1990,10 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
WARN_ON_ONCE(!changed);
- ret = nvme_rdma_configure_admin_queue(ctrl, true);
+ ret = nvme_rdma_setup_ctrl(ctrl, true);
if (ret)
goto out_uninit_ctrl;
- /* sanity check icdoff */
- if (ctrl->ctrl.icdoff) {
- dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
- ret = -EINVAL;
- goto out_remove_admin_queue;
- }
-
- /* sanity check keyed sgls */
- if (!(ctrl->ctrl.sgls & (1 << 2))) {
- dev_err(ctrl->ctrl.device,
- "Mandatory keyed sgls are not supported!\n");
- ret = -EINVAL;
- goto out_remove_admin_queue;
- }
-
- /* only warn if argument is too large here, will clamp later */
- if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
- dev_warn(ctrl->ctrl.device,
- "queue_size %zu > ctrl sqsize %u, clamping down\n",
- opts->queue_size, ctrl->ctrl.sqsize + 1);
- }
-
- /* warn if maxcmd is lower than sqsize+1 */
- if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
- dev_warn(ctrl->ctrl.device,
- "sqsize %u > ctrl maxcmd %u, clamping down\n",
- ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
- ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
- }
-
- if (opts->nr_io_queues) {
- ret = nvme_rdma_configure_io_queues(ctrl, true);
- if (ret)
- goto out_remove_admin_queue;
- }
-
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- WARN_ON_ONCE(!changed);
-
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
@@ -2010,13 +2003,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
mutex_unlock(&nvme_rdma_ctrl_mutex);
- nvme_start_ctrl(&ctrl->ctrl);
-
return &ctrl->ctrl;
-out_remove_admin_queue:
- nvme_rdma_stop_queue(&ctrl->queues[0]);
- nvme_rdma_destroy_admin_queue(ctrl, true);
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);
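Raising NVME_RDMA_MAX_INLINE_SEGMENTS to 4 only sets an upper bound; the per-device value is still clamped by the HCA's max_send_sge, minus the one SGE reserved for the command capsule itself. A worked sketch of that negotiation:

	/* Mirrors the min() in nvme_rdma_find_get_device(). */
	static int num_inline_segments(int max_send_sge)
	{
		int cap = 4;	/* NVME_RDMA_MAX_INLINE_SEGMENTS */
		int avail = max_send_sge - 1;	/* one SGE for the command */

		return avail < cap ? avail : cap;
	}

	/* e.g. max_send_sge == 3 gives 2 inline segments,
	 * max_send_sge == 32 gives the full 4. */

Writes on I/O queues whose payload fits both the inline data size and that many SGEs are then sent inline; single-segment transfers can still take the global-rkey path.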
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 41944bbef835..25b0e310f4a8 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -128,3 +128,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
return nvme_trace_common(p, cdw10);
}
}
+
+const char *nvme_trace_disk_name(struct trace_seq *p, char *name)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ if (*name)
+ trace_seq_printf(p, "disk=%s, ", name);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
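nvme_trace_disk_name() follows the usual trace_seq helper convention: it remembers the current buffer position, prints "disk=<name>, " only when a name was recorded, NUL-terminates, and returns the saved pointer so TP_printk can splice it into the format string. A sketch of the effective expansion (illustrative, not the generated code):

	/* With a disk name recorded:  "nvme0: disk=nvme0n1, qid=1, ..." */
	/* Without one (admin queue): "nvme0: qid=0, ..."               */
	trace_seq_printf(p, "nvme%d: %sqid=%d, ...",
			 ctrl_id, nvme_trace_disk_name(p, name), qid);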
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 01390f0e1671..a490790d6691 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -50,13 +50,8 @@
nvme_admin_opcode_name(nvme_admin_security_recv), \
nvme_admin_opcode_name(nvme_admin_sanitize_nvm))
-const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
- u8 *cdw10);
-#define __parse_nvme_admin_cmd(opcode, cdw10) \
- nvme_trace_parse_admin_cmd(p, opcode, cdw10)
-
#define nvme_opcode_name(opcode) { opcode, #opcode }
-#define show_opcode_name(val) \
+#define show_nvm_opcode_name(val) \
__print_symbolic(val, \
nvme_opcode_name(nvme_cmd_flush), \
nvme_opcode_name(nvme_cmd_write), \
@@ -70,85 +65,92 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
nvme_opcode_name(nvme_cmd_resv_acquire), \
nvme_opcode_name(nvme_cmd_resv_release))
-const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
- u8 *cdw10);
-#define __parse_nvme_cmd(opcode, cdw10) \
- nvme_trace_parse_nvm_cmd(p, opcode, cdw10)
-
-TRACE_EVENT(nvme_setup_admin_cmd,
- TP_PROTO(struct nvme_command *cmd),
- TP_ARGS(cmd),
- TP_STRUCT__entry(
- __field(u8, opcode)
- __field(u8, flags)
- __field(u16, cid)
- __field(u64, metadata)
- __array(u8, cdw10, 24)
- ),
- TP_fast_assign(
- __entry->opcode = cmd->common.opcode;
- __entry->flags = cmd->common.flags;
- __entry->cid = cmd->common.command_id;
- __entry->metadata = le64_to_cpu(cmd->common.metadata);
- memcpy(__entry->cdw10, cmd->common.cdw10,
- sizeof(__entry->cdw10));
- ),
- TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
- __entry->cid, __entry->flags, __entry->metadata,
- show_admin_opcode_name(__entry->opcode),
- __parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
-);
-
+#define show_opcode_name(qid, opcode) \
+ (qid ? show_nvm_opcode_name(opcode) : show_admin_opcode_name(opcode))
-TRACE_EVENT(nvme_setup_nvm_cmd,
- TP_PROTO(int qid, struct nvme_command *cmd),
- TP_ARGS(qid, cmd),
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+
+#define parse_nvme_cmd(qid, opcode, cdw10) \
+ (qid ? \
+ nvme_trace_parse_nvm_cmd(p, opcode, cdw10) : \
+ nvme_trace_parse_admin_cmd(p, opcode, cdw10))
+
+const char *nvme_trace_disk_name(struct trace_seq *p, char *name);
+#define __print_disk_name(name) \
+ nvme_trace_disk_name(p, name)
+
+#ifndef TRACE_HEADER_MULTI_READ
+static inline void __assign_disk_name(char *name, struct gendisk *disk)
+{
+ if (disk)
+ memcpy(name, disk->disk_name, DISK_NAME_LEN);
+ else
+ memset(name, 0, DISK_NAME_LEN);
+}
+#endif
+
+TRACE_EVENT(nvme_setup_cmd,
+ TP_PROTO(struct request *req, struct nvme_command *cmd),
+ TP_ARGS(req, cmd),
TP_STRUCT__entry(
- __field(int, qid)
- __field(u8, opcode)
- __field(u8, flags)
- __field(u16, cid)
- __field(u32, nsid)
- __field(u64, metadata)
- __array(u8, cdw10, 24)
+ __array(char, disk, DISK_NAME_LEN)
+ __field(int, ctrl_id)
+ __field(int, qid)
+ __field(u8, opcode)
+ __field(u8, flags)
+ __field(u16, cid)
+ __field(u32, nsid)
+ __field(u64, metadata)
+ __array(u8, cdw10, 24)
),
TP_fast_assign(
- __entry->qid = qid;
- __entry->opcode = cmd->common.opcode;
- __entry->flags = cmd->common.flags;
- __entry->cid = cmd->common.command_id;
- __entry->nsid = le32_to_cpu(cmd->common.nsid);
- __entry->metadata = le64_to_cpu(cmd->common.metadata);
- memcpy(__entry->cdw10, cmd->common.cdw10,
- sizeof(__entry->cdw10));
+ __entry->ctrl_id = nvme_req(req)->ctrl->instance;
+ __entry->qid = nvme_req_qid(req);
+ __entry->opcode = cmd->common.opcode;
+ __entry->flags = cmd->common.flags;
+ __entry->cid = cmd->common.command_id;
+ __entry->nsid = le32_to_cpu(cmd->common.nsid);
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ __assign_disk_name(__entry->disk, req->rq_disk);
+ memcpy(__entry->cdw10, cmd->common.cdw10,
+ sizeof(__entry->cdw10));
),
- TP_printk("qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
- __entry->qid, __entry->nsid, __entry->cid,
+ TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ __entry->ctrl_id, __print_disk_name(__entry->disk),
+ __entry->qid, __entry->cid, __entry->nsid,
__entry->flags, __entry->metadata,
- show_opcode_name(__entry->opcode),
- __parse_nvme_cmd(__entry->opcode, __entry->cdw10))
+ show_opcode_name(__entry->qid, __entry->opcode),
+ parse_nvme_cmd(__entry->qid, __entry->opcode, __entry->cdw10))
);
TRACE_EVENT(nvme_complete_rq,
TP_PROTO(struct request *req),
TP_ARGS(req),
TP_STRUCT__entry(
- __field(int, qid)
- __field(int, cid)
- __field(u64, result)
- __field(u8, retries)
- __field(u8, flags)
- __field(u16, status)
+ __array(char, disk, DISK_NAME_LEN)
+ __field(int, ctrl_id)
+ __field(int, qid)
+ __field(int, cid)
+ __field(u64, result)
+ __field(u8, retries)
+ __field(u8, flags)
+ __field(u16, status)
),
TP_fast_assign(
- __entry->qid = req->q->id;
- __entry->cid = req->tag;
- __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
- __entry->retries = nvme_req(req)->retries;
- __entry->flags = nvme_req(req)->flags;
- __entry->status = nvme_req(req)->status;
+ __entry->ctrl_id = nvme_req(req)->ctrl->instance;
+ __entry->qid = nvme_req_qid(req);
+ __entry->cid = req->tag;
+ __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
+ __entry->retries = nvme_req(req)->retries;
+ __entry->flags = nvme_req(req)->flags;
+ __entry->status = nvme_req(req)->status;
+ __assign_disk_name(__entry->disk, req->rq_disk);
),
- TP_printk("qid=%d, cmdid=%u, res=%llu, retries=%u, flags=0x%x, status=%u",
+ TP_printk("nvme%d: %sqid=%d, cmdid=%u, res=%llu, retries=%u, flags=0x%x, status=%u",
+ __entry->ctrl_id, __print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->result,
__entry->retries, __entry->flags, __entry->status)
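With the two setup events merged, both admin and I/O submissions funnel through a single trace point, and the qid recorded at fast-assign time selects the decoder at print time. A sketch of the expected caller and output (the call site is assumed to be nvme_setup_cmd() in core.c, as with the events this replaces):

	trace_nvme_setup_cmd(req, cmd);

	/* qid == 0 decodes via the admin opcode table:
	 *   nvme0: qid=0, cmdid=..., cmd=(nvme_admin_identify ...)
	 * qid >= 1 decodes via the NVM opcode table:
	 *   nvme0: disk=nvme0n1, qid=1, cmdid=..., cmd=(nvme_cmd_read ...)
	 */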
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 38803576d5e1..a21caea1e080 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -19,6 +19,19 @@
#include <asm/unaligned.h>
#include "nvmet.h"
+/*
+ * This helper allows us to clear the AEN based on the RAE bit.
+ * Please use this helper when processing log pages that are
+ * associated with an AEN.
+ */
+static inline void nvmet_clear_aen(struct nvmet_req *req, u32 aen_bit)
+{
+ int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & (1 << 15);
+
+ if (!rae)
+ clear_bit(aen_bit, &req->sq->ctrl->aen_masked);
+}
+
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
u32 len = le16_to_cpu(cmd->get_log_page.numdu);
@@ -128,6 +141,36 @@ out:
nvmet_req_complete(req, status);
}
+static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
+{
+ u16 status = NVME_SC_INTERNAL;
+ struct nvme_effects_log *log;
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log)
+ goto out;
+
+ log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
+ log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
+ log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
+ log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
+ log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
+ log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
+ log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);
+
+ log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
+
+ status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+
+ kfree(log);
+out:
+ nvmet_req_complete(req, status);
+}
+
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -146,12 +189,76 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
if (!status)
status = nvmet_zero_sgl(req, len, req->data_len - len);
ctrl->nr_changed_ns = 0;
- clear_bit(NVME_AEN_CFG_NS_ATTR, &ctrl->aen_masked);
+ nvmet_clear_aen(req, NVME_AEN_CFG_NS_ATTR);
mutex_unlock(&ctrl->lock);
out:
nvmet_req_complete(req, status);
}
+static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
+ struct nvme_ana_group_desc *desc)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_ns *ns;
+ u32 count = 0;
+
+ if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+ if (ns->anagrpid == grpid)
+ desc->nsids[count++] = cpu_to_le32(ns->nsid);
+ rcu_read_unlock();
+ }
+
+ desc->grpid = cpu_to_le32(grpid);
+ desc->nnsids = cpu_to_le32(count);
+ desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
+ desc->state = req->port->ana_state[grpid];
+ memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
+ return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
+}
+
+static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
+{
+ struct nvme_ana_rsp_hdr hdr = { 0, };
+ struct nvme_ana_group_desc *desc;
+ size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
+ size_t len;
+ u32 grpid;
+ u16 ngrps = 0;
+ u16 status;
+
+ status = NVME_SC_INTERNAL;
+ desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
+ NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
+ if (!desc)
+ goto out;
+
+ down_read(&nvmet_ana_sem);
+ for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
+ if (!nvmet_ana_group_enabled[grpid])
+ continue;
+ len = nvmet_format_ana_group(req, grpid, desc);
+ status = nvmet_copy_to_sgl(req, offset, desc, len);
+ if (status)
+ break;
+ offset += len;
+ ngrps++;
+ }
+
+ hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
+ hdr.ngrps = cpu_to_le16(ngrps);
+ nvmet_clear_aen(req, NVME_AEN_CFG_ANA_CHANGE);
+ up_read(&nvmet_ana_sem);
+
+ kfree(desc);
+
+ /* copy the header last once we know the number of groups */
+ status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
+out:
+ nvmet_req_complete(req, status);
+}
+
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -183,8 +290,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
* the safest is to leave it as zeroes.
*/
- /* we support multiple ports and multiples hosts: */
- id->cmic = (1 << 0) | (1 << 1);
+ /* we support multiple ports, multiple hosts and ANA: */
+ id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
/* no limit on data transfer sizes for now */
id->mdts = 0;
@@ -208,7 +315,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/* first slot is read-only, only one slot supported */
id->frmw = (1 << 0) | (1 << 1);
- id->lpa = (1 << 0) | (1 << 2);
+ id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
id->npss = 0;
@@ -222,6 +329,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
+ id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
NVME_CTRL_ONCS_WRITE_ZEROES);
@@ -238,19 +346,24 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
if (ctrl->ops->has_keyed_sgls)
id->sgls |= cpu_to_le32(1 << 2);
- if (ctrl->ops->sqe_inline_size)
+ if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
strcpy(id->subnqn, ctrl->subsys->subsysnqn);
/* Max command capsule size is sqe + single page of in-capsule data */
id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
- ctrl->ops->sqe_inline_size) / 16);
+ req->port->inline_data_size) / 16);
/* Max response capsule size is cqe */
id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
id->msdbd = ctrl->ops->msdbd;
+ id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
+ id->anatt = 10; /* random value */
+ id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
+ id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);
+
/*
* Meh, we don't really support any power state. Fake up the same
* values that qemu does.
@@ -259,6 +372,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->psd[0].entry_lat = cpu_to_le32(0x10);
id->psd[0].exit_lat = cpu_to_le32(0x4);
+ id->nwpc = 1 << 0; /* write protect and no write protect */
+
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
kfree(id);
@@ -292,8 +407,15 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
* nuse = ncap = nsze isn't always true, but we have no way to find
* that out from the underlying device.
*/
- id->ncap = id->nuse = id->nsze =
- cpu_to_le64(ns->size >> ns->blksize_shift);
+ id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
+ switch (req->port->ana_state[ns->anagrpid]) {
+ case NVME_ANA_INACCESSIBLE:
+ case NVME_ANA_PERSISTENT_LOSS:
+ break;
+ default:
+ id->nuse = id->nsze;
+ break;
+ }
/*
* We just provide a single LBA format that matches what the
@@ -307,11 +429,14 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
* controllers, but also with any other user of the block device.
*/
id->nmic = (1 << 0);
+ id->anagrpid = cpu_to_le32(ns->anagrpid);
- memcpy(&id->nguid, &ns->nguid, sizeof(uuid_le));
+ memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
id->lbaf[0].ds = ns->blksize_shift;
+ if (ns->readonly)
+ id->nsattr |= (1 << 0);
nvmet_put_namespace(ns);
done:
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
@@ -424,6 +549,52 @@ static void nvmet_execute_abort(struct nvmet_req *req)
nvmet_req_complete(req, 0);
}
+static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
+{
+ u16 status;
+
+ if (req->ns->file)
+ status = nvmet_file_flush(req);
+ else
+ status = nvmet_bdev_flush(req);
+
+ if (status)
+ pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
+ return status;
+}
+
+static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
+{
+ u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]);
+ struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
+
+ req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
+ if (unlikely(!req->ns))
+ return status;
+
+ mutex_lock(&subsys->lock);
+ switch (write_protect) {
+ case NVME_NS_WRITE_PROTECT:
+ req->ns->readonly = true;
+ status = nvmet_write_protect_flush_sync(req);
+ if (status)
+ req->ns->readonly = false;
+ break;
+ case NVME_NS_NO_WRITE_PROTECT:
+ req->ns->readonly = false;
+ status = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (!status)
+ nvmet_ns_changed(subsys, req->ns->nsid);
+ mutex_unlock(&subsys->lock);
+ return status;
+}
+
static void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
@@ -454,6 +625,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
case NVME_FEAT_HOST_ID:
status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
break;
+ case NVME_FEAT_WRITE_PROTECT:
+ status = nvmet_set_feat_write_protect(req);
+ break;
default:
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
break;
@@ -462,6 +636,26 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
+static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
+{
+ struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ u32 result;
+
+ req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
+ if (!req->ns)
+ return NVME_SC_INVALID_NS | NVME_SC_DNR;
+
+ mutex_lock(&subsys->lock);
+ if (req->ns->readonly)
+ result = NVME_NS_WRITE_PROTECT;
+ else
+ result = NVME_NS_NO_WRITE_PROTECT;
+ nvmet_set_result(req, result);
+ mutex_unlock(&subsys->lock);
+
+ return 0;
+}
+
static void nvmet_execute_get_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
@@ -513,6 +707,9 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
sizeof(req->sq->ctrl->hostid));
break;
+ case NVME_FEAT_WRITE_PROTECT:
+ status = nvmet_get_feat_write_protect(req);
+ break;
default:
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
break;
@@ -586,6 +783,12 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
case NVME_LOG_CHANGED_NS:
req->execute = nvmet_execute_get_log_changed_ns;
return 0;
+ case NVME_LOG_CMD_EFFECTS:
+ req->execute = nvmet_execute_get_log_cmd_effects_ns;
+ return 0;
+ case NVME_LOG_ANA:
+ req->execute = nvmet_execute_get_log_page_ana;
+ return 0;
}
break;
case nvme_admin_identify:
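The new nvmet_clear_aen() helper implements the Retain Asynchronous Event convention: reading a log page normally re-arms the matching AEN, unless the host set the RAE bit when issuing Get Log Page. A host-side sketch of that encoding, assuming the Get Log Page CDW10 layout (LID in bits 7:0, LSP in bits 11:8, RAE in bit 15, NUMDL in bits 31:16):

	#include <stdint.h>

	/* Illustrative only; hosts normally build this via their own
	 * command helpers. */
	static uint32_t get_log_cdw10(uint8_t lid, uint8_t lsp, int retain,
				      uint16_t numdl)
	{
		return lid | ((uint32_t)(lsp & 0xf) << 8) |
		       ((uint32_t)!!retain << 15) |
		       ((uint32_t)numdl << 16);
	}

Setting retain lets a host poll a log page for housekeeping without disarming the event it is associated with.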
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index ebea1373d1b7..b37a8e3e3f80 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -218,6 +218,35 @@ static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_, addr_trsvcid);
+static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
+}
+
+static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ int ret;
+
+ if (port->enabled) {
+ pr_err("Cannot modify inline_data_size while port enabled\n");
+ pr_err("Disable the port before modifying\n");
+ return -EACCES;
+ }
+ ret = kstrtoint(page, 0, &port->inline_data_size);
+ if (ret) {
+ pr_err("Invalid value '%s' for inline_data_size\n", page);
+ return -EINVAL;
+ }
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_inline_data_size);
+
static ssize_t nvmet_addr_trtype_show(struct config_item *item,
char *page)
{
@@ -387,6 +416,39 @@ out_unlock:
CONFIGFS_ATTR(nvmet_ns_, device_nguid);
+static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
+}
+
+static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ u32 oldgrpid, newgrpid;
+ int ret;
+
+ ret = kstrtou32(page, 0, &newgrpid);
+ if (ret)
+ return ret;
+
+ if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
+ return -EINVAL;
+
+ down_write(&nvmet_ana_sem);
+ oldgrpid = ns->anagrpid;
+ nvmet_ana_group_enabled[newgrpid]++;
+ ns->anagrpid = newgrpid;
+ nvmet_ana_group_enabled[oldgrpid]--;
+ nvmet_ana_chgcnt++;
+ up_write(&nvmet_ana_sem);
+
+ nvmet_send_ana_event(ns->subsys, NULL);
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
+
static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
{
return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
@@ -412,11 +474,41 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_ns_, enable);
+static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
+}
+
+static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ bool val;
+
+ if (strtobool(page, &val))
+ return -EINVAL;
+
+ mutex_lock(&ns->subsys->lock);
+ if (ns->enabled) {
+ pr_err("disable ns before setting buffered_io value.\n");
+ mutex_unlock(&ns->subsys->lock);
+ return -EINVAL;
+ }
+
+ ns->buffered_io = val;
+ mutex_unlock(&ns->subsys->lock);
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, buffered_io);
+
static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_device_path,
&nvmet_ns_attr_device_nguid,
&nvmet_ns_attr_device_uuid,
+ &nvmet_ns_attr_ana_grpid,
&nvmet_ns_attr_enable,
+ &nvmet_ns_attr_buffered_io,
NULL,
};
@@ -863,6 +955,134 @@ static const struct config_item_type nvmet_referrals_type = {
.ct_group_ops = &nvmet_referral_group_ops,
};
+static struct {
+ enum nvme_ana_state state;
+ const char *name;
+} nvmet_ana_state_names[] = {
+ { NVME_ANA_OPTIMIZED, "optimized" },
+ { NVME_ANA_NONOPTIMIZED, "non-optimized" },
+ { NVME_ANA_INACCESSIBLE, "inaccessible" },
+ { NVME_ANA_PERSISTENT_LOSS, "persistent-loss" },
+ { NVME_ANA_CHANGE, "change" },
+};
+
+static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_ana_group *grp = to_ana_group(item);
+ enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
+ if (state != nvmet_ana_state_names[i].state)
+ continue;
+ return sprintf(page, "%s\n", nvmet_ana_state_names[i].name);
+ }
+
+ return sprintf(page, "\n");
+}
+
+static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ana_group *grp = to_ana_group(item);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
+ if (sysfs_streq(page, nvmet_ana_state_names[i].name))
+ goto found;
+ }
+
+ pr_err("Invalid value '%s' for ana_state\n", page);
+ return -EINVAL;
+
+found:
+ down_write(&nvmet_ana_sem);
+ grp->port->ana_state[grp->grpid] = nvmet_ana_state_names[i].state;
+ nvmet_ana_chgcnt++;
+ up_write(&nvmet_ana_sem);
+
+ nvmet_port_send_ana_event(grp->port);
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
+
+static struct configfs_attribute *nvmet_ana_group_attrs[] = {
+ &nvmet_ana_group_attr_ana_state,
+ NULL,
+};
+
+static void nvmet_ana_group_release(struct config_item *item)
+{
+ struct nvmet_ana_group *grp = to_ana_group(item);
+
+ if (grp == &grp->port->ana_default_group)
+ return;
+
+ down_write(&nvmet_ana_sem);
+ grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
+ nvmet_ana_group_enabled[grp->grpid]--;
+ up_write(&nvmet_ana_sem);
+
+ nvmet_port_send_ana_event(grp->port);
+ kfree(grp);
+}
+
+static struct configfs_item_operations nvmet_ana_group_item_ops = {
+ .release = nvmet_ana_group_release,
+};
+
+static const struct config_item_type nvmet_ana_group_type = {
+ .ct_item_ops = &nvmet_ana_group_item_ops,
+ .ct_attrs = nvmet_ana_group_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_ana_groups_make_group(
+ struct config_group *group, const char *name)
+{
+ struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
+ struct nvmet_ana_group *grp;
+ u32 grpid;
+ int ret;
+
+ ret = kstrtou32(name, 0, &grpid);
+ if (ret)
+ goto out;
+
+ ret = -EINVAL;
+ if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
+ goto out;
+
+ ret = -ENOMEM;
+ grp = kzalloc(sizeof(*grp), GFP_KERNEL);
+ if (!grp)
+ goto out;
+ grp->port = port;
+ grp->grpid = grpid;
+
+ down_write(&nvmet_ana_sem);
+ nvmet_ana_group_enabled[grpid]++;
+ up_write(&nvmet_ana_sem);
+
+ nvmet_port_send_ana_event(grp->port);
+
+ config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
+ return &grp->group;
+out:
+ return ERR_PTR(ret);
+}
+
+static struct configfs_group_operations nvmet_ana_groups_group_ops = {
+ .make_group = nvmet_ana_groups_make_group,
+};
+
+static const struct config_item_type nvmet_ana_groups_type = {
+ .ct_group_ops = &nvmet_ana_groups_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
/*
* Ports definitions.
*/
@@ -870,6 +1090,7 @@ static void nvmet_port_release(struct config_item *item)
{
struct nvmet_port *port = to_nvmet_port(item);
+ kfree(port->ana_state);
kfree(port);
}
@@ -879,6 +1100,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
&nvmet_attr_addr_traddr,
&nvmet_attr_addr_trsvcid,
&nvmet_attr_addr_trtype,
+ &nvmet_attr_param_inline_data_size,
NULL,
};
@@ -897,6 +1119,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
{
struct nvmet_port *port;
u16 portid;
+ u32 i;
if (kstrtou16(name, 0, &portid))
return ERR_PTR(-EINVAL);
@@ -905,9 +1128,24 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
if (!port)
return ERR_PTR(-ENOMEM);
+ port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
+ sizeof(*port->ana_state), GFP_KERNEL);
+ if (!port->ana_state) {
+ kfree(port);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
+ if (i == NVMET_DEFAULT_ANA_GRPID)
+ port->ana_state[i] = NVME_ANA_OPTIMIZED;
+ else
+ port->ana_state[i] = NVME_ANA_INACCESSIBLE;
+ }
+
INIT_LIST_HEAD(&port->entry);
INIT_LIST_HEAD(&port->subsystems);
INIT_LIST_HEAD(&port->referrals);
+ port->inline_data_size = -1; /* < 0 == let the transport choose */
port->disc_addr.portid = cpu_to_le16(portid);
config_group_init_type_name(&port->group, name, &nvmet_port_type);
@@ -920,6 +1158,18 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
"referrals", &nvmet_referrals_type);
configfs_add_default_group(&port->referrals_group, &port->group);
+ config_group_init_type_name(&port->ana_groups_group,
+ "ana_groups", &nvmet_ana_groups_type);
+ configfs_add_default_group(&port->ana_groups_group, &port->group);
+
+ port->ana_default_group.port = port;
+ port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
+ config_group_init_type_name(&port->ana_default_group.group,
+ __stringify(NVMET_DEFAULT_ANA_GRPID),
+ &nvmet_ana_group_type);
+ configfs_add_default_group(&port->ana_default_group.group,
+ &port->ana_groups_group);
+
return &port->group;
}
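
The ana_state store handler above resolves the written string by scanning a small name table and jumping to the matching entry. A minimal userspace sketch of that lookup, with strcmp standing in for sysfs_streq and illustrative enum values in place of the kernel's enum nvme_ana_state:

    #include <stdio.h>
    #include <string.h>

    /* Stand-ins for the kernel's ANA states; values are illustrative only. */
    enum ana_state { OPTIMIZED, NON_OPTIMIZED, INACCESSIBLE, PERSISTENT_LOSS, CHANGE };

    static const struct {
        enum ana_state state;
        const char *name;
    } state_names[] = {
        { OPTIMIZED,       "optimized" },
        { NON_OPTIMIZED,   "non-optimized" },
        { INACCESSIBLE,    "inaccessible" },
        { PERSISTENT_LOSS, "persistent-loss" },
        { CHANGE,          "change" },
    };

    /* Scan the table like the store handler; reject anything unknown. */
    static int parse_ana_state(const char *page, enum ana_state *out)
    {
        size_t i;

        for (i = 0; i < sizeof(state_names) / sizeof(state_names[0]); i++) {
            if (!strcmp(page, state_names[i].name)) {
                *out = state_names[i].state;
                return 0;
            }
        }
        return -1;    /* the kernel returns -EINVAL here */
    }

    int main(void)
    {
        enum ana_state s;

        if (!parse_ana_state("inaccessible", &s))
            printf("parsed state %d\n", s);
        return 0;
    }
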
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 9838103f2d62..ebf3e7a6c49e 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -18,6 +18,7 @@
#include "nvmet.h"
+struct workqueue_struct *buffered_io_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
@@ -39,6 +40,10 @@ static DEFINE_IDA(cntlid_ida);
*/
DECLARE_RWSEM(nvmet_config_sem);
+u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
+u64 nvmet_ana_chgcnt;
+DECLARE_RWSEM(nvmet_ana_sem);
+
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
const char *subsysnqn);
@@ -175,7 +180,7 @@ out_unlock:
mutex_unlock(&ctrl->lock);
}
-static void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
+void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
struct nvmet_ctrl *ctrl;
@@ -189,6 +194,33 @@ static void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
}
}
+void nvmet_send_ana_event(struct nvmet_subsys *subsys,
+ struct nvmet_port *port)
+{
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (port && ctrl->port != port)
+ continue;
+ if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_ANA_CHANGE))
+ continue;
+ nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
+ }
+ mutex_unlock(&subsys->lock);
+}
+
+void nvmet_port_send_ana_event(struct nvmet_port *port)
+{
+ struct nvmet_subsys_link *p;
+
+ down_read(&nvmet_config_sem);
+ list_for_each_entry(p, &port->subsystems, entry)
+ nvmet_send_ana_event(p->subsys, port);
+ up_read(&nvmet_config_sem);
+}
+
int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
int ret = 0;
@@ -241,6 +273,10 @@ int nvmet_enable_port(struct nvmet_port *port)
return ret;
}
+ /* If the transport didn't set inline_data_size, then disable it. */
+ if (port->inline_data_size < 0)
+ port->inline_data_size = 0;
+
port->enabled = true;
return 0;
}
@@ -332,9 +368,13 @@ static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
int nvmet_ns_enable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
- int ret = 0;
+ int ret;
mutex_lock(&subsys->lock);
+ ret = -EMFILE;
+ if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
+ goto out_unlock;
+ ret = 0;
if (ns->enabled)
goto out_unlock;
@@ -369,6 +409,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
list_add_tail_rcu(&ns->dev_link, &old->dev_link);
}
+ subsys->nr_namespaces++;
nvmet_ns_changed(subsys, ns->nsid);
ns->enabled = true;
@@ -409,6 +450,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
percpu_ref_exit(&ns->ref);
mutex_lock(&subsys->lock);
+ subsys->nr_namespaces--;
nvmet_ns_changed(subsys, ns->nsid);
nvmet_ns_dev_disable(ns);
out_unlock:
@@ -419,6 +461,10 @@ void nvmet_ns_free(struct nvmet_ns *ns)
{
nvmet_ns_disable(ns);
+ down_write(&nvmet_ana_sem);
+ nvmet_ana_group_enabled[ns->anagrpid]--;
+ up_write(&nvmet_ana_sem);
+
kfree(ns->device_path);
kfree(ns);
}
@@ -436,7 +482,14 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
ns->nsid = nsid;
ns->subsys = subsys;
+
+ down_write(&nvmet_ana_sem);
+ ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
+ nvmet_ana_group_enabled[ns->anagrpid]++;
+ up_write(&nvmet_ana_sem);
+
uuid_gen(&ns->uuid);
+ ns->buffered_io = false;
return ns;
}
@@ -542,6 +595,35 @@ int nvmet_sq_init(struct nvmet_sq *sq)
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
+static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
+ struct nvmet_ns *ns)
+{
+ enum nvme_ana_state state = port->ana_state[ns->anagrpid];
+
+ if (unlikely(state == NVME_ANA_INACCESSIBLE))
+ return NVME_SC_ANA_INACCESSIBLE;
+ if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
+ return NVME_SC_ANA_PERSISTENT_LOSS;
+ if (unlikely(state == NVME_ANA_CHANGE))
+ return NVME_SC_ANA_TRANSITION;
+ return 0;
+}
+
+static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
+{
+ if (unlikely(req->ns->readonly)) {
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_flush:
+ break;
+ default:
+ return NVME_SC_NS_WRITE_PROTECTED;
+ }
+ }
+
+ return 0;
+}
+
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -554,6 +636,12 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
if (unlikely(!req->ns))
return NVME_SC_INVALID_NS | NVME_SC_DNR;
+ ret = nvmet_check_ana_state(req->port, req->ns);
+ if (unlikely(ret))
+ return ret;
+ ret = nvmet_io_cmd_check_access(req);
+ if (unlikely(ret))
+ return ret;
if (req->ns->file)
return nvmet_file_parse_io_cmd(req);
@@ -870,6 +958,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
nvmet_init_cap(ctrl);
+ ctrl->port = req->port;
+
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
@@ -1109,6 +1199,15 @@ static int __init nvmet_init(void)
{
int error;
+ nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
+
+ buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
+ WQ_MEM_RECLAIM, 0);
+ if (!buffered_io_wq) {
+ error = -ENOMEM;
+ goto out;
+ }
+
error = nvmet_init_discovery();
if (error)
goto out;
@@ -1129,6 +1228,7 @@ static void __exit nvmet_exit(void)
nvmet_exit_configfs();
nvmet_exit_discovery();
ida_destroy(&cntlid_ida);
+ destroy_workqueue(buffered_io_wq);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
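
nvmet_check_ana_state() above fails a command only for the three states in which the path cannot serve I/O; optimized and non-optimized pass through. A compilable sketch of that mapping, using illustrative status values (the kernel's NVME_SC_* constants are authoritative):

    #include <stdio.h>

    /* Stand-ins for enum nvme_ana_state. */
    enum ana_state { OPTIMIZED, NON_OPTIMIZED, INACCESSIBLE, PERSISTENT_LOSS, CHANGE };

    /* Illustrative status values; see the kernel's NVME_SC_* definitions. */
    enum {
        SC_SUCCESS             = 0x0,
        SC_ANA_PERSISTENT_LOSS = 0x301,
        SC_ANA_INACCESSIBLE    = 0x302,
        SC_ANA_TRANSITION      = 0x303,
    };

    /* Mirrors nvmet_check_ana_state(): only states that cannot serve I/O fail. */
    static unsigned short check_ana_state(enum ana_state state)
    {
        switch (state) {
        case INACCESSIBLE:    return SC_ANA_INACCESSIBLE;
        case PERSISTENT_LOSS: return SC_ANA_PERSISTENT_LOSS;
        case CHANGE:          return SC_ANA_TRANSITION;
        default:              return SC_SUCCESS;    /* optimized / non-optimized */
        }
    }

    int main(void)
    {
        printf("status for CHANGE: 0x%x\n", check_ana_state(CHANGE));
        return 0;
    }
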
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 08656b849bd6..eae29f493a07 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -171,7 +171,7 @@ static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
if (ctrl->ops->has_keyed_sgls)
id->sgls |= cpu_to_le32(1 << 2);
- if (ctrl->ops->sqe_inline_size)
+ if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
strcpy(id->subnqn, ctrl->subsys->subsysnqn);
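
The discovery change above keys the in-capsule data bit of the Identify Controller SGLS field off the per-port inline_data_size rather than a transport constant. A small sketch of how that field is composed, with the bit positions taken from the code above:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int has_keyed_sgls = 1;        /* transport capability */
        int inline_data_size = 4096;   /* per-port setting; 0 disables inline data */
        uint32_t sgls = 1u << 0;       /* SGLs are always supported */

        if (has_keyed_sgls)
            sgls |= 1u << 2;           /* keyed SGL data block descriptor */
        if (inline_data_size)
            sgls |= 1u << 20;          /* in-capsule (inline) data */

        printf("sgls = 0x%08x\n", sgls);
        return 0;
    }
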
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index e0b0f7df70c2..7bc9f6240432 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -124,6 +124,13 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
submit_bio(bio);
}
+u16 nvmet_bdev_flush(struct nvmet_req *req)
+{
+ if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
+ return NVME_SC_INTERNAL | NVME_SC_DNR;
+ return 0;
+}
+
static u16 nvmet_bdev_discard_range(struct nvmet_ns *ns,
struct nvme_dsm_range *range, struct bio **bio)
{
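
nvmet_bdev_flush() gives the block backend a synchronous flush helper that returns an NVMe status word, mirroring the nvmet_file_flush() added further below. A hypothetical caller (not part of this patch) could then pick the backend with a single test; a compilable sketch with stubbed types:

    #include <stdio.h>

    typedef unsigned short u16;

    /* Minimal stand-ins so the pattern compiles outside the kernel. */
    struct nvmet_ns { void *file; void *bdev; };
    struct nvmet_req { struct nvmet_ns *ns; };

    static u16 file_flush(struct nvmet_req *req) { (void)req; return 0; }
    static u16 bdev_flush(struct nvmet_req *req) { (void)req; return 0; }

    /* Hypothetical helper: a namespace is file-backed or bdev-backed, never both. */
    static u16 ns_flush(struct nvmet_req *req)
    {
        return req->ns->file ? file_flush(req) : bdev_flush(req);
    }

    int main(void)
    {
        struct nvmet_ns ns = { .file = NULL, .bdev = (void *)1 };
        struct nvmet_req req = { .ns = &ns };

        printf("flush status: %u\n", ns_flush(&req));
        return 0;
    }
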
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 8c42b3a8c420..81a9dc5290a8 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -16,6 +16,8 @@
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
if (ns->file) {
+ if (ns->buffered_io)
+ flush_workqueue(buffered_io_wq);
mempool_destroy(ns->bvec_pool);
ns->bvec_pool = NULL;
kmem_cache_destroy(ns->bvec_cache);
@@ -27,11 +29,14 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
- int ret;
+ int flags = O_RDWR | O_LARGEFILE;
struct kstat stat;
+ int ret;
+
+ if (!ns->buffered_io)
+ flags |= O_DIRECT;
- ns->file = filp_open(ns->device_path,
- O_RDWR | O_LARGEFILE | O_DIRECT, 0);
+ ns->file = filp_open(ns->device_path, flags, 0);
if (IS_ERR(ns->file)) {
pr_err("failed to open file %s: (%ld)\n",
ns->device_path, PTR_ERR(ns->file));
@@ -100,7 +105,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
iocb->ki_pos = pos;
iocb->ki_filp = req->ns->file;
- iocb->ki_flags = IOCB_DIRECT | ki_flags;
+ iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
ret = call_iter(iocb, &iter);
@@ -140,6 +145,12 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
return;
}
+ pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
+ if (unlikely(pos + req->data_len > req->ns->size)) {
+ nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
+ return;
+ }
+
if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
GFP_KERNEL);
@@ -155,8 +166,6 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
is_sync = true;
}
- pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
-
memset(&req->f.iocb, 0, sizeof(struct kiocb));
for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
@@ -189,14 +198,31 @@ out:
nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
}
-static void nvmet_file_flush_work(struct work_struct *w)
+static void nvmet_file_buffered_io_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
- int ret;
- ret = vfs_fsync(req->ns->file, 1);
+ nvmet_file_execute_rw(req);
+}
- nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
+{
+ INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
+ queue_work(buffered_io_wq, &req->f.work);
+}
+
+u16 nvmet_file_flush(struct nvmet_req *req)
+{
+ if (vfs_fsync(req->ns->file, 1) < 0)
+ return NVME_SC_INTERNAL | NVME_SC_DNR;
+ return 0;
+}
+
+static void nvmet_file_flush_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+ nvmet_req_complete(req, nvmet_file_flush(req));
}
static void nvmet_file_execute_flush(struct nvmet_req *req)
@@ -209,22 +235,30 @@ static void nvmet_file_execute_discard(struct nvmet_req *req)
{
int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
struct nvme_dsm_range range;
- loff_t offset;
- loff_t len;
- int i, ret;
+ loff_t offset, len;
+ u16 ret;
+ int i;
for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
- if (nvmet_copy_from_sgl(req, i * sizeof(range), &range,
- sizeof(range)))
+ ret = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
+ sizeof(range));
+ if (ret)
break;
+
offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
len = le32_to_cpu(range.nlb) << req->ns->blksize_shift;
- ret = vfs_fallocate(req->ns->file, mode, offset, len);
- if (ret)
+ if (offset + len > req->ns->size) {
+ ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
break;
+ }
+
+ if (vfs_fallocate(req->ns->file, mode, offset, len)) {
+ ret = NVME_SC_INTERNAL | NVME_SC_DNR;
+ break;
+ }
}
- nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+ nvmet_req_complete(req, ret);
}
static void nvmet_file_dsm_work(struct work_struct *w)
@@ -263,6 +297,11 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w)
len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
req->ns->blksize_shift);
+ if (unlikely(offset + len > req->ns->size)) {
+ nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
+ return;
+ }
+
ret = vfs_fallocate(req->ns->file, mode, offset, len);
nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
}
@@ -280,7 +319,10 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
switch (cmd->common.opcode) {
case nvme_cmd_read:
case nvme_cmd_write:
- req->execute = nvmet_file_execute_rw;
+ if (req->ns->buffered_io)
+ req->execute = nvmet_file_execute_rw_buffered_io;
+ else
+ req->execute = nvmet_file_execute_rw;
req->data_len = nvmet_rw_len(req);
return 0;
case nvme_cmd_flush:
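
Both the read/write and write-zeroes paths above now reject I/O that would run past the backing file: the byte offset is the SLBA shifted by the block-size shift, and offset plus transfer length must not exceed ns->size. The arithmetic in isolation:

    #include <stdio.h>
    #include <stdint.h>

    /* Byte offset is slba << blksize_shift; the transfer must end within ns->size. */
    static int lba_range_ok(uint64_t slba, uint32_t data_len,
                            unsigned int blksize_shift, uint64_t ns_size)
    {
        uint64_t pos = slba << blksize_shift;

        return pos + data_len <= ns_size;
    }

    int main(void)
    {
        /* 4KB blocks, 1MB namespace: block 255 is the last valid 4KB block. */
        printf("%d\n", lba_range_ok(255, 4096, 12, 1 << 20));    /* 1: in range */
        printf("%d\n", lba_range_ok(256, 4096, 12, 1 << 20));    /* 0: past EOF */
        return 0;
    }
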
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index ae7586b8be07..9908082b32c4 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -227,6 +227,7 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
{
struct nvme_loop_ctrl *ctrl = set->driver_data;
+ nvme_req(req)->ctrl = &ctrl->ctrl;
return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 480dfe10fad9..ec9af4ee03b6 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -30,12 +30,11 @@
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
-
/*
* Supported optional AENs:
*/
#define NVMET_AEN_CFG_OPTIONAL \
- NVME_AEN_CFG_NS_ATTR
+ (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
/*
* Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
@@ -59,12 +58,15 @@ struct nvmet_ns {
struct percpu_ref ref;
struct block_device *bdev;
struct file *file;
+ bool readonly;
u32 nsid;
u32 blksize_shift;
loff_t size;
u8 nguid[16];
uuid_t uuid;
+ u32 anagrpid;
+ bool buffered_io;
bool enabled;
struct nvmet_subsys *subsys;
const char *device_path;
@@ -97,6 +99,18 @@ struct nvmet_sq {
struct completion confirm_done;
};
+struct nvmet_ana_group {
+ struct config_group group;
+ struct nvmet_port *port;
+ u32 grpid;
+};
+
+static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_ana_group,
+ group);
+}
+
/**
* struct nvmet_port - Common structure to keep port
* information for the target.
@@ -114,8 +128,12 @@ struct nvmet_port {
struct list_head subsystems;
struct config_group referrals_group;
struct list_head referrals;
+ struct config_group ana_groups_group;
+ struct nvmet_ana_group ana_default_group;
+ enum nvme_ana_state *ana_state;
void *priv;
bool enabled;
+ int inline_data_size;
};
static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
@@ -124,6 +142,13 @@ static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
group);
}
+static inline struct nvmet_port *ana_groups_to_port(
+ struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_port,
+ ana_groups_group);
+}
+
struct nvmet_ctrl {
struct nvmet_subsys *subsys;
struct nvmet_cq **cqs;
@@ -138,6 +163,8 @@ struct nvmet_ctrl {
u16 cntlid;
u32 kato;
+ struct nvmet_port *port;
+
u32 aen_enabled;
unsigned long aen_masked;
struct nvmet_req *async_event_cmds[NVMET_ASYNC_EVENTS];
@@ -166,6 +193,7 @@ struct nvmet_subsys {
struct kref ref;
struct list_head namespaces;
+ unsigned int nr_namespaces;
unsigned int max_nsid;
struct list_head ctrls;
@@ -225,7 +253,6 @@ struct nvmet_req;
struct nvmet_fabrics_ops {
struct module *owner;
unsigned int type;
- unsigned int sqe_inline_size;
unsigned int msdbd;
bool has_keyed_sgls : 1;
void (*queue_response)(struct nvmet_req *req);
@@ -269,6 +296,8 @@ struct nvmet_req {
const struct nvmet_fabrics_ops *ops;
};
+extern struct workqueue_struct *buffered_io_wq;
+
static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
{
req->rsp->status = cpu_to_le16(status << 1);
@@ -337,6 +366,10 @@ void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);
+void nvmet_send_ana_event(struct nvmet_subsys *subsys,
+ struct nvmet_port *port);
+void nvmet_port_send_ana_event(struct nvmet_port *port);
+
int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
@@ -357,6 +390,22 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd);
#define NVMET_QUEUE_SIZE 1024
#define NVMET_NR_QUEUES 128
#define NVMET_MAX_CMD NVMET_QUEUE_SIZE
+
+/*
+ * Nice round number that makes a list of nsids fit into a page.
+ * Should become tunable at some point in the future.
+ */
+#define NVMET_MAX_NAMESPACES 1024
+
+/*
+ * 0 is not a valid ANA group ID, so we start numbering at 1.
+ *
+ * ANA Group 1 exists without manual intervention, has namespaces assigned to it
+ * by default, and is available in an optimized state through all ports.
+ */
+#define NVMET_MAX_ANAGRPS 128
+#define NVMET_DEFAULT_ANA_GRPID 1
+
#define NVMET_KAS 10
#define NVMET_DISC_KATO 120
@@ -370,6 +419,10 @@ extern struct nvmet_subsys *nvmet_disc_subsys;
extern u64 nvmet_genctr;
extern struct rw_semaphore nvmet_config_sem;
+extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
+extern u64 nvmet_ana_chgcnt;
+extern struct rw_semaphore nvmet_ana_sem;
+
bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
const char *hostnqn);
@@ -377,6 +430,9 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
+u16 nvmet_bdev_flush(struct nvmet_req *req);
+u16 nvmet_file_flush(struct nvmet_req *req);
+void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
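
Since ANA group ID 0 is invalid on the wire, the per-port ana_state array and the global nvmet_ana_group_enabled counters are sized NVMET_MAX_ANAGRPS + 1 and indexed directly by group ID, wasting entry 0 for simpler addressing. A sketch of that sizing convention:

    #include <stdio.h>

    #define MAX_ANAGRPS       128
    #define DEFAULT_ANA_GRPID 1

    int main(void)
    {
        /* Entry 0 is never used; arrays are indexed directly by group ID. */
        int enabled[MAX_ANAGRPS + 1] = { 0 };
        int grpid;

        enabled[DEFAULT_ANA_GRPID] = 1;    /* group 1 always exists */

        for (grpid = 1; grpid <= MAX_ANAGRPS; grpid++)
            if (enabled[grpid])
                printf("group %d enabled\n", grpid);
        return 0;
    }
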
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 1a642e214a4c..3533e918ea37 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -33,16 +33,17 @@
#include "nvmet.h"
/*
- * We allow up to a page of inline data to go with the SQE
+ * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
*/
-#define NVMET_RDMA_INLINE_DATA_SIZE PAGE_SIZE
+#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE PAGE_SIZE
+#define NVMET_RDMA_MAX_INLINE_SGE 4
+#define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE)
struct nvmet_rdma_cmd {
- struct ib_sge sge[2];
+ struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
struct ib_cqe cqe;
struct ib_recv_wr wr;
- struct scatterlist inline_sg;
- struct page *inline_page;
+ struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
struct nvme_command *nvme_cmd;
struct nvmet_rdma_queue *queue;
};
@@ -116,6 +117,8 @@ struct nvmet_rdma_device {
size_t srq_size;
struct kref ref;
struct list_head entry;
+ int inline_data_size;
+ int inline_page_count;
};
static bool nvmet_rdma_use_srq;
@@ -138,6 +141,11 @@ static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static const struct nvmet_fabrics_ops nvmet_rdma_ops;
+static int num_pages(int len)
+{
+ return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
+}
+
/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
@@ -184,6 +192,71 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}
+static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *c)
+{
+ struct scatterlist *sg;
+ struct ib_sge *sge;
+ int i;
+
+ if (!ndev->inline_data_size)
+ return;
+
+ sg = c->inline_sg;
+ sge = &c->sge[1];
+
+ for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
+ if (sge->length)
+ ib_dma_unmap_page(ndev->device, sge->addr,
+ sge->length, DMA_FROM_DEVICE);
+ if (sg_page(sg))
+ __free_page(sg_page(sg));
+ }
+}
+
+static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *c)
+{
+ struct scatterlist *sg;
+ struct ib_sge *sge;
+ struct page *pg;
+ int len;
+ int i;
+
+ if (!ndev->inline_data_size)
+ return 0;
+
+ sg = c->inline_sg;
+ sg_init_table(sg, ndev->inline_page_count);
+ sge = &c->sge[1];
+ len = ndev->inline_data_size;
+
+ for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
+ pg = alloc_page(GFP_KERNEL);
+ if (!pg)
+ goto out_err;
+ sg_assign_page(sg, pg);
+ sge->addr = ib_dma_map_page(ndev->device,
+ pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(ndev->device, sge->addr))
+ goto out_err;
+ sge->length = min_t(int, len, PAGE_SIZE);
+ sge->lkey = ndev->pd->local_dma_lkey;
+ len -= sge->length;
+ }
+
+ return 0;
+out_err:
+ for (; i >= 0; i--, sg--, sge--) {
+ if (sge->length)
+ ib_dma_unmap_page(ndev->device, sge->addr,
+ sge->length, DMA_FROM_DEVICE);
+ if (sg_page(sg))
+ __free_page(sg_page(sg));
+ }
+ return -ENOMEM;
+}
+
static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *c, bool admin)
{
@@ -200,33 +273,17 @@ static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
c->sge[0].length = sizeof(*c->nvme_cmd);
c->sge[0].lkey = ndev->pd->local_dma_lkey;
- if (!admin) {
- c->inline_page = alloc_pages(GFP_KERNEL,
- get_order(NVMET_RDMA_INLINE_DATA_SIZE));
- if (!c->inline_page)
- goto out_unmap_cmd;
- c->sge[1].addr = ib_dma_map_page(ndev->device,
- c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
- DMA_FROM_DEVICE);
- if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
- goto out_free_inline_page;
- c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
- c->sge[1].lkey = ndev->pd->local_dma_lkey;
- }
+ if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
+ goto out_unmap_cmd;
c->cqe.done = nvmet_rdma_recv_done;
c->wr.wr_cqe = &c->cqe;
c->wr.sg_list = c->sge;
- c->wr.num_sge = admin ? 1 : 2;
+ c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;
return 0;
-out_free_inline_page:
- if (!admin) {
- __free_pages(c->inline_page,
- get_order(NVMET_RDMA_INLINE_DATA_SIZE));
- }
out_unmap_cmd:
ib_dma_unmap_single(ndev->device, c->sge[0].addr,
sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
@@ -240,12 +297,8 @@ out:
static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *c, bool admin)
{
- if (!admin) {
- ib_dma_unmap_page(ndev->device, c->sge[1].addr,
- NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
- __free_pages(c->inline_page,
- get_order(NVMET_RDMA_INLINE_DATA_SIZE));
- }
+ if (!admin)
+ nvmet_rdma_free_inline_pages(ndev, c);
ib_dma_unmap_single(ndev->device, c->sge[0].addr,
sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
kfree(c->nvme_cmd);
@@ -382,13 +435,21 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *cmd)
{
+ int ret;
+
ib_dma_sync_single_for_device(ndev->device,
cmd->sge[0].addr, cmd->sge[0].length,
DMA_FROM_DEVICE);
if (ndev->srq)
- return ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
- return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
+ ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
+ else
+ ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
+
+ if (unlikely(ret))
+ pr_err("post_recv cmd failed\n");
+
+ return ret;
}
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
@@ -427,7 +488,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
}
- if (rsp->req.sg != &rsp->cmd->inline_sg)
+ if (rsp->req.sg != rsp->cmd->inline_sg)
sgl_free(rsp->req.sg);
if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
@@ -491,7 +552,7 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
rsp->send_sge.addr, rsp->send_sge.length,
DMA_TO_DEVICE);
- if (ib_post_send(cm_id->qp, first_wr, NULL)) {
+ if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
pr_err("sending cmd response failed\n");
nvmet_rdma_release_rsp(rsp);
}
@@ -527,10 +588,25 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
u64 off)
{
- sg_init_table(&rsp->cmd->inline_sg, 1);
- sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
- rsp->req.sg = &rsp->cmd->inline_sg;
- rsp->req.sg_cnt = 1;
+ int sg_count = num_pages(len);
+ struct scatterlist *sg;
+ int i;
+
+ sg = rsp->cmd->inline_sg;
+ for (i = 0; i < sg_count; i++, sg++) {
+ if (i < sg_count - 1)
+ sg_unmark_end(sg);
+ else
+ sg_mark_end(sg);
+ sg->offset = off;
+ sg->length = min_t(int, len, PAGE_SIZE - off);
+ len -= sg->length;
+ if (!i)
+ off = 0;
+ }
+
+ rsp->req.sg = rsp->cmd->inline_sg;
+ rsp->req.sg_cnt = sg_count;
}
static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
@@ -542,7 +618,7 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
if (!nvme_is_write(rsp->req.cmd))
return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
+ if (off + len > rsp->queue->dev->inline_data_size) {
pr_err("invalid inline data offset!\n");
return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
}
@@ -741,7 +817,7 @@ static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
srq_size = 4095; /* XXX: tune */
srq_attr.attr.max_wr = srq_size;
- srq_attr.attr.max_sge = 2;
+ srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
srq_attr.attr.srq_limit = 0;
srq_attr.srq_type = IB_SRQT_BASIC;
srq = ib_create_srq(ndev->pd, &srq_attr);
@@ -763,11 +839,16 @@ static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
ndev->srq = srq;
ndev->srq_size = srq_size;
- for (i = 0; i < srq_size; i++)
- nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
+ for (i = 0; i < srq_size; i++) {
+ ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
+ if (ret)
+ goto out_free_cmds;
+ }
return 0;
+out_free_cmds:
+ nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
out_destroy_srq:
ib_destroy_srq(srq);
return ret;
@@ -791,7 +872,10 @@ static void nvmet_rdma_free_dev(struct kref *ref)
static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
+ struct nvmet_port *port = cm_id->context;
struct nvmet_rdma_device *ndev;
+ int inline_page_count;
+ int inline_sge_count;
int ret;
mutex_lock(&device_list_mutex);
@@ -805,6 +889,18 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
if (!ndev)
goto out_err;
+ inline_page_count = num_pages(port->inline_data_size);
+ inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
+ cm_id->device->attrs.max_recv_sge) - 1;
+ if (inline_page_count > inline_sge_count) {
+ pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
+ port->inline_data_size, cm_id->device->name,
+ inline_sge_count * PAGE_SIZE);
+ port->inline_data_size = inline_sge_count * PAGE_SIZE;
+ inline_page_count = inline_sge_count;
+ }
+ ndev->inline_data_size = port->inline_data_size;
+ ndev->inline_page_count = inline_page_count;
ndev->device = cm_id->device;
kref_init(&ndev->ref);
@@ -879,7 +975,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
} else {
/* +1 for drain */
qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
- qp_attr.cap.max_recv_sge = 2;
+ qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
}
ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
@@ -897,13 +993,17 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
if (!ndev->srq) {
for (i = 0; i < queue->recv_queue_size; i++) {
queue->cmds[i].queue = queue;
- nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
+ ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
+ if (ret)
+ goto err_destroy_qp;
}
}
out:
return ret;
+err_destroy_qp:
+ rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
ib_free_cq(queue->cq);
goto out;
@@ -1377,6 +1477,15 @@ static int nvmet_rdma_add_port(struct nvmet_port *port)
return -EINVAL;
}
+ if (port->inline_data_size < 0) {
+ port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
+ } else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
+ pr_warn("inline_data_size %u is too large, reducing to %u\n",
+ port->inline_data_size,
+ NVMET_RDMA_MAX_INLINE_DATA_SIZE);
+ port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
+ }
+
ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
port->disc_addr.trsvcid, &addr);
if (ret) {
@@ -1454,7 +1563,6 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_RDMA,
- .sqe_inline_size = NVMET_RDMA_INLINE_DATA_SIZE,
.msdbd = 1,
.has_keyed_sgls = 1,
.add_port = nvmet_rdma_add_port,
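
The inline-data rework above sizes the receive SGE list from num_pages(inline_data_size). The helper's arithmetic, extracted into a compilable form (4KB pages assumed):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1 << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Same arithmetic as num_pages() in the patch: pages needed for len bytes. */
    static int num_pages(int len)
    {
        return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
    }

    int main(void)
    {
        /* 16KB of inline data needs four 4KB pages, hence up to 4 inline SGEs. */
        printf("num_pages(16384) = %d\n", num_pages(16384));
        printf("num_pages(4097)  = %d\n", num_pages(4097));
        printf("num_pages(1)     = %d\n", num_pages(1));
        return 0;
    }
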
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 53349912ac75..7ddbf0a1ab86 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -846,7 +846,7 @@ EXPORT_SYMBOL(of_iomap);
* for a given device_node
* @device: the device whose io range will be mapped
* @index: index of the io range
- * @name: name of the resource
+ * @name: name "override" for the memory region request or NULL
*
* Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
* error code on failure. Usage example:
@@ -856,7 +856,7 @@ EXPORT_SYMBOL(of_iomap);
* return PTR_ERR(base);
*/
void __iomem *of_io_request_and_map(struct device_node *np, int index,
- const char *name)
+ const char *name)
{
struct resource res;
void __iomem *mem;
@@ -864,6 +864,8 @@ void __iomem *of_io_request_and_map(struct device_node *np, int index,
if (of_address_to_resource(np, index, &res))
return IOMEM_ERR_PTR(-EINVAL);
+ if (!name)
+ name = res.name;
if (!request_mem_region(res.start, resource_size(&res), name))
return IOMEM_ERR_PTR(-EBUSY);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 33d85511d790..5957cd4fa262 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -127,20 +127,20 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
}
/*
- * Set default coherent_dma_mask to 32 bit. Drivers are expected to
- * setup the correct supported mask.
+ * If @dev is expected to be DMA-capable then the bus code that created
+ * it should have initialised its dma_mask pointer by this point. For
+ * now, we'll continue the legacy behaviour of coercing it to the
+ * coherent mask if not, but we'll no longer do so quietly.
*/
- if (!dev->coherent_dma_mask)
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
- /*
- * Set it to coherent_dma_mask by default if the architecture
- * code has not set it.
- */
- if (!dev->dma_mask)
+ if (!dev->dma_mask) {
+ dev_warn(dev, "DMA mask not set\n");
dev->dma_mask = &dev->coherent_dma_mask;
+ }
- if (!size)
+ if (!size && dev->coherent_dma_mask)
size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+ else if (!size)
+ size = 1ULL << 32;
dev->dma_pfn_offset = offset;
@@ -149,6 +149,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
* set by the driver.
*/
mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
+ dev->bus_dma_mask = mask;
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
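
The new bus_dma_mask above is derived by rounding the bus limit dma_addr + size - 1 up to the next power-of-two mask, i.e. a mask of ilog2(end) + 1 bits. The same computation in standalone form, with an fls-style helper standing in for the kernel's ilog2():

    #include <stdio.h>
    #include <stdint.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    /* Position of the highest set bit, i.e. ilog2(x) + 1 for x > 0. */
    static unsigned int fls64_bits(uint64_t x)
    {
        unsigned int n = 0;

        while (x) {
            n++;
            x >>= 1;
        }
        return n;
    }

    int main(void)
    {
        uint64_t dma_addr = 0, size = 1ULL << 32;
        uint64_t end = dma_addr + size - 1;
        uint64_t mask = DMA_BIT_MASK(fls64_bits(end));

        printf("bus_dma_mask = 0x%llx\n", (unsigned long long)mask);
        return 0;
    }
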
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 6da20b9688f7..800ad252cf9c 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -1034,14 +1034,7 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
bool hotpluggable;
/* We are scanning "memory" nodes only */
- if (type == NULL) {
- /*
- * The longtrail doesn't have a device_type on the
- * /memory node, so look for the node called /memory@0.
- */
- if (!IS_ENABLED(CONFIG_PPC32) || depth != 1 || strcmp(uname, "memory@0") != 0)
- return 0;
- } else if (strcmp(type, "memory") != 0)
+ if (type == NULL || strcmp(type, "memory") != 0)
return 0;
reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index d963baf8e53a..e92391d6d1bd 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -367,14 +367,23 @@ struct phy_device *of_phy_get_and_connect(struct net_device *dev,
phy_interface_t iface;
struct device_node *phy_np;
struct phy_device *phy;
+ int ret;
iface = of_get_phy_mode(np);
if (iface < 0)
return NULL;
-
- phy_np = of_parse_phandle(np, "phy-handle", 0);
- if (!phy_np)
- return NULL;
+ if (of_phy_is_fixed_link(np)) {
+ ret = of_phy_register_fixed_link(np);
+ if (ret < 0) {
+ netdev_err(dev, "broken fixed-link specification\n");
+ return NULL;
+ }
+ phy_np = of_node_get(np);
+ } else {
+ phy_np = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy_np)
+ return NULL;
+ }
phy = of_phy_connect(dev, phy_np, hndlr, 0, iface);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 6925d993e1f0..7ba90c290a42 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -185,6 +185,9 @@ static struct platform_device *of_platform_device_create_pdata(
if (!dev)
goto err_clear_flag;
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ if (!dev->dev.dma_mask)
+ dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
of_msi_configure(&dev->dev, dev->dev.of_node);
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 4923a2a8e14b..5b78f3b1b918 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -273,6 +273,9 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
if (WARN_ON(pdev->pasid_enabled))
return -EBUSY;
+ if (!pdev->eetlp_prefix_path)
+ return -EINVAL;
+
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return -EINVAL;
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index cc9fa02d32a0..028b287466fb 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -102,7 +102,7 @@ config PCI_HOST_GENERIC
config PCIE_XILINX
bool "Xilinx AXI PCIe host bridge support"
- depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) || COMPILE_TEST
+ depends on OF || COMPILE_TEST
help
Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe
Host Bridge driver.
@@ -239,6 +239,16 @@ config PCIE_MEDIATEK
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
+config PCIE_MOBIVEIL
+ bool "Mobiveil AXI PCIe controller"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on OF
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+ Say Y here if you want to enable support for the Mobiveil AXI PCIe
+ Soft IP. It has up to 8 outbound and inbound windows
+ for address translation and is a PCIe Gen4 IP.
+
config PCIE_TANGO_SMP8759
bool "Tango SMP8759 PCIe controller (DANGEROUS)"
depends on ARCH_TANGO && PCI_MSI && OF
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 24322b92f200..d56a507495c5 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
+obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 345aab56ce8b..ce9224a36f62 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -370,7 +370,7 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
}
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u8 interrupt_num)
+ enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 4cc1e5df8c79..cee5f2f590e2 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -421,7 +421,6 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
}
}
- pp->root_bus_nr = -1;
pp->ops = &exynos_pcie_host_ops;
ret = dw_pcie_host_init(pp);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 80f604602783..4a9a673b4777 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -667,7 +667,6 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
}
}
- pp->root_bus_nr = -1;
pp->ops = &imx6_pcie_host_ops;
ret = dw_pcie_host_init(pp);
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 3722a5f31e5e..e88bd221fffe 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -347,7 +347,6 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
}
}
- pp->root_bus_nr = -1;
pp->ops = &keystone_pcie_host_ops;
ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
if (ret) {
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 072fd7ecc29f..0c389a30ef5d 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -172,7 +172,6 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
struct device *dev = &pdev->dev;
int ret;
- pp->root_bus_nr = -1;
pp->ops = &armada8k_pcie_host_ops;
pp->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 321b56cfd5d0..dba83abfe764 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -399,7 +399,6 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
}
}
- pp->root_bus_nr = -1;
pp->ops = &artpec6_pcie_host_ops;
ret = dw_pcie_host_init(pp);
@@ -428,7 +427,7 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u8 interrupt_num)
+ enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 8650416f6f9e..1e7b02221eac 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -40,6 +40,39 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
__dw_pcie_ep_reset_bar(pci, bar, 0);
}
+static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
+ u8 cap)
+{
+ u8 cap_id, next_cap_ptr;
+ u16 reg;
+
+ reg = dw_pcie_readw_dbi(pci, cap_ptr);
+ next_cap_ptr = (reg & 0xff00) >> 8;
+ cap_id = (reg & 0x00ff);
+
+ if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
+ return 0;
+
+ if (cap_id == cap)
+ return cap_ptr;
+
+ return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
+}
+
+static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
+{
+ u8 next_cap_ptr;
+ u16 reg;
+
+ reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
+ next_cap_ptr = (reg & 0x00ff);
+
+ if (!next_cap_ptr)
+ return 0;
+
+ return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
+}
+
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *hdr)
{
@@ -213,36 +246,84 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
{
- int val;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
+
+ if (!ep->msi_cap)
+ return -EINVAL;
+
+ reg = ep->msi_cap + PCI_MSI_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ if (!(val & PCI_MSI_FLAGS_ENABLE))
+ return -EINVAL;
+
+ val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
+
+ return val;
+}
+
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
+
+ if (!ep->msi_cap)
+ return -EINVAL;
+
+ reg = ep->msi_cap + PCI_MSI_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ val &= ~PCI_MSI_FLAGS_QMASK;
+ val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writew_dbi(pci, reg, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
- val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
- if (!(val & MSI_CAP_MSI_EN_MASK))
+ if (!ep->msix_cap)
return -EINVAL;
- val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
+ reg = ep->msix_cap + PCI_MSIX_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ if (!(val & PCI_MSIX_FLAGS_ENABLE))
+ return -EINVAL;
+
+ val &= PCI_MSIX_FLAGS_QSIZE;
+
return val;
}
-static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int)
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
{
- int val;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 val, reg;
+
+ if (!ep->msix_cap)
+ return -EINVAL;
- val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
- val &= ~MSI_CAP_MMC_MASK;
- val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
+ reg = ep->msix_cap + PCI_MSIX_FLAGS;
+ val = dw_pcie_readw_dbi(pci, reg);
+ val &= ~PCI_MSIX_FLAGS_QSIZE;
+ val |= interrupts;
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
+ dw_pcie_writew_dbi(pci, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
- enum pci_epc_irq_type type, u8 interrupt_num)
+ enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -282,32 +363,52 @@ static const struct pci_epc_ops epc_ops = {
.unmap_addr = dw_pcie_ep_unmap_addr,
.set_msi = dw_pcie_ep_set_msi,
.get_msi = dw_pcie_ep_get_msi,
+ .set_msix = dw_pcie_ep_set_msix,
+ .get_msix = dw_pcie_ep_get_msix,
.raise_irq = dw_pcie_ep_raise_irq,
.start = dw_pcie_ep_start,
.stop = dw_pcie_ep_stop,
};
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ dev_err(dev, "EP cannot trigger legacy IRQs\n");
+
+ return -EINVAL;
+}
+
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct pci_epc *epc = ep->epc;
u16 msg_ctrl, msg_data;
- u32 msg_addr_lower, msg_addr_upper;
+ u32 msg_addr_lower, msg_addr_upper, reg;
u64 msg_addr;
bool has_upper;
int ret;
+ if (!ep->msi_cap)
+ return -EINVAL;
+
/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
- msg_ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
+ reg = ep->msi_cap + PCI_MSI_FLAGS;
+ msg_ctrl = dw_pcie_readw_dbi(pci, reg);
has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
- msg_addr_lower = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
+ reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
+ msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
if (has_upper) {
- msg_addr_upper = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
- msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_64);
+ reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
+ msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
+ reg = ep->msi_cap + PCI_MSI_DATA_64;
+ msg_data = dw_pcie_readw_dbi(pci, reg);
} else {
msg_addr_upper = 0;
- msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_32);
+ reg = ep->msi_cap + PCI_MSI_DATA_32;
+ msg_data = dw_pcie_readw_dbi(pci, reg);
}
msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
@@ -322,6 +423,64 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epc *epc = ep->epc;
+ u16 tbl_offset, bir;
+ u32 bar_addr_upper, bar_addr_lower;
+ u32 msg_addr_upper, msg_addr_lower;
+ u32 reg, msg_data, vec_ctrl;
+ u64 tbl_addr, msg_addr, reg_u64;
+ void __iomem *msix_tbl;
+ int ret;
+
+ reg = ep->msix_cap + PCI_MSIX_TABLE;
+ tbl_offset = dw_pcie_readl_dbi(pci, reg);
+ bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
+ tbl_offset &= PCI_MSIX_TABLE_OFFSET;
+ tbl_offset >>= 3;
+
+ reg = PCI_BASE_ADDRESS_0 + (4 * bir);
+ bar_addr_upper = 0;
+ bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
+ reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
+ if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
+ bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
+
+ tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
+ tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
+ tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
+ PCI_MSIX_ENTRY_SIZE);
+ if (!msix_tbl)
+ return -EINVAL;
+
+ msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
+ msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
+ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
+ msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
+ vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
+ iounmap(msix_tbl);
+
+ if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)
+ return -EPERM;
+
+ ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ epc->mem->page_size);
+ if (ret)
+ return ret;
+
+ writel(msg_data, ep->msi_mem);
+
+ dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+
+ return 0;
+}
+
void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
@@ -386,15 +545,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return -ENOMEM;
ep->outbound_addr = addr;
- if (ep->ops->ep_init)
- ep->ops->ep_init(ep);
-
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
dev_err(dev, "Failed to create epc device\n");
return PTR_ERR(epc);
}
+ ep->epc = epc;
+ epc_set_drvdata(epc, ep);
+
+ if (ep->ops->ep_init)
+ ep->ops->ep_init(ep);
+
ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
if (ret < 0)
epc->max_functions = 1;
@@ -409,15 +571,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
epc->mem->page_size);
if (!ep->msi_mem) {
- dev_err(dev, "Failed to reserve memory for MSI\n");
+ dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
return -ENOMEM;
}
+ ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI);
- epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER;
- EPC_FEATURE_SET_BAR(epc->features, BAR_0);
+ ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX);
- ep->epc = epc;
- epc_set_drvdata(epc, ep);
dw_pcie_setup(pci);
return 0;
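
dw_pcie_ep_find_capability() above walks the standard PCI capability linked list through the DBI: each capability word holds the ID in the low byte and the next pointer in the high byte. The same walk, done iteratively over a toy config space:

    #include <stdio.h>
    #include <stdint.h>

    #define PCI_CAP_ID_MSI  0x05
    #define PCI_CAP_ID_MSIX 0x11

    /* Toy config space: byte at ptr is the capability ID, byte at ptr + 1
     * is the next pointer, matching the 16-bit word the kernel code reads. */
    static uint8_t cfg[256];

    static uint8_t find_cap(uint8_t ptr, uint8_t cap)
    {
        while (ptr) {
            if (cfg[ptr] == cap)
                return ptr;
            ptr = cfg[ptr + 1];
        }
        return 0;
    }

    int main(void)
    {
        cfg[0x34] = 0x40;                               /* capabilities pointer  */
        cfg[0x40] = PCI_CAP_ID_MSI;  cfg[0x41] = 0x50;  /* MSI -> next at 0x50   */
        cfg[0x50] = PCI_CAP_ID_MSIX; cfg[0x51] = 0x00;  /* MSI-X -> end of list  */

        printf("MSI at 0x%02x, MSI-X at 0x%02x\n",
               find_cap(cfg[0x34], PCI_CAP_ID_MSI),
               find_cap(cfg[0x34], PCI_CAP_ID_MSIX));
        return 0;
    }
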
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 5937fed4c938..c12bf794d69c 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -70,24 +70,29 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epc *epc = ep->epc;
enum pci_barno bar;
for (bar = BAR_0; bar <= BAR_5; bar++)
dw_pcie_ep_reset_bar(pci, bar);
+
+ epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
+ epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
}
static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
enum pci_epc_irq_type type,
- u8 interrupt_num)
+ u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
case PCI_EPC_IRQ_LEGACY:
- dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
- return -EINVAL;
+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
case PCI_EPC_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
}
@@ -118,7 +123,6 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
return pp->msi_irq;
}
- pp->root_bus_nr = -1;
pp->ops = &dw_plat_pcie_host_ops;
ret = dw_pcie_host_init(pp);
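
dw_pcie_ep_set_msix() programs the MSI-X table size as a read-modify-write of the QSIZE field, which holds the table size minus one in the low 11 bits of the Message Control word. The field handling in isolation:

    #include <stdio.h>
    #include <stdint.h>

    #define PCI_MSIX_FLAGS_QSIZE 0x07FF    /* table size - 1, low 11 bits */

    int main(void)
    {
        uint16_t msg_ctrl = 0x8000;      /* MSI-X Enable set, table size 1 */
        uint16_t interrupts = 32 - 1;    /* advertise a 32-entry table */

        msg_ctrl &= ~PCI_MSIX_FLAGS_QSIZE;
        msg_ctrl |= interrupts;

        printf("message control = 0x%04x, table size = %u\n",
               msg_ctrl, (msg_ctrl & PCI_MSIX_FLAGS_QSIZE) + 1);
        return 0;
    }
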
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index bee4e2535a61..96126fd8403c 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -96,17 +96,6 @@
#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
((0x3 << 20) | ((region) << 9) | (0x1 << 8))
-#define MSI_MESSAGE_CONTROL 0x52
-#define MSI_CAP_MMC_SHIFT 1
-#define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT)
-#define MSI_CAP_MME_SHIFT 4
-#define MSI_CAP_MSI_EN_MASK 0x1
-#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT)
-#define MSI_MESSAGE_ADDR_L32 0x54
-#define MSI_MESSAGE_ADDR_U32 0x58
-#define MSI_MESSAGE_DATA_32 0x58
-#define MSI_MESSAGE_DATA_64 0x5C
-
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
@@ -191,7 +180,7 @@ enum dw_pcie_as_type {
struct dw_pcie_ep_ops {
void (*ep_init)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u8 interrupt_num);
+ enum pci_epc_irq_type type, u16 interrupt_num);
};
struct dw_pcie_ep {
@@ -208,6 +197,8 @@ struct dw_pcie_ep {
u32 num_ob_windows;
void __iomem *msi_mem;
phys_addr_t msi_mem_phys;
+ u8 msi_cap; /* MSI capability offset */
+ u8 msix_cap; /* MSI-X capability offset */
};
struct dw_pcie_ops {
@@ -357,8 +348,11 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
#else
static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
@@ -374,12 +368,23 @@ static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
}
+static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+ return 0;
+}
+
static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
return 0;
}
+static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ return 0;
+}
+
static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 3611d6ce9a92..7b32e619b959 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -420,7 +420,6 @@ static int histb_pcie_probe(struct platform_device *pdev)
phy_init(hipcie->phy);
}
- pp->root_bus_nr = -1;
pp->ops = &histb_pcie_host_ops;
platform_set_drvdata(pdev, hipcie);
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index d2970a009eb5..5352e0c3be82 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -430,6 +430,9 @@ static int kirin_pcie_host_init(struct pcie_port *pp)
{
kirin_pcie_establish_link(pp);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+
return 0;
}
@@ -445,9 +448,34 @@ static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
.host_init = kirin_pcie_host_init,
};
+static int kirin_pcie_add_msi(struct dw_pcie *pci,
+ struct platform_device *pdev)
+{
+ int irq;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev,
+ "failed to get MSI IRQ (%d)\n", irq);
+ return irq;
+ }
+
+ pci->pp.msi_irq = irq;
+ }
+
+ return 0;
+}
+
static int __init kirin_add_pcie_port(struct dw_pcie *pci,
struct platform_device *pdev)
{
+ int ret;
+
+ ret = kirin_pcie_add_msi(pci, pdev);
+ if (ret)
+ return ret;
+
pci->pp.ops = &kirin_pcie_host_ops;
return dw_pcie_host_init(&pci->pp);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index a1d0198081a6..4352c1cb926d 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1251,7 +1251,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- pp->root_bus_nr = -1;
pp->ops = &qcom_pcie_dw_ops;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index ecb58f7b7566..7d0cdfd8138b 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -210,7 +210,6 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
return ret;
}
- pp->root_bus_nr = -1;
pp->ops = &spear13xx_pcie_host_ops;
ret = dw_pcie_host_init(pp);
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 0fae816fba39..6b4555ff2548 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -111,24 +111,6 @@
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
-/* PCIe window configuration */
-#define OB_WIN_BASE_ADDR 0x4c00
-#define OB_WIN_BLOCK_SIZE 0x20
-#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
- OB_WIN_BLOCK_SIZE * (win) + \
- (offset))
-#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
-#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
-#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
-#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
-#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
-#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
-#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
-
-/* PCIe window types */
-#define OB_PCIE_MEM 0x0
-#define OB_PCIE_IO 0x4
-
/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR 0x6000
#define CFG_REG (LMI_BASE_ADDR + 0x0)
@@ -247,34 +229,9 @@ static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
return -ETIMEDOUT;
}
-/*
- * Set PCIe address window register which could be used for memory
- * mapping.
- */
-static void advk_pcie_set_ob_win(struct advk_pcie *pcie,
- u32 win_num, u32 match_ms,
- u32 match_ls, u32 mask_ms,
- u32 mask_ls, u32 remap_ms,
- u32 remap_ls, u32 action)
-{
- advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num));
- advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num));
- advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num));
- advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num));
- advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num));
- advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num));
- advk_writel(pcie, action, OB_WIN_ACTIONS(win_num));
- advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num));
-}
-
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
u32 reg;
- int i;
-
- /* Point PCIe unit MBUS decode windows to DRAM space */
- for (i = 0; i < 8; i++)
- advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0);
/* Set to Direct mode */
reg = advk_readl(pcie, CTRL_CONFIG_REG);
@@ -433,6 +390,15 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
return -ETIMEDOUT;
}
+static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
+ int devfn)
+{
+ if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
+ return false;
+
+ return true;
+}
+
static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
@@ -440,7 +406,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
u32 reg;
int ret;
- if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
+ if (!advk_pcie_valid_device(pcie, bus, devfn)) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@@ -494,7 +460,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int offset;
int ret;
- if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
+ if (!advk_pcie_valid_device(pcie, bus, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
if (where % size)
@@ -843,12 +809,6 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
switch (resource_type(res)) {
case IORESOURCE_IO:
- advk_pcie_set_ob_win(pcie, 1,
- upper_32_bits(res->start),
- lower_32_bits(res->start),
- 0, 0xF8000000, 0,
- lower_32_bits(res->start),
- OB_PCIE_IO);
err = devm_pci_remap_iospace(dev, res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
@@ -857,12 +817,6 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
}
break;
case IORESOURCE_MEM:
- advk_pcie_set_ob_win(pcie, 0,
- upper_32_bits(res->start),
- lower_32_bits(res->start),
- 0x0, 0xF8000000, 0,
- lower_32_bits(res->start),
- (2 << 20) | OB_PCIE_MEM);
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
break;
case IORESOURCE_BUS:
@@ -889,7 +843,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
struct resource *res;
- struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
int ret, irq;
@@ -943,21 +896,13 @@ static int advk_pcie_probe(struct platform_device *pdev)
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
- ret = pci_scan_root_bus_bridge(bridge);
+ ret = pci_host_probe(bridge);
if (ret < 0) {
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
return ret;
}
- bus = bridge->bus;
-
- pci_bus_assign_resources(bus);
-
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
-
- pci_bus_add_devices(bus);
return 0;
}
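For reference, pci_host_probe() folds in exactly the enumeration boilerplate deleted above. A rough sketch of its effect, reconstructed from the removed lines (illustrative only, not part of the patch):

static int pci_host_probe_sketch(struct pci_host_bridge *bridge)
{
	struct pci_bus *bus, *child;
	int ret;

	/* scan the root bus and everything below it */
	ret = pci_scan_root_bus_bridge(bridge);
	if (ret < 0)
		return ret;

	bus = bridge->bus;

	/* assign BAR and window resources discovered during the scan */
	pci_bus_assign_resources(bus);

	/* tune MPS/MRRS settings on each subordinate bus */
	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	/* make the devices visible and bind drivers */
	pci_bus_add_devices(bus);
	return 0;
}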
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index f6325f1a89e8..c00f82cc54aa 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -45,6 +45,7 @@
#include <linux/irqdomain.h>
#include <asm/irqdomain.h>
#include <asm/apic.h>
+#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
@@ -1545,7 +1546,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
unsigned long flags;
int ret;
- hpdev = kzalloc(sizeof(*hpdev), GFP_ATOMIC);
+ hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
if (!hpdev)
return NULL;
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 23e270839e6a..50eb0729385b 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -125,6 +125,7 @@ struct mvebu_pcie {
struct platform_device *pdev;
struct mvebu_pcie_port *ports;
struct msi_controller *msi;
+ struct list_head resources;
struct resource io;
struct resource realio;
struct resource mem;
@@ -800,7 +801,7 @@ static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
- struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct mvebu_pcie *pcie = bus->sysdata;
struct mvebu_pcie_port *port;
int ret;
@@ -826,7 +827,7 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
- struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct mvebu_pcie *pcie = bus->sysdata;
struct mvebu_pcie_port *port;
int ret;
@@ -857,36 +858,6 @@ static struct pci_ops mvebu_pcie_ops = {
.write = mvebu_pcie_wr_conf,
};
-static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
-{
- struct mvebu_pcie *pcie = sys_to_pcie(sys);
- int err, i;
-
- pcie->mem.name = "PCI MEM";
- pcie->realio.name = "PCI I/O";
-
- if (resource_size(&pcie->realio) != 0)
- pci_add_resource_offset(&sys->resources, &pcie->realio,
- sys->io_offset);
-
- pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
- pci_add_resource(&sys->resources, &pcie->busn);
-
- err = devm_request_pci_bus_resources(&pcie->pdev->dev, &sys->resources);
- if (err)
- return 0;
-
- for (i = 0; i < pcie->nports; i++) {
- struct mvebu_pcie_port *port = &pcie->ports[i];
-
- if (!port->base)
- continue;
- mvebu_pcie_setup_hw(port);
- }
-
- return 1;
-}
-
static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
const struct resource *res,
resource_size_t start,
@@ -917,31 +888,6 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
return start;
}
-static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
-{
- struct hw_pci hw;
-
- memset(&hw, 0, sizeof(hw));
-
-#ifdef CONFIG_PCI_MSI
- hw.msi_ctrl = pcie->msi;
-#endif
-
- hw.nr_controllers = 1;
- hw.private_data = (void **)&pcie;
- hw.setup = mvebu_pcie_setup;
- hw.map_irq = of_irq_parse_and_map_pci;
- hw.ops = &mvebu_pcie_ops;
- hw.align_resource = mvebu_pcie_align_resource;
-
- pci_common_init_dev(&pcie->pdev->dev, &hw);
-}
-
-/*
- * Looks up the list of register addresses encoded into the reg =
- * <...> property for one that matches the given port/lane. Once
- * found, maps it.
- */
static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
struct device_node *np,
struct mvebu_pcie_port *port)
@@ -1190,46 +1136,79 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
clk_disable_unprepare(port->clk);
}
-static int mvebu_pcie_probe(struct platform_device *pdev)
+/*
+ * We can't use devm_of_pci_get_host_bridge_resources() because we
+ * need to parse our special DT properties encoding the MEM and IO
+ * apertures.
+ */
+static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
{
- struct device *dev = &pdev->dev;
- struct mvebu_pcie *pcie;
+ struct device *dev = &pcie->pdev->dev;
struct device_node *np = dev->of_node;
- struct device_node *child;
- int num, i, ret;
+ unsigned int i;
+ int ret;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
- return -ENOMEM;
+ INIT_LIST_HEAD(&pcie->resources);
- pcie->pdev = pdev;
- platform_set_drvdata(pdev, pcie);
+ /* Get the bus range */
+ ret = of_pci_parse_bus_range(np, &pcie->busn);
+ if (ret) {
+ dev_err(dev, "failed to parse bus-range property: %d\n", ret);
+ return ret;
+ }
+ pci_add_resource(&pcie->resources, &pcie->busn);
- /* Get the PCIe memory and I/O aperture */
+ /* Get the PCIe memory aperture */
mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
if (resource_size(&pcie->mem) == 0) {
dev_err(dev, "invalid memory aperture size\n");
return -EINVAL;
}
+ pcie->mem.name = "PCI MEM";
+ pci_add_resource(&pcie->resources, &pcie->mem);
+
+ /* Get the PCIe IO aperture */
mvebu_mbus_get_pcie_io_aperture(&pcie->io);
if (resource_size(&pcie->io) != 0) {
pcie->realio.flags = pcie->io.flags;
pcie->realio.start = PCIBIOS_MIN_IO;
pcie->realio.end = min_t(resource_size_t,
- IO_SPACE_LIMIT,
- resource_size(&pcie->io));
- } else
- pcie->realio = pcie->io;
+ IO_SPACE_LIMIT - SZ_64K,
+ resource_size(&pcie->io) - 1);
+ pcie->realio.name = "PCI I/O";
- /* Get the bus range */
- ret = of_pci_parse_bus_range(np, &pcie->busn);
- if (ret) {
- dev_err(dev, "failed to parse bus-range property: %d\n", ret);
- return ret;
+ for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
+ pci_ioremap_io(i, pcie->io.start + i);
+
+ pci_add_resource(&pcie->resources, &pcie->realio);
}
+ return devm_request_pci_bus_resources(dev, &pcie->resources);
+}
+
+static int mvebu_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mvebu_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int num, i, ret;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+ pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
+
+ ret = mvebu_pcie_parse_request_resources(pcie);
+ if (ret)
+ return ret;
+
num = of_get_available_child_count(np);
pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
@@ -1272,20 +1251,24 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
continue;
}
+ mvebu_pcie_setup_hw(port);
mvebu_pcie_set_local_dev_nr(port, 1);
mvebu_sw_pci_bridge_init(port);
}
pcie->nports = i;
- for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K)
- pci_ioremap_io(i, pcie->io.start + i);
-
- mvebu_pcie_enable(pcie);
-
- platform_set_drvdata(pdev, pcie);
-
- return 0;
+ list_splice_init(&pcie->resources, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = pcie;
+ bridge->busnr = 0;
+ bridge->ops = &mvebu_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+ bridge->align_resource = mvebu_pcie_align_resource;
+ bridge->msi = pcie->msi;
+
+ return pci_host_probe(bridge);
}
static const struct of_device_id mvebu_pcie_of_match_table[] = {
diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c
index e3fe4124e3af..9e87dd7f9ac3 100644
--- a/drivers/pci/controller/pcie-cadence-ep.c
+++ b/drivers/pci/controller/pcie-cadence-ep.c
@@ -238,7 +238,7 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
- u16 flags, mmc, mme;
+ u16 flags, mme;
/* Validate that the MSI feature is actually enabled. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
@@ -249,7 +249,6 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
* Get the Multiple Message Enable bitfield from the Message Control
* register.
*/
- mmc = (flags & PCI_MSI_FLAGS_QMASK) >> 1;
mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
return mme;
@@ -363,7 +362,8 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
}
static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
- enum pci_epc_irq_type type, u8 interrupt_num)
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -439,6 +439,7 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
struct pci_epc *epc;
struct resource *res;
int ret;
+ int phy_count;
ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
if (!ep)
@@ -473,6 +474,12 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
if (!ep->ob_addr)
return -ENOMEM;
+ ret = cdns_pcie_init_phy(dev, pcie);
+ if (ret) {
+ dev_err(dev, "failed to init phy\n");
+ return ret;
+ }
+ platform_set_drvdata(pdev, pcie);
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
@@ -521,6 +528,10 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
err_get_sync:
pm_runtime_disable(dev);
+ cdns_pcie_disable_phy(pcie);
+ phy_count = pcie->phy_count;
+ while (phy_count--)
+ device_link_del(pcie->link[phy_count]);
return ret;
}
@@ -528,6 +539,7 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_put_sync(dev);
@@ -536,13 +548,14 @@ static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
pm_runtime_disable(dev);
- /* The PCIe controller can't be disabled. */
+ cdns_pcie_disable_phy(pcie);
}
static struct platform_driver cdns_pcie_ep_driver = {
.driver = {
.name = "cdns-pcie-ep",
.of_match_table = cdns_pcie_ep_of_match,
+ .pm = &cdns_pcie_pm_ops,
},
.probe = cdns_pcie_ep_probe,
.shutdown = cdns_pcie_ep_shutdown,
diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c
index a4ebbd37b553..ec394f6a19c8 100644
--- a/drivers/pci/controller/pcie-cadence-host.c
+++ b/drivers/pci/controller/pcie-cadence-host.c
@@ -58,6 +58,11 @@ static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
return pcie->reg_base + (where & 0xfff);
}
+ /* Check that the link is up */
+ if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
+ return NULL;
+ /* Clear AXI link-down status */
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);
/* Update Output registers for AXI region 0. */
addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
@@ -239,6 +244,7 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
struct cdns_pcie *pcie;
struct resource *res;
int ret;
+ int phy_count;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
if (!bridge)
@@ -290,6 +296,13 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
}
pcie->mem_res = res;
+ ret = cdns_pcie_init_phy(dev, pcie);
+ if (ret) {
+ dev_err(dev, "failed to init phy\n");
+ return ret;
+ }
+ platform_set_drvdata(pdev, pcie);
+
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
@@ -322,15 +335,35 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
err_get_sync:
pm_runtime_disable(dev);
+ cdns_pcie_disable_phy(pcie);
+ phy_count = pcie->phy_count;
+ while (phy_count--)
+ device_link_del(pcie->link[phy_count]);
return ret;
}
+static void cdns_pcie_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+ pm_runtime_disable(dev);
+ cdns_pcie_disable_phy(pcie);
+}
+
static struct platform_driver cdns_pcie_host_driver = {
.driver = {
.name = "cdns-pcie-host",
.of_match_table = cdns_pcie_host_of_match,
+ .pm = &cdns_pcie_pm_ops,
},
.probe = cdns_pcie_host_probe,
+ .shutdown = cdns_pcie_shutdown,
};
builtin_platform_driver(cdns_pcie_host_driver);
diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/pcie-cadence.c
index 138d113eb45d..86f1b002c846 100644
--- a/drivers/pci/controller/pcie-cadence.c
+++ b/drivers/pci/controller/pcie-cadence.c
@@ -124,3 +124,126 @@ void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
+
+void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
+{
+ int i = pcie->phy_count;
+
+ while (i--) {
+ phy_power_off(pcie->phy[i]);
+ phy_exit(pcie->phy[i]);
+ }
+}
+
+int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < pcie->phy_count; i++) {
+ ret = phy_init(pcie->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_power_on(pcie->phy[i]);
+ if (ret < 0) {
+ phy_exit(pcie->phy[i]);
+ goto err_phy;
+ }
+ }
+
+ return 0;
+
+err_phy:
+ while (--i >= 0) {
+ phy_power_off(pcie->phy[i]);
+ phy_exit(pcie->phy[i]);
+ }
+
+ return ret;
+}
+
+int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
+{
+ struct device_node *np = dev->of_node;
+ int phy_count;
+ struct phy **phy;
+ struct device_link **link;
+ int i;
+ int ret;
+ const char *name;
+
+ phy_count = of_property_count_strings(np, "phy-names");
+ if (phy_count < 1) {
+ dev_err(dev, "no phy-names. PHY will not be initialized\n");
+ pcie->phy_count = 0;
+ return 0;
+ }
+
+ phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ for (i = 0; i < phy_count; i++) {
+ of_property_read_string_index(np, "phy-names", i, &name);
+ phy[i] = devm_phy_optional_get(dev, name);
+ if (IS_ERR(phy[i]))
+ return PTR_ERR(phy[i]);
+
+ link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+ if (!link[i]) {
+ ret = -EINVAL;
+ goto err_link;
+ }
+ }
+
+ pcie->phy_count = phy_count;
+ pcie->phy = phy;
+ pcie->link = link;
+
+ ret = cdns_pcie_enable_phy(pcie);
+ if (ret)
+ goto err_link;
+
+ return 0;
+
+err_link:
+ while (--i >= 0)
+ device_link_del(link[i]);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cdns_pcie_suspend_noirq(struct device *dev)
+{
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+
+ cdns_pcie_disable_phy(pcie);
+
+ return 0;
+}
+
+static int cdns_pcie_resume_noirq(struct device *dev)
+{
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = cdns_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+const struct dev_pm_ops cdns_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
+ cdns_pcie_resume_noirq)
+};
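A note on the block above: SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() populates the callbacks only when CONFIG_PM_SLEEP is set, matching the #ifdef guard around the handlers, and the noirq phase keeps the PHYs powered until device interrupts have been disabled. A minimal usage sketch with a hypothetical driver name (both Cadence drivers in this diff attach the ops the same way):

static struct platform_driver example_pcie_driver = {
	.driver = {
		.name	= "example-pcie",
		/* PHYs: off in suspend_noirq, back on in resume_noirq */
		.pm	= &cdns_pcie_pm_ops,
	},
};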
diff --git a/drivers/pci/controller/pcie-cadence.h b/drivers/pci/controller/pcie-cadence.h
index 4bb27333b05c..ae6bf2a2b3d3 100644
--- a/drivers/pci/controller/pcie-cadence.h
+++ b/drivers/pci/controller/pcie-cadence.h
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/phy/phy.h>
/*
* Local Management Registers
@@ -165,6 +166,9 @@
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
+/* AXI link down register */
+#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
+
enum cdns_pcie_rp_bar {
RP_BAR0,
RP_BAR1,
@@ -229,6 +233,9 @@ struct cdns_pcie {
struct resource *mem_res;
bool is_rc;
u8 bus;
+ int phy_count;
+ struct phy **phy;
+ struct device_link **link;
};
/* Register access */
@@ -279,7 +286,7 @@ static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
}
static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
- u32 reg, u16 value)
+ u32 reg, u32 value)
{
writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
@@ -307,5 +314,9 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
u32 r, u64 cpu_addr);
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
+void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
+int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
+int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
+extern const struct dev_pm_ops cdns_pcie_pm_ops;
#endif /* _PCIE_CADENCE_H */
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 3c76c5fa4f32..3160e9342a2f 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -85,6 +85,8 @@
#define IMAP_VALID_SHIFT 0
#define IMAP_VALID BIT(IMAP_VALID_SHIFT)
+#define IPROC_PCI_PM_CAP 0x48
+#define IPROC_PCI_PM_CAP_MASK 0xffff
#define IPROC_PCI_EXP_CAP 0xac
#define IPROC_PCIE_REG_INVALID 0xffff
@@ -375,6 +377,17 @@ static const u16 iproc_pcie_reg_paxc_v2[] = {
[IPROC_PCIE_CFG_DATA] = 0x1fc,
};
+/*
+ * List of device IDs of controllers that have a corrupted capability list
+ * and require a SW fixup
+ */
+static const u16 iproc_pcie_corrupt_cap_did[] = {
+ 0x16cd,
+ 0x16f0,
+ 0xd802,
+ 0xd804
+};
+
static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
{
struct iproc_pcie *pcie = bus->sysdata;
@@ -495,6 +508,49 @@ static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p)
return data;
}
+static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
+{
+ u32 i, dev_id;
+
+ switch (where & ~0x3) {
+ case PCI_VENDOR_ID:
+ dev_id = *val >> 16;
+
+ /*
+ * Activate the fixup for those controllers that have corrupted
+ * capability list registers
+ */
+ for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++)
+ if (dev_id == iproc_pcie_corrupt_cap_did[i])
+ pcie->fix_paxc_cap = true;
+ break;
+
+ case IPROC_PCI_PM_CAP:
+ if (pcie->fix_paxc_cap) {
+ /* advertise PM, force next capability to PCIe */
+ *val &= ~IPROC_PCI_PM_CAP_MASK;
+ *val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM;
+ }
+ break;
+
+ case IPROC_PCI_EXP_CAP:
+ if (pcie->fix_paxc_cap) {
+ /* advertise root port, version 2, terminate here */
+ *val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 |
+ PCI_CAP_ID_EXP;
+ }
+ break;
+
+ case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
+ /* Don't advertise CRS SV support */
+ *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ break;
+
+ default:
+ break;
+ }
+}
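To illustrate the PM-capability case above, this is the dword the fixup synthesizes for an affected controller; raw_config_read() is a hypothetical stand-in for the generic config accessor:

u32 val = raw_config_read(IPROC_PCI_PM_CAP);	/* hypothetical read of 0x48 */

val &= ~IPROC_PCI_PM_CAP_MASK;			/* drop the corrupted low 16 bits */
val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM;	/* ID = PM (0x01), next = 0xac */
/* the chain now continues at 0xac, where the PCIe capability is
 * synthesized as a v2 root port that terminates the list */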
+
static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
@@ -509,13 +565,10 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
/* root complex access */
if (busno == 0) {
ret = pci_generic_config_read32(bus, devfn, where, size, val);
- if (ret != PCIBIOS_SUCCESSFUL)
- return ret;
+ if (ret == PCIBIOS_SUCCESSFUL)
+ iproc_pcie_fix_cap(pcie, where, val);
- /* Don't advertise CRS SV support */
- if ((where & ~0x3) == IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL)
- *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
- return PCIBIOS_SUCCESSFUL;
+ return ret;
}
cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
@@ -529,6 +582,25 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
if (size <= 2)
*val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+ /*
+ * For PAXC and PAXCv2, the total number of PFs that one can enumerate
+ * depends on the firmware configuration. Unfortunately, due to an ASIC
+ * bug, unconfigured PFs cannot be properly hidden from the root
+ * complex. As a result, write access to these PFs will cause a bus
+ * lockup on the embedded processor.
+ *
+ * Since all unconfigured PFs are left with an incorrect, stale device
+ * ID of 0x168e (PCI_DEVICE_ID_NX2_57810), we try to catch those
+ * accesses early here and reject them all.
+ */
+#define DEVICE_ID_MASK 0xffff0000
+#define DEVICE_ID_SHIFT 16
+ if (pcie->rej_unconfig_pf &&
+ (where & CFG_ADDR_REG_NUM_MASK) == PCI_VENDOR_ID)
+ if ((*val & DEVICE_ID_MASK) ==
+ (PCI_DEVICE_ID_NX2_57810 << DEVICE_ID_SHIFT))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+
return PCIBIOS_SUCCESSFUL;
}
@@ -628,7 +700,7 @@ static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
struct iproc_pcie *pcie = iproc_data(bus);
iproc_pcie_apb_err_disable(bus, true);
- if (pcie->type == IPROC_PCIE_PAXB_V2)
+ if (pcie->iproc_cfg_read)
ret = iproc_pcie_config_read(bus, devfn, where, size, val);
else
ret = pci_generic_config_read32(bus, devfn, where, size, val);
@@ -808,14 +880,14 @@ static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);
- dev_info(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
- window_idx, oarr_offset, &axi_addr, &pci_addr);
- dev_info(dev, "oarr lo 0x%x oarr hi 0x%x\n",
- readl(pcie->base + oarr_offset),
- readl(pcie->base + oarr_offset + 4));
- dev_info(dev, "omap lo 0x%x omap hi 0x%x\n",
- readl(pcie->base + omap_offset),
- readl(pcie->base + omap_offset + 4));
+ dev_dbg(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
+ window_idx, oarr_offset, &axi_addr, &pci_addr);
+ dev_dbg(dev, "oarr lo 0x%x oarr hi 0x%x\n",
+ readl(pcie->base + oarr_offset),
+ readl(pcie->base + oarr_offset + 4));
+ dev_dbg(dev, "omap lo 0x%x omap hi 0x%x\n",
+ readl(pcie->base + omap_offset),
+ readl(pcie->base + omap_offset + 4));
return 0;
}
@@ -982,8 +1054,8 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
iproc_pcie_reg_is_invalid(imap_offset))
return -EINVAL;
- dev_info(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
- region_idx, iarr_offset, &axi_addr, &pci_addr);
+ dev_dbg(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
+ region_idx, iarr_offset, &axi_addr, &pci_addr);
/*
* Program the IARR registers. The upper 32-bit IARR register is
@@ -993,9 +1065,9 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
pcie->base + iarr_offset);
writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);
- dev_info(dev, "iarr lo 0x%x iarr hi 0x%x\n",
- readl(pcie->base + iarr_offset),
- readl(pcie->base + iarr_offset + 4));
+ dev_dbg(dev, "iarr lo 0x%x iarr hi 0x%x\n",
+ readl(pcie->base + iarr_offset),
+ readl(pcie->base + iarr_offset + 4));
/*
* Now program the IMAP registers. Each IARR region may have one or
@@ -1009,10 +1081,10 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
writel(upper_32_bits(axi_addr),
pcie->base + imap_offset + ib_map->imap_addr_offset);
- dev_info(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
- window_idx, readl(pcie->base + imap_offset),
- readl(pcie->base + imap_offset +
- ib_map->imap_addr_offset));
+ dev_dbg(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
+ window_idx, readl(pcie->base + imap_offset),
+ readl(pcie->base + imap_offset +
+ ib_map->imap_addr_offset));
imap_offset += ib_map->imap_window_offset;
axi_addr += size;
@@ -1144,10 +1216,22 @@ static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
return ret;
}
-static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
+static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr,
+ bool enable)
{
u32 val;
+ if (!enable) {
+ /*
+ * Disable PAXC MSI steering. All write transfers will be
+ * treated as non-MSI transfers
+ */
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
+ val &= ~MSI_ENABLE_CFG;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
+ return;
+ }
+
/*
* Program bits [43:13] of address of GITS_TRANSLATER register into
* bits [30:0] of the MSI base address register. In fact, in all iProc
@@ -1201,7 +1285,7 @@ static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
return ret;
break;
case IPROC_PCIE_PAXC_V2:
- iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr);
+ iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr, true);
break;
default:
return -EINVAL;
@@ -1271,6 +1355,7 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
break;
case IPROC_PCIE_PAXB:
regs = iproc_pcie_reg_paxb;
+ pcie->iproc_cfg_read = true;
pcie->has_apb_err_disable = true;
if (pcie->need_ob_cfg) {
pcie->ob_map = paxb_ob_map;
@@ -1293,10 +1378,14 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
case IPROC_PCIE_PAXC:
regs = iproc_pcie_reg_paxc;
pcie->ep_is_internal = true;
+ pcie->iproc_cfg_read = true;
+ pcie->rej_unconfig_pf = true;
break;
case IPROC_PCIE_PAXC_V2:
regs = iproc_pcie_reg_paxc_v2;
pcie->ep_is_internal = true;
+ pcie->iproc_cfg_read = true;
+ pcie->rej_unconfig_pf = true;
pcie->need_msi_steer = true;
break;
default:
@@ -1427,6 +1516,24 @@ int iproc_pcie_remove(struct iproc_pcie *pcie)
}
EXPORT_SYMBOL(iproc_pcie_remove);
+/*
+ * The MSI parsing logic in certain revisions of Broadcom PAXC-based root
+ * complexes does not work and needs to be disabled
+ */
+static void quirk_paxc_disable_msi_parsing(struct pci_dev *pdev)
+{
+ struct iproc_pcie *pcie = iproc_data(pdev->bus);
+
+ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ iproc_pcie_paxc_v2_msi_steer(pcie, 0, false);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0,
+ quirk_paxc_disable_msi_parsing);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802,
+ quirk_paxc_disable_msi_parsing);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804,
+ quirk_paxc_disable_msi_parsing);
+
MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
MODULE_LICENSE("GPL v2");
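The three quirks above hook into the generic PCI fixup machinery. As a hedged sketch of the pattern (the device match below is illustrative, not one of the affected IDs), an early fixup is declared like this and runs as each matching function's config header is read:

static void example_fixup(struct pci_dev *pdev)
{
	pci_info(pdev, "example early fixup applied\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, PCI_ANY_ID, example_fixup);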
diff --git a/drivers/pci/controller/pcie-iproc.h b/drivers/pci/controller/pcie-iproc.h
index 814b600b383a..4f03ea539805 100644
--- a/drivers/pci/controller/pcie-iproc.h
+++ b/drivers/pci/controller/pcie-iproc.h
@@ -58,8 +58,13 @@ struct iproc_msi;
* @phy: optional PHY device that controls the Serdes
* @map_irq: function callback to map interrupts
* @ep_is_internal: indicates an internal emulated endpoint device is connected
+ * @iproc_cfg_read: indicates the iProc config read function should be used
+ * @rej_unconfig_pf: indicates the root complex needs to detect and reject
+ * enumeration against unconfigured physical functions emulated in the ASIC
* @has_apb_err_disable: indicates the controller can be configured to prevent
* unsupported request from being forwarded as an APB bus error
+ * @fix_paxc_cap: indicates the controller has corrupted capability list in its
+ * config space registers and requires SW based fixup
*
* @need_ob_cfg: indicates SW needs to configure the outbound mapping window
* @ob: outbound mapping related parameters
@@ -84,7 +89,10 @@ struct iproc_pcie {
struct phy *phy;
int (*map_irq)(const struct pci_dev *, u8, u8);
bool ep_is_internal;
+ bool iproc_cfg_read;
+ bool rej_unconfig_pf;
bool has_apb_err_disable;
+ bool fix_paxc_cap;
bool need_ob_cfg;
struct iproc_pcie_ob ob;
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index cf0aa7cee5b0..a939e8d31735 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -23,6 +23,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include "../pci.h"
+
/* register offsets and bit positions */
/*
@@ -130,7 +132,7 @@ struct mobiveil_pcie {
void __iomem *config_axi_slave_base; /* endpoint config base */
void __iomem *csr_axi_slave_base; /* root port config base */
void __iomem *apb_csr_base; /* MSI register base */
- void __iomem *pcie_reg_base; /* Physical PCIe Controller Base */
+ phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
struct irq_domain *intx_domain;
raw_spinlock_t intx_mask_lock;
int irq;
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 6beba8ed7b84..b8163c56a142 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -472,7 +472,7 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
enum pci_epc_irq_type type,
- u8 interrupt_num)
+ u16 interrupt_num)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 942b64fc7f1f..fd2dbd7eed7b 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -197,9 +197,20 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d
int i, best = 1;
unsigned long flags;
- if (pci_is_bridge(msi_desc_to_pci_dev(desc)) || vmd->msix_count == 1)
+ if (vmd->msix_count == 1)
return &vmd->irqs[0];
+ /*
+ * Whitelist of fast-interrupt handlers. All others will share the
+ * "slow" interrupt vector.
+ */
+ switch (msi_desc_to_pci_dev(desc)->class) {
+ case PCI_CLASS_STORAGE_EXPRESS:
+ break;
+ default:
+ return &vmd->irqs[0];
+ }
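For clarity, the matched class code decomposes as follows; the helper call is the same one used above:

struct pci_dev *dev = msi_desc_to_pci_dev(desc);
u8 base = dev->class >> 16;		/* 0x01: mass storage controller */
u8 sub  = (dev->class >> 8) & 0xff;	/* 0x08: non-volatile memory     */
u8 prog = dev->class & 0xff;		/* 0x02: NVM Express interface   */
/* PCI_CLASS_STORAGE_EXPRESS == 0x010802, i.e. NVMe controllers only */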
+
raw_spin_lock_irqsave(&list_lock, flags);
for (i = 1; i < vmd->msix_count; i++)
if (vmd->irqs[i].count < vmd->irqs[best].count)
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 63ed706445b9..3e86fa3c7da3 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -18,13 +18,16 @@
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>
+#define IRQ_TYPE_LEGACY 0
+#define IRQ_TYPE_MSI 1
+#define IRQ_TYPE_MSIX 2
+
#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
-#define MSI_NUMBER_SHIFT 2
-#define MSI_NUMBER_MASK (0x3f << MSI_NUMBER_SHIFT)
-#define COMMAND_READ BIT(8)
-#define COMMAND_WRITE BIT(9)
-#define COMMAND_COPY BIT(10)
+#define COMMAND_RAISE_MSIX_IRQ BIT(2)
+#define COMMAND_READ BIT(3)
+#define COMMAND_WRITE BIT(4)
+#define COMMAND_COPY BIT(5)
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
@@ -45,6 +48,7 @@ struct pci_epf_test {
struct pci_epf *epf;
enum pci_barno test_reg_bar;
bool linkup_notifier;
+ bool msix_available;
struct delayed_work cmd_handler;
};
@@ -56,6 +60,8 @@ struct pci_epf_test_reg {
u64 dst_addr;
u32 size;
u32 checksum;
+ u32 irq_type;
+ u32 irq_number;
} __packed;
static struct pci_epf_header test_header = {
@@ -244,31 +250,42 @@ err:
return ret;
}
-static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
+static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
+ u16 irq)
{
- u8 msi_count;
struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
reg->status |= STATUS_IRQ_RAISED;
- msi_count = pci_epc_get_msi(epc, epf->func_no);
- if (irq > msi_count || msi_count <= 0)
+
+ switch (irq_type) {
+ case IRQ_TYPE_LEGACY:
pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
- else
+ break;
+ case IRQ_TYPE_MSI:
pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
+ break;
+ case IRQ_TYPE_MSIX:
+ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
+ break;
+ default:
+ dev_err(dev, "Failed to raise IRQ, unknown type\n");
+ break;
+ }
}
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
int ret;
- u8 irq;
- u8 msi_count;
+ int count;
u32 command;
struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
cmd_handler.work);
struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
@@ -280,7 +297,10 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->command = 0;
reg->status = 0;
- irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
+ if (reg->irq_type > IRQ_TYPE_MSIX) {
+ dev_err(dev, "Failed to detect IRQ type\n");
+ goto reset_handler;
+ }
if (command & COMMAND_RAISE_LEGACY_IRQ) {
reg->status = STATUS_IRQ_RAISED;
@@ -294,7 +314,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_WRITE_FAIL;
else
reg->status |= STATUS_WRITE_SUCCESS;
- pci_epf_test_raise_irq(epf_test, irq);
+ pci_epf_test_raise_irq(epf_test, reg->irq_type,
+ reg->irq_number);
goto reset_handler;
}
@@ -304,7 +325,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_READ_SUCCESS;
else
reg->status |= STATUS_READ_FAIL;
- pci_epf_test_raise_irq(epf_test, irq);
+ pci_epf_test_raise_irq(epf_test, reg->irq_type,
+ reg->irq_number);
goto reset_handler;
}
@@ -314,16 +336,28 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_COPY_SUCCESS;
else
reg->status |= STATUS_COPY_FAIL;
- pci_epf_test_raise_irq(epf_test, irq);
+ pci_epf_test_raise_irq(epf_test, reg->irq_type,
+ reg->irq_number);
goto reset_handler;
}
if (command & COMMAND_RAISE_MSI_IRQ) {
- msi_count = pci_epc_get_msi(epc, epf->func_no);
- if (irq > msi_count || msi_count <= 0)
+ count = pci_epc_get_msi(epc, epf->func_no);
+ if (reg->irq_number > count || count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
+ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
+ reg->irq_number);
+ goto reset_handler;
+ }
+
+ if (command & COMMAND_RAISE_MSIX_IRQ) {
+ count = pci_epc_get_msix(epc, epf->func_no);
+ if (reg->irq_number > count || count <= 0)
+ goto reset_handler;
+ reg->status = STATUS_IRQ_RAISED;
+ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
+ reg->irq_number);
goto reset_handler;
}
@@ -440,6 +474,8 @@ static int pci_epf_test_bind(struct pci_epf *epf)
else
epf_test->linkup_notifier = true;
+ epf_test->msix_available = epc->features & EPC_FEATURE_MSIX_AVAILABLE;
+
epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features);
ret = pci_epc_write_header(epc, epf->func_no, header);
@@ -457,8 +493,18 @@ static int pci_epf_test_bind(struct pci_epf *epf)
return ret;
ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
- if (ret)
+ if (ret) {
+ dev_err(dev, "MSI configuration failed\n");
return ret;
+ }
+
+ if (epf_test->msix_available) {
+ ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
+ if (ret) {
+ dev_err(dev, "MSI-X configuration failed\n");
+ return ret;
+ }
+ }
if (!epf_test->linkup_notifier)
queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
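Put together, the register layout and command bits above define the test protocol: the host writes the IRQ parameters into the test BAR, then sets a command bit, and the endpoint's command handler raises the requested interrupt. A hedged host-side sketch; map_test_bar() is hypothetical and the offsets follow the __packed struct pci_epf_test_reg:

void __iomem *reg = map_test_bar(pdev);		/* hypothetical BAR mapping */

writel(IRQ_TYPE_MSIX, reg + offsetof(struct pci_epf_test_reg, irq_type));
writel(3, reg + offsetof(struct pci_epf_test_reg, irq_number));
writel(COMMAND_RAISE_MSIX_IRQ,
       reg + offsetof(struct pci_epf_test_reg, command));
/* the endpoint's cmd_handler now raises MSI-X vector 3 */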
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 018ea3433cb5..d1288a0bd530 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -286,6 +286,28 @@ static ssize_t pci_epf_msi_interrupts_show(struct config_item *item,
to_pci_epf_group(item)->epf->msi_interrupts);
}
+static ssize_t pci_epf_msix_interrupts_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ u16 val;
+ int ret;
+
+ ret = kstrtou16(page, 0, &val);
+ if (ret)
+ return ret;
+
+ to_pci_epf_group(item)->epf->msix_interrupts = val;
+
+ return len;
+}
+
+static ssize_t pci_epf_msix_interrupts_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%d\n",
+ to_pci_epf_group(item)->epf->msix_interrupts);
+}
+
PCI_EPF_HEADER_R(vendorid)
PCI_EPF_HEADER_W_u16(vendorid)
@@ -327,6 +349,7 @@ CONFIGFS_ATTR(pci_epf_, subsys_vendor_id);
CONFIGFS_ATTR(pci_epf_, subsys_id);
CONFIGFS_ATTR(pci_epf_, interrupt_pin);
CONFIGFS_ATTR(pci_epf_, msi_interrupts);
+CONFIGFS_ATTR(pci_epf_, msix_interrupts);
static struct configfs_attribute *pci_epf_attrs[] = {
&pci_epf_attr_vendorid,
@@ -340,6 +363,7 @@ static struct configfs_attribute *pci_epf_attrs[] = {
&pci_epf_attr_subsys_id,
&pci_epf_attr_interrupt_pin,
&pci_epf_attr_msi_interrupts,
+ &pci_epf_attr_msix_interrupts,
NULL,
};
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index b0ee42739c3c..094dcc3203b8 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -131,13 +131,13 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
* pci_epc_raise_irq() - interrupt the host system
* @epc: the EPC device which has to interrupt the host
* @func_no: the endpoint function number in the EPC device
- * @type: specify the type of interrupt; legacy or MSI
- * @interrupt_num: the MSI interrupt number
+ * @type: specify the type of interrupt; legacy, MSI or MSI-X
+ * @interrupt_num: the MSI or MSI-X interrupt number
*
- * Invoke to raise an MSI or legacy interrupt
+ * Invoke to raise a legacy, MSI or MSI-X interrupt
*/
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
- enum pci_epc_irq_type type, u8 interrupt_num)
+ enum pci_epc_irq_type type, u16 interrupt_num)
{
int ret;
unsigned long flags;
@@ -201,7 +201,8 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
u8 encode_int;
unsigned long flags;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
+ interrupts > 32)
return -EINVAL;
if (!epc->ops->set_msi)
@@ -218,6 +219,63 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
EXPORT_SYMBOL_GPL(pci_epc_set_msi);
/**
+ * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
+ * @epc: the EPC device to which MSI-X interrupts were requested
+ * @func_no: the endpoint function number in the EPC device
+ *
+ * Invoke to get the number of MSI-X interrupts allocated by the RC
+ */
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
+{
+ int interrupt;
+ unsigned long flags;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return 0;
+
+ if (!epc->ops->get_msix)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ interrupt = epc->ops->get_msix(epc, func_no);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ if (interrupt < 0)
+ return 0;
+
+ return interrupt + 1;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_msix);
+
+/**
+ * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
+ * @epc: the EPC device on which MSI-X has to be configured
+ * @func_no: the endpoint function number in the EPC device
+ * @interrupts: number of MSI-X interrupts required by the EPF
+ *
+ * Invoke to set the required number of MSI-X interrupts.
+ */
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+{
+ int ret;
+ unsigned long flags;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
+ interrupts < 1 || interrupts > 2048)
+ return -EINVAL;
+
+ if (!epc->ops->set_msix)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_set_msix);
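Taken together with the pci-epf-test changes earlier in the diff, the intended call flow from an endpoint function driver looks roughly like this (a sketch, not an API mandate):

static int example_epf_raise_msix(struct pci_epf *epf, u16 irq_number)
{
	struct pci_epc *epc = epf->epc;
	int count;

	/* how many MSI-X vectors did the host actually allocate? */
	count = pci_epc_get_msix(epc, epf->func_no);
	if (count <= 0 || irq_number > count)
		return -EINVAL;

	return pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
				 irq_number);
}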
+
+/**
* pci_epc_unmap_addr() - unmap CPU address from PCI address
* @epc: the EPC device on which address is allocated
* @func_no: the endpoint function number in the EPC device
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 5bd6c1573295..6b7c1ed58e7e 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -74,20 +74,6 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
/*
- * Per PCI firmware specification, we should run the ACPI _OSC
- * method to get control of hotplug hardware before using it. If
- * an _OSC is missing, we look for an OSHP to do the same thing.
- * To handle different BIOS behavior, we look for _OSC on a root
- * bridge preferentially (according to PCI fw spec). Later for
- * OSHP within the scope of the hotplug controller and its parents,
- * up to the host bridge under which this controller exists.
- */
- if (shpchp_is_native(pdev))
- return 0;
-
- /* If _OSC exists, we should not evaluate OSHP */
-
- /*
* If there's no ACPI host bridge (i.e., ACPI support is compiled
* into the kernel but the hardware platform doesn't support ACPI),
* there's nothing to do here.
@@ -97,9 +83,25 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
if (!root)
return 0;
- if (root->osc_support_set)
- goto no_control;
+ /*
+ * If _OSC exists, it determines whether we're allowed to manage
+ * the SHPC. We executed it while enumerating the host bridge.
+ */
+ if (root->osc_support_set) {
+ if (host->native_shpc_hotplug)
+ return 0;
+ return -ENODEV;
+ }
+ /*
+ * In the absence of _OSC, we're always allowed to manage the SHPC.
+ * However, if an OSHP method is present, we must execute it so the
+ * firmware can transfer control to the OS, e.g., direct interrupts
+ * to the OS instead of to the firmware.
+ *
+ * N.B. The PCI Firmware Spec (r3.2, sec 4.8) does not endorse
+ * searching up the ACPI hierarchy, so the loops below are suspect.
+ */
handle = ACPI_HANDLE(&pdev->dev);
if (!handle) {
/*
@@ -128,7 +130,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
if (ACPI_FAILURE(status))
break;
}
-no_control:
+
pci_info(pdev, "Cannot get control of SHPC hotplug\n");
kfree(string.pointer);
return -ENODEV;
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 12b5655fd390..ad32ffbc4b91 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -254,20 +254,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-/**
- * release_slot - free up the memory used by a slot
- * @hotplug_slot: slot to free
- */
-static void release_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
-
- kfree(slot->hotplug_slot);
- kfree(slot);
-}
-
/* callback routine to initialize 'struct slot' for each slot */
int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
unsigned int sun)
@@ -287,7 +273,6 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
slot->hotplug_slot->info = &slot->info;
slot->hotplug_slot->private = slot;
- slot->hotplug_slot->release = &release_slot;
slot->hotplug_slot->ops = &acpi_hotplug_slot_ops;
slot->acpi_slot = acpiphp_slot;
@@ -324,13 +309,12 @@ error:
void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
{
struct slot *slot = acpiphp_slot->slot;
- int retval = 0;
pr_info("Slot [%s] unregistered\n", slot_name(slot));
- retval = pci_hp_deregister(slot->hotplug_slot);
- if (retval)
- pr_err("pci_hp_deregister failed with error %d\n", retval);
+ pci_hp_deregister(slot->hotplug_slot);
+ kfree(slot->hotplug_slot);
+ kfree(slot);
}
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 07b533adc9df..52a339baf06c 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -195,10 +195,8 @@ get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static void release_slot(struct hotplug_slot *hotplug_slot)
+static void release_slot(struct slot *slot)
{
- struct slot *slot = hotplug_slot->private;
-
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
pci_dev_put(slot->dev);
@@ -253,7 +251,6 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i);
hotplug_slot->private = slot;
- hotplug_slot->release = &release_slot;
hotplug_slot->ops = &cpci_hotplug_slot_ops;
/*
@@ -308,12 +305,8 @@ cpci_hp_unregister_bus(struct pci_bus *bus)
slots--;
dbg("deregistering slot %s", slot_name(slot));
- status = pci_hp_deregister(slot->hotplug_slot);
- if (status) {
- err("pci_hp_deregister failed with error %d",
- status);
- break;
- }
+ pci_hp_deregister(slot->hotplug_slot);
+ release_slot(slot);
}
}
up_write(&list_rwsem);
@@ -623,6 +616,7 @@ cleanup_slots(void)
list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) {
list_del(&slot->slot_list);
pci_hp_deregister(slot->hotplug_slot);
+ release_slot(slot);
}
cleanup_null:
up_write(&list_rwsem);
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 1797e36ec586..5a06636e910a 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -266,17 +266,6 @@ static void __iomem *get_SMBIOS_entry(void __iomem *smbios_start,
return previous;
}
-static void release_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
-
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
- kfree(slot);
-}
-
static int ctrl_slot_cleanup(struct controller *ctrl)
{
struct slot *old_slot, *next_slot;
@@ -285,9 +274,11 @@ static int ctrl_slot_cleanup(struct controller *ctrl)
ctrl->slot = NULL;
while (old_slot) {
- /* memory will be freed by the release_slot callback */
next_slot = old_slot->next;
pci_hp_deregister(old_slot->hotplug_slot);
+ kfree(old_slot->hotplug_slot->info);
+ kfree(old_slot->hotplug_slot);
+ kfree(old_slot);
old_slot = next_slot;
}
@@ -678,7 +669,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
/* register this slot with the hotplug pci core */
- hotplug_slot->release = &release_slot;
hotplug_slot->private = slot;
snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 1869b0411ce0..4ea57e9019f1 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -673,7 +673,20 @@ static void free_slots(void)
list_for_each_entry_safe(slot_cur, next, &ibmphp_slot_head,
ibm_slot_list) {
- pci_hp_deregister(slot_cur->hotplug_slot);
+ pci_hp_del(slot_cur->hotplug_slot);
+ slot_cur->ctrl = NULL;
+ slot_cur->bus_on = NULL;
+
+ /*
+ * We don't want to actually remove the resources,
+ * since ibmphp_free_resources() will do just that.
+ */
+ ibmphp_unconfigure_card(&slot_cur, -1);
+
+ pci_hp_destroy(slot_cur->hotplug_slot);
+ kfree(slot_cur->hotplug_slot->info);
+ kfree(slot_cur->hotplug_slot);
+ kfree(slot_cur);
}
debug("%s -- exit\n", __func__);
}
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 64549aa24c0f..6f8e90e3ec08 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -699,25 +699,6 @@ static int fillslotinfo(struct hotplug_slot *hotplug_slot)
return rc;
}
-static void release_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot;
-
- if (!hotplug_slot || !hotplug_slot->private)
- return;
-
- slot = hotplug_slot->private;
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
- slot->ctrl = NULL;
- slot->bus_on = NULL;
-
- /* we don't want to actually remove the resources, since free_resources will do just that */
- ibmphp_unconfigure_card(&slot, -1);
-
- kfree(slot);
-}
-
static struct pci_driver ibmphp_driver;
/*
@@ -941,7 +922,6 @@ static int __init ebda_rsrc_controller(void)
tmp_slot->hotplug_slot = hp_slot_ptr;
hp_slot_ptr->private = tmp_slot;
- hp_slot_ptr->release = release_slot;
rc = fillslotinfo(hp_slot_ptr);
if (rc)
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index af92fed46ab7..90fde5f106d8 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -396,8 +396,9 @@ static struct hotplug_slot *get_slot_from_name(const char *name)
* @owner: caller module owner
* @mod_name: caller module name
*
- * Registers a hotplug slot with the pci hotplug subsystem, which will allow
- * userspace interaction to the slot.
+ * Prepares a hotplug slot for in-kernel use and immediately publishes it to
+ * user space in one go. Drivers may alternatively carry out the two steps
+ * separately by invoking pci_hp_initialize() and pci_hp_add().
*
* Returns 0 if successful, anything else for an error.
*/
@@ -406,45 +407,91 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
struct module *owner, const char *mod_name)
{
int result;
+
+ result = __pci_hp_initialize(slot, bus, devnr, name, owner, mod_name);
+ if (result)
+ return result;
+
+ result = pci_hp_add(slot);
+ if (result)
+ pci_hp_destroy(slot);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(__pci_hp_register);
+
+/**
+ * __pci_hp_initialize - prepare hotplug slot for in-kernel use
+ * @slot: pointer to the &struct hotplug_slot to initialize
+ * @bus: bus this slot is on
+ * @devnr: slot number
+ * @name: name registered with kobject core
+ * @owner: caller module owner
+ * @mod_name: caller module name
+ *
+ * Allocate and fill in a PCI slot for use by a hotplug driver. Once this has
+ * been called, the driver may invoke hotplug_slot_name() to get the slot's
+ * unique name. The driver must be prepared to handle a ->reset_slot callback
+ * from this point on.
+ *
+ * Returns 0 on success or a negative int on error.
+ */
+int __pci_hp_initialize(struct hotplug_slot *slot, struct pci_bus *bus,
+ int devnr, const char *name, struct module *owner,
+ const char *mod_name)
+{
struct pci_slot *pci_slot;
if (slot == NULL)
return -ENODEV;
if ((slot->info == NULL) || (slot->ops == NULL))
return -EINVAL;
- if (slot->release == NULL) {
- dbg("Why are you trying to register a hotplug slot without a proper release function?\n");
- return -EINVAL;
- }
slot->ops->owner = owner;
slot->ops->mod_name = mod_name;
- mutex_lock(&pci_hp_mutex);
/*
* No problems if we call this interface from both ACPI_PCI_SLOT
* driver and call it here again. If we've already created the
* pci_slot, the interface will simply bump the refcount.
*/
pci_slot = pci_create_slot(bus, devnr, name, slot);
- if (IS_ERR(pci_slot)) {
- result = PTR_ERR(pci_slot);
- goto out;
- }
+ if (IS_ERR(pci_slot))
+ return PTR_ERR(pci_slot);
slot->pci_slot = pci_slot;
pci_slot->hotplug = slot;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__pci_hp_initialize);
- list_add(&slot->slot_list, &pci_hotplug_slot_list);
+/**
+ * pci_hp_add - publish hotplug slot to user space
+ * @slot: pointer to the &struct hotplug_slot to publish
+ *
+ * Make a hotplug slot's sysfs interface available and inform user space of its
+ * addition by sending a uevent. The hotplug driver must be prepared to handle
+ * all &struct hotplug_slot_ops callbacks from this point on.
+ *
+ * Returns 0 on success or a negative int on error.
+ */
+int pci_hp_add(struct hotplug_slot *slot)
+{
+ struct pci_slot *pci_slot = slot->pci_slot;
+ int result;
result = fs_add_slot(pci_slot);
+ if (result)
+ return result;
+
kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
- dbg("Added slot %s to the list\n", name);
-out:
+ mutex_lock(&pci_hp_mutex);
+ list_add(&slot->slot_list, &pci_hotplug_slot_list);
mutex_unlock(&pci_hp_mutex);
- return result;
+ dbg("Added slot %s to the list\n", hotplug_slot_name(slot));
+ return 0;
}
-EXPORT_SYMBOL_GPL(__pci_hp_register);
+EXPORT_SYMBOL_GPL(pci_hp_add);
/**
* pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem
@@ -455,35 +502,62 @@ EXPORT_SYMBOL_GPL(__pci_hp_register);
*
* Returns 0 if successful, anything else for an error.
*/
-int pci_hp_deregister(struct hotplug_slot *slot)
+void pci_hp_deregister(struct hotplug_slot *slot)
+{
+ pci_hp_del(slot);
+ pci_hp_destroy(slot);
+}
+EXPORT_SYMBOL_GPL(pci_hp_deregister);
+
+/**
+ * pci_hp_del - unpublish hotplug slot from user space
+ * @slot: pointer to the &struct hotplug_slot to unpublish
+ *
+ * Remove a hotplug slot's sysfs interface.
+ */
+void pci_hp_del(struct hotplug_slot *slot)
{
struct hotplug_slot *temp;
- struct pci_slot *pci_slot;
- if (!slot)
- return -ENODEV;
+ if (WARN_ON(!slot))
+ return;
mutex_lock(&pci_hp_mutex);
temp = get_slot_from_name(hotplug_slot_name(slot));
- if (temp != slot) {
+ if (WARN_ON(temp != slot)) {
mutex_unlock(&pci_hp_mutex);
- return -ENODEV;
+ return;
}
list_del(&slot->slot_list);
-
- pci_slot = slot->pci_slot;
- fs_remove_slot(pci_slot);
+ mutex_unlock(&pci_hp_mutex);
dbg("Removed slot %s from the list\n", hotplug_slot_name(slot));
+ fs_remove_slot(slot->pci_slot);
+}
+EXPORT_SYMBOL_GPL(pci_hp_del);
- slot->release(slot);
+/**
+ * pci_hp_destroy - remove hotplug slot from in-kernel use
+ * @slot: pointer to the &struct hotplug_slot to destroy
+ *
+ * Destroy a PCI slot used by a hotplug driver. Once this has been called,
+ * the driver may no longer invoke hotplug_slot_name() to get the slot's
+ * unique name. The driver no longer needs to handle a ->reset_slot callback
+ * from this point on.
+ */
+void pci_hp_destroy(struct hotplug_slot *slot)
+{
+ struct pci_slot *pci_slot = slot->pci_slot;
+
+ slot->pci_slot = NULL;
pci_slot->hotplug = NULL;
pci_destroy_slot(pci_slot);
- mutex_unlock(&pci_hp_mutex);
-
- return 0;
}
-EXPORT_SYMBOL_GPL(pci_hp_deregister);
+EXPORT_SYMBOL_GPL(pci_hp_destroy);
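The split lifecycle lets drivers run teardown work that still needs the slot between unpublishing and destroying it, as the ibmphp conversion earlier in this diff does. A condensed sketch of that pattern:

pci_hp_del(slot->hotplug_slot);		/* sysfs interface removed */
/* ... driver-specific cleanup that may still reference the slot ... */
pci_hp_destroy(slot->hotplug_slot);	/* in-kernel teardown */
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
kfree(slot);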
/**
* pci_hp_change_slot_info - changes the slot's information structure in the core
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 5f892065585e..811cf83f956d 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/sched/signal.h> /* signal_pending() */
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include "../pcie/portdrv.h"
@@ -57,49 +58,111 @@ do { \
dev_warn(&ctrl->pcie->device, format, ## arg)
#define SLOT_NAME_SIZE 10
+
+/**
+ * struct slot - PCIe hotplug slot
+ * @state: current state machine position
+ * @ctrl: pointer to the slot's controller structure
+ * @hotplug_slot: pointer to the structure registered with the PCI hotplug core
+ * @work: work item to turn the slot on or off after 5 seconds in response to
+ * an Attention Button press
+ * @lock: protects reads and writes of @state;
+ * protects scheduling, execution and cancellation of @work
+ */
struct slot {
u8 state;
struct controller *ctrl;
struct hotplug_slot *hotplug_slot;
- struct delayed_work work; /* work for button event */
+ struct delayed_work work;
struct mutex lock;
- struct mutex hotplug_lock;
- struct workqueue_struct *wq;
-};
-
-struct event_info {
- u32 event_type;
- struct slot *p_slot;
- struct work_struct work;
};
+/**
+ * struct controller - PCIe hotplug controller
+ * @ctrl_lock: serializes writes to the Slot Control register
+ * @pcie: pointer to the controller's PCIe port service device
+ * @reset_lock: prevents access to the Data Link Layer Link Active bit in the
+ * Link Status register and to the Presence Detect State bit in the Slot
+ * Status register during a slot reset which may cause them to flap
+ * @slot: pointer to the controller's slot structure
+ * @queue: wait queue to wake up on reception of a Command Completed event,
+ * used for synchronous writes to the Slot Control register
+ * @slot_cap: cached copy of the Slot Capabilities register
+ * @slot_ctrl: cached copy of the Slot Control register
+ * @poll_thread: thread to poll for slot events if no IRQ is available,
+ * enabled with pciehp_poll_mode module parameter
+ * @cmd_started: jiffies when the Slot Control register was last written;
+ * the next write is allowed 1 second later, absent a Command Completed
+ * interrupt (PCIe r4.0, sec 6.7.3.2)
+ * @cmd_busy: flag set on Slot Control register write, cleared by IRQ handler
+ * on reception of a Command Completed event
+ * @link_active_reporting: cached copy of Data Link Layer Link Active Reporting
+ * Capable bit in Link Capabilities register; if this bit is zero, the
+ * Data Link Layer Link Active bit in the Link Status register will never
+ * be set and the driver is thus confined to waiting 1 second before assuming
+ * the link to a hotplugged device is up and accessing it
+ * @notification_enabled: whether the IRQ was requested successfully
+ * @power_fault_detected: whether a power fault was detected by the hardware
+ * that has not yet been cleared by the user
+ * @pending_events: used by the IRQ handler to save events retrieved from the
+ * Slot Status register for later consumption by the IRQ thread
+ * @request_result: result of last user request submitted to the IRQ thread
+ * @requester: wait queue to wake up on completion of user request,
+ * used for synchronous slot enable/disable request via sysfs
+ */
struct controller {
- struct mutex ctrl_lock; /* controller lock */
- struct pcie_device *pcie; /* PCI Express port service */
+ struct mutex ctrl_lock;
+ struct pcie_device *pcie;
+ struct rw_semaphore reset_lock;
struct slot *slot;
- wait_queue_head_t queue; /* sleep & wake process */
+ wait_queue_head_t queue;
u32 slot_cap;
u16 slot_ctrl;
- struct timer_list poll_timer;
+ struct task_struct *poll_thread;
unsigned long cmd_started; /* jiffies */
unsigned int cmd_busy:1;
unsigned int link_active_reporting:1;
unsigned int notification_enabled:1;
unsigned int power_fault_detected;
+ atomic_t pending_events;
+ int request_result;
+ wait_queue_head_t requester;
};
-#define INT_PRESENCE_ON 1
-#define INT_PRESENCE_OFF 2
-#define INT_POWER_FAULT 3
-#define INT_BUTTON_PRESS 4
-#define INT_LINK_UP 5
-#define INT_LINK_DOWN 6
-
-#define STATIC_STATE 0
+/**
+ * DOC: Slot state
+ *
+ * @OFF_STATE: slot is powered off, no subordinate devices are enumerated
+ * @BLINKINGON_STATE: slot will be powered on after the 5 second delay,
+ *	green LED is blinking
+ * @BLINKINGOFF_STATE: slot will be powered off after the 5 second delay,
+ *	green LED is blinking
+ * @POWERON_STATE: slot is currently powering on
+ * @POWEROFF_STATE: slot is currently powering off
+ * @ON_STATE: slot is powered on, subordinate devices have been enumerated
+ */
+#define OFF_STATE 0
#define BLINKINGON_STATE 1
#define BLINKINGOFF_STATE 2
#define POWERON_STATE 3
#define POWEROFF_STATE 4
+#define ON_STATE 5
+
+/**
+ * DOC: Flags to request an action from the IRQ thread
+ *
+ * These are stored together with events read from the Slot Status register,
+ * hence must be greater than its 16-bit width.
+ *
+ * %DISABLE_SLOT: Disable the slot in response to a user request via sysfs or
+ * an Attention Button press after the 5 second delay
+ * %RERUN_ISR: Used by the IRQ handler to inform the IRQ thread that the
+ * hotplug port was inaccessible when the interrupt occurred, requiring
+ *	that the IRQ handler be rerun by the IRQ thread after it has made the
+ * hotplug port accessible by runtime resuming its parents to D0
+ */
+#define DISABLE_SLOT (1 << 16)
+#define RERUN_ISR (1 << 17)
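Since Slot Status events occupy bits 0-15 of ctrl->pending_events, a request flag below bit 16 would be indistinguishable from a hardware event. A minimal sketch of the shared encoding (illustrative, not part of the patch):

	atomic_or(PCI_EXP_SLTSTA_PDC, &ctrl->pending_events);	/* hardware event */
	atomic_or(DISABLE_SLOT, &ctrl->pending_events);		/* software request */
	events = atomic_xchg(&ctrl->pending_events, 0);		/* IRQ thread consumes both */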
#define ATTN_BUTTN(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_ABP)
#define POWER_CTRL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PCP)
@@ -113,15 +176,17 @@ struct controller {
int pciehp_sysfs_enable_slot(struct slot *slot);
int pciehp_sysfs_disable_slot(struct slot *slot);
-void pciehp_queue_interrupt_event(struct slot *slot, u32 event_type);
+void pciehp_request(struct controller *ctrl, int action);
+void pciehp_handle_button_press(struct slot *slot);
+void pciehp_handle_disable_request(struct slot *slot);
+void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events);
int pciehp_configure_device(struct slot *p_slot);
-int pciehp_unconfigure_device(struct slot *p_slot);
+void pciehp_unconfigure_device(struct slot *p_slot);
void pciehp_queue_pushbutton_work(struct work_struct *work);
struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
-int pciehp_enable_slot(struct slot *p_slot);
-int pciehp_disable_slot(struct slot *p_slot);
-void pcie_reenable_notification(struct controller *ctrl);
+void pcie_shutdown_notification(struct controller *ctrl);
+void pcie_clear_hotplug_events(struct controller *ctrl);
int pciehp_power_on_slot(struct slot *slot);
void pciehp_power_off_slot(struct slot *slot);
void pciehp_get_power_status(struct slot *slot, u8 *status);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 44a6a63802d5..ec48c9433ae5 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -26,11 +26,12 @@
#include <linux/interrupt.h>
#include <linux/time.h>
+#include "../pci.h"
+
/* Global variables */
bool pciehp_debug;
bool pciehp_poll_mode;
int pciehp_poll_time;
-static bool pciehp_force;
/*
* not really modular, but the easiest way to keep compat with existing
@@ -39,11 +40,9 @@ static bool pciehp_force;
module_param(pciehp_debug, bool, 0644);
module_param(pciehp_poll_mode, bool, 0644);
module_param(pciehp_poll_time, int, 0644);
-module_param(pciehp_force, bool, 0644);
MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
-MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing");
#define PCIE_MODULE_NAME "pciehp"
@@ -56,17 +55,6 @@ static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
static int reset_slot(struct hotplug_slot *slot, int probe);
-/**
- * release_slot - free up the memory used by a slot
- * @hotplug_slot: slot to free
- */
-static void release_slot(struct hotplug_slot *hotplug_slot)
-{
- kfree(hotplug_slot->ops);
- kfree(hotplug_slot->info);
- kfree(hotplug_slot);
-}
-
static int init_slot(struct controller *ctrl)
{
struct slot *slot = ctrl->slot;
@@ -107,15 +95,14 @@ static int init_slot(struct controller *ctrl)
/* register this slot with the hotplug pci core */
hotplug->info = info;
hotplug->private = slot;
- hotplug->release = &release_slot;
hotplug->ops = ops;
slot->hotplug_slot = hotplug;
snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
- retval = pci_hp_register(hotplug,
- ctrl->pcie->port->subordinate, 0, name);
+ retval = pci_hp_initialize(hotplug,
+ ctrl->pcie->port->subordinate, 0, name);
if (retval)
- ctrl_err(ctrl, "pci_hp_register failed: error %d\n", retval);
+ ctrl_err(ctrl, "pci_hp_initialize failed: error %d\n", retval);
out:
if (retval) {
kfree(ops);
@@ -127,7 +114,12 @@ out:
static void cleanup_slot(struct controller *ctrl)
{
- pci_hp_deregister(ctrl->slot->hotplug_slot);
+ struct hotplug_slot *hotplug_slot = ctrl->slot->hotplug_slot;
+
+ pci_hp_destroy(hotplug_slot);
+ kfree(hotplug_slot->ops);
+ kfree(hotplug_slot->info);
+ kfree(hotplug_slot);
}
/*
@@ -136,8 +128,11 @@ static void cleanup_slot(struct controller *ctrl)
static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
struct slot *slot = hotplug_slot->private;
+ struct pci_dev *pdev = slot->ctrl->pcie->port;
+ pci_config_pm_runtime_get(pdev);
pciehp_set_attention_status(slot, status);
+ pci_config_pm_runtime_put(pdev);
return 0;
}
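Each accessor now brackets its config space access with a runtime PM reference so the access cannot land on a port whose parent is in D3cold; the recurring pattern, in isolation (illustrative):

	pci_config_pm_runtime_get(pdev);	/* keep config space accessible */
	pciehp_get_power_status(slot, value);
	pci_config_pm_runtime_put(pdev);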
@@ -160,8 +155,11 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
+ struct pci_dev *pdev = slot->ctrl->pcie->port;
+ pci_config_pm_runtime_get(pdev);
pciehp_get_power_status(slot, value);
+ pci_config_pm_runtime_put(pdev);
return 0;
}
@@ -176,16 +174,22 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
+ struct pci_dev *pdev = slot->ctrl->pcie->port;
+ pci_config_pm_runtime_get(pdev);
pciehp_get_latch_status(slot, value);
+ pci_config_pm_runtime_put(pdev);
return 0;
}
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
+ struct pci_dev *pdev = slot->ctrl->pcie->port;
+ pci_config_pm_runtime_get(pdev);
pciehp_get_adapter_status(slot, value);
+ pci_config_pm_runtime_put(pdev);
return 0;
}
@@ -196,12 +200,40 @@ static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
return pciehp_reset_slot(slot, probe);
}
+/**
+ * pciehp_check_presence() - synthesize event if presence has changed
+ * @ctrl: hotplug controller
+ *
+ * On probe and resume, an explicit presence check is necessary to bring up an
+ * occupied slot or bring down an unoccupied slot.  This can't be triggered by
+ * events in the Slot Status register because they may be stale and are
+ * therefore cleared.  Also, sending an interrupt for "events that occur while
+ * interrupt generation is disabled [when] interrupt generation is subsequently
+ * enabled" is optional per PCIe r4.0, sec 6.7.3.4.
+ */
+static void pciehp_check_presence(struct controller *ctrl)
+{
+ struct slot *slot = ctrl->slot;
+ u8 occupied;
+
+ down_read(&ctrl->reset_lock);
+ mutex_lock(&slot->lock);
+
+ pciehp_get_adapter_status(slot, &occupied);
+ if ((occupied && (slot->state == OFF_STATE ||
+ slot->state == BLINKINGON_STATE)) ||
+ (!occupied && (slot->state == ON_STATE ||
+ slot->state == BLINKINGOFF_STATE)))
+ pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
+
+ mutex_unlock(&slot->lock);
+ up_read(&ctrl->reset_lock);
+}
+
static int pciehp_probe(struct pcie_device *dev)
{
int rc;
struct controller *ctrl;
struct slot *slot;
- u8 occupied, poweron;
/* If this is not a "hotplug" service, we have no business here. */
if (dev->service != PCIE_PORT_SERVICE_HP)
@@ -238,21 +270,20 @@ static int pciehp_probe(struct pcie_device *dev)
goto err_out_free_ctrl_slot;
}
- /* Check if slot is occupied */
+ /* Publish to user space */
slot = ctrl->slot;
- pciehp_get_adapter_status(slot, &occupied);
- pciehp_get_power_status(slot, &poweron);
- if (occupied && pciehp_force) {
- mutex_lock(&slot->hotplug_lock);
- pciehp_enable_slot(slot);
- mutex_unlock(&slot->hotplug_lock);
+ rc = pci_hp_add(slot->hotplug_slot);
+ if (rc) {
+ ctrl_err(ctrl, "Publication to user space failed (%d)\n", rc);
+ goto err_out_shutdown_notification;
}
- /* If empty slot's power status is on, turn power off */
- if (!occupied && poweron && POWER_CTRL(ctrl))
- pciehp_power_off_slot(slot);
+
+ pciehp_check_presence(ctrl);
return 0;
+err_out_shutdown_notification:
+ pcie_shutdown_notification(ctrl);
err_out_free_ctrl_slot:
cleanup_slot(ctrl);
err_out_release_ctlr:
@@ -264,6 +295,8 @@ static void pciehp_remove(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
+ pci_hp_del(ctrl->slot->hotplug_slot);
+ pcie_shutdown_notification(ctrl);
cleanup_slot(ctrl);
pciehp_release_ctrl(ctrl);
}
@@ -274,27 +307,28 @@ static int pciehp_suspend(struct pcie_device *dev)
return 0;
}
-static int pciehp_resume(struct pcie_device *dev)
+static int pciehp_resume_noirq(struct pcie_device *dev)
{
- struct controller *ctrl;
- struct slot *slot;
- u8 status;
+ struct controller *ctrl = get_service_data(dev);
+ struct slot *slot = ctrl->slot;
- ctrl = get_service_data(dev);
+ /* pci_restore_state() just wrote to the Slot Control register */
+ ctrl->cmd_started = jiffies;
+ ctrl->cmd_busy = true;
- /* reinitialize the chipset's event detection logic */
- pcie_reenable_notification(ctrl);
+ /* clear spurious events from rediscovery of inserted card */
+ if (slot->state == ON_STATE || slot->state == BLINKINGOFF_STATE)
+ pcie_clear_hotplug_events(ctrl);
- slot = ctrl->slot;
+ return 0;
+}
+
+static int pciehp_resume(struct pcie_device *dev)
+{
+ struct controller *ctrl = get_service_data(dev);
+
+ pciehp_check_presence(ctrl);
- /* Check if slot is occupied */
- pciehp_get_adapter_status(slot, &status);
- mutex_lock(&slot->hotplug_lock);
- if (status)
- pciehp_enable_slot(slot);
- else
- pciehp_disable_slot(slot);
- mutex_unlock(&slot->hotplug_lock);
return 0;
}
#endif /* PM */
@@ -309,6 +343,7 @@ static struct pcie_port_service_driver hpdriver_portdrv = {
#ifdef CONFIG_PM
.suspend = pciehp_suspend,
+ .resume_noirq = pciehp_resume_noirq,
.resume = pciehp_resume,
#endif /* PM */
};
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index c684faa43387..da7c72372ffc 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -17,28 +17,11 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include "../pci.h"
#include "pciehp.h"
-static void interrupt_event_handler(struct work_struct *work);
-
-void pciehp_queue_interrupt_event(struct slot *p_slot, u32 event_type)
-{
- struct event_info *info;
-
- info = kmalloc(sizeof(*info), GFP_ATOMIC);
- if (!info) {
- ctrl_err(p_slot->ctrl, "dropped event %d (ENOMEM)\n", event_type);
- return;
- }
-
- INIT_WORK(&info->work, interrupt_event_handler);
- info->event_type = event_type;
- info->p_slot = p_slot;
- queue_work(p_slot->wq, &info->work);
-}
-
/* The following routines constitute the bulk of the
hotplug controller logic
*/
@@ -119,14 +102,11 @@ err_exit:
* remove_board - Turns off slot and LEDs
* @p_slot: slot where board is being removed
*/
-static int remove_board(struct slot *p_slot)
+static void remove_board(struct slot *p_slot)
{
- int retval;
struct controller *ctrl = p_slot->ctrl;
- retval = pciehp_unconfigure_device(p_slot);
- if (retval)
- return retval;
+ pciehp_unconfigure_device(p_slot);
if (POWER_CTRL(ctrl)) {
pciehp_power_off_slot(p_slot);
@@ -141,86 +121,30 @@ static int remove_board(struct slot *p_slot)
/* turn off Green LED */
pciehp_green_led_off(p_slot);
- return 0;
}
-struct power_work_info {
- struct slot *p_slot;
- struct work_struct work;
- unsigned int req;
-#define DISABLE_REQ 0
-#define ENABLE_REQ 1
-};
-
-/**
- * pciehp_power_thread - handle pushbutton events
- * @work: &struct work_struct describing work to be done
- *
- * Scheduled procedure to handle blocking stuff for the pushbuttons.
- * Handles all pending events and exits.
- */
-static void pciehp_power_thread(struct work_struct *work)
-{
- struct power_work_info *info =
- container_of(work, struct power_work_info, work);
- struct slot *p_slot = info->p_slot;
- int ret;
-
- switch (info->req) {
- case DISABLE_REQ:
- mutex_lock(&p_slot->hotplug_lock);
- pciehp_disable_slot(p_slot);
- mutex_unlock(&p_slot->hotplug_lock);
- mutex_lock(&p_slot->lock);
- p_slot->state = STATIC_STATE;
- mutex_unlock(&p_slot->lock);
- break;
- case ENABLE_REQ:
- mutex_lock(&p_slot->hotplug_lock);
- ret = pciehp_enable_slot(p_slot);
- mutex_unlock(&p_slot->hotplug_lock);
- if (ret)
- pciehp_green_led_off(p_slot);
- mutex_lock(&p_slot->lock);
- p_slot->state = STATIC_STATE;
- mutex_unlock(&p_slot->lock);
- break;
- default:
- break;
- }
-
- kfree(info);
-}
+static int pciehp_enable_slot(struct slot *slot);
+static int pciehp_disable_slot(struct slot *slot);
-static void pciehp_queue_power_work(struct slot *p_slot, int req)
+void pciehp_request(struct controller *ctrl, int action)
{
- struct power_work_info *info;
-
- p_slot->state = (req == ENABLE_REQ) ? POWERON_STATE : POWEROFF_STATE;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- ctrl_err(p_slot->ctrl, "no memory to queue %s request\n",
- (req == ENABLE_REQ) ? "poweron" : "poweroff");
- return;
- }
- info->p_slot = p_slot;
- INIT_WORK(&info->work, pciehp_power_thread);
- info->req = req;
- queue_work(p_slot->wq, &info->work);
+ atomic_or(action, &ctrl->pending_events);
+ if (!pciehp_poll_mode)
+ irq_wake_thread(ctrl->pcie->irq, ctrl);
}
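Synchronous callers pair pciehp_request() with the requester wait queue and pick up the outcome from request_result; condensed from pciehp_sysfs_disable_slot() further below:

	pciehp_request(ctrl, DISABLE_SLOT);
	wait_event(ctrl->requester, !atomic_read(&ctrl->pending_events));
	ret = ctrl->request_result;	/* set by the IRQ thread */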
void pciehp_queue_pushbutton_work(struct work_struct *work)
{
struct slot *p_slot = container_of(work, struct slot, work.work);
+ struct controller *ctrl = p_slot->ctrl;
mutex_lock(&p_slot->lock);
switch (p_slot->state) {
case BLINKINGOFF_STATE:
- pciehp_queue_power_work(p_slot, DISABLE_REQ);
+ pciehp_request(ctrl, DISABLE_SLOT);
break;
case BLINKINGON_STATE:
- pciehp_queue_power_work(p_slot, ENABLE_REQ);
+ pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
break;
default:
break;
@@ -228,18 +152,15 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
mutex_unlock(&p_slot->lock);
}
-/*
- * Note: This function must be called with slot->lock held
- */
-static void handle_button_press_event(struct slot *p_slot)
+void pciehp_handle_button_press(struct slot *p_slot)
{
struct controller *ctrl = p_slot->ctrl;
- u8 getstatus;
+ mutex_lock(&p_slot->lock);
switch (p_slot->state) {
- case STATIC_STATE:
- pciehp_get_power_status(p_slot, &getstatus);
- if (getstatus) {
+ case OFF_STATE:
+ case ON_STATE:
+ if (p_slot->state == ON_STATE) {
p_slot->state = BLINKINGOFF_STATE;
ctrl_info(ctrl, "Slot(%s): Powering off due to button press\n",
slot_name(p_slot));
@@ -251,7 +172,7 @@ static void handle_button_press_event(struct slot *p_slot)
/* blink green LED and turn off amber */
pciehp_green_led_blink(p_slot);
pciehp_set_attention_status(p_slot, 0);
- queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
+ schedule_delayed_work(&p_slot->work, 5 * HZ);
break;
case BLINKINGOFF_STATE:
case BLINKINGON_STATE:
@@ -262,118 +183,104 @@ static void handle_button_press_event(struct slot *p_slot)
*/
ctrl_info(ctrl, "Slot(%s): Button cancel\n", slot_name(p_slot));
cancel_delayed_work(&p_slot->work);
- if (p_slot->state == BLINKINGOFF_STATE)
+ if (p_slot->state == BLINKINGOFF_STATE) {
+ p_slot->state = ON_STATE;
pciehp_green_led_on(p_slot);
- else
+ } else {
+ p_slot->state = OFF_STATE;
pciehp_green_led_off(p_slot);
+ }
pciehp_set_attention_status(p_slot, 0);
ctrl_info(ctrl, "Slot(%s): Action canceled due to button press\n",
slot_name(p_slot));
- p_slot->state = STATIC_STATE;
- break;
- case POWEROFF_STATE:
- case POWERON_STATE:
- /*
- * Ignore if the slot is on power-on or power-off state;
- * this means that the previous attention button action
- * to hot-add or hot-remove is undergoing
- */
- ctrl_info(ctrl, "Slot(%s): Button ignored\n",
- slot_name(p_slot));
break;
default:
ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n",
slot_name(p_slot), p_slot->state);
break;
}
+ mutex_unlock(&p_slot->lock);
}
-/*
- * Note: This function must be called with slot->lock held
- */
-static void handle_link_event(struct slot *p_slot, u32 event)
+void pciehp_handle_disable_request(struct slot *slot)
{
- struct controller *ctrl = p_slot->ctrl;
+ struct controller *ctrl = slot->ctrl;
- switch (p_slot->state) {
+ mutex_lock(&slot->lock);
+ switch (slot->state) {
case BLINKINGON_STATE:
case BLINKINGOFF_STATE:
- cancel_delayed_work(&p_slot->work);
- /* Fall through */
- case STATIC_STATE:
- pciehp_queue_power_work(p_slot, event == INT_LINK_UP ?
- ENABLE_REQ : DISABLE_REQ);
- break;
- case POWERON_STATE:
- if (event == INT_LINK_UP) {
- ctrl_info(ctrl, "Slot(%s): Link Up event ignored; already powering on\n",
- slot_name(p_slot));
- } else {
- ctrl_info(ctrl, "Slot(%s): Link Down event queued; currently getting powered on\n",
- slot_name(p_slot));
- pciehp_queue_power_work(p_slot, DISABLE_REQ);
- }
- break;
- case POWEROFF_STATE:
- if (event == INT_LINK_UP) {
- ctrl_info(ctrl, "Slot(%s): Link Up event queued; currently getting powered off\n",
- slot_name(p_slot));
- pciehp_queue_power_work(p_slot, ENABLE_REQ);
- } else {
- ctrl_info(ctrl, "Slot(%s): Link Down event ignored; already powering off\n",
- slot_name(p_slot));
- }
- break;
- default:
- ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n",
- slot_name(p_slot), p_slot->state);
+ cancel_delayed_work(&slot->work);
break;
}
+ slot->state = POWEROFF_STATE;
+ mutex_unlock(&slot->lock);
+
+ ctrl->request_result = pciehp_disable_slot(slot);
}
-static void interrupt_event_handler(struct work_struct *work)
+void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events)
{
- struct event_info *info = container_of(work, struct event_info, work);
- struct slot *p_slot = info->p_slot;
- struct controller *ctrl = p_slot->ctrl;
-
- mutex_lock(&p_slot->lock);
- switch (info->event_type) {
- case INT_BUTTON_PRESS:
- handle_button_press_event(p_slot);
- break;
- case INT_POWER_FAULT:
- if (!POWER_CTRL(ctrl))
- break;
- pciehp_set_attention_status(p_slot, 1);
- pciehp_green_led_off(p_slot);
- break;
- case INT_PRESENCE_ON:
- pciehp_queue_power_work(p_slot, ENABLE_REQ);
+ struct controller *ctrl = slot->ctrl;
+ bool link_active;
+ u8 present;
+
+ /*
+ * If the slot is on and presence or link has changed, turn it off.
+ * Even if it's occupied again, we cannot assume the card is the same.
+ */
+ mutex_lock(&slot->lock);
+ switch (slot->state) {
+ case BLINKINGOFF_STATE:
+ cancel_delayed_work(&slot->work);
+ /* fall through */
+ case ON_STATE:
+ slot->state = POWEROFF_STATE;
+ mutex_unlock(&slot->lock);
+ if (events & PCI_EXP_SLTSTA_DLLSC)
+ ctrl_info(ctrl, "Slot(%s): Link Down\n",
+ slot_name(slot));
+ if (events & PCI_EXP_SLTSTA_PDC)
+ ctrl_info(ctrl, "Slot(%s): Card not present\n",
+ slot_name(slot));
+ pciehp_disable_slot(slot);
break;
- case INT_PRESENCE_OFF:
- /*
- * Regardless of surprise capability, we need to
- * definitely remove a card that has been pulled out!
- */
- pciehp_queue_power_work(p_slot, DISABLE_REQ);
+ default:
+ mutex_unlock(&slot->lock);
break;
- case INT_LINK_UP:
- case INT_LINK_DOWN:
- handle_link_event(p_slot, info->event_type);
+ }
+
+ /* Turn the slot on if it's occupied or link is up */
+ mutex_lock(&slot->lock);
+ pciehp_get_adapter_status(slot, &present);
+ link_active = pciehp_check_link_active(ctrl);
+ if (!present && !link_active) {
+ mutex_unlock(&slot->lock);
+ return;
+ }
+
+ switch (slot->state) {
+ case BLINKINGON_STATE:
+ cancel_delayed_work(&slot->work);
+ /* fall through */
+ case OFF_STATE:
+ slot->state = POWERON_STATE;
+ mutex_unlock(&slot->lock);
+ if (present)
+ ctrl_info(ctrl, "Slot(%s): Card present\n",
+ slot_name(slot));
+ if (link_active)
+ ctrl_info(ctrl, "Slot(%s): Link Up\n",
+ slot_name(slot));
+ ctrl->request_result = pciehp_enable_slot(slot);
break;
default:
+ mutex_unlock(&slot->lock);
break;
}
- mutex_unlock(&p_slot->lock);
-
- kfree(info);
}
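The two-phase handler above amounts to this slot state flow (informal summary derived from the code, not normative):

	/*
	 * ON, BLINKINGOFF  --(link down / card removed)-->  POWEROFF --> OFF
	 * OFF, BLINKINGON  --(link up / card present)---->  POWERON  --> ON
	 *
	 * A slot that changes while up is brought down first and, if still
	 * occupied or trained afterwards, brought back up: the card may
	 * have been swapped.
	 */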
-/*
- * Note: This function must be called with slot->hotplug_lock held
- */
-int pciehp_enable_slot(struct slot *p_slot)
+static int __pciehp_enable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
struct controller *ctrl = p_slot->ctrl;
@@ -404,17 +311,29 @@ int pciehp_enable_slot(struct slot *p_slot)
return board_added(p_slot);
}
-/*
- * Note: This function must be called with slot->hotplug_lock held
- */
-int pciehp_disable_slot(struct slot *p_slot)
+static int pciehp_enable_slot(struct slot *slot)
+{
+ struct controller *ctrl = slot->ctrl;
+ int ret;
+
+ pm_runtime_get_sync(&ctrl->pcie->port->dev);
+ ret = __pciehp_enable_slot(slot);
+ if (ret && ATTN_BUTTN(ctrl))
+ pciehp_green_led_off(slot); /* may be blinking */
+ pm_runtime_put(&ctrl->pcie->port->dev);
+
+ mutex_lock(&slot->lock);
+ slot->state = ret ? OFF_STATE : ON_STATE;
+ mutex_unlock(&slot->lock);
+
+ return ret;
+}
+
+static int __pciehp_disable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
struct controller *ctrl = p_slot->ctrl;
- if (!p_slot->ctrl)
- return 1;
-
if (POWER_CTRL(p_slot->ctrl)) {
pciehp_get_power_status(p_slot, &getstatus);
if (!getstatus) {
@@ -424,32 +343,50 @@ int pciehp_disable_slot(struct slot *p_slot)
}
}
- return remove_board(p_slot);
+ remove_board(p_slot);
+ return 0;
+}
+
+static int pciehp_disable_slot(struct slot *slot)
+{
+ struct controller *ctrl = slot->ctrl;
+ int ret;
+
+ pm_runtime_get_sync(&ctrl->pcie->port->dev);
+ ret = __pciehp_disable_slot(slot);
+ pm_runtime_put(&ctrl->pcie->port->dev);
+
+ mutex_lock(&slot->lock);
+ slot->state = OFF_STATE;
+ mutex_unlock(&slot->lock);
+
+ return ret;
}
int pciehp_sysfs_enable_slot(struct slot *p_slot)
{
- int retval = -ENODEV;
struct controller *ctrl = p_slot->ctrl;
mutex_lock(&p_slot->lock);
switch (p_slot->state) {
case BLINKINGON_STATE:
- cancel_delayed_work(&p_slot->work);
- case STATIC_STATE:
- p_slot->state = POWERON_STATE;
+ case OFF_STATE:
mutex_unlock(&p_slot->lock);
- mutex_lock(&p_slot->hotplug_lock);
- retval = pciehp_enable_slot(p_slot);
- mutex_unlock(&p_slot->hotplug_lock);
- mutex_lock(&p_slot->lock);
- p_slot->state = STATIC_STATE;
- break;
+ /*
+		 * The IRQ thread becomes a no-op if the user pulls out the
+		 * card before the thread wakes up, so initialize
+		 * request_result to -ENODEV.
+ */
+ ctrl->request_result = -ENODEV;
+ pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
+ wait_event(ctrl->requester,
+ !atomic_read(&ctrl->pending_events));
+ return ctrl->request_result;
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
slot_name(p_slot));
break;
case BLINKINGOFF_STATE:
+ case ON_STATE:
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already enabled\n",
slot_name(p_slot));
@@ -461,32 +398,28 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
}
mutex_unlock(&p_slot->lock);
- return retval;
+ return -ENODEV;
}
int pciehp_sysfs_disable_slot(struct slot *p_slot)
{
- int retval = -ENODEV;
struct controller *ctrl = p_slot->ctrl;
mutex_lock(&p_slot->lock);
switch (p_slot->state) {
case BLINKINGOFF_STATE:
- cancel_delayed_work(&p_slot->work);
- case STATIC_STATE:
- p_slot->state = POWEROFF_STATE;
+ case ON_STATE:
mutex_unlock(&p_slot->lock);
- mutex_lock(&p_slot->hotplug_lock);
- retval = pciehp_disable_slot(p_slot);
- mutex_unlock(&p_slot->hotplug_lock);
- mutex_lock(&p_slot->lock);
- p_slot->state = STATIC_STATE;
- break;
+ pciehp_request(ctrl, DISABLE_SLOT);
+ wait_event(ctrl->requester,
+ !atomic_read(&ctrl->pending_events));
+ return ctrl->request_result;
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
slot_name(p_slot));
break;
case BLINKINGON_STATE:
+ case OFF_STATE:
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already disabled\n",
slot_name(p_slot));
@@ -498,5 +431,5 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot)
}
mutex_unlock(&p_slot->lock);
- return retval;
+ return -ENODEV;
}
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 718b6073afad..7136e3430925 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -17,8 +17,9 @@
#include <linux/types.h>
#include <linux/signal.h>
#include <linux/jiffies.h>
-#include <linux/timer.h>
+#include <linux/kthread.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/slab.h>
@@ -31,47 +32,24 @@ static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
return ctrl->pcie->port;
}
-static irqreturn_t pcie_isr(int irq, void *dev_id);
-static void start_int_poll_timer(struct controller *ctrl, int sec);
-
-/* This is the interrupt polling timeout function. */
-static void int_poll_timeout(struct timer_list *t)
-{
- struct controller *ctrl = from_timer(ctrl, t, poll_timer);
-
- /* Poll for interrupt events. regs == NULL => polling */
- pcie_isr(0, ctrl);
-
- if (!pciehp_poll_time)
- pciehp_poll_time = 2; /* default polling interval is 2 sec */
-
- start_int_poll_timer(ctrl, pciehp_poll_time);
-}
-
-/* This function starts the interrupt polling timer. */
-static void start_int_poll_timer(struct controller *ctrl, int sec)
-{
- /* Clamp to sane value */
- if ((sec <= 0) || (sec > 60))
- sec = 2;
-
- ctrl->poll_timer.expires = jiffies + sec * HZ;
- add_timer(&ctrl->poll_timer);
-}
+static irqreturn_t pciehp_isr(int irq, void *dev_id);
+static irqreturn_t pciehp_ist(int irq, void *dev_id);
+static int pciehp_poll(void *data);
static inline int pciehp_request_irq(struct controller *ctrl)
{
int retval, irq = ctrl->pcie->irq;
- /* Install interrupt polling timer. Start with 10 sec delay */
if (pciehp_poll_mode) {
- timer_setup(&ctrl->poll_timer, int_poll_timeout, 0);
- start_int_poll_timer(ctrl, 10);
- return 0;
+ ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
+ "pciehp_poll-%s",
+ slot_name(ctrl->slot));
+ return PTR_ERR_OR_ZERO(ctrl->poll_thread);
}
/* Installs the interrupt handler */
- retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl);
+ retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
+ IRQF_SHARED, MY_NAME, ctrl);
if (retval)
ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
irq);
@@ -81,7 +59,7 @@ static inline int pciehp_request_irq(struct controller *ctrl)
static inline void pciehp_free_irq(struct controller *ctrl)
{
if (pciehp_poll_mode)
- del_timer_sync(&ctrl->poll_timer);
+ kthread_stop(ctrl->poll_thread);
else
free_irq(ctrl->pcie->irq, ctrl);
}
@@ -293,6 +271,11 @@ int pciehp_check_link_status(struct controller *ctrl)
found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
PCI_DEVFN(0, 0));
+ /* ignore link or presence changes up to this point */
+ if (found)
+ atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
+ &ctrl->pending_events);
+
pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
@@ -339,7 +322,9 @@ int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
struct pci_dev *pdev = ctrl_dev(slot->ctrl);
u16 slot_ctrl;
+ pci_config_pm_runtime_get(pdev);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
+ pci_config_pm_runtime_put(pdev);
*status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
return 0;
}
@@ -350,7 +335,9 @@ void pciehp_get_attention_status(struct slot *slot, u8 *status)
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
+ pci_config_pm_runtime_get(pdev);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
+ pci_config_pm_runtime_put(pdev);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
@@ -425,9 +412,12 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
{
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ pci_config_pm_runtime_get(pdev);
pcie_write_cmd_nowait(ctrl, status << 6,
PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
+ pci_config_pm_runtime_put(pdev);
return 0;
}
@@ -539,20 +529,35 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
struct pci_dev *pdev = ctrl_dev(ctrl);
- struct pci_bus *subordinate = pdev->subordinate;
- struct pci_dev *dev;
- struct slot *slot = ctrl->slot;
+ struct device *parent = pdev->dev.parent;
u16 status, events;
- u8 present;
- bool link;
- /* Interrupts cannot originate from a controller that's asleep */
+ /*
+ * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4).
+ */
if (pdev->current_state == PCI_D3cold)
return IRQ_NONE;
+ /*
+ * Keep the port accessible by holding a runtime PM ref on its parent.
+ * Defer resume of the parent to the IRQ thread if it's suspended.
+ * Mask the interrupt until then.
+ */
+ if (parent) {
+ pm_runtime_get_noresume(parent);
+ if (!pm_runtime_active(parent)) {
+ pm_runtime_put(parent);
+ disable_irq_nosync(irq);
+ atomic_or(RERUN_ISR, &ctrl->pending_events);
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
if (status == (u16) ~0) {
ctrl_info(ctrl, "%s: no response from device\n", __func__);
+ if (parent)
+ pm_runtime_put(parent);
return IRQ_NONE;
}
@@ -571,86 +576,119 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
if (ctrl->power_fault_detected)
events &= ~PCI_EXP_SLTSTA_PFD;
- if (!events)
+ if (!events) {
+ if (parent)
+ pm_runtime_put(parent);
return IRQ_NONE;
-
- /* Capture link status before clearing interrupts */
- if (events & PCI_EXP_SLTSTA_DLLSC)
- link = pciehp_check_link_active(ctrl);
+ }
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
+ if (parent)
+ pm_runtime_put(parent);
- /* Check Command Complete Interrupt Pending */
+ /*
+	 * Command Completed notifications are not deferred to the IRQ
+	 * thread because the thread itself may be waiting for their arrival.
+ */
if (events & PCI_EXP_SLTSTA_CC) {
ctrl->cmd_busy = 0;
smp_mb();
wake_up(&ctrl->queue);
+
+ if (events == PCI_EXP_SLTSTA_CC)
+ return IRQ_HANDLED;
+
+ events &= ~PCI_EXP_SLTSTA_CC;
+ }
+
+ if (pdev->ignore_hotplug) {
+ ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
+ return IRQ_HANDLED;
}
- if (subordinate) {
- list_for_each_entry(dev, &subordinate->devices, bus_list) {
- if (dev->ignore_hotplug) {
- ctrl_dbg(ctrl, "ignoring hotplug event %#06x (%s requested no hotplug)\n",
- events, pci_name(dev));
- return IRQ_HANDLED;
- }
+ /* Save pending events for consumption by IRQ thread. */
+ atomic_or(events, &ctrl->pending_events);
+ return IRQ_WAKE_THREAD;
+}
+
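The handler/thread split follows the standard threaded-IRQ contract: the hard handler does only non-sleeping work and returns IRQ_WAKE_THREAD to defer the rest. A generic sketch of that contract (the foo_* names are hypothetical):

	static irqreturn_t foo_isr(int irq, void *dev_id)	/* hard-IRQ context */
	{
		struct foo *foo = dev_id;

		if (!foo_event_pending(foo))
			return IRQ_NONE;	/* interrupt was not ours */
		foo_latch_events(foo);		/* cheap, non-sleeping work only */
		return IRQ_WAKE_THREAD;		/* wake foo_ist() */
	}

	static irqreturn_t foo_ist(int irq, void *dev_id)	/* process context */
	{
		foo_handle_events(dev_id);	/* may sleep, take mutexes, etc. */
		return IRQ_HANDLED;
	}

	err = request_threaded_irq(irq, foo_isr, foo_ist, IRQF_SHARED, "foo", foo);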
+static irqreturn_t pciehp_ist(int irq, void *dev_id)
+{
+ struct controller *ctrl = (struct controller *)dev_id;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ struct slot *slot = ctrl->slot;
+ irqreturn_t ret;
+ u32 events;
+
+ pci_config_pm_runtime_get(pdev);
+
+ /* rerun pciehp_isr() if the port was inaccessible on interrupt */
+ if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
+ ret = pciehp_isr(irq, dev_id);
+ enable_irq(irq);
+ if (ret != IRQ_WAKE_THREAD) {
+ pci_config_pm_runtime_put(pdev);
+ return ret;
}
}
+ synchronize_hardirq(irq);
+ events = atomic_xchg(&ctrl->pending_events, 0);
+ if (!events) {
+ pci_config_pm_runtime_put(pdev);
+ return IRQ_NONE;
+ }
+
/* Check Attention Button Pressed */
if (events & PCI_EXP_SLTSTA_ABP) {
ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
slot_name(slot));
- pciehp_queue_interrupt_event(slot, INT_BUTTON_PRESS);
+ pciehp_handle_button_press(slot);
}
/*
- * Check Link Status Changed at higher precedence than Presence
- * Detect Changed. The PDS value may be set to "card present" from
- * out-of-band detection, which may be in conflict with a Link Down
- * and cause the wrong event to queue.
+ * Disable requests have higher priority than Presence Detect Changed
+ * or Data Link Layer State Changed events.
*/
- if (events & PCI_EXP_SLTSTA_DLLSC) {
- ctrl_info(ctrl, "Slot(%s): Link %s\n", slot_name(slot),
- link ? "Up" : "Down");
- pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP :
- INT_LINK_DOWN);
- } else if (events & PCI_EXP_SLTSTA_PDC) {
- present = !!(status & PCI_EXP_SLTSTA_PDS);
- ctrl_info(ctrl, "Slot(%s): Card %spresent\n", slot_name(slot),
- present ? "" : "not ");
- pciehp_queue_interrupt_event(slot, present ? INT_PRESENCE_ON :
- INT_PRESENCE_OFF);
- }
+ down_read(&ctrl->reset_lock);
+ if (events & DISABLE_SLOT)
+ pciehp_handle_disable_request(slot);
+ else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
+ pciehp_handle_presence_or_link_change(slot, events);
+ up_read(&ctrl->reset_lock);
/* Check Power Fault Detected */
if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
ctrl->power_fault_detected = 1;
ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
- pciehp_queue_interrupt_event(slot, INT_POWER_FAULT);
+ pciehp_set_attention_status(slot, 1);
+ pciehp_green_led_off(slot);
}
+ pci_config_pm_runtime_put(pdev);
+ wake_up(&ctrl->requester);
return IRQ_HANDLED;
}
-static irqreturn_t pcie_isr(int irq, void *dev_id)
+static int pciehp_poll(void *data)
{
- irqreturn_t rc, handled = IRQ_NONE;
+ struct controller *ctrl = data;
- /*
- * To guarantee that all interrupt events are serviced, we need to
- * re-inspect Slot Status register after clearing what is presumed
- * to be the last pending interrupt.
- */
- do {
- rc = pciehp_isr(irq, dev_id);
- if (rc == IRQ_HANDLED)
- handled = IRQ_HANDLED;
- } while (rc == IRQ_HANDLED);
+ schedule_timeout_idle(10 * HZ); /* start with 10 sec delay */
+
+ while (!kthread_should_stop()) {
+ /* poll for interrupt events or user requests */
+ while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
+ atomic_read(&ctrl->pending_events))
+ pciehp_ist(IRQ_NOTCONNECTED, ctrl);
- /* Return IRQ_HANDLED if we handled one or more events */
- return handled;
+ if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
+ pciehp_poll_time = 2; /* clamp to sane value */
+
+ schedule_timeout_idle(pciehp_poll_time * HZ);
+ }
+
+ return 0;
}
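Thread lifetime follows the usual kthread contract: kthread_stop() wakes the task, schedule_timeout_idle() then returns early, and kthread_should_stop() ends the loop. Setup and teardown, condensed from the hunks above:

	ctrl->poll_thread = kthread_run(pciehp_poll, ctrl, "pciehp_poll-%s",
					slot_name(ctrl->slot));
	if (IS_ERR(ctrl->poll_thread))
		return PTR_ERR(ctrl->poll_thread);

	/* on teardown, from pciehp_free_irq(): */
	kthread_stop(ctrl->poll_thread);	/* wakes the thread; loop exits */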
static void pcie_enable_notification(struct controller *ctrl)
@@ -691,17 +729,6 @@ static void pcie_enable_notification(struct controller *ctrl)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
-void pcie_reenable_notification(struct controller *ctrl)
-{
- /*
- * Clear both Presence and Data Link Layer Changed to make sure
- * those events still fire after we have re-enabled them.
- */
- pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
- PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
- pcie_enable_notification(ctrl);
-}
-
static void pcie_disable_notification(struct controller *ctrl)
{
u16 mask;
@@ -715,6 +742,12 @@ static void pcie_disable_notification(struct controller *ctrl)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
}
+void pcie_clear_hotplug_events(struct controller *ctrl)
+{
+ pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
+}
+
/*
* pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
* bus reset of the bridge, but at the same time we want to ensure that it is
@@ -728,10 +761,13 @@ int pciehp_reset_slot(struct slot *slot, int probe)
struct controller *ctrl = slot->ctrl;
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 stat_mask = 0, ctrl_mask = 0;
+ int rc;
if (probe)
return 0;
+ down_write(&ctrl->reset_lock);
+
if (!ATTN_BUTTN(ctrl)) {
ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
stat_mask |= PCI_EXP_SLTSTA_PDC;
@@ -742,18 +778,16 @@ int pciehp_reset_slot(struct slot *slot, int probe)
pcie_write_cmd(ctrl, 0, ctrl_mask);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
- if (pciehp_poll_mode)
- del_timer_sync(&ctrl->poll_timer);
- pci_reset_bridge_secondary_bus(ctrl->pcie->port);
+ rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
- if (pciehp_poll_mode)
- int_poll_timeout(&ctrl->poll_timer);
- return 0;
+
+ up_write(&ctrl->reset_lock);
+ return rc;
}
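reset_lock follows the plain rwsem pattern: event handling takes it shared, the reset path takes it exclusive, so a secondary bus reset cannot race with handlers while PDS and DLLLA flap. Schematically (condensed from the code above):

	/* event side: IRQ thread and presence check may run concurrently */
	down_read(&ctrl->reset_lock);
	pciehp_handle_presence_or_link_change(slot, events);
	up_read(&ctrl->reset_lock);

	/* reset side: excludes all event handling for the duration */
	down_write(&ctrl->reset_lock);
	rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
	up_write(&ctrl->reset_lock);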
int pcie_init_notification(struct controller *ctrl)
@@ -765,7 +799,7 @@ int pcie_init_notification(struct controller *ctrl)
return 0;
}
-static void pcie_shutdown_notification(struct controller *ctrl)
+void pcie_shutdown_notification(struct controller *ctrl)
{
if (ctrl->notification_enabled) {
pcie_disable_notification(ctrl);
@@ -776,32 +810,29 @@ static void pcie_shutdown_notification(struct controller *ctrl)
static int pcie_init_slot(struct controller *ctrl)
{
+ struct pci_bus *subordinate = ctrl_dev(ctrl)->subordinate;
struct slot *slot;
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
- slot->wq = alloc_ordered_workqueue("pciehp-%u", 0, PSN(ctrl));
- if (!slot->wq)
- goto abort;
+ down_read(&pci_bus_sem);
+ slot->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
+ up_read(&pci_bus_sem);
slot->ctrl = ctrl;
mutex_init(&slot->lock);
- mutex_init(&slot->hotplug_lock);
INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
ctrl->slot = slot;
return 0;
-abort:
- kfree(slot);
- return -ENOMEM;
}
static void pcie_cleanup_slot(struct controller *ctrl)
{
struct slot *slot = ctrl->slot;
- cancel_delayed_work(&slot->work);
- destroy_workqueue(slot->wq);
+
+ cancel_delayed_work_sync(&slot->work);
kfree(slot);
}
@@ -826,6 +857,7 @@ struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
u32 slot_cap, link_cap;
+ u8 occupied, poweron;
struct pci_dev *pdev = dev->port;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
@@ -847,6 +879,8 @@ struct controller *pcie_init(struct pcie_device *dev)
ctrl->slot_cap = slot_cap;
mutex_init(&ctrl->ctrl_lock);
+ init_rwsem(&ctrl->reset_lock);
+ init_waitqueue_head(&ctrl->requester);
init_waitqueue_head(&ctrl->queue);
dbg_ctrl(ctrl);
@@ -855,16 +889,11 @@ struct controller *pcie_init(struct pcie_device *dev)
if (link_cap & PCI_EXP_LNKCAP_DLLLARC)
ctrl->link_active_reporting = 1;
- /*
- * Clear all remaining event bits in Slot Status register except
- * Presence Detect Changed. We want to make sure possible
- * hotplug event is triggered when the interrupt is unmasked so
- * that we don't lose that event.
- */
+ /* Clear all remaining event bits in Slot Status register. */
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
- PCI_EXP_SLTSTA_DLLSC);
+ PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);
ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
@@ -883,6 +912,19 @@ struct controller *pcie_init(struct pcie_device *dev)
if (pcie_init_slot(ctrl))
goto abort_ctrl;
+ /*
+	 * If the slot is unoccupied but its power is on, turn power off.  The
+	 * IRQ isn't requested yet, so avoid triggering a notification with
+	 * this command.
+ */
+ if (POWER_CTRL(ctrl)) {
+ pciehp_get_adapter_status(ctrl->slot, &occupied);
+ pciehp_get_power_status(ctrl->slot, &poweron);
+ if (!occupied && poweron) {
+ pcie_disable_notification(ctrl);
+ pciehp_power_off_slot(ctrl->slot);
+ }
+ }
+
return ctrl;
abort_ctrl:
@@ -893,7 +935,6 @@ abort:
void pciehp_release_ctrl(struct controller *ctrl)
{
- pcie_shutdown_notification(ctrl);
pcie_cleanup_slot(ctrl);
kfree(ctrl);
}
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 3f518dea856d..5c58c22e0c08 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -62,9 +62,8 @@ int pciehp_configure_device(struct slot *p_slot)
return ret;
}
-int pciehp_unconfigure_device(struct slot *p_slot)
+void pciehp_unconfigure_device(struct slot *p_slot)
{
- int rc = 0;
u8 presence = 0;
struct pci_dev *dev, *temp;
struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
@@ -107,5 +106,4 @@ int pciehp_unconfigure_device(struct slot *p_slot)
}
pci_unlock_rescan_remove();
- return rc;
}
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
deleted file mode 100644
index c19694a04d2c..000000000000
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ /dev/null
@@ -1,348 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * PCI Hot Plug Controller Skeleton Driver - 0.3
- *
- * Copyright (C) 2001,2003 Greg Kroah-Hartman (greg@kroah.com)
- * Copyright (C) 2001,2003 IBM Corp.
- *
- * All rights reserved.
- *
- * This driver is to be used as a skeleton driver to show how to interface
- * with the pci hotplug core easily.
- *
- * Send feedback to <greg@kroah.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/pci_hotplug.h>
-#include <linux/init.h>
-
-#define SLOT_NAME_SIZE 10
-struct slot {
- u8 number;
- struct hotplug_slot *hotplug_slot;
- struct list_head slot_list;
- char name[SLOT_NAME_SIZE];
-};
-
-static LIST_HEAD(slot_list);
-
-#define MY_NAME "pcihp_skeleton"
-
-#define dbg(format, arg...) \
- do { \
- if (debug) \
- printk(KERN_DEBUG "%s: " format "\n", \
- MY_NAME, ## arg); \
- } while (0)
-#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME, ## arg)
-#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME, ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME, ## arg)
-
-/* local variables */
-static bool debug;
-static int num_slots;
-
-#define DRIVER_VERSION "0.3"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>"
-#define DRIVER_DESC "Hot Plug PCI Controller Skeleton Driver"
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-module_param(debug, bool, 0644);
-MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-
-static int enable_slot(struct hotplug_slot *slot);
-static int disable_slot(struct hotplug_slot *slot);
-static int set_attention_status(struct hotplug_slot *slot, u8 value);
-static int hardware_test(struct hotplug_slot *slot, u32 value);
-static int get_power_status(struct hotplug_slot *slot, u8 *value);
-static int get_attention_status(struct hotplug_slot *slot, u8 *value);
-static int get_latch_status(struct hotplug_slot *slot, u8 *value);
-static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
-
-static struct hotplug_slot_ops skel_hotplug_slot_ops = {
- .enable_slot = enable_slot,
- .disable_slot = disable_slot,
- .set_attention_status = set_attention_status,
- .hardware_test = hardware_test,
- .get_power_status = get_power_status,
- .get_attention_status = get_attention_status,
- .get_latch_status = get_latch_status,
- .get_adapter_status = get_adapter_status,
-};
-
-static int enable_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- /*
- * Fill in code here to enable the specified slot
- */
-
- return retval;
-}
-
-static int disable_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- /*
- * Fill in code here to disable the specified slot
- */
-
- return retval;
-}
-
-static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- switch (status) {
- case 0:
- /*
- * Fill in code here to turn light off
- */
- break;
-
- case 1:
- default:
- /*
- * Fill in code here to turn light on
- */
- break;
- }
-
- return retval;
-}
-
-static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- switch (value) {
- case 0:
- /* Specify a test here */
- break;
- case 1:
- /* Specify another test here */
- break;
- }
-
- return retval;
-}
-
-static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- /*
- * Fill in logic to get the current power status of the specific
- * slot and store it in the *value location.
- */
-
- return retval;
-}
-
-static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- /*
- * Fill in logic to get the current attention status of the specific
- * slot and store it in the *value location.
- */
-
- return retval;
-}
-
-static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- /*
- * Fill in logic to get the current latch status of the specific
- * slot and store it in the *value location.
- */
-
- return retval;
-}
-
-static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
- int retval = 0;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
-
- /*
- * Fill in logic to get the current adapter status of the specific
- * slot and store it in the *value location.
- */
-
- return retval;
-}
-
-static void release_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
- kfree(slot);
-}
-
-static void make_slot_name(struct slot *slot)
-{
- /*
- * Stupid way to make a filename out of the slot name.
- * replace this if your hardware provides a better way to name slots.
- */
- snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d", slot->number);
-}
-
-/**
- * init_slots - initialize 'struct slot' structures for each slot
- *
- */
-static int __init init_slots(void)
-{
- struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *info;
- int retval;
- int i;
-
- /*
- * Create a structure for each slot, and register that slot
- * with the pci_hotplug subsystem.
- */
- for (i = 0; i < num_slots; ++i) {
- slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot) {
- retval = -ENOMEM;
- goto error;
- }
-
- hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot) {
- retval = -ENOMEM;
- goto error_slot;
- }
- slot->hotplug_slot = hotplug_slot;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- retval = -ENOMEM;
- goto error_hpslot;
- }
- hotplug_slot->info = info;
-
- slot->number = i;
-
- hotplug_slot->name = slot->name;
- hotplug_slot->private = slot;
- hotplug_slot->release = &release_slot;
- make_slot_name(slot);
- hotplug_slot->ops = &skel_hotplug_slot_ops;
-
- /*
- * Initialize the slot info structure with some known
- * good values.
- */
- get_power_status(hotplug_slot, &info->power_status);
- get_attention_status(hotplug_slot, &info->attention_status);
- get_latch_status(hotplug_slot, &info->latch_status);
- get_adapter_status(hotplug_slot, &info->adapter_status);
-
- dbg("registering slot %d\n", i);
- retval = pci_hp_register(slot->hotplug_slot);
- if (retval) {
- err("pci_hp_register failed with error %d\n", retval);
- goto error_info;
- }
-
- /* add slot to our internal list */
- list_add(&slot->slot_list, &slot_list);
- }
-
- return 0;
-error_info:
- kfree(info);
-error_hpslot:
- kfree(hotplug_slot);
-error_slot:
- kfree(slot);
-error:
- return retval;
-}
-
-static void __exit cleanup_slots(void)
-{
- struct slot *slot, *next;
-
- /*
- * Unregister all of our slots with the pci_hotplug subsystem.
- * Memory will be freed in release_slot() callback after slot's
- * lifespan is finished.
- */
- list_for_each_entry_safe(slot, next, &slot_list, slot_list) {
- list_del(&slot->slot_list);
- pci_hp_deregister(slot->hotplug_slot);
- }
-}
-
-static int __init pcihp_skel_init(void)
-{
- int retval;
-
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
- /*
- * Do specific initialization stuff for your driver here
- * like initializing your controller hardware (if any) and
- * determining the number of slots you have in the system
- * right now.
- */
- num_slots = 5;
-
- return init_slots();
-}
-
-static void __exit pcihp_skel_exit(void)
-{
- /*
- * Clean everything up.
- */
- cleanup_slots();
-}
-
-module_init(pcihp_skel_init);
-module_exit(pcihp_skel_exit);
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 6c2e8d7307c6..3276a5e4c430 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -538,9 +538,8 @@ static struct hotplug_slot_ops php_slot_ops = {
.disable_slot = pnv_php_disable_slot,
};
-static void pnv_php_release(struct hotplug_slot *slot)
+static void pnv_php_release(struct pnv_php_slot *php_slot)
{
- struct pnv_php_slot *php_slot = slot->private;
unsigned long flags;
/* Remove from global or child list */
@@ -596,7 +595,6 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
php_slot->power_state_check = false;
php_slot->slot.ops = &php_slot_ops;
php_slot->slot.info = &php_slot->slot_info;
- php_slot->slot.release = pnv_php_release;
php_slot->slot.private = php_slot;
INIT_LIST_HEAD(&php_slot->children);
@@ -924,6 +922,7 @@ static void pnv_php_unregister_one(struct device_node *dn)
php_slot->state = PNV_PHP_STATE_OFFLINE;
pci_hp_deregister(&php_slot->slot);
+ pnv_php_release(php_slot);
pnv_php_put_slot(php_slot);
}
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index fb5e0845429d..857c358b727b 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -404,13 +404,13 @@ static void __exit cleanup_slots(void)
/*
* Unregister all of our slots with the pci_hotplug subsystem,
* and free up all memory that we had allocated.
- * memory will be freed in release_slot callback.
*/
list_for_each_entry_safe(slot, next, &rpaphp_slot_head,
rpaphp_slot_list) {
list_del(&slot->rpaphp_slot_list);
pci_hp_deregister(slot->hotplug_slot);
+ dealloc_slot_struct(slot);
}
return;
}
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 3840a2075e6a..b916c8e4372d 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -19,12 +19,6 @@
#include "rpaphp.h"
/* free up the memory used by a slot */
-static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = (struct slot *) hotplug_slot->private;
- dealloc_slot_struct(slot);
-}
-
void dealloc_slot_struct(struct slot *slot)
{
kfree(slot->hotplug_slot->info);
@@ -56,7 +50,6 @@ struct slot *alloc_slot_struct(struct device_node *dn,
slot->power_domain = power_domain;
slot->hotplug_slot->private = slot;
slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops;
- slot->hotplug_slot->release = &rpaphp_release_slot;
return (slot);
@@ -90,10 +83,8 @@ int rpaphp_deregister_slot(struct slot *slot)
__func__, slot->name);
list_del(&slot->rpaphp_slot_list);
-
- retval = pci_hp_deregister(php_slot);
- if (retval)
- err("Problem unregistering a slot %s\n", slot->name);
+ pci_hp_deregister(php_slot);
+ dealloc_slot_struct(slot);
dbg("%s - Exit: rc[%d]\n", __func__, retval);
return retval;
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index ffdc2977395d..93b5341d282c 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -130,15 +130,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static void release_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
- kfree(slot);
-}
-
static struct hotplug_slot_ops s390_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
@@ -175,7 +166,6 @@ int zpci_init_slot(struct zpci_dev *zdev)
hotplug_slot->info = info;
hotplug_slot->ops = &s390_hotplug_slot_ops;
- hotplug_slot->release = &release_slot;
get_power_status(hotplug_slot, &info->power_status);
get_adapter_status(hotplug_slot, &info->adapter_status);
@@ -209,5 +199,8 @@ void zpci_exit_slot(struct zpci_dev *zdev)
continue;
list_del(&slot->slot_list);
pci_hp_deregister(slot->hotplug_slot);
+ kfree(slot->hotplug_slot->info);
+ kfree(slot->hotplug_slot);
+ kfree(slot);
}
}
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 78b6bdbb3a39..babd23409f61 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -628,7 +628,6 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
goto alloc_err;
}
bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
- bss_hotplug_slot->release = &sn_release_slot;
rc = pci_hp_register(bss_hotplug_slot, pci_bus, device, name);
if (rc)
@@ -656,8 +655,10 @@ alloc_err:
sn_release_slot(bss_hotplug_slot);
/* destroy anything else on the list */
- while ((bss_hotplug_slot = sn_hp_destroy()))
+ while ((bss_hotplug_slot = sn_hp_destroy())) {
pci_hp_deregister(bss_hotplug_slot);
+ sn_release_slot(bss_hotplug_slot);
+ }
return rc;
}
@@ -703,8 +704,10 @@ static void __exit sn_pci_hotplug_exit(void)
{
struct hotplug_slot *bss_hotplug_slot;
- while ((bss_hotplug_slot = sn_hp_destroy()))
+ while ((bss_hotplug_slot = sn_hp_destroy())) {
pci_hp_deregister(bss_hotplug_slot);
+ sn_release_slot(bss_hotplug_slot);
+ }
if (!list_empty(&sn_hp_list))
printk(KERN_ERR "%s: internal list is not empty\n", __FILE__);
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index e91be287f292..97cee23f3d51 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -61,22 +61,6 @@ static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
.get_adapter_status = get_adapter_status,
};
-/**
- * release_slot - free up the memory used by a slot
- * @hotplug_slot: slot to free
- */
-static void release_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, slot_name(slot));
-
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
- kfree(slot);
-}
-
static int init_slots(struct controller *ctrl)
{
struct slot *slot;
@@ -125,7 +109,6 @@ static int init_slots(struct controller *ctrl)
/* register this slot with the hotplug pci core */
hotplug_slot->private = slot;
- hotplug_slot->release = &release_slot;
snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
hotplug_slot->ops = &shpchp_hotplug_slot_ops;
@@ -171,6 +154,9 @@ void cleanup_slots(struct controller *ctrl)
cancel_delayed_work(&slot->work);
destroy_workqueue(slot->wq);
pci_hp_deregister(slot->hotplug_slot);
+ kfree(slot->hotplug_slot->info);
+ kfree(slot->hotplug_slot);
+ kfree(slot);
}
}
@@ -270,11 +256,30 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
+static bool shpc_capable(struct pci_dev *bridge)
+{
+ /*
+	 * AMD GOLAM chips are assumed to support SHPC even though they
+	 * do not expose an SHPC capability in config space.
+ */
+ if (bridge->vendor == PCI_VENDOR_ID_AMD &&
+ bridge->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
+ return true;
+
+ if (pci_find_capability(bridge, PCI_CAP_ID_SHPC))
+ return true;
+
+ return false;
+}
+
static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
struct controller *ctrl;
+ if (!shpc_capable(pdev))
+ return -ENODEV;
+
if (acpi_get_hp_hw_control_from_firmware(pdev))
return -ENODEV;
@@ -303,6 +308,7 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_cleanup_slots;
+ pdev->shpc_managed = 1;
return 0;
err_cleanup_slots:
@@ -319,6 +325,7 @@ static void shpc_remove(struct pci_dev *dev)
{
struct controller *ctrl = pci_get_drvdata(dev);
+ dev->shpc_managed = 0;
shpchp_remove_ctrl_files(ctrl);
ctrl->hpc_ops->release_ctlr(ctrl);
kfree(ctrl);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 1047b56e5730..1267dcc5a531 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -654,6 +654,7 @@ int shpchp_sysfs_enable_slot(struct slot *p_slot)
switch (p_slot->state) {
case BLINKINGON_STATE:
cancel_delayed_work(&p_slot->work);
+ /* fall through */
case STATIC_STATE:
p_slot->state = POWERON_STATE;
mutex_unlock(&p_slot->lock);
@@ -689,6 +690,7 @@ int shpchp_sysfs_disable_slot(struct slot *p_slot)
switch (p_slot->state) {
case BLINKINGOFF_STATE:
cancel_delayed_work(&p_slot->work);
+ /* fall through */
case STATIC_STATE:
p_slot->state = POWEROFF_STATE;
mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 0f04ae648cf1..c5f3cd4ed766 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -818,15 +818,15 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{
if (!dev->is_physfn)
return -ENOSYS;
+
if (numvfs > dev->sriov->total_VFs)
return -EINVAL;
/* Shouldn't change if VFs already enabled */
if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
return -EBUSY;
- else
- dev->sriov->driver_max_VFs = numvfs;
+ dev->sriov->driver_max_VFs = numvfs;
return 0;
}
EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index 2a808e10645f..a1de501a2729 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -86,13 +86,17 @@ int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler,
va_list ap;
int ret;
char *devname;
+ unsigned long irqflags = IRQF_SHARED;
+
+ if (!handler)
+ irqflags |= IRQF_ONESHOT;
va_start(ap, fmt);
devname = kvasprintf(GFP_KERNEL, fmt, ap);
va_end(ap);
ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn,
- IRQF_SHARED, devname, dev_id);
+ irqflags, devname, dev_id);
if (ret)
kfree(devname);
return ret;
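A NULL @handler now selects a purely threaded interrupt, and the added
IRQF_ONESHOT keeps the line masked until the thread function returns. A
minimal caller sketch (driver names are hypothetical; assumes vectors were
allocated with pci_alloc_irq_vectors()):

    static irqreturn_t mydrv_thread_fn(int irq, void *dev_id)
    {
            /* runs in process context; the line stays masked until return */
            return IRQ_HANDLED;
    }

    static int mydrv_setup_irq(struct pci_dev *pdev, void *drvdata)
    {
            /* vector 0, no hard handler: IRQF_ONESHOT is added for us */
            return pci_request_irq(pdev, 0, NULL, mydrv_thread_fn, drvdata,
                                   "mydrv[%s]", pci_name(pdev));
    }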
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 4d88afdfc843..f2ef896464b3 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1446,6 +1446,9 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
info->flags |= MSI_FLAG_MUST_REACTIVATE;
+ /* PCI-MSI is oneshot-safe */
+ info->chip->flags |= IRQCHIP_ONESHOT_SAFE;
+
domain = msi_create_irq_domain(fwnode, info, parent);
if (!domain)
return NULL;
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 69a60d6ebd73..1836b8ddf292 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -266,7 +266,7 @@ int devm_of_pci_get_host_bridge_resources(struct device *dev,
struct list_head *resources, resource_size_t *io_base)
{
struct device_node *dev_node = dev->of_node;
- struct resource *res;
+ struct resource *res, tmp_res;
struct resource *bus_range;
struct of_pci_range range;
struct of_pci_range_parser parser;
@@ -320,18 +320,16 @@ int devm_of_pci_get_host_bridge_resources(struct device *dev,
if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
continue;
- res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
+ err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
+ if (err)
+ continue;
+
+ res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
if (!res) {
err = -ENOMEM;
goto failed;
}
- err = of_pci_range_to_resource(&range, dev_node, res);
- if (err) {
- devm_kfree(dev, res);
- continue;
- }
-
if (resource_type(res) == IORESOURCE_IO) {
if (!io_base) {
dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
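The rework fills a stack-local struct resource first and commits it with
devm_kmemdup() only after parsing succeeds, so the failure path allocates
nothing and needs no devm_kfree(). A generic sketch of the fill-then-dup
idiom (variable names are illustrative):

    struct resource tmp_res, *res;

    if (of_pci_range_to_resource(&range, dev_node, &tmp_res))
            continue;               /* parse failed: nothing to undo */

    res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
    if (!res)
            return -ENOMEM;         /* devm frees the copy on detach */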
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 89ee6a2b6eb8..738e3546abb1 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -403,24 +403,7 @@ bool pciehp_is_native(struct pci_dev *bridge)
*/
bool shpchp_is_native(struct pci_dev *bridge)
{
- const struct pci_host_bridge *host;
-
- if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
- return false;
-
- /*
- * It is assumed that AMD GOLAM chips support SHPC but they do not
- * have SHPC capability.
- */
- if (bridge->vendor == PCI_VENDOR_ID_AMD &&
- bridge->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
- return true;
-
- if (!pci_find_capability(bridge, PCI_CAP_ID_SHPC))
- return false;
-
- host = pci_find_host_bridge(bridge->bus);
- return host->native_shpc_hotplug;
+ return bridge->shpc_managed;
}
/**
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 6792292b5fc7..bef17c3fca67 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1668,7 +1668,7 @@ static int __init pci_driver_init(void)
if (ret)
return ret;
#endif
-
+ dma_debug_add_bus(&pci_bus_type);
return 0;
}
postcore_initcall(pci_driver_init);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 0c4653c1d2ce..9ecfe13157c0 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -23,7 +23,6 @@
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/security.h>
-#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
@@ -1449,7 +1448,9 @@ static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
if (val != 1)
return -EINVAL;
+ pm_runtime_get_sync(dev);
result = pci_reset_function(pdev);
+ pm_runtime_put(dev);
if (result < 0)
return result;
@@ -1746,6 +1747,9 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
#endif
&pci_bridge_attr_group,
&pcie_dev_attr_group,
+#ifdef CONFIG_PCIEAER
+ &aer_stats_attr_group,
+#endif
NULL,
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 316496e99da9..29ff9619b5fa 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -23,7 +23,6 @@
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
-#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
@@ -115,6 +114,9 @@ static bool pcie_ari_disabled;
/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;
+/* If set, the PCI config space of each device is printed during boot. */
+bool pci_early_dump;
+
bool pci_ats_disabled(void)
{
return pcie_ats_disabled;
@@ -191,6 +193,168 @@ void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
+/**
+ * pci_dev_str_match_path - test if a path string matches a device
+ * @dev: the PCI device to test
+ * @p: string to match the device against
+ * @endptr: pointer to the string after the match
+ *
+ * Test if a string (typically from a kernel parameter) formatted as a
+ * path of device/function addresses matches a PCI device. The string must
+ * be of the form:
+ *
+ * [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
+ *
+ * A path for a device can be obtained using 'lspci -t'. Using a path
+ * is more robust against bus renumbering than using only a single bus,
+ * device and function address.
+ *
+ * Returns 1 if the string matches the device, 0 if it does not and
+ * a negative error code if it fails to parse the string.
+ */
+static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
+ const char **endptr)
+{
+ int ret;
+ int seg, bus, slot, func;
+ char *wpath, *p;
+ char end;
+
+ *endptr = strchrnul(path, ';');
+
+ wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
+ if (!wpath)
+ return -ENOMEM;
+
+ while (1) {
+ p = strrchr(wpath, '/');
+ if (!p)
+ break;
+ ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
+ if (ret != 2) {
+ ret = -EINVAL;
+ goto free_and_exit;
+ }
+
+ if (dev->devfn != PCI_DEVFN(slot, func)) {
+ ret = 0;
+ goto free_and_exit;
+ }
+
+ /*
+ * Note: we don't need to get a reference to the upstream
+ * bridge because we hold a reference to the top level
+ * device which should hold a reference to the bridge,
+ * and so on.
+ */
+ dev = pci_upstream_bridge(dev);
+ if (!dev) {
+ ret = 0;
+ goto free_and_exit;
+ }
+
+ *p = 0;
+ }
+
+ ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
+ &func, &end);
+ if (ret != 4) {
+ seg = 0;
+ ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
+ if (ret != 3) {
+ ret = -EINVAL;
+ goto free_and_exit;
+ }
+ }
+
+ ret = (seg == pci_domain_nr(dev->bus) &&
+ bus == dev->bus->number &&
+ dev->devfn == PCI_DEVFN(slot, func));
+
+free_and_exit:
+ kfree(wpath);
+ return ret;
+}
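The matcher consumes devfn components from the tail of the path while
walking upward through pci_upstream_bridge(); an illustrative trace for the
made-up path "0000:00:1c.0/00.0", starting at the endpoint:

    /* wpath = "0000:00:1c.0/00.0", dev = endpoint at devfn 00.0
     * 1. strrchr() finds "/00.0"; it matches dev->devfn, so
     *    dev = pci_upstream_bridge(dev) (the 00:1c.0 bridge) and
     *    the path is truncated to "0000:00:1c.0"
     * 2. no '/' remains; sscanf() yields seg 0, bus 00, devfn 1c.0,
     *    which is compared against the bridge
     * 3. everything matched, so the function returns 1
     */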
+
+/**
+ * pci_dev_str_match - test if a string matches a device
+ * @dev: the PCI device to test
+ * @p: string to match the device against
+ * @endptr: pointer to the string after the match
+ *
+ * Test if a string (typically from a kernel parameter) matches a specified
+ * PCI device. The string may be of one of the following formats:
+ *
+ * [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
+ * pci:<vendor>:<device>[:<subvendor>:<subdevice>]
+ *
+ * The first format specifies a PCI bus/device/function address which
+ * may change if new hardware is inserted, if motherboard firmware changes,
+ * or due to changes caused by kernel parameters. If the domain is
+ * left unspecified, it is taken to be 0. In order to be robust against
+ * bus renumbering issues, a path of PCI device/function numbers may be used
+ * to address the specific device. The path for a device can be determined
+ * through the use of 'lspci -t'.
+ *
+ * The second format matches devices using IDs in the configuration
+ * space which may match multiple devices in the system. A value of 0
+ * for any field will match all devices. (Note: this differs from
+ * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
+ * legacy reasons and convenience so users don't have to specify
+ * FFFFFFFFs on the command line.)
+ *
+ * Returns 1 if the string matches the device, 0 if it does not and
+ * a negative error code if the string cannot be parsed.
+ */
+static int pci_dev_str_match(struct pci_dev *dev, const char *p,
+ const char **endptr)
+{
+ int ret;
+ int count;
+ unsigned short vendor, device, subsystem_vendor, subsystem_device;
+
+ if (strncmp(p, "pci:", 4) == 0) {
+ /* PCI vendor/device (subvendor/subdevice) IDs are specified */
+ p += 4;
+ ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
+ &subsystem_vendor, &subsystem_device, &count);
+ if (ret != 4) {
+ ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
+ if (ret != 2)
+ return -EINVAL;
+
+ subsystem_vendor = 0;
+ subsystem_device = 0;
+ }
+
+ p += count;
+
+ if ((!vendor || vendor == dev->vendor) &&
+ (!device || device == dev->device) &&
+ (!subsystem_vendor ||
+ subsystem_vendor == dev->subsystem_vendor) &&
+ (!subsystem_device ||
+ subsystem_device == dev->subsystem_device))
+ goto found;
+ } else {
+ /*
+ * PCI Bus, Device, Function IDs are specified
+ * (optionally, may include a path of devfns following it)
+ */
+ ret = pci_dev_str_match_path(dev, p, &p);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ goto found;
+ }
+
+ *endptr = p;
+ return 0;
+
+found:
+ *endptr = p;
+ return 1;
+}
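Both formats feed the kernel parameters wired up later in this patch; some
illustrative command-line uses (addresses and IDs are made up; entries are
separated by ';' because pci= options are themselves comma-separated):

    pci=disable_acs_redir=0000:00:1c.0/00.0;pci:8086:1533
    pci=resource_alignment=20@pci:8086:1533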
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
u8 pos, int cap, int *ttl)
@@ -1171,6 +1335,33 @@ static void pci_restore_config_space(struct pci_dev *pdev)
}
}
+static void pci_restore_rebar_state(struct pci_dev *pdev)
+{
+ unsigned int pos, nbars, i;
+ u32 ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+ if (!pos)
+ return;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ for (i = 0; i < nbars; i++, pos += 8) {
+ struct resource *res;
+ int bar_idx, size;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
+ res = pdev->resource + bar_idx;
+ size = order_base_2((resource_size(res) >> 20) | 1) - 1;
+ ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
+ ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
+ pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
+ }
+}
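The size computation recovers the REBAR encoding (a value of n selects a
2^(n+20) byte BAR) from the restored resource length; a worked example,
assuming a 256 MB BAR:

    /* resource_size(res)      = 0x10000000 (256 MB)
     * 256 MB >> 20            = 256
     * 256 | 1                 = 257  (the |1 avoids order_base_2(0))
     * order_base_2(257)       = 9    (rounds up to 512 = 2^9)
     * 9 - 1                   = 8 -> 2^(8+20) bytes = 256 MB
     */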
+
/**
* pci_restore_state - Restore the saved state of a PCI device
* @dev: - PCI device that we're dealing with
@@ -1186,6 +1377,7 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_pri_state(dev);
pci_restore_ats_state(dev);
pci_restore_vc_state(dev);
+ pci_restore_rebar_state(dev);
pci_cleanup_aer_error_status_regs(dev);
@@ -2045,6 +2237,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
case PCI_D2:
if (pci_no_d1d2(dev))
break;
+ /* else: fall through */
default:
target_state = state;
}
@@ -2290,7 +2483,7 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
* @bridge: Bridge to check
*
* This function checks if it is possible to move the bridge to D3.
- * Currently we only allow D3 for recent enough PCIe ports.
+ * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
*/
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
@@ -2305,18 +2498,27 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
return false;
/*
- * Hotplug interrupts cannot be delivered if the link is down,
- * so parents of a hotplug port must stay awake. In addition,
- * hotplug ports handled by firmware in System Management Mode
+ * Hotplug ports handled by firmware in System Management Mode
* may not be put into D3 by the OS (Thunderbolt on non-Macs).
- * For simplicity, disallow in general for now.
*/
- if (bridge->is_hotplug_bridge)
+ if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
return false;
if (pci_bridge_d3_force)
return true;
+ /* Even the oldest 2010 Thunderbolt controller supports D3. */
+ if (bridge->is_thunderbolt)
+ return true;
+
+ /*
+ * Hotplug ports handled natively by the OS were not validated
+ * by vendors for runtime D3 at least until 2018 because there
+ * was no OS support.
+ */
+ if (bridge->is_hotplug_bridge)
+ return false;
+
/*
* It should be safe to put PCIe ports from 2015 or newer
* to D3.
@@ -2820,6 +3022,66 @@ void pci_request_acs(void)
pci_acs_enable = 1;
}
+static const char *disable_acs_redir_param;
+
+/**
+ * pci_disable_acs_redir - disable ACS redirect capabilities
+ * @dev: the PCI device
+ *
+ * Disable ACS redirect, but only for the devices specified in the
+ * disable_acs_redir kernel parameter.
+ */
+static void pci_disable_acs_redir(struct pci_dev *dev)
+{
+ int ret = 0;
+ const char *p;
+ int pos;
+ u16 ctrl;
+
+ if (!disable_acs_redir_param)
+ return;
+
+ p = disable_acs_redir_param;
+ while (*p) {
+ ret = pci_dev_str_match(dev, p, &p);
+ if (ret < 0) {
+ pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
+ disable_acs_redir_param);
+
+ break;
+ } else if (ret == 1) {
+ /* Found a match */
+ break;
+ }
+
+ if (*p != ';' && *p != ',') {
+ /* End of param or invalid format */
+ break;
+ }
+ p++;
+ }
+
+ if (ret != 1)
+ return;
+
+ if (!pci_dev_specific_disable_acs_redir(dev))
+ return;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ if (!pos) {
+ pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
+ return;
+ }
+
+ pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+
+ /* P2P Request & Completion Redirect */
+ ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
+
+ pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+
+ pci_info(dev, "disabled ACS redirect\n");
+}
+
/**
* pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
* @dev: the PCI device
@@ -2859,12 +3121,22 @@ static void pci_std_enable_acs(struct pci_dev *dev)
void pci_enable_acs(struct pci_dev *dev)
{
if (!pci_acs_enable)
- return;
+ goto disable_acs_redir;
if (!pci_dev_specific_enable_acs(dev))
- return;
+ goto disable_acs_redir;
pci_std_enable_acs(dev);
+
+disable_acs_redir:
+ /*
+ * Note: pci_disable_acs_redir() must be called even if ACS was not
+ * enabled by the kernel because it may have been enabled by
+ * platform firmware. So if we are told to disable it, we should
+ * always disable it after setting the kernel's default
+ * preferences.
+ */
+ pci_disable_acs_redir(dev);
}
static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
@@ -3070,7 +3342,7 @@ int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
return pos;
pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
- return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> 8;
+ return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
}
/**
@@ -3093,7 +3365,7 @@ int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
- ctrl |= size << 8;
+ ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
return 0;
}
@@ -4077,7 +4349,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
* Returns true if the device advertises support for PCIe function level
* resets.
*/
-static bool pcie_has_flr(struct pci_dev *dev)
+bool pcie_has_flr(struct pci_dev *dev)
{
u32 cap;
@@ -4087,6 +4359,7 @@ static bool pcie_has_flr(struct pci_dev *dev)
pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
return cap & PCI_EXP_DEVCAP_FLR;
}
+EXPORT_SYMBOL_GPL(pcie_has_flr);
/**
* pcie_flr - initiate a PCIe function level reset
@@ -4262,19 +4535,18 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
}
/**
- * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
+ * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
* @dev: Bridge device
*
* Use the bridge control register to assert reset on the secondary bus.
* Devices on the secondary bus are left in power-on state.
*/
-int pci_reset_bridge_secondary_bus(struct pci_dev *dev)
+int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
pcibios_reset_secondary_bus(dev);
return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
}
-EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
@@ -4291,9 +4563,7 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
if (probe)
return 0;
- pci_reset_bridge_secondary_bus(dev->bus->self);
-
- return 0;
+ return pci_bridge_secondary_bus_reset(dev->bus->self);
}
static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
@@ -4825,7 +5095,7 @@ int pci_probe_reset_slot(struct pci_slot *slot)
EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
/**
- * pci_reset_slot - reset a PCI slot
+ * __pci_reset_slot - Try to reset a PCI slot
* @slot: PCI slot to reset
*
* A PCI bus may host multiple slots; each slot may support a reset mechanism
@@ -4837,33 +5107,9 @@ EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
* through this function. PCI config space of all devices in the slot and
* behind the slot is saved before and restored after reset.
*
- * Return 0 on success, non-zero on error.
- */
-int pci_reset_slot(struct pci_slot *slot)
-{
- int rc;
-
- rc = pci_slot_reset(slot, 1);
- if (rc)
- return rc;
-
- pci_slot_save_and_disable(slot);
-
- rc = pci_slot_reset(slot, 0);
-
- pci_slot_restore(slot);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(pci_reset_slot);
-
-/**
- * pci_try_reset_slot - Try to reset a PCI slot
- * @slot: PCI slot to reset
- *
* Same as above except return -EAGAIN if the slot cannot be locked
*/
-int pci_try_reset_slot(struct pci_slot *slot)
+static int __pci_reset_slot(struct pci_slot *slot)
{
int rc;
@@ -4884,10 +5130,11 @@ int pci_try_reset_slot(struct pci_slot *slot)
return rc;
}
-EXPORT_SYMBOL_GPL(pci_try_reset_slot);
static int pci_bus_reset(struct pci_bus *bus, int probe)
{
+ int ret;
+
if (!bus->self || !pci_bus_resetable(bus))
return -ENOTTY;
@@ -4898,11 +5145,11 @@ static int pci_bus_reset(struct pci_bus *bus, int probe)
might_sleep();
- pci_reset_bridge_secondary_bus(bus->self);
+ ret = pci_bridge_secondary_bus_reset(bus->self);
pci_bus_unlock(bus);
- return 0;
+ return ret;
}
/**
@@ -4918,15 +5165,12 @@ int pci_probe_reset_bus(struct pci_bus *bus)
EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
/**
- * pci_reset_bus - reset a PCI bus
+ * __pci_reset_bus - Try to reset a PCI bus
* @bus: top level PCI bus to reset
*
- * Do a bus reset on the given bus and any subordinate buses, saving
- * and restoring state of all devices.
- *
- * Return 0 on success, non-zero on error.
+ * Same as above except return -EAGAIN if the bus cannot be locked
*/
-int pci_reset_bus(struct pci_bus *bus)
+static int __pci_reset_bus(struct pci_bus *bus)
{
int rc;
@@ -4936,42 +5180,30 @@ int pci_reset_bus(struct pci_bus *bus)
pci_bus_save_and_disable(bus);
- rc = pci_bus_reset(bus, 0);
+ if (pci_bus_trylock(bus)) {
+ might_sleep();
+ rc = pci_bridge_secondary_bus_reset(bus->self);
+ pci_bus_unlock(bus);
+ } else
+ rc = -EAGAIN;
pci_bus_restore(bus);
return rc;
}
-EXPORT_SYMBOL_GPL(pci_reset_bus);
/**
- * pci_try_reset_bus - Try to reset a PCI bus
- * @bus: top level PCI bus to reset
+ * pci_reset_bus - Try to reset a PCI bus
+ * @pdev: top level PCI device to reset via slot/bus
*
* Same as above except return -EAGAIN if the bus cannot be locked
*/
-int pci_try_reset_bus(struct pci_bus *bus)
+int pci_reset_bus(struct pci_dev *pdev)
{
- int rc;
-
- rc = pci_bus_reset(bus, 1);
- if (rc)
- return rc;
-
- pci_bus_save_and_disable(bus);
-
- if (pci_bus_trylock(bus)) {
- might_sleep();
- pci_reset_bridge_secondary_bus(bus->self);
- pci_bus_unlock(bus);
- } else
- rc = -EAGAIN;
-
- pci_bus_restore(bus);
-
- return rc;
+ return pci_probe_reset_slot(pdev->slot) ?
+ __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
}
-EXPORT_SYMBOL_GPL(pci_try_reset_bus);
+EXPORT_SYMBOL_GPL(pci_reset_bus);
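Callers that used the removed pci_reset_slot()/pci_try_reset_slot() or
pci_try_reset_bus() now go through the single pci_dev based entry point; a
hedged sketch (function name hypothetical):

    static int mydrv_do_reset(struct pci_dev *pdev)
    {
            /* resets pdev's slot when one is probeable, else its bus;
             * returns -EAGAIN if the slot/bus cannot be locked */
            return pci_reset_bus(pdev);
    }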
/**
* pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
@@ -5260,6 +5492,7 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
return PCI_SPEED_UNKNOWN;
}
+EXPORT_SYMBOL(pcie_get_speed_cap);
/**
* pcie_get_width_cap - query for the PCI device's link width capability
@@ -5278,6 +5511,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
return PCIE_LNK_WIDTH_UNKNOWN;
}
+EXPORT_SYMBOL(pcie_get_width_cap);
/**
* pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
@@ -5302,14 +5536,16 @@ u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
}
/**
- * pcie_print_link_status - Report the PCI device's link speed and width
+ * __pcie_print_link_status - Report the PCI device's link speed and width
* @dev: PCI device to query
+ * @verbose: Print info even when enough bandwidth is available
*
- * Report the available bandwidth at the device. If this is less than the
- * device is capable of, report the device's maximum possible bandwidth and
- * the upstream link that limits its performance to less than that.
+ * If the available bandwidth at the device is less than the device is
+ * capable of, report the device's maximum possible bandwidth and the
+ * upstream link that limits its performance. If @verbose, always print
+ * the available bandwidth, even if the device isn't constrained.
*/
-void pcie_print_link_status(struct pci_dev *dev)
+void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
{
enum pcie_link_width width, width_cap;
enum pci_bus_speed speed, speed_cap;
@@ -5319,11 +5555,11 @@ void pcie_print_link_status(struct pci_dev *dev)
bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
- if (bw_avail >= bw_cap)
+ if (bw_avail >= bw_cap && verbose)
pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
bw_cap / 1000, bw_cap % 1000,
PCIE_SPEED2STR(speed_cap), width_cap);
- else
+ else if (bw_avail < bw_cap)
pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
bw_avail / 1000, bw_avail % 1000,
PCIE_SPEED2STR(speed), width,
@@ -5331,6 +5567,17 @@ void pcie_print_link_status(struct pci_dev *dev)
bw_cap / 1000, bw_cap % 1000,
PCIE_SPEED2STR(speed_cap), width_cap);
}
+
+/**
+ * pcie_print_link_status - Report the PCI device's link speed and width
+ * @dev: PCI device to query
+ *
+ * Report the available bandwidth at the device.
+ */
+void pcie_print_link_status(struct pci_dev *dev)
+{
+ __pcie_print_link_status(dev, true);
+}
EXPORT_SYMBOL(pcie_print_link_status);
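Drivers that want the bandwidth logged unconditionally keep calling the
wrapper; a hedged probe-time sketch (driver names hypothetical):

    static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            int err = pcim_enable_device(pdev);

            if (err)
                    return err;
            /* always prints, since the wrapper passes verbose = true */
            pcie_print_link_status(pdev);
            return 0;
    }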
/**
@@ -5425,8 +5672,19 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
* @dev: the PCI device for which alias is added
* @devfn: alias slot and function
*
- * This helper encodes 8-bit devfn as bit number in dma_alias_mask.
- * It should be called early, preferably as PCI fixup header quirk.
+ * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
+ * which is used to program permissible bus-devfn source addresses for DMA
+ * requests in an IOMMU. These aliases factor into IOMMU group creation
+ * and are useful for devices generating DMA requests beyond or different
+ * from their logical bus-devfn. Examples include device quirks where the
+ * device simply uses the wrong devfn, as well as non-transparent bridges
+ * where the alias may be a proxy for devices in another domain.
+ *
+ * IOMMU group creation is performed during device discovery or addition,
+ * prior to any potential DMA mapping and therefore prior to driver probing
+ * (especially for userspace assigned devices where IOMMU group definition
+ * cannot be left as a userspace activity). DMA aliases should therefore
+ * be configured via quirks, such as the PCI fixup header quirk.
*/
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
{
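Since the updated kernel-doc says aliases should be configured from a header
fixup quirk, a hedged sketch of such a quirk (vendor/device IDs and the
aliased function are made up):

    static void quirk_dma_alias(struct pci_dev *dev)
    {
            /* device also emits DMA as function 1 of its own slot */
            pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
    }
    DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_dma_alias);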
@@ -5492,10 +5750,10 @@ static DEFINE_SPINLOCK(resource_alignment_lock);
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
bool *resize)
{
- int seg, bus, slot, func, align_order, count;
- unsigned short vendor, device, subsystem_vendor, subsystem_device;
+ int align_order, count;
resource_size_t align = pcibios_default_alignment();
- char *p;
+ const char *p;
+ int ret;
spin_lock(&resource_alignment_lock);
p = resource_alignment_param;
@@ -5515,58 +5773,21 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
} else {
align_order = -1;
}
- if (strncmp(p, "pci:", 4) == 0) {
- /* PCI vendor/device (subvendor/subdevice) ids are specified */
- p += 4;
- if (sscanf(p, "%hx:%hx:%hx:%hx%n",
- &vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
- if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
- printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
- p);
- break;
- }
- subsystem_vendor = subsystem_device = 0;
- }
- p += count;
- if ((!vendor || (vendor == dev->vendor)) &&
- (!device || (device == dev->device)) &&
- (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
- (!subsystem_device || (subsystem_device == dev->subsystem_device))) {
- *resize = true;
- if (align_order == -1)
- align = PAGE_SIZE;
- else
- align = 1 << align_order;
- /* Found */
- break;
- }
- }
- else {
- if (sscanf(p, "%x:%x:%x.%x%n",
- &seg, &bus, &slot, &func, &count) != 4) {
- seg = 0;
- if (sscanf(p, "%x:%x.%x%n",
- &bus, &slot, &func, &count) != 3) {
- /* Invalid format */
- printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
- p);
- break;
- }
- }
- p += count;
- if (seg == pci_domain_nr(dev->bus) &&
- bus == dev->bus->number &&
- slot == PCI_SLOT(dev->devfn) &&
- func == PCI_FUNC(dev->devfn)) {
- *resize = true;
- if (align_order == -1)
- align = PAGE_SIZE;
- else
- align = 1 << align_order;
- /* Found */
- break;
- }
+
+ ret = pci_dev_str_match(dev, p, &p);
+ if (ret == 1) {
+ *resize = true;
+ if (align_order == -1)
+ align = PAGE_SIZE;
+ else
+ align = 1 << align_order;
+ break;
+ } else if (ret < 0) {
+ pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
+ p);
+ break;
}
+
if (*p != ';' && *p != ',') {
/* End of param or invalid format */
break;
@@ -5843,6 +6064,8 @@ static int __init pci_setup(char *str)
pcie_ats_disabled = true;
} else if (!strcmp(str, "noaer")) {
pci_no_aer();
+ } else if (!strcmp(str, "earlydump")) {
+ pci_early_dump = true;
} else if (!strncmp(str, "realloc=", 8)) {
pci_realloc_get_opt(str + 8);
} else if (!strncmp(str, "realloc", 7)) {
@@ -5879,6 +6102,8 @@ static int __init pci_setup(char *str)
pcie_bus_config = PCIE_BUS_PEER2PEER;
} else if (!strncmp(str, "pcie_scan_all", 13)) {
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
+ } else if (!strncmp(str, "disable_acs_redir=", 18)) {
+ disable_acs_redir_param = str + 18;
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 08817253c8a2..6e0d1528d471 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -7,6 +7,7 @@
#define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */
extern const unsigned char pcie_link_speed[];
+extern bool pci_early_dump;
bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
@@ -33,6 +34,7 @@ int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
enum pci_mmap_api mmap_api);
int pci_probe_reset_function(struct pci_dev *dev);
+int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
/**
* struct pci_platform_pm_ops - Firmware PM callbacks
@@ -225,6 +227,10 @@ enum pci_bar_type {
int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int crs_timeout);
+bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
+ int crs_timeout);
+int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout);
+
int pci_setup_device(struct pci_dev *dev);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
@@ -259,6 +265,7 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
enum pcie_link_width *width);
+void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
/* Single Root I/O Virtualization */
struct pci_sriov {
@@ -311,6 +318,34 @@ static inline bool pci_dev_is_added(const struct pci_dev *dev)
return test_bit(PCI_DEV_ADDED, &dev->priv_flags);
}
+#ifdef CONFIG_PCIEAER
+#include <linux/aer.h>
+
+#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
+
+struct aer_err_info {
+ struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
+ int error_dev_num;
+
+ unsigned int id:16;
+
+ unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
+ unsigned int __pad1:5;
+ unsigned int multi_error_valid:1;
+
+ unsigned int first_error:5;
+ unsigned int __pad2:2;
+ unsigned int tlp_header_valid:1;
+
+ unsigned int status; /* COR/UNCOR Error Status */
+ unsigned int mask; /* COR/UNCOR Error Mask */
+ struct aer_header_log_regs tlp; /* TLP Header */
+};
+
+int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
+void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+#endif /* CONFIG_PCIEAER */
+
#ifdef CONFIG_PCI_ATS
void pci_restore_ats_state(struct pci_dev *dev);
#else
@@ -367,6 +402,25 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
}
void pci_enable_acs(struct pci_dev *dev);
+#ifdef CONFIG_PCI_QUIRKS
+int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
+int pci_dev_specific_enable_acs(struct pci_dev *dev);
+int pci_dev_specific_disable_acs_redir(struct pci_dev *dev);
+#else
+static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
+ u16 acs_flags)
+{
+ return -ENOTTY;
+}
+static inline int pci_dev_specific_enable_acs(struct pci_dev *dev)
+{
+ return -ENOTTY;
+}
+static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
+{
+ return -ENOTTY;
+}
+#endif
/* PCI error reporting and recovery */
void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service);
@@ -467,4 +521,19 @@ static inline int devm_of_pci_get_host_bridge_resources(struct device *dev,
}
#endif
+#ifdef CONFIG_PCIEAER
+void pci_no_aer(void);
+void pci_aer_init(struct pci_dev *dev);
+void pci_aer_exit(struct pci_dev *dev);
+extern const struct attribute_group aer_stats_attr_group;
+void pci_aer_clear_fatal_status(struct pci_dev *dev);
+void pci_aer_clear_device_status(struct pci_dev *dev);
+#else
+static inline void pci_no_aer(void) { }
+static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
+static inline void pci_aer_exit(struct pci_dev *d) { }
+static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
+static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
+#endif
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index a2e88386af28..83180edd6ed4 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -31,26 +31,9 @@
#include "portdrv.h"
#define AER_ERROR_SOURCES_MAX 100
-#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
-struct aer_err_info {
- struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
- int error_dev_num;
-
- unsigned int id:16;
-
- unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
- unsigned int __pad1:5;
- unsigned int multi_error_valid:1;
-
- unsigned int first_error:5;
- unsigned int __pad2:2;
- unsigned int tlp_header_valid:1;
-
- unsigned int status; /* COR/UNCOR Error Status */
- unsigned int mask; /* COR/UNCOR Error Mask */
- struct aer_header_log_regs tlp; /* TLP Header */
-};
+#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
+#define AER_MAX_TYPEOF_UNCOR_ERRS 26 /* as per PCI_ERR_UNCOR_STATUS */
struct aer_err_source {
unsigned int status;
@@ -76,6 +59,42 @@ struct aer_rpc {
*/
};
+/* AER stats for the device */
+struct aer_stats {
+
+ /*
+ * Fields for all AER capable devices. They indicate the errors
+ * "as seen by this device". Note that this may mean that if an
+ * end point is causing problems, the AER counters may increment
+ * at its link partner (e.g. root port) because the errors will be
+ * "seen" by the link partner and not the the problematic end point
+ * itself (which may report all counters as 0 as it never saw any
+ * problems).
+ */
+ /* Counters for different types of correctable errors */
+ u64 dev_cor_errs[AER_MAX_TYPEOF_COR_ERRS];
+ /* Counters for different types of fatal uncorrectable errors */
+ u64 dev_fatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
+ /* Counters for different types of nonfatal uncorrectable errors */
+ u64 dev_nonfatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
+ /* Total number of ERR_COR sent by this device */
+ u64 dev_total_cor_errs;
+ /* Total number of ERR_FATAL sent by this device */
+ u64 dev_total_fatal_errs;
+ /* Total number of ERR_NONFATAL sent by this device */
+ u64 dev_total_nonfatal_errs;
+
+ /*
+ * Fields for Root Ports and Root Complex Event Collectors only; these
+ * indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
+ * messages received by the root port / event collector, INCLUDING the
+ * ones that are generated internally (by the Root Port itself).
+ */
+ u64 rootport_total_cor_errs;
+ u64 rootport_total_fatal_errs;
+ u64 rootport_total_nonfatal_errs;
+};
+
#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
PCI_ERR_UNC_ECRC| \
PCI_ERR_UNC_UNSUP| \
@@ -303,12 +322,13 @@ int pcie_aer_get_firmware_first(struct pci_dev *dev)
if (!pci_is_pcie(dev))
return 0;
+ if (pcie_ports_native)
+ return 0;
+
if (!dev->__aer_firmware_first_valid)
aer_set_firmware_first(dev);
return dev->__aer_firmware_first;
}
-#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
- PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
static bool aer_firmware_first;
@@ -323,6 +343,9 @@ bool aer_acpi_firmware_first(void)
.firmware_first = 0,
};
+ if (pcie_ports_native)
+ return false;
+
if (!parsed) {
apei_hest_parse(aer_hest_parse, &info);
aer_firmware_first = info.firmware_first;
@@ -357,16 +380,30 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
+void pci_aer_clear_device_status(struct pci_dev *dev)
+{
+ u16 sta;
+
+ pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
+ pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
+}
+
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
int pos;
- u32 status;
+ u32 status, sev;
pos = dev->aer_cap;
if (!pos)
return -EIO;
+ if (pcie_aer_get_firmware_first(dev))
+ return -EIO;
+
+ /* Clear status bits for ERR_NONFATAL errors only */
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
+ status &= ~sev;
if (status)
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
@@ -374,6 +411,26 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
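The severity register partitions uncorrectable errors: bits set in
PCI_ERR_UNCOR_SEVER are ERR_FATAL, clear bits are ERR_NONFATAL. A worked
example with assumed register values:

    /* status        = 0x00104000 (CmpltTO, bit 14 | UnsupReq, bit 20)
     * sev           = 0x00100000 (UnsupReq marked fatal)
     * status & ~sev = 0x00004000 -> this function clears only CmpltTO
     * status &  sev = 0x00100000 -> pci_aer_clear_fatal_status() clears it
     */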
+void pci_aer_clear_fatal_status(struct pci_dev *dev)
+{
+ int pos;
+ u32 status, sev;
+
+ pos = dev->aer_cap;
+ if (!pos)
+ return;
+
+ if (pcie_aer_get_firmware_first(dev))
+ return;
+
+ /* Clear status bits for ERR_FATAL errors only */
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
+ status &= sev;
+ if (status)
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+}
+
int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
{
int pos;
@@ -387,6 +444,9 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
if (!pos)
return -EIO;
+ if (pcie_aer_get_firmware_first(dev))
+ return -EIO;
+
port_type = pci_pcie_type(dev);
if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
@@ -402,10 +462,20 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
return 0;
}
-int pci_aer_init(struct pci_dev *dev)
+void pci_aer_init(struct pci_dev *dev)
{
dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
- return pci_cleanup_aer_error_status_regs(dev);
+
+ if (dev->aer_cap)
+ dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+
+ pci_cleanup_aer_error_status_regs(dev);
+}
+
+void pci_aer_exit(struct pci_dev *dev)
+{
+ kfree(dev->aer_stats);
+ dev->aer_stats = NULL;
}
#define AER_AGENT_RECEIVER 0
@@ -458,52 +528,52 @@ static const char *aer_error_layer[] = {
"Transaction Layer"
};
-static const char *aer_correctable_error_string[] = {
- "Receiver Error", /* Bit Position 0 */
+static const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS] = {
+ "RxErr", /* Bit Position 0 */
NULL,
NULL,
NULL,
NULL,
NULL,
- "Bad TLP", /* Bit Position 6 */
- "Bad DLLP", /* Bit Position 7 */
- "RELAY_NUM Rollover", /* Bit Position 8 */
+ "BadTLP", /* Bit Position 6 */
+ "BadDLLP", /* Bit Position 7 */
+ "Rollover", /* Bit Position 8 */
NULL,
NULL,
NULL,
- "Replay Timer Timeout", /* Bit Position 12 */
- "Advisory Non-Fatal", /* Bit Position 13 */
- "Corrected Internal Error", /* Bit Position 14 */
- "Header Log Overflow", /* Bit Position 15 */
+ "Timeout", /* Bit Position 12 */
+ "NonFatalErr", /* Bit Position 13 */
+ "CorrIntErr", /* Bit Position 14 */
+ "HeaderOF", /* Bit Position 15 */
};
-static const char *aer_uncorrectable_error_string[] = {
+static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = {
"Undefined", /* Bit Position 0 */
NULL,
NULL,
NULL,
- "Data Link Protocol", /* Bit Position 4 */
- "Surprise Down Error", /* Bit Position 5 */
+ "DLP", /* Bit Position 4 */
+ "SDES", /* Bit Position 5 */
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
- "Poisoned TLP", /* Bit Position 12 */
- "Flow Control Protocol", /* Bit Position 13 */
- "Completion Timeout", /* Bit Position 14 */
- "Completer Abort", /* Bit Position 15 */
- "Unexpected Completion", /* Bit Position 16 */
- "Receiver Overflow", /* Bit Position 17 */
- "Malformed TLP", /* Bit Position 18 */
+ "TLP", /* Bit Position 12 */
+ "FCP", /* Bit Position 13 */
+ "CmpltTO", /* Bit Position 14 */
+ "CmpltAbrt", /* Bit Position 15 */
+ "UnxCmplt", /* Bit Position 16 */
+ "RxOF", /* Bit Position 17 */
+ "MalfTLP", /* Bit Position 18 */
"ECRC", /* Bit Position 19 */
- "Unsupported Request", /* Bit Position 20 */
- "ACS Violation", /* Bit Position 21 */
- "Uncorrectable Internal Error", /* Bit Position 22 */
- "MC Blocked TLP", /* Bit Position 23 */
- "AtomicOp Egress Blocked", /* Bit Position 24 */
- "TLP Prefix Blocked Error", /* Bit Position 25 */
+ "UnsupReq", /* Bit Position 20 */
+ "ACSViol", /* Bit Position 21 */
+ "UncorrIntErr", /* Bit Position 22 */
+ "BlockedTLP", /* Bit Position 23 */
+ "AtomicOpBlocked", /* Bit Position 24 */
+ "TLPBlockedErr", /* Bit Position 25 */
};
static const char *aer_agent_string[] = {
@@ -513,6 +583,144 @@ static const char *aer_agent_string[] = {
"Transmitter ID"
};
+#define aer_stats_dev_attr(name, stats_array, strings_array, \
+ total_string, total_field) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ unsigned int i; \
+ char *str = buf; \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ u64 *stats = pdev->aer_stats->stats_array; \
+ \
+ for (i = 0; i < ARRAY_SIZE(strings_array); i++) { \
+ if (strings_array[i]) \
+ str += sprintf(str, "%s %llu\n", \
+ strings_array[i], stats[i]); \
+ else if (stats[i]) \
+ str += sprintf(str, #stats_array "_bit[%d] %llu\n",\
+ i, stats[i]); \
+ } \
+ str += sprintf(str, "TOTAL_%s %llu\n", total_string, \
+ pdev->aer_stats->total_field); \
+ return str-buf; \
+} \
+static DEVICE_ATTR_RO(name)
+
+aer_stats_dev_attr(aer_dev_correctable, dev_cor_errs,
+ aer_correctable_error_string, "ERR_COR",
+ dev_total_cor_errs);
+aer_stats_dev_attr(aer_dev_fatal, dev_fatal_errs,
+ aer_uncorrectable_error_string, "ERR_FATAL",
+ dev_total_fatal_errs);
+aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
+ aer_uncorrectable_error_string, "ERR_NONFATAL",
+ dev_total_nonfatal_errs);
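For a device with aer_stats allocated, each generated attribute prints one
line per named status bit plus a running total; an illustrative (made-up)
read of the correctable counters:

    $ cat /sys/bus/pci/devices/0000:00:1c.0/aer_dev_correctable
    RxErr 0
    BadTLP 3
    BadDLLP 0
    Rollover 0
    Timeout 0
    NonFatalErr 1
    CorrIntErr 0
    HeaderOF 0
    TOTAL_ERR_COR 4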
+
+#define aer_stats_rootport_attr(name, field) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ return sprintf(buf, "%llu\n", pdev->aer_stats->field); \
+} \
+static DEVICE_ATTR_RO(name)
+
+aer_stats_rootport_attr(aer_rootport_total_err_cor,
+ rootport_total_cor_errs);
+aer_stats_rootport_attr(aer_rootport_total_err_fatal,
+ rootport_total_fatal_errs);
+aer_stats_rootport_attr(aer_rootport_total_err_nonfatal,
+ rootport_total_nonfatal_errs);
+
+static struct attribute *aer_stats_attrs[] __ro_after_init = {
+ &dev_attr_aer_dev_correctable.attr,
+ &dev_attr_aer_dev_fatal.attr,
+ &dev_attr_aer_dev_nonfatal.attr,
+ &dev_attr_aer_rootport_total_err_cor.attr,
+ &dev_attr_aer_rootport_total_err_fatal.attr,
+ &dev_attr_aer_rootport_total_err_nonfatal.attr,
+ NULL
+};
+
+static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (!pdev->aer_stats)
+ return 0;
+
+ if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
+ a == &dev_attr_aer_rootport_total_err_fatal.attr ||
+ a == &dev_attr_aer_rootport_total_err_nonfatal.attr) &&
+ pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group aer_stats_attr_group = {
+ .attrs = aer_stats_attrs,
+ .is_visible = aer_stats_attrs_are_visible,
+};
+
+static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
+ struct aer_err_info *info)
+{
+ int status, i, max = -1;
+ u64 *counter = NULL;
+ struct aer_stats *aer_stats = pdev->aer_stats;
+
+ if (!aer_stats)
+ return;
+
+ switch (info->severity) {
+ case AER_CORRECTABLE:
+ aer_stats->dev_total_cor_errs++;
+ counter = &aer_stats->dev_cor_errs[0];
+ max = AER_MAX_TYPEOF_COR_ERRS;
+ break;
+ case AER_NONFATAL:
+ aer_stats->dev_total_nonfatal_errs++;
+ counter = &aer_stats->dev_nonfatal_errs[0];
+ max = AER_MAX_TYPEOF_UNCOR_ERRS;
+ break;
+ case AER_FATAL:
+ aer_stats->dev_total_fatal_errs++;
+ counter = &aer_stats->dev_fatal_errs[0];
+ max = AER_MAX_TYPEOF_UNCOR_ERRS;
+ break;
+ }
+
+ status = (info->status & ~info->mask);
+ for (i = 0; i < max; i++)
+ if (status & (1 << i))
+ counter[i]++;
+}
+
+static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
+ struct aer_err_source *e_src)
+{
+ struct aer_stats *aer_stats = pdev->aer_stats;
+
+ if (!aer_stats)
+ return;
+
+ if (e_src->status & PCI_ERR_ROOT_COR_RCV)
+ aer_stats->rootport_total_cor_errs++;
+
+ if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
+ if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
+ aer_stats->rootport_total_fatal_errs++;
+ else
+ aer_stats->rootport_total_nonfatal_errs++;
+ }
+}
+
static void __print_tlp_header(struct pci_dev *dev,
struct aer_header_log_regs *t)
{
@@ -545,9 +753,10 @@ static void __aer_print_error(struct pci_dev *dev,
pci_err(dev, " [%2d] Unknown Error Bit%s\n",
i, info->first_error == i ? " (First)" : "");
}
+ pci_dev_aer_stats_incr(dev, info);
}
-static void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
+void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
int layer, agent;
int id = ((dev->bus->number << 8) | dev->devfn);
@@ -799,6 +1008,7 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
info->status);
+ pci_aer_clear_device_status(dev);
} else if (info->severity == AER_NONFATAL)
pcie_do_nonfatal_recovery(dev);
else if (info->severity == AER_FATAL)
@@ -876,7 +1086,7 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
#endif
/**
- * get_device_error_info - read error status from dev and store it to info
+ * aer_get_device_error_info - read error status from dev and store it in info
* @dev: pointer to the device expected to have a error record
* @info: pointer to structure to store the error record
*
@@ -884,7 +1094,7 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
*
* Note that @info is reused among all error devices. Clear fields properly.
*/
-static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
+int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
int pos, temp;
@@ -942,11 +1152,11 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
/* Report all errors before handling them, so records are not lost to resets etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (get_device_error_info(e_info->dev[i], e_info))
+ if (aer_get_device_error_info(e_info->dev[i], e_info))
aer_print_error(e_info->dev[i], e_info);
}
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (get_device_error_info(e_info->dev[i], e_info))
+ if (aer_get_device_error_info(e_info->dev[i], e_info))
handle_error_source(e_info->dev[i], e_info);
}
}
@@ -962,6 +1172,8 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
struct pci_dev *pdev = rpc->rpd;
struct aer_err_info *e_info = &rpc->e_info;
+ pci_rootport_aer_stats_incr(pdev, e_src);
+
/*
* It is possible that both a correctable error and an uncorrectable
* error are logged. Report the correctable error first.
@@ -1305,6 +1517,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
u32 reg32;
int pos;
+ int rc;
pos = dev->aer_cap;
@@ -1313,7 +1526,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
- pci_reset_bridge_secondary_bus(dev);
+ rc = pci_bridge_secondary_bus_reset(dev);
pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n");
/* Clear Root Error Status */
@@ -1325,7 +1538,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
- return PCI_ERS_RESULT_RECOVERED;
+ return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
/**
@@ -1336,20 +1549,8 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
*/
static void aer_error_resume(struct pci_dev *dev)
{
- int pos;
- u32 status, mask;
- u16 reg16;
-
- /* Clean up Root device status */
- pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &reg16);
- pcie_capability_write_word(dev, PCI_EXP_DEVSTA, reg16);
-
- /* Clean AER Root Error Status */
- pos = dev->aer_cap;
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
- status &= ~mask; /* Clear corresponding nonfatal bits */
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+ pci_aer_clear_device_status(dev);
+ pci_cleanup_aer_uncorrect_error_status(dev);
}
static struct pcie_port_service_driver aerdriver = {
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index c687c817b47d..5326916715d2 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1127,11 +1127,9 @@ static int pcie_aspm_set_policy(const char *val,
if (aspm_disabled)
return -EPERM;
- for (i = 0; i < ARRAY_SIZE(policy_str); i++)
- if (!strncmp(val, policy_str[i], strlen(policy_str[i])))
- break;
- if (i >= ARRAY_SIZE(policy_str))
- return -EINVAL;
+ i = sysfs_match_string(policy_str, val);
+ if (i < 0)
+ return i;
if (i == aspm_policy)
return 0;
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 921ed979109d..f03279fc87cd 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -6,6 +6,7 @@
* Copyright (C) 2016 Intel Corp.
*/
+#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -16,10 +17,8 @@
struct dpc_dev {
struct pcie_device *dev;
- struct work_struct work;
u16 cap_pos;
bool rp_extensions;
- u32 rp_pio_status;
u8 rp_log_size;
};
@@ -65,19 +64,13 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
return 0;
}
-static void dpc_wait_link_inactive(struct dpc_dev *dpc)
-{
- struct pci_dev *pdev = dpc->dev->port;
-
- pcie_wait_for_link(pdev, false);
-}
-
static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
struct dpc_dev *dpc;
struct pcie_device *pciedev;
struct device *devdpc;
- u16 cap, ctl;
+
+ u16 cap;
/*
* DPC disables the Link automatically in hardware, so it has
@@ -92,34 +85,17 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
* Wait until the Link is inactive, then clear DPC Trigger Status
* to allow the Port to leave DPC.
*/
- dpc_wait_link_inactive(dpc);
+ pcie_wait_for_link(pdev, false);
if (dpc->rp_extensions && dpc_wait_rp_inactive(dpc))
return PCI_ERS_RESULT_DISCONNECT;
- if (dpc->rp_extensions && dpc->rp_pio_status) {
- pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS,
- dpc->rp_pio_status);
- dpc->rp_pio_status = 0;
- }
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_TRIGGER);
- pci_read_config_word(pdev, cap + PCI_EXP_DPC_CTL, &ctl);
- pci_write_config_word(pdev, cap + PCI_EXP_DPC_CTL,
- ctl | PCI_EXP_DPC_CTL_INT_EN);
-
return PCI_ERS_RESULT_RECOVERED;
}
-static void dpc_work(struct work_struct *work)
-{
- struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
- struct pci_dev *pdev = dpc->dev->port;
-
- /* We configure DPC so it only triggers on ERR_FATAL */
- pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC);
-}
static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
{
@@ -134,8 +110,6 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
dev_err(dev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n",
status, mask);
- dpc->rp_pio_status = status;
-
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc);
@@ -146,15 +120,14 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
first_error = (dpc_status & 0x1f00) >> 8;
- status &= ~mask;
for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
- if (status & (1 << i))
+ if ((status & ~mask) & (1 << i))
dev_err(dev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
first_error == i ? " (First)" : "");
}
if (dpc->rp_log_size < 4)
- return;
+ goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
&dw0);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4,
@@ -167,7 +140,7 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
dw0, dw1, dw2, dw3);
if (dpc->rp_log_size < 5)
- return;
+ goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
dev_err(dev, "RP PIO ImpSpec Log %#010x\n", log);
@@ -176,43 +149,26 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
dev_err(dev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
}
+ clear_status:
+ pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}
-static irqreturn_t dpc_irq(int irq, void *context)
+static irqreturn_t dpc_handler(int irq, void *context)
{
- struct dpc_dev *dpc = (struct dpc_dev *)context;
+ struct aer_err_info info;
+ struct dpc_dev *dpc = context;
struct pci_dev *pdev = dpc->dev->port;
struct device *dev = &dpc->dev->device;
- u16 cap = dpc->cap_pos, ctl, status, source, reason, ext_reason;
-
- pci_read_config_word(pdev, cap + PCI_EXP_DPC_CTL, &ctl);
-
- if (!(ctl & PCI_EXP_DPC_CTL_INT_EN) || ctl == (u16)(~0))
- return IRQ_NONE;
+ u16 cap = dpc->cap_pos, status, source, reason, ext_reason;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
-
- if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT))
- return IRQ_NONE;
-
- if (!(status & PCI_EXP_DPC_STATUS_TRIGGER)) {
- pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
- PCI_EXP_DPC_STATUS_INTERRUPT);
- return IRQ_HANDLED;
- }
-
- pci_write_config_word(pdev, cap + PCI_EXP_DPC_CTL,
- ctl & ~PCI_EXP_DPC_CTL_INT_EN);
-
- pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID,
- &source);
+ pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
dev_info(dev, "DPC containment event, status:%#06x source:%#06x\n",
- status, source);
+ status, source);
reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
-
dev_warn(dev, "DPC %s detected, remove downstream devices\n",
(reason == 0) ? "unmasked uncorrectable error" :
(reason == 1) ? "ERR_NONFATAL" :
@@ -220,15 +176,36 @@ static irqreturn_t dpc_irq(int irq, void *context)
(ext_reason == 0) ? "RP PIO error" :
(ext_reason == 1) ? "software trigger" :
"reserved error");
+
/* show RP PIO error detail information */
if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
dpc_process_rp_pio_error(dpc);
+ else if (reason == 0 && aer_get_device_error_info(pdev, &info)) {
+ aer_print_error(pdev, &info);
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ }
- pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
- PCI_EXP_DPC_STATUS_INTERRUPT);
+ /* We configure DPC so it only triggers on ERR_FATAL */
+ pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dpc_irq(int irq, void *context)
+{
+ struct dpc_dev *dpc = (struct dpc_dev *)context;
+ struct pci_dev *pdev = dpc->dev->port;
+ u16 cap = dpc->cap_pos, status;
+
+ pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
- schedule_work(&dpc->work);
+ if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || status == (u16)(~0))
+ return IRQ_NONE;
+ pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
+ PCI_EXP_DPC_STATUS_INTERRUPT);
+ if (status & PCI_EXP_DPC_STATUS_TRIGGER)
+ return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
}
@@ -250,11 +227,11 @@ static int dpc_probe(struct pcie_device *dev)
dpc->cap_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
dpc->dev = dev;
- INIT_WORK(&dpc->work, dpc_work);
set_service_data(dev, dpc);
- status = devm_request_irq(device, dev->irq, dpc_irq, IRQF_SHARED,
- "pcie-dpc", dpc);
+ status = devm_request_threaded_irq(device, dev->irq, dpc_irq,
+ dpc_handler, IRQF_SHARED,
+ "pcie-dpc", dpc);
if (status) {
dev_warn(device, "request IRQ%d failed: %d\n", dev->irq,
status);
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index f02e334beb45..708fd3a0d646 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -175,9 +175,11 @@ out:
*/
static pci_ers_result_t default_reset_link(struct pci_dev *dev)
{
- pci_reset_bridge_secondary_bus(dev);
+ int rc;
+
+ rc = pci_bridge_secondary_bus_reset(dev);
pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
- return PCI_ERS_RESULT_RECOVERED;
+ return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service)
@@ -252,6 +254,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
dev->error_state = state;
pci_walk_bus(dev->subordinate, cb, &result_data);
if (cb == report_resume) {
+ pci_aer_clear_device_status(dev);
pci_cleanup_aer_uncorrect_error_status(dev);
dev->error_state = pci_channel_io_normal;
}
@@ -259,15 +262,10 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
/*
* If the error is reported by an end point, we think this
* error is related to the upstream link of the end point.
+ * The error is non-fatal, so the bus is OK; just invoke
+ * the callback for the function that logged the error.
*/
- if (state == pci_channel_io_normal)
- /*
- * the error is non fatal so the bus is ok, just invoke
- * the callback for the function that logged the error.
- */
- cb(dev, &result_data);
- else
- pci_walk_bus(dev->bus, cb, &result_data);
+ cb(dev, &result_data);
}
return result_data.result;
@@ -317,7 +315,8 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
* do error recovery on all subordinates of the bridge instead
* of the bridge and clear the error status of the bridge.
*/
- pci_cleanup_aer_uncorrect_error_status(dev);
+ pci_aer_clear_fatal_status(dev);
+ pci_aer_clear_device_status(dev);
}
if (result == PCI_ERS_RESULT_RECOVERED) {
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 6ffc797a0dc1..d59afa42fc14 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -50,6 +50,7 @@ struct pcie_port_service_driver {
int (*probe) (struct pcie_device *dev);
void (*remove) (struct pcie_device *dev);
int (*suspend) (struct pcie_device *dev);
+ int (*resume_noirq) (struct pcie_device *dev);
int (*resume) (struct pcie_device *dev);
/* Device driver may resume normal operations */
@@ -82,6 +83,7 @@ extern struct bus_type pcie_port_bus_type;
int pcie_port_device_register(struct pci_dev *dev);
#ifdef CONFIG_PM
int pcie_port_device_suspend(struct device *dev);
+int pcie_port_device_resume_noirq(struct device *dev);
int pcie_port_device_resume(struct device *dev);
#endif
void pcie_port_device_remove(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e0261ad4bcdd..7c37d815229e 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -353,14 +353,19 @@ error_disable:
}
#ifdef CONFIG_PM
-static int suspend_iter(struct device *dev, void *data)
+typedef int (*pcie_pm_callback_t)(struct pcie_device *);
+
+static int pm_iter(struct device *dev, void *data)
{
struct pcie_port_service_driver *service_driver;
+ size_t offset = *(size_t *)data;
+ pcie_pm_callback_t cb;
if ((dev->bus == &pcie_port_bus_type) && dev->driver) {
service_driver = to_service_driver(dev->driver);
- if (service_driver->suspend)
- service_driver->suspend(to_pcie_device(dev));
+ cb = *(pcie_pm_callback_t *)((void *)service_driver + offset);
+ if (cb)
+ return cb(to_pcie_device(dev));
}
return 0;
}
@@ -371,20 +376,14 @@ static int suspend_iter(struct device *dev, void *data)
*/
int pcie_port_device_suspend(struct device *dev)
{
- return device_for_each_child(dev, NULL, suspend_iter);
+ size_t off = offsetof(struct pcie_port_service_driver, suspend);
+ return device_for_each_child(dev, &off, pm_iter);
}
-static int resume_iter(struct device *dev, void *data)
+int pcie_port_device_resume_noirq(struct device *dev)
{
- struct pcie_port_service_driver *service_driver;
-
- if ((dev->bus == &pcie_port_bus_type) &&
- (dev->driver)) {
- service_driver = to_service_driver(dev->driver);
- if (service_driver->resume)
- service_driver->resume(to_pcie_device(dev));
- }
- return 0;
+ size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
+ return device_for_each_child(dev, &off, pm_iter);
}
/**
@@ -393,7 +392,8 @@ static int resume_iter(struct device *dev, void *data)
*/
int pcie_port_device_resume(struct device *dev)
{
- return device_for_each_child(dev, NULL, resume_iter);
+ size_t off = offsetof(struct pcie_port_service_driver, resume);
+ return device_for_each_child(dev, &off, pm_iter);
}
#endif /* PM */
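
pm_iter() above collapses the per-callback iterators into one walker: the caller passes the byte offset of the desired callback within struct pcie_port_service_driver, and pm_iter() loads the function pointer found at that offset. The offsetof() technique in isolation, as a runnable user-space sketch with hypothetical names:

#include <stddef.h>
#include <stdio.h>

struct ops {				/* hypothetical callback table */
	int (*suspend)(void);
	int (*resume)(void);
};

static int do_resume(void) { puts("resume"); return 0; }

/* Call whichever callback lives at 'offset', skipping NULL hooks. */
static int call_at(struct ops *ops, size_t offset)
{
	int (*cb)(void) = *(int (**)(void))((char *)ops + offset);

	return cb ? cb() : 0;
}

int main(void)
{
	struct ops ops = { .resume = do_resume };

	call_at(&ops, offsetof(struct ops, suspend));	/* NULL: skipped */
	call_at(&ops, offsetof(struct ops, resume));	/* prints "resume" */
	return 0;
}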
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 973f1b80a038..eef22dc29140 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -42,17 +42,6 @@ __setup("pcie_ports=", pcie_port_setup);
/* global data */
-static int pcie_portdrv_restore_config(struct pci_dev *dev)
-{
- int retval;
-
- retval = pci_enable_device(dev);
- if (retval)
- return retval;
- pci_set_master(dev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int pcie_port_runtime_suspend(struct device *dev)
{
@@ -76,10 +65,12 @@ static int pcie_port_runtime_idle(struct device *dev)
static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.suspend = pcie_port_device_suspend,
+ .resume_noirq = pcie_port_device_resume_noirq,
.resume = pcie_port_device_resume,
.freeze = pcie_port_device_suspend,
.thaw = pcie_port_device_resume,
.poweroff = pcie_port_device_suspend,
+ .restore_noirq = pcie_port_device_resume_noirq,
.restore = pcie_port_device_resume,
.runtime_suspend = pcie_port_runtime_suspend,
.runtime_resume = pcie_port_runtime_resume,
@@ -160,19 +151,6 @@ static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev)
return PCI_ERS_RESULT_RECOVERED;
}
-static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
-{
- /* If fatal, restore cfg space for possible link reset at upstream */
- if (dev->error_state == pci_channel_io_frozen) {
- dev->state_saved = true;
- pci_restore_state(dev);
- pcie_portdrv_restore_config(dev);
- pci_enable_pcie_error_reporting(dev);
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
static int resume_iter(struct device *device, void *data)
{
struct pcie_device *pcie_device;
@@ -208,7 +186,6 @@ static const struct pci_device_id port_pci_ids[] = { {
static const struct pci_error_handlers pcie_portdrv_err_handler = {
.error_detected = pcie_portdrv_error_detected,
.mmio_enabled = pcie_portdrv_mmio_enabled,
- .slot_reset = pcie_portdrv_slot_reset,
.resume = pcie_portdrv_err_resume,
};
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 611adcd9c169..ec784009a36b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -13,7 +13,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
-#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/hypervisor.h>
@@ -1549,6 +1548,20 @@ static int pci_intx_mask_broken(struct pci_dev *dev)
return 0;
}
+static void early_dump_pci_device(struct pci_dev *pdev)
+{
+ u32 value[256 / 4];
+ int i;
+
+ pci_info(pdev, "config space:\n");
+
+ for (i = 0; i < 256; i += 4)
+ pci_read_config_dword(pdev, i, &value[i / 4]);
+
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
+ value, 256, false);
+}
+
/**
* pci_setup_device - Fill in class and map information of a device
* @dev: the device structure to fill
@@ -1598,6 +1611,9 @@ int pci_setup_device(struct pci_dev *dev)
pci_printk(KERN_DEBUG, dev, "[%04x:%04x] type %02x class %#08x\n",
dev->vendor, dev->device, dev->hdr_type, dev->class);
+ if (pci_early_dump)
+ early_dump_pci_device(dev);
+
/* Need to have dev->class ready */
dev->cfg_size = pci_cfg_space_size(dev);
@@ -1725,11 +1741,15 @@ int pci_setup_device(struct pci_dev *dev)
static void pci_configure_mps(struct pci_dev *dev)
{
struct pci_dev *bridge = pci_upstream_bridge(dev);
- int mps, p_mps, rc;
+ int mps, mpss, p_mps, rc;
if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
return;
+ /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
+ if (dev->is_virtfn)
+ return;
+
mps = pcie_get_mps(dev);
p_mps = pcie_get_mps(bridge);
@@ -1749,6 +1769,14 @@ static void pci_configure_mps(struct pci_dev *dev)
if (pcie_bus_config != PCIE_BUS_DEFAULT)
return;
+ mpss = 128 << dev->pcie_mpss;
+ if (mpss < p_mps && pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
+ pcie_set_mps(bridge, mpss);
+ pci_info(dev, "Upstream bridge's Max Payload Size set to %d (was %d, max %d)\n",
+ mpss, p_mps, 128 << bridge->pcie_mpss);
+ p_mps = pcie_get_mps(bridge);
+ }
+
rc = pcie_set_mps(dev, p_mps);
if (rc) {
pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
@@ -1757,7 +1785,7 @@ static void pci_configure_mps(struct pci_dev *dev)
}
pci_info(dev, "Max Payload Size set to %d (was %d, max %d)\n",
- p_mps, mps, 128 << dev->pcie_mpss);
+ p_mps, mps, mpss);
}
static struct hpp_type0 pci_default_type0 = {
@@ -2042,6 +2070,29 @@ static void pci_configure_ltr(struct pci_dev *dev)
#endif
}
+static void pci_configure_eetlp_prefix(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_PASID
+ struct pci_dev *bridge;
+ u32 cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
+ if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX))
+ return;
+
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ dev->eetlp_prefix_path = 1;
+ else {
+ bridge = pci_upstream_bridge(dev);
+ if (bridge && bridge->eetlp_prefix_path)
+ dev->eetlp_prefix_path = 1;
+ }
+#endif
+}
+
static void pci_configure_device(struct pci_dev *dev)
{
struct hotplug_params hpp;
@@ -2051,6 +2102,7 @@ static void pci_configure_device(struct pci_dev *dev)
pci_configure_extended_tags(dev, NULL);
pci_configure_relaxed_ordering(dev);
pci_configure_ltr(dev);
+ pci_configure_eetlp_prefix(dev);
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
@@ -2064,6 +2116,7 @@ static void pci_configure_device(struct pci_dev *dev)
static void pci_release_capabilities(struct pci_dev *dev)
{
+ pci_aer_exit(dev);
pci_vpd_release(dev);
pci_iov_release(dev);
pci_free_cap_save_buffers(dev);
@@ -2156,8 +2209,8 @@ static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
return true;
}
-bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
- int timeout)
+bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
+ int timeout)
{
if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
return false;
@@ -2172,6 +2225,24 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
return true;
}
+
+bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
+ int timeout)
+{
+#ifdef CONFIG_PCI_QUIRKS
+ struct pci_dev *bridge = bus->self;
+
+ /*
+ * Certain IDT switches have an issue where they improperly trigger
+ * ACS Source Validation errors on completions for config reads.
+ */
+ if (bridge && bridge->vendor == PCI_VENDOR_ID_IDT &&
+ bridge->device == 0x80b5)
+ return pci_idt_bus_quirk(bus, devfn, l, timeout);
+#endif
+
+ return pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);
+}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
/*
@@ -2205,6 +2276,25 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
return dev;
}
+static void pcie_report_downtraining(struct pci_dev *dev)
+{
+ if (!pci_is_pcie(dev))
+ return;
+
+ /* Look from the device up to avoid downstream ports with no devices */
+ if ((pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT) &&
+ (pci_pcie_type(dev) != PCI_EXP_TYPE_LEG_END) &&
+ (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM))
+ return;
+
+ /* Multi-function PCIe devices share the same link/status */
+ if (PCI_FUNC(dev->devfn) != 0 || dev->is_virtfn)
+ return;
+
+ /* Print link status only if the device is constrained by the fabric */
+ __pcie_print_link_status(dev, false);
+}
+
static void pci_init_capabilities(struct pci_dev *dev)
{
/* Enhanced Allocation */
@@ -2240,6 +2330,8 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Advanced Error Reporting */
pci_aer_init(dev);
+ pcie_report_downtraining(dev);
+
if (pci_probe_reset_function(dev) == 0)
dev->reset_fn = 1;
}
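
early_dump_pci_device() above reads the 256-byte legacy config header a dword at a time and prints it with print_hex_dump(). The same offset-prefixed dump format can be reproduced in user space; a rough sketch over a fake buffer (no real config-space access):

#include <stdint.h>
#include <stdio.h>

static void hexdump256(const uint8_t *buf)
{
	for (int off = 0; off < 256; off += 16) {
		printf("%08x:", off);			/* like DUMP_PREFIX_OFFSET */
		for (int i = 0; i < 16; i++)
			printf(" %02x", buf[off + i]);
		putchar('\n');
	}
}

int main(void)
{
	uint8_t cfg[256] = { 0x86, 0x80, 0x10, 0x15 };	/* fake vendor/device ID */

	hexdump256(cfg);
	return 0;
}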
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f439de848658..46f58a9771d7 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -25,8 +25,10 @@
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/mm.h>
+#include <linux/nvme.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_runtime.h>
+#include <linux/switchtec.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
@@ -460,6 +462,7 @@ static void quirk_nfp6000(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000);
/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
@@ -2105,6 +2108,7 @@ static void quirk_netmos(struct pci_dev *dev)
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return;
+ /* else: fall through */
case PCI_DEVICE_ID_NETMOS_9735:
case PCI_DEVICE_ID_NETMOS_9745:
case PCI_DEVICE_ID_NETMOS_9845:
@@ -2352,6 +2356,9 @@ static void quirk_paxc_bridge(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
#endif
/*
@@ -3664,6 +3671,108 @@ static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
+/*
+ * The Samsung SM961/PM961 controller can sometimes enter a fatal state after
+ * FLR where config space reads from the device return -1. We seem to be
+ * able to avoid this condition if we disable the NVMe controller prior to
+ * FLR. This quirk is generic for any NVMe class device requiring similar
+ * assistance to quiesce the device prior to FLR.
+ *
+ * NVMe specification: https://nvmexpress.org/resources/specifications/
+ * Revision 1.0e:
+ * Chapter 2: Required and optional PCI config registers
+ * Chapter 3: NVMe control registers
+ * Chapter 7.3: Reset behavior
+ */
+static int nvme_disable_and_flr(struct pci_dev *dev, int probe)
+{
+ void __iomem *bar;
+ u16 cmd;
+ u32 cfg;
+
+ if (dev->class != PCI_CLASS_STORAGE_EXPRESS ||
+ !pcie_has_flr(dev) || !pci_resource_start(dev, 0))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ bar = pci_iomap(dev, 0, NVME_REG_CC + sizeof(cfg));
+ if (!bar)
+ return -ENOTTY;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);
+
+ cfg = readl(bar + NVME_REG_CC);
+
+ /* Disable controller if enabled */
+ if (cfg & NVME_CC_ENABLE) {
+ u32 cap = readl(bar + NVME_REG_CAP);
+ unsigned long timeout;
+
+ /*
+ * Per nvme_disable_ctrl() skip shutdown notification as it
+ * could complete commands to the admin queue. We only intend
+ * to quiesce the device before reset.
+ */
+ cfg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);
+
+ writel(cfg, bar + NVME_REG_CC);
+
+ /*
+ * Some controllers require an additional delay here, see
+ * NVME_QUIRK_DELAY_BEFORE_CHK_RDY. None of those are yet
+ * supported by this quirk.
+ */
+
+ /* Cap register provides max timeout in 500ms increments */
+ timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+
+ for (;;) {
+ u32 status = readl(bar + NVME_REG_CSTS);
+
+ /* Ready status becomes zero on disable complete */
+ if (!(status & NVME_CSTS_RDY))
+ break;
+
+ msleep(100);
+
+ if (time_after(jiffies, timeout)) {
+ pci_warn(dev, "Timeout waiting for NVMe ready status to clear after disable\n");
+ break;
+ }
+ }
+ }
+
+ pci_iounmap(dev, bar);
+
+ pcie_flr(dev);
+
+ return 0;
+}
+
+/*
+ * Intel DC P3700 NVMe controller will timeout waiting for ready status
+ * to change after NVMe enable if the driver starts interacting with the
+ * device too soon after FLR. A 250ms delay after FLR has heuristically
+ * proven to produce reliably working results for device assignment cases.
+ */
+static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
+{
+ if (!pcie_has_flr(dev))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pcie_flr(dev);
+
+ msleep(250);
+
+ return 0;
+}
+
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
reset_intel_82599_sfp_virtfn },
@@ -3671,6 +3780,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
reset_ivb_igd },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
reset_ivb_igd },
+ { PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr },
+ { PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
{ 0 }
@@ -3740,6 +3851,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c78 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c134 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183,
+ quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
quirk_dma_func1_alias);
@@ -4553,27 +4667,79 @@ static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
return 0;
}
-static const struct pci_dev_enable_acs {
+static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev)
+{
+ int pos;
+ u32 cap, ctrl;
+
+ if (!pci_quirk_intel_spt_pch_acs_match(dev))
+ return -ENOTTY;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return -ENOTTY;
+
+ pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
+ pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
+
+ ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
+
+ pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
+
+ pci_info(dev, "Intel SPT PCH root port workaround: disabled ACS redirect\n");
+
+ return 0;
+}
+
+static const struct pci_dev_acs_ops {
u16 vendor;
u16 device;
int (*enable_acs)(struct pci_dev *dev);
-} pci_dev_enable_acs[] = {
- { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs },
- { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_spt_pch_acs },
- { 0 }
+ int (*disable_acs_redir)(struct pci_dev *dev);
+} pci_dev_acs_ops[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ .enable_acs = pci_quirk_enable_intel_pch_acs,
+ },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ .enable_acs = pci_quirk_enable_intel_spt_pch_acs,
+ .disable_acs_redir = pci_quirk_disable_intel_spt_pch_acs_redir,
+ },
};
int pci_dev_specific_enable_acs(struct pci_dev *dev)
{
- const struct pci_dev_enable_acs *i;
- int ret;
+ const struct pci_dev_acs_ops *p;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
+ p = &pci_dev_acs_ops[i];
+ if ((p->vendor == dev->vendor ||
+ p->vendor == (u16)PCI_ANY_ID) &&
+ (p->device == dev->device ||
+ p->device == (u16)PCI_ANY_ID) &&
+ p->enable_acs) {
+ ret = p->enable_acs(dev);
+ if (ret >= 0)
+ return ret;
+ }
+ }
- for (i = pci_dev_enable_acs; i->enable_acs; i++) {
- if ((i->vendor == dev->vendor ||
- i->vendor == (u16)PCI_ANY_ID) &&
- (i->device == dev->device ||
- i->device == (u16)PCI_ANY_ID)) {
- ret = i->enable_acs(dev);
+ return -ENOTTY;
+}
+
+int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
+{
+ const struct pci_dev_acs_ops *p;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
+ p = &pci_dev_acs_ops[i];
+ if ((p->vendor == dev->vendor ||
+ p->vendor == (u16)PCI_ANY_ID) &&
+ (p->device == dev->device ||
+ p->device == (u16)PCI_ANY_ID) &&
+ p->disable_acs_redir) {
+ ret = p->disable_acs_redir(dev);
if (ret >= 0)
return ret;
}
@@ -4753,3 +4919,197 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID,
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
+
+/*
+ * Some IDT switches incorrectly flag an ACS Source Validation error on
+ * completions for config read requests even though PCIe r4.0, sec
+ * 6.12.1.1, says that completions are never affected by ACS Source
+ * Validation. Here's the text of IDT 89H32H8G3-YC, erratum #36:
+ *
+ * Item #36 - Downstream port applies ACS Source Validation to Completions
+ * Section 6.12.1.1 of the PCI Express Base Specification 3.1 states that
+ * completions are never affected by ACS Source Validation. However,
+ * completions received by a downstream port of the PCIe switch from a
+ * device that has not yet captured a PCIe bus number are incorrectly
+ * dropped by ACS Source Validation by the switch downstream port.
+ *
+ * The workaround suggested by IDT is to issue a config write to the
+ * downstream device before issuing the first config read. This allows the
+ * downstream device to capture its bus and device numbers (see PCIe r4.0,
+ * sec 2.2.9), thus avoiding the ACS error on the completion.
+ *
+ * However, we don't know when the device is ready to accept the config
+ * write, so we do config reads until we receive a non-Config Request Retry
+ * Status, then do the config write.
+ *
+ * To avoid hitting the erratum when doing the config reads, we disable ACS
+ * SV around this process.
+ */
+int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout)
+{
+ int pos;
+ u16 ctrl = 0;
+ bool found;
+ struct pci_dev *bridge = bus->self;
+
+ pos = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ACS);
+
+ /* Disable ACS SV before initial config reads */
+ if (pos) {
+ pci_read_config_word(bridge, pos + PCI_ACS_CTRL, &ctrl);
+ if (ctrl & PCI_ACS_SV)
+ pci_write_config_word(bridge, pos + PCI_ACS_CTRL,
+ ctrl & ~PCI_ACS_SV);
+ }
+
+ found = pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);
+
+ /* Write Vendor ID (read-only) so the endpoint latches its bus/dev */
+ if (found)
+ pci_bus_write_config_word(bus, devfn, PCI_VENDOR_ID, 0);
+
+ /* Re-enable ACS_SV if it was previously enabled */
+ if (ctrl & PCI_ACS_SV)
+ pci_write_config_word(bridge, pos + PCI_ACS_CTRL, ctrl);
+
+ return found;
+}
+
+/*
+ * Microsemi Switchtec NTB uses devfn proxy IDs to move TLPs between
+ * NT endpoints via the internal switch fabric. These IDs replace the
+ * originating requester ID in TLPs that access host memory on peer NTB
+ * ports. Therefore, all proxy IDs must be aliased to the NTB device
+ * to permit access when the IOMMU is turned on.
+ */
+static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
+{
+ void __iomem *mmio;
+ struct ntb_info_regs __iomem *mmio_ntb;
+ struct ntb_ctrl_regs __iomem *mmio_ctrl;
+ struct sys_info_regs __iomem *mmio_sys_info;
+ u64 partition_map;
+ u8 partition;
+ int pp;
+
+ if (pci_enable_device(pdev)) {
+ pci_err(pdev, "Cannot enable Switchtec device\n");
+ return;
+ }
+
+ mmio = pci_iomap(pdev, 0, 0);
+ if (mmio == NULL) {
+ pci_disable_device(pdev);
+ pci_err(pdev, "Cannot iomap Switchtec device\n");
+ return;
+ }
+
+ pci_info(pdev, "Setting Switchtec proxy ID aliases\n");
+
+ mmio_ntb = mmio + SWITCHTEC_GAS_NTB_OFFSET;
+ mmio_ctrl = (void __iomem *) mmio_ntb + SWITCHTEC_NTB_REG_CTRL_OFFSET;
+ mmio_sys_info = mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
+
+ partition = ioread8(&mmio_ntb->partition_id);
+
+ partition_map = ioread32(&mmio_ntb->ep_map);
+ partition_map |= ((u64) ioread32(&mmio_ntb->ep_map + 4)) << 32;
+ partition_map &= ~(1ULL << partition);
+
+ for (pp = 0; pp < (sizeof(partition_map) * 8); pp++) {
+ struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
+ u32 table_sz = 0;
+ int te;
+
+ if (!(partition_map & (1ULL << pp)))
+ continue;
+
+ pci_dbg(pdev, "Processing partition %d\n", pp);
+
+ mmio_peer_ctrl = &mmio_ctrl[pp];
+
+ table_sz = ioread16(&mmio_peer_ctrl->req_id_table_size);
+ if (!table_sz) {
+ pci_warn(pdev, "Partition %d table_sz 0\n", pp);
+ continue;
+ }
+
+ if (table_sz > 512) {
+ pci_warn(pdev,
+ "Invalid Switchtec partition %d table_sz %d\n",
+ pp, table_sz);
+ continue;
+ }
+
+ for (te = 0; te < table_sz; te++) {
+ u32 rid_entry;
+ u8 devfn;
+
+ rid_entry = ioread32(&mmio_peer_ctrl->req_id_table[te]);
+ devfn = (rid_entry >> 1) & 0xFF;
+ pci_dbg(pdev,
+ "Aliasing Partition %d Proxy ID %02x.%d\n",
+ pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ pci_add_dma_alias(pdev, devfn);
+ }
+ }
+
+ pci_iounmap(pdev, mmio);
+ pci_disable_device(pdev);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8531,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8532,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8533,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8534,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8535,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8536,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8543,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8544,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8545,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8546,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8551,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8552,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8553,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8554,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8555,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8556,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8561,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8562,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8563,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8564,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8565,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8566,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8571,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8572,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8573,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8574,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8575,
+ quirk_switchtec_ntb_dma_alias);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8576,
+ quirk_switchtec_ntb_dma_alias);
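
nvme_disable_and_flr() above clears CC.EN and then polls CSTS.RDY until it drops or the CAP.TO-derived deadline passes. The bounded-poll pattern in isolation, as a runnable user-space sketch with a simulated ready flag:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool rdy_set(int *reg) { return (*reg)-- > 0; }	/* simulated CSTS.RDY */

int main(void)
{
	int fake_csts = 3;			/* "clears" after three polls */
	time_t deadline = time(NULL) + 5;	/* CAP.TO would derive this */

	while (rdy_set(&fake_csts)) {
		if (time(NULL) > deadline) {
			fprintf(stderr, "timeout waiting for RDY to clear\n");
			return 1;
		}
		usleep(100 * 1000);		/* 100 ms between polls */
	}
	puts("controller disabled");
	return 0;
}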
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 5e3d0dced2b8..461e7fd2756f 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/module.h>
-#include <linux/pci-aspm.h>
#include "pci.h"
static void pci_free_resources(struct pci_dev *dev)
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index a7b5c37a85ec..137bf0cee897 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -80,7 +80,8 @@ EXPORT_SYMBOL_GPL(pci_disable_rom);
* The PCI window size could be much larger than the
* actual image size.
*/
-size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
+static size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom,
+ size_t size)
{
void __iomem *image;
int last_image;
@@ -106,8 +107,14 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
length = readw(pds + 16);
image += length * 512;
/* Avoid iterating through memory outside the resource window */
- if (image > rom + size)
+ if (image >= rom + size)
break;
+ if (!last_image) {
+ if (readw(image) != 0xAA55) {
+ pci_info(pdev, "No more images in the PCI ROM\n");
+ break;
+ }
+ }
} while (length && !last_image);
/* never return a size larger than the PCI resource window */
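
The rom.c fix above stops the image walk once the next image pointer reaches the end of the window (>= instead of >) and bails out when a chained image lacks the 0xAA55 signature. A simplified, runnable walk over a byte buffer (layout per the PCI Firmware spec; the real code also validates the PCIR data structure, which this sketch mimics only loosely):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t rd16(const uint8_t *p) { return p[0] | (p[1] << 8); }

/* Walk a chain of option-ROM images; return the summed size in bytes. */
static size_t rom_size(const uint8_t *rom, size_t win)
{
	const uint8_t *img = rom;
	int last = 0;

	do {
		const uint8_t *pds;
		size_t len;

		if (rd16(img) != 0xAA55)	/* signature missing: stop */
			break;
		pds = img + rd16(img + 0x18);	/* PCIR data structure */
		last = pds[21] & 0x80;		/* bit 7: last image */
		len = rd16(pds + 16) * 512;	/* length in 512-byte units */
		if (!len)
			break;
		img += len;
		if (img >= rom + win)		/* never run past the window */
			break;
	} while (!last);
	return img - rom;
}

int main(void)
{
	uint8_t rom[1024] = { 0x55, 0xAA };

	rom[0x18] = 0x20;			/* PCIR at offset 0x20 */
	memcpy(&rom[0x20], "PCIR", 4);
	rom[0x20 + 16] = 1;			/* one 512-byte block */
	rom[0x20 + 21] = 0x80;			/* marked as last image */
	printf("%zu\n", rom_size(rom, sizeof(rom)));	/* prints 512 */
	return 0;
}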
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 47cd0c037433..9940cc70f38b 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -641,7 +641,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
- if (reg != MICROSEMI_VENDOR_ID)
+ if (reg != PCI_VENDOR_ID_MICROSEMI)
break;
reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
@@ -1203,7 +1203,7 @@ static void init_pff(struct switchtec_dev *stdev)
for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
- if (reg != MICROSEMI_VENDOR_ID)
+ if (reg != PCI_VENDOR_ID_MICROSEMI)
break;
}
@@ -1267,7 +1267,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
struct switchtec_dev *stdev;
int rc;
- if (pdev->class == MICROSEMI_NTB_CLASSCODE)
+ if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
request_module_nowait("ntb_hw_switchtec");
stdev = stdev_create(pdev);
@@ -1321,19 +1321,19 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
#define SWITCHTEC_PCI_DEVICE(device_id) \
{ \
- .vendor = MICROSEMI_VENDOR_ID, \
+ .vendor = PCI_VENDOR_ID_MICROSEMI, \
.device = device_id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
- .class = MICROSEMI_MGMT_CLASSCODE, \
+ .class = (PCI_CLASS_MEMORY_OTHER << 8), \
.class_mask = 0xFFFFFFFF, \
}, \
{ \
- .vendor = MICROSEMI_VENDOR_ID, \
+ .vendor = PCI_VENDOR_ID_MICROSEMI, \
.device = device_id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
- .class = MICROSEMI_NTB_CLASSCODE, \
+ .class = (PCI_CLASS_BRIDGE_OTHER << 8), \
.class_mask = 0xFFFFFFFF, \
}
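
The switchtec.c hunks above replace the private MICROSEMI_*_CLASSCODE macros with the generic PCI_CLASS_* constants. dev->class holds the 24-bit class code (base class, subclass, programming interface), so the 16-bit PCI_CLASS_* values are shifted left by 8 before matching. A runnable sketch of the layout:

#include <stdint.h>
#include <stdio.h>

#define PCI_CLASS_BRIDGE_OTHER	0x0680	/* base class 0x06, subclass 0x80 */

int main(void)
{
	uint32_t class = PCI_CLASS_BRIDGE_OTHER << 8;	/* prog-if byte is 0x00 */

	printf("class=0x%06x base=0x%02x sub=0x%02x prog-if=0x%02x\n",
	       class, class >> 16, (class >> 8) & 0xff, class & 0xff);
	return 0;
}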
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 8617565ba561..4963c2e2bd4c 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -146,7 +146,7 @@ static int pci_vpd_wait(struct pci_dev *dev)
if (!vpd->busy)
return 0;
- while (time_before(jiffies, timeout)) {
+ do {
ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
&status);
if (ret < 0)
@@ -160,10 +160,13 @@ static int pci_vpd_wait(struct pci_dev *dev)
if (fatal_signal_pending(current))
return -EINTR;
+ if (time_after(jiffies, timeout))
+ break;
+
usleep_range(10, max_sleep);
if (max_sleep < 1024)
max_sleep *= 2;
- }
+ } while (true);
pci_warn(dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
return -ETIMEDOUT;
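
The vpd.c rework above converts the wait loop so the deadline is checked after each status read rather than before the first one, guaranteeing at least one poll even if the task was scheduled out past the timeout, while keeping the doubling sleep interval. The check-after-poll backoff pattern in isolation, runnable with a simulated ready flag:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool hw_ready(int *polls) { return --(*polls) <= 0; }	/* simulated */

int main(void)
{
	int polls = 6;
	unsigned int sleep_us = 10;		/* start small */
	time_t deadline = time(NULL) + 2;

	do {
		if (hw_ready(&polls)) {
			puts("ready");
			return 0;
		}
		if (time(NULL) > deadline)	/* checked after the poll */
			break;
		usleep(sleep_us);
		if (sleep_us < 1024)
			sleep_us *= 2;		/* exponential backoff */
	} while (true);
	fprintf(stderr, "timed out\n");
	return 1;
}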
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 0d09d8e669cd..1bfeb160c5b1 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -53,6 +53,16 @@ enum {
CCI_IF_MAX,
};
+#define NUM_HW_CNTRS_CII_4XX 4
+#define NUM_HW_CNTRS_CII_5XX 8
+#define NUM_HW_CNTRS_MAX NUM_HW_CNTRS_CII_5XX
+
+#define FIXED_HW_CNTRS_CII_4XX 1
+#define FIXED_HW_CNTRS_CII_5XX 0
+#define FIXED_HW_CNTRS_MAX FIXED_HW_CNTRS_CII_4XX
+
+#define HW_CNTRS_MAX (NUM_HW_CNTRS_MAX + FIXED_HW_CNTRS_MAX)
+
struct event_range {
u32 min;
u32 max;
@@ -633,8 +643,7 @@ static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
{
int i;
struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
-
- DECLARE_BITMAP(mask, cci_pmu->num_cntrs);
+ DECLARE_BITMAP(mask, HW_CNTRS_MAX);
bitmap_zero(mask, cci_pmu->num_cntrs);
for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
@@ -940,7 +949,7 @@ static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
int i;
- DECLARE_BITMAP(saved_mask, cci_pmu->num_cntrs);
+ DECLARE_BITMAP(saved_mask, HW_CNTRS_MAX);
bitmap_zero(saved_mask, cci_pmu->num_cntrs);
pmu_save_counters(cci_pmu, saved_mask);
@@ -1245,7 +1254,7 @@ static int validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
- unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)];
+ unsigned long mask[BITS_TO_LONGS(HW_CNTRS_MAX)];
struct cci_pmu_hw_events fake_pmu = {
/*
* Initialise the fake PMU. We only need to populate the
@@ -1403,6 +1412,11 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
char *name = model->name;
u32 num_cntrs;
+ if (WARN_ON(model->num_hw_cntrs > NUM_HW_CNTRS_MAX))
+ return -EINVAL;
+ if (WARN_ON(model->fixed_hw_cntrs > FIXED_HW_CNTRS_MAX))
+ return -EINVAL;
+
pmu_event_attr_group.attrs = model->event_attrs;
pmu_format_attr_group.attrs = model->format_attrs;
@@ -1455,8 +1469,8 @@ static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
#ifdef CONFIG_ARM_CCI400_PMU
[CCI400_R0] = {
.name = "CCI_400",
- .fixed_hw_cntrs = 1, /* Cycle counter */
- .num_hw_cntrs = 4,
+ .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
+ .num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
.cntr_size = SZ_4K,
.format_attrs = cci400_pmu_format_attrs,
.event_attrs = cci400_r0_pmu_event_attrs,
@@ -1475,8 +1489,8 @@ static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
},
[CCI400_R1] = {
.name = "CCI_400_r1",
- .fixed_hw_cntrs = 1, /* Cycle counter */
- .num_hw_cntrs = 4,
+ .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
+ .num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
.cntr_size = SZ_4K,
.format_attrs = cci400_pmu_format_attrs,
.event_attrs = cci400_r1_pmu_event_attrs,
@@ -1497,8 +1511,8 @@ static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
#ifdef CONFIG_ARM_CCI5xx_PMU
[CCI500_R0] = {
.name = "CCI_500",
- .fixed_hw_cntrs = 0,
- .num_hw_cntrs = 8,
+ .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
+ .num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
.cntr_size = SZ_64K,
.format_attrs = cci5xx_pmu_format_attrs,
.event_attrs = cci5xx_pmu_event_attrs,
@@ -1521,8 +1535,8 @@ static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
},
[CCI550_R0] = {
.name = "CCI_550",
- .fixed_hw_cntrs = 0,
- .num_hw_cntrs = 8,
+ .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
+ .num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
.cntr_size = SZ_64K,
.format_attrs = cci5xx_pmu_format_attrs,
.event_attrs = cci5xx_pmu_event_attrs,
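
The arm-cci hunks above replace DECLARE_BITMAP() stack arrays sized by the runtime cci_pmu->num_cntrs (variable-length arrays) with the compile-time bound HW_CNTRS_MAX. The macro arithmetic behind the fixed-size bitmap, as a runnable sketch:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG		(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits)	unsigned long name[BITS_TO_LONGS(bits)]

#define HW_CNTRS_MAX	9	/* 8 programmable + 1 fixed, as in the patch */

int main(void)
{
	DECLARE_BITMAP(mask, HW_CNTRS_MAX) = { 0 };	/* fixed size: no VLA */

	mask[0] |= 1UL << 3;	/* mark counter 3 in use */
	printf("%zu long(s), counter 3 is %s\n",
	       sizeof(mask) / sizeof(mask[0]),
	       (mask[0] >> 3) & 1 ? "set" : "clear");
	return 0;
}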
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index b416ee18e6bb..4b15c36f4631 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1485,17 +1485,9 @@ static int arm_ccn_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ccn);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
- if (!devm_request_mem_region(ccn->dev, res->start,
- resource_size(res), pdev->name))
- return -EBUSY;
-
- ccn->base = devm_ioremap(ccn->dev, res->start,
- resource_size(res));
- if (!ccn->base)
- return -EFAULT;
+ ccn->base = devm_ioremap_resource(ccn->dev, res);
+ if (IS_ERR(ccn->base))
+ return PTR_ERR(ccn->base);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res)
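
The arm-ccn hunk above folds the platform_get_resource() check, devm_request_mem_region(), and devm_ioremap() triple into one devm_ioremap_resource() call, which validates the resource, claims the region, maps it, and reports failure as an ERR_PTR. A kernel-style sketch of the idiom in a hypothetical probe:

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* -EINVAL, -EBUSY or -ENOMEM */

	/* ... use base ... */
	return 0;
}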
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index a6347d487635..7f01f6f60b87 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -28,6 +28,14 @@
static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
+static inline u64 arm_pmu_event_max_period(struct perf_event *event)
+{
+ if (event->hw.flags & ARMPMU_EVT_64BIT)
+ return GENMASK_ULL(63, 0);
+ else
+ return GENMASK_ULL(31, 0);
+}
+
static int
armpmu_map_cache_event(const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
@@ -114,8 +122,10 @@ int armpmu_event_set_period(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
+ u64 max_period;
int ret = 0;
+ max_period = arm_pmu_event_max_period(event);
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
@@ -136,12 +146,12 @@ int armpmu_event_set_period(struct perf_event *event)
* effect we are reducing max_period to account for
* interrupt latency (and we are being very conservative).
*/
- if (left > (armpmu->max_period >> 1))
- left = armpmu->max_period >> 1;
+ if (left > (max_period >> 1))
+ left = (max_period >> 1);
local64_set(&hwc->prev_count, (u64)-left);
- armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
+ armpmu->write_counter(event, (u64)(-left) & max_period);
perf_event_update_userpage(event);
@@ -153,6 +163,7 @@ u64 armpmu_event_update(struct perf_event *event)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_raw_count, new_raw_count;
+ u64 max_period = arm_pmu_event_max_period(event);
again:
prev_raw_count = local64_read(&hwc->prev_count);
@@ -162,7 +173,7 @@ again:
new_raw_count) != prev_raw_count)
goto again;
- delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+ delta = (new_raw_count - prev_raw_count) & max_period;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
@@ -227,11 +238,10 @@ armpmu_del(struct perf_event *event, int flags)
armpmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
- clear_bit(idx, hw_events->used_mask);
- if (armpmu->clear_event_idx)
- armpmu->clear_event_idx(hw_events, event);
-
+ armpmu->clear_event_idx(hw_events, event);
perf_event_update_userpage(event);
+ /* Clear the allocated counter */
+ hwc->idx = -1;
}
static int
@@ -360,6 +370,7 @@ __hw_perf_event_init(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int mapping;
+ hwc->flags = 0;
mapping = armpmu->map_event(event);
if (mapping < 0) {
@@ -402,7 +413,7 @@ __hw_perf_event_init(struct perf_event *event)
* is far less likely to overtake the previous one unless
* you have some serious IRQ latency issues.
*/
- hwc->sample_period = armpmu->max_period >> 1;
+ hwc->sample_period = arm_pmu_event_max_period(event) >> 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
@@ -654,14 +665,9 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
int idx;
for (idx = 0; idx < armpmu->num_events; idx++) {
- /*
- * If the counter is not used skip it, there is no
- * need of stopping/restarting it.
- */
- if (!test_bit(idx, hw_events->used_mask))
- continue;
-
event = hw_events->events[idx];
+ if (!event)
+ continue;
switch (cmd) {
case CPU_PM_ENTER:
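
armpmu_event_update() above now masks the counter delta with a per-event maximum period (32- or 64-bit, depending on ARMPMU_EVT_64BIT) instead of a single per-PMU value, so a narrow counter that wraps between reads still produces the right delta. The wraparound arithmetic, runnable:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_period = 0xffffffffULL;	/* 32-bit counter */
	uint64_t prev = 0xfffffff0ULL;		/* read just before the wrap */
	uint64_t now = 0x00000010ULL;		/* read just after the wrap */
	uint64_t delta = (now - prev) & max_period;

	/* Without the mask the u64 subtraction would yield a huge value. */
	printf("delta = %" PRIu64 "\n", delta);	/* prints 32 */
	return 0;
}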
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 971ff336494a..96075cecb0ae 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -160,7 +160,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
static int armpmu_request_irqs(struct arm_pmu *armpmu)
{
struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
- int cpu, err;
+ int cpu, err = 0;
for_each_cpu(cpu, &armpmu->supported_cpus) {
int irq = per_cpu(hw_events->irq, cpu);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 44df61397a38..9efd2413240c 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -350,19 +350,21 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
/*
* Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
- * If multi-threading is supported, SCCL_ID is in MPIDR[aff3] and CCL_ID
- * is in MPIDR[aff2]; if not, SCCL_ID is in MPIDR[aff2] and CCL_ID is
- * in MPIDR[aff1]. If this changes in future, this shall be updated.
+ * If multi-threading is supported, CCL_ID is the low 3 bits of MPIDR[Aff2]
+ * and SCCL_ID is the upper 5 bits of the Aff2 field; if not, SCCL_ID
+ * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1].
*/
static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
{
u64 mpidr = read_cpuid_mpidr();
if (mpidr & MPIDR_MT_BITMASK) {
+ int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+
if (sccl_id)
- *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+ *sccl_id = aff2 >> 3;
if (ccl_id)
- *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ *ccl_id = aff2 & 0x7;
} else {
if (sccl_id)
*sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
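
With multi-threading set in MPIDR_EL1, the code above takes the SCCL ID from the upper five bits of the Aff2 field and the CCL ID from its low three bits. The decode in isolation, runnable with a hypothetical MPIDR value:

#include <stdint.h>
#include <stdio.h>

#define MPIDR_AFF2(m)	(((m) >> 16) & 0xff)	/* Aff2 occupies bits [23:16] */

int main(void)
{
	uint64_t mpidr = 0x008a0100ULL;		/* hypothetical: Aff2 = 0x8a */
	uint8_t aff2 = MPIDR_AFF2(mpidr);
	int sccl_id = aff2 >> 3;		/* upper 5 bits */
	int ccl_id = aff2 & 0x7;		/* low 3 bits */

	printf("sccl_id=%d ccl_id=%d\n", sccl_id, ccl_id);	/* 17 and 2 */
	return 0;
}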
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index dd50371225bc..8d4b7e999f02 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -161,10 +161,10 @@ config PINCTRL_MCP23S08
select REGMAP_SPI if SPI_MASTER
select GENERIC_PINCONF
help
- SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
- I/O expanders.
- This provides a GPIO interface supporting inputs and outputs.
- The I2C versions of the chips can be used as interrupt-controller.
+ SPI/I2C driver for Microchip MCP23S08 / MCP23S17 / MCP23S18 /
+ MCP23008 / MCP23017 / MCP23018 I/O expanders.
+ This provides a GPIO interface supporting inputs and outputs and a
+ corresponding interrupt-controller.
config PINCTRL_OXNAS
bool
@@ -332,6 +332,7 @@ config PINCTRL_OCELOT
depends on OF
depends on MSCC_OCELOT || COMPILE_TEST
select GPIOLIB
+ select GPIOLIB_IRQCHIP
select GENERIC_PINCONF
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
diff --git a/drivers/pinctrl/actions/Kconfig b/drivers/pinctrl/actions/Kconfig
index 490927b4ea76..2397cb0f6011 100644
--- a/drivers/pinctrl/actions/Kconfig
+++ b/drivers/pinctrl/actions/Kconfig
@@ -5,6 +5,7 @@ config PINCTRL_OWL
select PINCONF
select GENERIC_PINCONF
select GPIOLIB
+ select GPIOLIB_IRQCHIP
help
Say Y here to enable Actions Semi OWL pinctrl driver
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index b5c880b50bb3..9d18c02f192b 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -45,6 +46,9 @@ struct owl_pinctrl {
struct clk *clk;
const struct owl_pinctrl_soc_data *soc;
void __iomem *base;
+ struct irq_chip irq_chip;
+ unsigned int num_irq;
+ unsigned int *irq;
};
static void owl_update_bits(void __iomem *base, u32 mask, u32 val)
@@ -701,10 +705,213 @@ static int owl_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
+static void irq_set_type(struct owl_pinctrl *pctrl, int gpio, unsigned int type)
+{
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+ unsigned int offset, value, irq_type = 0;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ /*
+ * Since the hardware doesn't support interrupts on both edges,
+ * emulate it in software by setting a single-edge
+ * interrupt and switching to the opposite edge while ACKing
+ * the interrupt.
+ */
+ if (owl_gpio_get(&pctrl->chip, gpio))
+ irq_type = OWL_GPIO_INT_EDGE_FALLING;
+ else
+ irq_type = OWL_GPIO_INT_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ irq_type = OWL_GPIO_INT_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_type = OWL_GPIO_INT_EDGE_FALLING;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_type = OWL_GPIO_INT_LEVEL_HIGH;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_type = OWL_GPIO_INT_LEVEL_LOW;
+ break;
+
+ default:
+ break;
+ }
+
+ port = owl_gpio_get_port(pctrl, &gpio);
+ if (WARN_ON(port == NULL))
+ return;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ offset = (gpio < 16) ? 4 : 0;
+ value = readl_relaxed(gpio_base + port->intc_type + offset);
+ value &= ~(OWL_GPIO_INT_MASK << ((gpio % 16) * 2));
+ value |= irq_type << ((gpio % 16) * 2);
+ writel_relaxed(value, gpio_base + port->intc_type + offset);
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static void owl_gpio_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct owl_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+ unsigned int gpio = data->hwirq;
+ u32 val;
+
+ port = owl_gpio_get_port(pctrl, &gpio);
+ if (WARN_ON(port == NULL))
+ return;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ owl_gpio_update_reg(gpio_base + port->intc_msk, gpio, false);
+
+ /* disable port interrupt if no interrupt pending bit is active */
+ val = readl_relaxed(gpio_base + port->intc_msk);
+ if (val == 0)
+ owl_gpio_update_reg(gpio_base + port->intc_ctl,
+ OWL_GPIO_CTLR_ENABLE, false);
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static void owl_gpio_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct owl_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+ unsigned int gpio = data->hwirq;
+ u32 value;
+
+ port = owl_gpio_get_port(pctrl, &gpio);
+ if (WARN_ON(port == NULL))
+ return;
+
+ gpio_base = pctrl->base + port->offset;
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ /* enable port interrupt */
+ value = readl_relaxed(gpio_base + port->intc_ctl);
+ value |= BIT(OWL_GPIO_CTLR_ENABLE) | BIT(OWL_GPIO_CTLR_SAMPLE_CLK_24M);
+ writel_relaxed(value, gpio_base + port->intc_ctl);
+
+ /* enable GPIO interrupt */
+ owl_gpio_update_reg(gpio_base + port->intc_msk, gpio, true);
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static void owl_gpio_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct owl_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+ unsigned int gpio = data->hwirq;
+
+ /*
+ * When emulating both edges, switch the interrupt edge to the
+ * opposite of the edge that just triggered.
+ */
+ if (irqd_get_trigger_type(data) == IRQ_TYPE_EDGE_BOTH) {
+ if (owl_gpio_get(gc, gpio))
+ irq_set_type(pctrl, gpio, IRQ_TYPE_EDGE_FALLING);
+ else
+ irq_set_type(pctrl, gpio, IRQ_TYPE_EDGE_RISING);
+ }
+
+ port = owl_gpio_get_port(pctrl, &gpio);
+ if (WARN_ON(port == NULL))
+ return;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ owl_gpio_update_reg(gpio_base + port->intc_ctl,
+ OWL_GPIO_CTLR_PENDING, true);
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int owl_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct owl_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ irq_set_handler_locked(data, handle_level_irq);
+ else
+ irq_set_handler_locked(data, handle_edge_irq);
+
+ irq_set_type(pctrl, data->hwirq, type);
+
+ return 0;
+}
+
+static void owl_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct owl_pinctrl *pctrl = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_domain *domain = pctrl->chip.irq.domain;
+ unsigned int parent = irq_desc_get_irq(desc);
+ const struct owl_gpio_port *port;
+ void __iomem *base;
+ unsigned int pin, irq, offset = 0, i;
+ unsigned long pending_irq;
+
+ chained_irq_enter(chip, desc);
+
+ for (i = 0; i < pctrl->soc->nports; i++) {
+ port = &pctrl->soc->ports[i];
+ base = pctrl->base + port->offset;
+
+ /* skip ports that are not associated with this irq */
+ if (parent != pctrl->irq[i])
+ goto skip;
+
+ pending_irq = readl_relaxed(base + port->intc_pd);
+
+ for_each_set_bit(pin, &pending_irq, port->pins) {
+ irq = irq_find_mapping(domain, offset + pin);
+ generic_handle_irq(irq);
+
+ /* clear pending interrupt */
+ owl_gpio_update_reg(base + port->intc_pd, pin, true);
+ }
+
+skip:
+ offset += port->pins;
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
static int owl_gpio_init(struct owl_pinctrl *pctrl)
{
struct gpio_chip *chip;
- int ret;
+ struct gpio_irq_chip *gpio_irq;
+ int ret, i, j, offset;
chip = &pctrl->chip;
chip->base = -1;
@@ -714,6 +921,35 @@ static int owl_gpio_init(struct owl_pinctrl *pctrl)
chip->owner = THIS_MODULE;
chip->of_node = pctrl->dev->of_node;
+ pctrl->irq_chip.name = chip->of_node->name;
+ pctrl->irq_chip.irq_ack = owl_gpio_irq_ack;
+ pctrl->irq_chip.irq_mask = owl_gpio_irq_mask;
+ pctrl->irq_chip.irq_unmask = owl_gpio_irq_unmask;
+ pctrl->irq_chip.irq_set_type = owl_gpio_irq_set_type;
+
+ gpio_irq = &chip->irq;
+ gpio_irq->chip = &pctrl->irq_chip;
+ gpio_irq->handler = handle_simple_irq;
+ gpio_irq->default_type = IRQ_TYPE_NONE;
+ gpio_irq->parent_handler = owl_gpio_irq_handler;
+ gpio_irq->parent_handler_data = pctrl;
+ gpio_irq->num_parents = pctrl->num_irq;
+ gpio_irq->parents = pctrl->irq;
+
+ gpio_irq->map = devm_kcalloc(pctrl->dev, chip->ngpio,
+ sizeof(*gpio_irq->map), GFP_KERNEL);
+ if (!gpio_irq->map)
+ return -ENOMEM;
+
+ for (i = 0, offset = 0; i < pctrl->soc->nports; i++) {
+ const struct owl_gpio_port *port = &pctrl->soc->ports[i];
+
+ for (j = 0; j < port->pins; j++)
+ gpio_irq->map[offset + j] = gpio_irq->parents[i];
+
+ offset += port->pins;
+ }
+
ret = gpiochip_add_data(&pctrl->chip, pctrl);
if (ret) {
dev_err(pctrl->dev, "failed to register gpiochip\n");
@@ -728,7 +964,7 @@ int owl_pinctrl_probe(struct platform_device *pdev,
{
struct resource *res;
struct owl_pinctrl *pctrl;
- int ret;
+ int ret, i;
pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
@@ -772,14 +1008,40 @@ int owl_pinctrl_probe(struct platform_device *pdev,
&owl_pinctrl_desc, pctrl);
if (IS_ERR(pctrl->pctrldev)) {
dev_err(&pdev->dev, "could not register Actions OWL pinmux driver\n");
- return PTR_ERR(pctrl->pctrldev);
+ ret = PTR_ERR(pctrl->pctrldev);
+ goto err_exit;
+ }
+
+ ret = platform_irq_count(pdev);
+ if (ret < 0)
+ goto err_exit;
+
+ pctrl->num_irq = ret;
+
+ pctrl->irq = devm_kcalloc(&pdev->dev, pctrl->num_irq,
+ sizeof(*pctrl->irq), GFP_KERNEL);
+ if (!pctrl->irq) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ for (i = 0; i < pctrl->num_irq; i++) {
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ goto err_exit;
+ pctrl->irq[i] = ret;
}
ret = owl_gpio_init(pctrl);
if (ret)
- return ret;
+ goto err_exit;
platform_set_drvdata(pdev, pctrl);
return 0;
+
+err_exit:
+ clk_disable_unprepare(pctrl->clk);
+
+ return ret;
}
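
irq_set_type() in the pinctrl-owl.c hunks above packs one 2-bit trigger type per GPIO, sixteen GPIOs to a 32-bit register, using a read-modify-write that clears the pin's field with OWL_GPIO_INT_MASK before installing the new type. The field packing in isolation, runnable:

#include <stdint.h>
#include <stdio.h>

#define INT_MASK	0x3	/* 2 bits of trigger type per pin */

static uint32_t set_type(uint32_t reg, unsigned int gpio, uint32_t type)
{
	unsigned int shift = (gpio % 16) * 2;	/* 16 pins per register */

	reg &= ~(INT_MASK << shift);		/* clear the old type */
	reg |= (type & INT_MASK) << shift;	/* install the new one */
	return reg;
}

int main(void)
{
	uint32_t reg = set_type(0, 5, 0x2);	/* e.g. rising edge on pin 5 */

	printf("0x%08x\n", reg);		/* 0x00000800: bits 11:10 = 10b */
	return 0;
}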
diff --git a/drivers/pinctrl/actions/pinctrl-owl.h b/drivers/pinctrl/actions/pinctrl-owl.h
index 74342378937c..a724d1d406d4 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.h
+++ b/drivers/pinctrl/actions/pinctrl-owl.h
@@ -29,6 +29,18 @@ enum owl_pinconf_drv {
OWL_PINCONF_DRV_12MA,
};
+/* GPIO CTRL Bit Definition */
+#define OWL_GPIO_CTLR_PENDING 0
+#define OWL_GPIO_CTLR_ENABLE 1
+#define OWL_GPIO_CTLR_SAMPLE_CLK_24M 2
+
+/* GPIO TYPE Bit Definition */
+#define OWL_GPIO_INT_LEVEL_HIGH 0
+#define OWL_GPIO_INT_LEVEL_LOW 1
+#define OWL_GPIO_INT_EDGE_RISING 2
+#define OWL_GPIO_INT_EDGE_FALLING 3
+#define OWL_GPIO_INT_MASK 3
+
/**
* struct owl_pullctl - Actions pad pull control register
* @reg: offset to the pull control register
@@ -121,6 +133,10 @@ struct owl_pinmux_func {
* @outen: offset of the output enable register.
* @inen: offset of the input enable register.
* @dat: offset of the data register.
+ * @intc_ctl: offset of the interrupt control register.
+ * @intc_pd: offset of the interrupt pending register.
+ * @intc_msk: offset of the interrupt mask register.
+ * @intc_type: offset of the interrupt type register.
*/
struct owl_gpio_port {
unsigned int offset;
@@ -128,6 +144,10 @@ struct owl_gpio_port {
unsigned int outen;
unsigned int inen;
unsigned int dat;
+ unsigned int intc_ctl;
+ unsigned int intc_pd;
+ unsigned int intc_msk;
+ unsigned int intc_type;
};
/**
@@ -140,7 +160,7 @@ struct owl_gpio_port {
* @ngroups: number of entries in @groups.
* @padinfo: array describing the pad info of this SoC.
* @ngpios: number of pingroups the driver should expose as GPIOs.
- * @port: array describing all GPIO ports of this SoC.
+ * @ports: array describing all GPIO ports of this SoC.
* @nports: number of GPIO ports in this SoC.
*/
struct owl_pinctrl_soc_data {
diff --git a/drivers/pinctrl/actions/pinctrl-s900.c b/drivers/pinctrl/actions/pinctrl-s900.c
index 5503c7945764..ea67b14ef93b 100644
--- a/drivers/pinctrl/actions/pinctrl-s900.c
+++ b/drivers/pinctrl/actions/pinctrl-s900.c
@@ -1821,22 +1821,27 @@ static struct owl_padinfo s900_padinfo[NUM_PADS] = {
[SGPIO3] = PAD_INFO_PULLCTL_ST(SGPIO3)
};
-#define OWL_GPIO_PORT(port, base, count, _outen, _inen, _dat) \
- [OWL_GPIO_PORT_##port] = { \
- .offset = base, \
- .pins = count, \
- .outen = _outen, \
- .inen = _inen, \
- .dat = _dat, \
+#define OWL_GPIO_PORT(port, base, count, _outen, _inen, _dat, \
+ _intc_ctl, _intc_pd, _intc_msk, _intc_type) \
+ [OWL_GPIO_PORT_##port] = { \
+ .offset = base, \
+ .pins = count, \
+ .outen = _outen, \
+ .inen = _inen, \
+ .dat = _dat, \
+ .intc_ctl = _intc_ctl, \
+ .intc_pd = _intc_pd, \
+ .intc_msk = _intc_msk, \
+ .intc_type = _intc_type, \
}
static const struct owl_gpio_port s900_gpio_ports[] = {
- OWL_GPIO_PORT(A, 0x0000, 32, 0x0, 0x4, 0x8),
- OWL_GPIO_PORT(B, 0x000C, 32, 0x0, 0x4, 0x8),
- OWL_GPIO_PORT(C, 0x0018, 12, 0x0, 0x4, 0x8),
- OWL_GPIO_PORT(D, 0x0024, 30, 0x0, 0x4, 0x8),
- OWL_GPIO_PORT(E, 0x0030, 32, 0x0, 0x4, 0x8),
- OWL_GPIO_PORT(F, 0x00F0, 8, 0x0, 0x4, 0x8)
+ OWL_GPIO_PORT(A, 0x0000, 32, 0x0, 0x4, 0x8, 0x204, 0x208, 0x20C, 0x240),
+ OWL_GPIO_PORT(B, 0x000C, 32, 0x0, 0x4, 0x8, 0x534, 0x204, 0x208, 0x23C),
+ OWL_GPIO_PORT(C, 0x0018, 12, 0x0, 0x4, 0x8, 0x52C, 0x200, 0x204, 0x238),
+ OWL_GPIO_PORT(D, 0x0024, 30, 0x0, 0x4, 0x8, 0x524, 0x1FC, 0x200, 0x234),
+ OWL_GPIO_PORT(E, 0x0030, 32, 0x0, 0x4, 0x8, 0x51C, 0x1F8, 0x1FC, 0x230),
+ OWL_GPIO_PORT(F, 0x00F0, 8, 0x0, 0x4, 0x8, 0x460, 0x140, 0x144, 0x178)
};
static struct owl_pinctrl_soc_data s900_pinctrl_data = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 7f13ce8450a3..aefe3c33dffd 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -95,7 +95,7 @@ static inline void aspeed_sig_desc_print_val(
*
* @desc: The signal descriptor of interest
* @enabled: True to query the enabled state, false to query disabled state
- * @regmap: The IP block's regmap instance
+ * @map: The IP block's regmap instance
*
* Return: 1 if the descriptor's bitfield is configured to the state
* selected by @enabled, 0 if not, and less than zero if an unrecoverable
@@ -594,7 +594,7 @@ static inline const struct aspeed_pin_config *find_pinconf_config(
/**
* @param: pinconf configuration parameter
* @arg: The supported argument for @param, or -1 if any value is supported
- * @value: The register value to write to configure @arg for @param
+ * @val: The register value to write to configure @arg for @param
*
* The map is to be used in conjunction with the configuration array supplied
* by the driver implementation.
diff --git a/drivers/pinctrl/berlin/Kconfig b/drivers/pinctrl/berlin/Kconfig
index 8fe6ad7795dc..0dd60278e973 100644
--- a/drivers/pinctrl/berlin/Kconfig
+++ b/drivers/pinctrl/berlin/Kconfig
@@ -5,6 +5,11 @@ config PINCTRL_BERLIN
select PINMUX
select REGMAP_MMIO
+config PINCTRL_AS370
+ bool "Synaptics as370 pin controller driver"
+ depends on OF
+ select PINCTRL_BERLIN
+
config PINCTRL_BERLIN_BG2
def_bool MACH_BERLIN_BG2
depends on OF
diff --git a/drivers/pinctrl/berlin/Makefile b/drivers/pinctrl/berlin/Makefile
index 6f641ce2c830..00c53ca3676d 100644
--- a/drivers/pinctrl/berlin/Makefile
+++ b/drivers/pinctrl/berlin/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_PINCTRL_BERLIN_BG2) += berlin-bg2.o
obj-$(CONFIG_PINCTRL_BERLIN_BG2CD) += berlin-bg2cd.o
obj-$(CONFIG_PINCTRL_BERLIN_BG2Q) += berlin-bg2q.o
obj-$(CONFIG_PINCTRL_BERLIN_BG4CT) += berlin-bg4ct.o
+obj-$(CONFIG_PINCTRL_AS370) += pinctrl-as370.o
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index d6d183e9db17..b5903fffb3d0 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -216,10 +216,8 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
}
/* we will reallocate later */
- pctrl->functions = devm_kcalloc(&pdev->dev,
- max_functions,
- sizeof(*pctrl->functions),
- GFP_KERNEL);
+ pctrl->functions = kcalloc(max_functions,
+ sizeof(*pctrl->functions), GFP_KERNEL);
if (!pctrl->functions)
return -ENOMEM;
@@ -257,8 +255,10 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
function++;
}
- if (!found)
+ if (!found) {
+ kfree(pctrl->functions);
return -EINVAL;
+ }
if (!function->groups) {
function->groups =
@@ -267,8 +267,10 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
sizeof(char *),
GFP_KERNEL);
- if (!function->groups)
+ if (!function->groups) {
+ kfree(pctrl->functions);
return -ENOMEM;
+ }
}
groups = function->groups;
diff --git a/drivers/pinctrl/berlin/pinctrl-as370.c b/drivers/pinctrl/berlin/pinctrl-as370.c
new file mode 100644
index 000000000000..d2bb811fc5fa
--- /dev/null
+++ b/drivers/pinctrl/berlin/pinctrl-as370.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synaptics AS370 pinctrl driver
+ *
+ * Copyright (C) 2018 Synaptics Incorporated
+ *
+ * Author: Jisheng Zhang <jszhang@kernel.org>
+ */
+
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "berlin.h"
+
+static const struct berlin_desc_group as370_soc_pinctrl_groups[] = {
+ BERLIN_PINCTRL_GROUP("I2S1_BCLKIO", 0x0, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO0 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* BCLKIO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG0 */
+ BERLIN_PINCTRL_GROUP("I2S1_LRCKIO", 0x0, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO1 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* LRCKIO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG1 */
+ BERLIN_PINCTRL_GROUP("I2S1_DO0", 0x0, 0x3, 0x06,
+ BERLIN_PINCTRL_FUNCTION(0x0, "por"), /* 1P8V RSTB*/
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO0 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio"), /* GPIO2 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG2 */
+ BERLIN_PINCTRL_GROUP("I2S1_DO1", 0x0, 0x3, 0x09,
+ BERLIN_PINCTRL_FUNCTION(0x0, "por"), /* 3P3V RSTB */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO1 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio"), /* GPIO3 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG3 */
+ BERLIN_PINCTRL_GROUP("I2S1_DO2", 0x0, 0x3, 0x0c,
+ BERLIN_PINCTRL_FUNCTION(0x0, "por"), /* CORE RSTB */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO2 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm4"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio"), /* GPIO4 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG4 */
+ BERLIN_PINCTRL_GROUP("I2S1_DO3", 0x0, 0x3, 0x0f,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO5 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO3 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm5"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "spififib"), /* SPDIFIB */
+ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG5 */
+ BERLIN_PINCTRL_GROUP("I2S1_MCLK", 0x0, 0x3, 0x12,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO6 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* MCLK */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG6 */
+ BERLIN_PINCTRL_GROUP("I2S2_BCLKIO", 0x0, 0x3, 0x15,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO7 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* BCLKIO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG7 */
+ BERLIN_PINCTRL_GROUP("I2S2_LRCKIO", 0x0, 0x3, 0x18,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO8 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* LRCKIO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG8 */
+ BERLIN_PINCTRL_GROUP("I2S2_DI0", 0x0, 0x3, 0x1b,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO9 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI0 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm2"),
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG9 */
+ BERLIN_PINCTRL_GROUP("I2S2_DI1", 0x4, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO10 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI1 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm3"),
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG10 */
+ BERLIN_PINCTRL_GROUP("I2S2_DI2", 0x4, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO11 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI2 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm6"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "spdific"), /* SPDIFIC */
+ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG11 */
+ BERLIN_PINCTRL_GROUP("I2S2_DI3", 0x4, 0x3, 0x06,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO12 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI3 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm7"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "spdifia"), /* SPDIFIA */
+ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG12 */
+ BERLIN_PINCTRL_GROUP("PDM_CLKO", 0x4, 0x3, 0x09,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO13 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pdm"), /* CLKO */
+ BERLIN_PINCTRL_FUNCTION(0x2, "i2s2"), /* MCLK */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG13 */
+ BERLIN_PINCTRL_GROUP("PDM_DI0", 0x4, 0x3, 0x0c,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO14 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pdm"), /* DI0 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG14 */
+ BERLIN_PINCTRL_GROUP("PDM_DI1", 0x4, 0x3, 0x0f,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO15 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pdm"), /* DI1 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG15 */
+ BERLIN_PINCTRL_GROUP("PDM_DI2", 0x4, 0x3, 0x12,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO16 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pdm"), /* DI2 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm4"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "spdifid"), /* SPDIFID */
+ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG16 */
+ BERLIN_PINCTRL_GROUP("PDM_DI3", 0x4, 0x3, 0x15,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO17 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pdm"), /* DI3 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm5"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "spdifi"), /* SPDIFI */
+ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG17 */
+ BERLIN_PINCTRL_GROUP("NAND_IO0", 0x4, 0x3, 0x18,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO0 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc"), /* DATA0 */
+ BERLIN_PINCTRL_FUNCTION(0x4, "pcie0")), /* MDIO */
+ BERLIN_PINCTRL_GROUP("NAND_IO1", 0x4, 0x3, 0x1b,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO1 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc"), /* DATA1 */
+ BERLIN_PINCTRL_FUNCTION(0x4, "pcie0")), /* MDC */
+ BERLIN_PINCTRL_GROUP("NAND_IO2", 0x8, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO2 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc"), /* DATA2 */
+ BERLIN_PINCTRL_FUNCTION(0x4, "pcie1")), /* MDIO */
+ BERLIN_PINCTRL_GROUP("NAND_IO3", 0x8, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO3 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc"), /* DATA3 */
+ BERLIN_PINCTRL_FUNCTION(0x4, "pcie1")), /* MDC */
+ BERLIN_PINCTRL_GROUP("NAND_IO4", 0x8, 0x3, 0x06,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO4 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc")), /* DATA4 */
+ BERLIN_PINCTRL_GROUP("NAND_IO5", 0x8, 0x3, 0x09,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO5 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc")), /* DATA5 */
+ BERLIN_PINCTRL_GROUP("NAND_IO6", 0x8, 0x3, 0x0c,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO6 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc")), /* DATA6 */
+ BERLIN_PINCTRL_GROUP("NAND_IO7", 0x8, 0x3, 0x0f,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* IO7 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc")), /* DATA7 */
+ BERLIN_PINCTRL_GROUP("NAND_ALE", 0x8, 0x3, 0x12,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* ALE */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm6"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO18 */
+ BERLIN_PINCTRL_GROUP("NAND_CLE", 0x8, 0x3, 0x15,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* CLE */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm7"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO19 */
+ BERLIN_PINCTRL_GROUP("NAND_WEn", 0x8, 0x3, 0x18,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* WEn */
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO20 */
+ BERLIN_PINCTRL_GROUP("NAND_REn", 0x8, 0x3, 0x1b,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* REn */
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO21 */
+ BERLIN_PINCTRL_GROUP("NAND_WPn", 0xc, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* WPn */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc"), /* CLK */
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO22 */
+ BERLIN_PINCTRL_GROUP("NAND_CEn", 0xc, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* CEn */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc"), /* RSTn */
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO23 */
+ BERLIN_PINCTRL_GROUP("NAND_RDY", 0xc, 0x3, 0x06,
+ BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* RDY */
+ BERLIN_PINCTRL_FUNCTION(0x1, "emmc"), /* CMD */
+ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO24 */
+ BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x09,
+ BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* GPIO25 */
+ BERLIN_PINCTRL_GROUP("SPI1_SS1n", 0xc, 0x3, 0x0c,
+ BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS1n */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio"), /* GPIO26 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm2")),
+ BERLIN_PINCTRL_GROUP("SPI1_SS2n", 0xc, 0x3, 0x0f,
+ BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* RXD */
+ BERLIN_PINCTRL_FUNCTION(0x1, "spi1"), /* SS2n */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio"), /* GPIO27 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm3")),
+ BERLIN_PINCTRL_GROUP("SPI1_SS3n", 0xc, 0x3, 0x12,
+ BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* TXD */
+ BERLIN_PINCTRL_FUNCTION(0x1, "spi1"), /* SS3n */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO28 */
+ BERLIN_PINCTRL_GROUP("SPI1_SCLK", 0xc, 0x3, 0x15,
+ BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SCLK */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO29 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm4")),
+ BERLIN_PINCTRL_GROUP("SPI1_SDO", 0xc, 0x3, 0x18,
+ BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDO */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO30 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm5")),
+ BERLIN_PINCTRL_GROUP("SPI1_SDI", 0xc, 0x3, 0x1b,
+ BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDI */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* GPIO31 */
+ BERLIN_PINCTRL_GROUP("USB0_DRV_VBUS", 0x10, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "usb0"), /* VBUS */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO32 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "refclko")), /* 25M */
+ BERLIN_PINCTRL_GROUP("TW1_SCL", 0x10, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO33 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "tw1")), /* SCL */
+ BERLIN_PINCTRL_GROUP("TW1_SDA", 0x10, 0x3, 0x06,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO34 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "tw1")), /* SDA */
+ BERLIN_PINCTRL_GROUP("TW0_SCL", 0x10, 0x3, 0x09,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO35 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "tw0")), /* SCL */
+ BERLIN_PINCTRL_GROUP("TW0_SDA", 0x10, 0x3, 0x0c,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO36 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "tw0")), /* SDA */
+ BERLIN_PINCTRL_GROUP("TMS", 0x10, 0x3, 0x0f,
+ BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TMS */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
+ BERLIN_PINCTRL_FUNCTION(0x4, "pwm0")),
+ BERLIN_PINCTRL_GROUP("TDI", 0x10, 0x3, 0x12,
+ BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TDI */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO38 */
+ BERLIN_PINCTRL_FUNCTION(0x4, "pwm1")),
+ BERLIN_PINCTRL_GROUP("TDO", 0x10, 0x3, 0x15,
+ BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TDO */
+ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO39 */
+ BERLIN_PINCTRL_FUNCTION(0x4, "pwm0")),
+ BERLIN_PINCTRL_GROUP("PWM6", 0x10, 0x3, 0x18,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO40 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm6")),
+ BERLIN_PINCTRL_GROUP("PWM7", 0x10, 0x3, 0x1b,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO41 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm7")),
+ BERLIN_PINCTRL_GROUP("PWM0", 0x14, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "por"), /* VDDCPUSOC RSTB */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm0"),
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO42 */
+ BERLIN_PINCTRL_GROUP("PWM1", 0x14, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO43 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm1")),
+ BERLIN_PINCTRL_GROUP("PWM2", 0x14, 0x3, 0x06,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO44 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm2")),
+ BERLIN_PINCTRL_GROUP("PWM3", 0x14, 0x3, 0x09,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO45 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm3")),
+ BERLIN_PINCTRL_GROUP("PWM4", 0x14, 0x3, 0x0c,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO46 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm4")),
+ BERLIN_PINCTRL_GROUP("PWM5", 0x14, 0x3, 0x0f,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO47 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "pwm5")),
+ BERLIN_PINCTRL_GROUP("URT1_RTSn", 0x14, 0x3, 0x12,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO48 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* RTSn */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm6"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "tw1a"), /* SCL */
+ BERLIN_PINCTRL_FUNCTION(0x4, "aio"), /* DBG0 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG18 */
+ BERLIN_PINCTRL_GROUP("URT1_CTSn", 0x14, 0x3, 0x15,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO49 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* CTSn */
+ BERLIN_PINCTRL_FUNCTION(0x2, "pwm7"),
+ BERLIN_PINCTRL_FUNCTION(0x3, "tw1a"), /* SDA */
+ BERLIN_PINCTRL_FUNCTION(0x4, "aio"), /* DBG1 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG19 */
+ BERLIN_PINCTRL_GROUP("URT1_RXD", 0x14, 0x3, 0x18,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO50 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* RXD */
+ BERLIN_PINCTRL_FUNCTION(0x4, "aio"), /* DBG2 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG20 */
+ BERLIN_PINCTRL_GROUP("URT1_TXD", 0x14, 0x3, 0x1b,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO51 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* TXD */
+ BERLIN_PINCTRL_FUNCTION(0x4, "aio"), /* DBG3 */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG21 */
+ BERLIN_PINCTRL_GROUP("I2S3_DI", 0x18, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO52 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s3"), /* DI */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG22 */
+ BERLIN_PINCTRL_GROUP("I2S3_DO", 0x18, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO53 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s3"), /* DO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG23 */
+ BERLIN_PINCTRL_GROUP("I2S3_BCLKIO", 0x18, 0x3, 0x06,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO54 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s3"), /* BCLKIO */
+ BERLIN_PINCTRL_FUNCTION(0x5, "clk")), /* DBG */
+ BERLIN_PINCTRL_GROUP("I2S3_LRCKIO", 0x18, 0x3, 0x09,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO55 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "i2s3")), /* LRCKIO */
+ BERLIN_PINCTRL_GROUP("SD0_DAT0", 0x18, 0x3, 0x0c,
+ BERLIN_PINCTRL_FUNCTION(0x0, "cpupll"), /* OUT */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT0 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO56 */
+ BERLIN_PINCTRL_GROUP("SD0_DAT1", 0x18, 0x3, 0x0f,
+ BERLIN_PINCTRL_FUNCTION(0x0, "syspll"), /* OUT */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT1 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO57 */
+ BERLIN_PINCTRL_GROUP("SD0_CLK", 0x18, 0x3, 0x12,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO58 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0")), /* CLK */
+ BERLIN_PINCTRL_GROUP("SD0_DAT2", 0x18, 0x3, 0x15,
+ BERLIN_PINCTRL_FUNCTION(0x0, "mempll"), /* OUT */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT2 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO59 */
+ BERLIN_PINCTRL_GROUP("SD0_DAT3", 0x18, 0x3, 0x18,
+ BERLIN_PINCTRL_FUNCTION(0x0, "apll0"), /* OUT */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* DAT3 */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO60 */
+ BERLIN_PINCTRL_GROUP("SD0_CMD", 0x18, 0x3, 0x1b,
+ BERLIN_PINCTRL_FUNCTION(0x0, "apll1"), /* OUT */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* CMD */
+ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO61 */
+ BERLIN_PINCTRL_GROUP("SD0_CDn", 0x1c, 0x3, 0x00,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO62 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* CDn */
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm2")),
+ BERLIN_PINCTRL_GROUP("SD0_WP", 0x1c, 0x3, 0x03,
+ BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO63 */
+ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* WP */
+ BERLIN_PINCTRL_FUNCTION(0x3, "pwm3")),
+};
+
+static const struct berlin_pinctrl_desc as370_soc_pinctrl_data = {
+ .groups = as370_soc_pinctrl_groups,
+ .ngroups = ARRAY_SIZE(as370_soc_pinctrl_groups),
+};
+
+static const struct of_device_id as370_pinctrl_match[] = {
+ {
+ .compatible = "syna,as370-soc-pinctrl",
+ .data = &as370_soc_pinctrl_data,
+ },
+ {}
+};
+
+static int as370_pinctrl_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match =
+ of_match_device(as370_pinctrl_match, &pdev->dev);
+ struct regmap_config *rmconfig;
+ struct regmap *regmap;
+ struct resource *res;
+ void __iomem *base;
+
+ rmconfig = devm_kzalloc(&pdev->dev, sizeof(*rmconfig), GFP_KERNEL);
+ if (!rmconfig)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ rmconfig->reg_bits = 32;
+ rmconfig->val_bits = 32;
+ rmconfig->reg_stride = 4;
+ rmconfig->max_register = resource_size(res);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, rmconfig);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return berlin_pinctrl_probe_regmap(pdev, match->data, regmap);
+}
+
+static struct platform_driver as370_pinctrl_driver = {
+ .probe = as370_pinctrl_probe,
+ .driver = {
+ .name = "as370-pinctrl",
+ .of_match_table = as370_pinctrl_match,
+ },
+};
+builtin_platform_driver(as370_pinctrl_driver);
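
Each BERLIN_PINCTRL_GROUP() entry above packs a register offset, a mux field
width and an LSB position. A minimal sketch of the masked write this implies,
assuming standard regmap semantics (example_set_mux is illustrative, not part
of the patch):

	/*
	 * For BERLIN_PINCTRL_GROUP("I2S1_BCLKIO", 0x0, 0x3, 0x00), selecting
	 * function 0x1 ("i2s1") amounts to
	 * example_set_mux(regmap, 0x0, 3, 0, 0x1).
	 */
	static int example_set_mux(struct regmap *regmap, unsigned int offset,
				   unsigned int bit_width, unsigned int lsb,
				   unsigned int muxval)
	{
		u32 mask = GENMASK(lsb + bit_width - 1, lsb);

		return regmap_update_bits(regmap, offset, mask, muxval << lsb);
	}
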
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index e5a303002021..a3dd777e3ce8 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/list.h>
-#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/pinctrl/consumer.h>
@@ -617,6 +616,26 @@ struct group_desc *pinctrl_generic_get_group(struct pinctrl_dev *pctldev,
}
EXPORT_SYMBOL_GPL(pinctrl_generic_get_group);
+static int pinctrl_generic_group_name_to_selector(struct pinctrl_dev *pctldev,
+ const char *name)
+{
+ const struct pinctrl_ops *ops = pctldev->desc->pctlops;
+ int ngroups = ops->get_groups_count(pctldev);
+ int selector = 0;
+
+ /* See if this pctldev has this group */
+ while (selector < ngroups) {
+ const char *gname = ops->get_group_name(pctldev, selector);
+
+ if (!strcmp(name, gname))
+ return selector;
+
+ selector++;
+ }
+
+ return -EINVAL;
+}
+
/**
* pinctrl_generic_add_group() - adds a new pin group
* @pctldev: pin controller device
@@ -631,6 +650,16 @@ int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
int *pins, int num_pins, void *data)
{
struct group_desc *group;
+ int selector;
+
+ if (!name)
+ return -EINVAL;
+
+ selector = pinctrl_generic_group_name_to_selector(pctldev, name);
+ if (selector >= 0)
+ return selector;
+
+ selector = pctldev->num_groups;
group = devm_kzalloc(pctldev->dev, sizeof(*group), GFP_KERNEL);
if (!group)
@@ -641,12 +670,11 @@ int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
group->num_pins = num_pins;
group->data = data;
- radix_tree_insert(&pctldev->pin_group_tree, pctldev->num_groups,
- group);
+ radix_tree_insert(&pctldev->pin_group_tree, selector, group);
pctldev->num_groups++;
- return 0;
+ return selector;
}
EXPORT_SYMBOL_GPL(pinctrl_generic_add_group);
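
With this change pinctrl_generic_add_group() behaves as get-or-create: adding
a name that already exists returns the existing selector, and a successful
insert now returns the new selector instead of 0. An illustrative caller
(hypothetical function and group name):

	static int example_get_or_create(struct pinctrl_dev *pctldev,
					 int *pins, int npins)
	{
		int first, again;

		first = pinctrl_generic_add_group(pctldev, "mygrp",
						  pins, npins, NULL);
		if (first < 0)
			return first;

		/* A second add of the same name is now a lookup. */
		again = pinctrl_generic_add_group(pctldev, "mygrp",
						  pins, npins, NULL);

		return again;	/* == first */
	}
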
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 8cf2eba17c8c..4a0526e567df 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -218,12 +218,6 @@ int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
int pinctrl_generic_remove_group(struct pinctrl_dev *pctldev,
unsigned int group_selector);
-static inline int
-pinctrl_generic_remove_last_group(struct pinctrl_dev *pctldev)
-{
- return pinctrl_generic_remove_group(pctldev, pctldev->num_groups - 1);
-}
-
#endif /* CONFIG_GENERIC_PINCTRL_GROUPS */
struct pinctrl_dev *get_pinctrl_dev_from_devname(const char *dev_name);
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 0d8ba1ef5329..dccf64c55498 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -117,6 +117,13 @@ config PINCTRL_IMX7ULP
help
Say Y here to enable the imx7ulp pinctrl driver
+config PINCTRL_IMX8MQ
+ bool "IMX8MQ pinctrl driver"
+ depends on SOC_IMX8MQ
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx8mq pinctrl driver
+
config PINCTRL_VF610
bool "Freescale Vybrid VF610 pinctrl driver"
depends on SOC_VF610
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 368be8cfc9b1..73175b3e7c9c 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_PINCTRL_IMX6SX) += pinctrl-imx6sx.o
obj-$(CONFIG_PINCTRL_IMX6UL) += pinctrl-imx6ul.o
obj-$(CONFIG_PINCTRL_IMX7D) += pinctrl-imx7d.o
obj-$(CONFIG_PINCTRL_IMX7ULP) += pinctrl-imx7ulp.o
+obj-$(CONFIG_PINCTRL_IMX8MQ) += pinctrl-imx8mq.o
obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o
obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 1c6bb15579e1..b04edc22dad7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -383,7 +383,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
const char *name;
int i, ret;
- if (group > pctldev->num_groups)
+ if (group >= pctldev->num_groups)
return;
seq_puts(s, "\n");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index c3bdd90b1422..deb7870b3d1a 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -429,7 +429,7 @@ static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
const char *name;
int i, ret;
- if (group > info->ngroups)
+ if (group >= info->ngroups)
return;
seq_puts(s, "\n");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mq.c b/drivers/pinctrl/freescale/pinctrl-imx8mq.c
new file mode 100644
index 000000000000..8d39af541d5f
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mq.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ * Copyright (C) 2018 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+enum imx8mq_pads {
+ MX8MQ_PAD_RESERVE0 = 0,
+ MX8MQ_PAD_RESERVE1 = 1,
+ MX8MQ_PAD_RESERVE2 = 2,
+ MX8MQ_PAD_RESERVE3 = 3,
+ MX8MQ_PAD_RESERVE4 = 4,
+ MX8MQ_IOMUXC_PMIC_STBY_REQ_CCMSRCGPCMIX = 5,
+ MX8MQ_IOMUXC_PMIC_ON_REQ_SNVSMIX = 6,
+ MX8MQ_IOMUXC_ONOFF_SNVSMIX = 7,
+ MX8MQ_IOMUXC_POR_B_SNVSMIX = 8,
+ MX8MQ_IOMUXC_RTC_RESET_B_SNVSMIX = 9,
+ MX8MQ_IOMUXC_GPIO1_IO00 = 10,
+ MX8MQ_IOMUXC_GPIO1_IO01 = 11,
+ MX8MQ_IOMUXC_GPIO1_IO02 = 12,
+ MX8MQ_IOMUXC_GPIO1_IO03 = 13,
+ MX8MQ_IOMUXC_GPIO1_IO04 = 14,
+ MX8MQ_IOMUXC_GPIO1_IO05 = 15,
+ MX8MQ_IOMUXC_GPIO1_IO06 = 16,
+ MX8MQ_IOMUXC_GPIO1_IO07 = 17,
+ MX8MQ_IOMUXC_GPIO1_IO08 = 18,
+ MX8MQ_IOMUXC_GPIO1_IO09 = 19,
+ MX8MQ_IOMUXC_GPIO1_IO10 = 20,
+ MX8MQ_IOMUXC_GPIO1_IO11 = 21,
+ MX8MQ_IOMUXC_GPIO1_IO12 = 22,
+ MX8MQ_IOMUXC_GPIO1_IO13 = 23,
+ MX8MQ_IOMUXC_GPIO1_IO14 = 24,
+ MX8MQ_IOMUXC_GPIO1_IO15 = 25,
+ MX8MQ_IOMUXC_ENET_MDC = 26,
+ MX8MQ_IOMUXC_ENET_MDIO = 27,
+ MX8MQ_IOMUXC_ENET_TD3 = 28,
+ MX8MQ_IOMUXC_ENET_TD2 = 29,
+ MX8MQ_IOMUXC_ENET_TD1 = 30,
+ MX8MQ_IOMUXC_ENET_TD0 = 31,
+ MX8MQ_IOMUXC_ENET_TX_CTL = 32,
+ MX8MQ_IOMUXC_ENET_TXC = 33,
+ MX8MQ_IOMUXC_ENET_RX_CTL = 34,
+ MX8MQ_IOMUXC_ENET_RXC = 35,
+ MX8MQ_IOMUXC_ENET_RD0 = 36,
+ MX8MQ_IOMUXC_ENET_RD1 = 37,
+ MX8MQ_IOMUXC_ENET_RD2 = 38,
+ MX8MQ_IOMUXC_ENET_RD3 = 39,
+ MX8MQ_IOMUXC_SD1_CLK = 40,
+ MX8MQ_IOMUXC_SD1_CMD = 41,
+ MX8MQ_IOMUXC_SD1_DATA0 = 42,
+ MX8MQ_IOMUXC_SD1_DATA1 = 43,
+ MX8MQ_IOMUXC_SD1_DATA2 = 44,
+ MX8MQ_IOMUXC_SD1_DATA3 = 45,
+ MX8MQ_IOMUXC_SD1_DATA4 = 46,
+ MX8MQ_IOMUXC_SD1_DATA5 = 47,
+ MX8MQ_IOMUXC_SD1_DATA6 = 48,
+ MX8MQ_IOMUXC_SD1_DATA7 = 49,
+ MX8MQ_IOMUXC_SD1_RESET_B = 50,
+ MX8MQ_IOMUXC_SD1_STROBE = 51,
+ MX8MQ_IOMUXC_SD2_CD_B = 52,
+ MX8MQ_IOMUXC_SD2_CLK = 53,
+ MX8MQ_IOMUXC_SD2_CMD = 54,
+ MX8MQ_IOMUXC_SD2_DATA0 = 55,
+ MX8MQ_IOMUXC_SD2_DATA1 = 56,
+ MX8MQ_IOMUXC_SD2_DATA2 = 57,
+ MX8MQ_IOMUXC_SD2_DATA3 = 58,
+ MX8MQ_IOMUXC_SD2_RESET_B = 59,
+ MX8MQ_IOMUXC_SD2_WP = 60,
+ MX8MQ_IOMUXC_NAND_ALE = 61,
+ MX8MQ_IOMUXC_NAND_CE0_B = 62,
+ MX8MQ_IOMUXC_NAND_CE1_B = 63,
+ MX8MQ_IOMUXC_NAND_CE2_B = 64,
+ MX8MQ_IOMUXC_NAND_CE3_B = 65,
+ MX8MQ_IOMUXC_NAND_CLE = 66,
+ MX8MQ_IOMUXC_NAND_DATA00 = 67,
+ MX8MQ_IOMUXC_NAND_DATA01 = 68,
+ MX8MQ_IOMUXC_NAND_DATA02 = 69,
+ MX8MQ_IOMUXC_NAND_DATA03 = 70,
+ MX8MQ_IOMUXC_NAND_DATA04 = 71,
+ MX8MQ_IOMUXC_NAND_DATA05 = 72,
+ MX8MQ_IOMUXC_NAND_DATA06 = 73,
+ MX8MQ_IOMUXC_NAND_DATA07 = 74,
+ MX8MQ_IOMUXC_NAND_DQS = 75,
+ MX8MQ_IOMUXC_NAND_RE_B = 76,
+ MX8MQ_IOMUXC_NAND_READY_B = 77,
+ MX8MQ_IOMUXC_NAND_WE_B = 78,
+ MX8MQ_IOMUXC_NAND_WP_B = 79,
+ MX8MQ_IOMUXC_SAI5_RXFS = 80,
+ MX8MQ_IOMUXC_SAI5_RXC = 81,
+ MX8MQ_IOMUXC_SAI5_RXD0 = 82,
+ MX8MQ_IOMUXC_SAI5_RXD1 = 83,
+ MX8MQ_IOMUXC_SAI5_RXD2 = 84,
+ MX8MQ_IOMUXC_SAI5_RXD3 = 85,
+ MX8MQ_IOMUXC_SAI5_MCLK = 86,
+ MX8MQ_IOMUXC_SAI1_RXFS = 87,
+ MX8MQ_IOMUXC_SAI1_RXC = 88,
+ MX8MQ_IOMUXC_SAI1_RXD0 = 89,
+ MX8MQ_IOMUXC_SAI1_RXD1 = 90,
+ MX8MQ_IOMUXC_SAI1_RXD2 = 91,
+ MX8MQ_IOMUXC_SAI1_RXD3 = 92,
+ MX8MQ_IOMUXC_SAI1_RXD4 = 93,
+ MX8MQ_IOMUXC_SAI1_RXD5 = 94,
+ MX8MQ_IOMUXC_SAI1_RXD6 = 95,
+ MX8MQ_IOMUXC_SAI1_RXD7 = 96,
+ MX8MQ_IOMUXC_SAI1_TXFS = 97,
+ MX8MQ_IOMUXC_SAI1_TXC = 98,
+ MX8MQ_IOMUXC_SAI1_TXD0 = 99,
+ MX8MQ_IOMUXC_SAI1_TXD1 = 100,
+ MX8MQ_IOMUXC_SAI1_TXD2 = 101,
+ MX8MQ_IOMUXC_SAI1_TXD3 = 102,
+ MX8MQ_IOMUXC_SAI1_TXD4 = 103,
+ MX8MQ_IOMUXC_SAI1_TXD5 = 104,
+ MX8MQ_IOMUXC_SAI1_TXD6 = 105,
+ MX8MQ_IOMUXC_SAI1_TXD7 = 106,
+ MX8MQ_IOMUXC_SAI1_MCLK = 107,
+ MX8MQ_IOMUXC_SAI2_RXFS = 108,
+ MX8MQ_IOMUXC_SAI2_RXC = 109,
+ MX8MQ_IOMUXC_SAI2_RXD0 = 110,
+ MX8MQ_IOMUXC_SAI2_TXFS = 111,
+ MX8MQ_IOMUXC_SAI2_TXC = 112,
+ MX8MQ_IOMUXC_SAI2_TXD0 = 113,
+ MX8MQ_IOMUXC_SAI2_MCLK = 114,
+ MX8MQ_IOMUXC_SAI3_RXFS = 115,
+ MX8MQ_IOMUXC_SAI3_RXC = 116,
+ MX8MQ_IOMUXC_SAI3_RXD = 117,
+ MX8MQ_IOMUXC_SAI3_TXFS = 118,
+ MX8MQ_IOMUXC_SAI3_TXC = 119,
+ MX8MQ_IOMUXC_SAI3_TXD = 120,
+ MX8MQ_IOMUXC_SAI3_MCLK = 121,
+ MX8MQ_IOMUXC_SPDIF_TX = 122,
+ MX8MQ_IOMUXC_SPDIF_RX = 123,
+ MX8MQ_IOMUXC_SPDIF_EXT_CLK = 124,
+ MX8MQ_IOMUXC_ECSPI1_SCLK = 125,
+ MX8MQ_IOMUXC_ECSPI1_MOSI = 126,
+ MX8MQ_IOMUXC_ECSPI1_MISO = 127,
+ MX8MQ_IOMUXC_ECSPI1_SS0 = 128,
+ MX8MQ_IOMUXC_ECSPI2_SCLK = 129,
+ MX8MQ_IOMUXC_ECSPI2_MOSI = 130,
+ MX8MQ_IOMUXC_ECSPI2_MISO = 131,
+ MX8MQ_IOMUXC_ECSPI2_SS0 = 132,
+ MX8MQ_IOMUXC_I2C1_SCL = 133,
+ MX8MQ_IOMUXC_I2C1_SDA = 134,
+ MX8MQ_IOMUXC_I2C2_SCL = 135,
+ MX8MQ_IOMUXC_I2C2_SDA = 136,
+ MX8MQ_IOMUXC_I2C3_SCL = 137,
+ MX8MQ_IOMUXC_I2C3_SDA = 138,
+ MX8MQ_IOMUXC_I2C4_SCL = 139,
+ MX8MQ_IOMUXC_I2C4_SDA = 140,
+ MX8MQ_IOMUXC_UART1_RXD = 141,
+ MX8MQ_IOMUXC_UART1_TXD = 142,
+ MX8MQ_IOMUXC_UART2_RXD = 143,
+ MX8MQ_IOMUXC_UART2_TXD = 144,
+ MX8MQ_IOMUXC_UART3_RXD = 145,
+ MX8MQ_IOMUXC_UART3_TXD = 146,
+ MX8MQ_IOMUXC_UART4_RXD = 147,
+ MX8MQ_IOMUXC_UART4_TXD = 148,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx8mq_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX8MQ_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(MX8MQ_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX8MQ_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(MX8MQ_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(MX8MQ_PAD_RESERVE4),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_PMIC_STBY_REQ_CCMSRCGPCMIX),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_PMIC_ON_REQ_SNVSMIX),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ONOFF_SNVSMIX),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_POR_B_SNVSMIX),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_RTC_RESET_B_SNVSMIX),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO00),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO01),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO02),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO03),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO04),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO05),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO06),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO07),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO08),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO09),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO10),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO11),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO12),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO13),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO14),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_GPIO1_IO15),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_MDC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_MDIO),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_TD3),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_TD2),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_TD1),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_TD0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_TX_CTL),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_TXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_RX_CTL),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_RXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_RD0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_RD1),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_RD2),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_RD3),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_CLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_CMD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA1),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA2),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA3),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA4),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA5),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA6),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_DATA7),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_RESET_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD1_STROBE),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_CD_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_CLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_CMD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_DATA0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_DATA1),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_DATA2),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_DATA3),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_RESET_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SD2_WP),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_ALE),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_CE0_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_CE1_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_CE2_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_CE3_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_CLE),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA00),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA01),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA02),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA03),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA04),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA05),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA06),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DATA07),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_DQS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_RE_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_READY_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_WE_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_NAND_WP_B),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI5_RXFS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI5_RXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI5_RXD0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI5_RXD1),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI5_RXD2),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI5_RXD3),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI5_MCLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXFS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD1),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD2),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD3),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD4),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD5),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD6),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_RXD7),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXFS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD1),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD2),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD3),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD4),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD5),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD6),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_TXD7),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI1_MCLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI2_RXFS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI2_RXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI2_RXD0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI2_TXFS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI2_TXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI2_TXD0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI2_MCLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI3_RXFS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI3_RXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI3_RXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI3_TXFS),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI3_TXC),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI3_TXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SAI3_MCLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SPDIF_TX),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SPDIF_RX),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_SPDIF_EXT_CLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI1_SCLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI1_MOSI),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI1_MISO),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI1_SS0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI2_SCLK),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI2_MOSI),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI2_MISO),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ECSPI2_SS0),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C1_SCL),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C1_SDA),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C2_SCL),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C2_SDA),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C3_SCL),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C3_SDA),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C4_SCL),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_I2C4_SDA),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART1_RXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART1_TXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART2_RXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART2_TXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART3_RXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART3_TXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART4_RXD),
+ IMX_PINCTRL_PIN(MX8MQ_IOMUXC_UART4_TXD),
+};
+
+static const struct imx_pinctrl_soc_info imx8mq_pinctrl_info = {
+ .pins = imx8mq_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx8mq_pinctrl_pads),
+ .gpr_compatible = "fsl,imx8mq-iomuxc-gpr",
+};
+
+static const struct of_device_id imx8mq_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx8mq-iomuxc", .data = &imx8mq_pinctrl_info, },
+ { /* sentinel */ }
+};
+
+static int imx8mq_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imx8mq_pinctrl_info);
+}
+
+static struct platform_driver imx8mq_pinctrl_driver = {
+ .driver = {
+ .name = "imx8mq-pinctrl",
+ .of_match_table = of_match_ptr(imx8mq_pinctrl_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = imx8mq_pinctrl_probe,
+};
+
+static int __init imx8mq_pinctrl_init(void)
+{
+ return platform_driver_register(&imx8mq_pinctrl_driver);
+}
+arch_initcall(imx8mq_pinctrl_init);
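
The pad enum above doubles as the index into imx8mq_pinctrl_pads[], keeping
pin numbers and names in sync. Assuming IMX_PINCTRL_PIN() is the usual
stringifying wrapper from pinctrl-imx.h:

	/* assumed definition, for reference: */
	#define IMX_PINCTRL_PIN(pin) PINCTRL_PIN(pin, #pin)

	/*
	 * so IMX_PINCTRL_PIN(MX8MQ_IOMUXC_ENET_MDC) expands to
	 * PINCTRL_PIN(MX8MQ_IOMUXC_ENET_MDC, "MX8MQ_IOMUXC_ENET_MDC"),
	 * i.e. pad number 26 with a matching name string.
	 */
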
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 4aea1b8504f7..452a14f78707 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -1,6 +1,6 @@
-#
+# SPDX-License-Identifier: GPL-2.0
# Intel pin control drivers
-#
+
if (X86 || COMPILE_TEST)
config PINCTRL_BAYTRAIL
@@ -90,6 +90,14 @@ config PINCTRL_GEMINILAKE
This pinctrl driver provides an interface that allows configuring
of Intel Gemini Lake SoC pins and using them as GPIOs.
+config PINCTRL_ICELAKE
+ tristate "Intel Ice Lake PCH pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Ice Lake PCH pins and using them as GPIOs.
+
config PINCTRL_LEWISBURG
tristate "Intel Lewisburg pinctrl and GPIO driver"
depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index fadfe3ea2b04..cb491e655749 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_PINCTRL_CANNONLAKE) += pinctrl-cannonlake.o
obj-$(CONFIG_PINCTRL_CEDARFORK) += pinctrl-cedarfork.o
obj-$(CONFIG_PINCTRL_DENVERTON) += pinctrl-denverton.o
obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o
+obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 6b52ea1440a6..f38d596efa05 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Pinctrl GPIO driver for Intel Baytrail
- * Copyright (c) 2012-2013, Intel Corporation.
*
+ * Copyright (c) 2012-2013, Intel Corporation
* Author: Mathias Nyman <mathias.nyman@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/kernel.h>
@@ -1542,11 +1534,13 @@ static void byt_irq_unmask(struct irq_data *d)
switch (irqd_get_trigger_type(d)) {
case IRQ_TYPE_LEVEL_HIGH:
value |= BYT_TRIG_LVL;
+ /* fall through */
case IRQ_TYPE_EDGE_RISING:
value |= BYT_TRIG_POS;
break;
case IRQ_TYPE_LEVEL_LOW:
value |= BYT_TRIG_LVL;
+ /* fall through */
case IRQ_TYPE_EDGE_FALLING:
value |= BYT_TRIG_NEG;
break;
@@ -1691,7 +1685,8 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
value = readl(reg);
if (value)
dev_err(&vg->pdev->dev,
- "GPIO interrupt error, pins misconfigured\n");
+ "GPIO interrupt error, pins misconfigured. INT_STAT%u: 0x%08x\n",
+ base / 32, value);
}
}
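
The two fall-through comments make the intent explicit: the level trigger
types deliberately reuse the polarity bit of the matching edge type. The
resulting encodings, for reference:

	/*
	 *   IRQ_TYPE_EDGE_RISING  -> BYT_TRIG_POS
	 *   IRQ_TYPE_LEVEL_HIGH   -> BYT_TRIG_LVL | BYT_TRIG_POS
	 *   IRQ_TYPE_EDGE_FALLING -> BYT_TRIG_NEG
	 *   IRQ_TYPE_LEVEL_LOW    -> BYT_TRIG_LVL | BYT_TRIG_NEG
	 */
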
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index e6e6fd112585..8b1c7b59ad3e 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Broxton SoC pinctrl/GPIO driver
*
* Copyright (C) 2015, 2016 Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c
index 6243e7d95e7e..fb1afe55bf53 100644
--- a/drivers/pinctrl/intel/pinctrl-cannonlake.c
+++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Cannon Lake PCH pinctrl/GPIO driver
*
* Copyright (C) 2017, Intel Corporation
* Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
@@ -447,12 +444,8 @@ static const struct intel_function cnlh_functions[] = {
static const struct intel_community cnlh_communities[] = {
CNL_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
CNL_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
- /*
- * ACPI MMIO resources are returned in reverse order for
- * communities 3 and 4.
- */
- CNL_COMMUNITY(3, 155, 248, cnlh_community3_gpps),
- CNL_COMMUNITY(2, 249, 298, cnlh_community4_gpps),
+ CNL_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
+ CNL_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
};
static const struct intel_pinctrl_soc_data cnlh_soc_data = {
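
With the reverse-order workaround gone, .barno for communities 3 and 4 now
follows their ACPI MMIO resource order. A minimal sketch of what .barno
selects, assuming the core maps one MEM resource per community as
pinctrl-intel does (example_map_community is illustrative):

	static void __iomem *example_map_community(struct platform_device *pdev,
						   unsigned int barno)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, barno);
		return devm_ioremap_resource(&pdev->dev, res);
	}
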
diff --git a/drivers/pinctrl/intel/pinctrl-cedarfork.c b/drivers/pinctrl/intel/pinctrl-cedarfork.c
index 59216b0533d9..c788e37e338e 100644
--- a/drivers/pinctrl/intel/pinctrl-cedarfork.c
+++ b/drivers/pinctrl/intel/pinctrl-cedarfork.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Cedar Fork PCH pinctrl/GPIO driver
*
* Copyright (C) 2017, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
@@ -240,51 +237,51 @@ static const struct pinctrl_pin_desc cdf_pins[] = {
PINCTRL_PIN(179, "GBE_GPIO10"),
PINCTRL_PIN(180, "GBE_GPIO11"),
PINCTRL_PIN(181, "GBE_GPIO12"),
- PINCTRL_PIN(182, "SATA0_LED_N"),
- PINCTRL_PIN(183, "SATA1_LED_N"),
- PINCTRL_PIN(184, "SATA_PDETECT0"),
- PINCTRL_PIN(185, "SATA_PDETECT1"),
- PINCTRL_PIN(186, "SATA0_SDOUT"),
- PINCTRL_PIN(187, "SATA1_SDOUT"),
- PINCTRL_PIN(188, "SATA2_LED_N"),
- PINCTRL_PIN(189, "SATA_PDETECT2"),
- PINCTRL_PIN(190, "SATA2_SDOUT"),
+ PINCTRL_PIN(182, "PECI_SMB_DATA"),
+ PINCTRL_PIN(183, "SATA0_LED_N"),
+ PINCTRL_PIN(184, "SATA1_LED_N"),
+ PINCTRL_PIN(185, "SATA_PDETECT0"),
+ PINCTRL_PIN(186, "SATA_PDETECT1"),
+ PINCTRL_PIN(187, "SATA0_SDOUT"),
+ PINCTRL_PIN(188, "SATA1_SDOUT"),
+ PINCTRL_PIN(189, "SATA2_LED_N"),
+ PINCTRL_PIN(190, "SATA_PDETECT2"),
+ PINCTRL_PIN(191, "SATA2_SDOUT"),
/* EAST3 */
- PINCTRL_PIN(191, "ESPI_IO0"),
- PINCTRL_PIN(192, "ESPI_IO1"),
- PINCTRL_PIN(193, "ESPI_IO2"),
- PINCTRL_PIN(194, "ESPI_IO3"),
- PINCTRL_PIN(195, "ESPI_CLK"),
- PINCTRL_PIN(196, "ESPI_RST_N"),
- PINCTRL_PIN(197, "ESPI_CS0_N"),
- PINCTRL_PIN(198, "ESPI_ALRT0_N"),
- PINCTRL_PIN(199, "ESPI_CS1_N"),
- PINCTRL_PIN(200, "ESPI_ALRT1_N"),
- PINCTRL_PIN(201, "ESPI_CLK_LOOPBK"),
+ PINCTRL_PIN(192, "ESPI_IO0"),
+ PINCTRL_PIN(193, "ESPI_IO1"),
+ PINCTRL_PIN(194, "ESPI_IO2"),
+ PINCTRL_PIN(195, "ESPI_IO3"),
+ PINCTRL_PIN(196, "ESPI_CLK"),
+ PINCTRL_PIN(197, "ESPI_RST_N"),
+ PINCTRL_PIN(198, "ESPI_CS0_N"),
+ PINCTRL_PIN(199, "ESPI_ALRT0_N"),
+ PINCTRL_PIN(200, "ESPI_CS1_N"),
+ PINCTRL_PIN(201, "ESPI_ALRT1_N"),
+ PINCTRL_PIN(202, "ESPI_CLK_LOOPBK"),
/* EAST0 */
- PINCTRL_PIN(202, "SPI_CS0_N"),
- PINCTRL_PIN(203, "SPI_CS1_N"),
- PINCTRL_PIN(204, "SPI_MOSI_IO0"),
- PINCTRL_PIN(205, "SPI_MISO_IO1"),
- PINCTRL_PIN(206, "SPI_IO2"),
- PINCTRL_PIN(207, "SPI_IO3"),
- PINCTRL_PIN(208, "SPI_CLK"),
- PINCTRL_PIN(209, "SPI_CLK_LOOPBK"),
- PINCTRL_PIN(210, "SUSPWRDNACK"),
- PINCTRL_PIN(211, "PMU_SUSCLK"),
- PINCTRL_PIN(212, "ADR_COMPLETE"),
- PINCTRL_PIN(213, "ADR_TRIGGER_N"),
- PINCTRL_PIN(214, "PMU_SLP_S45_N"),
- PINCTRL_PIN(215, "PMU_SLP_S3_N"),
- PINCTRL_PIN(216, "PMU_WAKE_N"),
- PINCTRL_PIN(217, "PMU_PWRBTN_N"),
- PINCTRL_PIN(218, "PMU_RESETBUTTON_N"),
- PINCTRL_PIN(219, "PMU_PLTRST_N"),
- PINCTRL_PIN(220, "SUS_STAT_N"),
- PINCTRL_PIN(221, "PMU_I2C_CLK"),
- PINCTRL_PIN(222, "PMU_I2C_DATA"),
- PINCTRL_PIN(223, "PECI_SMB_CLK"),
- PINCTRL_PIN(224, "PECI_SMB_DATA"),
+ PINCTRL_PIN(203, "SPI_CS0_N"),
+ PINCTRL_PIN(204, "SPI_CS1_N"),
+ PINCTRL_PIN(205, "SPI_MOSI_IO0"),
+ PINCTRL_PIN(206, "SPI_MISO_IO1"),
+ PINCTRL_PIN(207, "SPI_IO2"),
+ PINCTRL_PIN(208, "SPI_IO3"),
+ PINCTRL_PIN(209, "SPI_CLK"),
+ PINCTRL_PIN(210, "SPI_CLK_LOOPBK"),
+ PINCTRL_PIN(211, "SUSPWRDNACK"),
+ PINCTRL_PIN(212, "PMU_SUSCLK"),
+ PINCTRL_PIN(213, "ADR_COMPLETE"),
+ PINCTRL_PIN(214, "ADR_TRIGGER_N"),
+ PINCTRL_PIN(215, "PMU_SLP_S45_N"),
+ PINCTRL_PIN(216, "PMU_SLP_S3_N"),
+ PINCTRL_PIN(217, "PMU_WAKE_N"),
+ PINCTRL_PIN(218, "PMU_PWRBTN_N"),
+ PINCTRL_PIN(219, "PMU_RESETBUTTON_N"),
+ PINCTRL_PIN(220, "PMU_PLTRST_N"),
+ PINCTRL_PIN(221, "SUS_STAT_N"),
+ PINCTRL_PIN(222, "PMU_I2C_CLK"),
+ PINCTRL_PIN(223, "PMU_I2C_DATA"),
+ PINCTRL_PIN(224, "PECI_SMB_CLK"),
PINCTRL_PIN(225, "PECI_SMB_ALRT_N"),
/* EMMC */
PINCTRL_PIN(226, "EMMC_CMD"),
@@ -315,9 +312,9 @@ static const struct intel_padgroup cdf_community0_gpps[] = {
};
static const struct intel_padgroup cdf_community1_gpps[] = {
- CDF_GPP(0, 168, 190), /* EAST2 */
- CDF_GPP(1, 191, 201), /* EAST3 */
- CDF_GPP(2, 202, 225), /* EAST0 */
+ CDF_GPP(0, 168, 191), /* EAST2 */
+ CDF_GPP(1, 192, 202), /* EAST3 */
+ CDF_GPP(2, 203, 225), /* EAST0 */
CDF_GPP(3, 226, 236), /* EMMC */
};
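
A quick consistency check on the renumbering: PECI_SMB_DATA moves from EAST0
into EAST2 as pin 182, shifting the later pins up by one while the group
totals still add up:

	/*
	 *   EAST2: 168..191 = 24 pins (was 168..190 = 23)
	 *   EAST3: 192..202 = 11 pins (was 191..201 = 11)
	 *   EAST0: 203..225 = 23 pins (was 202..225 = 24)
	 *   EMMC : 226..236 = 11 pins (unchanged)
	 */
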
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 0f1019ae3993..6d31ad799987 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cherryview/Braswell pinctrl driver
*
@@ -7,10 +8,6 @@
* This driver is based on the original Cherryview GPIO driver by
* Ning Li <ning.li@intel.com>
* Alan Cox <alan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/dmi.h>
diff --git a/drivers/pinctrl/intel/pinctrl-denverton.c b/drivers/pinctrl/intel/pinctrl-denverton.c
index 6572550cfe78..f321ab0d76e5 100644
--- a/drivers/pinctrl/intel/pinctrl-denverton.c
+++ b/drivers/pinctrl/intel/pinctrl-denverton.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Denverton SoC pinctrl/GPIO driver
*
* Copyright (C) 2017, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/pinctrl/intel/pinctrl-geminilake.c b/drivers/pinctrl/intel/pinctrl-geminilake.c
index a6b94c930007..5c4c96752fc1 100644
--- a/drivers/pinctrl/intel/pinctrl-geminilake.c
+++ b/drivers/pinctrl/intel/pinctrl-geminilake.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Gemini Lake SoC pinctrl/GPIO driver
*
* Copyright (C) 2017 Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/pinctrl/intel/pinctrl-icelake.c b/drivers/pinctrl/intel/pinctrl-icelake.c
new file mode 100644
index 000000000000..630b966ce081
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-icelake.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Ice Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2018, Intel Corporation
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define ICL_PAD_OWN 0x020
+#define ICL_PADCFGLOCK 0x080
+#define ICL_HOSTSW_OWN 0x0b0
+#define ICL_GPI_IE 0x110
+
+#define ICL_GPP(r, s, e, g) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
+ }
+
+#define ICL_NO_GPIO -1
+
+#define ICL_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = ICL_PAD_OWN, \
+ .padcfglock_offset = ICL_PADCFGLOCK, \
+ .hostown_offset = ICL_HOSTSW_OWN, \
+ .ie_offset = ICL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Ice Lake-LP */
+static const struct pinctrl_pin_desc icllp_pins[] = {
+ /* GPP_G */
+ PINCTRL_PIN(0, "SD3_CMD"),
+ PINCTRL_PIN(1, "SD3_D0"),
+ PINCTRL_PIN(2, "SD3_D1"),
+ PINCTRL_PIN(3, "SD3_D2"),
+ PINCTRL_PIN(4, "SD3_D3"),
+ PINCTRL_PIN(5, "SD3_CDB"),
+ PINCTRL_PIN(6, "SD3_CLK"),
+ PINCTRL_PIN(7, "SD3_WP"),
+ /* GPP_B */
+ PINCTRL_PIN(8, "CORE_VID_0"),
+ PINCTRL_PIN(9, "CORE_VID_1"),
+ PINCTRL_PIN(10, "VRALERTB"),
+ PINCTRL_PIN(11, "CPU_GP_2"),
+ PINCTRL_PIN(12, "CPU_GP_3"),
+ PINCTRL_PIN(13, "ISH_I2C0_SDA"),
+ PINCTRL_PIN(14, "ISH_I2C0_SCL"),
+ PINCTRL_PIN(15, "ISH_I2C1_SDA"),
+ PINCTRL_PIN(16, "ISH_I2C1_SCL"),
+ PINCTRL_PIN(17, "I2C5_SDA"),
+ PINCTRL_PIN(18, "I2C5_SCL"),
+ PINCTRL_PIN(19, "PMCALERTB"),
+ PINCTRL_PIN(20, "SLP_S0B"),
+ PINCTRL_PIN(21, "PLTRSTB"),
+ PINCTRL_PIN(22, "SPKR"),
+ PINCTRL_PIN(23, "GSPI0_CS0B"),
+ PINCTRL_PIN(24, "GSPI0_CLK"),
+ PINCTRL_PIN(25, "GSPI0_MISO"),
+ PINCTRL_PIN(26, "GSPI0_MOSI"),
+ PINCTRL_PIN(27, "GSPI1_CS0B"),
+ PINCTRL_PIN(28, "GSPI1_CLK"),
+ PINCTRL_PIN(29, "GSPI1_MISO"),
+ PINCTRL_PIN(30, "GSPI1_MOSI"),
+ PINCTRL_PIN(31, "SML1ALERTB"),
+ PINCTRL_PIN(32, "GSPI0_CLK_LOOPBK"),
+ PINCTRL_PIN(33, "GSPI1_CLK_LOOPBK"),
+ /* GPP_A */
+ PINCTRL_PIN(34, "ESPI_IO_0"),
+ PINCTRL_PIN(35, "ESPI_IO_1"),
+ PINCTRL_PIN(36, "ESPI_IO_2"),
+ PINCTRL_PIN(37, "ESPI_IO_3"),
+ PINCTRL_PIN(38, "ESPI_CSB"),
+ PINCTRL_PIN(39, "ESPI_CLK"),
+ PINCTRL_PIN(40, "ESPI_RESETB"),
+ PINCTRL_PIN(41, "I2S2_SCLK"),
+ PINCTRL_PIN(42, "I2S2_SFRM"),
+ PINCTRL_PIN(43, "I2S2_TXD"),
+ PINCTRL_PIN(44, "I2S2_RXD"),
+ PINCTRL_PIN(45, "SATA_DEVSLP_2"),
+ PINCTRL_PIN(46, "SATAXPCIE_1"),
+ PINCTRL_PIN(47, "SATAXPCIE_2"),
+ PINCTRL_PIN(48, "USB2_OCB_1"),
+ PINCTRL_PIN(49, "USB2_OCB_2"),
+ PINCTRL_PIN(50, "USB2_OCB_3"),
+ PINCTRL_PIN(51, "DDSP_HPD_C"),
+ PINCTRL_PIN(52, "DDSP_HPD_B"),
+ PINCTRL_PIN(53, "DDSP_HPD_1"),
+ PINCTRL_PIN(54, "DDSP_HPD_2"),
+ PINCTRL_PIN(55, "I2S5_TXD"),
+ PINCTRL_PIN(56, "I2S5_RXD"),
+ PINCTRL_PIN(57, "I2S1_SCLK"),
+ PINCTRL_PIN(58, "ESPI_CLK_LOOPBK"),
+ /* GPP_H */
+ PINCTRL_PIN(59, "SD_1P8_SEL"),
+ PINCTRL_PIN(60, "SD_PWR_EN_B"),
+ PINCTRL_PIN(61, "GPPC_H_2"),
+ PINCTRL_PIN(62, "SX_EXIT_HOLDOFFB"),
+ PINCTRL_PIN(63, "I2C2_SDA"),
+ PINCTRL_PIN(64, "I2C2_SCL"),
+ PINCTRL_PIN(65, "I2C3_SDA"),
+ PINCTRL_PIN(66, "I2C3_SCL"),
+ PINCTRL_PIN(67, "I2C4_SDA"),
+ PINCTRL_PIN(68, "I2C4_SCL"),
+ PINCTRL_PIN(69, "SRCCLKREQB_4"),
+ PINCTRL_PIN(70, "SRCCLKREQB_5"),
+ PINCTRL_PIN(71, "M2_SKT2_CFG_0"),
+ PINCTRL_PIN(72, "M2_SKT2_CFG_1"),
+ PINCTRL_PIN(73, "M2_SKT2_CFG_2"),
+ PINCTRL_PIN(74, "M2_SKT2_CFG_3"),
+ PINCTRL_PIN(75, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(76, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(77, "CPU_VCCIO_PWR_GATEB"),
+ PINCTRL_PIN(78, "TIME_SYNC_0"),
+ PINCTRL_PIN(79, "IMGCLKOUT_1"),
+ PINCTRL_PIN(80, "IMGCLKOUT_2"),
+ PINCTRL_PIN(81, "IMGCLKOUT_3"),
+ PINCTRL_PIN(82, "IMGCLKOUT_4"),
+ /* GPP_D */
+ PINCTRL_PIN(83, "ISH_GP_0"),
+ PINCTRL_PIN(84, "ISH_GP_1"),
+ PINCTRL_PIN(85, "ISH_GP_2"),
+ PINCTRL_PIN(86, "ISH_GP_3"),
+ PINCTRL_PIN(87, "IMGCLKOUT_0"),
+ PINCTRL_PIN(88, "SRCCLKREQB_0"),
+ PINCTRL_PIN(89, "SRCCLKREQB_1"),
+ PINCTRL_PIN(90, "SRCCLKREQB_2"),
+ PINCTRL_PIN(91, "SRCCLKREQB_3"),
+ PINCTRL_PIN(92, "ISH_SPI_CSB"),
+ PINCTRL_PIN(93, "ISH_SPI_CLK"),
+ PINCTRL_PIN(94, "ISH_SPI_MISO"),
+ PINCTRL_PIN(95, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(96, "ISH_UART0_RXD"),
+ PINCTRL_PIN(97, "ISH_UART0_TXD"),
+ PINCTRL_PIN(98, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(99, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(100, "ISH_GP_4"),
+ PINCTRL_PIN(101, "ISH_GP_5"),
+ PINCTRL_PIN(102, "I2S_MCLK"),
+ PINCTRL_PIN(103, "GSPI2_CLK_LOOPBK"),
+ /* GPP_F */
+ PINCTRL_PIN(104, "CNV_BRI_DT"),
+ PINCTRL_PIN(105, "CNV_BRI_RSP"),
+ PINCTRL_PIN(106, "CNV_RGI_DT"),
+ PINCTRL_PIN(107, "CNV_RGI_RSP"),
+ PINCTRL_PIN(108, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(109, "EMMC_HIP_MON"),
+ PINCTRL_PIN(110, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(111, "EMMC_CMD"),
+ PINCTRL_PIN(112, "EMMC_DATA0"),
+ PINCTRL_PIN(113, "EMMC_DATA1"),
+ PINCTRL_PIN(114, "EMMC_DATA2"),
+ PINCTRL_PIN(115, "EMMC_DATA3"),
+ PINCTRL_PIN(116, "EMMC_DATA4"),
+ PINCTRL_PIN(117, "EMMC_DATA5"),
+ PINCTRL_PIN(118, "EMMC_DATA6"),
+ PINCTRL_PIN(119, "EMMC_DATA7"),
+ PINCTRL_PIN(120, "EMMC_RCLK"),
+ PINCTRL_PIN(121, "EMMC_CLK"),
+ PINCTRL_PIN(122, "EMMC_RESETB"),
+ PINCTRL_PIN(123, "A4WP_PRESENT"),
+ /* vGPIO */
+ PINCTRL_PIN(124, "CNV_BTEN"),
+ PINCTRL_PIN(125, "CNV_WCEN"),
+ PINCTRL_PIN(126, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(127, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(128, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(129, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(130, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(131, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(132, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(133, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(134, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(135, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(136, "vUART0_TXD"),
+ PINCTRL_PIN(137, "vUART0_RXD"),
+ PINCTRL_PIN(138, "vUART0_CTS_B"),
+ PINCTRL_PIN(139, "vUART0_RTS_B"),
+ PINCTRL_PIN(140, "vISH_UART0_TXD"),
+ PINCTRL_PIN(141, "vISH_UART0_RXD"),
+ PINCTRL_PIN(142, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(143, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(144, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(145, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(146, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(147, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(148, "vI2S2_SCLK"),
+ PINCTRL_PIN(149, "vI2S2_SFRM"),
+ PINCTRL_PIN(150, "vI2S2_TXD"),
+ PINCTRL_PIN(151, "vI2S2_RXD"),
+ PINCTRL_PIN(152, "vSD3_CD_B"),
+ /* GPP_C */
+ PINCTRL_PIN(153, "SMBCLK"),
+ PINCTRL_PIN(154, "SMBDATA"),
+ PINCTRL_PIN(155, "SMBALERTB"),
+ PINCTRL_PIN(156, "SML0CLK"),
+ PINCTRL_PIN(157, "SML0DATA"),
+ PINCTRL_PIN(158, "SML0ALERTB"),
+ PINCTRL_PIN(159, "SML1CLK"),
+ PINCTRL_PIN(160, "SML1DATA"),
+ PINCTRL_PIN(161, "UART0_RXD"),
+ PINCTRL_PIN(162, "UART0_TXD"),
+ PINCTRL_PIN(163, "UART0_RTSB"),
+ PINCTRL_PIN(164, "UART0_CTSB"),
+ PINCTRL_PIN(165, "UART1_RXD"),
+ PINCTRL_PIN(166, "UART1_TXD"),
+ PINCTRL_PIN(167, "UART1_RTSB"),
+ PINCTRL_PIN(168, "UART1_CTSB"),
+ PINCTRL_PIN(169, "I2C0_SDA"),
+ PINCTRL_PIN(170, "I2C0_SCL"),
+ PINCTRL_PIN(171, "I2C1_SDA"),
+ PINCTRL_PIN(172, "I2C1_SCL"),
+ PINCTRL_PIN(173, "UART2_RXD"),
+ PINCTRL_PIN(174, "UART2_TXD"),
+ PINCTRL_PIN(175, "UART2_RTSB"),
+ PINCTRL_PIN(176, "UART2_CTSB"),
+ /* HVCMOS */
+ PINCTRL_PIN(177, "L_BKLTEN"),
+ PINCTRL_PIN(178, "L_BKLTCTL"),
+ PINCTRL_PIN(179, "L_VDDEN"),
+ PINCTRL_PIN(180, "SYS_PWROK"),
+ PINCTRL_PIN(181, "SYS_RESETB"),
+ PINCTRL_PIN(182, "MLK_RSTB"),
+ /* GPP_E */
+ PINCTRL_PIN(183, "SATAXPCIE_0"),
+ PINCTRL_PIN(184, "SPI1_IO_2"),
+ PINCTRL_PIN(185, "SPI1_IO_3"),
+ PINCTRL_PIN(186, "CPU_GP_0"),
+ PINCTRL_PIN(187, "SATA_DEVSLP_0"),
+ PINCTRL_PIN(188, "SATA_DEVSLP_1"),
+ PINCTRL_PIN(189, "GPPC_E_6"),
+ PINCTRL_PIN(190, "CPU_GP_1"),
+ PINCTRL_PIN(191, "SATA_LEDB"),
+ PINCTRL_PIN(192, "USB2_OCB_0"),
+ PINCTRL_PIN(193, "SPI1_CSB"),
+ PINCTRL_PIN(194, "SPI1_CLK"),
+ PINCTRL_PIN(195, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(196, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(197, "DDSP_HPD_A"),
+ PINCTRL_PIN(198, "ISH_GP_6"),
+ PINCTRL_PIN(199, "ISH_GP_7"),
+ PINCTRL_PIN(200, "DISP_MISC_4"),
+ PINCTRL_PIN(201, "DDP1_CTRLCLK"),
+ PINCTRL_PIN(202, "DDP1_CTRLDATA"),
+ PINCTRL_PIN(203, "DDP2_CTRLCLK"),
+ PINCTRL_PIN(204, "DDP2_CTRLDATA"),
+ PINCTRL_PIN(205, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(206, "DDPA_CTRLDATA"),
+ /* JTAG */
+ PINCTRL_PIN(207, "JTAG_TDO"),
+ PINCTRL_PIN(208, "JTAGX"),
+ PINCTRL_PIN(209, "PRDYB"),
+ PINCTRL_PIN(210, "PREQB"),
+ PINCTRL_PIN(211, "CPU_TRSTB"),
+ PINCTRL_PIN(212, "JTAG_TDI"),
+ PINCTRL_PIN(213, "JTAG_TMS"),
+ PINCTRL_PIN(214, "JTAG_TCK"),
+ PINCTRL_PIN(215, "ITP_PMODE"),
+ /* GPP_R */
+ PINCTRL_PIN(216, "HDA_BCLK"),
+ PINCTRL_PIN(217, "HDA_SYNC"),
+ PINCTRL_PIN(218, "HDA_SDO"),
+ PINCTRL_PIN(219, "HDA_SDI_0"),
+ PINCTRL_PIN(220, "HDA_RSTB"),
+ PINCTRL_PIN(221, "HDA_SDI_1"),
+ PINCTRL_PIN(222, "I2S1_TXD"),
+ PINCTRL_PIN(223, "I2S1_RXD"),
+ /* GPP_S */
+ PINCTRL_PIN(224, "SNDW1_CLK"),
+ PINCTRL_PIN(225, "SNDW1_DATA"),
+ PINCTRL_PIN(226, "SNDW2_CLK"),
+ PINCTRL_PIN(227, "SNDW2_DATA"),
+ PINCTRL_PIN(228, "SNDW3_CLK"),
+ PINCTRL_PIN(229, "SNDW3_DATA"),
+ PINCTRL_PIN(230, "SNDW4_CLK"),
+ PINCTRL_PIN(231, "SNDW4_DATA"),
+ /* SPI */
+ PINCTRL_PIN(232, "SPI0_IO_2"),
+ PINCTRL_PIN(233, "SPI0_IO_3"),
+ PINCTRL_PIN(234, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(235, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(236, "SPI0_TPM_CSB"),
+ PINCTRL_PIN(237, "SPI0_FLASH_0_CSB"),
+ PINCTRL_PIN(238, "SPI0_FLASH_1_CSB"),
+ PINCTRL_PIN(239, "SPI0_CLK"),
+ PINCTRL_PIN(240, "SPI0_CLK_LOOPBK"),
+};
+
+static const struct intel_padgroup icllp_community0_gpps[] = {
+ ICL_GPP(0, 0, 7, 0), /* GPP_G */
+ ICL_GPP(1, 8, 33, 32), /* GPP_B */
+ ICL_GPP(2, 34, 58, 64), /* GPP_A */
+};
+
+static const struct intel_padgroup icllp_community1_gpps[] = {
+ ICL_GPP(0, 59, 82, 96), /* GPP_H */
+ ICL_GPP(1, 83, 103, 128), /* GPP_D */
+ ICL_GPP(2, 104, 123, 160), /* GPP_F */
+ ICL_GPP(3, 124, 152, 192), /* vGPIO */
+};
+
+static const struct intel_padgroup icllp_community4_gpps[] = {
+ ICL_GPP(0, 153, 176, 224), /* GPP_C */
+ ICL_GPP(1, 177, 182, ICL_NO_GPIO), /* HVCMOS */
+ ICL_GPP(2, 183, 206, 256), /* GPP_E */
+ ICL_GPP(3, 207, 215, ICL_NO_GPIO), /* JTAG */
+};
+
+static const struct intel_padgroup icllp_community5_gpps[] = {
+ ICL_GPP(0, 216, 223, 288), /* GPP_R */
+ ICL_GPP(1, 224, 231, 320), /* GPP_S */
+ ICL_GPP(2, 232, 240, ICL_NO_GPIO), /* SPI */
+};
+
+static const struct intel_community icllp_communities[] = {
+ ICL_COMMUNITY(0, 0, 58, icllp_community0_gpps),
+ ICL_COMMUNITY(1, 59, 152, icllp_community1_gpps),
+ ICL_COMMUNITY(2, 153, 215, icllp_community4_gpps),
+ ICL_COMMUNITY(3, 216, 240, icllp_community5_gpps),
+};
+
+static const unsigned int icllp_spi0_pins[] = { 22, 23, 24, 25, 26 };
+static const unsigned int icllp_spi0_modes[] = { 3, 1, 1, 1, 1 };
+static const unsigned int icllp_spi1_pins[] = { 27, 28, 29, 30, 31 };
+static const unsigned int icllp_spi1_modes[] = { 1, 1, 1, 1, 3 };
+static const unsigned int icllp_spi2_pins[] = { 92, 93, 94, 95, 98 };
+static const unsigned int icllp_spi2_modes[] = { 3, 3, 3, 3, 2 };
+
+static const unsigned int icllp_i2c0_pins[] = { 169, 170 };
+static const unsigned int icllp_i2c1_pins[] = { 171, 172 };
+static const unsigned int icllp_i2c2_pins[] = { 63, 64 };
+static const unsigned int icllp_i2c3_pins[] = { 65, 66 };
+static const unsigned int icllp_i2c4_pins[] = { 67, 68 };
+
+static const unsigned int icllp_uart0_pins[] = { 161, 162, 163, 164 };
+static const unsigned int icllp_uart1_pins[] = { 165, 166, 167, 168 };
+static const unsigned int icllp_uart2_pins[] = { 173, 174, 175, 176 };
+
+static const struct intel_pingroup icllp_groups[] = {
+ PIN_GROUP("spi0_grp", icllp_spi0_pins, icllp_spi0_modes),
+ PIN_GROUP("spi1_grp", icllp_spi1_pins, icllp_spi1_modes),
+ PIN_GROUP("spi2_grp", icllp_spi2_pins, icllp_spi2_modes),
+ PIN_GROUP("i2c0_grp", icllp_i2c0_pins, 1),
+ PIN_GROUP("i2c1_grp", icllp_i2c1_pins, 1),
+ PIN_GROUP("i2c2_grp", icllp_i2c2_pins, 1),
+ PIN_GROUP("i2c3_grp", icllp_i2c3_pins, 1),
+ PIN_GROUP("i2c4_grp", icllp_i2c4_pins, 1),
+ PIN_GROUP("uart0_grp", icllp_uart0_pins, 1),
+ PIN_GROUP("uart1_grp", icllp_uart1_pins, 1),
+ PIN_GROUP("uart2_grp", icllp_uart2_pins, 1),
+};
+
+static const char * const icllp_spi0_groups[] = { "spi0_grp" };
+static const char * const icllp_spi1_groups[] = { "spi1_grp" };
+static const char * const icllp_spi2_groups[] = { "spi2_grp" };
+static const char * const icllp_i2c0_groups[] = { "i2c0_grp" };
+static const char * const icllp_i2c1_groups[] = { "i2c1_grp" };
+static const char * const icllp_i2c2_groups[] = { "i2c2_grp" };
+static const char * const icllp_i2c3_groups[] = { "i2c3_grp" };
+static const char * const icllp_i2c4_groups[] = { "i2c4_grp" };
+static const char * const icllp_uart0_groups[] = { "uart0_grp" };
+static const char * const icllp_uart1_groups[] = { "uart1_grp" };
+static const char * const icllp_uart2_groups[] = { "uart2_grp" };
+
+static const struct intel_function icllp_functions[] = {
+ FUNCTION("spi0", icllp_spi0_groups),
+ FUNCTION("spi1", icllp_spi1_groups),
+ FUNCTION("spi2", icllp_spi2_groups),
+ FUNCTION("i2c0", icllp_i2c0_groups),
+ FUNCTION("i2c1", icllp_i2c1_groups),
+ FUNCTION("i2c2", icllp_i2c2_groups),
+ FUNCTION("i2c3", icllp_i2c3_groups),
+ FUNCTION("i2c4", icllp_i2c4_groups),
+ FUNCTION("uart0", icllp_uart0_groups),
+ FUNCTION("uart1", icllp_uart1_groups),
+ FUNCTION("uart2", icllp_uart2_groups),
+};
+
+static const struct intel_pinctrl_soc_data icllp_soc_data = {
+ .pins = icllp_pins,
+ .npins = ARRAY_SIZE(icllp_pins),
+ .groups = icllp_groups,
+ .ngroups = ARRAY_SIZE(icllp_groups),
+ .functions = icllp_functions,
+ .nfunctions = ARRAY_SIZE(icllp_functions),
+ .communities = icllp_communities,
+ .ncommunities = ARRAY_SIZE(icllp_communities),
+};
+
+static int icl_pinctrl_probe(struct platform_device *pdev)
+{
+ return intel_pinctrl_probe(pdev, &icllp_soc_data);
+}
+
+static const struct dev_pm_ops icl_pinctrl_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
+ intel_pinctrl_resume)
+};
+
+static const struct acpi_device_id icl_pinctrl_acpi_match[] = {
+ { "INT3455" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, icl_pinctrl_acpi_match);
+
+static struct platform_driver icl_pinctrl_driver = {
+ .probe = icl_pinctrl_probe,
+ .driver = {
+ .name = "icelake-pinctrl",
+ .acpi_match_table = icl_pinctrl_acpi_match,
+ .pm = &icl_pinctrl_pm_ops,
+ },
+};
+
+module_platform_driver(icl_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Ice Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
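[Note: the ICL_GPP() rows above pack (register number, first pad, last pad, GPIO base) for each pad group; gpio_base advances in 32-pin strides (0, 32, 64, ...) because each group exposes at most 32 pads, and ICL_NO_GPIO marks groups (HVCMOS, JTAG, SPI) with no GPIO mapping. A minimal sketch of the implied pad-to-GPIO translation, using hypothetical parameter names that mirror the macro arguments rather than the real struct layout:]

	/*
	 * Sketch only: 'first', 'last' and 'gpio_base' are assumed names
	 * mirroring the ICL_GPP() arguments, not the real struct fields.
	 */
	static int icl_pad_to_gpio_sketch(unsigned int first, unsigned int last,
					  unsigned int gpio_base, unsigned int pad)
	{
		if (pad < first || pad > last)
			return -EINVAL;
		if (gpio_base == ICL_NO_GPIO)	/* group not exposed as GPIO */
			return -ENOENT;
		return gpio_base + (pad - first);
	}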
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 1e24a6b8a64e..62b009b27eda 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel pinctrl/GPIO core driver.
*
* Copyright (C) 2015, Intel Corporation
* Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -875,6 +872,36 @@ static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset,
return -EINVAL;
}
+static int intel_gpio_irq_reqres(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ int pin;
+ int ret;
+
+ pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
+ if (pin >= 0) {
+ ret = gpiochip_lock_as_irq(gc, pin);
+ if (ret) {
+ dev_err(pctrl->dev, "unable to lock HW IRQ %d for IRQ\n",
+ pin);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void intel_gpio_irq_relres(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ int pin;
+
+ pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
+ if (pin >= 0)
+ gpiochip_unlock_as_irq(gc, pin);
+}
+
static void intel_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1090,6 +1117,8 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
static struct irq_chip intel_gpio_irqchip = {
.name = "intel-gpio",
+ .irq_request_resources = intel_gpio_irq_reqres,
+ .irq_release_resources = intel_gpio_irq_relres,
.irq_enable = intel_gpio_irq_enable,
.irq_ack = intel_gpio_irq_ack,
.irq_mask = intel_gpio_irq_mask,
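[Note: the new .irq_request_resources/.irq_release_resources hooks keep a pad locked as an IRQ for the lifetime of its interrupt. A consumer-side sketch, assuming a hypothetical platform device with an ACPI-mapped "irq" GPIO; requesting the interrupt below ends up in intel_gpio_irq_reqres() and thus gpiochip_lock_as_irq():]

	#include <linux/gpio/consumer.h>
	#include <linux/interrupt.h>

	static irqreturn_t mydev_isr(int irq, void *data)
	{
		return IRQ_HANDLED;	/* hypothetical handler */
	}

	static int mydev_request_gpio_irq(struct device *dev)
	{
		struct gpio_desc *gpiod;
		int irq;

		/* "irq" is a hypothetical connection ID for this sketch */
		gpiod = devm_gpiod_get(dev, "irq", GPIOD_IN);
		if (IS_ERR(gpiod))
			return PTR_ERR(gpiod);

		irq = gpiod_to_irq(gpiod);
		if (irq < 0)
			return irq;

		/* request_irq -> irq_request_resources -> gpiochip_lock_as_irq */
		return devm_request_irq(dev, irq, mydev_isr, IRQF_TRIGGER_FALLING,
					"mydev", NULL);
	}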
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 98fdf9adf623..1785abf157e4 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Core pinctrl/GPIO driver for Intel GPIO controllers
*
* Copyright (C) 2015, Intel Corporation
* Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef PINCTRL_INTEL_H
diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
index 14d56ea6cfdc..99894647eddd 100644
--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
+++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Lewisburg pinctrl/GPIO driver
*
* Copyright (C) 2017, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index d9357054d41d..4a916be44f4f 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Merrifield SoC pinctrl driver
*
* Copyright (C) 2016, Intel Corporation
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index fee3435a6f15..7984392104fe 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Sunrisepoint PCH pinctrl/GPIO driver
*
* Copyright (C) 2015, Intel Corporation
* Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index 30f3316747e2..a613e546717a 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index 4c4740ffeb9c..6f931b85701b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -1263,6 +1263,7 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
MTK_DISABLE);
if (err)
goto err;
+ /* else: fall through */
case PIN_CONFIG_INPUT_ENABLE:
case PIN_CONFIG_SLEW_RATE:
reg = (param == PIN_CONFIG_SLEW_RATE) ?
@@ -1537,7 +1538,7 @@ static int mtk_build_groups(struct mtk_pinctrl *hw)
err = pinctrl_generic_add_group(hw->pctrl, group->name,
group->pins, group->num_pins,
group->data);
- if (err) {
+ if (err < 0) {
dev_err(hw->dev, "Failed to register group %s\n",
group->name);
return err;
@@ -1558,7 +1559,7 @@ static int mtk_build_functions(struct mtk_pinctrl *hw)
func->group_names,
func->num_group_names,
func->data);
- if (err) {
+ if (err < 0) {
dev_err(hw->dev, "Failed to register function %s\n",
func->name);
return err;
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index 46a0918bd284..ad502eda4afa 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -672,6 +672,9 @@ static const unsigned int jtag_ao_tdo_pins[] = {GPIOAO_4};
static const unsigned int jtag_ao_clk_pins[] = {GPIOAO_5};
static const unsigned int jtag_ao_tms_pins[] = {GPIOAO_7};
+/* gen_clk */
+static const unsigned int gen_clk_ee_pins[] = {GPIOAO_13};
+
static struct meson_pmx_group meson_axg_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0),
GPIO_GROUP(GPIOAO_1),
@@ -718,6 +721,7 @@ static struct meson_pmx_group meson_axg_aobus_groups[] = {
GROUP(jtag_ao_tdo, 4),
GROUP(jtag_ao_clk, 4),
GROUP(jtag_ao_tms, 4),
+ GROUP(gen_clk_ee, 4),
};
static const char * const gpio_periphs_groups[] = {
@@ -947,6 +951,10 @@ static const char * const tdmb_groups[] = {
"tdmb_din2", "tdmb_dout2", "tdmb_din3", "tdmb_dout3",
};
+static const char * const gen_clk_ee_groups[] = {
+ "gen_clk_ee",
+};
+
static struct meson_pmx_func meson_axg_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
@@ -992,6 +1000,7 @@ static struct meson_pmx_func meson_axg_aobus_functions[] = {
FUNCTION(pwm_ao_c),
FUNCTION(pwm_ao_d),
FUNCTION(jtag_ao),
+ FUNCTION(gen_clk_ee),
};
static struct meson_bank meson_axg_periphs_banks[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 2c97a2e07a5f..4ceb06f8a33c 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -243,6 +243,8 @@ static const unsigned int i2s_out_ch67_y_pins[] = { GPIOY_10 };
static const unsigned int spdif_out_y_pins[] = { GPIOY_12 };
+static const unsigned int gen_clk_out_pins[] = { GPIOY_15 };
+
static const struct pinctrl_pin_desc meson_gxbb_aobus_pins[] = {
MESON_PIN(GPIOAO_0),
MESON_PIN(GPIOAO_1),
@@ -453,6 +455,7 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GROUP(i2s_out_ch45_y, 1, 6),
GROUP(i2s_out_ch67_y, 1, 7),
GROUP(spdif_out_y, 1, 9),
+ GROUP(gen_clk_out, 6, 15),
/* Bank Z */
GROUP(eth_mdio, 6, 1),
@@ -706,6 +709,10 @@ static const char * const spdif_out_groups[] = {
"spdif_out_y",
};
+static const char * const gen_clk_out_groups[] = {
+ "gen_clk_out",
+};
+
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
@@ -790,6 +797,7 @@ static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
FUNCTION(hdmi_i2c),
FUNCTION(i2s_out),
FUNCTION(spdif_out),
+ FUNCTION(gen_clk_out),
};
static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 53cf800688e9..aa48b3f23c7f 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -80,6 +80,18 @@ struct armada_37xx_pmx_func {
unsigned int ngroups;
};
+struct armada_37xx_pm_state {
+ u32 out_en_l;
+ u32 out_en_h;
+ u32 out_val_l;
+ u32 out_val_h;
+ u32 irq_en_l;
+ u32 irq_en_h;
+ u32 irq_pol_l;
+ u32 irq_pol_h;
+ u32 selection;
+};
+
struct armada_37xx_pinctrl {
struct regmap *regmap;
void __iomem *base;
@@ -94,6 +106,7 @@ struct armada_37xx_pinctrl {
unsigned int ngroups;
struct armada_37xx_pmx_func *funcs;
unsigned int nfuncs;
+ struct armada_37xx_pm_state pm;
};
#define PIN_GRP(_name, _start, _nr, _mask, _func1, _func2) \
@@ -996,6 +1009,110 @@ static int armada_37xx_pinctrl_register(struct platform_device *pdev,
return 0;
}
+#if defined(CONFIG_PM)
+static int armada_3700_pinctrl_suspend(struct device *dev)
+{
+ struct armada_37xx_pinctrl *info = dev_get_drvdata(dev);
+
+ /* Save GPIO state */
+ regmap_read(info->regmap, OUTPUT_EN, &info->pm.out_en_l);
+ regmap_read(info->regmap, OUTPUT_EN + sizeof(u32), &info->pm.out_en_h);
+ regmap_read(info->regmap, OUTPUT_VAL, &info->pm.out_val_l);
+ regmap_read(info->regmap, OUTPUT_VAL + sizeof(u32),
+ &info->pm.out_val_h);
+
+ info->pm.irq_en_l = readl(info->base + IRQ_EN);
+ info->pm.irq_en_h = readl(info->base + IRQ_EN + sizeof(u32));
+ info->pm.irq_pol_l = readl(info->base + IRQ_POL);
+ info->pm.irq_pol_h = readl(info->base + IRQ_POL + sizeof(u32));
+
+ /* Save pinctrl state */
+ regmap_read(info->regmap, SELECTION, &info->pm.selection);
+
+ return 0;
+}
+
+static int armada_3700_pinctrl_resume(struct device *dev)
+{
+ struct armada_37xx_pinctrl *info = dev_get_drvdata(dev);
+ struct gpio_chip *gc;
+ struct irq_domain *d;
+ int i;
+
+ /* Restore GPIO state */
+ regmap_write(info->regmap, OUTPUT_EN, info->pm.out_en_l);
+ regmap_write(info->regmap, OUTPUT_EN + sizeof(u32),
+ info->pm.out_en_h);
+ regmap_write(info->regmap, OUTPUT_VAL, info->pm.out_val_l);
+ regmap_write(info->regmap, OUTPUT_VAL + sizeof(u32),
+ info->pm.out_val_h);
+
+ /*
+ * Input levels may change during suspend and are not monitored
+ * while suspended. GPIOs used for both-edge IRQs may therefore no
+ * longer match their programmed polarities (rising/falling edge)
+ * and must be re-synchronized manually.
+ */
+ gc = &info->gpio_chip;
+ d = gc->irq.domain;
+ for (i = 0; i < gc->ngpio; i++) {
+ u32 irq_bit = BIT(i % GPIO_PER_REG);
+ u32 mask, *irq_pol, input_reg, virq, type, level;
+
+ if (i < GPIO_PER_REG) {
+ mask = info->pm.irq_en_l;
+ irq_pol = &info->pm.irq_pol_l;
+ input_reg = INPUT_VAL;
+ } else {
+ mask = info->pm.irq_en_h;
+ irq_pol = &info->pm.irq_pol_h;
+ input_reg = INPUT_VAL + sizeof(u32);
+ }
+
+ if (!(mask & irq_bit))
+ continue;
+
+ virq = irq_find_mapping(d, i);
+ type = irq_get_trigger_type(virq);
+
+ /*
+ * Synchronize level and polarity for both-edge irqs:
+ * - a high input level expects a falling edge,
+ * - a low input level expects a rising edge.
+ */
+ if ((type & IRQ_TYPE_SENSE_MASK) ==
+ IRQ_TYPE_EDGE_BOTH) {
+ regmap_read(info->regmap, input_reg, &level);
+ if ((*irq_pol ^ level) & irq_bit)
+ *irq_pol ^= irq_bit;
+ }
+ }
+
+ writel(info->pm.irq_en_l, info->base + IRQ_EN);
+ writel(info->pm.irq_en_h, info->base + IRQ_EN + sizeof(u32));
+ writel(info->pm.irq_pol_l, info->base + IRQ_POL);
+ writel(info->pm.irq_pol_h, info->base + IRQ_POL + sizeof(u32));
+
+ /* Restore pinctrl state */
+ regmap_write(info->regmap, SELECTION, info->pm.selection);
+
+ return 0;
+}
+
+/*
+ * Since pinctrl is an infrastructure module, it must resume before the
+ * I/O drivers that depend on it.
+ */
+static const struct dev_pm_ops armada_3700_pinctrl_pm_ops = {
+ .suspend_late = armada_3700_pinctrl_suspend,
+ .resume_early = armada_3700_pinctrl_resume,
+};
+
+#define PINCTRL_ARMADA_37XX_DEV_PM_OPS (&armada_3700_pinctrl_pm_ops)
+#else
+#define PINCTRL_ARMADA_37XX_DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static const struct of_device_id armada_37xx_pinctrl_of_match[] = {
{
.compatible = "marvell,armada3710-sb-pinctrl",
@@ -1049,6 +1166,7 @@ static struct platform_driver armada_37xx_pinctrl_driver = {
.driver = {
.name = "armada-37xx-pinctrl",
.of_match_table = armada_37xx_pinctrl_of_match,
+ .pm = PINCTRL_ARMADA_37XX_DEV_PM_OPS,
},
};
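[Note: the resume path above re-arms both-edge interrupts by making each line's polarity bit track its current input level. The same XOR step in isolation, assuming a polarity bit that is set while the controller waits for a falling edge:]

	/*
	 * Sketch of the resync: for a both-edge line the next interesting
	 * edge is the opposite of the current level, so polarity must equal
	 * level (high level -> wait for falling, low -> wait for rising).
	 */
	static u32 resync_both_edge_polarity(u32 irq_pol, u32 level, u32 irq_bit)
	{
		/* If polarity and level disagree in this bit, flip polarity. */
		if ((irq_pol ^ level) & irq_bit)
			irq_pol ^= irq_bit;
		return irq_pol;
	}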
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index aa592ef23a29..e3689cc62a41 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -101,15 +101,16 @@ static int abx500_gpio_get_bit(struct gpio_chip *chip, u8 reg,
reg += offset / 8;
ret = abx500_get_register_interruptible(pct->dev,
AB8500_MISC, reg, &val);
-
- *bit = !!(val & BIT(pos));
-
- if (ret < 0)
+ if (ret < 0) {
dev_err(pct->dev,
"%s read reg =%x, offset=%x failed (%d)\n",
__func__, reg, offset, ret);
+ return ret;
+ }
- return ret;
+ *bit = !!(val & BIT(pos));
+
+ return 0;
}
static int abx500_gpio_set_bits(struct gpio_chip *chip, u8 reg,
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 04ae139671c8..41ccc759b8b8 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -247,16 +247,16 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
if (pin_reg & BIT(INTERRUPT_ENABLE_OFF)) {
+ u8 level = (pin_reg >> ACTIVE_LEVEL_OFF) &
+ ACTIVE_LEVEL_MASK;
interrupt_enable = "interrupt is enabled|";
- if (!(pin_reg & BIT(ACTIVE_LEVEL_OFF)) &&
- !(pin_reg & BIT(ACTIVE_LEVEL_OFF + 1)))
- active_level = "Active low|";
- else if (pin_reg & BIT(ACTIVE_LEVEL_OFF) &&
- !(pin_reg & BIT(ACTIVE_LEVEL_OFF + 1)))
+ if (level == ACTIVE_LEVEL_HIGH)
active_level = "Active high|";
- else if (!(pin_reg & BIT(ACTIVE_LEVEL_OFF)) &&
- pin_reg & BIT(ACTIVE_LEVEL_OFF + 1))
+ else if (level == ACTIVE_LEVEL_LOW)
+ active_level = "Active low|";
+ else if (!(pin_reg & BIT(LEVEL_TRIG_OFF)) &&
+ level == ACTIVE_LEVEL_BOTH)
active_level = "Active on both|";
else
active_level = "Unknown Active level|";
@@ -552,7 +552,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
/* Each status bit covers four pins */
for (i = 0; i < 4; i++) {
regval = readl(regs + i);
- if (!(regval & PIN_IRQ_PENDING))
+ if (!(regval & PIN_IRQ_PENDING) ||
+ !(regval & BIT(INTERRUPT_MASK_OFF)))
continue;
irq = irq_find_mapping(gc->irq.domain, irqnr + i);
generic_handle_irq(irq);
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 8fa453a59da5..22af7edfdb38 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -54,6 +54,10 @@
#define ACTIVE_LEVEL_MASK 0x3UL
#define DRV_STRENGTH_SEL_MASK 0x3UL
+#define ACTIVE_LEVEL_HIGH 0x0UL
+#define ACTIVE_LEVEL_LOW 0x1UL
+#define ACTIVE_LEVEL_BOTH 0x2UL
+
#define DB_TYPE_NO_DEBOUNCE 0x0UL
#define DB_TYPE_PRESERVE_LOW_GLITCH 0x1UL
#define DB_TYPE_PRESERVE_HIGH_GLITCH 0x2UL
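[Note: with the three values above, the dbg_show hunk decodes the 2-bit active-level field in one step instead of testing individual bits; "both edges" is additionally only valid for edge triggering. A compact sketch of the decode:]

	/*
	 * Sketch: map the 2-bit field to a label, mirroring the hunk above.
	 * "Active on both" is only meaningful when BIT(LEVEL_TRIG_OFF) is
	 * clear (edge triggering), which the caller must check separately.
	 */
	static const char *amd_active_level_name(u32 pin_reg)
	{
		switch ((pin_reg >> ACTIVE_LEVEL_OFF) & ACTIVE_LEVEL_MASK) {
		case ACTIVE_LEVEL_HIGH:
			return "Active high";
		case ACTIVE_LEVEL_LOW:
			return "Active low";
		case ACTIVE_LEVEL_BOTH:
			return "Active on both";
		default:
			return "Unknown Active level";
		}
	}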
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 67e4d9ffa6b1..ef7ab208b951 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -14,6 +14,7 @@
* GNU General Public License for more details.
*/
+#include <dt-bindings/pinctrl/at91.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
/* FIXME: needed for gpio_to_irq(), get rid of this */
@@ -49,6 +50,8 @@
#define ATMEL_PIO_IFSCEN_MASK BIT(13)
#define ATMEL_PIO_OPD_MASK BIT(14)
#define ATMEL_PIO_SCHMITT_MASK BIT(15)
+#define ATMEL_PIO_DRVSTR_MASK GENMASK(17, 16)
+#define ATMEL_PIO_DRVSTR_OFFSET 16
#define ATMEL_PIO_CFGR_EVTSEL_MASK GENMASK(26, 24)
#define ATMEL_PIO_CFGR_EVTSEL_FALLING (0 << 24)
#define ATMEL_PIO_CFGR_EVTSEL_RISING (1 << 24)
@@ -75,6 +78,9 @@
#define ATMEL_GET_PIN_FUNC(pinfunc) ((pinfunc >> 16) & 0xf)
#define ATMEL_GET_PIN_IOSET(pinfunc) ((pinfunc >> 20) & 0xf)
+/* Custom pinconf parameters */
+#define ATMEL_PIN_CONFIG_DRIVE_STRENGTH (PIN_CONFIG_END + 1)
+
struct atmel_pioctrl_data {
unsigned nbanks;
};
@@ -139,6 +145,10 @@ static const char * const atmel_functions[] = {
"GPIO", "A", "B", "C", "D", "E", "F", "G"
};
+static const struct pinconf_generic_params atmel_custom_bindings[] = {
+ {"atmel,drive-strength", ATMEL_PIN_CONFIG_DRIVE_STRENGTH, 0},
+};
+
/* --- GPIO --- */
static unsigned int atmel_gpio_read(struct atmel_pioctrl *atmel_pioctrl,
unsigned int bank, unsigned int reg)
@@ -692,6 +702,11 @@ static int atmel_conf_pin_config_group_get(struct pinctrl_dev *pctldev,
return -EINVAL;
arg = 1;
break;
+ case ATMEL_PIN_CONFIG_DRIVE_STRENGTH:
+ if (!(res & ATMEL_PIO_DRVSTR_MASK))
+ return -EINVAL;
+ arg = (res & ATMEL_PIO_DRVSTR_MASK) >> ATMEL_PIO_DRVSTR_OFFSET;
+ break;
default:
return -ENOTSUPP;
}
@@ -777,6 +792,18 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
ATMEL_PIO_SODR);
}
break;
+ case ATMEL_PIN_CONFIG_DRIVE_STRENGTH:
+ switch (arg) {
+ case ATMEL_PIO_DRVSTR_LO:
+ case ATMEL_PIO_DRVSTR_ME:
+ case ATMEL_PIO_DRVSTR_HI:
+ conf &= (~ATMEL_PIO_DRVSTR_MASK);
+ conf |= arg << ATMEL_PIO_DRVSTR_OFFSET;
+ break;
+ default:
+ dev_warn(pctldev->dev, "drive strength not updated (incorrect value)\n");
+ }
+ break;
default:
dev_warn(pctldev->dev,
"unsupported configuration parameter: %u\n",
@@ -816,6 +843,19 @@ static void atmel_conf_pin_config_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, "%s ", "open-drain");
if (conf & ATMEL_PIO_SCHMITT_MASK)
seq_printf(s, "%s ", "schmitt");
+ if (conf & ATMEL_PIO_DRVSTR_MASK) {
+ switch ((conf & ATMEL_PIO_DRVSTR_MASK) >> ATMEL_PIO_DRVSTR_OFFSET) {
+ case ATMEL_PIO_DRVSTR_ME:
+ seq_printf(s, "%s ", "medium-drive");
+ break;
+ case ATMEL_PIO_DRVSTR_HI:
+ seq_printf(s, "%s ", "high-drive");
+ break;
+ /* ATMEL_PIO_DRVSTR_LO and 0, the default value at reset */
+ default:
+ seq_printf(s, "%s ", "low-drive");
+ }
+ }
}
static const struct pinconf_ops atmel_confops = {
@@ -931,10 +971,6 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
atmel_pioctrl->npins = atmel_pioctrl->nbanks * ATMEL_PIO_NPINS_PER_BANK;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "unable to get atmel pinctrl resource\n");
- return -EINVAL;
- }
atmel_pioctrl->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(atmel_pioctrl->reg_base))
return -EINVAL;
@@ -958,6 +994,8 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
return -ENOMEM;
atmel_pinctrl_desc.pins = pin_desc;
atmel_pinctrl_desc.npins = atmel_pioctrl->npins;
+ atmel_pinctrl_desc.num_custom_params = ARRAY_SIZE(atmel_custom_bindings);
+ atmel_pinctrl_desc.custom_params = atmel_custom_bindings;
/* One pin is one group since a pin can achieve all functions. */
group_names = devm_kcalloc(dev,
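[Note: custom parameters such as "atmel,drive-strength" travel through pinconf-generic packed together with their argument in a single unsigned long. A small sketch with the generic pack/unpack helpers; the round-trip function itself is hypothetical:]

	#include <linux/pinctrl/pinconf-generic.h>

	static void drive_strength_roundtrip_sketch(void)
	{
		unsigned long cfg;

		/* Pack the custom parameter with a HI drive-strength argument. */
		cfg = pinconf_to_config_packed(ATMEL_PIN_CONFIG_DRIVE_STRENGTH,
					       ATMEL_PIO_DRVSTR_HI);

		/* config_set/config_get unpack it with the matching helpers. */
		WARN_ON(pinconf_to_config_param(cfg) !=
			ATMEL_PIN_CONFIG_DRIVE_STRENGTH);
		WARN_ON(pinconf_to_config_argument(cfg) != ATMEL_PIO_DRVSTR_HI);
	}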
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
index a52779f33ad4..afd0b533c40a 100644
--- a/drivers/pinctrl/pinctrl-axp209.c
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -316,7 +316,7 @@ static const struct pinctrl_ops axp20x_pctrl_ops = {
.get_group_pins = axp20x_group_pins,
};
-static void axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
+static int axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
unsigned int mask_len,
struct axp20x_pinctrl_function *func,
const struct pinctrl_pin_desc *pins)
@@ -331,18 +331,22 @@ static void axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
func->groups = devm_kcalloc(dev,
ngroups, sizeof(const char *),
GFP_KERNEL);
+ if (!func->groups)
+ return -ENOMEM;
group = func->groups;
for_each_set_bit(bit, &mask_cpy, mask_len) {
*group = pins[bit].name;
group++;
}
}
+
+ return 0;
}
-static void axp20x_build_funcs_groups(struct platform_device *pdev)
+static int axp20x_build_funcs_groups(struct platform_device *pdev)
{
struct axp20x_pctl *pctl = platform_get_drvdata(pdev);
- int i, pin, npins = pctl->desc->npins;
+ int i, ret, pin, npins = pctl->desc->npins;
pctl->funcs[AXP20X_FUNC_GPIO_OUT].name = "gpio_out";
pctl->funcs[AXP20X_FUNC_GPIO_OUT].muxval = AXP20X_MUX_GPIO_OUT;
@@ -366,13 +370,19 @@ static void axp20x_build_funcs_groups(struct platform_device *pdev)
pctl->funcs[i].groups[pin] = pctl->desc->pins[pin].name;
}
- axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->ldo_mask,
+ ret = axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->ldo_mask,
npins, &pctl->funcs[AXP20X_FUNC_LDO],
pctl->desc->pins);
+ if (ret)
+ return ret;
- axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->adc_mask,
+ ret = axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->adc_mask,
npins, &pctl->funcs[AXP20X_FUNC_ADC],
pctl->desc->pins);
+ if (ret)
+ return ret;
+
+ return 0;
}
static const struct of_device_id axp20x_pctl_match[] = {
@@ -424,7 +434,11 @@ static int axp20x_pctl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pctl);
- axp20x_build_funcs_groups(pdev);
+ ret = axp20x_build_funcs_groups(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to build groups\n");
+ return ret;
+ }
pctrl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctrl_desc), GFP_KERNEL);
if (!pctrl_desc)
diff --git a/drivers/pinctrl/pinctrl-gemini.c b/drivers/pinctrl/pinctrl-gemini.c
index 8c9970ae8505..fa7d998e1d5a 100644
--- a/drivers/pinctrl/pinctrl-gemini.c
+++ b/drivers/pinctrl/pinctrl-gemini.c
@@ -1696,6 +1696,7 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = {
.name = "gmii_gmac0_grp",
.pins = gmii_gmac0_3516_pins,
.num_pins = ARRAY_SIZE(gmii_gmac0_3516_pins),
+ .mask = GEMINI_GMAC_IOSEL_MASK,
.driving_mask = GENMASK(17, 16),
},
{
@@ -1703,6 +1704,7 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = {
.pins = gmii_gmac1_3516_pins,
.num_pins = ARRAY_SIZE(gmii_gmac1_3516_pins),
/* Bring out RGMII on the GMAC1 pins */
+ .mask = GEMINI_GMAC_IOSEL_MASK,
.value = GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
.driving_mask = GENMASK(19, 18),
},
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 022307dd4b54..4a8a8efadefa 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -666,7 +666,7 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
* can be used to fix state for MCP23xxx that temporarily
* lost its power supply.
*/
-#define MCP23S08_CONFIG_REGS 8
+#define MCP23S08_CONFIG_REGS 7
static int __check_mcp23s08_reg_cache(struct mcp23s08 *mcp)
{
int cached[MCP23S08_CONFIG_REGS];
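[Note: the constant drops from 8 to 7 because only the first seven MCP23x08 registers hold configuration; address 7 is already INTF, a read-only status register that must not be compared against a cache. The assumed register map:]

	/* Assumed MCP23x08 (8-bit variant) register map for this sketch. */
	enum mcp23x08_regs_sketch {
		MCP_IODIR,	/* 0x00: direction       - config */
		MCP_IPOL,	/* 0x01: input polarity  - config */
		MCP_GPINTEN,	/* 0x02: IRQ enable      - config */
		MCP_DEFVAL,	/* 0x03: compare default - config */
		MCP_INTCON,	/* 0x04: IRQ control     - config */
		MCP_IOCON,	/* 0x05: chip config     - config */
		MCP_GPPU,	/* 0x06: pull-ups        - config, last of 7 */
		MCP_INTF,	/* 0x07: IRQ flags       - status, not cached */
	};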
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 15bb1cb8729b..f9fc2b3a8731 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
@@ -132,7 +133,7 @@ OCELOT_P(0, SG0, NONE, NONE);
OCELOT_P(1, SG0, NONE, NONE);
OCELOT_P(2, SG0, NONE, NONE);
OCELOT_P(3, SG0, NONE, NONE);
-OCELOT_P(4, IRQ0_IN, IRQ0_OUT, TWI);
+OCELOT_P(4, IRQ0_IN, IRQ0_OUT, TWI_SCL_M);
OCELOT_P(5, IRQ1_IN, IRQ1_OUT, PCI_WAKE);
OCELOT_P(6, UART, TWI_SCL_M, NONE);
OCELOT_P(7, UART, TWI_SCL_M, NONE);
@@ -427,11 +428,98 @@ static const struct gpio_chip ocelot_gpiolib_chip = {
.owner = THIS_MODULE,
};
+static void ocelot_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+
+ regmap_update_bits(info->map, OCELOT_GPIO_INTR_ENA, BIT(gpio), 0);
+}
+
+static void ocelot_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+
+ regmap_update_bits(info->map, OCELOT_GPIO_INTR_ENA, BIT(gpio),
+ BIT(gpio));
+}
+
+static void ocelot_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+
+ regmap_write_bits(info->map, OCELOT_GPIO_INTR, BIT(gpio), BIT(gpio));
+}
+
+static int ocelot_irq_set_type(struct irq_data *data, unsigned int type);
+
+static struct irq_chip ocelot_eoi_irqchip = {
+ .name = "gpio",
+ .irq_mask = ocelot_irq_mask,
+ .irq_eoi = ocelot_irq_ack,
+ .irq_unmask = ocelot_irq_unmask,
+ .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
+ .irq_set_type = ocelot_irq_set_type,
+};
+
+static struct irq_chip ocelot_irqchip = {
+ .name = "gpio",
+ .irq_mask = ocelot_irq_mask,
+ .irq_ack = ocelot_irq_ack,
+ .irq_unmask = ocelot_irq_unmask,
+ .irq_set_type = ocelot_irq_set_type,
+};
+
+static int ocelot_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ type &= IRQ_TYPE_SENSE_MASK;
+
+ if (!(type & (IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_HIGH)))
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ irq_set_chip_handler_name_locked(data, &ocelot_eoi_irqchip,
+ handle_fasteoi_irq, NULL);
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_chip_handler_name_locked(data, &ocelot_irqchip,
+ handle_edge_irq, NULL);
+
+ return 0;
+}
+
+static void ocelot_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *parent_chip = irq_desc_get_chip(desc);
+ struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+ unsigned int reg = 0, irq;
+ unsigned long irqs;
+
+ regmap_read(info->map, OCELOT_GPIO_INTR_IDENT, &reg);
+ if (!reg)
+ return;
+
+ chained_irq_enter(parent_chip, desc);
+
+ irqs = reg;
+
+ for_each_set_bit(irq, &irqs, OCELOT_PINS) {
+ generic_handle_irq(irq_linear_revmap(chip->irq.domain, irq));
+ }
+
+ chained_irq_exit(parent_chip, desc);
+}
+
static int ocelot_gpiochip_register(struct platform_device *pdev,
struct ocelot_pinctrl *info)
{
struct gpio_chip *gc;
- int ret;
+ int ret, irq;
info->gpio_chip = ocelot_gpiolib_chip;
@@ -446,7 +534,17 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
if (ret)
return ret;
- /* TODO: this can be used as an irqchip but no board is using that */
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (irq <= 0)
+ return irq;
+
+ ret = gpiochip_irqchip_add(gc, &ocelot_irqchip, 0, handle_edge_irq,
+ IRQ_TYPE_NONE);
+ if (ret)
+ return ret;
+
+ gpiochip_set_chained_irqchip(gc, &ocelot_irqchip, irq,
+ ocelot_irq_handler);
return 0;
}
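[Note: level-triggered lines select the EOI variant of the irqchip, whose IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED flags defer the final ack until a threaded handler has run. A hypothetical consumer that benefits from this, e.g. a device on a sleeping bus:]

	#include <linux/interrupt.h>

	static irqreturn_t mydev_thread_fn(int irq, void *data)
	{
		/*
		 * Hypothetical: clear the interrupt source over I2C/SPI here.
		 * With IRQCHIP_EOI_THREADED, ocelot_irq_ack() (wired as
		 * .irq_eoi) runs only after this function returns, so the
		 * still-asserted level line does not immediately re-fire.
		 */
		return IRQ_HANDLED;
	}

	static int mydev_request(struct device *dev, int irq)
	{
		return devm_request_threaded_irq(dev, irq, NULL, mydev_thread_fn,
						 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
						 "mydev", NULL);
	}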
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 717c0f4449a0..f76edf664539 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -1006,6 +1006,7 @@ static int rza1_dt_node_to_map(struct pinctrl_dev *pctldev,
const char *grpname;
const char **fngrps;
int ret, npins;
+ int gsel, fsel;
npins = rza1_dt_node_pin_count(np);
if (npins < 0) {
@@ -1055,18 +1056,19 @@ static int rza1_dt_node_to_map(struct pinctrl_dev *pctldev,
fngrps[0] = grpname;
mutex_lock(&rza1_pctl->mutex);
- ret = pinctrl_generic_add_group(pctldev, grpname, grpins, npins,
- NULL);
- if (ret) {
+ gsel = pinctrl_generic_add_group(pctldev, grpname, grpins, npins,
+ NULL);
+ if (gsel < 0) {
mutex_unlock(&rza1_pctl->mutex);
- return ret;
+ return gsel;
}
- ret = pinmux_generic_add_function(pctldev, grpname, fngrps, 1,
- mux_confs);
- if (ret)
+ fsel = pinmux_generic_add_function(pctldev, grpname, fngrps, 1,
+ mux_confs);
+ if (fsel < 0) {
+ ret = fsel;
goto remove_group;
- mutex_unlock(&rza1_pctl->mutex);
+ }
dev_info(rza1_pctl->dev, "Parsed function and group %s with %d pins\n",
grpname, npins);
@@ -1083,15 +1085,15 @@ static int rza1_dt_node_to_map(struct pinctrl_dev *pctldev,
(*map)->data.mux.group = np->name;
(*map)->data.mux.function = np->name;
*num_maps = 1;
+ mutex_unlock(&rza1_pctl->mutex);
return 0;
remove_function:
- mutex_lock(&rza1_pctl->mutex);
- pinmux_generic_remove_last_function(pctldev);
+ pinmux_generic_remove_function(pctldev, fsel);
remove_group:
- pinctrl_generic_remove_last_group(pctldev);
+ pinctrl_generic_remove_group(pctldev, gsel);
mutex_unlock(&rza1_pctl->mutex);
dev_info(rza1_pctl->dev, "Unable to parse function and group %s\n",
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index e5647dac0818..7ec72ff2419a 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -747,38 +747,44 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs)
/**
* pcs_add_function() - adds a new function to the function list
* @pcs: pcs driver instance
- * @np: device node of the mux entry
+ * @fcn: new function allocated
* @name: name of the function
* @vals: array of mux register value pairs used by the function
* @nvals: number of mux register value pairs
* @pgnames: array of pingroup names for the function
* @npgnames: number of pingroup names
+ *
+ * Caller must take care of locking.
*/
-static struct pcs_function *pcs_add_function(struct pcs_device *pcs,
- struct device_node *np,
- const char *name,
- struct pcs_func_vals *vals,
- unsigned nvals,
- const char **pgnames,
- unsigned npgnames)
+static int pcs_add_function(struct pcs_device *pcs,
+ struct pcs_function **fcn,
+ const char *name,
+ struct pcs_func_vals *vals,
+ unsigned int nvals,
+ const char **pgnames,
+ unsigned int npgnames)
{
struct pcs_function *function;
- int res;
+ int selector;
function = devm_kzalloc(pcs->dev, sizeof(*function), GFP_KERNEL);
if (!function)
- return NULL;
+ return -ENOMEM;
function->vals = vals;
function->nvals = nvals;
- res = pinmux_generic_add_function(pcs->pctl, name,
- pgnames, npgnames,
- function);
- if (res)
- return NULL;
+ selector = pinmux_generic_add_function(pcs->pctl, name,
+ pgnames, npgnames,
+ function);
+ if (selector < 0) {
+ devm_kfree(pcs->dev, function);
+ *fcn = NULL;
+ } else {
+ *fcn = function;
+ }
- return function;
+ return selector;
}
/**
@@ -979,8 +985,8 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
{
const char *name = "pinctrl-single,pins";
struct pcs_func_vals *vals;
- int rows, *pins, found = 0, res = -ENOMEM, i;
- struct pcs_function *function;
+ int rows, *pins, found = 0, res = -ENOMEM, i, fsel, gsel;
+ struct pcs_function *function = NULL;
rows = pinctrl_count_index_with_args(np, name);
if (rows <= 0) {
@@ -1030,21 +1036,25 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
}
pgnames[0] = np->name;
- function = pcs_add_function(pcs, np, np->name, vals, found, pgnames, 1);
- if (!function) {
- res = -ENOMEM;
+ mutex_lock(&pcs->mutex);
+ fsel = pcs_add_function(pcs, &function, np->name, vals, found,
+ pgnames, 1);
+ if (fsel < 0) {
+ res = fsel;
goto free_pins;
}
- res = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs);
- if (res < 0)
+ gsel = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs);
+ if (gsel < 0) {
+ res = gsel;
goto free_function;
+ }
(*map)->type = PIN_MAP_TYPE_MUX_GROUP;
(*map)->data.mux.group = np->name;
(*map)->data.mux.function = np->name;
- if (PCS_HAS_PINCONF) {
+ if (PCS_HAS_PINCONF && function) {
res = pcs_parse_pinconf(pcs, np, function, map);
if (res)
goto free_pingroups;
@@ -1052,15 +1062,17 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
} else {
*num_maps = 1;
}
+ mutex_unlock(&pcs->mutex);
+
return 0;
free_pingroups:
- pinctrl_generic_remove_last_group(pcs->pctl);
+ pinctrl_generic_remove_group(pcs->pctl, gsel);
*num_maps = 1;
free_function:
- pinmux_generic_remove_last_function(pcs->pctl);
-
+ pinmux_generic_remove_function(pcs->pctl, fsel);
free_pins:
+ mutex_unlock(&pcs->mutex);
devm_kfree(pcs->dev, pins);
free_vals:
@@ -1077,9 +1089,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
{
const char *name = "pinctrl-single,bits";
struct pcs_func_vals *vals;
- int rows, *pins, found = 0, res = -ENOMEM, i;
+ int rows, *pins, found = 0, res = -ENOMEM, i, fsel, gsel;
int npins_in_row;
- struct pcs_function *function;
+ struct pcs_function *function = NULL;
rows = pinctrl_count_index_with_args(np, name);
if (rows <= 0) {
@@ -1166,15 +1178,19 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
}
pgnames[0] = np->name;
- function = pcs_add_function(pcs, np, np->name, vals, found, pgnames, 1);
- if (!function) {
- res = -ENOMEM;
+ mutex_lock(&pcs->mutex);
+ fsel = pcs_add_function(pcs, &function, np->name, vals, found,
+ pgnames, 1);
+ if (fsel < 0) {
+ res = fsel;
goto free_pins;
}
- res = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs);
- if (res < 0)
+ gsel = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs);
+ if (gsel < 0) {
+ res = gsel;
goto free_function;
+ }
(*map)->type = PIN_MAP_TYPE_MUX_GROUP;
(*map)->data.mux.group = np->name;
@@ -1186,14 +1202,17 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
}
*num_maps = 1;
+ mutex_unlock(&pcs->mutex);
+
return 0;
free_pingroups:
- pinctrl_generic_remove_last_group(pcs->pctl);
+ pinctrl_generic_remove_group(pcs->pctl, gsel);
*num_maps = 1;
free_function:
- pinmux_generic_remove_last_function(pcs->pctl);
+ pinmux_generic_remove_function(pcs->pctl, fsel);
free_pins:
+ mutex_unlock(&pcs->mutex);
devm_kfree(pcs->dev, pins);
free_vals:
@@ -1598,19 +1617,19 @@ static int pcs_save_context(struct pcs_device *pcs)
switch (pcs->width) {
case 64:
- regsl = (u64 *)pcs->saved_vals;
- for (i = 0; i < pcs->size / mux_bytes; i++)
- regsl[i] = pcs->read(pcs->base + i * mux_bytes);
+ regsl = pcs->saved_vals;
+ for (i = 0; i < pcs->size; i += mux_bytes)
+ *regsl++ = pcs->read(pcs->base + i);
break;
case 32:
- regsw = (u32 *)pcs->saved_vals;
- for (i = 0; i < pcs->size / mux_bytes; i++)
- regsw[i] = pcs->read(pcs->base + i * mux_bytes);
+ regsw = pcs->saved_vals;
+ for (i = 0; i < pcs->size; i += mux_bytes)
+ *regsw++ = pcs->read(pcs->base + i);
break;
case 16:
- regshw = (u16 *)pcs->saved_vals;
- for (i = 0; i < pcs->size / mux_bytes; i++)
- regshw[i] = pcs->read(pcs->base + i * mux_bytes);
+ regshw = pcs->saved_vals;
+ for (i = 0; i < pcs->size; i += mux_bytes)
+ *regshw++ = pcs->read(pcs->base + i);
break;
}
@@ -1628,19 +1647,19 @@ static void pcs_restore_context(struct pcs_device *pcs)
switch (pcs->width) {
case 64:
- regsl = (u64 *)pcs->saved_vals;
- for (i = 0; i < pcs->size / mux_bytes; i++)
- pcs->write(regsl[i], pcs->base + i * mux_bytes);
+ regsl = pcs->saved_vals;
+ for (i = 0; i < pcs->size; i += mux_bytes)
+ pcs->write(*regsl++, pcs->base + i);
break;
case 32:
- regsw = (u32 *)pcs->saved_vals;
- for (i = 0; i < pcs->size / mux_bytes; i++)
- pcs->write(regsw[i], pcs->base + i * mux_bytes);
+ regsw = pcs->saved_vals;
+ for (i = 0; i < pcs->size; i += mux_bytes)
+ pcs->write(*regsw++, pcs->base + i);
break;
case 16:
- regshw = (u16 *)pcs->saved_vals;
- for (i = 0; i < pcs->size / mux_bytes; i++)
- pcs->write(regshw[i], pcs->base + i * mux_bytes);
+ regshw = pcs->saved_vals;
+ for (i = 0; i < pcs->size; i += mux_bytes)
+ pcs->write(*regshw++, pcs->base + i);
break;
}
}
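[Note: the rewritten loops walk the register file in mux_bytes strides with a typed destination pointer instead of indexing through a cast; both forms touch the same pcs->size / mux_bytes registers. The 16-bit case in isolation, as a sketch:]

	/*
	 * Sketch: equivalent to regshw[i] = read(base + i * mux_bytes) for
	 * i in [0, size / mux_bytes), but without casting saved_vals.
	 */
	static void pcs_save_u16_sketch(u16 *dst, void __iomem *base, size_t size,
					unsigned int mux_bytes,
					unsigned int (*read)(void __iomem *reg))
	{
		size_t i;

		for (i = 0; i < size; i += mux_bytes)
			*dst++ = read(base + i);
	}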
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index b8e9bda8ec98..5780442c068b 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -22,7 +22,6 @@
#include <linux/err.h>
#include <linux/list.h>
#include <linux/string.h>
-#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/pinctrl/machine.h>
@@ -308,7 +307,6 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev,
selector++;
}
- dev_err(pctldev->dev, "function '%s' not supported\n", function);
return -EINVAL;
}
@@ -775,6 +773,16 @@ int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
void *data)
{
struct function_desc *function;
+ int selector;
+
+ if (!name)
+ return -EINVAL;
+
+ selector = pinmux_func_name_to_selector(pctldev, name);
+ if (selector >= 0)
+ return selector;
+
+ selector = pctldev->num_functions;
function = devm_kzalloc(pctldev->dev, sizeof(*function), GFP_KERNEL);
if (!function)
@@ -785,12 +793,11 @@ int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
function->num_group_names = num_groups;
function->data = data;
- radix_tree_insert(&pctldev->pin_function_tree, pctldev->num_functions,
- function);
+ radix_tree_insert(&pctldev->pin_function_tree, selector, function);
pctldev->num_functions++;
- return 0;
+ return selector;
}
EXPORT_SYMBOL_GPL(pinmux_generic_add_function);
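[Note: pinmux_generic_add_function() now returns the function's selector (reusing the existing entry when the name is already registered) rather than 0. Together with pinctrl_generic_add_group(), callers can unwind by selector on error, as the pinctrl-single and rza1 conversions above do. A minimal caller sketch with hypothetical names:]

	static int foo_register_one(struct pinctrl_dev *pctldev, const char *name,
				    const char **groups, int *pins, int npins,
				    void *data)
	{
		int fsel, gsel;

		fsel = pinmux_generic_add_function(pctldev, name, groups, 1, data);
		if (fsel < 0)
			return fsel;

		gsel = pinctrl_generic_add_group(pctldev, name, pins, npins, data);
		if (gsel < 0) {
			/* Remove exactly the function added above, by selector. */
			pinmux_generic_remove_function(pctldev, fsel);
			return gsel;
		}

		return 0;
	}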
diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h
index a331fcdbedd9..3319535c76cb 100644
--- a/drivers/pinctrl/pinmux.h
+++ b/drivers/pinctrl/pinmux.h
@@ -150,13 +150,6 @@ int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
int pinmux_generic_remove_function(struct pinctrl_dev *pctldev,
unsigned int selector);
-static inline int
-pinmux_generic_remove_last_function(struct pinctrl_dev *pctldev)
-{
- return pinmux_generic_remove_function(pctldev,
- pctldev->num_functions - 1);
-}
-
void pinmux_generic_free_functions(struct pinctrl_dev *pctldev);
#else
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 0e22f52b2a19..2155a30c282b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -250,22 +250,30 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
/* Convert register value to pinconf value */
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- arg = arg == MSM_NO_PULL;
+ if (arg != MSM_NO_PULL)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- arg = arg == MSM_PULL_DOWN;
+ if (arg != MSM_PULL_DOWN)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_BUS_HOLD:
if (pctrl->soc->pull_no_keeper)
return -ENOTSUPP;
- arg = arg == MSM_KEEPER;
+ if (arg != MSM_KEEPER)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_UP:
if (pctrl->soc->pull_no_keeper)
arg = arg == MSM_PULL_UP_NO_KEEPER;
else
arg = arg == MSM_PULL_UP;
+ if (!arg)
+ return -EINVAL;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
arg = msm_regval_to_drive(arg);
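[Note: the bias cases now follow the pinconf convention for boolean parameters: return -EINVAL when the option is not in effect and report arg = 1 when it is, so generic debugfs dumps list only active settings. The convention in isolation, with a hypothetical register decode:]

	/* Sketch: 'pull' is a hypothetical decoded bias field. */
	static int foo_bias_disable_get(unsigned int pull, unsigned long *config)
	{
		if (pull != 0)		/* assumed: 0 encodes "no pull" */
			return -EINVAL;	/* not set -> skipped by debugfs */

		*config = pinconf_to_config_packed(PIN_CONFIG_BIAS_DISABLE, 1);
		return 0;
	}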
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 3e66e0d10010..cf82db78e69e 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -390,31 +390,47 @@ static int pmic_gpio_config_get(struct pinctrl_dev *pctldev,
switch (param) {
case PIN_CONFIG_DRIVE_PUSH_PULL:
- arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_CMOS;
+ if (pad->buffer_type != PMIC_GPIO_OUT_BUF_CMOS)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS;
+ if (pad->buffer_type != PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_DRIVE_OPEN_SOURCE:
- arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS;
+ if (pad->buffer_type != PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- arg = pad->pullup == PMIC_GPIO_PULL_DOWN;
+ if (pad->pullup != PMIC_GPIO_PULL_DOWN)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_DISABLE:
- arg = pad->pullup = PMIC_GPIO_PULL_DISABLE;
+ if (pad->pullup != PMIC_GPIO_PULL_DISABLE)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_UP:
- arg = pad->pullup == PMIC_GPIO_PULL_UP_30;
+ if (pad->pullup != PMIC_GPIO_PULL_UP_30)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
- arg = !pad->is_enabled;
+ if (pad->is_enabled)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_POWER_SOURCE:
arg = pad->power_source;
break;
case PIN_CONFIG_INPUT_ENABLE:
- arg = pad->input_enabled;
+ if (!pad->input_enabled)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_OUTPUT:
arg = pad->out_value;
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
index d82820fc349a..44c6b753f692 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
@@ -616,16 +616,22 @@ static const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = {
.nr_banks = ARRAY_SIZE(exynos5260_pin_banks0),
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos5260_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos5260_pin_banks1),
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos5260_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos5260_pin_banks2),
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
},
};
@@ -842,30 +848,40 @@ static const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks0),
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos5420_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks1),
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos5420_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks2),
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 3 data */
.pin_banks = exynos5420_pin_banks3,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks3),
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 4 data */
.pin_banks = exynos5420_pin_banks4,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks4),
.eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
.retention_data = &exynos4_audio_retention_data,
},
};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index a263ddd94945..f49ea3d92aa1 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -25,6 +25,7 @@
#include <linux/regmap.h>
#include <linux/err.h>
#include <linux/soc/samsung/exynos-pmu.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <dt-bindings/pinctrl/samsung.h>
@@ -37,6 +38,8 @@ struct exynos_irq_chip {
u32 eint_con;
u32 eint_mask;
u32 eint_pend;
+ u32 eint_wake_mask_value;
+ u32 eint_wake_mask_reg;
};
static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip)
@@ -215,6 +218,7 @@ static struct exynos_irq_chip exynos_gpio_irq_chip = {
.eint_con = EXYNOS_GPIO_ECON_OFFSET,
.eint_mask = EXYNOS_GPIO_EMASK_OFFSET,
.eint_pend = EXYNOS_GPIO_EPEND_OFFSET,
+ /* eint_wake_mask_value not used */
};
static int exynos_eint_irq_map(struct irq_domain *h, unsigned int virq,
@@ -330,6 +334,8 @@ u32 exynos_get_eint_wake_mask(void)
static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
{
+ struct irq_chip *chip = irq_data_get_irq_chip(irqd);
+ struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
unsigned long bit = 1UL << (2 * bank->eint_offset + irqd->hwirq);
@@ -339,6 +345,7 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
exynos_eint_wake_mask |= bit;
else
exynos_eint_wake_mask &= ~bit;
+ our_chip->eint_wake_mask_value = exynos_eint_wake_mask;
return 0;
}
@@ -346,6 +353,25 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
/*
* irq_chip for wakeup interrupts
*/
+static const struct exynos_irq_chip s5pv210_wkup_irq_chip __initconst = {
+ .chip = {
+ .name = "s5pv210_wkup_irq_chip",
+ .irq_unmask = exynos_irq_unmask,
+ .irq_mask = exynos_irq_mask,
+ .irq_ack = exynos_irq_ack,
+ .irq_set_type = exynos_irq_set_type,
+ .irq_set_wake = exynos_wkup_irq_set_wake,
+ .irq_request_resources = exynos_irq_request_resources,
+ .irq_release_resources = exynos_irq_release_resources,
+ },
+ .eint_con = EXYNOS_WKUP_ECON_OFFSET,
+ .eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
+ .eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
+ .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
+ /* Only difference from exynos4210_wkup_irq_chip: */
+ .eint_wake_mask_reg = S5PV210_EINT_WAKEUP_MASK,
+};
+
static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
.chip = {
.name = "exynos4210_wkup_irq_chip",
@@ -360,6 +386,8 @@ static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
.eint_con = EXYNOS_WKUP_ECON_OFFSET,
.eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
.eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
+ .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
+ .eint_wake_mask_reg = EXYNOS_EINT_WAKEUP_MASK,
};
static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
@@ -376,10 +404,14 @@ static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
.eint_con = EXYNOS7_WKUP_ECON_OFFSET,
.eint_mask = EXYNOS7_WKUP_EMASK_OFFSET,
.eint_pend = EXYNOS7_WKUP_EPEND_OFFSET,
+ .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
+ .eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK,
};
/* list of external wakeup controllers supported */
static const struct of_device_id exynos_wkup_irq_ids[] = {
+ { .compatible = "samsung,s5pv210-wakeup-eint",
+ .data = &s5pv210_wkup_irq_chip },
{ .compatible = "samsung,exynos4210-wakeup-eint",
.data = &exynos4210_wkup_irq_chip },
{ .compatible = "samsung,exynos7-wakeup-eint",
@@ -542,6 +574,27 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
return 0;
}
+static void
+exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
+ struct exynos_irq_chip *irq_chip)
+{
+ struct regmap *pmu_regs;
+
+ if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
+ dev_warn(drvdata->dev,
+ "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n");
+ return;
+ }
+
+ pmu_regs = drvdata->retention_ctrl->priv;
+ dev_info(drvdata->dev,
+ "Setting external wakeup interrupt mask: 0x%x\n",
+ irq_chip->eint_wake_mask_value);
+
+ regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg,
+ irq_chip->eint_wake_mask_value);
+}
+
static void exynos_pinctrl_suspend_bank(
struct samsung_pinctrl_drv_data *drvdata,
struct samsung_pin_bank *bank)
@@ -564,11 +617,24 @@ static void exynos_pinctrl_suspend_bank(
void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
{
struct samsung_pin_bank *bank = drvdata->pin_banks;
+ struct exynos_irq_chip *irq_chip = NULL;
int i;
- for (i = 0; i < drvdata->nr_banks; ++i, ++bank)
+ for (i = 0; i < drvdata->nr_banks; ++i, ++bank) {
if (bank->eint_type == EINT_TYPE_GPIO)
exynos_pinctrl_suspend_bank(drvdata, bank);
+ else if (bank->eint_type == EINT_TYPE_WKUP) {
+ if (!irq_chip) {
+ irq_chip = bank->irq_chip;
+ exynos_pinctrl_set_eint_wakeup_mask(drvdata,
+ irq_chip);
+ } else if (bank->irq_chip != irq_chip) {
+ dev_warn(drvdata->dev,
+ "More than one external wakeup interrupt chip configured (bank: %s). This is not supported by hardware nor by driver.\n",
+ bank->name);
+ }
+ }
+ }
}
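[Note: the wake mask written at suspend time is accumulated through the normal irq_set_wake path, so a driver only has to mark its interrupt as wake-capable. A hypothetical consumer sketch:]

	/* 'struct mydev' and its irq field are hypothetical. */
	static int mydev_suspend(struct device *dev)
	{
		struct mydev *md = dev_get_drvdata(dev);

		/*
		 * enable_irq_wake() reaches exynos_wkup_irq_set_wake(), which
		 * clears this line's bit in eint_wake_mask_value; the pinctrl
		 * suspend hook above then writes the mask to the PMU register.
		 */
		if (device_may_wakeup(dev))
			enable_irq_wake(md->irq);

		return 0;
	}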
static void exynos_pinctrl_resume_bank(
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index f0cda9424dfe..e571bbd7139b 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -223,6 +223,13 @@ struct samsung_retention_data {
* interrupts for the controller.
* @eint_wkup_init: platform specific callback to setup the external wakeup
* interrupts for the controller.
+ * @suspend: platform specific suspend callback, executed during pin controller
+ * device suspend, see samsung_pinctrl_suspend()
+ * @resume: platform specific resume callback, executed during pin controller
+ * device resume, see samsung_pinctrl_resume()
+ *
+ * Platforms with external wakeup interrupts must define at least
+ * eint_wkup_init, retention_data and suspend for suspend/resume to
+ * work properly.
*/
struct samsung_pin_ctrl {
const struct samsung_pin_bank_data *pin_banks;
@@ -255,6 +262,10 @@ struct samsung_pin_ctrl {
* @pin_base: starting system wide pin number.
* @nr_pins: number of pins supported by the controller.
* @retention_ctrl: retention control runtime data.
+ * @suspend: platform specific suspend callback, executed during pin controller
+ * device suspend, see samsung_pinctrl_suspend()
+ * @resume: platform specific resume callback, executed during pin controller
+ * device resume, see samsung_pinctrl_resume()
*/
struct samsung_pinctrl_drv_data {
struct list_head node;
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index d2bbee656381..cfd7de67e3e3 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -1758,6 +1758,263 @@ static const unsigned int du_disp_mux[] = {
DU_DISP_MARK,
};
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
+};
+
+static const unsigned int hscif0_data_mux[] = {
+ HRX0_MARK, HTX0_MARK,
+};
+
+static const unsigned int hscif0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 12),
+};
+
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK,
+};
+
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 16), RCAR_GP_PIN(5, 15),
+};
+
+static const unsigned int hscif0_ctrl_mux[] = {
+ HRTS0_N_MARK, HCTS0_N_MARK,
+};
+
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+
+static const unsigned int hscif1_data_a_mux[] = {
+ HRX1_A_MARK, HTX1_A_MARK,
+};
+
+static const unsigned int hscif1_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 21),
+};
+
+static const unsigned int hscif1_clk_a_mux[] = {
+ HSCK1_A_MARK,
+};
+
+static const unsigned int hscif1_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
+};
+
+static const unsigned int hscif1_ctrl_a_mux[] = {
+ HRTS1_N_A_MARK, HCTS1_N_A_MARK,
+};
+
+static const unsigned int hscif1_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+
+static const unsigned int hscif1_data_b_mux[] = {
+ HRX1_B_MARK, HTX1_B_MARK,
+};
+
+static const unsigned int hscif1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 0),
+};
+
+static const unsigned int hscif1_clk_b_mux[] = {
+ HSCK1_B_MARK,
+};
+
+static const unsigned int hscif1_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
+};
+
+static const unsigned int hscif1_ctrl_b_mux[] = {
+ HRTS1_N_B_MARK, HCTS1_N_B_MARK,
+};
+
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+
+static const unsigned int hscif2_data_a_mux[] = {
+ HRX2_A_MARK, HTX2_A_MARK,
+};
+
+static const unsigned int hscif2_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 10),
+};
+
+static const unsigned int hscif2_clk_a_mux[] = {
+ HSCK2_A_MARK,
+};
+
+static const unsigned int hscif2_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+};
+
+static const unsigned int hscif2_ctrl_a_mux[] = {
+ HRTS2_N_A_MARK, HCTS2_N_A_MARK,
+};
+
+static const unsigned int hscif2_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+
+static const unsigned int hscif2_data_b_mux[] = {
+ HRX2_B_MARK, HTX2_B_MARK,
+};
+
+static const unsigned int hscif2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 21),
+};
+
+static const unsigned int hscif2_clk_b_mux[] = {
+ HSCK2_B_MARK,
+};
+
+static const unsigned int hscif2_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 20), RCAR_GP_PIN(6, 19),
+};
+
+static const unsigned int hscif2_ctrl_b_mux[] = {
+ HRTS2_N_B_MARK, HCTS2_N_B_MARK,
+};
+
+static const unsigned int hscif2_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 25), RCAR_GP_PIN(6, 26),
+};
+
+static const unsigned int hscif2_data_c_mux[] = {
+ HRX2_C_MARK, HTX2_C_MARK,
+};
+
+static const unsigned int hscif2_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 24),
+};
+
+static const unsigned int hscif2_clk_c_mux[] = {
+ HSCK2_C_MARK,
+};
+
+static const unsigned int hscif2_ctrl_c_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 27),
+};
+
+static const unsigned int hscif2_ctrl_c_mux[] = {
+ HRTS2_N_C_MARK, HCTS2_N_C_MARK,
+};
+
+/* - HSCIF3 ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+
+static const unsigned int hscif3_data_a_mux[] = {
+ HRX3_A_MARK, HTX3_A_MARK,
+};
+
+static const unsigned int hscif3_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 22),
+};
+
+static const unsigned int hscif3_clk_mux[] = {
+ HSCK3_MARK,
+};
+
+static const unsigned int hscif3_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+
+static const unsigned int hscif3_ctrl_mux[] = {
+ HRTS3_N_MARK, HCTS3_N_MARK,
+};
+
+static const unsigned int hscif3_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+};
+
+static const unsigned int hscif3_data_b_mux[] = {
+ HRX3_B_MARK, HTX3_B_MARK,
+};
+
+static const unsigned int hscif3_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+};
+
+static const unsigned int hscif3_data_c_mux[] = {
+ HRX3_C_MARK, HTX3_C_MARK,
+};
+
+static const unsigned int hscif3_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+
+static const unsigned int hscif3_data_d_mux[] = {
+ HRX3_D_MARK, HTX3_D_MARK,
+};
+
+/* - HSCIF4 ----------------------------------------------------------------- */
+static const unsigned int hscif4_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+};
+
+static const unsigned int hscif4_data_a_mux[] = {
+ HRX4_A_MARK, HTX4_A_MARK,
+};
+
+static const unsigned int hscif4_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 11),
+};
+
+static const unsigned int hscif4_clk_mux[] = {
+ HSCK4_MARK,
+};
+
+static const unsigned int hscif4_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14),
+};
+
+static const unsigned int hscif4_ctrl_mux[] = {
+ HRTS4_N_MARK, HCTS4_N_MARK,
+};
+
+static const unsigned int hscif4_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+
+static const unsigned int hscif4_data_b_mux[] = {
+ HRX4_B_MARK, HTX4_B_MARK,
+};
+
/* - I2C -------------------------------------------------------------------- */
static const unsigned int i2c1_a_pins[] = {
/* SDA, SCL */
@@ -3169,6 +3426,34 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(du_oddf),
SH_PFC_PIN_GROUP(du_cde),
SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_a),
+ SH_PFC_PIN_GROUP(hscif1_clk_a),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_a),
+ SH_PFC_PIN_GROUP(hscif2_clk_a),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif2_data_b),
+ SH_PFC_PIN_GROUP(hscif2_clk_b),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_c),
+ SH_PFC_PIN_GROUP(hscif2_clk_c),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif3_data_a),
+ SH_PFC_PIN_GROUP(hscif3_clk),
+ SH_PFC_PIN_GROUP(hscif3_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data_b),
+ SH_PFC_PIN_GROUP(hscif3_data_c),
+ SH_PFC_PIN_GROUP(hscif3_data_d),
+ SH_PFC_PIN_GROUP(hscif4_data_a),
+ SH_PFC_PIN_GROUP(hscif4_clk),
+ SH_PFC_PIN_GROUP(hscif4_ctrl),
+ SH_PFC_PIN_GROUP(hscif4_data_b),
SH_PFC_PIN_GROUP(i2c1_a),
SH_PFC_PIN_GROUP(i2c1_b),
SH_PFC_PIN_GROUP(i2c2_a),
@@ -3379,6 +3664,49 @@ static const char * const du_groups[] = {
"du_disp",
};
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+ "hscif1_data_a",
+ "hscif1_clk_a",
+ "hscif1_ctrl_a",
+ "hscif1_data_b",
+ "hscif1_clk_b",
+ "hscif1_ctrl_b",
+};
+
+static const char * const hscif2_groups[] = {
+ "hscif2_data_a",
+ "hscif2_clk_a",
+ "hscif2_ctrl_a",
+ "hscif2_data_b",
+ "hscif2_clk_b",
+ "hscif2_ctrl_b",
+ "hscif2_data_c",
+ "hscif2_clk_c",
+ "hscif2_ctrl_c",
+};
+
+static const char * const hscif3_groups[] = {
+ "hscif3_data_a",
+ "hscif3_clk",
+ "hscif3_ctrl",
+ "hscif3_data_b",
+ "hscif3_data_c",
+ "hscif3_data_d",
+};
+
+static const char * const hscif4_groups[] = {
+ "hscif4_data_a",
+ "hscif4_clk",
+ "hscif4_ctrl",
+ "hscif4_data_b",
+};
+
static const char * const i2c1_groups[] = {
"i2c1_a",
"i2c1_b",
@@ -3651,6 +3979,11 @@ static const char * const usb30_groups[] = {
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(avb),
SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+ SH_PFC_FUNCTION(hscif4),
SH_PFC_FUNCTION(i2c1),
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c6),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
index a68fd658aada..b81c807ac54d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
@@ -277,7 +277,7 @@
#define IP11_15_12 FM(TX0_A) FM(HTX1_A) FM(SSI_WS2_A) FM(RIF1_D0) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SDAT1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_19_16 FM(CTS0_N_A) FM(NFDATA14_A) FM(AUDIO_CLKOUT_A) FM(RIF1_D1) FM(SCIF_CLK_A) FM(FMCLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_23_20 FM(RTS0_N_TANS_A) FM(NFDATA15_A) FM(AUDIO_CLKOUT1_A) FM(RIF1_CLK) FM(SCL2_A) FM(FMIN_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP11_27_24 FM(SCK0_A) FM(HSCK1_A) FM(USB3HS0_ID) FM(RTS1_N_TANS) FM(SDA2_A) FM(FMCLK_C) F_(0, 0) F_(0, 0) FM(USB1_ID) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_27_24 FM(SCK0_A) FM(HSCK1_A) FM(USB3HS0_ID) FM(RTS1_N_TANS) FM(SDA2_A) FM(FMCLK_C) F_(0, 0) F_(0, 0) FM(USB0_ID) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP11_31_28 FM(RX1) FM(HRX2_B) FM(SSI_SCK9_B) FM(AUDIO_CLKOUT1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 - F */
@@ -1082,7 +1082,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP11_27_24, RTS1_N_TANS),
PINMUX_IPSR_MSEL(IP11_27_24, SDA2_A, SEL_I2C2_0),
PINMUX_IPSR_MSEL(IP11_27_24, FMCLK_C, SEL_FM_2),
- PINMUX_IPSR_GPSR(IP11_27_24, USB1_ID),
+ PINMUX_IPSR_GPSR(IP11_27_24, USB0_ID),
PINMUX_IPSR_GPSR(IP11_31_28, RX1),
PINMUX_IPSR_MSEL(IP11_31_28, HRX2_B, SEL_HSCIF2_1),
@@ -1784,6 +1784,53 @@ static const unsigned int scif_clk_b_mux[] = {
SCIF_CLK_B_MARK,
};
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_a_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 9),
+};
+
+static const unsigned int usb0_a_mux[] = {
+ USB0_PWEN_A_MARK, USB0_OVC_A_MARK,
+};
+
+static const unsigned int usb0_b_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
+};
+
+static const unsigned int usb0_b_mux[] = {
+ USB0_PWEN_B_MARK, USB0_OVC_B_MARK,
+};
+
+static const unsigned int usb0_id_pins[] = {
+ /* ID */
+ RCAR_GP_PIN(5, 0)
+};
+
+static const unsigned int usb0_id_mux[] = {
+ USB0_ID_MARK,
+};
+
+/* - USB30 ------------------------------------------------------------------ */
+static const unsigned int usb30_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 9),
+};
+
+static const unsigned int usb30_mux[] = {
+ USB30_PWEN_MARK, USB30_OVC_MARK,
+};
+
+static const unsigned int usb30_id_pins[] = {
+ /* ID */
+ RCAR_GP_PIN(5, 0),
+};
+
+static const unsigned int usb30_id_mux[] = {
+ USB3HS0_ID_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(avb_link),
SH_PFC_PIN_GROUP(avb_magic),
@@ -1837,6 +1884,11 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif5_data_c),
SH_PFC_PIN_GROUP(scif_clk_a),
SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(usb0_a),
+ SH_PFC_PIN_GROUP(usb0_b),
+ SH_PFC_PIN_GROUP(usb0_id),
+ SH_PFC_PIN_GROUP(usb30),
+ SH_PFC_PIN_GROUP(usb30_id),
};
static const char * const avb_groups[] = {
@@ -1933,6 +1985,17 @@ static const char * const scif_clk_groups[] = {
"scif_clk_b",
};
+static const char * const usb0_groups[] = {
+ "usb0_a",
+ "usb0_b",
+ "usb0_id",
+};
+
+static const char * const usb30_groups[] = {
+ "usb30",
+ "usb30_id",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(avb),
SH_PFC_FUNCTION(i2c1),
@@ -1948,6 +2011,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(scif4),
SH_PFC_FUNCTION(scif5),
SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb30),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index dfed60982a8a..a9bec6e6fdd1 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -46,6 +46,8 @@
#define STM32_GPIO_PINS_PER_BANK 16
#define STM32_GPIO_IRQ_LINE 16
+#define SYSCFG_IRQMUX_MASK GENMASK(3, 0)
+
#define gpio_range_to_bank(chip) \
container_of(chip, struct stm32_gpio_bank, range)
@@ -73,6 +75,7 @@ struct stm32_gpio_bank {
struct fwnode_handle *fwnode;
struct irq_domain *domain;
u32 bank_nr;
+ u32 bank_ioport_nr;
};
struct stm32_pinctrl {
@@ -298,7 +301,7 @@ static int stm32_gpio_domain_activate(struct irq_domain *d,
struct stm32_gpio_bank *bank = d->host_data;
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
- regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->bank_nr);
+ regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->bank_ioport_nr);
return 0;
}
@@ -638,6 +641,11 @@ static int stm32_pmx_set_mux(struct pinctrl_dev *pctldev,
}
range = pinctrl_find_gpio_range_from_pin(pctldev, g->pin);
+ if (!range) {
+ dev_err(pctl->dev, "No gpio range defined.\n");
+ return -EINVAL;
+ }
+
bank = gpiochip_get_data(range->gc);
pin = stm32_gpio_pin(g->pin);
@@ -806,11 +814,17 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
unsigned int pin, enum pin_config_param param,
enum pin_config_param arg)
{
+ struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
struct pinctrl_gpio_range *range;
struct stm32_gpio_bank *bank;
int offset, ret = 0;
range = pinctrl_find_gpio_range_from_pin(pctldev, pin);
+ if (!range) {
+ dev_err(pctl->dev, "No gpio range defined.\n");
+ return -EINVAL;
+ }
+
bank = gpiochip_get_data(range->gc);
offset = stm32_gpio_pin(pin);
@@ -892,6 +906,9 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
bool val;
range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
+ if (!range)
+ return;
+
bank = gpiochip_get_data(range->gc);
offset = stm32_gpio_pin(pin);
@@ -948,6 +965,7 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
struct device_node *np)
{
struct stm32_gpio_bank *bank = &pctl->banks[pctl->nbanks];
+ int bank_ioport_nr;
struct pinctrl_gpio_range *range = &bank->range;
struct of_phandle_args args;
struct device *dev = pctl->dev;
@@ -998,12 +1016,17 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
pinctrl_add_gpio_range(pctl->pctl_dev,
&pctl->banks[bank_nr].range);
}
+
+ if (of_property_read_u32(np, "st,bank-ioport", &bank_ioport_nr))
+ bank_ioport_nr = bank_nr;
+
bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
bank->gpio_chip.ngpio = npins;
bank->gpio_chip.of_node = np;
bank->gpio_chip.parent = dev;
bank->bank_nr = bank_nr;
+ bank->bank_ioport_nr = bank_ioport_nr;
spin_lock_init(&bank->lock);
/* create irq hierarchical domain */
@@ -1033,6 +1056,7 @@ static int stm32_pctrl_dt_setup_irq(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct regmap *rm;
int offset, ret, i;
+ int mask, mask_width;
parent = of_irq_find_parent(np);
if (!parent)
@@ -1052,12 +1076,21 @@ static int stm32_pctrl_dt_setup_irq(struct platform_device *pdev,
if (ret)
return ret;
+ ret = of_property_read_u32_index(np, "st,syscfg", 2, &mask);
+ if (ret)
+ mask = SYSCFG_IRQMUX_MASK;
+
+ mask_width = fls(mask);
+
for (i = 0; i < STM32_GPIO_PINS_PER_BANK; i++) {
struct reg_field mux;
mux.reg = offset + (i / 4) * 4;
- mux.lsb = (i % 4) * 4;
- mux.msb = mux.lsb + 3;
+ mux.lsb = (i % 4) * mask_width;
+ mux.msb = mux.lsb + mask_width - 1;
+
+ dev_dbg(dev, "irqmux%d: reg:%#x, lsb:%d, msb:%d\n",
+ i, mux.reg, mux.lsb, mux.msb);
pctl->irqmux[i] = devm_regmap_field_alloc(dev, rm, mux);
if (IS_ERR(pctl->irqmux[i]))
@@ -1166,7 +1199,7 @@ int stm32_pctl_probe(struct platform_device *pdev)
return PTR_ERR(pctl->pctl_dev);
}
- for_each_child_of_node(np, child)
+ for_each_available_child_of_node(np, child)
if (of_property_read_bool(child, "gpio-controller"))
banks++;
@@ -1179,7 +1212,7 @@ int stm32_pctl_probe(struct platform_device *pdev)
if (!pctl->banks)
return -ENOMEM;
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
if (of_property_read_bool(child, "gpio-controller")) {
ret = stm32_gpiolib_register_bank(pctl, child);
if (ret)
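
The irqmux hunk above makes the EXTI mux field width follow an optional third cell of the st,syscfg property, defaulting to GENMASK(3, 0) (a 4-bit field, four fields per 32-bit register). A small stand-alone sketch of the resulting reg_field arithmetic, mirroring the dev_dbg() output added in the hunk (offset is a made-up example value and fls_sketch() stands in for the kernel's fls()):

#include <stdio.h>

#define SYSCFG_IRQMUX_MASK 0xf /* GENMASK(3, 0) */
#define STM32_GPIO_PINS_PER_BANK 16

/* Stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls_sketch(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int offset = 0x08; /* hypothetical EXTICR offset */
	int mask_width = fls_sketch(SYSCFG_IRQMUX_MASK); /* 4 */
	int i;

	for (i = 0; i < STM32_GPIO_PINS_PER_BANK; i++)
		printf("irqmux%d: reg:%#x, lsb:%d, msb:%d\n",
		       i, offset + (i / 4) * 4,
		       (i % 4) * mask_width,
		       (i % 4) * mask_width + mask_width - 1);
	return 0;
}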
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index f974eee29a19..1aba75897d14 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -629,12 +629,12 @@ static void tegra_pinctrl_clear_parked_bits(struct tegra_pmx *pmx)
}
}
-static bool gpio_node_has_range(void)
+static bool gpio_node_has_range(const char *compatible)
{
struct device_node *np;
bool has_prop = false;
- np = of_find_compatible_node(NULL, NULL, "nvidia,tegra30-gpio");
+ np = of_find_compatible_node(NULL, NULL, compatible);
if (!np)
return has_prop;
@@ -728,7 +728,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
tegra_pinctrl_clear_parked_bits(pmx);
- if (!gpio_node_has_range())
+ if (!gpio_node_has_range(pmx->soc->gpio_compatible))
pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
platform_set_drvdata(pdev, pmx);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
index aa33c20766c4..44c71941b5f8 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.h
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
@@ -189,6 +189,7 @@ struct tegra_pingroup {
*/
struct tegra_pinctrl_soc_data {
unsigned ngpios;
+ const char *gpio_compatible;
const struct pinctrl_pin_desc *pins;
unsigned npins;
struct tegra_function *functions;
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra114.c b/drivers/pinctrl/tegra/pinctrl-tegra114.c
index 56b33fca1bfc..d43c209e9c30 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra114.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra114.c
@@ -1839,6 +1839,7 @@ static const struct tegra_pingroup tegra114_groups[] = {
static const struct tegra_pinctrl_soc_data tegra114_pinctrl = {
.ngpios = NUM_GPIOS,
+ .gpio_compatible = "nvidia,tegra30-gpio",
.pins = tegra114_pins,
.npins = ARRAY_SIZE(tegra114_pins),
.functions = tegra114_functions,
@@ -1867,4 +1868,9 @@ static struct platform_driver tegra114_pinctrl_driver = {
},
.probe = tegra114_pinctrl_probe,
};
-builtin_platform_driver(tegra114_pinctrl_driver);
+
+static int __init tegra114_pinctrl_init(void)
+{
+ return platform_driver_register(&tegra114_pinctrl_driver);
+}
+arch_initcall(tegra114_pinctrl_init);
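
The same conversion from builtin_platform_driver() to an explicit arch_initcall() wrapper is applied to every Tegra variant below. As a rough sketch of why this changes behaviour (simplified expansions, not the exact macro text): builtin_platform_driver() registers the driver at device_initcall level (6), while arch_initcall runs at level 3, so the pinctrl driver now registers earlier in the boot sequence:

/* Before (simplified): builtin_platform_driver() boils down to a
 * device_initcall, i.e. initcall level 6.
 */
static int __init tegra114_pinctrl_driver_init(void)
{
	return platform_driver_register(&tegra114_pinctrl_driver);
}
device_initcall(tegra114_pinctrl_driver_init);

/* After: the open-coded wrapper registers at arch_initcall, level 3,
 * ahead of all plain device_initcall drivers.
 */
static int __init tegra114_pinctrl_init(void)
{
	return platform_driver_register(&tegra114_pinctrl_driver);
}
arch_initcall(tegra114_pinctrl_init);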
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra124.c b/drivers/pinctrl/tegra/pinctrl-tegra124.c
index 7bc998ace0d5..5b07a5834d15 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra124.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra124.c
@@ -2051,6 +2051,7 @@ static const struct tegra_pingroup tegra124_groups[] = {
static const struct tegra_pinctrl_soc_data tegra124_pinctrl = {
.ngpios = NUM_GPIOS,
+ .gpio_compatible = "nvidia,tegra30-gpio",
.pins = tegra124_pins,
.npins = ARRAY_SIZE(tegra124_pins),
.functions = tegra124_functions,
@@ -2079,4 +2080,9 @@ static struct platform_driver tegra124_pinctrl_driver = {
},
.probe = tegra124_pinctrl_probe,
};
-builtin_platform_driver(tegra124_pinctrl_driver);
+
+static int __init tegra124_pinctrl_init(void)
+{
+ return platform_driver_register(&tegra124_pinctrl_driver);
+}
+arch_initcall(tegra124_pinctrl_init);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra20.c b/drivers/pinctrl/tegra/pinctrl-tegra20.c
index b6dd939d32cc..1fc82a9576e0 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra20.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra20.c
@@ -2221,6 +2221,7 @@ static const struct tegra_pingroup tegra20_groups[] = {
static const struct tegra_pinctrl_soc_data tegra20_pinctrl = {
.ngpios = NUM_GPIOS,
+ .gpio_compatible = "nvidia,tegra20-gpio",
.pins = tegra20_pins,
.npins = ARRAY_SIZE(tegra20_pins),
.functions = tegra20_functions,
@@ -2276,4 +2277,9 @@ static struct platform_driver tegra20_pinctrl_driver = {
},
.probe = tegra20_pinctrl_probe,
};
-builtin_platform_driver(tegra20_pinctrl_driver);
+
+static int __init tegra20_pinctrl_init(void)
+{
+ return platform_driver_register(&tegra20_pinctrl_driver);
+}
+arch_initcall(tegra20_pinctrl_init);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra210.c b/drivers/pinctrl/tegra/pinctrl-tegra210.c
index c244e5b17bd6..3e77f5474dd8 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra210.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra210.c
@@ -1553,6 +1553,7 @@ static const struct tegra_pingroup tegra210_groups[] = {
static const struct tegra_pinctrl_soc_data tegra210_pinctrl = {
.ngpios = NUM_GPIOS,
+ .gpio_compatible = "nvidia,tegra30-gpio",
.pins = tegra210_pins,
.npins = ARRAY_SIZE(tegra210_pins),
.functions = tegra210_functions,
@@ -1581,4 +1582,9 @@ static struct platform_driver tegra210_pinctrl_driver = {
},
.probe = tegra210_pinctrl_probe,
};
-builtin_platform_driver(tegra210_pinctrl_driver);
+
+static int __init tegra210_pinctrl_init(void)
+{
+ return platform_driver_register(&tegra210_pinctrl_driver);
+}
+arch_initcall(tegra210_pinctrl_init);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra30.c b/drivers/pinctrl/tegra/pinctrl-tegra30.c
index 1f180a20f2ab..10e617003e9c 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra30.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra30.c
@@ -2474,6 +2474,7 @@ static const struct tegra_pingroup tegra30_groups[] = {
static const struct tegra_pinctrl_soc_data tegra30_pinctrl = {
.ngpios = NUM_GPIOS,
+ .gpio_compatible = "nvidia,tegra30-gpio",
.pins = tegra30_pins,
.npins = ARRAY_SIZE(tegra30_pins),
.functions = tegra30_functions,
@@ -2502,4 +2503,9 @@ static struct platform_driver tegra30_pinctrl_driver = {
},
.probe = tegra30_pinctrl_probe,
};
-builtin_platform_driver(tegra30_pinctrl_driver);
+
+static int __init tegra30_pinctrl_init(void)
+{
+ return platform_driver_register(&tegra30_pinctrl_driver);
+}
+arch_initcall(tegra30_pinctrl_init);
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
index 58825f68b58b..bce533f85420 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
@@ -517,6 +517,10 @@ static const int i2c4_muxvals[] = {1, 1};
static const unsigned nand_pins[] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17};
static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned spi0_pins[] = {56, 57, 58, 59};
+static const int spi0_muxvals[] = {0, 0, 0, 0};
+static const unsigned spi1_pins[] = {169, 170, 171, 172};
+static const int spi1_muxvals[] = {1, 1, 1, 1};
static const unsigned system_bus_pins[] = {1, 2, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17};
static const int system_bus_muxvals[] = {0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
@@ -596,6 +600,8 @@ static const struct uniphier_pinctrl_group uniphier_ld11_groups[] = {
UNIPHIER_PINCTRL_GROUP(i2c3),
UNIPHIER_PINCTRL_GROUP(i2c4),
UNIPHIER_PINCTRL_GROUP(nand),
+ UNIPHIER_PINCTRL_GROUP(spi0),
+ UNIPHIER_PINCTRL_GROUP(spi1),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
UNIPHIER_PINCTRL_GROUP(uart0),
@@ -632,6 +638,8 @@ static const char * const i2c1_groups[] = {"i2c1"};
static const char * const i2c3_groups[] = {"i2c3"};
static const char * const i2c4_groups[] = {"i2c4"};
static const char * const nand_groups[] = {"nand"};
+static const char * const spi0_groups[] = {"spi0"};
+static const char * const spi1_groups[] = {"spi1"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs1"};
static const char * const uart0_groups[] = {"uart0"};
@@ -657,6 +665,8 @@ static const struct uniphier_pinmux_function uniphier_ld11_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c3),
UNIPHIER_PINMUX_FUNCTION(i2c4),
UNIPHIER_PINMUX_FUNCTION(nand),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
+ UNIPHIER_PINMUX_FUNCTION(spi1),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index 9f449b35e300..99f06fe8e1cb 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -606,6 +606,14 @@ static const unsigned nand_pins[] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned sd_pins[] = {10, 11, 12, 13, 14, 15, 16, 17};
static const int sd_muxvals[] = {3, 3, 3, 3, 3, 3, 3, 3}; /* No SDVOLC */
+static const unsigned spi0_pins[] = {56, 57, 58, 59};
+static const int spi0_muxvals[] = {0, 0, 0, 0};
+static const unsigned spi1_pins[] = {169, 170, 171, 172};
+static const int spi1_muxvals[] = {1, 1, 1, 1};
+static const unsigned spi2_pins[] = {86, 87, 88, 89};
+static const int spi2_muxvals[] = {1, 1, 1, 1};
+static const unsigned spi3_pins[] = {74, 75, 76, 77};
+static const int spi3_muxvals[] = {1, 1, 1, 1};
static const unsigned system_bus_pins[] = {1, 2, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17};
static const int system_bus_muxvals[] = {0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
@@ -685,6 +693,10 @@ static const struct uniphier_pinctrl_group uniphier_ld20_groups[] = {
UNIPHIER_PINCTRL_GROUP(i2c4),
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(spi0),
+ UNIPHIER_PINCTRL_GROUP(spi1),
+ UNIPHIER_PINCTRL_GROUP(spi2),
+ UNIPHIER_PINCTRL_GROUP(spi3),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
UNIPHIER_PINCTRL_GROUP(uart0),
@@ -722,6 +734,10 @@ static const char * const i2c3_groups[] = {"i2c3"};
static const char * const i2c4_groups[] = {"i2c4"};
static const char * const nand_groups[] = {"nand"};
static const char * const sd_groups[] = {"sd"};
+static const char * const spi0_groups[] = {"spi0"};
+static const char * const spi1_groups[] = {"spi1"};
+static const char * const spi2_groups[] = {"spi2"};
+static const char * const spi3_groups[] = {"spi3"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs1"};
static const char * const uart0_groups[] = {"uart0"};
@@ -751,6 +767,10 @@ static const struct uniphier_pinmux_function uniphier_ld20_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c4),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
+ UNIPHIER_PINMUX_FUNCTION(spi1),
+ UNIPHIER_PINMUX_FUNCTION(spi2),
+ UNIPHIER_PINMUX_FUNCTION(spi3),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
index 0b10ebc07eb8..b247011524bf 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
@@ -576,6 +576,8 @@ static const unsigned nand_cs1_pins[] = {22, 23};
static const int nand_cs1_muxvals[] = {0, 0};
static const unsigned sd_pins[] = {44, 45, 46, 47, 48, 49, 50, 51, 52};
static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned spi0_pins[] = {135, 136, 137, 138};
+static const int spi0_muxvals[] = {12, 12, 12, 12};
static const unsigned system_bus_pins[] = {16, 17, 18, 19, 20, 165, 166, 167,
168, 169, 170, 171, 172, 173};
static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1,
@@ -640,6 +642,7 @@ static const struct uniphier_pinctrl_group uniphier_ld4_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(spi0),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs0),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
@@ -667,6 +670,7 @@ static const char * const i2c2_groups[] = {"i2c2"};
static const char * const i2c3_groups[] = {"i2c3"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const spi0_groups[] = {"spi0"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs0",
"system_bus_cs1",
@@ -690,6 +694,7 @@ static const struct uniphier_pinmux_function uniphier_ld4_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c3),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
index 8e4d45fea885..cb58797adaee 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
@@ -769,6 +769,10 @@ static const unsigned nand_cs1_pins[] = {37, 38};
static const int nand_cs1_muxvals[] = {0, 0};
static const unsigned sd_pins[] = {47, 48, 49, 50, 51, 52, 53, 54, 55};
static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned spi0_pins[] = {199, 200, 201, 202};
+static const int spi0_muxvals[] = {8, 8, 8, 8};
+static const unsigned spi1_pins[] = {93, 94, 95, 96};
+static const int spi1_muxvals[] = {1, 1, 1, 1};
static const unsigned system_bus_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13};
static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -851,6 +855,8 @@ static const struct uniphier_pinctrl_group uniphier_ld6b_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(spi0),
+ UNIPHIER_PINCTRL_GROUP(spi1),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
UNIPHIER_PINCTRL_GROUP(system_bus_cs2),
@@ -882,6 +888,8 @@ static const char * const i2c2_groups[] = {"i2c2"};
static const char * const i2c3_groups[] = {"i2c3"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const spi0_groups[] = {"spi0"};
+static const char * const spi1_groups[] = {"spi1"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs1",
"system_bus_cs2",
@@ -907,6 +915,8 @@ static const struct uniphier_pinmux_function uniphier_ld6b_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c3),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
+ UNIPHIER_PINMUX_FUNCTION(spi1),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
index 24788a74c254..89148f81d5e0 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
@@ -1050,6 +1050,10 @@ static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned sd1_pins[] = {319, 320, 321, 322, 323, 324, 325, 326,
327};
static const int sd1_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned spi0_pins[] = {199, 200, 201, 202};
+static const int spi0_muxvals[] = {11, 11, 11, 11};
+static const unsigned spi1_pins[] = {195, 196, 197, 198, 235, 238, 239};
+static const int spi1_muxvals[] = {11, 11, 11, 11, 11, 11, 11};
static const unsigned system_bus_pins[] = {25, 26, 27, 28, 29, 30, 31, 32, 33,
34, 35, 36, 37, 38};
static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1138,6 +1142,8 @@ static const struct uniphier_pinctrl_group uniphier_pro4_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
UNIPHIER_PINCTRL_GROUP(sd1),
+ UNIPHIER_PINCTRL_GROUP(spi0),
+ UNIPHIER_PINCTRL_GROUP(spi1),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs0),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
@@ -1171,6 +1177,8 @@ static const char * const i2c6_groups[] = {"i2c6"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
static const char * const sd1_groups[] = {"sd1"};
+static const char * const spi0_groups[] = {"spi0"};
+static const char * const spi1_groups[] = {"spi1"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs0",
"system_bus_cs1",
@@ -1202,6 +1210,8 @@ static const struct uniphier_pinmux_function uniphier_pro4_functions[] = {
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
UNIPHIER_PINMUX_FUNCTION(sd1),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
+ UNIPHIER_PINMUX_FUNCTION(spi1),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
index d5d5e579cb08..d77d6b37aabe 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
@@ -818,6 +818,12 @@ static const unsigned nand_cs1_pins[] = {26, 27};
static const int nand_cs1_muxvals[] = {0, 0};
static const unsigned sd_pins[] = {250, 251, 252, 253, 254, 255, 256, 257, 258};
static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned spi0_pins[] = {120, 121, 122, 123};
+static const int spi0_muxvals[] = {0, 0, 0, 0};
+static const unsigned spi1_pins[] = {134, 139, 85, 86};
+static const int spi1_muxvals[] = {1, 1, 1, 1};
+static const unsigned spi2_pins[] = {55, 56, 57, 58, 82, 83, 84};
+static const int spi2_muxvals[] = {0, 0, 0, 0, 1, 1, 1};
static const unsigned system_bus_pins[] = {4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17};
static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -904,6 +910,9 @@ static const struct uniphier_pinctrl_group uniphier_pro5_groups[] = {
UNIPHIER_PINCTRL_GROUP(i2c5c),
UNIPHIER_PINCTRL_GROUP(i2c6),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(spi0),
+ UNIPHIER_PINCTRL_GROUP(spi1),
+ UNIPHIER_PINCTRL_GROUP(spi2),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs0),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
@@ -934,6 +943,9 @@ static const char * const i2c5_groups[] = {"i2c5", "i2c5b", "i2c5c"};
static const char * const i2c6_groups[] = {"i2c6"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const spi0_groups[] = {"spi0"};
+static const char * const spi1_groups[] = {"spi1"};
+static const char * const spi2_groups[] = {"spi2"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs0",
"system_bus_cs1",
@@ -961,6 +973,9 @@ static const struct uniphier_pinmux_function uniphier_pro5_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c6),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
+ UNIPHIER_PINMUX_FUNCTION(spi1),
+ UNIPHIER_PINMUX_FUNCTION(spi2),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
index 032619ad0e73..90199da87eb9 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
@@ -778,6 +778,10 @@ static const unsigned nand_cs1_pins[] = {37, 38};
static const int nand_cs1_muxvals[] = {8, 8};
static const unsigned sd_pins[] = {47, 48, 49, 50, 51, 52, 53, 54, 55};
static const int sd_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8};
+static const unsigned spi0_pins[] = {199, 200, 201, 202};
+static const int spi0_muxvals[] = {8, 8, 8, 8};
+static const unsigned spi1_pins[] = {93, 94, 95, 96};
+static const int spi1_muxvals[] = {1, 1, 1, 1};
static const unsigned system_bus_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13};
static const int system_bus_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
@@ -861,6 +865,8 @@ static const struct uniphier_pinctrl_group uniphier_pxs2_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(spi0),
+ UNIPHIER_PINCTRL_GROUP(spi1),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
UNIPHIER_PINCTRL_GROUP(uart0),
@@ -897,6 +903,8 @@ static const char * const i2c5_groups[] = {"i2c5"};
static const char * const i2c6_groups[] = {"i2c6"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const spi0_groups[] = {"spi0"};
+static const char * const spi1_groups[] = {"spi1"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs1"};
static const char * const uart0_groups[] = {"uart0", "uart0b", "uart0b_ctsrts"};
@@ -928,6 +936,8 @@ static const struct uniphier_pinmux_function uniphier_pxs2_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c6),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
+ UNIPHIER_PINMUX_FUNCTION(spi1),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
index 535bb2e935e4..3b860da47733 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
@@ -808,6 +808,10 @@ static const unsigned int nand_pins[] = {16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned int sd_pins[] = {43, 44, 45, 46, 47, 48, 49, 50, 51};
static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned spi0_pins[] = {100, 101, 102, 103};
+static const int spi0_muxvals[] = {0, 0, 0, 0};
+static const unsigned spi1_pins[] = {112, 113, 114, 115};
+static const int spi1_muxvals[] = {2, 2, 2, 2};
static const unsigned int system_bus_pins[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14};
static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -886,6 +890,8 @@ static const struct uniphier_pinctrl_group uniphier_pxs3_groups[] = {
UNIPHIER_PINCTRL_GROUP(i2c3),
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(spi0),
+ UNIPHIER_PINCTRL_GROUP(spi1),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
UNIPHIER_PINCTRL_GROUP(uart0),
@@ -913,6 +919,8 @@ static const char * const i2c2_groups[] = {"i2c2"};
static const char * const i2c3_groups[] = {"i2c3"};
static const char * const nand_groups[] = {"nand"};
static const char * const sd_groups[] = {"sd"};
+static const char * const spi0_groups[] = {"spi0"};
+static const char * const spi1_groups[] = {"spi1"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs1"};
static const char * const uart0_groups[] = {"uart0", "uart0_ctsrts"};
@@ -936,6 +944,8 @@ static const struct uniphier_pinmux_function uniphier_pxs3_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c3),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
+ UNIPHIER_PINMUX_FUNCTION(spi1),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
index 0f921a653164..f086083368a7 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
@@ -504,6 +504,8 @@ static const unsigned nand_cs1_pins[] = {22, 23};
static const int nand_cs1_muxvals[] = {0, 0};
static const unsigned sd_pins[] = {32, 33, 34, 35, 36, 37, 38, 39, 40};
static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned spi0_pins[] = {118, 119, 120, 121};
+static const int spi0_muxvals[] = {3, 3, 3, 3};
static const unsigned system_bus_pins[] = {136, 137, 138, 139, 140, 141, 142,
143, 144, 145, 146, 147, 148, 149};
static const int system_bus_muxvals[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1,
@@ -570,6 +572,7 @@ static const struct uniphier_pinctrl_group uniphier_sld8_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(spi0),
UNIPHIER_PINCTRL_GROUP(system_bus),
UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
UNIPHIER_PINCTRL_GROUP(system_bus_cs2),
@@ -598,6 +601,7 @@ static const char * const i2c2_groups[] = {"i2c2"};
static const char * const i2c3_groups[] = {"i2c3"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const spi0_groups[] = {"spi0"};
static const char * const system_bus_groups[] = {"system_bus",
"system_bus_cs1",
"system_bus_cs2",
@@ -622,6 +626,7 @@ static const struct uniphier_pinmux_function uniphier_sld8_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c3),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(spi0),
UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index 322de58eebaf..f66521c7f846 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -30,7 +30,8 @@ int loongson3_cpu_temp(int cpu)
case PRID_REV_LOONGSON3B_R2:
reg = ((reg >> 8) & 0xff) - 100;
break;
- case PRID_REV_LOONGSON3A_R3:
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
reg = (reg & 0xffff)*731/0x4000 - 273;
break;
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index ac4d48830415..107d336453b2 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -1218,6 +1218,17 @@ config INTEL_CHTDC_TI_PWRBTN
To compile this driver as a module, choose M here: the module
will be called intel_chtdc_ti_pwrbtn.
+config I2C_MULTI_INSTANTIATE
+ tristate "I2C multi instantiate pseudo device driver"
+ depends on I2C && ACPI
+ help
+ Some ACPI-based systems list multiple i2c-devices in a single ACPI
+ firmware-node. This driver will instantiate separate i2c-clients
+ for each device in the firmware-node.
+
+ To compile this driver as a module, choose M here: the module
+ will be called i2c-multi-instantiate.
+
endif # X86_PLATFORM_DEVICES
config PMC_ATOM
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 2ba6cb795338..50dc8f280914 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -91,3 +91,4 @@ obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
+obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 3d523ca64694..d67f32a29bb4 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -858,12 +858,6 @@ static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-static void asus_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot)
-{
- kfree(hotplug_slot->info);
- kfree(hotplug_slot);
-}
-
static struct hotplug_slot_ops asus_hotplug_slot_ops = {
.owner = THIS_MODULE,
.get_adapter_status = asus_get_adapter_status,
@@ -905,7 +899,6 @@ static int asus_setup_pci_hotplug(struct asus_wmi *asus)
goto error_info;
asus->hotplug_slot->private = asus;
- asus->hotplug_slot->release = &asus_cleanup_pci_hotplug;
asus->hotplug_slot->ops = &asus_hotplug_slot_ops;
asus_get_adapter_status(asus->hotplug_slot,
&asus->hotplug_slot->info->adapter_status);
@@ -1051,8 +1044,11 @@ static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
* asus_unregister_rfkill_notifier()
*/
asus_rfkill_hotplug(asus);
- if (asus->hotplug_slot)
+ if (asus->hotplug_slot) {
pci_hp_deregister(asus->hotplug_slot);
+ kfree(asus->hotplug_slot->info);
+ kfree(asus->hotplug_slot);
+ }
if (asus->hotplug_workqueue)
destroy_workqueue(asus->hotplug_workqueue);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 4c38904a8a32..a4bbf6ecd1f0 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -726,12 +726,6 @@ static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-static void eeepc_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot)
-{
- kfree(hotplug_slot->info);
- kfree(hotplug_slot);
-}
-
static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
.owner = THIS_MODULE,
.get_adapter_status = eeepc_get_adapter_status,
@@ -758,7 +752,6 @@ static int eeepc_setup_pci_hotplug(struct eeepc_laptop *eeepc)
goto error_info;
eeepc->hotplug_slot->private = eeepc;
- eeepc->hotplug_slot->release = &eeepc_cleanup_pci_hotplug;
eeepc->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
eeepc_get_adapter_status(eeepc->hotplug_slot,
&eeepc->hotplug_slot->info->adapter_status);
@@ -837,8 +830,11 @@ static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
eeepc->wlan_rfkill = NULL;
}
- if (eeepc->hotplug_slot)
+ if (eeepc->hotplug_slot) {
pci_hp_deregister(eeepc->hotplug_slot);
+ kfree(eeepc->hotplug_slot->info);
+ kfree(eeepc->hotplug_slot);
+ }
if (eeepc->bluetooth_rfkill) {
rfkill_unregister(eeepc->bluetooth_rfkill);
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
new file mode 100644
index 000000000000..5456581b473c
--- /dev/null
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * I2C multi-instantiate driver, pseudo driver to instantiate multiple
+ * i2c-clients from a single fwnode.
+ *
+ * Copyright 2018 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+struct i2c_inst_data {
+ const char *type;
+ int gpio_irq_idx;
+};
+
+struct i2c_multi_inst_data {
+ int num_clients;
+ struct i2c_client *clients[0];
+};
+
+static int i2c_multi_inst_probe(struct platform_device *pdev)
+{
+ struct i2c_multi_inst_data *multi;
+ const struct acpi_device_id *match;
+ const struct i2c_inst_data *inst_data;
+ struct i2c_board_info board_info = {};
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev;
+ char name[32];
+ int i, ret;
+
+ match = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!match) {
+ dev_err(dev, "Error ACPI match data is missing\n");
+ return -ENODEV;
+ }
+ inst_data = (const struct i2c_inst_data *)match->driver_data;
+
+ adev = ACPI_COMPANION(dev);
+
+ /* Count number of clients to instantiate */
+ for (i = 0; inst_data[i].type; i++) {}
+
+ multi = devm_kmalloc(dev,
+ offsetof(struct i2c_multi_inst_data, clients[i]),
+ GFP_KERNEL);
+ if (!multi)
+ return -ENOMEM;
+
+ multi->num_clients = i;
+
+ for (i = 0; i < multi->num_clients; i++) {
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, inst_data[i].type, I2C_NAME_SIZE);
+ snprintf(name, sizeof(name), "%s-%s", match->id,
+ inst_data[i].type);
+ board_info.dev_name = name;
+ board_info.irq = 0;
+ if (inst_data[i].gpio_irq_idx != -1) {
+ ret = acpi_dev_gpio_irq_get(adev,
+ inst_data[i].gpio_irq_idx);
+ if (ret < 0) {
+ dev_err(dev, "Error requesting irq at index %d: %d\n",
+ inst_data[i].gpio_irq_idx, ret);
+ goto error;
+ }
+ board_info.irq = ret;
+ }
+ multi->clients[i] = i2c_acpi_new_device(dev, i, &board_info);
+ if (!multi->clients[i]) {
+ dev_err(dev, "Error creating i2c-client, idx %d\n", i);
+ ret = -ENODEV;
+ goto error;
+ }
+ }
+
+ platform_set_drvdata(pdev, multi);
+ return 0;
+
+error:
+ while (--i >= 0)
+ i2c_unregister_device(multi->clients[i]);
+
+ return ret;
+}
+
+static int i2c_multi_inst_remove(struct platform_device *pdev)
+{
+ struct i2c_multi_inst_data *multi = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < multi->num_clients; i++)
+ i2c_unregister_device(multi->clients[i]);
+
+ return 0;
+}
+
+static const struct i2c_inst_data bsg1160_data[] = {
+ { "bmc150_accel", 0 },
+ { "bmc150_magn", -1 },
+ { "bmg160", -1 },
+ {}
+};
+
+/*
+ * Note that new device-ids must also be added to i2c_multi_instantiate_ids in
+ * drivers/acpi/scan.c: acpi_device_enumeration_by_parent().
+ */
+static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
+ { "BSG1160", (unsigned long)bsg1160_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids);
+
+static struct platform_driver i2c_multi_inst_driver = {
+ .driver = {
+ .name = "I2C multi instantiate pseudo device driver",
+ .acpi_match_table = ACPI_PTR(i2c_multi_inst_acpi_ids),
+ },
+ .probe = i2c_multi_inst_probe,
+ .remove = i2c_multi_inst_remove,
+};
+module_platform_driver(i2c_multi_inst_driver);
+
+MODULE_DESCRIPTION("I2C multi instantiate pseudo device driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL");
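
To illustrate how the tables above fit together: each i2c_inst_data array ends with an empty sentinel, which the counting loop in probe relies on, and gpio_irq_idx selects the ACPI GPIO resource used as the client IRQ, with -1 meaning none. A hypothetical second entry might look like this (the HID and type strings are invented; per the note above, a real addition would also have to be mirrored in drivers/acpi/scan.c):

/* Hypothetical example, not part of this patch */
static const struct i2c_inst_data exmp0001_data[] = {
	{ "example_accel", 0 },		/* IRQ from ACPI GPIO index 0 */
	{ "example_magn", -1 },		/* no GPIO IRQ */
	{}				/* sentinel, ends the count loop */
};

static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
	{ "BSG1160", (unsigned long)bsg1160_data },
	{ "EXMP0001", (unsigned long)exmp0001_data },
	{ }
};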
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index b5b890127479..f1afc0ebbc68 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -12,6 +12,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/bitops.h>
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
index 85727ef6ce8e..6ac27e5908f5 100644
--- a/drivers/powercap/Kconfig
+++ b/drivers/powercap/Kconfig
@@ -29,4 +29,14 @@ config INTEL_RAPL
controller, CPU core (Power Plane 0), graphics uncore (Power Plane
1), etc.
+config IDLE_INJECT
+ bool "Idle injection framework"
+ depends on CPU_IDLE
+ default n
+ help
+ This enables support for the idle injection framework. It
+ provides a way to force idle periods on a set of specified
+ CPUs for power capping. Idle periods can be injected
+ synchronously on a set of specified CPUs or alternatively
+ on a per-CPU basis.
endif
diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile
index 0a21ef31372b..1b328854b36e 100644
--- a/drivers/powercap/Makefile
+++ b/drivers/powercap/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_POWERCAP) += powercap_sys.o
obj-$(CONFIG_INTEL_RAPL) += intel_rapl.o
+obj-$(CONFIG_IDLE_INJECT) += idle_inject.o
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
new file mode 100644
index 000000000000..24ff2a068978
--- /dev/null
+++ b/drivers/powercap/idle_inject.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Linaro Limited
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * The idle injection framework provides a way to force CPUs to enter idle
+ * states for a specified fraction of time over a specified period.
+ *
+ * It relies on the smpboot kthreads feature providing common code for CPU
+ * hotplug and thread [un]parking.
+ *
+ * All of the kthreads used for idle injection are created at init time.
+ *
+ * Next, the users of the idle injection framework provide a cpumask via
+ * the register function. The kthreads will be synchronized with respect to
+ * this cpumask.
+ *
+ * The idle and run durations are specified via dedicated helpers, after
+ * which idle injection can be started.
+ *
+ * The idle injection kthreads will call play_idle() with the idle duration
+ * specified as per the above.
+ *
+ * After all of them have been woken up, a timer is set to start the next idle
+ * injection cycle.
+ *
+ * The timer interrupt handler will wake up the idle injection kthreads for
+ * all of the CPUs in the cpumask provided by the user.
+ *
+ * Idle injection is stopped synchronously, which guarantees that no idle
+ * injection kthread activity is left over after it completes.
+ *
+ * It is up to the user of this framework to provide a lock for higher-level
+ * synchronization to prevent race conditions like starting idle injection
+ * while unregistering from the framework.
+ */
+#define pr_fmt(fmt) "ii_dev: " fmt
+
+#include <linux/cpu.h>
+#include <linux/hrtimer.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smpboot.h>
+
+#include <uapi/linux/sched/types.h>
+
+/**
+ * struct idle_inject_thread - task on/off switch structure
+ * @tsk: task injecting the idle cycles
+ * @should_run: whether or not to run the task (for the smpboot kthread API)
+ */
+struct idle_inject_thread {
+ struct task_struct *tsk;
+ int should_run;
+};
+
+/**
+ * struct idle_inject_device - idle injection data
+ * @timer: idle injection period timer
+ * @idle_duration_ms: duration of CPU idle time to inject
+ * @run_duration_ms: duration of CPU run time to allow
+ * @cpumask: mask of CPUs affected by idle injection
+ */
+struct idle_inject_device {
+ struct hrtimer timer;
+ unsigned int idle_duration_ms;
+ unsigned int run_duration_ms;
+ unsigned long int cpumask[0];
+};
+
+static DEFINE_PER_CPU(struct idle_inject_thread, idle_inject_thread);
+static DEFINE_PER_CPU(struct idle_inject_device *, idle_inject_device);
+
+/**
+ * idle_inject_wakeup - Wake up idle injection threads
+ * @ii_dev: target idle injection device
+ *
+ * Every idle injection task associated with the given idle injection device
+ * and running on an online CPU will be woken up.
+ */
+static void idle_inject_wakeup(struct idle_inject_device *ii_dev)
+{
+ struct idle_inject_thread *iit;
+ unsigned int cpu;
+
+ for_each_cpu_and(cpu, to_cpumask(ii_dev->cpumask), cpu_online_mask) {
+ iit = per_cpu_ptr(&idle_inject_thread, cpu);
+ iit->should_run = 1;
+ wake_up_process(iit->tsk);
+ }
+}
+
+/**
+ * idle_inject_timer_fn - idle injection timer function
+ * @timer: idle injection hrtimer
+ *
+ * This function is called when the idle injection timer expires. It wakes up
+ * idle injection tasks associated with the timer and they, in turn, invoke
+ * play_idle() to inject a specified amount of CPU idle time.
+ *
+ * Return: HRTIMER_RESTART.
+ */
+static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
+{
+ unsigned int duration_ms;
+ struct idle_inject_device *ii_dev =
+ container_of(timer, struct idle_inject_device, timer);
+
+ duration_ms = READ_ONCE(ii_dev->run_duration_ms);
+ duration_ms += READ_ONCE(ii_dev->idle_duration_ms);
+
+ idle_inject_wakeup(ii_dev);
+
+ hrtimer_forward_now(timer, ms_to_ktime(duration_ms));
+
+ return HRTIMER_RESTART;
+}
+
+/**
+ * idle_inject_fn - idle injection work function
+ * @cpu: the CPU owning the task
+ *
+ * This function calls play_idle() to inject a specified amount of CPU idle
+ * time.
+ */
+static void idle_inject_fn(unsigned int cpu)
+{
+ struct idle_inject_device *ii_dev;
+ struct idle_inject_thread *iit;
+
+ ii_dev = per_cpu(idle_inject_device, cpu);
+ iit = per_cpu_ptr(&idle_inject_thread, cpu);
+
+ /*
+ * Let the smpboot main loop know that the task should not run again.
+ */
+ iit->should_run = 0;
+
+ play_idle(READ_ONCE(ii_dev->idle_duration_ms));
+}
+
+/**
+ * idle_inject_set_duration - idle and run duration update helper
+ * @run_duration_ms: CPU run time to allow in milliseconds
+ * @idle_duration_ms: CPU idle time to inject in milliseconds
+ */
+void idle_inject_set_duration(struct idle_inject_device *ii_dev,
+ unsigned int run_duration_ms,
+ unsigned int idle_duration_ms)
+{
+ if (run_duration_ms && idle_duration_ms) {
+ WRITE_ONCE(ii_dev->run_duration_ms, run_duration_ms);
+ WRITE_ONCE(ii_dev->idle_duration_ms, idle_duration_ms);
+ }
+}
+
+/**
+ * idle_inject_get_duration - idle and run duration retrieval helper
+ * @run_duration_ms: memory location to store the current CPU run time
+ * @idle_duration_ms: memory location to store the current CPU idle time
+ */
+void idle_inject_get_duration(struct idle_inject_device *ii_dev,
+ unsigned int *run_duration_ms,
+ unsigned int *idle_duration_ms)
+{
+ *run_duration_ms = READ_ONCE(ii_dev->run_duration_ms);
+ *idle_duration_ms = READ_ONCE(ii_dev->idle_duration_ms);
+}
+
+/**
+ * idle_inject_start - start idle injections
+ * @ii_dev: idle injection control device structure
+ *
+ * The function starts idle injection by first waking up all of the idle
+ * injection kthreads associated with @ii_dev to let them inject CPU idle time
+ * and then sets up a timer to start the next idle injection period.
+ *
+ * Return: -EINVAL if the CPU idle or CPU run time is not set or 0 on success.
+ */
+int idle_inject_start(struct idle_inject_device *ii_dev)
+{
+ unsigned int idle_duration_ms = READ_ONCE(ii_dev->idle_duration_ms);
+ unsigned int run_duration_ms = READ_ONCE(ii_dev->run_duration_ms);
+
+ if (!idle_duration_ms || !run_duration_ms)
+ return -EINVAL;
+
+ pr_debug("Starting injecting idle cycles on CPUs '%*pbl'\n",
+ cpumask_pr_args(to_cpumask(ii_dev->cpumask)));
+
+ idle_inject_wakeup(ii_dev);
+
+ hrtimer_start(&ii_dev->timer,
+ ms_to_ktime(idle_duration_ms + run_duration_ms),
+ HRTIMER_MODE_REL);
+
+ return 0;
+}
+
+/**
+ * idle_inject_stop - stops idle injections
+ * @ii_dev: idle injection control device structure
+ *
+ * The function stops idle injection and waits for the threads to finish work.
+ * If CPU idle time is being injected when this function runs, then it will
+ * wait until the end of the cycle.
+ *
+ * When it returns, there is no more idle injection kthread activity. The
+ * kthreads are scheduled out and the periodic timer is off.
+ */
+void idle_inject_stop(struct idle_inject_device *ii_dev)
+{
+ struct idle_inject_thread *iit;
+ unsigned int cpu;
+
+ pr_debug("Stopping idle injection on CPUs '%*pbl'\n",
+ cpumask_pr_args(to_cpumask(ii_dev->cpumask)));
+
+ hrtimer_cancel(&ii_dev->timer);
+
+ /*
+ * Stopping idle injection requires all of the idle injection kthreads
+ * associated with the given cpumask to be parked and stay that way, so
+ * prevent CPUs from going online at this point. Any CPUs going online
+ * after the loop below will be covered by clearing the should_run flag
+ * that will cause the smpboot main loop to schedule them out.
+ */
+ cpu_hotplug_disable();
+
+ /*
+ * Iterate over all (online + offline) CPUs here in case one of them
+ * goes offline with the should_run flag set so as to prevent its idle
+ * injection kthread from running when the CPU goes online again after
+ * the ii_dev has been freed.
+ */
+ for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
+ iit = per_cpu_ptr(&idle_inject_thread, cpu);
+ iit->should_run = 0;
+
+ wait_task_inactive(iit->tsk, 0);
+ }
+
+ cpu_hotplug_enable();
+}
+
+/**
+ * idle_inject_setup - prepare the current task for idle injection
+ * @cpu: not used
+ *
+ * Called once, this function is in charge of setting the current task's
+ * scheduler parameters to make it an RT task.
+ */
+static void idle_inject_setup(unsigned int cpu)
+{
+ struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
+
+ sched_setscheduler(current, SCHED_FIFO, &param);
+}
+
+/**
+ * idle_inject_should_run - function helper for the smpboot API
+ * @cpu: CPU the kthread is running on
+ *
+ * Return: whether or not the thread can run.
+ */
+static int idle_inject_should_run(unsigned int cpu)
+{
+ struct idle_inject_thread *iit =
+ per_cpu_ptr(&idle_inject_thread, cpu);
+
+ return iit->should_run;
+}
+
+/**
+ * idle_inject_register - initialize idle injection on a set of CPUs
+ * @cpumask: CPUs to be affected by idle injection
+ *
+ * This function creates an idle injection control device structure for the
+ * given set of CPUs and initializes the timer associated with it. It does not
+ * start any injection cycles.
+ *
+ * Return: NULL if memory allocation fails, idle injection control device
+ * pointer on success.
+ */
+struct idle_inject_device *idle_inject_register(struct cpumask *cpumask)
+{
+ struct idle_inject_device *ii_dev;
+ int cpu, cpu_rb;
+
+ ii_dev = kzalloc(sizeof(*ii_dev) + cpumask_size(), GFP_KERNEL);
+ if (!ii_dev)
+ return NULL;
+
+ cpumask_copy(to_cpumask(ii_dev->cpumask), cpumask);
+ hrtimer_init(&ii_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ii_dev->timer.function = idle_inject_timer_fn;
+
+ for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
+
+ if (per_cpu(idle_inject_device, cpu)) {
+ pr_err("cpu%d is already registered\n", cpu);
+ goto out_rollback;
+ }
+
+ per_cpu(idle_inject_device, cpu) = ii_dev;
+ }
+
+ return ii_dev;
+
+out_rollback:
+ for_each_cpu(cpu_rb, to_cpumask(ii_dev->cpumask)) {
+ if (cpu == cpu_rb)
+ break;
+ per_cpu(idle_inject_device, cpu_rb) = NULL;
+ }
+
+ kfree(ii_dev);
+
+ return NULL;
+}
+
+/**
+ * idle_inject_unregister - unregister idle injection control device
+ * @ii_dev: idle injection control device to unregister
+ *
+ * The function stops idle injection for the given control device,
+ * unregisters its kthreads and frees memory allocated when that device was
+ * created.
+ */
+void idle_inject_unregister(struct idle_inject_device *ii_dev)
+{
+ unsigned int cpu;
+
+ idle_inject_stop(ii_dev);
+
+ for_each_cpu(cpu, to_cpumask(ii_dev->cpumask))
+ per_cpu(idle_inject_device, cpu) = NULL;
+
+ kfree(ii_dev);
+}
+
+static struct smp_hotplug_thread idle_inject_threads = {
+ .store = &idle_inject_thread.tsk,
+ .setup = idle_inject_setup,
+ .thread_fn = idle_inject_fn,
+ .thread_comm = "idle_inject/%u",
+ .thread_should_run = idle_inject_should_run,
+};
+
+static int __init idle_inject_init(void)
+{
+ return smpboot_register_percpu_thread(&idle_inject_threads);
+}
+early_initcall(idle_inject_init);
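
For context, a consumer of this new API (say, a thermal throttling driver) would register a cpumask, configure the duty cycle, and start injection. The sketch below is illustrative only and not part of the patch; it assumes the idle_inject_set_duration() and idle_inject_start() helpers defined in the earlier, unshown part of this file, and uses hypothetical CPUs and durations:

	#include <linux/cpumask.h>
	#include <linux/idle_inject.h>
	#include <linux/module.h>

	static struct idle_inject_device *ii_dev;

	static int __init example_inject_init(void)
	{
		struct cpumask cpus;

		/* Hypothetical choice: inject idle cycles on CPUs 0 and 1. */
		cpumask_clear(&cpus);
		cpumask_set_cpu(0, &cpus);
		cpumask_set_cpu(1, &cpus);

		ii_dev = idle_inject_register(&cpus);
		if (!ii_dev)
			return -ENOMEM;

		/* Hypothetical duty cycle: 10 ms of idle per 40 ms of run. */
		idle_inject_set_duration(ii_dev, 40, 10);

		return idle_inject_start(ii_dev);
	}

	static void __exit example_inject_exit(void)
	{
		/* Stops injection (parking the kthreads) before freeing. */
		idle_inject_unregister(ii_dev);
	}

	module_init(example_inject_init);
	module_exit(example_inject_exit);
	MODULE_LICENSE("GPL");
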
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 474c988d2e95..d137c480db46 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -43,7 +43,7 @@ config PTP_1588_CLOCK_DTE
config PTP_1588_CLOCK_QORIQ
tristate "Freescale QorIQ 1588 timer as PTP clock"
- depends on GIANFAR
+ depends on GIANFAR || FSL_DPAA_ETH
depends on PTP_1588_CLOCK
default y
help
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index e8652c148c52..fdd49c26bbcc 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -29,6 +29,7 @@
#include <linux/of_platform.h>
#include <linux/timex.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <linux/fsl/ptp_qoriq.h>
@@ -39,11 +40,12 @@
/* Caller must hold qoriq_ptp->lock. */
static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp)
{
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
u64 ns;
u32 lo, hi;
- lo = qoriq_read(&qoriq_ptp->regs->tmr_cnt_l);
- hi = qoriq_read(&qoriq_ptp->regs->tmr_cnt_h);
+ lo = qoriq_read(&regs->ctrl_regs->tmr_cnt_l);
+ hi = qoriq_read(&regs->ctrl_regs->tmr_cnt_h);
ns = ((u64) hi) << 32;
ns |= lo;
return ns;
@@ -52,16 +54,18 @@ static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp)
/* Caller must hold qoriq_ptp->lock. */
static void tmr_cnt_write(struct qoriq_ptp *qoriq_ptp, u64 ns)
{
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
u32 hi = ns >> 32;
u32 lo = ns & 0xffffffff;
- qoriq_write(&qoriq_ptp->regs->tmr_cnt_l, lo);
- qoriq_write(&qoriq_ptp->regs->tmr_cnt_h, hi);
+ qoriq_write(&regs->ctrl_regs->tmr_cnt_l, lo);
+ qoriq_write(&regs->ctrl_regs->tmr_cnt_h, hi);
}
/* Caller must hold qoriq_ptp->lock. */
static void set_alarm(struct qoriq_ptp *qoriq_ptp)
{
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
u64 ns;
u32 lo, hi;
@@ -70,16 +74,18 @@ static void set_alarm(struct qoriq_ptp *qoriq_ptp)
ns -= qoriq_ptp->tclk_period;
hi = ns >> 32;
lo = ns & 0xffffffff;
- qoriq_write(&qoriq_ptp->regs->tmr_alarm1_l, lo);
- qoriq_write(&qoriq_ptp->regs->tmr_alarm1_h, hi);
+ qoriq_write(&regs->alarm_regs->tmr_alarm1_l, lo);
+ qoriq_write(&regs->alarm_regs->tmr_alarm1_h, hi);
}
/* Caller must hold qoriq_ptp->lock. */
static void set_fipers(struct qoriq_ptp *qoriq_ptp)
{
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+
set_alarm(qoriq_ptp);
- qoriq_write(&qoriq_ptp->regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
- qoriq_write(&qoriq_ptp->regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
+ qoriq_write(&regs->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
+ qoriq_write(&regs->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
}
/*
@@ -89,16 +95,17 @@ static void set_fipers(struct qoriq_ptp *qoriq_ptp)
static irqreturn_t isr(int irq, void *priv)
{
struct qoriq_ptp *qoriq_ptp = priv;
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
struct ptp_clock_event event;
u64 ns;
u32 ack = 0, lo, hi, mask, val;
- val = qoriq_read(&qoriq_ptp->regs->tmr_tevent);
+ val = qoriq_read(&regs->ctrl_regs->tmr_tevent);
if (val & ETS1) {
ack |= ETS1;
- hi = qoriq_read(&qoriq_ptp->regs->tmr_etts1_h);
- lo = qoriq_read(&qoriq_ptp->regs->tmr_etts1_l);
+ hi = qoriq_read(&regs->etts_regs->tmr_etts1_h);
+ lo = qoriq_read(&regs->etts_regs->tmr_etts1_l);
event.type = PTP_CLOCK_EXTTS;
event.index = 0;
event.timestamp = ((u64) hi) << 32;
@@ -108,8 +115,8 @@ static irqreturn_t isr(int irq, void *priv)
if (val & ETS2) {
ack |= ETS2;
- hi = qoriq_read(&qoriq_ptp->regs->tmr_etts2_h);
- lo = qoriq_read(&qoriq_ptp->regs->tmr_etts2_l);
+ hi = qoriq_read(&regs->etts_regs->tmr_etts2_h);
+ lo = qoriq_read(&regs->etts_regs->tmr_etts2_l);
event.type = PTP_CLOCK_EXTTS;
event.index = 1;
event.timestamp = ((u64) hi) << 32;
@@ -130,16 +137,16 @@ static irqreturn_t isr(int irq, void *priv)
hi = ns >> 32;
lo = ns & 0xffffffff;
spin_lock(&qoriq_ptp->lock);
- qoriq_write(&qoriq_ptp->regs->tmr_alarm2_l, lo);
- qoriq_write(&qoriq_ptp->regs->tmr_alarm2_h, hi);
+ qoriq_write(&regs->alarm_regs->tmr_alarm2_l, lo);
+ qoriq_write(&regs->alarm_regs->tmr_alarm2_h, hi);
spin_unlock(&qoriq_ptp->lock);
qoriq_ptp->alarm_value = ns;
} else {
- qoriq_write(&qoriq_ptp->regs->tmr_tevent, ALM2);
+ qoriq_write(&regs->ctrl_regs->tmr_tevent, ALM2);
spin_lock(&qoriq_ptp->lock);
- mask = qoriq_read(&qoriq_ptp->regs->tmr_temask);
+ mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
mask &= ~ALM2EN;
- qoriq_write(&qoriq_ptp->regs->tmr_temask, mask);
+ qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
spin_unlock(&qoriq_ptp->lock);
qoriq_ptp->alarm_value = 0;
qoriq_ptp->alarm_interval = 0;
@@ -153,7 +160,7 @@ static irqreturn_t isr(int irq, void *priv)
}
if (ack) {
- qoriq_write(&qoriq_ptp->regs->tmr_tevent, ack);
+ qoriq_write(&regs->ctrl_regs->tmr_tevent, ack);
return IRQ_HANDLED;
} else
return IRQ_NONE;
@@ -169,6 +176,7 @@ static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
u32 tmr_add;
int neg_adj = 0;
struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
if (scaled_ppm < 0) {
neg_adj = 1;
@@ -186,7 +194,7 @@ static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
- qoriq_write(&qoriq_ptp->regs->tmr_add, tmr_add);
+ qoriq_write(&regs->ctrl_regs->tmr_add, tmr_add);
return 0;
}
@@ -250,6 +258,7 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
unsigned long flags;
u32 bit, mask;
@@ -266,23 +275,23 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp,
return -EINVAL;
}
spin_lock_irqsave(&qoriq_ptp->lock, flags);
- mask = qoriq_read(&qoriq_ptp->regs->tmr_temask);
+ mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
if (on)
mask |= bit;
else
mask &= ~bit;
- qoriq_write(&qoriq_ptp->regs->tmr_temask, mask);
+ qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
return 0;
case PTP_CLK_REQ_PPS:
spin_lock_irqsave(&qoriq_ptp->lock, flags);
- mask = qoriq_read(&qoriq_ptp->regs->tmr_temask);
+ mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
if (on)
mask |= PP1EN;
else
mask &= ~PP1EN;
- qoriq_write(&qoriq_ptp->regs->tmr_temask, mask);
+ qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
return 0;
@@ -309,20 +318,123 @@ static const struct ptp_clock_info ptp_qoriq_caps = {
.enable = ptp_qoriq_enable,
};
+/**
+ * qoriq_ptp_nominal_freq - calculate the nominal frequency from the
+ * reference clock frequency
+ *
+ * @clk_src: reference clock frequency
+ *
+ * The nominal frequency is the desired clock frequency. It must be
+ * lower than the reference clock frequency, and it must be an integer
+ * divisor of 1000 MHz.
+ *
+ * Return: the nominal frequency
+ */
+static u32 qoriq_ptp_nominal_freq(u32 clk_src)
+{
+ u32 remainder = 0;
+
+ clk_src /= 1000000;
+ remainder = clk_src % 100;
+ if (remainder) {
+ clk_src -= remainder;
+ clk_src += 100;
+ }
+
+ do {
+ clk_src -= 100;
+
+ } while (1000 % clk_src);
+
+ return clk_src * 1000000;
+}
+
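To make the rounding above concrete: a hypothetical 250 MHz reference clock becomes 250 in MHz, rounds up to 300, and the loop then steps down to 200, the first multiple of 100 below that which divides 1000, so the function returns 200 MHz. The stand-alone mirror below (a sketch, not driver code) can be compiled to check such values:

	#include <stdio.h>
	#include <stdint.h>

	/* Mirrors the algorithm of qoriq_ptp_nominal_freq() above. */
	static uint32_t nominal_freq(uint32_t clk_src)
	{
		uint32_t remainder;

		clk_src /= 1000000;		/* Hz -> MHz */
		remainder = clk_src % 100;
		if (remainder) {		/* round up to a multiple of 100 MHz */
			clk_src -= remainder;
			clk_src += 100;
		}

		do {				/* step down until it divides 1000 */
			clk_src -= 100;
		} while (1000 % clk_src);

		return clk_src * 1000000;	/* MHz -> Hz */
	}

	int main(void)
	{
		printf("%u\n", nominal_freq(250000000));	/* prints 200000000 */
		return 0;
	}
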
+/**
+ * qoriq_ptp_auto_config - calculate a set of default configurations
+ *
+ * @qoriq_ptp: pointer to qoriq_ptp
+ * @node: pointer to device_node
+ *
+ * If the device tree properties below are not provided, this function
+ * is called to calculate a set of default configurations for them:
+ * "fsl,tclk-period"
+ * "fsl,tmr-prsc"
+ * "fsl,tmr-add"
+ * "fsl,tmr-fiper1"
+ * "fsl,tmr-fiper2"
+ * "fsl,max-adj"
+ *
+ * Return: 0 on success
+ */
+static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp,
+ struct device_node *node)
+{
+ struct clk *clk;
+ u64 freq_comp;
+ u64 max_adj;
+ u32 nominal_freq;
+ u32 remainder = 0;
+ u32 clk_src = 0;
+
+ qoriq_ptp->cksel = DEFAULT_CKSEL;
+
+ clk = of_clk_get(node, 0);
+ if (!IS_ERR(clk)) {
+ clk_src = clk_get_rate(clk);
+ clk_put(clk);
+ }
+
+ if (clk_src <= 100000000UL) {
+ pr_err("error reference clock value, or lower than 100MHz\n");
+ return -EINVAL;
+ }
+
+ nominal_freq = qoriq_ptp_nominal_freq(clk_src);
+ if (!nominal_freq)
+ return -EINVAL;
+
+ qoriq_ptp->tclk_period = 1000000000UL / nominal_freq;
+ qoriq_ptp->tmr_prsc = DEFAULT_TMR_PRSC;
+
+ /* Calculate initial frequency compensation value for TMR_ADD register.
+ * freq_comp = ceil(2^32 / freq_ratio)
+ * freq_ratio = reference_clock_freq / nominal_freq
+ */
+ freq_comp = ((u64)1 << 32) * nominal_freq;
+ freq_comp = div_u64_rem(freq_comp, clk_src, &remainder);
+ if (remainder)
+ freq_comp++;
+
+ qoriq_ptp->tmr_add = freq_comp;
+ qoriq_ptp->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - qoriq_ptp->tclk_period;
+ qoriq_ptp->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - qoriq_ptp->tclk_period;
+
+ /* max_adj = 1000000000 * (freq_ratio - 1.0) - 1
+ * freq_ratio = reference_clock_freq / nominal_freq
+ */
+ max_adj = 1000000000ULL * (clk_src - nominal_freq);
+ max_adj = div_u64(max_adj, nominal_freq) - 1;
+ qoriq_ptp->caps.max_adj = max_adj;
+
+ return 0;
+}
+
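Continuing the hypothetical 250 MHz example: nominal_freq = 200 MHz, so tclk_period = 1000000000 / 200000000 = 5 ns; freq_comp = ceil(2^32 * 200000000 / 250000000) = ceil(2^32 * 0.8) = 3435973837 (0xCCCCCCCD); and max_adj = 1000000000 * (250000000 - 200000000) / 200000000 - 1 = 249999999.
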
static int qoriq_ptp_probe(struct platform_device *dev)
{
struct device_node *node = dev->dev.of_node;
struct qoriq_ptp *qoriq_ptp;
+ struct qoriq_ptp_registers *regs;
struct timespec64 now;
int err = -ENOMEM;
u32 tmr_ctrl;
unsigned long flags;
+ void __iomem *base;
qoriq_ptp = kzalloc(sizeof(*qoriq_ptp), GFP_KERNEL);
if (!qoriq_ptp)
goto no_memory;
- err = -ENODEV;
+ err = -EINVAL;
qoriq_ptp->caps = ptp_qoriq_caps;
@@ -341,17 +453,21 @@ static int qoriq_ptp_probe(struct platform_device *dev)
"fsl,tmr-fiper2", &qoriq_ptp->tmr_fiper2) ||
of_property_read_u32(node,
"fsl,max-adj", &qoriq_ptp->caps.max_adj)) {
- pr_err("device tree node missing required elements\n");
- goto no_node;
+ pr_warn("device tree node missing required elements, try automatic configuration\n");
+
+ if (qoriq_ptp_auto_config(qoriq_ptp, node))
+ goto no_config;
}
+ err = -ENODEV;
+
qoriq_ptp->irq = platform_get_irq(dev, 0);
if (qoriq_ptp->irq < 0) {
pr_err("irq not in device tree\n");
goto no_node;
}
- if (request_irq(qoriq_ptp->irq, isr, 0, DRIVER, qoriq_ptp)) {
+ if (request_irq(qoriq_ptp->irq, isr, IRQF_SHARED, DRIVER, qoriq_ptp)) {
pr_err("request_irq failed\n");
goto no_node;
}
@@ -368,12 +484,27 @@ static int qoriq_ptp_probe(struct platform_device *dev)
spin_lock_init(&qoriq_ptp->lock);
- qoriq_ptp->regs = ioremap(qoriq_ptp->rsrc->start,
- resource_size(qoriq_ptp->rsrc));
- if (!qoriq_ptp->regs) {
+ base = ioremap(qoriq_ptp->rsrc->start,
+ resource_size(qoriq_ptp->rsrc));
+ if (!base) {
pr_err("ioremap ptp registers failed\n");
goto no_ioremap;
}
+
+ qoriq_ptp->base = base;
+
+ if (of_device_is_compatible(node, "fsl,fman-ptp-timer")) {
+ qoriq_ptp->regs.ctrl_regs = base + FMAN_CTRL_REGS_OFFSET;
+ qoriq_ptp->regs.alarm_regs = base + FMAN_ALARM_REGS_OFFSET;
+ qoriq_ptp->regs.fiper_regs = base + FMAN_FIPER_REGS_OFFSET;
+ qoriq_ptp->regs.etts_regs = base + FMAN_ETTS_REGS_OFFSET;
+ } else {
+ qoriq_ptp->regs.ctrl_regs = base + CTRL_REGS_OFFSET;
+ qoriq_ptp->regs.alarm_regs = base + ALARM_REGS_OFFSET;
+ qoriq_ptp->regs.fiper_regs = base + FIPER_REGS_OFFSET;
+ qoriq_ptp->regs.etts_regs = base + ETTS_REGS_OFFSET;
+ }
+
ktime_get_real_ts64(&now);
ptp_qoriq_settime(&qoriq_ptp->caps, &now);
@@ -383,13 +514,14 @@ static int qoriq_ptp_probe(struct platform_device *dev)
spin_lock_irqsave(&qoriq_ptp->lock, flags);
- qoriq_write(&qoriq_ptp->regs->tmr_ctrl, tmr_ctrl);
- qoriq_write(&qoriq_ptp->regs->tmr_add, qoriq_ptp->tmr_add);
- qoriq_write(&qoriq_ptp->regs->tmr_prsc, qoriq_ptp->tmr_prsc);
- qoriq_write(&qoriq_ptp->regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
- qoriq_write(&qoriq_ptp->regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
+ regs = &qoriq_ptp->regs;
+ qoriq_write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl);
+ qoriq_write(&regs->ctrl_regs->tmr_add, qoriq_ptp->tmr_add);
+ qoriq_write(&regs->ctrl_regs->tmr_prsc, qoriq_ptp->tmr_prsc);
+ qoriq_write(&regs->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
+ qoriq_write(&regs->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
set_alarm(qoriq_ptp);
- qoriq_write(&qoriq_ptp->regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD);
+ qoriq_write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD);
spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
@@ -405,11 +537,12 @@ static int qoriq_ptp_probe(struct platform_device *dev)
return 0;
no_clock:
- iounmap(qoriq_ptp->regs);
+ iounmap(qoriq_ptp->base);
no_ioremap:
release_resource(qoriq_ptp->rsrc);
no_resource:
free_irq(qoriq_ptp->irq, qoriq_ptp);
+no_config:
no_node:
kfree(qoriq_ptp);
no_memory:
@@ -419,12 +552,13 @@ no_memory:
static int qoriq_ptp_remove(struct platform_device *dev)
{
struct qoriq_ptp *qoriq_ptp = platform_get_drvdata(dev);
+ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
- qoriq_write(&qoriq_ptp->regs->tmr_temask, 0);
- qoriq_write(&qoriq_ptp->regs->tmr_ctrl, 0);
+ qoriq_write(&regs->ctrl_regs->tmr_temask, 0);
+ qoriq_write(&regs->ctrl_regs->tmr_ctrl, 0);
ptp_clock_unregister(qoriq_ptp->clock);
- iounmap(qoriq_ptp->regs);
+ iounmap(qoriq_ptp->base);
release_resource(qoriq_ptp->rsrc);
free_irq(qoriq_ptp->irq, qoriq_ptp);
kfree(qoriq_ptp);
@@ -434,6 +568,7 @@ static int qoriq_ptp_remove(struct platform_device *dev)
static const struct of_device_id match_table[] = {
{ .compatible = "fsl,etsec-ptp" },
+ { .compatible = "fsl,fman-ptp-timer" },
{},
};
MODULE_DEVICE_TABLE(of, match_table);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 5dbccf5f3037..329cdd33ed62 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -180,9 +180,9 @@ config REGULATOR_BCM590XX
BCM590xx PMUs. This will enable support for the software
controllable LDO/Switching regulators.
-config REGULATOR_BD71837
+config REGULATOR_BD718XX
tristate "ROHM BD71837 Power Regulator"
- depends on MFD_BD71837
+ depends on MFD_ROHM_BD718XX
help
This driver supports voltage regulators on ROHM BD71837 PMIC.
This will enable support for the software controllable buck
@@ -633,12 +633,12 @@ config REGULATOR_PCF50633
on PCF50633
config REGULATOR_PFUZE100
- tristate "Freescale PFUZE100/200/3000 regulator driver"
+ tristate "Freescale PFUZE100/200/3000/3001 regulator driver"
depends on I2C
select REGMAP_I2C
help
Say y here to support the regulators found on the Freescale
- PFUZE100/200/3000 PMIC.
+ PFUZE100/200/3000/3001 PMIC.
config REGULATOR_PV88060
tristate "Powerventure Semiconductor PV88060 regulator"
@@ -682,6 +682,15 @@ config REGULATOR_QCOM_RPM
Qualcomm RPM as a module. The module will be named
"qcom_rpm-regulator".
+config REGULATOR_QCOM_RPMH
+ tristate "Qualcomm Technologies, Inc. RPMh regulator driver"
+ depends on QCOM_RPMH || COMPILE_TEST
+ help
+ This driver supports control of PMIC regulators via the RPMh hardware
+ block found on Qualcomm Technologies Inc. SoCs. RPMh regulator
+ control allows for voting on regulator state between multiple
+ processors within the SoC.
+
config REGULATOR_QCOM_SMD_RPM
tristate "Qualcomm SMD based RPM regulator driver"
depends on QCOM_SMD_RPM
@@ -950,6 +959,14 @@ config REGULATOR_TWL4030
This driver supports the voltage regulators provided by
this family of companion chips.
+config REGULATOR_UNIPHIER
+ tristate "UniPhier regulator driver"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && MFD_SYSCON
+ default ARCH_UNIPHIER
+ help
+ Support for regulators implemented on Socionext UniPhier SoCs.
+
config REGULATOR_VCTRL
tristate "Voltage controlled regulators"
depends on OF
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index bd818ceb7c72..801d9a34a203 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
-obj-$(CONFIG_REGULATOR_BD71837) += bd71837-regulator.o
+obj-$(CONFIG_REGULATOR_BD718XX) += bd71837-regulator.o
obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
@@ -78,6 +78,7 @@ obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
obj-$(CONFIG_REGULATOR_MT6380) += mt6380-regulator.o
obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
+obj-$(CONFIG_REGULATOR_QCOM_RPMH) += qcom-rpmh-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
@@ -118,6 +119,7 @@ obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
obj-$(CONFIG_REGULATOR_TPS80031) += tps80031-regulator.o
obj-$(CONFIG_REGULATOR_TPS65132) += tps65132-regulator.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o
+obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o
obj-$(CONFIG_REGULATOR_VCTRL) += vctrl-regulator.o
obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index f6d6a4ad9e8a..e976d073f28d 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -36,6 +36,8 @@ struct arizona_ldo1 {
struct regulator_consumer_supply supply;
struct regulator_init_data init_data;
+
+ struct gpio_desc *ena_gpiod;
};
static int arizona_ldo1_hc_list_voltage(struct regulator_dev *rdev,
@@ -253,12 +255,17 @@ static int arizona_ldo1_common_init(struct platform_device *pdev,
}
}
- /* We assume that high output = regulator off */
- config.ena_gpiod = devm_gpiod_get_optional(&pdev->dev, "wlf,ldoena",
- GPIOD_OUT_HIGH);
+ /* We assume that high output = regulator off.
+ * Don't use devm here, since the GPIO must be requested against the
+ * parent device; devm cleanup would otherwise happen at the wrong time.
+ */
+ config.ena_gpiod = gpiod_get_optional(parent_dev, "wlf,ldoena",
+ GPIOD_OUT_LOW);
if (IS_ERR(config.ena_gpiod))
return PTR_ERR(config.ena_gpiod);
+ ldo1->ena_gpiod = config.ena_gpiod;
+
if (pdata->init_data)
config.init_data = pdata->init_data;
else
@@ -276,6 +283,9 @@ static int arizona_ldo1_common_init(struct platform_device *pdev,
of_node_put(config.of_node);
if (IS_ERR(ldo1->regulator)) {
+ if (config.ena_gpiod)
+ gpiod_put(config.ena_gpiod);
+
ret = PTR_ERR(ldo1->regulator);
dev_err(&pdev->dev, "Failed to register LDO1 supply: %d\n",
ret);
@@ -334,8 +344,19 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
return ret;
}
+static int arizona_ldo1_remove(struct platform_device *pdev)
+{
+ struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev);
+
+ if (ldo1->ena_gpiod)
+ gpiod_put(ldo1->ena_gpiod);
+
+ return 0;
+}
+
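Since the enable GPIO is now requested with plain gpiod_get_optional() against the parent device, nothing releases it automatically any more; that is why the descriptor is stashed in the new ena_gpiod field and put by hand both on the registration error path above and in the new remove() callback.
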
static struct platform_driver arizona_ldo1_driver = {
.probe = arizona_ldo1_probe,
+ .remove = arizona_ldo1_remove,
.driver = {
.name = "arizona-ldo1",
},
diff --git a/drivers/regulator/bd71837-regulator.c b/drivers/regulator/bd71837-regulator.c
index 6eae4d0432a2..0f8ac8dec3e1 100644
--- a/drivers/regulator/bd71837-regulator.c
+++ b/drivers/regulator/bd71837-regulator.c
@@ -2,19 +2,18 @@
// Copyright (C) 2018 ROHM Semiconductors
// bd71837-regulator.c ROHM BD71837MWV regulator driver
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/gpio.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rohm-bd718x7.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/mfd/bd71837.h>
#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
struct bd71837_pmic {
struct regulator_desc descs[BD71837_REGULATOR_CNT];
@@ -39,7 +38,7 @@ static int bd71837_buck1234_set_ramp_delay(struct regulator_dev *rdev,
int id = rdev->desc->id;
unsigned int ramp_value = BUCK_RAMPRATE_10P00MV;
- dev_dbg(&(pmic->pdev->dev), "Buck[%d] Set Ramp = %d\n", id + 1,
+ dev_dbg(&pmic->pdev->dev, "Buck[%d] Set Ramp = %d\n", id + 1,
ramp_delay);
switch (ramp_delay) {
case 1 ... 1250:
@@ -73,14 +72,10 @@ static int bd71837_buck1234_set_ramp_delay(struct regulator_dev *rdev,
static int bd71837_set_voltage_sel_restricted(struct regulator_dev *rdev,
unsigned int sel)
{
- int ret;
-
- ret = regulator_is_enabled_regmap(rdev);
- if (!ret)
- ret = regulator_set_voltage_sel_regmap(rdev, sel);
- else if (ret == 1)
- ret = -EBUSY;
- return ret;
+ if (regulator_is_enabled_regmap(rdev))
+ return -EBUSY;
+
+ return regulator_set_voltage_sel_regmap(rdev, sel);
}
static struct regulator_ops bd71837_ldo_regulator_ops = {
@@ -195,7 +190,7 @@ static const struct regulator_linear_range bd71837_ldo1_voltage_ranges[] = {
* LDO2
* 0.8 or 0.9V
*/
-const unsigned int ldo_2_volts[] = {
+static const unsigned int ldo_2_volts[] = {
900000, 800000
};
@@ -495,7 +490,6 @@ struct reg_init {
static int bd71837_probe(struct platform_device *pdev)
{
struct bd71837_pmic *pmic;
- struct bd71837_board *pdata;
struct regulator_config config = { 0 };
struct reg_init pmic_regulator_inits[] = {
{
@@ -548,8 +542,7 @@ static int bd71837_probe(struct platform_device *pdev)
int i, err;
- pmic = devm_kzalloc(&pdev->dev, sizeof(struct bd71837_pmic),
- GFP_KERNEL);
+ pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
return -ENOMEM;
@@ -564,7 +557,6 @@ static int bd71837_probe(struct platform_device *pdev)
goto err;
}
platform_set_drvdata(pdev, pmic);
- pdata = dev_get_platdata(pmic->mfd->dev);
/* Register LOCK release */
err = regmap_update_bits(pmic->mfd->regmap, BD71837_REG_REGLOCK,
@@ -573,8 +565,8 @@ static int bd71837_probe(struct platform_device *pdev)
dev_err(&pmic->pdev->dev, "Failed to unlock PMIC (%d)\n", err);
goto err;
} else {
- dev_dbg(&pmic->pdev->dev, "%s: Unlocked lock register 0x%x\n",
- __func__, BD71837_REG_REGLOCK);
+ dev_dbg(&pmic->pdev->dev, "Unlocked lock register 0x%x\n",
+ BD71837_REG_REGLOCK);
}
for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) {
@@ -584,9 +576,6 @@ static int bd71837_probe(struct platform_device *pdev)
desc = &pmic->descs[i];
- if (pdata)
- config.init_data = pdata->init_data[i];
-
config.dev = pdev->dev.parent;
config.driver_data = pmic;
config.regmap = pmic->mfd->regmap;
@@ -619,8 +608,6 @@ static int bd71837_probe(struct platform_device *pdev)
pmic->rdev[i] = rdev;
}
- return 0;
-
err:
return err;
}
@@ -628,7 +615,6 @@ err:
static struct platform_driver bd71837_regulator = {
.driver = {
.name = "bd71837-pmic",
- .owner = THIS_MODULE,
},
.probe = bd71837_probe,
};
diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
index be574eb444eb..274c5ed7cd73 100644
--- a/drivers/regulator/bd9571mwv-regulator.c
+++ b/drivers/regulator/bd9571mwv-regulator.c
@@ -30,6 +30,7 @@ struct bd9571mwv_reg {
/* DDR Backup Power */
u8 bkup_mode_cnt_keepon; /* from "rohm,ddr-backup-power" */
u8 bkup_mode_cnt_saved;
+ bool bkup_mode_enabled;
/* Power switch type */
bool rstbmode_level;
@@ -171,13 +172,60 @@ static int bd9571mwv_bkup_mode_write(struct bd9571mwv *bd, unsigned int mode)
return 0;
}
+static ssize_t backup_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", bdreg->bkup_mode_enabled ? "on" : "off");
+}
+
+static ssize_t backup_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
+ unsigned int mode;
+ int ret;
+
+ if (!count)
+ return 0;
+
+ ret = kstrtobool(buf, &bdreg->bkup_mode_enabled);
+ if (ret)
+ return ret;
+
+ if (!bdreg->rstbmode_level)
+ return count;
+
+ /*
+ * Configure DDR Backup Mode, to change the role of the accessory power
+ * switch from a power switch to a wake-up switch, or vice versa
+ */
+ ret = bd9571mwv_bkup_mode_read(bdreg->bd, &mode);
+ if (ret)
+ return ret;
+
+ mode &= ~BD9571MWV_BKUP_MODE_CNT_KEEPON_MASK;
+ if (bdreg->bkup_mode_enabled)
+ mode |= bdreg->bkup_mode_cnt_keepon;
+
+ ret = bd9571mwv_bkup_mode_write(bdreg->bd, mode);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(backup_mode);
+
static int bd9571mwv_suspend(struct device *dev)
{
struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
unsigned int mode;
int ret;
- if (!device_may_wakeup(dev))
+ if (!bdreg->bkup_mode_enabled)
return 0;
/* Save DDR Backup Mode */
@@ -204,7 +252,7 @@ static int bd9571mwv_resume(struct device *dev)
{
struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
- if (!device_may_wakeup(dev))
+ if (!bdreg->bkup_mode_enabled)
return 0;
/* Restore DDR Backup Mode */
@@ -215,9 +263,15 @@ static const struct dev_pm_ops bd9571mwv_pm = {
SET_SYSTEM_SLEEP_PM_OPS(bd9571mwv_suspend, bd9571mwv_resume)
};
+static int bd9571mwv_regulator_remove(struct platform_device *pdev)
+{
+ device_remove_file(&pdev->dev, &dev_attr_backup_mode);
+ return 0;
+}
#define DEV_PM_OPS &bd9571mwv_pm
#else
#define DEV_PM_OPS NULL
+#define bd9571mwv_regulator_remove NULL
#endif /* CONFIG_PM_SLEEP */
static int bd9571mwv_regulator_probe(struct platform_device *pdev)
@@ -270,14 +324,21 @@ static int bd9571mwv_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
+#ifdef CONFIG_PM_SLEEP
if (bdreg->bkup_mode_cnt_keepon) {
- device_set_wakeup_capable(&pdev->dev, true);
+ int ret;
+
/*
- * Wakeup is enabled by default in pulse mode, but needs
+ * Backup mode is enabled by default in pulse mode, but needs
* explicit user setup in level mode.
*/
- device_set_wakeup_enable(&pdev->dev, bdreg->rstbmode_pulse);
+ bdreg->bkup_mode_enabled = bdreg->rstbmode_pulse;
+
+ ret = device_create_file(&pdev->dev, &dev_attr_backup_mode);
+ if (ret)
+ return ret;
}
+#endif /* CONFIG_PM_SLEEP */
return 0;
}
@@ -294,6 +355,7 @@ static struct platform_driver bd9571mwv_regulator_driver = {
.pm = DEV_PM_OPS,
},
.probe = bd9571mwv_regulator_probe,
+ .remove = bd9571mwv_regulator_remove,
.id_table = bd9571mwv_regulator_id_table,
};
module_platform_driver(bd9571mwv_regulator_driver);
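
With this change, suspend/resume handling is gated on the new backup_mode attribute rather than the device wakeup flag, so user space must opt in explicitly in level mode. A minimal user-space toggle might look like the sketch below; the sysfs path is hypothetical and depends on where the bd9571mwv-regulator platform device is instantiated:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical device path; adjust to the actual instance. */
		FILE *f = fopen("/sys/devices/platform/bd9571mwv-regulator/backup_mode", "w");

		if (!f)
			return 1;
		/* kstrtobool() in the store handler accepts on/off, y/n, 1/0. */
		fputs("on", f);
		return fclose(f) ? 1 : 0;
	}
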
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 6ed568b96c0e..bb1324f93143 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1740,6 +1740,8 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
rdev->use_count = 0;
}
+ device_link_add(dev, &rdev->dev, DL_FLAG_STATELESS);
+
return regulator;
}
@@ -1829,9 +1831,21 @@ static void _regulator_put(struct regulator *regulator)
debugfs_remove_recursive(regulator->debugfs);
- /* remove any sysfs entries */
- if (regulator->dev)
+ if (regulator->dev) {
+ int count = 0;
+ struct regulator *r;
+
+ list_for_each_entry(r, &rdev->consumer_list, list)
+ if (r->dev == regulator->dev)
+ count++;
+
+ if (count == 1)
+ device_link_remove(regulator->dev, &rdev->dev);
+
+ /* remove any sysfs entries */
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
+ }
+
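The consumer-list walk above is evidently there because device_link_add() is called once per _regulator_get(): the link between the consumer and the rdev must only be dropped when the last regulator that this particular consumer device holds from the rdev is being put, hence the count == 1 test.
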
regulator_lock(rdev);
list_del(&regulator->list);
@@ -4441,7 +4455,7 @@ void regulator_unregister(struct regulator_dev *rdev)
EXPORT_SYMBOL_GPL(regulator_unregister);
#ifdef CONFIG_SUSPEND
-static int _regulator_suspend_late(struct device *dev, void *data)
+static int _regulator_suspend(struct device *dev, void *data)
{
struct regulator_dev *rdev = dev_to_rdev(dev);
suspend_state_t *state = data;
@@ -4455,20 +4469,20 @@ static int _regulator_suspend_late(struct device *dev, void *data)
}
/**
- * regulator_suspend_late - prepare regulators for system wide suspend
+ * regulator_suspend - prepare regulators for system wide suspend
* @state: system suspend state
*
* Configure each regulator with its suspend operating parameters for state.
*/
-static int regulator_suspend_late(struct device *dev)
+static int regulator_suspend(struct device *dev)
{
suspend_state_t state = pm_suspend_target_state;
return class_for_each_device(&regulator_class, NULL, &state,
- _regulator_suspend_late);
+ _regulator_suspend);
}
-static int _regulator_resume_early(struct device *dev, void *data)
+static int _regulator_resume(struct device *dev, void *data)
{
int ret = 0;
struct regulator_dev *rdev = dev_to_rdev(dev);
@@ -4481,35 +4495,35 @@ static int _regulator_resume_early(struct device *dev, void *data)
regulator_lock(rdev);
- if (rdev->desc->ops->resume_early &&
+ if (rdev->desc->ops->resume &&
(rstate->enabled == ENABLE_IN_SUSPEND ||
rstate->enabled == DISABLE_IN_SUSPEND))
- ret = rdev->desc->ops->resume_early(rdev);
+ ret = rdev->desc->ops->resume(rdev);
regulator_unlock(rdev);
return ret;
}
-static int regulator_resume_early(struct device *dev)
+static int regulator_resume(struct device *dev)
{
suspend_state_t state = pm_suspend_target_state;
return class_for_each_device(&regulator_class, NULL, &state,
- _regulator_resume_early);
+ _regulator_resume);
}
#else /* !CONFIG_SUSPEND */
-#define regulator_suspend_late NULL
-#define regulator_resume_early NULL
+#define regulator_suspend NULL
+#define regulator_resume NULL
#endif /* !CONFIG_SUSPEND */
#ifdef CONFIG_PM
static const struct dev_pm_ops __maybe_unused regulator_pm_ops = {
- .suspend_late = regulator_suspend_late,
- .resume_early = regulator_resume_early,
+ .suspend = regulator_suspend,
+ .resume = regulator_resume,
};
#endif
diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
index bd910fe123d9..2131457937b7 100644
--- a/drivers/regulator/cpcap-regulator.c
+++ b/drivers/regulator/cpcap-regulator.c
@@ -271,6 +271,29 @@ static struct regulator_ops cpcap_regulator_ops = {
};
static const unsigned int unknown_val_tbl[] = { 0, };
+static const unsigned int sw2_sw4_val_tbl[] = { 612500, 625000, 637500,
+ 650000, 662500, 675000,
+ 687500, 700000, 712500,
+ 725000, 737500, 750000,
+ 762500, 775000, 787500,
+ 800000, 812500, 825000,
+ 837500, 850000, 862500,
+ 875000, 887500, 900000,
+ 912500, 925000, 937500,
+ 950000, 962500, 975000,
+ 987500, 1000000, 1012500,
+ 1025000, 1037500, 1050000,
+ 1062500, 1075000, 1087500,
+ 1100000, 1112500, 1125000,
+ 1137500, 1150000, 1162500,
+ 1175000, 1187500, 1200000,
+ 1212500, 1225000, 1237500,
+ 1250000, 1262500, 1275000,
+ 1287500, 1300000, 1312500,
+ 1325000, 1337500, 1350000,
+ 1362500, 1375000, 1387500,
+ 1400000, 1412500, 1425000,
+ 1437500, 1450000, 1462500, };
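For reference, sw2_sw4_val_tbl covers 612.5 mV to 1462.5 mV in uniform 12.5 mV steps, 69 entries in all.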
static const unsigned int sw5_val_tbl[] = { 0, 5050000, };
static const unsigned int vcam_val_tbl[] = { 2600000, 2700000, 2800000,
2900000, };
@@ -389,6 +412,82 @@ static struct cpcap_regulator omap4_regulators[] = {
{ /* sentinel */ },
};
+static struct cpcap_regulator xoom_regulators[] = {
+ CPCAP_REG(SW1, CPCAP_REG_S1C1, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW1_SEL, unknown_val_tbl,
+ 0, 0, 0, 0, 0, 0),
+ CPCAP_REG(SW2, CPCAP_REG_S2C1, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW2_SEL, sw2_sw4_val_tbl,
+ 0xf00, 0x7f, 0, 0x800, 0, 120),
+ CPCAP_REG(SW3, CPCAP_REG_S3C, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW3_SEL, unknown_val_tbl,
+ 0, 0, 0, 0, 0, 0),
+ CPCAP_REG(SW4, CPCAP_REG_S4C1, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW4_SEL, sw2_sw4_val_tbl,
+ 0xf00, 0x7f, 0, 0x900, 0, 100),
+ CPCAP_REG(SW5, CPCAP_REG_S5C, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW5_SEL, sw5_val_tbl,
+ 0x2a, 0, 0, 0x22, 0, 0),
+ CPCAP_REG(SW6, CPCAP_REG_S6C, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_SW6_SEL, unknown_val_tbl,
+ 0, 0, 0, 0, 0, 0),
+ CPCAP_REG(VCAM, CPCAP_REG_VCAMC, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_VCAM_SEL, vcam_val_tbl,
+ 0x87, 0x30, 4, 0x7, 0, 420),
+ CPCAP_REG(VCSI, CPCAP_REG_VCSIC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VCSI_SEL, vcsi_val_tbl,
+ 0x47, 0x10, 4, 0x7, 0, 350),
+ CPCAP_REG(VDAC, CPCAP_REG_VDACC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VDAC_SEL, vdac_val_tbl,
+ 0x87, 0x30, 4, 0x3, 0, 420),
+ CPCAP_REG(VDIG, CPCAP_REG_VDIGC, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_VDIG_SEL, vdig_val_tbl,
+ 0x87, 0x30, 4, 0x5, 0, 420),
+ CPCAP_REG(VFUSE, CPCAP_REG_VFUSEC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VFUSE_SEL, vfuse_val_tbl,
+ 0x80, 0xf, 0, 0x80, 0, 420),
+ CPCAP_REG(VHVIO, CPCAP_REG_VHVIOC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VHVIO_SEL, vhvio_val_tbl,
+ 0x17, 0, 0, 0x2, 0, 0),
+ CPCAP_REG(VSDIO, CPCAP_REG_VSDIOC, CPCAP_REG_ASSIGN2,
+ CPCAP_BIT_VSDIO_SEL, vsdio_val_tbl,
+ 0x87, 0x38, 3, 0x2, 0, 420),
+ CPCAP_REG(VPLL, CPCAP_REG_VPLLC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VPLL_SEL, vpll_val_tbl,
+ 0x43, 0x18, 3, 0x1, 0, 420),
+ CPCAP_REG(VRF1, CPCAP_REG_VRF1C, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VRF1_SEL, vrf1_val_tbl,
+ 0xac, 0x2, 1, 0xc, 0, 10),
+ CPCAP_REG(VRF2, CPCAP_REG_VRF2C, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VRF2_SEL, vrf2_val_tbl,
+ 0x23, 0x8, 3, 0x3, 0, 10),
+ CPCAP_REG(VRFREF, CPCAP_REG_VRFREFC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VRFREF_SEL, vrfref_val_tbl,
+ 0x23, 0x8, 3, 0x3, 0, 420),
+ CPCAP_REG(VWLAN1, CPCAP_REG_VWLAN1C, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VWLAN1_SEL, vwlan1_val_tbl,
+ 0x47, 0x10, 4, 0x5, 0, 420),
+ CPCAP_REG(VWLAN2, CPCAP_REG_VWLAN2C, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VWLAN2_SEL, vwlan2_val_tbl,
+ 0x20c, 0xc0, 6, 0x8, 0, 420),
+ CPCAP_REG(VSIM, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
+ 0xffff, vsim_val_tbl,
+ 0x23, 0x8, 3, 0x3, 0, 420),
+ CPCAP_REG(VSIMCARD, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
+ 0xffff, vsimcard_val_tbl,
+ 0x1e80, 0x8, 3, 0x1e00, 0, 420),
+ CPCAP_REG(VVIB, CPCAP_REG_VVIBC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VVIB_SEL, vvib_val_tbl,
+ 0x1, 0xc, 2, 0, 0x1, 500),
+ CPCAP_REG(VUSB, CPCAP_REG_VUSBC, CPCAP_REG_ASSIGN3,
+ CPCAP_BIT_VUSB_SEL, vusb_val_tbl,
+ 0x11c, 0x40, 6, 0xc, 0, 0),
+ CPCAP_REG(VAUDIO, CPCAP_REG_VAUDIOC, CPCAP_REG_ASSIGN4,
+ CPCAP_BIT_VAUDIO_SEL, vaudio_val_tbl,
+ 0x16, 0x1, 0, 0x4, 0, 0),
+ { /* sentinel */ },
+};
+
static const struct of_device_id cpcap_regulator_id_table[] = {
{
.compatible = "motorola,cpcap-regulator",
@@ -397,6 +496,10 @@ static const struct of_device_id cpcap_regulator_id_table[] = {
.compatible = "motorola,mapphone-cpcap-regulator",
.data = omap4_regulators,
},
+ {
+ .compatible = "motorola,xoom-cpcap-regulator",
+ .data = xoom_regulators,
+ },
{},
};
MODULE_DEVICE_TABLE(of, cpcap_regulator_id_table);
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index 0db288ce319c..bc7f4751bf9c 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -1,19 +1,9 @@
-/*
- * max14577.c - Regulator driver for the Maxim 14577/77836
- *
- * Copyright (C) 2013,2014 Samsung Electronics
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max14577.c - Regulator driver for the Maxim 14577/77836
+//
+// Copyright (C) 2013,2014 Samsung Electronics
+// Krzysztof Kozlowski <krzk@kernel.org>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/regulator/max77686-regulator.c b/drivers/regulator/max77686-regulator.c
index c301f3733475..bee060937f56 100644
--- a/drivers/regulator/max77686-regulator.c
+++ b/drivers/regulator/max77686-regulator.c
@@ -1,26 +1,12 @@
-/*
- * max77686.c - Regulator driver for the Maxim 77686
- *
- * Copyright (C) 2012 Samsung Electronics
- * Chiwoong Byun <woong.byun@samsung.com>
- * Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8997.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77686.c - Regulator driver for the Maxim 77686
+//
+// Copyright (C) 2012 Samsung Electronics
+// Chiwoong Byun <woong.byun@samsung.com>
+// Jonghwa Lee <jonghwa3.lee@samsung.com>
+//
+// This driver is based on max8997.c
#include <linux/kernel.h>
#include <linux/bug.h>
diff --git a/drivers/regulator/max77693-regulator.c b/drivers/regulator/max77693-regulator.c
index e7000e777292..077ecbbfdf76 100644
--- a/drivers/regulator/max77693-regulator.c
+++ b/drivers/regulator/max77693-regulator.c
@@ -1,26 +1,12 @@
-/*
- * max77693.c - Regulator driver for the Maxim 77693 and 77843
- *
- * Copyright (C) 2013-2015 Samsung Electronics
- * Jonghwa Lee <jonghwa3.lee@samsung.com>
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max77686.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77693.c - Regulator driver for the Maxim 77693 and 77843
+//
+// Copyright (C) 2013-2015 Samsung Electronics
+// Jonghwa Lee <jonghwa3.lee@samsung.com>
+// Krzysztof Kozlowski <krzk@kernel.org>
+//
+// This driver is based on max77686.c
#include <linux/err.h>
#include <linux/slab.h>
diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
index b6261903818c..c30cf5c9f2de 100644
--- a/drivers/regulator/max77802-regulator.c
+++ b/drivers/regulator/max77802-regulator.c
@@ -1,25 +1,15 @@
-/*
- * max77802.c - Regulator driver for the Maxim 77802
- *
- * Copyright (C) 2013-2014 Google, Inc
- * Simon Glass <sjg@chromium.org>
- *
- * Copyright (C) 2012 Samsung Electronics
- * Chiwoong Byun <woong.byun@samsung.com>
- * Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver is based on max8997.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77802.c - Regulator driver for the Maxim 77802
+//
+// Copyright (C) 2013-2014 Google, Inc
+// Simon Glass <sjg@chromium.org>
+//
+// Copyright (C) 2012 Samsung Electronics
+// Chiwoong Byun <woong.byun@samsung.com>
+// Jonghwa Lee <jonghwa3.lee@samsung.com>
+//
+// This driver is based on max8997.c
#include <linux/kernel.h>
#include <linux/bug.h>
diff --git a/drivers/regulator/max8997-regulator.c b/drivers/regulator/max8997-regulator.c
index a8ea30ee18a6..ad0c806b0737 100644
--- a/drivers/regulator/max8997-regulator.c
+++ b/drivers/regulator/max8997-regulator.c
@@ -1,25 +1,11 @@
-/*
- * max8997.c - Regulator driver for the Maxim 8997/8966
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8998.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8997.c - Regulator driver for the Maxim 8997/8966
+//
+// Copyright (C) 2011 Samsung Electronics
+// MyungJoo Ham <myungjoo.ham@samsung.com>
+//
+// This driver is based on max8998.c
#include <linux/bug.h>
#include <linux/err.h>
@@ -165,8 +151,7 @@ static int max8997_list_voltage(struct regulator_dev *rdev,
int rid = rdev_get_id(rdev);
int val;
- if (rid >= ARRAY_SIZE(reg_voltage_map) ||
- rid < 0)
+ if (rid < 0 || rid >= ARRAY_SIZE(reg_voltage_map))
return -EINVAL;
desc = reg_voltage_map[rid];
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 6b9f262ebbb0..271bb736f3f5 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -1,24 +1,10 @@
-/*
- * max8998.c - Voltage regulator driver for the Maxim 8998
- *
- * Copyright (C) 2009-2010 Samsung Electronics
- * Kyungmin Park <kyungmin.park@samsung.com>
- * Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8998.c - Voltage regulator driver for the Maxim 8998
+//
+// Copyright (C) 2009-2010 Samsung Electronics
+// Kyungmin Park <kyungmin.park@samsung.com>
+// Marek Szyprowski <m.szyprowski@samsung.com>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 8d9dbcc775ea..31c3a236120a 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -17,6 +17,8 @@
#include <linux/slab.h>
#include <linux/regmap.h>
+#define PFUZE_FLAG_DISABLE_SW BIT(1)
+
#define PFUZE_NUMREGS 128
#define PFUZE100_VOL_OFFSET 0
#define PFUZE100_STANDBY_OFFSET 1
@@ -44,16 +46,18 @@
#define PFUZE100_VGEN5VOL 0x70
#define PFUZE100_VGEN6VOL 0x71
-enum chips { PFUZE100, PFUZE200, PFUZE3000 = 3 };
+enum chips { PFUZE100, PFUZE200, PFUZE3000 = 3, PFUZE3001 = 0x31, };
struct pfuze_regulator {
struct regulator_desc desc;
unsigned char stby_reg;
unsigned char stby_mask;
+ bool sw_reg;
};
struct pfuze_chip {
int chip_id;
+ int flags;
struct regmap *regmap;
struct device *dev;
struct pfuze_regulator regulator_descs[PFUZE100_MAX_REGULATOR];
@@ -92,6 +96,7 @@ static const struct i2c_device_id pfuze_device_id[] = {
{.name = "pfuze100", .driver_data = PFUZE100},
{.name = "pfuze200", .driver_data = PFUZE200},
{.name = "pfuze3000", .driver_data = PFUZE3000},
+ {.name = "pfuze3001", .driver_data = PFUZE3001},
{ }
};
MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
@@ -100,6 +105,7 @@ static const struct of_device_id pfuze_dt_ids[] = {
{ .compatible = "fsl,pfuze100", .data = (void *)PFUZE100},
{ .compatible = "fsl,pfuze200", .data = (void *)PFUZE200},
{ .compatible = "fsl,pfuze3000", .data = (void *)PFUZE3000},
+ { .compatible = "fsl,pfuze3001", .data = (void *)PFUZE3001},
{ }
};
MODULE_DEVICE_TABLE(of, pfuze_dt_ids);
@@ -108,10 +114,28 @@ static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
struct pfuze_chip *pfuze100 = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
+ bool reg_has_ramp_delay;
unsigned int ramp_bits;
int ret;
- if (id < PFUZE100_SWBST) {
+ switch (pfuze100->chip_id) {
+ case PFUZE3001:
+ /* no dynamic voltage scaling for PF3001 */
+ reg_has_ramp_delay = false;
+ break;
+ case PFUZE3000:
+ reg_has_ramp_delay = (id < PFUZE3000_SWBST);
+ break;
+ case PFUZE200:
+ reg_has_ramp_delay = (id < PFUZE200_SWBST);
+ break;
+ case PFUZE100:
+ default:
+ reg_has_ramp_delay = (id < PFUZE100_SWBST);
+ break;
+ }
+
+ if (reg_has_ramp_delay) {
ramp_delay = 12500 / ramp_delay;
ramp_bits = (ramp_delay >> 1) - (ramp_delay >> 3);
ret = regmap_update_bits(pfuze100->regmap,
@@ -119,8 +143,9 @@ static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
0xc0, ramp_bits << 6);
if (ret < 0)
dev_err(pfuze100->dev, "ramp failed, err %d\n", ret);
- } else
+ } else {
ret = -EACCES;
+ }
return ret;
}
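As a worked example of the ramp formula left untouched here: a requested ramp_delay of 6250 uV/us gives 12500 / 6250 = 2, so ramp_bits = (2 >> 1) - (2 >> 3) = 1, which lands in bits 7:6 of the mode register via the 0xc0 mask.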
@@ -142,6 +167,14 @@ static const struct regulator_ops pfuze100_fixed_regulator_ops = {
};
static const struct regulator_ops pfuze100_sw_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = pfuze100_set_ramp_delay,
+};
+
+static const struct regulator_ops pfuze100_sw_disable_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -192,13 +225,11 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
.vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
.vsel_mask = 0x3f, \
.enable_reg = (base) + PFUZE100_MODE_OFFSET, \
- .enable_val = 0xc, \
- .disable_val = 0x0, \
.enable_mask = 0xf, \
- .enable_time = 500, \
}, \
.stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
.stby_mask = 0x3f, \
+ .sw_reg = true, \
}
#define PFUZE100_SWB_REG(_chip, _name, base, mask, voltages) \
@@ -361,6 +392,19 @@ static struct pfuze_regulator pfuze3000_regulators[] = {
PFUZE100_VGEN_REG(PFUZE3000, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
};
+static struct pfuze_regulator pfuze3001_regulators[] = {
+ PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+ PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+ PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
+ PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+ PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE3001, VLDO2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
+ PFUZE3000_VCC_REG(PFUZE3001, VCCSD, PFUZE100_VGEN3VOL, 2850000, 3300000, 150000),
+ PFUZE3000_VCC_REG(PFUZE3001, V33, PFUZE100_VGEN4VOL, 2850000, 3300000, 150000),
+ PFUZE100_VGEN_REG(PFUZE3001, VLDO3, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(PFUZE3001, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
+};
+
#ifdef CONFIG_OF
/* PFUZE100 */
static struct of_regulator_match pfuze100_matches[] = {
@@ -418,6 +462,21 @@ static struct of_regulator_match pfuze3000_matches[] = {
{ .name = "vldo4", },
};
+/* PFUZE3001 */
+static struct of_regulator_match pfuze3001_matches[] = {
+
+ { .name = "sw1", },
+ { .name = "sw2", },
+ { .name = "sw3", },
+ { .name = "vsnvs", },
+ { .name = "vldo1", },
+ { .name = "vldo2", },
+ { .name = "vccsd", },
+ { .name = "v33", },
+ { .name = "vldo3", },
+ { .name = "vldo4", },
+};
+
static struct of_regulator_match *pfuze_matches;
static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
@@ -430,6 +489,9 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
if (!np)
return -EINVAL;
+ if (of_property_read_bool(np, "fsl,pfuze-support-disable-sw"))
+ chip->flags |= PFUZE_FLAG_DISABLE_SW;
+
parent = of_get_child_by_name(np, "regulators");
if (!parent) {
dev_err(dev, "regulators node not found\n");
@@ -437,6 +499,11 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
}
switch (chip->chip_id) {
+ case PFUZE3001:
+ pfuze_matches = pfuze3001_matches;
+ ret = of_regulator_match(dev, parent, pfuze3001_matches,
+ ARRAY_SIZE(pfuze3001_matches));
+ break;
case PFUZE3000:
pfuze_matches = pfuze3000_matches;
ret = of_regulator_match(dev, parent, pfuze3000_matches,
@@ -508,7 +575,8 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
*/
dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
} else if ((value & 0x0f) != pfuze_chip->chip_id &&
- (value & 0xf0) >> 4 != pfuze_chip->chip_id) {
+ (value & 0xf0) >> 4 != pfuze_chip->chip_id &&
+ (value != pfuze_chip->chip_id)) {
/* device id NOT match with your setting */
dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
return -ENODEV;
@@ -588,6 +656,13 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
/* use the right regulators after identify the right device */
switch (pfuze_chip->chip_id) {
+ case PFUZE3001:
+ pfuze_chip->pfuze_regulators = pfuze3001_regulators;
+ regulator_num = ARRAY_SIZE(pfuze3001_regulators);
+ sw_check_start = PFUZE3001_SW2;
+ sw_check_end = PFUZE3001_SW2;
+ sw_hi = 1 << 3;
+ break;
case PFUZE3000:
pfuze_chip->pfuze_regulators = pfuze3000_regulators;
regulator_num = ARRAY_SIZE(pfuze3000_regulators);
@@ -611,7 +686,8 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
}
dev_info(&client->dev, "pfuze%s found.\n",
(pfuze_chip->chip_id == PFUZE100) ? "100" :
- ((pfuze_chip->chip_id == PFUZE200) ? "200" : "3000"));
+ (((pfuze_chip->chip_id == PFUZE200) ? "200" :
+ ((pfuze_chip->chip_id == PFUZE3000) ? "3000" : "3001"))));
memcpy(pfuze_chip->regulator_descs, pfuze_chip->pfuze_regulators,
sizeof(pfuze_chip->regulator_descs));
@@ -636,7 +712,8 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
if (i >= sw_check_start && i <= sw_check_end) {
regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
if (val & sw_hi) {
- if (pfuze_chip->chip_id == PFUZE3000) {
+ if (pfuze_chip->chip_id == PFUZE3000 ||
+ pfuze_chip->chip_id == PFUZE3001) {
desc->volt_table = pfuze3000_sw2hi;
desc->n_voltages = ARRAY_SIZE(pfuze3000_sw2hi);
} else {
@@ -647,6 +724,21 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
}
}
+ /*
+ * Allow SW regulators to turn off. Checking it through a flag is
+ * a workaround to keep backward compatibility with existing old
+ * dtbs, which may rely on the fact that we never disabled the
+ * switched regulators before.
+ */
+ if (pfuze_chip->flags & PFUZE_FLAG_DISABLE_SW) {
+ if (pfuze_chip->regulator_descs[i].sw_reg) {
+ desc->ops = &pfuze100_sw_disable_regulator_ops;
+ desc->enable_val = 0x8;
+ desc->disable_val = 0x0;
+ desc->enable_time = 500;
+ }
+ }
+
config.dev = &client->dev;
config.init_data = init_data;
config.driver_data = pfuze_chip;
@@ -675,5 +767,5 @@ static struct i2c_driver pfuze_driver = {
module_i2c_driver(pfuze_driver);
MODULE_AUTHOR("Robin Gong <b38343@freescale.com>");
-MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/200/3000 PMIC");
+MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/200/3000/3001 PMIC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
new file mode 100644
index 000000000000..9f27daebd8c8
--- /dev/null
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+/**
+ * enum rpmh_regulator_type - supported RPMh accelerator types
+ * %VRM: RPMh VRM accelerator which supports voting on enable, voltage,
+ * and mode of LDO, SMPS, and BOB type PMIC regulators.
+ * %XOB: RPMh XOB accelerator which supports voting on the enable state
+ * of PMIC regulators.
+ */
+enum rpmh_regulator_type {
+ VRM,
+ XOB,
+};
+
+#define RPMH_REGULATOR_REG_VRM_VOLTAGE 0x0
+#define RPMH_REGULATOR_REG_ENABLE 0x4
+#define RPMH_REGULATOR_REG_VRM_MODE 0x8
+
+#define PMIC4_LDO_MODE_RETENTION 4
+#define PMIC4_LDO_MODE_LPM 5
+#define PMIC4_LDO_MODE_HPM 7
+
+#define PMIC4_SMPS_MODE_RETENTION 4
+#define PMIC4_SMPS_MODE_PFM 5
+#define PMIC4_SMPS_MODE_AUTO 6
+#define PMIC4_SMPS_MODE_PWM 7
+
+#define PMIC4_BOB_MODE_PASS 0
+#define PMIC4_BOB_MODE_PFM 1
+#define PMIC4_BOB_MODE_AUTO 2
+#define PMIC4_BOB_MODE_PWM 3
+
+/**
+ * struct rpmh_vreg_hw_data - RPMh regulator hardware configurations
+ * @regulator_type: RPMh accelerator type used to manage this
+ * regulator
+ * @ops: Pointer to regulator ops callback structure
+ * @voltage_range: The single range of voltages supported by this
+ * PMIC regulator type
+ * @n_voltages: The number of unique voltage set points defined
+ * by voltage_range
+ * @hpm_min_load_uA: Minimum load current in microamps that requires
+ * high power mode (HPM) operation. This is used
+ * for LDO hardware type regulators only.
+ * @pmic_mode_map: Array indexed by regulator framework mode
+ * containing PMIC hardware modes. Must be large
+ * enough to index all framework modes supported
+ * by this regulator hardware type.
+ * @of_map_mode: Maps an RPMH_REGULATOR_MODE_* mode value defined
+ * in device tree to a regulator framework mode
+ */
+struct rpmh_vreg_hw_data {
+ enum rpmh_regulator_type regulator_type;
+ const struct regulator_ops *ops;
+ const struct regulator_linear_range voltage_range;
+ int n_voltages;
+ int hpm_min_load_uA;
+ const int *pmic_mode_map;
+ unsigned int (*of_map_mode)(unsigned int mode);
+};
+
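To illustrate the pmic_mode_map contract, a hypothetical table for a PMIC4 SMPS might look as follows, using the PMIC4_SMPS_* defines above (a sketch only; the driver's real tables live later in this file, outside the shown hunk):

	/* Indexed by REGULATOR_MODE_* (bitmask values, so the array is
	 * sparse); entries left at zero would need to be set to -EINVAL
	 * in a complete table.
	 */
	static const int example_mode_map_pmic4_smps[REGULATOR_MODE_STANDBY + 1] = {
		[REGULATOR_MODE_INVALID] = -EINVAL,
		[REGULATOR_MODE_FAST]    = PMIC4_SMPS_MODE_PWM,
		[REGULATOR_MODE_NORMAL]  = PMIC4_SMPS_MODE_AUTO,
		[REGULATOR_MODE_IDLE]    = PMIC4_SMPS_MODE_PFM,
		[REGULATOR_MODE_STANDBY] = PMIC4_SMPS_MODE_RETENTION,
	};
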
+/**
+ * struct rpmh_vreg - individual RPMh regulator data structure encapsulating a
+ * single regulator device
+ * @dev: Device pointer for the top-level PMIC RPMh
+ * regulator parent device. This is used as a
+ * handle in RPMh write requests.
+ * @addr: Base address of the regulator resource within
+ * an RPMh accelerator
+ * @rdesc: Regulator descriptor
+ * @hw_data: PMIC regulator configuration data for this RPMh
+ * regulator
+ * @always_wait_for_ack: Boolean flag indicating if a request must always
+ * wait for an ACK from RPMh before continuing even
+ * if it corresponds to a strictly lower power
+ * state (e.g. enabled --> disabled).
+ * @enabled: Flag indicating if the regulator is enabled or
+ * not
+ * @bypassed: Boolean indicating if the regulator is in
+ * bypass (pass-through) mode or not. This is
+ * only used by BOB rpmh-regulator resources.
+ * @voltage_selector: Selector used for get_voltage_sel() and
+ * set_voltage_sel() callbacks
+ * @mode: RPMh VRM regulator current framework mode
+ */
+struct rpmh_vreg {
+ struct device *dev;
+ u32 addr;
+ struct regulator_desc rdesc;
+ const struct rpmh_vreg_hw_data *hw_data;
+ bool always_wait_for_ack;
+
+ int enabled;
+ bool bypassed;
+ int voltage_selector;
+ unsigned int mode;
+};
+
+/**
+ * struct rpmh_vreg_init_data - initialization data for an RPMh regulator
+ * @name: Name for the regulator which also corresponds
+ * to the device tree subnode name of the regulator
+ * @resource_name: RPMh regulator resource name format string.
+ * This must include exactly one field: '%s' which
+ * is filled at run-time with the PMIC ID provided
+ * by device tree property qcom,pmic-id. Example:
+ * "ldo%s1" for RPMh resource "ldoa1".
+ * @supply_name: Parent supply regulator name
+ * @hw_data: Configuration data for this PMIC regulator type
+ */
+struct rpmh_vreg_init_data {
+ const char *name;
+ const char *resource_name;
+ const char *supply_name;
+ const struct rpmh_vreg_hw_data *hw_data;
+};
+
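A quick illustration of the resource_name expansion described above, using a hypothetical pmic-id of "a" (user-space C, just to show the string handling):

	#include <stdio.h>

	int main(void)
	{
		char buf[20];

		/* "ldo%s1" combined with qcom,pmic-id "a" -> "ldoa1" */
		snprintf(buf, sizeof(buf), "ldo%s1", "a");
		printf("%s\n", buf);	/* prints: ldoa1 */
		return 0;
	}
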
+/**
+ * rpmh_regulator_send_request() - send the request to RPMh
+ * @vreg: Pointer to the RPMh regulator
+ * @cmd: Pointer to the RPMh command to send
+ * @wait_for_ack: Boolean indicating if execution must wait until the
+ * request has been acknowledged as complete
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_send_request(struct rpmh_vreg *vreg,
+ struct tcs_cmd *cmd, bool wait_for_ack)
+{
+ int ret;
+
+ if (wait_for_ack || vreg->always_wait_for_ack)
+ ret = rpmh_write(vreg->dev, RPMH_ACTIVE_ONLY_STATE, cmd, 1);
+ else
+ ret = rpmh_write_async(vreg->dev, RPMH_ACTIVE_ONLY_STATE, cmd,
+ 1);
+
+ return ret;
+}
+
+static int _rpmh_regulator_vrm_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector, bool wait_for_ack)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+ struct tcs_cmd cmd = {
+ .addr = vreg->addr + RPMH_REGULATOR_REG_VRM_VOLTAGE,
+ };
+ int ret;
+
+ /* VRM voltage control register is set with voltage in millivolts. */
+ cmd.data = DIV_ROUND_UP(regulator_list_voltage_linear_range(rdev,
+ selector), 1000);
+
+ ret = rpmh_regulator_send_request(vreg, &cmd, wait_for_ack);
+ if (!ret)
+ vreg->voltage_selector = selector;
+
+ return ret;
+}
+
+static int rpmh_regulator_vrm_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+
+ if (vreg->enabled == -EINVAL) {
+ /*
+ * Cache the voltage and send it later when the regulator is
+ * enabled or disabled.
+ */
+ vreg->voltage_selector = selector;
+ return 0;
+ }
+
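+ /*
+ * Only a voltage increase needs to block until RPMh acknowledges
+ * it; moving to a lower selector is a strictly lower power state
+ * and may be sent asynchronously.
+ */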
+ return _rpmh_regulator_vrm_set_voltage_sel(rdev, selector,
+ selector > vreg->voltage_selector);
+}
+
+static int rpmh_regulator_vrm_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->voltage_selector;
+}
+
+static int rpmh_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->enabled;
+}
+
+static int rpmh_regulator_set_enable_state(struct regulator_dev *rdev,
+ bool enable)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+ struct tcs_cmd cmd = {
+ .addr = vreg->addr + RPMH_REGULATOR_REG_ENABLE,
+ .data = enable,
+ };
+ int ret;
+
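+ /*
+ * Flush any voltage set point that was cached while the enable
+ * state was still unknown before changing the enable state.
+ */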
+ if (vreg->enabled == -EINVAL &&
+ vreg->voltage_selector != -ENOTRECOVERABLE) {
+ ret = _rpmh_regulator_vrm_set_voltage_sel(rdev,
+ vreg->voltage_selector, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = rpmh_regulator_send_request(vreg, &cmd, enable);
+ if (!ret)
+ vreg->enabled = enable;
+
+ return ret;
+}
+
+static int rpmh_regulator_enable(struct regulator_dev *rdev)
+{
+ return rpmh_regulator_set_enable_state(rdev, true);
+}
+
+static int rpmh_regulator_disable(struct regulator_dev *rdev)
+{
+ return rpmh_regulator_set_enable_state(rdev, false);
+}
+
+static int rpmh_regulator_vrm_set_mode_bypass(struct rpmh_vreg *vreg,
+ unsigned int mode, bool bypassed)
+{
+ struct tcs_cmd cmd = {
+ .addr = vreg->addr + RPMH_REGULATOR_REG_VRM_MODE,
+ };
+ int pmic_mode;
+
+ if (mode > REGULATOR_MODE_STANDBY)
+ return -EINVAL;
+
+ pmic_mode = vreg->hw_data->pmic_mode_map[mode];
+ if (pmic_mode < 0)
+ return pmic_mode;
+
+ if (bypassed)
+ cmd.data = PMIC4_BOB_MODE_PASS;
+ else
+ cmd.data = pmic_mode;
+
+ return rpmh_regulator_send_request(vreg, &cmd, true);
+}
+
+static int rpmh_regulator_vrm_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (mode == vreg->mode)
+ return 0;
+
+ ret = rpmh_regulator_vrm_set_mode_bypass(vreg, mode, vreg->bypassed);
+ if (!ret)
+ vreg->mode = mode;
+
+ return ret;
+}
+
+static unsigned int rpmh_regulator_vrm_get_mode(struct regulator_dev *rdev)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->mode;
+}
+
+/**
+ * rpmh_regulator_vrm_set_load() - set the regulator mode based upon the load
+ * current requested
+ * @rdev: Regulator device pointer for the rpmh-regulator
+ * @load_uA: Aggregated load current in microamps
+ *
+ * This function is used in the regulator_ops for VRM type RPMh regulator
+ * devices.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_vrm_set_load(struct regulator_dev *rdev, int load_uA)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+ unsigned int mode;
+
+ if (load_uA >= vreg->hw_data->hpm_min_load_uA)
+ mode = REGULATOR_MODE_NORMAL;
+ else
+ mode = REGULATOR_MODE_IDLE;
+
+ return rpmh_regulator_vrm_set_mode(rdev, mode);
+}
+
+static int rpmh_regulator_vrm_set_bypass(struct regulator_dev *rdev,
+ bool enable)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (vreg->bypassed == enable)
+ return 0;
+
+ ret = rpmh_regulator_vrm_set_mode_bypass(vreg, vreg->mode, enable);
+ if (!ret)
+ vreg->bypassed = enable;
+
+ return ret;
+}
+
+static int rpmh_regulator_vrm_get_bypass(struct regulator_dev *rdev,
+ bool *enable)
+{
+ struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+
+ *enable = vreg->bypassed;
+
+ return 0;
+}
+
+static const struct regulator_ops rpmh_regulator_vrm_ops = {
+ .enable = rpmh_regulator_enable,
+ .disable = rpmh_regulator_disable,
+ .is_enabled = rpmh_regulator_is_enabled,
+ .set_voltage_sel = rpmh_regulator_vrm_set_voltage_sel,
+ .get_voltage_sel = rpmh_regulator_vrm_get_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_mode = rpmh_regulator_vrm_set_mode,
+ .get_mode = rpmh_regulator_vrm_get_mode,
+};
+
+static const struct regulator_ops rpmh_regulator_vrm_drms_ops = {
+ .enable = rpmh_regulator_enable,
+ .disable = rpmh_regulator_disable,
+ .is_enabled = rpmh_regulator_is_enabled,
+ .set_voltage_sel = rpmh_regulator_vrm_set_voltage_sel,
+ .get_voltage_sel = rpmh_regulator_vrm_get_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_mode = rpmh_regulator_vrm_set_mode,
+ .get_mode = rpmh_regulator_vrm_get_mode,
+ .set_load = rpmh_regulator_vrm_set_load,
+};
+
+static const struct regulator_ops rpmh_regulator_vrm_bypass_ops = {
+ .enable = rpmh_regulator_enable,
+ .disable = rpmh_regulator_disable,
+ .is_enabled = rpmh_regulator_is_enabled,
+ .set_voltage_sel = rpmh_regulator_vrm_set_voltage_sel,
+ .get_voltage_sel = rpmh_regulator_vrm_get_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_mode = rpmh_regulator_vrm_set_mode,
+ .get_mode = rpmh_regulator_vrm_get_mode,
+ .set_bypass = rpmh_regulator_vrm_set_bypass,
+ .get_bypass = rpmh_regulator_vrm_get_bypass,
+};
+
+static const struct regulator_ops rpmh_regulator_xob_ops = {
+ .enable = rpmh_regulator_enable,
+ .disable = rpmh_regulator_disable,
+ .is_enabled = rpmh_regulator_is_enabled,
+};
+
+/**
+ * rpmh_regulator_init_vreg() - initialize all attributes of an rpmh-regulator
+ * @vreg: Pointer to the individual rpmh-regulator resource
+ * @dev: Pointer to the top level rpmh-regulator PMIC device
+ * @node: Pointer to the individual rpmh-regulator resource
+ * device node
+ * @pmic_id: String used to identify the top level rpmh-regulator
+ * PMIC device on the board
+ * @pmic_rpmh_data: Pointer to a null-terminated array of rpmh-regulator
+ * resources defined for the top level PMIC device
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_init_vreg(struct rpmh_vreg *vreg, struct device *dev,
+ struct device_node *node, const char *pmic_id,
+ const struct rpmh_vreg_init_data *pmic_rpmh_data)
+{
+ struct regulator_config reg_config = {};
+ char rpmh_resource_name[20] = "";
+ const struct rpmh_vreg_init_data *rpmh_data;
+ struct regulator_init_data *init_data;
+ struct regulator_dev *rdev;
+ int ret;
+
+ vreg->dev = dev;
+
+ for (rpmh_data = pmic_rpmh_data; rpmh_data->name; rpmh_data++)
+ if (!strcmp(rpmh_data->name, node->name))
+ break;
+
+ if (!rpmh_data->name) {
+ dev_err(dev, "Unknown regulator %s\n", node->name);
+ return -EINVAL;
+ }
+
+ scnprintf(rpmh_resource_name, sizeof(rpmh_resource_name),
+ rpmh_data->resource_name, pmic_id);
+
+ vreg->addr = cmd_db_read_addr(rpmh_resource_name);
+ if (!vreg->addr) {
+ dev_err(dev, "%s: could not find RPMh address for resource %s\n",
+ node->name, rpmh_resource_name);
+ return -ENODEV;
+ }
+
+ vreg->rdesc.name = rpmh_data->name;
+ vreg->rdesc.supply_name = rpmh_data->supply_name;
+ vreg->hw_data = rpmh_data->hw_data;
+
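+ /* Use invalid sentinels until the first requests are sent to RPMh. */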
+ vreg->enabled = -EINVAL;
+ vreg->voltage_selector = -ENOTRECOVERABLE;
+ vreg->mode = REGULATOR_MODE_INVALID;
+
+ if (rpmh_data->hw_data->n_voltages) {
+ vreg->rdesc.linear_ranges = &rpmh_data->hw_data->voltage_range;
+ vreg->rdesc.n_linear_ranges = 1;
+ vreg->rdesc.n_voltages = rpmh_data->hw_data->n_voltages;
+ }
+
+ vreg->always_wait_for_ack = of_property_read_bool(node,
+ "qcom,always-wait-for-ack");
+
+ vreg->rdesc.owner = THIS_MODULE;
+ vreg->rdesc.type = REGULATOR_VOLTAGE;
+ vreg->rdesc.ops = vreg->hw_data->ops;
+ vreg->rdesc.of_map_mode = vreg->hw_data->of_map_mode;
+
+ init_data = of_get_regulator_init_data(dev, node, &vreg->rdesc);
+ if (!init_data)
+ return -ENOMEM;
+
+ if (rpmh_data->hw_data->regulator_type == XOB &&
+ init_data->constraints.min_uV &&
+ init_data->constraints.min_uV == init_data->constraints.max_uV) {
+ vreg->rdesc.fixed_uV = init_data->constraints.min_uV;
+ vreg->rdesc.n_voltages = 1;
+ }
+
+ reg_config.dev = dev;
+ reg_config.init_data = init_data;
+ reg_config.of_node = node;
+ reg_config.driver_data = vreg;
+
+ rdev = devm_regulator_register(dev, &vreg->rdesc, &reg_config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(dev, "%s: devm_regulator_register() failed, ret=%d\n",
+ node->name, ret);
+ return ret;
+ }
+
+ dev_dbg(dev, "%s regulator registered for RPMh resource %s @ 0x%05X\n",
+ node->name, rpmh_resource_name, vreg->addr);
+
+ return 0;
+}
+
+static const int pmic_mode_map_pmic4_ldo[REGULATOR_MODE_STANDBY + 1] = {
+ [REGULATOR_MODE_INVALID] = -EINVAL,
+ [REGULATOR_MODE_STANDBY] = PMIC4_LDO_MODE_RETENTION,
+ [REGULATOR_MODE_IDLE] = PMIC4_LDO_MODE_LPM,
+ [REGULATOR_MODE_NORMAL] = PMIC4_LDO_MODE_HPM,
+ [REGULATOR_MODE_FAST] = -EINVAL,
+};
+
+static unsigned int rpmh_regulator_pmic4_ldo_of_map_mode(unsigned int rpmh_mode)
+{
+ unsigned int mode;
+
+ switch (rpmh_mode) {
+ case RPMH_REGULATOR_MODE_HPM:
+ mode = REGULATOR_MODE_NORMAL;
+ break;
+ case RPMH_REGULATOR_MODE_LPM:
+ mode = REGULATOR_MODE_IDLE;
+ break;
+ case RPMH_REGULATOR_MODE_RET:
+ mode = REGULATOR_MODE_STANDBY;
+ break;
+ default:
+ mode = REGULATOR_MODE_INVALID;
+ }
+
+ return mode;
+}
+
+static const int pmic_mode_map_pmic4_smps[REGULATOR_MODE_STANDBY + 1] = {
+ [REGULATOR_MODE_INVALID] = -EINVAL,
+ [REGULATOR_MODE_STANDBY] = PMIC4_SMPS_MODE_RETENTION,
+ [REGULATOR_MODE_IDLE] = PMIC4_SMPS_MODE_PFM,
+ [REGULATOR_MODE_NORMAL] = PMIC4_SMPS_MODE_AUTO,
+ [REGULATOR_MODE_FAST] = PMIC4_SMPS_MODE_PWM,
+};
+
+static unsigned int
+rpmh_regulator_pmic4_smps_of_map_mode(unsigned int rpmh_mode)
+{
+ unsigned int mode;
+
+ switch (rpmh_mode) {
+ case RPMH_REGULATOR_MODE_HPM:
+ mode = REGULATOR_MODE_FAST;
+ break;
+ case RPMH_REGULATOR_MODE_AUTO:
+ mode = REGULATOR_MODE_NORMAL;
+ break;
+ case RPMH_REGULATOR_MODE_LPM:
+ mode = REGULATOR_MODE_IDLE;
+ break;
+ case RPMH_REGULATOR_MODE_RET:
+ mode = REGULATOR_MODE_STANDBY;
+ break;
+ default:
+ mode = REGULATOR_MODE_INVALID;
+ }
+
+ return mode;
+}
+
+static const int pmic_mode_map_pmic4_bob[REGULATOR_MODE_STANDBY + 1] = {
+ [REGULATOR_MODE_INVALID] = -EINVAL,
+ [REGULATOR_MODE_STANDBY] = -EINVAL,
+ [REGULATOR_MODE_IDLE] = PMIC4_BOB_MODE_PFM,
+ [REGULATOR_MODE_NORMAL] = PMIC4_BOB_MODE_AUTO,
+ [REGULATOR_MODE_FAST] = PMIC4_BOB_MODE_PWM,
+};
+
+static unsigned int rpmh_regulator_pmic4_bob_of_map_mode(unsigned int rpmh_mode)
+{
+ unsigned int mode;
+
+ switch (rpmh_mode) {
+ case RPMH_REGULATOR_MODE_HPM:
+ mode = REGULATOR_MODE_FAST;
+ break;
+ case RPMH_REGULATOR_MODE_AUTO:
+ mode = REGULATOR_MODE_NORMAL;
+ break;
+ case RPMH_REGULATOR_MODE_LPM:
+ mode = REGULATOR_MODE_IDLE;
+ break;
+ default:
+ mode = REGULATOR_MODE_INVALID;
+ }
+
+ return mode;
+}
+
+static const struct rpmh_vreg_hw_data pmic4_pldo = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_drms_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(1664000, 0, 255, 8000),
+ .n_voltages = 256,
+ .hpm_min_load_uA = 10000,
+ .pmic_mode_map = pmic_mode_map_pmic4_ldo,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic4_pldo_lv = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_drms_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(1256000, 0, 127, 8000),
+ .n_voltages = 128,
+ .hpm_min_load_uA = 10000,
+ .pmic_mode_map = pmic_mode_map_pmic4_ldo,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic4_nldo = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_drms_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000),
+ .n_voltages = 128,
+ .hpm_min_load_uA = 30000,
+ .pmic_mode_map = pmic_mode_map_pmic4_ldo,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic4_hfsmps3 = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ .n_voltages = 216,
+ .pmic_mode_map = pmic_mode_map_pmic4_smps,
+ .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic4_ftsmps426 = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 258, 4000),
+ .n_voltages = 259,
+ .pmic_mode_map = pmic_mode_map_pmic4_smps,
+ .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic4_bob = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_bypass_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(1824000, 0, 83, 32000),
+ .n_voltages = 84,
+ .pmic_mode_map = pmic_mode_map_pmic4_bob,
+ .of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic4_lvs = {
+ .regulator_type = XOB,
+ .ops = &rpmh_regulator_xob_ops,
+ /* LVS hardware does not support voltage or mode configuration. */
+};
+
+#define RPMH_VREG(_name, _resource_name, _hw_data, _supply_name) \
+{ \
+ .name = _name, \
+ .resource_name = _resource_name, \
+ .hw_data = _hw_data, \
+ .supply_name = _supply_name, \
+}
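For readability, each table entry below is just the RPMH_VREG() macro above
expanded into a designated initializer; the pm8998 "ldo1" entry, for instance,
is equivalent to:

	{
		.name = "ldo1",
		.resource_name = "ldo%s1",
		.hw_data = &pmic4_nldo,
		.supply_name = "vdd-l1-l27",
	},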
+
+static const struct rpmh_vreg_init_data pm8998_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic4_ftsmps426, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic4_ftsmps426, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic4_hfsmps3, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic4_hfsmps3, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic4_hfsmps3, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic4_ftsmps426, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic4_ftsmps426, "vdd-s7"),
+ RPMH_VREG("smps8", "smp%s8", &pmic4_ftsmps426, "vdd-s8"),
+ RPMH_VREG("smps9", "smp%s9", &pmic4_ftsmps426, "vdd-s9"),
+ RPMH_VREG("smps10", "smp%s10", &pmic4_ftsmps426, "vdd-s10"),
+ RPMH_VREG("smps11", "smp%s11", &pmic4_ftsmps426, "vdd-s11"),
+ RPMH_VREG("smps12", "smp%s12", &pmic4_ftsmps426, "vdd-s12"),
+ RPMH_VREG("smps13", "smp%s13", &pmic4_ftsmps426, "vdd-s13"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic4_nldo, "vdd-l1-l27"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic4_nldo, "vdd-l2-l8-l17"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic4_nldo, "vdd-l3-l11"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic4_nldo, "vdd-l4-l5"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic4_nldo, "vdd-l4-l5"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic4_pldo, "vdd-l6"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic4_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic4_nldo, "vdd-l2-l8-l17"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic4_pldo, "vdd-l9"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic4_pldo, "vdd-l10-l23-l25"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic4_nldo, "vdd-l3-l11"),
+ RPMH_VREG("ldo12", "ldo%s12", &pmic4_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic4_pldo, "vdd-l13-l19-l21"),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic4_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic4_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo16", "ldo%s16", &pmic4_pldo, "vdd-l16-l28"),
+ RPMH_VREG("ldo17", "ldo%s17", &pmic4_nldo, "vdd-l2-l8-l17"),
+ RPMH_VREG("ldo18", "ldo%s18", &pmic4_pldo, "vdd-l18-l22"),
+ RPMH_VREG("ldo19", "ldo%s19", &pmic4_pldo, "vdd-l13-l19-l21"),
+ RPMH_VREG("ldo20", "ldo%s20", &pmic4_pldo, "vdd-l20-l24"),
+ RPMH_VREG("ldo21", "ldo%s21", &pmic4_pldo, "vdd-l13-l19-l21"),
+ RPMH_VREG("ldo22", "ldo%s22", &pmic4_pldo, "vdd-l18-l22"),
+ RPMH_VREG("ldo23", "ldo%s23", &pmic4_pldo, "vdd-l10-l23-l25"),
+ RPMH_VREG("ldo24", "ldo%s24", &pmic4_pldo, "vdd-l20-l24"),
+ RPMH_VREG("ldo25", "ldo%s25", &pmic4_pldo, "vdd-l10-l23-l25"),
+ RPMH_VREG("ldo26", "ldo%s26", &pmic4_nldo, "vdd-l26"),
+ RPMH_VREG("ldo27", "ldo%s27", &pmic4_nldo, "vdd-l1-l27"),
+ RPMH_VREG("ldo28", "ldo%s28", &pmic4_pldo, "vdd-l16-l28"),
+ RPMH_VREG("lvs1", "vs%s1", &pmic4_lvs, "vin-lvs-1-2"),
+ RPMH_VREG("lvs2", "vs%s2", &pmic4_lvs, "vin-lvs-1-2"),
+ {},
+};
+
+static const struct rpmh_vreg_init_data pmi8998_vreg_data[] = {
+ RPMH_VREG("bob", "bob%s1", &pmic4_bob, "vdd-bob"),
+ {},
+};
+
+static const struct rpmh_vreg_init_data pm8005_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic4_ftsmps426, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic4_ftsmps426, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic4_ftsmps426, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic4_ftsmps426, "vdd-s4"),
+ {},
+};
+
+static int rpmh_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct rpmh_vreg_init_data *vreg_data;
+ struct device_node *node;
+ struct rpmh_vreg *vreg;
+ const char *pmic_id;
+ int ret;
+
+ vreg_data = of_device_get_match_data(dev);
+ if (!vreg_data)
+ return -ENODEV;
+
+ ret = of_property_read_string(dev->of_node, "qcom,pmic-id", &pmic_id);
+ if (ret < 0) {
+ dev_err(dev, "qcom,pmic-id missing in DT node\n");
+ return ret;
+ }
+
+ for_each_available_child_of_node(dev->of_node, node) {
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg) {
+ of_node_put(node);
+ return -ENOMEM;
+ }
+
+ ret = rpmh_regulator_init_vreg(vreg, dev, node, pmic_id,
+ vreg_data);
+ if (ret < 0) {
+ of_node_put(node);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id rpmh_regulator_match_table[] = {
+ {
+ .compatible = "qcom,pm8998-rpmh-regulators",
+ .data = pm8998_vreg_data,
+ },
+ {
+ .compatible = "qcom,pmi8998-rpmh-regulators",
+ .data = pmi8998_vreg_data,
+ },
+ {
+ .compatible = "qcom,pm8005-rpmh-regulators",
+ .data = pm8005_vreg_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rpmh_regulator_match_table);
+
+static struct platform_driver rpmh_regulator_driver = {
+ .driver = {
+ .name = "qcom-rpmh-regulator",
+ .of_match_table = of_match_ptr(rpmh_regulator_match_table),
+ },
+ .probe = rpmh_regulator_probe,
+};
+module_platform_driver(rpmh_regulator_driver);
+
+MODULE_DESCRIPTION("Qualcomm RPMh regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 9817f1a75342..53a61fb65642 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -1060,7 +1060,7 @@ static irqreturn_t spmi_regulator_vs_ocp_isr(int irq, void *data)
#define SAW3_AVS_CTL_TGGL_MASK 0x8000000
#define SAW3_AVS_CTL_CLEAR_MASK 0x7efc00
-static struct regmap *saw_regmap = NULL;
+static struct regmap *saw_regmap;
static void spmi_saw_set_vdd(void *data)
{
@@ -1728,7 +1728,7 @@ static const struct spmi_regulator_data pmi8994_regulators[] = {
{ "s2", 0x1700, "vdd_s2", },
{ "s3", 0x1a00, "vdd_s3", },
{ "l1", 0x4000, "vdd_l1", },
- { }
+ { }
};
static const struct of_device_id qcom_spmi_regulator_match[] = {
@@ -1752,7 +1752,8 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
const char *name;
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
- struct device_node *syscon;
+ struct device_node *syscon, *reg_node;
+ struct property *reg_prop;
int ret, lenp;
struct list_head *vreg_list;
@@ -1774,16 +1775,19 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
syscon = of_parse_phandle(node, "qcom,saw-reg", 0);
saw_regmap = syscon_node_to_regmap(syscon);
of_node_put(syscon);
- if (IS_ERR(regmap))
+ if (IS_ERR(saw_regmap))
dev_err(dev, "ERROR reading SAW regmap\n");
}
for (reg = match->data; reg->name; reg++) {
- if (saw_regmap && \
- of_find_property(of_find_node_by_name(node, reg->name), \
- "qcom,saw-slave", &lenp)) {
- continue;
+ if (saw_regmap) {
+ reg_node = of_get_child_by_name(node, reg->name);
+ reg_prop = of_find_property(reg_node, "qcom,saw-slave",
+ &lenp);
+ of_node_put(reg_node);
+ if (reg_prop)
+ continue;
}
vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
@@ -1816,13 +1820,17 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
if (ret)
continue;
- if (saw_regmap && \
- of_find_property(of_find_node_by_name(node, reg->name), \
- "qcom,saw-leader", &lenp)) {
- spmi_saw_ops = *(vreg->desc.ops);
- spmi_saw_ops.set_voltage_sel = \
- spmi_regulator_saw_set_voltage;
- vreg->desc.ops = &spmi_saw_ops;
+ if (saw_regmap) {
+ reg_node = of_get_child_by_name(node, reg->name);
+ reg_prop = of_find_property(reg_node, "qcom,saw-leader",
+ &lenp);
+ of_node_put(reg_node);
+ if (reg_prop) {
+ spmi_saw_ops = *(vreg->desc.ops);
+ spmi_saw_ops.set_voltage_sel =
+ spmi_regulator_saw_set_voltage;
+ vreg->desc.ops = &spmi_saw_ops;
+ }
}
config.dev = dev;
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 48f0ca90743c..095d25f3d2ea 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -1,13 +1,7 @@
-/*
- * Copyright (c) 2013 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2013 Samsung Electronics Co., Ltd
+// http://www.samsung.com
#include <linux/bug.h>
#include <linux/err.h>
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index d1207ec683db..5bb6f4ca48db 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -1,20 +1,7 @@
-/*
- * s2mps11.c
- *
- * Copyright (c) 2012-2014 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2012-2014 Samsung Electronics Co., Ltd
+// http://www.samsung.com
#include <linux/bug.h>
#include <linux/err.h>
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 0cbc980753c2..667d16dc83ce 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -1,15 +1,7 @@
-/*
- * s5m8767.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2011 Samsung Electronics Co., Ltd
+// http://www.samsung.com
#include <linux/err.h>
#include <linux/of_gpio.h>
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index fc12badf3805..d84fab616abf 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -232,6 +232,8 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
tps->strobes = devm_kcalloc(&pdev->dev,
TPS65217_NUM_REGULATOR, sizeof(u8),
GFP_KERNEL);
+ if (!tps->strobes)
+ return -ENOMEM;
platform_set_drvdata(pdev, tps);
diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
new file mode 100644
index 000000000000..abf22acbd13e
--- /dev/null
+++ b/drivers/regulator/uniphier-regulator.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Regulator controller driver for UniPhier SoC
+// Copyright 2018 Socionext Inc.
+// Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/reset.h>
+
+#define MAX_CLKS 2
+#define MAX_RSTS 2
+
+struct uniphier_regulator_soc_data {
+ int nclks;
+ const char * const *clock_names;
+ int nrsts;
+ const char * const *reset_names;
+ const struct regulator_desc *desc;
+ const struct regmap_config *regconf;
+};
+
+struct uniphier_regulator_priv {
+ struct clk_bulk_data clk[MAX_CLKS];
+ struct reset_control *rst[MAX_RSTS];
+ const struct uniphier_regulator_soc_data *data;
+};
+
+static struct regulator_ops uniphier_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static int uniphier_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_regulator_priv *priv;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ struct resource *res;
+ void __iomem *base;
+ const char *name;
+ int i, ret, nr;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data))
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ for (i = 0; i < priv->data->nclks; i++)
+ priv->clk[i].id = priv->data->clock_names[i];
+ ret = devm_clk_bulk_get(dev, priv->data->nclks, priv->clk);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < priv->data->nrsts; i++) {
+ name = priv->data->reset_names[i];
+ priv->rst[i] = devm_reset_control_get_shared(dev, name);
+ if (IS_ERR(priv->rst[i]))
+ return PTR_ERR(priv->rst[i]);
+ }
+
+ ret = clk_bulk_prepare_enable(priv->data->nclks, priv->clk);
+ if (ret)
+ return ret;
+
+ for (nr = 0; nr < priv->data->nrsts; nr++) {
+ ret = reset_control_deassert(priv->rst[nr]);
+ if (ret)
+ goto out_rst_assert;
+ }
+
+ regmap = devm_regmap_init_mmio(dev, base, priv->data->regconf);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ config.dev = dev;
+ config.driver_data = priv;
+ config.of_node = dev->of_node;
+ config.regmap = regmap;
+ config.init_data = of_get_regulator_init_data(dev, dev->of_node,
+ priv->data->desc);
+ rdev = devm_regulator_register(dev, priv->data->desc, &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ goto out_rst_assert;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+out_rst_assert:
+ while (nr--)
+ reset_control_assert(priv->rst[nr]);
+
+ clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
+
+ return ret;
+}
+
+static int uniphier_regulator_remove(struct platform_device *pdev)
+{
+ struct uniphier_regulator_priv *priv = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < priv->data->nrsts; i++)
+ reset_control_assert(priv->rst[i]);
+
+ clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
+
+ return 0;
+}
+
+/* USB3 controller data */
+#define USB3VBUS_OFFSET 0x0
+#define USB3VBUS_REG BIT(4)
+#define USB3VBUS_REG_EN BIT(3)
+static const struct regulator_desc uniphier_usb3_regulator_desc = {
+ .name = "vbus",
+ .of_match = of_match_ptr("vbus"),
+ .ops = &uniphier_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = USB3VBUS_OFFSET,
+ .enable_mask = USB3VBUS_REG_EN | USB3VBUS_REG,
+ .enable_val = USB3VBUS_REG_EN | USB3VBUS_REG,
+ .disable_val = USB3VBUS_REG_EN,
+};
+
+static const struct regmap_config uniphier_usb3_regulator_regconf = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 1,
+};
+
+static const char * const uniphier_pro4_clock_reset_names[] = {
+ "gio", "link",
+};
+
+static const struct uniphier_regulator_soc_data uniphier_pro4_usb3_data = {
+ .nclks = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
+ .clock_names = uniphier_pro4_clock_reset_names,
+ .nrsts = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
+ .reset_names = uniphier_pro4_clock_reset_names,
+ .desc = &uniphier_usb3_regulator_desc,
+ .regconf = &uniphier_usb3_regulator_regconf,
+};
+
+static const char * const uniphier_pxs2_clock_reset_names[] = {
+ "link",
+};
+
+static const struct uniphier_regulator_soc_data uniphier_pxs2_usb3_data = {
+ .nclks = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
+ .clock_names = uniphier_pxs2_clock_reset_names,
+ .nrsts = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
+ .reset_names = uniphier_pxs2_clock_reset_names,
+ .desc = &uniphier_usb3_regulator_desc,
+ .regconf = &uniphier_usb3_regulator_regconf,
+};
+
+static const struct of_device_id uniphier_regulator_match[] = {
+ /* USB VBUS */
+ {
+ .compatible = "socionext,uniphier-pro4-usb3-regulator",
+ .data = &uniphier_pro4_usb3_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-usb3-regulator",
+ .data = &uniphier_pxs2_usb3_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-usb3-regulator",
+ .data = &uniphier_pxs2_usb3_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-usb3-regulator",
+ .data = &uniphier_pxs2_usb3_data,
+ },
+ { /* Sentinel */ },
+};
+
+static struct platform_driver uniphier_regulator_driver = {
+ .probe = uniphier_regulator_probe,
+ .remove = uniphier_regulator_remove,
+ .driver = {
+ .name = "uniphier-regulator",
+ .of_match_table = uniphier_regulator_match,
+ },
+};
+module_platform_driver(uniphier_regulator_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier Regulator Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index a9f60d0ee02e..a23e7d394a0a 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -73,8 +73,8 @@ static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
-static void dasd_device_tasklet(struct dasd_device *);
-static void dasd_block_tasklet(struct dasd_block *);
+static void dasd_device_tasklet(unsigned long);
+static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
@@ -125,8 +125,7 @@ struct dasd_device *dasd_alloc_device(void)
dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
spin_lock_init(&device->mem_lock);
atomic_set(&device->tasklet_scheduled, 0);
- tasklet_init(&device->tasklet,
- (void (*)(unsigned long)) dasd_device_tasklet,
+ tasklet_init(&device->tasklet, dasd_device_tasklet,
(unsigned long) device);
INIT_LIST_HEAD(&device->ccw_queue);
timer_setup(&device->timer, dasd_device_timeout, 0);
@@ -166,8 +165,7 @@ struct dasd_block *dasd_alloc_block(void)
atomic_set(&block->open_count, -1);
atomic_set(&block->tasklet_scheduled, 0);
- tasklet_init(&block->tasklet,
- (void (*)(unsigned long)) dasd_block_tasklet,
+ tasklet_init(&block->tasklet, dasd_block_tasklet,
(unsigned long) block);
INIT_LIST_HEAD(&block->ccw_queue);
spin_lock_init(&block->queue_lock);
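These hunks replace the old function-pointer casts with handlers that natively
use the unsigned long signature tasklet_init() expects, casting the argument
back inside the handler instead. A minimal sketch of the pattern (my_dev and
its members are placeholder names):

	static void my_dev_tasklet(unsigned long data)
	{
		struct my_dev *dev = (struct my_dev *) data;

		/* process work queued on dev */
	}

	tasklet_init(&dev->tasklet, my_dev_tasklet, (unsigned long) dev);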
@@ -2064,8 +2062,9 @@ EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
/*
* Acquire the device lock and process queues for the device.
*/
-static void dasd_device_tasklet(struct dasd_device *device)
+static void dasd_device_tasklet(unsigned long data)
{
+ struct dasd_device *device = (struct dasd_device *) data;
struct list_head final_queue;
atomic_set (&device->tasklet_scheduled, 0);
@@ -2783,8 +2782,9 @@ static void __dasd_block_start_head(struct dasd_block *block)
* block layer request queue, creates ccw requests, enqueues them on
* a dasd_device and processes ccw requests that have been returned.
*/
-static void dasd_block_tasklet(struct dasd_block *block)
+static void dasd_block_tasklet(unsigned long data)
{
+ struct dasd_block *block = (struct dasd_block *) data;
struct list_head final_queue;
struct list_head *l, *n;
struct dasd_ccw_req *cqr;
@@ -3127,6 +3127,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
block->tag_set.nr_hw_queues = nr_hw_queues;
block->tag_set.queue_depth = queue_depth;
block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ block->tag_set.numa_node = NUMA_NO_NODE;
rc = blk_mq_alloc_tag_set(&block->tag_set);
if (rc)
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index e36a114354fc..b9ce93e9df89 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -708,7 +708,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
struct ccw1 *ccw;
cqr = lcu->rsu_cqr;
- strncpy((char *) &cqr->magic, "ECKD", 4);
+ memcpy((char *) &cqr->magic, "ECKD", 4);
ASCEBC((char *) &cqr->magic, 4);
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RSCK;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index b9ebb565ee2c..fab35c6170cc 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -426,7 +426,7 @@ dasd_add_busid(const char *bus_id, int features)
if (!devmap) {
/* This bus_id is new. */
new->devindex = dasd_max_devindex++;
- strncpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
+ strlcpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
new->features = features;
new->device = NULL;
list_add(&new->list, &dasd_hashlists[hash]);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bbf95b78ef5d..4e7b55a14b1a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1780,6 +1780,9 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
struct dasd_eckd_private *private = device->private;
int i;
+ if (!private)
+ return;
+
dasd_alias_disconnect_device_from_lcu(device);
private->ned = NULL;
private->sneq = NULL;
@@ -2035,8 +2038,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device)
static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
- cancel_work_sync(&device->reload_device);
- cancel_work_sync(&device->kick_validate);
+ if (cancel_work_sync(&device->reload_device))
+ dasd_put_device(device);
+ if (cancel_work_sync(&device->kick_validate))
+ dasd_put_device(device);
+
return 0;
};
@@ -3535,7 +3541,7 @@ static int prepare_itcw(struct itcw *itcw,
dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
&pfxdata, sizeof(pfxdata), total_data_size);
- return PTR_RET(dcw);
+ return PTR_ERR_OR_ZERO(dcw);
}
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 6ef8714dc693..93bb09da7fdc 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -313,7 +313,7 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
ktime_get_real_ts64(&ts);
header.tv_sec = ts.tv_sec;
header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- strncpy(header.busid, dev_name(&device->cdev->dev),
+ strlcpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
@@ -356,7 +356,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
ktime_get_real_ts64(&ts);
header.tv_sec = ts.tv_sec;
header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- strncpy(header.busid, dev_name(&device->cdev->dev),
+ strlcpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index b1fcb76dd272..98f66b7b6794 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -455,6 +455,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
bdev->tag_set.nr_hw_queues = nr_requests;
bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ bdev->tag_set.numa_node = NUMA_NO_NODE;
ret = blk_mq_alloc_tag_set(&bdev->tag_set);
if (ret)
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 0a4c13e1e76e..c6ab34f94b1b 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -12,11 +12,6 @@ GCOV_PROFILE_sclp_early_core.o := n
KCOV_INSTRUMENT_sclp_early_core.o := n
UBSAN_SANITIZE_sclp_early_core.o := n
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
-CFLAGS_sclp_early_core.o += -march=z900
-endif
-
CFLAGS_sclp_early_core.o += -D__NO_FORTIFY
CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 79eb60958015..bbb3001b0961 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -334,37 +334,41 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
int cmd, int perm)
{
struct kbentry tmp;
+ unsigned long kb_index, kb_table;
ushort *key_map, val, ov;
if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
return -EFAULT;
+ kb_index = (unsigned long) tmp.kb_index;
#if NR_KEYS < 256
- if (tmp.kb_index >= NR_KEYS)
+ if (kb_index >= NR_KEYS)
return -EINVAL;
#endif
+ kb_table = (unsigned long) tmp.kb_table;
#if MAX_NR_KEYMAPS < 256
- if (tmp.kb_table >= MAX_NR_KEYMAPS)
+ if (kb_table >= MAX_NR_KEYMAPS)
return -EINVAL;
+ kb_table = array_index_nospec(kb_table, MAX_NR_KEYMAPS);
#endif
switch (cmd) {
case KDGKBENT:
- key_map = kbd->key_maps[tmp.kb_table];
+ key_map = kbd->key_maps[kb_table];
if (key_map) {
- val = U(key_map[tmp.kb_index]);
+ val = U(key_map[kb_index]);
if (KTYP(val) >= KBD_NR_TYPES)
val = K_HOLE;
} else
- val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP);
+ val = (kb_index ? K_HOLE : K_NOSUCHMAP);
return put_user(val, &user_kbe->kb_value);
case KDSKBENT:
if (!perm)
return -EPERM;
- if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) {
+ if (!kb_index && tmp.kb_value == K_NOSUCHMAP) {
/* disallocate map */
- key_map = kbd->key_maps[tmp.kb_table];
+ key_map = kbd->key_maps[kb_table];
if (key_map) {
- kbd->key_maps[tmp.kb_table] = NULL;
+ kbd->key_maps[kb_table] = NULL;
kfree(key_map);
}
break;
@@ -375,18 +379,18 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
return -EINVAL;
- if (!(key_map = kbd->key_maps[tmp.kb_table])) {
+ if (!(key_map = kbd->key_maps[kb_table])) {
int j;
key_map = kmalloc(sizeof(plain_map),
GFP_KERNEL);
if (!key_map)
return -ENOMEM;
- kbd->key_maps[tmp.kb_table] = key_map;
+ kbd->key_maps[kb_table] = key_map;
for (j = 0; j < NR_KEYS; j++)
key_map[j] = U(K_HOLE);
}
- ov = U(key_map[tmp.kb_index]);
+ ov = U(key_map[kb_index]);
if (tmp.kb_value == ov)
break; /* nothing to do */
/*
@@ -395,7 +399,7 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
- key_map[tmp.kb_index] = U(tmp.kb_value);
+ key_map[kb_index] = U(tmp.kb_value);
break;
}
return 0;
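The hunk above bounds-checks the user-supplied keymap index and then clamps it
with array_index_nospec() so it cannot be used out of range under speculation
(Spectre v1). The general pattern, as a sketch with placeholder names idx and
table:

	#include <linux/nospec.h>

	if (idx >= ARRAY_SIZE(table))
		return -EINVAL;
	idx = array_index_nospec(idx, ARRAY_SIZE(table));
	val = table[idx];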
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 76c158c41510..4f1a69c9d81d 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -61,7 +61,7 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
struct appldata_product_id id;
int rc;
- strncpy(id.prod_nr, "LNXAPPL", 7);
+ memcpy(id.prod_nr, "LNXAPPL", 7);
id.prod_fn = myhdr->applid;
id.record_nr = myhdr->record_num;
id.version_nr = myhdr->version;
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index ee6f3b563728..e69b12a40636 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -64,42 +64,18 @@ static struct notifier_block call_home_panic_nb = {
.priority = INT_MAX,
};
-static int proc_handler_callhome(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *count,
- loff_t *ppos)
-{
- unsigned long val;
- int len, rc;
- char buf[3];
-
- if (!*count || (*ppos && !write)) {
- *count = 0;
- return 0;
- }
- if (!write) {
- len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled);
- rc = copy_to_user(buffer, buf, sizeof(buf));
- if (rc != 0)
- return -EFAULT;
- } else {
- len = *count;
- rc = kstrtoul_from_user(buffer, len, 0, &val);
- if (rc)
- return rc;
- if (val != 0 && val != 1)
- return -EINVAL;
- callhome_enabled = val;
- }
- *count = len;
- *ppos += len;
- return 0;
-}
+static int zero;
+static int one = 1;
static struct ctl_table callhome_table[] = {
{
.procname = "callhome",
+ .data = &callhome_enabled,
+ .maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_handler_callhome,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
},
{}
};
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 37e65a05517f..cdcde18e7220 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -113,16 +113,16 @@ static int crypt_enabled(struct tape_device *device)
static void ext_to_int_kekl(struct tape390_kekl *in,
struct tape3592_kekl *out)
{
- int i;
+ int len;
memset(out, 0, sizeof(*out));
if (in->type == TAPE390_KEKL_TYPE_HASH)
out->flags |= 0x40;
if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH)
out->flags |= 0x80;
- strncpy(out->label, in->label, 64);
- for (i = strlen(in->label); i < sizeof(out->label); i++)
- out->label[i] = ' ';
+ len = min(sizeof(out->label), strlen(in->label));
+ memcpy(out->label, in->label, len);
+ memset(out->label + len, ' ', sizeof(out->label) - len);
ASCEBC(out->label, sizeof(out->label));
}
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index a07102472ce9..b58df0dd0039 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -54,10 +54,10 @@ struct tape_class_device *register_tape_dev(
if (!tcd)
return ERR_PTR(-ENOMEM);
- strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
+ strlcpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
*s = '!';
- strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
+ strlcpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
*s = '!';
@@ -77,7 +77,7 @@ struct tape_class_device *register_tape_dev(
tcd->class_device = device_create(tape_class, device,
tcd->char_device->dev, NULL,
"%s", tcd->device_name);
- rc = PTR_RET(tcd->class_device);
+ rc = PTR_ERR_OR_ZERO(tcd->class_device);
if (rc)
goto fail_with_cdev;
rc = sysfs_create_link(
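The strncpy() to strlcpy() conversions in these hunks matter because strncpy()
leaves the destination without a NUL terminator whenever the source is at least
as long as the buffer, while strlcpy() always terminates (truncating if
necessary). A sketch with an assumed 8-byte buffer:

	char buf[8];

	strncpy(buf, "tape_class", sizeof(buf));	/* buf is not NUL-terminated */
	strlcpy(buf, "tape_class", sizeof(buf));	/* buf = "tape_cl", terminated */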
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index afbdee74147d..51038ec309c1 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -471,14 +471,17 @@ int chp_new(struct chp_id chpid)
{
struct channel_subsystem *css = css_by_id(chpid.cssid);
struct channel_path *chp;
- int ret;
+ int ret = 0;
+ mutex_lock(&css->mutex);
if (chp_is_registered(chpid))
- return 0;
- chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
- if (!chp)
- return -ENOMEM;
+ goto out;
+ chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
+ if (!chp) {
+ ret = -ENOMEM;
+ goto out;
+ }
/* fill in status, etc. */
chp->chpid = chpid;
chp->state = 1;
@@ -505,21 +508,20 @@ int chp_new(struct chp_id chpid)
put_device(&chp->dev);
goto out;
}
- mutex_lock(&css->mutex);
+
if (css->cm_enabled) {
ret = chp_add_cmg_attr(chp);
if (ret) {
device_unregister(&chp->dev);
- mutex_unlock(&css->mutex);
goto out;
}
}
css->chps[chpid.id] = chp;
- mutex_unlock(&css->mutex);
goto out;
out_free:
kfree(chp);
out:
+ mutex_unlock(&css->mutex);
return ret;
}
@@ -585,8 +587,7 @@ static void chp_process_crw(struct crw *crw0, struct crw *crw1,
switch (crw0->erc) {
case CRW_ERC_IPARM: /* Path has come. */
case CRW_ERC_INIT:
- if (!chp_is_registered(chpid))
- chp_new(chpid);
+ chp_new(chpid);
chsc_chp_online(chpid);
break;
case CRW_ERC_PERRI: /* Path has gone. */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 9029804dcd22..a0baee25134c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -91,7 +91,7 @@ struct chsc_ssd_area {
u16 sch; /* subchannel */
u8 chpid[8]; /* chpids 0-7 */
u16 fla[8]; /* full link addresses 0-7 */
-} __attribute__ ((packed));
+} __packed __aligned(PAGE_SIZE);
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
@@ -319,7 +319,7 @@ struct chsc_sei {
struct chsc_sei_nt2_area nt2_area;
u8 nt_area[PAGE_SIZE - 24];
} u;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
/*
* Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
@@ -841,7 +841,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
u32 : 4;
u32 fmt : 4;
u32 : 16;
- } __attribute__ ((packed)) *secm_area;
+ } *secm_area;
unsigned long flags;
int ret, ccode;
@@ -1014,7 +1014,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
u32 cmg : 8;
u32 zeroes3;
u32 data[NR_MEASUREMENT_CHARS];
- } __attribute__ ((packed)) *scmc_area;
+ } *scmc_area;
chp->shared = -1;
chp->cmg = -1;
@@ -1142,7 +1142,7 @@ int __init chsc_get_cssid(int idx)
u8 cssid;
u32 : 24;
} list[0];
- } __packed *sdcal_area;
+ } *sdcal_area;
int ret;
spin_lock_irq(&chsc_page_lock);
@@ -1192,7 +1192,7 @@ chsc_determine_css_characteristics(void)
u32 reserved4;
u32 general_char[510];
u32 chsc_char[508];
- } __attribute__ ((packed)) *scsc_area;
+ } *scsc_area;
spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
@@ -1236,7 +1236,7 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
unsigned int rsvd3[3];
u64 clock_delta;
unsigned int rsvd4[2];
- } __attribute__ ((packed)) *rr;
+ } *rr;
int rc;
memset(page, 0, PAGE_SIZE);
@@ -1261,7 +1261,7 @@ int chsc_sstpi(void *page, void *result, size_t size)
unsigned int rsvd0[3];
struct chsc_header response;
char data[];
- } __attribute__ ((packed)) *rr;
+ } *rr;
int rc;
memset(page, 0, PAGE_SIZE);
@@ -1284,7 +1284,7 @@ int chsc_siosl(struct subchannel_id schid)
u32 word3;
struct chsc_header response;
u32 word[11];
- } __attribute__ ((packed)) *siosl_area;
+ } *siosl_area;
unsigned long flags;
int ccode;
int rc;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 5c9f0dd33f4e..78aba8d94eec 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -15,12 +15,12 @@
#define NR_MEASUREMENT_CHARS 5
struct cmg_chars {
u32 values[NR_MEASUREMENT_CHARS];
-} __attribute__ ((packed));
+};
#define NR_MEASUREMENT_ENTRIES 8
struct cmg_entry {
u32 values[NR_MEASUREMENT_ENTRIES];
-} __attribute__ ((packed));
+};
struct channel_path_desc_fmt1 {
u8 flags;
@@ -38,7 +38,7 @@ struct channel_path_desc_fmt1 {
u8 s:1;
u8 f:1;
u32 zeros[2];
-} __attribute__ ((packed));
+};
struct channel_path_desc_fmt3 {
struct channel_path_desc_fmt1 fmt1_desc;
@@ -59,7 +59,7 @@ struct css_chsc_char {
u32:7;
u32 pnso:1; /* bit 116 */
u32:11;
-}__attribute__((packed));
+} __packed;
extern struct css_chsc_char css_chsc_characteristics;
@@ -82,7 +82,7 @@ struct chsc_ssqd_area {
struct chsc_header response;
u32:32;
struct qdio_ssqd_desc qdio_ssqd;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
struct chsc_scssc_area {
struct chsc_header request;
@@ -102,7 +102,7 @@ struct chsc_scssc_area {
u32 reserved[1004];
struct chsc_header response;
u32:32;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
struct chsc_scpd {
struct chsc_header request;
@@ -120,7 +120,7 @@ struct chsc_scpd {
struct chsc_header response;
u32:32;
u8 data[0];
-} __packed;
+} __packed __aligned(PAGE_SIZE);
struct chsc_sda_area {
struct chsc_header request;
@@ -199,7 +199,7 @@ struct chsc_scm_info {
u32 reserved2[10];
u64 restok;
struct sale scmal[248];
-} __packed;
+} __packed __aligned(PAGE_SIZE);
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
@@ -243,7 +243,7 @@ struct chsc_pnso_area {
struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
struct qdio_brinfo_entry_l2 l2[0];
} entries;
-} __packed;
+} __packed __aligned(PAGE_SIZE);
int chsc_pnso_brinfo(struct subchannel_id schid,
struct chsc_pnso_area *brinfo_area,
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 5130d7c67239..de744ca158fd 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -526,76 +526,6 @@ int cio_disable_subchannel(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
-static int cio_check_devno_blacklisted(struct subchannel *sch)
-{
- if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
- /*
- * This device must not be known to Linux. So we simply
- * say that there is no device and return ENODEV.
- */
- CIO_MSG_EVENT(6, "Blacklisted device detected "
- "at devno %04X, subchannel set %x\n",
- sch->schib.pmcw.dev, sch->schid.ssid);
- return -ENODEV;
- }
- return 0;
-}
-
-/**
- * cio_validate_subchannel - basic validation of subchannel
- * @sch: subchannel structure to be filled out
- * @schid: subchannel id
- *
- * Find out subchannel type and initialize struct subchannel.
- * Return codes:
- * 0 on success
- * -ENXIO for non-defined subchannels
- * -ENODEV for invalid subchannels or blacklisted devices
- * -EIO for subchannels in an invalid subchannel set
- */
-int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
-{
- char dbf_txt[15];
- int ccode;
- int err;
-
- sprintf(dbf_txt, "valsch%x", schid.sch_no);
- CIO_TRACE_EVENT(4, dbf_txt);
-
- /*
- * The first subchannel that is not-operational (ccode==3)
- * indicates that there aren't any more devices available.
- * If stsch gets an exception, it means the current subchannel set
- * is not valid.
- */
- ccode = stsch(schid, &sch->schib);
- if (ccode) {
- err = (ccode == 3) ? -ENXIO : ccode;
- goto out;
- }
- sch->st = sch->schib.pmcw.st;
- sch->schid = schid;
-
- switch (sch->st) {
- case SUBCHANNEL_TYPE_IO:
- case SUBCHANNEL_TYPE_MSG:
- if (!css_sch_is_valid(&sch->schib))
- err = -ENODEV;
- else
- err = cio_check_devno_blacklisted(sch);
- break;
- default:
- err = 0;
- }
- if (err)
- goto out;
-
- CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
- sch->schid.ssid, sch->schid.sch_no, sch->st);
-out:
- return err;
-}
-
/*
* do_cio_interrupt() handles all normal I/O device IRQ's
*/
@@ -719,6 +649,7 @@ struct subchannel *cio_probe_console(void)
{
struct subchannel_id schid;
struct subchannel *sch;
+ struct schib schib;
int sch_no, ret;
sch_no = cio_get_console_sch_no();
@@ -728,7 +659,11 @@ struct subchannel *cio_probe_console(void)
}
init_subchannel_id(&schid);
schid.sch_no = sch_no;
- sch = css_alloc_subchannel(schid);
+ ret = stsch(schid, &schib);
+ if (ret)
+ return ERR_PTR(-ENODEV);
+
+ sch = css_alloc_subchannel(schid, &schib);
if (IS_ERR(sch))
return sch;
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 94cd813bdcfe..9811fd8a0c73 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -119,7 +119,6 @@ DECLARE_PER_CPU(struct irb, cio_irb);
#define to_subchannel(n) container_of(n, struct subchannel, dev)
-extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
extern int cio_enable_subchannel(struct subchannel *, u32);
extern int cio_disable_subchannel (struct subchannel *);
extern int cio_cancel (struct subchannel *);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 9263a0fb3858..aea502922646 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -25,6 +25,7 @@
#include "css.h"
#include "cio.h"
+#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
@@ -168,18 +169,53 @@ static void css_subchannel_release(struct device *dev)
kfree(sch);
}
-struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
+static int css_validate_subchannel(struct subchannel_id schid,
+ struct schib *schib)
+{
+ int err;
+
+ switch (schib->pmcw.st) {
+ case SUBCHANNEL_TYPE_IO:
+ case SUBCHANNEL_TYPE_MSG:
+ if (!css_sch_is_valid(schib))
+ err = -ENODEV;
+ else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
+ CIO_MSG_EVENT(6, "Blacklisted device detected "
+ "at devno %04X, subchannel set %x\n",
+ schib->pmcw.dev, schid.ssid);
+ err = -ENODEV;
+ } else
+ err = 0;
+ break;
+ default:
+ err = 0;
+ }
+ if (err)
+ goto out;
+
+ CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
+ schid.ssid, schid.sch_no, schib->pmcw.st);
+out:
+ return err;
+}
+
+struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
+ struct schib *schib)
{
struct subchannel *sch;
int ret;
+ ret = css_validate_subchannel(schid, schib);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
if (!sch)
return ERR_PTR(-ENOMEM);
- ret = cio_validate_subchannel(sch, schid);
- if (ret < 0)
- goto err;
+ sch->schid = schid;
+ sch->schib = *schib;
+ sch->st = schib->pmcw.st;
ret = css_sch_create_locks(sch);
if (ret)
@@ -244,8 +280,7 @@ static void ssd_register_chpids(struct chsc_ssd_info *ssd)
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (ssd->path_mask & mask)
- if (!chp_is_registered(ssd->chpid[i]))
- chp_new(ssd->chpid[i]);
+ chp_new(ssd->chpid[i]);
}
}
@@ -382,12 +417,12 @@ int css_register_subchannel(struct subchannel *sch)
return ret;
}
-static int css_probe_device(struct subchannel_id schid)
+static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
struct subchannel *sch;
int ret;
- sch = css_alloc_subchannel(schid);
+ sch = css_alloc_subchannel(schid, schib);
if (IS_ERR(sch))
return PTR_ERR(sch);
@@ -436,23 +471,23 @@ EXPORT_SYMBOL_GPL(css_sch_is_valid);
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
struct schib schib;
+ int ccode;
if (!slow) {
/* Will be done on the slow path. */
return -EAGAIN;
}
- if (stsch(schid, &schib)) {
- /* Subchannel is not provided. */
- return -ENXIO;
- }
- if (!css_sch_is_valid(&schib)) {
- /* Unusable - ignore. */
- return 0;
- }
- CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
- schid.sch_no);
+ /*
+ * The first subchannel that is not-operational (ccode==3)
+ * indicates that there aren't any more devices available.
+ * If stsch gets an exception, it means the current subchannel set
+ * is not valid.
+ */
+ ccode = stsch(schid, &schib);
+ if (ccode)
+ return (ccode == 3) ? -ENXIO : ccode;
- return css_probe_device(schid);
+ return css_probe_device(schid, &schib);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
@@ -1081,6 +1116,11 @@ static int __init channel_subsystem_init(void)
if (ret)
goto out_wq;
+ /* Register subchannels which are already in use. */
+ cio_register_early_subchannels();
+ /* Start initial subchannel evaluation. */
+ css_schedule_eval_all();
+
return ret;
out_wq:
destroy_workqueue(cio_work_q);
@@ -1120,10 +1160,6 @@ int css_complete_work(void)
*/
static int __init channel_subsystem_init_sync(void)
{
- /* Register subchannels which are already in use. */
- cio_register_early_subchannels();
- /* Start initial subchannel evaluation. */
- css_schedule_eval_all();
css_complete_work();
return 0;
}
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 30357cbf350a..8d832900a63d 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -103,7 +103,8 @@ extern void css_driver_unregister(struct css_driver *);
extern void css_sch_device_unregister(struct subchannel *);
extern int css_register_subchannel(struct subchannel *);
-extern struct subchannel *css_alloc_subchannel(struct subchannel_id);
+extern struct subchannel *css_alloc_subchannel(struct subchannel_id,
+ struct schib *schib);
extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
extern int max_ssid;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index f4ca72dd862f..9c7d9da42ba0 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -631,21 +631,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
unsigned long phys_aob = 0;
if (!q->use_cq)
- goto out;
+ return 0;
if (!q->aobs[bufnr]) {
struct qaob *aob = qdio_allocate_aob();
q->aobs[bufnr] = aob;
}
if (q->aobs[bufnr]) {
- q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
q->sbal_state[bufnr].aob = q->aobs[bufnr];
q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
phys_aob = virt_to_phys(q->aobs[bufnr]);
WARN_ON_ONCE(phys_aob & 0xFF);
}
-out:
+ q->sbal_state[bufnr].flags = 0;
return phys_aob;
}
diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
index 1f8d1c1e566d..0ebb29b6fd6d 100644
--- a/drivers/s390/cio/trace.h
+++ b/drivers/s390/cio/trace.h
@@ -30,6 +30,17 @@ DECLARE_EVENT_CLASS(s390_class_schib,
__field(u16, schno)
__field(u16, devno)
__field_struct(struct schib, schib)
+ __field(u8, pmcw_ena)
+ __field(u8, pmcw_st)
+ __field(u8, pmcw_dnv)
+ __field(u16, pmcw_dev)
+ __field(u8, pmcw_lpm)
+ __field(u8, pmcw_pnom)
+ __field(u8, pmcw_lpum)
+ __field(u8, pmcw_pim)
+ __field(u8, pmcw_pam)
+ __field(u8, pmcw_pom)
+ __field(u64, pmcw_chpid)
__field(int, cc)
),
TP_fast_assign(
@@ -38,18 +49,29 @@ DECLARE_EVENT_CLASS(s390_class_schib,
__entry->schno = schid.sch_no;
__entry->devno = schib->pmcw.dev;
__entry->schib = *schib;
+ __entry->pmcw_ena = schib->pmcw.ena;
+ __entry->pmcw_st = schib->pmcw.st;
+ __entry->pmcw_dnv = schib->pmcw.dnv;
+ __entry->pmcw_dev = schib->pmcw.dev;
+ __entry->pmcw_lpm = schib->pmcw.lpm;
+ __entry->pmcw_pnom = schib->pmcw.pnom;
+ __entry->pmcw_lpum = schib->pmcw.lpum;
+ __entry->pmcw_pim = schib->pmcw.pim;
+ __entry->pmcw_pam = schib->pmcw.pam;
+ __entry->pmcw_pom = schib->pmcw.pom;
+ memcpy(&__entry->pmcw_chpid, &schib->pmcw.chpid, 8);
__entry->cc = cc;
),
TP_printk("schid=%x.%x.%04x cc=%d ena=%d st=%d dnv=%d dev=%04x "
"lpm=0x%02x pnom=0x%02x lpum=0x%02x pim=0x%02x pam=0x%02x "
"pom=0x%02x chpids=%016llx",
__entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
- __entry->schib.pmcw.ena, __entry->schib.pmcw.st,
- __entry->schib.pmcw.dnv, __entry->schib.pmcw.dev,
- __entry->schib.pmcw.lpm, __entry->schib.pmcw.pnom,
- __entry->schib.pmcw.lpum, __entry->schib.pmcw.pim,
- __entry->schib.pmcw.pam, __entry->schib.pmcw.pom,
- *((u64 *) __entry->schib.pmcw.chpid)
+ __entry->pmcw_ena, __entry->pmcw_st,
+ __entry->pmcw_dnv, __entry->pmcw_dev,
+ __entry->pmcw_lpm, __entry->pmcw_pnom,
+ __entry->pmcw_lpum, __entry->pmcw_pim,
+ __entry->pmcw_pam, __entry->pmcw_pom,
+ __entry->pmcw_chpid
)
);
@@ -89,6 +111,13 @@ TRACE_EVENT(s390_cio_tsch,
__field(u8, ssid)
__field(u16, schno)
__field_struct(struct irb, irb)
+ __field(u8, scsw_dcc)
+ __field(u8, scsw_pno)
+ __field(u8, scsw_fctl)
+ __field(u8, scsw_actl)
+ __field(u8, scsw_stctl)
+ __field(u8, scsw_dstat)
+ __field(u8, scsw_cstat)
__field(int, cc)
),
TP_fast_assign(
@@ -96,15 +125,22 @@ TRACE_EVENT(s390_cio_tsch,
__entry->ssid = schid.ssid;
__entry->schno = schid.sch_no;
__entry->irb = *irb;
+ __entry->scsw_dcc = scsw_cc(&irb->scsw);
+ __entry->scsw_pno = scsw_pno(&irb->scsw);
+ __entry->scsw_fctl = scsw_fctl(&irb->scsw);
+ __entry->scsw_actl = scsw_actl(&irb->scsw);
+ __entry->scsw_stctl = scsw_stctl(&irb->scsw);
+ __entry->scsw_dstat = scsw_dstat(&irb->scsw);
+ __entry->scsw_cstat = scsw_cstat(&irb->scsw);
__entry->cc = cc;
),
TP_printk("schid=%x.%x.%04x cc=%d dcc=%d pno=%d fctl=0x%x actl=0x%x "
"stctl=0x%x dstat=0x%x cstat=0x%x",
__entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
- scsw_cc(&__entry->irb.scsw), scsw_pno(&__entry->irb.scsw),
- scsw_fctl(&__entry->irb.scsw), scsw_actl(&__entry->irb.scsw),
- scsw_stctl(&__entry->irb.scsw),
- scsw_dstat(&__entry->irb.scsw), scsw_cstat(&__entry->irb.scsw)
+ __entry->scsw_dcc, __entry->scsw_pno,
+ __entry->scsw_fctl, __entry->scsw_actl,
+ __entry->scsw_stctl,
+ __entry->scsw_dstat, __entry->scsw_cstat
)
);
@@ -122,6 +158,9 @@ TRACE_EVENT(s390_cio_tpi,
__field(u8, cssid)
__field(u8, ssid)
__field(u16, schno)
+ __field(u8, adapter_IO)
+ __field(u8, isc)
+ __field(u8, type)
),
TP_fast_assign(
__entry->cc = cc;
@@ -136,11 +175,14 @@ TRACE_EVENT(s390_cio_tpi,
__entry->cssid = __entry->tpi_info.schid.cssid;
__entry->ssid = __entry->tpi_info.schid.ssid;
__entry->schno = __entry->tpi_info.schid.sch_no;
+ __entry->adapter_IO = __entry->tpi_info.adapter_IO;
+ __entry->isc = __entry->tpi_info.isc;
+ __entry->type = __entry->tpi_info.type;
),
TP_printk("schid=%x.%x.%04x cc=%d a=%d isc=%d type=%d",
__entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
- __entry->tpi_info.adapter_IO, __entry->tpi_info.isc,
- __entry->tpi_info.type
+ __entry->adapter_IO, __entry->isc,
+ __entry->type
)
);
@@ -299,16 +341,20 @@ TRACE_EVENT(s390_cio_interrupt,
__field(u8, cssid)
__field(u8, ssid)
__field(u16, schno)
+ __field(u8, isc)
+ __field(u8, type)
),
TP_fast_assign(
__entry->tpi_info = *tpi_info;
- __entry->cssid = __entry->tpi_info.schid.cssid;
- __entry->ssid = __entry->tpi_info.schid.ssid;
- __entry->schno = __entry->tpi_info.schid.sch_no;
+ __entry->cssid = tpi_info->schid.cssid;
+ __entry->ssid = tpi_info->schid.ssid;
+ __entry->schno = tpi_info->schid.sch_no;
+ __entry->isc = tpi_info->isc;
+ __entry->type = tpi_info->type;
),
TP_printk("schid=%x.%x.%04x isc=%d type=%d",
__entry->cssid, __entry->ssid, __entry->schno,
- __entry->tpi_info.isc, __entry->tpi_info.type
+ __entry->isc, __entry->type
)
);
@@ -321,11 +367,13 @@ TRACE_EVENT(s390_cio_adapter_int,
TP_ARGS(tpi_info),
TP_STRUCT__entry(
__field_struct(struct tpi_info, tpi_info)
+ __field(u8, isc)
),
TP_fast_assign(
__entry->tpi_info = *tpi_info;
+ __entry->isc = tpi_info->isc;
),
- TP_printk("isc=%d", __entry->tpi_info.isc)
+ TP_printk("isc=%d", __entry->isc)
);
/**
@@ -339,16 +387,30 @@ TRACE_EVENT(s390_cio_stcrw,
TP_STRUCT__entry(
__field_struct(struct crw, crw)
__field(int, cc)
+ __field(u8, slct)
+ __field(u8, oflw)
+ __field(u8, chn)
+ __field(u8, rsc)
+ __field(u8, anc)
+ __field(u8, erc)
+ __field(u16, rsid)
),
TP_fast_assign(
__entry->crw = *crw;
__entry->cc = cc;
+ __entry->slct = crw->slct;
+ __entry->oflw = crw->oflw;
+ __entry->chn = crw->chn;
+ __entry->rsc = crw->rsc;
+ __entry->anc = crw->anc;
+ __entry->erc = crw->erc;
+ __entry->rsid = crw->rsid;
),
TP_printk("cc=%d slct=%d oflw=%d chn=%d rsc=%d anc=%d erc=0x%x "
"rsid=0x%x",
- __entry->cc, __entry->crw.slct, __entry->crw.oflw,
- __entry->crw.chn, __entry->crw.rsc, __entry->crw.anc,
- __entry->crw.erc, __entry->crw.rsid
+ __entry->cc, __entry->slct, __entry->oflw,
+ __entry->chn, __entry->rsc, __entry->anc,
+ __entry->erc, __entry->rsid
)
);
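All of the trace.h hunks above make the same transformation; a generic sketch (names are illustrative, not from the patch): interesting members are copied into flat scalar fields in TP_fast_assign(), so TP_printk() no longer dereferences into a __field_struct, something userspace decoders such as perf typically cannot parse, while the full struct is still recorded for completeness.

TRACE_EVENT(example_event,
	TP_PROTO(struct crw *crw),
	TP_ARGS(crw),
	TP_STRUCT__entry(
		__field_struct(struct crw, crw)	/* raw struct, kept as before */
		__field(u8, rsc)		/* flat copy for TP_printk() */
	),
	TP_fast_assign(
		__entry->crw = *crw;
		__entry->rsc = crw->rsc;
	),
	TP_printk("rsc=%d", __entry->rsc)
);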
diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h
deleted file mode 100644
index 16b59ce5e01d..000000000000
--- a/drivers/s390/crypto/ap_asm.h
+++ /dev/null
@@ -1,236 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright IBM Corp. 2016
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * Adjunct processor bus inline assemblies.
- */
-
-#ifndef _AP_ASM_H_
-#define _AP_ASM_H_
-
-#include <asm/isc.h>
-
-/**
- * ap_intructions_available() - Test if AP instructions are available.
- *
- * Returns 0 if the AP instructions are installed.
- */
-static inline int ap_instructions_available(void)
-{
- register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
- register unsigned long reg1 asm ("1") = -ENODEV;
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(
- " .long 0xb2af0000\n" /* PQAP(TAPQ) */
- "0: la %1,0\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc");
- return reg1;
-}
-
-/**
- * ap_tapq(): Test adjunct processor queue.
- * @qid: The AP queue number
- * @info: Pointer to queue descriptor
- *
- * Returns AP queue status structure.
- */
-static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
-{
- register unsigned long reg0 asm ("0") = qid;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
- : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
- if (info)
- *info = reg2;
- return reg1;
-}
-
-/**
- * ap_pqap_rapq(): Reset adjunct processor queue.
- * @qid: The AP queue number
- *
- * Returns AP queue status structure.
- */
-static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
-{
- register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = 0UL;
-
- asm volatile(
- ".long 0xb2af0000" /* PQAP(RAPQ) */
- : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
- return reg1;
-}
-
-/**
- * ap_aqic(): Control interruption for a specific AP.
- * @qid: The AP queue number
- * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
- * @ind: The notification indicator byte
- *
- * Returns AP queue status.
- */
-static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
- struct ap_qirq_ctrl qirqctrl,
- void *ind)
-{
- register unsigned long reg0 asm ("0") = qid | (3UL << 24);
- register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl;
- register struct ap_queue_status reg1_out asm ("1");
- register void *reg2 asm ("2") = ind;
-
- asm volatile(
- ".long 0xb2af0000" /* PQAP(AQIC) */
- : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
- :
- : "cc");
- return reg1_out;
-}
-
-/**
- * ap_qci(): Get AP configuration data
- *
- * Returns 0 on success, or -EOPNOTSUPP.
- */
-static inline int ap_qci(void *config)
-{
- register unsigned long reg0 asm ("0") = 0x04000000UL;
- register unsigned long reg1 asm ("1") = -EINVAL;
- register void *reg2 asm ("2") = (void *) config;
-
- asm volatile(
- ".long 0xb2af0000\n" /* PQAP(QCI) */
- "0: la %1,0\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (reg0), "+d" (reg1), "+d" (reg2)
- :
- : "cc", "memory");
-
- return reg1;
-}
-
-/*
- * union ap_qact_ap_info - used together with the
- * ap_aqic() function to provide a convenient way
- * to handle the ap info needed by the qact function.
- */
-union ap_qact_ap_info {
- unsigned long val;
- struct {
- unsigned int : 3;
- unsigned int mode : 3;
- unsigned int : 26;
- unsigned int cat : 8;
- unsigned int : 8;
- unsigned char ver[2];
- };
-};
-
-/**
- * ap_qact(): Query AP combatibility type.
- * @qid: The AP queue number
- * @apinfo: On input the info about the AP queue. On output the
- * alternate AP queue info provided by the qact function
- * in GR2 is stored in.
- *
- * Returns AP queue status. Check response_code field for failures.
- */
-static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
- union ap_qact_ap_info *apinfo)
-{
- register unsigned long reg0 asm ("0") = qid | (5UL << 24)
- | ((ifbit & 0x01) << 22);
- register unsigned long reg1_in asm ("1") = apinfo->val;
- register struct ap_queue_status reg1_out asm ("1");
- register unsigned long reg2 asm ("2") = 0;
-
- asm volatile(
- ".long 0xb2af0000" /* PQAP(QACT) */
- : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
- : : "cc");
- apinfo->val = reg2;
- return reg1_out;
-}
-
-/**
- * ap_nqap(): Send message to adjunct processor queue.
- * @qid: The AP queue number
- * @psmid: The program supplied message identifier
- * @msg: The message text
- * @length: The message length
- *
- * Returns AP queue status structure.
- * Condition code 1 on NQAP can't happen because the L bit is 1.
- * Condition code 2 on NQAP also means the send is incomplete,
- * because a segment boundary was reached. The NQAP is repeated.
- */
-static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
- unsigned long long psmid,
- void *msg, size_t length)
-{
- register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = (unsigned long) msg;
- register unsigned long reg3 asm ("3") = (unsigned long) length;
- register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
- register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
-
- asm volatile (
- "0: .long 0xb2ad0042\n" /* NQAP */
- " brc 2,0b"
- : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
- : "d" (reg4), "d" (reg5)
- : "cc", "memory");
- return reg1;
-}
-
-/**
- * ap_dqap(): Receive message from adjunct processor queue.
- * @qid: The AP queue number
- * @psmid: Pointer to program supplied message identifier
- * @msg: The message text
- * @length: The message length
- *
- * Returns AP queue status structure.
- * Condition code 1 on DQAP means the receive has taken place
- * but only partially. The response is incomplete, hence the
- * DQAP is repeated.
- * Condition code 2 on DQAP also means the receive is incomplete,
- * this time because a segment boundary was reached. Again, the
- * DQAP is repeated.
- * Note that gpr2 is used by the DQAP instruction to keep track of
- * any 'residual' length, in case the instruction gets interrupted.
- * Hence it gets zeroed before the instruction.
- */
-static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
- unsigned long long *psmid,
- void *msg, size_t length)
-{
- register unsigned long reg0 asm("0") = qid | 0x80000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm("2") = 0UL;
- register unsigned long reg4 asm("4") = (unsigned long) msg;
- register unsigned long reg5 asm("5") = (unsigned long) length;
- register unsigned long reg6 asm("6") = 0UL;
- register unsigned long reg7 asm("7") = 0UL;
-
-
- asm volatile(
- "0: .long 0xb2ae0064\n" /* DQAP */
- " brc 6,0b\n"
- : "+d" (reg0), "=d" (reg1), "+d" (reg2),
- "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7)
- : : "cc", "memory");
- *psmid = (((unsigned long long) reg6) << 32) + reg7;
- return reg1;
-}
-
-#endif /* _AP_ASM_H_ */
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 35a0c2b52f82..bf27fc4d1335 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -36,7 +36,6 @@
#include <linux/debugfs.h>
#include "ap_bus.h"
-#include "ap_asm.h"
#include "ap_debug.h"
/*
@@ -174,24 +173,6 @@ static inline int ap_qact_available(void)
return 0;
}
-/**
- * ap_test_queue(): Test adjunct processor queue.
- * @qid: The AP queue number
- * @tbit: Test facilities bit
- * @info: Pointer to queue descriptor
- *
- * Returns AP queue status structure.
- */
-struct ap_queue_status ap_test_queue(ap_qid_t qid,
- int tbit,
- unsigned long *info)
-{
- if (tbit)
- qid |= 1UL << 23; /* set T bit*/
- return ap_tapq(qid, info);
-}
-EXPORT_SYMBOL(ap_test_queue);
-
/*
* ap_query_configuration(): Fetch cryptographic config info
*
@@ -200,7 +181,7 @@ EXPORT_SYMBOL(ap_test_queue);
* is returned, e.g. if the PQAP(QCI) instruction is not
* available, the return value will be -EOPNOTSUPP.
*/
-int ap_query_configuration(struct ap_config_info *info)
+static inline int ap_query_configuration(struct ap_config_info *info)
{
if (!ap_configuration_available())
return -EOPNOTSUPP;
@@ -493,7 +474,7 @@ static int ap_poll_thread_start(void)
return 0;
mutex_lock(&ap_poll_thread_mutex);
ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
- rc = PTR_RET(ap_poll_kthread);
+ rc = PTR_ERR_OR_ZERO(ap_poll_kthread);
if (rc)
ap_poll_kthread = NULL;
mutex_unlock(&ap_poll_thread_mutex);
@@ -1261,7 +1242,7 @@ static int __init ap_module_init(void)
/* Create /sys/devices/ap. */
ap_root_device = root_device_register("ap");
- rc = PTR_RET(ap_root_device);
+ rc = PTR_ERR_OR_ZERO(ap_root_device);
if (rc)
goto out_bus;
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 6a273c5ebca5..936541937e15 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/types.h>
+#include <asm/isc.h>
#include <asm/ap.h>
#define AP_DEVICES 256 /* Number of AP devices. */
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 2c726df210f6..c13e43292cb7 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -14,7 +14,6 @@
#include <asm/facility.h>
#include "ap_bus.h"
-#include "ap_asm.h"
/*
* AP card related attributes.
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index ba3a2e13b0eb..e365171fe28f 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -14,26 +14,6 @@
#include <asm/facility.h>
#include "ap_bus.h"
-#include "ap_asm.h"
-
-/**
- * ap_queue_irq_ctrl(): Control interruption on a AP queue.
- * @qirqctrl: struct ap_qirq_ctrl (64 bit value)
- * @ind: The notification indicator byte
- *
- * Returns AP queue status.
- *
- * Control interruption on the given AP queue.
- * Just a simple wrapper function for the low level PQAP(AQIC)
- * instruction available for other kernel modules.
- */
-struct ap_queue_status ap_queue_irq_ctrl(ap_qid_t qid,
- struct ap_qirq_ctrl qirqctrl,
- void *ind)
-{
- return ap_aqic(qid, qirqctrl, ind);
-}
-EXPORT_SYMBOL(ap_queue_irq_ctrl);
/**
* ap_queue_enable_interruption(): Enable interruption on an AP queue.
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 3929c8be8098..e663432395c1 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -699,7 +699,7 @@ static int query_crypto_facility(u16 cardnr, u16 domain,
/* fill request cprb param block with FQ request */
preqparm = (struct fqreqparm *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "FQ", 2);
- strncpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
+ memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
preqparm->rule_array_len =
sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
preqparm->lv1.len = sizeof(preqparm->lv1);
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index 233e1e695208..da2c8dfd4d74 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -83,9 +83,21 @@ static ssize_t zcrypt_card_online_store(struct device *dev,
static DEVICE_ATTR(online, 0644, zcrypt_card_online_show,
zcrypt_card_online_store);
+static ssize_t zcrypt_card_load_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
+}
+
+static DEVICE_ATTR(load, 0444, zcrypt_card_load_show, NULL);
+
static struct attribute *zcrypt_card_attrs[] = {
&dev_attr_type.attr,
&dev_attr_online.attr,
+ &dev_attr_load.attr,
NULL,
};
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index 011d61d8a4ae..1752622b95f7 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -99,7 +99,7 @@ struct cca_pvt_ext_CRT_sec {
* @mex: pointer to user input data
* @p: pointer to memory area for the key
*
- * Returns the size of the key area or -EFAULT
+ * Returns the size of the key area or a negative errno value.
*/
static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
{
@@ -118,6 +118,15 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
unsigned char *temp;
int i;
+ /*
+ * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_modexpo(). However, do a plausibility check
+ * here to make sure the following copy_from_user() can't be utilized
+ * to compromise the system.
+ */
+ if (WARN_ON_ONCE(mex->inputdatalength > 512))
+ return -EINVAL;
+
memset(key, 0, sizeof(*key));
key->pubHdr = static_pub_hdr;
@@ -178,6 +187,15 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
struct cca_public_sec *pub;
int short_len, long_len, pad_len, key_len, size;
+ /*
+ * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_crt(). However, do a plausibility check
+ * here to make sure the following copy_from_user() can't be utilized
+ * to compromise the system.
+ */
+ if (WARN_ON_ONCE(crt->inputdatalength > 512))
+ return -EINVAL;
+
memset(key, 0, sizeof(*key));
short_len = (crt->inputdatalength + 1) / 2;
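Both checks here, and the PAGE_SIZE-bounded ones added to zcrypt_msgtype6.c below, follow one hardening pattern; a condensed, self-contained sketch (names and sizes are illustrative): re-validate a user-controlled length right before it sizes a copy_from_user(), even when an earlier dispatch path already checked it.

static int copy_key_material(void __user *ubuf, u16 len)
{
	u8 buf[64];

	/* Should have been caught by the dispatcher; fail loudly if not. */
	if (WARN_ON_ONCE(len > sizeof(buf)))
		return -EINVAL;
	if (copy_from_user(buf, ubuf, len))
		return -EFAULT;
	return 0;
}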
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 97d4bacbc442..e70ae078c86b 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -246,7 +246,7 @@ int speed_idx_ep11(int req_type)
* @ap_msg: pointer to AP message
* @mex: pointer to user input data
*
- * Returns 0 on success or -EFAULT.
+ * Returns 0 on success or a negative errno value.
*/
static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
@@ -272,6 +272,14 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
} __packed * msg = ap_msg->message;
int size;
+ /*
+ * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_modexpo(). However, make sure the following
+ * copy_from_user() never exceeds the allocated buffer space.
+ */
+ if (WARN_ON_ONCE(mex->inputdatalength > PAGE_SIZE))
+ return -EINVAL;
+
/* VUD.ciphertext */
msg->length = mex->inputdatalength + 2;
if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
@@ -307,7 +315,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
* @ap_msg: pointer to AP message
* @crt: pointer to user input data
*
- * Returns 0 on success or -EFAULT.
+ * Returns 0 on success or a negative errno value.
*/
static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
@@ -334,6 +342,14 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
} __packed * msg = ap_msg->message;
int size;
+ /*
+ * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_crt(). However, make sure the following
+ * copy_from_user() never exceeds the allocated buffer space.
+ */
+ if (WARN_ON_ONCE(crt->inputdatalength > PAGE_SIZE))
+ return -EINVAL;
+
/* VUD.ciphertext */
msg->length = crt->inputdatalength + 2;
if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 720434e18007..91a52f268353 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -75,8 +75,20 @@ static ssize_t zcrypt_queue_online_store(struct device *dev,
static DEVICE_ATTR(online, 0644, zcrypt_queue_online_show,
zcrypt_queue_online_store);
+static ssize_t zcrypt_queue_load_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
+}
+
+static DEVICE_ATTR(load, 0444, zcrypt_queue_load_show, NULL);
+
static struct attribute *zcrypt_queue_attrs[] = {
&dev_attr_online.attr,
+ &dev_attr_load.attr,
NULL,
};
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index c7e484f70654..7c5a25ddf832 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -95,4 +95,14 @@ config CCWGROUP
tristate
default (LCS || CTCM || QETH)
+config ISM
+ tristate "Support for ISM vPCI Adapter"
+ depends on PCI && SMC
+ default n
+ help
+ Select this option if you want to use the Internal Shared Memory
+ vPCI Adapter.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ism. If unsure, choose N.
endmenu
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 513b7ae64980..f2d6bbe57a6f 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -15,3 +15,6 @@ qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
obj-$(CONFIG_QETH_L2) += qeth_l2.o
qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
obj-$(CONFIG_QETH_L3) += qeth_l3.o
+
+ism-y := ism_drv.o
+obj-$(CONFIG_ISM) += ism.o
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
new file mode 100644
index 000000000000..0aab90817326
--- /dev/null
+++ b/drivers/s390/net/ism.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_ISM_H
+#define S390_ISM_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <net/smc.h>
+
+#define UTIL_STR_LEN 16
+
+/*
+ * Do not use the first word of the DMB bits, to ensure 8-byte-aligned access.
+ */
+#define ISM_DMB_WORD_OFFSET 1
+#define ISM_DMB_BIT_OFFSET (ISM_DMB_WORD_OFFSET * 32)
+#define ISM_NR_DMBS 1920
+
+#define ISM_REG_SBA 0x1
+#define ISM_REG_IEQ 0x2
+#define ISM_READ_GID 0x3
+#define ISM_ADD_VLAN_ID 0x4
+#define ISM_DEL_VLAN_ID 0x5
+#define ISM_SET_VLAN 0x6
+#define ISM_RESET_VLAN 0x7
+#define ISM_QUERY_INFO 0x8
+#define ISM_QUERY_RGID 0x9
+#define ISM_REG_DMB 0xA
+#define ISM_UNREG_DMB 0xB
+#define ISM_SIGNAL_IEQ 0xE
+#define ISM_UNREG_SBA 0x11
+#define ISM_UNREG_IEQ 0x12
+
+#define ISM_ERROR 0xFFFF
+
+struct ism_req_hdr {
+ u32 cmd;
+ u16 : 16;
+ u16 len;
+};
+
+struct ism_resp_hdr {
+ u32 cmd;
+ u16 ret;
+ u16 len;
+};
+
+union ism_reg_sba {
+ struct {
+ struct ism_req_hdr hdr;
+ u64 sba;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ } response;
+} __aligned(16);
+
+union ism_reg_ieq {
+ struct {
+ struct ism_req_hdr hdr;
+ u64 ieq;
+ u64 len;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ } response;
+} __aligned(16);
+
+union ism_read_gid {
+ struct {
+ struct ism_req_hdr hdr;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ u64 gid;
+ } response;
+} __aligned(16);
+
+union ism_qi {
+ struct {
+ struct ism_req_hdr hdr;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ u32 version;
+ u32 max_len;
+ u64 ism_state;
+ u64 my_gid;
+ u64 sba;
+ u64 ieq;
+ u32 ieq_len;
+ u32 : 32;
+ u32 dmbs_owned;
+ u32 dmbs_used;
+ u32 vlan_required;
+ u32 vlan_nr_ids;
+ u16 vlan_id[64];
+ } response;
+} __aligned(64);
+
+union ism_query_rgid {
+ struct {
+ struct ism_req_hdr hdr;
+ u64 rgid;
+ u32 vlan_valid;
+ u32 vlan_id;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ } response;
+} __aligned(16);
+
+union ism_reg_dmb {
+ struct {
+ struct ism_req_hdr hdr;
+ u64 dmb;
+ u32 dmb_len;
+ u32 sba_idx;
+ u32 vlan_valid;
+ u32 vlan_id;
+ u64 rgid;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ u64 dmb_tok;
+ } response;
+} __aligned(32);
+
+union ism_sig_ieq {
+ struct {
+ struct ism_req_hdr hdr;
+ u64 rgid;
+ u32 trigger_irq;
+ u32 event_code;
+ u64 info;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ } response;
+} __aligned(32);
+
+union ism_unreg_dmb {
+ struct {
+ struct ism_req_hdr hdr;
+ u64 dmb_tok;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ } response;
+} __aligned(16);
+
+union ism_cmd_simple {
+ struct {
+ struct ism_req_hdr hdr;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ } response;
+} __aligned(8);
+
+union ism_set_vlan_id {
+ struct {
+ struct ism_req_hdr hdr;
+ u64 vlan_id;
+ } request;
+ struct {
+ struct ism_resp_hdr hdr;
+ } response;
+} __aligned(16);
+
+struct ism_eq_header {
+ u64 idx;
+ u64 ieq_len;
+ u64 entry_len;
+ u64 : 64;
+};
+
+struct ism_eq {
+ struct ism_eq_header header;
+ struct smcd_event entry[15];
+};
+
+struct ism_sba {
+ u32 s : 1; /* summary bit */
+ u32 e : 1; /* event bit */
+ u32 : 30;
+ u32 dmb_bits[ISM_NR_DMBS / 32];
+ u32 reserved[3];
+ u16 dmbe_mask[ISM_NR_DMBS];
+};
+
+struct ism_dev {
+ spinlock_t lock;
+ struct pci_dev *pdev;
+ struct smcd_dev *smcd;
+
+ void __iomem *ctl;
+
+ struct ism_sba *sba;
+ dma_addr_t sba_dma_addr;
+ DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
+
+ struct ism_eq *ieq;
+ dma_addr_t ieq_dma_addr;
+
+ int ieq_idx;
+};
+
+#define ISM_CREATE_REQ(dmb, idx, sf, offset) \
+ ((dmb) | (idx) << 24 | (sf) << 23 | (offset))
+
+static inline int __ism_move(struct ism_dev *ism, u64 dmb_req, void *data,
+ unsigned int size)
+{
+ struct zpci_dev *zdev = to_zpci(ism->pdev);
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size);
+
+ return zpci_write_block(req, data, dmb_req);
+}
+
+#endif /* S390_ISM_H */
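One reading of ISM_CREATE_REQ above (an interpretation; the patch itself does not document the layout): the DMB token is assumed to occupy the high-order bits, leaving the low word free for the DMBE index (bits 24 and up), the signal flag (bit 23) and the byte offset, which here never exceeds one page.

/* Illustrative composition: DMBE index 2, signal flag set, offset 0x100. */
u64 req = ISM_CREATE_REQ(dmb_tok, 2, 1, 0x100);
	/* == dmb_tok | (2 << 24) | (1 << 23) | 0x100 */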
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
new file mode 100644
index 000000000000..c0631895154e
--- /dev/null
+++ b/drivers/s390/net/ism_drv.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ISM driver for s390.
+ *
+ * Copyright IBM Corp. 2018
+ */
+#define KMSG_COMPONENT "ism"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/err.h>
+#include <net/smc.h>
+
+#include <asm/debug.h>
+
+#include "ism.h"
+
+MODULE_DESCRIPTION("ISM driver for s390");
+MODULE_LICENSE("GPL");
+
+#define PCI_DEVICE_ID_IBM_ISM 0x04ED
+#define DRV_NAME "ism"
+
+static const struct pci_device_id ism_device_table[] = {
+ { PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ism_device_table);
+
+static debug_info_t *ism_debug_info;
+
+static int ism_cmd(struct ism_dev *ism, void *cmd)
+{
+ struct ism_req_hdr *req = cmd;
+ struct ism_resp_hdr *resp = cmd;
+
+ memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req));
+ memcpy_toio(ism->ctl, req, sizeof(*req));
+
+ WRITE_ONCE(resp->ret, ISM_ERROR);
+
+ memcpy_fromio(resp, ism->ctl, sizeof(*resp));
+ if (resp->ret) {
+ debug_text_event(ism_debug_info, 0, "cmd failure");
+ debug_event(ism_debug_info, 0, resp, sizeof(*resp));
+ goto out;
+ }
+ memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp),
+ resp->len - sizeof(*resp));
+out:
+ return resp->ret;
+}
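/*
 * Reading of ism_cmd() above (an inference; the ordering is not
 * documented in the patch): the request payload is copied to the MMIO
 * control area before the header, suggesting that the store of the
 * header word triggers execution; the response header is then read
 * back, and the response payload only on success.
 */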
+
+static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
+{
+ union ism_cmd_simple cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = cmd_code;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ return ism_cmd(ism, &cmd);
+}
+
+static int query_info(struct ism_dev *ism)
+{
+ union ism_qi cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_QUERY_INFO;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ if (ism_cmd(ism, &cmd))
+ goto out;
+
+ debug_text_event(ism_debug_info, 3, "query info");
+ debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
+out:
+ return 0;
+}
+
+static int register_sba(struct ism_dev *ism)
+{
+ union ism_reg_sba cmd;
+ dma_addr_t dma_handle;
+ struct ism_sba *sba;
+
+ sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
+ &dma_handle, GFP_KERNEL);
+ if (!sba)
+ return -ENOMEM;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_REG_SBA;
+ cmd.request.hdr.len = sizeof(cmd.request);
+ cmd.request.sba = dma_handle;
+
+ if (ism_cmd(ism, &cmd)) {
+ dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
+ return -EIO;
+ }
+
+ ism->sba = sba;
+ ism->sba_dma_addr = dma_handle;
+
+ return 0;
+}
+
+static int register_ieq(struct ism_dev *ism)
+{
+ union ism_reg_ieq cmd;
+ dma_addr_t dma_handle;
+ struct ism_eq *ieq;
+
+ ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
+ &dma_handle, GFP_KERNEL);
+ if (!ieq)
+ return -ENOMEM;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_REG_IEQ;
+ cmd.request.hdr.len = sizeof(cmd.request);
+ cmd.request.ieq = dma_handle;
+ cmd.request.len = sizeof(*ieq);
+
+ if (ism_cmd(ism, &cmd)) {
+ dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
+ return -EIO;
+ }
+
+ ism->ieq = ieq;
+ ism->ieq_idx = -1;
+ ism->ieq_dma_addr = dma_handle;
+
+ return 0;
+}
+
+static int unregister_sba(struct ism_dev *ism)
+{
+ if (!ism->sba)
+ return 0;
+
+ if (ism_cmd_simple(ism, ISM_UNREG_SBA))
+ return -EIO;
+
+ dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
+ ism->sba, ism->sba_dma_addr);
+
+ ism->sba = NULL;
+ ism->sba_dma_addr = 0;
+
+ return 0;
+}
+
+static int unregister_ieq(struct ism_dev *ism)
+{
+ if (!ism->ieq)
+ return 0;
+
+ if (ism_cmd_simple(ism, ISM_UNREG_IEQ))
+ return -EIO;
+
+ dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
+ ism->ieq, ism->ieq_dma_addr);
+
+ ism->ieq = NULL;
+ ism->ieq_dma_addr = 0;
+
+ return 0;
+}
+
+static int ism_read_local_gid(struct ism_dev *ism)
+{
+ union ism_read_gid cmd;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_READ_GID;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ ret = ism_cmd(ism, &cmd);
+ if (ret)
+ goto out;
+
+ ism->smcd->local_gid = cmd.response.gid;
+out:
+ return ret;
+}
+
+static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
+ u32 vid)
+{
+ struct ism_dev *ism = smcd->priv;
+ union ism_query_rgid cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_QUERY_RGID;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.rgid = rgid;
+ cmd.request.vlan_valid = vid_valid;
+ cmd.request.vlan_id = vid;
+
+ return ism_cmd(ism, &cmd);
+}
+
+static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
+{
+ clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
+ dmb->cpu_addr, dmb->dma_addr);
+}
+
+static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
+{
+ unsigned long bit;
+
+ if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
+ return -EINVAL;
+
+ if (!dmb->sba_idx) {
+ bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
+ ISM_DMB_BIT_OFFSET);
+ if (bit == ISM_NR_DMBS)
+ return -ENOMEM;
+
+ dmb->sba_idx = bit;
+ }
+ if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
+ test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
+ return -EINVAL;
+
+ dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len,
+ &dmb->dma_addr, GFP_KERNEL |
+ __GFP_NOWARN | __GFP_NOMEMALLOC |
+ __GFP_COMP | __GFP_NORETRY);
+ if (!dmb->cpu_addr)
+ clear_bit(dmb->sba_idx, ism->sba_bitmap);
+
+ return dmb->cpu_addr ? 0 : -ENOMEM;
+}
+
+static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+{
+ struct ism_dev *ism = smcd->priv;
+ union ism_reg_dmb cmd;
+ int ret;
+
+ ret = ism_alloc_dmb(ism, dmb);
+ if (ret)
+ goto out;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_REG_DMB;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.dmb = dmb->dma_addr;
+ cmd.request.dmb_len = dmb->dmb_len;
+ cmd.request.sba_idx = dmb->sba_idx;
+ cmd.request.vlan_valid = dmb->vlan_valid;
+ cmd.request.vlan_id = dmb->vlan_id;
+ cmd.request.rgid = dmb->rgid;
+
+ ret = ism_cmd(ism, &cmd);
+ if (ret) {
+ ism_free_dmb(ism, dmb);
+ goto out;
+ }
+ dmb->dmb_tok = cmd.response.dmb_tok;
+out:
+ return ret;
+}
+
+static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+{
+ struct ism_dev *ism = smcd->priv;
+ union ism_unreg_dmb cmd;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_UNREG_DMB;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.dmb_tok = dmb->dmb_tok;
+
+ ret = ism_cmd(ism, &cmd);
+ if (ret)
+ goto out;
+
+ ism_free_dmb(ism, dmb);
+out:
+ return ret;
+}
+
+static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+{
+ struct ism_dev *ism = smcd->priv;
+ union ism_set_vlan_id cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.vlan_id = vlan_id;
+
+ return ism_cmd(ism, &cmd);
+}
+
+static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+{
+ struct ism_dev *ism = smcd->priv;
+ union ism_set_vlan_id cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.vlan_id = vlan_id;
+
+ return ism_cmd(ism, &cmd);
+}
+
+static int ism_set_vlan_required(struct smcd_dev *smcd)
+{
+ return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
+}
+
+static int ism_reset_vlan_required(struct smcd_dev *smcd)
+{
+ return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
+}
+
+static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
+ u32 event_code, u64 info)
+{
+ struct ism_dev *ism = smcd->priv;
+ union ism_sig_ieq cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.rgid = rgid;
+ cmd.request.trigger_irq = trigger_irq;
+ cmd.request.event_code = event_code;
+ cmd.request.info = info;
+
+ return ism_cmd(ism, &cmd);
+}
+
+static unsigned int max_bytes(unsigned int start, unsigned int len,
+ unsigned int boundary)
+{
+ return min(boundary - (start & (boundary - 1)), len);
+}
+
+static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data, unsigned int size)
+{
+ struct ism_dev *ism = smcd->priv;
+ unsigned int bytes;
+ u64 dmb_req;
+ int ret;
+
+ while (size) {
+ bytes = max_bytes(offset, size, PAGE_SIZE);
+ dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
+ offset);
+
+ ret = __ism_move(ism, dmb_req, data, bytes);
+ if (ret)
+ return ret;
+
+ size -= bytes;
+ data += bytes;
+ offset += bytes;
+ }
+
+ return 0;
+}
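/*
 * Worked example for the loop above (illustrative numbers): with
 * offset = 3000, size = 5000 and PAGE_SIZE = 4096, max_bytes() yields
 * min(4096 - (3000 & 4095), 5000) = 1096 bytes up to the page boundary,
 * then 3904 bytes for the remainder; only that final chunk can carry
 * the signal flag, as guarded by "size == bytes ? sf : 0".
 */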
+
+static void ism_handle_event(struct ism_dev *ism)
+{
+ struct smcd_event *entry;
+
+ while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
+ if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
+ ism->ieq_idx = 0;
+
+ entry = &ism->ieq->entry[ism->ieq_idx];
+ debug_event(ism_debug_info, 2, entry, sizeof(*entry));
+ smcd_handle_event(ism->smcd, entry);
+ }
+}
+
+static irqreturn_t ism_handle_irq(int irq, void *data)
+{
+ struct ism_dev *ism = data;
+ unsigned long bit, end;
+ unsigned long *bv;
+
+ bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
+ end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;
+
+ spin_lock(&ism->lock);
+ ism->sba->s = 0;
+ barrier();
+ for (bit = 0;;) {
+ bit = find_next_bit_inv(bv, end, bit);
+ if (bit >= end)
+ break;
+
+ clear_bit_inv(bit, bv);
+ barrier();
+ smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
+ ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
+ }
+
+ if (ism->sba->e) {
+ ism->sba->e = 0;
+ barrier();
+ ism_handle_event(ism);
+ }
+ spin_unlock(&ism->lock);
+ return IRQ_HANDLED;
+}
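/*
 * Background note on the scan above (general s390 knowledge, not from
 * the patch): find_next_bit_inv() and clear_bit_inv() operate on
 * MSB-first ("inverted") bitmaps, matching the bit numbering the
 * hardware uses when it sets DMB bits in the SBA.
 */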
+
+static const struct smcd_ops ism_ops = {
+ .query_remote_gid = ism_query_rgid,
+ .register_dmb = ism_register_dmb,
+ .unregister_dmb = ism_unregister_dmb,
+ .add_vlan_id = ism_add_vlan_id,
+ .del_vlan_id = ism_del_vlan_id,
+ .set_vlan_required = ism_set_vlan_required,
+ .reset_vlan_required = ism_reset_vlan_required,
+ .signal_event = ism_signal_ieq,
+ .move_data = ism_move,
+};
+
+static int ism_dev_init(struct ism_dev *ism)
+{
+ struct pci_dev *pdev = ism->pdev;
+ int ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret <= 0)
+ goto out;
+
+ ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
+ pci_name(pdev), ism);
+ if (ret)
+ goto free_vectors;
+
+ ret = register_sba(ism);
+ if (ret)
+ goto free_irq;
+
+ ret = register_ieq(ism);
+ if (ret)
+ goto unreg_sba;
+
+ ret = ism_read_local_gid(ism);
+ if (ret)
+ goto unreg_ieq;
+
+ ret = smcd_register_dev(ism->smcd);
+ if (ret)
+ goto unreg_ieq;
+
+ query_info(ism);
+ return 0;
+
+unreg_ieq:
+ unregister_ieq(ism);
+unreg_sba:
+ unregister_sba(ism);
+free_irq:
+ free_irq(pci_irq_vector(pdev, 0), ism);
+free_vectors:
+ pci_free_irq_vectors(pdev);
+out:
+ return ret;
+}
+
+static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ism_dev *ism;
+ int ret;
+
+ ism = kzalloc(sizeof(*ism), GFP_KERNEL);
+ if (!ism)
+ return -ENOMEM;
+
+ spin_lock_init(&ism->lock);
+ dev_set_drvdata(&pdev->dev, ism);
+ ism->pdev = pdev;
+
+ ret = pci_enable_device_mem(pdev);
+ if (ret)
+ goto err;
+
+ ret = pci_request_mem_regions(pdev, DRV_NAME);
+ if (ret)
+ goto err_disable;
+
+ ism->ctl = pci_iomap(pdev, 2, 0);
+ if (!ism->ctl)
+ goto err_resource;
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret)
+ goto err_unmap;
+
+ pci_set_dma_seg_boundary(pdev, SZ_1M - 1);
+ pci_set_dma_max_seg_size(pdev, SZ_1M);
+ pci_set_master(pdev);
+
+ ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
+ ISM_NR_DMBS);
+ if (!ism->smcd)
+ goto err_unmap;
+
+ ism->smcd->priv = ism;
+ ret = ism_dev_init(ism);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ smcd_free_dev(ism->smcd);
+err_unmap:
+ pci_iounmap(pdev, ism->ctl);
+err_resource:
+ pci_release_mem_regions(pdev);
+err_disable:
+ pci_disable_device(pdev);
+err:
+ kfree(ism);
+ dev_set_drvdata(&pdev->dev, NULL);
+ return ret;
+}
+
+static void ism_dev_exit(struct ism_dev *ism)
+{
+ struct pci_dev *pdev = ism->pdev;
+
+ smcd_unregister_dev(ism->smcd);
+ unregister_ieq(ism);
+ unregister_sba(ism);
+ free_irq(pci_irq_vector(pdev, 0), ism);
+ pci_free_irq_vectors(pdev);
+}
+
+static void ism_remove(struct pci_dev *pdev)
+{
+ struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
+
+ ism_dev_exit(ism);
+
+ smcd_free_dev(ism->smcd);
+ pci_iounmap(pdev, ism->ctl);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+ dev_set_drvdata(&pdev->dev, NULL);
+ kfree(ism);
+}
+
+static int ism_suspend(struct device *dev)
+{
+ struct ism_dev *ism = dev_get_drvdata(dev);
+
+ ism_dev_exit(ism);
+ return 0;
+}
+
+static int ism_resume(struct device *dev)
+{
+ struct ism_dev *ism = dev_get_drvdata(dev);
+
+ return ism_dev_init(ism);
+}
+
+static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume);
+
+static struct pci_driver ism_driver = {
+ .name = DRV_NAME,
+ .id_table = ism_device_table,
+ .probe = ism_probe,
+ .remove = ism_remove,
+ .driver = {
+ .pm = &ism_pm_ops,
+ },
+};
+
+static int __init ism_init(void)
+{
+ int ret;
+
+ ism_debug_info = debug_register("ism", 2, 1, 16);
+ if (!ism_debug_info)
+ return -ENODEV;
+
+ debug_register_view(ism_debug_info, &debug_hex_ascii_view);
+ ret = pci_register_driver(&ism_driver);
+ if (ret)
+ debug_unregister(ism_debug_info);
+
+ return ret;
+}
+
+static void __exit ism_exit(void)
+{
+ pci_unregister_driver(&ism_driver);
+ debug_unregister(ism_debug_info);
+}
+
+module_init(ism_init);
+module_exit(ism_exit);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index a246a618f9a4..34e0d476c5c6 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -104,6 +104,7 @@ struct qeth_dbf_info {
struct qeth_perf_stats {
unsigned int bufs_rec;
unsigned int bufs_sent;
+ unsigned int buf_elements_sent;
unsigned int skbs_sent_pack;
unsigned int bufs_sent_pack;
@@ -137,7 +138,6 @@ struct qeth_perf_stats {
unsigned int large_send_bytes;
unsigned int large_send_cnt;
unsigned int sg_skbs_sent;
- unsigned int sg_frags_sent;
/* initial values when measuring starts */
unsigned long initial_rx_packets;
unsigned long initial_tx_packets;
@@ -235,6 +235,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
#define QETH_IDX_FUNC_LEVEL_IQD 0x4108
#define QETH_BUFSIZE 4096
+#define CCW_CMD_WRITE 0x01
+#define CCW_CMD_READ 0x02
/**
* some more defs
@@ -465,7 +467,6 @@ struct qeth_qdio_out_buffer {
struct sk_buff_head skb_list;
int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
- struct qaob *aob;
struct qeth_qdio_out_q *q;
struct qeth_qdio_out_buffer *next_pending;
};
@@ -593,7 +594,7 @@ static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
*/
struct qeth_channel {
enum qeth_channel_states state;
- struct ccw1 ccw;
+ struct ccw1 *ccw;
spinlock_t iob_lock;
wait_queue_head_t wait_q;
struct ccw_device *ccwdev;
@@ -659,12 +660,8 @@ struct qeth_card_info {
char mcl_level[QETH_MCL_LENGTH + 1];
int guestlan;
int mac_bits;
- int portno;
enum qeth_card_types type;
enum qeth_link_types link_type;
- int is_multicast_different;
- int initial_mtu;
- int max_mtu;
int broadcast_capable;
int unique_id;
bool layer_enforced;
@@ -935,6 +932,19 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
data, QETH_PROT_IPV6);
}
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
+ int ipv);
+static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card,
+ struct sk_buff *skb,
+ int ipv, int cast_type)
+{
+ if (IS_IQD(card) && cast_type != RTN_UNICAST)
+ return card->qdio.out_qs[card->qdio.no_out_queues - 1];
+ if (!card->qdio.do_prio_queueing)
+ return card->qdio.out_qs[card->qdio.default_out_queue];
+ return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)];
+}
+
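/*
 * Illustrative use of the helper above; the surrounding xmit function
 * is hypothetical, not part of the patch.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;

	/* Assume IPv4 unicast for the sketch. */
	queue = qeth_get_tx_queue(card, skb, 4, RTN_UNICAST);
	/* ...fill a buffer on "queue" and flush it... */
	return NETDEV_TX_OK;
}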
extern struct qeth_discipline qeth_l2_discipline;
extern struct qeth_discipline qeth_l3_discipline;
extern const struct attribute_group *qeth_generic_attr_groups[];
@@ -955,6 +965,7 @@ extern struct qeth_card_list_struct qeth_core_card_list;
extern struct kmem_cache *qeth_core_header_cache;
extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
+struct net_device *qeth_clone_netdev(struct net_device *orig);
void qeth_set_recovery_task(struct qeth_card *);
void qeth_clear_recovery_task(struct qeth_card *);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
@@ -972,7 +983,6 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
void *);
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
enum qeth_ipa_cmds, enum qeth_prot_versions);
-int qeth_query_setadapterparms(struct qeth_card *);
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
struct qeth_hdr **);
@@ -985,24 +995,18 @@ void qeth_clear_cmd_buffers(struct qeth_channel *);
void qeth_clear_qdio_buffers(struct qeth_card *);
void qeth_setadp_promisc_mode(struct qeth_card *);
struct net_device_stats *qeth_get_stats(struct net_device *);
-int qeth_change_mtu(struct net_device *, int);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *);
void qeth_prepare_control_data(struct qeth_card *, int,
struct qeth_cmd_buffer *);
void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
-void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
+void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob);
struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
void *reply_param);
-int qeth_bridgeport_query_ports(struct qeth_card *card,
- enum qeth_sbp_roles *role, enum qeth_sbp_states *state);
-int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
-int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
-int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
int extra_elems, int data_offset);
int qeth_get_elements_for_frags(struct sk_buff *);
@@ -1026,7 +1030,6 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
-int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
void qeth_trace_features(struct qeth_card *);
void qeth_close_dev(struct qeth_card *);
int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
@@ -1046,7 +1049,9 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
int qeth_vm_request_mac(struct qeth_card *card);
-int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);
+int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr **hdr, unsigned int hdr_len,
+ unsigned int proto_len, unsigned int *elements);
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index d01ac29fd986..49f64eb3eab0 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -65,7 +65,6 @@ static struct mutex qeth_mod_mutex;
static void qeth_send_control_data_cb(struct qeth_channel *,
struct qeth_cmd_buffer *);
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
-static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_buffers(struct qeth_card *);
@@ -473,7 +472,6 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
QETH_QDIO_BUF_HANDLED_DELAYED)) {
/* for recovery situations */
- q->bufs[bidx]->aob = q->bufstates[bidx].aob;
qeth_init_qdio_out_buf(q, bidx);
QETH_CARD_TEXT(q->card, 2, "clprecov");
}
@@ -510,7 +508,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
}
qeth_notify_skbs(buffer->q, buffer, notification);
- buffer->aob = NULL;
/* Free dangling allocations. The attached skbs are handled by
* qeth_cleanup_handled_pending().
*/
@@ -534,15 +531,24 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
queue == card->qdio.no_in_queues - 1;
}
+static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u32 len, void *data)
+{
+ ccw->cmd_code = cmd_code;
+ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = len;
+ ccw->cda = (__u32) __pa(data);
+}
+
static int __qeth_issue_next_read(struct qeth_card *card)
{
- int rc;
+ struct qeth_channel *channel = &card->read;
struct qeth_cmd_buffer *iob;
+ int rc;
QETH_CARD_TEXT(card, 5, "issnxrd");
- if (card->read.state != CH_STATE_UP)
+ if (channel->state != CH_STATE_UP)
return -EIO;
- iob = qeth_get_buffer(&card->read);
+ iob = qeth_get_buffer(channel);
if (!iob) {
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
@@ -550,14 +556,14 @@ static int __qeth_issue_next_read(struct qeth_card *card)
"available\n", dev_name(&card->gdev->dev));
return -ENOMEM;
}
- qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
+ qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
QETH_CARD_TEXT(card, 6, "noirqpnd");
- rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
+ rc = ccw_device_start(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0);
if (rc) {
QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
"rc=%i\n", dev_name(&card->gdev->dev), rc);
- atomic_set(&card->read.irq_pending, 0);
+ atomic_set(&channel->irq_pending, 0);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
wake_up(&card->wait_q);
@@ -655,8 +661,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
cmd->hdr.return_code, card);
}
card->lan_online = 0;
- if (card->dev && netif_carrier_ok(card->dev))
- netif_carrier_off(card->dev);
+ netif_carrier_off(card->dev);
return NULL;
case IPA_CMD_STARTLAN:
dev_info(&card->gdev->dev,
@@ -747,21 +752,6 @@ static struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
return card;
}
-static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
- __u32 len)
-{
- struct qeth_card *card;
-
- card = CARD_FROM_CDEV(channel->ccwdev);
- QETH_CARD_TEXT(card, 4, "setupccw");
- if (channel == &card->read)
- memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
- else
- memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
- channel->ccw.count = len;
- channel->ccw.cda = (__u32) __pa(iob);
-}
-
static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
{
__u8 index;
@@ -909,11 +899,22 @@ out:
qeth_release_buffer(channel, iob);
}
-static int qeth_setup_channel(struct qeth_channel *channel)
+static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
{
int cnt;
QETH_DBF_TEXT(SETUP, 2, "setupch");
+
+ channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+ if (!channel->ccw)
+ return -ENOMEM;
+ channel->state = CH_STATE_DOWN;
+ atomic_set(&channel->irq_pending, 0);
+ init_waitqueue_head(&channel->wait_q);
+
+ if (!alloc_buffers)
+ return 0;
+
for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
channel->iob[cnt].data =
kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
@@ -925,15 +926,14 @@ static int qeth_setup_channel(struct qeth_channel *channel)
channel->iob[cnt].rc = 0;
}
if (cnt < QETH_CMD_BUFFER_NO) {
+ kfree(channel->ccw);
while (cnt-- > 0)
kfree(channel->iob[cnt].data);
return -ENOMEM;
}
channel->io_buf_no = 0;
- atomic_set(&channel->irq_pending, 0);
spin_lock_init(&channel->iob_lock);
- init_waitqueue_head(&channel->wait_q);
return 0;
}
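/*
 * Rationale sketch (inference, not stated in the patch): the CCW is
 * fetched by the channel subsystem and its data address is 31 bits, so
 * it is allocated with GFP_DMA here; with the ccw1 moved out of struct
 * qeth_card, qeth_alloc_card() below can drop GFP_DMA for the card
 * structure itself.
 */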
@@ -1267,8 +1267,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
}
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf,
- enum qeth_qdio_buffer_states newbufstate)
+ struct qeth_qdio_out_buffer *buf)
{
int i;
@@ -1276,23 +1275,19 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
atomic_dec(&queue->set_pci_flags_count);
- if (newbufstate == QETH_QDIO_BUF_EMPTY) {
- qeth_release_skbs(buf);
- }
+ qeth_release_skbs(buf);
+
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
if (buf->buffer->element[i].addr && buf->is_header[i])
kmem_cache_free(qeth_core_header_cache,
buf->buffer->element[i].addr);
buf->is_header[i] = 0;
- buf->buffer->element[i].length = 0;
- buf->buffer->element[i].addr = NULL;
- buf->buffer->element[i].eflags = 0;
- buf->buffer->element[i].sflags = 0;
}
- buf->buffer->element[15].eflags = 0;
- buf->buffer->element[15].sflags = 0;
+
+ qeth_scrub_qdio_buffer(buf->buffer,
+ QETH_MAX_BUFFER_ELEMENTS(queue->card));
buf->next_element_to_fill = 0;
- atomic_set(&buf->state, newbufstate);
+ atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
@@ -1303,7 +1298,7 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
if (!q->bufs[j])
continue;
qeth_cleanup_handled_pending(q, j, 1);
- qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
+ qeth_clear_output_buffer(q, q->bufs[j]);
if (free) {
kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
q->bufs[j] = NULL;
@@ -1345,6 +1340,7 @@ static void qeth_clean_channel(struct qeth_channel *channel)
QETH_DBF_TEXT(SETUP, 2, "freech");
for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
kfree(channel->iob[cnt].data);
+ kfree(channel->ccw);
}
static void qeth_set_single_write_queues(struct qeth_card *card)
@@ -1401,6 +1397,10 @@ static void qeth_init_qdio_info(struct qeth_card *card)
{
QETH_DBF_TEXT(SETUP, 4, "intqdinf");
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
+ card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
+ card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+ card->qdio.no_out_queues = QETH_MAX_QUEUES;
+
/* inbound */
card->qdio.no_in_queues = 1;
card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
@@ -1413,12 +1413,10 @@ static void qeth_init_qdio_info(struct qeth_card *card)
INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}
-static void qeth_set_intial_options(struct qeth_card *card)
+static void qeth_set_initial_options(struct qeth_card *card)
{
card->options.route4.type = NO_ROUTER;
card->options.route6.type = NO_ROUTER;
- card->options.fake_broadcast = 0;
- card->options.performance_stats = 0;
card->options.rx_sg_cb = QETH_RX_SG_CB;
card->options.isolation = ISOLATION_MODE_NONE;
card->options.cq = QETH_CQ_DISABLED;
@@ -1461,19 +1459,13 @@ static void qeth_start_kernel_thread(struct work_struct *work)
}
static void qeth_buffer_reclaim_work(struct work_struct *);
-static int qeth_setup_card(struct qeth_card *card)
+static void qeth_setup_card(struct qeth_card *card)
{
-
QETH_DBF_TEXT(SETUP, 2, "setupcrd");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- card->read.state = CH_STATE_DOWN;
- card->write.state = CH_STATE_DOWN;
- card->data.state = CH_STATE_DOWN;
+ card->info.type = CARD_RDEV(card)->id.driver_info;
card->state = CARD_STATE_DOWN;
- card->lan_online = 0;
- card->read_or_write_problem = 0;
- card->dev = NULL;
spin_lock_init(&card->mclock);
spin_lock_init(&card->lock);
spin_lock_init(&card->ip_lock);
@@ -1481,24 +1473,15 @@ static int qeth_setup_card(struct qeth_card *card)
mutex_init(&card->conf_mutex);
mutex_init(&card->discipline_mutex);
mutex_init(&card->vid_list_mutex);
- card->thread_start_mask = 0;
- card->thread_allowed_mask = 0;
- card->thread_running_mask = 0;
INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
INIT_LIST_HEAD(&card->cmd_waiter_list);
init_waitqueue_head(&card->wait_q);
- /* initial options */
- qeth_set_intial_options(card);
+ qeth_set_initial_options(card);
/* IP address takeover */
INIT_LIST_HEAD(&card->ipato.entries);
- card->ipato.enabled = false;
- card->ipato.invert4 = false;
- card->ipato.invert6 = false;
- /* init QDIO stuff */
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
- return 0;
}
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
@@ -1515,19 +1498,23 @@ static struct qeth_card *qeth_alloc_card(void)
struct qeth_card *card;
QETH_DBF_TEXT(SETUP, 2, "alloccrd");
- card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
if (!card)
goto out;
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- if (qeth_setup_channel(&card->read))
+ if (qeth_setup_channel(&card->read, true))
goto out_ip;
- if (qeth_setup_channel(&card->write))
+ if (qeth_setup_channel(&card->write, true))
goto out_channel;
+ if (qeth_setup_channel(&card->data, false))
+ goto out_data;
card->options.layer2 = -1;
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
+out_data:
+ qeth_clean_channel(&card->write);
out_channel:
qeth_clean_channel(&card->read);
out_ip:
@@ -1536,19 +1523,6 @@ out:
return NULL;
}
-static void qeth_determine_card_type(struct qeth_card *card)
-{
- QETH_DBF_TEXT(SETUP, 2, "detcdtyp");
-
- card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
- card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- card->info.type = CARD_RDEV(card)->id.driver_info;
- card->qdio.no_out_queues = QETH_MAX_QUEUES;
- if (card->info.type == QETH_CARD_TYPE_IQD)
- card->info.is_multicast_different = 0x0103;
- qeth_update_from_chp_desc(card);
-}
-
static int qeth_clear_channel(struct qeth_channel *channel)
{
unsigned long flags;
@@ -1689,13 +1663,10 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
if (!rcd_buf)
return -ENOMEM;
- channel->ccw.cmd_code = ciw->cmd;
- channel->ccw.cda = (__u32) __pa(rcd_buf);
- channel->ccw.count = ciw->count;
- channel->ccw.flags = CCW_FLAG_SLI;
+ qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf);
channel->state = CH_STATE_RCD;
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+ ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
QETH_RCD_PARM, LPM_ANYPATH, 0,
QETH_RCD_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -1866,15 +1837,13 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
if (!iob)
return -ENOMEM;
iob->callback = idx_reply_cb;
- memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
- channel->ccw.count = QETH_BUFSIZE;
- channel->ccw.cda = (__u32) __pa(iob->data);
+ qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
wait_event(card->wait_q,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+ rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0, QETH_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -1917,9 +1886,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
if (!iob)
return -ENOMEM;
iob->callback = idx_reply_cb;
- memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
- channel->ccw.count = IDX_ACTIVATE_SIZE;
- channel->ccw.cda = (__u32) __pa(iob->data);
+ qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE,
+ iob->data);
if (channel == &card->write) {
memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
@@ -1930,7 +1898,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
&card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
}
- tmp = ((__u8)card->info.portno) | 0x80;
+ tmp = ((u8)card->dev->dev_port) | 0x80;
memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
@@ -1945,7 +1913,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+ rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0, QETH_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -1997,20 +1965,20 @@ static void qeth_idx_write_cb(struct qeth_channel *channel,
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
- dev_err(&card->write.ccwdev->dev,
+ dev_err(&channel->ccwdev->dev,
"The adapter is used exclusively by another "
"host\n");
else
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
" negative reply\n",
- dev_name(&card->write.ccwdev->dev));
+ dev_name(&channel->ccwdev->dev));
goto out;
}
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
"function level mismatch (sent: 0x%x, received: "
- "0x%x)\n", dev_name(&card->write.ccwdev->dev),
+ "0x%x)\n", dev_name(&channel->ccwdev->dev),
card->info.func_level, temp);
goto out;
}
@@ -2038,20 +2006,20 @@ static void qeth_idx_read_cb(struct qeth_channel *channel,
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
case QETH_IDX_ACT_ERR_EXCL:
- dev_err(&card->write.ccwdev->dev,
+ dev_err(&channel->ccwdev->dev,
"The adapter is used exclusively by another "
"host\n");
break;
case QETH_IDX_ACT_ERR_AUTH:
case QETH_IDX_ACT_ERR_AUTH_USER:
- dev_err(&card->read.ccwdev->dev,
+ dev_err(&channel->ccwdev->dev,
"Setting the device online failed because of "
"insufficient authorization\n");
break;
default:
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
" negative reply\n",
- dev_name(&card->read.ccwdev->dev));
+ dev_name(&channel->ccwdev->dev));
}
QETH_CARD_TEXT_(card, 2, "idxread%c",
QETH_IDX_ACT_CAUSE_CODE(iob->data));
@@ -2062,7 +2030,7 @@ static void qeth_idx_read_cb(struct qeth_channel *channel,
if (temp != qeth_peer_func_level(card->info.func_level)) {
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
"level mismatch (sent: 0x%x, received: 0x%x)\n",
- dev_name(&card->read.ccwdev->dev),
+ dev_name(&channel->ccwdev->dev),
card->info.func_level, temp);
goto out;
}
@@ -2079,7 +2047,7 @@ out:
void qeth_prepare_control_data(struct qeth_card *card, int len,
struct qeth_cmd_buffer *iob)
{
- qeth_setup_ccw(&card->write, iob->data, len);
+ qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data);
iob->callback = qeth_release_buffer;
memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
@@ -2126,6 +2094,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
unsigned long cb_cmd),
void *reply_param)
{
+ struct qeth_channel *channel = iob->channel;
int rc;
unsigned long flags;
struct qeth_reply *reply = NULL;
@@ -2135,7 +2104,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
QETH_CARD_TEXT(card, 2, "sendctl");
if (card->read_or_write_problem) {
- qeth_release_buffer(iob->channel, iob);
+ qeth_release_buffer(channel, iob);
return -EIO;
}
reply = qeth_alloc_reply(card);
@@ -2147,7 +2116,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
init_waitqueue_head(&reply->wait_q);
- while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
+ while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ;
if (IS_IPA(iob->data)) {
cmd = __ipa_cmd(iob);
@@ -2167,21 +2136,21 @@ int qeth_send_control_data(struct qeth_card *card, int len,
timeout = jiffies + event_timeout;
QETH_CARD_TEXT(card, 6, "noirqpnd");
- spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
- rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
+ spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0, event_timeout);
- spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
"ccw_device_start rc = %i\n",
- dev_name(&card->write.ccwdev->dev), rc);
+ dev_name(&channel->ccwdev->dev), rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
spin_lock_irqsave(&card->lock, flags);
list_del_init(&reply->list);
qeth_put_reply(reply);
spin_unlock_irqrestore(&card->lock, flags);
- qeth_release_buffer(iob->channel, iob);
- atomic_set(&card->write.irq_pending, 0);
+ qeth_release_buffer(channel, iob);
+ atomic_set(&channel->irq_pending, 0);
wake_up(&card->wait_q);
return rc;
}
@@ -2288,19 +2257,42 @@ static int qeth_cm_setup(struct qeth_card *card)
}
-static int qeth_get_initial_mtu_for_card(struct qeth_card *card)
+static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
- switch (card->info.type) {
- case QETH_CARD_TYPE_IQD:
- return card->info.max_mtu;
- case QETH_CARD_TYPE_OSD:
- case QETH_CARD_TYPE_OSX:
- if (!card->options.layer2)
- return ETH_DATA_LEN - 8; /* L3: allow for LLC + SNAP */
- /* fall through */
- default:
- return ETH_DATA_LEN;
+ struct net_device *dev = card->dev;
+ unsigned int new_mtu;
+
+ if (!max_mtu) {
+ /* IQD needs accurate max MTU to set up its RX buffers: */
+ if (IS_IQD(card))
+ return -EINVAL;
+ /* tolerate quirky HW: */
+ max_mtu = ETH_MAX_MTU;
+ }
+
+ rtnl_lock();
+ if (IS_IQD(card)) {
+ /* move any device with default MTU to new max MTU: */
+ new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
+
+ /* adjust RX buffer size to new max MTU: */
+ card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
+ if (dev->max_mtu && dev->max_mtu != max_mtu)
+ qeth_free_qdio_buffers(card);
+ } else {
+ if (dev->mtu)
+ new_mtu = dev->mtu;
+ /* default MTUs for first setup: */
+ else if (card->options.layer2)
+ new_mtu = ETH_DATA_LEN;
+ else
+ new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
}
+
+ dev->max_mtu = max_mtu;
+ dev->mtu = min(new_mtu, max_mtu);
+ rtnl_unlock();
+ return 0;
}
static int qeth_get_mtu_outof_framesize(int framesize)
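
The new qeth_update_max_mtu() above folds the old per-card-type MTU defaults
into one routine. A minimal user-space sketch of its decision logic, with the
netdevice reduced to the two fields the function actually touches and the
locking/RX-buffer handling omitted (an illustration, not the driver code):

	#include <stdbool.h>
	#include <stdio.h>

	#define ETH_DATA_LEN 1500
	#define ETH_MAX_MTU  65535

	struct fake_dev { unsigned int mtu, max_mtu; };

	static int update_max_mtu(struct fake_dev *dev, bool is_iqd,
				  bool layer2, unsigned int max_mtu)
	{
		unsigned int new_mtu;

		if (!max_mtu) {
			if (is_iqd)		/* IQD needs an accurate max MTU */
				return -1;
			max_mtu = ETH_MAX_MTU;	/* tolerate quirky HW */
		}

		if (is_iqd)
			/* a device still at the old maximum follows the new one */
			new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
		else if (dev->mtu)
			new_mtu = dev->mtu;
		else			/* first setup: defaults per mode */
			new_mtu = layer2 ? ETH_DATA_LEN : ETH_DATA_LEN - 8;

		dev->max_mtu = max_mtu;
		dev->mtu = new_mtu < max_mtu ? new_mtu : max_mtu;
		return 0;
	}

	int main(void)
	{
		struct fake_dev dev = { 0, 0 };

		update_max_mtu(&dev, false, false, 0);	/* first online, L3 OSA */
		printf("mtu=%u max_mtu=%u\n", dev.mtu, dev.max_mtu); /* 1492 65535 */
		return 0;
	}
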
@@ -2319,21 +2311,6 @@ static int qeth_get_mtu_outof_framesize(int framesize)
}
}
-static int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
-{
- switch (card->info.type) {
- case QETH_CARD_TYPE_OSD:
- case QETH_CARD_TYPE_OSM:
- case QETH_CARD_TYPE_OSX:
- case QETH_CARD_TYPE_IQD:
- return ((mtu >= 576) &&
- (mtu <= card->info.max_mtu));
- case QETH_CARD_TYPE_OSN:
- default:
- return 1;
- }
-}
-
static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
@@ -2352,29 +2329,10 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
if (card->info.type == QETH_CARD_TYPE_IQD) {
memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
mtu = qeth_get_mtu_outof_framesize(framesize);
- if (!mtu) {
- iob->rc = -EINVAL;
- QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
- return 0;
- }
- if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
- /* frame size has changed */
- if (card->dev &&
- ((card->dev->mtu == card->info.initial_mtu) ||
- (card->dev->mtu > mtu)))
- card->dev->mtu = mtu;
- qeth_free_qdio_buffers(card);
- }
- card->info.initial_mtu = mtu;
- card->info.max_mtu = mtu;
- card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
} else {
- card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
- iob->data);
- card->info.initial_mtu = min(card->info.max_mtu,
- qeth_get_initial_mtu_for_card(card));
- card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
+ mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
}
+ *(u16 *)reply->param = mtu;
memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
@@ -2388,11 +2346,19 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
return 0;
}
+static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
+{
+ if (IS_OSN(card))
+ return QETH_PROT_OSN2;
+ return (card->options.layer2 == 1) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
+}
+
static int qeth_ulp_enable(struct qeth_card *card)
{
- int rc;
- char prot_type;
+ u8 prot_type = qeth_mpc_select_prot_type(card);
struct qeth_cmd_buffer *iob;
+ u16 max_mtu;
+ int rc;
/*FIXME: trace view callbacks*/
QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
@@ -2400,25 +2366,17 @@ static int qeth_ulp_enable(struct qeth_card *card)
iob = qeth_wait_for_buffer(&card->write);
memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
- *(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
- (__u8) card->info.portno;
- if (card->options.layer2)
- if (card->info.type == QETH_CARD_TYPE_OSN)
- prot_type = QETH_PROT_OSN2;
- else
- prot_type = QETH_PROT_LAYER2;
- else
- prot_type = QETH_PROT_TCPIP;
-
+ *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
&card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
- qeth_ulp_enable_cb, NULL);
- return rc;
-
+ qeth_ulp_enable_cb, &max_mtu);
+ if (rc)
+ return rc;
+ return qeth_update_max_mtu(card, max_mtu);
}
static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
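
Note how the negotiated MTU now travels out of the callback: qeth_ulp_enable()
passes the address of a stack-local max_mtu as the reply parameter, and
qeth_ulp_enable_cb() stores through it via *(u16 *)reply->param. A trimmed
stand-alone illustration of that pattern (the names are ad hoc, not the
driver's):

	#include <stdio.h>

	struct reply { void *param; };

	static int ulp_enable_cb(struct reply *reply, unsigned short hw_mtu)
	{
		*(unsigned short *)reply->param = hw_mtu;	/* report to caller */
		return 0;
	}

	int main(void)
	{
		unsigned short max_mtu = 0;
		struct reply reply = { .param = &max_mtu };

		ulp_enable_cb(&reply, 8192);
		printf("max_mtu=%u\n", max_mtu);	/* prints 8192 */
		return 0;
	}
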
@@ -2473,32 +2431,20 @@ static int qeth_ulp_setup(struct qeth_card *card)
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
{
- int rc;
struct qeth_qdio_out_buffer *newbuf;
- rc = 0;
newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
- if (!newbuf) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!newbuf)
+ return -ENOMEM;
+
newbuf->buffer = q->qdio_bufs[bidx];
skb_queue_head_init(&newbuf->skb_list);
lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
newbuf->q = q;
- newbuf->aob = NULL;
newbuf->next_pending = q->bufs[bidx];
atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
q->bufs[bidx] = newbuf;
- if (q->bufstates) {
- q->bufstates[bidx].user = newbuf;
- QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx);
- QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf);
- QETH_CARD_TEXT_(q->card, 2, "%lx",
- (long) newbuf->next_pending);
- }
-out:
- return rc;
+ return 0;
}
static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
@@ -2908,8 +2854,7 @@ int qeth_init_qdio_queues(struct qeth_card *card)
QDIO_MAX_BUFFERS_PER_Q);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
qeth_clear_output_buffer(card->qdio.out_qs[i],
- card->qdio.out_qs[i]->bufs[j],
- QETH_QDIO_BUF_EMPTY);
+ card->qdio.out_qs[i]->bufs[j]);
}
card->qdio.out_qs[i]->card = card;
card->qdio.out_qs[i]->next_buf_to_fill = 0;
@@ -2942,7 +2887,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
/* cmd->hdr.seqno is set by qeth_send_control_data() */
cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
- cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
+ cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port;
if (card->options.layer2)
cmd->hdr.prim_version_no = 2;
else
@@ -2972,9 +2917,10 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
}
EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- char prot_type)
+void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob)
{
+ u8 prot_type = qeth_mpc_select_prot_type(card);
+
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
@@ -2994,18 +2940,9 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
void *reply_param)
{
int rc;
- char prot_type;
QETH_CARD_TEXT(card, 4, "sendipa");
-
- if (card->options.layer2)
- if (card->info.type == QETH_CARD_TYPE_OSN)
- prot_type = QETH_PROT_OSN2;
- else
- prot_type = QETH_PROT_LAYER2;
- else
- prot_type = QETH_PROT_TCPIP;
- qeth_prepare_ipa_cmd(card, iob, prot_type);
+ qeth_prepare_ipa_cmd(card, iob);
rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
iob, reply_cb, reply_param);
if (rc == -ETIME) {
@@ -3076,7 +3013,7 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
return iob;
}
-int qeth_query_setadapterparms(struct qeth_card *card)
+static int qeth_query_setadapterparms(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -3089,7 +3026,6 @@ int qeth_query_setadapterparms(struct qeth_card *card)
rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
static int qeth_query_ipassists_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -3129,7 +3065,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
return 0;
}
-int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
+static int qeth_query_ipassists(struct qeth_card *card,
+ enum qeth_prot_versions prot)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -3141,7 +3078,6 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_query_ipassists);
static int qeth_query_switch_attributes_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -3180,7 +3116,6 @@ int qeth_query_switch_attributes(struct qeth_card *card,
return qeth_send_ipa_cmd(card, iob,
qeth_query_switch_attributes_cb, sw_info);
}
-EXPORT_SYMBOL_GPL(qeth_query_switch_attributes);
static int qeth_query_setdiagass_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -3530,13 +3465,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT;
+ atomic_add(count, &queue->used_buffers);
+
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
if (queue->card->options.performance_stats)
queue->card->perf_stats.outbound_do_qdio_time +=
qeth_get_micros() -
queue->card->perf_stats.outbound_do_qdio_start_time;
- atomic_add(count, &queue->used_buffers);
if (rc) {
queue->card->stats.tx_errors += count;
/* ignore temporary SIGA errors without busy condition */
@@ -3601,7 +3537,7 @@ static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
{
struct qeth_card *card = (struct qeth_card *)card_ptr;
- if (card->dev && (card->dev->flags & IFF_UP))
+ if (card->dev->flags & IFF_UP)
napi_schedule(&card->napi);
}
@@ -3634,10 +3570,10 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
-
-static void qeth_qdio_cq_handler(struct qeth_card *card,
- unsigned int qdio_err,
- unsigned int queue, int first_element, int count) {
+static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
+ unsigned int queue, int first_element,
+ int count)
+{
struct qeth_qdio_q *cq = card->qdio.c_q;
int i;
int rc;
@@ -3663,25 +3599,17 @@ static void qeth_qdio_cq_handler(struct qeth_card *card,
for (i = first_element; i < first_element + count; ++i) {
int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
- int e;
+ int e = 0;
- e = 0;
while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
buffer->element[e].addr) {
unsigned long phys_aob_addr;
phys_aob_addr = (unsigned long) buffer->element[e].addr;
qeth_qdio_handle_aob(card, phys_aob_addr);
- buffer->element[e].addr = NULL;
- buffer->element[e].eflags = 0;
- buffer->element[e].sflags = 0;
- buffer->element[e].length = 0;
-
++e;
}
-
- buffer->element[15].eflags = 0;
- buffer->element[15].sflags = 0;
+ qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
}
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
card->qdio.c_q->next_buf_to_init,
@@ -3760,11 +3688,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
qeth_notify_skbs(queue, buffer,
TX_NOTIFY_PENDING);
}
- buffer->aob = queue->bufstates[bidx].aob;
QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
- QETH_CARD_TEXT(queue->card, 5, "aob");
- QETH_CARD_TEXT_(queue->card, 5, "%lx",
- virt_to_phys(buffer->aob));
/* prepare the queue slot for re-use: */
qeth_scrub_qdio_buffer(buffer->buffer,
@@ -3782,8 +3706,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
qeth_notify_skbs(queue, buffer, n);
}
- qeth_clear_output_buffer(queue, buffer,
- QETH_QDIO_BUF_EMPTY);
+ qeth_clear_output_buffer(queue, buffer);
}
qeth_cleanup_handled_pending(queue, bidx, 0);
}
@@ -3810,15 +3733,11 @@ static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
* Note: Function assumes that we have 4 outbound queues.
*/
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
- int ipv, int cast_type)
+ int ipv)
{
__be16 *tci;
u8 tos;
- if (cast_type && card->info.is_multicast_different)
- return card->info.is_multicast_different &
- (card->qdio.no_out_queues - 1);
-
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_TOS:
case QETH_PRIO_Q_ING_PREC:
@@ -3882,6 +3801,17 @@ int qeth_get_elements_for_frags(struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
+static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset)
+{
+ unsigned int elements = qeth_get_elements_for_frags(skb);
+ addr_t end = (addr_t)skb->data + skb_headlen(skb);
+ addr_t start = (addr_t)skb->data + data_offset;
+
+ if (start != end)
+ elements += qeth_get_elements_for_range(start, end);
+ return elements;
+}
+
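
qeth_count_elements() sums the buffer elements needed for the frags plus the
linear data past data_offset. The per-range count comes from
qeth_get_elements_for_range(), which is not part of this hunk; from its usage
it counts the pages a byte range touches. A runnable sketch of that
arithmetic, assuming 4 KiB pages:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	static unsigned int elements_for_range(unsigned long start,
					       unsigned long end)
	{
		/* one buffer element per page touched by [start, end) */
		return (end - 1) / PAGE_SIZE - start / PAGE_SIZE + 1;
	}

	int main(void)
	{
		/* 100 bytes inside one page: 1 element */
		printf("%u\n", elements_for_range(0x1000, 0x1064));
		/* 100 bytes straddling a page boundary: 2 elements */
		printf("%u\n", elements_for_range(0x1fe0, 0x2044));
		return 0;
	}
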
/**
* qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags.
* @card: qeth card structure, to check max. elems.
@@ -3897,12 +3827,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
int qeth_get_elements_no(struct qeth_card *card,
struct sk_buff *skb, int extra_elems, int data_offset)
{
- addr_t end = (addr_t)skb->data + skb_headlen(skb);
- int elements = qeth_get_elements_for_frags(skb);
- addr_t start = (addr_t)skb->data + data_offset;
-
- if (start != end)
- elements += qeth_get_elements_for_range(start, end);
+ int elements = qeth_count_elements(skb, data_offset);
if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
@@ -3936,32 +3861,87 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
/**
- * qeth_push_hdr() - push a qeth_hdr onto an skb.
- * @skb: skb that the qeth_hdr should be pushed onto.
+ * qeth_add_hw_header() - add a HW header to an skb.
+ * @skb: skb that the HW header should be added to.
* @hdr: double pointer to a qeth_hdr. When returning with >= 0,
* it contains a valid pointer to a qeth_hdr.
- * @len: length of the hdr that needs to be pushed on.
+ * @hdr_len: length of the HW header.
+ * @proto_len: length of protocol headers that need to be in the same page as
+ * the HW header.
*
* Returns the pushed length. If the header can't be pushed on
* (e.g. because it would cross a page boundary), it is allocated from
* the cache instead and 0 is returned.
+ * The number of needed buffer elements is returned in @elements.
* An error while creating the hdr is indicated by a return value < 0.
*/
-int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len)
-{
- if (skb_headroom(skb) >= len &&
- qeth_get_elements_for_range((addr_t)skb->data - len,
- (addr_t)skb->data) == 1) {
- *hdr = skb_push(skb, len);
- return len;
+int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr **hdr, unsigned int hdr_len,
+ unsigned int proto_len, unsigned int *elements)
+{
+ const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+ const unsigned int contiguous = proto_len ? proto_len : 1;
+ unsigned int __elements;
+ addr_t start, end;
+ bool push_ok;
+ int rc;
+
+check_layout:
+ start = (addr_t)skb->data - hdr_len;
+ end = (addr_t)skb->data;
+
+ if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
+ /* Push HW header into same page as first protocol header. */
+ push_ok = true;
+ __elements = qeth_count_elements(skb, 0);
+ } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
+ /* Push HW header into a new page. */
+ push_ok = true;
+ __elements = 1 + qeth_count_elements(skb, 0);
+ } else {
+ /* Use header cache, copy protocol headers up. */
+ push_ok = false;
+ __elements = 1 + qeth_count_elements(skb, proto_len);
+ }
+
+ /* Compress skb to fit into one IO buffer: */
+ if (__elements > max_elements) {
+ if (!skb_is_nonlinear(skb)) {
+ /* Drop it, no easy way of shrinking it further. */
+ QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
+ max_elements, __elements, skb->len);
+ return -E2BIG;
+ }
+
+ rc = skb_linearize(skb);
+ if (card->options.performance_stats) {
+ if (rc)
+ card->perf_stats.tx_linfail++;
+ else
+ card->perf_stats.tx_lin++;
+ }
+ if (rc)
+ return rc;
+
+ /* Linearization changed the layout, re-evaluate: */
+ goto check_layout;
+ }
+
+ *elements = __elements;
+ /* Add the header: */
+ if (push_ok) {
+ *hdr = skb_push(skb, hdr_len);
+ return hdr_len;
}
/* fall back */
*hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
if (!*hdr)
return -ENOMEM;
+ /* Copy protocol headers behind HW header: */
+ skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
return 0;
}
-EXPORT_SYMBOL_GPL(qeth_push_hdr);
+EXPORT_SYMBOL_GPL(qeth_add_hw_header);
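
The three layouts that qeth_add_hw_header() distinguishes can be hard to see
in the goto-based flow: push the header into the same page as the protocol
headers, push it into a page of its own, or fall back to the header cache. A
compact user-space model of just that classification step, assuming 4 KiB
pages (the element counting and linearization handling are left out):

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	static bool same_page(unsigned long start, unsigned long end)
	{
		return start / PAGE_SIZE == (end - 1) / PAGE_SIZE;
	}

	static const char *classify(unsigned long skb_data, unsigned int hdr_len,
				    unsigned int proto_len)
	{
		unsigned long start = skb_data - hdr_len;
		unsigned int contiguous = proto_len ? proto_len : 1;

		if (same_page(start, skb_data + contiguous))
			return "push, shares page with protocol headers";
		if (!proto_len && same_page(start, skb_data))
			return "push, own page";
		return "header cache, copy protocol headers";
	}

	int main(void)
	{
		printf("%s\n", classify(0x2040, 32, 14)); /* plenty of headroom */
		printf("%s\n", classify(0x2004, 32, 14)); /* hdr would cross page */
		return 0;
	}

The retry via the check_layout label after skb_linearize() simply re-runs this
classification on the new, linear layout.
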
static void __qeth_fill_buffer(struct sk_buff *skb,
struct qeth_qdio_out_buffer *buf,
@@ -4241,24 +4221,6 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
-int qeth_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct qeth_card *card;
- char dbf_text[15];
-
- card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 4, "chgmtu");
- sprintf(dbf_text, "%8x", new_mtu);
- QETH_CARD_TEXT(card, 4, dbf_text);
-
- if (!qeth_mtu_is_valid(card, new_mtu))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-EXPORT_SYMBOL_GPL(qeth_change_mtu);
-
struct net_device_stats *qeth_get_stats(struct net_device *dev)
{
struct qeth_card *card;
@@ -4834,9 +4796,6 @@ int qeth_vm_request_mac(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "vmreqmac");
- if (!card->dev)
- return -ENODEV;
-
request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
if (!request || !response) {
@@ -5077,11 +5036,11 @@ out_free_nothing:
static void qeth_core_free_card(struct qeth_card *card)
{
-
QETH_DBF_TEXT(SETUP, 2, "freecrd");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_clean_channel(&card->read);
qeth_clean_channel(&card->write);
+ qeth_clean_channel(&card->data);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
kfree(card);
@@ -5716,6 +5675,53 @@ static void qeth_clear_dbf_list(void)
mutex_unlock(&qeth_dbf_list_mutex);
}
+static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
+{
+ struct net_device *dev;
+
+ switch (card->info.type) {
+ case QETH_CARD_TYPE_IQD:
+ dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
+ break;
+ case QETH_CARD_TYPE_OSN:
+ dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
+ break;
+ default:
+ dev = alloc_etherdev(0);
+ }
+
+ if (!dev)
+ return NULL;
+
+ dev->ml_priv = card;
+ dev->watchdog_timeo = QETH_TX_TIMEOUT;
+ dev->min_mtu = IS_OSN(card) ? 64 : 576;
+ /* initialized when device first goes online: */
+ dev->max_mtu = 0;
+ dev->mtu = 0;
+ SET_NETDEV_DEV(dev, &card->gdev->dev);
+ netif_carrier_off(dev);
+
+ if (!IS_OSN(card)) {
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->hw_features |= NETIF_F_SG;
+ dev->vlan_features |= NETIF_F_SG;
+ }
+
+ return dev;
+}
+
+struct net_device *qeth_clone_netdev(struct net_device *orig)
+{
+ struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
+
+ if (!clone)
+ return NULL;
+
+ clone->dev_port = orig->dev_port;
+ return clone;
+}
+
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
@@ -5758,12 +5764,12 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
gdev->cdev[1]->handler = qeth_irq;
gdev->cdev[2]->handler = qeth_irq;
- qeth_determine_card_type(card);
- rc = qeth_setup_card(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ qeth_setup_card(card);
+ qeth_update_from_chp_desc(card);
+
+ card->dev = qeth_alloc_netdev(card);
+ if (!card->dev)
goto err_card;
- }
qeth_determine_capabilities(card);
enforced_disc = qeth_enforce_discipline(card);
@@ -5775,7 +5781,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
card->info.layer_enforced = true;
rc = qeth_core_load_discipline(card, enforced_disc);
if (rc)
- goto err_card;
+ goto err_load;
gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
? card->discipline->devtype
@@ -5793,6 +5799,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
err_disc:
qeth_core_free_discipline(card);
+err_load:
+ free_netdev(card->dev);
err_card:
qeth_core_free_card(card);
err_dev:
@@ -5815,10 +5823,10 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
list_del(&card->list);
write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+ free_netdev(card->dev);
qeth_core_free_card(card);
dev_set_drvdata(&gdev->dev, NULL);
put_device(&gdev->dev);
- return;
}
static int qeth_core_set_online(struct ccwgroup_device *gdev)
@@ -5887,31 +5895,13 @@ static int qeth_core_restore(struct ccwgroup_device *gdev)
return 0;
}
-static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "qeth",
- },
- .ccw_driver = &qeth_ccw_driver,
- .setup = qeth_core_probe_device,
- .remove = qeth_core_remove_device,
- .set_online = qeth_core_set_online,
- .set_offline = qeth_core_set_offline,
- .shutdown = qeth_core_shutdown,
- .prepare = NULL,
- .complete = NULL,
- .freeze = qeth_core_freeze,
- .thaw = qeth_core_thaw,
- .restore = qeth_core_restore,
-};
-
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
size_t count)
{
int err;
- err = ccwgroup_create_dev(qeth_core_root_dev,
- &qeth_core_ccwgroup_driver, 3, buf);
+ err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
+ buf);
return err ? err : count;
}
@@ -5929,6 +5919,25 @@ static const struct attribute_group *qeth_drv_attr_groups[] = {
NULL,
};
+static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
+ .driver = {
+ .groups = qeth_drv_attr_groups,
+ .owner = THIS_MODULE,
+ .name = "qeth",
+ },
+ .ccw_driver = &qeth_ccw_driver,
+ .setup = qeth_core_probe_device,
+ .remove = qeth_core_remove_device,
+ .set_online = qeth_core_set_online,
+ .set_offline = qeth_core_set_offline,
+ .shutdown = qeth_core_shutdown,
+ .prepare = NULL,
+ .complete = NULL,
+ .freeze = qeth_core_freeze,
+ .thaw = qeth_core_thaw,
+ .restore = qeth_core_restore,
+};
+
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct qeth_card *card = dev->ml_priv;
@@ -5995,7 +6004,7 @@ static struct {
{"tx skbs packing"},
{"tx buffers packing"},
{"tx sg skbs"},
- {"tx sg frags"},
+ {"tx buffer elements"},
/* 10 */{"rx sg skbs"},
{"rx sg frags"},
{"rx sg page allocs"},
@@ -6054,7 +6063,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
data[6] = card->perf_stats.skbs_sent_pack;
data[7] = card->perf_stats.bufs_sent_pack;
data[8] = card->perf_stats.sg_skbs_sent;
- data[9] = card->perf_stats.sg_frags_sent;
+ data[9] = card->perf_stats.buf_elements_sent;
data[10] = card->perf_stats.sg_skbs_rx;
data[11] = card->perf_stats.sg_frags_rx;
data[12] = card->perf_stats.sg_alloc_page_rx;
@@ -6620,7 +6629,6 @@ static int __init qeth_core_init(void)
rc = ccw_driver_register(&qeth_ccw_driver);
if (rc)
goto ccw_err;
- qeth_core_ccwgroup_driver.driver.groups = qeth_drv_attr_groups;
rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
if (rc)
goto ccwgroup_err;
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 22428b769f9b..5bcb8dafc3ee 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -146,17 +146,6 @@ unsigned char IPA_PDU_HEADER[] = {
};
EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
-unsigned char WRITE_CCW[] = {
- 0x01, CCW_FLAG_SLI, 0, 0,
- 0, 0, 0, 0
-};
-
-unsigned char READ_CCW[] = {
- 0x02, CCW_FLAG_SLI, 0, 0,
- 0, 0, 0, 0
-};
-
-
struct ipa_rc_msg {
enum qeth_ipa_return_codes rc;
char *msg;
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 878e62f35169..aa8b9196b089 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -64,6 +64,9 @@ enum qeth_card_types {
QETH_CARD_TYPE_OSX = 2,
};
+#define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD)
+#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
+
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
/* only the first two bytes are looked at in qeth_get_cardname_short */
enum qeth_link_types {
@@ -815,10 +818,6 @@ extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
/* END OF IP Assist related definitions */
/*****************************************************************************/
-
-extern unsigned char WRITE_CCW[];
-extern unsigned char READ_CCW[];
-
extern unsigned char CM_ENABLE[];
#define CM_ENABLE_SIZE 0x63
#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c)
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index c3f18afb368b..25d0be25bcb3 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -112,7 +112,7 @@ static ssize_t qeth_dev_portno_show(struct device *dev,
if (!card)
return -EINVAL;
- return sprintf(buf, "%i\n", card->info.portno);
+ return sprintf(buf, "%i\n", card->dev->dev_port);
}
static ssize_t qeth_dev_portno_store(struct device *dev,
@@ -143,9 +143,7 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
rc = -EINVAL;
goto out;
}
- card->info.portno = portno;
- if (card->dev)
- card->dev->dev_port = portno;
+ card->dev->dev_port = portno;
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -388,6 +386,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ struct net_device *ndev;
char *tmp;
int i, rc = 0;
enum qeth_discipline_id newdis;
@@ -424,8 +423,19 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
card->info.mac_bits = 0;
if (card->discipline) {
+ /* start with a new, pristine netdevice: */
+ ndev = qeth_clone_netdev(card->dev);
+ if (!ndev) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
card->discipline->remove(card->gdev);
qeth_core_free_discipline(card);
+ card->options.layer2 = -1;
+
+ free_netdev(card->dev);
+ card->dev = ndev;
}
rc = qeth_core_load_discipline(card, newdis);
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index f2130051ca11..ddc615b431a8 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -14,6 +14,11 @@ extern const struct attribute_group *qeth_l2_attr_groups[];
int qeth_l2_create_device_attributes(struct device *);
void qeth_l2_remove_device_attributes(struct device *);
void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
+int qeth_bridgeport_query_ports(struct qeth_card *card,
+ enum qeth_sbp_roles *role,
+ enum qeth_sbp_states *state);
+int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
+int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state);
int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 2487f0aeb165..710fa74892ae 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -26,7 +26,6 @@
static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
-static void qeth_l2_set_rx_mode(struct net_device *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
static void qeth_bridge_state_change(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
@@ -186,12 +185,12 @@ static void qeth_l2_del_all_macs(struct qeth_card *card)
static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
{
if (card->info.type == QETH_CARD_TYPE_OSN)
- return RTN_UNSPEC;
+ return RTN_UNICAST;
if (is_broadcast_ether_addr(skb->data))
return RTN_BROADCAST;
if (is_multicast_ether_addr(skb->data))
return RTN_MULTICAST;
- return RTN_UNSPEC;
+ return RTN_UNICAST;
}
static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
@@ -344,7 +343,6 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
kfree(tmpid);
}
- qeth_l2_set_rx_mode(card->dev);
return rc;
}
@@ -643,97 +641,58 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
qeth_promisc_to_bridge(card);
}
-static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int cast_type)
+static int qeth_l2_xmit(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_qdio_out_q *queue, int cast_type, int ipv)
{
- unsigned int data_offset = ETH_HLEN;
- struct qeth_hdr *hdr;
- int rc;
-
- hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
- if (!hdr)
- return -ENOMEM;
- qeth_l2_fill_header(hdr, skb, cast_type, skb->len);
- skb_copy_from_linear_data(skb, ((char *)hdr) + sizeof(*hdr),
- data_offset);
-
- if (!qeth_get_elements_no(card, skb, 1, data_offset)) {
- rc = -E2BIG;
- goto out;
- }
- rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
- sizeof(*hdr) + data_offset);
-out:
- if (rc)
- kmem_cache_free(qeth_core_header_cache, hdr);
- return rc;
-}
-
-static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int cast_type,
- int ipv)
-{
- int push_len = sizeof(struct qeth_hdr);
- unsigned int elements, nr_frags;
- unsigned int hdr_elements = 0;
+ const unsigned int proto_len = IS_IQD(card) ? ETH_HLEN : 0;
+ const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
+ unsigned int frame_len = skb->len;
+ unsigned int data_offset = 0;
struct qeth_hdr *hdr = NULL;
unsigned int hd_len = 0;
- int rc;
-
- /* fix hardware limitation: as long as we do not have sbal
- * chaining we can not send long frag lists
- */
- if (!qeth_get_elements_no(card, skb, 0, 0)) {
- rc = skb_linearize(skb);
-
- if (card->options.performance_stats) {
- if (rc)
- card->perf_stats.tx_linfail++;
- else
- card->perf_stats.tx_lin++;
- }
- if (rc)
- return rc;
- }
- nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int elements;
+ int push_len, rc;
+ bool is_sg;
- rc = skb_cow_head(skb, push_len);
+ rc = skb_cow_head(skb, hw_hdr_len);
if (rc)
return rc;
- push_len = qeth_push_hdr(skb, &hdr, push_len);
+
+ push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
+ &elements);
if (push_len < 0)
return push_len;
if (!push_len) {
- /* hdr was allocated from cache */
- hd_len = sizeof(*hdr);
- hdr_elements = 1;
+ /* HW header needs its own buffer element. */
+ hd_len = hw_hdr_len + proto_len;
+ data_offset = proto_len;
}
- qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len);
+ qeth_l2_fill_header(hdr, skb, cast_type, frame_len);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
if (card->options.performance_stats)
card->perf_stats.tx_csum++;
}
- elements = qeth_get_elements_no(card, skb, hdr_elements, 0);
- if (!elements) {
- rc = -E2BIG;
- goto out;
+ is_sg = skb_is_nonlinear(skb);
+ if (IS_IQD(card)) {
+ rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
+ hd_len);
+ } else {
+ /* TODO: drop skb_orphan() once TX completion is fast enough */
+ skb_orphan(skb);
+ rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
+ hd_len, elements);
}
- elements += hdr_elements;
- /* TODO: remove the skb_orphan() once TX completion is fast enough */
- skb_orphan(skb);
- rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements);
-out:
if (!rc) {
- if (card->options.performance_stats && nr_frags) {
- card->perf_stats.sg_skbs_sent++;
- /* nr_frags + skb->data */
- card->perf_stats.sg_frags_sent += nr_frags + 1;
+ if (card->options.performance_stats) {
+ card->perf_stats.buf_elements_sent += elements;
+ if (is_sg)
+ card->perf_stats.sg_skbs_sent++;
}
} else {
- if (hd_len)
+ if (!push_len)
kmem_cache_free(qeth_core_header_cache, hdr);
if (rc == -EBUSY)
/* roll back to ETH header */
@@ -770,34 +729,23 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
int tx_bytes = skb->len;
int rc;
- if (card->qdio.do_prio_queueing || (cast_type &&
- card->info.is_multicast_different))
- queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb,
- ipv, cast_type)];
- else
- queue = card->qdio.out_qs[card->qdio.default_out_queue];
-
if ((card->state != CARD_STATE_UP) || !card->lan_online) {
card->stats.tx_carrier_errors++;
goto tx_drop;
}
+ queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+
if (card->options.performance_stats) {
card->perf_stats.outbound_cnt++;
card->perf_stats.outbound_start_time = qeth_get_micros();
}
netif_stop_queue(dev);
- switch (card->info.type) {
- case QETH_CARD_TYPE_OSN:
+ if (IS_OSN(card))
rc = qeth_l2_xmit_osn(card, skb, queue);
- break;
- case QETH_CARD_TYPE_IQD:
- rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type);
- break;
- default:
- rc = qeth_l2_xmit_osa(card, skb, queue, cast_type, ipv);
- }
+ else
+ rc = qeth_l2_xmit(card, skb, queue, cast_type, ipv);
if (!rc) {
card->stats.tx_packets++;
@@ -906,13 +854,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l2_set_offline(cgdev);
-
- if (card->dev) {
- unregister_netdev(card->dev);
- free_netdev(card->dev);
- card->dev = NULL;
- }
- return;
+ unregister_netdev(card->dev);
}
static const struct ethtool_ops qeth_l2_ethtool_ops = {
@@ -941,7 +883,6 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
.ndo_set_mac_address = qeth_l2_set_mac_address,
- .ndo_change_mtu = qeth_change_mtu,
.ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
@@ -951,35 +892,19 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
- switch (card->info.type) {
- case QETH_CARD_TYPE_IQD:
- card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN,
- ether_setup);
- break;
- case QETH_CARD_TYPE_OSN:
- card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN,
- ether_setup);
- break;
- default:
- card->dev = alloc_etherdev(0);
- }
+ int rc;
- if (!card->dev)
- return -ENODEV;
+ if (card->dev->netdev_ops)
+ return 0;
- card->dev->ml_priv = card;
card->dev->priv_flags |= IFF_UNICAST_FLT;
- card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
- card->dev->mtu = card->info.initial_mtu;
- card->dev->min_mtu = 64;
- card->dev->max_mtu = ETH_MAX_MTU;
- card->dev->dev_port = card->info.portno;
card->dev->netdev_ops = &qeth_l2_netdev_ops;
if (card->info.type == QETH_CARD_TYPE_OSN) {
card->dev->ethtool_ops = &qeth_l2_osn_ops;
card->dev->flags |= IFF_NOARP;
} else {
card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
+ card->dev->needed_headroom = sizeof(struct qeth_hdr);
}
if (card->info.type == QETH_CARD_TYPE_OSM)
@@ -987,14 +912,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
else
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
- if (card->info.type != QETH_CARD_TYPE_OSN &&
- card->info.type != QETH_CARD_TYPE_IQD) {
- card->dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- card->dev->needed_headroom = sizeof(struct qeth_hdr);
- card->dev->hw_features |= NETIF_F_SG;
- card->dev->vlan_features |= NETIF_F_SG;
- }
-
if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
card->dev->features |= NETIF_F_SG;
/* OSA 3S and earlier has no RX/TX support */
@@ -1013,12 +930,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->vlan_features |= NETIF_F_RXCSUM;
}
- card->info.broadcast_capable = 1;
qeth_l2_request_initial_mac(card);
- SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
- netif_carrier_off(card->dev);
- return register_netdev(card->dev);
+ rc = register_netdev(card->dev);
+ if (rc)
+ card->dev->netdev_ops = NULL;
+ return rc;
}
static int qeth_l2_start_ipassists(struct qeth_card *card)
@@ -1064,10 +981,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
dev_info(&card->gdev->dev,
"The device represents a Bridge Capable Port\n");
- if (!card->dev && qeth_l2_setup_netdev(card)) {
- rc = -ENODEV;
+ rc = qeth_l2_setup_netdev(card);
+ if (rc)
goto out_remove;
- }
if (card->info.type != QETH_CARD_TYPE_OSN &&
!qeth_l2_send_setmac(card, card->dev->dev_addr))
@@ -1125,13 +1041,12 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
if (recovery_mode &&
card->info.type != QETH_CARD_TYPE_OSN) {
__qeth_l2_open(card->dev);
+ qeth_l2_set_rx_mode(card->dev);
} else {
rtnl_lock();
dev_open(card->dev);
rtnl_unlock();
}
- /* this also sets saved unicast addresses */
- qeth_l2_set_rx_mode(card->dev);
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -1171,8 +1086,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
- if (card->dev && netif_carrier_ok(card->dev))
- netif_carrier_off(card->dev);
+ netif_carrier_off(card->dev);
recover_flag = card->state;
if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
@@ -1245,8 +1159,7 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- if (card->dev)
- netif_device_detach(card->dev);
+ netif_device_detach(card->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_OFFLINE)
@@ -1279,8 +1192,7 @@ static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
rc = __qeth_l2_set_online(card->gdev, 0);
out:
qeth_set_allowed_threads(card, 0xffffffff, 0);
- if (card->dev)
- netif_device_attach(card->dev);
+ netif_device_attach(card->dev);
if (rc)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
@@ -1327,25 +1239,26 @@ EXPORT_SYMBOL_GPL(qeth_l2_discipline);
static int qeth_osn_send_control_data(struct qeth_card *card, int len,
struct qeth_cmd_buffer *iob)
{
+ struct qeth_channel *channel = iob->channel;
unsigned long flags;
int rc = 0;
QETH_CARD_TEXT(card, 5, "osndctrd");
wait_event(card->wait_q,
- atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
+ atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
qeth_prepare_control_data(card, len, iob);
QETH_CARD_TEXT(card, 6, "osnoirqp");
- spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
- rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
+ spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
- spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
"ccw_device_start rc = %i\n", rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
- qeth_release_buffer(iob->channel, iob);
- atomic_set(&card->write.irq_pending, 0);
+ qeth_release_buffer(channel, iob);
+ atomic_set(&channel->irq_pending, 0);
wake_up(&card->wait_q);
}
return rc;
@@ -1358,7 +1271,7 @@ static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "osndipa");
- qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
+ qeth_prepare_ipa_cmd(card, iob);
s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
s2 = (u16)data_len;
memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
@@ -1877,7 +1790,6 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
return rc;
return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
}
-EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
static int qeth_bridgeport_set_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -2025,7 +1937,6 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL);
return qeth_anset_makerc(card, rc, response);
}
-EXPORT_SYMBOL_GPL(qeth_bridgeport_an_set);
static bool qeth_bridgeport_is_in_use(struct qeth_card *card)
{
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 5905dc63e256..7175086677fb 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -117,9 +117,9 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
int rc = 0;
if (!card->ipato.enabled)
- return 0;
+ return false;
if (addr->type != QETH_IP_TYPE_NORMAL)
- return 0;
+ return false;
qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
(addr->proto == QETH_PROT_IPV4)? 4:16);
@@ -1978,17 +1978,17 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
(cast_type == RTN_MULTICAST) ||
(cast_type == RTN_ANYCAST))
return cast_type;
- return RTN_UNSPEC;
+ return RTN_UNICAST;
}
rcu_read_unlock();
/* no neighbour (e.g. AF_PACKET), fall back to target's IP address ... */
if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
- RTN_MULTICAST : RTN_UNSPEC;
+ RTN_MULTICAST : RTN_UNICAST;
else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
- RTN_MULTICAST : RTN_UNSPEC;
+ RTN_MULTICAST : RTN_UNICAST;
/* ... and MAC address */
if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
@@ -1997,22 +1997,21 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
return RTN_MULTICAST;
/* default to unicast */
- return RTN_UNSPEC;
+ return RTN_UNICAST;
}
-static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
- struct qeth_hdr *hdr, struct sk_buff *skb)
+static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,
+ unsigned int data_len)
{
char daddr[16];
struct af_iucv_trans_hdr *iucv_hdr;
memset(hdr, 0, sizeof(struct qeth_hdr));
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
- hdr->hdr.l3.ext_flags = 0;
- hdr->hdr.l3.length = skb->len - ETH_HLEN;
+ hdr->hdr.l3.length = data_len;
hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
- iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
+ iucv_hdr = (struct af_iucv_trans_hdr *)(skb_mac_header(skb) + ETH_HLEN);
memset(daddr, 0, sizeof(daddr));
daddr[0] = 0xfe;
daddr[1] = 0x80;
@@ -2051,6 +2050,12 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
}
+ if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) {
+ qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
+ if (card->options.performance_stats)
+ card->perf_stats.tx_csum++;
+ }
+
/* OSA only: */
if (!ipv) {
hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
@@ -2156,106 +2161,121 @@ static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
return elements;
}
-static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_qdio_out_q *queue, int ipv,
+ int cast_type)
{
- int rc;
- __be16 *tag;
+ const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
+ unsigned int frame_len, elements;
+ unsigned char eth_hdr[ETH_HLEN];
struct qeth_hdr *hdr = NULL;
- int hdr_elements = 0;
- int elements;
- struct qeth_card *card = dev->ml_priv;
- struct sk_buff *new_skb = NULL;
- int ipv = qeth_get_ip_version(skb);
- int cast_type = qeth_l3_get_cast_type(skb);
- struct qeth_qdio_out_q *queue =
- card->qdio.out_qs[card->qdio.do_prio_queueing
- || (cast_type && card->info.is_multicast_different) ?
- qeth_get_priority_queue(card, skb, ipv, cast_type) :
- card->qdio.default_out_queue];
- int tx_bytes = skb->len;
unsigned int hd_len = 0;
- bool use_tso;
- int data_offset = -1;
- unsigned int nr_frags;
-
- if (((card->info.type == QETH_CARD_TYPE_IQD) &&
- (((card->options.cq != QETH_CQ_ENABLED) && !ipv) ||
- ((card->options.cq == QETH_CQ_ENABLED) &&
- (be16_to_cpu(skb->protocol) != ETH_P_AF_IUCV)))) ||
- card->options.sniffer)
- goto tx_drop;
+ int push_len, rc;
+ bool is_sg;
- if ((card->state != CARD_STATE_UP) || !card->lan_online) {
- card->stats.tx_carrier_errors++;
- goto tx_drop;
+ /* re-use the L2 header area for the HW header: */
+ rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
+ if (rc)
+ return rc;
+ skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
+ skb_pull(skb, ETH_HLEN);
+ frame_len = skb->len;
+
+ push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0,
+ &elements);
+ if (push_len < 0)
+ return push_len;
+ if (!push_len) {
+ /* hdr was added discontiguously from skb->data */
+ hd_len = hw_hdr_len;
}
- if ((cast_type == RTN_BROADCAST) &&
- (card->info.broadcast_capable == 0))
- goto tx_drop;
+ if (skb->protocol == htons(ETH_P_AF_IUCV))
+ qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len);
+ else
+ qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
- if (card->options.performance_stats) {
- card->perf_stats.outbound_cnt++;
- card->perf_stats.outbound_start_time = qeth_get_micros();
+ is_sg = skb_is_nonlinear(skb);
+ if (IS_IQD(card)) {
+ rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len);
+ } else {
+ /* TODO: drop skb_orphan() once TX completion is fast enough */
+ skb_orphan(skb);
+ rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len,
+ elements);
}
+ if (!rc) {
+ if (card->options.performance_stats) {
+ card->perf_stats.buf_elements_sent += elements;
+ if (is_sg)
+ card->perf_stats.sg_skbs_sent++;
+ }
+ } else {
+ if (!push_len)
+ kmem_cache_free(qeth_core_header_cache, hdr);
+ if (rc == -EBUSY) {
+ /* roll back to ETH header */
+ skb_pull(skb, push_len);
+ skb_push(skb, ETH_HLEN);
+ skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
+ }
+ }
+ return rc;
+}
+
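
qeth_l3_xmit_offload() reuses the L2 header area for the HW header: the
Ethernet header is copied aside and pulled off before the HW header is built,
and only restored when the frame must be requeued (-EBUSY). A stand-alone
sketch of that stash-and-restore, ignoring push_len and the real skb API:

	#include <stdio.h>
	#include <string.h>

	#define ETH_HLEN 14

	int main(void)
	{
		unsigned char frame[] = "DDDDDDSSSSSSTTpayload";	/* 14B hdr + data */
		unsigned char eth_hdr[ETH_HLEN];
		unsigned char *data = frame;

		memcpy(eth_hdr, data, ETH_HLEN);	/* stash the L2 header */
		data += ETH_HLEN;			/* skb_pull(skb, ETH_HLEN) */

		/* ... qeth_do_send_packet*() returns -EBUSY ... */

		data -= ETH_HLEN;			/* skb_push(skb, ETH_HLEN) */
		memcpy(data, eth_hdr, ETH_HLEN);	/* restore for the retry */
		printf("%s\n", data);			/* frame is intact again */
		return 0;
	}
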
+static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_qdio_out_q *queue, int ipv, int cast_type)
+{
+ int elements, len, rc;
+ __be16 *tag;
+ struct qeth_hdr *hdr = NULL;
+ int hdr_elements = 0;
+ struct sk_buff *new_skb = NULL;
+ int tx_bytes = skb->len;
+ unsigned int hd_len;
+ bool use_tso, is_sg;
+
/* Ignore segment size from skb_is_gso(), 1 page is always used. */
use_tso = skb_is_gso(skb) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4);
- if (card->info.type == QETH_CARD_TYPE_IQD) {
- new_skb = skb;
- data_offset = ETH_HLEN;
- hd_len = sizeof(*hdr);
- hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
- if (!hdr)
- goto tx_drop;
- hdr_elements++;
- } else {
- /* create a clone with writeable headroom */
- new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso)
- + VLAN_HLEN);
- if (!new_skb)
- goto tx_drop;
-
- if (ipv == 4) {
- skb_pull(new_skb, ETH_HLEN);
- }
+ /* create a clone with writeable headroom */
+ new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) +
+ VLAN_HLEN);
+ if (!new_skb)
+ return -ENOMEM;
- if (ipv != 4 && skb_vlan_tag_present(new_skb)) {
- skb_push(new_skb, VLAN_HLEN);
- skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
- skb_copy_to_linear_data_offset(new_skb, 4,
- new_skb->data + 8, 4);
- skb_copy_to_linear_data_offset(new_skb, 8,
- new_skb->data + 12, 4);
- tag = (__be16 *)(new_skb->data + 12);
- *tag = cpu_to_be16(ETH_P_8021Q);
- *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
- }
+ if (ipv == 4) {
+ skb_pull(new_skb, ETH_HLEN);
+ } else if (skb_vlan_tag_present(new_skb)) {
+ skb_push(new_skb, VLAN_HLEN);
+ skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
+ skb_copy_to_linear_data_offset(new_skb, 4,
+ new_skb->data + 8, 4);
+ skb_copy_to_linear_data_offset(new_skb, 8,
+ new_skb->data + 12, 4);
+ tag = (__be16 *)(new_skb->data + 12);
+ *tag = cpu_to_be16(ETH_P_8021Q);
+ *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
}
- netif_stop_queue(dev);
-
/* fix hardware limitation: as long as we do not have sbal
* chaining we cannot send long frag lists
*/
- if ((card->info.type != QETH_CARD_TYPE_IQD) &&
- ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
- (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
- int lin_rc = skb_linearize(new_skb);
+ if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
+ (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) {
+ rc = skb_linearize(new_skb);
if (card->options.performance_stats) {
- if (lin_rc)
+ if (rc)
card->perf_stats.tx_linfail++;
else
card->perf_stats.tx_lin++;
}
- if (lin_rc)
- goto tx_drop;
+ if (rc)
+ goto out;
}
- nr_frags = skb_shinfo(new_skb)->nr_frags;
if (use_tso) {
hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
@@ -2265,97 +2285,112 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
qeth_tso_fill_header(card, hdr, new_skb);
hdr_elements++;
} else {
- if (data_offset < 0) {
- hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
- qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
- new_skb->len -
- sizeof(struct qeth_hdr));
- } else {
- if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV)
- qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
- else {
- qeth_l3_fill_header(card, hdr, new_skb, ipv,
- cast_type,
- new_skb->len - data_offset);
- }
- }
-
- if (new_skb->ip_summed == CHECKSUM_PARTIAL) {
- qeth_tx_csum(new_skb, &hdr->hdr.l3.ext_flags, ipv);
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
- }
+ hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
+ qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+ new_skb->len - sizeof(struct qeth_hdr));
}
elements = use_tso ?
qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
- qeth_get_elements_no(card, new_skb, hdr_elements,
- (data_offset > 0) ? data_offset : 0);
+ qeth_get_elements_no(card, new_skb, hdr_elements, 0);
if (!elements) {
- if (data_offset >= 0)
- kmem_cache_free(qeth_core_header_cache, hdr);
- goto tx_drop;
+ rc = -E2BIG;
+ goto out;
}
elements += hdr_elements;
- if (card->info.type != QETH_CARD_TYPE_IQD) {
- int len;
- if (use_tso) {
- hd_len = sizeof(struct qeth_hdr_tso) +
- ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
- len = hd_len;
- } else {
- len = sizeof(struct qeth_hdr_layer3);
- }
+ if (use_tso) {
+ hd_len = sizeof(struct qeth_hdr_tso) +
+ ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
+ len = hd_len;
+ } else {
+ hd_len = 0;
+ len = sizeof(struct qeth_hdr_layer3);
+ }
- if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len))
- goto tx_drop;
- rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len,
- hd_len, elements);
- } else
- rc = qeth_do_send_packet_fast(queue, new_skb, hdr, data_offset,
- hd_len);
+ if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ is_sg = skb_is_nonlinear(new_skb);
+ rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len,
+ elements);
+out:
if (!rc) {
- card->stats.tx_packets++;
- card->stats.tx_bytes += tx_bytes;
if (new_skb != skb)
dev_kfree_skb_any(skb);
if (card->options.performance_stats) {
+ card->perf_stats.buf_elements_sent += elements;
+ if (is_sg)
+ card->perf_stats.sg_skbs_sent++;
if (use_tso) {
card->perf_stats.large_send_bytes += tx_bytes;
card->perf_stats.large_send_cnt++;
}
- if (nr_frags) {
- card->perf_stats.sg_skbs_sent++;
- /* nr_frags + skb->data */
- card->perf_stats.sg_frags_sent += nr_frags + 1;
- }
}
- rc = NETDEV_TX_OK;
} else {
- if (data_offset >= 0)
- kmem_cache_free(qeth_core_header_cache, hdr);
+ if (new_skb != skb)
+ dev_kfree_skb_any(new_skb);
+ }
+ return rc;
+}
- if (rc == -EBUSY) {
- if (new_skb != skb)
- dev_kfree_skb_any(new_skb);
- return NETDEV_TX_BUSY;
- } else
+static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int cast_type = qeth_l3_get_cast_type(skb);
+ struct qeth_card *card = dev->ml_priv;
+ int ipv = qeth_get_ip_version(skb);
+ struct qeth_qdio_out_q *queue;
+ int tx_bytes = skb->len;
+ int rc;
+
+ if (IS_IQD(card)) {
+ if (card->options.sniffer)
+ goto tx_drop;
+ if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
+ (card->options.cq == QETH_CQ_ENABLED &&
+ skb->protocol != htons(ETH_P_AF_IUCV)))
goto tx_drop;
}
- netif_wake_queue(dev);
- if (card->options.performance_stats)
- card->perf_stats.outbound_time += qeth_get_micros() -
- card->perf_stats.outbound_start_time;
- return rc;
+ if (card->state != CARD_STATE_UP || !card->lan_online) {
+ card->stats.tx_carrier_errors++;
+ goto tx_drop;
+ }
+
+ if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
+ goto tx_drop;
+
+ queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+
+ if (card->options.performance_stats) {
+ card->perf_stats.outbound_cnt++;
+ card->perf_stats.outbound_start_time = qeth_get_micros();
+ }
+ netif_stop_queue(dev);
+
+ if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4))
+ rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type);
+ else
+ rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
+
+ if (!rc) {
+ card->stats.tx_packets++;
+ card->stats.tx_bytes += tx_bytes;
+ if (card->options.performance_stats)
+ card->perf_stats.outbound_time += qeth_get_micros() -
+ card->perf_stats.outbound_start_time;
+ netif_wake_queue(dev);
+ return NETDEV_TX_OK;
+ } else if (rc == -EBUSY) {
+ return NETDEV_TX_BUSY;
+ } /* else fall through */
tx_drop:
card->stats.tx_dropped++;
card->stats.tx_errors++;
- if ((new_skb != skb) && new_skb)
- dev_kfree_skb_any(new_skb);
dev_kfree_skb_any(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
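The refactored transmit path above funnels internal errno values into the two
return codes the core understands. A minimal sketch of that ndo_start_xmit
contract, with a hypothetical hw_queue_skb() standing in for the driver's real
queueing code:

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		int rc = hw_queue_skb(skb, dev);	/* hypothetical helper */

		if (rc == -EBUSY)
			return NETDEV_TX_BUSY;	/* skb not consumed; core will retry */
		if (rc) {
			dev->stats.tx_dropped++;
			dev_kfree_skb_any(skb);	/* hard error: consume the skb */
		}
		return NETDEV_TX_OK;		/* OK means the skb was consumed */
	}

Note that drops still return NETDEV_TX_OK, which is exactly what the tx_drop
label above does.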
@@ -2449,7 +2484,6 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
- .ndo_change_mtu = qeth_change_mtu,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
@@ -2466,7 +2500,6 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
- .ndo_change_mtu = qeth_change_mtu,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
@@ -2479,6 +2512,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
{
int rc;
+ if (card->dev->netdev_ops)
+ return 0;
+
if (card->info.type == QETH_CARD_TYPE_OSD ||
card->info.type == QETH_CARD_TYPE_OSX) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -2487,9 +2523,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
return -ENODEV;
}
- card->dev = alloc_etherdev(0);
- if (!card->dev)
- return -ENODEV;
card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
/*IPv6 address autoconfiguration stuff*/
@@ -2497,9 +2530,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
card->dev->dev_id = card->info.unique_id & 0xffff;
- card->dev->hw_features |= NETIF_F_SG;
- card->dev->vlan_features |= NETIF_F_SG;
-
if (!card->info.guestlan) {
card->dev->features |= NETIF_F_SG;
card->dev->hw_features |= NETIF_F_TSO |
@@ -2513,38 +2543,35 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
}
} else if (card->info.type == QETH_CARD_TYPE_IQD) {
- card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN,
- ether_setup);
- if (!card->dev)
- return -ENODEV;
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
+
rc = qeth_l3_iqd_read_initial_mac(card);
if (rc)
- return rc;
+ goto out;
+
if (card->options.hsuid[0])
memcpy(card->dev->perm_addr, card->options.hsuid, 9);
} else
return -ENODEV;
- card->dev->ml_priv = card;
- card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
- card->dev->mtu = card->info.initial_mtu;
- card->dev->min_mtu = 64;
- card->dev->max_mtu = ETH_MAX_MTU;
- card->dev->dev_port = card->info.portno;
card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
+ card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
+
netif_keep_dst(card->dev);
- netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
- PAGE_SIZE);
+ if (card->dev->hw_features & NETIF_F_TSO)
+ netif_set_gso_max_size(card->dev,
+ PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
- SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
- netif_carrier_off(card->dev);
- return register_netdev(card->dev);
+ rc = register_netdev(card->dev);
+out:
+ if (rc)
+ card->dev->netdev_ops = NULL;
+ return rc;
}
static const struct device_type qeth_l3_devtype = {
@@ -2582,15 +2609,9 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l3_set_offline(cgdev);
- if (card->dev) {
- unregister_netdev(card->dev);
- free_netdev(card->dev);
- card->dev = NULL;
- }
-
+ unregister_netdev(card->dev);
qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
- return;
}
static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
@@ -2612,10 +2633,9 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
}
- if (!card->dev && qeth_l3_setup_netdev(card)) {
- rc = -ENODEV;
+ rc = qeth_l3_setup_netdev(card);
+ if (rc)
goto out_remove;
- }
if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
if (card->info.hwtrap &&
@@ -2666,11 +2686,12 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
qeth_enable_hw_features(card->dev);
if (recover_flag == CARD_STATE_RECOVER) {
rtnl_lock();
- if (recovery_mode)
+ if (recovery_mode) {
__qeth_l3_open(card->dev);
- else
+ qeth_l3_set_rx_mode(card->dev);
+ } else {
dev_open(card->dev);
- qeth_l3_set_rx_mode(card->dev);
+ }
rtnl_unlock();
}
qeth_trace_features(card);
@@ -2711,8 +2732,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
- if (card->dev && netif_carrier_ok(card->dev))
- netif_carrier_off(card->dev);
+ netif_carrier_off(card->dev);
recover_flag = card->state;
if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
@@ -2780,8 +2800,7 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- if (card->dev)
- netif_device_detach(card->dev);
+ netif_device_detach(card->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_OFFLINE)
@@ -2814,8 +2833,7 @@ static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
rc = __qeth_l3_set_online(card->gdev, 0);
out:
qeth_set_allowed_threads(card, 0xffffffff, 0);
- if (card->dev)
- netif_device_attach(card->dev);
+ netif_device_attach(card->dev);
if (rc)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index f61192a048f4..45ac6d8705c6 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -299,8 +299,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
if (strlen(tmp) == 0) {
/* delete ip address only */
card->options.hsuid[0] = '\0';
- if (card->dev)
- memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+ memcpy(card->dev->perm_addr, card->options.hsuid, 9);
qeth_configure_cq(card, QETH_CQ_DISABLED);
return count;
}
@@ -311,8 +310,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
snprintf(card->options.hsuid, sizeof(card->options.hsuid),
"%-8s", tmp);
ASCEBC(card->options.hsuid, 8);
- if (card->dev)
- memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+ memcpy(card->dev->perm_addr, card->options.hsuid, 9);
rc = qeth_l3_modify_hsuid(card, true);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index a3a8c8d9d717..94f4d8fe85e0 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -101,7 +101,7 @@ static void __init zfcp_init_device_setup(char *devstr)
token = strsep(&str, ",");
if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
goto err_out;
- strncpy(busid, token, ZFCP_BUS_ID_SIZE);
+ strlcpy(busid, token, ZFCP_BUS_ID_SIZE);
token = strsep(&str, ",");
if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn))
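The strncpy() to strlcpy() switch above is defensive: the length check just
before it already rejects oversized tokens, but strlcpy() additionally
guarantees NUL termination. A minimal contrast, assuming a kernel context and
an illustrative buffer size:

	#include <linux/string.h>

	#define BUF_SIZE 20	/* illustrative stand-in for ZFCP_BUS_ID_SIZE */

	static void copy_contrast(char *dst, const char *src)
	{
		/* may leave dst unterminated when strlen(src) >= BUF_SIZE */
		strncpy(dst, src, BUF_SIZE);

		/* copies at most BUF_SIZE - 1 bytes, always NUL-terminates,
		 * and returns strlen(src) so truncation is detectable */
		strlcpy(dst, src, BUF_SIZE);
	}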
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 99ba4a770406..27521fc3ef5a 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2038,6 +2038,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (twa_initialize_device_extension(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
+ retval = -ENOMEM;
goto out_free_device_extension;
}
@@ -2060,6 +2061,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = ioremap(mem_addr, mem_len);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
+ retval = -ENOMEM;
goto out_release_mem_region;
}
@@ -2067,8 +2069,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
TW_DISABLE_INTERRUPTS(tw_dev);
/* Initialize the card */
- if (twa_reset_sequence(tw_dev, 0))
+ if (twa_reset_sequence(tw_dev, 0)) {
+ retval = -ENOMEM;
goto out_iounmap;
+ }
/* Set host specific parameters */
if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index cf9f2a09b47d..40c1e6e64f58 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1594,6 +1594,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (twl_initialize_device_extension(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
+ retval = -ENOMEM;
goto out_free_device_extension;
}
@@ -1608,6 +1609,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = pci_iomap(pdev, 1, 0);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
+ retval = -ENOMEM;
goto out_release_mem_region;
}
@@ -1617,6 +1619,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
/* Initialize the card */
if (twl_reset_sequence(tw_dev, 0)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
+ retval = -ENOMEM;
goto out_iounmap;
}
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index f6179e3d6953..471366945bd4 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1925,7 +1925,7 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
if (test_bit(TW_IN_RESET, &tw_dev->flags))
return SCSI_MLQUEUE_HOST_BUSY;
- /* Save done function into Scsi_Cmnd struct */
+ /* Save done function into struct scsi_cmnd */
SCpnt->scsi_done = done;
/* Queue the command and get a request id */
@@ -2280,6 +2280,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (tw_initialize_device_extension(tw_dev)) {
printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
+ retval = -ENOMEM;
goto out_free_device_extension;
}
@@ -2294,6 +2295,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = pci_resource_start(pdev, 0);
if (!tw_dev->base_addr) {
printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
+ retval = -ENOMEM;
goto out_release_mem_region;
}
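All of the 3ware hunks above fix the same class of bug: an error branch jumped
to the unwind labels without setting retval, so probe() could return a stale
non-negative value and leave the core believing the bind succeeded. A sketch
of the idiom under hypothetical names:

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int retval;

		retval = pci_enable_device(pdev);
		if (retval)
			goto out;

		if (example_init_extension(pdev)) {	/* hypothetical helper */
			retval = -ENOMEM;	/* the line the fixes add */
			goto out_disable;
		}
		return 0;

	out_disable:
		pci_disable_device(pdev);
	out:
		return retval;
	}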
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 35c909bbf8ba..8fc851a9e116 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -49,6 +49,7 @@ config SCSI_NETLINK
config SCSI_MQ_DEFAULT
bool "SCSI: use blk-mq I/O path by default"
+ default y
depends on SCSI
---help---
This option enables the new blk-mq based I/O path for SCSI
@@ -841,18 +842,6 @@ config SCSI_IZIP_SLOW_CTR
Generally, saying N is fine.
-config SCSI_NCR_D700
- tristate "NCR Dual 700 MCA SCSI support"
- depends on MCA && SCSI
- select SCSI_SPI_ATTRS
- help
- This is a driver for the MicroChannel Dual 700 card produced by
- NCR and commonly used in 345x/35xx/4100 class machines. It always
- tries to negotiate sync and uses tag command queueing.
-
- Unless you have an NCR manufactured machine, the chances are that
- you do not have this SCSI card, so say N.
-
config SCSI_LASI700
tristate "HP Lasi SCSI support for 53c700/710"
depends on GSC && SCSI
@@ -1000,21 +989,9 @@ config SCSI_ZALON
used on the add-in Bluefish, Barracuda & Shrike SCSI cards.
Say Y here if you have one of these machines or cards.
-config SCSI_NCR_Q720
- tristate "NCR Quad 720 MCA SCSI support"
- depends on MCA && SCSI
- select SCSI_SPI_ATTRS
- help
- This is a driver for the MicroChannel Quad 720 card produced by
- NCR and commonly used in 345x/35xx/4100 class machines. It always
- tries to negotiate sync and uses tag command queueing.
-
- Unless you have an NCR manufactured machine, the chances are that
- you do not have this SCSI card, so say N.
-
config SCSI_NCR53C8XX_DEFAULT_TAGS
int "default tagged command queue depth"
- depends on SCSI_ZALON || SCSI_NCR_Q720
+ depends on SCSI_ZALON
default "8"
---help---
"Tagged command queuing" is a feature of SCSI-2 which improves
@@ -1040,7 +1017,7 @@ config SCSI_NCR53C8XX_DEFAULT_TAGS
config SCSI_NCR53C8XX_MAX_TAGS
int "maximum number of queued commands"
- depends on SCSI_ZALON || SCSI_NCR_Q720
+ depends on SCSI_ZALON
default "32"
---help---
This option allows you to specify the maximum number of commands
@@ -1057,7 +1034,7 @@ config SCSI_NCR53C8XX_MAX_TAGS
config SCSI_NCR53C8XX_SYNC
int "synchronous transfers frequency in MHz"
- depends on SCSI_ZALON || SCSI_NCR_Q720
+ depends on SCSI_ZALON
default "20"
---help---
The SCSI Parallel Interface-2 Standard defines 5 classes of transfer
@@ -1091,7 +1068,7 @@ config SCSI_NCR53C8XX_SYNC
config SCSI_NCR53C8XX_NO_DISCONNECT
bool "not allow targets to disconnect"
- depends on (SCSI_ZALON || SCSI_NCR_Q720) && SCSI_NCR53C8XX_DEFAULT_TAGS=0
+ depends on SCSI_ZALON && SCSI_NCR53C8XX_DEFAULT_TAGS=0
help
This option is only provided for safety if you suspect some SCSI
device of yours to not support properly the target-disconnect
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 80aca2456353..6d71b2a9592b 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -21,6 +21,7 @@ CFLAGS_gdth.o = # -DDEBUG_GDTH=2 -D__SERIAL__ -D__COM2__ -DGDTH_STATISTICS
obj-$(CONFIG_PCMCIA) += pcmcia/
obj-$(CONFIG_SCSI) += scsi_mod.o
+obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_common.o
obj-$(CONFIG_RAID_ATTRS) += raid_class.o
@@ -76,8 +77,6 @@ obj-$(CONFIG_SCSI_PM8001) += pm8001/
obj-$(CONFIG_SCSI_ISCI) += isci/
obj-$(CONFIG_SCSI_IPS) += ips.o
obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
-obj-$(CONFIG_SCSI_NCR_D700) += 53c700.o NCR_D700.o
-obj-$(CONFIG_SCSI_NCR_Q720) += NCR_Q720_mod.o
obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
@@ -156,7 +155,6 @@ obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas/
obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
scsi_mod-y += scsi.o hosts.o scsi_ioctl.o \
scsicam.o scsi_error.o scsi_lib.o
-scsi_mod-y += scsi_common.o
scsi_mod-$(CONFIG_SCSI_CONSTANTS) += constants.o
scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o
scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
@@ -180,7 +178,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
-DCONFIG_SCSI_NCR53C8XX_NO_WORD_TRANSFERS
CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs := zalon.o ncr53c8xx.o
-NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
# Files generated that shall be removed upon make clean
clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
deleted file mode 100644
index b39a2409a507..000000000000
--- a/drivers/scsi/NCR_D700.c
+++ /dev/null
@@ -1,405 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* NCR Dual 700 MCA SCSI Driver
- *
- * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
-**-----------------------------------------------------------------------------
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-**-----------------------------------------------------------------------------
- */
-
-/* Notes:
- *
- * Most of the work is done in the chip specific module, 53c700.o
- *
- * TODO List:
- *
- * 1. Extract the SCSI ID from the voyager CMOS table (necessary to
- * support multi-host environments.
- *
- * */
-
-
-/* CHANGELOG
- *
- * Version 2.2
- *
- * Added mca_set_adapter_name().
- *
- * Version 2.1
- *
- * Modularise the driver into a Board piece (this file) and a chip
- * piece 53c700.[ch] and 53c700.scr, added module options. You can
- * now specify the scsi id by the parameters
- *
- * NCR_D700=slot:<n> [siop:<n>] id:<n> ....
- *
- * They need to be comma separated if compiled into the kernel
- *
- * Version 2.0
- *
- * Initial implementation of TCQ (Tag Command Queueing). TCQ is full
- * featured and uses the clock algorithm to keep track of outstanding
- * tags and guard against individual tag starvation. Also fixed a bug
- * in all of the 1.x versions where the D700_data_residue() function
- * was returning results off by 32 bytes (and thus causing the same 32
- * bytes to be written twice corrupting the data block). It turns out
- * the 53c700 only has a 6 bit DBC and DFIFO registers not 7 bit ones
- * like the 53c710 (The 710 is the only data manual still available,
- * which I'd been using to program the 700).
- *
- * Version 1.2
- *
- * Much improved message handling engine
- *
- * Version 1.1
- *
- * Add code to handle selection reasonably correctly. By the time we
- * get the selection interrupt, we've already responded, but drop off the
- * bus and hope the selector will go away.
- *
- * Version 1.0:
- *
- * Initial release. Fully functional except for procfs and tag
- * command queueing. Has only been tested on cards with 53c700-66
- * chips and only single ended. Features are
- *
- * 1. Synchronous data transfers to offset 8 (limit of 700-66) and
- * 100ns (10MHz) limit of SCSI-2
- *
- * 2. Disconnection and reselection
- *
- * Testing:
- *
- * I've only really tested this with the 700-66 chip, but have done
- * soak tests in multi-device environments to verify that
- * disconnections and reselections are being processed correctly.
- * */
-
-#define NCR_D700_VERSION "2.2"
-
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mca.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_transport.h>
-#include <scsi/scsi_transport_spi.h>
-
-#include "53c700.h"
-#include "NCR_D700.h"
-
-static char *NCR_D700; /* command line from insmod */
-
-MODULE_AUTHOR("James Bottomley");
-MODULE_DESCRIPTION("NCR Dual700 SCSI Driver");
-MODULE_LICENSE("GPL");
-module_param(NCR_D700, charp, 0);
-
-static __u8 id_array[2*(MCA_MAX_SLOT_NR + 1)] =
- { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };
-
-#ifdef MODULE
-#define ARG_SEP ' '
-#else
-#define ARG_SEP ','
-#endif
-
-static int __init
-param_setup(char *string)
-{
- char *pos = string, *next;
- int slot = -1, siop = -1;
-
- while(pos != NULL && (next = strchr(pos, ':')) != NULL) {
- int val = (int)simple_strtoul(++next, NULL, 0);
-
- if(!strncmp(pos, "slot:", 5))
- slot = val;
- else if(!strncmp(pos, "siop:", 5))
- siop = val;
- else if(!strncmp(pos, "id:", 3)) {
- if(slot == -1) {
- printk(KERN_WARNING "NCR D700: Must specify slot for id parameter\n");
- } else if(slot > MCA_MAX_SLOT_NR) {
- printk(KERN_WARNING "NCR D700: Illegal slot %d for id %d\n", slot, val);
- } else {
- if(siop != 0 && siop != 1) {
- id_array[slot*2] = val;
- id_array[slot*2 + 1] =val;
- } else {
- id_array[slot*2 + siop] = val;
- }
- }
- }
- if((pos = strchr(pos, ARG_SEP)) != NULL)
- pos++;
- }
- return 1;
-}
-
-/* Host template. The 53c700 routine NCR_700_detect will
- * fill in all of the missing routines */
-static struct scsi_host_template NCR_D700_driver_template = {
- .module = THIS_MODULE,
- .name = "NCR Dual 700 MCA",
- .proc_name = "NCR_D700",
- .this_id = 7,
-};
-
-/* We needs this helper because we have two hosts per struct device */
-struct NCR_D700_private {
- struct device *dev;
- struct Scsi_Host *hosts[2];
- char name[30];
- char pad;
-};
-
-static int
-NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
- int slot, u32 region, int differential)
-{
- struct NCR_700_Host_Parameters *hostdata;
- struct Scsi_Host *host;
- int ret;
-
- hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
- if (!hostdata) {
- printk(KERN_ERR "NCR D700: SIOP%d: Failed to allocate host"
- "data, detatching\n", siop);
- return -ENOMEM;
- }
-
- if (!request_region(region, 64, "NCR_D700")) {
- printk(KERN_ERR "NCR D700: Failed to reserve IO region 0x%x\n",
- region);
- ret = -ENODEV;
- goto region_failed;
- }
-
- /* Fill in the three required pieces of hostdata */
- hostdata->base = ioport_map(region, 64);
- hostdata->differential = (((1<<siop) & differential) != 0);
- hostdata->clock = NCR_D700_CLOCK_MHZ;
- hostdata->burst_length = 8;
-
- /* and register the siop */
- host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev);
- if (!host) {
- ret = -ENOMEM;
- goto detect_failed;
- }
-
- p->hosts[siop] = host;
- /* FIXME: read this from SUS */
- host->this_id = id_array[slot * 2 + siop];
- host->irq = irq;
- host->base = region;
- scsi_scan_host(host);
-
- return 0;
-
- detect_failed:
- release_region(region, 64);
- region_failed:
- kfree(hostdata);
-
- return ret;
-}
-
-static irqreturn_t
-NCR_D700_intr(int irq, void *data)
-{
- struct NCR_D700_private *p = (struct NCR_D700_private *)data;
- int i, found = 0;
-
- for (i = 0; i < 2; i++)
- if (p->hosts[i] &&
- NCR_700_intr(irq, p->hosts[i]) == IRQ_HANDLED)
- found++;
-
- return found ? IRQ_HANDLED : IRQ_NONE;
-}
-
-/* Detect a D700 card. Note, because of the setup --- the chips are
- * essentially connectecd to the MCA bus independently, it is easier
- * to set them up as two separate host adapters, rather than one
- * adapter with two channels */
-static int
-NCR_D700_probe(struct device *dev)
-{
- struct NCR_D700_private *p;
- int differential;
- static int banner = 1;
- struct mca_device *mca_dev = to_mca_device(dev);
- int slot = mca_dev->slot;
- int found = 0;
- int irq, i;
- int pos3j, pos3k, pos3a, pos3b, pos4;
- __u32 base_addr, offset_addr;
-
- /* enable board interrupt */
- pos4 = mca_device_read_pos(mca_dev, 4);
- pos4 |= 0x4;
- mca_device_write_pos(mca_dev, 4, pos4);
-
- mca_device_write_pos(mca_dev, 6, 9);
- pos3j = mca_device_read_pos(mca_dev, 3);
- mca_device_write_pos(mca_dev, 6, 10);
- pos3k = mca_device_read_pos(mca_dev, 3);
- mca_device_write_pos(mca_dev, 6, 0);
- pos3a = mca_device_read_pos(mca_dev, 3);
- mca_device_write_pos(mca_dev, 6, 1);
- pos3b = mca_device_read_pos(mca_dev, 3);
-
- base_addr = ((pos3j << 8) | pos3k) & 0xfffffff0;
- offset_addr = ((pos3a << 8) | pos3b) & 0xffffff70;
-
- irq = (pos4 & 0x3) + 11;
- if(irq >= 13)
- irq++;
- if(banner) {
- printk(KERN_NOTICE "NCR D700: Driver Version " NCR_D700_VERSION "\n"
- "NCR D700: Copyright (c) 2001 by James.Bottomley@HansenPartnership.com\n"
- "NCR D700:\n");
- banner = 0;
- }
- /* now do the bus related transforms */
- irq = mca_device_transform_irq(mca_dev, irq);
- base_addr = mca_device_transform_ioport(mca_dev, base_addr);
- offset_addr = mca_device_transform_ioport(mca_dev, offset_addr);
-
- printk(KERN_NOTICE "NCR D700: found in slot %d irq = %d I/O base = 0x%x\n", slot, irq, offset_addr);
-
- /*outb(BOARD_RESET, base_addr);*/
-
- /* clear any pending interrupts */
- (void)inb(base_addr + 0x08);
- /* get modctl, used later for setting diff bits */
- switch(differential = (inb(base_addr + 0x08) >> 6)) {
- case 0x00:
- /* only SIOP1 differential */
- differential = 0x02;
- break;
- case 0x01:
- /* Both SIOPs differential */
- differential = 0x03;
- break;
- case 0x03:
- /* No SIOPs differential */
- differential = 0x00;
- break;
- default:
- printk(KERN_ERR "D700: UNEXPECTED DIFFERENTIAL RESULT 0x%02x\n",
- differential);
- differential = 0x00;
- break;
- }
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- p->dev = dev;
- snprintf(p->name, sizeof(p->name), "D700(%s)", dev_name(dev));
- if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) {
- printk(KERN_ERR "D700: request_irq failed\n");
- kfree(p);
- return -EBUSY;
- }
- /* plumb in both 700 chips */
- for (i = 0; i < 2; i++) {
- int err;
-
- if ((err = NCR_D700_probe_one(p, i, irq, slot,
- offset_addr + (0x80 * i),
- differential)) != 0)
- printk("D700: SIOP%d: probe failed, error = %d\n",
- i, err);
- else
- found++;
- }
-
- if (!found) {
- kfree(p);
- return -ENODEV;
- }
-
- mca_device_set_claim(mca_dev, 1);
- mca_device_set_name(mca_dev, "NCR_D700");
- dev_set_drvdata(dev, p);
- return 0;
-}
-
-static void
-NCR_D700_remove_one(struct Scsi_Host *host)
-{
- scsi_remove_host(host);
- NCR_700_release(host);
- kfree((struct NCR_700_Host_Parameters *)host->hostdata[0]);
- free_irq(host->irq, host);
- release_region(host->base, 64);
-}
-
-static int
-NCR_D700_remove(struct device *dev)
-{
- struct NCR_D700_private *p = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < 2; i++)
- NCR_D700_remove_one(p->hosts[i]);
-
- kfree(p);
- return 0;
-}
-
-static short NCR_D700_id_table[] = { NCR_D700_MCA_ID, 0 };
-
-static struct mca_driver NCR_D700_driver = {
- .id_table = NCR_D700_id_table,
- .driver = {
- .name = "NCR_D700",
- .bus = &mca_bus_type,
- .probe = NCR_D700_probe,
- .remove = NCR_D700_remove,
- },
-};
-
-static int __init NCR_D700_init(void)
-{
-#ifdef MODULE
- if (NCR_D700)
- param_setup(NCR_D700);
-#endif
-
- return mca_register_driver(&NCR_D700_driver);
-}
-
-static void __exit NCR_D700_exit(void)
-{
- mca_unregister_driver(&NCR_D700_driver);
-}
-
-module_init(NCR_D700_init);
-module_exit(NCR_D700_exit);
-
-__setup("NCR_D700=", param_setup);
diff --git a/drivers/scsi/NCR_D700.h b/drivers/scsi/NCR_D700.h
deleted file mode 100644
index eb675d782ef6..000000000000
--- a/drivers/scsi/NCR_D700.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* NCR Dual 700 MCA SCSI Driver
- *
- * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
- */
-
-#ifndef _NCR_D700_H
-#define _NCR_D700_H
-
-/* Don't turn on debugging messages */
-#undef NCR_D700_DEBUG
-
-/* The MCA identifier */
-#define NCR_D700_MCA_ID 0x0092
-
-/* Defines for the Board registers */
-#define BOARD_RESET 0x80 /* board level reset */
-#define ADD_PARENB 0x04 /* Address Parity Enabled */
-#define DAT_PARENB 0x01 /* Data Parity Enabled */
-#define SFBK_ENB 0x10 /* SFDBK Interrupt Enabled */
-#define LED0GREEN 0x20 /* Led 0 (red 0; green 1) */
-#define LED1GREEN 0x40 /* Led 1 (red 0; green 1) */
-#define LED0RED 0xDF /* Led 0 (red 0; green 1) */
-#define LED1RED 0xBF /* Led 1 (red 0; green 1) */
-
-#define NCR_D700_CLOCK_MHZ 50
-
-#endif
diff --git a/drivers/scsi/NCR_Q720.c b/drivers/scsi/NCR_Q720.c
deleted file mode 100644
index 54e7d26908ee..000000000000
--- a/drivers/scsi/NCR_Q720.c
+++ /dev/null
@@ -1,376 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* NCR Quad 720 MCA SCSI Driver
- *
- * Copyright (C) 2003 by James.Bottomley@HansenPartnership.com
- */
-
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mca.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-
-#include "ncr53c8xx.h"
-
-#include "NCR_Q720.h"
-
-static struct ncr_chip q720_chip __initdata = {
- .revision_id = 0x0f,
- .burst_max = 3,
- .offset_max = 8,
- .nr_divisor = 4,
- .features = FE_WIDE | FE_DIFF | FE_VARCLK,
-};
-
-MODULE_AUTHOR("James Bottomley");
-MODULE_DESCRIPTION("NCR Quad 720 SCSI Driver");
-MODULE_LICENSE("GPL");
-
-#define NCR_Q720_VERSION "0.9"
-
-/* We needs this helper because we have up to four hosts per struct device */
-struct NCR_Q720_private {
- struct device *dev;
- void __iomem * mem_base;
- __u32 phys_mem_base;
- __u32 mem_size;
- __u8 irq;
- __u8 siops;
- __u8 irq_enable;
- struct Scsi_Host *hosts[4];
-};
-
-static struct scsi_host_template NCR_Q720_tpnt = {
- .module = THIS_MODULE,
- .proc_name = "NCR_Q720",
-};
-
-static irqreturn_t
-NCR_Q720_intr(int irq, void *data)
-{
- struct NCR_Q720_private *p = (struct NCR_Q720_private *)data;
- __u8 sir = (readb(p->mem_base + 0x0d) & 0xf0) >> 4;
- __u8 siop;
-
- sir |= ~p->irq_enable;
-
- if(sir == 0xff)
- return IRQ_NONE;
-
-
- while((siop = ffz(sir)) < p->siops) {
- sir |= 1<<siop;
- ncr53c8xx_intr(irq, p->hosts[siop]);
- }
- return IRQ_HANDLED;
-}
-
-static int __init
-NCR_Q720_probe_one(struct NCR_Q720_private *p, int siop,
- int irq, int slot, __u32 paddr, void __iomem *vaddr)
-{
- struct ncr_device device;
- __u8 scsi_id;
- static int unit = 0;
- __u8 scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1);
- __u8 differential = readb(vaddr + NCR_Q720_SCSR_OFFSET) & 0x20;
- __u8 version;
- int error;
-
- scsi_id = scsr1 >> 4;
- /* enable burst length 16 (FIXME: should allow this) */
- scsr1 |= 0x02;
- /* force a siop reset */
- scsr1 |= 0x04;
- writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1);
- udelay(10);
- version = readb(vaddr + 0x18) >> 4;
-
- memset(&device, 0, sizeof(struct ncr_device));
- /* Initialise ncr_device structure with items required by ncr_attach. */
- device.chip = q720_chip;
- device.chip.revision_id = version;
- device.host_id = scsi_id;
- device.dev = p->dev;
- device.slot.base = paddr;
- device.slot.base_c = paddr;
- device.slot.base_v = vaddr;
- device.slot.irq = irq;
- device.differential = differential ? 2 : 0;
- printk("Q720 probe unit %d (siop%d) at 0x%lx, diff = %d, vers = %d\n", unit, siop,
- (unsigned long)paddr, differential, version);
-
- p->hosts[siop] = ncr_attach(&NCR_Q720_tpnt, unit++, &device);
-
- if (!p->hosts[siop])
- goto fail;
-
- p->irq_enable |= (1<<siop);
- scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1);
- /* clear the disable interrupt bit */
- scsr1 &= ~0x01;
- writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1);
-
- error = scsi_add_host(p->hosts[siop], p->dev);
- if (error)
- ncr53c8xx_release(p->hosts[siop]);
- else
- scsi_scan_host(p->hosts[siop]);
- return error;
-
- fail:
- return -ENODEV;
-}
-
-/* Detect a Q720 card. Note, because of the setup --- the chips are
- * essentially connectecd to the MCA bus independently, it is easier
- * to set them up as two separate host adapters, rather than one
- * adapter with two channels */
-static int __init
-NCR_Q720_probe(struct device *dev)
-{
- struct NCR_Q720_private *p;
- static int banner = 1;
- struct mca_device *mca_dev = to_mca_device(dev);
- int slot = mca_dev->slot;
- int found = 0;
- int irq, i, siops;
- __u8 pos2, pos4, asr2, asr9, asr10;
- __u16 io_base;
- __u32 base_addr, mem_size;
- void __iomem *mem_base;
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- pos2 = mca_device_read_pos(mca_dev, 2);
- /* enable device */
- pos2 |= NCR_Q720_POS2_BOARD_ENABLE | NCR_Q720_POS2_INTERRUPT_ENABLE;
- mca_device_write_pos(mca_dev, 2, pos2);
-
- io_base = (pos2 & NCR_Q720_POS2_IO_MASK) << NCR_Q720_POS2_IO_SHIFT;
-
-
- if(banner) {
- printk(KERN_NOTICE "NCR Q720: Driver Version " NCR_Q720_VERSION "\n"
- "NCR Q720: Copyright (c) 2003 by James.Bottomley@HansenPartnership.com\n"
- "NCR Q720:\n");
- banner = 0;
- }
- io_base = mca_device_transform_ioport(mca_dev, io_base);
-
- /* OK, this is phase one of the bootstrap, we now know the
- * I/O space base address. All the configuration registers
- * are mapped here (including pos) */
-
- /* sanity check I/O mapping */
- i = inb(io_base) | (inb(io_base+1)<<8);
- if(i != NCR_Q720_MCA_ID) {
- printk(KERN_ERR "NCR_Q720, adapter failed to I/O map registers correctly at 0x%x(0x%x)\n", io_base, i);
- kfree(p);
- return -ENODEV;
- }
-
- /* Phase II, find the ram base and memory map the board register */
- pos4 = inb(io_base + 4);
- /* enable streaming data */
- pos4 |= 0x01;
- outb(pos4, io_base + 4);
- base_addr = (pos4 & 0x7e) << 20;
- base_addr += (pos4 & 0x80) << 23;
- asr10 = inb(io_base + 0x12);
- base_addr += (asr10 & 0x80) << 24;
- base_addr += (asr10 & 0x70) << 23;
-
- /* OK, got the base addr, now we need to find the ram size,
- * enable and map it */
- asr9 = inb(io_base + 0x11);
- i = (asr9 & 0xc0) >> 6;
- if(i == 0)
- mem_size = 1024;
- else
- mem_size = 1 << (19 + i);
-
- /* enable the sram mapping */
- asr9 |= 0x20;
-
- /* disable the rom mapping */
- asr9 &= ~0x10;
-
- outb(asr9, io_base + 0x11);
-
- if(!request_mem_region(base_addr, mem_size, "NCR_Q720")) {
- printk(KERN_ERR "NCR_Q720: Failed to claim memory region 0x%lx\n-0x%lx",
- (unsigned long)base_addr,
- (unsigned long)(base_addr + mem_size));
- goto out_free;
- }
-
- if (dma_declare_coherent_memory(dev, base_addr, base_addr,
- mem_size, 0)) {
- printk(KERN_ERR "NCR_Q720: DMA declare memory failed\n");
- goto out_release_region;
- }
-
- /* The first 1k of the memory buffer is a memory map of the registers
- */
- mem_base = dma_mark_declared_memory_occupied(dev, base_addr,
- 1024);
- if (IS_ERR(mem_base)) {
- printk("NCR_Q720 failed to reserve memory mapped region\n");
- goto out_release;
- }
-
- /* now also enable accesses in asr 2 */
- asr2 = inb(io_base + 0x0a);
-
- asr2 |= 0x01;
-
- outb(asr2, io_base + 0x0a);
-
- /* get the number of SIOPs (this should be 2 or 4) */
- siops = ((asr2 & 0xe0) >> 5) + 1;
-
- /* sanity check mapping (again) */
- i = readw(mem_base);
- if(i != NCR_Q720_MCA_ID) {
- printk(KERN_ERR "NCR_Q720, adapter failed to memory map registers correctly at 0x%lx(0x%x)\n", (unsigned long)base_addr, i);
- goto out_release;
- }
-
- irq = readb(mem_base + 5) & 0x0f;
-
-
- /* now do the bus related transforms */
- irq = mca_device_transform_irq(mca_dev, irq);
-
- printk(KERN_NOTICE "NCR Q720: found in slot %d irq = %d mem base = 0x%lx siops = %d\n", slot, irq, (unsigned long)base_addr, siops);
- printk(KERN_NOTICE "NCR Q720: On board ram %dk\n", mem_size/1024);
-
- p->dev = dev;
- p->mem_base = mem_base;
- p->phys_mem_base = base_addr;
- p->mem_size = mem_size;
- p->irq = irq;
- p->siops = siops;
-
- if (request_irq(irq, NCR_Q720_intr, IRQF_SHARED, "NCR_Q720", p)) {
- printk(KERN_ERR "NCR_Q720: request irq %d failed\n", irq);
- goto out_release;
- }
- /* disable all the siop interrupts */
- for(i = 0; i < siops; i++) {
- void __iomem *reg_scsr1 = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET
- + i*NCR_Q720_SIOP_SHIFT + NCR_Q720_SCSR_OFFSET + 1;
- __u8 scsr1 = readb(reg_scsr1);
- scsr1 |= 0x01;
- writeb(scsr1, reg_scsr1);
- }
-
- /* plumb in all 720 chips */
- for (i = 0; i < siops; i++) {
- void __iomem *siop_v_base = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET
- + i*NCR_Q720_SIOP_SHIFT;
- __u32 siop_p_base = base_addr + NCR_Q720_CHIP_REGISTER_OFFSET
- + i*NCR_Q720_SIOP_SHIFT;
- __u16 port = io_base + NCR_Q720_CHIP_REGISTER_OFFSET
- + i*NCR_Q720_SIOP_SHIFT;
- int err;
-
- outb(0xff, port + 0x40);
- outb(0x07, port + 0x41);
- if ((err = NCR_Q720_probe_one(p, i, irq, slot,
- siop_p_base, siop_v_base)) != 0)
- printk("Q720: SIOP%d: probe failed, error = %d\n",
- i, err);
- else
- found++;
- }
-
- if (!found) {
- kfree(p);
- return -ENODEV;
- }
-
- mca_device_set_claim(mca_dev, 1);
- mca_device_set_name(mca_dev, "NCR_Q720");
- dev_set_drvdata(dev, p);
-
- return 0;
-
- out_release:
- dma_release_declared_memory(dev);
- out_release_region:
- release_mem_region(base_addr, mem_size);
- out_free:
- kfree(p);
-
- return -ENODEV;
-}
-
-static void __exit
-NCR_Q720_remove_one(struct Scsi_Host *host)
-{
- scsi_remove_host(host);
- ncr53c8xx_release(host);
-}
-
-static int __exit
-NCR_Q720_remove(struct device *dev)
-{
- struct NCR_Q720_private *p = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < p->siops; i++)
- if(p->hosts[i])
- NCR_Q720_remove_one(p->hosts[i]);
-
- dma_release_declared_memory(dev);
- release_mem_region(p->phys_mem_base, p->mem_size);
- free_irq(p->irq, p);
- kfree(p);
- return 0;
-}
-
-static short NCR_Q720_id_table[] = { NCR_Q720_MCA_ID, 0 };
-
-static struct mca_driver NCR_Q720_driver = {
- .id_table = NCR_Q720_id_table,
- .driver = {
- .name = "NCR_Q720",
- .bus = &mca_bus_type,
- .probe = NCR_Q720_probe,
- .remove = NCR_Q720_remove,
- },
-};
-
-static int __init
-NCR_Q720_init(void)
-{
- int ret = ncr53c8xx_init();
- if (!ret)
- ret = mca_register_driver(&NCR_Q720_driver);
- if (ret)
- ncr53c8xx_exit();
- return ret;
-}
-
-static void __exit
-NCR_Q720_exit(void)
-{
- mca_unregister_driver(&NCR_Q720_driver);
- ncr53c8xx_exit();
-}
-
-module_init(NCR_Q720_init);
-module_exit(NCR_Q720_exit);
diff --git a/drivers/scsi/NCR_Q720.h b/drivers/scsi/NCR_Q720.h
deleted file mode 100644
index d5f46cdb736e..000000000000
--- a/drivers/scsi/NCR_Q720.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* NCR Quad 720 MCA SCSI Driver
- *
- * Copyright (C) 2003 by James.Bottomley@HansenPartnership.com
- */
-
-#ifndef _NCR_Q720_H
-#define _NCR_Q720_H
-
-/* The MCA identifier */
-#define NCR_Q720_MCA_ID 0x0720
-
-#define NCR_Q720_CLOCK_MHZ 30
-
-#define NCR_Q720_POS2_BOARD_ENABLE 0x01
-#define NCR_Q720_POS2_INTERRUPT_ENABLE 0x02
-#define NCR_Q720_POS2_PARITY_DISABLE 0x04
-#define NCR_Q720_POS2_IO_MASK 0xf8
-#define NCR_Q720_POS2_IO_SHIFT 8
-
-#define NCR_Q720_CHIP_REGISTER_OFFSET 0x200
-#define NCR_Q720_SCSR_OFFSET 0x070
-#define NCR_Q720_SIOP_SHIFT 0x080
-
-#endif
-
-
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index b2942ec3d455..23b17621b6d2 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -143,7 +143,7 @@ static u8 wait_chip_ready(struct orc_host * host)
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
if (inb(host->base + ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */
return 1;
- mdelay(100);
+ msleep(100);
}
return 0;
}
@@ -155,7 +155,7 @@ static u8 wait_firmware_ready(struct orc_host * host)
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
if (inb(host->base + ORC_HSTUS) & RREADY) /* Wait READY set */
return 1;
- mdelay(100); /* wait 100ms before try again */
+ msleep(100); /* wait 100ms before try again */
}
return 0;
}
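msleep() can replace mdelay() here because these firmware-ready polls run in
process context. The trade-off in brief, as a sketch:

	#include <linux/delay.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	static void delay_contrast(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		mdelay(100);	/* atomic context: busy-waiting is the only option */
		spin_unlock_irqrestore(&example_lock, flags);

		msleep(100);	/* process context: sleep and let others run */
	}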
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a57f3a7d4748..6e356325d8d9 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -115,8 +115,6 @@
#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
#define ASENCODE_OVERLAPPED_COMMAND 0x00
-#define AAC_STAT_GOOD (DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD)
-
#define BYTE0(x) (unsigned char)(x)
#define BYTE1(x) (unsigned char)((x) >> 8)
#define BYTE2(x) (unsigned char)((x) >> 16)
@@ -2961,7 +2959,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case SYNCHRONIZE_CACHE:
if (((aac_cache & 6) == 6) && dev->cache_protected) {
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
/* Issue FIB to tell Firmware to flush it's cache */
@@ -2989,7 +2988,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
arr[1] = scsicmd->cmnd[2];
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
} else if (scsicmd->cmnd[2] == 0x80) {
/* unit serial number page */
arr[3] = setinqserial(dev, &arr[4],
@@ -3000,7 +3001,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
} else if (scsicmd->cmnd[2] == 0x83) {
/* vpd page 0x83 - Device Identification Page */
char *sno = (char *)&inq_data;
@@ -3009,7 +3012,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
} else {
/* vpd page not implemented */
scsicmd->result = DID_OK << 16 |
@@ -3040,7 +3045,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
if (dev->in_reset)
@@ -3089,7 +3095,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
@@ -3115,7 +3122,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
@@ -3194,7 +3202,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_sg_copy_from_buffer(scsicmd,
(char *)&mpd,
mode_buf_length);
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
case MODE_SENSE_10:
@@ -3271,7 +3280,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
(char *)&mpd10,
mode_buf_length);
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
}
case REQUEST_SENSE:
@@ -3280,7 +3290,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
sizeof(struct sense_data));
memset(&dev->fsa_dev[cid].sense_data, 0,
sizeof(struct sense_data));
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
case ALLOW_MEDIUM_REMOVAL:
@@ -3290,7 +3301,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
else
fsa_dev_ptr[cid].locked = 0;
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
/*
* These commands are all No-Ops
@@ -3314,7 +3326,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case REZERO_UNIT:
case REASSIGN_BLOCKS:
case SEEK_10:
- scsicmd->result = AAC_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
break;
case START_STOP:
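For reference, a sketch of the result-word layout that the open-coded
replacement for AAC_STAT_GOOD spells out (byte positions per the SCSI
midlayer of this era):

	/*
	 * scmd->result packs four fields into one 32-bit word:
	 *   bits 24-31  driver byte  (DRIVER_OK, DRIVER_SENSE, ...)
	 *   bits 16-23  host byte    (DID_OK, DID_ERROR, ...)
	 *   bits  8-15  message byte (COMMAND_COMPLETE, ...)
	 *   bits  0-7   SCSI status  (SAM_STAT_GOOD, ...)
	 */
	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;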
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index d62ddd63f4fe..6e1b022a823d 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -514,7 +514,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
* The only invalid cases are if the caller requests to wait and
* does not request a response and if the caller does not want a
* response and the Fib is not allocated from pool. If a response
- * is not requesed the Fib will just be deallocaed by the DPC
+ * is not requested the Fib will just be deallocated by the DPC
* routine when the response comes back from the adapter. No
* further processing will be done besides deleting the Fib. We
* will have a debug mode where the adapter can notify the host
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 417ba349e10e..ddc69738375f 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -65,7 +65,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
/*
* Keep pulling response QEs off the response queue and waking
* up the waiters until there are no more QEs. We then return
- * back to the system. If no response was requesed we just
+ * back to the system. If no response was requested we just
* deallocate the Fib here and continue.
*/
while(aac_consumer_get(dev, q, &entry))
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 620166694171..576cdf9cc120 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -319,7 +319,7 @@ static void aac_rx_start_adapter(struct aac_dev *dev)
union aac_init *init;
init = dev->init;
- init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
+ init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds());
// We can only use a 32 bit address here
rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 882f40353b96..efa96c1c6aa3 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -251,7 +251,7 @@ static void aac_sa_start_adapter(struct aac_dev *dev)
* Fill in the remaining pieces of the init.
*/
init = dev->init;
- init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
+ init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds());
/* We can only use a 32 bit address here */
sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 4ebb35a29caa..7a51ccfa8662 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -409,7 +409,8 @@ static void aac_src_start_adapter(struct aac_dev *dev)
init = dev->init;
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
- init->r8.host_elapsed_seconds = cpu_to_le32(get_seconds());
+ init->r8.host_elapsed_seconds =
+ cpu_to_le32(ktime_get_real_seconds());
src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
lower_32_bits(dev->init_pa),
upper_32_bits(dev->init_pa),
@@ -417,7 +418,8 @@ static void aac_src_start_adapter(struct aac_dev *dev)
(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
0, 0, 0, NULL, NULL, NULL, NULL, NULL);
} else {
- init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
+ init->r7.host_elapsed_seconds =
+ cpu_to_le32(ktime_get_real_seconds());
// We can only use a 32 bit address here
src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
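The get_seconds() conversions in the aacraid hunks are part of the y2038
cleanup: get_seconds() returns an unsigned long that is only 32 bits wide on
32-bit machines, while ktime_get_real_seconds() returns time64_t everywhere.
Where the wire format stays 32-bit, the truncation becomes explicit. A hedged
sketch of the pattern:

	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <linux/timekeeping.h>

	static __le32 example_host_elapsed(void)
	{
		time64_t now = ktime_get_real_seconds();	/* 64-bit, y2038-safe */

		/* the firmware field is fixed at 32 bits, so truncate knowingly */
		return cpu_to_le32(lower_32_bits(now));
	}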
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 24e57e770432..713f69033f20 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2416,8 +2416,8 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
struct asc_board *boardp = shost_priv(s);
printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
- printk(" host_busy %u, host_no %d,\n",
- atomic_read(&s->host_busy), s->host_no);
+ printk(" host_busy %d, host_no %d,\n",
+ scsi_host_busy(s), s->host_no);
printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
(ulong)s->base, (ulong)s->io_port, boardp->irq);
@@ -3182,8 +3182,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
shost->host_no);
seq_printf(m,
- " host_busy %u, max_id %u, max_lun %llu, max_channel %u\n",
- atomic_read(&shost->host_busy), shost->max_id,
+ " host_busy %d, max_id %u, max_lun %llu, max_channel %u\n",
+ scsi_host_busy(shost), shost->max_id,
shost->max_lun, shost->max_channel);
seq_printf(m,
@@ -8466,7 +8466,7 @@ static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, adv_req_t *reqp)
}
/*
- * Execute a single 'Scsi_Cmnd'.
+ * Execute a single 'struct scsi_cmnd'.
*/
static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
{
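The advansys hunks above switch the diagnostics from reading
shost->host_busy directly to the scsi_host_busy() accessor, since blk-mq no
longer maintains that field as a plain atomic counter. Usage in brief,
assuming the 4.19-era API:

	#include <scsi/scsi_host.h>

	static void report_busy(struct Scsi_Host *shost)
	{
		/* scsi_host_busy() counts in-flight commands and returns int,
		 * hence the %u -> %d format-string change above */
		printk(KERN_INFO "host%d: %d commands in flight\n",
		       shost->host_no, scsi_host_busy(shost));
	}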
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index bc0058df31c6..4d7b0e0adbf7 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -422,16 +422,16 @@ enum aha152x_state {
*
*/
struct aha152x_hostdata {
- Scsi_Cmnd *issue_SC;
+ struct scsi_cmnd *issue_SC;
/* pending commands to issue */
- Scsi_Cmnd *current_SC;
+ struct scsi_cmnd *current_SC;
/* current command on the bus */
- Scsi_Cmnd *disconnected_SC;
+ struct scsi_cmnd *disconnected_SC;
/* commands that disconnected */
- Scsi_Cmnd *done_SC;
+ struct scsi_cmnd *done_SC;
/* command that was completed */
spinlock_t lock;
@@ -510,7 +510,7 @@ struct aha152x_hostdata {
*
*/
struct aha152x_scdata {
- Scsi_Cmnd *next; /* next sc in queue */
+ struct scsi_cmnd *next; /* next sc in queue */
struct completion *done;/* semaphore to block on */
struct scsi_eh_save ses;
};
@@ -633,7 +633,7 @@ static void aha152x_error(struct Scsi_Host *shpnt, char *msg);
static void done(struct Scsi_Host *shpnt, int error);
/* diagnostics */
-static void show_command(Scsi_Cmnd * ptr);
+static void show_command(struct scsi_cmnd * ptr);
static void show_queues(struct Scsi_Host *shpnt);
static void disp_enintr(struct Scsi_Host *shpnt);
@@ -642,9 +642,9 @@ static void disp_enintr(struct Scsi_Host *shpnt);
* queue services:
*
*/
-static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
+static inline void append_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC)
{
- Scsi_Cmnd *end;
+ struct scsi_cmnd *end;
SCNEXT(new_SC) = NULL;
if (!*SC)
@@ -656,9 +656,9 @@ static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
}
}
-static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd ** SC)
+static inline struct scsi_cmnd *remove_first_SC(struct scsi_cmnd ** SC)
{
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
ptr = *SC;
if (ptr) {
@@ -668,9 +668,10 @@ static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd ** SC)
return ptr;
}
-static inline Scsi_Cmnd *remove_lun_SC(Scsi_Cmnd ** SC, int target, int lun)
+static inline struct scsi_cmnd *remove_lun_SC(struct scsi_cmnd ** SC,
+ int target, int lun)
{
- Scsi_Cmnd *ptr, *prev;
+ struct scsi_cmnd *ptr, *prev;
for (ptr = *SC, prev = NULL;
ptr && ((ptr->device->id != target) || (ptr->device->lun != lun));
@@ -689,9 +690,10 @@ static inline Scsi_Cmnd *remove_lun_SC(Scsi_Cmnd ** SC, int target, int lun)
return ptr;
}
-static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, Scsi_Cmnd *SCp)
+static inline struct scsi_cmnd *remove_SC(struct scsi_cmnd **SC,
+ struct scsi_cmnd *SCp)
{
- Scsi_Cmnd *ptr, *prev;
+ struct scsi_cmnd *ptr, *prev;
for (ptr = *SC, prev = NULL;
ptr && SCp!=ptr;
@@ -912,8 +914,9 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
/*
* Queue a command and setup interrupts for a free bus.
*/
-static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
- int phase, void (*done)(Scsi_Cmnd *))
+static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
+ struct completion *complete,
+ int phase, void (*done)(struct scsi_cmnd *))
{
struct Scsi_Host *shpnt = SCpnt->device->host;
unsigned long flags;
@@ -987,7 +990,8 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
* queue a command
*
*/
-static int aha152x_queue_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+static int aha152x_queue_lck(struct scsi_cmnd *SCpnt,
+ void (*done)(struct scsi_cmnd *))
{
return aha152x_internal_queue(SCpnt, NULL, 0, done);
}
@@ -998,7 +1002,7 @@ static DEF_SCSI_QCMD(aha152x_queue)
/*
*
*/
-static void reset_done(Scsi_Cmnd *SCpnt)
+static void reset_done(struct scsi_cmnd *SCpnt)
{
if(SCSEM(SCpnt)) {
complete(SCSEM(SCpnt));
@@ -1011,10 +1015,10 @@ static void reset_done(Scsi_Cmnd *SCpnt)
* Abort a command
*
*/
-static int aha152x_abort(Scsi_Cmnd *SCpnt)
+static int aha152x_abort(struct scsi_cmnd *SCpnt)
{
struct Scsi_Host *shpnt = SCpnt->device->host;
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
unsigned long flags;
DO_LOCK(flags);
@@ -1052,7 +1056,7 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt)
* Reset a device
*
*/
-static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
+static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
{
struct Scsi_Host *shpnt = SCpnt->device->host;
DECLARE_COMPLETION(done);
@@ -1110,13 +1114,14 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
return ret;
}
-static void free_hard_reset_SCs(struct Scsi_Host *shpnt, Scsi_Cmnd **SCs)
+static void free_hard_reset_SCs(struct Scsi_Host *shpnt,
+ struct scsi_cmnd **SCs)
{
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
ptr=*SCs;
while(ptr) {
- Scsi_Cmnd *next;
+ struct scsi_cmnd *next;
if(SCDATA(ptr)) {
next = SCNEXT(ptr);
@@ -1171,7 +1176,7 @@ static int aha152x_bus_reset_host(struct Scsi_Host *shpnt)
* Reset the bus
*
*/
-static int aha152x_bus_reset(Scsi_Cmnd *SCpnt)
+static int aha152x_bus_reset(struct scsi_cmnd *SCpnt)
{
return aha152x_bus_reset_host(SCpnt->device->host);
}
@@ -1436,7 +1441,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
if(!(DONE_SC->SCp.phase & not_issued)) {
struct aha152x_scdata *sc;
- Scsi_Cmnd *ptr = DONE_SC;
+ struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
sc = SCDATA(ptr);
@@ -1451,7 +1456,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
}
if(DONE_SC && DONE_SC->scsi_done) {
- Scsi_Cmnd *ptr = DONE_SC;
+ struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
/* turn led off, when no commands are in the driver */
@@ -2247,13 +2252,13 @@ static void parerr_run(struct Scsi_Host *shpnt)
*/
static void rsti_run(struct Scsi_Host *shpnt)
{
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
shost_printk(KERN_NOTICE, shpnt, "scsi reset in\n");
ptr=DISCONNECTED_SC;
while(ptr) {
- Scsi_Cmnd *next = SCNEXT(ptr);
+ struct scsi_cmnd *next = SCNEXT(ptr);
if (!ptr->device->soft_reset) {
remove_SC(&DISCONNECTED_SC, ptr);
@@ -2438,7 +2443,7 @@ static void disp_enintr(struct Scsi_Host *shpnt)
/*
* Show the command data of a command
*/
-static void show_command(Scsi_Cmnd *ptr)
+static void show_command(struct scsi_cmnd *ptr)
{
scsi_print_command(ptr);
scmd_printk(KERN_DEBUG, ptr,
@@ -2462,7 +2467,7 @@ static void show_command(Scsi_Cmnd *ptr)
*/
static void show_queues(struct Scsi_Host *shpnt)
{
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
unsigned long flags;
DO_LOCK(flags);
@@ -2484,7 +2489,7 @@ static void show_queues(struct Scsi_Host *shpnt)
disp_enintr(shpnt);
}
-static void get_command(struct seq_file *m, Scsi_Cmnd * ptr)
+static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
{
int i;
@@ -2813,7 +2818,7 @@ static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length)
static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
{
int i;
- Scsi_Cmnd *ptr;
+ struct scsi_cmnd *ptr;
unsigned long flags;
seq_puts(m, AHA152X_REVID "\n");
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index b48d5436f094..786bf7f32c64 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -207,11 +207,11 @@ static int aha1740_test_port(unsigned int base)
static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
{
struct Scsi_Host *host = (struct Scsi_Host *) dev_id;
- void (*my_done)(Scsi_Cmnd *);
+ void (*my_done)(struct scsi_cmnd *);
int errstatus, adapstat;
int number_serviced;
struct ecb *ecbptr;
- Scsi_Cmnd *SCtmp;
+ struct scsi_cmnd *SCtmp;
unsigned int base;
unsigned long flags;
int handled = 0;
@@ -311,7 +311,8 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-static int aha1740_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+static int aha1740_queuecommand_lck(struct scsi_cmnd * SCpnt,
+ void (*done)(struct scsi_cmnd *))
{
unchar direction;
unchar *cmd = (unchar *) SCpnt->cmnd;
@@ -520,7 +521,7 @@ static int aha1740_biosparam(struct scsi_device *sdev,
return 0;
}
-static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy)
+static int aha1740_eh_abort_handler (struct scsi_cmnd *dummy)
{
/*
* From Alan Cox :
diff --git a/drivers/scsi/aha1740.h b/drivers/scsi/aha1740.h
index dfdaa4d3ea4e..6eeed6da0b54 100644
--- a/drivers/scsi/aha1740.h
+++ b/drivers/scsi/aha1740.h
@@ -135,8 +135,8 @@ struct ecb { /* Enhanced Control Block 6.1 */
/* Hardware defined portion ends here, rest is driver defined */
u8 sense[MAX_SENSE]; /* Sense area */
u8 status[MAX_STATUS]; /* Status area */
- Scsi_Cmnd *SCpnt; /* Link to the SCSI Command Block */
- void (*done) (Scsi_Cmnd *); /* Completion Function */
+ struct scsi_cmnd *SCpnt; /* Link to the SCSI Command Block */
+ void (*done) (struct scsi_cmnd *); /* Completion Function */
};
#define AHA1740CMD_NOP 0x00 /* No OP */
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 80e5b283fd81..1391e5f35918 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -1030,8 +1030,10 @@ static int __init aic94xx_init(void)
aic94xx_transport_template =
sas_domain_attach_transport(&aic94xx_transport_functions);
- if (!aic94xx_transport_template)
+ if (!aic94xx_transport_template) {
+ err = -ENOMEM;
goto out_destroy_caches;
+ }
err = pci_register_driver(&aic94xx_pci_driver);
if (err)
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 2e51ccc510e8..9c397a2794d6 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -49,7 +49,7 @@ struct device_attribute;
#define ARCMSR_MAX_OUTSTANDING_CMD 1024
#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
#define ARCMSR_MIN_OUTSTANDING_CMD 32
-#define ARCMSR_DRIVER_VERSION "v1.40.00.05-20180309"
+#define ARCMSR_DRIVER_VERSION "v1.40.00.09-20180709"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 732b5d9242f1..12316ef4c893 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1061,6 +1061,13 @@ static int arcmsr_resume(struct pci_dev *pdev)
pci_set_master(pdev);
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto controller_stop;
+ if (acb->adapter_type == ACB_ADAPTER_TYPE_E) {
+ writel(0, &acb->pmuE->host_int_status);
+ writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
+ acb->in_doorbell = 0;
+ acb->out_doorbell = 0;
+ acb->doneq_index = 0;
+ }
arcmsr_iop_init(acb);
arcmsr_init_get_devmap_timer(acb);
if (set_date_time)
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index b46997cf77e2..8996d2329e11 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1055,7 +1055,7 @@ static void tscam(struct Scsi_Host *host, bool wide_chip, u8 scam_on)
udelay(2); /* 2 deskew delay(45ns*2=90ns) */
val &= 0x007f; /* no bsy */
atp_writew_io(dev, 0, 0x1c, val);
- mdelay(128);
+ msleep(128);
val &= 0x00fb; /* after 1ms no msg */
atp_writew_io(dev, 0, 0x1c, val);
while ((atp_readb_io(dev, 0, 0x1c) & 0x04) != 0)
@@ -1286,9 +1286,9 @@ static void atp870_init(struct Scsi_Host *shpnt)
k = (atp_readb_base(atpdev, 0x3a) & 0xf3) | 0x10;
atp_writeb_base(atpdev, 0x3a, k);
atp_writeb_base(atpdev, 0x3a, k & 0xdf);
- mdelay(32);
+ msleep(32);
atp_writeb_base(atpdev, 0x3a, k);
- mdelay(32);
+ msleep(32);
atp_set_host_id(atpdev, 0, host_id);
tscam(shpnt, wide_chip, scam_on);
@@ -1370,9 +1370,9 @@ static void atp880_init(struct Scsi_Host *shpnt)
k = atp_readb_base(atpdev, 0x38) & 0x80;
atp_writeb_base(atpdev, 0x38, k);
atp_writeb_base(atpdev, 0x3b, 0x20);
- mdelay(32);
+ msleep(32);
atp_writeb_base(atpdev, 0x3b, 0);
- mdelay(32);
+ msleep(32);
atp_readb_io(atpdev, 0, 0x1b);
atp_readb_io(atpdev, 0, 0x17);
@@ -1454,10 +1454,10 @@ static void atp885_init(struct Scsi_Host *shpnt)
atp_writeb_base(atpdev, 0x28, k);
atp_writeb_pci(atpdev, 0, 1, 0x80);
atp_writeb_pci(atpdev, 1, 1, 0x80);
- mdelay(100);
+ msleep(100);
atp_writeb_pci(atpdev, 0, 1, 0);
atp_writeb_pci(atpdev, 1, 1, 0);
- mdelay(1000);
+ msleep(1000);
atp_readb_io(atpdev, 0, 0x1b);
atp_readb_io(atpdev, 0, 0x17);
atp_readb_io(atpdev, 1, 0x1b);
@@ -1473,7 +1473,7 @@ static void atp885_init(struct Scsi_Host *shpnt)
k = (k & 0x07) | 0x40;
atp_set_host_id(atpdev, 1, k);
- mdelay(600); /* this delay used to be called tscam_885() */
+ msleep(600); /* this delay used to be called tscam_885() */
dev_info(&pdev->dev, "Scanning Channel A SCSI Device ...\n");
atp_is(atpdev, 0, true, atp_readb_io(atpdev, 0, 0x1b) >> 7);
atp_writeb_io(atpdev, 0, 0x16, 0x80);
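The mdelay()-to-msleep() conversions above trade busy-waiting for sleeping in paths that are allowed to sleep. A minimal user-space analogue of the two behaviours, assuming only POSIX clock_gettime() and nanosleep() (the helper names are made up for illustration):

#include <stdio.h>
#include <time.h>

static void busy_delay_ms(long ms)		/* ~ mdelay(): spins, burns CPU */
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000L +
		 (now.tv_nsec - start.tv_nsec) / 1000000L < ms);
}

static void sleep_ms(long ms)			/* ~ msleep(): yields the CPU */
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

	nanosleep(&ts, NULL);
}

int main(void)
{
	busy_delay_ms(32);
	sleep_ms(32);
	printf("both delays elapsed\n");
	return 0;
}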
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 2eb66df3e3d6..c10aac4dbc5e 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1545,7 +1545,7 @@ int beiscsi_set_host_data(struct beiscsi_hba *phba)
snprintf((char *)ioctl->param.req.param_data,
sizeof(ioctl->param.req.param_data),
"Linux iSCSI v%s", BUILD_STR);
- ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len, 4);
+ ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len + 1, 4);
if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION)
ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION;
ret = be_mbox_notify(ctrl);
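The param_len fix above reserves room for the string's NUL terminator before rounding. A small sketch of the arithmetic, where ALIGN mirrors the kernel macro and the length is a made-up example:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int len = 12;	/* hypothetical strlen() of the version string */

	/* ALIGN(12, 4) stays 12 and loses the NUL; ALIGN(12 + 1, 4) is 16 */
	printf("ALIGN(len,4)=%u  ALIGN(len+1,4)=%u\n",
	       ALIGN(len, 4), ALIGN(len + 1, 4));
	return 0;
}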
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index a398c54139aa..c8f0a2144b44 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,11 +1,14 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
- * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
+ * Host Bus Adapters. Refer to the README file included with this package
+ * for driver version and adapter compatibility.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
+ * Copyright (c) 2018 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as published
+ * by the Free Software Foundation.
*
* Contact Information:
* linux-drivers@broadcom.com
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 818d185d63f0..3660059784f7 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,11 +1,22 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
- * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
+ * Host Bus Adapters. Refer to the README file included with this package
+ * for driver version and adapter compatibility.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
+ * Copyright (c) 2018 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful. ALL EXPRESS
+ * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+ * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH
+ * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package.
*
* Contact Information:
* linux-drivers@broadcom.com
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 66ca967f2850..8fdc07b6c686 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,11 +1,22 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
- * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+ * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
+ * Host Bus Adapters. Refer to the README file included with this package
+ * for driver version and adapter compatibility.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
+ * Copyright (c) 2018 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful. ALL EXPRESS
+ * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+ * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH
+ * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package.
*
* Contact Information:
* linux-drivers@broadcom.com
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index c05d6e91e4bd..c4a33317d344 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -70,21 +70,18 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
host_status = DID_ERROR;
}
}
- cmnd->result = ScsiResult(host_status, scsi_status);
+ cmnd->result = host_status << 16 | scsi_status;
break;
case BFI_IOIM_STS_TIMEDOUT:
- host_status = DID_TIME_OUT;
- cmnd->result = ScsiResult(host_status, 0);
+ cmnd->result = DID_TIME_OUT << 16;
break;
case BFI_IOIM_STS_PATHTOV:
- host_status = DID_TRANSPORT_DISRUPTED;
- cmnd->result = ScsiResult(host_status, 0);
+ cmnd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
default:
- host_status = DID_ERROR;
- cmnd->result = ScsiResult(host_status, 0);
+ cmnd->result = DID_ERROR << 16;
}
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
@@ -117,7 +114,7 @@ bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
struct bfad_itnim_data_s *itnim_data;
struct bfad_itnim_s *itnim;
- cmnd->result = ScsiResult(DID_OK, SCSI_STATUS_GOOD);
+ cmnd->result = DID_OK << 16 | SCSI_STATUS_GOOD;
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
if (cmnd->device->host != NULL)
@@ -144,7 +141,7 @@ bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio)
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
struct bfad_s *bfad = drv;
- cmnd->result = ScsiResult(DID_ERROR, 0);
+ cmnd->result = DID_ERROR << 16;
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
if (cmnd->device->host != NULL)
@@ -1253,14 +1250,14 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd
printk(KERN_WARNING
"bfad%d, queuecommand %p %x failed, BFA stopped\n",
bfad->inst_no, cmnd, cmnd->cmnd[0]);
- cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
+ cmnd->result = DID_NO_CONNECT << 16;
goto out_fail_cmd;
}
itnim = itnim_data->itnim;
if (!itnim) {
- cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
+ cmnd->result = DID_IMM_RETRY << 16;
goto out_fail_cmd;
}
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index af66275570c3..e61ed8dad0b4 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -44,7 +44,6 @@ u32 bfad_im_supported_speeds(struct bfa_s *bfa);
#define MAX_FCP_LUN 16384
#define BFAD_TARGET_RESET_TMO 60
#define BFAD_LUN_RESET_TMO 60
-#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define BFA_QUEUE_FULL_RAMP_UP_TIME 120
/*
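With the ScsiResult() macro removed, the bfa driver open-codes the same packing. A minimal user-space sketch of the result-word layout it relies on, assuming the usual host-byte/status-byte split (the DID_* values mirror include/scsi/scsi.h):

#include <stdio.h>

#define DID_OK		0x00
#define DID_ERROR	0x07
#define SAM_STAT_GOOD	0x00

/* host byte lives in bits 23:16, SCSI status byte in bits 7:0 */
static unsigned int make_result(unsigned int host, unsigned int status)
{
	return host << 16 | status;
}

int main(void)
{
	unsigned int result = make_result(DID_ERROR, SAM_STAT_GOOD);

	printf("host byte: %#x, status byte: %#x\n",
	       (result >> 16) & 0xff, result & 0xff);
	return 0;
}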
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 8f03a869ac98..e9e669a6c2bc 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2727,6 +2727,8 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
BNX2X_DOORBELL_PCI_BAR);
reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ if (!ep->qp.ctx_base)
+ return -ENOMEM;
goto arm_cq;
}
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index c535c52e72e5..1c5051b1c125 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -199,7 +199,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
buflength, &sshdr, timeout * HZ,
MAX_RETRIES, NULL);
- if (driver_byte(result) & DRIVER_SENSE) {
+ if (driver_byte(result) == DRIVER_SENSE) {
if (debug)
scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
errno = ch_find_errno(&sshdr);
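The ch.c change above replaces a bit test on the driver byte with an equality test. A toy illustration of where the two differ; driver_byte() here re-implements the kernel's shift, and the DRIVER_* values match the kernel headers:

#include <stdio.h>

#define DRIVER_BUSY	0x01
#define DRIVER_SENSE	0x08

static unsigned int driver_byte(unsigned int result)
{
	return (result >> 24) & 0xff;	/* driver byte is bits 31:24 */
}

int main(void)
{
	unsigned int pure = DRIVER_SENSE << 24;
	unsigned int mixed = (DRIVER_SENSE | DRIVER_BUSY) << 24;

	printf("pure:  bit=%d eq=%d\n",
	       !!(driver_byte(pure) & DRIVER_SENSE),
	       driver_byte(pure) == DRIVER_SENSE);
	printf("mixed: bit=%d eq=%d\n",	/* bit test passes, equality fails */
	       !!(driver_byte(mixed) & DRIVER_SENSE),
	       driver_byte(mixed) == DRIVER_SENSE);
	return 0;
}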
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index a10cf25ee7f9..23d07e9f87d0 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -761,27 +761,116 @@ out:
static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
+ /* Table for non-Numonix supported flash parts. Numonix parts are left
+ * to the preexisting code. All flash parts have 64KB sectors.
+ */
+ static struct flash_desc {
+ u32 vendor_and_model_id;
+ u32 size_mb;
+ } supported_flash[] = {
+ { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
+ };
+
+ u32 part, manufacturer;
+ u32 density, size = 0;
+ u32 flashid = 0;
int ret;
- uint32_t info = 0;
ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
if (!ret)
- ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
+ ret = csio_hw_sf1_read(hw, 3, 0, 1, &flashid);
csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
- if (ret != 0)
+ if (ret)
return ret;
- if ((info & 0xff) != 0x20) /* not a Numonix flash */
- return -EINVAL;
- info >>= 16; /* log2 of size */
- if (info >= 0x14 && info < 0x18)
- hw->params.sf_nsec = 1 << (info - 16);
- else if (info == 0x18)
- hw->params.sf_nsec = 64;
- else
- return -EINVAL;
- hw->params.sf_size = 1 << info;
+ /* Check to see if it's one of our non-standard supported Flash parts.
+ */
+ for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
+ if (supported_flash[part].vendor_and_model_id == flashid) {
+ hw->params.sf_size = supported_flash[part].size_mb;
+ hw->params.sf_nsec =
+ hw->params.sf_size / SF_SEC_SIZE;
+ goto found;
+ }
+
+ /* Decode Flash part size. The code below looks repetitive with
+ * common encodings, but that's not guaranteed in the JEDEC
+ * specification for the Read JEDEC ID command. The only thing that
+ * we're guaranteed by the JEDEC specification is where the
+ * Manufacturer ID is in the returned result. After that each
+ * Manufacturer ~could~ encode things completely differently.
+ * Note, all Flash parts must have 64KB sectors.
+ */
+ manufacturer = flashid & 0xff;
+ switch (manufacturer) {
+ case 0x20: { /* Micron/Numonix */
+ /* This Density -> Size decoding table is taken from Micron
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x14 ... 0x19: /* 1MB - 32MB */
+ size = 1 << density;
+ break;
+ case 0x20: /* 64MB */
+ size = 1 << 26;
+ break;
+ case 0x21: /* 128MB */
+ size = 1 << 27;
+ break;
+ case 0x22: /* 256MB */
+ size = 1 << 28;
+ }
+ break;
+ }
+ case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
+ /* This Density -> Size decoding table is taken from ISSI
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x16: /* 32 MB */
+ size = 1 << 25;
+ break;
+ case 0x17: /* 64MB */
+ size = 1 << 26;
+ }
+ break;
+ }
+ case 0xc2: /* Macronix */
+ case 0xef: /* Winbond */ {
+ /* This Density -> Size decoding table is taken from
+ * Macronix and Winbond Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17: /* 8MB */
+ case 0x18: /* 16MB */
+ size = 1 << density;
+ }
+ }
+ }
+
+ /* If we didn't recognize the FLASH part, that's no real issue: the
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_
+ * use a FLASH part which is at least 4MB in size and has 64KB
+ * sectors. The unrecognized FLASH part is likely to be much larger
+ * than 4MB, but that's all we really need.
+ */
+ if (size == 0) {
+ csio_warn(hw, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+ flashid);
+ size = 1 << 22;
+ }
+
+ /* Store decoded Flash size */
+ hw->params.sf_size = size;
+ hw->params.sf_nsec = size / SF_SEC_SIZE;
+found:
+ if (hw->params.sf_size < FLASH_MIN_SIZE)
+ csio_warn(hw, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
+ flashid, hw->params.sf_size, FLASH_MIN_SIZE);
return 0;
}
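For reference, a compilable sketch of the RDID decode pattern used above: the low byte of the returned ID identifies the manufacturer and, for the vendors handled here, the density byte is log2 of the part size. The IDs and table entries are illustrative, not a complete list:

#include <stdio.h>

static unsigned int flash_size_bytes(unsigned int flashid)
{
	unsigned int manufacturer = flashid & 0xff;
	unsigned int density = (flashid >> 16) & 0xff;

	switch (manufacturer) {
	case 0x20:				/* Micron/Numonix */
		if (density >= 0x14 && density <= 0x19)
			return 1u << density;	/* 1MB - 32MB */
		break;
	case 0xc2:				/* Macronix */
	case 0xef:				/* Winbond */
		if (density == 0x17 || density == 0x18)
			return 1u << density;	/* 8MB or 16MB */
		break;
	}
	return 1u << 22;	/* unrecognized: assume the 4MB contract minimum */
}

int main(void)
{
	/* hypothetical Winbond ID with density byte 0x18 -> 16MB */
	printf("0x1840ef -> %u bytes\n", flash_size_bytes(0x1840ef));
	return 0;
}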
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index faa357b62c61..5022e82ccc4f 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -39,6 +39,7 @@
#include <asm/page.h>
#include <linux/cache.h>
+#include "t4_values.h"
#include "csio_hw.h"
#include "csio_wr.h"
#include "csio_mb.h"
@@ -1309,8 +1310,11 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
struct csio_sge *sge = &wrm->sge;
uint32_t clsz = L1_CACHE_BYTES;
uint32_t s_hps = PAGE_SHIFT - 10;
- uint32_t ingpad = 0;
uint32_t stat_len = clsz > 64 ? 128 : 64;
+ u32 fl_align = clsz < 32 ? 32 : clsz;
+ u32 pack_align;
+ u32 ingpad, ingpack;
+ int pcie_cap;
csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
@@ -1318,14 +1322,82 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
SGE_HOST_PAGE_SIZE_A);
- sge->csio_fl_align = clsz < 32 ? 32 : clsz;
- ingpad = ilog2(sge->csio_fl_align) - 5;
+ /* T5 introduced the separation of the Free List Padding and
+ * Packing Boundaries. Thus, we can select a smaller Padding
+ * Boundary to avoid uselessly chewing up PCIe Link and Memory
+ * Bandwidth, and use a Packing Boundary which is large enough
+ * to avoid false sharing between CPUs, etc.
+ *
+ * For the PCI Link, the smaller the Padding Boundary the
+ * better. For the Memory Controller, a smaller Padding
+ * Boundary is better until we cross under the Memory Line
+ * Size (the minimum unit of transfer to/from Memory). If we
+ * have a Padding Boundary which is smaller than the Memory
+ * Line Size, that'll involve a Read-Modify-Write cycle on the
+ * Memory Controller which is never good.
+ */
+
+ /* We want the Packing Boundary to be based on the Cache Line
+ * Size in order to help avoid False Sharing performance
+ * issues between CPUs, etc. We also want the Packing
+ * Boundary to incorporate the PCI-E Maximum Payload Size. We
+ * get best performance when the Packing Boundary is a
+ * multiple of the Maximum Payload Size.
+ */
+ pack_align = fl_align;
+ pcie_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ u32 mps, mps_log;
+ u16 devctl;
+
+ /* The PCIe Device Control Maximum Payload Size field
+ * [bits 7:5] encodes sizes as powers of 2 starting at
+ * 128 bytes.
+ */
+ pci_read_config_word(hw->pdev,
+ pcie_cap + PCI_EXP_DEVCTL,
+ &devctl);
+ mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
+ mps = 1 << mps_log;
+ if (mps > pack_align)
+ pack_align = mps;
+ }
+
+ /* T5/T6 have a special interpretation of the "0"
+ * value for the Packing Boundary. This corresponds to 16
+ * bytes instead of the expected 32 bytes.
+ */
+ if (pack_align <= 16) {
+ ingpack = INGPACKBOUNDARY_16B_X;
+ fl_align = 16;
+ } else if (pack_align == 32) {
+ ingpack = INGPACKBOUNDARY_64B_X;
+ fl_align = 64;
+ } else {
+ u32 pack_align_log = fls(pack_align) - 1;
+
+ ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
+ fl_align = pack_align;
+ }
+
+ /* Use the smallest Ingress Padding which isn't smaller than
+ * the Memory Controller Read/Write Size. We'll take that as
+ * being 8 bytes since we don't know of any system with a
+ * wider Memory Controller Bus Width.
+ */
+ if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
+ ingpad = INGPADBOUNDARY_32B_X;
+ else
+ ingpad = T6_INGPADBOUNDARY_8B_X;
csio_set_reg_field(hw, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
EGRSTATUSPAGESIZE_F,
INGPADBOUNDARY_V(ingpad) |
EGRSTATUSPAGESIZE_V(stat_len != 64));
+ csio_set_reg_field(hw, SGE_CONTROL2_A,
+ INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
+ INGPACKBOUNDARY_V(ingpack));
/* FL BUFFER SIZE#0 is Page size i.e. already aligned to cache line */
csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);
@@ -1337,14 +1409,16 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
csio_wr_reg32(hw,
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
- sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ fl_align - 1) & ~(fl_align - 1),
SGE_FL_BUFFER_SIZE2_A);
csio_wr_reg32(hw,
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
- sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ fl_align - 1) & ~(fl_align - 1),
SGE_FL_BUFFER_SIZE3_A);
}
+ sge->csio_fl_align = fl_align;
+
csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A);
/* default value of rx_dma_offset of the NIC driver */
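The Maximum Payload Size decode above relies on the PCIe Device Control encoding: bits 7:5 express the MPS as a power of two starting at 128 bytes, so mps = 128 << field. A standalone sketch of just that arithmetic:

#include <stdio.h>

#define PCI_EXP_DEVCTL_PAYLOAD	0x00e0	/* same mask as the kernel header */

static unsigned int devctl_to_mps(unsigned short devctl)
{
	unsigned int mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;

	return 1u << mps_log;		/* field 0 -> 128, 1 -> 256, ... */
}

int main(void)
{
	printf("devctl=0x20 -> MPS %u bytes\n", devctl_to_mps(0x20));
	return 0;
}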
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 497a68389461..37b8dc60f5f6 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -88,10 +88,8 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name,
const struct file_operations *fops,
void *priv, int flags)
{
- struct qstr this;
- struct path path;
struct file *file;
- struct inode *inode = NULL;
+ struct inode *inode;
int rc;
if (fops->owner && !try_module_get(fops->owner)) {
@@ -116,29 +114,15 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name,
goto err3;
}
- this.name = name;
- this.len = strlen(name);
- this.hash = 0;
- path.dentry = d_alloc_pseudo(ocxlflash_vfs_mount->mnt_sb, &this);
- if (!path.dentry) {
- dev_err(dev, "%s: d_alloc_pseudo failed\n", __func__);
- rc = -ENOMEM;
- goto err4;
- }
-
- path.mnt = mntget(ocxlflash_vfs_mount);
- d_instantiate(path.dentry, inode);
-
- file = alloc_file(&path, OPEN_FMODE(flags), fops);
+ file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
+ flags & (O_ACCMODE | O_NONBLOCK), fops);
if (IS_ERR(file)) {
rc = PTR_ERR(file);
dev_err(dev, "%s: alloc_file failed rc=%d\n",
__func__, rc);
- path_put(&path);
- goto err3;
+ goto err4;
}
- file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->private_data = priv;
out:
return file;
@@ -1157,7 +1141,7 @@ static int afu_release(struct inode *inode, struct file *file)
*
* Return: 0 on success, -errno on failure
*/
-static int ocxlflash_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ocxlflash_context *ctx = vma->vm_file->private_data;
@@ -1180,8 +1164,7 @@ static int ocxlflash_mmap_fault(struct vm_fault *vmf)
mmio_area = ctx->psn_phys;
mmio_area += offset;
- vm_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
- return VM_FAULT_NOPAGE;
+ return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
}
static const struct vm_operations_struct ocxlflash_vmops = {
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index e489d89cbb45..acac6152f50b 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -339,7 +339,6 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
struct scsi_sense_hdr sshdr;
u8 *cmd_buf = NULL;
u8 *scsi_cmd = NULL;
- u8 *sense_buf = NULL;
int rc = 0;
int result = 0;
int retry_cnt = 0;
@@ -348,8 +347,7 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
retry:
cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
- sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
- if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
+ if (unlikely(!cmd_buf || !scsi_cmd)) {
rc = -ENOMEM;
goto out;
}
@@ -364,7 +362,7 @@ retry:
/* Drop the ioctl read semaphore across lengthy call */
up_read(&cfg->ioctl_rwsem);
result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
- CMD_BUFSIZE, sense_buf, &sshdr, to, CMD_RETRIES,
+ CMD_BUFSIZE, NULL, &sshdr, to, CMD_RETRIES,
0, 0, NULL);
down_read(&cfg->ioctl_rwsem);
rc = check_state(cfg);
@@ -395,7 +393,6 @@ retry:
if (retry_cnt++ < 1) {
kfree(cmd_buf);
kfree(scsi_cmd);
- kfree(sense_buf);
goto retry;
}
}
@@ -426,7 +423,6 @@ retry:
out:
kfree(cmd_buf);
kfree(scsi_cmd);
- kfree(sense_buf);
dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
__func__, gli->max_lba, gli->blk_len, rc);
@@ -1108,7 +1104,7 @@ out:
*
* Return: 0 on success, VM_FAULT_SIGBUS on failure
*/
-static int cxlflash_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct file *file = vma->vm_file;
@@ -1119,7 +1115,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
struct ctx_info *ctxi = NULL;
struct page *err_page = NULL;
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- int rc = 0;
+ vm_fault_t rc = 0;
int ctxid;
ctxid = cfg->ops->process_element(ctx);
@@ -1159,7 +1155,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
out:
if (likely(ctxi))
put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
return rc;
err:
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 66e445a17d6c..2c904bf16b65 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -426,7 +426,6 @@ static int write_same16(struct scsi_device *sdev,
{
u8 *cmd_buf = NULL;
u8 *scsi_cmd = NULL;
- u8 *sense_buf = NULL;
int rc = 0;
int result = 0;
u64 offset = lba;
@@ -440,8 +439,7 @@ static int write_same16(struct scsi_device *sdev,
cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
- sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
- if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
+ if (unlikely(!cmd_buf || !scsi_cmd)) {
rc = -ENOMEM;
goto out;
}
@@ -457,7 +455,7 @@ static int write_same16(struct scsi_device *sdev,
/* Drop the ioctl read semaphore across lengthy call */
up_read(&cfg->ioctl_rwsem);
result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
- CMD_BUFSIZE, sense_buf, NULL, to,
+ CMD_BUFSIZE, NULL, NULL, to,
CMD_RETRIES, 0, 0, NULL);
down_read(&cfg->ioctl_rwsem);
rc = check_state(cfg);
@@ -482,7 +480,6 @@ static int write_same16(struct scsi_device *sdev,
out:
kfree(cmd_buf);
kfree(scsi_cmd);
- kfree(sense_buf);
dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 60ef8df42b95..1ed2cd82129d 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3473,9 +3473,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
/*if( srb->cmd->cmnd[0] == INQUIRY && */
/* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
- if ((cmd->result == (DID_OK << 16)
- || status_byte(cmd->result) &
- CHECK_CONDITION)) {
+ if ((cmd->result == (DID_OK << 16) ||
+ status_byte(cmd->result) == CHECK_CONDITION)) {
if (!dcb->init_tcq_flag) {
add_dev(acb, dcb, ptr);
dcb->init_tcq_flag = 1;
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index ffec695e0bfb..54da3166da8d 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2175,15 +2175,13 @@ static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
- rcu_read_lock();
+ mutex_lock(&lport->disc.disc_mutex);
list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
if (kref_get_unless_zero(&rdata->kref)) {
fc_rport_logoff(rdata);
kref_put(&rdata->kref, fc_rport_destroy);
}
}
- rcu_read_unlock();
- mutex_lock(&lport->disc.disc_mutex);
lport->disc.disc_callback = NULL;
mutex_unlock(&lport->disc.disc_mutex);
}
@@ -2712,7 +2710,7 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
unsigned long deadline;
next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10);
- rcu_read_lock();
+ mutex_lock(&lport->disc.disc_mutex);
list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
if (!kref_get_unless_zero(&rdata->kref))
continue;
@@ -2733,7 +2731,7 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
next_time = deadline;
kref_put(&rdata->kref, fc_rport_destroy);
}
- rcu_read_unlock();
+ mutex_unlock(&lport->disc.disc_mutex);
return next_time;
}
@@ -3080,8 +3078,6 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
mutex_lock(&disc->disc_mutex);
callback = disc->pending ? disc->disc_callback : NULL;
disc->pending = 0;
- mutex_unlock(&disc->disc_mutex);
- rcu_read_lock();
list_for_each_entry_rcu(rdata, &disc->rports, peers) {
if (!kref_get_unless_zero(&rdata->kref))
continue;
@@ -3090,7 +3086,7 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
fc_rport_login(rdata);
kref_put(&rdata->kref, fc_rport_destroy);
}
- rcu_read_unlock();
+ mutex_unlock(&disc->disc_mutex);
if (callback)
callback(lport, DISC_EV_SUCCESS);
}
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 85604795d8ee..16709735b546 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -146,14 +146,14 @@ static irqreturn_t gdth_interrupt(int irq, void *dev_id);
static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
int gdth_from_wait, int* pIndex);
static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
- Scsi_Cmnd *scp);
+ struct scsi_cmnd *scp);
static int gdth_async_event(gdth_ha_str *ha);
static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
-static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority);
+static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority);
static void gdth_next(gdth_ha_str *ha);
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b);
-static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
+static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b);
+static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
u16 idx, gdth_evt_data *evt);
static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
@@ -161,10 +161,11 @@ static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
gdth_evt_str *estr);
static void gdth_clear_events(void);
-static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
+static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
char *buffer, u16 count);
-static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive);
+static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
+static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
+ u16 hdrive);
static void gdth_enable_int(gdth_ha_str *ha);
static int gdth_test_busy(gdth_ha_str *ha);
@@ -446,7 +447,7 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
int timeout, u32 *info)
{
gdth_ha_str *ha = shost_priv(sdev->host);
- Scsi_Cmnd *scp;
+ struct scsi_cmnd *scp;
struct gdth_cmndinfo cmndinfo;
DECLARE_COMPLETION_ONSTACK(wait);
int rval;
@@ -1982,11 +1983,11 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
/* command queueing/sending functions */
-static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
+static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority)
{
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- register Scsi_Cmnd *pscp;
- register Scsi_Cmnd *nscp;
+ register struct scsi_cmnd *pscp;
+ register struct scsi_cmnd *nscp;
unsigned long flags;
TRACE(("gdth_putq() priority %d\n",priority));
@@ -2000,11 +2001,11 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
scp->SCp.ptr = NULL;
} else { /* queue not empty */
pscp = ha->req_first;
- nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
/* priority: 0-highest,..,0xff-lowest */
while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) {
pscp = nscp;
- nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
}
pscp->SCp.ptr = (char *)scp;
scp->SCp.ptr = (char *)nscp;
@@ -2013,7 +2014,7 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
#ifdef GDTH_STATISTICS
flags = 0;
- for (nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
+ for (nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
++flags;
if (max_rq < flags) {
max_rq = flags;
@@ -2024,8 +2025,8 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
static void gdth_next(gdth_ha_str *ha)
{
- register Scsi_Cmnd *pscp;
- register Scsi_Cmnd *nscp;
+ register struct scsi_cmnd *pscp;
+ register struct scsi_cmnd *nscp;
u8 b, t, l, firsttime;
u8 this_cmd, next_cmd;
unsigned long flags = 0;
@@ -2040,10 +2041,10 @@ static void gdth_next(gdth_ha_str *ha)
next_cmd = gdth_polling ? FALSE:TRUE;
cmd_index = 0;
- for (nscp = pscp = ha->req_first; nscp; nscp = (Scsi_Cmnd *)nscp->SCp.ptr) {
+ for (nscp = pscp = ha->req_first; nscp; nscp = (struct scsi_cmnd *)nscp->SCp.ptr) {
struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp);
- if (nscp != pscp && nscp != (Scsi_Cmnd *)pscp->SCp.ptr)
- pscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ if (nscp != pscp && nscp != (struct scsi_cmnd *)pscp->SCp.ptr)
+ pscp = (struct scsi_cmnd *)pscp->SCp.ptr;
if (!nscp_cmndinfo->internal_command) {
b = nscp->device->channel;
t = nscp->device->id;
@@ -2250,7 +2251,7 @@ static void gdth_next(gdth_ha_str *ha)
if (!this_cmd)
break;
if (nscp == ha->req_first)
- ha->req_first = pscp = (Scsi_Cmnd *)nscp->SCp.ptr;
+ ha->req_first = pscp = (struct scsi_cmnd *)nscp->SCp.ptr;
else
pscp->SCp.ptr = nscp->SCp.ptr;
if (!next_cmd)
@@ -2275,7 +2276,7 @@ static void gdth_next(gdth_ha_str *ha)
* gdth_copy_internal_data() - copy to/from a buffer onto a scsi_cmnd's
* buffers, kmap_atomic() as needed.
*/
-static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
+static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
char *buffer, u16 count)
{
u16 cpcount,i, max_sg = scsi_sg_count(scp);
@@ -2317,7 +2318,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
}
}
-static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
+static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
{
u8 t;
gdth_inq_data inq;
@@ -2419,7 +2420,8 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
return 0;
}
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive)
+static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
+ u16 hdrive)
{
register gdth_cmd_str *cmdp;
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
@@ -2594,7 +2596,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive)
return cmd_index;
}
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b)
+static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
{
register gdth_cmd_str *cmdp;
u16 i;
@@ -2767,7 +2769,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b)
return cmd_index;
}
-static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
+static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
{
register gdth_cmd_str *cmdp;
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
@@ -2958,7 +2960,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
gdt6_dpram_str __iomem *dp6_ptr;
gdt2_dpram_str __iomem *dp2_ptr;
- Scsi_Cmnd *scp;
+ struct scsi_cmnd *scp;
int rval, i;
u8 IStatus;
u16 Service;
@@ -3217,7 +3219,7 @@ static irqreturn_t gdth_interrupt(int irq, void *dev_id)
}
static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
- Scsi_Cmnd *scp)
+ struct scsi_cmnd *scp)
{
gdth_msg_str *msg;
gdth_cmd_str *cmdp;
@@ -3708,7 +3710,7 @@ static u8 gdth_timer_running;
static void gdth_timeout(struct timer_list *unused)
{
u32 i;
- Scsi_Cmnd *nscp;
+ struct scsi_cmnd *nscp;
gdth_ha_str *ha;
unsigned long flags;
@@ -3724,7 +3726,8 @@ static void gdth_timeout(struct timer_list *unused)
if (ha->cmd_tab[i].cmnd != UNUSED_CMND)
++act_stats;
- for (act_rq=0,nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
+ for (act_rq=0,
+ nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
++act_rq;
TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n",
@@ -3909,12 +3912,12 @@ static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
}
-static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
+static int gdth_eh_bus_reset(struct scsi_cmnd *scp)
{
gdth_ha_str *ha = shost_priv(scp->device->host);
int i;
unsigned long flags;
- Scsi_Cmnd *cmnd;
+ struct scsi_cmnd *cmnd;
u8 b;
TRACE2(("gdth_eh_bus_reset()\n"));
@@ -4465,7 +4468,7 @@ free_fail:
static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
gdth_ha_str *ha;
- Scsi_Cmnd *scp;
+ struct scsi_cmnd *scp;
unsigned long flags;
char cmnd[MAX_COMMAND_SIZE];
void __user *argp = (void __user *)arg;
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index e6e5ccb1e0f3..ee6ffcf388e8 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -162,9 +162,9 @@
#define BIGSECS 63 /* mapping 255*63 */
/* special command ptr. */
-#define UNUSED_CMND ((Scsi_Cmnd *)-1)
-#define INTERNAL_CMND ((Scsi_Cmnd *)-2)
-#define SCREEN_CMND ((Scsi_Cmnd *)-3)
+#define UNUSED_CMND ((struct scsi_cmnd *)-1)
+#define INTERNAL_CMND ((struct scsi_cmnd *)-2)
+#define SCREEN_CMND ((struct scsi_cmnd *)-3)
#define SPECIAL_SCP(p) (p==UNUSED_CMND || p==INTERNAL_CMND || p==SCREEN_CMND)
/* controller services */
@@ -867,7 +867,7 @@ typedef struct {
u16 service; /* service/firmware ver./.. */
u32 info;
u32 info2; /* additional info */
- Scsi_Cmnd *req_first; /* top of request queue */
+ struct scsi_cmnd *req_first; /* top of request queue */
struct {
u8 present; /* Flag: host drive present? */
u8 is_logdrv; /* Flag: log. drive (master)? */
@@ -896,7 +896,7 @@ typedef struct {
u32 id_list[MAXID]; /* IDs of the phys. devices */
} raw[MAXBUS]; /* SCSI channels */
struct {
- Scsi_Cmnd *cmnd; /* pending request */
+ struct scsi_cmnd *cmnd; /* pending request */
u16 service; /* service */
} cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */
struct gdth_cmndinfo { /* per-command private info */
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 20add49cdd32..3a9751a80225 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -626,7 +626,7 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
{
unsigned long flags;
int i;
- Scsi_Cmnd *scp;
+ struct scsi_cmnd *scp;
struct gdth_cmndinfo *cmndinfo;
u8 b, t;
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 7052a5d45f7f..6c7d2e201abe 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
+#include <linux/lcm.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -199,17 +200,17 @@ struct hisi_sas_slot {
int dlvry_queue_slot;
int cmplt_queue;
int cmplt_queue_slot;
- int idx;
int abort;
int ready;
- void *buf;
- dma_addr_t buf_dma;
void *cmd_hdr;
dma_addr_t cmd_hdr_dma;
- struct work_struct abort_slot;
struct timer_list internal_abort_timer;
bool is_internal;
struct hisi_sas_tmf_task *tmf;
+ /* Do not reorder/change members after here */
+ void *buf;
+ dma_addr_t buf_dma;
+ int idx;
};
struct hisi_sas_hw {
@@ -277,6 +278,7 @@ struct hisi_hba {
int n_phy;
spinlock_t lock;
+ struct semaphore sem;
struct timer_list timer;
struct workqueue_struct *wq;
@@ -298,7 +300,6 @@ struct hisi_hba {
int queue_count;
- struct dma_pool *buffer_pool;
struct hisi_sas_device devices[HISI_SAS_MAX_DEVICES];
struct hisi_sas_cmd_hdr *cmd_hdr[HISI_SAS_MAX_QUEUES];
dma_addr_t cmd_hdr_dma[HISI_SAS_MAX_QUEUES];
@@ -319,6 +320,7 @@ struct hisi_hba {
const struct hisi_sas_hw *hw; /* Low level hw interface */
unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
struct work_struct rst_work;
+ u32 phy_state;
};
/* Generic HW DMA host memory structures */
@@ -479,4 +481,6 @@ extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
enum hisi_sas_phy_event event);
extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max);
+extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba);
+extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba);
#endif
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 6f562974f8f6..a4e2e6aa9a6b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -242,20 +242,16 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
task->data_dir);
}
- if (slot->buf)
- dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
spin_lock_irqsave(&dq->lock, flags);
list_del_init(&slot->entry);
spin_unlock_irqrestore(&dq->lock, flags);
- slot->buf = NULL;
- slot->task = NULL;
- slot->port = NULL;
+
+ memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
+
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot->idx);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
-
- /* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
@@ -285,40 +281,6 @@ static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
device_id, abort_flag, tag_to_abort);
}
-/*
- * This function will issue an abort TMF regardless of whether the
- * task is in the sdev or not. Then it will do the task complete
- * cleanup and callbacks.
- */
-static void hisi_sas_slot_abort(struct work_struct *work)
-{
- struct hisi_sas_slot *abort_slot =
- container_of(work, struct hisi_sas_slot, abort_slot);
- struct sas_task *task = abort_slot->task;
- struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
- struct scsi_cmnd *cmnd = task->uldd_task;
- struct hisi_sas_tmf_task tmf_task;
- struct scsi_lun lun;
- struct device *dev = hisi_hba->dev;
- int tag = abort_slot->idx;
-
- if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
- dev_err(dev, "cannot abort slot for non-ssp task\n");
- goto out;
- }
-
- int_to_scsilun(cmnd->device->lun, &lun);
- tmf_task.tmf = TMF_ABORT_TASK;
- tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
-
- hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
-out:
- /* Do cleanup for this task */
- hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
- if (task->task_done)
- task->task_done(task);
-}
-
static int hisi_sas_task_prep(struct sas_task *task,
struct hisi_sas_dq **dq_pointer,
bool is_tmf, struct hisi_sas_tmf_task *tmf,
@@ -334,8 +296,8 @@ static int hisi_sas_task_prep(struct sas_task *task,
struct device *dev = hisi_hba->dev;
int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
- unsigned long flags, flags_dq;
struct hisi_sas_dq *dq;
+ unsigned long flags;
int wr_q_index;
if (!sas_port) {
@@ -430,30 +392,22 @@ static int hisi_sas_task_prep(struct sas_task *task,
goto err_out_dma_unmap;
slot = &hisi_hba->slot_info[slot_idx];
- memset(slot, 0, sizeof(struct hisi_sas_slot));
-
- slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
- GFP_ATOMIC, &slot->buf_dma);
- if (!slot->buf) {
- rc = -ENOMEM;
- goto err_out_tag;
- }
- spin_lock_irqsave(&dq->lock, flags_dq);
+ spin_lock_irqsave(&dq->lock, flags);
wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (wr_q_index < 0) {
- spin_unlock_irqrestore(&dq->lock, flags_dq);
+ spin_unlock_irqrestore(&dq->lock, flags);
rc = -EAGAIN;
- goto err_out_buf;
+ goto err_out_tag;
}
list_add_tail(&slot->delivery, &dq->list);
- spin_unlock_irqrestore(&dq->lock, flags_dq);
+ list_add_tail(&slot->entry, &sas_dev->list);
+ spin_unlock_irqrestore(&dq->lock, flags);
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
- slot->idx = slot_idx;
slot->n_elem = n_elem;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
@@ -464,7 +418,6 @@ static int hisi_sas_task_prep(struct sas_task *task,
slot->tmf = tmf;
slot->is_internal = is_tmf;
task->lldd_task = slot;
- INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
@@ -488,21 +441,15 @@ static int hisi_sas_task_prep(struct sas_task *task,
break;
}
- spin_lock_irqsave(&dq->lock, flags);
- list_add_tail(&slot->entry, &sas_dev->list);
- spin_unlock_irqrestore(&dq->lock, flags);
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
++(*pass);
- slot->ready = 1;
+ WRITE_ONCE(slot->ready, 1);
return 0;
-err_out_buf:
- dma_pool_free(hisi_hba->buffer_pool, slot->buf,
- slot->buf_dma);
err_out_tag:
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
@@ -536,8 +483,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
struct device *dev = hisi_hba->dev;
struct hisi_sas_dq *dq = NULL;
- if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
- return -EINVAL;
+ if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
+ if (in_softirq())
+ return -EINVAL;
+
+ down(&hisi_hba->sem);
+ up(&hisi_hba->sem);
+ }
/* protect task_prep and start_delivery sequence */
rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
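The down()/up() pair above acts as a gate: the reset path holds hisi_hba->sem for the whole reset, and a submitter that is allowed to sleep simply acquires and immediately releases it, blocking until the reset finishes. A user-space analogue with POSIX semaphores (all names here are illustrative):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t gate;

static void *resetter(void *arg)
{
	sem_wait(&gate);	/* ~ down(&hisi_hba->sem) in reset prepare */
	puts("reset in progress");
	sleep(1);		/* stand-in for the actual controller reset */
	sem_post(&gate);	/* ~ up(&hisi_hba->sem) in reset done */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&gate, 0, 1);
	pthread_create(&t, NULL, resetter, NULL);
	usleep(100 * 1000);	/* let the reset path take the gate first */
	sem_wait(&gate);	/* submitter blocks until the reset releases */
	sem_post(&gate);
	puts("submitter resumes after reset");
	pthread_join(t, NULL);
	return 0;
}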
@@ -819,6 +771,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
for (i = 0; i < HISI_PHYES_NUM; i++)
INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
+
+ spin_lock_init(&phy->lock);
}
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
@@ -862,7 +816,6 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task
hisi_sas_slot_task_free(hisi_hba, task, slot);
}
-/* hisi_hba.lock should be locked */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
struct domain_device *device)
{
@@ -914,7 +867,9 @@ static void hisi_sas_dev_gone(struct domain_device *device)
hisi_sas_dereg_device(hisi_hba, device);
+ down(&hisi_hba->sem);
hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
+ up(&hisi_hba->sem);
device->lldd_dev = NULL;
}
@@ -1351,21 +1306,12 @@ static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
}
}
-static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
+void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
- struct device *dev = hisi_hba->dev;
struct Scsi_Host *shost = hisi_hba->shost;
- u32 old_state, state;
- int rc;
-
- if (!hisi_hba->hw->soft_reset)
- return -1;
- if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
- return -1;
-
- dev_info(dev, "controller resetting...\n");
- old_state = hisi_hba->hw->get_phys_state(hisi_hba);
+ down(&hisi_hba->sem);
+ hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
@@ -1374,34 +1320,61 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
del_timer_sync(&hisi_hba->timer);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
- rc = hisi_hba->hw->soft_reset(hisi_hba);
- if (rc) {
- dev_warn(dev, "controller reset failed (%d)\n", rc);
- clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
- scsi_unblock_requests(shost);
- goto out;
- }
+}
+EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
- clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
+{
+ struct Scsi_Host *shost = hisi_hba->shost;
+ u32 state;
/* Init and wait for PHYs to come up and all libsas events to finish. */
hisi_hba->hw->phys_init(hisi_hba);
msleep(1000);
hisi_sas_refresh_port_id(hisi_hba);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ up(&hisi_hba->sem);
if (hisi_hba->reject_stp_links_msk)
hisi_sas_terminate_stp_reject(hisi_hba);
hisi_sas_reset_init_all_devices(hisi_hba);
scsi_unblock_requests(shost);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
state = hisi_hba->hw->get_phys_state(hisi_hba);
- hisi_sas_rescan_topology(hisi_hba, old_state, state);
- dev_info(dev, "controller reset complete\n");
+ hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
-out:
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ int rc;
- return rc;
+ if (!hisi_hba->hw->soft_reset)
+ return -1;
+
+ if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ return -1;
+
+ dev_info(dev, "controller resetting...\n");
+ hisi_sas_controller_reset_prepare(hisi_hba);
+
+ rc = hisi_hba->hw->soft_reset(hisi_hba);
+ if (rc) {
+ dev_warn(dev, "controller reset failed (%d)\n", rc);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ up(&hisi_hba->sem);
+ scsi_unblock_requests(shost);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ return rc;
+ }
+
+ hisi_sas_controller_reset_done(hisi_hba);
+ dev_info(dev, "controller reset complete\n");
+
+ return 0;
}
static int hisi_sas_abort_task(struct sas_task *task)
@@ -1644,14 +1617,32 @@ out:
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
+ int rc, i;
queue_work(hisi_hba->wq, &r.work);
wait_for_completion(r.completion);
- if (r.done)
- return TMF_RESP_FUNC_COMPLETE;
+ if (!r.done)
+ return TMF_RESP_FUNC_FAILED;
+
+ for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
+ struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
+ struct domain_device *device = sas_dev->sas_device;
- return TMF_RESP_FUNC_FAILED;
+ if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
+ DEV_IS_EXPANDER(device->dev_type))
+ continue;
+
+ rc = hisi_sas_debug_I_T_nexus_reset(device);
+ if (rc != TMF_RESP_FUNC_COMPLETE)
+ dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
+ sas_dev->device_id, rc);
+ }
+
+ hisi_sas_release_tasks(hisi_hba);
+
+ return TMF_RESP_FUNC_COMPLETE;
}
static int hisi_sas_query_task(struct sas_task *task)
@@ -1723,21 +1714,13 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
spin_unlock_irqrestore(&hisi_hba->lock, flags);
slot = &hisi_hba->slot_info[slot_idx];
- memset(slot, 0, sizeof(struct hisi_sas_slot));
-
- slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
- GFP_ATOMIC, &slot->buf_dma);
- if (!slot->buf) {
- rc = -ENOMEM;
- goto err_out_tag;
- }
spin_lock_irqsave(&dq->lock, flags_dq);
wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (wr_q_index < 0) {
spin_unlock_irqrestore(&dq->lock, flags_dq);
rc = -EAGAIN;
- goto err_out_buf;
+ goto err_out_tag;
}
list_add_tail(&slot->delivery, &dq->list);
spin_unlock_irqrestore(&dq->lock, flags_dq);
@@ -1745,7 +1728,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
- slot->idx = slot_idx;
slot->n_elem = n_elem;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
@@ -1767,7 +1749,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- slot->ready = 1;
+ WRITE_ONCE(slot->ready, 1);
/* send abort command to the chip */
spin_lock_irqsave(&dq->lock, flags);
list_add_tail(&slot->entry, &sas_dev->list);
@@ -1776,9 +1758,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
return 0;
-err_out_buf:
- dma_pool_free(hisi_hba->buffer_pool, slot->buf,
- slot->buf_dma);
err_out_tag:
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
@@ -1919,7 +1898,8 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
} else {
struct hisi_sas_port *port = phy->port;
- if (phy->in_reset) {
+ if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
+ phy->in_reset) {
dev_info(dev, "ignore flutter phy%d down\n", phy_no);
return;
}
@@ -2014,8 +1994,11 @@ EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
struct device *dev = hisi_hba->dev;
- int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
+ int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
+ int max_command_entries_ru, sz_slot_buf_ru;
+ int blk_cnt, slots_per_blk;
+ sema_init(&hisi_hba->sem, 1);
spin_lock_init(&hisi_hba->lock);
for (i = 0; i < hisi_hba->n_phy; i++) {
hisi_sas_phy_init(hisi_hba, i);
@@ -2045,29 +2028,27 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
/* Delivery queue */
s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
- hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
- &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
+ hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
+ &hisi_hba->cmd_hdr_dma[i],
+ GFP_KERNEL);
if (!hisi_hba->cmd_hdr[i])
goto err_out;
/* Completion queue */
s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
- hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
- &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
+ hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
+ &hisi_hba->complete_hdr_dma[i],
+ GFP_KERNEL);
if (!hisi_hba->complete_hdr[i])
goto err_out;
}
- s = sizeof(struct hisi_sas_slot_buf_table);
- hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
- if (!hisi_hba->buffer_pool)
- goto err_out;
-
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
- hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
- GFP_KERNEL);
+ hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
+ GFP_KERNEL);
if (!hisi_hba->itct)
goto err_out;
+ memset(hisi_hba->itct, 0, s);
hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
sizeof(struct hisi_sas_slot),
@@ -2075,15 +2056,45 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
if (!hisi_hba->slot_info)
goto err_out;
+ /* roundup to avoid overly large block size */
+ max_command_entries_ru = roundup(max_command_entries, 64);
+ sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64);
+ s = lcm(max_command_entries_ru, sz_slot_buf_ru);
+ blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
+ slots_per_blk = s / sz_slot_buf_ru;
+ for (i = 0; i < blk_cnt; i++) {
+ struct hisi_sas_slot_buf_table *buf;
+ dma_addr_t buf_dma;
+ int slot_index = i * slots_per_blk;
+
+ buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL);
+ if (!buf)
+ goto err_out;
+ memset(buf, 0, s);
+
+ for (j = 0; j < slots_per_blk; j++, slot_index++) {
+ struct hisi_sas_slot *slot;
+
+ slot = &hisi_hba->slot_info[slot_index];
+ slot->buf = buf;
+ slot->buf_dma = buf_dma;
+ slot->idx = slot_index;
+
+ buf++;
+ buf_dma += sizeof(*buf);
+ }
+ }
+
s = max_command_entries * sizeof(struct hisi_sas_iost);
- hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
- GFP_KERNEL);
+ hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
+ GFP_KERNEL);
if (!hisi_hba->iost)
goto err_out;
s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
- hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
- &hisi_hba->breakpoint_dma, GFP_KERNEL);
+ hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
+ &hisi_hba->breakpoint_dma,
+ GFP_KERNEL);
if (!hisi_hba->breakpoint)
goto err_out;
@@ -2094,14 +2105,16 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
goto err_out;
s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
- hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
- &hisi_hba->initial_fis_dma, GFP_KERNEL);
+ hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
+ &hisi_hba->initial_fis_dma,
+ GFP_KERNEL);
if (!hisi_hba->initial_fis)
goto err_out;
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
- hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
- &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
+ hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
+ &hisi_hba->sata_breakpoint_dma,
+ GFP_KERNEL);
if (!hisi_hba->sata_breakpoint)
goto err_out;
hisi_sas_init_mem(hisi_hba);
@@ -2122,54 +2135,6 @@ EXPORT_SYMBOL_GPL(hisi_sas_alloc);
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
- struct device *dev = hisi_hba->dev;
- int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
-
- for (i = 0; i < hisi_hba->queue_count; i++) {
- s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
- if (hisi_hba->cmd_hdr[i])
- dma_free_coherent(dev, s,
- hisi_hba->cmd_hdr[i],
- hisi_hba->cmd_hdr_dma[i]);
-
- s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
- if (hisi_hba->complete_hdr[i])
- dma_free_coherent(dev, s,
- hisi_hba->complete_hdr[i],
- hisi_hba->complete_hdr_dma[i]);
- }
-
- dma_pool_destroy(hisi_hba->buffer_pool);
-
- s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
- if (hisi_hba->itct)
- dma_free_coherent(dev, s,
- hisi_hba->itct, hisi_hba->itct_dma);
-
- s = max_command_entries * sizeof(struct hisi_sas_iost);
- if (hisi_hba->iost)
- dma_free_coherent(dev, s,
- hisi_hba->iost, hisi_hba->iost_dma);
-
- s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
- if (hisi_hba->breakpoint)
- dma_free_coherent(dev, s,
- hisi_hba->breakpoint,
- hisi_hba->breakpoint_dma);
-
-
- s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
- if (hisi_hba->initial_fis)
- dma_free_coherent(dev, s,
- hisi_hba->initial_fis,
- hisi_hba->initial_fis_dma);
-
- s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
- if (hisi_hba->sata_breakpoint)
- dma_free_coherent(dev, s,
- hisi_hba->sata_breakpoint,
- hisi_hba->sata_breakpoint_dma);
-
if (hisi_hba->wq)
destroy_workqueue(hisi_hba->wq);
}
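With every buffer now obtained through dmam_alloc_coherent(), teardown is handled by the devres framework when the device is unbound, which is why the hand-written dma_free_coherent() calls above could be deleted wholesale. The shape of the managed pattern, as a sketch (the function name is illustrative, not from the driver):

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_probe(struct device *dev)
{
	dma_addr_t dma;
	void *buf;

	/* Released automatically on driver detach; no matching
	 * dma_free_coherent() is needed in the error or remove paths. */
	buf = dmam_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	return 0;
}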
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 89ab18c1959c..8f60f0e04599 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -903,23 +903,28 @@ get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
- struct hisi_sas_slot *s, *s1;
+ struct hisi_sas_slot *s, *s1, *s2 = NULL;
struct list_head *dq_list;
int dlvry_queue = dq->id;
- int wp, count = 0;
+ int wp;
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
- count++;
- wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+ s2 = s;
list_del(&s->delivery);
}
- if (!count)
+ if (!s2)
return;
+ /*
+ * Ensure that the slot memory built on other CPUs is observed.
+ */
+ smp_rmb();
+ wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
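Instead of recomputing the write pointer on every iteration, the loop now only remembers the last ready slot (s2) and derives the pointer once, after an smp_rmb() that pairs with the barrier issued by the CPU that built the slot; the same rewrite is applied to the v2 and v3 variants below. In miniature, and only as a sketch of the ordering (the real producer-side barrier lives in the slot-preparation path):

/* CPU A: fill in the slot, then publish it. */
prepare_slot(slot);
smp_wmb();		/* make the slot contents visible first */
slot->ready = 1;

/* CPU B (this function): observe 'ready', then read slot contents. */
if (slot->ready) {
	smp_rmb();	/* pairs with the smp_wmb() above */
	wp = (slot->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
}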
@@ -1296,11 +1301,8 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
!(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) {
slot_err_v1_hw(hisi_hba, task, slot);
- if (unlikely(slot->abort)) {
- queue_work(hisi_hba->wq, &slot->abort_slot);
- /* immediately return and do not complete */
+ if (unlikely(slot->abort))
return ts->stat;
- }
goto out;
}
@@ -1469,7 +1471,8 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
goto end;
}
- sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 213c530e63f2..9c5c5a601332 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1665,23 +1665,28 @@ get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
- struct hisi_sas_slot *s, *s1;
+ struct hisi_sas_slot *s, *s1, *s2 = NULL;
struct list_head *dq_list;
int dlvry_queue = dq->id;
- int wp, count = 0;
+ int wp;
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
- count++;
- wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+ s2 = s;
list_del(&s->delivery);
}
- if (!count)
+ if (!s2)
return;
+ /*
+ * Ensure that the slot memory built on other CPUs is observed.
+ */
+ smp_rmb();
+ wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
@@ -2840,7 +2845,8 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
- if (bcast_status & RX_BCAST_CHG_MSK)
+ if ((bcast_status & RX_BCAST_CHG_MSK) &&
+ !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
@@ -3234,8 +3240,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
if (fis->status & ATA_ERR) {
dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
fis->status);
- disable_phy_v2_hw(hisi_hba, phy_no);
- enable_phy_v2_hw(hisi_hba, phy_no);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
res = IRQ_NONE;
goto end;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 9f1e2d03f914..08b503e274b8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -51,7 +51,6 @@
#define CFG_ABT_SET_IPTT_DONE 0xd8
#define CFG_ABT_SET_IPTT_DONE_OFF 0
#define HGC_IOMB_PROC1_STATUS 0x104
-#define CFG_1US_TIMER_TRSH 0xcc
#define CHNL_INT_STATUS 0x148
#define HGC_AXI_FIFO_ERR_INFO 0x154
#define AXI_ERR_INFO_OFF 0
@@ -121,6 +120,8 @@
#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
#define PHY_CFG_DC_OPT_OFF 2
#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
+#define PHY_CFG_PHY_RST_OFF 3
+#define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF)
#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
#define PHY_CTRL (PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF 0
@@ -131,6 +132,9 @@
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CTA_OFF 17
#define SL_CTA_MSK (0x1 << SL_CTA_OFF)
+#define RX_PRIMS_STATUS (PORT_BASE + 0x98)
+#define RX_BCAST_CHG_OFF 1
+#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
@@ -206,6 +210,8 @@
#define AXI_MASTER_CFG_BASE (0x5000)
#define AM_CTRL_GLOBAL (0x0)
+#define AM_CTRL_SHUTDOWN_REQ_OFF 0
+#define AM_CTRL_SHUTDOWN_REQ_MSK (0x1 << AM_CTRL_SHUTDOWN_REQ_OFF)
#define AM_CURR_TRANS_RETURN (0x150)
#define AM_CFG_MAX_TRANS (0x5010)
@@ -425,7 +431,6 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
(u32)((1ULL << hisi_hba->queue_count) - 1));
hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
- hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
@@ -486,6 +491,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
+ hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
/* used for 12G negotiate */
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
@@ -758,15 +764,25 @@ static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
cfg |= PHY_CFG_ENA_MSK;
+ cfg &= ~PHY_CFG_PHY_RST_MSK;
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}
static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+ u32 state;
cfg &= ~PHY_CFG_ENA_MSK;
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+
+ mdelay(50);
+
+ state = hisi_sas_read32(hisi_hba, PHY_STATE);
+ if (state & BIT(phy_no)) {
+ cfg |= PHY_CFG_PHY_RST_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+ }
}
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -866,23 +882,28 @@ get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
- struct hisi_sas_slot *s, *s1;
+ struct hisi_sas_slot *s, *s1, *s2 = NULL;
struct list_head *dq_list;
int dlvry_queue = dq->id;
- int wp, count = 0;
+ int wp;
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
- count++;
- wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+ s2 = s;
list_del(&s->delivery);
}
- if (!count)
+ if (!s2)
return;
+ /*
+ * Ensure that the slot memory built on other CPUs is observed.
+ */
+ smp_rmb();
+ wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
@@ -1170,6 +1191,16 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
initial_fis = &hisi_hba->initial_fis[phy_no];
fis = &initial_fis->fis;
+
+ /* check ERR bit of Status Register */
+ if (fis->status & ATA_ERR) {
+ dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n",
+ phy_no, fis->status);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ res = IRQ_NONE;
+ goto end;
+ }
+
sas_phy->oob_mode = SATA_OOB_MODE;
attached_sas_addr[0] = 0x50;
attached_sas_addr[7] = phy_no;
@@ -1256,9 +1287,13 @@ static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
+ if ((bcast_status & RX_BCAST_CHG_MSK) &&
+ !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -1327,11 +1362,77 @@ static const struct hisi_sas_hw_error port_axi_error[] = {
},
};
-static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
+static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
- struct hisi_hba *hisi_hba = p;
+ u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1);
+ u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1_MSK);
struct device *dev = hisi_hba->dev;
+ int i;
+
+ irq_value &= ~irq_msk;
+ if (!irq_value)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
+ const struct hisi_sas_hw_error *error = &port_axi_error[i];
+
+ if (!(irq_value & error->irq_msk))
+ continue;
+
+ dev_err(dev, "%s error (phy%d 0x%x) found!\n",
+ error->msg, phy_no, irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value);
+}
+
+static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
+ u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct pci_dev *pci_dev = hisi_hba->pci_dev;
+ struct device *dev = hisi_hba->dev;
+
+ irq_value &= ~irq_msk;
+ if (!irq_value)
+ return;
+
+ if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
+ dev_warn(dev, "phy%d identify timeout\n", phy_no);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ }
+
+ if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
+ u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
+ STP_LINK_TIMEOUT_STATE);
+
+ dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
+ phy_no, reg_value);
+ if (reg_value & BIT(4))
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ }
+
+ if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
+ (pci_dev->revision == 0x20)) {
+ u32 reg_value;
+ int rc;
+
+ rc = hisi_sas_read32_poll_timeout_atomic(
+ HILINK_ERR_DFX, reg_value,
+ !((reg_value >> 8) & BIT(phy_no)),
+ 1000, 10000);
+ if (rc)
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ }
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value);
+}
+
+static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
u32 irq_msk;
int phy_no = 0;
@@ -1341,84 +1442,12 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
while (irq_msk) {
u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
CHL_INT0);
- u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT1);
- u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT2);
- u32 irq_msk1 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT1_MSK);
- u32 irq_msk2 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT2_MSK);
-
- irq_value1 &= ~irq_msk1;
- irq_value2 &= ~irq_msk2;
-
- if ((irq_msk & (4 << (phy_no * 4))) &&
- irq_value1) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
- const struct hisi_sas_hw_error *error =
- &port_axi_error[i];
-
- if (!(irq_value1 & error->irq_msk))
- continue;
-
- dev_err(dev, "%s error (phy%d 0x%x) found!\n",
- error->msg, phy_no, irq_value1);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
-
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT1, irq_value1);
- }
- if (irq_msk & (8 << (phy_no * 4)) && irq_value2) {
- struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ if (irq_msk & (4 << (phy_no * 4)))
+ handle_chl_int1_v3_hw(hisi_hba, phy_no);
- if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
- dev_warn(dev, "phy%d identify timeout\n",
- phy_no);
- hisi_sas_notify_phy_event(phy,
- HISI_PHYE_LINK_RESET);
-
- }
-
- if (irq_value2 & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
- u32 reg_value = hisi_sas_phy_read32(hisi_hba,
- phy_no, STP_LINK_TIMEOUT_STATE);
-
- dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
- phy_no, reg_value);
- if (reg_value & BIT(4))
- hisi_sas_notify_phy_event(phy,
- HISI_PHYE_LINK_RESET);
- }
-
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT2, irq_value2);
-
- if ((irq_value2 & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
- (pci_dev->revision == 0x20)) {
- u32 reg_value;
- int rc;
-
- rc = hisi_sas_read32_poll_timeout_atomic(
- HILINK_ERR_DFX, reg_value,
- !((reg_value >> 8) & BIT(phy_no)),
- 1000, 10000);
- if (rc) {
- disable_phy_v3_hw(hisi_hba, phy_no);
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT2,
- BIT(CHL_INT2_RX_INVLD_DW_OFF));
- hisi_sas_phy_read32(hisi_hba, phy_no,
- ERR_CNT_INVLD_DW);
- mdelay(1);
- enable_phy_v3_hw(hisi_hba, phy_no);
- }
- }
- }
+ if (irq_msk & (8 << (phy_no * 4)))
+ handle_chl_int2_v3_hw(hisi_hba, phy_no);
if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
hisi_sas_phy_write32(hisi_hba, phy_no,
@@ -1964,11 +1993,11 @@ static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
}
-static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
+static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
+ u32 status, reg_val;
int rc;
- u32 status;
interrupt_disable_v3_hw(hisi_hba);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
@@ -1978,14 +2007,32 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
mdelay(10);
- hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);
+ reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL);
+ reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
+ hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL, reg_val);
/* wait until bus idle */
rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
AM_CURR_TRANS_RETURN, status,
status == 0x3, 10, 100);
if (rc) {
- dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
+ dev_err(dev, "axi bus is not idle, rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ int rc;
+
+ rc = disable_host_v3_hw(hisi_hba);
+ if (rc) {
+ dev_err(dev, "soft reset: disable host failed rc=%d\n", rc);
return rc;
}
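disable_host_v3_hw() factors the quiesce sequence (mask interrupts, stop the delivery queues and phys, request an AXI shutdown, then poll for bus idle) out of soft_reset_v3_hw() so that the new FLR and suspend paths below can reuse it. The driver polls through the hisi_sas_read32_poll_timeout() helper macro; expanded by hand, the 10us-interval, 100us-timeout wait is roughly:

/* Illustrative expansion only; the driver uses the helper macro. */
int i;

for (i = 0; i < 10; i++) {
	status = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
				 AM_CURR_TRANS_RETURN);
	if (status == 0x3)	/* all outstanding AXI transactions done */
		return 0;
	udelay(10);
}
return -ETIMEDOUT;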
@@ -2433,6 +2480,41 @@ static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
+static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ int rc;
+
+ dev_info(dev, "FLR prepare\n");
+ set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ hisi_sas_controller_reset_prepare(hisi_hba);
+
+ rc = disable_host_v3_hw(hisi_hba);
+ if (rc)
+ dev_err(dev, "FLR: disable host failed rc=%d\n", rc);
+}
+
+static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ int rc;
+
+ hisi_sas_init_mem(hisi_hba);
+
+ rc = hw_init_v3_hw(hisi_hba);
+ if (rc) {
+ dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
+ return;
+ }
+
+ hisi_sas_controller_reset_done(hisi_hba);
+ dev_info(dev, "FLR done\n");
+}
+
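reset_prepare() and reset_done() are the pci_error_handlers callbacks the PCI core invokes on either side of a function-level reset, so the pairing here is roughly (a sketch, simplified from the core's reset path):

err_handler->reset_prepare(pdev);	/* hisi_sas_reset_prepare_v3_hw() */
pcie_flr(pdev);				/* the reset itself */
err_handler->reset_done(pdev);		/* hisi_sas_reset_done_v3_hw() */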
enum {
/* instances of the controller */
hip08,
@@ -2444,38 +2526,24 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct device *dev = hisi_hba->dev;
struct Scsi_Host *shost = hisi_hba->shost;
- u32 device_state, status;
+ u32 device_state;
int rc;
- u32 reg_val;
if (!pdev->pm_cap) {
dev_err(dev, "PCI PM not supported\n");
return -ENODEV;
}
- set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ return -1;
+
scsi_block_requests(shost);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
flush_workqueue(hisi_hba->wq);
- /* disable DQ/PHY/bus */
- interrupt_disable_v3_hw(hisi_hba);
- hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
- hisi_sas_kill_tasklets(hisi_hba);
-
- hisi_sas_stop_phys(hisi_hba);
- reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
- AM_CTRL_GLOBAL);
- reg_val |= 0x1;
- hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
- AM_CTRL_GLOBAL, reg_val);
-
- /* wait until bus idle */
- rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
- AM_CURR_TRANS_RETURN, status,
- status == 0x3, 10, 100);
+ rc = disable_host_v3_hw(hisi_hba);
if (rc) {
- dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
+ dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
scsi_unblock_requests(shost);
@@ -2538,6 +2606,8 @@ static const struct pci_error_handlers hisi_sas_err_handler = {
.error_detected = hisi_sas_error_detected_v3_hw,
.mmio_enabled = hisi_sas_mmio_enabled_v3_hw,
.slot_reset = hisi_sas_slot_reset_v3_hw,
+ .reset_prepare = hisi_sas_reset_prepare_v3_hw,
+ .reset_done = hisi_sas_reset_done_v3_hw,
};
static struct pci_driver sas_v3_pci_driver = {
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 3771e59a9fae..f02dcc875a09 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -563,6 +563,38 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_host_get);
+struct scsi_host_mq_in_flight {
+ int cnt;
+};
+
+static void scsi_host_check_in_flight(struct request *rq, void *data,
+ bool reserved)
+{
+ struct scsi_host_mq_in_flight *in_flight = data;
+
+ if (blk_mq_request_started(rq))
+ in_flight->cnt++;
+}
+
+/**
+ * scsi_host_busy - Return the host busy counter
+ * @shost: Pointer to Scsi_Host to check.
+ **/
+int scsi_host_busy(struct Scsi_Host *shost)
+{
+ struct scsi_host_mq_in_flight in_flight = {
+ .cnt = 0,
+ };
+
+ if (!shost->use_blk_mq)
+ return atomic_read(&shost->host_busy);
+
+ blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight,
+ &in_flight);
+ return in_flight.cnt;
+}
+EXPORT_SYMBOL(scsi_host_busy);
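+
+scsi_host_busy() gives callers one accessor that works for both the legacy
+path (the atomic host_busy counter) and blk-mq (counting started requests
+via the tag set). A minimal sketch of a caller (illustrative, not from
+this patch):
+
+static void example_report_busy(struct Scsi_Host *shost)
+{
+	shost_printk(KERN_INFO, shost, "%d commands in flight\n",
+		     scsi_host_busy(shost));
+}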
+
/**
* scsi_host_put - dec a Scsi_Host ref count
* @shost: Pointer to Scsi_Host to dec.
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index daefe8172b04..b64ca977825d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1322,7 +1322,7 @@ static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
/**
* ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
- * @scmd: Scsi_Cmnd with the scatterlist
+ * @scmd: struct scsi_cmnd with the scatterlist
* @evt: ibmvfc event struct
* @vfc_cmd: vfc_cmd that contains the memory descriptor
* @dev: device for which to map dma memory
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 17df76f0be3c..9df8a1a2299c 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -93,7 +93,7 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
static int fast_fail = 1;
static int client_reserve = 1;
-static char partition_name[97] = "UNKNOWN";
+static char partition_name[96] = "UNKNOWN";
static unsigned int partition_number = -1;
static LIST_HEAD(ibmvscsi_head);
@@ -262,7 +262,7 @@ static void gather_partition_info(void)
ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL);
if (ppartition_name)
- strncpy(partition_name, ppartition_name,
+ strlcpy(partition_name, ppartition_name,
sizeof(partition_name));
p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL);
if (p_number_ptr)
@@ -681,7 +681,7 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
/**
* map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
- * @cmd: Scsi_Cmnd with the scatterlist
+ * @cmd: struct scsi_cmnd with the scatterlist
* @srp_cmd: srp_cmd that contains the memory descriptor
* @dev: device for which to map dma memory
*
@@ -1274,14 +1274,12 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
if (hostdata->client_migrated)
hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);
- strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
+ strlcpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
sizeof(hostdata->caps.name));
- hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
location = of_get_property(of_node, "ibm,loc-code", NULL);
location = location ? location : dev_name(hostdata->dev);
- strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
- hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
+ strlcpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
req->buffer = cpu_to_be64(hostdata->caps_addr);
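The strncpy() -> strlcpy() conversions matter because strncpy() leaves the destination unterminated when the source fills the buffer, which is what forced the manual '\0' stores deleted above. As a sketch:

char dst[8];

strncpy(dst, "a-source-longer-than-dst", sizeof(dst));
dst[sizeof(dst) - 1] = '\0';	/* caller must terminate by hand */

strlcpy(dst, "a-source-longer-than-dst", sizeof(dst));
/* always NUL-terminated; the fix-up line goes away */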
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index c3a76af9f5fa..fac377320158 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2233,7 +2233,7 @@ static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
return -ENOMEM;
}
- nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0,
+ nexus->se_sess = target_setup_session(&tport->se_tpg, 0, 0,
TARGET_PROT_NORMAL, name, nexus,
NULL);
if (IS_ERR(nexus->se_sess)) {
@@ -2267,8 +2267,7 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
* Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
*/
target_wait_for_sess_cmds(se_sess);
- transport_deregister_session_configfs(se_sess);
- transport_deregister_session(se_sess);
+ target_remove_session(se_sess);
tport->ibmv_nexus = NULL;
kfree(nexus);
@@ -3928,7 +3927,6 @@ static void ibmvscsis_drop_tport(struct se_wwn *wwn)
}
static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
const char *name)
{
struct ibmvscsis_tport *tport =
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 87c94191033b..8c6627bc8a39 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -892,7 +892,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
/* Check for optional message byte */
if (imm_wait(dev) == (unsigned char) 0xb8)
imm_in(dev, &h, 1);
- cmd->result = (DID_OK << 16) + (l & STATUS_MASK);
+ cmd->result = (DID_OK << 16) | (l & STATUS_MASK);
}
if ((dev->mode == IMM_NIBBLE) || (dev->mode == IMM_PS2)) {
w_ctr(ppb, 0x4);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 02d65dce74e5..f2ec80b0ffc0 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2412,6 +2412,28 @@ static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
}
/**
+ * ipr_log_sis64_service_required_error - Log a sis64 service required error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_hostrcb_type_41_error *error;
+
+ error = &hostrcb->hcam.u.error64.u.type_41_error;
+
+ error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+ ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
+ ipr_log_hex_data(ioa_cfg, error->data,
+ be32_to_cpu(hostrcb->hcam.length) -
+ (offsetof(struct ipr_hostrcb_error, u) +
+ offsetof(struct ipr_hostrcb_type_41_error, data)));
+}
+/**
* ipr_log_generic_error - Log an adapter error.
* @ioa_cfg: ioa config struct
* @hostrcb: hostrcb struct
@@ -2586,6 +2608,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
case IPR_HOST_RCB_OVERLAY_ID_30:
ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
break;
+ case IPR_HOST_RCB_OVERLAY_ID_41:
+ ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
+ break;
case IPR_HOST_RCB_OVERLAY_ID_1:
case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
default:
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 93570734cbfb..68afbbde54d3 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1135,6 +1135,11 @@ struct ipr_hostrcb_type_30_error {
struct ipr_hostrcb64_fabric_desc desc[1];
}__attribute__((packed, aligned (4)));
+struct ipr_hostrcb_type_41_error {
+ u8 failure_reason[64];
+ __be32 data[200];
+}__attribute__((packed, aligned (4)));
+
struct ipr_hostrcb_error {
__be32 fd_ioasc;
struct ipr_res_addr fd_res_addr;
@@ -1173,6 +1178,7 @@ struct ipr_hostrcb64_error {
struct ipr_hostrcb_type_23_error type_23_error;
struct ipr_hostrcb_type_24_error type_24_error;
struct ipr_hostrcb_type_30_error type_30_error;
+ struct ipr_hostrcb_type_41_error type_41_error;
} u;
}__attribute__((packed, aligned (8)));
@@ -1218,6 +1224,7 @@ struct ipr_hcam {
#define IPR_HOST_RCB_OVERLAY_ID_24 0x24
#define IPR_HOST_RCB_OVERLAY_ID_26 0x26
#define IPR_HOST_RCB_OVERLAY_ID_30 0x30
+#define IPR_HOST_RCB_OVERLAY_ID_41 0x41
#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF
u8 reserved1[3];
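The length passed to ipr_log_hex_data() in the new handler works back from the total hostrcb length: stripping the common error header plus the 64-byte failure_reason string leaves only the hex payload. As a worked expression:

/*
 * hcam.length counts from the start of the error buffer, so:
 *
 *   len = be32_to_cpu(hostrcb->hcam.length)
 *         - (offsetof(struct ipr_hostrcb_error, u)
 *            + offsetof(struct ipr_hostrcb_type_41_error, data));
 *
 * where offsetof(struct ipr_hostrcb_type_41_error, data) == 64,
 * the size of failure_reason[].
 */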
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 3f3569ec5ce3..f969a71348ef 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -59,34 +59,25 @@ static void fc_disc_restart(struct fc_disc *);
/**
* fc_disc_stop_rports() - Delete all the remote ports associated with the lport
* @disc: The discovery job to stop remote ports on
- *
- * Locking Note: This function expects that the lport mutex is locked before
- * calling it.
*/
static void fc_disc_stop_rports(struct fc_disc *disc)
{
- struct fc_lport *lport;
struct fc_rport_priv *rdata;
- lport = fc_disc_lport(disc);
+ lockdep_assert_held(&disc->disc_mutex);
- rcu_read_lock();
- list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ list_for_each_entry(rdata, &disc->rports, peers) {
if (kref_get_unless_zero(&rdata->kref)) {
fc_rport_logoff(rdata);
kref_put(&rdata->kref, fc_rport_destroy);
}
}
- rcu_read_unlock();
}
/**
* fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
* @disc: The discovery object to which the RSCN applies
* @fp: The RSCN frame
- *
- * Locking Note: This function expects that the disc_mutex is locked
- * before it is called.
*/
static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
{
@@ -101,6 +92,8 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
LIST_HEAD(disc_ports);
struct fc_disc_port *dp, *next;
+ lockdep_assert_held(&disc->disc_mutex);
+
lport = fc_disc_lport(disc);
FC_DISC_DBG(disc, "Received an RSCN event\n");
@@ -220,12 +213,11 @@ static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
/**
* fc_disc_restart() - Restart discovery
* @disc: The discovery object to be restarted
- *
- * Locking Note: This function expects that the disc mutex
- * is already locked.
*/
static void fc_disc_restart(struct fc_disc *disc)
{
+ lockdep_assert_held(&disc->disc_mutex);
+
if (!disc->disc_callback)
return;
@@ -271,16 +263,13 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
* fc_disc_done() - Discovery has been completed
* @disc: The discovery context
* @event: The discovery completion status
- *
- * Locking Note: This function expects that the disc mutex is locked before
- * it is called. The discovery callback is then made with the lock released,
- * and the lock is re-taken before returning from this function
*/
static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
{
struct fc_lport *lport = fc_disc_lport(disc);
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&disc->disc_mutex);
FC_DISC_DBG(disc, "Discovery complete\n");
disc->pending = 0;
@@ -294,9 +283,11 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
* discovery, reverify or log them in. Otherwise, log them out.
* Skip ports which were never discovered. These are the dNS port
* and ports which were created by PLOGI.
+ *
+ * We don't need to use the _rcu variant here as the rport list
+ * is protected by the disc mutex which is already held on entry.
*/
- rcu_read_lock();
- list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+ list_for_each_entry(rdata, &disc->rports, peers) {
if (!kref_get_unless_zero(&rdata->kref))
continue;
if (rdata->disc_id) {
@@ -307,7 +298,6 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
}
kref_put(&rdata->kref, fc_rport_destroy);
}
- rcu_read_unlock();
mutex_unlock(&disc->disc_mutex);
disc->disc_callback(lport, event);
mutex_lock(&disc->disc_mutex);
@@ -360,15 +350,14 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
/**
* fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
* @lport: The discovery context
- *
- * Locking Note: This function expects that the disc_mutex is locked
- * before it is called.
*/
static void fc_disc_gpn_ft_req(struct fc_disc *disc)
{
struct fc_frame *fp;
struct fc_lport *lport = fc_disc_lport(disc);
+ lockdep_assert_held(&disc->disc_mutex);
+
WARN_ON(!fc_lport_test_ready(lport));
disc->pending = 1;
@@ -658,8 +647,6 @@ out:
* @lport: The local port to initiate discovery on
* @rdata: remote port private data
*
- * Locking Note: This function expects that the disc_mutex is locked
- * before it is called.
* On failure, an error code is returned.
*/
static int fc_disc_gpn_id_req(struct fc_lport *lport,
@@ -667,6 +654,7 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport,
{
struct fc_frame *fp;
+ lockdep_assert_held(&lport->disc.disc_mutex);
fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
sizeof(struct fc_ns_fid));
if (!fp)
@@ -683,14 +671,13 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport,
* fc_disc_single() - Discover the directory information for a single target
* @lport: The local port the remote port is associated with
* @dp: The port to rediscover
- *
- * Locking Note: This function expects that the disc_mutex is locked
- * before it is called.
*/
static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
{
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&lport->disc.disc_mutex);
+
rdata = fc_rport_create(lport, dp->port_id);
if (!rdata)
return -ENOMEM;
@@ -708,7 +695,9 @@ static void fc_disc_stop(struct fc_lport *lport)
if (disc->pending)
cancel_delayed_work_sync(&disc->disc_work);
+ mutex_lock(&disc->disc_mutex);
fc_disc_stop_rports(disc);
+ mutex_unlock(&disc->disc_mutex);
}
/**
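The pattern running through the libfc changes is mechanical: each prose "Locking Note" in the kernel-doc is replaced by a lockdep_assert_held() at the top of the function, so the locking contract is verified at runtime on lockdep-enabled kernels rather than merely documented. In outline:

/* Before: the requirement lived only in the comment. */
/*
 * Locking Note: This function expects that the disc_mutex is locked
 * before it is called.
 */

/* After: self-documenting and checked under CONFIG_LOCKDEP. */
static void some_disc_helper(struct fc_disc *disc)
{
	lockdep_assert_held(&disc->disc_mutex);
	/* ... */
}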
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 21be672679fb..be83590ed955 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -237,14 +237,13 @@ static const char *fc_lport_state(struct fc_lport *lport)
* @remote_fid: The FID of the ptp rport
* @remote_wwpn: The WWPN of the ptp rport
* @remote_wwnn: The WWNN of the ptp rport
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_ptp_setup(struct fc_lport *lport,
u32 remote_fid, u64 remote_wwpn,
u64 remote_wwnn)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (lport->ptp_rdata) {
fc_rport_logoff(lport->ptp_rdata);
kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
@@ -403,12 +402,11 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
* fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
* @lport: Fibre Channel local port receiving the RLIR
* @fp: The RLIR request frame
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
fc_lport_state(lport));
@@ -420,9 +418,6 @@ static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
* fc_lport_recv_echo_req() - Handle received ECHO request
* @lport: The local port receiving the ECHO
* @fp: ECHO request frame
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_echo_req(struct fc_lport *lport,
struct fc_frame *in_fp)
@@ -432,6 +427,8 @@ static void fc_lport_recv_echo_req(struct fc_lport *lport,
void *pp;
void *dp;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
fc_lport_state(lport));
@@ -456,9 +453,6 @@ static void fc_lport_recv_echo_req(struct fc_lport *lport,
* fc_lport_recv_rnid_req() - Handle received Request Node ID data request
* @lport: The local port receiving the RNID
* @fp: The RNID request frame
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_rnid_req(struct fc_lport *lport,
struct fc_frame *in_fp)
@@ -474,6 +468,8 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
u8 fmt;
size_t len;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
fc_lport_state(lport));
@@ -515,12 +511,11 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
* fc_lport_recv_logo_req() - Handle received fabric LOGO request
* @lport: The local port receiving the LOGO
* @fp: The LOGO request frame
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
fc_lport_enter_reset(lport);
fc_frame_free(fp);
@@ -553,11 +548,11 @@ EXPORT_SYMBOL(fc_fabric_login);
/**
* __fc_linkup() - Handler for transport linkup events
* @lport: The lport whose link is up
- *
- * Locking: must be called with the lp_mutex held
*/
void __fc_linkup(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (!lport->link_up) {
lport->link_up = 1;
@@ -584,11 +579,11 @@ EXPORT_SYMBOL(fc_linkup);
/**
* __fc_linkdown() - Handler for transport linkdown events
* @lport: The lport whose link is down
- *
- * Locking: must be called with the lp_mutex held
*/
void __fc_linkdown(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (lport->link_up) {
lport->link_up = 0;
fc_lport_enter_reset(lport);
@@ -722,12 +717,11 @@ static void fc_lport_disc_callback(struct fc_lport *lport,
/**
* fc_rport_enter_ready() - Enter the ready state and start discovery
* @lport: The local port that is ready
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_ready(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered READY from state %s\n",
fc_lport_state(lport));
@@ -745,13 +739,12 @@ static void fc_lport_enter_ready(struct fc_lport *lport)
* @lport: The local port which will have its Port ID set.
* @port_id: The new port ID.
* @fp: The frame containing the incoming request, or NULL.
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
struct fc_frame *fp)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (port_id)
printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
lport->host->host_no, port_id);
@@ -801,9 +794,6 @@ EXPORT_SYMBOL(fc_lport_set_local_id);
* A received FLOGI request indicates a point-to-point connection.
* Accept it with the common service parameters indicating our N port.
* Set up to do a PLOGI if we have the higher-number WWPN.
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
*/
static void fc_lport_recv_flogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
@@ -816,6 +806,8 @@ static void fc_lport_recv_flogi_req(struct fc_lport *lport,
u32 remote_fid;
u32 local_fid;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
fc_lport_state(lport));
@@ -1006,12 +998,11 @@ EXPORT_SYMBOL(fc_lport_reset);
/**
* fc_lport_reset_locked() - Reset the local port w/ the lport lock held
* @lport: The local port to be reset
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_reset_locked(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
if (lport->dns_rdata) {
fc_rport_logoff(lport->dns_rdata);
lport->dns_rdata = NULL;
@@ -1035,12 +1026,11 @@ static void fc_lport_reset_locked(struct fc_lport *lport)
/**
* fc_lport_enter_reset() - Reset the local port
* @lport: The local port to be reset
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_reset(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
fc_lport_state(lport));
@@ -1065,12 +1055,11 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
/**
* fc_lport_enter_disabled() - Disable the local port
* @lport: The local port to be reset
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_disabled(struct fc_lport *lport)
{
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
fc_lport_state(lport));
@@ -1321,14 +1310,13 @@ err:
/**
* fc_lport_enter_scr() - Send a SCR (State Change Register) request
* @lport: The local port to register for state changes
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_scr(struct fc_lport *lport)
{
struct fc_frame *fp;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
fc_lport_state(lport));
@@ -1349,9 +1337,6 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
/**
* fc_lport_enter_ns() - register some object with the name server
* @lport: Fibre Channel local port to register
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
{
@@ -1360,6 +1345,8 @@ static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
int size = sizeof(struct fc_ct_hdr);
size_t len;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
fc_lport_state_names[state],
fc_lport_state(lport));
@@ -1419,14 +1406,13 @@ static struct fc_rport_operations fc_lport_rport_ops = {
/**
* fc_rport_enter_dns() - Create a fc_rport for the name server
* @lport: The local port requesting a remote port for the name server
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_dns(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
fc_lport_state(lport));
@@ -1449,9 +1435,6 @@ err:
/**
* fc_lport_enter_ms() - management server commands
* @lport: Fibre Channel local port to register
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
{
@@ -1461,6 +1444,8 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
size_t len;
int numattrs;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
fc_lport_state_names[state],
fc_lport_state(lport));
@@ -1536,14 +1521,13 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
/**
* fc_rport_enter_fdmi() - Create a fc_rport for the management server
* @lport: The local port requesting a remote port for the management server
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_fdmi(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
fc_lport_state(lport));
@@ -1668,15 +1652,14 @@ EXPORT_SYMBOL(fc_lport_logo_resp);
/**
* fc_rport_enter_logo() - Logout of the fabric
* @lport: The local port to be logged out
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_logo(struct fc_lport *lport)
{
struct fc_frame *fp;
struct fc_els_logo *logo;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
fc_lport_state(lport));
@@ -1811,14 +1794,13 @@ EXPORT_SYMBOL(fc_lport_flogi_resp);
/**
* fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager
* @lport: Fibre Channel local port to be logged in to the fabric
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static void fc_lport_enter_flogi(struct fc_lport *lport)
{
struct fc_frame *fp;
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
fc_lport_state(lport));
@@ -1962,9 +1944,6 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
* @job: The BSG Passthrough job
* @lport: The local port sending the request
* @did: The destination port id
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static int fc_lport_els_request(struct bsg_job *job,
struct fc_lport *lport,
@@ -1976,6 +1955,8 @@ static int fc_lport_els_request(struct bsg_job *job,
char *pp;
int len;
+ lockdep_assert_held(&lport->lp_mutex);
+
fp = fc_frame_alloc(lport, job->request_payload.payload_len);
if (!fp)
return -ENOMEM;
@@ -2023,9 +2004,6 @@ static int fc_lport_els_request(struct bsg_job *job,
* @lport: The local port sending the request
* @did: The destination FC-ID
* @tov: The timeout period to wait for the response
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this routine.
*/
static int fc_lport_ct_request(struct bsg_job *job,
struct fc_lport *lport, u32 did, u32 tov)
@@ -2036,6 +2014,8 @@ static int fc_lport_ct_request(struct bsg_job *job,
struct fc_ct_req *ct;
size_t len;
+ lockdep_assert_held(&lport->lp_mutex);
+
fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
job->request_payload.payload_len);
if (!fp)
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 89b1f1af2fd4..372387a450df 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -136,13 +136,13 @@ EXPORT_SYMBOL(fc_rport_lookup);
* @ids: The identifiers for the new remote port
*
* The remote port will start in the INIT state.
- *
- * Locking note: must be called with the disc_mutex held.
*/
struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
{
struct fc_rport_priv *rdata;
+ lockdep_assert_held(&lport->disc.disc_mutex);
+
rdata = fc_rport_lookup(lport, port_id);
if (rdata)
return rdata;
@@ -184,6 +184,7 @@ void fc_rport_destroy(struct kref *kref)
struct fc_rport_priv *rdata;
rdata = container_of(kref, struct fc_rport_priv, kref);
+ WARN_ON(!list_empty(&rdata->peers));
kfree_rcu(rdata, rcu);
}
EXPORT_SYMBOL(fc_rport_destroy);
@@ -245,12 +246,12 @@ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
* fc_rport_state_enter() - Change the state of a remote port
* @rdata: The remote port whose state should change
* @new: The new state
- *
- * Locking Note: Called with the rport lock held
*/
static void fc_rport_state_enter(struct fc_rport_priv *rdata,
enum fc_rport_state new)
{
+ lockdep_assert_held(&rdata->rp_mutex);
+
if (rdata->rp_state != new)
rdata->retries = 0;
rdata->rp_state = new;
@@ -469,8 +470,6 @@ EXPORT_SYMBOL(fc_rport_login);
* @rdata: The remote port to be deleted
* @event: The event to report as the reason for deletion
*
- * Locking Note: Called with the rport lock held.
- *
* Allow state change into DELETE only once.
*
* Call queue_work only if there's no event already pending.
@@ -483,6 +482,8 @@ EXPORT_SYMBOL(fc_rport_login);
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
enum fc_rport_event event)
{
+ lockdep_assert_held(&rdata->rp_mutex);
+
if (rdata->rp_state == RPORT_ST_DELETE)
return;
@@ -546,13 +547,12 @@ EXPORT_SYMBOL(fc_rport_logoff);
* fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
* @rdata: The remote port that is ready
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: schedules workqueue, does not modify kref
*/
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
+ lockdep_assert_held(&rdata->rp_mutex);
+
fc_rport_state_enter(rdata, RPORT_ST_READY);
FC_RPORT_DBG(rdata, "Port is Ready\n");
@@ -615,15 +615,14 @@ static void fc_rport_timeout(struct work_struct *work)
* @rdata: The remote port the error is happened on
* @err: The error code
*
- * Locking Note: The rport lock is expected to be held before
- * calling this routine
- *
* Reference counting: does not modify kref
*/
static void fc_rport_error(struct fc_rport_priv *rdata, int err)
{
struct fc_lport *lport = rdata->local_port;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n",
-err, fc_rport_state(rdata), rdata->retries);
@@ -662,15 +661,14 @@ static void fc_rport_error(struct fc_rport_priv *rdata, int err)
* If the error was an exchange timeout retry immediately,
* otherwise wait for E_D_TOV.
*
- * Locking Note: The rport lock is expected to be held before
- * calling this routine
- *
* Reference counting: increments kref when scheduling retry_work
*/
static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err)
{
unsigned long delay = msecs_to_jiffies(rdata->e_d_tov);
+ lockdep_assert_held(&rdata->rp_mutex);
+
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
if (err == -FC_EX_CLOSED)
goto out;
@@ -822,9 +820,6 @@ bad:
* fc_rport_enter_flogi() - Send a FLOGI request to the remote port for p-mp
* @rdata: The remote port to send a FLOGI to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
@@ -832,6 +827,8 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
+ lockdep_assert_held(&rdata->rp_mutex);
+
if (!lport->point_to_multipoint)
return fc_rport_enter_plogi(rdata);
@@ -1071,9 +1068,6 @@ fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata)
* fc_rport_enter_plogi() - Send Port Login (PLOGI) request
* @rdata: The remote port to send a PLOGI to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
@@ -1081,6 +1075,8 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
+ lockdep_assert_held(&rdata->rp_mutex);
+
if (!fc_rport_compatible_roles(lport, rdata)) {
FC_RPORT_DBG(rdata, "PLOGI suppressed for incompatible role\n");
fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
@@ -1232,9 +1228,6 @@ put:
* fc_rport_enter_prli() - Send Process Login (PRLI) request
* @rdata: The remote port to send the PRLI request to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
@@ -1247,6 +1240,8 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
struct fc_frame *fp;
struct fc4_prov *prov;
+ lockdep_assert_held(&rdata->rp_mutex);
+
/*
* If the rport is one of the well known addresses
* we skip PRLI and RTV and go straight to READY.
@@ -1372,9 +1367,6 @@ put:
* fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request
* @rdata: The remote port to send the RTV request to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
@@ -1382,6 +1374,8 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
struct fc_frame *fp;
struct fc_lport *lport = rdata->local_port;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
fc_rport_state(rdata));
@@ -1406,8 +1400,6 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
* fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests
* @rdata: The remote port that sent the RTV request
* @in_fp: The RTV request frame
- *
- * Locking Note: Called with the lport and rport locks held.
*/
static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
struct fc_frame *in_fp)
@@ -1417,6 +1409,9 @@ static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
struct fc_els_rtv_acc *rtv;
struct fc_seq_els_data rjt_data;
+ lockdep_assert_held(&rdata->rp_mutex);
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_RPORT_DBG(rdata, "Received RTV request\n");
fp = fc_frame_alloc(lport, sizeof(*rtv));
@@ -1460,9 +1455,6 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
* fc_rport_enter_logo() - Send a logout (LOGO) request
* @rdata: The remote port to send the LOGO request to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
@@ -1470,6 +1462,8 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n",
fc_rport_state(rdata));
@@ -1548,9 +1542,6 @@ put:
* fc_rport_enter_adisc() - Send Address Discover (ADISC) request
* @rdata: The remote port to send the ADISC request to
*
- * Locking Note: The rport lock is expected to be held before calling
- * this routine.
- *
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
@@ -1558,6 +1549,8 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
fc_rport_state(rdata));
@@ -1581,8 +1574,6 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
* fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests
* @rdata: The remote port that sent the ADISC request
* @in_fp: The ADISC request frame
- *
- * Locking Note: Called with the lport and rport locks held.
*/
static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
struct fc_frame *in_fp)
@@ -1592,6 +1583,9 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
struct fc_els_adisc *adisc;
struct fc_seq_els_data rjt_data;
+ lockdep_assert_held(&rdata->rp_mutex);
+ lockdep_assert_held(&lport->lp_mutex);
+
FC_RPORT_DBG(rdata, "Received ADISC request\n");
adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
@@ -1618,9 +1612,6 @@ drop:
* fc_rport_recv_rls_req() - Handle received Read Link Status request
* @rdata: The remote port that sent the RLS request
* @rx_fp: The PRLI request frame
- *
- * Locking Note: The rport lock is expected to be held before calling
- * this function.
*/
static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
@@ -1634,6 +1625,8 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
struct fc_seq_els_data rjt_data;
struct fc_host_statistics *hst;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n",
fc_rport_state(rdata));
@@ -1687,8 +1680,6 @@ out:
* Handle incoming ELS requests that require port login.
* The ELS opcode has already been validated by the caller.
*
- * Locking Note: Called with the lport lock held.
- *
* Reference counting: does not modify kref
*/
static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
@@ -1696,6 +1687,8 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
struct fc_rport_priv *rdata;
struct fc_seq_els_data els_data;
+ lockdep_assert_held(&lport->lp_mutex);
+
rdata = fc_rport_lookup(lport, fc_frame_sid(fp));
if (!rdata) {
FC_RPORT_ID_DBG(lport, fc_frame_sid(fp),
@@ -1783,14 +1776,14 @@ busy:
* @lport: The local port that received the request
* @fp: The request frame
*
- * Locking Note: Called with the lport lock held.
- *
* Reference counting: does not modify kref
*/
void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_seq_els_data els_data;
+ lockdep_assert_held(&lport->lp_mutex);
+
/*
* Handle FLOGI, PLOGI and LOGO requests separately, since they
* don't require prior login.
@@ -1831,8 +1824,6 @@ EXPORT_SYMBOL(fc_rport_recv_req);
* @lport: The local port that received the PLOGI request
* @rx_fp: The PLOGI request frame
*
- * Locking Note: The rport lock is held before calling this function.
- *
* Reference counting: increments kref on return
*/
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
@@ -1845,6 +1836,8 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
struct fc_seq_els_data rjt_data;
u32 sid;
+ lockdep_assert_held(&lport->lp_mutex);
+
sid = fc_frame_sid(fp);
FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");
@@ -1955,9 +1948,6 @@ reject:
* fc_rport_recv_prli_req() - Handler for process login (PRLI) requests
* @rdata: The remote port that sent the PRLI request
* @rx_fp: The PRLI request frame
- *
- * Locking Note: The rport lock is expected to be held before calling
- * this function.
*/
static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
@@ -1976,6 +1966,8 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
struct fc_seq_els_data rjt_data;
struct fc4_prov *prov;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
fc_rport_state(rdata));
@@ -2072,9 +2064,6 @@ drop:
* fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests
* @rdata: The remote port that sent the PRLO request
* @rx_fp: The PRLO request frame
- *
- * Locking Note: The rport lock is expected to be held before calling
- * this function.
*/
static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
@@ -2091,6 +2080,8 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
unsigned int plen;
struct fc_seq_els_data rjt_data;
+ lockdep_assert_held(&rdata->rp_mutex);
+
FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
fc_rport_state(rdata));
@@ -2144,9 +2135,6 @@ drop:
* @lport: The local port that received the LOGO request
* @fp: The LOGO request frame
*
- * Locking Note: The rport lock is expected to be held before calling
- * this function.
- *
* Reference counting: drops kref on return
*/
static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
@@ -2154,6 +2142,8 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
struct fc_rport_priv *rdata;
u32 sid;
+ lockdep_assert_held(&lport->lp_mutex);
+
fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
sid = fc_frame_sid(fp);
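
The libfc hunks above replace the free-form "Locking Note:" kernel-doc text with lockdep_assert_held() calls, turning each documented locking contract into a runtime check (a warning when lockdep is enabled and a caller reaches the function without the mutex). A minimal sketch of the same pattern, using a hypothetical driver object (my_dev and dev_mutex are illustrative names, not libfc ones):

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    struct my_dev {
        struct mutex dev_mutex;
        int state;
    };

    /* Contract: caller holds dev->dev_mutex; lockdep verifies it at runtime. */
    static void my_dev_set_state(struct my_dev *dev, int state)
    {
        lockdep_assert_held(&dev->dev_mutex);
        dev->state = state;
    }

Unlike a comment, the assertion cannot go stale: if a new call path forgets the lock, a CONFIG_PROVE_LOCKING build warns the first time that path runs.
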
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c972cc2b3d5b..93c66ebad907 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1705,6 +1705,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
sc->result = DID_NO_CONNECT << 16;
break;
}
+ /* fall through */
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -1832,6 +1833,7 @@ static void iscsi_tmf_timedout(struct timer_list *t)
static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
struct iscsi_tm *hdr, int age,
int timeout)
+ __must_hold(&session->frwd_lock)
{
struct iscsi_session *session = conn->session;
struct iscsi_task *task;
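
The libiscsi changes are annotations rather than behavior: the /* fall through */ comment marks the case fallthrough as intentional (for -Wimplicit-fallthrough), and iscsi_exec_task_mgmt_fn() gains __must_hold(), a sparse-only attribute asserting that session->frwd_lock is held on entry and still held on return, at zero runtime cost. A self-contained sketch of the convention; the __CHECKER__ guard is only so the example builds outside the kernel tree, where compiler_types.h already provides __must_hold:

    #include <linux/spinlock.h>

    #ifdef __CHECKER__
    # define __must_hold(x)	__attribute__((context(x, 1, 1)))
    #else
    # define __must_hold(x)
    #endif

    struct my_queue {
        spinlock_t lock;
        int depth;
    };

    /* sparse warns if any caller reaches here without q->lock held */
    static int my_queue_drain(struct my_queue *q)
        __must_hold(&q->lock)
    {
        int drained = q->depth;

        q->depth = 0;
        return drained;
    }
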
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 369ef8f23b24..4fcb9e65be57 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -695,7 +695,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
struct scsi_data_buffer *sdb = scsi_in(task->sc);
/*
- * Setup copy of Data-In into the Scsi_Cmnd
+ * Setup copy of Data-In into the struct scsi_cmnd
* Scatterlist case:
* We set up the iscsi_segment to point to the next
* scatterlist entry to copy to. As we go along,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index ff1d612f6fb9..64a958a99f6a 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -176,7 +176,6 @@ qc_already_gone:
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
{
- unsigned long flags;
struct sas_task *task;
struct scatterlist *sg;
int ret = AC_ERR_SYSTEM;
@@ -187,10 +186,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
struct Scsi_Host *host = sas_ha->core.shost;
struct sas_internal *i = to_sas_internal(host->transportt);
- /* TODO: audit callers to ensure they are ready for qc_issue to
- * unconditionally re-enable interrupts
- */
- local_irq_save(flags);
+ /* TODO: we should try to remove that unlock */
spin_unlock(ap->lock);
/* If the device fell off, no sense in issuing commands */
@@ -252,7 +248,6 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
out:
spin_lock(ap->lock);
- local_irq_restore(flags);
return ret;
}
@@ -557,34 +552,46 @@ int sas_ata_init(struct domain_device *found_dev)
{
struct sas_ha_struct *ha = found_dev->port->ha;
struct Scsi_Host *shost = ha->core.shost;
+ struct ata_host *ata_host;
struct ata_port *ap;
int rc;
- ata_host_init(&found_dev->sata_dev.ata_host, ha->dev, &sas_sata_ops);
- ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host,
- &sata_port_info,
- shost);
+ ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL);
+ if (!ata_host) {
+ SAS_DPRINTK("ata host alloc failed.\n");
+ return -ENOMEM;
+ }
+
+ ata_host_init(ata_host, ha->dev, &sas_sata_ops);
+
+ ap = ata_sas_port_alloc(ata_host, &sata_port_info, shost);
if (!ap) {
SAS_DPRINTK("ata_sas_port_alloc failed.\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_host;
}
ap->private_data = found_dev;
ap->cbl = ATA_CBL_SATA;
ap->scsi_host = shost;
rc = ata_sas_port_init(ap);
- if (rc) {
- ata_sas_port_destroy(ap);
- return rc;
- }
- rc = ata_sas_tport_add(found_dev->sata_dev.ata_host.dev, ap);
- if (rc) {
- ata_sas_port_destroy(ap);
- return rc;
- }
+ if (rc)
+ goto destroy_port;
+
+ rc = ata_sas_tport_add(ata_host->dev, ap);
+ if (rc)
+ goto destroy_port;
+
+ found_dev->sata_dev.ata_host = ata_host;
found_dev->sata_dev.ap = ap;
return 0;
+
+destroy_port:
+ ata_sas_port_destroy(ap);
+free_host:
+ ata_host_put(ata_host);
+ return rc;
}
void sas_ata_task_abort(struct sas_task *task)
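
Two things change in sas_ata.c: sas_ata_qc_issue() drops the long-standing local_irq_save()/restore() bracketing around the ap->lock release, and sas_ata_init() stops using an ata_host embedded in the domain_device. The host is now allocated with kzalloc() and reference-counted, so sas_free_device() can drop it with ata_host_put() (see the sas_discover.c hunk below) and the init error paths can unwind through goto labels instead of duplicating cleanup. A compact sketch of that alloc-then-unwind shape; the types and the registration step are invented stand-ins, not the libsas/libata APIs:

    #include <linux/slab.h>
    #include <linux/errno.h>

    struct host { int id; };
    struct port { struct host *h; };

    /* stand-in for a registration step that can fail, like ata_sas_tport_add() */
    static int port_register(struct port *pt)
    {
        return pt->h ? 0 : -EINVAL;
    }

    static int host_port_init(struct host **hp, struct port **pp)
    {
        struct host *h;
        struct port *pt;
        int rc;

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
            return -ENOMEM;

        pt = kzalloc(sizeof(*pt), GFP_KERNEL);
        if (!pt) {
            rc = -ENOMEM;
            goto free_host;
        }
        pt->h = h;

        rc = port_register(pt);
        if (rc)
            goto free_port;

        *hp = h;
        *pp = pt;
        return 0;

    free_port:
        kfree(pt);
    free_host:
        kfree(h);	/* the real code drops a reference: ata_host_put() */
        return rc;
    }

Each label releases exactly what the earlier steps set up, so success and every failure leave the caller in a consistent state.
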
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 1ffca28fe6a8..0148ae62a52a 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -316,6 +316,8 @@ void sas_free_device(struct kref *kref)
if (dev_is_sata(dev) && dev->sata_dev.ap) {
ata_sas_tport_delete(dev->sata_dev.ap);
ata_sas_port_destroy(dev->sata_dev.ap);
+ ata_host_put(dev->sata_dev.ata_host);
+ dev->sata_dev.ata_host = NULL;
dev->sata_dev.ap = NULL;
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index ceab5e5c41c2..33229348dcb6 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -759,7 +759,7 @@ retry:
spin_unlock_irq(shost->host_lock);
SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
- __func__, atomic_read(&shost->host_busy), shost->host_failed);
+ __func__, scsi_host_busy(shost), shost->host_failed);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
* complete via the normal sas_task completion mechanism),
@@ -801,7 +801,7 @@ out:
goto retry;
SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
- __func__, atomic_read(&shost->host_busy),
+ __func__, scsi_host_busy(shost),
shost->host_failed, tries);
}
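
The sas_scsi_host.c hunks switch the error-handler debug prints from reading shost->host_busy directly to the scsi_host_busy() accessor; the SCSI core no longer promises that a single atomic holds the in-flight count (it may be derived from the block-layer tag map), so the accessor is the supported way to observe it. A small usage sketch, debug output only, assuming a valid Scsi_Host pointer:

    #include <linux/printk.h>
    #include <scsi/scsi_host.h>

    static void log_host_load(struct Scsi_Host *shost)
    {
        /* scsi_host_busy() returns the number of commands in flight */
        pr_info("host%u: busy=%d failed=%u\n",
                shost->host_no, scsi_host_busy(shost), shost->host_failed);
    }
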
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index cb6aa802c48e..092a971d066b 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,8 +1,8 @@
#/*******************************************************************
# * This file is part of the Emulex Linux Device Driver for *
# * Fibre Channel Host Bus Adapters. *
-# * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
-# * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+# * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+# * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
# * Copyright (C) 2004-2012 Emulex. All rights reserved. *
# * EMULEX and SLI are trademarks of Emulex. *
# * www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 20b249a649dd..e0d0da5f43d6 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -840,8 +840,7 @@ struct lpfc_hba {
#define LPFC_ENABLE_FCP 1
#define LPFC_ENABLE_NVME 2
#define LPFC_ENABLE_BOTH 3
- uint32_t nvme_embed_pbde;
- uint32_t fcp_embed_pbde;
+ uint32_t cfg_enable_pbde;
uint32_t io_channel_irqs; /* number of irqs for io channels */
struct nvmet_fc_target_port *targetport;
lpfc_vpd_t vpd; /* vital product data */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 729d343861f4..5a25553415f8 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -64,6 +64,9 @@
#define LPFC_MIN_MRQ_POST 512
#define LPFC_MAX_MRQ_POST 2048
+#define LPFC_MAX_NVME_INFO_TMP_LEN 100
+#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n"
+
/*
* Write key size should be multiple of 4. If write key is changed
* make sure that library write key is also changed.
@@ -158,14 +161,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
char *statep;
int i;
int len = 0;
+ char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
- len += snprintf(buf, PAGE_SIZE, "NVME Disabled\n");
+ len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
return len;
}
if (phba->nvmet_support) {
if (!phba->targetport) {
- len = snprintf(buf, PAGE_SIZE,
+ len = scnprintf(buf, PAGE_SIZE,
"NVME Target: x%llx is not allocated\n",
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
@@ -175,135 +179,169 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
statep = "REGISTERED";
else
statep = "INIT";
- len += snprintf(buf + len, PAGE_SIZE - len,
- "NVME Target Enabled State %s\n",
- statep);
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
- "NVME Target: lpfc",
- phba->brd_no,
- wwn_to_u64(vport->fc_portname.u.wwn),
- wwn_to_u64(vport->fc_nodename.u.wwn),
- phba->targetport->port_id);
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "\nNVME Target: Statistics\n");
+ scnprintf(tmp, sizeof(tmp),
+ "NVME Target Enabled State %s\n",
+ statep);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
+ "NVME Target: lpfc",
+ phba->brd_no,
+ wwn_to_u64(vport->fc_portname.u.wwn),
+ wwn_to_u64(vport->fc_nodename.u.wwn),
+ phba->targetport->port_id);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE)
+ >= PAGE_SIZE)
+ goto buffer_done;
+
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Rcv %08x Drop %08x Abort %08x\n",
- atomic_read(&tgtp->rcv_ls_req_in),
- atomic_read(&tgtp->rcv_ls_req_drop),
- atomic_read(&tgtp->xmt_ls_abort));
+ scnprintf(tmp, sizeof(tmp),
+ "LS: Rcv %08x Drop %08x Abort %08x\n",
+ atomic_read(&tgtp->rcv_ls_req_in),
+ atomic_read(&tgtp->rcv_ls_req_drop),
+ atomic_read(&tgtp->xmt_ls_abort));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
if (atomic_read(&tgtp->rcv_ls_req_in) !=
atomic_read(&tgtp->rcv_ls_req_out)) {
- len += snprintf(buf+len, PAGE_SIZE-len,
- "Rcv LS: in %08x != out %08x\n",
- atomic_read(&tgtp->rcv_ls_req_in),
- atomic_read(&tgtp->rcv_ls_req_out));
+ scnprintf(tmp, sizeof(tmp),
+ "Rcv LS: in %08x != out %08x\n",
+ atomic_read(&tgtp->rcv_ls_req_in),
+ atomic_read(&tgtp->rcv_ls_req_out));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
}
- len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %08x Drop %08x Cmpl %08x\n",
- atomic_read(&tgtp->xmt_ls_rsp),
- atomic_read(&tgtp->xmt_ls_drop),
- atomic_read(&tgtp->xmt_ls_rsp_cmpl));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "LS: RSP Abort %08x xb %08x Err %08x\n",
- atomic_read(&tgtp->xmt_ls_rsp_aborted),
- atomic_read(&tgtp->xmt_ls_rsp_xb_set),
- atomic_read(&tgtp->xmt_ls_rsp_error));
-
- len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP: Rcv %08x Defer %08x Release %08x "
- "Drop %08x\n",
- atomic_read(&tgtp->rcv_fcp_cmd_in),
- atomic_read(&tgtp->rcv_fcp_cmd_defer),
- atomic_read(&tgtp->xmt_fcp_release),
- atomic_read(&tgtp->rcv_fcp_cmd_drop));
+ scnprintf(tmp, sizeof(tmp),
+ "LS: Xmt %08x Drop %08x Cmpl %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp),
+ atomic_read(&tgtp->xmt_ls_drop),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "LS: RSP Abort %08x xb %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp_aborted),
+ atomic_read(&tgtp->xmt_ls_rsp_xb_set),
+ atomic_read(&tgtp->xmt_ls_rsp_error));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "FCP: Rcv %08x Defer %08x Release %08x "
+ "Drop %08x\n",
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_defer),
+ atomic_read(&tgtp->xmt_fcp_release),
+ atomic_read(&tgtp->rcv_fcp_cmd_drop));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
atomic_read(&tgtp->rcv_fcp_cmd_out)) {
- len += snprintf(buf+len, PAGE_SIZE-len,
- "Rcv FCP: in %08x != out %08x\n",
- atomic_read(&tgtp->rcv_fcp_cmd_in),
- atomic_read(&tgtp->rcv_fcp_cmd_out));
+ scnprintf(tmp, sizeof(tmp),
+ "Rcv FCP: in %08x != out %08x\n",
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_out));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
}
- len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
- "drop %08x\n",
- atomic_read(&tgtp->xmt_fcp_read),
- atomic_read(&tgtp->xmt_fcp_read_rsp),
- atomic_read(&tgtp->xmt_fcp_write),
- atomic_read(&tgtp->xmt_fcp_rsp),
- atomic_read(&tgtp->xmt_fcp_drop));
-
- len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
- atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
- atomic_read(&tgtp->xmt_fcp_rsp_error),
- atomic_read(&tgtp->xmt_fcp_rsp_drop));
-
- len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
- atomic_read(&tgtp->xmt_fcp_rsp_aborted),
- atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
- atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "ABORT: Xmt %08x Cmpl %08x\n",
- atomic_read(&tgtp->xmt_fcp_abort),
- atomic_read(&tgtp->xmt_fcp_abort_cmpl));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x",
- atomic_read(&tgtp->xmt_abort_sol),
- atomic_read(&tgtp->xmt_abort_unsol),
- atomic_read(&tgtp->xmt_abort_rsp),
- atomic_read(&tgtp->xmt_abort_rsp_error));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "DELAY: ctx %08x fod %08x wqfull %08x\n",
- atomic_read(&tgtp->defer_ctx),
- atomic_read(&tgtp->defer_fod),
- atomic_read(&tgtp->defer_wqfull));
+ scnprintf(tmp, sizeof(tmp),
+ "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
+ "drop %08x\n",
+ atomic_read(&tgtp->xmt_fcp_read),
+ atomic_read(&tgtp->xmt_fcp_read_rsp),
+ atomic_read(&tgtp->xmt_fcp_write),
+ atomic_read(&tgtp->xmt_fcp_rsp),
+ atomic_read(&tgtp->xmt_fcp_drop));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
+ atomic_read(&tgtp->xmt_fcp_rsp_error),
+ atomic_read(&tgtp->xmt_fcp_rsp_drop));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_aborted),
+ atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
+ atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "ABORT: Xmt %08x Cmpl %08x\n",
+ atomic_read(&tgtp->xmt_fcp_abort),
+ atomic_read(&tgtp->xmt_fcp_abort_cmpl));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n",
+ atomic_read(&tgtp->xmt_abort_sol),
+ atomic_read(&tgtp->xmt_abort_unsol),
+ atomic_read(&tgtp->xmt_abort_rsp),
+ atomic_read(&tgtp->xmt_abort_rsp_error));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "DELAY: ctx %08x fod %08x wqfull %08x\n",
+ atomic_read(&tgtp->defer_ctx),
+ atomic_read(&tgtp->defer_fod),
+ atomic_read(&tgtp->defer_wqfull));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
/* Calculate outstanding IOs */
tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
tot += atomic_read(&tgtp->xmt_fcp_release);
tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
- len += snprintf(buf + len, PAGE_SIZE - len,
- "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
- "CTX Outstanding %08llx\n",
- phba->sli4_hba.nvmet_xri_cnt,
- phba->sli4_hba.nvmet_io_wait_cnt,
- phba->sli4_hba.nvmet_io_wait_total,
- tot);
-
- len += snprintf(buf+len, PAGE_SIZE-len, "\n");
- return len;
+ scnprintf(tmp, sizeof(tmp),
+ "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
+ "CTX Outstanding %08llx\n\n",
+ phba->sli4_hba.nvmet_xri_cnt,
+ phba->sli4_hba.nvmet_io_wait_cnt,
+ phba->sli4_hba.nvmet_io_wait_total,
+ tot);
+ strlcat(buf, tmp, PAGE_SIZE);
+ goto buffer_done;
}
localport = vport->localport;
if (!localport) {
- len = snprintf(buf, PAGE_SIZE,
+ len = scnprintf(buf, PAGE_SIZE,
"NVME Initiator x%llx is not allocated\n",
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
}
lport = (struct lpfc_nvme_lport *)localport->private;
- len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
-
- spin_lock_irq(shost->host_lock);
- len += snprintf(buf + len, PAGE_SIZE - len,
- "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
- phba->brd_no,
- phba->sli4_hba.max_cfg_param.max_xri,
- phba->sli4_hba.nvme_xri_max,
- phba->sli4_hba.scsi_xri_max,
- lpfc_sli4_get_els_iocb_cnt(phba));
+ if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ rcu_read_lock();
+ scnprintf(tmp, sizeof(tmp),
+ "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
+ phba->brd_no,
+ phba->sli4_hba.max_cfg_param.max_xri,
+ phba->sli4_hba.nvme_xri_max,
+ phba->sli4_hba.scsi_xri_max,
+ lpfc_sli4_get_els_iocb_cnt(phba));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -311,13 +349,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
else
statep = "UNKNOWN ";
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
- "NVME LPORT lpfc",
- phba->brd_no,
- wwn_to_u64(vport->fc_portname.u.wwn),
- wwn_to_u64(vport->fc_nodename.u.wwn),
- localport->port_id, statep);
+ scnprintf(tmp, sizeof(tmp),
+ "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
+ "NVME LPORT lpfc",
+ phba->brd_no,
+ wwn_to_u64(vport->fc_portname.u.wwn),
+ wwn_to_u64(vport->fc_nodename.u.wwn),
+ localport->port_id, statep);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
rport = lpfc_ndlp_get_nrport(ndlp);
@@ -343,56 +383,77 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
}
/* Tab in to show lport ownership. */
- len += snprintf(buf + len, PAGE_SIZE - len,
- "NVME RPORT ");
- if (phba->brd_no >= 10)
- len += snprintf(buf + len, PAGE_SIZE - len, " ");
-
- len += snprintf(buf + len, PAGE_SIZE - len, "WWPN x%llx ",
- nrport->port_name);
- len += snprintf(buf + len, PAGE_SIZE - len, "WWNN x%llx ",
- nrport->node_name);
- len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ",
- nrport->port_id);
+ if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ if (phba->brd_no >= 10) {
+ if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+
+ scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
+ nrport->port_name);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
+ nrport->node_name);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "DID x%06x ",
+ nrport->port_id);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
/* An NVME rport can have multiple roles. */
- if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "INITIATOR ");
- if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "TARGET ");
- if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "DISCSRVC ");
+ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
+ if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+ if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
+ if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+ if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
+ if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
FC_PORT_ROLE_NVME_TARGET |
- FC_PORT_ROLE_NVME_DISCOVERY))
- len += snprintf(buf + len, PAGE_SIZE - len,
- "UNKNOWN ROLE x%x",
- nrport->port_role);
-
- len += snprintf(buf + len, PAGE_SIZE - len, "%s ", statep);
- /* Terminate the string. */
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ FC_PORT_ROLE_NVME_DISCOVERY)) {
+ scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
+ nrport->port_role);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+ }
+
+ scnprintf(tmp, sizeof(tmp), "%s\n", statep);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
}
- spin_unlock_irq(shost->host_lock);
+ rcu_read_unlock();
if (!lport)
- return len;
-
- len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
- len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %010x Cmpl %010x Abort %08x\n",
- atomic_read(&lport->fc4NvmeLsRequests),
- atomic_read(&lport->fc4NvmeLsCmpls),
- atomic_read(&lport->xmt_ls_abort));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
- atomic_read(&lport->xmt_ls_err),
- atomic_read(&lport->cmpl_ls_xb),
- atomic_read(&lport->cmpl_ls_err));
+ goto buffer_done;
+
+ if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "LS: Xmt %010x Cmpl %010x Abort %08x\n",
+ atomic_read(&lport->fc4NvmeLsRequests),
+ atomic_read(&lport->fc4NvmeLsCmpls),
+ atomic_read(&lport->xmt_ls_abort));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->xmt_ls_err),
+ atomic_read(&lport->cmpl_ls_xb),
+ atomic_read(&lport->cmpl_ls_err));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
totin = 0;
totout = 0;
@@ -405,25 +466,46 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
data3 = atomic_read(&cstat->fc4NvmeControlRequests);
totout += (data1 + data2 + data3);
}
- len += snprintf(buf+len, PAGE_SIZE-len,
- "Total FCP Cmpl %016llx Issue %016llx "
- "OutIO %016llx\n",
- totin, totout, totout - totin);
-
- len += snprintf(buf+len, PAGE_SIZE-len,
- " abort %08x noxri %08x nondlp %08x qdepth %08x "
- "wqerr %08x err %08x\n",
- atomic_read(&lport->xmt_fcp_abort),
- atomic_read(&lport->xmt_fcp_noxri),
- atomic_read(&lport->xmt_fcp_bad_ndlp),
- atomic_read(&lport->xmt_fcp_qdepth),
- atomic_read(&lport->xmt_fcp_err),
- atomic_read(&lport->xmt_fcp_wqerr));
-
- len += snprintf(buf + len, PAGE_SIZE - len,
- "FCP CMPL: xb %08x Err %08x\n",
- atomic_read(&lport->cmpl_fcp_xb),
- atomic_read(&lport->cmpl_fcp_err));
+ scnprintf(tmp, sizeof(tmp),
+ "Total FCP Cmpl %016llx Issue %016llx "
+ "OutIO %016llx\n",
+ totin, totout, totout - totin);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "\tabort %08x noxri %08x nondlp %08x qdepth %08x "
+ "wqerr %08x err %08x\n",
+ atomic_read(&lport->xmt_fcp_abort),
+ atomic_read(&lport->xmt_fcp_noxri),
+ atomic_read(&lport->xmt_fcp_bad_ndlp),
+ atomic_read(&lport->xmt_fcp_qdepth),
+ atomic_read(&lport->xmt_fcp_err),
+ atomic_read(&lport->xmt_fcp_wqerr));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp),
+ "FCP CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->cmpl_fcp_xb),
+ atomic_read(&lport->cmpl_fcp_err));
+ strlcat(buf, tmp, PAGE_SIZE);
+
+buffer_done:
+ len = strnlen(buf, PAGE_SIZE);
+
+ if (unlikely(len >= (PAGE_SIZE - 1))) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+ "6314 Catching potential buffer "
+ "overflow > PAGE_SIZE = %lu bytes\n",
+ PAGE_SIZE);
+ strlcpy(buf + PAGE_SIZE - 1 -
+ strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1),
+ LPFC_NVME_INFO_MORE_STR,
+ strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1)
+ + 1);
+ }
+
return len;
}
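
The lpfc_nvme_info_show() rework above is the bulk of the attr changes. The old code accumulated `len += snprintf(buf + len, PAGE_SIZE - len, ...)`; because snprintf() returns the length that *would* have been written rather than what was written, len can exceed PAGE_SIZE, after which the next `PAGE_SIZE - len` underflows to a huge size_t. The new code formats each fragment into a bounded stack buffer with scnprintf() (which returns the bytes actually stored), appends with strlcat(), and jumps to buffer_done as soon as strlcat()'s return value reports truncation. A reduced sketch of the idiom, with invented field names:

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/mm.h>	/* PAGE_SIZE */

    static ssize_t stats_fill(char *buf, u32 rx, u32 tx)
    {
        char tmp[64];

        buf[0] = '\0';	/* ensure a clean string before the first strlcat() */

        scnprintf(tmp, sizeof(tmp), "RX: %08x\n", rx);
        if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
            goto done;	/* truncated: stop appending */

        scnprintf(tmp, sizeof(tmp), "TX: %08x\n", tx);
        strlcat(buf, tmp, PAGE_SIZE);
    done:
        return strnlen(buf, PAGE_SIZE);
    }

strlcat() returns the total length it tried to create, so a value >= the buffer size is the truncation signal the driver keys its buffer_done exits on.
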
@@ -5836,6 +5918,24 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
+ } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
+ switch (phba->fc_linkspeed) {
+ case LPFC_ASYNC_LINK_SPEED_10GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_25GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_40GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
+ break;
+ case LPFC_ASYNC_LINK_SPEED_100GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
+ break;
+ default:
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
} else
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
@@ -5891,7 +5991,6 @@ lpfc_get_stats(struct Scsi_Host *shost)
struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
- unsigned long seconds;
int rc = 0;
/*
@@ -5992,12 +6091,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
hs->dumped_frames = -1;
- seconds = get_seconds();
- if (seconds < psli->stats_start)
- hs->seconds_since_last_reset = seconds +
- ((unsigned long)-1 - psli->stats_start);
- else
- hs->seconds_since_last_reset = seconds - psli->stats_start;
+ hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start;
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -6076,7 +6170,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
else
lso->link_events = (phba->fc_eventTag >> 1);
- psli->stats_start = get_seconds();
+ psli->stats_start = ktime_get_seconds();
mempool_free(pmboxq, phba->mbox_mem_pool);
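
Both timestamp hunks replace get_seconds() with ktime_get_seconds(). get_seconds() read wall-clock time into an unsigned long, which is 32-bit on 32-bit machines (so it wraps in 2038) and jumps whenever the clock is set, which is why the old code carried the manual `seconds < psli->stats_start` wraparound test. ktime_get_seconds() returns a monotonic time64_t, so a plain subtraction is always correct and the special case disappears. Minimal sketch, with stats_start assumed to be a time64_t:

    #include <linux/timekeeping.h>

    static time64_t stats_start;

    static void stats_reset(void)
    {
        /* monotonic and 64-bit: immune to settimeofday and the y2038 wrap */
        stats_start = ktime_get_seconds();
    }

    static time64_t stats_age_seconds(void)
    {
        return ktime_get_seconds() - stats_start;
    }
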
@@ -6454,6 +6548,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_auto_imax = 0;
phba->initial_imax = phba->cfg_fcp_imax;
+ phba->cfg_enable_pbde = 0;
+
/* A value of 0 means use the number of CPUs found in the system */
if (phba->cfg_fcp_io_channel == 0)
phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h
index 931db52692f5..9659a8fff971 100644
--- a/drivers/scsi/lpfc/lpfc_attr.h
+++ b/drivers/scsi/lpfc/lpfc_attr.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index edb1a18a6414..90745feca808 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index e7d95a4e8042..32347c87e3b4 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
index 6b32b0ae7506..43cf46a3a71f 100644
--- a/drivers/scsi/lpfc/lpfc_compat.h
+++ b/drivers/scsi/lpfc/lpfc_compat.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 4ae9ba425e78..bea24bc4410a 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -469,7 +469,6 @@ int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
void lpfc_start_fdiscs(struct lpfc_hba *phba);
struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
-#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define HBA_EVENT_RSCN 5
#define HBA_EVENT_LINK_UP 2
#define HBA_EVENT_LINK_DOWN 3
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d4a200ae5a6f..1cbdc892ff95 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index f32eaeb2225a..30efc7bf91bd 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 5a7547f9d8d8..28e2b60fc5c0 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -150,6 +150,9 @@ struct lpfc_node_rrq {
unsigned long rrq_stop_time;
};
+#define lpfc_ndlp_check_qdepth(phba, ndlp) \
+ (ndlp->cmd_qdepth < phba->sli4_hba.max_cfg_param.max_xri)
+
/* Defines for nlp_flag (uint32) */
#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6d84a10fef07..4dda969e947c 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -5640,8 +5640,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
" mbx status x%x\n",
shdr_status, shdr_add_status, mb->mbxStatus);
- if (mb->mbxStatus && !(shdr_status &&
- shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)) {
+ if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status ||
+ (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) ||
+ (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) {
mempool_free(pmb, phba->mbox_mem_pool);
goto error;
}
@@ -5661,6 +5662,7 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lcb_res = (struct fc_lcb_res_frame *)
(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
icmd = &elsiocb->iocb;
icmd->ulpContext = lcb_context->rx_id;
icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
@@ -5669,7 +5671,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*((uint32_t *)(pcmd)) = ELS_CMD_ACC;
lcb_res->lcb_sub_command = lcb_context->sub_command;
lcb_res->lcb_type = lcb_context->type;
+ lcb_res->capability = lcb_context->capability;
lcb_res->lcb_frequency = lcb_context->frequency;
+ lcb_res->lcb_duration = lcb_context->duration;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@@ -5712,6 +5716,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
uint32_t beacon_state)
{
struct lpfc_hba *phba = vport->phba;
+ union lpfc_sli4_cfg_shdr *cfg_shdr;
LPFC_MBOXQ_t *mbox = NULL;
uint32_t len;
int rc;
@@ -5720,6 +5725,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
if (!mbox)
return 1;
+ cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
len = sizeof(struct lpfc_mbx_set_beacon_config) -
sizeof(struct lpfc_sli4_cfg_mhdr);
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
@@ -5732,8 +5738,40 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
phba->sli4_hba.physical_port);
bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
beacon_state);
- bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1);
- bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, 0);
+ mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */
+
+ /*
+ * Check bv1s bit before issuing the mailbox
+ * if bv1s == 1, LCB V1 supported
+ * else, LCB V0 supported
+ */
+
+ if (phba->sli4_hba.pc_sli4_params.bv1s) {
+ /* COMMON_SET_BEACON_CONFIG_V1 */
+ cfg_shdr->request.word9 = BEACON_VERSION_V1;
+ lcb_context->capability |= LCB_CAPABILITY_DURATION;
+ bf_set(lpfc_mbx_set_beacon_port_type,
+ &mbox->u.mqe.un.beacon_config, 0);
+ bf_set(lpfc_mbx_set_beacon_duration_v1,
+ &mbox->u.mqe.un.beacon_config,
+ be16_to_cpu(lcb_context->duration));
+ } else {
+ /* COMMON_SET_BEACON_CONFIG_V0 */
+ if (be16_to_cpu(lcb_context->duration) != 0) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+ cfg_shdr->request.word9 = BEACON_VERSION_V0;
+ lcb_context->capability &= ~(LCB_CAPABILITY_DURATION);
+ bf_set(lpfc_mbx_set_beacon_state,
+ &mbox->u.mqe.un.beacon_config, beacon_state);
+ bf_set(lpfc_mbx_set_beacon_port_type,
+ &mbox->u.mqe.un.beacon_config, 1);
+ bf_set(lpfc_mbx_set_beacon_duration,
+ &mbox->u.mqe.un.beacon_config,
+ be16_to_cpu(lcb_context->duration));
+ }
+
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
@@ -5784,24 +5822,16 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
beacon->lcb_frequency,
be16_to_cpu(beacon->lcb_duration));
- if (phba->sli_rev < LPFC_SLI_REV4 ||
- (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
- LPFC_SLI_INTF_IF_TYPE_2)) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- goto rjt;
- }
-
- if (phba->hba_flag & HBA_FCOE_MODE) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- goto rjt;
- }
if (beacon->lcb_sub_command != LPFC_LCB_ON &&
beacon->lcb_sub_command != LPFC_LCB_OFF) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
- if (beacon->lcb_sub_command == LPFC_LCB_ON &&
- be16_to_cpu(beacon->lcb_duration) != 0) {
+
+ if (phba->sli_rev < LPFC_SLI_REV4 ||
+ phba->hba_flag & HBA_FCOE_MODE ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
+ LPFC_SLI_INTF_IF_TYPE_2)) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
@@ -5814,8 +5844,10 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
lcb_context->sub_command = beacon->lcb_sub_command;
+ lcb_context->capability = 0;
lcb_context->type = beacon->lcb_type;
lcb_context->frequency = beacon->lcb_frequency;
+ lcb_context->duration = beacon->lcb_duration;
lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
lcb_context->rx_id = cmdiocb->iocb.ulpContext;
lcb_context->ndlp = lpfc_nlp_get(ndlp);
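
In the LCB handling above, the on-the-wire payload fields stay big-endian in lcb_context (the duration is copied straight from the frame) and are converted with be16_to_cpu() only where a host-order value is needed; the V0 mailbox path additionally rejects a nonzero duration, since only the V1 layout can carry one. A small sketch of that keep-it-wire-order convention, with invented struct and field names:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct lcb_sketch {
        __be16 duration;	/* big-endian, exactly as received */
    };

    static u16 lcb_duration_host(const struct lcb_sketch *f)
    {
        /* convert once, at the point of use; sparse flags missed conversions */
        return be16_to_cpu(f->duration);
    }
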
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2fef54fab86d..eb71877f12f8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 08a3f1520159..009aa0eee040 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -1065,14 +1065,17 @@ typedef struct _ELS_PKT { /* Structure is in Big Endian format */
struct fc_lcb_request_frame {
uint32_t lcb_command; /* ELS command opcode (0x81) */
uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */
-#define LPFC_LCB_ON 0x1
-#define LPFC_LCB_OFF 0x2
- uint8_t reserved[3];
-
+#define LPFC_LCB_ON 0x1
+#define LPFC_LCB_OFF 0x2
+ uint8_t reserved[2];
+ uint8_t capability; /* LCB Payload Word 1, bit 0:7 */
uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */
-#define LPFC_LCB_GREEN 0x1
-#define LPFC_LCB_AMBER 0x2
+#define LPFC_LCB_GREEN 0x1
+#define LPFC_LCB_AMBER 0x2
uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */
+#define LCB_CAPABILITY_DURATION 1
+#define BEACON_VERSION_V1 1
+#define BEACON_VERSION_V0 0
uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */
};
@@ -1082,7 +1085,8 @@ struct fc_lcb_request_frame {
struct fc_lcb_res_frame {
uint32_t lcb_ls_acc; /* Acceptance of LCB request (0x02) */
uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */
- uint8_t reserved[3];
+ uint8_t reserved[2];
+ uint8_t capability; /* LCB Payload Word 1, bit 0:7 */
uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */
uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */
uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index f43f0bacb77a..083f8c8706e5 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1790,9 +1790,12 @@ struct lpfc_mbx_set_beacon_config {
#define lpfc_mbx_set_beacon_duration_SHIFT 16
#define lpfc_mbx_set_beacon_duration_MASK 0x000000FF
#define lpfc_mbx_set_beacon_duration_WORD word4
-#define lpfc_mbx_set_beacon_status_duration_SHIFT 24
-#define lpfc_mbx_set_beacon_status_duration_MASK 0x000000FF
-#define lpfc_mbx_set_beacon_status_duration_WORD word4
+
+/* COMMON_SET_BEACON_CONFIG_V1 */
+#define lpfc_mbx_set_beacon_duration_v1_SHIFT 16
+#define lpfc_mbx_set_beacon_duration_v1_MASK 0x0000FFFF
+#define lpfc_mbx_set_beacon_duration_v1_WORD word4
+ uint32_t word5; /* RESERVED */
};
struct lpfc_id_range {
@@ -2243,6 +2246,7 @@ struct lpfc_mbx_redisc_fcf_tbl {
*/
#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67
#define ADD_STATUS_FW_NOT_SUPPORTED 0xEB
+#define ADD_STATUS_INVALID_REQUEST 0x4B
struct lpfc_mbx_sli4_config {
struct mbox_header header;
@@ -3392,7 +3396,41 @@ struct lpfc_sli4_parameters {
#define cfg_nosr_SHIFT 9
#define cfg_nosr_MASK 0x00000001
#define cfg_nosr_WORD word19
-#define LPFC_NODELAY_MAX_IO 32
+
+#define cfg_bv1s_SHIFT 10
+#define cfg_bv1s_MASK 0x00000001
+#define cfg_bv1s_WORD word19
+
+ uint32_t word20;
+#define cfg_max_tow_xri_SHIFT 0
+#define cfg_max_tow_xri_MASK 0x0000ffff
+#define cfg_max_tow_xri_WORD word20
+
+ uint32_t word21; /* RESERVED */
+ uint32_t word22; /* RESERVED */
+ uint32_t word23; /* RESERVED */
+
+ uint32_t word24;
+#define cfg_frag_field_offset_SHIFT 0
+#define cfg_frag_field_offset_MASK 0x0000ffff
+#define cfg_frag_field_offset_WORD word24
+
+#define cfg_frag_field_size_SHIFT 16
+#define cfg_frag_field_size_MASK 0x0000ffff
+#define cfg_frag_field_size_WORD word24
+
+ uint32_t word25;
+#define cfg_sgl_field_offset_SHIFT 0
+#define cfg_sgl_field_offset_MASK 0x0000ffff
+#define cfg_sgl_field_offset_WORD word25
+
+#define cfg_sgl_field_size_SHIFT 16
+#define cfg_sgl_field_size_MASK 0x0000ffff
+#define cfg_sgl_field_size_WORD word25
+
+ uint32_t word26; /* Chain SGE initial value LOW */
+ uint32_t word27; /* Chain SGE initial value HIGH */
+#define LPFC_NODELAY_MAX_IO 32
};
#define LPFC_SET_UE_RECOVERY 0x10
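
The new cfg_bv1s and lpfc_mbx_set_beacon_duration_v1 fields follow lpfc's _SHIFT/_MASK/_WORD macro convention, which the driver's bf_set()/bf_get() helpers consume by token pasting. A standalone illustration of how such accessors pack a field into a register word; the bf_* macros below are simplified equivalents of the ones in lpfc_hw4.h, and the struct is invented:

    #include <stdint.h>
    #include <stdio.h>

    #define bf_set(name, ptr, value) \
        ((ptr)->name##_WORD = (((uint32_t)(value) & name##_MASK) << name##_SHIFT) | \
            ((ptr)->name##_WORD & ~((uint32_t)name##_MASK << name##_SHIFT)))
    #define bf_get(name, ptr) \
        (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

    struct beacon_cfg {
        uint32_t word4;
    };

    /* 16-bit duration in bits 31:16 of word4, matching the V1 layout above */
    #define cfg_duration_SHIFT	16
    #define cfg_duration_MASK	0x0000ffff
    #define cfg_duration_WORD	word4

    int main(void)
    {
        struct beacon_cfg bc = { 0 };

        bf_set(cfg_duration, &bc, 0x1234);
        printf("word4=0x%08x duration=0x%x\n",
               (unsigned)bc.word4, (unsigned)bf_get(cfg_duration, &bc));
        return 0;
    }

Widening the V1 duration mask to 0xFFFF (from V0's 0xFF) is what lets the LCB duration, a 16-bit wire field, be programmed without loss.
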
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
index 07ee34017d88..d48414e295a0 100644
--- a/drivers/scsi/lpfc/lpfc_ids.h
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 52cae87da0d2..f3cae733ae2d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -10387,6 +10387,11 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
!nvmet_xri_cmpl) {
if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
+ if (!nvmet_xri_cmpl)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6424 NVMET XRI exchange busy "
+ "wait time: %d seconds.\n",
+ wait_time/1000);
if (!nvme_xri_cmpl)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6100 NVME XRI exchange busy "
@@ -10639,6 +10644,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
+ sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
mbx_sli4_parameters);
sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
@@ -10668,18 +10674,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
}
- /* Only embed PBDE for if_type 6 */
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
- LPFC_SLI_INTF_IF_TYPE_6) {
- phba->fcp_embed_pbde = 1;
- phba->nvme_embed_pbde = 1;
- }
-
- /* PBDE support requires xib be set */
- if (!bf_get(cfg_xib, mbx_sli4_parameters)) {
- phba->fcp_embed_pbde = 0;
- phba->nvme_embed_pbde = 0;
- }
+ /* Only embed PBDE for if_type 6, PBDE support requires xib be set */
+ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
+ phba->cfg_enable_pbde = 0;
/*
* To support Suppress Response feature we must satisfy 3 conditions.
@@ -10713,10 +10711,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->fcp_embed_io = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
- "6422 XIB %d: FCP %d %d NVME %d %d %d %d\n",
+ "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
bf_get(cfg_xib, mbx_sli4_parameters),
- phba->fcp_embed_pbde, phba->fcp_embed_io,
- phba->nvme_support, phba->nvme_embed_pbde,
+ phba->cfg_enable_pbde,
+ phba->fcp_embed_io, phba->nvme_support,
phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 3b654ad08d1f..ea10f03437f5 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 47c02da11f01..deb094fdbb79 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 0758edb9dfe2..9c22a2c93462 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index b93e78f671fb..95d60ab5ebf9 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 1a803975bcbc..bd9bce9d9974 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -1062,6 +1062,9 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ /* Retrieve RPI from LOGO IOCB. RPI is used for CMD_ABORT_XRI_CN */
+ if (vport->phba->sli_rev == LPFC_SLI_REV3)
+ ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag;
/* software abort outstanding PLOGI */
lpfc_els_abort(vport->phba, ndlp);
@@ -1982,12 +1985,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_disc, nvpr))
ndlp->nlp_type |= NLP_NVME_DISCOVERY;
- /* This node is an NVME target. Adjust the command
- * queue depth on this node to not exceed the available
- * xris.
- */
- ndlp->cmd_qdepth = phba->sli4_hba.nvme_xri_max;
-
/*
* If prli_fba is set, the Target supports FirstBurst.
* If prli_fb_sz is 0, the FirstBurst size is unlimited,
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 76a5a99605aa..028462e5994d 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1135,9 +1135,6 @@ out_err:
else
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
- atomic_dec(&ndlp->cmd_pending);
-
/* Update stats and complete the IO. There is
* no need for dma unprep because the nvme_transport
* owns the dma address.
@@ -1279,6 +1276,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 9 */
bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+ /* Words 13 14 15 are for PBDE support */
+
pwqeq->vport = vport;
return 0;
}
@@ -1378,7 +1377,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
data_sg = sg_next(data_sg);
sgl++;
}
- if (phba->nvme_embed_pbde) {
+ if (phba->cfg_enable_pbde) {
/* Use PBDE support for first SGL only, offset == 0 */
/* Words 13-15 */
bde = (struct ulp_bde64 *)
@@ -1394,10 +1393,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
}
- } else {
- bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
- memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
+ } else {
/* For this clause to be valid, the payload_length
* and sg_cnt must zero.
*/
@@ -1546,17 +1543,19 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
/* The node is shared with FCP IO, make sure the IO pending count does
* not exceed the programmed depth.
*/
- if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
- !expedite) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
- "6174 Fail IO, ndlp qdepth exceeded: "
- "idx %d DID %x pend %d qdepth %d\n",
- lpfc_queue_info->index, ndlp->nlp_DID,
- atomic_read(&ndlp->cmd_pending),
- ndlp->cmd_qdepth);
- atomic_inc(&lport->xmt_fcp_qdepth);
- ret = -EBUSY;
- goto out_fail;
+ if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
+ if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
+ !expedite) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6174 Fail IO, ndlp qdepth exceeded: "
+ "idx %d DID %x pend %d qdepth %d\n",
+ lpfc_queue_info->index, ndlp->nlp_DID,
+ atomic_read(&ndlp->cmd_pending),
+ ndlp->cmd_qdepth);
+ atomic_inc(&lport->xmt_fcp_qdepth);
+ ret = -EBUSY;
+ goto out_fail;
+ }
}
lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
@@ -1614,8 +1613,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_free_nvme_buf;
}
- atomic_inc(&ndlp->cmd_pending);
-
lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_queue_info->index, ndlp->nlp_DID);
@@ -1623,7 +1620,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
if (ret) {
atomic_inc(&lport->xmt_fcp_wqerr);
- atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6113 Fail IO, Could not issue WQE err %x "
"sid: x%x did: x%x oxid: x%x\n",
@@ -2378,6 +2374,11 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
lpfc_ncmd = lpfc_nvme_buf(phba);
}
spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
+
+ if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) {
+ atomic_inc(&ndlp->cmd_pending);
+ lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
+ }
return lpfc_ncmd;
}
@@ -2396,7 +2397,13 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
{
unsigned long iflag = 0;
+ if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
+ atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
+
lpfc_ncmd->nonsg_phys = 0;
+ lpfc_ncmd->ndlp = NULL;
+ lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;
+
if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6310 XB release deferred for "
@@ -2687,7 +2694,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_nvme_rport *oldrport;
struct nvme_fc_remote_port *remote_port;
struct nvme_fc_port_info rpinfo;
- struct lpfc_nodelist *prev_ndlp;
+ struct lpfc_nodelist *prev_ndlp = NULL;
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
"6006 Register NVME PORT. DID x%06x nlptype x%x\n",
@@ -2736,23 +2743,29 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_unlock_irq(&vport->phba->hbalock);
rport = remote_port->private;
if (oldrport) {
+ /* New remoteport record does not guarantee valid
+ * host private memory area.
+ */
+ prev_ndlp = oldrport->ndlp;
if (oldrport == remote_port->private) {
- /* Same remoteport. Just reuse. */
+ /* Same remoteport - ndlp should match.
+ * Just reuse.
+ */
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NVME_DISC,
"6014 Rebinding lport to "
"remoteport %p wwpn 0x%llx, "
- "Data: x%x x%x %p x%x x%06x\n",
+ "Data: x%x x%x %p %p x%x x%06x\n",
remote_port,
remote_port->port_name,
remote_port->port_id,
remote_port->port_role,
+ prev_ndlp,
ndlp,
ndlp->nlp_type,
ndlp->nlp_DID);
return 0;
}
- prev_ndlp = rport->ndlp;
/* Sever the ndlp<->rport association
* before dropping the ndlp ref from
@@ -2786,13 +2799,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
"6022 Binding new rport to "
- "lport %p Remoteport %p WWNN 0x%llx, "
+ "lport %p Remoteport %p rport %p WWNN 0x%llx, "
"Rport WWPN 0x%llx DID "
- "x%06x Role x%x, ndlp %p\n",
- lport, remote_port,
+ "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
+ lport, remote_port, rport,
rpinfo.node_name, rpinfo.port_name,
rpinfo.port_id, rpinfo.port_role,
- ndlp);
+ ndlp, prev_ndlp);
} else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE,
@@ -2970,7 +2983,7 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
struct lpfc_sli_ring *pring;
u32 i, wait_cnt = 0;
- if (phba->sli_rev < LPFC_SLI_REV4)
+ if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq)
return;
/* Cycle through all NVME rings and make sure all outstanding
@@ -2979,6 +2992,9 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
pring = phba->sli4_hba.nvme_wq[i]->pring;
+ if (!pring)
+ continue;
+
/* Retrieve everything on the txcmplq */
while (!list_empty(&pring->txcmplq)) {
msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
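
The qdepth accounting in lpfc_nvme.c moves out of the submit path: lpfc_get_nvme_buf() now takes the atomic_inc(&ndlp->cmd_pending) (only when lpfc_ndlp_check_qdepth() says the node is below the XRI limit) and marks the buffer with LPFC_BUMP_QDEPTH, and lpfc_release_nvme_buf() performs the matching atomic_dec() only when that flag is set. Because every buffer funnels through release on every completion and error path, the counter can no longer leak the way the old per-callsite inc/dec pairs could. A minimal sketch of the flag-paired counter, with generic names:

    #include <linux/atomic.h>

    #define BUF_BUMPED_QDEPTH 0x1

    struct node {
        atomic_t cmd_pending;
    };

    struct io_buf {
        struct node *ndlp;
        unsigned int flags;
    };

    static void buf_get(struct io_buf *buf, struct node *ndlp)
    {
        buf->ndlp = ndlp;
        atomic_inc(&ndlp->cmd_pending);
        buf->flags |= BUF_BUMPED_QDEPTH;	/* remember we counted this one */
    }

    static void buf_put(struct io_buf *buf)
    {
        /* undo only the increment this buffer actually took */
        if ((buf->flags & BUF_BUMPED_QDEPTH) && buf->ndlp)
            atomic_dec(&buf->ndlp->cmd_pending);
        buf->flags &= ~BUF_BUMPED_QDEPTH;
        buf->ndlp = NULL;
    }
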
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index 04bd463dd043..cfd4719be25c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -86,6 +86,7 @@ struct lpfc_nvme_buf {
uint16_t flags; /* TBD convert exch_busy to flags */
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
+#define LPFC_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint16_t cpu;
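
The LPFC_BUMP_QDEPTH flag defined above pairs the atomic_inc of ndlp->cmd_pending in lpfc_get_nvme_buf() with exactly one atomic_dec in lpfc_release_nvme_buf(). A minimal userspace sketch of that flag-paired counter pattern, with illustrative type names (node/buf are stand-ins, not the driver's real structures):

#include <stdatomic.h>
#include <stddef.h>

#define BUMP_QDEPTH 0x2			/* mirrors LPFC_BUMP_QDEPTH */

struct node { atomic_int cmd_pending; };
struct buf  { unsigned int flags; struct node *ndlp; };

/* get path: bump only when tracking applies, and record that we did */
static void buf_get(struct buf *b, struct node *n, int track_qdepth)
{
	if (track_qdepth && b) {
		atomic_fetch_add(&n->cmd_pending, 1);
		b->ndlp = n;
		b->flags |= BUMP_QDEPTH;
	}
}

/* release path: undo the bump exactly once, then clear the state */
static void buf_release(struct buf *b)
{
	if ((b->flags & BUMP_QDEPTH) && b->ndlp)
		atomic_fetch_sub(&b->ndlp->cmd_pending, 1);
	b->ndlp = NULL;
	b->flags &= ~BUMP_QDEPTH;
}

Clearing the flag and ndlp pointer on release is what lets a recycled buffer be handed out again without double-decrementing.
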
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 7271c9d885dd..b766afe10d3d 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channsel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -402,6 +402,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
/* Process FCP command */
if (rc == 0) {
+ ctxp->rqb_buffer = NULL;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
return;
@@ -1116,8 +1117,17 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
ctxp->oxid, ctxp->size, smp_processor_id());
+ if (!nvmebuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6425 Defer rcv: no buffer xri x%x: "
+ "flg %x ste %x\n",
+ ctxp->oxid, ctxp->flag, ctxp->state);
+ return;
+ }
+
tgtp = phba->targetport->private;
- atomic_inc(&tgtp->rcv_fcp_cmd_defer);
+ if (tgtp)
+ atomic_inc(&tgtp->rcv_fcp_cmd_defer);
/* Free the nvmebuf since a new buffer already replaced it */
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
@@ -1732,9 +1742,12 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t *payload;
uint32_t size, oxid, sid, rc;
- if (!nvmebuf || !phba->targetport) {
+ fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+
+ if (!phba->targetport) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6154 LS Drop IO\n");
+ "6154 LS Drop IO x%x\n", oxid);
oxid = 0;
size = 0;
sid = 0;
@@ -1744,9 +1757,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
payload = (uint32_t *)(nvmebuf->dbuf.virt);
- fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
- oxid = be16_to_cpu(fc_hdr->fh_ox_id);
sid = sli4_sid_from_fc_hdr(fc_hdr);
ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
@@ -1759,8 +1770,7 @@ dropit:
lpfc_nvmeio_data(phba, "NVMET LS DROP: "
"xri x%x sz %d from %06x\n",
oxid, size, sid);
- if (nvmebuf)
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
return;
}
ctxp->phba = phba;
@@ -1803,8 +1813,7 @@ dropit:
ctxp->oxid, rc);
/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
- if (nvmebuf)
- lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
atomic_inc(&tgtp->xmt_ls_abort);
lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
@@ -2492,7 +2501,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
/* Word 11 - set pbde later */
- if (phba->nvme_embed_pbde) {
+ if (phba->cfg_enable_pbde) {
do_pbde = 1;
} else {
bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
@@ -2607,16 +2616,19 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(cnt);
- if (do_pbde && i == 0) {
+ if (i == 0) {
bde = (struct ulp_bde64 *)&wqe->words[13];
- memset(bde, 0, sizeof(struct ulp_bde64));
- /* Words 13-15 (PBDE)*/
- bde->addrLow = sgl->addr_lo;
- bde->addrHigh = sgl->addr_hi;
- bde->tus.f.bdeSize =
- le32_to_cpu(sgl->sge_len);
- bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bde->tus.w = cpu_to_le32(bde->tus.w);
+ if (do_pbde) {
+ /* Words 13-15 (PBDE) */
+ bde->addrLow = sgl->addr_lo;
+ bde->addrHigh = sgl->addr_hi;
+ bde->tus.f.bdeSize =
+ le32_to_cpu(sgl->sge_len);
+ bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bde->tus.w = cpu_to_le32(bde->tus.w);
+ } else {
+ memset(bde, 0, sizeof(struct ulp_bde64));
+ }
}
sgl++;
ctxp->offset += cnt;
@@ -3105,11 +3117,17 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
}
aerr:
- ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
+ if (ctxp->flag & LPFC_NVMET_CTX_RLS)
+ list_del(&ctxp->list);
+ ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
ctxp->oxid, rc);
+ lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
return 1;
}
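
The words 13-15 hunk above reworks the first-SGE handling so the inline payload BDE is either populated (PBDE enabled) or explicitly zeroed, rather than left stale when PBDE is off. A rough sketch of that fill-or-zero step, using simplified stand-ins for the driver's sge/ulp_bde64 layouts:

#include <stdint.h>
#include <string.h>

struct sge   { uint32_t addr_lo, addr_hi, sge_len; };
struct bde64 { uint32_t addr_lo, addr_hi, size_flags; };

static void set_first_pbde(struct bde64 *bde, const struct sge *first_sgl,
			   int do_pbde)
{
	if (do_pbde) {
		/* mirror the first SGE into the inline BDE (words 13-15) */
		bde->addr_lo    = first_sgl->addr_lo;
		bde->addr_hi    = first_sgl->addr_hi;
		bde->size_flags = first_sgl->sge_len; /* BDE flags elided */
	} else {
		/* PBDE disabled: hardware must see zeros, not stale data */
		memset(bde, 0, sizeof(*bde));
	}
}
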
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 81f520abfd64..1aaff63f1f41 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a94fb9f8bb44..5c7858e735c9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -995,6 +995,11 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_unlock(&phba->scsi_buf_list_put_lock);
}
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
+
+ if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
+ atomic_inc(&ndlp->cmd_pending);
+ lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
+ }
return lpfc_cmd;
}
/**
@@ -1044,6 +1049,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
if (!found)
return NULL;
+
+ if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
+ atomic_inc(&ndlp->cmd_pending);
+ lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
+ }
return lpfc_cmd;
}
/**
@@ -1134,7 +1144,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
+ if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
+ atomic_dec(&psb->ndlp->cmd_pending);
+ psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
phba->lpfc_release_scsi_buf(phba, psb);
}
@@ -3017,8 +3030,8 @@ out:
if (err_type == BGS_GUARD_ERR_MASK) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x1);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
@@ -3028,8 +3041,8 @@ out:
} else if (err_type == BGS_REFTAG_ERR_MASK) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x3);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3040,8 +3053,8 @@ out:
} else if (err_type == BGS_APPTAG_ERR_MASK) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x2);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3096,7 +3109,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
spin_unlock(&_dump_buf_lock);
if (lpfc_bgs_get_invalid_prof(bgstat)) {
- cmd->result = ScsiResult(DID_ERROR, 0);
+ cmd->result = DID_ERROR << 16;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9072 BLKGRD: Invalid BG Profile in cmd"
" 0x%x lba 0x%llx blk cnt 0x%x "
@@ -3108,7 +3121,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
}
if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
- cmd->result = ScsiResult(DID_ERROR, 0);
+ cmd->result = DID_ERROR << 16;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9073 BLKGRD: Invalid BG PDIF Block in cmd"
" 0x%x lba 0x%llx blk cnt 0x%x "
@@ -3124,8 +3137,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x1);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9055 BLKGRD: Guard Tag error in cmd"
@@ -3140,8 +3153,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x3);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3157,8 +3170,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
0x10, 0x2);
- cmd->result = DRIVER_SENSE << 24
- | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3311,12 +3324,13 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
}
/*
* Setup the first Payload BDE. For FCoE we just key off
- * Performance Hints, for FC we utilize fcp_embed_pbde.
+ * Performance Hints, for FC we use lpfc_enable_pbde.
+ * We populate words 13-15 of IOCB/WQE.
*/
if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
- phba->fcp_embed_pbde) {
+ phba->cfg_enable_pbde) {
bde = (struct ulp_bde64 *)
- &(iocb_cmd->unsli3.sli3Words[5]);
+ &(iocb_cmd->unsli3.sli3Words[5]);
bde->addrLow = first_data_sgl->addr_lo;
bde->addrHigh = first_data_sgl->addr_hi;
bde->tus.f.bdeSize =
@@ -3330,6 +3344,13 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
+
+ if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
+ phba->cfg_enable_pbde) {
+ bde = (struct ulp_bde64 *)
+ &(iocb_cmd->unsli3.sli3Words[5]);
+ memset(bde, 0, (sizeof(uint32_t) * 3));
+ }
}
/*
@@ -3866,7 +3887,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
out:
- cmnd->result = ScsiResult(host_status, scsi_status);
+ cmnd->result = host_status << 16 | scsi_status;
lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
@@ -4019,7 +4040,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
break;
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
- cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
fast_path_evt = lpfc_alloc_fast_evt(phba);
if (!fast_path_evt)
break;
@@ -4053,14 +4074,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
lpfc_cmd->result ==
IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
- cmd->result = ScsiResult(DID_NO_CONNECT, 0);
+ cmd->result = DID_NO_CONNECT << 16;
break;
}
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = ScsiResult(DID_REQUEUE, 0);
+ cmd->result = DID_REQUEUE << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
@@ -4094,16 +4115,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
/* else: fall through */
default:
- cmd->result = ScsiResult(DID_ERROR, 0);
+ cmd->result = DID_ERROR << 16;
break;
}
if (!pnode || !NLP_CHK_NODE_ACT(pnode)
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
- cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
- SAM_STAT_BUSY);
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
+ SAM_STAT_BUSY;
} else
- cmd->result = ScsiResult(DID_OK, 0);
+ cmd->result = DID_OK << 16;
if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
@@ -4122,7 +4143,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
spin_lock_irqsave(shost->host_lock, flags);
if (pnode && NLP_CHK_NODE_ACT(pnode)) {
- atomic_dec(&pnode->cmd_pending);
if (pnode->cmd_qdepth >
atomic_read(&pnode->cmd_pending) &&
(atomic_read(&pnode->cmd_pending) >
@@ -4135,8 +4155,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
pnode->last_change_time = jiffies;
}
spin_unlock_irqrestore(shost->host_lock, flags);
- } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
- atomic_dec(&pnode->cmd_pending);
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
@@ -4530,6 +4548,11 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
int err;
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+
+ /* sanity check on references */
+ if (unlikely(!rdata) || unlikely(!rport))
+ goto out_fail_command;
+
err = fc_remote_port_chkready(rport);
if (err) {
cmnd->result = err;
@@ -4555,33 +4578,36 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
*/
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
goto out_tgt_busy;
- if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
- "3377 Target Queue Full, scsi Id:%d Qdepth:%d"
- " Pending command:%d"
- " WWNN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
- " WWPN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
- ndlp->nlp_sid, ndlp->cmd_qdepth,
- atomic_read(&ndlp->cmd_pending),
- ndlp->nlp_nodename.u.wwn[0],
- ndlp->nlp_nodename.u.wwn[1],
- ndlp->nlp_nodename.u.wwn[2],
- ndlp->nlp_nodename.u.wwn[3],
- ndlp->nlp_nodename.u.wwn[4],
- ndlp->nlp_nodename.u.wwn[5],
- ndlp->nlp_nodename.u.wwn[6],
- ndlp->nlp_nodename.u.wwn[7],
- ndlp->nlp_portname.u.wwn[0],
- ndlp->nlp_portname.u.wwn[1],
- ndlp->nlp_portname.u.wwn[2],
- ndlp->nlp_portname.u.wwn[3],
- ndlp->nlp_portname.u.wwn[4],
- ndlp->nlp_portname.u.wwn[5],
- ndlp->nlp_portname.u.wwn[6],
- ndlp->nlp_portname.u.wwn[7]);
- goto out_tgt_busy;
+ if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
+ if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
+ "3377 Target Queue Full, scsi Id:%d "
+ "Qdepth:%d Pending command:%d"
+ " WWNN:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x, "
+ " WWPN:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x",
+ ndlp->nlp_sid, ndlp->cmd_qdepth,
+ atomic_read(&ndlp->cmd_pending),
+ ndlp->nlp_nodename.u.wwn[0],
+ ndlp->nlp_nodename.u.wwn[1],
+ ndlp->nlp_nodename.u.wwn[2],
+ ndlp->nlp_nodename.u.wwn[3],
+ ndlp->nlp_nodename.u.wwn[4],
+ ndlp->nlp_nodename.u.wwn[5],
+ ndlp->nlp_nodename.u.wwn[6],
+ ndlp->nlp_nodename.u.wwn[7],
+ ndlp->nlp_portname.u.wwn[0],
+ ndlp->nlp_portname.u.wwn[1],
+ ndlp->nlp_portname.u.wwn[2],
+ ndlp->nlp_portname.u.wwn[3],
+ ndlp->nlp_portname.u.wwn[4],
+ ndlp->nlp_portname.u.wwn[5],
+ ndlp->nlp_portname.u.wwn[6],
+ ndlp->nlp_portname.u.wwn[7]);
+ goto out_tgt_busy;
+ }
}
- atomic_inc(&ndlp->cmd_pending);
lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
if (lpfc_cmd == NULL) {
@@ -4599,6 +4625,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
*/
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
+ lpfc_cmd->ndlp = ndlp;
lpfc_cmd->timeout = 0;
lpfc_cmd->start_time = jiffies;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
@@ -4681,7 +4708,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
- atomic_dec(&ndlp->cmd_pending);
return SCSI_MLQUEUE_HOST_BUSY;
out_tgt_busy:
@@ -4714,7 +4740,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
struct lpfc_scsi_buf *lpfc_cmd;
IOCB_t *cmd, *icmd;
int ret = SUCCESS, status = 0;
- struct lpfc_sli_ring *pring_s4;
+ struct lpfc_sli_ring *pring_s4 = NULL;
int ret_val;
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
@@ -4744,8 +4770,25 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
}
iocb = &lpfc_cmd->cur_iocbq;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (!(phba->cfg_fof) ||
+ (!(iocb->iocb_flag & LPFC_IO_FOF))) {
+ pring_s4 =
+ phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring;
+ } else {
+ iocb->hba_wqidx = 0;
+ pring_s4 = phba->sli4_hba.oas_wq->pring;
+ }
+ if (!pring_s4) {
+ ret = FAILED;
+ goto out_unlock;
+ }
+ spin_lock(&pring_s4->ring_lock);
+ }
/* the command is in process of being cancelled */
if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3169 SCSI Layer abort requested I/O has been "
@@ -4759,6 +4802,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
* see the completion before the eh fired. Just return SUCCESS.
*/
if (lpfc_cmd->pCmd != cmnd) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3170 SCSI Layer abort requested I/O has been "
"completed by LLD.\n");
@@ -4771,6 +4816,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3389 SCSI Layer I/O Abort Request is pending\n");
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
goto wait_for_cmpl;
}
@@ -4778,6 +4825,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
abtsiocb = __lpfc_sli_get_iocbq(phba);
if (abtsiocb == NULL) {
ret = FAILED;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock(&pring_s4->ring_lock);
goto out_unlock;
}
@@ -4815,14 +4864,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
abtsiocb->vport = vport;
+ lpfc_cmd->waitq = &waitq;
if (phba->sli_rev == LPFC_SLI_REV4) {
- pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocb);
- if (pring_s4 == NULL) {
- ret = FAILED;
- goto out_unlock;
- }
/* Note: both hbalock and ring_lock must be set here */
- spin_lock(&pring_s4->ring_lock);
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
abtsiocb, 0);
spin_unlock(&pring_s4->ring_lock);
@@ -4835,6 +4879,17 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (ret_val == IOCB_ERROR) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock_irqsave(&pring_s4->ring_lock, flags);
+ else
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* Indicate the IO is not being aborted by the driver. */
+ iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ lpfc_cmd->waitq = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock_irqrestore(&pring_s4->ring_lock, flags);
+ else
+ spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
goto out;
@@ -4845,7 +4900,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
wait_for_cmpl:
- lpfc_cmd->waitq = &waitq;
/* Wait for abort to complete */
wait_event_timeout(waitq,
(lpfc_cmd->pCmd != cmnd),
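
The abort-handler hunks above move the SLI-4 ring_lock acquisition ahead of the command-state checks, which obliges every early-exit path to drop it before returning. A compact pthread sketch of that lock-early, unlock-on-every-path shape (lock names are placeholders, not the driver's):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t hbalock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

static int abort_handler(bool sli4, bool on_txcmplq)
{
	pthread_mutex_lock(&hbalock);
	if (sli4)
		pthread_mutex_lock(&ring_lock);	/* taken before any checks */

	if (!on_txcmplq) {			/* already being cancelled */
		if (sli4)
			pthread_mutex_unlock(&ring_lock);
		pthread_mutex_unlock(&hbalock);
		return 0;			/* nothing left to abort */
	}

	/* ... issue the abort with both locks held ... */

	if (sli4)
		pthread_mutex_unlock(&ring_lock);
	pthread_mutex_unlock(&hbalock);
	return 0;
}
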
@@ -5006,6 +5060,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
lpfc_cmd->pCmd = cmnd;
+ lpfc_cmd->ndlp = pnode;
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
task_mgmt_cmd);
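
The ScsiResult() removals above spell out the SCSI result-word layout directly: driver byte in bits 31-24, host byte in bits 23-16, SCSI status in the low byte. A standalone illustration, with the constants redefined locally using the kernel's values:

#include <stdint.h>
#include <stdio.h>

#define DRIVER_SENSE		 0x08
#define DID_ABORT		 0x05
#define SAM_STAT_CHECK_CONDITION 0x02

int main(void)
{
	uint32_t result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			  SAM_STAT_CHECK_CONDITION;

	/* prints result=0x08050002 host=0x05 status=0x02 */
	printf("result=0x%08x host=0x%02x status=0x%02x\n",
	       result, (result >> 16) & 0xff, result & 0xff);
	return 0;
}
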
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index c38e4da71f5f..cc99859774ff 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -134,11 +134,13 @@ struct lpfc_scsi_buf {
struct list_head list;
struct scsi_cmnd *pCmd;
struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *ndlp;
uint32_t timeout;
uint16_t flags; /* TBD convert exch_busy to flags */
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
+#define LPFC_SBUF_BUMP_QDEPTH 0x8 /* bumped queue depth counter */
uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 6f3c00a233ec..9830bdb6e072 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -145,6 +145,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
uint32_t idx;
uint32_t i = 0;
uint8_t *tmp;
+ u32 if_type;
/* sanity check on queue memory */
if (unlikely(!q))
@@ -199,8 +200,14 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
q->queue_id);
} else {
bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
- bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
+
+ /* Leave bits <23:16> clear for if_type 6 dpp */
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &q->phba->sli4_hba.sli_intf);
+ if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
+ bf_set(lpfc_wq_db_list_fm_index, &doorbell,
+ host_index);
}
} else if (q->db_format == LPFC_DB_RING_FORMAT) {
bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
@@ -4591,7 +4598,7 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
- psli->stats_start = get_seconds();
+ psli->stats_start = ktime_get_seconds();
/* Give the INITFF and Post time to settle. */
mdelay(100);
@@ -4638,7 +4645,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
- psli->stats_start = get_seconds();
+ psli->stats_start = ktime_get_seconds();
/* Reset HBA AER if it was enabled, note hba_flag was reset above */
if (hba_aer_enabled)
@@ -9110,8 +9117,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
}
/* Note, word 10 is already initialized to 0 */
- /* Don't set PBDE for Perf hints, just fcp_embed_pbde */
- if (phba->fcp_embed_pbde)
+ /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
+ if (phba->cfg_enable_pbde)
bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
else
bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
@@ -9174,8 +9181,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
}
/* Note, word 10 is already initialized to 0 */
- /* Don't set PBDE for Perf hints, just fcp_embed_pbde */
- if (phba->fcp_embed_pbde)
+ /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
+ if (phba->cfg_enable_pbde)
bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
else
bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
@@ -10696,6 +10703,12 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(&phba->hbalock);
if (phba->sli_rev < LPFC_SLI_REV4) {
+ if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
+ irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
+ spin_unlock_irq(&phba->hbalock);
+ goto release_iocb;
+ }
if (abort_iotag != 0 &&
abort_iotag <= phba->sli.last_iotag)
abort_iocb =
@@ -10717,6 +10730,7 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(&phba->hbalock);
}
+release_iocb:
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
@@ -10773,6 +10787,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *iabt = NULL;
int retval;
unsigned long iflags;
+ struct lpfc_nodelist *ndlp;
lockdep_assert_held(&phba->hbalock);
@@ -10803,9 +10818,13 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (phba->sli_rev == LPFC_SLI_REV4) {
iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
iabt->un.acxri.abortContextTag = cmdiocb->iotag;
- }
- else
+ } else {
iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
+ if (pring->ringno == LPFC_ELS_RING) {
+ ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
+ iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
+ }
+ }
iabt->ulpLe = 1;
iabt->ulpClass = icmd->ulpClass;
@@ -11084,10 +11103,11 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
struct lpfc_scsi_buf *lpfc_cmd;
int rc = 1;
- if (!(iocbq->iocb_flag & LPFC_IO_FCP))
+ if (iocbq->vport != vport)
return rc;
- if (iocbq->vport != vport)
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+ !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
return rc;
lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
@@ -11097,13 +11117,13 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
switch (ctx_cmd) {
case LPFC_CTX_LUN:
- if ((lpfc_cmd->rdata->pnode) &&
+ if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
(lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
(scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
rc = 0;
break;
case LPFC_CTX_TGT:
- if ((lpfc_cmd->rdata->pnode) &&
+ if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
(lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
rc = 0;
break;
@@ -11218,6 +11238,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
int errcnt = 0, ret_val = 0;
int i;
+ /* all I/Os are in process of being flushed */
+ if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
+ return errcnt;
+
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 431754195505..34b7ab69b9b4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -339,7 +339,7 @@ struct lpfc_sli {
struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */
size_t iocbq_lookup_len; /* current lengs of the array */
uint16_t last_iotag; /* last allocated IOTAG */
- unsigned long stats_start; /* in seconds */
+ time64_t stats_start; /* in seconds */
struct lpfc_lnk_stat lnk_stat_offsets;
};
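
The stats_start change above pairs a time64_t field with ktime_get_seconds(), a 64-bit monotonic counter that neither overflows in 2038 the way a 32-bit unsigned long does nor jumps with wall-clock changes. A userspace analogue using CLOCK_MONOTONIC:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t stats_start;	/* plays the role of psli->stats_start */

static int64_t mono_seconds(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec;
}

int main(void)
{
	stats_start = mono_seconds();	/* reset point, as in brdrestart */
	/* ... work ... */
	printf("seconds since reset: %lld\n",
	       (long long)(mono_seconds() - stats_start));
	return 0;
}
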
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index cf64aca82bd0..399c0015c546 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -490,6 +490,7 @@ struct lpfc_pc_sli4_params {
uint8_t eqav;
uint8_t cqav;
uint8_t wqsize;
+ uint8_t bv1s;
#define LPFC_WQ_SZ64_SUPPORT 1
#define LPFC_WQ_SZ128_SUPPORT 2
uint8_t wqpcnt;
@@ -774,7 +775,9 @@ struct lpfc_rdp_context {
struct lpfc_lcb_context {
uint8_t sub_command;
uint8_t type;
+ uint8_t capability;
uint8_t frequency;
+ uint16_t duration;
uint16_t ox_id;
uint16_t rx_id;
struct lpfc_nodelist *ndlp;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 18c23afcf46b..501249509af4 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.0.0.4"
+#define LPFC_DRIVER_VERSION "12.0.0.6"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -33,5 +33,5 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
#define LPFC_COPYRIGHT "Copyright (C) 2017-2018 Broadcom. All Rights " \
- "Reserved. The term \"Broadcom\" refers to Broadcom Limited " \
+ "Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 81bc12dedf41..1ff0f7de9105 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index 62295971f66c..f4b8528dd2e7 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 8e8cf1145d7f..8c7154143a4e 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -371,7 +371,7 @@ mega_runpendq(adapter_t *adapter)
* The command queuing entry point for the mid-layer.
*/
static int
-megaraid_queue_lck(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *))
+megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
{
adapter_t *adapter;
scb_t *scb;
@@ -425,7 +425,7 @@ static DEF_SCSI_QCMD(megaraid_queue)
* commands.
*/
static inline scb_t *
-mega_allocate_scb(adapter_t *adapter, Scsi_Cmnd *cmd)
+mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd)
{
struct list_head *head = &adapter->free_list;
scb_t *scb;
@@ -457,7 +457,7 @@ mega_allocate_scb(adapter_t *adapter, Scsi_Cmnd *cmd)
* and the channel number.
*/
static inline int
-mega_get_ldrv_num(adapter_t *adapter, Scsi_Cmnd *cmd, int channel)
+mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
{
int tgt;
int ldrv_num;
@@ -520,7 +520,7 @@ mega_get_ldrv_num(adapter_t *adapter, Scsi_Cmnd *cmd, int channel)
* boot settings.
*/
static scb_t *
-mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
+mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
{
mega_ext_passthru *epthru;
mega_passthru *pthru;
@@ -951,8 +951,8 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
* prepare a command for the scsi physical devices.
*/
static mega_passthru *
-mega_prepare_passthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
- int channel, int target)
+mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
+ int channel, int target)
{
mega_passthru *pthru;
@@ -1015,8 +1015,9 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
* commands for devices which can take extended CDBs (>10 bytes)
*/
static mega_ext_passthru *
-mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
- int channel, int target)
+mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
+ struct scsi_cmnd *cmd,
+ int channel, int target)
{
mega_ext_passthru *epthru;
@@ -1417,7 +1418,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
{
mega_ext_passthru *epthru = NULL;
struct scatterlist *sgl;
- Scsi_Cmnd *cmd = NULL;
+ struct scsi_cmnd *cmd = NULL;
mega_passthru *pthru = NULL;
mbox_t *mbox = NULL;
u8 c;
@@ -1652,14 +1653,14 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
static void
mega_rundoneq (adapter_t *adapter)
{
- Scsi_Cmnd *cmd;
+ struct scsi_cmnd *cmd;
struct list_head *pos;
list_for_each(pos, &adapter->completed_list) {
struct scsi_pointer* spos = (struct scsi_pointer *)pos;
- cmd = list_entry(spos, Scsi_Cmnd, SCp);
+ cmd = list_entry(spos, struct scsi_cmnd, SCp);
cmd->scsi_done(cmd);
}
@@ -1722,7 +1723,7 @@ static int
mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
{
struct scatterlist *sg;
- Scsi_Cmnd *cmd;
+ struct scsi_cmnd *cmd;
int sgcnt;
int idx;
@@ -1869,7 +1870,7 @@ megaraid_info(struct Scsi_Host *host)
* aborted. All the commands issued to the F/W must complete.
*/
static int
-megaraid_abort(Scsi_Cmnd *cmd)
+megaraid_abort(struct scsi_cmnd *cmd)
{
adapter_t *adapter;
int rval;
@@ -1933,7 +1934,7 @@ megaraid_reset(struct scsi_cmnd *cmd)
* issued to the controller, abort/reset it. Otherwise return failure
*/
static int
-megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
+megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
{
struct list_head *pos, *next;
scb_t *scb;
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 18e85d9267ff..cce23a086fbe 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -191,7 +191,7 @@ typedef struct {
u32 dma_type;
u32 dma_direction;
- Scsi_Cmnd *cmd;
+ struct scsi_cmnd *cmd;
dma_addr_t dma_h_bulkdata;
dma_addr_t dma_h_sgdata;
@@ -942,7 +942,7 @@ static int issue_scb(adapter_t *, scb_t *);
static int mega_setup_mailbox(adapter_t *);
static int megaraid_queue (struct Scsi_Host *, struct scsi_cmnd *);
-static scb_t * mega_build_cmd(adapter_t *, Scsi_Cmnd *, int *);
+static scb_t * mega_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
static void __mega_runpendq(adapter_t *);
static int issue_scb_block(adapter_t *, u_char *);
@@ -951,9 +951,9 @@ static irqreturn_t megaraid_isr_iomapped(int, void *);
static void mega_free_scb(adapter_t *, scb_t *);
-static int megaraid_abort(Scsi_Cmnd *);
-static int megaraid_reset(Scsi_Cmnd *);
-static int megaraid_abort_and_reset(adapter_t *, Scsi_Cmnd *, int);
+static int megaraid_abort(struct scsi_cmnd *);
+static int megaraid_reset(struct scsi_cmnd *);
+static int megaraid_abort_and_reset(adapter_t *, struct scsi_cmnd *, int);
static int megaraid_biosparam(struct scsi_device *, struct block_device *,
sector_t, int []);
@@ -983,9 +983,9 @@ static int mega_internal_dev_inquiry(adapter_t *, u8, u8, dma_addr_t);
static int mega_support_ext_cdb(adapter_t *);
static mega_passthru* mega_prepare_passthru(adapter_t *, scb_t *,
- Scsi_Cmnd *, int, int);
+ struct scsi_cmnd *, int, int);
static mega_ext_passthru* mega_prepare_extpassthru(adapter_t *,
- scb_t *, Scsi_Cmnd *, int, int);
+ scb_t *, struct scsi_cmnd *, int, int);
static void mega_enum_raid_scsi(adapter_t *);
static void mega_get_boot_drv(adapter_t *);
static int mega_support_random_del(adapter_t *);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 75dc25f78336..67d356d84717 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.705.02.00-rc1"
-#define MEGASAS_RELDATE "April 4, 2018"
+#define MEGASAS_VERSION "07.706.03.00-rc1"
+#define MEGASAS_RELDATE "May 21, 2018"
/*
* Device IDs
@@ -709,7 +709,8 @@ struct MR_TARGET_PROPERTIES {
u32 max_io_size_kb;
u32 device_qdepth;
u32 sector_size;
- u8 reserved[500];
+ u8 reset_tmo;
+ u8 reserved[499];
} __packed;
/*
@@ -1400,6 +1401,19 @@ struct megasas_ctrl_info {
#endif
} adapter_operations4;
u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */
+
+ u32 size;
+ u32 pad1;
+
+ u8 reserved6[64];
+
+ u32 rsvdForAdptOp[64];
+
+ u8 reserved7[3];
+
+ u8 TaskAbortTO; /* Timeout value in seconds used by Abort Task TM */
+ u8 MaxResetTO; /* Max Supported Reset timeout in seconds. */
+ u8 reserved8[3];
} __packed;
/*
@@ -1472,6 +1486,7 @@ enum FW_BOOT_CONTEXT {
#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
#define MEGASAS_BLOCKED_CMD_TIMEOUT 60
+#define MEGASAS_DEFAULT_TM_TIMEOUT 50
/*
* FW reports the maximum of number of commands that it can accept (maximum
* commands that can be outstanding) at any time. The driver must report a
@@ -1915,7 +1930,9 @@ struct MR_PRIV_DEVICE {
bool is_tm_capable;
bool tm_busy;
atomic_t r1_ldio_hint;
- u8 interface_type;
+ u8 interface_type;
+ u8 task_abort_tmo;
+ u8 target_reset_tmo;
};
struct megasas_cmd;
@@ -2291,6 +2308,8 @@ struct megasas_instance {
u8 adapter_type;
bool consistent_mask_64bit;
bool support_nvme_passthru;
+ u8 task_abort_tmo;
+ u8 max_reset_tmo;
};
struct MR_LD_VF_MAP {
u32 size;
@@ -2512,7 +2531,11 @@ int megasas_get_ctrl_info(struct megasas_instance *instance);
/* PD sequence */
int
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend);
-void megasas_set_dynamic_target_properties(struct scsi_device *sdev);
+void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
+ bool is_target_prop);
+int megasas_get_target_prop(struct megasas_instance *instance,
+ struct scsi_device *sdev);
+
int megasas_set_crash_dump_params(struct megasas_instance *instance,
u8 crash_buf_state);
void megasas_free_host_crash_buffer(struct megasas_instance *instance);
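
The MR_TARGET_PROPERTIES hunk above carves reset_tmo out of the reserved pad (500 bytes become one named byte plus 499), keeping the packed structure the same size on the wire. A before/after sketch with a compile-time size check; the layout is simplified, only the carve-out idea is faithful:

#include <assert.h>
#include <stdint.h>

struct target_props_old {
	uint32_t max_io_size_kb, device_qdepth, sector_size;
	uint8_t  reserved[500];
} __attribute__((packed));

struct target_props_new {
	uint32_t max_io_size_kb, device_qdepth, sector_size;
	uint8_t  reset_tmo;		/* byte promoted out of the pad */
	uint8_t  reserved[499];
} __attribute__((packed));

/* firmware ABI: the carve-out must not change the structure size */
static_assert(sizeof(struct target_props_old) ==
	      sizeof(struct target_props_new), "size must stay 512 bytes");
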
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 71d97573a667..9aa9590c5373 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -120,8 +120,7 @@ static int megasas_register_aen(struct megasas_instance *instance,
u32 seq_num, u32 class_locale_word);
static void megasas_get_pd_info(struct megasas_instance *instance,
struct scsi_device *sdev);
-static int megasas_get_target_prop(struct megasas_instance *instance,
- struct scsi_device *sdev);
+
/*
* PCI ID table for all supported controllers
*/
@@ -1794,7 +1793,8 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
*
* Returns void
*/
-void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
+void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
+ bool is_target_prop)
{
u16 pd_index = 0, ld;
u32 device_id;
@@ -1834,6 +1834,22 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
mr_device_priv_data->is_tm_capable =
pd_sync->seq[pd_index].capability.tmCapable;
}
+
+ if (is_target_prop && instance->tgt_prop->reset_tmo) {
+ /*
+ * If FW provides a target reset timeout value, driver will use
+ * it. If not set, fallback to default values.
+ */
+ mr_device_priv_data->target_reset_tmo =
+ min_t(u8, instance->max_reset_tmo,
+ instance->tgt_prop->reset_tmo);
+ mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
+ } else {
+ mr_device_priv_data->target_reset_tmo =
+ MEGASAS_DEFAULT_TM_TIMEOUT;
+ mr_device_priv_data->task_abort_tmo =
+ MEGASAS_DEFAULT_TM_TIMEOUT;
+ }
}
/*
@@ -1967,10 +1983,10 @@ static int megasas_slave_configure(struct scsi_device *sdev)
is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
megasas_set_static_target_properties(sdev, is_target_prop);
- mutex_unlock(&instance->reset_mutex);
-
/* This sdev property may change post OCR */
- megasas_set_dynamic_target_properties(sdev);
+ megasas_set_dynamic_target_properties(sdev, is_target_prop);
+
+ mutex_unlock(&instance->reset_mutex);
return 0;
}
@@ -2818,7 +2834,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
"SCSI command pointer: (%p)\t SCSI host state: %d\t"
" SCSI host busy: %d\t FW outstanding: %d\n",
scmd, scmd->device->host->shost_state,
- atomic_read((atomic_t *)&scmd->device->host->host_busy),
+ scsi_host_busy(scmd->device->host),
atomic_read(&instance->fw_outstanding));
/*
@@ -4720,6 +4736,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
ci->adapter_operations4.support_pd_map_target_id;
instance->support_nvme_passthru =
ci->adapter_operations4.support_nvme_passthru;
+ instance->task_abort_tmo = ci->TaskAbortTO;
+ instance->max_reset_tmo = ci->MaxResetTO;
/*Check whether controller is iMR or MR */
instance->is_imr = (ci->memory_size ? 0 : 1);
@@ -4738,6 +4756,10 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
instance->secure_jbod_support ? "Yes" : "No");
dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
instance->support_nvme_passthru ? "Yes" : "No");
+ dev_info(&instance->pdev->dev,
+ "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
+ instance->task_abort_tmo, instance->max_reset_tmo);
+
break;
case DCMD_TIMEOUT:
@@ -4755,14 +4777,15 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
__func__, __LINE__);
break;
}
+ break;
case DCMD_FAILED:
megaraid_sas_kill_hba(instance);
break;
}
- megasas_return_cmd(instance, cmd);
-
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -5831,7 +5854,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
*
* Returns 0 on success non-zero on failure.
*/
-static int
+int
megasas_get_target_prop(struct megasas_instance *instance,
struct scsi_device *sdev)
{
@@ -6789,6 +6812,9 @@ megasas_resume(struct pci_dev *pdev)
goto fail_init_mfi;
}
+ if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
+ goto fail_init_mfi;
+
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
@@ -6842,12 +6868,12 @@ megasas_wait_for_adapter_operational(struct megasas_instance *instance)
{
int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
int i;
-
- if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
- return 1;
+ u8 adp_state;
for (i = 0; i < wait_time; i++) {
- if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
+ adp_state = atomic_read(&instance->adprecovery);
+ if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
+ (adp_state == MEGASAS_HW_CRITICAL_ERROR))
break;
if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
@@ -6856,9 +6882,10 @@ megasas_wait_for_adapter_operational(struct megasas_instance *instance)
msleep(1000);
}
- if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
- dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
- __func__);
+ if (adp_state != MEGASAS_HBA_OPERATIONAL) {
+ dev_info(&instance->pdev->dev,
+ "%s HBA failed to become operational, adp_state %d\n",
+ __func__, adp_state);
return 1;
}
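
The task-management timeout changes above and in the fusion file below follow one policy: roughly, prefer the firmware-reported per-target reset value clamped to the controller maximum, fall back to the 50-second default when FW reports nothing, then select the abort or reset figure by TM type in megasas_issue_tm(). A small sketch of that selection logic:

#include <stdint.h>

#define DEFAULT_TM_TIMEOUT 50	/* seconds, MEGASAS_DEFAULT_TM_TIMEOUT */

enum tm_type { TM_ABORT_TASK, TM_TARGET_RESET };

static uint8_t clamp_tmo(uint8_t fw_tmo, uint8_t max_tmo)
{
	if (!fw_tmo)				/* FW reported nothing */
		return DEFAULT_TM_TIMEOUT;
	return fw_tmo < max_tmo ? fw_tmo : max_tmo; /* min_t(u8, ...) */
}

static uint8_t pick_tm_timeout(enum tm_type type, uint8_t abort_tmo,
			       uint8_t reset_tmo)
{
	switch (type) {
	case TM_ABORT_TASK:
		return abort_tmo;
	case TM_TARGET_RESET:
		return reset_tmo;
	}
	return DEFAULT_TM_TIMEOUT;
}
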
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 94c23ad51179..c7f95bace353 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -4108,7 +4108,8 @@ megasas_tm_response_code(struct megasas_instance *instance,
*/
static int
megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
- uint channel, uint id, u16 smid_task, u8 type)
+ uint channel, uint id, u16 smid_task, u8 type,
+ struct MR_PRIV_DEVICE *mr_device_priv_data)
{
struct MR_TASK_MANAGE_REQUEST *mr_request;
struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
@@ -4119,6 +4120,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
struct fusion_context *fusion = NULL;
struct megasas_cmd_fusion *scsi_lookup;
int rc;
+ int timeout = MEGASAS_DEFAULT_TM_TIMEOUT;
struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
fusion = instance->ctrl_context;
@@ -4170,7 +4172,16 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
init_completion(&cmd_fusion->done);
megasas_fire_cmd_fusion(instance, req_desc);
- timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ);
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ timeout = mr_device_priv_data->task_abort_tmo;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ timeout = mr_device_priv_data->target_reset_tmo;
+ break;
+ }
+
+ timeleft = wait_for_completion_timeout(&cmd_fusion->done, timeout * HZ);
if (!timeleft) {
dev_err(&instance->pdev->dev,
@@ -4363,7 +4374,8 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
mr_device_priv_data->tm_busy = 1;
ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, smid,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ mr_device_priv_data);
mr_device_priv_data->tm_busy = 0;
mutex_unlock(&instance->reset_mutex);
@@ -4435,7 +4447,8 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
mr_device_priv_data->tm_busy = 1;
ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ mr_device_priv_data);
mr_device_priv_data->tm_busy = 0;
mutex_unlock(&instance->reset_mutex);
out:
@@ -4490,6 +4503,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
u32 io_timeout_in_crash_mode = 0;
struct scsi_cmnd *scmd_local = NULL;
struct scsi_device *sdev;
+ int ret_target_prop = DCMD_FAILED;
+ bool is_target_prop = false;
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
@@ -4661,9 +4676,6 @@ transition_to_ready:
megasas_setup_jbod_map(instance);
- shost_for_each_device(sdev, shost)
- megasas_set_dynamic_target_properties(sdev);
-
/* reset stream detection array */
if (instance->adapter_type == VENTURA_SERIES) {
for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
@@ -4677,6 +4689,16 @@ transition_to_ready:
clear_bit(MEGASAS_FUSION_IN_RESET,
&instance->reset_flags);
instance->instancet->enable_intr(instance);
+
+ shost_for_each_device(sdev, shost) {
+ if ((instance->tgt_prop) &&
+ (instance->nvme_page_size))
+ ret_target_prop = megasas_get_target_prop(instance, sdev);
+
+ is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
+ megasas_set_dynamic_target_properties(sdev, is_target_prop);
+ }
+
atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
dev_info(&instance->pdev->dev, "Interrupts are enabled and"
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 1753e42826dd..82e01dbe90af 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -594,9 +594,9 @@ static void mesh_done(struct mesh_state *ms, int start_next)
ms->current_req = NULL;
tp->current_req = NULL;
if (cmd) {
- cmd->result = (ms->stat << 16) + cmd->SCp.Status;
+ cmd->result = (ms->stat << 16) | cmd->SCp.Status;
if (ms->stat == DID_OK)
- cmd->result += (cmd->SCp.Message << 8);
+ cmd->result |= cmd->SCp.Message << 8;
if (DEBUG_TARGET(cmd)) {
printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
cmd->result, ms->data_ptr, scsi_bufflen(cmd));
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e44c91edf92d..59d7844ee022 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -102,8 +102,39 @@ static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
/**
+ * mpt3sas_base_check_cmd_timeout - Function
+ * to check timeout and command termination due
+ * to Host reset.
+ *
+ * @ioc: per adapter object.
+ * @status: Status of issued command.
+ * @mpi_request:mf request pointer.
+ * @sz: size of buffer.
+ *
+ * @Returns - 1/0 Reset to be done or Not
+ */
+u8
+mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
+ u8 status, void *mpi_request, int sz)
+{
+ u8 issue_reset = 0;
+
+ if (!(status & MPT3_CMD_RESET))
+ issue_reset = 1;
+
+ pr_err(MPT3SAS_FMT "Command %s\n", ioc->name,
+ ((issue_reset == 0) ? "terminated due to Host Reset" : "Timeout"));
+ _debug_dump_mf(mpi_request, sz);
+
+ return issue_reset;
+}
+
+/**
* _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
+ * @val: ?
+ * @kp: ?
*
+ * Return: ?
*/
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
@@ -132,8 +163,6 @@ module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
* @ioc: per adapter object
* @reply: reply message frame(lower 32bit addr)
* @index: System request message index.
- *
- * @Returns - Nothing
*/
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
@@ -156,7 +185,7 @@ _base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
* _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
* to system/BAR0 region.
*
- * @dst_iomem: Pointer to the destinaltion location in BAR0 space.
+ * @dst_iomem: Pointer to the destination location in BAR0 space.
* @src: Pointer to the Source data.
* @size: Size of data to be copied.
*/
@@ -197,7 +226,7 @@ _base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
* @smid: system request message index
* @sge_chain_count: Scatter gather chain count.
*
- * @Return: chain address.
+ * Return: the chain address.
*/
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -223,7 +252,7 @@ _base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @smid: system request message index
* @sge_chain_count: Scatter gather chain count.
*
- * @Return - Physical chain address.
+ * Return: Physical chain address.
*/
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -248,7 +277,7 @@ _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @ioc: per adapter object
* @smid: system request message index
*
- * @Returns - Pointer to buffer location in BAR0.
+ * Return: Pointer to buffer location in BAR0.
*/
static void __iomem *
@@ -270,7 +299,7 @@ _base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * @Returns - Pointer to buffer location in BAR0.
+ * Return: Pointer to buffer location in BAR0.
*/
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -291,7 +320,7 @@ _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @chain_buffer_dma: Chain buffer dma address.
*
- * @Returns - Pointer to chain buffer. Or Null on Failure.
+ * Return: Pointer to chain buffer. Or Null on Failure.
*/
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
@@ -322,8 +351,6 @@ _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object.
* @mpi_request: mf request pointer.
* @smid: system request message index.
- *
- * @Returns: Nothing.
*/
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
void *mpi_request, u16 smid)
@@ -496,8 +523,9 @@ eob_clone_chain:
* mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
* @arg: input argument, used to derive ioc
*
- * Return 0 if controller is removed from pci subsystem.
- * Return -1 for other case.
+ * Return:
+ * 0 if controller is removed from pci subsystem.
+ * -1 for other case.
*/
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
@@ -517,9 +545,8 @@ static int mpt3sas_remove_dead_ioc_func(void *arg)
/**
* _base_fault_reset_work - workq handling ioc fault conditions
* @work: input argument, used to derive ioc
- * Context: sleep.
*
- * Return nothing.
+ * Context: sleep.
*/
static void
_base_fault_reset_work(struct work_struct *work)
@@ -610,9 +637,8 @@ _base_fault_reset_work(struct work_struct *work)
/**
* mpt3sas_base_start_watchdog - start the fault_reset_work_q
* @ioc: per adapter object
- * Context: sleep.
*
- * Return nothing.
+ * Context: sleep.
*/
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
@@ -633,7 +659,7 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->fault_reset_work_q) {
pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
ioc->name, __func__, __LINE__);
- return;
+ return;
}
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->fault_reset_work_q)
@@ -646,9 +672,8 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
* @ioc: per adapter object
- * Context: sleep.
*
- * Return nothing.
+ * Context: sleep.
*/
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
@@ -671,8 +696,6 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
* mpt3sas_base_fault_info - verbose translation of firmware FAULT code
* @ioc: per adapter object
* @fault_code: fault code
- *
- * Return nothing.
*/
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
@@ -721,8 +744,6 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
* @request_hdr: request mf
- *
- * Return nothing.
*/
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
@@ -945,8 +966,6 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
* _base_display_event_data - verbose translation of firmware asyn events
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
- *
- * Return nothing.
*/
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
@@ -1065,8 +1084,6 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
* _base_sas_log_info - verbose translation of firmware log info
* @ioc: per adapter object
* @log_info: log info
- *
- * Return nothing.
*/
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
@@ -1124,8 +1141,6 @@ _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
* @smid: system request message index
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
- *
- * Return nothing.
*/
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1167,8 +1182,9 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return:
+ * 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1200,8 +1216,9 @@ mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return:
+ * 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
@@ -1279,7 +1296,7 @@ _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Return callback index.
+ * Return: callback index.
*/
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -1312,8 +1329,6 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
*
* Disabling ResetIRQ, Reply and Doorbell Interrupts
- *
- * Return nothing.
*/
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
@@ -1332,8 +1347,6 @@ _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
*
* Enabling only Reply Interrupts
- *
- * Return nothing.
*/
static void
_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
@@ -1358,9 +1371,8 @@ union reply_descriptor {
* _base_interrupt - MPT adapter (IOC) specific interrupt handler.
* @irq: irq number (not used)
* @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
- * @r: pt_regs pointer (not used)
*
- * Return IRQ_HANDLE if processed, else IRQ_NONE.
+ * Return: IRQ_HANDLED if processed, else IRQ_NONE.
*/
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
@@ -1535,6 +1547,7 @@ _base_interrupt(int irq, void *bus_id)
* _base_is_controller_msix_enabled - does controller support multi-reply queues
* @ioc: per adapter object
*
+ * Return: Whether or not MSI/X is enabled.
*/
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
@@ -1549,8 +1562,6 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
* Context: non-ISR context
*
* Called when a Task Management request has completed.
- *
- * Return nothing.
*/
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
@@ -1577,8 +1588,6 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_release_callback_handler - clear interrupt callback handler
* @cb_idx: callback index
- *
- * Return nothing.
*/
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
@@ -1590,7 +1599,7 @@ mpt3sas_base_release_callback_handler(u8 cb_idx)
* mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
* @cb_func: callback function
*
- * Returns cb_func.
+ * Return: Index of @cb_func.
*/
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
@@ -1607,8 +1616,6 @@ mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
/**
* mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
- *
- * Return nothing.
*/
void
mpt3sas_base_initialize_callback_handler(void)
@@ -1628,8 +1635,6 @@ mpt3sas_base_initialize_callback_handler(void)
* Create a zero length scatter gather entry to ensure the IOC's hardware has
* something to use if the target device goes brain dead and tries
* to send data even when none is asked for.
- *
- * Return nothing.
*/
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
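
A minimal sketch of the zero-length entry this comment describes, assuming
the MPI2_SGE_* flag definitions from mpi2.h and the driver's
base_add_sg_single() method pointer (illustration, not part of the patch):

	/* Length bits stay zero; the flag bits mark the SGE as the last
	 * and only element, so the IOC always has a valid descriptor. */
	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) << MPI2_SGE_FLAGS_SHIFT);

	ioc->base_add_sg_single(paddr, flags_length, -1);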
@@ -1646,8 +1651,6 @@ _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
* @paddr: virtual address for SGE
* @flags_length: SGE flags and data transfer length
* @dma_addr: Physical address
- *
- * Return nothing.
*/
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
@@ -1666,8 +1669,6 @@ _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
* @paddr: virtual address for SGE
* @flags_length: SGE flags and data transfer length
* @dma_addr: Physical address
- *
- * Return nothing.
*/
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
@@ -1685,7 +1686,7 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
* @ioc: per adapter object
* @scmd: SCSI commands of the IO request
*
- * Returns chain tracker from chain_lookup table using key as
+ * Return: chain tracker from chain_lookup table using key as
* smid and smid's chain_offset.
*/
static struct chain_tracker *
@@ -1715,8 +1716,6 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
* @data_out_sz: data xfer size for WRITES
* @data_in_dma: physical address for READS
* @data_in_sz: data xfer size for READS
- *
- * Return nothing.
*/
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
@@ -1777,7 +1776,7 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
* describes the first data memory segment, and PRP2 contains a pointer to a PRP
* list located elsewhere in memory to describe the remaining data memory
* segments. The PRP list will be contiguous.
-
+ *
* The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
* consists of a list of PRP entries to describe a number of noncontiguous
* physical memory segments as a single memory buffer, just as a SGL does. Note
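
Given NVME_PRP_PAGE_SIZE (4096) and NVME_PRP_SIZE (8) from mpt3sas_base.h,
the bookkeeping is page-granular; a hypothetical helper, for illustration
only (not in the driver):

	/* Number of PRP entries needed for @len bytes at bus address @dma.
	 * Only the first entry may point into the middle of a page; every
	 * later entry must be page-aligned. */
	static u32 nvme_prp_count(dma_addr_t dma, u32 len)
	{
		u32 offset = dma & (NVME_PRP_PAGE_SIZE - 1);

		return (offset + len + NVME_PRP_PAGE_SIZE - 1) /
		    NVME_PRP_PAGE_SIZE;
	}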
@@ -1820,8 +1819,6 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
* @data_out_sz: data xfer size for WRITES
* @data_in_dma: physical address for READS
* @data_in_sz: data xfer size for READS
- *
- * Returns nothing.
*/
static void
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -1836,6 +1833,8 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u32 offset, entry_len;
u32 page_mask_result, page_mask;
size_t length;
+ struct mpt3sas_nvme_cmd *nvme_cmd =
+ (void *)nvme_encap_request->NVMe_Command;
/*
* Not all commands require a data transfer. If no data, just return
@@ -1843,15 +1842,8 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
*/
if (!data_in_sz && !data_out_sz)
return;
- /*
- * Set pointers to PRP1 and PRP2, which are in the NVMe command.
- * PRP1 is located at a 24 byte offset from the start of the NVMe
- * command. Then set the current PRP entry pointer to PRP1.
- */
- prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
- NVME_CMD_PRP1_OFFSET);
- prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
- NVME_CMD_PRP2_OFFSET);
+ prp1_entry = &nvme_cmd->prp1;
+ prp2_entry = &nvme_cmd->prp2;
prp_entry = prp1_entry;
/*
* For the PRP entries, use the specially allocated buffer of
@@ -1992,7 +1984,7 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @smid: msg Index
* @sge_count: scatter gather element count.
*
- * Returns: true: PRPs are built
+ * Return: true: PRPs are built
*		false: IEEE SGLs need to be built
*/
static void
@@ -2127,11 +2119,9 @@ base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
{
u32 data_length = 0;
- struct scatterlist *sg_scmd;
bool build_prp = true;
data_length = scsi_bufflen(scmd);
- sg_scmd = scsi_sglist(scmd);
/* If the data length is <= 16K and the number of SGE entries is <= 2,
* we build an IEEE SGL
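
Stripped to its essentials, the heuristic reads as below (a sketch assuming
<scsi/scsi_cmnd.h> and SZ_16K from <linux/sizes.h>; names are illustrative):

	static bool want_nvme_prp(struct scsi_cmnd *scmd, int sge_count)
	{
		/* small, simple transfers are cheaper as IEEE SGLs */
		if (scsi_bufflen(scmd) <= SZ_16K && sge_count <= 2)
			return false;
		return true;	/* otherwise build native NVMe PRPs */
	}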
@@ -2155,18 +2145,16 @@ base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
* @scmd: scsi command
* @pcie_device: points to the PCIe device's info
*
- * Returns 0 if native SGL was built, 1 if no SGL was built
+ * Return: 0 if native SGL was built, 1 if no SGL was built
*/
static int
_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
struct _pcie_device *pcie_device)
{
- struct scatterlist *sg_scmd;
int sges_left;
/* Get the SG list pointer and info. */
- sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
if (sges_left < 0) {
sdev_printk(KERN_ERR, scmd->device,
@@ -2201,8 +2189,6 @@ out:
* @chain_offset: number of 128 byte elements from start of segment
* @length: data transfer length
* @dma_addr: Physical address
- *
- * Return nothing.
*/
static void
_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
@@ -2224,8 +2210,6 @@ _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
* Create a zero length scatter gather entry to ensure the IOC's hardware has
* something to use if the target device goes brain dead and tries
* to send data even when none is asked for.
- *
- * Return nothing.
*/
static void
_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
@@ -2249,7 +2233,7 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
* The main routine that builds scatter gather table from a given
* scsi request sent via the .queuecommand main handler.
*
- * Returns 0 success, anything else error
+ * Return: 0 success, anything else error
*/
static int
_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
@@ -2394,7 +2378,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
* The main routine that builds scatter gather table from a given
* scsi request sent via the .queuecommand main handler.
*
- * Returns 0 success, anything else error
+ * Return: 0 success, anything else error
*/
static int
_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
@@ -2525,8 +2509,6 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
* @data_out_sz: data xfer size for WRITES
* @data_in_dma: physical address for READS
* @data_in_sz: data xfer size for READS
- *
- * Return nothing.
*/
static void
_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
@@ -2576,7 +2558,7 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
* @ioc: per adapter object
* @pdev: PCI device struct
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
@@ -2924,10 +2906,9 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
_base_free_irq(ioc);
_base_disable_msix(ioc);
- if (ioc->combined_reply_queue) {
- kfree(ioc->replyPostRegisterIndex);
- ioc->replyPostRegisterIndex = NULL;
- }
+ kfree(ioc->replyPostRegisterIndex);
+ ioc->replyPostRegisterIndex = NULL;
+
if (ioc->chip_phys) {
iounmap(ioc->chip);
@@ -2945,7 +2926,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
* mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
@@ -3034,7 +3015,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
/* Use the Combined reply queue feature only for SAS3 C0 & higher
* revision HBAs and also only when reply queue count is greater than 8
*/
- if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
+ if (ioc->combined_reply_queue) {
/* Determine the Supplemental Reply Post Host Index Registers
* Addresses. The Supplemental Reply Post Host Index Registers
* start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
@@ -3058,8 +3039,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
(i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
}
- } else
- ioc->combined_reply_queue = 0;
+ }
if (ioc->is_warpdrive) {
ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
@@ -3097,7 +3077,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @smid: system request message index(smid zero is invalid)
*
- * Returns virt pointer to message frame.
+ * Return: virt pointer to message frame.
*/
void *
mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3110,7 +3090,7 @@ mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns virt pointer to sense buffer.
+ * Return: virt pointer to sense buffer.
*/
void *
mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3123,7 +3103,7 @@ mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns phys pointer to the low 32bit address of the sense buffer.
+ * Return: phys pointer to the low 32bit address of the sense buffer.
*/
__le32
mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3137,7 +3117,7 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns virt pointer to a PCIe SGL.
+ * Return: virt pointer to a PCIe SGL.
*/
void *
mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3150,7 +3130,7 @@ mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns phys pointer to the address of the PCIe buffer.
+ * Return: phys pointer to the address of the PCIe buffer.
*/
dma_addr_t
mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3184,7 +3164,7 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @cb_idx: callback index
*
- * Returns smid (zero is invalid)
+ * Return: smid (zero is invalid)
*/
u16
mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
@@ -3216,7 +3196,7 @@ mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
* @cb_idx: callback index
* @scmd: pointer to scsi command object
*
- * Returns smid (zero is invalid)
+ * Return: smid (zero is invalid)
*/
u16
mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
@@ -3239,7 +3219,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
* @ioc: per adapter object
* @cb_idx: callback index
*
- * Returns smid (zero is invalid)
+ * Return: smid (zero is invalid)
*/
u16
mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
@@ -3270,7 +3250,7 @@ _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
* See _wait_for_commands_to_complete() call with regards to this code.
*/
if (ioc->shost_recovery && ioc->pending_io_count) {
- ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
+ ioc->pending_io_count = scsi_host_busy(ioc->shost);
if (ioc->pending_io_count == 0)
wake_up(&ioc->reset_wq);
}
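
Both polling sites (here and in mpt3sas_wait_for_commands_to_complete()
further down) switch from peeking at a midlayer-private counter to the
scsi_host_busy() accessor from <scsi/scsi_host.h>:

	/* before: reaches into Scsi_Host internals */
	ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);

	/* after: the midlayer reports its own busy count */
	ioc->pending_io_count = scsi_host_busy(ioc->shost);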
@@ -3284,14 +3264,13 @@ void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
st->cb_idx = 0xFF;
st->direct_io = 0;
atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
+ st->smid = 0;
}
/**
* mpt3sas_base_free_smid - put smid back on free_list
* @ioc: per adapter object
* @smid: system request message index
- *
- * Return nothing.
*/
void
mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3353,7 +3332,6 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
/**
* _base_writeq - 64 bit write to MMIO
- * @ioc: per adapter object
* @b: data payload
* @addr: address in MMIO space
* @writeq_lock: spin lock
@@ -3382,8 +3360,6 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
- *
- * Return nothing.
*/
static void
_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
@@ -3412,8 +3388,6 @@ _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
- *
- * Return nothing.
*/
static void
_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
@@ -3436,8 +3410,6 @@ _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
- *
- * Return nothing.
*/
void
mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -3461,7 +3433,6 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @ioc: per adapter object
* @smid: system request message index
* @msix_task: msix_task will be same as msix of IO in case of task abort, else 0.
- * Return nothing.
*/
void
mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -3472,11 +3443,8 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u64 *request;
if (ioc->is_mcpu_endpoint) {
- MPI2RequestHeader_t *request_hdr;
-
__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
- request_hdr = (MPI2RequestHeader_t *)mfp;
/* TBD 256 is offset within sys register. */
mpi_req_iomem = (void __force *)ioc->chip
+ MPI_FRAME_START_OFFSET
@@ -3507,8 +3475,6 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* firmware
* @ioc: per adapter object
* @smid: system request message index
- *
- * Return nothing.
*/
void
mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3530,8 +3496,6 @@ mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* mpt3sas_base_put_smid_default - Default, primarily used for config pages
* @ioc: per adapter object
* @smid: system request message index
- *
- * Return nothing.
*/
void
mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3539,13 +3503,10 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
Mpi2RequestDescriptorUnion_t descriptor;
void *mpi_req_iomem;
u64 *request;
- MPI2RequestHeader_t *request_hdr;
if (ioc->is_mcpu_endpoint) {
__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
- request_hdr = (MPI2RequestHeader_t *)mfp;
-
_clone_sg_entries(ioc, (void *) mfp, smid);
/* TBD 256 is offset within sys register */
mpi_req_iomem = (void __force *)ioc->chip +
@@ -3571,8 +3532,6 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
/**
* _base_display_OEMs_branding - Display branding string
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
@@ -3833,7 +3792,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
* version from FW Image Header.
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
@@ -3930,8 +3889,6 @@ out:
/**
* _base_display_ioc_capabilities - Display IOC's capabilities.
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
@@ -4047,8 +4004,6 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
* @device_missing_delay: amount of time till device is reported missing
* @io_missing_delay: interval IO is returned when there is a missing device
*
- * Return nothing.
- *
* Passed on the command line, this function will modify the device missing
* delay, as well as the io missing delay. This should be called at driver
* load time.
@@ -4131,11 +4086,10 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
out:
kfree(sas_iounit_pg1);
}
+
/**
* _base_static_config_pages - static start of day config pages
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
@@ -4207,8 +4161,6 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
*
* Free memory allocated during enclosure add.
- *
- * Return nothing.
*/
void
mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
@@ -4228,8 +4180,6 @@ mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
*
* Free memory allocated from _base_allocate_memory_pools.
- *
- * Return nothing.
*/
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
@@ -4350,9 +4300,8 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* @reply_pool_start_address: Base address of a reply queue set
* @pool_sz: Size of single Reply Descriptor Post Queues pool size
*
- * Returns 1 if reply queues in a set have a same upper 32bits
- * in their base memory address,
- * else 0
+ * Return: 1 if reply queues in a set have the same upper 32 bits in their
+ * base memory address, else 0.
*/
static int
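
The body of is_MSB_are_same() sits outside this hunk; an assumed sketch of
the check its kernel-doc describes:

	static int is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
	{
		long end = reply_pool_start_address + pool_sz - 1;

		/* a reply queue set must not straddle a 4 GiB boundary */
		return ((u64)reply_pool_start_address >> 32) ==
		    ((u64)end >> 32);
	}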
@@ -4373,7 +4322,7 @@ is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
* _base_allocate_memory_pools - allocate start of day memory pools
* @ioc: per adapter object
*
- * Returns 0 success, anything else error
+ * Return: 0 success, anything else error.
*/
static int
_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
@@ -4975,7 +4924,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* @ioc: Pointer to MPT_ADAPTER structure
* @cooked: Request raw or cooked IOC state
*
- * Returns all IOC Doorbell register bits if cooked==0, else just the
+ * Return: all IOC Doorbell register bits if cooked==0, else just the
* Doorbell bits in MPI_IOC_STATE_MASK.
*/
u32
@@ -4990,10 +4939,11 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
/**
* _base_wait_on_iocstate - waiting on a particular ioc state
+ * @ioc: ?
* @ioc_state: controller state { READY, OPERATIONAL, or RESET }
* @timeout: timeout in seconds
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
@@ -5021,9 +4971,8 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
* _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
* a write to the doorbell)
* @ioc: per adapter object
- * @timeout: timeout in second
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
* Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
*/
@@ -5090,7 +5039,7 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
* @ioc: per adapter object
* @timeout: timeout in seconds
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
* Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
* doorbell.
@@ -5137,8 +5086,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
* @ioc: per adapter object
* @timeout: timeout in seconds
*
- * Returns 0 for success, non-zero for failure.
- *
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
@@ -5173,7 +5121,7 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
* @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
* @timeout: timeout in seconds
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
@@ -5222,7 +5170,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
* @reply: pointer to reply payload
* @timeout: timeout in seconds
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
@@ -5346,7 +5294,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
* identifying information about the device; in addition it allows the host to
* remove IOC resources associated with the device.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
@@ -5355,7 +5303,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
{
u16 smid;
u32 ioc_state;
- bool issue_reset = false;
+ u8 issue_reset = 0;
int rc;
void *request;
u16 wait_state_count;
@@ -5414,12 +5362,10 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
ioc->ioc_link_reset_in_progress)
ioc->ioc_link_reset_in_progress = 0;
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2SasIoUnitControlRequest_t)/4);
- if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
- issue_reset = true;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->base_cmds.status, mpi_request,
+ sizeof(Mpi2SasIoUnitControlRequest_t)/4);
goto issue_host_reset;
}
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -5449,7 +5395,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
* The SCSI Enclosure Processor request message causes the IOC to
* communicate with SES devices to control LED status signals.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
@@ -5457,7 +5403,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
{
u16 smid;
u32 ioc_state;
- bool issue_reset = false;
+ u8 issue_reset = 0;
int rc;
void *request;
u16 wait_state_count;
@@ -5510,12 +5456,10 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2SepRequest_t)/4);
- if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
- issue_reset = false;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->base_cmds.status, mpi_request,
+ sizeof(Mpi2SepRequest_t)/4);
goto issue_host_reset;
}
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -5539,8 +5483,9 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
/**
* _base_get_port_facts - obtain port facts reply and save in ioc
* @ioc: per adapter object
+ * @port: ?
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
@@ -5583,7 +5528,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
* @ioc: per adapter object
* @timeout:
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
@@ -5637,7 +5582,7 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
* _base_get_ioc_facts - obtain ioc facts reply and save in ioc
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
@@ -5681,6 +5626,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
facts->WhoInit = mpi_reply.WhoInit;
facts->NumberOfPorts = mpi_reply.NumberOfPorts;
facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
+ if (ioc->msix_enable && (facts->MaxMSIxVectors <=
+ MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
+ ioc->combined_reply_queue = 0;
facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
facts->MaxReplyDescriptorPostQueueDepth =
le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
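
The threshold comes from the MAX_COMBINED_MSIX_VECTORS() macro added to
mpt3sas_base.h further down; spelled out (illustration only, placed inside
any function since BUILD_BUG_ON() needs one):

	BUILD_BUG_ON(MAX_COMBINED_MSIX_VECTORS(1) != 16);	/* SAS3.5 IOC */
	BUILD_BUG_ON(MAX_COMBINED_MSIX_VECTORS(0) != 8);	/* SAS3 IOC */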
@@ -5736,7 +5684,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
* _base_send_ioc_init - send ioc_init to firmware
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
@@ -5837,8 +5785,8 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -5883,7 +5831,7 @@ mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* _base_send_port_enable - send port_enable(discovery stuff) to firmware
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
@@ -5950,7 +5898,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
* mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
@@ -5990,7 +5938,7 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
* Decide whether to wait on discovery to complete. Used to either
* locate boot device, or report volumes ahead of physical devices.
*
- * Returns 1 for wait, 0 for don't wait
+ * Return: 1 for wait, 0 for don't wait.
*/
static int
_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
@@ -6062,7 +6010,7 @@ _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
* _base_event_notification - send event notification
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
@@ -6119,7 +6067,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_validate_event_type - validating event types
* @ioc: per adapter object
- * @event: firmware event
+ * @event_type: firmware event
*
* This will turn on firmware event notification when the application
* asks for that event. We don't mask events that are already enabled.
@@ -6157,7 +6105,7 @@ mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
* _base_diag_reset - the "big hammer" start of day reset
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
@@ -6271,7 +6219,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @type: FORCE_BIG_HAMMER or SOFT_RESET
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
@@ -6340,7 +6288,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
* _base_make_ioc_operational - put controller in OPERATIONAL state
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
@@ -6513,8 +6461,6 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_free_resources - free resources controller resources
* @ioc: per adapter object
- *
- * Return nothing.
*/
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
@@ -6540,7 +6486,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
* mpt3sas_base_attach - attach controller instance
* @ioc: per adapter object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
@@ -6797,8 +6743,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_detach - remove controller instance
* @ioc: per adapter object
- *
- * Return nothing.
*/
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
@@ -6830,65 +6774,69 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _base_reset_handler - reset callback handler (for base)
+ * _base_pre_reset_handler - pre reset handler
* @ioc: per adapter object
- * @reset_phase: phase
- *
- * The handler for doing any required cleanup or initialization.
- *
- * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
- * MPT3_IOC_DONE_RESET
- *
- * Return nothing.
*/
-static void
-_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- mpt3sas_scsih_reset_handler(ioc, reset_phase);
- mpt3sas_ctl_reset_handler(ioc, reset_phase);
- switch (reset_phase) {
- case MPT3_IOC_PRE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
- break;
- case MPT3_IOC_AFTER_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
- if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
- ioc->transport_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
- complete(&ioc->transport_cmds.done);
- }
- if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
- ioc->base_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
- complete(&ioc->base_cmds.done);
- }
- if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
- ioc->port_enable_failed = 1;
- ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
- if (ioc->is_driver_loading) {
- ioc->start_scan_failed =
- MPI2_IOCSTATUS_INTERNAL_ERROR;
- ioc->start_scan = 0;
- ioc->port_enable_cmds.status =
- MPT3_CMD_NOT_USED;
- } else
- complete(&ioc->port_enable_cmds.done);
- }
- if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
- ioc->config_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
- ioc->config_cmds.smid = USHRT_MAX;
- complete(&ioc->config_cmds.done);
+ mpt3sas_scsih_pre_reset_handler(ioc);
+ mpt3sas_ctl_pre_reset_handler(ioc);
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+}
+
+/**
+ * _base_after_reset_handler - after reset handler
+ * @ioc: per adapter object
+ */
+static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ mpt3sas_scsih_after_reset_handler(ioc);
+ mpt3sas_ctl_after_reset_handler(ioc);
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
+ ioc->transport_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
+ complete(&ioc->transport_cmds.done);
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
+ ioc->base_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
+ complete(&ioc->base_cmds.done);
+ }
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ ioc->port_enable_failed = 1;
+ ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
+ if (ioc->is_driver_loading) {
+ ioc->start_scan_failed =
+ MPI2_IOCSTATUS_INTERNAL_ERROR;
+ ioc->start_scan = 0;
+ ioc->port_enable_cmds.status =
+ MPT3_CMD_NOT_USED;
+ } else {
+ complete(&ioc->port_enable_cmds.done);
}
- break;
- case MPT3_IOC_DONE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
- break;
}
+ if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
+ ioc->config_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
+ ioc->config_cmds.smid = USHRT_MAX;
+ complete(&ioc->config_cmds.done);
+ }
+}
+
+/**
+ * _base_reset_done_handler - reset done handler
+ * @ioc: per adapter object
+ */
+static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ mpt3sas_scsih_reset_done_handler(ioc);
+ mpt3sas_ctl_reset_done_handler(ioc);
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
}
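
Abridged, mpt3sas_base_hard_reset_handler() below now calls the three stage
handlers in a straight line instead of driving one handler through a phase
switch:

	_base_pre_reset_handler(ioc);
	mpt3sas_wait_for_commands_to_complete(ioc);
	_base_mask_interrupts(ioc);
	r = _base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_after_reset_handler(ioc);
	...
	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_done_handler(ioc);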
/**
@@ -6910,7 +6858,7 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
return;
/* pending command count */
- ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
+ ioc->pending_io_count = scsi_host_busy(ioc->shost);
if (!ioc->pending_io_count)
return;
@@ -6924,7 +6872,7 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
* @ioc: Pointer to MPT_ADAPTER structure
* @type: FORCE_BIG_HAMMER or SOFT_RESET
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
@@ -6949,14 +6897,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_halt_firmware(ioc);
/* wait for an active reset in progress to complete */
- if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
- do {
- ssleep(1);
- } while (ioc->shost_recovery == 1);
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
- return ioc->ioc_reset_in_progress_status;
- }
+ mutex_lock(&ioc->reset_in_progress_mutex);
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->shost_recovery = 1;
@@ -6971,13 +6912,13 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
is_fault = 1;
}
- _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
+ _base_pre_reset_handler(ioc);
mpt3sas_wait_for_commands_to_complete(ioc);
_base_mask_interrupts(ioc);
r = _base_make_ioc_ready(ioc, type);
if (r)
goto out;
- _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
+ _base_after_reset_handler(ioc);
/* If this hard reset is called while port enable is active, then
* there is no reason to call make_ioc_operational
@@ -6998,14 +6939,13 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
r = _base_make_ioc_operational(ioc);
if (!r)
- _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
+ _base_reset_done_handler(ioc);
out:
dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- ioc->ioc_reset_in_progress_status = r;
ioc->shost_recovery = 0;
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
ioc->ioc_reset_count++;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index f02974c0be4a..96dc15e90bd8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -74,8 +74,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "25.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 25
+#define MPT3SAS_DRIVER_VERSION "26.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 26
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -143,21 +143,17 @@
* NVMe defines
*/
#define NVME_PRP_SIZE 8 /* PRP size */
-#define NVME_CMD_PRP1_OFFSET 24 /* PRP1 offset in NVMe cmd */
-#define NVME_CMD_PRP2_OFFSET 32 /* PRP2 offset in NVMe cmd */
#define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */
#define NVME_TASK_ABORT_MIN_TIMEOUT 6
#define NVME_TASK_ABORT_MAX_TIMEOUT 60
#define NVME_TASK_MNGT_CUSTOM_MASK (0x0010)
#define NVME_PRP_PAGE_SIZE 4096 /* Page size */
-
-/*
- * reset phases
- */
-#define MPT3_IOC_PRE_RESET 1 /* prior to host reset */
-#define MPT3_IOC_AFTER_RESET 2 /* just after host reset */
-#define MPT3_IOC_DONE_RESET 3 /* links re-initialized */
+struct mpt3sas_nvme_cmd {
+ u8 rsvd[24];
+ __le64 prp1;
+ __le64 prp2;
+};
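
The struct encodes the old NVME_CMD_PRP1_OFFSET/NVME_CMD_PRP2_OFFSET magic
numbers in its layout, which the compiler can now verify; a sketch assuming
<linux/build_bug.h> (not part of the patch):

	static inline void check_nvme_cmd_layout(void)
	{
		BUILD_BUG_ON(offsetof(struct mpt3sas_nvme_cmd, prp1) != 24);
		BUILD_BUG_ON(offsetof(struct mpt3sas_nvme_cmd, prp2) != 32);
	}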
/*
* logging format
@@ -323,6 +319,7 @@
* There are twelve Supplemental Reply Post Host Index Registers
* and each register is at offset 0x10 bytes from the previous one.
*/
+#define MAX_COMBINED_MSIX_VECTORS(gen35) ((gen35 == 1) ? 16 : 8)
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16
#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
@@ -1162,7 +1159,6 @@ struct MPT3SAS_ADAPTER {
struct mutex reset_in_progress_mutex;
spinlock_t ioc_reset_in_progress_lock;
u8 ioc_link_reset_in_progress;
- u8 ioc_reset_in_progress_status;
u8 ignore_loginfos;
u8 remove_host;
@@ -1482,13 +1478,17 @@ int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
void
mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc);
+u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
+ u8 status, void *mpi_request, int sz);
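
The helper's body is outside this diff; judging from the call sites it
replaces, a plausible sketch (assumed, mirroring the pr_err()/
_debug_dump_mf()/MPT3_CMD_RESET pattern removed at each call site):

	u8
	mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
	    u8 status, void *mpi_request, int sz)
	{
		u8 issue_reset = 0;

		/* a host reset that raced the command is not a timeout */
		if (!(status & MPT3_CMD_RESET))
			issue_reset = 1;

		pr_err(MPT3SAS_FMT "Command %s\n", ioc->name,
		    issue_reset ? "timed out" : "terminated due to Host Reset");
		_debug_dump_mf(mpi_request, sz);

		return issue_reset;
	}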
/* scsih shared API */
struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
u16 smid);
u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
-void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
+void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method);
@@ -1615,7 +1615,9 @@ void mpt3sas_ctl_init(ushort hbas_to_enumerate);
void mpt3sas_ctl_exit(ushort hbas_to_enumerate);
u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
-void mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
+void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc,
u8 msix_index, u32 reply);
void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index e87c76a832f6..d29a2dcc7d0e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -198,7 +198,7 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
*
* A wrapper for obtaining dma-able memory for config page request.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
@@ -230,7 +230,7 @@ _config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
*
* A wrapper to free dma-able memory when using _config_alloc_config_dma_memory.
*
- * Returns 0 for success, non-zero for failure.
*/
static void
_config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
@@ -251,8 +251,8 @@ _config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
*
* The callback handler when using _config_request.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -295,7 +295,7 @@ mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
*
* The callback index is set inside ioc->config_cb_idx.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
@@ -406,10 +406,9 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2ConfigRequest_t)/4);
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->config_cmds.status, mpi_request,
+ sizeof(Mpi2ConfigRequest_t)/4);
retry_count++;
if (ioc->config_cmds.smid == smid)
mpt3sas_base_free_smid(ioc, smid);
@@ -519,7 +518,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -556,7 +555,7 @@ mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
* @sz: size of buffer passed in config_page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
@@ -593,7 +592,7 @@ mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
@@ -630,7 +629,7 @@ mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
@@ -667,7 +666,7 @@ mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
@@ -708,7 +707,7 @@ mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc,
@@ -744,7 +743,7 @@ mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -780,7 +779,7 @@ mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -816,7 +815,7 @@ mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -852,7 +851,7 @@ mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -889,7 +888,7 @@ mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
* @sz: size of buffer passed in config_page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc,
@@ -924,7 +923,7 @@ mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc,
@@ -960,7 +959,7 @@ mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc,
* @config_page: contents of the config page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc,
@@ -998,7 +997,7 @@ mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc,
* @handle: device handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -1039,7 +1038,7 @@ mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
* @handle: device handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -1080,7 +1079,7 @@ mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
* @handle: device handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -1121,7 +1120,7 @@ out:
* @handle: device handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc,
@@ -1159,7 +1158,7 @@ out:
* @num_phys: pointer returned with the number of phys
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys)
@@ -1209,7 +1208,7 @@ mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys)
* Calling function should call config_get_number_hba_phys prior to
* this function, so enough memory is allocated for config_page.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -1250,7 +1249,7 @@ mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
* Calling function should call config_get_number_hba_phys prior to
* this function, so enough memory is allocated for config_page.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -1291,7 +1290,7 @@ mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
* Calling function should call config_get_number_hba_phys prior to
* this function, so enough memory is allocated for config_page.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -1333,7 +1332,7 @@ mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
* @handle: expander handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1373,7 +1372,7 @@ mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @handle: expander handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1416,7 +1415,7 @@ mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @handle: expander handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1455,7 +1454,7 @@ mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @phy_number: phy number
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1495,7 +1494,7 @@ mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @phy_number: phy number
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1536,7 +1535,7 @@ mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @handle: volume handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc,
@@ -1574,7 +1573,7 @@ mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc,
* @num_pds: returns pds count
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
@@ -1626,7 +1625,7 @@ mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
* @sz: size of buffer passed in config_page
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -1665,7 +1664,7 @@ mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc,
* @form_specific: specific to the form
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -1704,7 +1703,7 @@ mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
* @volume_handle: volume handle
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
@@ -1794,7 +1793,7 @@ mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
* @wwid: volume wwid
* Context: sleep.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 3269ef43f07e..5e8c059ce2c9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -253,8 +253,8 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
*
* The callback handler when using ioc->ctl_cb_idx.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -317,7 +317,7 @@ mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* The bitmask in ioc->event_type[] indicates which events should
* be saved in the driver event_log. This bitmask is set by the application.
*
- * Returns 1 when event should be captured, or zero means no match.
+ * Return: 1 when event should be captured, or zero means no match.
*/
static int
_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
@@ -339,8 +339,6 @@ _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
* mpt3sas_ctl_add_to_event_log - add event
* @ioc: per adapter object
* @mpi_reply: reply message frame
- *
- * Return nothing.
*/
void
mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
@@ -395,8 +393,8 @@ mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
* This function merely adds a new work task into ioc->firmware_event_thread.
* The tasks are worked from _firmware_event_work in user context.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
@@ -412,12 +410,12 @@ mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
/**
* _ctl_verify_adapter - validates ioc_number passed from application
- * @ioc: per adapter object
+ * @ioc_number: ?
* @iocpp: The ioc pointer is returned in this.
* @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
* MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
*
- * Return (-1) means error, else ioc_number.
+ * Return: (-1) means error, else ioc_number.
*/
static int
_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
@@ -460,65 +458,74 @@ out:
/**
* mpt3sas_ctl_pre_reset_handler - pre reset callback handler (for ctl)
* @ioc: per adapter object
- * @reset_phase: phase
*
* The handler for doing any required cleanup or initialization.
- *
- * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
- * MPT3_IOC_DONE_RESET
*/
-void
-mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
int i;
u8 issue_reset;
- switch (reset_phase) {
- case MPT3_IOC_PRE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
- for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
- if (!(ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_REGISTERED))
- continue;
- if ((ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_RELEASED))
- continue;
- mpt3sas_send_diag_release(ioc, i, &issue_reset);
- }
- break;
- case MPT3_IOC_AFTER_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ mpt3sas_send_diag_release(ioc, i, &issue_reset);
+ }
+}
+
+/**
+ * mpt3sas_ctl_after_reset_handler - after reset callback handler (for ctl)
+ * @ioc: per adapter object
+ *
+ * The handler for doing any required cleanup or initialization.
+ */
+void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
- if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
- ioc->ctl_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
- complete(&ioc->ctl_cmds.done);
- }
- break;
- case MPT3_IOC_DONE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
+ ioc->ctl_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
+ complete(&ioc->ctl_cmds.done);
+ }
+}
+
+/**
+ * mpt3sas_ctl_reset_done_handler - reset done callback handler (for ctl)
+ * @ioc: per adapter object
+ *
+ * The handler for doing any required cleanup or initialization.
+ */
+void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
- for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
- if (!(ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_REGISTERED))
- continue;
- if ((ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_RELEASED))
- continue;
- ioc->diag_buffer_status[i] |=
- MPT3_DIAG_BUFFER_IS_DIAG_RESET;
- }
- break;
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ ioc->diag_buffer_status[i] |=
+ MPT3_DIAG_BUFFER_IS_DIAG_RESET;
}
}
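
[Editorial note: the net effect of the hunk above is that the single phase-switching callback is split into three phase-specific handlers. A minimal sketch of the call side under that assumption follows; the real caller lives in mpt3sas_base.c and its exact shape is not shown in this patch.]

	static void demo_reset_sequence(struct MPT3SAS_ADAPTER *ioc)
	{
		mpt3sas_ctl_pre_reset_handler(ioc);   /* release registered diag buffers */
		/* ... the IOC is hard-reset here ... */
		mpt3sas_ctl_after_reset_handler(ioc); /* fail any pended ctl command */
		/* ... firmware is brought back up ... */
		mpt3sas_ctl_reset_done_handler(ioc);  /* flag diag buffers as reset */
	}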
/**
* _ctl_fasync -
- * @fd -
- * @filep -
- * @mode -
+ * @fd: ?
+ * @filep: ?
+ * @mode: ?
*
 * Called when an application requests the fasync callback handler.
*/
@@ -530,8 +537,8 @@ _ctl_fasync(int fd, struct file *filep, int mode)
/**
* _ctl_poll -
- * @file -
- * @wait -
+ * @filep: ?
+ * @wait: ?
*
*/
static __poll_t
@@ -556,10 +563,10 @@ _ctl_poll(struct file *filep, poll_table *wait)
/**
* _ctl_set_task_mid - assign an active smid to tm request
* @ioc: per adapter object
- * @karg - (struct mpt3_ioctl_command)
- * @tm_request - pointer to mf from user space
+ * @karg: (struct mpt3_ioctl_command)
+ * @tm_request: pointer to mf from user space
*
- * Returns 0 when an smid if found, else fail.
+ * Return: 0 when a smid is found, else fail.
 * During failure, the reply frame is filled.
*/
static int
@@ -634,8 +641,8 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
/**
* _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
* @ioc: per adapter object
- * @karg - (struct mpt3_ioctl_command)
- * @mf - pointer to mf in user space
+ * @karg: (struct mpt3_ioctl_command)
+ * @mf: pointer to mf in user space
*/
static long
_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
@@ -970,6 +977,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
/* drop to default case for posting the request */
}
+ /* fall through */
default:
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
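
[Editorial note: the added /* fall through */ markers document that the missing break is intentional; GCC 7+'s -Wimplicit-fallthrough recognizes such comments. A generic, self-contained illustration — demo_* names are placeholders, not driver code:]

	enum demo_status { DEMO_OVERRUN, DEMO_OK, DEMO_BAD };

	static int demo_map_status(enum demo_status s)
	{
		int result = 0;

		switch (s) {
		case DEMO_OVERRUN:
			result |= 0x100;   /* record the overrun... */
			/* fall through */ /* ...then share the DEMO_OK handling */
		case DEMO_OK:
			result |= 0x1;
			break;
		default:
			result = -1;
			break;
		}
		return result;
	}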
@@ -995,11 +1003,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->ignore_loginfos = 0;
}
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
- __func__);
- _debug_dump_mf(mpi_request, karg.data_sge_offset);
- if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ karg.data_sge_offset);
goto issue_host_reset;
}
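
[Editorial note: this hunk, and several like it below, replaces open-coded timeout boilerplate with mpt3sas_base_check_cmd_timeout(), which this series adds to mpt3sas_base.c. Its body is not shown in this excerpt; the following is only a plausible reconstruction from the replaced call sites.]

	u8
	mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
		u8 status, void *mpi_request, int sz)
	{
		u8 issue_reset = 0;

		/* A timeout caused by an in-flight reset does not warrant
		 * another host reset; any other timeout does. */
		if (!(status & MPT3_CMD_RESET))
			issue_reset = 1;

		pr_err(MPT3SAS_FMT "Command %s\n", ioc->name,
		    issue_reset ? "timed out" : "terminated due to host reset");
		_debug_dump_mf(mpi_request, sz);

		return issue_reset;
	}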
@@ -1114,7 +1121,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
/**
* _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1168,7 +1175,7 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1199,7 +1206,7 @@ _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1237,7 +1244,7 @@ _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1281,7 +1288,7 @@ _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_do_reset - main handler for MPT3HARDRESET opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1419,7 +1426,7 @@ _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
/**
* _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -1621,12 +1628,10 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
- __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2DiagBufferPostRequest_t)/4);
- if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
goto issue_host_reset;
}
@@ -1719,7 +1724,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
/**
* _ctl_diag_register - application register with driver
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*
 * This will allow the driver to set up any required buffers that will be
* needed by firmware to communicate with the driver.
@@ -1743,7 +1748,7 @@ _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_diag_unregister - application unregister with driver
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*
 * This will allow the driver to clean up any memory allocated for diag
* messages and to free up any resources.
@@ -1816,7 +1821,7 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_diag_query - query relevant info associated with diag buffers
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*
* The application will send only buffer_type and unique_id. Driver will
* inspect unique_id first, if valid, fill in all the info. If unique_id is
@@ -1903,8 +1908,8 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* mpt3sas_send_diag_release - Diag Release Message
* @ioc: per adapter object
- * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
- * @issue_reset - specifies whether host reset is required.
+ * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @issue_reset: specifies whether host reset is required.
*
*/
int
@@ -1968,12 +1973,9 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
- __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2DiagReleaseRequest_t)/4);
- if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
- *issue_reset = 1;
+ *issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagReleaseRequest_t)/4);
rc = -EFAULT;
goto out;
}
@@ -2009,7 +2011,8 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
/**
* _ctl_diag_release - request to send Diag Release Message to firmware
- * @arg - user space buffer containing ioctl content
+ * @ioc: ?
+ * @arg: user space buffer containing ioctl content
*
 * This allows ownership of the specified buffer to be returned to the driver,
* allowing an application to read the buffer without fear that firmware is
@@ -2098,7 +2101,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_diag_read_buffer - request for copy of the diag buffer
* @ioc: per adapter object
- * @arg - user space buffer containing ioctl content
+ * @arg: user space buffer containing ioctl content
*/
static long
_ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
@@ -2235,12 +2238,10 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
- __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2DiagBufferPostRequest_t)/4);
- if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
goto issue_host_reset;
}
@@ -2284,8 +2285,8 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/**
* _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
* @ioc: per adapter object
- * @cmd - ioctl opcode
- * @arg - (struct mpt3_ioctl_command32)
+ * @cmd: ioctl opcode
+ * @arg: (struct mpt3_ioctl_command32)
*
 * MPT3COMMAND32 - Handle 32-bit applications running on a 64-bit OS.
*/
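
[Editorial note: a hedged illustration of the 32-to-64-bit conversion pattern the kernel-doc above describes — copy the 32-bit struct in from user space, then widen each compat_uptr_t with compat_ptr(). The demo_* struct layout is illustrative, not the actual mpt3_ioctl_command32.]

	#include <linux/compat.h>
	#include <linux/uaccess.h>

	struct demo_cmd32 {
		compat_uptr_t reply_frame_buf_ptr;  /* 32-bit user pointer */
		u32 data_in_size;
	};

	struct demo_cmd {
		void __user *reply_frame_buf_ptr;   /* native user pointer */
		u32 data_in_size;
	};

	static long demo_compat_cmd(void __user *arg)
	{
		struct demo_cmd32 karg32;
		struct demo_cmd karg;

		if (copy_from_user(&karg32, arg, sizeof(karg32)))
			return -EFAULT;

		karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
		karg.data_in_size = karg32.data_in_size;
		/* ...hand karg to the native MPT3COMMAND handler... */
		return 0;
	}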
@@ -2328,10 +2329,10 @@ _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
/**
* _ctl_ioctl_main - main ioctl entry point
- * @file - (struct file)
- * @cmd - ioctl opcode
- * @arg - user space data buffer
- * @compat - handles 32 bit applications in 64bit os
+ * @file: (struct file)
+ * @cmd: ioctl opcode
+ * @arg: user space data buffer
+ * @compat: handles 32 bit applications in 64bit os
* @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
* MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
*/
@@ -2462,9 +2463,9 @@ out_unlock_pciaccess:
/**
* _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
- * @file - (struct file)
- * @cmd - ioctl opcode
- * @arg -
+ * @file: (struct file)
+ * @cmd: ioctl opcode
+ * @arg: ?
*/
static long
_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -2482,9 +2483,9 @@ _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/**
* _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked)
- * @file - (struct file)
- * @cmd - ioctl opcode
- * @arg -
+ * @file: (struct file)
+ * @cmd: ioctl opcode
+ * @arg: ?
*/
static long
_ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -2500,9 +2501,9 @@ _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_COMPAT
/**
 * _ctl_ioctl_compat - main ioctl entry point (compat)
- * @file -
- * @cmd -
- * @arg -
+ * @file: ?
+ * @cmd: ?
+ * @arg: ?
*
 * This routine handles 32-bit applications running on a 64-bit OS.
*/
@@ -2518,9 +2519,9 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
/**
 * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
- * @file -
- * @cmd -
- * @arg -
+ * @file: ?
+ * @cmd: ?
+ * @arg: ?
*
 * This routine handles 32-bit applications running on a 64-bit OS.
*/
@@ -2537,8 +2538,9 @@ _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
/* scsi host attributes */
/**
* _ctl_version_fw_show - firmware version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
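
[Editorial note: the recurring "@attr: ?" additions in the sysfs hunks below reflect the show-routine prototype, which takes a struct device_attribute * even when unused. A minimal sketch of the shape being documented; demo_* names and the FWVersion field access are assumptions modeled on this driver.]

	static ssize_t
	demo_version_fw_show(struct device *cdev, struct device_attribute *attr,
		char *buf)
	{
		struct Scsi_Host *shost = class_to_shost(cdev);
		struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

		/* @attr is part of the prototype even though it is unused,
		 * hence the "@attr: ?" placeholders in the fixed kernel-doc. */
		return snprintf(buf, PAGE_SIZE, "0x%08x\n",
		    ioc->facts.FWVersion.Word);
	}
	static DEVICE_ATTR(demo_version_fw, S_IRUGO, demo_version_fw_show, NULL);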
@@ -2559,8 +2561,9 @@ static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
/**
* _ctl_version_bios_show - bios version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2583,8 +2586,9 @@ static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
/**
* _ctl_version_mpi_show - MPI (message passing interface) version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2602,8 +2606,9 @@ static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
/**
* _ctl_version_product_show - product name
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2620,8 +2625,9 @@ static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
/**
 * _ctl_version_nvdata_persistent_show - nvdata persistent version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2640,8 +2646,9 @@ static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
/**
* _ctl_version_nvdata_default_show - nvdata default version
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2660,8 +2667,9 @@ static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
/**
* _ctl_board_name_show - board name
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2678,8 +2686,9 @@ static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
/**
* _ctl_board_assembly_show - board assembly name
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2696,8 +2705,9 @@ static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
/**
* _ctl_board_tracer_show - board tracer number
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -2714,8 +2724,9 @@ static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
/**
* _ctl_io_delay_show - io missing delay
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
 * This is for the firmware implementation of debouncing device
* removal events.
@@ -2735,8 +2746,9 @@ static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
/**
* _ctl_device_delay_show - device missing delay
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
 * This is for the firmware implementation of debouncing device
* removal events.
@@ -2756,8 +2768,9 @@ static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
/**
* _ctl_fw_queue_depth_show - global credits
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
 * This is the firmware queue depth limit.
*
@@ -2776,8 +2789,9 @@ static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
/**
* _ctl_sas_address_show - sas address
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is the controller sas address
*
@@ -2799,8 +2813,9 @@ static DEVICE_ATTR(host_sas_address, S_IRUGO,
/**
* _ctl_logging_level_show - logging level
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -2834,8 +2849,9 @@ static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
/**
* _ctl_fwfault_debug_show - show/store fwfault_debug
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
 * mpt3sas_fwfault_debug is a command line option.
* A sysfs 'read/write' shost attribute.
@@ -2870,8 +2886,9 @@ static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
/**
* _ctl_ioc_reset_count_show - ioc reset count
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
 * This is the ioc reset count.
*
@@ -2890,8 +2907,9 @@ static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
/**
* _ctl_ioc_reply_queue_count_show - number of reply queues
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
 * This is the number of reply queues.
*
@@ -2918,8 +2936,9 @@ static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
/**
* _ctl_BRM_status_show - Backup Rail Monitor Status
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
 * This is the Backup Rail Monitor status.
*
@@ -3004,8 +3023,9 @@ struct DIAG_BUFFER_START {
/**
* _ctl_host_trace_buffer_size_show - host buffer size (trace only)
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
@@ -3049,8 +3069,9 @@ static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
/**
* _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*
@@ -3114,8 +3135,9 @@ static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
/**
* _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*
@@ -3200,8 +3222,9 @@ static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
/**
* _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3224,8 +3247,10 @@ _ctl_diag_trigger_master_show(struct device *cdev,
/**
* _ctl_diag_trigger_master_store - store the diag_trigger_master attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ * @count: ?
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3255,8 +3280,9 @@ static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
/**
* _ctl_diag_trigger_event_show - show the diag_trigger_event attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3278,8 +3304,10 @@ _ctl_diag_trigger_event_show(struct device *cdev,
/**
* _ctl_diag_trigger_event_store - store the diag_trigger_event attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ * @count: ?
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3309,8 +3337,9 @@ static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
/**
* _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3332,8 +3361,10 @@ _ctl_diag_trigger_scsi_show(struct device *cdev,
/**
* _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ * @count: ?
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3362,8 +3393,9 @@ static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
/**
 * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3385,8 +3417,10 @@ _ctl_diag_trigger_mpi_show(struct device *cdev,
/**
* _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ * @count: ?
*
* A sysfs 'read/write' shost attribute.
*/
@@ -3450,8 +3484,9 @@ struct device_attribute *mpt3sas_host_attrs[] = {
/**
* _ctl_device_sas_address_show - sas address
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @dev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is the sas address for the target
*
@@ -3471,8 +3506,9 @@ static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
/**
* _ctl_device_handle_show - device handle
- * @cdev - pointer to embedded class device
- * @buf - the buffer returned
+ * @dev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
*
* This is the firmware assigned device handle
*
@@ -3492,8 +3528,9 @@ static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
/**
* _ctl_device_ncq_io_prio_show - send prioritized io commands to device
- * @dev - pointer to embedded device
- * @buf - the buffer returned
+ * @dev: pointer to embedded device
+ * @attr: ?
+ * @buf: the buffer returned
*
* A sysfs 'read/write' sdev attribute, only works with SATA
*/
@@ -3573,7 +3610,7 @@ static struct miscdevice gen2_ctl_dev = {
/**
* mpt3sas_ctl_init - main entry point for ctl.
- *
+ * @hbas_to_enumerate: ?
*/
void
mpt3sas_ctl_init(ushort hbas_to_enumerate)
@@ -3601,7 +3638,7 @@ mpt3sas_ctl_init(ushort hbas_to_enumerate)
/**
* mpt3sas_ctl_exit - exit point for ctl
- *
+ * @hbas_to_enumerate: ?
*/
void
mpt3sas_ctl_exit(ushort hbas_to_enumerate)
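
[Editorial note: a hedged sketch of how @hbas_to_enumerate plausibly gates registration of the two ioctl misc devices in mpt3sas_ctl_init(). Only gen2_ctl_dev is visible in this excerpt; ctl_dev, async_queue, ctl_poll_wait and the "1 = gen2-only / 2 = gen3-only" convention are assumptions.]

	void
	mpt3sas_ctl_init(ushort hbas_to_enumerate)
	{
		async_queue = NULL;

		if (hbas_to_enumerate != 1)   /* enumerate gen3 HBAs: /dev/mpt3ctl */
			if (misc_register(&ctl_dev) < 0)
				pr_err("%s can't register misc device\n",
				    MPT3SAS_DRIVER_NAME);

		if (hbas_to_enumerate != 2)   /* enumerate gen2 HBAs: /dev/mpt2ctl */
			if (misc_register(&gen2_ctl_dev) < 0)
				pr_err("%s can't register misc device\n",
				    MPT2SAS_DRIVER_NAME);

		init_waitqueue_head(&ctl_poll_wait);
	}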
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b8d131a455d0..53133cfd420f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -284,6 +284,8 @@ struct _scsi_io_transfer {
/**
* _scsih_set_debug_level - global setting of ioc->logging_level.
+ * @val: ?
+ * @kp: ?
*
* Note: The logging levels are defined in mpt3sas_debug.h.
*/
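
[Editorial note: the @val/@kp pair added above is the standard module_param_call setter contract — @val is the string written to the parameter, @kp identifies it. A self-contained sketch of that contract; demo_* names are placeholders, and the real setter also propagates the level to every registered adapter.]

	static int demo_logging_level;

	static int
	demo_set_debug_level(const char *val, const struct kernel_param *kp)
	{
		int ret = param_set_int(val, kp);  /* parse and store @val */

		if (ret)
			return ret;
		/* ...walk the adapter list and apply demo_logging_level... */
		return 0;
	}
	module_param_call(logging_level_demo, demo_set_debug_level, param_get_int,
		&demo_logging_level, 0600);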
@@ -311,7 +313,7 @@ module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
* @sas_address: sas address
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static inline int
_scsih_srch_boot_sas_address(u64 sas_address,
@@ -325,7 +327,7 @@ _scsih_srch_boot_sas_address(u64 sas_address,
 * @device_name: device name specified in the IDENTIFY frame
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static inline int
_scsih_srch_boot_device_name(u64 device_name,
@@ -340,7 +342,7 @@ _scsih_srch_boot_device_name(u64 device_name,
* @slot_number: slot number
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
@@ -356,11 +358,11 @@ _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
* @sas_address: sas address
 * @device_name: device name specified in the IDENTIFY frame
* @enclosure_logical_id: enclosure logical id
- * @slot_number: slot number
+ * @slot: slot number
* @form: specifies boot device form
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static int
_scsih_is_boot_device(u64 sas_address, u64 device_name,
@@ -398,10 +400,11 @@ _scsih_is_boot_device(u64 sas_address, u64 device_name,
/**
* _scsih_get_sas_address - set the sas_address for given device handle
+ * @ioc: ?
* @handle: device handle
* @sas_address: sas address
*
- * Returns 0 success, non-zero when failure
+ * Return: 0 success, non-zero when failure
*/
static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
@@ -710,8 +713,6 @@ mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @sas_device: per sas device object
* @sdev: scsi device struct
* @starget: scsi target struct
- *
- * Returns nothing.
*/
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
@@ -806,8 +807,6 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
* _scsih_device_remove_by_handle - removing device object by handle
* @ioc: per adapter object
* @handle: device handle
- *
- * Return nothing.
*/
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -835,8 +834,6 @@ _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* mpt3sas_device_remove_by_sas_address - removing device object by sas address
* @ioc: per adapter object
* @sas_address: device sas_address
- *
- * Return nothing.
*/
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
@@ -1109,8 +1106,6 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
* _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
* @ioc: per adapter object
* @handle: device handle
- *
- * Return nothing.
*/
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -1273,7 +1268,7 @@ mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/**
* _scsih_raid_device_find_by_wwid - raid device search
* @ioc: per adapter object
- * @handle: sas device handle (assigned by firmware)
+ * @wwid: ?
* Context: Calling function should acquire ioc->raid_device_lock
*
 * This searches for raid_device based on wwid, then returns raid_device
@@ -1418,8 +1413,6 @@ mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
* Context: This function will acquire ioc->sas_node_lock.
*
* Adding new object to the ioc->sas_expander_list.
- *
- * Return nothing.
*/
static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
@@ -1437,7 +1430,7 @@ _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
* @device_info: bitfield providing information about the device.
* Context: none
*
- * Returns 1 if end device.
+ * Return: 1 if end device.
*/
static int
_scsih_is_end_device(u32 device_info)
@@ -1456,7 +1449,7 @@ _scsih_is_end_device(u32 device_info)
* @device_info: bitfield providing information about the device.
* Context: none
*
- * Returns 1 if nvme device.
+ * Return: 1 if nvme device.
*/
static int
_scsih_is_nvme_device(u32 device_info)
@@ -1473,7 +1466,7 @@ _scsih_is_nvme_device(u32 device_info)
* @ioc: per adapter object
* @smid: system request message index
*
- * Returns the smid stored scmd pointer.
+ * Return: the scmd pointer stored for this smid.
 * The stored scmd pointer is then dereferenced and validated.
*/
struct scsi_cmnd *
@@ -1489,7 +1482,7 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
scmd = scsi_host_find_tag(ioc->shost, unique_tag);
if (scmd) {
st = scsi_cmd_priv(scmd);
- if (st->cb_idx == 0xFF)
+ if (st->cb_idx == 0xFF || st->smid == 0)
scmd = NULL;
}
}
@@ -1501,7 +1494,7 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @sdev: scsi device struct
* @qdepth: requested queue depth
*
- * Returns queue depth.
+ * Return: queue depth.
*/
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
@@ -1549,7 +1542,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
* scsih_target_alloc - target add routine
* @starget: scsi target struct
*
- * Returns 0 if ok. Any other return is assumed to be an error and
+ * Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
@@ -1640,8 +1633,6 @@ scsih_target_alloc(struct scsi_target *starget)
/**
* scsih_target_destroy - target destroy routine
* @starget: scsi target struct
- *
- * Returns nothing.
*/
static void
scsih_target_destroy(struct scsi_target *starget)
@@ -1653,7 +1644,6 @@ scsih_target_destroy(struct scsi_target *starget)
struct _raid_device *raid_device;
struct _pcie_device *pcie_device;
unsigned long flags;
- struct sas_rphy *rphy;
sas_target_priv_data = starget->hostdata;
if (!sas_target_priv_data)
@@ -1693,7 +1683,6 @@ scsih_target_destroy(struct scsi_target *starget)
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- rphy = dev_to_rphy(starget->dev.parent);
sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
if (sas_device && (sas_device->starget == starget) &&
(sas_device->id == starget->id) &&
@@ -1720,7 +1709,7 @@ scsih_target_destroy(struct scsi_target *starget)
* scsih_slave_alloc - device add routine
* @sdev: scsi device struct
*
- * Returns 0 if ok. Any other return is assumed to be an error and
+ * Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
@@ -1800,8 +1789,6 @@ scsih_slave_alloc(struct scsi_device *sdev)
/**
* scsih_slave_destroy - device destroy routine
* @sdev: scsi device struct
- *
- * Returns nothing.
*/
static void
scsih_slave_destroy(struct scsi_device *sdev)
@@ -1907,7 +1894,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
/**
* scsih_is_raid - return boolean indicating device is raid volume
- * @dev the device struct object
+ * @dev: the device struct object
*/
static int
scsih_is_raid(struct device *dev)
@@ -1930,7 +1917,7 @@ scsih_is_nvme(struct device *dev)
/**
* scsih_get_resync - get raid volume resync percent complete
- * @dev the device struct object
+ * @dev: the device struct object
*/
static void
scsih_get_resync(struct device *dev)
@@ -1991,7 +1978,7 @@ scsih_get_resync(struct device *dev)
/**
* scsih_get_state - get raid volume level
- * @dev the device struct object
+ * @dev: the device struct object
*/
static void
scsih_get_state(struct device *dev)
@@ -2057,6 +2044,7 @@ scsih_get_state(struct device *dev)
/**
* _scsih_set_level - set raid level
+ * @ioc: ?
* @sdev: scsi device struct
* @volume_type: volume type
*/
@@ -2098,9 +2086,9 @@ _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
/**
* _scsih_get_volume_capabilities - volume capabilities
* @ioc: per adapter object
- * @sas_device: the raid_device object
+ * @raid_device: the raid_device object
*
- * Returns 0 for success, else 1
+ * Return: 0 for success, else 1
*/
static int
_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
@@ -2192,7 +2180,7 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
* scsih_slave_configure - device configure routine.
* @sdev: scsi device struct
*
- * Returns 0 if ok. Any other return is assumed to be an error and
+ * Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
@@ -2256,7 +2244,7 @@ scsih_slave_configure(struct scsi_device *sdev)
ds = "SSP";
} else {
qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
- if (raid_device->device_info &
+ if (raid_device->device_info &
MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
ds = "SATA";
else
@@ -2365,13 +2353,14 @@ scsih_slave_configure(struct scsi_device *sdev)
"connector name( %s)\n", ds,
pcie_device->enclosure_level,
pcie_device->connector_name);
- pcie_device_put(pcie_device);
- spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
- scsih_change_queue_depth(sdev, qdepth);
if (pcie_device->nvme_mdts)
blk_queue_max_hw_sectors(sdev->request_queue,
pcie_device->nvme_mdts/512);
+
+ pcie_device_put(pcie_device);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ scsih_change_queue_depth(sdev, qdepth);
/* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
** merged and can eliminate holes created during merging
** operation.
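
[Editorial note: the hunk above is an ordering fix — the nvme_mdts dereference now happens before pcie_device_put() and the lock drop, so the object is never touched after its reference is released. The rule, sketched below; the lookup helper name is assumed by analogy with __mpt3sas_get_sdev_from_target seen elsewhere in this patch.]

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_from_target(ioc, sas_target_priv_data);
	if (pcie_device) {
		if (pcie_device->nvme_mdts)                 /* use... */
			blk_queue_max_hw_sectors(sdev->request_queue,
			    pcie_device->nvme_mdts / 512);
		pcie_device_put(pcie_device);               /* ...then put */
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);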
@@ -2450,8 +2439,6 @@ scsih_slave_configure(struct scsi_device *sdev)
* params[0] number of heads (max 255)
* params[1] number of sectors (max 63)
* params[2] number of cylinders
- *
- * Return nothing.
*/
static int
scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
@@ -2493,8 +2480,6 @@ scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
* _scsih_response_code - translation of device response code
* @ioc: per adapter object
* @response_code: response code returned by the device
- *
- * Return nothing.
*/
static void
_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
@@ -2544,8 +2529,8 @@ _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
*
* The callback handler when using scsih_issue_tm.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
@@ -2640,7 +2625,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* The callback index is set inside `ioc->tm_cb_idx`.
* The caller is responsible to check for outstanding commands.
*
- * Return SUCCESS or FAILED.
+ * Return: SUCCESS or FAILED.
*/
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
@@ -2708,11 +2693,9 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2SCSITaskManagementRequest_t)/4);
- if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) {
+ if (mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->tm_cmds.status, mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
rc = mpt3sas_base_hard_reset_handler(ioc,
FORCE_BIG_HAMMER);
rc = (!rc) ? SUCCESS : FAILED;
@@ -2846,7 +2829,7 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
* scsih_abort - eh threads main abort routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
scsih_abort(struct scsi_cmnd *scmd)
@@ -2914,7 +2897,7 @@ scsih_abort(struct scsi_cmnd *scmd)
* scsih_dev_reset - eh threads main device reset routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
@@ -2992,7 +2975,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
* scsih_target_reset - eh threads main target reset routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
scsih_target_reset(struct scsi_cmnd *scmd)
@@ -3069,7 +3052,7 @@ scsih_target_reset(struct scsi_cmnd *scmd)
* scsih_host_reset - eh threads main host reset routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
scsih_host_reset(struct scsi_cmnd *scmd)
@@ -3105,8 +3088,6 @@ out:
*
* This adds the firmware event object into link list, then queues it up to
* be processed from user context.
- *
- * Return nothing.
*/
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
@@ -3133,8 +3114,6 @@ _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
* Context: This function will acquire ioc->fw_event_lock.
*
* If the fw_event is on the fw_event_list, remove it and do a put.
- *
- * Return nothing.
*/
static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
@@ -3155,8 +3134,6 @@ _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
* mpt3sas_send_trigger_data_event - send event for processing trigger data
* @ioc: per adapter object
* @event_data: trigger event data
- *
- * Return nothing.
*/
void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
@@ -3181,8 +3158,6 @@ mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
/**
* _scsih_error_recovery_delete_devices - remove devices not responding
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -3203,8 +3178,6 @@ _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_port_enable_complete - port enable completed (fake event)
* @ioc: per adapter object
- *
- * Return nothing.
*/
void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
@@ -3242,8 +3215,6 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
*
* Walk the firmware event queue, either killing timers, or waiting
* for outstanding events to complete
- *
- * Return nothing.
*/
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
@@ -3369,7 +3340,7 @@ _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
/**
* _scsih_ublock_io_device - prepare device to be deleted
* @ioc: per adapter object
- * @sas_addr: sas address
+ * @sas_address: sas address
*
* unblock then put device in offline state
*/
@@ -3395,7 +3366,6 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
/**
* _scsih_block_io_all_device - set the device state to SDEV_BLOCK
* @ioc: per adapter object
- * @handle: device handle
*
* During device pull we need to appropriately set the sdev state.
*/
@@ -3730,8 +3700,8 @@ out:
* handshake protocol with controller firmware.
* It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -3822,8 +3792,8 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* This code is part of the code to initiate the device removal
* handshake protocol with controller firmware.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -3909,8 +3879,8 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @reply: reply message frame(lower 32bit addr)
* Context: interrupt time.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4004,19 +3974,19 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
u16 smid, u16 handle)
- {
- Mpi2SasIoUnitControlRequest_t *mpi_request;
- u32 ioc_state;
- int i = smid - ioc->internal_smid;
- unsigned long flags;
+{
+ Mpi2SasIoUnitControlRequest_t *mpi_request;
+ u32 ioc_state;
+ int i = smid - ioc->internal_smid;
+ unsigned long flags;
- if (ioc->remove_host) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ if (ioc->remove_host) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host has been removed\n",
__func__, ioc->name));
- return;
- } else if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ return;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host in pci error recovery\n",
__func__, ioc->name));
return;
@@ -4059,8 +4029,8 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
* This will check delayed internal messages list, and process the
* next request.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -4098,8 +4068,8 @@ mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* This will check delayed target reset list, and feed the
 * next request.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -4139,8 +4109,6 @@ _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* This handles the case where driver receives multiple expander
* add and delete events in a single shot. When there is a delete event
* the routine will void any pending add events waiting in the event queue.
- *
- * Return nothing.
*/
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4222,8 +4190,6 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
* or device add and delete events in a single shot. When there
* is a delete event the routine will void any pending add
* events waiting in the event queue.
- *
- * Return nothing.
*/
static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4348,8 +4314,6 @@ _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
* volume has been deleted or removed. When the target reset is sent
* to volume, the PD target resets need to be queued to start upon
* completion of the volume target reset.
- *
- * Return nothing.
*/
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4433,8 +4397,6 @@ _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
* This will handle the case when the cable connected to entire volume is
* pulled. We will take care of setting the deleted flag so normal IO will
* not be sent.
- *
- * Return nothing.
*/
static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4456,8 +4418,6 @@ _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @event_data: the temp threshold event data
* Context: interrupt time.
- *
- * Return nothing.
*/
static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
@@ -4496,8 +4456,6 @@ static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
*
* The flushing out of all pending scmd commands following host reset,
* where all IO is dropped to the floor.
- *
- * Return nothing.
*/
static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
@@ -4533,8 +4491,6 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
* @mpi_request: pointer to the SCSI_IO request message frame
*
* Supporting protection 1 and 3.
- *
- * Returns nothing
*/
static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
@@ -4568,7 +4524,7 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
mpi_request->CDB.EEDP32.PrimaryReferenceTag =
- cpu_to_be32(scsi_prot_ref_tag(scmd));
+ cpu_to_be32(t10_pi_ref_tag(scmd->request));
break;
case SCSI_PROT_DIF_TYPE3:
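
[Editorial note: the EEDP hunk above swaps the SCSI-layer scsi_prot_ref_tag(scmd) for the block-layer t10_pi_ref_tag(scmd->request) from <linux/t10-pi.h>, following the relocation of the protection-information helpers. A sketch of what that helper computes, modeled on the 4.19-era header; the exact body may differ.]

	/* The Type 1/2 reference tag is the request's start position scaled
	 * to protection intervals, truncated to 32 bits; with 512-byte
	 * intervals this is simply the low 32 bits of the LBA. */
	static inline u32 t10_pi_ref_tag_sketch(struct request *rq)
	{
		return blk_rq_pos(rq) >>
			(rq->q->integrity.interval_exp - 9) & 0xffffffff;
	}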
@@ -4593,8 +4549,6 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
* _scsih_eedp_error_handling - return sense code for EEDP errors
* @scmd: pointer to scsi command object
* @ioc_status: ioc status
- *
- * Returns nothing
*/
static void
_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
@@ -4623,12 +4577,12 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
/**
* scsih_qcmd - main scsi request entry point
+ * @shost: SCSI host pointer
* @scmd: pointer to scsi command object
- * @done: function pointer to be invoked on completion
*
* The callback index is set inside `ioc->scsi_io_cb_idx`.
*
- * Returns 0 on success. If there's a failure, return either:
+ * Return: 0 on success. If there's a failure, return either:
* SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
* SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
*/
@@ -4674,19 +4628,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
}
- /* host recovery or link resets sent via IOCTLs */
- if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
+ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
+ /* host recovery or link resets sent via IOCTLs */
return SCSI_MLQUEUE_HOST_BUSY;
-
- /* device has been deleted */
- else if (sas_target_priv_data->deleted) {
+ } else if (sas_target_priv_data->deleted) {
+ /* device has been deleted */
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
- /* device busy with task management */
} else if (sas_target_priv_data->tm_busy ||
- sas_device_priv_data->block)
+ sas_device_priv_data->block) {
+ /* device busy with task management */
return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
/*
* Bug work around for firmware SATL handling. The loop
@@ -4791,8 +4745,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
* _scsih_normalize_sense - normalize descriptor and fixed format sense data
* @sense_buffer: sense data returned by target
* @data: normalized skey/asc/ascq
- *
- * Return nothing.
*/
static void
_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
@@ -4815,12 +4767,11 @@ _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
* @ioc: per adapter object
* @scmd: pointer to scsi command object
* @mpi_reply: reply mf payload returned from firmware
+ * @smid: ?
*
* scsi_status - SCSI Status code returned from target device
* scsi_state - state info associated with SCSI_IO determined by ioc
* ioc_status - ioc supplied status info
- *
- * Return nothing.
*/
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
@@ -5044,8 +4995,6 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
* @ioc: per adapter object
* @handle: device handle
* Context: process
- *
- * Return nothing.
*/
static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -5089,8 +5038,6 @@ out:
* @ioc: per adapter object
* @sas_device: sas device whose PFA LED has to turned off
* Context: process
- *
- * Return nothing.
*/
static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
@@ -5128,8 +5075,6 @@ _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @handle: device handle
* Context: interrupt.
- *
- * Return nothing.
*/
static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -5151,8 +5096,6 @@ _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @ioc: per adapter object
* @handle: device handle
* Context: interrupt.
- *
- * Return nothing.
*/
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -5228,8 +5171,8 @@ out_unlock:
*
* Callback handler when using _scsih_qcmd.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
@@ -5416,6 +5359,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
scsi_set_resid(scmd, 0);
+ /* fall through */
case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
case MPI2_IOCSTATUS_SUCCESS:
scmd->result = (DID_OK << 16) | scsi_status;
@@ -5468,8 +5412,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 * During port enable, fw will send topology events for every device. It's
 * possible that the handles may change from the previous setting, so this
 * code keeps the handles updated if they change.
- *
- * Return nothing.
*/
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
@@ -5523,8 +5465,6 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
*
* Creating host side data object, stored in ioc->sas_hba
- *
- * Return nothing.
*/
static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
@@ -5672,7 +5612,7 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
*
* Creating expander object, stored in ioc->sas_expander_list.
*
- * Return 0 for success, else error.
+ * Return: 0 for success, else error.
*/
static int
_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -5812,7 +5752,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
_scsih_expander_node_add(ioc, sas_expander);
- return 0;
+ return 0;
out_fail:
@@ -5827,8 +5767,6 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* mpt3sas_expander_remove - removing expander object
* @ioc: per adapter object
* @sas_address: expander sas_address
- *
- * Return nothing.
*/
void
mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
@@ -5857,8 +5795,8 @@ mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
* Callback handler when sending internal generated SCSI_IO.
* The callback index passed is `ioc->scsih_cb_idx`
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
@@ -5892,9 +5830,9 @@ _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* @ioc: per adapter object
* @sas_address: sas address
* @handle: sas device handle
- * @access_flags: errors returned during discovery of the device
+ * @access_status: errors returned during discovery of the device
*
- * Return 0 for success, else failure
+ * Return: 0 for success, else failure
*/
static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
@@ -5956,10 +5894,8 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
* @ioc: per adapter object
* @parent_sas_address: sas address of parent expander or sas host
* @handle: attached device handle
- * @phy_numberv: phy number
+ * @phy_number: phy number
* @link_rate: new link rate
- *
- * Returns nothing.
*/
static void
_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
@@ -6076,7 +6012,7 @@ out_unlock:
*
* Creating end device object, stored in ioc->sas_device_list.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
@@ -6208,9 +6144,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
/**
* _scsih_remove_device - removing sas device object
* @ioc: per adapter object
- * @sas_device_delete: the sas_device object
- *
- * Return nothing.
+ * @sas_device: the sas_device object
*/
static void
_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
@@ -6446,6 +6380,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (!test_bit(handle, ioc->pend_os_device_add))
break;
+ /* fall through */
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
@@ -6475,10 +6410,9 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
/**
* _scsih_sas_device_status_change_event_debug - debug for device event
+ * @ioc: ?
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -6546,8 +6480,6 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
@@ -6608,9 +6540,9 @@ out:
* @ioc: per adapter object
* @wwid: wwid
* @handle: sas device handle
- * @access_flags: errors returned during discovery of the device
+ * @access_status: errors returned during discovery of the device
*
- * Return 0 for success, else failure
+ * Return: 0 for success, else failure
*/
static u8
_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
@@ -6695,8 +6627,6 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
* from SML and free up associated memory
* @ioc: per adapter object
* @pcie_device: the pcie_device object
- *
- * Return nothing.
*/
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
@@ -6770,8 +6700,6 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
* _scsih_pcie_check_device - checking device responsiveness
* @ioc: per adapter object
* @handle: attached device handle
- *
- * Returns nothing.
*/
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -6863,7 +6791,7 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
*
* Creating end device object, stored in ioc->pcie_device_list.
*
- * Return 1 means queue the event later, 0 means complete the event
+ * Return: 1 means queue the event later, 0 means complete the event
*/
static int
_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -6873,7 +6801,6 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
Mpi2ConfigReply_t mpi_reply;
struct _pcie_device *pcie_device;
struct _enclosure_node *enclosure_dev;
- u32 pcie_device_type;
u32 ioc_status;
u64 wwid;
@@ -6935,8 +6862,6 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
pcie_device->port_num = pcie_device_pg0.PortNum;
pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
- pcie_device_type = pcie_device->device_info &
- MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE;
pcie_device->enclosure_handle =
le16_to_cpu(pcie_device_pg0.EnclosureHandle);
@@ -7165,6 +7090,7 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
event_data->PortEntry[i].PortStatus &= 0xF0;
event_data->PortEntry[i].PortStatus |=
MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
+ /* fall through */
case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
if (ioc->shost_recovery)
break;
@@ -7190,12 +7116,10 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * _scsih_pcie_device_status_change_event_debug - debug for
- * device event
+ * _scsih_pcie_device_status_change_event_debug - debug for device event
+ * @ioc: ?
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -7262,8 +7186,6 @@ _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7314,8 +7236,6 @@ out:
* @ioc: per adapter object
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -7348,8 +7268,6 @@ _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7416,8 +7334,6 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7483,6 +7399,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
if (sas_device_priv_data->sas_target->flags &
MPT_TARGET_FLAGS_VOLUME)
continue;
+ /* skip PCIe devices */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_PCIE_DEVICE)
+ continue;
handle = sas_device_priv_data->sas_target->handle;
lun = sas_device_priv_data->lun;
@@ -7580,8 +7500,6 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7617,8 +7535,6 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7654,8 +7570,6 @@ _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
@@ -7684,7 +7598,7 @@ _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
* @handle: device handle for physical disk
* @phys_disk_num: physical disk number
*
- * Return 0 for success, else failure.
+ * Return: 0 for success, else failure.
*/
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
@@ -7736,10 +7650,10 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ issue_reset =
+ mpt3sas_base_check_cmd_timeout(ioc,
+ ioc->scsih_cmds.status, mpi_request,
+ sizeof(Mpi2RaidActionRequest_t)/4);
rc = -EFAULT;
goto out;
}
@@ -7794,8 +7708,6 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
@@ -7852,8 +7764,6 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @handle: volume device handle
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -7887,8 +7797,6 @@ _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
@@ -7929,8 +7837,6 @@ _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
@@ -7980,8 +7886,6 @@ _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
@@ -7997,8 +7901,6 @@ _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
@@ -8050,8 +7952,6 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -8130,8 +8030,6 @@ _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
@@ -8202,8 +8100,6 @@ _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
@@ -8286,8 +8182,6 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
@@ -8372,8 +8266,6 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -8414,8 +8306,6 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
@@ -8473,8 +8363,6 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_sas_devices.
- *
- * Return nothing.
*/
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
@@ -8569,8 +8457,6 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
* _scsih_create_enclosure_list_after_reset - free the existing enclosure
* list and rebuild it by scanning all Enclosure Page(0)s
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
@@ -8617,8 +8503,6 @@ _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -8661,8 +8545,6 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponding_devices.
- *
- * Return nothing.
*/
static void
_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
@@ -8736,8 +8618,6 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -8785,8 +8665,6 @@ out:
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_raid_devices.
- *
- * Return nothing.
*/
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
@@ -8842,8 +8720,6 @@ _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -8914,8 +8790,6 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_expanders.
- *
- * Return nothing.
*/
static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
@@ -8968,8 +8842,6 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
@@ -9009,8 +8881,6 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
/**
* _scsih_remove_unresponding_devices - removing unresponding devices
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -9136,8 +9006,6 @@ _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
/**
* _scsih_scan_for_devices_after_reset - scan for devices after host reset
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
@@ -9421,60 +9289,68 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc->name);
pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
}
+
/**
- * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
* @ioc: per adapter object
- * @reset_phase: phase
*
* The handler for doing any required cleanup or initialization.
+ */
+void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+}
+
+/**
+ * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
+ * @ioc: per adapter object
*
- * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
- * MPT3_IOC_DONE_RESET
- *
- * Return nothing.
+ * The handler for doing any required cleanup or initialization.
*/
void
-mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- switch (reset_phase) {
- case MPT3_IOC_PRE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
- break;
- case MPT3_IOC_AFTER_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
- if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
- ioc->scsih_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
- complete(&ioc->scsih_cmds.done);
- }
- if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
- ioc->tm_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
- complete(&ioc->tm_cmds.done);
- }
+ if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
+ ioc->scsih_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
+ complete(&ioc->scsih_cmds.done);
+ }
+ if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
+ ioc->tm_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
+ complete(&ioc->tm_cmds.done);
+ }
- memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
- memset(ioc->device_remove_in_progress, 0,
- ioc->device_remove_in_progress_sz);
- _scsih_fw_event_cleanup_queue(ioc);
- _scsih_flush_running_cmds(ioc);
- break;
- case MPT3_IOC_DONE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
+ memset(ioc->device_remove_in_progress, 0,
+ ioc->device_remove_in_progress_sz);
+ _scsih_fw_event_cleanup_queue(ioc);
+ _scsih_flush_running_cmds(ioc);
+}
+
+/**
+ * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
+ * @ioc: per adapter object
+ *
+ * The handler for doing any required cleanup or initialization.
+ */
+void
+mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
- if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
- !ioc->sas_hba.num_phys)) {
- _scsih_prep_device_scan(ioc);
- _scsih_create_enclosure_list_after_reset(ioc);
- _scsih_search_responding_sas_devices(ioc);
- _scsih_search_responding_pcie_devices(ioc);
- _scsih_search_responding_raid_devices(ioc);
- _scsih_search_responding_expanders(ioc);
- _scsih_error_recovery_delete_devices(ioc);
- }
- break;
+ if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
+ !ioc->sas_hba.num_phys)) {
+ _scsih_prep_device_scan(ioc);
+ _scsih_create_enclosure_list_after_reset(ioc);
+ _scsih_search_responding_sas_devices(ioc);
+ _scsih_search_responding_pcie_devices(ioc);
+ _scsih_search_responding_raid_devices(ioc);
+ _scsih_search_responding_expanders(ioc);
+ _scsih_error_recovery_delete_devices(ioc);
}
}
@@ -9483,8 +9359,6 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
@@ -9519,7 +9393,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
break;
case MPT3SAS_PORT_ENABLE_COMPLETE:
ioc->start_scan = 0;
- if (missing_delay[0] != -1 && missing_delay[1] != -1)
+ if (missing_delay[0] != -1 && missing_delay[1] != -1)
mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
missing_delay[1]);
dewtprintk(ioc, pr_info(MPT3SAS_FMT
@@ -9577,13 +9451,10 @@ out:
/**
* _firmware_event_work
- * @ioc: per adapter object
* @work: The fw_event_work object
* Context: user.
*
* wrappers for the work thread handling firmware events
- *
- * Return nothing.
*/
static void
@@ -9605,8 +9476,8 @@ _firmware_event_work(struct work_struct *work)
* This function merely adds a new work task into ioc->firmware_event_thread.
* The tasks are worked from _firmware_event_work in user context.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
@@ -9791,8 +9662,6 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
*
* Removing object and freeing associated memory from the
* ioc->sas_expander_list.
- *
- * Return nothing.
*/
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
@@ -9841,8 +9710,6 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
*
* Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
* the host system is shutting down.
- *
- * Return nothing.
*/
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
@@ -9914,7 +9781,6 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
* @pdev: PCI device struct
*
* Routine called when unloading the driver.
- * Return nothing.
*/
static void scsih_remove(struct pci_dev *pdev)
{
@@ -9996,8 +9862,6 @@ static void scsih_remove(struct pci_dev *pdev)
/**
* scsih_shutdown - routine call during system shutdown
* @pdev: PCI device struct
- *
- * Return nothing.
*/
static void
scsih_shutdown(struct pci_dev *pdev)
@@ -10220,7 +10084,7 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
*
* Get the next pcie device from pcie_device_init_list list.
*
- * Returns pcie device structure if pcie_device_init_list list is not empty
+ * Return: pcie device structure if pcie_device_init_list list is not empty
* otherwise returns NULL
*/
static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
@@ -10390,7 +10254,7 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
if (time >= (300 * HZ)) {
- ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
pr_info(MPT3SAS_FMT
"port enable: FAILED with timeout (timeout=300s)\n",
ioc->name);
@@ -10412,7 +10276,7 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
- ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
if (ioc->wait_for_discovery_to_complete) {
ioc->wait_for_discovery_to_complete = 0;
@@ -10568,7 +10432,7 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
* @pdev: PCI device struct
* @id: pci device id
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int
_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -10818,7 +10682,7 @@ out_add_shost_fail:
* @pdev: PCI device struct
* @state: PM state change to (usually PCI_D3)
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int
scsih_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -10845,7 +10709,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
* scsih_resume - power management resume main entry point
* @pdev: PCI device struct
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int
scsih_resume(struct pci_dev *pdev)
@@ -10881,8 +10745,7 @@ scsih_resume(struct pci_dev *pdev)
*
* Description: Called when a PCI error is detected.
*
- * Return value:
- * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+ * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
*/
static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
@@ -11143,7 +11006,7 @@ static struct pci_driver mpt3sas_driver = {
/**
* scsih_init - main entry point for this driver.
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int
scsih_init(void)
@@ -11193,7 +11056,7 @@ scsih_init(void)
/**
* scsih_exit - exit point for this driver (when it is a module).
*
- * Returns 0 success, anything else error.
*/
static void
scsih_exit(void)
@@ -11223,7 +11086,7 @@ scsih_exit(void)
/**
* _mpt3sas_init - main entry point for this driver.
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int __init
_mpt3sas_init(void)
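
Note on the refactoring above: the single phased mpt3sas_scsih_reset_handler() is split into three dedicated entry points. A minimal sketch of the order in which the base layer would now drive them (the wrapper name and the elided reset steps are assumptions for illustration, not driver code):

	/* Sketch only: replaces the old reset_phase switch. */
	static void example_host_reset(struct MPT3SAS_ADAPTER *ioc)
	{
		mpt3sas_scsih_pre_reset_handler(ioc);	/* was MPT3_IOC_PRE_RESET */
		/* ... bring the IOC down and diag-reset it ... */
		mpt3sas_scsih_after_reset_handler(ioc);	/* was MPT3_IOC_AFTER_RESET */
		/* ... re-initialize firmware and queues ... */
		mpt3sas_scsih_reset_done_handler(ioc);	/* was MPT3_IOC_DONE_RESET */
	}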
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 3a143bb5ca72..f8cc2677c1cd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -134,7 +134,7 @@ _transport_convert_phy_link_rate(u8 link_rate)
*
* Populates sas identify info.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
@@ -226,8 +226,8 @@ _transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
* Callback handler when sending internal generated transport cmds.
* The callback index passed is `ioc->transport_cb_idx`
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -287,7 +287,7 @@ struct rep_manu_reply {
*
* Fills in the sas_expander_device object when SMP port is created.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
@@ -460,8 +460,6 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
* _transport_delete_port - helper function to removing a port
* @ioc: per adapter object
* @mpt3sas_port: mpt3sas per port object
- *
- * Returns nothing.
*/
static void
_transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
@@ -489,8 +487,6 @@ _transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @mpt3sas_port: mpt3sas per port object
* @mpt3sas_phy: mpt3sas per phy object
- *
- * Returns nothing.
*/
static void
_transport_delete_phy(struct MPT3SAS_ADAPTER *ioc,
@@ -513,8 +509,6 @@ _transport_delete_phy(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @mpt3sas_port: mpt3sas per port object
* @mpt3sas_phy: mpt3sas per phy object
- *
- * Returns nothing.
*/
static void
_transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port,
@@ -538,8 +532,6 @@ _transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port,
* @sas_node: sas node object (either expander or sas host)
* @mpt3sas_phy: mpt3sas per phy object
* @sas_address: sas address of device/expander where phy needs to be added
- *
- * Returns nothing.
*/
static void
_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
@@ -563,7 +555,7 @@ _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
return;
}
_transport_add_phy(ioc, mpt3sas_port, mpt3sas_phy);
- return;
+ return;
}
}
@@ -573,8 +565,6 @@ _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @sas_node: sas node object (either expander or sas host)
* @mpt3sas_phy: mpt3sas per phy object
- *
- * Returns nothing.
*/
static void
_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
@@ -635,7 +625,7 @@ _transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
*
* Adding new port object to the sas_node->sas_port_list.
*
- * Returns mpt3sas_port.
+ * Return: mpt3sas_port.
*/
struct _sas_port *
mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
@@ -794,8 +784,6 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
*
* Removing object and freeing associated memory from the
* ioc->sas_port_list.
- *
- * Return nothing.
*/
void
mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
@@ -860,7 +848,7 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
* @phy_pg0: sas phy page 0
* @parent_dev: parent device class object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
@@ -928,7 +916,7 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
* @expander_pg1: expander page 1
* @parent_dev: parent device class object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
@@ -995,10 +983,8 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
* @ioc: per adapter object
* @sas_address: sas address of parent expander or sas host
* @handle: attached device handle
- * @phy_numberv: phy number
+ * @phy_number: phy number
* @link_rate: new link rate
- *
- * Returns nothing.
*/
void
mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
@@ -1090,7 +1076,7 @@ struct phy_error_log_reply {
* @ioc: per adapter object
* @phy: The sas phy object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
*/
static int
@@ -1262,7 +1248,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
* _transport_get_linkerrors - return phy counters for both hba and expanders
* @phy: The sas phy object
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
*/
static int
@@ -1311,10 +1297,11 @@ _transport_get_linkerrors(struct sas_phy *phy)
/**
* _transport_get_enclosure_identifier -
- * @phy: The sas phy object
+ * @rphy: The sas phy object
+ * @identifier: enclosure logical id, returned to the caller
*
* Obtain the enclosure logical id for an expander.
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
@@ -1342,9 +1329,9 @@ _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
/**
* _transport_get_bay_identifier -
- * @phy: The sas phy object
+ * @rphy: The sas phy object
*
- * Returns the slot id for a device that resides inside an enclosure.
+ * Return: the slot id for a device that resides inside an enclosure.
*/
static int
_transport_get_bay_identifier(struct sas_rphy *rphy)
@@ -1400,8 +1387,9 @@ struct phy_control_reply {
* _transport_expander_phy_control - expander phy control
* @ioc: per adapter object
* @phy: The sas phy object
+ * @phy_operation: smp phy control operation (link reset, hard reset or disable)
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*
*/
static int
@@ -1571,7 +1559,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
* @phy: The sas phy object
* @hard_reset: issue a hard phy reset when set, link reset otherwise
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_phy_reset(struct sas_phy *phy, int hard_reset)
@@ -1623,7 +1611,7 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
* @enable: enable phy when true
*
* Only support sas_host direct attached phys.
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_phy_enable(struct sas_phy *phy, int enable)
@@ -1761,7 +1749,8 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
* @rates: rates defined in sas_phy_linkrates
*
* Only support sas_host direct attached phys.
- * Returns 0 for success, non-zero for failure.
+ *
+ * Return: 0 for success, non-zero for failure.
*/
static int
_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
@@ -1904,9 +1893,9 @@ _transport_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf,
/**
* _transport_smp_handler - transport portal for smp passthru
+ * @job: the bsg_job object carrying the smp request and response
* @shost: shost object
* @rphy: sas transport rphy object
- * @req:
*
* This used primarily for smp_utils.
* Example:
@@ -1936,12 +1925,12 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
rc = -EFAULT;
- goto out;
+ goto job_done;
}
rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
if (rc)
- goto out;
+ goto job_done;
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
@@ -2066,6 +2055,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
mutex_unlock(&ioc->transport_cmds.mutex);
+job_done:
bsg_job_done(job, rc, reslen);
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index b60fd7a3b571..cae7c1eaef34 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -62,7 +62,7 @@
/**
* _mpt3sas_raise_sigio - notify app
* @ioc: per adapter object
- * @event_data:
+ * @event_data: event data payload
*/
static void
_mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
@@ -107,7 +107,7 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
/**
* mpt3sas_process_trigger_data - process the event data for the trigger
* @ioc: per adapter object
- * @event_data:
+ * @event_data: event data payload
*/
void
mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
@@ -209,8 +209,8 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
/**
* mpt3sas_trigger_event - Event trigger handler
* @ioc: per adapter object
- * @event:
- * @log_entry_qualifier:
+ * @event: ?
+ * @log_entry_qualifier: ?
*
*/
void
@@ -288,9 +288,9 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
/**
* mpt3sas_trigger_scsi - SCSI trigger handler
* @ioc: per adapter object
- * @sense_key:
- * @asc:
- * @ascq:
+ * @sense_key: sense key
+ * @asc: additional sense code
+ * @ascq: additional sense code qualifier
*
*/
void
@@ -364,8 +364,8 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
/**
* mpt3sas_trigger_mpi - MPI trigger handler
* @ioc: per adapter object
- * @ioc_status:
- * @loginfo:
+ * @ioc_status: ?
+ * @loginfo: ?
*
*/
void
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index 45aa94915cbf..b4927f2b7677 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -267,9 +267,6 @@ out_error:
* @scmd: pointer to scsi command object
* @raid_device: pointer to raid device data structure
* @mpi_request: pointer to the SCSI_IO reqest message frame
- * @smid: system request message index
- *
- * Returns nothing
*/
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
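
Most of the comment churn in this series follows one rule: scripts/kernel-doc only treats a literal "Return:" section as the return-value description, and prose such as "Return nothing." carries no information for void functions, so it is dropped. The target shape, with placeholder names, is:

	/**
	 * example_check_device - look up a device by handle (placeholder)
	 * @ioc: per adapter object
	 * @handle: device handle
	 *
	 * Return: 0 for success, else failure.
	 */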
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index dc4e801b2cef..6cd3e289ef99 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4611,7 +4611,7 @@ static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
* in order to keep it alive.
*/
if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) {
- cmd->result = ScsiResult(DID_RESET, 0);
+ cmd->result = DID_RESET << 16;
ncr_queue_done_cmd(np, cmd);
}
@@ -4957,7 +4957,7 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
/*
** Check condition code
*/
- cmd->result = ScsiResult(DID_OK, S_CHECK_COND);
+ cmd->result = DID_OK << 16 | S_CHECK_COND;
/*
** Copy back sense data to caller's buffer.
@@ -4978,7 +4978,7 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
/*
** Reservation Conflict condition code
*/
- cmd->result = ScsiResult(DID_OK, S_CONFLICT);
+ cmd->result = DID_OK << 16 | S_CONFLICT;
} else if ((cp->host_status == HS_COMPLETE)
&& (cp->scsi_status == S_BUSY ||
@@ -8043,7 +8043,7 @@ printk("ncr53c8xx_queue_command\n");
spin_lock_irqsave(&np->smp_lock, flags);
if ((sts = ncr_queue_command(np, cmd)) != DID_OK) {
- cmd->result = ScsiResult(sts, 0);
+ cmd->result = sts << 16;
#ifdef DEBUG_NCR53C8XX
printk("ncr53c8xx : command not queued - result=%d\n", sts);
#endif
@@ -8234,7 +8234,7 @@ static void process_waiting_list(struct ncb *np, int sts)
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts);
#endif
- wcmd->result = ScsiResult(sts, 0);
+ wcmd->result = sts << 16;
ncr_queue_done_cmd(np, wcmd);
}
}
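
The ScsiResult() removals above rely on the layout of the SCSI midlayer result word: the host byte occupies bits 16-23 and the SCSI status byte bits 0-7, so ScsiResult(DID_RESET, 0) is exactly DID_RESET << 16. A compilable illustration (constant values mirror scsi.h and the driver's S_* codes; treat them as assumptions here):

	#include <stdio.h>

	#define DID_OK		0x00	/* host byte: no error */
	#define DID_RESET	0x08	/* host byte: bus reset */
	#define S_CHECK_COND	0x02	/* SCSI status: CHECK CONDITION */

	/* What the removed ScsiResult(host, status) macro open-codes to. */
	static unsigned int scsi_result(unsigned int host, unsigned int status)
	{
		return host << 16 | status;
	}

	int main(void)
	{
		printf("DID_RESET         -> 0x%08x\n", scsi_result(DID_RESET, 0));
		printf("DID_OK|CHECK_COND -> 0x%08x\n",
		       scsi_result(DID_OK, S_CHECK_COND));
		return 0;
	}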
diff --git a/drivers/scsi/nsp32_debug.c b/drivers/scsi/nsp32_debug.c
index 58806f432a16..4f1d4bf9c775 100644
--- a/drivers/scsi/nsp32_debug.c
+++ b/drivers/scsi/nsp32_debug.c
@@ -137,7 +137,7 @@ static void print_commandk (unsigned char *command)
printk("\n");
}
-static void show_command(Scsi_Cmnd *SCpnt)
+static void show_command(struct scsi_cmnd *SCpnt)
{
print_commandk(SCpnt->cmnd);
}
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index a269da1a6c75..387dc87e4d22 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -126,22 +126,24 @@ static void init_sqe(struct iscsi_task_params *task_params,
sgl_task_params,
dif_task_params);
- if (scsi_is_slow_sgl(sgl_task_params->num_sges,
- sgl_task_params->small_mid_sge))
- num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
- else
- num_sges = min(sgl_task_params->num_sges,
- (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
- }
+ if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge))
+ num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
+ else
+ num_sges = min(sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
+ }
- SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, num_sges);
- SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
- buf_size);
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
+ num_sges);
+ SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
+ buf_size);
- if (GET_FIELD(pdu_header->hdr_second_dword,
- ISCSI_CMD_HDR_TOTAL_AHS_LEN))
- SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CDB_SIZE,
- cmd_params->extended_cdb_sge.sge_len);
+ if (GET_FIELD(pdu_header->hdr_second_dword,
+ ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ ISCSI_WQE_CDB_SIZE,
+ cmd_params->extended_cdb_sge.sge_len);
}
break;
case ISCSI_TASK_TYPE_INITIATOR_READ:
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cff83b9457f7..aa96bccb5a96 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -524,7 +524,7 @@ static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
id_tbl->max = size;
id_tbl->next = next;
spin_lock_init(&id_tbl->lock);
- id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
+ id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
if (!id_tbl->table)
return -ENOMEM;
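
The kcalloc() change above fixes the size of the id bitmap: the kernel's bitmap helpers (set_bit(), test_bit(), ...) operate on unsigned long words, so an allocation sized in 32-bit words can be touched past its end on 64-bit machines. A userspace mirror of the arithmetic (macros copied to match their kernel definitions):

	#include <stdio.h>
	#include <limits.h>

	#define BITS_PER_LONG		(CHAR_BIT * sizeof(long))
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_LONG)

	int main(void)
	{
		size_t size = 32;	/* ids to track, one bit each */

		/* old: ceil(size / 32) four-byte words = 4 bytes here,
		 * but test_bit(31) reads a full 8-byte long on 64-bit */
		size_t old_bytes = DIV_ROUND_UP(size, 32) * 4;
		/* new: enough unsigned longs to hold size bits = 8 bytes */
		size_t new_bytes = BITS_TO_LONGS(size) * sizeof(long);

		printf("old=%zu bytes, new=%zu bytes\n", old_bytes, new_bytes);
		return 0;
	}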
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index c8731568f9c4..4888b999e82f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -518,6 +518,9 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
if (unlikely(pci_channel_offline(ha->pdev)))
return 0;
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
!ha->isp_ops->write_nvram)
return 0;
@@ -570,7 +573,7 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
return 0;
- if (qla2x00_reset_active(vha))
+ if (qla2x00_chip_is_down(vha))
return 0;
rval = qla2x00_read_sfp_dev(vha, buf, count);
@@ -733,6 +736,15 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
int type;
port_id_t did;
+ if (!capable(CAP_SYS_ADMIN))
+ return 0;
+
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return 0;
+
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
type = simple_strtol(buf, NULL, 10);
did.b.domain = (type & 0x00ff0000) >> 16;
@@ -771,6 +783,12 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
return 0;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return 0;
+
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
if (ha->xgmac_data)
goto do_read;
@@ -825,6 +843,9 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
if (ha->dcbx_tlv)
goto do_read;
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
&ha->dcbx_tlv_dma, GFP_KERNEL);
if (!ha->dcbx_tlv) {
@@ -1036,7 +1057,7 @@ qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
vha->device_flags & DFLG_NO_CABLE)
len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
else if (atomic_read(&vha->loop_state) != LOOP_READY ||
- qla2x00_reset_active(vha))
+ qla2x00_chip_is_down(vha))
len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
else {
len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
@@ -1163,7 +1184,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return -EPERM;
- if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+ if (qla2x00_chip_is_down(vha)) {
ql_log(ql_log_warn, vha, 0x707a,
"Abort ISP active -- ignoring beacon request.\n");
return -EBUSY;
@@ -1350,7 +1371,7 @@ qla2x00_thermal_temp_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
uint16_t temp = 0;
- if (qla2x00_reset_active(vha)) {
+ if (qla2x00_chip_is_down(vha)) {
ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
goto done;
}
@@ -1381,7 +1402,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
}
- if (qla2x00_reset_active(vha))
+ if (qla2x00_chip_is_down(vha))
ql_log(ql_log_warn, vha, 0x707c,
"ISP reset active.\n");
else if (!vha->hw->flags.eeh_busy)
@@ -1840,7 +1861,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (unlikely(pci_channel_offline(ha->pdev)))
goto done;
- if (qla2x00_reset_active(vha))
+ if (qla2x00_chip_is_down(vha))
goto done;
stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 5fd44c50bbac..c7533fa7f46e 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1130,6 +1130,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
ha->fw_dump);
goto qla24xx_fw_dump_failed;
}
+ QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp24;
qla2xxx_prep_dump(ha, ha->fw_dump);
@@ -1384,6 +1385,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
ha->fw_dump);
goto qla25xx_fw_dump_failed;
}
+ QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp25;
qla2xxx_prep_dump(ha, ha->fw_dump);
ha->fw_dump->version = htonl(2);
@@ -2036,6 +2038,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"request...\n", ha->fw_dump);
goto qla83xx_fw_dump_failed;
}
+ QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp83;
qla2xxx_prep_dump(ha, ha->fw_dump);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 0f94b1d62d3f..a9dc9c4a6382 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -313,6 +313,7 @@ struct srb_cmd {
#define SRB_CRC_CTX_DMA_VALID BIT_2 /* DIF: context DMA valid */
#define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */
#define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */
+#define SRB_WAKEUP_ON_COMP BIT_6
/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
@@ -379,6 +380,7 @@ struct srb_iocb {
#define SRB_LOGIN_COND_PLOGI BIT_1
#define SRB_LOGIN_SKIP_PRLI BIT_2
#define SRB_LOGIN_NVME_PRLI BIT_3
+#define SRB_LOGIN_PRLI_ONLY BIT_4
uint16_t data[2];
u32 iop[2];
} logio;
@@ -398,6 +400,8 @@ struct srb_iocb {
struct completion comp;
struct els_plogi_payload *els_plogi_pyld;
struct els_plogi_payload *els_resp_pyld;
+ u32 tx_size;
+ u32 rx_size;
dma_addr_t els_plogi_pyld_dma;
dma_addr_t els_resp_pyld_dma;
uint32_t fw_status[3];
@@ -2312,6 +2316,7 @@ enum fcport_mgt_event {
FCME_ADISC_DONE,
FCME_GNNID_DONE,
FCME_GFPNID_DONE,
+ FCME_ELS_PLOGI_DONE,
};
enum rscn_addr_format {
@@ -2408,6 +2413,7 @@ typedef struct fc_port {
struct ct_sns_desc ct_desc;
enum discovery_state disc_state;
enum login_state fw_login_state;
+ unsigned long dm_login_expire;
unsigned long plogi_nack_done_deadline;
u32 login_gen, last_login_gen;
@@ -2418,7 +2424,8 @@ typedef struct fc_port {
u8 iocb[IOCB_SIZE];
u8 current_login_state;
u8 last_login_state;
- struct completion n2n_done;
+ u16 n2n_link_reset_cnt;
+ u16 n2n_chip_reset;
} fc_port_t;
#define QLA_FCPORT_SCAN 1
@@ -3228,6 +3235,7 @@ enum qla_work_type {
QLA_EVT_GFPNID,
QLA_EVT_SP_RETRY,
QLA_EVT_IIDMA,
+ QLA_EVT_ELS_PLOGI,
};
@@ -3599,6 +3607,8 @@ struct qla_hw_data {
uint32_t detected_lr_sfp:1;
uint32_t using_lr_setting:1;
uint32_t rida_fmt2:1;
+ uint32_t purge_mbox:1;
+ uint32_t n2n_bigger:1;
} flags;
uint16_t max_exchg;
@@ -3844,6 +3854,10 @@ struct qla_hw_data {
int port_down_retry_count;
uint8_t mbx_count;
uint8_t aen_mbx_count;
+ atomic_t num_pend_mbx_stage1;
+ atomic_t num_pend_mbx_stage2;
+ atomic_t num_pend_mbx_stage3;
+ uint16_t frame_payload_size;
uint32_t login_retry_count;
/* SNS command interfaces. */
@@ -3903,6 +3917,9 @@ struct qla_hw_data {
int exchoffld_size;
int exchoffld_count;
+ /* n2n */
+ struct els_plogi_payload plogi_els_payld;
+
void *swl;
/* These are used by mailbox operations. */
@@ -4157,6 +4174,7 @@ struct qla_hw_data {
struct work_struct board_disable;
struct mr_data_fx00 mr;
+ uint32_t chip_reset;
struct qlt_hw_data tgt;
int allow_cna_fw_dump;
@@ -4238,7 +4256,7 @@ typedef struct scsi_qla_host {
#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
-#define FREE_BIT 21
+#define N2N_LINK_RESET 21
#define PORT_UPDATE_NEEDED 22
#define FX00_RESET_RECOVERY 23
#define FX00_TARGET_SCAN 24
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 5d8688e5bc7c..50c1e6c62e31 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1366,6 +1366,11 @@ struct vp_rpt_id_entry_24xx {
/* format 1 fabric */
uint8_t vpstat1_subcode; /* vp_status=1 subcode */
uint8_t flags;
+#define TOPO_MASK 0xE
+#define TOPO_FL 0x2
+#define TOPO_N2N 0x4
+#define TOPO_F 0x6
+
uint16_t fip_flags;
uint8_t rsv2[12];
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 2660a48d918a..178974896b5c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -45,8 +45,7 @@ extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
-extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *,
- port_id_t);
+extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);
extern void qla2x00_update_fcports(scsi_qla_host_t *);
@@ -118,6 +117,7 @@ extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
fc_port_t *, uint16_t *);
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
+int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
/*
* Global Data in qla_os.c source file.
*/
@@ -212,7 +212,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
-int qla24xx_async_abort_cmd(srb_t *);
+int qla24xx_async_abort_cmd(srb_t *, bool);
int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 7a3744006419..a0038d879b9d 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1962,7 +1962,6 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
void *entries;
struct ct_fdmiv2_hba_attr *eiter;
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
struct new_utsname *p_sysid = NULL;
/* Issue RHBA */
@@ -2142,9 +2141,7 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
/* MAX CT Payload Length */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
- eiter->a.max_ct_len = IS_FWI2_CAPABLE(ha) ?
- le16_to_cpu(icb24->frame_payload_size) :
- le16_to_cpu(ha->init_cb->frame_payload_size);
- eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
+ eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size);
eiter->len = cpu_to_be16(4 + 4);
size += 4 + 4;
@@ -3394,19 +3391,40 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
{
- if (sp->u.iocb_cmd.u.ctarg.req) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.req_allocated_size,
- sp->u.iocb_cmd.u.ctarg.req,
- sp->u.iocb_cmd.u.ctarg.req_dma);
- sp->u.iocb_cmd.u.ctarg.req = NULL;
- }
- if (sp->u.iocb_cmd.u.ctarg.rsp) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
- sp->u.iocb_cmd.u.ctarg.rsp,
- sp->u.iocb_cmd.u.ctarg.rsp_dma);
- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ struct srb_iocb *c = &sp->u.iocb_cmd;
+
+ switch (sp->type) {
+ case SRB_ELS_DCMD:
+ if (c->u.els_plogi.els_plogi_pyld)
+ dma_free_coherent(&vha->hw->pdev->dev,
+ c->u.els_plogi.tx_size,
+ c->u.els_plogi.els_plogi_pyld,
+ c->u.els_plogi.els_plogi_pyld_dma);
+
+ if (c->u.els_plogi.els_resp_pyld)
+ dma_free_coherent(&vha->hw->pdev->dev,
+ c->u.els_plogi.rx_size,
+ c->u.els_plogi.els_resp_pyld,
+ c->u.els_plogi.els_resp_pyld_dma);
+ break;
+ case SRB_CT_PTHRU_CMD:
+ default:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+ break;
}
sp->free(sp);
@@ -3483,6 +3501,14 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->rscn_gen++;
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->flags |= FCF_FABRIC_DEVICE;
+ if (fcport->login_retry == 0) {
+ fcport->login_retry =
+ vha->hw->login_retry_count;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
+ fcport->port_name, fcport->loop_id,
+ fcport->login_retry);
+ }
switch (fcport->disc_state) {
case DSC_LOGIN_COMPLETE:
/* recheck session is still intact. */
@@ -3981,6 +4007,14 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
} else {
if (fcport->rscn_rcvd ||
fcport->disc_state != DSC_LOGIN_COMPLETE) {
+ if (fcport->login_retry == 0) {
+ fcport->login_retry =
+ vha->hw->login_retry_count;
+ ql_dbg(ql_dbg_disc, vha, 0x20a3,
+ "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
+ fcport->port_name, fcport->loop_id,
+ fcport->login_retry);
+ }
fcport->rscn_rcvd = 0;
qla24xx_fcport_handle_login(vha, fcport);
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1b19b954bbae..b934977c5c26 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -50,16 +50,15 @@ qla2x00_sp_timeout(struct timer_list *t)
{
srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
struct srb_iocb *iocb;
- scsi_qla_host_t *vha = sp->vha;
struct req_que *req;
unsigned long flags;
- spin_lock_irqsave(&vha->hw->hardware_lock, flags);
- req = vha->hw->req_q_map[0];
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ req = sp->qpair->req;
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
iocb->timeout(sp);
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
void
@@ -100,6 +99,8 @@ qla2x00_async_iocb_timeout(void *data)
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
+ int rc, h;
+ unsigned long flags;
if (fcport) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
@@ -114,11 +115,26 @@ qla2x00_async_iocb_timeout(void *data)
switch (sp->type) {
case SRB_LOGIN_CMD:
- /* Retry as needed. */
- lio->u.logio.data[0] = MBS_COMMAND_ERROR;
- lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
- QLA_LOGIO_LOGIN_RETRIED : 0;
- sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ rc = qla24xx_async_abort_cmd(sp, false);
+ if (rc) {
+ /* Retry as needed. */
+ lio->u.logio.data[0] = MBS_COMMAND_ERROR;
+ lio->u.logio.data[1] =
+ lio->u.logio.flags & SRB_LOGIN_RETRIED ?
+ QLA_LOGIO_LOGIN_RETRIED : 0;
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
+ h++) {
+ if (sp->qpair->req->outstanding_cmds[h] ==
+ sp) {
+ sp->qpair->req->outstanding_cmds[h] =
+ NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ }
break;
case SRB_LOGOUT_CMD:
case SRB_CT_PTHRU_CMD:
@@ -127,7 +143,21 @@ qla2x00_async_iocb_timeout(void *data)
case SRB_NACK_PRLI:
case SRB_NACK_LOGO:
case SRB_CTRL_VP:
- sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ rc = qla24xx_async_abort_cmd(sp, false);
+ if (rc) {
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
+ h++) {
+ if (sp->qpair->req->outstanding_cmds[h] ==
+ sp) {
+ sp->qpair->req->outstanding_cmds[h] =
+ NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ }
break;
}
}
@@ -160,6 +190,22 @@ qla2x00_async_login_sp_done(void *ptr, int res)
sp->free(sp);
}
+static inline bool
+fcport_is_smaller(fc_port_t *fcport)
+{
+ if (wwn_to_u64(fcport->port_name) <
+ wwn_to_u64(fcport->vha->port_name))
+ return true;
+ else
+ return false;
+}
+
+static inline bool
+fcport_is_bigger(fc_port_t *fcport)
+{
+ return !fcport_is_smaller(fcport);
+}
+
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
@@ -189,13 +235,16 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->done = qla2x00_async_login_sp_done;
- lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+ if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
+ lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
+ } else {
+ lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
- if (fcport->fc4f_nvme)
- lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
+ if (fcport->fc4f_nvme)
+ lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
+
+ }
- if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
- lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
fcport->flags |= FCF_LOGIN_NEEDED;
@@ -370,6 +419,19 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
__qla24xx_handle_gpdb_event(vha, ea);
}
+int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
+ return qla2x00_post_work(vha, e);
+}
+
static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
@@ -382,7 +444,7 @@ qla2x00_async_adisc_sp_done(void *ptr, int res)
"Async done-%s res %x %8phC\n",
sp->name, res, sp->fcport->port_name);
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.event = FCME_ADISC_DONE;
@@ -418,6 +480,8 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->done = qla2x00_async_adisc_sp_done;
@@ -464,7 +528,6 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
if (ea->rc) { /* rval */
if (fcport->login_retry == 0) {
- fcport->login_retry = vha->hw->login_retry_count;
ql_dbg(ql_dbg_disc, vha, 0x20de,
"GNL failed Port login retry %8phN, retry cnt=%d.\n",
fcport->port_name, fcport->login_retry);
@@ -497,35 +560,51 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
for (i = 0; i < n; i++) {
e = &vha->gnl.l[i];
wwn = wwn_to_u64(e->port_name);
+ id.b.domain = e->port_id[2];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[0];
+ id.b.rsvd_1 = 0;
if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
continue;
+ if (IS_SW_RESV_ADDR(id))
+ continue;
+
found = 1;
- id.b.domain = e->port_id[2];
- id.b.area = e->port_id[1];
- id.b.al_pa = e->port_id[0];
- id.b.rsvd_1 = 0;
loop_id = le16_to_cpu(e->nport_handle);
loop_id = (loop_id & 0x7fff);
+ if (fcport->fc4f_nvme)
+ current_login_state = e->current_login_state >> 4;
+ else
+ current_login_state = e->current_login_state & 0xf;
+
ql_dbg(ql_dbg_disc, vha, 0x20e2,
- "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
+ "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
__func__, fcport->port_name,
e->current_login_state, fcport->fw_login_state,
- id.b.domain, id.b.area, id.b.al_pa,
+ fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
- if ((id.b24 != fcport->d_id.b24) ||
- ((fcport->loop_id != FC_NO_LOOP_ID) &&
- (fcport->loop_id != loop_id))) {
- ql_dbg(ql_dbg_disc, vha, 0x20e3,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport);
- return;
+ switch (fcport->disc_state) {
+ case DSC_DELETE_PEND:
+ case DSC_DELETED:
+ break;
+ default:
+ if ((id.b24 != fcport->d_id.b24 &&
+ fcport->d_id.b24) ||
+ (fcport->loop_id != FC_NO_LOOP_ID &&
+ fcport->loop_id != loop_id)) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e3,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+ break;
}
fcport->loop_id = loop_id;
@@ -544,68 +623,148 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
fcport->login_pause = 1;
}
- if (fcport->fc4f_nvme)
- current_login_state = e->current_login_state >> 4;
- else
- current_login_state = e->current_login_state & 0xf;
-
- switch (current_login_state) {
- case DSC_LS_PRLI_COMP:
- ql_dbg(ql_dbg_disc, vha, 0x20e4,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, fcport->port_name);
+ switch (vha->hw->current_topology) {
+ default:
+ switch (current_login_state) {
+ case DSC_LS_PRLI_COMP:
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose,
+ vha, 0x20e4, "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, fcport->port_name);
- if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
- fcport->port_type = FCT_INITIATOR;
- else
- fcport->port_type = FCT_TARGET;
+ if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport,
+ data);
+ break;
+ case DSC_LS_PORT_UNAVAIL:
+ default:
+ if (fcport->loop_id != FC_NO_LOOP_ID)
+ qla2x00_clear_loop_id(fcport);
- data[0] = data[1] = 0;
- qla2x00_post_async_adisc_work(vha, fcport, data);
- break;
- case DSC_LS_PORT_UNAVAIL:
- default:
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- qla2x00_find_new_loop_id(vha, fcport);
+ fcport->loop_id = loop_id;
fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
}
- ql_dbg(ql_dbg_disc, vha, 0x20e5,
- "%s %d %8phC\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_fcport_handle_login(vha, fcport);
break;
- }
+ case ISP_CFG_N:
+ fcport->fw_login_state = current_login_state;
+ fcport->d_id = id;
+ switch (current_login_state) {
+ case DSC_LS_PRLI_COMP:
+ if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport,
+ data);
+ break;
+ case DSC_LS_PLOGI_COMP:
+ if (fcport_is_bigger(fcport)) {
+ /* local adapter is smaller */
+ if (fcport->loop_id != FC_NO_LOOP_ID)
+ qla2x00_clear_loop_id(fcport);
+
+ fcport->loop_id = loop_id;
+ qla24xx_fcport_handle_login(vha,
+ fcport);
+ break;
+ }
+ /* fall through */
+ default:
+ if (fcport_is_smaller(fcport)) {
+ /* local adapter is bigger */
+ if (fcport->loop_id != FC_NO_LOOP_ID)
+ qla2x00_clear_loop_id(fcport);
+
+ fcport->loop_id = loop_id;
+ qla24xx_fcport_handle_login(vha,
+ fcport);
+ }
+ break;
+ }
+ break;
+ } /* switch (ha->current_topology) */
}
if (!found) {
- /* fw has no record of this port */
- for (i = 0; i < n; i++) {
- e = &vha->gnl.l[i];
- id.b.domain = e->port_id[0];
- id.b.area = e->port_id[1];
- id.b.al_pa = e->port_id[2];
- id.b.rsvd_1 = 0;
- loop_id = le16_to_cpu(e->nport_handle);
-
- if (fcport->d_id.b24 == id.b24) {
- conflict_fcport =
- qla2x00_find_fcport_by_wwpn(vha,
- e->port_name, 0);
- if (conflict_fcport) {
- qlt_schedule_sess_for_deletion
- (conflict_fcport);
- ql_dbg(ql_dbg_disc, vha, 0x20e6,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict_fcport->port_name);
+ switch (vha->hw->current_topology) {
+ case ISP_CFG_F:
+ case ISP_CFG_FL:
+ for (i = 0; i < n; i++) {
+ e = &vha->gnl.l[i];
+ id.b.domain = e->port_id[0];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[2];
+ id.b.rsvd_1 = 0;
+ loop_id = le16_to_cpu(e->nport_handle);
+
+ if (fcport->d_id.b24 == id.b24) {
+ conflict_fcport =
+ qla2x00_find_fcport_by_wwpn(vha,
+ e->port_name, 0);
+ if (conflict_fcport) {
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose,
+ vha, 0x20e5,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict_fcport->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict_fcport);
+ }
}
+ /*
+ * FW already picked this loop id for
+ * another fcport
+ */
+ if (fcport->loop_id == loop_id)
+ fcport->loop_id = FC_NO_LOOP_ID;
}
-
- /* FW already picked this loop id for another fcport */
- if (fcport->loop_id == loop_id)
- fcport->loop_id = FC_NO_LOOP_ID;
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case ISP_CFG_N:
+ fcport->disc_state = DSC_DELETED;
+ if (time_after_eq(jiffies, fcport->dm_login_expire)) {
+ if (fcport->n2n_link_reset_cnt < 2) {
+ fcport->n2n_link_reset_cnt++;
+ /*
+ * remote port is not sending PLOGI.
+ * Reset link to kick start his state
+ * machine
+ */
+ set_bit(N2N_LINK_RESET,
+ &vha->dpc_flags);
+ } else {
+ if (fcport->n2n_chip_reset < 1) {
+ ql_log(ql_log_info, vha, 0x705d,
+ "Chip reset to bring laser down");
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ fcport->n2n_chip_reset++;
+ } else {
+ ql_log(ql_log_info, vha, 0x705d,
+ "Remote port %8ph is not coming back\n",
+ fcport->port_name);
+ fcport->scan_state = 0;
+ }
+ }
+ qla2xxx_wake_dpc(vha);
+ } else {
+ /*
+ * report port suppose to do PLOGI. Give him
+ * more time. FW will catch it.
+ */
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
+ break;
+ default:
+ break;
}
- qla24xx_fcport_handle_login(vha, fcport);
}
} /* gnl_event */
@@ -911,9 +1070,9 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
}
ql_dbg(ql_dbg_disc, vha, 0x211b,
- "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
- fcport->port_name, sp->handle, fcport->loop_id,
- fcport->d_id.b24, fcport->login_retry);
+ "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
+ fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
+ fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");
return rval;
@@ -1055,8 +1214,9 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->flags &= ~FCF_ASYNC_SENT;
ql_dbg(ql_dbg_disc, vha, 0x20d2,
- "%s %8phC DS %d LS %d rc %d\n", __func__, fcport->port_name,
- fcport->disc_state, pd->current_login_state, ea->rc);
+ "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
+ fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
+ ea->rc);
if (fcport->disc_state == DSC_DELETE_PEND)
return;
@@ -1074,9 +1234,12 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
case PDS_PLOGI_COMPLETE:
case PDS_PRLI_PENDING:
case PDS_PRLI2_PENDING:
- ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n",
- __func__, __LINE__, fcport->port_name);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+		/* Set discovery state back to GNL for a relogin attempt */
+ if (qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) {
+ fcport->disc_state = DSC_GNL;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
return;
case PDS_LOGO_PENDING:
case PDS_PORT_UNAVAILABLE:
@@ -1174,39 +1337,80 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
return 0;
}
- if (fcport->login_retry > 0)
- fcport->login_retry--;
-
switch (fcport->disc_state) {
case DSC_DELETED:
wwn = wwn_to_u64(fcport->node_name);
- if (wwn == 0) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post GNNID\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gnnid_work(vha, fcport);
- } else if (fcport->loop_id == FC_NO_LOOP_ID) {
- ql_dbg(ql_dbg_disc, vha, 0x20bd,
- "%s %d %8phC post gnl\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gnl_work(vha, fcport);
- } else {
- qla_chk_n2n_b4_login(vha, fcport);
+ switch (vha->hw->current_topology) {
+ case ISP_CFG_N:
+ if (fcport_is_smaller(fcport)) {
+ /* this adapter is bigger */
+ if (fcport->login_retry) {
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ qla2x00_find_new_loop_id(vha,
+ fcport);
+ fcport->fw_login_state =
+ DSC_LS_PORT_UNAVAIL;
+ }
+ fcport->login_retry--;
+ qla_post_els_plogi_work(vha, fcport);
+ } else {
+ ql_log(ql_log_info, vha, 0x705d,
+						"Unable to reach remote port %8phC\n",
+ fcport->port_name);
+ }
+ } else {
+ qla24xx_post_gnl_work(vha, fcport);
+ }
+ break;
+ default:
+ if (wwn == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post GNNID\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gnnid_work(vha, fcport);
+ } else if (fcport->loop_id == FC_NO_LOOP_ID) {
+ ql_dbg(ql_dbg_disc, vha, 0x20bd,
+ "%s %d %8phC post gnl\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gnl_work(vha, fcport);
+ } else {
+ qla_chk_n2n_b4_login(vha, fcport);
+ }
+ break;
}
break;
case DSC_GNL:
- if (fcport->login_pause) {
- fcport->last_rscn_gen = fcport->rscn_gen;
- fcport->last_login_gen = fcport->login_gen;
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ switch (vha->hw->current_topology) {
+ case ISP_CFG_N:
+ if ((fcport->current_login_state & 0xf) == 0x6) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post GPDB work\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->chip_reset =
+ vha->hw->base_qpair->chip_reset;
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post NVMe PRLI\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_prli_work(vha, fcport);
+ }
+ break;
+ default:
+ if (fcport->login_pause) {
+ fcport->last_rscn_gen = fcport->rscn_gen;
+ fcport->last_login_gen = fcport->login_gen;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+ qla_chk_n2n_b4_login(vha, fcport);
break;
}
-
- qla_chk_n2n_b4_login(vha, fcport);
break;
case DSC_LOGIN_FAILED:
+ fcport->login_retry--;
ql_dbg(ql_dbg_disc, vha, 0x20d0,
"%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
@@ -1221,6 +1425,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
ql_dbg(ql_dbg_disc, vha, 0x20d1,
"%s %d %8phC post adisc\n",
__func__, __LINE__, fcport->port_name);
+ fcport->login_retry--;
data[0] = data[1] = 0;
qla2x00_post_async_adisc_work(vha, fcport, data);
break;
@@ -1304,17 +1509,6 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
}
}
- if (fcport->flags & FCF_ASYNC_SENT) {
- fcport->login_retry++;
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- return;
- }
-
- if (fcport->disc_state == DSC_DELETE_PEND) {
- fcport->login_retry++;
- return;
- }
-
if (fcport->last_rscn_gen != fcport->rscn_gen) {
ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
@@ -1326,6 +1520,15 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
qla24xx_fcport_handle_login(vha, fcport);
}
+
+void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post PRLI\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ qla24xx_post_prli_work(vha, ea->fcport);
+}
+
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
fc_port_t *f, *tf;
@@ -1427,6 +1630,9 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
case FCME_GFPNID_DONE:
qla24xx_handle_gfpnid_event(vha, ea);
break;
+ case FCME_ELS_PLOGI_DONE:
+ qla_handle_els_plogi_done(vha, ea);
+ break;
default:
BUG_ON(1);
break;
@@ -1520,7 +1726,7 @@ qla24xx_abort_iocb_timeout(void *data)
struct srb_iocb *abt = &sp->u.iocb_cmd;
abt->u.abt.comp_status = CS_TIMEOUT;
- complete(&abt->u.abt.comp);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
}
static void
@@ -1529,12 +1735,16 @@ qla24xx_abort_sp_done(void *ptr, int res)
srb_t *sp = ptr;
struct srb_iocb *abt = &sp->u.iocb_cmd;
- if (del_timer(&sp->u.iocb_cmd.timer))
- complete(&abt->u.abt.comp);
+ if (del_timer(&sp->u.iocb_cmd.timer)) {
+ if (sp->flags & SRB_WAKEUP_ON_COMP)
+ complete(&abt->u.abt.comp);
+ else
+ sp->free(sp);
+ }
}
int
-qla24xx_async_abort_cmd(srb_t *cmd_sp)
+qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
scsi_qla_host_t *vha = cmd_sp->vha;
fc_port_t *fcport = cmd_sp->fcport;
@@ -1549,6 +1759,8 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
sp->name = "abort";
+ if (wait)
+ sp->flags = SRB_WAKEUP_ON_COMP;
abt_iocb->timeout = qla24xx_abort_iocb_timeout;
init_completion(&abt_iocb->u.abt.comp);
@@ -1572,10 +1784,11 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
"Abort command issued - hdl=%x, target_id=%x\n",
cmd_sp->handle, fcport->tgt_id);
- wait_for_completion(&abt_iocb->u.abt.comp);
-
- rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
- QLA_SUCCESS : QLA_FUNCTION_FAILED;
+ if (wait) {
+ wait_for_completion(&abt_iocb->u.abt.comp);
+ rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+ QLA_SUCCESS : QLA_FUNCTION_FAILED;
+ }
done_free_sp:
sp->free(sp);
@@ -1611,7 +1824,7 @@ qla24xx_async_abort_command(srb_t *sp)
return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
FXDISC_ABORT_IOCTL);
- return qla24xx_async_abort_cmd(sp);
+ return qla24xx_async_abort_cmd(sp, true);
}
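
The reworked abort path makes blocking optional: with wait=true the caller
sleeps on the abort completion and collects a status, with wait=false the
done/free callbacks reclaim the srb asynchronously. A hedged usage sketch:

    /* synchronous: block until firmware answers, then check status */
    rval = qla24xx_async_abort_cmd(cmd_sp, true);

    /* fire-and-forget: qla24xx_abort_sp_done() frees the srb itself */
    qla24xx_async_abort_cmd(cmd_sp, false);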
static void
@@ -1799,7 +2012,6 @@ void
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
qlt_logo_completion_handler(fcport, data[0]);
fcport->login_gen++;
fcport->flags &= ~FCF_ASYNC_ACTIVE;
@@ -4050,7 +4262,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
id.b.al_pa = al_pa;
id.b.rsvd_1 = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_update_host_map(vha, id);
+ if (!(topo == 2 && ha->flags.n2n_bigger))
+ qlt_update_host_map(vha, id);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!vha->flags.init_done)
@@ -4308,7 +4521,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
while (cnt--)
*dptr1++ = *dptr2++;
-
+ ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
/* Use alternate WWN? */
if (nv->host_p[1] & BIT_7) {
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
@@ -4591,20 +4804,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
} else if (ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
- if (ha->flags.rida_fmt2) {
- /* With Rida Format 2, the login is already triggered.
- * We know who is on the other side of the wire.
- * No need to login to do login to find out or drop into
- * qla2x00_configure_local_loop().
- */
+ if (qla_tgt_mode_enabled(vha)) {
+ /* allow the other side to start the login */
clear_bit(LOCAL_LOOP_UPDATE, &flags);
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- } else {
- if (qla_tgt_mode_enabled(vha)) {
- /* allow the other side to start the login */
- clear_bit(LOCAL_LOOP_UPDATE, &flags);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- }
}
} else if (ha->current_topology == ISP_CFG_NL) {
clear_bit(RSCN_UPDATE, &flags);
@@ -4688,110 +4891,6 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
}
/*
- * N2N Login
- * Updates Fibre Channel Device Database with local loop devices.
- *
- * Input:
- * ha = adapter block pointer.
- *
- * Returns:
- */
-static int qla24xx_n2n_handle_login(struct scsi_qla_host *vha,
- fc_port_t *fcport)
-{
- struct qla_hw_data *ha = vha->hw;
- int res = QLA_SUCCESS, rval;
- int greater_wwpn = 0;
- int logged_in = 0;
-
- if (ha->current_topology != ISP_CFG_N)
- return res;
-
- if (wwn_to_u64(vha->port_name) >
- wwn_to_u64(vha->n2n_port_name)) {
- ql_dbg(ql_dbg_disc, vha, 0x2002,
- "HBA WWPN is greater %llx > target %llx\n",
- wwn_to_u64(vha->port_name),
- wwn_to_u64(vha->n2n_port_name));
- greater_wwpn = 1;
- fcport->d_id.b24 = vha->n2n_id;
- }
-
- fcport->loop_id = vha->loop_id;
- fcport->fc4f_nvme = 0;
- fcport->query = 1;
-
- ql_dbg(ql_dbg_disc, vha, 0x4001,
- "Initiate N2N login handler: HBA port_id=%06x loopid=%d\n",
- fcport->d_id.b24, vha->loop_id);
-
- /* Fill in member data. */
- if (!greater_wwpn) {
- rval = qla2x00_get_port_database(vha, fcport, 0);
- ql_dbg(ql_dbg_disc, vha, 0x1051,
- "Remote login-state (%x/%x) port_id=%06x loop_id=%x, rval=%d\n",
- fcport->current_login_state, fcport->last_login_state,
- fcport->d_id.b24, fcport->loop_id, rval);
-
- if (((fcport->current_login_state & 0xf) == 0x4) ||
- ((fcport->current_login_state & 0xf) == 0x6))
- logged_in = 1;
- }
-
- if (logged_in || greater_wwpn) {
- if (!vha->nvme_local_port && vha->flags.nvme_enabled)
- qla_nvme_register_hba(vha);
-
- /* Set connected N_Port d_id */
- if (vha->flags.nvme_enabled)
- fcport->fc4f_nvme = 1;
-
- fcport->scan_state = QLA_FCPORT_FOUND;
- fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
- fcport->disc_state = DSC_GNL;
- fcport->n2n_flag = 1;
- fcport->flags = 3;
- vha->hw->flags.gpsc_supported = 0;
-
- if (greater_wwpn) {
- ql_dbg(ql_dbg_disc, vha, 0x20e5,
- "%s %d PLOGI ELS %8phC\n",
- __func__, __LINE__, fcport->port_name);
-
- res = qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
- fcport, fcport->d_id);
- }
-
- if (res != QLA_SUCCESS) {
- ql_log(ql_log_info, vha, 0xd04d,
- "PLOGI Failed: portid=%06x - retrying\n",
- fcport->d_id.b24);
- res = QLA_SUCCESS;
- } else {
- /* State 0x6 means FCP PRLI complete */
- if ((fcport->current_login_state & 0xf) == 0x6) {
- ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post GPDB work\n",
- __func__, __LINE__, fcport->port_name);
- fcport->chip_reset =
- vha->hw->base_qpair->chip_reset;
- qla24xx_post_gpdb_work(vha, fcport, 0);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post NVMe PRLI\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_prli_work(vha, fcport);
- }
- }
- } else {
- /* Wait for next database change */
- set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
- }
-
- return res;
-}
-
-/*
* qla2x00_configure_local_loop
* Updates Fibre Channel Device Database with local loop devices.
*
@@ -4817,6 +4916,31 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
+	/* Initiate N2N login. */
+ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+		/* borrowing init_cb as scratch space */
+ u32 *bp, i, sz;
+
+ memset(ha->init_cb, 0, ha->init_cb_size);
+ sz = min_t(int, sizeof(struct els_plogi_payload),
+ ha->init_cb_size);
+ rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+ (void *)ha->init_cb, sz);
+ if (rval == QLA_SUCCESS) {
+ bp = (uint32_t *)ha->init_cb;
+			for (i = 0; i < sz / 4; i++, bp++)
+ *bp = cpu_to_be32(*bp);
+
+ memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
+ sizeof(ha->plogi_els_payld.data));
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x00d1,
+ "PLOGI ELS param read fail.\n");
+ }
+ return QLA_SUCCESS;
+ }
+
found_devs = 0;
new_fcport = NULL;
entries = MAX_FIBRE_DEVICES_LOOP;
@@ -4848,14 +4972,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
}
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
- /* Inititae N2N login. */
- if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
- rval = qla24xx_n2n_handle_login(vha, new_fcport);
- if (rval != QLA_SUCCESS)
- goto cleanup_allocation;
- return QLA_SUCCESS;
- }
-
/* Add devices to port list. */
id_iter = (char *)ha->gid_list;
for (index = 0; index < entries; index++) {
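
The hunk above fetches the PLOGI template once into the (momentarily idle)
init_cb DMA buffer, byte-swaps it to big-endian wire order, and caches it
in ha->plogi_els_payld so later ELS PLOGI IOCBs need no extra mailbox round
trip. A rough sketch of the producer/consumer pairing (the consumer side
appears in the qla24xx_els_dcmd2_iocb() hunk further down):

    /* producer, qla2x00_configure_local_loop(): fetch, swap, cache */
    qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
    				 (void *)ha->init_cb, sz);
    /* ... cpu_to_be32() pass over the buffer ... */
    memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
           sizeof(ha->plogi_els_payld.data));

    /* consumer, qla24xx_els_dcmd2_iocb(): copy into each PLOGI payload */
    memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
           &ha->plogi_els_payld.data,
           sizeof(elsio->u.els_plogi.els_plogi_pyld->data));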
@@ -5054,6 +5170,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
struct fc_rport *rport;
unsigned long flags;
+ if (atomic_read(&fcport->state) == FCS_ONLINE)
+ return;
+
rport_ids.node_name = wwn_to_u64(fcport->node_name);
rport_ids.port_name = wwn_to_u64(fcport->port_name);
rport_ids.port_id = fcport->d_id.b.domain << 16 |
@@ -5109,25 +5228,28 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
if (IS_SW_RESV_ADDR(fcport->d_id))
return;
- ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
- __func__, fcport->port_name);
-
- if (IS_QLAFX00(vha->hw)) {
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- } else {
- fcport->login_retry = 0;
- fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
- fcport->disc_state = DSC_LOGIN_COMPLETE;
- fcport->deleted = 0;
- fcport->logout_on_delete = 1;
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- }
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ fcport->deleted = 0;
+ fcport->logout_on_delete = 1;
+ fcport->login_retry = vha->hw->login_retry_count;
+ fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
+ switch (vha->hw->current_topology) {
+ case ISP_CFG_N:
+ case ISP_CFG_NL:
+ fcport->keep_nport_handle = 1;
+ break;
+ default:
+ break;
+ }
+
if (fcport->fc4f_nvme) {
qla_nvme_register_remote(vha, fcport);
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
return;
}
@@ -5168,6 +5290,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla24xx_post_gpsc_work(vha, fcport);
}
}
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
}
/*
@@ -5668,6 +5791,34 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
}
+/* FW does not set aside Loop id for MGMT Server/FFFFFAh */
+int
+qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
+{
+ int loop_id = FC_NO_LOOP_ID;
+ int lid = NPH_MGMT_SERVER - vha->vp_idx;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->vp_idx == 0) {
+ set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
+ return NPH_MGMT_SERVER;
+ }
+
+ /* pick id from high and work down to low */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ for (; lid > 0; lid--) {
+ if (!test_bit(lid, vha->hw->loop_id_map)) {
+ set_bit(lid, vha->hw->loop_id_map);
+ loop_id = lid;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return loop_id;
+}
+
/*
* qla2x00_fabric_login
* Issue fabric login command.
@@ -6335,6 +6486,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ql_log(ql_log_info, vha, 0x00af,
"Performing ISP error recovery - ha=%p.\n", ha);
+ ha->flags.purge_mbox = 1;
/* For ISP82XX, reset_chip is just disabling interrupts.
* Driver waits for the completion of the commands.
* the interrupts need to be enabled.
@@ -6349,13 +6501,31 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ha->current_topology = 0;
ha->flags.fw_started = 0;
ha->flags.fw_init_done = 0;
- ha->base_qpair->chip_reset++;
+ ha->chip_reset++;
+ ha->base_qpair->chip_reset = ha->chip_reset;
for (i = 0; i < ha->max_qpairs; i++) {
if (ha->queue_pair_map[i])
ha->queue_pair_map[i]->chip_reset =
ha->base_qpair->chip_reset;
}
+ /* purge MBox commands */
+ if (atomic_read(&ha->num_pend_mbx_stage3)) {
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+
+ i = 0;
+ while (atomic_read(&ha->num_pend_mbx_stage3) ||
+ atomic_read(&ha->num_pend_mbx_stage2) ||
+ atomic_read(&ha->num_pend_mbx_stage1)) {
+ msleep(20);
+ i++;
+ if (i > 50)
+ break;
+ }
+ ha->flags.purge_mbox = 0;
+
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
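
This purge handshake pairs with the num_pend_mbx_stage1/2/3 counters added
in qla2x00_mailbox_command() (see the qla_mbx.c hunks below): stage1 counts
waiters on mailbox access, stage2 commands in flight, stage3 sleepers on
mbx_intr_comp. Setting purge_mbox and completing mbx_intr_comp kicks the
stage3 sleeper, then the loop above polls until all stages drain or roughly
one second passes. Each mailbox path re-checks before touching hardware;
condensed:

    if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
    	rval = QLA_ABORTED;	/* chip reset in progress, bail out */
    	goto premature_exit;
    }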
@@ -6861,7 +7031,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
(uint8_t *)&icb->interrupt_delay_timer;
while (cnt--)
*dptr1++ = *dptr2++;
-
+ ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
/*
* Setup driver NVRAM options.
*/
@@ -6960,6 +7130,9 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
if (ql2xloginretrycount)
ha->login_retry_count = ql2xloginretrycount;
+ /* N2N: driver will initiate Login instead of FW */
+ icb->firmware_options_3 |= BIT_8;
+
/* Enable ZIO. */
if (!vha->flags.init_done) {
ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
@@ -7069,7 +7242,7 @@ check_valid_image:
ha->active_image = QLA27XX_SECONDARY_IMAGE;
}
- ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x018f, "%s image\n",
ha->active_image == 0 ? "default bootld and fw" :
ha->active_image == 1 ? "primary" :
ha->active_image == 2 ? "secondary" :
@@ -7917,7 +8090,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
/* Use extended-initialization control block. */
memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
-
+ ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
/*
* Setup driver NVRAM options.
*/
@@ -8042,8 +8215,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
/* enable RIDA Format2 */
- if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
- icb->firmware_options_3 |= BIT_0;
+ icb->firmware_options_3 |= BIT_0;
+
+ /* N2N: driver will initiate Login instead of FW */
+ icb->firmware_options_3 |= BIT_8;
if (IS_QLA27XX(ha)) {
icb->firmware_options_3 |= BIT_8;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 59fd5a9dfeb8..4351736b2426 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -58,14 +58,12 @@ qla2x00_debounce_register(volatile uint16_t __iomem *addr)
static inline void
qla2x00_poll(struct rsp_que *rsp)
{
- unsigned long flags;
struct qla_hw_data *ha = rsp->hw;
- local_irq_save(flags);
+
if (IS_P3P_TYPE(ha))
qla82xx_poll(0, rsp);
else
ha->isp_ops->intr_handler(0, rsp);
- local_irq_restore(flags);
}
static inline uint8_t *
@@ -204,6 +202,12 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}
+static inline int
+qla2x00_chip_is_down(scsi_qla_host_t *vha)
+{
+ return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
+}
+
static inline srb_t *
qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
{
@@ -278,8 +282,6 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
init_completion(&sp->comp);
if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
- if (sp->type == SRB_ELS_DCMD)
- init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
add_timer(&sp->u.iocb_cmd.timer);
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index dd93a22fe843..42ac8e097419 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2241,12 +2241,15 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
struct srb_iocb *lio = &sp->u.iocb_cmd;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
- logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
-
- if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
- logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
- if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
- logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
+ if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
+ } else {
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
+ if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
+ logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
+ if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
+ logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
+ }
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
@@ -2463,6 +2466,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
sp->fcport = fcport;
elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+ init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
sp->done = qla2x00_els_dcmd_sp_done;
sp->free = qla2x00_els_dcmd_sp_free;
@@ -2510,7 +2514,6 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
scsi_qla_host_t *vha = sp->vha;
struct srb_iocb *elsio = &sp->u.iocb_cmd;
- uint32_t dsd_len = 24;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
@@ -2533,20 +2536,21 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->control_flags = 0;
if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
- els_iocb->tx_byte_count = sizeof(struct els_plogi_payload);
+ els_iocb->tx_byte_count = els_iocb->tx_len =
+ sizeof(struct els_plogi_payload);
els_iocb->tx_address[0] =
cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
els_iocb->tx_address[1] =
cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
- els_iocb->tx_len = dsd_len;
els_iocb->rx_dsd_count = 1;
- els_iocb->rx_byte_count = sizeof(struct els_plogi_payload);
+ els_iocb->rx_byte_count = els_iocb->rx_len =
+ sizeof(struct els_plogi_payload);
els_iocb->rx_address[0] =
cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
els_iocb->rx_address[1] =
cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
- els_iocb->rx_len = dsd_len;
+
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
"PLOGI ELS IOCB:\n");
ql_dump_buffer(ql_log_info, vha, 0x0109,
@@ -2569,33 +2573,12 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
}
static void
-qla2x00_els_dcmd2_sp_free(void *data)
-{
- srb_t *sp = data;
- struct srb_iocb *elsio = &sp->u.iocb_cmd;
-
- if (elsio->u.els_plogi.els_plogi_pyld)
- dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
- elsio->u.els_plogi.els_plogi_pyld,
- elsio->u.els_plogi.els_plogi_pyld_dma);
-
- if (elsio->u.els_plogi.els_resp_pyld)
- dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
- elsio->u.els_plogi.els_resp_pyld,
- elsio->u.els_plogi.els_resp_pyld_dma);
-
- del_timer(&elsio->timer);
- qla2x00_rel_sp(sp);
-}
-
-static void
qla2x00_els_dcmd2_iocb_timeout(void *data)
{
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
- struct srb_iocb *lio = &sp->u.iocb_cmd;
unsigned long flags = 0;
int res;
@@ -2611,7 +2594,7 @@ qla2x00_els_dcmd2_iocb_timeout(void *data)
(res == QLA_SUCCESS) ? "successful" : "failed");
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- complete(&lio->u.els_plogi.comp);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
}
static void
@@ -2621,17 +2604,55 @@ qla2x00_els_dcmd2_sp_done(void *ptr, int res)
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = sp->vha;
+ struct event_arg ea;
+ struct qla_work_evt *e;
+
+ ql_dbg(ql_dbg_disc, vha, 0x3072,
+ "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
+ sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
- ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
- "%s ELS hdl=%x, portid=%06x done %8phC\n",
- sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
+ fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
+ del_timer(&sp->u.iocb_cmd.timer);
- complete(&lio->u.els_plogi.comp);
+ if (sp->flags & SRB_WAKEUP_ON_COMP)
+ complete(&lio->u.els_plogi.comp);
+ else {
+ if (res) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ } else {
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.rc = res;
+ ea.event = FCME_ELS_PLOGI_DONE;
+ qla2x00_fcport_event_handler(vha, &ea);
+ }
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
+ if (!e) {
+ struct srb_iocb *elsio = &sp->u.iocb_cmd;
+
+ if (elsio->u.els_plogi.els_plogi_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev,
+ elsio->u.els_plogi.tx_size,
+ elsio->u.els_plogi.els_plogi_pyld,
+ elsio->u.els_plogi.els_plogi_pyld_dma);
+
+ if (elsio->u.els_plogi.els_resp_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev,
+ elsio->u.els_plogi.rx_size,
+ elsio->u.els_plogi.els_resp_pyld,
+ elsio->u.els_plogi.els_resp_pyld_dma);
+ sp->free(sp);
+ return;
+ }
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+ }
}
int
qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
- fc_port_t *fcport, port_id_t remote_did)
+ fc_port_t *fcport, bool wait)
{
srb_t *sp;
struct srb_iocb *elsio = NULL;
@@ -2649,23 +2670,23 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
}
elsio = &sp->u.iocb_cmd;
- fcport->d_id.b.domain = remote_did.b.domain;
- fcport->d_id.b.area = remote_did.b.area;
- fcport->d_id.b.al_pa = remote_did.b.al_pa;
-
ql_dbg(ql_dbg_io, vha, 0x3073,
"Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
init_completion(&elsio->u.els_plogi.comp);
- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+ if (wait)
+ sp->flags = SRB_WAKEUP_ON_COMP;
+
+ qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
sp->done = qla2x00_els_dcmd2_sp_done;
- sp->free = qla2x00_els_dcmd2_sp_free;
+ elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
ptr = elsio->u.els_plogi.els_plogi_pyld =
dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
@@ -2690,33 +2711,52 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
memset(ptr, 0, sizeof(struct els_plogi_payload));
memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
+ memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
+ &ha->plogi_els_payld.data,
+ sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
+
elsio->u.els_plogi.els_cmd = els_opcode;
elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
- qla24xx_get_port_login_templ(vha, ptr_dma + 4,
- &elsio->u.els_plogi.els_plogi_pyld->data[0],
- sizeof(struct els_plogi_payload));
- ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
- ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
(uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
- goto out;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x3074,
+ "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
+ sp->name, sp->handle, fcport->loop_id,
+ fcport->d_id.b24, vha->d_id.b24);
}
- ql_dbg(ql_dbg_io, vha, 0x3074,
- "%s PLOGI sent, hdl=%x, loopid=%x, portid=%06x\n",
- sp->name, sp->handle, fcport->loop_id, fcport->d_id.b24);
-
- wait_for_completion(&elsio->u.els_plogi.comp);
+ if (wait) {
+ wait_for_completion(&elsio->u.els_plogi.comp);
- if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
- rval = QLA_FUNCTION_FAILED;
+ if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ goto done;
+ }
out:
+ fcport->flags &= ~(FCF_ASYNC_SENT);
+ if (elsio->u.els_plogi.els_plogi_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev,
+ elsio->u.els_plogi.tx_size,
+ elsio->u.els_plogi.els_plogi_pyld,
+ elsio->u.els_plogi.els_plogi_pyld_dma);
+
+ if (elsio->u.els_plogi.els_resp_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev,
+ elsio->u.els_plogi.rx_size,
+ elsio->u.els_plogi.els_resp_pyld,
+ elsio->u.els_plogi.els_resp_pyld_dma);
+
sp->free(sp);
+done:
return rval;
}
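
With the new wait flag one IOCB builder serves both callers: the work-queue
path posts QLA_EVT_ELS_PLOGI with wait=false and lets the done() callback
raise FCME_ELS_PLOGI_DONE, while legacy callers may still block. A hedged
usage sketch:

    /* async, from the discovery state machine */
    qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, fcport, false);

    /* sync: sleep on els_plogi.comp, then inspect comp_status */
    rval = qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, fcport, true);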
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 7756106d4555..36cbb29c84f6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -911,7 +911,8 @@ skip_rio:
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer,
LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 1);
+ if (!N2N_TOPO(ha))
+ qla2x00_mark_all_devices_lost(vha, 1);
}
if (vha->vp_idx) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index f0ec13d48bf3..2c6c2cd5a0d0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -59,6 +59,7 @@ static struct rom_cmd {
{ MBC_IOCB_COMMAND_A64 },
{ MBC_GET_ADAPTER_LOOP_ID },
{ MBC_READ_SFP },
+ { MBC_GET_RNID_PARAMS },
};
static int is_rom_cmd(uint16_t cmd)
@@ -110,6 +111,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
unsigned long wait_time;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ u32 chip_reset;
ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
@@ -140,7 +142,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_SUCCESS;
abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
-
+ chip_reset = ha->chip_reset;
if (ha->flags.pci_channel_io_perm_failure) {
ql_log(ql_log_warn, vha, 0x1003,
@@ -167,6 +169,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
+ atomic_inc(&ha->num_pend_mbx_stage1);
/*
* Wait for active mailbox commands to finish by waiting at most tov
* seconds. This is to serialize actual issuing of mailbox cmds during
@@ -177,8 +180,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_log(ql_log_warn, vha, 0xd035,
"Cmd access timeout, cmd=0x%x, Exiting.\n",
mcp->mb[0]);
+ atomic_dec(&ha->num_pend_mbx_stage1);
return QLA_FUNCTION_TIMEOUT;
}
+ atomic_dec(&ha->num_pend_mbx_stage1);
+ if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+ rval = QLA_ABORTED;
+ goto premature_exit;
+ }
ha->flags.mbox_busy = 1;
/* Save mailbox command for debug */
@@ -189,6 +198,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+ rval = QLA_ABORTED;
+ ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ goto premature_exit;
+ }
+
/* Load mailbox registers. */
if (IS_P3P_TYPE(ha))
optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
@@ -231,7 +247,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"jiffies=%lx.\n", jiffies);
/* Wait for mbx cmd completion until timeout */
-
+ atomic_inc(&ha->num_pend_mbx_stage2);
if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
@@ -241,6 +257,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
ha->flags.mbox_busy = 0;
+ atomic_dec(&ha->num_pend_mbx_stage2);
ql_dbg(ql_dbg_mbx, vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -254,6 +271,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies;
+ atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -261,7 +279,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ } else if (ha->flags.purge_mbox ||
+ chip_reset != ha->chip_reset) {
+ ha->flags.mbox_busy = 0;
+ atomic_dec(&ha->num_pend_mbx_stage2);
+ atomic_dec(&ha->num_pend_mbx_stage3);
+ rval = QLA_ABORTED;
+ goto premature_exit;
}
+ atomic_dec(&ha->num_pend_mbx_stage3);
+
if (time_after(jiffies, wait_time + 5 * HZ))
ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
command, jiffies_to_msecs(jiffies - wait_time));
@@ -275,6 +303,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
ha->flags.mbox_busy = 0;
+ atomic_dec(&ha->num_pend_mbx_stage2);
ql_dbg(ql_dbg_mbx, vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -289,6 +318,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
while (!ha->flags.mbox_int) {
+ if (ha->flags.purge_mbox ||
+ chip_reset != ha->chip_reset) {
+ ha->flags.mbox_busy = 0;
+ atomic_dec(&ha->num_pend_mbx_stage2);
+ rval = QLA_ABORTED;
+ goto premature_exit;
+ }
+
if (time_after(jiffies, wait_time))
break;
@@ -312,6 +349,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Waited %d sec.\n",
(uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
}
+ atomic_dec(&ha->num_pend_mbx_stage2);
/* Check whether we timed out */
if (ha->flags.mbox_int) {
@@ -390,7 +428,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Capture FW dump only, if PCI device active */
if (!pci_channel_offline(vha->hw->pdev)) {
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
- if (w == 0xffff || ictrl == 0xffffffff) {
+ if (w == 0xffff || ictrl == 0xffffffff ||
+ (chip_reset != ha->chip_reset)) {
/* This is special case if there is unload
* of driver happening and if PCI device go
* into bad state due to PCI error condition
@@ -497,7 +536,11 @@ premature_exit:
complete(&ha->mbx_cmd_comp);
mbx_done:
- if (rval) {
+ if (rval == QLA_ABORTED) {
+ ql_log(ql_log_info, vha, 0xd035,
+ "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
+ mcp->mb[0]);
+ } else if (rval) {
if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
dev_name(&ha->pdev->dev), 0x1020+0x800,
@@ -2177,7 +2220,10 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
mcp->out_mb = MBX_2|MBX_1|MBX_0;
} else if (IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
- mcp->mb[1] = BIT_6;
+ if (N2N_TOPO(vha->hw))
+ mcp->mb[1] = BIT_4; /* re-init */
+ else
+ mcp->mb[1] = BIT_6; /* LIP */
mcp->mb[2] = 0;
mcp->mb[3] = vha->hw->loop_reset_delay;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -3797,30 +3843,68 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
"Format 1: WWPN %8phC.\n",
vha->port_name);
- /* N2N. direct connect */
- if (IS_QLA27XX(ha) &&
- ((rptid_entry->u.f1.flags>>1) & 0x7) == 2) {
- /* if our portname is higher then initiate N2N login */
- if (wwn_to_u64(vha->port_name) >
- wwn_to_u64(rptid_entry->u.f1.port_name)) {
- // ??? qlt_update_host_map(vha, id);
- vha->n2n_id = 0x1;
- ql_dbg(ql_dbg_async, vha, 0x5075,
- "Format 1: Setting n2n_update_needed for id %d\n",
- vha->n2n_id);
+ switch (rptid_entry->u.f1.flags & TOPO_MASK) {
+ case TOPO_N2N:
+ ha->current_topology = ISP_CFG_N;
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ rptid_entry->u.f1.port_name, 1);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ if (fcport) {
+ fcport->plogi_nack_done_deadline = jiffies + HZ;
+ fcport->dm_login_expire = jiffies + 3*HZ;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ set_bit(RELOGIN_NEEDED,
+ &vha->dpc_flags);
+ break;
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ }
} else {
- ql_dbg(ql_dbg_async, vha, 0x5075,
- "Format 1: Remote login - Waiting for WWPN %8phC.\n",
- rptid_entry->u.f1.port_name);
+ id.b24 = 0;
+ if (wwn_to_u64(vha->port_name) >
+ wwn_to_u64(rptid_entry->u.f1.port_name)) {
+ vha->d_id.b24 = 0;
+ vha->d_id.b.al_pa = 1;
+ ha->flags.n2n_bigger = 1;
+
+ id.b.al_pa = 2;
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: assign local id %x remote id %x\n",
+ vha->d_id.b24, id.b24);
+ } else {
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: Remote login - Waiting for WWPN %8phC.\n",
+ rptid_entry->u.f1.port_name);
+ ha->flags.n2n_bigger = 0;
+ }
+ qla24xx_post_newsess_work(vha, &id,
+ rptid_entry->u.f1.port_name,
+ rptid_entry->u.f1.node_name,
+ NULL,
+ FC4_TYPE_UNKNOWN);
}
- memcpy(vha->n2n_port_name, rptid_entry->u.f1.port_name,
- WWN_SIZE);
+		/* if our portname is higher, then initiate N2N login */
+
set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
- set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
- set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
ha->flags.n2n_ae = 1;
return;
+ break;
+ case TOPO_FL:
+ ha->current_topology = ISP_CFG_FL;
+ break;
+ case TOPO_F:
+ ha->current_topology = ISP_CFG_F;
+ break;
+ default:
+ break;
}
ha->flags.gpsc_supported = 1;
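
The N2N addressing rule encoded above is deterministic: both ends compare
WWPNs, the bigger side takes al_pa 1 for itself and assigns al_pa 2 to the
peer, so the two adapters derive the same pair of port IDs with no further
negotiation. Compressed (remote_wwpn stands in for
rptid_entry->u.f1.port_name):

    if (wwn_to_u64(vha->port_name) > wwn_to_u64(remote_wwpn)) {
    	vha->d_id.b24 = 0;
    	vha->d_id.b.al_pa = 1;		/* local:  00-00-01 */
    	id.b24 = 0;
    	id.b.al_pa = 2;			/* remote: 00-00-02 */
    	ha->flags.n2n_bigger = 1;
    } else {
    	ha->flags.n2n_bigger = 0;	/* wait for the peer's PLOGI */
    }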
@@ -3909,30 +3993,9 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->u.f2.port_name, 1);
if (fcport) {
+ fcport->login_retry = vha->hw->login_retry_count;
fcport->plogi_nack_done_deadline = jiffies + HZ;
fcport->scan_state = QLA_FCPORT_FOUND;
- switch (fcport->disc_state) {
- case DSC_DELETED:
- ql_dbg(ql_dbg_disc, vha, 0x210d,
- "%s %d %8phC login\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_fcport_handle_login(vha, fcport);
- break;
- case DSC_DELETE_PEND:
- break;
- default:
- qlt_schedule_sess_for_deletion(fcport);
- break;
- }
- } else {
- id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0];
- id.b.area = rptid_entry->u.f2.remote_nport_id[1];
- id.b.domain = rptid_entry->u.f2.remote_nport_id[2];
- qla24xx_post_newsess_work(vha, &id,
- rptid_entry->u.f2.port_name,
- rptid_entry->u.f2.node_name,
- NULL,
- FC4_TYPE_UNKNOWN);
}
}
}
@@ -4663,7 +4726,7 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
"Done %s.\n", __func__);
bp = (uint32_t *) buf;
for (i = 0; i < (bufsiz-4)/4; i++, bp++)
- *bp = cpu_to_be32(*bp);
+ *bp = le32_to_cpu(*bp);
}
return rval;
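
qla24xx_get_port_login_templ() now returns the template in CPU byte order:
the firmware supplies little-endian 32-bit words, so le32_to_cpu() is the
correct conversion, and callers that need big-endian wire order (the
configure_local_loop hunk above) do their own cpu_to_be32() pass. On a
little-endian host the two helpers behave very differently:

    u32 w = 0x11223344;

    le32_to_cpu(w);		/* LE host: 0x11223344, identity  */
    cpu_to_be32(w);		/* LE host: 0x44332211, byte swap */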
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index aa727d07b702..d620f4bebcd0 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -492,7 +492,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
"Couldn't allocate vp_id.\n");
goto create_vhost_failed;
}
- vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
+ vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);
vha->dpc_flags = 0L;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index c5a963c2c86e..20d9dc39f0fb 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -30,6 +30,9 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
return 0;
}
+ if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
+ return 0;
+
if (!(fcport->nvme_prli_service_param &
(NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
(fcport->nvme_flag & NVME_FLAG_REGISTERED))
@@ -676,15 +679,15 @@ void qla_nvme_delete(struct scsi_qla_host *vha)
}
}
-void qla_nvme_register_hba(struct scsi_qla_host *vha)
+int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
struct nvme_fc_port_template *tmpl;
struct qla_hw_data *ha;
struct nvme_fc_port_info pinfo;
- int ret;
+ int ret = EINVAL;
if (!IS_ENABLED(CONFIG_NVME_FC))
- return;
+ return ret;
ha = vha->hw;
tmpl = &qla_nvme_fc_transport;
@@ -711,7 +714,9 @@ void qla_nvme_register_hba(struct scsi_qla_host *vha)
if (ret) {
ql_log(ql_log_warn, vha, 0xffff,
"register_localport failed: ret=%x\n", ret);
- return;
+ } else {
+ vha->nvme_local_port->private = vha;
}
- vha->nvme_local_port->private = vha;
+
+ return ret;
}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index 816854ada654..4941d107fb1c 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -142,7 +142,7 @@ struct pt_ls4_rx_unsol {
/*
* Global functions prototype in qla_nvme.c source file.
*/
-void qla_nvme_register_hba(struct scsi_qla_host *);
+int qla_nvme_register_hba(struct scsi_qla_host *);
int qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *);
void qla_nvme_delete(struct scsi_qla_host *);
void qla_nvme_abort(struct qla_hw_data *, struct srb *sp, int res);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1fbd16c8c9a7..42b8f0d3e580 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2816,6 +2816,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
ha->max_exchg = FW_MAX_EXCHANGES_CNT;
+ atomic_set(&ha->num_pend_mbx_stage1, 0);
+ atomic_set(&ha->num_pend_mbx_stage2, 0);
+ atomic_set(&ha->num_pend_mbx_stage3, 0);
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -3046,7 +3049,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host = base_vha->host;
base_vha->req = req;
if (IS_QLA2XXX_MIDTYPE(ha))
- base_vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
+ base_vha->mgmt_svr_loop_id =
+ qla2x00_reserve_mgmt_server_loop_id(base_vha);
else
base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
base_vha->vp_idx;
@@ -3830,14 +3834,6 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
return;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
-
- if (fcport->login_retry == 0) {
- fcport->login_retry = vha->hw->login_retry_count;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a3,
- "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
- fcport->port_name, fcport->loop_id, fcport->login_retry);
- }
}
/*
@@ -4785,7 +4781,6 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
struct qlt_plogi_ack_t *pla =
(struct qlt_plogi_ack_t *)e->u.new_sess.pla;
uint8_t free_fcport = 0;
- u64 wwn;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s %d %8phC enter\n",
@@ -4813,10 +4808,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
- if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
+ if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
fcport->fc4_type = FC4_TYPE_FCP_SCSI;
- if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
+ if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
fcport->fc4_type = FC4_TYPE_OTHER;
fcport->fc4f_nvme = FC4_TYPE_NVME;
}
@@ -4858,9 +4853,6 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
if (fcport) {
- if (N2N_TOPO(vha->hw))
- fcport->flags &= ~FCF_FABRIC_DEVICE;
-
fcport->id_changed = 1;
fcport->scan_state = QLA_FCPORT_FOUND;
memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
@@ -4921,12 +4913,22 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
if (dfcp)
qlt_schedule_sess_for_deletion(tfcp);
- wwn = wwn_to_u64(fcport->node_name);
- if (!wwn)
- qla24xx_async_gnnid(vha, fcport);
- else
- qla24xx_async_gnl(vha, fcport);
+ if (N2N_TOPO(vha->hw))
+ fcport->flags &= ~FCF_FABRIC_DEVICE;
+
+ if (N2N_TOPO(vha->hw)) {
+ if (vha->flags.nvme_enabled) {
+ fcport->fc4f_nvme = 1;
+ fcport->n2n_flag = 1;
+ }
+ fcport->fw_login_state = 0;
+ /*
+			 * wait for link init to complete before sending login
+ */
+ } else {
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
}
}
@@ -5061,6 +5063,10 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_IIDMA:
qla_do_iidma_work(vha, e->u.fcport.fcport);
break;
+ case QLA_EVT_ELS_PLOGI:
+ qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
+ e->u.fcport.fcport, false);
+ break;
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
@@ -5090,7 +5096,7 @@ int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
void qla2x00_relogin(struct scsi_qla_host *vha)
{
fc_port_t *fcport;
- int status;
+ int status, relogin_needed = 0;
struct event_arg ea;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -5099,47 +5105,59 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
* to it if we haven't run out of retries.
*/
if (atomic_read(&fcport->state) != FCS_ONLINE &&
- fcport->login_retry &&
- !(fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE))) {
- if (vha->hw->current_topology != ISP_CFG_NL) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
- "%s %8phC DS %d LS %d\n", __func__,
- fcport->port_name, fcport->disc_state,
- fcport->fw_login_state);
- memset(&ea, 0, sizeof(ea));
- ea.event = FCME_RELOGIN;
- ea.fcport = fcport;
- qla2x00_fcport_event_handler(vha, &ea);
- } else if (vha->hw->current_topology == ISP_CFG_NL) {
- fcport->login_retry--;
- status = qla2x00_local_device_login(vha,
- fcport);
- if (status == QLA_SUCCESS) {
- fcport->old_loop_id = fcport->loop_id;
- ql_dbg(ql_dbg_disc, vha, 0x2003,
- "Port login OK: logged in ID 0x%x.\n",
- fcport->loop_id);
- qla2x00_update_fcport(vha, fcport);
- } else if (status == 1) {
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- /* retry the login again */
- ql_dbg(ql_dbg_disc, vha, 0x2007,
- "Retrying %d login again loop_id 0x%x.\n",
- fcport->login_retry,
- fcport->loop_id);
- } else {
- fcport->login_retry = 0;
- }
+ fcport->login_retry) {
+ if (fcport->scan_state != QLA_FCPORT_FOUND ||
+ fcport->disc_state == DSC_LOGIN_COMPLETE)
+ continue;
+
+ if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) ||
+ fcport->disc_state == DSC_DELETE_PEND) {
+ relogin_needed = 1;
+ } else {
+ if (vha->hw->current_topology != ISP_CFG_NL) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_RELOGIN;
+ ea.fcport = fcport;
+ qla2x00_fcport_event_handler(vha, &ea);
+ } else if (vha->hw->current_topology ==
+ ISP_CFG_NL) {
+ fcport->login_retry--;
+ status =
+ qla2x00_local_device_login(vha,
+ fcport);
+ if (status == QLA_SUCCESS) {
+ fcport->old_loop_id =
+ fcport->loop_id;
+ ql_dbg(ql_dbg_disc, vha, 0x2003,
+ "Port login OK: logged in ID 0x%x.\n",
+ fcport->loop_id);
+ qla2x00_update_fcport
+ (vha, fcport);
+ } else if (status == 1) {
+ set_bit(RELOGIN_NEEDED,
+ &vha->dpc_flags);
+ /* retry the login again */
+ ql_dbg(ql_dbg_disc, vha, 0x2007,
+ "Retrying %d login again loop_id 0x%x.\n",
+ fcport->login_retry,
+ fcport->loop_id);
+ } else {
+ fcport->login_retry = 0;
+ }
- if (fcport->login_retry == 0 &&
- status != QLA_SUCCESS)
- qla2x00_clear_loop_id(fcport);
+ if (fcport->login_retry == 0 &&
+ status != QLA_SUCCESS)
+ qla2x00_clear_loop_id(fcport);
+ }
}
}
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
}
+ if (relogin_needed)
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
ql_dbg(ql_dbg_disc, vha, 0x400e,
"Relogin end.\n");
}
@@ -6179,6 +6197,11 @@ intr_on_check:
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
+ if (test_and_clear_bit(N2N_LINK_RESET,
+ &base_vha->dpc_flags)) {
+ qla2x00_lip_reset(base_vha);
+ }
+
ha->dpc_active = 0;
end_loop:
set_current_state(TASK_INTERRUPTIBLE);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 1027b0cb7fa3..8c811b251d42 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -805,6 +805,10 @@ qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
list_for_each_entry(pla, &vha->plogi_ack_list, list) {
if (pla->id.b24 == id->b24) {
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
+ "%s %d %8phC Term INOT due to new INOT",
+ __func__, __LINE__,
+ pla->iocb.u.isp24.port_name);
qlt_send_term_imm_notif(vha, &pla->iocb, 1);
memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
return pla;
@@ -982,8 +986,9 @@ void qlt_free_session_done(struct work_struct *work)
logo.id = sess->d_id;
logo.cmd_count = 0;
+ if (!own)
+ qlt_send_first_logo(vha, &logo);
sess->send_els_logo = 0;
- qlt_send_first_logo(vha, &logo);
}
if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
@@ -1053,7 +1058,6 @@ void qlt_free_session_done(struct work_struct *work)
sess->disc_state = DSC_DELETED;
sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
sess->deleted = QLA_SESS_DELETED;
- sess->login_retry = vha->hw->login_retry_count;
if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
vha->fcport_count--;
@@ -1073,6 +1077,7 @@ void qlt_free_session_done(struct work_struct *work)
struct qlt_plogi_ack_t *con =
sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
struct imm_ntfy_from_isp *iocb;
+ own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
if (con) {
iocb = &con->iocb;
@@ -1156,7 +1161,7 @@ void qlt_unreg_sess(struct fc_port *sess)
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
- qla2x00_mark_device_lost(vha, sess, 1, 1);
+ qla2x00_mark_device_lost(vha, sess, 0, 0);
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
sess->disc_state = DSC_DELETE_PEND;
@@ -3782,7 +3787,7 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
return;
}
cmd->jiffies_at_free = get_jiffies_64();
- percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+ target_free_tag(sess->se_sess, &cmd->se_cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);
@@ -4145,7 +4150,7 @@ out_term:
qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
qlt_decr_num_pend_cmds(vha);
- percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+ target_free_tag(sess->se_sess, &cmd->se_cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4276,9 +4281,9 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
{
struct se_session *se_sess = sess->se_sess;
struct qla_tgt_cmd *cmd;
- int tag;
+ int tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return NULL;
@@ -4291,6 +4296,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
qlt_incr_num_pend_cmds(vha);
cmd->vha = vha;
cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->sess = sess;
cmd->loop_id = sess->loop_id;
cmd->conf_compl_supported = sess->conf_compl_supported;
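
The tag-pool conversion replaces percpu_ida with the sbitmap-backed session
tag pool: sbitmap_queue_get() hands back a tag plus the CPU it came from,
and both must be stored in the se_cmd so target_free_tag() can return the
tag to the right per-CPU cache. Sketch of the pairing, assuming the
target-core API of this kernel generation:

    int tag, cpu;

    tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
    if (tag < 0)
    	return NULL;			/* pool exhausted */
    cmd->se_cmd.map_tag = tag;
    cmd->se_cmd.map_cpu = cpu;
    /* ... command lifetime ... */
    target_free_tag(sess->se_sess, &cmd->se_cmd);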
@@ -4714,6 +4720,10 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
if (!pla) {
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s %d %8phC Term INOT due to mem alloc fail",
+ __func__, __LINE__,
+ iocb->u.isp24.port_name);
qlt_send_term_imm_notif(vha, iocb, 1);
goto out;
}
@@ -5293,7 +5303,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct fc_port *sess;
struct se_session *se_sess;
struct qla_tgt_cmd *cmd;
- int tag;
+ int tag, cpu;
unsigned long flags;
if (unlikely(tgt->tgt_stop)) {
@@ -5325,7 +5335,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
se_sess = sess->se_sess;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return;
@@ -5356,6 +5366,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
cmd->reset_count = ha->base_qpair->chip_reset;
cmd->q_full = 1;
cmd->qpair = ha->base_qpair;
+ cmd->se_cmd.map_cpu = cpu;
if (qfull) {
cmd->q_full = 1;
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 731ca0d8520a..0ccd06f11f12 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -571,6 +571,15 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
}
break;
+ case T268_BUF_TYPE_REQ_MIRROR:
+ case T268_BUF_TYPE_RSP_MIRROR:
+ /*
+		 * Mirror pointers are not implemented in the
+		 * driver; shadow pointers are used by the
+		 * driver instead. Skip these entries.
+ */
+ qla27xx_skip_entry(ent, buf);
+ break;
default:
ql_dbg(ql_dbg_async, vha, 0xd02b,
"%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
@@ -1028,8 +1037,10 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
ql_log(ql_log_warn, vha, 0xd300,
"Firmware has been previously dumped (%p),"
" -- ignoring request\n", vha->hw->fw_dump);
- else
+ else {
+ QLA_FW_STOPPED(vha->hw);
qla27xx_execute_fwdt_template(vha);
+ }
#ifndef __CHECKER__
if (!hardware_locked)
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 1ad7582220c3..3850b28518e5 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.07-k"
+#define QLA2XXX_VERSION "10.00.00.08-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 7732e9336d43..e03d12a5f986 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1049,10 +1049,8 @@ static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
NULL,
};
-static struct se_portal_group *tcm_qla2xxx_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *tcm_qla2xxx_make_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct tcm_qla2xxx_lport *lport = container_of(wwn,
struct tcm_qla2xxx_lport, lport_wwn);
@@ -1171,10 +1169,8 @@ static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
NULL,
};
-static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct tcm_qla2xxx_lport *lport = container_of(wwn,
struct tcm_qla2xxx_lport, lport_wwn);
@@ -1465,8 +1461,7 @@ static void tcm_qla2xxx_free_session(struct fc_port *sess)
}
target_wait_for_sess_cmds(se_sess);
- transport_deregister_session_configfs(sess->se_sess);
- transport_deregister_session(sess->se_sess);
+ target_remove_session(se_sess);
}
static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
@@ -1543,7 +1538,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
* Locate our struct se_node_acl either from an explict NodeACL created
* via ConfigFS, or via running in TPG demo mode.
*/
- se_sess = target_alloc_session(&tpg->se_tpg, num_tags,
+ se_sess = target_setup_session(&tpg->se_tpg, num_tags,
sizeof(struct qla_tgt_cmd),
TARGET_PROT_ALL, port_name,
qlat_sess, tcm_qla2xxx_session_cb);
@@ -1624,9 +1619,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
sess->conf_compl_supported = conf_compl_supported;
- /* Reset logout parameters to default */
- sess->logout_on_delete = 1;
- sess->keep_nport_handle = 0;
}
/*
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 8578e566ab41..9d09228eee28 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -959,7 +959,7 @@ static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int
/* Temporary workaround until bug is found and fixed (one bug has been found
already, but fixing it makes things even worse) -jj */
int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;
- host->can_queue = atomic_read(&host->host_busy) + num_free;
+ host->can_queue = scsi_host_busy(host) + num_free;
host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 4c60c260c5da..fc1356d101b0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -162,12 +162,12 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
(level > 1)) {
scsi_print_result(cmd, "Done", disposition);
scsi_print_command(cmd);
- if (status_byte(cmd->result) & CHECK_CONDITION)
+ if (status_byte(cmd->result) == CHECK_CONDITION)
scsi_print_sense(cmd);
if (level > 3)
scmd_printk(KERN_INFO, cmd,
"scsi host busy %d failed %d\n",
- atomic_read(&cmd->device->host->host_busy),
+ scsi_host_busy(cmd->device->host),
cmd->device->host->host_failed);
}
}
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
index 6dcc4c685d1d..4fd75a3aff66 100644
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -43,7 +43,4 @@ struct scsi_device;
struct scsi_target;
struct scatterlist;
-/* obsolete typedef junk. */
-#include "scsi_typedefs.h"
-
#endif /* _SCSI_H */
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 364e71861bfd..60bcc6df97a9 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -164,29 +164,29 @@ static const char *sdebug_version_date = "20180128";
#define SDEBUG_OPT_RESET_NOISE 0x2000
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
#define SDEBUG_OPT_HOST_BUSY 0x8000
+#define SDEBUG_OPT_CMD_ABORT 0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
SDEBUG_OPT_TRANSPORT_ERR | \
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
SDEBUG_OPT_SHORT_TRANSFER | \
- SDEBUG_OPT_HOST_BUSY)
+ SDEBUG_OPT_HOST_BUSY | \
+ SDEBUG_OPT_CMD_ABORT)
/* When "every_nth" > 0 then modulo "every_nth" commands:
* - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
* - a RECOVERED_ERROR is simulated on successful read and write
* commands if SDEBUG_OPT_RECOVERED_ERR is set.
* - a TRANSPORT_ERROR is simulated on successful read and write
* commands if SDEBUG_OPT_TRANSPORT_ERR is set.
+ * - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
+ * CMD_ABORT
*
- * When "every_nth" < 0 then after "- every_nth" commands:
- * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
- * - a RECOVERED_ERROR is simulated on successful read and write
- * commands if SDEBUG_OPT_RECOVERED_ERR is set.
- * - a TRANSPORT_ERROR is simulated on successful read and write
- * commands if _DEBUG_OPT_TRANSPORT_ERR is set.
- * This will continue on every subsequent command until some other action
- * occurs (e.g. the user * writing a new value (other than -1 or 1) to
- * every_nth via sysfs).
+ * When "every_nth" < 0 then after "- every_nth" commands the selected
+ * error will be injected. The error will be injected on every subsequent
+ * command until some other action occurs; for example, the user writing
+ * a new value (other than -1 or 1) to every_nth:
+ * echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
*/
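A short sketch of the selection logic this comment describes, as a hypothetical helper (not the driver's actual code): a positive every_nth fires on every Nth command, while a negative one becomes sticky once the threshold is reached.

	static bool sdebug_inject_here(int every_nth, unsigned long cmnd_count)
	{
		if (every_nth > 0)
			return (cmnd_count % every_nth) == 0;	/* every Nth command */
		if (every_nth < 0)
			return cmnd_count >= (unsigned long)-every_nth; /* sticky after -N */
		return false;
	}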
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
@@ -281,6 +281,7 @@ struct sdebug_defer {
int issuing_cpu;
bool init_hrt;
bool init_wq;
+ bool aborted; /* true when blk_abort_request() already called */
enum sdeb_defer_type defer_t;
};
@@ -296,6 +297,7 @@ struct sdebug_queued_cmd {
unsigned int inj_dix:1;
unsigned int inj_short:1;
unsigned int inj_host_busy:1;
+ unsigned int inj_cmd_abort:1;
};
struct sdebug_queue {
@@ -3792,6 +3794,7 @@ static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
+ bool aborted = sd_dp->aborted;
int qc_idx;
int retiring = 0;
unsigned long iflags;
@@ -3801,6 +3804,8 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
struct sdebug_dev_info *devip;
sd_dp->defer_t = SDEB_DEFER_NONE;
+ if (unlikely(aborted))
+ sd_dp->aborted = false;
qc_idx = sd_dp->qc_idx;
sqp = sdebug_q_arr + sd_dp->sqa_idx;
if (sdebug_statistics) {
@@ -3852,6 +3857,11 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
atomic_set(&retired_max_queue, k + 1);
}
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ if (unlikely(aborted)) {
+ if (sdebug_verbose)
+ pr_info("bypassing scsi_done() due to aborted cmd\n");
+ return;
+ }
scp->scsi_done(scp); /* callback to mid level */
}
@@ -4312,7 +4322,8 @@ static void setup_inject(struct sdebug_queue *sqp,
if (sdebug_every_nth > 0)
sqcp->inj_recovered = sqcp->inj_transport
= sqcp->inj_dif
- = sqcp->inj_dix = sqcp->inj_short = 0;
+ = sqcp->inj_dix = sqcp->inj_short
+ = sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
return;
}
sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
@@ -4321,6 +4332,7 @@ static void setup_inject(struct sdebug_queue *sqp,
sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
+ sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
}
/* Complete the processing of the thread that queued a SCSI command to this
@@ -4458,7 +4470,14 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
sd_dp->defer_t = SDEB_DEFER_WQ;
+ if (unlikely(sqcp->inj_cmd_abort))
+ sd_dp->aborted = true;
schedule_work(&sd_dp->ew.work);
+ if (unlikely(sqcp->inj_cmd_abort)) {
+ sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
+ cmnd->request->tag);
+ blk_abort_request(cmnd->request);
+ }
}
if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
(scsi_result == device_qfull_result)))
@@ -4844,12 +4863,11 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
(unsigned long)sdebug_dev_size_mb *
1048576;
- fake_storep = vmalloc(sz);
+ fake_storep = vzalloc(sz);
if (NULL == fake_storep) {
pr_err("out of memory, 9\n");
return -ENOMEM;
}
- memset(fake_storep, 0, sz);
}
sdebug_fake_rw = n;
}
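This conversion (and the matching one in scsi_debug_init() below) relies on vzalloc(sz) being equivalent to vmalloc(sz) followed by memset(p, 0, sz); the resulting pattern, in isolation:

	fake_storep = vzalloc(sz);	/* allocated and zero-filled in one call */
	if (!fake_storep)
		return -ENOMEM;
	/* no explicit memset() needed afterwards */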
@@ -5391,13 +5409,12 @@ static int __init scsi_debug_init(void)
}
if (sdebug_fake_rw == 0) {
- fake_storep = vmalloc(sz);
+ fake_storep = vzalloc(sz);
if (NULL == fake_storep) {
pr_err("out of memory, 1\n");
ret = -ENOMEM;
goto free_q_arr;
}
- memset(fake_storep, 0, sz);
if (sdebug_num_parts > 0)
sdebug_build_parts(fake_storep, sz);
}
@@ -5790,11 +5807,13 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
fini:
if (F_DELAY_OVERR & flags)
return schedule_resp(scp, devip, errsts, pfp, 0, 0);
- else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) {
+ else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
+ sdebug_ndelay > 10000)) {
/*
- * If any delay is active, for F_SSU_DELAY want at least 1
- * second and if sdebug_jdelay>0 want a long delay of that
- * many seconds; for F_SYNC_DELAY want 1/20 of that.
+ * Skip long delays if ndelay <= 10 microseconds. Otherwise
+ * for Start Stop Unit (SSU) want at least 1 second delay and
+ * if sdebug_jdelay>1 want a long delay of that many seconds.
+ * For Synchronize Cache want 1/20 of SSU's delay.
*/
int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 2715cdaa669c..b7a8fdfeb2f4 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -66,7 +66,7 @@ void scsi_eh_wakeup(struct Scsi_Host *shost)
{
lockdep_assert_held(shost->host_lock);
- if (atomic_read(&shost->host_busy) == shost->host_failed) {
+ if (scsi_host_busy(shost) == shost->host_failed) {
trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
@@ -2169,7 +2169,7 @@ int scsi_error_handler(void *data)
break;
if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
- shost->host_failed != atomic_read(&shost->host_busy)) {
+ shost->host_failed != scsi_host_busy(shost)) {
SCSI_LOG_ERROR_RECOVERY(1,
shost_printk(KERN_INFO, shost,
"scsi_eh_%d: sleeping\n",
@@ -2184,7 +2184,7 @@ int scsi_error_handler(void *data)
"scsi_eh_%d: waking up %d/%d/%d\n",
shost->host_no, shost->host_eh_scheduled,
shost->host_failed,
- atomic_read(&shost->host_busy)));
+ scsi_host_busy(shost)));
/*
* We have a host that is failing for some reason. Figure out
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 0a875491f5a7..cc30fccc1a2e 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -100,8 +100,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
"Ioctl returned 0x%x\n", result));
- if ((driver_byte(result) & DRIVER_SENSE) &&
- (scsi_sense_valid(&sshdr))) {
+ if (driver_byte(result) == DRIVER_SENSE &&
+ scsi_sense_valid(&sshdr)) {
switch (sshdr.sense_key) {
case ILLEGAL_REQUEST:
if (cmd[0] == ALLOW_MEDIUM_REMOVAL)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 41e9ac9fc138..0adfb3bce0fd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -238,7 +238,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
/**
- * scsi_execute - insert request and wait for the result
+ * __scsi_execute - insert request and wait for the result
* @sdev: scsi device
* @cmd: scsi command
* @data_direction: data direction
@@ -255,7 +255,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* Returns the scsi_cmnd result field if a command was executed, or a negative
* Linux error code if we didn't get that far.
*/
-int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, struct scsi_sense_hdr *sshdr,
int timeout, int retries, u64 flags, req_flags_t rq_flags,
@@ -309,7 +309,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
return ret;
}
-EXPORT_SYMBOL(scsi_execute);
+EXPORT_SYMBOL(__scsi_execute);
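Only the exported symbol is renamed here; presumably a scsi_execute() wrapper in the header keeps existing callers working, which also allows checking the sense buffer size at compile time. A hedged sketch of such a wrapper (the exact macro shape is an assumption):

	#define scsi_execute(sdev, cmd, data_direction, buffer, bufflen,	\
			     sense, sshdr, timeout, retries, flags,		\
			     rq_flags, resid)					\
	({									\
		BUILD_BUG_ON((sense) != NULL &&					\
			     sizeof(sense) != SCSI_SENSE_BUFFERSIZE);		\
		__scsi_execute(sdev, cmd, data_direction, buffer, bufflen,	\
			       sense, sshdr, timeout, retries, flags,		\
			       rq_flags, resid);				\
	})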
/*
* Function: scsi_init_cmd_errh()
@@ -345,7 +345,8 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
unsigned long flags;
rcu_read_lock();
- atomic_dec(&shost->host_busy);
+ if (!shost->use_blk_mq)
+ atomic_dec(&shost->host_busy);
if (unlikely(scsi_host_in_recovery(shost))) {
spin_lock_irqsave(shost->host_lock, flags);
if (shost->host_failed || shost->host_eh_scheduled)
@@ -371,7 +372,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
static void scsi_kick_queue(struct request_queue *q)
{
if (q->mq_ops)
- blk_mq_start_hw_queues(q);
+ blk_mq_run_hw_queues(q, false);
else
blk_run_queue(q);
}
@@ -444,7 +445,12 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
- if (shost->can_queue > 0 &&
+ /*
+ * blk-mq can handle host queue busy efficiently via host-wide driver
+ * tag allocation
+ */
+
+ if (!shost->use_blk_mq && shost->can_queue > 0 &&
atomic_read(&shost->host_busy) >= shost->can_queue)
return true;
if (atomic_read(&shost->host_blocked) > 0)
@@ -662,6 +668,7 @@ static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
cmd->request->next_rq->special = NULL;
}
+/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
unsigned int bytes, unsigned int bidi_bytes)
{
@@ -760,161 +767,39 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
}
}
-/*
- * Function: scsi_io_completion()
- *
- * Purpose: Completion processing for block device I/O requests.
- *
- * Arguments: cmd - command that is finished.
- *
- * Lock status: Assumed that no lock is held upon entry.
- *
- * Returns: Nothing
- *
- * Notes: We will finish off the specified number of sectors. If we
- * are done, the command block will be released and the queue
- * function will be goosed. If we are not done then we have to
- * figure out what to do next:
- *
- * a) We can call scsi_requeue_command(). The request
- * will be unprepared and put back on the queue. Then
- * a new command will be created for it. This should
- * be used if we made forward progress, or if we want
- * to switch from READ(10) to READ(6) for example.
- *
- * b) We can call __scsi_queue_insert(). The request will
- * be put back on the queue and retried using the same
- * command as before, possibly after a delay.
- *
- * c) We can call scsi_end_request() with -EIO to fail
- * the remainder of the request.
- */
-void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+/* Helper for scsi_io_completion() when "reprep" action required. */
+static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
+ struct request_queue *q)
+{
+ /* A new command will be prepared and issued. */
+ if (q->mq_ops) {
+ scsi_mq_requeue_cmd(cmd);
+ } else {
+ /* Unprep request and put it back at head of the queue. */
+ scsi_release_buffers(cmd);
+ scsi_requeue_command(q, cmd);
+ }
+}
+
+/* Helper for scsi_io_completion() when special action required. */
+static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
- int result = cmd->result;
struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
- blk_status_t error = BLK_STS_OK;
- struct scsi_sense_hdr sshdr;
- bool sense_valid = false;
- int sense_deferred = 0, level = 0;
+ int level = 0;
enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
ACTION_DELAYED_RETRY} action;
unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
+ struct scsi_sense_hdr sshdr;
+ bool sense_valid;
+ bool sense_current = true; /* false implies "deferred sense" */
+ blk_status_t blk_stat;
- if (result) {
- sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
- if (sense_valid)
- sense_deferred = scsi_sense_is_deferred(&sshdr);
- }
-
- if (blk_rq_is_passthrough(req)) {
- if (result) {
- if (sense_valid) {
- /*
- * SG_IO wants current and deferred errors
- */
- scsi_req(req)->sense_len =
- min(8 + cmd->sense_buffer[7],
- SCSI_SENSE_BUFFERSIZE);
- }
- if (!sense_deferred)
- error = scsi_result_to_blk_status(cmd, result);
- }
- /*
- * scsi_result_to_blk_status may have reset the host_byte
- */
- scsi_req(req)->result = cmd->result;
- scsi_req(req)->resid_len = scsi_get_resid(cmd);
-
- if (scsi_bidi_cmnd(cmd)) {
- /*
- * Bidi commands Must be complete as a whole,
- * both sides at once.
- */
- scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
- if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
- blk_rq_bytes(req->next_rq)))
- BUG();
- return;
- }
- } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
- /*
- * Flush commands do not transfers any data, and thus cannot use
- * good_bytes != blk_rq_bytes(req) as the signal for an error.
- * This sets the error explicitly for the problem case.
- */
- error = scsi_result_to_blk_status(cmd, result);
- }
-
- /* no bidi support for !blk_rq_is_passthrough yet */
- BUG_ON(blk_bidi_rq(req));
-
- /*
- * Next deal with any sectors which we were able to correctly
- * handle.
- */
- SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
- "%u sectors total, %d bytes done.\n",
- blk_rq_sectors(req), good_bytes));
-
- /*
- * Recovered errors need reporting, but they're always treated as
- * success, so fiddle the result code here. For passthrough requests
- * we already took a copy of the original into sreq->result which
- * is what gets returned to the user
- */
- if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
- /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
- * print since caller wants ATA registers. Only occurs on
- * SCSI ATA PASS_THROUGH commands when CK_COND=1
- */
- if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
- ;
- else if (!(req->rq_flags & RQF_QUIET))
- scsi_print_sense(cmd);
- result = 0;
- /* for passthrough error may be set */
- error = BLK_STS_OK;
- }
- /*
- * Another corner case: the SCSI status byte is non-zero but 'good'.
- * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
- * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
- * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
- * intermediate statuses (both obsolete in SAM-4) as good.
- */
- if (status_byte(result) && scsi_status_is_good(result)) {
- result = 0;
- error = BLK_STS_OK;
- }
-
- /*
- * special case: failed zero length commands always need to
- * drop down into the retry code. Otherwise, if we finished
- * all bytes in the request we are done now.
- */
- if (!(blk_rq_bytes(req) == 0 && error) &&
- !scsi_end_request(req, error, good_bytes, 0))
- return;
-
- /*
- * Kill remainder if no retrys.
- */
- if (error && scsi_noretry_cmd(cmd)) {
- if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
- BUG();
- return;
- }
-
- /*
- * If there had been no error, but we have leftover bytes in the
- * requeues just queue the command up again.
- */
- if (result == 0)
- goto requeue;
+ sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
+ if (sense_valid)
+ sense_current = !scsi_sense_is_deferred(&sshdr);
- error = scsi_result_to_blk_status(cmd, result);
+ blk_stat = scsi_result_to_blk_status(cmd, result);
if (host_byte(result) == DID_RESET) {
/* Third party bus reset or reset for error recovery
@@ -922,7 +807,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* happens.
*/
action = ACTION_RETRY;
- } else if (sense_valid && !sense_deferred) {
+ } else if (sense_valid && sense_current) {
switch (sshdr.sense_key) {
case UNIT_ATTENTION:
if (cmd->device->removable) {
@@ -958,18 +843,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
action = ACTION_REPREP;
} else if (sshdr.asc == 0x10) /* DIX */ {
action = ACTION_FAIL;
- error = BLK_STS_PROTECTION;
+ blk_stat = BLK_STS_PROTECTION;
/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
action = ACTION_FAIL;
- error = BLK_STS_TARGET;
+ blk_stat = BLK_STS_TARGET;
} else
action = ACTION_FAIL;
break;
case ABORTED_COMMAND:
action = ACTION_FAIL;
if (sshdr.asc == 0x10) /* DIF */
- error = BLK_STS_PROTECTION;
+ blk_stat = BLK_STS_PROTECTION;
break;
case NOT_READY:
/* If the device is in the process of becoming
@@ -1022,8 +907,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
DEFAULT_RATELIMIT_BURST);
if (unlikely(scsi_logging_level))
- level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
- SCSI_LOG_MLCOMPLETE_BITS);
+ level =
+ SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
+ SCSI_LOG_MLCOMPLETE_BITS);
/*
* if logging is enabled the failure will be printed
@@ -1031,25 +917,16 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*/
if (!level && __ratelimit(&_rs)) {
scsi_print_result(cmd, NULL, FAILED);
- if (driver_byte(result) & DRIVER_SENSE)
+ if (driver_byte(result) == DRIVER_SENSE)
scsi_print_sense(cmd);
scsi_print_command(cmd);
}
}
- if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
+ if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req), 0))
return;
/*FALLTHRU*/
case ACTION_REPREP:
- requeue:
- /* Unprep the request and put it back at the head of the queue.
- * A new command will be prepared and issued.
- */
- if (q->mq_ops) {
- scsi_mq_requeue_cmd(cmd);
- } else {
- scsi_release_buffers(cmd);
- scsi_requeue_command(q, cmd);
- }
+ scsi_io_completion_reprep(cmd, q);
break;
case ACTION_RETRY:
/* Retry the same command immediately */
@@ -1062,6 +939,185 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}
}
+/*
+ * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a
+ * new result that may suppress further error checking. Also modifies
+ * *blk_statp in some cases.
+ */
+static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
+ blk_status_t *blk_statp)
+{
+ bool sense_valid;
+ bool sense_current = true; /* false implies "deferred sense" */
+ struct request *req = cmd->request;
+ struct scsi_sense_hdr sshdr;
+
+ sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
+ if (sense_valid)
+ sense_current = !scsi_sense_is_deferred(&sshdr);
+
+ if (blk_rq_is_passthrough(req)) {
+ if (sense_valid) {
+ /*
+ * SG_IO wants current and deferred errors
+ */
+ scsi_req(req)->sense_len =
+ min(8 + cmd->sense_buffer[7],
+ SCSI_SENSE_BUFFERSIZE);
+ }
+ if (sense_current)
+ *blk_statp = scsi_result_to_blk_status(cmd, result);
+ } else if (blk_rq_bytes(req) == 0 && sense_current) {
+ /*
+ * Flush commands do not transfer any data, and thus cannot use
+ * good_bytes != blk_rq_bytes(req) as the signal for an error.
+ * This sets *blk_statp explicitly for the problem case.
+ */
+ *blk_statp = scsi_result_to_blk_status(cmd, result);
+ }
+ /*
+ * Recovered errors need reporting, but they're always treated as
+ * success, so fiddle the result code here. For passthrough requests
+ * we already took a copy of the original into sreq->result which
+ * is what gets returned to the user
+ */
+ if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
+ bool do_print = true;
+ /*
+ * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d]
+ * skip print since caller wants ATA registers. Only occurs
+ * on SCSI ATA PASS_THROUGH commands when CK_COND=1
+ */
+ if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
+ do_print = false;
+ else if (req->rq_flags & RQF_QUIET)
+ do_print = false;
+ if (do_print)
+ scsi_print_sense(cmd);
+ result = 0;
+ /* for passthrough, *blk_statp may be set */
+ *blk_statp = BLK_STS_OK;
+ }
+ /*
+ * Another corner case: the SCSI status byte is non-zero but 'good'.
+ * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
+ * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
+ * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
+ * intermediate statuses (both obsolete in SAM-4) as good.
+ */
+ if (status_byte(result) && scsi_status_is_good(result)) {
+ result = 0;
+ *blk_statp = BLK_STS_OK;
+ }
+ return result;
+}
+
+/*
+ * Function: scsi_io_completion()
+ *
+ * Purpose: Completion processing for block device I/O requests.
+ *
+ * Arguments: cmd - command that is finished.
+ *
+ * Lock status: Assumed that no lock is held upon entry.
+ *
+ * Returns: Nothing
+ *
+ * Notes: We will finish off the specified number of sectors. If we
+ * are done, the command block will be released and the queue
+ * function will be goosed. If we are not done then we have to
+ * figure out what to do next:
+ *
+ * a) We can call scsi_requeue_command(). The request
+ * will be unprepared and put back on the queue. Then
+ * a new command will be created for it. This should
+ * be used if we made forward progress, or if we want
+ * to switch from READ(10) to READ(6) for example.
+ *
+ * b) We can call __scsi_queue_insert(). The request will
+ * be put back on the queue and retried using the same
+ * command as before, possibly after a delay.
+ *
+ * c) We can call scsi_end_request() with blk_stat other than
+ * BLK_STS_OK, to fail the remainder of the request.
+ */
+void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+{
+ int result = cmd->result;
+ struct request_queue *q = cmd->device->request_queue;
+ struct request *req = cmd->request;
+ blk_status_t blk_stat = BLK_STS_OK;
+
+ if (unlikely(result)) /* a nz result may or may not be an error */
+ result = scsi_io_completion_nz_result(cmd, result, &blk_stat);
+
+ if (unlikely(blk_rq_is_passthrough(req))) {
+ /*
+ * scsi_result_to_blk_status may have reset the host_byte
+ */
+ scsi_req(req)->result = cmd->result;
+ scsi_req(req)->resid_len = scsi_get_resid(cmd);
+
+ if (unlikely(scsi_bidi_cmnd(cmd))) {
+ /*
+ * Bidi commands must be completed as a whole,
+ * both sides at once.
+ */
+ scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
+ if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
+ blk_rq_bytes(req->next_rq)))
+ WARN_ONCE(true,
+ "Bidi command with remaining bytes");
+ return;
+ }
+ }
+
+ /* no bidi support yet, other than in pass-through */
+ if (unlikely(blk_bidi_rq(req))) {
+ WARN_ONCE(true, "Only support bidi command in passthrough");
+ scmd_printk(KERN_ERR, cmd, "Killing bidi command\n");
+ if (scsi_end_request(req, BLK_STS_IOERR, blk_rq_bytes(req),
+ blk_rq_bytes(req->next_rq)))
+ WARN_ONCE(true, "Bidi command with remaining bytes");
+ return;
+ }
+
+ /*
+ * Next deal with any sectors which we were able to correctly
+ * handle.
+ */
+ SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
+ "%u sectors total, %d bytes done.\n",
+ blk_rq_sectors(req), good_bytes));
+
+ /*
+ * Failed, zero length commands always need to drop down to the
+ * retry code. The fast path should return in this block.
+ */
+ if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
+ if (likely(!scsi_end_request(req, blk_stat, good_bytes, 0)))
+ return; /* no bytes remaining */
+ }
+
+ /* Kill remainder if no retries. */
+ if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
+ if (scsi_end_request(req, blk_stat, blk_rq_bytes(req), 0))
+ WARN_ONCE(true,
+ "Bytes remaining after failed, no-retry command");
+ return;
+ }
+
+ /*
+ * If there had been no error, but we have leftover bytes in the
+ * request, just queue the command up again.
+ */
+ if (likely(result == 0))
+ scsi_io_completion_reprep(cmd, q);
+ else
+ scsi_io_completion_action(cmd, result);
+}
+
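A condensed view of the reworked completion flow, for orientation (comment sketch, not code from the patch):

	/*
	 * scsi_io_completion() after the split:
	 *   result != 0   -> scsi_io_completion_nz_result() may clear it
	 *   passthrough   -> copy result/resid back; finish bidi as a whole
	 *   fast path     -> scsi_end_request(); return if no bytes remain
	 *   no retries    -> fail the remainder of the request
	 *   result == 0   -> scsi_io_completion_reprep()
	 *   otherwise     -> scsi_io_completion_action()
	 */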
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
{
int count;
@@ -1550,7 +1606,10 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
if (scsi_host_in_recovery(shost))
return 0;
- busy = atomic_inc_return(&shost->host_busy) - 1;
+ if (!shost->use_blk_mq)
+ busy = atomic_inc_return(&shost->host_busy) - 1;
+ else
+ busy = 0;
if (atomic_read(&shost->host_blocked) > 0) {
if (busy)
goto starved;
@@ -1566,7 +1625,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
"unblocking host at zero depth\n"));
}
- if (shost->can_queue > 0 && busy >= shost->can_queue)
+ if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue)
goto starved;
if (shost->host_self_blocked)
goto starved;
@@ -1652,7 +1711,9 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
* with the locks as normal issue path does.
*/
atomic_inc(&sdev->device_busy);
- atomic_inc(&shost->host_busy);
+
+ if (!shost->use_blk_mq)
+ atomic_inc(&shost->host_busy);
if (starget->can_queue > 0)
atomic_inc(&starget->target_busy);
@@ -2555,7 +2616,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
* ILLEGAL REQUEST if the code page isn't supported */
if (use_10_for_ms && !scsi_status_is_good(result) &&
- (driver_byte(result) & DRIVER_SENSE)) {
+ driver_byte(result) == DRIVER_SENSE) {
if (scsi_sense_valid(sshdr)) {
if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
(sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 0880d975eed3..78ca63dfba4a 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -614,7 +614,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
* INQUIRY should not yield UNIT_ATTENTION
* but many buggy devices do so anyway.
*/
- if ((driver_byte(result) & DRIVER_SENSE) &&
+ if (driver_byte(result) == DRIVER_SENSE &&
scsi_sense_valid(&sshdr)) {
if ((sshdr.sense_key == UNIT_ATTENTION) &&
((sshdr.asc == 0x28) ||
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 7943b762c12d..3aee9464a7bf 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -382,7 +382,7 @@ static ssize_t
show_host_busy(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
- return snprintf(buf, 20, "%d\n", atomic_read(&shost->host_busy));
+ return snprintf(buf, 20, "%d\n", scsi_host_busy(shost));
}
static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL);
@@ -722,8 +722,24 @@ static ssize_t
sdev_store_delete(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- if (device_remove_file_self(dev, attr))
- scsi_remove_device(to_scsi_device(dev));
+ struct kernfs_node *kn;
+
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ WARN_ON_ONCE(!kn);
+ /*
+ * Concurrent writes into the "delete" sysfs attribute may trigger
+ * concurrent calls to device_remove_file() and scsi_remove_device().
+ * device_remove_file() handles concurrent removal calls by
+ * serializing these and by ignoring the second and later removal
+ * attempts. Concurrent calls of scsi_remove_device() are
+ * serialized. The second and later calls of scsi_remove_device() are
+ * ignored because the first call of that function changes the device
+ * state into SDEV_DEL.
+ */
+ device_remove_file(dev, attr);
+ scsi_remove_device(to_scsi_device(dev));
+ if (kn)
+ sysfs_unbreak_active_protection(kn);
return count;
};
static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
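The same self-removal pattern reduced to its skeleton, with hypothetical names: breaking active protection first is what lets an attribute's own ->store() callback remove that attribute without deadlocking.

	static ssize_t example_delete_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
	{
		struct kernfs_node *kn;

		/* Drop active protection so removal of this attribute from
		 * inside its own ->store() cannot deadlock. */
		kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
		device_remove_file(dev, attr);
		/* ... tear down the underlying object here ... */
		if (kn)
			sysfs_unbreak_active_protection(kn);
		return count;
	}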
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 13948102ca29..381668fa135d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -567,7 +567,7 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
FC_NL_ASYNC_EVENT, len);
- event->seconds = get_seconds();
+ event->seconds = ktime_get_real_seconds();
event->vendor_id = 0;
event->host_no = shost->host_no;
event->event_datalen = sizeof(u32); /* bytes */
@@ -635,7 +635,7 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
FC_NL_ASYNC_EVENT, len);
- event->seconds = get_seconds();
+ event->seconds = ktime_get_real_seconds();
event->vendor_id = vendor_id;
event->host_no = shost->host_no;
event->event_datalen = data_len; /* bytes */
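The substitution applied in both hunks, in isolation: get_seconds() returns an unsigned long that overflows in 2038 on 32-bit systems, while the replacement returns a 64-bit value.

	time64_t now = ktime_get_real_seconds();	/* y2038-safe */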
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 2ca150b16764..40b85b752b79 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -136,7 +136,7 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER,
0, NULL);
- if (!(driver_byte(result) & DRIVER_SENSE) ||
+ if (driver_byte(result) != DRIVER_SENSE ||
sshdr->sense_key != UNIT_ATTENTION)
break;
}
diff --git a/drivers/scsi/scsi_typedefs.h b/drivers/scsi/scsi_typedefs.h
deleted file mode 100644
index 2ed4c5cb7088..000000000000
--- a/drivers/scsi/scsi_typedefs.h
+++ /dev/null
@@ -1,2 +0,0 @@
-
-typedef struct scsi_cmnd Scsi_Cmnd;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9421d9877730..a58cee7a85f2 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1119,7 +1119,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
SCpnt->cmnd[0] = WRITE_6;
if (blk_integrity_rq(rq))
- sd_dif_prepare(SCpnt);
+ t10_pi_prepare(SCpnt->request, sdkp->protection_type);
} else if (rq_data_dir(rq) == READ) {
SCpnt->cmnd[0] = READ_6;
@@ -1635,7 +1635,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
if (res) {
sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
- if (driver_byte(res) & DRIVER_SENSE)
+ if (driver_byte(res) == DRIVER_SENSE)
sd_print_sense_hdr(sdkp, sshdr);
/* we need to evaluate the error return */
@@ -1737,8 +1737,8 @@ static int sd_pr_command(struct block_device *bdev, u8 sa,
result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
&sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
- if ((driver_byte(result) & DRIVER_SENSE) &&
- (scsi_sense_valid(&sshdr))) {
+ if (driver_byte(result) == DRIVER_SENSE &&
+ scsi_sense_valid(&sshdr)) {
sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
scsi_print_sense_hdr(sdev, NULL, &sshdr);
}
@@ -2028,7 +2028,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
} else {
sdkp->device->no_write_same = 1;
sd_config_write_same(sdkp);
- req->__data_len = blk_rq_bytes(req);
req->rq_flags |= RQF_QUIET;
}
break;
@@ -2047,8 +2046,10 @@ static int sd_done(struct scsi_cmnd *SCpnt)
"sd_done: completed %d of %d bytes\n",
good_bytes, scsi_bufflen(SCpnt)));
- if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
- sd_dif_complete(SCpnt, good_bytes);
+ if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt) &&
+ good_bytes)
+ t10_pi_complete(SCpnt->request, sdkp->protection_type,
+ good_bytes / scsi_prot_interval(SCpnt));
return good_bytes;
}
@@ -2095,10 +2096,10 @@ sd_spinup_disk(struct scsi_disk *sdkp)
retries++;
} while (retries < 3 &&
(!scsi_status_is_good(the_result) ||
- ((driver_byte(the_result) & DRIVER_SENSE) &&
+ ((driver_byte(the_result) == DRIVER_SENSE) &&
sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
- if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
+ if (driver_byte(the_result) != DRIVER_SENSE) {
/* no sense, TUR either succeeded or failed
* with a status error */
if(!spintime && !scsi_status_is_good(the_result)) {
@@ -2224,7 +2225,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
struct scsi_sense_hdr *sshdr, int sense_valid,
int the_result)
{
- if (driver_byte(the_result) & DRIVER_SENSE)
+ if (driver_byte(the_result) == DRIVER_SENSE)
sd_print_sense_hdr(sdkp, sshdr);
else
sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
@@ -3490,7 +3491,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
SD_TIMEOUT, SD_MAX_RETRIES, 0, RQF_PM, NULL);
if (res) {
sd_print_result(sdkp, "Start/Stop Unit failed", res);
- if (driver_byte(res) & DRIVER_SENSE)
+ if (driver_byte(res) == DRIVER_SENSE)
sd_print_sense_hdr(sdkp, &sshdr);
if (scsi_sense_valid(&sshdr) &&
/* 0x3a is medium not present */
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 392c7d078ae3..a7d4f50b67d4 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -254,21 +254,12 @@ static inline unsigned int sd_prot_flag_mask(unsigned int prot_op)
#ifdef CONFIG_BLK_DEV_INTEGRITY
extern void sd_dif_config_host(struct scsi_disk *);
-extern void sd_dif_prepare(struct scsi_cmnd *scmd);
-extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline void sd_dif_config_host(struct scsi_disk *disk)
{
}
-static inline int sd_dif_prepare(struct scsi_cmnd *scmd)
-{
- return 0;
-}
-static inline void sd_dif_complete(struct scsi_cmnd *cmd, unsigned int a)
-{
-}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 9035380c0dda..db72c82486e3 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -95,116 +95,3 @@ out:
blk_integrity_register(disk, &bi);
}
-/*
- * The virtual start sector is the one that was originally submitted
- * by the block layer. Due to partitioning, MD/DM cloning, etc. the
- * actual physical start sector is likely to be different. Remap
- * protection information to match the physical LBA.
- *
- * From a protocol perspective there's a slight difference between
- * Type 1 and 2. The latter uses 32-byte CDBs exclusively, and the
- * reference tag is seeded in the CDB. This gives us the potential to
- * avoid virt->phys remapping during write. However, at read time we
- * don't know whether the virt sector is the same as when we wrote it
- * (we could be reading from real disk as opposed to MD/DM device. So
- * we always remap Type 2 making it identical to Type 1.
- *
- * Type 3 does not have a reference tag so no remapping is required.
- */
-void sd_dif_prepare(struct scsi_cmnd *scmd)
-{
- const int tuple_sz = sizeof(struct t10_pi_tuple);
- struct bio *bio;
- struct scsi_disk *sdkp;
- struct t10_pi_tuple *pi;
- u32 phys, virt;
-
- sdkp = scsi_disk(scmd->request->rq_disk);
-
- if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION)
- return;
-
- phys = scsi_prot_ref_tag(scmd);
-
- __rq_for_each_bio(bio, scmd->request) {
- struct bio_integrity_payload *bip = bio_integrity(bio);
- struct bio_vec iv;
- struct bvec_iter iter;
- unsigned int j;
-
- /* Already remapped? */
- if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
- break;
-
- virt = bip_get_seed(bip) & 0xffffffff;
-
- bip_for_each_vec(iv, bip, iter) {
- pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
-
- for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
-
- if (be32_to_cpu(pi->ref_tag) == virt)
- pi->ref_tag = cpu_to_be32(phys);
-
- virt++;
- phys++;
- }
-
- kunmap_atomic(pi);
- }
-
- bip->bip_flags |= BIP_MAPPED_INTEGRITY;
- }
-}
-
-/*
- * Remap physical sector values in the reference tag to the virtual
- * values expected by the block layer.
- */
-void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
-{
- const int tuple_sz = sizeof(struct t10_pi_tuple);
- struct scsi_disk *sdkp;
- struct bio *bio;
- struct t10_pi_tuple *pi;
- unsigned int j, intervals;
- u32 phys, virt;
-
- sdkp = scsi_disk(scmd->request->rq_disk);
-
- if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION || good_bytes == 0)
- return;
-
- intervals = good_bytes / scsi_prot_interval(scmd);
- phys = scsi_prot_ref_tag(scmd);
-
- __rq_for_each_bio(bio, scmd->request) {
- struct bio_integrity_payload *bip = bio_integrity(bio);
- struct bio_vec iv;
- struct bvec_iter iter;
-
- virt = bip_get_seed(bip) & 0xffffffff;
-
- bip_for_each_vec(iv, bip, iter) {
- pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
-
- for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
-
- if (intervals == 0) {
- kunmap_atomic(pi);
- return;
- }
-
- if (be32_to_cpu(pi->ref_tag) == phys)
- pi->ref_tag = cpu_to_be32(virt);
-
- virt++;
- phys++;
- intervals--;
- }
-
- kunmap_atomic(pi);
- }
- }
-}
-
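With the sd_dif remapping helpers deleted, the work moves to generic block-layer T10 PI code; the call shape, as visible at the sd.c call sites above (a summary sketch of those call sites, not a new API):

	/* Before dispatch: remap virtual reference tags to physical. */
	if (blk_integrity_rq(rq))
		t10_pi_prepare(rq, sdkp->protection_type);

	/* On completion: remap back only the intervals that succeeded. */
	if (rq_data_dir(rq) == READ && scsi_prot_sg_count(scmd) && good_bytes)
		t10_pi_complete(rq, sdkp->protection_type,
				good_bytes / scsi_prot_interval(scmd));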
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 2bf3bf73886e..412c1787dcd9 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -148,12 +148,6 @@ int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
cmd->transfersize = sdkp->device->sector_size;
cmd->allowed = 0;
- /*
- * Report may return less bytes than requested. Make sure
- * to report completion on the entire initial request.
- */
- rq->__data_len = nr_bytes;
-
return BLKPREP_OK;
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ba9ba0e04f42..8a254bb46a9b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1103,15 +1103,6 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
case SCSI_IOCTL_SEND_COMMAND:
if (atomic_read(&sdp->detaching))
return -ENODEV;
- if (read_only) {
- unsigned char opcode = WRITE_6;
- Scsi_Ioctl_Command __user *siocp = p;
-
- if (copy_from_user(&opcode, siocp->data, 1))
- return -EFAULT;
- if (sg_allow_access(filp, &opcode))
- return -EPERM;
- }
return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
@@ -1884,7 +1875,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
int sg_tablesize = sfp->parentdp->sg_tablesize;
int blk_size = buff_size, order;
- gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
+ gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO;
struct sg_device *sdp = sfp->parentdp;
if (blk_size < 0)
@@ -1914,9 +1905,6 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
if (sdp->device->host->unchecked_isa_dma)
gfp_mask |= GFP_DMA;
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
- gfp_mask |= __GFP_ZERO;
-
order = get_order(num);
retry:
ret_sz = 1 << (PAGE_SHIFT + order);
@@ -1927,7 +1915,7 @@ retry:
num = (rem_sz > scatter_elem_sz_prev) ?
scatter_elem_sz_prev : rem_sz;
- schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
+ schp->pages[k] = alloc_pages(gfp_mask, order);
if (!schp->pages[k])
goto out;
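The effect of the sg change, stated once: zeroing is now unconditional rather than gated on the caller lacking CAP_SYS_ADMIN/CAP_SYS_RAWIO, so buffer pages can never leak stale kernel memory to user space.

	/* All sg buffer pages are now requested zero-filled. */
	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO;
	schp->pages[k] = alloc_pages(gfp_mask, order);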
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index dc3a0542a2e8..e97bf2670315 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -483,6 +483,8 @@ struct pqi_raid_error_info {
#define CISS_CMD_STATUS_TMF 0xd
#define CISS_CMD_STATUS_AIO_DISABLED 0xe
+#define PQI_CMD_STATUS_ABORTED CISS_CMD_STATUS_ABORTED
+
#define PQI_NUM_EVENT_QUEUE_ELEMENTS 32
#define PQI_EVENT_OQ_ELEMENT_LENGTH sizeof(struct pqi_event_response)
@@ -581,8 +583,8 @@ struct pqi_admin_queues_aligned {
struct pqi_admin_queues {
void *iq_element_array;
void *oq_element_array;
- volatile pqi_index_t *iq_ci;
- volatile pqi_index_t *oq_pi;
+ pqi_index_t *iq_ci;
+ pqi_index_t __iomem *oq_pi;
dma_addr_t iq_element_array_bus_addr;
dma_addr_t oq_element_array_bus_addr;
dma_addr_t iq_ci_bus_addr;
@@ -606,8 +608,8 @@ struct pqi_queue_group {
dma_addr_t oq_element_array_bus_addr;
__le32 __iomem *iq_pi[2];
pqi_index_t iq_pi_copy[2];
- volatile pqi_index_t *iq_ci[2];
- volatile pqi_index_t *oq_pi;
+ pqi_index_t __iomem *iq_ci[2];
+ pqi_index_t __iomem *oq_pi;
dma_addr_t iq_ci_bus_addr[2];
dma_addr_t oq_pi_bus_addr;
__le32 __iomem *oq_ci;
@@ -620,7 +622,7 @@ struct pqi_event_queue {
u16 oq_id;
u16 int_msg_num;
void *oq_element_array;
- volatile pqi_index_t *oq_pi;
+ pqi_index_t __iomem *oq_pi;
dma_addr_t oq_element_array_bus_addr;
dma_addr_t oq_pi_bus_addr;
__le32 __iomem *oq_ci;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index b78d20b74ed8..2112ea6723c6 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -40,11 +40,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.1.4-115"
+#define DRIVER_VERSION "1.1.4-130"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
#define DRIVER_RELEASE 4
-#define DRIVER_REVISION 115
+#define DRIVER_REVISION 130
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -1197,20 +1197,30 @@ no_buffer:
device->volume_offline = volume_offline;
}
+#define PQI_INQUIRY_PAGE0_RETRIES 3
+
static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
int rc;
u8 *buffer;
+ unsigned int retries;
buffer = kmalloc(64, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Send an inquiry to the device to see what it is. */
- rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
- if (rc)
- goto out;
+ for (retries = 0;;) {
+ rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
+ buffer, 64);
+ if (rc == 0)
+ break;
+ if (pqi_is_logical_device(device) ||
+ rc != PQI_CMD_STATUS_ABORTED ||
+ ++retries > PQI_INQUIRY_PAGE0_RETRIES)
+ goto out;
+ }
scsi_sanitize_inquiry_string(&buffer[8], 8);
scsi_sanitize_inquiry_string(&buffer[16], 16);
@@ -2693,7 +2703,7 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
oq_ci = queue_group->oq_ci_copy;
while (1) {
- oq_pi = *queue_group->oq_pi;
+ oq_pi = readl(queue_group->oq_pi);
if (oq_pi == oq_ci)
break;
@@ -2784,7 +2794,7 @@ static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
iq_pi = queue_group->iq_pi_copy[RAID_PATH];
- iq_ci = *queue_group->iq_ci[RAID_PATH];
+ iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
if (pqi_num_elements_free(iq_pi, iq_ci,
ctrl_info->num_elements_per_iq))
@@ -2943,7 +2953,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
oq_ci = event_queue->oq_ci_copy;
while (1) {
- oq_pi = *event_queue->oq_pi;
+ oq_pi = readl(event_queue->oq_pi);
if (oq_pi == oq_ci)
break;
@@ -3167,7 +3177,7 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
size_t element_array_length_per_iq;
size_t element_array_length_per_oq;
void *element_array;
- void *next_queue_index;
+ void __iomem *next_queue_index;
void *aligned_pointer;
unsigned int num_inbound_queues;
unsigned int num_outbound_queues;
@@ -3263,7 +3273,7 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
PQI_EVENT_OQ_ELEMENT_LENGTH;
- next_queue_index = PTR_ALIGN(element_array,
+ next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
PQI_OPERATIONAL_INDEX_ALIGNMENT);
for (i = 0; i < ctrl_info->num_queue_groups; i++) {
@@ -3271,21 +3281,24 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
queue_group->iq_ci[RAID_PATH] = next_queue_index;
queue_group->iq_ci_bus_addr[RAID_PATH] =
ctrl_info->queue_memory_base_dma_handle +
- (next_queue_index - ctrl_info->queue_memory_base);
+ (next_queue_index -
+ (void __iomem *)ctrl_info->queue_memory_base);
next_queue_index += sizeof(pqi_index_t);
next_queue_index = PTR_ALIGN(next_queue_index,
PQI_OPERATIONAL_INDEX_ALIGNMENT);
queue_group->iq_ci[AIO_PATH] = next_queue_index;
queue_group->iq_ci_bus_addr[AIO_PATH] =
ctrl_info->queue_memory_base_dma_handle +
- (next_queue_index - ctrl_info->queue_memory_base);
+ (next_queue_index -
+ (void __iomem *)ctrl_info->queue_memory_base);
next_queue_index += sizeof(pqi_index_t);
next_queue_index = PTR_ALIGN(next_queue_index,
PQI_OPERATIONAL_INDEX_ALIGNMENT);
queue_group->oq_pi = next_queue_index;
queue_group->oq_pi_bus_addr =
ctrl_info->queue_memory_base_dma_handle +
- (next_queue_index - ctrl_info->queue_memory_base);
+ (next_queue_index -
+ (void __iomem *)ctrl_info->queue_memory_base);
next_queue_index += sizeof(pqi_index_t);
next_queue_index = PTR_ALIGN(next_queue_index,
PQI_OPERATIONAL_INDEX_ALIGNMENT);
@@ -3294,7 +3307,8 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
ctrl_info->event_queue.oq_pi = next_queue_index;
ctrl_info->event_queue.oq_pi_bus_addr =
ctrl_info->queue_memory_base_dma_handle +
- (next_queue_index - ctrl_info->queue_memory_base);
+ (next_queue_index -
+ (void __iomem *)ctrl_info->queue_memory_base);
return 0;
}
@@ -3368,7 +3382,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
admin_queues->oq_element_array =
&admin_queues_aligned->oq_element_array;
admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
- admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
+ admin_queues->oq_pi =
+ (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
admin_queues->iq_element_array_bus_addr =
ctrl_info->admin_queue_memory_base_dma_handle +
@@ -3384,8 +3399,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
ctrl_info->admin_queue_memory_base);
admin_queues->oq_pi_bus_addr =
ctrl_info->admin_queue_memory_base_dma_handle +
- ((void *)admin_queues->oq_pi -
- ctrl_info->admin_queue_memory_base);
+ ((void __iomem *)admin_queues->oq_pi -
+ (void __iomem *)ctrl_info->admin_queue_memory_base);
return 0;
}
@@ -3486,7 +3501,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
- oq_pi = *admin_queues->oq_pi;
+ oq_pi = readl(admin_queues->oq_pi);
if (oq_pi != oq_ci)
break;
if (time_after(jiffies, timeout)) {
@@ -3545,7 +3560,7 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
DIV_ROUND_UP(iu_length,
PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
- iq_ci = *queue_group->iq_ci[path];
+ iq_ci = readl(queue_group->iq_ci[path]);
if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
ctrl_info->num_elements_per_iq))
@@ -3621,29 +3636,24 @@ static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-static int pqi_submit_raid_request_synchronous_with_io_request(
- struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
- unsigned long timeout_msecs)
+static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
+ *error_info)
{
- int rc = 0;
- DECLARE_COMPLETION_ONSTACK(wait);
+ int rc = -EIO;
- io_request->io_complete_callback = pqi_raid_synchronous_complete;
- io_request->context = &wait;
-
- pqi_start_io(ctrl_info,
- &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
- io_request);
-
- if (timeout_msecs == NO_TIMEOUT) {
- pqi_wait_for_completion_io(ctrl_info, &wait);
- } else {
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(timeout_msecs))) {
- dev_warn(&ctrl_info->pci_dev->dev,
- "command timed out\n");
- rc = -ETIMEDOUT;
- }
+ switch (error_info->data_out_result) {
+ case PQI_DATA_IN_OUT_GOOD:
+ if (error_info->status == SAM_STAT_GOOD)
+ rc = 0;
+ break;
+ case PQI_DATA_IN_OUT_UNDERFLOW:
+ if (error_info->status == SAM_STAT_GOOD ||
+ error_info->status == SAM_STAT_CHECK_CONDITION)
+ rc = 0;
+ break;
+ case PQI_DATA_IN_OUT_ABORTED:
+ rc = PQI_CMD_STATUS_ABORTED;
+ break;
}
return rc;
@@ -3653,11 +3663,12 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
struct pqi_iu_header *request, unsigned int flags,
struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
- int rc;
+ int rc = 0;
struct pqi_io_request *io_request;
unsigned long start_jiffies;
unsigned long msecs_blocked;
size_t iu_length;
+ DECLARE_COMPLETION_ONSTACK(wait);
/*
* Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
@@ -3686,11 +3697,13 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
pqi_ctrl_busy(ctrl_info);
timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
if (timeout_msecs == 0) {
+ pqi_ctrl_unbusy(ctrl_info);
rc = -ETIMEDOUT;
goto out;
}
if (pqi_ctrl_offline(ctrl_info)) {
+ pqi_ctrl_unbusy(ctrl_info);
rc = -ENXIO;
goto out;
}
@@ -3708,8 +3721,25 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
PQI_REQUEST_HEADER_LENGTH;
memcpy(io_request->iu, request, iu_length);
- rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
- io_request, timeout_msecs);
+ io_request->io_complete_callback = pqi_raid_synchronous_complete;
+ io_request->context = &wait;
+
+ pqi_start_io(ctrl_info,
+ &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+ io_request);
+
+ pqi_ctrl_unbusy(ctrl_info);
+
+ if (timeout_msecs == NO_TIMEOUT) {
+ pqi_wait_for_completion_io(ctrl_info, &wait);
+ } else {
+ if (!wait_for_completion_io_timeout(&wait,
+ msecs_to_jiffies(timeout_msecs))) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "command timed out\n");
+ rc = -ETIMEDOUT;
+ }
+ }
if (error_info) {
if (io_request->error_info)
@@ -3718,25 +3748,13 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
else
memset(error_info, 0, sizeof(*error_info));
} else if (rc == 0 && io_request->error_info) {
- u8 scsi_status;
- struct pqi_raid_error_info *raid_error_info;
-
- raid_error_info = io_request->error_info;
- scsi_status = raid_error_info->status;
-
- if (scsi_status == SAM_STAT_CHECK_CONDITION &&
- raid_error_info->data_out_result ==
- PQI_DATA_IN_OUT_UNDERFLOW)
- scsi_status = SAM_STAT_GOOD;
-
- if (scsi_status != SAM_STAT_GOOD)
- rc = -EIO;
+ rc = pqi_process_raid_io_error_synchronous(
+ io_request->error_info);
}
pqi_free_io_request(io_request);
out:
- pqi_ctrl_unbusy(ctrl_info);
up(&ctrl_info->sync_request_sem);
return rc;
@@ -5041,7 +5059,7 @@ static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
iq_pi = queue_group->iq_pi_copy[path];
while (1) {
- iq_ci = *queue_group->iq_ci[path];
+ iq_ci = readl(queue_group->iq_ci[path]);
if (iq_ci == iq_pi)
break;
pqi_check_ctrl_health(ctrl_info);
@@ -6230,20 +6248,20 @@ static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
admin_queues = &ctrl_info->admin_queues;
admin_queues->iq_pi_copy = 0;
admin_queues->oq_ci_copy = 0;
- *admin_queues->oq_pi = 0;
+ writel(0, admin_queues->oq_pi);
for (i = 0; i < ctrl_info->num_queue_groups; i++) {
ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
ctrl_info->queue_groups[i].oq_ci_copy = 0;
- *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
- *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
- *ctrl_info->queue_groups[i].oq_pi = 0;
+ writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
+ writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
+ writel(0, ctrl_info->queue_groups[i].oq_pi);
}
event_queue = &ctrl_info->event_queue;
- *event_queue->oq_pi = 0;
+ writel(0, event_queue->oq_pi);
event_queue->oq_ci_copy = 0;
}
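The smartpqi conversion pattern, reduced: indices shared with the device are declared __iomem and touched only through MMIO accessors, never through a plain (or volatile) dereference.

	pqi_index_t __iomem *oq_pi;	/* device-written producer index */

	oq_pi_copy = readl(oq_pi);	/* was: oq_pi_copy = *oq_pi;  */
	writel(0, oq_pi);		/* was: *oq_pi = 0;           */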
@@ -6826,6 +6844,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x004a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x004b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x004c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0110)
},
{
@@ -6950,6 +6980,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADVANTECH, 0x8312)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_DELL, 0x1fe0)
},
{
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 269ddf791a73..0abe17c1a73b 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -200,7 +200,7 @@ snic_stats_show(struct seq_file *sfp, void *data)
{
struct snic *snic = (struct snic *) sfp->private;
struct snic_stats *stats = &snic->s_stats;
- struct timespec last_isr_tms, last_ack_tms;
+ struct timespec64 last_isr_tms, last_ack_tms;
u64 maxio_tm;
int i;
@@ -312,12 +312,12 @@ snic_stats_show(struct seq_file *sfp, void *data)
"\t\t Other Statistics\n"
"\n---------------------------------------------\n");
- jiffies_to_timespec(stats->misc.last_isr_time, &last_isr_tms);
- jiffies_to_timespec(stats->misc.last_ack_time, &last_ack_tms);
+ jiffies_to_timespec64(stats->misc.last_isr_time, &last_isr_tms);
+ jiffies_to_timespec64(stats->misc.last_ack_time, &last_ack_tms);
seq_printf(sfp,
- "Last ISR Time : %llu (%8lu.%8lu)\n"
- "Last Ack Time : %llu (%8lu.%8lu)\n"
+ "Last ISR Time : %llu (%8llu.%09lu)\n"
+ "Last Ack Time : %llu (%8llu.%09lu)\n"
"Ack ISRs : %llu\n"
"IO Cmpl ISRs : %llu\n"
"Err Notify ISRs : %llu\n"
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c
index f00ebf4717e0..fc60c933d6c0 100644
--- a/drivers/scsi/snic/snic_trc.c
+++ b/drivers/scsi/snic/snic_trc.c
@@ -65,12 +65,12 @@ static int
snic_fmt_trc_data(struct snic_trc_data *td, char *buf, int buf_sz)
{
int len = 0;
- struct timespec tmspec;
+ struct timespec64 tmspec;
- jiffies_to_timespec(td->ts, &tmspec);
+ jiffies_to_timespec64(td->ts, &tmspec);
len += snprintf(buf, buf_sz,
- "%lu.%10lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
+ "%llu.%09lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
tmspec.tv_sec,
tmspec.tv_nsec,
td->fn,
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 35fab1e18adc..ffcf902da390 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -186,14 +186,13 @@ static int sr_play_trkind(struct cdrom_device_info *cdi,
int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
{
struct scsi_device *SDev;
- struct scsi_sense_hdr sshdr;
+ struct scsi_sense_hdr local_sshdr, *sshdr = &local_sshdr;
int result, err = 0, retries = 0;
- unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL;
SDev = cd->device;
- if (cgc->sense)
- senseptr = sense_buffer;
+ if (cgc->sshdr)
+ sshdr = cgc->sshdr;
retry:
if (!scsi_block_when_processing_errors(SDev)) {
@@ -202,15 +201,12 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
}
result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
- cgc->buffer, cgc->buflen, senseptr, &sshdr,
+ cgc->buffer, cgc->buflen, NULL, sshdr,
cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
- if (cgc->sense)
- memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense));
-
/* Minimal error checking. Ignore cases we know about, and report the rest. */
if (driver_byte(result) != 0) {
- switch (sshdr.sense_key) {
+ switch (sshdr->sense_key) {
case UNIT_ATTENTION:
SDev->changed = 1;
if (!cgc->quiet)
@@ -221,8 +217,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
err = -ENOMEDIUM;
break;
case NOT_READY: /* This happens if there is no disc in drive */
- if (sshdr.asc == 0x04 &&
- sshdr.ascq == 0x01) {
+ if (sshdr->asc == 0x04 &&
+ sshdr->ascq == 0x01) {
/* sense: Logical unit is in process of becoming ready */
if (!cgc->quiet)
sr_printk(KERN_INFO, cd,
@@ -245,8 +241,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
break;
case ILLEGAL_REQUEST:
err = -EIO;
- if (sshdr.asc == 0x20 &&
- sshdr.ascq == 0x00)
+ if (sshdr->asc == 0x20 &&
+ sshdr->ascq == 0x00)
/* sense: Invalid command operation code */
err = -EDRIVE_CANT_DO_THIS;
break;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 50c66ccc4b41..307df2fa39a3 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -828,11 +828,8 @@ static int st_flush_write_buffer(struct scsi_tape * STp)
static int flush_buffer(struct scsi_tape *STp, int seek_next)
{
int backspace, result;
- struct st_buffer *STbuffer;
struct st_partstat *STps;
- STbuffer = STp->buffer;
-
/*
* If there was a bus reset, block further access
* to this device.
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c
index 190770bdc194..91db17727963 100644
--- a/drivers/scsi/sym53c8xx_2/sym_fw.c
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.c
@@ -295,10 +295,8 @@ static void
sym_fw1_setup(struct sym_hcb *np, struct sym_fw *fw)
{
struct sym_fw1a_scr *scripta0;
- struct sym_fw1b_scr *scriptb0;
scripta0 = (struct sym_fw1a_scr *) np->scripta0;
- scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;
/*
* Fill variable parts in scripts.
@@ -319,10 +317,8 @@ static void
sym_fw2_setup(struct sym_hcb *np, struct sym_fw *fw)
{
struct sym_fw2a_scr *scripta0;
- struct sym_fw2b_scr *scriptb0;
scripta0 = (struct sym_fw2a_scr *) np->scripta0;
- scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;
/*
* Fill variable parts in scripts.
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 7320d5fe4cbc..5f10aa9bad9b 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -252,7 +252,7 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
}
scsi_set_resid(cmd, resid);
- cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status;
+ cmd->result = (drv_status << 24) | (cam_status << 16) | scsi_status;
}
static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
index 805369521df8..e34801ae5d69 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.h
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -256,7 +256,7 @@ sym_get_cam_status(struct scsi_cmnd *cmd)
static inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid)
{
scsi_set_resid(cmd, resid);
- cmd->result = (((DID_OK) << 16) + ((cp->ssss_status) & 0x7f));
+ cmd->result = (DID_OK << 16) | (cp->ssss_status & 0x7f);
}
void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid);
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 378af306fda1..bd3f6e2d6834 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3855,7 +3855,7 @@ out_reject:
int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
{
- int dp_sg, dp_sgmin, resid = 0;
+ int dp_sg, resid = 0;
int dp_ofs = 0;
/*
@@ -3902,7 +3902,6 @@ int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
* We are now fully comfortable in the computation
* of the data residual (2's complement).
*/
- dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
resid = -cp->ext_ofs;
for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) {
u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index e27b4d4e6ae2..e09fe6ab3572 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -100,3 +100,12 @@ config SCSI_UFS_QCOM
Select this if you have UFS controller on QCOM chipset.
If unsure, say N.
+
+config SCSI_UFS_HISI
+ tristate "Hisilicon specific hooks to UFS controller platform driver"
+ depends on (ARCH_HISI || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
+ ---help---
+ This selects the Hisilicon specific additions to UFSHCD platform driver.
+
+ Select this if you have UFS controller on Hisilicon chipset.
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 918f5791202d..2c50f03d8c4a 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
ufshcd-core-objs := ufshcd.o ufs-sysfs.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
+obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
new file mode 100644
index 000000000000..46df707e6f2c
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -0,0 +1,619 @@
+/*
+ * HiSilicon Hixxxx UFS Driver
+ *
+ * Copyright (c) 2016-2017 Linaro Ltd.
+ * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/time.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "ufshcd.h"
+#include "ufshcd-pltfrm.h"
+#include "unipro.h"
+#include "ufs-hisi.h"
+#include "ufshci.h"
+
+static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
+{
+ int err = 0;
+ u32 tx_fsm_val_0 = 0;
+ u32 tx_fsm_val_1 = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
+
+ do {
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
+ &tx_fsm_val_0);
+ err |= ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
+ if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 &&
+ tx_fsm_val_1 == TX_FSM_HIBERN8))
+ break;
+
+ /* sleep for max. 200us */
+ usleep_range(100, 200);
+ } while (time_before(jiffies, timeout));
+
+ /*
+ * we might have been scheduled out for a long time during polling,
+ * so check the state again.
+ */
+ if (time_after(jiffies, timeout)) {
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
+ &tx_fsm_val_0);
+ err |= ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
+ }
+
+ if (err) {
+ dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
+ __func__, err);
+ } else if (tx_fsm_val_0 != TX_FSM_HIBERN8 ||
+ tx_fsm_val_1 != TX_FSM_HIBERN8) {
+ err = -1;
+ dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n",
+ __func__, tx_fsm_val_0, tx_fsm_val_1);
+ }
+
+ return err;
+}
+
+static void ufs_hi3660_clk_init(struct ufs_hba *hba)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+ if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN)
+ mdelay(1);
+ /* use abb clk */
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL);
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN);
+ /* open mphy ref clk */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+}
+
+static void ufs_hi3660_soc_init(struct ufs_hba *hba)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+ u32 reg;
+
+ if (!IS_ERR(host->rst))
+ reset_control_assert(host->rst);
+
+ /* HC_PSW powerup */
+ ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
+ udelay(10);
+ /* notify PWR ready */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL);
+ ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0,
+ UFS_DEVICE_RESET_CTRL);
+
+ reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL);
+ reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK;
+ /* set cfg clk freq */
+ ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL);
+ /* set ref clk freq */
+ ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL);
+ /* bypass ufs clk gate */
+ ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS,
+ CLOCK_GATE_BYPASS);
+ ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);
+
+ /* open psw clk */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL);
+ /* disable ufshc iso */
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL);
+ /* disable phy iso */
+ ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN);
+ /* notice iso disable */
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL);
+
+ /* disable lp_reset_n */
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN);
+ mdelay(1);
+
+ ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
+ UFS_DEVICE_RESET_CTRL);
+
+ msleep(20);
+
+ /*
+ * Enable the linereset recovery fix and the rx_reset/tx_reset beat.
+ * Enable the ref_clk_en override (bit 5) with override value 1
+ * (bit 4), using the corresponding mask bits.
+ */
+ ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL);
+
+ if (!IS_ERR(host->rst))
+ reset_control_deassert(host->rst);
+}
+
+static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
+{
+ int err;
+ uint32_t value;
+ uint32_t reg;
+
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1);
+ /* PA_HSSeries */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2);
+ /* MPHY CBRATESEL */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1);
+ /* MPHY CBOVRCTRL2 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
+ /* MPHY CBOVRCTRL3 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
+ /* Unipro VS_MphyCfgUpdt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+ /* MPHY RXOVRCTRL4 rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
+ /* MPHY RXOVRCTRL4 rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
+ /* MPHY RXOVRCTRL5 rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
+ /* MPHY RXOVRCTRL5 rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
+ /* MPHY RXSQCONTROL rx0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
+ /* MPHY RXSQCONTROL rx1 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
+ /* Unipro VS_MphyCfgUpdt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+
+ /* Tactive RX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
+ /* Tactive RX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
+
+ /* Gear3 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
+ /* Gear3 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F);
+ /* Gear2 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F);
+ /* Gear2 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F);
+ /* Gear1 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F);
+ /* Gear1 Synclength */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F);
+ /* Thibernate Tx */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
+ /* Thibernate Tx */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
+ if (value != 0x1)
+ dev_info(hba->dev,
+ "Warring!!! Unipro VS_mphy_disable is 0x%x\n", value);
+
+ /* Unipro VS_mphy_disable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0);
+ err = ufs_hisi_check_hibern8(hba);
+ if (err)
+ dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");
+
+ ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
+
+ /* disable auto H8 */
+ reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ reg = reg & (~UFS_AHIT_AH8ITV_MASK);
+ ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+ /* Unipro PA_Local_TX_LCC_Enable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x155E, 0x0), 0x0);
+ /* close Unipro VS_Mk2ExtnSupport */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
+ if (value != 0) {
+ /* Warn if the close did not take effect */
+ dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n");
+ }
+
+ return err;
+}
+
+static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ /* Unipro DL_AFC0CreditThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0);
+ /* Unipro DL_TC0OutAckThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0);
+ /* Unipro DL_TC0TXFCThreshold */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9);
+
+ /* not bypass ufs clk gate */
+ ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS,
+ CLOCK_GATE_BYPASS);
+ ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS,
+ UFS_SYSCTRL);
+
+ /* select received symbol cnt */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000);
+ /* reset counter0 and enable */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005);
+
+ return 0;
+}
+
+static int ufs_hi3660_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ err = ufs_hisi_link_startup_pre_change(hba);
+ break;
+ case POST_CHANGE:
+ err = ufs_hisi_link_startup_post_change(hba);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+struct ufs_hisi_dev_params {
+ u32 pwm_rx_gear; /* pwm rx gear to work in */
+ u32 pwm_tx_gear; /* pwm tx gear to work in */
+ u32 hs_rx_gear; /* hs rx gear to work in */
+ u32 hs_tx_gear; /* hs tx gear to work in */
+ u32 rx_lanes; /* number of rx lanes */
+ u32 tx_lanes; /* number of tx lanes */
+ u32 rx_pwr_pwm; /* rx pwm working pwr */
+ u32 tx_pwr_pwm; /* tx pwm working pwr */
+ u32 rx_pwr_hs; /* rx hs working pwr */
+ u32 tx_pwr_hs; /* tx hs working pwr */
+ u32 hs_rate; /* rate A/B to work in HS */
+ u32 desired_working_mode;
+};
+
+static int ufs_hisi_get_pwr_dev_param(
+ struct ufs_hisi_dev_params *hisi_param,
+ struct ufs_pa_layer_attr *dev_max,
+ struct ufs_pa_layer_attr *agreed_pwr)
+{
+ int min_hisi_gear;
+ int min_dev_gear;
+ bool is_dev_sup_hs = false;
+ bool is_hisi_max_hs = false;
+
+ if (dev_max->pwr_rx == FASTAUTO_MODE || dev_max->pwr_rx == FAST_MODE)
+ is_dev_sup_hs = true;
+
+ if (hisi_param->desired_working_mode == FAST) {
+ is_hisi_max_hs = true;
+ min_hisi_gear = min_t(u32, hisi_param->hs_rx_gear,
+ hisi_param->hs_tx_gear);
+ } else {
+ min_hisi_gear = min_t(u32, hisi_param->pwm_rx_gear,
+ hisi_param->pwm_tx_gear);
+ }
+
+ /*
+ * the device doesn't support HS, but
+ * hisi_param->desired_working_mode is HS;
+ * thus the device and hisi_param don't agree
+ */
+ if (!is_dev_sup_hs && is_hisi_max_hs) {
+ pr_err("%s: device not support HS\n", __func__);
+ return -ENOTSUPP;
+ } else if (is_dev_sup_hs && is_hisi_max_hs) {
+ /*
+ * Since the device supports HS, it supports FAST_MODE.
+ * Since hisi_param->desired_working_mode is also HS, the
+ * final decision (FAST/FASTAUTO) is made according to
+ * hisi_param, as it is the restricting factor.
+ */
+ agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
+ hisi_param->rx_pwr_hs;
+ } else {
+ /*
+ * Here hisi_param->desired_working_mode is PWM.
+ * It doesn't matter whether the device supports HS or PWM;
+ * in both cases hisi_param->desired_working_mode
+ * determines the mode.
+ */
+ agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
+ hisi_param->rx_pwr_pwm;
+ }
+
+ /*
+ * We would like tx to work with the minimum number of lanes
+ * common to the device capability and the vendor preferences.
+ * The same decision is made for rx.
+ */
+ agreed_pwr->lane_tx =
+ min_t(u32, dev_max->lane_tx, hisi_param->tx_lanes);
+ agreed_pwr->lane_rx =
+ min_t(u32, dev_max->lane_rx, hisi_param->rx_lanes);
+
+ /* device maximum gear is the minimum between device rx and tx gears */
+ min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
+
+ /*
+ * If the device capabilities and the vendor pre-defined preferences
+ * are both HS or both PWM, set the minimum gear as the chosen
+ * working gear.
+ * If one is PWM and one is HS, the PWM side gets to decide the
+ * gear, as it is also the side that decided above which pwr mode
+ * the device will be configured to.
+ */
+ if ((is_dev_sup_hs && is_hisi_max_hs) ||
+ (!is_dev_sup_hs && !is_hisi_max_hs))
+ agreed_pwr->gear_rx = agreed_pwr->gear_tx =
+ min_t(u32, min_dev_gear, min_hisi_gear);
+ else
+ agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_hisi_gear;
+
+ agreed_pwr->hs_rate = hisi_param->hs_rate;
+
+ pr_info("ufs final power mode: gear = %d, lane = %d, pwr = %d, rate = %d\n",
+ agreed_pwr->gear_rx, agreed_pwr->lane_rx, agreed_pwr->pwr_rx,
+ agreed_pwr->hs_rate);
+ return 0;
+}
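+
+/*
+ * Worked example (hypothetical device capabilities): if the device
+ * reports FAST_MODE at gear 4 on a single lane while the vendor limits
+ * above allow HS gear 3 on two lanes with desired mode FAST, the
+ * agreed result is FAST_MODE, gear 3, one lane, at the vendor's
+ * hs_rate.
+ */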
+
+static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param)
+{
+ hisi_param->rx_lanes = UFS_HISI_LIMIT_NUM_LANES_RX;
+ hisi_param->tx_lanes = UFS_HISI_LIMIT_NUM_LANES_TX;
+ hisi_param->hs_rx_gear = UFS_HISI_LIMIT_HSGEAR_RX;
+ hisi_param->hs_tx_gear = UFS_HISI_LIMIT_HSGEAR_TX;
+ hisi_param->pwm_rx_gear = UFS_HISI_LIMIT_PWMGEAR_RX;
+ hisi_param->pwm_tx_gear = UFS_HISI_LIMIT_PWMGEAR_TX;
+ hisi_param->rx_pwr_pwm = UFS_HISI_LIMIT_RX_PWR_PWM;
+ hisi_param->tx_pwr_pwm = UFS_HISI_LIMIT_TX_PWR_PWM;
+ hisi_param->rx_pwr_hs = UFS_HISI_LIMIT_RX_PWR_HS;
+ hisi_param->tx_pwr_hs = UFS_HISI_LIMIT_TX_PWR_HS;
+ hisi_param->hs_rate = UFS_HISI_LIMIT_HS_RATE;
+ hisi_param->desired_working_mode = UFS_HISI_LIMIT_DESIRED_MODE;
+}
+
+static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
+{
+ /* update */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
+ /* PA_TxSkip */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
+ /*PA_PWRModeUserData0 = 8191, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191);
+ /*PA_PWRModeUserData1 = 65535, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535);
+ /*PA_PWRModeUserData2 = 32767, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767);
+ /*DME_FC0ProtectionTimeOutVal = 8191, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191);
+ /*DME_TC0ReplayTimeOutVal = 65535, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535);
+ /*DME_AFC0ReqTimeOutVal = 32767, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767);
+ /*PA_PWRModeUserData3 = 8191, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191);
+ /*PA_PWRModeUserData4 = 65535, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535);
+ /*PA_PWRModeUserData5 = 32767, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767);
+ /*DME_FC1ProtectionTimeOutVal = 8191, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191);
+ /*DME_TC1ReplayTimeOutVal = 65535, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535);
+ /*DME_AFC1ReqTimeOutVal = 32767, default is 0*/
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
+}
+
+static int ufs_hi3660_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_hisi_dev_params ufs_hisi_cap;
+ int ret = 0;
+
+ if (!dev_req_params) {
+ dev_err(hba->dev,
+ "%s: incoming dev_req_params is NULL\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (status) {
+ case PRE_CHANGE:
+ ufs_hisi_set_dev_cap(&ufs_hisi_cap);
+ ret = ufs_hisi_get_pwr_dev_param(
+ &ufs_hisi_cap, dev_max_params, dev_req_params);
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: failed to determine capabilities\n", __func__);
+ goto out;
+ }
+
+ ufs_hisi_pwr_change_pre_change(hba);
+ break;
+ case POST_CHANGE:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+out:
+ return ret;
+}
+
+static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ if (ufshcd_is_runtime_pm(pm_op))
+ return 0;
+
+ if (host->in_suspend) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+ udelay(10);
+ /* set ref_dig_clk override of PHY PCS to 0 */
+ ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL);
+
+ host->in_suspend = true;
+
+ return 0;
+}
+
+static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+ if (!host->in_suspend)
+ return 0;
+
+ /* set ref_dig_clk override of PHY PCS to 1 */
+ ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL);
+ udelay(10);
+ ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+
+ host->in_suspend = false;
+ return 0;
+}
+
+static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
+{
+ struct resource *mem_res;
+ struct device *dev = host->hba->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ /* get resource of ufs sys ctrl */
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ host->ufs_sys_ctrl = devm_ioremap_resource(dev, mem_res);
+ if (IS_ERR(host->ufs_sys_ctrl))
+ return PTR_ERR(host->ufs_sys_ctrl);
+
+ return 0;
+}
+
+static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
+{
+ hba->rpm_lvl = UFS_PM_LVL_1;
+ hba->spm_lvl = UFS_PM_LVL_3;
+}
+
+/**
+ * ufs_hisi_init_common
+ * @hba: host controller instance
+ */
+static int ufs_hisi_init_common(struct ufs_hba *hba)
+{
+ int err = 0;
+ struct device *dev = hba->dev;
+ struct ufs_hisi_host *host;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->hba = hba;
+ ufshcd_set_variant(hba, host);
+
+ host->rst = devm_reset_control_get(dev, "rst");
+
+ ufs_hisi_set_pm_lvl(hba);
+
+ err = ufs_hisi_get_resource(host);
+ if (err) {
+ ufshcd_set_variant(hba, NULL);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ufs_hi3660_init(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct device *dev = hba->dev;
+
+ ret = ufs_hisi_init_common(hba);
+ if (ret) {
+ dev_err(dev, "%s: ufs common init fail\n", __func__);
+ return ret;
+ }
+
+ ufs_hi3660_clk_init(hba);
+
+ ufs_hi3660_soc_init(hba);
+
+ return 0;
+}
+
+static struct ufs_hba_variant_ops ufs_hba_hisi_vops = {
+ .name = "hi3660",
+ .init = ufs_hi3660_init,
+ .link_startup_notify = ufs_hi3660_link_startup_notify,
+ .pwr_change_notify = ufs_hi3660_pwr_change_notify,
+ .suspend = ufs_hisi_suspend,
+ .resume = ufs_hisi_resume,
+};
+
+static int ufs_hisi_probe(struct platform_device *pdev)
+{
+ return ufshcd_pltfrm_init(pdev, &ufs_hba_hisi_vops);
+}
+
+static int ufs_hisi_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ ufshcd_remove(hba);
+ return 0;
+}
+
+static const struct of_device_id ufs_hisi_of_match[] = {
+ { .compatible = "hisilicon,hi3660-ufs" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
+
+static const struct dev_pm_ops ufs_hisi_pm_ops = {
+ .suspend = ufshcd_pltfrm_suspend,
+ .resume = ufshcd_pltfrm_resume,
+ .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
+ .runtime_resume = ufshcd_pltfrm_runtime_resume,
+ .runtime_idle = ufshcd_pltfrm_runtime_idle,
+};
+
+static struct platform_driver ufs_hisi_pltform = {
+ .probe = ufs_hisi_probe,
+ .remove = ufs_hisi_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "ufshcd-hisi",
+ .pm = &ufs_hisi_pm_ops,
+ .of_match_table = of_match_ptr(ufs_hisi_of_match),
+ },
+};
+module_platform_driver(ufs_hisi_pltform);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ufshcd-hisi");
+MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver");
diff --git a/drivers/scsi/ufs/ufs-hisi.h b/drivers/scsi/ufs/ufs-hisi.h
new file mode 100644
index 000000000000..3df9cd7acc29
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-hisi.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2017, HiSilicon. All rights reserved.
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef UFS_HISI_H_
+#define UFS_HISI_H_
+
+#define HBRN8_POLL_TOUT_MS 1000
+
+/*
+ * ufs sysctrl specific define
+ */
+#define PSW_POWER_CTRL (0x04)
+#define PHY_ISO_EN (0x08)
+#define HC_LP_CTRL (0x0C)
+#define PHY_CLK_CTRL (0x10)
+#define PSW_CLK_CTRL (0x14)
+#define CLOCK_GATE_BYPASS (0x18)
+#define RESET_CTRL_EN (0x1C)
+#define UFS_SYSCTRL (0x5C)
+#define UFS_DEVICE_RESET_CTRL (0x60)
+
+#define BIT_UFS_PSW_ISO_CTRL (1 << 16)
+#define BIT_UFS_PSW_MTCMOS_EN (1 << 0)
+#define BIT_UFS_REFCLK_ISO_EN (1 << 16)
+#define BIT_UFS_PHY_ISO_CTRL (1 << 0)
+#define BIT_SYSCTRL_LP_ISOL_EN (1 << 16)
+#define BIT_SYSCTRL_PWR_READY (1 << 8)
+#define BIT_SYSCTRL_REF_CLOCK_EN (1 << 24)
+#define MASK_SYSCTRL_REF_CLOCK_SEL (0x3 << 8)
+#define MASK_SYSCTRL_CFG_CLOCK_FREQ (0xFF)
+#define UFS_FREQ_CFG_CLK (0x39)
+#define BIT_SYSCTRL_PSW_CLK_EN (1 << 4)
+#define MASK_UFS_CLK_GATE_BYPASS (0x3F)
+#define BIT_SYSCTRL_LP_RESET_N (1 << 0)
+#define BIT_UFS_REFCLK_SRC_SEl (1 << 0)
+#define MASK_UFS_SYSCRTL_BYPASS (0x3F << 16)
+#define MASK_UFS_DEVICE_RESET (0x1 << 16)
+#define BIT_UFS_DEVICE_RESET (0x1)
+
+/*
+ * M-TX Configuration Attributes for Hixxxx
+ */
+#define MPHY_TX_FSM_STATE 0x41
+#define TX_FSM_HIBERN8 0x1
+
+/*
+ * Hixxxx UFS HC specific Registers
+ */
+enum {
+ UFS_REG_OCPTHRTL = 0xc0,
+ UFS_REG_OOCPR = 0xc4,
+
+ UFS_REG_CDACFG = 0xd0,
+ UFS_REG_CDATX1 = 0xd4,
+ UFS_REG_CDATX2 = 0xd8,
+ UFS_REG_CDARX1 = 0xdc,
+ UFS_REG_CDARX2 = 0xe0,
+ UFS_REG_CDASTA = 0xe4,
+
+ UFS_REG_LBMCFG = 0xf0,
+ UFS_REG_LBMSTA = 0xf4,
+ UFS_REG_UFSMODE = 0xf8,
+
+ UFS_REG_HCLKDIV = 0xfc,
+};
+
+/* AHIT - Auto-Hibernate Idle Timer */
+#define UFS_AHIT_AH8ITV_MASK 0x3FF
+
+/* REG UFS_REG_HCLKDIV definition */
+#define UFS_HCLKDIV_NORMAL_VALUE 0xE4
+
+/* vendor specific pre-defined parameters */
+#define SLOW 1
+#define FAST 2
+
+#define UFS_HISI_LIMIT_NUM_LANES_RX 2
+#define UFS_HISI_LIMIT_NUM_LANES_TX 2
+#define UFS_HISI_LIMIT_HSGEAR_RX UFS_HS_G3
+#define UFS_HISI_LIMIT_HSGEAR_TX UFS_HS_G3
+#define UFS_HISI_LIMIT_PWMGEAR_RX UFS_PWM_G4
+#define UFS_HISI_LIMIT_PWMGEAR_TX UFS_PWM_G4
+#define UFS_HISI_LIMIT_RX_PWR_PWM SLOW_MODE
+#define UFS_HISI_LIMIT_TX_PWR_PWM SLOW_MODE
+#define UFS_HISI_LIMIT_RX_PWR_HS FAST_MODE
+#define UFS_HISI_LIMIT_TX_PWR_HS FAST_MODE
+#define UFS_HISI_LIMIT_HS_RATE PA_HS_MODE_B
+#define UFS_HISI_LIMIT_DESIRED_MODE FAST
+
+struct ufs_hisi_host {
+ struct ufs_hba *hba;
+ void __iomem *ufs_sys_ctrl;
+
+ struct reset_control *rst;
+
+ uint64_t caps;
+
+ bool in_suspend;
+};
+
+#define ufs_sys_ctrl_writel(host, val, reg) \
+ writel((val), (host)->ufs_sys_ctrl + (reg))
+#define ufs_sys_ctrl_readl(host, reg) readl((host)->ufs_sys_ctrl + (reg))
+#define ufs_sys_ctrl_set_bits(host, mask, reg) \
+ ufs_sys_ctrl_writel( \
+ (host), ((mask) | (ufs_sys_ctrl_readl((host), (reg)))), (reg))
+#define ufs_sys_ctrl_clr_bits(host, mask, reg) \
+ ufs_sys_ctrl_writel((host), \
+ ((~(mask)) & (ufs_sys_ctrl_readl((host), (reg)))), \
+ (reg))
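+
+/*
+ * Usage sketch for the accessors above (taken from the hi3660 clock
+ * init in ufs-hisi.c):
+ *
+ *   ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
+ */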
+#endif /* UFS_HISI_H_ */
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 221820a7c78b..75ee5906b966 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -50,19 +50,10 @@ static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
u32 clk_cycles);
-static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
- char *prefix)
-{
- print_hex_dump(KERN_ERR, prefix,
- len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
- 16, 4, (void __force *)hba->mmio_base + offset,
- len * 4, false);
-}
-
static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
- char *prefix, void *priv)
+ const char *prefix, void *priv)
{
- ufs_qcom_dump_regs(hba, offset, len, prefix);
+ ufshcd_dump_regs(hba, offset, len * 4, prefix);
}
static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
@@ -1431,7 +1422,7 @@ out:
static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
void *priv, void (*print_fn)(struct ufs_hba *hba,
- int offset, int num_regs, char *str, void *priv))
+ int offset, int num_regs, const char *str, void *priv))
{
u32 reg;
struct ufs_qcom_host *host;
@@ -1613,7 +1604,7 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
static void ufs_qcom_testbus_read(struct ufs_hba *hba)
{
- ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
+ ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
}
static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
@@ -1639,8 +1630,8 @@ static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
- ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
- "HCI Vendor Specific Registers ");
+ ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
+ "HCI Vendor Specific Registers ");
/* sleep a bit intermittently as we are dumping too much data */
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 397081d320b1..9d5d2ca7fc4f 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -99,8 +99,29 @@
_ret; \
})
-#define ufshcd_hex_dump(prefix_str, buf, len) \
-print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
+#define ufshcd_hex_dump(prefix_str, buf, len) do { \
+ size_t __len = (len); \
+ print_hex_dump(KERN_ERR, prefix_str, \
+ __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
+ 16, 4, buf, __len, false); \
+} while (0)
+
+int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ const char *prefix)
+{
+ u8 *regs;
+
+ regs = kzalloc(len, GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
+
+ memcpy_fromio(regs, hba->mmio_base + offset, len);
+ ufshcd_hex_dump(prefix, regs, len);
+ kfree(regs);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
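+
+/*
+ * Example call from a vendor driver (len is in bytes, so dumping 16
+ * 32-bit registers takes 16 * 4; this mirrors the Qualcomm variant
+ * converted below):
+ *
+ *   ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
+ *                    "HCI Vendor Specific Registers ");
+ */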
enum {
UFSHCD_MAX_CHANNEL = 0,
@@ -321,18 +342,19 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
sector_t lba = -1;
u8 opcode = 0;
u32 intr, doorbell;
- struct ufshcd_lrb *lrbp;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int transfer_len = -1;
- /* trace UPIU also */
- ufshcd_add_cmd_upiu_trace(hba, tag, str);
-
- if (!trace_ufshcd_command_enabled())
+ if (!trace_ufshcd_command_enabled()) {
+ /* trace the UPIU without tracing the command */
+ if (lrbp->cmd)
+ ufshcd_add_cmd_upiu_trace(hba, tag, str);
return;
-
- lrbp = &hba->lrb[tag];
+ }
if (lrbp->cmd) { /* data phase exists */
+ /* trace UPIU also */
+ ufshcd_add_cmd_upiu_trace(hba, tag, str);
opcode = (u8)(*lrbp->cmd->cmnd);
if ((opcode == READ_10) || (opcode == WRITE_10)) {
/*
@@ -386,15 +408,7 @@ static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
- /*
- * hex_dump reads its data without the readl macro. This might
- * cause inconsistency issues on some platform, as the printed
- * values may be from cache and not the most recent value.
- * To know whether you are looking at an un-cached version verify
- * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked
- * during platform/pci probe function.
- */
- ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
hba->ufs_version, hba->capabilities);
dev_err(hba->dev,
@@ -7290,7 +7304,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
pwr_mode, ret);
- if (driver_byte(ret) & DRIVER_SENSE)
+ if (driver_byte(ret) == DRIVER_SENSE)
scsi_print_sense_hdr(sdp, NULL, &sshdr);
}
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index f51758f1e5cc..33fdd3f281ae 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -1043,4 +1043,7 @@ static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}
+int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ const char *prefix);
+
#endif /* End of Header */
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 6dc8891ccb74..1c72db94270e 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -513,12 +513,12 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
if (sc->sc_data_direction == DMA_TO_DEVICE)
cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
- blk_rq_sectors(rq) *
- bi->tuple_size);
+ bio_integrity_bytes(bi,
+ blk_rq_sectors(rq)));
else if (sc->sc_data_direction == DMA_FROM_DEVICE)
cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
- blk_rq_sectors(rq) *
- bi->tuple_size);
+ bio_integrity_bytes(bi,
+ blk_rq_sectors(rq)));
}
#endif
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index 2e45988d1259..e5d7fb81ad66 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -300,8 +300,8 @@ static void maple_send(void)
mutex_unlock(&maple_wlist_lock);
if (maple_packets > 0) {
for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
- sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __flush_purge_region(maple_sendbuf + i * PAGE_SIZE,
+ PAGE_SIZE);
}
finish:
@@ -642,7 +642,8 @@ static void maple_dma_handler(struct work_struct *work)
list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
mdev = mq->dev;
recvbuf = mq->recvbuf->buf;
- sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE);
+ __flush_invalidate_region(sh_cacheop_vaddr(recvbuf),
+ 0x400);
code = recvbuf[0];
kfree(mq->sendbuf);
list_del_init(&mq->list);
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index 0097a939487f..546960a18d60 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -209,7 +209,7 @@ static int imx_pgc_power_domain_probe(struct platform_device *pdev)
goto genpd_err;
}
- device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE);
+ device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE_CONSUMER);
return 0;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 5856e792d09c..ba79b609aca2 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -40,6 +40,23 @@ config QCOM_GSBI
functions for connecting the underlying serial UART, SPI, and I2C
devices to the output pins.
+config QCOM_LLCC
+ tristate "Qualcomm Technologies, Inc. LLCC driver"
+ depends on ARCH_QCOM
+ help
+ Qualcomm Technologies, Inc. platform specific
+ Last Level Cache Controller (LLCC) driver. This provides interfaces
+ to clients that use the LLCC. Say yes here to enable the LLCC
+ slice driver.
+
+config QCOM_SDM845_LLCC
+ tristate "Qualcomm Technologies, Inc. SDM845 LLCC driver"
+ depends on QCOM_LLCC
+ help
+ Say yes here to enable the LLCC driver for SDM845. This provides
+ data required to configure LLCC so that clients can start using the
+ LLCC slices.
+
config QCOM_MDT_LOADER
tristate
select QCOM_SCM
@@ -75,6 +92,16 @@ config QCOM_RMTFS_MEM
Say y here if you intend to boot the modem remoteproc.
+config QCOM_RPMH
+ bool "Qualcomm RPM-Hardened (RPMH) Communication"
+ depends on ARCH_QCOM && ARM64 && OF || COMPILE_TEST
+ help
+ Support for communication with the hardened-RPM blocks in
+ Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
+ internal bus to transmit state requests for shared resources. A set
+ of hardware components aggregate requests for these resources and
+ help apply the aggregated state on the resource.
+
config QCOM_SMEM
tristate "Qualcomm Shared Memory Manager (SMEM)"
depends on ARCH_QCOM
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 19dcf957cb3a..f25b54cd6cf8 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+CFLAGS_rpmh-rsc.o := -I$(src)
obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
@@ -8,6 +9,9 @@ obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o
obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
+obj-$(CONFIG_QCOM_RPMH) += qcom_rpmh.o
+qcom_rpmh-y += rpmh-rsc.o
+qcom_rpmh-y += rpmh.o
obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
obj-$(CONFIG_QCOM_SMEM) += smem.o
obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
@@ -15,3 +19,5 @@ obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
obj-$(CONFIG_QCOM_SMSM) += smsm.o
obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
obj-$(CONFIG_QCOM_APR) += apr.o
+obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
+obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c
new file mode 100644
index 000000000000..2e1e4f0a5db8
--- /dev/null
+++ b/drivers/soc/qcom/llcc-sdm845.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+/*
+ * An SCT (System Cache Table) entry consists of the following members:
+ * usecase_id: Unique id for the client's use case
+ * slice_id: llcc slice id for each client
+ * max_cap: The maximum capacity of the cache slice provided in KB
+ * priority: Priority of the client used to select victim line for replacement
+ * fixed_size: Boolean indicating if the slice has a fixed capacity
+ * bonus_ways: Bonus ways are additional ways to be used for any slice,
+ * if the client ends up using more than the reserved cache ways.
+ * Bonus ways are allocated only if they are not reserved for
+ * some other client.
+ * res_ways: Reserved ways for the cache slice; the reserved ways cannot
+ * be used by any client other than the one they are assigned to.
+ * cache_mode: Each slice operates as a cache; this controls the mode of the
+ * slice: normal or TCM (Tightly Coupled Memory)
+ * probe_target_ways: Determines which ways to probe for an access hit. When
+ * configured to 1, only bonus and reserved ways are probed.
+ * When configured to 0, all ways in the llcc are probed.
+ * dis_cap_alloc: Disable capacity-based allocation for a client
+ * retain_on_pc: If this bit is set and the client has maintained an active
+ * vote, then the ways assigned to this client are not flushed on
+ * power collapse.
+ * activate_on_init: Activate the slice immediately after the SCT is programmed
+ */
+#define SCT_ENTRY(uid, sid, mc, p, fs, bway, rway, cmod, ptw, dca, rp, a) \
+ { \
+ .usecase_id = uid, \
+ .slice_id = sid, \
+ .max_cap = mc, \
+ .priority = p, \
+ .fixed_size = fs, \
+ .bonus_ways = bway, \
+ .res_ways = rway, \
+ .cache_mode = cmod, \
+ .probe_target_ways = ptw, \
+ .dis_cap_alloc = dca, \
+ .retain_on_pc = rp, \
+ .activate_on_init = a, \
+ }
+
+static struct llcc_slice_config sdm845_data[] = {
+ SCT_ENTRY(LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1),
+ SCT_ENTRY(LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1),
+ SCT_ENTRY(LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0),
+};
+
+static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
+{
+ return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data));
+}
+
+static const struct of_device_id sdm845_qcom_llcc_of_match[] = {
+ { .compatible = "qcom,sdm845-llcc", },
+ { }
+};
+
+static struct platform_driver sdm845_qcom_llcc_driver = {
+ .driver = {
+ .name = "sdm845-llcc",
+ .of_match_table = sdm845_qcom_llcc_of_match,
+ },
+ .probe = sdm845_qcom_llcc_probe,
+};
+module_platform_driver(sdm845_qcom_llcc_driver);
+
+MODULE_DESCRIPTION("QCOM sdm845 LLCC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
new file mode 100644
index 000000000000..54063a31132f
--- /dev/null
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+#define ACTIVATE BIT(0)
+#define DEACTIVATE BIT(1)
+#define ACT_CTRL_OPCODE_ACTIVATE BIT(0)
+#define ACT_CTRL_OPCODE_DEACTIVATE BIT(1)
+#define ACT_CTRL_ACT_TRIG BIT(0)
+#define ACT_CTRL_OPCODE_SHIFT 0x01
+#define ATTR1_PROBE_TARGET_WAYS_SHIFT 0x02
+#define ATTR1_FIXED_SIZE_SHIFT 0x03
+#define ATTR1_PRIORITY_SHIFT 0x04
+#define ATTR1_MAX_CAP_SHIFT 0x10
+#define ATTR0_RES_WAYS_MASK GENMASK(11, 0)
+#define ATTR0_BONUS_WAYS_MASK GENMASK(27, 16)
+#define ATTR0_BONUS_WAYS_SHIFT 0x10
+#define LLCC_STATUS_READ_DELAY 100
+
+#define CACHE_LINE_SIZE_SHIFT 6
+
+#define LLCC_COMMON_STATUS0 0x0003000c
+#define LLCC_LB_CNT_MASK GENMASK(31, 28)
+#define LLCC_LB_CNT_SHIFT 28
+
+#define MAX_CAP_TO_BYTES(n) (n * SZ_1K)
+#define LLCC_TRP_ACT_CTRLn(n) (n * SZ_4K)
+#define LLCC_TRP_STATUSn(n) (4 + n * SZ_4K)
+#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n)
+#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n)
+
+#define BANK_OFFSET_STRIDE 0x80000
+
+static struct llcc_drv_data *drv_data;
+
+static const struct regmap_config llcc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+/**
+ * llcc_slice_getd - get llcc slice descriptor
+ * @uid: usecase_id for the client
+ *
+ * A pointer to the llcc slice descriptor is returned on success, and
+ * an error pointer is returned on failure.
+ */
+struct llcc_slice_desc *llcc_slice_getd(u32 uid)
+{
+ const struct llcc_slice_config *cfg;
+ struct llcc_slice_desc *desc;
+ u32 sz, count;
+
+ cfg = drv_data->cfg;
+ sz = drv_data->cfg_size;
+
+ for (count = 0; cfg && count < sz; count++, cfg++)
+ if (cfg->usecase_id == uid)
+ break;
+
+ if (count == sz || !cfg)
+ return ERR_PTR(-ENODEV);
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ desc->slice_id = cfg->slice_id;
+ desc->slice_size = cfg->max_cap;
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_getd);
+
+/**
+ * llcc_slice_putd - put llcc slice descriptor
+ * @desc: Pointer to llcc slice descriptor
+ */
+void llcc_slice_putd(struct llcc_slice_desc *desc)
+{
+ kfree(desc);
+}
+EXPORT_SYMBOL_GPL(llcc_slice_putd);
+
+static int llcc_update_act_ctrl(u32 sid,
+ u32 act_ctrl_reg_val, u32 status)
+{
+ u32 act_ctrl_reg;
+ u32 status_reg;
+ u32 slice_status;
+ int ret;
+
+ act_ctrl_reg = drv_data->bcast_off + LLCC_TRP_ACT_CTRLn(sid);
+ status_reg = drv_data->bcast_off + LLCC_TRP_STATUSn(sid);
+
+ /* Set the ACTIVE trigger */
+ act_ctrl_reg_val |= ACT_CTRL_ACT_TRIG;
+ ret = regmap_write(drv_data->regmap, act_ctrl_reg, act_ctrl_reg_val);
+ if (ret)
+ return ret;
+
+ /* Clear the ACTIVE trigger */
+ act_ctrl_reg_val &= ~ACT_CTRL_ACT_TRIG;
+ ret = regmap_write(drv_data->regmap, act_ctrl_reg, act_ctrl_reg_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(drv_data->regmap, status_reg,
+ slice_status, !(slice_status & status),
+ 0, LLCC_STATUS_READ_DELAY);
+ return ret;
+}
+
+/**
+ * llcc_slice_activate - Activate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A value of zero will be returned on success and a negative errno will
+ * be returned in error cases
+ */
+int llcc_slice_activate(struct llcc_slice_desc *desc)
+{
+ int ret;
+ u32 act_ctrl_val;
+
+ mutex_lock(&drv_data->lock);
+ if (test_bit(desc->slice_id, drv_data->bitmap)) {
+ mutex_unlock(&drv_data->lock);
+ return 0;
+ }
+
+ act_ctrl_val = ACT_CTRL_OPCODE_ACTIVATE << ACT_CTRL_OPCODE_SHIFT;
+
+ ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
+ DEACTIVATE);
+ if (ret) {
+ mutex_unlock(&drv_data->lock);
+ return ret;
+ }
+
+ __set_bit(desc->slice_id, drv_data->bitmap);
+ mutex_unlock(&drv_data->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_activate);
+
+/**
+ * llcc_slice_deactivate - Deactivate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A value of zero will be returned on success and a negative errno will
+ * be returned in error cases
+ */
+int llcc_slice_deactivate(struct llcc_slice_desc *desc)
+{
+ u32 act_ctrl_val;
+ int ret;
+
+ mutex_lock(&drv_data->lock);
+ if (!test_bit(desc->slice_id, drv_data->bitmap)) {
+ mutex_unlock(&drv_data->lock);
+ return 0;
+ }
+ act_ctrl_val = ACT_CTRL_OPCODE_DEACTIVATE << ACT_CTRL_OPCODE_SHIFT;
+
+ ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
+ ACTIVATE);
+ if (ret) {
+ mutex_unlock(&drv_data->lock);
+ return ret;
+ }
+
+ __clear_bit(desc->slice_id, drv_data->bitmap);
+ mutex_unlock(&drv_data->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_deactivate);
+
+/**
+ * llcc_get_slice_id - return the slice id
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_get_slice_id(struct llcc_slice_desc *desc)
+{
+ return desc->slice_id;
+}
+EXPORT_SYMBOL_GPL(llcc_get_slice_id);
+
+/**
+ * llcc_get_slice_size - return the slice size
+ * @desc: Pointer to llcc slice descriptor
+ */
+size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
+{
+ return desc->slice_size;
+}
+EXPORT_SYMBOL_GPL(llcc_get_slice_size);
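+
+/*
+ * Illustrative client usage (hypothetical snippet; LLCC_GPU is one of
+ * the usecase IDs programmed in the target's SCT):
+ *
+ *   struct llcc_slice_desc *desc = llcc_slice_getd(LLCC_GPU);
+ *   int ret;
+ *
+ *   if (IS_ERR(desc))
+ *           return PTR_ERR(desc);
+ *   ret = llcc_slice_activate(desc);
+ *   ...
+ *   llcc_slice_deactivate(desc);
+ *   llcc_slice_putd(desc);
+ */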
+
+static int qcom_llcc_cfg_program(struct platform_device *pdev)
+{
+ int i;
+ u32 attr1_cfg;
+ u32 attr0_cfg;
+ u32 attr1_val;
+ u32 attr0_val;
+ u32 max_cap_cacheline;
+ u32 sz;
+ int ret;
+ const struct llcc_slice_config *llcc_table;
+ struct llcc_slice_desc desc;
+ u32 bcast_off = drv_data->bcast_off;
+
+ sz = drv_data->cfg_size;
+ llcc_table = drv_data->cfg;
+
+ for (i = 0; i < sz; i++) {
+ attr1_cfg = bcast_off +
+ LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
+ attr0_cfg = bcast_off +
+ LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);
+
+ attr1_val = llcc_table[i].cache_mode;
+ attr1_val |= llcc_table[i].probe_target_ways <<
+ ATTR1_PROBE_TARGET_WAYS_SHIFT;
+ attr1_val |= llcc_table[i].fixed_size <<
+ ATTR1_FIXED_SIZE_SHIFT;
+ attr1_val |= llcc_table[i].priority <<
+ ATTR1_PRIORITY_SHIFT;
+
+ max_cap_cacheline = MAX_CAP_TO_BYTES(llcc_table[i].max_cap);
+
+ /*
+ * LLCC instances can vary for each target.
+ * The SW writes to the broadcast register, which gets propagated
+ * to each llcc instance (llcc0,..llccN).
+ * Since the size of the memory is divided equally among the
+ * llcc instances, we need to configure the max cap accordingly.
+ */
+ max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
+ max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
+ attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
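+
+ /*
+ * Worked example (hypothetical bank count): a 2816 KB slice on a
+ * target with four banks is 2816 * 1024 / 4 = 720896 bytes per
+ * bank, i.e. 720896 >> 6 = 11264 cache lines in the MAX_CAP
+ * field.
+ */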
+
+ attr0_val = llcc_table[i].res_ways & ATTR0_RES_WAYS_MASK;
+ attr0_val |= llcc_table[i].bonus_ways << ATTR0_BONUS_WAYS_SHIFT;
+
+ ret = regmap_write(drv_data->regmap, attr1_cfg, attr1_val);
+ if (ret)
+ return ret;
+ ret = regmap_write(drv_data->regmap, attr0_cfg, attr0_val);
+ if (ret)
+ return ret;
+ if (llcc_table[i].activate_on_init) {
+ desc.slice_id = llcc_table[i].slice_id;
+ ret = llcc_slice_activate(&desc);
+ }
+ }
+ return ret;
+}
+
+int qcom_llcc_probe(struct platform_device *pdev,
+ const struct llcc_slice_config *llcc_cfg, u32 sz)
+{
+ u32 num_banks;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+ int ret, i;
+
+ drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ drv_data->regmap = devm_regmap_init_mmio(dev, base,
+ &llcc_regmap_config);
+ if (IS_ERR(drv_data->regmap))
+ return PTR_ERR(drv_data->regmap);
+
+ ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0,
+ &num_banks);
+ if (ret)
+ return ret;
+
+ num_banks &= LLCC_LB_CNT_MASK;
+ num_banks >>= LLCC_LB_CNT_SHIFT;
+ drv_data->num_banks = num_banks;
+
+ for (i = 0; i < sz; i++)
+ if (llcc_cfg[i].slice_id > drv_data->max_slices)
+ drv_data->max_slices = llcc_cfg[i].slice_id;
+
+ drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
+ GFP_KERNEL);
+ if (!drv_data->offsets)
+ return -ENOMEM;
+
+ for (i = 0; i < num_banks; i++)
+ drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;
+
+ drv_data->bcast_off = num_banks * BANK_OFFSET_STRIDE;
+
+ drv_data->bitmap = devm_kcalloc(dev,
+ BITS_TO_LONGS(drv_data->max_slices), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!drv_data->bitmap)
+ return -ENOMEM;
+
+ drv_data->cfg = llcc_cfg;
+ drv_data->cfg_size = sz;
+ mutex_init(&drv_data->lock);
+ platform_set_drvdata(pdev, drv_data);
+
+ return qcom_llcc_cfg_program(pdev);
+}
+EXPORT_SYMBOL_GPL(qcom_llcc_probe);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index c8999e38b005..8a3678c2e83c 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -184,6 +184,7 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
device_initialize(&rmtfs_mem->dev);
rmtfs_mem->dev.parent = &pdev->dev;
rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups;
+ rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;
rmtfs_mem->base = devm_memremap(&rmtfs_mem->dev, rmtfs_mem->addr,
rmtfs_mem->size, MEMREMAP_WC);
@@ -206,8 +207,6 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
goto put_device;
}
- rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;
-
ret = of_property_read_u32(node, "qcom,vmid", &vmid);
if (ret < 0 && ret != -EINVAL) {
dev_err(&pdev->dev, "failed to parse qcom,vmid\n");
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
new file mode 100644
index 000000000000..a7bbbb67991c
--- /dev/null
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef __RPM_INTERNAL_H__
+#define __RPM_INTERNAL_H__
+
+#include <linux/bitmap.h>
+#include <soc/qcom/tcs.h>
+
+#define TCS_TYPE_NR 4
+#define MAX_CMDS_PER_TCS 16
+#define MAX_TCS_PER_TYPE 3
+#define MAX_TCS_NR (MAX_TCS_PER_TYPE * TCS_TYPE_NR)
+#define MAX_TCS_SLOTS (MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)
+
+struct rsc_drv;
+
+/**
+ * struct tcs_group: group of Trigger Command Sets (TCS) to send state requests
+ * to the controller
+ *
+ * @drv: the controller
+ * @type: type of the TCS in this group - active, sleep, wake
+ * @mask: mask of the TCSes relative to all the TCSes in the RSC
+ * @offset: start of the TCS group relative to the TCSes in the RSC
+ * @num_tcs: number of TCSes in this type
+ * @ncpt: number of commands in each TCS
+ * @lock: lock for synchronizing writes to this TCS
+ * @req: requests that are sent from the TCS
+ * @cmd_cache: flattened cache of cmds in sleep/wake TCS
+ * @slots: indicates which entries of @cmd_cache are occupied
+ */
+struct tcs_group {
+ struct rsc_drv *drv;
+ int type;
+ u32 mask;
+ u32 offset;
+ int num_tcs;
+ int ncpt;
+ spinlock_t lock;
+ const struct tcs_request *req[MAX_TCS_PER_TYPE];
+ u32 *cmd_cache;
+ DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
+};
+
+/**
+ * struct rpmh_request: the message to be sent to rpmh-rsc
+ *
+ * @msg: the request
+ * @cmd: the payload that will be part of the @msg
+ * @completion: triggered when request is done
+ * @dev: the device making the request
+ * @err: err return from the controller
+ * @needs_free: indicates whether the dynamically allocated request
+ * object needs to be freed
+ */
+struct rpmh_request {
+ struct tcs_request msg;
+ struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
+ struct completion *completion;
+ const struct device *dev;
+ int err;
+ bool needs_free;
+};
+
+/**
+ * struct rpmh_ctrlr: our representation of the controller
+ *
+ * @cache: the list of cached requests
+ * @cache_lock: synchronize access to the cache data
+ * @dirty: was the cache updated since flush
+ * @batch_cache: Cache sleep and wake requests sent as batch
+ */
+struct rpmh_ctrlr {
+ struct list_head cache;
+ spinlock_t cache_lock;
+ bool dirty;
+ struct list_head batch_cache;
+};
+
+/**
+ * struct rsc_drv: the Direct Resource Voter (DRV) of the
+ * Resource State Coordinator controller (RSC)
+ *
+ * @name: controller identifier
+ * @tcs_base: start address of the TCS registers in this controller
+ * @id: instance id in the controller (Direct Resource Voter)
+ * @num_tcs: number of TCSes in this DRV
+ * @tcs: TCS groups
+ * @tcs_in_use: s/w state of the TCS
+ * @lock: synchronize state of the controller
+ * @client: handle to the DRV's client.
+ */
+struct rsc_drv {
+ const char *name;
+ void __iomem *tcs_base;
+ int id;
+ int num_tcs;
+ struct tcs_group tcs[TCS_TYPE_NR];
+ DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
+ spinlock_t lock;
+ struct rpmh_ctrlr client;
+};
+
+int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg);
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
+ const struct tcs_request *msg);
+int rpmh_rsc_invalidate(struct rsc_drv *drv);
+
+void rpmh_tx_done(const struct tcs_request *msg, int r);
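+
+/*
+ * Illustrative request construction (hypothetical address and data;
+ * struct tcs_cmd and struct tcs_request come from <soc/qcom/tcs.h>):
+ *
+ *   struct tcs_cmd cmd = { .addr = 0x30000, .data = 0x1, .wait = true };
+ *   struct tcs_request msg = {
+ *           .state = RPMH_ACTIVE_ONLY_STATE,
+ *           .wait_for_compl = true,
+ *           .num_cmds = 1,
+ *           .cmds = &cmd,
+ *   };
+ *
+ *   ret = rpmh_rsc_send_data(drv, &msg);
+ */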
+
+#endif /* __RPM_INTERNAL_H__ */
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
new file mode 100644
index 000000000000..ee75da66d64b
--- /dev/null
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -0,0 +1,693 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/tcs.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+
+#include "rpmh-internal.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace-rpmh.h"
+
+#define RSC_DRV_TCS_OFFSET 672
+#define RSC_DRV_CMD_OFFSET 20
+
+/* DRV Configuration Information Register */
+#define DRV_PRNT_CHLD_CONFIG 0x0C
+#define DRV_NUM_TCS_MASK 0x3F
+#define DRV_NUM_TCS_SHIFT 6
+#define DRV_NCPT_MASK 0x1F
+#define DRV_NCPT_SHIFT 27
+
+/* Register offsets */
+#define RSC_DRV_IRQ_ENABLE 0x00
+#define RSC_DRV_IRQ_STATUS 0x04
+#define RSC_DRV_IRQ_CLEAR 0x08
+#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10
+#define RSC_DRV_CONTROL 0x14
+#define RSC_DRV_STATUS 0x18
+#define RSC_DRV_CMD_ENABLE 0x1C
+#define RSC_DRV_CMD_MSGID 0x30
+#define RSC_DRV_CMD_ADDR 0x34
+#define RSC_DRV_CMD_DATA 0x38
+#define RSC_DRV_CMD_STATUS 0x3C
+#define RSC_DRV_CMD_RESP_DATA 0x40
+
+#define TCS_AMC_MODE_ENABLE BIT(16)
+#define TCS_AMC_MODE_TRIGGER BIT(24)
+
+/* TCS CMD register bit mask */
+#define CMD_MSGID_LEN 8
+#define CMD_MSGID_RESP_REQ BIT(8)
+#define CMD_MSGID_WRITE BIT(16)
+#define CMD_STATUS_ISSUED BIT(8)
+#define CMD_STATUS_COMPL BIT(16)
+
+static u32 read_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
+{
+ return readl_relaxed(drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
+ RSC_DRV_CMD_OFFSET * cmd_id);
+}
+
+static void write_tcs_cmd(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id,
+ u32 data)
+{
+ writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
+ RSC_DRV_CMD_OFFSET * cmd_id);
+}
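+
+/*
+ * Addressing example: command 2 of TCS 1 lives at
+ * tcs_base + reg + 672 * 1 + 20 * 2, i.e. 712 bytes past command
+ * slot 0 of TCS 0.
+ */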
+
+static void write_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, u32 data)
+{
+ writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
+}
+
+static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id,
+ u32 data)
+{
+ writel(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
+ for (;;) {
+ if (data == readl(drv->tcs_base + reg +
+ RSC_DRV_TCS_OFFSET * tcs_id))
+ break;
+ udelay(1);
+ }
+}
+
+static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
+{
+ return !test_bit(tcs_id, drv->tcs_in_use) &&
+ read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0);
+}
+
+static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
+{
+ return &drv->tcs[type];
+}
+
+static int tcs_invalidate(struct rsc_drv *drv, int type)
+{
+ int m;
+ struct tcs_group *tcs;
+
+ tcs = get_tcs_of_type(drv, type);
+
+ spin_lock(&tcs->lock);
+ if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) {
+ spin_unlock(&tcs->lock);
+ return 0;
+ }
+
+ for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
+ if (!tcs_is_free(drv, m)) {
+ spin_unlock(&tcs->lock);
+ return -EAGAIN;
+ }
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
+ }
+ bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
+ spin_unlock(&tcs->lock);
+
+ return 0;
+}
+
+/**
+ * rpmh_rsc_invalidate - Invalidate sleep and wake TCSes
+ *
+ * @drv: the RSC controller
+ */
+int rpmh_rsc_invalidate(struct rsc_drv *drv)
+{
+ int ret;
+
+ ret = tcs_invalidate(drv, SLEEP_TCS);
+ if (!ret)
+ ret = tcs_invalidate(drv, WAKE_TCS);
+
+ return ret;
+}
+
+static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
+ const struct tcs_request *msg)
+{
+ int type, ret;
+ struct tcs_group *tcs;
+
+ switch (msg->state) {
+ case RPMH_ACTIVE_ONLY_STATE:
+ type = ACTIVE_TCS;
+ break;
+ case RPMH_WAKE_ONLY_STATE:
+ type = WAKE_TCS;
+ break;
+ case RPMH_SLEEP_STATE:
+ type = SLEEP_TCS;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * If we are making an active request on an RSC that does not have a
+ * dedicated TCS for active state use, then re-purpose a wake TCS to
+ * send active votes.
+ * NOTE: The driver must be aware that this RSC does not have a
+ * dedicated AMC, and therefore would invalidate the sleep and wake
+ * TCSes before making an active state request.
+ */
+ tcs = get_tcs_of_type(drv, type);
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) {
+ tcs = get_tcs_of_type(drv, WAKE_TCS);
+ if (tcs->num_tcs) {
+ ret = rpmh_rsc_invalidate(drv);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ }
+
+ return tcs;
+}
+
+static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
+ int tcs_id)
+{
+ struct tcs_group *tcs;
+ int i;
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ tcs = &drv->tcs[i];
+ if (tcs->mask & BIT(tcs_id))
+ return tcs->req[tcs_id - tcs->offset];
+ }
+
+ return NULL;
+}
+
+/**
+ * tcs_tx_done: TX Done interrupt handler
+ */
+static irqreturn_t tcs_tx_done(int irq, void *p)
+{
+ struct rsc_drv *drv = p;
+ int i, j, err = 0;
+ unsigned long irq_status;
+ const struct tcs_request *req;
+ struct tcs_cmd *cmd;
+
+ irq_status = read_tcs_reg(drv, RSC_DRV_IRQ_STATUS, 0, 0);
+
+ for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
+ req = get_req_from_tcs(drv, i);
+ if (!req) {
+ WARN_ON(1);
+ goto skip;
+ }
+
+ err = 0;
+ for (j = 0; j < req->num_cmds; j++) {
+ u32 sts;
+
+ cmd = &req->cmds[j];
+ sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, i, j);
+ if (!(sts & CMD_STATUS_ISSUED) ||
+ ((req->wait_for_compl || cmd->wait) &&
+ !(sts & CMD_STATUS_COMPL))) {
+ pr_err("Incomplete request: %s: addr=%#x data=%#x",
+ drv->name, cmd->addr, cmd->data);
+ err = -EIO;
+ }
+ }
+
+ trace_rpmh_tx_done(drv, i, req, err);
+skip:
+ /* Reclaim the TCS */
+ write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
+ write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
+ spin_lock(&drv->lock);
+ clear_bit(i, drv->tcs_in_use);
+ spin_unlock(&drv->lock);
+ if (req)
+ rpmh_tx_done(req, err);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
+ const struct tcs_request *msg)
+{
+ u32 msgid, cmd_msgid;
+ u32 cmd_enable = 0;
+ u32 cmd_complete;
+ struct tcs_cmd *cmd;
+ int i, j;
+
+ cmd_msgid = CMD_MSGID_LEN;
+ cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
+ cmd_msgid |= CMD_MSGID_WRITE;
+
+ cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+
+ for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
+ cmd = &msg->cmds[i];
+ cmd_enable |= BIT(j);
+ cmd_complete |= cmd->wait << j;
+ msgid = cmd_msgid;
+ msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;
+
+ write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
+ write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
+ write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
+ trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
+ }
+
+ write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
+ cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
+}
+
+static void __tcs_trigger(struct rsc_drv *drv, int tcs_id)
+{
+ u32 enable;
+
+ /*
+ * HW req: Clear the DRV_CONTROL and enable the TCS again.
+ * While clearing, ensure that the AMC mode trigger is cleared
+ * first and then the mode enable.
+ */
+ enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
+ enable &= ~TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ enable &= ~TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+
+ /* Enable the AMC mode on the TCS and then trigger the TCS */
+ enable = TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ enable |= TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+}
+
+static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
+ const struct tcs_request *msg)
+{
+ unsigned long curr_enabled;
+ u32 addr;
+ int i, j, k;
+ int tcs_id = tcs->offset;
+
+ for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
+ if (tcs_is_free(drv, tcs_id))
+ continue;
+
+ curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+
+ for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
+ addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
+ for (k = 0; k < msg->num_cmds; k++) {
+ if (addr == msg->cmds[k].addr)
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int find_free_tcs(struct tcs_group *tcs)
+{
+ int i;
+
+ for (i = 0; i < tcs->num_tcs; i++) {
+ if (tcs_is_free(tcs->drv, tcs->offset + i))
+ return tcs->offset + i;
+ }
+
+ return -EBUSY;
+}
+
+static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ struct tcs_group *tcs;
+ int tcs_id;
+ unsigned long flags;
+ int ret;
+
+ tcs = get_tcs_for_msg(drv, msg);
+ if (IS_ERR(tcs))
+ return PTR_ERR(tcs);
+
+ spin_lock_irqsave(&tcs->lock, flags);
+ spin_lock(&drv->lock);
+ /*
+ * The h/w does not like it if we send a request to the same address
+ * when one is already in-flight or being processed.
+ */
+ ret = check_for_req_inflight(drv, tcs, msg);
+ if (ret) {
+ spin_unlock(&drv->lock);
+ goto done_write;
+ }
+
+ tcs_id = find_free_tcs(tcs);
+ if (tcs_id < 0) {
+ ret = tcs_id;
+ spin_unlock(&drv->lock);
+ goto done_write;
+ }
+
+ tcs->req[tcs_id - tcs->offset] = msg;
+ set_bit(tcs_id, drv->tcs_in_use);
+ spin_unlock(&drv->lock);
+
+ __tcs_buffer_write(drv, tcs_id, 0, msg);
+ __tcs_trigger(drv, tcs_id);
+
+done_write:
+ spin_unlock_irqrestore(&tcs->lock, flags);
+ return ret;
+}
+
+/**
+ * rpmh_rsc_send_data: Validate the incoming message and write to the
+ * appropriate TCS block.
+ *
+ * @drv: the controller
+ * @msg: the data to be sent
+ *
+ * Return: 0 on success, -EINVAL on error.
+ * Note: This call blocks until the data is written to the TCS.
+ */
+int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ int ret;
+
+ if (!msg || !msg->cmds || !msg->num_cmds ||
+ msg->num_cmds > MAX_RPMH_PAYLOAD) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ do {
+ ret = tcs_write(drv, msg);
+ if (ret == -EBUSY) {
+ pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
+ msg->cmds[0].addr);
+ udelay(10);
+ }
+ } while (ret == -EBUSY);
+
+ return ret;
+}
+
+static int find_match(const struct tcs_group *tcs, const struct tcs_cmd *cmd,
+ int len)
+{
+ int i, j;
+
+ /* Check for already cached commands */
+ for_each_set_bit(i, tcs->slots, MAX_TCS_SLOTS) {
+ if (tcs->cmd_cache[i] != cmd[0].addr)
+ continue;
+ if (i + len >= tcs->num_tcs * tcs->ncpt)
+ goto seq_err;
+ for (j = 0; j < len; j++) {
+ if (tcs->cmd_cache[i + j] != cmd[j].addr)
+ goto seq_err;
+ }
+ return i;
+ }
+
+ return -ENODATA;
+
+seq_err:
+ WARN(1, "Message does not match previous sequence.\n");
+ return -EINVAL;
+}
+
+static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
+ int *tcs_id, int *cmd_id)
+{
+ int slot, offset;
+ int i = 0;
+
+ /* Find if we already have the msg in our TCS */
+ slot = find_match(tcs, msg->cmds, msg->num_cmds);
+ if (slot >= 0)
+ goto copy_data;
+
+ /* Do over, until we can fit the full payload in a TCS */
+ do {
+ slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
+ i, msg->num_cmds, 0);
+ if (slot == tcs->num_tcs * tcs->ncpt)
+ return -ENOMEM;
+ i += tcs->ncpt;
+ } while (slot + msg->num_cmds - 1 >= i);
+
+copy_data:
+ bitmap_set(tcs->slots, slot, msg->num_cmds);
+ /* Copy the addresses of the resources over to the slots */
+ for (i = 0; i < msg->num_cmds; i++)
+ tcs->cmd_cache[slot + i] = msg->cmds[i].addr;
+
+ offset = slot / tcs->ncpt;
+ *tcs_id = offset + tcs->offset;
+ *cmd_id = slot % tcs->ncpt;
+
+ return 0;
+}
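+
+/*
+ * Worked example of the slot math above (illustrative values, not part of
+ * this patch): with ncpt = 3 commands per TCS and tcs->offset = 4, a
+ * payload placed at slot 5 of the group-wide bitmap resolves to:
+ *
+ *	tcs_id = 5 / 3 + 4 = 5;		// the second TCS in the group
+ *	cmd_id = 5 % 3 = 2;		// the third command slot within it
+ */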
+
+static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ struct tcs_group *tcs;
+ int tcs_id = 0, cmd_id = 0;
+ unsigned long flags;
+ int ret;
+
+ tcs = get_tcs_for_msg(drv, msg);
+ if (IS_ERR(tcs))
+ return PTR_ERR(tcs);
+
+ spin_lock_irqsave(&tcs->lock, flags);
+ /* find the TCS id and the command in the TCS to write to */
+ ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
+ if (!ret)
+ __tcs_buffer_write(drv, tcs_id, cmd_id, msg);
+ spin_unlock_irqrestore(&tcs->lock, flags);
+
+ return ret;
+}
+
+/**
+ * rpmh_rsc_write_ctrl_data: Write request to the controller
+ *
+ * @drv: the controller
+ * @msg: the data to be written to the controller
+ *
+ * There is no response returned for writing the request to the controller.
+ */
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ if (!msg || !msg->cmds || !msg->num_cmds ||
+ msg->num_cmds > MAX_RPMH_PAYLOAD) {
+ pr_err("Payload error\n");
+ return -EINVAL;
+ }
+
+ /* Data sent to this API will not be sent immediately */
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE)
+ return -EINVAL;
+
+ return tcs_ctrl_write(drv, msg);
+}
+
+static int rpmh_probe_tcs_config(struct platform_device *pdev,
+ struct rsc_drv *drv)
+{
+ struct tcs_type_config {
+ u32 type;
+ u32 n;
+ } tcs_cfg[TCS_TYPE_NR] = { { 0 } };
+ struct device_node *dn = pdev->dev.of_node;
+ u32 config, max_tcs, ncpt, offset;
+ int i, ret, n, st = 0;
+ struct tcs_group *tcs;
+ struct resource *res;
+ void __iomem *base;
+ char drv_id[10] = {0};
+
+ snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
+ if (ret)
+ return ret;
+ drv->tcs_base = base + offset;
+
+ config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);
+
+ max_tcs = config;
+ max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
+ max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);
+
+ ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
+ ncpt = ncpt >> DRV_NCPT_SHIFT;
+
+ n = of_property_count_u32_elems(dn, "qcom,tcs-config");
+ if (n != 2 * TCS_TYPE_NR)
+ return -EINVAL;
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ ret = of_property_read_u32_index(dn, "qcom,tcs-config",
+ i * 2, &tcs_cfg[i].type);
+ if (ret)
+ return ret;
+ if (tcs_cfg[i].type >= TCS_TYPE_NR)
+ return -EINVAL;
+
+ ret = of_property_read_u32_index(dn, "qcom,tcs-config",
+ i * 2 + 1, &tcs_cfg[i].n);
+ if (ret)
+ return ret;
+ if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ tcs = &drv->tcs[tcs_cfg[i].type];
+ if (tcs->drv)
+ return -EINVAL;
+ tcs->drv = drv;
+ tcs->type = tcs_cfg[i].type;
+ tcs->num_tcs = tcs_cfg[i].n;
+ tcs->ncpt = ncpt;
+ spin_lock_init(&tcs->lock);
+
+ if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
+ continue;
+
+ if (st + tcs->num_tcs > max_tcs ||
+ st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
+ return -EINVAL;
+
+ tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
+ tcs->offset = st;
+ st += tcs->num_tcs;
+
+ /*
+ * Allocate memory to cache sleep and wake requests to
+ * avoid reading TCS register memory.
+ */
+ if (tcs->type == ACTIVE_TCS)
+ continue;
+
+ tcs->cmd_cache = devm_kcalloc(&pdev->dev,
+ tcs->num_tcs * ncpt, sizeof(u32),
+ GFP_KERNEL);
+ if (!tcs->cmd_cache)
+ return -ENOMEM;
+ }
+
+ drv->num_tcs = st;
+
+ return 0;
+}
+
+static int rpmh_rsc_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct rsc_drv *drv;
+ int ret, irq;
+
+ /*
+ * Even though RPMh doesn't directly use cmd-db, all of its children
+ * do. To avoid adding this check to our children we'll do it now.
+ */
+ ret = cmd_db_ready();
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Command DB not available (%d)\n",
+ ret);
+ return ret;
+ }
+
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
+ if (ret)
+ return ret;
+
+ drv->name = of_get_property(dn, "label", NULL);
+ if (!drv->name)
+ drv->name = dev_name(&pdev->dev);
+
+ ret = rpmh_probe_tcs_config(pdev, drv);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&drv->lock);
+ bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
+
+ irq = platform_get_irq(pdev, drv->id);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
+ drv->name, drv);
+ if (ret)
+ return ret;
+
+ /* Enable the active TCS to send requests immediately */
+ write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask);
+
+ spin_lock_init(&drv->client.cache_lock);
+ INIT_LIST_HEAD(&drv->client.cache);
+ INIT_LIST_HEAD(&drv->client.batch_cache);
+
+ dev_set_drvdata(&pdev->dev, drv);
+
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static const struct of_device_id rpmh_drv_match[] = {
+ { .compatible = "qcom,rpmh-rsc", },
+ { }
+};
+
+static struct platform_driver rpmh_driver = {
+ .probe = rpmh_rsc_probe,
+ .driver = {
+ .name = "rpmh",
+ .of_match_table = rpmh_drv_match,
+ },
+};
+
+static int __init rpmh_driver_init(void)
+{
+ return platform_driver_register(&rpmh_driver);
+}
+arch_initcall(rpmh_driver_init);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
new file mode 100644
index 000000000000..c7beb6841289
--- /dev/null
+++ b/drivers/soc/qcom/rpmh.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <soc/qcom/rpmh.h>
+
+#include "rpmh-internal.h"
+
+#define RPMH_TIMEOUT_MS msecs_to_jiffies(10000)
+
+#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \
+ struct rpmh_request name = { \
+ .msg = { \
+ .state = s, \
+ .cmds = name.cmd, \
+ .num_cmds = 0, \
+ .wait_for_compl = true, \
+ }, \
+ .cmd = { { 0 } }, \
+ .completion = q, \
+ .dev = dev, \
+ .needs_free = false, \
+ }
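+
+/*
+ * Example expansion (an illustrative sketch, mirroring how rpmh_write()
+ * below builds a blocking request on the stack):
+ *
+ *	DECLARE_COMPLETION_ONSTACK(compl);
+ *	DEFINE_RPMH_MSG_ONSTACK(dev, RPMH_ACTIVE_ONLY_STATE, &compl, rpm_msg);
+ */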
+
+#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)
+
+/**
+ * struct cache_req - the request object for caching
+ *
+ * @addr: the address of the resource
+ * @sleep_val: the sleep vote
+ * @wake_val: the wake vote
+ * @list: linked list obj
+ */
+struct cache_req {
+ u32 addr;
+ u32 sleep_val;
+ u32 wake_val;
+ struct list_head list;
+};
+
+/**
+ * struct batch_cache_req - An entry in our batch cache
+ *
+ * @list: linked list obj
+ * @count: number of messages
+ * @rpm_msgs: the messages
+ */
+struct batch_cache_req {
+ struct list_head list;
+ int count;
+ struct rpmh_request rpm_msgs[];
+};
+
+static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
+{
+ struct rsc_drv *drv = dev_get_drvdata(dev->parent);
+
+ return &drv->client;
+}
+
+void rpmh_tx_done(const struct tcs_request *msg, int r)
+{
+ struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
+ msg);
+ struct completion *compl = rpm_msg->completion;
+
+ rpm_msg->err = r;
+
+ if (r)
+ dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
+ rpm_msg->msg.cmds[0].addr, r);
+
+ if (!compl)
+ goto exit;
+
+ /* Signal the blocking thread we are done */
+ complete(compl);
+
+exit:
+ if (rpm_msg->needs_free)
+ kfree(rpm_msg);
+}
+
+static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
+{
+ struct cache_req *p, *req = NULL;
+
+ list_for_each_entry(p, &ctrlr->cache, list) {
+ if (p->addr == addr) {
+ req = p;
+ break;
+ }
+ }
+
+ return req;
+}
+
+static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
+ enum rpmh_state state,
+ struct tcs_cmd *cmd)
+{
+ struct cache_req *req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ req = __find_req(ctrlr, cmd->addr);
+ if (req)
+ goto existing;
+
+ req = kzalloc(sizeof(*req), GFP_ATOMIC);
+ if (!req) {
+ req = ERR_PTR(-ENOMEM);
+ goto unlock;
+ }
+
+ req->addr = cmd->addr;
+ req->sleep_val = req->wake_val = UINT_MAX;
+ INIT_LIST_HEAD(&req->list);
+ list_add_tail(&req->list, &ctrlr->cache);
+
+existing:
+ switch (state) {
+ case RPMH_ACTIVE_ONLY_STATE:
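+ /*
+ * An active-only vote also updates the cached wake vote, but
+ * only once a sleep vote exists for this resource.
+ */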
+ if (req->sleep_val != UINT_MAX)
+ req->wake_val = cmd->data;
+ break;
+ case RPMH_WAKE_ONLY_STATE:
+ req->wake_val = cmd->data;
+ break;
+ case RPMH_SLEEP_STATE:
+ req->sleep_val = cmd->data;
+ break;
+ default:
+ break;
+ }
+
+ ctrlr->dirty = true;
+unlock:
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+ return req;
+}
+
+/**
+ * __rpmh_write: Cache and send the RPMH request
+ *
+ * @dev: The device making the request
+ * @state: Active/Sleep request type
+ * @rpm_msg: The data that needs to be sent (cmds).
+ *
+ * Cache the RPMH request and send if the state is ACTIVE_ONLY.
+ * SLEEP/WAKE_ONLY requests are not sent to the controller at
+ * this time. Use rpmh_flush() to send them to the controller.
+ */
+static int __rpmh_write(const struct device *dev, enum rpmh_state state,
+ struct rpmh_request *rpm_msg)
+{
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ int ret = -EINVAL;
+ struct cache_req *req;
+ int i;
+
+ rpm_msg->msg.state = state;
+
+ /* Cache the request in our store and link the payload */
+ for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
+ req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ }
+
+ if (state == RPMH_ACTIVE_ONLY_STATE) {
+ WARN_ON(irqs_disabled());
+ ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
+ } else {
+ ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
+ &rpm_msg->msg);
+ /* Clean up our call by spoofing tx_done */
+ rpmh_tx_done(&rpm_msg->msg, ret);
+ }
+
+ return ret;
+}
+
+static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
+ return -EINVAL;
+
+ memcpy(req->cmd, cmd, n * sizeof(*cmd));
+
+ req->msg.state = state;
+ req->msg.cmds = req->cmd;
+ req->msg.num_cmds = n;
+
+ return 0;
+}
+
+/**
+ * rpmh_write_async: Write a set of RPMH commands
+ *
+ * @dev: The device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in payload
+ *
+ * Write a set of RPMH commands; their order is maintained and they
+ * will be sent as a single shot.
+ */
+int rpmh_write_async(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ struct rpmh_request *rpm_msg;
+ int ret;
+
+ rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
+ if (!rpm_msg)
+ return -ENOMEM;
+ rpm_msg->needs_free = true;
+
+ ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
+ if (ret) {
+ kfree(rpm_msg);
+ return ret;
+ }
+
+ return __rpmh_write(dev, state, rpm_msg);
+}
+EXPORT_SYMBOL(rpmh_write_async);
+
+/**
+ * rpmh_write: Write a set of RPMH commands and block until response
+ *
+ * @dev: The device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in @cmd
+ *
+ * May sleep. Do not call from atomic contexts.
+ */
+int rpmh_write(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ DECLARE_COMPLETION_ONSTACK(compl);
+ DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
+ int ret;
+
+ if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
+ return -EINVAL;
+
+ memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
+ rpm_msg.msg.num_cmds = n;
+
+ ret = __rpmh_write(dev, state, &rpm_msg);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
+ WARN_ON(!ret);
+ return (ret > 0) ? 0 : -ETIMEDOUT;
+}
+EXPORT_SYMBOL(rpmh_write);
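+
+/*
+ * Example client usage of rpmh_write() (a sketch only; the address and
+ * data values are hypothetical, and @dev must be a child of the RSC):
+ *
+ *	struct tcs_cmd cmd = { .addr = 0x30010, .data = 0x1, .wait = true };
+ *	int ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
+ *	if (ret)
+ *		dev_err(dev, "active vote failed: %d\n", ret);
+ */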
+
+static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_add_tail(&req->list, &ctrlr->batch_cache);
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
+static int flush_batch(struct rpmh_ctrlr *ctrlr)
+{
+ struct batch_cache_req *req;
+ const struct rpmh_request *rpm_msg;
+ unsigned long flags;
+ int ret = 0;
+ int i;
+
+ /* Send Sleep/Wake requests to the controller, expect no response */
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_for_each_entry(req, &ctrlr->batch_cache, list) {
+ for (i = 0; i < req->count; i++) {
+ rpm_msg = req->rpm_msgs + i;
+ ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
+ &rpm_msg->msg);
+ if (ret)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+ return ret;
+}
+
+static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
+{
+ struct batch_cache_req *req, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+ kfree(req);
+ INIT_LIST_HEAD(&ctrlr->batch_cache);
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
+/**
+ * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
+ * batch to finish.
+ *
+ * @dev: the device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The array of counts of elements in each batch, 0 terminated.
+ *
+ * Write a request to the RSC controller without caching. If the request
+ * state is ACTIVE, then the requests are treated as completion requests
+ * and sent to the controller immediately. The function waits until all the
+ * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
+ * request is sent as fire-n-forget and no ack is expected.
+ *
+ * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
+ */
+int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 *n)
+{
+ struct batch_cache_req *req;
+ struct rpmh_request *rpm_msgs;
+ DECLARE_COMPLETION_ONSTACK(compl);
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ unsigned long time_left;
+ int count = 0;
+ int ret, i, j;
+
+ if (!cmd || !n)
+ return -EINVAL;
+
+ while (n[count] > 0)
+ count++;
+ if (!count)
+ return -EINVAL;
+
+ req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
+ GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+ req->count = count;
+ rpm_msgs = req->rpm_msgs;
+
+ for (i = 0; i < count; i++) {
+ __fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
+ cmd += n[i];
+ }
+
+ if (state != RPMH_ACTIVE_ONLY_STATE) {
+ cache_batch(ctrlr, req);
+ return 0;
+ }
+
+ for (i = 0; i < count; i++) {
+ rpm_msgs[i].completion = &compl;
+ ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
+ if (ret) {
+ pr_err("Error(%d) sending RPMH message addr=%#x\n",
+ ret, rpm_msgs[i].msg.cmds[0].addr);
+ for (j = i; j < count; j++)
+ rpmh_tx_done(&rpm_msgs[j].msg, ret);
+ break;
+ }
+ }
+
+ time_left = RPMH_TIMEOUT_MS;
+ for (i = 0; i < count; i++) {
+ time_left = wait_for_completion_timeout(&compl, time_left);
+ if (!time_left) {
+ /*
+ * Better hope they never finish because they'll signal
+ * the completion on our stack and that's bad once
+ * we've returned from the function.
+ */
+ WARN_ON(1);
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ }
+
+exit:
+ kfree(req);
+
+ return ret;
+}
+EXPORT_SYMBOL(rpmh_write_batch);
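+
+/*
+ * Example of the zero-terminated count array @n above (illustrative
+ * values): six commands sent as two batches of three:
+ *
+ *	struct tcs_cmd cmds[6];		// filled in by the client
+ *	u32 n[] = { 3, 3, 0 };
+
+ *	ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
+ */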
+
+static int is_req_valid(struct cache_req *req)
+{
+ return (req->sleep_val != UINT_MAX &&
+ req->wake_val != UINT_MAX &&
+ req->sleep_val != req->wake_val);
+}
+
+static int send_single(const struct device *dev, enum rpmh_state state,
+ u32 addr, u32 data)
+{
+ DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+
+ /* Wake sets are always complete and sleep sets are not */
+ rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
+ rpm_msg.cmd[0].addr = addr;
+ rpm_msg.cmd[0].data = data;
+ rpm_msg.msg.num_cmds = 1;
+
+ return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
+}
+
+/**
+ * rpmh_flush: Flushes the buffered sleep and wake sets to TCSes
+ *
+ * @dev: The device making the request
+ *
+ * Return: -EBUSY if the controller is busy, probably waiting on a response
+ * to an RPMH request sent earlier.
+ *
+ * This function is always called from the sleep code from the last CPU
+ * that is powering down the entire system. Since no other RPMH API would be
+ * executing at this time, it is safe to run lockless.
+ */
+int rpmh_flush(const struct device *dev)
+{
+ struct cache_req *p;
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ int ret;
+
+ if (!ctrlr->dirty) {
+ pr_debug("Skipping flush, TCS has latest data.\n");
+ return 0;
+ }
+
+ /* First flush the cached batch requests */
+ ret = flush_batch(ctrlr);
+ if (ret)
+ return ret;
+
+ /*
+ * Nobody else should be calling this function other than system PM,
+ * hence we can run without locks.
+ */
+ list_for_each_entry(p, &ctrlr->cache, list) {
+ if (!is_req_valid(p)) {
+ pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
+ __func__, p->addr, p->sleep_val, p->wake_val);
+ continue;
+ }
+ ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
+ if (ret)
+ return ret;
+ ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
+ p->addr, p->wake_val);
+ if (ret)
+ return ret;
+ }
+
+ ctrlr->dirty = false;
+
+ return 0;
+}
+EXPORT_SYMBOL(rpmh_flush);
+
+/**
+ * rpmh_invalidate: Invalidate all sleep and active sets.
+ *
+ * @dev: The device making the request
+ *
+ * Invalidate the sleep and active values in the TCS blocks.
+ */
+int rpmh_invalidate(const struct device *dev)
+{
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ int ret;
+
+ invalidate_batch(ctrlr);
+ ctrlr->dirty = true;
+
+ do {
+ ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
+ } while (ret == -EAGAIN);
+
+ return ret;
+}
+EXPORT_SYMBOL(rpmh_invalidate);
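+
+/*
+ * Typical sleep/wake programming sequence with this API (a sketch,
+ * assuming the caller is the system PM path described in rpmh_flush()):
+ *
+ *	rpmh_invalidate(dev);				// drop stale sets
+ *	rpmh_write(dev, RPMH_SLEEP_STATE, &slp, 1);	// cached for flush
+ *	rpmh_write(dev, RPMH_WAKE_ONLY_STATE, &wake, 1);// cached for flush
+ *	...
+ *	rpmh_flush(dev);	// sent to the TCSes by the last CPU down
+ */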
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 70b2ee80d6bd..bf4bd71ab53f 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -364,11 +364,6 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
end = phdr_to_last_uncached_entry(phdr);
cached = phdr_to_last_cached_entry(phdr);
- if (smem->global_partition) {
- dev_err(smem->dev, "Already found the global partition\n");
- return -EINVAL;
- }
-
while (hdr < end) {
if (hdr->canary != SMEM_PRIVATE_CANARY)
goto bad_canary;
@@ -736,6 +731,11 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem)
bool found = false;
int i;
+ if (smem->global_partition) {
+ dev_err(smem->dev, "Already found the global partition\n");
+ return -EINVAL;
+ }
+
ptable = qcom_smem_get_ptable(smem);
if (IS_ERR(ptable))
return PTR_ERR(ptable);
diff --git a/drivers/soc/qcom/trace-rpmh.h b/drivers/soc/qcom/trace-rpmh.h
new file mode 100644
index 000000000000..feb0cb455e37
--- /dev/null
+++ b/drivers/soc/qcom/trace-rpmh.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_TRACE_RPMH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPMH_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpmh
+
+#include <linux/tracepoint.h>
+#include "rpmh-internal.h"
+
+TRACE_EVENT(rpmh_tx_done,
+
+ TP_PROTO(struct rsc_drv *d, int m, const struct tcs_request *r, int e),
+
+ TP_ARGS(d, m, r, e),
+
+ TP_STRUCT__entry(
+ __string(name, d->name)
+ __field(int, m)
+ __field(u32, addr)
+ __field(u32, data)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, d->name);
+ __entry->m = m;
+ __entry->addr = r->cmds[0].addr;
+ __entry->data = r->cmds[0].data;
+ __entry->err = e;
+ ),
+
+ TP_printk("%s: ack: tcs-m: %d addr: %#x data: %#x errno: %d",
+ __get_str(name), __entry->m, __entry->addr, __entry->data,
+ __entry->err)
+);
+
+TRACE_EVENT(rpmh_send_msg,
+
+ TP_PROTO(struct rsc_drv *d, int m, int n, u32 h,
+ const struct tcs_cmd *c),
+
+ TP_ARGS(d, m, n, h, c),
+
+ TP_STRUCT__entry(
+ __string(name, d->name)
+ __field(int, m)
+ __field(int, n)
+ __field(u32, hdr)
+ __field(u32, addr)
+ __field(u32, data)
+ __field(bool, wait)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, d->name);
+ __entry->m = m;
+ __entry->n = n;
+ __entry->hdr = h;
+ __entry->addr = c->addr;
+ __entry->data = c->data;
+ __entry->wait = c->wait;
+ ),
+
+ TP_printk("%s: send-msg: tcs(m): %d cmd(n): %d msgid: %#x addr: %#x data: %#x complete: %d",
+ __get_str(name), __entry->m, __entry->n, __entry->hdr,
+ __entry->addr, __entry->data, __entry->wait)
+);
+
+#endif /* _TRACE_RPMH_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace-rpmh
+
+#include <trace/define_trace.h>
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ad5d68e1dab7..671d078349cc 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -688,6 +688,19 @@ config SPI_TXX9
help
SPI driver for Toshiba TXx9 MIPS SoCs
+config SPI_UNIPHIER
+ tristate "Socionext UniPhier SPI Controller"
+ depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+ help
+ This enables a driver for the Socionext UniPhier SoC SCSSI SPI controller.
+
+ UniPhier SoCs have SCSSI and MCSSI SPI controllers.
+ Every UniPhier SoC has an SCSSI controller, which supports a single channel.
+ Older UniPhier Pro4/Pro5 SoCs also have an MCSSI controller, which supports multiple channels.
+ This driver supports SCSSI only.
+
+ If your SoC supports SCSSI, say Y here.
+
config SPI_XCOMM
tristate "Analog Devices AD-FMCOMMS1-EBZ SPI-I2C-bridge driver"
depends on I2C
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index cb1f4378b87c..a90d55970036 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -101,6 +101,7 @@ spi-thunderx-objs := spi-cavium.o spi-cavium-thunderx.o
obj-$(CONFIG_SPI_THUNDERX) += spi-thunderx.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
+obj-$(CONFIG_SPI_UNIPHIER) += spi-uniphier.o
obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
obj-$(CONFIG_SPI_XLP) += spi-xlp.o
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 0719bd484891..3f6b657394de 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -176,7 +176,7 @@ static void ath79_spi_cleanup(struct spi_device *spi)
}
static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs,
- u32 word, u8 bits)
+ u32 word, u8 bits, unsigned flags)
{
struct ath79_spi *sp = ath79_spidev_to_sp(spi);
u32 ioc = sp->ioc_base;
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 3aa9e6e3dac8..f29176000b8d 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -49,22 +49,26 @@
struct spi_bitbang_cs {
unsigned nsecs; /* (clock cycle time)/2 */
u32 (*txrx_word)(struct spi_device *spi, unsigned nsecs,
- u32 word, u8 bits);
+ u32 word, u8 bits, unsigned flags);
unsigned (*txrx_bufs)(struct spi_device *,
u32 (*txrx_word)(
struct spi_device *spi,
unsigned nsecs,
- u32 word, u8 bits),
- unsigned, struct spi_transfer *);
+ u32 word, u8 bits,
+ unsigned flags),
+ unsigned, struct spi_transfer *,
+ unsigned);
};
static unsigned bitbang_txrx_8(
struct spi_device *spi,
u32 (*txrx_word)(struct spi_device *spi,
unsigned nsecs,
- u32 word, u8 bits),
+ u32 word, u8 bits,
+ unsigned flags),
unsigned ns,
- struct spi_transfer *t
+ struct spi_transfer *t,
+ unsigned flags
) {
unsigned bits = t->bits_per_word;
unsigned count = t->len;
@@ -76,7 +80,7 @@ static unsigned bitbang_txrx_8(
if (tx)
word = *tx++;
- word = txrx_word(spi, ns, word, bits);
+ word = txrx_word(spi, ns, word, bits, flags);
if (rx)
*rx++ = word;
count -= 1;
@@ -88,9 +92,11 @@ static unsigned bitbang_txrx_16(
struct spi_device *spi,
u32 (*txrx_word)(struct spi_device *spi,
unsigned nsecs,
- u32 word, u8 bits),
+ u32 word, u8 bits,
+ unsigned flags),
unsigned ns,
- struct spi_transfer *t
+ struct spi_transfer *t,
+ unsigned flags
) {
unsigned bits = t->bits_per_word;
unsigned count = t->len;
@@ -102,7 +108,7 @@ static unsigned bitbang_txrx_16(
if (tx)
word = *tx++;
- word = txrx_word(spi, ns, word, bits);
+ word = txrx_word(spi, ns, word, bits, flags);
if (rx)
*rx++ = word;
count -= 2;
@@ -114,9 +120,11 @@ static unsigned bitbang_txrx_32(
struct spi_device *spi,
u32 (*txrx_word)(struct spi_device *spi,
unsigned nsecs,
- u32 word, u8 bits),
+ u32 word, u8 bits,
+ unsigned flags),
unsigned ns,
- struct spi_transfer *t
+ struct spi_transfer *t,
+ unsigned flags
) {
unsigned bits = t->bits_per_word;
unsigned count = t->len;
@@ -128,7 +136,7 @@ static unsigned bitbang_txrx_32(
if (tx)
word = *tx++;
- word = txrx_word(spi, ns, word, bits);
+ word = txrx_word(spi, ns, word, bits, flags);
if (rx)
*rx++ = word;
count -= 4;
@@ -235,8 +243,24 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct spi_bitbang_cs *cs = spi->controller_state;
unsigned nsecs = cs->nsecs;
+ struct spi_bitbang *bitbang;
+
+ bitbang = spi_master_get_devdata(spi->master);
+ if (bitbang->set_line_direction) {
+ int err;
- return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t);
+ err = bitbang->set_line_direction(spi, !!(t->tx_buf));
+ if (err < 0)
+ return err;
+ }
+
+ if (spi->mode & SPI_3WIRE) {
+ unsigned flags;
+
+ flags = t->tx_buf ? SPI_MASTER_NO_RX : SPI_MASTER_NO_TX;
+ return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, flags);
+ }
+ return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, 0);
}
/*----------------------------------------------------------------------*/
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index 22a31e4a1a11..1a3510215841 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -144,9 +144,9 @@ static void butterfly_chipselect(struct spi_device *spi, int value)
static u32
butterfly_txrx_word_mode0(struct spi_device *spi, unsigned nsecs, u32 word,
- u8 bits)
+ u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
/*----------------------------------------------------------------------*/
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index f3dad6fcdc35..7c88f74f7f47 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -319,7 +319,7 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
*/
if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
CDNS_SPI_IXR_TXFULL)
- usleep_range(10, 20);
+ udelay(10);
if (xspi->txbuf)
cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
@@ -739,7 +739,7 @@ static int __maybe_unused cnds_runtime_resume(struct device *dev)
ret = clk_prepare_enable(xspi->ref_clk);
if (ret) {
dev_err(dev, "Cannot enable device clock.\n");
- clk_disable(xspi->pclk);
+ clk_disable_unprepare(xspi->pclk);
return ret;
}
return 0;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 577084bb911b..a02099c90c5c 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -217,7 +217,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
pdata = &dspi->pdata;
/* program delay transfers if tx_delay is non zero */
- if (spicfg->wdelay)
+ if (spicfg && spicfg->wdelay)
spidat1 |= SPIDAT1_WDEL;
/*
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index d25cc4037e23..e80f60ed6fdf 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -15,11 +15,13 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/scatterlist.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/property.h>
+#include <linux/regmap.h>
#include "spi-dw.h"
@@ -28,10 +30,90 @@
struct dw_spi_mmio {
struct dw_spi dws;
struct clk *clk;
+ void *priv;
};
+#define MSCC_CPU_SYSTEM_CTRL_GENERAL_CTRL 0x24
+#define OCELOT_IF_SI_OWNER_MASK GENMASK(5, 4)
+#define OCELOT_IF_SI_OWNER_OFFSET 4
+#define MSCC_IF_SI_OWNER_SISL 0
+#define MSCC_IF_SI_OWNER_SIBM 1
+#define MSCC_IF_SI_OWNER_SIMC 2
+
+#define MSCC_SPI_MST_SW_MODE 0x14
+#define MSCC_SPI_MST_SW_MODE_SW_PIN_CTRL_MODE BIT(13)
+#define MSCC_SPI_MST_SW_MODE_SW_SPI_CS(x) (x << 5)
+
+struct dw_spi_mscc {
+ struct regmap *syscon;
+ void __iomem *spi_mst;
+};
+
+/*
+ * The DesignWare SPI controller (referred to as master in the documentation)
+ * automatically deasserts chip select when the tx fifo is empty. The chip
+ * selects then need to be either driven as GPIOs or, for the first 4, using
+ * the SPI boot controller registers. The final chip select is an OR gate
+ * between the DesignWare SPI controller and the SPI boot controller.
+ */
+static void dw_spi_mscc_set_cs(struct spi_device *spi, bool enable)
+{
+ struct dw_spi *dws = spi_master_get_devdata(spi->master);
+ struct dw_spi_mmio *dwsmmio = container_of(dws, struct dw_spi_mmio, dws);
+ struct dw_spi_mscc *dwsmscc = dwsmmio->priv;
+ u32 cs = spi->chip_select;
+
+ if (cs < 4) {
+ u32 sw_mode = MSCC_SPI_MST_SW_MODE_SW_PIN_CTRL_MODE;
+
+ if (!enable)
+ sw_mode |= MSCC_SPI_MST_SW_MODE_SW_SPI_CS(BIT(cs));
+
+ writel(sw_mode, dwsmscc->spi_mst + MSCC_SPI_MST_SW_MODE);
+ }
+
+ dw_spi_set_cs(spi, enable);
+}
+
+static int dw_spi_mscc_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ struct dw_spi_mscc *dwsmscc;
+ struct resource *res;
+
+ dwsmscc = devm_kzalloc(&pdev->dev, sizeof(*dwsmscc), GFP_KERNEL);
+ if (!dwsmscc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ dwsmscc->spi_mst = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dwsmscc->spi_mst)) {
+ dev_err(&pdev->dev, "SPI_MST region map failed\n");
+ return PTR_ERR(dwsmscc->spi_mst);
+ }
+
+ dwsmscc->syscon = syscon_regmap_lookup_by_compatible("mscc,ocelot-cpu-syscon");
+ if (IS_ERR(dwsmscc->syscon))
+ return PTR_ERR(dwsmscc->syscon);
+
+ /* Deassert all CS */
+ writel(0, dwsmscc->spi_mst + MSCC_SPI_MST_SW_MODE);
+
+ /* Select the owner of the SI interface */
+ regmap_update_bits(dwsmscc->syscon, MSCC_CPU_SYSTEM_CTRL_GENERAL_CTRL,
+ OCELOT_IF_SI_OWNER_MASK,
+ MSCC_IF_SI_OWNER_SIMC << OCELOT_IF_SI_OWNER_OFFSET);
+
+ dwsmmio->dws.set_cs = dw_spi_mscc_set_cs;
+ dwsmmio->priv = dwsmscc;
+
+ return 0;
+}
+
static int dw_spi_mmio_probe(struct platform_device *pdev)
{
+ int (*init_func)(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio);
struct dw_spi_mmio *dwsmmio;
struct dw_spi *dws;
struct resource *mem;
@@ -99,6 +181,13 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
}
}
+ init_func = device_get_match_data(&pdev->dev);
+ if (init_func) {
+ ret = init_func(pdev, dwsmmio);
+ if (ret)
+ goto out;
+ }
+
ret = dw_spi_add_host(&pdev->dev, dws);
if (ret)
goto out;
@@ -123,6 +212,7 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
static const struct of_device_id dw_spi_mmio_of_match[] = {
{ .compatible = "snps,dw-apb-ssi", },
+ { .compatible = "mscc,ocelot-spi", .data = dw_spi_mscc_init},
{ /* end of table */}
};
MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index f693bfe95ab9..ac2eb89ef7a5 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -133,7 +133,7 @@ static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
}
#endif /* CONFIG_DEBUG_FS */
-static void dw_spi_set_cs(struct spi_device *spi, bool enable)
+void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct chip_data *chip = spi_get_ctldata(spi);
@@ -145,6 +145,7 @@ static void dw_spi_set_cs(struct spi_device *spi, bool enable)
if (!enable)
dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
}
+EXPORT_SYMBOL_GPL(dw_spi_set_cs);
/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
@@ -485,6 +486,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
+ spi_controller_set_devdata(master, dws);
+
ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
master);
if (ret < 0) {
@@ -505,6 +508,9 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
master->dev.of_node = dev->of_node;
master->flags = SPI_MASTER_GPIO_SS;
+ if (dws->set_cs)
+ master->set_cs = dws->set_cs;
+
/* Basic HW init */
spi_hw_init(dev, dws);
@@ -518,7 +524,6 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
}
}
- spi_controller_set_devdata(master, dws);
ret = devm_spi_register_controller(dev, master);
if (ret) {
dev_err(&master->dev, "problem registering spi master\n");
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 2cde2473b3e9..0168b08364d5 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -112,6 +112,7 @@ struct dw_spi {
u32 reg_io_width; /* DR I/O width in bytes */
u16 bus_num;
u16 num_cs; /* supported slave numbers */
+ void (*set_cs)(struct spi_device *spi, bool enable);
/* Current message transfer state info */
size_t len;
@@ -244,6 +245,7 @@ struct dw_spi_chip {
void (*cs_control)(u32 command);
};
+extern void dw_spi_set_cs(struct spi_device *spi, bool enable);
extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws);
extern void dw_spi_remove_host(struct dw_spi *dws);
extern int dw_spi_suspend_host(struct dw_spi *dws);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 0630962ce442..7cb3ab0a35a0 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1,17 +1,9 @@
-/*
- * drivers/spi/spi-fsl-dspi.c
- *
- * Copyright 2013 Freescale Semiconductor, Inc.
- *
- * Freescale DSPI driver
- * This file contains a driver for the Freescale DSPI
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2013 Freescale Semiconductor, Inc.
+//
+// Freescale DSPI driver
+// This file contains a driver for the Freescale DSPI
#include <linux/clk.h>
#include <linux/delay.h>
@@ -38,10 +30,6 @@
#define DRIVER_NAME "fsl-dspi"
-#define TRAN_STATE_RX_VOID 0x01
-#define TRAN_STATE_TX_VOID 0x02
-#define TRAN_STATE_WORD_ODD_NUM 0x04
-
#define DSPI_FIFO_SIZE 4
#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
@@ -50,6 +38,7 @@
#define SPI_MCR_PCSIS (0x3F << 16)
#define SPI_MCR_CLR_TXF (1 << 11)
#define SPI_MCR_CLR_RXF (1 << 10)
+#define SPI_MCR_XSPI (1 << 3)
#define SPI_TCR 0x08
#define SPI_TCR_GET_TCNT(x) (((x) & 0xffff0000) >> 16)
@@ -86,11 +75,16 @@
#define SPI_RSER_TCFQE 0x80000000
#define SPI_PUSHR 0x34
-#define SPI_PUSHR_CONT (1 << 31)
-#define SPI_PUSHR_CTAS(x) (((x) & 0x00000003) << 28)
-#define SPI_PUSHR_EOQ (1 << 27)
-#define SPI_PUSHR_CTCNT (1 << 26)
-#define SPI_PUSHR_PCS(x) (((1 << x) & 0x0000003f) << 16)
+#define SPI_PUSHR_CMD_CONT (1 << 15)
+#define SPI_PUSHR_CONT (SPI_PUSHR_CMD_CONT << 16)
+#define SPI_PUSHR_CMD_CTAS(x) (((x) & 0x0003) << 12)
+#define SPI_PUSHR_CTAS(x) (SPI_PUSHR_CMD_CTAS(x) << 16)
+#define SPI_PUSHR_CMD_EOQ (1 << 11)
+#define SPI_PUSHR_EOQ (SPI_PUSHR_CMD_EOQ << 16)
+#define SPI_PUSHR_CMD_CTCNT (1 << 10)
+#define SPI_PUSHR_CTCNT (SPI_PUSHR_CMD_CTCNT << 16)
+#define SPI_PUSHR_CMD_PCS(x) ((1 << x) & 0x003f)
+#define SPI_PUSHR_PCS(x) (SPI_PUSHR_CMD_PCS(x) << 16)
#define SPI_PUSHR_TXDATA(x) ((x) & 0x0000ffff)
#define SPI_PUSHR_SLAVE 0x34
@@ -107,21 +101,31 @@
#define SPI_RXFR2 0x84
#define SPI_RXFR3 0x88
+#define SPI_CTARE(x) (0x11c + (((x) & 0x3) * 4))
+#define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16)
+#define SPI_CTARE_DTCP(x) ((x) & 0x7ff)
+
+#define SPI_SREX 0x13c
+
#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_BITS_MASK SPI_CTAR_FMSZ(0xf)
#define SPI_FRAME_BITS_16 SPI_CTAR_FMSZ(0xf)
#define SPI_FRAME_BITS_8 SPI_CTAR_FMSZ(0x7)
+#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)
+#define SPI_FRAME_EBITS_MASK SPI_CTARE_FMSZE(1)
+
+/* Register offsets for regmap_pushr */
+#define PUSHR_CMD 0x0
+#define PUSHR_TX 0x2
+
#define SPI_CS_INIT 0x01
#define SPI_CS_ASSERT 0x02
#define SPI_CS_DROP 0x04
-#define SPI_TCR_TCNT_MAX 0x10000
-
#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
struct chip_data {
- u32 mcr_val;
u32 ctar_val;
u16 void_write_data;
};
@@ -135,6 +139,7 @@ enum dspi_trans_mode {
struct fsl_dspi_devtype_data {
enum dspi_trans_mode trans_mode;
u8 max_clock_factor;
+ bool xspi_mode;
};
static const struct fsl_dspi_devtype_data vf610_data = {
@@ -145,6 +150,7 @@ static const struct fsl_dspi_devtype_data vf610_data = {
static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
.trans_mode = DSPI_TCFQ_MODE,
.max_clock_factor = 8,
+ .xspi_mode = true,
};
static const struct fsl_dspi_devtype_data ls2085a_data = {
@@ -179,6 +185,7 @@ struct fsl_dspi {
struct platform_device *pdev;
struct regmap *regmap;
+ struct regmap *regmap_pushr;
int irq;
struct clk *clk;
@@ -186,32 +193,62 @@ struct fsl_dspi {
struct spi_message *cur_msg;
struct chip_data *cur_chip;
size_t len;
- void *tx;
- void *tx_end;
+ const void *tx;
void *rx;
void *rx_end;
- char dataflags;
- u8 cs;
u16 void_write_data;
- u32 cs_change;
+ u16 tx_cmd;
+ u8 bits_per_word;
+ u8 bytes_per_word;
const struct fsl_dspi_devtype_data *devtype_data;
wait_queue_head_t waitq;
u32 waitflags;
- u32 spi_tcnt;
struct fsl_dspi_dma *dma;
};
-static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word);
+static u32 dspi_pop_tx(struct fsl_dspi *dspi)
+{
+ u32 txdata = 0;
+
+ if (dspi->tx) {
+ if (dspi->bytes_per_word == 1)
+ txdata = *(u8 *)dspi->tx;
+ else if (dspi->bytes_per_word == 2)
+ txdata = *(u16 *)dspi->tx;
+ else /* dspi->bytes_per_word == 4 */
+ txdata = *(u32 *)dspi->tx;
+ dspi->tx += dspi->bytes_per_word;
+ }
+ dspi->len -= dspi->bytes_per_word;
+ return txdata;
+}
-static inline int is_double_byte_mode(struct fsl_dspi *dspi)
+static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
- unsigned int val;
+ u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
- regmap_read(dspi->regmap, SPI_CTAR(0), &val);
+ if (dspi->len > 0)
+ cmd |= SPI_PUSHR_CMD_CONT;
+ return cmd << 16 | data;
+}
- return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
+static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
+{
+ if (!dspi->rx)
+ return;
+
+ /* Mask off the undefined high bits */
+ rxdata &= (1 << dspi->bits_per_word) - 1;
+
+ if (dspi->bytes_per_word == 1)
+ *(u8 *)dspi->rx = rxdata;
+ else if (dspi->bytes_per_word == 2)
+ *(u16 *)dspi->rx = rxdata;
+ else /* dspi->bytes_per_word == 4 */
+ *(u32 *)dspi->rx = rxdata;
+ dspi->rx += dspi->bytes_per_word;
}
static void dspi_tx_dma_callback(void *arg)
@@ -226,19 +263,11 @@ static void dspi_rx_dma_callback(void *arg)
{
struct fsl_dspi *dspi = arg;
struct fsl_dspi_dma *dma = dspi->dma;
- int rx_word;
int i;
- u16 d;
-
- rx_word = is_double_byte_mode(dspi);
- if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
- for (i = 0; i < dma->curr_xfer_len; i++) {
- d = dspi->dma->rx_dma_buf[i];
- rx_word ? (*(u16 *)dspi->rx = d) :
- (*(u8 *)dspi->rx = d);
- dspi->rx += rx_word + 1;
- }
+ if (dspi->rx) {
+ for (i = 0; i < dma->curr_xfer_len; i++)
+ dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
}
complete(&dma->cmd_rx_complete);
@@ -249,16 +278,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
struct fsl_dspi_dma *dma = dspi->dma;
struct device *dev = &dspi->pdev->dev;
int time_left;
- int tx_word;
int i;
- tx_word = is_double_byte_mode(dspi);
-
- for (i = 0; i < dma->curr_xfer_len; i++) {
- dspi->dma->tx_dma_buf[i] = dspi_data_to_pushr(dspi, tx_word);
- if ((dspi->cs_change) && (!dspi->len))
- dspi->dma->tx_dma_buf[i] &= ~SPI_PUSHR_CONT;
- }
+ for (i = 0; i < dma->curr_xfer_len; i++)
+ dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);
dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
dma->tx_dma_phys,
@@ -327,18 +350,17 @@ static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
struct fsl_dspi_dma *dma = dspi->dma;
struct device *dev = &dspi->pdev->dev;
+ struct spi_message *message = dspi->cur_msg;
int curr_remaining_bytes;
int bytes_per_buffer;
- int word = 1;
int ret = 0;
- if (is_double_byte_mode(dspi))
- word = 2;
curr_remaining_bytes = dspi->len;
bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
while (curr_remaining_bytes) {
/* Check if current transfer fits the DMA buffer */
- dma->curr_xfer_len = curr_remaining_bytes / word;
+ dma->curr_xfer_len = curr_remaining_bytes
+ / dspi->bytes_per_word;
if (dma->curr_xfer_len > bytes_per_buffer)
dma->curr_xfer_len = bytes_per_buffer;
@@ -348,7 +370,10 @@ static int dspi_dma_xfer(struct fsl_dspi *dspi)
goto exit;
} else {
- curr_remaining_bytes -= dma->curr_xfer_len * word;
+ const int len =
+ dma->curr_xfer_len * dspi->bytes_per_word;
+ curr_remaining_bytes -= len;
+ message->actual_length += len;
if (curr_remaining_bytes < 0)
curr_remaining_bytes = 0;
}
@@ -534,125 +559,91 @@ static void ns_delay_scale(char *psc, char *sc, int delay_ns,
}
}
-static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word)
+static void fifo_write(struct fsl_dspi *dspi)
{
- u16 d16;
-
- if (!(dspi->dataflags & TRAN_STATE_TX_VOID))
- d16 = tx_word ? *(u16 *)dspi->tx : *(u8 *)dspi->tx;
- else
- d16 = dspi->void_write_data;
-
- dspi->tx += tx_word + 1;
- dspi->len -= tx_word + 1;
-
- return SPI_PUSHR_TXDATA(d16) |
- SPI_PUSHR_PCS(dspi->cs) |
- SPI_PUSHR_CTAS(0) |
- SPI_PUSHR_CONT;
+ regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi));
}
-static void dspi_data_from_popr(struct fsl_dspi *dspi, int rx_word)
+static void cmd_fifo_write(struct fsl_dspi *dspi)
{
- u16 d;
- unsigned int val;
-
- regmap_read(dspi->regmap, SPI_POPR, &val);
- d = SPI_POPR_RXDATA(val);
+ u16 cmd = dspi->tx_cmd;
- if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
- rx_word ? (*(u16 *)dspi->rx = d) : (*(u8 *)dspi->rx = d);
-
- dspi->rx += rx_word + 1;
+ if (dspi->len > 0)
+ cmd |= SPI_PUSHR_CMD_CONT;
+ regmap_write(dspi->regmap_pushr, PUSHR_CMD, cmd);
}
-static int dspi_eoq_write(struct fsl_dspi *dspi)
+static void tx_fifo_write(struct fsl_dspi *dspi, u16 txdata)
{
- int tx_count = 0;
- int tx_word;
- u32 dspi_pushr = 0;
+ regmap_write(dspi->regmap_pushr, PUSHR_TX, txdata);
+}
- tx_word = is_double_byte_mode(dspi);
+static void dspi_tcfq_write(struct fsl_dspi *dspi)
+{
+ /* Clear transfer count */
+ dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
- while (dspi->len && (tx_count < DSPI_FIFO_SIZE)) {
- /* If we are in word mode, only have a single byte to transfer
- * switch to byte mode temporarily. Will switch back at the
- * end of the transfer.
+ if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
+ /* Write two TX FIFO entries first, and then the corresponding
+ * CMD FIFO entry.
*/
- if (tx_word && (dspi->len == 1)) {
- dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
- regmap_update_bits(dspi->regmap, SPI_CTAR(0),
- SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
- tx_word = 0;
- }
-
- dspi_pushr = dspi_data_to_pushr(dspi, tx_word);
-
- if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) {
- /* last transfer in the transfer */
- dspi_pushr |= SPI_PUSHR_EOQ;
- if ((dspi->cs_change) && (!dspi->len))
- dspi_pushr &= ~SPI_PUSHR_CONT;
- } else if (tx_word && (dspi->len == 1))
- dspi_pushr |= SPI_PUSHR_EOQ;
+ u32 data = dspi_pop_tx(dspi);
- regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);
-
- tx_count++;
+ if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE(1)) {
+ /* LSB */
+ tx_fifo_write(dspi, data & 0xFFFF);
+ tx_fifo_write(dspi, data >> 16);
+ } else {
+ /* MSB */
+ tx_fifo_write(dspi, data >> 16);
+ tx_fifo_write(dspi, data & 0xFFFF);
+ }
+ cmd_fifo_write(dspi);
+ } else {
+ /* Write one entry to both TX FIFO and CMD FIFO
+ * simultaneously.
+ */
+ fifo_write(dspi);
}
-
- return tx_count * (tx_word + 1);
}
-static int dspi_eoq_read(struct fsl_dspi *dspi)
+static u32 fifo_read(struct fsl_dspi *dspi)
{
- int rx_count = 0;
- int rx_word = is_double_byte_mode(dspi);
-
- while ((dspi->rx < dspi->rx_end)
- && (rx_count < DSPI_FIFO_SIZE)) {
- if (rx_word && (dspi->rx_end - dspi->rx) == 1)
- rx_word = 0;
+ u32 rxdata = 0;
- dspi_data_from_popr(dspi, rx_word);
- rx_count++;
- }
-
- return rx_count;
+ regmap_read(dspi->regmap, SPI_POPR, &rxdata);
+ return rxdata;
}
-static int dspi_tcfq_write(struct fsl_dspi *dspi)
+static void dspi_tcfq_read(struct fsl_dspi *dspi)
{
- int tx_word;
- u32 dspi_pushr = 0;
-
- tx_word = is_double_byte_mode(dspi);
+ dspi_push_rx(dspi, fifo_read(dspi));
+}
- if (tx_word && (dspi->len == 1)) {
- dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
- regmap_update_bits(dspi->regmap, SPI_CTAR(0),
- SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
- tx_word = 0;
+static void dspi_eoq_write(struct fsl_dspi *dspi)
+{
+ int fifo_size = DSPI_FIFO_SIZE;
+
+ /* Fill TX FIFO with as many transfers as possible */
+ while (dspi->len && fifo_size--) {
+ /* Request EOQF for last transfer in FIFO */
+ if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
+ dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
+ /* Clear transfer count for first transfer in FIFO */
+ if (fifo_size == (DSPI_FIFO_SIZE - 1))
+ dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
+ /* Write combined TX FIFO and CMD FIFO entry */
+ fifo_write(dspi);
}
-
- dspi_pushr = dspi_data_to_pushr(dspi, tx_word);
-
- if ((dspi->cs_change) && (!dspi->len))
- dspi_pushr &= ~SPI_PUSHR_CONT;
-
- regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);
-
- return tx_word + 1;
}
-static void dspi_tcfq_read(struct fsl_dspi *dspi)
+static void dspi_eoq_read(struct fsl_dspi *dspi)
{
- int rx_word = is_double_byte_mode(dspi);
-
- if (rx_word && (dspi->rx_end - dspi->rx) == 1)
- rx_word = 0;
+ int fifo_size = DSPI_FIFO_SIZE;
- dspi_data_from_popr(dspi, rx_word);
+ /* Read one FIFO entry at a time and push it to the rx buffer */
+ while ((dspi->rx < dspi->rx_end) && fifo_size--)
+ dspi_push_rx(dspi, fifo_read(dspi));
}
static int dspi_transfer_one_message(struct spi_master *master,
@@ -663,10 +654,6 @@ static int dspi_transfer_one_message(struct spi_master *master,
struct spi_transfer *transfer;
int status = 0;
enum dspi_trans_mode trans_mode;
- u32 spi_tcr;
-
- regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
- dspi->spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
message->actual_length = 0;
@@ -674,32 +661,51 @@ static int dspi_transfer_one_message(struct spi_master *master,
dspi->cur_transfer = transfer;
dspi->cur_msg = message;
dspi->cur_chip = spi_get_ctldata(spi);
- dspi->cs = spi->chip_select;
- dspi->cs_change = 0;
+ /* Prepare command word for CMD FIFO */
+ dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
+ SPI_PUSHR_CMD_PCS(spi->chip_select);
if (list_is_last(&dspi->cur_transfer->transfer_list,
- &dspi->cur_msg->transfers) || transfer->cs_change)
- dspi->cs_change = 1;
+ &dspi->cur_msg->transfers)) {
+ /* Leave PCS activated after last transfer when
+ * cs_change is set.
+ */
+ if (transfer->cs_change)
+ dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
+ } else {
+ /* Keep PCS active between transfers in same message
+ * when cs_change is not set, and de-activate PCS
+ * between transfers in the same message when
+ * cs_change is set.
+ */
+ if (!transfer->cs_change)
+ dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
+ }
+
dspi->void_write_data = dspi->cur_chip->void_write_data;
- dspi->dataflags = 0;
- dspi->tx = (void *)transfer->tx_buf;
- dspi->tx_end = dspi->tx + transfer->len;
+ dspi->tx = transfer->tx_buf;
dspi->rx = transfer->rx_buf;
dspi->rx_end = dspi->rx + transfer->len;
dspi->len = transfer->len;
+ /* Validated transfer specific frame size (defaults applied) */
+ dspi->bits_per_word = transfer->bits_per_word;
+ if (transfer->bits_per_word <= 8)
+ dspi->bytes_per_word = 1;
+ else if (transfer->bits_per_word <= 16)
+ dspi->bytes_per_word = 2;
+ else
+ dspi->bytes_per_word = 4;
- if (!dspi->rx)
- dspi->dataflags |= TRAN_STATE_RX_VOID;
-
- if (!dspi->tx)
- dspi->dataflags |= TRAN_STATE_TX_VOID;
-
- regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
regmap_update_bits(dspi->regmap, SPI_MCR,
- SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
- SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
regmap_write(dspi->regmap, SPI_CTAR(0),
- dspi->cur_chip->ctar_val);
+ dspi->cur_chip->ctar_val |
+ SPI_FRAME_BITS(transfer->bits_per_word));
+ if (dspi->devtype_data->xspi_mode)
+ regmap_write(dspi->regmap, SPI_CTARE(0),
+ SPI_FRAME_EBITS(transfer->bits_per_word)
+ | SPI_CTARE_DTCP(1));
trans_mode = dspi->devtype_data->trans_mode;
switch (trans_mode) {
@@ -750,16 +756,9 @@ static int dspi_setup(struct spi_device *spi)
struct fsl_dspi_platform_data *pdata;
u32 cs_sck_delay = 0, sck_cs_delay = 0;
unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
- unsigned char pasc = 0, asc = 0, fmsz = 0;
+ unsigned char pasc = 0, asc = 0;
unsigned long clkrate;
- if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
- fmsz = spi->bits_per_word - 1;
- } else {
- pr_err("Invalid wordsize\n");
- return -ENODEV;
- }
-
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (chip == NULL) {
@@ -781,9 +780,6 @@ static int dspi_setup(struct spi_device *spi)
sck_cs_delay = pdata->sck_cs_delay;
}
- chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
- SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
-
chip->void_write_data = 0;
clkrate = clk_get_rate(dspi->clk);
@@ -795,8 +791,7 @@ static int dspi_setup(struct spi_device *spi)
/* Set After SCK delay scale values */
ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);
- chip->ctar_val = SPI_CTAR_FMSZ(fmsz)
- | SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
+ chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
| SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
| SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
| SPI_CTAR_PCSSCK(pcssck)
@@ -827,36 +822,20 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
struct spi_message *msg = dspi->cur_msg;
enum dspi_trans_mode trans_mode;
u32 spi_sr, spi_tcr;
- u32 spi_tcnt, tcnt_diff;
- int tx_word;
+ u16 spi_tcnt;
regmap_read(dspi->regmap, SPI_SR, &spi_sr);
regmap_write(dspi->regmap, SPI_SR, spi_sr);
if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) {
- tx_word = is_double_byte_mode(dspi);
-
+ /* Get transfer counter (in number of SPI transfers). It was
+ * reset to 0 when transfer(s) were started.
+ */
regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
- /*
- * The width of SPI Transfer Counter in SPI_TCR is 16bits,
- * so the max couner is 65535. When the counter reach 65535,
- * it will wrap around, counter reset to zero.
- * spi_tcnt my be less than dspi->spi_tcnt, it means the
- * counter already wrapped around.
- * SPI Transfer Counter is a counter of transmitted frames.
- * The size of frame maybe two bytes.
- */
- tcnt_diff = ((spi_tcnt + SPI_TCR_TCNT_MAX) - dspi->spi_tcnt)
- % SPI_TCR_TCNT_MAX;
- tcnt_diff *= (tx_word + 1);
- if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
- tcnt_diff--;
-
- msg->actual_length += tcnt_diff;
-
- dspi->spi_tcnt = spi_tcnt;
+ /* Update total number of bytes that were transferred */
+ msg->actual_length += spi_tcnt * dspi->bytes_per_word;
trans_mode = dspi->devtype_data->trans_mode;
switch (trans_mode) {
@@ -873,14 +852,6 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
}
if (!dspi->len) {
- if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) {
- regmap_update_bits(dspi->regmap,
- SPI_CTAR(0),
- SPI_FRAME_BITS_MASK,
- SPI_FRAME_BITS(16));
- dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM;
- }
-
dspi->waitflags = 1;
wake_up_interruptible(&dspi->waitq);
} else {
@@ -943,16 +914,62 @@ static int dspi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
+static const struct regmap_range dspi_volatile_ranges[] = {
+ regmap_reg_range(SPI_MCR, SPI_TCR),
+ regmap_reg_range(SPI_SR, SPI_SR),
+ regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
+};
+
+static const struct regmap_access_table dspi_volatile_table = {
+ .yes_ranges = dspi_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
+};
+
static const struct regmap_config dspi_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x88,
+ .volatile_table = &dspi_volatile_table,
+};
+
+static const struct regmap_range dspi_xspi_volatile_ranges[] = {
+ regmap_reg_range(SPI_MCR, SPI_TCR),
+ regmap_reg_range(SPI_SR, SPI_SR),
+ regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
+ regmap_reg_range(SPI_SREX, SPI_SREX),
+};
+
+static const struct regmap_access_table dspi_xspi_volatile_table = {
+ .yes_ranges = dspi_xspi_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges),
+};
+
+static const struct regmap_config dspi_xspi_regmap_config[] = {
+ {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x13c,
+ .volatile_table = &dspi_xspi_volatile_table,
+ },
+ {
+ .name = "pushr",
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_stride = 2,
+ .max_register = 0x2,
+ },
};
static void dspi_init(struct fsl_dspi *dspi)
{
+ regmap_write(dspi->regmap, SPI_MCR, SPI_MCR_MASTER | SPI_MCR_PCSIS |
+ (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0));
regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
+ if (dspi->devtype_data->xspi_mode)
+ regmap_write(dspi->regmap, SPI_CTARE(0),
+ SPI_CTARE_FMSZE(0) | SPI_CTARE_DTCP(1));
}
static int dspi_probe(struct platform_device *pdev)
@@ -961,6 +978,7 @@ static int dspi_probe(struct platform_device *pdev)
struct spi_master *master;
struct fsl_dspi *dspi;
struct resource *res;
+ const struct regmap_config *regmap_config;
void __iomem *base;
struct fsl_dspi_platform_data *pdata;
int ret = 0, cs_num, bus_num;
@@ -980,8 +998,6 @@ static int dspi_probe(struct platform_device *pdev)
master->cleanup = dspi_cleanup;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
- master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
- SPI_BPW_MASK(16);
pdata = dev_get_platdata(&pdev->dev);
if (pdata) {
@@ -1013,6 +1029,11 @@ static int dspi_probe(struct platform_device *pdev)
}
}
+ if (dspi->devtype_data->xspi_mode)
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ else
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base)) {
@@ -1020,8 +1041,11 @@ static int dspi_probe(struct platform_device *pdev)
goto out_master_put;
}
- dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
- &dspi_regmap_config);
+ if (dspi->devtype_data->xspi_mode)
+ regmap_config = &dspi_xspi_regmap_config[0];
+ else
+ regmap_config = &dspi_regmap_config;
+ dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config);
if (IS_ERR(dspi->regmap)) {
dev_err(&pdev->dev, "failed to init regmap: %ld\n",
PTR_ERR(dspi->regmap));
@@ -1029,30 +1053,43 @@ static int dspi_probe(struct platform_device *pdev)
goto out_master_put;
}
+ if (dspi->devtype_data->xspi_mode) {
+ dspi->regmap_pushr = devm_regmap_init_mmio(
+ &pdev->dev, base + SPI_PUSHR,
+ &dspi_xspi_regmap_config[1]);
+ if (IS_ERR(dspi->regmap_pushr)) {
+ dev_err(&pdev->dev,
+ "failed to init pushr regmap: %ld\n",
+ PTR_ERR(dspi->regmap_pushr));
+ ret = PTR_ERR(dspi->regmap_pushr);
+ goto out_master_put;
+ }
+ }
+
+ dspi->clk = devm_clk_get(&pdev->dev, "dspi");
+ if (IS_ERR(dspi->clk)) {
+ ret = PTR_ERR(dspi->clk);
+ dev_err(&pdev->dev, "unable to get clock\n");
+ goto out_master_put;
+ }
+ ret = clk_prepare_enable(dspi->clk);
+ if (ret)
+ goto out_master_put;
+
dspi_init(dspi);
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq < 0) {
dev_err(&pdev->dev, "can't get platform irq\n");
ret = dspi->irq;
- goto out_master_put;
+ goto out_clk_put;
}
ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
pdev->name, dspi);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
- goto out_master_put;
- }
-
- dspi->clk = devm_clk_get(&pdev->dev, "dspi");
- if (IS_ERR(dspi->clk)) {
- ret = PTR_ERR(dspi->clk);
- dev_err(&pdev->dev, "unable to get clock\n");
- goto out_master_put;
+ goto out_clk_put;
}
- ret = clk_prepare_enable(dspi->clk);
- if (ret)
- goto out_master_put;
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
ret = dspi_request_dma(dspi, res->start);
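
With the transfer counter read back as a u16 and reset at the start of each transfer, the interrupt handler above computes the message's actual_length with a single multiply instead of the old wraparound bookkeeping. A standalone sketch of the arithmetic; the values are illustrative, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t spi_tcnt = 300;	/* frames counted in SPI_TCR since start */
	unsigned int bytes_per_word = 2;	/* e.g. a 12-bit word rounds up to 2 bytes */
	unsigned int actual_length = 0;

	/* mirrors: msg->actual_length += spi_tcnt * dspi->bytes_per_word; */
	actual_length += spi_tcnt * bytes_per_word;
	printf("%u bytes transferred\n", actual_length);	/* 600 */
	return 0;
}
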
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 1d332e23f6ed..1e8ff6256079 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -547,8 +547,11 @@ static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
dev_err(espi->dev,
"Transfer done but SPIE_DON isn't set!\n");
- if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE)
+ if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE) {
dev_err(espi->dev, "Transfer done but rx/tx fifo's aren't empty!\n");
+ dev_err(espi->dev, "SPIE_RXCNT = %d, SPIE_TXCNT = %d\n",
+ SPIE_RXCNT(events), SPIE_TXCNT(events));
+ }
complete(&espi->done);
}
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 6ae92d4dca19..0626e6e3ea0c 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -121,7 +121,10 @@ static inline int getmiso(const struct spi_device *spi)
{
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
- return !!gpiod_get_value_cansleep(spi_gpio->miso);
+ if (spi->mode & SPI_3WIRE)
+ return !!gpiod_get_value_cansleep(spi_gpio->mosi);
+ else
+ return !!gpiod_get_value_cansleep(spi_gpio->miso);
}
/*
@@ -149,27 +152,27 @@ static inline int getmiso(const struct spi_device *spi)
*/
static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits);
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
}
static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits);
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
}
/*
@@ -183,30 +186,30 @@ static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi,
*/
static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- unsigned flags = spi->master->flags;
+ flags = spi->master->flags;
return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- unsigned flags = spi->master->flags;
+ flags = spi->master->flags;
return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- unsigned flags = spi->master->flags;
+ flags = spi->master->flags;
return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
}
static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- unsigned flags = spi->master->flags;
+ flags = spi->master->flags;
return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
}
@@ -250,6 +253,16 @@ static int spi_gpio_setup(struct spi_device *spi)
return status;
}
+static int spi_gpio_set_direction(struct spi_device *spi, bool output)
+{
+ struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+
+ if (output)
+ return gpiod_direction_output(spi_gpio->mosi, 1);
+ else
+ return gpiod_direction_input(spi_gpio->mosi);
+}
+
static void spi_gpio_cleanup(struct spi_device *spi)
{
spi_bitbang_cleanup(spi);
@@ -395,6 +408,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
return status;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ master->mode_bits = SPI_3WIRE | SPI_CPHA | SPI_CPOL;
master->flags = master_flags;
master->bus_num = pdev->id;
/* The master needs to think there is a chipselect even if not connected */
@@ -407,6 +421,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
spi_gpio->bitbang.master = master;
spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
+ spi_gpio->bitbang.set_line_direction = spi_gpio_set_direction;
if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
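
With the three-wire support added here, a board only has to set SPI_3WIRE in the device's mode: getmiso() then samples the shared data line through the MOSI descriptor, and the bitbang core flips that line's direction via the new set_line_direction hook. A minimal sketch of such a device registration; the modalias and bus number are assumptions:

#include <linux/init.h>
#include <linux/spi/spi.h>

/* Hypothetical slave on an spi-gpio bus; names are illustrative. */
static struct spi_board_info three_wire_sensor __initdata = {
	.modalias	= "my-3wire-sensor",
	.max_speed_hz	= 500000,
	.bus_num	= 1,			/* the spi-gpio master's bus */
	.chip_select	= 0,
	.mode		= SPI_3WIRE | SPI_MODE_0,
};

spi_gpio_setup() accepts this mode because SPI_3WIRE is now part of master->mode_bits.
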
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 7a37090dabbe..e6eb979f1b8a 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -419,6 +419,9 @@ static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
u32 val;
val = spfi_readl(spfi, SPFI_PORT_STATE);
+ val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK <<
+ SPFI_PORT_STATE_DEV_SEL_SHIFT);
+ val |= msg->spi->chip_select << SPFI_PORT_STATE_DEV_SEL_SHIFT;
if (msg->spi->mode & SPI_CPHA)
val |= SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
else
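
The img-spfi fix above makes every prepared message reprogram the DEV_SEL field from chip_select instead of inheriting whatever was left in PORT_STATE. A standalone sketch of that read-modify-write; the mask and shift values are assumptions for illustration only:

#include <stdint.h>
#include <stdio.h>

#define DEV_SEL_MASK	0x7U	/* assumed field mask */
#define DEV_SEL_SHIFT	20	/* assumed field position */

int main(void)
{
	uint32_t val = 0x00745678;	/* pretend SPFI_PORT_STATE readback */
	unsigned int chip_select = 2;

	val &= ~(DEV_SEL_MASK << DEV_SEL_SHIFT);	/* clear stale selection */
	val |= (uint32_t)chip_select << DEV_SEL_SHIFT;	/* select this device */
	printf("PORT_STATE = 0x%08x\n", val);		/* 0x00245678 */
	return 0;
}
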
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index d3b21faf6b1f..08dd3a31a3e5 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -94,8 +94,7 @@ struct spi_imx_data {
void *rx_buf;
const void *tx_buf;
unsigned int txfifo; /* number of words pushed in tx FIFO */
- unsigned int dynamic_burst, read_u32;
- unsigned int word_mask;
+ unsigned int dynamic_burst;
/* Slave mode */
bool slave_mode;
@@ -140,6 +139,8 @@ static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \
*(type *)spi_imx->rx_buf = val; \
spi_imx->rx_buf += sizeof(type); \
} \
+ \
+ spi_imx->remainder -= sizeof(type); \
}
#define MXC_SPI_BUF_TX(type) \
@@ -203,7 +204,12 @@ out:
static int spi_imx_bytes_per_word(const int bits_per_word)
{
- return DIV_ROUND_UP(bits_per_word, BITS_PER_BYTE);
+ if (bits_per_word <= 8)
+ return 1;
+ else if (bits_per_word <= 16)
+ return 2;
+ else
+ return 4;
}
static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
@@ -220,17 +226,11 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
- if (bytes_per_word != 1 && bytes_per_word != 2 && bytes_per_word != 4)
- return false;
-
for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
if (!(transfer->len % (i * bytes_per_word)))
break;
}
- if (i == 0)
- return false;
-
spi_imx->wml = i;
spi_imx->dynamic_burst = 0;
@@ -291,26 +291,39 @@ static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
else if (bytes_per_word == 2)
val = (val << 16) | (val >> 16);
#endif
- val &= spi_imx->word_mask;
*(u32 *)spi_imx->rx_buf = val;
spi_imx->rx_buf += sizeof(u32);
}
+
+ spi_imx->remainder -= sizeof(u32);
}
static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
{
- unsigned int bytes_per_word;
+ int unaligned;
+ u32 val;
- bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
- if (spi_imx->read_u32) {
+ unaligned = spi_imx->remainder % 4;
+
+ if (!unaligned) {
spi_imx_buf_rx_swap_u32(spi_imx);
return;
}
- if (bytes_per_word == 1)
- spi_imx_buf_rx_u8(spi_imx);
- else if (bytes_per_word == 2)
+ if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
spi_imx_buf_rx_u16(spi_imx);
+ return;
+ }
+
+ val = readl(spi_imx->base + MXC_CSPIRXDATA);
+
+ while (unaligned--) {
+ if (spi_imx->rx_buf) {
+ *(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
+ spi_imx->rx_buf++;
+ }
+ spi_imx->remainder--;
+ }
}
static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
@@ -322,7 +335,6 @@ static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
if (spi_imx->tx_buf) {
val = *(u32 *)spi_imx->tx_buf;
- val &= spi_imx->word_mask;
spi_imx->tx_buf += sizeof(u32);
}
@@ -340,40 +352,30 @@ static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
{
- u32 ctrl, val;
- unsigned int bytes_per_word;
+ int unaligned;
+ u32 val = 0;
- if (spi_imx->count == spi_imx->remainder) {
- ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
- ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
- if (spi_imx->count > MX51_ECSPI_CTRL_MAX_BURST) {
- spi_imx->remainder = spi_imx->count %
- MX51_ECSPI_CTRL_MAX_BURST;
- val = MX51_ECSPI_CTRL_MAX_BURST * 8 - 1;
- } else if (spi_imx->count >= sizeof(u32)) {
- spi_imx->remainder = spi_imx->count % sizeof(u32);
- val = (spi_imx->count - spi_imx->remainder) * 8 - 1;
- } else {
- spi_imx->remainder = 0;
- val = spi_imx->bits_per_word - 1;
- spi_imx->read_u32 = 0;
- }
+ unaligned = spi_imx->count % 4;
- ctrl |= (val << MX51_ECSPI_CTRL_BL_OFFSET);
- writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+ if (!unaligned) {
+ spi_imx_buf_tx_swap_u32(spi_imx);
+ return;
}
- if (spi_imx->count >= sizeof(u32)) {
- spi_imx_buf_tx_swap_u32(spi_imx);
+ if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
+ spi_imx_buf_tx_u16(spi_imx);
return;
}
- bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
+ while (unaligned--) {
+ if (spi_imx->tx_buf) {
+ val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
+ spi_imx->tx_buf++;
+ }
+ spi_imx->count--;
+ }
- if (bytes_per_word == 1)
- spi_imx_buf_tx_u8(spi_imx);
- else if (bytes_per_word == 2)
- spi_imx_buf_tx_u16(spi_imx);
+ writel(val, spi_imx->base + MXC_CSPITXDATA);
}
static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
@@ -392,6 +394,8 @@ static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
spi_imx->rx_buf += n_bytes;
spi_imx->slave_burst -= n_bytes;
}
+
+ spi_imx->remainder -= sizeof(u32);
}
static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
@@ -1001,12 +1005,52 @@ static void spi_imx_chipselect(struct spi_device *spi, int is_active)
gpio_set_value(spi->cs_gpio, dev_is_lowactive ^ active);
}
+static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
+{
+ u32 ctrl;
+
+ ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
+ ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+}
+
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
+ unsigned int burst_len, fifo_words;
+
+ if (spi_imx->dynamic_burst)
+ fifo_words = 4;
+ else
+ fifo_words = spi_imx_bytes_per_word(spi_imx->bits_per_word);
+ /*
+ * Reload the FIFO when the number of bytes remaining in the current
+ * burst reaches 0. This only applies when bits_per_word is a
+ * multiple of 8.
+ */
+ if (!spi_imx->remainder) {
+ if (spi_imx->dynamic_burst) {
+
+ /* We need to deal with unaligned data first */
+ burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;
+
+ if (!burst_len)
+ burst_len = MX51_ECSPI_CTRL_MAX_BURST;
+
+ spi_imx_set_burst_len(spi_imx, burst_len * 8);
+
+ spi_imx->remainder = burst_len;
+ } else {
+ spi_imx->remainder = fifo_words;
+ }
+ }
+
while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
if (!spi_imx->count)
break;
- if (spi_imx->txfifo && (spi_imx->count == spi_imx->remainder))
+ if (spi_imx->dynamic_burst &&
+ spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder,
+ fifo_words))
break;
spi_imx->tx(spi_imx);
spi_imx->txfifo++;
@@ -1102,27 +1146,20 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->bits_per_word = t->bits_per_word;
spi_imx->speed_hz = t->speed_hz;
- /* Initialize the functions for transfer */
- if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode) {
- u32 mask;
-
- spi_imx->dynamic_burst = 0;
- spi_imx->remainder = 0;
- spi_imx->read_u32 = 1;
+ /*
+ * Initialize the functions for transfer. To transfer non-byte-aligned
+ * words, we have to use multiple word-size bursts; we can't use
+ * dynamic_burst in that case.
+ */
+ if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
+ (spi_imx->bits_per_word == 8 ||
+ spi_imx->bits_per_word == 16 ||
+ spi_imx->bits_per_word == 32)) {
- mask = (1 << spi_imx->bits_per_word) - 1;
spi_imx->rx = spi_imx_buf_rx_swap;
spi_imx->tx = spi_imx_buf_tx_swap;
spi_imx->dynamic_burst = 1;
- spi_imx->remainder = t->len;
-
- if (spi_imx->bits_per_word <= 8)
- spi_imx->word_mask = mask << 24 | mask << 16
- | mask << 8 | mask;
- else if (spi_imx->bits_per_word <= 16)
- spi_imx->word_mask = mask << 16 | mask;
- else
- spi_imx->word_mask = mask;
+
} else {
if (spi_imx->bits_per_word <= 8) {
spi_imx->rx = spi_imx_buf_rx_u8;
@@ -1134,6 +1171,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->rx = spi_imx_buf_rx_u32;
spi_imx->tx = spi_imx_buf_tx_u32;
}
+ spi_imx->dynamic_burst = 0;
}
if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
@@ -1317,6 +1355,7 @@ static int spi_imx_pio_transfer(struct spi_device *spi,
spi_imx->rx_buf = transfer->rx_buf;
spi_imx->count = transfer->len;
spi_imx->txfifo = 0;
+ spi_imx->remainder = 0;
reinit_completion(&spi_imx->xfer_done);
@@ -1354,6 +1393,7 @@ static int spi_imx_pio_transfer_slave(struct spi_device *spi,
spi_imx->rx_buf = transfer->rx_buf;
spi_imx->count = transfer->len;
spi_imx->txfifo = 0;
+ spi_imx->remainder = 0;
reinit_completion(&spi_imx->xfer_done);
spi_imx->slave_aborted = false;
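
The reworked spi-imx PIO path schedules one burst at a time: an unaligned head first, then maximum-size bursts, with remainder counting down the bytes still owed to the current burst. A standalone sketch of that schedule, assuming a MX51_ECSPI_CTRL_MAX_BURST of 512 bytes:

#include <stdio.h>

#define MAX_BURST 512	/* assumed value of MX51_ECSPI_CTRL_MAX_BURST */

int main(void)
{
	unsigned int count = 1300;	/* bytes left in the transfer */

	while (count) {
		/* mirrors spi_imx_push(): deal with the unaligned head first */
		unsigned int burst = count % MAX_BURST;

		if (!burst)
			burst = MAX_BURST;
		printf("burst of %u bytes (%u bits on the wire)\n",
		       burst, burst * 8);
		count -= burst;
	}
	return 0;	/* prints 276, 512, 512 for a 1300-byte transfer */
}
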
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index 61ee0f4269ae..4549efd792da 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -188,9 +188,10 @@ static void lm70_chipselect(struct spi_device *spi, int value)
/*
* Our actual bitbanger routine.
*/
-static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits)
+static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static void spi_lm70llp_attach(struct parport *p)
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 990770dfa5cf..e43842c7a31a 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -311,6 +311,24 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
/**
+ * spi_mem_get_name() - Return the SPI mem device name to be used by the
+ * upper layer if necessary
+ * @mem: the SPI memory
+ *
+ * This function allows SPI mem users to retrieve the SPI mem device name.
+ * It is useful if the upper layer needs to expose a custom name for
+ * compatibility reasons.
+ *
+ * Return: a string containing the name of the memory device to be used
+ * by the SPI mem user
+ */
+const char *spi_mem_get_name(struct spi_mem *mem)
+{
+ return mem->name;
+}
+EXPORT_SYMBOL_GPL(spi_mem_get_name);
+
+/**
* spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
* match controller limitations
* @mem: the SPI memory
@@ -344,6 +362,7 @@ static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
static int spi_mem_probe(struct spi_device *spi)
{
struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
+ struct spi_controller *ctlr = spi->controller;
struct spi_mem *mem;
mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
@@ -351,6 +370,15 @@ static int spi_mem_probe(struct spi_device *spi)
return -ENOMEM;
mem->spi = spi;
+
+ if (ctlr->mem_ops && ctlr->mem_ops->get_name)
+ mem->name = ctlr->mem_ops->get_name(mem);
+ else
+ mem->name = dev_name(&spi->dev);
+
+ if (IS_ERR_OR_NULL(mem->name))
+ return PTR_ERR(mem->name);
+
spi_set_drvdata(spi, mem);
return memdrv->probe(mem);
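
spi_mem_get_name() gives upper layers a stable, controller-chosen device name. A minimal sketch of a consumer, assuming a hypothetical driver layered on spi-mem:

#include <linux/printk.h>
#include <linux/spi/spi-mem.h>

/* Hypothetical probe of an upper-layer flash driver. */
static int my_flash_probe(struct spi_mem *mem)
{
	const char *name = spi_mem_get_name(mem);

	/* The name is either the controller's get_name() result or the
	 * dev_name() fallback chosen in spi_mem_probe() above. */
	pr_info("my-flash: probed as %s\n", name);
	return 0;
}
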
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 6c628a54e946..508c61c669e7 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -398,11 +398,9 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
{
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
- unsigned int count;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
- count = xfer->len;
if (mcspi_dma->dma_tx) {
struct dma_async_tx_descriptor *tx;
@@ -582,7 +580,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi_dma *mcspi_dma;
unsigned int count;
- u32 l;
u8 *rx;
const u8 *tx;
struct dma_slave_config cfg;
@@ -595,8 +592,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
- l = mcspi_cached_chconf0(spi);
-
if (cs->word_len <= 8) {
width = DMA_SLAVE_BUSWIDTH_1_BYTE;
@@ -676,7 +671,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
static unsigned
omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
- struct omap2_mcspi *mcspi;
struct omap2_mcspi_cs *cs = spi->controller_state;
unsigned int count, c;
u32 l;
@@ -686,7 +680,6 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
void __iomem *chstat_reg;
int word_len;
- mcspi = spi_master_get_devdata(spi->master);
count = xfer->len;
c = count;
word_len = cs->word_len;
@@ -883,13 +876,11 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
{
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi *mcspi;
- struct spi_master *spi_cntrl;
u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
u8 word_len = spi->bits_per_word;
u32 speed_hz = spi->max_speed_hz;
mcspi = spi_master_get_devdata(spi->master);
- spi_cntrl = mcspi->master;
if (t != NULL && t->bits_per_word)
word_len = t->bits_per_word;
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index d01a6adc726e..47ef6b1a2e76 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/clk.h>
#include <linux/sizes.h>
#include <linux/gpio.h>
@@ -681,9 +682,9 @@ static int orion_spi_probe(struct platform_device *pdev)
goto out_rel_axi_clk;
}
- /* Scan all SPI devices of this controller for direct mapped devices */
for_each_available_child_of_node(pdev->dev.of_node, np) {
u32 cs;
+ int cs_gpio;
/* Get chip-select number from the "reg" property */
status = of_property_read_u32(np, "reg", &cs);
@@ -695,6 +696,44 @@ static int orion_spi_probe(struct platform_device *pdev)
}
/*
+ * Initialize the CS GPIO:
+ * - properly request the actual GPIO signal
+ * - de-assert the logical signal so that all GPIO CS lines
+ * are inactive when probing for slaves
+ * - find an unused physical CS which will be driven for any
+ * slave which uses a CS GPIO
+ */
+ cs_gpio = of_get_named_gpio(pdev->dev.of_node, "cs-gpios", cs);
+ if (cs_gpio > 0) {
+ char *gpio_name;
+ int cs_flags;
+
+ if (spi->unused_hw_gpio == -1) {
+ dev_info(&pdev->dev,
+ "Selected unused HW CS#%d for any GPIO CSes\n",
+ cs);
+ spi->unused_hw_gpio = cs;
+ }
+
+ gpio_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "%s-CS%d", dev_name(&pdev->dev), cs);
+ if (!gpio_name) {
+ status = -ENOMEM;
+ goto out_rel_axi_clk;
+ }
+
+ cs_flags = of_property_read_bool(np, "spi-cs-high") ?
+ GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH;
+ status = devm_gpio_request_one(&pdev->dev, cs_gpio,
+ cs_flags, gpio_name);
+ if (status) {
+ dev_err(&pdev->dev,
+ "Can't request GPIO for CS %d\n", cs);
+ goto out_rel_axi_clk;
+ }
+ }
+
+ /*
* Check if an address is configured for this SPI device. If
* not, the MBus mapping via the 'ranges' property in the 'soc'
* node is not configured and this device should not use the
@@ -740,44 +779,8 @@ static int orion_spi_probe(struct platform_device *pdev)
if (status < 0)
goto out_rel_pm;
- if (master->cs_gpios) {
- int i;
- for (i = 0; i < master->num_chipselect; ++i) {
- char *gpio_name;
-
- if (!gpio_is_valid(master->cs_gpios[i])) {
- continue;
- }
-
- gpio_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "%s-CS%d", dev_name(&pdev->dev), i);
- if (!gpio_name) {
- status = -ENOMEM;
- goto out_rel_master;
- }
-
- status = devm_gpio_request(&pdev->dev,
- master->cs_gpios[i], gpio_name);
- if (status) {
- dev_err(&pdev->dev,
- "Can't request GPIO for CS %d\n",
- master->cs_gpios[i]);
- goto out_rel_master;
- }
- if (spi->unused_hw_gpio == -1) {
- dev_info(&pdev->dev,
- "Selected unused HW CS#%d for any GPIO CSes\n",
- i);
- spi->unused_hw_gpio = i;
- }
- }
- }
-
-
return status;
-out_rel_master:
- spi_unregister_master(master);
out_rel_pm:
pm_runtime_disable(&pdev->dev);
out_rel_axi_clk:
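
The orion probe now parks every GPIO chip select in its inactive state at request time, before any slave is probed. The polarity choice is worth spelling out; the fragment below restates it from the hunk above, reusing its identifiers, with the reasoning as comments:

/* The requested initial level is the opposite of the active level,
 * so the line is de-asserted while slaves are probed. */
cs_flags = of_property_read_bool(np, "spi-cs-high") ?
	   GPIOF_OUT_INIT_LOW :		/* active high -> park low */
	   GPIOF_OUT_INIT_HIGH;		/* active low  -> park high */
status = devm_gpio_request_one(&pdev->dev, cs_gpio, cs_flags, gpio_name);
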
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 0b2d60d30f69..14f4ea59caff 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1391,6 +1391,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP },
+ /* ICL-LP */
+ { PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0e74cbf9929d..539d6d1a277a 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -49,6 +49,7 @@ struct sh_msiof_spi_priv {
struct platform_device *pdev;
struct sh_msiof_spi_info *info;
struct completion done;
+ struct completion done_txdma;
unsigned int tx_fifo_size;
unsigned int rx_fifo_size;
unsigned int min_div_pow;
@@ -649,19 +650,21 @@ static int sh_msiof_slave_abort(struct spi_master *master)
p->slave_aborted = true;
complete(&p->done);
+ complete(&p->done_txdma);
return 0;
}
-static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p)
+static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
+ struct completion *x)
{
if (spi_controller_is_slave(p->master)) {
- if (wait_for_completion_interruptible(&p->done) ||
+ if (wait_for_completion_interruptible(x) ||
p->slave_aborted) {
dev_dbg(&p->pdev->dev, "interrupted\n");
return -EINTR;
}
} else {
- if (!wait_for_completion_timeout(&p->done, HZ)) {
+ if (!wait_for_completion_timeout(x, HZ)) {
dev_err(&p->pdev->dev, "timeout\n");
return -ETIMEDOUT;
}
@@ -711,7 +714,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
}
/* wait for tx fifo to be emptied / rx fifo to be filled */
- ret = sh_msiof_wait_for_completion(p);
+ ret = sh_msiof_wait_for_completion(p, &p->done);
if (ret)
goto stop_reset;
@@ -740,10 +743,7 @@ stop_ier:
static void sh_msiof_dma_complete(void *arg)
{
- struct sh_msiof_spi_priv *p = arg;
-
- sh_msiof_write(p, IER, 0);
- complete(&p->done);
+ complete(arg);
}
static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
@@ -764,7 +764,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
return -EAGAIN;
desc_rx->callback = sh_msiof_dma_complete;
- desc_rx->callback_param = p;
+ desc_rx->callback_param = &p->done;
cookie = dmaengine_submit(desc_rx);
if (dma_submit_error(cookie))
return cookie;
@@ -782,13 +782,8 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
goto no_dma_tx;
}
- if (rx) {
- /* No callback */
- desc_tx->callback = NULL;
- } else {
- desc_tx->callback = sh_msiof_dma_complete;
- desc_tx->callback_param = p;
- }
+ desc_tx->callback = sh_msiof_dma_complete;
+ desc_tx->callback_param = &p->done_txdma;
cookie = dmaengine_submit(desc_tx);
if (dma_submit_error(cookie)) {
ret = cookie;
@@ -805,6 +800,8 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
sh_msiof_write(p, IER, ier_bits);
reinit_completion(&p->done);
+ if (tx)
+ reinit_completion(&p->done_txdma);
p->slave_aborted = false;
/* Now start DMA */
@@ -819,17 +816,24 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
goto stop_dma;
}
- /* wait for tx/rx DMA completion */
- ret = sh_msiof_wait_for_completion(p);
- if (ret)
- goto stop_reset;
+ if (tx) {
+ /* wait for tx DMA completion */
+ ret = sh_msiof_wait_for_completion(p, &p->done_txdma);
+ if (ret)
+ goto stop_reset;
+ }
- if (!rx) {
- reinit_completion(&p->done);
- sh_msiof_write(p, IER, IER_TEOFE);
+ if (rx) {
+ /* wait for rx DMA completion */
+ ret = sh_msiof_wait_for_completion(p, &p->done);
+ if (ret)
+ goto stop_reset;
+ sh_msiof_write(p, IER, 0);
+ } else {
/* wait for tx fifo to be emptied */
- ret = sh_msiof_wait_for_completion(p);
+ sh_msiof_write(p, IER, IER_TEOFE);
+ ret = sh_msiof_wait_for_completion(p, &p->done);
if (ret)
goto stop_reset;
}
@@ -1327,6 +1331,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
p->min_div_pow = chipdata->min_div_pow;
init_completion(&p->done);
+ init_completion(&p->done_txdma);
p->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(p->clk)) {
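
sh-msiof now waits on a dedicated completion per DMA direction, so TX-only, RX-only and duplex transfers each block on exactly the events they armed, and slave abort can wake both waiters. The underlying pattern, as a minimal sketch:

#include <linux/completion.h>

/* One completion per event source; the callback takes the completion
 * itself as its parameter, so a single callback serves both channels:
 * callback_param is &p->done for RX and &p->done_txdma for TX. */
static void dma_done(void *arg)
{
	complete(arg);
}
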
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index a9beeeed812c..393701cfca3c 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -80,27 +80,31 @@ static inline u32 getmiso(struct spi_device *dev)
#include "spi-bitbang-txrx.h"
static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
}
static u32 sh_sci_spi_txrx_mode1(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits);
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
}
static u32 sh_sci_spi_txrx_mode2(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
{
- return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits);
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
}
static u32 sh_sci_spi_txrx_mode3(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
+ unsigned nsecs, u32 word, u8 bits,
+ unsigned flags)
{
- return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits);
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
}
static void sh_sci_spi_chipselect(struct spi_device *dev, int value)
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
new file mode 100644
index 000000000000..5a6137fe172d
--- /dev/null
+++ b/drivers/spi/spi-uniphier.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0
+// spi-uniphier.c - Socionext UniPhier SPI controller driver
+// Copyright 2012 Panasonic Corporation
+// Copyright 2016-2018 Socionext Inc.
+
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+#include <asm/unaligned.h>
+
+#define SSI_TIMEOUT_MS 2000
+#define SSI_MAX_CLK_DIVIDER 254
+#define SSI_MIN_CLK_DIVIDER 4
+
+struct uniphier_spi_priv {
+ void __iomem *base;
+ struct clk *clk;
+ struct spi_master *master;
+ struct completion xfer_done;
+
+ int error;
+ unsigned int tx_bytes;
+ unsigned int rx_bytes;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+
+ bool is_save_param;
+ u8 bits_per_word;
+ u16 mode;
+ u32 speed_hz;
+};
+
+#define SSI_CTL 0x00
+#define SSI_CTL_EN BIT(0)
+
+#define SSI_CKS 0x04
+#define SSI_CKS_CKRAT_MASK GENMASK(7, 0)
+#define SSI_CKS_CKPHS BIT(14)
+#define SSI_CKS_CKINIT BIT(13)
+#define SSI_CKS_CKDLY BIT(12)
+
+#define SSI_TXWDS 0x08
+#define SSI_TXWDS_WDLEN_MASK GENMASK(13, 8)
+#define SSI_TXWDS_TDTF_MASK GENMASK(7, 6)
+#define SSI_TXWDS_DTLEN_MASK GENMASK(5, 0)
+
+#define SSI_RXWDS 0x0c
+#define SSI_RXWDS_DTLEN_MASK GENMASK(5, 0)
+
+#define SSI_FPS 0x10
+#define SSI_FPS_FSPOL BIT(15)
+#define SSI_FPS_FSTRT BIT(14)
+
+#define SSI_SR 0x14
+#define SSI_SR_RNE BIT(0)
+
+#define SSI_IE 0x18
+#define SSI_IE_RCIE BIT(3)
+#define SSI_IE_RORIE BIT(0)
+
+#define SSI_IS 0x1c
+#define SSI_IS_RXRS BIT(9)
+#define SSI_IS_RCID BIT(3)
+#define SSI_IS_RORID BIT(0)
+
+#define SSI_IC 0x1c
+#define SSI_IC_TCIC BIT(4)
+#define SSI_IC_RCIC BIT(3)
+#define SSI_IC_RORIC BIT(0)
+
+#define SSI_FC 0x20
+#define SSI_FC_TXFFL BIT(12)
+#define SSI_FC_TXFTH_MASK GENMASK(11, 8)
+#define SSI_FC_RXFFL BIT(4)
+#define SSI_FC_RXFTH_MASK GENMASK(3, 0)
+
+#define SSI_TXDR 0x24
+#define SSI_RXDR 0x24
+
+#define SSI_FIFO_DEPTH 8U
+
+static inline unsigned int bytes_per_word(unsigned int bits)
+{
+ return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
+}
+
+static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ val = readl(priv->base + SSI_IE);
+ val |= mask;
+ writel(val, priv->base + SSI_IE);
+}
+
+static inline void uniphier_spi_irq_disable(struct spi_device *spi, u32 mask)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ val = readl(priv->base + SSI_IE);
+ val &= ~mask;
+ writel(val, priv->base + SSI_IE);
+}
+
+static void uniphier_spi_set_mode(struct spi_device *spi)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val1, val2;
+
+ /*
+ * clock setting
+ * CKPHS capture timing. 0:rising edge, 1:falling edge
+ * CKINIT clock initial level. 0:low, 1:high
+ * CKDLY clock delay. 0:no delay, 1:delay depending on FSTRT
+ * (FSTRT=0: 1 clock, FSTRT=1: 0.5 clock)
+ *
+ * frame setting
+ * FSPOL frame signal polarity. 0: low, 1: high
+ * FSTRT start frame timing
+ * 0: rising edge of clock, 1: falling edge of clock
+ */
+ switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
+ case SPI_MODE_0:
+ /* CKPHS=1, CKINIT=0, CKDLY=1, FSTRT=0 */
+ val1 = SSI_CKS_CKPHS | SSI_CKS_CKDLY;
+ val2 = 0;
+ break;
+ case SPI_MODE_1:
+ /* CKPHS=0, CKINIT=0, CKDLY=0, FSTRT=1 */
+ val1 = 0;
+ val2 = SSI_FPS_FSTRT;
+ break;
+ case SPI_MODE_2:
+ /* CKPHS=0, CKINIT=1, CKDLY=1, FSTRT=1 */
+ val1 = SSI_CKS_CKINIT | SSI_CKS_CKDLY;
+ val2 = SSI_FPS_FSTRT;
+ break;
+ case SPI_MODE_3:
+ /* CKPHS=1, CKINIT=1, CKDLY=0, FSTRT=0 */
+ val1 = SSI_CKS_CKPHS | SSI_CKS_CKINIT;
+ val2 = 0;
+ break;
+ }
+
+ if (!(spi->mode & SPI_CS_HIGH))
+ val2 |= SSI_FPS_FSPOL;
+
+ writel(val1, priv->base + SSI_CKS);
+ writel(val2, priv->base + SSI_FPS);
+
+ val1 = 0;
+ if (spi->mode & SPI_LSB_FIRST)
+ val1 |= FIELD_PREP(SSI_TXWDS_TDTF_MASK, 1);
+ writel(val1, priv->base + SSI_TXWDS);
+ writel(val1, priv->base + SSI_RXWDS);
+}
+
+static void uniphier_spi_set_transfer_size(struct spi_device *spi, int size)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ val = readl(priv->base + SSI_TXWDS);
+ val &= ~(SSI_TXWDS_WDLEN_MASK | SSI_TXWDS_DTLEN_MASK);
+ val |= FIELD_PREP(SSI_TXWDS_WDLEN_MASK, size);
+ val |= FIELD_PREP(SSI_TXWDS_DTLEN_MASK, size);
+ writel(val, priv->base + SSI_TXWDS);
+
+ val = readl(priv->base + SSI_RXWDS);
+ val &= ~SSI_RXWDS_DTLEN_MASK;
+ val |= FIELD_PREP(SSI_RXWDS_DTLEN_MASK, size);
+ writel(val, priv->base + SSI_RXWDS);
+}
+
+static void uniphier_spi_set_baudrate(struct spi_device *spi,
+ unsigned int speed)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val, ckdiv;
+
+ /*
+ * The supported dividers are even numbers from 4 to 254 (4, 6, 8 ... 254).
+ * Round the divider up, as we look for a speed equal to or lower than
+ * the one requested.
+ */
+ ckdiv = DIV_ROUND_UP(clk_get_rate(priv->clk), speed);
+ ckdiv = round_up(ckdiv, 2);
+
+ val = readl(priv->base + SSI_CKS);
+ val &= ~SSI_CKS_CKRAT_MASK;
+ val |= ckdiv & SSI_CKS_CKRAT_MASK;
+ writel(val, priv->base + SSI_CKS);
+}
+
+static void uniphier_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ priv->error = 0;
+ priv->tx_buf = t->tx_buf;
+ priv->rx_buf = t->rx_buf;
+ priv->tx_bytes = priv->rx_bytes = t->len;
+
+ if (!priv->is_save_param || priv->mode != spi->mode) {
+ uniphier_spi_set_mode(spi);
+ priv->mode = spi->mode;
+ }
+
+ if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
+ uniphier_spi_set_transfer_size(spi, t->bits_per_word);
+ priv->bits_per_word = t->bits_per_word;
+ }
+
+ if (!priv->is_save_param || priv->speed_hz != t->speed_hz) {
+ uniphier_spi_set_baudrate(spi, t->speed_hz);
+ priv->speed_hz = t->speed_hz;
+ }
+
+ if (!priv->is_save_param)
+ priv->is_save_param = true;
+
+ /* reset FIFOs */
+ val = SSI_FC_TXFFL | SSI_FC_RXFFL;
+ writel(val, priv->base + SSI_FC);
+}
+
+static void uniphier_spi_send(struct uniphier_spi_priv *priv)
+{
+ int wsize;
+ u32 val = 0;
+
+ wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
+ priv->tx_bytes -= wsize;
+
+ if (priv->tx_buf) {
+ switch (wsize) {
+ case 1:
+ val = *priv->tx_buf;
+ break;
+ case 2:
+ val = get_unaligned_le16(priv->tx_buf);
+ break;
+ case 4:
+ val = get_unaligned_le32(priv->tx_buf);
+ break;
+ }
+
+ priv->tx_buf += wsize;
+ }
+
+ writel(val, priv->base + SSI_TXDR);
+}
+
+static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
+{
+ int rsize;
+ u32 val;
+
+ rsize = min(bytes_per_word(priv->bits_per_word), priv->rx_bytes);
+ priv->rx_bytes -= rsize;
+
+ val = readl(priv->base + SSI_RXDR);
+
+ if (priv->rx_buf) {
+ switch (rsize) {
+ case 1:
+ *priv->rx_buf = val;
+ break;
+ case 2:
+ put_unaligned_le16(val, priv->rx_buf);
+ break;
+ case 4:
+ put_unaligned_le32(val, priv->rx_buf);
+ break;
+ }
+
+ priv->rx_buf += rsize;
+ }
+}
+
+static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+{
+ unsigned int tx_count;
+ u32 val;
+
+ tx_count = DIV_ROUND_UP(priv->tx_bytes,
+ bytes_per_word(priv->bits_per_word));
+ tx_count = min(tx_count, SSI_FIFO_DEPTH);
+
+ /* set fifo threshold */
+ val = readl(priv->base + SSI_FC);
+ val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
+ val |= FIELD_PREP(SSI_FC_TXFTH_MASK, tx_count);
+ val |= FIELD_PREP(SSI_FC_RXFTH_MASK, tx_count);
+ writel(val, priv->base + SSI_FC);
+
+ while (tx_count--)
+ uniphier_spi_send(priv);
+}
+
+static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ val = readl(priv->base + SSI_FPS);
+
+ if (enable)
+ val |= SSI_FPS_FSPOL;
+ else
+ val &= ~SSI_FPS_FSPOL;
+
+ writel(val, priv->base + SSI_FPS);
+}
+
+static int uniphier_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ int status;
+
+ uniphier_spi_setup_transfer(spi, t);
+
+ reinit_completion(&priv->xfer_done);
+
+ uniphier_spi_fill_tx_fifo(priv);
+
+ uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+
+ status = wait_for_completion_timeout(&priv->xfer_done,
+ msecs_to_jiffies(SSI_TIMEOUT_MS));
+
+ uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+
+ if (status < 0)
+ return status;
+
+ return priv->error;
+}
+
+static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+
+ writel(SSI_CTL_EN, priv->base + SSI_CTL);
+
+ return 0;
+}
+
+static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+
+ writel(0, priv->base + SSI_CTL);
+
+ return 0;
+}
+
+static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
+{
+ struct uniphier_spi_priv *priv = dev_id;
+ u32 val, stat;
+
+ stat = readl(priv->base + SSI_IS);
+ val = SSI_IC_TCIC | SSI_IC_RCIC | SSI_IC_RORIC;
+ writel(val, priv->base + SSI_IC);
+
+ /* rx fifo overrun */
+ if (stat & SSI_IS_RORID) {
+ priv->error = -EIO;
+ goto done;
+ }
+
+ /* rx complete */
+ if ((stat & SSI_IS_RCID) && (stat & SSI_IS_RXRS)) {
+ while ((readl(priv->base + SSI_SR) & SSI_SR_RNE) &&
+ (priv->rx_bytes - priv->tx_bytes) > 0)
+ uniphier_spi_recv(priv);
+
+ if ((readl(priv->base + SSI_SR) & SSI_SR_RNE) ||
+ (priv->rx_bytes != priv->tx_bytes)) {
+ priv->error = -EIO;
+ goto done;
+ } else if (priv->rx_bytes == 0)
+ goto done;
+
+ /* next tx transfer */
+ uniphier_spi_fill_tx_fifo(priv);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+
+done:
+ complete(&priv->xfer_done);
+ return IRQ_HANDLED;
+}
+
+static int uniphier_spi_probe(struct platform_device *pdev)
+{
+ struct uniphier_spi_priv *priv;
+ struct spi_master *master;
+ struct resource *res;
+ unsigned long clk_rate;
+ int irq;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*priv));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ priv = spi_master_get_devdata(master);
+ priv->master = master;
+ priv->is_save_param = false;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto out_master_put;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(priv->clk);
+ goto out_master_put;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto out_master_put;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ\n");
+ ret = irq;
+ goto out_disable_clk;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, uniphier_spi_handler,
+ 0, "uniphier-spi", priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ goto out_disable_clk;
+ }
+
+ init_completion(&priv->xfer_done);
+
+ clk_rate = clk_get_rate(priv->clk);
+
+ master->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER);
+ master->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER);
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = pdev->id;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+
+ master->set_cs = uniphier_spi_set_cs;
+ master->transfer_one = uniphier_spi_transfer_one;
+ master->prepare_transfer_hardware
+ = uniphier_spi_prepare_transfer_hardware;
+ master->unprepare_transfer_hardware
+ = uniphier_spi_unprepare_transfer_hardware;
+ master->num_chipselect = 1;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret)
+ goto out_disable_clk;
+
+ return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(priv->clk);
+
+out_master_put:
+ spi_master_put(master);
+ return ret;
+}
+
+static int uniphier_spi_remove(struct platform_device *pdev)
+{
+ struct uniphier_spi_priv *priv = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id uniphier_spi_match[] = {
+ { .compatible = "socionext,uniphier-scssi" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_spi_match);
+
+static struct platform_driver uniphier_spi_driver = {
+ .probe = uniphier_spi_probe,
+ .remove = uniphier_spi_remove,
+ .driver = {
+ .name = "uniphier-spi",
+ .of_match_table = uniphier_spi_match,
+ },
+};
+module_platform_driver(uniphier_spi_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
+MODULE_DESCRIPTION("Socionext UniPhier SPI controller driver");
+MODULE_LICENSE("GPL v2");
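
In the new uniphier driver, uniphier_spi_set_baudrate() rounds the divider up to the next even value so the resulting rate never exceeds the request. A standalone worked example, assuming a hypothetical 50 MHz parent clock:

#include <stdio.h>

static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned int rate = 50000000;	/* assumed parent clock */
	unsigned int speed = 4000000;	/* requested transfer rate */
	unsigned int ckdiv = div_round_up(rate, speed);	/* 13 */

	ckdiv = (ckdiv + 1) & ~1U;	/* round up to even -> 14 */
	printf("ckdiv=%u, actual rate=%u Hz\n", ckdiv, rate / ckdiv);
	return 0;	/* 3571428 Hz, just under the 4 MHz request */
}
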
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index be6155cba9de..8ce04f829a80 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -54,7 +54,7 @@ static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
}
static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
- u32 v, u8 bits)
+ u32 v, u8 bits, unsigned flags)
{
struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index c574dd210500..df30e1323252 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -89,27 +89,6 @@ config SSB_HOST_SOC
If unsure, say N
-config SSB_SILENT
- bool "No SSB kernel messages"
- depends on SSB && EXPERT
- help
- This option turns off all Sonics Silicon Backplane printks.
- Note that you won't be able to identify problems, once
- messages are turned off.
- This might only be desired for production kernels on
- embedded devices to reduce the kernel size.
-
- Say N
-
-config SSB_DEBUG
- bool "SSB debugging"
- depends on SSB && !SSB_SILENT
- help
- This turns on additional runtime checks and debugging
- messages. Turn this on for SSB troubleshooting.
-
- If unsure, say N
-
config SSB_SERIAL
bool
depends on SSB
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index bed2fedeb057..9c7316b5685f 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -10,12 +10,12 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/ssb/ssb.h>
-#include "ssb_private.h"
-
static const struct pci_device_id b43_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4301) },
diff --git a/drivers/ssb/bridge_pcmcia_80211.c b/drivers/ssb/bridge_pcmcia_80211.c
index d70568ea02d5..f51f150307df 100644
--- a/drivers/ssb/bridge_pcmcia_80211.c
+++ b/drivers/ssb/bridge_pcmcia_80211.c
@@ -6,6 +6,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -15,8 +17,6 @@
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
-#include "ssb_private.h"
-
static const struct pcmcia_device_id ssb_host_pcmcia_tbl[] = {
PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448),
PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x476),
@@ -70,7 +70,7 @@ err_disable:
err_kfree_ssb:
kfree(ssb);
out_error:
- ssb_err("Initialization failed (%d, %d)\n", res, err);
+ dev_err(&dev->dev, "Initialization failed (%d, %d)\n", res, err);
return err;
}
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 7cb7d2c8fd86..99a4656d113d 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -9,14 +9,14 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/bcm47xx_wdt.h>
-#include "ssb_private.h"
-
/* Clock sources */
enum ssb_clksrc {
@@ -56,7 +56,7 @@ void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc,
if (cc->capabilities & SSB_CHIPCO_CAP_PMU)
return; /* PMU controls clockmode, separated function needed */
- SSB_WARN_ON(ccdev->id.revision >= 20);
+ WARN_ON(ccdev->id.revision >= 20);
/* chipcommon cores prior to rev6 don't support dynamic clock control */
if (ccdev->id.revision < 6)
@@ -111,7 +111,7 @@ void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc,
}
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
}
@@ -164,7 +164,7 @@ static int chipco_pctl_clockfreqlimit(struct ssb_chipcommon *cc, int get_max)
divisor = 32;
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
} else if (cc->dev->id.revision < 10) {
switch (clocksrc) {
@@ -277,7 +277,7 @@ static void calc_fast_powerup_delay(struct ssb_chipcommon *cc)
minfreq = chipco_pctl_clockfreqlimit(cc, 0);
pll_on_delay = chipco_read32(cc, SSB_CHIPCO_PLLONDELAY);
tmp = (((pll_on_delay + 2) * 1000000) + (minfreq - 1)) / minfreq;
- SSB_WARN_ON(tmp & ~0xFFFF);
+ WARN_ON(tmp & ~0xFFFF);
cc->fast_pwrup_delay = tmp;
}
@@ -354,7 +354,7 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
if (cc->dev->id.revision >= 11)
cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
- ssb_dbg("chipcommon status is 0x%x\n", cc->status);
+ dev_dbg(cc->dev->dev, "chipcommon status is 0x%x\n", cc->status);
if (cc->dev->id.revision >= 20) {
chipco_write32(cc, SSB_CHIPCO_GPIOPULLUP, 0);
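
Throughout the ssb conversion the private SSB_WARN_ON()/ssb_dbg() wrappers give way to the generic helpers, which are always built in and tag messages with the device name. One detail worth noting: WARN_ON() evaluates to its condition, so it can still guard an early exit, as in the ssb_pmu_set_ldo_voltage() hunk further down. A minimal fragment:

/* WARN_ON() returns the condition, so the warning and the bail-out
 * share one statement: */
if (WARN_ON(id != LDO_PAREF))
	return;
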
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index c5352ea4821e..0f60e90ded26 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -8,6 +8,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/ssb/ssb_driver_chipcommon.h>
@@ -17,8 +19,6 @@
#include <linux/bcm47xx_nvram.h>
#endif
-#include "ssb_private.h"
-
static u32 ssb_chipco_pll_read(struct ssb_chipcommon *cc, u32 offset)
{
chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, offset);
@@ -110,7 +110,7 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
return;
}
- ssb_info("Programming PLL to %u.%03u MHz\n",
+ dev_info(cc->dev->dev, "Programming PLL to %u.%03u MHz\n",
crystalfreq / 1000, crystalfreq % 1000);
/* First turn the PLL off. */
@@ -128,7 +128,7 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
~(1 << SSB_PMURES_5354_BB_PLL_PU));
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
for (i = 1500; i; i--) {
tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
@@ -138,7 +138,7 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
}
tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
- ssb_emerg("Failed to turn the PLL off!\n");
+ dev_emerg(cc->dev->dev, "Failed to turn the PLL off!\n");
/* Set PDIV in PLL control 0. */
pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL0);
@@ -249,7 +249,7 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc,
return;
}
- ssb_info("Programming PLL to %u.%03u MHz\n",
+ dev_info(cc->dev->dev, "Programming PLL to %u.%03u MHz\n",
crystalfreq / 1000, crystalfreq % 1000);
/* First turn the PLL off. */
@@ -265,7 +265,7 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc,
buffer_strength = 0x222222;
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
for (i = 1500; i; i--) {
tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
@@ -275,7 +275,7 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc,
}
tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
- ssb_emerg("Failed to turn the PLL off!\n");
+ dev_emerg(cc->dev->dev, "Failed to turn the PLL off!\n");
/* Set p1div and p2div. */
pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL0);
@@ -349,7 +349,7 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
case 43222:
break;
default:
- ssb_err("ERROR: PLL init unknown for device %04X\n",
+ dev_err(cc->dev->dev, "ERROR: PLL init unknown for device %04X\n",
bus->chip_id);
}
}
@@ -471,7 +471,7 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
max_msk = 0xFFFFF;
break;
default:
- ssb_err("ERROR: PMU resource config unknown for device %04X\n",
+ dev_err(cc->dev->dev, "ERROR: PMU resource config unknown for device %04X\n",
bus->chip_id);
}
@@ -501,7 +501,7 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
~(depend_tab[i].depend));
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
}
}
@@ -524,7 +524,7 @@ void ssb_pmu_init(struct ssb_chipcommon *cc)
pmucap = chipco_read32(cc, SSB_CHIPCO_PMU_CAP);
cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION);
- ssb_dbg("Found rev %u PMU (capabilities 0x%08X)\n",
+ dev_dbg(cc->dev->dev, "Found rev %u PMU (capabilities 0x%08X)\n",
cc->pmu.rev, pmucap);
if (cc->pmu.rev == 1)
@@ -568,12 +568,12 @@ void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc,
mask = 0x3F;
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
return;
}
break;
case 0x4312:
- if (SSB_WARN_ON(id != LDO_PAREF))
+ if (WARN_ON(id != LDO_PAREF))
return;
addr = 0;
shift = 21;
@@ -636,7 +636,7 @@ u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc)
case 0x5354:
return ssb_pmu_get_alp_clock_clk0(cc);
default:
- ssb_err("ERROR: PMU alp clock unknown for device %04X\n",
+ dev_err(cc->dev->dev, "ERROR: PMU alp clock unknown for device %04X\n",
bus->chip_id);
return 0;
}
@@ -651,7 +651,7 @@ u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc)
/* 5354 chip uses a non programmable PLL of frequency 240MHz */
return 240000000;
default:
- ssb_err("ERROR: PMU cpu clock unknown for device %04X\n",
+ dev_err(cc->dev->dev, "ERROR: PMU cpu clock unknown for device %04X\n",
bus->chip_id);
return 0;
}
@@ -665,7 +665,7 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc)
case 0x5354:
return 120000000;
default:
- ssb_err("ERROR: PMU controlclock unknown for device %04X\n",
+ dev_err(cc->dev->dev, "ERROR: PMU controlclock unknown for device %04X\n",
bus->chip_id);
return 0;
}
@@ -705,9 +705,9 @@ void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid)
pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD;
break;
default:
- ssb_printk(KERN_ERR PFX
- "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
- cc->dev->bus->chip_id);
+ dev_err(cc->dev->dev,
+ "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
+ cc->dev->bus->chip_id);
return;
}
diff --git a/drivers/ssb/driver_chipcommon_sflash.c b/drivers/ssb/driver_chipcommon_sflash.c
index 937fc31971a7..fac0e6828288 100644
--- a/drivers/ssb/driver_chipcommon_sflash.c
+++ b/drivers/ssb/driver_chipcommon_sflash.c
@@ -5,10 +5,10 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
-#include <linux/ssb/ssb.h>
-
#include "ssb_private.h"
+#include <linux/ssb/ssb.h>
+
static struct resource ssb_sflash_resource = {
.name = "ssb_sflash",
.start = SSB_FLASH2,
@@ -80,7 +80,7 @@ static void ssb_sflash_cmd(struct ssb_chipcommon *cc, u32 opcode)
return;
cpu_relax();
}
- pr_err("SFLASH control command failed (timeout)!\n");
+ dev_err(cc->dev->dev, "SFLASH control command failed (timeout)!\n");
}
/* Initialize serial flash access */
diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c
index 59385fdab5b0..06b68dd6e022 100644
--- a/drivers/ssb/driver_extif.c
+++ b/drivers/ssb/driver_extif.c
@@ -10,12 +10,12 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
-#include "ssb_private.h"
-
static inline u32 extif_read32(struct ssb_extif *extif, u16 offset)
{
diff --git a/drivers/ssb/driver_gige.c b/drivers/ssb/driver_gige.c
index e9734051e3c4..ebee6b0e3c34 100644
--- a/drivers/ssb/driver_gige.c
+++ b/drivers/ssb/driver_gige.c
@@ -242,7 +242,7 @@ static int ssb_gige_probe(struct ssb_device *sdev,
bool pdev_is_ssb_gige_core(struct pci_dev *pdev)
{
if (!pdev->resource[0].name)
- return 0;
+ return false;
return (strcmp(pdev->resource[0].name, SSB_GIGE_MEM_RES_NAME) == 0);
}
EXPORT_SYMBOL(pdev_is_ssb_gige_core);
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index 796e22037bc4..e809dae4c470 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -8,6 +8,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -15,8 +17,6 @@
#include <linux/export.h>
#include <linux/ssb/ssb.h>
-#include "ssb_private.h"
-
/**************************************************
* Shared
@@ -461,7 +461,7 @@ int ssb_gpio_init(struct ssb_bus *bus)
else if (ssb_extif_available(&bus->extif))
return ssb_gpio_extif_init(bus);
else
- SSB_WARN_ON(1);
+ WARN_ON(1);
return -1;
}
@@ -473,7 +473,7 @@ int ssb_gpio_unregister(struct ssb_bus *bus)
gpiochip_remove(&bus->gpio);
return 0;
} else {
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
return -1;
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index f87efef42252..1ca2ac5ef2b8 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -8,6 +8,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/mtd/physmap.h>
@@ -19,8 +21,6 @@
#include <linux/bcm47xx_nvram.h>
#endif
-#include "ssb_private.h"
-
static const char * const part_probes[] = { "bcm47xxpart", NULL };
static struct physmap_flash_data ssb_pflash_data = {
@@ -170,14 +170,15 @@ static void set_irq(struct ssb_device *dev, unsigned int irq)
irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]);
ssb_write32(mdev, SSB_IPSFLAG, irqflag);
}
- ssb_dbg("set_irq: core 0x%04x, irq %d => %d\n",
+ dev_dbg(dev->dev, "set_irq: core 0x%04x, irq %d => %d\n",
dev->id.coreid, oldirq+2, irq+2);
}
static void print_irq(struct ssb_device *dev, unsigned int irq)
{
static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
- ssb_dbg("core 0x%04x, irq : %s%s %s%s %s%s %s%s %s%s %s%s %s%s\n",
+ dev_dbg(dev->dev,
+ "core 0x%04x, irq : %s%s %s%s %s%s %s%s %s%s %s%s %s%s\n",
dev->id.coreid,
irq_name[0], irq == 0 ? "*" : " ",
irq_name[1], irq == 1 ? "*" : " ",
@@ -229,11 +230,11 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) {
case SSB_CHIPCO_FLASHT_STSER:
case SSB_CHIPCO_FLASHT_ATSER:
- pr_debug("Found serial flash\n");
+ dev_dbg(mcore->dev->dev, "Found serial flash\n");
ssb_sflash_init(&bus->chipco);
break;
case SSB_CHIPCO_FLASHT_PARA:
- pr_debug("Found parallel flash\n");
+ dev_dbg(mcore->dev->dev, "Found parallel flash\n");
pflash->present = true;
pflash->window = SSB_FLASH2;
pflash->window_size = SSB_FLASH2_SZ;
@@ -299,7 +300,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
if (!mcore->dev)
return; /* We don't have a MIPS core */
- ssb_dbg("Initializing MIPS core...\n");
+ dev_dbg(mcore->dev->dev, "Initializing MIPS core...\n");
bus = mcore->dev->bus;
hz = ssb_clockspeed(bus);
@@ -347,7 +348,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
break;
}
}
- ssb_dbg("after irq reconfiguration\n");
+ dev_dbg(mcore->dev->dev, "after irq reconfiguration\n");
dump_irq(bus);
ssb_mips_serial_init(mcore);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 5fe1c22e289b..6a5622e0ded5 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -8,14 +8,14 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/ssb/ssb_embedded.h>
-#include "ssb_private.h"
-
static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address);
static void ssb_pcie_write(struct ssb_pcicore *pc, u32 address, u32 data);
static u16 ssb_pcie_mdio_read(struct ssb_pcicore *pc, u8 device, u8 address);
@@ -115,7 +115,7 @@ static int ssb_extpci_read_config(struct ssb_pcicore *pc,
u32 addr, val;
void __iomem *mmio;
- SSB_WARN_ON(!pc->hostmode);
+ WARN_ON(!pc->hostmode);
if (unlikely(len != 1 && len != 2 && len != 4))
goto out;
addr = get_cfgspace_addr(pc, bus, dev, func, off);
@@ -161,7 +161,7 @@ static int ssb_extpci_write_config(struct ssb_pcicore *pc,
u32 addr, val = 0;
void __iomem *mmio;
- SSB_WARN_ON(!pc->hostmode);
+ WARN_ON(!pc->hostmode);
if (unlikely(len != 1 && len != 2 && len != 4))
goto out;
addr = get_cfgspace_addr(pc, bus, dev, func, off);
@@ -263,7 +263,7 @@ int ssb_pcicore_plat_dev_init(struct pci_dev *d)
return -ENODEV;
}
- ssb_info("PCI: Fixing up device %s\n", pci_name(d));
+ dev_info(&d->dev, "PCI: Fixing up device %s\n", pci_name(d));
/* Fix up interrupt lines */
d->irq = ssb_mips_irq(extpci_core->dev) + 2;
@@ -284,12 +284,12 @@ static void ssb_pcicore_fixup_pcibridge(struct pci_dev *dev)
if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) != 0)
return;
- ssb_info("PCI: Fixing up bridge %s\n", pci_name(dev));
+ dev_info(&dev->dev, "PCI: Fixing up bridge %s\n", pci_name(dev));
/* Enable PCI bridge bus mastering and memory space */
pci_set_master(dev);
if (pcibios_enable_device(dev, ~0) < 0) {
- ssb_err("PCI: SSB bridge enable failed\n");
+ dev_err(&dev->dev, "PCI: SSB bridge enable failed\n");
return;
}
@@ -298,7 +298,8 @@ static void ssb_pcicore_fixup_pcibridge(struct pci_dev *dev)
/* Make sure our latency is high enough to handle the devices behind us */
lat = 168;
- ssb_info("PCI: Fixing latency timer of device %s to %u\n",
+ dev_info(&dev->dev,
+ "PCI: Fixing latency timer of device %s to %u\n",
pci_name(dev), lat);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
@@ -322,7 +323,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
return;
extpci_core = pc;
- ssb_dbg("PCIcore in host mode found\n");
+ dev_dbg(pc->dev->dev, "PCIcore in host mode found\n");
/* Reset devices on the external PCI bus */
val = SSB_PCICORE_CTL_RST_OE;
val |= SSB_PCICORE_CTL_CLK_OE;
@@ -337,7 +338,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
udelay(1); /* Assertion time demanded by the PCI standard */
if (pc->dev->bus->has_cardbus_slot) {
- ssb_dbg("CardBus slot detected\n");
+ dev_dbg(pc->dev->dev, "CardBus slot detected\n");
pc->cardbusmode = 1;
/* GPIO 1 resets the bridge */
ssb_gpio_out(pc->dev->bus, 1, 1);
@@ -701,7 +702,7 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
/* Calculate the "coremask" for the device. */
coremask = (1 << dev->core_index);
- SSB_WARN_ON(bus->bustype != SSB_BUSTYPE_PCI);
+ WARN_ON(bus->bustype != SSB_BUSTYPE_PCI);
err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp);
if (err)
goto out;
diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c
index 55e101115038..8254ed25e063 100644
--- a/drivers/ssb/embedded.c
+++ b/drivers/ssb/embedded.c
@@ -9,6 +9,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/ssb/ssb.h>
@@ -17,8 +19,6 @@
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/pci.h>
-#include "ssb_private.h"
-
int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks)
{
@@ -57,8 +57,8 @@ int ssb_watchdog_register(struct ssb_bus *bus)
bus->busnumber, &wdt,
sizeof(wdt));
if (IS_ERR(pdev)) {
- ssb_dbg("can not register watchdog device, err: %li\n",
- PTR_ERR(pdev));
+ pr_debug("can not register watchdog device, err: %li\n",
+ PTR_ERR(pdev));
return PTR_ERR(pdev);
}
@@ -77,7 +77,7 @@ u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask)
else if (ssb_extif_available(&bus->extif))
res = ssb_extif_gpio_in(&bus->extif, mask);
else
- SSB_WARN_ON(1);
+ WARN_ON(1);
spin_unlock_irqrestore(&bus->gpio_lock, flags);
return res;
@@ -95,7 +95,7 @@ u32 ssb_gpio_out(struct ssb_bus *bus, u32 mask, u32 value)
else if (ssb_extif_available(&bus->extif))
res = ssb_extif_gpio_out(&bus->extif, mask, value);
else
- SSB_WARN_ON(1);
+ WARN_ON(1);
spin_unlock_irqrestore(&bus->gpio_lock, flags);
return res;
@@ -113,7 +113,7 @@ u32 ssb_gpio_outen(struct ssb_bus *bus, u32 mask, u32 value)
else if (ssb_extif_available(&bus->extif))
res = ssb_extif_gpio_outen(&bus->extif, mask, value);
else
- SSB_WARN_ON(1);
+ WARN_ON(1);
spin_unlock_irqrestore(&bus->gpio_lock, flags);
return res;
@@ -145,7 +145,7 @@ u32 ssb_gpio_intmask(struct ssb_bus *bus, u32 mask, u32 value)
else if (ssb_extif_available(&bus->extif))
res = ssb_extif_gpio_intmask(&bus->extif, mask, value);
else
- SSB_WARN_ON(1);
+ WARN_ON(1);
spin_unlock_irqrestore(&bus->gpio_lock, flags);
return res;
@@ -163,7 +163,7 @@ u32 ssb_gpio_polarity(struct ssb_bus *bus, u32 mask, u32 value)
else if (ssb_extif_available(&bus->extif))
res = ssb_extif_gpio_polarity(&bus->extif, mask, value);
else
- SSB_WARN_ON(1);
+ WARN_ON(1);
spin_unlock_irqrestore(&bus->gpio_lock, flags);
return res;
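Annotation: every SSB_WARN_ON() call site in this series becomes a plain WARN_ON(). The old wrapper compiled away without CONFIG_SSB_DEBUG; the core macro is always live, dumps a backtrace each time it fires, and still evaluates to the truth value of its argument, so the inline-condition idiom keeps working. A hedged sketch:

	if (WARN_ON(!sdrv->remove))	/* logs a backtrace when true */
		continue;		/* and still yields the condition value */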
diff --git a/drivers/ssb/host_soc.c b/drivers/ssb/host_soc.c
index d62992dc08b2..3b438480515c 100644
--- a/drivers/ssb/host_soc.c
+++ b/drivers/ssb/host_soc.c
@@ -8,11 +8,11 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/bcm47xx_nvram.h>
#include <linux/ssb/ssb.h>
-#include "ssb_private.h"
-
static u8 ssb_host_soc_read8(struct ssb_device *dev, u16 offset)
{
struct ssb_bus *bus = dev->bus;
@@ -61,7 +61,7 @@ static void ssb_host_soc_block_read(struct ssb_device *dev, void *buffer,
case sizeof(u16): {
__le16 *buf = buffer;
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
while (count) {
*buf = (__force __le16)__raw_readw(addr);
buf++;
@@ -72,7 +72,7 @@ static void ssb_host_soc_block_read(struct ssb_device *dev, void *buffer,
case sizeof(u32): {
__le32 *buf = buffer;
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
while (count) {
*buf = (__force __le32)__raw_readl(addr);
buf++;
@@ -81,7 +81,7 @@ static void ssb_host_soc_block_read(struct ssb_device *dev, void *buffer,
break;
}
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
}
#endif /* CONFIG_SSB_BLOCKIO */
@@ -134,7 +134,7 @@ static void ssb_host_soc_block_write(struct ssb_device *dev, const void *buffer,
case sizeof(u16): {
const __le16 *buf = buffer;
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
while (count) {
__raw_writew((__force u16)(*buf), addr);
buf++;
@@ -145,7 +145,7 @@ static void ssb_host_soc_block_write(struct ssb_device *dev, const void *buffer,
case sizeof(u32): {
const __le32 *buf = buffer;
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
while (count) {
__raw_writel((__force u32)(*buf), addr);
buf++;
@@ -154,7 +154,7 @@ static void ssb_host_soc_block_write(struct ssb_device *dev, const void *buffer,
break;
}
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
}
#endif /* CONFIG_SSB_BLOCKIO */
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 116594413f66..0a26984acb2c 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -209,7 +209,7 @@ int ssb_devices_freeze(struct ssb_bus *bus, struct ssb_freeze_context *ctx)
memset(ctx, 0, sizeof(*ctx));
ctx->bus = bus;
- SSB_WARN_ON(bus->nr_devices > ARRAY_SIZE(ctx->device_frozen));
+ WARN_ON(bus->nr_devices > ARRAY_SIZE(ctx->device_frozen));
for (i = 0; i < bus->nr_devices; i++) {
sdev = ssb_device_get(&bus->devices[i]);
@@ -220,7 +220,7 @@ int ssb_devices_freeze(struct ssb_bus *bus, struct ssb_freeze_context *ctx)
continue;
}
sdrv = drv_to_ssb_drv(sdev->dev->driver);
- if (SSB_WARN_ON(!sdrv->remove))
+ if (WARN_ON(!sdrv->remove))
continue;
sdrv->remove(sdev);
ctx->device_frozen[i] = 1;
@@ -248,15 +248,16 @@ int ssb_devices_thaw(struct ssb_freeze_context *ctx)
continue;
sdev = &bus->devices[i];
- if (SSB_WARN_ON(!sdev->dev || !sdev->dev->driver))
+ if (WARN_ON(!sdev->dev || !sdev->dev->driver))
continue;
sdrv = drv_to_ssb_drv(sdev->dev->driver);
- if (SSB_WARN_ON(!sdrv || !sdrv->probe))
+ if (WARN_ON(!sdrv || !sdrv->probe))
continue;
err = sdrv->probe(sdev, &sdev->id);
if (err) {
- ssb_err("Failed to thaw device %s\n",
+ dev_err(sdev->dev,
+ "Failed to thaw device %s\n",
dev_name(sdev->dev));
result = err;
}
@@ -431,9 +432,9 @@ void ssb_bus_unregister(struct ssb_bus *bus)
err = ssb_gpio_unregister(bus);
if (err == -EBUSY)
- ssb_dbg("Some GPIOs are still in use\n");
+ pr_debug("Some GPIOs are still in use\n");
else if (err)
- ssb_dbg("Can not unregister GPIO driver: %i\n", err);
+ pr_debug("Can not unregister GPIO driver: %i\n", err);
ssb_buses_lock();
ssb_devices_unregister(bus);
@@ -518,7 +519,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
sdev->dev = dev;
err = device_register(dev);
if (err) {
- ssb_err("Could not register %s\n", dev_name(dev));
+ pr_err("Could not register %s\n", dev_name(dev));
/* Set dev to NULL to not unregister
* dev on error unwinding. */
sdev->dev = NULL;
@@ -576,9 +577,9 @@ static int ssb_attach_queued_buses(void)
err = ssb_gpio_init(bus);
if (err == -ENOTSUPP)
- ssb_dbg("GPIO driver not activated\n");
+ pr_debug("GPIO driver not activated\n");
else if (err)
- ssb_dbg("Error registering GPIO driver: %i\n", err);
+ pr_debug("Error registering GPIO driver: %i\n", err);
ssb_bus_may_powerdown(bus);
@@ -707,10 +708,12 @@ int ssb_bus_pcibus_register(struct ssb_bus *bus, struct pci_dev *host_pci)
err = ssb_bus_register(bus, ssb_pci_get_invariants, 0);
if (!err) {
- ssb_info("Sonics Silicon Backplane found on PCI device %s\n",
+ dev_info(&host_pci->dev,
+ "Sonics Silicon Backplane found on PCI device %s\n",
dev_name(&host_pci->dev));
} else {
- ssb_err("Failed to register PCI version of SSB with error %d\n",
+ dev_err(&host_pci->dev,
+ "Failed to register PCI version of SSB with error %d\n",
err);
}
@@ -731,7 +734,8 @@ int ssb_bus_pcmciabus_register(struct ssb_bus *bus,
err = ssb_bus_register(bus, ssb_pcmcia_get_invariants, baseaddr);
if (!err) {
- ssb_info("Sonics Silicon Backplane found on PCMCIA device %s\n",
+ dev_info(&pcmcia_dev->dev,
+ "Sonics Silicon Backplane found on PCMCIA device %s\n",
pcmcia_dev->devname);
}
@@ -752,7 +756,8 @@ int ssb_bus_sdiobus_register(struct ssb_bus *bus, struct sdio_func *func,
err = ssb_bus_register(bus, ssb_sdio_get_invariants, ~0);
if (!err) {
- ssb_info("Sonics Silicon Backplane found on SDIO device %s\n",
+ dev_info(&func->dev,
+ "Sonics Silicon Backplane found on SDIO device %s\n",
sdio_func_id(func));
}
@@ -771,8 +776,8 @@ int ssb_bus_host_soc_register(struct ssb_bus *bus, unsigned long baseaddr)
err = ssb_bus_register(bus, ssb_host_soc_get_invariants, baseaddr);
if (!err) {
- ssb_info("Sonics Silicon Backplane found at address 0x%08lX\n",
- baseaddr);
+ pr_info("Sonics Silicon Backplane found at address 0x%08lX\n",
+ baseaddr);
}
return err;
@@ -856,13 +861,13 @@ u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m)
case SSB_PLLTYPE_2: /* 48Mhz, 4 dividers */
n1 += SSB_CHIPCO_CLK_T2_BIAS;
n2 += SSB_CHIPCO_CLK_T2_BIAS;
- SSB_WARN_ON(!((n1 >= 2) && (n1 <= 7)));
- SSB_WARN_ON(!((n2 >= 5) && (n2 <= 23)));
+ WARN_ON(!((n1 >= 2) && (n1 <= 7)));
+ WARN_ON(!((n2 >= 5) && (n2 <= 23)));
break;
case SSB_PLLTYPE_5: /* 25Mhz, 4 dividers */
return 100000000;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
switch (plltype) {
@@ -911,9 +916,9 @@ u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m)
m1 += SSB_CHIPCO_CLK_T2_BIAS;
m2 += SSB_CHIPCO_CLK_T2M2_BIAS;
m3 += SSB_CHIPCO_CLK_T2_BIAS;
- SSB_WARN_ON(!((m1 >= 2) && (m1 <= 7)));
- SSB_WARN_ON(!((m2 >= 3) && (m2 <= 10)));
- SSB_WARN_ON(!((m3 >= 2) && (m3 <= 7)));
+ WARN_ON(!((m1 >= 2) && (m1 <= 7)));
+ WARN_ON(!((m2 >= 3) && (m2 <= 10)));
+ WARN_ON(!((m3 >= 2) && (m3 <= 7)));
if (!(mc & SSB_CHIPCO_CLK_T2MC_M1BYP))
clock /= m1;
@@ -923,7 +928,7 @@ u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m)
clock /= m3;
return clock;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
return 0;
}
@@ -1057,9 +1062,9 @@ static int ssb_wait_bits(struct ssb_device *dev, u16 reg, u32 bitmask,
}
udelay(10);
}
- printk(KERN_ERR PFX "Timeout waiting for bitmask %08X on "
- "register %04X to %s.\n",
- bitmask, reg, (set ? "set" : "clear"));
+ dev_err(dev->dev,
+ "Timeout waiting for bitmask %08X on register %04X to %s\n",
+ bitmask, reg, set ? "set" : "clear");
return -ETIMEDOUT;
}
@@ -1164,12 +1169,10 @@ int ssb_bus_may_powerdown(struct ssb_bus *bus)
if (err)
goto error;
out:
-#ifdef CONFIG_SSB_DEBUG
bus->powered_up = 0;
-#endif
return err;
error:
- ssb_err("Bus powerdown failed\n");
+ pr_err("Bus powerdown failed\n");
goto out;
}
EXPORT_SYMBOL(ssb_bus_may_powerdown);
@@ -1183,16 +1186,14 @@ int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl)
if (err)
goto error;
-#ifdef CONFIG_SSB_DEBUG
bus->powered_up = 1;
-#endif
mode = dynamic_pctl ? SSB_CLKMODE_DYNAMIC : SSB_CLKMODE_FAST;
ssb_chipco_set_clockmode(&bus->chipco, mode);
return 0;
error:
- ssb_err("Bus powerup failed\n");
+ pr_err("Bus powerup failed\n");
return err;
}
EXPORT_SYMBOL(ssb_bus_powerup);
@@ -1237,15 +1238,15 @@ u32 ssb_admatch_base(u32 adm)
base = (adm & SSB_ADM_BASE0);
break;
case SSB_ADM_TYPE1:
- SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
+ WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
base = (adm & SSB_ADM_BASE1);
break;
case SSB_ADM_TYPE2:
- SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
+ WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
base = (adm & SSB_ADM_BASE2);
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
return base;
@@ -1261,15 +1262,15 @@ u32 ssb_admatch_size(u32 adm)
size = ((adm & SSB_ADM_SZ0) >> SSB_ADM_SZ0_SHIFT);
break;
case SSB_ADM_TYPE1:
- SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
+ WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
size = ((adm & SSB_ADM_SZ1) >> SSB_ADM_SZ1_SHIFT);
break;
case SSB_ADM_TYPE2:
- SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
+ WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
size = ((adm & SSB_ADM_SZ2) >> SSB_ADM_SZ2_SHIFT);
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
size = (1 << (size + 1));
@@ -1300,19 +1301,19 @@ static int __init ssb_modinit(void)
err = b43_pci_ssb_bridge_init();
if (err) {
- ssb_err("Broadcom 43xx PCI-SSB-bridge initialization failed\n");
+ pr_err("Broadcom 43xx PCI-SSB-bridge initialization failed\n");
/* don't fail SSB init because of this */
err = 0;
}
err = ssb_host_pcmcia_init();
if (err) {
- ssb_err("PCMCIA host initialization failed\n");
+ pr_err("PCMCIA host initialization failed\n");
/* don't fail SSB init because of this */
err = 0;
}
err = ssb_gige_init();
if (err) {
- ssb_err("SSB Broadcom Gigabit Ethernet driver initialization failed\n");
+ pr_err("SSB Broadcom Gigabit Ethernet driver initialization failed\n");
/* don't fail SSB init because of this */
err = 0;
}
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 77b551da5728..84807a9b4b13 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -15,14 +15,14 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
-#include "ssb_private.h"
-
/* Define the following to 1 to enable a printk on each coreswitch. */
#define SSB_VERBOSE_PCICORESWITCH_DEBUG 0
@@ -56,7 +56,7 @@ int ssb_pci_switch_coreidx(struct ssb_bus *bus, u8 coreidx)
}
return 0;
error:
- ssb_err("Failed to switch to core %u\n", coreidx);
+ pr_err("Failed to switch to core %u\n", coreidx);
return -ENODEV;
}
@@ -67,9 +67,8 @@ int ssb_pci_switch_core(struct ssb_bus *bus,
unsigned long flags;
#if SSB_VERBOSE_PCICORESWITCH_DEBUG
- ssb_info("Switching to %s core, index %d\n",
- ssb_core_name(dev->id.coreid),
- dev->core_index);
+ pr_info("Switching to %s core, index %d\n",
+ ssb_core_name(dev->id.coreid), dev->core_index);
#endif
spin_lock_irqsave(&bus->bar_lock, flags);
@@ -161,7 +160,7 @@ out:
return err;
err_pci:
- printk(KERN_ERR PFX "Error: ssb_pci_xtal() could not access PCI config space!\n");
+ pr_err("Error: ssb_pci_xtal() could not access PCI config space!\n");
err = -EBUSY;
goto out;
}
@@ -286,7 +285,7 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
u32 spromctl;
u16 size = bus->sprom_size;
- ssb_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
+ pr_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl);
if (err)
goto err_ctlreg;
@@ -294,17 +293,17 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
err = pci_write_config_dword(pdev, SSB_SPROMCTL, spromctl);
if (err)
goto err_ctlreg;
- ssb_notice("[ 0%%");
+ pr_notice("[ 0%%");
msleep(500);
for (i = 0; i < size; i++) {
if (i == size / 4)
- ssb_cont("25%%");
+ pr_cont("25%%");
else if (i == size / 2)
- ssb_cont("50%%");
+ pr_cont("50%%");
else if (i == (size * 3) / 4)
- ssb_cont("75%%");
+ pr_cont("75%%");
else if (i % 2)
- ssb_cont(".");
+ pr_cont(".");
writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
mmiowb();
msleep(20);
@@ -317,12 +316,12 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
if (err)
goto err_ctlreg;
msleep(500);
- ssb_cont("100%% ]\n");
- ssb_notice("SPROM written\n");
+ pr_cont("100%% ]\n");
+ pr_notice("SPROM written\n");
return 0;
err_ctlreg:
- ssb_err("Could not access SPROM control register.\n");
+ pr_err("Could not access SPROM control register.\n");
return err;
}
@@ -816,7 +815,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
memset(out, 0, sizeof(*out));
out->revision = in[size - 1] & 0x00FF;
- ssb_dbg("SPROM revision %d detected\n", out->revision);
+ pr_debug("SPROM revision %d detected\n", out->revision);
memset(out->et0mac, 0xFF, 6); /* preset et0 and et1 mac */
memset(out->et1mac, 0xFF, 6);
@@ -825,7 +824,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
* number stored in the SPROM.
* Always extract r1. */
out->revision = 1;
- ssb_dbg("SPROM treated as revision %d\n", out->revision);
+ pr_debug("SPROM treated as revision %d\n", out->revision);
}
switch (out->revision) {
@@ -842,8 +841,8 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
sprom_extract_r8(out, in);
break;
default:
- ssb_warn("Unsupported SPROM revision %d detected. Will extract v1\n",
- out->revision);
+ pr_warn("Unsupported SPROM revision %d detected. Will extract v1\n",
+ out->revision);
out->revision = 1;
sprom_extract_r123(out, in);
}
@@ -863,7 +862,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
u16 *buf;
if (!ssb_is_sprom_available(bus)) {
- ssb_err("No SPROM available!\n");
+ pr_err("No SPROM available!\n");
return -ENODEV;
}
if (bus->chipco.dev) { /* can be unavailable! */
@@ -882,7 +881,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
} else {
bus->sprom_offset = SSB_SPROM_BASE1;
}
- ssb_dbg("SPROM offset is 0x%x\n", bus->sprom_offset);
+ pr_debug("SPROM offset is 0x%x\n", bus->sprom_offset);
buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
if (!buf)
@@ -907,16 +906,16 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
* available for this device in some other storage */
err = ssb_fill_sprom_with_fallback(bus, sprom);
if (err) {
- ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n",
- err);
+ pr_warn("WARNING: Using fallback SPROM failed (err %d)\n",
+ err);
goto out_free;
} else {
- ssb_dbg("Using SPROM revision %d provided by platform\n",
- sprom->revision);
+ pr_debug("Using SPROM revision %d provided by platform\n",
+ sprom->revision);
err = 0;
goto out_free;
}
- ssb_warn("WARNING: Invalid SPROM CRC (corrupt SPROM)\n");
+ pr_warn("WARNING: Invalid SPROM CRC (corrupt SPROM)\n");
}
}
err = sprom_extract(bus, sprom, buf, bus->sprom_size);
@@ -947,14 +946,12 @@ out:
return err;
}
-#ifdef CONFIG_SSB_DEBUG
static int ssb_pci_assert_buspower(struct ssb_bus *bus)
{
if (likely(bus->powered_up))
return 0;
- printk(KERN_ERR PFX "FATAL ERROR: Bus powered down "
- "while accessing PCI MMIO space\n");
+ pr_err("FATAL ERROR: Bus powered down while accessing PCI MMIO space\n");
if (bus->power_warn_count <= 10) {
bus->power_warn_count++;
dump_stack();
@@ -962,12 +959,6 @@ static int ssb_pci_assert_buspower(struct ssb_bus *bus)
return -ENODEV;
}
-#else /* DEBUG */
-static inline int ssb_pci_assert_buspower(struct ssb_bus *bus)
-{
- return 0;
-}
-#endif /* DEBUG */
static u8 ssb_pci_read8(struct ssb_device *dev, u16 offset)
{
@@ -1026,15 +1017,15 @@ static void ssb_pci_block_read(struct ssb_device *dev, void *buffer,
ioread8_rep(addr, buffer, count);
break;
case sizeof(u16):
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
ioread16_rep(addr, buffer, count >> 1);
break;
case sizeof(u32):
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
ioread32_rep(addr, buffer, count >> 2);
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
return;
@@ -1100,15 +1091,15 @@ static void ssb_pci_block_write(struct ssb_device *dev, const void *buffer,
iowrite8_rep(addr, buffer, count);
break;
case sizeof(u16):
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
iowrite16_rep(addr, buffer, count >> 1);
break;
case sizeof(u32):
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
iowrite32_rep(addr, buffer, count >> 2);
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
}
#endif /* CONFIG_SSB_BLOCKIO */
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index f03422bbf087..567013f8a8be 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -8,6 +8,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -18,8 +20,6 @@
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
-#include "ssb_private.h"
-
/* Define the following to 1 to enable a printk on each coreswitch. */
#define SSB_VERBOSE_PCMCIACORESWITCH_DEBUG 0
@@ -143,7 +143,7 @@ int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus,
return 0;
error:
- ssb_err("Failed to switch to core %u\n", coreidx);
+ pr_err("Failed to switch to core %u\n", coreidx);
return err;
}
@@ -152,9 +152,8 @@ static int ssb_pcmcia_switch_core(struct ssb_bus *bus, struct ssb_device *dev)
int err;
#if SSB_VERBOSE_PCMCIACORESWITCH_DEBUG
- ssb_info("Switching to %s core, index %d\n",
- ssb_core_name(dev->id.coreid),
- dev->core_index);
+ pr_info("Switching to %s core, index %d\n",
+ ssb_core_name(dev->id.coreid), dev->core_index);
#endif
err = ssb_pcmcia_switch_coreidx(bus, dev->core_index);
@@ -170,7 +169,7 @@ int ssb_pcmcia_switch_segment(struct ssb_bus *bus, u8 seg)
int err;
u8 val;
- SSB_WARN_ON((seg != 0) && (seg != 1));
+ WARN_ON((seg != 0) && (seg != 1));
while (1) {
err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_MEMSEG, seg);
if (err)
@@ -190,7 +189,7 @@ int ssb_pcmcia_switch_segment(struct ssb_bus *bus, u8 seg)
return 0;
error:
- ssb_err("Failed to switch pcmcia segment\n");
+ pr_err("Failed to switch pcmcia segment\n");
return err;
}
@@ -300,7 +299,7 @@ static void ssb_pcmcia_block_read(struct ssb_device *dev, void *buffer,
case sizeof(u16): {
__le16 *buf = buffer;
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
while (count) {
*buf = (__force __le16)__raw_readw(addr);
buf++;
@@ -311,7 +310,7 @@ static void ssb_pcmcia_block_read(struct ssb_device *dev, void *buffer,
case sizeof(u32): {
__le16 *buf = buffer;
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
while (count) {
*buf = (__force __le16)__raw_readw(addr);
buf++;
@@ -322,7 +321,7 @@ static void ssb_pcmcia_block_read(struct ssb_device *dev, void *buffer,
break;
}
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
unlock:
spin_unlock_irqrestore(&bus->bar_lock, flags);
@@ -400,7 +399,7 @@ static void ssb_pcmcia_block_write(struct ssb_device *dev, const void *buffer,
case sizeof(u16): {
const __le16 *buf = buffer;
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
while (count) {
__raw_writew((__force u16)(*buf), addr);
buf++;
@@ -411,7 +410,7 @@ static void ssb_pcmcia_block_write(struct ssb_device *dev, const void *buffer,
case sizeof(u32): {
const __le16 *buf = buffer;
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
while (count) {
__raw_writew((__force u16)(*buf), addr);
buf++;
@@ -422,7 +421,7 @@ static void ssb_pcmcia_block_write(struct ssb_device *dev, const void *buffer,
break;
}
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
unlock:
mmiowb();
@@ -547,39 +546,39 @@ static int ssb_pcmcia_sprom_write_all(struct ssb_bus *bus, const u16 *sprom)
bool failed = 0;
size_t size = SSB_PCMCIA_SPROM_SIZE;
- ssb_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
+ pr_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEEN);
if (err) {
- ssb_notice("Could not enable SPROM write access\n");
+ pr_notice("Could not enable SPROM write access\n");
return -EBUSY;
}
- ssb_notice("[ 0%%");
+ pr_notice("[ 0%%");
msleep(500);
for (i = 0; i < size; i++) {
if (i == size / 4)
- ssb_cont("25%%");
+ pr_cont("25%%");
else if (i == size / 2)
- ssb_cont("50%%");
+ pr_cont("50%%");
else if (i == (size * 3) / 4)
- ssb_cont("75%%");
+ pr_cont("75%%");
else if (i % 2)
- ssb_cont(".");
+ pr_cont(".");
err = ssb_pcmcia_sprom_write(bus, i, sprom[i]);
if (err) {
- ssb_notice("Failed to write to SPROM\n");
+ pr_notice("Failed to write to SPROM\n");
failed = 1;
break;
}
}
err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEDIS);
if (err) {
- ssb_notice("Could not disable SPROM write access\n");
+ pr_notice("Could not disable SPROM write access\n");
failed = 1;
}
msleep(500);
if (!failed) {
- ssb_cont("100%% ]\n");
- ssb_notice("SPROM written\n");
+ pr_cont("100%% ]\n");
+ pr_notice("SPROM written\n");
}
return failed ? -EBUSY : 0;
@@ -693,9 +692,8 @@ static int ssb_pcmcia_do_get_invariants(struct pcmcia_device *p_dev,
return -ENOSPC; /* continue with next entry */
error:
- ssb_err(
- "PCMCIA: Failed to fetch device invariants: %s\n",
- error_description);
+ pr_err("PCMCIA: Failed to fetch device invariants: %s\n",
+ error_description);
return -ENODEV;
}
@@ -715,8 +713,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
res = pcmcia_loop_tuple(bus->host_pcmcia, CISTPL_FUNCE,
ssb_pcmcia_get_mac, sprom);
if (res != 0) {
- ssb_err(
- "PCMCIA: Failed to fetch MAC address\n");
+ pr_err("PCMCIA: Failed to fetch MAC address\n");
return -ENODEV;
}
@@ -726,8 +723,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
if ((res == 0) || (res == -ENOSPC))
return 0;
- ssb_err(
- "PCMCIA: Failed to fetch device invariants\n");
+ pr_err("PCMCIA: Failed to fetch device invariants\n");
return -ENODEV;
}
@@ -836,6 +832,6 @@ int ssb_pcmcia_init(struct ssb_bus *bus)
return 0;
error:
- ssb_err("Failed to initialize PCMCIA host device\n");
+ pr_err("Failed to initialize PCMCIA host device\n");
return err;
}
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index b9429df583eb..6ceee98ed6ff 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -12,6 +12,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/pci.h>
@@ -20,8 +22,6 @@
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
-#include "ssb_private.h"
-
const char *ssb_core_name(u16 coreid)
{
@@ -125,7 +125,7 @@ static u16 pcidev_to_chipid(struct pci_dev *pci_dev)
chipid_fallback = 0x4401;
break;
default:
- ssb_err("PCI-ID not in fallback list\n");
+ dev_err(&pci_dev->dev, "PCI-ID not in fallback list\n");
}
return chipid_fallback;
@@ -151,7 +151,7 @@ static u8 chipid_to_nrcores(u16 chipid)
case 0x4704:
return 9;
default:
- ssb_err("CHIPID not in nrcores fallback list\n");
+ pr_err("CHIPID not in nrcores fallback list\n");
}
return 1;
@@ -210,7 +210,7 @@ void ssb_iounmap(struct ssb_bus *bus)
#ifdef CONFIG_SSB_PCIHOST
pci_iounmap(bus->host_pci, bus->mmio);
#else
- SSB_BUG_ON(1); /* Can't reach this code. */
+ WARN_ON(1); /* Can't reach this code. */
#endif
break;
case SSB_BUSTYPE_SDIO:
@@ -236,7 +236,7 @@ static void __iomem *ssb_ioremap(struct ssb_bus *bus,
#ifdef CONFIG_SSB_PCIHOST
mmio = pci_iomap(bus->host_pci, 0, ~0UL);
#else
- SSB_BUG_ON(1); /* Can't reach this code. */
+ WARN_ON(1); /* Can't reach this code. */
#endif
break;
case SSB_BUSTYPE_SDIO:
@@ -318,13 +318,13 @@ int ssb_bus_scan(struct ssb_bus *bus,
bus->chip_package = 0;
}
}
- ssb_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
- bus->chip_id, bus->chip_rev, bus->chip_package);
+ pr_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
+ bus->chip_id, bus->chip_rev, bus->chip_package);
if (!bus->nr_devices)
bus->nr_devices = chipid_to_nrcores(bus->chip_id);
if (bus->nr_devices > ARRAY_SIZE(bus->devices)) {
- ssb_err("More than %d ssb cores found (%d)\n",
- SSB_MAX_NR_CORES, bus->nr_devices);
+ pr_err("More than %d ssb cores found (%d)\n",
+ SSB_MAX_NR_CORES, bus->nr_devices);
goto err_unmap;
}
if (bus->bustype == SSB_BUSTYPE_SSB) {
@@ -355,18 +355,16 @@ int ssb_bus_scan(struct ssb_bus *bus,
dev->bus = bus;
dev->ops = bus->ops;
- printk(KERN_DEBUG PFX
- "Core %d found: %s "
- "(cc 0x%03X, rev 0x%02X, vendor 0x%04X)\n",
- i, ssb_core_name(dev->id.coreid),
- dev->id.coreid, dev->id.revision, dev->id.vendor);
+ pr_debug("Core %d found: %s (cc 0x%03X, rev 0x%02X, vendor 0x%04X)\n",
+ i, ssb_core_name(dev->id.coreid),
+ dev->id.coreid, dev->id.revision, dev->id.vendor);
switch (dev->id.coreid) {
case SSB_DEV_80211:
nr_80211_cores++;
if (nr_80211_cores > 1) {
if (!we_support_multiple_80211_cores(bus)) {
- ssb_dbg("Ignoring additional 802.11 core\n");
+ pr_debug("Ignoring additional 802.11 core\n");
continue;
}
}
@@ -374,7 +372,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
case SSB_DEV_EXTIF:
#ifdef CONFIG_SSB_DRIVER_EXTIF
if (bus->extif.dev) {
- ssb_warn("WARNING: Multiple EXTIFs found\n");
+ pr_warn("WARNING: Multiple EXTIFs found\n");
break;
}
bus->extif.dev = dev;
@@ -382,7 +380,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
break;
case SSB_DEV_CHIPCOMMON:
if (bus->chipco.dev) {
- ssb_warn("WARNING: Multiple ChipCommon found\n");
+ pr_warn("WARNING: Multiple ChipCommon found\n");
break;
}
bus->chipco.dev = dev;
@@ -391,7 +389,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
case SSB_DEV_MIPS_3302:
#ifdef CONFIG_SSB_DRIVER_MIPS
if (bus->mipscore.dev) {
- ssb_warn("WARNING: Multiple MIPS cores found\n");
+ pr_warn("WARNING: Multiple MIPS cores found\n");
break;
}
bus->mipscore.dev = dev;
@@ -412,7 +410,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
}
}
if (bus->pcicore.dev) {
- ssb_warn("WARNING: Multiple PCI(E) cores found\n");
+ pr_warn("WARNING: Multiple PCI(E) cores found\n");
break;
}
bus->pcicore.dev = dev;
diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c
index 2278e43614bd..7fe0afb42234 100644
--- a/drivers/ssb/sdio.c
+++ b/drivers/ssb/sdio.c
@@ -12,14 +12,14 @@
*
*/
+#include "ssb_private.h"
+
#include <linux/ssb/ssb.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/etherdevice.h>
#include <linux/mmc/sdio_func.h>
-#include "ssb_private.h"
-
/* Define the following to 1 to enable a printk on each coreswitch. */
#define SSB_VERBOSE_SDIOCORESWITCH_DEBUG 0
@@ -316,18 +316,18 @@ static void ssb_sdio_block_read(struct ssb_device *dev, void *buffer,
break;
}
case sizeof(u16): {
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
error = sdio_readsb(bus->host_sdio, buffer, offset, count);
break;
}
case sizeof(u32): {
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */
error = sdio_readsb(bus->host_sdio, buffer, offset, count);
break;
}
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
if (!error)
goto out;
@@ -423,18 +423,18 @@ static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer,
(void *)buffer, count);
break;
case sizeof(u16):
- SSB_WARN_ON(count & 1);
+ WARN_ON(count & 1);
error = sdio_writesb(bus->host_sdio, offset,
(void *)buffer, count);
break;
case sizeof(u32):
- SSB_WARN_ON(count & 3);
+ WARN_ON(count & 3);
offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */
error = sdio_writesb(bus->host_sdio, offset,
(void *)buffer, count);
break;
default:
- SSB_WARN_ON(1);
+ WARN_ON(1);
}
if (!error)
goto out;
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index e753fbe302a7..4f028a80d6c4 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -127,13 +127,13 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
goto out_kfree;
err = ssb_devices_freeze(bus, &freeze);
if (err) {
- ssb_err("SPROM write: Could not freeze all devices\n");
+ pr_err("SPROM write: Could not freeze all devices\n");
goto out_unlock;
}
res = sprom_write(bus, sprom);
err = ssb_devices_thaw(&freeze);
if (err)
- ssb_err("SPROM write: Could not thaw all devices\n");
+ pr_err("SPROM write: Could not thaw all devices\n");
out_unlock:
mutex_unlock(&bus->sprom_mutex);
out_kfree:
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index ef9ac8efcab4..5f31bdfbe77f 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -2,47 +2,14 @@
#ifndef LINUX_SSB_PRIVATE_H_
#define LINUX_SSB_PRIVATE_H_
+#define PFX "ssb: "
+#define pr_fmt(fmt) PFX fmt
+
#include <linux/ssb/ssb.h>
#include <linux/types.h>
#include <linux/bcm47xx_wdt.h>
-#define PFX "ssb: "
-
-#ifdef CONFIG_SSB_SILENT
-# define ssb_printk(fmt, ...) \
- do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
-#else
-# define ssb_printk(fmt, ...) \
- printk(fmt, ##__VA_ARGS__)
-#endif /* CONFIG_SSB_SILENT */
-
-#define ssb_emerg(fmt, ...) ssb_printk(KERN_EMERG PFX fmt, ##__VA_ARGS__)
-#define ssb_err(fmt, ...) ssb_printk(KERN_ERR PFX fmt, ##__VA_ARGS__)
-#define ssb_warn(fmt, ...) ssb_printk(KERN_WARNING PFX fmt, ##__VA_ARGS__)
-#define ssb_notice(fmt, ...) ssb_printk(KERN_NOTICE PFX fmt, ##__VA_ARGS__)
-#define ssb_info(fmt, ...) ssb_printk(KERN_INFO PFX fmt, ##__VA_ARGS__)
-#define ssb_cont(fmt, ...) ssb_printk(KERN_CONT fmt, ##__VA_ARGS__)
-
-/* dprintk: Debugging printk; vanishes for non-debug compilation */
-#ifdef CONFIG_SSB_DEBUG
-# define ssb_dbg(fmt, ...) \
- ssb_printk(KERN_DEBUG PFX fmt, ##__VA_ARGS__)
-#else
-# define ssb_dbg(fmt, ...) \
- do { if (0) printk(KERN_DEBUG PFX fmt, ##__VA_ARGS__); } while (0)
-#endif
-
-#ifdef CONFIG_SSB_DEBUG
-# define SSB_WARN_ON(x) WARN_ON(x)
-# define SSB_BUG_ON(x) BUG_ON(x)
-#else
-static inline int __ssb_do_nothing(int x) { return x; }
-# define SSB_WARN_ON(x) __ssb_do_nothing(unlikely(!!(x)))
-# define SSB_BUG_ON(x) __ssb_do_nothing(unlikely(!!(x)))
-#endif
-
-
/* pci.c */
#ifdef CONFIG_SSB_PCIHOST
extern int ssb_pci_switch_core(struct ssb_bus *bus,
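Annotation: the pr_fmt() definition that ssb_private.h now carries is the key to this cleanup, and it explains why every file above moved the #include to the top of the file: pr_fmt() must be defined before <linux/printk.h> is pulled in (usually indirectly, via <linux/ssb/ssb.h>), or printk.h installs its empty default first. A minimal sketch of the expansion:

	#define pr_fmt(fmt) "ssb: " fmt	/* must precede the first printk.h include */
	#include <linux/printk.h>

	static void example(void)
	{
		pr_err("Bus powerdown failed\n");
		/* expands to printk(KERN_ERR "ssb: " "Bus powerdown failed\n") */
	}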
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 9d1109e43ed4..99073325b0c0 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -201,7 +201,7 @@ struct ion_dma_buf_attachment {
struct list_head list;
};
-static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
+static int ion_dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct ion_dma_buf_attachment *a;
@@ -219,7 +219,7 @@ static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
}
a->table = table;
- a->dev = dev;
+ a->dev = attachment->dev;
INIT_LIST_HEAD(&a->list);
attachment->priv = a;
@@ -375,8 +375,6 @@ static const struct dma_buf_ops dma_buf_ops = {
.detach = ion_dma_buf_detatch,
.begin_cpu_access = ion_dma_buf_begin_cpu_access,
.end_cpu_access = ion_dma_buf_end_cpu_access,
- .map_atomic = ion_dma_buf_kmap,
- .unmap_atomic = ion_dma_buf_kunmap,
.map = ion_dma_buf_kmap,
.unmap = ion_dma_buf_kunmap,
};
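Annotation: for context on the ion hunk above, the dma-buf core reworked its attach callback so the attaching device travels inside struct dma_buf_attachment rather than as a separate parameter. A hedged sketch of an implementation against the new signature (my_attach is a hypothetical name):

	static int my_attach(struct dma_buf *dmabuf,
			     struct dma_buf_attachment *attachment)
	{
		struct device *dev = attachment->dev;	/* was the dropped argument */

		/* per-attachment setup keyed on dev goes here */
		return 0;
	}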
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 06d1920150da..a90b2eb112f9 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2175,7 +2175,7 @@ static int bcm2048_fops_release(struct file *file)
}
static __poll_t bcm2048_fops_poll(struct file *file,
- struct poll_table_struct *pts)
+ struct poll_table_struct *pts)
{
struct bcm2048_device *bdev = video_drvdata(file);
__poll_t retval = 0;
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 390fc98d07dd..1269a983455e 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -1312,6 +1312,8 @@ static const struct vb2_ops video_qops = {
.stop_streaming = vpfe_stop_streaming,
.buf_cleanup = vpfe_buf_cleanup,
.buf_queue = vpfe_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/*
@@ -1357,6 +1359,7 @@ static int vpfe_reqbufs(struct file *file, void *priv,
q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->dev = vpfe_dev->pdev;
+ q->lock = &video->lock;
ret = vb2_queue_init(q);
if (ret) {
@@ -1598,17 +1601,18 @@ int vpfe_video_init(struct vpfe_video_device *video, const char *name)
return -EINVAL;
}
/* Initialize field of video device */
+ mutex_init(&video->lock);
video->video_dev.release = video_device_release;
video->video_dev.fops = &vpfe_fops;
video->video_dev.ioctl_ops = &vpfe_ioctl_ops;
video->video_dev.minor = -1;
video->video_dev.tvnorms = 0;
+ video->video_dev.lock = &video->lock;
snprintf(video->video_dev.name, sizeof(video->video_dev.name),
"DAVINCI VIDEO %s %s", name, direction);
spin_lock_init(&video->irqlock);
spin_lock_init(&video->dma_queue_lock);
- mutex_init(&video->lock);
ret = media_entity_pads_init(&video->video_dev.entity,
1, &video->pad);
if (ret < 0)
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.h b/drivers/staging/media/davinci_vpfe/vpfe_video.h
index 22136d3dadcb..4bbd219e8329 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.h
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.h
@@ -128,7 +128,7 @@ struct vpfe_video_device {
spinlock_t irqlock;
/* IRQ lock for DMA queue */
spinlock_t dma_queue_lock;
- /* lock used to access this structure */
+ /* lock used to serialize all video4linux ioctls */
struct mutex lock;
/* number of users performing IO */
u32 io_usrs;
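Annotation: pointing both q->lock and video_dev.lock at the same mutex lets the V4L2 core serialize every ioctl, while the stock vb2 wait helpers wired up above drop that mutex across blocking waits so a sleeping DQBUF cannot starve the other ioctls. Their upstream implementations amount to the following (sketch for illustration):

	void vb2_ops_wait_prepare(struct vb2_queue *vq)
	{
		mutex_unlock(vq->lock);
	}

	void vb2_ops_wait_finish(struct vb2_queue *vq)
	{
		mutex_lock(vq->lock);
	}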
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index ae453fd422f0..28f41caba05d 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -103,6 +103,7 @@ struct prp_priv {
int nfb4eof_irq;
int stream_count;
+ u32 frame_sequence; /* frame sequence counter */
bool last_eof; /* waiting for last EOF at stream off */
bool nfb4eof; /* NFB4EOF encountered during streaming */
struct completion last_eof_comp;
@@ -210,12 +211,15 @@ static void prp_vb2_buf_done(struct prp_priv *priv, struct ipuv3_channel *ch)
done = priv->active_vb2_buf[priv->ipu_buf_num];
if (done) {
+ done->vbuf.field = vdev->fmt.fmt.pix.field;
+ done->vbuf.sequence = priv->frame_sequence;
vb = &done->vbuf.vb2_buf;
vb->timestamp = ktime_get_ns();
vb2_buffer_done(vb, priv->nfb4eof ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}
+ priv->frame_sequence++;
priv->nfb4eof = false;
/* get next queued buffer */
@@ -637,6 +641,7 @@ static int prp_start(struct prp_priv *priv)
/* init EOF completion waitq */
init_completion(&priv->last_eof_comp);
+ priv->frame_sequence = 0;
priv->last_eof = false;
priv->nfb4eof = false;
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index 4e3fdf8aeef5..256039ce561e 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -170,23 +170,22 @@ static int capture_enum_fmt_vid_cap(struct file *file, void *fh,
}
cc_src = imx_media_find_ipu_format(fmt_src.format.code, CS_SEL_ANY);
- if (!cc_src)
- cc_src = imx_media_find_mbus_format(fmt_src.format.code,
- CS_SEL_ANY, true);
- if (!cc_src)
- return -EINVAL;
-
- if (cc_src->bayer) {
- if (f->index != 0)
- return -EINVAL;
- fourcc = cc_src->fourcc;
- } else {
+ if (cc_src) {
u32 cs_sel = (cc_src->cs == IPUV3_COLORSPACE_YUV) ?
CS_SEL_YUV : CS_SEL_RGB;
ret = imx_media_enum_format(&fourcc, f->index, cs_sel);
if (ret)
return ret;
+ } else {
+ cc_src = imx_media_find_mbus_format(fmt_src.format.code,
+ CS_SEL_ANY, true);
+ if (WARN_ON(!cc_src))
+ return -EINVAL;
+
+ if (f->index != 0)
+ return -EINVAL;
+ fourcc = cc_src->fourcc;
}
f->pixelformat = fourcc;
@@ -219,15 +218,7 @@ static int capture_try_fmt_vid_cap(struct file *file, void *fh,
return ret;
cc_src = imx_media_find_ipu_format(fmt_src.format.code, CS_SEL_ANY);
- if (!cc_src)
- cc_src = imx_media_find_mbus_format(fmt_src.format.code,
- CS_SEL_ANY, true);
- if (!cc_src)
- return -EINVAL;
-
- if (cc_src->bayer) {
- cc = cc_src;
- } else {
+ if (cc_src) {
u32 fourcc, cs_sel;
cs_sel = (cc_src->cs == IPUV3_COLORSPACE_YUV) ?
@@ -239,6 +230,13 @@ static int capture_try_fmt_vid_cap(struct file *file, void *fh,
imx_media_enum_format(&fourcc, 0, cs_sel);
cc = imx_media_find_format(fourcc, cs_sel, false);
}
+ } else {
+ cc_src = imx_media_find_mbus_format(fmt_src.format.code,
+ CS_SEL_ANY, true);
+ if (WARN_ON(!cc_src))
+ return -EINVAL;
+
+ cc = cc_src;
}
imx_media_mbus_fmt_to_pix_fmt(&f->fmt.pix, &fmt_src.format, cc);
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 95d7805f3485..cd2c291e1e94 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -111,6 +111,7 @@ struct csi_priv {
struct v4l2_ctrl_handler ctrl_hdlr;
int stream_count; /* streaming counter */
+ u32 frame_sequence; /* frame sequence counter */
bool last_eof; /* waiting for last EOF at stream off */
bool nfb4eof; /* NFB4EOF encountered during streaming */
struct completion last_eof_comp;
@@ -121,10 +122,32 @@ static inline struct csi_priv *sd_to_dev(struct v4l2_subdev *sdev)
return container_of(sdev, struct csi_priv, sd);
}
+static inline bool is_parallel_bus(struct v4l2_fwnode_endpoint *ep)
+{
+ return ep->bus_type != V4L2_MBUS_CSI2;
+}
+
static inline bool is_parallel_16bit_bus(struct v4l2_fwnode_endpoint *ep)
{
- return ep->bus_type != V4L2_MBUS_CSI2 &&
- ep->bus.parallel.bus_width >= 16;
+ return is_parallel_bus(ep) && ep->bus.parallel.bus_width >= 16;
+}
+
+/*
+ * Check for conditions that require the IPU to handle the
+ * data internally as generic data, aka passthrough mode:
+ * - raw bayer media bus formats, or
+ * - the CSI is receiving from a 16-bit parallel bus, or
+ * - the CSI is receiving from an 8-bit parallel bus and the incoming
+ * media bus format is other than UYVY8_2X8/YUYV8_2X8.
+ */
+static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep,
+ struct v4l2_mbus_framefmt *infmt,
+ const struct imx_media_pixfmt *incc)
+{
+ return incc->bayer || is_parallel_16bit_bus(ep) ||
+ (is_parallel_bus(ep) &&
+ infmt->code != MEDIA_BUS_FMT_UYVY8_2X8 &&
+ infmt->code != MEDIA_BUS_FMT_YUYV8_2X8);
}
/*
@@ -236,12 +259,15 @@ static void csi_vb2_buf_done(struct csi_priv *priv)
done = priv->active_vb2_buf[priv->ipu_buf_num];
if (done) {
+ done->vbuf.field = vdev->fmt.fmt.pix.field;
+ done->vbuf.sequence = priv->frame_sequence;
vb = &done->vbuf.vb2_buf;
vb->timestamp = ktime_get_ns();
vb2_buffer_done(vb, priv->nfb4eof ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}
+ priv->frame_sequence++;
priv->nfb4eof = false;
/* get next queued buffer */
@@ -367,15 +393,18 @@ static void csi_idmac_unsetup_vb2_buf(struct csi_priv *priv,
static int csi_idmac_setup_channel(struct csi_priv *priv)
{
struct imx_media_video_dev *vdev = priv->vdev;
+ const struct imx_media_pixfmt *incc;
struct v4l2_mbus_framefmt *infmt;
struct ipu_image image;
u32 passthrough_bits;
+ u32 passthrough_cycles;
dma_addr_t phys[2];
bool passthrough;
u32 burst_size;
int ret;
infmt = &priv->format_mbus[CSI_SINK_PAD];
+ incc = priv->cc[CSI_SINK_PAD];
ipu_cpmem_zero(priv->idmac_ch);
@@ -389,12 +418,9 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
image.phys0 = phys[0];
image.phys1 = phys[1];
- /*
- * Check for conditions that require the IPU to handle the
- * data internally as generic data, aka passthrough mode:
- * - raw bayer formats
- * - the CSI is receiving from a 16-bit parallel bus
- */
+ passthrough = requires_passthrough(&priv->upstream_ep, infmt, incc);
+ passthrough_cycles = 1;
+
switch (image.pix.pixelformat) {
case V4L2_PIX_FMT_SBGGR8:
case V4L2_PIX_FMT_SGBRG8:
@@ -402,7 +428,6 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
case V4L2_PIX_FMT_SRGGB8:
case V4L2_PIX_FMT_GREY:
burst_size = 16;
- passthrough = true;
passthrough_bits = 8;
break;
case V4L2_PIX_FMT_SBGGR16:
@@ -411,7 +436,6 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
case V4L2_PIX_FMT_SRGGB16:
case V4L2_PIX_FMT_Y16:
burst_size = 8;
- passthrough = true;
passthrough_bits = 16;
break;
case V4L2_PIX_FMT_YUV420:
@@ -419,7 +443,6 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
burst_size = (image.pix.width & 0x3f) ?
((image.pix.width & 0x1f) ?
((image.pix.width & 0xf) ? 8 : 16) : 32) : 64;
- passthrough = is_parallel_16bit_bus(&priv->upstream_ep);
passthrough_bits = 16;
/* Skip writing U and V components to odd rows */
ipu_cpmem_skip_odd_chroma_rows(priv->idmac_ch);
@@ -428,18 +451,25 @@ static int csi_idmac_setup_channel(struct csi_priv *priv)
case V4L2_PIX_FMT_UYVY:
burst_size = (image.pix.width & 0x1f) ?
((image.pix.width & 0xf) ? 8 : 16) : 32;
- passthrough = is_parallel_16bit_bus(&priv->upstream_ep);
passthrough_bits = 16;
break;
+ case V4L2_PIX_FMT_RGB565:
+ if (passthrough) {
+ burst_size = 16;
+ passthrough_bits = 8;
+ passthrough_cycles = incc->cycles;
+ break;
+ }
+ /* fallthrough - non-passthrough RGB565 (CSI-2 bus) */
default:
burst_size = (image.pix.width & 0xf) ? 8 : 16;
- passthrough = is_parallel_16bit_bus(&priv->upstream_ep);
passthrough_bits = 16;
break;
}
if (passthrough) {
- ipu_cpmem_set_resolution(priv->idmac_ch, image.rect.width,
+ ipu_cpmem_set_resolution(priv->idmac_ch,
+ image.rect.width * passthrough_cycles,
image.rect.height);
ipu_cpmem_set_stride(priv->idmac_ch, image.pix.bytesperline);
ipu_cpmem_set_buffer(priv->idmac_ch, 0, image.phys0);
@@ -545,6 +575,7 @@ static int csi_idmac_start(struct csi_priv *priv)
/* init EOF completion waitq */
init_completion(&priv->last_eof_comp);
+ priv->frame_sequence = 0;
priv->last_eof = false;
priv->nfb4eof = false;
@@ -630,17 +661,20 @@ static void csi_idmac_stop(struct csi_priv *priv)
static int csi_setup(struct csi_priv *priv)
{
struct v4l2_mbus_framefmt *infmt, *outfmt;
+ const struct imx_media_pixfmt *incc;
struct v4l2_mbus_config mbus_cfg;
struct v4l2_mbus_framefmt if_fmt;
+ struct v4l2_rect crop;
infmt = &priv->format_mbus[CSI_SINK_PAD];
+ incc = priv->cc[CSI_SINK_PAD];
outfmt = &priv->format_mbus[priv->active_output_pad];
/* compose mbus_config from the upstream endpoint */
mbus_cfg.type = priv->upstream_ep.bus_type;
- mbus_cfg.flags = (priv->upstream_ep.bus_type == V4L2_MBUS_CSI2) ?
- priv->upstream_ep.bus.mipi_csi2.flags :
- priv->upstream_ep.bus.parallel.flags;
+ mbus_cfg.flags = is_parallel_bus(&priv->upstream_ep) ?
+ priv->upstream_ep.bus.parallel.flags :
+ priv->upstream_ep.bus.mipi_csi2.flags;
/*
* we need to pass input frame to CSI interface, but
@@ -648,8 +682,18 @@ static int csi_setup(struct csi_priv *priv)
*/
if_fmt = *infmt;
if_fmt.field = outfmt->field;
+ crop = priv->crop;
- ipu_csi_set_window(priv->csi, &priv->crop);
+ /*
+ * if cycles is set, we need to handle this over multiple cycles as
+ * generic/bayer data
+ */
+ if (is_parallel_bus(&priv->upstream_ep) && incc->cycles) {
+ if_fmt.width *= incc->cycles;
+ crop.width *= incc->cycles;
+ }
+
+ ipu_csi_set_window(priv->csi, &crop);
ipu_csi_set_downsize(priv->csi,
priv->crop.width == 2 * priv->compose.width,
@@ -1007,7 +1051,6 @@ static int csi_link_validate(struct v4l2_subdev *sd,
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
struct v4l2_fwnode_endpoint upstream_ep = {};
- const struct imx_media_pixfmt *incc;
bool is_csi2;
int ret;
@@ -1025,17 +1068,7 @@ static int csi_link_validate(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
priv->upstream_ep = upstream_ep;
- is_csi2 = (upstream_ep.bus_type == V4L2_MBUS_CSI2);
- incc = priv->cc[CSI_SINK_PAD];
-
- if (priv->dest != IPU_CSI_DEST_IDMAC &&
- (incc->bayer || is_parallel_16bit_bus(&upstream_ep))) {
- v4l2_err(&priv->sd,
- "bayer/16-bit parallel buses must go to IDMAC pad\n");
- ret = -EINVAL;
- goto out;
- }
-
+ is_csi2 = !is_parallel_bus(&upstream_ep);
if (is_csi2) {
int vc_num = 0;
/*
@@ -1059,7 +1092,7 @@ static int csi_link_validate(struct v4l2_subdev *sd,
/* select either parallel or MIPI-CSI2 as input to CSI */
ipu_set_csi_src_mux(priv->ipu, priv->csi_id, is_csi2);
-out:
+
mutex_unlock(&priv->lock);
return ret;
}
@@ -1131,6 +1164,7 @@ static int csi_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_mbus_code_enum *code)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fwnode_endpoint upstream_ep;
const struct imx_media_pixfmt *incc;
struct v4l2_mbus_framefmt *infmt;
int ret = 0;
@@ -1147,7 +1181,13 @@ static int csi_enum_mbus_code(struct v4l2_subdev *sd,
break;
case CSI_SRC_PAD_DIRECT:
case CSI_SRC_PAD_IDMAC:
- if (incc->bayer) {
+ ret = csi_get_upstream_endpoint(priv, &upstream_ep);
+ if (ret) {
+ v4l2_err(&priv->sd, "failed to find upstream endpoint\n");
+ goto out;
+ }
+
+ if (requires_passthrough(&upstream_ep, infmt, incc)) {
if (code->index != 0) {
ret = -EINVAL;
goto out;
@@ -1192,10 +1232,12 @@ static int csi_enum_frame_size(struct v4l2_subdev *sd,
} else {
crop = __csi_get_crop(priv, cfg, fse->which);
- fse->min_width = fse->max_width = fse->index & 1 ?
+ fse->min_width = fse->index & 1 ?
crop->width / 2 : crop->width;
- fse->min_height = fse->max_height = fse->index & 2 ?
+ fse->max_width = fse->min_width;
+ fse->min_height = fse->index & 2 ?
crop->height / 2 : crop->height;
+ fse->max_height = fse->min_height;
}
mutex_unlock(&priv->lock);
@@ -1286,7 +1328,7 @@ static void csi_try_fmt(struct csi_priv *priv,
sdformat->format.width = compose->width;
sdformat->format.height = compose->height;
- if (incc->bayer) {
+ if (requires_passthrough(upstream_ep, infmt, incc)) {
sdformat->format.code = infmt->code;
*cc = incc;
} else {
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 7ec2db84451c..8aa13403b09d 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -78,6 +78,7 @@ static const struct imx_media_pixfmt rgb_formats[] = {
.codes = {MEDIA_BUS_FMT_RGB565_2X8_LE},
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 16,
+ .cycles = 2,
}, {
.fourcc = V4L2_PIX_FMT_RGB24,
.codes = {
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index e945e0ed6dd6..57bd094cf765 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -62,6 +62,8 @@ struct imx_media_pixfmt {
u32 fourcc;
u32 codes[4];
int bpp; /* total bpp */
+ /* cycles per pixel for generic (bayer) formats for the parallel bus */
+ int cycles;
enum ipu_color_space cs;
bool planar; /* is a planar format */
bool bayer; /* is a raw bayer format */
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 04c18323c2ea..89b02fc305b8 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -457,7 +457,6 @@ static const struct snd_pcm_ops pcm_ops = {
.trigger = pcm_trigger,
.pointer = pcm_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static int split_arg_list(char *buf, char **card_name, u16 *ch_num,
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index e461168313bf..4e6611e4c59b 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -290,13 +290,6 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv,
- select_queue_fallback_t fallback)
-{
- return (u16)smp_processor_id();
-}
-
static void xlr_hw_set_mac_addr(struct net_device *ndev)
{
struct xlr_net_priv *priv = netdev_priv(ndev);
@@ -403,7 +396,7 @@ static const struct net_device_ops xlr_netdev_ops = {
.ndo_open = xlr_net_open,
.ndo_stop = xlr_net_stop,
.ndo_start_xmit = xlr_net_start_xmit,
- .ndo_select_queue = xlr_net_select_queue,
+ .ndo_select_queue = dev_pick_tx_cpu_id,
.ndo_set_mac_address = xlr_net_set_mac_addr,
.ndo_set_rx_mode = xlr_set_rx_mode,
.ndo_get_stats64 = xlr_stats,
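Annotation: the driver-local queue selector removed above duplicated what the networking core now exports as dev_pick_tx_cpu_id(); its behaviour is approximately the following sketch:

	u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev,
			       select_queue_fallback_t fallback)
	{
		return (u16)raw_smp_processor_id();	/* queue = current CPU */
	}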
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 084a246eec19..6790b7c8cfb1 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -575,7 +575,6 @@ enum ht_cap_ampdu_factor {
* According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
*/
#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
#define OP_MODE_PURE 0
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index add1ba00f3e9..38e85c8a85c8 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -253,7 +253,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
}
static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 7bc74d7d8a3a..1075eacdb441 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -40,6 +40,7 @@
#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
+#include <linux/crc32poly.h>
#include <linux/semaphore.h>
#include "osdep_service.h"
@@ -49,8 +50,6 @@
/* =====WEP related===== */
-#define CRC32_POLY 0x04c11db7
-
struct arc4context {
u32 x;
u32 y;
@@ -135,7 +134,7 @@ static void crc32_init(void)
for (i = 0; i < 256; ++i) {
k = crc32_reverseBit((u8)i);
for (c = ((u32)k) << 24, j = 8; j > 0; --j)
- c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+ c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY_BE : (c << 1);
p1 = (u8 *)&crc32_table[i];
p1[0] = crc32_reverseBit(p[3]);
p1[1] = crc32_reverseBit(p[2]);
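Annotation: CRC32_POLY_BE comes from the new <linux/crc32poly.h>, which centralizes the polynomial constants so drivers stop hard-coding 0x04c11db7. The left-shifting table initialization above wants the big-endian (MSB-first) form; reflected, LSB-first implementations would use the little-endian constant instead. One step of the MSB-first recurrence, as a sketch:

	#include <linux/crc32poly.h>

	/* CRC32_POLY_BE = 0x04c11db7, CRC32_POLY_LE = 0xedb88320 */
	static u32 crc32_be_step(u32 c)
	{
		/* shift out the top bit; xor in the polynomial if it was set */
		return (c & 0x80000000) ? (c << 1) ^ CRC32_POLY_BE : (c << 1);
	}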
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index 0ed2f44ab4e9..00a4302e9983 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -574,7 +574,6 @@ struct ieee80211_ht_addt_info {
* According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
*/
#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
/* Spatial Multiplexing Power Save Modes */
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 612277a555d2..6c8ac9e86c9f 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -6,6 +6,7 @@
******************************************************************************/
#define _RTW_SECURITY_C_
+#include <linux/crc32poly.h>
#include <drv_types.h>
#include <rtw_debug.h>
@@ -87,8 +88,6 @@ const char *security_type_str(u8 value)
/* WEP related ===== */
-#define CRC32_POLY 0x04c11db7
-
struct arc4context {
u32 x;
u32 y;
@@ -178,7 +177,7 @@ static void crc32_init(void)
for (i = 0; i < 256; ++i) {
k = crc32_reverseBit((u8)i);
for (c = ((u32)k) << 24, j = 8; j > 0; --j) {
- c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+ c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY_BE : (c << 1);
}
p1 = (u8 *)&crc32_table[i];
diff --git a/drivers/staging/rtl8723bs/include/wifi.h b/drivers/staging/rtl8723bs/include/wifi.h
index 08bc79840b23..559bf2606fb7 100644
--- a/drivers/staging/rtl8723bs/include/wifi.h
+++ b/drivers/staging/rtl8723bs/include/wifi.h
@@ -799,7 +799,6 @@ enum HT_CAP_AMPDU_FACTOR {
* According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
*/
#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
/* Spatial Multiplexing Power Save Modes */
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index ace68f023b49..181642358e3f 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -403,10 +403,9 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
}
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb
- , void *accel_priv
- , select_queue_fallback_t fallback
-)
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/staging/rtlwifi/base.c b/drivers/staging/rtlwifi/base.c
index e46e47d93d7d..094827c1879a 100644
--- a/drivers/staging/rtlwifi/base.c
+++ b/drivers/staging/rtlwifi/base.c
@@ -1838,7 +1838,7 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
reject_agg, ctrl_agg_size, agg_size);
rtlpriv->hw->max_rx_aggregation_subframes =
- (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF);
+ (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT);
}
/*********************************************************
diff --git a/drivers/staging/skein/skein_generic.c b/drivers/staging/skein/skein_generic.c
index 11f5e530a75f..c31fc6408383 100644
--- a/drivers/staging/skein/skein_generic.c
+++ b/drivers/staging/skein/skein_generic.c
@@ -137,7 +137,6 @@ static struct shash_alg alg256 = {
.base = {
.cra_name = "skein256",
.cra_driver_name = "skein",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SKEIN_256_BLOCK_BYTES,
.cra_module = THIS_MODULE,
}
@@ -155,7 +154,6 @@ static struct shash_alg alg512 = {
.base = {
.cra_name = "skein512",
.cra_driver_name = "skein",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SKEIN_512_BLOCK_BYTES,
.cra_module = THIS_MODULE,
}
@@ -173,7 +171,6 @@ static struct shash_alg alg1024 = {
.base = {
.cra_name = "skein1024",
.cra_driver_name = "skein",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SKEIN_1024_BLOCK_BYTES,
.cra_module = THIS_MODULE,
}
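These three skein hunks lean on the shash core doing the work: crypto_register_shash() goes through shash_prepare_alg(), which ORs CRYPTO_ALG_TYPE_SHASH into cra_flags itself, so a driver that set only the type bit can drop cra_flags entirely. A pared-down sketch of such a registration (the foo_* names and sizes are placeholders, not a real algorithm):

#include <crypto/internal/hash.h>
#include <linux/module.h>

static int foo_init(struct shash_desc *desc);
static int foo_update(struct shash_desc *desc, const u8 *data,
                      unsigned int len);
static int foo_final(struct shash_desc *desc, u8 *out);

static struct shash_alg foo_alg = {
        .digestsize     = 32,
        .init           = foo_init,
        .update         = foo_update,
        .final          = foo_final,
        .base           = {
                .cra_name        = "foo256",
                .cra_driver_name = "foo-generic",
                /* no .cra_flags: the core sets CRYPTO_ALG_TYPE_SHASH */
                .cra_blocksize   = 64,
                .cra_module      = THIS_MODULE,
        },
};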
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
index 5c7ea237893e..da4a93df8d75 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -504,7 +504,7 @@ static void vbox_set_edid(struct drm_connector *connector, int width,
for (i = 0; i < EDID_SIZE - 1; ++i)
sum += edid[i];
edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF;
- drm_mode_connector_update_edid_property(connector, (struct edid *)edid);
+ drm_connector_update_edid_property(connector, (struct edid *)edid);
}
static int vbox_get_modes(struct drm_connector *connector)
@@ -655,7 +655,7 @@ static int vbox_connector_init(struct drm_device *dev,
dev->mode_config.suggested_y_property, 0);
drm_connector_register(connector);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index ce26741ae9d9..6dd0c838db05 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -628,20 +628,6 @@ static void stop_streaming(struct vb2_queue *vq)
v4l2_err(&dev->v4l2_dev, "Failed to disable camera\n");
}
-static void bm2835_mmal_lock(struct vb2_queue *vq)
-{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
-
- mutex_lock(&dev->mutex);
-}
-
-static void bm2835_mmal_unlock(struct vb2_queue *vq)
-{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
-
- mutex_unlock(&dev->mutex);
-}
-
static const struct vb2_ops bm2835_mmal_video_qops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
@@ -650,8 +636,8 @@ static const struct vb2_ops bm2835_mmal_video_qops = {
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
- .wait_prepare = bm2835_mmal_unlock,
- .wait_finish = bm2835_mmal_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* ------------------------------------------------------------------
@@ -1864,6 +1850,8 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
goto cleanup_gdev;
}
+ /* v4l2 core mutex used to protect all fops and v4l2 ioctls. */
+ mutex_init(&dev->mutex);
dev->camera_num = camera;
dev->max_width = resolutions[camera][0];
dev->max_height = resolutions[camera][1];
@@ -1908,13 +1896,11 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
q->ops = &bm2835_mmal_video_qops;
q->mem_ops = &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &dev->mutex;
ret = vb2_queue_init(q);
if (ret < 0)
goto unreg_dev;
- /* v4l2 core mutex used to protect all fops and v4l2 ioctls. */
- mutex_init(&dev->mutex);
-
/* initialise video devices */
ret = bm2835_mmal_init_device(dev, &dev->vdev);
if (ret < 0)
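The three bcm2835-camera hunks are one conversion: instead of hand-rolled wait_prepare/wait_finish callbacks, the driver hands its serialization mutex to the vb2 core via q->lock and uses the stock vb2_ops_wait_prepare()/vb2_ops_wait_finish() helpers, which drop and retake that lock around blocking buffer waits; moving mutex_init() earlier just ensures the lock exists before the queue can use it. A compressed sketch of the pattern (names hypothetical, most callbacks elided):

#include <linux/mutex.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

static int foo_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
                           unsigned int *nplanes, unsigned int sizes[],
                           struct device *alloc_devs[]);
static void foo_buf_queue(struct vb2_buffer *vb);

static const struct vb2_ops foo_qops = {
        .queue_setup    = foo_queue_setup,
        .buf_queue      = foo_buf_queue,
        .wait_prepare   = vb2_ops_wait_prepare, /* mutex_unlock(q->lock) */
        .wait_finish    = vb2_ops_wait_finish,  /* mutex_lock(q->lock) */
};

static int foo_queue_init(struct vb2_queue *q, struct mutex *lock)
{
        mutex_init(lock);       /* must precede any vb2 use of q->lock */
        q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        q->io_modes = VB2_MMAP | VB2_READ;
        q->ops = &foo_qops;
        q->mem_ops = &vb2_vmalloc_memops;
        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
        q->lock = lock;
        return vb2_queue_init(q);
}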
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 4c44d7bed01a..cb6f32ce7de8 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -1,10 +1,10 @@
menuconfig TARGET_CORE
tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
- depends on SCSI && BLOCK
+ depends on BLOCK
select CONFIGFS_FS
select CRC_T10DIF
- select BLK_SCSI_REQUEST # only for scsi_command_size_tbl..
+ select BLK_SCSI_REQUEST
select SGL_ALLOC
default n
help
@@ -29,6 +29,7 @@ config TCM_FILEIO
config TCM_PSCSI
tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
+ depends on SCSI
help
Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
passthrough access to Linux/SCSI device
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0ebc4818e132..95d0a22b2ad6 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1090,10 +1090,8 @@ static struct configfs_attribute *lio_target_tpg_attrs[] = {
/* Start items for lio_target_tiqn_cit */
-static struct se_portal_group *lio_target_tiqn_addtpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *lio_target_tiqn_addtpg(struct se_wwn *wwn,
+ const char *name)
{
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 99501785cdc1..923b1a9fc3dc 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -369,7 +369,7 @@ static int iscsi_login_zero_tsih_s1(
return -ENOMEM;
}
- sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
+ sess->se_sess = transport_alloc_session(TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 4b34f71547c6..101d62105c93 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -636,8 +636,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
none = strstr(buf1, NONE);
if (none)
goto out;
- strncat(buf1, ",", strlen(","));
- strncat(buf1, NONE, strlen(NONE));
+ strlcat(buf1, "," NONE, sizeof(buf1));
if (iscsi_update_param_value(param, buf1) < 0)
return -EINVAL;
}
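The strncat() pair replaced above was bounded by strlen() of the *source*, which is no bound at all: strncat()'s third argument limits how much of src is read, not how much dst can hold. strlcat() is bounded by the destination's total size and always NUL-terminates. A toy illustration of the safe form:

#include <linux/string.h>

static void foo_append_none(char *buf, size_t bufsz)
{
        /* Truncates instead of overflowing when buf is nearly full. */
        strlcat(buf, ",None", bufsz);
}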
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 4435bf374d2d..49be1e41290c 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -17,7 +17,7 @@
******************************************************************************/
#include <linux/list.h>
-#include <linux/percpu_ida.h>
+#include <linux/sched/signal.h>
#include <net/ipv6.h> /* ipv6_addr_equal() */
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
@@ -147,6 +147,30 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
spin_unlock_bh(&cmd->r2t_lock);
}
+static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup)
+{
+ int tag = -1;
+ DEFINE_WAIT(wait);
+ struct sbq_wait_state *ws;
+
+ if (state == TASK_RUNNING)
+ return tag;
+
+ ws = &se_sess->sess_tag_pool.ws[0];
+ for (;;) {
+ prepare_to_wait_exclusive(&ws->wait, &wait, state);
+ if (signal_pending_state(state, current))
+ break;
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, cpup);
+ if (tag >= 0)
+ break;
+ schedule();
+ }
+
+ finish_wait(&ws->wait, &wait);
+ return tag;
+}
+
/*
* May be called from software interrupt (timer) context for allocating
* iSCSI NopINs.
@@ -155,9 +179,11 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
struct iscsi_cmd *cmd;
struct se_session *se_sess = conn->sess->se_sess;
- int size, tag;
+ int size, tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+ if (tag < 0)
+ tag = iscsit_wait_for_tag(se_sess, state, &cpu);
if (tag < 0)
return NULL;
@@ -166,6 +192,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
memset(cmd, 0, size);
cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->conn = conn;
cmd->data_direction = DMA_NONE;
INIT_LIST_HEAD(&cmd->i_conn_node);
@@ -711,7 +738,7 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
kfree(cmd->iov_data);
kfree(cmd->text_in_ptr);
- percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(sess->se_sess, se_cmd);
}
EXPORT_SYMBOL(iscsit_release_cmd);
@@ -1026,26 +1053,8 @@ void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
void iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
- struct iscsi_session *sess = conn->sess;
- struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
- /*
- * NOPIN timeout is disabled..
- */
- if (!na->nopin_timeout)
- return;
-
spin_lock_bh(&conn->nopin_timer_lock);
- if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
- spin_unlock_bh(&conn->nopin_timer_lock);
- return;
- }
-
- conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
- conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
- mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);
-
- pr_debug("Started NOPIN Timer on CID: %d at %u second"
- " interval\n", conn->cid, na->nopin_timeout);
+ __iscsit_start_nopin_timer(conn);
spin_unlock_bh(&conn->nopin_timer_lock);
}
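percpu_ida_alloc() used to combine allocation and sleeping; sbitmap_queue_get() never sleeps, which is why iscsit_allocate_cmd() now pairs it with the explicit iscsit_wait_for_tag() loop above. The release side must return the tag to the same pool and wake waiters, which target_free_tag() does. A condensed sketch of the get/put pairing, assuming a pool initialized with sbitmap_queue_init_node():

#include <linux/sbitmap.h>

static int foo_get_tag(struct sbitmap_queue *pool, unsigned int *cpu)
{
        /* Non-blocking: returns -1 when all tags are in flight. */
        return sbitmap_queue_get(pool, cpu);
}

static void foo_put_tag(struct sbitmap_queue *pool, int tag, unsigned int cpu)
{
        /* Frees the tag and wakes one waiter queued on the pool. */
        sbitmap_queue_clear(pool, tag, cpu);
}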
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
index abe8ecbcdf06..158ee9d522f7 100644
--- a/drivers/target/loopback/Kconfig
+++ b/drivers/target/loopback/Kconfig
@@ -1,5 +1,6 @@
config LOOPBACK_TARGET
tristate "TCM Virtual SAS target and Linux/SCSI LDD fabric loopback module"
+ depends on SCSI
help
Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
fabric loopback module.
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 60d5b918c4ac..bc8918f382e4 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -239,10 +239,7 @@ out:
return ret;
release:
- if (se_cmd)
- transport_generic_free_cmd(se_cmd, 0);
- else
- kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+ kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
goto out;
}
@@ -768,7 +765,7 @@ static int tcm_loop_make_nexus(
if (!tl_nexus)
return -ENOMEM;
- tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
+ tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0,
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
name, tl_nexus, tcm_loop_alloc_sess_cb);
if (IS_ERR(tl_nexus->se_sess)) {
@@ -808,7 +805,7 @@ static int tcm_loop_drop_nexus(
/*
* Release the SCSI I_T Nexus to the emulated Target Port
*/
- transport_deregister_session(tl_nexus->se_sess);
+ target_remove_session(se_sess);
tpg->tl_nexus = NULL;
kfree(tl_nexus);
return 0;
@@ -983,10 +980,8 @@ static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
/* Start items for tcm_loop_naa_cit */
-static struct se_portal_group *tcm_loop_make_naa_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index fb1003921d85..3d10189ecedc 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -209,7 +209,7 @@ static struct sbp_session *sbp_session_create(
INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
sess->guid = guid;
- sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
+ sess->se_sess = target_setup_session(&tpg->se_tpg, 128,
sizeof(struct sbp_target_request),
TARGET_PROT_NORMAL, guid_str,
sess, NULL);
@@ -235,8 +235,7 @@ static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
if (cancel_work)
cancel_delayed_work_sync(&sess->maint_work);
- transport_deregister_session_configfs(sess->se_sess);
- transport_deregister_session(sess->se_sess);
+ target_remove_session(sess->se_sess);
if (sess->card)
fw_card_put(sess->card);
@@ -926,15 +925,16 @@ static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
{
struct se_session *se_sess = sess->se_sess;
struct sbp_target_request *req;
- int tag;
+ int tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return ERR_PTR(-ENOMEM);
req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
memset(req, 0, sizeof(*req));
req->se_cmd.map_tag = tag;
+ req->se_cmd.map_cpu = cpu;
req->se_cmd.tag = next_orb;
return req;
@@ -1460,7 +1460,7 @@ static void sbp_free_request(struct sbp_target_request *req)
kfree(req->pg_tbl);
kfree(req->cmd_buf);
- percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(se_sess, se_cmd);
}
static void sbp_mgt_agent_process(struct work_struct *work)
@@ -2005,10 +2005,8 @@ static void sbp_pre_unlink_lun(
pr_err("unlink LUN: failed to update unit directory\n");
}
-static struct se_portal_group *sbp_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct sbp_tport *tport =
container_of(wwn, struct sbp_tport, tport_wwn);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 5ccef7d597fa..f6b1549f4142 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -263,8 +263,8 @@ static struct config_group *target_core_register_fabric(
&tf->tf_discovery_cit);
configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);
- pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
- " %s\n", tf->tf_group.cg_item.ci_name);
+ pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
+ config_item_name(&tf->tf_group.cg_item));
return &tf->tf_group;
}
@@ -810,7 +810,7 @@ static ssize_t pi_prot_type_store(struct config_item *item,
dev->transport->name);
return -ENOSYS;
}
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("DIF protection requires device to be configured\n");
return -ENODEV;
}
@@ -859,7 +859,7 @@ static ssize_t pi_prot_format_store(struct config_item *item,
dev->transport->name);
return -ENOSYS;
}
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("DIF protection format requires device to be configured\n");
return -ENODEV;
}
@@ -1948,7 +1948,7 @@ static ssize_t target_dev_enable_show(struct config_item *item, char *page)
{
struct se_device *dev = to_device(item);
- return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
+ return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
}
static ssize_t target_dev_enable_store(struct config_item *item,
@@ -2473,7 +2473,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("Unable to set alua_access_state while device is"
" not configured\n");
return -ENODEV;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index e27db4d45a9d..47b5ef153135 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -336,7 +336,6 @@ int core_enable_device_list_for_node(
return -ENOMEM;
}
- atomic_set(&new->ua_count, 0);
spin_lock_init(&new->ua_lock);
INIT_LIST_HEAD(&new->ua_list);
INIT_LIST_HEAD(&new->lun_link);
@@ -879,39 +878,21 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
}
EXPORT_SYMBOL(target_to_linux_sector);
-/**
- * target_find_device - find a se_device by its dev_index
- * @id: dev_index
- * @do_depend: true if caller needs target_depend_item to be done
- *
- * If do_depend is true, the caller must do a target_undepend_item
- * when finished using the device.
- *
- * If do_depend is false, the caller must be called in a configfs
- * callback or during removal.
- */
-struct se_device *target_find_device(int id, bool do_depend)
-{
- struct se_device *dev;
-
- mutex_lock(&device_mutex);
- dev = idr_find(&devices_idr, id);
- if (dev && do_depend && target_depend_item(&dev->dev_group.cg_item))
- dev = NULL;
- mutex_unlock(&device_mutex);
- return dev;
-}
-EXPORT_SYMBOL(target_find_device);
-
struct devices_idr_iter {
+ struct config_item *prev_item;
int (*fn)(struct se_device *dev, void *data);
void *data;
};
static int target_devices_idr_iter(int id, void *p, void *data)
+ __must_hold(&device_mutex)
{
struct devices_idr_iter *iter = data;
struct se_device *dev = p;
+ int ret;
+
+ config_item_put(iter->prev_item);
+ iter->prev_item = NULL;
/*
* We add the device early to the idr, so it can be used
@@ -919,10 +900,18 @@ static int target_devices_idr_iter(int id, void *p, void *data)
* to allow other callers to access partially setup devices,
* so we skip them here.
*/
- if (!(dev->dev_flags & DF_CONFIGURED))
+ if (!target_dev_configured(dev))
+ return 0;
+
+ iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+ if (!iter->prev_item)
return 0;
+ mutex_unlock(&device_mutex);
- return iter->fn(dev, iter->data);
+ ret = iter->fn(dev, iter->data);
+
+ mutex_lock(&device_mutex);
+ return ret;
}
/**
@@ -936,15 +925,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
void *data)
{
- struct devices_idr_iter iter;
+ struct devices_idr_iter iter = { .fn = fn, .data = data };
int ret;
- iter.fn = fn;
- iter.data = data;
-
mutex_lock(&device_mutex);
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
mutex_unlock(&device_mutex);
+ config_item_put(iter.prev_item);
return ret;
}
@@ -953,7 +940,7 @@ int target_configure_device(struct se_device *dev)
struct se_hba *hba = dev->se_hba;
int ret, id;
- if (dev->dev_flags & DF_CONFIGURED) {
+ if (target_dev_configured(dev)) {
pr_err("se_dev->se_dev_ptr already set for storage"
" object\n");
return -EEXIST;
@@ -1058,7 +1045,7 @@ void target_free_device(struct se_device *dev)
WARN_ON(!list_empty(&dev->dev_sep_list));
- if (dev->dev_flags & DF_CONFIGURED) {
+ if (target_dev_configured(dev)) {
destroy_workqueue(dev->tmr_wq);
dev->transport->destroy_device(dev);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index e1416b007aa4..aa2f4f632ebe 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -34,6 +34,7 @@
#include <linux/configfs.h>
#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
@@ -642,7 +643,7 @@ static int target_fabric_port_link(
}
dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("se_device not configured yet, cannot port link\n");
return -ENODEV;
}
@@ -841,7 +842,7 @@ static struct config_group *target_fabric_make_tpg(
return ERR_PTR(-ENOSYS);
}
- se_tpg = tf->tf_ops->fabric_make_tpg(wwn, group, name);
+ se_tpg = tf->tf_ops->fabric_make_tpg(wwn, name);
if (!se_tpg || IS_ERR(se_tpg))
return ERR_PTR(-EINVAL);
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index dead30b1d32c..0c6635587930 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -138,7 +138,7 @@ int init_se_kmem_caches(void);
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
-int transport_cmd_finish_abort(struct se_cmd *, int);
+int transport_cmd_finish_abort(struct se_cmd *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index b054682e974f..ebac2b49b9c6 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -978,9 +978,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
}
case COMPARE_AND_WRITE:
if (!dev->dev_attrib.emulate_caw) {
- pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject"
- " COMPARE_AND_WRITE\n", dev->se_hba->backend->ops->name,
- dev->dev_group.cg_item.ci_name, dev->t10_wwn.unit_serial);
+ pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject COMPARE_AND_WRITE\n",
+ dev->se_hba->backend->ops->name,
+ config_item_name(&dev->dev_group.cg_item),
+ dev->t10_wwn.unit_serial);
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
sectors = cdb[13];
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 9c7bc1ca341a..6d1179a7f043 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,25 +75,6 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
kfree(tmr);
}
-static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
-{
- unsigned long flags;
- bool remove = true, send_tas;
- /*
- * TASK ABORTED status (TAS) bit support
- */
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- send_tas = (cmd->transport_state & CMD_T_TAS);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- if (send_tas) {
- remove = false;
- transport_send_task_abort(cmd);
- }
-
- return transport_cmd_finish_abort(cmd, remove);
-}
-
static int target_check_cdb_and_preempt(struct list_head *list,
struct se_cmd *cmd)
{
@@ -142,7 +123,7 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
return false;
}
}
- if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
+ if (sess->sess_tearing_down) {
pr_debug("Attempted to abort io tag: %llu already shutdown,"
" skipping\n", se_cmd->tag);
spin_unlock(&se_cmd->t_state_lock);
@@ -187,13 +168,12 @@ void core_tmr_abort_task(
if (!__target_check_io_state(se_cmd, se_sess, 0))
continue;
- list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
- if (!transport_cmd_finish_abort(se_cmd, true))
+ if (!transport_cmd_finish_abort(se_cmd))
target_put_sess_cmd(se_cmd);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
@@ -259,7 +239,7 @@ static void core_tmr_drain_tmr_list(
spin_unlock(&sess->sess_cmd_lock);
continue;
}
- if (sess->sess_tearing_down || cmd->cmd_wait_set) {
+ if (sess->sess_tearing_down) {
spin_unlock(&cmd->t_state_lock);
spin_unlock(&sess->sess_cmd_lock);
continue;
@@ -291,7 +271,7 @@ static void core_tmr_drain_tmr_list(
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- if (!transport_cmd_finish_abort(cmd, 1))
+ if (!transport_cmd_finish_abort(cmd))
target_put_sess_cmd(cmd);
}
}
@@ -380,7 +360,7 @@ static void core_tmr_drain_state_list(
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- if (!core_tmr_handle_tas_abort(cmd, tas))
+ if (!transport_cmd_finish_abort(cmd))
target_put_sess_cmd(cmd);
}
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ee5081ba5313..86c0156e6c88 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -64,7 +64,7 @@ struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;
static void transport_complete_task_attr(struct se_cmd *cmd);
-static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
+static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);
@@ -224,7 +224,27 @@ void transport_subsystem_check_init(void)
sub_api_initialized = 1;
}
-struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
+/**
+ * transport_init_session - initialize a session object
+ * @se_sess: Session object pointer.
+ *
+ * The caller must have zero-initialized @se_sess before calling this function.
+ */
+void transport_init_session(struct se_session *se_sess)
+{
+ INIT_LIST_HEAD(&se_sess->sess_list);
+ INIT_LIST_HEAD(&se_sess->sess_acl_list);
+ INIT_LIST_HEAD(&se_sess->sess_cmd_list);
+ spin_lock_init(&se_sess->sess_cmd_lock);
+ init_waitqueue_head(&se_sess->cmd_list_wq);
+}
+EXPORT_SYMBOL(transport_init_session);
+
+/**
+ * transport_alloc_session - allocate a session object and initialize it
+ * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
+ */
+struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
@@ -234,17 +254,20 @@ struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
" se_sess_cache\n");
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&se_sess->sess_list);
- INIT_LIST_HEAD(&se_sess->sess_acl_list);
- INIT_LIST_HEAD(&se_sess->sess_cmd_list);
- INIT_LIST_HEAD(&se_sess->sess_wait_list);
- spin_lock_init(&se_sess->sess_cmd_lock);
+ transport_init_session(se_sess);
se_sess->sup_prot_ops = sup_prot_ops;
return se_sess;
}
-EXPORT_SYMBOL(transport_init_session);
+EXPORT_SYMBOL(transport_alloc_session);
+/**
+ * transport_alloc_session_tags - allocate target driver private data
+ * @se_sess: Session pointer.
+ * @tag_num: Maximum number of in-flight commands between initiator and target.
+ * @tag_size: Size in bytes of the private data a target driver associates with
+ * each command.
+ */
int transport_alloc_session_tags(struct se_session *se_sess,
unsigned int tag_num, unsigned int tag_size)
{
@@ -260,7 +283,8 @@ int transport_alloc_session_tags(struct se_session *se_sess,
}
}
- rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
+ rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
+ false, GFP_KERNEL, NUMA_NO_NODE);
if (rc < 0) {
pr_err("Unable to init se_sess->sess_tag_pool,"
" tag_num: %u\n", tag_num);
@@ -273,9 +297,16 @@ int transport_alloc_session_tags(struct se_session *se_sess,
}
EXPORT_SYMBOL(transport_alloc_session_tags);
-struct se_session *transport_init_session_tags(unsigned int tag_num,
- unsigned int tag_size,
- enum target_prot_op sup_prot_ops)
+/**
+ * transport_init_session_tags - allocate a session and target driver private data
+ * @tag_num: Maximum number of in-flight commands between initiator and target.
+ * @tag_size: Size in bytes of the private data a target driver associates with
+ * each command.
+ * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
+ */
+static struct se_session *
+transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
+ enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
int rc;
@@ -291,7 +322,7 @@ struct se_session *transport_init_session_tags(unsigned int tag_num,
return ERR_PTR(-EINVAL);
}
- se_sess = transport_init_session(sup_prot_ops);
+ se_sess = transport_alloc_session(sup_prot_ops);
if (IS_ERR(se_sess))
return se_sess;
@@ -303,7 +334,6 @@ struct se_session *transport_init_session_tags(unsigned int tag_num,
return se_sess;
}
-EXPORT_SYMBOL(transport_init_session_tags);
/*
* Called with spin_lock_irqsave(&struct se_portal_group->session_lock called.
@@ -316,6 +346,7 @@ void __transport_register_session(
{
const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
unsigned char buf[PR_REG_ISID_LEN];
+ unsigned long flags;
se_sess->se_tpg = se_tpg;
se_sess->fabric_sess_ptr = fabric_sess_ptr;
@@ -352,7 +383,7 @@ void __transport_register_session(
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}
- spin_lock_irq(&se_nacl->nacl_sess_lock);
+ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
/*
* The se_nacl->nacl_sess pointer will be set to the
* last active I_T Nexus for each struct se_node_acl.
@@ -361,7 +392,7 @@ void __transport_register_session(
list_add_tail(&se_sess->sess_acl_list,
&se_nacl->acl_sess_list);
- spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
}
list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
@@ -385,7 +416,7 @@ void transport_register_session(
EXPORT_SYMBOL(transport_register_session);
struct se_session *
-target_alloc_session(struct se_portal_group *tpg,
+target_setup_session(struct se_portal_group *tpg,
unsigned int tag_num, unsigned int tag_size,
enum target_prot_op prot_op,
const char *initiatorname, void *private,
@@ -401,7 +432,7 @@ target_alloc_session(struct se_portal_group *tpg,
if (tag_num != 0)
sess = transport_init_session_tags(tag_num, tag_size, prot_op);
else
- sess = transport_init_session(prot_op);
+ sess = transport_alloc_session(prot_op);
if (IS_ERR(sess))
return sess;
@@ -427,7 +458,7 @@ target_alloc_session(struct se_portal_group *tpg,
transport_register_session(tpg, sess->se_node_acl, sess, private);
return sess;
}
-EXPORT_SYMBOL(target_alloc_session);
+EXPORT_SYMBOL(target_setup_session);
ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
@@ -547,7 +578,7 @@ void transport_free_session(struct se_session *se_sess)
target_put_nacl(se_nacl);
}
if (se_sess->sess_cmd_map) {
- percpu_ida_destroy(&se_sess->sess_tag_pool);
+ sbitmap_queue_free(&se_sess->sess_tag_pool);
kvfree(se_sess->sess_cmd_map);
}
kmem_cache_free(se_sess_cache, se_sess);
@@ -585,6 +616,13 @@ void transport_deregister_session(struct se_session *se_sess)
}
EXPORT_SYMBOL(transport_deregister_session);
+void target_remove_session(struct se_session *se_sess)
+{
+ transport_deregister_session_configfs(se_sess);
+ transport_deregister_session(se_sess);
+}
+EXPORT_SYMBOL(target_remove_session);
+
static void target_remove_from_state_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -601,6 +639,13 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
+/*
+ * This function is called by the target core after the target core has
+ * finished processing a SCSI command or SCSI TMF. Both the regular command
+ * processing code and the code for aborting commands can call this
+ * function. CMD_T_STOP is set if and only if another thread is waiting
+ * inside transport_wait_for_tasks() for t_transport_stop_comp.
+ */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
unsigned long flags;
@@ -650,23 +695,27 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
percpu_ref_put(&lun->lun_ref);
}
-int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+int transport_cmd_finish_abort(struct se_cmd *cmd)
{
+ bool send_tas = cmd->transport_state & CMD_T_TAS;
bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
int ret = 0;
+ if (send_tas)
+ transport_send_task_abort(cmd);
+
if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
transport_lun_remove_cmd(cmd);
/*
* Allow the fabric driver to unmap any resources before
* releasing the descriptor via TFO->release_cmd()
*/
- if (remove)
+ if (!send_tas)
cmd->se_tfo->aborted_task(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
return 1;
- if (remove && ack_kref)
+ if (!send_tas && ack_kref)
ret = target_put_sess_cmd(cmd);
return ret;
@@ -1267,7 +1316,7 @@ void transport_init_se_cmd(
INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->t_transport_stop_comp);
- init_completion(&cmd->cmd_wait_comp);
+ cmd->compl = NULL;
spin_lock_init(&cmd->t_state_lock);
INIT_WORK(&cmd->work, NULL);
kref_init(&cmd->cmd_kref);
@@ -2079,9 +2128,6 @@ static void transport_complete_qf(struct se_cmd *cmd)
if (cmd->scsi_status)
goto queue_status;
- cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
- cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
- cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
goto queue_status;
}
@@ -2593,20 +2639,37 @@ static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
+/*
+ * This function is called by frontend drivers after processing of a command
+ * has finished.
+ *
+ * The protocol for ensuring that either the regular flow or the TMF
+ * code drops one reference is as follows:
+ * - Calling .queue_data_in(), .queue_status() or .queue_tm_rsp() will cause
+ * the frontend driver to drop one reference, synchronously or asynchronously.
+ * - During regular command processing the target core sets CMD_T_COMPLETE
+ * before invoking one of the .queue_*() functions.
+ * - The code that aborts commands skips commands and TMFs for which
+ * CMD_T_COMPLETE has been set.
+ * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
+ * commands that will be aborted.
+ * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set,
+ * transport_generic_free_cmd() skips its call to target_put_sess_cmd().
+ * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
+ * be called and will drop a reference.
+ * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
+ * will be called. transport_cmd_finish_abort() will drop the final reference.
+ */
int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
+ DECLARE_COMPLETION_ONSTACK(compl);
int ret = 0;
bool aborted = false, tas = false;
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
- if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
- target_wait_free_cmd(cmd, &aborted, &tas);
+ if (wait_for_tasks)
+ target_wait_free_cmd(cmd, &aborted, &tas);
- if (!aborted || tas)
- ret = target_put_sess_cmd(cmd);
- } else {
- if (wait_for_tasks)
- target_wait_free_cmd(cmd, &aborted, &tas);
+ if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
/*
* Handle WRITE failure case where transport_generic_new_cmd()
* has already added se_cmd to state_list, but fabric has
@@ -2617,20 +2680,14 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
-
- if (!aborted || tas)
- ret = target_put_sess_cmd(cmd);
}
- /*
- * If the task has been internally aborted due to TMR ABORT_TASK
- * or LUN_RESET, target_core_tmr.c is responsible for performing
- * the remaining calls to target_put_sess_cmd(), and not the
- * callers of this function.
- */
+ if (aborted)
+ cmd->compl = &compl;
+ if (!aborted || tas)
+ ret = target_put_sess_cmd(cmd);
if (aborted) {
pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
- wait_for_completion(&cmd->cmd_wait_comp);
- cmd->se_tfo->release_cmd(cmd);
+ wait_for_completion(&compl);
ret = 1;
}
return ret;
@@ -2691,30 +2748,21 @@ static void target_release_cmd_kref(struct kref *kref)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
+ struct completion *compl = se_cmd->compl;
unsigned long flags;
- bool fabric_stop;
if (se_sess) {
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
-
- spin_lock(&se_cmd->t_state_lock);
- fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
- (se_cmd->transport_state & CMD_T_ABORTED);
- spin_unlock(&se_cmd->t_state_lock);
-
- if (se_cmd->cmd_wait_set || fabric_stop) {
- list_del_init(&se_cmd->se_cmd_list);
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- target_free_cmd_mem(se_cmd);
- complete(&se_cmd->cmd_wait_comp);
- return;
- }
list_del_init(&se_cmd->se_cmd_list);
+ if (list_empty(&se_sess->sess_cmd_list))
+ wake_up(&se_sess->cmd_list_wq);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
+ if (compl)
+ complete(compl);
}
/**
@@ -2833,78 +2881,41 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
EXPORT_SYMBOL(target_show_cmd);
/**
- * target_sess_cmd_list_set_waiting - Flag all commands in
- * sess_cmd_list to complete cmd_wait_comp. Set
- * sess_tearing_down so no more commands are queued.
+ * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued.
* @se_sess: session to flag
*/
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
- struct se_cmd *se_cmd, *tmp_cmd;
unsigned long flags;
- int rc;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- if (se_sess->sess_tearing_down) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- return;
- }
se_sess->sess_tearing_down = 1;
- list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
-
- list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_wait_list, se_cmd_list) {
- rc = kref_get_unless_zero(&se_cmd->cmd_kref);
- if (rc) {
- se_cmd->cmd_wait_set = 1;
- spin_lock(&se_cmd->t_state_lock);
- se_cmd->transport_state |= CMD_T_FABRIC_STOP;
- spin_unlock(&se_cmd->t_state_lock);
- } else
- list_del_init(&se_cmd->se_cmd_list);
- }
-
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
/**
- * target_wait_for_sess_cmds - Wait for outstanding descriptors
+ * target_wait_for_sess_cmds - Wait for outstanding commands
* @se_sess: session to wait for active I/O
*/
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
- struct se_cmd *se_cmd, *tmp_cmd;
- unsigned long flags;
- bool tas;
-
- list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_wait_list, se_cmd_list) {
- pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
- " %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
-
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- tas = (se_cmd->transport_state & CMD_T_TAS);
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-
- if (!target_put_sess_cmd(se_cmd)) {
- if (tas)
- target_put_sess_cmd(se_cmd);
- }
-
- wait_for_completion(&se_cmd->cmd_wait_comp);
- pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
- " fabric state: %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
-
- se_cmd->se_tfo->release_cmd(se_cmd);
- }
+ struct se_cmd *cmd;
+ int ret;
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- WARN_ON(!list_empty(&se_sess->sess_cmd_list));
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ WARN_ON_ONCE(!se_sess->sess_tearing_down);
+ spin_lock_irq(&se_sess->sess_cmd_lock);
+ do {
+ ret = wait_event_interruptible_lock_irq_timeout(
+ se_sess->cmd_list_wq,
+ list_empty(&se_sess->sess_cmd_list),
+ se_sess->sess_cmd_lock, 180 * HZ);
+ list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
+ target_show_cmd("session shutdown: still waiting for ",
+ cmd);
+ } while (ret <= 0);
+ spin_unlock_irq(&se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
@@ -3166,12 +3177,23 @@ static const struct sense_info sense_info_table[] = {
},
};
-static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
+/**
+ * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
+ * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
+ * be stored.
+ * @reason: LIO sense reason code. If this argument has the value
+ * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
+ * dequeuing a unit attention fails due to multiple commands being processed
+ * concurrently, set the command status to BUSY.
+ */
+static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
const struct sense_info *si;
u8 *buffer = cmd->sense_buffer;
int r = (__force int)reason;
- u8 asc, ascq;
+ u8 key, asc, ascq;
bool desc_format = target_sense_desc_format(cmd->se_dev);
if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
@@ -3180,9 +3202,13 @@ static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
si = &sense_info_table[(__force int)
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
+ key = si->key;
if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
- core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
- WARN_ON_ONCE(asc == 0);
+ if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
+ &ascq)) {
+ cmd->scsi_status = SAM_STAT_BUSY;
+ return;
+ }
} else if (si->asc == 0) {
WARN_ON_ONCE(cmd->scsi_asc == 0);
asc = cmd->scsi_asc;
@@ -3192,13 +3218,14 @@ static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
ascq = si->ascq;
}
- scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
+ cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+ cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+ scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
if (si->add_sector_info)
- return scsi_set_sense_information(buffer,
- cmd->scsi_sense_length,
- cmd->bad_sector);
-
- return 0;
+ WARN_ON_ONCE(scsi_set_sense_information(buffer,
+ cmd->scsi_sense_length,
+ cmd->bad_sector) < 0);
}
int
@@ -3215,16 +3242,8 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (!from_transport) {
- int rc;
-
- cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
- cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
- cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
- rc = translate_sense_reason(cmd, reason);
- if (rc)
- return rc;
- }
+ if (!from_transport)
+ translate_sense_reason(cmd, reason);
trace_target_cmd_complete(cmd);
return cmd->se_tfo->queue_status(cmd);
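The cmd_wait_comp removal in the hunks above works because transport_generic_free_cmd() now parks a stack-allocated completion in cmd->compl only when it actually has to wait, and target_release_cmd_kref() fires it after the final put. The shape of that idiom, reduced to a toy object (all names hypothetical):

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct foo_cmd {
        struct completion *compl;       /* NULL unless a waiter parked one */
        struct kref kref;
};

static void foo_release(struct kref *kref)
{
        struct foo_cmd *cmd = container_of(kref, struct foo_cmd, kref);
        struct completion *compl = cmd->compl;

        kfree(cmd);                     /* free first ... */
        if (compl)
                complete(compl);        /* ... then wake the waiter */
}

/* Caller must still hold its own reference: that guarantees
 * foo_release() cannot run before compl is parked. */
static void foo_wait_for_release(struct foo_cmd *cmd)
{
        DECLARE_COMPLETION_ONSTACK(compl);

        cmd->compl = &compl;
        kref_put(&cmd->kref, foo_release);
        wait_for_completion(&compl);
}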
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index be25eb807a5f..c8ac242ce888 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -55,7 +55,7 @@ target_scsi3_ua_check(struct se_cmd *cmd)
rcu_read_unlock();
return 0;
}
- if (!atomic_read(&deve->ua_count)) {
+ if (list_empty_careful(&deve->ua_list)) {
rcu_read_unlock();
return 0;
}
@@ -154,7 +154,6 @@ int core_scsi3_ua_allocate(
&deve->ua_list);
spin_unlock(&deve->ua_lock);
- atomic_inc_mb(&deve->ua_count);
return 0;
}
list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -164,7 +163,6 @@ int core_scsi3_ua_allocate(
" 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun,
asc, ascq);
- atomic_inc_mb(&deve->ua_count);
return 0;
}
@@ -196,16 +194,17 @@ void core_scsi3_ua_release_all(
list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
}
-void core_scsi3_ua_for_check_condition(
- struct se_cmd *cmd,
- u8 *asc,
- u8 *ascq)
+/*
+ * Dequeue a unit attention from the unit attention list. This function
+ * returns true if the dequeuing succeeded and if *@key, *@asc and *@ascq have
+ * been set.
+ */
+bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
+ u8 *ascq)
{
struct se_device *dev = cmd->se_dev;
struct se_dev_entry *deve;
@@ -214,23 +213,23 @@ void core_scsi3_ua_for_check_condition(
struct se_ua *ua = NULL, *ua_p;
int head = 1;
- if (!sess)
- return;
+ if (WARN_ON_ONCE(!sess))
+ return false;
nacl = sess->se_node_acl;
- if (!nacl)
- return;
+ if (WARN_ON_ONCE(!nacl))
+ return false;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
if (!deve) {
rcu_read_unlock();
- return;
- }
- if (!atomic_read(&deve->ua_count)) {
- rcu_read_unlock();
- return;
+ *key = ILLEGAL_REQUEST;
+ *asc = 0x25; /* LOGICAL UNIT NOT SUPPORTED */
+ *ascq = 0;
+ return true;
}
+ *key = UNIT_ATTENTION;
/*
* The highest priority Unit Attentions are placed at the head of the
* struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
@@ -260,8 +259,6 @@ void core_scsi3_ua_for_check_condition(
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
rcu_read_unlock();
@@ -273,6 +270,8 @@ void core_scsi3_ua_for_check_condition(
(dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
"Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
+
+ return head == 0;
}
int core_scsi3_ua_clear_for_request_sense(
@@ -299,7 +298,7 @@ int core_scsi3_ua_clear_for_request_sense(
rcu_read_unlock();
return -EINVAL;
}
- if (!atomic_read(&deve->ua_count)) {
+ if (list_empty_careful(&deve->ua_list)) {
rcu_read_unlock();
return -EPERM;
}
@@ -322,8 +321,6 @@ int core_scsi3_ua_clear_for_request_sense(
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
rcu_read_unlock();
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index b0f4205a96cd..76487c9be090 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -37,7 +37,8 @@ extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8);
extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8);
extern void core_scsi3_ua_release_all(struct se_dev_entry *);
-extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
+extern bool core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *,
+ u8 *);
extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
u8 *, u8 *);
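The ua_count removals in target_core_ua.c work because the atomic only ever mirrored whether deve->ua_list was empty; list_empty_careful() answers that question directly and is suited to this kind of lockless peek. A one-liner sketch:

#include <linux/list.h>

/* list_empty_careful() checks both head pointers, so it stays safe
 * against a concurrent, locked list_add()/list_del(); a "non-empty"
 * answer is only a hint and callers still take the lock to walk. */
static bool foo_has_pending_ua(struct list_head *ua_list)
{
        return !list_empty_careful(ua_list);
}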
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index d8dc3d22051f..9cd404acdb82 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -83,14 +83,10 @@
#define DATA_BLOCK_SIZE PAGE_SIZE
#define DATA_BLOCK_SHIFT PAGE_SHIFT
#define DATA_BLOCK_BITS_DEF (256 * 1024)
-#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
-/* The total size of the ring is 8M + 256K * PAGE_SIZE */
-#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
-
/*
* Default number of global data blocks(512K * PAGE_SIZE)
* when the unmap thread will be started.
@@ -98,6 +94,7 @@
#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
static u8 tcmu_kern_cmd_reply_supported;
+static u8 tcmu_netlink_blocked;
static struct device *tcmu_root_device;
@@ -107,9 +104,16 @@ struct tcmu_hba {
#define TCMU_CONFIG_LEN 256
+static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
+static LIST_HEAD(tcmu_nl_cmd_list);
+
+struct tcmu_dev;
+
struct tcmu_nl_cmd {
/* wake up thread waiting for reply */
struct completion complete;
+ struct list_head nl_list;
+ struct tcmu_dev *udev;
int cmd;
int status;
};
@@ -133,7 +137,7 @@ struct tcmu_dev {
struct inode *inode;
struct tcmu_mailbox *mb_addr;
- size_t dev_size;
+ uint64_t dev_size;
u32 cmdr_size;
u32 cmdr_last_cleaned;
/* Offset of data area from start of mb */
@@ -161,10 +165,7 @@ struct tcmu_dev {
struct list_head timedout_entry;
- spinlock_t nl_cmd_lock;
struct tcmu_nl_cmd curr_nl_cmd;
- /* wake up threads waiting on curr_nl_cmd */
- wait_queue_head_t nl_cmd_wq;
char dev_config[TCMU_CONFIG_LEN];
@@ -255,6 +256,92 @@ MODULE_PARM_DESC(global_max_data_area_mb,
"Max MBs allowed to be allocated to all the tcmu device's "
"data areas.");
+static int tcmu_get_block_netlink(char *buffer,
+ const struct kernel_param *kp)
+{
+ return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
+ "blocked" : "unblocked");
+}
+
+static int tcmu_set_block_netlink(const char *str,
+ const struct kernel_param *kp)
+{
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(str, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val > 1) {
+ pr_err("Invalid block netlink value %u\n", val);
+ return -EINVAL;
+ }
+
+ tcmu_netlink_blocked = val;
+ return 0;
+}
+
+static const struct kernel_param_ops tcmu_block_netlink_op = {
+ .set = tcmu_set_block_netlink,
+ .get = tcmu_get_block_netlink,
+};
+
+module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
+
+static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
+{
+ struct tcmu_dev *udev = nl_cmd->udev;
+
+ if (!tcmu_netlink_blocked) {
+ pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
+ return -EBUSY;
+ }
+
+ if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
+ pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
+ nl_cmd->status = -EINTR;
+ list_del(&nl_cmd->nl_list);
+ complete(&nl_cmd->complete);
+ }
+ return 0;
+}
+
+static int tcmu_set_reset_netlink(const char *str,
+ const struct kernel_param *kp)
+{
+ struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(str, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != 1) {
+ pr_err("Invalid reset netlink value %u\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
+ list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
+ ret = tcmu_fail_netlink_cmd(nl_cmd);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+
+ return ret;
+}
+
+static const struct kernel_param_ops tcmu_reset_netlink_op = {
+ .set = tcmu_set_reset_netlink,
+};
+
+module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
+MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
+
/* multicast group */
enum tcmu_multicast_groups {
TCMU_MCGRP_CONFIG,
@@ -274,48 +361,50 @@ static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
- struct se_device *dev;
- struct tcmu_dev *udev;
+ struct tcmu_dev *udev = NULL;
struct tcmu_nl_cmd *nl_cmd;
int dev_id, rc, ret = 0;
- bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE);
if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
!info->attrs[TCMU_ATTR_DEVICE_ID]) {
printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
- return -EINVAL;
+ return -EINVAL;
}
dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
- dev = target_find_device(dev_id, !is_removed);
- if (!dev) {
- printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n",
- completed_cmd, rc, dev_id);
- return -ENODEV;
+ mutex_lock(&tcmu_nl_cmd_mutex);
+ list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
+ if (nl_cmd->udev->se_dev.dev_index == dev_id) {
+ udev = nl_cmd->udev;
+ break;
+ }
}
- udev = TCMU_DEV(dev);
- spin_lock(&udev->nl_cmd_lock);
- nl_cmd = &udev->curr_nl_cmd;
+ if (!udev) {
+ pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
+ completed_cmd, rc, dev_id);
+ ret = -ENODEV;
+ goto unlock;
+ }
+ list_del(&nl_cmd->nl_list);
- pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id,
- nl_cmd->cmd, completed_cmd, rc);
+ pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
+ udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
+ nl_cmd->status);
if (nl_cmd->cmd != completed_cmd) {
- printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n",
- completed_cmd, nl_cmd->cmd);
+ pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
+ udev->name, completed_cmd, nl_cmd->cmd);
ret = -EINVAL;
- } else {
- nl_cmd->status = rc;
+ goto unlock;
}
- spin_unlock(&udev->nl_cmd_lock);
- if (!is_removed)
- target_undepend_item(&dev->dev_group.cg_item);
- if (!ret)
- complete(&nl_cmd->complete);
+ nl_cmd->status = rc;
+ complete(&nl_cmd->complete);
+unlock:
+ mutex_unlock(&tcmu_nl_cmd_mutex);
return ret;
}
@@ -982,7 +1071,6 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
&udev->cmd_timer);
if (ret) {
tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
- mutex_unlock(&udev->cmdr_lock);
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
@@ -1282,6 +1370,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
udev->max_blocks = DATA_BLOCK_BITS_DEF;
mutex_init(&udev->cmdr_lock);
+ INIT_LIST_HEAD(&udev->node);
INIT_LIST_HEAD(&udev->timedout_entry);
INIT_LIST_HEAD(&udev->cmdr_queue);
idr_init(&udev->commands);
@@ -1289,9 +1378,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
- init_waitqueue_head(&udev->nl_cmd_wq);
- spin_lock_init(&udev->nl_cmd_lock);
-
INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
return &udev->se_dev;
@@ -1565,38 +1651,48 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
return 0;
}
-static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
+static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
{
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
if (!tcmu_kern_cmd_reply_supported)
- return;
+ return 0;
if (udev->nl_reply_supported <= 0)
- return;
+ return 0;
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
-relock:
- spin_lock(&udev->nl_cmd_lock);
+ if (tcmu_netlink_blocked) {
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
+ udev->name);
+ return -EAGAIN;
+ }
if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
- spin_unlock(&udev->nl_cmd_lock);
- pr_debug("sleeping for open nl cmd\n");
- wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC));
- goto relock;
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ pr_warn("netlink cmd %d already executing on %s\n",
+ nl_cmd->cmd, udev->name);
+ return -EBUSY;
}
memset(nl_cmd, 0, sizeof(*nl_cmd));
nl_cmd->cmd = cmd;
+ nl_cmd->udev = udev;
init_completion(&nl_cmd->complete);
+ INIT_LIST_HEAD(&nl_cmd->nl_list);
+
+ list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
- spin_unlock(&udev->nl_cmd_lock);
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ return 0;
}
static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
int ret;
- DEFINE_WAIT(__wait);
if (!tcmu_kern_cmd_reply_supported)
return 0;
@@ -1607,13 +1703,10 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
pr_debug("sleeping for nl reply\n");
wait_for_completion(&nl_cmd->complete);
- spin_lock(&udev->nl_cmd_lock);
+ mutex_lock(&tcmu_nl_cmd_mutex);
nl_cmd->cmd = TCMU_CMD_UNSPEC;
ret = nl_cmd->status;
- nl_cmd->status = 0;
- spin_unlock(&udev->nl_cmd_lock);
-
- wake_up_all(&udev->nl_cmd_wq);
+ mutex_unlock(&tcmu_nl_cmd_mutex);
return ret;
}
@@ -1657,19 +1750,21 @@ free_skb:
static int tcmu_netlink_event_send(struct tcmu_dev *udev,
enum tcmu_genl_cmd cmd,
- struct sk_buff **buf, void **hdr)
+ struct sk_buff *skb, void *msg_header)
{
- int ret = 0;
- struct sk_buff *skb = *buf;
- void *msg_header = *hdr;
+ int ret;
genlmsg_end(skb, msg_header);
- tcmu_init_genl_cmd_reply(udev, cmd);
+ ret = tcmu_init_genl_cmd_reply(udev, cmd);
+ if (ret) {
+ nlmsg_free(skb);
+ return ret;
+ }
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
TCMU_MCGRP_CONFIG, GFP_KERNEL);
- /* We don't care if no one is listening */
+ /* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;
if (!ret)
@@ -1687,9 +1782,8 @@ static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
&msg_header);
if (ret < 0)
return ret;
- return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, &skb,
- &msg_header);
-
+ return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
+ msg_header);
}
static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
@@ -1703,7 +1797,7 @@ static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
if (ret < 0)
return ret;
return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static int tcmu_update_uio_info(struct tcmu_dev *udev)
@@ -1745,9 +1839,11 @@ static int tcmu_configure_device(struct se_device *dev)
info = &udev->uio_info;
+ mutex_lock(&udev->cmdr_lock);
udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks),
sizeof(unsigned long),
GFP_KERNEL);
+ mutex_unlock(&udev->cmdr_lock);
if (!udev->data_bitmap) {
ret = -ENOMEM;
goto err_bitmap_alloc;
@@ -1842,11 +1938,6 @@ err_bitmap_alloc:
return ret;
}
-static bool tcmu_dev_configured(struct tcmu_dev *udev)
-{
- return udev->uio_info.uio_dev ? true : false;
-}
-
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1953,45 +2044,76 @@ enum {
static match_table_t tokens = {
{Opt_dev_config, "dev_config=%s"},
- {Opt_dev_size, "dev_size=%u"},
- {Opt_hw_block_size, "hw_block_size=%u"},
- {Opt_hw_max_sectors, "hw_max_sectors=%u"},
+ {Opt_dev_size, "dev_size=%s"},
+ {Opt_hw_block_size, "hw_block_size=%d"},
+ {Opt_hw_max_sectors, "hw_max_sectors=%d"},
{Opt_nl_reply_supported, "nl_reply_supported=%d"},
- {Opt_max_data_area_mb, "max_data_area_mb=%u"},
+ {Opt_max_data_area_mb, "max_data_area_mb=%d"},
{Opt_err, NULL}
};
static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{
- unsigned long tmp_ul;
- char *arg_p;
- int ret;
+ int val, ret;
- arg_p = match_strdup(arg);
- if (!arg_p)
- return -ENOMEM;
-
- ret = kstrtoul(arg_p, 0, &tmp_ul);
- kfree(arg_p);
+ ret = match_int(arg, &val);
if (ret < 0) {
- pr_err("kstrtoul() failed for dev attrib\n");
+ pr_err("match_int() failed for dev attrib. Error %d.\n",
+ ret);
return ret;
}
- if (!tmp_ul) {
- pr_err("dev attrib must be nonzero\n");
+
+ if (val <= 0) {
+ pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
+ val);
return -EINVAL;
}
- *dev_attrib = tmp_ul;
+ *dev_attrib = val;
return 0;
}
+static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val <= 0) {
+ pr_err("Invalid max_data_area %d.\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
+ if (udev->max_blocks > tcmu_global_max_blocks) {
+ pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
+ val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+ udev->max_blocks = tcmu_global_max_blocks;
+ }
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
+
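/*
 * Editor's sketch (not part of this patch): the MB <-> blocks arithmetic
 * behind TCMU_MBS_TO_BLOCKS()/TCMU_BLOCKS_TO_MBS() used above, assuming
 * the 4 KiB data blocks of target_core_user.c; the macro bodies here are
 * illustrative, not the source.
 */
#define EX_DATA_BLOCK_SHIFT	12	/* 4 KiB per data block */
#define EX_MBS_TO_BLOCKS(mb)	((mb) << (20 - EX_DATA_BLOCK_SHIFT))
#define EX_BLOCKS_TO_MBS(blk)	((blk) >> (20 - EX_DATA_BLOCK_SHIFT))
/* e.g. max_data_area_mb=8: 8 << 8 = 2048 blocks; 2048 >> 8 = 8 MB back */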
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
- char *orig, *ptr, *opts, *arg_p;
+ char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, token, tmpval;
+ int ret = 0, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -2014,15 +2136,10 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
break;
case Opt_dev_size:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
- kfree(arg_p);
+ ret = match_u64(&args[0], &udev->dev_size);
if (ret < 0)
- pr_err("kstrtoul() failed for dev_size=\n");
+ pr_err("match_u64() failed for dev_size=. Error %d.\n",
+ ret);
break;
case Opt_hw_block_size:
ret = tcmu_set_dev_attrib(&args[0],
@@ -2033,48 +2150,13 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
&(dev->dev_attrib.hw_max_sectors));
break;
case Opt_nl_reply_supported:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
- kfree(arg_p);
+ ret = match_int(&args[0], &udev->nl_reply_supported);
if (ret < 0)
- pr_err("kstrtoint() failed for nl_reply_supported=\n");
+ pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
+ ret);
break;
case Opt_max_data_area_mb:
- if (dev->export_count) {
- pr_err("Unable to set max_data_area_mb while exports exist\n");
- ret = -EINVAL;
- break;
- }
-
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoint(arg_p, 0, &tmpval);
- kfree(arg_p);
- if (ret < 0) {
- pr_err("kstrtoint() failed for max_data_area_mb=\n");
- break;
- }
-
- if (tmpval <= 0) {
- pr_err("Invalid max_data_area %d\n", tmpval);
- ret = -EINVAL;
- break;
- }
-
- udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval);
- if (udev->max_blocks > tcmu_global_max_blocks) {
- pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
- tmpval,
- TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
- udev->max_blocks = tcmu_global_max_blocks;
- }
+ ret = tcmu_set_max_blocks_param(udev, &args[0]);
break;
default:
break;
@@ -2095,7 +2177,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
bl = sprintf(b + bl, "Config: %s ",
udev->dev_config[0] ? udev->dev_config : "NULL");
- bl += sprintf(b + bl, "Size: %zu ", udev->dev_size);
+ bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
TCMU_BLOCKS_TO_MBS(udev->max_blocks));
@@ -2222,7 +2304,7 @@ static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
@@ -2239,7 +2321,7 @@ static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
return -EINVAL;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_dev_config_event(udev, page);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2264,7 +2346,7 @@ static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
- return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
+ return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
}
static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
@@ -2284,7 +2366,7 @@ static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
@@ -2301,7 +2383,7 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
return ret;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_dev_size_event(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2366,7 +2448,7 @@ static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
@@ -2383,7 +2465,7 @@ static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
return ret;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_emulate_write_cache(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2419,6 +2501,11 @@ static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
u8 val;
int ret;
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
ret = kstrtou8(page, 0, &val);
if (ret < 0)
return ret;
@@ -2446,6 +2533,11 @@ static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
u8 val;
int ret;
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
ret = kstrtou8(page, 0, &val);
if (ret < 0)
return ret;
@@ -2510,6 +2602,11 @@ static void find_free_blocks(void)
list_for_each_entry(udev, &root_udev, node) {
mutex_lock(&udev->cmdr_lock);
+ if (!target_dev_configured(&udev->se_dev)) {
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ }
+
/* Try to complete the finished commands first */
tcmu_handle_completions(udev);
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 9ee89e00cd77..2718a933c0c6 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -497,10 +497,7 @@ int target_xcopy_setup_pt(void)
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
- INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
- INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
- INIT_LIST_HEAD(&xcopy_pt_sess.sess_cmd_list);
- spin_lock_init(&xcopy_pt_sess.sess_cmd_lock);
+ transport_init_session(&xcopy_pt_sess);
xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index ec372860106f..a183d4da7db2 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -28,7 +28,6 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
-#include <linux/percpu_ida.h>
#include <asm/unaligned.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
@@ -92,7 +91,7 @@ static void ft_free_cmd(struct ft_cmd *cmd)
if (fr_seq(fp))
fc_seq_release(fr_seq(fp));
fc_frame_free(fp);
- percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+ target_free_tag(sess->se_sess, &cmd->se_cmd);
ft_sess_put(sess); /* undo get from lookup at recv */
}
@@ -448,9 +447,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
struct ft_cmd *cmd;
struct fc_lport *lport = sess->tport->lport;
struct se_session *se_sess = sess->se_sess;
- int tag;
+ int tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
goto busy;
@@ -458,10 +457,11 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
memset(cmd, 0, sizeof(struct ft_cmd));
cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->sess = sess;
cmd->seq = fc_seq_assign(lport, fp);
if (!cmd->seq) {
- percpu_ida_free(&se_sess->sess_tag_pool, tag);
+ target_free_tag(se_sess, &cmd->se_cmd);
goto busy;
}
cmd->req_frame = fp; /* hold frame during cmd */
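/*
 * Editor's sketch (not part of this patch): the percpu_ida -> sbitmap
 * allocation pattern used above. The tag pool is assumed to have been
 * sized at session setup; the function name is hypothetical.
 */
static int ex_alloc_tag(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	int tag, cpu;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return -EBUSY;	/* pool exhausted: back off and retry */

	/* target_free_tag() later needs both values */
	se_cmd->map_tag = tag;
	se_cmd->map_cpu = cpu;
	return 0;
}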
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 42ee91123dca..e55c4d537592 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -223,10 +223,7 @@ static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name)
/*
* local_port port_group (tpg) ops.
*/
-static struct se_portal_group *ft_add_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *ft_add_tpg(struct se_wwn *wwn, const char *name)
{
struct ft_lport_wwn *ft_wwn;
struct ft_tpg *tpg;
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index c91979c1463d..6d4adf5ec26c 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -239,7 +239,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
sess->tport = tport;
sess->port_id = port_id;
- sess->se_sess = target_alloc_session(se_tpg, TCM_FC_DEFAULT_TAGS,
+ sess->se_sess = target_setup_session(se_tpg, TCM_FC_DEFAULT_TAGS,
sizeof(struct ft_cmd),
TARGET_PROT_NORMAL, &initiatorname[0],
sess, ft_sess_alloc_cb);
@@ -287,7 +287,6 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
static void ft_close_sess(struct ft_sess *sess)
{
- transport_deregister_session_configfs(sess->se_sess);
target_sess_cmd_list_set_waiting(sess->se_sess);
target_wait_for_sess_cmds(sess->se_sess);
ft_sess_put(sess);
@@ -448,7 +447,7 @@ static void ft_sess_free(struct kref *kref)
{
struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
- transport_deregister_session(sess->se_sess);
+ target_remove_session(sess->se_sess);
kfree_rcu(sess, rcu);
}
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 07d3be6f0780..0b9ab1d0dd45 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -80,11 +80,6 @@ static void tee_shm_op_release(struct dma_buf *dmabuf)
tee_shm_release(shm);
}
-static void *tee_shm_op_map_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
-{
- return NULL;
-}
-
static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
{
return NULL;
@@ -107,7 +102,6 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
.map_dma_buf = tee_shm_op_map_dma_buf,
.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
.release = tee_shm_op_release,
- .map_atomic = tee_shm_op_map_atomic,
.map = tee_shm_op_map,
.mmap = tee_shm_op_mmap,
};
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 4c275ec10ac5..2c2f6d93034e 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -24,6 +24,8 @@
#include <linux/of_device.h>
#include <linux/thermal.h>
#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
/* Thermal Manager Control and Status Register */
#define PMU_TDC0_SW_RST_MASK (0x1 << 1)
@@ -39,24 +41,24 @@
#define A375_READOUT_INVERT BIT(15)
#define A375_HW_RESETn BIT(8)
-/* Legacy bindings */
-#define LEGACY_CONTROL_MEM_LEN 0x4
-
-/* Current bindings with the 2 control registers under the same memory area */
-#define LEGACY_CONTROL1_OFFSET 0x0
-#define CONTROL0_OFFSET 0x0
-#define CONTROL1_OFFSET 0x4
-
/* Errata fields */
#define CONTROL0_TSEN_TC_TRIM_MASK 0x7
#define CONTROL0_TSEN_TC_TRIM_VAL 0x3
-/* TSEN refers to the temperature sensors within the AP */
#define CONTROL0_TSEN_START BIT(0)
#define CONTROL0_TSEN_RESET BIT(1)
#define CONTROL0_TSEN_ENABLE BIT(2)
-
-/* EXT_TSEN refers to the external temperature sensors, out of the AP */
+#define CONTROL0_TSEN_AVG_BYPASS BIT(6)
+#define CONTROL0_TSEN_CHAN_SHIFT 13
+#define CONTROL0_TSEN_CHAN_MASK 0xF
+#define CONTROL0_TSEN_OSR_SHIFT 24
+#define CONTROL0_TSEN_OSR_MAX 0x3
+#define CONTROL0_TSEN_MODE_SHIFT 30
+#define CONTROL0_TSEN_MODE_EXTERNAL 0x2
+#define CONTROL0_TSEN_MODE_MASK 0x3
+
+#define CONTROL1_TSEN_AVG_SHIFT 0
+#define CONTROL1_TSEN_AVG_MASK 0x7
#define CONTROL1_EXT_TSEN_SW_RESET BIT(7)
#define CONTROL1_EXT_TSEN_HW_RESETn BIT(8)
@@ -67,19 +69,19 @@ struct armada_thermal_data;
/* Marvell EBU Thermal Sensor Dev Structure */
struct armada_thermal_priv {
- void __iomem *status;
- void __iomem *control0;
- void __iomem *control1;
+ struct device *dev;
+ struct regmap *syscon;
+ char zone_name[THERMAL_NAME_LENGTH];
+ /* serialize temperature reads/updates */
+ struct mutex update_lock;
struct armada_thermal_data *data;
+ int current_channel;
};
struct armada_thermal_data {
- /* Initialize the sensor */
- void (*init_sensor)(struct platform_device *pdev,
- struct armada_thermal_priv *);
-
- /* Test for a valid sensor value (optional) */
- bool (*is_valid)(struct armada_thermal_priv *);
+ /* Initialize the thermal IC */
+ void (*init)(struct platform_device *pdev,
+ struct armada_thermal_priv *priv);
/* Formula coefficients: temp = (b - m * reg) / div */
s64 coef_b;
@@ -92,141 +94,242 @@ struct armada_thermal_data {
unsigned int temp_shift;
unsigned int temp_mask;
u32 is_valid_bit;
- bool needs_control0;
+
+ /* Syscon access */
+ unsigned int syscon_control0_off;
+ unsigned int syscon_control1_off;
+ unsigned int syscon_status_off;
+
+ /* One sensor is in the thermal IC, the others are in the CPUs if any */
+ unsigned int cpu_nr;
};
-static void armadaxp_init_sensor(struct platform_device *pdev,
- struct armada_thermal_priv *priv)
+struct armada_drvdata {
+ enum drvtype {
+ LEGACY,
+ SYSCON
+ } type;
+ union {
+ struct armada_thermal_priv *priv;
+ struct thermal_zone_device *tz;
+ } data;
+};
+
+/*
+ * struct armada_thermal_sensor - hold the information of one thermal sensor
+ * @priv: pointer to the local private structure
+ * @id: identifier of the thermal sensor
+ */
+struct armada_thermal_sensor {
+ struct armada_thermal_priv *priv;
+ int id;
+};
+
+static void armadaxp_init(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
{
+ struct armada_thermal_data *data = priv->data;
u32 reg;
- reg = readl_relaxed(priv->control1);
+ regmap_read(priv->syscon, data->syscon_control1_off, &reg);
reg |= PMU_TDC0_OTF_CAL_MASK;
- writel(reg, priv->control1);
/* Reference calibration value */
reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS);
- writel(reg, priv->control1);
/* Reset the sensor */
- reg = readl_relaxed(priv->control1);
- writel((reg | PMU_TDC0_SW_RST_MASK), priv->control1);
+ reg |= PMU_TDC0_SW_RST_MASK;
- writel(reg, priv->control1);
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
/* Enable the sensor */
- reg = readl_relaxed(priv->status);
+ regmap_read(priv->syscon, data->syscon_status_off, &reg);
reg &= ~PMU_TM_DISABLE_MASK;
- writel(reg, priv->status);
+ regmap_write(priv->syscon, data->syscon_status_off, reg);
}
-static void armada370_init_sensor(struct platform_device *pdev,
- struct armada_thermal_priv *priv)
+static void armada370_init(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
{
+ struct armada_thermal_data *data = priv->data;
u32 reg;
- reg = readl_relaxed(priv->control1);
+ regmap_read(priv->syscon, data->syscon_control1_off, &reg);
reg |= PMU_TDC0_OTF_CAL_MASK;
- writel(reg, priv->control1);
/* Reference calibration value */
reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS);
- writel(reg, priv->control1);
+ /* Reset the sensor */
reg &= ~PMU_TDC0_START_CAL_MASK;
- writel(reg, priv->control1);
+
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
msleep(10);
}
-static void armada375_init_sensor(struct platform_device *pdev,
- struct armada_thermal_priv *priv)
+static void armada375_init(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
{
+ struct armada_thermal_data *data = priv->data;
u32 reg;
- reg = readl(priv->control1);
+ regmap_read(priv->syscon, data->syscon_control1_off, &reg);
reg &= ~(A375_UNIT_CONTROL_MASK << A375_UNIT_CONTROL_SHIFT);
reg &= ~A375_READOUT_INVERT;
reg &= ~A375_HW_RESETn;
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
- writel(reg, priv->control1);
msleep(20);
reg |= A375_HW_RESETn;
- writel(reg, priv->control1);
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
+
msleep(50);
}
-static void armada_wait_sensor_validity(struct armada_thermal_priv *priv)
+static int armada_wait_sensor_validity(struct armada_thermal_priv *priv)
{
u32 reg;
- readl_relaxed_poll_timeout(priv->status, reg,
- reg & priv->data->is_valid_bit,
- STATUS_POLL_PERIOD_US,
- STATUS_POLL_TIMEOUT_US);
+ return regmap_read_poll_timeout(priv->syscon,
+ priv->data->syscon_status_off, reg,
+ reg & priv->data->is_valid_bit,
+ STATUS_POLL_PERIOD_US,
+ STATUS_POLL_TIMEOUT_US);
}
-static void armada380_init_sensor(struct platform_device *pdev,
- struct armada_thermal_priv *priv)
+static void armada380_init(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
{
- u32 reg = readl_relaxed(priv->control1);
+ struct armada_thermal_data *data = priv->data;
+ u32 reg;
/* Disable the HW/SW reset */
+ regmap_read(priv->syscon, data->syscon_control1_off, &reg);
reg |= CONTROL1_EXT_TSEN_HW_RESETn;
reg &= ~CONTROL1_EXT_TSEN_SW_RESET;
- writel(reg, priv->control1);
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
/* Set Tsen Tc Trim to correct default value (errata #132698) */
- if (priv->control0) {
- reg = readl_relaxed(priv->control0);
- reg &= ~CONTROL0_TSEN_TC_TRIM_MASK;
- reg |= CONTROL0_TSEN_TC_TRIM_VAL;
- writel(reg, priv->control0);
- }
-
- /* Wait the sensors to be valid or the core will warn the user */
- armada_wait_sensor_validity(priv);
+ regmap_read(priv->syscon, data->syscon_control0_off, &reg);
+ reg &= ~CONTROL0_TSEN_TC_TRIM_MASK;
+ reg |= CONTROL0_TSEN_TC_TRIM_VAL;
+ regmap_write(priv->syscon, data->syscon_control0_off, reg);
}
-static void armada_ap806_init_sensor(struct platform_device *pdev,
- struct armada_thermal_priv *priv)
+static void armada_ap806_init(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
{
+ struct armada_thermal_data *data = priv->data;
u32 reg;
- reg = readl_relaxed(priv->control0);
+ regmap_read(priv->syscon, data->syscon_control0_off, &reg);
reg &= ~CONTROL0_TSEN_RESET;
reg |= CONTROL0_TSEN_START | CONTROL0_TSEN_ENABLE;
- writel(reg, priv->control0);
- /* Wait the sensors to be valid or the core will warn the user */
- armada_wait_sensor_validity(priv);
+ /* Sample every ~2ms */
+ reg |= CONTROL0_TSEN_OSR_MAX << CONTROL0_TSEN_OSR_SHIFT;
+
+ /* Enable average (2 samples by default) */
+ reg &= ~CONTROL0_TSEN_AVG_BYPASS;
+
+ regmap_write(priv->syscon, data->syscon_control0_off, reg);
+}
+
+static void armada_cp110_init(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
+{
+ struct armada_thermal_data *data = priv->data;
+ u32 reg;
+
+ armada380_init(pdev, priv);
+
+ /* Sample every ~2ms */
+ regmap_read(priv->syscon, data->syscon_control0_off, &reg);
+ reg |= CONTROL0_TSEN_OSR_MAX << CONTROL0_TSEN_OSR_SHIFT;
+ regmap_write(priv->syscon, data->syscon_control0_off, reg);
+
+ /* Average the output value over 2^1 = 2 samples */
+ regmap_read(priv->syscon, data->syscon_control1_off, &reg);
+ reg &= ~(CONTROL1_TSEN_AVG_MASK << CONTROL1_TSEN_AVG_SHIFT);
+ reg |= 1 << CONTROL1_TSEN_AVG_SHIFT;
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
}
static bool armada_is_valid(struct armada_thermal_priv *priv)
{
- u32 reg = readl_relaxed(priv->status);
+ u32 reg;
+
+ if (!priv->data->is_valid_bit)
+ return true;
+
+ regmap_read(priv->syscon, priv->data->syscon_status_off, &reg);
return reg & priv->data->is_valid_bit;
}
-static int armada_get_temp(struct thermal_zone_device *thermal,
- int *temp)
+/* There is currently no board with more than one sensor per channel */
+static int armada_select_channel(struct armada_thermal_priv *priv, int channel)
{
- struct armada_thermal_priv *priv = thermal->devdata;
- u32 reg, div;
- s64 sample, b, m;
+ struct armada_thermal_data *data = priv->data;
+ u32 ctrl0;
+
+ if (channel < 0 || channel > priv->data->cpu_nr)
+ return -EINVAL;
+
+ if (priv->current_channel == channel)
+ return 0;
+
+ /* Stop the measurements */
+ regmap_read(priv->syscon, data->syscon_control0_off, &ctrl0);
+ ctrl0 &= ~CONTROL0_TSEN_START;
+ regmap_write(priv->syscon, data->syscon_control0_off, ctrl0);
+
+ /* Reset the mode, internal sensor will be automatically selected */
+ ctrl0 &= ~(CONTROL0_TSEN_MODE_MASK << CONTROL0_TSEN_MODE_SHIFT);
+
+ /* Other channels are external and should be selected accordingly */
+ if (channel) {
+ /* Change the mode to external */
+ ctrl0 |= CONTROL0_TSEN_MODE_EXTERNAL <<
+ CONTROL0_TSEN_MODE_SHIFT;
+ /* Select the sensor */
+ ctrl0 &= ~(CONTROL0_TSEN_CHAN_MASK << CONTROL0_TSEN_CHAN_SHIFT);
+ ctrl0 |= (channel - 1) << CONTROL0_TSEN_CHAN_SHIFT;
+ }
- /* Valid check */
- if (priv->data->is_valid && !priv->data->is_valid(priv)) {
- dev_err(&thermal->device,
+ /* Actually set the mode/channel */
+ regmap_write(priv->syscon, data->syscon_control0_off, ctrl0);
+ priv->current_channel = channel;
+
+ /* Re-start the measurements */
+ ctrl0 |= CONTROL0_TSEN_START;
+ regmap_write(priv->syscon, data->syscon_control0_off, ctrl0);
+
+ /*
+ * The IP has a latency of ~15ms, so after updating the selected source,
+ * we must absolutely wait for the sensor validity bit to ensure we read
+ * actual data.
+ */
+ if (armada_wait_sensor_validity(priv)) {
+ dev_err(priv->dev,
"Temperature sensor reading not valid\n");
return -EIO;
}
- reg = readl_relaxed(priv->status);
+ return 0;
+}
+
+static int armada_read_sensor(struct armada_thermal_priv *priv, int *temp)
+{
+ u32 reg, div;
+ s64 sample, b, m;
+
+ regmap_read(priv->syscon, priv->data->syscon_status_off, &reg);
reg = (reg >> priv->data->temp_shift) & priv->data->temp_mask;
if (priv->data->signed_sample)
/* The most significant bit is the sign bit */
@@ -247,45 +350,93 @@ static int armada_get_temp(struct thermal_zone_device *thermal,
return 0;
}
-static struct thermal_zone_device_ops ops = {
+static int armada_get_temp_legacy(struct thermal_zone_device *thermal,
+ int *temp)
+{
+ struct armada_thermal_priv *priv = thermal->devdata;
+ int ret;
+
+ /* Valid check */
+ if (!armada_is_valid(priv)) {
+ dev_err(priv->dev,
+ "Temperature sensor reading not valid\n");
+ return -EIO;
+ }
+
+ /* Do the actual reading */
+ ret = armada_read_sensor(priv, temp);
+
+ return ret;
+}
+
+static struct thermal_zone_device_ops legacy_ops = {
+ .get_temp = armada_get_temp_legacy,
+};
+
+static int armada_get_temp(void *_sensor, int *temp)
+{
+ struct armada_thermal_sensor *sensor = _sensor;
+ struct armada_thermal_priv *priv = sensor->priv;
+ int ret;
+
+ mutex_lock(&priv->update_lock);
+
+ /* Select the desired channel */
+ ret = armada_select_channel(priv, sensor->id);
+ if (ret)
+ goto unlock_mutex;
+
+ /* Do the actual reading */
+ ret = armada_read_sensor(priv, temp);
+
+unlock_mutex:
+ mutex_unlock(&priv->update_lock);
+
+ return ret;
+}
+
+static struct thermal_zone_of_device_ops of_ops = {
.get_temp = armada_get_temp,
};
static const struct armada_thermal_data armadaxp_data = {
- .init_sensor = armadaxp_init_sensor,
+ .init = armadaxp_init,
.temp_shift = 10,
.temp_mask = 0x1ff,
.coef_b = 3153000000ULL,
.coef_m = 10000000ULL,
.coef_div = 13825,
+ .syscon_status_off = 0xb0,
+ .syscon_control1_off = 0xd0,
};
static const struct armada_thermal_data armada370_data = {
- .is_valid = armada_is_valid,
- .init_sensor = armada370_init_sensor,
+ .init = armada370_init,
.is_valid_bit = BIT(9),
.temp_shift = 10,
.temp_mask = 0x1ff,
.coef_b = 3153000000ULL,
.coef_m = 10000000ULL,
.coef_div = 13825,
+ .syscon_status_off = 0x0,
+ .syscon_control1_off = 0x4,
};
static const struct armada_thermal_data armada375_data = {
- .is_valid = armada_is_valid,
- .init_sensor = armada375_init_sensor,
+ .init = armada375_init,
.is_valid_bit = BIT(10),
.temp_shift = 0,
.temp_mask = 0x1ff,
.coef_b = 3171900000ULL,
.coef_m = 10000000ULL,
.coef_div = 13616,
- .needs_control0 = true,
+ .syscon_status_off = 0x78,
+ .syscon_control0_off = 0x7c,
+ .syscon_control1_off = 0x80,
};
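/*
 * Editor's worked example (not part of this patch): applying the
 * "temp = (b - m * reg) / div" formula with the Armada 375 coefficients
 * above (this sensor is not inverted); the function name is hypothetical.
 */
static long long ex_a375_raw_to_mcelsius(unsigned int reg)
{
	long long b = 3171900000LL;	/* coef_b */
	long long m = 10000000LL;	/* coef_m */

	/* reg = 235: (3171900000 - 2350000000) / 13616 ~= 60362 mC */
	return (b - m * reg) / 13616;	/* coef_div */
}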
static const struct armada_thermal_data armada380_data = {
- .is_valid = armada_is_valid,
- .init_sensor = armada380_init_sensor,
+ .init = armada380_init,
.is_valid_bit = BIT(10),
.temp_shift = 0,
.temp_mask = 0x3ff,
@@ -293,11 +444,13 @@ static const struct armada_thermal_data armada380_data = {
.coef_m = 2000096ULL,
.coef_div = 4201,
.inverted = true,
+ .syscon_control0_off = 0x70,
+ .syscon_control1_off = 0x74,
+ .syscon_status_off = 0x78,
};
static const struct armada_thermal_data armada_ap806_data = {
- .is_valid = armada_is_valid,
- .init_sensor = armada_ap806_init_sensor,
+ .init = armada_ap806_init,
.is_valid_bit = BIT(16),
.temp_shift = 0,
.temp_mask = 0x3ff,
@@ -306,12 +459,14 @@ static const struct armada_thermal_data armada_ap806_data = {
.coef_div = 1,
.inverted = true,
.signed_sample = true,
- .needs_control0 = true,
+ .syscon_control0_off = 0x84,
+ .syscon_control1_off = 0x88,
+ .syscon_status_off = 0x8C,
+ .cpu_nr = 4,
};
static const struct armada_thermal_data armada_cp110_data = {
- .is_valid = armada_is_valid,
- .init_sensor = armada380_init_sensor,
+ .init = armada_cp110_init,
.is_valid_bit = BIT(10),
.temp_shift = 0,
.temp_mask = 0x3ff,
@@ -319,7 +474,9 @@ static const struct armada_thermal_data armada_cp110_data = {
.coef_m = 2000096ULL,
.coef_div = 4201,
.inverted = true,
- .needs_control0 = true,
+ .syscon_control0_off = 0x70,
+ .syscon_control1_off = 0x74,
+ .syscon_status_off = 0x78,
};
static const struct of_device_id armada_thermal_id_table[] = {
@@ -353,13 +510,97 @@ static const struct of_device_id armada_thermal_id_table[] = {
};
MODULE_DEVICE_TABLE(of, armada_thermal_id_table);
+static const struct regmap_config armada_thermal_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+static int armada_thermal_probe_legacy(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
+{
+ struct armada_thermal_data *data = priv->data;
+ struct resource *res;
+ void __iomem *base;
+
+ /* First memory region points towards the status register */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ /*
+ * Edit the resource start address and length to map over all the
+ * registers, instead of pointing at them one by one.
+ */
+ res->start -= data->syscon_status_off;
+ res->end = res->start + max(data->syscon_status_off,
+ max(data->syscon_control0_off,
+ data->syscon_control1_off)) +
+ sizeof(unsigned int) - 1;
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->syscon = devm_regmap_init_mmio(&pdev->dev, base,
+ &armada_thermal_regmap_config);
+ if (IS_ERR(priv->syscon))
+ return PTR_ERR(priv->syscon);
+
+ return 0;
+}
+
+static int armada_thermal_probe_syscon(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
+{
+ priv->syscon = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(priv->syscon))
+ return PTR_ERR(priv->syscon);
+
+ return 0;
+}
+
+static void armada_set_sane_name(struct platform_device *pdev,
+ struct armada_thermal_priv *priv)
+{
+ const char *name = dev_name(&pdev->dev);
+ char *insane_char;
+
+ if (strlen(name) > THERMAL_NAME_LENGTH) {
+ /*
+ * When inside a system controller, the device name has the
+ * form: f06f8000.system-controller:ap-thermal, so stripping
+ * after the ':' should give us a shorter but meaningful name.
+ */
+ name = strrchr(name, ':');
+ if (!name)
+ name = "armada_thermal";
+ else
+ name++;
+ }
+
+ /* Save the name locally */
+ strncpy(priv->zone_name, name, THERMAL_NAME_LENGTH - 1);
+ priv->zone_name[THERMAL_NAME_LENGTH - 1] = '\0';
+
+ /* Then replace any '-' with '_' or the hwmon core will complain */
+ do {
+ insane_char = strpbrk(priv->zone_name, "-");
+ if (insane_char)
+ *insane_char = '_';
+ } while (insane_char);
+}
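/*
 * Editor's worked example (not part of this patch):
 * "f06f8000.system-controller:ap-thermal" is longer than
 * THERMAL_NAME_LENGTH, so everything up to the last ':' is stripped,
 * then '-' becomes '_', giving the zone name "ap_thermal".
 */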
+
static int armada_thermal_probe(struct platform_device *pdev)
{
- void __iomem *control = NULL;
- struct thermal_zone_device *thermal;
+ struct thermal_zone_device *tz;
+ struct armada_thermal_sensor *sensor;
+ struct armada_drvdata *drvdata;
const struct of_device_id *match;
struct armada_thermal_priv *priv;
- struct resource *res;
+ int sensor_id;
+ int ret;
match = of_match_device(armada_thermal_id_table, &pdev->dev);
if (!match)
@@ -369,58 +610,99 @@ static int armada_thermal_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->status = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->status))
- return PTR_ERR(priv->status);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- control = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(control))
- return PTR_ERR(control);
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+ priv->dev = &pdev->dev;
priv->data = (struct armada_thermal_data *)match->data;
+ mutex_init(&priv->update_lock);
+
/*
* Legacy DT bindings only described "control1" register (also referred
- * as "control MSB" on old documentation). New bindings cover
+ * as "control MSB" on old documentation). Then, bindings moved to cover
* "control0/control LSB" and "control1/control MSB" registers within
- * the same resource, which is then of size 8 instead of 4.
+ * the same resource, which was then of size 8 instead of 4.
+ *
+ * The logic of defining scattered registers is broken. For instance, it
+ * blocked the addition of the overheat interrupt feature that needed
+ * another resource somewhere else in the same memory area. One solution
+ * is to define an overall system controller and put the thermal node
+ * into it, which requires the use of regmaps throughout the driver.
*/
- if (resource_size(res) == LEGACY_CONTROL_MEM_LEN) {
- /* ->control0 unavailable in this configuration */
- if (priv->data->needs_control0) {
- dev_err(&pdev->dev, "No access to control0 register\n");
- return -EINVAL;
+ if (IS_ERR(syscon_node_to_regmap(pdev->dev.parent->of_node))) {
+ /* Ensure device name is correct for the thermal core */
+ armada_set_sane_name(pdev, priv);
+
+ ret = armada_thermal_probe_legacy(pdev, priv);
+ if (ret)
+ return ret;
+
+ priv->data->init(pdev, priv);
+
+ /* Wait for the sensors to become valid */
+ armada_wait_sensor_validity(priv);
+
+ tz = thermal_zone_device_register(priv->zone_name, 0, 0, priv,
+ &legacy_ops, NULL, 0, 0);
+ if (IS_ERR(tz)) {
+ dev_err(&pdev->dev,
+ "Failed to register thermal zone device\n");
+ return PTR_ERR(tz);
}
- priv->control1 = control + LEGACY_CONTROL1_OFFSET;
- } else {
- priv->control0 = control + CONTROL0_OFFSET;
- priv->control1 = control + CONTROL1_OFFSET;
+ drvdata->type = LEGACY;
+ drvdata->data.tz = tz;
+ platform_set_drvdata(pdev, drvdata);
+
+ return 0;
}
- priv->data->init_sensor(pdev, priv);
+ ret = armada_thermal_probe_syscon(pdev, priv);
+ if (ret)
+ return ret;
- thermal = thermal_zone_device_register(dev_name(&pdev->dev), 0, 0, priv,
- &ops, NULL, 0, 0);
- if (IS_ERR(thermal)) {
- dev_err(&pdev->dev,
- "Failed to register thermal zone device\n");
- return PTR_ERR(thermal);
- }
+ priv->current_channel = -1;
+ priv->data->init(pdev, priv);
+ drvdata->type = SYSCON;
+ drvdata->data.priv = priv;
+ platform_set_drvdata(pdev, drvdata);
- platform_set_drvdata(pdev, thermal);
+ /*
+ * There is one channel for the IC and one per CPU (if any); each
+ * channel has one sensor.
+ */
+ for (sensor_id = 0; sensor_id <= priv->data->cpu_nr; sensor_id++) {
+ sensor = devm_kzalloc(&pdev->dev,
+ sizeof(struct armada_thermal_sensor),
+ GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ /* Register the sensor */
+ sensor->priv = priv;
+ sensor->id = sensor_id;
+ tz = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ sensor->id, sensor,
+ &of_ops);
+ if (IS_ERR(tz)) {
+ dev_info(&pdev->dev, "Thermal sensor %d unavailable\n",
+ sensor_id);
+ devm_kfree(&pdev->dev, sensor);
+ continue;
+ }
+ }
return 0;
}
static int armada_thermal_exit(struct platform_device *pdev)
{
- struct thermal_zone_device *armada_thermal =
- platform_get_drvdata(pdev);
+ struct armada_drvdata *drvdata = platform_get_drvdata(pdev);
- thermal_zone_device_unregister(armada_thermal);
+ if (drvdata->type == LEGACY)
+ thermal_zone_device_unregister(drvdata->data.tz);
return 0;
}
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 334d98be03b9..aa452acb60b6 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -3,6 +3,7 @@
// Copyright 2013 Freescale Semiconductor, Inc.
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
@@ -604,7 +605,10 @@ static int imx_init_from_nvmem_cells(struct platform_device *pdev)
ret = nvmem_cell_read_u32(&pdev->dev, "calib", &val);
if (ret)
return ret;
- imx_init_calib(pdev, val);
+
+ ret = imx_init_calib(pdev, val);
+ if (ret)
+ return ret;
ret = nvmem_cell_read_u32(&pdev->dev, "temp_grade", &val);
if (ret)
@@ -644,6 +648,27 @@ static const struct of_device_id of_imx_thermal_match[] = {
};
MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
+/*
+ * Create a cooling device in case no #cooling-cells property is available
+ * in the CPU node
+ */
+static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data)
+{
+ struct device_node *np = of_get_cpu_node(data->policy->cpu, NULL);
+ int ret;
+
+ if (!np || !of_find_property(np, "#cooling-cells", NULL)) {
+ data->cdev = cpufreq_cooling_register(data->policy);
+ if (IS_ERR(data->cdev)) {
+ ret = PTR_ERR(data->cdev);
+ cpufreq_cpu_put(data->policy);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int imx_thermal_probe(struct platform_device *pdev)
{
struct imx_thermal_data *data;
@@ -724,12 +749,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- data->cdev = cpufreq_cooling_register(data->policy);
- if (IS_ERR(data->cdev)) {
- ret = PTR_ERR(data->cdev);
+ ret = imx_thermal_register_legacy_cooling(data);
+ if (ret) {
dev_err(&pdev->dev,
"failed to register cpufreq cooling device: %d\n", ret);
- cpufreq_cpu_put(data->policy);
return ret;
}
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 2cc2193637e7..a821929ede0b 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o
-qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o
+qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-v2.o
diff --git a/drivers/thermal/qcom/tsens-8996.c b/drivers/thermal/qcom/tsens-8996.c
deleted file mode 100644
index e1f77818d8fa..000000000000
--- a/drivers/thermal/qcom/tsens-8996.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include "tsens.h"
-
-#define STATUS_OFFSET 0x10a0
-#define LAST_TEMP_MASK 0xfff
-#define STATUS_VALID_BIT BIT(21)
-#define CODE_SIGN_BIT BIT(11)
-
-static int get_temp_8996(struct tsens_device *tmdev, int id, int *temp)
-{
- struct tsens_sensor *s = &tmdev->sensor[id];
- u32 code;
- unsigned int sensor_addr;
- int last_temp = 0, last_temp2 = 0, last_temp3 = 0, ret;
-
- sensor_addr = STATUS_OFFSET + s->hw_id * 4;
- ret = regmap_read(tmdev->map, sensor_addr, &code);
- if (ret)
- return ret;
- last_temp = code & LAST_TEMP_MASK;
- if (code & STATUS_VALID_BIT)
- goto done;
-
- /* Try a second time */
- ret = regmap_read(tmdev->map, sensor_addr, &code);
- if (ret)
- return ret;
- if (code & STATUS_VALID_BIT) {
- last_temp = code & LAST_TEMP_MASK;
- goto done;
- } else {
- last_temp2 = code & LAST_TEMP_MASK;
- }
-
- /* Try a third/last time */
- ret = regmap_read(tmdev->map, sensor_addr, &code);
- if (ret)
- return ret;
- if (code & STATUS_VALID_BIT) {
- last_temp = code & LAST_TEMP_MASK;
- goto done;
- } else {
- last_temp3 = code & LAST_TEMP_MASK;
- }
-
- if (last_temp == last_temp2)
- last_temp = last_temp2;
- else if (last_temp2 == last_temp3)
- last_temp = last_temp3;
-done:
- /* Code sign bit is the sign extension for a negative value */
- if (last_temp & CODE_SIGN_BIT)
- last_temp |= ~CODE_SIGN_BIT;
-
- /* Temperatures are in deciCelicius */
- *temp = last_temp * 100;
-
- return 0;
-}
-
-static const struct tsens_ops ops_8996 = {
- .init = init_common,
- .get_temp = get_temp_8996,
-};
-
-const struct tsens_data data_8996 = {
- .num_sensors = 13,
- .ops = &ops_8996,
-};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
index b1449ad67fc0..6207d8d92351 100644
--- a/drivers/thermal/qcom/tsens-common.c
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/nvmem-consumer.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "tsens.h"
@@ -103,11 +104,11 @@ int get_temp_common(struct tsens_device *tmdev, int id, int *temp)
{
struct tsens_sensor *s = &tmdev->sensor[id];
u32 code;
- unsigned int sensor_addr;
+ unsigned int status_reg;
int last_temp = 0, ret;
- sensor_addr = S0_ST_ADDR + s->hw_id * SN_ADDR_OFFSET;
- ret = regmap_read(tmdev->map, sensor_addr, &code);
+ status_reg = S0_ST_ADDR + s->hw_id * SN_ADDR_OFFSET;
+ ret = regmap_read(tmdev->map, status_reg, &code);
if (ret)
return ret;
last_temp = code & SN_ST_TEMP_MASK;
@@ -126,16 +127,28 @@ static const struct regmap_config tsens_config = {
int __init init_common(struct tsens_device *tmdev)
{
void __iomem *base;
+ struct resource *res;
+ struct platform_device *op = of_find_device_by_node(tmdev->dev->of_node);
- base = of_iomap(tmdev->dev->of_node, 0);
- if (!base)
+ if (!op)
return -EINVAL;
+ /* The driver only uses the TM register address space for now */
+ if (op->num_resources > 1) {
+ tmdev->tm_offset = 0;
+ } else {
+ /* old DTs where SROT and TM were in a contiguous 2K block */
+ tmdev->tm_offset = 0x1000;
+ }
+
+ res = platform_get_resource(op, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&op->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
tmdev->map = devm_regmap_init_mmio(tmdev->dev, base, &tsens_config);
- if (IS_ERR(tmdev->map)) {
- iounmap(base);
+ if (IS_ERR(tmdev->map))
return PTR_ERR(tmdev->map);
- }
return 0;
}
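/*
 * Editor's note (not part of this patch): with old single-resource DTs
 * the SROT and TM register spaces share one mapping and the TM block
 * sits at +0x1000, while new DTs describe TM as its own resource, so
 * tm_offset is 0. Either way a status read then takes the form:
 *
 *	regmap_read(tmdev->map,
 *		    tmdev->tm_offset + STATUS_OFFSET + s->hw_id * 4, &code);
 */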
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
new file mode 100644
index 000000000000..44da02f594ac
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018, Linaro Limited
+ */
+
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include "tsens.h"
+
+#define STATUS_OFFSET 0xa0
+#define LAST_TEMP_MASK 0xfff
+#define STATUS_VALID_BIT BIT(21)
+
+static int get_temp_tsens_v2(struct tsens_device *tmdev, int id, int *temp)
+{
+ struct tsens_sensor *s = &tmdev->sensor[id];
+ u32 code;
+ unsigned int status_reg;
+ u32 last_temp = 0, last_temp2 = 0, last_temp3 = 0;
+ int ret;
+
+ status_reg = tmdev->tm_offset + STATUS_OFFSET + s->hw_id * 4;
+ ret = regmap_read(tmdev->map, status_reg, &code);
+ if (ret)
+ return ret;
+ last_temp = code & LAST_TEMP_MASK;
+ if (code & STATUS_VALID_BIT)
+ goto done;
+
+ /* Try a second time */
+ ret = regmap_read(tmdev->map, status_reg, &code);
+ if (ret)
+ return ret;
+ if (code & STATUS_VALID_BIT) {
+ last_temp = code & LAST_TEMP_MASK;
+ goto done;
+ } else {
+ last_temp2 = code & LAST_TEMP_MASK;
+ }
+
+ /* Try a third/last time */
+ ret = regmap_read(tmdev->map, status_reg, &code);
+ if (ret)
+ return ret;
+ if (code & STATUS_VALID_BIT) {
+ last_temp = code & LAST_TEMP_MASK;
+ goto done;
+ } else {
+ last_temp3 = code & LAST_TEMP_MASK;
+ }
+
+ if (last_temp == last_temp2)
+ last_temp = last_temp2;
+ else if (last_temp2 == last_temp3)
+ last_temp = last_temp3;
+done:
+ /* Convert temperature from deciCelsius to milliCelsius */
+ *temp = sign_extend32(last_temp, fls(LAST_TEMP_MASK) - 1) * 100;
+
+ return 0;
+}
+
+static const struct tsens_ops ops_generic_v2 = {
+ .init = init_common,
+ .get_temp = get_temp_tsens_v2,
+};
+
+const struct tsens_data data_tsens_v2 = {
+ .ops = &ops_generic_v2,
+};
+
+/* Kept around for backward compatibility with old msm8996.dtsi */
+const struct tsens_data data_8996 = {
+ .num_sensors = 13,
+ .ops = &ops_generic_v2,
+};
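/*
 * Editor's worked example (not part of this patch): with
 * LAST_TEMP_MASK = 0xfff, fls(0xfff) - 1 = 11, so bit 11 of the 12-bit
 * sample is treated as the sign bit:
 *
 *	sign_extend32(0x02a, 11) * 100 =  42 * 100 =  4200 mC
 *	sign_extend32(0xff6, 11) * 100 = -10 * 100 = -1000 mC
 */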
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 3440166c2ae9..a2c9bfae3d86 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -72,6 +72,9 @@ static const struct of_device_id tsens_table[] = {
}, {
.compatible = "qcom,msm8996-tsens",
.data = &data_8996,
+ }, {
+ .compatible = "qcom,tsens-v2",
+ .data = &data_tsens_v2,
},
{}
};
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index 911c1978892b..14331eb45a86 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -77,9 +77,8 @@ struct tsens_device {
struct device *dev;
u32 num_sensors;
struct regmap *map;
- struct regmap_field *status_field;
+ u32 tm_offset;
struct tsens_context ctx;
- bool trdy;
const struct tsens_ops *ops;
struct tsens_sensor sensor[0];
};
@@ -89,6 +88,9 @@ void compute_intercept_slope(struct tsens_device *, u32 *, u32 *, u32);
int init_common(struct tsens_device *);
int get_temp_common(struct tsens_device *, int, int *);
-extern const struct tsens_data data_8916, data_8974, data_8960, data_8996;
+/* TSENS v1 targets */
+extern const struct tsens_data data_8916, data_8974, data_8960;
+/* TSENS v2 targets */
+extern const struct tsens_data data_8996, data_tsens_v2;
#endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 45fb284d4c11..e77e63070e99 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -598,7 +598,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
enr_bits |= 3 << (i * 8);
}
- if (enr_bits)
+ if (common->base && enr_bits)
rcar_thermal_common_write(common, ENR, enr_bits);
dev_info(dev, "%d sensor probed\n", i);
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index a992e51ef065..48eef552cba4 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -789,11 +789,6 @@ static void exynos_tmu_work(struct work_struct *work)
struct exynos_tmu_data *data = container_of(work,
struct exynos_tmu_data, irq_work);
- if (!IS_ERR(data->clk_sec))
- clk_enable(data->clk_sec);
- if (!IS_ERR(data->clk_sec))
- clk_disable(data->clk_sec);
-
thermal_zone_device_update(data->tzd, THERMAL_EVENT_UNSPECIFIED);
mutex_lock(&data->lock);
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 11278836ed12..40c69a533b24 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -142,7 +142,8 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
INIT_LIST_HEAD(&hwmon->tz_list);
strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
- hwmon->device = hwmon_device_register_with_info(NULL, hwmon->type,
+ strreplace(hwmon->type, '-', '_');
+ hwmon->device = hwmon_device_register_with_info(&tz->device, hwmon->type,
hwmon, NULL, NULL);
if (IS_ERR(hwmon->device)) {
result = PTR_ERR(hwmon->device);
diff --git a/drivers/thermal/ti-soc-thermal/dra752-bandgap.h b/drivers/thermal/ti-soc-thermal/dra752-bandgap.h
index a31e4b5e82cd..9490cd63fa6a 100644
--- a/drivers/thermal/ti-soc-thermal/dra752-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/dra752-bandgap.h
@@ -54,56 +54,36 @@
#define DRA752_STD_FUSE_OPP_BGAP_CORE_OFFSET 0x8
#define DRA752_TEMP_SENSOR_CORE_OFFSET 0x154
#define DRA752_BANDGAP_THRESHOLD_CORE_OFFSET 0x1ac
-#define DRA752_BANDGAP_CUMUL_DTEMP_CORE_OFFSET 0x1c4
-#define DRA752_DTEMP_CORE_0_OFFSET 0x208
#define DRA752_DTEMP_CORE_1_OFFSET 0x20c
#define DRA752_DTEMP_CORE_2_OFFSET 0x210
-#define DRA752_DTEMP_CORE_3_OFFSET 0x214
-#define DRA752_DTEMP_CORE_4_OFFSET 0x218
/* DRA752.iva register offsets */
#define DRA752_STD_FUSE_OPP_BGAP_IVA_OFFSET 0x388
#define DRA752_TEMP_SENSOR_IVA_OFFSET 0x398
#define DRA752_BANDGAP_THRESHOLD_IVA_OFFSET 0x3a4
-#define DRA752_BANDGAP_CUMUL_DTEMP_IVA_OFFSET 0x3b4
-#define DRA752_DTEMP_IVA_0_OFFSET 0x3d0
#define DRA752_DTEMP_IVA_1_OFFSET 0x3d4
#define DRA752_DTEMP_IVA_2_OFFSET 0x3d8
-#define DRA752_DTEMP_IVA_3_OFFSET 0x3dc
-#define DRA752_DTEMP_IVA_4_OFFSET 0x3e0
/* DRA752.mpu register offsets */
#define DRA752_STD_FUSE_OPP_BGAP_MPU_OFFSET 0x4
#define DRA752_TEMP_SENSOR_MPU_OFFSET 0x14c
#define DRA752_BANDGAP_THRESHOLD_MPU_OFFSET 0x1a4
-#define DRA752_BANDGAP_CUMUL_DTEMP_MPU_OFFSET 0x1bc
-#define DRA752_DTEMP_MPU_0_OFFSET 0x1e0
#define DRA752_DTEMP_MPU_1_OFFSET 0x1e4
#define DRA752_DTEMP_MPU_2_OFFSET 0x1e8
-#define DRA752_DTEMP_MPU_3_OFFSET 0x1ec
-#define DRA752_DTEMP_MPU_4_OFFSET 0x1f0
/* DRA752.dspeve register offsets */
#define DRA752_STD_FUSE_OPP_BGAP_DSPEVE_OFFSET 0x384
#define DRA752_TEMP_SENSOR_DSPEVE_OFFSET 0x394
#define DRA752_BANDGAP_THRESHOLD_DSPEVE_OFFSET 0x3a0
-#define DRA752_BANDGAP_CUMUL_DTEMP_DSPEVE_OFFSET 0x3b0
-#define DRA752_DTEMP_DSPEVE_0_OFFSET 0x3bc
#define DRA752_DTEMP_DSPEVE_1_OFFSET 0x3c0
#define DRA752_DTEMP_DSPEVE_2_OFFSET 0x3c4
-#define DRA752_DTEMP_DSPEVE_3_OFFSET 0x3c8
-#define DRA752_DTEMP_DSPEVE_4_OFFSET 0x3cc
/* DRA752.gpu register offsets */
#define DRA752_STD_FUSE_OPP_BGAP_GPU_OFFSET 0x0
#define DRA752_TEMP_SENSOR_GPU_OFFSET 0x150
#define DRA752_BANDGAP_THRESHOLD_GPU_OFFSET 0x1a8
-#define DRA752_BANDGAP_CUMUL_DTEMP_GPU_OFFSET 0x1c0
-#define DRA752_DTEMP_GPU_0_OFFSET 0x1f4
#define DRA752_DTEMP_GPU_1_OFFSET 0x1f8
#define DRA752_DTEMP_GPU_2_OFFSET 0x1fc
-#define DRA752_DTEMP_GPU_3_OFFSET 0x200
-#define DRA752_DTEMP_GPU_4_OFFSET 0x204
/**
* Register bitfields for DRA752
@@ -114,7 +94,6 @@
*/
/* DRA752.BANDGAP_STATUS_1 */
-#define DRA752_BANDGAP_STATUS_1_ALERT_MASK BIT(31)
#define DRA752_BANDGAP_STATUS_1_HOT_CORE_MASK BIT(5)
#define DRA752_BANDGAP_STATUS_1_COLD_CORE_MASK BIT(4)
#define DRA752_BANDGAP_STATUS_1_HOT_GPU_MASK BIT(3)
@@ -125,10 +104,6 @@
/* DRA752.BANDGAP_CTRL_2 */
#define DRA752_BANDGAP_CTRL_2_FREEZE_IVA_MASK BIT(22)
#define DRA752_BANDGAP_CTRL_2_FREEZE_DSPEVE_MASK BIT(21)
-#define DRA752_BANDGAP_CTRL_2_CLEAR_IVA_MASK BIT(19)
-#define DRA752_BANDGAP_CTRL_2_CLEAR_DSPEVE_MASK BIT(18)
-#define DRA752_BANDGAP_CTRL_2_CLEAR_ACCUM_IVA_MASK BIT(16)
-#define DRA752_BANDGAP_CTRL_2_CLEAR_ACCUM_DSPEVE_MASK BIT(15)
#define DRA752_BANDGAP_CTRL_2_MASK_HOT_IVA_MASK BIT(3)
#define DRA752_BANDGAP_CTRL_2_MASK_COLD_IVA_MASK BIT(2)
#define DRA752_BANDGAP_CTRL_2_MASK_HOT_DSPEVE_MASK BIT(1)
@@ -141,17 +116,10 @@
#define DRA752_BANDGAP_STATUS_2_COLD_DSPEVE_MASK BIT(0)
/* DRA752.BANDGAP_CTRL_1 */
-#define DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK (0x3 << 30)
#define DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK (0x7 << 27)
#define DRA752_BANDGAP_CTRL_1_FREEZE_CORE_MASK BIT(23)
#define DRA752_BANDGAP_CTRL_1_FREEZE_GPU_MASK BIT(22)
#define DRA752_BANDGAP_CTRL_1_FREEZE_MPU_MASK BIT(21)
-#define DRA752_BANDGAP_CTRL_1_CLEAR_CORE_MASK BIT(20)
-#define DRA752_BANDGAP_CTRL_1_CLEAR_GPU_MASK BIT(19)
-#define DRA752_BANDGAP_CTRL_1_CLEAR_MPU_MASK BIT(18)
-#define DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_CORE_MASK BIT(17)
-#define DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_GPU_MASK BIT(16)
-#define DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_MPU_MASK BIT(15)
#define DRA752_BANDGAP_CTRL_1_MASK_HOT_CORE_MASK BIT(5)
#define DRA752_BANDGAP_CTRL_1_MASK_COLD_CORE_MASK BIT(4)
#define DRA752_BANDGAP_CTRL_1_MASK_HOT_GPU_MASK BIT(3)
@@ -168,22 +136,6 @@
#define DRA752_BANDGAP_THRESHOLD_HOT_MASK (0x3ff << 16)
#define DRA752_BANDGAP_THRESHOLD_COLD_MASK (0x3ff << 0)
-
-/* DRA752.BANDGAP_CUMUL_DTEMP_CORE */
-#define DRA752_BANDGAP_CUMUL_DTEMP_CORE_MASK (0xffffffff << 0)
-
-/* DRA752.BANDGAP_CUMUL_DTEMP_IVA */
-#define DRA752_BANDGAP_CUMUL_DTEMP_IVA_MASK (0xffffffff << 0)
-
-/* DRA752.BANDGAP_CUMUL_DTEMP_MPU */
-#define DRA752_BANDGAP_CUMUL_DTEMP_MPU_MASK (0xffffffff << 0)
-
-/* DRA752.BANDGAP_CUMUL_DTEMP_DSPEVE */
-#define DRA752_BANDGAP_CUMUL_DTEMP_DSPEVE_MASK (0xffffffff << 0)
-
-/* DRA752.BANDGAP_CUMUL_DTEMP_GPU */
-#define DRA752_BANDGAP_CUMUL_DTEMP_GPU_MASK (0xffffffff << 0)
-
/**
* Temperature limits and thresholds for DRA752
*
@@ -202,10 +154,6 @@
/* bandgap clock limits */
#define DRA752_GPU_MAX_FREQ 1500000
#define DRA752_GPU_MIN_FREQ 1000000
-/* sensor limits */
-#define DRA752_GPU_MIN_TEMP -40000
-#define DRA752_GPU_MAX_TEMP 125000
-#define DRA752_GPU_HYST_VAL 5000
/* interrupts thresholds */
#define DRA752_GPU_T_HOT 800
#define DRA752_GPU_T_COLD 795
@@ -214,10 +162,6 @@
/* bandgap clock limits */
#define DRA752_MPU_MAX_FREQ 1500000
#define DRA752_MPU_MIN_FREQ 1000000
-/* sensor limits */
-#define DRA752_MPU_MIN_TEMP -40000
-#define DRA752_MPU_MAX_TEMP 125000
-#define DRA752_MPU_HYST_VAL 5000
/* interrupts thresholds */
#define DRA752_MPU_T_HOT 800
#define DRA752_MPU_T_COLD 795
@@ -226,10 +170,6 @@
/* bandgap clock limits */
#define DRA752_CORE_MAX_FREQ 1500000
#define DRA752_CORE_MIN_FREQ 1000000
-/* sensor limits */
-#define DRA752_CORE_MIN_TEMP -40000
-#define DRA752_CORE_MAX_TEMP 125000
-#define DRA752_CORE_HYST_VAL 5000
/* interrupts thresholds */
#define DRA752_CORE_T_HOT 800
#define DRA752_CORE_T_COLD 795
@@ -238,10 +178,6 @@
/* bandgap clock limits */
#define DRA752_DSPEVE_MAX_FREQ 1500000
#define DRA752_DSPEVE_MIN_FREQ 1000000
-/* sensor limits */
-#define DRA752_DSPEVE_MIN_TEMP -40000
-#define DRA752_DSPEVE_MAX_TEMP 125000
-#define DRA752_DSPEVE_HYST_VAL 5000
/* interrupts thresholds */
#define DRA752_DSPEVE_T_HOT 800
#define DRA752_DSPEVE_T_COLD 795
@@ -250,10 +186,6 @@
/* bandgap clock limits */
#define DRA752_IVA_MAX_FREQ 1500000
#define DRA752_IVA_MIN_FREQ 1000000
-/* sensor limits */
-#define DRA752_IVA_MIN_TEMP -40000
-#define DRA752_IVA_MAX_TEMP 125000
-#define DRA752_IVA_HYST_VAL 5000
/* interrupts thresholds */
#define DRA752_IVA_T_HOT 800
#define DRA752_IVA_T_COLD 795
diff --git a/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c b/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
index 4167373327d9..33a3030aa3c0 100644
--- a/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
@@ -41,24 +41,16 @@ dra752_core_temp_sensor_registers = {
.bgap_mask_ctrl = DRA752_BANDGAP_CTRL_1_OFFSET,
.mask_hot_mask = DRA752_BANDGAP_CTRL_1_MASK_HOT_CORE_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_1_MASK_COLD_CORE_MASK,
- .mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
.mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_1_FREEZE_CORE_MASK,
- .mask_clear_mask = DRA752_BANDGAP_CTRL_1_CLEAR_CORE_MASK,
- .mask_clear_accum_mask = DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_CORE_MASK,
.bgap_threshold = DRA752_BANDGAP_THRESHOLD_CORE_OFFSET,
.threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK,
.threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK,
.bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET,
- .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK,
.status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_CORE_MASK,
.status_cold_mask = DRA752_BANDGAP_STATUS_1_COLD_CORE_MASK,
- .bgap_cumul_dtemp = DRA752_BANDGAP_CUMUL_DTEMP_CORE_OFFSET,
- .ctrl_dtemp_0 = DRA752_DTEMP_CORE_0_OFFSET,
.ctrl_dtemp_1 = DRA752_DTEMP_CORE_1_OFFSET,
.ctrl_dtemp_2 = DRA752_DTEMP_CORE_2_OFFSET,
- .ctrl_dtemp_3 = DRA752_DTEMP_CORE_3_OFFSET,
- .ctrl_dtemp_4 = DRA752_DTEMP_CORE_4_OFFSET,
.bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_CORE_OFFSET,
};
@@ -74,24 +66,16 @@ dra752_iva_temp_sensor_registers = {
.bgap_mask_ctrl = DRA752_BANDGAP_CTRL_2_OFFSET,
.mask_hot_mask = DRA752_BANDGAP_CTRL_2_MASK_HOT_IVA_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_2_MASK_COLD_IVA_MASK,
- .mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
.mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_2_FREEZE_IVA_MASK,
- .mask_clear_mask = DRA752_BANDGAP_CTRL_2_CLEAR_IVA_MASK,
- .mask_clear_accum_mask = DRA752_BANDGAP_CTRL_2_CLEAR_ACCUM_IVA_MASK,
.bgap_threshold = DRA752_BANDGAP_THRESHOLD_IVA_OFFSET,
.threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK,
.threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK,
.bgap_status = DRA752_BANDGAP_STATUS_2_OFFSET,
- .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK,
.status_hot_mask = DRA752_BANDGAP_STATUS_2_HOT_IVA_MASK,
.status_cold_mask = DRA752_BANDGAP_STATUS_2_COLD_IVA_MASK,
- .bgap_cumul_dtemp = DRA752_BANDGAP_CUMUL_DTEMP_IVA_OFFSET,
- .ctrl_dtemp_0 = DRA752_DTEMP_IVA_0_OFFSET,
.ctrl_dtemp_1 = DRA752_DTEMP_IVA_1_OFFSET,
.ctrl_dtemp_2 = DRA752_DTEMP_IVA_2_OFFSET,
- .ctrl_dtemp_3 = DRA752_DTEMP_IVA_3_OFFSET,
- .ctrl_dtemp_4 = DRA752_DTEMP_IVA_4_OFFSET,
.bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_IVA_OFFSET,
};
@@ -107,24 +91,16 @@ dra752_mpu_temp_sensor_registers = {
.bgap_mask_ctrl = DRA752_BANDGAP_CTRL_1_OFFSET,
.mask_hot_mask = DRA752_BANDGAP_CTRL_1_MASK_HOT_MPU_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_1_MASK_COLD_MPU_MASK,
- .mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
.mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_1_FREEZE_MPU_MASK,
- .mask_clear_mask = DRA752_BANDGAP_CTRL_1_CLEAR_MPU_MASK,
- .mask_clear_accum_mask = DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_MPU_MASK,
.bgap_threshold = DRA752_BANDGAP_THRESHOLD_MPU_OFFSET,
.threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK,
.threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK,
.bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET,
- .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK,
.status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_MPU_MASK,
.status_cold_mask = DRA752_BANDGAP_STATUS_1_COLD_MPU_MASK,
- .bgap_cumul_dtemp = DRA752_BANDGAP_CUMUL_DTEMP_MPU_OFFSET,
- .ctrl_dtemp_0 = DRA752_DTEMP_MPU_0_OFFSET,
.ctrl_dtemp_1 = DRA752_DTEMP_MPU_1_OFFSET,
.ctrl_dtemp_2 = DRA752_DTEMP_MPU_2_OFFSET,
- .ctrl_dtemp_3 = DRA752_DTEMP_MPU_3_OFFSET,
- .ctrl_dtemp_4 = DRA752_DTEMP_MPU_4_OFFSET,
.bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_MPU_OFFSET,
};
@@ -140,24 +116,16 @@ dra752_dspeve_temp_sensor_registers = {
.bgap_mask_ctrl = DRA752_BANDGAP_CTRL_2_OFFSET,
.mask_hot_mask = DRA752_BANDGAP_CTRL_2_MASK_HOT_DSPEVE_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_2_MASK_COLD_DSPEVE_MASK,
- .mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
.mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_2_FREEZE_DSPEVE_MASK,
- .mask_clear_mask = DRA752_BANDGAP_CTRL_2_CLEAR_DSPEVE_MASK,
- .mask_clear_accum_mask = DRA752_BANDGAP_CTRL_2_CLEAR_ACCUM_DSPEVE_MASK,
.bgap_threshold = DRA752_BANDGAP_THRESHOLD_DSPEVE_OFFSET,
.threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK,
.threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK,
.bgap_status = DRA752_BANDGAP_STATUS_2_OFFSET,
- .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK,
.status_hot_mask = DRA752_BANDGAP_STATUS_2_HOT_DSPEVE_MASK,
.status_cold_mask = DRA752_BANDGAP_STATUS_2_COLD_DSPEVE_MASK,
- .bgap_cumul_dtemp = DRA752_BANDGAP_CUMUL_DTEMP_DSPEVE_OFFSET,
- .ctrl_dtemp_0 = DRA752_DTEMP_DSPEVE_0_OFFSET,
.ctrl_dtemp_1 = DRA752_DTEMP_DSPEVE_1_OFFSET,
.ctrl_dtemp_2 = DRA752_DTEMP_DSPEVE_2_OFFSET,
- .ctrl_dtemp_3 = DRA752_DTEMP_DSPEVE_3_OFFSET,
- .ctrl_dtemp_4 = DRA752_DTEMP_DSPEVE_4_OFFSET,
.bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_DSPEVE_OFFSET,
};
@@ -173,24 +141,16 @@ dra752_gpu_temp_sensor_registers = {
.bgap_mask_ctrl = DRA752_BANDGAP_CTRL_1_OFFSET,
.mask_hot_mask = DRA752_BANDGAP_CTRL_1_MASK_HOT_GPU_MASK,
.mask_cold_mask = DRA752_BANDGAP_CTRL_1_MASK_COLD_GPU_MASK,
- .mask_sidlemode_mask = DRA752_BANDGAP_CTRL_1_SIDLEMODE_MASK,
.mask_counter_delay_mask = DRA752_BANDGAP_CTRL_1_COUNTER_DELAY_MASK,
.mask_freeze_mask = DRA752_BANDGAP_CTRL_1_FREEZE_GPU_MASK,
- .mask_clear_mask = DRA752_BANDGAP_CTRL_1_CLEAR_GPU_MASK,
- .mask_clear_accum_mask = DRA752_BANDGAP_CTRL_1_CLEAR_ACCUM_GPU_MASK,
.bgap_threshold = DRA752_BANDGAP_THRESHOLD_GPU_OFFSET,
.threshold_thot_mask = DRA752_BANDGAP_THRESHOLD_HOT_MASK,
.threshold_tcold_mask = DRA752_BANDGAP_THRESHOLD_COLD_MASK,
.bgap_status = DRA752_BANDGAP_STATUS_1_OFFSET,
- .status_bgap_alert_mask = DRA752_BANDGAP_STATUS_1_ALERT_MASK,
.status_hot_mask = DRA752_BANDGAP_STATUS_1_HOT_GPU_MASK,
.status_cold_mask = DRA752_BANDGAP_STATUS_1_COLD_GPU_MASK,
- .bgap_cumul_dtemp = DRA752_BANDGAP_CUMUL_DTEMP_GPU_OFFSET,
- .ctrl_dtemp_0 = DRA752_DTEMP_GPU_0_OFFSET,
.ctrl_dtemp_1 = DRA752_DTEMP_GPU_1_OFFSET,
.ctrl_dtemp_2 = DRA752_DTEMP_GPU_2_OFFSET,
- .ctrl_dtemp_3 = DRA752_DTEMP_GPU_3_OFFSET,
- .ctrl_dtemp_4 = DRA752_DTEMP_GPU_4_OFFSET,
.bgap_efuse = DRA752_STD_FUSE_OPP_BGAP_GPU_OFFSET,
};
@@ -200,11 +160,6 @@ static struct temp_sensor_data dra752_mpu_temp_sensor_data = {
.t_cold = DRA752_MPU_T_COLD,
.min_freq = DRA752_MPU_MIN_FREQ,
.max_freq = DRA752_MPU_MAX_FREQ,
- .max_temp = DRA752_MPU_MAX_TEMP,
- .min_temp = DRA752_MPU_MIN_TEMP,
- .hyst_val = DRA752_MPU_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/* Thresholds and limits for DRA752 GPU temperature sensor */
@@ -213,11 +168,6 @@ static struct temp_sensor_data dra752_gpu_temp_sensor_data = {
.t_cold = DRA752_GPU_T_COLD,
.min_freq = DRA752_GPU_MIN_FREQ,
.max_freq = DRA752_GPU_MAX_FREQ,
- .max_temp = DRA752_GPU_MAX_TEMP,
- .min_temp = DRA752_GPU_MIN_TEMP,
- .hyst_val = DRA752_GPU_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/* Thresholds and limits for DRA752 CORE temperature sensor */
@@ -226,11 +176,6 @@ static struct temp_sensor_data dra752_core_temp_sensor_data = {
.t_cold = DRA752_CORE_T_COLD,
.min_freq = DRA752_CORE_MIN_FREQ,
.max_freq = DRA752_CORE_MAX_FREQ,
- .max_temp = DRA752_CORE_MAX_TEMP,
- .min_temp = DRA752_CORE_MIN_TEMP,
- .hyst_val = DRA752_CORE_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/* Thresholds and limits for DRA752 DSPEVE temperature sensor */
@@ -239,11 +184,6 @@ static struct temp_sensor_data dra752_dspeve_temp_sensor_data = {
.t_cold = DRA752_DSPEVE_T_COLD,
.min_freq = DRA752_DSPEVE_MIN_FREQ,
.max_freq = DRA752_DSPEVE_MAX_FREQ,
- .max_temp = DRA752_DSPEVE_MAX_TEMP,
- .min_temp = DRA752_DSPEVE_MIN_TEMP,
- .hyst_val = DRA752_DSPEVE_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/* Thresholds and limits for DRA752 IVA temperature sensor */
@@ -252,11 +192,6 @@ static struct temp_sensor_data dra752_iva_temp_sensor_data = {
.t_cold = DRA752_IVA_T_COLD,
.min_freq = DRA752_IVA_MIN_FREQ,
.max_freq = DRA752_IVA_MAX_FREQ,
- .max_temp = DRA752_IVA_MAX_TEMP,
- .min_temp = DRA752_IVA_MIN_TEMP,
- .hyst_val = DRA752_IVA_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/*
diff --git a/drivers/thermal/ti-soc-thermal/omap3-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap3-thermal-data.c
index c6d217913dd1..f5366807daf0 100644
--- a/drivers/thermal/ti-soc-thermal/omap3-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/omap3-thermal-data.c
@@ -48,9 +48,6 @@ omap34xx_mpu_temp_sensor_registers = {
static struct temp_sensor_data omap34xx_mpu_temp_sensor_data = {
.min_freq = 32768,
.max_freq = 32768,
- .max_temp = 125000,
- .min_temp = -40000,
- .hyst_val = 5000,
};
/*
@@ -119,9 +116,6 @@ omap36xx_mpu_temp_sensor_registers = {
static struct temp_sensor_data omap36xx_mpu_temp_sensor_data = {
.min_freq = 32768,
.max_freq = 32768,
- .max_temp = 125000,
- .min_temp = -40000,
- .hyst_val = 5000,
};
/*
diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
index fd1113360603..c12211eaaac4 100644
--- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
@@ -42,9 +42,6 @@ omap4430_mpu_temp_sensor_registers = {
static struct temp_sensor_data omap4430_mpu_temp_sensor_data = {
.min_freq = OMAP4430_MIN_FREQ,
.max_freq = OMAP4430_MAX_FREQ,
- .max_temp = OMAP4430_MAX_TEMP,
- .min_temp = OMAP4430_MIN_TEMP,
- .hyst_val = OMAP4430_HYST_VAL,
};
/*
@@ -121,8 +118,6 @@ omap4460_mpu_temp_sensor_registers = {
.tshut_cold_mask = OMAP4460_TSHUT_COLD_MASK,
.bgap_status = OMAP4460_BGAP_STATUS_OFFSET,
- .status_clean_stop_mask = OMAP4460_CLEAN_STOP_MASK,
- .status_bgap_alert_mask = OMAP4460_BGAP_ALERT_MASK,
.status_hot_mask = OMAP4460_HOT_FLAG_MASK,
.status_cold_mask = OMAP4460_COLD_FLAG_MASK,
@@ -137,11 +132,6 @@ static struct temp_sensor_data omap4460_mpu_temp_sensor_data = {
.t_cold = OMAP4460_T_COLD,
.min_freq = OMAP4460_MIN_FREQ,
.max_freq = OMAP4460_MAX_FREQ,
- .max_temp = OMAP4460_MAX_TEMP,
- .min_temp = OMAP4460_MIN_TEMP,
- .hyst_val = OMAP4460_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/*
diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
index 6f2de3a3356d..b87c8659ec60 100644
--- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
@@ -73,10 +73,6 @@
/* bandgap clock limits (no control on 4430) */
#define OMAP4430_MAX_FREQ 32768
#define OMAP4430_MIN_FREQ 32768
-/* sensor limits */
-#define OMAP4430_MIN_TEMP -40000
-#define OMAP4430_MAX_TEMP 125000
-#define OMAP4430_HYST_VAL 5000
/**
* *** OMAP4460 *** Applicable for OMAP4470
@@ -143,8 +139,6 @@
#define OMAP4460_TSHUT_COLD_MASK (0x3ff << 0)
/* OMAP4460.BANDGAP_STATUS bits */
-#define OMAP4460_CLEAN_STOP_MASK BIT(3)
-#define OMAP4460_BGAP_ALERT_MASK BIT(2)
#define OMAP4460_HOT_FLAG_MASK BIT(1)
#define OMAP4460_COLD_FLAG_MASK BIT(0)
@@ -162,10 +156,6 @@
/* bandgap clock limits */
#define OMAP4460_MAX_FREQ 1500000
#define OMAP4460_MIN_FREQ 1000000
-/* sensor limits */
-#define OMAP4460_MIN_TEMP -40000
-#define OMAP4460_MAX_TEMP 123000
-#define OMAP4460_HYST_VAL 5000
/* interrupts thresholds */
#define OMAP4460_TSHUT_HOT 900 /* 122 deg C */
#define OMAP4460_TSHUT_COLD 895 /* 100 deg C */
diff --git a/drivers/thermal/ti-soc-thermal/omap5-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap5-thermal-data.c
index 6ac037098b52..8191bae834de 100644
--- a/drivers/thermal/ti-soc-thermal/omap5-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/omap5-thermal-data.c
@@ -38,12 +38,8 @@ omap5430_mpu_temp_sensor_registers = {
.bgap_mask_ctrl = OMAP5430_BGAP_CTRL_OFFSET,
.mask_hot_mask = OMAP5430_MASK_HOT_MPU_MASK,
.mask_cold_mask = OMAP5430_MASK_COLD_MPU_MASK,
- .mask_sidlemode_mask = OMAP5430_MASK_SIDLEMODE_MASK,
.mask_counter_delay_mask = OMAP5430_MASK_COUNTER_DELAY_MASK,
.mask_freeze_mask = OMAP5430_MASK_FREEZE_MPU_MASK,
- .mask_clear_mask = OMAP5430_MASK_CLEAR_MPU_MASK,
- .mask_clear_accum_mask = OMAP5430_MASK_CLEAR_ACCUM_MPU_MASK,
-
.bgap_counter = OMAP5430_BGAP_CTRL_OFFSET,
.counter_mask = OMAP5430_COUNTER_MASK,
@@ -57,17 +53,11 @@ omap5430_mpu_temp_sensor_registers = {
.tshut_cold_mask = OMAP5430_TSHUT_COLD_MASK,
.bgap_status = OMAP5430_BGAP_STATUS_OFFSET,
- .status_clean_stop_mask = 0x0,
- .status_bgap_alert_mask = OMAP5430_BGAP_ALERT_MASK,
.status_hot_mask = OMAP5430_HOT_MPU_FLAG_MASK,
.status_cold_mask = OMAP5430_COLD_MPU_FLAG_MASK,
- .bgap_cumul_dtemp = OMAP5430_BGAP_CUMUL_DTEMP_MPU_OFFSET,
- .ctrl_dtemp_0 = OMAP5430_BGAP_DTEMP_MPU_0_OFFSET,
.ctrl_dtemp_1 = OMAP5430_BGAP_DTEMP_MPU_1_OFFSET,
.ctrl_dtemp_2 = OMAP5430_BGAP_DTEMP_MPU_2_OFFSET,
- .ctrl_dtemp_3 = OMAP5430_BGAP_DTEMP_MPU_3_OFFSET,
- .ctrl_dtemp_4 = OMAP5430_BGAP_DTEMP_MPU_4_OFFSET,
.bgap_efuse = OMAP5430_FUSE_OPP_BGAP_MPU,
};
@@ -84,11 +74,8 @@ omap5430_gpu_temp_sensor_registers = {
.bgap_mask_ctrl = OMAP5430_BGAP_CTRL_OFFSET,
.mask_hot_mask = OMAP5430_MASK_HOT_GPU_MASK,
.mask_cold_mask = OMAP5430_MASK_COLD_GPU_MASK,
- .mask_sidlemode_mask = OMAP5430_MASK_SIDLEMODE_MASK,
.mask_counter_delay_mask = OMAP5430_MASK_COUNTER_DELAY_MASK,
.mask_freeze_mask = OMAP5430_MASK_FREEZE_GPU_MASK,
- .mask_clear_mask = OMAP5430_MASK_CLEAR_GPU_MASK,
- .mask_clear_accum_mask = OMAP5430_MASK_CLEAR_ACCUM_GPU_MASK,
.bgap_counter = OMAP5430_BGAP_CTRL_OFFSET,
.counter_mask = OMAP5430_COUNTER_MASK,
@@ -102,17 +89,11 @@ omap5430_gpu_temp_sensor_registers = {
.tshut_cold_mask = OMAP5430_TSHUT_COLD_MASK,
.bgap_status = OMAP5430_BGAP_STATUS_OFFSET,
- .status_clean_stop_mask = 0x0,
- .status_bgap_alert_mask = OMAP5430_BGAP_ALERT_MASK,
.status_hot_mask = OMAP5430_HOT_GPU_FLAG_MASK,
.status_cold_mask = OMAP5430_COLD_GPU_FLAG_MASK,
- .bgap_cumul_dtemp = OMAP5430_BGAP_CUMUL_DTEMP_GPU_OFFSET,
- .ctrl_dtemp_0 = OMAP5430_BGAP_DTEMP_GPU_0_OFFSET,
.ctrl_dtemp_1 = OMAP5430_BGAP_DTEMP_GPU_1_OFFSET,
.ctrl_dtemp_2 = OMAP5430_BGAP_DTEMP_GPU_2_OFFSET,
- .ctrl_dtemp_3 = OMAP5430_BGAP_DTEMP_GPU_3_OFFSET,
- .ctrl_dtemp_4 = OMAP5430_BGAP_DTEMP_GPU_4_OFFSET,
.bgap_efuse = OMAP5430_FUSE_OPP_BGAP_GPU,
};
@@ -130,11 +111,8 @@ omap5430_core_temp_sensor_registers = {
.bgap_mask_ctrl = OMAP5430_BGAP_CTRL_OFFSET,
.mask_hot_mask = OMAP5430_MASK_HOT_CORE_MASK,
.mask_cold_mask = OMAP5430_MASK_COLD_CORE_MASK,
- .mask_sidlemode_mask = OMAP5430_MASK_SIDLEMODE_MASK,
.mask_counter_delay_mask = OMAP5430_MASK_COUNTER_DELAY_MASK,
.mask_freeze_mask = OMAP5430_MASK_FREEZE_CORE_MASK,
- .mask_clear_mask = OMAP5430_MASK_CLEAR_CORE_MASK,
- .mask_clear_accum_mask = OMAP5430_MASK_CLEAR_ACCUM_CORE_MASK,
.bgap_counter = OMAP5430_BGAP_CTRL_OFFSET,
.counter_mask = OMAP5430_COUNTER_MASK,
@@ -148,17 +126,11 @@ omap5430_core_temp_sensor_registers = {
.tshut_cold_mask = OMAP5430_TSHUT_COLD_MASK,
.bgap_status = OMAP5430_BGAP_STATUS_OFFSET,
- .status_clean_stop_mask = 0x0,
- .status_bgap_alert_mask = OMAP5430_BGAP_ALERT_MASK,
.status_hot_mask = OMAP5430_HOT_CORE_FLAG_MASK,
.status_cold_mask = OMAP5430_COLD_CORE_FLAG_MASK,
- .bgap_cumul_dtemp = OMAP5430_BGAP_CUMUL_DTEMP_CORE_OFFSET,
- .ctrl_dtemp_0 = OMAP5430_BGAP_DTEMP_CORE_0_OFFSET,
.ctrl_dtemp_1 = OMAP5430_BGAP_DTEMP_CORE_1_OFFSET,
.ctrl_dtemp_2 = OMAP5430_BGAP_DTEMP_CORE_2_OFFSET,
- .ctrl_dtemp_3 = OMAP5430_BGAP_DTEMP_CORE_3_OFFSET,
- .ctrl_dtemp_4 = OMAP5430_BGAP_DTEMP_CORE_4_OFFSET,
.bgap_efuse = OMAP5430_FUSE_OPP_BGAP_CORE,
};
@@ -171,11 +143,6 @@ static struct temp_sensor_data omap5430_mpu_temp_sensor_data = {
.t_cold = OMAP5430_MPU_T_COLD,
.min_freq = OMAP5430_MPU_MIN_FREQ,
.max_freq = OMAP5430_MPU_MAX_FREQ,
- .max_temp = OMAP5430_MPU_MAX_TEMP,
- .min_temp = OMAP5430_MPU_MIN_TEMP,
- .hyst_val = OMAP5430_MPU_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/* Thresholds and limits for OMAP5430 GPU temperature sensor */
@@ -186,11 +153,6 @@ static struct temp_sensor_data omap5430_gpu_temp_sensor_data = {
.t_cold = OMAP5430_GPU_T_COLD,
.min_freq = OMAP5430_GPU_MIN_FREQ,
.max_freq = OMAP5430_GPU_MAX_FREQ,
- .max_temp = OMAP5430_GPU_MAX_TEMP,
- .min_temp = OMAP5430_GPU_MIN_TEMP,
- .hyst_val = OMAP5430_GPU_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/* Thresholds and limits for OMAP5430 CORE temperature sensor */
@@ -201,11 +163,6 @@ static struct temp_sensor_data omap5430_core_temp_sensor_data = {
.t_cold = OMAP5430_CORE_T_COLD,
.min_freq = OMAP5430_CORE_MIN_FREQ,
.max_freq = OMAP5430_CORE_MAX_FREQ,
- .max_temp = OMAP5430_CORE_MAX_TEMP,
- .min_temp = OMAP5430_CORE_MIN_TEMP,
- .hyst_val = OMAP5430_CORE_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
};
/*
@@ -319,8 +276,7 @@ const struct ti_bandgap_data omap5430_data = {
TI_BANDGAP_FEATURE_FREEZE_BIT |
TI_BANDGAP_FEATURE_TALERT |
TI_BANDGAP_FEATURE_COUNTER_DELAY |
- TI_BANDGAP_FEATURE_HISTORY_BUFFER |
- TI_BANDGAP_FEATURE_ERRATA_813,
+ TI_BANDGAP_FEATURE_HISTORY_BUFFER,
.fclock_name = "l3instr_ts_gclk_div",
.div_ck_name = "l3instr_ts_gclk_div",
.conv_table = omap5430_adc_to_temp,
diff --git a/drivers/thermal/ti-soc-thermal/omap5xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap5xxx-bandgap.h
index 400b55dffadd..9096403b74b0 100644
--- a/drivers/thermal/ti-soc-thermal/omap5xxx-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/omap5xxx-bandgap.h
@@ -44,36 +44,24 @@
#define OMAP5430_TEMP_SENSOR_GPU_OFFSET 0x150
#define OMAP5430_BGAP_THRESHOLD_GPU_OFFSET 0x1A8
#define OMAP5430_BGAP_TSHUT_GPU_OFFSET 0x1B4
-#define OMAP5430_BGAP_CUMUL_DTEMP_GPU_OFFSET 0x1C0
-#define OMAP5430_BGAP_DTEMP_GPU_0_OFFSET 0x1F4
#define OMAP5430_BGAP_DTEMP_GPU_1_OFFSET 0x1F8
#define OMAP5430_BGAP_DTEMP_GPU_2_OFFSET 0x1FC
-#define OMAP5430_BGAP_DTEMP_GPU_3_OFFSET 0x200
-#define OMAP5430_BGAP_DTEMP_GPU_4_OFFSET 0x204
/* OMAP5430.MPU register offsets */
#define OMAP5430_FUSE_OPP_BGAP_MPU 0x4
#define OMAP5430_TEMP_SENSOR_MPU_OFFSET 0x14C
#define OMAP5430_BGAP_THRESHOLD_MPU_OFFSET 0x1A4
#define OMAP5430_BGAP_TSHUT_MPU_OFFSET 0x1B0
-#define OMAP5430_BGAP_CUMUL_DTEMP_MPU_OFFSET 0x1BC
-#define OMAP5430_BGAP_DTEMP_MPU_0_OFFSET 0x1E0
#define OMAP5430_BGAP_DTEMP_MPU_1_OFFSET 0x1E4
#define OMAP5430_BGAP_DTEMP_MPU_2_OFFSET 0x1E8
-#define OMAP5430_BGAP_DTEMP_MPU_3_OFFSET 0x1EC
-#define OMAP5430_BGAP_DTEMP_MPU_4_OFFSET 0x1F0
/* OMAP5430.MPU register offsets */
#define OMAP5430_FUSE_OPP_BGAP_CORE 0x8
#define OMAP5430_TEMP_SENSOR_CORE_OFFSET 0x154
#define OMAP5430_BGAP_THRESHOLD_CORE_OFFSET 0x1AC
#define OMAP5430_BGAP_TSHUT_CORE_OFFSET 0x1B8
-#define OMAP5430_BGAP_CUMUL_DTEMP_CORE_OFFSET 0x1C4
-#define OMAP5430_BGAP_DTEMP_CORE_0_OFFSET 0x208
#define OMAP5430_BGAP_DTEMP_CORE_1_OFFSET 0x20C
#define OMAP5430_BGAP_DTEMP_CORE_2_OFFSET 0x210
-#define OMAP5430_BGAP_DTEMP_CORE_3_OFFSET 0x214
-#define OMAP5430_BGAP_DTEMP_CORE_4_OFFSET 0x218
/* OMAP5430.common register offsets */
#define OMAP5430_BGAP_CTRL_OFFSET 0x1A0
@@ -94,17 +82,10 @@
#define OMAP5430_BGAP_TEMP_SENSOR_DTEMP_MASK (0x3ff << 0)
/* OMAP5430.BANDGAP_CTRL */
-#define OMAP5430_MASK_SIDLEMODE_MASK (0x3 << 30)
#define OMAP5430_MASK_COUNTER_DELAY_MASK (0x7 << 27)
#define OMAP5430_MASK_FREEZE_CORE_MASK BIT(23)
#define OMAP5430_MASK_FREEZE_GPU_MASK BIT(22)
#define OMAP5430_MASK_FREEZE_MPU_MASK BIT(21)
-#define OMAP5430_MASK_CLEAR_CORE_MASK BIT(20)
-#define OMAP5430_MASK_CLEAR_GPU_MASK BIT(19)
-#define OMAP5430_MASK_CLEAR_MPU_MASK BIT(18)
-#define OMAP5430_MASK_CLEAR_ACCUM_CORE_MASK BIT(17)
-#define OMAP5430_MASK_CLEAR_ACCUM_GPU_MASK BIT(16)
-#define OMAP5430_MASK_CLEAR_ACCUM_MPU_MASK BIT(15)
#define OMAP5430_MASK_HOT_CORE_MASK BIT(5)
#define OMAP5430_MASK_COLD_CORE_MASK BIT(4)
#define OMAP5430_MASK_HOT_GPU_MASK BIT(3)
@@ -123,17 +104,7 @@
#define OMAP5430_TSHUT_HOT_MASK (0x3ff << 16)
#define OMAP5430_TSHUT_COLD_MASK (0x3ff << 0)
-/* OMAP5430.BANDGAP_CUMUL_DTEMP_MPU */
-#define OMAP5430_CUMUL_DTEMP_MPU_MASK (0xffffffff << 0)
-
-/* OMAP5430.BANDGAP_CUMUL_DTEMP_GPU */
-#define OMAP5430_CUMUL_DTEMP_GPU_MASK (0xffffffff << 0)
-
-/* OMAP5430.BANDGAP_CUMUL_DTEMP_CORE */
-#define OMAP5430_CUMUL_DTEMP_CORE_MASK (0xffffffff << 0)
-
/* OMAP5430.BANDGAP_STATUS */
-#define OMAP5430_BGAP_ALERT_MASK BIT(31)
#define OMAP5430_HOT_CORE_FLAG_MASK BIT(5)
#define OMAP5430_COLD_CORE_FLAG_MASK BIT(4)
#define OMAP5430_HOT_GPU_FLAG_MASK BIT(3)
@@ -159,10 +130,6 @@
/* bandgap clock limits */
#define OMAP5430_GPU_MAX_FREQ 1500000
#define OMAP5430_GPU_MIN_FREQ 1000000
-/* sensor limits */
-#define OMAP5430_GPU_MIN_TEMP -40000
-#define OMAP5430_GPU_MAX_TEMP 125000
-#define OMAP5430_GPU_HYST_VAL 5000
/* interrupts thresholds */
#define OMAP5430_GPU_TSHUT_HOT 915
#define OMAP5430_GPU_TSHUT_COLD 900
@@ -173,10 +140,6 @@
/* bandgap clock limits */
#define OMAP5430_MPU_MAX_FREQ 1500000
#define OMAP5430_MPU_MIN_FREQ 1000000
-/* sensor limits */
-#define OMAP5430_MPU_MIN_TEMP -40000
-#define OMAP5430_MPU_MAX_TEMP 125000
-#define OMAP5430_MPU_HYST_VAL 5000
/* interrupts thresholds */
#define OMAP5430_MPU_TSHUT_HOT 915
#define OMAP5430_MPU_TSHUT_COLD 900
@@ -187,10 +150,6 @@
/* bandgap clock limits */
#define OMAP5430_CORE_MAX_FREQ 1500000
#define OMAP5430_CORE_MIN_FREQ 1000000
-/* sensor limits */
-#define OMAP5430_CORE_MIN_TEMP -40000
-#define OMAP5430_CORE_MAX_TEMP 125000
-#define OMAP5430_CORE_HYST_VAL 5000
/* interrupts thresholds */
#define OMAP5430_CORE_TSHUT_HOT 915
#define OMAP5430_CORE_TSHUT_COLD 900
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 696ab3046b87..097328d8e943 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -306,217 +306,6 @@ int ti_bandgap_adc_to_mcelsius(struct ti_bandgap *bgp, int adc_val, int *t)
}
/**
- * ti_bandgap_mcelsius_to_adc() - converts a mCelsius value to ADC scale
- * @bgp: struct ti_bandgap pointer
- * @temp: value in mCelsius
- * @adc: address where to write the resulting temperature in ADC representation
- *
- * Simple conversion from mCelsius to ADC values. If the temp value
- * is out of the ADC conv table range, it returns -ERANGE; 0 on success.
- * The conversion table is indexed by the ADC values.
- *
- * Return: 0 if conversion was successful, else -ERANGE in case the @temp
- * argument is out of the ADC conv table range.
- */
-static
-int ti_bandgap_mcelsius_to_adc(struct ti_bandgap *bgp, long temp, int *adc)
-{
- const struct ti_bandgap_data *conf = bgp->conf;
- const int *conv_table = bgp->conf->conv_table;
- int high, low, mid;
-
- low = 0;
- high = conf->adc_end_val - conf->adc_start_val;
- mid = (high + low) / 2;
-
- if (temp < conv_table[low] || temp > conv_table[high])
- return -ERANGE;
-
- while (low < high) {
- if (temp < conv_table[mid])
- high = mid - 1;
- else
- low = mid + 1;
- mid = (low + high) / 2;
- }
-
- *adc = conf->adc_start_val + low;
- return 0;
-}
-
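For reference, the deleted helper walked the monotonic ADC-to-mCelsius table with a binary search. A minimal standalone sketch of the same idea, written as a lower-bound search over a hypothetical four-entry table (the real tables and the ADC start code live in the per-SoC data files):

static const int demo_conv_table[] = { -40000, -38000, -36000, -34000 };
#define DEMO_ADC_START	530	/* hypothetical first ADC code */

static int demo_mcelsius_to_adc(long temp, int *adc)
{
	int low = 0, high = ARRAY_SIZE(demo_conv_table) - 1;

	if (temp < demo_conv_table[low] || temp > demo_conv_table[high])
		return -ERANGE;

	while (low < high) {
		int mid = (low + high + 1) / 2;

		if (demo_conv_table[mid] <= temp)
			low = mid;	/* keep the largest entry <= temp */
		else
			high = mid - 1;
	}

	*adc = DEMO_ADC_START + low;	/* index back into ADC space */
	return 0;
}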
-/**
- * ti_bandgap_add_hyst() - add hysteresis (in mCelsius) to an ADC value
- * @bgp: struct ti_bandgap pointer
- * @adc_val: temperature value in ADC representation
- * @hyst_val: hysteresis value in mCelsius
- * @sum: address where to write the resulting temperature (in ADC scale)
- *
- * Adds a hysteresis value (in mCelsius) to an ADC temperature value.
- *
- * Return: 0 on success, -ERANGE otherwise.
- */
-static
-int ti_bandgap_add_hyst(struct ti_bandgap *bgp, int adc_val, int hyst_val,
- u32 *sum)
-{
- int temp, ret;
-
- /*
- * Need to add in the mCelsius domain, so we have a temperature
- * within the conv_table range.
- */
- ret = ti_bandgap_adc_to_mcelsius(bgp, adc_val, &temp);
- if (ret < 0)
- return ret;
-
- temp += hyst_val;
-
- ret = ti_bandgap_mcelsius_to_adc(bgp, temp, sum);
- return ret;
-}
-
-/*** Helper functions handling device Alert/Shutdown signals ***/
-
-/**
- * ti_bandgap_unmask_interrupts() - unmasks the events of thot & tcold
- * @bgp: struct ti_bandgap pointer
- * @id: bandgap sensor id
- * @t_hot: hot temperature value to trigger alert signal
- * @t_cold: cold temperature value to trigger alert signal
- *
- * Checks the requested t_hot and t_cold values and configures the IRQ event
- * masks accordingly. Call this function only if bandgap features HAS(TALERT).
- */
-static void ti_bandgap_unmask_interrupts(struct ti_bandgap *bgp, int id,
- u32 t_hot, u32 t_cold)
-{
- struct temp_sensor_registers *tsr;
- u32 temp, reg_val;
-
- /* Read the current on die temperature */
- temp = ti_bandgap_read_temp(bgp, id);
-
- tsr = bgp->conf->sensors[id].registers;
- reg_val = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
-
- if (temp < t_hot)
- reg_val |= tsr->mask_hot_mask;
- else
- reg_val &= ~tsr->mask_hot_mask;
-
- if (t_cold < temp)
- reg_val |= tsr->mask_cold_mask;
- else
- reg_val &= ~tsr->mask_cold_mask;
- ti_bandgap_writel(bgp, reg_val, tsr->bgap_mask_ctrl);
-}
-
-/**
- * ti_bandgap_update_alert_threshold() - sequence to update thresholds
- * @bgp: struct ti_bandgap pointer
- * @id: bandgap sensor id
- * @val: value (ADC) of a new threshold
- * @hot: desired threshold to be updated. true if threshold hot, false if
- * threshold cold
- *
- * It will program the required thresholds (hot and cold) for the TALERT signal.
- * This function can be used to update t_hot or t_cold, depending on @hot value.
- * It checks the resulting t_hot and t_cold values, based on the new passed @val
- * and configures the thresholds so that t_hot is always greater than t_cold.
- * Call this function only if bandgap features HAS(TALERT).
- *
- * Return: 0 if no error, else corresponding error
- */
-static int ti_bandgap_update_alert_threshold(struct ti_bandgap *bgp, int id,
- int val, bool hot)
-{
- struct temp_sensor_data *ts_data = bgp->conf->sensors[id].ts_data;
- struct temp_sensor_registers *tsr;
- u32 thresh_val, reg_val, t_hot, t_cold, ctrl;
- int err = 0;
-
- tsr = bgp->conf->sensors[id].registers;
-
- /* obtain the current value */
- thresh_val = ti_bandgap_readl(bgp, tsr->bgap_threshold);
- t_cold = (thresh_val & tsr->threshold_tcold_mask) >>
- __ffs(tsr->threshold_tcold_mask);
- t_hot = (thresh_val & tsr->threshold_thot_mask) >>
- __ffs(tsr->threshold_thot_mask);
- if (hot)
- t_hot = val;
- else
- t_cold = val;
-
- if (t_cold > t_hot) {
- if (hot)
- err = ti_bandgap_add_hyst(bgp, t_hot,
- -ts_data->hyst_val,
- &t_cold);
- else
- err = ti_bandgap_add_hyst(bgp, t_cold,
- ts_data->hyst_val,
- &t_hot);
- }
-
- /* write the new threshold values */
- reg_val = thresh_val &
- ~(tsr->threshold_thot_mask | tsr->threshold_tcold_mask);
- reg_val |= (t_hot << __ffs(tsr->threshold_thot_mask)) |
- (t_cold << __ffs(tsr->threshold_tcold_mask));
-
- /**
- * Errata i813:
- * Spurious Thermal Alert: Talert can happen randomly while the device
- * remains under the temperature limit defined for this event to trigger.
- * This spurious event is caused by an incorrect re-synchronization
- * between clock domains. The comparison between configured threshold
- * and current temperature value can happen while the value is
- * transitioning (metastable), thus causing inappropriate event
- * generation. No spurious event occurs as long as the threshold value
- * stays unchanged. Spurious event can be generated while a thermal
- * alert threshold is modified in
- * CONTROL_BANDGAP_THRESHOLD_MPU/GPU/CORE/DSPEVE/IVA_n.
- */
-
- if (TI_BANDGAP_HAS(bgp, ERRATA_813)) {
- /* Mask t_hot and t_cold events at the IP Level */
- ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
-
- if (hot)
- ctrl &= ~tsr->mask_hot_mask;
- else
- ctrl &= ~tsr->mask_cold_mask;
-
- ti_bandgap_writel(bgp, ctrl, tsr->bgap_mask_ctrl);
- }
-
- /* Write the threshold value */
- ti_bandgap_writel(bgp, reg_val, tsr->bgap_threshold);
-
- if (TI_BANDGAP_HAS(bgp, ERRATA_813)) {
- /* Unmask t_hot and t_cold events at the IP Level */
- ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
- if (hot)
- ctrl |= tsr->mask_hot_mask;
- else
- ctrl |= tsr->mask_cold_mask;
-
- ti_bandgap_writel(bgp, ctrl, tsr->bgap_mask_ctrl);
- }
-
- if (err) {
- dev_err(bgp->dev, "failed to reprogram thot threshold\n");
- err = -EIO;
- goto exit;
- }
-
- ti_bandgap_unmask_interrupts(bgp, id, t_hot, t_cold);
-exit:
- return err;
-}
-
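The core of the deleted i813 handling is a mask/update/unmask bracket around the threshold write. Condensed, with event_mask standing in for mask_hot_mask or mask_cold_mask depending on @hot, the sequence is roughly:

	/* quiesce the event at the IP level before touching the threshold */
	ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
	ti_bandgap_writel(bgp, ctrl & ~event_mask, tsr->bgap_mask_ctrl);

	/* a comparison against a half-written threshold can no longer fire */
	ti_bandgap_writel(bgp, reg_val, tsr->bgap_threshold);

	/* re-enable the event once the threshold value is stable */
	ti_bandgap_writel(bgp, ctrl | event_mask, tsr->bgap_mask_ctrl);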
-/**
* ti_bandgap_validate() - helper to check the sanity of a struct ti_bandgap
* @bgp: struct ti_bandgap pointer
* @id: bandgap sensor id
@@ -544,165 +333,6 @@ static inline int ti_bandgap_validate(struct ti_bandgap *bgp, int id)
}
/**
- * _ti_bandgap_write_threshold() - helper to update TALERT t_cold or t_hot
- * @bgp: struct ti_bandgap pointer
- * @id: bandgap sensor id
- * @val: value (mCelsius) of a new threshold
- * @hot: desired threshold to be updated. true if threshold hot, false if
- * threshold cold
- *
- * It will update the required thresholds (hot and cold) for the TALERT signal.
- * This function can be used to update t_hot or t_cold, depending on @hot value.
- * Validates the mCelsius range and updates the requested threshold.
- * Call this function only if bandgap features HAS(TALERT).
- *
- * Return: 0 if no error, else corresponding error value.
- */
-static int _ti_bandgap_write_threshold(struct ti_bandgap *bgp, int id, int val,
- bool hot)
-{
- struct temp_sensor_data *ts_data;
- struct temp_sensor_registers *tsr;
- u32 adc_val;
- int ret;
-
- ret = ti_bandgap_validate(bgp, id);
- if (ret)
- return ret;
-
- if (!TI_BANDGAP_HAS(bgp, TALERT))
- return -ENOTSUPP;
-
- ts_data = bgp->conf->sensors[id].ts_data;
- tsr = bgp->conf->sensors[id].registers;
- if (hot) {
- if (val < ts_data->min_temp + ts_data->hyst_val)
- ret = -EINVAL;
- } else {
- if (val > ts_data->max_temp + ts_data->hyst_val)
- ret = -EINVAL;
- }
-
- if (ret)
- return ret;
-
- ret = ti_bandgap_mcelsius_to_adc(bgp, val, &adc_val);
- if (ret < 0)
- return ret;
-
- spin_lock(&bgp->lock);
- ret = ti_bandgap_update_alert_threshold(bgp, id, adc_val, hot);
- spin_unlock(&bgp->lock);
- return ret;
-}
-
-/**
- * _ti_bandgap_read_threshold() - helper to read TALERT t_cold or t_hot
- * @bgp: struct ti_bandgap pointer
- * @id: bandgap sensor id
- * @val: value (mCelsius) of a threshold
- * @hot: desired threshold to be read. true if threshold hot, false if
- * threshold cold
- *
- * It will fetch the required thresholds (hot and cold) for the TALERT signal.
- * This function can be used to read t_hot or t_cold, depending on @hot value.
- * Call this function only if bandgap features HAS(TALERT).
- *
- * Return: 0 if no error, -ENOTSUPP if it has no TALERT support, or the
- * corresponding error value if some operation fails.
- */
-static int _ti_bandgap_read_threshold(struct ti_bandgap *bgp, int id,
- int *val, bool hot)
-{
- struct temp_sensor_registers *tsr;
- u32 temp, mask;
- int ret = 0;
-
- ret = ti_bandgap_validate(bgp, id);
- if (ret)
- goto exit;
-
- if (!TI_BANDGAP_HAS(bgp, TALERT)) {
- ret = -ENOTSUPP;
- goto exit;
- }
-
- tsr = bgp->conf->sensors[id].registers;
- if (hot)
- mask = tsr->threshold_thot_mask;
- else
- mask = tsr->threshold_tcold_mask;
-
- temp = ti_bandgap_readl(bgp, tsr->bgap_threshold);
- temp = (temp & mask) >> __ffs(mask);
- ret = ti_bandgap_adc_to_mcelsius(bgp, temp, &temp);
- if (ret) {
- dev_err(bgp->dev, "failed to read thot\n");
- ret = -EIO;
- goto exit;
- }
-
- *val = temp;
-
-exit:
- return ret;
-}
-
-/*** Exposed APIs ***/
-
-/**
- * ti_bandgap_read_thot() - reads sensor current thot
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @thot: resulting current thot value
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_read_thot(struct ti_bandgap *bgp, int id, int *thot)
-{
- return _ti_bandgap_read_threshold(bgp, id, thot, true);
-}
-
-/**
- * ti_bandgap_write_thot() - sets sensor current thot
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @val: desired thot value
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_write_thot(struct ti_bandgap *bgp, int id, int val)
-{
- return _ti_bandgap_write_threshold(bgp, id, val, true);
-}
-
-/**
- * ti_bandgap_read_tcold() - reads sensor current tcold
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @tcold: resulting current tcold value
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_read_tcold(struct ti_bandgap *bgp, int id, int *tcold)
-{
- return _ti_bandgap_read_threshold(bgp, id, tcold, false);
-}
-
-/**
- * ti_bandgap_write_tcold() - sets the sensor tcold
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @val: desired tcold value
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_write_tcold(struct ti_bandgap *bgp, int id, int val)
-{
- return _ti_bandgap_write_threshold(bgp, id, val, false);
-}
-
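A caller of the removed threshold API would pair the setters and getters like this (sketch; 85000/75000 mCelsius are arbitrary example values, and the read-back can differ slightly after the mCelsius/ADC round trip):

	int thot, tcold;

	if (!ti_bandgap_write_thot(bgp, id, 85000) &&
	    !ti_bandgap_read_thot(bgp, id, &thot))
		dev_dbg(bgp->dev, "thot now %d mC\n", thot);

	if (!ti_bandgap_write_tcold(bgp, id, 75000) &&
	    !ti_bandgap_read_tcold(bgp, id, &tcold))
		dev_dbg(bgp->dev, "tcold now %d mC\n", tcold);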
-/**
* ti_bandgap_read_counter() - read the sensor counter
* @bgp: pointer to bandgap instance
* @id: sensor id
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.h b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
index 209c664c2823..68d39ad43241 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
@@ -78,11 +78,8 @@
* @bgap_mask_ctrl: BANDGAP_MASK_CTRL register offset
* @mask_hot_mask: mask to bandgap_mask_ctrl.mask_hot
* @mask_cold_mask: mask to bandgap_mask_ctrl.mask_cold
- * @mask_sidlemode_mask: mask to bandgap_mask_ctrl.mask_sidlemode
* @mask_counter_delay_mask: mask to bandgap_mask_ctrl.mask_counter_delay
* @mask_freeze_mask: mask to bandgap_mask_ctrl.mask_freeze
- * @mask_clear_mask: mask to bandgap_mask_ctrl.mask_clear
- * @mask_clear_accum_mask: mask to bandgap_mask_ctrl.mask_clear_accum
* @bgap_mode_ctrl: BANDGAP_MODE_CTRL register offset
* @mode_ctrl_mask: mask to bandgap_mode_ctrl.mode_ctrl
* @bgap_counter: BANDGAP_COUNTER register offset
@@ -91,21 +88,13 @@
* @threshold_thot_mask: mask to bandgap_threshold.thot
* @threshold_tcold_mask: mask to bandgap_threshold.tcold
* @tshut_threshold: TSHUT_THRESHOLD register offset (TSHUT thresholds)
- * @tshut_efuse_mask: mask to tshut_threshold.tshut_efuse
- * @tshut_efuse_shift: shift to tshut_threshold.tshut_efuse
* @tshut_hot_mask: mask to tshut_threshold.thot
* @tshut_cold_mask: mask to tshut_threshold.tcold
* @bgap_status: BANDGAP_STATUS register offset
- * @status_clean_stop_mask: mask to bandgap_status.clean_stop
- * @status_bgap_alert_mask: mask to bandgap_status.bandgap_alert
* @status_hot_mask: mask to bandgap_status.hot
* @status_cold_mask: mask to bandgap_status.cold
- * @bgap_cumul_dtemp: BANDGAP_CUMUL_DTEMP register offset
- * @ctrl_dtemp_0: CTRL_DTEMP0 register offset
* @ctrl_dtemp_1: CTRL_DTEMP1 register offset
* @ctrl_dtemp_2: CTRL_DTEMP2 register offset
- * @ctrl_dtemp_3: CTRL_DTEMP3 register offset
- * @ctrl_dtemp_4: CTRL_DTEMP4 register offset
* @bgap_efuse: BANDGAP_EFUSE register offset
*
* The register offsets and bitfields might change across
@@ -121,17 +110,14 @@ struct temp_sensor_registers {
u32 temp_sensor_ctrl;
u32 bgap_tempsoff_mask;
u32 bgap_soc_mask;
- u32 bgap_eocz_mask; /* not used: but needs revisit */
+ u32 bgap_eocz_mask;
u32 bgap_dtemp_mask;
u32 bgap_mask_ctrl;
u32 mask_hot_mask;
u32 mask_cold_mask;
- u32 mask_sidlemode_mask; /* not used: but may be needed for pm */
u32 mask_counter_delay_mask;
u32 mask_freeze_mask;
- u32 mask_clear_mask; /* not used: but needed for trending */
- u32 mask_clear_accum_mask; /* not used: but needed for trending */
u32 bgap_mode_ctrl;
u32 mode_ctrl_mask;
@@ -144,23 +130,15 @@ struct temp_sensor_registers {
u32 threshold_tcold_mask;
u32 tshut_threshold;
- u32 tshut_efuse_mask; /* not used */
- u32 tshut_efuse_shift; /* not used */
u32 tshut_hot_mask;
u32 tshut_cold_mask;
u32 bgap_status;
- u32 status_clean_stop_mask; /* not used: but needed for trending */
- u32 status_bgap_alert_mask; /* not used */
u32 status_hot_mask;
u32 status_cold_mask;
- u32 bgap_cumul_dtemp; /* not used: but needed for trending */
- u32 ctrl_dtemp_0; /* not used: but needed for trending */
- u32 ctrl_dtemp_1; /* not used: but needed for trending */
- u32 ctrl_dtemp_2; /* not used: but needed for trending */
- u32 ctrl_dtemp_3; /* not used: but needed for trending */
- u32 ctrl_dtemp_4; /* not used: but needed for trending */
+ u32 ctrl_dtemp_1;
+ u32 ctrl_dtemp_2;
u32 bgap_efuse;
};
@@ -172,11 +150,6 @@ struct temp_sensor_registers {
* @t_cold: temperature to trigger a thermal alert (low initial value)
* @min_freq: sensor minimum clock rate
* @max_freq: sensor maximum clock rate
- * @max_temp: sensor maximum temperature
- * @min_temp: sensor minimum temperature
- * @hyst_val: temperature hysteresis considered while converting ADC values
- * @update_int1: update interval
- * @update_int2: update interval
*
* This data structure will hold the required thresholds and temperature limits
* for a specific temperature sensor, like shutdown temperature, alert
@@ -189,11 +162,6 @@ struct temp_sensor_data {
u32 t_cold;
u32 min_freq;
u32 max_freq;
- int max_temp;
- int min_temp;
- int hyst_val;
- u32 update_int1; /* not used */
- u32 update_int2; /* not used */
};
struct ti_bandgap_data;
@@ -316,8 +284,6 @@ struct ti_temp_sensor {
*
* TI_BANDGAP_FEATURE_ERRATA_814 - used to work around when the bandgap device
* has Errata 814
- * TI_BANDGAP_FEATURE_ERRATA_813 - used to work around when the bandgap device
- * has Errata 813
* TI_BANDGAP_FEATURE_UNRELIABLE - used when the sensor readings are too
* inaccurate.
* TI_BANDGAP_HAS(b, f) - macro to check if a bandgap device is capable of a
@@ -334,8 +300,7 @@ struct ti_temp_sensor {
#define TI_BANDGAP_FEATURE_COUNTER_DELAY BIT(8)
#define TI_BANDGAP_FEATURE_HISTORY_BUFFER BIT(9)
#define TI_BANDGAP_FEATURE_ERRATA_814 BIT(10)
-#define TI_BANDGAP_FEATURE_ERRATA_813 BIT(11)
-#define TI_BANDGAP_FEATURE_UNRELIABLE BIT(12)
+#define TI_BANDGAP_FEATURE_UNRELIABLE BIT(11)
#define TI_BANDGAP_HAS(b, f) \
((b)->conf->features & TI_BANDGAP_FEATURE_ ## f)
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index e30aa6bf9ff9..50f567b6a66e 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -92,7 +92,7 @@ static void tty_audit_buf_push(struct tty_audit_buf *buf)
{
if (buf->valid == 0)
return;
- if (audit_enabled == 0) {
+ if (audit_enabled == AUDIT_OFF) {
buf->valid = 0;
return;
}
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index d5b4a2b44ab8..de310621b8e7 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -959,7 +959,7 @@ struct kbd_led_trigger {
unsigned int mask;
};
-static void kbd_led_trigger_activate(struct led_classdev *cdev)
+static int kbd_led_trigger_activate(struct led_classdev *cdev)
{
struct kbd_led_trigger *trigger =
container_of(cdev->trigger, struct kbd_led_trigger, trigger);
@@ -970,6 +970,8 @@ static void kbd_led_trigger_activate(struct led_classdev *cdev)
ledstate & trigger->mask ?
LED_FULL : LED_OFF);
tasklet_enable(&keyboard_tasklet);
+
+ return 0;
}
#define KBD_LED_TRIGGER(_led_bit, _name) { \
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index d775ffea20c3..dc7f7fd71684 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -113,11 +113,17 @@ static ssize_t usbport_trig_port_store(struct device *dev,
static struct attribute *ports_attrs[] = {
NULL,
};
+
static const struct attribute_group ports_group = {
.name = "ports",
.attrs = ports_attrs,
};
+static const struct attribute_group *ports_groups[] = {
+ &ports_group,
+ NULL
+};
+
/***************************************
* Adding & removing ports
***************************************/
@@ -298,61 +304,47 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
return NOTIFY_DONE;
}
-static void usbport_trig_activate(struct led_classdev *led_cdev)
+static int usbport_trig_activate(struct led_classdev *led_cdev)
{
struct usbport_trig_data *usbport_data;
- int err;
usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
if (!usbport_data)
- return;
+ return -ENOMEM;
usbport_data->led_cdev = led_cdev;
/* List of ports */
INIT_LIST_HEAD(&usbport_data->ports);
- err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
- if (err)
- goto err_free;
usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
usbport_trig_update_count(usbport_data);
/* Notifications */
- usbport_data->nb.notifier_call = usbport_trig_notify,
- led_cdev->trigger_data = usbport_data;
+ usbport_data->nb.notifier_call = usbport_trig_notify;
+ led_set_trigger_data(led_cdev, usbport_data);
usb_register_notify(&usbport_data->nb);
- led_cdev->activated = true;
- return;
-
-err_free:
- kfree(usbport_data);
+ return 0;
}
static void usbport_trig_deactivate(struct led_classdev *led_cdev)
{
- struct usbport_trig_data *usbport_data = led_cdev->trigger_data;
+ struct usbport_trig_data *usbport_data = led_get_trigger_data(led_cdev);
struct usbport_trig_port *port, *tmp;
- if (!led_cdev->activated)
- return;
-
list_for_each_entry_safe(port, tmp, &usbport_data->ports, list) {
usbport_trig_remove_port(usbport_data, port);
}
usb_unregister_notify(&usbport_data->nb);
- sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
-
kfree(usbport_data);
-
- led_cdev->activated = false;
}
static struct led_trigger usbport_led_trigger = {
.name = "usbport",
.activate = usbport_trig_activate,
.deactivate = usbport_trig_deactivate,
+ .groups = ports_groups,
};
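The usbport conversion above follows the reworked LED trigger contract: activate() now returns an error code instead of silently failing, per-trigger state travels through led_set_trigger_data()/led_get_trigger_data(), and sysfs attribute groups hang off .groups so the LED core creates and removes them around activation. A minimal trigger in the new style might look as follows; the demo_* names are hypothetical, and demo_groups would be built like ports_groups above:

struct demo_trig_data {
	unsigned long state;
};

static int demo_trig_activate(struct led_classdev *led_cdev)
{
	struct demo_trig_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;		/* the core now sees this failure */

	led_set_trigger_data(led_cdev, data);
	return 0;
}

static void demo_trig_deactivate(struct led_classdev *led_cdev)
{
	kfree(led_get_trigger_data(led_cdev));
}

static struct led_trigger demo_trigger = {
	.name		= "demo",
	.activate	= demo_trig_activate,
	.deactivate	= demo_trig_deactivate,
	.groups		= demo_groups,	/* managed by the LED core */
};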
static int __init usbport_trig_init(void)
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index d78dbb73bde8..106988a6661a 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1071,15 +1071,16 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
{
struct se_session *se_sess = tv_nexus->tvn_se_sess;
struct usbg_cmd *cmd;
- int tag;
+ int tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return ERR_PTR(-ENOMEM);
cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
memset(cmd, 0, sizeof(*cmd));
cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->se_cmd.tag = cmd->tag = scsi_tag;
cmd->fu = fu;
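With percpu_ida gone, the session tag pool is an sbitmap_queue, and the allocating CPU has to be recorded alongside the tag so target_free_tag() can return it to the right per-CPU cache. The full cycle, in sketch form:

	int cpu;
	int tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);

	if (tag < 0)
		return ERR_PTR(-ENOMEM);	/* pool exhausted */

	cmd->se_cmd.map_tag = tag;		/* remembered for the free side */
	cmd->se_cmd.map_cpu = cpu;
	/* ... command runs ... */
	target_free_tag(se_sess, &cmd->se_cmd);	/* uses map_tag/map_cpu */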
@@ -1288,7 +1289,7 @@ static void usbg_release_cmd(struct se_cmd *se_cmd)
struct se_session *se_sess = se_cmd->se_sess;
kfree(cmd->data_buf);
- percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(se_sess, se_cmd);
}
static u32 usbg_sess_get_index(struct se_session *se_sess)
@@ -1343,10 +1344,8 @@ static int usbg_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
return 0;
}
-static struct se_portal_group *usbg_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
tport_wwn);
@@ -1379,7 +1378,7 @@ static struct se_portal_group *usbg_make_tpg(
goto unlock_dep;
} else {
ret = configfs_depend_item_unlocked(
- group->cg_subsys,
+ wwn->wwn_group.cg_subsys,
&opts->func_inst.group.cg_item);
if (ret)
goto unlock_dep;
@@ -1593,7 +1592,7 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
goto out_unlock;
}
- tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+ tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
USB_G_DEFAULT_SESSION_TAGS,
sizeof(struct usbg_cmd),
TARGET_PROT_NORMAL, name,
@@ -1639,7 +1638,7 @@ static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
/*
* Release the SCSI I_T Nexus to the emulated vHost Target Port
*/
- transport_deregister_session(tv_nexus->tvn_se_sess);
+ target_remove_session(se_sess);
tpg->tpg_nexus = NULL;
kfree(tv_nexus);
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 125b58eff936..cddb453a1ba5 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -789,7 +789,7 @@ static long vfio_pci_ioctl(void *device_data,
case VFIO_PCI_ERR_IRQ_INDEX:
if (pci_is_pcie(vdev->pdev))
break;
- /* pass thru to return error */
+ /* fall through */
default:
return -EINVAL;
}
@@ -1014,8 +1014,7 @@ reset_info_exit:
&info, slot);
if (!ret)
/* User has access, do the reset */
- ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
- pci_try_reset_bus(vdev->pdev->bus);
+ ret = pci_reset_bus(vdev->pdev);
hot_reset_release:
for (i--; i >= 0; i--)
@@ -1193,6 +1192,19 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
+ /*
+ * Prevent binding to PFs with VFs enabled, this too easily allows
+ * userspace instance with VFs and PFs from the same device, which
+ * cannot work. Disabling SR-IOV here would initiate removing the
+ * VFs, which would unbind the driver, which is prone to blocking
+ * if that VF is also in use by vfio-pci. Just reject these PFs
+ * and let the user sort it out.
+ */
+ if (pci_num_vf(pdev)) {
+ pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
+ return -EBUSY;
+ }
+
group = vfio_iommu_group_get(&pdev->dev);
if (!group)
return -EINVAL;
@@ -1377,8 +1389,7 @@ static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
}
if (needs_reset)
- ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
- pci_try_reset_bus(vdev->pdev->bus);
+ ret = pci_reset_bus(vdev->pdev);
put_devs:
for (i = 0; i < devs.cur_index; i++) {
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 3e5b17710a4f..d9fd3188615d 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1601,6 +1601,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
break;
case VFIO_TYPE1_NESTING_IOMMU:
iommu->nesting = true;
+ /* fall through */
case VFIO_TYPE1v2_IOMMU:
iommu->v2 = true;
break;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 29756d88799b..4e656f89cb22 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -78,6 +78,10 @@ enum {
};
enum {
+ VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
+};
+
+enum {
VHOST_NET_VQ_RX = 0,
VHOST_NET_VQ_TX = 1,
VHOST_NET_VQ_MAX = 2,
@@ -94,7 +98,7 @@ struct vhost_net_ubuf_ref {
struct vhost_virtqueue *vq;
};
-#define VHOST_RX_BATCH 64
+#define VHOST_NET_BATCH 64
struct vhost_net_buf {
void **queue;
int tail;
@@ -168,7 +172,7 @@ static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
rxq->head = 0;
rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
- VHOST_RX_BATCH);
+ VHOST_NET_BATCH);
return rxq->tail;
}
@@ -396,13 +400,10 @@ static inline unsigned long busy_clock(void)
return local_clock() >> 10;
}
-static bool vhost_can_busy_poll(struct vhost_dev *dev,
- unsigned long endtime)
+static bool vhost_can_busy_poll(unsigned long endtime)
{
- return likely(!need_resched()) &&
- likely(!time_after(busy_clock(), endtime)) &&
- likely(!signal_pending(current)) &&
- !vhost_has_work(dev);
+ return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
+ !signal_pending(current));
}
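vhost_has_work() moves out of the poll predicate and into the loop body so callers can learn why polling stopped. The resulting loop shape, shared by the tx and rx paths below, is roughly:

	while (vhost_can_busy_poll(endtime)) {
		if (vhost_has_work(dev)) {
			*busyloop_intr = true;	/* interrupted, not satisfied */
			break;
		}
		if (!vhost_vq_avail_empty(dev, vq))
			break;			/* real work arrived */
		cpu_relax();
	}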
static void vhost_net_disable_vq(struct vhost_net *n,
@@ -431,21 +432,42 @@ static int vhost_net_enable_vq(struct vhost_net *n,
return vhost_poll_start(poll, sock->file);
}
+static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ struct vhost_dev *dev = vq->dev;
+
+ if (!nvq->done_idx)
+ return;
+
+ vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
+ nvq->done_idx = 0;
+}
+
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
- struct vhost_virtqueue *vq,
- struct iovec iov[], unsigned int iov_size,
- unsigned int *out_num, unsigned int *in_num)
+ struct vhost_net_virtqueue *nvq,
+ unsigned int *out_num, unsigned int *in_num,
+ bool *busyloop_intr)
{
+ struct vhost_virtqueue *vq = &nvq->vq;
unsigned long uninitialized_var(endtime);
int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
out_num, in_num, NULL, NULL);
if (r == vq->num && vq->busyloop_timeout) {
+ if (!vhost_sock_zcopy(vq->private_data))
+ vhost_net_signal_used(nvq);
preempt_disable();
endtime = busy_clock() + vq->busyloop_timeout;
- while (vhost_can_busy_poll(vq->dev, endtime) &&
- vhost_vq_avail_empty(vq->dev, vq))
+ while (vhost_can_busy_poll(endtime)) {
+ if (vhost_has_work(vq->dev)) {
+ *busyloop_intr = true;
+ break;
+ }
+ if (!vhost_vq_avail_empty(vq->dev, vq))
+ break;
cpu_relax();
+ }
preempt_enable();
r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
out_num, in_num, NULL, NULL);
@@ -463,9 +485,62 @@ static bool vhost_exceeds_maxpend(struct vhost_net *net)
min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}
-/* Expects to be always run from workqueue - which acts as
- * read-size critical section for our kind of RCU. */
-static void handle_tx(struct vhost_net *net)
+static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
+ size_t hdr_size, int out)
+{
+ /* Skip header. TODO: support TSO. */
+ size_t len = iov_length(vq->iov, out);
+
+ iov_iter_init(iter, WRITE, vq->iov, out, len);
+ iov_iter_advance(iter, hdr_size);
+
+ return iov_iter_count(iter);
+}
+
+static bool vhost_exceeds_weight(int pkts, int total_len)
+{
+ return total_len >= VHOST_NET_WEIGHT ||
+ pkts >= VHOST_NET_PKT_WEIGHT;
+}
+
+static int get_tx_bufs(struct vhost_net *net,
+ struct vhost_net_virtqueue *nvq,
+ struct msghdr *msg,
+ unsigned int *out, unsigned int *in,
+ size_t *len, bool *busyloop_intr)
+{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ int ret;
+
+ ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr);
+
+ if (ret < 0 || ret == vq->num)
+ return ret;
+
+ if (*in) {
+ vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
+ *out, *in);
+ return -EFAULT;
+ }
+
+ /* Sanity check */
+ *len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
+ if (*len == 0) {
+ vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
+ *len, nvq->vhost_hlen);
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
+{
+ return total_len < VHOST_NET_WEIGHT &&
+ !vhost_vq_avail_empty(vq->dev, vq);
+}
+
+static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
@@ -480,67 +555,103 @@ static void handle_tx(struct vhost_net *net)
};
size_t len, total_len = 0;
int err;
- size_t hdr_size;
- struct socket *sock;
- struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
- bool zcopy, zcopy_used;
int sent_pkts = 0;
- mutex_lock(&vq->mutex);
- sock = vq->private_data;
- if (!sock)
- goto out;
+ for (;;) {
+ bool busyloop_intr = false;
- if (!vq_iotlb_prefetch(vq))
- goto out;
+ head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
+ &busyloop_intr);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(head < 0))
+ break;
+ /* Nothing new? Wait for eventfd to tell us they refilled. */
+ if (head == vq->num) {
+ if (unlikely(busyloop_intr)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev,
+ vq))) {
+ vhost_disable_notify(&net->dev, vq);
+ continue;
+ }
+ break;
+ }
- vhost_disable_notify(&net->dev, vq);
- vhost_net_disable_vq(net, vq);
+ vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
+ vq->heads[nvq->done_idx].len = 0;
- hdr_size = nvq->vhost_hlen;
- zcopy = nvq->ubufs;
+ total_len += len;
+ if (tx_can_batch(vq, total_len))
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags &= ~MSG_MORE;
+
+ /* TODO: Check specific error and bomb out unless ENOBUFS? */
+ err = sock->ops->sendmsg(sock, &msg, len);
+ if (unlikely(err < 0)) {
+ vhost_discard_vq_desc(vq, 1);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+ if (err != len)
+ pr_debug("Truncated TX packet: len %d != %zd\n",
+ err, len);
+ if (++nvq->done_idx >= VHOST_NET_BATCH)
+ vhost_net_signal_used(nvq);
+ if (vhost_exceeds_weight(++sent_pkts, total_len)) {
+ vhost_poll_queue(&vq->poll);
+ break;
+ }
+ }
+
+ vhost_net_signal_used(nvq);
+}
+
+static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+ unsigned out, in;
+ int head;
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_control = NULL,
+ .msg_controllen = 0,
+ .msg_flags = MSG_DONTWAIT,
+ };
+ size_t len, total_len = 0;
+ int err;
+ struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
+ bool zcopy_used;
+ int sent_pkts = 0;
for (;;) {
- /* Release DMAs done buffers first */
- if (zcopy)
- vhost_zerocopy_signal_used(net, vq);
+ bool busyloop_intr;
+ /* Release DMAs done buffers first */
+ vhost_zerocopy_signal_used(net, vq);
- head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
- ARRAY_SIZE(vq->iov),
- &out, &in);
+ busyloop_intr = false;
+ head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
+ &busyloop_intr);
/* On error, stop handling until the next kick. */
if (unlikely(head < 0))
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
- if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ if (unlikely(busyloop_intr)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
vhost_disable_notify(&net->dev, vq);
continue;
}
break;
}
- if (in) {
- vq_err(vq, "Unexpected descriptor format for TX: "
- "out %d, int %d\n", out, in);
- break;
- }
- /* Skip header. TODO: support TSO. */
- len = iov_length(vq->iov, out);
- iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
- iov_iter_advance(&msg.msg_iter, hdr_size);
- /* Sanity check */
- if (!msg_data_left(&msg)) {
- vq_err(vq, "Unexpected header len for TX: "
- "%zd expected %zd\n",
- len, hdr_size);
- break;
- }
- len = msg_data_left(&msg);
- zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
- && !vhost_exceeds_maxpend(net)
- && vhost_net_tx_select_zcopy(net);
+ zcopy_used = len >= VHOST_GOODCOPY_LEN
+ && !vhost_exceeds_maxpend(net)
+ && vhost_net_tx_select_zcopy(net);
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
@@ -562,10 +673,8 @@ static void handle_tx(struct vhost_net *net)
msg.msg_control = NULL;
ubufs = NULL;
}
-
total_len += len;
- if (total_len < VHOST_NET_WEIGHT &&
- !vhost_vq_avail_empty(&net->dev, vq) &&
+ if (tx_can_batch(vq, total_len) &&
likely(!vhost_exceeds_maxpend(net))) {
msg.msg_flags |= MSG_MORE;
} else {
@@ -592,12 +701,37 @@ static void handle_tx(struct vhost_net *net)
else
vhost_zerocopy_signal_used(net, vq);
vhost_net_tx_packet(net);
- if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
- unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT)) {
+ if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) {
vhost_poll_queue(&vq->poll);
break;
}
}
+}
+
+/* Expects to always be run from a workqueue - which acts as
+ * a read-side critical section for our kind of RCU. */
+static void handle_tx(struct vhost_net *net)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+ struct socket *sock;
+
+ mutex_lock(&vq->mutex);
+ sock = vq->private_data;
+ if (!sock)
+ goto out;
+
+ if (!vq_iotlb_prefetch(vq))
+ goto out;
+
+ vhost_disable_notify(&net->dev, vq);
+ vhost_net_disable_vq(net, vq);
+
+ if (vhost_sock_zcopy(sock))
+ handle_tx_zerocopy(net, sock);
+ else
+ handle_tx_copy(net, sock);
+
out:
mutex_unlock(&vq->mutex);
}
@@ -633,53 +767,50 @@ static int sk_has_rx_data(struct sock *sk)
return skb_queue_empty(&sk->sk_receive_queue);
}
-static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
+static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
+ bool *busyloop_intr)
{
- struct vhost_virtqueue *vq = &nvq->vq;
- struct vhost_dev *dev = vq->dev;
-
- if (!nvq->done_idx)
- return;
-
- vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
- nvq->done_idx = 0;
-}
-
-static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
-{
- struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
- struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
- struct vhost_virtqueue *vq = &nvq->vq;
+ struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *rvq = &rnvq->vq;
+ struct vhost_virtqueue *tvq = &tnvq->vq;
unsigned long uninitialized_var(endtime);
- int len = peek_head_len(rvq, sk);
+ int len = peek_head_len(rnvq, sk);
- if (!len && vq->busyloop_timeout) {
+ if (!len && tvq->busyloop_timeout) {
/* Flush batched heads first */
- vhost_rx_signal_used(rvq);
+ vhost_net_signal_used(rnvq);
/* Both tx vq and rx socket were polled here */
- mutex_lock_nested(&vq->mutex, 1);
- vhost_disable_notify(&net->dev, vq);
+ mutex_lock_nested(&tvq->mutex, 1);
+ vhost_disable_notify(&net->dev, tvq);
preempt_disable();
- endtime = busy_clock() + vq->busyloop_timeout;
+ endtime = busy_clock() + tvq->busyloop_timeout;
- while (vhost_can_busy_poll(&net->dev, endtime) &&
- !sk_has_rx_data(sk) &&
- vhost_vq_avail_empty(&net->dev, vq))
+ while (vhost_can_busy_poll(endtime)) {
+ if (vhost_has_work(&net->dev)) {
+ *busyloop_intr = true;
+ break;
+ }
+ if ((sk_has_rx_data(sk) &&
+ !vhost_vq_avail_empty(&net->dev, rvq)) ||
+ !vhost_vq_avail_empty(&net->dev, tvq))
+ break;
cpu_relax();
+ }
preempt_enable();
- if (!vhost_vq_avail_empty(&net->dev, vq))
- vhost_poll_queue(&vq->poll);
- else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
- vhost_disable_notify(&net->dev, vq);
- vhost_poll_queue(&vq->poll);
+ if (!vhost_vq_avail_empty(&net->dev, tvq)) {
+ vhost_poll_queue(&tvq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
+ vhost_disable_notify(&net->dev, tvq);
+ vhost_poll_queue(&tvq->poll);
}
- mutex_unlock(&vq->mutex);
+ mutex_unlock(&tvq->mutex);
- len = peek_head_len(rvq, sk);
+ len = peek_head_len(rnvq, sk);
}
return len;
@@ -786,6 +917,7 @@ static void handle_rx(struct vhost_net *net)
s16 headcount;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
+ bool busyloop_intr = false;
struct socket *sock;
struct iov_iter fixup;
__virtio16 num_buffers;
@@ -809,7 +941,8 @@ static void handle_rx(struct vhost_net *net)
vq->log : NULL;
mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
- while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
+ while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+ &busyloop_intr))) {
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
@@ -820,7 +953,9 @@ static void handle_rx(struct vhost_net *net)
goto out;
/* OK, now we need to know about added descriptors. */
if (!headcount) {
- if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ if (unlikely(busyloop_intr)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
/* They have slipped one in as we were
* doing that: check again. */
vhost_disable_notify(&net->dev, vq);
@@ -830,6 +965,7 @@ static void handle_rx(struct vhost_net *net)
* they refilled. */
goto out;
}
+ busyloop_intr = false;
if (nvq->rx_ring)
msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
/* On overrun, truncate and discard */
@@ -885,20 +1021,22 @@ static void handle_rx(struct vhost_net *net)
goto out;
}
nvq->done_idx += headcount;
- if (nvq->done_idx > VHOST_RX_BATCH)
- vhost_rx_signal_used(nvq);
+ if (nvq->done_idx > VHOST_NET_BATCH)
+ vhost_net_signal_used(nvq);
if (unlikely(vq_log))
vhost_log_write(vq, vq_log, log, vhost_len);
total_len += vhost_len;
- if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
- unlikely(++recv_pkts >= VHOST_NET_PKT_WEIGHT)) {
+ if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
vhost_poll_queue(&vq->poll);
goto out;
}
}
- vhost_net_enable_vq(net, vq);
+ if (unlikely(busyloop_intr))
+ vhost_poll_queue(&vq->poll);
+ else
+ vhost_net_enable_vq(net, vq);
out:
- vhost_rx_signal_used(nvq);
+ vhost_net_signal_used(nvq);
mutex_unlock(&vq->mutex);
}
@@ -951,7 +1089,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
return -ENOMEM;
}
- queue = kmalloc_array(VHOST_RX_BATCH, sizeof(void *),
+ queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
GFP_KERNEL);
if (!queue) {
kfree(vqs);
@@ -1265,6 +1403,21 @@ done:
return err;
}
+static int vhost_net_set_backend_features(struct vhost_net *n, u64 features)
+{
+ int i;
+
+ mutex_lock(&n->dev.mutex);
+ for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
+ mutex_lock(&n->vqs[i].vq.mutex);
+ n->vqs[i].vq.acked_backend_features = features;
+ mutex_unlock(&n->vqs[i].vq.mutex);
+ }
+ mutex_unlock(&n->dev.mutex);
+
+ return 0;
+}
+
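From userspace the negotiation mirrors the existing feature ioctls: read the backend mask, keep the bits the application understands, write the result back. A sketch with error handling elided, assuming vhost_fd is an open vhost-net device:

	__u64 features;

	ioctl(vhost_fd, VHOST_GET_BACKEND_FEATURES, &features);
	features &= 1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2; /* ack what we support */
	ioctl(vhost_fd, VHOST_SET_BACKEND_FEATURES, &features);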
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
size_t vhost_hlen, sock_hlen, hdr_len;
@@ -1355,6 +1508,17 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
if (features & ~VHOST_NET_FEATURES)
return -EOPNOTSUPP;
return vhost_net_set_features(n, features);
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_NET_BACKEND_FEATURES;
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_NET_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ return vhost_net_set_backend_features(n, features);
case VHOST_RESET_OWNER:
return vhost_net_reset_owner(n);
case VHOST_SET_OWNER:
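
As an aside, here is a minimal, hypothetical userspace sketch of driving the new ioctl pair above. It is a sketch only: it assumes the VHOST_{GET,SET}_BACKEND_FEATURES ioctls and the VHOST_BACKEND_F_IOTLB_MSG_V2 bit from this series' uapi header, and elides error handling.

/* Hypothetical userspace sketch: negotiate backend features with vhost-net. */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int negotiate_backend_features(int vhost_fd)
{
        __u64 features;

        if (ioctl(vhost_fd, VHOST_GET_BACKEND_FEATURES, &features))
                return -1;
        /* Keep only the bits we understand, e.g. the V2 IOTLB format. */
        features &= 1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2;
        return ioctl(vhost_fd, VHOST_SET_BACKEND_FEATURES, &features);
}
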
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 17fcd3b2e686..76f8d649147b 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -46,7 +46,6 @@
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
-#include <linux/percpu_ida.h>
#include "vhost.h"
@@ -324,7 +323,7 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
}
vhost_scsi_put_inflight(tv_cmd->inflight);
- percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(se_sess, se_cmd);
}
static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
@@ -567,7 +566,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
struct se_session *se_sess;
struct scatterlist *sg, *prot_sg;
struct page **pages;
- int tag;
+ int tag, cpu;
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
@@ -576,7 +575,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
}
se_sess = tv_nexus->tvn_se_sess;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0) {
pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
return ERR_PTR(-ENOMEM);
@@ -591,6 +590,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
cmd->tvc_prot_sgl = prot_sg;
cmd->tvc_upages = pages;
cmd->tvc_se_cmd.map_tag = tag;
+ cmd->tvc_se_cmd.map_cpu = cpu;
cmd->tvc_tag = scsi_tag;
cmd->tvc_lun = lun;
cmd->tvc_task_attr = task_attr;
@@ -1738,7 +1738,7 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
* struct se_node_acl for the vhost_scsi struct se_portal_group with
* the SCSI Initiator port name of the passed configfs group 'name'.
*/
- tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+ tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
VHOST_SCSI_DEFAULT_TAGS,
sizeof(struct vhost_scsi_cmd),
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
@@ -1797,7 +1797,7 @@ static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
/*
* Release the SCSI I_T Nexus to the emulated vhost Target Port
*/
- transport_deregister_session(tv_nexus->tvn_se_sess);
+ target_remove_session(se_sess);
tpg->tpg_nexus = NULL;
mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1912,9 +1912,7 @@ static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
};
static struct se_portal_group *
-vhost_scsi_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
{
struct vhost_scsi_tport *tport = container_of(wwn,
struct vhost_scsi_tport, tport_wwn);
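
The percpu_ida to sbitmap conversion above follows the common pattern used by target drivers in this series. A condensed, illustrative sketch of the alloc/free pairing (names taken from the hunks above, surrounding code elided, not the driver's literal flow):

/* Allocate a tag and record which per-CPU cache it came from. */
int tag, cpu;

tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
        return ERR_PTR(-ENOMEM);

cmd->tvc_se_cmd.map_tag = tag;  /* remember the tag ... */
cmd->tvc_se_cmd.map_cpu = cpu;  /* ... and the CPU it was taken on */

/* ... on completion, return the tag to the pool: */
target_free_tag(se_sess, &cmd->tvc_se_cmd);
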
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ed3114556fda..96c1d8400822 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -315,6 +315,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->log_addr = -1ull;
vq->private_data = NULL;
vq->acked_features = 0;
+ vq->acked_backend_features = 0;
vq->log_base = NULL;
vq->error_ctx = NULL;
vq->kick = NULL;
@@ -1027,28 +1028,40 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
struct iov_iter *from)
{
- struct vhost_msg_node node;
- unsigned size = sizeof(struct vhost_msg);
- size_t ret;
- int err;
+ struct vhost_iotlb_msg msg;
+ size_t offset;
+ int type, ret;
- if (iov_iter_count(from) < size)
- return 0;
- ret = copy_from_iter(&node.msg, size, from);
- if (ret != size)
+ ret = copy_from_iter(&type, sizeof(type), from);
+ if (ret != sizeof(type))
goto done;
- switch (node.msg.type) {
+ switch (type) {
case VHOST_IOTLB_MSG:
- err = vhost_process_iotlb_msg(dev, &node.msg.iotlb);
- if (err)
- ret = err;
+ /* There may be a hole after the type field for the V1 message
+ * format, so skip it here.
+ */
+ offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
+ break;
+ case VHOST_IOTLB_MSG_V2:
+ offset = sizeof(__u32);
break;
default:
ret = -EINVAL;
- break;
+ goto done;
+ }
+
+ iov_iter_advance(from, offset);
+ ret = copy_from_iter(&msg, sizeof(msg), from);
+ if (ret != sizeof(msg))
+ goto done;
+ if (vhost_process_iotlb_msg(dev, &msg)) {
+ ret = -EFAULT;
+ goto done;
}
+ ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
+ sizeof(struct vhost_msg_v2);
done:
return ret;
}
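
The offset arithmetic above is easier to follow next to the two uapi layouts it parses. Shown here for reference as a sketch of this series' <linux/vhost.h> definitions (treat field layout as assumed, not authoritative):

/* V1: "type" is a bare int, so on 64-bit a padding hole may follow it
 * before the 8-byte-aligned union; V2 makes the header explicit with a
 * __u32 type plus a __u32 reserved word.
 */
struct vhost_msg {
        int type;
        union {
                struct vhost_iotlb_msg iotlb;
                __u8 padding[64];
        };
};

struct vhost_msg_v2 {
        __u32 type;
        __u32 reserved;
        union {
                struct vhost_iotlb_msg iotlb;
                __u8 padding[64];
        };
};
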
@@ -1107,13 +1120,28 @@ ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
finish_wait(&dev->wait, &wait);
if (node) {
- ret = copy_to_iter(&node->msg, size, to);
+ struct vhost_iotlb_msg *msg;
+ void *start = &node->msg;
+
+ switch (node->msg.type) {
+ case VHOST_IOTLB_MSG:
+ size = sizeof(node->msg);
+ msg = &node->msg.iotlb;
+ break;
+ case VHOST_IOTLB_MSG_V2:
+ size = sizeof(node->msg_v2);
+ msg = &node->msg_v2.iotlb;
+ break;
+ default:
+ BUG();
+ break;
+ }
- if (ret != size || node->msg.type != VHOST_IOTLB_MISS) {
+ ret = copy_to_iter(start, size, to);
+ if (ret != size || msg->type != VHOST_IOTLB_MISS) {
kfree(node);
return ret;
}
-
vhost_enqueue_msg(dev, &dev->pending_list, node);
}
@@ -1126,12 +1154,19 @@ static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
struct vhost_dev *dev = vq->dev;
struct vhost_msg_node *node;
struct vhost_iotlb_msg *msg;
+ bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);
- node = vhost_new_msg(vq, VHOST_IOTLB_MISS);
+ node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
if (!node)
return -ENOMEM;
- msg = &node->msg.iotlb;
+ if (v2) {
+ node->msg_v2.type = VHOST_IOTLB_MSG_V2;
+ msg = &node->msg_v2.iotlb;
+ } else {
+ msg = &node->msg.iotlb;
+ }
+
msg->type = VHOST_IOTLB_MISS;
msg->iova = iova;
msg->perm = access;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 6c844b90a168..466ef7542291 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -132,6 +132,7 @@ struct vhost_virtqueue {
struct vhost_umem *iotlb;
void *private_data;
u64 acked_features;
+ u64 acked_backend_features;
/* Log write descriptors */
void __user *log_base;
struct vhost_log *log;
@@ -147,7 +148,10 @@ struct vhost_virtqueue {
};
struct vhost_msg_node {
- struct vhost_msg msg;
+ union {
+ struct vhost_msg msg;
+ struct vhost_msg_v2 msg_v2;
+ };
struct vhost_virtqueue *vq;
struct list_head node;
};
@@ -238,6 +242,11 @@ static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
return vq->acked_features & (1ULL << bit);
}
+static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
+{
+ return vq->acked_backend_features & (1ULL << bit);
+}
+
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 4110ba7d7ca9..e91edef98633 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -150,6 +150,17 @@ config FRAMEBUFFER_CONSOLE_ROTATION
such that other users of the framebuffer will remain normally
oriented.
+config FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+ bool "Framebuffer Console Deferred Takeover"
+ depends on FRAMEBUFFER_CONSOLE=y && DUMMY_CONSOLE=y
+ help
+ If enabled, this defers the framebuffer console taking over the
+ console from the dummy console until the first text is displayed on
+ the console. This is useful in combination with the "quiet" kernel
+ command line option, to keep the framebuffer contents initially put
+ up by the firmware in place rather than replacing them with a
+ black screen as soon as fbcon loads.
+
config STI_CONSOLE
bool "STI text console"
depends on PARISC && HAS_IOMEM
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index f2eafe2ed980..0254251fdd79 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -26,6 +26,67 @@
#define DUMMY_ROWS CONFIG_DUMMY_CONSOLE_ROWS
#endif
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+/* These are both protected by the console_lock */
+static RAW_NOTIFIER_HEAD(dummycon_output_nh);
+static bool dummycon_putc_called;
+
+void dummycon_register_output_notifier(struct notifier_block *nb)
+{
+ raw_notifier_chain_register(&dummycon_output_nh, nb);
+
+ if (dummycon_putc_called)
+ nb->notifier_call(nb, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(dummycon_register_output_notifier);
+
+void dummycon_unregister_output_notifier(struct notifier_block *nb)
+{
+ raw_notifier_chain_unregister(&dummycon_output_nh, nb);
+}
+EXPORT_SYMBOL_GPL(dummycon_unregister_output_notifier);
+
+static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+{
+ dummycon_putc_called = true;
+ raw_notifier_call_chain(&dummycon_output_nh, 0, NULL);
+}
+
+static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
+ int count, int ypos, int xpos)
+{
+ int i;
+
+ if (!dummycon_putc_called) {
+ /* Ignore erases */
+ for (i = 0 ; i < count; i++) {
+ if (s[i] != vc->vc_video_erase_char)
+ break;
+ }
+ if (i == count)
+ return;
+
+ dummycon_putc_called = true;
+ }
+
+ raw_notifier_call_chain(&dummycon_output_nh, 0, NULL);
+}
+
+static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch)
+{
+ /* Redraw, so that we get putc(s) for output done while blanked */
+ return 1;
+}
+#else
+static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos) { }
+static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
+ int count, int ypos, int xpos) { }
+static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch)
+{
+ return 0;
+}
+#endif
+
static const char *dummycon_startup(void)
{
return "dummy device";
@@ -44,9 +105,6 @@ static void dummycon_init(struct vc_data *vc, int init)
static void dummycon_deinit(struct vc_data *vc) { }
static void dummycon_clear(struct vc_data *vc, int sy, int sx, int height,
int width) { }
-static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos) { }
-static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos) { }
static void dummycon_cursor(struct vc_data *vc, int mode) { }
static bool dummycon_scroll(struct vc_data *vc, unsigned int top,
@@ -61,11 +119,6 @@ static int dummycon_switch(struct vc_data *vc)
return 0;
}
-static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch)
-{
- return 0;
-}
-
static int dummycon_font_set(struct vc_data *vc, struct console_font *font,
unsigned int flags)
{
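
A consumer of the new dummycon notifier API registers a notifier_block and takes over once the first real output happens; fbcon's actual hook-up appears in the next file. A minimal sketch of the pattern (caller-side names hypothetical; must run under the console_lock, as the comments above require):

static int my_takeover_notify(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        /* First text has been output: unregister and take over. */
        dummycon_unregister_output_notifier(nb);
        /* ... bind the real console driver here ... */
        return NOTIFY_OK;
}

static struct notifier_block my_takeover_nb = {
        .notifier_call = my_takeover_notify,
};

/* Registration; fires immediately if output has already happened: */
dummycon_register_output_notifier(&my_takeover_nb);
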
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index c910e74d46ff..5fb156bdcf4e 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -129,6 +129,12 @@ static inline void fbcon_map_override(void)
}
#endif /* CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY */
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+static bool deferred_takeover = true;
+#else
+#define deferred_takeover false
+#endif
+
/* font data */
static char fontname[40];
@@ -499,6 +505,12 @@ static int __init fb_console_setup(char *this_opt)
margin_color = simple_strtoul(options, &options, 0);
continue;
}
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+ if (!strcmp(options, "nodefer")) {
+ deferred_takeover = false;
+ continue;
+ }
+#endif
}
return 1;
}
@@ -828,6 +840,8 @@ static int set_con2fb_map(int unit, int newidx, int user)
struct fb_info *oldinfo = NULL;
int found, err = 0;
+ WARN_CONSOLE_UNLOCKED();
+
if (oldidx == newidx)
return 0;
@@ -3044,6 +3058,8 @@ static int fbcon_fb_unbind(int idx)
{
int i, new_idx = -1, ret = 0;
+ WARN_CONSOLE_UNLOCKED();
+
if (!fbcon_has_console_bind)
return 0;
@@ -3094,6 +3110,11 @@ static int fbcon_fb_unregistered(struct fb_info *info)
{
int i, idx;
+ WARN_CONSOLE_UNLOCKED();
+
+ if (deferred_takeover)
+ return 0;
+
idx = info->node;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map[i] == idx)
@@ -3131,6 +3152,16 @@ static int fbcon_fb_unregistered(struct fb_info *info)
static void fbcon_remap_all(int idx)
{
int i;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ if (deferred_takeover) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++)
+ con2fb_map_boot[i] = idx;
+ fbcon_map_override();
+ return;
+ }
+
for (i = first_fb_vc; i <= last_fb_vc; i++)
set_con2fb_map(i, idx, 0);
@@ -3177,9 +3208,16 @@ static int fbcon_fb_registered(struct fb_info *info)
{
int ret = 0, i, idx;
+ WARN_CONSOLE_UNLOCKED();
+
idx = info->node;
fbcon_select_primary(info);
+ if (deferred_takeover) {
+ pr_info("fbcon: Deferring console take-over\n");
+ return 0;
+ }
+
if (info_idx == -1) {
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map_boot[i] == idx) {
@@ -3555,8 +3593,46 @@ static int fbcon_init_device(void)
return 0;
}
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+static struct notifier_block fbcon_output_nb;
+
+static int fbcon_output_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int i;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ pr_info("fbcon: Taking over console\n");
+
+ dummycon_unregister_output_notifier(&fbcon_output_nb);
+ deferred_takeover = false;
+ logo_shown = FBCON_LOGO_DONTSHOW;
+
+ for (i = 0; i < FB_MAX; i++) {
+ if (registered_fb[i])
+ fbcon_fb_registered(registered_fb[i]);
+ }
+
+ return NOTIFY_OK;
+}
+
+static void fbcon_register_output_notifier(void)
+{
+ fbcon_output_nb.notifier_call = fbcon_output_notifier;
+ dummycon_register_output_notifier(&fbcon_output_nb);
+}
+#else
+static inline void fbcon_register_output_notifier(void) {}
+#endif
+
static void fbcon_start(void)
{
+ if (deferred_takeover) {
+ fbcon_register_output_notifier();
+ return;
+ }
+
if (num_registered_fb) {
int i;
@@ -3583,6 +3659,13 @@ static void fbcon_exit(void)
if (fbcon_has_exited)
return;
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+ if (deferred_takeover) {
+ dummycon_unregister_output_notifier(&fbcon_output_nb);
+ deferred_takeover = false;
+ }
+#endif
+
kfree((void *)softback_buf);
softback_buf = 0UL;
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 46a4484e3da7..c6f78d27947b 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -20,7 +20,7 @@
#include <drm/drm_connector.h> /* For DRM_MODE_PANEL_ORIENTATION_* */
static bool request_mem_succeeded = false;
-static bool nowc = false;
+static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
static struct fb_var_screeninfo efifb_defined = {
.activate = FB_ACTIVATE_NOW,
@@ -68,8 +68,12 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
static void efifb_destroy(struct fb_info *info)
{
- if (info->screen_base)
- iounmap(info->screen_base);
+ if (info->screen_base) {
+ if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+ iounmap(info->screen_base);
+ else
+ memunmap(info->screen_base);
+ }
if (request_mem_succeeded)
release_mem_region(info->apertures->ranges[0].base,
info->apertures->ranges[0].size);
@@ -104,7 +108,7 @@ static int efifb_setup(char *options)
else if (!strncmp(this_opt, "width:", 6))
screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
else if (!strcmp(this_opt, "nowc"))
- nowc = true;
+ mem_flags &= ~EFI_MEMORY_WC;
}
}
@@ -164,6 +168,7 @@ static int efifb_probe(struct platform_device *dev)
unsigned int size_remap;
unsigned int size_total;
char *option = NULL;
+ efi_memory_desc_t md;
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
return -ENODEV;
@@ -272,12 +277,35 @@ static int efifb_probe(struct platform_device *dev)
info->apertures->ranges[0].base = efifb_fix.smem_start;
info->apertures->ranges[0].size = size_remap;
- if (nowc)
- info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
- else
- info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
+ if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
+ if ((efifb_fix.smem_start + efifb_fix.smem_len) >
+ (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
+ pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n",
+ efifb_fix.smem_start);
+ err = -EIO;
+ goto err_release_fb;
+ }
+ /*
+ * If the UEFI memory map covers the efifb region, we may only
+ * remap it using the attributes the memory map prescribes.
+ */
+ mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+ mem_flags &= md.attribute;
+ }
+ if (mem_flags & EFI_MEMORY_WC)
+ info->screen_base = ioremap_wc(efifb_fix.smem_start,
+ efifb_fix.smem_len);
+ else if (mem_flags & EFI_MEMORY_UC)
+ info->screen_base = ioremap(efifb_fix.smem_start,
+ efifb_fix.smem_len);
+ else if (mem_flags & EFI_MEMORY_WT)
+ info->screen_base = memremap(efifb_fix.smem_start,
+ efifb_fix.smem_len, MEMREMAP_WT);
+ else if (mem_flags & EFI_MEMORY_WB)
+ info->screen_base = memremap(efifb_fix.smem_start,
+ efifb_fix.smem_len, MEMREMAP_WB);
if (!info->screen_base) {
- pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
+ pr_err("efifb: abort, cannot remap video memory 0x%x @ 0x%lx\n",
efifb_fix.smem_len, efifb_fix.smem_start);
err = -EIO;
goto err_release_fb;
@@ -371,7 +399,10 @@ err_fb_dealoc:
err_groups:
sysfs_remove_groups(&dev->dev.kobj, efifb_groups);
err_unmap:
- iounmap(info->screen_base);
+ if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+ iounmap(info->screen_base);
+ else
+ memunmap(info->screen_base);
err_release_fb:
framebuffer_release(info);
err_release_mem:
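
To make the attribute narrowing above concrete, consider a firmware memory map that only permits write-through and write-back for the framebuffer region (illustrative values, restating the probe logic above):

u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;  /* driver defaults */

/* UEFI memory map covers the region: widen, then narrow to md.attribute. */
mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
mem_flags &= EFI_MEMORY_WT | EFI_MEMORY_WB;     /* e.g. md.attribute */

/* WC and UC are now clear, so the if/else ladder in efifb_probe()
 * falls through to memremap(..., MEMREMAP_WT). */
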
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index 09e5bb013d28..a5e58a829ea0 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -137,8 +137,7 @@ static int dss_initialize_debugfs(void)
static void dss_uninitialize_debugfs(void)
{
- if (dss_debugfs_dir)
- debugfs_remove_recursive(dss_debugfs_dir);
+ debugfs_remove_recursive(dss_debugfs_dir);
}
int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
index 8fc843b56b26..e8d428bc47e3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
@@ -891,8 +891,7 @@ bool dss_has_feature(enum dss_feat_id id)
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end)
{
- if (id >= omap_current_dss_features->num_reg_fields)
- BUG();
+ BUG_ON(id >= omap_current_dss_features->num_reg_fields);
*start = omap_current_dss_features->reg_fields[id].start;
*end = omap_current_dss_features->reg_fields[id].end;
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index e08e5664e330..3e7887c53d7c 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -287,7 +287,7 @@ static bool cmp_var_to_colormode(struct fb_var_screeninfo *var,
var->red.length == 0 ||
var->blue.length == 0 ||
var->green.length == 0)
- return 0;
+ return false;
return var->bits_per_pixel == color->bits_per_pixel &&
cmp_component(&var->red, &color->red) &&
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 705aebd74e56..465a6f5142cc 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -421,7 +421,7 @@ const char *vp_bus_name(struct virtio_device *vdev)
* - OR over all affinities for shared MSI
* - ignore the affinity request if we're using INTX
*/
-int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
+int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
struct virtio_device *vdev = vq->vdev;
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -435,11 +435,10 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
if (vp_dev->msix_enabled) {
mask = vp_dev->msix_affinity_masks[info->msix_vector];
irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
- if (cpu == -1)
+ if (!cpu_mask)
irq_set_affinity_hint(irq, NULL);
else {
- cpumask_clear(mask);
- cpumask_set_cpu(cpu, mask);
+ cpumask_copy(mask, cpu_mask);
irq_set_affinity_hint(irq, mask);
}
}
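
Callers of the reworked affinity hook now pass a full cpumask, or NULL to clear the hint. A hedged sketch of a caller pinning a queue to a single CPU (the mask contents are copied by the hook, so the caller may free its mask afterwards):

cpumask_var_t mask;

if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
        return -ENOMEM;

cpumask_set_cpu(2, mask);       /* pin the queue to CPU 2 */
vp_set_vq_affinity(vq, mask);
free_cpumask_var(mask);         /* safe: the mask was copied */

vp_set_vq_affinity(vq, NULL);   /* later: drop the affinity hint */
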
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 135ee3cf7175..02271002c2f3 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -141,7 +141,7 @@ const char *vp_bus_name(struct virtio_device *vdev);
* - OR over all affinities for shared MSI
* - ignore the affinity request if we're using INTX
*/
-int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
+int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask);
const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index);
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 1708b2300c7a..00827d2897b5 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -49,7 +49,7 @@ config W1_MASTER_DS1WM
config W1_MASTER_GPIO
tristate "GPIO 1-wire busmaster"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y here if you want to communicate with your 1-wire devices using
GPIO pins. This driver uses the GPIO API to control the wire.
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index e5d0c28372ea..b459edfacff3 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -152,6 +152,16 @@ config XEN_GNTDEV
help
Allows userspace processes to use grants.
+config XEN_GNTDEV_DMABUF
+ bool "Add support for dma-buf grant access device driver extension"
+ depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC && DMA_SHARED_BUFFER
+ help
+ Allows userspace processes and kernel modules to use the Xen-backed
+ dma-buf implementation. With this extension, grant references to
+ the pages of an imported dma-buf can be exported for use by another
+ domain, and grant references coming from a foreign domain can be
+ converted into a local dma-buf for local export.
+
config XEN_GRANT_DEV_ALLOC
tristate "User-space grant reference allocator driver"
depends on XEN
@@ -161,6 +171,20 @@ config XEN_GRANT_DEV_ALLOC
to other domains. This can be used to implement frontend drivers
or as part of an inter-domain shared memory channel.
+config XEN_GRANT_DMA_ALLOC
+ bool "Allow allocating DMA capable buffers with grant reference module"
+ depends on XEN && HAS_DMA
+ help
+ Extends the grant table module API to allow allocating DMA-capable
+ buffers and mapping foreign grant references on top of them.
+ The resulting buffer is similar to one allocated by the balloon
+ driver in that a proper memory reservation is made
+ ({increase|decrease}_reservation) and VA mappings are updated if
+ needed.
+ This is useful for sharing foreign buffers with HW drivers which
+ cannot work with the scattered buffers provided by the balloon
+ driver, but require DMA-able memory instead.
+
config SWIOTLB_XEN
def_bool y
select SWIOTLB
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 48b154276179..3e542f60f29f 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
obj-$(CONFIG_X86) += fallback.o
obj-y += grant-table.o features.o balloon.o manage.o preempt.o time.o
+obj-y += mem-reservation.o
obj-y += events/
obj-y += xenbus/
@@ -40,5 +41,6 @@ obj-$(CONFIG_XEN_PVCALLS_BACKEND) += pvcalls-back.o
obj-$(CONFIG_XEN_PVCALLS_FRONTEND) += pvcalls-front.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
+xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF) += gntdev-dmabuf.o
xen-gntalloc-y := gntalloc.o
xen-privcmd-y := privcmd.o privcmd-buf.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 065f0b607373..e12bb256036f 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -71,6 +71,7 @@
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
+#include <xen/mem-reservation.h>
static int xen_hotplug_unpopulated;
@@ -157,13 +158,6 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
#define GFP_BALLOON \
(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
-static void scrub_page(struct page *page)
-{
-#ifdef CONFIG_XEN_SCRUB_PAGES
- clear_highpage(page);
-#endif
-}
-
/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
@@ -463,11 +457,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
int rc;
unsigned long i;
struct page *page;
- struct xen_memory_reservation reservation = {
- .address_bits = 0,
- .extent_order = EXTENT_ORDER,
- .domid = DOMID_SELF
- };
if (nr_pages > ARRAY_SIZE(frame_list))
nr_pages = ARRAY_SIZE(frame_list);
@@ -479,16 +468,11 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
break;
}
- /* XENMEM_populate_physmap requires a PFN based on Xen
- * granularity.
- */
frame_list[i] = page_to_xen_pfn(page);
page = balloon_next_page(page);
}
- set_xen_guest_handle(reservation.extent_start, frame_list);
- reservation.nr_extents = nr_pages;
- rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+ rc = xenmem_reservation_increase(nr_pages, frame_list);
if (rc <= 0)
return BP_EAGAIN;
@@ -496,29 +480,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
page = balloon_retrieve(false);
BUG_ON(page == NULL);
-#ifdef CONFIG_XEN_HAVE_PVMMU
- /*
- * We don't support PV MMU when Linux and Xen is using
- * different page granularity.
- */
- BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
-
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- unsigned long pfn = page_to_pfn(page);
-
- set_phys_to_machine(pfn, frame_list[i]);
-
- /* Link back into the page tables if not highmem. */
- if (!PageHighMem(page)) {
- int ret;
- ret = HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- mfn_pte(frame_list[i], PAGE_KERNEL),
- 0);
- BUG_ON(ret);
- }
- }
-#endif
+ xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
/* Relinquish the page back to the allocator. */
free_reserved_page(page);
@@ -535,11 +497,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
unsigned long i;
struct page *page, *tmp;
int ret;
- struct xen_memory_reservation reservation = {
- .address_bits = 0,
- .extent_order = EXTENT_ORDER,
- .domid = DOMID_SELF
- };
LIST_HEAD(pages);
if (nr_pages > ARRAY_SIZE(frame_list))
@@ -553,7 +510,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
break;
}
adjust_managed_page_count(page, -1);
- scrub_page(page);
+ xenmem_reservation_scrub_page(page);
list_add(&page->lru, &pages);
}
@@ -572,28 +529,10 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
*/
i = 0;
list_for_each_entry_safe(page, tmp, &pages, lru) {
- /* XENMEM_decrease_reservation requires a GFN */
frame_list[i++] = xen_page_to_gfn(page);
-#ifdef CONFIG_XEN_HAVE_PVMMU
- /*
- * We don't support PV MMU when Linux and Xen is using
- * different page granularity.
- */
- BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
-
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- unsigned long pfn = page_to_pfn(page);
+ xenmem_reservation_va_mapping_reset(1, &page);
- if (!PageHighMem(page)) {
- ret = HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- __pte_ma(0), 0);
- BUG_ON(ret);
- }
- __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
- }
-#endif
list_del(&page->lru);
balloon_append(page);
@@ -601,9 +540,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
flush_tlb_all();
- set_xen_guest_handle(reservation.extent_start, frame_list);
- reservation.nr_extents = nr_pages;
- ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+ ret = xenmem_reservation_decrease(nr_pages, frame_list);
BUG_ON(ret != nr_pages);
balloon_stats.current_pages -= nr_pages;
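
The balloon hunks above now funnel through the mem-reservation helpers added by this series. A hedged sketch of the per-page round trip on the decrease side, with signatures assumed from the new <xen/mem-reservation.h> (not a drop-in replacement for the driver code):

#include <xen/mem-reservation.h>
#include <xen/page.h>

static void return_page_to_xen(struct page *page)
{
        xen_pfn_t frame = xen_page_to_gfn(page);

        xenmem_reservation_scrub_page(page);           /* optional zeroing */
        xenmem_reservation_va_mapping_reset(1, &page); /* PV MMU teardown */
        xenmem_reservation_decrease(1, &frame);        /* hypercall */
}
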
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index 30d7f52eb7ca..55ed80c3a17c 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -17,7 +17,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
* XXX: Add support for merging bio_vec when using different page
* size in Xen and Linux.
*/
- return 0;
+ return false;
#endif
}
EXPORT_SYMBOL(xen_biovec_phys_mergeable);
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
new file mode 100644
index 000000000000..2f8b949c3eeb
--- /dev/null
+++ b/drivers/xen/gntdev-common.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Common functionality of grant device.
+ *
+ * Copyright (c) 2006-2007, D G Murray.
+ * (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
+ * (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#ifndef _GNTDEV_COMMON_H
+#define _GNTDEV_COMMON_H
+
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/mmu_notifier.h>
+#include <linux/types.h>
+
+struct gntdev_dmabuf_priv;
+
+struct gntdev_priv {
+ /* Maps with visible offsets in the file descriptor. */
+ struct list_head maps;
+ /*
+ * Maps that are not visible; will be freed on munmap.
+ * Only populated if populate_freeable_maps == 1
+ */
+ struct list_head freeable_maps;
+ /* lock protects maps and freeable_maps. */
+ struct mutex lock;
+ struct mm_struct *mm;
+ struct mmu_notifier mn;
+
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ /* Device for which DMA memory is allocated. */
+ struct device *dma_dev;
+#endif
+
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+ struct gntdev_dmabuf_priv *dmabuf_priv;
+#endif
+};
+
+struct gntdev_unmap_notify {
+ int flags;
+ /* Address relative to the start of the gntdev_grant_map. */
+ int addr;
+ int event;
+};
+
+struct gntdev_grant_map {
+ struct list_head next;
+ struct vm_area_struct *vma;
+ int index;
+ int count;
+ int flags;
+ refcount_t users;
+ struct gntdev_unmap_notify notify;
+ struct ioctl_gntdev_grant_ref *grants;
+ struct gnttab_map_grant_ref *map_ops;
+ struct gnttab_unmap_grant_ref *unmap_ops;
+ struct gnttab_map_grant_ref *kmap_ops;
+ struct gnttab_unmap_grant_ref *kunmap_ops;
+ struct page **pages;
+ unsigned long pages_vm_start;
+
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ /*
+ * If dma_vaddr is not NULL then this mapping is backed by DMA-
+ * capable memory.
+ */
+
+ struct device *dma_dev;
+ /* Flags used to create this DMA buffer: GNTDEV_DMA_FLAG_XXX. */
+ int dma_flags;
+ void *dma_vaddr;
+ dma_addr_t dma_bus_addr;
+ /* Needed to avoid allocation in gnttab_dma_free_pages(). */
+ xen_pfn_t *frames;
+#endif
+};
+
+struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
+ int dma_flags);
+
+void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add);
+
+void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map);
+
+bool gntdev_account_mapped_pages(int count);
+
+int gntdev_map_grant_pages(struct gntdev_grant_map *map);
+
+#endif
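
The header above exports the map lifecycle that gntdev-dmabuf.c builds on. A condensed sketch of the intended call order (locking elided; the real callers take priv->lock around gntdev_add_map()):

struct gntdev_grant_map *map;

map = gntdev_alloc_map(priv, count, 0 /* no DMA flags */);
if (!map)
        return -ENOMEM;

if (gntdev_account_mapped_pages(count)) {  /* over the module limit? */
        gntdev_put_map(NULL, map);
        return -ENOMEM;
}

/* fill map->grants[], then: */
gntdev_add_map(priv, map);
gntdev_map_grant_pages(map);

/* ... later, drop the reference: */
gntdev_put_map(priv, map);
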
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
new file mode 100644
index 000000000000..cba6b586bfbd
--- /dev/null
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -0,0 +1,856 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Xen dma-buf functionality for gntdev.
+ *
+ * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
+ *
+ * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include <xen/xen.h>
+#include <xen/grant_table.h>
+
+#include "gntdev-common.h"
+#include "gntdev-dmabuf.h"
+
+#ifndef GRANT_INVALID_REF
+/*
+ * Note on the use of grant reference 0 as an invalid grant reference:
+ * grant reference 0 is valid, but it is never exposed to a driver
+ * because it is already in use/reserved by the PV console.
+ */
+#define GRANT_INVALID_REF 0
+#endif
+
+struct gntdev_dmabuf {
+ struct gntdev_dmabuf_priv *priv;
+ struct dma_buf *dmabuf;
+ struct list_head next;
+ int fd;
+
+ union {
+ struct {
+ /* Exported buffers are reference counted. */
+ struct kref refcount;
+
+ struct gntdev_priv *priv;
+ struct gntdev_grant_map *map;
+ } exp;
+ struct {
+ /* Granted references of the imported buffer. */
+ grant_ref_t *refs;
+ /* Scatter-gather table of the imported buffer. */
+ struct sg_table *sgt;
+ /* dma-buf attachment of the imported buffer. */
+ struct dma_buf_attachment *attach;
+ } imp;
+ } u;
+
+ /* Number of pages this buffer has. */
+ int nr_pages;
+ /* Pages of this buffer. */
+ struct page **pages;
+};
+
+struct gntdev_dmabuf_wait_obj {
+ struct list_head next;
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ struct completion completion;
+};
+
+struct gntdev_dmabuf_attachment {
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+};
+
+struct gntdev_dmabuf_priv {
+ /* List of exported DMA buffers. */
+ struct list_head exp_list;
+ /* List of wait objects. */
+ struct list_head exp_wait_list;
+ /* List of imported DMA buffers. */
+ struct list_head imp_list;
+ /* This is the lock which protects dma_buf_xxx lists. */
+ struct mutex lock;
+};
+
+/* DMA buffer export support. */
+
+/* Implementation of wait for exported DMA buffer to be released. */
+
+static void dmabuf_exp_release(struct kref *kref);
+
+static struct gntdev_dmabuf_wait_obj *
+dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
+ struct gntdev_dmabuf *gntdev_dmabuf)
+{
+ struct gntdev_dmabuf_wait_obj *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ init_completion(&obj->completion);
+ obj->gntdev_dmabuf = gntdev_dmabuf;
+
+ mutex_lock(&priv->lock);
+ list_add(&obj->next, &priv->exp_wait_list);
+ /* Put our reference and wait for gntdev_dmabuf's release to fire. */
+ kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
+ mutex_unlock(&priv->lock);
+ return obj;
+}
+
+static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
+ struct gntdev_dmabuf_wait_obj *obj)
+{
+ mutex_lock(&priv->lock);
+ list_del(&obj->next);
+ mutex_unlock(&priv->lock);
+ kfree(obj);
+}
+
+static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
+ u32 wait_to_ms)
+{
+ if (wait_for_completion_timeout(&obj->completion,
+ msecs_to_jiffies(wait_to_ms)) <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
+ struct gntdev_dmabuf *gntdev_dmabuf)
+{
+ struct gntdev_dmabuf_wait_obj *obj;
+
+ list_for_each_entry(obj, &priv->exp_wait_list, next)
+ if (obj->gntdev_dmabuf == gntdev_dmabuf) {
+ pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
+ complete_all(&obj->completion);
+ break;
+ }
+}
+
+static struct gntdev_dmabuf *
+dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
+
+ mutex_lock(&priv->lock);
+ list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
+ if (gntdev_dmabuf->fd == fd) {
+ pr_debug("Found gntdev_dmabuf in the wait list\n");
+ kref_get(&gntdev_dmabuf->u.exp.refcount);
+ ret = gntdev_dmabuf;
+ break;
+ }
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
+ int wait_to_ms)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ struct gntdev_dmabuf_wait_obj *obj;
+ int ret;
+
+ pr_debug("Will wait for dma-buf with fd %d\n", fd);
+ /*
+ * Try to find the DMA buffer: if it is not found, either the
+ * buffer has already been released or the file descriptor
+ * provided is wrong.
+ */
+ gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
+ if (IS_ERR(gntdev_dmabuf))
+ return PTR_ERR(gntdev_dmabuf);
+
+ /*
+ * gntdev_dmabuf still exists and we hold a reference to it now, so
+ * prepare to wait: allocate a wait object and add it to the wait
+ * list so we can find it on release.
+ */
+ obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
+ dmabuf_exp_wait_obj_free(priv, obj);
+ return ret;
+}
+
+/* DMA buffer export support. */
+
+static struct sg_table *
+dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
+{
+ struct sg_table *sgt;
+ int ret;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ return sgt;
+
+out:
+ kfree(sgt);
+ return ERR_PTR(ret);
+}
+
+static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+ struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;
+
+ gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
+ GFP_KERNEL);
+ if (!gntdev_dmabuf_attach)
+ return -ENOMEM;
+
+ gntdev_dmabuf_attach->dir = DMA_NONE;
+ attach->priv = gntdev_dmabuf_attach;
+ return 0;
+}
+
+static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+ struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
+
+ if (gntdev_dmabuf_attach) {
+ struct sg_table *sgt = gntdev_dmabuf_attach->sgt;
+
+ if (sgt) {
+ if (gntdev_dmabuf_attach->dir != DMA_NONE)
+ dma_unmap_sg_attrs(attach->dev, sgt->sgl,
+ sgt->nents,
+ gntdev_dmabuf_attach->dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(sgt);
+ }
+
+ kfree(sgt);
+ kfree(gntdev_dmabuf_attach);
+ attach->priv = NULL;
+ }
+}
+
+static struct sg_table *
+dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
+ struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
+ struct sg_table *sgt;
+
+ pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
+ attach->dev);
+
+ if (dir == DMA_NONE || !gntdev_dmabuf_attach)
+ return ERR_PTR(-EINVAL);
+
+ /* Return the cached mapping when possible. */
+ if (gntdev_dmabuf_attach->dir == dir)
+ return gntdev_dmabuf_attach->sgt;
+
+ /*
+ * Two mappings with different directions for the same attachment are
+ * not allowed.
+ */
+ if (gntdev_dmabuf_attach->dir != DMA_NONE)
+ return ERR_PTR(-EBUSY);
+
+ sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
+ gntdev_dmabuf->nr_pages);
+ if (!IS_ERR(sgt)) {
+ if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = ERR_PTR(-ENOMEM);
+ } else {
+ gntdev_dmabuf_attach->sgt = sgt;
+ gntdev_dmabuf_attach->dir = dir;
+ }
+ }
+ if (IS_ERR(sgt))
+ pr_debug("Failed to map sg table for dev %p\n", attach->dev);
+ return sgt;
+}
+
+static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ /* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
+}
+
+static void dmabuf_exp_release(struct kref *kref)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf =
+ container_of(kref, struct gntdev_dmabuf, u.exp.refcount);
+
+ dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
+ list_del(&gntdev_dmabuf->next);
+ kfree(gntdev_dmabuf);
+}
+
+static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
+ struct gntdev_grant_map *map)
+{
+ mutex_lock(&priv->lock);
+ list_del(&map->next);
+ gntdev_put_map(NULL /* already removed */, map);
+ mutex_unlock(&priv->lock);
+}
+
+static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
+ struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;
+
+ dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
+ gntdev_dmabuf->u.exp.map);
+ mutex_lock(&priv->lock);
+ kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
+ mutex_unlock(&priv->lock);
+}
+
+static void *dmabuf_exp_ops_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ /* Not implemented. */
+ return NULL;
+}
+
+static void dmabuf_exp_ops_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+ /* Not implemented. */
+}
+
+static int dmabuf_exp_ops_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ /* Not implemented. */
+ return 0;
+}
+
+static const struct dma_buf_ops dmabuf_exp_ops = {
+ .attach = dmabuf_exp_ops_attach,
+ .detach = dmabuf_exp_ops_detach,
+ .map_dma_buf = dmabuf_exp_ops_map_dma_buf,
+ .unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
+ .release = dmabuf_exp_ops_release,
+ .map = dmabuf_exp_ops_kmap,
+ .unmap = dmabuf_exp_ops_kunmap,
+ .mmap = dmabuf_exp_ops_mmap,
+};
+
+struct gntdev_dmabuf_export_args {
+ struct gntdev_priv *priv;
+ struct gntdev_grant_map *map;
+ struct gntdev_dmabuf_priv *dmabuf_priv;
+ struct device *dev;
+ int count;
+ struct page **pages;
+ u32 fd;
+};
+
+static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ int ret;
+
+ gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
+ if (!gntdev_dmabuf)
+ return -ENOMEM;
+
+ kref_init(&gntdev_dmabuf->u.exp.refcount);
+
+ gntdev_dmabuf->priv = args->dmabuf_priv;
+ gntdev_dmabuf->nr_pages = args->count;
+ gntdev_dmabuf->pages = args->pages;
+ gntdev_dmabuf->u.exp.priv = args->priv;
+ gntdev_dmabuf->u.exp.map = args->map;
+
+ exp_info.exp_name = KBUILD_MODNAME;
+ if (args->dev->driver && args->dev->driver->owner)
+ exp_info.owner = args->dev->driver->owner;
+ else
+ exp_info.owner = THIS_MODULE;
+ exp_info.ops = &dmabuf_exp_ops;
+ exp_info.size = args->count << PAGE_SHIFT;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = gntdev_dmabuf;
+
+ gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(gntdev_dmabuf->dmabuf)) {
+ ret = PTR_ERR(gntdev_dmabuf->dmabuf);
+ gntdev_dmabuf->dmabuf = NULL;
+ goto fail;
+ }
+
+ ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
+ if (ret < 0)
+ goto fail;
+
+ gntdev_dmabuf->fd = ret;
+ args->fd = ret;
+
+ pr_debug("Exporting DMA buffer with fd %d\n", ret);
+
+ mutex_lock(&args->dmabuf_priv->lock);
+ list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
+ mutex_unlock(&args->dmabuf_priv->lock);
+ return 0;
+
+fail:
+ if (gntdev_dmabuf->dmabuf)
+ dma_buf_put(gntdev_dmabuf->dmabuf);
+ kfree(gntdev_dmabuf);
+ return ret;
+}
+
+static struct gntdev_grant_map *
+dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
+ int count)
+{
+ struct gntdev_grant_map *map;
+
+ if (unlikely(count <= 0))
+ return ERR_PTR(-EINVAL);
+
+ if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
+ (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
+ pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
+ return ERR_PTR(-EINVAL);
+ }
+
+ map = gntdev_alloc_map(priv, count, dmabuf_flags);
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(gntdev_account_mapped_pages(count))) {
+ pr_debug("can't map %d pages: over limit\n", count);
+ gntdev_put_map(NULL, map);
+ return ERR_PTR(-ENOMEM);
+ }
+ return map;
+}
+
+static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
+ int count, u32 domid, u32 *refs, u32 *fd)
+{
+ struct gntdev_grant_map *map;
+ struct gntdev_dmabuf_export_args args;
+ int i, ret;
+
+ map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ for (i = 0; i < count; i++) {
+ map->grants[i].domid = domid;
+ map->grants[i].ref = refs[i];
+ }
+
+ mutex_lock(&priv->lock);
+ gntdev_add_map(priv, map);
+ mutex_unlock(&priv->lock);
+
+ map->flags |= GNTMAP_host_map;
+#if defined(CONFIG_X86)
+ map->flags |= GNTMAP_device_map;
+#endif
+
+ ret = gntdev_map_grant_pages(map);
+ if (ret < 0)
+ goto out;
+
+ args.priv = priv;
+ args.map = map;
+ args.dev = priv->dma_dev;
+ args.dmabuf_priv = priv->dmabuf_priv;
+ args.count = map->count;
+ args.pages = map->pages;
+ args.fd = -1; /* Shut up unnecessary gcc warning for i386 */
+
+ ret = dmabuf_exp_from_pages(&args);
+ if (ret < 0)
+ goto out;
+
+ *fd = args.fd;
+ return 0;
+
+out:
+ dmabuf_exp_remove_map(priv, map);
+ return ret;
+}
+
+/* DMA buffer import support. */
+
+static int
+dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
+ int count, int domid)
+{
+ grant_ref_t priv_gref_head;
+ int i, ret;
+
+ ret = gnttab_alloc_grant_references(count, &priv_gref_head);
+ if (ret < 0) {
+ pr_debug("Cannot allocate grant references, ret %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < count; i++) {
+ int cur_ref;
+
+ cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
+ if (cur_ref < 0) {
+ ret = cur_ref;
+ pr_debug("Cannot claim grant reference, ret %d\n", ret);
+ goto out;
+ }
+
+ gnttab_grant_foreign_access_ref(cur_ref, domid,
+ xen_page_to_gfn(pages[i]), 0);
+ refs[i] = cur_ref;
+ }
+
+ return 0;
+
+out:
+ gnttab_free_grant_references(priv_gref_head);
+ return ret;
+}
+
+static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (refs[i] != GRANT_INVALID_REF)
+ gnttab_end_foreign_access(refs[i], 0, 0UL);
+}
+
+static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
+{
+ kfree(gntdev_dmabuf->pages);
+ kfree(gntdev_dmabuf->u.imp.refs);
+ kfree(gntdev_dmabuf);
+}
+
+static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ int i;
+
+ gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
+ if (!gntdev_dmabuf)
+ goto fail_no_free;
+
+ gntdev_dmabuf->u.imp.refs = kcalloc(count,
+ sizeof(gntdev_dmabuf->u.imp.refs[0]),
+ GFP_KERNEL);
+ if (!gntdev_dmabuf->u.imp.refs)
+ goto fail;
+
+ gntdev_dmabuf->pages = kcalloc(count,
+ sizeof(gntdev_dmabuf->pages[0]),
+ GFP_KERNEL);
+ if (!gntdev_dmabuf->pages)
+ goto fail;
+
+ gntdev_dmabuf->nr_pages = count;
+
+ for (i = 0; i < count; i++)
+ gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;
+
+ return gntdev_dmabuf;
+
+fail:
+ dmabuf_imp_free_storage(gntdev_dmabuf);
+fail_no_free:
+ return ERR_PTR(-ENOMEM);
+}
+
+static struct gntdev_dmabuf *
+dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
+ int fd, int count, int domid)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf, *ret;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct sg_page_iter sg_iter;
+ int i;
+
+ dma_buf = dma_buf_get(fd);
+ if (IS_ERR(dma_buf))
+ return ERR_CAST(dma_buf);
+
+ gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
+ if (IS_ERR(gntdev_dmabuf)) {
+ ret = gntdev_dmabuf;
+ goto fail_put;
+ }
+
+ gntdev_dmabuf->priv = priv;
+ gntdev_dmabuf->fd = fd;
+
+ attach = dma_buf_attach(dma_buf, dev);
+ if (IS_ERR(attach)) {
+ ret = ERR_CAST(attach);
+ goto fail_free_obj;
+ }
+
+ gntdev_dmabuf->u.imp.attach = attach;
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = ERR_CAST(sgt);
+ goto fail_detach;
+ }
+
+ /* Check the number of pages the imported buffer has. */
+ if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
+ ret = ERR_PTR(-EINVAL);
+ pr_debug("DMA buffer is %zu bytes, user-space expects %d pages\n",
+ attach->dmabuf->size, gntdev_dmabuf->nr_pages);
+ goto fail_unmap;
+ }
+
+ gntdev_dmabuf->u.imp.sgt = sgt;
+
+ /* Now convert sgt to array of pages and check for page validity. */
+ i = 0;
+ for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+ /*
+ * Check that the page is valid: an invalid page can occur if
+ * we are given a page from VRAM or another resource which is
+ * not backed by a struct page.
+ */
+ if (!pfn_valid(page_to_pfn(page))) {
+ ret = ERR_PTR(-EINVAL);
+ goto fail_unmap;
+ }
+
+ gntdev_dmabuf->pages[i++] = page;
+ }
+
+ ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
+ gntdev_dmabuf->u.imp.refs,
+ count, domid));
+ if (IS_ERR(ret))
+ goto fail_end_access;
+
+ pr_debug("Imported DMA buffer with fd %d\n", fd);
+
+ mutex_lock(&priv->lock);
+ list_add(&gntdev_dmabuf->next, &priv->imp_list);
+ mutex_unlock(&priv->lock);
+
+ return gntdev_dmabuf;
+
+fail_end_access:
+ dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+fail_free_obj:
+ dmabuf_imp_free_storage(gntdev_dmabuf);
+fail_put:
+ dma_buf_put(dma_buf);
+ return ret;
+}
+
+/*
+ * Find the imported dma-buf by its file descriptor and remove
+ * it from the import list.
+ */
+static struct gntdev_dmabuf *
+dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
+{
+ struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
+
+ mutex_lock(&priv->lock);
+ list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
+ if (gntdev_dmabuf->fd == fd) {
+ pr_debug("Found gntdev_dmabuf in the import list\n");
+ ret = gntdev_dmabuf;
+ list_del(&gntdev_dmabuf->next);
+ break;
+ }
+ }
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
+{
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ struct dma_buf_attachment *attach;
+ struct dma_buf *dma_buf;
+
+ gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
+ if (IS_ERR(gntdev_dmabuf))
+ return PTR_ERR(gntdev_dmabuf);
+
+ pr_debug("Releasing DMA buffer with fd %d\n", fd);
+
+ dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
+ gntdev_dmabuf->nr_pages);
+
+ attach = gntdev_dmabuf->u.imp.attach;
+
+ if (gntdev_dmabuf->u.imp.sgt)
+ dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
+ DMA_BIDIRECTIONAL);
+ dma_buf = attach->dmabuf;
+ dma_buf_detach(attach->dmabuf, attach);
+ dma_buf_put(dma_buf);
+
+ dmabuf_imp_free_storage(gntdev_dmabuf);
+ return 0;
+}
+
+/* DMA buffer IOCTL support. */
+
+long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
+ struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
+{
+ struct ioctl_gntdev_dmabuf_exp_from_refs op;
+ u32 *refs;
+ long ret;
+
+ if (use_ptemod) {
+ pr_debug("Cannot provide dma-buf: use_ptemode %d\n",
+ use_ptemod);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+
+ if (unlikely(op.count <= 0))
+ return -EINVAL;
+
+ refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
+ if (!refs)
+ return -ENOMEM;
+
+ if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
+ op.domid, refs, &op.fd);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(u, &op, sizeof(op)) != 0)
+ ret = -EFAULT;
+
+out:
+ kfree(refs);
+ return ret;
+}
+
+long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
+{
+ struct ioctl_gntdev_dmabuf_exp_wait_released op;
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+
+ return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
+ op.wait_to_ms);
+}
+
+long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
+{
+ struct ioctl_gntdev_dmabuf_imp_to_refs op;
+ struct gntdev_dmabuf *gntdev_dmabuf;
+ long ret;
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+
+ if (unlikely(op.count <= 0))
+ return -EINVAL;
+
+ gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
+ priv->dma_dev, op.fd,
+ op.count, op.domid);
+ if (IS_ERR(gntdev_dmabuf))
+ return PTR_ERR(gntdev_dmabuf);
+
+ if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
+ sizeof(*u->refs) * op.count) != 0) {
+ ret = -EFAULT;
+ goto out_release;
+ }
+ return 0;
+
+out_release:
+ dmabuf_imp_release(priv->dmabuf_priv, op.fd);
+ return ret;
+}
+
+long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_imp_release __user *u)
+{
+ struct ioctl_gntdev_dmabuf_imp_release op;
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+
+ return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
+}
+
+struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
+{
+ struct gntdev_dmabuf_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&priv->lock);
+ INIT_LIST_HEAD(&priv->exp_list);
+ INIT_LIST_HEAD(&priv->exp_wait_list);
+ INIT_LIST_HEAD(&priv->imp_list);
+
+ return priv;
+}
+
+void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
+{
+ kfree(priv);
+}
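
From userspace, the export path added in this file is driven through the new gntdev ioctls. A hypothetical sketch of exporting grant references as a dma-buf fd; the struct layout (flags/count/fd/domid plus a trailing refs[] array) is assumed from this series' uapi <xen/gntdev.h> additions, and error handling is elided:

struct ioctl_gntdev_dmabuf_exp_from_refs *op;
size_t sz = sizeof(*op) + (nr_refs - 1) * sizeof(__u32);

op = calloc(1, sz);
op->flags = GNTDEV_DMA_FLAG_WC;  /* write-combine backing */
op->count = nr_refs;
op->domid = remote_domid;
memcpy(op->refs, refs, nr_refs * sizeof(__u32));

if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
        dmabuf_fd = op->fd;      /* ready to hand to a DRM/V4L2 driver */
free(op);
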
diff --git a/drivers/xen/gntdev-dmabuf.h b/drivers/xen/gntdev-dmabuf.h
new file mode 100644
index 000000000000..7220a53d0fc5
--- /dev/null
+++ b/drivers/xen/gntdev-dmabuf.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Xen dma-buf functionality for gntdev.
+ *
+ * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#ifndef _GNTDEV_DMABUF_H
+#define _GNTDEV_DMABUF_H
+
+#include <xen/gntdev.h>
+
+struct gntdev_dmabuf_priv;
+struct gntdev_priv;
+
+struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void);
+
+void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv);
+
+long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
+ struct ioctl_gntdev_dmabuf_exp_from_refs __user *u);
+
+long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_exp_wait_released __user *u);
+
+long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_imp_to_refs __user *u);
+
+long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
+ struct ioctl_gntdev_dmabuf_imp_release __user *u);
+
+#endif
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index bd56653b9bbc..c866a62f766d 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -6,6 +6,7 @@
*
* Copyright (c) 2006-2007, D G Murray.
* (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
+ * (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -26,10 +27,6 @@
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/mmu_notifier.h>
-#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
@@ -37,6 +34,9 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+#include <linux/of_device.h>
+#endif
#include <xen/xen.h>
#include <xen/grant_table.h>
@@ -47,6 +47,11 @@
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
+#include "gntdev-common.h"
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+#include "gntdev-dmabuf.h"
+#endif
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
"Gerd Hoffmann <kraxel@redhat.com>");
@@ -62,51 +67,23 @@ static atomic_t pages_mapped = ATOMIC_INIT(0);
static int use_ptemod;
#define populate_freeable_maps use_ptemod
-struct gntdev_priv {
- /* maps with visible offsets in the file descriptor */
- struct list_head maps;
- /* maps that are not visible; will be freed on munmap.
- * Only populated if populate_freeable_maps == 1 */
- struct list_head freeable_maps;
- /* lock protects maps and freeable_maps */
- struct mutex lock;
- struct mm_struct *mm;
- struct mmu_notifier mn;
-};
-
-struct unmap_notify {
- int flags;
- /* Address relative to the start of the grant_map */
- int addr;
- int event;
-};
-
-struct grant_map {
- struct list_head next;
- struct vm_area_struct *vma;
- int index;
- int count;
- int flags;
- refcount_t users;
- struct unmap_notify notify;
- struct ioctl_gntdev_grant_ref *grants;
- struct gnttab_map_grant_ref *map_ops;
- struct gnttab_unmap_grant_ref *unmap_ops;
- struct gnttab_map_grant_ref *kmap_ops;
- struct gnttab_unmap_grant_ref *kunmap_ops;
- struct page **pages;
- unsigned long pages_vm_start;
-};
+static int unmap_grant_pages(struct gntdev_grant_map *map,
+ int offset, int pages);
-static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
+static struct miscdevice gntdev_miscdev;
/* ------------------------------------------------------------------ */
+bool gntdev_account_mapped_pages(int count)
+{
+ return atomic_add_return(count, &pages_mapped) > limit;
+}
+
static void gntdev_print_maps(struct gntdev_priv *priv,
char *text, int text_index)
{
#ifdef DEBUG
- struct grant_map *map;
+ struct gntdev_grant_map *map;
pr_debug("%s: maps list (priv %p)\n", __func__, priv);
list_for_each_entry(map, &priv->maps, next)
@@ -116,13 +93,32 @@ static void gntdev_print_maps(struct gntdev_priv *priv,
#endif
}
-static void gntdev_free_map(struct grant_map *map)
+static void gntdev_free_map(struct gntdev_grant_map *map)
{
if (map == NULL)
return;
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ if (map->dma_vaddr) {
+ struct gnttab_dma_alloc_args args;
+
+ args.dev = map->dma_dev;
+ args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
+ args.nr_pages = map->count;
+ args.pages = map->pages;
+ args.frames = map->frames;
+ args.vaddr = map->dma_vaddr;
+ args.dev_bus_addr = map->dma_bus_addr;
+
+ gnttab_dma_free_pages(&args);
+ } else
+#endif
if (map->pages)
gnttab_free_pages(map->count, map->pages);
+
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ kfree(map->frames);
+#endif
kfree(map->pages);
kfree(map->grants);
kfree(map->map_ops);
@@ -132,12 +128,13 @@ static void gntdev_free_map(struct grant_map *map)
kfree(map);
}
-static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
+struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
+ int dma_flags)
{
- struct grant_map *add;
+ struct gntdev_grant_map *add;
int i;
- add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
+ add = kzalloc(sizeof(*add), GFP_KERNEL);
if (NULL == add)
return NULL;
@@ -155,6 +152,37 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
NULL == add->pages)
goto err;
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ add->dma_flags = dma_flags;
+
+ /*
+ * Check if this mapping is requested to be backed
+ * by a DMA buffer.
+ */
+ if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
+ struct gnttab_dma_alloc_args args;
+
+ add->frames = kcalloc(count, sizeof(add->frames[0]),
+ GFP_KERNEL);
+ if (!add->frames)
+ goto err;
+
+ /* Remember the device, so we can free DMA memory. */
+ add->dma_dev = priv->dma_dev;
+
+ args.dev = priv->dma_dev;
+ args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT);
+ args.nr_pages = count;
+ args.pages = add->pages;
+ args.frames = add->frames;
+
+ if (gnttab_dma_alloc_pages(&args))
+ goto err;
+
+ add->dma_vaddr = args.vaddr;
+ add->dma_bus_addr = args.dev_bus_addr;
+ } else
+#endif
if (gnttab_alloc_pages(count, add->pages))
goto err;
@@ -176,9 +204,9 @@ err:
return NULL;
}
-static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
+void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add)
{
- struct grant_map *map;
+ struct gntdev_grant_map *map;
list_for_each_entry(map, &priv->maps, next) {
if (add->index + add->count < map->index) {
@@ -193,10 +221,10 @@ done:
gntdev_print_maps(priv, "[new]", add->index);
}
-static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
- int index, int count)
+static struct gntdev_grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
+ int index, int count)
{
- struct grant_map *map;
+ struct gntdev_grant_map *map;
list_for_each_entry(map, &priv->maps, next) {
if (map->index != index)
@@ -208,7 +236,7 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
return NULL;
}
-static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
+void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
{
if (!map)
return;
@@ -239,7 +267,7 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
static int find_grant_ptes(pte_t *pte, pgtable_t token,
unsigned long addr, void *data)
{
- struct grant_map *map = data;
+ struct gntdev_grant_map *map = data;
unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
u64 pte_maddr;
@@ -272,7 +300,7 @@ static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
}
#endif
-static int map_grant_pages(struct grant_map *map)
+int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
int i, err = 0;
@@ -325,11 +353,20 @@ static int map_grant_pages(struct grant_map *map)
map->unmap_ops[i].handle = map->map_ops[i].handle;
if (use_ptemod)
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ else if (map->dma_vaddr) {
+ unsigned long bfn;
+
+ bfn = pfn_to_bfn(page_to_pfn(map->pages[i]));
+ map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn);
+ }
+#endif
}
return err;
}
-static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
+static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
{
int i, err = 0;
struct gntab_unmap_queue_data unmap_data;
@@ -364,7 +401,8 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
return err;
}
-static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
{
int range, err = 0;
@@ -396,7 +434,7 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
static void gntdev_vma_open(struct vm_area_struct *vma)
{
- struct grant_map *map = vma->vm_private_data;
+ struct gntdev_grant_map *map = vma->vm_private_data;
pr_debug("gntdev_vma_open %p\n", vma);
refcount_inc(&map->users);
@@ -404,7 +442,7 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
static void gntdev_vma_close(struct vm_area_struct *vma)
{
- struct grant_map *map = vma->vm_private_data;
+ struct gntdev_grant_map *map = vma->vm_private_data;
struct file *file = vma->vm_file;
struct gntdev_priv *priv = file->private_data;
@@ -428,7 +466,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
unsigned long addr)
{
- struct grant_map *map = vma->vm_private_data;
+ struct gntdev_grant_map *map = vma->vm_private_data;
return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
}
@@ -441,7 +479,7 @@ static const struct vm_operations_struct gntdev_vmops = {
/* ------------------------------------------------------------------ */
-static void unmap_if_in_range(struct grant_map *map,
+static void unmap_if_in_range(struct gntdev_grant_map *map,
unsigned long start, unsigned long end)
{
unsigned long mstart, mend;
@@ -470,7 +508,7 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
unsigned long start, unsigned long end)
{
struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
- struct grant_map *map;
+ struct gntdev_grant_map *map;
mutex_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
@@ -486,7 +524,7 @@ static void mn_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int err;
mutex_lock(&priv->lock);
@@ -531,6 +569,15 @@ static int gntdev_open(struct inode *inode, struct file *flip)
INIT_LIST_HEAD(&priv->freeable_maps);
mutex_init(&priv->lock);
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+ priv->dmabuf_priv = gntdev_dmabuf_init();
+ if (IS_ERR(priv->dmabuf_priv)) {
+ ret = PTR_ERR(priv->dmabuf_priv);
+ kfree(priv);
+ return ret;
+ }
+#endif
+
if (use_ptemod) {
priv->mm = get_task_mm(current);
if (!priv->mm) {
@@ -548,6 +595,17 @@ static int gntdev_open(struct inode *inode, struct file *flip)
}
flip->private_data = priv;
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ priv->dma_dev = gntdev_miscdev.this_device;
+
+ /*
+	 * The device is not spawned from a device tree, so arch_setup_dma_ops
+ * is not called, thus leaving the device with dummy DMA ops.
+ * Fix this by calling of_dma_configure() with a NULL node to set
+ * default DMA ops.
+ */
+ of_dma_configure(priv->dma_dev, NULL, true);
+#endif
pr_debug("priv %p\n", priv);
return 0;
@@ -556,21 +614,27 @@ static int gntdev_open(struct inode *inode, struct file *flip)
static int gntdev_release(struct inode *inode, struct file *flip)
{
struct gntdev_priv *priv = flip->private_data;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
pr_debug("priv %p\n", priv);
mutex_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
- map = list_entry(priv->maps.next, struct grant_map, next);
+ map = list_entry(priv->maps.next,
+ struct gntdev_grant_map, next);
list_del(&map->next);
gntdev_put_map(NULL /* already removed */, map);
}
WARN_ON(!list_empty(&priv->freeable_maps));
mutex_unlock(&priv->lock);
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+ gntdev_dmabuf_fini(priv->dmabuf_priv);
+#endif
+
if (use_ptemod)
mmu_notifier_unregister(&priv->mn, priv->mm);
+
kfree(priv);
return 0;
}
@@ -579,7 +643,7 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
struct ioctl_gntdev_map_grant_ref __user *u)
{
struct ioctl_gntdev_map_grant_ref op;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int err;
if (copy_from_user(&op, u, sizeof(op)) != 0)
@@ -589,11 +653,11 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
return -EINVAL;
err = -ENOMEM;
- map = gntdev_alloc_map(priv, op.count);
+ map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
if (!map)
return err;
- if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
+ if (unlikely(gntdev_account_mapped_pages(op.count))) {
pr_debug("can't map: over limit\n");
gntdev_put_map(NULL, map);
return err;
@@ -620,7 +684,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
struct ioctl_gntdev_unmap_grant_ref __user *u)
{
struct ioctl_gntdev_unmap_grant_ref op;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int err = -ENOENT;
if (copy_from_user(&op, u, sizeof(op)) != 0)
@@ -646,7 +710,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
{
struct ioctl_gntdev_get_offset_for_vaddr op;
struct vm_area_struct *vma;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int rv = -EINVAL;
if (copy_from_user(&op, u, sizeof(op)) != 0)
@@ -677,7 +741,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
struct ioctl_gntdev_unmap_notify op;
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int rc;
int out_flags;
unsigned int out_event;
@@ -962,6 +1026,20 @@ static long gntdev_ioctl(struct file *flip,
case IOCTL_GNTDEV_GRANT_COPY:
return gntdev_ioctl_grant_copy(priv, ptr);
+#ifdef CONFIG_XEN_GNTDEV_DMABUF
+ case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
+ return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);
+
+ case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
+ return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);
+
+ case IOCTL_GNTDEV_DMABUF_IMP_TO_REFS:
+ return gntdev_ioctl_dmabuf_imp_to_refs(priv, ptr);
+
+ case IOCTL_GNTDEV_DMABUF_IMP_RELEASE:
+ return gntdev_ioctl_dmabuf_imp_release(priv, ptr);
+#endif
+
default:
pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
return -ENOIOCTLCMD;
@@ -975,7 +1053,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
struct gntdev_priv *priv = flip->private_data;
int index = vma->vm_pgoff;
int count = vma_pages(vma);
- struct grant_map *map;
+ struct gntdev_grant_map *map;
int i, err = -EINVAL;
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
@@ -1032,7 +1110,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
}
}
- err = map_grant_pages(map);
+ err = gntdev_map_grant_pages(map);
if (err)
goto out_put_map;
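
For orientation, a sketch (not a call site in this patch) of how a kernel-side
caller might request a DMA-backed grant map through the reworked allocator,
using only symbols introduced above; error handling is abbreviated:

/* Sketch: allocate and map a write-combined, DMA-backed grant map. */
static int example_map_dma_backed(struct gntdev_priv *priv, int count)
{
	struct gntdev_grant_map *map;

	map = gntdev_alloc_map(priv, count, GNTDEV_DMA_FLAG_WC);
	if (!map)
		return -ENOMEM;

	if (gntdev_account_mapped_pages(count)) {
		gntdev_put_map(NULL, map);	/* over the global limit */
		return -ENOMEM;
	}

	gntdev_add_map(priv, map);
	return gntdev_map_grant_pages(map);
}
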
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index ba9f3eec2bd0..7bafa703a992 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -45,6 +45,9 @@
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+#include <linux/dma-mapping.h>
+#endif
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -57,6 +60,7 @@
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
+#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
@@ -769,29 +773,18 @@ void gnttab_free_auto_xlat_frames(void)
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
-/**
- * gnttab_alloc_pages - alloc pages suitable for grant mapping into
- * @nr_pages: number of pages to alloc
- * @pages: returns the pages
- */
-int gnttab_alloc_pages(int nr_pages, struct page **pages)
+int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
int i;
- int ret;
-
- ret = alloc_xenballooned_pages(nr_pages, pages);
- if (ret < 0)
- return ret;
for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
struct xen_page_foreign *foreign;
foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
- if (!foreign) {
- gnttab_free_pages(nr_pages, pages);
+ if (!foreign)
return -ENOMEM;
- }
+
set_page_private(pages[i], (unsigned long)foreign);
#endif
SetPagePrivate(pages[i]);
@@ -799,14 +792,30 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
return 0;
}
-EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
+EXPORT_SYMBOL_GPL(gnttab_pages_set_private);
/**
- * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
- * @nr_pages; number of pages to free
- * @pages: the pages
+ * gnttab_alloc_pages - alloc pages suitable for grant mapping into
+ * @nr_pages: number of pages to alloc
+ * @pages: returns the pages
*/
-void gnttab_free_pages(int nr_pages, struct page **pages)
+int gnttab_alloc_pages(int nr_pages, struct page **pages)
+{
+ int ret;
+
+ ret = alloc_xenballooned_pages(nr_pages, pages);
+ if (ret < 0)
+ return ret;
+
+ ret = gnttab_pages_set_private(nr_pages, pages);
+ if (ret < 0)
+ gnttab_free_pages(nr_pages, pages);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
+
+void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
int i;
@@ -818,10 +827,114 @@ void gnttab_free_pages(int nr_pages, struct page **pages)
ClearPagePrivate(pages[i]);
}
}
+}
+EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
+
+/**
+ * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
+ * @nr_pages: number of pages to free
+ * @pages: the pages
+ */
+void gnttab_free_pages(int nr_pages, struct page **pages)
+{
+ gnttab_pages_clear_private(nr_pages, pages);
free_xenballooned_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+/**
+ * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
+ * @args: arguments to the function
+ */
+int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
+{
+ unsigned long pfn, start_pfn;
+ size_t size;
+ int i, ret;
+
+ size = args->nr_pages << PAGE_SHIFT;
+ if (args->coherent)
+ args->vaddr = dma_alloc_coherent(args->dev, size,
+ &args->dev_bus_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ else
+ args->vaddr = dma_alloc_wc(args->dev, size,
+ &args->dev_bus_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!args->vaddr) {
+ pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
+ return -ENOMEM;
+ }
+
+ start_pfn = __phys_to_pfn(args->dev_bus_addr);
+ for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
+ pfn++, i++) {
+ struct page *page = pfn_to_page(pfn);
+
+ args->pages[i] = page;
+ args->frames[i] = xen_page_to_gfn(page);
+ xenmem_reservation_scrub_page(page);
+ }
+
+ xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
+
+ ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
+ if (ret != args->nr_pages) {
+ pr_debug("Failed to decrease reservation for DMA buffer\n");
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ ret = gnttab_pages_set_private(args->nr_pages, args->pages);
+ if (ret < 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ gnttab_dma_free_pages(args);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
+
+/**
+ * gnttab_dma_free_pages - free DMAable pages
+ * @args: arguments to the function
+ */
+int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
+{
+ size_t size;
+ int i, ret;
+
+ gnttab_pages_clear_private(args->nr_pages, args->pages);
+
+ for (i = 0; i < args->nr_pages; i++)
+ args->frames[i] = page_to_xen_pfn(args->pages[i]);
+
+ ret = xenmem_reservation_increase(args->nr_pages, args->frames);
+ if (ret != args->nr_pages) {
+		pr_debug("Failed to increase reservation for DMA buffer\n");
+ ret = -EFAULT;
+ } else {
+ ret = 0;
+ }
+
+ xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
+ args->frames);
+
+ size = args->nr_pages << PAGE_SHIFT;
+ if (args->coherent)
+ dma_free_coherent(args->dev, size,
+ args->vaddr, args->dev_bus_addr);
+ else
+ dma_free_wc(args->dev, size,
+ args->vaddr, args->dev_bus_addr);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
+#endif
+
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
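
For context, a sketch (not part of the patch) of the alloc/free round-trip the
new DMA helpers expect, modeled on the gntdev_alloc_map()/gntdev_free_map()
call sites earlier in this series; "dma_dev" stands for whatever device will
master the DMA:

static int example_dma_grant_pages(struct device *dma_dev, int nr_pages,
				   struct page **pages, xen_pfn_t *frames)
{
	struct gnttab_dma_alloc_args args = {
		.dev      = dma_dev,
		.coherent = false,	/* write-combined rather than coherent */
		.nr_pages = nr_pages,
		.pages    = pages,
		.frames   = frames,
	};
	int ret;

	ret = gnttab_dma_alloc_pages(&args);
	if (ret)
		return ret;

	/* args.vaddr / args.dev_bus_addr now describe the buffer ... */

	gnttab_dma_free_pages(&args);
	return 0;
}
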
diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c
new file mode 100644
index 000000000000..084799c6180e
--- /dev/null
+++ b/drivers/xen/mem-reservation.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/******************************************************************************
+ * Xen memory reservation utilities.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ * Copyright (c) 2010 Daniel Kiper
+ * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#include <asm/xen/hypercall.h>
+
+#include <xen/interface/memory.h>
+#include <xen/mem-reservation.h>
+
+/*
+ * Use one extent per PAGE_SIZE to avoid breaking the page down into
+ * multiple frames.
+ */
+#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+void __xenmem_reservation_va_mapping_update(unsigned long count,
+ struct page **pages,
+ xen_pfn_t *frames)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct page *page = pages[i];
+ unsigned long pfn = page_to_pfn(page);
+
+ BUG_ON(!page);
+
+ /*
+		 * We don't support PV MMU when Linux and Xen are using
+ * different page granularity.
+ */
+ BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
+ set_phys_to_machine(pfn, frames[i]);
+
+ /* Link back into the page tables if not highmem. */
+ if (!PageHighMem(page)) {
+ int ret;
+
+ ret = HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ mfn_pte(frames[i], PAGE_KERNEL),
+ 0);
+ BUG_ON(ret);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);
+
+void __xenmem_reservation_va_mapping_reset(unsigned long count,
+ struct page **pages)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct page *page = pages[i];
+ unsigned long pfn = page_to_pfn(page);
+
+ /*
+ * We don't support PV MMU when Linux and Xen are using
+ * different page granularity.
+ */
+ BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
+ if (!PageHighMem(page)) {
+ int ret;
+
+ ret = HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ __pte_ma(0), 0);
+ BUG_ON(ret);
+ }
+ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ }
+}
+EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
+#endif /* CONFIG_XEN_HAVE_PVMMU */
+
+/* @frames is an array of PFNs */
+int xenmem_reservation_increase(int count, xen_pfn_t *frames)
+{
+ struct xen_memory_reservation reservation = {
+ .address_bits = 0,
+ .extent_order = EXTENT_ORDER,
+ .domid = DOMID_SELF
+ };
+
+ /* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
+ set_xen_guest_handle(reservation.extent_start, frames);
+ reservation.nr_extents = count;
+ return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+}
+EXPORT_SYMBOL_GPL(xenmem_reservation_increase);
+
+/* @frames is an array of GFNs */
+int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
+{
+ struct xen_memory_reservation reservation = {
+ .address_bits = 0,
+ .extent_order = EXTENT_ORDER,
+ .domid = DOMID_SELF
+ };
+
+ /* XENMEM_decrease_reservation requires a GFN */
+ set_xen_guest_handle(reservation.extent_start, frames);
+ reservation.nr_extents = count;
+ return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+}
+EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
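
The intended pairing of the two wrappers, mirroring how
gnttab_dma_alloc_pages()/gnttab_dma_free_pages() use them above (a sketch;
note the unit change: the decrease takes GFNs, the increase takes PFNs):

static int example_reservation_roundtrip(int nr_pages, struct page **pages,
					 xen_pfn_t *frames)
{
	int i, ret;

	/* frames[] holds GFNs here (filled via xen_page_to_gfn()) */
	ret = xenmem_reservation_decrease(nr_pages, frames);
	if (ret != nr_pages)
		return -EFAULT;	/* same partial-success policy as the callers */

	/* ... the released frames can now back other memory ... */

	for (i = 0; i < nr_pages; i++)
		frames[i] = page_to_xen_pfn(pages[i]);
	ret = xenmem_reservation_increase(nr_pages, frames);
	xenmem_reservation_va_mapping_update(nr_pages, pages, frames);
	return ret == nr_pages ? 0 : -EFAULT;
}
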
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index b437fccd4e62..294f35ce9e46 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -81,7 +81,7 @@ static void watch_target(struct xenbus_watch *watch,
static_max = new_target;
else
static_max >>= PAGE_SHIFT - 10;
- target_diff = xen_pv_domain() ? 0
+ target_diff = (xen_pv_domain() || xen_initial_domain()) ? 0
: static_max - balloon_stats.target_pages;
}
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index e2f3e8b0fba9..14a3d4cbc2a7 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -654,9 +654,9 @@ static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring
struct scsiback_nexus *nexus = tpg->tpg_nexus;
struct se_session *se_sess = nexus->tvn_se_sess;
struct vscsibk_pend *req;
- int tag, i;
+ int tag, cpu, i;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0) {
pr_err("Unable to obtain tag for vscsiif_request\n");
return ERR_PTR(-ENOMEM);
@@ -665,6 +665,7 @@ static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring
req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag];
memset(req, 0, sizeof(*req));
req->se_cmd.map_tag = tag;
+ req->se_cmd.map_cpu = cpu;
for (i = 0; i < VSCSI_MAX_GRANTS; i++)
req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
@@ -1387,9 +1388,7 @@ static int scsiback_check_stop_free(struct se_cmd *se_cmd)
static void scsiback_release_cmd(struct se_cmd *se_cmd)
{
- struct se_session *se_sess = se_cmd->se_sess;
-
- percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(se_cmd->se_sess, se_cmd);
}
static u32 scsiback_sess_get_index(struct se_session *se_sess)
@@ -1532,7 +1531,7 @@ static int scsiback_make_nexus(struct scsiback_tpg *tpg,
goto out_unlock;
}
- tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+ tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
VSCSI_DEFAULT_SESSION_TAGS,
sizeof(struct vscsibk_pend),
TARGET_PROT_NORMAL, name,
@@ -1587,7 +1586,7 @@ static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
/*
* Release the SCSI I_T Nexus to the emulated xen-pvscsi Target Port
*/
- transport_deregister_session(tv_nexus->tvn_se_sess);
+ target_remove_session(se_sess);
tpg->tpg_nexus = NULL;
mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1743,9 +1742,7 @@ static void scsiback_port_unlink(struct se_portal_group *se_tpg,
}
static struct se_portal_group *
-scsiback_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+scsiback_make_tpg(struct se_wwn *wwn, const char *name)
{
struct scsiback_tport *tport = container_of(wwn,
struct scsiback_tport, tport_wwn);
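
The tag-pool conversion above follows the generic target-core pattern; a
sketch of the new allocate/free pairing (se_sess, req and se_cmd stand in for
a real session, pending request and command):

/* Sketch: per-CPU tag allocation with the sbitmap-backed session pool. */
int tag, cpu;

tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
	return ERR_PTR(-ENOMEM);	/* pool exhausted */

req->se_cmd.map_tag = tag;	/* remember both values ... */
req->se_cmd.map_cpu = cpu;	/* ... for the matching free */

/* ... later, on release: */
target_free_tag(se_cmd->se_sess, se_cmd);
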
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 42e102e2e74a..85ff859d3af5 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -859,8 +859,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
static int
v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned flags, umode_t mode,
- int *opened)
+ struct file *file, unsigned flags, umode_t mode)
{
int err;
u32 perm;
@@ -917,7 +916,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
v9inode->writeback_fid = (void *) inode_fid;
}
mutex_unlock(&v9inode->v_mutex);
- err = finish_open(file, dentry, generic_file_open, opened);
+ err = finish_open(file, dentry, generic_file_open);
if (err)
goto error;
@@ -925,7 +924,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
v9fs_cache_inode_set_cookie(d_inode(dentry), file);
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
out:
dput(res);
return err;
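
Across this series ->atomic_open() loses the "int *opened" out-parameter;
creation is now reported through file->f_mode. A minimal sketch of the new
shape (the filesystem and the "created" bookkeeping are hypothetical):

static int example_atomic_open(struct inode *dir, struct dentry *dentry,
			       struct file *file, unsigned int flags,
			       umode_t mode)
{
	bool created = false;
	int err;

	/* ... look up or create the inode, setting "created" ... */

	err = finish_open(file, dentry, generic_file_open);
	if (err)
		return err;

	if (created)
		file->f_mode |= FMODE_CREATED;	/* replaces *opened |= FILE_CREATED */
	return 0;
}
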
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 7f6ae21a27b3..4823e1c46999 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -241,8 +241,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
static int
v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned flags, umode_t omode,
- int *opened)
+ struct file *file, unsigned flags, umode_t omode)
{
int err = 0;
kgid_t gid;
@@ -352,13 +351,13 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
}
mutex_unlock(&v9inode->v_mutex);
/* Since we are opening a file, assign the open fid to the file */
- err = finish_open(file, dentry, generic_file_open, opened);
+ err = finish_open(file, dentry, generic_file_open);
if (err)
goto err_clunk_old_fid;
file->private_data = ofid;
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
v9fs_cache_inode_set_cookie(inode, file);
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
out:
v9fs_put_acl(dacl, pacl);
dput(res);
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 56df483de619..b795f8da81f3 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -1,3 +1,6 @@
+
+menu "Executable file formats"
+
config BINFMT_ELF
bool "Kernel support for ELF binaries"
depends on MMU
@@ -187,3 +190,5 @@ config COREDUMP
This option enables support for performing core dumps. You almost
certainly want to say Y here. Not necessary on systems that never
need debugging or only ever run flawless code.
+
+endmenu
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index c836c425ca94..e91028d4340a 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -287,7 +287,7 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
ADFS_I(inode)->mmu_private = inode->i_size;
}
- insert_inode_hash(inode);
+ inode_fake_hash(inode);
out:
return inode;
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 71fa525d63a0..7e099a7a4eb1 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -291,6 +291,7 @@ static void destroy_inodecache(void)
static const struct super_operations adfs_sops = {
.alloc_inode = adfs_alloc_inode,
.destroy_inode = adfs_destroy_inode,
+ .drop_inode = generic_delete_inode,
.write_inode = adfs_write_inode,
.put_super = adfs_put_super,
.statfs = adfs_statfs,
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 7d623008157f..855bf2b79fed 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -822,6 +822,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
{
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct inode *inode;
+ struct dentry *d;
struct key *key;
int ret;
@@ -862,43 +863,17 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
afs_stat_v(dvnode, n_lookup);
inode = afs_do_lookup(dir, dentry, key);
- if (IS_ERR(inode)) {
- ret = PTR_ERR(inode);
- if (ret == -ENOENT) {
- inode = afs_try_auto_mntpt(dentry, dir);
- if (!IS_ERR(inode)) {
- key_put(key);
- goto success;
- }
-
- ret = PTR_ERR(inode);
- }
-
- key_put(key);
- if (ret == -ENOENT) {
- d_add(dentry, NULL);
- _leave(" = NULL [negative]");
- return NULL;
- }
- _leave(" = %d [do]", ret);
- return ERR_PTR(ret);
- }
- dentry->d_fsdata = (void *)(unsigned long)dvnode->status.data_version;
-
- /* instantiate the dentry */
key_put(key);
- if (IS_ERR(inode)) {
- _leave(" = %ld", PTR_ERR(inode));
- return ERR_CAST(inode);
+ if (inode == ERR_PTR(-ENOENT)) {
+ inode = afs_try_auto_mntpt(dentry, dir);
+ } else {
+ dentry->d_fsdata =
+ (void *)(unsigned long)dvnode->status.data_version;
}
-
-success:
- d_add(dentry, inode);
- _leave(" = 0 { ino=%lu v=%u }",
- d_inode(dentry)->i_ino,
- d_inode(dentry)->i_generation);
-
- return NULL;
+ d = d_splice_alias(inode, dentry);
+ if (!IS_ERR_OR_NULL(d))
+ d->d_fsdata = dentry->d_fsdata;
+ return d;
}
/*
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 174e843f0633..1cde710a8013 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -83,7 +83,7 @@ struct inode *afs_try_auto_mntpt(struct dentry *dentry, struct inode *dir)
out:
_leave("= %d", ret);
- return ERR_PTR(ret);
+ return ret == -ENOENT ? NULL : ERR_PTR(ret);
}
/*
@@ -141,12 +141,6 @@ out_p:
static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
- struct afs_vnode *vnode;
- struct inode *inode;
- int ret;
-
- vnode = AFS_FS_I(dir);
-
_enter("%pd", dentry);
ASSERTCMP(d_inode(dentry), ==, NULL);
@@ -160,22 +154,7 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
memcmp(dentry->d_name.name, "@cell", 5) == 0)
return afs_lookup_atcell(dentry);
- inode = afs_try_auto_mntpt(dentry, dir);
- if (IS_ERR(inode)) {
- ret = PTR_ERR(inode);
- if (ret == -ENOENT) {
- d_add(dentry, NULL);
- _leave(" = NULL [negative]");
- return NULL;
- }
- _leave(" = %d [do]", ret);
- return ERR_PTR(ret);
- }
-
- d_add(dentry, inode);
- _leave(" = 0 { ino=%lu v=%u }",
- d_inode(dentry)->i_ino, d_inode(dentry)->i_generation);
- return NULL;
+ return d_splice_alias(afs_try_auto_mntpt(dentry, dir), dentry);
}
const struct inode_operations afs_dynroot_inode_operations = {
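
Both lookup simplifications above lean on d_splice_alias() accepting NULL and
ERR_PTR inodes directly; a sketch of the resulting idiom (example_do_lookup()
is hypothetical):

static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	/* may yield a valid inode, NULL (negative entry) or an ERR_PTR */
	struct inode *inode = example_do_lookup(dir, dentry);

	/*
	 * d_splice_alias() absorbs all three cases: NULL instantiates a
	 * negative dentry and returns NULL, an ERR_PTR is handed back as
	 * the error, and a valid inode is spliced in (possibly returning
	 * an alias to use instead of the passed-in dentry).
	 */
	return d_splice_alias(inode, dentry);
}
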
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index a1b18082991b..35f2ae30f31f 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -346,7 +346,6 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
struct rxrpc_call *rxcall;
struct msghdr msg;
struct kvec iov[1];
- size_t offset;
s64 tx_total_len;
int ret;
@@ -433,10 +432,10 @@ error_do_abort:
rxrpc_kernel_abort_call(call->net->socket, rxcall,
RX_USER_ABORT, ret, "KSD");
} else {
- offset = 0;
- rxrpc_kernel_recv_data(call->net->socket, rxcall, NULL,
- 0, &offset, false, &call->abort_code,
- &call->service_id);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, NULL, 0, 0);
+ rxrpc_kernel_recv_data(call->net->socket, rxcall,
+ &msg.msg_iter, false,
+ &call->abort_code, &call->service_id);
ac->abort_code = call->abort_code;
ac->responded = true;
}
@@ -467,13 +466,14 @@ static void afs_deliver_to_call(struct afs_call *call)
state == AFS_CALL_SV_AWAIT_ACK
) {
if (state == AFS_CALL_SV_AWAIT_ACK) {
- size_t offset = 0;
+ struct iov_iter iter;
+
+ iov_iter_kvec(&iter, READ | ITER_KVEC, NULL, 0, 0);
ret = rxrpc_kernel_recv_data(call->net->socket,
- call->rxcall,
- NULL, 0, &offset, false,
+ call->rxcall, &iter, false,
&remote_abort,
&call->service_id);
- trace_afs_recv_data(call, 0, offset, false, ret);
+ trace_afs_recv_data(call, 0, 0, false, ret);
if (ret == -EINPROGRESS || ret == -EAGAIN)
return;
@@ -648,7 +648,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
trace_afs_notify_call(rxcall, call);
call->need_attention = true;
- u = __atomic_add_unless(&call->usage, 1, 0);
+ u = atomic_fetch_add_unless(&call->usage, 1, 0);
if (u != 0) {
trace_afs_call(call, afs_call_trace_wake, u,
atomic_read(&call->net->nr_outstanding_calls),
@@ -894,6 +894,8 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
bool want_more)
{
struct afs_net *net = call->net;
+ struct iov_iter iter;
+ struct kvec iov;
enum afs_call_state state;
u32 remote_abort = 0;
int ret;
@@ -903,10 +905,14 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
ASSERTCMP(call->offset, <=, count);
- ret = rxrpc_kernel_recv_data(net->socket, call->rxcall,
- buf, count, &call->offset,
+ iov.iov_base = buf + call->offset;
+ iov.iov_len = count - call->offset;
+ iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, count - call->offset);
+
+ ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, &iter,
want_more, &remote_abort,
&call->service_id);
+ call->offset += (count - call->offset) - iov_iter_count(&iter);
trace_afs_recv_data(call, count, call->offset, want_more, ret);
if (ret == 0 || ret == -EAGAIN)
return ret;
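
The rxrpc receive path now takes an iov_iter instead of a buffer/offset pair;
a sketch of building the single-segment kvec iterator the way
afs_extract_data() does above:

/* Sketch: wrap the unfilled tail of a buffer in a one-segment kvec iterator. */
struct kvec iov = {
	.iov_base = buf + offset,
	.iov_len  = count - offset,
};
struct iov_iter iter;

iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, count - offset);

/* ... hand &iter to rxrpc_kernel_recv_data() ... */

/* the bytes consumed are however much the iterator shrank */
offset += (count - offset) - iov_iter_count(&iter);
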
diff --git a/fs/aio.c b/fs/aio.c
index 27454594e37a..b9350f3360c6 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -5,6 +5,7 @@
* Implements an efficient asynchronous io interface.
*
* Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright 2018 Christoph Hellwig.
*
* See ../COPYING for licensing terms.
*/
@@ -18,6 +19,7 @@
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
+#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/sched/signal.h>
@@ -164,10 +166,21 @@ struct fsync_iocb {
bool datasync;
};
+struct poll_iocb {
+ struct file *file;
+ struct wait_queue_head *head;
+ __poll_t events;
+ bool woken;
+ bool cancelled;
+ struct wait_queue_entry wait;
+ struct work_struct work;
+};
+
struct aio_kiocb {
union {
struct kiocb rw;
struct fsync_iocb fsync;
+ struct poll_iocb poll;
};
struct kioctx *ki_ctx;
@@ -178,6 +191,7 @@ struct aio_kiocb {
struct list_head ki_list; /* the aio core uses this
* for cancellation */
+ refcount_t ki_refcnt;
/*
* If the aio_resfd field of the userspace iocb is not zero,
@@ -202,9 +216,7 @@ static const struct address_space_operations aio_ctx_aops;
static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
- struct qstr this = QSTR_INIT("[aio]", 5);
struct file *file;
- struct path path;
struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
if (IS_ERR(inode))
return ERR_CAST(inode);
@@ -213,31 +225,17 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
inode->i_mapping->private_data = ctx;
inode->i_size = PAGE_SIZE * nr_pages;
- path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
- if (!path.dentry) {
+ file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
+ O_RDWR, &aio_ring_fops);
+ if (IS_ERR(file))
iput(inode);
- return ERR_PTR(-ENOMEM);
- }
- path.mnt = mntget(aio_mnt);
-
- d_instantiate(path.dentry, inode);
- file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
- if (IS_ERR(file)) {
- path_put(&path);
- return file;
- }
-
- file->f_flags = O_RDWR;
return file;
}
static struct dentry *aio_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- static const struct dentry_operations ops = {
- .d_dname = simple_dname,
- };
- struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
+ struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, NULL,
AIO_RING_MAGIC);
if (!IS_ERR(root))
@@ -1015,6 +1013,7 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
percpu_ref_get(&ctx->reqs);
INIT_LIST_HEAD(&req->ki_list);
+ refcount_set(&req->ki_refcnt, 0);
req->ki_ctx = ctx;
return req;
out_put:
@@ -1049,6 +1048,15 @@ out:
return ret;
}
+static inline void iocb_put(struct aio_kiocb *iocb)
+{
+ if (refcount_read(&iocb->ki_refcnt) == 0 ||
+ refcount_dec_and_test(&iocb->ki_refcnt)) {
+ percpu_ref_put(&iocb->ki_ctx->reqs);
+ kmem_cache_free(kiocb_cachep, iocb);
+ }
+}
+
/* aio_complete
* Called when the io request on the given iocb is complete.
*/
@@ -1118,8 +1126,6 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
eventfd_ctx_put(iocb->ki_eventfd);
}
- kmem_cache_free(kiocb_cachep, iocb);
-
/*
* We have to order our ring_info tail store above and test
* of the wait list below outside the wait lock. This is
@@ -1130,8 +1136,7 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
-
- percpu_ref_put(&ctx->reqs);
+ iocb_put(iocb);
}
/* aio_read_events_ring
@@ -1592,6 +1597,182 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
return 0;
}
+static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
+{
+ struct file *file = iocb->poll.file;
+
+ aio_complete(iocb, mangle_poll(mask), 0);
+ fput(file);
+}
+
+static void aio_poll_complete_work(struct work_struct *work)
+{
+ struct poll_iocb *req = container_of(work, struct poll_iocb, work);
+ struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
+ struct poll_table_struct pt = { ._key = req->events };
+ struct kioctx *ctx = iocb->ki_ctx;
+ __poll_t mask = 0;
+
+ if (!READ_ONCE(req->cancelled))
+ mask = vfs_poll(req->file, &pt) & req->events;
+
+ /*
+ * Note that ->ki_cancel callers also delete iocb from active_reqs after
+ * calling ->ki_cancel. We need the ctx_lock roundtrip here to
+ * synchronize with them. In the cancellation case the list_del_init
+	 * itself is not actually needed, but harmless, so we keep it in to
+ * avoid further branches in the fast path.
+ */
+ spin_lock_irq(&ctx->ctx_lock);
+ if (!mask && !READ_ONCE(req->cancelled)) {
+ add_wait_queue(req->head, &req->wait);
+ spin_unlock_irq(&ctx->ctx_lock);
+ return;
+ }
+ list_del_init(&iocb->ki_list);
+ spin_unlock_irq(&ctx->ctx_lock);
+
+ aio_poll_complete(iocb, mask);
+}
+
+/* assumes we are called with irqs disabled */
+static int aio_poll_cancel(struct kiocb *iocb)
+{
+ struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
+ struct poll_iocb *req = &aiocb->poll;
+
+ spin_lock(&req->head->lock);
+ WRITE_ONCE(req->cancelled, true);
+ if (!list_empty(&req->wait.entry)) {
+ list_del_init(&req->wait.entry);
+ schedule_work(&aiocb->poll.work);
+ }
+ spin_unlock(&req->head->lock);
+
+ return 0;
+}
+
+static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ void *key)
+{
+ struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
+ struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
+ __poll_t mask = key_to_poll(key);
+
+ req->woken = true;
+
+	/* for instances that support it, check for an event match first: */
+ if (mask) {
+ if (!(mask & req->events))
+ return 0;
+
+ /* try to complete the iocb inline if we can: */
+ if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
+ list_del(&iocb->ki_list);
+ spin_unlock(&iocb->ki_ctx->ctx_lock);
+
+ list_del_init(&req->wait.entry);
+ aio_poll_complete(iocb, mask);
+ return 1;
+ }
+ }
+
+ list_del_init(&req->wait.entry);
+ schedule_work(&req->work);
+ return 1;
+}
+
+struct aio_poll_table {
+ struct poll_table_struct pt;
+ struct aio_kiocb *iocb;
+ int error;
+};
+
+static void
+aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
+ struct poll_table_struct *p)
+{
+ struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
+
+ /* multiple wait queues per file are not supported */
+ if (unlikely(pt->iocb->poll.head)) {
+ pt->error = -EINVAL;
+ return;
+ }
+
+ pt->error = 0;
+ pt->iocb->poll.head = head;
+ add_wait_queue(head, &pt->iocb->poll.wait);
+}
+
+static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
+{
+ struct kioctx *ctx = aiocb->ki_ctx;
+ struct poll_iocb *req = &aiocb->poll;
+ struct aio_poll_table apt;
+ __poll_t mask;
+
+ /* reject any unknown events outside the normal event mask. */
+ if ((u16)iocb->aio_buf != iocb->aio_buf)
+ return -EINVAL;
+ /* reject fields that are not defined for poll */
+ if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
+ return -EINVAL;
+
+ INIT_WORK(&req->work, aio_poll_complete_work);
+ req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
+ req->file = fget(iocb->aio_fildes);
+ if (unlikely(!req->file))
+ return -EBADF;
+
+ apt.pt._qproc = aio_poll_queue_proc;
+ apt.pt._key = req->events;
+ apt.iocb = aiocb;
+ apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
+
+	/* initialize the list so that we can do list_empty checks */
+ INIT_LIST_HEAD(&req->wait.entry);
+ init_waitqueue_func_entry(&req->wait, aio_poll_wake);
+
+ /* one for removal from waitqueue, one for this function */
+ refcount_set(&aiocb->ki_refcnt, 2);
+
+ mask = vfs_poll(req->file, &apt.pt) & req->events;
+ if (unlikely(!req->head)) {
+ /* we did not manage to set up a waitqueue, done */
+ goto out;
+ }
+
+ spin_lock_irq(&ctx->ctx_lock);
+ spin_lock(&req->head->lock);
+ if (req->woken) {
+ /* wake_up context handles the rest */
+ mask = 0;
+ apt.error = 0;
+ } else if (mask || apt.error) {
+ /* if we get an error or a mask we are done */
+ WARN_ON_ONCE(list_empty(&req->wait.entry));
+ list_del_init(&req->wait.entry);
+ } else {
+ /* actually waiting for an event */
+ list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+ aiocb->ki_cancel = aio_poll_cancel;
+ }
+ spin_unlock(&req->head->lock);
+ spin_unlock_irq(&ctx->ctx_lock);
+
+out:
+ if (unlikely(apt.error)) {
+ fput(req->file);
+ return apt.error;
+ }
+
+ if (mask)
+ aio_poll_complete(aiocb, mask);
+ iocb_put(aiocb);
+ return 0;
+}
+
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
bool compat)
{
@@ -1665,6 +1846,9 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
case IOCB_CMD_FDSYNC:
ret = aio_fsync(&req->fsync, &iocb, true);
break;
+ case IOCB_CMD_POLL:
+ ret = aio_poll(req, &iocb);
+ break;
default:
pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
ret = -EINVAL;
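
For completeness, a userspace sketch of submitting the new IOCB_CMD_POLL via
the raw aio syscalls (assuming a <linux/aio_abi.h> new enough to carry the
opcode; the poll mask travels in aio_buf, and aio_offset/aio_nbytes/
aio_rw_flags must stay zero, as enforced above):

#include <linux/aio_abi.h>	/* IOCB_CMD_POLL */
#include <sys/syscall.h>
#include <unistd.h>
#include <poll.h>
#include <string.h>

static int submit_poll(aio_context_t ctx, int fd)
{
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_POLL;
	cb.aio_fildes = fd;
	cb.aio_buf = POLLIN;	/* EPOLLERR/EPOLLHUP are added implicitly */

	/* offset, nbytes and rw_flags stay zero or the kernel returns -EINVAL */
	return syscall(SYS_io_submit, ctx, 1, cbs);
}
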
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 3168ee4e77f4..91262c34b797 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -71,8 +71,6 @@ struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags)
{
- struct qstr this;
- struct path path;
struct file *file;
if (IS_ERR(anon_inode_inode))
@@ -82,39 +80,23 @@ struct file *anon_inode_getfile(const char *name,
return ERR_PTR(-ENOENT);
/*
- * Link the inode to a directory entry by creating a unique name
- * using the inode sequence number.
- */
- file = ERR_PTR(-ENOMEM);
- this.name = name;
- this.len = strlen(name);
- this.hash = 0;
- path.dentry = d_alloc_pseudo(anon_inode_mnt->mnt_sb, &this);
- if (!path.dentry)
- goto err_module;
-
- path.mnt = mntget(anon_inode_mnt);
- /*
* We know the anon_inode inode count is always greater than zero,
* so ihold() is safe.
*/
ihold(anon_inode_inode);
-
- d_instantiate(path.dentry, anon_inode_inode);
-
- file = alloc_file(&path, OPEN_FMODE(flags), fops);
+ file = alloc_file_pseudo(anon_inode_inode, anon_inode_mnt, name,
+ flags & (O_ACCMODE | O_NONBLOCK), fops);
if (IS_ERR(file))
- goto err_dput;
+ goto err;
+
file->f_mapping = anon_inode_inode->i_mapping;
- file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->private_data = priv;
return file;
-err_dput:
- path_put(&path);
-err_module:
+err:
+ iput(anon_inode_inode);
module_put(fops->owner);
return file;
}
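
Both conversions above funnel into the same new helper; a sketch of the
alloc_file_pseudo() idiom ("[example]" and example_fops are hypothetical; the
inode reference is consumed only on success, hence the iput() on failure, as
in both call sites above):

struct file *file;

file = alloc_file_pseudo(inode, mnt, "[example]", O_RDWR, &example_fops);
if (IS_ERR(file)) {
	iput(inode);	/* our inode reference was not consumed */
	return PTR_ERR(file);
}
/* on success the new file owns the inode reference */
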
diff --git a/fs/attr.c b/fs/attr.c
index e3d53bf12240..d22e8187477f 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -120,7 +120,6 @@ EXPORT_SYMBOL(setattr_prepare);
* inode_newsize_ok - may this inode be truncated to a given size
* @inode: the inode to be truncated
* @offset: the new size to assign to the inode
- * @Returns: 0 on success, -ve errno on failure
*
* inode_newsize_ok must be called with i_mutex held.
*
@@ -130,6 +129,8 @@ EXPORT_SYMBOL(setattr_prepare);
* returned. @inode must be a file (not directory), with appropriate
* permissions to allow truncate (inode_newsize_ok does NOT check these
* conditions).
+ *
+ * Return: 0 on success, -ve errno on failure
*/
int inode_newsize_ok(const struct inode *inode, loff_t offset)
{
@@ -205,7 +206,7 @@ EXPORT_SYMBOL(setattr_copy);
/**
* notify_change - modify attributes of a filesytem object
* @dentry: object affected
- * @iattr: new attributes
+ * @attr: new attributes
* @delegated_inode: returns inode, if the inode is delegated
*
* The caller must hold the i_mutex on the affected object.
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 125e8bbd22a2..8035d2a44561 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -134,7 +134,7 @@ static int bad_inode_update_time(struct inode *inode, struct timespec64 *time,
static int bad_inode_atomic_open(struct inode *inode, struct dentry *dentry,
struct file *file, unsigned int open_flag,
- umode_t create_mode, int *opened)
+ umode_t create_mode)
{
return -EIO;
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 816cc921cf36..efae2fb0930a 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1751,7 +1751,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
if (regset->core_note_type && regset->get &&
- (!regset->active || regset->active(t->task, regset))) {
+ (!regset->active || regset->active(t->task, regset) > 0)) {
int ret;
size_t size = regset_size(t->task, regset);
void *data = kmalloc(size, GFP_KERNEL);
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 4b5fff31ef27..aa4a7a23ff99 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -205,7 +205,7 @@ static int load_misc_binary(struct linux_binprm *bprm)
goto error;
if (fmt->flags & MISC_FMT_OPEN_FILE) {
- interp_file = filp_clone_open(fmt->interp_file);
+ interp_file = file_clone_open(fmt->interp_file);
if (!IS_ERR(interp_file))
deny_write_access(interp_file);
} else {
diff --git a/fs/block_dev.c b/fs/block_dev.c
index aba25414231a..38b8ce05cbc7 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -666,7 +666,8 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
result = blk_queue_enter(bdev->bd_queue, 0);
if (result)
return result;
- result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
+ result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
+ REQ_OP_READ);
blk_queue_exit(bdev->bd_queue);
return result;
}
@@ -704,7 +705,8 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
return result;
set_page_writeback(page);
- result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
+ result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
+ REQ_OP_WRITE);
if (result) {
end_page_writeback(page);
} else {
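
The ->rw_page() method now receives the request op directly instead of a
read/write bool; a sketch of a driver-side implementation under the new
convention (driver and helpers hypothetical):

static int example_rw_page(struct block_device *bdev, sector_t sector,
			   struct page *page, unsigned int op)
{
	if (op == REQ_OP_READ)
		return example_read_page(bdev, sector, page);
	return example_write_page(bdev, sector, page);
}
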
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 15e1dfef56a5..3b66c957ea6f 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -30,23 +30,22 @@ struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
name = XATTR_NAME_POSIX_ACL_DEFAULT;
break;
default:
- BUG();
+ return ERR_PTR(-EINVAL);
}
- size = btrfs_getxattr(inode, name, "", 0);
+ size = btrfs_getxattr(inode, name, NULL, 0);
if (size > 0) {
value = kzalloc(size, GFP_KERNEL);
if (!value)
return ERR_PTR(-ENOMEM);
size = btrfs_getxattr(inode, name, value, size);
}
- if (size > 0) {
+ if (size > 0)
acl = posix_acl_from_xattr(&init_user_ns, value, size);
- } else if (size == -ERANGE || size == -ENODATA || size == 0) {
+ else if (size == -ENODATA || size == 0)
acl = NULL;
- } else {
- acl = ERR_PTR(-EIO);
- }
+ else
+ acl = ERR_PTR(size);
kfree(value);
return acl;
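
A sketch of the probe-then-read xattr pattern the rewritten function relies
on (btrfs_getxattr() returns the value size when called with a NULL buffer):

ssize_t size = btrfs_getxattr(inode, name, NULL, 0);

if (size > 0) {
	char *value = kzalloc(size, GFP_KERNEL);

	if (!value)
		return ERR_PTR(-ENOMEM);
	size = btrfs_getxattr(inode, name, value, size);
	/* size is now the bytes copied, or a negative errno */
}
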
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 0a8e2e29a66b..ae750b1574a2 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -925,7 +925,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
type = btrfs_get_extent_inline_ref_type(leaf, iref,
BTRFS_REF_TYPE_ANY);
if (type == BTRFS_REF_TYPE_INVALID)
- return -EINVAL;
+ return -EUCLEAN;
offset = btrfs_extent_inline_ref_offset(leaf, iref);
@@ -1793,7 +1793,7 @@ static int get_extent_inline_ref(unsigned long *ptr,
*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
BTRFS_REF_TYPE_ANY);
if (*out_type == BTRFS_REF_TYPE_INVALID)
- return -EINVAL;
+ return -EUCLEAN;
*ptr += btrfs_extent_inline_ref_size(*out_type);
WARN_ON(*ptr > end);
@@ -2225,7 +2225,7 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
fspath = init_data_container(total_bytes);
if (IS_ERR(fspath))
- return (void *)fspath;
+ return ERR_CAST(fspath);
ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
if (!ifp) {
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 7e075343daa5..1343ac57b438 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -178,7 +178,7 @@ struct btrfs_inode {
struct btrfs_delayed_node *delayed_node;
/* File creation time. */
- struct timespec i_otime;
+ struct timespec64 i_otime;
/* Hook into fs_info->delayed_iputs */
struct list_head delayed_iput;
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index a3fdb4fe967d..833cf3c35b4d 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1539,7 +1539,12 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
}
device = multi->stripes[0].dev;
- block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev->bd_dev);
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) ||
+ !device->bdev || !device->name)
+ block_ctx_out->dev = NULL;
+ else
+ block_ctx_out->dev = btrfsic_dev_state_lookup(
+ device->bdev->bd_dev);
block_ctx_out->dev_bytenr = multi->stripes[0].physical;
block_ctx_out->start = bytenr;
block_ctx_out->len = len;
@@ -1624,7 +1629,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
bio = btrfs_io_bio_alloc(num_pages - i);
bio_set_dev(bio, block_ctx->dev->bdev);
bio->bi_iter.bi_sector = dev_bytenr >> 9;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio->bi_opf = REQ_OP_READ;
for (j = i; j < num_pages; j++) {
ret = bio_add_page(bio, block_ctx->pagev[j],
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d3e447b45bf7..9bfa66592aa7 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -5,7 +5,6 @@
#include <linux/kernel.h>
#include <linux/bio.h>
-#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
@@ -14,10 +13,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
-#include <linux/mpage.h>
-#include <linux/swap.h>
#include <linux/writeback.h>
-#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
@@ -303,7 +299,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
struct bio *bio = NULL;
struct compressed_bio *cb;
unsigned long bytes_left;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
int pg_index = 0;
struct page *page;
u64 first_byte = disk_start;
@@ -342,9 +337,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
page = compressed_pages[pg_index];
page->mapping = inode->i_mapping;
if (bio->bi_iter.bi_size)
- submit = io_tree->ops->merge_bio_hook(page, 0,
- PAGE_SIZE,
- bio, 0);
+ submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE, bio, 0);
page->mapping = NULL;
if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
@@ -613,7 +606,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
cb->len = bio->bi_iter.bi_size;
comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
- bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
+ comp_bio->bi_opf = REQ_OP_READ;
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
refcount_set(&cb->pending_bios, 1);
@@ -626,9 +619,8 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
page->index = em_start >> PAGE_SHIFT;
if (comp_bio->bi_iter.bi_size)
- submit = tree->ops->merge_bio_hook(page, 0,
- PAGE_SIZE,
- comp_bio, 0);
+ submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE,
+ comp_bio, 0);
page->mapping = NULL;
if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
@@ -660,7 +652,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
}
comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
- bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
+ comp_bio->bi_opf = REQ_OP_READ;
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 4bc326df472e..d436fb4c002e 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -888,11 +888,7 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
btrfs_root_last_snapshot(&root->root_item) ||
btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
return 1;
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
- btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
- return 1;
-#endif
+
return 0;
}
@@ -3128,8 +3124,7 @@ again:
* higher levels
*
*/
-static void fixup_low_keys(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
+static void fixup_low_keys(struct btrfs_path *path,
struct btrfs_disk_key *key, int level)
{
int i;
@@ -3181,7 +3176,7 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
btrfs_set_item_key(eb, &disk_key, slot);
btrfs_mark_buffer_dirty(eb);
if (slot == 0)
- fixup_low_keys(fs_info, path, &disk_key, 1);
+ fixup_low_keys(path, &disk_key, 1);
}
/*
@@ -3359,17 +3354,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
root_add_used(root, fs_info->nodesize);
- memzero_extent_buffer(c, 0, sizeof(struct btrfs_header));
btrfs_set_header_nritems(c, 1);
- btrfs_set_header_level(c, level);
- btrfs_set_header_bytenr(c, c->start);
- btrfs_set_header_generation(c, trans->transid);
- btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(c, root->root_key.objectid);
-
- write_extent_buffer_fsid(c, fs_info->fsid);
- write_extent_buffer_chunk_tree_uuid(c, fs_info->chunk_tree_uuid);
-
btrfs_set_node_key(c, &lower_key, 0);
btrfs_set_node_blockptr(c, 0, lower->start);
lower_gen = btrfs_header_generation(lower);
@@ -3498,15 +3483,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
return PTR_ERR(split);
root_add_used(root, fs_info->nodesize);
-
- memzero_extent_buffer(split, 0, sizeof(struct btrfs_header));
- btrfs_set_header_level(split, btrfs_header_level(c));
- btrfs_set_header_bytenr(split, split->start);
- btrfs_set_header_generation(split, trans->transid);
- btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(split, root->root_key.objectid);
- write_extent_buffer_fsid(split, fs_info->fsid);
- write_extent_buffer_chunk_tree_uuid(split, fs_info->chunk_tree_uuid);
+ ASSERT(btrfs_header_level(c) == level);
ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
if (ret) {
@@ -3945,7 +3922,7 @@ static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
clean_tree_block(fs_info, right);
btrfs_item_key(right, &disk_key, 0);
- fixup_low_keys(fs_info, path, &disk_key, 1);
+ fixup_low_keys(path, &disk_key, 1);
/* then fixup the leaf pointer in the path */
if (path->slots[0] < push_items) {
@@ -4292,15 +4269,6 @@ again:
root_add_used(root, fs_info->nodesize);
- memzero_extent_buffer(right, 0, sizeof(struct btrfs_header));
- btrfs_set_header_bytenr(right, right->start);
- btrfs_set_header_generation(right, trans->transid);
- btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(right, root->root_key.objectid);
- btrfs_set_header_level(right, 0);
- write_extent_buffer_fsid(right, fs_info->fsid);
- write_extent_buffer_chunk_tree_uuid(right, fs_info->chunk_tree_uuid);
-
if (split == 0) {
if (mid <= slot) {
btrfs_set_header_nritems(right, 0);
@@ -4320,7 +4288,7 @@ again:
path->nodes[0] = right;
path->slots[0] = 0;
if (path->slots[1] == 0)
- fixup_low_keys(fs_info, path, &disk_key, 1);
+ fixup_low_keys(path, &disk_key, 1);
}
/*
* We create a new leaf 'right' for the required ins_len and
@@ -4642,7 +4610,7 @@ void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
btrfs_set_item_key(leaf, &disk_key, slot);
if (slot == 0)
- fixup_low_keys(fs_info, path, &disk_key, 1);
+ fixup_low_keys(path, &disk_key, 1);
}
item = btrfs_item_nr(slot);
@@ -4744,7 +4712,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
if (path->slots[0] == 0) {
btrfs_cpu_key_to_disk(&disk_key, cpu_key);
- fixup_low_keys(fs_info, path, &disk_key, 1);
+ fixup_low_keys(path, &disk_key, 1);
}
btrfs_unlock_up_safe(path, 1);
@@ -4886,7 +4854,6 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
int level, int slot)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *parent = path->nodes[level];
u32 nritems;
int ret;
@@ -4919,7 +4886,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_disk_key disk_key;
btrfs_node_key(parent, &disk_key, 0);
- fixup_low_keys(fs_info, path, &disk_key, level + 1);
+ fixup_low_keys(path, &disk_key, level + 1);
}
btrfs_mark_buffer_dirty(parent);
}
@@ -5022,7 +4989,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_disk_key disk_key;
btrfs_item_key(leaf, &disk_key, 0);
- fixup_low_keys(fs_info, path, &disk_key, 1);
+ fixup_low_keys(path, &disk_key, 1);
}
/* delete the leaf if it is mostly empty */
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 118346aceea9..318be7864072 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -55,8 +55,6 @@ struct btrfs_ordered_sum;
#define BTRFS_OLDEST_GENERATION 0ULL
-#define BTRFS_COMPAT_EXTENT_TREE_V0
-
/*
* the max metadata block size. This limit is somewhat artificial,
* but the memmove costs go through the roof for larger blocks.
@@ -86,6 +84,14 @@ static const int btrfs_csum_sizes[] = { 4 };
#define BTRFS_DIRTY_METADATA_THRESH SZ_32M
+/*
+ * Use a large batch size to reduce the overhead of metadata updates. On the
+ * reader side, we only read it when we are close to ENOSPC and the read
+ * overhead is mostly related to the number of CPUs, so it is OK to use an
+ * arbitrarily large value here.
+ */
+#define BTRFS_TOTAL_BYTES_PINNED_BATCH SZ_128M
+
#define BTRFS_MAX_EXTENT_SIZE SZ_128M
@@ -342,8 +348,8 @@ struct btrfs_path {
sizeof(struct btrfs_item))
struct btrfs_dev_replace {
u64 replace_state; /* see #define above */
- u64 time_started; /* seconds since 1-Jan-1970 */
- u64 time_stopped; /* seconds since 1-Jan-1970 */
+ time64_t time_started; /* seconds since 1-Jan-1970 */
+ time64_t time_stopped; /* seconds since 1-Jan-1970 */
atomic64_t num_write_errors;
atomic64_t num_uncorrectable_read_errors;
@@ -359,8 +365,6 @@ struct btrfs_dev_replace {
struct btrfs_device *srcdev;
struct btrfs_device *tgtdev;
- pid_t lock_owner;
- atomic_t nesting_level;
struct mutex lock_finishing_cancel_unmount;
rwlock_t lock;
atomic_t read_locks;
@@ -1213,7 +1217,6 @@ struct btrfs_root {
u64 defrag_trans_start;
struct btrfs_key defrag_progress;
struct btrfs_key defrag_max;
- char *name;
/* the dirty list is only used by non-reference counted roots */
struct list_head dirty_list;
@@ -2428,32 +2431,6 @@ static inline u32 btrfs_file_extent_inline_item_len(
return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
}
-/* this returns the number of file bytes represented by the inline item.
- * If an item is compressed, this is the uncompressed size
- */
-static inline u32 btrfs_file_extent_inline_len(const struct extent_buffer *eb,
- int slot,
- const struct btrfs_file_extent_item *fi)
-{
- struct btrfs_map_token token;
-
- btrfs_init_map_token(&token);
- /*
- * return the space used on disk if this item isn't
- * compressed or encoded
- */
- if (btrfs_token_file_extent_compression(eb, fi, &token) == 0 &&
- btrfs_token_file_extent_encryption(eb, fi, &token) == 0 &&
- btrfs_token_file_extent_other_encoding(eb, fi, &token) == 0) {
- return btrfs_file_extent_inline_item_len(eb,
- btrfs_item_nr(slot));
- }
-
- /* otherwise use the ram bytes field */
- return btrfs_token_file_extent_ram_bytes(eb, fi, &token);
-}
-
-
/* btrfs_dev_stats_item */
static inline u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
const struct btrfs_dev_stats_item *ptr,
@@ -2676,7 +2653,6 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
u64 offset, u64 ram_bytes,
struct btrfs_key *ins);
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
u64 root_objectid, u64 owner, u64 offset,
struct btrfs_key *ins);
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
@@ -2716,15 +2692,14 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytes_used,
- u64 type, u64 chunk_offset, u64 size);
+ u64 bytes_used, u64 type, u64 chunk_offset,
+ u64 size);
void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
struct btrfs_fs_info *fs_info,
const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 group_start,
- struct extent_map *em);
+ u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
@@ -2786,7 +2761,6 @@ void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
unsigned short type);
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
-void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush);
@@ -2803,8 +2777,7 @@ int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
-int btrfs_inc_block_group_ro(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache);
+int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache);
void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
@@ -2812,8 +2785,7 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 num_bytes, u64 *actual_bytes);
-int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 type);
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
@@ -2822,10 +2794,10 @@ int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
int btrfs_start_write_no_snapshotting(struct btrfs_root *root);
void btrfs_end_write_no_snapshotting(struct btrfs_root *root);
void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
-void check_system_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, const u64 type);
+void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
u64 start, u64 end);
+void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
@@ -3011,16 +2983,14 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
/* root-item.c */
-int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
- const char *name, int name_len);
-int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
- const char *name, int name_len);
+int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ u64 ref_id, u64 dirid, u64 sequence, const char *name,
+ int name_len);
+int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ u64 ref_id, u64 dirid, u64 *sequence, const char *name,
+ int name_len);
int btrfs_del_root(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, const struct btrfs_key *key);
+ const struct btrfs_key *key);
int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
const struct btrfs_key *key,
struct btrfs_root_item *item);
@@ -3196,7 +3166,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags);
-void btrfs_set_range_writeback(void *private_data, u64 start, u64 end);
+void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end);
vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
@@ -3452,7 +3422,7 @@ do { \
#ifdef CONFIG_BTRFS_ASSERT
__cold
-static inline void assfail(char *expr, char *file, int line)
+static inline void assfail(const char *expr, const char *file, int line)
{
pr_err("assertion failed: %s, file: %s, line: %d\n",
expr, file, line);
@@ -3465,6 +3435,13 @@ static inline void assfail(char *expr, char *file, int line)
#define ASSERT(expr) ((void)0)
#endif
+__cold
+static inline void btrfs_print_v0_err(struct btrfs_fs_info *fs_info)
+{
+ btrfs_err(fs_info,
+"Unsupported V0 extent filesystem detected. Aborting. Please re-create your filesystem with a newer kernel");
+}
+
__printf(5, 6)
__cold
void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
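The new btrfs_print_v0_err() helper is marked __cold, which in the kernel expands to __attribute__((cold)): a hint that the function is rarely executed, letting the compiler place it away from the hot instruction stream and treat branches reaching it as unlikely. A standalone illustration of the attribute, using a hypothetical function rather than the btrfs helper:

    #include <stdio.h>

    /* Rarely taken error path: keep it out of the hot text. */
    __attribute__((cold)) static void report_rare_error(const char *what)
    {
            fprintf(stderr, "rare error: %s\n", what);
    }

    int main(void)
    {
            report_rare_error("example");
            return 0;
    }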
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index fe6caa7e698b..f51b509f2d9b 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1222,7 +1222,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_trans_handle *trans;
struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
struct btrfs_path *path;
@@ -1418,7 +1418,6 @@ void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
const char *name, int name_len,
struct btrfs_inode *dir,
struct btrfs_disk_key *disk_key, u8 type,
@@ -1458,11 +1457,10 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
*/
BUG_ON(ret);
-
mutex_lock(&delayed_node->mutex);
ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
if (unlikely(ret)) {
- btrfs_err(fs_info,
+ btrfs_err(trans->fs_info,
"err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
name_len, name, delayed_node->root->objectid,
delayed_node->inode_id, ret);
@@ -1495,7 +1493,6 @@ static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_inode *dir, u64 index)
{
struct btrfs_delayed_node *node;
@@ -1511,7 +1508,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
item_key.type = BTRFS_DIR_INDEX_KEY;
item_key.offset = index;
- ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
+ ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
+ &item_key);
if (!ret)
goto end;
@@ -1533,7 +1531,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
mutex_lock(&node->mutex);
ret = __btrfs_add_delayed_deletion_item(node, item);
if (unlikely(ret)) {
- btrfs_err(fs_info,
+ btrfs_err(trans->fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
index, node->root->objectid, node->inode_id, ret);
BUG();
@@ -1837,7 +1835,7 @@ release_node:
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_delayed_node *delayed_node;
/*
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index ca7a97f3ab6b..33536cd681d4 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -86,14 +86,12 @@ static inline void btrfs_init_delayed_root(
}
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
const char *name, int name_len,
struct btrfs_inode *dir,
struct btrfs_disk_key *disk_key, u8 type,
u64 index);
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_inode *dir, u64 index);
int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 03dec673d12a..62ff545ba1f7 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -709,13 +709,13 @@ static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
* to make sure the delayed ref is eventually processed before this
* transaction commits.
*/
-int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 parent,
u64 ref_root, int level, int action,
struct btrfs_delayed_extent_op *extent_op,
int *old_ref_mod, int *new_ref_mod)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_tree_ref *ref;
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
@@ -730,27 +730,33 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
if (!ref)
return -ENOMEM;
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
+ if (!head_ref) {
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+ return -ENOMEM;
+ }
+
+ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
+ is_fstree(ref_root)) {
+ record = kmalloc(sizeof(*record), GFP_NOFS);
+ if (!record) {
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+ return -ENOMEM;
+ }
+ }
+
if (parent)
ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
else
ref_type = BTRFS_TREE_BLOCK_REF_KEY;
+
init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
ref_root, action, ref_type);
ref->root = ref_root;
ref->parent = parent;
ref->level = level;
- head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
- if (!head_ref)
- goto free_ref;
-
- if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
- is_fstree(ref_root)) {
- record = kmalloc(sizeof(*record), GFP_NOFS);
- if (!record)
- goto free_head_ref;
- }
-
init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
ref_root, 0, action, false, is_system);
head_ref->extent_op = extent_op;
@@ -779,25 +785,18 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
btrfs_qgroup_trace_extent_post(fs_info, record);
return 0;
-
-free_head_ref:
- kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
-free_ref:
- kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
-
- return -ENOMEM;
}
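The reordering in btrfs_add_delayed_tree_ref() above follows a common cleanup: once every allocation happens before any state is published, each failure site can free its predecessors inline, and the free_head_ref/free_ref goto unwind can be deleted. A schematic of the pattern, with illustrative names rather than the btrfs code:

    #include <stdlib.h>

    struct ref  { int unused; };
    struct head { int unused; };

    /* Allocate everything first; past the last check, nothing can fail. */
    static int setup_all_upfront(struct ref **pr, struct head **ph)
    {
            struct ref *r = malloc(sizeof(*r));

            if (!r)
                    return -1;

            struct head *h = malloc(sizeof(*h));

            if (!h) {
                    free(r);   /* only one earlier allocation to undo */
                    return -1;
            }

            *pr = r;
            *ph = h;
            return 0;
    }

    int main(void)
    {
            struct ref *r;
            struct head *h;

            if (setup_all_upfront(&r, &h) == 0) {
                    free(h);
                    free(r);
            }
            return 0;
    }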
/*
* add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
*/
-int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
u64 parent, u64 ref_root,
u64 owner, u64 offset, u64 reserved, int action,
int *old_ref_mod, int *new_ref_mod)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_data_ref *ref;
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index ea1aecb6a50d..d9f2a4ebd5db 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -234,14 +234,12 @@ static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *hea
kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
}
-int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 parent,
u64 ref_root, int level, int action,
struct btrfs_delayed_extent_op *extent_op,
int *old_ref_mod, int *new_ref_mod);
-int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
u64 parent, u64 ref_root,
u64 owner, u64 offset, u64 reserved, int action,
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index e2ba0419297a..dec01970d8c5 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -6,14 +6,9 @@
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
-#include <linux/buffer_head.h>
#include <linux/blkdev.h>
-#include <linux/random.h>
-#include <linux/iocontext.h>
-#include <linux/capability.h>
#include <linux/kthread.h>
#include <linux/math64.h>
-#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
@@ -465,7 +460,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
* go to the tgtdev as well (refer to btrfs_map_block()).
*/
dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
- dev_replace->time_started = get_seconds();
+ dev_replace->time_started = ktime_get_real_seconds();
dev_replace->cursor_left = 0;
dev_replace->committed_cursor_left = 0;
dev_replace->cursor_left_last_write_of_item = 0;
@@ -511,7 +506,7 @@ leave:
dev_replace->srcdev = NULL;
dev_replace->tgtdev = NULL;
btrfs_dev_replace_write_unlock(dev_replace);
- btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+ btrfs_destroy_dev_replace_tgtdev(tgt_device);
return ret;
}
@@ -618,7 +613,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
: BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED;
dev_replace->tgtdev = NULL;
dev_replace->srcdev = NULL;
- dev_replace->time_stopped = get_seconds();
+ dev_replace->time_stopped = ktime_get_real_seconds();
dev_replace->item_needs_writeback = 1;
/* replace old device with new one in mapping tree */
@@ -637,7 +632,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
btrfs_rm_dev_replace_blocked(fs_info);
if (tgt_device)
- btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+ btrfs_destroy_dev_replace_tgtdev(tgt_device);
btrfs_rm_dev_replace_unblocked(fs_info);
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
@@ -663,7 +658,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
tgt_device->commit_total_bytes = src_device->commit_total_bytes;
tgt_device->commit_bytes_used = src_device->bytes_used;
- btrfs_assign_next_active_device(fs_info, src_device, tgt_device);
+ btrfs_assign_next_active_device(src_device, tgt_device);
list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
fs_info->fs_devices->rw_devices++;
@@ -672,11 +667,17 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
btrfs_rm_dev_replace_blocked(fs_info);
- btrfs_rm_dev_replace_remove_srcdev(fs_info, src_device);
+ btrfs_rm_dev_replace_remove_srcdev(src_device);
btrfs_rm_dev_replace_unblocked(fs_info);
/*
+ * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will update
+ * the on-disk dev stats value during the commit transaction
+ */
+ atomic_inc(&tgt_device->dev_stats_ccnt);
+
+ /*
* this is again a consistent state where no dev_replace procedure
* is running, the target device is part of the filesystem, the
* source device is not part of the filesystem anymore and its 1st
@@ -807,7 +808,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
break;
}
dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
- dev_replace->time_stopped = get_seconds();
+ dev_replace->time_stopped = ktime_get_real_seconds();
dev_replace->item_needs_writeback = 1;
btrfs_dev_replace_write_unlock(dev_replace);
btrfs_scrub_cancel(fs_info);
@@ -826,7 +827,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
btrfs_dev_name(tgt_device));
if (tgt_device)
- btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+ btrfs_destroy_dev_replace_tgtdev(tgt_device);
leave:
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
@@ -848,7 +849,7 @@ void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info)
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
dev_replace->replace_state =
BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
- dev_replace->time_stopped = get_seconds();
+ dev_replace->time_stopped = ktime_get_real_seconds();
dev_replace->item_needs_writeback = 1;
btrfs_info(fs_info, "suspending dev_replace for unmount");
break;
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 39e9766d1cbd..a678b07fcf01 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -160,8 +160,8 @@ second_insert:
}
btrfs_release_path(path);
- ret2 = btrfs_insert_delayed_dir_index(trans, root->fs_info, name,
- name_len, dir, &disk_key, type, index);
+ ret2 = btrfs_insert_delayed_dir_index(trans, name, name_len, dir,
+ &disk_key, type, index);
out_free:
btrfs_free_path(path);
if (ret)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 205092dc9390..5124c15705ce 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -5,8 +5,6 @@
#include <linux/fs.h>
#include <linux/blkdev.h>
-#include <linux/scatterlist.h>
-#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
@@ -54,7 +52,6 @@
static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
-static void free_fs_root(struct btrfs_root *root);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_fs_info *fs_info);
@@ -108,12 +105,9 @@ void __cold btrfs_end_io_wq_exit(void)
*/
struct async_submit_bio {
void *private_data;
- struct btrfs_fs_info *fs_info;
struct bio *bio;
extent_submit_bio_start_t *submit_bio_start;
- extent_submit_bio_done_t *submit_bio_done;
int mirror_num;
- unsigned long bio_flags;
/*
* bio_offset is optional, can be used if the pages in the bio
* can't tell us where in the file the bio should go
@@ -212,7 +206,7 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode,
struct page *page, size_t pg_offset, u64 start, u64 len,
int create)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
int ret;
@@ -615,8 +609,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
found_start = btrfs_header_bytenr(eb);
if (found_start != eb->start) {
- btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
- found_start, eb->start);
+ btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
+ eb->start, found_start);
ret = -EIO;
goto err;
}
@@ -628,8 +622,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
}
found_level = btrfs_header_level(eb);
if (found_level >= BTRFS_MAX_LEVEL) {
- btrfs_err(fs_info, "bad tree block level %d",
- (int)btrfs_header_level(eb));
+ btrfs_err(fs_info, "bad tree block level %d on %llu",
+ (int)btrfs_header_level(eb), eb->start);
ret = -EIO;
goto err;
}
@@ -779,7 +773,7 @@ static void run_one_async_done(struct btrfs_work *work)
return;
}
- async->submit_bio_done(async->private_data, async->bio, async->mirror_num);
+ btrfs_submit_bio_done(async->private_data, async->bio, async->mirror_num);
}
static void run_one_async_free(struct btrfs_work *work)
@@ -793,8 +787,7 @@ static void run_one_async_free(struct btrfs_work *work)
blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset, void *private_data,
- extent_submit_bio_start_t *submit_bio_start,
- extent_submit_bio_done_t *submit_bio_done)
+ extent_submit_bio_start_t *submit_bio_start)
{
struct async_submit_bio *async;
@@ -803,16 +796,13 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
return BLK_STS_RESOURCE;
async->private_data = private_data;
- async->fs_info = fs_info;
async->bio = bio;
async->mirror_num = mirror_num;
async->submit_bio_start = submit_bio_start;
- async->submit_bio_done = submit_bio_done;
btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
run_one_async_done, run_one_async_free);
- async->bio_flags = bio_flags;
async->bio_offset = bio_offset;
async->status = 0;
@@ -851,24 +841,6 @@ static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
return btree_csum_one_bio(bio);
}
-static blk_status_t btree_submit_bio_done(void *private_data, struct bio *bio,
- int mirror_num)
-{
- struct inode *inode = private_data;
- blk_status_t ret;
-
- /*
- * when we're called for a write, we're already in the async
- * submission context. Just jump into btrfs_map_bio
- */
- ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
- if (ret) {
- bio->bi_status = ret;
- bio_endio(bio);
- }
- return ret;
-}
-
static int check_async_write(struct btrfs_inode *bi)
{
if (atomic_read(&bi->sync_writers))
@@ -911,8 +883,7 @@ static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
*/
ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
bio_offset, private_data,
- btree_submit_bio_start,
- btree_submit_bio_done);
+ btree_submit_bio_start);
}
if (ret)
@@ -961,8 +932,9 @@ static int btree_writepages(struct address_space *mapping,
fs_info = BTRFS_I(mapping->host)->root->fs_info;
/* this is a bit racy, but that's ok */
- ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
- BTRFS_DIRTY_METADATA_THRESH);
+ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
+ BTRFS_DIRTY_METADATA_THRESH,
+ fs_info->dirty_metadata_batch);
if (ret < 0)
return 0;
}
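This is the reader side of the batching trade-off: __percpu_counter_compare() can trust the cheap approximate value whenever it is further from the threshold than the worst-case batching error, and only pays for the exact per-CPU sum in the ambiguous band. A sketch of that logic in the same userspace style as the batched_counter example earlier (illustrative names, not the kernel implementation):

    #define NR_CPUS 4

    struct batched_counter {
            long long total;               /* cheap approximate read */
            long long local[NR_CPUS];      /* per-CPU deltas not yet folded in */
    };

    static long long counter_sum(const struct batched_counter *c)
    {
            long long sum = c->total;

            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    sum += c->local[cpu];
            return sum;
    }

    /* Returns <0, 0 or >0, comparing the counter against rhs. */
    static int counter_compare(const struct batched_counter *c,
                               long long rhs, long long batch)
    {
            long long approx = c->total;
            long long slack = batch * NR_CPUS;   /* max approximation error */

            if (approx - rhs > slack)
                    return 1;                    /* definitely greater */
            if (rhs - approx > slack)
                    return -1;                   /* definitely smaller */

            /* too close to call from the approximation: do the exact sum */
            long long exact = counter_sum(c);

            return (exact > rhs) - (exact < rhs);
    }

    int main(void)
    {
            struct batched_counter c = { .total = 1000 };

            return counter_compare(&c, 0, 16) != 1;  /* far above: approx wins */
    }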
@@ -1181,7 +1153,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
root->highest_objectid = 0;
root->nr_delalloc_inodes = 0;
root->nr_ordered_extents = 0;
- root->name = NULL;
root->inode_tree = RB_ROOT;
INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
root->block_rsv = NULL;
@@ -1292,15 +1263,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
goto fail;
}
- memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
- btrfs_set_header_bytenr(leaf, leaf->start);
- btrfs_set_header_generation(leaf, trans->transid);
- btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(leaf, objectid);
root->node = leaf;
-
- write_extent_buffer_fsid(leaf, fs_info->fsid);
- write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
btrfs_mark_buffer_dirty(leaf);
root->commit_root = btrfs_root_node(root);
@@ -1374,14 +1337,8 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
return ERR_CAST(leaf);
}
- memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
- btrfs_set_header_bytenr(leaf, leaf->start);
- btrfs_set_header_generation(leaf, trans->transid);
- btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
root->node = leaf;
- write_extent_buffer_fsid(root->node, fs_info->fsid);
btrfs_mark_buffer_dirty(root->node);
btrfs_tree_unlock(root->node);
return root;
@@ -1546,7 +1503,7 @@ int btrfs_init_fs_root(struct btrfs_root *root)
return 0;
fail:
- /* the caller is responsible to call free_fs_root */
+ /* The caller is responsible for calling btrfs_free_fs_root */
return ret;
}
@@ -1651,14 +1608,14 @@ again:
ret = btrfs_insert_fs_root(fs_info, root);
if (ret) {
if (ret == -EEXIST) {
- free_fs_root(root);
+ btrfs_free_fs_root(root);
goto again;
}
goto fail;
}
return root;
fail:
- free_fs_root(root);
+ btrfs_free_fs_root(root);
return ERR_PTR(ret);
}
@@ -1803,7 +1760,7 @@ static int transaction_kthread(void *arg)
struct btrfs_trans_handle *trans;
struct btrfs_transaction *cur;
u64 transid;
- unsigned long now;
+ time64_t now;
unsigned long delay;
bool cannot_commit;
@@ -1819,7 +1776,7 @@ static int transaction_kthread(void *arg)
goto sleep;
}
- now = get_seconds();
+ now = ktime_get_seconds();
if (cur->state < TRANS_STATE_BLOCKED &&
!test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
(now < cur->start_time ||
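Note that this series replaces get_seconds() with two different helpers, and the split is deliberate: the dev-replace timestamps converted earlier are wall-clock dates persisted to disk, so they use ktime_get_real_seconds(), while transaction_kthread only measures how long a transaction has been open and uses ktime_get_seconds(), a monotonic clock that cannot jump when the system time is set. A userspace analogue with POSIX clocks:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec mono, real;

            clock_gettime(CLOCK_MONOTONIC, &mono);  /* for "how long since" */
            clock_gettime(CLOCK_REALTIME, &real);   /* for "what date is it" */

            printf("monotonic: %lld s, realtime: %lld s\n",
                   (long long)mono.tv_sec, (long long)real.tv_sec);
            return 0;
    }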
@@ -2196,8 +2153,6 @@ static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
{
- fs_info->dev_replace.lock_owner = 0;
- atomic_set(&fs_info->dev_replace.nesting_level, 0);
mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
rwlock_init(&fs_info->dev_replace.lock);
atomic_set(&fs_info->dev_replace.read_locks, 0);
@@ -3075,6 +3030,13 @@ retry_root_backup:
fs_info->generation = generation;
fs_info->last_trans_committed = generation;
+ ret = btrfs_verify_dev_extents(fs_info);
+ if (ret) {
+ btrfs_err(fs_info,
+ "failed to verify dev extents against chunks: %d",
+ ret);
+ goto fail_block_groups;
+ }
ret = btrfs_recover_balance(fs_info);
if (ret) {
btrfs_err(fs_info, "failed to recover balance: %d", ret);
@@ -3875,10 +3837,10 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
__btrfs_remove_free_space_cache(root->free_ino_pinned);
if (root->free_ino_ctl)
__btrfs_remove_free_space_cache(root->free_ino_ctl);
- free_fs_root(root);
+ btrfs_free_fs_root(root);
}
-static void free_fs_root(struct btrfs_root *root)
+void btrfs_free_fs_root(struct btrfs_root *root)
{
iput(root->ino_cache_inode);
WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
@@ -3890,15 +3852,9 @@ static void free_fs_root(struct btrfs_root *root)
free_extent_buffer(root->commit_root);
kfree(root->free_ino_ctl);
kfree(root->free_ino_pinned);
- kfree(root->name);
btrfs_put_fs_root(root);
}
-void btrfs_free_fs_root(struct btrfs_root *root)
-{
- free_fs_root(root);
-}
-
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
u64 root_objectid = 0;
@@ -4104,10 +4060,10 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
* This is a fast path so only do this check if we have sanity tests
- * enabled. Normal people shouldn't be marking dummy buffers as dirty
+ * enabled. Normal people shouldn't be using unmapped buffers as dirty
* outside of the sanity tests.
*/
- if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
+ if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
return;
#endif
root = BTRFS_I(buf->pages[0]->mapping->host)->root;
@@ -4150,8 +4106,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
if (flush_delayed)
btrfs_balance_delayed_items(fs_info);
- ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
- BTRFS_DIRTY_METADATA_THRESH);
+ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
+ BTRFS_DIRTY_METADATA_THRESH,
+ fs_info->dirty_metadata_batch);
if (ret > 0) {
balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
}
@@ -4563,21 +4520,11 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
return 0;
}
-static struct btrfs_fs_info *btree_fs_info(void *private_data)
-{
- struct inode *inode = private_data;
- return btrfs_sb(inode->i_sb);
-}
-
static const struct extent_io_ops btree_extent_io_ops = {
/* mandatory callbacks */
.submit_bio_hook = btree_submit_bio_hook,
.readpage_end_io_hook = btree_readpage_end_io_hook,
- /* note we're sharing with inode.c for the merge bio hook */
- .merge_bio_hook = btrfs_merge_bio_hook,
.readpage_io_failed_hook = btree_io_failed_hook,
- .set_range_writeback = btrfs_set_range_writeback,
- .tree_fs_info = btree_fs_info,
/* optional callbacks */
};
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 1a3d277b027b..4cccba22640f 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -120,8 +120,9 @@ blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset, void *private_data,
- extent_submit_bio_start_t *submit_bio_start,
- extent_submit_bio_done_t *submit_bio_done);
+ extent_submit_bio_start_t *submit_bio_start);
+blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
+ int mirror_num);
int btrfs_write_tree_block(struct extent_buffer *buf);
void btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3d9fe58c0080..de6f75f5547b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -52,24 +52,21 @@ enum {
};
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_ref_node *node, u64 parent,
- u64 root_objectid, u64 owner_objectid,
- u64 owner_offset, int refs_to_drop,
- struct btrfs_delayed_extent_op *extra_op);
+ struct btrfs_delayed_ref_node *node, u64 parent,
+ u64 root_objectid, u64 owner_objectid,
+ u64 owner_offset, int refs_to_drop,
+ struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
struct extent_buffer *leaf,
struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op);
-static int do_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 flags,
+static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
int force);
static int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key);
@@ -220,9 +217,9 @@ static int add_excluded_extent(struct btrfs_fs_info *fs_info,
return 0;
}
-static void free_excluded_extents(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache)
+static void free_excluded_extents(struct btrfs_block_group_cache *cache)
{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
u64 start, end;
start = cache->key.objectid;
@@ -234,9 +231,9 @@ static void free_excluded_extents(struct btrfs_fs_info *fs_info,
start, end, EXTENT_UPTODATE);
}
-static int exclude_super_stripes(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache)
+static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
u64 bytenr;
u64 *logical;
int stripe_len;
@@ -558,7 +555,7 @@ static noinline void caching_thread(struct btrfs_work *work)
caching_ctl->progress = (u64)-1;
up_read(&fs_info->commit_root_sem);
- free_excluded_extents(fs_info, block_group);
+ free_excluded_extents(block_group);
mutex_unlock(&caching_ctl->mutex);
wake_up(&caching_ctl->wait);
@@ -666,7 +663,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
wake_up(&caching_ctl->wait);
if (ret == 1) {
put_caching_control(caching_ctl);
- free_excluded_extents(fs_info, cache);
+ free_excluded_extents(cache);
return 0;
}
} else {
@@ -758,7 +755,8 @@ static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
space_info = __find_space_info(fs_info, flags);
ASSERT(space_info);
- percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
+ percpu_counter_add_batch(&space_info->total_bytes_pinned, num_bytes,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH);
}
/*
@@ -870,18 +868,16 @@ search_again:
num_refs = btrfs_extent_refs(leaf, ei);
extent_flags = btrfs_extent_flags(leaf, ei);
} else {
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- struct btrfs_extent_item_v0 *ei0;
- BUG_ON(item_size != sizeof(*ei0));
- ei0 = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_item_v0);
- num_refs = btrfs_extent_refs_v0(leaf, ei0);
- /* FIXME: this isn't correct for data */
- extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
-#else
- BUG();
-#endif
+ ret = -EINVAL;
+ btrfs_print_v0_err(fs_info);
+ if (trans)
+ btrfs_abort_transaction(trans, ret);
+ else
+ btrfs_handle_fs_error(fs_info, ret, NULL);
+
+ goto out_free;
}
+
BUG_ON(num_refs == 0);
} else {
num_refs = 0;
@@ -1039,89 +1035,6 @@ out_free:
* tree block info structure.
*/
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
- u64 owner, u32 extra_size)
-{
- struct btrfs_root *root = fs_info->extent_root;
- struct btrfs_extent_item *item;
- struct btrfs_extent_item_v0 *ei0;
- struct btrfs_extent_ref_v0 *ref0;
- struct btrfs_tree_block_info *bi;
- struct extent_buffer *leaf;
- struct btrfs_key key;
- struct btrfs_key found_key;
- u32 new_size = sizeof(*item);
- u64 refs;
- int ret;
-
- leaf = path->nodes[0];
- BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
-
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- ei0 = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_item_v0);
- refs = btrfs_extent_refs_v0(leaf, ei0);
-
- if (owner == (u64)-1) {
- while (1) {
- if (path->slots[0] >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- return ret;
- BUG_ON(ret > 0); /* Corruption */
- leaf = path->nodes[0];
- }
- btrfs_item_key_to_cpu(leaf, &found_key,
- path->slots[0]);
- BUG_ON(key.objectid != found_key.objectid);
- if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
- path->slots[0]++;
- continue;
- }
- ref0 = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_ref_v0);
- owner = btrfs_ref_objectid_v0(leaf, ref0);
- break;
- }
- }
- btrfs_release_path(path);
-
- if (owner < BTRFS_FIRST_FREE_OBJECTID)
- new_size += sizeof(*bi);
-
- new_size -= sizeof(*ei0);
- ret = btrfs_search_slot(trans, root, &key, path,
- new_size + extra_size, 1);
- if (ret < 0)
- return ret;
- BUG_ON(ret); /* Corruption */
-
- btrfs_extend_item(fs_info, path, new_size);
-
- leaf = path->nodes[0];
- item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
- btrfs_set_extent_refs(leaf, item, refs);
- /* FIXME: get real generation */
- btrfs_set_extent_generation(leaf, item, 0);
- if (owner < BTRFS_FIRST_FREE_OBJECTID) {
- btrfs_set_extent_flags(leaf, item,
- BTRFS_EXTENT_FLAG_TREE_BLOCK |
- BTRFS_BLOCK_FLAG_FULL_BACKREF);
- bi = (struct btrfs_tree_block_info *)(item + 1);
- /* FIXME: get first key of the block */
- memzero_extent_buffer(leaf, (unsigned long)bi, sizeof(*bi));
- btrfs_set_tree_block_level(leaf, bi, (int)owner);
- } else {
- btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
- }
- btrfs_mark_buffer_dirty(leaf);
- return 0;
-}
-#endif
-
/*
* is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
* is_data == BTRFS_REF_TYPE_DATA, data type is required,
@@ -1216,13 +1129,12 @@ static int match_extent_data_ref(struct extent_buffer *leaf,
}
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid,
u64 owner, u64 offset)
{
- struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_root *root = trans->fs_info->extent_root;
struct btrfs_key key;
struct btrfs_extent_data_ref *ref;
struct extent_buffer *leaf;
@@ -1251,17 +1163,6 @@ again:
if (parent) {
if (!ret)
return 0;
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- key.type = BTRFS_EXTENT_REF_V0_KEY;
- btrfs_release_path(path);
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0) {
- err = ret;
- goto fail;
- }
- if (!ret)
- return 0;
-#endif
goto fail;
}
@@ -1304,13 +1205,12 @@ fail:
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid, u64 owner,
u64 offset, int refs_to_add)
{
- struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_root *root = trans->fs_info->extent_root;
struct btrfs_key key;
struct extent_buffer *leaf;
u32 size;
@@ -1384,7 +1284,6 @@ fail:
}
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int refs_to_drop, int *last_ref)
{
@@ -1406,13 +1305,10 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
ref2 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_shared_data_ref);
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
- struct btrfs_extent_ref_v0 *ref0;
- ref0 = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_ref_v0);
- num_refs = btrfs_ref_count_v0(leaf, ref0);
-#endif
+ } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
+ btrfs_print_v0_err(trans->fs_info);
+ btrfs_abort_transaction(trans, -EINVAL);
+ return -EINVAL;
} else {
BUG();
}
@@ -1421,21 +1317,13 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
num_refs -= refs_to_drop;
if (num_refs == 0) {
- ret = btrfs_del_item(trans, fs_info->extent_root, path);
+ ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
*last_ref = 1;
} else {
if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- else {
- struct btrfs_extent_ref_v0 *ref0;
- ref0 = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_ref_v0);
- btrfs_set_ref_count_v0(leaf, ref0, num_refs);
- }
-#endif
btrfs_mark_buffer_dirty(leaf);
}
return ret;
@@ -1453,6 +1341,8 @@ static noinline u32 extent_data_ref_count(struct btrfs_path *path,
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+
+ BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
if (iref) {
/*
* If type is invalid, we should have bailed out earlier than
@@ -1475,13 +1365,6 @@ static noinline u32 extent_data_ref_count(struct btrfs_path *path,
ref2 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_shared_data_ref);
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
- struct btrfs_extent_ref_v0 *ref0;
- ref0 = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_ref_v0);
- num_refs = btrfs_ref_count_v0(leaf, ref0);
-#endif
} else {
WARN_ON(1);
}
@@ -1489,12 +1372,11 @@ static noinline u32 extent_data_ref_count(struct btrfs_path *path,
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid)
{
- struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_root *root = trans->fs_info->extent_root;
struct btrfs_key key;
int ret;
@@ -1510,20 +1392,10 @@ static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)
ret = -ENOENT;
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (ret == -ENOENT && parent) {
- btrfs_release_path(path);
- key.type = BTRFS_EXTENT_REF_V0_KEY;
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret > 0)
- ret = -ENOENT;
- }
-#endif
return ret;
}
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid)
@@ -1540,7 +1412,7 @@ static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
key.offset = root_objectid;
}
- ret = btrfs_insert_empty_item(trans, fs_info->extent_root,
+ ret = btrfs_insert_empty_item(trans, trans->fs_info->extent_root,
path, &key, 0);
btrfs_release_path(path);
return ret;
@@ -1599,13 +1471,13 @@ static int find_next_key(struct btrfs_path *path, int level,
*/
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_extent_inline_ref **ref_ret,
u64 bytenr, u64 num_bytes,
u64 parent, u64 root_objectid,
u64 owner, u64 offset, int insert)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = fs_info->extent_root;
struct btrfs_key key;
struct extent_buffer *leaf;
@@ -1635,8 +1507,8 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
extra_size = -1;
/*
- * Owner is our parent level, so we can just add one to get the level
- * for the block we are interested in.
+ * Owner is our level, so we can just add one to get the level for the
+ * block we are interested in.
*/
if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
key.type = BTRFS_METADATA_ITEM_KEY;
@@ -1684,23 +1556,12 @@ again:
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (item_size < sizeof(*ei)) {
- if (!insert) {
- err = -ENOENT;
- goto out;
- }
- ret = convert_extent_item_v0(trans, fs_info, path, owner,
- extra_size);
- if (ret < 0) {
- err = ret;
- goto out;
- }
- leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ if (unlikely(item_size < sizeof(*ei))) {
+ err = -EINVAL;
+ btrfs_print_v0_err(fs_info);
+ btrfs_abort_transaction(trans, err);
+ goto out;
}
-#endif
- BUG_ON(item_size < sizeof(*ei));
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
flags = btrfs_extent_flags(leaf, ei);
@@ -1727,7 +1588,7 @@ again:
iref = (struct btrfs_extent_inline_ref *)ptr;
type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
if (type == BTRFS_REF_TYPE_INVALID) {
- err = -EINVAL;
+ err = -EUCLEAN;
goto out;
}
@@ -1863,7 +1724,6 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_extent_inline_ref **ref_ret,
u64 bytenr, u64 num_bytes, u64 parent,
@@ -1871,9 +1731,9 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
{
int ret;
- ret = lookup_inline_extent_backref(trans, fs_info, path, ref_ret,
- bytenr, num_bytes, parent,
- root_objectid, owner, offset, 0);
+ ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
+ num_bytes, parent, root_objectid,
+ owner, offset, 0);
if (ret != -ENOENT)
return ret;
@@ -1881,12 +1741,11 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
*ref_ret = NULL;
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
- ret = lookup_tree_block_ref(trans, fs_info, path, bytenr,
- parent, root_objectid);
+ ret = lookup_tree_block_ref(trans, path, bytenr, parent,
+ root_objectid);
} else {
- ret = lookup_extent_data_ref(trans, fs_info, path, bytenr,
- parent, root_objectid, owner,
- offset);
+ ret = lookup_extent_data_ref(trans, path, bytenr, parent,
+ root_objectid, owner, offset);
}
return ret;
}
@@ -1895,14 +1754,14 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
* helper to update/remove inline back ref
*/
static noinline_for_stack
-void update_inline_extent_backref(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
+void update_inline_extent_backref(struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
int refs_to_mod,
struct btrfs_delayed_extent_op *extent_op,
int *last_ref)
{
- struct extent_buffer *leaf;
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_extent_item *ei;
struct btrfs_extent_data_ref *dref = NULL;
struct btrfs_shared_data_ref *sref = NULL;
@@ -1913,7 +1772,6 @@ void update_inline_extent_backref(struct btrfs_fs_info *fs_info,
int type;
u64 refs;
- leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
@@ -1965,7 +1823,6 @@ void update_inline_extent_backref(struct btrfs_fs_info *fs_info,
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner,
@@ -1975,15 +1832,15 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_extent_inline_ref *iref;
int ret;
- ret = lookup_inline_extent_backref(trans, fs_info, path, &iref,
- bytenr, num_bytes, parent,
- root_objectid, owner, offset, 1);
+ ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
+ num_bytes, parent, root_objectid,
+ owner, offset, 1);
if (ret == 0) {
BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
- update_inline_extent_backref(fs_info, path, iref,
- refs_to_add, extent_op, NULL);
+ update_inline_extent_backref(path, iref, refs_to_add,
+ extent_op, NULL);
} else if (ret == -ENOENT) {
- setup_inline_extent_backref(fs_info, path, iref, parent,
+ setup_inline_extent_backref(trans->fs_info, path, iref, parent,
root_objectid, owner, offset,
refs_to_add, extent_op);
ret = 0;
@@ -1992,7 +1849,6 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
}
static int insert_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 bytenr, u64 parent, u64 root_objectid,
u64 owner, u64 offset, int refs_to_add)
@@ -2000,18 +1856,17 @@ static int insert_extent_backref(struct btrfs_trans_handle *trans,
int ret;
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
BUG_ON(refs_to_add != 1);
- ret = insert_tree_block_ref(trans, fs_info, path, bytenr,
- parent, root_objectid);
+ ret = insert_tree_block_ref(trans, path, bytenr, parent,
+ root_objectid);
} else {
- ret = insert_extent_data_ref(trans, fs_info, path, bytenr,
- parent, root_objectid,
- owner, offset, refs_to_add);
+ ret = insert_extent_data_ref(trans, path, bytenr, parent,
+ root_objectid, owner, offset,
+ refs_to_add);
}
return ret;
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
int refs_to_drop, int is_data, int *last_ref)
@@ -2020,14 +1875,14 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
BUG_ON(!is_data && refs_to_drop != 1);
if (iref) {
- update_inline_extent_backref(fs_info, path, iref,
- -refs_to_drop, NULL, last_ref);
+ update_inline_extent_backref(path, iref, -refs_to_drop, NULL,
+ last_ref);
} else if (is_data) {
- ret = remove_extent_data_ref(trans, fs_info, path, refs_to_drop,
+ ret = remove_extent_data_ref(trans, path, refs_to_drop,
last_ref);
} else {
*last_ref = 1;
- ret = btrfs_del_item(trans, fs_info->extent_root, path);
+ ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
}
return ret;
}
@@ -2185,13 +2040,13 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
owner, offset, BTRFS_ADD_DELAYED_REF);
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
- ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+ ret = btrfs_add_delayed_tree_ref(trans, bytenr,
num_bytes, parent,
root_objectid, (int)owner,
BTRFS_ADD_DELAYED_REF, NULL,
&old_ref_mod, &new_ref_mod);
} else {
- ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+ ret = btrfs_add_delayed_data_ref(trans, bytenr,
num_bytes, parent,
root_objectid, owner, offset,
0, BTRFS_ADD_DELAYED_REF,
@@ -2207,8 +2062,41 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
return ret;
}
+/*
+ * __btrfs_inc_extent_ref - insert backreference for a given extent
+ *
+ * @trans: Handle of transaction
+ *
+ * @node: The delayed ref node used to get the bytenr/length for the
+ * extent whose references are incremented.
+ *
+ * @parent: If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
+ * BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
+ * bytenr of the parent block. Since new extents are always
+ * created with indirect references, this will only be the case
+ * when relocating a shared extent. In that case, root_objectid
+ * will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must
+ * be 0
+ *
+ * @root_objectid: The id of the root where this modification has originated,
+ * this can be either one of the well-known metadata trees or
+ * the subvolume id which references this extent.
+ *
+ * @owner: For data extents it is the inode number of the owning file.
+ * For metadata extents this parameter holds the level in the
+ * tree of the extent.
+ *
+ * @offset: For metadata extents the offset is ignored and is currently
+ * always passed as 0. For data extents it is the file offset
+ * this extent belongs to.
+ *
+ * @refs_to_add: Number of references to add
+ *
+ * @extent_op: Pointer to a structure holding information necessary when
+ * updating a tree block's flags
+ *
+ */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
u64 parent, u64 root_objectid,
u64 owner, u64 offset, int refs_to_add,
@@ -2230,10 +2118,9 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
path->reada = READA_FORWARD;
path->leave_spinning = 1;
/* this will setup the path even if it fails to insert the back ref */
- ret = insert_inline_extent_backref(trans, fs_info, path, bytenr,
- num_bytes, parent, root_objectid,
- owner, offset,
- refs_to_add, extent_op);
+ ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
+ parent, root_objectid, owner,
+ offset, refs_to_add, extent_op);
if ((ret < 0 && ret != -EAGAIN) || !ret)
goto out;
@@ -2256,8 +2143,8 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
path->reada = READA_FORWARD;
path->leave_spinning = 1;
/* now insert the actual backref */
- ret = insert_extent_backref(trans, fs_info, path, bytenr, parent,
- root_objectid, owner, offset, refs_to_add);
+ ret = insert_extent_backref(trans, path, bytenr, parent, root_objectid,
+ owner, offset, refs_to_add);
if (ret)
btrfs_abort_transaction(trans, ret);
out:
@@ -2266,7 +2153,6 @@ out:
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
@@ -2283,7 +2169,7 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
ins.type = BTRFS_EXTENT_ITEM_KEY;
ref = btrfs_delayed_node_to_data_ref(node);
- trace_run_delayed_data_ref(fs_info, node, ref, node->action);
+ trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);
if (node->type == BTRFS_SHARED_DATA_REF_KEY)
parent = ref->parent;
@@ -2292,17 +2178,16 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
if (extent_op)
flags |= extent_op->flags_to_set;
- ret = alloc_reserved_file_extent(trans, fs_info,
- parent, ref_root, flags,
- ref->objectid, ref->offset,
- &ins, node->ref_mod);
+ ret = alloc_reserved_file_extent(trans, parent, ref_root,
+ flags, ref->objectid,
+ ref->offset, &ins,
+ node->ref_mod);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
- ret = __btrfs_inc_extent_ref(trans, fs_info, node, parent,
- ref_root, ref->objectid,
- ref->offset, node->ref_mod,
- extent_op);
+ ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
+ ref->objectid, ref->offset,
+ node->ref_mod, extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- ret = __btrfs_free_extent(trans, fs_info, node, parent,
+ ret = __btrfs_free_extent(trans, node, parent,
ref_root, ref->objectid,
ref->offset, node->ref_mod,
extent_op);
@@ -2331,10 +2216,10 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_extent_op *extent_op)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_key key;
struct btrfs_path *path;
struct btrfs_extent_item *ei;
@@ -2400,18 +2285,14 @@ again:
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (item_size < sizeof(*ei)) {
- ret = convert_extent_item_v0(trans, fs_info, path, (u64)-1, 0);
- if (ret < 0) {
- err = ret;
- goto out;
- }
- leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+
+ if (unlikely(item_size < sizeof(*ei))) {
+ err = -EINVAL;
+ btrfs_print_v0_err(fs_info);
+ btrfs_abort_transaction(trans, err);
+ goto out;
}
-#endif
- BUG_ON(item_size < sizeof(*ei));
+
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
__run_delayed_extent_op(extent_op, leaf, ei);
@@ -2422,7 +2303,6 @@ out:
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
@@ -2433,14 +2313,14 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
u64 ref_root = 0;
ref = btrfs_delayed_node_to_tree_ref(node);
- trace_run_delayed_tree_ref(fs_info, node, ref, node->action);
+ trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);
if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
parent = ref->parent;
ref_root = ref->root;
if (node->ref_mod != 1) {
- btrfs_err(fs_info,
+ btrfs_err(trans->fs_info,
"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
node->bytenr, node->ref_mod, node->action, ref_root,
parent);
@@ -2450,13 +2330,10 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
BUG_ON(!extent_op || !extent_op->update_flags);
ret = alloc_reserved_tree_block(trans, node, extent_op);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
- ret = __btrfs_inc_extent_ref(trans, fs_info, node,
- parent, ref_root,
- ref->level, 0, 1,
- extent_op);
+ ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
+ ref->level, 0, 1, extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- ret = __btrfs_free_extent(trans, fs_info, node,
- parent, ref_root,
+ ret = __btrfs_free_extent(trans, node, parent, ref_root,
ref->level, 0, 1, extent_op);
} else {
BUG();
@@ -2466,7 +2343,6 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
@@ -2475,18 +2351,18 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
if (trans->aborted) {
if (insert_reserved)
- btrfs_pin_extent(fs_info, node->bytenr,
+ btrfs_pin_extent(trans->fs_info, node->bytenr,
node->num_bytes, 1);
return 0;
}
if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
node->type == BTRFS_SHARED_BLOCK_REF_KEY)
- ret = run_delayed_tree_ref(trans, fs_info, node, extent_op,
+ ret = run_delayed_tree_ref(trans, node, extent_op,
insert_reserved);
else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
node->type == BTRFS_SHARED_DATA_REF_KEY)
- ret = run_delayed_data_ref(trans, fs_info, node, extent_op,
+ ret = run_delayed_data_ref(trans, node, extent_op,
insert_reserved);
else
BUG();
@@ -2528,7 +2404,6 @@ static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_ref
}
static int cleanup_extent_op(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *head)
{
struct btrfs_delayed_extent_op *extent_op = head->extent_op;
@@ -2542,21 +2417,22 @@ static int cleanup_extent_op(struct btrfs_trans_handle *trans,
return 0;
}
spin_unlock(&head->lock);
- ret = run_delayed_extent_op(trans, fs_info, head, extent_op);
+ ret = run_delayed_extent_op(trans, head, extent_op);
btrfs_free_delayed_extent_op(extent_op);
return ret ? ret : 1;
}
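
The "return ret ? ret : 1" above encodes a tri-state result the caller
relies on: negative means error, 1 means the extent op ran (and
head->lock was dropped), 0 means there was nothing to do. A tiny sketch
of the convention, with made-up inputs:

#include <stdio.h>

/*
 *  < 0 -> error to propagate
 *    0 -> no extent op pending, lock still held
 *    1 -> op executed, lock released; caller must re-lock and loop
 */
static int cleanup_step(int have_op, int op_error)
{
        if (!have_op)
                return 0;
        if (op_error)
                return op_error; /* e.g. -EIO from running the op */
        return 1;
}

int main(void)
{
        printf("no op:  %d\n", cleanup_step(0, 0));
        printf("ran:    %d\n", cleanup_step(1, 0));
        printf("failed: %d\n", cleanup_step(1, -5));
        return 0;
}
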
static int cleanup_ref_head(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *head)
{
+
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs;
int ret;
delayed_refs = &trans->transaction->delayed_refs;
- ret = cleanup_extent_op(trans, fs_info, head);
+ ret = cleanup_extent_op(trans, head);
if (ret < 0) {
unselect_delayed_ref_head(delayed_refs, head);
btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
@@ -2598,8 +2474,9 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
flags = BTRFS_BLOCK_GROUP_METADATA;
space_info = __find_space_info(fs_info, flags);
ASSERT(space_info);
- percpu_counter_add(&space_info->total_bytes_pinned,
- -head->num_bytes);
+ percpu_counter_add_batch(&space_info->total_bytes_pinned,
+ -head->num_bytes,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH);
if (head->is_data) {
spin_lock(&delayed_refs->lock);
@@ -2705,7 +2582,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
* up and move on to the next ref_head.
*/
if (!ref) {
- ret = cleanup_ref_head(trans, fs_info, locked_ref);
+ ret = cleanup_ref_head(trans, locked_ref);
if (ret > 0) {
/* We dropped our lock, we need to loop. */
ret = 0;
@@ -2752,7 +2629,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
locked_ref->extent_op = NULL;
spin_unlock(&locked_ref->lock);
- ret = run_one_delayed_ref(trans, fs_info, ref, extent_op,
+ ret = run_one_delayed_ref(trans, ref, extent_op,
must_insert_reserved);
btrfs_free_delayed_extent_op(extent_op);
@@ -3227,12 +3104,6 @@ static noinline int check_committed_ref(struct btrfs_root *root,
ret = 1;
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (item_size < sizeof(*ei)) {
- WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
- goto out;
- }
-#endif
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
if (item_size != sizeof(*ei) +
@@ -4060,11 +3931,7 @@ static void update_space_info(struct btrfs_fs_info *info, u64 flags,
struct btrfs_space_info *found;
int factor;
- if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10))
- factor = 2;
- else
- factor = 1;
+ factor = btrfs_bg_type_to_factor(flags);
found = __find_space_info(info, flags);
ASSERT(found);
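
btrfs_bg_type_to_factor() is introduced elsewhere in this series; its
behaviour can be inferred from the open-coded checks it replaces here and
below. A plausible standalone rendering (the flag bit values are
illustrative, the real ones live in the btrfs headers):

#include <stdio.h>

#define BG_RAID1  (1ULL << 4) /* illustrative bit assignments */
#define BG_DUP    (1ULL << 5)
#define BG_RAID10 (1ULL << 6)

static int bg_type_to_factor(unsigned long long flags)
{
        /* mirrored profiles keep two copies of every byte on disk */
        if (flags & (BG_DUP | BG_RAID1 | BG_RAID10))
                return 2;
        return 1;
}

int main(void)
{
        printf("raid1:  factor %d\n", bg_type_to_factor(BG_RAID1));
        printf("single: factor %d\n", bg_type_to_factor(0));
        return 0;
}
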
@@ -4289,7 +4156,7 @@ again:
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = do_chunk_alloc(trans, fs_info, alloc_target,
+ ret = do_chunk_alloc(trans, alloc_target,
CHUNK_ALLOC_NO_FORCE);
btrfs_end_transaction(trans);
if (ret < 0) {
@@ -4309,9 +4176,10 @@ again:
* allocation, and no removed chunk in current transaction,
* don't bother committing the transaction.
*/
- have_pinned_space = percpu_counter_compare(
+ have_pinned_space = __percpu_counter_compare(
&data_sinfo->total_bytes_pinned,
- used + bytes - data_sinfo->total_bytes);
+ used + bytes - data_sinfo->total_bytes,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH);
spin_unlock(&data_sinfo->lock);
/* commit the current transaction and try again */
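
The switch from percpu_counter_compare() to __percpu_counter_compare()
with BTRFS_TOTAL_BYTES_PINNED_BATCH pairs with the
percpu_counter_add_batch() calls elsewhere in this patch: updates stay in
per-cpu deltas until they exceed the batch, so the shared value can drift
by up to batch * nr_cpus, and comparisons must account for that. A toy
single-threaded model of the idea (not the kernel's lib/percpu_counter
implementation):

#include <stdio.h>

#define NR_CPUS 4

struct pcpu_counter {
        long long global;
        long long local[NR_CPUS]; /* per-cpu deltas not yet folded in */
};

static void counter_add_batch(struct pcpu_counter *c, int cpu,
                              long long amount, long long batch)
{
        c->local[cpu] += amount;
        if (c->local[cpu] >= batch || c->local[cpu] <= -batch) {
                c->global += c->local[cpu]; /* fold into the shared sum */
                c->local[cpu] = 0;
        }
}

/* precise compare: fold all deltas before deciding, as the kernel's
 * slow path does when the batched value is too close to call */
static int counter_compare(const struct pcpu_counter *c, long long rhs)
{
        long long sum = c->global;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                sum += c->local[cpu];
        return (sum > rhs) - (sum < rhs);
}

int main(void)
{
        struct pcpu_counter c = { 0 };

        counter_add_batch(&c, 0, 100, 1024);  /* stays per-cpu */
        counter_add_batch(&c, 1, 2048, 1024); /* exceeds batch, folds */
        printf("global=%lld precise(2000)=%d\n",
               c.global, counter_compare(&c, 2000));
        return 0;
}
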
@@ -4358,7 +4226,7 @@ commit_trans:
data_sinfo->flags, bytes, 1);
spin_unlock(&data_sinfo->lock);
- return ret;
+ return 0;
}
int btrfs_check_data_free_space(struct inode *inode,
@@ -4511,9 +4379,9 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
* for allocating a chunk, otherwise if it's false, reserve space necessary for
* removing a chunk.
*/
-void check_system_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 type)
+void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_space_info *info;
u64 left;
u64 thresh;
@@ -4552,7 +4420,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
* the paths we visit in the chunk tree (they were already COWed
* or created in the current transaction for example).
*/
- ret = btrfs_alloc_chunk(trans, fs_info, flags);
+ ret = btrfs_alloc_chunk(trans, flags);
}
if (!ret) {
@@ -4573,11 +4441,13 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
* - return 1 if it successfully allocates a chunk,
* - return errors including -ENOSPC otherwise.
*/
-static int do_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 flags, int force)
+static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+ int force)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_space_info *space_info;
- int wait_for_alloc = 0;
+ bool wait_for_alloc = false;
+ bool should_alloc = false;
int ret = 0;
/* Don't re-enter if we're already allocating a chunk */
@@ -4587,45 +4457,44 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
space_info = __find_space_info(fs_info, flags);
ASSERT(space_info);
-again:
- spin_lock(&space_info->lock);
- if (force < space_info->force_alloc)
- force = space_info->force_alloc;
- if (space_info->full) {
- if (should_alloc_chunk(fs_info, space_info, force))
- ret = -ENOSPC;
- else
- ret = 0;
- spin_unlock(&space_info->lock);
- return ret;
- }
-
- if (!should_alloc_chunk(fs_info, space_info, force)) {
- spin_unlock(&space_info->lock);
- return 0;
- } else if (space_info->chunk_alloc) {
- wait_for_alloc = 1;
- } else {
- space_info->chunk_alloc = 1;
- }
-
- spin_unlock(&space_info->lock);
-
- mutex_lock(&fs_info->chunk_mutex);
+ do {
+ spin_lock(&space_info->lock);
+ if (force < space_info->force_alloc)
+ force = space_info->force_alloc;
+ should_alloc = should_alloc_chunk(fs_info, space_info, force);
+ if (space_info->full) {
+ /* No more free physical space */
+ if (should_alloc)
+ ret = -ENOSPC;
+ else
+ ret = 0;
+ spin_unlock(&space_info->lock);
+ return ret;
+ } else if (!should_alloc) {
+ spin_unlock(&space_info->lock);
+ return 0;
+ } else if (space_info->chunk_alloc) {
+ /*
+ * Someone is already allocating, so we need to block
+ * until that allocation is finished and then loop to
+ * recheck if we should continue with our allocation
+ * attempt.
+ */
+ wait_for_alloc = true;
+ spin_unlock(&space_info->lock);
+ mutex_lock(&fs_info->chunk_mutex);
+ mutex_unlock(&fs_info->chunk_mutex);
+ } else {
+ /* Proceed with allocation */
+ space_info->chunk_alloc = 1;
+ wait_for_alloc = false;
+ spin_unlock(&space_info->lock);
+ }
- /*
- * The chunk_mutex is held throughout the entirety of a chunk
- * allocation, so once we've acquired the chunk_mutex we know that the
- * other guy is done and we need to recheck and see if we should
- * allocate.
- */
- if (wait_for_alloc) {
- mutex_unlock(&fs_info->chunk_mutex);
- wait_for_alloc = 0;
cond_resched();
- goto again;
- }
+ } while (wait_for_alloc);
+ mutex_lock(&fs_info->chunk_mutex);
trans->allocating_chunk = true;
/*
@@ -4651,9 +4520,9 @@ again:
* Check if we have enough space in SYSTEM chunk because we may need
* to update devices.
*/
- check_system_chunk(trans, fs_info, flags);
+ check_system_chunk(trans, flags);
- ret = btrfs_alloc_chunk(trans, fs_info, flags);
+ ret = btrfs_alloc_chunk(trans, flags);
trans->allocating_chunk = false;
spin_lock(&space_info->lock);
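
The rewritten loop uses chunk_mutex purely as a barrier: a task that sees
another allocation in flight takes the mutex and immediately drops it,
which blocks exactly until the allocator (which holds the mutex for the
whole allocation) finishes, and then re-evaluates the situation. A small
pthreads model of that lock/unlock-as-wait idiom:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static int space_full;

static void *allocator(void *arg)
{
        pthread_mutex_lock(&chunk_mutex);
        space_full = 1; /* stand-in for allocating a chunk */
        pthread_mutex_unlock(&chunk_mutex);
        return arg;
}

static void wait_for_allocator(void)
{
        pthread_mutex_lock(&chunk_mutex);   /* blocks while allocating */
        pthread_mutex_unlock(&chunk_mutex); /* barrier only, no work */
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, allocator, NULL);
        wait_for_allocator(); /* then loop and recheck, as in the patch */
        pthread_join(t, NULL);
        printf("rechecked: space_full=%d\n", space_full);
        return 0;
}
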
@@ -4703,6 +4572,7 @@ static int can_overcommit(struct btrfs_fs_info *fs_info,
u64 space_size;
u64 avail;
u64 used;
+ int factor;
/* Don't overcommit when in mixed mode. */
if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
@@ -4737,10 +4607,8 @@ static int can_overcommit(struct btrfs_fs_info *fs_info,
* doesn't include the parity drive, so we don't have to
* change the math
*/
- if (profile & (BTRFS_BLOCK_GROUP_DUP |
- BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10))
- avail >>= 1;
+ factor = btrfs_bg_type_to_factor(profile);
+ avail = div_u64(avail, factor);
/*
* If we aren't flushing all things, let us overcommit up to
@@ -4912,8 +4780,9 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
return 0;
/* See if there is enough pinned space to make this reservation */
- if (percpu_counter_compare(&space_info->total_bytes_pinned,
- bytes) >= 0)
+ if (__percpu_counter_compare(&space_info->total_bytes_pinned,
+ bytes,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
goto commit;
/*
@@ -4930,8 +4799,9 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
bytes -= delayed_rsv->size;
spin_unlock(&delayed_rsv->lock);
- if (percpu_counter_compare(&space_info->total_bytes_pinned,
- bytes) < 0) {
+ if (__percpu_counter_compare(&space_info->total_bytes_pinned,
+ bytes,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0) {
return -ENOSPC;
}
@@ -4984,7 +4854,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
ret = PTR_ERR(trans);
break;
}
- ret = do_chunk_alloc(trans, fs_info,
+ ret = do_chunk_alloc(trans,
btrfs_metadata_alloc_profile(fs_info),
CHUNK_ALLOC_NO_FORCE);
btrfs_end_transaction(trans);
@@ -5659,11 +5529,6 @@ void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
kfree(rsv);
}
-void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
-{
- kfree(rsv);
-}
-
int btrfs_block_rsv_add(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush)
@@ -6019,7 +5884,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned nr_extents;
enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
int ret = 0;
@@ -6092,7 +5957,7 @@ out_fail:
void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
bool qgroup_free)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
spin_lock(&inode->lock);
@@ -6121,7 +5986,7 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
bool qgroup_free)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned num_extents;
spin_lock(&inode->lock);
@@ -6219,12 +6084,8 @@ static int update_block_group(struct btrfs_trans_handle *trans,
cache = btrfs_lookup_block_group(info, bytenr);
if (!cache)
return -ENOENT;
- if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
- BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10))
- factor = 2;
- else
- factor = 1;
+ factor = btrfs_bg_type_to_factor(cache->flags);
+
/*
* If this block group has free space cache written out, we
* need to make sure to load it if we are removing space. This
@@ -6268,8 +6129,9 @@ static int update_block_group(struct btrfs_trans_handle *trans,
trace_btrfs_space_reservation(info, "pinned",
cache->space_info->flags,
num_bytes, 1);
- percpu_counter_add(&cache->space_info->total_bytes_pinned,
- num_bytes);
+ percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
+ num_bytes,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH);
set_extent_dirty(info->pinned_extents,
bytenr, bytenr + num_bytes - 1,
GFP_NOFS | __GFP_NOFAIL);
@@ -6279,7 +6141,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
if (list_empty(&cache->dirty_list)) {
list_add_tail(&cache->dirty_list,
&trans->transaction->dirty_bgs);
- trans->transaction->num_dirty_bgs++;
+ trans->transaction->num_dirty_bgs++;
btrfs_get_block_group(cache);
}
spin_unlock(&trans->transaction->dirty_bgs_lock);
@@ -6290,16 +6152,8 @@ static int update_block_group(struct btrfs_trans_handle *trans,
* dirty list to avoid races between cleaner kthread and space
* cache writeout.
*/
- if (!alloc && old_val == 0) {
- spin_lock(&info->unused_bgs_lock);
- if (list_empty(&cache->bg_list)) {
- btrfs_get_block_group(cache);
- trace_btrfs_add_unused_block_group(cache);
- list_add_tail(&cache->bg_list,
- &info->unused_bgs);
- }
- spin_unlock(&info->unused_bgs_lock);
- }
+ if (!alloc && old_val == 0)
+ btrfs_mark_bg_unused(cache);
btrfs_put_block_group(cache);
total -= num_bytes;
@@ -6347,7 +6201,8 @@ static int pin_down_extent(struct btrfs_fs_info *fs_info,
trace_btrfs_space_reservation(fs_info, "pinned",
cache->space_info->flags, num_bytes, 1);
- percpu_counter_add(&cache->space_info->total_bytes_pinned, num_bytes);
+ percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
+ num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
set_extent_dirty(fs_info->pinned_extents, bytenr,
bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
return 0;
@@ -6711,7 +6566,8 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
trace_btrfs_space_reservation(fs_info, "pinned",
space_info->flags, len, 0);
space_info->max_extent_size = 0;
- percpu_counter_add(&space_info->total_bytes_pinned, -len);
+ percpu_counter_add_batch(&space_info->total_bytes_pinned,
+ -len, BTRFS_TOTAL_BYTES_PINNED_BATCH);
if (cache->ro) {
space_info->bytes_readonly += len;
readonly = true;
@@ -6815,12 +6671,12 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
}
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *info,
- struct btrfs_delayed_ref_node *node, u64 parent,
- u64 root_objectid, u64 owner_objectid,
- u64 owner_offset, int refs_to_drop,
- struct btrfs_delayed_extent_op *extent_op)
+ struct btrfs_delayed_ref_node *node, u64 parent,
+ u64 root_objectid, u64 owner_objectid,
+ u64 owner_offset, int refs_to_drop,
+ struct btrfs_delayed_extent_op *extent_op)
{
+ struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_key key;
struct btrfs_path *path;
struct btrfs_root *extent_root = info->extent_root;
@@ -6852,9 +6708,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
if (is_data)
skinny_metadata = false;
- ret = lookup_extent_backref(trans, info, path, &iref,
- bytenr, num_bytes, parent,
- root_objectid, owner_objectid,
+ ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
+ parent, root_objectid, owner_objectid,
owner_offset);
if (ret == 0) {
extent_slot = path->slots[0];
@@ -6877,14 +6732,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
break;
extent_slot--;
}
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
- if (found_extent && item_size < sizeof(*ei))
- found_extent = 0;
-#endif
+
if (!found_extent) {
BUG_ON(iref);
- ret = remove_extent_backref(trans, info, path, NULL,
+ ret = remove_extent_backref(trans, path, NULL,
refs_to_drop,
is_data, &last_ref);
if (ret) {
@@ -6957,42 +6808,12 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, extent_slot);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (item_size < sizeof(*ei)) {
- BUG_ON(found_extent || extent_slot != path->slots[0]);
- ret = convert_extent_item_v0(trans, info, path, owner_objectid,
- 0);
- if (ret < 0) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
-
- btrfs_release_path(path);
- path->leave_spinning = 1;
-
- key.objectid = bytenr;
- key.type = BTRFS_EXTENT_ITEM_KEY;
- key.offset = num_bytes;
-
- ret = btrfs_search_slot(trans, extent_root, &key, path,
- -1, 1);
- if (ret) {
- btrfs_err(info,
- "umm, got %d back from search, was looking for %llu",
- ret, bytenr);
- btrfs_print_leaf(path->nodes[0]);
- }
- if (ret < 0) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
-
- extent_slot = path->slots[0];
- leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, extent_slot);
+ if (unlikely(item_size < sizeof(*ei))) {
+ ret = -EINVAL;
+ btrfs_print_v0_err(info);
+ btrfs_abort_transaction(trans, ret);
+ goto out;
}
-#endif
- BUG_ON(item_size < sizeof(*ei));
ei = btrfs_item_ptr(leaf, extent_slot,
struct btrfs_extent_item);
if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
@@ -7028,9 +6849,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
}
if (found_extent) {
- ret = remove_extent_backref(trans, info, path,
- iref, refs_to_drop,
- is_data, &last_ref);
+ ret = remove_extent_backref(trans, path, iref,
+ refs_to_drop, is_data,
+ &last_ref);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -7172,7 +6993,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
root->root_key.objectid,
btrfs_header_level(buf), 0,
BTRFS_DROP_DELAYED_REF);
- ret = btrfs_add_delayed_tree_ref(fs_info, trans, buf->start,
+ ret = btrfs_add_delayed_tree_ref(trans, buf->start,
buf->len, parent,
root->root_key.objectid,
btrfs_header_level(buf),
@@ -7251,13 +7072,13 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
old_ref_mod = new_ref_mod = 0;
ret = 0;
} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
- ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+ ret = btrfs_add_delayed_tree_ref(trans, bytenr,
num_bytes, parent,
root_objectid, (int)owner,
BTRFS_DROP_DELAYED_REF, NULL,
&old_ref_mod, &new_ref_mod);
} else {
- ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+ ret = btrfs_add_delayed_data_ref(trans, bytenr,
num_bytes, parent,
root_objectid, owner, offset,
0, BTRFS_DROP_DELAYED_REF,
@@ -7534,7 +7355,7 @@ search:
* for the proper type.
*/
if (!block_group_bits(block_group, flags)) {
- u64 extra = BTRFS_BLOCK_GROUP_DUP |
+ u64 extra = BTRFS_BLOCK_GROUP_DUP |
BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID5 |
BTRFS_BLOCK_GROUP_RAID6 |
@@ -7738,7 +7559,7 @@ unclustered_alloc:
goto loop;
}
checks:
- search_start = ALIGN(offset, fs_info->stripesize);
+ search_start = round_up(offset, fs_info->stripesize);
/* move on to the next group */
if (search_start + num_bytes >
@@ -7750,7 +7571,6 @@ checks:
if (offset < search_start)
btrfs_add_free_space(block_group, offset,
search_start - offset);
- BUG_ON(offset > search_start);
ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
num_bytes, delalloc);
@@ -7826,8 +7646,7 @@ loop:
goto out;
}
- ret = do_chunk_alloc(trans, fs_info, flags,
- CHUNK_ALLOC_FORCE);
+ ret = do_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);
/*
* If we can't allocate a new chunk we've already looped
@@ -8053,11 +7872,11 @@ int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
}
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
struct btrfs_key *ins, int ref_mod)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
struct btrfs_extent_item *extent_item;
struct btrfs_extent_inline_ref *iref;
@@ -8231,7 +8050,6 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
u64 offset, u64 ram_bytes,
struct btrfs_key *ins)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
@@ -8240,7 +8058,7 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
root->root_key.objectid, owner, offset,
BTRFS_ADD_DELAYED_EXTENT);
- ret = btrfs_add_delayed_data_ref(fs_info, trans, ins->objectid,
+ ret = btrfs_add_delayed_data_ref(trans, ins->objectid,
ins->offset, 0,
root->root_key.objectid, owner,
offset, ram_bytes,
@@ -8254,10 +8072,10 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
* space cache bits as well
*/
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
u64 root_objectid, u64 owner, u64 offset,
struct btrfs_key *ins)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
struct btrfs_block_group_cache *block_group;
struct btrfs_space_info *space_info;
@@ -8285,15 +8103,15 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
spin_unlock(&block_group->lock);
spin_unlock(&space_info->lock);
- ret = alloc_reserved_file_extent(trans, fs_info, 0, root_objectid,
- 0, owner, offset, ins, 1);
+ ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
+ offset, ins, 1);
btrfs_put_block_group(block_group);
return ret;
}
static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- u64 bytenr, int level)
+ u64 bytenr, int level, u64 owner)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *buf;
@@ -8302,7 +8120,6 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (IS_ERR(buf))
return buf;
- btrfs_set_header_generation(buf, trans->transid);
btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
btrfs_tree_lock(buf);
clean_tree_block(fs_info, buf);
@@ -8311,6 +8128,14 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
btrfs_set_lock_blocking(buf);
set_extent_buffer_uptodate(buf);
+ memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
+ btrfs_set_header_level(buf, level);
+ btrfs_set_header_bytenr(buf, buf->start);
+ btrfs_set_header_generation(buf, trans->transid);
+ btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV);
+ btrfs_set_header_owner(buf, owner);
+ write_extent_buffer_fsid(buf, fs_info->fsid);
+ write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
buf->log_index = root->log_transid % 2;
/*
@@ -8419,7 +8244,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
if (btrfs_is_testing(fs_info)) {
buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
- level);
+ level, root_objectid);
if (!IS_ERR(buf))
root->alloc_bytenr += blocksize;
return buf;
@@ -8435,7 +8260,8 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
if (ret)
goto out_unuse;
- buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
+ buf = btrfs_init_new_buffer(trans, root, ins.objectid, level,
+ root_objectid);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto out_free_reserved;
@@ -8467,7 +8293,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
btrfs_ref_tree_mod(root, ins.objectid, ins.offset, parent,
root_objectid, level, 0,
BTRFS_ADD_DELAYED_EXTENT);
- ret = btrfs_add_delayed_tree_ref(fs_info, trans, ins.objectid,
+ ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
ins.offset, parent,
root_objectid, level,
BTRFS_ADD_DELAYED_EXTENT,
@@ -8499,7 +8325,6 @@ struct walk_control {
int keep_locks;
int reada_slot;
int reada_count;
- int for_reloc;
};
#define DROP_REFERENCE 1
@@ -8819,7 +8644,7 @@ skip:
}
if (need_account) {
- ret = btrfs_qgroup_trace_subtree(trans, root, next,
+ ret = btrfs_qgroup_trace_subtree(trans, next,
generation, level - 1);
if (ret) {
btrfs_err_rl(fs_info,
@@ -8919,7 +8744,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
else
ret = btrfs_dec_ref(trans, root, eb, 0);
BUG_ON(ret); /* -ENOMEM */
- ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, eb);
+ ret = btrfs_qgroup_trace_leaf_items(trans, eb);
if (ret) {
btrfs_err_rl(fs_info,
"error %d accounting leaf items. Quota is out of sync, rescan required.",
@@ -9136,7 +8961,6 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
wc->stage = DROP_REFERENCE;
wc->update_ref = update_ref;
wc->keep_locks = 0;
- wc->for_reloc = for_reloc;
wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
while (1) {
@@ -9199,7 +9023,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
if (err)
goto out_end_trans;
- ret = btrfs_del_root(trans, fs_info, &root->root_key);
+ ret = btrfs_del_root(trans, &root->root_key);
if (ret) {
btrfs_abort_transaction(trans, ret);
err = ret;
@@ -9302,7 +9126,6 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
wc->stage = DROP_REFERENCE;
wc->update_ref = 0;
wc->keep_locks = 1;
- wc->for_reloc = 1;
wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
while (1) {
@@ -9417,10 +9240,10 @@ out:
return ret;
}
-int btrfs_inc_block_group_ro(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache)
+int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_trans_handle *trans;
u64 alloc_flags;
int ret;
@@ -9454,7 +9277,7 @@ again:
*/
alloc_flags = update_block_group_flags(fs_info, cache->flags);
if (alloc_flags != cache->flags) {
- ret = do_chunk_alloc(trans, fs_info, alloc_flags,
+ ret = do_chunk_alloc(trans, alloc_flags,
CHUNK_ALLOC_FORCE);
/*
* ENOSPC is allowed here, we may have enough space
@@ -9471,8 +9294,7 @@ again:
if (!ret)
goto out;
alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
- ret = do_chunk_alloc(trans, fs_info, alloc_flags,
- CHUNK_ALLOC_FORCE);
+ ret = do_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
if (ret < 0)
goto out;
ret = inc_block_group_ro(cache, 0);
@@ -9480,7 +9302,7 @@ out:
if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
alloc_flags = update_block_group_flags(fs_info, cache->flags);
mutex_lock(&fs_info->chunk_mutex);
- check_system_chunk(trans, fs_info, alloc_flags);
+ check_system_chunk(trans, alloc_flags);
mutex_unlock(&fs_info->chunk_mutex);
}
mutex_unlock(&fs_info->ro_block_group_mutex);
@@ -9489,12 +9311,11 @@ out:
return ret;
}
-int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 type)
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
{
- u64 alloc_flags = get_alloc_profile(fs_info, type);
+ u64 alloc_flags = get_alloc_profile(trans->fs_info, type);
- return do_chunk_alloc(trans, fs_info, alloc_flags, CHUNK_ALLOC_FORCE);
+ return do_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
}
/*
@@ -9520,13 +9341,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
continue;
}
- if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10 |
- BTRFS_BLOCK_GROUP_DUP))
- factor = 2;
- else
- factor = 1;
-
+ factor = btrfs_bg_type_to_factor(block_group->flags);
free_bytes += (block_group->key.offset -
btrfs_block_group_used(&block_group->item)) *
factor;
@@ -9717,6 +9532,8 @@ static int find_first_block_group(struct btrfs_fs_info *fs_info,
int ret = 0;
struct btrfs_key found_key;
struct extent_buffer *leaf;
+ struct btrfs_block_group_item bg;
+ u64 flags;
int slot;
ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
@@ -9751,8 +9568,32 @@ static int find_first_block_group(struct btrfs_fs_info *fs_info,
"logical %llu len %llu found bg but no related chunk",
found_key.objectid, found_key.offset);
ret = -ENOENT;
+ } else if (em->start != found_key.objectid ||
+ em->len != found_key.offset) {
+ btrfs_err(fs_info,
+ "block group %llu len %llu mismatch with chunk %llu len %llu",
+ found_key.objectid, found_key.offset,
+ em->start, em->len);
+ ret = -EUCLEAN;
} else {
- ret = 0;
+ read_extent_buffer(leaf, &bg,
+ btrfs_item_ptr_offset(leaf, slot),
+ sizeof(bg));
+ flags = btrfs_block_group_flags(&bg) &
+ BTRFS_BLOCK_GROUP_TYPE_MASK;
+
+ if (flags != (em->map_lookup->type &
+ BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ btrfs_err(fs_info,
+"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
+ found_key.objectid,
+ found_key.offset, flags,
+ (BTRFS_BLOCK_GROUP_TYPE_MASK &
+ em->map_lookup->type));
+ ret = -EUCLEAN;
+ } else {
+ ret = 0;
+ }
}
free_extent_map(em);
goto out;
@@ -9847,7 +9688,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
*/
if (block_group->cached == BTRFS_CACHE_NO ||
block_group->cached == BTRFS_CACHE_ERROR)
- free_excluded_extents(info, block_group);
+ free_excluded_extents(block_group);
btrfs_remove_free_space_cache(block_group);
ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
@@ -10003,6 +9844,62 @@ btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
return cache;
}
+
+/*
+ * Iterate all chunks and verify that each of them has the corresponding block
+ * group
+ */
+static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
+ struct extent_map *em;
+ struct btrfs_block_group_cache *bg;
+ u64 start = 0;
+ int ret = 0;
+
+ while (1) {
+ read_lock(&map_tree->map_tree.lock);
+ /*
+ * lookup_extent_mapping will return the first extent map
+ * intersecting the range, so setting @len to 1 is enough to
+ * get the first chunk.
+ */
+ em = lookup_extent_mapping(&map_tree->map_tree, start, 1);
+ read_unlock(&map_tree->map_tree.lock);
+ if (!em)
+ break;
+
+ bg = btrfs_lookup_block_group(fs_info, em->start);
+ if (!bg) {
+ btrfs_err(fs_info,
+ "chunk start=%llu len=%llu doesn't have corresponding block group",
+ em->start, em->len);
+ ret = -EUCLEAN;
+ free_extent_map(em);
+ break;
+ }
+ if (bg->key.objectid != em->start ||
+ bg->key.offset != em->len ||
+ (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
+ (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ btrfs_err(fs_info,
+"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
+ em->start, em->len,
+ em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
+ bg->key.objectid, bg->key.offset,
+ bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
+ ret = -EUCLEAN;
+ free_extent_map(em);
+ btrfs_put_block_group(bg);
+ break;
+ }
+ start = em->start + em->len;
+ free_extent_map(em);
+ btrfs_put_block_group(bg);
+ }
+ return ret;
+}
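
The verifier walks every chunk by restarting the lookup at
em->start + em->len and cross-checks three fields against the block
group. The comparison itself is simple; a standalone sketch with toy
record types:

#include <stdio.h>

struct range { unsigned long long start, len, type; };

/* 0 on match, -1 (think -EUCLEAN) when the metadata disagrees */
static int check_mapping(const struct range *chunk, const struct range *bg)
{
        if (!bg)
                return -1; /* chunk without a block group */
        if (chunk->start != bg->start || chunk->len != bg->len ||
            chunk->type != bg->type)
                return -1;
        return 0;
}

int main(void)
{
        struct range chunk = { 1048576, 8388608, 1 };
        struct range bg    = { 1048576, 8388608, 1 };

        printf("match:    %d\n", check_mapping(&chunk, &bg));
        bg.len = 4194304;
        printf("mismatch: %d\n", check_mapping(&chunk, &bg));
        return 0;
}
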
+
int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
struct btrfs_path *path;
@@ -10089,13 +9986,13 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
* info has super bytes accounted for, otherwise we'll think
* we have more space than we actually do.
*/
- ret = exclude_super_stripes(info, cache);
+ ret = exclude_super_stripes(cache);
if (ret) {
/*
* We may have excluded something, so call this just in
* case.
*/
- free_excluded_extents(info, cache);
+ free_excluded_extents(cache);
btrfs_put_block_group(cache);
goto error;
}
@@ -10110,14 +10007,14 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
if (found_key.offset == btrfs_block_group_used(&cache->item)) {
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
- free_excluded_extents(info, cache);
+ free_excluded_extents(cache);
} else if (btrfs_block_group_used(&cache->item) == 0) {
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
add_new_free_space(cache, found_key.objectid,
found_key.objectid +
found_key.offset);
- free_excluded_extents(info, cache);
+ free_excluded_extents(cache);
}
ret = btrfs_add_block_group_cache(info, cache);
@@ -10140,15 +10037,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
if (btrfs_chunk_readonly(info, cache->key.objectid)) {
inc_block_group_ro(cache, 1);
} else if (btrfs_block_group_used(&cache->item) == 0) {
- spin_lock(&info->unused_bgs_lock);
- /* Should always be true but just in case. */
- if (list_empty(&cache->bg_list)) {
- btrfs_get_block_group(cache);
- trace_btrfs_add_unused_block_group(cache);
- list_add_tail(&cache->bg_list,
- &info->unused_bgs);
- }
- spin_unlock(&info->unused_bgs_lock);
+ ASSERT(list_empty(&cache->bg_list));
+ btrfs_mark_bg_unused(cache);
}
}
@@ -10176,7 +10066,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
btrfs_add_raid_kobjects(info);
init_global_block_rsv(info);
- ret = 0;
+ ret = check_chunk_block_group_mappings(info);
error:
btrfs_free_path(path);
return ret;
@@ -10206,8 +10096,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
sizeof(item));
if (ret)
btrfs_abort_transaction(trans, ret);
- ret = btrfs_finish_chunk_alloc(trans, fs_info, key.objectid,
- key.offset);
+ ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
if (ret)
btrfs_abort_transaction(trans, ret);
add_block_group_free_space(trans, block_group);
@@ -10218,10 +10107,10 @@ next:
trans->can_flush_pending_bgs = can_flush_pending_bgs;
}
-int btrfs_make_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytes_used,
+int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
u64 type, u64 chunk_offset, u64 size)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group_cache *cache;
int ret;
@@ -10240,20 +10129,20 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
cache->needs_free_space = 1;
- ret = exclude_super_stripes(fs_info, cache);
+ ret = exclude_super_stripes(cache);
if (ret) {
/*
* We may have excluded something, so call this just in
* case.
*/
- free_excluded_extents(fs_info, cache);
+ free_excluded_extents(cache);
btrfs_put_block_group(cache);
return ret;
}
add_new_free_space(cache, chunk_offset, chunk_offset + size);
- free_excluded_extents(fs_info, cache);
+ free_excluded_extents(cache);
#ifdef CONFIG_BTRFS_DEBUG
if (btrfs_should_fragment_free_space(cache)) {
@@ -10311,9 +10200,9 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
}
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 group_start,
- struct extent_map *em)
+ u64 group_start, struct extent_map *em)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = fs_info->extent_root;
struct btrfs_path *path;
struct btrfs_block_group_cache *block_group;
@@ -10337,18 +10226,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* Free the reserved super bytes from this block group before
* remove it.
*/
- free_excluded_extents(fs_info, block_group);
+ free_excluded_extents(block_group);
btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
block_group->key.offset);
memcpy(&key, &block_group->key, sizeof(key));
index = btrfs_bg_flags_to_raid_index(block_group->flags);
- if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
- BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10))
- factor = 2;
- else
- factor = 1;
+ factor = btrfs_bg_type_to_factor(block_group->flags);
/* make sure this block group isn't part of an allocation cluster */
cluster = &fs_info->data_alloc_cluster;
@@ -10687,7 +10571,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
/* Don't want to race with allocators so take the groups_sem */
down_write(&space_info->groups_sem);
spin_lock(&block_group->lock);
- if (block_group->reserved ||
+ if (block_group->reserved || block_group->pinned ||
btrfs_block_group_used(&block_group->item) ||
block_group->ro ||
list_is_singular(&block_group->list)) {
@@ -10764,8 +10648,9 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
space_info->bytes_pinned -= block_group->pinned;
space_info->bytes_readonly += block_group->pinned;
- percpu_counter_add(&space_info->total_bytes_pinned,
- -block_group->pinned);
+ percpu_counter_add_batch(&space_info->total_bytes_pinned,
+ -block_group->pinned,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH);
block_group->pinned = 0;
spin_unlock(&block_group->lock);
@@ -10782,8 +10667,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
* Btrfs_remove_chunk will abort the transaction if things go
* horribly wrong.
*/
- ret = btrfs_remove_chunk(trans, fs_info,
- block_group->key.objectid);
+ ret = btrfs_remove_chunk(trans, block_group->key.objectid);
if (ret) {
if (trimming)
@@ -11066,3 +10950,16 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
!atomic_read(&root->will_be_snapshotted));
}
}
+
+void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+
+ spin_lock(&fs_info->unused_bgs_lock);
+ if (list_empty(&bg->bg_list)) {
+ btrfs_get_block_group(bg);
+ trace_btrfs_add_unused_block_group(bg);
+ list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
+ }
+ spin_unlock(&fs_info->unused_bgs_lock);
+}
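
btrfs_mark_bg_unused() centralizes the rule that the unused list owns a
reference of its own and that a group is queued at most once. The shape
of that idiom, modelled in plain C with a mutex standing in for the
unused_bgs spinlock:

#include <pthread.h>
#include <stdio.h>

struct bg {
        int refs;
        int on_list;
};

static pthread_mutex_t unused_lock = PTHREAD_MUTEX_INITIALIZER;

static void mark_unused(struct bg *bg)
{
        pthread_mutex_lock(&unused_lock);
        if (!bg->on_list) {     /* idempotent: enqueue at most once */
                bg->refs++;     /* the list holds its own reference */
                bg->on_list = 1;
        }
        pthread_mutex_unlock(&unused_lock);
}

int main(void)
{
        struct bg bg = { .refs = 1, .on_list = 0 };

        mark_unused(&bg);
        mark_unused(&bg); /* second call is a no-op */
        printf("refs=%d on_list=%d\n", bg.refs, bg.on_list);
        return 0;
}
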
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b3e45714d28f..628f1aef34b0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -140,14 +140,6 @@ static int add_extent_changeset(struct extent_state *state, unsigned bits,
static void flush_write_bio(struct extent_page_data *epd);
-static inline struct btrfs_fs_info *
-tree_fs_info(struct extent_io_tree *tree)
-{
- if (tree->ops)
- return tree->ops->tree_fs_info(tree->private_data);
- return NULL;
-}
-
int __init extent_io_init(void)
{
extent_state_cache = kmem_cache_create("btrfs_extent_state",
@@ -564,8 +556,10 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
- btrfs_panic(tree_fs_info(tree), err,
- "Locking error: Extent tree was modified by another thread while locked.");
+ struct inode *inode = tree->private_data;
+
+ btrfs_panic(btrfs_sb(inode->i_sb), err,
+ "locking error: extent tree was modified by another thread while locked");
}
/*
@@ -1386,14 +1380,6 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
}
}
-/*
- * helper function to set both pages and extents in the tree writeback
- */
-static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
-{
- tree->ops->set_range_writeback(tree->private_data, start, end);
-}
-
/* find the first state struct with 'bits' set after 'start', and
* return it. tree->lock must be held. NULL will be returned if
* nothing was found after 'start'
@@ -2059,7 +2045,7 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, int mirror_num)
{
u64 start = eb->start;
- unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
+ int i, num_pages = num_extent_pages(eb);
int ret = 0;
if (sb_rdonly(fs_info->sb))
@@ -2398,7 +2384,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
start - page_offset(page),
(int)phy_offset, failed_bio->bi_end_io,
NULL);
- bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
+ bio->bi_opf = REQ_OP_READ | read_mode;
btrfs_debug(btrfs_sb(inode->i_sb),
"Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
@@ -2790,8 +2776,8 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
else
contig = bio_end_sector(bio) == sector;
- if (tree->ops && tree->ops->merge_bio_hook(page, offset,
- page_size, bio, bio_flags))
+ if (tree->ops && btrfs_merge_bio_hook(page, offset, page_size,
+ bio, bio_flags))
can_merge = false;
if (prev_bio_flags != bio_flags || !contig || !can_merge ||
@@ -3422,7 +3408,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
continue;
}
- set_range_writeback(tree, cur, cur + iosize - 1);
+ btrfs_set_range_writeback(tree, cur, cur + iosize - 1);
if (!PageWriteback(page)) {
btrfs_err(BTRFS_I(inode)->root->fs_info,
"page %lu not writeback, cur %llu end %llu",
@@ -3538,7 +3524,7 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
struct btrfs_fs_info *fs_info,
struct extent_page_data *epd)
{
- unsigned long i, num_pages;
+ int i, num_pages;
int flush = 0;
int ret = 0;
@@ -3588,7 +3574,7 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
if (!ret)
return ret;
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
struct page *p = eb->pages[i];
@@ -3712,13 +3698,13 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
u64 offset = eb->start;
u32 nritems;
- unsigned long i, num_pages;
+ int i, num_pages;
unsigned long start, end;
unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
int ret = 0;
clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
atomic_set(&eb->io_pages, num_pages);
/* set btree blocks beyond nritems with 0 to avoid stale content. */
@@ -4643,23 +4629,20 @@ int extent_buffer_under_io(struct extent_buffer *eb)
}
/*
- * Helper for releasing extent buffer page.
+ * Release all pages attached to the extent buffer.
*/
-static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
+static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
{
- unsigned long index;
- struct page *page;
- int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+ int i;
+ int num_pages;
+ int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
BUG_ON(extent_buffer_under_io(eb));
- index = num_extent_pages(eb->start, eb->len);
- if (index == 0)
- return;
+ num_pages = num_extent_pages(eb);
+ for (i = 0; i < num_pages; i++) {
+ struct page *page = eb->pages[i];
- do {
- index--;
- page = eb->pages[index];
if (!page)
continue;
if (mapped)
@@ -4691,7 +4674,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
/* One for when we allocated the page */
put_page(page);
- } while (index != 0);
+ }
}
/*
@@ -4699,7 +4682,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
*/
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
- btrfs_release_extent_buffer_page(eb);
+ btrfs_release_extent_buffer_pages(eb);
__free_extent_buffer(eb);
}
@@ -4743,10 +4726,10 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
{
- unsigned long i;
+ int i;
struct page *p;
struct extent_buffer *new;
- unsigned long num_pages = num_extent_pages(src->start, src->len);
+ int num_pages = num_extent_pages(src);
new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
if (new == NULL)
@@ -4766,7 +4749,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
}
set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
- set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
+ set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
return new;
}
@@ -4775,15 +4758,14 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, unsigned long len)
{
struct extent_buffer *eb;
- unsigned long num_pages;
- unsigned long i;
-
- num_pages = num_extent_pages(start, len);
+ int num_pages;
+ int i;
eb = __alloc_extent_buffer(fs_info, start, len);
if (!eb)
return NULL;
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
eb->pages[i] = alloc_page(GFP_NOFS);
if (!eb->pages[i])
@@ -4791,7 +4773,7 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
}
set_extent_buffer_uptodate(eb);
btrfs_set_header_nritems(eb, 0);
- set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+ set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
return eb;
err:
@@ -4843,11 +4825,11 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
static void mark_extent_buffer_accessed(struct extent_buffer *eb,
struct page *accessed)
{
- unsigned long num_pages, i;
+ int num_pages, i;
check_buffer_tree_ref(eb);
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
struct page *p = eb->pages[i];
@@ -4944,8 +4926,8 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start)
{
unsigned long len = fs_info->nodesize;
- unsigned long num_pages = num_extent_pages(start, len);
- unsigned long i;
+ int num_pages;
+ int i;
unsigned long index = start >> PAGE_SHIFT;
struct extent_buffer *eb;
struct extent_buffer *exists = NULL;
@@ -4967,6 +4949,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
if (!eb)
return ERR_PTR(-ENOMEM);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++, index++) {
p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
if (!p) {
@@ -5009,8 +4992,11 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
uptodate = 0;
/*
- * see below about how we avoid a nasty race with release page
- * and why we unlock later
+ * We can't unlock the pages just yet since the extent buffer
+ * hasn't been properly inserted in the radix tree; this
+ * opens a race with btree_releasepage, which can free a page
+ * while we are still filling in all pages for the buffer and
+ * we could crash.
*/
}
if (uptodate)
@@ -5039,21 +5025,12 @@ again:
set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
/*
- * there is a race where release page may have
- * tried to find this extent buffer in the radix
- * but failed. It will tell the VM it is safe to
- * reclaim the, and it will clear the page private bit.
- * We must make sure to set the page private bit properly
- * after the extent buffer is in the radix tree so
- * it doesn't get lost
+ * Now it's safe to unlock the pages because any calls to
+ * btree_releasepage will correctly detect that a page belongs to a
+ * live buffer and won't free it prematurely.
*/
- SetPageChecked(eb->pages[0]);
- for (i = 1; i < num_pages; i++) {
- p = eb->pages[i];
- ClearPageChecked(p);
- unlock_page(p);
- }
- unlock_page(eb->pages[0]);
+ for (i = 0; i < num_pages; i++)
+ unlock_page(eb->pages[i]);
return eb;
free_eb:
@@ -5075,9 +5052,10 @@ static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
__free_extent_buffer(eb);
}
-/* Expects to have eb->eb_lock already held */
static int release_extent_buffer(struct extent_buffer *eb)
{
+ lockdep_assert_held(&eb->refs_lock);
+
WARN_ON(atomic_read(&eb->refs) == 0);
if (atomic_dec_and_test(&eb->refs)) {
if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
@@ -5094,9 +5072,9 @@ static int release_extent_buffer(struct extent_buffer *eb)
}
/* Should be safe to release our pages at this point */
- btrfs_release_extent_buffer_page(eb);
+ btrfs_release_extent_buffer_pages(eb);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) {
+ if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
__free_extent_buffer(eb);
return 1;
}
@@ -5127,7 +5105,7 @@ void free_extent_buffer(struct extent_buffer *eb)
spin_lock(&eb->refs_lock);
if (atomic_read(&eb->refs) == 2 &&
- test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
+ test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))
atomic_dec(&eb->refs);
if (atomic_read(&eb->refs) == 2 &&
@@ -5159,11 +5137,11 @@ void free_extent_buffer_stale(struct extent_buffer *eb)
void clear_extent_buffer_dirty(struct extent_buffer *eb)
{
- unsigned long i;
- unsigned long num_pages;
+ int i;
+ int num_pages;
struct page *page;
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
@@ -5189,15 +5167,15 @@ void clear_extent_buffer_dirty(struct extent_buffer *eb)
int set_extent_buffer_dirty(struct extent_buffer *eb)
{
- unsigned long i;
- unsigned long num_pages;
+ int i;
+ int num_pages;
int was_dirty = 0;
check_buffer_tree_ref(eb);
was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
WARN_ON(atomic_read(&eb->refs) == 0);
WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
@@ -5208,12 +5186,12 @@ int set_extent_buffer_dirty(struct extent_buffer *eb)
void clear_extent_buffer_uptodate(struct extent_buffer *eb)
{
- unsigned long i;
+ int i;
struct page *page;
- unsigned long num_pages;
+ int num_pages;
clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
if (page)
@@ -5223,12 +5201,12 @@ void clear_extent_buffer_uptodate(struct extent_buffer *eb)
void set_extent_buffer_uptodate(struct extent_buffer *eb)
{
- unsigned long i;
+ int i;
struct page *page;
- unsigned long num_pages;
+ int num_pages;
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
SetPageUptodate(page);
@@ -5238,13 +5216,13 @@ void set_extent_buffer_uptodate(struct extent_buffer *eb)
int read_extent_buffer_pages(struct extent_io_tree *tree,
struct extent_buffer *eb, int wait, int mirror_num)
{
- unsigned long i;
+ int i;
struct page *page;
int err;
int ret = 0;
int locked_pages = 0;
int all_uptodate = 1;
- unsigned long num_pages;
+ int num_pages;
unsigned long num_reads = 0;
struct bio *bio = NULL;
unsigned long bio_flags = 0;
@@ -5252,7 +5230,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
if (wait == WAIT_NONE) {
@@ -5576,11 +5554,11 @@ void copy_extent_buffer_full(struct extent_buffer *dst,
struct extent_buffer *src)
{
int i;
- unsigned num_pages;
+ int num_pages;
ASSERT(dst->len == src->len);
- num_pages = num_extent_pages(dst->start, dst->len);
+ num_pages = num_extent_pages(dst);
for (i = 0; i < num_pages; i++)
copy_page(page_address(dst->pages[i]),
page_address(src->pages[i]));
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 0bfd4aeb822d..b4d03e677e1d 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -46,7 +46,7 @@
#define EXTENT_BUFFER_STALE 6
#define EXTENT_BUFFER_WRITEBACK 7
#define EXTENT_BUFFER_READ_ERR 8 /* read IO error */
-#define EXTENT_BUFFER_DUMMY 9
+#define EXTENT_BUFFER_UNMAPPED 9
#define EXTENT_BUFFER_IN_TREE 10
#define EXTENT_BUFFER_WRITE_ERR 11 /* write IO error */
@@ -92,9 +92,6 @@ typedef blk_status_t (extent_submit_bio_hook_t)(void *private_data, struct bio *
typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
struct bio *bio, u64 bio_offset);
-typedef blk_status_t (extent_submit_bio_done_t)(void *private_data,
- struct bio *bio, int mirror_num);
-
struct extent_io_ops {
/*
* The following callbacks must always be defined; the function
@@ -104,12 +101,7 @@ struct extent_io_ops {
int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
struct page *page, u64 start, u64 end,
int mirror);
- int (*merge_bio_hook)(struct page *page, unsigned long offset,
- size_t size, struct bio *bio,
- unsigned long bio_flags);
int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
- struct btrfs_fs_info *(*tree_fs_info)(void *private_data);
- void (*set_range_writeback)(void *private_data, u64 start, u64 end);
/*
* Optional hooks, called if the pointer is not NULL
@@ -440,10 +432,10 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
-static inline unsigned long num_extent_pages(u64 start, u64 len)
+static inline int num_extent_pages(const struct extent_buffer *eb)
{
- return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
- (start >> PAGE_SHIFT);
+ return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
+ (eb->start >> PAGE_SHIFT);
}
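
Worked example of the new helper's arithmetic: the end offset is rounded
up to a page boundary, and the distance in whole pages from the start
page gives the count (which comfortably fits in an int, since an extent
buffer spans at most a handful of pages). A standalone check:

#include <stdio.h>

#define PAGE_SIZE 4096ULL

static int num_pages(unsigned long long start, unsigned long long len)
{
        unsigned long long end =
                ((start + len + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;

        return (int)(end / PAGE_SIZE - start / PAGE_SIZE);
}

int main(void)
{
        /* page-aligned 16K buffer: exactly four 4K pages */
        printf("%d\n", num_pages(16384, 16384));
        /* unaligned start straddles one extra page: five pages */
        printf("%d\n", num_pages(16384 + 2048, 16384));
        return 0;
}
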
static inline void extent_buffer_get(struct extent_buffer *eb)
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index f9dd6d1836a3..ba74827beb32 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -922,7 +922,7 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
const bool new_inline,
struct extent_map *em)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf = path->nodes[0];
const int slot = path->slots[0];
@@ -942,7 +942,7 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
btrfs_file_extent_num_bytes(leaf, fi);
} else if (type == BTRFS_FILE_EXTENT_INLINE) {
size_t size;
- size = btrfs_file_extent_inline_len(leaf, slot, fi);
+ size = btrfs_file_extent_ram_bytes(leaf, fi);
extent_end = ALIGN(extent_start + size,
fs_info->sectorsize);
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 51e77d72068a..2be00e873e92 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -5,14 +5,11 @@
#include <linux/fs.h>
#include <linux/pagemap.h>
-#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
-#include <linux/mpage.h>
#include <linux/falloc.h>
-#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
@@ -83,7 +80,7 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1,
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
struct inode_defrag *defrag)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct inode_defrag *entry;
struct rb_node **p;
struct rb_node *parent = NULL;
@@ -135,8 +132,8 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct inode_defrag *defrag;
u64 transid;
int ret;
@@ -185,7 +182,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
struct inode_defrag *defrag)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret;
if (!__need_auto_defrag(fs_info))
@@ -833,8 +830,7 @@ next_slot:
btrfs_file_extent_num_bytes(leaf, fi);
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
extent_end = key.offset +
- btrfs_file_extent_inline_len(leaf,
- path->slots[0], fi);
+ btrfs_file_extent_ram_bytes(leaf, fi);
} else {
/* can't happen */
BUG();
@@ -1133,7 +1129,7 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, u64 start, u64 end)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
struct btrfs_path *path;
@@ -1470,7 +1466,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
u64 *lockstart, u64 *lockend,
struct extent_state **cached_state)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 start_pos;
u64 last_pos;
int i;
@@ -1526,7 +1522,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
size_t *write_bytes)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_root *root = inode->root;
struct btrfs_ordered_extent *ordered;
u64 lockstart, lockend;
@@ -1569,10 +1565,11 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
return ret;
}
-static noinline ssize_t __btrfs_buffered_write(struct file *file,
- struct iov_iter *i,
- loff_t pos)
+static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
+ struct iov_iter *i)
{
+ struct file *file = iocb->ki_filp;
+ loff_t pos = iocb->ki_pos;
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
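
With the rename, the buffered-write helper follows the same calling
convention as the direct path: it receives only the kiocb and derives the
file and starting offset itself. A toy model of that interface (stand-in
types, not the VFS definitions):

#include <stdio.h>

struct file { const char *name; };
struct kiocb {
        struct file *ki_filp;
        long long ki_pos;
};

static long long buffered_write(struct kiocb *iocb, const char *buf,
                                long long len)
{
        (void)buf; /* a real implementation would copy the payload */
        printf("write %lld bytes to %s at offset %lld\n",
               len, iocb->ki_filp->name, iocb->ki_pos);
        return len;
}

int main(void)
{
        struct file f = { "demo.txt" };
        struct kiocb iocb = { &f, 4096 };
        long long written = buffered_write(&iocb, "data", 4);

        iocb.ki_pos += written; /* the caller advances ki_pos */
        return 0;
}
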
@@ -1804,7 +1801,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- loff_t pos = iocb->ki_pos;
+ loff_t pos;
ssize_t written;
ssize_t written_buffered;
loff_t endbyte;
@@ -1815,8 +1812,8 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
if (written < 0 || !iov_iter_count(from))
return written;
- pos += written;
- written_buffered = __btrfs_buffered_write(file, from, pos);
+ pos = iocb->ki_pos;
+ written_buffered = btrfs_buffered_write(iocb, from);
if (written_buffered < 0) {
err = written_buffered;
goto out;
@@ -1953,7 +1950,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
if (iocb->ki_flags & IOCB_DIRECT) {
num_written = __btrfs_direct_write(iocb, from);
} else {
- num_written = __btrfs_buffered_write(file, from, pos);
+ num_written = btrfs_buffered_write(iocb, from);
if (num_written > 0)
iocb->ki_pos = pos + num_written;
if (clean_page)
@@ -2042,7 +2039,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
struct btrfs_trans_handle *trans;
struct btrfs_log_ctx ctx;
int ret = 0, err;
- bool full_sync = false;
u64 len;
/*
@@ -2066,96 +2062,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
inode_lock(inode);
atomic_inc(&root->log_batch);
- full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
+
/*
- * We might have have had more pages made dirty after calling
- * start_ordered_ops and before acquiring the inode's i_mutex.
+ * We have to do this here to avoid the priority inversion of waiting on
+ * IO of a lower priority task while holding a transaction open.
*/
- if (full_sync) {
- /*
- * For a full sync, we need to make sure any ordered operations
- * start and finish before we start logging the inode, so that
- * all extents are persisted and the respective file extent
- * items are in the fs/subvol btree.
- */
- ret = btrfs_wait_ordered_range(inode, start, len);
- } else {
- /*
- * Start any new ordered operations before starting to log the
- * inode. We will wait for them to finish in btrfs_sync_log().
- *
- * Right before acquiring the inode's mutex, we might have new
- * writes dirtying pages, which won't immediately start the
- * respective ordered operations - that is done through the
- * fill_delalloc callbacks invoked from the writepage and
- * writepages address space operations. So make sure we start
- * all ordered operations before starting to log our inode. Not
- * doing this means that while logging the inode, writeback
- * could start and invoke writepage/writepages, which would call
- * the fill_delalloc callbacks (cow_file_range,
- * submit_compressed_extents). These callbacks add first an
- * extent map to the modified list of extents and then create
- * the respective ordered operation, which means in
- * tree-log.c:btrfs_log_inode() we might capture all existing
- * ordered operations (with btrfs_get_logged_extents()) before
- * the fill_delalloc callback adds its ordered operation, and by
- * the time we visit the modified list of extent maps (with
- * btrfs_log_changed_extents()), we see and process the extent
- * map they created. We then use the extent map to construct a
- * file extent item for logging without waiting for the
- * respective ordered operation to finish - this file extent
- * item points to a disk location that might not have yet been
- * written to, containing random data - so after a crash a log
- * replay will make our inode have file extent items that point
- * to disk locations containing invalid data, as we returned
- * success to userspace without waiting for the respective
- * ordered operation to finish, because it wasn't captured by
- * btrfs_get_logged_extents().
- */
- ret = start_ordered_ops(inode, start, end);
- }
+ ret = btrfs_wait_ordered_range(inode, start, len);
if (ret) {
inode_unlock(inode);
goto out;
}
atomic_inc(&root->log_batch);
- /*
- * If the last transaction that changed this file was before the current
- * transaction and we have the full sync flag set in our inode, we can
- * bail out now without any syncing.
- *
- * Note that we can't bail out if the full sync flag isn't set. This is
- * because when the full sync flag is set we start all ordered extents
- * and wait for them to fully complete - when they complete they update
- * the inode's last_trans field through:
- *
- * btrfs_finish_ordered_io() ->
- * btrfs_update_inode_fallback() ->
- * btrfs_update_inode() ->
- * btrfs_set_inode_last_trans()
- *
- * So we are sure that last_trans is up to date and can do this check to
- * bail out safely. For the fast path, when the full sync flag is not
- * set in our inode, we can not do it because we start only our ordered
- * extents and don't wait for them to complete (that is when
- * btrfs_finish_ordered_io runs), so here at this point their last_trans
- * value might be less than or equals to fs_info->last_trans_committed,
- * and setting a speculative last_trans for an inode when a buffered
- * write is made (such as fs_info->generation + 1 for example) would not
- * be reliable since after setting the value and before fsync is called
- * any number of transactions can start and commit (transaction kthread
- * commits the current transaction periodically), and a transaction
- * commit does not start nor waits for ordered extents to complete.
- */
smp_mb();
if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
- (full_sync && BTRFS_I(inode)->last_trans <=
- fs_info->last_trans_committed) ||
- (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
- BTRFS_I(inode)->last_trans
- <= fs_info->last_trans_committed)) {
+ BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed) {
/*
* We've had everything committed since the last time we were
* modified so clear this flag in case it was set for whatever
@@ -2239,13 +2160,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
}
}
- if (!full_sync) {
- ret = btrfs_wait_ordered_range(inode, start, len);
- if (ret) {
- btrfs_end_transaction(trans);
- goto out;
- }
- }
ret = btrfs_commit_transaction(trans);
} else {
ret = btrfs_end_transaction(trans);
@@ -2310,7 +2224,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
struct btrfs_path *path, u64 offset, u64 end)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index d5f80cb300be..0adf38b00fa0 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -71,10 +71,6 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
inode = btrfs_iget(fs_info->sb, &location, root, NULL);
if (IS_ERR(inode))
return inode;
- if (is_bad_inode(inode)) {
- iput(inode);
- return ERR_PTR(-ENOENT);
- }
mapping_set_gfp_mask(inode->i_mapping,
mapping_gfp_constraint(inode->i_mapping,
@@ -300,9 +296,9 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FREE_INO_OBJECTID)
check_crcs = 1;
- /* Make sure we can fit our crcs into the first page */
+ /* Make sure we can fit our crcs and generation into the first page */
if (write && check_crcs &&
- (num_pages * sizeof(u32)) >= PAGE_SIZE)
+ (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
return -ENOSPC;
memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
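The tightened bound above accounts for the u64 generation stamp that shares the first page with the per-page crcs. A minimal user-space sketch of the arithmetic, assuming 4 KiB pages (names here are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096	/* assumption: 4 KiB pages */

/* Mirrors the new check: all crcs plus the generation must fit in page 0. */
static int crcs_fit(unsigned long num_pages)
{
	return num_pages * sizeof(uint32_t) + sizeof(uint64_t) <= SKETCH_PAGE_SIZE;
}

int main(void)
{
	printf("1022 pages: %s\n", crcs_fit(1022) ? "fit" : "ENOSPC");	/* 4096 bytes exactly */
	printf("1023 pages: %s\n", crcs_fit(1023) ? "fit" : "ENOSPC");	/* 4100 > 4096 */
	return 0;
}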
@@ -547,7 +543,7 @@ static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
io_ctl_map_page(io_ctl, 0);
}
- memcpy(io_ctl->cur, bitmap, PAGE_SIZE);
+ copy_page(io_ctl->cur, bitmap);
io_ctl_set_crc(io_ctl, io_ctl->index - 1);
if (io_ctl->index < io_ctl->num_pages)
io_ctl_map_page(io_ctl, 0);
@@ -607,7 +603,7 @@ static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
if (ret)
return ret;
- memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE);
+ copy_page(entry->bitmap, io_ctl->cur);
io_ctl_unmap_page(io_ctl);
return 0;
@@ -655,7 +651,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
struct btrfs_free_space_ctl *ctl,
struct btrfs_path *path, u64 offset)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
struct btrfs_io_ctl io_ctl;
@@ -1123,13 +1119,10 @@ static int __btrfs_wait_cache_io(struct btrfs_root *root,
{
int ret;
struct inode *inode = io_ctl->inode;
- struct btrfs_fs_info *fs_info;
if (!inode)
return 0;
- fs_info = btrfs_sb(inode->i_sb);
-
/* Flush the dirty pages in the cache file. */
ret = flush_dirty_cache(inode);
if (ret)
@@ -1145,7 +1138,7 @@ out:
BTRFS_I(inode)->generation = 0;
if (block_group) {
#ifdef DEBUG
- btrfs_err(fs_info,
+ btrfs_err(root->fs_info,
"failed to write free space cache for block group %llu",
block_group->key.objectid);
#endif
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index b5950aacd697..d6736595ec57 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -1236,7 +1236,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
if (ret)
goto abort;
- ret = btrfs_del_root(trans, fs_info, &free_space_root->root_key);
+ ret = btrfs_del_root(trans, &free_space_root->root_key);
if (ret)
goto abort;
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 12fcd8897c33..ffca2abf13d0 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -3,7 +3,6 @@
* Copyright (C) 2007 Oracle. All rights reserved.
*/
-#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
@@ -244,8 +243,6 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
return;
while (1) {
- bool add_to_ctl = true;
-
spin_lock(rbroot_lock);
n = rb_first(rbroot);
if (!n) {
@@ -257,15 +254,14 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
BUG_ON(info->bitmap); /* Logic error */
if (info->offset > root->ino_cache_progress)
- add_to_ctl = false;
- else if (info->offset + info->bytes > root->ino_cache_progress)
- count = root->ino_cache_progress - info->offset + 1;
+ count = 0;
else
- count = info->bytes;
+ count = min(root->ino_cache_progress - info->offset + 1,
+ info->bytes);
rb_erase(&info->offset_index, rbroot);
spin_unlock(rbroot_lock);
- if (add_to_ctl)
+ if (count)
__btrfs_add_free_space(root->fs_info, ctl,
info->offset, count);
kmem_cache_free(btrfs_free_space_cachep, info);
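The min() expression above collapses the old three-way branch; a standalone check that both forms agree (hypothetical helper names, not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint64_t count_old(uint64_t offset, uint64_t bytes, uint64_t progress)
{
	if (offset > progress)
		return 0;			/* was: add_to_ctl = false */
	else if (offset + bytes > progress)
		return progress - offset + 1;
	else
		return bytes;
}

static uint64_t count_new(uint64_t offset, uint64_t bytes, uint64_t progress)
{
	if (offset > progress)
		return 0;
	/* min(progress - offset + 1, bytes), as in the new kernel code */
	return (progress - offset + 1 < bytes) ? progress - offset + 1 : bytes;
}

int main(void)
{
	uint64_t cases[][3] = { {10, 5, 8}, {10, 5, 12}, {10, 5, 100} };

	for (int i = 0; i < 3; i++) {
		uint64_t *c = cases[i];
		printf("old=%llu new=%llu\n",
		       (unsigned long long)count_old(c[0], c[1], c[2]),
		       (unsigned long long)count_new(c[0], c[1], c[2]));
	}
	return 0;
}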
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index eba61bcb9bb3..9357a19d2bff 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -14,17 +14,13 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
-#include <linux/mpage.h>
-#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/compat.h>
-#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
-#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
@@ -1443,8 +1439,7 @@ next_slot:
nocow = 1;
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
extent_end = found_key.offset +
- btrfs_file_extent_inline_len(leaf,
- path->slots[0], fi);
+ btrfs_file_extent_ram_bytes(leaf, fi);
extent_end = ALIGN(extent_end,
fs_info->sectorsize);
} else {
@@ -1752,7 +1747,7 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
void __btrfs_del_delalloc_inode(struct btrfs_root *root,
struct btrfs_inode *inode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = root->fs_info;
if (!list_empty(&inode->delalloc_inodes)) {
list_del_init(&inode->delalloc_inodes);
@@ -1903,8 +1898,8 @@ static void btrfs_clear_bit_hook(void *private_data,
}
/*
- * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
- * we don't create bios that span stripes or chunks
+ * Merge bio hook; this must check the chunk tree to make sure we don't create
+ * bios that span stripes or chunks
*
* return 1 if page cannot be merged to bio
* return 0 if page can be merged to bio
@@ -1962,7 +1957,7 @@ static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio,
* At IO completion time the csums attached to the ordered extent record
* are inserted into the btree
*/
-static blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
+blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
int mirror_num)
{
struct inode *inode = private_data;
@@ -2035,8 +2030,7 @@ static blk_status_t btrfs_submit_bio_hook(void *private_data, struct bio *bio,
/* we're doing a write, do the async checksumming */
ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags,
bio_offset, inode,
- btrfs_submit_bio_start,
- btrfs_submit_bio_done);
+ btrfs_submit_bio_start);
goto out;
} else if (!skip_sum) {
ret = btrfs_csum_one_bio(inode, bio, 0, 0);
@@ -3610,18 +3604,15 @@ static int btrfs_read_locked_inode(struct inode *inode)
filled = true;
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto make_bad;
- }
+ if (!path)
+ return -ENOMEM;
memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
if (ret) {
- if (ret > 0)
- ret = -ENOENT;
- goto make_bad;
+ btrfs_free_path(path);
+ return ret;
}
leaf = path->nodes[0];
@@ -3774,11 +3765,6 @@ cache_acl:
btrfs_sync_inode_flags_to_i_flags(inode);
return 0;
-
-make_bad:
- btrfs_free_path(path);
- make_bad_inode(inode);
- return ret;
}
/*
@@ -3984,7 +3970,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
goto err;
}
skip_backref:
- ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
+ ret = btrfs_delete_delayed_dir_index(trans, dir, index);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto err;
@@ -4087,11 +4073,10 @@ out:
}
static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *dir, u64 objectid,
- const char *name, int name_len)
+ struct inode *dir, u64 objectid,
+ const char *name, int name_len)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
@@ -4124,9 +4109,8 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
- ret = btrfs_del_root_ref(trans, fs_info, objectid,
- root->root_key.objectid, dir_ino,
- &index, name, name_len);
+ ret = btrfs_del_root_ref(trans, objectid, root->root_key.objectid,
+ dir_ino, &index, name, name_len);
if (ret < 0) {
if (ret != -ENOENT) {
btrfs_abort_transaction(trans, ret);
@@ -4145,12 +4129,11 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- btrfs_release_path(path);
index = key.offset;
}
btrfs_release_path(path);
- ret = btrfs_delete_delayed_dir_index(trans, fs_info, BTRFS_I(dir), index);
+ ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -4243,9 +4226,9 @@ again:
prev = node;
entry = rb_entry(node, struct btrfs_inode, rb_node);
- if (objectid < btrfs_ino(BTRFS_I(&entry->vfs_inode)))
+ if (objectid < btrfs_ino(entry))
node = node->rb_left;
- else if (objectid > btrfs_ino(BTRFS_I(&entry->vfs_inode)))
+ else if (objectid > btrfs_ino(entry))
node = node->rb_right;
else
break;
@@ -4253,7 +4236,7 @@ again:
if (!node) {
while (prev) {
entry = rb_entry(prev, struct btrfs_inode, rb_node);
- if (objectid <= btrfs_ino(BTRFS_I(&entry->vfs_inode))) {
+ if (objectid <= btrfs_ino(entry)) {
node = prev;
break;
}
@@ -4262,7 +4245,7 @@ again:
}
while (node) {
entry = rb_entry(node, struct btrfs_inode, rb_node);
- objectid = btrfs_ino(BTRFS_I(&entry->vfs_inode)) + 1;
+ objectid = btrfs_ino(entry) + 1;
inode = igrab(&entry->vfs_inode);
if (inode) {
spin_unlock(&root->inode_lock);
@@ -4343,10 +4326,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
- ret = btrfs_unlink_subvol(trans, root, dir,
- dest->root_key.objectid,
- dentry->d_name.name,
- dentry->d_name.len);
+ ret = btrfs_unlink_subvol(trans, dir, dest->root_key.objectid,
+ dentry->d_name.name, dentry->d_name.len);
if (ret) {
err = ret;
btrfs_abort_transaction(trans, ret);
@@ -4441,7 +4422,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
return PTR_ERR(trans);
if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
- err = btrfs_unlink_subvol(trans, root, dir,
+ err = btrfs_unlink_subvol(trans, dir,
BTRFS_I(inode)->location.objectid,
dentry->d_name.name,
dentry->d_name.len);
@@ -4643,8 +4624,8 @@ search_again:
BTRFS_I(inode), leaf, fi,
found_key.offset);
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- item_end += btrfs_file_extent_inline_len(leaf,
- path->slots[0], fi);
+ item_end += btrfs_file_extent_ram_bytes(leaf,
+ fi);
trace_btrfs_truncate_show_fi_inline(
BTRFS_I(inode), leaf, fi, path->slots[0],
@@ -5615,9 +5596,9 @@ static void inode_tree_add(struct inode *inode)
parent = *p;
entry = rb_entry(parent, struct btrfs_inode, rb_node);
- if (ino < btrfs_ino(BTRFS_I(&entry->vfs_inode)))
+ if (ino < btrfs_ino(entry))
p = &parent->rb_left;
- else if (ino > btrfs_ino(BTRFS_I(&entry->vfs_inode)))
+ else if (ino > btrfs_ino(entry))
p = &parent->rb_right;
else {
WARN_ON(!(entry->vfs_inode.i_state &
@@ -5708,16 +5689,21 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
int ret;
ret = btrfs_read_locked_inode(inode);
- if (!is_bad_inode(inode)) {
+ if (!ret) {
inode_tree_add(inode);
unlock_new_inode(inode);
if (new)
*new = 1;
} else {
- unlock_new_inode(inode);
- iput(inode);
- ASSERT(ret < 0);
- inode = ERR_PTR(ret < 0 ? ret : -ESTALE);
+ iget_failed(inode);
+ /*
+ * ret > 0 can come from btrfs_search_slot called by
+ * btrfs_read_locked_inode, this means the inode item
+ * was not found.
+ */
+ if (ret > 0)
+ ret = -ENOENT;
+ inode = ERR_PTR(ret);
}
}
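The comment relies on the btrfs_search_slot convention that a positive return value means the key was not found; a toy model of the caller-side normalization to -ENOENT (illustrative only, not kernel API):

#include <errno.h>
#include <stdio.h>

/* Toy lookup: <0 hard error, 0 found, >0 not found (btrfs_search_slot style). */
static int toy_lookup(int key)
{
	if (key < 0)
		return -EIO;		/* stands in for an I/O failure */
	return key == 42 ? 0 : 1;
}

static int toy_iget(int key)
{
	int ret = toy_lookup(key);

	/* As in btrfs_iget: a positive "not found" becomes -ENOENT. */
	if (ret > 0)
		ret = -ENOENT;
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", toy_iget(42), toy_iget(7), toy_iget(-1));
	return 0;
}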
@@ -5745,7 +5731,7 @@ static struct inode *new_simple_dir(struct super_block *s,
inode->i_mtime = current_time(inode);
inode->i_atime = inode->i_mtime;
inode->i_ctime = inode->i_mtime;
- BTRFS_I(inode)->i_otime = timespec64_to_timespec(inode->i_mtime);
+ BTRFS_I(inode)->i_otime = inode->i_mtime;
return inode;
}
@@ -6027,32 +6013,6 @@ err:
return ret;
}
-int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
-{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_trans_handle *trans;
- int ret = 0;
- bool nolock = false;
-
- if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
- return 0;
-
- if (btrfs_fs_closing(root->fs_info) &&
- btrfs_is_free_space_inode(BTRFS_I(inode)))
- nolock = true;
-
- if (wbc->sync_mode == WB_SYNC_ALL) {
- if (nolock)
- trans = btrfs_join_transaction_nolock(root);
- else
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
- ret = btrfs_commit_transaction(trans);
- }
- return ret;
-}
-
/*
* This is somewhat expensive, updating the tree every time the
* inode changes. But, it is most likely to find the inode in cache.
@@ -6335,8 +6295,10 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
location->type = BTRFS_INODE_ITEM_KEY;
ret = btrfs_insert_inode_locked(inode);
- if (ret < 0)
+ if (ret < 0) {
+ iput(inode);
goto fail;
+ }
path->leave_spinning = 1;
ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
@@ -6349,7 +6311,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
inode->i_mtime = current_time(inode);
inode->i_atime = inode->i_mtime;
inode->i_ctime = inode->i_mtime;
- BTRFS_I(inode)->i_otime = timespec64_to_timespec(inode->i_mtime);
+ BTRFS_I(inode)->i_otime = inode->i_mtime;
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
@@ -6395,12 +6357,11 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
return inode;
fail_unlock:
- unlock_new_inode(inode);
+ discard_new_inode(inode);
fail:
if (dir && name)
BTRFS_I(dir)->index_cnt--;
btrfs_free_path(path);
- iput(inode);
return ERR_PTR(ret);
}
@@ -6419,7 +6380,6 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
const char *name, int name_len, int add_backref, u64 index)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
int ret = 0;
struct btrfs_key key;
struct btrfs_root *root = parent_inode->root;
@@ -6435,7 +6395,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
}
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
- ret = btrfs_add_root_ref(trans, fs_info, key.objectid,
+ ret = btrfs_add_root_ref(trans, key.objectid,
root->root_key.objectid, parent_ino,
index, name, name_len);
} else if (add_backref) {
@@ -6471,7 +6431,7 @@ fail_dir_item:
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
u64 local_index;
int err;
- err = btrfs_del_root_ref(trans, fs_info, key.objectid,
+ err = btrfs_del_root_ref(trans, key.objectid,
root->root_key.objectid, parent_ino,
&local_index, name, name_len);
@@ -6505,7 +6465,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
int err;
- int drop_inode = 0;
u64 objectid;
u64 index = 0;
@@ -6527,6 +6486,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
+ inode = NULL;
goto out_unlock;
}
@@ -6541,31 +6501,24 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
0, index);
- if (err) {
- goto out_unlock_inode;
- } else {
- btrfs_update_inode(trans, root, inode);
- d_instantiate_new(dentry, inode);
- }
+ if (err)
+ goto out_unlock;
+
+ btrfs_update_inode(trans, root, inode);
+ d_instantiate_new(dentry, inode);
out_unlock:
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
- if (drop_inode) {
+ if (err && inode) {
inode_dec_link_count(inode);
- iput(inode);
+ discard_new_inode(inode);
}
return err;
-
-out_unlock_inode:
- drop_inode = 1;
- unlock_new_inode(inode);
- goto out_unlock;
-
}
static int btrfs_create(struct inode *dir, struct dentry *dentry,
@@ -6575,7 +6528,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
- int drop_inode_on_err = 0;
int err;
u64 objectid;
u64 index = 0;
@@ -6598,9 +6550,9 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
+ inode = NULL;
goto out_unlock;
}
- drop_inode_on_err = 1;
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
@@ -6613,33 +6565,28 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
err = btrfs_update_inode(trans, root, inode);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
0, index);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
d_instantiate_new(dentry, inode);
out_unlock:
btrfs_end_transaction(trans);
- if (err && drop_inode_on_err) {
+ if (err && inode) {
inode_dec_link_count(inode);
- iput(inode);
+ discard_new_inode(inode);
}
btrfs_btree_balance_dirty(fs_info);
return err;
-
-out_unlock_inode:
- unlock_new_inode(inode);
- goto out_unlock;
-
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
@@ -6748,6 +6695,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
S_IFDIR | mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
+ inode = NULL;
goto out_fail;
}
@@ -6758,34 +6706,30 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
- goto out_fail_inode;
+ goto out_fail;
btrfs_i_size_write(BTRFS_I(inode), 0);
err = btrfs_update_inode(trans, root, inode);
if (err)
- goto out_fail_inode;
+ goto out_fail;
err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
dentry->d_name.name,
dentry->d_name.len, 0, index);
if (err)
- goto out_fail_inode;
+ goto out_fail;
d_instantiate_new(dentry, inode);
drop_on_err = 0;
out_fail:
btrfs_end_transaction(trans);
- if (drop_on_err) {
+ if (err && inode) {
inode_dec_link_count(inode);
- iput(inode);
+ discard_new_inode(inode);
}
btrfs_btree_balance_dirty(fs_info);
return err;
-
-out_fail_inode:
- unlock_new_inode(inode);
- goto out_fail;
}
static noinline int uncompress_inline(struct btrfs_path *path,
@@ -6847,7 +6791,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
size_t pg_offset, u64 start, u64 len,
int create)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret;
int err = 0;
u64 extent_start = 0;
@@ -6943,7 +6887,8 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
extent_start);
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size_t size;
- size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
+
+ size = btrfs_file_extent_ram_bytes(leaf, item);
extent_end = ALIGN(extent_start + size,
fs_info->sectorsize);
@@ -6994,7 +6939,7 @@ next:
if (new_inline)
goto out;
- size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
+ size = btrfs_file_extent_ram_bytes(leaf, item);
extent_offset = page_offset(page) + pg_offset - extent_start;
copy_size = min_t(u64, PAGE_SIZE - pg_offset,
size - extent_offset);
@@ -7865,7 +7810,7 @@ static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
isector >>= inode->i_sb->s_blocksize_bits;
bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
pgoff, isector, repair_endio, repair_arg);
- bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
+ bio->bi_opf = REQ_OP_READ | read_mode;
btrfs_debug(BTRFS_I(inode)->root->fs_info,
"repair DIO read error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d",
@@ -8299,8 +8244,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
if (write && async_submit) {
ret = btrfs_wq_submit_bio(fs_info, bio, 0, 0,
file_offset, inode,
- btrfs_submit_bio_start_direct_io,
- btrfs_submit_bio_done);
+ btrfs_submit_bio_start_direct_io);
goto err;
} else if (write) {
/*
@@ -9540,8 +9484,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
/* src is a subvolume */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
- ret = btrfs_unlink_subvol(trans, root, old_dir,
- root_objectid,
+ ret = btrfs_unlink_subvol(trans, old_dir, root_objectid,
old_dentry->d_name.name,
old_dentry->d_name.len);
} else { /* src is an inode */
@@ -9560,8 +9503,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
/* dest is a subvolume */
if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
root_objectid = BTRFS_I(new_inode)->root->root_key.objectid;
- ret = btrfs_unlink_subvol(trans, dest, new_dir,
- root_objectid,
+ ret = btrfs_unlink_subvol(trans, new_dir, root_objectid,
new_dentry->d_name.name,
new_dentry->d_name.len);
} else { /* dest is an inode */
@@ -9821,7 +9763,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
- ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
+ ret = btrfs_unlink_subvol(trans, old_dir, root_objectid,
old_dentry->d_name.name,
old_dentry->d_name.len);
} else {
@@ -9843,8 +9785,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
root_objectid = BTRFS_I(new_inode)->location.objectid;
- ret = btrfs_unlink_subvol(trans, dest, new_dir,
- root_objectid,
+ ret = btrfs_unlink_subvol(trans, new_dir, root_objectid,
new_dentry->d_name.name,
new_dentry->d_name.len);
BUG_ON(new_inode->i_nlink == 0);
@@ -10115,7 +10056,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
struct btrfs_key key;
struct inode *inode = NULL;
int err;
- int drop_inode = 0;
u64 objectid;
u64 index = 0;
int name_len;
@@ -10148,6 +10088,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
objectid, S_IFLNK|S_IRWXUGO, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
+ inode = NULL;
goto out_unlock;
}
@@ -10164,12 +10105,12 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
- goto out_unlock_inode;
+ goto out_unlock;
}
key.objectid = btrfs_ino(BTRFS_I(inode));
key.offset = 0;
@@ -10179,7 +10120,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
datasize);
if (err) {
btrfs_free_path(path);
- goto out_unlock_inode;
+ goto out_unlock;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
@@ -10211,26 +10152,19 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
if (!err)
err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
BTRFS_I(inode), 0, index);
- if (err) {
- drop_inode = 1;
- goto out_unlock_inode;
- }
+ if (err)
+ goto out_unlock;
d_instantiate_new(dentry, inode);
out_unlock:
btrfs_end_transaction(trans);
- if (drop_inode) {
+ if (err && inode) {
inode_dec_link_count(inode);
- iput(inode);
+ discard_new_inode(inode);
}
btrfs_btree_balance_dirty(fs_info);
return err;
-
-out_unlock_inode:
- drop_inode = 1;
- unlock_new_inode(inode);
- goto out_unlock;
}
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
@@ -10439,14 +10373,14 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
ret = btrfs_init_inode_security(trans, inode, dir, NULL);
if (ret)
- goto out_inode;
+ goto out;
ret = btrfs_update_inode(trans, root, inode);
if (ret)
- goto out_inode;
+ goto out;
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
if (ret)
- goto out_inode;
+ goto out;
/*
* We set number of links to 0 in btrfs_new_inode(), and here we set
@@ -10456,21 +10390,15 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
* d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
*/
set_nlink(inode, 1);
- unlock_new_inode(inode);
d_tmpfile(dentry, inode);
+ unlock_new_inode(inode);
mark_inode_dirty(inode);
-
out:
btrfs_end_transaction(trans);
- if (ret)
- iput(inode);
+ if (ret && inode)
+ discard_new_inode(inode);
btrfs_btree_balance_dirty(fs_info);
return ret;
-
-out_inode:
- unlock_new_inode(inode);
- goto out;
-
}
__attribute__((const))
@@ -10479,12 +10407,6 @@ static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
return -EAGAIN;
}
-static struct btrfs_fs_info *iotree_fs_info(void *private_data)
-{
- struct inode *inode = private_data;
- return btrfs_sb(inode->i_sb);
-}
-
static void btrfs_check_extent_io_range(void *private_data, const char *caller,
u64 start, u64 end)
{
@@ -10499,9 +10421,9 @@ static void btrfs_check_extent_io_range(void *private_data, const char *caller,
}
}
-void btrfs_set_range_writeback(void *private_data, u64 start, u64 end)
+void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
- struct inode *inode = private_data;
+ struct inode *inode = tree->private_data;
unsigned long index = start >> PAGE_SHIFT;
unsigned long end_index = end >> PAGE_SHIFT;
struct page *page;
@@ -10557,10 +10479,7 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
/* mandatory callbacks */
.submit_bio_hook = btrfs_submit_bio_hook,
.readpage_end_io_hook = btrfs_readpage_end_io_hook,
- .merge_bio_hook = btrfs_merge_bio_hook,
.readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
- .tree_fs_info = iotree_fs_info,
- .set_range_writeback = btrfs_set_range_writeback,
/* optional callbacks */
.fill_delalloc = run_delalloc_range,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b077544b5232..d3a5d2a41e5f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -5,23 +5,18 @@
#include <linux/kernel.h>
#include <linux/bio.h>
-#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
-#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
-#include <linux/mpage.h>
#include <linux/namei.h>
-#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/compat.h>
-#include <linux/bit_spinlock.h>
#include <linux/security.h>
#include <linux/xattr.h>
#include <linux/mm.h>
@@ -606,7 +601,7 @@ static noinline int create_subvol(struct inode *dir,
trans->block_rsv = &block_rsv;
trans->bytes_reserved = block_rsv.size;
- ret = btrfs_qgroup_inherit(trans, fs_info, 0, objectid, inherit);
+ ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
if (ret)
goto fail;
@@ -616,14 +611,6 @@ static noinline int create_subvol(struct inode *dir,
goto fail;
}
- memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
- btrfs_set_header_bytenr(leaf, leaf->start);
- btrfs_set_header_generation(leaf, trans->transid);
- btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(leaf, objectid);
-
- write_extent_buffer_fsid(leaf, fs_info->fsid);
- write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
btrfs_mark_buffer_dirty(leaf);
inode_item = &root_item->inode;
@@ -711,8 +698,7 @@ static noinline int create_subvol(struct inode *dir,
ret = btrfs_update_inode(trans, root, dir);
BUG_ON(ret);
- ret = btrfs_add_root_ref(trans, fs_info,
- objectid, root->root_key.objectid,
+ ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
btrfs_ino(BTRFS_I(dir)), index, name, namelen);
BUG_ON(ret);
@@ -2507,8 +2493,8 @@ out:
static noinline int btrfs_ioctl_ino_lookup(struct file *file,
void __user *argp)
{
- struct btrfs_ioctl_ino_lookup_args *args;
- struct inode *inode;
+ struct btrfs_ioctl_ino_lookup_args *args;
+ struct inode *inode;
int ret = 0;
args = memdup_user(argp, sizeof(*args));
@@ -2941,8 +2927,14 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
ret = btrfs_defrag_root(root);
break;
case S_IFREG:
- if (!(file->f_mode & FMODE_WRITE)) {
- ret = -EINVAL;
+ /*
+ * Note that this does not check the file descriptor for write
+ * access. This prevents defragmenting executables that are
+ * running and allows defrag on files open in read-only mode.
+ */
+ if (!capable(CAP_SYS_ADMIN) &&
+ inode_permission(inode, MAY_WRITE)) {
+ ret = -EPERM;
goto out;
}
@@ -3165,10 +3157,8 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
di_args->total_bytes = btrfs_device_get_total_bytes(dev);
memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
if (dev->name) {
- struct rcu_string *name;
-
- name = rcu_dereference(dev->name);
- strncpy(di_args->path, name->str, sizeof(di_args->path) - 1);
+ strncpy(di_args->path, rcu_str_deref(dev->name),
+ sizeof(di_args->path) - 1);
di_args->path[sizeof(di_args->path) - 1] = 0;
} else {
di_args->path[0] = '\0';
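The copy keeps the usual strncpy caveat in mind: when the source is at least as long as the limit, strncpy leaves the buffer unterminated, hence the explicit NUL store. A self-contained illustration with an arbitrary buffer size:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char path[8];
	const char *name = "/dev/sda1";	/* longer than the buffer */

	strncpy(path, name, sizeof(path) - 1);
	path[sizeof(path) - 1] = 0;	/* without this, path may be unterminated */
	printf("%s\n", path);		/* prints the truncated "/dev/sd" */
	return 0;
}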
@@ -5118,9 +5108,7 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_quota_ctl_args *sa;
- struct btrfs_trans_handle *trans = NULL;
int ret;
- int err;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -5136,28 +5124,19 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
}
down_write(&fs_info->subvol_sem);
- trans = btrfs_start_transaction(fs_info->tree_root, 2);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- goto out;
- }
switch (sa->cmd) {
case BTRFS_QUOTA_CTL_ENABLE:
- ret = btrfs_quota_enable(trans, fs_info);
+ ret = btrfs_quota_enable(fs_info);
break;
case BTRFS_QUOTA_CTL_DISABLE:
- ret = btrfs_quota_disable(trans, fs_info);
+ ret = btrfs_quota_disable(fs_info);
break;
default:
ret = -EINVAL;
break;
}
- err = btrfs_commit_transaction(trans);
- if (err && !ret)
- ret = err;
-out:
kfree(sa);
up_write(&fs_info->subvol_sem);
drop_write:
@@ -5195,15 +5174,13 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
}
if (sa->assign) {
- ret = btrfs_add_qgroup_relation(trans, fs_info,
- sa->src, sa->dst);
+ ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
} else {
- ret = btrfs_del_qgroup_relation(trans, fs_info,
- sa->src, sa->dst);
+ ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
}
/* update qgroup status and info */
- err = btrfs_run_qgroups(trans, fs_info);
+ err = btrfs_run_qgroups(trans);
if (err < 0)
btrfs_handle_fs_error(fs_info, err,
"failed to update qgroup status and info");
@@ -5221,7 +5198,6 @@ drop_write:
static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_qgroup_create_args *sa;
struct btrfs_trans_handle *trans;
@@ -5253,9 +5229,9 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
}
if (sa->create) {
- ret = btrfs_create_qgroup(trans, fs_info, sa->qgroupid);
+ ret = btrfs_create_qgroup(trans, sa->qgroupid);
} else {
- ret = btrfs_remove_qgroup(trans, fs_info, sa->qgroupid);
+ ret = btrfs_remove_qgroup(trans, sa->qgroupid);
}
err = btrfs_end_transaction(trans);
@@ -5272,7 +5248,6 @@ drop_write:
static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_qgroup_limit_args *sa;
struct btrfs_trans_handle *trans;
@@ -5305,7 +5280,7 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
qgroupid = root->root_key.objectid;
}
- ret = btrfs_limit_qgroup(trans, fs_info, qgroupid, &sa->lim);
+ ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim);
err = btrfs_end_transaction(trans);
if (err && !ret)
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 2e1a1694a33d..0c4ef208b8b9 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -6,7 +6,6 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
-#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
@@ -421,129 +420,6 @@ out:
return ret == 0;
}
-/* Needs to either be called under a log transaction or the log_mutex */
-void btrfs_get_logged_extents(struct btrfs_inode *inode,
- struct list_head *logged_list,
- const loff_t start,
- const loff_t end)
-{
- struct btrfs_ordered_inode_tree *tree;
- struct btrfs_ordered_extent *ordered;
- struct rb_node *n;
- struct rb_node *prev;
-
- tree = &inode->ordered_tree;
- spin_lock_irq(&tree->lock);
- n = __tree_search(&tree->tree, end, &prev);
- if (!n)
- n = prev;
- for (; n; n = rb_prev(n)) {
- ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
- if (ordered->file_offset > end)
- continue;
- if (entry_end(ordered) <= start)
- break;
- if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
- continue;
- list_add(&ordered->log_list, logged_list);
- refcount_inc(&ordered->refs);
- }
- spin_unlock_irq(&tree->lock);
-}
-
-void btrfs_put_logged_extents(struct list_head *logged_list)
-{
- struct btrfs_ordered_extent *ordered;
-
- while (!list_empty(logged_list)) {
- ordered = list_first_entry(logged_list,
- struct btrfs_ordered_extent,
- log_list);
- list_del_init(&ordered->log_list);
- btrfs_put_ordered_extent(ordered);
- }
-}
-
-void btrfs_submit_logged_extents(struct list_head *logged_list,
- struct btrfs_root *log)
-{
- int index = log->log_transid % 2;
-
- spin_lock_irq(&log->log_extents_lock[index]);
- list_splice_tail(logged_list, &log->logged_list[index]);
- spin_unlock_irq(&log->log_extents_lock[index]);
-}
-
-void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *log, u64 transid)
-{
- struct btrfs_ordered_extent *ordered;
- int index = transid % 2;
-
- spin_lock_irq(&log->log_extents_lock[index]);
- while (!list_empty(&log->logged_list[index])) {
- struct inode *inode;
- ordered = list_first_entry(&log->logged_list[index],
- struct btrfs_ordered_extent,
- log_list);
- list_del_init(&ordered->log_list);
- inode = ordered->inode;
- spin_unlock_irq(&log->log_extents_lock[index]);
-
- if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
- !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
- u64 start = ordered->file_offset;
- u64 end = ordered->file_offset + ordered->len - 1;
-
- WARN_ON(!inode);
- filemap_fdatawrite_range(inode->i_mapping, start, end);
- }
- wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
- &ordered->flags));
-
- /*
- * In order to keep us from losing our ordered extent
- * information when committing the transaction we have to make
- * sure that any logged extents are completed when we go to
- * commit the transaction. To do this we simply increase the
- * current transactions pending_ordered counter and decrement it
- * when the ordered extent completes.
- */
- if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
- struct btrfs_ordered_inode_tree *tree;
-
- tree = &BTRFS_I(inode)->ordered_tree;
- spin_lock_irq(&tree->lock);
- if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
- set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
- atomic_inc(&trans->transaction->pending_ordered);
- }
- spin_unlock_irq(&tree->lock);
- }
- btrfs_put_ordered_extent(ordered);
- spin_lock_irq(&log->log_extents_lock[index]);
- }
- spin_unlock_irq(&log->log_extents_lock[index]);
-}
-
-void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
-{
- struct btrfs_ordered_extent *ordered;
- int index = transid % 2;
-
- spin_lock_irq(&log->log_extents_lock[index]);
- while (!list_empty(&log->logged_list[index])) {
- ordered = list_first_entry(&log->logged_list[index],
- struct btrfs_ordered_extent,
- log_list);
- list_del_init(&ordered->log_list);
- spin_unlock_irq(&log->log_extents_lock[index]);
- btrfs_put_ordered_extent(ordered);
- spin_lock_irq(&log->log_extents_lock[index]);
- }
- spin_unlock_irq(&log->log_extents_lock[index]);
-}
-
/*
* used to drop a reference on an ordered extent. This will free
* the extent if the last reference is dropped
@@ -913,20 +789,6 @@ out:
return entry;
}
-bool btrfs_have_ordered_extents_in_range(struct inode *inode,
- u64 file_offset,
- u64 len)
-{
- struct btrfs_ordered_extent *oe;
-
- oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len);
- if (oe) {
- btrfs_put_ordered_extent(oe);
- return true;
- }
- return false;
-}
-
/*
* lookup and return any extent before 'file_offset'. NULL is returned
* if none is found
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 3be443fb3001..02d813aaa261 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -54,15 +54,11 @@ struct btrfs_ordered_sum {
#define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates whether this ordered extent
* has done its due diligence in updating
* the isize. */
-#define BTRFS_ORDERED_LOGGED_CSUM 8 /* We've logged the csums on this ordered
- ordered extent */
-#define BTRFS_ORDERED_TRUNCATED 9 /* Set when we have to truncate an extent */
+#define BTRFS_ORDERED_TRUNCATED 8 /* Set when we have to truncate an extent */
-#define BTRFS_ORDERED_LOGGED 10 /* Set when we've waited on this ordered extent
- * in the logging code. */
-#define BTRFS_ORDERED_PENDING 11 /* We are waiting for this ordered extent to
+#define BTRFS_ORDERED_PENDING 9 /* We are waiting for this ordered extent to
* complete in the current transaction. */
-#define BTRFS_ORDERED_REGULAR 12 /* Regular IO for COW */
+#define BTRFS_ORDERED_REGULAR 10 /* Regular IO for COW */
struct btrfs_ordered_extent {
/* logical offset in the file */
@@ -182,9 +178,6 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
struct btrfs_inode *inode,
u64 file_offset,
u64 len);
-bool btrfs_have_ordered_extents_in_range(struct inode *inode,
- u64 file_offset,
- u64 len);
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_extent *ordered);
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
@@ -193,16 +186,6 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
const u64 range_start, const u64 range_len);
u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
const u64 range_start, const u64 range_len);
-void btrfs_get_logged_extents(struct btrfs_inode *inode,
- struct list_head *logged_list,
- const loff_t start,
- const loff_t end);
-void btrfs_put_logged_extents(struct list_head *logged_list);
-void btrfs_submit_logged_extents(struct list_head *logged_list,
- struct btrfs_root *log);
-void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *log, u64 transid);
-void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
int __init ordered_data_init(void);
void __cold ordered_data_exit(void);
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index a4e11cf04671..df49931ffe92 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -52,17 +52,9 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
u64 offset;
int ref_index = 0;
- if (item_size < sizeof(*ei)) {
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- struct btrfs_extent_item_v0 *ei0;
- BUG_ON(item_size != sizeof(*ei0));
- ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0);
- pr_info("\t\textent refs %u\n",
- btrfs_extent_refs_v0(eb, ei0));
- return;
-#else
- BUG();
-#endif
+ if (unlikely(item_size < sizeof(*ei))) {
+ btrfs_print_v0_err(eb->fs_info);
+ btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
}
ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
@@ -133,20 +125,6 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
WARN_ON(ptr > end);
}
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-static void print_extent_ref_v0(struct extent_buffer *eb, int slot)
-{
- struct btrfs_extent_ref_v0 *ref0;
-
- ref0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_ref_v0);
- printk("\t\textent back ref root %llu gen %llu owner %llu num_refs %lu\n",
- btrfs_ref_root_v0(eb, ref0),
- btrfs_ref_generation_v0(eb, ref0),
- btrfs_ref_objectid_v0(eb, ref0),
- (unsigned long)btrfs_ref_count_v0(eb, ref0));
-}
-#endif
-
static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
u32 item_size)
{
@@ -267,8 +245,8 @@ void btrfs_print_leaf(struct extent_buffer *l)
struct btrfs_file_extent_item);
if (btrfs_file_extent_type(l, fi) ==
BTRFS_FILE_EXTENT_INLINE) {
- pr_info("\t\tinline extent data size %u\n",
- btrfs_file_extent_inline_len(l, i, fi));
+ pr_info("\t\tinline extent data size %llu\n",
+ btrfs_file_extent_ram_bytes(l, fi));
break;
}
pr_info("\t\textent data disk bytenr %llu nr %llu\n",
@@ -280,11 +258,8 @@ void btrfs_print_leaf(struct extent_buffer *l)
btrfs_file_extent_ram_bytes(l, fi));
break;
case BTRFS_EXTENT_REF_V0_KEY:
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- print_extent_ref_v0(l, i);
-#else
- BUG();
-#endif
+ btrfs_print_v0_err(fs_info);
+ btrfs_handle_fs_error(fs_info, -EINVAL, NULL);
break;
case BTRFS_BLOCK_GROUP_ITEM_KEY:
bi = btrfs_item_ptr(l, i,
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index c25dc47210a3..4353bb69bb86 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -530,11 +530,11 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
fs_info->qgroup_ulist = NULL;
}
-static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *quota_root,
- u64 src, u64 dst)
+static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
+ u64 dst)
{
int ret;
+ struct btrfs_root *quota_root = trans->fs_info->quota_root;
struct btrfs_path *path;
struct btrfs_key key;
@@ -554,11 +554,11 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
return ret;
}
-static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *quota_root,
- u64 src, u64 dst)
+static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
+ u64 dst)
{
int ret;
+ struct btrfs_root *quota_root = trans->fs_info->quota_root;
struct btrfs_path *path;
struct btrfs_key key;
@@ -653,10 +653,10 @@ out:
return ret;
}
-static int del_qgroup_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *quota_root, u64 qgroupid)
+static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
int ret;
+ struct btrfs_root *quota_root = trans->fs_info->quota_root;
struct btrfs_path *path;
struct btrfs_key key;
@@ -700,9 +700,9 @@ out:
}
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct btrfs_qgroup *qgroup)
{
+ struct btrfs_root *quota_root = trans->fs_info->quota_root;
struct btrfs_path *path;
struct btrfs_key key;
struct extent_buffer *l;
@@ -718,7 +718,7 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
if (ret > 0)
ret = -ENOENT;
@@ -742,9 +742,10 @@ out:
}
static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct btrfs_qgroup *qgroup)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *quota_root = fs_info->quota_root;
struct btrfs_path *path;
struct btrfs_key key;
struct extent_buffer *l;
@@ -752,7 +753,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
int ret;
int slot;
- if (btrfs_is_testing(root->fs_info))
+ if (btrfs_is_testing(fs_info))
return 0;
key.objectid = 0;
@@ -763,7 +764,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
if (ret > 0)
ret = -ENOENT;
@@ -786,10 +787,10 @@ out:
return ret;
}
-static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- struct btrfs_root *root)
+static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *quota_root = fs_info->quota_root;
struct btrfs_path *path;
struct btrfs_key key;
struct extent_buffer *l;
@@ -805,7 +806,7 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
if (ret > 0)
ret = -ENOENT;
@@ -875,8 +876,7 @@ out:
return ret;
}
-int btrfs_quota_enable(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info)
+int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *quota_root;
struct btrfs_root *tree_root = fs_info->tree_root;
@@ -886,6 +886,7 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_qgroup *qgroup = NULL;
+ struct btrfs_trans_handle *trans = NULL;
int ret = 0;
int slot;
@@ -893,9 +894,25 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
if (fs_info->quota_root)
goto out;
+ /*
+ * 1 for quota root item
+ * 1 for BTRFS_QGROUP_STATUS item
+ *
+ * Yet we also need 2*n items for the QGROUP_INFO/QGROUP_LIMIT items
+ * per subvolume. However, those are not currently reserved since it
+ * would be a lot of overkill.
+ */
+ trans = btrfs_start_transaction(tree_root, 2);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out;
+ }
+
fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
if (!fs_info->qgroup_ulist) {
ret = -ENOMEM;
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -906,12 +923,14 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
BTRFS_QUOTA_TREE_OBJECTID);
if (IS_ERR(quota_root)) {
ret = PTR_ERR(quota_root);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
+ btrfs_abort_transaction(trans, ret);
goto out_free_root;
}
@@ -921,8 +940,10 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
sizeof(*ptr));
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
+ }
leaf = path->nodes[0];
ptr = btrfs_item_ptr(leaf, path->slots[0],
@@ -944,9 +965,10 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
if (ret > 0)
goto out_add_root;
- if (ret < 0)
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
-
+ }
while (1) {
slot = path->slots[0];
@@ -956,18 +978,23 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
if (found_key.type == BTRFS_ROOT_REF_KEY) {
ret = add_qgroup_item(trans, quota_root,
found_key.offset);
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
+ }
qgroup = add_qgroup_rb(fs_info, found_key.offset);
if (IS_ERR(qgroup)) {
ret = PTR_ERR(qgroup);
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
}
ret = btrfs_next_item(tree_root, path);
- if (ret < 0)
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
+ }
if (ret)
break;
}
@@ -975,18 +1002,28 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
out_add_root:
btrfs_release_path(path);
ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
+ }
qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
if (IS_ERR(qgroup)) {
ret = PTR_ERR(qgroup);
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
spin_lock(&fs_info->qgroup_lock);
fs_info->quota_root = quota_root;
set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
spin_unlock(&fs_info->qgroup_lock);
+
+ ret = btrfs_commit_transaction(trans);
+ if (ret) {
+ trans = NULL;
+ goto out_free_path;
+ }
+
ret = qgroup_rescan_init(fs_info, 0, 1);
if (!ret) {
qgroup_rescan_zero_tracking(fs_info);
@@ -1006,20 +1043,35 @@ out:
if (ret) {
ulist_free(fs_info->qgroup_ulist);
fs_info->qgroup_ulist = NULL;
+ if (trans)
+ btrfs_end_transaction(trans);
}
mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
-int btrfs_quota_disable(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info)
+int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *quota_root;
+ struct btrfs_trans_handle *trans = NULL;
int ret = 0;
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root)
goto out;
+
+ /*
+ * 1 for the root item
+ *
+ * We should also reserve enough items for the quota tree deletion in
+ * btrfs_clean_quota_tree, but this is not done.
+ */
+ trans = btrfs_start_transaction(fs_info->tree_root, 1);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+
clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
btrfs_qgroup_wait_for_completion(fs_info, false);
spin_lock(&fs_info->qgroup_lock);
@@ -1031,12 +1083,16 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
btrfs_free_qgroup_config(fs_info);
ret = btrfs_clean_quota_tree(trans, quota_root);
- if (ret)
- goto out;
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto end_trans;
+ }
- ret = btrfs_del_root(trans, fs_info, &quota_root->root_key);
- if (ret)
- goto out;
+ ret = btrfs_del_root(trans, &quota_root->root_key);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto end_trans;
+ }
list_del(&quota_root->dirty_list);
@@ -1048,6 +1104,9 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
free_extent_buffer(quota_root->node);
free_extent_buffer(quota_root->commit_root);
kfree(quota_root);
+
+end_trans:
+ ret = btrfs_end_transaction(trans);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
@@ -1177,9 +1236,10 @@ out:
return ret;
}
-int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 src, u64 dst)
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+ u64 dst)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *parent;
struct btrfs_qgroup *member;
@@ -1216,13 +1276,13 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
}
}
- ret = add_qgroup_relation_item(trans, quota_root, src, dst);
+ ret = add_qgroup_relation_item(trans, src, dst);
if (ret)
goto out;
- ret = add_qgroup_relation_item(trans, quota_root, dst, src);
+ ret = add_qgroup_relation_item(trans, dst, src);
if (ret) {
- del_qgroup_relation_item(trans, quota_root, src, dst);
+ del_qgroup_relation_item(trans, src, dst);
goto out;
}
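The relation is stored in both directions, and a failure on the second insert rolls back the first so the tree never holds a half-linked pair. A sketch of that pattern over a hypothetical flat store (none of these helpers are kernel API):

#include <errno.h>
#include <stdio.h>

/* Hypothetical flat store of (src, dst) pairs. */
static int store[16][2];
static int nr;

static int add_item(int src, int dst)
{
	if (nr >= 16)
		return -ENOSPC;
	store[nr][0] = src;
	store[nr][1] = dst;
	nr++;
	return 0;
}

static int del_item(int src, int dst)
{
	for (int i = 0; i < nr; i++) {
		if (store[i][0] == src && store[i][1] == dst) {
			/* swap-delete with the last entry */
			store[i][0] = store[--nr][0];
			store[i][1] = store[nr][1];
			return 0;
		}
	}
	return -ENOENT;
}

/* Mirrors btrfs_add_qgroup_relation: both directions or neither. */
static int add_relation(int src, int dst)
{
	int ret = add_item(src, dst);

	if (ret)
		return ret;
	ret = add_item(dst, src);
	if (ret)
		del_item(src, dst);	/* roll back the forward link */
	return ret;
}

int main(void)
{
	printf("%d (entries=%d)\n", add_relation(1, 100), nr);
	return 0;
}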
@@ -1240,9 +1300,10 @@ out:
return ret;
}
-static int __del_qgroup_relation(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 src, u64 dst)
+static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+ u64 dst)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *parent;
struct btrfs_qgroup *member;
@@ -1276,8 +1337,8 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans,
ret = -ENOENT;
goto out;
exist:
- ret = del_qgroup_relation_item(trans, quota_root, src, dst);
- err = del_qgroup_relation_item(trans, quota_root, dst, src);
+ ret = del_qgroup_relation_item(trans, src, dst);
+ err = del_qgroup_relation_item(trans, dst, src);
if (err && !ret)
ret = err;
@@ -1290,21 +1351,22 @@ out:
return ret;
}
-int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 src, u64 dst)
+int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+ u64 dst)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret = 0;
mutex_lock(&fs_info->qgroup_ioctl_lock);
- ret = __del_qgroup_relation(trans, fs_info, src, dst);
+ ret = __del_qgroup_relation(trans, src, dst);
mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
-int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 qgroupid)
+int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
int ret = 0;
@@ -1336,9 +1398,9 @@ out:
return ret;
}
-int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 qgroupid)
+int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
struct btrfs_qgroup_list *list;
@@ -1362,16 +1424,15 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
goto out;
}
}
- ret = del_qgroup_item(trans, quota_root, qgroupid);
+ ret = del_qgroup_item(trans, qgroupid);
if (ret && ret != -ENOENT)
goto out;
while (!list_empty(&qgroup->groups)) {
list = list_first_entry(&qgroup->groups,
struct btrfs_qgroup_list, next_group);
- ret = __del_qgroup_relation(trans, fs_info,
- qgroupid,
- list->group->qgroupid);
+ ret = __del_qgroup_relation(trans, qgroupid,
+ list->group->qgroupid);
if (ret)
goto out;
}
@@ -1384,10 +1445,10 @@ out:
return ret;
}
-int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 qgroupid,
+int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
struct btrfs_qgroup_limit *limit)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
int ret = 0;
@@ -1451,7 +1512,7 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
spin_unlock(&fs_info->qgroup_lock);
- ret = update_qgroup_limit_item(trans, quota_root, qgroup);
+ ret = update_qgroup_limit_item(trans, qgroup);
if (ret) {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
btrfs_info(fs_info, "unable to update quota limit for %llu",
@@ -1519,10 +1580,10 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
return 0;
}
-int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
- gfp_t gfp_flag)
+int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+ u64 num_bytes, gfp_t gfp_flag)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_qgroup_extent_record *record;
struct btrfs_delayed_ref_root *delayed_refs;
int ret;
@@ -1530,8 +1591,6 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
|| bytenr == 0 || num_bytes == 0)
return 0;
- if (WARN_ON(trans == NULL))
- return -EINVAL;
record = kmalloc(sizeof(*record), gfp_flag);
if (!record)
return -ENOMEM;
@@ -1552,9 +1611,9 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
}
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct extent_buffer *eb)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int nr = btrfs_header_nritems(eb);
int i, extent_type, ret;
struct btrfs_key key;
@@ -1584,8 +1643,8 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
- ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
- num_bytes, GFP_NOFS);
+ ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
+ GFP_NOFS);
if (ret)
return ret;
}
@@ -1655,11 +1714,10 @@ static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
}
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct extent_buffer *root_eb,
u64 root_gen, int root_level)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret = 0;
int level;
struct extent_buffer *eb = root_eb;
@@ -1678,7 +1736,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
}
if (root_level == 0) {
- ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb);
+ ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
goto out;
}
@@ -1736,8 +1794,7 @@ walk_down:
btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
- ret = btrfs_qgroup_trace_extent(trans, fs_info,
- child_bytenr,
+ ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
fs_info->nodesize,
GFP_NOFS);
if (ret)
@@ -1745,8 +1802,8 @@ walk_down:
}
if (level == 0) {
- ret = btrfs_qgroup_trace_leaf_items(trans,fs_info,
- path->nodes[level]);
+ ret = btrfs_qgroup_trace_leaf_items(trans,
+ path->nodes[level]);
if (ret)
goto out;
@@ -1981,12 +2038,11 @@ static int maybe_fs_roots(struct ulist *roots)
return is_fstree(unode->val);
}
-int
-btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 bytenr, u64 num_bytes,
- struct ulist *old_roots, struct ulist *new_roots)
+int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+ u64 num_bytes, struct ulist *old_roots,
+ struct ulist *new_roots)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct ulist *qgroups = NULL;
struct ulist *tmp = NULL;
u64 seq;
@@ -2116,9 +2172,10 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
ulist_del(record->old_roots, qgroup_to_skip,
0);
}
- ret = btrfs_qgroup_account_extent(trans, fs_info,
- record->bytenr, record->num_bytes,
- record->old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(trans, record->bytenr,
+ record->num_bytes,
+ record->old_roots,
+ new_roots);
record->old_roots = NULL;
new_roots = NULL;
}
@@ -2136,9 +2193,9 @@ cleanup:
/*
* called from commit_transaction. Writes all changed qgroups to disk.
*/
-int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info)
+int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root = fs_info->quota_root;
int ret = 0;
@@ -2152,11 +2209,11 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
struct btrfs_qgroup, dirty);
list_del_init(&qgroup->dirty);
spin_unlock(&fs_info->qgroup_lock);
- ret = update_qgroup_info_item(trans, quota_root, qgroup);
+ ret = update_qgroup_info_item(trans, qgroup);
if (ret)
fs_info->qgroup_flags |=
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- ret = update_qgroup_limit_item(trans, quota_root, qgroup);
+ ret = update_qgroup_limit_item(trans, qgroup);
if (ret)
fs_info->qgroup_flags |=
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -2168,7 +2225,7 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
spin_unlock(&fs_info->qgroup_lock);
- ret = update_qgroup_status_item(trans, fs_info, quota_root);
+ ret = update_qgroup_status_item(trans);
if (ret)
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -2181,13 +2238,13 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
* cause a transaction abort so we take extra care here to only error
* when a readonly fs is a reasonable outcome.
*/
-int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
- struct btrfs_qgroup_inherit *inherit)
+int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
+ u64 objectid, struct btrfs_qgroup_inherit *inherit)
{
int ret = 0;
int i;
u64 *i_qgroups;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root = fs_info->quota_root;
struct btrfs_qgroup *srcgroup;
struct btrfs_qgroup *dstgroup;
@@ -2229,22 +2286,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- if (srcid) {
- struct btrfs_root *srcroot;
- struct btrfs_key srckey;
-
- srckey.objectid = srcid;
- srckey.type = BTRFS_ROOT_ITEM_KEY;
- srckey.offset = (u64)-1;
- srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
- if (IS_ERR(srcroot)) {
- ret = PTR_ERR(srcroot);
- goto out;
- }
-
- level_size = fs_info->nodesize;
- }
-
/*
* add qgroup to all inherited groups
*/
@@ -2253,12 +2294,12 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
if (*i_qgroups == 0)
continue;
- ret = add_qgroup_relation_item(trans, quota_root,
- objectid, *i_qgroups);
+ ret = add_qgroup_relation_item(trans, objectid,
+ *i_qgroups);
if (ret && ret != -EEXIST)
goto out;
- ret = add_qgroup_relation_item(trans, quota_root,
- *i_qgroups, objectid);
+ ret = add_qgroup_relation_item(trans, *i_qgroups,
+ objectid);
if (ret && ret != -EEXIST)
goto out;
}
@@ -2281,7 +2322,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
dstgroup->rsv_excl = inherit->lim.rsv_excl;
- ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
+ ret = update_qgroup_limit_item(trans, dstgroup);
if (ret) {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
btrfs_info(fs_info,
@@ -2301,6 +2342,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
* our counts don't go crazy, so at this point the only
* difference between the two roots should be the root node.
*/
+ level_size = fs_info->nodesize;
dstgroup->rfer = srcgroup->rfer;
dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
dstgroup->excl = level_size;
@@ -2598,10 +2640,10 @@ static bool is_last_leaf(struct btrfs_path *path)
* returns < 0 on error, 0 when more leaves are to be scanned.
* returns < 0 on error, 0 when more leaves are to be scanned.
* returns 1 when done.
*/
-static int
-qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
- struct btrfs_trans_handle *trans)
+static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_key found;
struct extent_buffer *scratch_leaf = NULL;
struct ulist *roots = NULL;
@@ -2669,8 +2711,8 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
if (ret < 0)
goto out;
/* For rescan, just pass old_roots as NULL */
- ret = btrfs_qgroup_account_extent(trans, fs_info,
- found.objectid, num_bytes, NULL, roots);
+ ret = btrfs_qgroup_account_extent(trans, found.objectid,
+ num_bytes, NULL, roots);
if (ret < 0)
goto out;
}
@@ -2716,7 +2758,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
err = -EINTR;
} else {
- err = qgroup_rescan_leaf(fs_info, path, trans);
+ err = qgroup_rescan_leaf(trans, path);
}
if (err > 0)
btrfs_commit_transaction(trans);
@@ -2751,7 +2793,7 @@ out:
err);
goto done;
}
- ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
+ ret = update_qgroup_status_item(trans);
if (ret < 0) {
err = ret;
btrfs_err(fs_info, "fail to update qgroup status: %d", err);
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index d60dd06445ce..54b8bb282c0e 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -141,24 +141,19 @@ struct btrfs_qgroup {
#define QGROUP_RELEASE (1<<1)
#define QGROUP_FREE (1<<2)
-int btrfs_quota_enable(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info);
-int btrfs_quota_disable(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info);
+int btrfs_quota_enable(struct btrfs_fs_info *fs_info);
+int btrfs_quota_disable(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
bool interruptible);
-int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 src, u64 dst);
-int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 src, u64 dst);
-int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 qgroupid);
-int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 qgroupid);
-int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 qgroupid,
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+ u64 dst);
+int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+ u64 dst);
+int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
+int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
+int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
struct btrfs_qgroup_limit *limit);
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
@@ -217,9 +212,8 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
* Return <0 for error, like memory allocation failure or invalid parameter
* (NULL trans)
*/
-int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
- gfp_t gfp_flag);
+int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+ u64 num_bytes, gfp_t gfp_flag);
/*
* Inform qgroup to trace all leaf items of data
@@ -228,7 +222,6 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
* Return <0 for error(ENOMEM)
*/
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct extent_buffer *eb);
/*
* Inform qgroup to trace a whole subtree, including all its child tree
@@ -241,20 +234,15 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
* Return <0 for error(ENOMEM or tree search error)
*/
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct extent_buffer *root_eb,
u64 root_gen, int root_level);
-int
-btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 bytenr, u64 num_bytes,
- struct ulist *old_roots, struct ulist *new_roots);
+int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+ u64 num_bytes, struct ulist *old_roots,
+ struct ulist *new_roots);
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans);
-int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info);
-int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
- struct btrfs_qgroup_inherit *inherit);
+int btrfs_run_qgroups(struct btrfs_trans_handle *trans);
+int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
+ u64 objectid, struct btrfs_qgroup_inherit *inherit);
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
u64 ref_root, u64 num_bytes,
enum btrfs_qgroup_rsv_type type);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 5e4ad134b9ad..df41d7049936 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -5,32 +5,19 @@
*/
#include <linux/sched.h>
-#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
-#include <linux/buffer_head.h>
#include <linux/blkdev.h>
-#include <linux/random.h>
-#include <linux/iocontext.h>
-#include <linux/capability.h>
-#include <linux/ratelimit.h>
-#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
-#include <asm/div64.h>
#include "ctree.h"
-#include "extent_map.h"
#include "disk-io.h"
-#include "transaction.h"
-#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
-#include "check-integrity.h"
-#include "rcu-string.h"
/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT 1
@@ -175,8 +162,6 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
-static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
-static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
@@ -185,7 +170,13 @@ static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
int need_check);
-static void async_scrub_parity(struct btrfs_raid_bio *rbio);
+static void scrub_parity_work(struct btrfs_work *work);
+
+static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
+{
+ btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL);
+ btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
+}
/*
* the stripe hash table is used for locking, and to collect
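The start_async_work() hunk above collapses three near-identical wrappers (async_rmw_stripe, async_read_rebuild, async_scrub_parity) into one helper parameterized by the work function. The same shape with a trivial function-pointer dispatcher standing in for the btrfs work queue; all names are invented:

#include <stdio.h>

struct rbio { const char *name; };
typedef void (*work_fn)(struct rbio *);

static void rmw_work(struct rbio *r)          { printf("rmw %s\n", r->name); }
static void read_rebuild_work(struct rbio *r) { printf("rebuild %s\n", r->name); }
static void scrub_parity_work(struct rbio *r) { printf("scrub %s\n", r->name); }

/* One queueing helper instead of three copies of the same two lines. */
static void start_async_work(struct rbio *rbio, work_fn fn)
{
	/* a real implementation would enqueue the work item; here we just run it */
	fn(rbio);
}

int main(void)
{
	struct rbio r = { .name = "stripe0" };
	start_async_work(&r, rmw_work);
	start_async_work(&r, read_rebuild_work);
	start_async_work(&r, scrub_parity_work);
	return 0;
}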
@@ -260,7 +251,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
s = kmap(rbio->bio_pages[i]);
d = kmap(rbio->stripe_pages[i]);
- memcpy(d, s, PAGE_SIZE);
+ copy_page(d, s);
kunmap(rbio->bio_pages[i]);
kunmap(rbio->stripe_pages[i]);
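copy_page(dst, src) replaces memcpy(dst, src, PAGE_SIZE) wherever exactly one page is copied: the fixed length is encoded in the call and architectures may substitute an optimized page-copy routine. A rough userspace analogue (PAGE_SIZE and copy_page are redefined locally here, not the kernel's):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
/* userspace stand-in; the kernel version may be arch-optimized assembly */
static void copy_page(void *dst, const void *src)
{
	memcpy(dst, src, PAGE_SIZE);
}

int main(void)
{
	static char s[PAGE_SIZE] = "payload", d[PAGE_SIZE];
	copy_page(d, s);
	printf("%s\n", d);
	return 0;
}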
@@ -516,32 +507,21 @@ static void run_xor(void **pages, int src_cnt, ssize_t len)
}
/*
- * returns true if the bio list inside this rbio
- * covers an entire stripe (no rmw required).
- * Must be called with the bio list lock held, or
- * at a time when you know it is impossible to add
- * new bios into the list
+ * Returns true if the bio list inside this rbio covers an entire stripe (no
+ * rmw required).
*/
-static int __rbio_is_full(struct btrfs_raid_bio *rbio)
+static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
+ unsigned long flags;
unsigned long size = rbio->bio_list_bytes;
int ret = 1;
+ spin_lock_irqsave(&rbio->bio_list_lock, flags);
if (size != rbio->nr_data * rbio->stripe_len)
ret = 0;
-
BUG_ON(size > rbio->nr_data * rbio->stripe_len);
- return ret;
-}
-
-static int rbio_is_full(struct btrfs_raid_bio *rbio)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&rbio->bio_list_lock, flags);
- ret = __rbio_is_full(rbio);
spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
+
return ret;
}
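With the lock-free __rbio_is_full() variant gone, the predicate takes bio_list_lock itself, so every remaining caller gets a consistent snapshot and none can forget the lock. A pthread sketch of folding the lock into the predicate, with an invented structure:

#include <pthread.h>
#include <stdio.h>

struct rbio {
	pthread_mutex_t lock;
	unsigned long bytes, capacity;
};

/* the predicate owns the locking; callers cannot get it wrong */
static int rbio_is_full(struct rbio *r)
{
	int ret;

	pthread_mutex_lock(&r->lock);
	ret = (r->bytes == r->capacity);
	pthread_mutex_unlock(&r->lock);
	return ret;
}

int main(void)
{
	struct rbio r = { PTHREAD_MUTEX_INITIALIZER, 4096, 4096 };
	printf("full=%d\n", rbio_is_full(&r));
	return 0;
}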
@@ -812,16 +792,16 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
spin_unlock_irqrestore(&h->lock, flags);
if (next->operation == BTRFS_RBIO_READ_REBUILD)
- async_read_rebuild(next);
+ start_async_work(next, read_rebuild_work);
else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
steal_rbio(rbio, next);
- async_read_rebuild(next);
+ start_async_work(next, read_rebuild_work);
} else if (next->operation == BTRFS_RBIO_WRITE) {
steal_rbio(rbio, next);
- async_rmw_stripe(next);
+ start_async_work(next, rmw_work);
} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
steal_rbio(rbio, next);
- async_scrub_parity(next);
+ start_async_work(next, scrub_parity_work);
}
goto done_nolock;
@@ -1275,7 +1255,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
pointers);
} else {
/* raid5 */
- memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
+ copy_page(pointers[nr_data], pointers[0]);
run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
}
@@ -1343,7 +1323,7 @@ write_data:
bio->bi_private = rbio;
bio->bi_end_io = raid_write_end_io;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf = REQ_OP_WRITE;
submit_bio(bio);
}
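bio_set_op_attrs(bio, op, flags) only ever OR-ed the op and flags into bi_opf; with zero flags it is literally bio->bi_opf = REQ_OP_WRITE, so the direct assignment used throughout this patch is clearer. The equivalence, with stand-in constants (the numeric values are invented for the demo, not the kernel's):

#include <stdio.h>

#define REQ_OP_WRITE 1u
#define REQ_SYNC     (1u << 10)

struct bio { unsigned int bi_opf; };

static void bio_set_op_attrs(struct bio *bio, unsigned int op, unsigned int flags)
{
	bio->bi_opf = op | flags;
}

int main(void)
{
	struct bio a, b;

	bio_set_op_attrs(&a, REQ_OP_WRITE, 0);  /* the old helper... */
	b.bi_opf = REQ_OP_WRITE;                /* ...and the direct form */
	printf("%u %u equal=%d\n", a.bi_opf, b.bi_opf, a.bi_opf == b.bi_opf);
	return 0;
}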
@@ -1508,20 +1488,6 @@ cleanup:
rbio_orig_end_io(rbio, BLK_STS_IOERR);
}
-static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
-{
- btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
- btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
-}
-
-static void async_read_rebuild(struct btrfs_raid_bio *rbio)
-{
- btrfs_init_work(&rbio->work, btrfs_rmw_helper,
- read_rebuild_work, NULL, NULL);
-
- btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
-}
-
/*
* the stripe must be locked by the caller. It will
* unlock after all the writes are done
@@ -1599,7 +1565,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
bio->bi_private = rbio;
bio->bi_end_io = raid_rmw_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio->bi_opf = REQ_OP_READ;
btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
@@ -1652,7 +1618,7 @@ static int partial_stripe_write(struct btrfs_raid_bio *rbio)
ret = lock_stripe_add(rbio);
if (ret == 0)
- async_rmw_stripe(rbio);
+ start_async_work(rbio, rmw_work);
return 0;
}
@@ -1720,8 +1686,11 @@ static void run_plug(struct btrfs_plug_cb *plug)
list_del_init(&cur->plug_list);
if (rbio_is_full(cur)) {
+ int ret;
+
/* we have a full stripe, send it down */
- full_stripe_write(cur);
+ ret = full_stripe_write(cur);
+ BUG_ON(ret);
continue;
}
if (last) {
@@ -1941,9 +1910,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
BUG_ON(failb != -1);
pstripe:
/* Copy parity block into failed block to start with */
- memcpy(pointers[faila],
- pointers[rbio->nr_data],
- PAGE_SIZE);
+ copy_page(pointers[faila], pointers[rbio->nr_data]);
/* rearrange the pointer array */
p = pointers[faila];
@@ -2145,7 +2112,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
bio->bi_private = rbio;
bio->bi_end_io = raid_recover_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio->bi_opf = REQ_OP_READ;
btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
@@ -2448,7 +2415,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
pointers);
} else {
/* raid5 */
- memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
+ copy_page(pointers[nr_data], pointers[0]);
run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
}
@@ -2456,7 +2423,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
parity = kmap(p);
if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
- memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
+ copy_page(parity, pointers[rbio->scrubp]);
else
/* Parity is right, needn't writeback */
bitmap_clear(rbio->dbitmap, pagenr, 1);
@@ -2517,7 +2484,7 @@ submit_write:
bio->bi_private = rbio;
bio->bi_end_io = raid_write_end_io;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf = REQ_OP_WRITE;
submit_bio(bio);
}
@@ -2699,7 +2666,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
bio->bi_private = rbio;
bio->bi_end_io = raid56_parity_scrub_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio->bi_opf = REQ_OP_READ;
btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
@@ -2728,18 +2695,10 @@ static void scrub_parity_work(struct btrfs_work *work)
raid56_parity_scrub_stripe(rbio);
}
-static void async_scrub_parity(struct btrfs_raid_bio *rbio)
-{
- btrfs_init_work(&rbio->work, btrfs_rmw_helper,
- scrub_parity_work, NULL, NULL);
-
- btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
-}
-
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
if (!lock_stripe_add(rbio))
- async_scrub_parity(rbio);
+ start_async_work(rbio, scrub_parity_work);
}
/* The following code is used for dev replace of a missing RAID 5/6 device. */
@@ -2781,5 +2740,5 @@ raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
if (!lock_stripe_add(rbio))
- async_read_rebuild(rbio);
+ start_async_work(rbio, read_rebuild_work);
}
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 40f1bcef394d..dec14b739b10 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -7,7 +7,6 @@
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
-#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
@@ -355,7 +354,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
dev = bbio->stripes[nzones].dev;
/* cannot read ahead on missing device. */
- if (!dev->bdev)
+ if (!dev->bdev)
continue;
zone = reada_find_zone(dev, logical, bbio);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 879b76fa881a..8783a1776540 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -586,29 +586,6 @@ static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
return btrfs_get_fs_root(fs_info, &key, false);
}
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-static noinline_for_stack
-struct btrfs_root *find_tree_root(struct reloc_control *rc,
- struct extent_buffer *leaf,
- struct btrfs_extent_ref_v0 *ref0)
-{
- struct btrfs_root *root;
- u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
- u64 generation = btrfs_ref_generation_v0(leaf, ref0);
-
- BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);
-
- root = read_fs_root(rc->extent_root->fs_info, root_objectid);
- BUG_ON(IS_ERR(root));
-
- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
- generation != btrfs_root_generation(&root->root_item))
- return NULL;
-
- return root;
-}
-#endif
-
static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
unsigned long *ptr, unsigned long *end)
@@ -621,12 +598,11 @@ int find_inline_backref(struct extent_buffer *leaf, int slot,
btrfs_item_key_to_cpu(leaf, &key, slot);
item_size = btrfs_item_size_nr(leaf, slot);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (item_size < sizeof(*ei)) {
- WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
+ btrfs_print_v0_err(leaf->fs_info);
+ btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL);
return 1;
}
-#endif
ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
WARN_ON(!(btrfs_extent_flags(leaf, ei) &
BTRFS_EXTENT_FLAG_TREE_BLOCK));
@@ -792,7 +768,7 @@ again:
type = btrfs_get_extent_inline_ref_type(eb, iref,
BTRFS_REF_TYPE_BLOCK);
if (type == BTRFS_REF_TYPE_INVALID) {
- err = -EINVAL;
+ err = -EUCLEAN;
goto out;
}
key.type = type;
@@ -811,29 +787,7 @@ again:
goto next;
}
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
- key.type == BTRFS_EXTENT_REF_V0_KEY) {
- if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
- struct btrfs_extent_ref_v0 *ref0;
- ref0 = btrfs_item_ptr(eb, path1->slots[0],
- struct btrfs_extent_ref_v0);
- if (key.objectid == key.offset) {
- root = find_tree_root(rc, eb, ref0);
- if (root && !should_ignore_root(root))
- cur->root = root;
- else
- list_add(&cur->list, &useless);
- break;
- }
- if (is_cowonly_root(btrfs_ref_root_v0(eb,
- ref0)))
- cur->cowonly = 1;
- }
-#else
- ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY);
if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
-#endif
if (key.objectid == key.offset) {
/*
* only root blocks of reloc trees use
@@ -876,6 +830,12 @@ again:
edge->node[UPPER] = upper;
goto next;
+ } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
+ err = -EINVAL;
+ btrfs_print_v0_err(rc->extent_root->fs_info);
+ btrfs_handle_fs_error(rc->extent_root->fs_info, err,
+ NULL);
+ goto out;
} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
goto next;
}
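Format-v0 extent references are no longer parsed anywhere in relocation: they are detected (by key type here, by item size in the hunks below) and turned into a hard -EINVAL plus btrfs_print_v0_err(), since any filesystem still carrying v0 items predates the modern extent format. A toy illustration of the detect-by-size idea; the structs are simplified stand-ins:

#include <stdio.h>

struct extent_item_v0 { unsigned int refs; };                  /* legacy layout */
struct extent_item    { unsigned long long refs, generation; };/* current layout */

static int check_item(unsigned long item_size)
{
	if (item_size >= sizeof(struct extent_item))
		return 0;                 /* current format, parse normally */
	if (item_size == sizeof(struct extent_item_v0))
		return -22;               /* -EINVAL: unsupported v0 item */
	return -117;                      /* -EUCLEAN: corrupt item */
}

int main(void)
{
	printf("%d %d\n", check_item(sizeof(struct extent_item)),
	       check_item(sizeof(struct extent_item_v0)));
	return 0;
}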
@@ -1321,18 +1281,19 @@ static void __del_reloc_root(struct btrfs_root *root)
struct mapping_node *node = NULL;
struct reloc_control *rc = fs_info->reloc_ctl;
- spin_lock(&rc->reloc_root_tree.lock);
- rb_node = tree_search(&rc->reloc_root_tree.rb_root,
- root->node->start);
- if (rb_node) {
- node = rb_entry(rb_node, struct mapping_node, rb_node);
- rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
+ if (rc) {
+ spin_lock(&rc->reloc_root_tree.lock);
+ rb_node = tree_search(&rc->reloc_root_tree.rb_root,
+ root->node->start);
+ if (rb_node) {
+ node = rb_entry(rb_node, struct mapping_node, rb_node);
+ rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
+ }
+ spin_unlock(&rc->reloc_root_tree.lock);
+ if (!node)
+ return;
+ BUG_ON((struct btrfs_root *)node->data != root);
}
- spin_unlock(&rc->reloc_root_tree.lock);
-
- if (!node)
- return;
- BUG_ON((struct btrfs_root *)node->data != root);
spin_lock(&fs_info->trans_lock);
list_del_init(&root->root_list);
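__del_reloc_root() can now be reached after relocation has already torn down reloc_ctl (for example on unmount error paths), so the whole rb-tree removal is guarded by the rc check while the root_list unlinking still happens unconditionally. The defensive shape, reduced to a toy:

#include <stddef.h>
#include <stdio.h>

struct ctl { int entries; };

static struct ctl *reloc_ctl;   /* may already be NULL during teardown */

static void del_reloc_root(void)
{
	if (reloc_ctl) {        /* only touch the tree if it still exists */
		reloc_ctl->entries--;
		printf("removed, %d left\n", reloc_ctl->entries);
	}
	/* unconditional bookkeeping happens either way */
	printf("unlinked from dirty list\n");
}

int main(void)
{
	struct ctl c = { .entries = 1 };

	reloc_ctl = &c;
	del_reloc_root();
	reloc_ctl = NULL;
	del_reloc_root();       /* no crash after teardown */
	return 0;
}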
@@ -1918,13 +1879,12 @@ again:
* and tree block numbers, if current trans doesn't free
* data reloc tree inode.
*/
- ret = btrfs_qgroup_trace_subtree(trans, src, parent,
+ ret = btrfs_qgroup_trace_subtree(trans, parent,
btrfs_header_generation(parent),
btrfs_header_level(parent));
if (ret < 0)
break;
- ret = btrfs_qgroup_trace_subtree(trans, dest,
- path->nodes[level],
+ ret = btrfs_qgroup_trace_subtree(trans, path->nodes[level],
btrfs_header_generation(path->nodes[level]),
btrfs_header_level(path->nodes[level]));
if (ret < 0)
@@ -3333,48 +3293,6 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
return 0;
}
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-static int get_ref_objectid_v0(struct reloc_control *rc,
- struct btrfs_path *path,
- struct btrfs_key *extent_key,
- u64 *ref_objectid, int *path_change)
-{
- struct btrfs_key key;
- struct extent_buffer *leaf;
- struct btrfs_extent_ref_v0 *ref0;
- int ret;
- int slot;
-
- leaf = path->nodes[0];
- slot = path->slots[0];
- while (1) {
- if (slot >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(rc->extent_root, path);
- if (ret < 0)
- return ret;
- BUG_ON(ret > 0);
- leaf = path->nodes[0];
- slot = path->slots[0];
- if (path_change)
- *path_change = 1;
- }
- btrfs_item_key_to_cpu(leaf, &key, slot);
- if (key.objectid != extent_key->objectid)
- return -ENOENT;
-
- if (key.type != BTRFS_EXTENT_REF_V0_KEY) {
- slot++;
- continue;
- }
- ref0 = btrfs_item_ptr(leaf, slot,
- struct btrfs_extent_ref_v0);
- *ref_objectid = btrfs_ref_objectid_v0(leaf, ref0);
- break;
- }
- return 0;
-}
-#endif
-
/*
* helper to add a tree block to the list.
* the major work is getting the generation and level of the block
@@ -3407,23 +3325,12 @@ static int add_tree_block(struct reloc_control *rc,
level = (int)extent_key->offset;
}
generation = btrfs_extent_generation(eb, ei);
+ } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
+ btrfs_print_v0_err(eb->fs_info);
+ btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
+ return -EINVAL;
} else {
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- u64 ref_owner;
- int ret;
-
- BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0));
- ret = get_ref_objectid_v0(rc, path, extent_key,
- &ref_owner, NULL);
- if (ret < 0)
- return ret;
- BUG_ON(ref_owner >= BTRFS_MAX_LEVEL);
- level = (int)ref_owner;
- /* FIXME: get real generation */
- generation = 0;
-#else
BUG();
-#endif
}
btrfs_release_path(path);
@@ -3563,11 +3470,8 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
key.offset = 0;
inode = btrfs_iget(fs_info->sb, &key, root, NULL);
- if (IS_ERR(inode) || is_bad_inode(inode)) {
- if (!IS_ERR(inode))
- iput(inode);
+ if (IS_ERR(inode))
return -ENOENT;
- }
truncate:
ret = btrfs_check_trunc_cache_free_space(fs_info,
@@ -3781,12 +3685,7 @@ int add_data_references(struct reloc_control *rc,
eb = path->nodes[0];
ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (ptr + sizeof(struct btrfs_extent_item_v0) == end)
- ptr = end;
- else
-#endif
- ptr += sizeof(struct btrfs_extent_item);
+ ptr += sizeof(struct btrfs_extent_item);
while (ptr < end) {
iref = (struct btrfs_extent_inline_ref *)ptr;
@@ -3801,7 +3700,7 @@ int add_data_references(struct reloc_control *rc,
ret = find_data_references(rc, extent_key,
eb, dref, blocks);
} else {
- ret = -EINVAL;
+ ret = -EUCLEAN;
btrfs_err(rc->extent_root->fs_info,
"extent %llu slot %d has an invalid inline ref type",
eb->start, path->slots[0]);
@@ -3832,13 +3731,7 @@ int add_data_references(struct reloc_control *rc,
if (key.objectid != extent_key->objectid)
break;
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (key.type == BTRFS_SHARED_DATA_REF_KEY ||
- key.type == BTRFS_EXTENT_REF_V0_KEY) {
-#else
- BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
-#endif
ret = __add_tree_block(rc, key.offset, blocksize,
blocks);
} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
@@ -3846,6 +3739,10 @@ int add_data_references(struct reloc_control *rc,
struct btrfs_extent_data_ref);
ret = find_data_references(rc, extent_key,
eb, dref, blocks);
+ } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
+ btrfs_print_v0_err(eb->fs_info);
+ btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
+ ret = -EINVAL;
} else {
ret = 0;
}
@@ -4084,41 +3981,13 @@ restart:
flags = btrfs_extent_flags(path->nodes[0], ei);
ret = check_extent_flags(flags);
BUG_ON(ret);
-
+ } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
+ err = -EINVAL;
+ btrfs_print_v0_err(trans->fs_info);
+ btrfs_abort_transaction(trans, err);
+ break;
} else {
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- u64 ref_owner;
- int path_change = 0;
-
- BUG_ON(item_size !=
- sizeof(struct btrfs_extent_item_v0));
- ret = get_ref_objectid_v0(rc, path, &key, &ref_owner,
- &path_change);
- if (ret < 0) {
- err = ret;
- break;
- }
- if (ref_owner < BTRFS_FIRST_FREE_OBJECTID)
- flags = BTRFS_EXTENT_FLAG_TREE_BLOCK;
- else
- flags = BTRFS_EXTENT_FLAG_DATA;
-
- if (path_change) {
- btrfs_release_path(path);
-
- path->search_commit_root = 1;
- path->skip_locking = 1;
- ret = btrfs_search_slot(NULL, rc->extent_root,
- &key, path, 0, 0);
- if (ret < 0) {
- err = ret;
- break;
- }
- BUG_ON(ret > 0);
- }
-#else
BUG();
-#endif
}
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
@@ -4169,8 +4038,7 @@ restart:
}
}
if (trans && progress && err == -ENOSPC) {
- ret = btrfs_force_chunk_alloc(trans, fs_info,
- rc->block_group->flags);
+ ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
if (ret == 1) {
err = 0;
progress = 0;
@@ -4284,7 +4152,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
inode = btrfs_iget(fs_info->sb, &key, root, NULL);
- BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
+ BUG_ON(IS_ERR(inode));
BTRFS_I(inode)->index_cnt = group->key.objectid;
err = btrfs_orphan_add(trans, BTRFS_I(inode));
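Both this hunk and the delete_block_group_cache() hunk above drop is_bad_inode() because btrfs_iget() stopped returning half-initialized inodes: an error is now always an ERR_PTR, so IS_ERR() alone suffices. The ERR_PTR encoding itself, reimplemented in userspace purely for illustration (the kernel's real versions live in linux/err.h):

#include <stdio.h>

#define MAX_ERRNO 4095

/* errno values are smuggled in the last page of the address space */
static inline void *ERR_PTR(long error)    { return (void *)error; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *iget(int fail)
{
	static int inode = 42;
	return fail ? ERR_PTR(-2 /* -ENOENT */) : &inode;
}

int main(void)
{
	void *p = iget(1);
	if (IS_ERR(p))
		printf("error %ld\n", PTR_ERR(p));
	p = iget(0);
	if (!IS_ERR(p))
		printf("inode %d\n", *(int *)p);
	return 0;
}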
@@ -4375,7 +4243,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
BUG_ON(!rc->block_group);
- ret = btrfs_inc_block_group_ro(fs_info, rc->block_group);
+ ret = btrfs_inc_block_group_ro(rc->block_group);
if (ret) {
err = ret;
goto out;
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index c451285976ac..65bda0682928 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -320,9 +320,9 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
/* drop the root item for 'key' from the tree root */
int btrfs_del_root(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, const struct btrfs_key *key)
+ const struct btrfs_key *key)
{
- struct btrfs_root *root = fs_info->tree_root;
+ struct btrfs_root *root = trans->fs_info->tree_root;
struct btrfs_path *path;
int ret;
@@ -341,13 +341,12 @@ out:
return ret;
}
-int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
- const char *name, int name_len)
+int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ u64 ref_id, u64 dirid, u64 *sequence, const char *name,
+ int name_len)
{
- struct btrfs_root *tree_root = fs_info->tree_root;
+ struct btrfs_root *tree_root = trans->fs_info->tree_root;
struct btrfs_path *path;
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
@@ -413,12 +412,11 @@ out:
*
* Will return 0, -ENOMEM, or anything from the CoW path
*/
-int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
- const char *name, int name_len)
+int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ u64 ref_id, u64 dirid, u64 sequence, const char *name,
+ int name_len)
{
- struct btrfs_root *tree_root = fs_info->tree_root;
+ struct btrfs_root *tree_root = trans->fs_info->tree_root;
struct btrfs_key key;
int ret;
struct btrfs_path *path;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6702896cdb8f..3be1456b5116 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -188,32 +188,6 @@ struct scrub_ctx {
refcount_t refs;
};
-struct scrub_fixup_nodatasum {
- struct scrub_ctx *sctx;
- struct btrfs_device *dev;
- u64 logical;
- struct btrfs_root *root;
- struct btrfs_work work;
- int mirror_num;
-};
-
-struct scrub_nocow_inode {
- u64 inum;
- u64 offset;
- u64 root;
- struct list_head list;
-};
-
-struct scrub_copy_nocow_ctx {
- struct scrub_ctx *sctx;
- u64 logical;
- u64 len;
- int mirror_num;
- u64 physical_for_dev_replace;
- struct list_head inodes;
- struct btrfs_work work;
-};
-
struct scrub_warning {
struct btrfs_path *path;
u64 extent_item_size;
@@ -232,8 +206,6 @@ struct full_stripe_lock {
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
-static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
-static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
struct scrub_block *sblocks_for_recheck);
@@ -277,13 +249,6 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
-static int write_page_nocow(struct scrub_ctx *sctx,
- u64 physical_for_dev_replace, struct page *page);
-static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
- struct scrub_copy_nocow_ctx *ctx);
-static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
- int mirror_num, u64 physical_for_dev_replace);
-static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);
@@ -555,60 +520,6 @@ out:
return ret;
}
-/*
- * used for workers that require transaction commits (i.e., for the
- * NOCOW case)
- */
-static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
-{
- struct btrfs_fs_info *fs_info = sctx->fs_info;
-
- refcount_inc(&sctx->refs);
- /*
- * increment scrubs_running to prevent cancel requests from
- * completing as long as a worker is running. we must also
- * increment scrubs_paused to prevent deadlocking on pause
- * requests used for transactions commits (as the worker uses a
- * transaction context). it is safe to regard the worker
- * as paused for all matters practical. effectively, we only
- * avoid cancellation requests from completing.
- */
- mutex_lock(&fs_info->scrub_lock);
- atomic_inc(&fs_info->scrubs_running);
- atomic_inc(&fs_info->scrubs_paused);
- mutex_unlock(&fs_info->scrub_lock);
-
- /*
- * check if @scrubs_running=@scrubs_paused condition
- * inside wait_event() is not an atomic operation.
- * which means we may inc/dec @scrub_running/paused
- * at any time. Let's wake up @scrub_pause_wait as
- * much as we can to let commit transaction blocked less.
- */
- wake_up(&fs_info->scrub_pause_wait);
-
- atomic_inc(&sctx->workers_pending);
-}
-
-/* used for workers that require transaction commits */
-static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
-{
- struct btrfs_fs_info *fs_info = sctx->fs_info;
-
- /*
- * see scrub_pending_trans_workers_inc() why we're pretending
- * to be paused in the scrub counters
- */
- mutex_lock(&fs_info->scrub_lock);
- atomic_dec(&fs_info->scrubs_running);
- atomic_dec(&fs_info->scrubs_paused);
- mutex_unlock(&fs_info->scrub_lock);
- atomic_dec(&sctx->workers_pending);
- wake_up(&fs_info->scrub_pause_wait);
- wake_up(&sctx->list_wait);
- scrub_put_ctx(sctx);
-}
-
static void scrub_free_csums(struct scrub_ctx *sctx)
{
while (!list_empty(&sctx->csum_list)) {
@@ -882,194 +793,6 @@ out:
btrfs_free_path(path);
}
-static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
-{
- struct page *page = NULL;
- unsigned long index;
- struct scrub_fixup_nodatasum *fixup = fixup_ctx;
- int ret;
- int corrected = 0;
- struct btrfs_key key;
- struct inode *inode = NULL;
- struct btrfs_fs_info *fs_info;
- u64 end = offset + PAGE_SIZE - 1;
- struct btrfs_root *local_root;
- int srcu_index;
-
- key.objectid = root;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
-
- fs_info = fixup->root->fs_info;
- srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
-
- local_root = btrfs_read_fs_root_no_name(fs_info, &key);
- if (IS_ERR(local_root)) {
- srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
- return PTR_ERR(local_root);
- }
-
- key.type = BTRFS_INODE_ITEM_KEY;
- key.objectid = inum;
- key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
- srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
-
- index = offset >> PAGE_SHIFT;
-
- page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
- if (!page) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (PageUptodate(page)) {
- if (PageDirty(page)) {
- /*
- * we need to write the data to the defect sector. the
- * data that was in that sector is not in memory,
- * because the page was modified. we must not write the
- * modified page to that sector.
- *
- * TODO: what could be done here: wait for the delalloc
- * runner to write out that page (might involve
- * COW) and see whether the sector is still
- * referenced afterwards.
- *
- * For the meantime, we'll treat this error
- * incorrectable, although there is a chance that a
- * later scrub will find the bad sector again and that
- * there's no dirty page in memory, then.
- */
- ret = -EIO;
- goto out;
- }
- ret = repair_io_failure(fs_info, inum, offset, PAGE_SIZE,
- fixup->logical, page,
- offset - page_offset(page),
- fixup->mirror_num);
- unlock_page(page);
- corrected = !ret;
- } else {
- /*
- * we need to get good data first. the general readpage path
- * will call repair_io_failure for us, we just have to make
- * sure we read the bad mirror.
- */
- ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
- EXTENT_DAMAGED);
- if (ret) {
- /* set_extent_bits should give proper error */
- WARN_ON(ret > 0);
- if (ret > 0)
- ret = -EFAULT;
- goto out;
- }
-
- ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
- btrfs_get_extent,
- fixup->mirror_num);
- wait_on_page_locked(page);
-
- corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
- end, EXTENT_DAMAGED, 0, NULL);
- if (!corrected)
- clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
- EXTENT_DAMAGED);
- }
-
-out:
- if (page)
- put_page(page);
-
- iput(inode);
-
- if (ret < 0)
- return ret;
-
- if (ret == 0 && corrected) {
- /*
- * we only need to call readpage for one of the inodes belonging
- * to this extent. so make iterate_extent_inodes stop
- */
- return 1;
- }
-
- return -EIO;
-}
-
-static void scrub_fixup_nodatasum(struct btrfs_work *work)
-{
- struct btrfs_fs_info *fs_info;
- int ret;
- struct scrub_fixup_nodatasum *fixup;
- struct scrub_ctx *sctx;
- struct btrfs_trans_handle *trans = NULL;
- struct btrfs_path *path;
- int uncorrectable = 0;
-
- fixup = container_of(work, struct scrub_fixup_nodatasum, work);
- sctx = fixup->sctx;
- fs_info = fixup->root->fs_info;
-
- path = btrfs_alloc_path();
- if (!path) {
- spin_lock(&sctx->stat_lock);
- ++sctx->stat.malloc_errors;
- spin_unlock(&sctx->stat_lock);
- uncorrectable = 1;
- goto out;
- }
-
- trans = btrfs_join_transaction(fixup->root);
- if (IS_ERR(trans)) {
- uncorrectable = 1;
- goto out;
- }
-
- /*
- * the idea is to trigger a regular read through the standard path. we
- * read a page from the (failed) logical address by specifying the
- * corresponding copynum of the failed sector. thus, that readpage is
- * expected to fail.
- * that is the point where on-the-fly error correction will kick in
- * (once it's finished) and rewrite the failed sector if a good copy
- * can be found.
- */
- ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
- scrub_fixup_readpage, fixup, false);
- if (ret < 0) {
- uncorrectable = 1;
- goto out;
- }
- WARN_ON(ret != 1);
-
- spin_lock(&sctx->stat_lock);
- ++sctx->stat.corrected_errors;
- spin_unlock(&sctx->stat_lock);
-
-out:
- if (trans && !IS_ERR(trans))
- btrfs_end_transaction(trans);
- if (uncorrectable) {
- spin_lock(&sctx->stat_lock);
- ++sctx->stat.uncorrectable_errors;
- spin_unlock(&sctx->stat_lock);
- btrfs_dev_replace_stats_inc(
- &fs_info->dev_replace.num_uncorrectable_read_errors);
- btrfs_err_rl_in_rcu(fs_info,
- "unable to fixup (nodatasum) error at logical %llu on dev %s",
- fixup->logical, rcu_str_deref(fixup->dev->name));
- }
-
- btrfs_free_path(path);
- kfree(fixup);
-
- scrub_pending_trans_workers_dec(sctx);
-}
-
static inline void scrub_get_recover(struct scrub_recover *recover)
{
refcount_inc(&recover->refs);
@@ -1264,42 +987,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
}
/*
- * NOTE: Even for nodatasum case, it's still possible that it's a
- * compressed data extent, thus scrub_fixup_nodatasum(), which write
- * inode page cache onto disk, could cause serious data corruption.
- *
- * So here we could only read from disk, and hope our recovery could
- * reach disk before the newer write.
- */
- if (0 && !is_metadata && !have_csum) {
- struct scrub_fixup_nodatasum *fixup_nodatasum;
-
- WARN_ON(sctx->is_dev_replace);
-
- /*
- * !is_metadata and !have_csum, this means that the data
- * might not be COWed, that it might be modified
- * concurrently. The general strategy to work on the
- * commit root does not help in the case when COW is not
- * used.
- */
- fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
- if (!fixup_nodatasum)
- goto did_not_correct_error;
- fixup_nodatasum->sctx = sctx;
- fixup_nodatasum->dev = dev;
- fixup_nodatasum->logical = logical;
- fixup_nodatasum->root = fs_info->extent_root;
- fixup_nodatasum->mirror_num = failed_mirror_index + 1;
- scrub_pending_trans_workers_inc(sctx);
- btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
- scrub_fixup_nodatasum, NULL, NULL);
- btrfs_queue_work(fs_info->scrub_workers,
- &fixup_nodatasum->work);
- goto out;
- }
-
- /*
* now build and submit the bios for the other mirrors, check
* checksums.
* First try to pick the mirror which is completely without I/O
@@ -1866,7 +1553,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
bio = btrfs_io_bio_alloc(1);
bio_set_dev(bio, page_bad->dev->bdev);
bio->bi_iter.bi_sector = page_bad->physical >> 9;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf = REQ_OP_WRITE;
ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
if (PAGE_SIZE != ret) {
@@ -1961,7 +1648,7 @@ again:
bio->bi_end_io = scrub_wr_bio_end_io;
bio_set_dev(bio, sbio->dev->bdev);
bio->bi_iter.bi_sector = sbio->physical >> 9;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf = REQ_OP_WRITE;
sbio->status = 0;
} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
spage->physical_for_dev_replace ||
@@ -2361,7 +2048,7 @@ again:
bio->bi_end_io = scrub_bio_end_io;
bio_set_dev(bio, sbio->dev->bdev);
bio->bi_iter.bi_sector = sbio->physical >> 9;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio->bi_opf = REQ_OP_READ;
sbio->status = 0;
} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
spage->physical ||
@@ -2800,17 +2487,10 @@ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
have_csum = scrub_find_csum(sctx, logical, csum);
if (have_csum == 0)
++sctx->stat.no_csum;
- if (0 && sctx->is_dev_replace && !have_csum) {
- ret = copy_nocow_pages(sctx, logical, l,
- mirror_num,
- physical_for_dev_replace);
- goto behind_scrub_pages;
- }
}
ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
mirror_num, have_csum ? csum : NULL, 0,
physical_for_dev_replace);
-behind_scrub_pages:
if (ret)
return ret;
len -= l;
@@ -3863,7 +3543,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
* -> btrfs_scrub_pause()
*/
scrub_pause_on(fs_info);
- ret = btrfs_inc_block_group_ro(fs_info, cache);
+ ret = btrfs_inc_block_group_ro(cache);
if (!ret && is_dev_replace) {
/*
* If we are doing a device replace wait for any tasks
@@ -3982,14 +3662,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
if (!cache->removed && !cache->ro && cache->reserved == 0 &&
btrfs_block_group_used(&cache->item) == 0) {
spin_unlock(&cache->lock);
- spin_lock(&fs_info->unused_bgs_lock);
- if (list_empty(&cache->bg_list)) {
- btrfs_get_block_group(cache);
- trace_btrfs_add_unused_block_group(cache);
- list_add_tail(&cache->bg_list,
- &fs_info->unused_bgs);
- }
- spin_unlock(&fs_info->unused_bgs_lock);
+ btrfs_mark_bg_unused(cache);
} else {
spin_unlock(&cache->lock);
}
@@ -4072,10 +3745,6 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
if (!fs_info->scrub_wr_completion_workers)
goto fail_scrub_wr_completion_workers;
- fs_info->scrub_nocow_workers =
- btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
- if (!fs_info->scrub_nocow_workers)
- goto fail_scrub_nocow_workers;
fs_info->scrub_parity_workers =
btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
max_active, 2);
@@ -4086,8 +3755,6 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
return 0;
fail_scrub_parity_workers:
- btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
-fail_scrub_nocow_workers:
btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
fail_scrub_wr_completion_workers:
btrfs_destroy_workqueue(fs_info->scrub_workers);
@@ -4100,7 +3767,6 @@ static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
if (--fs_info->scrub_workers_refcnt == 0) {
btrfs_destroy_workqueue(fs_info->scrub_workers);
btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
- btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
}
WARN_ON(fs_info->scrub_workers_refcnt < 0);
@@ -4113,7 +3779,6 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
struct scrub_ctx *sctx;
int ret;
struct btrfs_device *dev;
- struct rcu_string *name;
if (btrfs_fs_closing(fs_info))
return -EINVAL;
@@ -4167,11 +3832,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (!is_dev_replace && !readonly &&
!test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- rcu_read_lock();
- name = rcu_dereference(dev->name);
- btrfs_err(fs_info, "scrub: device %s is not writable",
- name->str);
- rcu_read_unlock();
+ btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
+ rcu_str_deref(dev->name));
return -EROFS;
}
@@ -4359,330 +4021,3 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
*extent_dev = bbio->stripes[0].dev;
btrfs_put_bbio(bbio);
}
-
-static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
- int mirror_num, u64 physical_for_dev_replace)
-{
- struct scrub_copy_nocow_ctx *nocow_ctx;
- struct btrfs_fs_info *fs_info = sctx->fs_info;
-
- nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
- if (!nocow_ctx) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.malloc_errors++;
- spin_unlock(&sctx->stat_lock);
- return -ENOMEM;
- }
-
- scrub_pending_trans_workers_inc(sctx);
-
- nocow_ctx->sctx = sctx;
- nocow_ctx->logical = logical;
- nocow_ctx->len = len;
- nocow_ctx->mirror_num = mirror_num;
- nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
- btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
- copy_nocow_pages_worker, NULL, NULL);
- INIT_LIST_HEAD(&nocow_ctx->inodes);
- btrfs_queue_work(fs_info->scrub_nocow_workers,
- &nocow_ctx->work);
-
- return 0;
-}
-
-static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
-{
- struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
- struct scrub_nocow_inode *nocow_inode;
-
- nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
- if (!nocow_inode)
- return -ENOMEM;
- nocow_inode->inum = inum;
- nocow_inode->offset = offset;
- nocow_inode->root = root;
- list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
- return 0;
-}
-
-#define COPY_COMPLETE 1
-
-static void copy_nocow_pages_worker(struct btrfs_work *work)
-{
- struct scrub_copy_nocow_ctx *nocow_ctx =
- container_of(work, struct scrub_copy_nocow_ctx, work);
- struct scrub_ctx *sctx = nocow_ctx->sctx;
- struct btrfs_fs_info *fs_info = sctx->fs_info;
- struct btrfs_root *root = fs_info->extent_root;
- u64 logical = nocow_ctx->logical;
- u64 len = nocow_ctx->len;
- int mirror_num = nocow_ctx->mirror_num;
- u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
- int ret;
- struct btrfs_trans_handle *trans = NULL;
- struct btrfs_path *path;
- int not_written = 0;
-
- path = btrfs_alloc_path();
- if (!path) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.malloc_errors++;
- spin_unlock(&sctx->stat_lock);
- not_written = 1;
- goto out;
- }
-
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans)) {
- not_written = 1;
- goto out;
- }
-
- ret = iterate_inodes_from_logical(logical, fs_info, path,
- record_inode_for_nocow, nocow_ctx, false);
- if (ret != 0 && ret != -ENOENT) {
- btrfs_warn(fs_info,
- "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
- logical, physical_for_dev_replace, len, mirror_num,
- ret);
- not_written = 1;
- goto out;
- }
-
- btrfs_end_transaction(trans);
- trans = NULL;
- while (!list_empty(&nocow_ctx->inodes)) {
- struct scrub_nocow_inode *entry;
- entry = list_first_entry(&nocow_ctx->inodes,
- struct scrub_nocow_inode,
- list);
- list_del_init(&entry->list);
- ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
- entry->root, nocow_ctx);
- kfree(entry);
- if (ret == COPY_COMPLETE) {
- ret = 0;
- break;
- } else if (ret) {
- break;
- }
- }
-out:
- while (!list_empty(&nocow_ctx->inodes)) {
- struct scrub_nocow_inode *entry;
- entry = list_first_entry(&nocow_ctx->inodes,
- struct scrub_nocow_inode,
- list);
- list_del_init(&entry->list);
- kfree(entry);
- }
- if (trans && !IS_ERR(trans))
- btrfs_end_transaction(trans);
- if (not_written)
- btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
- num_uncorrectable_read_errors);
-
- btrfs_free_path(path);
- kfree(nocow_ctx);
-
- scrub_pending_trans_workers_dec(sctx);
-}
-
-static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
- u64 logical)
-{
- struct extent_state *cached_state = NULL;
- struct btrfs_ordered_extent *ordered;
- struct extent_io_tree *io_tree;
- struct extent_map *em;
- u64 lockstart = start, lockend = start + len - 1;
- int ret = 0;
-
- io_tree = &inode->io_tree;
-
- lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
- ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
- if (ordered) {
- btrfs_put_ordered_extent(ordered);
- ret = 1;
- goto out_unlock;
- }
-
- em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
- goto out_unlock;
- }
-
- /*
- * This extent does not actually cover the logical extent anymore,
- * move on to the next inode.
- */
- if (em->block_start > logical ||
- em->block_start + em->block_len < logical + len ||
- test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
- free_extent_map(em);
- ret = 1;
- goto out_unlock;
- }
- free_extent_map(em);
-
-out_unlock:
- unlock_extent_cached(io_tree, lockstart, lockend, &cached_state);
- return ret;
-}
-
-static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
- struct scrub_copy_nocow_ctx *nocow_ctx)
-{
- struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
- struct btrfs_key key;
- struct inode *inode;
- struct page *page;
- struct btrfs_root *local_root;
- struct extent_io_tree *io_tree;
- u64 physical_for_dev_replace;
- u64 nocow_ctx_logical;
- u64 len = nocow_ctx->len;
- unsigned long index;
- int srcu_index;
- int ret = 0;
- int err = 0;
-
- key.objectid = root;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
-
- srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
-
- local_root = btrfs_read_fs_root_no_name(fs_info, &key);
- if (IS_ERR(local_root)) {
- srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
- return PTR_ERR(local_root);
- }
-
- key.type = BTRFS_INODE_ITEM_KEY;
- key.objectid = inum;
- key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
- srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
-
- /* Avoid truncate/dio/punch hole.. */
- inode_lock(inode);
- inode_dio_wait(inode);
-
- physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
- io_tree = &BTRFS_I(inode)->io_tree;
- nocow_ctx_logical = nocow_ctx->logical;
-
- ret = check_extent_to_block(BTRFS_I(inode), offset, len,
- nocow_ctx_logical);
- if (ret) {
- ret = ret > 0 ? 0 : ret;
- goto out;
- }
-
- while (len >= PAGE_SIZE) {
- index = offset >> PAGE_SHIFT;
-again:
- page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
- if (!page) {
- btrfs_err(fs_info, "find_or_create_page() failed");
- ret = -ENOMEM;
- goto out;
- }
-
- if (PageUptodate(page)) {
- if (PageDirty(page))
- goto next_page;
- } else {
- ClearPageError(page);
- err = extent_read_full_page(io_tree, page,
- btrfs_get_extent,
- nocow_ctx->mirror_num);
- if (err) {
- ret = err;
- goto next_page;
- }
-
- lock_page(page);
- /*
- * If the page has been removed from the page cache,
- * the data on it is meaningless: it may be the old
- * copy, while the new data may have been written into
- * a new page in the page cache.
- */
- if (page->mapping != inode->i_mapping) {
- unlock_page(page);
- put_page(page);
- goto again;
- }
- if (!PageUptodate(page)) {
- ret = -EIO;
- goto next_page;
- }
- }
-
- ret = check_extent_to_block(BTRFS_I(inode), offset, len,
- nocow_ctx_logical);
- if (ret) {
- ret = ret > 0 ? 0 : ret;
- goto next_page;
- }
-
- err = write_page_nocow(nocow_ctx->sctx,
- physical_for_dev_replace, page);
- if (err)
- ret = err;
-next_page:
- unlock_page(page);
- put_page(page);
-
- if (ret)
- break;
-
- offset += PAGE_SIZE;
- physical_for_dev_replace += PAGE_SIZE;
- nocow_ctx_logical += PAGE_SIZE;
- len -= PAGE_SIZE;
- }
- ret = COPY_COMPLETE;
-out:
- inode_unlock(inode);
- iput(inode);
- return ret;
-}
-
-static int write_page_nocow(struct scrub_ctx *sctx,
- u64 physical_for_dev_replace, struct page *page)
-{
- struct bio *bio;
- struct btrfs_device *dev;
-
- dev = sctx->wr_tgtdev;
- if (!dev)
- return -EIO;
- if (!dev->bdev) {
- btrfs_warn_rl(dev->fs_info,
- "scrub write_page_nocow(bdev == NULL) is unexpected");
- return -EIO;
- }
- bio = btrfs_io_bio_alloc(1);
- bio->bi_iter.bi_size = 0;
- bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
- bio_set_dev(bio, dev->bdev);
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
- /* bio_add_page won't fail on a freshly allocated bio */
- bio_add_page(bio, page, PAGE_SIZE, 0);
-
- if (btrfsic_submit_bio_wait(bio)) {
- bio_put(bio);
- btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
- return -EIO;
- }
-
- bio_put(bio);
- return 0;
-}
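
The block removed above is the scrub nocow copy machinery. One idiom worth noting in it is that the inode list was drained both after processing and again in the error path, so no entry could leak. A minimal userspace sketch of that drain-and-free pattern (hypothetical types, plain pointers instead of the kernel list API):

/*
 * Drain-and-free idiom from the removed nocow code, as a standalone
 * program. Not the kernel API; types are made up for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int inum;			/* stand-in for inum/offset/root */
	struct entry *next;
};

static void drain(struct entry **head)
{
	while (*head) {
		struct entry *e = *head;
		*head = e->next;	/* list_del_init() analogue */
		printf("freeing inode %d\n", e->inum);
		free(e);		/* kfree() analogue */
	}
}

int main(void)
{
	struct entry *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct entry *e = malloc(sizeof(*e));
		e->inum = i;
		e->next = head;
		head = e;
	}
	drain(&head);	/* runs on both the success and the error path */
	return 0;
}
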
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index c47f62b19226..ba8950bfd9c7 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -100,6 +100,7 @@ struct send_ctx {
u64 cur_inode_rdev;
u64 cur_inode_last_extent;
u64 cur_inode_next_write_offset;
+ bool ignore_cur_inode;
u64 send_progress;
@@ -1500,7 +1501,7 @@ static int read_symlink(struct btrfs_root *root,
BUG_ON(compression);
off = btrfs_file_extent_inline_start(ei);
- len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);
+ len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
@@ -5006,6 +5007,15 @@ static int send_hole(struct send_ctx *sctx, u64 end)
u64 len;
int ret = 0;
+ /*
+ * A hole that starts at EOF or beyond it. Since we do not yet support
+ * fallocate (for extent preallocation and hole punching), sending a
+ * write of zeroes starting at EOF or beyond would later require issuing
+ * a truncate operation which would undo the write and achieve nothing.
+ */
+ if (offset >= sctx->cur_inode_size)
+ return 0;
+
if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
return send_update_extent(sctx, offset, end - offset);
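
The new early return means a hole starting at or beyond EOF is never sent: with cur_inode_size of 4096, for example, a hole at offset 4096 produces no write command at all. A trivial standalone sketch of the predicate (hypothetical helper, not the kernel function):

/* A hole at or past EOF needs no write of zeroes. */
#include <assert.h>
#include <stdint.h>

static int hole_needs_send(uint64_t offset, uint64_t cur_inode_size)
{
	return offset < cur_inode_size;
}

int main(void)
{
	assert(!hole_needs_send(4096, 4096));	/* hole starts at EOF: skip */
	assert(hole_needs_send(0, 4096));	/* hole inside the file: send */
	return 0;
}
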
@@ -5160,7 +5170,7 @@ static int clone_range(struct send_ctx *sctx,
ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
type = btrfs_file_extent_type(leaf, ei);
if (type == BTRFS_FILE_EXTENT_INLINE) {
- ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
+ ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
ext_len = PAGE_ALIGN(ext_len);
} else {
ext_len = btrfs_file_extent_num_bytes(leaf, ei);
@@ -5236,8 +5246,7 @@ static int send_write_or_clone(struct send_ctx *sctx,
struct btrfs_file_extent_item);
type = btrfs_file_extent_type(path->nodes[0], ei);
if (type == BTRFS_FILE_EXTENT_INLINE) {
- len = btrfs_file_extent_inline_len(path->nodes[0],
- path->slots[0], ei);
+ len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
/*
* it is possible the inline item won't cover the whole page,
* but there may be items after this page. Make
@@ -5375,7 +5384,7 @@ static int is_extent_unchanged(struct send_ctx *sctx,
}
if (right_type == BTRFS_FILE_EXTENT_INLINE) {
- right_len = btrfs_file_extent_inline_len(eb, slot, ei);
+ right_len = btrfs_file_extent_ram_bytes(eb, ei);
right_len = PAGE_ALIGN(right_len);
} else {
right_len = btrfs_file_extent_num_bytes(eb, ei);
@@ -5496,8 +5505,7 @@ static int get_last_extent(struct send_ctx *sctx, u64 offset)
struct btrfs_file_extent_item);
type = btrfs_file_extent_type(path->nodes[0], fi);
if (type == BTRFS_FILE_EXTENT_INLINE) {
- u64 size = btrfs_file_extent_inline_len(path->nodes[0],
- path->slots[0], fi);
+ u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
extent_end = ALIGN(key.offset + size,
sctx->send_root->fs_info->sectorsize);
} else {
@@ -5560,7 +5568,7 @@ static int range_is_hole_in_parent(struct send_ctx *sctx,
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
if (btrfs_file_extent_type(leaf, fi) ==
BTRFS_FILE_EXTENT_INLINE) {
- u64 size = btrfs_file_extent_inline_len(leaf, slot, fi);
+ u64 size = btrfs_file_extent_ram_bytes(leaf, fi);
extent_end = ALIGN(key.offset + size,
root->fs_info->sectorsize);
@@ -5606,8 +5614,7 @@ static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
struct btrfs_file_extent_item);
type = btrfs_file_extent_type(path->nodes[0], fi);
if (type == BTRFS_FILE_EXTENT_INLINE) {
- u64 size = btrfs_file_extent_inline_len(path->nodes[0],
- path->slots[0], fi);
+ u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
extent_end = ALIGN(key->offset + size,
sctx->send_root->fs_info->sectorsize);
} else {
@@ -5799,6 +5806,9 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
int pending_move = 0;
int refs_processed = 0;
+ if (sctx->ignore_cur_inode)
+ return 0;
+
ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
&refs_processed);
if (ret < 0)
@@ -5917,6 +5927,93 @@ out:
return ret;
}
+struct parent_paths_ctx {
+ struct list_head *refs;
+ struct send_ctx *sctx;
+};
+
+static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
+ void *ctx)
+{
+ struct parent_paths_ctx *ppctx = ctx;
+
+ return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
+ ppctx->refs);
+}
+
+/*
+ * Issue unlink operations for all paths of the current inode found in the
+ * parent snapshot.
+ */
+static int btrfs_unlink_all_paths(struct send_ctx *sctx)
+{
+ LIST_HEAD(deleted_refs);
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct parent_paths_ctx ctx;
+ int ret;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = sctx->cur_ino;
+ key.type = BTRFS_INODE_REF_KEY;
+ key.offset = 0;
+ ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ ctx.refs = &deleted_refs;
+ ctx.sctx = sctx;
+
+ while (true) {
+ struct extent_buffer *eb = path->nodes[0];
+ int slot = path->slots[0];
+
+ if (slot >= btrfs_header_nritems(eb)) {
+ ret = btrfs_next_leaf(sctx->parent_root, path);
+ if (ret < 0)
+ goto out;
+ else if (ret > 0)
+ break;
+ continue;
+ }
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ if (key.objectid != sctx->cur_ino)
+ break;
+ if (key.type != BTRFS_INODE_REF_KEY &&
+ key.type != BTRFS_INODE_EXTREF_KEY)
+ break;
+
+ ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
+ record_parent_ref, &ctx);
+ if (ret < 0)
+ goto out;
+
+ path->slots[0]++;
+ }
+
+ while (!list_empty(&deleted_refs)) {
+ struct recorded_ref *ref;
+
+ ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
+ ret = send_unlink(sctx, ref->full_path);
+ if (ret < 0)
+ goto out;
+ fs_path_free(ref->full_path);
+ list_del(&ref->list);
+ kfree(ref);
+ }
+ ret = 0;
+out:
+ btrfs_free_path(path);
+ if (ret)
+ __free_recorded_refs(&deleted_refs);
+ return ret;
+}
+
static int changed_inode(struct send_ctx *sctx,
enum btrfs_compare_tree_result result)
{
@@ -5931,6 +6028,7 @@ static int changed_inode(struct send_ctx *sctx,
sctx->cur_inode_new_gen = 0;
sctx->cur_inode_last_extent = (u64)-1;
sctx->cur_inode_next_write_offset = 0;
+ sctx->ignore_cur_inode = false;
/*
* Set send_progress to current inode. This will tell all get_cur_xxx
@@ -5971,6 +6069,33 @@ static int changed_inode(struct send_ctx *sctx,
sctx->cur_inode_new_gen = 1;
}
+ /*
+ * Normally we do not find inodes with a link count of zero (orphans)
+ * because the most common case is to create a snapshot and use it
+ * for a send operation. However, other less common use cases involve
+ * using a subvolume and sending it after turning it to RO mode just
+ * after deleting all hard links of a file while holding an open
+ * file descriptor against it, or turning a RO snapshot into RW mode,
+ * keeping an open file descriptor against a file, deleting it and then
+ * turning the snapshot back to RO mode before using it for a send
+ * operation. So if we find such cases, ignore the inode and all its
+ * items completely if it's a new inode, or, if it's a changed inode,
+ * make sure all its previous paths (from the parent snapshot) are
+ * unlinked and all the other inode items are ignored.
+ */
+ if (result == BTRFS_COMPARE_TREE_NEW ||
+ result == BTRFS_COMPARE_TREE_CHANGED) {
+ u32 nlinks;
+
+ nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
+ if (nlinks == 0) {
+ sctx->ignore_cur_inode = true;
+ if (result == BTRFS_COMPARE_TREE_CHANGED)
+ ret = btrfs_unlink_all_paths(sctx);
+ goto out;
+ }
+ }
+
if (result == BTRFS_COMPARE_TREE_NEW) {
sctx->cur_inode_gen = left_gen;
sctx->cur_inode_new = 1;
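
The orphan situation described in the comment above is easy to reproduce from userspace: once the last hard link is removed while a file descriptor is still open, the link count reads zero although the inode stays alive. A small demonstration (creates and unlinks a scratch file in the current directory):

/* After unlink(), st_nlink is 0 but the inode is still readable
 * through the open fd - the "orphan" the send code must handle. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	int fd = open("orphan-demo", O_CREAT | O_RDWR, 0600);

	if (fd < 0)
		return 1;
	unlink("orphan-demo");		/* drop the only hard link */
	if (fstat(fd, &st) == 0)
		printf("nlink after unlink: %lu\n",
		       (unsigned long)st.st_nlink);	/* prints 0 */
	close(fd);
	return 0;
}
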
@@ -6309,15 +6434,17 @@ static int changed_cb(struct btrfs_path *left_path,
key->objectid == BTRFS_FREE_SPACE_OBJECTID)
goto out;
- if (key->type == BTRFS_INODE_ITEM_KEY)
+ if (key->type == BTRFS_INODE_ITEM_KEY) {
ret = changed_inode(sctx, result);
- else if (key->type == BTRFS_INODE_REF_KEY ||
- key->type == BTRFS_INODE_EXTREF_KEY)
- ret = changed_ref(sctx, result);
- else if (key->type == BTRFS_XATTR_ITEM_KEY)
- ret = changed_xattr(sctx, result);
- else if (key->type == BTRFS_EXTENT_DATA_KEY)
- ret = changed_extent(sctx, result);
+ } else if (!sctx->ignore_cur_inode) {
+ if (key->type == BTRFS_INODE_REF_KEY ||
+ key->type == BTRFS_INODE_EXTREF_KEY)
+ ret = changed_ref(sctx, result);
+ else if (key->type == BTRFS_XATTR_ITEM_KEY)
+ ret = changed_xattr(sctx, result);
+ else if (key->type == BTRFS_EXTENT_DATA_KEY)
+ ret = changed_extent(sctx, result);
+ }
out:
return ret;
@@ -6328,7 +6455,6 @@ static int full_send_tree(struct send_ctx *sctx)
int ret;
struct btrfs_root *send_root = sctx->send_root;
struct btrfs_key key;
- struct btrfs_key found_key;
struct btrfs_path *path;
struct extent_buffer *eb;
int slot;
@@ -6350,17 +6476,13 @@ static int full_send_tree(struct send_ctx *sctx)
while (1) {
eb = path->nodes[0];
slot = path->slots[0];
- btrfs_item_key_to_cpu(eb, &found_key, slot);
+ btrfs_item_key_to_cpu(eb, &key, slot);
- ret = changed_cb(path, NULL, &found_key,
+ ret = changed_cb(path, NULL, &key,
BTRFS_COMPARE_TREE_NEW, sctx);
if (ret < 0)
goto out;
- key.objectid = found_key.objectid;
- key.type = found_key.type;
- key.offset = found_key.offset + 1;
-
ret = btrfs_next_item(send_root, path);
if (ret < 0)
goto out;
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index b7b4acb12833..4c13b737f568 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -3,7 +3,6 @@
* Copyright (C) 2007 Oracle. All rights reserved.
*/
-#include <linux/highmem.h>
#include <asm/unaligned.h>
#include "ctree.h"
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 81107ad49f3a..6601c9aa5e35 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -5,7 +5,6 @@
#include <linux/blkdev.h>
#include <linux/module.h>
-#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
@@ -15,8 +14,6 @@
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
-#include <linux/mpage.h>
-#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
@@ -468,9 +465,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
case Opt_subvolrootid:
case Opt_device:
/*
- * These are parsed by btrfs_parse_subvol_options
- * and btrfs_parse_early_options
- * and can be happily ignored here.
+ * These are parsed by btrfs_parse_subvol_options or
+ * btrfs_parse_device_options and can be ignored here.
*/
break;
case Opt_nodatasum:
@@ -760,6 +756,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
case Opt_recovery:
btrfs_warn(info,
"'recovery' is deprecated, use 'usebackuproot' instead");
+ /* fall through */
case Opt_usebackuproot:
btrfs_info(info,
"trying to use backup root at mount time");
@@ -885,13 +882,16 @@ out:
* All other options will be parsed much later in the mount process and
* only when we need to allocate a new super block.
*/
-static int btrfs_parse_early_options(const char *options, fmode_t flags,
- void *holder, struct btrfs_fs_devices **fs_devices)
+static int btrfs_parse_device_options(const char *options, fmode_t flags,
+ void *holder)
{
substring_t args[MAX_OPT_ARGS];
char *device_name, *opts, *orig, *p;
+ struct btrfs_device *device = NULL;
int error = 0;
+ lockdep_assert_held(&uuid_mutex);
+
if (!options)
return 0;
@@ -917,11 +917,13 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
error = -ENOMEM;
goto out;
}
- error = btrfs_scan_one_device(device_name,
- flags, holder, fs_devices);
+ device = btrfs_scan_one_device(device_name, flags,
+ holder);
kfree(device_name);
- if (error)
+ if (IS_ERR(device)) {
+ error = PTR_ERR(device);
goto out;
+ }
}
}
@@ -935,8 +937,8 @@ out:
*
* The value is later passed to mount_subvol()
*/
-static int btrfs_parse_subvol_options(const char *options, fmode_t flags,
- char **subvol_name, u64 *subvol_objectid)
+static int btrfs_parse_subvol_options(const char *options, char **subvol_name,
+ u64 *subvol_objectid)
{
substring_t args[MAX_OPT_ARGS];
char *opts, *orig, *p;
@@ -948,7 +950,7 @@ static int btrfs_parse_subvol_options(const char *options, fmode_t flags,
/*
* strsep changes the string, duplicate it because
- * btrfs_parse_early_options gets called later
+ * btrfs_parse_device_options gets called later
*/
opts = kstrdup(options, GFP_KERNEL);
if (!opts)
@@ -1517,6 +1519,7 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
{
struct block_device *bdev = NULL;
struct super_block *s;
+ struct btrfs_device *device = NULL;
struct btrfs_fs_devices *fs_devices = NULL;
struct btrfs_fs_info *fs_info = NULL;
struct security_mnt_opts new_sec_opts;
@@ -1526,12 +1529,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
if (!(flags & SB_RDONLY))
mode |= FMODE_WRITE;
- error = btrfs_parse_early_options(data, mode, fs_type,
- &fs_devices);
- if (error) {
- return ERR_PTR(error);
- }
-
security_init_mnt_opts(&new_sec_opts);
if (data) {
error = parse_security_options(data, &new_sec_opts);
@@ -1539,10 +1536,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
return ERR_PTR(error);
}
- error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
- if (error)
- goto error_sec_opts;
-
/*
* Setup a dummy root and fs_info for test/set super. This is because
* we don't actually fill this stuff out until open_ctree, but we need
@@ -1555,8 +1548,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
goto error_sec_opts;
}
- fs_info->fs_devices = fs_devices;
-
fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
security_init_mnt_opts(&fs_info->security_opts);
@@ -1565,7 +1556,25 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
goto error_fs_info;
}
+ mutex_lock(&uuid_mutex);
+ error = btrfs_parse_device_options(data, mode, fs_type);
+ if (error) {
+ mutex_unlock(&uuid_mutex);
+ goto error_fs_info;
+ }
+
+ device = btrfs_scan_one_device(device_name, mode, fs_type);
+ if (IS_ERR(device)) {
+ mutex_unlock(&uuid_mutex);
+ error = PTR_ERR(device);
+ goto error_fs_info;
+ }
+
+ fs_devices = device->fs_devices;
+ fs_info->fs_devices = fs_devices;
+
error = btrfs_open_devices(fs_devices, mode, fs_type);
+ mutex_unlock(&uuid_mutex);
if (error)
goto error_fs_info;
@@ -1650,8 +1659,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
if (!(flags & SB_RDONLY))
mode |= FMODE_WRITE;
- error = btrfs_parse_subvol_options(data, mode,
- &subvol_name, &subvol_objectid);
+ error = btrfs_parse_subvol_options(data, &subvol_name,
+ &subvol_objectid);
if (error) {
kfree(subvol_name);
return ERR_PTR(error);
@@ -2098,14 +2107,9 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
btrfs_account_ro_block_groups_free_space(found);
for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
- if (!list_empty(&found->block_groups[i])) {
- switch (i) {
- case BTRFS_RAID_DUP:
- case BTRFS_RAID_RAID1:
- case BTRFS_RAID_RAID10:
- factor = 2;
- }
- }
+ if (!list_empty(&found->block_groups[i]))
+ factor = btrfs_bg_type_to_factor(
+ btrfs_raid_array[i].bg_flag);
}
}
@@ -2222,7 +2226,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct btrfs_ioctl_vol_args *vol;
- struct btrfs_fs_devices *fs_devices;
+ struct btrfs_device *device = NULL;
int ret = -ENOTTY;
if (!capable(CAP_SYS_ADMIN))
@@ -2234,15 +2238,24 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case BTRFS_IOC_SCAN_DEV:
- ret = btrfs_scan_one_device(vol->name, FMODE_READ,
- &btrfs_root_fs_type, &fs_devices);
+ mutex_lock(&uuid_mutex);
+ device = btrfs_scan_one_device(vol->name, FMODE_READ,
+ &btrfs_root_fs_type);
+ ret = PTR_ERR_OR_ZERO(device);
+ mutex_unlock(&uuid_mutex);
break;
case BTRFS_IOC_DEVICES_READY:
- ret = btrfs_scan_one_device(vol->name, FMODE_READ,
- &btrfs_root_fs_type, &fs_devices);
- if (ret)
+ mutex_lock(&uuid_mutex);
+ device = btrfs_scan_one_device(vol->name, FMODE_READ,
+ &btrfs_root_fs_type);
+ if (IS_ERR(device)) {
+ mutex_unlock(&uuid_mutex);
+ ret = PTR_ERR(device);
break;
- ret = !(fs_devices->num_devices == fs_devices->total_devices);
+ }
+ ret = !(device->fs_devices->num_devices ==
+ device->fs_devices->total_devices);
+ mutex_unlock(&uuid_mutex);
break;
case BTRFS_IOC_GET_SUPPORTED_FEATURES:
ret = btrfs_ioctl_get_supported_features((void __user*)arg);
@@ -2290,7 +2303,6 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
struct btrfs_fs_devices *cur_devices;
struct btrfs_device *dev, *first_dev = NULL;
struct list_head *head;
- struct rcu_string *name;
/*
* Lightweight locking of the devices. We should not need
@@ -2314,12 +2326,10 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
cur_devices = cur_devices->seed;
}
- if (first_dev) {
- name = rcu_dereference(first_dev->name);
- seq_escape(m, name->str, " \t\n\\");
- } else {
+ if (first_dev)
+ seq_escape(m, rcu_str_deref(first_dev->name), " \t\n\\");
+ else
WARN_ON(1);
- }
rcu_read_unlock();
return 0;
}
@@ -2331,7 +2341,6 @@ static const struct super_operations btrfs_super_ops = {
.sync_fs = btrfs_sync_fs,
.show_options = btrfs_show_options,
.show_devname = btrfs_show_devname,
- .write_inode = btrfs_write_inode,
.alloc_inode = btrfs_alloc_inode,
.destroy_inode = btrfs_destroy_inode,
.statfs = btrfs_statfs,
@@ -2369,7 +2378,7 @@ static __cold void btrfs_interface_exit(void)
static void __init btrfs_print_mod_info(void)
{
- pr_info("Btrfs loaded, crc32c=%s"
+ static const char options[] = ""
#ifdef CONFIG_BTRFS_DEBUG
", debug=on"
#endif
@@ -2382,8 +2391,8 @@ static void __init btrfs_print_mod_info(void)
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
", ref-verify=on"
#endif
- "\n",
- crc32c_impl());
+ ;
+ pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
}
static int __init init_btrfs_fs(void)
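
The rewritten btrfs_print_mod_info() relies on adjacent string literals being concatenated at compile time, so each #ifdef contributes its fragment to one static string. A minimal standalone sketch of the same trick (CONFIG_DEBUG is an arbitrary demo macro here, and the "generic" literal merely stands in for crc32c_impl()):

/* Adjacent string literals are concatenated at compile time, so the
 * option list is assembled from whichever #ifdef fragments are on. */
#include <stdio.h>

#define CONFIG_DEBUG 1

int main(void)
{
	static const char options[] = ""
#ifdef CONFIG_DEBUG
		", debug=on"
#endif
#ifdef CONFIG_REF_VERIFY	/* not defined here, so omitted */
		", ref-verify=on"
#endif
		;
	printf("Btrfs loaded, crc32c=%s%s\n", "generic", options);
	return 0;
}
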
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 4a4e960c7c66..3717c864ba23 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -7,10 +7,8 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
-#include <linux/buffer_head.h>
#include <linux/kobject.h>
#include <linux/bug.h>
-#include <linux/genhd.h>
#include <linux/debugfs.h>
#include "ctree.h"
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index ace94db09d29..412b910b04cc 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -216,7 +216,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
btrfs_init_dummy_trans(&trans, fs_info);
test_msg("qgroup basic add");
- ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FS_TREE_OBJECTID);
+ ret = btrfs_create_qgroup(&trans, BTRFS_FS_TREE_OBJECTID);
if (ret) {
test_err("couldn't create a qgroup %d", ret);
return ret;
@@ -249,8 +249,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
- nodesize, old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
if (ret) {
test_err("couldn't account space for a qgroup %d", ret);
return ret;
@@ -285,8 +285,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
- nodesize, old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
if (ret) {
test_err("couldn't account space for a qgroup %d", ret);
return -EINVAL;
@@ -322,7 +322,7 @@ static int test_multiple_refs(struct btrfs_root *root,
* We have BTRFS_FS_TREE_OBJECTID created already from the
* previous test.
*/
- ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FIRST_FREE_OBJECTID);
+ ret = btrfs_create_qgroup(&trans, BTRFS_FIRST_FREE_OBJECTID);
if (ret) {
test_err("couldn't create a qgroup %d", ret);
return ret;
@@ -350,8 +350,8 @@ static int test_multiple_refs(struct btrfs_root *root,
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
- nodesize, old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
if (ret) {
test_err("couldn't account space for a qgroup %d", ret);
return ret;
@@ -385,8 +385,8 @@ static int test_multiple_refs(struct btrfs_root *root,
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
- nodesize, old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
if (ret) {
test_err("couldn't account space for a qgroup %d", ret);
return ret;
@@ -426,8 +426,8 @@ static int test_multiple_refs(struct btrfs_root *root,
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
- nodesize, old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
if (ret) {
test_err("couldn't account space for a qgroup %d", ret);
return ret;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index ff5f6c719976..3b84f5015029 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -241,7 +241,7 @@ loop:
refcount_set(&cur_trans->use_count, 2);
atomic_set(&cur_trans->pending_ordered, 0);
cur_trans->flags = 0;
- cur_trans->start_time = get_seconds();
+ cur_trans->start_time = ktime_get_seconds();
memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
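
Switching start_time from get_seconds() to ktime_get_seconds() gives a 64-bit monotonic value: it neither overflows in 2038 on 32-bit machines nor jumps when the wall clock is set. The closest userspace analog is CLOCK_MONOTONIC:

/* Monotonic seconds in a 64-bit type - the same properties
 * ktime_get_seconds() gives the transaction start time. */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
		return 1;
	printf("start_time = %lld\n", (long long)ts.tv_sec);
	return 0;
}
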
@@ -680,7 +680,7 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
trans = start_transaction(root, 0, TRANS_ATTACH,
BTRFS_RESERVE_NO_FLUSH, true);
- if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
+ if (trans == ERR_PTR(-ENOENT))
btrfs_wait_for_commit(root->fs_info, 0);
return trans;
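
The simplification from IS_ERR(trans) && PTR_ERR(trans) == -ENOENT to trans == ERR_PTR(-ENOENT) works because ERR_PTR() is only a cast of the negative errno into a pointer. A self-contained sketch with simplified versions of the helpers (the real kernel macros add __force annotations and an IS_ERR_VALUE range check):

/* Simplified ERR_PTR helpers, enough to show why the two-test form
 * and the direct pointer comparison are equivalent. */
#include <assert.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *trans = ERR_PTR(-ENOENT);

	assert(IS_ERR(trans) && PTR_ERR(trans) == -ENOENT);
	assert(trans == ERR_PTR(-ENOENT));  /* single comparison suffices */
	return 0;
}
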
@@ -1152,7 +1152,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
ret = btrfs_run_dev_replace(trans, fs_info);
if (ret)
return ret;
- ret = btrfs_run_qgroups(trans, fs_info);
+ ret = btrfs_run_qgroups(trans);
if (ret)
return ret;
@@ -1355,8 +1355,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
goto out;
/* Now qgroup are all updated, we can inherit it to new qgroups */
- ret = btrfs_qgroup_inherit(trans, fs_info,
- src->root_key.objectid, dst_objectid,
+ ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
inherit);
if (ret < 0)
goto out;
@@ -1574,7 +1573,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
/*
* insert root back/forward references
*/
- ret = btrfs_add_root_ref(trans, fs_info, objectid,
+ ret = btrfs_add_root_ref(trans, objectid,
parent_root->root_key.objectid,
btrfs_ino(BTRFS_I(parent_inode)), index,
dentry->d_name.name, dentry->d_name.len);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 94439482a0ec..4cbb1b55387d 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -48,7 +48,7 @@ struct btrfs_transaction {
int aborted;
struct list_head list;
struct extent_io_tree dirty_pages;
- unsigned long start_time;
+ time64_t start_time;
wait_queue_head_t writer_wait;
wait_queue_head_t commit_wait;
wait_queue_head_t pending_wait;
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 8d40e7dd8c30..db835635372f 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -19,6 +19,7 @@
#include "tree-checker.h"
#include "disk-io.h"
#include "compression.h"
+#include "volumes.h"
/*
* Error message should follow the following format:
@@ -353,6 +354,102 @@ static int check_dir_item(struct btrfs_fs_info *fs_info,
return 0;
}
+__printf(4, 5)
+__cold
+static void block_group_err(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ struct btrfs_key key;
+ struct va_format vaf;
+ va_list args;
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node",
+ btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
+ key.objectid, key.offset, &vaf);
+ va_end(args);
+}
+
+static int check_block_group_item(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_block_group_item bgi;
+ u32 item_size = btrfs_item_size_nr(leaf, slot);
+ u64 flags;
+ u64 type;
+
+ /*
+ * Here we don't really care about alignment since the extent allocator
+ * can handle it. We care more about the size, as a block group larger
+ * than the maximum size must be the result of some obvious corruption.
+ */
+ if (key->offset > BTRFS_MAX_DATA_CHUNK_SIZE || key->offset == 0) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid block group size, have %llu expect (0, %llu]",
+ key->offset, BTRFS_MAX_DATA_CHUNK_SIZE);
+ return -EUCLEAN;
+ }
+
+ if (item_size != sizeof(bgi)) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid item size, have %u expect %zu",
+ item_size, sizeof(bgi));
+ return -EUCLEAN;
+ }
+
+ read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
+ sizeof(bgi));
+ if (btrfs_block_group_chunk_objectid(&bgi) !=
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid block group chunk objectid, have %llu expect %llu",
+ btrfs_block_group_chunk_objectid(&bgi),
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+ return -EUCLEAN;
+ }
+
+ if (btrfs_block_group_used(&bgi) > key->offset) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid block group used, have %llu expect [0, %llu)",
+ btrfs_block_group_used(&bgi), key->offset);
+ return -EUCLEAN;
+ }
+
+ flags = btrfs_block_group_flags(&bgi);
+ if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
+ block_group_err(fs_info, leaf, slot,
+"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
+ flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
+ hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
+ return -EUCLEAN;
+ }
+
+ type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
+ if (type != BTRFS_BLOCK_GROUP_DATA &&
+ type != BTRFS_BLOCK_GROUP_METADATA &&
+ type != BTRFS_BLOCK_GROUP_SYSTEM &&
+ type != (BTRFS_BLOCK_GROUP_METADATA |
+ BTRFS_BLOCK_GROUP_DATA)) {
+ block_group_err(fs_info, leaf, slot,
+"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llu or 0x%llx",
+ type, hweight64(type),
+ BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
+ BTRFS_BLOCK_GROUP_SYSTEM,
+ BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
+ return -EUCLEAN;
+ }
+ return 0;
+}
+
/*
* Common point to switch the item-specific validation.
*/
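
The profile check in check_block_group_item() accepts at most one RAID profile bit in the block group flags; hweight64() counts the set bits. A userspace mirror of that validation, using __builtin_popcountll() and made-up bit positions:

/* At most one RAID profile bit may be set in the flags.
 * Bit positions are illustrative, not the btrfs on-disk values. */
#include <assert.h>
#include <stdint.h>

#define BG_RAID0 (1ULL << 3)	/* assumed bit positions */
#define BG_RAID1 (1ULL << 4)
#define BG_DUP   (1ULL << 5)
#define PROFILE_MASK (BG_RAID0 | BG_RAID1 | BG_DUP)

static int profile_valid(uint64_t flags)
{
	/* __builtin_popcountll() plays the role of hweight64() */
	return __builtin_popcountll(flags & PROFILE_MASK) <= 1;
}

int main(void)
{
	assert(profile_valid(BG_RAID1));
	assert(!profile_valid(BG_RAID0 | BG_DUP));	/* would be -EUCLEAN */
	return 0;
}
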
@@ -374,6 +471,9 @@ static int check_leaf_item(struct btrfs_fs_info *fs_info,
case BTRFS_XATTR_ITEM_KEY:
ret = check_dir_item(fs_info, leaf, key, slot);
break;
+ case BTRFS_BLOCK_GROUP_ITEM_KEY:
+ ret = check_block_group_item(fs_info, leaf, key, slot);
+ break;
}
return ret;
}
@@ -396,9 +496,22 @@ static int check_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf,
* skip this check for relocation trees.
*/
if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
+ u64 owner = btrfs_header_owner(leaf);
struct btrfs_root *check_root;
- key.objectid = btrfs_header_owner(leaf);
+ /* These trees must never be empty */
+ if (owner == BTRFS_ROOT_TREE_OBJECTID ||
+ owner == BTRFS_CHUNK_TREE_OBJECTID ||
+ owner == BTRFS_EXTENT_TREE_OBJECTID ||
+ owner == BTRFS_DEV_TREE_OBJECTID ||
+ owner == BTRFS_FS_TREE_OBJECTID ||
+ owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
+ generic_err(fs_info, leaf, 0,
+ "invalid root, root %llu must never be empty",
+ owner);
+ return -EUCLEAN;
+ }
+ key.objectid = owner;
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f8220ec02036..1650dc44a5e3 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -545,12 +545,8 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
- if (IS_ERR(inode)) {
+ if (IS_ERR(inode))
inode = NULL;
- } else if (is_bad_inode(inode)) {
- iput(inode);
- inode = NULL;
- }
return inode;
}
@@ -597,7 +593,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
nbytes = 0;
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
- size = btrfs_file_extent_inline_len(eb, slot, item);
+ size = btrfs_file_extent_ram_bytes(eb, item);
nbytes = btrfs_file_extent_ram_bytes(eb, item);
extent_end = ALIGN(start + size,
fs_info->sectorsize);
@@ -685,7 +681,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* as the owner of the file extent changed from log tree
* (doesn't affect qgroup) to fs/file tree (affects qgroup)
*/
- ret = btrfs_qgroup_trace_extent(trans, fs_info,
+ ret = btrfs_qgroup_trace_extent(trans,
btrfs_file_extent_disk_bytenr(eb, item),
btrfs_file_extent_disk_num_bytes(eb, item),
GFP_NOFS);
@@ -715,7 +711,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* allocation tree
*/
ret = btrfs_alloc_logged_file_extent(trans,
- fs_info,
root->root_key.objectid,
key->objectid, offset, &ins);
if (ret)
@@ -1291,6 +1286,46 @@ again:
return ret;
}
+static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
+ const u8 ref_type, const char *name,
+ const int namelen)
+{
+ struct btrfs_key key;
+ struct btrfs_path *path;
+ const u64 parent_id = btrfs_ino(BTRFS_I(dir));
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = btrfs_ino(BTRFS_I(inode));
+ key.type = ref_type;
+ if (key.type == BTRFS_INODE_REF_KEY)
+ key.offset = parent_id;
+ else
+ key.offset = btrfs_extref_hash(parent_id, name, namelen);
+
+ ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+ if (ret > 0) {
+ ret = 0;
+ goto out;
+ }
+ if (key.type == BTRFS_INODE_EXTREF_KEY)
+ ret = btrfs_find_name_in_ext_backref(path->nodes[0],
+ path->slots[0], parent_id,
+ name, namelen, NULL);
+ else
+ ret = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
+ name, namelen, NULL);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
/*
* replay one inode back reference item found in the log tree.
* eb, slot and key refer to the buffer and key found in the log tree.
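
The btrfs_inode_ref_exists() helper added above builds different keys depending on the ref type: a plain INODE_REF keys on the parent inode number, while an INODE_EXTREF keys on a hash of the parent and name (the kernel uses a crc32c-based btrfs_extref_hash()). A sketch of that key selection with a placeholder hash, not the real one:

/* Key selection by ref type. fake_extref_hash() is a placeholder
 * for the kernel's crc32c-based btrfs_extref_hash(). */
#include <stdint.h>
#include <stdio.h>

struct key { uint64_t objectid; uint8_t type; uint64_t offset; };

#define INODE_REF 12		/* assumed stand-ins for the key types */
#define INODE_EXTREF 13

static uint64_t fake_extref_hash(uint64_t parent, const char *name)
{
	uint64_t h = parent;

	while (*name)
		h = h * 31 + (uint8_t)*name++;
	return h;
}

int main(void)
{
	struct key key = { .objectid = 257 };
	uint64_t parent_id = 256;
	const char *name = "foo";

	key.type = INODE_EXTREF;
	if (key.type == INODE_REF)
		key.offset = parent_id;		/* parent ino as offset */
	else
		key.offset = fake_extref_hash(parent_id, name);
	printf("search key: (%llu, %u, %llu)\n",
	       (unsigned long long)key.objectid, (unsigned)key.type,
	       (unsigned long long)key.offset);
	return 0;
}
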
@@ -1400,6 +1435,32 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
}
}
+ /*
+ * If a reference item already exists for this inode
+ * with the same parent and name, but different index,
+ * drop it and the corresponding directory index entries
+ * from the parent before adding the new reference item
+ * and dir index entries, otherwise we would fail with
+ * -EEXIST returned from btrfs_add_link() below.
+ */
+ ret = btrfs_inode_ref_exists(inode, dir, key->type,
+ name, namelen);
+ if (ret > 0) {
+ ret = btrfs_unlink_inode(trans, root,
+ BTRFS_I(dir),
+ BTRFS_I(inode),
+ name, namelen);
+ /*
+ * If we dropped the link count to 0, bump it so
+ * that later the iput() on the inode will not
+ * free it. We will fixup the link count later.
+ */
+ if (!ret && inode->i_nlink == 0)
+ inc_nlink(inode);
+ }
+ if (ret < 0)
+ goto out;
+
/* insert our name */
ret = btrfs_add_link(trans, BTRFS_I(dir),
BTRFS_I(inode),
@@ -2120,7 +2181,7 @@ again:
dir_key->offset,
name, name_len, 0);
}
- if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
+ if (!log_di || log_di == ERR_PTR(-ENOENT)) {
btrfs_dir_item_key_to_cpu(eb, di, &location);
btrfs_release_path(path);
btrfs_release_path(log_path);
@@ -2933,7 +2994,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
/* bail out if we need to do a full commit */
if (btrfs_need_log_full_commit(fs_info, trans)) {
ret = -EAGAIN;
- btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&root->log_mutex);
goto out;
}
@@ -2951,7 +3011,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
if (ret) {
blk_finish_plug(&plug);
btrfs_abort_transaction(trans, ret);
- btrfs_free_logged_extents(log, log_transid);
btrfs_set_log_full_commit(fs_info, trans);
mutex_unlock(&root->log_mutex);
goto out;
@@ -3002,7 +3061,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
goto out;
}
btrfs_wait_tree_log_extents(log, mark);
- btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out;
@@ -3020,7 +3078,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
if (atomic_read(&log_root_tree->log_commit[index2])) {
blk_finish_plug(&plug);
ret = btrfs_wait_tree_log_extents(log, mark);
- btrfs_wait_logged_extents(trans, log, log_transid);
wait_log_commit(log_root_tree,
root_log_ctx.log_transid);
mutex_unlock(&log_root_tree->log_mutex);
@@ -3045,7 +3102,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
if (btrfs_need_log_full_commit(fs_info, trans)) {
blk_finish_plug(&plug);
btrfs_wait_tree_log_extents(log, mark);
- btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out_wake_log_root;
@@ -3058,7 +3114,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
if (ret) {
btrfs_set_log_full_commit(fs_info, trans);
btrfs_abort_transaction(trans, ret);
- btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
@@ -3068,11 +3123,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
EXTENT_NEW | EXTENT_DIRTY);
if (ret) {
btrfs_set_log_full_commit(fs_info, trans);
- btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
- btrfs_wait_logged_extents(trans, log, log_transid);
btrfs_set_super_log_root(fs_info->super_for_commit,
log_root_tree->node->start);
@@ -3159,14 +3212,6 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
}
- /*
- * We may have short-circuited the log tree with the full commit logic
- * and left ordered extents on our list, so clear these out to keep us
- * from leaking inodes and memory.
- */
- btrfs_free_logged_extents(log, 0);
- btrfs_free_logged_extents(log, 1);
-
free_extent_buffer(log->node);
kfree(log);
}
@@ -3756,7 +3801,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
int start_slot, int nr, int inode_only,
u64 logged_isize)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = trans->fs_info;
unsigned long src_offset;
unsigned long dst_offset;
struct btrfs_root *log = inode->root->log_root;
@@ -3937,9 +3982,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
struct btrfs_file_extent_item);
if (btrfs_file_extent_type(src, extent) ==
BTRFS_FILE_EXTENT_INLINE) {
- len = btrfs_file_extent_inline_len(src,
- src_path->slots[0],
- extent);
+ len = btrfs_file_extent_ram_bytes(src, extent);
*last_extent = ALIGN(key.offset + len,
fs_info->sectorsize);
} else {
@@ -4004,7 +4047,7 @@ fill_holes:
extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
if (btrfs_file_extent_type(src, extent) ==
BTRFS_FILE_EXTENT_INLINE) {
- len = btrfs_file_extent_inline_len(src, i, extent);
+ len = btrfs_file_extent_ram_bytes(src, extent);
extent_end = ALIGN(key.offset + len,
fs_info->sectorsize);
} else {
@@ -4078,131 +4121,32 @@ static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
return 0;
}
-static int wait_ordered_extents(struct btrfs_trans_handle *trans,
- struct inode *inode,
- struct btrfs_root *root,
- const struct extent_map *em,
- const struct list_head *logged_list,
- bool *ordered_io_error)
+static int log_extent_csums(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode,
+ struct btrfs_root *log_root,
+ const struct extent_map *em)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_ordered_extent *ordered;
- struct btrfs_root *log = root->log_root;
- u64 mod_start = em->mod_start;
- u64 mod_len = em->mod_len;
- const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
u64 csum_offset;
u64 csum_len;
LIST_HEAD(ordered_sums);
int ret = 0;
- *ordered_io_error = false;
-
- if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
+ if (inode->flags & BTRFS_INODE_NODATASUM ||
+ test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
em->block_start == EXTENT_MAP_HOLE)
return 0;
- /*
- * Wait for any ordered extent that covers our extent map. If it
- * finishes without an error, first check and see if our csums are on
- * our outstanding ordered extents.
- */
- list_for_each_entry(ordered, logged_list, log_list) {
- struct btrfs_ordered_sum *sum;
-
- if (!mod_len)
- break;
-
- if (ordered->file_offset + ordered->len <= mod_start ||
- mod_start + mod_len <= ordered->file_offset)
- continue;
-
- if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
- !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
- !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
- const u64 start = ordered->file_offset;
- const u64 end = ordered->file_offset + ordered->len - 1;
-
- WARN_ON(ordered->inode != inode);
- filemap_fdatawrite_range(inode->i_mapping, start, end);
- }
-
- wait_event(ordered->wait,
- (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
- test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
-
- if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
- /*
- * Clear the AS_EIO/AS_ENOSPC flags from the inode's
- * i_mapping flags, so that the next fsync won't get
- * an outdated io error too.
- */
- filemap_check_errors(inode->i_mapping);
- *ordered_io_error = true;
- break;
- }
- /*
- * We are going to copy all the csums on this ordered extent, so
- * go ahead and adjust mod_start and mod_len in case this
- * ordered extent has already been logged.
- */
- if (ordered->file_offset > mod_start) {
- if (ordered->file_offset + ordered->len >=
- mod_start + mod_len)
- mod_len = ordered->file_offset - mod_start;
- /*
- * If we have this case
- *
- * |--------- logged extent ---------|
- * |----- ordered extent ----|
- *
- * Just don't mess with mod_start and mod_len, we'll
- * just end up logging more csums than we need and it
- * will be ok.
- */
- } else {
- if (ordered->file_offset + ordered->len <
- mod_start + mod_len) {
- mod_len = (mod_start + mod_len) -
- (ordered->file_offset + ordered->len);
- mod_start = ordered->file_offset +
- ordered->len;
- } else {
- mod_len = 0;
- }
- }
-
- if (skip_csum)
- continue;
-
- /*
- * To keep us from looping for the above case of an ordered
- * extent that falls inside of the logged extent.
- */
- if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
- &ordered->flags))
- continue;
-
- list_for_each_entry(sum, &ordered->list, list) {
- ret = btrfs_csum_file_blocks(trans, log, sum);
- if (ret)
- break;
- }
- }
-
- if (*ordered_io_error || !mod_len || ret || skip_csum)
- return ret;
-
+ /* If the extent is compressed we have to save the entire range of csums. */
if (em->compress_type) {
csum_offset = 0;
csum_len = max(em->block_len, em->orig_block_len);
} else {
- csum_offset = mod_start - em->start;
- csum_len = mod_len;
+ csum_offset = em->mod_start - em->start;
+ csum_len = em->mod_len;
}
/* block start is already adjusted for the file extent offset. */
- ret = btrfs_lookup_csums_range(fs_info->csum_root,
+ ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
em->block_start + csum_offset,
em->block_start + csum_offset +
csum_len - 1, &ordered_sums, 0);
@@ -4214,7 +4158,7 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans,
struct btrfs_ordered_sum,
list);
if (!ret)
- ret = btrfs_csum_file_blocks(trans, log, sums);
+ ret = btrfs_csum_file_blocks(trans, log_root, sums);
list_del(&sums->list);
kfree(sums);
}
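
log_extent_csums() now derives the checksum range directly from the extent map: a compressed extent must log checksums for the entire on-disk extent, everything else only for the modified range. A standalone version of that computation (the extent_map fields below are assumed):

/* Checksum-range choice: whole on-disk extent when compressed,
 * only the modified range otherwise. */
#include <stdint.h>
#include <stdio.h>

struct em {
	uint64_t start, block_len, orig_block_len, mod_start, mod_len;
	int compress_type;
};

static void csum_range(const struct em *em, uint64_t *off, uint64_t *len)
{
	if (em->compress_type) {
		*off = 0;
		*len = em->block_len > em->orig_block_len ?
		       em->block_len : em->orig_block_len;
	} else {
		*off = em->mod_start - em->start;
		*len = em->mod_len;
	}
}

int main(void)
{
	struct em em = { .start = 0, .block_len = 16384,
			 .orig_block_len = 16384, .mod_start = 4096,
			 .mod_len = 4096, .compress_type = 0 };
	uint64_t off, len;

	csum_range(&em, &off, &len);
	printf("csum_offset=%llu csum_len=%llu\n",
	       (unsigned long long)off, (unsigned long long)len);
	return 0;
}
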
@@ -4226,7 +4170,6 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, struct btrfs_root *root,
const struct extent_map *em,
struct btrfs_path *path,
- const struct list_head *logged_list,
struct btrfs_log_ctx *ctx)
{
struct btrfs_root *log = root->log_root;
@@ -4238,18 +4181,11 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
u64 block_len;
int ret;
int extent_inserted = 0;
- bool ordered_io_err = false;
- ret = wait_ordered_extents(trans, &inode->vfs_inode, root, em,
- logged_list, &ordered_io_err);
+ ret = log_extent_csums(trans, inode, log, em);
if (ret)
return ret;
- if (ordered_io_err) {
- ctx->io_err = -EIO;
- return ctx->io_err;
- }
-
btrfs_init_map_token(&token);
ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
@@ -4424,7 +4360,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_inode *inode,
struct btrfs_path *path,
- struct list_head *logged_list,
struct btrfs_log_ctx *ctx,
const u64 start,
const u64 end)
@@ -4480,20 +4415,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
}
list_sort(NULL, &extents, extent_cmp);
- btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end);
- /*
- * Some ordered extents started by fsync might have completed
- * before we could collect them into the list logged_list, which
- * means they're gone, not in our logged_list nor in the inode's
- * ordered tree. We want the application/user space to know an
- * error happened while attempting to persist file data so that
- * it can take proper action. If such error happened, we leave
- * without writing to the log tree and the fsync must report the
- * file data write error and not commit the current transaction.
- */
- ret = filemap_check_errors(inode->vfs_inode.i_mapping);
- if (ret)
- ctx->io_err = ret;
process:
while (!list_empty(&extents)) {
em = list_entry(extents.next, struct extent_map, list);
@@ -4512,8 +4433,7 @@ process:
write_unlock(&tree->lock);
- ret = log_one_extent(trans, inode, root, em, path, logged_list,
- ctx);
+ ret = log_one_extent(trans, inode, root, em, path, ctx);
write_lock(&tree->lock);
clear_em_logging(tree, em);
free_extent_map(em);
@@ -4712,9 +4632,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
if (btrfs_file_extent_type(leaf, extent) ==
BTRFS_FILE_EXTENT_INLINE) {
- len = btrfs_file_extent_inline_len(leaf,
- path->slots[0],
- extent);
+ len = btrfs_file_extent_ram_bytes(leaf, extent);
ASSERT(len == i_size ||
(len == fs_info->sectorsize &&
btrfs_file_extent_compression(leaf, extent) !=
@@ -4898,7 +4816,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
struct btrfs_key min_key;
struct btrfs_key max_key;
struct btrfs_root *log = root->log_root;
- LIST_HEAD(logged_list);
u64 last_extent = 0;
int err = 0;
int ret;
@@ -5094,8 +5011,7 @@ again:
* we don't need to do more work nor fallback to
* a transaction commit.
*/
- if (IS_ERR(other_inode) &&
- PTR_ERR(other_inode) == -ENOENT) {
+ if (other_inode == ERR_PTR(-ENOENT)) {
goto next_key;
} else if (IS_ERR(other_inode)) {
err = PTR_ERR(other_inode);
@@ -5235,7 +5151,7 @@ log_extents:
}
if (fast_search) {
ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
- &logged_list, ctx, start, end);
+ ctx, start, end);
if (ret) {
err = ret;
goto out_unlock;
@@ -5286,10 +5202,6 @@ log_extents:
inode->last_log_commit = inode->last_sub_trans;
spin_unlock(&inode->lock);
out_unlock:
- if (unlikely(err))
- btrfs_put_logged_extents(&logged_list);
- else
- btrfs_submit_logged_extents(&logged_list, log);
mutex_unlock(&inode->log_mutex);
btrfs_free_path(path);
@@ -5585,7 +5497,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
struct btrfs_log_ctx *ctx)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
struct btrfs_path *path;
struct btrfs_key key;
@@ -6120,7 +6032,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, struct btrfs_inode *old_dir,
struct dentry *parent)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = trans->fs_info;
/*
* this will force the logging code to walk the dentry chain
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 1da162928d1a..da86706123ff 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -8,15 +8,12 @@
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
-#include <linux/iocontext.h>
-#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
-#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
@@ -634,44 +631,48 @@ static void pending_bios_fn(struct btrfs_work *work)
* devices.
*/
static void btrfs_free_stale_devices(const char *path,
- struct btrfs_device *skip_dev)
+ struct btrfs_device *skip_device)
{
- struct btrfs_fs_devices *fs_devs, *tmp_fs_devs;
- struct btrfs_device *dev, *tmp_dev;
+ struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
+ struct btrfs_device *device, *tmp_device;
- list_for_each_entry_safe(fs_devs, tmp_fs_devs, &fs_uuids, fs_list) {
-
- if (fs_devs->opened)
+ list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
+ mutex_lock(&fs_devices->device_list_mutex);
+ if (fs_devices->opened) {
+ mutex_unlock(&fs_devices->device_list_mutex);
continue;
+ }
- list_for_each_entry_safe(dev, tmp_dev,
- &fs_devs->devices, dev_list) {
+ list_for_each_entry_safe(device, tmp_device,
+ &fs_devices->devices, dev_list) {
int not_found = 0;
- if (skip_dev && skip_dev == dev)
+ if (skip_device && skip_device == device)
continue;
- if (path && !dev->name)
+ if (path && !device->name)
continue;
rcu_read_lock();
if (path)
- not_found = strcmp(rcu_str_deref(dev->name),
+ not_found = strcmp(rcu_str_deref(device->name),
path);
rcu_read_unlock();
if (not_found)
continue;
/* delete the stale device */
- if (fs_devs->num_devices == 1) {
- btrfs_sysfs_remove_fsid(fs_devs);
- list_del(&fs_devs->fs_list);
- free_fs_devices(fs_devs);
+ fs_devices->num_devices--;
+ list_del(&device->dev_list);
+ btrfs_free_device(device);
+
+ if (fs_devices->num_devices == 0)
break;
- } else {
- fs_devs->num_devices--;
- list_del(&dev->dev_list);
- btrfs_free_device(dev);
- }
+ }
+ mutex_unlock(&fs_devices->device_list_mutex);
+ if (fs_devices->num_devices == 0) {
+ btrfs_sysfs_remove_fsid(fs_devices);
+ list_del(&fs_devices->fs_list);
+ free_fs_devices(fs_devices);
}
}
}
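
The reworked btrfs_free_stale_devices() takes the per-fs_devices mutex while unlinking devices and only frees the now-empty container after dropping the lock. A pthread sketch of that shape (simplified to a single list; all types are hypothetical):

/* Remove entries under the per-list lock, free the empty container
 * only after the lock has been dropped. */
#include <pthread.h>
#include <stdlib.h>

struct dev { struct dev *next; };

struct devices {
	pthread_mutex_t lock;
	struct dev *head;
	int num;
};

static void free_stale(struct devices *devs)
{
	pthread_mutex_lock(&devs->lock);
	while (devs->head) {		/* unlink every stale device */
		struct dev *d = devs->head;

		devs->head = d->next;
		devs->num--;
		free(d);
	}
	pthread_mutex_unlock(&devs->lock);
	if (devs->num == 0) {		/* container freed outside the lock */
		pthread_mutex_destroy(&devs->lock);
		free(devs);
	}
}

int main(void)
{
	struct devices *devs = calloc(1, sizeof(*devs));

	pthread_mutex_init(&devs->lock, NULL);
	devs->head = calloc(1, sizeof(struct dev));
	devs->num = 1;
	free_stale(devs);
	return 0;
}
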
@@ -750,7 +751,8 @@ error_brelse:
* error pointer when failed
*/
static noinline struct btrfs_device *device_list_add(const char *path,
- struct btrfs_super_block *disk_super)
+ struct btrfs_super_block *disk_super,
+ bool *new_device_added)
{
struct btrfs_device *device;
struct btrfs_fs_devices *fs_devices;
@@ -764,21 +766,26 @@ static noinline struct btrfs_device *device_list_add(const char *path,
if (IS_ERR(fs_devices))
return ERR_CAST(fs_devices);
+ mutex_lock(&fs_devices->device_list_mutex);
list_add(&fs_devices->fs_list, &fs_uuids);
device = NULL;
} else {
+ mutex_lock(&fs_devices->device_list_mutex);
device = find_device(fs_devices, devid,
disk_super->dev_item.uuid);
}
if (!device) {
- if (fs_devices->opened)
+ if (fs_devices->opened) {
+ mutex_unlock(&fs_devices->device_list_mutex);
return ERR_PTR(-EBUSY);
+ }
device = btrfs_alloc_device(NULL, &devid,
disk_super->dev_item.uuid);
if (IS_ERR(device)) {
+ mutex_unlock(&fs_devices->device_list_mutex);
/* we can safely leave the fs_devices entry around */
return device;
}
@@ -786,17 +793,16 @@ static noinline struct btrfs_device *device_list_add(const char *path,
name = rcu_string_strdup(path, GFP_NOFS);
if (!name) {
btrfs_free_device(device);
+ mutex_unlock(&fs_devices->device_list_mutex);
return ERR_PTR(-ENOMEM);
}
rcu_assign_pointer(device->name, name);
- mutex_lock(&fs_devices->device_list_mutex);
list_add_rcu(&device->dev_list, &fs_devices->devices);
fs_devices->num_devices++;
- mutex_unlock(&fs_devices->device_list_mutex);
device->fs_devices = fs_devices;
- btrfs_free_stale_devices(path, device);
+ *new_device_added = true;
if (disk_super->label[0])
pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
@@ -840,12 +846,15 @@ static noinline struct btrfs_device *device_list_add(const char *path,
* with larger generation number or the last-in if
* generation are equal.
*/
+ mutex_unlock(&fs_devices->device_list_mutex);
return ERR_PTR(-EEXIST);
}
name = rcu_string_strdup(path, GFP_NOFS);
- if (!name)
+ if (!name) {
+ mutex_unlock(&fs_devices->device_list_mutex);
return ERR_PTR(-ENOMEM);
+ }
rcu_string_free(device->name);
rcu_assign_pointer(device->name, name);
if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
@@ -865,6 +874,7 @@ static noinline struct btrfs_device *device_list_add(const char *path,
fs_devices->total_devices = btrfs_super_num_devices(disk_super);
+ mutex_unlock(&fs_devices->device_list_mutex);
return device;
}
@@ -1004,7 +1014,7 @@ static void btrfs_close_bdev(struct btrfs_device *device)
blkdev_put(device->bdev, device->mode);
}
-static void btrfs_prepare_close_one_device(struct btrfs_device *device)
+static void btrfs_close_one_device(struct btrfs_device *device)
{
struct btrfs_fs_devices *fs_devices = device->fs_devices;
struct btrfs_device *new_device;
@@ -1022,6 +1032,8 @@ static void btrfs_prepare_close_one_device(struct btrfs_device *device)
if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
fs_devices->missing_devices--;
+ btrfs_close_bdev(device);
+
new_device = btrfs_alloc_device(NULL, &device->devid,
device->uuid);
BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
@@ -1035,39 +1047,23 @@ static void btrfs_prepare_close_one_device(struct btrfs_device *device)
list_replace_rcu(&device->dev_list, &new_device->dev_list);
new_device->fs_devices = device->fs_devices;
+
+ call_rcu(&device->rcu, free_device_rcu);
}
static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device, *tmp;
- struct list_head pending_put;
-
- INIT_LIST_HEAD(&pending_put);
if (--fs_devices->opened > 0)
return 0;
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
- btrfs_prepare_close_one_device(device);
- list_add(&device->dev_list, &pending_put);
+ btrfs_close_one_device(device);
}
mutex_unlock(&fs_devices->device_list_mutex);
- /*
- * btrfs_show_devname() is using the device_list_mutex,
- * sometimes call to blkdev_put() leads vfs calling
- * into this func. So do put outside of device_list_mutex,
- * as of now.
- */
- while (!list_empty(&pending_put)) {
- device = list_first_entry(&pending_put,
- struct btrfs_device, dev_list);
- list_del(&device->dev_list);
- btrfs_close_bdev(device);
- call_rcu(&device->rcu, free_device_rcu);
- }
-
WARN_ON(fs_devices->open_devices);
WARN_ON(fs_devices->rw_devices);
fs_devices->opened = 0;
@@ -1146,7 +1142,8 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
{
int ret;
- mutex_lock(&uuid_mutex);
+ lockdep_assert_held(&uuid_mutex);
+
mutex_lock(&fs_devices->device_list_mutex);
if (fs_devices->opened) {
fs_devices->opened++;
@@ -1156,7 +1153,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
ret = open_fs_devices(fs_devices, flags, holder);
}
mutex_unlock(&fs_devices->device_list_mutex);
- mutex_unlock(&uuid_mutex);
return ret;
}
@@ -1217,16 +1213,18 @@ static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
* and we are not allowed to call set_blocksize during the scan. The superblock
* is read via pagecache
*/
-int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
- struct btrfs_fs_devices **fs_devices_ret)
+struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
+ void *holder)
{
struct btrfs_super_block *disk_super;
- struct btrfs_device *device;
+ bool new_device_added = false;
+ struct btrfs_device *device = NULL;
struct block_device *bdev;
struct page *page;
- int ret = 0;
u64 bytenr;
+ lockdep_assert_held(&uuid_mutex);
+
/*
* we would like to check all the supers, but that would make
* a btrfs mount succeed after a mkfs from a different FS.
@@ -1238,112 +1236,25 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
bdev = blkdev_get_by_path(path, flags, holder);
if (IS_ERR(bdev))
- return PTR_ERR(bdev);
+ return ERR_CAST(bdev);
if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
- ret = -EINVAL;
+ device = ERR_PTR(-EINVAL);
goto error_bdev_put;
}
- mutex_lock(&uuid_mutex);
- device = device_list_add(path, disk_super);
- if (IS_ERR(device))
- ret = PTR_ERR(device);
- else
- *fs_devices_ret = device->fs_devices;
- mutex_unlock(&uuid_mutex);
+ device = device_list_add(path, disk_super, &new_device_added);
+ if (!IS_ERR(device)) {
+ if (new_device_added)
+ btrfs_free_stale_devices(path, device);
+ }
btrfs_release_disk_super(page);
error_bdev_put:
blkdev_put(bdev, flags);
- return ret;
-}
-
-/* helper to account the used device space in the range */
-int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
- u64 end, u64 *length)
-{
- struct btrfs_key key;
- struct btrfs_root *root = device->fs_info->dev_root;
- struct btrfs_dev_extent *dev_extent;
- struct btrfs_path *path;
- u64 extent_end;
- int ret;
- int slot;
- struct extent_buffer *l;
-
- *length = 0;
-
- if (start >= device->total_bytes ||
- test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
- return 0;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- path->reada = READA_FORWARD;
-
- key.objectid = device->devid;
- key.offset = start;
- key.type = BTRFS_DEV_EXTENT_KEY;
-
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
- if (ret > 0) {
- ret = btrfs_previous_item(root, path, key.objectid, key.type);
- if (ret < 0)
- goto out;
- }
-
- while (1) {
- l = path->nodes[0];
- slot = path->slots[0];
- if (slot >= btrfs_header_nritems(l)) {
- ret = btrfs_next_leaf(root, path);
- if (ret == 0)
- continue;
- if (ret < 0)
- goto out;
-
- break;
- }
- btrfs_item_key_to_cpu(l, &key, slot);
-
- if (key.objectid < device->devid)
- goto next;
-
- if (key.objectid > device->devid)
- break;
-
- if (key.type != BTRFS_DEV_EXTENT_KEY)
- goto next;
-
- dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
- extent_end = key.offset + btrfs_dev_extent_length(l,
- dev_extent);
- if (key.offset <= start && extent_end > end) {
- *length = end - start + 1;
- break;
- } else if (key.offset <= start && extent_end > start)
- *length += extent_end - start;
- else if (key.offset > start && extent_end <= end)
- *length += extent_end - key.offset;
- else if (key.offset > start && key.offset <= end) {
- *length += end - key.offset + 1;
- break;
- } else if (key.offset > end)
- break;
-
-next:
- path->slots[0]++;
- }
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+ return device;
}
static int contains_pending_extent(struct btrfs_transaction *transaction,
@@ -1755,10 +1666,8 @@ error:
* the btrfs_device struct should be fully filled in
*/
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_device *device)
{
- struct btrfs_root *root = fs_info->chunk_root;
int ret;
struct btrfs_path *path;
struct btrfs_dev_item *dev_item;
@@ -1774,8 +1683,8 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
key.type = BTRFS_DEV_ITEM_KEY;
key.offset = device->devid;
- ret = btrfs_insert_empty_item(trans, root, path, &key,
- sizeof(*dev_item));
+ ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
+ &key, sizeof(*dev_item));
if (ret)
goto out;
@@ -1800,7 +1709,7 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
ptr = btrfs_device_uuid(dev_item);
write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
ptr = btrfs_device_fsid(dev_item);
- write_extent_buffer(leaf, fs_info->fsid, ptr, BTRFS_FSID_SIZE);
+ write_extent_buffer(leaf, trans->fs_info->fsid, ptr, BTRFS_FSID_SIZE);
btrfs_mark_buffer_dirty(leaf);
ret = 0;
@@ -1924,9 +1833,10 @@ static struct btrfs_device * btrfs_find_next_active_device(
 * where this function is called, there should always be another device (or
* this_dev) which is active.
*/
-void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
- struct btrfs_device *device, struct btrfs_device *this_dev)
+void btrfs_assign_next_active_device(struct btrfs_device *device,
+ struct btrfs_device *this_dev)
{
+ struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_device *next_device;
if (this_dev)
@@ -2029,11 +1939,14 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
cur_devices->num_devices--;
cur_devices->total_devices--;
+	/* Update total_devices of the parent fs_devices if it's a seed */
+ if (cur_devices != fs_devices)
+ fs_devices->total_devices--;
if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
cur_devices->missing_devices--;
- btrfs_assign_next_active_device(fs_info, device, NULL);
+ btrfs_assign_next_active_device(device, NULL);
if (device->bdev) {
cur_devices->open_devices--;
@@ -2084,12 +1997,11 @@ error_undo:
goto out;
}
-void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
- struct btrfs_device *srcdev)
+void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
{
struct btrfs_fs_devices *fs_devices;
- lockdep_assert_held(&fs_info->fs_devices->device_list_mutex);
+ lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
/*
* in case of fs with no seed, srcdev->fs_devices will point
@@ -2151,10 +2063,9 @@ void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
}
}
-void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
- struct btrfs_device *tgtdev)
+void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
- struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
WARN_ON(!tgtdev);
mutex_lock(&fs_devices->device_list_mutex);
@@ -2166,7 +2077,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
fs_devices->num_devices--;
- btrfs_assign_next_active_device(fs_info, tgtdev, NULL);
+ btrfs_assign_next_active_device(tgtdev, NULL);
list_del_rcu(&tgtdev->dev_list);
@@ -2297,7 +2208,7 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
INIT_LIST_HEAD(&seed_devices->alloc_list);
mutex_init(&seed_devices->device_list_mutex);
- mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_devices->device_list_mutex);
list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
synchronize_rcu);
list_for_each_entry(device, &seed_devices->devices, dev_list)
@@ -2317,7 +2228,7 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
generate_random_uuid(fs_devices->fsid);
memcpy(fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_devices->device_list_mutex);
super_flags = btrfs_super_flags(disk_super) &
~BTRFS_SUPER_FLAG_SEEDING;
@@ -2407,15 +2318,16 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
struct btrfs_trans_handle *trans;
struct btrfs_device *device;
struct block_device *bdev;
- struct list_head *devices;
struct super_block *sb = fs_info->sb;
struct rcu_string *name;
- u64 tmp;
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ u64 orig_super_total_bytes;
+ u64 orig_super_num_devices;
int seeding_dev = 0;
int ret = 0;
bool unlocked = false;
- if (sb_rdonly(sb) && !fs_info->fs_devices->seeding)
+ if (sb_rdonly(sb) && !fs_devices->seeding)
return -EROFS;
bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
@@ -2423,7 +2335,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
if (IS_ERR(bdev))
return PTR_ERR(bdev);
- if (fs_info->fs_devices->seeding) {
+ if (fs_devices->seeding) {
seeding_dev = 1;
down_write(&sb->s_umount);
mutex_lock(&uuid_mutex);
@@ -2431,18 +2343,16 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
filemap_write_and_wait(bdev->bd_inode->i_mapping);
- devices = &fs_info->fs_devices->devices;
-
- mutex_lock(&fs_info->fs_devices->device_list_mutex);
- list_for_each_entry(device, devices, dev_list) {
+ mutex_lock(&fs_devices->device_list_mutex);
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
if (device->bdev == bdev) {
ret = -EEXIST;
mutex_unlock(
- &fs_info->fs_devices->device_list_mutex);
+ &fs_devices->device_list_mutex);
goto error;
}
}
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_devices->device_list_mutex);
device = btrfs_alloc_device(fs_info, NULL, NULL);
if (IS_ERR(device)) {
@@ -2491,33 +2401,34 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
}
}
- device->fs_devices = fs_info->fs_devices;
+ device->fs_devices = fs_devices;
- mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_devices->device_list_mutex);
mutex_lock(&fs_info->chunk_mutex);
- list_add_rcu(&device->dev_list, &fs_info->fs_devices->devices);
- list_add(&device->dev_alloc_list,
- &fs_info->fs_devices->alloc_list);
- fs_info->fs_devices->num_devices++;
- fs_info->fs_devices->open_devices++;
- fs_info->fs_devices->rw_devices++;
- fs_info->fs_devices->total_devices++;
- fs_info->fs_devices->total_rw_bytes += device->total_bytes;
+ list_add_rcu(&device->dev_list, &fs_devices->devices);
+ list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
+ fs_devices->num_devices++;
+ fs_devices->open_devices++;
+ fs_devices->rw_devices++;
+ fs_devices->total_devices++;
+ fs_devices->total_rw_bytes += device->total_bytes;
atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
if (!blk_queue_nonrot(q))
- fs_info->fs_devices->rotating = 1;
+ fs_devices->rotating = 1;
- tmp = btrfs_super_total_bytes(fs_info->super_copy);
+ orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
btrfs_set_super_total_bytes(fs_info->super_copy,
- round_down(tmp + device->total_bytes, fs_info->sectorsize));
+ round_down(orig_super_total_bytes + device->total_bytes,
+ fs_info->sectorsize));
- tmp = btrfs_super_num_devices(fs_info->super_copy);
- btrfs_set_super_num_devices(fs_info->super_copy, tmp + 1);
+ orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
+ btrfs_set_super_num_devices(fs_info->super_copy,
+ orig_super_num_devices + 1);
/* add sysfs device entry */
- btrfs_sysfs_add_device_link(fs_info->fs_devices, device);
+ btrfs_sysfs_add_device_link(fs_devices, device);
/*
* we've got more storage, clear any full flags on the space
@@ -2526,7 +2437,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
btrfs_clear_space_info_full(fs_info);
mutex_unlock(&fs_info->chunk_mutex);
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_devices->device_list_mutex);
if (seeding_dev) {
mutex_lock(&fs_info->chunk_mutex);
@@ -2538,7 +2449,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
}
}
- ret = btrfs_add_dev_item(trans, fs_info, device);
+ ret = btrfs_add_dev_item(trans, device);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto error_sysfs;
@@ -2558,7 +2469,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
*/
snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
fs_info->fsid);
- if (kobject_rename(&fs_info->fs_devices->fsid_kobj, fsid_buf))
+ if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf))
btrfs_warn(fs_info,
"sysfs: failed to create fsid for sprout");
}
@@ -2593,7 +2504,23 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
return ret;
error_sysfs:
- btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
+ btrfs_sysfs_rm_device_link(fs_devices, device);
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_info->chunk_mutex);
+ list_del_rcu(&device->dev_list);
+ list_del(&device->dev_alloc_list);
+ fs_info->fs_devices->num_devices--;
+ fs_info->fs_devices->open_devices--;
+ fs_info->fs_devices->rw_devices--;
+ fs_info->fs_devices->total_devices--;
+ fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
+ atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
+ btrfs_set_super_total_bytes(fs_info->super_copy,
+ orig_super_total_bytes);
+ btrfs_set_super_num_devices(fs_info->super_copy,
+ orig_super_num_devices);
+ mutex_unlock(&fs_info->chunk_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
error_trans:
if (seeding_dev)
sb->s_flags |= SB_RDONLY;
@@ -2697,9 +2624,9 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
return btrfs_update_device(trans, device);
}
-static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 chunk_offset)
+static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = fs_info->chunk_root;
int ret;
struct btrfs_path *path;
@@ -2808,9 +2735,9 @@ static struct extent_map *get_chunk_map(struct btrfs_fs_info *fs_info,
return em;
}
-int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 chunk_offset)
+int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct extent_map *em;
struct map_lookup *map;
u64 dev_extent_len = 0;
@@ -2829,7 +2756,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
}
map = em->map_lookup;
mutex_lock(&fs_info->chunk_mutex);
- check_system_chunk(trans, fs_info, map->type);
+ check_system_chunk(trans, map->type);
mutex_unlock(&fs_info->chunk_mutex);
/*
@@ -2869,7 +2796,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
}
mutex_unlock(&fs_devices->device_list_mutex);
- ret = btrfs_free_chunk(trans, fs_info, chunk_offset);
+ ret = btrfs_free_chunk(trans, chunk_offset);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -2885,7 +2812,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
}
}
- ret = btrfs_remove_block_group(trans, fs_info, chunk_offset, em);
+ ret = btrfs_remove_block_group(trans, chunk_offset, em);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -2950,7 +2877,7 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
* step two, delete the device extents and the
* chunk tree entries
*/
- ret = btrfs_remove_chunk(trans, fs_info, chunk_offset);
+ ret = btrfs_remove_chunk(trans, chunk_offset);
btrfs_end_transaction(trans);
return ret;
}
@@ -3059,7 +2986,7 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_force_chunk_alloc(trans, fs_info,
+ ret = btrfs_force_chunk_alloc(trans,
BTRFS_BLOCK_GROUP_DATA);
btrfs_end_transaction(trans);
if (ret < 0)
@@ -4692,7 +4619,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
if (type & BTRFS_BLOCK_GROUP_DATA) {
max_stripe_size = SZ_1G;
- max_chunk_size = 10 * max_stripe_size;
+ max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
if (!devs_max)
devs_max = BTRFS_MAX_DEVS(info);
} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
@@ -4900,7 +4827,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
refcount_inc(&em->refs);
write_unlock(&em_tree->lock);
- ret = btrfs_make_block_group(trans, info, 0, type, start, num_bytes);
+ ret = btrfs_make_block_group(trans, 0, type, start, num_bytes);
if (ret)
goto error_del_extent;
@@ -4934,9 +4861,9 @@ error:
}
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 chunk_offset, u64 chunk_size)
+ u64 chunk_offset, u64 chunk_size)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *extent_root = fs_info->extent_root;
struct btrfs_root *chunk_root = fs_info->chunk_root;
struct btrfs_key key;
@@ -5038,13 +4965,12 @@ out:
* require modifying the chunk tree. This division is important for the
* bootstrap process of adding storage to a seed btrfs.
*/
-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 type)
+int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
{
u64 chunk_offset;
- lockdep_assert_held(&fs_info->chunk_mutex);
- chunk_offset = find_next_chunk(fs_info);
+ lockdep_assert_held(&trans->fs_info->chunk_mutex);
+ chunk_offset = find_next_chunk(trans->fs_info);
return __btrfs_alloc_chunk(trans, chunk_offset, type);
}
@@ -5175,7 +5101,7 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
/*
* There could be two corrupted data stripes, we need
* to loop retry in order to rebuild the correct data.
- *
+ *
* Fail a stripe at a time on every retry except the
* stripe under reconstruction.
*/
@@ -6187,21 +6113,11 @@ static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
btrfs_io_bio(bio)->stripe_index = dev_nr;
bio->bi_end_io = btrfs_end_bio;
bio->bi_iter.bi_sector = physical >> 9;
-#ifdef DEBUG
- {
- struct rcu_string *name;
-
- rcu_read_lock();
- name = rcu_dereference(dev->name);
- btrfs_debug(fs_info,
- "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
- bio_op(bio), bio->bi_opf,
- (u64)bio->bi_iter.bi_sector,
- (u_long)dev->bdev->bd_dev, name->str, dev->devid,
- bio->bi_iter.bi_size);
- rcu_read_unlock();
- }
-#endif
+ btrfs_debug_in_rcu(fs_info,
+ "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
+ bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
+ (u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid,
+ bio->bi_iter.bi_size);
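	/*
	 * btrfs_debug_in_rcu() takes rcu_read_lock() around the message and
	 * the rcu_str_deref(dev->name) lookup itself, so the open-coded
	 * #ifdef DEBUG block above is replaced by the standard btrfs debug
	 * machinery.
	 */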
bio_set_dev(bio, dev->bdev);
btrfs_bio_counter_inc_noblocked(fs_info);
@@ -6403,6 +6319,8 @@ static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
u16 num_stripes;
u16 sub_stripes;
u64 type;
+ u64 features;
+ bool mixed = false;
length = btrfs_chunk_length(leaf, chunk);
stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
@@ -6441,6 +6359,32 @@ static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
btrfs_chunk_type(leaf, chunk));
return -EIO;
}
+
+ if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
+ btrfs_err(fs_info, "missing chunk type flag: 0x%llx", type);
+ return -EIO;
+ }
+
+ if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
+ (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
+ btrfs_err(fs_info,
+ "system chunk with data or metadata type: 0x%llx", type);
+ return -EIO;
+ }
+
+ features = btrfs_super_incompat_flags(fs_info->super_copy);
+ if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
+ mixed = true;
+
+ if (!mixed) {
+ if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
+ (type & BTRFS_BLOCK_GROUP_DATA)) {
+ btrfs_err(fs_info,
+ "mixed chunk type in non-mixed mode: 0x%llx", type);
+ return -EIO;
+ }
+ }
+
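	/*
	 * Example of what the checks above reject: with DATA == 0x1,
	 * SYSTEM == 0x2 and METADATA == 0x4, a corrupted type of 0 trips
	 * the "missing chunk type" check, while 0x3 (SYSTEM|DATA) trips the
	 * system-chunk check, so such chunks never reach the allocator.
	 */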
if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
(type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
(type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
@@ -6527,6 +6471,7 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
map->type = btrfs_chunk_type(leaf, chunk);
map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+ map->verified_stripes = 0;
for (i = 0; i < num_stripes; i++) {
map->stripes[i].physical =
btrfs_stripe_offset_nr(leaf, chunk, i);
@@ -6563,10 +6508,14 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
write_lock(&map_tree->map_tree.lock);
ret = add_extent_mapping(&map_tree->map_tree, em, 0);
write_unlock(&map_tree->map_tree.lock);
- BUG_ON(ret); /* Tree corruption */
+ if (ret < 0) {
+ btrfs_err(fs_info,
+ "failed to add chunk map, start=%llu len=%llu: %d",
+ em->start, em->len, ret);
+ }
free_extent_map(em);
- return 0;
+ return ret;
}
static void fill_device_from_item(struct extent_buffer *leaf,
@@ -7108,9 +7057,9 @@ out:
}
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_device *device)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *dev_root = fs_info->dev_root;
struct btrfs_path *path;
struct btrfs_key key;
@@ -7203,7 +7152,7 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
*/
smp_rmb();
- ret = update_dev_stat_item(trans, fs_info, device);
+ ret = update_dev_stat_item(trans, device);
if (!ret)
atomic_sub(stats_cnt, &device->dev_stats_ccnt);
}
@@ -7382,3 +7331,197 @@ void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
fs_devices = fs_devices->seed;
}
}
+
+/*
+ * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
+ */
+int btrfs_bg_type_to_factor(u64 flags)
+{
+ if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID10))
+ return 2;
+ return 1;
+}
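A minimal usage sketch, assuming a hypothetical accounting helper (the name is illustrative): profiles with a factor of 2 consume twice the raw device bytes per logical byte.

	static u64 bg_raw_bytes(u64 flags, u64 logical_bytes)
	{
		/* factor is 2 for DUP/RAID1/RAID10, 1 for everything else */
		return logical_bytes * btrfs_bg_type_to_factor(flags);
	}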
+
+static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
+{
+ int index = btrfs_bg_flags_to_raid_index(type);
+ int ncopies = btrfs_raid_array[index].ncopies;
+ int data_stripes;
+
+ switch (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ case BTRFS_BLOCK_GROUP_RAID5:
+ data_stripes = num_stripes - 1;
+ break;
+ case BTRFS_BLOCK_GROUP_RAID6:
+ data_stripes = num_stripes - 2;
+ break;
+ default:
+ data_stripes = num_stripes / ncopies;
+ break;
+ }
+ return div_u64(chunk_len, data_stripes);
+}
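Worked example for the mapping above: a 4GiB RAID6 chunk striped over six devices has 6 - 2 = 4 data stripes, so each dev extent must be 4GiB / 4 = 1GiB long; for DUP (ncopies == 2, num_stripes == 2) data_stripes is 1 and the dev extent length equals the chunk length.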
+
+static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
+ u64 chunk_offset, u64 devid,
+ u64 physical_offset, u64 physical_len)
+{
+ struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
+ struct extent_map *em;
+ struct map_lookup *map;
+ u64 stripe_len;
+ bool found = false;
+ int ret = 0;
+ int i;
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, chunk_offset, 1);
+ read_unlock(&em_tree->lock);
+
+ if (!em) {
+ btrfs_err(fs_info,
+"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
+ physical_offset, devid);
+ ret = -EUCLEAN;
+ goto out;
+ }
+
+ map = em->map_lookup;
+ stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
+ if (physical_len != stripe_len) {
+ btrfs_err(fs_info,
+"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
+ physical_offset, devid, em->start, physical_len,
+ stripe_len);
+ ret = -EUCLEAN;
+ goto out;
+ }
+
+ for (i = 0; i < map->num_stripes; i++) {
+ if (map->stripes[i].dev->devid == devid &&
+ map->stripes[i].physical == physical_offset) {
+ found = true;
+ if (map->verified_stripes >= map->num_stripes) {
+ btrfs_err(fs_info,
+ "too many dev extents for chunk %llu found",
+ em->start);
+ ret = -EUCLEAN;
+ goto out;
+ }
+ map->verified_stripes++;
+ break;
+ }
+ }
+ if (!found) {
+ btrfs_err(fs_info,
+ "dev extent physical offset %llu devid %llu has no corresponding chunk",
+ physical_offset, devid);
+ ret = -EUCLEAN;
+ }
+out:
+ free_extent_map(em);
+ return ret;
+}
+
+static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
+{
+ struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
+ struct extent_map *em;
+ struct rb_node *node;
+ int ret = 0;
+
+ read_lock(&em_tree->lock);
+ for (node = rb_first(&em_tree->map); node; node = rb_next(node)) {
+ em = rb_entry(node, struct extent_map, rb_node);
+ if (em->map_lookup->num_stripes !=
+ em->map_lookup->verified_stripes) {
+ btrfs_err(fs_info,
+ "chunk %llu has missing dev extent, have %d expect %d",
+ em->start, em->map_lookup->verified_stripes,
+ em->map_lookup->num_stripes);
+ ret = -EUCLEAN;
+ goto out;
+ }
+ }
+out:
+ read_unlock(&em_tree->lock);
+ return ret;
+}
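Together the two passes check the mapping in both directions: verify_one_dev_extent() confirms each dev extent lands on a matching stripe of an existing chunk and bumps verified_stripes, while verify_chunk_dev_extent_mapping() confirms every stripe of every chunk was claimed by exactly one dev extent.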
+
+/*
+ * Ensure that all dev extents are mapped to correct chunk, otherwise
+ * later chunk allocation/free would cause unexpected behavior.
+ *
+ * NOTE: This will iterate through the whole device tree, which should be
+ * about the same size as the chunk tree, so it slightly increases mount time.
+ */
+int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_path *path;
+ struct btrfs_root *root = fs_info->dev_root;
+ struct btrfs_key key;
+ int ret = 0;
+
+ key.objectid = 1;
+ key.type = BTRFS_DEV_EXTENT_KEY;
+ key.offset = 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ path->reada = READA_FORWARD;
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+ ret = btrfs_next_item(root, path);
+ if (ret < 0)
+ goto out;
+ /* No dev extents at all? Not good */
+ if (ret > 0) {
+ ret = -EUCLEAN;
+ goto out;
+ }
+ }
+ while (1) {
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_dev_extent *dext;
+ int slot = path->slots[0];
+ u64 chunk_offset;
+ u64 physical_offset;
+ u64 physical_len;
+ u64 devid;
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ if (key.type != BTRFS_DEV_EXTENT_KEY)
+ break;
+ devid = key.objectid;
+ physical_offset = key.offset;
+
+ dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
+ chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
+ physical_len = btrfs_dev_extent_length(leaf, dext);
+
+ ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
+ physical_offset, physical_len);
+ if (ret < 0)
+ goto out;
+ ret = btrfs_next_item(root, path);
+ if (ret < 0)
+ goto out;
+ if (ret > 0) {
+ ret = 0;
+ break;
+ }
+ }
+
+ /* Ensure all chunks have corresponding dev extents */
+ ret = verify_chunk_dev_extent_mapping(fs_info);
+out:
+ btrfs_free_path(path);
+ return ret;
+}
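A sketch of the intended call site (the exact placement is an assumption of this note, not part of the hunk above): mount-time verification would run once the chunk and device trees are readable, e.g. from open_ctree():

	ret = btrfs_verify_dev_extents(fs_info);
	if (ret) {
		btrfs_err(fs_info,
			  "failed to verify dev extents against chunks: %d",
			  ret);
		goto fail_block_groups;	/* label is illustrative */
	}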
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 5139ec8daf4c..23e9285d88de 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -11,6 +11,8 @@
#include <linux/btrfs.h>
#include "async-thread.h"
+#define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G)
+
extern struct mutex uuid_mutex;
#define BTRFS_STRIPE_LEN SZ_64K
@@ -343,6 +345,7 @@ struct map_lookup {
u64 stripe_len;
int num_stripes;
int sub_stripes;
+ int verified_stripes; /* For mount time dev extent verification */
struct btrfs_bio_stripe stripes[];
};
@@ -382,8 +385,6 @@ static inline enum btrfs_map_op btrfs_op(struct bio *bio)
}
}
-int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
- u64 end, u64 *length);
void btrfs_get_bbio(struct btrfs_bio *bbio);
void btrfs_put_bbio(struct btrfs_bio *bbio);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
@@ -396,20 +397,19 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
u64 physical, u64 **logical, int *naddrs, int *stripe_len);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 type);
+int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num, int async_submit);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder);
-int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
- struct btrfs_fs_devices **fs_devices_ret);
+struct btrfs_device *btrfs_scan_one_device(const char *path,
+ fmode_t flags, void *holder);
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step);
-void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
- struct btrfs_device *device, struct btrfs_device *this_dev);
+void btrfs_assign_next_active_device(struct btrfs_device *device,
+ struct btrfs_device *this_dev);
int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
const char *device_path,
struct btrfs_device **device);
@@ -453,22 +453,18 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
-void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
- struct btrfs_device *srcdev);
+void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
struct btrfs_device *srcdev);
-void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
- struct btrfs_device *tgtdev);
+void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path);
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
u64 logical);
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 chunk_offset, u64 chunk_size);
-int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 chunk_offset);
+ u64 chunk_offset, u64 chunk_size);
+int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
int index)
@@ -560,4 +556,7 @@ void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
struct btrfs_device *failing_dev);
+int btrfs_bg_type_to_factor(u64 flags);
+int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
+
#endif
diff --git a/fs/buffer.c b/fs/buffer.c
index cabc045f483d..c8c2b7d8b8d6 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1900,15 +1900,16 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
break;
case IOMAP_UNWRITTEN:
/*
- * For unwritten regions, we always need to ensure that
- * sub-block writes cause the regions in the block we are not
- * writing to are zeroed. Set the buffer as new to ensure this.
+ * For unwritten regions, we always need to ensure that regions
+ * in the block we are not writing to are zeroed. Mark the
+ * buffer as new to ensure this.
*/
set_buffer_new(bh);
set_buffer_unwritten(bh);
/* FALLTHRU */
case IOMAP_MAPPED:
- if (offset >= i_size_read(inode))
+ if ((iomap->flags & IOMAP_F_NEW) ||
+ offset >= i_size_read(inode))
set_buffer_new(bh);
bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
inode->i_blkbits;
@@ -2076,6 +2077,40 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
}
EXPORT_SYMBOL(block_write_begin);
+int __generic_write_end(struct inode *inode, loff_t pos, unsigned copied,
+ struct page *page)
+{
+ loff_t old_size = inode->i_size;
+ bool i_size_changed = false;
+
+ /*
+ * No need to use i_size_read() here, the i_size cannot change under us
+ * because we hold i_rwsem.
+ *
+ * But it's important to update i_size while still holding page lock:
+ * page writeout could otherwise come in and zero beyond i_size.
+ */
+ if (pos + copied > inode->i_size) {
+ i_size_write(inode, pos + copied);
+ i_size_changed = true;
+ }
+
+ unlock_page(page);
+ put_page(page);
+
+ if (old_size < pos)
+ pagecache_isize_extended(inode, old_size, pos);
+ /*
+ * Don't mark the inode dirty under page lock. First, it unnecessarily
+ * makes the holding time of page lock longer. Second, it forces lock
+ * ordering of page lock and transaction start for journaling
+ * filesystems.
+ */
+ if (i_size_changed)
+ mark_inode_dirty(inode);
+ return copied;
+}
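The split lets a filesystem do its own bookkeeping between block_write_end() and the shared i_size/unlock logic. A hypothetical ->write_end sketch (the myfs_* name is illustrative):

static int myfs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
	/* journal or quota updates would go here, before the page is unlocked */
	return __generic_write_end(mapping->host, pos, copied, page);
}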
+
int block_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
@@ -2116,39 +2151,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
- struct inode *inode = mapping->host;
- loff_t old_size = inode->i_size;
- int i_size_changed = 0;
-
copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
-
- /*
- * No need to use i_size_read() here, the i_size
- * cannot change under us because we hold i_mutex.
- *
- * But it's important to update i_size while still holding page lock:
- * page writeout could otherwise come in and zero beyond i_size.
- */
- if (pos+copied > inode->i_size) {
- i_size_write(inode, pos+copied);
- i_size_changed = 1;
- }
-
- unlock_page(page);
- put_page(page);
-
- if (old_size < pos)
- pagecache_isize_extended(inode, old_size, pos);
- /*
- * Don't mark the inode dirty under page lock. First, it unnecessarily
- * makes the holding time of page lock longer. Second, it forces lock
- * ordering of page lock and transaction start for journaling
- * filesystems.
- */
- if (i_size_changed)
- mark_inode_dirty(inode);
-
- return copied;
+ return __generic_write_end(mapping->host, pos, copied, page);
}
EXPORT_SYMBOL(generic_write_end);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ad0bed99b1d5..e2679e8a2535 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -429,8 +429,7 @@ out:
* file or symlink, return 1 so the VFS can retry.
*/
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned flags, umode_t mode,
- int *opened)
+ struct file *file, unsigned flags, umode_t mode)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -507,9 +506,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
dout("atomic_open finish_open on dn %p\n", dn);
if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
ceph_init_inode_acls(d_inode(dentry), &acls);
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
}
- err = finish_open(file, dentry, ceph_open, opened);
+ err = finish_open(file, dentry, ceph_open);
}
out_req:
if (!req->r_err && req->r_target_inode)
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index a7077a0c989f..971328b99ede 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -1025,8 +1025,7 @@ extern const struct file_operations ceph_file_fops;
extern int ceph_renew_caps(struct inode *inode);
extern int ceph_open(struct inode *inode, struct file *file);
extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned flags, umode_t mode,
- int *opened);
+ struct file *file, unsigned flags, umode_t mode);
extern int ceph_release(struct inode *inode, struct file *filp);
extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
char *data, size_t len);
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 5f132d59dfc2..35c83fe7dba0 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -16,24 +16,28 @@ config CIFS
select CRYPTO_DES
help
This is the client VFS module for the SMB3 family of NAS protocols,
- as well as for earlier dialects such as SMB2.1, SMB2 and the
+ (including support for the most recent, most secure dialect SMB3.1.1)
+ as well as for earlier dialects such as SMB2.1, SMB2 and the older
Common Internet File System (CIFS) protocol. CIFS was the successor
to the original dialect, the Server Message Block (SMB) protocol, the
native file sharing mechanism for most early PC operating systems.
- The SMB3 protocol is supported by most modern operating systems and
- NAS appliances (e.g. Samba, Windows 8, Windows 2012, MacOS).
+ The SMB3 protocol is supported by most modern operating systems
+ and NAS appliances (e.g. Samba, Windows 10, Windows Server 2016,
+ MacOS) and even in the cloud (e.g. Microsoft Azure).
The older CIFS protocol was included in Windows NT4, 2000 and XP (and
later) as well by Samba (which provides excellent CIFS and SMB3
- server support for Linux and many other operating systems). Limited
- support for OS/2 and Windows ME and similar very old servers is
- provided as well.
+ server support for Linux and many other operating systems). Use of
+ dialects older than SMB2.1 is often discouraged on public networks.
+ This module also provides limited support for OS/2 and Windows ME
+ and similar very old servers.
- The cifs module provides an advanced network file system client
+ This module provides an advanced network file system client
for mounting to SMB3 (and CIFS) compliant servers. It includes
support for DFS (hierarchical name space), secure per-user
- session establishment via Kerberos or NTLM or NTLMv2,
- safe distributed caching (oplock), optional packet
+ session establishment via Kerberos or NTLM or NTLMv2, RDMA
+ (smbdirect), advanced security features, per-share encryption,
+ directory leases, safe distributed caching (oplock), optional packet
signing, Unicode and other internationalization improvements.
In general, the default dialects, SMB3 and later, enable better
@@ -43,18 +47,11 @@ config CIFS
than SMB3 mounts. SMB2/SMB3 mount options are also
slightly simpler (compared to CIFS) due to protocol improvements.
- If you need to mount to Samba, Macs or Windows from this machine, say Y.
-
-config CIFS_STATS
- bool "CIFS statistics"
- depends on CIFS
- help
- Enabling this option will cause statistics for each server share
- mounted by the cifs client to be displayed in /proc/fs/cifs/Stats
+ If you need to mount to Samba, Azure, Macs or Windows from this machine, say Y.
config CIFS_STATS2
bool "Extended statistics"
- depends on CIFS_STATS
+ depends on CIFS
help
Enabling this option will allow more detailed statistics on SMB
request timing to be displayed in /proc/fs/cifs/DebugData and also
@@ -66,9 +63,24 @@ config CIFS_STATS2
Unless you are a developer or are doing network performance analysis
or tuning, say N.
+config CIFS_ALLOW_INSECURE_LEGACY
+ bool "Support legacy servers which use less secure dialects"
+ depends on CIFS
+ default y
+ help
+ Modern dialects, SMB2.1 and later (including SMB3 and 3.1.1), have
+ additional security features, including protection against
+ man-in-the-middle attacks and stronger crypto hashes, so the use
+ of legacy dialects (SMB1/CIFS and SMB2.0) is discouraged.
+
+ Disabling this option prevents users from using vers=1.0 or vers=2.0
+	  on mounts with cifs.ko.
+
+ If unsure, say Y.
+
config CIFS_WEAK_PW_HASH
bool "Support legacy servers which use weaker LANMAN security"
- depends on CIFS
+ depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY
help
Modern CIFS servers including Samba and most Windows versions
(since 1997) support stronger NTLM (and even NTLMv2 and Kerberos)
@@ -186,15 +198,6 @@ config CIFS_NFSD_EXPORT
help
Allows NFS server to export a CIFS mounted share (nfsd over cifs)
-config CIFS_SMB311
- bool "SMB3.1.1 network file system support"
- depends on CIFS
- select CRYPTO_SHA512
-
- help
- This enables support for the newest, and most secure dialect, SMB3.11.
- If unsure, say Y
-
config CIFS_SMB_DIRECT
bool "SMB Direct support (Experimental)"
depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index e1553d1e0e50..b7420e605b28 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -128,8 +128,10 @@ fscache_checkaux cifs_fscache_inode_check_aux(void *cookie_netfs_data,
memset(&auxdata, 0, sizeof(auxdata));
auxdata.eof = cifsi->server_eof;
- auxdata.last_write_time = timespec64_to_timespec(cifsi->vfs_inode.i_mtime);
- auxdata.last_change_time = timespec64_to_timespec(cifsi->vfs_inode.i_ctime);
+ auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
+ auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
+ auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
+ auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
if (memcmp(data, &auxdata, datalen) != 0)
return FSCACHE_CHECKAUX_OBSOLETE;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index bfe999505815..f1fbea947fef 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -160,25 +160,41 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "CIFS Version %s\n", CIFS_VERSION);
seq_printf(m, "Features:");
#ifdef CONFIG_CIFS_DFS_UPCALL
- seq_printf(m, " dfs");
+ seq_printf(m, " DFS");
#endif
#ifdef CONFIG_CIFS_FSCACHE
- seq_printf(m, " fscache");
+ seq_printf(m, ",FSCACHE");
+#endif
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ seq_printf(m, ",SMB_DIRECT");
+#endif
+#ifdef CONFIG_CIFS_STATS2
+ seq_printf(m, ",STATS2");
+#else
+ seq_printf(m, ",STATS");
+#endif
+#ifdef CONFIG_CIFS_DEBUG2
+ seq_printf(m, ",DEBUG2");
+#elif defined(CONFIG_CIFS_DEBUG)
+ seq_printf(m, ",DEBUG");
+#endif
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ seq_printf(m, ",ALLOW_INSECURE_LEGACY");
#endif
#ifdef CONFIG_CIFS_WEAK_PW_HASH
- seq_printf(m, " lanman");
+ seq_printf(m, ",WEAK_PW_HASH");
#endif
#ifdef CONFIG_CIFS_POSIX
- seq_printf(m, " posix");
+ seq_printf(m, ",CIFS_POSIX");
#endif
#ifdef CONFIG_CIFS_UPCALL
- seq_printf(m, " spnego");
+ seq_printf(m, ",UPCALL(SPNEGO)");
#endif
#ifdef CONFIG_CIFS_XATTR
- seq_printf(m, " xattr");
+ seq_printf(m, ",XATTR");
#endif
#ifdef CONFIG_CIFS_ACL
- seq_printf(m, " acl");
+ seq_printf(m, ",ACL");
#endif
seq_putc(m, '\n');
seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
@@ -259,10 +275,9 @@ skip_rdma:
server->credits, server->dialect);
if (server->sign)
seq_printf(m, " signed");
-#ifdef CONFIG_CIFS_SMB311
if (server->posix_ext_supported)
seq_printf(m, " posix");
-#endif /* 3.1.1 */
+
i++;
list_for_each(tmp2, &server->smb_ses_list) {
ses = list_entry(tmp2, struct cifs_ses,
@@ -350,7 +365,6 @@ skip_rdma:
return 0;
}
-#ifdef CONFIG_CIFS_STATS
static ssize_t cifs_stats_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *ppos)
{
@@ -364,13 +378,23 @@ static ssize_t cifs_stats_proc_write(struct file *file,
rc = kstrtobool_from_user(buffer, count, &bv);
if (rc == 0) {
#ifdef CONFIG_CIFS_STATS2
+ int i;
+
atomic_set(&totBufAllocCount, 0);
atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */
+ spin_lock(&GlobalMid_Lock);
+ GlobalMaxActiveXid = 0;
+ GlobalCurrentXid = 0;
+ spin_unlock(&GlobalMid_Lock);
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
+#ifdef CONFIG_CIFS_STATS2
+ for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++)
+ atomic_set(&server->smb2slowcmd[i], 0);
+#endif /* CONFIG_CIFS_STATS2 */
list_for_each(tmp2, &server->smb_ses_list) {
ses = list_entry(tmp2, struct cifs_ses,
smb_ses_list);
@@ -379,6 +403,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
struct cifs_tcon,
tcon_list);
atomic_set(&tcon->num_smbs_sent, 0);
+ spin_lock(&tcon->stat_lock);
+ tcon->bytes_read = 0;
+ tcon->bytes_written = 0;
+ spin_unlock(&tcon->stat_lock);
if (server->ops->clear_stats)
server->ops->clear_stats(tcon);
}
@@ -395,13 +423,15 @@ static ssize_t cifs_stats_proc_write(struct file *file,
static int cifs_stats_proc_show(struct seq_file *m, void *v)
{
int i;
+#ifdef CONFIG_CIFS_STATS2
+ int j;
+#endif /* STATS2 */
struct list_head *tmp1, *tmp2, *tmp3;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
- seq_printf(m,
- "Resources in use\nCIFS Session: %d\n",
+ seq_printf(m, "Resources in use\nCIFS Session: %d\n",
sesInfoAllocCount.counter);
seq_printf(m, "Share (unique mount targets): %d\n",
tconInfoAllocCount.counter);
@@ -430,6 +460,13 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
+#ifdef CONFIG_CIFS_STATS2
+ for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
+ if (atomic_read(&server->smb2slowcmd[j]))
+ seq_printf(m, "%d slow responses from %s for command %d\n",
+ atomic_read(&server->smb2slowcmd[j]),
+ server->hostname, j);
+#endif /* STATS2 */
list_for_each(tmp2, &server->smb_ses_list) {
ses = list_entry(tmp2, struct cifs_ses,
smb_ses_list);
@@ -466,7 +503,6 @@ static const struct file_operations cifs_stats_proc_fops = {
.release = single_release,
.write = cifs_stats_proc_write,
};
-#endif /* STATS */
#ifdef CONFIG_CIFS_SMB_DIRECT
#define PROC_FILE_DEFINE(name) \
@@ -524,9 +560,7 @@ cifs_proc_init(void)
proc_create_single("DebugData", 0, proc_fs_cifs,
cifs_debug_data_proc_show);
-#ifdef CONFIG_CIFS_STATS
proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_fops);
-#endif /* STATS */
proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_fops);
proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_fops);
proc_create("LinuxExtensionsEnabled", 0644, proc_fs_cifs,
@@ -564,9 +598,7 @@ cifs_proc_clean(void)
remove_proc_entry("DebugData", proc_fs_cifs);
remove_proc_entry("cifsFYI", proc_fs_cifs);
remove_proc_entry("traceSMB", proc_fs_cifs);
-#ifdef CONFIG_CIFS_STATS
remove_proc_entry("Stats", proc_fs_cifs);
-#endif
remove_proc_entry("SecurityFlags", proc_fs_cifs);
remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index ee2a8ec70056..85b31cfa2f3c 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -83,7 +83,13 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
kaddr = (char *) kmap(rqst->rq_pages[i]) + offset;
- crypto_shash_update(shash, kaddr, len);
+ rc = crypto_shash_update(shash, kaddr, len);
+ if (rc) {
+ cifs_dbg(VFS, "%s: Could not update with payload\n",
+ __func__);
+ kunmap(rqst->rq_pages[i]);
+ return rc;
+ }
kunmap(rqst->rq_pages[i]);
}
@@ -452,7 +458,7 @@ find_timestamp(struct cifs_ses *ses)
unsigned char *blobptr;
unsigned char *blobend;
struct ntlmssp2_name *attrptr;
- struct timespec ts;
+ struct timespec64 ts;
if (!ses->auth_key.len || !ses->auth_key.response)
return 0;
@@ -477,7 +483,7 @@ find_timestamp(struct cifs_ses *ses)
blobptr += attrsize; /* advance attr value */
}
- ktime_get_real_ts(&ts);
+ ktime_get_real_ts64(&ts);
return cpu_to_le64(cifs_UnixTimeToNT(ts));
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index d5aa7ae917bf..7065426b3280 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -139,6 +139,9 @@ cifs_read_super(struct super_block *sb)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
sb->s_flags |= SB_POSIXACL;
+ if (tcon->snapshot_time)
+ sb->s_flags |= SB_RDONLY;
+
if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
sb->s_maxbytes = MAX_LFS_FILESIZE;
else
@@ -209,14 +212,16 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
xid = get_xid();
- /*
- * PATH_MAX may be too long - it would presumably be total path,
- * but note that some servers (includinng Samba 3) have a shorter
- * maximum path.
- *
- * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
- */
- buf->f_namelen = PATH_MAX;
+ if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
+ buf->f_namelen =
+ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
+ else
+ buf->f_namelen = PATH_MAX;
+
+ buf->f_fsid.val[0] = tcon->vol_serial_number;
+	/* use part of the create time for more randomness, see man statfs */
+ buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
+
buf->f_files = 0; /* undefined */
buf->f_ffree = 0; /* unlimited */
@@ -427,7 +432,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
else if (tcon->ses->user_name)
seq_show_option(s, "username", tcon->ses->user_name);
- if (tcon->ses->domainName)
+ if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
seq_show_option(s, "domain", tcon->ses->domainName);
if (srcaddr->sa_family != AF_UNSPEC) {
@@ -481,20 +486,12 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_puts(s, ",persistenthandles");
else if (tcon->use_resilient)
seq_puts(s, ",resilienthandles");
-
-#ifdef CONFIG_CIFS_SMB311
if (tcon->posix_extensions)
seq_puts(s, ",posix");
else if (tcon->unix_ext)
seq_puts(s, ",unix");
else
seq_puts(s, ",nounix");
-#else
- if (tcon->unix_ext)
- seq_puts(s, ",unix");
- else
- seq_puts(s, ",nounix");
-#endif /* SMB311 */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
seq_puts(s, ",posixpaths");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
@@ -546,6 +543,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",wsize=%u", cifs_sb->wsize);
seq_printf(s, ",echo_interval=%lu",
tcon->ses->server->echo_interval / HZ);
+ if (tcon->snapshot_time)
+ seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
/* convert actimeo and display it in seconds */
seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 5f0231803431..f3a78efc3109 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -65,8 +65,7 @@ extern struct inode *cifs_root_iget(struct super_block *);
extern int cifs_create(struct inode *, struct dentry *, umode_t,
bool excl);
extern int cifs_atomic_open(struct inode *, struct dentry *,
- struct file *, unsigned, umode_t,
- int *);
+ struct file *, unsigned, umode_t);
extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
unsigned int);
extern int cifs_unlink(struct inode *dir, struct dentry *dentry);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index c923c7854027..0c9ab62c3df4 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -76,6 +76,9 @@
#define SMB_ECHO_INTERVAL_MAX 600
#define SMB_ECHO_INTERVAL_DEFAULT 60
+/* maximum number of PDUs in one compound */
+#define MAX_COMPOUND 5
+
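/*
 * The demultiplex thread sizes its per-PDU bookkeeping with this bound
 * (mids[MAX_COMPOUND] and bufs[MAX_COMPOUND] later in this patch), so a
 * single received transform can complete up to five compounded requests.
 */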
/*
* Default number of credits to keep available for SMB3.
* This value is chosen somewhat arbitrarily. The Windows client
@@ -191,9 +194,7 @@ enum smb_version {
Smb_21,
Smb_30,
Smb_302,
-#ifdef CONFIG_CIFS_SMB311
Smb_311,
-#endif /* SMB311 */
Smb_3any,
Smb_default,
Smb_version_err
@@ -456,13 +457,11 @@ struct smb_version_operations {
long (*fallocate)(struct file *, struct cifs_tcon *, int, loff_t,
loff_t);
/* init transform request - used for encryption for now */
- int (*init_transform_rq)(struct TCP_Server_Info *, struct smb_rqst *,
- struct smb_rqst *);
- /* free transform request */
- void (*free_transform_rq)(struct smb_rqst *);
+ int (*init_transform_rq)(struct TCP_Server_Info *, int num_rqst,
+ struct smb_rqst *, struct smb_rqst *);
int (*is_transform_hdr)(void *buf);
int (*receive_transform)(struct TCP_Server_Info *,
- struct mid_q_entry **);
+ struct mid_q_entry **, char **, int *);
enum securityEnum (*select_sectype)(struct TCP_Server_Info *,
enum securityEnum);
int (*next_header)(char *);
@@ -684,15 +683,14 @@ struct TCP_Server_Info {
#ifdef CONFIG_CIFS_STATS2
atomic_t in_send; /* requests trying to send */
atomic_t num_waiters; /* blocked waiting to get in sendrecv */
-#endif
+ atomic_t smb2slowcmd[NUMBER_OF_SMB2_COMMANDS]; /* count resps > 1 sec */
+#endif /* STATS2 */
unsigned int max_read;
unsigned int max_write;
-#ifdef CONFIG_CIFS_SMB311
__le16 cipher_type;
/* save initital negprot hash */
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
bool posix_ext_supported;
-#endif /* 3.1.1 */
struct delayed_work reconnect; /* reconnect workqueue job */
struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
unsigned long echo_interval;
@@ -886,9 +884,7 @@ struct cifs_ses {
__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
-#ifdef CONFIG_CIFS_SMB311
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
-#endif /* 3.1.1 */
/*
* Network interfaces available on the server this session is
@@ -913,6 +909,7 @@ cap_unix(struct cifs_ses *ses)
struct cached_fid {
bool is_valid:1; /* Do we have a useable root fid */
+ struct kref refcount;
struct cifs_fid *fid;
struct mutex fid_mutex;
struct cifs_tcon *tcon;
@@ -936,7 +933,6 @@ struct cifs_tcon {
__u32 tid; /* The 4 byte tree id */
__u16 Flags; /* optional support bits */
enum statusEnum tidStatus;
-#ifdef CONFIG_CIFS_STATS
atomic_t num_smbs_sent;
union {
struct {
@@ -967,24 +963,9 @@ struct cifs_tcon {
atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
} smb2_stats;
} stats;
-#ifdef CONFIG_CIFS_STATS2
- unsigned long long time_writes;
- unsigned long long time_reads;
- unsigned long long time_opens;
- unsigned long long time_deletes;
- unsigned long long time_closes;
- unsigned long long time_mkdirs;
- unsigned long long time_rmdirs;
- unsigned long long time_renames;
- unsigned long long time_t2renames;
- unsigned long long time_ffirst;
- unsigned long long time_fnext;
- unsigned long long time_fclose;
-#endif /* CONFIG_CIFS_STATS2 */
__u64 bytes_read;
__u64 bytes_written;
spinlock_t stat_lock; /* protects the two fields above */
-#endif /* CONFIG_CIFS_STATS */
FILE_SYSTEM_DEVICE_INFO fsDevInfo;
FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
FILE_SYSTEM_UNIX_INFO fsUnixInfo;
@@ -997,9 +978,7 @@ struct cifs_tcon {
bool seal:1; /* transport encryption for this mounted share */
bool unix_ext:1; /* if false disable Linux extensions to CIFS protocol
for this mount even if server would support */
-#ifdef CONFIG_CIFS_SMB311
bool posix_extensions; /* if true SMB3.11 posix extensions enabled */
-#endif /* CIFS_311 */
bool local_lease:1; /* check leases (only) on local system not remote */
bool broken_posix_open; /* e.g. Samba server versions < 3.3.2, 3.2.9 */
bool broken_sparse_sup; /* if server or share does not support sparse */
@@ -1046,6 +1025,7 @@ struct tcon_link {
};
extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb);
+extern void smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst);
static inline struct cifs_tcon *
tlink_tcon(struct tcon_link *tlink)
@@ -1352,7 +1332,6 @@ convert_delimiter(char *path, char delim)
*pos = delim;
}
-#ifdef CONFIG_CIFS_STATS
#define cifs_stats_inc atomic_inc
static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
@@ -1372,13 +1351,6 @@ static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
tcon->bytes_read += bytes;
spin_unlock(&tcon->stat_lock);
}
-#else
-
-#define cifs_stats_inc(field) do {} while (0)
-#define cifs_stats_bytes_written(tcon, bytes) do {} while (0)
-#define cifs_stats_bytes_read(tcon, bytes) do {} while (0)
-
-#endif
/*
@@ -1544,9 +1516,9 @@ struct cifs_fattr {
dev_t cf_rdev;
unsigned int cf_nlink;
unsigned int cf_dtype;
- struct timespec cf_atime;
- struct timespec cf_mtime;
- struct timespec cf_ctime;
+ struct timespec64 cf_atime;
+ struct timespec64 cf_mtime;
+ struct timespec64 cf_ctime;
};
static inline void free_dfs_info_param(struct dfs_info3_param *param)
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 1890f534c88b..20adda4de83b 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -94,6 +94,10 @@ extern int cifs_call_async(struct TCP_Server_Info *server,
extern int cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
struct smb_rqst *rqst, int *resp_buf_type,
const int flags, struct kvec *resp_iov);
+extern int compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ const int flags, const int num_rqst,
+ struct smb_rqst *rqst, int *resp_buf_type,
+ struct kvec *resp_iov);
extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
struct smb_hdr * /* input */ ,
struct smb_hdr * /* out */ ,
@@ -143,9 +147,9 @@ extern enum securityEnum select_sectype(struct TCP_Server_Info *server,
enum securityEnum requested);
extern int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_cp);
-extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
-extern u64 cifs_UnixTimeToNT(struct timespec);
-extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
+extern struct timespec64 cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
+extern u64 cifs_UnixTimeToNT(struct timespec64);
+extern struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
int offset);
extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
extern int cifs_get_writer(struct cifsInodeInfo *cinode);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 93408eab92e7..dc2f4cf08fe9 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -508,13 +508,13 @@ decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr)
* this requirement.
*/
int val, seconds, remain, result;
- struct timespec ts;
- unsigned long utc = ktime_get_real_seconds();
+ struct timespec64 ts;
+ time64_t utc = ktime_get_real_seconds();
ts = cnvrtDosUnixTm(rsp->SrvTime.Date,
rsp->SrvTime.Time, 0);
- cifs_dbg(FYI, "SrvTime %d sec since 1970 (utc: %d) diff: %d\n",
- (int)ts.tv_sec, (int)utc,
- (int)(utc - ts.tv_sec));
+ cifs_dbg(FYI, "SrvTime %lld sec since 1970 (utc: %lld) diff: %lld\n",
+ ts.tv_sec, utc,
+ utc - ts.tv_sec);
val = (int)(utc - ts.tv_sec);
seconds = abs(val);
result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ;
@@ -4082,7 +4082,7 @@ QInfRetry:
if (rc) {
cifs_dbg(FYI, "Send error in QueryInfo = %d\n", rc);
} else if (data) {
- struct timespec ts;
+ struct timespec64 ts;
__u32 time = le32_to_cpu(pSMBr->last_write_time);
/* decode response */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 5df2c0698cda..c832a8a1970a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -303,10 +303,8 @@ static const match_table_t cifs_smb_version_tokens = {
{ Smb_21, SMB21_VERSION_STRING },
{ Smb_30, SMB30_VERSION_STRING },
{ Smb_302, SMB302_VERSION_STRING },
-#ifdef CONFIG_CIFS_SMB311
{ Smb_311, SMB311_VERSION_STRING },
{ Smb_311, ALT_SMB311_VERSION_STRING },
-#endif /* SMB311 */
{ Smb_3any, SMB3ANY_VERSION_STRING },
{ Smb_default, SMBDEFAULT_VERSION_STRING },
{ Smb_version_err, NULL }
@@ -350,6 +348,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
server->max_read = 0;
cifs_dbg(FYI, "Reconnecting tcp session\n");
+ trace_smb3_reconnect(server->CurrentMid, server->hostname);
/* before reconnecting the tcp session, mark the smb session (uid)
and the tid bad so they are not used until reconnected */
@@ -851,13 +850,14 @@ cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
static int
cifs_demultiplex_thread(void *p)
{
- int length;
+ int i, num_mids, length;
struct TCP_Server_Info *server = p;
unsigned int pdu_length;
unsigned int next_offset;
char *buf = NULL;
struct task_struct *task_to_wake = NULL;
- struct mid_q_entry *mid_entry;
+ struct mid_q_entry *mids[MAX_COMPOUND];
+ char *bufs[MAX_COMPOUND];
current->flags |= PF_MEMALLOC;
cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
@@ -924,58 +924,75 @@ next_pdu:
server->pdu_size = next_offset;
}
- mid_entry = NULL;
+ memset(mids, 0, sizeof(mids));
+ memset(bufs, 0, sizeof(bufs));
+ num_mids = 0;
+
if (server->ops->is_transform_hdr &&
server->ops->receive_transform &&
server->ops->is_transform_hdr(buf)) {
length = server->ops->receive_transform(server,
- &mid_entry);
+ mids,
+ bufs,
+ &num_mids);
} else {
- mid_entry = server->ops->find_mid(server, buf);
+ mids[0] = server->ops->find_mid(server, buf);
+ bufs[0] = buf;
+ if (mids[0])
+ num_mids = 1;
- if (!mid_entry || !mid_entry->receive)
- length = standard_receive3(server, mid_entry);
+ if (!mids[0] || !mids[0]->receive)
+ length = standard_receive3(server, mids[0]);
else
- length = mid_entry->receive(server, mid_entry);
+ length = mids[0]->receive(server, mids[0]);
}
if (length < 0) {
- if (mid_entry)
- cifs_mid_q_entry_release(mid_entry);
+ for (i = 0; i < num_mids; i++)
+ if (mids[i])
+ cifs_mid_q_entry_release(mids[i]);
continue;
}
if (server->large_buf)
buf = server->bigbuf;
+
server->lstrp = jiffies;
- if (mid_entry != NULL) {
- mid_entry->resp_buf_size = server->pdu_size;
- if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
- mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
- server->ops->handle_cancelled_mid)
- server->ops->handle_cancelled_mid(
- mid_entry->resp_buf,
- server);
- if (!mid_entry->multiRsp || mid_entry->multiEnd)
- mid_entry->callback(mid_entry);
+ for (i = 0; i < num_mids; i++) {
+ if (mids[i] != NULL) {
+ mids[i]->resp_buf_size = server->pdu_size;
+ if ((mids[i]->mid_flags & MID_WAIT_CANCELLED) &&
+ mids[i]->mid_state == MID_RESPONSE_RECEIVED &&
+ server->ops->handle_cancelled_mid)
+ server->ops->handle_cancelled_mid(
+ mids[i]->resp_buf,
+ server);
- cifs_mid_q_entry_release(mid_entry);
- } else if (server->ops->is_oplock_break &&
- server->ops->is_oplock_break(buf, server)) {
- cifs_dbg(FYI, "Received oplock break\n");
- } else {
- cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
- atomic_read(&midCount));
- cifs_dump_mem("Received Data is: ", buf,
- HEADER_SIZE(server));
+ if (!mids[i]->multiRsp || mids[i]->multiEnd)
+ mids[i]->callback(mids[i]);
+
+ cifs_mid_q_entry_release(mids[i]);
+ } else if (server->ops->is_oplock_break &&
+ server->ops->is_oplock_break(bufs[i],
+ server)) {
+ cifs_dbg(FYI, "Received oplock break\n");
+ } else {
+ cifs_dbg(VFS, "No task to wake, unknown frame "
+ "received! NumMids %d\n",
+ atomic_read(&midCount));
+ cifs_dump_mem("Received Data is: ", bufs[i],
+ HEADER_SIZE(server));
#ifdef CONFIG_CIFS_DEBUG2
- if (server->ops->dump_detail)
- server->ops->dump_detail(buf, server);
- cifs_dump_mids(server);
+ if (server->ops->dump_detail)
+ server->ops->dump_detail(bufs[i],
+ server);
+ cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
+ }
}
+
if (pdu_length > server->pdu_size) {
if (!allocate_buffers(server))
continue;
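
With this change a single received PDU may fan out into up to MAX_COMPOUND chained responses, so the thread tracks parallel arrays pairing each response buffer (bufs[]) with its mid (mids[]). The invariant worth keeping in mind (a restatement of the loop above, not new code): every non-NULL slot is released exactly once on both the error and the success path.

	/* Error and success paths both end with one release per mid: */
	for (i = 0; i < num_mids; i++)
		if (mids[i])
			cifs_mid_q_entry_release(mids[i]);
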
@@ -1174,6 +1191,7 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol, bool is_smb3)
substring_t args[MAX_OPT_ARGS];
switch (match_token(value, cifs_smb_version_tokens, args)) {
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
case Smb_1:
if (disable_legacy_dialects) {
cifs_dbg(VFS, "mount with legacy dialect disabled\n");
@@ -1198,6 +1216,14 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol, bool is_smb3)
vol->ops = &smb20_operations;
vol->vals = &smb20_values;
break;
+#else
+ case Smb_1:
+ cifs_dbg(VFS, "vers=1.0 (cifs) mount not permitted when legacy dialects disabled\n");
+ return 1;
+ case Smb_20:
+ cifs_dbg(VFS, "vers=2.0 mount not permitted when legacy dialects disabled\n");
+ return 1;
+#endif /* CIFS_ALLOW_INSECURE_LEGACY */
case Smb_21:
vol->ops = &smb21_operations;
vol->vals = &smb21_values;
@@ -1210,12 +1236,10 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol, bool is_smb3)
vol->ops = &smb30_operations; /* currently identical with 3.0 */
vol->vals = &smb302_values;
break;
-#ifdef CONFIG_CIFS_SMB311
case Smb_311:
vol->ops = &smb311_operations;
vol->vals = &smb311_values;
break;
-#endif /* SMB311 */
case Smb_3any:
vol->ops = &smb30_operations; /* currently identical with 3.0 */
vol->vals = &smb3any_values;
@@ -3030,15 +3054,17 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
}
}
-#ifdef CONFIG_CIFS_SMB311
- if ((volume_info->linux_ext) && (ses->server->posix_ext_supported)) {
- if (ses->server->vals->protocol_id == SMB311_PROT_ID) {
+ if (volume_info->linux_ext) {
+ if (ses->server->posix_ext_supported) {
tcon->posix_extensions = true;
printk_once(KERN_WARNING
"SMB3.11 POSIX Extensions are experimental\n");
+ } else {
+ cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions.\n");
+ rc = -EOPNOTSUPP;
+ goto out_fail;
}
}
-#endif /* 311 */
/*
* BB Do we need to wrap session_mutex around this TCon call and Unix
@@ -3992,11 +4018,9 @@ try_mount_again:
goto remote_path_check;
}
-#ifdef CONFIG_CIFS_SMB311
/* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
if (tcon->posix_extensions)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
-#endif /* SMB3.11 */
/* tell server which Unix caps we support */
if (cap_unix(tcon->ses)) {
@@ -4459,11 +4483,10 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
goto out;
}
-#ifdef CONFIG_CIFS_SMB311
/* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
if (tcon->posix_extensions)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
-#endif /* SMB3.11 */
+
if (cap_unix(ses))
reset_cifs_unix_caps(0, tcon, NULL, vol_info);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index ddae52bd1993..3713d22b95a7 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -465,8 +465,7 @@ out_err:
int
cifs_atomic_open(struct inode *inode, struct dentry *direntry,
- struct file *file, unsigned oflags, umode_t mode,
- int *opened)
+ struct file *file, unsigned oflags, umode_t mode)
{
int rc;
unsigned int xid;
@@ -539,9 +538,9 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
}
if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
- rc = finish_open(file, direntry, generic_file_open, opened);
+ rc = finish_open(file, direntry, generic_file_open);
if (rc) {
if (server->ops->close)
server->ops->close(xid, tcon, &fid);
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 85145a763021..ea6ace9c2417 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -129,8 +129,10 @@ static void cifs_fscache_acquire_inode_cookie(struct cifsInodeInfo *cifsi,
memset(&auxdata, 0, sizeof(auxdata));
auxdata.eof = cifsi->server_eof;
- auxdata.last_write_time = timespec64_to_timespec(cifsi->vfs_inode.i_mtime);
- auxdata.last_change_time = timespec64_to_timespec(cifsi->vfs_inode.i_ctime);
+ auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
+ auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
+ auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
+ auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
cifsi->fscache =
fscache_acquire_cookie(tcon->fscache,
@@ -166,8 +168,10 @@ void cifs_fscache_release_inode_cookie(struct inode *inode)
if (cifsi->fscache) {
memset(&auxdata, 0, sizeof(auxdata));
auxdata.eof = cifsi->server_eof;
- auxdata.last_write_time = timespec64_to_timespec(cifsi->vfs_inode.i_mtime);
- auxdata.last_change_time = timespec64_to_timespec(cifsi->vfs_inode.i_ctime);
+ auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
+ auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
+ auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
+ auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cifsi->fscache);
fscache_relinquish_cookie(cifsi->fscache, &auxdata, false);
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index c7e3ac251e16..8c0862e41306 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -31,9 +31,11 @@
* Auxiliary data attached to CIFS inode within the cache
*/
struct cifs_fscache_inode_auxdata {
- struct timespec last_write_time;
- struct timespec last_change_time;
- u64 eof;
+ u64 last_write_time_sec;
+ u64 last_change_time_sec;
+ u32 last_write_time_nsec;
+ u32 last_change_time_nsec;
+ u64 eof;
};
/*
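
Replacing the two struct timespec members with explicit sec/nsec integers pins down the auxdata key's layout: struct timespec's tv_sec is a 32-bit long on 32-bit kernels, so the old blob differed by architecture and would wrap in 2038. Since the same five assignments now appear in both call sites above, a hypothetical helper could factor them out (a sketch only, not part of the patch):

	static void
	cifs_fscache_fill_auxdata(struct cifs_fscache_inode_auxdata *auxdata,
				  struct cifsInodeInfo *cifsi)
	{
		/* zero first so struct padding never leaks into the key */
		memset(auxdata, 0, sizeof(*auxdata));
		auxdata->eof = cifsi->server_eof;
		auxdata->last_write_time_sec   = cifsi->vfs_inode.i_mtime.tv_sec;
		auxdata->last_write_time_nsec  = cifsi->vfs_inode.i_mtime.tv_nsec;
		auxdata->last_change_time_sec  = cifsi->vfs_inode.i_ctime.tv_sec;
		auxdata->last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
	}
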
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index a2cfb33e85c1..d32eaa4b2437 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -95,7 +95,6 @@ static void
cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
{
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
- struct timespec ts;
cifs_dbg(FYI, "%s: revalidating inode %llu\n",
__func__, cifs_i->uniqueid);
@@ -114,8 +113,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
}
/* revalidate if mtime or size have changed */
- ts = timespec64_to_timespec(inode->i_mtime);
- if (timespec_equal(&ts, &fattr->cf_mtime) &&
+ if (timespec64_equal(&inode->i_mtime, &fattr->cf_mtime) &&
cifs_i->server_eof == fattr->cf_eof) {
cifs_dbg(FYI, "%s: inode %llu is unchanged\n",
__func__, cifs_i->uniqueid);
@@ -164,9 +162,9 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
cifs_revalidate_cache(inode, fattr);
spin_lock(&inode->i_lock);
- inode->i_atime = timespec_to_timespec64(fattr->cf_atime);
- inode->i_mtime = timespec_to_timespec64(fattr->cf_mtime);
- inode->i_ctime = timespec_to_timespec64(fattr->cf_ctime);
+ inode->i_atime = fattr->cf_atime;
+ inode->i_mtime = fattr->cf_mtime;
+ inode->i_ctime = fattr->cf_ctime;
inode->i_rdev = fattr->cf_rdev;
cifs_nlink_fattr_to_inode(inode, fattr);
inode->i_uid = fattr->cf_uid;
@@ -327,8 +325,8 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
fattr->cf_uid = cifs_sb->mnt_uid;
fattr->cf_gid = cifs_sb->mnt_gid;
- ktime_get_real_ts(&fattr->cf_mtime);
- fattr->cf_mtime = timespec_trunc(fattr->cf_mtime, sb->s_time_gran);
+ ktime_get_real_ts64(&fattr->cf_mtime);
+ fattr->cf_mtime = timespec64_trunc(fattr->cf_mtime, sb->s_time_gran);
fattr->cf_atime = fattr->cf_ctime = fattr->cf_mtime;
fattr->cf_nlink = 2;
fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL;
@@ -604,8 +602,8 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
if (info->LastAccessTime)
fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
else {
- ktime_get_real_ts(&fattr->cf_atime);
- fattr->cf_atime = timespec_trunc(fattr->cf_atime, sb->s_time_gran);
+ ktime_get_real_ts64(&fattr->cf_atime);
+ fattr->cf_atime = timespec64_trunc(fattr->cf_atime, sb->s_time_gran);
}
fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
@@ -1122,17 +1120,19 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid,
if (!server->ops->set_file_info)
return -ENOSYS;
+ info_buf.Pad = 0;
+
if (attrs->ia_valid & ATTR_ATIME) {
set_time = true;
info_buf.LastAccessTime =
- cpu_to_le64(cifs_UnixTimeToNT(timespec64_to_timespec(attrs->ia_atime)));
+ cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_atime));
} else
info_buf.LastAccessTime = 0;
if (attrs->ia_valid & ATTR_MTIME) {
set_time = true;
info_buf.LastWriteTime =
- cpu_to_le64(cifs_UnixTimeToNT(timespec64_to_timespec(attrs->ia_mtime)));
+ cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_mtime));
} else
info_buf.LastWriteTime = 0;
@@ -1145,7 +1145,7 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid,
if (set_time && (attrs->ia_valid & ATTR_CTIME)) {
cifs_dbg(FYI, "CIFS - CTIME changed\n");
info_buf.ChangeTime =
- cpu_to_le64(cifs_UnixTimeToNT(timespec64_to_timespec(attrs->ia_ctime)));
+ cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime));
} else
info_buf.ChangeTime = 0;
@@ -1577,14 +1577,12 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
server = tcon->ses->server;
-#ifdef CONFIG_CIFS_SMB311
if ((server->ops->posix_mkdir) && (tcon->posix_extensions)) {
rc = server->ops->posix_mkdir(xid, inode, mode, tcon, full_path,
cifs_sb);
d_drop(direntry); /* for time being always refresh inode info */
goto mkdir_out;
}
-#endif /* SMB311 */
if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
@@ -2071,8 +2069,8 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
/* old CIFS Unix Extensions doesn't return create time */
if (CIFS_I(inode)->createtime) {
stat->result_mask |= STATX_BTIME;
- stat->btime = timespec_to_timespec64(
- cifs_NTtimeToUnix(cpu_to_le64(CIFS_I(inode)->createtime)));
+ stat->btime =
+ cifs_NTtimeToUnix(cpu_to_le64(CIFS_I(inode)->createtime));
}
stat->attributes_mask |= (STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED);
@@ -2278,17 +2276,17 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
args->gid = INVALID_GID; /* no change */
if (attrs->ia_valid & ATTR_ATIME)
- args->atime = cifs_UnixTimeToNT(timespec64_to_timespec(attrs->ia_atime));
+ args->atime = cifs_UnixTimeToNT(attrs->ia_atime);
else
args->atime = NO_CHANGE_64;
if (attrs->ia_valid & ATTR_MTIME)
- args->mtime = cifs_UnixTimeToNT(timespec64_to_timespec(attrs->ia_mtime));
+ args->mtime = cifs_UnixTimeToNT(attrs->ia_mtime);
else
args->mtime = NO_CHANGE_64;
if (attrs->ia_valid & ATTR_CTIME)
- args->ctime = cifs_UnixTimeToNT(timespec64_to_timespec(attrs->ia_ctime));
+ args->ctime = cifs_UnixTimeToNT(attrs->ia_ctime);
else
args->ctime = NO_CHANGE_64;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index de41f96aba49..2148b0f60e5e 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -396,7 +396,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_io_parms io_parms;
int buf_type = CIFS_NO_BUFFER;
__le16 *utf16_path;
- __u8 oplock = SMB2_OPLOCK_LEVEL_II;
+ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct smb2_file_all_info *pfile_info = NULL;
oparms.tcon = tcon;
@@ -459,7 +459,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_io_parms io_parms;
int create_options = CREATE_NOT_DIR;
__le16 *utf16_path;
- __u8 oplock = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct kvec iov[2];
if (backup_cred(cifs_sb))
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 53e8362cbc4a..dacb2c05674c 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -122,9 +122,7 @@ tconInfoAlloc(void)
mutex_init(&ret_buf->crfid.fid_mutex);
ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
GFP_KERNEL);
-#ifdef CONFIG_CIFS_STATS
spin_lock_init(&ret_buf->stat_lock);
-#endif
}
return ret_buf;
}
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index d7ad0dfe4e68..fdd908e4a26b 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -918,10 +918,10 @@ smbCalcSize(void *buf, struct TCP_Server_Info *server)
* Convert the NT UTC (based 1601-01-01, in hundred nanosecond units)
* into Unix UTC (based 1970-01-01, in seconds).
*/
-struct timespec
+struct timespec64
cifs_NTtimeToUnix(__le64 ntutc)
{
- struct timespec ts;
+ struct timespec64 ts;
/* BB what about the timezone? BB */
/* Subtract the NTFS time offset, then convert to 1s intervals. */
@@ -935,12 +935,12 @@ cifs_NTtimeToUnix(__le64 ntutc)
*/
if (t < 0) {
abs_t = -t;
- ts.tv_nsec = (long)(do_div(abs_t, 10000000) * 100);
+ ts.tv_nsec = (time64_t)(do_div(abs_t, 10000000) * 100);
ts.tv_nsec = -ts.tv_nsec;
ts.tv_sec = -abs_t;
} else {
abs_t = t;
- ts.tv_nsec = (long)do_div(abs_t, 10000000) * 100;
+ ts.tv_nsec = (time64_t)do_div(abs_t, 10000000) * 100;
ts.tv_sec = abs_t;
}
@@ -949,7 +949,7 @@ cifs_NTtimeToUnix(__le64 ntutc)
/* Convert the Unix UTC into NT UTC. */
u64
-cifs_UnixTimeToNT(struct timespec t)
+cifs_UnixTimeToNT(struct timespec64 t)
{
/* Convert to 100ns intervals and then add the NTFS time offset. */
return (u64) t.tv_sec * 10000000 + t.tv_nsec/100 + NTFS_TIME_OFFSET;
@@ -959,10 +959,11 @@ static const int total_days_of_prev_months[] = {
0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334
};
-struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
+struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
{
- struct timespec ts;
- int sec, min, days, month, year;
+ struct timespec64 ts;
+ time64_t sec;
+ int min, days, month, year;
u16 date = le16_to_cpu(le_date);
u16 time = le16_to_cpu(le_time);
SMB_TIME *st = (SMB_TIME *)&time;
@@ -973,7 +974,7 @@ struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
sec = 2 * st->TwoSeconds;
min = st->Minutes;
if ((sec > 59) || (min > 59))
- cifs_dbg(VFS, "illegal time min %d sec %d\n", min, sec);
+ cifs_dbg(VFS, "illegal time min %d sec %lld\n", min, sec);
sec += (min * 60);
sec += 60 * 60 * st->Hours;
if (st->Hours > 24)
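
These netmisc.c hunks only widen the types; the conversion arithmetic is unchanged. As a standalone check of that math (plain user-space C; 11644473600 s is the well-known offset between the 1601 NT epoch and the 1970 Unix epoch, and NT time counts 100ns units):

	#include <stdio.h>
	#include <stdint.h>

	#define NTFS_TIME_OFFSET (11644473600ULL * 10000000ULL) /* 100ns units */

	int main(void)
	{
		/* 15 seconds and 42 hundred-nanosecond units past the epoch */
		uint64_t ntutc = NTFS_TIME_OFFSET + 15ULL * 10000000ULL + 42;
		int64_t t = ntutc - NTFS_TIME_OFFSET;

		printf("sec=%lld nsec=%lld\n",
		       (long long)(t / 10000000),         /* prints 15   */
		       (long long)(t % 10000000) * 100);  /* prints 4200 */
		return 0;
	}
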
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 646dcd149de1..378151e09e91 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -624,7 +624,6 @@ cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
static void
cifs_clear_stats(struct cifs_tcon *tcon)
{
-#ifdef CONFIG_CIFS_STATS
atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
@@ -646,13 +645,11 @@ cifs_clear_stats(struct cifs_tcon *tcon)
atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
-#endif
}
static void
cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
-#ifdef CONFIG_CIFS_STATS
seq_printf(m, " Oplocks breaks: %d",
atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
seq_printf(m, "\nReads: %d Bytes: %llu",
@@ -684,7 +681,6 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
atomic_read(&tcon->stats.cifs_stats.num_ffirst),
atomic_read(&tcon->stats.cifs_stats.num_fnext),
atomic_read(&tcon->stats.cifs_stats.num_fclose));
-#endif
}
static void
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index d01ad706d7fc..1eef1791d0c4 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -120,7 +120,9 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
break;
}
- if (use_cached_root_handle == false)
+ if (use_cached_root_handle)
+ close_shroot(&tcon->crfid);
+ else
rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
if (tmprc)
rc = tmprc;
@@ -281,7 +283,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
int rc;
if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
- (buf->LastWriteTime == 0) && (buf->ChangeTime) &&
+ (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
(buf->Attributes == 0))
return 0; /* would be a no op, no sense sending this */
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 3ff7cec2da81..303d4592ebe7 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -93,7 +93,6 @@ static const __le16 smb2_rsp_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
/* SMB2_OPLOCK_BREAK */ cpu_to_le16(24)
};
-#ifdef CONFIG_CIFS_SMB311
static __u32 get_neg_ctxt_len(struct smb2_sync_hdr *hdr, __u32 len,
__u32 non_ctxlen)
{
@@ -127,7 +126,6 @@ static __u32 get_neg_ctxt_len(struct smb2_sync_hdr *hdr, __u32 len,
/* length of negcontexts including pad from end of sec blob to them */
return (len - nc_offset) + size_of_pad_before_neg_ctxts;
}
-#endif /* CIFS_SMB311 */
int
smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
@@ -222,10 +220,9 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
clc_len = smb2_calc_size(buf, srvr);
-#ifdef CONFIG_CIFS_SMB311
if (shdr->Command == SMB2_NEGOTIATE)
clc_len += get_neg_ctxt_len(shdr, len, clc_len);
-#endif /* SMB311 */
+
if (len != clc_len) {
cifs_dbg(FYI, "Calculated size %u length %u mismatch mid %llu\n",
clc_len, len, mid);
@@ -451,15 +448,13 @@ cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
/* Windows doesn't allow paths beginning with \ */
if (from[0] == '\\')
start_of_path = from + 1;
-#ifdef CONFIG_CIFS_SMB311
+
/* SMB311 POSIX extensions paths do not include leading slash */
else if (cifs_sb_master_tlink(cifs_sb) &&
cifs_sb_master_tcon(cifs_sb)->posix_extensions &&
(from[0] == '/')) {
start_of_path = from + 1;
- }
-#endif /* 311 */
- else
+ } else
start_of_path = from;
to = cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len,
@@ -759,7 +754,6 @@ smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
return 0;
}
-#ifdef CONFIG_CIFS_SMB311
/**
* smb311_update_preauth_hash - update @ses hash with the packet data in @iov
*
@@ -821,4 +815,3 @@ smb311_update_preauth_hash(struct cifs_ses *ses, struct kvec *iov, int nvec)
return 0;
}
-#endif
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index ea92a38b2f08..541258447c4c 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -444,7 +444,11 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
NULL /* no data input */, 0 /* no data input */,
(char **)&out_buf, &ret_data_len);
- if (rc != 0) {
+ if (rc == -EOPNOTSUPP) {
+ cifs_dbg(FYI,
+ "server does not support query network interfaces\n");
+ goto out;
+ } else if (rc != 0) {
cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
goto out;
}
@@ -466,21 +470,36 @@ out:
return rc;
}
-void
-smb2_cached_lease_break(struct work_struct *work)
+static void
+smb2_close_cached_fid(struct kref *ref)
{
- struct cached_fid *cfid = container_of(work,
- struct cached_fid, lease_break);
- mutex_lock(&cfid->fid_mutex);
+ struct cached_fid *cfid = container_of(ref, struct cached_fid,
+ refcount);
+
if (cfid->is_valid) {
cifs_dbg(FYI, "clear cached root file handle\n");
SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
cfid->fid->volatile_fid);
cfid->is_valid = false;
}
+}
+
+void close_shroot(struct cached_fid *cfid)
+{
+ mutex_lock(&cfid->fid_mutex);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
mutex_unlock(&cfid->fid_mutex);
}
+void
+smb2_cached_lease_break(struct work_struct *work)
+{
+ struct cached_fid *cfid = container_of(work,
+ struct cached_fid, lease_break);
+
+ close_shroot(cfid);
+}
+
/*
* Open the directory at the root of a share
*/
@@ -495,6 +514,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
if (tcon->crfid.is_valid) {
cifs_dbg(FYI, "found a cached root file handle\n");
memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
+ kref_get(&tcon->crfid.refcount);
mutex_unlock(&tcon->crfid.fid_mutex);
return 0;
}
@@ -511,6 +531,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
tcon->crfid.tcon = tcon;
tcon->crfid.is_valid = true;
+ kref_init(&tcon->crfid.refcount);
+ kref_get(&tcon->crfid.refcount);
}
mutex_unlock(&tcon->crfid.fid_mutex);
return rc;
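
The refcounting introduced here is easy to misread: kref_init() plus the immediate kref_get() leave the cached fid with two references, one owned by the cache itself and one by the caller of open_shroot(). A lifetime sketch (comments only, matching the functions above):

	/*
	 *   open_shroot()  miss: kref_init() + kref_get() -> count = 2
	 *                        (one ref for the cache, one for the opener)
	 *   open_shroot()  hit:  kref_get()               -> count + 1
	 *   close_shroot()       kref_put()               -> count - 1
	 *   lease break:         close_shroot() drops the cache's own ref;
	 *                        when the count hits zero,
	 *                        smb2_close_cached_fid() sends the real
	 *                        SMB2_close on the wire.
	 */
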
@@ -549,9 +571,14 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
FS_DEVICE_INFORMATION);
SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+ FS_VOLUME_INFORMATION);
+ SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
if (no_cached_open)
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+ else
+ close_shroot(&tcon->crfid);
+
return;
}
@@ -873,13 +900,11 @@ smb2_can_echo(struct TCP_Server_Info *server)
static void
smb2_clear_stats(struct cifs_tcon *tcon)
{
-#ifdef CONFIG_CIFS_STATS
int i;
for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
}
-#endif
}
static void
@@ -918,67 +943,58 @@ smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
static void
smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
-#ifdef CONFIG_CIFS_STATS
atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
- seq_printf(m, "\nNegotiates: %d sent %d failed",
- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
- seq_printf(m, "\nSessionSetups: %d sent %d failed",
- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
- seq_printf(m, "\nLogoffs: %d sent %d failed",
- atomic_read(&sent[SMB2_LOGOFF_HE]),
- atomic_read(&failed[SMB2_LOGOFF_HE]));
- seq_printf(m, "\nTreeConnects: %d sent %d failed",
+
+ /*
+ * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
+ * totals (requests sent) since those SMBs are per-session not per tcon
+ */
+ seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
+ (long long)(tcon->bytes_read),
+ (long long)(tcon->bytes_written));
+ seq_printf(m, "\nTreeConnects: %d total %d failed",
atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
- seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
+ seq_printf(m, "\nTreeDisconnects: %d total %d failed",
atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
- seq_printf(m, "\nCreates: %d sent %d failed",
+ seq_printf(m, "\nCreates: %d total %d failed",
atomic_read(&sent[SMB2_CREATE_HE]),
atomic_read(&failed[SMB2_CREATE_HE]));
- seq_printf(m, "\nCloses: %d sent %d failed",
+ seq_printf(m, "\nCloses: %d total %d failed",
atomic_read(&sent[SMB2_CLOSE_HE]),
atomic_read(&failed[SMB2_CLOSE_HE]));
- seq_printf(m, "\nFlushes: %d sent %d failed",
+ seq_printf(m, "\nFlushes: %d total %d failed",
atomic_read(&sent[SMB2_FLUSH_HE]),
atomic_read(&failed[SMB2_FLUSH_HE]));
- seq_printf(m, "\nReads: %d sent %d failed",
+ seq_printf(m, "\nReads: %d total %d failed",
atomic_read(&sent[SMB2_READ_HE]),
atomic_read(&failed[SMB2_READ_HE]));
- seq_printf(m, "\nWrites: %d sent %d failed",
+ seq_printf(m, "\nWrites: %d total %d failed",
atomic_read(&sent[SMB2_WRITE_HE]),
atomic_read(&failed[SMB2_WRITE_HE]));
- seq_printf(m, "\nLocks: %d sent %d failed",
+ seq_printf(m, "\nLocks: %d total %d failed",
atomic_read(&sent[SMB2_LOCK_HE]),
atomic_read(&failed[SMB2_LOCK_HE]));
- seq_printf(m, "\nIOCTLs: %d sent %d failed",
+ seq_printf(m, "\nIOCTLs: %d total %d failed",
atomic_read(&sent[SMB2_IOCTL_HE]),
atomic_read(&failed[SMB2_IOCTL_HE]));
- seq_printf(m, "\nCancels: %d sent %d failed",
- atomic_read(&sent[SMB2_CANCEL_HE]),
- atomic_read(&failed[SMB2_CANCEL_HE]));
- seq_printf(m, "\nEchos: %d sent %d failed",
- atomic_read(&sent[SMB2_ECHO_HE]),
- atomic_read(&failed[SMB2_ECHO_HE]));
- seq_printf(m, "\nQueryDirectories: %d sent %d failed",
+ seq_printf(m, "\nQueryDirectories: %d total %d failed",
atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
- seq_printf(m, "\nChangeNotifies: %d sent %d failed",
+ seq_printf(m, "\nChangeNotifies: %d total %d failed",
atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
- seq_printf(m, "\nQueryInfos: %d sent %d failed",
+ seq_printf(m, "\nQueryInfos: %d total %d failed",
atomic_read(&sent[SMB2_QUERY_INFO_HE]),
atomic_read(&failed[SMB2_QUERY_INFO_HE]));
- seq_printf(m, "\nSetInfos: %d sent %d failed",
+ seq_printf(m, "\nSetInfos: %d total %d failed",
atomic_read(&sent[SMB2_SET_INFO_HE]),
atomic_read(&failed[SMB2_SET_INFO_HE]));
seq_printf(m, "\nOplockBreaks: %d sent %d failed",
atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
-#endif
}
static void
@@ -1353,6 +1369,13 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
}
+/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
+#define GMT_TOKEN_SIZE 50
+
+/*
+ * Input buffer contains (empty) struct smb_snapshot array with size filled in
+ * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
+ */
static int
smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
struct cifsFileInfo *cfile, void __user *ioc_buf)
@@ -1382,14 +1405,27 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
kfree(retbuf);
return rc;
}
- if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) {
- rc = -ERANGE;
- kfree(retbuf);
- return rc;
- }
- if (ret_data_len > snapshot_in.snapshot_array_size)
- ret_data_len = snapshot_in.snapshot_array_size;
+ /*
+ * Check for min size, ie not large enough to fit even one GMT
+ * token (snapshot). On the first ioctl some users may pass in
+ * smaller size (or zero) to simply get the size of the array
+ * so the user space caller can allocate sufficient memory
+ * and retry the ioctl again with larger array size sufficient
+ * to hold all of the snapshot GMT tokens on the second try.
+ */
+ if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
+ ret_data_len = sizeof(struct smb_snapshot_array);
+
+ /*
+ * We return struct SRV_SNAPSHOT_ARRAY, followed by
+ * the snapshot array (of 50 byte GMT tokens) each
+ * representing an available previous version of the data
+ */
+ if (ret_data_len > (snapshot_in.snapshot_array_size +
+ sizeof(struct smb_snapshot_array)))
+ ret_data_len = snapshot_in.snapshot_array_size +
+ sizeof(struct smb_snapshot_array);
if (copy_to_user(ioc_buf, retbuf, ret_data_len))
rc = -EFAULT;
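
To make the sizing concrete: a token such as "@GMT-2018.06.30-19.34.17" is 24 characters, hence 48 bytes of UTF-16 plus a 2-byte terminator, matching the GMT_TOKEN_SIZE of 50 above. A sketch of the two-pass caller the comment anticipates (ioctl name and struct fields assumed from the CIFS ioctl header, so treat this as illustrative):

	struct smb_snapshot_array probe = { 0 };   /* pass 1: size query */

	if (ioctl(fd, CIFS_ENUMERATE_SNAPSHOTS, &probe) == 0) {
		/* pass 2: header plus room for each 50-byte GMT token */
		size_t want = sizeof(probe) +
			      probe.number_of_snapshots * GMT_TOKEN_SIZE;
		/* allocate 'want' bytes, set snapshot_array_size, retry */
	}
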
@@ -1487,7 +1523,11 @@ smb2_is_session_expired(char *buf)
shdr->Status != STATUS_USER_SESSION_DELETED)
return false;
+ trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
+ le16_to_cpu(shdr->Command),
+ le64_to_cpu(shdr->MessageId));
cifs_dbg(FYI, "Session expired or deleted\n");
+
return true;
}
@@ -1504,16 +1544,140 @@ smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
CIFS_CACHE_READ(cinode) ? 1 : 0);
}
+static void
+smb2_set_related(struct smb_rqst *rqst)
+{
+ struct smb2_sync_hdr *shdr;
+
+ shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
+ shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
+}
+
+char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
+
+static void
+smb2_set_next_command(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+{
+ struct smb2_sync_hdr *shdr;
+ unsigned long len = smb_rqst_len(server, rqst);
+
+ /* SMB headers in a compound are 8 byte aligned. */
+ if (len & 7) {
+ rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
+ rqst->rq_iov[rqst->rq_nvec].iov_len = 8 - (len & 7);
+ rqst->rq_nvec++;
+ len = smb_rqst_len(server, rqst);
+ }
+
+ shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
+ shdr->NextCommand = cpu_to_le32(len);
+}
+
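
smb2_set_next_command() above enforces the rule that every header in a compound starts on an 8-byte boundary, appending up to 7 zero bytes from smb2_padding and recording the padded length in NextCommand. The arithmetic in isolation (hypothetical helper; e.g. len = 73 pads by 7, so NextCommand becomes 80):

	static unsigned long smb2_pad_len(unsigned long len)
	{
		/* bytes to reach the next multiple of 8; 0 if aligned */
		return (len & 7) ? 8 - (len & 7) : 0;
	}
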
static int
smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
struct kstatfs *buf)
{
+ struct smb2_query_info_rsp *rsp;
+ struct smb2_fs_full_size_info *info = NULL;
+ struct smb_rqst rqst[3];
+ int resp_buftype[3];
+ struct kvec rsp_iov[3];
+ struct kvec open_iov[5]; /* 4 + potential padding. */
+ struct kvec qi_iov[1];
+ struct kvec close_iov[1];
+ struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = ses->server;
+ __le16 srch_path = 0; /* Null - open root of share */
+ u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ struct cifs_open_parms oparms;
+ struct cifs_fid fid;
+ int flags = 0;
+ int rc;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ memset(rqst, 0, sizeof(rqst));
+ memset(resp_buftype, 0, sizeof(resp_buftype));
+ memset(rsp_iov, 0, sizeof(rsp_iov));
+
+ memset(&open_iov, 0, sizeof(open_iov));
+ rqst[0].rq_iov = open_iov;
+ rqst[0].rq_nvec = 4;
+
+ oparms.tcon = tcon;
+ oparms.desired_access = FILE_READ_ATTRIBUTES;
+ oparms.disposition = FILE_OPEN;
+ oparms.create_options = 0;
+ oparms.fid = &fid;
+ oparms.reconnect = false;
+
+ rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &srch_path);
+ if (rc)
+ goto qfs_exit;
+ smb2_set_next_command(server, &rqst[0]);
+
+ memset(&qi_iov, 0, sizeof(qi_iov));
+ rqst[1].rq_iov = qi_iov;
+ rqst[1].rq_nvec = 1;
+
+ rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
+ FS_FULL_SIZE_INFORMATION,
+ SMB2_O_INFO_FILESYSTEM, 0,
+ sizeof(struct smb2_fs_full_size_info));
+ if (rc)
+ goto qfs_exit;
+ smb2_set_next_command(server, &rqst[1]);
+ smb2_set_related(&rqst[1]);
+
+ memset(&close_iov, 0, sizeof(close_iov));
+ rqst[2].rq_iov = close_iov;
+ rqst[2].rq_nvec = 1;
+
+ rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
+ if (rc)
+ goto qfs_exit;
+ smb2_set_related(&rqst[2]);
+
+ rc = compound_send_recv(xid, ses, flags, 3, rqst,
+ resp_buftype, rsp_iov);
+ if (rc)
+ goto qfs_exit;
+
+ rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+ buf->f_type = SMB2_MAGIC_NUMBER;
+ info = (struct smb2_fs_full_size_info *)(
+ le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
+ rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
+ le32_to_cpu(rsp->OutputBufferLength),
+ &rsp_iov[1],
+ sizeof(struct smb2_fs_full_size_info));
+ if (!rc)
+ smb2_copy_fs_info_to_kstatfs(info, buf);
+
+qfs_exit:
+ SMB2_open_free(&rqst[0]);
+ SMB2_query_info_free(&rqst[1]);
+ SMB2_close_free(&rqst[2]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+ return rc;
+}
+
+static int
+smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
+ struct kstatfs *buf)
+{
int rc;
__le16 srch_path = 0; /* Null - open root of share */
u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct cifs_open_parms oparms;
struct cifs_fid fid;
+ if (!tcon->posix_extensions)
+ return smb2_queryfs(xid, tcon, buf);
+
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
@@ -1524,9 +1688,10 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
if (rc)
return rc;
+
+ rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
+ fid.volatile_fid, buf);
buf->f_type = SMB2_MAGIC_NUMBER;
- rc = SMB2_QFS_info(xid, tcon, fid.persistent_fid, fid.volatile_fid,
- buf);
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
return rc;
}
@@ -1700,7 +1865,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
&resp_buftype);
if (!rc || !err_iov.iov_base) {
rc = -ENOENT;
- goto querty_exit;
+ goto free_path;
}
err_buf = err_iov.iov_base;
@@ -1741,6 +1906,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
querty_exit:
free_rsp_buf(resp_buftype, err_buf);
+ free_path:
kfree(utf16_path);
return rc;
}
@@ -2326,35 +2492,51 @@ static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}
-/* Assumes:
- * rqst->rq_iov[0] is transform header
- * rqst->rq_iov[1+] data to be encrypted/decrypted
+/* Assumes the first rqst has a transform header as the first iov.
+ * I.e.
+ * rqst[0].rq_iov[0] is transform header
+ * rqst[0].rq_iov[1+] data to be encrypted/decrypted
+ * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
*/
static struct scatterlist *
-init_sg(struct smb_rqst *rqst, u8 *sign)
+init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
{
- unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages + 1;
- unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
+ unsigned int sg_len;
struct scatterlist *sg;
unsigned int i;
unsigned int j;
+ unsigned int idx = 0;
+ int skip;
+
+ sg_len = 1;
+ for (i = 0; i < num_rqst; i++)
+ sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
if (!sg)
return NULL;
sg_init_table(sg, sg_len);
- smb2_sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 20, assoc_data_len);
- for (i = 1; i < rqst->rq_nvec; i++)
- smb2_sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
- rqst->rq_iov[i].iov_len);
- for (j = 0; i < sg_len - 1; i++, j++) {
- unsigned int len, offset;
+ for (i = 0; i < num_rqst; i++) {
+ for (j = 0; j < rqst[i].rq_nvec; j++) {
+ /*
+ * The first rqst has a transform header where the
+ * first 20 bytes are not part of the encrypted blob
+ */
+ skip = (i == 0) && (j == 0) ? 20 : 0;
+ smb2_sg_set_buf(&sg[idx++],
+ rqst[i].rq_iov[j].iov_base + skip,
+ rqst[i].rq_iov[j].iov_len - skip);
+ }
- rqst_page_get_length(rqst, j, &len, &offset);
- sg_set_page(&sg[i], rqst->rq_pages[j], len, offset);
+ for (j = 0; j < rqst[i].rq_npages; j++) {
+ unsigned int len, offset;
+
+ rqst_page_get_length(&rqst[i], j, &len, &offset);
+ sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
+ }
}
- smb2_sg_set_buf(&sg[sg_len - 1], sign, SMB2_SIGNATURE_SIZE);
+ smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
return sg;
}
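
Sizing check for the new scatterlist math: with num_rqst = 3 and, say, 4 + 1 + 1 iovecs and 0 + 0 + 2 pages across the requests, sg_len = 1 + (4+0) + (1+0) + (1+2) = 9 entries, the extra one holding the signature; only the first 20 bytes of rqst[0].rq_iov[0] (the transform-header fields outside the encrypted blob) are skipped.
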
@@ -2386,10 +2568,11 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
* untouched.
*/
static int
-crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
+crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ struct smb_rqst *rqst, int enc)
{
struct smb2_transform_hdr *tr_hdr =
- (struct smb2_transform_hdr *)rqst->rq_iov[0].iov_base;
+ (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
int rc = 0;
struct scatterlist *sg;
@@ -2440,7 +2623,7 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
crypt_len += SMB2_SIGNATURE_SIZE;
}
- sg = init_sg(rqst, sign);
+ sg = init_sg(num_rqst, rqst, sign);
if (!sg) {
cifs_dbg(VFS, "%s: Failed to init sg", __func__);
rc = -ENOMEM;
@@ -2477,103 +2660,98 @@ free_req:
return rc;
}
+void
+smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
+{
+ int i, j;
+
+ for (i = 0; i < num_rqst; i++) {
+ if (rqst[i].rq_pages) {
+ for (j = rqst[i].rq_npages - 1; j >= 0; j--)
+ put_page(rqst[i].rq_pages[j]);
+ kfree(rqst[i].rq_pages);
+ }
+ }
+}
+
+/*
+ * This function will initialize new_rq and encrypt the content.
+ * The first entry, new_rq[0], only contains a single iov which contains
+ * a smb2_transform_hdr and is pre-allocated by the caller.
+ * This function then populates new_rq[1+] with the content from old_rq[0+].
+ *
+ * The end result is an array of smb_rqst structures where the first structure
+ * only contains a single iov for the transform header which we then can pass
+ * to crypt_message().
+ *
+ * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
+ * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
+ */
static int
-smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
- struct smb_rqst *old_rq)
+smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
+ struct smb_rqst *new_rq, struct smb_rqst *old_rq)
{
- struct kvec *iov;
struct page **pages;
- struct smb2_transform_hdr *tr_hdr;
- unsigned int npages = old_rq->rq_npages;
- unsigned int orig_len;
- int i;
+ struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
+ unsigned int npages;
+ unsigned int orig_len = 0;
+ int i, j;
int rc = -ENOMEM;
- pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!pages)
- return rc;
-
- new_rq->rq_pages = pages;
- new_rq->rq_offset = old_rq->rq_offset;
- new_rq->rq_npages = old_rq->rq_npages;
- new_rq->rq_pagesz = old_rq->rq_pagesz;
- new_rq->rq_tailsz = old_rq->rq_tailsz;
-
- for (i = 0; i < npages; i++) {
- pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
- if (!pages[i])
- goto err_free_pages;
- }
-
- iov = kmalloc_array(old_rq->rq_nvec + 1, sizeof(struct kvec),
- GFP_KERNEL);
- if (!iov)
- goto err_free_pages;
+ for (i = 1; i < num_rqst; i++) {
+ npages = old_rq[i - 1].rq_npages;
+ pages = kmalloc_array(npages, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!pages)
+ goto err_free;
+
+ new_rq[i].rq_pages = pages;
+ new_rq[i].rq_npages = npages;
+ new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
+ new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
+ new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
+ new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
+ new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
+
+ orig_len += smb_rqst_len(server, &old_rq[i - 1]);
+
+ for (j = 0; j < npages; j++) {
+ pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
+ if (!pages[j])
+ goto err_free;
+ }
- /* copy all iovs from the old */
- memcpy(&iov[1], &old_rq->rq_iov[0],
- sizeof(struct kvec) * old_rq->rq_nvec);
+	/* copy pages from the old */
+ for (j = 0; j < npages; j++) {
+ char *dst, *src;
+ unsigned int offset, len;
- new_rq->rq_iov = iov;
- new_rq->rq_nvec = old_rq->rq_nvec + 1;
+ rqst_page_get_length(&new_rq[i], j, &len, &offset);
- tr_hdr = kmalloc(sizeof(struct smb2_transform_hdr), GFP_KERNEL);
- if (!tr_hdr)
- goto err_free_iov;
+ dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
+ src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
- orig_len = smb_rqst_len(server, old_rq);
+ memcpy(dst, src, len);
+ kunmap(new_rq[i].rq_pages[j]);
+ kunmap(old_rq[i - 1].rq_pages[j]);
+ }
+ }
- /* fill the 2nd iov with a transform header */
+ /* fill the 1st iov with a transform header */
fill_transform_hdr(tr_hdr, orig_len, old_rq);
- new_rq->rq_iov[0].iov_base = tr_hdr;
- new_rq->rq_iov[0].iov_len = sizeof(struct smb2_transform_hdr);
-
- /* copy pages form the old */
- for (i = 0; i < npages; i++) {
- char *dst, *src;
- unsigned int offset, len;
-
- rqst_page_get_length(new_rq, i, &len, &offset);
-
- dst = (char *) kmap(new_rq->rq_pages[i]) + offset;
- src = (char *) kmap(old_rq->rq_pages[i]) + offset;
- memcpy(dst, src, len);
- kunmap(new_rq->rq_pages[i]);
- kunmap(old_rq->rq_pages[i]);
- }
-
- rc = crypt_message(server, new_rq, 1);
+ rc = crypt_message(server, num_rqst, new_rq, 1);
cifs_dbg(FYI, "encrypt message returned %d", rc);
if (rc)
- goto err_free_tr_hdr;
+ goto err_free;
return rc;
-err_free_tr_hdr:
- kfree(tr_hdr);
-err_free_iov:
- kfree(iov);
-err_free_pages:
- for (i = i - 1; i >= 0; i--)
- put_page(pages[i]);
- kfree(pages);
+err_free:
+ smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
return rc;
}
-static void
-smb3_free_transform_rq(struct smb_rqst *rqst)
-{
- int i = rqst->rq_npages - 1;
-
- for (; i >= 0; i--)
- put_page(rqst->rq_pages[i]);
- kfree(rqst->rq_pages);
- /* free transform header */
- kfree(rqst->rq_iov[0].iov_base);
- kfree(rqst->rq_iov);
-}
-
static int
smb3_is_transform_hdr(void *buf)
{
@@ -2603,7 +2781,7 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
rqst.rq_pagesz = PAGE_SIZE;
rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
- rc = crypt_message(server, &rqst, 0);
+ rc = crypt_message(server, 1, &rqst, 0);
cifs_dbg(FYI, "decrypt message returned %d\n", rc);
if (rc)
@@ -2878,13 +3056,20 @@ discard_data:
static int
receive_encrypted_standard(struct TCP_Server_Info *server,
- struct mid_q_entry **mid)
+ struct mid_q_entry **mids, char **bufs,
+ int *num_mids)
{
- int length;
+ int ret, length;
char *buf = server->smallbuf;
+ char *tmpbuf;
+ struct smb2_sync_hdr *shdr;
unsigned int pdu_length = server->pdu_size;
unsigned int buf_size;
struct mid_q_entry *mid_entry;
+ int next_is_large;
+ char *next_buffer = NULL;
+
+ *num_mids = 0;
/* switch to large buffer if too big for a small one */
if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
@@ -2905,24 +3090,61 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
if (length)
return length;
+ next_is_large = server->large_buf;
+ one_more:
+ shdr = (struct smb2_sync_hdr *)buf;
+ if (shdr->NextCommand) {
+ if (next_is_large) {
+ tmpbuf = server->bigbuf;
+ next_buffer = (char *)cifs_buf_get();
+ } else {
+ tmpbuf = server->smallbuf;
+ next_buffer = (char *)cifs_small_buf_get();
+ }
+ memcpy(next_buffer,
+ tmpbuf + le32_to_cpu(shdr->NextCommand),
+ pdu_length - le32_to_cpu(shdr->NextCommand));
+ }
+
mid_entry = smb2_find_mid(server, buf);
if (mid_entry == NULL)
cifs_dbg(FYI, "mid not found\n");
else {
cifs_dbg(FYI, "mid found\n");
mid_entry->decrypted = true;
+ mid_entry->resp_buf_size = server->pdu_size;
}
- *mid = mid_entry;
+ if (*num_mids >= MAX_COMPOUND) {
+ cifs_dbg(VFS, "too many PDUs in compound\n");
+ return -1;
+ }
+ bufs[*num_mids] = buf;
+ mids[(*num_mids)++] = mid_entry;
if (mid_entry && mid_entry->handle)
- return mid_entry->handle(server, mid_entry);
+ ret = mid_entry->handle(server, mid_entry);
+ else
+ ret = cifs_handle_standard(server, mid_entry);
- return cifs_handle_standard(server, mid_entry);
+ if (ret == 0 && shdr->NextCommand) {
+ pdu_length -= le32_to_cpu(shdr->NextCommand);
+ server->large_buf = next_is_large;
+ if (next_is_large)
+ server->bigbuf = next_buffer;
+ else
+ server->smallbuf = next_buffer;
+
+ buf += le32_to_cpu(shdr->NextCommand);
+ goto one_more;
+ }
+
+ return ret;
}
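
The one_more loop above is the decrypted-side mirror of smb2_set_next_command(): each header's NextCommand holds the byte distance to the next chained header, with 0 terminating the chain. The walk in isolation (a sketch in kernel style, not standalone; the upper-bound check is an extra safety net added here):

	/* Count the PDUs in a decrypted compound buffer of pdu_length bytes */
	static int count_compound_pdus(char *buf, unsigned int pdu_length)
	{
		int n = 0;
		__u32 next;

		for (;;) {
			n++;
			next = le32_to_cpu(
				((struct smb2_sync_hdr *)buf)->NextCommand);
			if (!next || next >= pdu_length)
				break;
			buf += next;
			pdu_length -= next;
		}
		return n;
	}
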
static int
-smb3_receive_transform(struct TCP_Server_Info *server, struct mid_q_entry **mid)
+smb3_receive_transform(struct TCP_Server_Info *server,
+ struct mid_q_entry **mids, char **bufs, int *num_mids)
{
char *buf = server->smallbuf;
unsigned int pdu_length = server->pdu_size;
@@ -2945,10 +3167,11 @@ smb3_receive_transform(struct TCP_Server_Info *server, struct mid_q_entry **mid)
return -ECONNABORTED;
}
+ /* TODO: add support for compounds containing READ. */
if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server))
- return receive_encrypted_read(server, mid);
+ return receive_encrypted_read(server, &mids[0]);
- return receive_encrypted_standard(server, mid);
+ return receive_encrypted_standard(server, mids, bufs, num_mids);
}
int
@@ -3250,7 +3473,6 @@ struct smb_version_operations smb30_operations = {
.fallocate = smb3_fallocate,
.enum_snapshots = smb3_enum_snapshots,
.init_transform_rq = smb3_init_transform_rq,
- .free_transform_rq = smb3_free_transform_rq,
.is_transform_hdr = smb3_is_transform_hdr,
.receive_transform = smb3_receive_transform,
.get_dfs_refer = smb2_get_dfs_refer,
@@ -3267,7 +3489,6 @@ struct smb_version_operations smb30_operations = {
.next_header = smb2_next_header,
};
-#ifdef CONFIG_CIFS_SMB311
struct smb_version_operations smb311_operations = {
.compare_fids = smb2_compare_fids,
.setup_request = smb2_setup_request,
@@ -3335,7 +3556,7 @@ struct smb_version_operations smb311_operations = {
.is_status_pending = smb2_is_status_pending,
.is_session_expired = smb2_is_session_expired,
.oplock_response = smb2_oplock_response,
- .queryfs = smb2_queryfs,
+ .queryfs = smb311_queryfs,
.mand_lock = smb2_mand_lock,
.mand_unlock_range = smb2_unlock_range,
.push_mand_locks = smb2_push_mandatory_locks,
@@ -3357,7 +3578,6 @@ struct smb_version_operations smb311_operations = {
.fallocate = smb3_fallocate,
.enum_snapshots = smb3_enum_snapshots,
.init_transform_rq = smb3_init_transform_rq,
- .free_transform_rq = smb3_free_transform_rq,
.is_transform_hdr = smb3_is_transform_hdr,
.receive_transform = smb3_receive_transform,
.get_dfs_refer = smb2_get_dfs_refer,
@@ -3366,9 +3586,13 @@ struct smb_version_operations smb311_operations = {
.query_all_EAs = smb2_query_eas,
.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
+#ifdef CONFIG_CIFS_ACL
+ .get_acl = get_smb2_acl,
+ .get_acl_by_fid = get_smb2_acl_by_fid,
+ .set_acl = set_smb2_acl,
+#endif /* CIFS_ACL */
.next_header = smb2_next_header,
};
-#endif /* CIFS_SMB311 */
struct smb_version_values smb20_values = {
.version_string = SMB20_VERSION_STRING,
@@ -3496,7 +3720,6 @@ struct smb_version_values smb302_values = {
.create_lease_size = sizeof(struct create_lease_v2),
};
-#ifdef CONFIG_CIFS_SMB311
struct smb_version_values smb311_values = {
.version_string = SMB311_VERSION_STRING,
.protocol_id = SMB311_PROT_ID,
@@ -3517,4 +3740,3 @@ struct smb_version_values smb311_values = {
.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
.create_lease_size = sizeof(struct create_lease_v2),
};
-#endif /* SMB311 */
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 3c92678cb45b..2f1938011395 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -80,7 +80,7 @@ static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};
-static int smb3_encryption_required(const struct cifs_tcon *tcon)
+int smb3_encryption_required(const struct cifs_tcon *tcon)
{
if (!tcon)
return 0;
@@ -360,17 +360,15 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
total_len);
if (tcon != NULL) {
-#ifdef CONFIG_CIFS_STATS2
uint16_t com_code = le16_to_cpu(smb2_command);
cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
-#endif
cifs_stats_inc(&tcon->num_smbs_sent);
}
return rc;
}
-#ifdef CONFIG_CIFS_SMB311
+
/* offset is sizeof smb2_negotiate_req but rounded up to 8 bytes */
#define OFFSET_OF_NEG_CONTEXT 0x68 /* sizeof(struct smb2_negotiate_req) */
@@ -585,13 +583,6 @@ add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
return 0;
}
-#else
-static void assemble_neg_contexts(struct smb2_negotiate_req *req,
- unsigned int *total_len)
-{
- return;
-}
-#endif /* SMB311 */
/*
*
@@ -636,10 +627,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
return rc;
req->sync_hdr.SessionId = 0;
-#ifdef CONFIG_CIFS_SMB311
+
memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
-#endif
if (strcmp(ses->server->vals->version_string,
SMB3ANY_VERSION_STRING) == 0) {
@@ -741,10 +731,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
-#ifdef CONFIG_CIFS_SMB311
else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
-#endif /* SMB311 */
else {
cifs_dbg(VFS, "Illegal dialect returned by server 0x%x\n",
le16_to_cpu(rsp->DialectRevision));
@@ -753,9 +741,6 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
}
server->dialect = le16_to_cpu(rsp->DialectRevision);
- /* BB: add check that dialect was valid given dialect(s) we asked for */
-
-#ifdef CONFIG_CIFS_SMB311
/*
* Keep a copy of the hash after negprot. This hash will be
* the starting hash value for all sessions made from this
@@ -763,7 +748,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
*/
memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
SMB2_PREAUTH_HASH_SIZE);
-#endif
+
/* SMB2 only has an extended negflavor */
server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
/* set it to the maximum buffer size value we can send with 1 credit */
@@ -804,7 +789,6 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
rc = -EIO;
}
-#ifdef CONFIG_CIFS_SMB311
if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
if (rsp->NegotiateContextCount)
rc = smb311_decode_neg_context(rsp, server,
@@ -812,7 +796,6 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
else
cifs_dbg(VFS, "Missing expected negotiate contexts\n");
}
-#endif /* CONFIG_CIFS_SMB311 */
neg_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
@@ -1373,13 +1356,11 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
sess_data->nls_cp = (struct nls_table *) nls_cp;
sess_data->previous_session = ses->Suid;
-#ifdef CONFIG_CIFS_SMB311
/*
* Initialize the session hash with the server one.
*/
memcpy(ses->preauth_sha_hash, ses->server->preauth_sha_hash,
SMB2_PREAUTH_HASH_SIZE);
-#endif
while (sess_data->func)
sess_data->func(sess_data);
@@ -1875,6 +1856,51 @@ add_durable_context(struct kvec *iov, unsigned int *num_iovec,
return 0;
}
+/* See MS-SMB2 2.2.13.2.7 */
+static struct crt_twarp_ctxt *
+create_twarp_buf(__u64 timewarp)
+{
+ struct crt_twarp_ctxt *buf;
+
+ buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->ccontext.DataOffset = cpu_to_le16(offsetof
+ (struct crt_twarp_ctxt, Timestamp));
+ buf->ccontext.DataLength = cpu_to_le32(8);
+ buf->ccontext.NameOffset = cpu_to_le16(offsetof
+ (struct crt_twarp_ctxt, Name));
+ buf->ccontext.NameLength = cpu_to_le16(4);
+ /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
+ buf->Name[0] = 'T';
+ buf->Name[1] = 'W';
+ buf->Name[2] = 'r';
+ buf->Name[3] = 'p';
+ buf->Timestamp = cpu_to_le64(timewarp);
+ return buf;
+}
+
+/* See MS-SMB2 2.2.13.2.7 */
+static int
+add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
+{
+ struct smb2_create_req *req = iov[0].iov_base;
+ unsigned int num = *num_iovec;
+
+ iov[num].iov_base = create_twarp_buf(timewarp);
+ if (iov[num].iov_base == NULL)
+ return -ENOMEM;
+ iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
+ if (!req->CreateContextsOffset)
+ req->CreateContextsOffset = cpu_to_le32(
+ sizeof(struct smb2_create_req) +
+ iov[num - 1].iov_len);
+ le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_twarp_ctxt));
+ *num_iovec = num + 1;
+ return 0;
+}
+
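
The Timestamp carried by the "TWrp" context is an NT timestamp, the same 100ns-since-1601 encoding handled by cifs_UnixTimeToNT() in netmisc.c. A hedged sketch of producing one from a timespec64 (the snapshot_time mount parsing itself is outside this hunk, so the source of ts is assumed):

	/* Unix time -> NT time, mirroring cifs_UnixTimeToNT();
	 * NTFS_TIME_OFFSET is 11644473600 s expressed in 100ns units */
	__u64 twarp = (__u64)ts.tv_sec * 10000000 + ts.tv_nsec / 100
						  + NTFS_TIME_OFFSET;
	rc = add_twarp_context(iov, &n_iov, twarp);
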
static int
alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
const char *treename, const __le16 *path)
@@ -1920,7 +1946,6 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
return 0;
}
-#ifdef CONFIG_CIFS_SMB311
int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
umode_t mode, struct cifs_tcon *tcon,
const char *full_path,
@@ -1928,7 +1953,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
{
struct smb_rqst rqst;
struct smb2_create_req *req;
- struct smb2_create_rsp *rsp;
+ struct smb2_create_rsp *rsp = NULL;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
struct kvec iov[3]; /* make sure at least one for each open context */
@@ -1943,27 +1968,31 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
char *pc_buf = NULL;
int flags = 0;
unsigned int total_len;
- __le16 *path = cifs_convert_path_to_utf16(full_path, cifs_sb);
-
- if (!path)
- return -ENOMEM;
+ __le16 *utf16_path = NULL;
cifs_dbg(FYI, "mkdir\n");
+ /* resource #1: path allocation */
+ utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+ if (!utf16_path)
+ return -ENOMEM;
+
if (ses && (ses->server))
server = ses->server;
- else
- return -EIO;
+ else {
+ rc = -EIO;
+ goto err_free_path;
+ }
+ /* resource #2: request */
rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
-
if (rc)
- return rc;
+ goto err_free_path;
+
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
-
req->ImpersonationLevel = IL_IMPERSONATION;
req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
/* File attributes ignored on open (used in create though) */
@@ -1992,50 +2021,44 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
&name_len,
- tcon->treeName, path);
- if (rc) {
- cifs_small_buf_release(req);
- return rc;
- }
+ tcon->treeName, utf16_path);
+ if (rc)
+ goto err_free_req;
+
req->NameLength = cpu_to_le16(name_len * 2);
uni_path_len = copy_size;
- path = copy_path;
+ /* free before overwriting resource */
+ kfree(utf16_path);
+ utf16_path = copy_path;
} else {
- uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
+ uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
/* MUST set path len (NameLength) to 0 opening root of share */
req->NameLength = cpu_to_le16(uni_path_len - 2);
if (uni_path_len % 8 != 0) {
copy_size = roundup(uni_path_len, 8);
copy_path = kzalloc(copy_size, GFP_KERNEL);
if (!copy_path) {
- cifs_small_buf_release(req);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_free_req;
}
- memcpy((char *)copy_path, (const char *)path,
+ memcpy((char *)copy_path, (const char *)utf16_path,
uni_path_len);
uni_path_len = copy_size;
- path = copy_path;
+ /* free before overwriting resource */
+ kfree(utf16_path);
+ utf16_path = copy_path;
}
}
iov[1].iov_len = uni_path_len;
- iov[1].iov_base = path;
+ iov[1].iov_base = utf16_path;
req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
if (tcon->posix_extensions) {
- if (n_iov > 2) {
- struct create_context *ccontext =
- (struct create_context *)iov[n_iov-1].iov_base;
- ccontext->Next =
- cpu_to_le32(iov[n_iov-1].iov_len);
- }
-
+ /* resource #3: posix buf */
rc = add_posix_context(iov, &n_iov, mode);
- if (rc) {
- cifs_small_buf_release(req);
- kfree(copy_path);
- return rc;
- }
+ if (rc)
+ goto err_free_req;
pc_buf = iov[n_iov-1].iov_base;
}
@@ -2044,73 +2067,57 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
rqst.rq_iov = iov;
rqst.rq_nvec = n_iov;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
- &rsp_iov);
-
- cifs_small_buf_release(req);
- rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
-
- if (rc != 0) {
+ /* resource #4: response buffer */
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ if (rc) {
cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
- CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES, rc);
- goto smb311_mkdir_exit;
- } else
- trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
- ses->Suid, CREATE_NOT_FILE,
- FILE_WRITE_ATTRIBUTES);
+ CREATE_NOT_FILE,
+ FILE_WRITE_ATTRIBUTES, rc);
+ goto err_free_rsp_buf;
+ }
+
+ rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+ trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
+ ses->Suid, CREATE_NOT_FILE,
+ FILE_WRITE_ATTRIBUTES);
SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
 /* Eventually save off posix specific response info and timestamps */
-smb311_mkdir_exit:
- kfree(copy_path);
- kfree(pc_buf);
+err_free_rsp_buf:
free_rsp_buf(resp_buftype, rsp);
+ kfree(pc_buf);
+err_free_req:
+ cifs_small_buf_release(req);
+err_free_path:
+ kfree(utf16_path);
return rc;
-
}
-#endif /* SMB311 */
int
-SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
- __u8 *oplock, struct smb2_file_all_info *buf,
- struct kvec *err_iov, int *buftype)
+SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
+ struct cifs_open_parms *oparms, __le16 *path)
{
- struct smb_rqst rqst;
+ struct TCP_Server_Info *server = tcon->ses->server;
struct smb2_create_req *req;
- struct smb2_create_rsp *rsp;
- struct TCP_Server_Info *server;
- struct cifs_tcon *tcon = oparms->tcon;
- struct cifs_ses *ses = tcon->ses;
- struct kvec iov[5]; /* make sure at least one for each open context */
- struct kvec rsp_iov = {NULL, 0};
- int resp_buftype;
- int uni_path_len;
- __le16 *copy_path = NULL;
- int copy_size;
- int rc = 0;
unsigned int n_iov = 2;
__u32 file_attributes = 0;
- char *dhc_buf = NULL, *lc_buf = NULL, *pc_buf = NULL;
- int flags = 0;
+ int copy_size;
+ int uni_path_len;
unsigned int total_len;
-
- cifs_dbg(FYI, "create/open\n");
-
- if (ses && (ses->server))
- server = ses->server;
- else
- return -EIO;
+ struct kvec *iov = rqst->rq_iov;
+ __le16 *copy_path;
+ int rc;
rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
-
if (rc)
return rc;
- if (smb3_encryption_required(tcon))
- flags |= CIFS_TRANSFORM_REQ;
+ iov[0].iov_base = (char *)req;
+ /* -1 since last byte is buf[0] which is sent below (path) */
+ iov[0].iov_len = total_len - 1;
if (oparms->create_options & CREATE_OPTION_READONLY)
file_attributes |= ATTR_READONLY;
@@ -2124,11 +2131,6 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
req->ShareAccess = FILE_SHARE_ALL_LE;
req->CreateDisposition = cpu_to_le32(oparms->disposition);
req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
-
- iov[0].iov_base = (char *)req;
- /* -1 since last byte is buf[0] which is sent below (path) */
- iov[0].iov_len = total_len - 1;
-
req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
/* [MS-SMB2] 2.2.13 NameOffset:
@@ -2146,10 +2148,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
&name_len,
tcon->treeName, path);
- if (rc) {
- cifs_small_buf_release(req);
+ if (rc)
return rc;
- }
req->NameLength = cpu_to_le16(name_len * 2);
uni_path_len = copy_size;
path = copy_path;
@@ -2157,18 +2157,16 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
/* MUST set path len (NameLength) to 0 opening root of share */
req->NameLength = cpu_to_le16(uni_path_len - 2);
- if (uni_path_len % 8 != 0) {
- copy_size = roundup(uni_path_len, 8);
- copy_path = kzalloc(copy_size, GFP_KERNEL);
- if (!copy_path) {
- cifs_small_buf_release(req);
- return -ENOMEM;
- }
- memcpy((char *)copy_path, (const char *)path,
- uni_path_len);
- uni_path_len = copy_size;
- path = copy_path;
- }
+ copy_size = uni_path_len;
+ if (copy_size % 8 != 0)
+ copy_size = roundup(copy_size, 8);
+ copy_path = kzalloc(copy_size, GFP_KERNEL);
+ if (!copy_path)
+ return -ENOMEM;
+ memcpy((char *)copy_path, (const char *)path,
+ uni_path_len);
+ uni_path_len = copy_size;
+ path = copy_path;
}
iov[1].iov_len = uni_path_len;
@@ -2183,12 +2181,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
else {
rc = add_lease_context(server, iov, &n_iov,
oparms->fid->lease_key, oplock);
- if (rc) {
- cifs_small_buf_release(req);
- kfree(copy_path);
+ if (rc)
return rc;
- }
- lc_buf = iov[n_iov-1].iov_base;
}
if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
@@ -2202,16 +2196,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
rc = add_durable_context(iov, &n_iov, oparms,
tcon->use_persistent);
- if (rc) {
- cifs_small_buf_release(req);
- kfree(copy_path);
- kfree(lc_buf);
+ if (rc)
return rc;
- }
- dhc_buf = iov[n_iov-1].iov_base;
}
-#ifdef CONFIG_CIFS_SMB311
if (tcon->posix_extensions) {
if (n_iov > 2) {
struct create_context *ccontext =
@@ -2221,24 +2209,79 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
}
rc = add_posix_context(iov, &n_iov, oparms->mode);
- if (rc) {
- cifs_small_buf_release(req);
- kfree(copy_path);
- kfree(lc_buf);
- kfree(dhc_buf);
+ if (rc)
return rc;
+ }
+
+ if (tcon->snapshot_time) {
+ cifs_dbg(FYI, "adding snapshot context\n");
+ if (n_iov > 2) {
+ struct create_context *ccontext =
+ (struct create_context *)iov[n_iov-1].iov_base;
+ ccontext->Next =
+ cpu_to_le32(iov[n_iov-1].iov_len);
}
- pc_buf = iov[n_iov-1].iov_base;
+
+ rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
+ if (rc)
+ return rc;
}
-#endif /* SMB311 */
+
+ rqst->rq_nvec = n_iov;
+ return 0;
+}
+
+/* rq_iov[0] is the request and is released by cifs_small_buf_release().
+ * All other vectors are freed by kfree().
+ */
+void
+SMB2_open_free(struct smb_rqst *rqst)
+{
+ int i;
+
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base);
+ for (i = 1; i < rqst->rq_nvec; i++)
+ if (rqst->rq_iov[i].iov_base != smb2_padding)
+ kfree(rqst->rq_iov[i].iov_base);
+}
+
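
One subtlety in SMB2_open_free() above: an iov slot may point at the shared
static smb2_padding[] rather than at heap memory, so the loop compares
against that sentinel before calling kfree(). A small userspace sketch of
the skip-the-shared-buffer pattern (types and names illustrative):

#include <stdlib.h>

static char shared_pad[7];	/* static pad, must never be freed */

struct vec { void *base; size_t len; };

static void free_vectors(struct vec *iov, int nvec)
{
	int i;

	/* slot 0 is owned elsewhere; only heap-backed slots are freed */
	for (i = 1; i < nvec; i++)
		if (iov[i].base != shared_pad)
			free(iov[i].base);
}

Passing shared_pad to free() would corrupt the allocator, which is why the
identity test on the pointer, not its contents, gates the release.
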
+int
+SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ __u8 *oplock, struct smb2_file_all_info *buf,
+ struct kvec *err_iov, int *buftype)
+{
+ struct smb_rqst rqst;
+ struct smb2_create_rsp *rsp = NULL;
+ struct TCP_Server_Info *server;
+ struct cifs_tcon *tcon = oparms->tcon;
+ struct cifs_ses *ses = tcon->ses;
+ struct kvec iov[5]; /* make sure at least one for each open context */
+ struct kvec rsp_iov = {NULL, 0};
+ int resp_buftype;
+ int rc = 0;
+ int flags = 0;
+
+ cifs_dbg(FYI, "create/open\n");
+ if (ses && (ses->server))
+ server = ses->server;
+ else
+ return -EIO;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
memset(&rqst, 0, sizeof(struct smb_rqst));
+ memset(&iov, 0, sizeof(iov));
rqst.rq_iov = iov;
- rqst.rq_nvec = n_iov;
+ rqst.rq_nvec = 5;
+
+ rc = SMB2_open_init(tcon, &rqst, oplock, oparms, path);
+ if (rc)
+ goto creat_exit;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
- cifs_small_buf_release(req);
rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
if (rc != 0) {
@@ -2275,10 +2318,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
else
*oplock = rsp->OplockLevel;
creat_exit:
- kfree(copy_path);
- kfree(lc_buf);
- kfree(dhc_buf);
- kfree(pc_buf);
+ SMB2_open_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
@@ -2469,43 +2509,62 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
}
int
+SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid)
+{
+ struct smb2_close_req *req;
+ struct kvec *iov = rqst->rq_iov;
+ unsigned int total_len;
+ int rc;
+
+ rc = smb2_plain_req_init(SMB2_CLOSE, tcon, (void **) &req, &total_len);
+ if (rc)
+ return rc;
+
+ req->PersistentFileId = persistent_fid;
+ req->VolatileFileId = volatile_fid;
+ iov[0].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
+
+ return 0;
+}
+
+void
+SMB2_close_free(struct smb_rqst *rqst)
+{
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+}
+
+int
SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int flags)
{
struct smb_rqst rqst;
- struct smb2_close_req *req;
- struct smb2_close_rsp *rsp;
+ struct smb2_close_rsp *rsp = NULL;
struct cifs_ses *ses = tcon->ses;
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buftype;
int rc = 0;
- unsigned int total_len;
cifs_dbg(FYI, "Close\n");
if (!ses || !(ses->server))
return -EIO;
- rc = smb2_plain_req_init(SMB2_CLOSE, tcon, (void **) &req, &total_len);
- if (rc)
- return rc;
-
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- req->PersistentFileId = persistent_fid;
- req->VolatileFileId = volatile_fid;
-
- iov[0].iov_base = (char *)req;
- iov[0].iov_len = total_len;
-
memset(&rqst, 0, sizeof(struct smb_rqst));
+ memset(&iov, 0, sizeof(iov));
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
+ rc = SMB2_close_init(tcon, &rqst, persistent_fid, volatile_fid);
+ if (rc)
+ goto close_exit;
+
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
- cifs_small_buf_release(req);
rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
if (rc != 0) {
@@ -2518,6 +2577,7 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
/* BB FIXME - decode close response, update inode for caching */
close_exit:
+ SMB2_close_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
@@ -2529,9 +2589,9 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
return SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
}
-static int
-validate_iov(unsigned int offset, unsigned int buffer_length,
- struct kvec *iov, unsigned int min_buf_size)
+int
+smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
+ struct kvec *iov, unsigned int min_buf_size)
{
unsigned int smb_len = iov->iov_len;
char *end_of_smb = smb_len + (char *)iov->iov_base;
@@ -2575,7 +2635,7 @@ validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
if (!data)
return -EINVAL;
- rc = validate_iov(offset, buffer_length, iov, minbufsize);
+ rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
if (rc)
return rc;
@@ -2584,36 +2644,22 @@ validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
return 0;
}
-static int
-query_info(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
- u32 additional_info, size_t output_len, size_t min_len, void **data,
- u32 *dlen)
+int
+SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid,
+ u8 info_class, u8 info_type, u32 additional_info,
+ size_t output_len)
{
- struct smb_rqst rqst;
struct smb2_query_info_req *req;
- struct smb2_query_info_rsp *rsp = NULL;
- struct kvec iov[2];
- struct kvec rsp_iov;
- int rc = 0;
- int resp_buftype;
- struct cifs_ses *ses = tcon->ses;
- int flags = 0;
+ struct kvec *iov = rqst->rq_iov;
unsigned int total_len;
-
- cifs_dbg(FYI, "Query Info\n");
-
- if (!ses || !(ses->server))
- return -EIO;
+ int rc;
rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
&total_len);
if (rc)
return rc;
- if (smb3_encryption_required(tcon))
- flags |= CIFS_TRANSFORM_REQ;
-
req->InfoType = info_type;
req->FileInfoClass = info_class;
req->PersistentFileId = persistent_fid;
@@ -2630,13 +2676,50 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
iov[0].iov_base = (char *)req;
/* 1 for Buffer */
iov[0].iov_len = total_len - 1;
+ return 0;
+}
+
+void
+SMB2_query_info_free(struct smb_rqst *rqst)
+{
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+}
+
+static int
+query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
+ u32 additional_info, size_t output_len, size_t min_len, void **data,
+ u32 *dlen)
+{
+ struct smb_rqst rqst;
+ struct smb2_query_info_rsp *rsp = NULL;
+ struct kvec iov[1];
+ struct kvec rsp_iov;
+ int rc = 0;
+ int resp_buftype;
+ struct cifs_ses *ses = tcon->ses;
+ int flags = 0;
+
+ cifs_dbg(FYI, "Query Info\n");
+
+ if (!ses || !(ses->server))
+ return -EIO;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
memset(&rqst, 0, sizeof(struct smb_rqst));
+ memset(&iov, 0, sizeof(iov));
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
+ rc = SMB2_query_info_init(tcon, &rqst, persistent_fid, volatile_fid,
+ info_class, info_type, additional_info,
+ output_len);
+ if (rc)
+ goto qinf_exit;
+
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
- cifs_small_buf_release(req);
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
if (rc) {
@@ -2665,6 +2748,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
&rsp_iov, min_len, *data);
qinf_exit:
+ SMB2_query_info_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
@@ -3623,9 +3707,9 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
goto qdir_exit;
}
- rc = validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
- le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
- info_buf_size);
+ rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
+ le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
+ info_buf_size);
if (rc)
goto qdir_exit;
@@ -3927,9 +4011,9 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
-static void
-copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
- struct kstatfs *kst)
+void
+smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
+ struct kstatfs *kst)
{
kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
@@ -3939,6 +4023,25 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
return;
}
+static void
+copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
+ struct kstatfs *kst)
+{
+ kst->f_bsize = le32_to_cpu(response_data->BlockSize);
+ kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
+ kst->f_bfree = le64_to_cpu(response_data->BlocksAvail);
+ if (response_data->UserBlocksAvail == cpu_to_le64(-1))
+ kst->f_bavail = kst->f_bfree;
+ else
+ kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
+ if (response_data->TotalFileNodes != cpu_to_le64(-1))
+ kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
+ if (response_data->FreeFileNodes != cpu_to_le64(-1))
+ kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);
+
+ return;
+}
+
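
copy_posix_fs_info_to_kstatfs() above treats an all-ones wire value as "not
provided by the server" and substitutes a fallback. A compilable sketch of
that sentinel convention (field names illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t user_blocks_avail = UINT64_MAX;	/* -1 on the wire */
	uint64_t blocks_free = 1024;
	uint64_t bavail;

	if (user_blocks_avail == UINT64_MAX)
		bavail = blocks_free;	/* fall back to the global count */
	else
		bavail = user_blocks_avail;
	printf("f_bavail = %llu\n", (unsigned long long)bavail);
	return 0;
}
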
static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
int outbuf_len, u64 persistent_fid, u64 volatile_fid)
@@ -3976,6 +4079,54 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
}
int
+SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
+{
+ struct smb_rqst rqst;
+ struct smb2_query_info_rsp *rsp = NULL;
+ struct kvec iov;
+ struct kvec rsp_iov;
+ int rc = 0;
+ int resp_buftype;
+ struct cifs_ses *ses = tcon->ses;
+ FILE_SYSTEM_POSIX_INFO *info = NULL;
+ int flags = 0;
+
+ rc = build_qfs_info_req(&iov, tcon, FS_POSIX_INFORMATION,
+ sizeof(FILE_SYSTEM_POSIX_INFO),
+ persistent_fid, volatile_fid);
+ if (rc)
+ return rc;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = &iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ cifs_small_buf_release(iov.iov_base);
+ if (rc) {
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+ goto posix_qfsinf_exit;
+ }
+ rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
+
+ info = (FILE_SYSTEM_POSIX_INFO *)(
+ le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
+ rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
+ le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
+ sizeof(FILE_SYSTEM_POSIX_INFO));
+ if (!rc)
+ copy_posix_fs_info_to_kstatfs(info, fsdata);
+
+posix_qfsinf_exit:
+ free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+ return rc;
+}
+
+int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
@@ -4012,11 +4163,11 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
info = (struct smb2_fs_full_size_info *)(
le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
- rc = validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
- le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
- sizeof(struct smb2_fs_full_size_info));
+ rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
+ le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
+ sizeof(struct smb2_fs_full_size_info));
if (!rc)
- copy_fs_info_to_kstatfs(info, fsdata);
+ smb2_copy_fs_info_to_kstatfs(info, fsdata);
qfsinf_exit:
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
@@ -4046,6 +4197,9 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
} else if (level == FS_SECTOR_SIZE_INFORMATION) {
max_len = sizeof(struct smb3_fs_ss_info);
min_len = sizeof(struct smb3_fs_ss_info);
+ } else if (level == FS_VOLUME_INFORMATION) {
+ max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
+ min_len = sizeof(struct smb3_fs_vol_info);
} else {
cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
return -EINVAL;
@@ -4073,7 +4227,7 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
rsp_len = le32_to_cpu(rsp->OutputBufferLength);
offset = le16_to_cpu(rsp->OutputBufferOffset);
- rc = validate_iov(offset, rsp_len, &rsp_iov, min_len);
+ rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
if (rc)
goto qfsattr_exit;
@@ -4090,6 +4244,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
tcon->ss_flags = le32_to_cpu(ss_info->Flags);
tcon->perf_sector_size =
le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
+ } else if (level == FS_VOLUME_INFORMATION) {
+ struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
+ (offset + (char *)rsp);
+ tcon->vol_serial_number = vol_info->VolumeSerialNumber;
+ tcon->vol_create_time = vol_info->VolumeCreationTime;
}
qfsattr_exit:
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index a671adcc44a6..a2eeae9e0432 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -153,6 +153,8 @@ struct smb2_transform_hdr {
*
*/
+#define COMPOUND_FID 0xFFFFFFFFFFFFFFFFULL
+
#define SMB2_ERROR_STRUCTURE_SIZE2 cpu_to_le16(9)
struct smb2_err_rsp {
@@ -765,6 +767,14 @@ struct create_durable_handle_reconnect_v2 {
struct durable_reconnect_context_v2 dcontext;
} __packed;
+/* See MS-SMB2 2.2.13.2.5 */
+struct crt_twarp_ctxt {
+ struct create_context ccontext;
+ __u8 Name[8];
+ __le64 Timestamp;
+} __packed;
+
#define COPY_CHUNK_RES_KEY_SIZE 24
struct resume_key_req {
char ResumeKey[COPY_CHUNK_RES_KEY_SIZE];
@@ -1223,6 +1233,7 @@ struct smb2_lease_ack {
#define FS_DRIVER_PATH_INFORMATION 9 /* Local only */
#define FS_VOLUME_FLAGS_INFORMATION 10 /* Local only */
#define FS_SECTOR_SIZE_INFORMATION 11 /* SMB3 or later. Query */
+#define FS_POSIX_INFORMATION 100 /* SMB3.1.1 POSIX. Query */
struct smb2_fs_full_size_info {
__le64 TotalAllocationUnits;
@@ -1248,6 +1259,17 @@ struct smb3_fs_ss_info {
__le32 ByteOffsetForPartitionAlignment;
} __packed;
+/* volume info struct - see MS-FSCC 2.5.9 */
+#define MAX_VOL_LABEL_LEN 32
+struct smb3_fs_vol_info {
+ __le64 VolumeCreationTime;
+ __u32 VolumeSerialNumber;
+ __le32 VolumeLabelLength; /* includes trailing null */
+ __u8 SupportsObjects; /* True if, like NTFS, the fs supports objects */
+ __u8 Reserved;
+ __u8 VolumeLabel[0]; /* variable len */
+} __packed;
+
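
Because smb3_fs_vol_info ends in a variable-length label, the
FS_VOLUME_INFORMATION case added to SMB2_QFS_attr() earlier in this patch
validates against the fixed header alone (min_len) while allowing up to
MAX_VOL_LABEL_LEN extra bytes (max_len). A userspace sketch of that sizing
rule, with the layout loosely mirrored and all names illustrative:

#include <stdint.h>
#include <stdio.h>

struct vol_info {
	uint64_t creation_time;
	uint32_t serial;
	uint32_t label_len;	/* includes trailing null */
	uint8_t  supports_objects;
	uint8_t  reserved;
	uint8_t  label[];	/* variable-length tail */
};

int main(void)
{
	enum { MAX_LABEL = 32 };

	printf("min=%zu max=%zu\n", sizeof(struct vol_info),
	       sizeof(struct vol_info) + MAX_LABEL);
	return 0;
}
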
/* partial list of QUERY INFO levels */
#define FILE_DIRECTORY_INFORMATION 1
#define FILE_FULL_DIRECTORY_INFORMATION 2
@@ -1361,4 +1383,6 @@ struct smb2_file_eof_info { /* encoding of request for level 10 */
__le64 EndOfFile; /* new end of file value */
} __packed; /* level 20 Set */
+extern char smb2_padding[7];
+
#endif /* _SMB2PDU_H */
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 6e6a4f2ec890..b4076577eeb7 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -68,6 +68,7 @@ extern int smb3_handle_read_data(struct TCP_Server_Info *server,
extern int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid *pfid);
+extern void close_shroot(struct cached_fid *cfid);
extern void move_smb2_info_to_cifs(FILE_ALL_INFO *dst,
struct smb2_file_all_info *src);
extern int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
@@ -132,6 +133,10 @@ extern int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms,
__le16 *path, __u8 *oplock,
struct smb2_file_all_info *buf,
struct kvec *err_iov, int *resp_buftype);
+extern int SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ __u8 *oplock, struct cifs_open_parms *oparms,
+ __le16 *path);
+extern void SMB2_open_free(struct smb_rqst *rqst);
extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
bool is_fsctl, char *in_data, u32 indatalen,
@@ -140,6 +145,9 @@ extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
extern int SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int flags);
+extern int SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ u64 persistent_file_id, u64 volatile_file_id);
+extern void SMB2_close_free(struct smb_rqst *rqst);
extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
extern int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
@@ -149,6 +157,11 @@ extern int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
struct smb2_file_all_info *data);
+extern int SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid,
+ u8 info_class, u8 info_type,
+ u32 additional_info, size_t output_len);
+extern void SMB2_query_info_free(struct smb_rqst *rqst);
extern int SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
void **data, unsigned int *plen);
@@ -197,6 +210,9 @@ void smb2_cancelled_close_fid(struct work_struct *work);
extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
struct kstatfs *FSData);
+extern int SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_file_id, u64 volatile_file_id,
+ struct kstatfs *FSData);
extern int SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id, int lvl);
extern int SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
@@ -213,9 +229,13 @@ extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
enum securityEnum);
-#ifdef CONFIG_CIFS_SMB311
+extern int smb3_encryption_required(const struct cifs_tcon *tcon);
+extern int smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
+ struct kvec *iov, unsigned int min_buf_size);
+extern void smb2_copy_fs_info_to_kstatfs(
+ struct smb2_fs_full_size_info *pfs_inf,
+ struct kstatfs *kst);
extern int smb311_crypto_shash_allocate(struct TCP_Server_Info *server);
extern int smb311_update_preauth_hash(struct cifs_ses *ses,
struct kvec *iov, int nvec);
-#endif
#endif /* _SMB2PROTO_H */
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 719d55e63d88..7b351c65ee46 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -70,7 +70,6 @@ err:
return rc;
}
-#ifdef CONFIG_CIFS_SMB311
int
smb311_crypto_shash_allocate(struct TCP_Server_Info *server)
{
@@ -98,7 +97,6 @@ err:
cifs_free_hash(&p->hmacsha256, &p->sdeschmacsha256);
return rc;
}
-#endif
static struct cifs_ses *
smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
@@ -173,7 +171,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
struct kvec *iov = rqst->rq_iov;
struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
struct cifs_ses *ses;
- struct shash_desc *shash = &server->secmech.sdeschmacsha256->shash;
+ struct shash_desc *shash;
struct smb_rqst drqst;
ses = smb2_find_smb_ses(server, shdr->SessionId);
@@ -187,7 +185,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
rc = smb2_crypto_shash_allocate(server);
if (rc) {
- cifs_dbg(VFS, "%s: shah256 alloc failed\n", __func__);
+ cifs_dbg(VFS, "%s: sha256 alloc failed\n", __func__);
return rc;
}
@@ -198,6 +196,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
return rc;
}
+ shash = &server->secmech.sdeschmacsha256->shash;
rc = crypto_shash_init(shash);
if (rc) {
cifs_dbg(VFS, "%s: Could not init sha256", __func__);
@@ -395,7 +394,6 @@ generate_smb30signingkey(struct cifs_ses *ses)
return generate_smb3signingkey(ses, &triplet);
}
-#ifdef CONFIG_CIFS_SMB311
int
generate_smb311signingkey(struct cifs_ses *ses)
@@ -423,7 +421,6 @@ generate_smb311signingkey(struct cifs_ses *ses)
return generate_smb3signingkey(ses, &triplet);
}
-#endif /* 311 */
int
smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index 67e413f6ee4d..d4aed5217a56 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -281,6 +281,44 @@ DEFINE_EVENT(smb3_cmd_done_class, smb3_##name, \
TP_ARGS(tid, sesid, cmd, mid))
DEFINE_SMB3_CMD_DONE_EVENT(cmd_done);
+DEFINE_SMB3_CMD_DONE_EVENT(ses_expired);
+
+DECLARE_EVENT_CLASS(smb3_mid_class,
+ TP_PROTO(__u16 cmd,
+ __u64 mid,
+ __u32 pid,
+ unsigned long when_sent,
+ unsigned long when_received),
+ TP_ARGS(cmd, mid, pid, when_sent, when_received),
+ TP_STRUCT__entry(
+ __field(__u16, cmd)
+ __field(__u64, mid)
+ __field(__u32, pid)
+ __field(unsigned long, when_sent)
+ __field(unsigned long, when_received)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->mid = mid;
+ __entry->pid = pid;
+ __entry->when_sent = when_sent;
+ __entry->when_received = when_received;
+ ),
+ TP_printk("\tcmd=%u mid=%llu pid=%u, when_sent=%lu when_rcv=%lu",
+ __entry->cmd, __entry->mid, __entry->pid, __entry->when_sent,
+ __entry->when_received)
+)
+
+#define DEFINE_SMB3_MID_EVENT(name) \
+DEFINE_EVENT(smb3_mid_class, smb3_##name, \
+ TP_PROTO(__u16 cmd, \
+ __u64 mid, \
+ __u32 pid, \
+ unsigned long when_sent, \
+ unsigned long when_received), \
+ TP_ARGS(cmd, mid, pid, when_sent, when_received))
+
+DEFINE_SMB3_MID_EVENT(slow_rsp);
DECLARE_EVENT_CLASS(smb3_exit_err_class,
TP_PROTO(unsigned int xid,
@@ -422,6 +460,32 @@ DEFINE_EVENT(smb3_open_done_class, smb3_##name, \
DEFINE_SMB3_OPEN_DONE_EVENT(open_done);
DEFINE_SMB3_OPEN_DONE_EVENT(posix_mkdir_done);
+DECLARE_EVENT_CLASS(smb3_reconnect_class,
+ TP_PROTO(__u64 currmid,
+ char *hostname),
+ TP_ARGS(currmid, hostname),
+ TP_STRUCT__entry(
+ __field(__u64, currmid)
+ __field(char *, hostname)
+ ),
+ TP_fast_assign(
+ __entry->currmid = currmid;
+ __entry->hostname = hostname;
+ ),
+ TP_printk("server=%s current_mid=0x%llx",
+ __entry->hostname,
+ __entry->currmid)
+)
+
+#define DEFINE_SMB3_RECONNECT_EVENT(name) \
+DEFINE_EVENT(smb3_reconnect_class, smb3_##name, \
+ TP_PROTO(__u64 currmid, \
+ char *hostname), \
+ TP_ARGS(currmid, hostname))
+
+DEFINE_SMB3_RECONNECT_EVENT(reconnect);
+DEFINE_SMB3_RECONNECT_EVENT(partial_send_reconnect);
+
#endif /* _CIFS_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index a341ec839c83..78f96fa3d7d9 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -115,8 +115,17 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
now = jiffies;
/* commands taking longer than one second are indications that
something is wrong, unless it is quite a slow link or server */
- if (time_after(now, midEntry->when_alloc + HZ)) {
- if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
+ if (time_after(now, midEntry->when_alloc + HZ) &&
+ (midEntry->command != command)) {
+ /* smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command */
+ if ((le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS) &&
+ (le16_to_cpu(midEntry->command) >= 0))
+ cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);
+
+ trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
+ midEntry->mid, midEntry->pid,
+ midEntry->when_sent, midEntry->when_received);
+ if (cifsFYI & CIFS_TIMER) {
pr_debug(" CIFS slow rsp: cmd %d mid %llu",
midEntry->command, midEntry->mid);
pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
@@ -361,6 +370,8 @@ uncork:
* socket so the server throws away the partial SMB
*/
server->tcpStatus = CifsNeedReconnect;
+ trace_smb3_partial_send_reconnect(server->CurrentMid,
+ server->hostname);
}
smbd_done:
if (rc < 0 && rc != -EINTR)
@@ -373,26 +384,42 @@ smbd_done:
}
static int
-smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
+smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ struct smb_rqst *rqst, int flags)
{
- struct smb_rqst cur_rqst;
+ struct kvec iov;
+ struct smb2_transform_hdr tr_hdr;
+ struct smb_rqst cur_rqst[MAX_COMPOUND];
int rc;
if (!(flags & CIFS_TRANSFORM_REQ))
- return __smb_send_rqst(server, 1, rqst);
+ return __smb_send_rqst(server, num_rqst, rqst);
- if (!server->ops->init_transform_rq ||
- !server->ops->free_transform_rq) {
- cifs_dbg(VFS, "Encryption requested but transform callbacks are missed\n");
+ if (num_rqst > MAX_COMPOUND - 1)
+ return -ENOMEM;
+
+ memset(&cur_rqst[0], 0, sizeof(cur_rqst));
+ memset(&iov, 0, sizeof(iov));
+ memset(&tr_hdr, 0, sizeof(tr_hdr));
+
+ iov.iov_base = &tr_hdr;
+ iov.iov_len = sizeof(tr_hdr);
+ cur_rqst[0].rq_iov = &iov;
+ cur_rqst[0].rq_nvec = 1;
+
+ if (!server->ops->init_transform_rq) {
+ cifs_dbg(VFS, "Encryption requested but transform callback "
+ "is missing\n");
return -EIO;
}
- rc = server->ops->init_transform_rq(server, &cur_rqst, rqst);
+ rc = server->ops->init_transform_rq(server, num_rqst + 1,
+ &cur_rqst[0], rqst);
if (rc)
return rc;
- rc = __smb_send_rqst(server, 1, &cur_rqst);
- server->ops->free_transform_rq(&cur_rqst);
+ rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
+ smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
return rc;
}
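
smb_send_rqst() above reserves cur_rqst[0] for the transform header and
hands init_transform_rq() num_rqst + 1 slots, so the encrypted batch is the
caller's requests shifted up by one. A tiny sketch of the
reserve-slot-zero layout (types and names illustrative):

#include <stdio.h>
#include <string.h>

struct rqst { const char *what; };

int main(void)
{
	struct rqst payload[2] = { { "open" }, { "close" } };
	struct rqst wire[1 + 2];
	int i;

	wire[0].what = "transform header";	/* reserved first slot */
	memcpy(&wire[1], payload, sizeof(payload));
	for (i = 0; i < 3; i++)
		printf("%d: %s\n", i, wire[i].what);
	return 0;
}
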
@@ -606,7 +633,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
*/
cifs_save_when_sent(mid);
cifs_in_send_inc(server);
- rc = smb_send_rqst(server, rqst, flags);
+ rc = smb_send_rqst(server, 1, rqst, flags);
cifs_in_send_dec(server);
if (rc < 0) {
@@ -746,20 +773,21 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
}
int
-cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
- struct smb_rqst *rqst, int *resp_buf_type, const int flags,
- struct kvec *resp_iov)
+compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ const int flags, const int num_rqst, struct smb_rqst *rqst,
+ int *resp_buf_type, struct kvec *resp_iov)
{
- int rc = 0;
+ int i, j, rc = 0;
int timeout, optype;
- struct mid_q_entry *midQ;
+ struct mid_q_entry *midQ[MAX_COMPOUND];
unsigned int credits = 1;
char *buf;
timeout = flags & CIFS_TIMEOUT_MASK;
optype = flags & CIFS_OP_MASK;
- *resp_buf_type = CIFS_NO_BUFFER; /* no response buf yet */
+ for (i = 0; i < num_rqst; i++)
+ resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
if ((ses == NULL) || (ses->server == NULL)) {
cifs_dbg(VFS, "Null session\n");
@@ -786,98 +814,117 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
mutex_lock(&ses->server->srv_mutex);
- midQ = ses->server->ops->setup_request(ses, rqst);
- if (IS_ERR(midQ)) {
- mutex_unlock(&ses->server->srv_mutex);
- /* Update # of requests on wire to server */
- add_credits(ses->server, 1, optype);
- return PTR_ERR(midQ);
+ for (i = 0; i < num_rqst; i++) {
+ midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
+ if (IS_ERR(midQ[i])) {
+ for (j = 0; j < i; j++)
+ cifs_delete_mid(midQ[j]);
+ mutex_unlock(&ses->server->srv_mutex);
+ /* Update # of requests on wire to server */
+ add_credits(ses->server, 1, optype);
+ return PTR_ERR(midQ[i]);
+ }
+
+ midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
}
- midQ->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(ses->server);
- rc = smb_send_rqst(ses->server, rqst, flags);
+ rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
cifs_in_send_dec(ses->server);
- cifs_save_when_sent(midQ);
+
+ for (i = 0; i < num_rqst; i++)
+ cifs_save_when_sent(midQ[i]);
if (rc < 0)
ses->server->sequence_number -= 2;
+
mutex_unlock(&ses->server->srv_mutex);
- if (rc < 0)
- goto out;
+ for (i = 0; i < num_rqst; i++) {
+ if (rc < 0)
+ goto out;
-#ifdef CONFIG_CIFS_SMB311
- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
- smb311_update_preauth_hash(ses, rqst->rq_iov,
- rqst->rq_nvec);
-#endif
+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
+ smb311_update_preauth_hash(ses, rqst[i].rq_iov,
+ rqst[i].rq_nvec);
- if (timeout == CIFS_ASYNC_OP)
- goto out;
+ if (timeout == CIFS_ASYNC_OP)
+ goto out;
- rc = wait_for_response(ses->server, midQ);
- if (rc != 0) {
- cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
- send_cancel(ses->server, rqst, midQ);
- spin_lock(&GlobalMid_Lock);
- if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
- midQ->mid_flags |= MID_WAIT_CANCELLED;
- midQ->callback = DeleteMidQEntry;
+ rc = wait_for_response(ses->server, midQ[i]);
+ if (rc != 0) {
+ cifs_dbg(FYI, "Cancelling wait for mid %llu\n",
+ midQ[i]->mid);
+ send_cancel(ses->server, &rqst[i], midQ[i]);
+ spin_lock(&GlobalMid_Lock);
+ if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
+ midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
+ midQ[i]->callback = DeleteMidQEntry;
+ spin_unlock(&GlobalMid_Lock);
+ add_credits(ses->server, 1, optype);
+ return rc;
+ }
spin_unlock(&GlobalMid_Lock);
+ }
+
+ rc = cifs_sync_mid_result(midQ[i], ses->server);
+ if (rc != 0) {
add_credits(ses->server, 1, optype);
return rc;
}
- spin_unlock(&GlobalMid_Lock);
- }
-
- rc = cifs_sync_mid_result(midQ, ses->server);
- if (rc != 0) {
- add_credits(ses->server, 1, optype);
- return rc;
- }
-
- if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
- rc = -EIO;
- cifs_dbg(FYI, "Bad MID state?\n");
- goto out;
- }
- buf = (char *)midQ->resp_buf;
- resp_iov->iov_base = buf;
- resp_iov->iov_len = midQ->resp_buf_size +
- ses->server->vals->header_preamble_size;
- if (midQ->large_buf)
- *resp_buf_type = CIFS_LARGE_BUFFER;
- else
- *resp_buf_type = CIFS_SMALL_BUFFER;
+ if (!midQ[i]->resp_buf ||
+ midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
+ rc = -EIO;
+ cifs_dbg(FYI, "Bad MID state?\n");
+ goto out;
+ }
-#ifdef CONFIG_CIFS_SMB311
- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
- struct kvec iov = {
- .iov_base = resp_iov->iov_base,
- .iov_len = resp_iov->iov_len
- };
- smb311_update_preauth_hash(ses, &iov, 1);
- }
-#endif
+ buf = (char *)midQ[i]->resp_buf;
+ resp_iov[i].iov_base = buf;
+ resp_iov[i].iov_len = midQ[i]->resp_buf_size +
+ ses->server->vals->header_preamble_size;
+
+ if (midQ[i]->large_buf)
+ resp_buf_type[i] = CIFS_LARGE_BUFFER;
+ else
+ resp_buf_type[i] = CIFS_SMALL_BUFFER;
+
+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
+ struct kvec iov = {
+ .iov_base = resp_iov[i].iov_base,
+ .iov_len = resp_iov[i].iov_len
+ };
+ smb311_update_preauth_hash(ses, &iov, 1);
+ }
- credits = ses->server->ops->get_credits(midQ);
+ credits = ses->server->ops->get_credits(midQ[i]);
- rc = ses->server->ops->check_receive(midQ, ses->server,
- flags & CIFS_LOG_ERROR);
+ rc = ses->server->ops->check_receive(midQ[i], ses->server,
+ flags & CIFS_LOG_ERROR);
- /* mark it so buf will not be freed by cifs_delete_mid */
- if ((flags & CIFS_NO_RESP) == 0)
- midQ->resp_buf = NULL;
+ /* mark it so buf will not be freed by cifs_delete_mid */
+ if ((flags & CIFS_NO_RESP) == 0)
+ midQ[i]->resp_buf = NULL;
+ }
out:
- cifs_delete_mid(midQ);
+ for (i = 0; i < num_rqst; i++)
+ cifs_delete_mid(midQ[i]);
add_credits(ses->server, credits, optype);
return rc;
}
int
+cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ struct smb_rqst *rqst, int *resp_buf_type, const int flags,
+ struct kvec *resp_iov)
+{
+ return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
+ resp_iov);
+}
+
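
With compound_send_recv() in place, cifs_send_recv() above is just the
num_rqst == 1 case; callers that want several SMB2 requests in one round
trip pass parallel arrays of requests and response slots. A minimal sketch
of that array-based shape (everything here is illustrative, not the kernel
API):

#include <stdio.h>

struct req { int id; };
struct rsp { int id; int status; };

static int send_recv_batch(const struct req *reqs, struct rsp *rsps, int n)
{
	int i;

	for (i = 0; i < n; i++) {	/* one pass over the "wire" */
		rsps[i].id = reqs[i].id;
		rsps[i].status = 0;
	}
	return 0;
}

/* The single-request helper is just the batch of one. */
static int send_recv_one(const struct req *req, struct rsp *rsp)
{
	return send_recv_batch(req, rsp, 1);
}

int main(void)
{
	struct req open_close[2] = { { 1 }, { 2 } };
	struct rsp rsps[2];

	send_recv_batch(open_close, rsps, 2);		/* compound */
	send_recv_one(&open_close[0], &rsps[0]);	/* classic path */
	printf("first rsp id=%d\n", rsps[0].id);
	return 0;
}
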
+int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
const int flags, struct kvec *resp_iov)
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 316af84674f1..50ddb795aaeb 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -35,6 +35,14 @@
#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
#define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */
#define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */
+/*
+ * Although these three are just aliases for the above, we need to stop
+ * confusing users with the 20+ year old term 'cifs', which is no longer
+ * secure; it was replaced by SMB2 (and the even more secure SMB3) years ago.
+ */
+#define SMB3_XATTR_CIFS_ACL "system.smb3_acl"
+#define SMB3_XATTR_ATTRIB "smb3.dosattrib" /* full name: user.smb3.dosattrib */
+#define SMB3_XATTR_CREATETIME "smb3.creationtime" /* user.smb3.creationtime */
/* BB need to add server (Samba e.g) support for security and trusted prefix */
enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT };
@@ -220,10 +228,12 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
switch (handler->flags) {
case XATTR_USER:
cifs_dbg(FYI, "%s:querying user xattr %s\n", __func__, name);
- if (strcmp(name, CIFS_XATTR_ATTRIB) == 0) {
+ if ((strcmp(name, CIFS_XATTR_ATTRIB) == 0) ||
+ (strcmp(name, SMB3_XATTR_ATTRIB) == 0)) {
rc = cifs_attrib_get(dentry, inode, value, size);
break;
- } else if (strcmp(name, CIFS_XATTR_CREATETIME) == 0) {
+ } else if ((strcmp(name, CIFS_XATTR_CREATETIME) == 0) ||
+ (strcmp(name, SMB3_XATTR_CREATETIME) == 0)) {
rc = cifs_creation_time_get(dentry, inode, value, size);
break;
}
@@ -363,6 +373,19 @@ static const struct xattr_handler cifs_cifs_acl_xattr_handler = {
.set = cifs_xattr_set,
};
+/*
+ * Although this is just an alias for the above, we need to stop
+ * confusing users with the 20 year old term 'cifs', which is no longer
+ * secure; it was replaced by SMB2 (and the even more secure SMB3)
+ * a long time ago.
+ */
+static const struct xattr_handler smb3_acl_xattr_handler = {
+ .name = SMB3_XATTR_CIFS_ACL,
+ .flags = XATTR_CIFS_ACL,
+ .get = cifs_xattr_get,
+ .set = cifs_xattr_set,
+};
+
static const struct xattr_handler cifs_posix_acl_access_xattr_handler = {
.name = XATTR_NAME_POSIX_ACL_ACCESS,
.flags = XATTR_ACL_ACCESS,
@@ -381,6 +404,7 @@ const struct xattr_handler *cifs_xattr_handlers[] = {
&cifs_user_xattr_handler,
&cifs_os2_xattr_handler,
&cifs_cifs_acl_xattr_handler,
+ &smb3_acl_xattr_handler, /* alias for above since avoiding "cifs" */
&cifs_posix_acl_access_xattr_handler,
&cifs_posix_acl_default_xattr_handler,
NULL
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 9907475b4226..a9b00942e87d 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -198,34 +198,6 @@ static int do_video_stillpicture(struct file *file,
return err;
}
-struct compat_video_spu_palette {
- int length;
- compat_uptr_t palette;
-};
-
-static int do_video_set_spu_palette(struct file *file,
- unsigned int cmd, struct compat_video_spu_palette __user *up)
-{
- struct video_spu_palette __user *up_native;
- compat_uptr_t palp;
- int length, err;
-
- err = get_user(palp, &up->palette);
- err |= get_user(length, &up->length);
- if (err)
- return -EFAULT;
-
- up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
- err = put_user(compat_ptr(palp), &up_native->palette);
- err |= put_user(length, &up_native->length);
- if (err)
- return -EFAULT;
-
- err = do_ioctl(file, cmd, (unsigned long) up_native);
-
- return err;
-}
-
#ifdef CONFIG_BLOCK
typedef struct sg_io_hdr32 {
compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
@@ -1206,9 +1178,6 @@ COMPATIBLE_IOCTL(AUDIO_CLEAR_BUFFER)
COMPATIBLE_IOCTL(AUDIO_SET_ID)
COMPATIBLE_IOCTL(AUDIO_SET_MIXER)
COMPATIBLE_IOCTL(AUDIO_SET_STREAMTYPE)
-COMPATIBLE_IOCTL(AUDIO_SET_EXT_ID)
-COMPATIBLE_IOCTL(AUDIO_SET_ATTRIBUTES)
-COMPATIBLE_IOCTL(AUDIO_SET_KARAOKE)
COMPATIBLE_IOCTL(DMX_START)
COMPATIBLE_IOCTL(DMX_STOP)
COMPATIBLE_IOCTL(DMX_SET_FILTER)
@@ -1233,16 +1202,9 @@ COMPATIBLE_IOCTL(VIDEO_FAST_FORWARD)
COMPATIBLE_IOCTL(VIDEO_SLOWMOTION)
COMPATIBLE_IOCTL(VIDEO_GET_CAPABILITIES)
COMPATIBLE_IOCTL(VIDEO_CLEAR_BUFFER)
-COMPATIBLE_IOCTL(VIDEO_SET_ID)
COMPATIBLE_IOCTL(VIDEO_SET_STREAMTYPE)
COMPATIBLE_IOCTL(VIDEO_SET_FORMAT)
-COMPATIBLE_IOCTL(VIDEO_SET_SYSTEM)
-COMPATIBLE_IOCTL(VIDEO_SET_HIGHLIGHT)
-COMPATIBLE_IOCTL(VIDEO_SET_SPU)
-COMPATIBLE_IOCTL(VIDEO_GET_NAVI)
-COMPATIBLE_IOCTL(VIDEO_SET_ATTRIBUTES)
COMPATIBLE_IOCTL(VIDEO_GET_SIZE)
-COMPATIBLE_IOCTL(VIDEO_GET_FRAME_RATE)
/* cec */
COMPATIBLE_IOCTL(CEC_ADAP_G_CAPS)
COMPATIBLE_IOCTL(CEC_ADAP_G_LOG_ADDRS)
@@ -1347,8 +1309,6 @@ static long do_ioctl_trans(unsigned int cmd,
return do_video_get_event(file, cmd, argp);
case VIDEO_STILLPICTURE:
return do_video_stillpicture(file, cmd, argp);
- case VIDEO_SET_SPU_PALETTE:
- return do_video_set_spu_palette(file, cmd, argp);
}
/*
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 577cff24707b..39843fa7e11b 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1777,6 +1777,16 @@ void configfs_unregister_group(struct config_group *group)
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
+ mutex_lock(&subsys->su_mutex);
+ if (!group->cg_item.ci_parent->ci_group) {
+ /*
+ * The parent has already been unlinked and detached
+ * due to a rmdir.
+ */
+ goto unlink_group;
+ }
+ mutex_unlock(&subsys->su_mutex);
+
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
spin_lock(&configfs_dirent_lock);
configfs_detach_prep(dentry, NULL);
@@ -1791,6 +1801,7 @@ void configfs_unregister_group(struct config_group *group)
dput(dentry);
mutex_lock(&subsys->su_mutex);
+unlink_group:
unlink_group(group);
mutex_unlock(&subsys->su_mutex);
}
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index 88f266efc09b..99d491cd01f9 100644
--- a/fs/configfs/item.c
+++ b/fs/configfs/item.c
@@ -64,7 +64,6 @@ static void config_item_init(struct config_item *item)
*/
int config_item_set_name(struct config_item *item, const char *fmt, ...)
{
- int error = 0;
int limit = CONFIGFS_ITEM_NAME_LEN;
int need;
va_list args;
@@ -79,25 +78,11 @@ int config_item_set_name(struct config_item *item, const char *fmt, ...)
if (need < limit)
name = item->ci_namebuf;
else {
- /*
- * Need more space? Allocate it and try again
- */
- limit = need + 1;
- name = kmalloc(limit, GFP_KERNEL);
- if (!name) {
- error = -ENOMEM;
- goto Done;
- }
va_start(args, fmt);
- need = vsnprintf(name, limit, fmt, args);
+ name = kvasprintf(GFP_KERNEL, fmt, args);
va_end(args);
-
- /* Still? Give up. */
- if (need >= limit) {
- kfree(name);
- error = -EFAULT;
- goto Done;
- }
+ if (!name)
+ return -ENOMEM;
}
/* Free the old name, if necessary. */
@@ -106,8 +91,7 @@ int config_item_set_name(struct config_item *item, const char *fmt, ...)
/* Now, set the new name */
item->ci_name = name;
- Done:
- return error;
+ return 0;
}
EXPORT_SYMBOL(config_item_set_name);
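
The simplification above drops the vsnprintf / grow / retry dance in favour
of a single allocating-printf call. A userspace analog using GNU
vasprintf() (the configfs specifics are elided; this only shows the
allocation pattern):

#define _GNU_SOURCE
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static char *fmt_name(const char *fmt, ...)
{
	va_list args;
	char *name;
	int rc;

	va_start(args, fmt);
	rc = vasprintf(&name, fmt, args);	/* sizes and allocates in one step */
	va_end(args);
	return rc < 0 ? NULL : name;
}

int main(void)
{
	char *n = fmt_name("item-%d", 42);

	if (n) {
		puts(n);
		free(n);
	}
	return 0;
}
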
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index 78ffc2699993..a5c54af861f7 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -64,7 +64,7 @@ static void fill_item_path(struct config_item * item, char * buffer, int length)
/* back up enough to print this bus id with '/' */
length -= cur;
- strncpy(buffer + length,config_item_name(p),cur);
+ memcpy(buffer + length, config_item_name(p), cur);
*(buffer + --length) = '/';
}
}
diff --git a/fs/dax.c b/fs/dax.c
index 641192808bb6..897b51e41d8f 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -566,7 +566,8 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
if (index >= end)
break;
- if (!radix_tree_exceptional_entry(pvec_ent))
+ if (WARN_ON_ONCE(
+ !radix_tree_exceptional_entry(pvec_ent)))
continue;
xa_lock_irq(&mapping->i_pages);
@@ -578,6 +579,13 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
if (page)
break;
}
+
+ /*
+ * We don't expect normal struct page entries to exist in our
+ * tree, but we keep these pagevec calls so that this code is
+ * consistent with the common pattern for handling pagevecs
+ * throughout the kernel.
+ */
pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
index++;
diff --git a/fs/dcache.c b/fs/dcache.c
index ceb7b491d1b9..8d2ec4898c2b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -729,16 +729,16 @@ static inline bool fast_dput(struct dentry *dentry)
if (dentry->d_lockref.count > 1) {
dentry->d_lockref.count--;
spin_unlock(&dentry->d_lock);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/*
* If we weren't the last ref, we're done.
*/
if (ret)
- return 1;
+ return true;
/*
* Careful, careful. The reference count went down
@@ -767,7 +767,7 @@ static inline bool fast_dput(struct dentry *dentry)
/* Nothing to do? Dropping the reference was all we needed? */
if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
- return 1;
+ return true;
/*
* Not the fast normal case? Get the lock. We've already decremented
@@ -784,7 +784,7 @@ static inline bool fast_dput(struct dentry *dentry)
*/
if (dentry->d_lockref.count) {
spin_unlock(&dentry->d_lock);
- return 1;
+ return true;
}
/*
@@ -793,7 +793,7 @@ static inline bool fast_dput(struct dentry *dentry)
* set it to 1.
*/
dentry->d_lockref.count = 1;
- return 0;
+ return false;
}
@@ -1889,40 +1889,13 @@ void d_instantiate_new(struct dentry *entry, struct inode *inode)
spin_lock(&inode->i_lock);
__d_instantiate(entry, inode);
WARN_ON(!(inode->i_state & I_NEW));
- inode->i_state &= ~I_NEW;
+ inode->i_state &= ~I_NEW & ~I_CREATING;
smp_mb();
wake_up_bit(&inode->i_state, __I_NEW);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);
-/**
- * d_instantiate_no_diralias - instantiate a non-aliased dentry
- * @entry: dentry to complete
- * @inode: inode to attach to this dentry
- *
- * Fill in inode information in the entry. If a directory alias is found, then
- * return an error (and drop inode). Together with d_materialise_unique() this
- * guarantees that a directory inode may never have more than one alias.
- */
-int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
-{
- BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
-
- security_d_instantiate(entry, inode);
- spin_lock(&inode->i_lock);
- if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
- spin_unlock(&inode->i_lock);
- iput(inode);
- return -EBUSY;
- }
- __d_instantiate(entry, inode);
- spin_unlock(&inode->i_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(d_instantiate_no_diralias);
-
struct dentry *d_make_root(struct inode *root_inode)
{
struct dentry *res = NULL;
@@ -2675,33 +2648,6 @@ struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
}
EXPORT_SYMBOL(d_exact_alias);
-/**
- * dentry_update_name_case - update case insensitive dentry with a new name
- * @dentry: dentry to be updated
- * @name: new name
- *
- * Update a case insensitive dentry with new case of name.
- *
- * dentry must have been returned by d_lookup with name @name. Old and new
- * name lengths must match (ie. no d_compare which allows mismatched name
- * lengths).
- *
- * Parent inode i_mutex must be held over d_lookup and into this call (to
- * keep renames and concurrent inserts, and readdir(2) away).
- */
-void dentry_update_name_case(struct dentry *dentry, const struct qstr *name)
-{
- BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
- BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
-
- spin_lock(&dentry->d_lock);
- write_seqcount_begin(&dentry->d_seq);
- memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
- write_seqcount_end(&dentry->d_seq);
- spin_unlock(&dentry->d_lock);
-}
-EXPORT_SYMBOL(dentry_update_name_case);
-
static void swap_names(struct dentry *dentry, struct dentry *target)
{
if (unlikely(dname_external(target))) {
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 71fccccf317e..8c6ab6c95727 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -86,7 +86,9 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
/* length of the variable name itself: remove GUID and separator */
namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
- uuid_le_to_bin(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+ err = guid_parse(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+ if (err)
+ goto out;
if (efivar_variable_is_removable(var->var.VendorGuid,
dentry->d_name.name, namelen))
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index 1b8b44637e70..5331a15a61f1 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -873,8 +873,8 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
struct bio *bio;
if (per_dev != master_dev) {
- bio = bio_clone_kmalloc(master_dev->bio,
- GFP_KERNEL);
+ bio = bio_clone_fast(master_dev->bio,
+ GFP_KERNEL, NULL);
if (unlikely(!bio)) {
ORE_DBGMSG(
"Failed to allocate BIO size=%u\n",
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 6484199b35d1..5c3d7b7e4975 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -611,8 +611,7 @@ fail_drop:
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
clear_nlink(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return ERR_PTR(err);
fail:
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 152453a91877..0c26dcc5d850 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -45,8 +45,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
return 0;
}
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return err;
}
@@ -192,8 +191,7 @@ out:
out_fail:
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput (inode);
+ discard_new_inode(inode);
goto out;
}
@@ -261,8 +259,7 @@ out:
out_fail:
inode_dec_link_count(inode);
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
out_dir:
inode_dec_link_count(dir);
goto out;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index aa52d87985aa..e5d6ee61ff48 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -426,9 +426,9 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
}
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
- ext4_error(sb, "Cannot get buffer for block bitmap - "
- "block_group = %u, block_bitmap = %llu",
- block_group, bitmap_blk);
+ ext4_warning(sb, "Cannot get buffer for block bitmap - "
+ "block_group = %u, block_bitmap = %llu",
+ block_group, bitmap_blk);
return ERR_PTR(-ENOMEM);
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 7c7123f265c2..1fc013f3d944 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -789,17 +789,16 @@ struct move_extent {
* affected filesystem before 2242.
*/
-static inline __le32 ext4_encode_extra_time(struct timespec *time)
+static inline __le32 ext4_encode_extra_time(struct timespec64 *time)
{
- u32 extra = sizeof(time->tv_sec) > 4 ?
- ((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK : 0;
+ u32 extra = ((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK;
return cpu_to_le32(extra | (time->tv_nsec << EXT4_EPOCH_BITS));
}
-static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
+static inline void ext4_decode_extra_time(struct timespec64 *time,
+ __le32 extra)
{
- if (unlikely(sizeof(time->tv_sec) > 4 &&
- (extra & cpu_to_le32(EXT4_EPOCH_MASK)))) {
+ if (unlikely(extra & cpu_to_le32(EXT4_EPOCH_MASK))) {
#if 1
/* Handle legacy encoding of pre-1970 dates with epoch
@@ -821,9 +820,8 @@ static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
do { \
(raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec); \
if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) {\
- struct timespec ts = timespec64_to_timespec((inode)->xtime); \
(raw_inode)->xtime ## _extra = \
- ext4_encode_extra_time(&ts); \
+ ext4_encode_extra_time(&(inode)->xtime); \
} \
} while (0)
@@ -840,10 +838,8 @@ do { \
do { \
(inode)->xtime.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime); \
if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) { \
- struct timespec ts = timespec64_to_timespec((inode)->xtime); \
- ext4_decode_extra_time(&ts, \
+ ext4_decode_extra_time(&(inode)->xtime, \
raw_inode->xtime ## _extra); \
- (inode)->xtime = timespec_to_timespec64(ts); \
} \
else \
(inode)->xtime.tv_nsec = 0; \
@@ -993,9 +989,9 @@ struct ext4_inode_info {
/*
* File creation time. Its function is same as that of
- * struct timespec i_{a,c,m}time in the generic inode.
+ * struct timespec64 i_{a,c,m}time in the generic inode.
*/
- struct timespec i_crtime;
+ struct timespec64 i_crtime;
/* mballoc */
struct list_head i_prealloc_list;
@@ -1299,7 +1295,14 @@ struct ext4_super_block {
__le32 s_lpf_ino; /* Location of the lost+found inode */
__le32 s_prj_quota_inum; /* inode for tracking project quota */
__le32 s_checksum_seed; /* crc32c(uuid) if csum_seed set */
- __le32 s_reserved[98]; /* Padding to the end of the block */
+ __u8 s_wtime_hi;
+ __u8 s_mtime_hi;
+ __u8 s_mkfs_time_hi;
+ __u8 s_lastcheck_hi;
+ __u8 s_first_error_time_hi;
+ __u8 s_last_error_time_hi;
+ __u8 s_pad[2];
+ __le32 s_reserved[96]; /* Padding to the end of the block */
__le32 s_checksum; /* crc32c(superblock) */
};
@@ -2456,6 +2459,7 @@ extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
extern int ext4_inode_attach_jinode(struct inode *inode);
extern int ext4_can_truncate(struct inode *inode);
extern int ext4_truncate(struct inode *);
+extern int ext4_break_layouts(struct inode *);
extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks);
extern void ext4_set_inode_flags(struct inode *);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 8ce6fd5b10dd..72a361d5ef74 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4826,6 +4826,13 @@ static long ext4_zero_range(struct file *file, loff_t offset,
* released from page cache.
*/
down_write(&EXT4_I(inode)->i_mmap_sem);
+
+ ret = ext4_break_layouts(inode);
+ if (ret) {
+ up_write(&EXT4_I(inode)->i_mmap_sem);
+ goto out_mutex;
+ }
+
ret = ext4_update_disksize_before_punch(inode, offset, len);
if (ret) {
up_write(&EXT4_I(inode)->i_mmap_sem);
@@ -5499,6 +5506,11 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
* page cache.
*/
down_write(&EXT4_I(inode)->i_mmap_sem);
+
+ ret = ext4_break_layouts(inode);
+ if (ret)
+ goto out_mmap;
+
/*
* Need to round down offset to be aligned with page size boundary
* for page size > block size.
@@ -5647,6 +5659,11 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
* page cache.
*/
down_write(&EXT4_I(inode)->i_mmap_sem);
+
+ ret = ext4_break_layouts(inode);
+ if (ret)
+ goto out_mmap;
+
/*
* Need to round down to align start offset to page size boundary
* for page size > block size.
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index f336cbc6e932..2addcb8730e1 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -138,9 +138,9 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
}
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
- ext4_error(sb, "Cannot read inode bitmap - "
- "block_group = %u, inode_bitmap = %llu",
- block_group, bitmap_blk);
+ ext4_warning(sb, "Cannot read inode bitmap - "
+ "block_group = %u, inode_bitmap = %llu",
+ block_group, bitmap_blk);
return ERR_PTR(-ENOMEM);
}
if (bitmap_uptodate(bh))
@@ -1086,7 +1086,7 @@ got:
/* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
- ei->i_crtime = timespec64_to_timespec(inode->i_mtime);
+ ei->i_crtime = inode->i_mtime;
memset(ei->i_data, 0, sizeof(ei->i_data));
ei->i_dir_start_lookup = 0;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 4efe77286ecd..8f6ad7667974 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -317,7 +317,7 @@ stop_handle:
* (Well, we could do this if we need to, but heck - it works)
*/
ext4_orphan_del(handle, inode);
- EXT4_I(inode)->i_dtime = get_seconds();
+ EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();
/*
* One subtle ordering requirement: if anything has gone wrong
@@ -4191,6 +4191,39 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
return 0;
}
+static void ext4_wait_dax_page(struct ext4_inode_info *ei, bool *did_unlock)
+{
+ *did_unlock = true;
+ up_write(&ei->i_mmap_sem);
+ schedule();
+ down_write(&ei->i_mmap_sem);
+}
+
+int ext4_break_layouts(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct page *page;
+ bool retry;
+ int error;
+
+ if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
+ return -EINVAL;
+
+ do {
+ retry = false;
+ page = dax_layout_busy_page(inode->i_mapping);
+ if (!page)
+ return 0;
+
+ error = ___wait_var_event(&page->_refcount,
+ atomic_read(&page->_refcount) == 1,
+ TASK_INTERRUPTIBLE, 0, 0,
+ ext4_wait_dax_page(ei, &retry));
+ } while (error == 0 && retry);
+
+ return error;
+}
+
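
A note on the idiom behind ext4_wait_dax_page() above: the waiter cannot
sleep holding i_mmap_sem, or the thread that releases the DAX page could
never take the lock, so the semaphore is dropped around the wait and the
busy-page check is redone from scratch afterwards. A runnable userspace
sketch of the drop-lock-while-sleeping pattern (names illustrative; build
with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool page_busy = true;

static void *release_page(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	page_busy = false;		/* the "refcount drops to 1" event */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, release_page, NULL);
	pthread_mutex_lock(&lock);
	while (page_busy)		/* re-check after every wakeup */
		pthread_cond_wait(&cond, &lock);	/* drops lock while asleep */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	puts("layout break complete");
	return 0;
}
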
/*
* ext4_punch_hole: punches a hole in a file by releasing the blocks
* associated with the given offset and length
@@ -4264,6 +4297,11 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
* page cache.
*/
down_write(&EXT4_I(inode)->i_mmap_sem);
+
+ ret = ext4_break_layouts(inode);
+ if (ret)
+ goto out_dio;
+
first_block_offset = round_up(offset, sb->s_blocksize);
last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
@@ -4944,17 +4982,14 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
ret = -EFSCORRUPTED;
goto bad_inode;
} else if (!ext4_has_inline_data(inode)) {
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- (S_ISLNK(inode->i_mode) &&
- !ext4_inode_is_fast_symlink(inode))))
- /* Validate extent which is part of inode */
+ /* validate the block references in the inode */
+ if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ (S_ISLNK(inode->i_mode) &&
+ !ext4_inode_is_fast_symlink(inode))) {
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_check_inode(inode);
- } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- (S_ISLNK(inode->i_mode) &&
- !ext4_inode_is_fast_symlink(inode))) {
- /* Validate block references which are part of inode */
- ret = ext4_ind_check_inode(inode);
+ else
+ ret = ext4_ind_check_inode(inode);
}
}
if (ret)
@@ -5553,6 +5588,14 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
ext4_wait_for_tail_page_commit(inode);
}
down_write(&EXT4_I(inode)->i_mmap_sem);
+
+ rc = ext4_break_layouts(inode);
+ if (rc) {
+ up_write(&EXT4_I(inode)->i_mmap_sem);
+ error = rc;
+ goto err_out;
+ }
+
/*
* Truncate pagecache after we've waited for commit
* in data=journal mode to make pages freeable.
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f7ab34088162..e29fce2fbf25 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -14,6 +14,7 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>
@@ -2140,7 +2141,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
* This should tell if fe_len is exactly power of 2
*/
if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
- ac->ac_2order = i - 1;
+ ac->ac_2order = array_index_nospec(i - 1,
+ sb->s_blocksize_bits + 2);
}
/* if stream allocation is enabled, use global goal */
@@ -3799,7 +3801,6 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
ext4_group_t group;
ext4_grpblk_t bit;
unsigned long long grp_blk_start;
- int err = 0;
int free = 0;
BUG_ON(pa->pa_deleted == 0);
@@ -3840,7 +3841,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
}
atomic_add(free, &sbi->s_mb_discarded);
- return err;
+ return 0;
}
static noinline_for_stack int
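
The array_index_nospec() hunk above clamps ac_2order even under branch misprediction, so the subsequent array indexing cannot be turned into a Spectre-v1 gadget. The kernel's generic fallback computes a branchless mask that is all-ones for an in-range index and zero otherwise; a userspace sketch, assuming arithmetic right shift of negative values as the kernel helper does:

#include <limits.h>

/* Branchless clamp in the style of the kernel's generic
 * array_index_mask_nospec() (include/linux/nospec.h).  Assumes index
 * and size never have the sign bit set, as the kernel version does. */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
	/* (size - 1 - index) underflows when index >= size, setting the
	 * sign bit; the arithmetic shift smears it into 0 or ~0UL. */
	return ~(long)(index | (size - 1UL - index)) >>
			(sizeof(long) * CHAR_BIT - 1);
}

static unsigned long index_nospec(unsigned long index, unsigned long size)
{
	return index & index_mask_nospec(index, size);	/* 0 if out of range */
}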
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 638ad4743477..39b07c2d3384 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -147,7 +147,7 @@ static int kmmpd(void *data)
mmp_block = le64_to_cpu(es->s_mmp_block);
mmp = (struct mmp_struct *)(bh->b_data);
- mmp->mmp_time = cpu_to_le64(get_seconds());
+ mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
/*
* Start with the higher mmp_check_interval and reduce it if
* the MMP block is being updated on time.
@@ -165,7 +165,7 @@ static int kmmpd(void *data)
seq = 1;
mmp->mmp_seq = cpu_to_le32(seq);
- mmp->mmp_time = cpu_to_le64(get_seconds());
+ mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
last_update_time = jiffies;
retval = write_mmp_block(sb, bh);
@@ -241,7 +241,7 @@ static int kmmpd(void *data)
* Unmount seems to be clean.
*/
mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
- mmp->mmp_time = cpu_to_le64(get_seconds());
+ mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
retval = write_mmp_block(sb, bh);
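
All three kmmpd() call sites switch from get_seconds(), whose unsigned long return value truncates on 32-bit machines, to ktime_get_real_seconds(), which returns a full time64_t; the on-disk mmp_time field is already 64 bits wide, so only the in-kernel accessor needed to change. A rough userspace illustration of the difference (both helpers below are stand-ins, not the kernel APIs):

#include <stdint.h>
#include <time.h>

/* On an ILP32 machine the old get_seconds() behaved like this
 * truncating variant, wrapping in 2038/2106. */
static unsigned long old_style_seconds(void)
{
	return (unsigned long)time(NULL);
}

static int64_t new_style_seconds(void)
{
	return (int64_t)time(NULL);	/* full 64-bit seconds */
}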
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 8e17efdcbf11..a409ff70d67b 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -134,9 +134,7 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
mapping[0] = inode1->i_mapping;
mapping[1] = inode2->i_mapping;
} else {
- pgoff_t tmp = index1;
- index1 = index2;
- index2 = tmp;
+ swap(index1, index2);
mapping[0] = inode2->i_mapping;
mapping[1] = inode1->i_mapping;
}
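
The three-line temporary-variable exchange becomes the kernel's type-generic swap() macro from <linux/kernel.h>, which is defined as:

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

so swap(index1, index2) reads the same for any pair of lvalues of matching type, with no risk of picking the wrong temporary type.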
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 2a4c25c4681d..116ff68c5bd4 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1398,6 +1398,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
goto cleanup_and_exit;
dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
"falling back\n"));
+ ret = NULL;
}
nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
if (!nblocks) {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b7f7922061be..5863fd22e90b 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -312,6 +312,24 @@ void ext4_itable_unused_set(struct super_block *sb,
bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}
+static void __ext4_update_tstamp(__le32 *lo, __u8 *hi)
+{
+ time64_t now = ktime_get_real_seconds();
+
+ now = clamp_val(now, 0, (1ull << 40) - 1);
+
+ *lo = cpu_to_le32(lower_32_bits(now));
+ *hi = upper_32_bits(now);
+}
+
+static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
+{
+ return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
+}
+#define ext4_update_tstamp(es, tstamp) \
+ __ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
+#define ext4_get_tstamp(es, tstamp) \
+ __ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
static void __save_error_info(struct super_block *sb, const char *func,
unsigned int line)
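
The helpers above widen superblock timestamps to 40 bits: the low 32 bits stay in the existing little-endian field and bits 32-39 land in the adjacent *_hi byte, pushing the overflow out by tens of thousands of years. A round-trip sketch in plain C, mirroring __ext4_update_tstamp()/__ext4_get_tstamp() minus the cpu_to_le32()/le32_to_cpu() byte swapping:

#include <stdint.h>
#include <assert.h>

/* 40-bit split-timestamp round trip (no endianness handling). */
static void update_tstamp(uint32_t *lo, uint8_t *hi, int64_t now)
{
	if (now < 0)
		now = 0;
	if (now > ((int64_t)1 << 40) - 1)
		now = ((int64_t)1 << 40) - 1;	/* clamp_val() equivalent */
	*lo = (uint32_t)now;			/* bits  0..31 */
	*hi = (uint8_t)(now >> 32);		/* bits 32..39 */
}

static int64_t get_tstamp(uint32_t lo, uint8_t hi)
{
	return ((int64_t)hi << 32) + lo;
}

int main(void)
{
	uint32_t lo;
	uint8_t hi;

	update_tstamp(&lo, &hi, 4294967296LL);	/* 2^32: the year 2106 */
	assert(get_tstamp(lo, hi) == 4294967296LL);
	return 0;
}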
@@ -322,11 +340,12 @@ static void __save_error_info(struct super_block *sb, const char *func,
if (bdev_read_only(sb->s_bdev))
return;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
- es->s_last_error_time = cpu_to_le32(get_seconds());
+ ext4_update_tstamp(es, s_last_error_time);
strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
es->s_last_error_line = cpu_to_le32(line);
if (!es->s_first_error_time) {
es->s_first_error_time = es->s_last_error_time;
+ es->s_first_error_time_hi = es->s_last_error_time_hi;
strncpy(es->s_first_error_func, func,
sizeof(es->s_first_error_func));
es->s_first_error_line = cpu_to_le32(line);
@@ -776,26 +795,26 @@ void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
+ int ret;
- if ((flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) &&
- !EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) {
- percpu_counter_sub(&sbi->s_freeclusters_counter,
- grp->bb_free);
- set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
- &grp->bb_state);
+ if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
+ ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
+ &grp->bb_state);
+ if (!ret)
+ percpu_counter_sub(&sbi->s_freeclusters_counter,
+ grp->bb_free);
}
- if ((flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) &&
- !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
- if (gdp) {
+ if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
+ ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
+ &grp->bb_state);
+ if (!ret && gdp) {
int count;
count = ext4_free_inodes_count(sb, gdp);
percpu_counter_sub(&sbi->s_freeinodes_counter,
count);
}
- set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
- &grp->bb_state);
}
}
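
Switching to ext4_test_and_set_bit() closes a race in the old test-then-set sequence, where two CPUs could both observe the corrupt bit clear and both subtract the group's free counts. With an atomic test-and-set, exactly one caller wins and does the accounting; sketched with C11 atomics (names illustrative):

#include <stdatomic.h>

/* Once-only accounting on first corruption, in the style of the hunk
 * above; atomic_flag stands in for the group's state bit. */
static atomic_flag corrupt = ATOMIC_FLAG_INIT;
static atomic_long free_clusters;

static void mark_corrupted(long bb_free)
{
	/* test-and-set returns the previous value: only the caller that
	 * flips the flag from clear to set subtracts, however many race */
	if (!atomic_flag_test_and_set(&corrupt))
		atomic_fetch_sub(&free_clusters, bb_free);
}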
@@ -2174,8 +2193,8 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
"warning: maximal mount count reached, "
"running e2fsck is recommended");
else if (le32_to_cpu(es->s_checkinterval) &&
- (le32_to_cpu(es->s_lastcheck) +
- le32_to_cpu(es->s_checkinterval) <= get_seconds()))
+ (ext4_get_tstamp(es, s_lastcheck) +
+ le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
ext4_msg(sb, KERN_WARNING,
"warning: checktime reached, "
"running e2fsck is recommended");
@@ -2184,7 +2203,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
le16_add_cpu(&es->s_mnt_count, 1);
- es->s_mtime = cpu_to_le32(get_seconds());
+ ext4_update_tstamp(es, s_mtime);
ext4_update_dynamic_rev(sb);
if (sbi->s_journal)
ext4_set_feature_journal_needs_recovery(sb);
@@ -2875,8 +2894,9 @@ static void print_daily_error_info(struct timer_list *t)
ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
le32_to_cpu(es->s_error_count));
if (es->s_first_error_time) {
- printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
- sb->s_id, le32_to_cpu(es->s_first_error_time),
+ printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
+ sb->s_id,
+ ext4_get_tstamp(es, s_first_error_time),
(int) sizeof(es->s_first_error_func),
es->s_first_error_func,
le32_to_cpu(es->s_first_error_line));
@@ -2889,8 +2909,9 @@ static void print_daily_error_info(struct timer_list *t)
printk(KERN_CONT "\n");
}
if (es->s_last_error_time) {
- printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
- sb->s_id, le32_to_cpu(es->s_last_error_time),
+ printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
+ sb->s_id,
+ ext4_get_tstamp(es, s_last_error_time),
(int) sizeof(es->s_last_error_func),
es->s_last_error_func,
le32_to_cpu(es->s_last_error_line));
@@ -3508,7 +3529,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_sb_block = sb_block;
if (sb->s_bdev->bd_part)
sbi->s_sectors_written_start =
- part_stat_read(sb->s_bdev->bd_part, sectors[1]);
+ part_stat_read(sb->s_bdev->bd_part, sectors[STAT_WRITE]);
/* Cleanup superblock name */
strreplace(sb->s_id, '/', '!');
@@ -4813,11 +4834,12 @@ static int ext4_commit_super(struct super_block *sb, int sync)
* to complain and force a full file system check.
*/
if (!(sb->s_flags & SB_RDONLY))
- es->s_wtime = cpu_to_le32(get_seconds());
+ ext4_update_tstamp(es, s_wtime);
if (sb->s_bdev->bd_part)
es->s_kbytes_written =
cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
- ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
+ ((part_stat_read(sb->s_bdev->bd_part,
+ sectors[STAT_WRITE]) -
EXT4_SB(sb)->s_sectors_written_start) >> 1));
else
es->s_kbytes_written =
@@ -5080,6 +5102,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
#endif
char *orig_data = kstrdup(data, GFP_KERNEL);
+ if (data && !orig_data)
+ return -ENOMEM;
+
/* Store the original options */
old_sb_flags = sb->s_flags;
old_opts.s_mount_opt = sbi->s_mount_opt;
@@ -5665,13 +5690,13 @@ static int ext4_enable_quotas(struct super_block *sb)
DQUOT_USAGE_ENABLED |
(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
if (err) {
- for (type--; type >= 0; type--)
- dquot_quota_off(sb, type);
-
ext4_warning(sb,
"Failed to enable quota tracking "
"(type=%d, err=%d). Please run "
"e2fsck to fix.", type, err);
+ for (type--; type >= 0; type--)
+ dquot_quota_off(sb, type);
+
return err;
}
}
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index f34da0bb8f17..9212a026a1f1 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -25,6 +25,8 @@ typedef enum {
attr_reserved_clusters,
attr_inode_readahead,
attr_trigger_test_error,
+ attr_first_error_time,
+ attr_last_error_time,
attr_feature,
attr_pointer_ui,
attr_pointer_atomic,
@@ -56,7 +58,8 @@ static ssize_t session_write_kbytes_show(struct ext4_sb_info *sbi, char *buf)
if (!sb->s_bdev->bd_part)
return snprintf(buf, PAGE_SIZE, "0\n");
return snprintf(buf, PAGE_SIZE, "%lu\n",
- (part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
+ (part_stat_read(sb->s_bdev->bd_part,
+ sectors[STAT_WRITE]) -
sbi->s_sectors_written_start) >> 1);
}
@@ -68,7 +71,8 @@ static ssize_t lifetime_write_kbytes_show(struct ext4_sb_info *sbi, char *buf)
return snprintf(buf, PAGE_SIZE, "0\n");
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)(sbi->s_kbytes_written +
- ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
+ ((part_stat_read(sb->s_bdev->bd_part,
+ sectors[STAT_WRITE]) -
EXT4_SB(sb)->s_sectors_written_start) >> 1)));
}
@@ -182,8 +186,8 @@ EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
EXT4_RO_ATTR_ES_UI(errors_count, s_error_count);
-EXT4_RO_ATTR_ES_UI(first_error_time, s_first_error_time);
-EXT4_RO_ATTR_ES_UI(last_error_time, s_last_error_time);
+EXT4_ATTR(first_error_time, 0444, first_error_time);
+EXT4_ATTR(last_error_time, 0444, last_error_time);
static unsigned int old_bump_val = 128;
EXT4_ATTR_PTR(max_writeback_mb_bump, 0444, pointer_ui, &old_bump_val);
@@ -249,6 +253,15 @@ static void *calc_ptr(struct ext4_attr *a, struct ext4_sb_info *sbi)
return NULL;
}
+static ssize_t __print_tstamp(char *buf, __le32 lo, __u8 hi)
+{
+ return snprintf(buf, PAGE_SIZE, "%lld\n",
+ ((time64_t)hi << 32) + le32_to_cpu(lo));
+}
+
+#define print_tstamp(buf, es, tstamp) \
+ __print_tstamp(buf, (es)->tstamp, (es)->tstamp ## _hi)
+
static ssize_t ext4_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -274,8 +287,12 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
case attr_pointer_ui:
if (!ptr)
return 0;
- return snprintf(buf, PAGE_SIZE, "%u\n",
- *((unsigned int *) ptr));
+ if (a->attr_ptr == ptr_ext4_super_block_offset)
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ le32_to_cpup(ptr));
+ else
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ *((unsigned int *) ptr));
case attr_pointer_atomic:
if (!ptr)
return 0;
@@ -283,6 +300,10 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
atomic_read((atomic_t *) ptr));
case attr_feature:
return snprintf(buf, PAGE_SIZE, "supported\n");
+ case attr_first_error_time:
+ return print_tstamp(buf, sbi->s_es, s_first_error_time);
+ case attr_last_error_time:
+ return print_tstamp(buf, sbi->s_es, s_last_error_time);
}
return 0;
@@ -308,7 +329,10 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
ret = kstrtoul(skip_spaces(buf), 0, &t);
if (ret)
return ret;
- *((unsigned int *) ptr) = t;
+ if (a->attr_ptr == ptr_ext4_super_block_offset)
+ *((__le32 *) ptr) = cpu_to_le32(t);
+ else
+ *((unsigned int *) ptr) = t;
return len;
case attr_inode_readahead:
return inode_readahead_blks_store(sbi, buf, len);
diff --git a/fs/ext4/truncate.h b/fs/ext4/truncate.h
index 0cb13badf473..bcbe3668c1d4 100644
--- a/fs/ext4/truncate.h
+++ b/fs/ext4/truncate.h
@@ -11,6 +11,10 @@
*/
static inline void ext4_truncate_failed_write(struct inode *inode)
{
+ /*
+ * We don't need to call ext4_break_layouts() because the blocks we
+ * are truncating were never visible to userspace.
+ */
down_write(&EXT4_I(inode)->i_mmap_sem);
truncate_inode_pages(inode->i_mapping, inode->i_size);
ext4_truncate(inode);
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 723df14f4084..f36fc5d5b257 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -190,6 +190,8 @@ ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
if ((void *)next >= end)
return -EFSCORRUPTED;
+ if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
+ return -EFSCORRUPTED;
e = next;
}
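
The added strnlen() check rejects xattr entries whose declared e_name_len covers an embedded NUL byte, a sign of on-disk corruption: strnlen() stops at the first '\0', so it equals e_name_len only when the name is NUL-free for its whole declared length. The predicate reduces to:

#include <string.h>

/* An entry name of declared length len is valid only if none of its
 * first len bytes is '\0'. */
static int name_is_corrupt(const char *name, size_t len)
{
	return strnlen(name, len) != len;
}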
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 4d8b1de83143..6799c3fc44e3 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1304,7 +1304,7 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
* and the return value is in kbytes. s is of struct f2fs_sb_info.
*/
#define BD_PART_WRITTEN(s) \
-(((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[1]) - \
+(((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[STAT_WRITE]) - \
(s)->sectors_written_start) >> 1)
static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 3995e926ba3a..17bcff789c08 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2882,7 +2882,8 @@ try_onemore:
/* For write statistics */
if (sb->s_bdev->bd_part)
sbi->sectors_written_start =
- (u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);
+ (u64)part_stat_read(sb->s_bdev->bd_part,
+ sectors[STAT_WRITE]);
/* Read accumulated write IO statistics if exists */
seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
diff --git a/fs/file_table.c b/fs/file_table.c
index 7ec0b3e5f05d..d6eccd04d703 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -51,6 +51,7 @@ static void file_free_rcu(struct rcu_head *head)
static inline void file_free(struct file *f)
{
+ security_file_free(f);
percpu_counter_dec(&nr_files);
call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}
@@ -100,9 +101,8 @@ int proc_nr_files(struct ctl_table *table, int write,
* done, you will imbalance in the mount's writer count
* and a warning at __fput() time.
*/
-struct file *get_empty_filp(void)
+struct file *alloc_empty_file(int flags, const struct cred *cred)
{
- const struct cred *cred = current_cred();
static long old_max;
struct file *f;
int error;
@@ -123,11 +123,10 @@ struct file *get_empty_filp(void)
if (unlikely(!f))
return ERR_PTR(-ENOMEM);
- percpu_counter_inc(&nr_files);
f->f_cred = get_cred(cred);
error = security_file_alloc(f);
if (unlikely(error)) {
- file_free(f);
+ file_free_rcu(&f->f_u.fu_rcuhead);
return ERR_PTR(error);
}
@@ -136,7 +135,10 @@ struct file *get_empty_filp(void)
spin_lock_init(&f->f_lock);
mutex_init(&f->f_pos_lock);
eventpoll_init_file(f);
+ f->f_flags = flags;
+ f->f_mode = OPEN_FMODE(flags);
/* f->f_version: 0 */
+ percpu_counter_inc(&nr_files);
return f;
over:
@@ -152,15 +154,15 @@ over:
* alloc_file - allocate and initialize a 'struct file'
*
* @path: the (dentry, vfsmount) pair for the new file
- * @mode: the mode with which the new file will be opened
+ * @flags: O_... flags with which the new file will be opened
* @fop: the 'struct file_operations' for the new file
*/
-struct file *alloc_file(const struct path *path, fmode_t mode,
+static struct file *alloc_file(const struct path *path, int flags,
const struct file_operations *fop)
{
struct file *file;
- file = get_empty_filp();
+ file = alloc_empty_file(flags, current_cred());
if (IS_ERR(file))
return file;
@@ -168,19 +170,56 @@ struct file *alloc_file(const struct path *path, fmode_t mode,
file->f_inode = path->dentry->d_inode;
file->f_mapping = path->dentry->d_inode->i_mapping;
file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
- if ((mode & FMODE_READ) &&
+ if ((file->f_mode & FMODE_READ) &&
likely(fop->read || fop->read_iter))
- mode |= FMODE_CAN_READ;
- if ((mode & FMODE_WRITE) &&
+ file->f_mode |= FMODE_CAN_READ;
+ if ((file->f_mode & FMODE_WRITE) &&
likely(fop->write || fop->write_iter))
- mode |= FMODE_CAN_WRITE;
- file->f_mode = mode;
+ file->f_mode |= FMODE_CAN_WRITE;
+ file->f_mode |= FMODE_OPENED;
file->f_op = fop;
- if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+ if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_inc(path->dentry->d_inode);
return file;
}
-EXPORT_SYMBOL(alloc_file);
+
+struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
+ const char *name, int flags,
+ const struct file_operations *fops)
+{
+ static const struct dentry_operations anon_ops = {
+ .d_dname = simple_dname
+ };
+ struct qstr this = QSTR_INIT(name, strlen(name));
+ struct path path;
+ struct file *file;
+
+ path.dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
+ if (!path.dentry)
+ return ERR_PTR(-ENOMEM);
+ if (!mnt->mnt_sb->s_d_op)
+ d_set_d_op(path.dentry, &anon_ops);
+ path.mnt = mntget(mnt);
+ d_instantiate(path.dentry, inode);
+ file = alloc_file(&path, flags, fops);
+ if (IS_ERR(file)) {
+ ihold(inode);
+ path_put(&path);
+ }
+ return file;
+}
+EXPORT_SYMBOL(alloc_file_pseudo);
+
+struct file *alloc_file_clone(struct file *base, int flags,
+ const struct file_operations *fops)
+{
+ struct file *f = alloc_file(&base->f_path, flags, fops);
+ if (!IS_ERR(f)) {
+ path_get(&f->f_path);
+ f->f_mapping = base->f_mapping;
+ }
+ return f;
+}
/* the real guts of fput() - releasing the last reference to file
*/
@@ -190,6 +229,9 @@ static void __fput(struct file *file)
struct vfsmount *mnt = file->f_path.mnt;
struct inode *inode = file->f_inode;
+ if (unlikely(!(file->f_mode & FMODE_OPENED)))
+ goto out;
+
might_sleep();
fsnotify_close(file);
@@ -207,7 +249,6 @@ static void __fput(struct file *file)
}
if (file->f_op->release)
file->f_op->release(inode, file);
- security_file_free(file);
if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
!(file->f_mode & FMODE_PATH))) {
cdev_put(inode->i_cdev);
@@ -220,12 +261,10 @@ static void __fput(struct file *file)
put_write_access(inode);
__mnt_drop_write(mnt);
}
- file->f_path.dentry = NULL;
- file->f_path.mnt = NULL;
- file->f_inode = NULL;
- file_free(file);
dput(dentry);
mntput(mnt);
+out:
+ file_free(file);
}
static LLIST_HEAD(delayed_fput_list);
@@ -300,14 +339,6 @@ void __fput_sync(struct file *file)
EXPORT_SYMBOL(fput);
-void put_filp(struct file *file)
-{
- if (atomic_long_dec_and_test(&file->f_count)) {
- security_file_free(file);
- file_free(file);
- }
-}
-
void __init files_init(void)
{
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
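
With this patch a struct file can reach __fput() without ever having been opened, so teardown splits in two: open-dependent cleanup (->release(), write access, dentry and mount references) runs only when FMODE_OPENED is set, while the unconditional part -- security_file_free() and the allocation itself -- moves into file_free(). The shape of that two-stage teardown, with purely illustrative names:

#include <stdbool.h>
#include <stdlib.h>

struct object {
	bool opened;	/* plays the role of FMODE_OPENED */
	/* ... */
};

static void object_free(struct object *o)
{
	/* unconditional teardown: safe whether or not open succeeded */
	free(o);
}

static void object_put_last(struct object *o)
{
	if (o->opened) {
		/* teardown that only makes sense for fully opened
		 * objects: ->release(), dropping write access,
		 * dput()/mntput(), ... */
	}
	object_free(o);
}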
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 56231b31f806..d80aab0d5982 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -399,7 +399,7 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
*/
static int fuse_create_open(struct inode *dir, struct dentry *entry,
struct file *file, unsigned flags,
- umode_t mode, int *opened)
+ umode_t mode)
{
int err;
struct inode *inode;
@@ -469,7 +469,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
d_instantiate(entry, inode);
fuse_change_entry_timeout(entry, &outentry);
fuse_invalidate_attr(dir);
- err = finish_open(file, entry, generic_file_open, opened);
+ err = finish_open(file, entry, generic_file_open);
if (err) {
fuse_sync_release(ff, flags);
} else {
@@ -489,7 +489,7 @@ out_err:
static int fuse_mknod(struct inode *, struct dentry *, umode_t, dev_t);
static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
struct file *file, unsigned flags,
- umode_t mode, int *opened)
+ umode_t mode)
{
int err;
struct fuse_conn *fc = get_fuse_conn(dir);
@@ -508,12 +508,12 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
goto no_open;
/* Only creates */
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
if (fc->no_create)
goto mknod;
- err = fuse_create_open(dir, entry, file, flags, mode, opened);
+ err = fuse_create_open(dir, entry, file, flags, mode);
if (err == -ENOSYS) {
fc->no_create = 1;
goto mknod;
@@ -539,6 +539,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
{
struct fuse_entry_out outarg;
struct inode *inode;
+ struct dentry *d;
int err;
struct fuse_forget_link *forget;
@@ -570,11 +571,17 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
}
kfree(forget);
- err = d_instantiate_no_diralias(entry, inode);
- if (err)
- return err;
+ d_drop(entry);
+ d = d_splice_alias(inode, entry);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
- fuse_change_entry_timeout(entry, &outarg);
+ if (d) {
+ fuse_change_entry_timeout(d, &outarg);
+ dput(d);
+ } else {
+ fuse_change_entry_timeout(entry, &outarg);
+ }
fuse_invalidate_attr(dir);
return 0;
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 776717f1eeea..af5f87a493d9 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -82,14 +82,12 @@ struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
int error;
- int len;
+ size_t len;
char *data;
const char *name = gfs2_acl_name(type);
if (acl) {
- len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0);
- if (len == 0)
- return 0;
+ len = posix_acl_xattr_size(acl->a_count);
data = kmalloc(len, GFP_NOFS);
if (data == NULL)
return -ENOMEM;
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 35f5ee23566d..31e8270d0b26 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -22,6 +22,7 @@
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
+#include <linux/sched/signal.h>
#include "gfs2.h"
#include "incore.h"
@@ -36,10 +37,11 @@
#include "super.h"
#include "util.h"
#include "glops.h"
+#include "aops.h"
-static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
- unsigned int from, unsigned int len)
+void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
+ unsigned int from, unsigned int len)
{
struct buffer_head *head = page_buffers(page);
unsigned int bsize = head->b_size;
@@ -82,12 +84,6 @@ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
return 0;
}
-static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
- struct buffer_head *bh_result, int create)
-{
- return gfs2_block_map(inode, lblock, bh_result, 0);
-}
-
/**
* gfs2_writepage_common - Common bits of writepage
* @page: The page to be written
@@ -462,7 +458,7 @@ static int gfs2_jdata_writepages(struct address_space *mapping,
* Returns: errno
*/
-static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
+int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
struct buffer_head *dibh;
u64 dsize = i_size_read(&ip->i_inode);
@@ -512,9 +508,13 @@ static int __gfs2_readpage(void *file, struct page *page)
{
struct gfs2_inode *ip = GFS2_I(page->mapping->host);
struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
+
int error;
- if (gfs2_is_stuffed(ip)) {
+ if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
+ !page_has_buffers(page)) {
+ error = iomap_readpage(page, &gfs2_iomap_ops);
+ } else if (gfs2_is_stuffed(ip)) {
error = stuffed_readpage(ip, page);
unlock_page(page);
} else {
@@ -644,139 +644,10 @@ out_uninit:
}
/**
- * gfs2_write_begin - Begin to write to a file
- * @file: The file to write to
- * @mapping: The mapping in which to write
- * @pos: The file offset at which to start writing
- * @len: Length of the write
- * @flags: Various flags
- * @pagep: Pointer to return the page
- * @fsdata: Pointer to return fs data (unused by GFS2)
- *
- * Returns: errno
- */
-
-static int gfs2_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
-{
- struct gfs2_inode *ip = GFS2_I(mapping->host);
- struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
- struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
- unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
- unsigned requested = 0;
- int alloc_required;
- int error = 0;
- pgoff_t index = pos >> PAGE_SHIFT;
- unsigned from = pos & (PAGE_SIZE - 1);
- struct page *page;
-
- gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
- error = gfs2_glock_nq(&ip->i_gh);
- if (unlikely(error))
- goto out_uninit;
- if (&ip->i_inode == sdp->sd_rindex) {
- error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
- GL_NOCACHE, &m_ip->i_gh);
- if (unlikely(error)) {
- gfs2_glock_dq(&ip->i_gh);
- goto out_uninit;
- }
- }
-
- alloc_required = gfs2_write_alloc_required(ip, pos, len);
-
- if (alloc_required || gfs2_is_jdata(ip))
- gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
-
- if (alloc_required) {
- struct gfs2_alloc_parms ap = { .aflags = 0, };
- requested = data_blocks + ind_blocks;
- ap.target = requested;
- error = gfs2_quota_lock_check(ip, &ap);
- if (error)
- goto out_unlock;
-
- error = gfs2_inplace_reserve(ip, &ap);
- if (error)
- goto out_qunlock;
- }
-
- rblocks = RES_DINODE + ind_blocks;
- if (gfs2_is_jdata(ip))
- rblocks += data_blocks ? data_blocks : 1;
- if (ind_blocks || data_blocks)
- rblocks += RES_STATFS + RES_QUOTA;
- if (&ip->i_inode == sdp->sd_rindex)
- rblocks += 2 * RES_STATFS;
- if (alloc_required)
- rblocks += gfs2_rg_blocks(ip, requested);
-
- error = gfs2_trans_begin(sdp, rblocks,
- PAGE_SIZE/sdp->sd_sb.sb_bsize);
- if (error)
- goto out_trans_fail;
-
- error = -ENOMEM;
- flags |= AOP_FLAG_NOFS;
- page = grab_cache_page_write_begin(mapping, index, flags);
- *pagep = page;
- if (unlikely(!page))
- goto out_endtrans;
-
- if (gfs2_is_stuffed(ip)) {
- error = 0;
- if (pos + len > gfs2_max_stuffed_size(ip)) {
- error = gfs2_unstuff_dinode(ip, page);
- if (error == 0)
- goto prepare_write;
- } else if (!PageUptodate(page)) {
- error = stuffed_readpage(ip, page);
- }
- goto out;
- }
-
-prepare_write:
- error = __block_write_begin(page, from, len, gfs2_block_map);
-out:
- if (error == 0)
- return 0;
-
- unlock_page(page);
- put_page(page);
-
- gfs2_trans_end(sdp);
- if (alloc_required) {
- gfs2_inplace_release(ip);
- if (pos + len > ip->i_inode.i_size)
- gfs2_trim_blocks(&ip->i_inode);
- }
- goto out_qunlock;
-
-out_endtrans:
- gfs2_trans_end(sdp);
-out_trans_fail:
- if (alloc_required)
- gfs2_inplace_release(ip);
-out_qunlock:
- if (alloc_required)
- gfs2_quota_unlock(ip);
-out_unlock:
- if (&ip->i_inode == sdp->sd_rindex) {
- gfs2_glock_dq(&m_ip->i_gh);
- gfs2_holder_uninit(&m_ip->i_gh);
- }
- gfs2_glock_dq(&ip->i_gh);
-out_uninit:
- gfs2_holder_uninit(&ip->i_gh);
- return error;
-}
-
-/**
* adjust_fs_space - Adjusts the free space available due to gfs2_grow
* @inode: the rindex inode
*/
-static void adjust_fs_space(struct inode *inode)
+void adjust_fs_space(struct inode *inode)
{
struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
@@ -822,11 +693,11 @@ out:
* This copies the data from the page into the inode block after
* the inode data structure itself.
*
- * Returns: errno
+ * Returns: copied bytes or errno
*/
-static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
- loff_t pos, unsigned copied,
- struct page *page)
+int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
+ loff_t pos, unsigned copied,
+ struct page *page)
{
struct gfs2_inode *ip = GFS2_I(inode);
u64 to = pos + copied;
@@ -853,84 +724,6 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
}
/**
- * gfs2_write_end
- * @file: The file to write to
- * @mapping: The address space to write to
- * @pos: The file position
- * @len: The length of the data
- * @copied: How much was actually copied by the VFS
- * @page: The page that has been written
- * @fsdata: The fsdata (unused in GFS2)
- *
- * The main write_end function for GFS2. We just put our locking around the VFS
- * provided functions.
- *
- * Returns: errno
- */
-
-static int gfs2_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
-{
- struct inode *inode = page->mapping->host;
- struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
- struct buffer_head *dibh;
- int ret;
- struct gfs2_trans *tr = current->journal_info;
- BUG_ON(!tr);
-
- BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);
-
- ret = gfs2_meta_inode_buffer(ip, &dibh);
- if (unlikely(ret))
- goto out;
-
- if (gfs2_is_stuffed(ip)) {
- ret = gfs2_stuffed_write_end(inode, dibh, pos, copied, page);
- page = NULL;
- goto out2;
- }
-
- if (gfs2_is_jdata(ip))
- gfs2_page_add_databufs(ip, page, pos & ~PAGE_MASK, len);
- else
- gfs2_ordered_add_inode(ip);
-
- ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
- page = NULL;
- if (tr->tr_num_buf_new)
- __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
- else
- gfs2_trans_add_meta(ip->i_gl, dibh);
-
-out2:
- if (inode == sdp->sd_rindex) {
- adjust_fs_space(inode);
- sdp->sd_rindex_uptodate = 0;
- }
-
- brelse(dibh);
-out:
- if (page) {
- unlock_page(page);
- put_page(page);
- }
- gfs2_trans_end(sdp);
- gfs2_inplace_release(ip);
- if (ip->i_qadata && ip->i_qadata->qa_qd_num)
- gfs2_quota_unlock(ip);
- if (inode == sdp->sd_rindex) {
- gfs2_glock_dq(&m_ip->i_gh);
- gfs2_holder_uninit(&m_ip->i_gh);
- }
- gfs2_glock_dq(&ip->i_gh);
- gfs2_holder_uninit(&ip->i_gh);
- return ret;
-}
-
-/**
* jdata_set_page_dirty - Page dirtying function
* @page: The page to dirty
*
@@ -1023,96 +816,6 @@ out:
}
/**
- * gfs2_ok_for_dio - check that dio is valid on this file
- * @ip: The inode
- * @offset: The offset at which we are reading or writing
- *
- * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
- * 1 (to accept the i/o request)
- */
-static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
-{
- /*
- * Should we return an error here? I can't see that O_DIRECT for
- * a stuffed file makes any sense. For now we'll silently fall
- * back to buffered I/O
- */
- if (gfs2_is_stuffed(ip))
- return 0;
-
- if (offset >= i_size_read(&ip->i_inode))
- return 0;
- return 1;
-}
-
-
-
-static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- struct address_space *mapping = inode->i_mapping;
- struct gfs2_inode *ip = GFS2_I(inode);
- loff_t offset = iocb->ki_pos;
- struct gfs2_holder gh;
- int rv;
-
- /*
- * Deferred lock, even if its a write, since we do no allocation
- * on this path. All we need change is atime, and this lock mode
- * ensures that other nodes have flushed their buffered read caches
- * (i.e. their page cache entries for this inode). We do not,
- * unfortunately have the option of only flushing a range like
- * the VFS does.
- */
- gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
- rv = gfs2_glock_nq(&gh);
- if (rv)
- goto out_uninit;
- rv = gfs2_ok_for_dio(ip, offset);
- if (rv != 1)
- goto out; /* dio not valid, fall back to buffered i/o */
-
- /*
- * Now since we are holding a deferred (CW) lock at this point, you
- * might be wondering why this is ever needed. There is a case however
- * where we've granted a deferred local lock against a cached exclusive
- * glock. That is ok provided all granted local locks are deferred, but
- * it also means that it is possible to encounter pages which are
- * cached and possibly also mapped. So here we check for that and sort
- * them out ahead of the dio. The glock state machine will take care of
- * everything else.
- *
- * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
- * the first place, mapping->nr_pages will always be zero.
- */
- if (mapping->nrpages) {
- loff_t lstart = offset & ~(PAGE_SIZE - 1);
- loff_t len = iov_iter_count(iter);
- loff_t end = PAGE_ALIGN(offset + len) - 1;
-
- rv = 0;
- if (len == 0)
- goto out;
- if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
- unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
- rv = filemap_write_and_wait_range(mapping, lstart, end);
- if (rv)
- goto out;
- if (iov_iter_rw(iter) == WRITE)
- truncate_inode_pages_range(mapping, lstart, end);
- }
-
- rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
- gfs2_get_block_direct, NULL, NULL, 0);
-out:
- gfs2_glock_dq(&gh);
-out_uninit:
- gfs2_holder_uninit(&gh);
- return rv;
-}
-
-/**
* gfs2_releasepage - free the metadata associated with a page
* @page: the page that's being released
* @gfp_mask: passed from Linux VFS, ignored by us
@@ -1187,12 +890,10 @@ static const struct address_space_operations gfs2_writeback_aops = {
.writepages = gfs2_writepages,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .write_begin = gfs2_write_begin,
- .write_end = gfs2_write_end,
.bmap = gfs2_bmap,
.invalidatepage = gfs2_invalidatepage,
.releasepage = gfs2_releasepage,
- .direct_IO = gfs2_direct_IO,
+ .direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
@@ -1203,13 +904,11 @@ static const struct address_space_operations gfs2_ordered_aops = {
.writepages = gfs2_writepages,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .write_begin = gfs2_write_begin,
- .write_end = gfs2_write_end,
.set_page_dirty = __set_page_dirty_buffers,
.bmap = gfs2_bmap,
.invalidatepage = gfs2_invalidatepage,
.releasepage = gfs2_releasepage,
- .direct_IO = gfs2_direct_IO,
+ .direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
@@ -1220,8 +919,6 @@ static const struct address_space_operations gfs2_jdata_aops = {
.writepages = gfs2_jdata_writepages,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .write_begin = gfs2_write_begin,
- .write_end = gfs2_write_end,
.set_page_dirty = jdata_set_page_dirty,
.bmap = gfs2_bmap,
.invalidatepage = gfs2_invalidatepage,
diff --git a/fs/gfs2/aops.h b/fs/gfs2/aops.h
new file mode 100644
index 000000000000..fa8e5d0144dd
--- /dev/null
+++ b/fs/gfs2/aops.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Red Hat, Inc. All rights reserved.
+ */
+
+#ifndef __AOPS_DOT_H__
+#define __AOPS_DOT_H__
+
+#include "incore.h"
+
+extern int stuffed_readpage(struct gfs2_inode *ip, struct page *page);
+extern int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
+ loff_t pos, unsigned copied,
+ struct page *page);
+extern void adjust_fs_space(struct inode *inode);
+extern void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
+ unsigned int from, unsigned int len);
+
+#endif /* __AOPS_DOT_H__ */
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index ed6699705c13..03128ed1f34e 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -28,6 +28,7 @@
#include "trans.h"
#include "dir.h"
#include "util.h"
+#include "aops.h"
#include "trace_gfs2.h"
/* This doesn't need to be that large as max 64 bit pointers in a 4k
@@ -41,6 +42,8 @@ struct metapath {
int mp_aheight; /* actual height (lookup height) */
};
+static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
+
/**
* gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
* @ip: the inode
@@ -389,7 +392,7 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
return mp->mp_aheight - x - 1;
}
-static inline void release_metapath(struct metapath *mp)
+static void release_metapath(struct metapath *mp)
{
int i;
@@ -397,27 +400,23 @@ static inline void release_metapath(struct metapath *mp)
if (mp->mp_bh[i] == NULL)
break;
brelse(mp->mp_bh[i]);
+ mp->mp_bh[i] = NULL;
}
}
/**
* gfs2_extent_length - Returns length of an extent of blocks
- * @start: Start of the buffer
- * @len: Length of the buffer in bytes
- * @ptr: Current position in the buffer
- * @limit: Max extent length to return (0 = unlimited)
+ * @bh: The metadata block
+ * @ptr: Current position in @bh
+ * @limit: Max extent length to return
* @eob: Set to 1 if we hit "end of block"
*
- * If the first block is zero (unallocated) it will return the number of
- * unallocated blocks in the extent, otherwise it will return the number
- * of contiguous blocks in the extent.
- *
* Returns: The length of the extent (minimum of one block)
*/
-static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, size_t limit, int *eob)
+static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
{
- const __be64 *end = (start + len);
+ const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
const __be64 *first = ptr;
u64 d = be64_to_cpu(*ptr);
@@ -426,14 +425,11 @@ static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __b
ptr++;
if (ptr >= end)
break;
- if (limit && --limit == 0)
- break;
- if (d)
- d++;
+ d++;
} while(be64_to_cpu(*ptr) == d);
if (ptr >= end)
*eob = 1;
- return (ptr - first);
+ return ptr - first;
}
typedef const __be64 *(*gfs2_metadata_walker)(
@@ -609,11 +605,13 @@ enum alloc_state {
* ii) Indirect blocks to fill in lower part of the metadata tree
* iii) Data blocks
*
- * The function is in two parts. The first part works out the total
- * number of blocks which we need. The second part does the actual
- * allocation asking for an extent at a time (if enough contiguous free
- * blocks are available, there will only be one request per bmap call)
- * and uses the state machine to initialise the blocks in order.
+ * This function is called after gfs2_iomap_get, which works out the
+ * total number of blocks which we need via gfs2_alloc_size.
+ *
+ * We then do the actual allocation asking for an extent at a time (if
+ * enough contiguous free blocks are available, there will only be one
+ * allocation request per call) and use the state machine to initialise
+ * the blocks in order.
*
* Right now, this function will allocate at most one indirect block
* worth of data -- with a default block size of 4K, that's slightly
@@ -633,39 +631,26 @@ static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
struct buffer_head *dibh = mp->mp_bh[0];
u64 bn;
unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
- unsigned dblks = 0;
- unsigned ptrs_per_blk;
+ size_t dblks = iomap->length >> inode->i_blkbits;
const unsigned end_of_metadata = mp->mp_fheight - 1;
int ret;
enum alloc_state state;
__be64 *ptr;
__be64 zero_bn = 0;
- size_t maxlen = iomap->length >> inode->i_blkbits;
BUG_ON(mp->mp_aheight < 1);
BUG_ON(dibh == NULL);
+ BUG_ON(dblks < 1);
gfs2_trans_add_meta(ip->i_gl, dibh);
down_write(&ip->i_rw_mutex);
if (mp->mp_fheight == mp->mp_aheight) {
- struct buffer_head *bh;
- int eob;
-
- /* Bottom indirect block exists, find unalloced extent size */
- ptr = metapointer(end_of_metadata, mp);
- bh = mp->mp_bh[end_of_metadata];
- dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr,
- maxlen, &eob);
- BUG_ON(dblks < 1);
+ /* Bottom indirect block exists */
state = ALLOC_DATA;
} else {
/* Need to allocate indirect blocks */
- ptrs_per_blk = mp->mp_fheight > 1 ? sdp->sd_inptrs :
- sdp->sd_diptrs;
- dblks = min(maxlen, (size_t)(ptrs_per_blk -
- mp->mp_list[end_of_metadata]));
if (mp->mp_fheight == ip->i_height) {
/* Writing into existing tree, extend tree down */
iblks = mp->mp_fheight - mp->mp_aheight;
@@ -750,6 +735,7 @@ static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
}
} while (iomap->addr == IOMAP_NULL_ADDR);
+ iomap->type = IOMAP_MAPPED;
iomap->length = (u64)dblks << inode->i_blkbits;
ip->i_height = mp->mp_fheight;
gfs2_add_inode_blocks(&ip->i_inode, alloced);
@@ -759,18 +745,51 @@ out:
return ret;
}
-static void gfs2_stuffed_iomap(struct inode *inode, struct iomap *iomap)
+#define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
+
+/**
+ * gfs2_alloc_size - Compute the maximum allocation size
+ * @inode: The inode
+ * @mp: The metapath
+ * @size: Requested size in blocks
+ *
+ * Compute the maximum size of the next allocation at @mp.
+ *
+ * Returns: size in blocks
+ */
+static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
{
struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ const __be64 *first, *ptr, *end;
- iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
- sizeof(struct gfs2_dinode);
- iomap->offset = 0;
- iomap->length = i_size_read(inode);
- iomap->type = IOMAP_INLINE;
-}
+ /*
+ * For writes to stuffed files, this function is called twice via
+ * gfs2_iomap_get, before and after unstuffing. The size we return the
+ * first time needs to be large enough to get the reservation and
+ * allocation sizes right. The size we return the second time must
+ * be exact or else gfs2_iomap_alloc won't do the right thing.
+ */
-#define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
+ if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
+ unsigned int maxsize = mp->mp_fheight > 1 ?
+ sdp->sd_inptrs : sdp->sd_diptrs;
+ maxsize -= mp->mp_list[mp->mp_fheight - 1];
+ if (size > maxsize)
+ size = maxsize;
+ return size;
+ }
+
+ first = metapointer(ip->i_height - 1, mp);
+ end = metaend(ip->i_height - 1, mp);
+ if (end - first > size)
+ end = first + size;
+ for (ptr = first; ptr < end; ptr++) {
+ if (*ptr)
+ break;
+ }
+ return ptr - first;
+}
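
gfs2_alloc_size() caps the request at the indirect-block boundary while the metadata tree is still short, and otherwise counts the run of zero (unallocated) pointers at the current position -- the complement of gfs2_extent_length() above. The counting tail reduces to the following userspace sketch (the kernel walks __be64 metapointers instead):

#include <stdint.h>

/* Number of leading zero (unallocated) pointers, capped at max. */
static uint64_t zero_run(const uint64_t *first, const uint64_t *end,
			 uint64_t max)
{
	const uint64_t *ptr;

	if ((uint64_t)(end - first) > max)
		end = first + max;
	for (ptr = first; ptr < end; ptr++)
		if (*ptr)
			break;
	return ptr - first;
}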
/**
* gfs2_iomap_get - Map blocks from an inode to disk blocks
@@ -789,37 +808,63 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ loff_t size = i_size_read(inode);
__be64 *ptr;
sector_t lblock;
sector_t lblock_stop;
int ret;
int eob;
u64 len;
- struct buffer_head *bh;
+ struct buffer_head *dibh = NULL, *bh;
u8 height;
if (!length)
return -EINVAL;
+ down_read(&ip->i_rw_mutex);
+
+ ret = gfs2_meta_inode_buffer(ip, &dibh);
+ if (ret)
+ goto unlock;
+ iomap->private = dibh;
+
if (gfs2_is_stuffed(ip)) {
- if (flags & IOMAP_REPORT) {
- if (pos >= i_size_read(inode))
- return -ENOENT;
- gfs2_stuffed_iomap(inode, iomap);
- return 0;
+ if (flags & IOMAP_WRITE) {
+ loff_t max_size = gfs2_max_stuffed_size(ip);
+
+ if (pos + length > max_size)
+ goto unstuff;
+ iomap->length = max_size;
+ } else {
+ if (pos >= size) {
+ if (flags & IOMAP_REPORT) {
+ ret = -ENOENT;
+ goto unlock;
+ } else {
+ /* report a hole */
+ iomap->offset = pos;
+ iomap->length = length;
+ goto do_alloc;
+ }
+ }
+ iomap->length = size;
}
- BUG_ON(!(flags & IOMAP_WRITE));
+ iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
+ sizeof(struct gfs2_dinode);
+ iomap->type = IOMAP_INLINE;
+ iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
+ goto out;
}
+
+unstuff:
lblock = pos >> inode->i_blkbits;
iomap->offset = lblock << inode->i_blkbits;
lblock_stop = (pos + length - 1) >> inode->i_blkbits;
len = lblock_stop - lblock + 1;
+ iomap->length = len << inode->i_blkbits;
- down_read(&ip->i_rw_mutex);
-
- ret = gfs2_meta_inode_buffer(ip, &mp->mp_bh[0]);
- if (ret)
- goto unlock;
+ get_bh(dibh);
+ mp->mp_bh[0] = dibh;
height = ip->i_height;
while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
@@ -840,12 +885,12 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
goto do_alloc;
bh = mp->mp_bh[ip->i_height - 1];
- len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, len, &eob);
+ len = gfs2_extent_length(bh, ptr, len, &eob);
iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
iomap->length = len << inode->i_blkbits;
iomap->type = IOMAP_MAPPED;
- iomap->flags = IOMAP_F_MERGED;
+ iomap->flags |= IOMAP_F_MERGED;
if (eob)
iomap->flags |= IOMAP_F_GFS2_BOUNDARY;
@@ -853,25 +898,185 @@ out:
iomap->bdev = inode->i_sb->s_bdev;
unlock:
up_read(&ip->i_rw_mutex);
+ if (ret && dibh)
+ brelse(dibh);
return ret;
do_alloc:
iomap->addr = IOMAP_NULL_ADDR;
- iomap->length = len << inode->i_blkbits;
iomap->type = IOMAP_HOLE;
- iomap->flags = 0;
if (flags & IOMAP_REPORT) {
- loff_t size = i_size_read(inode);
if (pos >= size)
ret = -ENOENT;
else if (height == ip->i_height)
ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
else
iomap->length = size - pos;
+ } else if (flags & IOMAP_WRITE) {
+ u64 alloc_size;
+
+ if (flags & IOMAP_DIRECT)
+ goto out; /* (see gfs2_file_direct_write) */
+
+ len = gfs2_alloc_size(inode, mp, len);
+ alloc_size = len << inode->i_blkbits;
+ if (alloc_size < iomap->length)
+ iomap->length = alloc_size;
+ } else {
+ if (pos < size && height == ip->i_height)
+ ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
}
goto out;
}
+static int gfs2_write_lock(struct inode *inode)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ int error;
+
+ gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
+ error = gfs2_glock_nq(&ip->i_gh);
+ if (error)
+ goto out_uninit;
+ if (&ip->i_inode == sdp->sd_rindex) {
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+
+ error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
+ GL_NOCACHE, &m_ip->i_gh);
+ if (error)
+ goto out_unlock;
+ }
+ return 0;
+
+out_unlock:
+ gfs2_glock_dq(&ip->i_gh);
+out_uninit:
+ gfs2_holder_uninit(&ip->i_gh);
+ return error;
+}
+
+static void gfs2_write_unlock(struct inode *inode)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+
+ if (&ip->i_inode == sdp->sd_rindex) {
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+
+ gfs2_glock_dq_uninit(&m_ip->i_gh);
+ }
+ gfs2_glock_dq_uninit(&ip->i_gh);
+}
+
+static void gfs2_iomap_journaled_page_done(struct inode *inode, loff_t pos,
+ unsigned copied, struct page *page,
+ struct iomap *iomap)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+
+ gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
+}
+
+static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
+ loff_t length, unsigned flags,
+ struct iomap *iomap)
+{
+ struct metapath mp = { .mp_aheight = 1, };
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
+ bool unstuff, alloc_required;
+ int ret;
+
+ ret = gfs2_write_lock(inode);
+ if (ret)
+ return ret;
+
+ unstuff = gfs2_is_stuffed(ip) &&
+ pos + length > gfs2_max_stuffed_size(ip);
+
+ ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
+ if (ret)
+ goto out_release;
+
+ alloc_required = unstuff || iomap->type == IOMAP_HOLE;
+
+ if (alloc_required || gfs2_is_jdata(ip))
+ gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
+ &ind_blocks);
+
+ if (alloc_required) {
+ struct gfs2_alloc_parms ap = {
+ .target = data_blocks + ind_blocks
+ };
+
+ ret = gfs2_quota_lock_check(ip, &ap);
+ if (ret)
+ goto out_release;
+
+ ret = gfs2_inplace_reserve(ip, &ap);
+ if (ret)
+ goto out_qunlock;
+ }
+
+ rblocks = RES_DINODE + ind_blocks;
+ if (gfs2_is_jdata(ip))
+ rblocks += data_blocks;
+ if (ind_blocks || data_blocks)
+ rblocks += RES_STATFS + RES_QUOTA;
+ if (inode == sdp->sd_rindex)
+ rblocks += 2 * RES_STATFS;
+ if (alloc_required)
+ rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
+
+ ret = gfs2_trans_begin(sdp, rblocks, iomap->length >> inode->i_blkbits);
+ if (ret)
+ goto out_trans_fail;
+
+ if (unstuff) {
+ ret = gfs2_unstuff_dinode(ip, NULL);
+ if (ret)
+ goto out_trans_end;
+ release_metapath(&mp);
+ brelse(iomap->private);
+ iomap->private = NULL;
+ ret = gfs2_iomap_get(inode, iomap->offset, iomap->length,
+ flags, iomap, &mp);
+ if (ret)
+ goto out_trans_end;
+ }
+
+ if (iomap->type == IOMAP_HOLE) {
+ ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
+ if (ret) {
+ gfs2_trans_end(sdp);
+ gfs2_inplace_release(ip);
+ punch_hole(ip, iomap->offset, iomap->length);
+ goto out_qunlock;
+ }
+ }
+ release_metapath(&mp);
+ if (gfs2_is_jdata(ip))
+ iomap->page_done = gfs2_iomap_journaled_page_done;
+ return 0;
+
+out_trans_end:
+ gfs2_trans_end(sdp);
+out_trans_fail:
+ if (alloc_required)
+ gfs2_inplace_release(ip);
+out_qunlock:
+ if (alloc_required)
+ gfs2_quota_unlock(ip);
+out_release:
+ if (iomap->private)
+ brelse(iomap->private);
+ release_metapath(&mp);
+ gfs2_write_unlock(inode);
+ return ret;
+}
+
static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
unsigned flags, struct iomap *iomap)
{
@@ -879,22 +1084,79 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
struct metapath mp = { .mp_aheight = 1, };
int ret;
+ iomap->flags |= IOMAP_F_BUFFER_HEAD;
+
trace_gfs2_iomap_start(ip, pos, length, flags);
- if (flags & IOMAP_WRITE) {
- ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
- if (!ret && iomap->type == IOMAP_HOLE)
- ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
- release_metapath(&mp);
+ if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) {
+ ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap);
} else {
ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
release_metapath(&mp);
+ /*
+ * Silently fall back to buffered I/O for stuffed files or if
+ * we've hit a hole (see gfs2_file_direct_write).
+ */
+ if ((flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT) &&
+ iomap->type != IOMAP_MAPPED)
+ ret = -ENOTBLK;
}
trace_gfs2_iomap_end(ip, iomap, ret);
return ret;
}
+static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
+ ssize_t written, unsigned flags, struct iomap *iomap)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_trans *tr = current->journal_info;
+ struct buffer_head *dibh = iomap->private;
+
+ if ((flags & (IOMAP_WRITE | IOMAP_DIRECT)) != IOMAP_WRITE)
+ goto out;
+
+ if (iomap->type != IOMAP_INLINE) {
+ gfs2_ordered_add_inode(ip);
+
+ if (tr->tr_num_buf_new)
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ else
+ gfs2_trans_add_meta(ip->i_gl, dibh);
+ }
+
+ if (inode == sdp->sd_rindex) {
+ adjust_fs_space(inode);
+ sdp->sd_rindex_uptodate = 0;
+ }
+
+ gfs2_trans_end(sdp);
+ gfs2_inplace_release(ip);
+
+ if (length != written && (iomap->flags & IOMAP_F_NEW)) {
+ /* Deallocate blocks that were just allocated. */
+ loff_t blockmask = i_blocksize(inode) - 1;
+ loff_t end = (pos + length) & ~blockmask;
+
+ pos = (pos + written + blockmask) & ~blockmask;
+ if (pos < end) {
+ truncate_pagecache_range(inode, pos, end - 1);
+ punch_hole(ip, pos, end - pos);
+ }
+ }
+
+ if (ip->i_qadata && ip->i_qadata->qa_qd_num)
+ gfs2_quota_unlock(ip);
+ gfs2_write_unlock(inode);
+
+out:
+ if (dibh)
+ brelse(dibh);
+ return 0;
+}
+
const struct iomap_ops gfs2_iomap_ops = {
.iomap_begin = gfs2_iomap_begin,
+ .iomap_end = gfs2_iomap_end,
};
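
On a short write, gfs2_iomap_end() frees only the freshly allocated blocks the data never reached: the written position is rounded up to a block boundary, the end of the mapping rounded down, and anything left in between is punched out. The boundary arithmetic, for a power-of-two block size (userspace sketch):

#include <stdint.h>
#include <assert.h>

/* Block-boundary trim window after a short write. */
static void trim_window(int64_t pos, int64_t length, int64_t written,
			int64_t bsize, int64_t *start, int64_t *end)
{
	int64_t mask = bsize - 1;

	*end = (pos + length) & ~mask;		 /* round down */
	*start = (pos + written + mask) & ~mask; /* round up   */
	/* punch [*start, *end) only if *start < *end */
}

int main(void)
{
	int64_t s, e;

	trim_window(/*pos*/ 0, /*length*/ 8192, /*written*/ 100, 4096, &s, &e);
	assert(s == 4096 && e == 8192);	/* second block never written */
	return 0;
}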
/**
@@ -941,12 +1203,6 @@ int gfs2_block_map(struct inode *inode, sector_t lblock,
} else {
ret = gfs2_iomap_get(inode, pos, length, 0, &iomap, &mp);
release_metapath(&mp);
-
- /* Return unmapped buffer beyond the end of file. */
- if (ret == -ENOENT) {
- ret = 0;
- goto out;
- }
}
if (ret)
goto out;
@@ -2060,7 +2316,7 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
lblock = offset >> shift;
lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
- if (lblock_stop > end_of_file)
+ if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
return 1;
size = (lblock_stop - lblock) << shift;
@@ -2154,11 +2410,11 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
if (error)
goto out;
} else {
- unsigned int start_off, end_off, blocksize;
+ unsigned int start_off, end_len, blocksize;
blocksize = i_blocksize(inode);
start_off = offset & (blocksize - 1);
- end_off = (offset + length) & (blocksize - 1);
+ end_len = (offset + length) & (blocksize - 1);
if (start_off) {
unsigned int len = length;
if (length > blocksize - start_off)
@@ -2167,11 +2423,11 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
if (error)
goto out;
if (start_off + length < blocksize)
- end_off = 0;
+ end_len = 0;
}
- if (end_off) {
+ if (end_len) {
error = gfs2_block_zero_range(inode,
- offset + length - end_off, end_off);
+ offset + length - end_len, end_len);
if (error)
goto out;
}
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index d97ad89955d1..e37002560c11 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1011,7 +1011,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
u64 bn, leaf_no;
__be64 *lp;
u32 index;
- int x, moved = 0;
+ int x;
int error;
index = name->hash >> (32 - dip->i_depth);
@@ -1113,8 +1113,6 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
if (!prev)
prev = dent;
-
- moved = 1;
} else {
prev = dent;
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 7137db7b0119..08369c6cd127 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -26,10 +26,12 @@
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>
+#include <linux/backing-dev.h>
#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
+#include "aops.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
@@ -387,7 +389,7 @@ static int gfs2_allocate_page_backing(struct page *page)
* blocks allocated on disk to back that page.
*/
-static int gfs2_page_mkwrite(struct vm_fault *vmf)
+static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = file_inode(vmf->vma->vm_file);
@@ -688,12 +690,83 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
return ret ? ret : ret1;
}
+static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
+ size_t count = iov_iter_count(to);
+ struct gfs2_holder gh;
+ ssize_t ret;
+
+ if (!count)
+ return 0; /* skip atime */
+
+ gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
+ ret = gfs2_glock_nq(&gh);
+ if (ret)
+ goto out_uninit;
+
+ ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL);
+
+ gfs2_glock_dq(&gh);
+out_uninit:
+ gfs2_holder_uninit(&gh);
+ return ret;
+}
+
+static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ struct gfs2_inode *ip = GFS2_I(inode);
+ size_t len = iov_iter_count(from);
+ loff_t offset = iocb->ki_pos;
+ struct gfs2_holder gh;
+ ssize_t ret;
+
+ /*
+ * Deferred lock, even if it's a write, since we do no allocation on
+ * this path. All we need to change is the atime, and this lock mode
+ * ensures that other nodes have flushed their buffered read caches
+ * (i.e. their page cache entries for this inode). We do not,
+ * unfortunately, have the option of only flushing a range like the
+ * VFS does.
+ */
+ gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
+ ret = gfs2_glock_nq(&gh);
+ if (ret)
+ goto out_uninit;
+
+ /* Silently fall back to buffered I/O when writing beyond EOF */
+ if (offset + len > i_size_read(&ip->i_inode))
+ goto out;
+
+ ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL);
+
+out:
+ gfs2_glock_dq(&gh);
+out_uninit:
+ gfs2_holder_uninit(&gh);
+ return ret;
+}
+
+static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ ssize_t ret;
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ ret = gfs2_file_direct_read(iocb, to);
+ if (likely(ret != -ENOTBLK))
+ return ret;
+ iocb->ki_flags &= ~IOCB_DIRECT;
+ }
+ return generic_file_read_iter(iocb, to);
+}
+
/**
* gfs2_file_write_iter - Perform a write to a file
* @iocb: The io context
- * @iov: The data to write
- * @nr_segs: Number of @iov segments
- * @pos: The file position
+ * @from: The data to write
*
* We have to do a lock/unlock here to refresh the inode size for
* O_APPEND writes, otherwise we can land up writing at the wrong
@@ -705,8 +778,9 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
- struct gfs2_inode *ip = GFS2_I(file_inode(file));
- int ret;
+ struct inode *inode = file_inode(file);
+ struct gfs2_inode *ip = GFS2_I(inode);
+ ssize_t written = 0, ret;
ret = gfs2_rsqa_alloc(ip);
if (ret)
@@ -723,7 +797,71 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
gfs2_glock_dq_uninit(&gh);
}
- return generic_file_write_iter(iocb, from);
+ inode_lock(inode);
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ goto out;
+
+ /* We can write back this queue in page reclaim */
+ current->backing_dev_info = inode_to_bdi(inode);
+
+ ret = file_remove_privs(file);
+ if (ret)
+ goto out2;
+
+ ret = file_update_time(file);
+ if (ret)
+ goto out2;
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ struct address_space *mapping = file->f_mapping;
+ loff_t pos, endbyte;
+ ssize_t buffered;
+
+ written = gfs2_file_direct_write(iocb, from);
+ if (written < 0 || !iov_iter_count(from))
+ goto out2;
+
+ ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+ if (unlikely(ret < 0))
+ goto out2;
+ buffered = ret;
+
+ /*
+ * We need to ensure that the page cache pages are written to
+ * disk and invalidated to preserve the expected O_DIRECT
+ * semantics.
+ */
+ pos = iocb->ki_pos;
+ endbyte = pos + buffered - 1;
+ ret = filemap_write_and_wait_range(mapping, pos, endbyte);
+ if (!ret) {
+ iocb->ki_pos += buffered;
+ written += buffered;
+ invalidate_mapping_pages(mapping,
+ pos >> PAGE_SHIFT,
+ endbyte >> PAGE_SHIFT);
+ } else {
+ /*
+ * We don't know how much we wrote, so just return
+ * the number of bytes which were direct-written
+ */
+ }
+ } else {
+ ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+ if (likely(ret > 0))
+ iocb->ki_pos += ret;
+ }
+
+out2:
+ current->backing_dev_info = NULL;
+out:
+ inode_unlock(inode);
+ if (likely(ret > 0)) {
+ /* Handle various SYNC-type writes */
+ ret = generic_write_sync(iocb, ret);
+ }
+ return written ? written : ret;
}
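An illustrative trace of the direct-write fallback above, assuming PAGE_SIZE is 4096: if the buffered tail starts at iocb->ki_pos == 4096 and writes 5000 bytes, endbyte is 4096 + 5000 - 1 = 9095, so filemap_write_and_wait_range() flushes bytes 4096..9095 and invalidate_mapping_pages() drops page indices 1 through 2 (4096 >> PAGE_SHIFT through 9095 >> PAGE_SHIFT), after which the return value counts both the direct and the buffered portions.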
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
@@ -733,7 +871,6 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
struct gfs2_inode *ip = GFS2_I(inode);
loff_t end = offset + len;
struct buffer_head *dibh;
- struct iomap iomap = { };
int error;
error = gfs2_meta_inode_buffer(ip, &dibh);
@@ -749,12 +886,14 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
}
while (offset < end) {
+ struct iomap iomap = { };
+
error = gfs2_iomap_get_alloc(inode, offset, end - offset,
&iomap);
if (error)
goto out;
offset = iomap.offset + iomap.length;
- if (iomap.type != IOMAP_HOLE)
+ if (!(iomap.flags & IOMAP_F_NEW))
continue;
error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
iomap.length >> inode->i_blkbits,
@@ -1125,7 +1264,7 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
const struct file_operations gfs2_file_fops = {
.llseek = gfs2_llseek,
- .read_iter = generic_file_read_iter,
+ .read_iter = gfs2_file_read_iter,
.write_iter = gfs2_file_write_iter,
.unlocked_ioctl = gfs2_ioctl,
.mmap = gfs2_mmap,
@@ -1155,7 +1294,7 @@ const struct file_operations gfs2_dir_fops = {
const struct file_operations gfs2_file_fops_nolock = {
.llseek = gfs2_llseek,
- .read_iter = generic_file_read_iter,
+ .read_iter = gfs2_file_read_iter,
.write_iter = gfs2_file_write_iter,
.unlocked_ioctl = gfs2_ioctl,
.mmap = gfs2_mmap,
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index d2ad817e089f..b96d39c28e17 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -65,6 +65,27 @@ struct gfs2_log_operations {
#define GBF_FULL 1
+/**
+ * Clone bitmaps (bi_clone):
+ *
+ * - When a block is freed, we remember the previous state of the block in the
+ * clone bitmap, and only mark the block as free in the real bitmap.
+ *
+ * - When looking for a block to allocate, we check for a free block in the
+ * clone bitmap, and if no clone bitmap exists, in the real bitmap.
+ *
+ * - For allocating a block, we mark it as allocated in the real bitmap, and if
+ * a clone bitmap exists, also in the clone bitmap.
+ *
+ * - At the end of a log_flush, we copy the real bitmap into the clone bitmap
+ * to make the clone bitmap reflect the current allocation state.
+ * (Alternatively, we could remove the clone bitmap.)
+ *
+ * The clone bitmaps are in-core only, and are never written to disk.
+ *
+ * These steps ensure that blocks which have been freed in a transaction cannot
+ * be reallocated in that same transaction.
+ */
struct gfs2_bitmap {
struct buffer_head *bi_bh;
char *bi_clone;
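As a sketch only: the clone-bitmap lifecycle described in the comment above can be modeled outside the kernel with one byte per block instead of GFS2's two bits per block. Everything below is a hypothetical toy model, not GFS2's implementation, and error handling is omitted for brevity.

#include <stdlib.h>
#include <string.h>

enum { BLKST_FREE = 0, BLKST_USED = 1 };

struct toy_rgrp {
	unsigned char *real;	/* stands in for bi_bh->b_data */
	unsigned char *clone;	/* stands in for bi_clone; NULL until first free */
	unsigned int nblocks;
};

/* Freeing: the real bitmap says free, the clone keeps the pre-free state. */
static void toy_free_block(struct toy_rgrp *rg, unsigned int b)
{
	if (!rg->clone) {
		rg->clone = malloc(rg->nblocks);
		memcpy(rg->clone, rg->real, rg->nblocks);
	}
	rg->real[b] = BLKST_FREE;
}

/* Allocating: search the clone if it exists, so blocks freed in the
 * current transaction still look busy; mark both bitmaps used. */
static int toy_alloc_block(struct toy_rgrp *rg, unsigned int *out)
{
	unsigned char *search = rg->clone ? rg->clone : rg->real;
	unsigned int b;

	for (b = 0; b < rg->nblocks; b++) {
		if (search[b] == BLKST_FREE) {
			rg->real[b] = BLKST_USED;
			if (rg->clone)
				rg->clone[b] = BLKST_USED;
			*out = b;
			return 0;
		}
	}
	return -1;
}

/* Log flush: the free is now safely journaled, so the clone catches up
 * with the real bitmap (it could just as well be dropped). */
static void toy_log_flush(struct toy_rgrp *rg)
{
	if (rg->clone)
		memcpy(rg->clone, rg->real, rg->nblocks);
}

int main(void)
{
	unsigned char real[8];
	struct toy_rgrp rg = { .real = real, .nblocks = 8 };
	unsigned int b;

	memset(real, BLKST_USED, sizeof(real));
	toy_free_block(&rg, 3);			/* freed in this "transaction" */
	if (toy_alloc_block(&rg, &b) == -1)	/* fails: clone still says used */
		toy_log_flush(&rg);		/* transaction hits the log */
	toy_alloc_block(&rg, &b);		/* now succeeds with b == 3 */
	free(rg.clone);
	return 0;
}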
@@ -295,7 +316,6 @@ struct gfs2_blkreserv {
struct rb_node rs_node; /* link to other block reservations */
struct gfs2_rbm rs_rbm; /* Start of reservation */
u32 rs_free; /* how many blocks are still free */
- u64 rs_inum; /* Inode number for reservation */
};
/*
@@ -398,7 +418,6 @@ struct gfs2_inode {
struct gfs2_holder i_gh; /* for prepare/commit_write only */
struct gfs2_qadata *i_qadata; /* quota allocation data */
struct gfs2_blkreserv i_res; /* rgrp multi-block reservation */
- struct gfs2_rgrpd *i_rgd;
u64 i_goal; /* goal block for allocations */
struct rw_semaphore i_rw_mutex;
struct list_head i_ordered;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index feda55f67050..648f0ca1ad57 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -580,7 +580,7 @@ static int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
struct file *file,
umode_t mode, dev_t dev, const char *symname,
- unsigned int size, int excl, int *opened)
+ unsigned int size, int excl)
{
const struct qstr *name = &dentry->d_name;
struct posix_acl *default_acl, *acl;
@@ -626,7 +626,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = 0;
if (file) {
if (S_ISREG(inode->i_mode))
- error = finish_open(file, dentry, gfs2_open_common, opened);
+ error = finish_open(file, dentry, gfs2_open_common);
else
error = finish_no_open(file, NULL);
}
@@ -767,8 +767,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
mark_inode_dirty(inode);
d_instantiate(dentry, inode);
if (file) {
- *opened |= FILE_CREATED;
- error = finish_open(file, dentry, gfs2_open_common, opened);
+ file->f_mode |= FMODE_CREATED;
+ error = finish_open(file, dentry, gfs2_open_common);
}
gfs2_glock_dq_uninit(ghs);
gfs2_glock_dq_uninit(ghs + 1);
@@ -822,7 +822,7 @@ fail:
static int gfs2_create(struct inode *dir, struct dentry *dentry,
umode_t mode, bool excl)
{
- return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl, NULL);
+ return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl);
}
/**
@@ -830,14 +830,13 @@ static int gfs2_create(struct inode *dir, struct dentry *dentry,
* @dir: The directory inode
* @dentry: The dentry of the new inode
* @file: File to be opened
- * @opened: atomic_open flags
*
*
* Returns: errno
*/
static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
- struct file *file, int *opened)
+ struct file *file)
{
struct inode *inode;
struct dentry *d;
@@ -866,7 +865,7 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
return d;
}
if (file && S_ISREG(inode->i_mode))
- error = finish_open(file, dentry, gfs2_open_common, opened);
+ error = finish_open(file, dentry, gfs2_open_common);
gfs2_glock_dq_uninit(&gh);
if (error) {
@@ -879,7 +878,7 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
unsigned flags)
{
- return __gfs2_lookup(dir, dentry, NULL, NULL);
+ return __gfs2_lookup(dir, dentry, NULL);
}
/**
@@ -1189,7 +1188,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
if (size >= gfs2_max_stuffed_size(GFS2_I(dir)))
return -ENAMETOOLONG;
- return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0, NULL);
+ return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0);
}
/**
@@ -1204,7 +1203,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
unsigned dsize = gfs2_max_stuffed_size(GFS2_I(dir));
- return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0, NULL);
+ return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0);
}
/**
@@ -1219,7 +1218,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t dev)
{
- return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0, NULL);
+ return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0);
}
/**
@@ -1229,14 +1228,13 @@ static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
* @file: The proposed new struct file
* @flags: open flags
* @mode: File mode
- * @opened: Flag to say whether the file has been opened or not
*
* Returns: error code or 0 for success
*/
static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags,
- umode_t mode, int *opened)
+ umode_t mode)
{
struct dentry *d;
bool excl = !!(flags & O_EXCL);
@@ -1244,13 +1242,13 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
if (!d_in_lookup(dentry))
goto skip_lookup;
- d = __gfs2_lookup(dir, dentry, file, opened);
+ d = __gfs2_lookup(dir, dentry, file);
if (IS_ERR(d))
return PTR_ERR(d);
if (d != NULL)
dentry = d;
if (d_really_is_positive(dentry)) {
- if (!(*opened & FILE_OPENED))
+ if (!(file->f_mode & FMODE_OPENED))
return finish_no_open(file, d);
dput(d);
return 0;
@@ -1262,7 +1260,7 @@ skip_lookup:
if (!(flags & O_CREAT))
return -ENOENT;
- return gfs2_create_inode(dir, dentry, file, S_IFREG | mode, 0, NULL, 0, excl, opened);
+ return gfs2_create_inode(dir, dentry, file, S_IFREG | mode, 0, NULL, 0, excl);
}
/*
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 006c6164f759..ac7caa267ed6 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -821,6 +821,13 @@ restart:
goto fail;
}
+ /**
+ * If we're a spectator, we don't want to take the lock in EX because
+ * we cannot do the first-mount responsibility it implies: recovery.
+ */
+ if (sdp->sd_args.ar_spectator)
+ goto locks_done;
+
error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
if (!error) {
mounted_mode = DLM_LOCK_EX;
@@ -896,9 +903,16 @@ locks_done:
if (lvb_gen < mount_gen) {
/* wait for mounted nodes to update control_lock lvb to our
generation, which might include new recovery bits set */
- fs_info(sdp, "control_mount wait1 block %u start %u mount %u "
- "lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
- lvb_gen, ls->ls_recover_flags);
+ if (sdp->sd_args.ar_spectator) {
+ fs_info(sdp, "Recovery is required. Waiting for a "
+ "non-spectator to mount.\n");
+ msleep_interruptible(1000);
+ } else {
+ fs_info(sdp, "control_mount wait1 block %u start %u "
+ "mount %u lvb %u flags %lx\n", block_gen,
+ start_gen, mount_gen, lvb_gen,
+ ls->ls_recover_flags);
+ }
spin_unlock(&ls->ls_recover_spin);
goto restart;
}
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 0248835625f1..ee20ea42e7b5 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -92,7 +92,8 @@ static void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
struct writeback_control *wbc,
- struct gfs2_trans *tr)
+ struct gfs2_trans *tr,
+ bool *withdraw)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
@@ -107,8 +108,10 @@ __acquires(&sdp->sd_ail_lock)
gfs2_assert(sdp, bd->bd_tr == tr);
if (!buffer_busy(bh)) {
- if (!buffer_uptodate(bh))
+ if (!buffer_uptodate(bh)) {
gfs2_io_error_bh(sdp, bh);
+ *withdraw = true;
+ }
list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
continue;
}
@@ -148,6 +151,7 @@ void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
struct list_head *head = &sdp->sd_ail1_list;
struct gfs2_trans *tr;
struct blk_plug plug;
+ bool withdraw = false;
trace_gfs2_ail_flush(sdp, wbc, 1);
blk_start_plug(&plug);
@@ -156,11 +160,13 @@ restart:
list_for_each_entry_reverse(tr, head, tr_list) {
if (wbc->nr_to_write <= 0)
break;
- if (gfs2_ail1_start_one(sdp, wbc, tr))
+ if (gfs2_ail1_start_one(sdp, wbc, tr, &withdraw))
goto restart;
}
spin_unlock(&sdp->sd_ail_lock);
blk_finish_plug(&plug);
+ if (withdraw)
+ gfs2_lm_withdraw(sdp, NULL);
trace_gfs2_ail_flush(sdp, wbc, 0);
}
@@ -188,7 +194,8 @@ static void gfs2_ail1_start(struct gfs2_sbd *sdp)
*
*/
-static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+ bool *withdraw)
{
struct gfs2_bufdata *bd, *s;
struct buffer_head *bh;
@@ -199,11 +206,12 @@ static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
gfs2_assert(sdp, bd->bd_tr == tr);
if (buffer_busy(bh))
continue;
- if (!buffer_uptodate(bh))
+ if (!buffer_uptodate(bh)) {
gfs2_io_error_bh(sdp, bh);
+ *withdraw = true;
+ }
list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
}
-
}
/**
@@ -218,10 +226,11 @@ static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
struct gfs2_trans *tr, *s;
int oldest_tr = 1;
int ret;
+ bool withdraw = false;
spin_lock(&sdp->sd_ail_lock);
list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
- gfs2_ail1_empty_one(sdp, tr);
+ gfs2_ail1_empty_one(sdp, tr, &withdraw);
if (list_empty(&tr->tr_ail1_list) && oldest_tr)
list_move(&tr->tr_list, &sdp->sd_ail2_list);
else
@@ -230,6 +239,9 @@ static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
ret = list_empty(&sdp->sd_ail1_list);
spin_unlock(&sdp->sd_ail_lock);
+ if (withdraw)
+ gfs2_lm_withdraw(sdp, "fatal: I/O error(s)\n");
+
return ret;
}
@@ -689,7 +701,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
hash = ~crc32(~0, lh, LH_V1_SIZE);
lh->lh_hash = cpu_to_be32(hash);
- tv = current_kernel_time64();
+ ktime_get_coarse_real_ts64(&tv);
lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
lh->lh_sec = cpu_to_be64(tv.tv_sec);
addr = gfs2_log_bmap(sdp);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 4d6567990baf..f2567f958d00 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -49,7 +49,7 @@ void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
if (test_set_buffer_pinned(bh))
gfs2_assert_withdraw(sdp, 0);
if (!buffer_uptodate(bh))
- gfs2_io_error_bh(sdp, bh);
+ gfs2_io_error_bh_wd(sdp, bh);
bd = bh->b_private;
/* If this buffer is in the AIL and it has already been written
* to in-place disk block, remove it from the AIL.
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 52de1036d9f9..be9c0bf697fe 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -293,7 +293,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
if (unlikely(!buffer_uptodate(bh))) {
struct gfs2_trans *tr = current->journal_info;
if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
- gfs2_io_error_bh(sdp, bh);
+ gfs2_io_error_bh_wd(sdp, bh);
brelse(bh);
*bhp = NULL;
return -EIO;
@@ -320,7 +320,7 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
if (!buffer_uptodate(bh)) {
struct gfs2_trans *tr = current->journal_info;
if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
- gfs2_io_error_bh(sdp, bh);
+ gfs2_io_error_bh_wd(sdp, bh);
return -EIO;
}
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index d8b622c375ab..0f501f938d1c 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -413,12 +413,13 @@ void gfs2_recover_func(struct work_struct *work)
ktime_t t_start, t_jlck, t_jhd, t_tlck, t_rep;
int ro = 0;
unsigned int pass;
- int error;
+ int error = 0;
int jlocked = 0;
t_start = ktime_get();
- if (sdp->sd_args.ar_spectator ||
- (jd->jd_jid != sdp->sd_lockstruct.ls_jid)) {
+ if (sdp->sd_args.ar_spectator)
+ goto fail;
+ if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
jd->jd_jid);
jlocked = 1;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 33abcf29bc05..1ad3256b9cbc 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -123,17 +123,26 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
/**
* gfs2_testbit - test a bit in the bitmaps
* @rbm: The bit to test
+ * @use_clone: If true, test the clone bitmap, not the official bitmap.
+ *
+ * Some callers like gfs2_unaligned_extlen need to test the clone bitmaps,
+ * not the "real" bitmaps, to avoid allocating recently freed blocks.
*
* Returns: The two bit block state of the requested bit
*/
-static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
+static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm, bool use_clone)
{
struct gfs2_bitmap *bi = rbm_bi(rbm);
- const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
+ const u8 *buffer;
const u8 *byte;
unsigned int bit;
+ if (use_clone && bi->bi_clone)
+ buffer = bi->bi_clone;
+ else
+ buffer = bi->bi_bh->b_data;
+ buffer += bi->bi_offset;
byte = buffer + (rbm->offset / GFS2_NBBY);
bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
@@ -322,7 +331,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
u8 res;
for (n = 0; n < n_unaligned; n++) {
- res = gfs2_testbit(rbm);
+ res = gfs2_testbit(rbm, true);
if (res != GFS2_BLKST_FREE)
return true;
(*len)--;
@@ -607,8 +616,10 @@ int gfs2_rsqa_alloc(struct gfs2_inode *ip)
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
{
+ struct gfs2_inode *ip = container_of(rs, struct gfs2_inode, i_res);
+
gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
- (unsigned long long)rs->rs_inum,
+ (unsigned long long)ip->i_no_addr,
(unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
rs->rs_rbm.offset, rs->rs_free);
}
@@ -1051,6 +1062,18 @@ static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
/* rd_data0, rd_data and rd_bitbytes already set from rindex */
}
+static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
+{
+ const struct gfs2_rgrp *str = buf;
+
+ rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
+ rgl->rl_flags = str->rg_flags;
+ rgl->rl_free = str->rg_free;
+ rgl->rl_dinodes = str->rg_dinodes;
+ rgl->rl_igeneration = str->rg_igeneration;
+ rgl->__pad = 0UL;
+}
+
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd);
@@ -1073,6 +1096,7 @@ static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
str->rg_crc = cpu_to_be32(crc);
memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
+ gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, buf);
}
static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
@@ -1087,25 +1111,6 @@ static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
return 1;
}
-static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
-{
- const struct gfs2_rgrp *str = buf;
-
- rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
- rgl->rl_flags = str->rg_flags;
- rgl->rl_free = str->rg_free;
- rgl->rl_dinodes = str->rg_dinodes;
- rgl->rl_igeneration = str->rg_igeneration;
- rgl->__pad = 0UL;
-}
-
-static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
-{
- struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
- u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
- rgl->rl_unlinked = cpu_to_be32(unlinked);
-}
-
static u32 count_unlinked(struct gfs2_rgrpd *rgd)
{
struct gfs2_bitmap *bi;
@@ -1424,7 +1429,6 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
rgd->rd_flags |= GFS2_RGF_TRIMMED;
gfs2_trans_add_meta(rgd->rd_gl, bh);
gfs2_rgrp_out(rgd, bh->b_data);
- gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
gfs2_trans_end(sdp);
}
}
@@ -1488,6 +1492,34 @@ static void rs_insert(struct gfs2_inode *ip)
}
/**
+ * rgd_free - return the number of free blocks we can allocate.
+ * @rgd: the resource group
+ *
+ * This function returns the number of free blocks for an rgrp.
+ * That's the clone-free blocks (blocks that are free, not including those
+ * still being used for unlinked files that haven't been deleted).
+ *
+ * It also subtracts any blocks reserved by someone else, but does not
+ * subtract free blocks that are still part of our current reservation,
+ * because obviously we can (and will) allocate them.
+ */
+static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs)
+{
+ u32 tot_reserved, tot_free;
+
+ if (WARN_ON_ONCE(rgd->rd_reserved < rs->rs_free))
+ return 0;
+ tot_reserved = rgd->rd_reserved - rs->rs_free;
+
+ if (rgd->rd_free_clone < tot_reserved)
+ tot_reserved = 0;
+
+ tot_free = rgd->rd_free_clone - tot_reserved;
+
+ return tot_free;
+}
+
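A worked example of the arithmetic in rgd_free() above: with rd_reserved = 100 and rs_free = 30, tot_reserved starts at 70. If rd_free_clone is only 50, the clamp zeroes tot_reserved and all 50 clone-free blocks are returned; if rd_free_clone is 90, the function returns 90 - 70 = 20.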
+/**
* rg_mblk_search - find a group of multiple free blocks to form a reservation
* @rgd: the resource group descriptor
* @ip: pointer to the inode for which we're reserving blocks
@@ -1502,7 +1534,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
u64 goal;
struct gfs2_blkreserv *rs = &ip->i_res;
u32 extlen;
- u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
+ u32 free_blocks = rgd_free(rgd, rs);
int ret;
struct inode *inode = &ip->i_inode;
@@ -1528,7 +1560,6 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
if (ret == 0) {
rs->rs_rbm = rbm;
rs->rs_free = extlen;
- rs->rs_inum = ip->i_no_addr;
rs_insert(ip);
} else {
if (goal == rgd->rd_last_alloc + rgd->rd_data0)
@@ -1686,7 +1717,8 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
while(1) {
bi = rbm_bi(rbm);
- if (test_bit(GBF_FULL, &bi->bi_flags) &&
+ if ((ip == NULL || !gfs2_rs_active(&ip->i_res)) &&
+ test_bit(GBF_FULL, &bi->bi_flags) &&
(state == GFS2_BLKST_FREE))
goto next_bitmap;
@@ -1983,7 +2015,7 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
int error = 0, rg_locked, flags = 0;
u64 last_unlinked = NO_BLOCK;
int loops = 0;
- u32 skip = 0;
+ u32 free_blocks, skip = 0;
if (sdp->sd_args.ar_rgrplvb)
flags |= GL_SKIP;
@@ -1991,8 +2023,9 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
return -EINVAL;
if (gfs2_rs_active(rs)) {
begin = rs->rs_rbm.rgd;
- } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
- rs->rs_rbm.rgd = begin = ip->i_rgd;
+ } else if (rs->rs_rbm.rgd &&
+ rgrp_contains_block(rs->rs_rbm.rgd, ip->i_goal)) {
+ begin = rs->rs_rbm.rgd;
} else {
check_and_update_goal(ip);
rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
@@ -2053,11 +2086,11 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
goto check_rgrp;
/* If rgrp has enough free space, use it */
- if (rs->rs_rbm.rgd->rd_free_clone >= ap->target ||
+ free_blocks = rgd_free(rs->rs_rbm.rgd, rs);
+ if (free_blocks >= ap->target ||
(loops == 2 && ap->min_target &&
- rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) {
- ip->i_rgd = rs->rs_rbm.rgd;
- ap->allowed = ip->i_rgd->rd_free_clone;
+ free_blocks >= ap->min_target)) {
+ ap->allowed = free_blocks;
return 0;
}
check_rgrp:
@@ -2116,26 +2149,6 @@ void gfs2_inplace_release(struct gfs2_inode *ip)
}
/**
- * gfs2_get_block_type - Check a block in a RG is of given type
- * @rgd: the resource group holding the block
- * @block: the block number
- *
- * Returns: The block type (GFS2_BLKST_*)
- */
-
-static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
-{
- struct gfs2_rbm rbm = { .rgd = rgd, };
- int ret;
-
- ret = gfs2_rbm_from_block(&rbm, block);
- WARN_ON_ONCE(ret != 0);
-
- return gfs2_testbit(&rbm);
-}
-
-
-/**
* gfs2_alloc_extent - allocate an extent from a given bitmap
* @rbm: the resource group information
* @dinode: TRUE if the first block we allocate is for a dinode
@@ -2159,7 +2172,7 @@ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
block++;
while (*n < elen) {
ret = gfs2_rbm_from_block(&pos, block);
- if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
+ if (ret || gfs2_testbit(&pos, true) != GFS2_BLKST_FREE)
break;
gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
gfs2_setbit(&pos, true, GFS2_BLKST_USED);
@@ -2335,7 +2348,7 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *dibh;
- struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
+ struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rbm.rgd, };
unsigned int ndata;
u64 block; /* block, within the file system scope */
int error;
@@ -2393,7 +2406,6 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
- gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);
gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
if (dinode)
@@ -2434,7 +2446,6 @@ void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
- gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
/* Directories keep their data in the metadata address space */
if (meta || ip->i_depth)
@@ -2471,8 +2482,7 @@ void gfs2_unlink_di(struct inode *inode)
trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
- gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
- update_rgrp_lvb_unlinked(rgd, 1);
+ be32_add_cpu(&rgd->rd_rgl->rl_unlinked, 1);
}
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
@@ -2492,8 +2502,7 @@ void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
- gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
- update_rgrp_lvb_unlinked(rgd, -1);
+ be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1);
gfs2_statfs_change(sdp, 0, +1, -1);
trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
@@ -2516,6 +2525,7 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
struct gfs2_rgrpd *rgd;
struct gfs2_holder rgd_gh;
+ struct gfs2_rbm rbm;
int error = -EINVAL;
rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
@@ -2526,7 +2536,11 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
if (error)
goto fail;
- if (gfs2_get_block_type(rgd, no_addr) != type)
+ rbm.rgd = rgd;
+ error = gfs2_rbm_from_block(&rbm, no_addr);
+ WARN_ON_ONCE(error != 0);
+
+ if (gfs2_testbit(&rbm, false) != type)
error = -ESTALE;
gfs2_glock_dq_uninit(&rgd_gh);
@@ -2558,19 +2572,34 @@ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
return;
- if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
- rgd = ip->i_rgd;
- else
+ /*
+ * The resource group last accessed is kept in the last position.
+ */
+
+ if (rlist->rl_rgrps) {
+ rgd = rlist->rl_rgd[rlist->rl_rgrps - 1];
+ if (rgrp_contains_block(rgd, block))
+ return;
rgd = gfs2_blk2rgrpd(sdp, block, 1);
+ } else {
+ rgd = ip->i_res.rs_rbm.rgd;
+ if (!rgd || !rgrp_contains_block(rgd, block))
+ rgd = gfs2_blk2rgrpd(sdp, block, 1);
+ }
+
if (!rgd) {
- fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
+ fs_err(sdp, "rlist_add: no rgrp for block %llu\n",
+ (unsigned long long)block);
return;
}
- ip->i_rgd = rgd;
- for (x = 0; x < rlist->rl_rgrps; x++)
- if (rlist->rl_rgd[x] == rgd)
+ for (x = 0; x < rlist->rl_rgrps; x++) {
+ if (rlist->rl_rgd[x] == rgd) {
+ swap(rlist->rl_rgd[x],
+ rlist->rl_rgd[rlist->rl_rgrps - 1]);
return;
+ }
+ }
if (rlist->rl_rgrps == rlist->rl_space) {
new_space = rlist->rl_space + 10;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index af0d5b01cf0b..c212893534ed 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1729,7 +1729,6 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb)
if (ip) {
ip->i_flags = 0;
ip->i_gl = NULL;
- ip->i_rgd = NULL;
memset(&ip->i_res, 0, sizeof(ip->i_res));
RB_CLEAR_NODE(&ip->i_res.rs_node);
ip->i_rahead = 0;
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index c191fa58a1df..1787d295834e 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -429,11 +429,18 @@ int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
spin_lock(&sdp->sd_jindex_spin);
rv = -EBUSY;
- if (sdp->sd_jdesc->jd_jid == jid)
+ /**
+ * If we're a spectator, we use journal0, but it's not really ours.
+ * So we need to wait for its recovery too. If we skip it, we'd never
+ * queue work to the recovery workqueue, and its completion would
+ * never clear the DFL_BLOCK_LOCKS flag, so all our locks would
+ * permanently stop working.
+ */
+ if (sdp->sd_jdesc->jd_jid == jid && !sdp->sd_args.ar_spectator)
goto out;
rv = -ENOENT;
list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
- if (jd->jd_jid != jid)
+ if (jd->jd_jid != jid && !sdp->sd_args.ar_spectator)
continue;
rv = gfs2_recover_journal(jd, false);
break;
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index cb10b95efe0f..e0025258107a 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -606,7 +606,8 @@ TRACE_EVENT(gfs2_rs,
__entry->rd_addr = rs->rs_rbm.rgd->rd_addr;
__entry->rd_free_clone = rs->rs_rbm.rgd->rd_free_clone;
__entry->rd_reserved = rs->rs_rbm.rgd->rd_reserved;
- __entry->inum = rs->rs_inum;
+ __entry->inum = container_of(rs, struct gfs2_inode,
+ i_res)->i_no_addr;
__entry->start = gfs2_rbm_to_block(&rs->rs_rbm);
__entry->free = rs->rs_free;
__entry->func = func;
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index 1e6e7da25a17..ad70087d0597 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -30,9 +30,11 @@ struct gfs2_glock;
* block, or all of the blocks in the rg, whichever is smaller */
static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned requested)
{
- if (requested < ip->i_rgd->rd_length)
+ struct gfs2_rgrpd *rgd = ip->i_res.rs_rbm.rgd;
+
+ if (requested < rgd->rd_length)
return requested + 1;
- return ip->i_rgd->rd_length;
+ return rgd->rd_length;
}
extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 763d659db91b..59c811de0dc7 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -46,14 +46,16 @@ int gfs2_lm_withdraw(struct gfs2_sbd *sdp, const char *fmt, ...)
test_and_set_bit(SDF_SHUTDOWN, &sdp->sd_flags))
return 0;
- va_start(args, fmt);
+ if (fmt) {
+ va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
+ vaf.fmt = fmt;
+ vaf.va = &args;
- fs_err(sdp, "%pV", &vaf);
+ fs_err(sdp, "%pV", &vaf);
- va_end(args);
+ va_end(args);
+ }
if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) {
fs_err(sdp, "about to withdraw this file system\n");
@@ -246,21 +248,21 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file,
}
/**
- * gfs2_io_error_bh_i - Flag a buffer I/O error and withdraw
- * Returns: -1 if this call withdrew the machine,
- * 0 if it was already withdrawn
+ * gfs2_io_error_bh_i - Flag a buffer I/O error
+ * @withdraw: withdraw the filesystem
*/
-int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
- const char *function, char *file, unsigned int line)
+void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ const char *function, char *file, unsigned int line,
+ bool withdraw)
{
- int rv;
- rv = gfs2_lm_withdraw(sdp,
- "fatal: I/O error\n"
- " block = %llu\n"
- " function = %s, file = %s, line = %u\n",
- (unsigned long long)bh->b_blocknr,
- function, file, line);
- return rv;
+ fs_err(sdp,
+ "fatal: I/O error\n"
+ " block = %llu\n"
+ " function = %s, file = %s, line = %u\n",
+ (unsigned long long)bh->b_blocknr,
+ function, file, line);
+ if (withdraw)
+ gfs2_lm_withdraw(sdp, NULL);
}
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 3926f95a6eb7..96ac4aba4738 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -136,11 +136,15 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__);
-int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
- const char *function, char *file, unsigned int line);
+void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ const char *function, char *file, unsigned int line,
+ bool withdraw);
+
+#define gfs2_io_error_bh_wd(sdp, bh) \
+gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__, true);
#define gfs2_io_error_bh(sdp, bh) \
-gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__);
+gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__, false);
extern struct kmem_cache *gfs2_glock_cachep;
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index f2bce1e0f6fb..38515988aaf7 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -343,60 +343,45 @@ struct ea_list {
unsigned int ei_size;
};
-static inline unsigned int gfs2_ea_strlen(struct gfs2_ea_header *ea)
-{
- switch (ea->ea_type) {
- case GFS2_EATYPE_USR:
- return 5 + ea->ea_name_len + 1;
- case GFS2_EATYPE_SYS:
- return 7 + ea->ea_name_len + 1;
- case GFS2_EATYPE_SECURITY:
- return 9 + ea->ea_name_len + 1;
- default:
- return 0;
- }
-}
-
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
void *private)
{
struct ea_list *ei = private;
struct gfs2_ea_request *er = ei->ei_er;
- unsigned int ea_size = gfs2_ea_strlen(ea);
+ unsigned int ea_size;
+ char *prefix;
+ unsigned int l;
if (ea->ea_type == GFS2_EATYPE_UNUSED)
return 0;
- if (er->er_data_len) {
- char *prefix = NULL;
- unsigned int l = 0;
- char c = 0;
+ switch (ea->ea_type) {
+ case GFS2_EATYPE_USR:
+ prefix = "user.";
+ l = 5;
+ break;
+ case GFS2_EATYPE_SYS:
+ prefix = "system.";
+ l = 7;
+ break;
+ case GFS2_EATYPE_SECURITY:
+ prefix = "security.";
+ l = 9;
+ break;
+ default:
+ BUG();
+ }
+ ea_size = l + ea->ea_name_len + 1;
+ if (er->er_data_len) {
if (ei->ei_size + ea_size > er->er_data_len)
return -ERANGE;
- switch (ea->ea_type) {
- case GFS2_EATYPE_USR:
- prefix = "user.";
- l = 5;
- break;
- case GFS2_EATYPE_SYS:
- prefix = "system.";
- l = 7;
- break;
- case GFS2_EATYPE_SECURITY:
- prefix = "security.";
- l = 9;
- break;
- }
-
- BUG_ON(l == 0);
-
memcpy(er->er_data + ei->ei_size, prefix, l);
memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
ea->ea_name_len);
- memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
+ er->er_data[ei->ei_size + ea_size - 1] = 0;
}
ei->ei_size += ea_size;
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 2a16111d312f..a2dfa1b2a89c 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -541,7 +541,7 @@ static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
HFS_I(inode)->rsrc_inode = dir;
HFS_I(dir)->rsrc_inode = inode;
igrab(dir);
- hlist_add_fake(&inode->i_hash);
+ inode_fake_hash(inode);
mark_inode_dirty(inode);
dont_mount(dentry);
out:
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 2597b290c2a5..444c7b170359 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -610,33 +610,21 @@ static struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry,
int err;
inode = hostfs_iget(ino->i_sb);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
+ if (IS_ERR(inode))
goto out;
- }
err = -ENOMEM;
name = dentry_name(dentry);
- if (name == NULL)
- goto out_put;
-
- err = read_name(inode, name);
-
- __putname(name);
- if (err == -ENOENT) {
+ if (name) {
+ err = read_name(inode, name);
+ __putname(name);
+ }
+ if (err) {
iput(inode);
- inode = NULL;
+ inode = (err == -ENOENT) ? NULL : ERR_PTR(err);
}
- else if (err)
- goto out_put;
-
- d_add(dentry, inode);
- return NULL;
-
- out_put:
- iput(inode);
out:
- return ERR_PTR(err);
+ return d_splice_alias(inode, dentry);
}
static int hostfs_link(struct dentry *to, struct inode *ino,
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index c83ece7facc5..d85230c84ef2 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -244,6 +244,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
result = iget_locked(dir->i_sb, ino);
if (!result) {
hpfs_error(dir->i_sb, "hpfs_lookup: can't get inode");
+ result = ERR_PTR(-ENOMEM);
goto bail1;
}
if (result->i_state & I_NEW) {
@@ -266,6 +267,8 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
if (de->has_acl || de->has_xtd_perm) if (!sb_rdonly(dir->i_sb)) {
hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures");
+ iput(result);
+ result = ERR_PTR(-EINVAL);
goto bail1;
}
@@ -301,29 +304,17 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
}
}
+bail1:
hpfs_brelse4(&qbh);
/*
* Made it.
*/
- end:
- end_add:
+end:
+end_add:
hpfs_unlock(dir->i_sb);
- d_add(dentry, result);
- return NULL;
-
- /*
- * Didn't.
- */
- bail1:
-
- hpfs_brelse4(&qbh);
-
- /*bail:*/
-
- hpfs_unlock(dir->i_sb);
- return ERR_PTR(-ENOENT);
+ return d_splice_alias(result, dentry);
}
const struct file_operations hpfs_dir_ops =
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 40d4c66c7751..346a146c7617 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1310,10 +1310,6 @@ static int get_hstate_idx(int page_size_log)
return h - hstates;
}
-static const struct dentry_operations anon_ops = {
- .d_dname = simple_dname
-};
-
/*
* Note that size should be aligned to proper hugepage size in caller side,
* otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
@@ -1322,19 +1318,18 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
vm_flags_t acctflag, struct user_struct **user,
int creat_flags, int page_size_log)
{
- struct file *file = ERR_PTR(-ENOMEM);
struct inode *inode;
- struct path path;
- struct super_block *sb;
- struct qstr quick_string;
+ struct vfsmount *mnt;
int hstate_idx;
+ struct file *file;
hstate_idx = get_hstate_idx(page_size_log);
if (hstate_idx < 0)
return ERR_PTR(-ENODEV);
*user = NULL;
- if (!hugetlbfs_vfsmount[hstate_idx])
+ mnt = hugetlbfs_vfsmount[hstate_idx];
+ if (!mnt)
return ERR_PTR(-ENOENT);
if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
@@ -1350,45 +1345,28 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
}
}
- sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
- quick_string.name = name;
- quick_string.len = strlen(quick_string.name);
- quick_string.hash = 0;
- path.dentry = d_alloc_pseudo(sb, &quick_string);
- if (!path.dentry)
- goto out_shm_unlock;
-
- d_set_d_op(path.dentry, &anon_ops);
- path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
file = ERR_PTR(-ENOSPC);
- inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
+ inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
if (!inode)
- goto out_dentry;
+ goto out;
if (creat_flags == HUGETLB_SHMFS_INODE)
inode->i_flags |= S_PRIVATE;
- file = ERR_PTR(-ENOMEM);
- if (hugetlb_reserve_pages(inode, 0,
- size >> huge_page_shift(hstate_inode(inode)), NULL,
- acctflag))
- goto out_inode;
-
- d_instantiate(path.dentry, inode);
inode->i_size = size;
clear_nlink(inode);
- file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
- &hugetlbfs_file_operations);
- if (IS_ERR(file))
- goto out_dentry; /* inode is already attached */
-
- return file;
+ if (hugetlb_reserve_pages(inode, 0,
+ size >> huge_page_shift(hstate_inode(inode)), NULL,
+ acctflag))
+ file = ERR_PTR(-ENOMEM);
+ else
+ file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
+ &hugetlbfs_file_operations);
+ if (!IS_ERR(file))
+ return file;
-out_inode:
iput(inode);
-out_dentry:
- path_put(&path);
-out_shm_unlock:
+out:
if (*user) {
user_shm_unlock(size, *user);
*user = NULL;
diff --git a/fs/inode.c b/fs/inode.c
index 8c86c809ca17..a06de4454232 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -804,6 +804,10 @@ repeat:
__wait_on_freeing_inode(inode);
goto repeat;
}
+ if (unlikely(inode->i_state & I_CREATING)) {
+ spin_unlock(&inode->i_lock);
+ return ERR_PTR(-ESTALE);
+ }
__iget(inode);
spin_unlock(&inode->i_lock);
return inode;
@@ -831,6 +835,10 @@ repeat:
__wait_on_freeing_inode(inode);
goto repeat;
}
+ if (unlikely(inode->i_state & I_CREATING)) {
+ spin_unlock(&inode->i_lock);
+ return ERR_PTR(-ESTALE);
+ }
__iget(inode);
spin_unlock(&inode->i_lock);
return inode;
@@ -961,13 +969,26 @@ void unlock_new_inode(struct inode *inode)
lockdep_annotate_inode_mutex_key(inode);
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_NEW));
- inode->i_state &= ~I_NEW;
+ inode->i_state &= ~I_NEW & ~I_CREATING;
smp_mb();
wake_up_bit(&inode->i_state, __I_NEW);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
+void discard_new_inode(struct inode *inode)
+{
+ lockdep_annotate_inode_mutex_key(inode);
+ spin_lock(&inode->i_lock);
+ WARN_ON(!(inode->i_state & I_NEW));
+ inode->i_state &= ~I_NEW;
+ smp_mb();
+ wake_up_bit(&inode->i_state, __I_NEW);
+ spin_unlock(&inode->i_lock);
+ iput(inode);
+}
+EXPORT_SYMBOL(discard_new_inode);
+
/**
* lock_two_nondirectories - take two i_mutexes on non-directory objects
*
@@ -1029,6 +1050,7 @@ struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
{
struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
struct inode *old;
+ bool creating = inode->i_state & I_CREATING;
again:
spin_lock(&inode_hash_lock);
@@ -1039,6 +1061,8 @@ again:
* Use the old inode instead of the preallocated one.
*/
spin_unlock(&inode_hash_lock);
+ if (IS_ERR(old))
+ return NULL;
wait_on_inode(old);
if (unlikely(inode_unhashed(old))) {
iput(old);
@@ -1060,6 +1084,8 @@ again:
inode->i_state |= I_NEW;
hlist_add_head(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
+ if (!creating)
+ inode_sb_list_add(inode);
unlock:
spin_unlock(&inode_hash_lock);
@@ -1094,12 +1120,13 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
struct inode *inode = ilookup5(sb, hashval, test, data);
if (!inode) {
- struct inode *new = new_inode(sb);
+ struct inode *new = alloc_inode(sb);
if (new) {
+ new->i_state = 0;
inode = inode_insert5(new, hashval, test, set, data);
if (unlikely(inode != new))
- iput(new);
+ destroy_inode(new);
}
}
return inode;
@@ -1128,6 +1155,8 @@ again:
inode = find_inode_fast(sb, head, ino);
spin_unlock(&inode_hash_lock);
if (inode) {
+ if (IS_ERR(inode))
+ return NULL;
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
@@ -1165,6 +1194,8 @@ again:
*/
spin_unlock(&inode_hash_lock);
destroy_inode(inode);
+ if (IS_ERR(old))
+ return NULL;
inode = old;
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
@@ -1282,7 +1313,7 @@ struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
inode = find_inode(sb, head, test, data);
spin_unlock(&inode_hash_lock);
- return inode;
+ return IS_ERR(inode) ? NULL : inode;
}
EXPORT_SYMBOL(ilookup5_nowait);
@@ -1338,6 +1369,8 @@ again:
spin_unlock(&inode_hash_lock);
if (inode) {
+ if (IS_ERR(inode))
+ return NULL;
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
@@ -1421,12 +1454,17 @@ int insert_inode_locked(struct inode *inode)
}
if (likely(!old)) {
spin_lock(&inode->i_lock);
- inode->i_state |= I_NEW;
+ inode->i_state |= I_NEW | I_CREATING;
hlist_add_head(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
return 0;
}
+ if (unlikely(old->i_state & I_CREATING)) {
+ spin_unlock(&old->i_lock);
+ spin_unlock(&inode_hash_lock);
+ return -EBUSY;
+ }
__iget(old);
spin_unlock(&old->i_lock);
spin_unlock(&inode_hash_lock);
@@ -1443,7 +1481,10 @@ EXPORT_SYMBOL(insert_inode_locked);
int insert_inode_locked4(struct inode *inode, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
- struct inode *old = inode_insert5(inode, hashval, test, NULL, data);
+ struct inode *old;
+
+ inode->i_state |= I_CREATING;
+ old = inode_insert5(inode, hashval, test, NULL, data);
if (old != inode) {
iput(old);
diff --git a/fs/internal.h b/fs/internal.h
index 5645b4ebf494..50a28fc71300 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -43,6 +43,8 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait)
extern void guard_bio_eod(int rw, struct bio *bio);
extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block, struct iomap *iomap);
+int __generic_write_end(struct inode *inode, loff_t pos, unsigned copied,
+ struct page *page);
/*
* char_dev.c
@@ -93,7 +95,7 @@ extern void chroot_fs_refs(const struct path *, const struct path *);
/*
* file_table.c
*/
-extern struct file *get_empty_filp(void);
+extern struct file *alloc_empty_file(int, const struct cred *);
/*
* super.c
@@ -125,8 +127,7 @@ int do_fchmodat(int dfd, const char __user *filename, umode_t mode);
int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
int flag);
-extern int open_check_o_direct(struct file *f);
-extern int vfs_open(const struct path *, struct file *, const struct cred *);
+extern int vfs_open(const struct path *, struct file *);
/*
* inode.c
diff --git a/fs/iomap.c b/fs/iomap.c
index 0d0bd8845586..009071e73bc0 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010 Red Hat, Inc.
- * Copyright (c) 2016 Christoph Hellwig.
+ * Copyright (c) 2016-2018 Christoph Hellwig.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -17,7 +17,9 @@
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
+#include <linux/migrate.h>
#include <linux/mm.h>
+#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
@@ -103,6 +105,467 @@ iomap_sector(struct iomap *iomap, loff_t pos)
return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
+static struct iomap_page *
+iomap_page_create(struct inode *inode, struct page *page)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+
+ if (iop || i_blocksize(inode) == PAGE_SIZE)
+ return iop;
+
+ iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
+ atomic_set(&iop->read_count, 0);
+ atomic_set(&iop->write_count, 0);
+ bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
+ set_page_private(page, (unsigned long)iop);
+ SetPagePrivate(page);
+ return iop;
+}
+
+static void
+iomap_page_release(struct page *page)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+
+ if (!iop)
+ return;
+ WARN_ON_ONCE(atomic_read(&iop->read_count));
+ WARN_ON_ONCE(atomic_read(&iop->write_count));
+ ClearPagePrivate(page);
+ set_page_private(page, 0);
+ kfree(iop);
+}
+
+/*
+ * Calculate the range inside the page that we actually need to read.
+ */
+static void
+iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
+ loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
+{
+ unsigned block_bits = inode->i_blkbits;
+ unsigned block_size = (1 << block_bits);
+ unsigned poff = offset_in_page(*pos);
+ unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
+ unsigned first = poff >> block_bits;
+ unsigned last = (poff + plen - 1) >> block_bits;
+ unsigned end = offset_in_page(i_size_read(inode)) >> block_bits;
+
+ /*
+ * If the block size is smaller than the page size we need to check the
+ * per-block uptodate status and adjust the offset and length if needed
+ * to avoid reading in already uptodate ranges.
+ */
+ if (iop) {
+ unsigned int i;
+
+ /* move forward for each leading block marked uptodate */
+ for (i = first; i <= last; i++) {
+ if (!test_bit(i, iop->uptodate))
+ break;
+ *pos += block_size;
+ poff += block_size;
+ plen -= block_size;
+ first++;
+ }
+
+ /* truncate len if we find any trailing uptodate block(s) */
+ for ( ; i <= last; i++) {
+ if (test_bit(i, iop->uptodate)) {
+ plen -= (last - i + 1) * block_size;
+ last = i - 1;
+ break;
+ }
+ }
+ }
+
+ /*
+ * If the extent spans the block that contains the i_size we need to
+ * handle both halves separately so that we properly zero data in the
+ * page cache for blocks that are entirely outside of i_size.
+ */
+ if (first <= end && last > end)
+ plen -= (last - end) * block_size;
+
+ *offp = poff;
+ *lenp = plen;
+}
+
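A worked example of the trimming above, assuming a 1024-byte block size and a 4096-byte page whose blocks 0 and 3 are already uptodate: starting with *pos at the page boundary and length == 4096, the leading loop advances *pos and poff to 1024 and shrinks plen to 3072; the trailing loop then sees block 3 uptodate and trims plen to 2048, so only blocks 1 and 2 (bytes 1024..3071) are actually read.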
+static void
+iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+ struct inode *inode = page->mapping->host;
+ unsigned first = off >> inode->i_blkbits;
+ unsigned last = (off + len - 1) >> inode->i_blkbits;
+ unsigned int i;
+ bool uptodate = true;
+
+ if (iop) {
+ for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
+ if (i >= first && i <= last)
+ set_bit(i, iop->uptodate);
+ else if (!test_bit(i, iop->uptodate))
+ uptodate = false;
+ }
+ }
+
+ if (uptodate && !PageError(page))
+ SetPageUptodate(page);
+}
+
+static void
+iomap_read_finish(struct iomap_page *iop, struct page *page)
+{
+ if (!iop || atomic_dec_and_test(&iop->read_count))
+ unlock_page(page);
+}
+
+static void
+iomap_read_page_end_io(struct bio_vec *bvec, int error)
+{
+ struct page *page = bvec->bv_page;
+ struct iomap_page *iop = to_iomap_page(page);
+
+ if (unlikely(error)) {
+ ClearPageUptodate(page);
+ SetPageError(page);
+ } else {
+ iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
+ }
+
+ iomap_read_finish(iop, page);
+}
+
+static void
+iomap_read_inline_data(struct inode *inode, struct page *page,
+ struct iomap *iomap)
+{
+ size_t size = i_size_read(inode);
+ void *addr;
+
+ if (PageUptodate(page))
+ return;
+
+ BUG_ON(page->index);
+ BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
+
+ addr = kmap_atomic(page);
+ memcpy(addr, iomap->inline_data, size);
+ memset(addr + size, 0, PAGE_SIZE - size);
+ kunmap_atomic(addr);
+ SetPageUptodate(page);
+}
+
+static void
+iomap_read_end_io(struct bio *bio)
+{
+ int error = blk_status_to_errno(bio->bi_status);
+ struct bio_vec *bvec;
+ int i;
+
+ bio_for_each_segment_all(bvec, bio, i)
+ iomap_read_page_end_io(bvec, error);
+ bio_put(bio);
+}
+
+struct iomap_readpage_ctx {
+ struct page *cur_page;
+ bool cur_page_in_bio;
+ bool is_readahead;
+ struct bio *bio;
+ struct list_head *pages;
+};
+
+static loff_t
+iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ struct iomap *iomap)
+{
+ struct iomap_readpage_ctx *ctx = data;
+ struct page *page = ctx->cur_page;
+ struct iomap_page *iop = iomap_page_create(inode, page);
+ bool is_contig = false;
+ loff_t orig_pos = pos;
+ unsigned poff, plen;
+ sector_t sector;
+
+ if (iomap->type == IOMAP_INLINE) {
+ WARN_ON_ONCE(pos);
+ iomap_read_inline_data(inode, page, iomap);
+ return PAGE_SIZE;
+ }
+
+ /* zero post-eof blocks as the page may be mapped */
+ iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
+ if (plen == 0)
+ goto done;
+
+ if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
+ zero_user(page, poff, plen);
+ iomap_set_range_uptodate(page, poff, plen);
+ goto done;
+ }
+
+ ctx->cur_page_in_bio = true;
+
+ /*
+ * Try to merge into a previous segment if we can.
+ */
+ sector = iomap_sector(iomap, pos);
+ if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
+ if (__bio_try_merge_page(ctx->bio, page, plen, poff))
+ goto done;
+ is_contig = true;
+ }
+
+ /*
+ * If we start a new segment we need to increase the read count, and we
+ * need to do so before submitting any previous full bio to make sure
+ * that we don't prematurely unlock the page.
+ */
+ if (iop)
+ atomic_inc(&iop->read_count);
+
+ if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
+ gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+ int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ if (ctx->bio)
+ submit_bio(ctx->bio);
+
+ if (ctx->is_readahead) /* same as readahead_gfp_mask */
+ gfp |= __GFP_NORETRY | __GFP_NOWARN;
+ ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
+ ctx->bio->bi_opf = REQ_OP_READ;
+ if (ctx->is_readahead)
+ ctx->bio->bi_opf |= REQ_RAHEAD;
+ ctx->bio->bi_iter.bi_sector = sector;
+ bio_set_dev(ctx->bio, iomap->bdev);
+ ctx->bio->bi_end_io = iomap_read_end_io;
+ }
+
+ __bio_add_page(ctx->bio, page, plen, poff);
+done:
+ /*
+ * Move the caller beyond our range so that it keeps making progress.
+ * For that we have to include any leading non-uptodate ranges, but
+ * we can skip trailing ones as they will be handled in the next
+ * iteration.
+ */
+ return pos - orig_pos + plen;
+}
+
+int
+iomap_readpage(struct page *page, const struct iomap_ops *ops)
+{
+ struct iomap_readpage_ctx ctx = { .cur_page = page };
+ struct inode *inode = page->mapping->host;
+ unsigned poff;
+ loff_t ret;
+
+ for (poff = 0; poff < PAGE_SIZE; poff += ret) {
+ ret = iomap_apply(inode, page_offset(page) + poff,
+ PAGE_SIZE - poff, 0, ops, &ctx,
+ iomap_readpage_actor);
+ if (ret <= 0) {
+ WARN_ON_ONCE(ret == 0);
+ SetPageError(page);
+ break;
+ }
+ }
+
+ if (ctx.bio) {
+ submit_bio(ctx.bio);
+ WARN_ON_ONCE(!ctx.cur_page_in_bio);
+ } else {
+ WARN_ON_ONCE(ctx.cur_page_in_bio);
+ unlock_page(page);
+ }
+
+ /*
+ * Just like mpage_readpages and block_read_full_page we always
+ * return 0 and just mark the page as PageError on errors. This
+ * should be cleaned up all through the stack eventually.
+ */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_readpage);
+
+static struct page *
+iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
+ loff_t length, loff_t *done)
+{
+ while (!list_empty(pages)) {
+ struct page *page = lru_to_page(pages);
+
+ if (page_offset(page) >= (u64)pos + length)
+ break;
+
+ list_del(&page->lru);
+ if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
+ GFP_NOFS))
+ return page;
+
+ /*
+ * If we already have a page in the page cache at index we are
+ * done. Upper layers don't care if it is uptodate after the
+ * readpages call itself as every page gets checked again once
+ * actually needed.
+ */
+ *done += PAGE_SIZE;
+ put_page(page);
+ }
+
+ return NULL;
+}
+
+static loff_t
+iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
+ void *data, struct iomap *iomap)
+{
+ struct iomap_readpage_ctx *ctx = data;
+ loff_t done, ret;
+
+ for (done = 0; done < length; done += ret) {
+ if (ctx->cur_page && offset_in_page(pos + done) == 0) {
+ if (!ctx->cur_page_in_bio)
+ unlock_page(ctx->cur_page);
+ put_page(ctx->cur_page);
+ ctx->cur_page = NULL;
+ }
+ if (!ctx->cur_page) {
+ ctx->cur_page = iomap_next_page(inode, ctx->pages,
+ pos, length, &done);
+ if (!ctx->cur_page)
+ break;
+ ctx->cur_page_in_bio = false;
+ }
+ ret = iomap_readpage_actor(inode, pos + done, length - done,
+ ctx, iomap);
+ }
+
+ return done;
+}
+
+int
+iomap_readpages(struct address_space *mapping, struct list_head *pages,
+ unsigned nr_pages, const struct iomap_ops *ops)
+{
+ struct iomap_readpage_ctx ctx = {
+ .pages = pages,
+ .is_readahead = true,
+ };
+ loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
+ loff_t last = page_offset(list_entry(pages->next, struct page, lru));
+ loff_t length = last - pos + PAGE_SIZE, ret = 0;
+
+ while (length > 0) {
+ ret = iomap_apply(mapping->host, pos, length, 0, ops,
+ &ctx, iomap_readpages_actor);
+ if (ret <= 0) {
+ WARN_ON_ONCE(ret == 0);
+ goto done;
+ }
+ pos += ret;
+ length -= ret;
+ }
+ ret = 0;
+done:
+ if (ctx.bio)
+ submit_bio(ctx.bio);
+ if (ctx.cur_page) {
+ if (!ctx.cur_page_in_bio)
+ unlock_page(ctx.cur_page);
+ put_page(ctx.cur_page);
+ }
+
+ /*
+ * Check that we didn't lose a page due to the arcane calling
+ * conventions.
+ */
+ WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iomap_readpages);
+
+int
+iomap_is_partially_uptodate(struct page *page, unsigned long from,
+ unsigned long count)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+ struct inode *inode = page->mapping->host;
+ unsigned first = from >> inode->i_blkbits;
+ unsigned last = (from + count - 1) >> inode->i_blkbits;
+ unsigned i;
+
+ if (iop) {
+ for (i = first; i <= last; i++)
+ if (!test_bit(i, iop->uptodate))
+ return 0;
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
+
+int
+iomap_releasepage(struct page *page, gfp_t gfp_mask)
+{
+ /*
+ * mm accommodates an old ext3 case where clean pages might not have had
+ * the dirty bit cleared. Thus, it can send actual dirty pages to
+ * ->releasepage() via shrink_active_list(); skip those here.
+ */
+ if (PageDirty(page) || PageWriteback(page))
+ return 0;
+ iomap_page_release(page);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(iomap_releasepage);
+
+void
+iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
+{
+ /*
+ * If we are invalidating the entire page, clear the dirty state from it
+ * and release it to avoid unnecessary buildup of the LRU.
+ */
+ if (offset == 0 && len == PAGE_SIZE) {
+ WARN_ON_ONCE(PageWriteback(page));
+ cancel_dirty_page(page);
+ iomap_page_release(page);
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_invalidatepage);
+
+#ifdef CONFIG_MIGRATION
+int
+iomap_migrate_page(struct address_space *mapping, struct page *newpage,
+ struct page *page, enum migrate_mode mode)
+{
+ int ret;
+
+ ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
+ if (ret != MIGRATEPAGE_SUCCESS)
+ return ret;
+
+ if (page_has_private(page)) {
+ ClearPagePrivate(page);
+ set_page_private(newpage, page_private(page));
+ set_page_private(page, 0);
+ SetPagePrivate(newpage);
+ }
+
+ if (mode != MIGRATE_SYNC_NO_COPY)
+ migrate_page_copy(newpage, page);
+ else
+ migrate_page_states(newpage, page);
+ return MIGRATEPAGE_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(iomap_migrate_page);
+#endif /* CONFIG_MIGRATION */
+
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
@@ -117,6 +580,61 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
}
static int
+iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
+ unsigned poff, unsigned plen, unsigned from, unsigned to,
+ struct iomap *iomap)
+{
+ struct bio_vec bvec;
+ struct bio bio;
+
+ if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
+ zero_user_segments(page, poff, from, to, poff + plen);
+ iomap_set_range_uptodate(page, poff, plen);
+ return 0;
+ }
+
+ bio_init(&bio, &bvec, 1);
+ bio.bi_opf = REQ_OP_READ;
+ bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
+ bio_set_dev(&bio, iomap->bdev);
+ __bio_add_page(&bio, page, plen, poff);
+ return submit_bio_wait(&bio);
+}
+
+static int
+__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
+ struct page *page, struct iomap *iomap)
+{
+ struct iomap_page *iop = iomap_page_create(inode, page);
+ loff_t block_size = i_blocksize(inode);
+ loff_t block_start = pos & ~(block_size - 1);
+ loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
+ unsigned from = offset_in_page(pos), to = from + len, poff, plen;
+ int status = 0;
+
+ if (PageUptodate(page))
+ return 0;
+
+ do {
+ iomap_adjust_read_range(inode, iop, &block_start,
+ block_end - block_start, &poff, &plen);
+ if (plen == 0)
+ break;
+
+ if ((from > poff && from < poff + plen) ||
+ (to > poff && to < poff + plen)) {
+ status = iomap_read_page_sync(inode, block_start, page,
+ poff, plen, from, to, iomap);
+ if (status)
+ break;
+ }
+
+ } while ((block_start += plen) < block_end);
+
+ return status;
+}
+
+static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
struct page **pagep, struct iomap *iomap)
{
@@ -133,7 +651,12 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
if (!page)
return -ENOMEM;
- status = __block_write_begin_int(page, pos, len, NULL, iomap);
+ if (iomap->type == IOMAP_INLINE)
+ iomap_read_inline_data(inode, page, iomap);
+ else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
+ status = __block_write_begin_int(page, pos, len, NULL, iomap);
+ else
+ status = __iomap_write_begin(inode, pos, len, page, iomap);
if (unlikely(status)) {
unlock_page(page);
put_page(page);
@@ -146,14 +669,93 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
return status;
}
+int
+iomap_set_page_dirty(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+ int newly_dirty;
+
+ if (unlikely(!mapping))
+ return !TestSetPageDirty(page);
+
+ /*
+ * Lock out page->mem_cgroup migration to keep PageDirty
+ * synchronized with per-memcg dirty page counters.
+ */
+ lock_page_memcg(page);
+ newly_dirty = !TestSetPageDirty(page);
+ if (newly_dirty)
+ __set_page_dirty(page, mapping, 0);
+ unlock_page_memcg(page);
+
+ if (newly_dirty)
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ return newly_dirty;
+}
+EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
+
+static int
+__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
+ unsigned copied, struct page *page, struct iomap *iomap)
+{
+ flush_dcache_page(page);
+
+ /*
+ * The blocks that were entirely written will now be uptodate, so we
+ * don't have to worry about a readpage reading them and overwriting a
+ * partial write. However if we have encountered a short write and only
+ * partially written into a block, it will not be marked uptodate, so a
+ * readpage might come in and destroy our partial write.
+ *
+	 * Do the simplest thing, and just treat any short write to a
+	 * non-uptodate page as a zero-length write, and force the caller to redo
+ * the whole thing.
+ */
+ if (unlikely(copied < len && !PageUptodate(page))) {
+ copied = 0;
+ } else {
+ iomap_set_range_uptodate(page, offset_in_page(pos), len);
+ iomap_set_page_dirty(page);
+ }
+ return __generic_write_end(inode, pos, copied, page);
+}
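
The zero-bytes convention above only works because every caller retries; the fragment below condenses the buffered-write loop (essentially what iomap_write_actor further down in this file does) into a sketch, with error handling trimmed. It is illustrative only, not new kernel code.

	again:
		if (unlikely(iov_iter_fault_in_readable(i, bytes)))
			return -EFAULT;
		status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap);
		if (status)
			return status;
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		flush_dcache_page(page);
		status = iomap_write_end(inode, pos, bytes, copied, page, iomap);
		if (status < 0)
			return status;
		if (unlikely(status == 0)) {
			/*
			 * A short copy into a !Uptodate page came back as zero
			 * bytes written: nothing was committed, so shrink to a
			 * single segment and redo the whole copy.
			 */
			bytes = min_t(unsigned long, bytes,
					iov_iter_single_seg_count(i));
			goto again;
		}
		iov_iter_advance(i, status);
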
+
+static int
+iomap_write_end_inline(struct inode *inode, struct page *page,
+ struct iomap *iomap, loff_t pos, unsigned copied)
+{
+ void *addr;
+
+ WARN_ON_ONCE(!PageUptodate(page));
+ BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
+
+ addr = kmap_atomic(page);
+ memcpy(iomap->inline_data + pos, addr + pos, copied);
+ kunmap_atomic(addr);
+
+ mark_inode_dirty(inode);
+ __generic_write_end(inode, pos, copied, page);
+ return copied;
+}
+
static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
- unsigned copied, struct page *page)
+ unsigned copied, struct page *page, struct iomap *iomap)
{
int ret;
- ret = generic_write_end(NULL, inode->i_mapping, pos, len,
- copied, page, NULL);
+ if (iomap->type == IOMAP_INLINE) {
+ ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
+ } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
+ ret = generic_write_end(NULL, inode->i_mapping, pos, len,
+ copied, page, NULL);
+ } else {
+ ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
+ }
+
+ if (iomap->page_done)
+ iomap->page_done(inode, pos, copied, page, iomap);
+
if (ret < len)
iomap_write_failed(inode, pos, len);
return ret;
@@ -174,7 +776,7 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
unsigned long bytes; /* Bytes to write to page */
size_t copied; /* Bytes copied from user */
- offset = (pos & (PAGE_SIZE - 1));
+ offset = offset_in_page(pos);
bytes = min_t(unsigned long, PAGE_SIZE - offset,
iov_iter_count(i));
again:
@@ -208,7 +810,8 @@ again:
flush_dcache_page(page);
- status = iomap_write_end(inode, pos, bytes, copied, page);
+ status = iomap_write_end(inode, pos, bytes, copied, page,
+ iomap);
if (unlikely(status < 0))
break;
copied = status;
@@ -287,7 +890,7 @@ iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
unsigned long offset; /* Offset into pagecache page */
unsigned long bytes; /* Bytes to write to page */
- offset = (pos & (PAGE_SIZE - 1));
+ offset = offset_in_page(pos);
bytes = min_t(loff_t, PAGE_SIZE - offset, length);
rpage = __iomap_read_page(inode, pos);
@@ -302,7 +905,7 @@ iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
WARN_ON_ONCE(!PageUptodate(page));
- status = iomap_write_end(inode, pos, bytes, bytes, page);
+ status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
if (unlikely(status <= 0)) {
if (WARN_ON_ONCE(status == 0))
return -EIO;
@@ -354,7 +957,7 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
zero_user(page, offset, bytes);
mark_page_accessed(page);
- return iomap_write_end(inode, pos, bytes, bytes, page);
+ return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}
static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
@@ -379,7 +982,7 @@ iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
do {
unsigned offset, bytes;
- offset = pos & (PAGE_SIZE - 1); /* Within page */
+ offset = offset_in_page(pos);
bytes = min_t(loff_t, PAGE_SIZE - offset, count);
if (IS_DAX(inode))
@@ -440,11 +1043,16 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
struct page *page = data;
int ret;
- ret = __block_write_begin_int(page, pos, length, NULL, iomap);
- if (ret)
- return ret;
+ if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
+ ret = __block_write_begin_int(page, pos, length, NULL, iomap);
+ if (ret)
+ return ret;
+ block_commit_write(page, 0, length);
+ } else {
+ WARN_ON_ONCE(!PageUptodate(page));
+ iomap_page_create(inode, page);
+ }
- block_commit_write(page, 0, length);
return length;
}
@@ -467,7 +1075,7 @@ int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
/* page is wholly or partially inside EOF */
if (((page->index + 1) << PAGE_SHIFT) > size)
- length = size & ~PAGE_MASK;
+ length = offset_in_page(size);
else
length = PAGE_SIZE;
@@ -630,7 +1238,7 @@ page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
goto out_unlock_not_found;
for (off = 0; off < PAGE_SIZE; off += bsize) {
- if ((*lastoff & ~PAGE_MASK) >= off + bsize)
+ if (offset_in_page(*lastoff) >= off + bsize)
continue;
if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
unlock_page(page);
@@ -811,6 +1419,7 @@ struct iomap_dio {
atomic_t ref;
unsigned flags;
int error;
+ bool wait_for_completion;
union {
/* used during submission and for synchronous completion: */
@@ -914,9 +1523,8 @@ static void iomap_dio_bio_end_io(struct bio *bio)
iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
if (atomic_dec_and_test(&dio->ref)) {
- if (is_sync_kiocb(dio->iocb)) {
+ if (dio->wait_for_completion) {
struct task_struct *waiter = dio->submit.waiter;
-
WRITE_ONCE(dio->submit.waiter, NULL);
wake_up_process(waiter);
} else if (dio->flags & IOMAP_DIO_WRITE) {
@@ -963,10 +1571,9 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
}
static loff_t
-iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap)
+iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
+ struct iomap_dio *dio, struct iomap *iomap)
{
- struct iomap_dio *dio = data;
unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
unsigned int fs_block_size = i_blocksize(inode), pad;
unsigned int align = iov_iter_alignment(dio->submit.iter);
@@ -980,41 +1587,27 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
if ((pos | length | align) & ((1 << blkbits) - 1))
return -EINVAL;
- switch (iomap->type) {
- case IOMAP_HOLE:
- if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
- return -EIO;
- /*FALLTHRU*/
- case IOMAP_UNWRITTEN:
- if (!(dio->flags & IOMAP_DIO_WRITE)) {
- length = iov_iter_zero(length, dio->submit.iter);
- dio->size += length;
- return length;
- }
+ if (iomap->type == IOMAP_UNWRITTEN) {
dio->flags |= IOMAP_DIO_UNWRITTEN;
need_zeroout = true;
- break;
- case IOMAP_MAPPED:
- if (iomap->flags & IOMAP_F_SHARED)
- dio->flags |= IOMAP_DIO_COW;
- if (iomap->flags & IOMAP_F_NEW) {
- need_zeroout = true;
- } else {
- /*
- * Use a FUA write if we need datasync semantics, this
- * is a pure data IO that doesn't require any metadata
- * updates and the underlying device supports FUA. This
- * allows us to avoid cache flushes on IO completion.
- */
- if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
- (dio->flags & IOMAP_DIO_WRITE_FUA) &&
- blk_queue_fua(bdev_get_queue(iomap->bdev)))
- use_fua = true;
- }
- break;
- default:
- WARN_ON_ONCE(1);
- return -EIO;
+ }
+
+ if (iomap->flags & IOMAP_F_SHARED)
+ dio->flags |= IOMAP_DIO_COW;
+
+ if (iomap->flags & IOMAP_F_NEW) {
+ need_zeroout = true;
+ } else {
+ /*
+		 * Use a FUA write if we need datasync semantics, this is a pure
+		 * data IO that doesn't require any metadata updates, and the
+		 * underlying device supports FUA. This allows us to avoid
+		 * cache flushes on IO completion.
+ */
+ if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
+ (dio->flags & IOMAP_DIO_WRITE_FUA) &&
+ blk_queue_fua(bdev_get_queue(iomap->bdev)))
+ use_fua = true;
}
/*
@@ -1093,6 +1686,66 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
return copied;
}
+static loff_t
+iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
+{
+ length = iov_iter_zero(length, dio->submit.iter);
+ dio->size += length;
+ return length;
+}
+
+static loff_t
+iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
+ struct iomap_dio *dio, struct iomap *iomap)
+{
+ struct iov_iter *iter = dio->submit.iter;
+ size_t copied;
+
+ BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));
+
+ if (dio->flags & IOMAP_DIO_WRITE) {
+ loff_t size = inode->i_size;
+
+ if (pos > size)
+ memset(iomap->inline_data + size, 0, pos - size);
+ copied = copy_from_iter(iomap->inline_data + pos, length, iter);
+ if (copied) {
+ if (pos + copied > size)
+ i_size_write(inode, pos + copied);
+ mark_inode_dirty(inode);
+ }
+ } else {
+ copied = copy_to_iter(iomap->inline_data + pos, length, iter);
+ }
+ dio->size += copied;
+ return copied;
+}
+
+static loff_t
+iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
+ void *data, struct iomap *iomap)
+{
+ struct iomap_dio *dio = data;
+
+ switch (iomap->type) {
+ case IOMAP_HOLE:
+ if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
+ return -EIO;
+ return iomap_dio_hole_actor(length, dio);
+ case IOMAP_UNWRITTEN:
+ if (!(dio->flags & IOMAP_DIO_WRITE))
+ return iomap_dio_hole_actor(length, dio);
+ return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
+ case IOMAP_MAPPED:
+ return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
+ case IOMAP_INLINE:
+ return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
+ default:
+ WARN_ON_ONCE(1);
+ return -EIO;
+ }
+}
+
/*
* iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
* is being issued as AIO or not. This allows us to optimise pure data writes
@@ -1131,13 +1784,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->end_io = end_io;
dio->error = 0;
dio->flags = 0;
+ dio->wait_for_completion = is_sync_kiocb(iocb);
dio->submit.iter = iter;
- if (is_sync_kiocb(iocb)) {
- dio->submit.waiter = current;
- dio->submit.cookie = BLK_QC_T_NONE;
- dio->submit.last_queue = NULL;
- }
+ dio->submit.waiter = current;
+ dio->submit.cookie = BLK_QC_T_NONE;
+ dio->submit.last_queue = NULL;
if (iov_iter_rw(iter) == READ) {
if (pos >= dio->i_size)
@@ -1187,7 +1839,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio_warn_stale_pagecache(iocb->ki_filp);
ret = 0;
- if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
+ if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
!inode->i_sb->s_dio_done_wq) {
ret = sb_init_dio_done_wq(inode->i_sb);
if (ret < 0)
@@ -1202,8 +1854,10 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
iomap_dio_actor);
if (ret <= 0) {
/* magic error code to fall back to buffered I/O */
- if (ret == -ENOTBLK)
+ if (ret == -ENOTBLK) {
+ dio->wait_for_completion = true;
ret = 0;
+ }
break;
}
pos += ret;
@@ -1224,7 +1878,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->flags &= ~IOMAP_DIO_NEED_SYNC;
if (!atomic_dec_and_test(&dio->ref)) {
- if (!is_sync_kiocb(iocb))
+ if (!dio->wait_for_completion)
return -EIOCBQUEUED;
for (;;) {
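
Taken together, the direct-I/O changes in this file keep the call signature stable, so a caller still looks roughly like the sketch below (loosely modeled on an xfs-style ->read_iter; "myfs_iomap_ops" and "myfs_dio_end_io" are placeholders). With wait_for_completion now tracked in the dio itself, an async kiocb still gets -EIOCBQUEUED, while the -ENOTBLK buffered-I/O fallback is forced to complete synchronously.

	static ssize_t myfs_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
	{
		struct inode *inode = file_inode(iocb->ki_filp);
		ssize_t ret;

		if (!iov_iter_count(to))
			return 0;	/* nothing to do */

		inode_lock_shared(inode);
		ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, myfs_dio_end_io);
		inode_unlock_shared(inode);

		return ret;	/* -EIOCBQUEUED for async submission */
	}
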
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 8de0e7723316..150cc030b4d7 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -121,7 +121,7 @@ static int journal_submit_commit_record(journal_t *journal,
struct commit_header *tmp;
struct buffer_head *bh;
int ret;
- struct timespec64 now = current_kernel_time64();
+ struct timespec64 now;
*cbh = NULL;
@@ -134,6 +134,7 @@ static int journal_submit_commit_record(journal_t *journal,
return 1;
tmp = (struct commit_header *)bh->b_data;
+ ktime_get_coarse_real_ts64(&now);
tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);
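
The replacement is the standard y2038-safe pattern: keep a struct timespec64 on the stack and fill it with ktime_get_coarse_real_ts64() only once the commit descriptor is in hand, instead of sampling the clock up front. A minimal sketch of the same idiom (the field names are placeholders):

	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);	/* last-tick value, no clocksource read */
	hdr->sec  = cpu_to_be64(now.tv_sec);	/* 64-bit seconds survive 2038 */
	hdr->nsec = cpu_to_be32(now.tv_nsec);
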
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index b2944f9218f7..f20cff1194bb 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -201,7 +201,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
if (ret)
goto fail;
- dir_i->i_mtime = dir_i->i_ctime = timespec_to_timespec64(ITIME(je32_to_cpu(ri->ctime)));
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime));
jffs2_free_raw_inode(ri);
@@ -227,14 +227,14 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry)
struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(d_inode(dentry));
int ret;
- uint32_t now = get_seconds();
+ uint32_t now = JFFS2_NOW();
ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
dentry->d_name.len, dead_f, now);
if (dead_f->inocache)
set_nlink(d_inode(dentry), dead_f->inocache->pino_nlink);
if (!ret)
- dir_i->i_mtime = dir_i->i_ctime = timespec_to_timespec64(ITIME(now));
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
return ret;
}
/***********************************************************************/
@@ -260,7 +260,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
type = (d_inode(old_dentry)->i_mode & S_IFMT) >> 12;
if (!type) type = DT_REG;
- now = get_seconds();
+ now = JFFS2_NOW();
ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len, now);
if (!ret) {
@@ -268,7 +268,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
set_nlink(d_inode(old_dentry), ++f->inocache->pino_nlink);
mutex_unlock(&f->sem);
d_instantiate(dentry, d_inode(old_dentry));
- dir_i->i_mtime = dir_i->i_ctime = timespec_to_timespec64(ITIME(now));
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
ihold(d_inode(old_dentry));
}
return ret;
@@ -400,7 +400,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
rd->pino = cpu_to_je32(dir_i->i_ino);
rd->version = cpu_to_je32(++dir_f->highest_version);
rd->ino = cpu_to_je32(inode->i_ino);
- rd->mctime = cpu_to_je32(get_seconds());
+ rd->mctime = cpu_to_je32(JFFS2_NOW());
rd->nsize = namelen;
rd->type = DT_LNK;
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
@@ -418,7 +418,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
goto fail;
}
- dir_i->i_mtime = dir_i->i_ctime = timespec_to_timespec64(ITIME(je32_to_cpu(rd->mctime)));
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
jffs2_free_raw_dirent(rd);
@@ -543,7 +543,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode
rd->pino = cpu_to_je32(dir_i->i_ino);
rd->version = cpu_to_je32(++dir_f->highest_version);
rd->ino = cpu_to_je32(inode->i_ino);
- rd->mctime = cpu_to_je32(get_seconds());
+ rd->mctime = cpu_to_je32(JFFS2_NOW());
rd->nsize = namelen;
rd->type = DT_DIR;
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
@@ -561,7 +561,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode
goto fail;
}
- dir_i->i_mtime = dir_i->i_ctime = timespec_to_timespec64(ITIME(je32_to_cpu(rd->mctime)));
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
inc_nlink(dir_i);
jffs2_free_raw_dirent(rd);
@@ -588,7 +588,7 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(dentry));
struct jffs2_full_dirent *fd;
int ret;
- uint32_t now = get_seconds();
+ uint32_t now = JFFS2_NOW();
for (fd = f->dents ; fd; fd = fd->next) {
if (fd->ino)
@@ -598,7 +598,7 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
dentry->d_name.len, f, now);
if (!ret) {
- dir_i->i_mtime = dir_i->i_ctime = timespec_to_timespec64(ITIME(now));
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
clear_nlink(d_inode(dentry));
drop_nlink(dir_i);
}
@@ -712,7 +712,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode
rd->pino = cpu_to_je32(dir_i->i_ino);
rd->version = cpu_to_je32(++dir_f->highest_version);
rd->ino = cpu_to_je32(inode->i_ino);
- rd->mctime = cpu_to_je32(get_seconds());
+ rd->mctime = cpu_to_je32(JFFS2_NOW());
rd->nsize = namelen;
/* XXX: This is ugly. */
@@ -733,7 +733,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode
goto fail;
}
- dir_i->i_mtime = dir_i->i_ctime = timespec_to_timespec64(ITIME(je32_to_cpu(rd->mctime)));
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
jffs2_free_raw_dirent(rd);
@@ -797,7 +797,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
type = (d_inode(old_dentry)->i_mode & S_IFMT) >> 12;
if (!type) type = DT_REG;
- now = get_seconds();
+ now = JFFS2_NOW();
ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i),
d_inode(old_dentry)->i_ino, type,
new_dentry->d_name.name, new_dentry->d_name.len, now);
@@ -853,14 +853,14 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
* caller won't do it on its own since we are returning an error.
*/
d_invalidate(new_dentry);
- new_dir_i->i_mtime = new_dir_i->i_ctime = timespec_to_timespec64(ITIME(now));
+ new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
return ret;
}
if (d_is_dir(old_dentry))
drop_nlink(old_dir_i);
- new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = timespec_to_timespec64(ITIME(now));
+ new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
return 0;
}
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 481afd4c2e1a..7d8654a1472e 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -175,7 +175,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
ri.uid = cpu_to_je16(i_uid_read(inode));
ri.gid = cpu_to_je16(i_gid_read(inode));
ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
- ri.atime = ri.ctime = ri.mtime = cpu_to_je32(get_seconds());
+ ri.atime = ri.ctime = ri.mtime = cpu_to_je32(JFFS2_NOW());
ri.offset = cpu_to_je32(inode->i_size);
ri.dsize = cpu_to_je32(pageofs - inode->i_size);
ri.csize = cpu_to_je32(0);
@@ -283,7 +283,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
ri->uid = cpu_to_je16(i_uid_read(inode));
ri->gid = cpu_to_je16(i_gid_read(inode));
ri->isize = cpu_to_je32((uint32_t)inode->i_size);
- ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds());
+ ri->atime = ri->ctime = ri->mtime = cpu_to_je32(JFFS2_NOW());
/* In 2.4, it was already kmapped by generic_file_write(). Doesn't
hurt to do it again. The alternative is ifdefs, which are ugly. */
@@ -308,7 +308,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
inode->i_size = pos + writtenlen;
inode->i_blocks = (inode->i_size + 511) >> 9;
- inode->i_ctime = inode->i_mtime = timespec_to_timespec64(ITIME(je32_to_cpu(ri->ctime)));
+ inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
}
}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 0ecfb8ea38cd..eab04eca95a3 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -146,9 +146,9 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
return PTR_ERR(new_metadata);
}
/* It worked. Update the inode */
- inode->i_atime = timespec_to_timespec64(ITIME(je32_to_cpu(ri->atime)));
- inode->i_ctime = timespec_to_timespec64(ITIME(je32_to_cpu(ri->ctime)));
- inode->i_mtime = timespec_to_timespec64(ITIME(je32_to_cpu(ri->mtime)));
+ inode->i_atime = ITIME(je32_to_cpu(ri->atime));
+ inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
+ inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
inode->i_mode = jemode_to_cpu(ri->mode);
i_uid_write(inode, je16_to_cpu(ri->uid));
i_gid_write(inode, je16_to_cpu(ri->gid));
@@ -280,9 +280,9 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
i_uid_write(inode, je16_to_cpu(latest_node.uid));
i_gid_write(inode, je16_to_cpu(latest_node.gid));
inode->i_size = je32_to_cpu(latest_node.isize);
- inode->i_atime = timespec_to_timespec64(ITIME(je32_to_cpu(latest_node.atime)));
- inode->i_mtime = timespec_to_timespec64(ITIME(je32_to_cpu(latest_node.mtime)));
- inode->i_ctime = timespec_to_timespec64(ITIME(je32_to_cpu(latest_node.ctime)));
+ inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
+ inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
+ inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));
set_nlink(inode, f->inocache->pino_nlink);
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index c2fbec19c616..a2dbbb3f4c74 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -31,12 +31,13 @@ struct kvec;
#define JFFS2_F_I_GID(f) (i_gid_read(OFNI_EDONI_2SFFJ(f)))
#define JFFS2_F_I_RDEV(f) (OFNI_EDONI_2SFFJ(f)->i_rdev)
-#define ITIME(sec) ((struct timespec){sec, 0})
-#define I_SEC(tv) ((tv).tv_sec)
-#define JFFS2_F_I_CTIME(f) (OFNI_EDONI_2SFFJ(f)->i_ctime.tv_sec)
-#define JFFS2_F_I_MTIME(f) (OFNI_EDONI_2SFFJ(f)->i_mtime.tv_sec)
-#define JFFS2_F_I_ATIME(f) (OFNI_EDONI_2SFFJ(f)->i_atime.tv_sec)
-
+#define JFFS2_CLAMP_TIME(t) ((uint32_t)clamp_t(time64_t, (t), 0, U32_MAX))
+#define ITIME(sec) ((struct timespec64){sec, 0})
+#define JFFS2_NOW() JFFS2_CLAMP_TIME(ktime_get_real_seconds())
+#define I_SEC(tv) JFFS2_CLAMP_TIME((tv).tv_sec)
+#define JFFS2_F_I_CTIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_ctime)
+#define JFFS2_F_I_MTIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_mtime)
+#define JFFS2_F_I_ATIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_atime)
#define sleep_on_spinunlock(wq, s) \
do { \
DECLARE_WAITQUEUE(__wait, current); \
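
Because the on-disk format keeps 32-bit timestamps, JFFS2_CLAMP_TIME() pins out-of-range 64-bit values to the representable window instead of letting them wrap. A worked sketch of the macros at the edges (the literal inputs are only illustrative):

	uint32_t a = JFFS2_CLAMP_TIME(-5);		/* 0: pre-1970 clamps low */
	uint32_t b = JFFS2_CLAMP_TIME(0x123456789LL);	/* U32_MAX: post-2106 clamps high */
	struct timespec64 ts = ITIME(JFFS2_NOW());	/* 0 <= ts.tv_sec <= U32_MAX, tv_nsec == 0 */
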
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index f36ef68905a7..93e8c590ff5c 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -491,13 +491,7 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
/* release the page */
release_metapage(mp);
- /*
- * __mark_inode_dirty expects inodes to be hashed. Since we don't
- * want special inodes in the fileset inode space, we make them
- * appear hashed, but do not put on any lists. hlist_del()
- * will work fine and require no locking.
- */
- hlist_add_fake(&ip->i_hash);
+ inode_fake_hash(ip);
return (ip);
}
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index 9940a1e04cbf..912a3af2393e 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -43,7 +43,7 @@ struct jfs_inode_info {
pxd_t ixpxd; /* inode extent descriptor */
dxd_t acl; /* dxd describing acl */
dxd_t ea; /* dxd describing ea */
- time_t otime; /* time created */
+ time64_t otime; /* time created */
uint next_index; /* next available directory entry index */
int acltype; /* Type of ACL */
short btorder; /* access order */
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 5e9b7bb3aabf..4572b7cf183d 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -61,8 +61,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
inode = new_inode(sb);
if (!inode) {
jfs_warn("ialloc: new_inode returned NULL!");
- rc = -ENOMEM;
- goto fail;
+ return ERR_PTR(-ENOMEM);
}
jfs_inode = JFS_IP(inode);
@@ -70,8 +69,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
rc = diAlloc(parent, S_ISDIR(mode), inode);
if (rc) {
jfs_warn("ialloc: diAlloc returned %d!", rc);
- if (rc == -EIO)
- make_bad_inode(inode);
goto fail_put;
}
@@ -141,9 +138,10 @@ fail_drop:
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
clear_nlink(inode);
- unlock_new_inode(inode);
+ discard_new_inode(inode);
+ return ERR_PTR(rc);
+
fail_put:
iput(inode);
-fail:
return ERR_PTR(rc);
}
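
The error-path rewrite above follows the new discard_new_inode() idiom: an inode that is still I_NEW is invalidated, unlocked and put in one call, so concurrent lookups waiting on it see it discarded rather than half-constructed. A hedged sketch of the pattern:

	/* inode came from new_inode() and is still locked/I_NEW here */
	if (rc) {
		clear_nlink(inode);
		discard_new_inode(inode);	/* replaces unlock_new_inode() + iput() */
		return ERR_PTR(rc);
	}
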
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 56c3fcbfe80e..14528c0ffe63 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -175,8 +175,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
- unlock_new_inode(ip);
- iput(ip);
+ discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
@@ -309,8 +308,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
- unlock_new_inode(ip);
- iput(ip);
+ discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
@@ -1054,8 +1052,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
- unlock_new_inode(ip);
- iput(ip);
+ discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
@@ -1441,8 +1438,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
- unlock_new_inode(ip);
- iput(ip);
+ discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index f08571433aba..09da5cf14e27 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -581,7 +581,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
inode->i_ino = 0;
inode->i_size = i_size_read(sb->s_bdev->bd_inode);
inode->i_mapping->a_ops = &jfs_metapage_aops;
- hlist_add_fake(&inode->i_hash);
+ inode_fake_hash(inode);
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
sbi->direct_inode = inode;
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index d66cc0777303..4ca0b5c18192 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -619,6 +619,7 @@ struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
unsigned flags)
{
struct kernfs_node *kn;
@@ -661,8 +662,22 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
kn->mode = mode;
kn->flags = flags;
+ if (!uid_eq(uid, GLOBAL_ROOT_UID) || !gid_eq(gid, GLOBAL_ROOT_GID)) {
+ struct iattr iattr = {
+ .ia_valid = ATTR_UID | ATTR_GID,
+ .ia_uid = uid,
+ .ia_gid = gid,
+ };
+
+ ret = __kernfs_setattr(kn, &iattr);
+ if (ret < 0)
+ goto err_out3;
+ }
+
return kn;
+ err_out3:
+ idr_remove(&root->ino_idr, kn->id.ino);
err_out2:
kmem_cache_free(kernfs_node_cache, kn);
err_out1:
@@ -672,11 +687,13 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
unsigned flags)
{
struct kernfs_node *kn;
- kn = __kernfs_new_node(kernfs_root(parent), name, mode, flags);
+ kn = __kernfs_new_node(kernfs_root(parent),
+ name, mode, uid, gid, flags);
if (kn) {
kernfs_get(parent);
kn->parent = parent;
@@ -946,6 +963,7 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
root->next_generation = 1;
kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
KERNFS_DIR);
if (!kn) {
idr_destroy(&root->ino_idr);
@@ -984,6 +1002,8 @@ void kernfs_destroy_root(struct kernfs_root *root)
* @parent: parent in which to create a new directory
* @name: name of the new directory
* @mode: mode of the new directory
+ * @uid: uid of the new directory
+ * @gid: gid of the new directory
* @priv: opaque data associated with the new directory
* @ns: optional namespace tag of the directory
*
@@ -991,13 +1011,15 @@ void kernfs_destroy_root(struct kernfs_root *root)
*/
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
void *priv, const void *ns)
{
struct kernfs_node *kn;
int rc;
/* allocate */
- kn = kernfs_new_node(parent, name, mode | S_IFDIR, KERNFS_DIR);
+ kn = kernfs_new_node(parent, name, mode | S_IFDIR,
+ uid, gid, KERNFS_DIR);
if (!kn)
return ERR_PTR(-ENOMEM);
@@ -1028,7 +1050,8 @@ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
int rc;
/* allocate */
- kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR, KERNFS_DIR);
+ kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, KERNFS_DIR);
if (!kn)
return ERR_PTR(-ENOMEM);
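
With uid/gid plumbed through, callers can create kernfs nodes owned by an unprivileged user (useful for delegated cgroup hierarchies, for instance). A hedged sketch of such a caller; the mapping of id 1000 and the "example" name are placeholders:

	struct kernfs_node *kn;
	kuid_t uid = make_kuid(current_user_ns(), 1000);
	kgid_t gid = make_kgid(current_user_ns(), 1000);

	kn = kernfs_create_dir_ns(parent, "example", 0755, uid, gid,
				  priv, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);
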
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 2015d8c45e4a..dbf5bc250bfd 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -965,6 +965,8 @@ const struct file_operations kernfs_file_fops = {
* @parent: directory to create the file in
* @name: name of the file
* @mode: mode of the file
+ * @uid: uid of the file
+ * @gid: gid of the file
* @size: size of the file
* @ops: kernfs operations for the file
* @priv: private data for the file
@@ -975,7 +977,8 @@ const struct file_operations kernfs_file_fops = {
*/
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
const char *name,
- umode_t mode, loff_t size,
+ umode_t mode, kuid_t uid, kgid_t gid,
+ loff_t size,
const struct kernfs_ops *ops,
void *priv, const void *ns,
struct lock_class_key *key)
@@ -986,7 +989,8 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
flags = KERNFS_FILE;
- kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
+ kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG,
+ uid, gid, flags);
if (!kn)
return ERR_PTR(-ENOMEM);
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index 3d73fe9d56e2..80cebcd94c90 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -63,7 +63,7 @@ out_unlock:
return ret;
}
-static int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
+int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
{
struct kernfs_iattrs *attrs;
struct iattr *iattrs;
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 0f260dcca177..3d83b114bb08 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -90,6 +90,7 @@ int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr);
int kernfs_iop_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags);
ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size);
+int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
/*
* dir.c
@@ -104,6 +105,7 @@ void kernfs_put_active(struct kernfs_node *kn);
int kernfs_add_one(struct kernfs_node *kn);
struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
unsigned flags);
struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root,
unsigned int ino);
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
index 08ccabd7047f..5ffed48f3d0e 100644
--- a/fs/kernfs/symlink.c
+++ b/fs/kernfs/symlink.c
@@ -21,6 +21,7 @@
* @target: target node for the symlink to point to
*
* Returns the created node on success, ERR_PTR() value on error.
+ * Ownership of the link matches ownership of the target.
*/
struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
const char *name,
@@ -28,8 +29,16 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
{
struct kernfs_node *kn;
int error;
+ kuid_t uid = GLOBAL_ROOT_UID;
+ kgid_t gid = GLOBAL_ROOT_GID;
- kn = kernfs_new_node(parent, name, S_IFLNK|S_IRWXUGO, KERNFS_LINK);
+ if (target->iattr) {
+ uid = target->iattr->ia_iattr.ia_uid;
+ gid = target->iattr->ia_iattr.ia_gid;
+ }
+
+ kn = kernfs_new_node(parent, name, S_IFLNK|S_IRWXUGO, uid, gid,
+ KERNFS_LINK);
if (!kn)
return ERR_PTR(-ENOMEM);
diff --git a/fs/locks.c b/fs/locks.c
index db7b6917d9c5..bc047a7edc47 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -202,10 +202,6 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
* we often hold the flc_lock as well. In certain cases, when reading the fields
* protected by this lock, we can skip acquiring it iff we already hold the
* flc_lock.
- *
- * In particular, adding an entry to the fl_block list requires that you hold
- * both the flc_lock and the blocked_lock_lock (acquired in that order).
- * Deleting an entry from the list however only requires the file_lock_lock.
*/
static DEFINE_SPINLOCK(blocked_lock_lock);
@@ -990,6 +986,7 @@ out:
if (new_fl)
locks_free_lock(new_fl);
locks_dispose_list(&dispose);
+ trace_flock_lock_inode(inode, request, error);
return error;
}
@@ -2072,6 +2069,13 @@ static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
return -1;
if (IS_REMOTELCK(fl))
return fl->fl_pid;
+ /*
+	 * If the flock owner process is dead and its pid has already been
+	 * freed, the translation below won't work, but we still want to show
+	 * the flock owner's pid number in the init pidns.
+ */
+ if (ns == &init_pid_ns)
+ return (pid_t)fl->fl_pid;
rcu_read_lock();
pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
@@ -2626,12 +2630,10 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
fl_pid = locks_translate_pid(fl, proc_pidns);
/*
- * If there isn't a fl_pid don't display who is waiting on
- * the lock if we are called from locks_show, or if we are
- * called from __show_fd_info - skip lock entirely
+	 * If the lock owner is dead (and its pid has been freed) or not
+	 * visible in the current pidns, zero is shown as the pid value. Check
+	 * the lock info from init_pid_ns to get the saved lock pid value.
*/
- if (fl_pid == 0)
- return;
if (fl->fl_file != NULL)
inode = locks_inode(fl->fl_file);
diff --git a/fs/mpage.c b/fs/mpage.c
index b7e7f570733a..b73638db9866 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -51,8 +51,8 @@ static void mpage_end_io(struct bio *bio)
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
- page_endio(page, op_is_write(bio_op(bio)),
- blk_status_to_errno(bio->bi_status));
+ page_endio(page, bio_op(bio),
+ blk_status_to_errno(bio->bi_status));
}
bio_put(bio);
diff --git a/fs/namei.c b/fs/namei.c
index 734cef54fdf8..3cd396277cd3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2028,6 +2028,8 @@ static int link_path_walk(const char *name, struct nameidata *nd)
{
int err;
+ if (IS_ERR(name))
+ return PTR_ERR(name);
while (*name=='/')
name++;
if (!*name)
@@ -2125,12 +2127,15 @@ OK:
}
}
+/* must be paired with terminate_walk() */
static const char *path_init(struct nameidata *nd, unsigned flags)
{
const char *s = nd->name->name;
if (!*s)
flags &= ~LOOKUP_RCU;
+ if (flags & LOOKUP_RCU)
+ rcu_read_lock();
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
@@ -2143,7 +2148,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->path = nd->root;
nd->inode = inode;
if (flags & LOOKUP_RCU) {
- rcu_read_lock();
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
nd->root_seq = nd->seq;
nd->m_seq = read_seqbegin(&mount_lock);
@@ -2159,21 +2163,15 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->m_seq = read_seqbegin(&mount_lock);
if (*s == '/') {
- if (flags & LOOKUP_RCU)
- rcu_read_lock();
set_root(nd);
if (likely(!nd_jump_root(nd)))
return s;
- nd->root.mnt = NULL;
- rcu_read_unlock();
return ERR_PTR(-ECHILD);
} else if (nd->dfd == AT_FDCWD) {
if (flags & LOOKUP_RCU) {
struct fs_struct *fs = current->fs;
unsigned seq;
- rcu_read_lock();
-
do {
seq = read_seqcount_begin(&fs->seq);
nd->path = fs->pwd;
@@ -2195,16 +2193,13 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
dentry = f.file->f_path.dentry;
- if (*s) {
- if (!d_can_lookup(dentry)) {
- fdput(f);
- return ERR_PTR(-ENOTDIR);
- }
+ if (*s && unlikely(!d_can_lookup(dentry))) {
+ fdput(f);
+ return ERR_PTR(-ENOTDIR);
}
nd->path = f.file->f_path;
if (flags & LOOKUP_RCU) {
- rcu_read_lock();
nd->inode = nd->path.dentry->d_inode;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
} else {
@@ -2272,24 +2267,15 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
const char *s = path_init(nd, flags);
int err;
- if (IS_ERR(s))
- return PTR_ERR(s);
-
- if (unlikely(flags & LOOKUP_DOWN)) {
+ if (unlikely(flags & LOOKUP_DOWN) && !IS_ERR(s)) {
err = handle_lookup_down(nd);
- if (unlikely(err < 0)) {
- terminate_walk(nd);
- return err;
- }
+ if (unlikely(err < 0))
+ s = ERR_PTR(err);
}
while (!(err = link_path_walk(s, nd))
&& ((err = lookup_last(nd)) > 0)) {
s = trailing_symlink(nd);
- if (IS_ERR(s)) {
- err = PTR_ERR(s);
- break;
- }
}
if (!err)
err = complete_walk(nd);
@@ -2336,10 +2322,7 @@ static int path_parentat(struct nameidata *nd, unsigned flags,
struct path *parent)
{
const char *s = path_init(nd, flags);
- int err;
- if (IS_ERR(s))
- return PTR_ERR(s);
- err = link_path_walk(s, nd);
+ int err = link_path_walk(s, nd);
if (!err)
err = complete_walk(nd);
if (!err) {
@@ -2666,15 +2649,10 @@ path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
{
const char *s = path_init(nd, flags);
int err;
- if (IS_ERR(s))
- return PTR_ERR(s);
+
while (!(err = link_path_walk(s, nd)) &&
(err = mountpoint_last(nd)) > 0) {
s = trailing_symlink(nd);
- if (IS_ERR(s)) {
- err = PTR_ERR(s);
- break;
- }
}
if (!err) {
*path = nd->path;
@@ -3027,17 +3005,16 @@ static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t m
* Returns 0 if successful. The file will have been created and attached to
* @file by the filesystem calling finish_open().
*
- * Returns 1 if the file was looked up only or didn't need creating. The
- * caller will need to perform the open themselves. @path will have been
- * updated to point to the new dentry. This may be negative.
+ * If the file was looked up only or didn't need creating, FMODE_OPENED won't
+ * be set. The caller will need to perform the open themselves. @path will
+ * have been updated to point to the new dentry. This may be negative.
*
* Returns an error code otherwise.
*/
static int atomic_open(struct nameidata *nd, struct dentry *dentry,
struct path *path, struct file *file,
const struct open_flags *op,
- int open_flag, umode_t mode,
- int *opened)
+ int open_flag, umode_t mode)
{
struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
struct inode *dir = nd->path.dentry->d_inode;
@@ -3052,39 +3029,38 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
file->f_path.dentry = DENTRY_NOT_SET;
file->f_path.mnt = nd->path.mnt;
error = dir->i_op->atomic_open(dir, dentry, file,
- open_to_namei_flags(open_flag),
- mode, opened);
+ open_to_namei_flags(open_flag), mode);
d_lookup_done(dentry);
if (!error) {
- /*
- * We didn't have the inode before the open, so check open
- * permission here.
- */
- int acc_mode = op->acc_mode;
- if (*opened & FILE_CREATED) {
- WARN_ON(!(open_flag & O_CREAT));
- fsnotify_create(dir, dentry);
- acc_mode = 0;
- }
- error = may_open(&file->f_path, acc_mode, open_flag);
- if (WARN_ON(error > 0))
- error = -EINVAL;
- } else if (error > 0) {
- if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
+ if (file->f_mode & FMODE_OPENED) {
+ /*
+ * We didn't have the inode before the open, so check open
+ * permission here.
+ */
+ int acc_mode = op->acc_mode;
+ if (file->f_mode & FMODE_CREATED) {
+ WARN_ON(!(open_flag & O_CREAT));
+ fsnotify_create(dir, dentry);
+ acc_mode = 0;
+ }
+ error = may_open(&file->f_path, acc_mode, open_flag);
+ if (WARN_ON(error > 0))
+ error = -EINVAL;
+ } else if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
error = -EIO;
} else {
if (file->f_path.dentry) {
dput(dentry);
dentry = file->f_path.dentry;
}
- if (*opened & FILE_CREATED)
+ if (file->f_mode & FMODE_CREATED)
fsnotify_create(dir, dentry);
if (unlikely(d_is_negative(dentry))) {
error = -ENOENT;
} else {
path->dentry = dentry;
path->mnt = nd->path.mnt;
- return 1;
+ return 0;
}
}
}
@@ -3095,25 +3071,22 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
/*
* Look up and maybe create and open the last component.
*
- * Must be called with i_mutex held on parent.
- *
- * Returns 0 if the file was successfully atomically created (if necessary) and
- * opened. In this case the file will be returned attached to @file.
- *
- * Returns 1 if the file was not completely opened at this time, though lookups
- * and creations will have been performed and the dentry returned in @path will
- * be positive upon return if O_CREAT was specified. If O_CREAT wasn't
- * specified then a negative dentry may be returned.
+ * Must be called with parent locked (exclusive in O_CREAT case).
*
- * An error code is returned otherwise.
+ * Returns 0 on success, that is, if
+ * the file was successfully atomically created (if necessary) and opened, or
+ * the file was not completely opened at this time, though lookups and
+ * creations were performed.
+ * These cases are distinguished by the presence of FMODE_OPENED in
+ * file->f_mode.  In the latter case the dentry returned in @path may
+ * be negative if O_CREAT hadn't been specified.
*
- * FILE_CREATE will be set in @*opened if the dentry was created and will be
- * cleared otherwise prior to returning.
+ * An error code is returned on failure.
*/
static int lookup_open(struct nameidata *nd, struct path *path,
struct file *file,
const struct open_flags *op,
- bool got_write, int *opened)
+ bool got_write)
{
struct dentry *dir = nd->path.dentry;
struct inode *dir_inode = dir->d_inode;
@@ -3126,7 +3099,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
if (unlikely(IS_DEADDIR(dir_inode)))
return -ENOENT;
- *opened &= ~FILE_CREATED;
+ file->f_mode &= ~FMODE_CREATED;
dentry = d_lookup(dir, &nd->last);
for (;;) {
if (!dentry) {
@@ -3188,7 +3161,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
if (dir_inode->i_op->atomic_open) {
error = atomic_open(nd, dentry, path, file, op, open_flag,
- mode, opened);
+ mode);
if (unlikely(error == -ENOENT) && create_error)
error = create_error;
return error;
@@ -3211,7 +3184,7 @@ no_open:
/* Negative dentry, just create the file */
if (!dentry->d_inode && (open_flag & O_CREAT)) {
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE);
if (!dir_inode->i_op->create) {
error = -EACCES;
@@ -3230,7 +3203,7 @@ no_open:
out_no_open:
path->dentry = dentry;
path->mnt = nd->path.mnt;
- return 1;
+ return 0;
out_dput:
dput(dentry);
@@ -3241,8 +3214,7 @@ out_dput:
* Handle the last step of open()
*/
static int do_last(struct nameidata *nd,
- struct file *file, const struct open_flags *op,
- int *opened)
+ struct file *file, const struct open_flags *op)
{
struct dentry *dir = nd->path.dentry;
int open_flag = op->open_flag;
@@ -3308,17 +3280,17 @@ static int do_last(struct nameidata *nd,
inode_lock(dir->d_inode);
else
inode_lock_shared(dir->d_inode);
- error = lookup_open(nd, &path, file, op, got_write, opened);
+ error = lookup_open(nd, &path, file, op, got_write);
if (open_flag & O_CREAT)
inode_unlock(dir->d_inode);
else
inode_unlock_shared(dir->d_inode);
- if (error <= 0) {
- if (error)
- goto out;
+ if (error)
+ goto out;
- if ((*opened & FILE_CREATED) ||
+ if (file->f_mode & FMODE_OPENED) {
+ if ((file->f_mode & FMODE_CREATED) ||
!S_ISREG(file_inode(file)->i_mode))
will_truncate = false;
@@ -3326,7 +3298,7 @@ static int do_last(struct nameidata *nd,
goto opened;
}
- if (*opened & FILE_CREATED) {
+ if (file->f_mode & FMODE_CREATED) {
/* Don't check for write permission, don't truncate */
open_flag &= ~O_TRUNC;
will_truncate = false;
@@ -3395,20 +3367,15 @@ finish_open_created:
error = may_open(&nd->path, acc_mode, open_flag);
if (error)
goto out;
- BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
- error = vfs_open(&nd->path, file, current_cred());
+ BUG_ON(file->f_mode & FMODE_OPENED); /* once it's opened, it's opened */
+ error = vfs_open(&nd->path, file);
if (error)
goto out;
- *opened |= FILE_OPENED;
opened:
- error = open_check_o_direct(file);
- if (!error)
- error = ima_file_check(file, op->acc_mode, *opened);
+ error = ima_file_check(file, op->acc_mode);
if (!error && will_truncate)
error = handle_truncate(file);
out:
- if (unlikely(error) && (*opened & FILE_OPENED))
- fput(file);
if (unlikely(error > 0)) {
WARN_ON(1);
error = -EINVAL;
@@ -3458,7 +3425,7 @@ EXPORT_SYMBOL(vfs_tmpfile);
static int do_tmpfile(struct nameidata *nd, unsigned flags,
const struct open_flags *op,
- struct file *file, int *opened)
+ struct file *file)
{
struct dentry *child;
struct path path;
@@ -3480,12 +3447,7 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags,
if (error)
goto out2;
file->f_path.mnt = path.mnt;
- error = finish_open(file, child, NULL, opened);
- if (error)
- goto out2;
- error = open_check_o_direct(file);
- if (error)
- fput(file);
+ error = finish_open(file, child, NULL);
out2:
mnt_drop_write(path.mnt);
out:
@@ -3499,7 +3461,7 @@ static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
int error = path_lookupat(nd, flags, &path);
if (!error) {
audit_inode(nd->name, path.dentry, 0);
- error = vfs_open(&path, file, current_cred());
+ error = vfs_open(&path, file);
path_put(&path);
}
return error;
@@ -3508,59 +3470,40 @@ static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
static struct file *path_openat(struct nameidata *nd,
const struct open_flags *op, unsigned flags)
{
- const char *s;
struct file *file;
- int opened = 0;
int error;
- file = get_empty_filp();
+ file = alloc_empty_file(op->open_flag, current_cred());
if (IS_ERR(file))
return file;
- file->f_flags = op->open_flag;
-
if (unlikely(file->f_flags & __O_TMPFILE)) {
- error = do_tmpfile(nd, flags, op, file, &opened);
- goto out2;
- }
-
- if (unlikely(file->f_flags & O_PATH)) {
+ error = do_tmpfile(nd, flags, op, file);
+ } else if (unlikely(file->f_flags & O_PATH)) {
error = do_o_path(nd, flags, file);
- if (!error)
- opened |= FILE_OPENED;
- goto out2;
- }
-
- s = path_init(nd, flags);
- if (IS_ERR(s)) {
- put_filp(file);
- return ERR_CAST(s);
- }
- while (!(error = link_path_walk(s, nd)) &&
- (error = do_last(nd, file, op, &opened)) > 0) {
- nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
- s = trailing_symlink(nd);
- if (IS_ERR(s)) {
- error = PTR_ERR(s);
- break;
+ } else {
+ const char *s = path_init(nd, flags);
+ while (!(error = link_path_walk(s, nd)) &&
+ (error = do_last(nd, file, op)) > 0) {
+ nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
+ s = trailing_symlink(nd);
}
+ terminate_walk(nd);
}
- terminate_walk(nd);
-out2:
- if (!(opened & FILE_OPENED)) {
- BUG_ON(!error);
- put_filp(file);
+ if (likely(!error)) {
+ if (likely(file->f_mode & FMODE_OPENED))
+ return file;
+ WARN_ON(1);
+ error = -EINVAL;
}
- if (unlikely(error)) {
- if (error == -EOPENSTALE) {
- if (flags & LOOKUP_RCU)
- error = -ECHILD;
- else
- error = -ESTALE;
- }
- file = ERR_PTR(error);
+ fput(file);
+ if (error == -EOPENSTALE) {
+ if (flags & LOOKUP_RCU)
+ error = -ECHILD;
+ else
+ error = -ESTALE;
}
- return file;
+ return ERR_PTR(error);
}
struct file *do_filp_open(int dfd, struct filename *pathname,
@@ -4712,29 +4655,6 @@ out:
return len;
}
-/*
- * A helper for ->readlink(). This should be used *ONLY* for symlinks that
- * have ->get_link() not calling nd_jump_link(). Using (or not using) it
- * for any given inode is up to filesystem.
- */
-static int generic_readlink(struct dentry *dentry, char __user *buffer,
- int buflen)
-{
- DEFINE_DELAYED_CALL(done);
- struct inode *inode = d_inode(dentry);
- const char *link = inode->i_link;
- int res;
-
- if (!link) {
- link = inode->i_op->get_link(dentry, inode, &done);
- if (IS_ERR(link))
- return PTR_ERR(link);
- }
- res = readlink_copy(buffer, buflen, link);
- do_delayed_call(&done);
- return res;
-}
-
/**
* vfs_readlink - copy symlink body into userspace buffer
* @dentry: dentry on which to get symbolic link
@@ -4748,6 +4668,9 @@ static int generic_readlink(struct dentry *dentry, char __user *buffer,
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
struct inode *inode = d_inode(dentry);
+ DEFINE_DELAYED_CALL(done);
+ const char *link;
+ int res;
if (unlikely(!(inode->i_opflags & IOP_DEFAULT_READLINK))) {
if (unlikely(inode->i_op->readlink))
@@ -4761,7 +4684,15 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
spin_unlock(&inode->i_lock);
}
- return generic_readlink(dentry, buffer, buflen);
+ link = inode->i_link;
+ if (!link) {
+ link = inode->i_op->get_link(dentry, inode, &done);
+ if (IS_ERR(link))
+ return PTR_ERR(link);
+ }
+ res = readlink_copy(buffer, buflen, link);
+ do_delayed_call(&done);
+ return res;
}
EXPORT_SYMBOL(vfs_readlink);
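
For a filesystem author, the new convention means ->atomic_open() reports its outcome through file->f_mode rather than through a positive return and the old *opened cookie. The sketch below shows the two outcomes under the new rules; it is illustrative only, with myfs_create_inode() and myfs_open() as placeholder helpers and the usual lookup step omitted.

	static int myfs_atomic_open(struct inode *dir, struct dentry *dentry,
				    struct file *file, unsigned open_flag,
				    umode_t mode)
	{
		struct inode *inode;

		if (!(open_flag & O_CREAT) || dentry->d_inode) {
			/* Nothing to do atomically: hand the open back to the VFS. */
			return finish_no_open(file, NULL);	/* now returns 0, not 1 */
		}

		inode = myfs_create_inode(dir, dentry, mode);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		d_instantiate(dentry, inode);
		file->f_mode |= FMODE_CREATED;	/* replaces *opened |= FILE_CREATED */
		return finish_open(file, dentry, myfs_open);	/* sets FMODE_OPENED */
	}
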
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 7a9c14426855..d7f158c3efc8 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1434,12 +1434,11 @@ static int do_open(struct inode *inode, struct file *filp)
static int nfs_finish_open(struct nfs_open_context *ctx,
struct dentry *dentry,
- struct file *file, unsigned open_flags,
- int *opened)
+ struct file *file, unsigned open_flags)
{
int err;
- err = finish_open(file, dentry, do_open, opened);
+ err = finish_open(file, dentry, do_open);
if (err)
goto out;
if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
@@ -1452,7 +1451,7 @@ out:
int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned open_flags,
- umode_t mode, int *opened)
+ umode_t mode)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
struct nfs_open_context *ctx;
@@ -1461,6 +1460,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct inode *inode;
unsigned int lookup_flags = 0;
bool switched = false;
+ int created = 0;
int err;
/* Expect a negative dentry */
@@ -1521,7 +1521,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
goto out;
trace_nfs_atomic_open_enter(dir, ctx, open_flags);
- inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, opened);
+ inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, &created);
+ if (created)
+ file->f_mode |= FMODE_CREATED;
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
@@ -1546,7 +1548,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
goto out;
}
- err = nfs_finish_open(ctx, ctx->dentry, file, open_flags, opened);
+ err = nfs_finish_open(ctx, ctx->dentry, file, open_flags);
trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
put_nfs_open_context(ctx);
out:
@@ -1641,6 +1643,7 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
struct dentry *parent = dget_parent(dentry);
struct inode *dir = d_inode(parent);
struct inode *inode;
+ struct dentry *d;
int error = -EACCES;
d_drop(dentry);
@@ -1662,10 +1665,12 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
goto out_error;
}
inode = nfs_fhget(dentry->d_sb, fhandle, fattr, label);
- error = PTR_ERR(inode);
- if (IS_ERR(inode))
+ d = d_splice_alias(inode, dentry);
+ if (IS_ERR(d)) {
+ error = PTR_ERR(d);
goto out_error;
- d_add(dentry, inode);
+ }
+ dput(d);
out:
dput(parent);
return 0;
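
The switch from d_add() to d_splice_alias() avoids creating a second dentry when the inode already has one (a directory inode must never have two). A condensed sketch of the idiom used above:

	d = d_splice_alias(inode, dentry);	/* consumes the inode reference */
	if (IS_ERR(d))
		return PTR_ERR(d);
	dput(d);	/* NULL-safe; drops the extra ref if an existing alias was returned */
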
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 137e18abb7e7..51beb6e38c90 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -258,7 +258,7 @@ extern const struct dentry_operations nfs4_dentry_operations;
/* dir.c */
int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
- unsigned, umode_t, int *);
+ unsigned, umode_t);
/* super.c */
extern struct file_system_type nfs4_fs_type;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f6c4ccd693f4..b790976d3913 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2951,7 +2951,7 @@ static int _nfs4_do_open(struct inode *dir,
}
}
if (opened && opendata->file_created)
- *opened |= FILE_CREATED;
+ *opened = 1;
if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
*ctx_th = opendata->f_attr.mdsthreshold;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index b0555d7d8200..55a099e47ba2 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -763,7 +763,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
goto out_nfserr;
}
- host_err = ima_file_check(file, may_flags, 0);
+ host_err = ima_file_check(file, may_flags);
if (host_err) {
fput(file);
goto out_nfserr;
diff --git a/fs/open.c b/fs/open.c
index d0e955b558ad..d98e19239bb7 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -724,27 +724,13 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
return ksys_fchown(fd, user, group);
}
-int open_check_o_direct(struct file *f)
-{
- /* NB: we're sure to have correct a_ops only after f_op->open */
- if (f->f_flags & O_DIRECT) {
- if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
- return -EINVAL;
- }
- return 0;
-}
-
static int do_dentry_open(struct file *f,
struct inode *inode,
- int (*open)(struct inode *, struct file *),
- const struct cred *cred)
+ int (*open)(struct inode *, struct file *))
{
static const struct file_operations empty_fops = {};
int error;
- f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
- FMODE_PREAD | FMODE_PWRITE;
-
path_get(&f->f_path);
f->f_inode = inode;
f->f_mapping = inode->i_mapping;
@@ -753,7 +739,7 @@ static int do_dentry_open(struct file *f,
f->f_wb_err = filemap_sample_wb_err(f->f_mapping);
if (unlikely(f->f_flags & O_PATH)) {
- f->f_mode = FMODE_PATH;
+ f->f_mode = FMODE_PATH | FMODE_OPENED;
f->f_op = &empty_fops;
return 0;
}
@@ -780,7 +766,7 @@ static int do_dentry_open(struct file *f,
goto cleanup_all;
}
- error = security_file_open(f, cred);
+ error = security_file_open(f);
if (error)
goto cleanup_all;
@@ -788,6 +774,8 @@ static int do_dentry_open(struct file *f,
if (error)
goto cleanup_all;
+ /* normally all 3 are set; ->open() can clear them if needed */
+ f->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
if (!open)
open = f->f_op->open;
if (open) {
@@ -795,6 +783,7 @@ static int do_dentry_open(struct file *f,
if (error)
goto cleanup_all;
}
+ f->f_mode |= FMODE_OPENED;
if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_inc(inode);
if ((f->f_mode & FMODE_READ) &&
@@ -809,9 +798,16 @@ static int do_dentry_open(struct file *f,
file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
+ /* NB: we're sure to have correct a_ops only after f_op->open */
+ if (f->f_flags & O_DIRECT) {
+ if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
+ return -EINVAL;
+ }
return 0;
cleanup_all:
+ if (WARN_ON_ONCE(error > 0))
+ error = -EINVAL;
fops_put(f->f_op);
if (f->f_mode & FMODE_WRITER) {
put_write_access(inode);
@@ -847,19 +843,12 @@ cleanup_file:
* Returns zero on success or -errno if the open failed.
*/
int finish_open(struct file *file, struct dentry *dentry,
- int (*open)(struct inode *, struct file *),
- int *opened)
+ int (*open)(struct inode *, struct file *))
{
- int error;
- BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
+ BUG_ON(file->f_mode & FMODE_OPENED); /* once it's opened, it's opened */
file->f_path.dentry = dentry;
- error = do_dentry_open(file, d_backing_inode(dentry), open,
- current_cred());
- if (!error)
- *opened |= FILE_OPENED;
-
- return error;
+ return do_dentry_open(file, d_backing_inode(dentry), open);
}
EXPORT_SYMBOL(finish_open);
@@ -874,13 +863,13 @@ EXPORT_SYMBOL(finish_open);
* NB: unlike finish_open() this function does consume the dentry reference and
* the caller need not dput() it.
*
- * Returns "1" which must be the return value of ->atomic_open() after having
+ * Returns "0" which must be the return value of ->atomic_open() after having
* called this function.
*/
int finish_no_open(struct file *file, struct dentry *dentry)
{
file->f_path.dentry = dentry;
- return 1;
+ return 0;
}
EXPORT_SYMBOL(finish_no_open);
@@ -896,8 +885,7 @@ EXPORT_SYMBOL(file_path);
* @file: newly allocated file with f_flag initialized
* @cred: credentials to use
*/
-int vfs_open(const struct path *path, struct file *file,
- const struct cred *cred)
+int vfs_open(const struct path *path, struct file *file)
{
struct dentry *dentry = d_real(path->dentry, NULL, file->f_flags, 0);
@@ -905,7 +893,7 @@ int vfs_open(const struct path *path, struct file *file,
return PTR_ERR(dentry);
file->f_path = *path;
- return do_dentry_open(file, d_backing_inode(dentry), NULL, cred);
+ return do_dentry_open(file, d_backing_inode(dentry), NULL);
}
struct file *dentry_open(const struct path *path, int flags,
@@ -919,19 +907,11 @@ struct file *dentry_open(const struct path *path, int flags,
/* We must always pass in a valid mount pointer. */
BUG_ON(!path->mnt);
- f = get_empty_filp();
+ f = alloc_empty_file(flags, cred);
if (!IS_ERR(f)) {
- f->f_flags = flags;
- error = vfs_open(path, f, cred);
- if (!error) {
- /* from now on we need fput() to dispose of f */
- error = open_check_o_direct(f);
- if (error) {
- fput(f);
- f = ERR_PTR(error);
- }
- } else {
- put_filp(f);
+ error = vfs_open(path, f);
+ if (error) {
+ fput(f);
f = ERR_PTR(error);
}
}
@@ -1063,26 +1043,6 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
}
EXPORT_SYMBOL(file_open_root);
-struct file *filp_clone_open(struct file *oldfile)
-{
- struct file *file;
- int retval;
-
- file = get_empty_filp();
- if (IS_ERR(file))
- return file;
-
- file->f_flags = oldfile->f_flags;
- retval = vfs_open(&oldfile->f_path, file, oldfile->f_cred);
- if (retval) {
- put_filp(file);
- return ERR_PTR(retval);
- }
-
- return file;
-}
-EXPORT_SYMBOL(filp_clone_open);
-
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
struct open_flags op;
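
The fs/open.c hunks above move the open bookkeeping out of the *opened bitmask and into file->f_mode: do_dentry_open() now sets FMODE_OPENED itself, finish_open() loses its int *opened argument, and finish_no_open() returns 0 instead of 1. A minimal sketch of an ->atomic_open() method under the new contract follows; demo_lookup() and demo_open() are hypothetical stand-ins, and the exact surrounding prototypes vary over the course of this series (an int *opened out-parameter, reduced to a "created" flag, is still visible in the nfs4 hunk above):

	static int demo_atomic_open(struct inode *dir, struct dentry *dentry,
				    struct file *file, unsigned int open_flag,
				    umode_t mode)
	{
		struct dentry *res = demo_lookup(dir, dentry);	/* hypothetical */

		if (res)
			/* hand the dentry back; now returns 0, not 1 */
			return finish_no_open(file, res);

		/* opens the file; success is visible as FMODE_OPENED */
		return finish_open(file, dentry, demo_open);
	}

The caller can then test file->f_mode & FMODE_OPENED to tell the two outcomes apart, instead of checking a FILE_OPENED bit in a side-channel int.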
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index db0b52187cbc..a5a2fe76568f 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -528,18 +528,19 @@ static long orangefs_ioctl(struct file *file, unsigned int cmd, unsigned long ar
return ret;
}
-static int orangefs_fault(struct vm_fault *vmf)
+static vm_fault_t orangefs_fault(struct vm_fault *vmf)
{
struct file *file = vmf->vma->vm_file;
- int rc;
- rc = orangefs_inode_getattr(file->f_mapping->host, 0, 1,
+ int ret;
+
+ ret = orangefs_inode_getattr(file->f_mapping->host, 0, 1,
STATX_SIZE);
- if (rc == -ESTALE)
- rc = -EIO;
- if (rc) {
- gossip_err("%s: orangefs_inode_getattr failed, "
- "rc:%d:.\n", __func__, rc);
- return rc;
+ if (ret == -ESTALE)
+ ret = -EIO;
+ if (ret) {
+ gossip_err("%s: orangefs_inode_getattr failed, ret:%d:.\n",
+ __func__, ret);
+ return VM_FAULT_SIGBUS;
}
return filemap_fault(vmf);
}
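
The orangefs_fault() hunk is an instance of the tree-wide vm_fault_t conversion: fault handlers now return VM_FAULT_* codes rather than -errno values, so the -EIO/-ESTALE result has to be translated, here to VM_FAULT_SIGBUS. The generic mapping looks roughly like this (demo_prepare() is a hypothetical stand-in for whatever setup step can fail):

	static vm_fault_t demo_fault(struct vm_fault *vmf)
	{
		int err = demo_prepare(vmf->vma->vm_file);	/* hypothetical */

		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		if (err)
			return VM_FAULT_SIGBUS;	/* any other error */
		return filemap_fault(vmf);
	}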
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 6e4d2af8f5bc..31932879b716 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -251,7 +251,6 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
{
int ret = -ENOENT;
struct inode *inode = path->dentry->d_inode;
- struct orangefs_inode_s *orangefs_inode = NULL;
gossip_debug(GOSSIP_INODE_DEBUG,
"orangefs_getattr: called on %pd\n",
@@ -262,8 +261,6 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
generic_fillattr(inode, stat);
/* override block size reported to stat */
- orangefs_inode = ORANGEFS_I(inode);
-
if (request_mask & STATX_SIZE)
stat->result_mask = STATX_BASIC_STATS;
else
diff --git a/fs/pipe.c b/fs/pipe.c
index 39d6f431da83..bdc5d3c0977d 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -741,54 +741,33 @@ fail_inode:
int create_pipe_files(struct file **res, int flags)
{
- int err;
struct inode *inode = get_pipe_inode();
struct file *f;
- struct path path;
if (!inode)
return -ENFILE;
- err = -ENOMEM;
- path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &empty_name);
- if (!path.dentry)
- goto err_inode;
- path.mnt = mntget(pipe_mnt);
-
- d_instantiate(path.dentry, inode);
-
- f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
+ f = alloc_file_pseudo(inode, pipe_mnt, "",
+ O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
+ &pipefifo_fops);
if (IS_ERR(f)) {
- err = PTR_ERR(f);
- goto err_dentry;
+ free_pipe_info(inode->i_pipe);
+ iput(inode);
+ return PTR_ERR(f);
}
- f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
f->private_data = inode->i_pipe;
- res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
+ res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
+ &pipefifo_fops);
if (IS_ERR(res[0])) {
- err = PTR_ERR(res[0]);
- goto err_file;
+ put_pipe_info(inode, inode->i_pipe);
+ fput(f);
+ return PTR_ERR(res[0]);
}
-
- path_get(&path);
res[0]->private_data = inode->i_pipe;
- res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
res[1] = f;
return 0;
-
-err_file:
- put_filp(f);
-err_dentry:
- free_pipe_info(inode->i_pipe);
- path_put(&path);
- return err;
-
-err_inode:
- free_pipe_info(inode->i_pipe);
- iput(inode);
- return err;
}
static int __do_pipe_flags(int *fd, struct file **files, int flags)
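
In create_pipe_files() the open-coded d_alloc_pseudo()/mntget()/d_instantiate()/alloc_file() sequence collapses into a single alloc_file_pseudo() call, and the read side is then derived from the write side with alloc_file_clone(), which shares the same path and inode. As a sketch, creating some other anonymous pseudo-file on an internal mount follows the same shape (demo_mnt and demo_fops are hypothetical; signatures as used in the hunk above):

	struct file *f;

	f = alloc_file_pseudo(inode, demo_mnt, "", O_RDWR, &demo_fops);
	if (IS_ERR(f))
		return PTR_ERR(f);

On failure the helpers no longer leave a half-initialized file behind, which is why the error paths above shrink to free_pipe_info()/iput()/fput().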
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 09c19ef91526..503086f7f7c1 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -50,12 +50,19 @@ config PSTORE_842_COMPRESS
help
This option enables 842 compression algorithm support.
+config PSTORE_ZSTD_COMPRESS
+ bool "zstd compression"
+ depends on PSTORE
+ select CRYPTO_ZSTD
+ help
+ This option enables zstd compression algorithm support.
+
config PSTORE_COMPRESS
def_bool y
depends on PSTORE
depends on PSTORE_DEFLATE_COMPRESS || PSTORE_LZO_COMPRESS || \
PSTORE_LZ4_COMPRESS || PSTORE_LZ4HC_COMPRESS || \
- PSTORE_842_COMPRESS
+ PSTORE_842_COMPRESS || PSTORE_ZSTD_COMPRESS
choice
prompt "Default pstore compression algorithm"
@@ -65,8 +72,8 @@ choice
This can be changed at boot with "pstore.compress=..." on
the kernel command line.
- Currently, pstore has support for 5 compression algorithms:
- deflate, lzo, lz4, lz4hc and 842.
+ Currently, pstore has support for 6 compression algorithms:
+ deflate, lzo, lz4, lz4hc, 842 and zstd.
The default compression algorithm is deflate.
@@ -85,6 +92,9 @@ choice
config PSTORE_842_COMPRESS_DEFAULT
bool "842" if PSTORE_842_COMPRESS
+ config PSTORE_ZSTD_COMPRESS_DEFAULT
+ bool "zstd" if PSTORE_ZSTD_COMPRESS
+
endchoice
config PSTORE_COMPRESS_DEFAULT
@@ -95,6 +105,7 @@ config PSTORE_COMPRESS_DEFAULT
default "lz4" if PSTORE_LZ4_COMPRESS_DEFAULT
default "lz4hc" if PSTORE_LZ4HC_COMPRESS_DEFAULT
default "842" if PSTORE_842_COMPRESS_DEFAULT
+ default "zstd" if PSTORE_ZSTD_COMPRESS_DEFAULT
config PSTORE_CONSOLE
bool "Log kernel console messages"
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index c238ab8ba31d..15e99d5a681d 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -34,6 +34,9 @@
#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
#include <linux/lz4.h>
#endif
+#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
+#include <linux/zstd.h>
+#endif
#include <linux/crypto.h>
#include <linux/string.h>
#include <linux/timer.h>
@@ -192,6 +195,13 @@ static int zbufsize_842(size_t size)
}
#endif
+#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
+static int zbufsize_zstd(size_t size)
+{
+ return ZSTD_compressBound(size);
+}
+#endif
+
static const struct pstore_zbackend *zbackend __ro_after_init;
static const struct pstore_zbackend zbackends[] = {
@@ -225,6 +235,12 @@ static const struct pstore_zbackend zbackends[] = {
.name = "842",
},
#endif
+#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
+ {
+ .zbufsize = zbufsize_zstd,
+ .name = "zstd",
+ },
+#endif
{ }
};
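
Wiring a new compressor into pstore takes four pieces, all visible above: a Kconfig entry that selects the crypto implementation, the header include, a zbufsize callback reporting the worst-case compressed size for a given input size, and a zbackends[] entry. The .name string is also what pstore hands to the crypto API when allocating the transform, so it has to match the algorithm name; a hypothetical backend "foo" would look like:

	#if IS_ENABLED(CONFIG_PSTORE_FOO_COMPRESS)	/* hypothetical */
	static int zbufsize_foo(size_t size)
	{
		/* worst-case bound for "foo", analogous to ZSTD_compressBound() */
		return foo_compress_bound(size);	/* hypothetical */
	}
	#endif

	/* ...and in zbackends[]: { .zbufsize = zbufsize_foo, .name = "foo" }, */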
diff --git a/fs/statfs.c b/fs/statfs.c
index 5b2a24f0f263..f0216629621d 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -335,7 +335,7 @@ static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstat
return 0;
}
-COMPAT_SYSCALL_DEFINE3(statfs64, const char __user *, pathname, compat_size_t, sz, struct compat_statfs64 __user *, buf)
+int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz, struct compat_statfs64 __user * buf)
{
struct kstatfs tmp;
int error;
@@ -349,7 +349,12 @@ COMPAT_SYSCALL_DEFINE3(statfs64, const char __user *, pathname, compat_size_t, s
return error;
}
-COMPAT_SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, compat_size_t, sz, struct compat_statfs64 __user *, buf)
+COMPAT_SYSCALL_DEFINE3(statfs64, const char __user *, pathname, compat_size_t, sz, struct compat_statfs64 __user *, buf)
+{
+ return kcompat_sys_statfs64(pathname, sz, buf);
+}
+
+int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct compat_statfs64 __user * buf)
{
struct kstatfs tmp;
int error;
@@ -363,6 +368,11 @@ COMPAT_SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, compat_size_t, sz, struct co
return error;
}
+COMPAT_SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, compat_size_t, sz, struct compat_statfs64 __user *, buf)
+{
+ return kcompat_sys_fstatfs64(fd, sz, buf);
+}
+
/*
* This is a copy of sys_ustat, just dealing with a structure layout.
* Given how simple this syscall is, that approach is more maintainable
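
The statfs64/fstatfs64 split above follows a recurring pattern in this tree: the body of a compat syscall moves into a plain kcompat_sys_*() function, and the COMPAT_SYSCALL_DEFINE3() entry point becomes a one-line wrapper around it. The point, presumably, is that in-kernel callers such as architecture-specific syscall wrappers can reach the implementation without going through the syscall glue; a hypothetical caller:

	long demo_arch_compat_statfs64(const char __user *pathname,
				       compat_size_t sz,
				       struct compat_statfs64 __user *buf)
	{
		return kcompat_sys_statfs64(pathname, sz, buf);
	}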
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 58eba92a0e41..feeae8081c22 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -40,6 +40,8 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
{
struct kernfs_node *parent, *kn;
+ kuid_t uid;
+ kgid_t gid;
BUG_ON(!kobj);
@@ -51,8 +53,11 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
if (!parent)
return -ENOENT;
+ kobject_get_ownership(kobj, &uid, &gid);
+
kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
+ S_IRWXU | S_IRUGO | S_IXUGO, uid, gid,
+ kobj, ns);
if (IS_ERR(kn)) {
if (PTR_ERR(kn) == -EEXIST)
sysfs_warn_dup(parent, kobject_name(kobj));
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 5c13f29bfcdb..0a7252aecfa5 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -245,7 +245,7 @@ static const struct kernfs_ops sysfs_bin_kfops_mmap = {
int sysfs_add_file_mode_ns(struct kernfs_node *parent,
const struct attribute *attr, bool is_bin,
- umode_t mode, const void *ns)
+ umode_t mode, kuid_t uid, kgid_t gid, const void *ns)
{
struct lock_class_key *key = NULL;
const struct kernfs_ops *ops;
@@ -302,8 +302,9 @@ int sysfs_add_file_mode_ns(struct kernfs_node *parent,
if (!attr->ignore_lockdep)
key = attr->key ?: (struct lock_class_key *)&attr->skey;
#endif
- kn = __kernfs_create_file(parent, attr->name, mode & 0777, size, ops,
- (void *)attr, ns, key);
+
+ kn = __kernfs_create_file(parent, attr->name, mode & 0777, uid, gid,
+ size, ops, (void *)attr, ns, key);
if (IS_ERR(kn)) {
if (PTR_ERR(kn) == -EEXIST)
sysfs_warn_dup(parent, attr->name);
@@ -312,12 +313,6 @@ int sysfs_add_file_mode_ns(struct kernfs_node *parent,
return 0;
}
-int sysfs_add_file(struct kernfs_node *parent, const struct attribute *attr,
- bool is_bin)
-{
- return sysfs_add_file_mode_ns(parent, attr, is_bin, attr->mode, NULL);
-}
-
/**
* sysfs_create_file_ns - create an attribute file for an object with custom ns
* @kobj: object we're creating for
@@ -327,9 +322,14 @@ int sysfs_add_file(struct kernfs_node *parent, const struct attribute *attr,
int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns)
{
+ kuid_t uid;
+ kgid_t gid;
+
BUG_ON(!kobj || !kobj->sd || !attr);
- return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode, ns);
+ kobject_get_ownership(kobj, &uid, &gid);
+ return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode,
+ uid, gid, ns);
}
EXPORT_SYMBOL_GPL(sysfs_create_file_ns);
@@ -358,6 +358,8 @@ int sysfs_add_file_to_group(struct kobject *kobj,
const struct attribute *attr, const char *group)
{
struct kernfs_node *parent;
+ kuid_t uid;
+ kgid_t gid;
int error;
if (group) {
@@ -370,7 +372,9 @@ int sysfs_add_file_to_group(struct kobject *kobj,
if (!parent)
return -ENOENT;
- error = sysfs_add_file(parent, attr, false);
+ kobject_get_ownership(kobj, &uid, &gid);
+ error = sysfs_add_file_mode_ns(parent, attr, false,
+ attr->mode, uid, gid, NULL);
kernfs_put(parent);
return error;
@@ -406,6 +410,50 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
EXPORT_SYMBOL_GPL(sysfs_chmod_file);
/**
+ * sysfs_break_active_protection - break "active" protection
+ * @kobj: The kernel object @attr is associated with.
+ * @attr: The attribute to break the "active" protection for.
+ *
+ * With sysfs, just like kernfs, deletion of an attribute is postponed until
+ * all active .show() and .store() callbacks have finished unless this function
+ * is called. Hence this function is useful in methods that implement
+ * self-deletion.
+ */
+struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ struct kernfs_node *kn;
+
+ kobject_get(kobj);
+ kn = kernfs_find_and_get(kobj->sd, attr->name);
+ if (kn)
+ kernfs_break_active_protection(kn);
+ return kn;
+}
+EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
+
+/**
+ * sysfs_unbreak_active_protection - restore "active" protection
+ * @kn: Pointer returned by sysfs_break_active_protection().
+ *
+ * Undo the effects of sysfs_break_active_protection(). Since this function
+ * calls kernfs_put() on the kernfs node that corresponds to the 'attr'
+ * argument passed to sysfs_break_active_protection(), and since that
+ * attribute may have been removed between the sysfs_break_active_protection()
+ * and sysfs_unbreak_active_protection() calls, it is not safe to access @kn
+ * after this function has returned.
+ */
+void sysfs_unbreak_active_protection(struct kernfs_node *kn)
+{
+ struct kobject *kobj = kn->parent->priv;
+
+ kernfs_unbreak_active_protection(kn);
+ kernfs_put(kn);
+ kobject_put(kobj);
+}
+EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection);
+
+/**
* sysfs_remove_file_ns - remove an object attribute with a custom ns tag
* @kobj: object we're acting for
* @attr: attribute descriptor
@@ -486,9 +534,14 @@ EXPORT_SYMBOL_GPL(sysfs_remove_file_from_group);
int sysfs_create_bin_file(struct kobject *kobj,
const struct bin_attribute *attr)
{
+ kuid_t uid;
+ kgid_t gid;
+
BUG_ON(!kobj || !kobj->sd || !attr);
- return sysfs_add_file(kobj->sd, &attr->attr, true);
+ kobject_get_ownership(kobj, &uid, &gid);
+ return sysfs_add_file_mode_ns(kobj->sd, &attr->attr, true,
+ attr->attr.mode, uid, gid, NULL);
}
EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
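
sysfs_break_active_protection() exists for attributes whose ->store() method removes the attribute itself: kernfs normally waits for all active callbacks to drain before completing a deletion, so deleting from inside a callback would deadlock. A minimal sketch of the intended call pattern (demo-named, using a raw kobject attribute for brevity):

	static ssize_t demo_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
	{
		struct kernfs_node *kn;

		kn = sysfs_break_active_protection(kobj, attr);
		if (kn) {
			/* safe now: active protection is suspended */
			sysfs_remove_file(kobj, attr);
			sysfs_unbreak_active_protection(kn);
		}
		return count;
	}

As the kernel-doc above warns, @kn must not be touched after sysfs_unbreak_active_protection() returns.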
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 4802ec0e1e3a..c7a716c4acc9 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -31,6 +31,7 @@ static void remove_files(struct kernfs_node *parent,
}
static int create_files(struct kernfs_node *parent, struct kobject *kobj,
+ kuid_t uid, kgid_t gid,
const struct attribute_group *grp, int update)
{
struct attribute *const *attr;
@@ -60,7 +61,7 @@ static int create_files(struct kernfs_node *parent, struct kobject *kobj,
mode &= SYSFS_PREALLOC | 0664;
error = sysfs_add_file_mode_ns(parent, *attr, false,
- mode, NULL);
+ mode, uid, gid, NULL);
if (unlikely(error))
break;
}
@@ -90,7 +91,8 @@ static int create_files(struct kernfs_node *parent, struct kobject *kobj,
mode &= SYSFS_PREALLOC | 0664;
error = sysfs_add_file_mode_ns(parent,
&(*bin_attr)->attr, true,
- mode, NULL);
+ mode,
+ uid, gid, NULL);
if (error)
break;
}
@@ -106,6 +108,8 @@ static int internal_create_group(struct kobject *kobj, int update,
const struct attribute_group *grp)
{
struct kernfs_node *kn;
+ kuid_t uid;
+ kgid_t gid;
int error;
BUG_ON(!kobj || (!update && !kobj->sd));
@@ -118,9 +122,11 @@ static int internal_create_group(struct kobject *kobj, int update,
kobj->name, grp->name ?: "");
return -EINVAL;
}
+ kobject_get_ownership(kobj, &uid, &gid);
if (grp->name) {
- kn = kernfs_create_dir(kobj->sd, grp->name,
- S_IRWXU | S_IRUGO | S_IXUGO, kobj);
+ kn = kernfs_create_dir_ns(kobj->sd, grp->name,
+ S_IRWXU | S_IRUGO | S_IXUGO,
+ uid, gid, kobj, NULL);
if (IS_ERR(kn)) {
if (PTR_ERR(kn) == -EEXIST)
sysfs_warn_dup(kobj->sd, grp->name);
@@ -129,7 +135,7 @@ static int internal_create_group(struct kobject *kobj, int update,
} else
kn = kobj->sd;
kernfs_get(kn);
- error = create_files(kn, kobj, grp, update);
+ error = create_files(kn, kobj, uid, gid, grp, update);
if (error) {
if (grp->name)
kernfs_remove(kn);
@@ -281,6 +287,8 @@ int sysfs_merge_group(struct kobject *kobj,
const struct attribute_group *grp)
{
struct kernfs_node *parent;
+ kuid_t uid;
+ kgid_t gid;
int error = 0;
struct attribute *const *attr;
int i;
@@ -289,8 +297,11 @@ int sysfs_merge_group(struct kobject *kobj,
if (!parent)
return -ENOENT;
+ kobject_get_ownership(kobj, &uid, &gid);
+
for ((i = 0, attr = grp->attrs); *attr && !error; (++i, ++attr))
- error = sysfs_add_file(parent, *attr, false);
+ error = sysfs_add_file_mode_ns(parent, *attr, false,
+ (*attr)->mode, uid, gid, NULL);
if (error) {
while (--i >= 0)
kernfs_remove_by_name(parent, (*--attr)->name);
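
All of the sysfs call sites above obtain uid/gid via kobject_get_ownership() and thread them down to kernfs, so that a kobject's ktype can decide who owns its files (the motivating case in this series appears to be network devices visible inside user namespaces). A sketch of the provider side, with an arbitrary example uid/gid of 1000:

	static void demo_get_ownership(struct kobject *kobj,
				       kuid_t *uid, kgid_t *gid)
	{
		/* kobject_get_ownership() preloads GLOBAL_ROOT_UID/GID;
		 * override only what is needed.
		 */
		*uid = make_kuid(&init_user_ns, 1000);	/* example value */
		*gid = make_kgid(&init_user_ns, 1000);	/* example value */
	}

	static struct kobj_type demo_ktype = {
		.sysfs_ops	= &kobj_sysfs_ops,
		.get_ownership	= demo_get_ownership,
	};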
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index d098e015fcc9..0050cc0c0236 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -27,11 +27,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name);
/*
* file.c
*/
-int sysfs_add_file(struct kernfs_node *parent,
- const struct attribute *attr, bool is_bin);
int sysfs_add_file_mode_ns(struct kernfs_node *parent,
const struct attribute *attr, bool is_bin,
- umode_t amode, const void *ns);
+ umode_t amode, kuid_t uid, kgid_t gid,
+ const void *ns);
/*
* symlink.c
diff --git a/fs/timerfd.c b/fs/timerfd.c
index cdad49da3ff7..d69ad801eb80 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -66,7 +66,7 @@ static void timerfd_triggered(struct timerfd_ctx *ctx)
spin_lock_irqsave(&ctx->wqh.lock, flags);
ctx->expired = 1;
ctx->ticks++;
- wake_up_locked(&ctx->wqh);
+ wake_up_locked_poll(&ctx->wqh, EPOLLIN);
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
}
@@ -107,7 +107,7 @@ void timerfd_clock_was_set(void)
if (ctx->moffs != moffs) {
ctx->moffs = KTIME_MAX;
ctx->ticks++;
- wake_up_locked(&ctx->wqh);
+ wake_up_locked_poll(&ctx->wqh, EPOLLIN);
}
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
}
@@ -345,7 +345,7 @@ static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg
spin_lock_irq(&ctx->wqh.lock);
if (!timerfd_canceled(ctx)) {
ctx->ticks = ticks;
- wake_up_locked(&ctx->wqh);
+ wake_up_locked_poll(&ctx->wqh, EPOLLIN);
} else
ret = -ECANCELED;
spin_unlock_irq(&ctx->wqh.lock);
@@ -533,8 +533,8 @@ static int do_timerfd_gettime(int ufd, struct itimerspec64 *t)
}
SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
- const struct itimerspec __user *, utmr,
- struct itimerspec __user *, otmr)
+ const struct __kernel_itimerspec __user *, utmr,
+ struct __kernel_itimerspec __user *, otmr)
{
struct itimerspec64 new, old;
int ret;
@@ -550,7 +550,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
return ret;
}
-SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
+SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct __kernel_itimerspec __user *, otmr)
{
struct itimerspec64 kotmr;
int ret = do_timerfd_gettime(ufd, &kotmr);
@@ -559,7 +559,7 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
return put_itimerspec64(&kotmr, otmr) ? -EFAULT : 0;
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
const struct compat_itimerspec __user *, utmr,
struct compat_itimerspec __user *, otmr)
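
The three timerfd wakeups now use wake_up_locked_poll() so that EPOLLIN travels in the wakeup key; keyed waiters such as epoll can then filter on the event mask instead of being woken unconditionally. Illustrative only:

	wake_up_locked(&ctx->wqh);		 /* old: wakes with no key */
	wake_up_locked_poll(&ctx->wqh, EPOLLIN); /* new: key carries EPOLLIN */

The itimerspec changes in the same diff are a separate theme: switching the syscall ABI types to __kernel_itimerspec and gating the compat entry points on CONFIG_COMPAT_32BIT_TIME, as part of the y2038 work.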
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 06f37ddd2997..58cc2414992b 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -602,8 +602,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
if (unlikely(!fi)) {
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return err;
}
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -694,8 +693,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err);
if (!fi) {
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
goto out;
}
set_nlink(inode, 2);
@@ -713,8 +711,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (!fi) {
clear_nlink(inode);
mark_inode_dirty(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
goto out;
}
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -1041,8 +1038,7 @@ out:
out_no_entry:
up_write(&iinfo->i_data_sem);
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
goto out;
}
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index e1ef0f0a1353..02c0a4be4212 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -343,8 +343,7 @@ cg_found:
fail_remove_inode:
mutex_unlock(&sbi->s_lock);
clear_nlink(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
UFSD("EXIT (FAILED): err %d\n", err);
return ERR_PTR(err);
failed:
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index d5f43ba76c59..9ef40f100415 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -43,8 +43,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
return 0;
}
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return err;
}
@@ -142,8 +141,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
out_fail:
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return err;
}
@@ -198,8 +196,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
out_fail:
inode_dec_link_count(inode);
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput (inode);
+ discard_new_inode(inode);
out_dir:
inode_dec_link_count(dir);
return err;
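
The udf and ufs hunks are all the same mechanical conversion: error paths that used to unlock_new_inode() and then iput() a never-completed inode now call discard_new_inode(), a helper introduced around this series that combines the two; as far as the conversion shows, the point is to have a single primitive for destroying an inode that failed setup while still I_NEW. The recurring shape:

	if (err) {
		inode_dec_link_count(inode);
		discard_new_inode(inode);	/* was: unlock_new_inode(); iput(); */
		return err;
	}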
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 2f3f75a7f180..7f96bdadc372 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -158,6 +158,7 @@ xfs-$(CONFIG_XFS_QUOTA) += scrub/quota.o
ifeq ($(CONFIG_XFS_ONLINE_REPAIR),y)
xfs-y += $(addprefix scrub/, \
agheader_repair.o \
+ bitmap.o \
repair.o \
)
endif
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index fecd187fcf2c..e701ebc36c06 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -248,7 +248,8 @@ __xfs_ag_resv_init(
/* Create a per-AG block reservation. */
int
xfs_ag_resv_init(
- struct xfs_perag *pag)
+ struct xfs_perag *pag,
+ struct xfs_trans *tp)
{
struct xfs_mount *mp = pag->pag_mount;
xfs_agnumber_t agno = pag->pag_agno;
@@ -260,11 +261,11 @@ xfs_ag_resv_init(
if (pag->pag_meta_resv.ar_asked == 0) {
ask = used = 0;
- error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
+ error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask, &used);
if (error)
goto out;
- error = xfs_finobt_calc_reserves(mp, agno, &ask, &used);
+ error = xfs_finobt_calc_reserves(mp, tp, agno, &ask, &used);
if (error)
goto out;
@@ -282,7 +283,7 @@ xfs_ag_resv_init(
mp->m_inotbt_nores = true;
- error = xfs_refcountbt_calc_reserves(mp, agno, &ask,
+ error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask,
&used);
if (error)
goto out;
@@ -298,7 +299,7 @@ xfs_ag_resv_init(
if (pag->pag_rmapbt_resv.ar_asked == 0) {
ask = used = 0;
- error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used);
+ error = xfs_rmapbt_calc_reserves(mp, tp, agno, &ask, &used);
if (error)
goto out;
@@ -309,7 +310,7 @@ xfs_ag_resv_init(
#ifdef DEBUG
/* need to read in the AGF for the ASSERT below to work */
- error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0);
+ error = xfs_alloc_pagf_init(pag->pag_mount, tp, pag->pag_agno, 0);
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_ag_resv.h b/fs/xfs/libxfs/xfs_ag_resv.h
index 4619b554ee90..c0352edc8e41 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.h
+++ b/fs/xfs/libxfs/xfs_ag_resv.h
@@ -7,7 +7,7 @@
#define __XFS_AG_RESV_H__
int xfs_ag_resv_free(struct xfs_perag *pag);
-int xfs_ag_resv_init(struct xfs_perag *pag);
+int xfs_ag_resv_init(struct xfs_perag *pag, struct xfs_trans *tp);
bool xfs_ag_resv_critical(struct xfs_perag *pag, enum xfs_ag_resv_type type);
xfs_extlen_t xfs_ag_resv_needed(struct xfs_perag *pag,
@@ -28,7 +28,7 @@ xfs_ag_resv_rmapbt_alloc(
struct xfs_mount *mp,
xfs_agnumber_t agno)
{
- struct xfs_alloc_arg args = {0};
+ struct xfs_alloc_arg args = { NULL };
struct xfs_perag *pag;
args.len = 1;
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 75dbdc14c45f..e1c0c0d2f1b0 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -2198,12 +2198,12 @@ xfs_agfl_reset(
*/
STATIC void
xfs_defer_agfl_block(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_fsblock_t agbno,
struct xfs_owner_info *oinfo)
{
+ struct xfs_mount *mp = tp->t_mountp;
struct xfs_extent_free_item *new; /* new element */
ASSERT(xfs_bmap_free_item_zone != NULL);
@@ -2216,7 +2216,7 @@ xfs_defer_agfl_block(
trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
- xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
+ xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
}
/*
@@ -2323,16 +2323,8 @@ xfs_alloc_fix_freelist(
if (error)
goto out_agbp_relse;
- /* defer agfl frees if dfops is provided */
- if (tp->t_agfl_dfops) {
- xfs_defer_agfl_block(mp, tp->t_agfl_dfops, args->agno,
- bno, &targs.oinfo);
- } else {
- error = xfs_free_agfl_block(tp, args->agno, bno, agbp,
- &targs.oinfo);
- if (error)
- goto out_agbp_relse;
- }
+ /* defer agfl frees */
+ xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
}
targs.tp = tp;
@@ -2755,9 +2747,6 @@ xfs_alloc_read_agf(
pag->pagf_levels[XFS_BTNUM_RMAPi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
- spin_lock_init(&pag->pagb_lock);
- pag->pagb_count = 0;
- pag->pagb_tree = RB_ROOT;
pag->pagf_init = 1;
pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
}
@@ -2784,16 +2773,16 @@ xfs_alloc_read_agf(
*/
int /* error */
xfs_alloc_vextent(
- xfs_alloc_arg_t *args) /* allocation argument structure */
+ struct xfs_alloc_arg *args) /* allocation argument structure */
{
- xfs_agblock_t agsize; /* allocation group size */
- int error;
- int flags; /* XFS_ALLOC_FLAG_... locking flags */
- xfs_mount_t *mp; /* mount structure pointer */
- xfs_agnumber_t sagno; /* starting allocation group number */
- xfs_alloctype_t type; /* input allocation type */
- int bump_rotor = 0;
- xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
+ xfs_agblock_t agsize; /* allocation group size */
+ int error;
+ int flags; /* XFS_ALLOC_FLAG_... locking flags */
+ struct xfs_mount *mp; /* mount structure pointer */
+ xfs_agnumber_t sagno; /* starting allocation group number */
+ xfs_alloctype_t type; /* input allocation type */
+ int bump_rotor = 0;
+ xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
mp = args->mp;
type = args->otype = args->type;
@@ -2914,7 +2903,7 @@ xfs_alloc_vextent(
* locking of AGF, which might cause deadlock.
*/
if (++(args->agno) == mp->m_sb.sb_agcount) {
- if (args->firstblock != NULLFSBLOCK)
+ if (args->tp->t_firstblock != NULLFSBLOCK)
args->agno = sagno;
else
args->agno = 0;
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index e716c993ac4c..00cd5ec4cb6b 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -74,7 +74,6 @@ typedef struct xfs_alloc_arg {
int datatype; /* mask defining data type treatment */
char wasdel; /* set if allocation was prev delayed */
char wasfromfl; /* set if allocation is from freelist */
- xfs_fsblock_t firstblock; /* io first block allocated */
struct xfs_owner_info oinfo; /* owner of blocks being allocated */
enum xfs_ag_resv_type resv; /* block reservation to use */
} xfs_alloc_arg_t;
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 99590f61d624..1e671d4eb6fa 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -202,9 +202,7 @@ xfs_attr_set(
struct xfs_mount *mp = dp->i_mount;
struct xfs_buf *leaf_bp = NULL;
struct xfs_da_args args;
- struct xfs_defer_ops dfops;
struct xfs_trans_res tres;
- xfs_fsblock_t firstblock;
int rsvd = (flags & ATTR_ROOT) != 0;
int error, err2, local;
@@ -219,8 +217,6 @@ xfs_attr_set(
args.value = value;
args.valuelen = valuelen;
- args.firstblock = &firstblock;
- args.dfops = &dfops;
args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
args.total = xfs_attr_calc_size(&args, &local);
@@ -315,21 +311,18 @@ xfs_attr_set(
* It won't fit in the shortform, transform to a leaf block.
* GROT: another possible req'mt for a double-split btree op.
*/
- xfs_defer_init(args.dfops, args.firstblock);
error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
if (error)
- goto out_defer_cancel;
+ goto out;
/*
* Prevent the leaf buffer from being unlocked so that a
* concurrent AIL push cannot grab the half-baked leaf
* buffer and run into problems with the write verifier.
*/
xfs_trans_bhold(args.trans, leaf_bp);
- xfs_defer_bjoin(args.dfops, leaf_bp);
- xfs_defer_ijoin(args.dfops, dp);
- error = xfs_defer_finish(&args.trans, args.dfops);
+ error = xfs_defer_finish(&args.trans);
if (error)
- goto out_defer_cancel;
+ goto out;
/*
* Commit the leaf transformation. We'll need another (linked)
@@ -369,8 +362,6 @@ xfs_attr_set(
return error;
-out_defer_cancel:
- xfs_defer_cancel(&dfops);
out:
if (leaf_bp)
xfs_trans_brelse(args.trans, leaf_bp);
@@ -392,8 +383,6 @@ xfs_attr_remove(
{
struct xfs_mount *mp = dp->i_mount;
struct xfs_da_args args;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t firstblock;
int error;
XFS_STATS_INC(mp, xs_attr_remove);
@@ -405,9 +394,6 @@ xfs_attr_remove(
if (error)
return error;
- args.firstblock = &firstblock;
- args.dfops = &dfops;
-
/*
* we have no control over the attribute names that userspace passes us
* to remove, so we have to allow the name lookup prior to attribute
@@ -536,11 +522,12 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
* if bmap_one_block() says there is only one block (ie: no remote blks).
*/
STATIC int
-xfs_attr_leaf_addname(xfs_da_args_t *args)
+xfs_attr_leaf_addname(
+ struct xfs_da_args *args)
{
- xfs_inode_t *dp;
- struct xfs_buf *bp;
- int retval, error, forkoff;
+ struct xfs_inode *dp;
+ struct xfs_buf *bp;
+ int retval, error, forkoff;
trace_xfs_attr_leaf_addname(args);
@@ -598,14 +585,12 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* Commit that transaction so that the node_addname() call
* can manage its own transactions.
*/
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_node(args);
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ return error;
/*
* Commit the current trans (including the inode) and start
@@ -687,15 +672,13 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ return error;
}
/*
@@ -711,7 +694,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
}
return error;
out_defer_cancel:
- xfs_defer_cancel(args->dfops);
+ xfs_defer_cancel(args->trans);
return error;
}
@@ -722,11 +705,12 @@ out_defer_cancel:
* if bmap_one_block() says there is only one block (ie: no remote blks).
*/
STATIC int
-xfs_attr_leaf_removename(xfs_da_args_t *args)
+xfs_attr_leaf_removename(
+ struct xfs_da_args *args)
{
- xfs_inode_t *dp;
- struct xfs_buf *bp;
- int error, forkoff;
+ struct xfs_inode *dp;
+ struct xfs_buf *bp;
+ int error, forkoff;
trace_xfs_attr_leaf_removename(args);
@@ -751,19 +735,17 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ return error;
}
return 0;
out_defer_cancel:
- xfs_defer_cancel(args->dfops);
+ xfs_defer_cancel(args->trans);
return error;
}
@@ -814,13 +796,14 @@ xfs_attr_leaf_get(xfs_da_args_t *args)
* add a whole extra layer of confusion on top of that.
*/
STATIC int
-xfs_attr_node_addname(xfs_da_args_t *args)
+xfs_attr_node_addname(
+ struct xfs_da_args *args)
{
- xfs_da_state_t *state;
- xfs_da_state_blk_t *blk;
- xfs_inode_t *dp;
- xfs_mount_t *mp;
- int retval, error;
+ struct xfs_da_state *state;
+ struct xfs_da_state_blk *blk;
+ struct xfs_inode *dp;
+ struct xfs_mount *mp;
+ int retval, error;
trace_xfs_attr_node_addname(args);
@@ -879,14 +862,12 @@ restart:
*/
xfs_da_state_free(state);
state = NULL;
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_node(args);
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ goto out;
/*
* Commit the node conversion and start the next
@@ -905,14 +886,12 @@ restart:
* in the index/blkno/rmtblkno/rmtblkcnt fields and
* in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields.
*/
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_split(state);
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ goto out;
} else {
/*
* Addition succeeded, update Btree hashvals.
@@ -1003,14 +982,12 @@ restart:
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_join(state);
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ goto out;
}
/*
@@ -1037,7 +1014,7 @@ out:
return error;
return retval;
out_defer_cancel:
- xfs_defer_cancel(args->dfops);
+ xfs_defer_cancel(args->trans);
goto out;
}
@@ -1049,13 +1026,14 @@ out_defer_cancel:
* the root node (a special case of an intermediate node).
*/
STATIC int
-xfs_attr_node_removename(xfs_da_args_t *args)
+xfs_attr_node_removename(
+ struct xfs_da_args *args)
{
- xfs_da_state_t *state;
- xfs_da_state_blk_t *blk;
- xfs_inode_t *dp;
- struct xfs_buf *bp;
- int retval, error, forkoff;
+ struct xfs_da_state *state;
+ struct xfs_da_state_blk *blk;
+ struct xfs_inode *dp;
+ struct xfs_buf *bp;
+ int retval, error, forkoff;
trace_xfs_attr_node_removename(args);
@@ -1127,14 +1105,12 @@ xfs_attr_node_removename(xfs_da_args_t *args)
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_join(state);
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ goto out;
/*
* Commit the Btree join operation and start a new trans.
*/
@@ -1159,15 +1135,13 @@ xfs_attr_node_removename(xfs_da_args_t *args)
goto out;
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ goto out;
} else
xfs_trans_brelse(args->trans, bp);
}
@@ -1177,7 +1151,7 @@ out:
xfs_da_state_free(state);
return error;
out_defer_cancel:
- xfs_defer_cancel(args->dfops);
+ xfs_defer_cancel(args->trans);
goto out;
}
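
The xfs_attr.c conversion is the template for the rest of the XFS changes in this diff: struct xfs_defer_ops and the firstblock cursor stop being caller-owned locals threaded through every function and instead live in the transaction itself (tp->t_firstblock, plus XFS_TRANS_LOWMODE in tp->t_flags). The before/after shape, using only names that appear in this diff:

	/* old */
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstblock;

	xfs_defer_init(&dfops, &firstblock);
	/* ... xfs_defer_add(&dfops, type, &item->list); ... */
	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		xfs_defer_cancel(&dfops);

	/* new */
	/* ... xfs_defer_add(tp, type, &item->list); ... */
	error = xfs_defer_finish(&tp);
	if (error)
		xfs_defer_cancel(tp);

One subtlety visible above: several paths that used to goto out_defer_cancel after a failed xfs_defer_finish() now just return the error; with the deferred items attached to the transaction, cancelling the transaction higher up the stack covers them.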
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 76e90046731c..6fc5425b1474 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -242,8 +242,9 @@ xfs_attr3_leaf_verify(
struct xfs_attr3_icleaf_hdr ichdr;
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_attr_leafblock *leaf = bp->b_addr;
- struct xfs_perag *pag = bp->b_pag;
struct xfs_attr_leaf_entry *entries;
+ uint16_t end;
+ int i;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
@@ -268,7 +269,7 @@ xfs_attr3_leaf_verify(
* because we may have transitioned an empty shortform attr to a leaf
* if the attr didn't fit in shortform.
*/
- if (pag && pag->pagf_init && ichdr.count == 0)
+ if (!xfs_log_in_recovery(mp) && ichdr.count == 0)
return __this_address;
/*
@@ -289,6 +290,26 @@ xfs_attr3_leaf_verify(
/* XXX: need to range check rest of attr header values */
/* XXX: hash order check? */
+ /*
+ * Quickly check the freemap information. Attribute data has to be
+ * aligned to 4-byte boundaries, and likewise for the free space.
+ */
+ for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
+ if (ichdr.freemap[i].base > mp->m_attr_geo->blksize)
+ return __this_address;
+ if (ichdr.freemap[i].base & 0x3)
+ return __this_address;
+ if (ichdr.freemap[i].size > mp->m_attr_geo->blksize)
+ return __this_address;
+ if (ichdr.freemap[i].size & 0x3)
+ return __this_address;
+ end = ichdr.freemap[i].base + ichdr.freemap[i].size;
+ if (end < ichdr.freemap[i].base)
+ return __this_address;
+ if (end > mp->m_attr_geo->blksize)
+ return __this_address;
+ }
+
return NULL;
}
@@ -506,7 +527,7 @@ xfs_attr_shortform_create(xfs_da_args_t *args)
{
xfs_attr_sf_hdr_t *hdr;
xfs_inode_t *dp;
- xfs_ifork_t *ifp;
+ struct xfs_ifork *ifp;
trace_xfs_attr_sf_create(args);
@@ -541,7 +562,7 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
int i, offset, size;
xfs_mount_t *mp;
xfs_inode_t *dp;
- xfs_ifork_t *ifp;
+ struct xfs_ifork *ifp;
trace_xfs_attr_sf_add(args);
@@ -682,7 +703,7 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
xfs_attr_shortform_t *sf;
xfs_attr_sf_entry_t *sfe;
int i;
- xfs_ifork_t *ifp;
+ struct xfs_ifork *ifp;
trace_xfs_attr_sf_lookup(args);
@@ -747,18 +768,18 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
*/
int
xfs_attr_shortform_to_leaf(
- struct xfs_da_args *args,
- struct xfs_buf **leaf_bp)
+ struct xfs_da_args *args,
+ struct xfs_buf **leaf_bp)
{
- xfs_inode_t *dp;
- xfs_attr_shortform_t *sf;
- xfs_attr_sf_entry_t *sfe;
- xfs_da_args_t nargs;
- char *tmpbuffer;
- int error, i, size;
- xfs_dablk_t blkno;
- struct xfs_buf *bp;
- xfs_ifork_t *ifp;
+ struct xfs_inode *dp;
+ struct xfs_attr_shortform *sf;
+ struct xfs_attr_sf_entry *sfe;
+ struct xfs_da_args nargs;
+ char *tmpbuffer;
+ int error, i, size;
+ xfs_dablk_t blkno;
+ struct xfs_buf *bp;
+ struct xfs_ifork *ifp;
trace_xfs_attr_sf_to_leaf(args);
@@ -802,8 +823,6 @@ xfs_attr_shortform_to_leaf(
memset((char *)&nargs, 0, sizeof(nargs));
nargs.dp = dp;
nargs.geo = args->geo;
- nargs.firstblock = args->firstblock;
- nargs.dfops = args->dfops;
nargs.total = args->total;
nargs.whichfork = XFS_ATTR_FORK;
nargs.trans = args->trans;
@@ -1006,8 +1025,6 @@ xfs_attr3_leaf_to_shortform(
memset((char *)&nargs, 0, sizeof(nargs));
nargs.geo = args->geo;
nargs.dp = dp;
- nargs.firstblock = args->firstblock;
- nargs.dfops = args->dfops;
nargs.total = args->total;
nargs.whichfork = XFS_ATTR_FORK;
nargs.trans = args->trans;
@@ -1570,17 +1587,10 @@ xfs_attr3_leaf_rebalance(
*/
swap = 0;
if (xfs_attr3_leaf_order(blk1->bp, &ichdr1, blk2->bp, &ichdr2)) {
- struct xfs_da_state_blk *tmp_blk;
- struct xfs_attr3_icleaf_hdr tmp_ichdr;
-
- tmp_blk = blk1;
- blk1 = blk2;
- blk2 = tmp_blk;
+ swap(blk1, blk2);
- /* struct copies to swap them rather than reconverting */
- tmp_ichdr = ichdr1;
- ichdr1 = ichdr2;
- ichdr2 = tmp_ichdr;
+ /* swap structures rather than reconverting them */
+ swap(ichdr1, ichdr2);
leaf1 = blk1->bp->b_addr;
leaf2 = blk2->bp->b_addr;
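
Besides the swap() cleanup, xfs_attr3_leaf_verify() above gains bounds checks on the freemap. The fields are 16-bit on disk, so base + size can wrap; the verifier therefore computes end and rejects end < base before comparing against the block size. Illustrative only, same logic as the new checks:

	static bool demo_freemap_entry_ok(uint16_t base, uint16_t size,
					  unsigned int blksize)
	{
		uint16_t end = base + size;	/* may wrap around */

		if (base > blksize || size > blksize)
			return false;
		if ((base | size) & 0x3)	/* 4-byte alignment */
			return false;
		if (end < base || end > blksize)
			return false;
		return true;
	}

With a 64k attribute block, for example, base=0xfff0 and size=0x20 pass every other test but wrap to end=0x0010, which only the end < base comparison catches.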
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index bf2e0371149b..af094063e402 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -480,17 +480,15 @@ xfs_attr_rmtval_set(
* extent and then crash then the block may not contain the
* correct metadata after log recovery occurs.
*/
- xfs_defer_init(args->dfops, args->firstblock);
nmap = 1;
error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
- blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
- args->total, &map, &nmap, args->dfops);
+ blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map,
+ &nmap);
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ return error;
ASSERT(nmap == 1);
ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
@@ -522,7 +520,6 @@ xfs_attr_rmtval_set(
ASSERT(blkcnt > 0);
- xfs_defer_init(args->dfops, args->firstblock);
nmap = 1;
error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
blkcnt, &map, &nmap,
@@ -557,8 +554,7 @@ xfs_attr_rmtval_set(
ASSERT(valuelen == 0);
return 0;
out_defer_cancel:
- xfs_defer_cancel(args->dfops);
- args->trans = NULL;
+ xfs_defer_cancel(args->trans);
return error;
}
@@ -626,16 +622,13 @@ xfs_attr_rmtval_remove(
blkcnt = args->rmtblkcnt;
done = 0;
while (!done) {
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
- XFS_BMAPI_ATTRFORK, 1, args->firstblock,
- args->dfops, &done);
+ XFS_BMAPI_ATTRFORK, 1, &done);
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, args->dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ return error;
/*
* Close out trans and start the next one in the chain.
@@ -646,7 +639,6 @@ xfs_attr_rmtval_remove(
}
return 0;
out_defer_cancel:
- xfs_defer_cancel(args->dfops);
- args->trans = NULL;
+ xfs_defer_cancel(args->trans);
return error;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 7205268b30bc..2760314fdf7f 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -326,7 +326,7 @@ xfs_bmap_check_leaf_extents(
xfs_buf_t *bp; /* buffer for "block" */
int error; /* error return value */
xfs_extnum_t i=0, j; /* index into the extents list */
- xfs_ifork_t *ifp; /* fork structure */
+ struct xfs_ifork *ifp; /* fork structure */
int level; /* btree level, for checking */
xfs_mount_t *mp; /* file system mount structure */
__be64 *pp; /* pointer to block address */
@@ -533,8 +533,7 @@ xfs_bmap_validate_ret(
*/
void
__xfs_bmap_add_free(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
xfs_fsblock_t bno,
xfs_filblks_t len,
struct xfs_owner_info *oinfo,
@@ -542,8 +541,9 @@ __xfs_bmap_add_free(
{
struct xfs_extent_free_item *new; /* new element */
#ifdef DEBUG
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
+ struct xfs_mount *mp = tp->t_mountp;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
ASSERT(bno != NULLFSBLOCK);
ASSERT(len > 0);
@@ -566,9 +566,10 @@ __xfs_bmap_add_free(
else
xfs_rmap_skip_owner_update(&new->xefi_oinfo);
new->xefi_skip_discard = skip_discard;
- trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
- XFS_FSB_TO_AGBNO(mp, bno), len);
- xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
+ trace_xfs_bmap_free_defer(tp->t_mountp,
+ XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
+ XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
+ xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
/*
@@ -594,7 +595,7 @@ xfs_bmap_btree_to_extents(
xfs_fsblock_t cbno; /* child block number */
xfs_buf_t *cbp; /* child block's buffer */
int error; /* error return value */
- xfs_ifork_t *ifp; /* inode fork data */
+ struct xfs_ifork *ifp; /* inode fork data */
xfs_mount_t *mp; /* mount point structure */
__be64 *pp; /* ptr to block address */
struct xfs_btree_block *rblock;/* root btree block */
@@ -624,7 +625,7 @@ xfs_bmap_btree_to_extents(
if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
return error;
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
- xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
+ xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
ip->i_d.di_nblocks--;
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
xfs_trans_binval(tp, cbp);
@@ -644,25 +645,23 @@ xfs_bmap_btree_to_extents(
*/
STATIC int /* error */
xfs_bmap_extents_to_btree(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_fsblock_t *firstblock, /* first-block-allocated */
- struct xfs_defer_ops *dfops, /* blocks freed in xaction */
- xfs_btree_cur_t **curp, /* cursor returned to caller */
+ struct xfs_trans *tp, /* transaction pointer */
+ struct xfs_inode *ip, /* incore inode pointer */
+ struct xfs_btree_cur **curp, /* cursor returned to caller */
int wasdel, /* converting a delayed alloc */
int *logflagsp, /* inode logging flags */
int whichfork) /* data or attr fork */
{
struct xfs_btree_block *ablock; /* allocated (child) bt block */
- xfs_buf_t *abp; /* buffer for ablock */
- xfs_alloc_arg_t args; /* allocation arguments */
- xfs_bmbt_rec_t *arp; /* child record pointer */
+ struct xfs_buf *abp; /* buffer for ablock */
+ struct xfs_alloc_arg args; /* allocation arguments */
+ struct xfs_bmbt_rec *arp; /* child record pointer */
struct xfs_btree_block *block; /* btree root block */
- xfs_btree_cur_t *cur; /* bmap btree cursor */
+ struct xfs_btree_cur *cur; /* bmap btree cursor */
int error; /* error return value */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_bmbt_key_t *kp; /* root block key pointer */
- xfs_mount_t *mp; /* mount structure */
+ struct xfs_ifork *ifp; /* inode fork pointer */
+ struct xfs_bmbt_key *kp; /* root block key pointer */
+ struct xfs_mount *mp; /* mount structure */
xfs_bmbt_ptr_t *pp; /* root block address pointer */
struct xfs_iext_cursor icur;
struct xfs_bmbt_irec rec;
@@ -690,8 +689,6 @@ xfs_bmap_extents_to_btree(
* Need a cursor. Can't allocate until bb_level is filled in.
*/
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
/*
* Convert to a btree with two levels, one record in root.
@@ -701,45 +698,43 @@ xfs_bmap_extents_to_btree(
args.tp = tp;
args.mp = mp;
xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
- args.firstblock = *firstblock;
- if (*firstblock == NULLFSBLOCK) {
+ if (tp->t_firstblock == NULLFSBLOCK) {
args.type = XFS_ALLOCTYPE_START_BNO;
args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
- } else if (dfops->dop_low) {
+ } else if (tp->t_flags & XFS_TRANS_LOWMODE) {
args.type = XFS_ALLOCTYPE_START_BNO;
- args.fsbno = *firstblock;
+ args.fsbno = tp->t_firstblock;
} else {
args.type = XFS_ALLOCTYPE_NEAR_BNO;
- args.fsbno = *firstblock;
+ args.fsbno = tp->t_firstblock;
}
args.minlen = args.maxlen = args.prod = 1;
args.wasdel = wasdel;
*logflagsp = 0;
if ((error = xfs_alloc_vextent(&args))) {
- xfs_iroot_realloc(ip, -1, whichfork);
ASSERT(ifp->if_broot == NULL);
- XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
- return error;
+ goto err1;
}
if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
- xfs_iroot_realloc(ip, -1, whichfork);
ASSERT(ifp->if_broot == NULL);
- XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
- return -ENOSPC;
+ error = -ENOSPC;
+ goto err1;
}
/*
* Allocation can't fail, the space was reserved.
*/
- ASSERT(*firstblock == NULLFSBLOCK ||
- args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
- *firstblock = cur->bc_private.b.firstblock = args.fsbno;
+ ASSERT(tp->t_firstblock == NULLFSBLOCK ||
+ args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
+ tp->t_firstblock = args.fsbno;
cur->bc_private.b.allocated++;
ip->i_d.di_nblocks++;
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
+ if (!abp) {
+ error = -ENOSPC;
+ goto err2;
+ }
/*
* Fill in the child block.
*/
@@ -779,6 +774,15 @@ xfs_bmap_extents_to_btree(
*curp = cur;
*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
return 0;
+
+err2:
+ xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
+err1:
+ xfs_iroot_realloc(ip, -1, whichfork);
+ XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+ xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+
+ return error;
}
/*
@@ -812,7 +816,6 @@ STATIC int /* error */
xfs_bmap_local_to_extents(
xfs_trans_t *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
- xfs_fsblock_t *firstblock, /* first block allocated in xaction */
xfs_extlen_t total, /* total blocks needed by transaction */
int *logflagsp, /* inode logging flags */
int whichfork,
@@ -823,7 +826,7 @@ xfs_bmap_local_to_extents(
{
int error = 0;
int flags; /* logging flags returned */
- xfs_ifork_t *ifp; /* inode fork pointer */
+ struct xfs_ifork *ifp; /* inode fork pointer */
xfs_alloc_arg_t args; /* allocation arguments */
xfs_buf_t *bp; /* buffer for extent block */
struct xfs_bmbt_irec rec;
@@ -850,16 +853,15 @@ xfs_bmap_local_to_extents(
args.tp = tp;
args.mp = ip->i_mount;
xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
- args.firstblock = *firstblock;
/*
* Allocate a block. We know we need only one, since the
* file currently fits in an inode.
*/
- if (*firstblock == NULLFSBLOCK) {
+ if (tp->t_firstblock == NULLFSBLOCK) {
args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
args.type = XFS_ALLOCTYPE_START_BNO;
} else {
- args.fsbno = *firstblock;
+ args.fsbno = tp->t_firstblock;
args.type = XFS_ALLOCTYPE_NEAR_BNO;
}
args.total = total;
@@ -871,7 +873,7 @@ xfs_bmap_local_to_extents(
/* Can't fail, the space was reserved. */
ASSERT(args.fsbno != NULLFSBLOCK);
ASSERT(args.len == 1);
- *firstblock = args.fsbno;
+ tp->t_firstblock = args.fsbno;
bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
/*
@@ -917,8 +919,6 @@ STATIC int /* error */
xfs_bmap_add_attrfork_btree(
xfs_trans_t *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
- xfs_fsblock_t *firstblock, /* first block allocated */
- struct xfs_defer_ops *dfops, /* blocks to free at commit */
int *flags) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* btree cursor */
@@ -931,8 +931,6 @@ xfs_bmap_add_attrfork_btree(
*flags |= XFS_ILOG_DBROOT;
else {
cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
- cur->bc_private.b.dfops = dfops;
- cur->bc_private.b.firstblock = *firstblock;
error = xfs_bmbt_lookup_first(cur, &stat);
if (error)
goto error0;
@@ -944,7 +942,6 @@ xfs_bmap_add_attrfork_btree(
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
return -ENOSPC;
}
- *firstblock = cur->bc_private.b.firstblock;
cur->bc_private.b.allocated = 0;
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
}
@@ -959,10 +956,8 @@ error0:
*/
STATIC int /* error */
xfs_bmap_add_attrfork_extents(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_fsblock_t *firstblock, /* first block allocated */
- struct xfs_defer_ops *dfops, /* blocks to free at commit */
+ struct xfs_trans *tp, /* transaction pointer */
+ struct xfs_inode *ip, /* incore inode pointer */
int *flags) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* bmap btree cursor */
@@ -971,12 +966,11 @@ xfs_bmap_add_attrfork_extents(
if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
return 0;
cur = NULL;
- error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
- flags, XFS_DATA_FORK);
+ error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
+ XFS_DATA_FORK);
if (cur) {
cur->bc_private.b.allocated = 0;
- xfs_btree_del_cursor(cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
}
return error;
}
@@ -994,13 +988,11 @@ xfs_bmap_add_attrfork_extents(
*/
STATIC int /* error */
xfs_bmap_add_attrfork_local(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_fsblock_t *firstblock, /* first block allocated */
- struct xfs_defer_ops *dfops, /* blocks to free at commit */
+ struct xfs_trans *tp, /* transaction pointer */
+ struct xfs_inode *ip, /* incore inode pointer */
int *flags) /* inode logging flags */
{
- xfs_da_args_t dargs; /* args for dir/attr code */
+ struct xfs_da_args dargs; /* args for dir/attr code */
if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
return 0;
@@ -1009,8 +1001,6 @@ xfs_bmap_add_attrfork_local(
memset(&dargs, 0, sizeof(dargs));
dargs.geo = ip->i_mount->m_dir_geo;
dargs.dp = ip;
- dargs.firstblock = firstblock;
- dargs.dfops = dfops;
dargs.total = dargs.geo->fsbcount;
dargs.whichfork = XFS_DATA_FORK;
dargs.trans = tp;
@@ -1018,8 +1008,8 @@ xfs_bmap_add_attrfork_local(
}
if (S_ISLNK(VFS_I(ip)->i_mode))
- return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
- flags, XFS_DATA_FORK,
+ return xfs_bmap_local_to_extents(tp, ip, 1, flags,
+ XFS_DATA_FORK,
xfs_symlink_local_to_remote);
/* should only be called for types that support local format data */
@@ -1037,8 +1027,6 @@ xfs_bmap_add_attrfork(
int size, /* space new attribute needs */
int rsvd) /* xact may use reserved blks */
{
- xfs_fsblock_t firstblock; /* 1st block/ag allocated */
- struct xfs_defer_ops dfops; /* freed extent records */
xfs_mount_t *mp; /* mount structure */
xfs_trans_t *tp; /* transaction pointer */
int blks; /* space reservation */
@@ -1104,19 +1092,15 @@ xfs_bmap_add_attrfork(
ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
ip->i_afp->if_flags = XFS_IFEXTENTS;
logflags = 0;
- xfs_defer_init(&dfops, &firstblock);
switch (ip->i_d.di_format) {
case XFS_DINODE_FMT_LOCAL:
- error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
- &logflags);
+ error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
break;
case XFS_DINODE_FMT_EXTENTS:
- error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
- &dfops, &logflags);
+ error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
break;
case XFS_DINODE_FMT_BTREE:
- error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
- &logflags);
+ error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
break;
default:
error = 0;
@@ -1125,7 +1109,7 @@ xfs_bmap_add_attrfork(
if (logflags)
xfs_trans_log_inode(tp, ip, logflags);
if (error)
- goto bmap_cancel;
+ goto trans_cancel;
if (!xfs_sb_version_hasattr(&mp->m_sb) ||
(!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
bool log_sb = false;
@@ -1144,15 +1128,10 @@ xfs_bmap_add_attrfork(
xfs_log_sb(tp);
}
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto bmap_cancel;
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
-bmap_cancel:
- xfs_defer_cancel(&dfops);
trans_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -1501,7 +1480,7 @@ xfs_bmap_one_block(
xfs_inode_t *ip, /* incore inode */
int whichfork) /* data or attr fork */
{
- xfs_ifork_t *ifp; /* inode fork pointer */
+ struct xfs_ifork *ifp; /* inode fork pointer */
int rval; /* return value */
xfs_bmbt_irec_t s; /* internal version of extent */
struct xfs_iext_cursor icur;
@@ -1539,7 +1518,7 @@ xfs_bmap_add_extent_delay_real(
struct xfs_bmbt_irec *new = &bma->got;
int error; /* error return value */
int i; /* temp state */
- xfs_ifork_t *ifp; /* inode fork pointer */
+ struct xfs_ifork *ifp; /* inode fork pointer */
xfs_fileoff_t new_endoff; /* end offset of new entry */
xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
/* left is 0, right is 1, prev is 2 */
@@ -1809,7 +1788,6 @@ xfs_bmap_add_extent_delay_real(
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->dfops,
&bma->cur, 1, &tmp_rval, whichfork);
rval |= tmp_rval;
if (error)
@@ -1887,8 +1865,7 @@ xfs_bmap_add_extent_delay_real(
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->dfops, &bma->cur, 1,
- &tmp_rval, whichfork);
+ &bma->cur, 1, &tmp_rval, whichfork);
rval |= tmp_rval;
if (error)
goto done;
@@ -1968,8 +1945,7 @@ xfs_bmap_add_extent_delay_real(
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->dfops, &bma->cur,
- 1, &tmp_rval, whichfork);
+ &bma->cur, 1, &tmp_rval, whichfork);
rval |= tmp_rval;
if (error)
goto done;
@@ -1994,8 +1970,7 @@ xfs_bmap_add_extent_delay_real(
/* add reverse mapping unless caller opted out */
if (!(bma->flags & XFS_BMAPI_NORMAP)) {
- error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip,
- whichfork, new);
+ error = xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
if (error)
goto done;
}
@@ -2006,8 +1981,8 @@ xfs_bmap_add_extent_delay_real(
ASSERT(bma->cur == NULL);
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->dfops, &bma->cur,
- da_old > 0, &tmp_logflags, whichfork);
+ &bma->cur, da_old > 0, &tmp_logflags,
+ whichfork);
bma->logflags |= tmp_logflags;
if (error)
goto done;
@@ -2046,14 +2021,12 @@ xfs_bmap_add_extent_unwritten_real(
struct xfs_iext_cursor *icur,
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
- xfs_fsblock_t *first, /* pointer to firstblock variable */
- struct xfs_defer_ops *dfops, /* list of extents to be freed */
int *logflagsp) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* btree cursor */
int error; /* error return value */
int i; /* temp state */
- xfs_ifork_t *ifp; /* inode fork pointer */
+ struct xfs_ifork *ifp; /* inode fork pointer */
xfs_fileoff_t new_endoff; /* end offset of new entry */
xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
/* left is 0, right is 1, prev is 2 */
@@ -2479,7 +2452,7 @@ xfs_bmap_add_extent_unwritten_real(
}
/* update reverse mappings */
- error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new);
+ error = xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
if (error)
goto done;
@@ -2488,8 +2461,8 @@ xfs_bmap_add_extent_unwritten_real(
int tmp_logflags; /* partial log flag return val */
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
- 0, &tmp_logflags, whichfork);
+ error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
+ &tmp_logflags, whichfork);
*logflagsp |= tmp_logflags;
if (error)
goto done;
@@ -2520,7 +2493,7 @@ xfs_bmap_add_extent_hole_delay(
struct xfs_iext_cursor *icur,
xfs_bmbt_irec_t *new) /* new data to add to file extents */
{
- xfs_ifork_t *ifp; /* inode fork pointer */
+ struct xfs_ifork *ifp; /* inode fork pointer */
xfs_bmbt_irec_t left; /* left neighbor extent entry */
xfs_filblks_t newlen=0; /* new indirect size */
xfs_filblks_t oldlen=0; /* old indirect size */
@@ -2660,8 +2633,6 @@ xfs_bmap_add_extent_hole_real(
struct xfs_iext_cursor *icur,
struct xfs_btree_cur **curp,
struct xfs_bmbt_irec *new,
- xfs_fsblock_t *first,
- struct xfs_defer_ops *dfops,
int *logflagsp,
int flags)
{
@@ -2842,7 +2813,7 @@ xfs_bmap_add_extent_hole_real(
/* add reverse mapping unless caller opted out */
if (!(flags & XFS_BMAPI_NORMAP)) {
- error = xfs_rmap_map_extent(mp, dfops, ip, whichfork, new);
+ error = xfs_rmap_map_extent(tp, ip, whichfork, new);
if (error)
goto done;
}
@@ -2852,8 +2823,8 @@ xfs_bmap_add_extent_hole_real(
int tmp_logflags; /* partial log flag return val */
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, curp,
- 0, &tmp_logflags, whichfork);
+ error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
+ &tmp_logflags, whichfork);
*logflagsp |= tmp_logflags;
cur = *curp;
if (error)
@@ -3071,10 +3042,11 @@ xfs_bmap_adjacent(
XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
mp = ap->ip->i_mount;
- nullfb = *ap->firstblock == NULLFSBLOCK;
+ nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
rt = XFS_IS_REALTIME_INODE(ap->ip) &&
xfs_alloc_is_userdata(ap->datatype);
- fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
+ fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
+ ap->tp->t_firstblock);
/*
* If allocating at eof, and there's a previous real block,
* try to use its last block as our starting point.
@@ -3432,8 +3404,9 @@ xfs_bmap_btalloc(
}
- nullfb = *ap->firstblock == NULLFSBLOCK;
- fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
+ nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
+ fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
+ ap->tp->t_firstblock);
if (nullfb) {
if (xfs_alloc_is_userdata(ap->datatype) &&
xfs_inode_is_filestream(ap->ip)) {
@@ -3444,7 +3417,7 @@ xfs_bmap_btalloc(
ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
}
} else
- ap->blkno = *ap->firstblock;
+ ap->blkno = ap->tp->t_firstblock;
xfs_bmap_adjacent(ap);
@@ -3455,7 +3428,7 @@ xfs_bmap_btalloc(
if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
;
else
- ap->blkno = *ap->firstblock;
+ ap->blkno = ap->tp->t_firstblock;
/*
* Normal allocation, done through xfs_alloc_vextent.
*/
@@ -3468,7 +3441,6 @@ xfs_bmap_btalloc(
/* Trim the allocation back to the maximum an AG can fit. */
args.maxlen = min(ap->length, mp->m_ag_max_usable);
- args.firstblock = *ap->firstblock;
blen = 0;
if (nullfb) {
/*
@@ -3483,7 +3455,7 @@ xfs_bmap_btalloc(
error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
if (error)
return error;
- } else if (ap->dfops->dop_low) {
+ } else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
if (xfs_inode_is_filestream(ap->ip))
args.type = XFS_ALLOCTYPE_FIRST_AG;
else
@@ -3518,7 +3490,7 @@ xfs_bmap_btalloc(
* is >= the stripe unit and the allocation offset is
* at the end of file.
*/
- if (!ap->dfops->dop_low && ap->aeof) {
+ if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) {
if (!ap->offset) {
args.alignment = stripe_align;
atype = args.type;
@@ -3610,20 +3582,20 @@ xfs_bmap_btalloc(
args.total = ap->minlen;
if ((error = xfs_alloc_vextent(&args)))
return error;
- ap->dfops->dop_low = true;
+ ap->tp->t_flags |= XFS_TRANS_LOWMODE;
}
if (args.fsbno != NULLFSBLOCK) {
/*
* check the allocation happened at the same or higher AG than
* the first block that was allocated.
*/
- ASSERT(*ap->firstblock == NULLFSBLOCK ||
- XFS_FSB_TO_AGNO(mp, *ap->firstblock) <=
+ ASSERT(ap->tp->t_firstblock == NULLFSBLOCK ||
+ XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <=
XFS_FSB_TO_AGNO(mp, args.fsbno));
ap->blkno = args.fsbno;
- if (*ap->firstblock == NULLFSBLOCK)
- *ap->firstblock = args.fsbno;
+ if (ap->tp->t_firstblock == NULLFSBLOCK)
+ ap->tp->t_firstblock = args.fsbno;
ASSERT(nullfb || fb_agno <= args.agno);
ap->length = args.len;
/*
@@ -3788,8 +3760,7 @@ xfs_bmapi_update_map(
mval[-1].br_startblock != HOLESTARTBLOCK &&
mval->br_startblock == mval[-1].br_startblock +
mval[-1].br_blockcount &&
- ((flags & XFS_BMAPI_IGSTATE) ||
- mval[-1].br_state == mval->br_state)) {
+ mval[-1].br_state == mval->br_state) {
ASSERT(mval->br_startoff ==
mval[-1].br_startoff + mval[-1].br_blockcount);
mval[-1].br_blockcount += mval->br_blockcount;
@@ -3834,7 +3805,7 @@ xfs_bmapi_read(
ASSERT(*nmap >= 1);
ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
- XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
+ XFS_BMAPI_COWFORK)));
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
if (unlikely(XFS_TEST_ERROR(
@@ -4079,15 +4050,10 @@ xfs_bmapi_allocate(
if (error)
return error;
- if (bma->cur)
- bma->cur->bc_private.b.firstblock = *bma->firstblock;
if (bma->blkno == NULLFSBLOCK)
return 0;
- if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
+ if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur)
bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
- bma->cur->bc_private.b.firstblock = *bma->firstblock;
- bma->cur->bc_private.b.dfops = bma->dfops;
- }
/*
* Bump the number of extents we've allocated
* in this call.
@@ -4122,8 +4088,7 @@ xfs_bmapi_allocate(
else
error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
whichfork, &bma->icur, &bma->cur, &bma->got,
- bma->firstblock, bma->dfops, &bma->logflags,
- bma->flags);
+ &bma->logflags, bma->flags);
bma->logflags |= tmp_logflags;
if (error)
@@ -4174,8 +4139,6 @@ xfs_bmapi_convert_unwritten(
if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
bma->ip, whichfork);
- bma->cur->bc_private.b.firstblock = *bma->firstblock;
- bma->cur->bc_private.b.dfops = bma->dfops;
}
mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
@@ -4192,8 +4155,7 @@ xfs_bmapi_convert_unwritten(
}
error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
- &bma->icur, &bma->cur, mval, bma->firstblock,
- bma->dfops, &tmp_logflags);
+ &bma->icur, &bma->cur, mval, &tmp_logflags);
/*
* Log the inode core unconditionally in the unwritten extent conversion
* path because the conversion might not have done so (e.g., if the
@@ -4231,12 +4193,6 @@ xfs_bmapi_convert_unwritten(
* extent state if necessary. Detailed behaviour is controlled by the flags
* parameter. Only allocates blocks from a single allocation group, to avoid
* locking problems.
- *
- * The returned value in "firstblock" from the first call in a transaction
- * must be remembered and presented to subsequent calls in "firstblock".
- * An upper bound for the number of blocks to be allocated is supplied to
- * the first call in "total"; if no allocation group has that many free
- * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
*/
int
xfs_bmapi_write(
@@ -4245,12 +4201,9 @@ xfs_bmapi_write(
xfs_fileoff_t bno, /* starting file offs. mapped */
xfs_filblks_t len, /* length to map in file */
int flags, /* XFS_BMAPI_... */
- xfs_fsblock_t *firstblock, /* first allocated block
- controls a.g. for allocs */
xfs_extlen_t total, /* total blocks needed */
struct xfs_bmbt_irec *mval, /* output: map values */
- int *nmap, /* i/o: mval size/count */
- struct xfs_defer_ops *dfops) /* i/o: list extents to free */
+ int *nmap) /* i/o: mval size/count */
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp;
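The hunk above drops firstblock and dfops from the prototype; allocation state now rides on the transaction. A minimal caller sketch under the new convention (hypothetical helper name; assumes tp->t_firstblock was initialized to NULLFSBLOCK when the transaction was allocated):

	static int
	xfs_example_map_one(
		struct xfs_trans	*tp,
		struct xfs_inode	*ip,
		xfs_fileoff_t		bno,
		xfs_filblks_t		len)
	{
		struct xfs_bmbt_irec	map;
		int			nmap = 1;

		/* firstblock and the deferred free list now live in tp */
		return xfs_bmapi_write(tp, ip, bno, len, 0, 0, &map, &nmap);
	}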
@@ -4279,7 +4232,6 @@ xfs_bmapi_write(
ASSERT(*nmap >= 1);
ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
- ASSERT(!(flags & XFS_BMAPI_IGSTATE));
ASSERT(tp != NULL ||
(flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
(XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
@@ -4315,7 +4267,7 @@ xfs_bmapi_write(
XFS_STATS_INC(mp, xs_blk_mapw);
- if (*firstblock == NULLFSBLOCK) {
+ if (!tp || tp->t_firstblock == NULLFSBLOCK) {
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
else
@@ -4342,8 +4294,6 @@ xfs_bmapi_write(
bma.ip = ip;
bma.total = total;
bma.datatype = 0;
- bma.dfops = dfops;
- bma.firstblock = firstblock;
while (bno < end && n < *nmap) {
bool need_alloc = false, wasdelay = false;
@@ -4419,7 +4369,7 @@ xfs_bmapi_write(
* the refcount btree for orphan recovery.
*/
if (whichfork == XFS_COW_FORK) {
- error = xfs_refcount_alloc_cow_extent(mp, dfops,
+ error = xfs_refcount_alloc_cow_extent(tp,
bma.blkno, bma.length);
if (error)
goto error0;
@@ -4493,15 +4443,7 @@ error0:
xfs_trans_log_inode(tp, ip, bma.logflags);
if (bma.cur) {
- if (!error) {
- ASSERT(*firstblock == NULLFSBLOCK ||
- XFS_FSB_TO_AGNO(mp, *firstblock) <=
- XFS_FSB_TO_AGNO(mp,
- bma.cur->bc_private.b.firstblock));
- *firstblock = bma.cur->bc_private.b.firstblock;
- }
- xfs_btree_del_cursor(bma.cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(bma.cur, error);
}
if (!error)
xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
@@ -4516,13 +4458,11 @@ xfs_bmapi_remap(
xfs_fileoff_t bno,
xfs_filblks_t len,
xfs_fsblock_t startblock,
- struct xfs_defer_ops *dfops,
int flags)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp;
struct xfs_btree_cur *cur = NULL;
- xfs_fsblock_t firstblock = NULLFSBLOCK;
struct xfs_bmbt_irec got;
struct xfs_iext_cursor icur;
int whichfork = xfs_bmapi_whichfork(flags);
@@ -4565,8 +4505,6 @@ xfs_bmapi_remap(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.firstblock = firstblock;
- cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
}
@@ -4579,7 +4517,7 @@ xfs_bmapi_remap(
got.br_state = XFS_EXT_NORM;
error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
- &cur, &got, &firstblock, dfops, &logflags, flags);
+ &cur, &got, &logflags, flags);
if (error)
goto error0;
@@ -4599,10 +4537,8 @@ error0:
if (logflags)
xfs_trans_log_inode(tp, ip, logflags);
- if (cur) {
- xfs_btree_del_cursor(cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
- }
+ if (cur)
+ xfs_btree_del_cursor(cur, error);
return error;
}
@@ -4898,7 +4834,6 @@ xfs_bmap_del_extent_real(
xfs_inode_t *ip, /* incore inode pointer */
xfs_trans_t *tp, /* current transaction pointer */
struct xfs_iext_cursor *icur,
- struct xfs_defer_ops *dfops, /* list of extents to be freed */
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *del, /* data to remove from extents */
int *logflagsp, /* inode logging flags */
@@ -4913,7 +4848,7 @@ xfs_bmap_del_extent_real(
struct xfs_bmbt_irec got; /* current extent entry */
xfs_fileoff_t got_endoff; /* first offset past got */
int i; /* temp state */
- xfs_ifork_t *ifp; /* inode fork pointer */
+ struct xfs_ifork *ifp; /* inode fork pointer */
xfs_mount_t *mp; /* mount structure */
xfs_filblks_t nblks; /* quota/sb block count */
xfs_bmbt_irec_t new; /* new record to be inserted */
@@ -5104,7 +5039,7 @@ xfs_bmap_del_extent_real(
}
/* remove reverse mapping */
- error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
+ error = xfs_rmap_unmap_extent(tp, ip, whichfork, del);
if (error)
goto done;
@@ -5113,11 +5048,11 @@ xfs_bmap_del_extent_real(
*/
if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
- error = xfs_refcount_decrease_extent(mp, dfops, del);
+ error = xfs_refcount_decrease_extent(tp, del);
if (error)
goto done;
} else {
- __xfs_bmap_add_free(mp, dfops, del->br_startblock,
+ __xfs_bmap_add_free(tp, del->br_startblock,
del->br_blockcount, NULL,
(bflags & XFS_BMAPI_NODISCARD) ||
del->br_state == XFS_EXT_UNWRITTEN);
@@ -5148,26 +5083,23 @@ done:
*/
int /* error */
__xfs_bunmapi(
- xfs_trans_t *tp, /* transaction pointer */
+ struct xfs_trans *tp, /* transaction pointer */
struct xfs_inode *ip, /* incore inode */
xfs_fileoff_t start, /* first file offset deleted */
xfs_filblks_t *rlen, /* i/o: amount remaining */
int flags, /* misc flags */
- xfs_extnum_t nexts, /* number of extents max */
- xfs_fsblock_t *firstblock, /* first allocated block
- controls a.g. for allocs */
- struct xfs_defer_ops *dfops) /* i/o: deferred updates */
+ xfs_extnum_t nexts) /* number of extents max */
{
- xfs_btree_cur_t *cur; /* bmap btree cursor */
- xfs_bmbt_irec_t del; /* extent being deleted */
+ struct xfs_btree_cur *cur; /* bmap btree cursor */
+ struct xfs_bmbt_irec del; /* extent being deleted */
int error; /* error return value */
xfs_extnum_t extno; /* extent number in list */
- xfs_bmbt_irec_t got; /* current extent record */
- xfs_ifork_t *ifp; /* inode fork pointer */
+ struct xfs_bmbt_irec got; /* current extent record */
+ struct xfs_ifork *ifp; /* inode fork pointer */
int isrt; /* freeing in rt area */
int logflags; /* transaction logging flags */
xfs_extlen_t mod; /* rt extent offset */
- xfs_mount_t *mp; /* mount structure */
+ struct xfs_mount *mp; /* mount structure */
int tmp_logflags; /* partial logging flags */
int wasdel; /* was a delayed alloc extent */
int whichfork; /* data or attribute fork */
@@ -5230,8 +5162,6 @@ __xfs_bunmapi(
if (ifp->if_flags & XFS_IFBROOT) {
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
} else
cur = NULL;
@@ -5347,7 +5277,7 @@ __xfs_bunmapi(
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent_unwritten_real(tp, ip,
whichfork, &icur, &cur, &del,
- firstblock, dfops, &logflags);
+ &logflags);
if (error)
goto error0;
goto nodelete;
@@ -5404,8 +5334,7 @@ __xfs_bunmapi(
prev.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent_unwritten_real(tp,
ip, whichfork, &icur, &cur,
- &prev, firstblock, dfops,
- &logflags);
+ &prev, &logflags);
if (error)
goto error0;
goto nodelete;
@@ -5414,8 +5343,7 @@ __xfs_bunmapi(
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent_unwritten_real(tp,
ip, whichfork, &icur, &cur,
- &del, firstblock, dfops,
- &logflags);
+ &del, &logflags);
if (error)
goto error0;
goto nodelete;
@@ -5427,8 +5355,8 @@ delete:
error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
&got, &del);
} else {
- error = xfs_bmap_del_extent_real(ip, tp, &icur, dfops,
- cur, &del, &tmp_logflags, whichfork,
+ error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
+ &del, &tmp_logflags, whichfork,
flags);
logflags |= tmp_logflags;
}
@@ -5462,8 +5390,8 @@ nodelete:
*/
if (xfs_bmap_needs_btree(ip, whichfork)) {
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
- &cur, 0, &tmp_logflags, whichfork);
+ error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
+ &tmp_logflags, whichfork);
logflags |= tmp_logflags;
if (error)
goto error0;
@@ -5501,12 +5429,9 @@ error0:
if (logflags)
xfs_trans_log_inode(tp, ip, logflags);
if (cur) {
- if (!error) {
- *firstblock = cur->bc_private.b.firstblock;
+ if (!error)
cur->bc_private.b.allocated = 0;
- }
- xfs_btree_del_cursor(cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
}
return error;
}
@@ -5520,14 +5445,11 @@ xfs_bunmapi(
xfs_filblks_t len,
int flags,
xfs_extnum_t nexts,
- xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops,
int *done)
{
int error;
- error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
- dfops);
+ error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
*done = (len == 0);
return error;
}
@@ -5570,6 +5492,7 @@ xfs_bmse_can_merge(
*/
STATIC int
xfs_bmse_merge(
+ struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
xfs_fileoff_t shift, /* shift fsb */
@@ -5577,8 +5500,7 @@ xfs_bmse_merge(
struct xfs_bmbt_irec *got, /* extent to shift */
struct xfs_bmbt_irec *left, /* preceding extent */
struct xfs_btree_cur *cur,
- int *logflags, /* output */
- struct xfs_defer_ops *dfops)
+ int *logflags) /* output */
{
struct xfs_bmbt_irec new;
xfs_filblks_t blockcount;
@@ -5634,23 +5556,23 @@ done:
&new);
/* update reverse mapping. rmap functions merge the rmaps for us */
- error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got);
+ error = xfs_rmap_unmap_extent(tp, ip, whichfork, got);
if (error)
return error;
memcpy(&new, got, sizeof(new));
new.br_startoff = left->br_startoff + left->br_blockcount;
- return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new);
+ return xfs_rmap_map_extent(tp, ip, whichfork, &new);
}
static int
xfs_bmap_shift_update_extent(
+ struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
struct xfs_iext_cursor *icur,
struct xfs_bmbt_irec *got,
struct xfs_btree_cur *cur,
int *logflags,
- struct xfs_defer_ops *dfops,
xfs_fileoff_t startoff)
{
struct xfs_mount *mp = ip->i_mount;
@@ -5678,10 +5600,10 @@ xfs_bmap_shift_update_extent(
got);
/* update reverse mapping */
- error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &prev);
+ error = xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
if (error)
return error;
- return xfs_rmap_map_extent(mp, dfops, ip, whichfork, got);
+ return xfs_rmap_map_extent(tp, ip, whichfork, got);
}
int
@@ -5690,9 +5612,7 @@ xfs_bmap_collapse_extents(
struct xfs_inode *ip,
xfs_fileoff_t *next_fsb,
xfs_fileoff_t offset_shift_fsb,
- bool *done,
- xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops)
+ bool *done)
{
int whichfork = XFS_DATA_FORK;
struct xfs_mount *mp = ip->i_mount;
@@ -5725,8 +5645,6 @@ xfs_bmap_collapse_extents(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
}
@@ -5745,9 +5663,9 @@ xfs_bmap_collapse_extents(
}
if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
- error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
- &icur, &got, &prev, cur, &logflags,
- dfops);
+ error = xfs_bmse_merge(tp, ip, whichfork,
+ offset_shift_fsb, &icur, &got, &prev,
+ cur, &logflags);
if (error)
goto del_cursor;
goto done;
@@ -5759,8 +5677,8 @@ xfs_bmap_collapse_extents(
}
}
- error = xfs_bmap_shift_update_extent(ip, whichfork, &icur, &got, cur,
- &logflags, dfops, new_startoff);
+ error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
+ cur, &logflags, new_startoff);
if (error)
goto del_cursor;
@@ -5773,8 +5691,7 @@ done:
*next_fsb = got.br_startoff;
del_cursor:
if (cur)
- xfs_btree_del_cursor(cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
if (logflags)
xfs_trans_log_inode(tp, ip, logflags);
return error;
@@ -5813,9 +5730,7 @@ xfs_bmap_insert_extents(
xfs_fileoff_t *next_fsb,
xfs_fileoff_t offset_shift_fsb,
bool *done,
- xfs_fileoff_t stop_fsb,
- xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops)
+ xfs_fileoff_t stop_fsb)
{
int whichfork = XFS_DATA_FORK;
struct xfs_mount *mp = ip->i_mount;
@@ -5848,8 +5763,6 @@ xfs_bmap_insert_extents(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
}
@@ -5891,8 +5804,8 @@ xfs_bmap_insert_extents(
WARN_ON_ONCE(1);
}
- error = xfs_bmap_shift_update_extent(ip, whichfork, &icur, &got, cur,
- &logflags, dfops, new_startoff);
+ error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
+ cur, &logflags, new_startoff);
if (error)
goto del_cursor;
@@ -5905,8 +5818,7 @@ xfs_bmap_insert_extents(
*next_fsb = got.br_startoff;
del_cursor:
if (cur)
- xfs_btree_del_cursor(cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
if (logflags)
xfs_trans_log_inode(tp, ip, logflags);
return error;
@@ -5922,9 +5834,7 @@ STATIC int
xfs_bmap_split_extent_at(
struct xfs_trans *tp,
struct xfs_inode *ip,
- xfs_fileoff_t split_fsb,
- xfs_fsblock_t *firstfsb,
- struct xfs_defer_ops *dfops)
+ xfs_fileoff_t split_fsb)
{
int whichfork = XFS_DATA_FORK;
struct xfs_btree_cur *cur = NULL;
@@ -5973,8 +5883,6 @@ xfs_bmap_split_extent_at(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.firstblock = *firstfsb;
- cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
@@ -6018,16 +5926,15 @@ xfs_bmap_split_extent_at(
int tmp_logflags; /* partial log flag return val */
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
- &cur, 0, &tmp_logflags, whichfork);
+ error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
+ &tmp_logflags, whichfork);
logflags |= tmp_logflags;
}
del_cursor:
if (cur) {
cur->bc_private.b.allocated = 0;
- xfs_btree_del_cursor(cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
}
if (logflags)
@@ -6042,8 +5949,6 @@ xfs_bmap_split_extent(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t firstfsb;
int error;
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
@@ -6054,21 +5959,13 @@ xfs_bmap_split_extent(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_defer_init(&dfops, &firstfsb);
-
- error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
- &firstfsb, &dfops);
- if (error)
- goto out;
-
- error = xfs_defer_finish(&tp, &dfops);
+ error = xfs_bmap_split_extent_at(tp, ip, split_fsb);
if (error)
goto out;
return xfs_trans_commit(tp);
out:
- xfs_defer_cancel(&dfops);
xfs_trans_cancel(tp);
return error;
}
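With the dfops embedded in the transaction, the explicit init/finish/cancel triad disappears from this function. The simplification assumes the commit path in this series drains any remaining deferred work itself (not shown in this hunk); a rough sketch of that assumed behaviour, as a hypothetical wrapper:

	static int
	xfs_example_commit(
		struct xfs_trans	*tp)
	{
		int			error;

		/* assumed: drain tp->t_dfops before the final commit */
		if (!list_empty(&tp->t_dfops)) {
			error = xfs_defer_finish_noroll(&tp);
			if (error) {
				/* noroll already cancelled pending items */
				xfs_trans_cancel(tp);
				return error;
			}
		}
		return xfs_trans_commit(tp);
	}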
@@ -6085,20 +5982,18 @@ xfs_bmap_is_update_needed(
/* Record a bmap intent. */
static int
__xfs_bmap_add(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
enum xfs_bmap_intent_type type,
struct xfs_inode *ip,
int whichfork,
struct xfs_bmbt_irec *bmap)
{
- int error;
struct xfs_bmap_intent *bi;
- trace_xfs_bmap_defer(mp,
- XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
+ trace_xfs_bmap_defer(tp->t_mountp,
+ XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
type,
- XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
+ XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
ip->i_ino, whichfork,
bmap->br_startoff,
bmap->br_blockcount,
@@ -6111,44 +6006,34 @@ __xfs_bmap_add(
bi->bi_whichfork = whichfork;
bi->bi_bmap = *bmap;
- error = xfs_defer_ijoin(dfops, bi->bi_owner);
- if (error) {
- kmem_free(bi);
- return error;
- }
-
- xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
+ xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
return 0;
}
/* Map an extent into a file. */
int
xfs_bmap_map_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
struct xfs_inode *ip,
struct xfs_bmbt_irec *PREV)
{
if (!xfs_bmap_is_update_needed(PREV))
return 0;
- return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
- XFS_DATA_FORK, PREV);
+ return __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
}
/* Unmap an extent out of a file. */
int
xfs_bmap_unmap_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
struct xfs_inode *ip,
struct xfs_bmbt_irec *PREV)
{
if (!xfs_bmap_is_update_needed(PREV))
return 0;
- return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
- XFS_DATA_FORK, PREV);
+ return __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
}
/*
@@ -6158,7 +6043,6 @@ xfs_bmap_unmap_extent(
int
xfs_bmap_finish_one(
struct xfs_trans *tp,
- struct xfs_defer_ops *dfops,
struct xfs_inode *ip,
enum xfs_bmap_intent_type type,
int whichfork,
@@ -6167,17 +6051,9 @@ xfs_bmap_finish_one(
xfs_filblks_t *blockcount,
xfs_exntst_t state)
{
- xfs_fsblock_t firstfsb;
int error = 0;
- /*
- * firstfsb is tied to the transaction lifetime and is used to
- * ensure correct AG locking order and schedule work item
- * continuations. XFS_BUI_MAX_FAST_EXTENTS (== 1) restricts us
- * to only making one bmap call per transaction, so it should
- * be safe to have it as a local variable here.
- */
- firstfsb = NULLFSBLOCK;
+ ASSERT(tp->t_firstblock == NULLFSBLOCK);
trace_xfs_bmap_deferred(tp->t_mountp,
XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
@@ -6194,12 +6070,12 @@ xfs_bmap_finish_one(
switch (type) {
case XFS_BMAP_MAP:
error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
- startblock, dfops, 0);
+ startblock, 0);
*blockcount = 0;
break;
case XFS_BMAP_UNMAP:
error = __xfs_bunmapi(tp, ip, startoff, blockcount,
- XFS_BMAPI_REMAP, 1, &firstfsb, dfops);
+ XFS_BMAPI_REMAP, 1);
break;
default:
ASSERT(0);
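Together with xfs_bmap_map_extent()/xfs_bmap_unmap_extent() above, queueing and running a deferred mapping now needs only the transaction. A sketch with a hypothetical helper:

	static int
	xfs_example_defer_remap(
		struct xfs_trans	**tpp,
		struct xfs_inode	*ip,
		struct xfs_bmbt_irec	*imap)
	{
		int			error;

		/* queues an XFS_BMAP_MAP intent for xfs_bmap_finish_one() */
		error = xfs_bmap_map_extent(*tpp, ip, imap);
		if (error)
			return error;
		/* rolls *tpp as needed while finishing the queued work */
		return xfs_defer_finish(tpp);
	}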
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 9b49ddf99c41..b6e9b639e731 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -19,8 +19,6 @@ extern kmem_zone_t *xfs_bmap_free_item_zone;
* Argument structure for xfs_bmap_alloc.
*/
struct xfs_bmalloca {
- xfs_fsblock_t *firstblock; /* i/o first block allocated */
- struct xfs_defer_ops *dfops; /* bmap freelist */
struct xfs_trans *tp; /* transaction pointer */
struct xfs_inode *ip; /* incore inode pointer */
struct xfs_bmbt_irec prev; /* extent before the new one */
@@ -68,8 +66,6 @@ struct xfs_extent_free_item
#define XFS_BMAPI_METADATA 0x002 /* mapping metadata not user data */
#define XFS_BMAPI_ATTRFORK 0x004 /* use attribute fork not data */
#define XFS_BMAPI_PREALLOC 0x008 /* preallocation op: unwritten space */
-#define XFS_BMAPI_IGSTATE 0x010 /* Ignore state - */
- /* combine contig. space */
#define XFS_BMAPI_CONTIG 0x020 /* must allocate only one extent */
/*
* unwritten extent conversion - this needs write cache flushing and no additional
@@ -116,7 +112,6 @@ struct xfs_extent_free_item
{ XFS_BMAPI_METADATA, "METADATA" }, \
{ XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \
{ XFS_BMAPI_PREALLOC, "PREALLOC" }, \
- { XFS_BMAPI_IGSTATE, "IGSTATE" }, \
{ XFS_BMAPI_CONTIG, "CONTIG" }, \
{ XFS_BMAPI_CONVERT, "CONVERT" }, \
{ XFS_BMAPI_ZERO, "ZERO" }, \
@@ -189,9 +184,9 @@ void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
-void __xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
- xfs_fsblock_t bno, xfs_filblks_t len,
- struct xfs_owner_info *oinfo, bool skip_discard);
+void __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
+ xfs_filblks_t len, struct xfs_owner_info *oinfo,
+ bool skip_discard);
void xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
int xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
@@ -205,17 +200,13 @@ int xfs_bmapi_read(struct xfs_inode *ip, xfs_fileoff_t bno,
int *nmap, int flags);
int xfs_bmapi_write(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, int flags,
- xfs_fsblock_t *firstblock, xfs_extlen_t total,
- struct xfs_bmbt_irec *mval, int *nmap,
- struct xfs_defer_ops *dfops);
+ xfs_extlen_t total, struct xfs_bmbt_irec *mval, int *nmap);
int __xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t *rlen, int flags,
- xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops);
+ xfs_extnum_t nexts);
int xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, int flags,
- xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops, int *done);
+ xfs_extnum_t nexts, int *done);
int xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *got,
struct xfs_bmbt_irec *del);
@@ -225,14 +216,12 @@ void xfs_bmap_del_extent_cow(struct xfs_inode *ip,
uint xfs_default_attroffset(struct xfs_inode *ip);
int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
- bool *done, xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops);
+ bool *done);
int xfs_bmap_can_insert_extents(struct xfs_inode *ip, xfs_fileoff_t off,
xfs_fileoff_t shift);
int xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
- bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops);
+ bool *done, xfs_fileoff_t stop_fsb);
int xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset);
int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
xfs_fileoff_t off, xfs_filblks_t len, xfs_filblks_t prealloc,
@@ -241,13 +230,12 @@ int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
static inline void
xfs_bmap_add_free(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
xfs_fsblock_t bno,
xfs_filblks_t len,
struct xfs_owner_info *oinfo)
{
- __xfs_bmap_add_free(mp, dfops, bno, len, oinfo, false);
+ __xfs_bmap_add_free(tp, bno, len, oinfo, false);
}
enum xfs_bmap_intent_type {
@@ -263,14 +251,14 @@ struct xfs_bmap_intent {
struct xfs_bmbt_irec bi_bmap;
};
-int xfs_bmap_finish_one(struct xfs_trans *tp, struct xfs_defer_ops *dfops,
- struct xfs_inode *ip, enum xfs_bmap_intent_type type,
- int whichfork, xfs_fileoff_t startoff, xfs_fsblock_t startblock,
+int xfs_bmap_finish_one(struct xfs_trans *tp, struct xfs_inode *ip,
+ enum xfs_bmap_intent_type type, int whichfork,
+ xfs_fileoff_t startoff, xfs_fsblock_t startblock,
xfs_filblks_t *blockcount, xfs_exntst_t state);
-int xfs_bmap_map_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
- struct xfs_inode *ip, struct xfs_bmbt_irec *imap);
-int xfs_bmap_unmap_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
- struct xfs_inode *ip, struct xfs_bmbt_irec *imap);
+int xfs_bmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+ struct xfs_bmbt_irec *imap);
+int xfs_bmap_unmap_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+ struct xfs_bmbt_irec *imap);
static inline int xfs_bmap_fork_to_state(int whichfork)
{
@@ -289,6 +277,6 @@ xfs_failaddr_t xfs_bmap_validate_extent(struct xfs_inode *ip, int whichfork,
int xfs_bmapi_remap(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, xfs_fsblock_t startblock,
- struct xfs_defer_ops *dfops, int flags);
+ int flags);
#endif /* __XFS_BMAP_H__ */
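The unmap prototypes above lose firstblock/dfops as well; a hypothetical punch-hole helper under the new signatures, assuming the extents queued for freeing by the unmap are finished before commit:

	static int
	xfs_example_punch(
		struct xfs_trans	**tpp,
		struct xfs_inode	*ip,
		xfs_fileoff_t		bno,
		xfs_filblks_t		len)
	{
		int			done = 0;
		int			error;

		error = xfs_bunmapi(*tpp, ip, bno, len, 0, 1, &done);
		if (error)
			return error;
		/* freed extents were queued on *tpp; finish them now */
		return xfs_defer_finish(tpp);
	}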
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index e1a2d9ceb615..cdb74d2e2a43 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -175,8 +175,6 @@ xfs_bmbt_dup_cursor(
- * Copy the firstblock, dfops, and flags values,
- * since init cursor doesn't get them.
+ * Copy the flags value, since init cursor doesn't get it.
*/
- new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
- new->bc_private.b.dfops = cur->bc_private.b.dfops;
new->bc_private.b.flags = cur->bc_private.b.flags;
return new;
@@ -187,12 +185,11 @@ xfs_bmbt_update_cursor(
struct xfs_btree_cur *src,
struct xfs_btree_cur *dst)
{
- ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
+ ASSERT((dst->bc_tp->t_firstblock != NULLFSBLOCK) ||
(dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
- ASSERT(dst->bc_private.b.dfops == src->bc_private.b.dfops);
dst->bc_private.b.allocated += src->bc_private.b.allocated;
- dst->bc_private.b.firstblock = src->bc_private.b.firstblock;
+ dst->bc_tp->t_firstblock = src->bc_tp->t_firstblock;
src->bc_private.b.allocated = 0;
}
@@ -210,8 +207,7 @@ xfs_bmbt_alloc_block(
memset(&args, 0, sizeof(args));
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
- args.fsbno = cur->bc_private.b.firstblock;
- args.firstblock = args.fsbno;
+ args.fsbno = cur->bc_tp->t_firstblock;
xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
cur->bc_private.b.whichfork);
@@ -230,7 +226,7 @@ xfs_bmbt_alloc_block(
* block allocation here and corrupt the filesystem.
*/
args.minleft = args.tp->t_blk_res;
- } else if (cur->bc_private.b.dfops->dop_low) {
+ } else if (cur->bc_tp->t_flags & XFS_TRANS_LOWMODE) {
args.type = XFS_ALLOCTYPE_START_BNO;
} else {
args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -257,7 +253,7 @@ xfs_bmbt_alloc_block(
error = xfs_alloc_vextent(&args);
if (error)
goto error0;
- cur->bc_private.b.dfops->dop_low = true;
+ cur->bc_tp->t_flags |= XFS_TRANS_LOWMODE;
}
if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
*stat = 0;
@@ -265,7 +261,7 @@ xfs_bmbt_alloc_block(
}
ASSERT(args.len == 1);
- cur->bc_private.b.firstblock = args.fsbno;
+ cur->bc_tp->t_firstblock = args.fsbno;
cur->bc_private.b.allocated++;
cur->bc_private.b.ip->i_d.di_nblocks++;
xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
@@ -293,7 +289,7 @@ xfs_bmbt_free_block(
struct xfs_owner_info oinfo;
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
- xfs_bmap_add_free(mp, cur->bc_private.b.dfops, fsbno, 1, &oinfo);
+ xfs_bmap_add_free(cur->bc_tp, fsbno, 1, &oinfo);
ip->i_d.di_nblocks--;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
@@ -564,8 +560,6 @@ xfs_bmbt_init_cursor(
cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
cur->bc_private.b.ip = ip;
- cur->bc_private.b.firstblock = NULLFSBLOCK;
- cur->bc_private.b.dfops = NULL;
cur->bc_private.b.allocated = 0;
cur->bc_private.b.flags = 0;
cur->bc_private.b.whichfork = whichfork;
@@ -645,7 +639,7 @@ xfs_bmbt_change_owner(
cur->bc_private.b.flags |= XFS_BTCUR_BPRV_INVALID_OWNER;
error = xfs_btree_change_owner(cur, new_owner, buffer_list);
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
return error;
}
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 0a4fdf7f11a7..e3b3e9dce5da 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -7,7 +7,6 @@
#define __XFS_BTREE_H__
struct xfs_buf;
-struct xfs_defer_ops;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
@@ -209,14 +208,11 @@ typedef struct xfs_btree_cur
union {
struct { /* needed for BNO, CNT, INO */
struct xfs_buf *agbp; /* agf/agi buffer pointer */
- struct xfs_defer_ops *dfops; /* deferred updates */
xfs_agnumber_t agno; /* ag number */
union xfs_btree_cur_private priv;
} a;
struct { /* needed for BMAP */
struct xfs_inode *ip; /* pointer to our inode */
- struct xfs_defer_ops *dfops; /* deferred updates */
- xfs_fsblock_t firstblock; /* 1st blk allocated */
int allocated; /* count of alloced */
short forksize; /* fork's inode space */
char whichfork; /* data or attr fork */
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 8a301402bbc4..376bee94b5dd 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -1481,6 +1481,7 @@ xfs_da3_node_lookup_int(
int error;
int retval;
unsigned int expected_level = 0;
+ uint16_t magic;
struct xfs_inode *dp = state->args->dp;
args = state->args;
@@ -1505,25 +1506,27 @@ xfs_da3_node_lookup_int(
return error;
}
curr = blk->bp->b_addr;
- blk->magic = be16_to_cpu(curr->magic);
+ magic = be16_to_cpu(curr->magic);
- if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
- blk->magic == XFS_ATTR3_LEAF_MAGIC) {
+ if (magic == XFS_ATTR_LEAF_MAGIC ||
+ magic == XFS_ATTR3_LEAF_MAGIC) {
blk->magic = XFS_ATTR_LEAF_MAGIC;
blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
break;
}
- if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
- blk->magic == XFS_DIR3_LEAFN_MAGIC) {
+ if (magic == XFS_DIR2_LEAFN_MAGIC ||
+ magic == XFS_DIR3_LEAFN_MAGIC) {
blk->magic = XFS_DIR2_LEAFN_MAGIC;
blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
blk->bp, NULL);
break;
}
- blk->magic = XFS_DA_NODE_MAGIC;
+ if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC)
+ return -EFSCORRUPTED;
+ blk->magic = XFS_DA_NODE_MAGIC;
/*
* Search an intermediate node for a match.
@@ -2059,11 +2062,9 @@ xfs_da_grow_inode_int(
* Try mapping it in one filesystem block.
*/
nmap = 1;
- ASSERT(args->firstblock != NULL);
error = xfs_bmapi_write(tp, dp, *bno, count,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
- args->firstblock, args->total, &map, &nmap,
- args->dfops);
+ args->total, &map, &nmap);
if (error)
return error;
@@ -2085,8 +2086,7 @@ xfs_da_grow_inode_int(
c = (int)(*bno + count - b);
error = xfs_bmapi_write(tp, dp, b, c,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
- args->firstblock, args->total,
- &mapp[mapi], &nmap, args->dfops);
+ args->total, &mapp[mapi], &nmap);
if (error)
goto out_free_map;
if (nmap < 1)
@@ -2375,13 +2375,13 @@ done:
*/
int
xfs_da_shrink_inode(
- xfs_da_args_t *args,
- xfs_dablk_t dead_blkno,
- struct xfs_buf *dead_buf)
+ struct xfs_da_args *args,
+ xfs_dablk_t dead_blkno,
+ struct xfs_buf *dead_buf)
{
- xfs_inode_t *dp;
- int done, error, w, count;
- xfs_trans_t *tp;
+ struct xfs_inode *dp;
+ int done, error, w, count;
+ struct xfs_trans *tp;
trace_xfs_da_shrink_inode(args);
@@ -2395,8 +2395,7 @@ xfs_da_shrink_inode(
* the last block to the place we want to kill.
*/
error = xfs_bunmapi(tp, dp, dead_blkno, count,
- xfs_bmapi_aflag(w), 0, args->firstblock,
- args->dfops, &done);
+ xfs_bmapi_aflag(w), 0, &done);
if (error == -ENOSPC) {
if (w != XFS_DATA_FORK)
break;
diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h
index 28260073ae71..84dd865b6c3d 100644
--- a/fs/xfs/libxfs/xfs_da_btree.h
+++ b/fs/xfs/libxfs/xfs_da_btree.h
@@ -7,7 +7,6 @@
#ifndef __XFS_DA_BTREE_H__
#define __XFS_DA_BTREE_H__
-struct xfs_defer_ops;
struct xfs_inode;
struct xfs_trans;
struct zone;
@@ -57,8 +56,6 @@ typedef struct xfs_da_args {
xfs_dahash_t hashval; /* hash value of name */
xfs_ino_t inumber; /* input/output inode number */
struct xfs_inode *dp; /* directory inode to manipulate */
- xfs_fsblock_t *firstblock; /* ptr to firstblock for bmap calls */
- struct xfs_defer_ops *dfops; /* ptr to freelist for bmap_finish */
struct xfs_trans *trans; /* current trans (changes over time) */
xfs_extlen_t total; /* total blocks needed, for 1st bmap */
int whichfork; /* data or attribute fork */
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index c3e5bffda4f5..e792b167150a 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -14,6 +14,9 @@
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
+#include "xfs_buf_item.h"
+#include "xfs_inode.h"
+#include "xfs_inode_item.h"
#include "xfs_trace.h"
/*
@@ -177,146 +180,157 @@ static const struct xfs_defer_op_type *defer_op_types[XFS_DEFER_OPS_TYPE_MAX];
* the pending list.
*/
STATIC void
-xfs_defer_intake_work(
- struct xfs_trans *tp,
- struct xfs_defer_ops *dop)
+xfs_defer_create_intents(
+ struct xfs_trans *tp)
{
struct list_head *li;
struct xfs_defer_pending *dfp;
- list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
+ list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
dfp->dfp_count);
- trace_xfs_defer_intake_work(tp->t_mountp, dfp);
+ trace_xfs_defer_create_intent(tp->t_mountp, dfp);
list_sort(tp->t_mountp, &dfp->dfp_work,
dfp->dfp_type->diff_items);
list_for_each(li, &dfp->dfp_work)
dfp->dfp_type->log_item(tp, dfp->dfp_intent, li);
}
-
- list_splice_tail_init(&dop->dop_intake, &dop->dop_pending);
}
/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
struct xfs_trans *tp,
- struct xfs_defer_ops *dop,
- int error)
+ struct list_head *dop_pending)
{
struct xfs_defer_pending *dfp;
- trace_xfs_defer_trans_abort(tp->t_mountp, dop, _RET_IP_);
+ trace_xfs_defer_trans_abort(tp, _RET_IP_);
/* Abort intent items that don't have a done item. */
- list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
+ list_for_each_entry(dfp, dop_pending, dfp_list) {
trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
if (dfp->dfp_intent && !dfp->dfp_done) {
dfp->dfp_type->abort_intent(dfp->dfp_intent);
dfp->dfp_intent = NULL;
}
}
-
- /* Shut down FS. */
- xfs_force_shutdown(tp->t_mountp, (error == -EFSCORRUPTED) ?
- SHUTDOWN_CORRUPT_INCORE : SHUTDOWN_META_IO_ERROR);
}
/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
- struct xfs_trans **tp,
- struct xfs_defer_ops *dop)
+ struct xfs_trans **tpp)
{
+ struct xfs_trans *tp = *tpp;
+ struct xfs_buf_log_item *bli;
+ struct xfs_inode_log_item *ili;
+ struct xfs_log_item *lip;
+ struct xfs_buf *bplist[XFS_DEFER_OPS_NR_BUFS];
+ struct xfs_inode *iplist[XFS_DEFER_OPS_NR_INODES];
+ int bpcount = 0, ipcount = 0;
int i;
int error;
- /* Log all the joined inodes. */
- for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
- xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);
-
- /* Hold the (previously bjoin'd) buffer locked across the roll. */
- for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++)
- xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]);
+ list_for_each_entry(lip, &tp->t_items, li_trans) {
+ switch (lip->li_type) {
+ case XFS_LI_BUF:
+ bli = container_of(lip, struct xfs_buf_log_item,
+ bli_item);
+ if (bli->bli_flags & XFS_BLI_HOLD) {
+ if (bpcount >= XFS_DEFER_OPS_NR_BUFS) {
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
+ xfs_trans_dirty_buf(tp, bli->bli_buf);
+ bplist[bpcount++] = bli->bli_buf;
+ }
+ break;
+ case XFS_LI_INODE:
+ ili = container_of(lip, struct xfs_inode_log_item,
+ ili_item);
+ if (ili->ili_lock_flags == 0) {
+ if (ipcount >= XFS_DEFER_OPS_NR_INODES) {
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
+ xfs_trans_log_inode(tp, ili->ili_inode,
+ XFS_ILOG_CORE);
+ iplist[ipcount++] = ili->ili_inode;
+ }
+ break;
+ default:
+ break;
+ }
+ }
- trace_xfs_defer_trans_roll((*tp)->t_mountp, dop, _RET_IP_);
+ trace_xfs_defer_trans_roll(tp, _RET_IP_);
/* Roll the transaction. */
- error = xfs_trans_roll(tp);
+ error = xfs_trans_roll(tpp);
+ tp = *tpp;
if (error) {
- trace_xfs_defer_trans_roll_error((*tp)->t_mountp, dop, error);
- xfs_defer_trans_abort(*tp, dop, error);
+ trace_xfs_defer_trans_roll_error(tp, error);
return error;
}
- dop->dop_committed = true;
/* Rejoin the joined inodes. */
- for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
- xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);
+ for (i = 0; i < ipcount; i++)
+ xfs_trans_ijoin(tp, iplist[i], 0);
/* Rejoin the buffers and dirty them so the log moves forward. */
- for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) {
- xfs_trans_bjoin(*tp, dop->dop_bufs[i]);
- xfs_trans_bhold(*tp, dop->dop_bufs[i]);
+ for (i = 0; i < bpcount; i++) {
+ xfs_trans_bjoin(tp, bplist[i]);
+ xfs_trans_bhold(tp, bplist[i]);
}
return error;
}
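Because the roll now scans tp->t_items, callers stop registering inodes and buffers with a separate dfops structure; joining or holding them in the transaction is sufficient. A sketch of the idiom this relies on, as a hypothetical helper:

	static int
	xfs_example_hold_across_roll(
		struct xfs_trans	**tpp,
		struct xfs_inode	*ip,
		struct xfs_buf		*bp)
	{
		xfs_trans_ijoin(*tpp, ip, 0);	/* lock_flags 0: relogged */
		xfs_trans_bjoin(*tpp, bp);
		xfs_trans_bhold(*tpp, bp);	/* BLI_HOLD: kept across rolls */

		/* xfs_defer_trans_roll() rejoins ip and bp on each roll */
		return xfs_defer_finish(tpp);
	}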
-/* Do we have any work items to finish? */
-bool
-xfs_defer_has_unfinished_work(
- struct xfs_defer_ops *dop)
-{
- return !list_empty(&dop->dop_pending) || !list_empty(&dop->dop_intake);
-}
-
/*
- * Add this inode to the deferred op. Each joined inode is relogged
- * each time we roll the transaction.
+ * Reset an already used dfops after finish.
*/
-int
-xfs_defer_ijoin(
- struct xfs_defer_ops *dop,
- struct xfs_inode *ip)
+static void
+xfs_defer_reset(
+ struct xfs_trans *tp)
{
- int i;
-
- for (i = 0; i < XFS_DEFER_OPS_NR_INODES; i++) {
- if (dop->dop_inodes[i] == ip)
- return 0;
- else if (dop->dop_inodes[i] == NULL) {
- dop->dop_inodes[i] = ip;
- return 0;
- }
- }
+ ASSERT(list_empty(&tp->t_dfops));
- ASSERT(0);
- return -EFSCORRUPTED;
+ /*
+ * Low mode state transfers across transaction rolls to mirror dfops
+ * lifetime. Clear it now that dfops is reset.
+ */
+ tp->t_flags &= ~XFS_TRANS_LOWMODE;
}
/*
- * Add this buffer to the deferred op. Each joined buffer is relogged
- * each time we roll the transaction.
+ * Free up any items left in the list.
*/
-int
-xfs_defer_bjoin(
- struct xfs_defer_ops *dop,
- struct xfs_buf *bp)
+static void
+xfs_defer_cancel_list(
+ struct xfs_mount *mp,
+ struct list_head *dop_list)
{
- int i;
+ struct xfs_defer_pending *dfp;
+ struct xfs_defer_pending *pli;
+ struct list_head *pwi;
+ struct list_head *n;
- for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) {
- if (dop->dop_bufs[i] == bp)
- return 0;
- else if (dop->dop_bufs[i] == NULL) {
- dop->dop_bufs[i] = bp;
- return 0;
+ /*
+ * Free the pending items. Caller should already have arranged
+ * for the intent items to be released.
+ */
+ list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) {
+ trace_xfs_defer_cancel_list(mp, dfp);
+ list_del(&dfp->dfp_list);
+ list_for_each_safe(pwi, n, &dfp->dfp_work) {
+ list_del(pwi);
+ dfp->dfp_count--;
+ dfp->dfp_type->cancel_item(pwi);
}
+ ASSERT(dfp->dfp_count == 0);
+ kmem_free(dfp);
}
-
- ASSERT(0);
- return -EFSCORRUPTED;
}
/*
@@ -328,9 +342,8 @@ xfs_defer_bjoin(
* If an inode is provided, relog it to the new transaction.
*/
int
-xfs_defer_finish(
- struct xfs_trans **tp,
- struct xfs_defer_ops *dop)
+xfs_defer_finish_noroll(
+ struct xfs_trans **tp)
{
struct xfs_defer_pending *dfp;
struct list_head *li;
@@ -338,35 +351,28 @@ xfs_defer_finish(
void *state;
int error = 0;
void (*cleanup_fn)(struct xfs_trans *, void *, int);
- struct xfs_defer_ops *orig_dop;
+ LIST_HEAD(dop_pending);
ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
- trace_xfs_defer_finish((*tp)->t_mountp, dop, _RET_IP_);
-
- /*
- * Attach dfops to the transaction during deferred ops processing. This
- * explicitly causes calls into the allocator to defer AGFL block frees.
- * Note that this code can go away once all dfops users attach to the
- * associated tp.
- */
- ASSERT(!(*tp)->t_agfl_dfops || ((*tp)->t_agfl_dfops == dop));
- orig_dop = (*tp)->t_agfl_dfops;
- (*tp)->t_agfl_dfops = dop;
+ trace_xfs_defer_finish(*tp, _RET_IP_);
/* Until we run out of pending work to finish... */
- while (xfs_defer_has_unfinished_work(dop)) {
- /* Log intents for work items sitting in the intake. */
- xfs_defer_intake_work(*tp, dop);
-
- /* Roll the transaction. */
- error = xfs_defer_trans_roll(tp, dop);
+ while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
+ /* log intents and pull in intake items */
+ xfs_defer_create_intents(*tp);
+ list_splice_tail_init(&(*tp)->t_dfops, &dop_pending);
+
+ /*
+ * Roll the transaction.
+ */
+ error = xfs_defer_trans_roll(tp);
if (error)
goto out;
/* Log an intent-done item for the first pending item. */
- dfp = list_first_entry(&dop->dop_pending,
- struct xfs_defer_pending, dfp_list);
+ dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
+ dfp_list);
trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
dfp->dfp_done = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
dfp->dfp_count);
@@ -377,7 +383,7 @@ xfs_defer_finish(
list_for_each_safe(li, n, &dfp->dfp_work) {
list_del(li);
dfp->dfp_count--;
- error = dfp->dfp_type->finish_item(*tp, dop, li,
+ error = dfp->dfp_type->finish_item(*tp, li,
dfp->dfp_done, &state);
if (error == -EAGAIN) {
/*
@@ -396,7 +402,6 @@ xfs_defer_finish(
*/
if (cleanup_fn)
cleanup_fn(*tp, state, error);
- xfs_defer_trans_abort(*tp, dop, error);
goto out;
}
}
@@ -425,72 +430,72 @@ xfs_defer_finish(
}
out:
- (*tp)->t_agfl_dfops = orig_dop;
- if (error)
- trace_xfs_defer_finish_error((*tp)->t_mountp, dop, error);
- else
- trace_xfs_defer_finish_done((*tp)->t_mountp, dop, _RET_IP_);
- return error;
+ if (error) {
+ xfs_defer_trans_abort(*tp, &dop_pending);
+ xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
+ trace_xfs_defer_finish_error(*tp, error);
+ xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
+ xfs_defer_cancel(*tp);
+ return error;
+ }
+
+ trace_xfs_defer_finish_done(*tp, _RET_IP_);
+ return 0;
}
-/*
- * Free up any items left in the list.
- */
-void
-xfs_defer_cancel(
- struct xfs_defer_ops *dop)
+int
+xfs_defer_finish(
+ struct xfs_trans **tp)
{
- struct xfs_defer_pending *dfp;
- struct xfs_defer_pending *pli;
- struct list_head *pwi;
- struct list_head *n;
-
- trace_xfs_defer_cancel(NULL, dop, _RET_IP_);
+ int error;
/*
- * Free the pending items. Caller should already have arranged
- * for the intent items to be released.
+ * Finish and roll the transaction once more to avoid returning to the
+ * caller with a dirty transaction.
*/
- list_for_each_entry_safe(dfp, pli, &dop->dop_intake, dfp_list) {
- trace_xfs_defer_intake_cancel(NULL, dfp);
- list_del(&dfp->dfp_list);
- list_for_each_safe(pwi, n, &dfp->dfp_work) {
- list_del(pwi);
- dfp->dfp_count--;
- dfp->dfp_type->cancel_item(pwi);
- }
- ASSERT(dfp->dfp_count == 0);
- kmem_free(dfp);
- }
- list_for_each_entry_safe(dfp, pli, &dop->dop_pending, dfp_list) {
- trace_xfs_defer_pending_cancel(NULL, dfp);
- list_del(&dfp->dfp_list);
- list_for_each_safe(pwi, n, &dfp->dfp_work) {
- list_del(pwi);
- dfp->dfp_count--;
- dfp->dfp_type->cancel_item(pwi);
+ error = xfs_defer_finish_noroll(tp);
+ if (error)
+ return error;
+ if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
+ error = xfs_defer_trans_roll(tp);
+ if (error) {
+ xfs_force_shutdown((*tp)->t_mountp,
+ SHUTDOWN_CORRUPT_INCORE);
+ return error;
}
- ASSERT(dfp->dfp_count == 0);
- kmem_free(dfp);
}
+ xfs_defer_reset(*tp);
+ return 0;
+}
+
+void
+xfs_defer_cancel(
+ struct xfs_trans *tp)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+
+ trace_xfs_defer_cancel(tp, _RET_IP_);
+ xfs_defer_cancel_list(mp, &tp->t_dfops);
}
/* Add an item for later deferred processing. */
void
xfs_defer_add(
- struct xfs_defer_ops *dop,
+ struct xfs_trans *tp,
enum xfs_defer_ops_type type,
struct list_head *li)
{
struct xfs_defer_pending *dfp = NULL;
+ ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+
/*
* Add the item to a pending item at the end of the intake list.
* If the last pending item has the same type, reuse it. Else,
* create a new pending item at the end of the intake list.
*/
- if (!list_empty(&dop->dop_intake)) {
- dfp = list_last_entry(&dop->dop_intake,
+ if (!list_empty(&tp->t_dfops)) {
+ dfp = list_last_entry(&tp->t_dfops,
struct xfs_defer_pending, dfp_list);
if (dfp->dfp_type->type != type ||
(dfp->dfp_type->max_items &&
@@ -505,7 +510,7 @@ xfs_defer_add(
dfp->dfp_done = NULL;
dfp->dfp_count = 0;
INIT_LIST_HEAD(&dfp->dfp_work);
- list_add_tail(&dfp->dfp_list, &dop->dop_intake);
+ list_add_tail(&dfp->dfp_list, &tp->t_dfops);
}
list_add_tail(li, &dfp->dfp_work);
@@ -520,15 +525,25 @@ xfs_defer_init_op_type(
defer_op_types[type->type] = type;
}
-/* Initialize a deferred operation. */
+/*
+ * Move deferred ops from one transaction to another and reset the source to
+ * initial state. This is primarily used to carry state forward across
+ * transaction rolls with pending dfops.
+ */
void
-xfs_defer_init(
- struct xfs_defer_ops *dop,
- xfs_fsblock_t *fbp)
+xfs_defer_move(
+ struct xfs_trans *dtp,
+ struct xfs_trans *stp)
{
- memset(dop, 0, sizeof(struct xfs_defer_ops));
- *fbp = NULLFSBLOCK;
- INIT_LIST_HEAD(&dop->dop_intake);
- INIT_LIST_HEAD(&dop->dop_pending);
- trace_xfs_defer_init(NULL, dop, _RET_IP_);
+ list_splice_init(&stp->t_dfops, &dtp->t_dfops);
+
+ /*
+ * Low free space mode was historically controlled by a dfops field.
+ * This meant that low mode state potentially carried across multiple
+ * transaction rolls. Transfer low mode on a dfops move to preserve
+ * that behavior.
+ */
+ dtp->t_flags |= (stp->t_flags & XFS_TRANS_LOWMODE);
+
+ xfs_defer_reset(stp);
}
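Pulling the reworked API together: callers queue work with xfs_defer_add(), then either xfs_defer_finish() on success or xfs_defer_cancel() on the error path. A condensed sketch using only the prototypes above (hypothetical helper; the work item list_head comes from whatever op-specific structure is being deferred):

	static int
	xfs_example_queue_and_finish(
		struct xfs_trans	**tpp,
		struct list_head	*li)
	{
		/* queue one work item; its intent is logged at finish time */
		xfs_defer_add(*tpp, XFS_DEFER_OPS_TYPE_FREE, li);

		/* rolls *tpp; on error the pending list is cancelled */
		return xfs_defer_finish(tpp);
	}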
diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
index a02b2b748b6d..2584a5b95b0d 100644
--- a/fs/xfs/libxfs/xfs_defer.h
+++ b/fs/xfs/libxfs/xfs_defer.h
@@ -24,17 +24,6 @@ struct xfs_defer_pending {
/*
* Header for deferred operation list.
- *
- * dop_low is used by the allocator to activate the lowspace algorithm -
- * when free space is running low the extent allocator may choose to
- * allocate an extent from an AG without leaving sufficient space for
- * a btree split when inserting the new extent. In this case the allocator
- * will enable the lowspace algorithm which is supposed to allow further
- * allocations (such as btree splits and newroots) to allocate from
- * sequential AGs. In order to avoid locking AGs out of order the lowspace
- * algorithm will start searching for free space from AG 0. If the correct
- * transaction reservations have been made then this algorithm will eventually
- * find all the space it needs.
*/
enum xfs_defer_ops_type {
XFS_DEFER_OPS_TYPE_BMAP,
@@ -45,28 +34,12 @@ enum xfs_defer_ops_type {
XFS_DEFER_OPS_TYPE_MAX,
};
-#define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */
-#define XFS_DEFER_OPS_NR_BUFS 2 /* join up to two buffers */
-
-struct xfs_defer_ops {
- bool dop_committed; /* did any trans commit? */
- bool dop_low; /* alloc in low mode */
- struct list_head dop_intake; /* unlogged pending work */
- struct list_head dop_pending; /* logged pending work */
-
- /* relog these with each roll */
- struct xfs_inode *dop_inodes[XFS_DEFER_OPS_NR_INODES];
- struct xfs_buf *dop_bufs[XFS_DEFER_OPS_NR_BUFS];
-};
-
-void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type,
+void xfs_defer_add(struct xfs_trans *tp, enum xfs_defer_ops_type type,
struct list_head *h);
-int xfs_defer_finish(struct xfs_trans **tp, struct xfs_defer_ops *dop);
-void xfs_defer_cancel(struct xfs_defer_ops *dop);
-void xfs_defer_init(struct xfs_defer_ops *dop, xfs_fsblock_t *fbp);
-bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop);
-int xfs_defer_ijoin(struct xfs_defer_ops *dop, struct xfs_inode *ip);
-int xfs_defer_bjoin(struct xfs_defer_ops *dop, struct xfs_buf *bp);
+int xfs_defer_finish_noroll(struct xfs_trans **tp);
+int xfs_defer_finish(struct xfs_trans **tp);
+void xfs_defer_cancel(struct xfs_trans *);
+void xfs_defer_move(struct xfs_trans *dtp, struct xfs_trans *stp);
/* Description of a deferred type. */
struct xfs_defer_op_type {
@@ -74,8 +47,8 @@ struct xfs_defer_op_type {
unsigned int max_items;
void (*abort_intent)(void *);
void *(*create_done)(struct xfs_trans *, void *, unsigned int);
- int (*finish_item)(struct xfs_trans *, struct xfs_defer_ops *,
- struct list_head *, void *, void **);
+ int (*finish_item)(struct xfs_trans *, struct list_head *, void *,
+ void **);
void (*finish_cleanup)(struct xfs_trans *, void *, int);
void (*cancel_item)(struct list_head *);
int (*diff_items)(void *, struct list_head *, struct list_head *);
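Each op type now implements finish_item without the dfops argument, doing its one unit of work directly against the transaction. A skeletal, hypothetical provider under the new callback signature:

	static int
	xfs_example_finish_item(
		struct xfs_trans	*tp,
		struct list_head	*item,
		void			*done_item,
		void			**state)
	{
		/* perform one unit of deferred work within tp */
		return 0;
	}

	static const struct xfs_defer_op_type xfs_example_defer_type = {
		.max_items	= 1,
		.finish_item	= xfs_example_finish_item,
		/* .type, intent/done hooks etc. omitted in this sketch */
	};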
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 59169aff30fe..229152cd1a24 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -239,12 +239,10 @@ xfs_dir_init(
*/
int
xfs_dir_createname(
- xfs_trans_t *tp,
- xfs_inode_t *dp,
+ struct xfs_trans *tp,
+ struct xfs_inode *dp,
struct xfs_name *name,
xfs_ino_t inum, /* new entry inode number */
- xfs_fsblock_t *first, /* bmap's firstblock */
- struct xfs_defer_ops *dfops, /* bmap's freeblock list */
xfs_extlen_t total) /* bmap's total block count */
{
struct xfs_da_args *args;
@@ -252,6 +250,7 @@ xfs_dir_createname(
int v; /* type-checking value */
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
+
if (inum) {
rval = xfs_dir_ino_validate(tp->t_mountp, inum);
if (rval)
@@ -270,8 +269,6 @@ xfs_dir_createname(
args->hashval = dp->i_mount->m_dirnameops->hashname(name);
args->inumber = inum;
args->dp = dp;
- args->firstblock = first;
- args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
@@ -416,17 +413,15 @@ out_free:
*/
int
xfs_dir_removename(
- xfs_trans_t *tp,
- xfs_inode_t *dp,
- struct xfs_name *name,
- xfs_ino_t ino,
- xfs_fsblock_t *first, /* bmap's firstblock */
- struct xfs_defer_ops *dfops, /* bmap's freeblock list */
- xfs_extlen_t total) /* bmap's total block count */
+ struct xfs_trans *tp,
+ struct xfs_inode *dp,
+ struct xfs_name *name,
+ xfs_ino_t ino,
+ xfs_extlen_t total) /* bmap's total block count */
{
- struct xfs_da_args *args;
- int rval;
- int v; /* type-checking value */
+ struct xfs_da_args *args;
+ int rval;
+ int v; /* type-checking value */
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
XFS_STATS_INC(dp->i_mount, xs_dir_remove);
@@ -442,8 +437,6 @@ xfs_dir_removename(
args->hashval = dp->i_mount->m_dirnameops->hashname(name);
args->inumber = ino;
args->dp = dp;
- args->firstblock = first;
- args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
@@ -478,17 +471,15 @@ out_free:
*/
int
xfs_dir_replace(
- xfs_trans_t *tp,
- xfs_inode_t *dp,
- struct xfs_name *name, /* name of entry to replace */
- xfs_ino_t inum, /* new inode number */
- xfs_fsblock_t *first, /* bmap's firstblock */
- struct xfs_defer_ops *dfops, /* bmap's freeblock list */
- xfs_extlen_t total) /* bmap's total block count */
+ struct xfs_trans *tp,
+ struct xfs_inode *dp,
+ struct xfs_name *name, /* name of entry to replace */
+ xfs_ino_t inum, /* new inode number */
+ xfs_extlen_t total) /* bmap's total block count */
{
- struct xfs_da_args *args;
- int rval;
- int v; /* type-checking value */
+ struct xfs_da_args *args;
+ int rval;
+ int v; /* type-checking value */
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
@@ -507,8 +498,6 @@ xfs_dir_replace(
args->hashval = dp->i_mount->m_dirnameops->hashname(name);
args->inumber = inum;
args->dp = dp;
- args->firstblock = first;
- args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
@@ -547,7 +536,7 @@ xfs_dir_canenter(
xfs_inode_t *dp,
struct xfs_name *name) /* name of entry to add */
{
- return xfs_dir_createname(tp, dp, name, 0, NULL, NULL, 0);
+ return xfs_dir_createname(tp, dp, name, 0, 0);
}
/*
@@ -645,17 +634,17 @@ xfs_dir2_isleaf(
*/
int
xfs_dir2_shrink_inode(
- xfs_da_args_t *args,
- xfs_dir2_db_t db,
- struct xfs_buf *bp)
+ struct xfs_da_args *args,
+ xfs_dir2_db_t db,
+ struct xfs_buf *bp)
{
- xfs_fileoff_t bno; /* directory file offset */
- xfs_dablk_t da; /* directory file offset */
- int done; /* bunmap is finished */
- xfs_inode_t *dp;
- int error;
- xfs_mount_t *mp;
- xfs_trans_t *tp;
+ xfs_fileoff_t bno; /* directory file offset */
+ xfs_dablk_t da; /* directory file offset */
+ int done; /* bunmap is finished */
+ struct xfs_inode *dp;
+ int error;
+ struct xfs_mount *mp;
+ struct xfs_trans *tp;
trace_xfs_dir2_shrink_inode(args, db);
@@ -665,8 +654,7 @@ xfs_dir2_shrink_inode(
da = xfs_dir2_db_to_da(args->geo, db);
/* Unmap the fsblock(s). */
- error = xfs_bunmapi(tp, dp, da, args->geo->fsbcount, 0, 0,
- args->firstblock, args->dfops, &done);
+ error = xfs_bunmapi(tp, dp, da, args->geo->fsbcount, 0, 0, &done);
if (error) {
/*
* ENOSPC actually can happen if we're in a removename with no
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index ed385316c7dc..c3e3f6b813d8 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -9,7 +9,6 @@
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-struct xfs_defer_ops;
struct xfs_da_args;
struct xfs_inode;
struct xfs_mount;
@@ -118,19 +117,16 @@ extern int xfs_dir_init(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_inode *pdp);
extern int xfs_dir_createname(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t inum,
- xfs_fsblock_t *first,
- struct xfs_defer_ops *dfops, xfs_extlen_t tot);
+ xfs_extlen_t tot);
extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t *inum,
struct xfs_name *ci_name);
extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t ino,
- xfs_fsblock_t *first,
- struct xfs_defer_ops *dfops, xfs_extlen_t tot);
+ xfs_extlen_t tot);
extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t inum,
- xfs_fsblock_t *first,
- struct xfs_defer_ops *dfops, xfs_extlen_t tot);
+ xfs_extlen_t tot);
extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name);
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index 2daf874969ab..f1bb3434f51c 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -1012,7 +1012,7 @@ xfs_dir2_leafn_rebalance(
int oldstale; /* old count of stale leaves */
#endif
int oldsum; /* old total leaf count */
- int swap; /* swapped leaf blocks */
+ int swap_blocks; /* swapped leaf blocks */
struct xfs_dir2_leaf_entry *ents1;
struct xfs_dir2_leaf_entry *ents2;
struct xfs_dir3_icleaf_hdr hdr1;
@@ -1023,13 +1023,10 @@ xfs_dir2_leafn_rebalance(
/*
* If the block order is wrong, swap the arguments.
*/
- if ((swap = xfs_dir2_leafn_order(dp, blk1->bp, blk2->bp))) {
- xfs_da_state_blk_t *tmp; /* temp for block swap */
+ swap_blocks = xfs_dir2_leafn_order(dp, blk1->bp, blk2->bp);
+ if (swap_blocks)
+ swap(blk1, blk2);
- tmp = blk1;
- blk1 = blk2;
- blk2 = tmp;
- }
leaf1 = blk1->bp->b_addr;
leaf2 = blk2->bp->b_addr;
dp->d_ops->leaf_hdr_from_disk(&hdr1, leaf1);
@@ -1093,11 +1090,11 @@ xfs_dir2_leafn_rebalance(
* Mark whether we're inserting into the old or new leaf.
*/
if (hdr1.count < hdr2.count)
- state->inleaf = swap;
+ state->inleaf = swap_blocks;
else if (hdr1.count > hdr2.count)
- state->inleaf = !swap;
+ state->inleaf = !swap_blocks;
else
- state->inleaf = swap ^ (blk1->index <= hdr1.count);
+ state->inleaf = swap_blocks ^ (blk1->index <= hdr1.count);
/*
* Adjust the expected index for insertion.
*/
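
The open-coded three-statement pointer exchange above gives way to the kernel's generic swap() macro. A standalone illustration of the same pattern; the macro body mirrors the definition in include/linux/kernel.h of this era:

    #include <stdio.h>

    #define swap(a, b) \
            do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

    int main(void)
    {
            int x = 1, y = 2;
            int *blk1 = &x, *blk2 = &y;

            swap(blk1, blk2);                  /* exchanges the pointers, not the ints */
            printf("%d %d\n", *blk1, *blk2);   /* prints "2 1" */
            return 0;
    }
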
diff --git a/fs/xfs/libxfs/xfs_errortag.h b/fs/xfs/libxfs/xfs_errortag.h
index b9974e7a8e6e..66077a105cbb 100644
--- a/fs/xfs/libxfs/xfs_errortag.h
+++ b/fs/xfs/libxfs/xfs_errortag.h
@@ -53,7 +53,8 @@
#define XFS_ERRTAG_LOG_ITEM_PIN 30
#define XFS_ERRTAG_BUF_LRU_REF 31
#define XFS_ERRTAG_FORCE_SCRUB_REPAIR 32
-#define XFS_ERRTAG_MAX 33
+#define XFS_ERRTAG_FORCE_SUMMARY_RECALC 33
+#define XFS_ERRTAG_MAX 34
/*
* Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
@@ -91,5 +92,6 @@
#define XFS_RANDOM_LOG_ITEM_PIN 1
#define XFS_RANDOM_BUF_LRU_REF 2
#define XFS_RANDOM_FORCE_SCRUB_REPAIR 1
+#define XFS_RANDOM_FORCE_SUMMARY_RECALC 1
#endif /* __XFS_ERRORTAG_H_ */
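
The new tag wires summary-counter recalculation into the error-injection framework; a random factor of 1 means the tag fires on every check once armed. A hedged sketch of how a consumer might test it (the actual consumer is not part of this hunk, and the check site below is hypothetical):

    /* Hypothetical check site: pretend the counters need rebuilding. */
    if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
            xfs_notice(mp, "error injection: forcing summary recalc");
            mp->m_flags |= XFS_MOUNT_BAD_SUMMARY;
    }
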
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 0d968e8143aa..a8f6db735d5d 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -1838,23 +1838,24 @@ out_error:
*/
STATIC void
xfs_difree_inode_chunk(
- struct xfs_mount *mp,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
- struct xfs_inobt_rec_incore *rec,
- struct xfs_defer_ops *dfops)
+ struct xfs_inobt_rec_incore *rec)
{
- xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp, rec->ir_startino);
- int startidx, endidx;
- int nextbit;
- xfs_agblock_t agbno;
- int contigblk;
- struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = tp->t_mountp;
+ xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp,
+ rec->ir_startino);
+ int startidx, endidx;
+ int nextbit;
+ xfs_agblock_t agbno;
+ int contigblk;
+ struct xfs_owner_info oinfo;
DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
if (!xfs_inobt_issparse(rec->ir_holemask)) {
/* not sparse, calculate extent info directly */
- xfs_bmap_add_free(mp, dfops, XFS_AGB_TO_FSB(mp, agno, sagbno),
+ xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, sagbno),
mp->m_ialloc_blks, &oinfo);
return;
}
@@ -1898,7 +1899,7 @@ xfs_difree_inode_chunk(
ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
- xfs_bmap_add_free(mp, dfops, XFS_AGB_TO_FSB(mp, agno, agbno),
+ xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, agbno),
contigblk, &oinfo);
/* reset range to current bit and carry on... */
@@ -1915,7 +1916,6 @@ xfs_difree_inobt(
struct xfs_trans *tp,
struct xfs_buf *agbp,
xfs_agino_t agino,
- struct xfs_defer_ops *dfops,
struct xfs_icluster *xic,
struct xfs_inobt_rec_incore *orec)
{
@@ -2003,7 +2003,7 @@ xfs_difree_inobt(
goto error0;
}
- xfs_difree_inode_chunk(mp, agno, &rec, dfops);
+ xfs_difree_inode_chunk(tp, agno, &rec);
} else {
xic->deleted = false;
@@ -2148,7 +2148,6 @@ int
xfs_difree(
struct xfs_trans *tp, /* transaction pointer */
xfs_ino_t inode, /* inode to be freed */
- struct xfs_defer_ops *dfops, /* extents to free */
struct xfs_icluster *xic) /* cluster info if deleted */
{
/* REFERENCED */
@@ -2200,7 +2199,7 @@ xfs_difree(
/*
* Fix up the inode allocation btree.
*/
- error = xfs_difree_inobt(mp, tp, agbp, agino, dfops, xic, &rec);
+ error = xfs_difree_inobt(mp, tp, agbp, agino, xic, &rec);
if (error)
goto error0;
@@ -2260,7 +2259,7 @@ xfs_imap_lookup(
}
xfs_trans_brelse(tp, agbp);
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
if (error)
return error;
@@ -2539,7 +2538,7 @@ xfs_agi_verify(
return __this_address;
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
- if (agi->agi_unlinked[i] == NULLAGINO)
+ if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
continue;
if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i])))
return __this_address;
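
A note on the agi_unlinked hunk just above: NULLAGINO is all-ones, so the old host-order comparison happened to be byte-order invariant; wrapping the constant in cpu_to_be32() fixes the __be32/host type mismatch (the kind sparse flags) and documents that the array holds on-disk values. Illustrative restatement:

    /* Both operands now carry the __be32 on-disk type. */
    __be32  bucket = agi->agi_unlinked[i];
    bool    empty  = (bucket == cpu_to_be32(NULLAGINO));
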
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index 90b09c5f163b..e936b7cc9389 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -82,7 +82,6 @@ int /* error */
xfs_difree(
struct xfs_trans *tp, /* transaction pointer */
xfs_ino_t inode, /* inode to be freed */
- struct xfs_defer_ops *dfops, /* extents to free */
struct xfs_icluster *ifree); /* cluster info if deleted */
/*
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index a5237afec5ab..86c50208a143 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -552,6 +552,7 @@ xfs_inobt_max_size(
static int
xfs_inobt_count_blocks(
struct xfs_mount *mp,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_btnum_t btnum,
xfs_extlen_t *tree_blocks)
@@ -560,14 +561,14 @@ xfs_inobt_count_blocks(
struct xfs_btree_cur *cur;
int error;
- error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
+ error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
if (error)
return error;
- cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
error = xfs_btree_count_blocks(cur, tree_blocks);
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
- xfs_buf_relse(agbp);
+ xfs_btree_del_cursor(cur, error);
+ xfs_trans_brelse(tp, agbp);
return error;
}
@@ -578,6 +579,7 @@ xfs_inobt_count_blocks(
int
xfs_finobt_calc_reserves(
struct xfs_mount *mp,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_extlen_t *ask,
xfs_extlen_t *used)
@@ -588,7 +590,7 @@ xfs_finobt_calc_reserves(
if (!xfs_sb_version_hasfinobt(&mp->m_sb))
return 0;
- error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
+ error = xfs_inobt_count_blocks(mp, tp, agno, XFS_BTNUM_FINO, &tree_len);
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h
index bf8f0c405e7d..ebdd0c6b8766 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.h
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.h
@@ -60,8 +60,8 @@ int xfs_inobt_rec_check_count(struct xfs_mount *,
#define xfs_inobt_rec_check_count(mp, rec) 0
#endif /* DEBUG */
-int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno,
- xfs_extlen_t *ask, xfs_extlen_t *used);
+int xfs_finobt_calc_reserves(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
extern xfs_extlen_t xfs_iallocbt_calc_size(struct xfs_mount *mp,
unsigned long long len);
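
Threading the transaction through the reserve calculations lets the AG header buffers be read and released under the caller's transaction instead of being taken out of band. A sketch of a hypothetical caller after this change, using only the signatures declared above:

    static int example_ag_reserves(struct xfs_mount *mp, struct xfs_trans *tp,
                                   xfs_agnumber_t agno)
    {
            xfs_extlen_t    ask = 0, used = 0;
            int             error;

            error = xfs_finobt_calc_reserves(mp, tp, agno, &ask, &used);
            if (error)
                    return error;
            /* ... feed ask/used into the per-AG reservation ... */
            return 0;
    }
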
diff --git a/fs/xfs/libxfs/xfs_iext_tree.c b/fs/xfs/libxfs/xfs_iext_tree.c
index b80c63faace2..771dd072015d 100644
--- a/fs/xfs/libxfs/xfs_iext_tree.c
+++ b/fs/xfs/libxfs/xfs_iext_tree.c
@@ -14,6 +14,7 @@
#include "xfs_inode_fork.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_bmap.h"
#include "xfs_trace.h"
/*
@@ -612,6 +613,19 @@ xfs_iext_realloc_root(
cur->leaf = new;
}
+/*
+ * Increment the sequence counter if we are on a COW fork. This allows
+ * the writeback code to skip looking for a COW extent if the COW fork
+ * hasn't changed. We use WRITE_ONCE here to ensure the update to the
+ * sequence counter is seen before the modifications to the extent
+ * tree itself take effect.
+ */
+static inline void xfs_iext_inc_seq(struct xfs_ifork *ifp, int state)
+{
+ if (state & BMAP_COWFORK)
+ WRITE_ONCE(ifp->if_seq, READ_ONCE(ifp->if_seq) + 1);
+}
+
void
xfs_iext_insert(
struct xfs_inode *ip,
@@ -624,6 +638,8 @@ xfs_iext_insert(
struct xfs_iext_leaf *new = NULL;
int nr_entries, i;
+ xfs_iext_inc_seq(ifp, state);
+
if (ifp->if_height == 0)
xfs_iext_alloc_root(ifp, cur);
else if (ifp->if_height == 1)
@@ -864,6 +880,8 @@ xfs_iext_remove(
ASSERT(ifp->if_u1.if_root != NULL);
ASSERT(xfs_iext_valid(ifp, cur));
+ xfs_iext_inc_seq(ifp, state);
+
nr_entries = xfs_iext_leaf_nr_entries(ifp, leaf, cur->pos) - 1;
for (i = cur->pos; i < nr_entries; i++)
leaf->recs[i] = leaf->recs[i + 1];
@@ -970,6 +988,8 @@ xfs_iext_update_extent(
{
struct xfs_ifork *ifp = xfs_iext_state_to_fork(ip, state);
+ xfs_iext_inc_seq(ifp, state);
+
if (cur->pos == 0) {
struct xfs_bmbt_irec old;
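
The if_seq counter bumped above lets a reader detect whether the COW fork changed since it last looked. A hedged sketch of the consumer side (the writeback hunk is not included here): sample the counter with READ_ONCE before trusting a cached COW extent.

    /* Hypothetical reader-side check paired with xfs_iext_inc_seq(). */
    static bool cow_fork_unchanged(struct xfs_ifork *ifp, unsigned int cached_seq)
    {
            return READ_ONCE(ifp->if_seq) == cached_seq;
    }
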
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 183ec0cb8921..f9acf1d436f6 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -158,7 +158,6 @@ xfs_init_local_fork(
}
ifp->if_bytes = size;
- ifp->if_real_bytes = real_size;
ifp->if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
ifp->if_flags |= XFS_IFINLINE;
}
@@ -226,7 +225,6 @@ xfs_iformat_extents(
return -EFSCORRUPTED;
}
- ifp->if_real_bytes = 0;
ifp->if_bytes = 0;
ifp->if_u1.if_root = NULL;
ifp->if_height = 0;
@@ -271,7 +269,7 @@ xfs_iformat_btree(
{
struct xfs_mount *mp = ip->i_mount;
xfs_bmdr_block_t *dfp;
- xfs_ifork_t *ifp;
+ struct xfs_ifork *ifp;
/* REFERENCED */
int nrecs;
int size;
@@ -317,7 +315,6 @@ xfs_iformat_btree(
ifp->if_flags &= ~XFS_IFEXTENTS;
ifp->if_flags |= XFS_IFBROOT;
- ifp->if_real_bytes = 0;
ifp->if_bytes = 0;
ifp->if_u1.if_root = NULL;
ifp->if_height = 0;
@@ -350,7 +347,7 @@ xfs_iroot_realloc(
{
struct xfs_mount *mp = ip->i_mount;
int cur_max;
- xfs_ifork_t *ifp;
+ struct xfs_ifork *ifp;
struct xfs_btree_block *new_broot;
int new_max;
size_t new_size;
@@ -471,55 +468,34 @@ xfs_iroot_realloc(
*/
void
xfs_idata_realloc(
- xfs_inode_t *ip,
- int byte_diff,
- int whichfork)
+ struct xfs_inode *ip,
+ int byte_diff,
+ int whichfork)
{
- xfs_ifork_t *ifp;
- int new_size;
- int real_size;
-
- if (byte_diff == 0) {
- return;
- }
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ int new_size = (int)ifp->if_bytes + byte_diff;
- ifp = XFS_IFORK_PTR(ip, whichfork);
- new_size = (int)ifp->if_bytes + byte_diff;
ASSERT(new_size >= 0);
+ ASSERT(new_size <= XFS_IFORK_SIZE(ip, whichfork));
+
+ if (byte_diff == 0)
+ return;
if (new_size == 0) {
kmem_free(ifp->if_u1.if_data);
ifp->if_u1.if_data = NULL;
- real_size = 0;
- } else {
- /*
- * Stuck with malloc/realloc.
- * For inline data, the underlying buffer must be
- * a multiple of 4 bytes in size so that it can be
- * logged and stay on word boundaries. We enforce
- * that here.
- */
- real_size = roundup(new_size, 4);
- if (ifp->if_u1.if_data == NULL) {
- ASSERT(ifp->if_real_bytes == 0);
- ifp->if_u1.if_data = kmem_alloc(real_size,
- KM_SLEEP | KM_NOFS);
- } else {
- /*
- * Only do the realloc if the underlying size
- * is really changing.
- */
- if (ifp->if_real_bytes != real_size) {
- ifp->if_u1.if_data =
- kmem_realloc(ifp->if_u1.if_data,
- real_size,
- KM_SLEEP | KM_NOFS);
- }
- }
+ ifp->if_bytes = 0;
+ return;
}
- ifp->if_real_bytes = real_size;
+
+ /*
+ * For inline data, the underlying buffer must be a multiple of 4 bytes
+ * in size so that it can be logged and stay on word boundaries.
+ * We enforce that here.
+ */
+ ifp->if_u1.if_data = kmem_realloc(ifp->if_u1.if_data,
+ roundup(new_size, 4), KM_SLEEP | KM_NOFS);
ifp->if_bytes = new_size;
- ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
}
void
@@ -527,7 +503,7 @@ xfs_idestroy_fork(
xfs_inode_t *ip,
int whichfork)
{
- xfs_ifork_t *ifp;
+ struct xfs_ifork *ifp;
ifp = XFS_IFORK_PTR(ip, whichfork);
if (ifp->if_broot != NULL) {
@@ -543,17 +519,13 @@ xfs_idestroy_fork(
*/
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
if (ifp->if_u1.if_data != NULL) {
- ASSERT(ifp->if_real_bytes != 0);
kmem_free(ifp->if_u1.if_data);
ifp->if_u1.if_data = NULL;
- ifp->if_real_bytes = 0;
}
} else if ((ifp->if_flags & XFS_IFEXTENTS) && ifp->if_height) {
xfs_iext_destroy(ifp);
}
- ASSERT(ifp->if_real_bytes == 0);
-
if (whichfork == XFS_ATTR_FORK) {
kmem_zone_free(xfs_ifork_zone, ip->i_afp);
ip->i_afp = NULL;
@@ -620,7 +592,7 @@ xfs_iflush_fork(
int whichfork)
{
char *cp;
- xfs_ifork_t *ifp;
+ struct xfs_ifork *ifp;
xfs_mount_t *mp;
static const short brootflag[2] =
{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
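
The xfs_idata_realloc() rewrite above collapses the alloc/realloc branching because realloc semantics already cover the empty case. An illustrative equivalent in terms of the underlying primitive, assuming (as in XFS of this era) that kmem_realloc wraps krealloc, which treats a NULL pointer like a fresh allocation:

    void *buf = NULL;

    buf = krealloc(buf, roundup(17, 4), GFP_NOFS);  /* behaves like kmalloc */
    buf = krealloc(buf, roundup(64, 4), GFP_NOFS);  /* grows, contents kept */
    kfree(buf);
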
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 781b1603df5e..60361d2d74a1 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -12,9 +12,9 @@ struct xfs_dinode;
/*
* File incore extent information, present for each of data & attr forks.
*/
-typedef struct xfs_ifork {
+struct xfs_ifork {
int if_bytes; /* bytes in if_u1 */
- int if_real_bytes; /* bytes allocated in if_u1 */
+ unsigned int if_seq; /* cow fork mod counter */
struct xfs_btree_block *if_broot; /* file's incore btree root */
short if_broot_bytes; /* bytes allocated for root */
unsigned char if_flags; /* per-fork flags */
@@ -23,7 +23,7 @@ typedef struct xfs_ifork {
void *if_root; /* extent tree root */
char *if_data; /* inline file data */
} if_u1;
-} xfs_ifork_t;
+};
/*
* Per-fork incore inode flags.
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 79bb79853c9f..e5f97c69b320 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -77,6 +77,19 @@ static inline uint xlog_get_cycle(char *ptr)
#define XLOG_UNMOUNT_TYPE 0x556e /* Un for Unmount */
+/*
+ * Log item for unmount records.
+ *
+ * The unmount record used to have a string "Unmount filesystem--" in the
+ * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
+ * We just write the magic number now; see xfs_log_unmount_write.
+ */
+struct xfs_unmount_log_format {
+ uint16_t magic; /* XLOG_UNMOUNT_TYPE */
+ uint16_t pad1;
+ uint32_t pad2; /* may as well make it 64 bits */
+};
+
/* Region types for iovec's i_type */
#define XLOG_REG_TYPE_BFORMAT 1
#define XLOG_REG_TYPE_BCHUNK 2
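
The struct formalizes what used to be an ad-hoc string write. A minimal sketch of how the log side might populate it (the matching change to the unmount write path is not in this hunk):

    struct xfs_unmount_log_format ulf = {
            .magic = XLOG_UNMOUNT_TYPE,     /* 0x556e, the old "Un" */
    };
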
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 9dda6fd0bb13..542aa1475b5f 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -34,11 +34,9 @@ enum xfs_refc_adjust_op {
};
STATIC int __xfs_refcount_cow_alloc(struct xfs_btree_cur *rcur,
- xfs_agblock_t agbno, xfs_extlen_t aglen,
- struct xfs_defer_ops *dfops);
+ xfs_agblock_t agbno, xfs_extlen_t aglen);
STATIC int __xfs_refcount_cow_free(struct xfs_btree_cur *rcur,
- xfs_agblock_t agbno, xfs_extlen_t aglen,
- struct xfs_defer_ops *dfops);
+ xfs_agblock_t agbno, xfs_extlen_t aglen);
/*
* Look up the first record less than or equal to [bno, len] in the btree
@@ -870,7 +868,6 @@ xfs_refcount_adjust_extents(
xfs_agblock_t *agbno,
xfs_extlen_t *aglen,
enum xfs_refc_adjust_op adj,
- struct xfs_defer_ops *dfops,
struct xfs_owner_info *oinfo)
{
struct xfs_refcount_irec ext, tmp;
@@ -925,8 +922,8 @@ xfs_refcount_adjust_extents(
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
cur->bc_private.a.agno,
tmp.rc_startblock);
- xfs_bmap_add_free(cur->bc_mp, dfops, fsbno,
- tmp.rc_blockcount, oinfo);
+ xfs_bmap_add_free(cur->bc_tp, fsbno,
+ tmp.rc_blockcount, oinfo);
}
(*agbno) += tmp.rc_blockcount;
@@ -968,8 +965,8 @@ xfs_refcount_adjust_extents(
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
cur->bc_private.a.agno,
ext.rc_startblock);
- xfs_bmap_add_free(cur->bc_mp, dfops, fsbno,
- ext.rc_blockcount, oinfo);
+ xfs_bmap_add_free(cur->bc_tp, fsbno, ext.rc_blockcount,
+ oinfo);
}
skip:
@@ -998,7 +995,6 @@ xfs_refcount_adjust(
xfs_agblock_t *new_agbno,
xfs_extlen_t *new_aglen,
enum xfs_refc_adjust_op adj,
- struct xfs_defer_ops *dfops,
struct xfs_owner_info *oinfo)
{
bool shape_changed;
@@ -1043,7 +1039,7 @@ xfs_refcount_adjust(
/* Now that we've taken care of the ends, adjust the middle extents */
error = xfs_refcount_adjust_extents(cur, new_agbno, new_aglen,
- adj, dfops, oinfo);
+ adj, oinfo);
if (error)
goto out_error;
@@ -1067,7 +1063,7 @@ xfs_refcount_finish_one_cleanup(
if (rcur == NULL)
return;
agbp = rcur->bc_private.a.agbp;
- xfs_btree_del_cursor(rcur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(rcur, error);
if (error)
xfs_trans_brelse(tp, agbp);
}
@@ -1082,7 +1078,6 @@ xfs_refcount_finish_one_cleanup(
int
xfs_refcount_finish_one(
struct xfs_trans *tp,
- struct xfs_defer_ops *dfops,
enum xfs_refcount_intent_type type,
xfs_fsblock_t startblock,
xfs_extlen_t blockcount,
@@ -1132,7 +1127,7 @@ xfs_refcount_finish_one(
if (!agbp)
return -EFSCORRUPTED;
- rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, dfops);
+ rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
if (!rcur) {
error = -ENOMEM;
goto out_cur;
@@ -1145,23 +1140,23 @@ xfs_refcount_finish_one(
switch (type) {
case XFS_REFCOUNT_INCREASE:
error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
- new_len, XFS_REFCOUNT_ADJUST_INCREASE, dfops, NULL);
+ new_len, XFS_REFCOUNT_ADJUST_INCREASE, NULL);
*new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
break;
case XFS_REFCOUNT_DECREASE:
error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
- new_len, XFS_REFCOUNT_ADJUST_DECREASE, dfops, NULL);
+ new_len, XFS_REFCOUNT_ADJUST_DECREASE, NULL);
*new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
break;
case XFS_REFCOUNT_ALLOC_COW:
*new_fsb = startblock + blockcount;
*new_len = 0;
- error = __xfs_refcount_cow_alloc(rcur, bno, blockcount, dfops);
+ error = __xfs_refcount_cow_alloc(rcur, bno, blockcount);
break;
case XFS_REFCOUNT_FREE_COW:
*new_fsb = startblock + blockcount;
*new_len = 0;
- error = __xfs_refcount_cow_free(rcur, bno, blockcount, dfops);
+ error = __xfs_refcount_cow_free(rcur, bno, blockcount);
break;
default:
ASSERT(0);
@@ -1183,16 +1178,16 @@ out_cur:
*/
static int
__xfs_refcount_add(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
enum xfs_refcount_intent_type type,
xfs_fsblock_t startblock,
xfs_extlen_t blockcount)
{
struct xfs_refcount_intent *ri;
- trace_xfs_refcount_defer(mp, XFS_FSB_TO_AGNO(mp, startblock),
- type, XFS_FSB_TO_AGBNO(mp, startblock),
+ trace_xfs_refcount_defer(tp->t_mountp,
+ XFS_FSB_TO_AGNO(tp->t_mountp, startblock),
+ type, XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
blockcount);
ri = kmem_alloc(sizeof(struct xfs_refcount_intent),
@@ -1202,7 +1197,7 @@ __xfs_refcount_add(
ri->ri_startblock = startblock;
ri->ri_blockcount = blockcount;
- xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
+ xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
return 0;
}
@@ -1211,14 +1206,13 @@ __xfs_refcount_add(
*/
int
xfs_refcount_increase_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
struct xfs_bmbt_irec *PREV)
{
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_sb_version_hasreflink(&tp->t_mountp->m_sb))
return 0;
- return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_INCREASE,
+ return __xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE,
PREV->br_startblock, PREV->br_blockcount);
}
@@ -1227,14 +1221,13 @@ xfs_refcount_increase_extent(
*/
int
xfs_refcount_decrease_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
struct xfs_bmbt_irec *PREV)
{
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_sb_version_hasreflink(&tp->t_mountp->m_sb))
return 0;
- return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_DECREASE,
+ return __xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE,
PREV->br_startblock, PREV->br_blockcount);
}
@@ -1522,8 +1515,7 @@ STATIC int
__xfs_refcount_cow_alloc(
struct xfs_btree_cur *rcur,
xfs_agblock_t agbno,
- xfs_extlen_t aglen,
- struct xfs_defer_ops *dfops)
+ xfs_extlen_t aglen)
{
trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno,
agbno, aglen);
@@ -1540,8 +1532,7 @@ STATIC int
__xfs_refcount_cow_free(
struct xfs_btree_cur *rcur,
xfs_agblock_t agbno,
- xfs_extlen_t aglen,
- struct xfs_defer_ops *dfops)
+ xfs_extlen_t aglen)
{
trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno,
agbno, aglen);
@@ -1554,47 +1545,45 @@ __xfs_refcount_cow_free(
/* Record a CoW staging extent in the refcount btree. */
int
xfs_refcount_alloc_cow_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
xfs_fsblock_t fsb,
xfs_extlen_t len)
{
+ struct xfs_mount *mp = tp->t_mountp;
int error;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return 0;
- error = __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
- fsb, len);
+ error = __xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, fsb, len);
if (error)
return error;
/* Add rmap entry */
- return xfs_rmap_alloc_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
+ return xfs_rmap_alloc_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
}
/* Forget a CoW staging event in the refcount btree. */
int
xfs_refcount_free_cow_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
xfs_fsblock_t fsb,
xfs_extlen_t len)
{
+ struct xfs_mount *mp = tp->t_mountp;
int error;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return 0;
/* Remove rmap entry */
- error = xfs_rmap_free_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
+ error = xfs_rmap_free_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
if (error)
return error;
- return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW,
- fsb, len);
+ return __xfs_refcount_add(tp, XFS_REFCOUNT_FREE_COW, fsb, len);
}
struct xfs_refcount_recovery {
@@ -1635,7 +1624,6 @@ xfs_refcount_recover_cow_leftovers(
struct list_head debris;
union xfs_btree_irec low;
union xfs_btree_irec high;
- struct xfs_defer_ops dfops;
xfs_fsblock_t fsb;
xfs_agblock_t agbno;
int error;
@@ -1666,7 +1654,7 @@ xfs_refcount_recover_cow_leftovers(
error = -ENOMEM;
goto out_trans;
}
- cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
+ cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
/* Find all the leftover CoW staging extents. */
memset(&low, 0, sizeof(low));
@@ -1675,11 +1663,11 @@ xfs_refcount_recover_cow_leftovers(
high.rc.rc_startblock = -1U;
error = xfs_btree_query_range(cur, &low, &high,
xfs_refcount_recover_extent, &debris);
- if (error)
- goto out_cursor;
- xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
xfs_trans_brelse(tp, agbp);
xfs_trans_cancel(tp);
+ if (error)
+ goto out_free;
/* Now iterate the list to free the leftovers */
list_for_each_entry_safe(rr, n, &debris, rr_list) {
@@ -1691,21 +1679,15 @@ xfs_refcount_recover_cow_leftovers(
trace_xfs_refcount_recover_extent(mp, agno, &rr->rr_rrec);
/* Free the orphan record */
- xfs_defer_init(&dfops, &fsb);
agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START;
fsb = XFS_AGB_TO_FSB(mp, agno, agbno);
- error = xfs_refcount_free_cow_extent(mp, &dfops, fsb,
+ error = xfs_refcount_free_cow_extent(tp, fsb,
rr->rr_rrec.rc_blockcount);
if (error)
- goto out_defer;
+ goto out_trans;
/* Free the block. */
- xfs_bmap_add_free(mp, &dfops, fsb,
- rr->rr_rrec.rc_blockcount, NULL);
-
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_defer;
+ xfs_bmap_add_free(tp, fsb, rr->rr_rrec.rc_blockcount, NULL);
error = xfs_trans_commit(tp);
if (error)
@@ -1716,8 +1698,6 @@ xfs_refcount_recover_cow_leftovers(
}
return error;
-out_defer:
- xfs_defer_cancel(&dfops);
out_trans:
xfs_trans_cancel(tp);
out_free:
@@ -1727,11 +1707,6 @@ out_free:
kmem_free(rr);
}
return error;
-
-out_cursor:
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
- xfs_trans_brelse(tp, agbp);
- goto out_trans;
}
/* Is there a record covering a given extent? */
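
Worth noting in the recovery loop above: the explicit xfs_defer_init()/xfs_defer_finish() pairing disappears entirely. The sketch below shows the resulting loop body, on the assumption (implied by the removal of the finish call) that the commit path now drains the transaction's queued intents:

    error = xfs_refcount_free_cow_extent(tp, fsb, rr->rr_rrec.rc_blockcount);
    if (error)
            goto out_trans;
    xfs_bmap_add_free(tp, fsb, rr->rr_rrec.rc_blockcount, NULL);
    error = xfs_trans_commit(tp);   /* assumed to finish queued deferred work */
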
diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h
index 5fef74412727..1d9c518575e7 100644
--- a/fs/xfs/libxfs/xfs_refcount.h
+++ b/fs/xfs/libxfs/xfs_refcount.h
@@ -29,29 +29,26 @@ struct xfs_refcount_intent {
xfs_extlen_t ri_blockcount;
};
-extern int xfs_refcount_increase_extent(struct xfs_mount *mp,
- struct xfs_defer_ops *dfops, struct xfs_bmbt_irec *irec);
-extern int xfs_refcount_decrease_extent(struct xfs_mount *mp,
- struct xfs_defer_ops *dfops, struct xfs_bmbt_irec *irec);
+extern int xfs_refcount_increase_extent(struct xfs_trans *tp,
+ struct xfs_bmbt_irec *irec);
+extern int xfs_refcount_decrease_extent(struct xfs_trans *tp,
+ struct xfs_bmbt_irec *irec);
extern void xfs_refcount_finish_one_cleanup(struct xfs_trans *tp,
struct xfs_btree_cur *rcur, int error);
extern int xfs_refcount_finish_one(struct xfs_trans *tp,
- struct xfs_defer_ops *dfops, enum xfs_refcount_intent_type type,
- xfs_fsblock_t startblock, xfs_extlen_t blockcount,
- xfs_fsblock_t *new_fsb, xfs_extlen_t *new_len,
- struct xfs_btree_cur **pcur);
+ enum xfs_refcount_intent_type type, xfs_fsblock_t startblock,
+ xfs_extlen_t blockcount, xfs_fsblock_t *new_fsb,
+ xfs_extlen_t *new_len, struct xfs_btree_cur **pcur);
extern int xfs_refcount_find_shared(struct xfs_btree_cur *cur,
xfs_agblock_t agbno, xfs_extlen_t aglen, xfs_agblock_t *fbno,
xfs_extlen_t *flen, bool find_end_of_shared);
-extern int xfs_refcount_alloc_cow_extent(struct xfs_mount *mp,
- struct xfs_defer_ops *dfops, xfs_fsblock_t fsb,
- xfs_extlen_t len);
-extern int xfs_refcount_free_cow_extent(struct xfs_mount *mp,
- struct xfs_defer_ops *dfops, xfs_fsblock_t fsb,
- xfs_extlen_t len);
+extern int xfs_refcount_alloc_cow_extent(struct xfs_trans *tp,
+ xfs_fsblock_t fsb, xfs_extlen_t len);
+extern int xfs_refcount_free_cow_extent(struct xfs_trans *tp,
+ xfs_fsblock_t fsb, xfs_extlen_t len);
extern int xfs_refcount_recover_cow_leftovers(struct xfs_mount *mp,
xfs_agnumber_t agno);
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index b71937982c5b..1aaa01c97517 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -27,8 +27,7 @@ xfs_refcountbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_private.a.agbp, cur->bc_private.a.agno,
- cur->bc_private.a.dfops);
+ cur->bc_private.a.agbp, cur->bc_private.a.agno);
}
STATIC void
@@ -71,7 +70,6 @@ xfs_refcountbt_alloc_block(
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
xfs_refc_block(args.mp));
- args.firstblock = args.fsbno;
xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_REFC);
args.minlen = args.maxlen = args.prod = 1;
args.resv = XFS_AG_RESV_METADATA;
@@ -323,8 +321,7 @@ xfs_refcountbt_init_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
- xfs_agnumber_t agno,
- struct xfs_defer_ops *dfops)
+ xfs_agnumber_t agno)
{
struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
struct xfs_btree_cur *cur;
@@ -344,7 +341,6 @@ xfs_refcountbt_init_cursor(
cur->bc_private.a.agbp = agbp;
cur->bc_private.a.agno = agno;
- cur->bc_private.a.dfops = dfops;
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
cur->bc_private.a.priv.refc.nr_ops = 0;
@@ -408,6 +404,7 @@ xfs_refcountbt_max_size(
int
xfs_refcountbt_calc_reserves(
struct xfs_mount *mp,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_extlen_t *ask,
xfs_extlen_t *used)
@@ -422,14 +419,14 @@ xfs_refcountbt_calc_reserves(
return 0;
- error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+ error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
if (error)
return error;
agf = XFS_BUF_TO_AGF(agbp);
agblocks = be32_to_cpu(agf->agf_length);
tree_len = be32_to_cpu(agf->agf_refcount_blocks);
- xfs_buf_relse(agbp);
+ xfs_trans_brelse(tp, agbp);
*ask += xfs_refcountbt_max_size(mp, agblocks);
*used += tree_len;
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.h b/fs/xfs/libxfs/xfs_refcount_btree.h
index d2852b6e1fa8..ba416f71c824 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.h
+++ b/fs/xfs/libxfs/xfs_refcount_btree.h
@@ -44,8 +44,8 @@ struct xfs_mount;
((index) - 1) * sizeof(xfs_refcount_ptr_t)))
extern struct xfs_btree_cur *xfs_refcountbt_init_cursor(struct xfs_mount *mp,
- struct xfs_trans *tp, struct xfs_buf *agbp, xfs_agnumber_t agno,
- struct xfs_defer_ops *dfops);
+ struct xfs_trans *tp, struct xfs_buf *agbp,
+ xfs_agnumber_t agno);
extern int xfs_refcountbt_maxrecs(int blocklen, bool leaf);
extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
@@ -55,6 +55,7 @@ extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp,
xfs_agblock_t agblocks);
extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp,
- xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
+ struct xfs_trans *tp, xfs_agnumber_t agno, xfs_extlen_t *ask,
+ xfs_extlen_t *used);
#endif /* __XFS_REFCOUNT_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index d4460b0d2d81..245af452840e 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -670,14 +670,8 @@ xfs_rmap_free(
cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
- if (error)
- goto out_error;
- xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
- return 0;
-
-out_error:
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+ xfs_btree_del_cursor(cur, error);
return error;
}
@@ -753,19 +747,19 @@ xfs_rmap_map(
&have_lt);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
-
- error = xfs_rmap_get_rec(cur, &ltrec, &have_lt);
- if (error)
- goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
- trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_private.a.agno, ltrec.rm_startblock,
- ltrec.rm_blockcount, ltrec.rm_owner,
- ltrec.rm_offset, ltrec.rm_flags);
+ if (have_lt) {
+ error = xfs_rmap_get_rec(cur, &ltrec, &have_lt);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
+ trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
+ cur->bc_private.a.agno, ltrec.rm_startblock,
+ ltrec.rm_blockcount, ltrec.rm_owner,
+ ltrec.rm_offset, ltrec.rm_flags);
- if (!xfs_rmap_is_mergeable(&ltrec, owner, flags))
- have_lt = 0;
+ if (!xfs_rmap_is_mergeable(&ltrec, owner, flags))
+ have_lt = 0;
+ }
XFS_WANT_CORRUPTED_GOTO(mp,
have_lt == 0 ||
@@ -912,14 +906,8 @@ xfs_rmap_alloc(
cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
error = xfs_rmap_map(cur, bno, len, false, oinfo);
- if (error)
- goto out_error;
-
- xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
- return 0;
-out_error:
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+ xfs_btree_del_cursor(cur, error);
return error;
}
@@ -2156,7 +2144,7 @@ xfs_rmap_finish_one_cleanup(
if (rcur == NULL)
return;
agbp = rcur->bc_private.a.agbp;
- xfs_btree_del_cursor(rcur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(rcur, error);
if (error)
xfs_trans_brelse(tp, agbp);
}
@@ -2289,18 +2277,18 @@ xfs_rmap_update_is_needed(
*/
static int
__xfs_rmap_add(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
enum xfs_rmap_intent_type type,
uint64_t owner,
int whichfork,
struct xfs_bmbt_irec *bmap)
{
- struct xfs_rmap_intent *ri;
+ struct xfs_rmap_intent *ri;
- trace_xfs_rmap_defer(mp, XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
+ trace_xfs_rmap_defer(tp->t_mountp,
+ XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
type,
- XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
+ XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
owner, whichfork,
bmap->br_startoff,
bmap->br_blockcount,
@@ -2313,23 +2301,22 @@ __xfs_rmap_add(
ri->ri_whichfork = whichfork;
ri->ri_bmap = *bmap;
- xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
+ xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
return 0;
}
/* Map an extent into a file. */
int
xfs_rmap_map_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
struct xfs_bmbt_irec *PREV)
{
- if (!xfs_rmap_update_is_needed(mp, whichfork))
+ if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
return 0;
- return __xfs_rmap_add(mp, dfops, xfs_is_reflink_inode(ip) ?
+ return __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
XFS_RMAP_MAP_SHARED : XFS_RMAP_MAP, ip->i_ino,
whichfork, PREV);
}
@@ -2337,25 +2324,29 @@ xfs_rmap_map_extent(
/* Unmap an extent out of a file. */
int
xfs_rmap_unmap_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
struct xfs_bmbt_irec *PREV)
{
- if (!xfs_rmap_update_is_needed(mp, whichfork))
+ if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
return 0;
- return __xfs_rmap_add(mp, dfops, xfs_is_reflink_inode(ip) ?
+ return __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
XFS_RMAP_UNMAP_SHARED : XFS_RMAP_UNMAP, ip->i_ino,
whichfork, PREV);
}
-/* Convert a data fork extent from unwritten to real or vice versa. */
+/*
+ * Convert a data fork extent from unwritten to real or vice versa.
+ *
+ * Note that tp can be NULL here as no transaction is used for COW fork
+ * unwritten conversion.
+ */
int
xfs_rmap_convert_extent(
struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
struct xfs_bmbt_irec *PREV)
@@ -2363,7 +2354,7 @@ xfs_rmap_convert_extent(
if (!xfs_rmap_update_is_needed(mp, whichfork))
return 0;
- return __xfs_rmap_add(mp, dfops, xfs_is_reflink_inode(ip) ?
+ return __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
XFS_RMAP_CONVERT_SHARED : XFS_RMAP_CONVERT, ip->i_ino,
whichfork, PREV);
}
@@ -2371,8 +2362,7 @@ xfs_rmap_convert_extent(
/* Schedule the creation of an rmap for non-file data. */
int
xfs_rmap_alloc_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_agblock_t bno,
xfs_extlen_t len,
@@ -2380,23 +2370,21 @@ xfs_rmap_alloc_extent(
{
struct xfs_bmbt_irec bmap;
- if (!xfs_rmap_update_is_needed(mp, XFS_DATA_FORK))
+ if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
return 0;
- bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
+ bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
bmap.br_blockcount = len;
bmap.br_startoff = 0;
bmap.br_state = XFS_EXT_NORM;
- return __xfs_rmap_add(mp, dfops, XFS_RMAP_ALLOC, owner,
- XFS_DATA_FORK, &bmap);
+ return __xfs_rmap_add(tp, XFS_RMAP_ALLOC, owner, XFS_DATA_FORK, &bmap);
}
/* Schedule the deletion of an rmap for non-file data. */
int
xfs_rmap_free_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_agblock_t bno,
xfs_extlen_t len,
@@ -2404,16 +2392,15 @@ xfs_rmap_free_extent(
{
struct xfs_bmbt_irec bmap;
- if (!xfs_rmap_update_is_needed(mp, XFS_DATA_FORK))
+ if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
return 0;
- bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
+ bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
bmap.br_blockcount = len;
bmap.br_startoff = 0;
bmap.br_state = XFS_EXT_NORM;
- return __xfs_rmap_add(mp, dfops, XFS_RMAP_FREE, owner,
- XFS_DATA_FORK, &bmap);
+ return __xfs_rmap_add(tp, XFS_RMAP_FREE, owner, XFS_DATA_FORK, &bmap);
}
/* Compare rmap records. Returns -1 if a < b, 1 if a > b, and 0 if equal. */
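
A pattern repeated throughout this patch: xfs_btree_del_cursor() now accepts the error code directly, so the error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR selection and the dedicated out_error labels collapse into a single exit path. The resulting idiom, as seen in xfs_rmap_free()/xfs_rmap_alloc() above:

    cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
    error = xfs_rmap_map(cur, bno, len, false, oinfo);
    xfs_btree_del_cursor(cur, error);   /* nonzero error => error teardown */
    return error;
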
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index 9f19454768b2..157dc722ad35 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -185,21 +185,17 @@ struct xfs_rmap_intent {
};
/* functions for updating the rmapbt based on bmbt map/unmap operations */
-int xfs_rmap_map_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+int xfs_rmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+ int whichfork, struct xfs_bmbt_irec *imap);
+int xfs_rmap_unmap_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+ int whichfork, struct xfs_bmbt_irec *imap);
+int xfs_rmap_convert_extent(struct xfs_mount *mp, struct xfs_trans *tp,
struct xfs_inode *ip, int whichfork,
struct xfs_bmbt_irec *imap);
-int xfs_rmap_unmap_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
- struct xfs_inode *ip, int whichfork,
- struct xfs_bmbt_irec *imap);
-int xfs_rmap_convert_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
- struct xfs_inode *ip, int whichfork,
- struct xfs_bmbt_irec *imap);
-int xfs_rmap_alloc_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
- xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
- uint64_t owner);
-int xfs_rmap_free_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
- xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
- uint64_t owner);
+int xfs_rmap_alloc_extent(struct xfs_trans *tp, xfs_agnumber_t agno,
+ xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner);
+int xfs_rmap_free_extent(struct xfs_trans *tp, xfs_agnumber_t agno,
+ xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner);
void xfs_rmap_finish_one_cleanup(struct xfs_trans *tp,
struct xfs_btree_cur *rcur, int error);
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 221a88ea60bb..f79cf040d745 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -554,6 +554,7 @@ xfs_rmapbt_max_size(
int
xfs_rmapbt_calc_reserves(
struct xfs_mount *mp,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_extlen_t *ask,
xfs_extlen_t *used)
@@ -567,14 +568,14 @@ xfs_rmapbt_calc_reserves(
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
return 0;
- error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+ error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
if (error)
return error;
agf = XFS_BUF_TO_AGF(agbp);
agblocks = be32_to_cpu(agf->agf_length);
tree_len = be32_to_cpu(agf->agf_rmap_blocks);
- xfs_buf_relse(agbp);
+ xfs_trans_brelse(tp, agbp);
/* Reserve 1% of the AG or enough for 1 block per record. */
*ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h
index 50198b6c3bb2..820d668b063d 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.h
+++ b/fs/xfs/libxfs/xfs_rmap_btree.h
@@ -51,7 +51,7 @@ extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp,
extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp,
xfs_agblock_t agblocks);
-extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp,
+extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
#endif /* __XFS_RMAP_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 350119eeaecb..081f46e30556 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -96,80 +96,146 @@ xfs_perag_put(
trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
}
-/*
- * Check the validity of the SB found.
- */
+/* Check all the superblock fields we care about when reading one in. */
STATIC int
-xfs_mount_validate_sb(
- xfs_mount_t *mp,
- xfs_sb_t *sbp,
- bool check_inprogress,
- bool check_version)
+xfs_validate_sb_read(
+ struct xfs_mount *mp,
+ struct xfs_sb *sbp)
{
- uint32_t agcount = 0;
- uint32_t rem;
-
- if (sbp->sb_magicnum != XFS_SB_MAGIC) {
- xfs_warn(mp, "bad magic number");
- return -EWRONGFS;
- }
-
-
- if (!xfs_sb_good_version(sbp)) {
- xfs_warn(mp, "bad version");
- return -EWRONGFS;
- }
+ if (XFS_SB_VERSION_NUM(sbp) != XFS_SB_VERSION_5)
+ return 0;
/*
- * Version 5 superblock feature mask validation. Reject combinations the
- * kernel cannot support up front before checking anything else. For
- * write validation, we don't need to check feature masks.
+ * Version 5 superblock feature mask validation. Reject combinations
+ * the kernel cannot support up front before checking anything else.
*/
- if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
- if (xfs_sb_has_compat_feature(sbp,
- XFS_SB_FEAT_COMPAT_UNKNOWN)) {
- xfs_warn(mp,
+ if (xfs_sb_has_compat_feature(sbp, XFS_SB_FEAT_COMPAT_UNKNOWN)) {
+ xfs_warn(mp,
"Superblock has unknown compatible features (0x%x) enabled.",
- (sbp->sb_features_compat &
- XFS_SB_FEAT_COMPAT_UNKNOWN));
- xfs_warn(mp,
+ (sbp->sb_features_compat & XFS_SB_FEAT_COMPAT_UNKNOWN));
+ xfs_warn(mp,
"Using a more recent kernel is recommended.");
- }
+ }
- if (xfs_sb_has_ro_compat_feature(sbp,
- XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
- xfs_alert(mp,
+ if (xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+ xfs_alert(mp,
"Superblock has unknown read-only compatible features (0x%x) enabled.",
- (sbp->sb_features_ro_compat &
- XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
- if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
- xfs_warn(mp,
+ (sbp->sb_features_ro_compat &
+ XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
+ if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
+ xfs_warn(mp,
"Attempted to mount read-only compatible filesystem read-write.");
- xfs_warn(mp,
+ xfs_warn(mp,
"Filesystem can only be safely mounted read only.");
- return -EINVAL;
- }
- }
- if (xfs_sb_has_incompat_feature(sbp,
- XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
- xfs_warn(mp,
-"Superblock has unknown incompatible features (0x%x) enabled.",
- (sbp->sb_features_incompat &
- XFS_SB_FEAT_INCOMPAT_UNKNOWN));
- xfs_warn(mp,
-"Filesystem can not be safely mounted by this kernel.");
return -EINVAL;
}
- } else if (xfs_sb_version_hascrc(sbp)) {
- /*
- * We can't read verify the sb LSN because the read verifier is
- * called before the log is allocated and processed. We know the
- * log is set up before write verifier (!check_version) calls,
- * so just check it here.
- */
- if (!xfs_log_check_lsn(mp, sbp->sb_lsn))
- return -EFSCORRUPTED;
+ }
+ if (xfs_sb_has_incompat_feature(sbp, XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
+ xfs_warn(mp,
+"Superblock has unknown incompatible features (0x%x) enabled.",
+ (sbp->sb_features_incompat &
+ XFS_SB_FEAT_INCOMPAT_UNKNOWN));
+ xfs_warn(mp,
+"Filesystem cannot be safely mounted by this kernel.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Check all the superblock fields we care about when writing one out. */
+STATIC int
+xfs_validate_sb_write(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp,
+ struct xfs_sb *sbp)
+{
+ /*
+ * Carry out additional sb summary counter sanity checks when we write
+ * the superblock. We skip this in the read validator because there
+ * could be newer superblocks in the log and if the values are garbage
+ * even after replay we'll recalculate them at the end of log mount.
+ *
+ * mkfs has traditionally written zeroed counters to inprogress and
+ * secondary superblocks, so allow this usage to continue because
+ * we never read counters from such superblocks.
+ */
+ if (XFS_BUF_ADDR(bp) == XFS_SB_DADDR && !sbp->sb_inprogress &&
+ (sbp->sb_fdblocks > sbp->sb_dblocks ||
+ !xfs_verify_icount(mp, sbp->sb_icount) ||
+ sbp->sb_ifree > sbp->sb_icount)) {
+ xfs_warn(mp, "SB summary counter sanity check failed");
+ return -EFSCORRUPTED;
+ }
+
+ if (XFS_SB_VERSION_NUM(sbp) != XFS_SB_VERSION_5)
+ return 0;
+
+ /*
+ * Version 5 superblock feature mask validation. Reject combinations
+ * the kernel cannot support since we checked for unsupported bits in
+ * the read verifier, which means that memory is corrupt.
+ */
+ if (xfs_sb_has_compat_feature(sbp, XFS_SB_FEAT_COMPAT_UNKNOWN)) {
+ xfs_warn(mp,
+"Corruption detected in superblock compatible features (0x%x)!",
+ (sbp->sb_features_compat & XFS_SB_FEAT_COMPAT_UNKNOWN));
+ return -EFSCORRUPTED;
+ }
+
+ if (xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+ xfs_alert(mp,
+"Corruption detected in superblock read-only compatible features (0x%x)!",
+ (sbp->sb_features_ro_compat &
+ XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
+ return -EFSCORRUPTED;
+ }
+ if (xfs_sb_has_incompat_feature(sbp, XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
+ xfs_warn(mp,
+"Corruption detected in superblock incompatible features (0x%x)!",
+ (sbp->sb_features_incompat &
+ XFS_SB_FEAT_INCOMPAT_UNKNOWN));
+ return -EFSCORRUPTED;
+ }
+ if (xfs_sb_has_incompat_log_feature(sbp,
+ XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
+ xfs_warn(mp,
+"Corruption detected in superblock incompatible log features (0x%x)!",
+ (sbp->sb_features_log_incompat &
+ XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
+ return -EFSCORRUPTED;
+ }
+
+ /*
+ * We can't read verify the sb LSN because the read verifier is called
+ * before the log is allocated and processed. We know the log is set up
+ * before write verifier calls, so check it here.
+ */
+ if (!xfs_log_check_lsn(mp, sbp->sb_lsn))
+ return -EFSCORRUPTED;
+
+ return 0;
+}
+
+/* Check the validity of the SB. */
+STATIC int
+xfs_validate_sb_common(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp,
+ struct xfs_sb *sbp)
+{
+ uint32_t agcount = 0;
+ uint32_t rem;
+
+ if (sbp->sb_magicnum != XFS_SB_MAGIC) {
+ xfs_warn(mp, "bad magic number");
+ return -EWRONGFS;
+ }
+
+ if (!xfs_sb_good_version(sbp)) {
+ xfs_warn(mp, "bad version");
+ return -EWRONGFS;
}
if (xfs_sb_version_has_pquotino(sbp)) {
@@ -321,7 +387,12 @@ xfs_mount_validate_sb(
return -EFBIG;
}
- if (check_inprogress && sbp->sb_inprogress) {
+ /*
+ * Don't touch the filesystem if a user tool thinks it owns the primary
+ * superblock. mkfs doesn't clear the flag from secondary supers, so
+ * we don't check them at all.
+ */
+ if (XFS_BUF_ADDR(bp) == XFS_SB_DADDR && sbp->sb_inprogress) {
xfs_warn(mp, "Offline file system operation in progress!");
return -EFSCORRUPTED;
}
@@ -596,29 +667,6 @@ xfs_sb_to_disk(
}
}
-static int
-xfs_sb_verify(
- struct xfs_buf *bp,
- bool check_version)
-{
- struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_sb sb;
-
- /*
- * Use call variant which doesn't convert quota flags from disk
- * format, because xfs_mount_validate_sb checks the on-disk flags.
- */
- __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);
-
- /*
- * Only check the in progress field for the primary superblock as
- * mkfs.xfs doesn't clear it from secondary superblocks.
- */
- return xfs_mount_validate_sb(mp, &sb,
- bp->b_maps[0].bm_bn == XFS_SB_DADDR,
- check_version);
-}
-
/*
* If the superblock has the CRC feature bit set or the CRC field is non-null,
* check that the CRC is valid. We check the CRC field is non-null because a
@@ -633,11 +681,12 @@ xfs_sb_verify(
*/
static void
xfs_sb_read_verify(
- struct xfs_buf *bp)
+ struct xfs_buf *bp)
{
- struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp);
- int error;
+ struct xfs_sb sb;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp);
+ int error;
/*
* open code the version check to avoid needing to convert the entire
@@ -657,7 +706,16 @@ xfs_sb_read_verify(
}
}
}
- error = xfs_sb_verify(bp, true);
+
+ /*
+ * Check all the superblock fields. Don't byteswap the xquota flags
+ * because _verify_common checks the on-disk values.
+ */
+ __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);
+ error = xfs_validate_sb_common(mp, bp, &sb);
+ if (error)
+ goto out_error;
+ error = xfs_validate_sb_read(mp, &sb);
out_error:
if (error == -EFSCORRUPTED || error == -EFSBADCRC)
@@ -691,15 +749,22 @@ static void
xfs_sb_write_verify(
struct xfs_buf *bp)
{
+ struct xfs_sb sb;
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_buf_log_item *bip = bp->b_log_item;
int error;
- error = xfs_sb_verify(bp, false);
- if (error) {
- xfs_verifier_error(bp, error, __this_address);
- return;
- }
+ /*
+ * Check all the superblock fields. Don't byteswap the xquota flags
+ * because _verify_common checks the on-disk values.
+ */
+ __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);
+ error = xfs_validate_sb_common(mp, bp, &sb);
+ if (error)
+ goto out_error;
+ error = xfs_validate_sb_write(mp, bp, &sb);
+ if (error)
+ goto out_error;
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
@@ -708,6 +773,10 @@ xfs_sb_write_verify(
XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
xfs_buf_update_cksum(bp, XFS_SB_CRC_OFF);
+ return;
+
+out_error:
+ xfs_verifier_error(bp, error, __this_address);
}
const struct xfs_buf_ops xfs_sb_buf_ops = {
@@ -804,6 +873,7 @@ xfs_initialize_perag_data(
uint64_t bfree = 0;
uint64_t bfreelst = 0;
uint64_t btree = 0;
+ uint64_t fdblocks;
int error;
for (index = 0; index < agcount; index++) {
@@ -827,17 +897,31 @@ xfs_initialize_perag_data(
btree += pag->pagf_btreeblks;
xfs_perag_put(pag);
}
+ fdblocks = bfree + bfreelst + btree;
+
+ /*
+ * If the new summary counts are obviously incorrect, fail the
+ * mount operation because that implies the AGFs are also corrupt.
+ * Clear BAD_SUMMARY so that we don't unmount with a dirty log, which
+ * will prevent xfs_repair from fixing anything.
+ */
+ if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
+ xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
+ error = -EFSCORRUPTED;
+ goto out;
+ }
/* Overwrite incore superblock counters with just-read data */
spin_lock(&mp->m_sb_lock);
sbp->sb_ifree = ifree;
sbp->sb_icount = ialloc;
- sbp->sb_fdblocks = bfree + bfreelst + btree;
+ sbp->sb_fdblocks = fdblocks;
spin_unlock(&mp->m_sb_lock);
xfs_reinit_percpu_counters(mp);
-
- return 0;
+out:
+ mp->m_flags &= ~XFS_MOUNT_BAD_SUMMARY;
+ return error;
}
/*
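
The split verifiers above tighten write-time checking; restated as a single predicate, the new summary-counter invariant for a primary superblock that is not marked in progress (a paraphrase for illustration, not kernel code):

    static bool sb_counters_sane(struct xfs_mount *mp, struct xfs_sb *sbp)
    {
            return sbp->sb_fdblocks <= sbp->sb_dblocks &&
                   sbp->sb_ifree <= sbp->sb_icount &&
                   xfs_verify_icount(mp, sbp->sb_icount);
    }
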
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 22089f1c880a..1c5debe748f0 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -64,6 +64,18 @@ void xfs_log_get_max_trans_res(struct xfs_mount *mp,
#define XFS_TRANS_RESERVE 0x20 /* OK to use reserved data blocks */
#define XFS_TRANS_NO_WRITECOUNT 0x40 /* do not elevate SB writecount */
#define XFS_TRANS_NOFS 0x80 /* pass KM_NOFS to kmem_alloc */
+/*
+ * LOWMODE is used by the allocator to activate the lowspace algorithm - when
+ * free space is running low the extent allocator may choose to allocate an
+ * extent from an AG without leaving sufficient space for a btree split when
+ * inserting the new extent. In this case the allocator will enable the
+ * lowspace algorithm which is supposed to allow further allocations (such as
+ * btree splits and newroots) to allocate from sequential AGs. In order to
+ * avoid locking AGs out of order the lowspace algorithm will start searching
+ * for free space from AG 0. If the correct transaction reservations have been
+ * made then this algorithm will eventually find all the space it needs.
+ */
+#define XFS_TRANS_LOWMODE 0x100 /* allocate in low space mode */
/*
* Field values for xfs_trans_mod_sb.
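
XFS_TRANS_LOWMODE replaces the old dop_low boolean, so low-space state now survives transaction rolls along with the rest of t_flags. A hedged sketch of an allocator-side test (the allocator hunks live elsewhere in the patch; start_agno is a hypothetical local):

    /* Hypothetical: honor lowspace mode when picking a starting AG. */
    if (tp->t_flags & XFS_TRANS_LOWMODE)
            start_agno = 0;         /* scan from AG 0 to keep AG lock order */
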
diff --git a/fs/xfs/libxfs/xfs_types.c b/fs/xfs/libxfs/xfs_types.c
index 2e2a243cef2e..33a5ca346baf 100644
--- a/fs/xfs/libxfs/xfs_types.c
+++ b/fs/xfs/libxfs/xfs_types.c
@@ -171,3 +171,37 @@ xfs_verify_rtbno(
{
return rtbno < mp->m_sb.sb_rblocks;
}
+
+/* Calculate the range of valid icount values. */
+static void
+xfs_icount_range(
+ struct xfs_mount *mp,
+ unsigned long long *min,
+ unsigned long long *max)
+{
+ unsigned long long nr_inos = 0;
+ xfs_agnumber_t agno;
+
+ /* root, rtbitmap, rtsum all live in the first chunk */
+ *min = XFS_INODES_PER_CHUNK;
+
+ for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+ xfs_agino_t first, last;
+
+ xfs_agino_range(mp, agno, &first, &last);
+ nr_inos += last - first + 1;
+ }
+ *max = nr_inos;
+}
+
+/* Sanity-checking of inode counts. */
+bool
+xfs_verify_icount(
+ struct xfs_mount *mp,
+ unsigned long long icount)
+{
+ unsigned long long min, max;
+
+ xfs_icount_range(mp, &min, &max);
+ return icount >= min && icount <= max;
+}
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 4055d62f690c..b9e6c89284c3 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -165,5 +165,6 @@ bool xfs_verify_ino(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_internal_inum(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_verify_dir_ino(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
+bool xfs_verify_icount(struct xfs_mount *mp, unsigned long long icount);
#endif /* __XFS_TYPES_H__ */
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index 9bb0745f1ad2..3068a9382feb 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -28,30 +28,30 @@
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_superblock_xref(
- struct xfs_scrub_context *sc,
- struct xfs_buf *bp)
+xchk_superblock_xref(
+ struct xfs_scrub *sc,
+ struct xfs_buf *bp)
{
- struct xfs_owner_info oinfo;
- struct xfs_mount *mp = sc->mp;
- xfs_agnumber_t agno = sc->sm->sm_agno;
- xfs_agblock_t agbno;
- int error;
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agnumber_t agno = sc->sm->sm_agno;
+ xfs_agblock_t agbno;
+ int error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
agbno = XFS_SB_BLOCK(mp);
- error = xfs_scrub_ag_init(sc, agno, &sc->sa);
- if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
+ error = xchk_ag_init(sc, agno, &sc->sa);
+ if (!xchk_xref_process_error(sc, agno, agbno, &error))
return;
- xfs_scrub_xref_is_used_space(sc, agbno, 1);
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xchk_xref_is_used_space(sc, agbno, 1);
+ xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
- xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
- xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+ xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_xref_is_not_shared(sc, agbno, 1);
/* scrub teardown will take care of sc->sa for us */
}
@@ -65,17 +65,17 @@ xfs_scrub_superblock_xref(
* sb 0 is ok and we can use its information to check everything else.
*/
int
-xfs_scrub_superblock(
- struct xfs_scrub_context *sc)
+xchk_superblock(
+ struct xfs_scrub *sc)
{
- struct xfs_mount *mp = sc->mp;
- struct xfs_buf *bp;
- struct xfs_dsb *sb;
- xfs_agnumber_t agno;
- uint32_t v2_ok;
- __be32 features_mask;
- int error;
- __be16 vernum_mask;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_buf *bp;
+ struct xfs_dsb *sb;
+ xfs_agnumber_t agno;
+ uint32_t v2_ok;
+ __be32 features_mask;
+ int error;
+ __be16 vernum_mask;
agno = sc->sm->sm_agno;
if (agno == 0)
@@ -98,7 +98,7 @@ xfs_scrub_superblock(
default:
break;
}
- if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
+ if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
return error;
sb = XFS_BUF_TO_SBP(bp);
@@ -110,46 +110,46 @@ xfs_scrub_superblock(
* checked.
*/
if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Check sb_versionnum bits that are set at mkfs time. */
vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
@@ -163,7 +163,7 @@ xfs_scrub_superblock(
XFS_SB_VERSION_DIRV2BIT);
if ((sb->sb_versionnum & vernum_mask) !=
(cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Check sb_versionnum bits that can be set after mkfs time. */
vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
@@ -171,40 +171,40 @@ xfs_scrub_superblock(
XFS_SB_VERSION_QUOTABIT);
if ((sb->sb_versionnum & vernum_mask) !=
(cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
/*
* Skip the summary counters since we track them in memory anyway.
@@ -212,10 +212,10 @@ xfs_scrub_superblock(
*/
if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
/*
* Skip the quota flags since repair will force quotacheck.
@@ -223,46 +223,46 @@ xfs_scrub_superblock(
*/
if (sb->sb_flags != mp->m_sb.sb_flags)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Do we see any invalid bits in sb_features2? */
if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
if (sb->sb_features2 != 0)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
} else {
v2_ok = XFS_SB_VERSION2_OKBITS;
if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
v2_ok |= XFS_SB_VERSION2_CRCBIT;
if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_features2 != sb->sb_bad_features2)
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
}
/* Check sb_features2 flags that are set at mkfs time. */
@@ -272,26 +272,26 @@ xfs_scrub_superblock(
XFS_SB_VERSION2_FTYPE);
if ((sb->sb_features2 & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Check sb_features2 flags that can be set after mkfs time. */
features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
if ((sb->sb_features2 & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (!xfs_sb_version_hascrc(&mp->m_sb)) {
/* all v5 fields must be zero */
if (memchr_inv(&sb->sb_features_compat, 0,
sizeof(struct xfs_dsb) -
offsetof(struct xfs_dsb, sb_features_compat)))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
} else {
/* Check compat flags; all are set at mkfs time. */
features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
if ((sb->sb_features_compat & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Check ro compat flags; all are set at mkfs time. */
features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
@@ -301,7 +301,7 @@ xfs_scrub_superblock(
if ((sb->sb_features_ro_compat & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
features_mask))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Check incompat flags; all are set at mkfs time. */
features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
@@ -311,22 +311,22 @@ xfs_scrub_superblock(
if ((sb->sb_features_incompat & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features_incompat) &
features_mask))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Check log incompat flags; all are set at mkfs time. */
features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
if ((sb->sb_features_log_incompat & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
features_mask))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Don't care about sb_crc */
if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
/* Don't care about sb_lsn */
}
@@ -334,15 +334,15 @@ xfs_scrub_superblock(
if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
/* The metadata UUID must be the same for all supers */
if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
}
/* Everything else must be zero. */
if (memchr_inv(sb + 1, 0,
BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
- xfs_scrub_superblock_xref(sc, bp);
+ xchk_superblock_xref(sc, bp);
return error;
}
@@ -351,7 +351,7 @@ xfs_scrub_superblock(
/* Tally freespace record lengths. */
STATIC int
-xfs_scrub_agf_record_bno_lengths(
+xchk_agf_record_bno_lengths(
struct xfs_btree_cur *cur,
struct xfs_alloc_rec_incore *rec,
void *priv)
@@ -364,75 +364,75 @@ xfs_scrub_agf_record_bno_lengths(
/* Check agf_freeblks */
static inline void
-xfs_scrub_agf_xref_freeblks(
- struct xfs_scrub_context *sc)
+xchk_agf_xref_freeblks(
+ struct xfs_scrub *sc)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
- xfs_extlen_t blocks = 0;
- int error;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ xfs_extlen_t blocks = 0;
+ int error;
if (!sc->sa.bno_cur)
return;
error = xfs_alloc_query_all(sc->sa.bno_cur,
- xfs_scrub_agf_record_bno_lengths, &blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+ xchk_agf_record_bno_lengths, &blocks);
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
return;
if (blocks != be32_to_cpu(agf->agf_freeblks))
- xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
-xfs_scrub_agf_xref_cntbt(
- struct xfs_scrub_context *sc)
+xchk_agf_xref_cntbt(
+ struct xfs_scrub *sc)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
- xfs_agblock_t agbno;
- xfs_extlen_t blocks;
- int have;
- int error;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ xfs_agblock_t agbno;
+ xfs_extlen_t blocks;
+ int have;
+ int error;
if (!sc->sa.cnt_cur)
return;
/* Any freespace at all? */
error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
return;
if (!have) {
if (agf->agf_freeblks != be32_to_cpu(0))
- xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
return;
}
/* Check agf_longest */
error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
return;
if (!have || blocks != be32_to_cpu(agf->agf_longest))
- xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Check the btree block counts in the AGF against the btrees. */
STATIC void
-xfs_scrub_agf_xref_btreeblks(
- struct xfs_scrub_context *sc)
+xchk_agf_xref_btreeblks(
+ struct xfs_scrub *sc)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
- struct xfs_mount *mp = sc->mp;
- xfs_agblock_t blocks;
- xfs_agblock_t btreeblks;
- int error;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t blocks;
+ xfs_agblock_t btreeblks;
+ int error;
/* Check agf_rmap_blocks; set up for agf_btreeblks check */
if (sc->sa.rmap_cur) {
error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
btreeblks = blocks - 1;
if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
- xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
} else {
btreeblks = 0;
}
@@ -447,136 +447,136 @@ xfs_scrub_agf_xref_btreeblks(
/* Check agf_btreeblks */
error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
return;
btreeblks += blocks - 1;
error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
return;
btreeblks += blocks - 1;
if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
- xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Check agf_refcount_blocks against tree size */
static inline void
-xfs_scrub_agf_xref_refcblks(
- struct xfs_scrub_context *sc)
+xchk_agf_xref_refcblks(
+ struct xfs_scrub *sc)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
- xfs_agblock_t blocks;
- int error;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ xfs_agblock_t blocks;
+ int error;
if (!sc->sa.refc_cur)
return;
error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return;
if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
- xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_agf_xref(
- struct xfs_scrub_context *sc)
+xchk_agf_xref(
+ struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
- struct xfs_mount *mp = sc->mp;
- xfs_agblock_t agbno;
- int error;
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
agbno = XFS_AGF_BLOCK(mp);
- error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ error = xchk_ag_btcur_init(sc, &sc->sa);
if (error)
return;
- xfs_scrub_xref_is_used_space(sc, agbno, 1);
- xfs_scrub_agf_xref_freeblks(sc);
- xfs_scrub_agf_xref_cntbt(sc);
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xchk_xref_is_used_space(sc, agbno, 1);
+ xchk_agf_xref_freeblks(sc);
+ xchk_agf_xref_cntbt(sc);
+ xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
- xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
- xfs_scrub_agf_xref_btreeblks(sc);
- xfs_scrub_xref_is_not_shared(sc, agbno, 1);
- xfs_scrub_agf_xref_refcblks(sc);
+ xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_agf_xref_btreeblks(sc);
+ xchk_xref_is_not_shared(sc, agbno, 1);
+ xchk_agf_xref_refcblks(sc);
/* scrub teardown will take care of sc->sa for us */
}
/* Scrub the AGF. */
int
-xfs_scrub_agf(
- struct xfs_scrub_context *sc)
+xchk_agf(
+ struct xfs_scrub *sc)
{
- struct xfs_mount *mp = sc->mp;
- struct xfs_agf *agf;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- xfs_agblock_t eoag;
- xfs_agblock_t agfl_first;
- xfs_agblock_t agfl_last;
- xfs_agblock_t agfl_count;
- xfs_agblock_t fl_count;
- int level;
- int error = 0;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_agf *agf;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_agblock_t eoag;
+ xfs_agblock_t agfl_first;
+ xfs_agblock_t agfl_last;
+ xfs_agblock_t agfl_count;
+ xfs_agblock_t fl_count;
+ int level;
+ int error = 0;
agno = sc->sa.agno = sc->sm->sm_agno;
- error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
+ error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
&sc->sa.agf_bp, &sc->sa.agfl_bp);
- if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
+ if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
goto out;
- xfs_scrub_buffer_recheck(sc, sc->sa.agf_bp);
+ xchk_buffer_recheck(sc, sc->sa.agf_bp);
agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
/* Check the AG length */
eoag = be32_to_cpu(agf->agf_length);
if (eoag != xfs_ag_block_count(mp, agno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
/* Check the AGF btree roots and levels */
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
if (!xfs_verify_agbno(mp, agno, agbno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
if (!xfs_verify_agbno(mp, agno, agbno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
if (!xfs_verify_agbno(mp, agno, agbno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
}
if (xfs_sb_version_hasreflink(&mp->m_sb)) {
agbno = be32_to_cpu(agf->agf_refcount_root);
if (!xfs_verify_agbno(mp, agno, agbno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_refcount_level);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
}
/* Check the AGFL counters */
@@ -588,57 +588,57 @@ xfs_scrub_agf(
else
fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
if (agfl_count != 0 && fl_count != agfl_count)
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
- xfs_scrub_agf_xref(sc);
+ xchk_agf_xref(sc);
out:
return error;
}
/* AGFL */
-struct xfs_scrub_agfl_info {
- struct xfs_owner_info oinfo;
- unsigned int sz_entries;
- unsigned int nr_entries;
- xfs_agblock_t *entries;
- struct xfs_scrub_context *sc;
+struct xchk_agfl_info {
+ struct xfs_owner_info oinfo;
+ unsigned int sz_entries;
+ unsigned int nr_entries;
+ xfs_agblock_t *entries;
+ struct xfs_scrub *sc;
};
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_agfl_block_xref(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- struct xfs_owner_info *oinfo)
+xchk_agfl_block_xref(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ struct xfs_owner_info *oinfo)
{
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
- xfs_scrub_xref_is_used_space(sc, agbno, 1);
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
- xfs_scrub_xref_is_owned_by(sc, agbno, 1, oinfo);
- xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+ xchk_xref_is_used_space(sc, agbno, 1);
+ xchk_xref_is_not_inode_chunk(sc, agbno, 1);
+ xchk_xref_is_owned_by(sc, agbno, 1, oinfo);
+ xchk_xref_is_not_shared(sc, agbno, 1);
}
/* Scrub an AGFL block. */
STATIC int
-xfs_scrub_agfl_block(
- struct xfs_mount *mp,
- xfs_agblock_t agbno,
- void *priv)
+xchk_agfl_block(
+ struct xfs_mount *mp,
+ xfs_agblock_t agbno,
+ void *priv)
{
- struct xfs_scrub_agfl_info *sai = priv;
- struct xfs_scrub_context *sc = sai->sc;
- xfs_agnumber_t agno = sc->sa.agno;
+ struct xchk_agfl_info *sai = priv;
+ struct xfs_scrub *sc = sai->sc;
+ xfs_agnumber_t agno = sc->sa.agno;
if (xfs_verify_agbno(mp, agno, agbno) &&
sai->nr_entries < sai->sz_entries)
sai->entries[sai->nr_entries++] = agbno;
else
- xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agfl_bp);
- xfs_scrub_agfl_block_xref(sc, agbno, priv);
+ xchk_agfl_block_xref(sc, agbno, priv);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return XFS_BTREE_QUERY_RANGE_ABORT;
@@ -647,7 +647,7 @@ xfs_scrub_agfl_block(
}
static int
-xfs_scrub_agblock_cmp(
+xchk_agblock_cmp(
const void *pa,
const void *pb)
{
@@ -659,28 +659,28 @@ xfs_scrub_agblock_cmp(
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_agfl_xref(
- struct xfs_scrub_context *sc)
+xchk_agfl_xref(
+ struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
- struct xfs_mount *mp = sc->mp;
- xfs_agblock_t agbno;
- int error;
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
agbno = XFS_AGFL_BLOCK(mp);
- error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ error = xchk_ag_btcur_init(sc, &sc->sa);
if (error)
return;
- xfs_scrub_xref_is_used_space(sc, agbno, 1);
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xchk_xref_is_used_space(sc, agbno, 1);
+ xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
- xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
- xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+ xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_xref_is_not_shared(sc, agbno, 1);
/*
* Scrub teardown will take care of sc->sa for us. Leave sc->sa
@@ -690,26 +690,26 @@ xfs_scrub_agfl_xref(
/* Scrub the AGFL. */
int
-xfs_scrub_agfl(
- struct xfs_scrub_context *sc)
+xchk_agfl(
+ struct xfs_scrub *sc)
{
- struct xfs_scrub_agfl_info sai;
- struct xfs_agf *agf;
- xfs_agnumber_t agno;
- unsigned int agflcount;
- unsigned int i;
- int error;
+ struct xchk_agfl_info sai;
+ struct xfs_agf *agf;
+ xfs_agnumber_t agno;
+ unsigned int agflcount;
+ unsigned int i;
+ int error;
agno = sc->sa.agno = sc->sm->sm_agno;
- error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
+ error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
&sc->sa.agf_bp, &sc->sa.agfl_bp);
- if (!xfs_scrub_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
+ if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
goto out;
if (!sc->sa.agf_bp)
return -EFSCORRUPTED;
- xfs_scrub_buffer_recheck(sc, sc->sa.agfl_bp);
+ xchk_buffer_recheck(sc, sc->sa.agfl_bp);
- xfs_scrub_agfl_xref(sc);
+ xchk_agfl_xref(sc);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
@@ -718,7 +718,7 @@ xfs_scrub_agfl(
agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
agflcount = be32_to_cpu(agf->agf_flcount);
if (agflcount > xfs_agfl_size(sc->mp)) {
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
goto out;
}
memset(&sai, 0, sizeof(sai));
@@ -734,7 +734,7 @@ xfs_scrub_agfl(
/* Check the blocks in the AGFL. */
xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG);
error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
- sc->sa.agfl_bp, xfs_scrub_agfl_block, &sai);
+ sc->sa.agfl_bp, xchk_agfl_block, &sai);
if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
error = 0;
goto out_free;
@@ -743,16 +743,16 @@ xfs_scrub_agfl(
goto out_free;
if (agflcount != sai.nr_entries) {
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
goto out_free;
}
/* Sort entries, check for duplicates. */
sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
- xfs_scrub_agblock_cmp, NULL);
+ xchk_agblock_cmp, NULL);
for (i = 1; i < sai.nr_entries; i++) {
if (sai.entries[i] == sai.entries[i - 1]) {
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
break;
}
}
@@ -767,103 +767,103 @@ out:
/* Check agi_count/agi_freecount */
static inline void
-xfs_scrub_agi_xref_icounts(
- struct xfs_scrub_context *sc)
+xchk_agi_xref_icounts(
+ struct xfs_scrub *sc)
{
- struct xfs_agi *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
- xfs_agino_t icount;
- xfs_agino_t freecount;
- int error;
+ struct xfs_agi *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
+ xfs_agino_t icount;
+ xfs_agino_t freecount;
+ int error;
if (!sc->sa.ino_cur)
return;
error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.ino_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
return;
if (be32_to_cpu(agi->agi_count) != icount ||
be32_to_cpu(agi->agi_freecount) != freecount)
- xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_agi_xref(
- struct xfs_scrub_context *sc)
+xchk_agi_xref(
+ struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
- struct xfs_mount *mp = sc->mp;
- xfs_agblock_t agbno;
- int error;
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
agbno = XFS_AGI_BLOCK(mp);
- error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ error = xchk_ag_btcur_init(sc, &sc->sa);
if (error)
return;
- xfs_scrub_xref_is_used_space(sc, agbno, 1);
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
- xfs_scrub_agi_xref_icounts(sc);
+ xchk_xref_is_used_space(sc, agbno, 1);
+ xchk_xref_is_not_inode_chunk(sc, agbno, 1);
+ xchk_agi_xref_icounts(sc);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
- xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
- xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+ xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_xref_is_not_shared(sc, agbno, 1);
/* scrub teardown will take care of sc->sa for us */
}
/* Scrub the AGI. */
int
-xfs_scrub_agi(
- struct xfs_scrub_context *sc)
+xchk_agi(
+ struct xfs_scrub *sc)
{
- struct xfs_mount *mp = sc->mp;
- struct xfs_agi *agi;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- xfs_agblock_t eoag;
- xfs_agino_t agino;
- xfs_agino_t first_agino;
- xfs_agino_t last_agino;
- xfs_agino_t icount;
- int i;
- int level;
- int error = 0;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_agi *agi;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_agblock_t eoag;
+ xfs_agino_t agino;
+ xfs_agino_t first_agino;
+ xfs_agino_t last_agino;
+ xfs_agino_t icount;
+ int i;
+ int level;
+ int error = 0;
agno = sc->sa.agno = sc->sm->sm_agno;
- error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
+ error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
&sc->sa.agf_bp, &sc->sa.agfl_bp);
- if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
+ if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
goto out;
- xfs_scrub_buffer_recheck(sc, sc->sa.agi_bp);
+ xchk_buffer_recheck(sc, sc->sa.agi_bp);
agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
/* Check the AG length */
eoag = be32_to_cpu(agi->agi_length);
if (eoag != xfs_ag_block_count(mp, agno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
/* Check btree roots and levels */
agbno = be32_to_cpu(agi->agi_root);
if (!xfs_verify_agbno(mp, agno, agbno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
level = be32_to_cpu(agi->agi_level);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
agbno = be32_to_cpu(agi->agi_free_root);
if (!xfs_verify_agbno(mp, agno, agbno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
level = be32_to_cpu(agi->agi_free_level);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
}
/* Check inode counters */
@@ -871,16 +871,16 @@ xfs_scrub_agi(
icount = be32_to_cpu(agi->agi_count);
if (icount > last_agino - first_agino + 1 ||
icount < be32_to_cpu(agi->agi_freecount))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
/* Check inode pointers */
agino = be32_to_cpu(agi->agi_newino);
if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
agino = be32_to_cpu(agi->agi_dirino);
if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
/* Check unlinked inode buckets */
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
@@ -888,13 +888,13 @@ xfs_scrub_agi(
if (agino == NULLAGINO)
continue;
if (!xfs_verify_agino(mp, agno, agino))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
}
if (agi->agi_pad32 != cpu_to_be32(0))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
- xfs_scrub_agi_xref(sc);
+ xchk_agi_xref(sc);
out:
return error;
}
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 117eedac53df..f7568a4b5fe5 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -17,24 +17,31 @@
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
+#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
#include "xfs_rmap.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_refcount.h"
+#include "xfs_refcount_btree.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
+#include "scrub/repair.h"
+#include "scrub/bitmap.h"
/* Superblock */
/* Repair the superblock. */
int
-xfs_repair_superblock(
- struct xfs_scrub_context *sc)
+xrep_superblock(
+ struct xfs_scrub *sc)
{
- struct xfs_mount *mp = sc->mp;
- struct xfs_buf *bp;
- xfs_agnumber_t agno;
- int error;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_buf *bp;
+ xfs_agnumber_t agno;
+ int error;
/* Don't try to repair AG 0's sb; let xfs_repair deal with it. */
agno = sc->sm->sm_agno;
@@ -54,3 +61,873 @@ xfs_repair_superblock(
xfs_trans_log_buf(sc->tp, bp, 0, BBTOB(bp->b_length) - 1);
return error;
}
+
+/* AGF */
+
+struct xrep_agf_allocbt {
+ struct xfs_scrub *sc;
+ xfs_agblock_t freeblks;
+ xfs_agblock_t longest;
+};
+
+/* Record free space shape information. */
+STATIC int
+xrep_agf_walk_allocbt(
+ struct xfs_btree_cur *cur,
+ struct xfs_alloc_rec_incore *rec,
+ void *priv)
+{
+ struct xrep_agf_allocbt *raa = priv;
+ int error = 0;
+
+ if (xchk_should_terminate(raa->sc, &error))
+ return error;
+
+ raa->freeblks += rec->ar_blockcount;
+ if (rec->ar_blockcount > raa->longest)
+ raa->longest = rec->ar_blockcount;
+ return error;
+}
+
+/* Does this AGFL block look sane? */
+STATIC int
+xrep_agf_check_agfl_block(
+ struct xfs_mount *mp,
+ xfs_agblock_t agbno,
+ void *priv)
+{
+ struct xfs_scrub *sc = priv;
+
+ if (!xfs_verify_agbno(mp, sc->sa.agno, agbno))
+ return -EFSCORRUPTED;
+ return 0;
+}
+
+/*
+ * Offset within the xrep_find_ag_btree array for each btree type. We don't
+ * use the XFS_BTNUM_ names here because that would create a sparse array.
+ */
+enum {
+ XREP_AGF_BNOBT = 0,
+ XREP_AGF_CNTBT,
+ XREP_AGF_RMAPBT,
+ XREP_AGF_REFCOUNTBT,
+ XREP_AGF_END,
+ XREP_AGF_MAX
+};
+
+/* Check a btree root candidate. */
+static inline bool
+xrep_check_btree_root(
+ struct xfs_scrub *sc,
+ struct xrep_find_ag_btree *fab)
+{
+ struct xfs_mount *mp = sc->mp;
+ xfs_agnumber_t agno = sc->sm->sm_agno;
+
+ return xfs_verify_agbno(mp, agno, fab->root) &&
+ fab->height <= XFS_BTREE_MAXLEVELS;
+}
+
+/*
+ * Given the btree roots described by *fab, find the roots, check them for
+ * sanity, and pass the root data back out via *fab.
+ *
+ * This is /also/ a chicken and egg problem because we have to use the rmapbt
+ * (rooted in the AGF) to find the btrees rooted in the AGF. We also have no
+ * idea if the btrees make any sense. If we hit obvious corruptions in those
+ * btrees we'll bail out.
+ */
+STATIC int
+xrep_agf_find_btrees(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agf_bp,
+ struct xrep_find_ag_btree *fab,
+ struct xfs_buf *agfl_bp)
+{
+ struct xfs_agf *old_agf = XFS_BUF_TO_AGF(agf_bp);
+ int error;
+
+ /* Go find the root data. */
+ error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
+ if (error)
+ return error;
+
+ /* We must find the bnobt, cntbt, and rmapbt roots. */
+ if (!xrep_check_btree_root(sc, &fab[XREP_AGF_BNOBT]) ||
+ !xrep_check_btree_root(sc, &fab[XREP_AGF_CNTBT]) ||
+ !xrep_check_btree_root(sc, &fab[XREP_AGF_RMAPBT]))
+ return -EFSCORRUPTED;
+
+ /*
+ * We relied on the rmapbt to reconstruct the AGF. If we get a
+ * different root then something's seriously wrong.
+ */
+ if (fab[XREP_AGF_RMAPBT].root !=
+ be32_to_cpu(old_agf->agf_roots[XFS_BTNUM_RMAPi]))
+ return -EFSCORRUPTED;
+
+ /* We must find the refcountbt root if that feature is enabled. */
+ if (xfs_sb_version_hasreflink(&sc->mp->m_sb) &&
+ !xrep_check_btree_root(sc, &fab[XREP_AGF_REFCOUNTBT]))
+ return -EFSCORRUPTED;
+
+ return 0;
+}
+
+/*
+ * Reinitialize the AGF header, making an in-core copy of the old contents so
+ * that we know which in-core state needs to be reinitialized.
+ */
+STATIC void
+xrep_agf_init_header(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agf_bp,
+ struct xfs_agf *old_agf)
+{
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
+
+ memcpy(old_agf, agf, sizeof(*old_agf));
+ memset(agf, 0, BBTOB(agf_bp->b_length));
+ agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
+ agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
+ agf->agf_seqno = cpu_to_be32(sc->sa.agno);
+ agf->agf_length = cpu_to_be32(xfs_ag_block_count(mp, sc->sa.agno));
+ agf->agf_flfirst = old_agf->agf_flfirst;
+ agf->agf_fllast = old_agf->agf_fllast;
+ agf->agf_flcount = old_agf->agf_flcount;
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
+
+ /* Mark the incore AGF data stale until we're done fixing things. */
+ ASSERT(sc->sa.pag->pagf_init);
+ sc->sa.pag->pagf_init = 0;
+}
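+
+/*
+ * Note on the save/revert pattern above: old_agf preserves the previous
+ * on-disk contents so that a failure in a later repair step can memcpy()
+ * them straight back, leaving the buffer exactly as it was found.
+ */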
+
+/* Set btree root information in an AGF. */
+STATIC void
+xrep_agf_set_roots(
+ struct xfs_scrub *sc,
+ struct xfs_agf *agf,
+ struct xrep_find_ag_btree *fab)
+{
+ agf->agf_roots[XFS_BTNUM_BNOi] =
+ cpu_to_be32(fab[XREP_AGF_BNOBT].root);
+ agf->agf_levels[XFS_BTNUM_BNOi] =
+ cpu_to_be32(fab[XREP_AGF_BNOBT].height);
+
+ agf->agf_roots[XFS_BTNUM_CNTi] =
+ cpu_to_be32(fab[XREP_AGF_CNTBT].root);
+ agf->agf_levels[XFS_BTNUM_CNTi] =
+ cpu_to_be32(fab[XREP_AGF_CNTBT].height);
+
+ agf->agf_roots[XFS_BTNUM_RMAPi] =
+ cpu_to_be32(fab[XREP_AGF_RMAPBT].root);
+ agf->agf_levels[XFS_BTNUM_RMAPi] =
+ cpu_to_be32(fab[XREP_AGF_RMAPBT].height);
+
+ if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
+ agf->agf_refcount_root =
+ cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].root);
+ agf->agf_refcount_level =
+ cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].height);
+ }
+}
+
+/* Update all AGF fields which derive from btree contents. */
+STATIC int
+xrep_agf_calc_from_btrees(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agf_bp)
+{
+ struct xrep_agf_allocbt raa = { .sc = sc };
+ struct xfs_btree_cur *cur = NULL;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t btreeblks;
+ xfs_agblock_t blocks;
+ int error;
+
+ /* Update the AGF counters from the bnobt. */
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
+ XFS_BTNUM_BNO);
+ error = xfs_alloc_query_all(cur, xrep_agf_walk_allocbt, &raa);
+ if (error)
+ goto err;
+ error = xfs_btree_count_blocks(cur, &blocks);
+ if (error)
+ goto err;
+ xfs_btree_del_cursor(cur, error);
+ btreeblks = blocks - 1;
+ agf->agf_freeblks = cpu_to_be32(raa.freeblks);
+ agf->agf_longest = cpu_to_be32(raa.longest);
+
+ /* Update the AGF counters from the cntbt. */
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
+ XFS_BTNUM_CNT);
+ error = xfs_btree_count_blocks(cur, &blocks);
+ if (error)
+ goto err;
+ xfs_btree_del_cursor(cur, error);
+ btreeblks += blocks - 1;
+
+ /* Update the AGF counters from the rmapbt. */
+ cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
+ error = xfs_btree_count_blocks(cur, &blocks);
+ if (error)
+ goto err;
+ xfs_btree_del_cursor(cur, error);
+ agf->agf_rmap_blocks = cpu_to_be32(blocks);
+ btreeblks += blocks - 1;
+
+ agf->agf_btreeblks = cpu_to_be32(btreeblks);
+
+ /* Update the AGF counters from the refcountbt. */
+ if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ cur = xfs_refcountbt_init_cursor(mp, sc->tp, agf_bp,
+ sc->sa.agno);
+ error = xfs_btree_count_blocks(cur, &blocks);
+ if (error)
+ goto err;
+ xfs_btree_del_cursor(cur, error);
+ agf->agf_refcount_blocks = cpu_to_be32(blocks);
+ }
+
+ return 0;
+err:
+ xfs_btree_del_cursor(cur, error);
+ return error;
+}
+
+/* Commit the new AGF and reinitialize the incore state. */
+STATIC int
+xrep_agf_commit_new(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agf_bp)
+{
+ struct xfs_perag *pag;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
+
+ /* Trigger fdblocks recalculation */
+ xfs_force_summary_recalc(sc->mp);
+
+ /* Write this to disk. */
+ xfs_trans_buf_set_type(sc->tp, agf_bp, XFS_BLFT_AGF_BUF);
+ xfs_trans_log_buf(sc->tp, agf_bp, 0, BBTOB(agf_bp->b_length) - 1);
+
+ /* Now reinitialize the in-core counters we changed. */
+ pag = sc->sa.pag;
+ pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
+ pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
+ pag->pagf_longest = be32_to_cpu(agf->agf_longest);
+ pag->pagf_levels[XFS_BTNUM_BNOi] =
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
+ pag->pagf_levels[XFS_BTNUM_CNTi] =
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
+ pag->pagf_levels[XFS_BTNUM_RMAPi] =
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
+ pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
+ pag->pagf_init = 1;
+
+ return 0;
+}
+
+/* Repair the AGF. v5 filesystems only. */
+int
+xrep_agf(
+ struct xfs_scrub *sc)
+{
+ struct xrep_find_ag_btree fab[XREP_AGF_MAX] = {
+ [XREP_AGF_BNOBT] = {
+ .rmap_owner = XFS_RMAP_OWN_AG,
+ .buf_ops = &xfs_allocbt_buf_ops,
+ .magic = XFS_ABTB_CRC_MAGIC,
+ },
+ [XREP_AGF_CNTBT] = {
+ .rmap_owner = XFS_RMAP_OWN_AG,
+ .buf_ops = &xfs_allocbt_buf_ops,
+ .magic = XFS_ABTC_CRC_MAGIC,
+ },
+ [XREP_AGF_RMAPBT] = {
+ .rmap_owner = XFS_RMAP_OWN_AG,
+ .buf_ops = &xfs_rmapbt_buf_ops,
+ .magic = XFS_RMAP_CRC_MAGIC,
+ },
+ [XREP_AGF_REFCOUNTBT] = {
+ .rmap_owner = XFS_RMAP_OWN_REFC,
+ .buf_ops = &xfs_refcountbt_buf_ops,
+ .magic = XFS_REFC_CRC_MAGIC,
+ },
+ [XREP_AGF_END] = {
+ .buf_ops = NULL,
+ },
+ };
+ struct xfs_agf old_agf;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_buf *agf_bp;
+ struct xfs_buf *agfl_bp;
+ struct xfs_agf *agf;
+ int error;
+
+ /* We require the rmapbt to rebuild anything. */
+ if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return -EOPNOTSUPP;
+
+ xchk_perag_get(sc->mp, &sc->sa);
+ /*
+ * Make sure we have the AGF buffer, as scrub might have decided it
+ * was corrupt after xfs_alloc_read_agf failed with -EFSCORRUPTED.
+ */
+ error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
+ XFS_AG_DADDR(mp, sc->sa.agno, XFS_AGF_DADDR(mp)),
+ XFS_FSS_TO_BB(mp, 1), 0, &agf_bp, NULL);
+ if (error)
+ return error;
+ agf_bp->b_ops = &xfs_agf_buf_ops;
+ agf = XFS_BUF_TO_AGF(agf_bp);
+
+ /*
+ * Load the AGFL so that we can screen out OWN_AG blocks that are on
+ * the AGFL now; these blocks might have once been part of the
+ * bno/cnt/rmap btrees but are not now. This is a chicken and egg
+ * problem: the AGF is corrupt, so we have to trust the AGFL contents
+ * because we can't do any serious cross-referencing with any of the
+ * btrees rooted in the AGF. If the AGFL contents are obviously bad
+ * then we'll bail out.
+ */
+ error = xfs_alloc_read_agfl(mp, sc->tp, sc->sa.agno, &agfl_bp);
+ if (error)
+ return error;
+
+ /*
+ * Spot-check the AGFL blocks; if they're obviously corrupt then
+ * there's nothing we can do but bail out.
+ */
+ error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(agf_bp), agfl_bp,
+ xrep_agf_check_agfl_block, sc);
+ if (error)
+ return error;
+
+ /*
+ * Find the AGF btree roots. This is also a chicken-and-egg situation;
+ * see the function for more details.
+ */
+ error = xrep_agf_find_btrees(sc, agf_bp, fab, agfl_bp);
+ if (error)
+ return error;
+
+ /* Start rewriting the header and implant the btrees we found. */
+ xrep_agf_init_header(sc, agf_bp, &old_agf);
+ xrep_agf_set_roots(sc, agf, fab);
+ error = xrep_agf_calc_from_btrees(sc, agf_bp);
+ if (error)
+ goto out_revert;
+
+ /* Commit the changes and reinitialize incore state. */
+ return xrep_agf_commit_new(sc, agf_bp);
+
+out_revert:
+ /* Mark the incore AGF state stale and revert the AGF. */
+ sc->sa.pag->pagf_init = 0;
+ memcpy(agf, &old_agf, sizeof(old_agf));
+ return error;
+}
+
+/* AGFL */
+
+struct xrep_agfl {
+ /* Bitmap of other OWN_AG metadata blocks. */
+ struct xfs_bitmap agmetablocks;
+
+ /* Bitmap of free space. */
+ struct xfs_bitmap *freesp;
+
+ struct xfs_scrub *sc;
+};
+
+/* Record all OWN_AG (free space btree) information from the rmap data. */
+STATIC int
+xrep_agfl_walk_rmap(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xrep_agfl *ra = priv;
+ xfs_fsblock_t fsb;
+ int error = 0;
+
+ if (xchk_should_terminate(ra->sc, &error))
+ return error;
+
+ /* Record all the OWN_AG blocks. */
+ if (rec->rm_owner == XFS_RMAP_OWN_AG) {
+ fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
+ rec->rm_startblock);
+ error = xfs_bitmap_set(ra->freesp, fsb, rec->rm_blockcount);
+ if (error)
+ return error;
+ }
+
+ return xfs_bitmap_set_btcur_path(&ra->agmetablocks, cur);
+}
+
+/*
+ * Map out all the non-AGFL OWN_AG space in this AG so that we can deduce
+ * which blocks belong to the AGFL.
+ *
+ * Compute the set of old AGFL blocks by subtracting the blocks owned by all
+ * other OWN_AG metadata (bnobt, cntbt, rmapbt) from the full list of OWN_AG
+ * blocks. Whatever remains are the old AGFL blocks, so return that list and
+ * the number of blocks we're actually going to put back on the AGFL.
+ */
+STATIC int
+xrep_agfl_collect_blocks(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agf_bp,
+ struct xfs_bitmap *agfl_extents,
+ xfs_agblock_t *flcount)
+{
+ struct xrep_agfl ra;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_btree_cur *cur;
+ struct xfs_bitmap_range *br;
+ struct xfs_bitmap_range *n;
+ int error;
+
+ ra.sc = sc;
+ ra.freesp = agfl_extents;
+ xfs_bitmap_init(&ra.agmetablocks);
+
+ /* Find all space used by the free space btrees & rmapbt. */
+ cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
+ error = xfs_rmap_query_all(cur, xrep_agfl_walk_rmap, &ra);
+ if (error)
+ goto err;
+ xfs_btree_del_cursor(cur, error);
+
+ /* Find all blocks currently being used by the bnobt. */
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
+ XFS_BTNUM_BNO);
+ error = xfs_bitmap_set_btblocks(&ra.agmetablocks, cur);
+ if (error)
+ goto err;
+ xfs_btree_del_cursor(cur, error);
+
+ /* Find all blocks currently being used by the cntbt. */
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
+ XFS_BTNUM_CNT);
+ error = xfs_bitmap_set_btblocks(&ra.agmetablocks, cur);
+ if (error)
+ goto err;
+
+ xfs_btree_del_cursor(cur, error);
+
+ /*
+ * Drop the freesp meta blocks that are in use by btrees.
+ * The remaining blocks /should/ be AGFL blocks.
+ */
+ error = xfs_bitmap_disunion(agfl_extents, &ra.agmetablocks);
+ xfs_bitmap_destroy(&ra.agmetablocks);
+ if (error)
+ return error;
+
+ /*
+ * Calculate the new AGFL size. If we found more blocks than fit in
+ * the AGFL we'll free them later.
+ */
+ *flcount = 0;
+ for_each_xfs_bitmap_extent(br, n, agfl_extents) {
+ *flcount += br->len;
+ if (*flcount > xfs_agfl_size(mp))
+ break;
+ }
+ if (*flcount > xfs_agfl_size(mp))
+ *flcount = xfs_agfl_size(mp);
+ return 0;
+
+err:
+ xfs_bitmap_destroy(&ra.agmetablocks);
+ xfs_btree_del_cursor(cur, error);
+ return error;
+}
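+
+/*
+ * The set arithmetic above, spelled out:
+ *
+ *	new AGFL = { OWN_AG blocks per the rmapbt }
+ *	         - { blocks making up the bnobt }
+ *	         - { blocks making up the cntbt }
+ *	         - { blocks making up the rmapbt itself }
+ *
+ * Anything beyond xfs_agfl_size() blocks stays in agfl_extents for the
+ * caller to free back to the filesystem.
+ */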
+
+/* Update the AGF and reset the in-core state. */
+STATIC void
+xrep_agfl_update_agf(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agf_bp,
+ xfs_agblock_t flcount)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
+
+ ASSERT(flcount <= xfs_agfl_size(sc->mp));
+
+ /* Trigger fdblocks recalculation */
+ xfs_force_summary_recalc(sc->mp);
+
+ /* Update the AGF counters. */
+ if (sc->sa.pag->pagf_init)
+ sc->sa.pag->pagf_flcount = flcount;
+ agf->agf_flfirst = cpu_to_be32(0);
+ agf->agf_flcount = cpu_to_be32(flcount);
+ agf->agf_fllast = cpu_to_be32(flcount - 1);
+
+ xfs_alloc_log_agf(sc->tp, agf_bp,
+ XFS_AGF_FLFIRST | XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
+}
+
+/* Write out a totally new AGFL. */
+STATIC void
+xrep_agfl_init_header(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agfl_bp,
+ struct xfs_bitmap *agfl_extents,
+ xfs_agblock_t flcount)
+{
+ struct xfs_mount *mp = sc->mp;
+ __be32 *agfl_bno;
+ struct xfs_bitmap_range *br;
+ struct xfs_bitmap_range *n;
+ struct xfs_agfl *agfl;
+ xfs_agblock_t agbno;
+ unsigned int fl_off;
+
+ ASSERT(flcount <= xfs_agfl_size(mp));
+
+ /*
+ * Start rewriting the header by setting the bno[] array to
+ * NULLAGBLOCK, then setting AGFL header fields.
+ */
+ agfl = XFS_BUF_TO_AGFL(agfl_bp);
+ memset(agfl, 0xFF, BBTOB(agfl_bp->b_length));
+ agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
+ agfl->agfl_seqno = cpu_to_be32(sc->sa.agno);
+ uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
+
+ /*
+ * Fill the AGFL with the remaining blocks. If agfl_extents has more
+ * blocks than fit in the AGFL, they will be freed in a subsequent
+ * step.
+ */
+ fl_off = 0;
+ agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agfl_bp);
+ for_each_xfs_bitmap_extent(br, n, agfl_extents) {
+ agbno = XFS_FSB_TO_AGBNO(mp, br->start);
+
+ trace_xrep_agfl_insert(mp, sc->sa.agno, agbno, br->len);
+
+ while (br->len > 0 && fl_off < flcount) {
+ agfl_bno[fl_off] = cpu_to_be32(agbno);
+ fl_off++;
+ agbno++;
+
+ /*
+ * We've now used br->start by putting it in the AGFL,
+ * so bump br so that we don't reap the block later.
+ */
+ br->start++;
+ br->len--;
+ }
+
+ if (br->len)
+ break;
+ list_del(&br->list);
+ kmem_free(br);
+ }
+
+ /* Write new AGFL to disk. */
+ xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF);
+ xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1);
+}
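+
+/*
+ * Invariant for the fill loop above: on return, bno[0..flcount - 1] holds
+ * AGFL blocks in extent order, and any extents still left in agfl_extents
+ * are overflow for the caller to dispose of via xrep_reap_extents().
+ */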
+
+/* Repair the AGFL. */
+int
+xrep_agfl(
+ struct xfs_scrub *sc)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_bitmap agfl_extents;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_buf *agf_bp;
+ struct xfs_buf *agfl_bp;
+ xfs_agblock_t flcount;
+ int error;
+
+ /* We require the rmapbt to rebuild anything. */
+ if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return -EOPNOTSUPP;
+
+ xchk_perag_get(sc->mp, &sc->sa);
+ xfs_bitmap_init(&agfl_extents);
+
+ /*
+ * Read the AGF so that we can query the rmapbt. We hope that there's
+ * nothing wrong with the AGF, but all the AG header repair functions
+ * have this chicken-and-egg problem.
+ */
+ error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.agno, 0, &agf_bp);
+ if (error)
+ return error;
+ if (!agf_bp)
+ return -ENOMEM;
+
+ /*
+ * Make sure we have the AGFL buffer, as scrub might have decided it
+ * was corrupt after xfs_alloc_read_agfl failed with -EFSCORRUPTED.
+ */
+ error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
+ XFS_AG_DADDR(mp, sc->sa.agno, XFS_AGFL_DADDR(mp)),
+ XFS_FSS_TO_BB(mp, 1), 0, &agfl_bp, NULL);
+ if (error)
+ return error;
+ agfl_bp->b_ops = &xfs_agfl_buf_ops;
+
+ /* Gather all the extents we're going to put on the new AGFL. */
+ error = xrep_agfl_collect_blocks(sc, agf_bp, &agfl_extents, &flcount);
+ if (error)
+ goto err;
+
+ /*
+ * Update AGF and AGFL. We reset the global free block counter when
+ * we adjust the AGF flcount (which can fail) so avoid updating any
+ * buffers until we know that part works.
+ */
+ xrep_agfl_update_agf(sc, agf_bp, flcount);
+ xrep_agfl_init_header(sc, agfl_bp, &agfl_extents, flcount);
+
+ /*
+ * Ok, the AGFL should be ready to go now. Roll the transaction to
+ * make the new AGFL permanent before we start using it to return
+ * freespace overflow to the freespace btrees.
+ */
+ sc->sa.agf_bp = agf_bp;
+ sc->sa.agfl_bp = agfl_bp;
+ error = xrep_roll_ag_trans(sc);
+ if (error)
+ goto err;
+
+ /* Dump any AGFL overflow. */
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
+ return xrep_reap_extents(sc, &agfl_extents, &oinfo, XFS_AG_RESV_AGFL);
+err:
+ xfs_bitmap_destroy(&agfl_extents);
+ return error;
+}
+
+/* AGI */
+
+/*
+ * Offset within the xrep_find_ag_btree array for each btree type. We don't
+ * use the XFS_BTNUM_ names here because that would create a sparse array.
+ */
+enum {
+ XREP_AGI_INOBT = 0,
+ XREP_AGI_FINOBT,
+ XREP_AGI_END,
+ XREP_AGI_MAX
+};
+
+/*
+ * Given the inode btree roots described by *fab, find the roots, check them
+ * for sanity, and pass the root data back out via *fab.
+ */
+STATIC int
+xrep_agi_find_btrees(
+ struct xfs_scrub *sc,
+ struct xrep_find_ag_btree *fab)
+{
+ struct xfs_buf *agf_bp;
+ struct xfs_mount *mp = sc->mp;
+ int error;
+
+ /* Read the AGF. */
+ error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.agno, 0, &agf_bp);
+ if (error)
+ return error;
+ if (!agf_bp)
+ return -ENOMEM;
+
+ /* Find the btree roots. */
+ error = xrep_find_ag_btree_roots(sc, agf_bp, fab, NULL);
+ if (error)
+ return error;
+
+ /* We must find the inobt root. */
+ if (!xrep_check_btree_root(sc, &fab[XREP_AGI_INOBT]))
+ return -EFSCORRUPTED;
+
+ /* We must find the finobt root if that feature is enabled. */
+ if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
+ !xrep_check_btree_root(sc, &fab[XREP_AGI_FINOBT]))
+ return -EFSCORRUPTED;
+
+ return 0;
+}
+
+/*
+ * Reinitialize the AGI header, making an in-core copy of the old contents so
+ * that we know which in-core state needs to be reinitialized.
+ */
+STATIC void
+xrep_agi_init_header(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agi_bp,
+ struct xfs_agi *old_agi)
+{
+ struct xfs_agi *agi = XFS_BUF_TO_AGI(agi_bp);
+ struct xfs_mount *mp = sc->mp;
+
+ memcpy(old_agi, agi, sizeof(*old_agi));
+ memset(agi, 0, BBTOB(agi_bp->b_length));
+ agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
+ agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
+ agi->agi_seqno = cpu_to_be32(sc->sa.agno);
+ agi->agi_length = cpu_to_be32(xfs_ag_block_count(mp, sc->sa.agno));
+ agi->agi_newino = cpu_to_be32(NULLAGINO);
+ agi->agi_dirino = cpu_to_be32(NULLAGINO);
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
+
+ /* We don't know how to fix the unlinked list yet. */
+ memcpy(&agi->agi_unlinked, &old_agi->agi_unlinked,
+ sizeof(agi->agi_unlinked));
+
+	/* Mark the incore AGI data stale until we're done fixing things. */
+ ASSERT(sc->sa.pag->pagi_init);
+ sc->sa.pag->pagi_init = 0;
+}
+
+/* Set btree root information in an AGI. */
+STATIC void
+xrep_agi_set_roots(
+ struct xfs_scrub *sc,
+ struct xfs_agi *agi,
+ struct xrep_find_ag_btree *fab)
+{
+ agi->agi_root = cpu_to_be32(fab[XREP_AGI_INOBT].root);
+ agi->agi_level = cpu_to_be32(fab[XREP_AGI_INOBT].height);
+
+ if (xfs_sb_version_hasfinobt(&sc->mp->m_sb)) {
+ agi->agi_free_root = cpu_to_be32(fab[XREP_AGI_FINOBT].root);
+ agi->agi_free_level = cpu_to_be32(fab[XREP_AGI_FINOBT].height);
+ }
+}
+
+/* Update the AGI counters. */
+STATIC int
+xrep_agi_calc_from_btrees(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agi_bp)
+{
+ struct xfs_btree_cur *cur;
+ struct xfs_agi *agi = XFS_BUF_TO_AGI(agi_bp);
+ struct xfs_mount *mp = sc->mp;
+ xfs_agino_t count;
+ xfs_agino_t freecount;
+ int error;
+
+ cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp, sc->sa.agno,
+ XFS_BTNUM_INO);
+ error = xfs_ialloc_count_inodes(cur, &count, &freecount);
+ if (error)
+ goto err;
+ xfs_btree_del_cursor(cur, error);
+
+ agi->agi_count = cpu_to_be32(count);
+ agi->agi_freecount = cpu_to_be32(freecount);
+ return 0;
+err:
+ xfs_btree_del_cursor(cur, error);
+ return error;
+}
+
+/* Trigger reinitialization of the in-core data. */
+STATIC int
+xrep_agi_commit_new(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agi_bp)
+{
+ struct xfs_perag *pag;
+ struct xfs_agi *agi = XFS_BUF_TO_AGI(agi_bp);
+
+ /* Trigger inode count recalculation */
+ xfs_force_summary_recalc(sc->mp);
+
+ /* Write this to disk. */
+ xfs_trans_buf_set_type(sc->tp, agi_bp, XFS_BLFT_AGI_BUF);
+ xfs_trans_log_buf(sc->tp, agi_bp, 0, BBTOB(agi_bp->b_length) - 1);
+
+ /* Now reinitialize the in-core counters if necessary. */
+ pag = sc->sa.pag;
+ pag->pagi_count = be32_to_cpu(agi->agi_count);
+ pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
+ pag->pagi_init = 1;
+
+ return 0;
+}
+
+/* Repair the AGI. */
+int
+xrep_agi(
+ struct xfs_scrub *sc)
+{
+ struct xrep_find_ag_btree fab[XREP_AGI_MAX] = {
+ [XREP_AGI_INOBT] = {
+ .rmap_owner = XFS_RMAP_OWN_INOBT,
+ .buf_ops = &xfs_inobt_buf_ops,
+ .magic = XFS_IBT_CRC_MAGIC,
+ },
+ [XREP_AGI_FINOBT] = {
+ .rmap_owner = XFS_RMAP_OWN_INOBT,
+ .buf_ops = &xfs_inobt_buf_ops,
+ .magic = XFS_FIBT_CRC_MAGIC,
+ },
+ [XREP_AGI_END] = {
+ .buf_ops = NULL
+ },
+ };
+ struct xfs_agi old_agi;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_buf *agi_bp;
+ struct xfs_agi *agi;
+ int error;
+
+ /* We require the rmapbt to rebuild anything. */
+ if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return -EOPNOTSUPP;
+
+ xchk_perag_get(sc->mp, &sc->sa);
+ /*
+ * Make sure we have the AGI buffer, as scrub might have decided it
+ * was corrupt after xfs_ialloc_read_agi failed with -EFSCORRUPTED.
+ */
+ error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
+ XFS_AG_DADDR(mp, sc->sa.agno, XFS_AGI_DADDR(mp)),
+ XFS_FSS_TO_BB(mp, 1), 0, &agi_bp, NULL);
+ if (error)
+ return error;
+ agi_bp->b_ops = &xfs_agi_buf_ops;
+ agi = XFS_BUF_TO_AGI(agi_bp);
+
+ /* Find the AGI btree roots. */
+ error = xrep_agi_find_btrees(sc, fab);
+ if (error)
+ return error;
+
+ /* Start rewriting the header and implant the btrees we found. */
+ xrep_agi_init_header(sc, agi_bp, &old_agi);
+ xrep_agi_set_roots(sc, agi, fab);
+ error = xrep_agi_calc_from_btrees(sc, agi_bp);
+ if (error)
+ goto out_revert;
+
+ /* Reinitialize in-core state. */
+ return xrep_agi_commit_new(sc, agi_bp);
+
+out_revert:
+ /* Mark the incore AGI state stale and revert the AGI. */
+ sc->sa.pag->pagi_init = 0;
+ memcpy(agi, &old_agi, sizeof(old_agi));
+ return error;
+}
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index 50e4f7fa06f0..036b5c7021eb 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -28,11 +28,11 @@
* Set us up to scrub free space btrees.
*/
int
-xfs_scrub_setup_ag_allocbt(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_ag_allocbt(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- return xfs_scrub_setup_ag_btree(sc, ip, false);
+ return xchk_setup_ag_btree(sc, ip, false);
}
/* Free space btree scrubber. */
@@ -41,71 +41,71 @@ xfs_scrub_setup_ag_allocbt(
* bnobt/cntbt record, respectively.
*/
STATIC void
-xfs_scrub_allocbt_xref_other(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len)
+xchk_allocbt_xref_other(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
{
- struct xfs_btree_cur **pcur;
- xfs_agblock_t fbno;
- xfs_extlen_t flen;
- int has_otherrec;
- int error;
+ struct xfs_btree_cur **pcur;
+ xfs_agblock_t fbno;
+ xfs_extlen_t flen;
+ int has_otherrec;
+ int error;
if (sc->sm->sm_type == XFS_SCRUB_TYPE_BNOBT)
pcur = &sc->sa.cnt_cur;
else
pcur = &sc->sa.bno_cur;
- if (!*pcur || xfs_scrub_skip_xref(sc->sm))
+ if (!*pcur || xchk_skip_xref(sc->sm))
return;
error = xfs_alloc_lookup_le(*pcur, agbno, len, &has_otherrec);
- if (!xfs_scrub_should_check_xref(sc, &error, pcur))
+ if (!xchk_should_check_xref(sc, &error, pcur))
return;
if (!has_otherrec) {
- xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+ xchk_btree_xref_set_corrupt(sc, *pcur, 0);
return;
}
error = xfs_alloc_get_rec(*pcur, &fbno, &flen, &has_otherrec);
- if (!xfs_scrub_should_check_xref(sc, &error, pcur))
+ if (!xchk_should_check_xref(sc, &error, pcur))
return;
if (!has_otherrec) {
- xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+ xchk_btree_xref_set_corrupt(sc, *pcur, 0);
return;
}
if (fbno != agbno || flen != len)
- xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+ xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_allocbt_xref(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len)
+xchk_allocbt_xref(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
{
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
- xfs_scrub_allocbt_xref_other(sc, agbno, len);
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
- xfs_scrub_xref_has_no_owner(sc, agbno, len);
- xfs_scrub_xref_is_not_shared(sc, agbno, len);
+ xchk_allocbt_xref_other(sc, agbno, len);
+ xchk_xref_is_not_inode_chunk(sc, agbno, len);
+ xchk_xref_has_no_owner(sc, agbno, len);
+ xchk_xref_is_not_shared(sc, agbno, len);
}
/* Scrub a bnobt/cntbt record. */
STATIC int
-xfs_scrub_allocbt_rec(
- struct xfs_scrub_btree *bs,
- union xfs_btree_rec *rec)
+xchk_allocbt_rec(
+ struct xchk_btree *bs,
+ union xfs_btree_rec *rec)
{
- struct xfs_mount *mp = bs->cur->bc_mp;
- xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
- xfs_agblock_t bno;
- xfs_extlen_t len;
- int error = 0;
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ xfs_agblock_t bno;
+ xfs_extlen_t len;
+ int error = 0;
bno = be32_to_cpu(rec->alloc.ar_startblock);
len = be32_to_cpu(rec->alloc.ar_blockcount);
@@ -113,57 +113,57 @@ xfs_scrub_allocbt_rec(
if (bno + len <= bno ||
!xfs_verify_agbno(mp, agno, bno) ||
!xfs_verify_agbno(mp, agno, bno + len - 1))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
- xfs_scrub_allocbt_xref(bs->sc, bno, len);
+ xchk_allocbt_xref(bs->sc, bno, len);
return error;
}
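The "bno + len <= bno" test above is the unsigned-overflow guard for 32-bit AG block arithmetic, and it rejects zero-length records as a side effect. A worked instance (values hypothetical):

	/* xfs_agblock_t and xfs_extlen_t are 32 bits, so a corrupt record wraps: */
	xfs_agblock_t	bno = 0xFFFFFFF0;
	xfs_extlen_t	len = 0x20;
	/*
	 * bno + len wraps to 0x10, making bno + len <= bno true, so the
	 * record is flagged; a record with len == 0 trips the same test.
	 */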
/* Scrub the freespace btrees for some AG. */
STATIC int
-xfs_scrub_allocbt(
- struct xfs_scrub_context *sc,
- xfs_btnum_t which)
+xchk_allocbt(
+ struct xfs_scrub *sc,
+ xfs_btnum_t which)
{
- struct xfs_owner_info oinfo;
- struct xfs_btree_cur *cur;
+ struct xfs_owner_info oinfo;
+ struct xfs_btree_cur *cur;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
cur = which == XFS_BTNUM_BNO ? sc->sa.bno_cur : sc->sa.cnt_cur;
- return xfs_scrub_btree(sc, cur, xfs_scrub_allocbt_rec, &oinfo, NULL);
+ return xchk_btree(sc, cur, xchk_allocbt_rec, &oinfo, NULL);
}
int
-xfs_scrub_bnobt(
- struct xfs_scrub_context *sc)
+xchk_bnobt(
+ struct xfs_scrub *sc)
{
- return xfs_scrub_allocbt(sc, XFS_BTNUM_BNO);
+ return xchk_allocbt(sc, XFS_BTNUM_BNO);
}
int
-xfs_scrub_cntbt(
- struct xfs_scrub_context *sc)
+xchk_cntbt(
+ struct xfs_scrub *sc)
{
- return xfs_scrub_allocbt(sc, XFS_BTNUM_CNT);
+ return xchk_allocbt(sc, XFS_BTNUM_CNT);
}
/* xref check that the extent is not free */
void
-xfs_scrub_xref_is_used_space(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len)
+xchk_xref_is_used_space(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
{
- bool is_freesp;
- int error;
+ bool is_freesp;
+ int error;
- if (!sc->sa.bno_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.bno_cur || xchk_skip_xref(sc->sm))
return;
error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
return;
if (is_freesp)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
}
diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c
index de51cf8a8516..81d5e90547a1 100644
--- a/fs/xfs/scrub/attr.c
+++ b/fs/xfs/scrub/attr.c
@@ -32,11 +32,11 @@
/* Set us up to scrub an inode's extended attributes. */
int
-xfs_scrub_setup_xattr(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_xattr(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- size_t sz;
+ size_t sz;
/*
* Allocate the buffer without the inode lock held. We need enough
@@ -50,14 +50,14 @@ xfs_scrub_setup_xattr(
if (!sc->buf)
return -ENOMEM;
- return xfs_scrub_setup_inode_contents(sc, ip, 0);
+ return xchk_setup_inode_contents(sc, ip, 0);
}
/* Extended Attributes */
-struct xfs_scrub_xattr {
+struct xchk_xattr {
struct xfs_attr_list_context context;
- struct xfs_scrub_context *sc;
+ struct xfs_scrub *sc;
};
/*
@@ -69,22 +69,22 @@ struct xfs_scrub_xattr {
* or if we get more or less data than we expected.
*/
static void
-xfs_scrub_xattr_listent(
+xchk_xattr_listent(
struct xfs_attr_list_context *context,
int flags,
unsigned char *name,
int namelen,
int valuelen)
{
- struct xfs_scrub_xattr *sx;
+ struct xchk_xattr *sx;
struct xfs_da_args args = { NULL };
int error = 0;
- sx = container_of(context, struct xfs_scrub_xattr, context);
+ sx = container_of(context, struct xchk_xattr, context);
if (flags & XFS_ATTR_INCOMPLETE) {
/* Incomplete attr key, just mark the inode for preening. */
- xfs_scrub_ino_set_preen(sx->sc, context->dp->i_ino);
+ xchk_ino_set_preen(sx->sc, context->dp->i_ino);
return;
}
@@ -106,11 +106,11 @@ xfs_scrub_xattr_listent(
error = xfs_attr_get_ilocked(context->dp, &args);
if (error == -EEXIST)
error = 0;
- if (!xfs_scrub_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
+ if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
&error))
goto fail_xref;
if (args.valuelen != valuelen)
- xfs_scrub_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
+ xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
args.blkno);
fail_xref:
if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
@@ -126,14 +126,14 @@ fail_xref:
* the smallest address
*/
STATIC bool
-xfs_scrub_xattr_set_map(
- struct xfs_scrub_context *sc,
- unsigned long *map,
- unsigned int start,
- unsigned int len)
+xchk_xattr_set_map(
+ struct xfs_scrub *sc,
+ unsigned long *map,
+ unsigned int start,
+ unsigned int len)
{
- unsigned int mapsize = sc->mp->m_attr_geo->blksize;
- bool ret = true;
+ unsigned int mapsize = sc->mp->m_attr_geo->blksize;
+ bool ret = true;
if (start >= mapsize)
return false;
@@ -154,8 +154,8 @@ xfs_scrub_xattr_set_map(
* attr freemap has problems or points to used space.
*/
STATIC bool
-xfs_scrub_xattr_check_freemap(
- struct xfs_scrub_context *sc,
+xchk_xattr_check_freemap(
+ struct xfs_scrub *sc,
unsigned long *map,
struct xfs_attr3_icleaf_hdr *leafhdr)
{
@@ -168,7 +168,7 @@ xfs_scrub_xattr_check_freemap(
freemap = (unsigned long *)sc->buf + BITS_TO_LONGS(mapsize);
bitmap_zero(freemap, mapsize);
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
- if (!xfs_scrub_xattr_set_map(sc, freemap,
+ if (!xchk_xattr_set_map(sc, freemap,
leafhdr->freemap[i].base,
leafhdr->freemap[i].size))
return false;
@@ -184,8 +184,8 @@ xfs_scrub_xattr_check_freemap(
* Returns the number of bytes used for the name/value data.
*/
STATIC void
-xfs_scrub_xattr_entry(
- struct xfs_scrub_da_btree *ds,
+xchk_xattr_entry(
+ struct xchk_da_btree *ds,
int level,
char *buf_end,
struct xfs_attr_leafblock *leaf,
@@ -204,17 +204,17 @@ xfs_scrub_xattr_entry(
unsigned int namesize;
if (ent->pad2 != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
/* Hash values in order? */
if (be32_to_cpu(ent->hashval) < *last_hashval)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
*last_hashval = be32_to_cpu(ent->hashval);
nameidx = be16_to_cpu(ent->nameidx);
if (nameidx < leafhdr->firstused ||
nameidx >= mp->m_attr_geo->blksize) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
return;
}
@@ -225,27 +225,27 @@ xfs_scrub_xattr_entry(
be16_to_cpu(lentry->valuelen));
name_end = (char *)lentry + namesize;
if (lentry->namelen == 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
} else {
rentry = xfs_attr3_leaf_name_remote(leaf, idx);
namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
name_end = (char *)rentry + namesize;
if (rentry->namelen == 0 || rentry->valueblk == 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
}
if (name_end > buf_end)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
- if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
- xfs_scrub_da_set_corrupt(ds, level);
+ if (!xchk_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
+ xchk_da_set_corrupt(ds, level);
if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
*usedbytes += namesize;
}
/* Scrub an attribute leaf. */
STATIC int
-xfs_scrub_xattr_block(
- struct xfs_scrub_da_btree *ds,
+xchk_xattr_block(
+ struct xchk_da_btree *ds,
int level)
{
struct xfs_attr3_icleaf_hdr leafhdr;
@@ -275,10 +275,10 @@ xfs_scrub_xattr_block(
if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 ||
leaf->hdr.info.hdr.pad != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
} else {
if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
}
/* Check the leaf header */
@@ -286,44 +286,44 @@ xfs_scrub_xattr_block(
hdrsize = xfs_attr3_leaf_hdr_size(leaf);
if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
if (leafhdr.firstused > mp->m_attr_geo->blksize)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
if (leafhdr.firstused < hdrsize)
- xfs_scrub_da_set_corrupt(ds, level);
- if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
+ if (!xchk_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
+ xchk_da_set_corrupt(ds, level);
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
entries = xfs_attr3_leaf_entryp(leaf);
if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {
/* Mark the leaf entry itself. */
off = (char *)ent - (char *)leaf;
- if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, off,
+ if (!xchk_xattr_set_map(ds->sc, usedmap, off,
sizeof(xfs_attr_leaf_entry_t))) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out;
}
/* Check the entry and nameval. */
- xfs_scrub_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
+ xchk_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
usedmap, ent, i, &usedbytes, &last_hashval);
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
}
- if (!xfs_scrub_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
- xfs_scrub_da_set_corrupt(ds, level);
+ if (!xchk_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
+ xchk_da_set_corrupt(ds, level);
if (leafhdr.usedbytes != usedbytes)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
out:
return 0;
@@ -331,8 +331,8 @@ out:
 /* Scrub an attribute btree record. */
STATIC int
-xfs_scrub_xattr_rec(
- struct xfs_scrub_da_btree *ds,
+xchk_xattr_rec(
+ struct xchk_da_btree *ds,
int level,
void *rec)
{
@@ -352,14 +352,14 @@ xfs_scrub_xattr_rec(
blk = &ds->state->path.blk[level];
/* Check the whole block, if necessary. */
- error = xfs_scrub_xattr_block(ds, level);
+ error = xchk_xattr_block(ds, level);
if (error)
goto out;
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
/* Check the hash of the entry. */
- error = xfs_scrub_da_btree_hash(ds, level, &ent->hashval);
+ error = xchk_da_btree_hash(ds, level, &ent->hashval);
if (error)
goto out;
@@ -368,7 +368,7 @@ xfs_scrub_xattr_rec(
hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr);
nameidx = be16_to_cpu(ent->nameidx);
if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out;
}
@@ -377,12 +377,12 @@ xfs_scrub_xattr_rec(
badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
XFS_ATTR_INCOMPLETE);
if ((ent->flags & badflags) != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
if (ent->flags & XFS_ATTR_LOCAL) {
lentry = (struct xfs_attr_leaf_name_local *)
(((char *)bp->b_addr) + nameidx);
if (lentry->namelen <= 0) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out;
}
calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen);
@@ -390,13 +390,13 @@ xfs_scrub_xattr_rec(
rentry = (struct xfs_attr_leaf_name_remote *)
(((char *)bp->b_addr) + nameidx);
if (rentry->namelen <= 0) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out;
}
calc_hash = xfs_da_hashname(rentry->name, rentry->namelen);
}
if (calc_hash != hash)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
out:
return error;
@@ -404,10 +404,10 @@ out:
/* Scrub the extended attribute metadata. */
int
-xfs_scrub_xattr(
- struct xfs_scrub_context *sc)
+xchk_xattr(
+ struct xfs_scrub *sc)
{
- struct xfs_scrub_xattr sx;
+ struct xchk_xattr sx;
struct attrlist_cursor_kern cursor = { 0 };
xfs_dablk_t last_checked = -1U;
int error = 0;
@@ -417,7 +417,7 @@ xfs_scrub_xattr(
memset(&sx, 0, sizeof(sx));
/* Check attribute tree structure */
- error = xfs_scrub_da_btree(sc, XFS_ATTR_FORK, xfs_scrub_xattr_rec,
+ error = xchk_da_btree(sc, XFS_ATTR_FORK, xchk_xattr_rec,
&last_checked);
if (error)
goto out;
@@ -429,7 +429,7 @@ xfs_scrub_xattr(
sx.context.dp = sc->ip;
sx.context.cursor = &cursor;
sx.context.resynch = 1;
- sx.context.put_listent = xfs_scrub_xattr_listent;
+ sx.context.put_listent = xchk_xattr_listent;
sx.context.tp = sc->tp;
sx.context.flags = ATTR_INCOMPLETE;
sx.sc = sc;
@@ -438,7 +438,7 @@ xfs_scrub_xattr(
* Look up every xattr in this file by name.
*
* Use the backend implementation of xfs_attr_list to call
- * xfs_scrub_xattr_listent on every attribute key in this inode.
+ * xchk_xattr_listent on every attribute key in this inode.
* In other words, we use the same iterator/callback mechanism
* that listattr uses to scrub extended attributes, though in our
* _listent function, we check the value of the attribute.
@@ -451,7 +451,7 @@ xfs_scrub_xattr(
* locking order.
*/
error = xfs_attr_list_int_ilocked(&sx.context);
- if (!xfs_scrub_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
+ if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
goto out;
out:
return error;
diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
new file mode 100644
index 000000000000..fdadc9e1dc49
--- /dev/null
+++ b/fs/xfs/scrub/bitmap.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_btree.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/trace.h"
+#include "scrub/repair.h"
+#include "scrub/bitmap.h"
+
+/*
+ * Set a range of this bitmap. Caller must ensure the range is not set.
+ *
+ * This is the logical equivalent of bitmap |= mask(start, len).
+ */
+int
+xfs_bitmap_set(
+ struct xfs_bitmap *bitmap,
+ uint64_t start,
+ uint64_t len)
+{
+ struct xfs_bitmap_range *bmr;
+
+ bmr = kmem_alloc(sizeof(struct xfs_bitmap_range), KM_MAYFAIL);
+ if (!bmr)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&bmr->list);
+ bmr->start = start;
+ bmr->len = len;
+ list_add_tail(&bmr->list, &bitmap->list);
+
+ return 0;
+}
+
+/* Free everything related to this bitmap. */
+void
+xfs_bitmap_destroy(
+ struct xfs_bitmap *bitmap)
+{
+ struct xfs_bitmap_range *bmr;
+ struct xfs_bitmap_range *n;
+
+ for_each_xfs_bitmap_extent(bmr, n, bitmap) {
+ list_del(&bmr->list);
+ kmem_free(bmr);
+ }
+}
+
+/* Set up a per-AG block bitmap. */
+void
+xfs_bitmap_init(
+ struct xfs_bitmap *bitmap)
+{
+ INIT_LIST_HEAD(&bitmap->list);
+}
+
+/* Compare two bitmap ranges by starting block. */
+static int
+xfs_bitmap_range_cmp(
+ void *priv,
+ struct list_head *a,
+ struct list_head *b)
+{
+ struct xfs_bitmap_range *ap;
+ struct xfs_bitmap_range *bp;
+
+ ap = container_of(a, struct xfs_bitmap_range, list);
+ bp = container_of(b, struct xfs_bitmap_range, list);
+
+ if (ap->start > bp->start)
+ return 1;
+ if (ap->start < bp->start)
+ return -1;
+ return 0;
+}
+
+/*
+ * Remove all the blocks mentioned in @sub from the extents in @bitmap.
+ *
+ * The intent is that callers will iterate the rmapbt for all of its records
+ * for a given owner to generate @bitmap; and iterate all the blocks of the
+ * metadata structures that are not being rebuilt and have the same rmapbt
+ * owner to generate @sub. This routine subtracts all the extents
+ * mentioned in sub from all the extents linked in @bitmap, which leaves
+ * @bitmap as the list of blocks that are not accounted for, which we assume
+ * are the dead blocks of the old metadata structure. The blocks mentioned in
+ * @bitmap can be reaped.
+ *
+ * This is the logical equivalent of bitmap &= ~sub.
+ */
+#define LEFT_ALIGNED (1 << 0)
+#define RIGHT_ALIGNED (1 << 1)
+int
+xfs_bitmap_disunion(
+ struct xfs_bitmap *bitmap,
+ struct xfs_bitmap *sub)
+{
+ struct list_head *lp;
+ struct xfs_bitmap_range *br;
+ struct xfs_bitmap_range *new_br;
+ struct xfs_bitmap_range *sub_br;
+ uint64_t sub_start;
+ uint64_t sub_len;
+ int state;
+ int error = 0;
+
+ if (list_empty(&bitmap->list) || list_empty(&sub->list))
+ return 0;
+ ASSERT(!list_empty(&sub->list));
+
+ list_sort(NULL, &bitmap->list, xfs_bitmap_range_cmp);
+ list_sort(NULL, &sub->list, xfs_bitmap_range_cmp);
+
+ /*
+ * Now that we've sorted both lists, we iterate bitmap once, rolling
+ * forward through sub and/or bitmap as necessary until we find an
+ * overlap or reach the end of either list. We do not reset lp to the
+ * head of bitmap nor do we reset sub_br to the head of sub. The
+ * list traversal is similar to merge sort, but we're deleting
+ * instead. In this manner we avoid O(n^2) operations.
+ */
+ sub_br = list_first_entry(&sub->list, struct xfs_bitmap_range,
+ list);
+ lp = bitmap->list.next;
+ while (lp != &bitmap->list) {
+ br = list_entry(lp, struct xfs_bitmap_range, list);
+
+ /*
+ * Advance sub_br and/or br until we find a pair that
+ * intersects or we run out of extents.
+ */
+ while (sub_br->start + sub_br->len <= br->start) {
+ if (list_is_last(&sub_br->list, &sub->list))
+ goto out;
+ sub_br = list_next_entry(sub_br, list);
+ }
+ if (sub_br->start >= br->start + br->len) {
+ lp = lp->next;
+ continue;
+ }
+
+ /* trim sub_br to fit the extent we have */
+ sub_start = sub_br->start;
+ sub_len = sub_br->len;
+ if (sub_br->start < br->start) {
+ sub_len -= br->start - sub_br->start;
+ sub_start = br->start;
+ }
+ if (sub_len > br->len)
+ sub_len = br->len;
+
+ state = 0;
+ if (sub_start == br->start)
+ state |= LEFT_ALIGNED;
+ if (sub_start + sub_len == br->start + br->len)
+ state |= RIGHT_ALIGNED;
+ switch (state) {
+ case LEFT_ALIGNED:
+ /* Coincides with only the left. */
+ br->start += sub_len;
+ br->len -= sub_len;
+ break;
+ case RIGHT_ALIGNED:
+ /* Coincides with only the right. */
+ br->len -= sub_len;
+ lp = lp->next;
+ break;
+ case LEFT_ALIGNED | RIGHT_ALIGNED:
+ /* Total overlap, just delete br. */
+ lp = lp->next;
+ list_del(&br->list);
+ kmem_free(br);
+ break;
+ case 0:
+ /*
+ * Deleting from the middle: add the new right extent
+ * and then shrink the left extent.
+ */
+ new_br = kmem_alloc(sizeof(struct xfs_bitmap_range),
+ KM_MAYFAIL);
+ if (!new_br) {
+ error = -ENOMEM;
+ goto out;
+ }
+ INIT_LIST_HEAD(&new_br->list);
+ new_br->start = sub_start + sub_len;
+ new_br->len = br->start + br->len - new_br->start;
+ list_add(&new_br->list, &br->list);
+ br->len = sub_start - br->start;
+ lp = lp->next;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+out:
+ return error;
+}
+#undef LEFT_ALIGNED
+#undef RIGHT_ALIGNED
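A concrete trace of the middle-deletion case, written as a usage sketch of this file's helpers (error handling abbreviated; nothing below appears in the patch itself):

	/* Sketch: subtracting a run from the middle splits one range into two. */
	struct xfs_bitmap	bmap;
	struct xfs_bitmap	sub;
	int			error;

	xfs_bitmap_init(&bmap);
	xfs_bitmap_init(&sub);
	error = xfs_bitmap_set(&bmap, 10, 10);		/* blocks 10..19 */
	if (!error)
		error = xfs_bitmap_set(&sub, 14, 2);	/* blocks 14..15 */
	if (!error)
		error = xfs_bitmap_disunion(&bmap, &sub);
	/* bmap now holds (10, 4) and (16, 4): blocks 10..13 and 16..19. */
	xfs_bitmap_destroy(&sub);
	xfs_bitmap_destroy(&bmap);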
+
+/*
+ * Record all btree blocks seen while iterating all records of a btree.
+ *
+ * We know that the btree query_all function starts at the left edge and walks
+ * towards the right edge of the tree. Therefore, we know that we can walk up
+ * the btree cursor towards the root; if the pointer for a given level points
+ * to the first record/key in that block, we haven't seen this block before;
+ * and therefore we need to remember that we saw this block in the btree.
+ *
+ * So if our btree is:
+ *
+ * 4
+ * / | \
+ * 1 2 3
+ *
+ * Pretend for this example that each leaf block has 100 btree records. For
+ * the first btree record, we'll observe that bc_ptrs[0] == 1, so we record
+ * that we saw block 1. Then we observe that bc_ptrs[1] == 1, so we record
+ * block 4. The list is [1, 4].
+ *
+ * For the second btree record, we see that bc_ptrs[0] == 2, so we exit the
+ * loop. The list remains [1, 4].
+ *
+ * For the 101st btree record, we've moved on to leaf block 2. Now
+ * bc_ptrs[0] == 1 again, so we record that we saw block 2. We see that
+ * bc_ptrs[1] == 2, so we exit the loop. The list is now [1, 4, 2].
+ *
+ * For the 102nd record, bc_ptrs[0] == 2, so we continue.
+ *
+ * For the 201st record, we've moved on to leaf block 3. bc_ptrs[0] == 1, so
+ * we add 3 to the list. Now it is [1, 4, 2, 3].
+ *
+ * For the 300th record we just exit, with the list being [1, 4, 2, 3].
+ */
+
+/*
+ * Record all the buffers pointed to by the btree cursor. Callers already
+ * engaged in a btree walk should call this function to capture the list of
+ * blocks going from the leaf towards the root.
+ */
+int
+xfs_bitmap_set_btcur_path(
+ struct xfs_bitmap *bitmap,
+ struct xfs_btree_cur *cur)
+{
+ struct xfs_buf *bp;
+ xfs_fsblock_t fsb;
+ int i;
+ int error;
+
+ for (i = 0; i < cur->bc_nlevels && cur->bc_ptrs[i] == 1; i++) {
+ xfs_btree_get_block(cur, i, &bp);
+ if (!bp)
+ continue;
+ fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
+ error = xfs_bitmap_set(bitmap, fsb, 1);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
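A sketch of the intended caller, assuming the xfs_btree_query_all() callback signature (the function name and reap context are hypothetical):

	/* Hypothetical query_all callback that remembers every block it visits. */
	STATIC int
	xrep_example_visit_rec(
		struct xfs_btree_cur	*cur,
		union xfs_btree_rec	*rec,
		void			*priv)
	{
		struct xfs_bitmap	*bitmap = priv;

		/* Record the leaf-to-root path on first entry into each block. */
		return xfs_bitmap_set_btcur_path(bitmap, cur);
	}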
+
+/* Collect a single btree block in the bitmap. */
+STATIC int
+xfs_bitmap_collect_btblock(
+ struct xfs_btree_cur *cur,
+ int level,
+ void *priv)
+{
+ struct xfs_bitmap *bitmap = priv;
+ struct xfs_buf *bp;
+ xfs_fsblock_t fsbno;
+
+ xfs_btree_get_block(cur, level, &bp);
+ if (!bp)
+ return 0;
+
+ fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
+ return xfs_bitmap_set(bitmap, fsbno, 1);
+}
+
+/* Walk the btree and mark the bitmap wherever a btree block is found. */
+int
+xfs_bitmap_set_btblocks(
+ struct xfs_bitmap *bitmap,
+ struct xfs_btree_cur *cur)
+{
+ return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock, bitmap);
+}
diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
new file mode 100644
index 000000000000..ae8ecbce6fa6
--- /dev/null
+++ b/fs/xfs/scrub/bitmap.h
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ */
+#ifndef __XFS_SCRUB_BITMAP_H__
+#define __XFS_SCRUB_BITMAP_H__
+
+struct xfs_bitmap_range {
+ struct list_head list;
+ uint64_t start;
+ uint64_t len;
+};
+
+struct xfs_bitmap {
+ struct list_head list;
+};
+
+void xfs_bitmap_init(struct xfs_bitmap *bitmap);
+void xfs_bitmap_destroy(struct xfs_bitmap *bitmap);
+
+#define for_each_xfs_bitmap_extent(bex, n, bitmap) \
+ list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
+
+#define for_each_xfs_bitmap_block(b, bex, n, bitmap) \
+ list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) \
+ for ((b) = bex->start; (b) < bex->start + bex->len; (b)++)
+
+int xfs_bitmap_set(struct xfs_bitmap *bitmap, uint64_t start, uint64_t len);
+int xfs_bitmap_disunion(struct xfs_bitmap *bitmap, struct xfs_bitmap *sub);
+int xfs_bitmap_set_btcur_path(struct xfs_bitmap *bitmap,
+ struct xfs_btree_cur *cur);
+int xfs_bitmap_set_btblocks(struct xfs_bitmap *bitmap,
+ struct xfs_btree_cur *cur);
+
+#endif /* __XFS_SCRUB_BITMAP_H__ */
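A usage sketch for the two iterators above (the loop bodies are placeholders, not part of the patch):

	struct xfs_bitmap	bitmap;	/* assume this was filled in earlier */
	struct xfs_bitmap_range	*bmr;
	struct xfs_bitmap_range	*n;
	uint64_t		b;

	/* Visit each contiguous run once... */
	for_each_xfs_bitmap_extent(bmr, n, &bitmap) {
		/* bmr->start and bmr->len describe one extent */
	}

	/* ...or visit every block of every run individually. */
	for_each_xfs_bitmap_block(b, bmr, n, &bitmap) {
		/* b is a single block number within some extent */
	}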
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index 3d08589f5c60..e1d11f3223e3 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -33,13 +33,13 @@
/* Set us up with an inode's bmap. */
int
-xfs_scrub_setup_inode_bmap(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_inode_bmap(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- int error;
+ int error;
- error = xfs_scrub_get_inode(sc, ip);
+ error = xchk_get_inode(sc, ip);
if (error)
goto out;
@@ -60,7 +60,7 @@ xfs_scrub_setup_inode_bmap(
}
/* Got the inode, lock it and we're ready to go. */
- error = xfs_scrub_trans_alloc(sc, 0);
+ error = xchk_trans_alloc(sc, 0);
if (error)
goto out;
sc->ilock_flags |= XFS_ILOCK_EXCL;
@@ -78,27 +78,27 @@ out:
* is in btree format.
*/
-struct xfs_scrub_bmap_info {
- struct xfs_scrub_context *sc;
- xfs_fileoff_t lastoff;
- bool is_rt;
- bool is_shared;
- int whichfork;
+struct xchk_bmap_info {
+ struct xfs_scrub *sc;
+ xfs_fileoff_t lastoff;
+ bool is_rt;
+ bool is_shared;
+ int whichfork;
};
/* Look for a corresponding rmap for this irec. */
static inline bool
-xfs_scrub_bmap_get_rmap(
- struct xfs_scrub_bmap_info *info,
- struct xfs_bmbt_irec *irec,
- xfs_agblock_t agbno,
- uint64_t owner,
- struct xfs_rmap_irec *rmap)
+xchk_bmap_get_rmap(
+ struct xchk_bmap_info *info,
+ struct xfs_bmbt_irec *irec,
+ xfs_agblock_t agbno,
+ uint64_t owner,
+ struct xfs_rmap_irec *rmap)
{
- xfs_fileoff_t offset;
- unsigned int rflags = 0;
- int has_rmap;
- int error;
+ xfs_fileoff_t offset;
+ unsigned int rflags = 0;
+ int has_rmap;
+ int error;
if (info->whichfork == XFS_ATTR_FORK)
rflags |= XFS_RMAP_ATTR_FORK;
@@ -120,7 +120,7 @@ xfs_scrub_bmap_get_rmap(
if (info->is_shared) {
error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
owner, offset, rflags, rmap, &has_rmap);
- if (!xfs_scrub_should_check_xref(info->sc, &error,
+ if (!xchk_should_check_xref(info->sc, &error,
&info->sc->sa.rmap_cur))
return false;
goto out;
@@ -131,36 +131,36 @@ xfs_scrub_bmap_get_rmap(
*/
error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
offset, rflags, &has_rmap);
- if (!xfs_scrub_should_check_xref(info->sc, &error,
+ if (!xchk_should_check_xref(info->sc, &error,
&info->sc->sa.rmap_cur))
return false;
if (!has_rmap)
goto out;
error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
- if (!xfs_scrub_should_check_xref(info->sc, &error,
+ if (!xchk_should_check_xref(info->sc, &error,
&info->sc->sa.rmap_cur))
return false;
out:
if (!has_rmap)
- xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
return has_rmap;
}
/* Make sure that we have rmapbt records for this extent. */
STATIC void
-xfs_scrub_bmap_xref_rmap(
- struct xfs_scrub_bmap_info *info,
- struct xfs_bmbt_irec *irec,
- xfs_agblock_t agbno)
+xchk_bmap_xref_rmap(
+ struct xchk_bmap_info *info,
+ struct xfs_bmbt_irec *irec,
+ xfs_agblock_t agbno)
{
- struct xfs_rmap_irec rmap;
- unsigned long long rmap_end;
- uint64_t owner;
+ struct xfs_rmap_irec rmap;
+ unsigned long long rmap_end;
+ uint64_t owner;
- if (!info->sc->sa.rmap_cur || xfs_scrub_skip_xref(info->sc->sm))
+ if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
return;
if (info->whichfork == XFS_COW_FORK)
@@ -169,14 +169,14 @@ xfs_scrub_bmap_xref_rmap(
owner = info->sc->ip->i_ino;
/* Find the rmap record for this irec. */
- if (!xfs_scrub_bmap_get_rmap(info, irec, agbno, owner, &rmap))
+ if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
return;
/* Check the rmap. */
rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
if (rmap.rm_startblock > agbno ||
agbno + irec->br_blockcount > rmap_end)
- xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
/*
@@ -189,12 +189,12 @@ xfs_scrub_bmap_xref_rmap(
rmap.rm_blockcount;
if (rmap.rm_offset > irec->br_startoff ||
irec->br_startoff + irec->br_blockcount > rmap_end)
- xfs_scrub_fblock_xref_set_corrupt(info->sc,
+ xchk_fblock_xref_set_corrupt(info->sc,
info->whichfork, irec->br_startoff);
}
if (rmap.rm_owner != owner)
- xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
/*
@@ -207,46 +207,46 @@ xfs_scrub_bmap_xref_rmap(
if (owner != XFS_RMAP_OWN_COW &&
irec->br_state == XFS_EXT_UNWRITTEN &&
!(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
- xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
if (info->whichfork == XFS_ATTR_FORK &&
!(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
- xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
- xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
}
/* Cross-reference a single rtdev extent record. */
STATIC void
-xfs_scrub_bmap_rt_extent_xref(
- struct xfs_scrub_bmap_info *info,
- struct xfs_inode *ip,
- struct xfs_btree_cur *cur,
- struct xfs_bmbt_irec *irec)
+xchk_bmap_rt_extent_xref(
+ struct xchk_bmap_info *info,
+ struct xfs_inode *ip,
+ struct xfs_btree_cur *cur,
+ struct xfs_bmbt_irec *irec)
{
if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
- xfs_scrub_xref_is_used_rt_space(info->sc, irec->br_startblock,
+ xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
irec->br_blockcount);
}
/* Cross-reference a single datadev extent record. */
STATIC void
-xfs_scrub_bmap_extent_xref(
- struct xfs_scrub_bmap_info *info,
- struct xfs_inode *ip,
- struct xfs_btree_cur *cur,
- struct xfs_bmbt_irec *irec)
+xchk_bmap_extent_xref(
+ struct xchk_bmap_info *info,
+ struct xfs_inode *ip,
+ struct xfs_btree_cur *cur,
+ struct xfs_bmbt_irec *irec)
{
- struct xfs_mount *mp = info->sc->mp;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- xfs_extlen_t len;
- int error;
+ struct xfs_mount *mp = info->sc->mp;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_extlen_t len;
+ int error;
if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
@@ -255,44 +255,44 @@ xfs_scrub_bmap_extent_xref(
agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
len = irec->br_blockcount;
- error = xfs_scrub_ag_init(info->sc, agno, &info->sc->sa);
- if (!xfs_scrub_fblock_process_error(info->sc, info->whichfork,
+ error = xchk_ag_init(info->sc, agno, &info->sc->sa);
+ if (!xchk_fblock_process_error(info->sc, info->whichfork,
irec->br_startoff, &error))
return;
- xfs_scrub_xref_is_used_space(info->sc, agbno, len);
- xfs_scrub_xref_is_not_inode_chunk(info->sc, agbno, len);
- xfs_scrub_bmap_xref_rmap(info, irec, agbno);
+ xchk_xref_is_used_space(info->sc, agbno, len);
+ xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
+ xchk_bmap_xref_rmap(info, irec, agbno);
switch (info->whichfork) {
case XFS_DATA_FORK:
if (xfs_is_reflink_inode(info->sc->ip))
break;
/* fall through */
case XFS_ATTR_FORK:
- xfs_scrub_xref_is_not_shared(info->sc, agbno,
+ xchk_xref_is_not_shared(info->sc, agbno,
irec->br_blockcount);
break;
case XFS_COW_FORK:
- xfs_scrub_xref_is_cow_staging(info->sc, agbno,
+ xchk_xref_is_cow_staging(info->sc, agbno,
irec->br_blockcount);
break;
}
- xfs_scrub_ag_free(info->sc, &info->sc->sa);
+ xchk_ag_free(info->sc, &info->sc->sa);
}
/* Scrub a single extent record. */
STATIC int
-xfs_scrub_bmap_extent(
- struct xfs_inode *ip,
- struct xfs_btree_cur *cur,
- struct xfs_scrub_bmap_info *info,
- struct xfs_bmbt_irec *irec)
+xchk_bmap_extent(
+ struct xfs_inode *ip,
+ struct xfs_btree_cur *cur,
+ struct xchk_bmap_info *info,
+ struct xfs_bmbt_irec *irec)
{
- struct xfs_mount *mp = info->sc->mp;
- struct xfs_buf *bp = NULL;
- xfs_filblks_t end;
- int error = 0;
+ struct xfs_mount *mp = info->sc->mp;
+ struct xfs_buf *bp = NULL;
+ xfs_filblks_t end;
+ int error = 0;
if (cur)
xfs_btree_get_block(cur, 0, &bp);
@@ -302,12 +302,12 @@ xfs_scrub_bmap_extent(
* from the incore list, for which there is no ordering check.
*/
if (irec->br_startoff < info->lastoff)
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
/* There should never be a "hole" extent in either extent list. */
if (irec->br_startblock == HOLESTARTBLOCK)
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
/*
@@ -315,40 +315,40 @@ xfs_scrub_bmap_extent(
* in-core extent scan, and we should never see these in the bmbt.
*/
if (isnullstartblock(irec->br_startblock))
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
/* Make sure the extent points to a valid place. */
if (irec->br_blockcount > MAXEXTLEN)
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
end = irec->br_startblock + irec->br_blockcount - 1;
if (info->is_rt &&
(!xfs_verify_rtbno(mp, irec->br_startblock) ||
!xfs_verify_rtbno(mp, end)))
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
if (!info->is_rt &&
(!xfs_verify_fsbno(mp, irec->br_startblock) ||
!xfs_verify_fsbno(mp, end) ||
XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
XFS_FSB_TO_AGNO(mp, end)))
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
/* We don't allow unwritten extents on attr forks. */
if (irec->br_state == XFS_EXT_UNWRITTEN &&
info->whichfork == XFS_ATTR_FORK)
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
if (info->is_rt)
- xfs_scrub_bmap_rt_extent_xref(info, ip, cur, irec);
+ xchk_bmap_rt_extent_xref(info, ip, cur, irec);
else
- xfs_scrub_bmap_extent_xref(info, ip, cur, irec);
+ xchk_bmap_extent_xref(info, ip, cur, irec);
info->lastoff = irec->br_startoff + irec->br_blockcount;
return error;
@@ -356,17 +356,17 @@ xfs_scrub_bmap_extent(
/* Scrub a bmbt record. */
STATIC int
-xfs_scrub_bmapbt_rec(
- struct xfs_scrub_btree *bs,
- union xfs_btree_rec *rec)
+xchk_bmapbt_rec(
+ struct xchk_btree *bs,
+ union xfs_btree_rec *rec)
{
- struct xfs_bmbt_irec irec;
- struct xfs_scrub_bmap_info *info = bs->private;
- struct xfs_inode *ip = bs->cur->bc_private.b.ip;
- struct xfs_buf *bp = NULL;
- struct xfs_btree_block *block;
- uint64_t owner;
- int i;
+ struct xfs_bmbt_irec irec;
+ struct xchk_bmap_info *info = bs->private;
+ struct xfs_inode *ip = bs->cur->bc_private.b.ip;
+ struct xfs_buf *bp = NULL;
+ struct xfs_btree_block *block;
+ uint64_t owner;
+ int i;
/*
* Check the owners of the btree blocks up to the level below
@@ -378,54 +378,53 @@ xfs_scrub_bmapbt_rec(
block = xfs_btree_get_block(bs->cur, i, &bp);
owner = be64_to_cpu(block->bb_u.l.bb_owner);
if (owner != ip->i_ino)
- xfs_scrub_fblock_set_corrupt(bs->sc,
+ xchk_fblock_set_corrupt(bs->sc,
info->whichfork, 0);
}
}
/* Set up the in-core record and scrub it. */
xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
- return xfs_scrub_bmap_extent(ip, bs->cur, info, &irec);
+ return xchk_bmap_extent(ip, bs->cur, info, &irec);
}
/* Scan the btree records. */
STATIC int
-xfs_scrub_bmap_btree(
- struct xfs_scrub_context *sc,
- int whichfork,
- struct xfs_scrub_bmap_info *info)
+xchk_bmap_btree(
+ struct xfs_scrub *sc,
+ int whichfork,
+ struct xchk_bmap_info *info)
{
- struct xfs_owner_info oinfo;
- struct xfs_mount *mp = sc->mp;
- struct xfs_inode *ip = sc->ip;
- struct xfs_btree_cur *cur;
- int error;
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_inode *ip = sc->ip;
+ struct xfs_btree_cur *cur;
+ int error;
cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
- error = xfs_scrub_btree(sc, cur, xfs_scrub_bmapbt_rec, &oinfo, info);
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR :
- XFS_BTREE_NOERROR);
+ error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
+ xfs_btree_del_cursor(cur, error);
return error;
}
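Note that this hunk also picks up the new xfs_btree_del_cursor() calling convention used throughout the patch: the teardown now takes the integer error code directly in place of the old XFS_BTREE_ERROR/XFS_BTREE_NOERROR flag that the removed lines computed from it.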
-struct xfs_scrub_bmap_check_rmap_info {
- struct xfs_scrub_context *sc;
- int whichfork;
- struct xfs_iext_cursor icur;
+struct xchk_bmap_check_rmap_info {
+ struct xfs_scrub *sc;
+ int whichfork;
+ struct xfs_iext_cursor icur;
};
/* Can we find bmaps that fit this rmap? */
STATIC int
-xfs_scrub_bmap_check_rmap(
+xchk_bmap_check_rmap(
struct xfs_btree_cur *cur,
struct xfs_rmap_irec *rec,
void *priv)
{
struct xfs_bmbt_irec irec;
- struct xfs_scrub_bmap_check_rmap_info *sbcri = priv;
+ struct xchk_bmap_check_rmap_info *sbcri = priv;
struct xfs_ifork *ifp;
- struct xfs_scrub_context *sc = sbcri->sc;
+ struct xfs_scrub *sc = sbcri->sc;
bool have_map;
/* Is this even the right fork? */
@@ -440,14 +439,14 @@ xfs_scrub_bmap_check_rmap(
/* Now look up the bmbt record. */
ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
if (!ifp) {
- xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+ xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
goto out;
}
have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
&sbcri->icur, &irec);
if (!have_map)
- xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+ xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
/*
 * bmap extent records are constrained to 2^21 blocks in length
@@ -458,14 +457,14 @@ xfs_scrub_bmap_check_rmap(
*/
while (have_map) {
if (irec.br_startoff != rec->rm_offset)
- xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+ xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
cur->bc_private.a.agno, rec->rm_startblock))
- xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+ xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
if (irec.br_blockcount > rec->rm_blockcount)
- xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+ xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
break;
@@ -476,7 +475,7 @@ xfs_scrub_bmap_check_rmap(
break;
have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
if (!have_map)
- xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+ xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
}
@@ -488,12 +487,12 @@ out:
/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
-xfs_scrub_bmap_check_ag_rmaps(
- struct xfs_scrub_context *sc,
+xchk_bmap_check_ag_rmaps(
+ struct xfs_scrub *sc,
int whichfork,
xfs_agnumber_t agno)
{
- struct xfs_scrub_bmap_check_rmap_info sbcri;
+ struct xchk_bmap_check_rmap_info sbcri;
struct xfs_btree_cur *cur;
struct xfs_buf *agf;
int error;
@@ -510,11 +509,11 @@ xfs_scrub_bmap_check_ag_rmaps(
sbcri.sc = sc;
sbcri.whichfork = whichfork;
- error = xfs_rmap_query_all(cur, xfs_scrub_bmap_check_rmap, &sbcri);
+ error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
if (error == XFS_BTREE_QUERY_RANGE_ABORT)
error = 0;
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
out_agf:
xfs_trans_brelse(sc->tp, agf);
return error;
@@ -522,13 +521,13 @@ out_agf:
/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
-xfs_scrub_bmap_check_rmaps(
- struct xfs_scrub_context *sc,
- int whichfork)
+xchk_bmap_check_rmaps(
+ struct xfs_scrub *sc,
+ int whichfork)
{
- loff_t size;
- xfs_agnumber_t agno;
- int error;
+ loff_t size;
+ xfs_agnumber_t agno;
+ int error;
if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
whichfork == XFS_COW_FORK ||
@@ -562,7 +561,7 @@ xfs_scrub_bmap_check_rmaps(
return 0;
for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
- error = xfs_scrub_bmap_check_ag_rmaps(sc, whichfork, agno);
+ error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
if (error)
return error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
@@ -579,18 +578,18 @@ xfs_scrub_bmap_check_rmaps(
* Then we unconditionally scan the incore extent cache.
*/
STATIC int
-xfs_scrub_bmap(
- struct xfs_scrub_context *sc,
- int whichfork)
+xchk_bmap(
+ struct xfs_scrub *sc,
+ int whichfork)
{
- struct xfs_bmbt_irec irec;
- struct xfs_scrub_bmap_info info = { NULL };
- struct xfs_mount *mp = sc->mp;
- struct xfs_inode *ip = sc->ip;
- struct xfs_ifork *ifp;
- xfs_fileoff_t endoff;
- struct xfs_iext_cursor icur;
- int error = 0;
+ struct xfs_bmbt_irec irec;
+ struct xchk_bmap_info info = { NULL };
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_inode *ip = sc->ip;
+ struct xfs_ifork *ifp;
+ xfs_fileoff_t endoff;
+ struct xfs_iext_cursor icur;
+ int error = 0;
ifp = XFS_IFORK_PTR(ip, whichfork);
@@ -606,7 +605,7 @@ xfs_scrub_bmap(
goto out;
/* No CoW forks on non-reflink inodes/filesystems. */
if (!xfs_is_reflink_inode(ip)) {
- xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_set_corrupt(sc, sc->ip->i_ino);
goto out;
}
break;
@@ -615,7 +614,7 @@ xfs_scrub_bmap(
goto out_check_rmap;
if (!xfs_sb_version_hasattr(&mp->m_sb) &&
!xfs_sb_version_hasattr2(&mp->m_sb))
- xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_set_corrupt(sc, sc->ip->i_ino);
break;
default:
ASSERT(whichfork == XFS_DATA_FORK);
@@ -631,22 +630,22 @@ xfs_scrub_bmap(
goto out;
case XFS_DINODE_FMT_EXTENTS:
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
- xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
+ xchk_fblock_set_corrupt(sc, whichfork, 0);
goto out;
}
break;
case XFS_DINODE_FMT_BTREE:
if (whichfork == XFS_COW_FORK) {
- xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
+ xchk_fblock_set_corrupt(sc, whichfork, 0);
goto out;
}
- error = xfs_scrub_bmap_btree(sc, whichfork, &info);
+ error = xchk_bmap_btree(sc, whichfork, &info);
if (error)
goto out;
break;
default:
- xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
+ xchk_fblock_set_corrupt(sc, whichfork, 0);
goto out;
}
@@ -656,37 +655,37 @@ xfs_scrub_bmap(
/* Now try to scrub the in-memory extent list. */
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(sc->tp, ip, whichfork);
- if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
+ if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
goto out;
}
/* Find the offset of the last extent in the mapping. */
error = xfs_bmap_last_offset(ip, &endoff, whichfork);
- if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
+ if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
goto out;
/* Scrub extent records. */
info.lastoff = 0;
ifp = XFS_IFORK_PTR(ip, whichfork);
for_each_xfs_iext(ifp, &icur, &irec) {
- if (xfs_scrub_should_terminate(sc, &error) ||
+ if (xchk_should_terminate(sc, &error) ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
break;
if (isnullstartblock(irec.br_startblock))
continue;
if (irec.br_startoff >= endoff) {
- xfs_scrub_fblock_set_corrupt(sc, whichfork,
+ xchk_fblock_set_corrupt(sc, whichfork,
irec.br_startoff);
goto out;
}
- error = xfs_scrub_bmap_extent(ip, NULL, &info, &irec);
+ error = xchk_bmap_extent(ip, NULL, &info, &irec);
if (error)
goto out;
}
out_check_rmap:
- error = xfs_scrub_bmap_check_rmaps(sc, whichfork);
- if (!xfs_scrub_fblock_xref_process_error(sc, whichfork, 0, &error))
+ error = xchk_bmap_check_rmaps(sc, whichfork);
+ if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
goto out;
out:
return error;
@@ -694,27 +693,27 @@ out:
/* Scrub an inode's data fork. */
int
-xfs_scrub_bmap_data(
- struct xfs_scrub_context *sc)
+xchk_bmap_data(
+ struct xfs_scrub *sc)
{
- return xfs_scrub_bmap(sc, XFS_DATA_FORK);
+ return xchk_bmap(sc, XFS_DATA_FORK);
}
/* Scrub an inode's attr fork. */
int
-xfs_scrub_bmap_attr(
- struct xfs_scrub_context *sc)
+xchk_bmap_attr(
+ struct xfs_scrub *sc)
{
- return xfs_scrub_bmap(sc, XFS_ATTR_FORK);
+ return xchk_bmap(sc, XFS_ATTR_FORK);
}
/* Scrub an inode's CoW fork. */
int
-xfs_scrub_bmap_cow(
- struct xfs_scrub_context *sc)
+xchk_bmap_cow(
+ struct xfs_scrub *sc)
{
if (!xfs_is_reflink_inode(sc->ip))
return -ENOENT;
- return xfs_scrub_bmap(sc, XFS_COW_FORK);
+ return xchk_bmap(sc, XFS_COW_FORK);
}
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index 5b472045f036..4ae959f7ad2c 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -29,13 +29,13 @@
* operational errors in common.c.
*/
static bool
-__xfs_scrub_btree_process_error(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- int level,
- int *error,
- __u32 errflag,
- void *ret_ip)
+__xchk_btree_process_error(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ int level,
+ int *error,
+ __u32 errflag,
+ void *ret_ip)
{
if (*error == 0)
return true;
@@ -43,7 +43,7 @@ __xfs_scrub_btree_process_error(
switch (*error) {
case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */
- trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
+ trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break;
case -EFSBADCRC:
case -EFSCORRUPTED:
@@ -53,10 +53,10 @@ __xfs_scrub_btree_process_error(
/* fall through */
default:
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
- trace_xfs_scrub_ifork_btree_op_error(sc, cur, level,
+ trace_xchk_ifork_btree_op_error(sc, cur, level,
*error, ret_ip);
else
- trace_xfs_scrub_btree_op_error(sc, cur, level,
+ trace_xchk_btree_op_error(sc, cur, level,
*error, ret_ip);
break;
}
@@ -64,63 +64,63 @@ __xfs_scrub_btree_process_error(
}
bool
-xfs_scrub_btree_process_error(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- int level,
- int *error)
+xchk_btree_process_error(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ int level,
+ int *error)
{
- return __xfs_scrub_btree_process_error(sc, cur, level, error,
+ return __xchk_btree_process_error(sc, cur, level, error,
XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}
bool
-xfs_scrub_btree_xref_process_error(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- int level,
- int *error)
+xchk_btree_xref_process_error(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ int level,
+ int *error)
{
- return __xfs_scrub_btree_process_error(sc, cur, level, error,
+ return __xchk_btree_process_error(sc, cur, level, error,
XFS_SCRUB_OFLAG_XFAIL, __return_address);
}
/* Record btree block corruption. */
static void
-__xfs_scrub_btree_set_corrupt(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- int level,
- __u32 errflag,
- void *ret_ip)
+__xchk_btree_set_corrupt(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ int level,
+ __u32 errflag,
+ void *ret_ip)
{
sc->sm->sm_flags |= errflag;
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
- trace_xfs_scrub_ifork_btree_error(sc, cur, level,
+ trace_xchk_ifork_btree_error(sc, cur, level,
ret_ip);
else
- trace_xfs_scrub_btree_error(sc, cur, level,
+ trace_xchk_btree_error(sc, cur, level,
ret_ip);
}
void
-xfs_scrub_btree_set_corrupt(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- int level)
+xchk_btree_set_corrupt(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ int level)
{
- __xfs_scrub_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT,
+ __xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT,
__return_address);
}
void
-xfs_scrub_btree_xref_set_corrupt(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- int level)
+xchk_btree_xref_set_corrupt(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ int level)
{
- __xfs_scrub_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT,
+ __xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT,
__return_address);
}
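The split between these helpers is deliberate: the plain variants tag the structure being scrubbed as corrupt (XFS_SCRUB_OFLAG_CORRUPT), while the _xref_ variants record trouble found during cross-referencing (XFS_SCRUB_OFLAG_XFAIL for failed lookups, XFS_SCRUB_OFLAG_XCORRUPT for inconsistencies), letting userspace distinguish damage in the primary structure from damage noticed in its neighbors.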
@@ -129,8 +129,8 @@ xfs_scrub_btree_xref_set_corrupt(
* keys.
*/
STATIC void
-xfs_scrub_btree_rec(
- struct xfs_scrub_btree *bs)
+xchk_btree_rec(
+ struct xchk_btree *bs)
{
struct xfs_btree_cur *cur = bs->cur;
union xfs_btree_rec *rec;
@@ -144,11 +144,11 @@ xfs_scrub_btree_rec(
block = xfs_btree_get_block(cur, 0, &bp);
rec = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
- trace_xfs_scrub_btree_rec(bs->sc, cur, 0);
+ trace_xchk_btree_rec(bs->sc, cur, 0);
/* If this isn't the first record, are they in order? */
if (!bs->firstrec && !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
- xfs_scrub_btree_set_corrupt(bs->sc, cur, 0);
+ xchk_btree_set_corrupt(bs->sc, cur, 0);
bs->firstrec = false;
memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len);
@@ -160,7 +160,7 @@ xfs_scrub_btree_rec(
keyblock = xfs_btree_get_block(cur, 1, &bp);
keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, &key, keyp) < 0)
- xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);
+ xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return;
@@ -169,7 +169,7 @@ xfs_scrub_btree_rec(
cur->bc_ops->init_high_key_from_rec(&hkey, rec);
keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, keyp, &hkey) < 0)
- xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);
+ xchk_btree_set_corrupt(bs->sc, cur, 1);
}
/*
@@ -177,8 +177,8 @@ xfs_scrub_btree_rec(
* keys.
*/
STATIC void
-xfs_scrub_btree_key(
- struct xfs_scrub_btree *bs,
+xchk_btree_key(
+ struct xchk_btree *bs,
int level)
{
struct xfs_btree_cur *cur = bs->cur;
@@ -191,12 +191,12 @@ xfs_scrub_btree_key(
block = xfs_btree_get_block(cur, level, &bp);
key = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
- trace_xfs_scrub_btree_key(bs->sc, cur, level);
+ trace_xchk_btree_key(bs->sc, cur, level);
/* If this isn't the first key, are they in order? */
if (!bs->firstkey[level] &&
!cur->bc_ops->keys_inorder(cur, &bs->lastkey[level], key))
- xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ xchk_btree_set_corrupt(bs->sc, cur, level);
bs->firstkey[level] = false;
memcpy(&bs->lastkey[level], key, cur->bc_ops->key_len);
@@ -207,7 +207,7 @@ xfs_scrub_btree_key(
keyblock = xfs_btree_get_block(cur, level + 1, &bp);
keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, key, keyp) < 0)
- xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ xchk_btree_set_corrupt(bs->sc, cur, level);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return;
@@ -216,7 +216,7 @@ xfs_scrub_btree_key(
key = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, keyp, key) < 0)
- xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ xchk_btree_set_corrupt(bs->sc, cur, level);
}
/*
@@ -224,12 +224,12 @@ xfs_scrub_btree_key(
* Callers do not need to set the corrupt flag.
*/
static bool
-xfs_scrub_btree_ptr_ok(
- struct xfs_scrub_btree *bs,
- int level,
- union xfs_btree_ptr *ptr)
+xchk_btree_ptr_ok(
+ struct xchk_btree *bs,
+ int level,
+ union xfs_btree_ptr *ptr)
{
- bool res;
+ bool res;
/* A btree rooted in an inode has no block pointer to the root. */
if ((bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
@@ -242,29 +242,29 @@ xfs_scrub_btree_ptr_ok(
else
res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level);
if (!res)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, level);
return res;
}
 /* Check that a btree block's sibling matches what we expect it to be. */
STATIC int
-xfs_scrub_btree_block_check_sibling(
- struct xfs_scrub_btree *bs,
- int level,
- int direction,
- union xfs_btree_ptr *sibling)
+xchk_btree_block_check_sibling(
+ struct xchk_btree *bs,
+ int level,
+ int direction,
+ union xfs_btree_ptr *sibling)
{
- struct xfs_btree_cur *cur = bs->cur;
- struct xfs_btree_block *pblock;
- struct xfs_buf *pbp;
- struct xfs_btree_cur *ncur = NULL;
- union xfs_btree_ptr *pp;
- int success;
- int error;
+ struct xfs_btree_cur *cur = bs->cur;
+ struct xfs_btree_block *pblock;
+ struct xfs_buf *pbp;
+ struct xfs_btree_cur *ncur = NULL;
+ union xfs_btree_ptr *pp;
+ int success;
+ int error;
error = xfs_btree_dup_cursor(cur, &ncur);
- if (!xfs_scrub_btree_process_error(bs->sc, cur, level + 1, &error) ||
+ if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error) ||
!ncur)
return error;
@@ -278,7 +278,7 @@ xfs_scrub_btree_block_check_sibling(
else
error = xfs_btree_decrement(ncur, level + 1, &success);
if (error == 0 && success)
- xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ xchk_btree_set_corrupt(bs->sc, cur, level);
error = 0;
goto out;
}
@@ -288,23 +288,23 @@ xfs_scrub_btree_block_check_sibling(
error = xfs_btree_increment(ncur, level + 1, &success);
else
error = xfs_btree_decrement(ncur, level + 1, &success);
- if (!xfs_scrub_btree_process_error(bs->sc, cur, level + 1, &error))
+ if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error))
goto out;
if (!success) {
- xfs_scrub_btree_set_corrupt(bs->sc, cur, level + 1);
+ xchk_btree_set_corrupt(bs->sc, cur, level + 1);
goto out;
}
/* Compare upper level pointer to sibling pointer. */
pblock = xfs_btree_get_block(ncur, level + 1, &pbp);
pp = xfs_btree_ptr_addr(ncur, ncur->bc_ptrs[level + 1], pblock);
- if (!xfs_scrub_btree_ptr_ok(bs, level + 1, pp))
+ if (!xchk_btree_ptr_ok(bs, level + 1, pp))
goto out;
if (pbp)
- xfs_scrub_buffer_recheck(bs->sc, pbp);
+ xchk_buffer_recheck(bs->sc, pbp);
if (xfs_btree_diff_two_ptrs(cur, pp, sibling))
- xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ xchk_btree_set_corrupt(bs->sc, cur, level);
out:
xfs_btree_del_cursor(ncur, XFS_BTREE_ERROR);
return error;
@@ -312,15 +312,15 @@ out:
/* Check the siblings of a btree block. */
STATIC int
-xfs_scrub_btree_block_check_siblings(
- struct xfs_scrub_btree *bs,
- struct xfs_btree_block *block)
+xchk_btree_block_check_siblings(
+ struct xchk_btree *bs,
+ struct xfs_btree_block *block)
{
- struct xfs_btree_cur *cur = bs->cur;
- union xfs_btree_ptr leftsib;
- union xfs_btree_ptr rightsib;
- int level;
- int error = 0;
+ struct xfs_btree_cur *cur = bs->cur;
+ union xfs_btree_ptr leftsib;
+ union xfs_btree_ptr rightsib;
+ int level;
+ int error = 0;
xfs_btree_get_sibling(cur, block, &leftsib, XFS_BB_LEFTSIB);
xfs_btree_get_sibling(cur, block, &rightsib, XFS_BB_RIGHTSIB);
@@ -330,7 +330,7 @@ xfs_scrub_btree_block_check_siblings(
if (level == cur->bc_nlevels - 1) {
if (!xfs_btree_ptr_is_null(cur, &leftsib) ||
!xfs_btree_ptr_is_null(cur, &rightsib))
- xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ xchk_btree_set_corrupt(bs->sc, cur, level);
goto out;
}
@@ -339,10 +339,10 @@ xfs_scrub_btree_block_check_siblings(
* parent level pointers?
 * (These functions absorb error codes for us.)
*/
- error = xfs_scrub_btree_block_check_sibling(bs, level, -1, &leftsib);
+ error = xchk_btree_block_check_sibling(bs, level, -1, &leftsib);
if (error)
return error;
- error = xfs_scrub_btree_block_check_sibling(bs, level, 1, &rightsib);
+ error = xchk_btree_block_check_sibling(bs, level, 1, &rightsib);
if (error)
return error;
out:
@@ -360,16 +360,16 @@ struct check_owner {
* an rmap record for it.
*/
STATIC int
-xfs_scrub_btree_check_block_owner(
- struct xfs_scrub_btree *bs,
- int level,
- xfs_daddr_t daddr)
+xchk_btree_check_block_owner(
+ struct xchk_btree *bs,
+ int level,
+ xfs_daddr_t daddr)
{
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- xfs_btnum_t btnum;
- bool init_sa;
- int error = 0;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_btnum_t btnum;
+ bool init_sa;
+ int error = 0;
if (!bs->cur)
return 0;
@@ -380,13 +380,13 @@ xfs_scrub_btree_check_block_owner(
init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS;
if (init_sa) {
- error = xfs_scrub_ag_init(bs->sc, agno, &bs->sc->sa);
- if (!xfs_scrub_btree_xref_process_error(bs->sc, bs->cur,
+ error = xchk_ag_init(bs->sc, agno, &bs->sc->sa);
+ if (!xchk_btree_xref_process_error(bs->sc, bs->cur,
level, &error))
return error;
}
- xfs_scrub_xref_is_used_space(bs->sc, agbno, 1);
+ xchk_xref_is_used_space(bs->sc, agbno, 1);
/*
* The bnobt scrubber aliases bs->cur to bs->sc->sa.bno_cur, so we
* have to nullify it (to shut down further block owner checks) if
@@ -395,25 +395,25 @@ xfs_scrub_btree_check_block_owner(
if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO)
bs->cur = NULL;
- xfs_scrub_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo);
+ xchk_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo);
if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP)
bs->cur = NULL;
if (init_sa)
- xfs_scrub_ag_free(bs->sc, &bs->sc->sa);
+ xchk_ag_free(bs->sc, &bs->sc->sa);
return error;
}
/* Check the owner of a btree block. */
STATIC int
-xfs_scrub_btree_check_owner(
- struct xfs_scrub_btree *bs,
- int level,
- struct xfs_buf *bp)
+xchk_btree_check_owner(
+ struct xchk_btree *bs,
+ int level,
+ struct xfs_buf *bp)
{
- struct xfs_btree_cur *cur = bs->cur;
- struct check_owner *co;
+ struct xfs_btree_cur *cur = bs->cur;
+ struct check_owner *co;
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL)
return 0;
@@ -437,7 +437,7 @@ xfs_scrub_btree_check_owner(
return 0;
}
- return xfs_scrub_btree_check_block_owner(bs, level, XFS_BUF_ADDR(bp));
+ return xchk_btree_check_block_owner(bs, level, XFS_BUF_ADDR(bp));
}
/*
@@ -445,8 +445,8 @@ xfs_scrub_btree_check_owner(
* special blocks that don't require that.
*/
STATIC void
-xfs_scrub_btree_check_minrecs(
- struct xfs_scrub_btree *bs,
+xchk_btree_check_minrecs(
+ struct xchk_btree *bs,
int level,
struct xfs_btree_block *block)
{
@@ -475,7 +475,7 @@ xfs_scrub_btree_check_minrecs(
if (level >= ok_level)
return;
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, level);
}
/*
@@ -483,21 +483,21 @@ xfs_scrub_btree_check_minrecs(
* and buffer pointers (if applicable) if they're ok to use.
*/
STATIC int
-xfs_scrub_btree_get_block(
- struct xfs_scrub_btree *bs,
- int level,
- union xfs_btree_ptr *pp,
- struct xfs_btree_block **pblock,
- struct xfs_buf **pbp)
+xchk_btree_get_block(
+ struct xchk_btree *bs,
+ int level,
+ union xfs_btree_ptr *pp,
+ struct xfs_btree_block **pblock,
+ struct xfs_buf **pbp)
{
- void *failed_at;
- int error;
+ xfs_failaddr_t failed_at;
+ int error;
*pblock = NULL;
*pbp = NULL;
error = xfs_btree_lookup_get_block(bs->cur, level, pp, pblock);
- if (!xfs_scrub_btree_process_error(bs->sc, bs->cur, level, &error) ||
+ if (!xchk_btree_process_error(bs->sc, bs->cur, level, &error) ||
!*pblock)
return error;
@@ -509,19 +509,19 @@ xfs_scrub_btree_get_block(
failed_at = __xfs_btree_check_sblock(bs->cur, *pblock,
level, *pbp);
if (failed_at) {
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, level);
return 0;
}
if (*pbp)
- xfs_scrub_buffer_recheck(bs->sc, *pbp);
+ xchk_buffer_recheck(bs->sc, *pbp);
- xfs_scrub_btree_check_minrecs(bs, level, *pblock);
+ xchk_btree_check_minrecs(bs, level, *pblock);
/*
* Check the block's owner; this function absorbs error codes
* for us.
*/
- error = xfs_scrub_btree_check_owner(bs, level, *pbp);
+ error = xchk_btree_check_owner(bs, level, *pbp);
if (error)
return error;
@@ -529,7 +529,7 @@ xfs_scrub_btree_get_block(
* Check the block's siblings; this function absorbs error codes
* for us.
*/
- return xfs_scrub_btree_block_check_siblings(bs, *pblock);
+ return xchk_btree_block_check_siblings(bs, *pblock);
}
/*
@@ -537,18 +537,18 @@ xfs_scrub_btree_get_block(
* in the parent block.
*/
STATIC void
-xfs_scrub_btree_block_keys(
- struct xfs_scrub_btree *bs,
- int level,
- struct xfs_btree_block *block)
+xchk_btree_block_keys(
+ struct xchk_btree *bs,
+ int level,
+ struct xfs_btree_block *block)
{
- union xfs_btree_key block_keys;
- struct xfs_btree_cur *cur = bs->cur;
- union xfs_btree_key *high_bk;
- union xfs_btree_key *parent_keys;
- union xfs_btree_key *high_pk;
- struct xfs_btree_block *parent_block;
- struct xfs_buf *bp;
+ union xfs_btree_key block_keys;
+ struct xfs_btree_cur *cur = bs->cur;
+ union xfs_btree_key *high_bk;
+ union xfs_btree_key *parent_keys;
+ union xfs_btree_key *high_pk;
+ struct xfs_btree_block *parent_block;
+ struct xfs_buf *bp;
if (level >= cur->bc_nlevels - 1)
return;
@@ -562,7 +562,7 @@ xfs_scrub_btree_block_keys(
parent_block);
if (cur->bc_ops->diff_two_keys(cur, &block_keys, parent_keys) != 0)
- xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);
+ xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return;
@@ -573,7 +573,7 @@ xfs_scrub_btree_block_keys(
parent_block);
if (cur->bc_ops->diff_two_keys(cur, high_bk, high_pk) != 0)
- xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);
+ xchk_btree_set_corrupt(bs->sc, cur, 1);
}
/*
@@ -582,24 +582,24 @@ xfs_scrub_btree_block_keys(
* so that the caller can verify individual records.
*/
int
-xfs_scrub_btree(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- xfs_scrub_btree_rec_fn scrub_fn,
- struct xfs_owner_info *oinfo,
- void *private)
+xchk_btree(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ xchk_btree_rec_fn scrub_fn,
+ struct xfs_owner_info *oinfo,
+ void *private)
{
- struct xfs_scrub_btree bs = { NULL };
- union xfs_btree_ptr ptr;
- union xfs_btree_ptr *pp;
- union xfs_btree_rec *recp;
- struct xfs_btree_block *block;
- int level;
- struct xfs_buf *bp;
- struct check_owner *co;
- struct check_owner *n;
- int i;
- int error = 0;
+ struct xchk_btree bs = { NULL };
+ union xfs_btree_ptr ptr;
+ union xfs_btree_ptr *pp;
+ union xfs_btree_rec *recp;
+ struct xfs_btree_block *block;
+ int level;
+ struct xfs_buf *bp;
+ struct check_owner *co;
+ struct check_owner *n;
+ int i;
+ int error = 0;
/* Initialize scrub state */
bs.cur = cur;
@@ -614,7 +614,7 @@ xfs_scrub_btree(
/* Don't try to check a tree with a height we can't handle. */
if (cur->bc_nlevels > XFS_BTREE_MAXLEVELS) {
- xfs_scrub_btree_set_corrupt(sc, cur, 0);
+ xchk_btree_set_corrupt(sc, cur, 0);
goto out;
}
@@ -624,9 +624,9 @@ xfs_scrub_btree(
*/
level = cur->bc_nlevels - 1;
cur->bc_ops->init_ptr_from_cur(cur, &ptr);
- if (!xfs_scrub_btree_ptr_ok(&bs, cur->bc_nlevels, &ptr))
+ if (!xchk_btree_ptr_ok(&bs, cur->bc_nlevels, &ptr))
goto out;
- error = xfs_scrub_btree_get_block(&bs, level, &ptr, &block, &bp);
+ error = xchk_btree_get_block(&bs, level, &ptr, &block, &bp);
if (error || !block)
goto out;
@@ -639,7 +639,7 @@ xfs_scrub_btree(
/* End of leaf, pop back towards the root. */
if (cur->bc_ptrs[level] >
be16_to_cpu(block->bb_numrecs)) {
- xfs_scrub_btree_block_keys(&bs, level, block);
+ xchk_btree_block_keys(&bs, level, block);
if (level < cur->bc_nlevels - 1)
cur->bc_ptrs[level + 1]++;
level++;
@@ -647,14 +647,14 @@ xfs_scrub_btree(
}
/* Records in order for scrub? */
- xfs_scrub_btree_rec(&bs);
+ xchk_btree_rec(&bs);
/* Call out to the record checker. */
recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
error = bs.scrub_rec(&bs, recp);
if (error)
break;
- if (xfs_scrub_should_terminate(sc, &error) ||
+ if (xchk_should_terminate(sc, &error) ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
break;
@@ -664,7 +664,7 @@ xfs_scrub_btree(
/* End of node, pop back towards the root. */
if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
- xfs_scrub_btree_block_keys(&bs, level, block);
+ xchk_btree_block_keys(&bs, level, block);
if (level < cur->bc_nlevels - 1)
cur->bc_ptrs[level + 1]++;
level++;
@@ -672,16 +672,16 @@ xfs_scrub_btree(
}
/* Keys in order for scrub? */
- xfs_scrub_btree_key(&bs, level);
+ xchk_btree_key(&bs, level);
/* Drill another level deeper. */
pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
- if (!xfs_scrub_btree_ptr_ok(&bs, level, pp)) {
+ if (!xchk_btree_ptr_ok(&bs, level, pp)) {
cur->bc_ptrs[level]++;
continue;
}
level--;
- error = xfs_scrub_btree_get_block(&bs, level, pp, &block, &bp);
+ error = xchk_btree_get_block(&bs, level, pp, &block, &bp);
if (error || !block)
goto out;
@@ -692,7 +692,7 @@ out:
/* Process deferred owner checks on btree blocks. */
list_for_each_entry_safe(co, n, &bs.to_check, list) {
if (!error && bs.cur)
- error = xfs_scrub_btree_check_block_owner(&bs,
+ error = xchk_btree_check_block_owner(&bs,
co->level, co->daddr);
list_del(&co->list);
kmem_free(co);
diff --git a/fs/xfs/scrub/btree.h b/fs/xfs/scrub/btree.h
index 956627500f2c..aada763cd006 100644
--- a/fs/xfs/scrub/btree.h
+++ b/fs/xfs/scrub/btree.h
@@ -9,44 +9,43 @@
/* btree scrub */
/* Check for btree operation errors. */
-bool xfs_scrub_btree_process_error(struct xfs_scrub_context *sc,
+bool xchk_btree_process_error(struct xfs_scrub *sc,
struct xfs_btree_cur *cur, int level, int *error);
/* Check for btree xref operation errors. */
-bool xfs_scrub_btree_xref_process_error(struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur, int level,
- int *error);
+bool xchk_btree_xref_process_error(struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur, int level, int *error);
/* Check for btree corruption. */
-void xfs_scrub_btree_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_btree_set_corrupt(struct xfs_scrub *sc,
struct xfs_btree_cur *cur, int level);
/* Check for btree xref discrepancies. */
-void xfs_scrub_btree_xref_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_btree_xref_set_corrupt(struct xfs_scrub *sc,
struct xfs_btree_cur *cur, int level);
-struct xfs_scrub_btree;
-typedef int (*xfs_scrub_btree_rec_fn)(
- struct xfs_scrub_btree *bs,
+struct xchk_btree;
+typedef int (*xchk_btree_rec_fn)(
+ struct xchk_btree *bs,
union xfs_btree_rec *rec);
-struct xfs_scrub_btree {
+struct xchk_btree {
/* caller-provided scrub state */
- struct xfs_scrub_context *sc;
- struct xfs_btree_cur *cur;
- xfs_scrub_btree_rec_fn scrub_rec;
- struct xfs_owner_info *oinfo;
- void *private;
+ struct xfs_scrub *sc;
+ struct xfs_btree_cur *cur;
+ xchk_btree_rec_fn scrub_rec;
+ struct xfs_owner_info *oinfo;
+ void *private;
/* internal scrub state */
- union xfs_btree_rec lastrec;
- bool firstrec;
- union xfs_btree_key lastkey[XFS_BTREE_MAXLEVELS];
- bool firstkey[XFS_BTREE_MAXLEVELS];
- struct list_head to_check;
+ union xfs_btree_rec lastrec;
+ bool firstrec;
+ union xfs_btree_key lastkey[XFS_BTREE_MAXLEVELS];
+ bool firstkey[XFS_BTREE_MAXLEVELS];
+ struct list_head to_check;
};
-int xfs_scrub_btree(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
- xfs_scrub_btree_rec_fn scrub_fn,
- struct xfs_owner_info *oinfo, void *private);
+int xchk_btree(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
+ xchk_btree_rec_fn scrub_fn, struct xfs_owner_info *oinfo,
+ void *private);
#endif /* __XFS_SCRUB_BTREE_H__ */
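For reference while reviewing the rename, here is a minimal sketch of how a per-btree scrubber is expected to drive the renamed entry point. This is not part of the patch; xchk_example_rec and xchk_example_btree are hypothetical names, and only xchk_btree(), xchk_btree_set_corrupt(), and the xchk_btree_rec_fn typedef above are real.

/* Hypothetical record callback: inspect one record, flag problems. */
STATIC int
xchk_example_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	/* Examine *rec; if an invariant fails, mark the leaf corrupt. */
	if (0 /* placeholder for a real record check */)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	return 0;
}

/* Walk every record, assuming the caller already built cur and oinfo. */
STATIC int
xchk_example_btree(
	struct xfs_scrub	*sc,
	struct xfs_btree_cur	*cur,
	struct xfs_owner_info	*oinfo)
{
	return xchk_btree(sc, cur, xchk_example_rec, oinfo, NULL);
}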
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index 70e70c69f83f..346b02abccf7 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -68,20 +68,20 @@
/* Check for operational errors. */
static bool
-__xfs_scrub_process_error(
- struct xfs_scrub_context *sc,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- int *error,
- __u32 errflag,
- void *ret_ip)
+__xchk_process_error(
+ struct xfs_scrub *sc,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ int *error,
+ __u32 errflag,
+ void *ret_ip)
{
switch (*error) {
case 0:
return true;
case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */
- trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
+ trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break;
case -EFSBADCRC:
case -EFSCORRUPTED:
@@ -90,7 +90,7 @@ __xfs_scrub_process_error(
*error = 0;
/* fall through */
default:
- trace_xfs_scrub_op_error(sc, agno, bno, *error,
+ trace_xchk_op_error(sc, agno, bno, *error,
ret_ip);
break;
}
@@ -98,43 +98,43 @@ __xfs_scrub_process_error(
}
bool
-xfs_scrub_process_error(
- struct xfs_scrub_context *sc,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- int *error)
+xchk_process_error(
+ struct xfs_scrub *sc,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ int *error)
{
- return __xfs_scrub_process_error(sc, agno, bno, error,
+ return __xchk_process_error(sc, agno, bno, error,
XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}
bool
-xfs_scrub_xref_process_error(
- struct xfs_scrub_context *sc,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- int *error)
+xchk_xref_process_error(
+ struct xfs_scrub *sc,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ int *error)
{
- return __xfs_scrub_process_error(sc, agno, bno, error,
+ return __xchk_process_error(sc, agno, bno, error,
XFS_SCRUB_OFLAG_XFAIL, __return_address);
}
/* Check for operational errors for a file offset. */
static bool
-__xfs_scrub_fblock_process_error(
- struct xfs_scrub_context *sc,
- int whichfork,
- xfs_fileoff_t offset,
- int *error,
- __u32 errflag,
- void *ret_ip)
+__xchk_fblock_process_error(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_fileoff_t offset,
+ int *error,
+ __u32 errflag,
+ void *ret_ip)
{
switch (*error) {
case 0:
return true;
case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */
- trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
+ trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break;
case -EFSBADCRC:
case -EFSCORRUPTED:
@@ -143,7 +143,7 @@ __xfs_scrub_fblock_process_error(
*error = 0;
/* fall through */
default:
- trace_xfs_scrub_file_op_error(sc, whichfork, offset, *error,
+ trace_xchk_file_op_error(sc, whichfork, offset, *error,
ret_ip);
break;
}
@@ -151,24 +151,24 @@ __xfs_scrub_fblock_process_error(
}
bool
-xfs_scrub_fblock_process_error(
- struct xfs_scrub_context *sc,
- int whichfork,
- xfs_fileoff_t offset,
- int *error)
+xchk_fblock_process_error(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_fileoff_t offset,
+ int *error)
{
- return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error,
+ return __xchk_fblock_process_error(sc, whichfork, offset, error,
XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}
bool
-xfs_scrub_fblock_xref_process_error(
- struct xfs_scrub_context *sc,
- int whichfork,
- xfs_fileoff_t offset,
- int *error)
+xchk_fblock_xref_process_error(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_fileoff_t offset,
+ int *error)
{
- return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error,
+ return __xchk_fblock_process_error(sc, whichfork, offset, error,
XFS_SCRUB_OFLAG_XFAIL, __return_address);
}
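The per-fork variants above follow the same filtering idiom, keyed by fork and file offset instead of AG coordinates. A hypothetical sketch (xchk_example_fblock is not in this patch; it is assembled only from calls that appear elsewhere in this series):

STATIC int
xchk_example_fblock(
	struct xfs_scrub	*sc,
	xfs_dablk_t		lblk)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_dir3_data_read(sc->tp, sc->ip, lblk, -1, &bp);
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
		return error;
	if (!bp) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
		return 0;
	}
	xchk_buffer_recheck(sc, bp);
	xfs_trans_brelse(sc->tp, bp);
	return error;
}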
@@ -186,12 +186,12 @@ xfs_scrub_fblock_xref_process_error(
/* Record a block which could be optimized. */
void
-xfs_scrub_block_set_preen(
- struct xfs_scrub_context *sc,
- struct xfs_buf *bp)
+xchk_block_set_preen(
+ struct xfs_scrub *sc,
+ struct xfs_buf *bp)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
- trace_xfs_scrub_block_preen(sc, bp->b_bn, __return_address);
+ trace_xchk_block_preen(sc, bp->b_bn, __return_address);
}
/*
@@ -200,32 +200,32 @@ xfs_scrub_block_set_preen(
* the block location of the inode record itself.
*/
void
-xfs_scrub_ino_set_preen(
- struct xfs_scrub_context *sc,
- xfs_ino_t ino)
+xchk_ino_set_preen(
+ struct xfs_scrub *sc,
+ xfs_ino_t ino)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
- trace_xfs_scrub_ino_preen(sc, ino, __return_address);
+ trace_xchk_ino_preen(sc, ino, __return_address);
}
/* Record a corrupt block. */
void
-xfs_scrub_block_set_corrupt(
- struct xfs_scrub_context *sc,
- struct xfs_buf *bp)
+xchk_block_set_corrupt(
+ struct xfs_scrub *sc,
+ struct xfs_buf *bp)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
- trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
+ trace_xchk_block_error(sc, bp->b_bn, __return_address);
}
/* Record a corruption while cross-referencing. */
void
-xfs_scrub_block_xref_set_corrupt(
- struct xfs_scrub_context *sc,
- struct xfs_buf *bp)
+xchk_block_xref_set_corrupt(
+ struct xfs_scrub *sc,
+ struct xfs_buf *bp)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
- trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
+ trace_xchk_block_error(sc, bp->b_bn, __return_address);
}
/*
@@ -234,44 +234,44 @@ xfs_scrub_block_xref_set_corrupt(
* inode record itself.
*/
void
-xfs_scrub_ino_set_corrupt(
- struct xfs_scrub_context *sc,
- xfs_ino_t ino)
+xchk_ino_set_corrupt(
+ struct xfs_scrub *sc,
+ xfs_ino_t ino)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
- trace_xfs_scrub_ino_error(sc, ino, __return_address);
+ trace_xchk_ino_error(sc, ino, __return_address);
}
/* Record a corruption while cross-referencing with an inode. */
void
-xfs_scrub_ino_xref_set_corrupt(
- struct xfs_scrub_context *sc,
- xfs_ino_t ino)
+xchk_ino_xref_set_corrupt(
+ struct xfs_scrub *sc,
+ xfs_ino_t ino)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
- trace_xfs_scrub_ino_error(sc, ino, __return_address);
+ trace_xchk_ino_error(sc, ino, __return_address);
}
/* Record corruption in a block indexed by a file fork. */
void
-xfs_scrub_fblock_set_corrupt(
- struct xfs_scrub_context *sc,
- int whichfork,
- xfs_fileoff_t offset)
+xchk_fblock_set_corrupt(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_fileoff_t offset)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
- trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
+ trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}
/* Record a corruption while cross-referencing a fork block. */
void
-xfs_scrub_fblock_xref_set_corrupt(
- struct xfs_scrub_context *sc,
- int whichfork,
- xfs_fileoff_t offset)
+xchk_fblock_xref_set_corrupt(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_fileoff_t offset)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
- trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
+ trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}
/*
@@ -279,32 +279,32 @@ xfs_scrub_fblock_xref_set_corrupt(
* incorrect.
*/
void
-xfs_scrub_ino_set_warning(
- struct xfs_scrub_context *sc,
- xfs_ino_t ino)
+xchk_ino_set_warning(
+ struct xfs_scrub *sc,
+ xfs_ino_t ino)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
- trace_xfs_scrub_ino_warning(sc, ino, __return_address);
+ trace_xchk_ino_warning(sc, ino, __return_address);
}
/* Warn about a block indexed by a file fork that needs review. */
void
-xfs_scrub_fblock_set_warning(
- struct xfs_scrub_context *sc,
- int whichfork,
- xfs_fileoff_t offset)
+xchk_fblock_set_warning(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_fileoff_t offset)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
- trace_xfs_scrub_fblock_warning(sc, whichfork, offset, __return_address);
+ trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
}
/* Signal an incomplete scrub. */
void
-xfs_scrub_set_incomplete(
- struct xfs_scrub_context *sc)
+xchk_set_incomplete(
+ struct xfs_scrub *sc)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
- trace_xfs_scrub_incomplete(sc, __return_address);
+ trace_xchk_incomplete(sc, __return_address);
}
/*
@@ -312,20 +312,20 @@ xfs_scrub_set_incomplete(
* at least according to the reverse mapping data.
*/
-struct xfs_scrub_rmap_ownedby_info {
+struct xchk_rmap_ownedby_info {
struct xfs_owner_info *oinfo;
xfs_filblks_t *blocks;
};
STATIC int
-xfs_scrub_count_rmap_ownedby_irec(
- struct xfs_btree_cur *cur,
- struct xfs_rmap_irec *rec,
- void *priv)
+xchk_count_rmap_ownedby_irec(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *rec,
+ void *priv)
{
- struct xfs_scrub_rmap_ownedby_info *sroi = priv;
- bool irec_attr;
- bool oinfo_attr;
+ struct xchk_rmap_ownedby_info *sroi = priv;
+ bool irec_attr;
+ bool oinfo_attr;
irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;
@@ -344,19 +344,19 @@ xfs_scrub_count_rmap_ownedby_irec(
* The caller should pass us an rmapbt cursor.
*/
int
-xfs_scrub_count_rmap_ownedby_ag(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- struct xfs_owner_info *oinfo,
- xfs_filblks_t *blocks)
+xchk_count_rmap_ownedby_ag(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ struct xfs_owner_info *oinfo,
+ xfs_filblks_t *blocks)
{
- struct xfs_scrub_rmap_ownedby_info sroi;
+ struct xchk_rmap_ownedby_info sroi;
sroi.oinfo = oinfo;
*blocks = 0;
sroi.blocks = blocks;
- return xfs_rmap_query_all(cur, xfs_scrub_count_rmap_ownedby_irec,
+ return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
&sroi);
}
@@ -371,8 +371,8 @@ xfs_scrub_count_rmap_ownedby_ag(
/* Decide if we want to return an AG header read failure. */
static inline bool
want_ag_read_header_failure(
- struct xfs_scrub_context *sc,
- unsigned int type)
+ struct xfs_scrub *sc,
+ unsigned int type)
{
/* Return all AG header read failures when scanning btrees. */
if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
@@ -392,20 +392,20 @@ want_ag_read_header_failure(
/*
* Grab all the headers for an AG.
*
- * The headers should be released by xfs_scrub_ag_free, but as a fail
+ * The headers should be released by xchk_ag_free, but as a fail
* safe we attach all the buffers we grab to the scrub transaction so
* they'll all be freed when we cancel it.
*/
int
-xfs_scrub_ag_read_headers(
- struct xfs_scrub_context *sc,
- xfs_agnumber_t agno,
- struct xfs_buf **agi,
- struct xfs_buf **agf,
- struct xfs_buf **agfl)
-{
- struct xfs_mount *mp = sc->mp;
- int error;
+xchk_ag_read_headers(
+ struct xfs_scrub *sc,
+ xfs_agnumber_t agno,
+ struct xfs_buf **agi,
+ struct xfs_buf **agf,
+ struct xfs_buf **agfl)
+{
+ struct xfs_mount *mp = sc->mp;
+ int error;
error = xfs_ialloc_read_agi(mp, sc->tp, agno, agi);
if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
@@ -425,8 +425,8 @@ out:
/* Release all the AG btree cursors. */
void
-xfs_scrub_ag_btcur_free(
- struct xfs_scrub_ag *sa)
+xchk_ag_btcur_free(
+ struct xchk_ag *sa)
{
if (sa->refc_cur)
xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
@@ -451,12 +451,12 @@ xfs_scrub_ag_btcur_free(
/* Initialize all the btree cursors for an AG. */
int
-xfs_scrub_ag_btcur_init(
- struct xfs_scrub_context *sc,
- struct xfs_scrub_ag *sa)
+xchk_ag_btcur_init(
+ struct xfs_scrub *sc,
+ struct xchk_ag *sa)
{
- struct xfs_mount *mp = sc->mp;
- xfs_agnumber_t agno = sa->agno;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agnumber_t agno = sa->agno;
if (sa->agf_bp) {
/* Set up a bnobt cursor for cross-referencing. */
@@ -499,7 +499,7 @@ xfs_scrub_ag_btcur_init(
/* Set up a refcountbt cursor for cross-referencing. */
if (sa->agf_bp && xfs_sb_version_hasreflink(&mp->m_sb)) {
sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
- sa->agf_bp, agno, NULL);
+ sa->agf_bp, agno);
if (!sa->refc_cur)
goto err;
}
@@ -511,11 +511,11 @@ err:
/* Release the AG header context and btree cursors. */
void
-xfs_scrub_ag_free(
- struct xfs_scrub_context *sc,
- struct xfs_scrub_ag *sa)
+xchk_ag_free(
+ struct xfs_scrub *sc,
+ struct xchk_ag *sa)
{
- xfs_scrub_ag_btcur_free(sa);
+ xchk_ag_btcur_free(sa);
if (sa->agfl_bp) {
xfs_trans_brelse(sc->tp, sa->agfl_bp);
sa->agfl_bp = NULL;
@@ -543,30 +543,30 @@ xfs_scrub_ag_free(
* transaction ourselves.
*/
int
-xfs_scrub_ag_init(
- struct xfs_scrub_context *sc,
- xfs_agnumber_t agno,
- struct xfs_scrub_ag *sa)
+xchk_ag_init(
+ struct xfs_scrub *sc,
+ xfs_agnumber_t agno,
+ struct xchk_ag *sa)
{
- int error;
+ int error;
sa->agno = agno;
- error = xfs_scrub_ag_read_headers(sc, agno, &sa->agi_bp,
+ error = xchk_ag_read_headers(sc, agno, &sa->agi_bp,
&sa->agf_bp, &sa->agfl_bp);
if (error)
return error;
- return xfs_scrub_ag_btcur_init(sc, sa);
+ return xchk_ag_btcur_init(sc, sa);
}
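For context, a minimal sketch of the intended init/free pairing, the same pattern xchk_btree_check_block_owner already follows earlier in this series. The caller name is hypothetical; every call in the body appears in this patch.

STATIC int
xchk_example_xref(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno)
{
	int			error;

	/* Grab AG headers and btree cursors; buffers ride on sc->tp. */
	error = xchk_ag_init(sc, agno, &sc->sa);
	if (error)
		return error;

	/* Cross-reference against the space btrees. */
	xchk_xref_is_used_space(sc, agbno, 1);

	/* Release the cursors and headers when we're done. */
	xchk_ag_free(sc, &sc->sa);
	return 0;
}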
/*
* Grab the per-ag structure if we haven't already gotten it. Teardown of the
- * xfs_scrub_ag will release it for us.
+ * xchk_ag will release it for us.
*/
void
-xfs_scrub_perag_get(
+xchk_perag_get(
struct xfs_mount *mp,
- struct xfs_scrub_ag *sa)
+ struct xchk_ag *sa)
{
if (!sa->pag)
sa->pag = xfs_perag_get(mp, sa->agno);
@@ -585,9 +585,9 @@ xfs_scrub_perag_get(
* the metadata object.
*/
int
-xfs_scrub_trans_alloc(
- struct xfs_scrub_context *sc,
- uint resblks)
+xchk_trans_alloc(
+ struct xfs_scrub *sc,
+ uint resblks)
{
if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
@@ -598,25 +598,25 @@ xfs_scrub_trans_alloc(
/* Set us up with a transaction and an empty context. */
int
-xfs_scrub_setup_fs(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_fs(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- uint resblks;
+ uint resblks;
- resblks = xfs_repair_calc_ag_resblks(sc);
- return xfs_scrub_trans_alloc(sc, resblks);
+ resblks = xrep_calc_ag_resblks(sc);
+ return xchk_trans_alloc(sc, resblks);
}
/* Set us up with AG headers and btree cursors. */
int
-xfs_scrub_setup_ag_btree(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip,
- bool force_log)
+xchk_setup_ag_btree(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip,
+ bool force_log)
{
- struct xfs_mount *mp = sc->mp;
- int error;
+ struct xfs_mount *mp = sc->mp;
+ int error;
/*
* If the caller asks us to checkpoint the log, do so.  This
* expensive operation should be performed infrequently and only
* as a last resort.  Any caller that sets force_log should
* document why they need to do so.
* document why they need to do so.
*/
if (force_log) {
- error = xfs_scrub_checkpoint_log(mp);
+ error = xchk_checkpoint_log(mp);
if (error)
return error;
}
- error = xfs_scrub_setup_fs(sc, ip);
+ error = xchk_setup_fs(sc, ip);
if (error)
return error;
- return xfs_scrub_ag_init(sc, sc->sm->sm_agno, &sc->sa);
+ return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}
/* Push everything out of the log onto disk. */
int
-xfs_scrub_checkpoint_log(
+xchk_checkpoint_log(
struct xfs_mount *mp)
{
int error;
@@ -657,14 +657,14 @@ xfs_scrub_checkpoint_log(
* The inode is not locked.
*/
int
-xfs_scrub_get_inode(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip_in)
+xchk_get_inode(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip_in)
{
- struct xfs_imap imap;
- struct xfs_mount *mp = sc->mp;
- struct xfs_inode *ip = NULL;
- int error;
+ struct xfs_imap imap;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_inode *ip = NULL;
+ int error;
/* We want to scan the inode we already had opened. */
if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
@@ -704,14 +704,14 @@ xfs_scrub_get_inode(
error = -EFSCORRUPTED;
/* fall through */
default:
- trace_xfs_scrub_op_error(sc,
+ trace_xchk_op_error(sc,
XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
error, __return_address);
return error;
}
if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
- iput(VFS_I(ip));
+ xfs_irele(ip);
return -ENOENT;
}
@@ -721,21 +721,21 @@ xfs_scrub_get_inode(
/* Set us up to scrub a file's contents. */
int
-xfs_scrub_setup_inode_contents(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip,
- unsigned int resblks)
+xchk_setup_inode_contents(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip,
+ unsigned int resblks)
{
- int error;
+ int error;
- error = xfs_scrub_get_inode(sc, ip);
+ error = xchk_get_inode(sc, ip);
if (error)
return error;
/* Got the inode, lock it and we're ready to go. */
sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
xfs_ilock(sc->ip, sc->ilock_flags);
- error = xfs_scrub_trans_alloc(sc, resblks);
+ error = xchk_trans_alloc(sc, resblks);
if (error)
goto out;
sc->ilock_flags |= XFS_ILOCK_EXCL;
@@ -752,13 +752,13 @@ out:
* the cursor and skip the check.
*/
bool
-xfs_scrub_should_check_xref(
- struct xfs_scrub_context *sc,
- int *error,
- struct xfs_btree_cur **curpp)
+xchk_should_check_xref(
+ struct xfs_scrub *sc,
+ int *error,
+ struct xfs_btree_cur **curpp)
{
/* No point in xref if we already know we're corrupt. */
- if (xfs_scrub_skip_xref(sc->sm))
+ if (xchk_skip_xref(sc->sm))
return false;
if (*error == 0)
@@ -775,7 +775,7 @@ xfs_scrub_should_check_xref(
}
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
- trace_xfs_scrub_xref_error(sc, *error, __return_address);
+ trace_xchk_xref_error(sc, *error, __return_address);
/*
* Errors encountered during cross-referencing with another
@@ -787,25 +787,25 @@ xfs_scrub_should_check_xref(
/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
-xfs_scrub_buffer_recheck(
- struct xfs_scrub_context *sc,
- struct xfs_buf *bp)
+xchk_buffer_recheck(
+ struct xfs_scrub *sc,
+ struct xfs_buf *bp)
{
- xfs_failaddr_t fa;
+ xfs_failaddr_t fa;
if (bp->b_ops == NULL) {
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
return;
}
if (bp->b_ops->verify_struct == NULL) {
- xfs_scrub_set_incomplete(sc);
+ xchk_set_incomplete(sc);
return;
}
fa = bp->b_ops->verify_struct(bp);
if (!fa)
return;
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
- trace_xfs_scrub_block_error(sc, bp->b_bn, fa);
+ trace_xchk_block_error(sc, bp->b_bn, fa);
}
/*
@@ -813,38 +813,38 @@ xfs_scrub_buffer_recheck(
* pointed to by sc->ip and the ILOCK must be held.
*/
int
-xfs_scrub_metadata_inode_forks(
- struct xfs_scrub_context *sc)
+xchk_metadata_inode_forks(
+ struct xfs_scrub *sc)
{
- __u32 smtype;
- bool shared;
- int error;
+ __u32 smtype;
+ bool shared;
+ int error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return 0;
/* Metadata inodes don't live on the rt device. */
if (sc->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
- xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_set_corrupt(sc, sc->ip->i_ino);
return 0;
}
/* They should never participate in reflink. */
if (xfs_is_reflink_inode(sc->ip)) {
- xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_set_corrupt(sc, sc->ip->i_ino);
return 0;
}
/* They also should never have extended attributes. */
if (xfs_inode_hasattr(sc->ip)) {
- xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_set_corrupt(sc, sc->ip->i_ino);
return 0;
}
/* Invoke the data fork scrubber. */
smtype = sc->sm->sm_type;
sc->sm->sm_type = XFS_SCRUB_TYPE_BMBTD;
- error = xfs_scrub_bmap_data(sc);
+ error = xchk_bmap_data(sc);
sc->sm->sm_type = smtype;
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error;
@@ -853,11 +853,11 @@ xfs_scrub_metadata_inode_forks(
if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
&shared);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0,
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
&error))
return error;
if (shared)
- xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_set_corrupt(sc, sc->ip->i_ino);
}
return error;
@@ -871,7 +871,7 @@ xfs_scrub_metadata_inode_forks(
* we can't.
*/
int
-xfs_scrub_ilock_inverted(
+xchk_ilock_inverted(
struct xfs_inode *ip,
uint lock_mode)
{
diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
index 2172bd5361e2..2d4324d12f9a 100644
--- a/fs/xfs/scrub/common.h
+++ b/fs/xfs/scrub/common.h
@@ -12,8 +12,8 @@
* Note that we're careful not to make any judgements about *error.
*/
static inline bool
-xfs_scrub_should_terminate(
- struct xfs_scrub_context *sc,
+xchk_should_terminate(
+ struct xfs_scrub *sc,
int *error)
{
if (fatal_signal_pending(current)) {
@@ -24,121 +24,118 @@ xfs_scrub_should_terminate(
return false;
}
-int xfs_scrub_trans_alloc(struct xfs_scrub_context *sc, uint resblks);
-bool xfs_scrub_process_error(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
+int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks);
+bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno,
xfs_agblock_t bno, int *error);
-bool xfs_scrub_fblock_process_error(struct xfs_scrub_context *sc, int whichfork,
+bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset, int *error);
-bool xfs_scrub_xref_process_error(struct xfs_scrub_context *sc,
+bool xchk_xref_process_error(struct xfs_scrub *sc,
xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
-bool xfs_scrub_fblock_xref_process_error(struct xfs_scrub_context *sc,
+bool xchk_fblock_xref_process_error(struct xfs_scrub *sc,
int whichfork, xfs_fileoff_t offset, int *error);
-void xfs_scrub_block_set_preen(struct xfs_scrub_context *sc,
+void xchk_block_set_preen(struct xfs_scrub *sc,
struct xfs_buf *bp);
-void xfs_scrub_ino_set_preen(struct xfs_scrub_context *sc, xfs_ino_t ino);
+void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);
-void xfs_scrub_block_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_block_set_corrupt(struct xfs_scrub *sc,
struct xfs_buf *bp);
-void xfs_scrub_ino_set_corrupt(struct xfs_scrub_context *sc, xfs_ino_t ino);
-void xfs_scrub_fblock_set_corrupt(struct xfs_scrub_context *sc, int whichfork,
+void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino);
+void xchk_fblock_set_corrupt(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset);
-void xfs_scrub_block_xref_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_block_xref_set_corrupt(struct xfs_scrub *sc,
struct xfs_buf *bp);
-void xfs_scrub_ino_xref_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_ino_xref_set_corrupt(struct xfs_scrub *sc,
xfs_ino_t ino);
-void xfs_scrub_fblock_xref_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_fblock_xref_set_corrupt(struct xfs_scrub *sc,
int whichfork, xfs_fileoff_t offset);
-void xfs_scrub_ino_set_warning(struct xfs_scrub_context *sc, xfs_ino_t ino);
-void xfs_scrub_fblock_set_warning(struct xfs_scrub_context *sc, int whichfork,
+void xchk_ino_set_warning(struct xfs_scrub *sc, xfs_ino_t ino);
+void xchk_fblock_set_warning(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset);
-void xfs_scrub_set_incomplete(struct xfs_scrub_context *sc);
-int xfs_scrub_checkpoint_log(struct xfs_mount *mp);
+void xchk_set_incomplete(struct xfs_scrub *sc);
+int xchk_checkpoint_log(struct xfs_mount *mp);
/* Are we set up for a cross-referencing check? */
-bool xfs_scrub_should_check_xref(struct xfs_scrub_context *sc, int *error,
+bool xchk_should_check_xref(struct xfs_scrub *sc, int *error,
struct xfs_btree_cur **curpp);
/* Setup functions */
-int xfs_scrub_setup_fs(struct xfs_scrub_context *sc, struct xfs_inode *ip);
-int xfs_scrub_setup_ag_allocbt(struct xfs_scrub_context *sc,
+int xchk_setup_fs(struct xfs_scrub *sc, struct xfs_inode *ip);
+int xchk_setup_ag_allocbt(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_ag_iallocbt(struct xfs_scrub_context *sc,
+int xchk_setup_ag_iallocbt(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_ag_rmapbt(struct xfs_scrub_context *sc,
+int xchk_setup_ag_rmapbt(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_ag_refcountbt(struct xfs_scrub_context *sc,
+int xchk_setup_ag_refcountbt(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_inode(struct xfs_scrub_context *sc,
+int xchk_setup_inode(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_inode_bmap(struct xfs_scrub_context *sc,
+int xchk_setup_inode_bmap(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_inode_bmap_data(struct xfs_scrub_context *sc,
+int xchk_setup_inode_bmap_data(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_directory(struct xfs_scrub_context *sc,
+int xchk_setup_directory(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_xattr(struct xfs_scrub_context *sc,
+int xchk_setup_xattr(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_symlink(struct xfs_scrub_context *sc,
+int xchk_setup_symlink(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_parent(struct xfs_scrub_context *sc,
+int xchk_setup_parent(struct xfs_scrub *sc,
struct xfs_inode *ip);
#ifdef CONFIG_XFS_RT
-int xfs_scrub_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip);
+int xchk_setup_rt(struct xfs_scrub *sc, struct xfs_inode *ip);
#else
static inline int
-xfs_scrub_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip)
+xchk_setup_rt(struct xfs_scrub *sc, struct xfs_inode *ip)
{
return -ENOENT;
}
#endif
#ifdef CONFIG_XFS_QUOTA
-int xfs_scrub_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip);
+int xchk_setup_quota(struct xfs_scrub *sc, struct xfs_inode *ip);
#else
static inline int
-xfs_scrub_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip)
+xchk_setup_quota(struct xfs_scrub *sc, struct xfs_inode *ip)
{
return -ENOENT;
}
#endif
-void xfs_scrub_ag_free(struct xfs_scrub_context *sc, struct xfs_scrub_ag *sa);
-int xfs_scrub_ag_init(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
- struct xfs_scrub_ag *sa);
-void xfs_scrub_perag_get(struct xfs_mount *mp, struct xfs_scrub_ag *sa);
-int xfs_scrub_ag_read_headers(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
- struct xfs_buf **agi, struct xfs_buf **agf,
- struct xfs_buf **agfl);
-void xfs_scrub_ag_btcur_free(struct xfs_scrub_ag *sa);
-int xfs_scrub_ag_btcur_init(struct xfs_scrub_context *sc,
- struct xfs_scrub_ag *sa);
-int xfs_scrub_count_rmap_ownedby_ag(struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- struct xfs_owner_info *oinfo,
- xfs_filblks_t *blocks);
+void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa);
+int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
+ struct xchk_ag *sa);
+void xchk_perag_get(struct xfs_mount *mp, struct xchk_ag *sa);
+int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
+ struct xfs_buf **agi, struct xfs_buf **agf,
+ struct xfs_buf **agfl);
+void xchk_ag_btcur_free(struct xchk_ag *sa);
+int xchk_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
+int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
+ struct xfs_owner_info *oinfo, xfs_filblks_t *blocks);
-int xfs_scrub_setup_ag_btree(struct xfs_scrub_context *sc,
- struct xfs_inode *ip, bool force_log);
-int xfs_scrub_get_inode(struct xfs_scrub_context *sc, struct xfs_inode *ip_in);
-int xfs_scrub_setup_inode_contents(struct xfs_scrub_context *sc,
- struct xfs_inode *ip, unsigned int resblks);
-void xfs_scrub_buffer_recheck(struct xfs_scrub_context *sc, struct xfs_buf *bp);
+int xchk_setup_ag_btree(struct xfs_scrub *sc, struct xfs_inode *ip,
+ bool force_log);
+int xchk_get_inode(struct xfs_scrub *sc, struct xfs_inode *ip_in);
+int xchk_setup_inode_contents(struct xfs_scrub *sc, struct xfs_inode *ip,
+ unsigned int resblks);
+void xchk_buffer_recheck(struct xfs_scrub *sc, struct xfs_buf *bp);
/*
* Don't bother cross-referencing if we already found corruption or cross
* referencing discrepancies.
*/
-static inline bool xfs_scrub_skip_xref(struct xfs_scrub_metadata *sm)
+static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
{
return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
XFS_SCRUB_OFLAG_XCORRUPT);
}
-int xfs_scrub_metadata_inode_forks(struct xfs_scrub_context *sc);
-int xfs_scrub_ilock_inverted(struct xfs_inode *ip, uint lock_mode);
+int xchk_metadata_inode_forks(struct xfs_scrub *sc);
+int xchk_ilock_inverted(struct xfs_inode *ip, uint lock_mode);
#endif /* __XFS_SCRUB_COMMON_H__ */
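To illustrate the guard these prototypes support, a sketch loosely modeled on the used-space cross-reference; the body here is an assumption, not code from this patch. If a prior check already flagged corruption, or the lookup itself fails, xchk_should_check_xref() nullifies the cursor and the check is skipped rather than failing the whole scrub.

STATIC void
xchk_example_xref_used(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	bool			is_freesp;
	int			error;

	if (!sc->sa.bno_cur || xchk_skip_xref(sc->sm))
		return;

	/* Ask the bnobt whether any part of this extent is free space. */
	error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len,
			&is_freesp);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (is_freesp)
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}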
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index d700c4d4d4ef..f1260b4bfdee 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -35,12 +35,12 @@
* operational errors in common.c.
*/
bool
-xfs_scrub_da_process_error(
- struct xfs_scrub_da_btree *ds,
- int level,
- int *error)
+xchk_da_process_error(
+ struct xchk_da_btree *ds,
+ int level,
+ int *error)
{
- struct xfs_scrub_context *sc = ds->sc;
+ struct xfs_scrub *sc = ds->sc;
if (*error == 0)
return true;
@@ -48,7 +48,7 @@ xfs_scrub_da_process_error(
switch (*error) {
case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */
- trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
+ trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break;
case -EFSBADCRC:
case -EFSCORRUPTED:
@@ -57,7 +57,7 @@ xfs_scrub_da_process_error(
*error = 0;
/* fall through */
default:
- trace_xfs_scrub_file_op_error(sc, ds->dargs.whichfork,
+ trace_xchk_file_op_error(sc, ds->dargs.whichfork,
xfs_dir2_da_to_db(ds->dargs.geo,
ds->state->path.blk[level].blkno),
*error, __return_address);
@@ -71,15 +71,15 @@ xfs_scrub_da_process_error(
* operational errors in common.c.
*/
void
-xfs_scrub_da_set_corrupt(
- struct xfs_scrub_da_btree *ds,
- int level)
+xchk_da_set_corrupt(
+ struct xchk_da_btree *ds,
+ int level)
{
- struct xfs_scrub_context *sc = ds->sc;
+ struct xfs_scrub *sc = ds->sc;
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
- trace_xfs_scrub_fblock_error(sc, ds->dargs.whichfork,
+ trace_xchk_fblock_error(sc, ds->dargs.whichfork,
xfs_dir2_da_to_db(ds->dargs.geo,
ds->state->path.blk[level].blkno),
__return_address);
@@ -87,14 +87,14 @@ xfs_scrub_da_set_corrupt(
/* Find an entry at a certain level in a da btree. */
STATIC void *
-xfs_scrub_da_btree_entry(
- struct xfs_scrub_da_btree *ds,
- int level,
- int rec)
+xchk_da_btree_entry(
+ struct xchk_da_btree *ds,
+ int level,
+ int rec)
{
- char *ents;
- struct xfs_da_state_blk *blk;
- void *baddr;
+ char *ents;
+ struct xfs_da_state_blk *blk;
+ void *baddr;
/* Dispatch the entry finding function. */
blk = &ds->state->path.blk[level];
@@ -123,8 +123,8 @@ xfs_scrub_da_btree_entry(
/* Scrub a da btree hash (key). */
int
-xfs_scrub_da_btree_hash(
- struct xfs_scrub_da_btree *ds,
+xchk_da_btree_hash(
+ struct xchk_da_btree *ds,
int level,
__be32 *hashp)
{
@@ -136,7 +136,7 @@ xfs_scrub_da_btree_hash(
/* Is this hash in order? */
hash = be32_to_cpu(*hashp);
if (hash < ds->hashes[level])
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
ds->hashes[level] = hash;
if (level == 0)
@@ -144,10 +144,10 @@ xfs_scrub_da_btree_hash(
/* Is this hash no larger than the parent hash? */
blks = ds->state->path.blk;
- entry = xfs_scrub_da_btree_entry(ds, level - 1, blks[level - 1].index);
+ entry = xchk_da_btree_entry(ds, level - 1, blks[level - 1].index);
parent_hash = be32_to_cpu(entry->hashval);
if (parent_hash < hash)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
return 0;
}
@@ -157,13 +157,13 @@ xfs_scrub_da_btree_hash(
* pointer.
*/
STATIC bool
-xfs_scrub_da_btree_ptr_ok(
- struct xfs_scrub_da_btree *ds,
- int level,
- xfs_dablk_t blkno)
+xchk_da_btree_ptr_ok(
+ struct xchk_da_btree *ds,
+ int level,
+ xfs_dablk_t blkno)
{
if (blkno < ds->lowest || (ds->highest != 0 && blkno >= ds->highest)) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
return false;
}
@@ -176,7 +176,7 @@ xfs_scrub_da_btree_ptr_ok(
* leaf1, we must multiplex the verifiers.
*/
static void
-xfs_scrub_da_btree_read_verify(
+xchk_da_btree_read_verify(
struct xfs_buf *bp)
{
struct xfs_da_blkinfo *info = bp->b_addr;
@@ -198,7 +198,7 @@ xfs_scrub_da_btree_read_verify(
}
}
static void
-xfs_scrub_da_btree_write_verify(
+xchk_da_btree_write_verify(
struct xfs_buf *bp)
{
struct xfs_da_blkinfo *info = bp->b_addr;
@@ -220,7 +220,7 @@ xfs_scrub_da_btree_write_verify(
}
}
static void *
-xfs_scrub_da_btree_verify(
+xchk_da_btree_verify(
struct xfs_buf *bp)
{
struct xfs_da_blkinfo *info = bp->b_addr;
@@ -236,23 +236,23 @@ xfs_scrub_da_btree_verify(
}
}
-static const struct xfs_buf_ops xfs_scrub_da_btree_buf_ops = {
- .name = "xfs_scrub_da_btree",
- .verify_read = xfs_scrub_da_btree_read_verify,
- .verify_write = xfs_scrub_da_btree_write_verify,
- .verify_struct = xfs_scrub_da_btree_verify,
+static const struct xfs_buf_ops xchk_da_btree_buf_ops = {
+ .name = "xchk_da_btree",
+ .verify_read = xchk_da_btree_read_verify,
+ .verify_write = xchk_da_btree_write_verify,
+ .verify_struct = xchk_da_btree_verify,
};
/* Check a block's sibling. */
STATIC int
-xfs_scrub_da_btree_block_check_sibling(
- struct xfs_scrub_da_btree *ds,
- int level,
- int direction,
- xfs_dablk_t sibling)
+xchk_da_btree_block_check_sibling(
+ struct xchk_da_btree *ds,
+ int level,
+ int direction,
+ xfs_dablk_t sibling)
{
- int retval;
- int error;
+ int retval;
+ int error;
memcpy(&ds->state->altpath, &ds->state->path,
sizeof(ds->state->altpath));
@@ -265,7 +265,7 @@ xfs_scrub_da_btree_block_check_sibling(
error = xfs_da3_path_shift(ds->state, &ds->state->altpath,
direction, false, &retval);
if (error == 0 && retval == 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
error = 0;
goto out;
}
@@ -273,19 +273,19 @@ xfs_scrub_da_btree_block_check_sibling(
/* Move the alternate cursor one block in the direction given. */
error = xfs_da3_path_shift(ds->state, &ds->state->altpath,
direction, false, &retval);
- if (!xfs_scrub_da_process_error(ds, level, &error))
+ if (!xchk_da_process_error(ds, level, &error))
return error;
if (retval) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
return error;
}
if (ds->state->altpath.blk[level].bp)
- xfs_scrub_buffer_recheck(ds->sc,
+ xchk_buffer_recheck(ds->sc,
ds->state->altpath.blk[level].bp);
/* Compare upper level pointer to sibling pointer. */
if (ds->state->altpath.blk[level].blkno != sibling)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
xfs_trans_brelse(ds->dargs.trans, ds->state->altpath.blk[level].bp);
out:
return error;
@@ -293,14 +293,14 @@ out:
/* Check a block's sibling pointers. */
STATIC int
-xfs_scrub_da_btree_block_check_siblings(
- struct xfs_scrub_da_btree *ds,
- int level,
- struct xfs_da_blkinfo *hdr)
+xchk_da_btree_block_check_siblings(
+ struct xchk_da_btree *ds,
+ int level,
+ struct xfs_da_blkinfo *hdr)
{
- xfs_dablk_t forw;
- xfs_dablk_t back;
- int error = 0;
+ xfs_dablk_t forw;
+ xfs_dablk_t back;
+ int error = 0;
forw = be32_to_cpu(hdr->forw);
back = be32_to_cpu(hdr->back);
@@ -308,7 +308,7 @@ xfs_scrub_da_btree_block_check_siblings(
/* Top level blocks should not have sibling pointers. */
if (level == 0) {
if (forw != 0 || back != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
return 0;
}
@@ -316,10 +316,10 @@ xfs_scrub_da_btree_block_check_siblings(
* Check back (left) and forw (right) pointers. These functions
* absorb error codes for us.
*/
- error = xfs_scrub_da_btree_block_check_sibling(ds, level, 0, back);
+ error = xchk_da_btree_block_check_sibling(ds, level, 0, back);
if (error)
goto out;
- error = xfs_scrub_da_btree_block_check_sibling(ds, level, 1, forw);
+ error = xchk_da_btree_block_check_sibling(ds, level, 1, forw);
out:
memset(&ds->state->altpath, 0, sizeof(ds->state->altpath));
@@ -328,8 +328,8 @@ out:
/* Load a dir/attribute block from a btree. */
STATIC int
-xfs_scrub_da_btree_block(
- struct xfs_scrub_da_btree *ds,
+xchk_da_btree_block(
+ struct xchk_da_btree *ds,
int level,
xfs_dablk_t blkno)
{
@@ -355,17 +355,17 @@ xfs_scrub_da_btree_block(
/* Check the pointer. */
blk->blkno = blkno;
- if (!xfs_scrub_da_btree_ptr_ok(ds, level, blkno))
+ if (!xchk_da_btree_ptr_ok(ds, level, blkno))
goto out_nobuf;
/* Read the buffer. */
error = xfs_da_read_buf(dargs->trans, dargs->dp, blk->blkno, -2,
&blk->bp, dargs->whichfork,
- &xfs_scrub_da_btree_buf_ops);
- if (!xfs_scrub_da_process_error(ds, level, &error))
+ &xchk_da_btree_buf_ops);
+ if (!xchk_da_process_error(ds, level, &error))
goto out_nobuf;
if (blk->bp)
- xfs_scrub_buffer_recheck(ds->sc, blk->bp);
+ xchk_buffer_recheck(ds->sc, blk->bp);
/*
* We didn't find a dir btree root block, which means that
@@ -378,7 +378,7 @@ xfs_scrub_da_btree_block(
/* It's /not/ ok for attr trees not to have a da btree. */
if (blk->bp == NULL) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out_nobuf;
}
@@ -388,17 +388,17 @@ xfs_scrub_da_btree_block(
/* We only started zeroing the header on v5 filesystems. */
if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb) && hdr3->hdr.pad)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
/* Check the owner. */
if (xfs_sb_version_hascrc(&ip->i_mount->m_sb)) {
owner = be64_to_cpu(hdr3->owner);
if (owner != ip->i_ino)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
}
/* Check the siblings. */
- error = xfs_scrub_da_btree_block_check_siblings(ds, level, &hdr3->hdr);
+ error = xchk_da_btree_block_check_siblings(ds, level, &hdr3->hdr);
if (error)
goto out;
@@ -411,7 +411,7 @@ xfs_scrub_da_btree_block(
blk->magic = XFS_ATTR_LEAF_MAGIC;
blk->hashval = xfs_attr_leaf_lasthash(blk->bp, pmaxrecs);
if (ds->tree_level != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
break;
case XFS_DIR2_LEAFN_MAGIC:
case XFS_DIR3_LEAFN_MAGIC:
@@ -420,7 +420,7 @@ xfs_scrub_da_btree_block(
blk->magic = XFS_DIR2_LEAFN_MAGIC;
blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs);
if (ds->tree_level != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
break;
case XFS_DIR2_LEAF1_MAGIC:
case XFS_DIR3_LEAF1_MAGIC:
@@ -429,7 +429,7 @@ xfs_scrub_da_btree_block(
blk->magic = XFS_DIR2_LEAF1_MAGIC;
blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs);
if (ds->tree_level != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
break;
case XFS_DA_NODE_MAGIC:
case XFS_DA3_NODE_MAGIC:
@@ -443,13 +443,13 @@ xfs_scrub_da_btree_block(
blk->hashval = be32_to_cpu(btree[*pmaxrecs - 1].hashval);
if (level == 0) {
if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out_freebp;
}
ds->tree_level = nodehdr.level;
} else {
if (ds->tree_level != nodehdr.level) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out_freebp;
}
}
@@ -457,7 +457,7 @@ xfs_scrub_da_btree_block(
/* XXX: Check hdr3.pad32 once we know how to fix it. */
break;
default:
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out_freebp;
}
@@ -473,13 +473,13 @@ out_nobuf:
/* Visit all nodes and leaves of a da btree. */
int
-xfs_scrub_da_btree(
- struct xfs_scrub_context *sc,
+xchk_da_btree(
+ struct xfs_scrub *sc,
int whichfork,
- xfs_scrub_da_btree_rec_fn scrub_fn,
+ xchk_da_btree_rec_fn scrub_fn,
void *private)
{
- struct xfs_scrub_da_btree ds = {};
+ struct xchk_da_btree ds = {};
struct xfs_mount *mp = sc->mp;
struct xfs_da_state_blk *blks;
struct xfs_da_node_entry *key;
@@ -517,7 +517,7 @@ xfs_scrub_da_btree(
/* Find the root of the da tree, if present. */
blks = ds.state->path.blk;
- error = xfs_scrub_da_btree_block(&ds, level, blkno);
+ error = xchk_da_btree_block(&ds, level, blkno);
if (error)
goto out_state;
/*
@@ -542,12 +542,12 @@ xfs_scrub_da_btree(
}
/* Dispatch record scrubbing. */
- rec = xfs_scrub_da_btree_entry(&ds, level,
+ rec = xchk_da_btree_entry(&ds, level,
blks[level].index);
error = scrub_fn(&ds, level, rec);
if (error)
break;
- if (xfs_scrub_should_terminate(sc, &error) ||
+ if (xchk_should_terminate(sc, &error) ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
break;
@@ -566,8 +566,8 @@ xfs_scrub_da_btree(
}
/* Hashes in order for scrub? */
- key = xfs_scrub_da_btree_entry(&ds, level, blks[level].index);
- error = xfs_scrub_da_btree_hash(&ds, level, &key->hashval);
+ key = xchk_da_btree_entry(&ds, level, blks[level].index);
+ error = xchk_da_btree_hash(&ds, level, &key->hashval);
if (error)
goto out;
@@ -575,7 +575,7 @@ xfs_scrub_da_btree(
blkno = be32_to_cpu(key->before);
level++;
ds.tree_level--;
- error = xfs_scrub_da_btree_block(&ds, level, blkno);
+ error = xchk_da_btree_block(&ds, level, blkno);
if (error)
goto out;
if (blks[level].bp == NULL)
diff --git a/fs/xfs/scrub/dabtree.h b/fs/xfs/scrub/dabtree.h
index 365f9f0019e6..cb3f0003245b 100644
--- a/fs/xfs/scrub/dabtree.h
+++ b/fs/xfs/scrub/dabtree.h
@@ -8,13 +8,13 @@
/* dir/attr btree */
-struct xfs_scrub_da_btree {
- struct xfs_da_args dargs;
- xfs_dahash_t hashes[XFS_DA_NODE_MAXDEPTH];
- int maxrecs[XFS_DA_NODE_MAXDEPTH];
- struct xfs_da_state *state;
- struct xfs_scrub_context *sc;
- void *private;
+struct xchk_da_btree {
+ struct xfs_da_args dargs;
+ xfs_dahash_t hashes[XFS_DA_NODE_MAXDEPTH];
+ int maxrecs[XFS_DA_NODE_MAXDEPTH];
+ struct xfs_da_state *state;
+ struct xfs_scrub *sc;
+ void *private;
/*
* Lowest and highest directory block address in which we expect
@@ -22,24 +22,23 @@ struct xfs_scrub_da_btree {
* (presumably) means between LEAF_OFFSET and FREE_OFFSET; for
* attributes there is no limit.
*/
- xfs_dablk_t lowest;
- xfs_dablk_t highest;
+ xfs_dablk_t lowest;
+ xfs_dablk_t highest;
- int tree_level;
+ int tree_level;
};
-typedef int (*xfs_scrub_da_btree_rec_fn)(struct xfs_scrub_da_btree *ds,
+typedef int (*xchk_da_btree_rec_fn)(struct xchk_da_btree *ds,
int level, void *rec);
/* Check for da btree operation errors. */
-bool xfs_scrub_da_process_error(struct xfs_scrub_da_btree *ds, int level, int *error);
+bool xchk_da_process_error(struct xchk_da_btree *ds, int level, int *error);
/* Check for da btree corruption. */
-void xfs_scrub_da_set_corrupt(struct xfs_scrub_da_btree *ds, int level);
+void xchk_da_set_corrupt(struct xchk_da_btree *ds, int level);
-int xfs_scrub_da_btree_hash(struct xfs_scrub_da_btree *ds, int level,
- __be32 *hashp);
-int xfs_scrub_da_btree(struct xfs_scrub_context *sc, int whichfork,
- xfs_scrub_da_btree_rec_fn scrub_fn, void *private);
+int xchk_da_btree_hash(struct xchk_da_btree *ds, int level, __be32 *hashp);
+int xchk_da_btree(struct xfs_scrub *sc, int whichfork,
+ xchk_da_btree_rec_fn scrub_fn, void *private);
#endif /* __XFS_SCRUB_DABTREE_H__ */
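A minimal sketch of a da btree caller (hypothetical names; the real pattern appears in dir.c just below), showing the one invariant every rec_fn is expected to re-check, hash order:

/* Hypothetical leaf callback: verify hash order, nothing else. */
STATIC int
xchk_example_da_rec(
	struct xchk_da_btree	*ds,
	int			level,
	void			*rec)
{
	struct xfs_dir2_leaf_entry	*ent = rec;

	return xchk_da_btree_hash(ds, level, &ent->hashval);
}

STATIC int
xchk_example_da(
	struct xfs_scrub	*sc)
{
	return xchk_da_btree(sc, XFS_DATA_FORK, xchk_example_da_rec, NULL);
}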
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
index 86324775fc9b..cd3e4d768a18 100644
--- a/fs/xfs/scrub/dir.c
+++ b/fs/xfs/scrub/dir.c
@@ -31,40 +31,40 @@
/* Set us up to scrub directories. */
int
-xfs_scrub_setup_directory(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_directory(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- return xfs_scrub_setup_inode_contents(sc, ip, 0);
+ return xchk_setup_inode_contents(sc, ip, 0);
}
/* Directories */
/* Scrub a directory entry. */
-struct xfs_scrub_dir_ctx {
+struct xchk_dir_ctx {
/* VFS fill-directory iterator */
- struct dir_context dir_iter;
+ struct dir_context dir_iter;
- struct xfs_scrub_context *sc;
+ struct xfs_scrub *sc;
};
/* Check that an inode's mode matches a given DT_ type. */
STATIC int
-xfs_scrub_dir_check_ftype(
- struct xfs_scrub_dir_ctx *sdc,
- xfs_fileoff_t offset,
- xfs_ino_t inum,
- int dtype)
+xchk_dir_check_ftype(
+ struct xchk_dir_ctx *sdc,
+ xfs_fileoff_t offset,
+ xfs_ino_t inum,
+ int dtype)
{
- struct xfs_mount *mp = sdc->sc->mp;
- struct xfs_inode *ip;
- int ino_dtype;
- int error = 0;
+ struct xfs_mount *mp = sdc->sc->mp;
+ struct xfs_inode *ip;
+ int ino_dtype;
+ int error = 0;
if (!xfs_sb_version_hasftype(&mp->m_sb)) {
if (dtype != DT_UNKNOWN && dtype != DT_DIR)
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset);
goto out;
}
@@ -78,7 +78,7 @@ xfs_scrub_dir_check_ftype(
* inodes can trigger immediate inactive cleanup of the inode.
*/
error = xfs_iget(mp, sdc->sc->tp, inum, 0, 0, &ip);
- if (!xfs_scrub_fblock_xref_process_error(sdc->sc, XFS_DATA_FORK, offset,
+ if (!xchk_fblock_xref_process_error(sdc->sc, XFS_DATA_FORK, offset,
&error))
goto out;
@@ -86,8 +86,8 @@ xfs_scrub_dir_check_ftype(
ino_dtype = xfs_dir3_get_dtype(mp,
xfs_mode_to_ftype(VFS_I(ip)->i_mode));
if (ino_dtype != dtype)
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
- iput(VFS_I(ip));
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
+ xfs_irele(ip);
out:
return error;
}
@@ -101,23 +101,23 @@ out:
* we can look up this filename. Finally, we check the ftype.
*/
STATIC int
-xfs_scrub_dir_actor(
- struct dir_context *dir_iter,
- const char *name,
- int namelen,
- loff_t pos,
- u64 ino,
- unsigned type)
+xchk_dir_actor(
+ struct dir_context *dir_iter,
+ const char *name,
+ int namelen,
+ loff_t pos,
+ u64 ino,
+ unsigned type)
{
- struct xfs_mount *mp;
- struct xfs_inode *ip;
- struct xfs_scrub_dir_ctx *sdc;
- struct xfs_name xname;
- xfs_ino_t lookup_ino;
- xfs_dablk_t offset;
- int error = 0;
-
- sdc = container_of(dir_iter, struct xfs_scrub_dir_ctx, dir_iter);
+ struct xfs_mount *mp;
+ struct xfs_inode *ip;
+ struct xchk_dir_ctx *sdc;
+ struct xfs_name xname;
+ xfs_ino_t lookup_ino;
+ xfs_dablk_t offset;
+ int error = 0;
+
+ sdc = container_of(dir_iter, struct xchk_dir_ctx, dir_iter);
ip = sdc->sc->ip;
mp = ip->i_mount;
offset = xfs_dir2_db_to_da(mp->m_dir_geo,
@@ -125,17 +125,17 @@ xfs_scrub_dir_actor(
/* Does this inode number make sense? */
if (!xfs_verify_dir_ino(mp, ino)) {
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
goto out;
}
if (!strncmp(".", name, namelen)) {
/* If this is "." then check that the inum matches the dir. */
if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR)
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset);
if (ino != ip->i_ino)
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset);
} else if (!strncmp("..", name, namelen)) {
/*
@@ -143,10 +143,10 @@ xfs_scrub_dir_actor(
* matches this dir.
*/
if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR)
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset);
if (ip->i_ino == mp->m_sb.sb_rootino && ino != ip->i_ino)
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset);
}
@@ -156,23 +156,23 @@ xfs_scrub_dir_actor(
xname.type = XFS_DIR3_FT_UNKNOWN;
error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL);
- if (!xfs_scrub_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
+ if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
&error))
goto out;
if (lookup_ino != ino) {
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
goto out;
}
/* Verify the file type. This function absorbs error codes. */
- error = xfs_scrub_dir_check_ftype(sdc, offset, lookup_ino, type);
+ error = xchk_dir_check_ftype(sdc, offset, lookup_ino, type);
if (error)
goto out;
out:
/*
* A negative error code returned here is supposed to cause the
* dir_emit caller (xfs_readdir) to abort the directory iteration
- * and return zero to xfs_scrub_directory.
+ * and return zero to xchk_directory.
*/
if (error == 0 && sdc->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return -EFSCORRUPTED;
@@ -181,8 +181,8 @@ out:
/* Scrub a directory btree record. */
STATIC int
-xfs_scrub_dir_rec(
- struct xfs_scrub_da_btree *ds,
+xchk_dir_rec(
+ struct xchk_da_btree *ds,
int level,
void *rec)
{
@@ -203,7 +203,7 @@ xfs_scrub_dir_rec(
int error;
/* Check the hash of the entry. */
- error = xfs_scrub_da_btree_hash(ds, level, &ent->hashval);
+ error = xchk_da_btree_hash(ds, level, &ent->hashval);
if (error)
goto out;
@@ -218,18 +218,18 @@ xfs_scrub_dir_rec(
rec_bno = xfs_dir2_db_to_da(mp->m_dir_geo, db);
if (rec_bno >= mp->m_dir_geo->leafblk) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out;
}
error = xfs_dir3_data_read(ds->dargs.trans, dp, rec_bno, -2, &bp);
- if (!xfs_scrub_fblock_process_error(ds->sc, XFS_DATA_FORK, rec_bno,
+ if (!xchk_fblock_process_error(ds->sc, XFS_DATA_FORK, rec_bno,
&error))
goto out;
if (!bp) {
- xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out;
}
- xfs_scrub_buffer_recheck(ds->sc, bp);
+ xchk_buffer_recheck(ds->sc, bp);
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out_relse;
@@ -240,7 +240,7 @@ xfs_scrub_dir_rec(
p = (char *)mp->m_dir_inode_ops->data_entry_p(bp->b_addr);
endp = xfs_dir3_data_endp(mp->m_dir_geo, bp->b_addr);
if (!endp) {
- xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out_relse;
}
while (p < endp) {
@@ -258,7 +258,7 @@ xfs_scrub_dir_rec(
p += mp->m_dir_inode_ops->data_entsize(dep->namelen);
}
if (p >= endp) {
- xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out_relse;
}
@@ -267,14 +267,14 @@ xfs_scrub_dir_rec(
hash = be32_to_cpu(ent->hashval);
tag = be16_to_cpup(dp->d_ops->data_entry_tag_p(dent));
if (!xfs_verify_dir_ino(mp, ino) || tag != off)
- xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
if (dent->namelen == 0) {
- xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out_relse;
}
calc_hash = xfs_da_hashname(dent->name, dent->namelen);
if (calc_hash != hash)
- xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
out_relse:
xfs_trans_brelse(ds->dargs.trans, bp);
@@ -288,8 +288,8 @@ out:
* shortest, and that there aren't any bogus entries.
*/
STATIC void
-xfs_scrub_directory_check_free_entry(
- struct xfs_scrub_context *sc,
+xchk_directory_check_free_entry(
+ struct xfs_scrub *sc,
xfs_dablk_t lblk,
struct xfs_dir2_data_free *bf,
struct xfs_dir2_data_unused *dup)
@@ -308,13 +308,13 @@ xfs_scrub_directory_check_free_entry(
return;
/* Unused entry should be in the bestfrees but wasn't found. */
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
}
/* Check free space info in a directory data block. */
STATIC int
-xfs_scrub_directory_data_bestfree(
- struct xfs_scrub_context *sc,
+xchk_directory_data_bestfree(
+ struct xfs_scrub *sc,
xfs_dablk_t lblk,
bool is_block)
{
@@ -339,15 +339,15 @@ xfs_scrub_directory_data_bestfree(
if (is_block) {
/* dir block format */
if (lblk != XFS_B_TO_FSBT(mp, XFS_DIR2_DATA_OFFSET))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
error = xfs_dir3_block_read(sc->tp, sc->ip, &bp);
} else {
/* dir data format */
error = xfs_dir3_data_read(sc->tp, sc->ip, lblk, -1, &bp);
}
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
- xfs_scrub_buffer_recheck(sc, bp);
+ xchk_buffer_recheck(sc, bp);
/* XXX: Check xfs_dir3_data_hdr.pad is zero once we start setting it. */
@@ -362,7 +362,7 @@ xfs_scrub_directory_data_bestfree(
if (offset == 0)
continue;
if (offset >= mp->m_dir_geo->blksize) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
dup = (struct xfs_dir2_data_unused *)(bp->b_addr + offset);
@@ -372,13 +372,13 @@ xfs_scrub_directory_data_bestfree(
if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG) ||
be16_to_cpu(dup->length) != be16_to_cpu(dfp->length) ||
tag != ((char *)dup - (char *)bp->b_addr)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
/* bestfree records should be ordered largest to smallest */
if (smallest_bestfree < be16_to_cpu(dfp->length)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
@@ -400,7 +400,7 @@ xfs_scrub_directory_data_bestfree(
dep = (struct xfs_dir2_data_entry *)ptr;
newlen = d_ops->data_entsize(dep->namelen);
if (newlen <= 0) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
lblk);
goto out_buf;
}
@@ -411,7 +411,7 @@ xfs_scrub_directory_data_bestfree(
/* Spot check this free entry */
tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup));
if (tag != ((char *)dup - (char *)bp->b_addr)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
@@ -419,14 +419,14 @@ xfs_scrub_directory_data_bestfree(
* Either this entry is a bestfree or it's smaller than
* any of the bestfrees.
*/
- xfs_scrub_directory_check_free_entry(sc, lblk, bf, dup);
+ xchk_directory_check_free_entry(sc, lblk, bf, dup);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out_buf;
/* Move on. */
newlen = be16_to_cpu(dup->length);
if (newlen <= 0) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
ptr += newlen;
@@ -436,11 +436,11 @@ xfs_scrub_directory_data_bestfree(
/* We're required to fill all the space. */
if (ptr != endptr)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
/* Did we see at least as many free slots as there are bestfrees? */
if (nr_frees < nr_bestfrees)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
out_buf:
xfs_trans_brelse(sc->tp, bp);
out:
@@ -454,8 +454,8 @@ out:
* array is in order.
*/
STATIC void
-xfs_scrub_directory_check_freesp(
- struct xfs_scrub_context *sc,
+xchk_directory_check_freesp(
+ struct xfs_scrub *sc,
xfs_dablk_t lblk,
struct xfs_buf *dbp,
unsigned int len)
@@ -465,16 +465,16 @@ xfs_scrub_directory_check_freesp(
dfp = sc->ip->d_ops->data_bestfree_p(dbp->b_addr);
if (len != be16_to_cpu(dfp->length))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
if (len > 0 && be16_to_cpu(dfp->offset) == 0)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
}
/* Check free space info in a directory leaf1 block. */
STATIC int
-xfs_scrub_directory_leaf1_bestfree(
- struct xfs_scrub_context *sc,
+xchk_directory_leaf1_bestfree(
+ struct xfs_scrub *sc,
struct xfs_da_args *args,
xfs_dablk_t lblk)
{
@@ -497,9 +497,9 @@ xfs_scrub_directory_leaf1_bestfree(
/* Read the free space block. */
error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, -1, &bp);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
- xfs_scrub_buffer_recheck(sc, bp);
+ xchk_buffer_recheck(sc, bp);
leaf = bp->b_addr;
d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
@@ -512,7 +512,7 @@ xfs_scrub_directory_leaf1_bestfree(
struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr;
if (hdr3->pad != cpu_to_be32(0))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
}
/*
@@ -520,19 +520,19 @@ xfs_scrub_directory_leaf1_bestfree(
* blocks that can fit under i_size.
*/
if (bestcount != xfs_dir2_byte_to_db(geo, sc->ip->i_d.di_size)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
/* Is the leaf count even remotely sane? */
if (leafhdr.count > d_ops->leaf_max_ents(geo)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
/* Leaves and bests don't overlap in leaf format. */
if ((char *)&ents[leafhdr.count] > (char *)bestp) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
@@ -540,13 +540,13 @@ xfs_scrub_directory_leaf1_bestfree(
for (i = 0; i < leafhdr.count; i++) {
hash = be32_to_cpu(ents[i].hashval);
if (i > 0 && lasthash > hash)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
lasthash = hash;
if (ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
}
if (leafhdr.stale != stale)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
@@ -557,10 +557,10 @@ xfs_scrub_directory_leaf1_bestfree(
continue;
error = xfs_dir3_data_read(sc->tp, sc->ip,
i * args->geo->fsbcount, -1, &dbp);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk,
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
&error))
break;
- xfs_scrub_directory_check_freesp(sc, lblk, dbp, best);
+ xchk_directory_check_freesp(sc, lblk, dbp, best);
xfs_trans_brelse(sc->tp, dbp);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
@@ -571,8 +571,8 @@ out:
/* Check free space info in a directory freespace block. */
STATIC int
-xfs_scrub_directory_free_bestfree(
- struct xfs_scrub_context *sc,
+xchk_directory_free_bestfree(
+ struct xfs_scrub *sc,
struct xfs_da_args *args,
xfs_dablk_t lblk)
{
@@ -587,15 +587,15 @@ xfs_scrub_directory_free_bestfree(
/* Read the free space block */
error = xfs_dir2_free_read(sc->tp, sc->ip, lblk, &bp);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
- xfs_scrub_buffer_recheck(sc, bp);
+ xchk_buffer_recheck(sc, bp);
if (xfs_sb_version_hascrc(&sc->mp->m_sb)) {
struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
if (hdr3->pad != cpu_to_be32(0))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
}
/* Check all the entries. */
@@ -610,36 +610,36 @@ xfs_scrub_directory_free_bestfree(
error = xfs_dir3_data_read(sc->tp, sc->ip,
(freehdr.firstdb + i) * args->geo->fsbcount,
-1, &dbp);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk,
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
&error))
break;
- xfs_scrub_directory_check_freesp(sc, lblk, dbp, best);
+ xchk_directory_check_freesp(sc, lblk, dbp, best);
xfs_trans_brelse(sc->tp, dbp);
}
if (freehdr.nused + stale != freehdr.nvalid)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
out:
return error;
}
/* Check free space information in directories. */
STATIC int
-xfs_scrub_directory_blocks(
- struct xfs_scrub_context *sc)
+xchk_directory_blocks(
+ struct xfs_scrub *sc)
{
- struct xfs_bmbt_irec got;
- struct xfs_da_args args;
- struct xfs_ifork *ifp;
- struct xfs_mount *mp = sc->mp;
- xfs_fileoff_t leaf_lblk;
- xfs_fileoff_t free_lblk;
- xfs_fileoff_t lblk;
- struct xfs_iext_cursor icur;
- xfs_dablk_t dabno;
- bool found;
- int is_block = 0;
- int error;
+ struct xfs_bmbt_irec got;
+ struct xfs_da_args args;
+ struct xfs_ifork *ifp;
+ struct xfs_mount *mp = sc->mp;
+ xfs_fileoff_t leaf_lblk;
+ xfs_fileoff_t free_lblk;
+ xfs_fileoff_t lblk;
+ struct xfs_iext_cursor icur;
+ xfs_dablk_t dabno;
+ bool found;
+ int is_block = 0;
+ int error;
/* Ignore local format directories. */
if (sc->ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
@@ -656,7 +656,7 @@ xfs_scrub_directory_blocks(
args.geo = mp->m_dir_geo;
args.trans = sc->tp;
error = xfs_dir2_isblock(&args, &is_block);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
/* Iterate all the data extents in the directory... */
@@ -666,7 +666,7 @@ xfs_scrub_directory_blocks(
if (is_block &&
(got.br_startoff > 0 ||
got.br_blockcount != args.geo->fsbcount)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
got.br_startoff);
break;
}
@@ -690,7 +690,7 @@ xfs_scrub_directory_blocks(
args.geo->fsbcount);
lblk < got.br_startoff + got.br_blockcount;
lblk += args.geo->fsbcount) {
- error = xfs_scrub_directory_data_bestfree(sc, lblk,
+ error = xchk_directory_data_bestfree(sc, lblk,
is_block);
if (error)
goto out;
@@ -709,10 +709,10 @@ xfs_scrub_directory_blocks(
got.br_blockcount == args.geo->fsbcount &&
!xfs_iext_next_extent(ifp, &icur, &got)) {
if (is_block) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
- error = xfs_scrub_directory_leaf1_bestfree(sc, &args,
+ error = xchk_directory_leaf1_bestfree(sc, &args,
leaf_lblk);
if (error)
goto out;
@@ -731,11 +731,11 @@ xfs_scrub_directory_blocks(
*/
lblk = got.br_startoff;
if (lblk & ~0xFFFFFFFFULL) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
if (is_block) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
@@ -754,7 +754,7 @@ xfs_scrub_directory_blocks(
args.geo->fsbcount);
lblk < got.br_startoff + got.br_blockcount;
lblk += args.geo->fsbcount) {
- error = xfs_scrub_directory_free_bestfree(sc, &args,
+ error = xchk_directory_free_bestfree(sc, &args,
lblk);
if (error)
goto out;
@@ -769,29 +769,29 @@ out:
/* Scrub a whole directory. */
int
-xfs_scrub_directory(
- struct xfs_scrub_context *sc)
+xchk_directory(
+ struct xfs_scrub *sc)
{
- struct xfs_scrub_dir_ctx sdc = {
- .dir_iter.actor = xfs_scrub_dir_actor,
+ struct xchk_dir_ctx sdc = {
+ .dir_iter.actor = xchk_dir_actor,
.dir_iter.pos = 0,
.sc = sc,
};
- size_t bufsize;
- loff_t oldpos;
- int error = 0;
+ size_t bufsize;
+ loff_t oldpos;
+ int error = 0;
if (!S_ISDIR(VFS_I(sc->ip)->i_mode))
return -ENOENT;
/* Plausible size? */
if (sc->ip->i_d.di_size < xfs_dir2_sf_hdr_size(0)) {
- xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_set_corrupt(sc, sc->ip->i_ino);
goto out;
}
/* Check directory tree structure */
- error = xfs_scrub_da_btree(sc, XFS_DATA_FORK, xfs_scrub_dir_rec, NULL);
+ error = xchk_da_btree(sc, XFS_DATA_FORK, xchk_dir_rec, NULL);
if (error)
return error;
@@ -799,7 +799,7 @@ xfs_scrub_directory(
return error;
/* Check the freespace. */
- error = xfs_scrub_directory_blocks(sc);
+ error = xchk_directory_blocks(sc);
if (error)
return error;
@@ -816,7 +816,7 @@ xfs_scrub_directory(
/*
* Look up every name in this directory by hash.
*
- * Use the xfs_readdir function to call xfs_scrub_dir_actor on
+ * Use the xfs_readdir function to call xchk_dir_actor on
* every directory entry in this directory. In _actor, we check
* the name, inode number, and ftype (if applicable) of the
* entry. xfs_readdir uses the VFS filldir functions to provide
@@ -834,7 +834,7 @@ xfs_scrub_directory(
xfs_iunlock(sc->ip, XFS_ILOCK_EXCL);
while (true) {
error = xfs_readdir(sc->tp, sc->ip, &sdc.dir_iter, bufsize);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0,
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
&error))
goto out;
if (oldpos == sdc.dir_iter.pos)
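
Throughout dirs.c, xchk_directory wires xchk_dir_actor into an embedded
struct dir_context, and the actor recovers the outer scrub state with
container_of. A minimal userspace sketch of that embedded-context pattern
— with a deliberately simplified dir_context stand-in, not the kernel's
real definition:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct dir_context {
		int (*actor)(struct dir_context *dc, const char *name);
		long long pos;
	};

	struct xchk_dir_ctx {
		struct dir_context	dir_iter;	/* must be embedded */
		int			corrupt;	/* outer scrub state */
	};

	static int demo_actor(struct dir_context *dc, const char *name)
	{
		struct xchk_dir_ctx *sdc;

		/* Walk back from the embedded member to the outer struct. */
		sdc = container_of(dc, struct xchk_dir_ctx, dir_iter);
		printf("entry %s, corrupt=%d\n", name, sdc->corrupt);
		return 0;
	}

	int main(void)
	{
		struct xchk_dir_ctx sdc = {
			.dir_iter.actor	= demo_actor,
			.dir_iter.pos	= 0,
			.corrupt	= 0,
		};

		/* An iterator core would call the actor for each entry. */
		return sdc.dir_iter.actor(&sdc.dir_iter, "lost+found");
	}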
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 13d43d108574..224dba937492 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -35,11 +35,11 @@
* try again after forcing logged inode cores out to disk.
*/
int
-xfs_scrub_setup_ag_iallocbt(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_ag_iallocbt(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- return xfs_scrub_setup_ag_btree(sc, ip, sc->try_harder);
+ return xchk_setup_ag_btree(sc, ip, sc->try_harder);
}
/* Inode btree scrubber. */
@@ -50,8 +50,8 @@ xfs_scrub_setup_ag_iallocbt(
* we have a record or not depending on freecount.
*/
static inline void
-xfs_scrub_iallocbt_chunk_xref_other(
- struct xfs_scrub_context *sc,
+xchk_iallocbt_chunk_xref_other(
+ struct xfs_scrub *sc,
struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino)
{
@@ -66,17 +66,17 @@ xfs_scrub_iallocbt_chunk_xref_other(
if (!(*pcur))
return;
error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
- if (!xfs_scrub_should_check_xref(sc, &error, pcur))
+ if (!xchk_should_check_xref(sc, &error, pcur))
return;
if (((irec->ir_freecount > 0 && !has_irec) ||
(irec->ir_freecount == 0 && has_irec)))
- xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+ xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_iallocbt_chunk_xref(
- struct xfs_scrub_context *sc,
+xchk_iallocbt_chunk_xref(
+ struct xfs_scrub *sc,
struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino,
xfs_agblock_t agbno,
@@ -87,17 +87,17 @@ xfs_scrub_iallocbt_chunk_xref(
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
- xfs_scrub_xref_is_used_space(sc, agbno, len);
- xfs_scrub_iallocbt_chunk_xref_other(sc, irec, agino);
+ xchk_xref_is_used_space(sc, agbno, len);
+ xchk_iallocbt_chunk_xref_other(sc, irec, agino);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
- xfs_scrub_xref_is_owned_by(sc, agbno, len, &oinfo);
- xfs_scrub_xref_is_not_shared(sc, agbno, len);
+ xchk_xref_is_owned_by(sc, agbno, len, &oinfo);
+ xchk_xref_is_not_shared(sc, agbno, len);
}
/* Is this chunk worth checking? */
STATIC bool
-xfs_scrub_iallocbt_chunk(
- struct xfs_scrub_btree *bs,
+xchk_iallocbt_chunk(
+ struct xchk_btree *bs,
struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino,
xfs_extlen_t len)
@@ -110,16 +110,16 @@ xfs_scrub_iallocbt_chunk(
if (bno + len <= bno ||
!xfs_verify_agbno(mp, agno, bno) ||
!xfs_verify_agbno(mp, agno, bno + len - 1))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
- xfs_scrub_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);
+ xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);
return true;
}
/* Count the number of free inodes. */
static unsigned int
-xfs_scrub_iallocbt_freecount(
+xchk_iallocbt_freecount(
xfs_inofree_t freemask)
{
BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
@@ -128,8 +128,8 @@ xfs_scrub_iallocbt_freecount(
/* Check a particular inode with ir_free. */
STATIC int
-xfs_scrub_iallocbt_check_cluster_freemask(
- struct xfs_scrub_btree *bs,
+xchk_iallocbt_check_cluster_freemask(
+ struct xchk_btree *bs,
xfs_ino_t fsino,
xfs_agino_t chunkino,
xfs_agino_t clusterino,
@@ -143,14 +143,14 @@ xfs_scrub_iallocbt_check_cluster_freemask(
bool inuse;
int error = 0;
- if (xfs_scrub_should_terminate(bs->sc, &error))
+ if (xchk_should_terminate(bs->sc, &error))
return error;
dip = xfs_buf_offset(bp, clusterino * mp->m_sb.sb_inodesize);
if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
(dip->di_version >= 3 &&
be64_to_cpu(dip->di_ino) != fsino + clusterino)) {
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
goto out;
}
@@ -175,15 +175,15 @@ xfs_scrub_iallocbt_check_cluster_freemask(
freemask_ok = inode_is_free ^ inuse;
}
if (!freemask_ok)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
return 0;
}
/* Make sure the free mask is consistent with what the inodes think. */
STATIC int
-xfs_scrub_iallocbt_check_freemask(
- struct xfs_scrub_btree *bs,
+xchk_iallocbt_check_freemask(
+ struct xchk_btree *bs,
struct xfs_inobt_rec_incore *irec)
{
struct xfs_owner_info oinfo;
@@ -223,18 +223,18 @@ xfs_scrub_iallocbt_check_freemask(
/* The whole cluster must be a hole or not a hole. */
ir_holemask = (irec->ir_holemask & holemask);
if (ir_holemask != holemask && ir_holemask != 0) {
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
continue;
}
/* If any part of this is a hole, skip it. */
if (ir_holemask) {
- xfs_scrub_xref_is_not_owned_by(bs->sc, agbno,
+ xchk_xref_is_not_owned_by(bs->sc, agbno,
blks_per_cluster, &oinfo);
continue;
}
- xfs_scrub_xref_is_owned_by(bs->sc, agbno, blks_per_cluster,
+ xchk_xref_is_owned_by(bs->sc, agbno, blks_per_cluster,
&oinfo);
/* Grab the inode cluster buffer. */
@@ -245,13 +245,13 @@ xfs_scrub_iallocbt_check_freemask(
error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap,
&dip, &bp, 0, 0);
- if (!xfs_scrub_btree_xref_process_error(bs->sc, bs->cur, 0,
+ if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0,
&error))
continue;
/* Which inodes are free? */
for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
- error = xfs_scrub_iallocbt_check_cluster_freemask(bs,
+ error = xchk_iallocbt_check_cluster_freemask(bs,
fsino, chunkino, clusterino, irec, bp);
if (error) {
xfs_trans_brelse(bs->cur->bc_tp, bp);
@@ -267,8 +267,8 @@ xfs_scrub_iallocbt_check_freemask(
/* Scrub an inobt/finobt record. */
STATIC int
-xfs_scrub_iallocbt_rec(
- struct xfs_scrub_btree *bs,
+xchk_iallocbt_rec(
+ struct xchk_btree *bs,
union xfs_btree_rec *rec)
{
struct xfs_mount *mp = bs->cur->bc_mp;
@@ -289,18 +289,18 @@ xfs_scrub_iallocbt_rec(
if (irec.ir_count > XFS_INODES_PER_CHUNK ||
irec.ir_freecount > XFS_INODES_PER_CHUNK)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
real_freecount = irec.ir_freecount +
(XFS_INODES_PER_CHUNK - irec.ir_count);
- if (real_freecount != xfs_scrub_iallocbt_freecount(irec.ir_free))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
agino = irec.ir_startino;
/* Record has to be properly aligned within the AG. */
if (!xfs_verify_agino(mp, agno, agino) ||
!xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
goto out;
}
@@ -308,7 +308,7 @@ xfs_scrub_iallocbt_rec(
agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino);
if ((agbno & (xfs_ialloc_cluster_alignment(mp) - 1)) ||
(agbno & (xfs_icluster_size_fsb(mp) - 1)))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
*inode_blocks += XFS_B_TO_FSB(mp,
irec.ir_count * mp->m_sb.sb_inodesize);
@@ -318,9 +318,9 @@ xfs_scrub_iallocbt_rec(
len = XFS_B_TO_FSB(mp,
XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
if (irec.ir_count != XFS_INODES_PER_CHUNK)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
- if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len))
+ if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
goto out;
goto check_freemask;
}
@@ -333,12 +333,12 @@ xfs_scrub_iallocbt_rec(
holes = ~xfs_inobt_irec_to_allocmask(&irec);
if ((holes & irec.ir_free) != holes ||
irec.ir_freecount > irec.ir_count)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
if (holemask & 1)
holecount += XFS_INODES_PER_HOLEMASK_BIT;
- else if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len))
+ else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
break;
holemask >>= 1;
agino += XFS_INODES_PER_HOLEMASK_BIT;
@@ -346,10 +346,10 @@ xfs_scrub_iallocbt_rec(
if (holecount > XFS_INODES_PER_CHUNK ||
holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
check_freemask:
- error = xfs_scrub_iallocbt_check_freemask(bs, &irec);
+ error = xchk_iallocbt_check_freemask(bs, &irec);
if (error)
goto out;
@@ -362,39 +362,39 @@ out:
* Don't bother if we're missing btree cursors, as we're already corrupt.
*/
STATIC void
-xfs_scrub_iallocbt_xref_rmap_btreeblks(
- struct xfs_scrub_context *sc,
- int which)
+xchk_iallocbt_xref_rmap_btreeblks(
+ struct xfs_scrub *sc,
+ int which)
{
- struct xfs_owner_info oinfo;
- xfs_filblks_t blocks;
- xfs_extlen_t inobt_blocks = 0;
- xfs_extlen_t finobt_blocks = 0;
- int error;
+ struct xfs_owner_info oinfo;
+ xfs_filblks_t blocks;
+ xfs_extlen_t inobt_blocks = 0;
+ xfs_extlen_t finobt_blocks = 0;
+ int error;
if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
(xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) ||
- xfs_scrub_skip_xref(sc->sm))
+ xchk_skip_xref(sc->sm))
return;
/* Check that we saw as many inobt blocks as the rmap says. */
error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
- if (!xfs_scrub_process_error(sc, 0, 0, &error))
+ if (!xchk_process_error(sc, 0, 0, &error))
return;
if (sc->sa.fino_cur) {
error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
- if (!xfs_scrub_process_error(sc, 0, 0, &error))
+ if (!xchk_process_error(sc, 0, 0, &error))
return;
}
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
- error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
+ error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
&blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (blocks != inobt_blocks + finobt_blocks)
- xfs_scrub_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
+ xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}
/*
@@ -402,47 +402,47 @@ xfs_scrub_iallocbt_xref_rmap_btreeblks(
* the rmap says are owned by inodes.
*/
STATIC void
-xfs_scrub_iallocbt_xref_rmap_inodes(
- struct xfs_scrub_context *sc,
- int which,
- xfs_filblks_t inode_blocks)
+xchk_iallocbt_xref_rmap_inodes(
+ struct xfs_scrub *sc,
+ int which,
+ xfs_filblks_t inode_blocks)
{
- struct xfs_owner_info oinfo;
- xfs_filblks_t blocks;
- int error;
+ struct xfs_owner_info oinfo;
+ xfs_filblks_t blocks;
+ int error;
- if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return;
/* Check that we saw as many inode blocks as the rmap knows about. */
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
- error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
+ error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
&blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (blocks != inode_blocks)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
/* Scrub the inode btrees for some AG. */
STATIC int
-xfs_scrub_iallocbt(
- struct xfs_scrub_context *sc,
- xfs_btnum_t which)
+xchk_iallocbt(
+ struct xfs_scrub *sc,
+ xfs_btnum_t which)
{
- struct xfs_btree_cur *cur;
- struct xfs_owner_info oinfo;
- xfs_filblks_t inode_blocks = 0;
- int error;
+ struct xfs_btree_cur *cur;
+ struct xfs_owner_info oinfo;
+ xfs_filblks_t inode_blocks = 0;
+ int error;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
- error = xfs_scrub_btree(sc, cur, xfs_scrub_iallocbt_rec, &oinfo,
+ error = xchk_btree(sc, cur, xchk_iallocbt_rec, &oinfo,
&inode_blocks);
if (error)
return error;
- xfs_scrub_iallocbt_xref_rmap_btreeblks(sc, which);
+ xchk_iallocbt_xref_rmap_btreeblks(sc, which);
/*
* If we're scrubbing the inode btree, inode_blocks is the number of
@@ -452,64 +452,64 @@ xfs_scrub_iallocbt(
* to inode chunks with free inodes.
*/
if (which == XFS_BTNUM_INO)
- xfs_scrub_iallocbt_xref_rmap_inodes(sc, which, inode_blocks);
+ xchk_iallocbt_xref_rmap_inodes(sc, which, inode_blocks);
return error;
}
int
-xfs_scrub_inobt(
- struct xfs_scrub_context *sc)
+xchk_inobt(
+ struct xfs_scrub *sc)
{
- return xfs_scrub_iallocbt(sc, XFS_BTNUM_INO);
+ return xchk_iallocbt(sc, XFS_BTNUM_INO);
}
int
-xfs_scrub_finobt(
- struct xfs_scrub_context *sc)
+xchk_finobt(
+ struct xfs_scrub *sc)
{
- return xfs_scrub_iallocbt(sc, XFS_BTNUM_FINO);
+ return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}
/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
-xfs_scrub_xref_inode_check(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len,
- struct xfs_btree_cur **icur,
- bool should_have_inodes)
+xchk_xref_inode_check(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len,
+ struct xfs_btree_cur **icur,
+ bool should_have_inodes)
{
- bool has_inodes;
- int error;
+ bool has_inodes;
+ int error;
- if (!(*icur) || xfs_scrub_skip_xref(sc->sm))
+ if (!(*icur) || xchk_skip_xref(sc->sm))
return;
error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
- if (!xfs_scrub_should_check_xref(sc, &error, icur))
+ if (!xchk_should_check_xref(sc, &error, icur))
return;
if (has_inodes != should_have_inodes)
- xfs_scrub_btree_xref_set_corrupt(sc, *icur, 0);
+ xchk_btree_xref_set_corrupt(sc, *icur, 0);
}
/* xref check that the extent is not covered by inodes */
void
-xfs_scrub_xref_is_not_inode_chunk(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len)
+xchk_xref_is_not_inode_chunk(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
{
- xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
- xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
+ xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
+ xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}
/* xref check that the extent is covered by inodes */
void
-xfs_scrub_xref_is_inode_chunk(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len)
+xchk_xref_is_inode_chunk(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
{
- xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
+ xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}
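
In ialloc.c above, xchk_iallocbt_rec compares the recorded free count
against a population count of the ir_free bitmap, folding sparse-chunk
holes into the expected total. A standalone sketch of that consistency
check, assuming a plain popcount loop in place of the kernel's bit
helpers and using invented sample values:

	#include <stdint.h>
	#include <stdio.h>

	#define XFS_INODES_PER_CHUNK	64

	/* Portable stand-in for a 64-bit population count. */
	static unsigned int demo_freecount(uint64_t freemask)
	{
		unsigned int nr = 0;

		while (freemask) {
			nr += freemask & 1;
			freemask >>= 1;
		}
		return nr;
	}

	int main(void)
	{
		uint64_t ir_free = 0x00000000000000FFULL;	/* 8 free inodes */
		unsigned int ir_count = 64;			/* fully allocated chunk */
		unsigned int ir_freecount = 8;
		unsigned int real_freecount;

		/* Sparse chunks fold the missing inodes into the free count. */
		real_freecount = ir_freecount + (XFS_INODES_PER_CHUNK - ir_count);
		if (real_freecount != demo_freecount(ir_free))
			printf("corrupt: freecount disagrees with free mask\n");
		else
			printf("freecount consistent\n");
		return 0;
	}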
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index 7a6208505980..5b3b177c0fc9 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -37,23 +37,23 @@
* the goal.
*/
int
-xfs_scrub_setup_inode(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_inode(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- int error;
+ int error;
/*
* Try to get the inode. If the verifiers fail, we try again
* in raw mode.
*/
- error = xfs_scrub_get_inode(sc, ip);
+ error = xchk_get_inode(sc, ip);
switch (error) {
case 0:
break;
case -EFSCORRUPTED:
case -EFSBADCRC:
- return xfs_scrub_trans_alloc(sc, 0);
+ return xchk_trans_alloc(sc, 0);
default:
return error;
}
@@ -61,7 +61,7 @@ xfs_scrub_setup_inode(
/* Got the inode, lock it and we're ready to go. */
sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
xfs_ilock(sc->ip, sc->ilock_flags);
- error = xfs_scrub_trans_alloc(sc, 0);
+ error = xchk_trans_alloc(sc, 0);
if (error)
goto out;
sc->ilock_flags |= XFS_ILOCK_EXCL;
@@ -76,19 +76,19 @@ out:
/* Validate di_extsize hint. */
STATIC void
-xfs_scrub_inode_extsize(
- struct xfs_scrub_context *sc,
- struct xfs_dinode *dip,
- xfs_ino_t ino,
- uint16_t mode,
- uint16_t flags)
+xchk_inode_extsize(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino,
+ uint16_t mode,
+ uint16_t flags)
{
- xfs_failaddr_t fa;
+ xfs_failaddr_t fa;
fa = xfs_inode_validate_extsize(sc->mp, be32_to_cpu(dip->di_extsize),
mode, flags);
if (fa)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
}
/*
@@ -98,33 +98,33 @@ xfs_scrub_inode_extsize(
* These functions must be kept in sync with each other.
*/
STATIC void
-xfs_scrub_inode_cowextsize(
- struct xfs_scrub_context *sc,
- struct xfs_dinode *dip,
- xfs_ino_t ino,
- uint16_t mode,
- uint16_t flags,
- uint64_t flags2)
+xchk_inode_cowextsize(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino,
+ uint16_t mode,
+ uint16_t flags,
+ uint64_t flags2)
{
- xfs_failaddr_t fa;
+ xfs_failaddr_t fa;
fa = xfs_inode_validate_cowextsize(sc->mp,
be32_to_cpu(dip->di_cowextsize), mode, flags,
flags2);
if (fa)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
}
/* Make sure the di_flags make sense for the inode. */
STATIC void
-xfs_scrub_inode_flags(
- struct xfs_scrub_context *sc,
- struct xfs_dinode *dip,
- xfs_ino_t ino,
- uint16_t mode,
- uint16_t flags)
+xchk_inode_flags(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino,
+ uint16_t mode,
+ uint16_t flags)
{
- struct xfs_mount *mp = sc->mp;
+ struct xfs_mount *mp = sc->mp;
if (flags & ~XFS_DIFLAG_ANY)
goto bad;
@@ -157,20 +157,20 @@ xfs_scrub_inode_flags(
return;
bad:
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
}
/* Make sure the di_flags2 make sense for the inode. */
STATIC void
-xfs_scrub_inode_flags2(
- struct xfs_scrub_context *sc,
- struct xfs_dinode *dip,
- xfs_ino_t ino,
- uint16_t mode,
- uint16_t flags,
- uint64_t flags2)
+xchk_inode_flags2(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino,
+ uint16_t mode,
+ uint16_t flags,
+ uint64_t flags2)
{
- struct xfs_mount *mp = sc->mp;
+ struct xfs_mount *mp = sc->mp;
if (flags2 & ~XFS_DIFLAG2_ANY)
goto bad;
@@ -200,23 +200,23 @@ xfs_scrub_inode_flags2(
return;
bad:
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
}
/* Scrub all the ondisk inode fields. */
STATIC void
-xfs_scrub_dinode(
- struct xfs_scrub_context *sc,
- struct xfs_dinode *dip,
- xfs_ino_t ino)
+xchk_dinode(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino)
{
- struct xfs_mount *mp = sc->mp;
- size_t fork_recs;
- unsigned long long isize;
- uint64_t flags2;
- uint32_t nextents;
- uint16_t flags;
- uint16_t mode;
+ struct xfs_mount *mp = sc->mp;
+ size_t fork_recs;
+ unsigned long long isize;
+ uint64_t flags2;
+ uint32_t nextents;
+ uint16_t flags;
+ uint16_t mode;
flags = be16_to_cpu(dip->di_flags);
if (dip->di_version >= 3)
@@ -237,7 +237,7 @@ xfs_scrub_dinode(
/* mode is recognized */
break;
default:
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
}
@@ -248,22 +248,22 @@ xfs_scrub_dinode(
* We autoconvert v1 inodes into v2 inodes on writeout,
* so just mark this inode for preening.
*/
- xfs_scrub_ino_set_preen(sc, ino);
+ xchk_ino_set_preen(sc, ino);
break;
case 2:
case 3:
if (dip->di_onlink != 0)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
if (dip->di_mode == 0 && sc->ip)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
if (dip->di_projid_hi != 0 &&
!xfs_sb_version_hasprojid32bit(&mp->m_sb))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
default:
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
return;
}
@@ -273,40 +273,40 @@ xfs_scrub_dinode(
*/
if (dip->di_uid == cpu_to_be32(-1U) ||
dip->di_gid == cpu_to_be32(-1U))
- xfs_scrub_ino_set_warning(sc, ino);
+ xchk_ino_set_warning(sc, ino);
/* di_format */
switch (dip->di_format) {
case XFS_DINODE_FMT_DEV:
if (!S_ISCHR(mode) && !S_ISBLK(mode) &&
!S_ISFIFO(mode) && !S_ISSOCK(mode))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
case XFS_DINODE_FMT_LOCAL:
if (!S_ISDIR(mode) && !S_ISLNK(mode))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
case XFS_DINODE_FMT_EXTENTS:
if (!S_ISREG(mode) && !S_ISDIR(mode) && !S_ISLNK(mode))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
case XFS_DINODE_FMT_BTREE:
if (!S_ISREG(mode) && !S_ISDIR(mode))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
case XFS_DINODE_FMT_UUID:
default:
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
}
/* di_[amc]time.nsec */
if (be32_to_cpu(dip->di_atime.t_nsec) >= NSEC_PER_SEC)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
if (be32_to_cpu(dip->di_mtime.t_nsec) >= NSEC_PER_SEC)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
if (be32_to_cpu(dip->di_ctime.t_nsec) >= NSEC_PER_SEC)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
/*
* di_size. xfs_dinode_verify checks for things that screw up
@@ -315,19 +315,19 @@ xfs_scrub_dinode(
*/
isize = be64_to_cpu(dip->di_size);
if (isize & (1ULL << 63))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
/* Devices, fifos, and sockets must have zero size */
if (!S_ISDIR(mode) && !S_ISREG(mode) && !S_ISLNK(mode) && isize != 0)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
/* Directories can't be larger than the data section size (32G) */
if (S_ISDIR(mode) && (isize == 0 || isize >= XFS_DIR2_SPACE_SIZE))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
/* Symlinks can't be larger than SYMLINK_MAXLEN */
if (S_ISLNK(mode) && (isize == 0 || isize >= XFS_SYMLINK_MAXLEN))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
/*
* Warn if the running kernel can't handle the kinds of offsets
@@ -336,7 +336,7 @@ xfs_scrub_dinode(
* overly large offsets, flag the inode for admin review.
*/
if (isize >= mp->m_super->s_maxbytes)
- xfs_scrub_ino_set_warning(sc, ino);
+ xchk_ino_set_warning(sc, ino);
/* di_nblocks */
if (flags2 & XFS_DIFLAG2_REFLINK) {
@@ -351,15 +351,15 @@ xfs_scrub_dinode(
*/
if (be64_to_cpu(dip->di_nblocks) >=
mp->m_sb.sb_dblocks + mp->m_sb.sb_rblocks)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
} else {
if (be64_to_cpu(dip->di_nblocks) >= mp->m_sb.sb_dblocks)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
}
- xfs_scrub_inode_flags(sc, dip, ino, mode, flags);
+ xchk_inode_flags(sc, dip, ino, mode, flags);
- xfs_scrub_inode_extsize(sc, dip, ino, mode, flags);
+ xchk_inode_extsize(sc, dip, ino, mode, flags);
/* di_nextents */
nextents = be32_to_cpu(dip->di_nextents);
@@ -367,31 +367,31 @@ xfs_scrub_dinode(
switch (dip->di_format) {
case XFS_DINODE_FMT_EXTENTS:
if (nextents > fork_recs)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
case XFS_DINODE_FMT_BTREE:
if (nextents <= fork_recs)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
default:
if (nextents != 0)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
}
/* di_forkoff */
if (XFS_DFORK_APTR(dip) >= (char *)dip + mp->m_sb.sb_inodesize)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
if (dip->di_anextents != 0 && dip->di_forkoff == 0)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
if (dip->di_forkoff == 0 && dip->di_aformat != XFS_DINODE_FMT_EXTENTS)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
/* di_aformat */
if (dip->di_aformat != XFS_DINODE_FMT_LOCAL &&
dip->di_aformat != XFS_DINODE_FMT_EXTENTS &&
dip->di_aformat != XFS_DINODE_FMT_BTREE)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
/* di_anextents */
nextents = be16_to_cpu(dip->di_anextents);
@@ -399,22 +399,22 @@ xfs_scrub_dinode(
switch (dip->di_aformat) {
case XFS_DINODE_FMT_EXTENTS:
if (nextents > fork_recs)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
case XFS_DINODE_FMT_BTREE:
if (nextents <= fork_recs)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
default:
if (nextents != 0)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
}
if (dip->di_version >= 3) {
if (be32_to_cpu(dip->di_crtime.t_nsec) >= NSEC_PER_SEC)
- xfs_scrub_ino_set_corrupt(sc, ino);
- xfs_scrub_inode_flags2(sc, dip, ino, mode, flags, flags2);
- xfs_scrub_inode_cowextsize(sc, dip, ino, mode, flags,
+ xchk_ino_set_corrupt(sc, ino);
+ xchk_inode_flags2(sc, dip, ino, mode, flags, flags2);
+ xchk_inode_cowextsize(sc, dip, ino, mode, flags,
flags2);
}
}
@@ -425,8 +425,8 @@ xfs_scrub_dinode(
* IGET_UNTRUSTED, which checks the inobt for us.
*/
static void
-xfs_scrub_inode_xref_finobt(
- struct xfs_scrub_context *sc,
+xchk_inode_xref_finobt(
+ struct xfs_scrub *sc,
xfs_ino_t ino)
{
struct xfs_inobt_rec_incore rec;
@@ -434,7 +434,7 @@ xfs_scrub_inode_xref_finobt(
int has_record;
int error;
- if (!sc->sa.fino_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.fino_cur || xchk_skip_xref(sc->sm))
return;
agino = XFS_INO_TO_AGINO(sc->mp, ino);
@@ -445,12 +445,12 @@ xfs_scrub_inode_xref_finobt(
*/
error = xfs_inobt_lookup(sc->sa.fino_cur, agino, XFS_LOOKUP_LE,
&has_record);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
!has_record)
return;
error = xfs_inobt_get_rec(sc->sa.fino_cur, &rec, &has_record);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
!has_record)
return;
@@ -463,54 +463,54 @@ xfs_scrub_inode_xref_finobt(
return;
if (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.fino_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.fino_cur, 0);
}
/* Cross reference the inode fields with the forks. */
STATIC void
-xfs_scrub_inode_xref_bmap(
- struct xfs_scrub_context *sc,
- struct xfs_dinode *dip)
+xchk_inode_xref_bmap(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip)
{
- xfs_extnum_t nextents;
- xfs_filblks_t count;
- xfs_filblks_t acount;
- int error;
+ xfs_extnum_t nextents;
+ xfs_filblks_t count;
+ xfs_filblks_t acount;
+ int error;
- if (xfs_scrub_skip_xref(sc->sm))
+ if (xchk_skip_xref(sc->sm))
return;
/* Walk all the extents to check nextents/naextents/nblocks. */
error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_DATA_FORK,
&nextents, &count);
- if (!xfs_scrub_should_check_xref(sc, &error, NULL))
+ if (!xchk_should_check_xref(sc, &error, NULL))
return;
if (nextents < be32_to_cpu(dip->di_nextents))
- xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);
error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_ATTR_FORK,
&nextents, &acount);
- if (!xfs_scrub_should_check_xref(sc, &error, NULL))
+ if (!xchk_should_check_xref(sc, &error, NULL))
return;
if (nextents != be16_to_cpu(dip->di_anextents))
- xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);
/* Check nblocks against the inode. */
if (count + acount != be64_to_cpu(dip->di_nblocks))
- xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);
}
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_inode_xref(
- struct xfs_scrub_context *sc,
- xfs_ino_t ino,
- struct xfs_dinode *dip)
+xchk_inode_xref(
+ struct xfs_scrub *sc,
+ xfs_ino_t ino,
+ struct xfs_dinode *dip)
{
- struct xfs_owner_info oinfo;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- int error;
+ struct xfs_owner_info oinfo;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ int error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
@@ -518,18 +518,18 @@ xfs_scrub_inode_xref(
agno = XFS_INO_TO_AGNO(sc->mp, ino);
agbno = XFS_INO_TO_AGBNO(sc->mp, ino);
- error = xfs_scrub_ag_init(sc, agno, &sc->sa);
- if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
+ error = xchk_ag_init(sc, agno, &sc->sa);
+ if (!xchk_xref_process_error(sc, agno, agbno, &error))
return;
- xfs_scrub_xref_is_used_space(sc, agbno, 1);
- xfs_scrub_inode_xref_finobt(sc, ino);
+ xchk_xref_is_used_space(sc, agbno, 1);
+ xchk_inode_xref_finobt(sc, ino);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
- xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
- xfs_scrub_xref_is_not_shared(sc, agbno, 1);
- xfs_scrub_inode_xref_bmap(sc, dip);
+ xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_xref_is_not_shared(sc, agbno, 1);
+ xchk_inode_xref_bmap(sc, dip);
- xfs_scrub_ag_free(sc, &sc->sa);
+ xchk_ag_free(sc, &sc->sa);
}
/*
@@ -539,35 +539,35 @@ xfs_scrub_inode_xref(
* reflink filesystem.
*/
static void
-xfs_scrub_inode_check_reflink_iflag(
- struct xfs_scrub_context *sc,
- xfs_ino_t ino)
+xchk_inode_check_reflink_iflag(
+ struct xfs_scrub *sc,
+ xfs_ino_t ino)
{
- struct xfs_mount *mp = sc->mp;
- bool has_shared;
- int error;
+ struct xfs_mount *mp = sc->mp;
+ bool has_shared;
+ int error;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return;
error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
&has_shared);
- if (!xfs_scrub_xref_process_error(sc, XFS_INO_TO_AGNO(mp, ino),
+ if (!xchk_xref_process_error(sc, XFS_INO_TO_AGNO(mp, ino),
XFS_INO_TO_AGBNO(mp, ino), &error))
return;
if (xfs_is_reflink_inode(sc->ip) && !has_shared)
- xfs_scrub_ino_set_preen(sc, ino);
+ xchk_ino_set_preen(sc, ino);
else if (!xfs_is_reflink_inode(sc->ip) && has_shared)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
}
/* Scrub an inode. */
int
-xfs_scrub_inode(
- struct xfs_scrub_context *sc)
+xchk_inode(
+ struct xfs_scrub *sc)
{
- struct xfs_dinode di;
- int error = 0;
+ struct xfs_dinode di;
+ int error = 0;
/*
* If sc->ip is NULL, that means that the setup function called
@@ -575,13 +575,13 @@ xfs_scrub_inode(
* and a NULL inode, so flag the corruption error and return.
*/
if (!sc->ip) {
- xfs_scrub_ino_set_corrupt(sc, sc->sm->sm_ino);
+ xchk_ino_set_corrupt(sc, sc->sm->sm_ino);
return 0;
}
/* Scrub the inode core. */
xfs_inode_to_disk(sc->ip, &di, 0);
- xfs_scrub_dinode(sc, &di, sc->ip->i_ino);
+ xchk_dinode(sc, &di, sc->ip->i_ino);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
@@ -591,9 +591,9 @@ xfs_scrub_inode(
* we scrubbed the dinode.
*/
if (S_ISREG(VFS_I(sc->ip)->i_mode))
- xfs_scrub_inode_check_reflink_iflag(sc, sc->ip->i_ino);
+ xchk_inode_check_reflink_iflag(sc, sc->ip->i_ino);
- xfs_scrub_inode_xref(sc, sc->ip->i_ino, &di);
+ xchk_inode_xref(sc, sc->ip->i_ino, &di);
out:
return error;
}
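
Several of the xchk_dinode checks above follow one pattern: decode a
big-endian on-disk field, then range-check it — for example, each
timestamp's t_nsec must stay below NSEC_PER_SEC. A portable sketch of
that pattern; the byte-swap helper and the sample on-disk bytes are
assumptions for the demo:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC	1000000000U

	/* Decode a big-endian 32-bit on-disk field, like be32_to_cpu(). */
	static uint32_t demo_be32_to_cpu(const uint8_t b[4])
	{
		return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
		       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
	}

	int main(void)
	{
		/* 0x4AFE1C00 = 1258167296, which exceeds 999999999. */
		const uint8_t di_atime_nsec[4] = { 0x4A, 0xFE, 0x1C, 0x00 };
		uint32_t nsec = demo_be32_to_cpu(di_atime_nsec);

		if (nsec >= NSEC_PER_SEC)
			printf("corrupt: t_nsec %u out of range\n", (unsigned)nsec);
		return 0;
	}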
diff --git a/fs/xfs/scrub/parent.c b/fs/xfs/scrub/parent.c
index e2bda58c32f0..1c9d7c7f64f5 100644
--- a/fs/xfs/scrub/parent.c
+++ b/fs/xfs/scrub/parent.c
@@ -27,36 +27,36 @@
/* Set us up to scrub parents. */
int
-xfs_scrub_setup_parent(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_parent(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- return xfs_scrub_setup_inode_contents(sc, ip, 0);
+ return xchk_setup_inode_contents(sc, ip, 0);
}
/* Parent pointers */
/* Look for an entry in a parent pointing to this inode. */
-struct xfs_scrub_parent_ctx {
- struct dir_context dc;
- xfs_ino_t ino;
- xfs_nlink_t nlink;
+struct xchk_parent_ctx {
+ struct dir_context dc;
+ xfs_ino_t ino;
+ xfs_nlink_t nlink;
};
/* Look for a single entry in a directory pointing to an inode. */
STATIC int
-xfs_scrub_parent_actor(
- struct dir_context *dc,
- const char *name,
- int namelen,
- loff_t pos,
- u64 ino,
- unsigned type)
+xchk_parent_actor(
+ struct dir_context *dc,
+ const char *name,
+ int namelen,
+ loff_t pos,
+ u64 ino,
+ unsigned type)
{
- struct xfs_scrub_parent_ctx *spc;
+ struct xchk_parent_ctx *spc;
- spc = container_of(dc, struct xfs_scrub_parent_ctx, dc);
+ spc = container_of(dc, struct xchk_parent_ctx, dc);
if (spc->ino == ino)
spc->nlink++;
return 0;
@@ -64,21 +64,21 @@ xfs_scrub_parent_actor(
/* Count the number of dentries in the parent dir that point to this inode. */
STATIC int
-xfs_scrub_parent_count_parent_dentries(
- struct xfs_scrub_context *sc,
- struct xfs_inode *parent,
- xfs_nlink_t *nlink)
+xchk_parent_count_parent_dentries(
+ struct xfs_scrub *sc,
+ struct xfs_inode *parent,
+ xfs_nlink_t *nlink)
{
- struct xfs_scrub_parent_ctx spc = {
- .dc.actor = xfs_scrub_parent_actor,
+ struct xchk_parent_ctx spc = {
+ .dc.actor = xchk_parent_actor,
.dc.pos = 0,
.ino = sc->ip->i_ino,
.nlink = 0,
};
- size_t bufsize;
- loff_t oldpos;
- uint lock_mode;
- int error = 0;
+ size_t bufsize;
+ loff_t oldpos;
+ uint lock_mode;
+ int error = 0;
/*
* If there are any blocks, read-ahead block 0 as we're almost
@@ -120,16 +120,16 @@ out:
* entry pointing back to the inode being scrubbed.
*/
STATIC int
-xfs_scrub_parent_validate(
- struct xfs_scrub_context *sc,
- xfs_ino_t dnum,
- bool *try_again)
+xchk_parent_validate(
+ struct xfs_scrub *sc,
+ xfs_ino_t dnum,
+ bool *try_again)
{
- struct xfs_mount *mp = sc->mp;
- struct xfs_inode *dp = NULL;
- xfs_nlink_t expected_nlink;
- xfs_nlink_t nlink;
- int error = 0;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_inode *dp = NULL;
+ xfs_nlink_t expected_nlink;
+ xfs_nlink_t nlink;
+ int error = 0;
*try_again = false;
@@ -138,7 +138,7 @@ xfs_scrub_parent_validate(
/* '..' must not point to ourselves. */
if (sc->ip->i_ino == dnum) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out;
}
@@ -165,13 +165,13 @@ xfs_scrub_parent_validate(
error = xfs_iget(mp, sc->tp, dnum, XFS_IGET_UNTRUSTED, 0, &dp);
if (error == -EINVAL) {
error = -EFSCORRUPTED;
- xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error);
+ xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error);
goto out;
}
- if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
+ if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out;
if (dp == sc->ip || !S_ISDIR(VFS_I(dp)->i_mode)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out_rele;
}
@@ -183,12 +183,12 @@ xfs_scrub_parent_validate(
* the child inodes.
*/
if (xfs_ilock_nowait(dp, XFS_IOLOCK_SHARED)) {
- error = xfs_scrub_parent_count_parent_dentries(sc, dp, &nlink);
- if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0,
+ error = xchk_parent_count_parent_dentries(sc, dp, &nlink);
+ if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0,
&error))
goto out_unlock;
if (nlink != expected_nlink)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out_unlock;
}
@@ -200,18 +200,18 @@ xfs_scrub_parent_validate(
*/
xfs_iunlock(sc->ip, sc->ilock_flags);
sc->ilock_flags = 0;
- error = xfs_scrub_ilock_inverted(dp, XFS_IOLOCK_SHARED);
+ error = xchk_ilock_inverted(dp, XFS_IOLOCK_SHARED);
if (error)
goto out_rele;
/* Go looking for our dentry. */
- error = xfs_scrub_parent_count_parent_dentries(sc, dp, &nlink);
- if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
+ error = xchk_parent_count_parent_dentries(sc, dp, &nlink);
+ if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out_unlock;
/* Drop the parent lock, relock this inode. */
xfs_iunlock(dp, XFS_IOLOCK_SHARED);
- error = xfs_scrub_ilock_inverted(sc->ip, XFS_IOLOCK_EXCL);
+ error = xchk_ilock_inverted(sc->ip, XFS_IOLOCK_EXCL);
if (error)
goto out_rele;
sc->ilock_flags = XFS_IOLOCK_EXCL;
@@ -225,43 +225,43 @@ xfs_scrub_parent_validate(
/* Look up '..' to see if the inode changed. */
error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out_rele;
/* Drat, parent changed. Try again! */
if (dnum != dp->i_ino) {
- iput(VFS_I(dp));
+ xfs_irele(dp);
*try_again = true;
return 0;
}
- iput(VFS_I(dp));
+ xfs_irele(dp);
/*
* '..' didn't change, so check that there was only one entry
* for us in the parent.
*/
if (nlink != expected_nlink)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
return error;
out_unlock:
xfs_iunlock(dp, XFS_IOLOCK_SHARED);
out_rele:
- iput(VFS_I(dp));
+ xfs_irele(dp);
out:
return error;
}
/* Scrub a parent pointer. */
int
-xfs_scrub_parent(
- struct xfs_scrub_context *sc)
+xchk_parent(
+ struct xfs_scrub *sc)
{
- struct xfs_mount *mp = sc->mp;
- xfs_ino_t dnum;
- bool try_again;
- int tries = 0;
- int error = 0;
+ struct xfs_mount *mp = sc->mp;
+ xfs_ino_t dnum;
+ bool try_again;
+ int tries = 0;
+ int error = 0;
/*
* If we're a directory, check that the '..' link points up to
@@ -272,7 +272,7 @@ xfs_scrub_parent(
/* We're not a special inode, are we? */
if (!xfs_verify_dir_ino(mp, sc->ip->i_ino)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out;
}
@@ -288,10 +288,10 @@ xfs_scrub_parent(
/* Look up '..' */
error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out;
if (!xfs_verify_dir_ino(mp, dnum)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out;
}
@@ -299,12 +299,12 @@ xfs_scrub_parent(
if (sc->ip == mp->m_rootip) {
if (sc->ip->i_ino != mp->m_sb.sb_rootino ||
sc->ip->i_ino != dnum)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out;
}
do {
- error = xfs_scrub_parent_validate(sc, dnum, &try_again);
+ error = xchk_parent_validate(sc, dnum, &try_again);
if (error)
goto out;
} while (try_again && ++tries < 20);
@@ -314,7 +314,7 @@ xfs_scrub_parent(
* incomplete. Userspace can decide if it wants to try again.
*/
if (try_again && tries == 20)
- xfs_scrub_set_incomplete(sc);
+ xchk_set_incomplete(sc);
out:
/*
* If we failed to lock the parent inode even after a retry, just mark
@@ -322,7 +322,7 @@ out:
*/
if (sc->try_harder && error == -EDEADLOCK) {
error = 0;
- xfs_scrub_set_incomplete(sc);
+ xchk_set_incomplete(sc);
}
return error;
}
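
xchk_parent above bounds its revalidation loop at 20 tries and marks the
scrub incomplete rather than spinning forever if '..' keeps changing
underneath it. A self-contained sketch of that bounded-retry shape, with
validate_once() as a hypothetical stand-in for xchk_parent_validate:

	#include <stdbool.h>
	#include <stdio.h>

	static int validate_once(int pass, bool *try_again)
	{
		/* Pretend the parent changed on the first two passes. */
		*try_again = pass < 2;
		return 0;
	}

	int main(void)
	{
		bool try_again;
		int tries = 0;
		int error;

		do {
			error = validate_once(tries, &try_again);
			if (error)
				return error;
		} while (try_again && ++tries < 20);

		/* Give up gracefully instead of looping forever. */
		if (try_again && tries == 20)
			printf("scan incomplete: parent kept changing\n");
		else
			printf("validated after %d retries\n", tries);
		return 0;
	}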
diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c
index 6ff906aa0a3b..782d582d3edd 100644
--- a/fs/xfs/scrub/quota.c
+++ b/fs/xfs/scrub/quota.c
@@ -30,8 +30,8 @@
/* Convert a scrub type code to a DQ flag, or return 0 if error. */
static inline uint
-xfs_scrub_quota_to_dqtype(
- struct xfs_scrub_context *sc)
+xchk_quota_to_dqtype(
+ struct xfs_scrub *sc)
{
switch (sc->sm->sm_type) {
case XFS_SCRUB_TYPE_UQUOTA:
@@ -47,24 +47,24 @@ xfs_scrub_quota_to_dqtype(
/* Set us up to scrub a quota. */
int
-xfs_scrub_setup_quota(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_quota(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- uint dqtype;
- int error;
+ uint dqtype;
+ int error;
if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp))
return -ENOENT;
- dqtype = xfs_scrub_quota_to_dqtype(sc);
+ dqtype = xchk_quota_to_dqtype(sc);
if (dqtype == 0)
return -EINVAL;
sc->has_quotaofflock = true;
mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
if (!xfs_this_quota_on(sc->mp, dqtype))
return -ENOENT;
- error = xfs_scrub_setup_fs(sc, ip);
+ error = xchk_setup_fs(sc, ip);
if (error)
return error;
sc->ip = xfs_quota_inode(sc->mp, dqtype);
@@ -75,35 +75,35 @@ xfs_scrub_setup_quota(
/* Quotas. */
-struct xfs_scrub_quota_info {
- struct xfs_scrub_context *sc;
- xfs_dqid_t last_id;
+struct xchk_quota_info {
+ struct xfs_scrub *sc;
+ xfs_dqid_t last_id;
};
/* Scrub the fields in an individual quota item. */
STATIC int
-xfs_scrub_quota_item(
- struct xfs_dquot *dq,
- uint dqtype,
- void *priv)
+xchk_quota_item(
+ struct xfs_dquot *dq,
+ uint dqtype,
+ void *priv)
{
- struct xfs_scrub_quota_info *sqi = priv;
- struct xfs_scrub_context *sc = sqi->sc;
- struct xfs_mount *mp = sc->mp;
- struct xfs_disk_dquot *d = &dq->q_core;
- struct xfs_quotainfo *qi = mp->m_quotainfo;
- xfs_fileoff_t offset;
- unsigned long long bsoft;
- unsigned long long isoft;
- unsigned long long rsoft;
- unsigned long long bhard;
- unsigned long long ihard;
- unsigned long long rhard;
- unsigned long long bcount;
- unsigned long long icount;
- unsigned long long rcount;
- xfs_ino_t fs_icount;
- xfs_dqid_t id = be32_to_cpu(d->d_id);
+ struct xchk_quota_info *sqi = priv;
+ struct xfs_scrub *sc = sqi->sc;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_disk_dquot *d = &dq->q_core;
+ struct xfs_quotainfo *qi = mp->m_quotainfo;
+ xfs_fileoff_t offset;
+ unsigned long long bsoft;
+ unsigned long long isoft;
+ unsigned long long rsoft;
+ unsigned long long bhard;
+ unsigned long long ihard;
+ unsigned long long rhard;
+ unsigned long long bcount;
+ unsigned long long icount;
+ unsigned long long rcount;
+ xfs_ino_t fs_icount;
+ xfs_dqid_t id = be32_to_cpu(d->d_id);
/*
* Except for the root dquot, the actual dquot we got must either have
@@ -111,16 +111,16 @@ xfs_scrub_quota_item(
*/
offset = id / qi->qi_dqperchunk;
if (id && id <= sqi->last_id)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
sqi->last_id = id;
/* Did we get the dquot type we wanted? */
if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/* Check the limits. */
bhard = be64_to_cpu(d->d_blk_hardlimit);
@@ -140,19 +140,19 @@ xfs_scrub_quota_item(
* the hard limit.
*/
if (bhard > mp->m_sb.sb_dblocks)
- xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (bsoft > bhard)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
if (ihard > mp->m_maxicount)
- xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (isoft > ihard)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
if (rhard > mp->m_sb.sb_rblocks)
- xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (rsoft > rhard)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/* Check the resource counts. */
bcount = be64_to_cpu(d->d_bcount);
@@ -167,15 +167,15 @@ xfs_scrub_quota_item(
*/
if (xfs_sb_version_hasreflink(&mp->m_sb)) {
if (mp->m_sb.sb_dblocks < bcount)
- xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK,
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK,
offset);
} else {
if (mp->m_sb.sb_dblocks < bcount)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
offset);
}
if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/*
* We can violate the hard limits if the admin suddenly sets a
@@ -183,29 +183,29 @@ xfs_scrub_quota_item(
* admin review.
*/
if (id != 0 && bhard != 0 && bcount > bhard)
- xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (id != 0 && ihard != 0 && icount > ihard)
- xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (id != 0 && rhard != 0 && rcount > rhard)
- xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
return 0;
}
/* Check the quota's data fork. */
STATIC int
-xfs_scrub_quota_data_fork(
- struct xfs_scrub_context *sc)
+xchk_quota_data_fork(
+ struct xfs_scrub *sc)
{
- struct xfs_bmbt_irec irec = { 0 };
- struct xfs_iext_cursor icur;
- struct xfs_quotainfo *qi = sc->mp->m_quotainfo;
- struct xfs_ifork *ifp;
- xfs_fileoff_t max_dqid_off;
- int error = 0;
+ struct xfs_bmbt_irec irec = { 0 };
+ struct xfs_iext_cursor icur;
+ struct xfs_quotainfo *qi = sc->mp->m_quotainfo;
+ struct xfs_ifork *ifp;
+ xfs_fileoff_t max_dqid_off;
+ int error = 0;
/* Invoke the fork scrubber. */
- error = xfs_scrub_metadata_inode_forks(sc);
+ error = xchk_metadata_inode_forks(sc);
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error;
@@ -213,7 +213,7 @@ xfs_scrub_quota_data_fork(
max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
for_each_xfs_iext(ifp, &icur, &irec) {
- if (xfs_scrub_should_terminate(sc, &error))
+ if (xchk_should_terminate(sc, &error))
break;
/*
* delalloc extents or blocks mapped above the highest
@@ -222,7 +222,7 @@ xfs_scrub_quota_data_fork(
if (isnullstartblock(irec.br_startblock) ||
irec.br_startoff > max_dqid_off ||
irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
irec.br_startoff);
break;
}
@@ -233,19 +233,19 @@ xfs_scrub_quota_data_fork(
/* Scrub all of a quota type's items. */
int
-xfs_scrub_quota(
- struct xfs_scrub_context *sc)
+xchk_quota(
+ struct xfs_scrub *sc)
{
- struct xfs_scrub_quota_info sqi;
- struct xfs_mount *mp = sc->mp;
- struct xfs_quotainfo *qi = mp->m_quotainfo;
- uint dqtype;
- int error = 0;
+ struct xchk_quota_info sqi;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_quotainfo *qi = mp->m_quotainfo;
+ uint dqtype;
+ int error = 0;
- dqtype = xfs_scrub_quota_to_dqtype(sc);
+ dqtype = xchk_quota_to_dqtype(sc);
/* Look for problem extents. */
- error = xfs_scrub_quota_data_fork(sc);
+ error = xchk_quota_data_fork(sc);
if (error)
goto out;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
@@ -260,10 +260,10 @@ xfs_scrub_quota(
sc->ilock_flags = 0;
sqi.sc = sc;
sqi.last_id = 0;
- error = xfs_qm_dqiterate(mp, dqtype, xfs_scrub_quota_item, &sqi);
+ error = xfs_qm_dqiterate(mp, dqtype, xchk_quota_item, &sqi);
sc->ilock_flags = XFS_ILOCK_EXCL;
xfs_ilock(sc->ip, sc->ilock_flags);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK,
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
sqi.last_id * qi->qi_dqperchunk, &error))
goto out;
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
index 607a9faa8ecc..e8c82b026083 100644
--- a/fs/xfs/scrub/refcount.c
+++ b/fs/xfs/scrub/refcount.c
@@ -28,11 +28,11 @@
* Set us up to scrub reference count btrees.
*/
int
-xfs_scrub_setup_ag_refcountbt(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_ag_refcountbt(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- return xfs_scrub_setup_ag_btree(sc, ip, false);
+ return xchk_setup_ag_btree(sc, ip, false);
}
/* Reference count btree scrubber. */
@@ -73,22 +73,22 @@ xfs_scrub_setup_ag_refcountbt(
* If the refcount is correct, all the check conditions in the algorithm
* should always hold true. If not, the refcount is incorrect.
*/
-struct xfs_scrub_refcnt_frag {
- struct list_head list;
- struct xfs_rmap_irec rm;
+struct xchk_refcnt_frag {
+ struct list_head list;
+ struct xfs_rmap_irec rm;
};
-struct xfs_scrub_refcnt_check {
- struct xfs_scrub_context *sc;
- struct list_head fragments;
+struct xchk_refcnt_check {
+ struct xfs_scrub *sc;
+ struct list_head fragments;
/* refcount extent we're examining */
- xfs_agblock_t bno;
- xfs_extlen_t len;
- xfs_nlink_t refcount;
+ xfs_agblock_t bno;
+ xfs_extlen_t len;
+ xfs_nlink_t refcount;
/* number of owners seen */
- xfs_nlink_t seen;
+ xfs_nlink_t seen;
};
/*
@@ -99,18 +99,18 @@ struct xfs_scrub_refcnt_check {
* fragments as the refcountbt says we should have.
*/
STATIC int
-xfs_scrub_refcountbt_rmap_check(
+xchk_refcountbt_rmap_check(
struct xfs_btree_cur *cur,
struct xfs_rmap_irec *rec,
void *priv)
{
- struct xfs_scrub_refcnt_check *refchk = priv;
- struct xfs_scrub_refcnt_frag *frag;
+ struct xchk_refcnt_check *refchk = priv;
+ struct xchk_refcnt_frag *frag;
xfs_agblock_t rm_last;
xfs_agblock_t rc_last;
int error = 0;
- if (xfs_scrub_should_terminate(refchk->sc, &error))
+ if (xchk_should_terminate(refchk->sc, &error))
return error;
rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
@@ -118,7 +118,7 @@ xfs_scrub_refcountbt_rmap_check(
/* Confirm that a single-owner refc extent is a CoW stage. */
if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
- xfs_scrub_btree_xref_set_corrupt(refchk->sc, cur, 0);
+ xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
return 0;
}
@@ -135,7 +135,7 @@ xfs_scrub_refcountbt_rmap_check(
* is healthy each rmap_irec we see will be in agbno order
* so we don't need insertion sort here.
*/
- frag = kmem_alloc(sizeof(struct xfs_scrub_refcnt_frag),
+ frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
KM_MAYFAIL);
if (!frag)
return -ENOMEM;
@@ -154,12 +154,12 @@ xfs_scrub_refcountbt_rmap_check(
* we have a refcountbt error.
*/
STATIC void
-xfs_scrub_refcountbt_process_rmap_fragments(
- struct xfs_scrub_refcnt_check *refchk)
+xchk_refcountbt_process_rmap_fragments(
+ struct xchk_refcnt_check *refchk)
{
struct list_head worklist;
- struct xfs_scrub_refcnt_frag *frag;
- struct xfs_scrub_refcnt_frag *n;
+ struct xchk_refcnt_frag *frag;
+ struct xchk_refcnt_frag *n;
xfs_agblock_t bno;
xfs_agblock_t rbno;
xfs_agblock_t next_rbno;
@@ -277,13 +277,13 @@ done:
/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
-xfs_scrub_refcountbt_xref_rmap(
- struct xfs_scrub_context *sc,
+xchk_refcountbt_xref_rmap(
+ struct xfs_scrub *sc,
xfs_agblock_t bno,
xfs_extlen_t len,
xfs_nlink_t refcount)
{
- struct xfs_scrub_refcnt_check refchk = {
+ struct xchk_refcnt_check refchk = {
.sc = sc,
.bno = bno,
.len = len,
@@ -292,11 +292,11 @@ xfs_scrub_refcountbt_xref_rmap(
};
struct xfs_rmap_irec low;
struct xfs_rmap_irec high;
- struct xfs_scrub_refcnt_frag *frag;
- struct xfs_scrub_refcnt_frag *n;
+ struct xchk_refcnt_frag *frag;
+ struct xchk_refcnt_frag *n;
int error;
- if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return;
/* Cross-reference with the rmapbt to confirm the refcount. */
@@ -307,13 +307,13 @@ xfs_scrub_refcountbt_xref_rmap(
INIT_LIST_HEAD(&refchk.fragments);
error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
- &xfs_scrub_refcountbt_rmap_check, &refchk);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ &xchk_refcountbt_rmap_check, &refchk);
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
goto out_free;
- xfs_scrub_refcountbt_process_rmap_fragments(&refchk);
+ xchk_refcountbt_process_rmap_fragments(&refchk);
if (refcount != refchk.seen)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
out_free:
list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
@@ -324,34 +324,34 @@ out_free:
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_refcountbt_xref(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len,
- xfs_nlink_t refcount)
+xchk_refcountbt_xref(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len,
+ xfs_nlink_t refcount)
{
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
- xfs_scrub_xref_is_used_space(sc, agbno, len);
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
- xfs_scrub_refcountbt_xref_rmap(sc, agbno, len, refcount);
+ xchk_xref_is_used_space(sc, agbno, len);
+ xchk_xref_is_not_inode_chunk(sc, agbno, len);
+ xchk_refcountbt_xref_rmap(sc, agbno, len, refcount);
}
/* Scrub a refcountbt record. */
STATIC int
-xfs_scrub_refcountbt_rec(
- struct xfs_scrub_btree *bs,
- union xfs_btree_rec *rec)
+xchk_refcountbt_rec(
+ struct xchk_btree *bs,
+ union xfs_btree_rec *rec)
{
- struct xfs_mount *mp = bs->cur->bc_mp;
- xfs_agblock_t *cow_blocks = bs->private;
- xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
- xfs_agblock_t bno;
- xfs_extlen_t len;
- xfs_nlink_t refcount;
- bool has_cowflag;
- int error = 0;
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ xfs_agblock_t *cow_blocks = bs->private;
+ xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ xfs_agblock_t bno;
+ xfs_extlen_t len;
+ xfs_nlink_t refcount;
+ bool has_cowflag;
+ int error = 0;
bno = be32_to_cpu(rec->refc.rc_startblock);
len = be32_to_cpu(rec->refc.rc_blockcount);
@@ -360,7 +360,7 @@ xfs_scrub_refcountbt_rec(
/* Only CoW records can have refcount == 1. */
has_cowflag = (bno & XFS_REFC_COW_START);
if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (has_cowflag)
(*cow_blocks) += len;
@@ -369,75 +369,75 @@ xfs_scrub_refcountbt_rec(
if (bno + len <= bno ||
!xfs_verify_agbno(mp, agno, bno) ||
!xfs_verify_agbno(mp, agno, bno + len - 1))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (refcount == 0)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
- xfs_scrub_refcountbt_xref(bs->sc, bno, len, refcount);
+ xchk_refcountbt_xref(bs->sc, bno, len, refcount);
return error;
}
/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
-xfs_scrub_refcount_xref_rmap(
- struct xfs_scrub_context *sc,
- struct xfs_owner_info *oinfo,
- xfs_filblks_t cow_blocks)
+xchk_refcount_xref_rmap(
+ struct xfs_scrub *sc,
+ struct xfs_owner_info *oinfo,
+ xfs_filblks_t cow_blocks)
{
- xfs_extlen_t refcbt_blocks = 0;
- xfs_filblks_t blocks;
- int error;
+ xfs_extlen_t refcbt_blocks = 0;
+ xfs_filblks_t blocks;
+ int error;
- if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return;
/* Check that we saw as many refcbt blocks as the rmap knows about. */
error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
- if (!xfs_scrub_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
+ if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
return;
- error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
+ error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
&blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (blocks != refcbt_blocks)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
/* Check that we saw as many cow blocks as the rmap knows about. */
xfs_rmap_ag_owner(oinfo, XFS_RMAP_OWN_COW);
- error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
+ error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
&blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (blocks != cow_blocks)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
/* Scrub the refcount btree for some AG. */
int
-xfs_scrub_refcountbt(
- struct xfs_scrub_context *sc)
+xchk_refcountbt(
+ struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
- xfs_agblock_t cow_blocks = 0;
- int error;
+ struct xfs_owner_info oinfo;
+ xfs_agblock_t cow_blocks = 0;
+ int error;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
- error = xfs_scrub_btree(sc, sc->sa.refc_cur, xfs_scrub_refcountbt_rec,
+ error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
&oinfo, &cow_blocks);
if (error)
return error;
- xfs_scrub_refcount_xref_rmap(sc, &oinfo, cow_blocks);
+ xchk_refcount_xref_rmap(sc, &oinfo, cow_blocks);
return 0;
}
/* xref check that a cow staging extent is marked in the refcountbt. */
void
-xfs_scrub_xref_is_cow_staging(
- struct xfs_scrub_context *sc,
+xchk_xref_is_cow_staging(
+ struct xfs_scrub *sc,
xfs_agblock_t agbno,
xfs_extlen_t len)
{
@@ -446,35 +446,35 @@ xfs_scrub_xref_is_cow_staging(
int has_refcount;
int error;
- if (!sc->sa.refc_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
return;
/* Find the CoW staging extent. */
error = xfs_refcount_lookup_le(sc->sa.refc_cur,
agbno + XFS_REFC_COW_START, &has_refcount);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return;
if (!has_refcount) {
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
return;
}
error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return;
if (!has_refcount) {
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
return;
}
/* CoW flag must be set, refcount must be 1. */
has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
if (!has_cowflag || rc.rc_refcount != 1)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
/* Must be at least as long as what was passed in */
if (rc.rc_blockcount < len)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
/*
@@ -482,20 +482,20 @@ xfs_scrub_xref_is_cow_staging(
* can have multiple owners.
*/
void
-xfs_scrub_xref_is_not_shared(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len)
+xchk_xref_is_not_shared(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
{
- bool shared;
- int error;
+ bool shared;
+ int error;
- if (!sc->sa.refc_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
return;
error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return;
if (shared)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
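
The refcount cross-check in this file is easiest to see with a brute-force model: a refcountbt record (bno, len, refcount) is consistent only if every block of the extent is covered by exactly refcount reverse mappings. The kernel avoids per-block counting by sorting and merging rmap fragments; the plain-C sketch below (illustrative only) states the invariant directly:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Userspace model of the refcountbt/rmapbt cross-check invariant. */
    struct interval { uint64_t start, len; };

    static bool refcount_matches(uint64_t bno, uint64_t len, uint32_t refcount,
                                 const struct interval *rmaps, int nr_rmaps)
    {
            for (uint64_t b = bno; b < bno + len; b++) {
                    uint32_t owners = 0;

                    for (int i = 0; i < nr_rmaps; i++)
                            if (b >= rmaps[i].start &&
                                b < rmaps[i].start + rmaps[i].len)
                                    owners++;
                    if (owners != refcount)
                            return false;
            }
            return true;
    }

    int main(void)
    {
            /* Two mappings cover blocks 10..19 completely. */
            struct interval rmaps[] = { { 5, 15 }, { 10, 10 } };

            printf("%d\n", refcount_matches(10, 10, 2, rmaps, 2)); /* 1 */
            printf("%d\n", refcount_matches(10, 10, 3, rmaps, 2)); /* 0 */
            return 0;
    }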
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index 326be4e8b71e..17cf48564390 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -34,6 +34,7 @@
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
+#include "scrub/bitmap.h"
/*
* Attempt to repair some metadata, if the metadata is corrupt and userspace
@@ -41,21 +42,21 @@
* and will set *fixed to true if it thinks it repaired anything.
*/
int
-xfs_repair_attempt(
- struct xfs_inode *ip,
- struct xfs_scrub_context *sc,
- bool *fixed)
+xrep_attempt(
+ struct xfs_inode *ip,
+ struct xfs_scrub *sc,
+ bool *fixed)
{
- int error = 0;
+ int error = 0;
- trace_xfs_repair_attempt(ip, sc->sm, error);
+ trace_xrep_attempt(ip, sc->sm, error);
- xfs_scrub_ag_btcur_free(&sc->sa);
+ xchk_ag_btcur_free(&sc->sa);
/* Repair whatever's broken. */
ASSERT(sc->ops->repair);
error = sc->ops->repair(sc);
- trace_xfs_repair_done(ip, sc->sm, error);
+ trace_xrep_done(ip, sc->sm, error);
switch (error) {
case 0:
/*
@@ -93,8 +94,8 @@ xfs_repair_attempt(
* structure to track rate limiting information.
*/
void
-xfs_repair_failure(
- struct xfs_mount *mp)
+xrep_failure(
+ struct xfs_mount *mp)
{
xfs_alert_ratelimited(mp,
"Corruption not fixed during online repair. Unmount and run xfs_repair.");
@@ -105,12 +106,12 @@ xfs_repair_failure(
* given mountpoint.
*/
int
-xfs_repair_probe(
- struct xfs_scrub_context *sc)
+xrep_probe(
+ struct xfs_scrub *sc)
{
- int error = 0;
+ int error = 0;
- if (xfs_scrub_should_terminate(sc, &error))
+ if (xchk_should_terminate(sc, &error))
return error;
return 0;
@@ -121,15 +122,18 @@ xfs_repair_probe(
* the btree cursors.
*/
int
-xfs_repair_roll_ag_trans(
- struct xfs_scrub_context *sc)
+xrep_roll_ag_trans(
+ struct xfs_scrub *sc)
{
- int error;
+ int error;
/* Keep the AG header buffers locked so we can keep going. */
- xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
- xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
- xfs_trans_bhold(sc->tp, sc->sa.agfl_bp);
+ if (sc->sa.agi_bp)
+ xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
+ if (sc->sa.agf_bp)
+ xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
+ if (sc->sa.agfl_bp)
+ xfs_trans_bhold(sc->tp, sc->sa.agfl_bp);
/* Roll the transaction. */
error = xfs_trans_roll(&sc->tp);
@@ -137,9 +141,12 @@ xfs_repair_roll_ag_trans(
goto out_release;
/* Join AG headers to the new transaction. */
- xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
- xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);
- xfs_trans_bjoin(sc->tp, sc->sa.agfl_bp);
+ if (sc->sa.agi_bp)
+ xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
+ if (sc->sa.agf_bp)
+ xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);
+ if (sc->sa.agfl_bp)
+ xfs_trans_bjoin(sc->tp, sc->sa.agfl_bp);
return 0;
@@ -149,9 +156,12 @@ out_release:
* buffers will be released during teardown on our way out
* of the kernel.
*/
- xfs_trans_bhold_release(sc->tp, sc->sa.agi_bp);
- xfs_trans_bhold_release(sc->tp, sc->sa.agf_bp);
- xfs_trans_bhold_release(sc->tp, sc->sa.agfl_bp);
+ if (sc->sa.agi_bp)
+ xfs_trans_bhold_release(sc->tp, sc->sa.agi_bp);
+ if (sc->sa.agf_bp)
+ xfs_trans_bhold_release(sc->tp, sc->sa.agf_bp);
+ if (sc->sa.agfl_bp)
+ xfs_trans_bhold_release(sc->tp, sc->sa.agfl_bp);
return error;
}
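
The pattern being hardened above (hold a buffer across a transaction roll, then rejoin it to the new transaction) is the same for every AG header. A kernel-style sketch of the single-buffer case, not compilable on its own; tp and bp stand in for a live transaction and a locked buffer:

    /* Keep bp locked when the old transaction commits... */
    xfs_trans_bhold(tp, bp);

    error = xfs_trans_roll(&tp);
    if (error) {
            /*
             * We still hold bp; release the hold so that teardown can
             * unlock the buffer on the way out of the kernel.
             */
            xfs_trans_bhold_release(tp, bp);
            return error;
    }

    /* ...and attach it to the new transaction. */
    xfs_trans_bjoin(tp, bp);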
@@ -162,10 +172,10 @@ out_release:
* in AG reservations) to construct a whole btree.
*/
bool
-xfs_repair_ag_has_space(
- struct xfs_perag *pag,
- xfs_extlen_t nr_blocks,
- enum xfs_ag_resv_type type)
+xrep_ag_has_space(
+ struct xfs_perag *pag,
+ xfs_extlen_t nr_blocks,
+ enum xfs_ag_resv_type type)
{
return !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) &&
!xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) &&
@@ -178,8 +188,8 @@ xfs_repair_ag_has_space(
* any type of per-AG btree.
*/
xfs_extlen_t
-xfs_repair_calc_ag_resblks(
- struct xfs_scrub_context *sc)
+xrep_calc_ag_resblks(
+ struct xfs_scrub *sc)
{
struct xfs_mount *mp = sc->mp;
struct xfs_scrub_metadata *sm = sc->sm;
@@ -231,7 +241,7 @@ xfs_repair_calc_ag_resblks(
}
xfs_perag_put(pag);
- trace_xfs_repair_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
+ trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
freelen, usedlen);
/*
@@ -270,7 +280,7 @@ xfs_repair_calc_ag_resblks(
rmapbt_sz = 0;
}
- trace_xfs_repair_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
+ trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
inobt_sz, rmapbt_sz, refcbt_sz);
return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
@@ -278,15 +288,15 @@ xfs_repair_calc_ag_resblks(
/* Allocate a block in an AG. */
int
-xfs_repair_alloc_ag_block(
- struct xfs_scrub_context *sc,
- struct xfs_owner_info *oinfo,
- xfs_fsblock_t *fsbno,
- enum xfs_ag_resv_type resv)
+xrep_alloc_ag_block(
+ struct xfs_scrub *sc,
+ struct xfs_owner_info *oinfo,
+ xfs_fsblock_t *fsbno,
+ enum xfs_ag_resv_type resv)
{
- struct xfs_alloc_arg args = {0};
- xfs_agblock_t bno;
- int error;
+ struct xfs_alloc_arg args = {0};
+ xfs_agblock_t bno;
+ int error;
switch (resv) {
case XFS_AG_RESV_AGFL:
@@ -329,8 +339,8 @@ xfs_repair_alloc_ag_block(
/* Initialize a new AG btree root block with zero entries. */
int
-xfs_repair_init_btblock(
- struct xfs_scrub_context *sc,
+xrep_init_btblock(
+ struct xfs_scrub *sc,
xfs_fsblock_t fsb,
struct xfs_buf **bpp,
xfs_btnum_t btnum,
@@ -340,7 +350,7 @@ xfs_repair_init_btblock(
struct xfs_mount *mp = sc->mp;
struct xfs_buf *bp;
- trace_xfs_repair_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
+ trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
XFS_FSB_TO_AGBNO(mp, fsb), btnum);
ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.agno);
@@ -367,222 +377,29 @@ xfs_repair_init_btblock(
*
* However, that leaves the matter of removing all the metadata describing the
* old broken structure. For primary metadata we use the rmap data to collect
- * every extent with a matching rmap owner (exlist); we then iterate all other
+ * every extent with a matching rmap owner (bitmap); we then iterate all other
* metadata structures with the same rmap owner to collect the extents that
- * cannot be removed (sublist). We then subtract sublist from exlist to
+ * cannot be removed (sublist). We then subtract sublist from bitmap to
* derive the blocks that were used by the old btree. These blocks can be
* reaped.
*
* For rmapbt reconstructions we must use different tactics for extent
* collection. First we iterate all primary metadata (this excludes the old
* rmapbt, obviously) to generate new rmap records. The gaps in the rmap
- * records are collected as exlist. The bnobt records are collected as
- * sublist. As with the other btrees we subtract sublist from exlist, and the
+ * records are collected as bitmap. The bnobt records are collected as
+ * sublist. As with the other btrees we subtract sublist from bitmap, and the
* result (since the rmapbt lives in the free space) are the blocks from the
* old rmapbt.
- */
-
-/* Collect a dead btree extent for later disposal. */
-int
-xfs_repair_collect_btree_extent(
- struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *exlist,
- xfs_fsblock_t fsbno,
- xfs_extlen_t len)
-{
- struct xfs_repair_extent *rex;
-
- trace_xfs_repair_collect_btree_extent(sc->mp,
- XFS_FSB_TO_AGNO(sc->mp, fsbno),
- XFS_FSB_TO_AGBNO(sc->mp, fsbno), len);
-
- rex = kmem_alloc(sizeof(struct xfs_repair_extent), KM_MAYFAIL);
- if (!rex)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&rex->list);
- rex->fsbno = fsbno;
- rex->len = len;
- list_add_tail(&rex->list, &exlist->list);
-
- return 0;
-}
-
-/*
- * An error happened during the rebuild so the transaction will be cancelled.
- * The fs will shut down, and the administrator has to unmount and run repair.
- * Therefore, free all the memory associated with the list so we can die.
- */
-void
-xfs_repair_cancel_btree_extents(
- struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *exlist)
-{
- struct xfs_repair_extent *rex;
- struct xfs_repair_extent *n;
-
- for_each_xfs_repair_extent_safe(rex, n, exlist) {
- list_del(&rex->list);
- kmem_free(rex);
- }
-}
-
-/* Compare two btree extents. */
-static int
-xfs_repair_btree_extent_cmp(
- void *priv,
- struct list_head *a,
- struct list_head *b)
-{
- struct xfs_repair_extent *ap;
- struct xfs_repair_extent *bp;
-
- ap = container_of(a, struct xfs_repair_extent, list);
- bp = container_of(b, struct xfs_repair_extent, list);
-
- if (ap->fsbno > bp->fsbno)
- return 1;
- if (ap->fsbno < bp->fsbno)
- return -1;
- return 0;
-}
-
-/*
- * Remove all the blocks mentioned in @sublist from the extents in @exlist.
*
- * The intent is that callers will iterate the rmapbt for all of its records
- * for a given owner to generate @exlist; and iterate all the blocks of the
- * metadata structures that are not being rebuilt and have the same rmapbt
- * owner to generate @sublist. This routine subtracts all the extents
- * mentioned in sublist from all the extents linked in @exlist, which leaves
- * @exlist as the list of blocks that are not accounted for, which we assume
- * are the dead blocks of the old metadata structure. The blocks mentioned in
- * @exlist can be reaped.
- */
-#define LEFT_ALIGNED (1 << 0)
-#define RIGHT_ALIGNED (1 << 1)
-int
-xfs_repair_subtract_extents(
- struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *exlist,
- struct xfs_repair_extent_list *sublist)
-{
- struct list_head *lp;
- struct xfs_repair_extent *ex;
- struct xfs_repair_extent *newex;
- struct xfs_repair_extent *subex;
- xfs_fsblock_t sub_fsb;
- xfs_extlen_t sub_len;
- int state;
- int error = 0;
-
- if (list_empty(&exlist->list) || list_empty(&sublist->list))
- return 0;
- ASSERT(!list_empty(&sublist->list));
-
- list_sort(NULL, &exlist->list, xfs_repair_btree_extent_cmp);
- list_sort(NULL, &sublist->list, xfs_repair_btree_extent_cmp);
-
- /*
- * Now that we've sorted both lists, we iterate exlist once, rolling
- * forward through sublist and/or exlist as necessary until we find an
- * overlap or reach the end of either list. We do not reset lp to the
- * head of exlist nor do we reset subex to the head of sublist. The
- * list traversal is similar to merge sort, but we're deleting
- * instead. In this manner we avoid O(n^2) operations.
- */
- subex = list_first_entry(&sublist->list, struct xfs_repair_extent,
- list);
- lp = exlist->list.next;
- while (lp != &exlist->list) {
- ex = list_entry(lp, struct xfs_repair_extent, list);
-
- /*
- * Advance subex and/or ex until we find a pair that
- * intersect or we run out of extents.
- */
- while (subex->fsbno + subex->len <= ex->fsbno) {
- if (list_is_last(&subex->list, &sublist->list))
- goto out;
- subex = list_next_entry(subex, list);
- }
- if (subex->fsbno >= ex->fsbno + ex->len) {
- lp = lp->next;
- continue;
- }
-
- /* trim subex to fit the extent we have */
- sub_fsb = subex->fsbno;
- sub_len = subex->len;
- if (subex->fsbno < ex->fsbno) {
- sub_len -= ex->fsbno - subex->fsbno;
- sub_fsb = ex->fsbno;
- }
- if (sub_len > ex->len)
- sub_len = ex->len;
-
- state = 0;
- if (sub_fsb == ex->fsbno)
- state |= LEFT_ALIGNED;
- if (sub_fsb + sub_len == ex->fsbno + ex->len)
- state |= RIGHT_ALIGNED;
- switch (state) {
- case LEFT_ALIGNED:
- /* Coincides with only the left. */
- ex->fsbno += sub_len;
- ex->len -= sub_len;
- break;
- case RIGHT_ALIGNED:
- /* Coincides with only the right. */
- ex->len -= sub_len;
- lp = lp->next;
- break;
- case LEFT_ALIGNED | RIGHT_ALIGNED:
- /* Total overlap, just delete ex. */
- lp = lp->next;
- list_del(&ex->list);
- kmem_free(ex);
- break;
- case 0:
- /*
- * Deleting from the middle: add the new right extent
- * and then shrink the left extent.
- */
- newex = kmem_alloc(sizeof(struct xfs_repair_extent),
- KM_MAYFAIL);
- if (!newex) {
- error = -ENOMEM;
- goto out;
- }
- INIT_LIST_HEAD(&newex->list);
- newex->fsbno = sub_fsb + sub_len;
- newex->len = ex->fsbno + ex->len - newex->fsbno;
- list_add(&newex->list, &ex->list);
- ex->len = sub_fsb - ex->fsbno;
- lp = lp->next;
- break;
- default:
- ASSERT(0);
- break;
- }
- }
-
-out:
- return error;
-}
-#undef LEFT_ALIGNED
-#undef RIGHT_ALIGNED
-
-/*
* Disposal of Blocks from Old per-AG Btrees
*
* Now that we've constructed a new btree to replace the damaged one, we want
* to dispose of the blocks that (we think) the old btree was using.
- * Previously, we used the rmapbt to collect the extents (exlist) with the
+ * Previously, we used the rmapbt to collect the extents (bitmap) with the
* rmap owner corresponding to the tree we rebuilt, collected extents for any
* blocks with the same rmap owner that are owned by another data structure
- * (sublist), and subtracted sublist from exlist. In theory the extents
- * remaining in exlist are the old btree's blocks.
+ * (sublist), and subtracted sublist from bitmap. In theory the extents
+ * remaining in bitmap are the old btree's blocks.
*
* Unfortunately, it's possible that the btree was crosslinked with other
* blocks on disk. The rmap data can tell us if there are multiple owners, so
@@ -598,7 +415,7 @@ out:
* If there are no rmap records at all, we also free the block. If the btree
* being rebuilt lives in the free space (bnobt/cntbt/rmapbt) then there isn't
 * supposed to be an rmap record and everything is ok. For other btrees there
- * had to have been an rmap entry for the block to have ended up on @exlist,
+ * had to have been an rmap entry for the block to have ended up on @bitmap,
* so if it's gone now there's something wrong and the fs will shut down.
*
* Note: If there are multiple rmap records with only the same rmap owner as
@@ -611,7 +428,7 @@ out:
* The caller is responsible for locking the AG headers for the entire rebuild
* operation so that nothing else can sneak in and change the AG state while
* we're not looking. We also assume that the caller already invalidated any
- * buffers associated with @exlist.
+ * buffers associated with @bitmap.
*/
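
Restating the comment above as caller-shaped steps: collect the rmap owner's extents into a bitmap, clear the extents that other structures still use, and reap what remains. The sketch below models that computation in plain C with a flat block array; the kernel's struct xfs_bitmap stores extent ranges instead, but the set arithmetic is the same:

    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace model of the reap computation; not kernel code. */
    #define NBLOCKS 64

    static void mark(bool *map, unsigned start, unsigned len, bool val)
    {
            for (unsigned b = start; b < start + len && b < NBLOCKS; b++)
                    map[b] = val;
    }

    int main(void)
    {
            bool bitmap[NBLOCKS] = { false };

            mark(bitmap, 8, 16, true);  /* rmap-owner extents: 8..23 */
            mark(bitmap, 12, 4, false); /* still in use elsewhere: 12..15 */

            printf("blocks to reap:");
            for (unsigned b = 0; b < NBLOCKS; b++)
                    if (bitmap[b])
                            printf(" %u", b);
            printf("\n");   /* 8 9 10 11 16 17 ... 23 */
            return 0;
    }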
/*
@@ -619,15 +436,14 @@ out:
* is not intended for use with file data repairs; we have bunmapi for that.
*/
int
-xfs_repair_invalidate_blocks(
- struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *exlist)
+xrep_invalidate_blocks(
+ struct xfs_scrub *sc,
+ struct xfs_bitmap *bitmap)
{
- struct xfs_repair_extent *rex;
- struct xfs_repair_extent *n;
- struct xfs_buf *bp;
- xfs_fsblock_t fsbno;
- xfs_agblock_t i;
+ struct xfs_bitmap_range *bmr;
+ struct xfs_bitmap_range *n;
+ struct xfs_buf *bp;
+ xfs_fsblock_t fsbno;
/*
* For each block in each extent, see if there's an incore buffer for
@@ -637,18 +453,16 @@ xfs_repair_invalidate_blocks(
* because we never own those; and if we can't TRYLOCK the buffer we
* assume it's owned by someone else.
*/
- for_each_xfs_repair_extent_safe(rex, n, exlist) {
- for (fsbno = rex->fsbno, i = rex->len; i > 0; fsbno++, i--) {
- /* Skip AG headers and post-EOFS blocks */
- if (!xfs_verify_fsbno(sc->mp, fsbno))
- continue;
- bp = xfs_buf_incore(sc->mp->m_ddev_targp,
- XFS_FSB_TO_DADDR(sc->mp, fsbno),
- XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK);
- if (bp) {
- xfs_trans_bjoin(sc->tp, bp);
- xfs_trans_binval(sc->tp, bp);
- }
+ for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) {
+ /* Skip AG headers and post-EOFS blocks */
+ if (!xfs_verify_fsbno(sc->mp, fsbno))
+ continue;
+ bp = xfs_buf_incore(sc->mp->m_ddev_targp,
+ XFS_FSB_TO_DADDR(sc->mp, fsbno),
+ XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK);
+ if (bp) {
+ xfs_trans_bjoin(sc->tp, bp);
+ xfs_trans_binval(sc->tp, bp);
}
}
@@ -657,11 +471,11 @@ xfs_repair_invalidate_blocks(
/* Ensure the freelist is the correct size. */
int
-xfs_repair_fix_freelist(
- struct xfs_scrub_context *sc,
- bool can_shrink)
+xrep_fix_freelist(
+ struct xfs_scrub *sc,
+ bool can_shrink)
{
- struct xfs_alloc_arg args = {0};
+ struct xfs_alloc_arg args = {0};
args.mp = sc->mp;
args.tp = sc->tp;
@@ -677,15 +491,15 @@ xfs_repair_fix_freelist(
* Put a block back on the AGFL.
*/
STATIC int
-xfs_repair_put_freelist(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno)
+xrep_put_freelist(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno)
{
- struct xfs_owner_info oinfo;
- int error;
+ struct xfs_owner_info oinfo;
+ int error;
/* Make sure there's space on the freelist. */
- error = xfs_repair_fix_freelist(sc, true);
+ error = xrep_fix_freelist(sc, true);
if (error)
return error;
@@ -711,20 +525,20 @@ xfs_repair_put_freelist(
return 0;
}
-/* Dispose of a single metadata block. */
+/* Dispose of a single block. */
STATIC int
-xfs_repair_dispose_btree_block(
- struct xfs_scrub_context *sc,
- xfs_fsblock_t fsbno,
- struct xfs_owner_info *oinfo,
- enum xfs_ag_resv_type resv)
+xrep_reap_block(
+ struct xfs_scrub *sc,
+ xfs_fsblock_t fsbno,
+ struct xfs_owner_info *oinfo,
+ enum xfs_ag_resv_type resv)
{
- struct xfs_btree_cur *cur;
- struct xfs_buf *agf_bp = NULL;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- bool has_other_rmap;
- int error;
+ struct xfs_btree_cur *cur;
+ struct xfs_buf *agf_bp = NULL;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ bool has_other_rmap;
+ int error;
agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
@@ -747,9 +561,9 @@ xfs_repair_dispose_btree_block(
/* Can we find any other rmappings? */
error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap);
+ xfs_btree_del_cursor(cur, error);
if (error)
- goto out_cur;
- xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+ goto out_free;
/*
 * If there are other rmappings, this block is crosslinked and must
@@ -767,7 +581,7 @@ xfs_repair_dispose_btree_block(
if (has_other_rmap)
error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1, oinfo);
else if (resv == XFS_AG_RESV_AGFL)
- error = xfs_repair_put_freelist(sc, agbno);
+ error = xrep_put_freelist(sc, agbno);
else
error = xfs_free_extent(sc->tp, fsbno, 1, oinfo, resv);
if (agf_bp != sc->sa.agf_bp)
@@ -777,50 +591,43 @@ xfs_repair_dispose_btree_block(
if (sc->ip)
return xfs_trans_roll_inode(&sc->tp, sc->ip);
- return xfs_repair_roll_ag_trans(sc);
+ return xrep_roll_ag_trans(sc);
-out_cur:
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+out_free:
if (agf_bp != sc->sa.agf_bp)
xfs_trans_brelse(sc->tp, agf_bp);
return error;
}
-/* Dispose of btree blocks from an old per-AG btree. */
+/* Dispose of every block of every extent in the bitmap. */
int
-xfs_repair_reap_btree_extents(
- struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *exlist,
- struct xfs_owner_info *oinfo,
- enum xfs_ag_resv_type type)
+xrep_reap_extents(
+ struct xfs_scrub *sc,
+ struct xfs_bitmap *bitmap,
+ struct xfs_owner_info *oinfo,
+ enum xfs_ag_resv_type type)
{
- struct xfs_repair_extent *rex;
- struct xfs_repair_extent *n;
- int error = 0;
+ struct xfs_bitmap_range *bmr;
+ struct xfs_bitmap_range *n;
+ xfs_fsblock_t fsbno;
+ int error = 0;
ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));
- /* Dispose of every block from the old btree. */
- for_each_xfs_repair_extent_safe(rex, n, exlist) {
+ for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) {
ASSERT(sc->ip != NULL ||
- XFS_FSB_TO_AGNO(sc->mp, rex->fsbno) == sc->sa.agno);
+ XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.agno);
+ trace_xrep_dispose_btree_extent(sc->mp,
+ XFS_FSB_TO_AGNO(sc->mp, fsbno),
+ XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1);
- trace_xfs_repair_dispose_btree_extent(sc->mp,
- XFS_FSB_TO_AGNO(sc->mp, rex->fsbno),
- XFS_FSB_TO_AGBNO(sc->mp, rex->fsbno), rex->len);
-
- for (; rex->len > 0; rex->len--, rex->fsbno++) {
- error = xfs_repair_dispose_btree_block(sc, rex->fsbno,
- oinfo, type);
- if (error)
- goto out;
- }
- list_del(&rex->list);
- kmem_free(rex);
+ error = xrep_reap_block(sc, fsbno, oinfo, type);
+ if (error)
+ goto out;
}
out:
- xfs_repair_cancel_btree_extents(sc, exlist);
+ xfs_bitmap_destroy(bitmap);
return error;
}
@@ -832,12 +639,12 @@ out:
* btree roots. This is not guaranteed to work if the AG is heavily damaged
* or the rmap data are corrupt.
*
- * Callers of xfs_repair_find_ag_btree_roots must lock the AGF and AGFL
+ * Callers of xrep_find_ag_btree_roots must lock the AGF and AGFL
* buffers if the AGF is being rebuilt; or the AGF and AGI buffers if the
* AGI is being rebuilt. It must maintain these locks until it's safe for
* other threads to change the btrees' shapes. The caller provides
* information about the btrees to look for by passing in an array of
- * xfs_repair_find_ag_btree with the (rmap owner, buf_ops, magic) fields set.
+ * xrep_find_ag_btree with the (rmap owner, buf_ops, magic) fields set.
* The (root, height) fields will be set on return if anything is found. The
* last element of the array should have a NULL buf_ops to mark the end of the
* array.
@@ -851,30 +658,30 @@ out:
* should be the roots.
*/
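
A hypothetical caller might fill in the array like this (kernel-style sketch, not compilable standalone; XFS_RMAP_OWN_AG, xfs_allocbt_buf_ops, and the *_CRC_MAGIC values are the stock XFS ones, everything else is illustrative):

    struct xrep_find_ag_btree fab[] = {
            {
                    .rmap_owner = XFS_RMAP_OWN_AG,
                    .buf_ops    = &xfs_allocbt_buf_ops,
                    .magic      = XFS_ABTB_CRC_MAGIC,       /* bnobt */
            },
            {
                    .rmap_owner = XFS_RMAP_OWN_AG,
                    .buf_ops    = &xfs_allocbt_buf_ops,
                    .magic      = XFS_ABTC_CRC_MAGIC,       /* cntbt */
            },
            { .buf_ops = NULL },    /* terminates the array */
    };

    error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
    if (!error) {
            /* On success, the root/height fields describe each tree. */
            bnobt_root = fab[0].root;
            cntbt_root = fab[1].root;
    }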
-struct xfs_repair_findroot {
- struct xfs_scrub_context *sc;
+struct xrep_findroot {
+ struct xfs_scrub *sc;
struct xfs_buf *agfl_bp;
struct xfs_agf *agf;
- struct xfs_repair_find_ag_btree *btree_info;
+ struct xrep_find_ag_btree *btree_info;
};
/* See if our block is in the AGFL. */
STATIC int
-xfs_repair_findroot_agfl_walk(
- struct xfs_mount *mp,
- xfs_agblock_t bno,
- void *priv)
+xrep_findroot_agfl_walk(
+ struct xfs_mount *mp,
+ xfs_agblock_t bno,
+ void *priv)
{
- xfs_agblock_t *agbno = priv;
+ xfs_agblock_t *agbno = priv;
return (*agbno == bno) ? XFS_BTREE_QUERY_RANGE_ABORT : 0;
}
/* Does this block match the btree information passed in? */
STATIC int
-xfs_repair_findroot_block(
- struct xfs_repair_findroot *ri,
- struct xfs_repair_find_ag_btree *fab,
+xrep_findroot_block(
+ struct xrep_findroot *ri,
+ struct xrep_find_ag_btree *fab,
uint64_t owner,
xfs_agblock_t agbno,
bool *found_it)
@@ -895,7 +702,7 @@ xfs_repair_findroot_block(
*/
if (owner == XFS_RMAP_OWN_AG) {
error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp,
- xfs_repair_findroot_agfl_walk, &agbno);
+ xrep_findroot_agfl_walk, &agbno);
if (error == XFS_BTREE_QUERY_RANGE_ABORT)
return 0;
if (error)
@@ -933,7 +740,7 @@ xfs_repair_findroot_block(
fab->height = xfs_btree_get_level(btblock) + 1;
*found_it = true;
- trace_xfs_repair_findroot_block(mp, ri->sc->sa.agno, agbno,
+ trace_xrep_findroot_block(mp, ri->sc->sa.agno, agbno,
be32_to_cpu(btblock->bb_magic), fab->height - 1);
out:
xfs_trans_brelse(ri->sc->tp, bp);
@@ -945,13 +752,13 @@ out:
* looking for?
*/
STATIC int
-xfs_repair_findroot_rmap(
+xrep_findroot_rmap(
struct xfs_btree_cur *cur,
struct xfs_rmap_irec *rec,
void *priv)
{
- struct xfs_repair_findroot *ri = priv;
- struct xfs_repair_find_ag_btree *fab;
+ struct xrep_findroot *ri = priv;
+ struct xrep_find_ag_btree *fab;
xfs_agblock_t b;
bool found_it;
int error = 0;
@@ -966,7 +773,7 @@ xfs_repair_findroot_rmap(
for (fab = ri->btree_info; fab->buf_ops; fab++) {
if (rec->rm_owner != fab->rmap_owner)
continue;
- error = xfs_repair_findroot_block(ri, fab,
+ error = xrep_findroot_block(ri, fab,
rec->rm_owner, rec->rm_startblock + b,
&found_it);
if (error)
@@ -981,15 +788,15 @@ xfs_repair_findroot_rmap(
/* Find the roots of the per-AG btrees described in btree_info. */
int
-xfs_repair_find_ag_btree_roots(
- struct xfs_scrub_context *sc,
+xrep_find_ag_btree_roots(
+ struct xfs_scrub *sc,
struct xfs_buf *agf_bp,
- struct xfs_repair_find_ag_btree *btree_info,
+ struct xrep_find_ag_btree *btree_info,
struct xfs_buf *agfl_bp)
{
struct xfs_mount *mp = sc->mp;
- struct xfs_repair_findroot ri;
- struct xfs_repair_find_ag_btree *fab;
+ struct xrep_findroot ri;
+ struct xrep_find_ag_btree *fab;
struct xfs_btree_cur *cur;
int error;
@@ -1008,19 +815,19 @@ xfs_repair_find_ag_btree_roots(
}
cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
- error = xfs_rmap_query_all(cur, xfs_repair_findroot_rmap, &ri);
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
+ xfs_btree_del_cursor(cur, error);
return error;
}
/* Force a quotacheck the next time we mount. */
void
-xfs_repair_force_quotacheck(
- struct xfs_scrub_context *sc,
- uint dqtype)
+xrep_force_quotacheck(
+ struct xfs_scrub *sc,
+ uint dqtype)
{
- uint flag;
+ uint flag;
flag = xfs_quota_chkd_flag(dqtype);
if (!(flag & sc->mp->m_qflags))
@@ -1044,10 +851,10 @@ xfs_repair_force_quotacheck(
* repair corruptions in the quota metadata.
*/
int
-xfs_repair_ino_dqattach(
- struct xfs_scrub_context *sc)
+xrep_ino_dqattach(
+ struct xfs_scrub *sc)
{
- int error;
+ int error;
error = xfs_qm_dqattach_locked(sc->ip, false);
switch (error) {
@@ -1058,11 +865,11 @@ xfs_repair_ino_dqattach(
"inode %llu repair encountered quota error %d, quotacheck forced.",
(unsigned long long)sc->ip->i_ino, error);
if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot)
- xfs_repair_force_quotacheck(sc, XFS_DQ_USER);
+ xrep_force_quotacheck(sc, XFS_DQ_USER);
if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot)
- xfs_repair_force_quotacheck(sc, XFS_DQ_GROUP);
+ xrep_force_quotacheck(sc, XFS_DQ_GROUP);
if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot)
- xfs_repair_force_quotacheck(sc, XFS_DQ_PROJ);
+ xrep_force_quotacheck(sc, XFS_DQ_PROJ);
/* fall through */
case -ESRCH:
error = 0;
diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h
index ef47826b6725..9de321eee4ab 100644
--- a/fs/xfs/scrub/repair.h
+++ b/fs/xfs/scrub/repair.h
@@ -6,7 +6,7 @@
#ifndef __XFS_SCRUB_REPAIR_H__
#define __XFS_SCRUB_REPAIR_H__
-static inline int xfs_repair_notsupported(struct xfs_scrub_context *sc)
+static inline int xrep_notsupported(struct xfs_scrub *sc)
{
return -EOPNOTSUPP;
}
@@ -15,55 +15,26 @@ static inline int xfs_repair_notsupported(struct xfs_scrub_context *sc)
/* Repair helpers */
-int xfs_repair_attempt(struct xfs_inode *ip, struct xfs_scrub_context *sc,
- bool *fixed);
-void xfs_repair_failure(struct xfs_mount *mp);
-int xfs_repair_roll_ag_trans(struct xfs_scrub_context *sc);
-bool xfs_repair_ag_has_space(struct xfs_perag *pag, xfs_extlen_t nr_blocks,
+int xrep_attempt(struct xfs_inode *ip, struct xfs_scrub *sc, bool *fixed);
+void xrep_failure(struct xfs_mount *mp);
+int xrep_roll_ag_trans(struct xfs_scrub *sc);
+bool xrep_ag_has_space(struct xfs_perag *pag, xfs_extlen_t nr_blocks,
enum xfs_ag_resv_type type);
-xfs_extlen_t xfs_repair_calc_ag_resblks(struct xfs_scrub_context *sc);
-int xfs_repair_alloc_ag_block(struct xfs_scrub_context *sc,
- struct xfs_owner_info *oinfo, xfs_fsblock_t *fsbno,
- enum xfs_ag_resv_type resv);
-int xfs_repair_init_btblock(struct xfs_scrub_context *sc, xfs_fsblock_t fsb,
+xfs_extlen_t xrep_calc_ag_resblks(struct xfs_scrub *sc);
+int xrep_alloc_ag_block(struct xfs_scrub *sc, struct xfs_owner_info *oinfo,
+ xfs_fsblock_t *fsbno, enum xfs_ag_resv_type resv);
+int xrep_init_btblock(struct xfs_scrub *sc, xfs_fsblock_t fsb,
struct xfs_buf **bpp, xfs_btnum_t btnum,
const struct xfs_buf_ops *ops);
-struct xfs_repair_extent {
- struct list_head list;
- xfs_fsblock_t fsbno;
- xfs_extlen_t len;
-};
-
-struct xfs_repair_extent_list {
- struct list_head list;
-};
-
-static inline void
-xfs_repair_init_extent_list(
- struct xfs_repair_extent_list *exlist)
-{
- INIT_LIST_HEAD(&exlist->list);
-}
+struct xfs_bitmap;
-#define for_each_xfs_repair_extent_safe(rbe, n, exlist) \
- list_for_each_entry_safe((rbe), (n), &(exlist)->list, list)
-int xfs_repair_collect_btree_extent(struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *btlist, xfs_fsblock_t fsbno,
- xfs_extlen_t len);
-void xfs_repair_cancel_btree_extents(struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *btlist);
-int xfs_repair_subtract_extents(struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *exlist,
- struct xfs_repair_extent_list *sublist);
-int xfs_repair_fix_freelist(struct xfs_scrub_context *sc, bool can_shrink);
-int xfs_repair_invalidate_blocks(struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *btlist);
-int xfs_repair_reap_btree_extents(struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *exlist,
+int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink);
+int xrep_invalidate_blocks(struct xfs_scrub *sc, struct xfs_bitmap *btlist);
+int xrep_reap_extents(struct xfs_scrub *sc, struct xfs_bitmap *exlist,
struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type);
-struct xfs_repair_find_ag_btree {
+struct xrep_find_ag_btree {
/* in: rmap owner of the btree we're looking for */
uint64_t rmap_owner;
@@ -78,40 +49,44 @@ struct xfs_repair_find_ag_btree {
unsigned int height;
};
-int xfs_repair_find_ag_btree_roots(struct xfs_scrub_context *sc,
- struct xfs_buf *agf_bp,
- struct xfs_repair_find_ag_btree *btree_info,
- struct xfs_buf *agfl_bp);
-void xfs_repair_force_quotacheck(struct xfs_scrub_context *sc, uint dqtype);
-int xfs_repair_ino_dqattach(struct xfs_scrub_context *sc);
+int xrep_find_ag_btree_roots(struct xfs_scrub *sc, struct xfs_buf *agf_bp,
+ struct xrep_find_ag_btree *btree_info, struct xfs_buf *agfl_bp);
+void xrep_force_quotacheck(struct xfs_scrub *sc, uint dqtype);
+int xrep_ino_dqattach(struct xfs_scrub *sc);
/* Metadata repairers */
-int xfs_repair_probe(struct xfs_scrub_context *sc);
-int xfs_repair_superblock(struct xfs_scrub_context *sc);
+int xrep_probe(struct xfs_scrub *sc);
+int xrep_superblock(struct xfs_scrub *sc);
+int xrep_agf(struct xfs_scrub *sc);
+int xrep_agfl(struct xfs_scrub *sc);
+int xrep_agi(struct xfs_scrub *sc);
#else
-static inline int xfs_repair_attempt(
- struct xfs_inode *ip,
- struct xfs_scrub_context *sc,
- bool *fixed)
+static inline int xrep_attempt(
+ struct xfs_inode *ip,
+ struct xfs_scrub *sc,
+ bool *fixed)
{
return -EOPNOTSUPP;
}
-static inline void xfs_repair_failure(struct xfs_mount *mp) {}
+static inline void xrep_failure(struct xfs_mount *mp) {}
static inline xfs_extlen_t
-xfs_repair_calc_ag_resblks(
- struct xfs_scrub_context *sc)
+xrep_calc_ag_resblks(
+ struct xfs_scrub *sc)
{
ASSERT(!(sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR));
return 0;
}
-#define xfs_repair_probe xfs_repair_notsupported
-#define xfs_repair_superblock xfs_repair_notsupported
+#define xrep_probe xrep_notsupported
+#define xrep_superblock xrep_notsupported
+#define xrep_agf xrep_notsupported
+#define xrep_agfl xrep_notsupported
+#define xrep_agi xrep_notsupported
#endif /* CONFIG_XFS_ONLINE_REPAIR */
diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c
index c6d763236ba7..5e293c129813 100644
--- a/fs/xfs/scrub/rmap.c
+++ b/fs/xfs/scrub/rmap.c
@@ -29,30 +29,30 @@
* Set us up to scrub reverse mapping btrees.
*/
int
-xfs_scrub_setup_ag_rmapbt(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_ag_rmapbt(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- return xfs_scrub_setup_ag_btree(sc, ip, false);
+ return xchk_setup_ag_btree(sc, ip, false);
}
/* Reverse-mapping scrubber. */
/* Cross-reference a rmap against the refcount btree. */
STATIC void
-xfs_scrub_rmapbt_xref_refc(
- struct xfs_scrub_context *sc,
- struct xfs_rmap_irec *irec)
+xchk_rmapbt_xref_refc(
+ struct xfs_scrub *sc,
+ struct xfs_rmap_irec *irec)
{
- xfs_agblock_t fbno;
- xfs_extlen_t flen;
- bool non_inode;
- bool is_bmbt;
- bool is_attr;
- bool is_unwritten;
- int error;
-
- if (!sc->sa.refc_cur || xfs_scrub_skip_xref(sc->sm))
+ xfs_agblock_t fbno;
+ xfs_extlen_t flen;
+ bool non_inode;
+ bool is_bmbt;
+ bool is_attr;
+ bool is_unwritten;
+ int error;
+
+ if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
return;
non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
@@ -63,58 +63,58 @@ xfs_scrub_rmapbt_xref_refc(
/* If this is shared, must be a data fork extent. */
error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
irec->rm_blockcount, &fbno, &flen, false);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return;
if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten))
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_rmapbt_xref(
- struct xfs_scrub_context *sc,
- struct xfs_rmap_irec *irec)
+xchk_rmapbt_xref(
+ struct xfs_scrub *sc,
+ struct xfs_rmap_irec *irec)
{
- xfs_agblock_t agbno = irec->rm_startblock;
- xfs_extlen_t len = irec->rm_blockcount;
+ xfs_agblock_t agbno = irec->rm_startblock;
+ xfs_extlen_t len = irec->rm_blockcount;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
- xfs_scrub_xref_is_used_space(sc, agbno, len);
+ xchk_xref_is_used_space(sc, agbno, len);
if (irec->rm_owner == XFS_RMAP_OWN_INODES)
- xfs_scrub_xref_is_inode_chunk(sc, agbno, len);
+ xchk_xref_is_inode_chunk(sc, agbno, len);
else
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
+ xchk_xref_is_not_inode_chunk(sc, agbno, len);
if (irec->rm_owner == XFS_RMAP_OWN_COW)
- xfs_scrub_xref_is_cow_staging(sc, irec->rm_startblock,
+ xchk_xref_is_cow_staging(sc, irec->rm_startblock,
irec->rm_blockcount);
else
- xfs_scrub_rmapbt_xref_refc(sc, irec);
+ xchk_rmapbt_xref_refc(sc, irec);
}
/* Scrub an rmapbt record. */
STATIC int
-xfs_scrub_rmapbt_rec(
- struct xfs_scrub_btree *bs,
- union xfs_btree_rec *rec)
+xchk_rmapbt_rec(
+ struct xchk_btree *bs,
+ union xfs_btree_rec *rec)
{
- struct xfs_mount *mp = bs->cur->bc_mp;
- struct xfs_rmap_irec irec;
- xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
- bool non_inode;
- bool is_unwritten;
- bool is_bmbt;
- bool is_attr;
- int error;
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ struct xfs_rmap_irec irec;
+ xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ bool non_inode;
+ bool is_unwritten;
+ bool is_bmbt;
+ bool is_attr;
+ int error;
error = xfs_rmap_btrec_to_irec(rec, &irec);
- if (!xfs_scrub_btree_process_error(bs->sc, bs->cur, 0, &error))
+ if (!xchk_btree_process_error(bs->sc, bs->cur, 0, &error))
goto out;
/* Check extent. */
if (irec.rm_startblock + irec.rm_blockcount <= irec.rm_startblock)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (irec.rm_owner == XFS_RMAP_OWN_FS) {
/*
@@ -124,7 +124,7 @@ xfs_scrub_rmapbt_rec(
*/
if (irec.rm_startblock != 0 ||
irec.rm_blockcount != XFS_AGFL_BLOCK(mp) + 1)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
} else {
/*
* Otherwise we must point somewhere past the static metadata
@@ -133,7 +133,7 @@ xfs_scrub_rmapbt_rec(
if (!xfs_verify_agbno(mp, agno, irec.rm_startblock) ||
!xfs_verify_agbno(mp, agno, irec.rm_startblock +
irec.rm_blockcount - 1))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
}
/* Check flags. */
@@ -143,105 +143,105 @@ xfs_scrub_rmapbt_rec(
is_unwritten = irec.rm_flags & XFS_RMAP_UNWRITTEN;
if (is_bmbt && irec.rm_offset != 0)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (non_inode && irec.rm_offset != 0)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (is_unwritten && (is_bmbt || non_inode || is_attr))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (non_inode && (is_bmbt || is_unwritten || is_attr))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (!non_inode) {
if (!xfs_verify_ino(mp, irec.rm_owner))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
} else {
/* Non-inode owner within the magic values? */
if (irec.rm_owner <= XFS_RMAP_OWN_MIN ||
irec.rm_owner > XFS_RMAP_OWN_FS)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
}
- xfs_scrub_rmapbt_xref(bs->sc, &irec);
+ xchk_rmapbt_xref(bs->sc, &irec);
out:
return error;
}
/* Scrub the rmap btree for some AG. */
int
-xfs_scrub_rmapbt(
- struct xfs_scrub_context *sc)
+xchk_rmapbt(
+ struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
+ struct xfs_owner_info oinfo;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
- return xfs_scrub_btree(sc, sc->sa.rmap_cur, xfs_scrub_rmapbt_rec,
+ return xchk_btree(sc, sc->sa.rmap_cur, xchk_rmapbt_rec,
&oinfo, NULL);
}
/* xref check that the extent is owned by a given owner */
static inline void
-xfs_scrub_xref_check_owner(
- struct xfs_scrub_context *sc,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- struct xfs_owner_info *oinfo,
- bool should_have_rmap)
+xchk_xref_check_owner(
+ struct xfs_scrub *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo,
+ bool should_have_rmap)
{
- bool has_rmap;
- int error;
+ bool has_rmap;
+ int error;
- if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return;
error = xfs_rmap_record_exists(sc->sa.rmap_cur, bno, len, oinfo,
&has_rmap);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (has_rmap != should_have_rmap)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
/* xref check that the extent is owned by a given owner */
void
-xfs_scrub_xref_is_owned_by(
- struct xfs_scrub_context *sc,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- struct xfs_owner_info *oinfo)
+xchk_xref_is_owned_by(
+ struct xfs_scrub *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo)
{
- xfs_scrub_xref_check_owner(sc, bno, len, oinfo, true);
+ xchk_xref_check_owner(sc, bno, len, oinfo, true);
}
/* xref check that the extent is not owned by a given owner */
void
-xfs_scrub_xref_is_not_owned_by(
- struct xfs_scrub_context *sc,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- struct xfs_owner_info *oinfo)
+xchk_xref_is_not_owned_by(
+ struct xfs_scrub *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo)
{
- xfs_scrub_xref_check_owner(sc, bno, len, oinfo, false);
+ xchk_xref_check_owner(sc, bno, len, oinfo, false);
}
/* xref check that the extent has no reverse mapping at all */
void
-xfs_scrub_xref_has_no_owner(
- struct xfs_scrub_context *sc,
- xfs_agblock_t bno,
- xfs_extlen_t len)
+xchk_xref_has_no_owner(
+ struct xfs_scrub *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len)
{
- bool has_rmap;
- int error;
+ bool has_rmap;
+ int error;
- if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return;
error = xfs_rmap_has_record(sc->sa.rmap_cur, bno, len, &has_rmap);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (has_rmap)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
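
The flag rules enforced by xchk_rmapbt_rec() above can be restated as a small predicate: bmbt and non-inode records must have offset zero, and the unwritten, attr-fork, and bmbt flags only make sense for inode-owned data extents. A userspace model with illustrative types (not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace model of the rmap record flag rules. */
    struct rmap_rec {
            bool non_inode, is_bmbt, is_attr, is_unwritten;
            unsigned long long offset;
    };

    static bool rmap_flags_ok(const struct rmap_rec *r)
    {
            if (r->is_bmbt && r->offset != 0)
                    return false;
            if (r->non_inode && r->offset != 0)
                    return false;
            if (r->is_unwritten && (r->is_bmbt || r->non_inode || r->is_attr))
                    return false;
            if (r->non_inode && (r->is_bmbt || r->is_unwritten || r->is_attr))
                    return false;
            return true;
    }

    int main(void)
    {
            struct rmap_rec ag_meta = { .non_inode = true, .offset = 0 };
            struct rmap_rec bad = { .non_inode = true, .is_attr = true };

            printf("%d %d\n", rmap_flags_ok(&ag_meta), rmap_flags_ok(&bad));
            return 0;
    }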
diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c
index 1f86e02a07ca..665d4bbb17cc 100644
--- a/fs/xfs/scrub/rtbitmap.c
+++ b/fs/xfs/scrub/rtbitmap.c
@@ -25,13 +25,13 @@
/* Set us up with the realtime metadata locked. */
int
-xfs_scrub_setup_rt(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_rt(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- int error;
+ int error;
- error = xfs_scrub_setup_fs(sc, ip);
+ error = xchk_setup_fs(sc, ip);
if (error)
return error;
@@ -46,14 +46,14 @@ xfs_scrub_setup_rt(
/* Scrub a free extent record from the realtime bitmap. */
STATIC int
-xfs_scrub_rtbitmap_rec(
- struct xfs_trans *tp,
- struct xfs_rtalloc_rec *rec,
- void *priv)
+xchk_rtbitmap_rec(
+ struct xfs_trans *tp,
+ struct xfs_rtalloc_rec *rec,
+ void *priv)
{
- struct xfs_scrub_context *sc = priv;
- xfs_rtblock_t startblock;
- xfs_rtblock_t blockcount;
+ struct xfs_scrub *sc = priv;
+ xfs_rtblock_t startblock;
+ xfs_rtblock_t blockcount;
startblock = rec->ar_startext * tp->t_mountp->m_sb.sb_rextsize;
blockcount = rec->ar_extcount * tp->t_mountp->m_sb.sb_rextsize;
@@ -61,24 +61,24 @@ xfs_scrub_rtbitmap_rec(
if (startblock + blockcount <= startblock ||
!xfs_verify_rtbno(sc->mp, startblock) ||
!xfs_verify_rtbno(sc->mp, startblock + blockcount - 1))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
return 0;
}
/* Scrub the realtime bitmap. */
int
-xfs_scrub_rtbitmap(
- struct xfs_scrub_context *sc)
+xchk_rtbitmap(
+ struct xfs_scrub *sc)
{
- int error;
+ int error;
/* Invoke the fork scrubber. */
- error = xfs_scrub_metadata_inode_forks(sc);
+ error = xchk_metadata_inode_forks(sc);
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error;
- error = xfs_rtalloc_query_all(sc->tp, xfs_scrub_rtbitmap_rec, sc);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ error = xfs_rtalloc_query_all(sc->tp, xchk_rtbitmap_rec, sc);
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out;
out:
@@ -87,13 +87,13 @@ out:
/* Scrub the realtime summary. */
int
-xfs_scrub_rtsummary(
- struct xfs_scrub_context *sc)
+xchk_rtsummary(
+ struct xfs_scrub *sc)
{
- struct xfs_inode *rsumip = sc->mp->m_rsumip;
- struct xfs_inode *old_ip = sc->ip;
- uint old_ilock_flags = sc->ilock_flags;
- int error = 0;
+ struct xfs_inode *rsumip = sc->mp->m_rsumip;
+ struct xfs_inode *old_ip = sc->ip;
+ uint old_ilock_flags = sc->ilock_flags;
+ int error = 0;
/*
* We ILOCK'd the rt bitmap ip in the setup routine, now lock the
@@ -107,12 +107,12 @@ xfs_scrub_rtsummary(
xfs_ilock(sc->ip, sc->ilock_flags);
/* Invoke the fork scrubber. */
- error = xfs_scrub_metadata_inode_forks(sc);
+ error = xchk_metadata_inode_forks(sc);
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
goto out;
/* XXX: implement this some day */
- xfs_scrub_set_incomplete(sc);
+ xchk_set_incomplete(sc);
out:
/* Switch back to the rtbitmap inode and lock flags. */
xfs_iunlock(sc->ip, sc->ilock_flags);
@@ -124,18 +124,18 @@ out:
/* xref check that the extent is not free in the rtbitmap */
void
-xfs_scrub_xref_is_used_rt_space(
- struct xfs_scrub_context *sc,
- xfs_rtblock_t fsbno,
- xfs_extlen_t len)
+xchk_xref_is_used_rt_space(
+ struct xfs_scrub *sc,
+ xfs_rtblock_t fsbno,
+ xfs_extlen_t len)
{
- xfs_rtblock_t startext;
- xfs_rtblock_t endext;
- xfs_rtblock_t extcount;
- bool is_free;
- int error;
+ xfs_rtblock_t startext;
+ xfs_rtblock_t endext;
+ xfs_rtblock_t extcount;
+ bool is_free;
+ int error;
- if (xfs_scrub_skip_xref(sc->sm))
+ if (xchk_skip_xref(sc->sm))
return;
startext = fsbno;
@@ -147,10 +147,10 @@ xfs_scrub_xref_is_used_rt_space(
xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
error = xfs_rtalloc_extent_is_free(sc->mp, sc->tp, startext, extcount,
&is_free);
- if (!xfs_scrub_should_check_xref(sc, &error, NULL))
+ if (!xchk_should_check_xref(sc, &error, NULL))
goto out_unlock;
if (is_free)
- xfs_scrub_ino_xref_set_corrupt(sc, sc->mp->m_rbmip->i_ino);
+ xchk_ino_xref_set_corrupt(sc, sc->mp->m_rbmip->i_ino);
out_unlock:
xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
}
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index 58ae76b3a421..4bfae1e61d30 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -131,6 +131,12 @@
* optimize the structure so that the rebuild knows what to do. The
* second check evaluates the completeness of the repair; that is what
* is reported to userspace.
+ *
+ * A quick note on symbol prefixes:
+ * - "xfs_" are general XFS symbols.
+ * - "xchk_" are symbols related to metadata checking.
+ * - "xrep_" are symbols related to metadata repair.
+ * - "xfs_scrub_" are symbols that tie online fsck to the rest of XFS.
*/
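By way of illustration, one representative symbol under each prefix, with signatures as they appear elsewhere in this patch:

	int xchk_probe(struct xfs_scrub *sc);		/* checking */
	int xrep_probe(struct xfs_scrub *sc);		/* repair */
	int xfs_scrub_metadata(struct xfs_inode *ip,	/* ties fsck to XFS */
			struct xfs_scrub_metadata *sm);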
/*
@@ -144,12 +150,12 @@
* supported by the running kernel.
*/
static int
-xfs_scrub_probe(
- struct xfs_scrub_context *sc)
+xchk_probe(
+ struct xfs_scrub *sc)
{
- int error = 0;
+ int error = 0;
- if (xfs_scrub_should_terminate(sc, &error))
+ if (xchk_should_terminate(sc, &error))
return error;
return 0;
@@ -159,12 +165,12 @@ xfs_scrub_probe(
/* Free all the resources and finish the transactions. */
STATIC int
-xfs_scrub_teardown(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip_in,
- int error)
+xchk_teardown(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip_in,
+ int error)
{
- xfs_scrub_ag_free(sc, &sc->sa);
+ xchk_ag_free(sc, &sc->sa);
if (sc->tp) {
if (error == 0 && (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
error = xfs_trans_commit(sc->tp);
@@ -177,7 +183,7 @@ xfs_scrub_teardown(
xfs_iunlock(sc->ip, sc->ilock_flags);
if (sc->ip != ip_in &&
!xfs_internal_inum(sc->mp, sc->ip->i_ino))
- iput(VFS_I(sc->ip));
+ xfs_irele(sc->ip);
sc->ip = NULL;
}
if (sc->has_quotaofflock)
@@ -191,165 +197,165 @@ xfs_scrub_teardown(
/* Scrubbing dispatch. */
-static const struct xfs_scrub_meta_ops meta_scrub_ops[] = {
+static const struct xchk_meta_ops meta_scrub_ops[] = {
[XFS_SCRUB_TYPE_PROBE] = { /* ioctl presence test */
.type = ST_NONE,
- .setup = xfs_scrub_setup_fs,
- .scrub = xfs_scrub_probe,
- .repair = xfs_repair_probe,
+ .setup = xchk_setup_fs,
+ .scrub = xchk_probe,
+ .repair = xrep_probe,
},
[XFS_SCRUB_TYPE_SB] = { /* superblock */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_fs,
- .scrub = xfs_scrub_superblock,
- .repair = xfs_repair_superblock,
+ .setup = xchk_setup_fs,
+ .scrub = xchk_superblock,
+ .repair = xrep_superblock,
},
[XFS_SCRUB_TYPE_AGF] = { /* agf */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_fs,
- .scrub = xfs_scrub_agf,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_fs,
+ .scrub = xchk_agf,
+ .repair = xrep_agf,
},
[XFS_SCRUB_TYPE_AGFL] = { /* agfl */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_fs,
- .scrub = xfs_scrub_agfl,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_fs,
+ .scrub = xchk_agfl,
+ .repair = xrep_agfl,
},
[XFS_SCRUB_TYPE_AGI] = { /* agi */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_fs,
- .scrub = xfs_scrub_agi,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_fs,
+ .scrub = xchk_agi,
+ .repair = xrep_agi,
},
[XFS_SCRUB_TYPE_BNOBT] = { /* bnobt */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_ag_allocbt,
- .scrub = xfs_scrub_bnobt,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_ag_allocbt,
+ .scrub = xchk_bnobt,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_CNTBT] = { /* cntbt */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_ag_allocbt,
- .scrub = xfs_scrub_cntbt,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_ag_allocbt,
+ .scrub = xchk_cntbt,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_INOBT] = { /* inobt */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_ag_iallocbt,
- .scrub = xfs_scrub_inobt,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_ag_iallocbt,
+ .scrub = xchk_inobt,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_FINOBT] = { /* finobt */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_ag_iallocbt,
- .scrub = xfs_scrub_finobt,
+ .setup = xchk_setup_ag_iallocbt,
+ .scrub = xchk_finobt,
.has = xfs_sb_version_hasfinobt,
- .repair = xfs_repair_notsupported,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_RMAPBT] = { /* rmapbt */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_ag_rmapbt,
- .scrub = xfs_scrub_rmapbt,
+ .setup = xchk_setup_ag_rmapbt,
+ .scrub = xchk_rmapbt,
.has = xfs_sb_version_hasrmapbt,
- .repair = xfs_repair_notsupported,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_REFCNTBT] = { /* refcountbt */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_ag_refcountbt,
- .scrub = xfs_scrub_refcountbt,
+ .setup = xchk_setup_ag_refcountbt,
+ .scrub = xchk_refcountbt,
.has = xfs_sb_version_hasreflink,
- .repair = xfs_repair_notsupported,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_INODE] = { /* inode record */
.type = ST_INODE,
- .setup = xfs_scrub_setup_inode,
- .scrub = xfs_scrub_inode,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_inode,
+ .scrub = xchk_inode,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_BMBTD] = { /* inode data fork */
.type = ST_INODE,
- .setup = xfs_scrub_setup_inode_bmap,
- .scrub = xfs_scrub_bmap_data,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_inode_bmap,
+ .scrub = xchk_bmap_data,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_BMBTA] = { /* inode attr fork */
.type = ST_INODE,
- .setup = xfs_scrub_setup_inode_bmap,
- .scrub = xfs_scrub_bmap_attr,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_inode_bmap,
+ .scrub = xchk_bmap_attr,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_BMBTC] = { /* inode CoW fork */
.type = ST_INODE,
- .setup = xfs_scrub_setup_inode_bmap,
- .scrub = xfs_scrub_bmap_cow,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_inode_bmap,
+ .scrub = xchk_bmap_cow,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_DIR] = { /* directory */
.type = ST_INODE,
- .setup = xfs_scrub_setup_directory,
- .scrub = xfs_scrub_directory,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_directory,
+ .scrub = xchk_directory,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_XATTR] = { /* extended attributes */
.type = ST_INODE,
- .setup = xfs_scrub_setup_xattr,
- .scrub = xfs_scrub_xattr,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_xattr,
+ .scrub = xchk_xattr,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_SYMLINK] = { /* symbolic link */
.type = ST_INODE,
- .setup = xfs_scrub_setup_symlink,
- .scrub = xfs_scrub_symlink,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_symlink,
+ .scrub = xchk_symlink,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_PARENT] = { /* parent pointers */
.type = ST_INODE,
- .setup = xfs_scrub_setup_parent,
- .scrub = xfs_scrub_parent,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_parent,
+ .scrub = xchk_parent,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_RTBITMAP] = { /* realtime bitmap */
.type = ST_FS,
- .setup = xfs_scrub_setup_rt,
- .scrub = xfs_scrub_rtbitmap,
+ .setup = xchk_setup_rt,
+ .scrub = xchk_rtbitmap,
.has = xfs_sb_version_hasrealtime,
- .repair = xfs_repair_notsupported,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_RTSUM] = { /* realtime summary */
.type = ST_FS,
- .setup = xfs_scrub_setup_rt,
- .scrub = xfs_scrub_rtsummary,
+ .setup = xchk_setup_rt,
+ .scrub = xchk_rtsummary,
.has = xfs_sb_version_hasrealtime,
- .repair = xfs_repair_notsupported,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_UQUOTA] = { /* user quota */
.type = ST_FS,
- .setup = xfs_scrub_setup_quota,
- .scrub = xfs_scrub_quota,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_quota,
+ .scrub = xchk_quota,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_GQUOTA] = { /* group quota */
.type = ST_FS,
- .setup = xfs_scrub_setup_quota,
- .scrub = xfs_scrub_quota,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_quota,
+ .scrub = xchk_quota,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_PQUOTA] = { /* project quota */
.type = ST_FS,
- .setup = xfs_scrub_setup_quota,
- .scrub = xfs_scrub_quota,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_quota,
+ .scrub = xchk_quota,
+ .repair = xrep_notsupported,
},
};
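For orientation, sm->sm_type indexes this table. Below is a minimal sketch of the dispatch pattern (xchk_dispatch_sketch is a hypothetical name; the real xfs_scrub_metadata() further down also handles input validation, retries, repair, and teardown):

static int
xchk_dispatch_sketch(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	const struct xchk_meta_ops *ops = &meta_scrub_ops[sc->sm->sm_type];
	int			error;

	/* Optional metadata: bail if the feature isn't on this filesystem. */
	if (ops->has && !ops->has(&sc->mp->m_sb))
		return -ENOENT;

	error = ops->setup(sc, ip);	/* acquire locks and buffers */
	if (error)
		return error;
	return ops->scrub(sc);		/* examine the metadata */
}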
/* This isn't a stable feature, warn once per day. */
static inline void
-xfs_scrub_experimental_warning(
+xchk_experimental_warning(
struct xfs_mount *mp)
{
static struct ratelimit_state scrub_warning = RATELIMIT_STATE_INIT(
- "xfs_scrub_warning", 86400 * HZ, 1);
+ "xchk_warning", 86400 * HZ, 1);
ratelimit_set_flags(&scrub_warning, RATELIMIT_MSG_ON_RELEASE);
if (__ratelimit(&scrub_warning))
@@ -358,12 +364,12 @@ xfs_scrub_experimental_warning(
}
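The once-per-day behaviour falls out of the ratelimit parameters: an interval of 86400 * HZ jiffies (one day) with a burst of 1. A self-contained sketch of the same pattern, with example_rs and example_warn as hypothetical names:

#include <linux/ratelimit.h>

static DEFINE_RATELIMIT_STATE(example_rs, 86400 * HZ, 1);

static void
example_warn(
	struct xfs_mount	*mp)
{
	/* As above: suppress the "callbacks suppressed" summary message. */
	ratelimit_set_flags(&example_rs, RATELIMIT_MSG_ON_RELEASE);
	if (__ratelimit(&example_rs))
		xfs_alert(mp, "EXPERIMENTAL online scrub feature in use!");
}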
static int
-xfs_scrub_validate_inputs(
+xchk_validate_inputs(
struct xfs_mount *mp,
struct xfs_scrub_metadata *sm)
{
int error;
- const struct xfs_scrub_meta_ops *ops;
+ const struct xchk_meta_ops *ops;
error = -EINVAL;
/* Check our inputs. */
@@ -441,7 +447,7 @@ out:
}
#ifdef CONFIG_XFS_ONLINE_REPAIR
-static inline void xfs_scrub_postmortem(struct xfs_scrub_context *sc)
+static inline void xchk_postmortem(struct xfs_scrub *sc)
{
/*
* Userspace asked us to repair something, we repaired it, rescanned
@@ -451,10 +457,10 @@ static inline void xfs_scrub_postmortem(struct xfs_scrub_context *sc)
if ((sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) &&
(sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
XFS_SCRUB_OFLAG_XCORRUPT)))
- xfs_repair_failure(sc->mp);
+ xrep_failure(sc->mp);
}
#else
-static inline void xfs_scrub_postmortem(struct xfs_scrub_context *sc)
+static inline void xchk_postmortem(struct xfs_scrub *sc)
{
/*
* Userspace asked us to scrub something, it's broken, and we have no
@@ -473,16 +479,16 @@ xfs_scrub_metadata(
struct xfs_inode *ip,
struct xfs_scrub_metadata *sm)
{
- struct xfs_scrub_context sc;
+ struct xfs_scrub sc;
struct xfs_mount *mp = ip->i_mount;
bool try_harder = false;
bool already_fixed = false;
int error = 0;
BUILD_BUG_ON(sizeof(meta_scrub_ops) !=
- (sizeof(struct xfs_scrub_meta_ops) * XFS_SCRUB_TYPE_NR));
+ (sizeof(struct xchk_meta_ops) * XFS_SCRUB_TYPE_NR));
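	/*
	 * Sketch of the intent: if a new XFS_SCRUB_TYPE_* constant is added
	 * without a matching meta_scrub_ops[] initializer, the array stays
	 * short and the sizeof() mismatch becomes a build failure instead
	 * of a runtime dispatch bug.
	 */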
- trace_xfs_scrub_start(ip, sm, error);
+ trace_xchk_start(ip, sm, error);
/* Forbidden if we are shut down or mounted norecovery. */
error = -ESHUTDOWN;
@@ -492,11 +498,11 @@ xfs_scrub_metadata(
if (mp->m_flags & XFS_MOUNT_NORECOVERY)
goto out;
- error = xfs_scrub_validate_inputs(mp, sm);
+ error = xchk_validate_inputs(mp, sm);
if (error)
goto out;
- xfs_scrub_experimental_warning(mp);
+ xchk_experimental_warning(mp);
retry_op:
/* Set up for the operation. */
@@ -518,7 +524,7 @@ retry_op:
* Tear down everything we hold, then set up again with
* preparation for worst-case scenarios.
*/
- error = xfs_scrub_teardown(&sc, ip, 0);
+ error = xchk_teardown(&sc, ip, 0);
if (error)
goto out;
try_harder = true;
@@ -549,13 +555,13 @@ retry_op:
* If it's broken, userspace wants us to fix it, and we haven't
* already tried to fix it, then attempt a repair.
*/
- error = xfs_repair_attempt(ip, &sc, &already_fixed);
+ error = xrep_attempt(ip, &sc, &already_fixed);
if (error == -EAGAIN) {
if (sc.try_harder)
try_harder = true;
- error = xfs_scrub_teardown(&sc, ip, 0);
+ error = xchk_teardown(&sc, ip, 0);
if (error) {
- xfs_repair_failure(mp);
+ xrep_failure(mp);
goto out;
}
goto retry_op;
@@ -563,11 +569,11 @@ retry_op:
}
out_nofix:
- xfs_scrub_postmortem(&sc);
+ xchk_postmortem(&sc);
out_teardown:
- error = xfs_scrub_teardown(&sc, ip, error);
+ error = xchk_teardown(&sc, ip, error);
out:
- trace_xfs_scrub_done(ip, sm, error);
+ trace_xchk_done(ip, sm, error);
if (error == -EFSCORRUPTED || error == -EFSBADCRC) {
sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
error = 0;
diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
index b295edd5fc0e..af323b229c4b 100644
--- a/fs/xfs/scrub/scrub.h
+++ b/fs/xfs/scrub/scrub.h
@@ -6,58 +6,58 @@
#ifndef __XFS_SCRUB_SCRUB_H__
#define __XFS_SCRUB_SCRUB_H__
-struct xfs_scrub_context;
+struct xfs_scrub;
/* Type info and names for the scrub types. */
-enum xfs_scrub_type {
+enum xchk_type {
ST_NONE = 1, /* disabled */
ST_PERAG, /* per-AG metadata */
ST_FS, /* per-FS metadata */
ST_INODE, /* per-inode metadata */
};
-struct xfs_scrub_meta_ops {
+struct xchk_meta_ops {
/* Acquire whatever resources are needed for the operation. */
- int (*setup)(struct xfs_scrub_context *,
+ int (*setup)(struct xfs_scrub *,
struct xfs_inode *);
/* Examine metadata for errors. */
- int (*scrub)(struct xfs_scrub_context *);
+ int (*scrub)(struct xfs_scrub *);
/* Repair or optimize the metadata. */
- int (*repair)(struct xfs_scrub_context *);
+ int (*repair)(struct xfs_scrub *);
/* Decide if we even have this piece of metadata. */
bool (*has)(struct xfs_sb *);
/* type describing required/allowed inputs */
- enum xfs_scrub_type type;
+ enum xchk_type type;
};
/* Buffer pointers and btree cursors for an entire AG. */
-struct xfs_scrub_ag {
- xfs_agnumber_t agno;
- struct xfs_perag *pag;
+struct xchk_ag {
+ xfs_agnumber_t agno;
+ struct xfs_perag *pag;
/* AG btree roots */
- struct xfs_buf *agf_bp;
- struct xfs_buf *agfl_bp;
- struct xfs_buf *agi_bp;
+ struct xfs_buf *agf_bp;
+ struct xfs_buf *agfl_bp;
+ struct xfs_buf *agi_bp;
/* AG btrees */
- struct xfs_btree_cur *bno_cur;
- struct xfs_btree_cur *cnt_cur;
- struct xfs_btree_cur *ino_cur;
- struct xfs_btree_cur *fino_cur;
- struct xfs_btree_cur *rmap_cur;
- struct xfs_btree_cur *refc_cur;
+ struct xfs_btree_cur *bno_cur;
+ struct xfs_btree_cur *cnt_cur;
+ struct xfs_btree_cur *ino_cur;
+ struct xfs_btree_cur *fino_cur;
+ struct xfs_btree_cur *rmap_cur;
+ struct xfs_btree_cur *refc_cur;
};
-struct xfs_scrub_context {
+struct xfs_scrub {
/* General scrub state. */
struct xfs_mount *mp;
struct xfs_scrub_metadata *sm;
- const struct xfs_scrub_meta_ops *ops;
+ const struct xchk_meta_ops *ops;
struct xfs_trans *tp;
struct xfs_inode *ip;
void *buf;
@@ -66,78 +66,76 @@ struct xfs_scrub_context {
bool has_quotaofflock;
/* State tracking for single-AG operations. */
- struct xfs_scrub_ag sa;
+ struct xchk_ag sa;
};
/* Metadata scrubbers */
-int xfs_scrub_tester(struct xfs_scrub_context *sc);
-int xfs_scrub_superblock(struct xfs_scrub_context *sc);
-int xfs_scrub_agf(struct xfs_scrub_context *sc);
-int xfs_scrub_agfl(struct xfs_scrub_context *sc);
-int xfs_scrub_agi(struct xfs_scrub_context *sc);
-int xfs_scrub_bnobt(struct xfs_scrub_context *sc);
-int xfs_scrub_cntbt(struct xfs_scrub_context *sc);
-int xfs_scrub_inobt(struct xfs_scrub_context *sc);
-int xfs_scrub_finobt(struct xfs_scrub_context *sc);
-int xfs_scrub_rmapbt(struct xfs_scrub_context *sc);
-int xfs_scrub_refcountbt(struct xfs_scrub_context *sc);
-int xfs_scrub_inode(struct xfs_scrub_context *sc);
-int xfs_scrub_bmap_data(struct xfs_scrub_context *sc);
-int xfs_scrub_bmap_attr(struct xfs_scrub_context *sc);
-int xfs_scrub_bmap_cow(struct xfs_scrub_context *sc);
-int xfs_scrub_directory(struct xfs_scrub_context *sc);
-int xfs_scrub_xattr(struct xfs_scrub_context *sc);
-int xfs_scrub_symlink(struct xfs_scrub_context *sc);
-int xfs_scrub_parent(struct xfs_scrub_context *sc);
+int xchk_tester(struct xfs_scrub *sc);
+int xchk_superblock(struct xfs_scrub *sc);
+int xchk_agf(struct xfs_scrub *sc);
+int xchk_agfl(struct xfs_scrub *sc);
+int xchk_agi(struct xfs_scrub *sc);
+int xchk_bnobt(struct xfs_scrub *sc);
+int xchk_cntbt(struct xfs_scrub *sc);
+int xchk_inobt(struct xfs_scrub *sc);
+int xchk_finobt(struct xfs_scrub *sc);
+int xchk_rmapbt(struct xfs_scrub *sc);
+int xchk_refcountbt(struct xfs_scrub *sc);
+int xchk_inode(struct xfs_scrub *sc);
+int xchk_bmap_data(struct xfs_scrub *sc);
+int xchk_bmap_attr(struct xfs_scrub *sc);
+int xchk_bmap_cow(struct xfs_scrub *sc);
+int xchk_directory(struct xfs_scrub *sc);
+int xchk_xattr(struct xfs_scrub *sc);
+int xchk_symlink(struct xfs_scrub *sc);
+int xchk_parent(struct xfs_scrub *sc);
#ifdef CONFIG_XFS_RT
-int xfs_scrub_rtbitmap(struct xfs_scrub_context *sc);
-int xfs_scrub_rtsummary(struct xfs_scrub_context *sc);
+int xchk_rtbitmap(struct xfs_scrub *sc);
+int xchk_rtsummary(struct xfs_scrub *sc);
#else
static inline int
-xfs_scrub_rtbitmap(struct xfs_scrub_context *sc)
+xchk_rtbitmap(struct xfs_scrub *sc)
{
return -ENOENT;
}
static inline int
-xfs_scrub_rtsummary(struct xfs_scrub_context *sc)
+xchk_rtsummary(struct xfs_scrub *sc)
{
return -ENOENT;
}
#endif
#ifdef CONFIG_XFS_QUOTA
-int xfs_scrub_quota(struct xfs_scrub_context *sc);
+int xchk_quota(struct xfs_scrub *sc);
#else
static inline int
-xfs_scrub_quota(struct xfs_scrub_context *sc)
+xchk_quota(struct xfs_scrub *sc)
{
return -ENOENT;
}
#endif
/* cross-referencing helpers */
-void xfs_scrub_xref_is_used_space(struct xfs_scrub_context *sc,
- xfs_agblock_t agbno, xfs_extlen_t len);
-void xfs_scrub_xref_is_not_inode_chunk(struct xfs_scrub_context *sc,
- xfs_agblock_t agbno, xfs_extlen_t len);
-void xfs_scrub_xref_is_inode_chunk(struct xfs_scrub_context *sc,
- xfs_agblock_t agbno, xfs_extlen_t len);
-void xfs_scrub_xref_is_owned_by(struct xfs_scrub_context *sc,
- xfs_agblock_t agbno, xfs_extlen_t len,
- struct xfs_owner_info *oinfo);
-void xfs_scrub_xref_is_not_owned_by(struct xfs_scrub_context *sc,
- xfs_agblock_t agbno, xfs_extlen_t len,
- struct xfs_owner_info *oinfo);
-void xfs_scrub_xref_has_no_owner(struct xfs_scrub_context *sc,
- xfs_agblock_t agbno, xfs_extlen_t len);
-void xfs_scrub_xref_is_cow_staging(struct xfs_scrub_context *sc,
- xfs_agblock_t bno, xfs_extlen_t len);
-void xfs_scrub_xref_is_not_shared(struct xfs_scrub_context *sc,
- xfs_agblock_t bno, xfs_extlen_t len);
+void xchk_xref_is_used_space(struct xfs_scrub *sc, xfs_agblock_t agbno,
+ xfs_extlen_t len);
+void xchk_xref_is_not_inode_chunk(struct xfs_scrub *sc, xfs_agblock_t agbno,
+ xfs_extlen_t len);
+void xchk_xref_is_inode_chunk(struct xfs_scrub *sc, xfs_agblock_t agbno,
+ xfs_extlen_t len);
+void xchk_xref_is_owned_by(struct xfs_scrub *sc, xfs_agblock_t agbno,
+ xfs_extlen_t len, struct xfs_owner_info *oinfo);
+void xchk_xref_is_not_owned_by(struct xfs_scrub *sc, xfs_agblock_t agbno,
+ xfs_extlen_t len, struct xfs_owner_info *oinfo);
+void xchk_xref_has_no_owner(struct xfs_scrub *sc, xfs_agblock_t agbno,
+ xfs_extlen_t len);
+void xchk_xref_is_cow_staging(struct xfs_scrub *sc, xfs_agblock_t bno,
+ xfs_extlen_t len);
+void xchk_xref_is_not_shared(struct xfs_scrub *sc, xfs_agblock_t bno,
+ xfs_extlen_t len);
#ifdef CONFIG_XFS_RT
-void xfs_scrub_xref_is_used_rt_space(struct xfs_scrub_context *sc,
- xfs_rtblock_t rtbno, xfs_extlen_t len);
+void xchk_xref_is_used_rt_space(struct xfs_scrub *sc, xfs_rtblock_t rtbno,
+ xfs_extlen_t len);
#else
-# define xfs_scrub_xref_is_used_rt_space(sc, rtbno, len) do { } while (0)
+# define xchk_xref_is_used_rt_space(sc, rtbno, len) do { } while (0)
#endif
#endif /* __XFS_SCRUB_SCRUB_H__ */
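Note the two compile-out conventions above: int-returning scrubbers stub to static inlines that return -ENOENT, while the void xref helper becomes a do { } while (0) macro so it still behaves as a single statement. A contrived caller-side sketch (do_rt_xref, rtbno, agbno, and len are hypothetical locals):

	/* No #ifdef needed at any call site in either configuration. */
	if (do_rt_xref)
		xchk_xref_is_used_rt_space(sc, rtbno, len);
	else
		xchk_xref_has_no_owner(sc, agbno, len);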
diff --git a/fs/xfs/scrub/symlink.c b/fs/xfs/scrub/symlink.c
index 570a89812116..f7ebaa946999 100644
--- a/fs/xfs/scrub/symlink.c
+++ b/fs/xfs/scrub/symlink.c
@@ -25,28 +25,28 @@
/* Set us up to scrub a symbolic link. */
int
-xfs_scrub_setup_symlink(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_symlink(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
/* Allocate the buffer without the inode lock held. */
sc->buf = kmem_zalloc_large(XFS_SYMLINK_MAXLEN + 1, KM_SLEEP);
if (!sc->buf)
return -ENOMEM;
- return xfs_scrub_setup_inode_contents(sc, ip, 0);
+ return xchk_setup_inode_contents(sc, ip, 0);
}
/* Symbolic links. */
int
-xfs_scrub_symlink(
- struct xfs_scrub_context *sc)
+xchk_symlink(
+ struct xfs_scrub *sc)
{
- struct xfs_inode *ip = sc->ip;
- struct xfs_ifork *ifp;
- loff_t len;
- int error = 0;
+ struct xfs_inode *ip = sc->ip;
+ struct xfs_ifork *ifp;
+ loff_t len;
+ int error = 0;
if (!S_ISLNK(VFS_I(ip)->i_mode))
return -ENOENT;
@@ -55,7 +55,7 @@ xfs_scrub_symlink(
/* Plausible size? */
if (len > XFS_SYMLINK_MAXLEN || len <= 0) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out;
}
@@ -63,16 +63,16 @@ xfs_scrub_symlink(
if (ifp->if_flags & XFS_IFINLINE) {
if (len > XFS_IFORK_DSIZE(ip) ||
len > strnlen(ifp->if_u1.if_data, XFS_IFORK_DSIZE(ip)))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out;
}
/* Remote symlink; must read the contents. */
error = xfs_readlink_bmap_ilocked(sc->ip, sc->buf);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out;
if (strnlen(sc->buf, XFS_SYMLINK_MAXLEN) < len)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
out:
return error;
}
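Restated as invariants, a clean symlink of VFS size len satisfies the following (a summary of the checks above, not code from the patch; buf holds the target that was read back):

	ASSERT(len > 0 && len <= XFS_SYMLINK_MAXLEN);
	/* inline symlinks: the target fits in the data fork itself */
	ASSERT(len <= XFS_IFORK_DSIZE(ip));
	/* remote symlinks: the bytes read back are at least len long */
	ASSERT(strnlen(buf, XFS_SYMLINK_MAXLEN) >= len);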
diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c
index 7c76d8b5cb05..96feaf8dcdec 100644
--- a/fs/xfs/scrub/trace.c
+++ b/fs/xfs/scrub/trace.c
@@ -22,9 +22,9 @@
/* Figure out which block the btree cursor was pointing to. */
static inline xfs_fsblock_t
-xfs_scrub_btree_cur_fsbno(
- struct xfs_btree_cur *cur,
- int level)
+xchk_btree_cur_fsbno(
+ struct xfs_btree_cur *cur,
+ int level)
{
if (level < cur->bc_nlevels && cur->bc_bufs[level])
return XFS_DADDR_TO_FSB(cur->bc_mp, cur->bc_bufs[level]->b_bn);
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index cec3e5ece5a1..4e20f0e48232 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -12,7 +12,7 @@
#include <linux/tracepoint.h>
#include "xfs_bit.h"
-DECLARE_EVENT_CLASS(xfs_scrub_class,
+DECLARE_EVENT_CLASS(xchk_class,
TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm,
int error),
TP_ARGS(ip, sm, error),
@@ -47,19 +47,19 @@ DECLARE_EVENT_CLASS(xfs_scrub_class,
__entry->error)
)
#define DEFINE_SCRUB_EVENT(name) \
-DEFINE_EVENT(xfs_scrub_class, name, \
+DEFINE_EVENT(xchk_class, name, \
TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm, \
int error), \
TP_ARGS(ip, sm, error))
-DEFINE_SCRUB_EVENT(xfs_scrub_start);
-DEFINE_SCRUB_EVENT(xfs_scrub_done);
-DEFINE_SCRUB_EVENT(xfs_scrub_deadlock_retry);
-DEFINE_SCRUB_EVENT(xfs_repair_attempt);
-DEFINE_SCRUB_EVENT(xfs_repair_done);
+DEFINE_SCRUB_EVENT(xchk_start);
+DEFINE_SCRUB_EVENT(xchk_done);
+DEFINE_SCRUB_EVENT(xchk_deadlock_retry);
+DEFINE_SCRUB_EVENT(xrep_attempt);
+DEFINE_SCRUB_EVENT(xrep_done);
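Each DEFINE_EVENT expands to a trace_<name>() helper that is invoked like an ordinary function; scrub.c in this same patch calls the first two directly:

	trace_xchk_start(ip, sm, error);	/* entry to xfs_scrub_metadata() */
	...
	trace_xchk_done(ip, sm, error);		/* exit, with the final error */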
-TRACE_EVENT(xfs_scrub_op_error,
- TP_PROTO(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
+TRACE_EVENT(xchk_op_error,
+ TP_PROTO(struct xfs_scrub *sc, xfs_agnumber_t agno,
xfs_agblock_t bno, int error, void *ret_ip),
TP_ARGS(sc, agno, bno, error, ret_ip),
TP_STRUCT__entry(
@@ -87,8 +87,8 @@ TRACE_EVENT(xfs_scrub_op_error,
__entry->ret_ip)
);
-TRACE_EVENT(xfs_scrub_file_op_error,
- TP_PROTO(struct xfs_scrub_context *sc, int whichfork,
+TRACE_EVENT(xchk_file_op_error,
+ TP_PROTO(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset, int error, void *ret_ip),
TP_ARGS(sc, whichfork, offset, error, ret_ip),
TP_STRUCT__entry(
@@ -119,8 +119,8 @@ TRACE_EVENT(xfs_scrub_file_op_error,
__entry->ret_ip)
);
-DECLARE_EVENT_CLASS(xfs_scrub_block_error_class,
- TP_PROTO(struct xfs_scrub_context *sc, xfs_daddr_t daddr, void *ret_ip),
+DECLARE_EVENT_CLASS(xchk_block_error_class,
+ TP_PROTO(struct xfs_scrub *sc, xfs_daddr_t daddr, void *ret_ip),
TP_ARGS(sc, daddr, ret_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -153,16 +153,16 @@ DECLARE_EVENT_CLASS(xfs_scrub_block_error_class,
)
#define DEFINE_SCRUB_BLOCK_ERROR_EVENT(name) \
-DEFINE_EVENT(xfs_scrub_block_error_class, name, \
- TP_PROTO(struct xfs_scrub_context *sc, xfs_daddr_t daddr, \
+DEFINE_EVENT(xchk_block_error_class, name, \
+ TP_PROTO(struct xfs_scrub *sc, xfs_daddr_t daddr, \
void *ret_ip), \
TP_ARGS(sc, daddr, ret_ip))
-DEFINE_SCRUB_BLOCK_ERROR_EVENT(xfs_scrub_block_error);
-DEFINE_SCRUB_BLOCK_ERROR_EVENT(xfs_scrub_block_preen);
+DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_error);
+DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_preen);
-DECLARE_EVENT_CLASS(xfs_scrub_ino_error_class,
- TP_PROTO(struct xfs_scrub_context *sc, xfs_ino_t ino, void *ret_ip),
+DECLARE_EVENT_CLASS(xchk_ino_error_class,
+ TP_PROTO(struct xfs_scrub *sc, xfs_ino_t ino, void *ret_ip),
TP_ARGS(sc, ino, ret_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -184,17 +184,17 @@ DECLARE_EVENT_CLASS(xfs_scrub_ino_error_class,
)
#define DEFINE_SCRUB_INO_ERROR_EVENT(name) \
-DEFINE_EVENT(xfs_scrub_ino_error_class, name, \
- TP_PROTO(struct xfs_scrub_context *sc, xfs_ino_t ino, \
+DEFINE_EVENT(xchk_ino_error_class, name, \
+ TP_PROTO(struct xfs_scrub *sc, xfs_ino_t ino, \
void *ret_ip), \
TP_ARGS(sc, ino, ret_ip))
-DEFINE_SCRUB_INO_ERROR_EVENT(xfs_scrub_ino_error);
-DEFINE_SCRUB_INO_ERROR_EVENT(xfs_scrub_ino_preen);
-DEFINE_SCRUB_INO_ERROR_EVENT(xfs_scrub_ino_warning);
+DEFINE_SCRUB_INO_ERROR_EVENT(xchk_ino_error);
+DEFINE_SCRUB_INO_ERROR_EVENT(xchk_ino_preen);
+DEFINE_SCRUB_INO_ERROR_EVENT(xchk_ino_warning);
-DECLARE_EVENT_CLASS(xfs_scrub_fblock_error_class,
- TP_PROTO(struct xfs_scrub_context *sc, int whichfork,
+DECLARE_EVENT_CLASS(xchk_fblock_error_class,
+ TP_PROTO(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset, void *ret_ip),
TP_ARGS(sc, whichfork, offset, ret_ip),
TP_STRUCT__entry(
@@ -223,16 +223,16 @@ DECLARE_EVENT_CLASS(xfs_scrub_fblock_error_class,
);
#define DEFINE_SCRUB_FBLOCK_ERROR_EVENT(name) \
-DEFINE_EVENT(xfs_scrub_fblock_error_class, name, \
- TP_PROTO(struct xfs_scrub_context *sc, int whichfork, \
+DEFINE_EVENT(xchk_fblock_error_class, name, \
+ TP_PROTO(struct xfs_scrub *sc, int whichfork, \
xfs_fileoff_t offset, void *ret_ip), \
TP_ARGS(sc, whichfork, offset, ret_ip))
-DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xfs_scrub_fblock_error);
-DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xfs_scrub_fblock_warning);
+DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xchk_fblock_error);
+DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xchk_fblock_warning);
-TRACE_EVENT(xfs_scrub_incomplete,
- TP_PROTO(struct xfs_scrub_context *sc, void *ret_ip),
+TRACE_EVENT(xchk_incomplete,
+ TP_PROTO(struct xfs_scrub *sc, void *ret_ip),
TP_ARGS(sc, ret_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -250,8 +250,8 @@ TRACE_EVENT(xfs_scrub_incomplete,
__entry->ret_ip)
);
-TRACE_EVENT(xfs_scrub_btree_op_error,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+TRACE_EVENT(xchk_btree_op_error,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level, int error, void *ret_ip),
TP_ARGS(sc, cur, level, error, ret_ip),
TP_STRUCT__entry(
@@ -266,7 +266,7 @@ TRACE_EVENT(xfs_scrub_btree_op_error,
__field(void *, ret_ip)
),
TP_fast_assign(
- xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+ xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev;
__entry->type = sc->sm->sm_type;
@@ -290,8 +290,8 @@ TRACE_EVENT(xfs_scrub_btree_op_error,
__entry->ret_ip)
);
-TRACE_EVENT(xfs_scrub_ifork_btree_op_error,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+TRACE_EVENT(xchk_ifork_btree_op_error,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level, int error, void *ret_ip),
TP_ARGS(sc, cur, level, error, ret_ip),
TP_STRUCT__entry(
@@ -308,7 +308,7 @@ TRACE_EVENT(xfs_scrub_ifork_btree_op_error,
__field(void *, ret_ip)
),
TP_fast_assign(
- xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+ xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev;
__entry->ino = sc->ip->i_ino;
__entry->whichfork = cur->bc_private.b.whichfork;
@@ -335,8 +335,8 @@ TRACE_EVENT(xfs_scrub_ifork_btree_op_error,
__entry->ret_ip)
);
-TRACE_EVENT(xfs_scrub_btree_error,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+TRACE_EVENT(xchk_btree_error,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level, void *ret_ip),
TP_ARGS(sc, cur, level, ret_ip),
TP_STRUCT__entry(
@@ -350,7 +350,7 @@ TRACE_EVENT(xfs_scrub_btree_error,
__field(void *, ret_ip)
),
TP_fast_assign(
- xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+ xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev;
__entry->type = sc->sm->sm_type;
__entry->btnum = cur->bc_btnum;
@@ -371,8 +371,8 @@ TRACE_EVENT(xfs_scrub_btree_error,
__entry->ret_ip)
);
-TRACE_EVENT(xfs_scrub_ifork_btree_error,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+TRACE_EVENT(xchk_ifork_btree_error,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level, void *ret_ip),
TP_ARGS(sc, cur, level, ret_ip),
TP_STRUCT__entry(
@@ -388,7 +388,7 @@ TRACE_EVENT(xfs_scrub_ifork_btree_error,
__field(void *, ret_ip)
),
TP_fast_assign(
- xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+ xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev;
__entry->ino = sc->ip->i_ino;
__entry->whichfork = cur->bc_private.b.whichfork;
@@ -413,8 +413,8 @@ TRACE_EVENT(xfs_scrub_ifork_btree_error,
__entry->ret_ip)
);
-DECLARE_EVENT_CLASS(xfs_scrub_sbtree_class,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+DECLARE_EVENT_CLASS(xchk_sbtree_class,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level),
TP_ARGS(sc, cur, level),
TP_STRUCT__entry(
@@ -428,7 +428,7 @@ DECLARE_EVENT_CLASS(xfs_scrub_sbtree_class,
__field(int, ptr)
),
TP_fast_assign(
- xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+ xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev;
__entry->type = sc->sm->sm_type;
@@ -450,16 +450,16 @@ DECLARE_EVENT_CLASS(xfs_scrub_sbtree_class,
__entry->ptr)
)
#define DEFINE_SCRUB_SBTREE_EVENT(name) \
-DEFINE_EVENT(xfs_scrub_sbtree_class, name, \
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur, \
+DEFINE_EVENT(xchk_sbtree_class, name, \
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur, \
int level), \
TP_ARGS(sc, cur, level))
-DEFINE_SCRUB_SBTREE_EVENT(xfs_scrub_btree_rec);
-DEFINE_SCRUB_SBTREE_EVENT(xfs_scrub_btree_key);
+DEFINE_SCRUB_SBTREE_EVENT(xchk_btree_rec);
+DEFINE_SCRUB_SBTREE_EVENT(xchk_btree_key);
-TRACE_EVENT(xfs_scrub_xref_error,
- TP_PROTO(struct xfs_scrub_context *sc, int error, void *ret_ip),
+TRACE_EVENT(xchk_xref_error,
+ TP_PROTO(struct xfs_scrub *sc, int error, void *ret_ip),
TP_ARGS(sc, error, ret_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -483,7 +483,7 @@ TRACE_EVENT(xfs_scrub_xref_error,
/* repair tracepoints */
#if IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR)
-DECLARE_EVENT_CLASS(xfs_repair_extent_class,
+DECLARE_EVENT_CLASS(xrep_extent_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno, xfs_extlen_t len),
TP_ARGS(mp, agno, agbno, len),
@@ -506,15 +506,14 @@ DECLARE_EVENT_CLASS(xfs_repair_extent_class,
__entry->len)
);
#define DEFINE_REPAIR_EXTENT_EVENT(name) \
-DEFINE_EVENT(xfs_repair_extent_class, name, \
+DEFINE_EVENT(xrep_extent_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
xfs_agblock_t agbno, xfs_extlen_t len), \
TP_ARGS(mp, agno, agbno, len))
-DEFINE_REPAIR_EXTENT_EVENT(xfs_repair_dispose_btree_extent);
-DEFINE_REPAIR_EXTENT_EVENT(xfs_repair_collect_btree_extent);
-DEFINE_REPAIR_EXTENT_EVENT(xfs_repair_agfl_insert);
+DEFINE_REPAIR_EXTENT_EVENT(xrep_dispose_btree_extent);
+DEFINE_REPAIR_EXTENT_EVENT(xrep_agfl_insert);
-DECLARE_EVENT_CLASS(xfs_repair_rmap_class,
+DECLARE_EVENT_CLASS(xrep_rmap_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno, xfs_extlen_t len,
uint64_t owner, uint64_t offset, unsigned int flags),
@@ -547,17 +546,17 @@ DECLARE_EVENT_CLASS(xfs_repair_rmap_class,
__entry->flags)
);
#define DEFINE_REPAIR_RMAP_EVENT(name) \
-DEFINE_EVENT(xfs_repair_rmap_class, name, \
+DEFINE_EVENT(xrep_rmap_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
xfs_agblock_t agbno, xfs_extlen_t len, \
uint64_t owner, uint64_t offset, unsigned int flags), \
TP_ARGS(mp, agno, agbno, len, owner, offset, flags))
-DEFINE_REPAIR_RMAP_EVENT(xfs_repair_alloc_extent_fn);
-DEFINE_REPAIR_RMAP_EVENT(xfs_repair_ialloc_extent_fn);
-DEFINE_REPAIR_RMAP_EVENT(xfs_repair_rmap_extent_fn);
-DEFINE_REPAIR_RMAP_EVENT(xfs_repair_bmap_extent_fn);
+DEFINE_REPAIR_RMAP_EVENT(xrep_alloc_extent_fn);
+DEFINE_REPAIR_RMAP_EVENT(xrep_ialloc_extent_fn);
+DEFINE_REPAIR_RMAP_EVENT(xrep_rmap_extent_fn);
+DEFINE_REPAIR_RMAP_EVENT(xrep_bmap_extent_fn);
-TRACE_EVENT(xfs_repair_refcount_extent_fn,
+TRACE_EVENT(xrep_refcount_extent_fn,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
struct xfs_refcount_irec *irec),
TP_ARGS(mp, agno, irec),
@@ -583,7 +582,7 @@ TRACE_EVENT(xfs_repair_refcount_extent_fn,
__entry->refcount)
)
-TRACE_EVENT(xfs_repair_init_btblock,
+TRACE_EVENT(xrep_init_btblock,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
xfs_btnum_t btnum),
TP_ARGS(mp, agno, agbno, btnum),
@@ -605,7 +604,7 @@ TRACE_EVENT(xfs_repair_init_btblock,
__entry->agbno,
__entry->btnum)
)
-TRACE_EVENT(xfs_repair_findroot_block,
+TRACE_EVENT(xrep_findroot_block,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
uint32_t magic, uint16_t level),
TP_ARGS(mp, agno, agbno, magic, level),
@@ -630,7 +629,7 @@ TRACE_EVENT(xfs_repair_findroot_block,
__entry->magic,
__entry->level)
)
-TRACE_EVENT(xfs_repair_calc_ag_resblks,
+TRACE_EVENT(xrep_calc_ag_resblks,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agino_t icount, xfs_agblock_t aglen, xfs_agblock_t freelen,
xfs_agblock_t usedlen),
@@ -659,7 +658,7 @@ TRACE_EVENT(xfs_repair_calc_ag_resblks,
__entry->freelen,
__entry->usedlen)
)
-TRACE_EVENT(xfs_repair_calc_ag_resblks_btsize,
+TRACE_EVENT(xrep_calc_ag_resblks_btsize,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t bnobt_sz, xfs_agblock_t inobt_sz,
xfs_agblock_t rmapbt_sz, xfs_agblock_t refcbt_sz),
@@ -688,7 +687,7 @@ TRACE_EVENT(xfs_repair_calc_ag_resblks_btsize,
__entry->rmapbt_sz,
__entry->refcbt_sz)
)
-TRACE_EVENT(xfs_repair_reset_counters,
+TRACE_EVENT(xrep_reset_counters,
TP_PROTO(struct xfs_mount *mp),
TP_ARGS(mp),
TP_STRUCT__entry(
@@ -701,7 +700,7 @@ TRACE_EVENT(xfs_repair_reset_counters,
MAJOR(__entry->dev), MINOR(__entry->dev))
)
-TRACE_EVENT(xfs_repair_ialloc_insert,
+TRACE_EVENT(xrep_ialloc_insert,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agino_t startino, uint16_t holemask, uint8_t count,
uint8_t freecount, uint64_t freemask),
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index 583a9f539bf1..f6ffb4f248f7 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -8,7 +8,6 @@
#ifdef CONFIG_XFS_DEBUG
#define DEBUG 1
-#define XFS_BUF_LOCK_TRACKING 1
#endif
#ifdef CONFIG_XFS_ASSERT_FATAL
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 8eb3ba3d4d00..49f5f5896a43 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2016-2018 Christoph Hellwig.
* All Rights Reserved.
*/
#include "xfs.h"
@@ -20,9 +21,6 @@
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
-#include <linux/gfp.h>
-#include <linux/mpage.h>
-#include <linux/pagevec.h>
#include <linux/writeback.h>
/*
@@ -30,31 +28,11 @@
*/
struct xfs_writepage_ctx {
struct xfs_bmbt_irec imap;
- bool imap_valid;
unsigned int io_type;
+ unsigned int cow_seq;
struct xfs_ioend *ioend;
- sector_t last_block;
};
-void
-xfs_count_page_state(
- struct page *page,
- int *delalloc,
- int *unwritten)
-{
- struct buffer_head *bh, *head;
-
- *delalloc = *unwritten = 0;
-
- bh = head = page_buffers(page);
- do {
- if (buffer_unwritten(bh))
- (*unwritten) = 1;
- else if (buffer_delay(bh))
- (*delalloc) = 1;
- } while ((bh = bh->b_this_page) != head);
-}
-
struct block_device *
xfs_find_bdev_for_inode(
struct inode *inode)
@@ -81,60 +59,23 @@ xfs_find_daxdev_for_inode(
return mp->m_ddev_targp->bt_daxdev;
}
-/*
- * We're now finished for good with this page. Update the page state via the
- * associated buffer_heads, paying attention to the start and end offsets that
- * we need to process on the page.
- *
- * Note that we open code the action in end_buffer_async_write here so that we
- * only have to iterate over the buffers attached to the page once. This is not
- * only more efficient, but also ensures that we only call end_page_writeback
- * at the end of the iteration, and thus avoids the pitfall of having the page
- * and buffers potentially freed after every call to end_buffer_async_write.
- */
static void
xfs_finish_page_writeback(
struct inode *inode,
struct bio_vec *bvec,
int error)
{
- struct buffer_head *head = page_buffers(bvec->bv_page), *bh = head;
- bool busy = false;
- unsigned int off = 0;
- unsigned long flags;
-
- ASSERT(bvec->bv_offset < PAGE_SIZE);
- ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
- ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
- ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);
-
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
- do {
- if (off >= bvec->bv_offset &&
- off < bvec->bv_offset + bvec->bv_len) {
- ASSERT(buffer_async_write(bh));
- ASSERT(bh->b_end_io == NULL);
-
- if (error) {
- mark_buffer_write_io_error(bh);
- clear_buffer_uptodate(bh);
- SetPageError(bvec->bv_page);
- } else {
- set_buffer_uptodate(bh);
- }
- clear_buffer_async_write(bh);
- unlock_buffer(bh);
- } else if (buffer_async_write(bh)) {
- ASSERT(buffer_locked(bh));
- busy = true;
- }
- off += bh->b_size;
- } while ((bh = bh->b_this_page) != head);
- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
- local_irq_restore(flags);
+ struct iomap_page *iop = to_iomap_page(bvec->bv_page);
+
+ if (error) {
+ SetPageError(bvec->bv_page);
+ mapping_set_error(inode->i_mapping, -EIO);
+ }
+
+ ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
+ ASSERT(!iop || atomic_read(&iop->write_count) > 0);
- if (!busy)
+ if (!iop || atomic_dec_and_test(&iop->write_count))
end_page_writeback(bvec->bv_page);
}
@@ -170,7 +111,6 @@ xfs_destroy_ioend(
/* walk each page on bio, ending page IO on them */
bio_for_each_segment_all(bvec, bio, i)
xfs_finish_page_writeback(inode, bvec, error);
-
bio_put(bio);
}
@@ -363,39 +303,66 @@ xfs_end_bio(
STATIC int
xfs_map_blocks(
+ struct xfs_writepage_ctx *wpc,
struct inode *inode,
- loff_t offset,
- struct xfs_bmbt_irec *imap,
- int type)
+ loff_t offset)
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
ssize_t count = i_blocksize(inode);
- xfs_fileoff_t offset_fsb, end_fsb;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset), end_fsb;
+ xfs_fileoff_t cow_fsb = NULLFILEOFF;
+ struct xfs_bmbt_irec imap;
+ int whichfork = XFS_DATA_FORK;
+ struct xfs_iext_cursor icur;
+ bool imap_valid;
int error = 0;
- int bmapi_flags = XFS_BMAPI_ENTIRE;
- int nimaps = 1;
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
+ /*
+ * We have to make sure the cached mapping is within EOF to protect
+ * against eofblocks trimming on file release leaving us with a stale
+ * mapping. Otherwise, a page for a subsequent file extending buffered
+ * write could get picked up by this writeback cycle and written to the
+ * wrong blocks.
+ *
+ * Note that what we really want here is a generic mapping invalidation
+ * mechanism to protect us from arbitrary extent modifying contexts, not
+ * just eofblocks.
+ */
+ xfs_trim_extent_eof(&wpc->imap, ip);
/*
- * Truncate can race with writeback since writeback doesn't take the
- * iolock and truncate decreases the file size before it starts
- * truncating the pages between new_size and old_size. Therefore, we
- * can end up in the situation where writeback gets a CoW fork mapping
- * but the truncate makes the mapping invalid and we end up in here
- * trying to get a new mapping. Bail out here so that we simply never
- * get a valid mapping and so we drop the write altogether. The page
- * truncation will kill the contents anyway.
+ * COW fork blocks can overlap data fork blocks even if the blocks
+ * aren't shared. COW I/O always takes precedence, so we must always
+ * check for overlap on reflink inodes unless the mapping is already a
+ * COW one, or the COW fork hasn't changed from the last time we looked
+ * at it.
+ *
+ * It's safe to check the COW fork if_seq here without the ILOCK because
+ * we've indirectly protected against concurrent updates: writeback has
+ * the page locked, which prevents concurrent invalidations by reflink
+ * and directio and prevents concurrent buffered writes to the same
+ * page. Changes to if_seq always happen under i_lock, which protects
+ * against concurrent updates and provides a memory barrier on the way
+ * out that ensures that we always see the current value.
*/
- if (type == XFS_IO_COW && offset > i_size_read(inode))
+ imap_valid = offset_fsb >= wpc->imap.br_startoff &&
+ offset_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount;
+ if (imap_valid &&
+ (!xfs_inode_has_cow_data(ip) ||
+ wpc->io_type == XFS_IO_COW ||
+ wpc->cow_seq == READ_ONCE(ip->i_cowfp->if_seq)))
return 0;
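	/*
	 * Illustration with hypothetical numbers: a cached map with
	 * br_startoff = 100 and br_blockcount = 8 covers offset_fsb values
	 * in [100, 108), so offset_fsb 104 reuses the map while 108 forces
	 * a new lookup.
	 */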
- ASSERT(type != XFS_IO_COW);
- if (type == XFS_IO_UNWRITTEN)
- bmapi_flags |= XFS_BMAPI_IGSTATE;
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+ /*
+ * If we don't have a valid map, now it's time to get a new one for this
+ * offset. This will convert delayed allocations (including COW ones)
+ * into real extents. If we return without a valid map, it means we
+ * landed in a hole and we skip the block.
+ */
xfs_ilock(ip, XFS_ILOCK_SHARED);
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
(ip->i_df.if_flags & XFS_IFEXTENTS));
@@ -404,109 +371,96 @@ xfs_map_blocks(
if (offset > mp->m_super->s_maxbytes - count)
count = mp->m_super->s_maxbytes - offset;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
- imap, &nimaps, bmapi_flags);
+
/*
- * Truncate an overwrite extent if there's a pending CoW
- * reservation before the end of this extent. This forces us
- * to come back to writepage to take care of the CoW.
+ * Check if this offset is covered by a COW extent, and if so use
+ * it directly instead of looking up anything in the data fork.
*/
- if (nimaps && type == XFS_IO_OVERWRITE)
- xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
- if (error)
- return error;
-
- if (type == XFS_IO_DELALLOC &&
- (!nimaps || isnullstartblock(imap->br_startblock))) {
- error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
- imap);
- if (!error)
- trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
- return error;
- }
-
-#ifdef DEBUG
- if (type == XFS_IO_UNWRITTEN) {
- ASSERT(nimaps);
- ASSERT(imap->br_startblock != HOLESTARTBLOCK);
- ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
+ if (xfs_inode_has_cow_data(ip) &&
+ xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
+ cow_fsb = imap.br_startoff;
+ if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
+ wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ /*
+ * Truncate can race with writeback since writeback doesn't
+ * take the iolock and truncate decreases the file size before
+ * it starts truncating the pages between new_size and old_size.
+ * Therefore, we can end up in the situation where writeback
+ * gets a CoW fork mapping but the truncate makes the mapping
+ * invalid and we end up in here trying to get a new mapping.
+ * Bail out here so that we simply never get a valid mapping
+ * and so we drop the write altogether. The page truncation
+ * will kill the contents anyway.
+ */
+ if (offset > i_size_read(inode)) {
+ wpc->io_type = XFS_IO_HOLE;
+ return 0;
+ }
+ whichfork = XFS_COW_FORK;
+ wpc->io_type = XFS_IO_COW;
+ goto allocate_blocks;
}
-#endif
- if (nimaps)
- trace_xfs_map_blocks_found(ip, offset, count, type, imap);
- return 0;
-}
-
-STATIC bool
-xfs_imap_valid(
- struct inode *inode,
- struct xfs_bmbt_irec *imap,
- xfs_off_t offset)
-{
- offset >>= inode->i_blkbits;
/*
- * We have to make sure the cached mapping is within EOF to protect
- * against eofblocks trimming on file release leaving us with a stale
- * mapping. Otherwise, a page for a subsequent file extending buffered
- * write could get picked up by this writeback cycle and written to the
- * wrong blocks.
- *
- * Note that what we really want here is a generic mapping invalidation
- * mechanism to protect us from arbitrary extent modifying contexts, not
- * just eofblocks.
+ * Map valid and no COW extent in the way? We're done.
*/
- xfs_trim_extent_eof(imap, XFS_I(inode));
-
- return offset >= imap->br_startoff &&
- offset < imap->br_startoff + imap->br_blockcount;
-}
-
-STATIC void
-xfs_start_buffer_writeback(
- struct buffer_head *bh)
-{
- ASSERT(buffer_mapped(bh));
- ASSERT(buffer_locked(bh));
- ASSERT(!buffer_delay(bh));
- ASSERT(!buffer_unwritten(bh));
-
- bh->b_end_io = NULL;
- set_buffer_async_write(bh);
- set_buffer_uptodate(bh);
- clear_buffer_dirty(bh);
-}
-
-STATIC void
-xfs_start_page_writeback(
- struct page *page,
- int clear_dirty)
-{
- ASSERT(PageLocked(page));
- ASSERT(!PageWriteback(page));
+ if (imap_valid) {
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ return 0;
+ }
/*
- * if the page was not fully cleaned, we need to ensure that the higher
- * layers come back to it correctly. That means we need to keep the page
- * dirty, and for WB_SYNC_ALL writeback we need to ensure the
- * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
- * write this page in this writeback sweep will be made.
+ * If we don't have a valid map, now it's time to get a new one for this
+ * offset. This will convert delayed allocations (including COW ones)
+ * into real extents.
*/
- if (clear_dirty) {
- clear_page_dirty_for_io(page);
- set_page_writeback(page);
- } else
- set_page_writeback_keepwrite(page);
+ if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
+ imap.br_startoff = end_fsb; /* fake a hole past EOF */
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
- unlock_page(page);
-}
+ if (imap.br_startoff > offset_fsb) {
+ /* landed in a hole or beyond EOF */
+ imap.br_blockcount = imap.br_startoff - offset_fsb;
+ imap.br_startoff = offset_fsb;
+ imap.br_startblock = HOLESTARTBLOCK;
+ wpc->io_type = XFS_IO_HOLE;
+ } else {
+ /*
+ * Truncate to the next COW extent if there is one. This is the
+ * only opportunity to do this because we can skip COW fork
+ * lookups for the subsequent blocks in the mapping; however,
+ * the requirement to treat the COW range separately remains.
+ */
+ if (cow_fsb != NULLFILEOFF &&
+ cow_fsb < imap.br_startoff + imap.br_blockcount)
+ imap.br_blockcount = cow_fsb - imap.br_startoff;
+
+ if (isnullstartblock(imap.br_startblock)) {
+ /* got a delalloc extent */
+ wpc->io_type = XFS_IO_DELALLOC;
+ goto allocate_blocks;
+ }
-static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
-{
- return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+ if (imap.br_state == XFS_EXT_UNWRITTEN)
+ wpc->io_type = XFS_IO_UNWRITTEN;
+ else
+ wpc->io_type = XFS_IO_OVERWRITE;
+ }
+
+ wpc->imap = imap;
+ trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
+ return 0;
+allocate_blocks:
+ error = xfs_iomap_write_allocate(ip, whichfork, offset, &imap,
+ &wpc->cow_seq);
+ if (error)
+ return error;
+ ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
+ imap.br_startoff + imap.br_blockcount <= cow_fsb);
+ wpc->imap = imap;
+ trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
+ return 0;
}
/*
@@ -574,27 +528,20 @@ xfs_submit_ioend(
return 0;
}
-static void
-xfs_init_bio_from_bh(
- struct bio *bio,
- struct buffer_head *bh)
-{
- bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio_set_dev(bio, bh->b_bdev);
-}
-
static struct xfs_ioend *
xfs_alloc_ioend(
struct inode *inode,
unsigned int type,
xfs_off_t offset,
- struct buffer_head *bh)
+ struct block_device *bdev,
+ sector_t sector)
{
struct xfs_ioend *ioend;
struct bio *bio;
bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
- xfs_init_bio_from_bh(bio, bh);
+ bio_set_dev(bio, bdev);
+ bio->bi_iter.bi_sector = sector;
ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
INIT_LIST_HEAD(&ioend->io_list);
@@ -619,13 +566,14 @@ static void
xfs_chain_bio(
struct xfs_ioend *ioend,
struct writeback_control *wbc,
- struct buffer_head *bh)
+ struct block_device *bdev,
+ sector_t sector)
{
struct bio *new;
new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
- xfs_init_bio_from_bh(new, bh);
-
+ bio_set_dev(new, bdev);
+ new->bi_iter.bi_sector = sector;
bio_chain(ioend->io_bio, new);
bio_get(ioend->io_bio); /* for xfs_destroy_ioend */
ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
@@ -635,122 +583,47 @@ xfs_chain_bio(
}
/*
- * Test to see if we've been building up a completion structure for
- * earlier buffers -- if so, we try to append to this ioend if we
- * can, otherwise we finish off any current ioend and start another.
- * Return the ioend we finished off so that the caller can submit it
- * once it has finished processing the dirty page.
+ * Test to see if we have an existing ioend structure that we could append to
+ * first, otherwise finish off the current ioend and start another.
*/
STATIC void
xfs_add_to_ioend(
struct inode *inode,
- struct buffer_head *bh,
xfs_off_t offset,
+ struct page *page,
+ struct iomap_page *iop,
struct xfs_writepage_ctx *wpc,
struct writeback_control *wbc,
struct list_head *iolist)
{
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ struct block_device *bdev = xfs_find_bdev_for_inode(inode);
+ unsigned len = i_blocksize(inode);
+ unsigned poff = offset & (PAGE_SIZE - 1);
+ sector_t sector;
+
+ sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
+ ((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);
+
if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
- bh->b_blocknr != wpc->last_block + 1 ||
+ sector != bio_end_sector(wpc->ioend->io_bio) ||
offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
if (wpc->ioend)
list_add(&wpc->ioend->io_list, iolist);
- wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
+ wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset,
+ bdev, sector);
}
- /*
- * If the buffer doesn't fit into the bio we need to allocate a new
- * one. This shouldn't happen more than once for a given buffer.
- */
- while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
- xfs_chain_bio(wpc->ioend, wbc, bh);
-
- wpc->ioend->io_size += bh->b_size;
- wpc->last_block = bh->b_blocknr;
- xfs_start_buffer_writeback(bh);
-}
-
-STATIC void
-xfs_map_buffer(
- struct inode *inode,
- struct buffer_head *bh,
- struct xfs_bmbt_irec *imap,
- xfs_off_t offset)
-{
- sector_t bn;
- struct xfs_mount *m = XFS_I(inode)->i_mount;
- xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
- xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
-
- ASSERT(imap->br_startblock != HOLESTARTBLOCK);
- ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
-
- bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
- ((offset - iomap_offset) >> inode->i_blkbits);
-
- ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
-
- bh->b_blocknr = bn;
- set_buffer_mapped(bh);
-}
-
-STATIC void
-xfs_map_at_offset(
- struct inode *inode,
- struct buffer_head *bh,
- struct xfs_bmbt_irec *imap,
- xfs_off_t offset)
-{
- ASSERT(imap->br_startblock != HOLESTARTBLOCK);
- ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
-
- xfs_map_buffer(inode, bh, imap, offset);
- set_buffer_mapped(bh);
- clear_buffer_delay(bh);
- clear_buffer_unwritten(bh);
-}
-
-/*
- * Test if a given page contains at least one buffer of a given @type.
- * If @check_all_buffers is true, then we walk all the buffers in the page to
- * try to find one of the type passed in. If it is not set, then the caller only
- * needs to check the first buffer on the page for a match.
- */
-STATIC bool
-xfs_check_page_type(
- struct page *page,
- unsigned int type,
- bool check_all_buffers)
-{
- struct buffer_head *bh;
- struct buffer_head *head;
-
- if (PageWriteback(page))
- return false;
- if (!page->mapping)
- return false;
- if (!page_has_buffers(page))
- return false;
-
- bh = head = page_buffers(page);
- do {
- if (buffer_unwritten(bh)) {
- if (type == XFS_IO_UNWRITTEN)
- return true;
- } else if (buffer_delay(bh)) {
- if (type == XFS_IO_DELALLOC)
- return true;
- } else if (buffer_dirty(bh) && buffer_mapped(bh)) {
- if (type == XFS_IO_OVERWRITE)
- return true;
- }
-
- /* If we are only checking the first buffer, we are done now. */
- if (!check_all_buffers)
- break;
- } while ((bh = bh->b_this_page) != head);
+ if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff)) {
+ if (iop)
+ atomic_inc(&iop->write_count);
+ if (bio_full(wpc->ioend->io_bio))
+ xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
+ __bio_add_page(wpc->ioend->io_bio, page, len, poff);
+ }
- return false;
+ wpc->ioend->io_size += len;
}
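The sector arithmetic above converts a file offset into 512-byte disk sectors relative to the cached extent; a worked example with hypothetical numbers:

	/*
	 * br_startoff maps to file offset 0 and xfs_fsb_to_db() puts
	 * br_startblock at daddr 1000; writing file offset 8192 gives
	 *	sector = 1000 + (8192 >> 9) = 1016,
	 * i.e. sixteen 512-byte sectors into the extent.
	 */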
STATIC void
@@ -759,34 +632,20 @@ xfs_vm_invalidatepage(
unsigned int offset,
unsigned int length)
{
- trace_xfs_invalidatepage(page->mapping->host, page, offset,
- length);
-
- /*
- * If we are invalidating the entire page, clear the dirty state from it
- * so that we can check for attempts to release dirty cached pages in
- * xfs_vm_releasepage().
- */
- if (offset == 0 && length >= PAGE_SIZE)
- cancel_dirty_page(page);
- block_invalidatepage(page, offset, length);
+ trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
+ iomap_invalidatepage(page, offset, length);
}
/*
- * If the page has delalloc buffers on it, we need to punch them out before we
- * invalidate the page. If we don't, we leave a stale delalloc mapping on the
- * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
- * is done on that same region - the delalloc extent is returned when none is
- * supposed to be there.
- *
- * We prevent this by truncating away the delalloc regions on the page before
- * invalidating it. Because they are delalloc, we can do this without needing a
- * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
- * truncation without a transaction as there is no space left for block
- * reservation (typically why we see a ENOSPC in writeback).
+ * If the page has delalloc blocks on it, we need to punch them out before we
+ * invalidate the page. If we don't, we leave a stale delalloc mapping on the
+ * inode that can trip up a later direct I/O read operation on the same region.
*
- * This is not a performance critical path, so for now just do the punching a
- * buffer head at a time.
+ * We prevent this by truncating away the delalloc regions on the page. Because
+ * they are delalloc, we can do this without needing a transaction. Indeed - if
+ * we get ENOSPC errors, we have to be able to do this truncation without a
+ * transaction as there is no space left for block reservation (typically why we
+ * see an ENOSPC in writeback).
*/
STATIC void
xfs_aops_discard_page(
@@ -794,104 +653,31 @@ xfs_aops_discard_page(
{
struct inode *inode = page->mapping->host;
struct xfs_inode *ip = XFS_I(inode);
- struct buffer_head *bh, *head;
+ struct xfs_mount *mp = ip->i_mount;
loff_t offset = page_offset(page);
+ xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, offset);
+ int error;
- if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
- goto out_invalidate;
-
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ if (XFS_FORCED_SHUTDOWN(mp))
goto out_invalidate;
- xfs_alert(ip->i_mount,
+ xfs_alert(mp,
"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
page, ip->i_ino, offset);
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- bh = head = page_buffers(page);
- do {
- int error;
- xfs_fileoff_t start_fsb;
-
- if (!buffer_delay(bh))
- goto next_buffer;
-
- start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
- error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
- if (error) {
- /* something screwed, just bail */
- if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- xfs_alert(ip->i_mount,
- "page discard unable to remove delalloc mapping.");
- }
- break;
- }
-next_buffer:
- offset += i_blocksize(inode);
-
- } while ((bh = bh->b_this_page) != head);
-
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
+ PAGE_SIZE / i_blocksize(inode));
+ if (error && !XFS_FORCED_SHUTDOWN(mp))
+ xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
- return;
-}
-
-static int
-xfs_map_cow(
- struct xfs_writepage_ctx *wpc,
- struct inode *inode,
- loff_t offset,
- unsigned int *new_type)
-{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_bmbt_irec imap;
- bool is_cow = false;
- int error;
-
- /*
- * If we already have a valid COW mapping keep using it.
- */
- if (wpc->io_type == XFS_IO_COW) {
- wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
- if (wpc->imap_valid) {
- *new_type = XFS_IO_COW;
- return 0;
- }
- }
-
- /*
- * Else we need to check if there is a COW mapping at this offset.
- */
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
- if (!is_cow)
- return 0;
-
- /*
- * And if the COW mapping has a delayed extent here we need to
- * allocate real space for it now.
- */
- if (isnullstartblock(imap.br_startblock)) {
- error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
- &imap);
- if (error)
- return error;
- }
-
- wpc->io_type = *new_type = XFS_IO_COW;
- wpc->imap_valid = true;
- wpc->imap = imap;
- return 0;
}
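
A minimal sketch of the punch arithmetic used above, assuming 4k pages and 1k blocks: the page's byte offset rounds down to a filesystem block (what XFS_B_TO_FSBT does) and the punch spans PAGE_SIZE / blocksize blocks. The values are illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned long long page_size = 4096, blocksize = 1024;
	unsigned long long offset = 3 * page_size;	/* page_offset(page) */

	unsigned long long start_fsb = offset / blocksize; /* XFS_B_TO_FSBT */
	unsigned long long count = page_size / blocksize;

	printf("punch fsb %llu..%llu\n", start_fsb, start_fsb + count - 1);
	return 0;	/* prints: punch fsb 12..15 */
}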
/*
* We implement an immediate ioend submission policy here to avoid needing to
* chain multiple ioends and hence nest mempool allocations which can violate
* forward progress guarantees we need to provide. The current ioend we are
- * adding buffers to is cached on the writepage context, and if the new buffer
+ * adding blocks to is cached on the writepage context, and if the new block
* does not append to the cached ioend it will create a new ioend and cache that
* instead.
*
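
The error handling that goes with this policy appears further down: ioends are gathered on a local submit_list and pushed out one by one, keeping the first error seen. A small stand-alone model of that loop, with a fake submit function standing in for xfs_submit_ioend():

#include <stdio.h>

struct ioend { int id; struct ioend *next; };

static int submit(struct ioend *io, int prev_error)
{
	/* a prior error is passed down so completion can mark it */
	if (prev_error)
		return prev_error;
	return io->id == 2 ? -5 : 0;	/* pretend ioend 2 hits -EIO */
}

int main(void)
{
	struct ioend c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	int error = 0;

	for (struct ioend *io = &a; io; io = io->next) {
		int error2 = submit(io, error);
		if (error2 && !error)
			error = error2;	/* preserve the first error */
	}
	printf("first error: %d\n", error);	/* prints -5 */
	return 0;
}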
@@ -912,138 +698,99 @@ xfs_writepage_map(
uint64_t end_offset)
{
LIST_HEAD(submit_list);
+ struct iomap_page *iop = to_iomap_page(page);
+ unsigned len = i_blocksize(inode);
struct xfs_ioend *ioend, *next;
- struct buffer_head *bh, *head;
- ssize_t len = i_blocksize(inode);
- uint64_t offset;
- int error = 0;
- int count = 0;
- int uptodate = 1;
- unsigned int new_type;
-
- bh = head = page_buffers(page);
- offset = page_offset(page);
- do {
- if (offset >= end_offset)
- break;
- if (!buffer_uptodate(bh))
- uptodate = 0;
+ uint64_t file_offset; /* file offset of page */
+ int error = 0, count = 0, i;
- /*
- * set_page_dirty dirties all buffers in a page, independent
- * of their state. The dirty state however is entirely
- * meaningless for holes (!mapped && uptodate), so skip
- * buffers covering holes here.
- */
- if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
- wpc->imap_valid = false;
- continue;
- }
+ ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
+ ASSERT(!iop || atomic_read(&iop->write_count) == 0);
- if (buffer_unwritten(bh))
- new_type = XFS_IO_UNWRITTEN;
- else if (buffer_delay(bh))
- new_type = XFS_IO_DELALLOC;
- else if (buffer_uptodate(bh))
- new_type = XFS_IO_OVERWRITE;
- else {
- if (PageUptodate(page))
- ASSERT(buffer_mapped(bh));
- /*
- * This buffer is not uptodate and will not be
- * written to disk. Ensure that we will put any
- * subsequent writeable buffers into a new
- * ioend.
- */
- wpc->imap_valid = false;
+ /*
+ * Walk through the page to find areas to write back. If we run off the
+ * end of the current map or find the current map invalid, grab a new
+ * one.
+ */
+ for (i = 0, file_offset = page_offset(page);
+ i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
+ i++, file_offset += len) {
+ if (iop && !test_bit(i, iop->uptodate))
continue;
- }
- if (xfs_is_reflink_inode(XFS_I(inode))) {
- error = xfs_map_cow(wpc, inode, offset, &new_type);
- if (error)
- goto out;
- }
-
- if (wpc->io_type != new_type) {
- wpc->io_type = new_type;
- wpc->imap_valid = false;
- }
-
- if (wpc->imap_valid)
- wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
- offset);
- if (!wpc->imap_valid) {
- error = xfs_map_blocks(inode, offset, &wpc->imap,
- wpc->io_type);
- if (error)
- goto out;
- wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
- offset);
- }
- if (wpc->imap_valid) {
- lock_buffer(bh);
- if (wpc->io_type != XFS_IO_OVERWRITE)
- xfs_map_at_offset(inode, bh, &wpc->imap, offset);
- xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
- count++;
- }
-
- } while (offset += len, ((bh = bh->b_this_page) != head));
-
- if (uptodate && bh == head)
- SetPageUptodate(page);
+ error = xfs_map_blocks(wpc, inode, file_offset);
+ if (error)
+ break;
+ if (wpc->io_type == XFS_IO_HOLE)
+ continue;
+ xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
+ &submit_list);
+ count++;
+ }
ASSERT(wpc->ioend || list_empty(&submit_list));
+ ASSERT(PageLocked(page));
+ ASSERT(!PageWriteback(page));
-out:
/*
- * On error, we have to fail the ioend here because we have locked
- * buffers in the ioend. If we don't do this, we'll deadlock
- * invalidating the page as that tries to lock the buffers on the page.
- * Also, because we may have set pages under writeback, we have to make
- * sure we run IO completion to mark the error state of the IO
- * appropriately, so we can't cancel the ioend directly here. That means
- * we have to mark this page as under writeback if we included any
- * buffers from it in the ioend chain so that completion treats it
- * correctly.
+ * On error, we have to fail the ioend here because we may have set
+ * pages under writeback; we have to make sure we run IO completion to
+ * mark the error state of the IO appropriately, so we can't cancel the
+ * ioend directly here. That means we have to mark this page as under
+ * writeback if we included any blocks from it in the ioend chain so
+ * that completion treats it correctly.
*
* If we didn't include the page in the ioend, then on error we can
* simply discard and unlock it as there are no other users of the page
- * or it's buffers right now. The caller will still need to trigger
- * submission of outstanding ioends on the writepage context so they are
- * treated correctly on error.
+ * now. The caller will still need to trigger submission of outstanding
+ * ioends on the writepage context so they are treated correctly on
+ * error.
*/
- if (count) {
- xfs_start_page_writeback(page, !error);
+ if (unlikely(error)) {
+ if (!count) {
+ xfs_aops_discard_page(page);
+ ClearPageUptodate(page);
+ unlock_page(page);
+ goto done;
+ }
/*
- * Preserve the original error if there was one, otherwise catch
- * submission errors here and propagate into subsequent ioend
- * submissions.
+ * If the page was not fully cleaned, we need to ensure that the
+ * higher layers come back to it correctly. That means we need
+ * to keep the page dirty, and for WB_SYNC_ALL writeback we need
+ * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
+ * so another attempt to write this page in this writeback sweep
+ * will be made.
*/
- list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
- int error2;
-
- list_del_init(&ioend->io_list);
- error2 = xfs_submit_ioend(wbc, ioend, error);
- if (error2 && !error)
- error = error2;
- }
- } else if (error) {
- xfs_aops_discard_page(page);
- ClearPageUptodate(page);
- unlock_page(page);
+ set_page_writeback_keepwrite(page);
} else {
- /*
- * We can end up here with no error and nothing to write if we
- * race with a partial page truncate on a sub-page block sized
- * filesystem. In that case we need to mark the page clean.
- */
- xfs_start_page_writeback(page, 1);
- end_page_writeback(page);
+ clear_page_dirty_for_io(page);
+ set_page_writeback(page);
}
+ unlock_page(page);
+
+ /*
+ * Preserve the original error if there was one, otherwise catch
+ * submission errors here and propagate into subsequent ioend
+ * submissions.
+ */
+ list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
+ int error2;
+
+ list_del_init(&ioend->io_list);
+ error2 = xfs_submit_ioend(wbc, ioend, error);
+ if (error2 && !error)
+ error = error2;
+ }
+
+ /*
+ * We can end up here with no error and nothing to write only if we race
+ * with a partial page truncate on a sub-page block sized filesystem.
+ */
+ if (!count)
+ end_page_writeback(page);
+done:
mapping_set_error(page->mapping, error);
return error;
}
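
A user-space model of the block walk in xfs_writepage_map() above, assuming a hypothetical per-page uptodate bitmap in place of iop->uptodate: blocks whose bit is clear are skipped, and everything else would be mapped and added to an ioend.

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned blkbits = 10;				/* 1k blocks */
	unsigned len = 1u << blkbits;
	unsigned nblocks = PAGE_SIZE >> blkbits;	/* 4 per page */
	unsigned long uptodate = 0xB;		/* blocks 0, 1, 3 uptodate */
	unsigned long long file_offset = 8192;	/* page_offset(page) */

	for (unsigned i = 0; i < nblocks; i++, file_offset += len) {
		if (!(uptodate & (1UL << i)))
			continue;	/* never read or written: skip it */
		printf("map and add block %u at offset %llu\n",
		       i, file_offset);
	}
	return 0;
}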
@@ -1054,7 +801,6 @@ out:
* For delalloc space on the page we need to allocate space and flush it.
* For unwritten space on the page we need to start the conversion to
* regular allocated space.
- * For any other dirty buffer heads on the page we should flush them.
*/
STATIC int
xfs_do_writepage(
@@ -1070,8 +816,6 @@ xfs_do_writepage(
trace_xfs_writepage(inode, page, 0, 0);
- ASSERT(page_has_buffers(page));
-
/*
* Refuse to write the page out if we are called from reclaim context.
*
@@ -1210,166 +954,13 @@ xfs_dax_writepages(
xfs_find_bdev_for_inode(mapping->host), wbc);
}
-/*
- * Called to move a page into cleanable state - and from there
- * to be released. The page should already be clean. We always
- * have buffer heads in this call.
- *
- * Returns 1 if the page is ok to release, 0 otherwise.
- */
STATIC int
xfs_vm_releasepage(
struct page *page,
gfp_t gfp_mask)
{
- int delalloc, unwritten;
-
trace_xfs_releasepage(page->mapping->host, page, 0, 0);
-
- /*
- * mm accommodates an old ext3 case where clean pages might not have had
- * the dirty bit cleared. Thus, it can send actual dirty pages to
- * ->releasepage() via shrink_active_list(). Conversely,
- * block_invalidatepage() can send pages that are still marked dirty but
- * otherwise have invalidated buffers.
- *
- * We want to release the latter to avoid unnecessary buildup of the
- * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
- * that are entirely invalidated and need to be released. Hence the
- * only time we should get dirty pages here is through
- * shrink_active_list() and so we can simply skip those now.
- *
- * warn if we've left any lingering delalloc/unwritten buffers on clean
- * or invalidated pages we are about to release.
- */
- if (PageDirty(page))
- return 0;
-
- xfs_count_page_state(page, &delalloc, &unwritten);
-
- if (WARN_ON_ONCE(delalloc))
- return 0;
- if (WARN_ON_ONCE(unwritten))
- return 0;
-
- return try_to_free_buffers(page);
-}
-
-/*
- * If this is O_DIRECT or the mpage code calling tell them how large the mapping
- * is, so that we can avoid repeated get_blocks calls.
- *
- * If the mapping spans EOF, then we have to break the mapping up as the mapping
- * for blocks beyond EOF must be marked new so that sub block regions can be
- * correctly zeroed. We can't do this for mappings within EOF unless the mapping
- * was just allocated or is unwritten, otherwise the callers would overwrite
- * existing data with zeros. Hence we have to split the mapping into a range up
- * to and including EOF, and a second mapping for beyond EOF.
- */
-static void
-xfs_map_trim_size(
- struct inode *inode,
- sector_t iblock,
- struct buffer_head *bh_result,
- struct xfs_bmbt_irec *imap,
- xfs_off_t offset,
- ssize_t size)
-{
- xfs_off_t mapping_size;
-
- mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
- mapping_size <<= inode->i_blkbits;
-
- ASSERT(mapping_size > 0);
- if (mapping_size > size)
- mapping_size = size;
- if (offset < i_size_read(inode) &&
- (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
- /* limit mapping to block that spans EOF */
- mapping_size = roundup_64(i_size_read(inode) - offset,
- i_blocksize(inode));
- }
- if (mapping_size > LONG_MAX)
- mapping_size = LONG_MAX;
-
- bh_result->b_size = mapping_size;
-}
-
-static int
-xfs_get_blocks(
- struct inode *inode,
- sector_t iblock,
- struct buffer_head *bh_result,
- int create)
-{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
- xfs_fileoff_t offset_fsb, end_fsb;
- int error = 0;
- int lockmode = 0;
- struct xfs_bmbt_irec imap;
- int nimaps = 1;
- xfs_off_t offset;
- ssize_t size;
-
- BUG_ON(create);
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
- offset = (xfs_off_t)iblock << inode->i_blkbits;
- ASSERT(bh_result->b_size >= i_blocksize(inode));
- size = bh_result->b_size;
-
- if (offset >= i_size_read(inode))
- return 0;
-
- /*
- * Direct I/O is usually done on preallocated files, so try getting
- * a block mapping without an exclusive lock first.
- */
- lockmode = xfs_ilock_data_map_shared(ip);
-
- ASSERT(offset <= mp->m_super->s_maxbytes);
- if (offset > mp->m_super->s_maxbytes - size)
- size = mp->m_super->s_maxbytes - offset;
- end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
-
- error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
- &nimaps, 0);
- if (error)
- goto out_unlock;
- if (!nimaps) {
- trace_xfs_get_blocks_notfound(ip, offset, size);
- goto out_unlock;
- }
-
- trace_xfs_get_blocks_found(ip, offset, size,
- imap.br_state == XFS_EXT_UNWRITTEN ?
- XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, &imap);
- xfs_iunlock(ip, lockmode);
-
- /* trim mapping down to size requested */
- xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);
-
- /*
- * For unwritten extents do not report a disk address in the buffered
- * read case (treat as if we're reading into a hole).
- */
- if (xfs_bmap_is_real_extent(&imap))
- xfs_map_buffer(inode, bh_result, &imap, offset);
-
- /*
- * If this is a realtime file, data may be on a different device.
- * to that pointed to from the buffer_head b_bdev currently.
- */
- bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
- return 0;
-
-out_unlock:
- xfs_iunlock(ip, lockmode);
- return error;
+ return iomap_releasepage(page, gfp_mask);
}
STATIC sector_t
@@ -1401,7 +992,7 @@ xfs_vm_readpage(
struct page *page)
{
trace_xfs_vm_readpage(page->mapping->host, 1);
- return mpage_readpage(page, xfs_get_blocks);
+ return iomap_readpage(page, &xfs_iomap_ops);
}
STATIC int
@@ -1412,63 +1003,7 @@ xfs_vm_readpages(
unsigned nr_pages)
{
trace_xfs_vm_readpages(mapping->host, nr_pages);
- return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
-}
-
-/*
- * This is basically a copy of __set_page_dirty_buffers() with one
- * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
- * dirty, we'll never be able to clean them because we don't write buffers
- * beyond EOF, and that means we can't invalidate pages that span EOF
- * that have been marked dirty. Further, the dirty state can leak into
- * the file interior if the file is extended, resulting in all sorts of
- * bad things happening as the state does not match the underlying data.
- *
- * XXX: this really indicates that bufferheads in XFS need to die. Warts like
- * this only exist because of bufferheads and how the generic code manages them.
- */
-STATIC int
-xfs_vm_set_page_dirty(
- struct page *page)
-{
- struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
- loff_t end_offset;
- loff_t offset;
- int newly_dirty;
-
- if (unlikely(!mapping))
- return !TestSetPageDirty(page);
-
- end_offset = i_size_read(inode);
- offset = page_offset(page);
-
- spin_lock(&mapping->private_lock);
- if (page_has_buffers(page)) {
- struct buffer_head *head = page_buffers(page);
- struct buffer_head *bh = head;
-
- do {
- if (offset < end_offset)
- set_buffer_dirty(bh);
- bh = bh->b_this_page;
- offset += i_blocksize(inode);
- } while (bh != head);
- }
- /*
- * Lock out page->mem_cgroup migration to keep PageDirty
- * synchronized with per-memcg dirty page counters.
- */
- lock_page_memcg(page);
- newly_dirty = !TestSetPageDirty(page);
- spin_unlock(&mapping->private_lock);
-
- if (newly_dirty)
- __set_page_dirty(page, mapping, 1);
- unlock_page_memcg(page);
- if (newly_dirty)
- __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- return newly_dirty;
+ return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
}
static int
@@ -1486,13 +1021,13 @@ const struct address_space_operations xfs_address_space_operations = {
.readpages = xfs_vm_readpages,
.writepage = xfs_vm_writepage,
.writepages = xfs_vm_writepages,
- .set_page_dirty = xfs_vm_set_page_dirty,
+ .set_page_dirty = iomap_set_page_dirty,
.releasepage = xfs_vm_releasepage,
.invalidatepage = xfs_vm_invalidatepage,
.bmap = xfs_vm_bmap,
.direct_IO = noop_direct_IO,
- .migratepage = buffer_migrate_page,
- .is_partially_uptodate = block_is_partially_uptodate,
+ .migratepage = iomap_migrate_page,
+ .is_partially_uptodate = iomap_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
.swap_activate = xfs_iomap_swapfile_activate,
};
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 25bc6d4a1231..9af867951a10 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -17,6 +17,7 @@ enum {
XFS_IO_UNWRITTEN, /* covers allocated but uninitialized data */
XFS_IO_OVERWRITE, /* covers already allocated extent */
XFS_IO_COW, /* covers copy-on-write extent */
+ XFS_IO_HOLE, /* covers region without any block allocation */
};
#define XFS_IO_TYPES \
@@ -24,7 +25,8 @@ enum {
{ XFS_IO_DELALLOC, "delalloc" }, \
{ XFS_IO_UNWRITTEN, "unwritten" }, \
{ XFS_IO_OVERWRITE, "overwrite" }, \
- { XFS_IO_COW, "CoW" }
+ { XFS_IO_COW, "CoW" }, \
+ { XFS_IO_HOLE, "hole" }
/*
* Structure for buffered I/O completions.
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index 7ce10055f275..228821b2ebe0 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -26,6 +26,7 @@
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_dir2.h"
+#include "xfs_defer.h"
/*
* Look at all the extents for this logical region,
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index f9ca80154c9c..a58034049995 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -87,7 +87,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
*/
if (context->bufsize == 0 ||
(XFS_ISRESET_CURSOR(cursor) &&
- (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
+ (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
context->put_listent(context,
sfe->flags,
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index 956ebd583e27..ce45f066995e 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -375,9 +375,8 @@ xfs_bud_init(
*/
int
xfs_bui_recover(
- struct xfs_mount *mp,
- struct xfs_bui_log_item *buip,
- struct xfs_defer_ops *dfops)
+ struct xfs_trans *parent_tp,
+ struct xfs_bui_log_item *buip)
{
int error = 0;
unsigned int bui_type;
@@ -393,6 +392,7 @@ xfs_bui_recover(
struct xfs_trans *tp;
struct xfs_inode *ip = NULL;
struct xfs_bmbt_irec irec;
+ struct xfs_mount *mp = parent_tp->t_mountp;
ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));
@@ -441,6 +441,12 @@ xfs_bui_recover(
XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
if (error)
return error;
+ /*
+ * Recovery stashes all deferred ops during intent processing and
+ * finishes them on completion. Transfer current dfops state to this
+ * transaction and transfer the result back before we return.
+ */
+ xfs_defer_move(tp, parent_tp);
budp = xfs_trans_get_bud(tp, buip);
/* Grab the inode. */
@@ -469,9 +475,8 @@ xfs_bui_recover(
xfs_trans_ijoin(tp, ip, 0);
count = bmap->me_len;
- error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type,
- ip, whichfork, bmap->me_startoff,
- bmap->me_startblock, &count, state);
+ error = xfs_trans_log_finish_bmap_update(tp, budp, type, ip, whichfork,
+ bmap->me_startoff, bmap->me_startblock, &count, state);
if (error)
goto err_inode;
@@ -481,23 +486,25 @@ xfs_bui_recover(
irec.br_blockcount = count;
irec.br_startoff = bmap->me_startoff;
irec.br_state = state;
- error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec);
+ error = xfs_bmap_unmap_extent(tp, ip, &irec);
if (error)
goto err_inode;
}
set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
+ xfs_defer_move(parent_tp, tp);
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- IRELE(ip);
+ xfs_irele(ip);
return error;
err_inode:
+ xfs_defer_move(parent_tp, tp);
xfs_trans_cancel(tp);
if (ip) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- IRELE(ip);
+ xfs_irele(ip);
}
return error;
}
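
A sketch of the xfs_defer_move() hand-off used in the recovery path above: the pending deferred-op list changes owner from the parent transaction to the worker transaction and back before return. The list and transaction types below are simplified stand-ins, not the kernel structures.

#include <assert.h>
#include <stddef.h>

struct defer_item { struct defer_item *next; };
struct trans { struct defer_item *dfops; };

/* hand the whole pending list to another transaction (real code splices) */
static void defer_move(struct trans *dst, struct trans *src)
{
	dst->dfops = src->dfops;
	src->dfops = NULL;
}

int main(void)
{
	struct defer_item item = { NULL };
	struct trans parent = { &item }, tp = { NULL };

	defer_move(&tp, &parent);	/* intent processing runs under tp */
	assert(tp.dfops == &item && parent.dfops == NULL);
	defer_move(&parent, &tp);	/* transfer the result back */
	assert(parent.dfops == &item && tp.dfops == NULL);
	return 0;
}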
diff --git a/fs/xfs/xfs_bmap_item.h b/fs/xfs/xfs_bmap_item.h
index fd1a1b13df51..89e043a88bb8 100644
--- a/fs/xfs/xfs_bmap_item.h
+++ b/fs/xfs/xfs_bmap_item.h
@@ -79,7 +79,6 @@ struct xfs_bud_log_item *xfs_bud_init(struct xfs_mount *,
struct xfs_bui_log_item *);
void xfs_bui_item_free(struct xfs_bui_log_item *);
void xfs_bui_release(struct xfs_bui_log_item *);
-int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip,
- struct xfs_defer_ops *dfops);
+int xfs_bui_recover(struct xfs_trans *parent_tp, struct xfs_bui_log_item *buip);
#endif /* __XFS_BMAP_ITEM_H__ */
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 83b1e8c6c18f..addbd74ecd8e 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -702,16 +702,15 @@ xfs_bmap_punch_delalloc_range(
struct xfs_iext_cursor icur;
int error = 0;
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
if (error)
- return error;
+ goto out_unlock;
}
if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
- return 0;
+ goto out_unlock;
while (got.br_startoff + got.br_blockcount > start_fsb) {
del = got;
@@ -735,6 +734,8 @@ xfs_bmap_punch_delalloc_range(
break;
}
+out_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
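
The hunk above pulls the ILOCK inside xfs_bmap_punch_delalloc_range(), so every early exit now funnels through an unlock label. A minimal pthread sketch of that shape (illustrative, not the XFS locking API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ilock = PTHREAD_MUTEX_INITIALIZER;

static int punch_range(int have_extents)
{
	int error = 0;

	pthread_mutex_lock(&ilock);
	if (!have_extents)
		goto out_unlock;	/* early exit still unlocks */
	/* ... do the real work under the lock ... */
out_unlock:
	pthread_mutex_unlock(&ilock);
	return error;
}

int main(void)
{
	printf("%d %d\n", punch_range(0), punch_range(1));
	return 0;
}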
@@ -872,13 +873,11 @@ xfs_alloc_file_space(
xfs_filblks_t allocatesize_fsb;
xfs_extlen_t extsz, temp;
xfs_fileoff_t startoffset_fsb;
- xfs_fsblock_t firstfsb;
int nimaps;
int quota_flag;
int rt;
xfs_trans_t *tp;
xfs_bmbt_irec_t imaps[1], *imapp;
- struct xfs_defer_ops dfops;
uint qblocks, resblks, resrtextents;
int error;
@@ -971,20 +970,15 @@ xfs_alloc_file_space(
xfs_trans_ijoin(tp, ip, 0);
- xfs_defer_init(&dfops, &firstfsb);
error = xfs_bmapi_write(tp, ip, startoffset_fsb,
- allocatesize_fsb, alloc_type, &firstfsb,
- resblks, imapp, &nimaps, &dfops);
+ allocatesize_fsb, alloc_type, resblks,
+ imapp, &nimaps);
if (error)
goto error0;
/*
* Complete the transaction
*/
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto error0;
-
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
@@ -1003,8 +997,7 @@ xfs_alloc_file_space(
return error;
-error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
- xfs_defer_cancel(&dfops);
+error0: /* unlock inode, unreserve quota blocks, cancel trans */
xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
error1: /* Just cancel transaction */
@@ -1022,8 +1015,6 @@ xfs_unmap_extent(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t firstfsb;
uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
int error;
@@ -1041,24 +1032,15 @@ xfs_unmap_extent(
xfs_trans_ijoin(tp, ip, 0);
- xfs_defer_init(&dfops, &firstfsb);
- error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
- &dfops, done);
- if (error)
- goto out_bmap_cancel;
-
- xfs_defer_ijoin(&dfops, ip);
- error = xfs_defer_finish(&tp, &dfops);
+ error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
error = xfs_trans_commit(tp);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
goto out_unlock;
@@ -1279,7 +1261,7 @@ xfs_prepare_shift(
* we've flushed all the dirty data out to disk to avoid having
* CoW extents at the wrong offsets.
*/
- if (xfs_is_reflink_inode(ip)) {
+ if (xfs_inode_has_cow_data(ip)) {
error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
true);
if (error)
@@ -1310,8 +1292,6 @@ xfs_collapse_file_space(
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
@@ -1344,22 +1324,16 @@ xfs_collapse_file_space(
goto out_trans_cancel;
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_defer_init(&dfops, &first_block);
error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
- &done, &first_block, &dfops);
+ &done);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
error = xfs_trans_commit(tp);
}
return error;
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
return error;
@@ -1386,8 +1360,6 @@ xfs_insert_file_space(
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, offset);
xfs_fileoff_t next_fsb = NULLFSBLOCK;
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
@@ -1423,22 +1395,17 @@ xfs_insert_file_space(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_defer_init(&dfops, &first_block);
error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
- &done, stop_fsb, &first_block, &dfops);
+ &done, stop_fsb);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
error = xfs_trans_commit(tp);
}
return error;
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
+out_trans_cancel:
xfs_trans_cancel(tp);
return error;
}
@@ -1566,14 +1533,13 @@ xfs_swap_extent_rmap(
struct xfs_inode *ip,
struct xfs_inode *tip)
{
+ struct xfs_trans *tp = *tpp;
struct xfs_bmbt_irec irec;
struct xfs_bmbt_irec uirec;
struct xfs_bmbt_irec tirec;
xfs_fileoff_t offset_fsb;
xfs_fileoff_t end_fsb;
xfs_filblks_t count_fsb;
- xfs_fsblock_t firstfsb;
- struct xfs_defer_ops dfops;
int error;
xfs_filblks_t ilen;
xfs_filblks_t rlen;
@@ -1609,7 +1575,7 @@ xfs_swap_extent_rmap(
/* Unmap the old blocks in the source file. */
while (tirec.br_blockcount) {
- xfs_defer_init(&dfops, &firstfsb);
+ ASSERT(tp->t_firstblock == NULLFSBLOCK);
trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
/* Read extent from the source file */
@@ -1631,33 +1597,29 @@ xfs_swap_extent_rmap(
trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
/* Remove the mapping from the donor file. */
- error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
- tip, &uirec);
+ error = xfs_bmap_unmap_extent(tp, tip, &uirec);
if (error)
goto out_defer;
/* Remove the mapping from the source file. */
- error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
- ip, &irec);
+ error = xfs_bmap_unmap_extent(tp, ip, &irec);
if (error)
goto out_defer;
/* Map the donor file's blocks into the source file. */
- error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
- ip, &uirec);
+ error = xfs_bmap_map_extent(tp, ip, &uirec);
if (error)
goto out_defer;
/* Map the source file's blocks into the donor file. */
- error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
- tip, &irec);
+ error = xfs_bmap_map_extent(tp, tip, &irec);
if (error)
goto out_defer;
- xfs_defer_ijoin(&dfops, ip);
- error = xfs_defer_finish(tpp, &dfops);
+ error = xfs_defer_finish(tpp);
+ tp = *tpp;
if (error)
- goto out_defer;
+ goto out;
tirec.br_startoff += rlen;
if (tirec.br_startblock != HOLESTARTBLOCK &&
@@ -1675,7 +1637,7 @@ xfs_swap_extent_rmap(
return 0;
out_defer:
- xfs_defer_cancel(&dfops);
+ xfs_defer_cancel(tp);
out:
trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
tip->i_d.di_flags2 = tip_flags2;
@@ -1691,7 +1653,6 @@ xfs_swap_extent_forks(
int *src_log_flags,
int *target_log_flags)
{
- struct xfs_ifork tempifp, *ifp, *tifp;
xfs_filblks_t aforkblks = 0;
xfs_filblks_t taforkblks = 0;
xfs_extnum_t junk;
@@ -1733,11 +1694,7 @@ xfs_swap_extent_forks(
/*
* Swap the data forks of the inodes
*/
- ifp = &ip->i_df;
- tifp = &tip->i_df;
- tempifp = *ifp; /* struct copy */
- *ifp = *tifp; /* struct copy */
- *tifp = tempifp; /* struct copy */
+ swap(ip->i_df, tip->i_df);
/*
* Fix the on-disk inode values
@@ -1746,13 +1703,8 @@ xfs_swap_extent_forks(
ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
- tmp = (uint64_t) ip->i_d.di_nextents;
- ip->i_d.di_nextents = tip->i_d.di_nextents;
- tip->i_d.di_nextents = tmp;
-
- tmp = (uint64_t) ip->i_d.di_format;
- ip->i_d.di_format = tip->i_d.di_format;
- tip->i_d.di_format = tmp;
+ swap(ip->i_d.di_nextents, tip->i_d.di_nextents);
+ swap(ip->i_d.di_format, tip->i_d.di_format);
/*
* The extents in the source inode could still contain speculative
@@ -1846,7 +1798,6 @@ xfs_swap_extents(
int src_log_flags, target_log_flags;
int error = 0;
int lock_flags;
- struct xfs_ifork *cowfp;
uint64_t f;
int resblks = 0;
@@ -1987,18 +1938,11 @@ xfs_swap_extents(
/* Swap the cow forks. */
if (xfs_sb_version_hasreflink(&mp->m_sb)) {
- xfs_extnum_t extnum;
-
ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
- extnum = ip->i_cnextents;
- ip->i_cnextents = tip->i_cnextents;
- tip->i_cnextents = extnum;
-
- cowfp = ip->i_cowfp;
- ip->i_cowfp = tip->i_cowfp;
- tip->i_cowfp = cowfp;
+ swap(ip->i_cnextents, tip->i_cnextents);
+ swap(ip->i_cowfp, tip->i_cowfp);
if (ip->i_cowfp && ip->i_cowfp->if_bytes)
xfs_inode_set_cowblocks_tag(ip);
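
Several hunks above replace open-coded three-step swaps with the kernel's swap() macro. A user-space equivalent (using the gcc/clang typeof extension, as the kernel does) behaves the same way:

#include <stdio.h>

#define swap(a, b) \
	do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
	unsigned long long nextents_a = 7, nextents_b = 42;

	swap(nextents_a, nextents_b);
	printf("%llu %llu\n", nextents_a, nextents_b);	/* prints: 42 7 */
	return 0;
}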
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index e9c058e3761c..e839907e8492 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -34,16 +34,6 @@
static kmem_zone_t *xfs_buf_zone;
-#ifdef XFS_BUF_LOCK_TRACKING
-# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
-# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
-# define XB_GET_OWNER(bp) ((bp)->b_last_holder)
-#else
-# define XB_SET_OWNER(bp) do { } while (0)
-# define XB_CLEAR_OWNER(bp) do { } while (0)
-# define XB_GET_OWNER(bp) do { } while (0)
-#endif
-
#define xb_to_gfp(flags) \
((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
@@ -226,7 +216,6 @@ _xfs_buf_alloc(
INIT_LIST_HEAD(&bp->b_li_list);
sema_init(&bp->b_sema, 0); /* held, no waiters */
spin_lock_init(&bp->b_lock);
- XB_SET_OWNER(bp);
bp->b_target = target;
bp->b_flags = flags;
@@ -757,11 +746,7 @@ _xfs_buf_read(
bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
- if (flags & XBF_ASYNC) {
- xfs_buf_submit(bp);
- return 0;
- }
- return xfs_buf_submit_wait(bp);
+ return xfs_buf_submit(bp);
}
xfs_buf_t *
@@ -846,7 +831,7 @@ xfs_buf_read_uncached(
bp->b_flags |= XBF_READ;
bp->b_ops = ops;
- xfs_buf_submit_wait(bp);
+ xfs_buf_submit(bp);
if (bp->b_error) {
int error = bp->b_error;
xfs_buf_relse(bp);
@@ -1095,12 +1080,10 @@ xfs_buf_trylock(
int locked;
locked = down_trylock(&bp->b_sema) == 0;
- if (locked) {
- XB_SET_OWNER(bp);
+ if (locked)
trace_xfs_buf_trylock(bp, _RET_IP_);
- } else {
+ else
trace_xfs_buf_trylock_fail(bp, _RET_IP_);
- }
return locked;
}
@@ -1122,7 +1105,6 @@ xfs_buf_lock(
if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
xfs_log_force(bp->b_target->bt_mount, 0);
down(&bp->b_sema);
- XB_SET_OWNER(bp);
trace_xfs_buf_lock_done(bp, _RET_IP_);
}
@@ -1133,9 +1115,7 @@ xfs_buf_unlock(
{
ASSERT(xfs_buf_islocked(bp));
- XB_CLEAR_OWNER(bp);
up(&bp->b_sema);
-
trace_xfs_buf_unlock(bp, _RET_IP_);
}
@@ -1249,7 +1229,7 @@ xfs_bwrite(
bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
XBF_WRITE_FAIL | XBF_DONE);
- error = xfs_buf_submit_wait(bp);
+ error = xfs_buf_submit(bp);
if (error) {
xfs_force_shutdown(bp->b_target->bt_mount,
SHUTDOWN_META_IO_ERROR);
@@ -1453,29 +1433,55 @@ _xfs_buf_ioapply(
}
/*
- * Asynchronous IO submission path. This transfers the buffer lock ownership and
- * the current reference to the IO. It is not safe to reference the buffer after
- * a call to this function unless the caller holds an additional reference
- * itself.
+ * Wait for I/O completion of a sync buffer and return the I/O error code.
*/
-void
-xfs_buf_submit(
+static int
+xfs_buf_iowait(
struct xfs_buf *bp)
{
+ ASSERT(!(bp->b_flags & XBF_ASYNC));
+
+ trace_xfs_buf_iowait(bp, _RET_IP_);
+ wait_for_completion(&bp->b_iowait);
+ trace_xfs_buf_iowait_done(bp, _RET_IP_);
+
+ return bp->b_error;
+}
+
+/*
+ * Buffer I/O submission path, read or write. Asynchronous submission transfers
+ * the buffer lock ownership and the current reference to the IO. It is not
+ * safe to reference the buffer after a call to this function unless the caller
+ * holds an additional reference itself.
+ */
+int
+__xfs_buf_submit(
+ struct xfs_buf *bp,
+ bool wait)
+{
+ int error = 0;
+
trace_xfs_buf_submit(bp, _RET_IP_);
ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
- ASSERT(bp->b_flags & XBF_ASYNC);
/* on shutdown we stale and complete the buffer immediately */
if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
xfs_buf_ioerror(bp, -EIO);
bp->b_flags &= ~XBF_DONE;
xfs_buf_stale(bp);
- xfs_buf_ioend(bp);
- return;
+ if (bp->b_flags & XBF_ASYNC)
+ xfs_buf_ioend(bp);
+ return -EIO;
}
+ /*
+ * Grab a reference so the buffer does not go away underneath us. For
+ * async buffers, I/O completion drops the caller's reference, which
+ * could occur before submission returns.
+ */
+ xfs_buf_hold(bp);
+
if (bp->b_flags & XBF_WRITE)
xfs_buf_wait_unpin(bp);
@@ -1483,22 +1489,13 @@ xfs_buf_submit(
bp->b_io_error = 0;
/*
- * The caller's reference is released during I/O completion.
- * This occurs some time after the last b_io_remaining reference is
- * released, so after we drop our Io reference we have to have some
- * other reference to ensure the buffer doesn't go away from underneath
- * us. Take a direct reference to ensure we have safe access to the
- * buffer until we are finished with it.
- */
- xfs_buf_hold(bp);
-
- /*
* Set the count to 1 initially, this will stop an I/O completion
* callout which happens before we have started all the I/O from calling
* xfs_buf_ioend too early.
*/
atomic_set(&bp->b_io_remaining, 1);
- xfs_buf_ioacct_inc(bp);
+ if (bp->b_flags & XBF_ASYNC)
+ xfs_buf_ioacct_inc(bp);
_xfs_buf_ioapply(bp);
/*
@@ -1507,74 +1504,19 @@ xfs_buf_submit(
* that we don't return to the caller with completion still pending.
*/
if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
- if (bp->b_error)
+ if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
xfs_buf_ioend(bp);
else
xfs_buf_ioend_async(bp);
}
- xfs_buf_rele(bp);
- /* Note: it is not safe to reference bp now we've dropped our ref */
-}
-
-/*
- * Synchronous buffer IO submission path, read or write.
- */
-int
-xfs_buf_submit_wait(
- struct xfs_buf *bp)
-{
- int error;
-
- trace_xfs_buf_submit_wait(bp, _RET_IP_);
-
- ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));
-
- if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
- xfs_buf_ioerror(bp, -EIO);
- xfs_buf_stale(bp);
- bp->b_flags &= ~XBF_DONE;
- return -EIO;
- }
-
- if (bp->b_flags & XBF_WRITE)
- xfs_buf_wait_unpin(bp);
-
- /* clear the internal error state to avoid spurious errors */
- bp->b_io_error = 0;
+ if (wait)
+ error = xfs_buf_iowait(bp);
/*
- * For synchronous IO, the IO does not inherit the submitters reference
- * count, nor the buffer lock. Hence we cannot release the reference we
- * are about to take until we've waited for all IO completion to occur,
- * including any xfs_buf_ioend_async() work that may be pending.
- */
- xfs_buf_hold(bp);
-
- /*
- * Set the count to 1 initially, this will stop an I/O completion
- * callout which happens before we have started all the I/O from calling
- * xfs_buf_ioend too early.
- */
- atomic_set(&bp->b_io_remaining, 1);
- _xfs_buf_ioapply(bp);
-
- /*
- * make sure we run completion synchronously if it raced with us and is
- * already complete.
- */
- if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
- xfs_buf_ioend(bp);
-
- /* wait for completion before gathering the error from the buffer */
- trace_xfs_buf_iowait(bp, _RET_IP_);
- wait_for_completion(&bp->b_iowait);
- trace_xfs_buf_iowait_done(bp, _RET_IP_);
- error = bp->b_error;
-
- /*
- * all done now, we can release the hold that keeps the buffer
- * referenced for the entire IO.
+ * Release the hold that keeps the buffer referenced for the entire
+ * I/O. Note that if the buffer is async, it is not safe to reference
+ * after this release.
*/
xfs_buf_rele(bp);
return error;
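
A sketch of the reference discipline in the unified __xfs_buf_submit() above: take a hold before starting I/O so the buffer cannot disappear underneath us, optionally wait for completion, then drop the hold. The refcount model below is illustrative only, not the kernel's buffer code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct buf { atomic_int hold; int error; bool async; };

static void buf_rele(struct buf *bp)
{
	if (atomic_fetch_sub(&bp->hold, 1) == 1)
		printf("buffer freed\n");
}

static int submit(struct buf *bp, bool wait)
{
	int error = 0;

	atomic_fetch_add(&bp->hold, 1);	/* keep bp alive across the I/O */
	/* ... I/O starts; async completion would drop the caller's ref ... */
	if (wait)
		error = bp->error;	/* stands in for xfs_buf_iowait() */
	buf_rele(bp);	/* drop our hold; an async bp may now be gone */
	return error;
}

int main(void)
{
	struct buf bp = { 1, 0, false };	/* one caller reference */

	printf("sync submit error: %d\n", submit(&bp, !bp.async));
	buf_rele(&bp);			/* caller drops its reference */
	return 0;
}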
@@ -1972,16 +1914,11 @@ xfs_buf_cmp(
}
/*
- * submit buffers for write.
- *
- * When we have a large buffer list, we do not want to hold all the buffers
- * locked while we block on the request queue waiting for IO dispatch. To avoid
- * this problem, we lock and submit buffers in groups of 50, thereby minimising
- * the lock hold times for lists which may contain thousands of objects.
- *
- * To do this, we sort the buffer list before we walk the list to lock and
- * submit buffers, and we plug and unplug around each group of buffers we
- * submit.
+ * Submit buffers for write. If wait_list is specified, the buffers are
+ * submitted using sync I/O and placed on the wait list such that the caller can
+ * iowait each buffer. Otherwise async I/O is used and the buffers are released
+ * at I/O completion time. In either case, buffers remain locked until I/O
+ * completes and the buffer is released from the queue.
*/
static int
xfs_buf_delwri_submit_buffers(
@@ -2023,21 +1960,21 @@ xfs_buf_delwri_submit_buffers(
trace_xfs_buf_delwri_split(bp, _RET_IP_);
/*
- * We do all IO submission async. This means if we need
- * to wait for IO completion we need to take an extra
- * reference so the buffer is still valid on the other
- * side. We need to move the buffer onto the io_list
- * at this point so the caller can still access it.
+ * If we have a wait list, each buffer (and associated delwri
+ * queue reference) transfers to it and is submitted
+ * synchronously. Otherwise, drop the buffer from the delwri
+ * queue and submit async.
*/
bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
- bp->b_flags |= XBF_WRITE | XBF_ASYNC;
+ bp->b_flags |= XBF_WRITE;
if (wait_list) {
- xfs_buf_hold(bp);
+ bp->b_flags &= ~XBF_ASYNC;
list_move_tail(&bp->b_list, wait_list);
- } else
+ } else {
+ bp->b_flags |= XBF_ASYNC;
list_del_init(&bp->b_list);
-
- xfs_buf_submit(bp);
+ }
+ __xfs_buf_submit(bp, false);
}
blk_finish_plug(&plug);
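
A toy model of the wait-list split described in the comment above: wait-listed buffers lose XBF_ASYNC so the caller can iowait each one in turn, while the rest go out async and drop off the queue. Flags and helpers here are simplified stand-ins.

#include <stdbool.h>
#include <stdio.h>

#define XBF_ASYNC 0x1u

struct buf { unsigned flags; const char *name; };

static void queue_for_submit(struct buf *bp, bool have_wait_list)
{
	if (have_wait_list)
		bp->flags &= ~XBF_ASYNC;	/* sync: caller will iowait */
	else
		bp->flags |= XBF_ASYNC;		/* fire and forget */
	printf("%s submitted %s\n", bp->name,
	       (bp->flags & XBF_ASYNC) ? "async" : "sync");
}

int main(void)
{
	struct buf a = { 0, "bp1" }, b = { 0, "bp2" };

	queue_for_submit(&a, true);
	queue_for_submit(&b, false);
	return 0;
}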
@@ -2084,9 +2021,11 @@ xfs_buf_delwri_submit(
list_del_init(&bp->b_list);
- /* locking the buffer will wait for async IO completion. */
- xfs_buf_lock(bp);
- error2 = bp->b_error;
+ /*
+ * Wait on the locked buffer, check for errors and unlock and
+ * release the delwri queue reference.
+ */
+ error2 = xfs_buf_iowait(bp);
xfs_buf_relse(bp);
if (!error)
error = error2;
@@ -2132,23 +2071,18 @@ xfs_buf_delwri_pushbuf(
/*
* Delwri submission clears the DELWRI_Q buffer flag and returns with
- * the buffer on the wait list with an associated reference. Rather than
+ * the buffer on the wait list with the original reference. Rather than
* bounce the buffer from a local wait list back to the original list
* after I/O completion, reuse the original list as the wait list.
*/
xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);
/*
- * The buffer is now under I/O and wait listed as during typical delwri
- * submission. Lock the buffer to wait for I/O completion. Rather than
- * remove the buffer from the wait list and release the reference, we
- * want to return with the buffer queued to the original list. The
- * buffer already sits on the original list with a wait list reference,
- * however. If we let the queue inherit that wait list reference, all we
- * need to do is reset the DELWRI_Q flag.
+ * The buffer is now locked, under I/O and wait listed on the original
+ * delwri queue. Wait for I/O completion, restore the DELWRI_Q flag and
+ * return with the buffer unlocked and on the original queue.
*/
- xfs_buf_lock(bp);
- error = bp->b_error;
+ error = xfs_buf_iowait(bp);
bp->b_flags |= _XBF_DELWRI_Q;
xfs_buf_unlock(bp);
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index d24dbd4dac39..4e3171acd0f8 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -12,7 +12,6 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
-#include <linux/buffer_head.h>
#include <linux/uio.h>
#include <linux/list_lru.h>
@@ -199,10 +198,6 @@ typedef struct xfs_buf {
int b_last_error;
const struct xfs_buf_ops *b_ops;
-
-#ifdef XFS_BUF_LOCK_TRACKING
- int b_last_holder;
-#endif
} xfs_buf_t;
/* Finding and Reading Buffers */
@@ -298,8 +293,14 @@ extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
-extern void xfs_buf_submit(struct xfs_buf *bp);
-extern int xfs_buf_submit_wait(struct xfs_buf *bp);
+
+extern int __xfs_buf_submit(struct xfs_buf *bp, bool);
+static inline int xfs_buf_submit(struct xfs_buf *bp)
+{
+ bool wait = bp->b_flags & XBF_ASYNC ? false : true;
+ return __xfs_buf_submit(bp, wait);
+}
+
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 678a5fcd7576..93f07edafd81 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -128,7 +128,7 @@ next_extent:
}
out_del_cursor:
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
xfs_buf_relse(agbp);
out_put_perag:
xfs_perag_put(pag);
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 0973a0423bed..87e6dd5326d5 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -286,17 +286,15 @@ xfs_dquot_disk_alloc(
struct xfs_buf **bpp)
{
struct xfs_bmbt_irec map;
- struct xfs_defer_ops dfops;
- struct xfs_mount *mp = (*tpp)->t_mountp;
+ struct xfs_trans *tp = *tpp;
+ struct xfs_mount *mp = tp->t_mountp;
struct xfs_buf *bp;
struct xfs_inode *quotip = xfs_quota_inode(mp, dqp->dq_flags);
- xfs_fsblock_t firstblock;
int nmaps = 1;
int error;
trace_xfs_dqalloc(dqp);
- xfs_defer_init(&dfops, &firstblock);
xfs_ilock(quotip, XFS_ILOCK_EXCL);
if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
/*
@@ -308,13 +306,12 @@ xfs_dquot_disk_alloc(
}
/* Create the block mapping. */
- xfs_trans_ijoin(*tpp, quotip, XFS_ILOCK_EXCL);
- error = xfs_bmapi_write(*tpp, quotip, dqp->q_fileoffset,
+ xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
+ error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
- &firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
- &map, &nmaps, &dfops);
+ XFS_QM_DQALLOC_SPACE_RES(mp), &map, &nmaps);
if (error)
- goto error0;
+ return error;
ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
ASSERT(nmaps == 1);
ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
@@ -326,19 +323,17 @@ xfs_dquot_disk_alloc(
dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
/* now we can just get the buffer (there's nothing to read yet) */
- bp = xfs_trans_get_buf(*tpp, mp->m_ddev_targp, dqp->q_blkno,
+ bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen, 0);
- if (!bp) {
- error = -ENOMEM;
- goto error1;
- }
+ if (!bp)
+ return -ENOMEM;
bp->b_ops = &xfs_dquot_buf_ops;
/*
* Make a chunk of dquots out of this buffer and log
* the entire thing.
*/
- xfs_qm_init_dquot_blk(*tpp, mp, be32_to_cpu(dqp->q_core.d_id),
+ xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
xfs_buf_set_ref(bp, XFS_DQUOT_REF);
@@ -352,10 +347,8 @@ xfs_dquot_disk_alloc(
* the buffer locked across the _defer_finish call. We can now do
* this correctly with xfs_defer_bjoin.
*
- * Above, we allocated a disk block for the dquot information and
- * used get_buf to initialize the dquot. If the _defer_bjoin fails,
- * the buffer is still locked to *tpp, so we must _bhold_release and
- * then _trans_brelse the buffer. If the _defer_finish fails, the old
+ * Above, we allocated a disk block for the dquot information and used
+ * get_buf to initialize the dquot. If the _defer_finish fails, the old
* transaction is gone but the new buffer is not joined or held to any
* transaction, so we must _buf_relse it.
*
@@ -364,25 +357,15 @@ xfs_dquot_disk_alloc(
* is responsible for unlocking any buffer passed back, either
* manually or by committing the transaction.
*/
- xfs_trans_bhold(*tpp, bp);
- error = xfs_defer_bjoin(&dfops, bp);
- if (error) {
- xfs_trans_bhold_release(*tpp, bp);
- xfs_trans_brelse(*tpp, bp);
- goto error1;
- }
- error = xfs_defer_finish(tpp, &dfops);
+ xfs_trans_bhold(tp, bp);
+ error = xfs_defer_finish(tpp);
+ tp = *tpp;
if (error) {
xfs_buf_relse(bp);
- goto error1;
+ return error;
}
*bpp = bp;
return 0;
-
-error1:
- xfs_defer_cancel(&dfops);
-error0:
- return error;
}
/*
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 0470114a8d80..9866f542e77b 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -50,6 +50,7 @@ static unsigned int xfs_errortag_random_default[] = {
XFS_RANDOM_LOG_ITEM_PIN,
XFS_RANDOM_BUF_LRU_REF,
XFS_RANDOM_FORCE_SCRUB_REPAIR,
+ XFS_RANDOM_FORCE_SUMMARY_RECALC,
};
struct xfs_errortag_attr {
@@ -157,6 +158,7 @@ XFS_ERRORTAG_ATTR_RW(log_bad_crc, XFS_ERRTAG_LOG_BAD_CRC);
XFS_ERRORTAG_ATTR_RW(log_item_pin, XFS_ERRTAG_LOG_ITEM_PIN);
XFS_ERRORTAG_ATTR_RW(buf_lru_ref, XFS_ERRTAG_BUF_LRU_REF);
XFS_ERRORTAG_ATTR_RW(force_repair, XFS_ERRTAG_FORCE_SCRUB_REPAIR);
+XFS_ERRORTAG_ATTR_RW(bad_summary, XFS_ERRTAG_FORCE_SUMMARY_RECALC);
static struct attribute *xfs_errortag_attrs[] = {
XFS_ERRORTAG_ATTR_LIST(noerror),
@@ -192,6 +194,7 @@ static struct attribute *xfs_errortag_attrs[] = {
XFS_ERRORTAG_ATTR_LIST(log_item_pin),
XFS_ERRORTAG_ATTR_LIST(buf_lru_ref),
XFS_ERRORTAG_ATTR_LIST(force_repair),
+ XFS_ERRORTAG_ATTR_LIST(bad_summary),
NULL,
};
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 3cf4682e2510..f2284ceb129f 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -150,7 +150,7 @@ xfs_nfs_get_inode(
}
if (VFS_I(ip)->i_generation != generation) {
- IRELE(ip);
+ xfs_irele(ip);
return ERR_PTR(-ESTALE);
}
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index a3e7767a5715..181e9084519b 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -721,12 +721,10 @@ xfs_file_write_iter(
static void
xfs_wait_dax_page(
- struct inode *inode,
- bool *did_unlock)
+ struct inode *inode)
{
struct xfs_inode *ip = XFS_I(inode);
- *did_unlock = true;
xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
schedule();
xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
@@ -735,8 +733,7 @@ xfs_wait_dax_page(
static int
xfs_break_dax_layouts(
struct inode *inode,
- uint iolock,
- bool *did_unlock)
+ bool *retry)
{
struct page *page;
@@ -746,9 +743,10 @@ xfs_break_dax_layouts(
if (!page)
return 0;
+ *retry = true;
return ___wait_var_event(&page->_refcount,
atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
- 0, 0, xfs_wait_dax_page(inode, did_unlock));
+ 0, 0, xfs_wait_dax_page(inode));
}
int
@@ -766,7 +764,7 @@ xfs_break_layouts(
retry = false;
switch (reason) {
case BREAK_UNMAP:
- error = xfs_break_dax_layouts(inode, *iolock, &retry);
+ error = xfs_break_dax_layouts(inode, &retry);
if (error || retry)
break;
/* fall through */
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 2d2c5ab9143c..182501373af2 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -19,6 +19,8 @@
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_ag_resv.h"
+#include "xfs_trans.h"
+#include "xfs_shared.h"
struct xfs_fstrm_item {
struct xfs_mru_cache_elem mru;
@@ -339,7 +341,7 @@ xfs_filestream_lookup_ag(
if (xfs_filestream_pick_ag(pip, startag, &ag, 0, 0))
ag = NULLAGNUMBER;
out:
- IRELE(pip);
+ xfs_irele(pip);
return ag;
}
@@ -377,7 +379,7 @@ xfs_filestream_new_ag(
if (xfs_alloc_is_userdata(ap->datatype))
flags |= XFS_PICK_USERDATA;
- if (ap->dfops->dop_low)
+ if (ap->tp->t_flags & XFS_TRANS_LOWMODE)
flags |= XFS_PICK_LOWSPACE;
err = xfs_filestream_pick_ag(pip, startag, agp, flags, minlen);
@@ -388,7 +390,7 @@ xfs_filestream_new_ag(
if (mru)
xfs_fstrm_free_func(mp, mru);
- IRELE(pip);
+ xfs_irele(pip);
exit:
if (*agp == NULLAGNUMBER)
*agp = 0;
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index c7157bc48bd1..3d76a9e35870 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -214,12 +214,12 @@ xfs_getfsmap_is_shared(
/* Are there any shared blocks here? */
flen = 0;
cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp,
- info->agno, NULL);
+ info->agno);
error = xfs_refcount_find_shared(cur, rec->rm_startblock,
rec->rm_blockcount, &fbno, &flen, false);
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
if (error)
return error;
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 3f2bd6032cf8..7c00b8bedfe3 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -536,7 +536,7 @@ xfs_fs_reserve_ag_blocks(
for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
pag = xfs_perag_get(mp, agno);
- err2 = xfs_ag_resv_init(pag);
+ err2 = xfs_ag_resv_init(pag, NULL);
xfs_perag_put(pag);
if (err2 && !error)
error = err2;
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 47f417d20a30..245483cc282b 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -66,7 +66,7 @@ xfs_inode_alloc(
ip->i_cowfp = NULL;
ip->i_cnextents = 0;
ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
- memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
+ memset(&ip->i_df, 0, sizeof(ip->i_df));
ip->i_flags = 0;
ip->i_delayed_blks = 0;
memset(&ip->i_d, 0, sizeof(ip->i_d));
@@ -716,7 +716,7 @@ xfs_icache_inode_is_allocated(
return error;
*inuse = !!(VFS_I(ip)->i_mode);
- IRELE(ip);
+ xfs_irele(ip);
return 0;
}
@@ -856,7 +856,7 @@ restart:
xfs_iflags_test(batch[i], XFS_INEW))
xfs_inew_wait(batch[i]);
error = execute(batch[i], flags, args);
- IRELE(batch[i]);
+ xfs_irele(batch[i]);
if (error == -EAGAIN) {
skipped++;
continue;
@@ -1697,14 +1697,13 @@ xfs_inode_clear_eofblocks_tag(
*/
static bool
xfs_prep_free_cowblocks(
- struct xfs_inode *ip,
- struct xfs_ifork *ifp)
+ struct xfs_inode *ip)
{
/*
* Just clear the tag if we have an empty cow fork or none at all. It's
* possible the inode was fully unshared since it was originally tagged.
*/
- if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) {
+ if (!xfs_inode_has_cow_data(ip)) {
trace_xfs_inode_free_cowblocks_invalid(ip);
xfs_inode_clear_cowblocks_tag(ip);
return false;
@@ -1742,11 +1741,10 @@ xfs_inode_free_cowblocks(
void *args)
{
struct xfs_eofblocks *eofb = args;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
int match;
int ret = 0;
- if (!xfs_prep_free_cowblocks(ip, ifp))
+ if (!xfs_prep_free_cowblocks(ip))
return 0;
if (eofb) {
@@ -1771,7 +1769,7 @@ xfs_inode_free_cowblocks(
* Check again, nobody else should be able to dirty blocks or change
* the reflink iflag now that we have the first two locks held.
*/
- if (xfs_prep_free_cowblocks(ip, ifp))
+ if (xfs_prep_free_cowblocks(ip))
ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 5df4de666cc1..d957a46dc1cb 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -927,7 +927,7 @@ xfs_ialloc(
case S_IFLNK:
ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
ip->i_df.if_flags = XFS_IFEXTENTS;
- ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
+ ip->i_df.if_bytes = 0;
ip->i_df.if_u1.if_root = NULL;
break;
default:
@@ -1142,8 +1142,6 @@ xfs_create(
struct xfs_inode *ip = NULL;
struct xfs_trans *tp = NULL;
int error;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
bool unlock_dp_on_error = false;
prid_t prid;
struct xfs_dquot *udqp = NULL;
@@ -1195,9 +1193,6 @@ xfs_create(
xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
unlock_dp_on_error = true;
- xfs_defer_init(&dfops, &first_block);
- tp->t_agfl_dfops = &dfops;
-
/*
* Reserve disk quota and the inode.
*/
@@ -1226,7 +1221,7 @@ xfs_create(
unlock_dp_on_error = false;
error = xfs_dir_createname(tp, dp, name, ip->i_ino,
- &first_block, &dfops, resblks ?
+ resblks ?
resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
if (error) {
ASSERT(error != -ENOSPC);
@@ -1238,11 +1233,11 @@ xfs_create(
if (is_dir) {
error = xfs_dir_init(tp, ip, dp);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
error = xfs_bumplink(tp, dp);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
}
/*
@@ -1260,10 +1255,6 @@ xfs_create(
*/
xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
-
error = xfs_trans_commit(tp);
if (error)
goto out_release_inode;
@@ -1275,8 +1266,6 @@ xfs_create(
*ipp = ip;
return 0;
- out_bmap_cancel:
- xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
out_release_inode:
@@ -1287,7 +1276,7 @@ xfs_create(
*/
if (ip) {
xfs_finish_inode_setup(ip);
- IRELE(ip);
+ xfs_irele(ip);
}
xfs_qm_dqrele(udqp);
@@ -1382,7 +1371,7 @@ xfs_create_tmpfile(
*/
if (ip) {
xfs_finish_inode_setup(ip);
- IRELE(ip);
+ xfs_irele(ip);
}
xfs_qm_dqrele(udqp);
@@ -1401,8 +1390,6 @@ xfs_link(
xfs_mount_t *mp = tdp->i_mount;
xfs_trans_t *tp;
int error;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
int resblks;
trace_xfs_link(tdp, target_name);
@@ -1451,9 +1438,6 @@ xfs_link(
goto error_return;
}
- xfs_defer_init(&dfops, &first_block);
- tp->t_agfl_dfops = &dfops;
-
/*
* Handle initial link state of O_TMPFILE inode
*/
@@ -1464,7 +1448,7 @@ xfs_link(
}
error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
- &first_block, &dfops, resblks);
+ resblks);
if (error)
goto error_return;
xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -1482,12 +1466,6 @@ xfs_link(
if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
xfs_trans_set_sync(tp);
- error = xfs_defer_finish(&tp, &dfops);
- if (error) {
- xfs_defer_cancel(&dfops);
- goto error_return;
- }
-
return xfs_trans_commit(tp);
error_return:
@@ -1545,8 +1523,6 @@ xfs_itruncate_extents_flags(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp = *tpp;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
xfs_fileoff_t first_unmap_block;
xfs_fileoff_t last_block;
xfs_filblks_t unmap_len;
@@ -1583,10 +1559,9 @@ xfs_itruncate_extents_flags(
ASSERT(first_unmap_block < last_block);
unmap_len = last_block - first_unmap_block + 1;
while (!done) {
- xfs_defer_init(&dfops, &first_block);
+ ASSERT(tp->t_firstblock == NULLFSBLOCK);
error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
- XFS_ITRUNC_MAX_EXTENTS, &first_block,
- &dfops, &done);
+ XFS_ITRUNC_MAX_EXTENTS, &done);
if (error)
goto out_bmap_cancel;
@@ -1594,10 +1569,9 @@ xfs_itruncate_extents_flags(
* Duplicate the transaction that has the permanent
* reservation and commit the old transaction.
*/
- xfs_defer_ijoin(&dfops, ip);
- error = xfs_defer_finish(&tp, &dfops);
+ error = xfs_defer_finish(&tp);
if (error)
- goto out_bmap_cancel;
+ goto out;
error = xfs_trans_roll_inode(&tp, ip);
if (error)
@@ -1631,7 +1605,7 @@ out_bmap_cancel:
* the transaction can be properly aborted. We just need to make sure
* we're not holding any resources that we were not when we came in.
*/
- xfs_defer_cancel(&dfops);
+ xfs_defer_cancel(tp);
goto out;
}
@@ -1733,7 +1707,6 @@ xfs_inactive_truncate(
ASSERT(XFS_FORCED_SHUTDOWN(mp));
return error;
}
-
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
@@ -1774,8 +1747,6 @@ STATIC int
xfs_inactive_ifree(
struct xfs_inode *ip)
{
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error;
@@ -1812,9 +1783,7 @@ xfs_inactive_ifree(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
- xfs_defer_init(&dfops, &first_block);
- tp->t_agfl_dfops = &dfops;
- error = xfs_ifree(tp, ip, &dfops);
+ error = xfs_ifree(tp, ip);
if (error) {
/*
* If we fail to free the inode, shut down. The cancel
@@ -1840,12 +1809,6 @@ xfs_inactive_ifree(
* Just ignore errors at this point. There is nothing we can do except
* to try to keep going. Make sure it's not a silent error.
*/
- error = xfs_defer_finish(&tp, &dfops);
- if (error) {
- xfs_notice(mp, "%s: xfs_defer_finish returned error %d",
- __func__, error);
- xfs_defer_cancel(&dfops);
- }
error = xfs_trans_commit(tp);
if (error)
xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
@@ -1868,7 +1831,6 @@ xfs_inactive(
xfs_inode_t *ip)
{
struct xfs_mount *mp;
- struct xfs_ifork *cow_ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
int error;
int truncate = 0;
@@ -1877,7 +1839,6 @@ xfs_inactive(
* to clean up here.
*/
if (VFS_I(ip)->i_mode == 0) {
- ASSERT(ip->i_df.if_real_bytes == 0);
ASSERT(ip->i_df.if_broot_bytes == 0);
return;
}
@@ -1890,7 +1851,7 @@ xfs_inactive(
return;
/* Try to clean out the cow blocks if there are any. */
- if (xfs_is_reflink_inode(ip) && cow_ifp->if_bytes > 0)
+ if (xfs_inode_has_cow_data(ip))
xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
if (VFS_I(ip)->i_nlink != 0) {
@@ -2445,9 +2406,8 @@ xfs_ifree_local_data(
*/
int
xfs_ifree(
- xfs_trans_t *tp,
- xfs_inode_t *ip,
- struct xfs_defer_ops *dfops)
+ struct xfs_trans *tp,
+ struct xfs_inode *ip)
{
int error;
struct xfs_icluster xic = { 0 };
@@ -2466,7 +2426,7 @@ xfs_ifree(
if (error)
return error;
- error = xfs_difree(tp, ip->i_ino, dfops, &xic);
+ error = xfs_difree(tp, ip->i_ino, &xic);
if (error)
return error;
@@ -2577,8 +2537,6 @@ xfs_remove(
xfs_trans_t *tp = NULL;
int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
int error = 0;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
uint resblks;
trace_xfs_remove(dp, name);
@@ -2658,13 +2616,10 @@ xfs_remove(
if (error)
goto out_trans_cancel;
- xfs_defer_init(&dfops, &first_block);
- tp->t_agfl_dfops = &dfops;
- error = xfs_dir_removename(tp, dp, name, ip->i_ino,
- &first_block, &dfops, resblks);
+ error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
if (error) {
ASSERT(error != -ENOENT);
- goto out_bmap_cancel;
+ goto out_trans_cancel;
}
/*
@@ -2675,10 +2630,6 @@ xfs_remove(
if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
xfs_trans_set_sync(tp);
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
-
error = xfs_trans_commit(tp);
if (error)
goto std_return;
@@ -2688,8 +2639,6 @@ xfs_remove(
return 0;
- out_bmap_cancel:
- xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
std_return:
@@ -2749,11 +2698,8 @@ xfs_sort_for_rename(
static int
xfs_finish_rename(
- struct xfs_trans *tp,
- struct xfs_defer_ops *dfops)
+ struct xfs_trans *tp)
{
- int error;
-
/*
* If this is a synchronous mount, make sure that the rename transaction
* goes to disk before returning to the user.
@@ -2761,13 +2707,6 @@ xfs_finish_rename(
if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
xfs_trans_set_sync(tp);
- error = xfs_defer_finish(&tp, dfops);
- if (error) {
- xfs_defer_cancel(dfops);
- xfs_trans_cancel(tp);
- return error;
- }
-
return xfs_trans_commit(tp);
}
@@ -2785,8 +2724,6 @@ xfs_cross_rename(
struct xfs_inode *dp2,
struct xfs_name *name2,
struct xfs_inode *ip2,
- struct xfs_defer_ops *dfops,
- xfs_fsblock_t *first_block,
int spaceres)
{
int error = 0;
@@ -2795,16 +2732,12 @@ xfs_cross_rename(
int dp2_flags = 0;
/* Swap inode number for dirent in first parent */
- error = xfs_dir_replace(tp, dp1, name1,
- ip2->i_ino,
- first_block, dfops, spaceres);
+ error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
if (error)
goto out_trans_abort;
/* Swap inode number for dirent in second parent */
- error = xfs_dir_replace(tp, dp2, name2,
- ip1->i_ino,
- first_block, dfops, spaceres);
+ error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
if (error)
goto out_trans_abort;
@@ -2818,8 +2751,7 @@ xfs_cross_rename(
if (S_ISDIR(VFS_I(ip2)->i_mode)) {
error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
- dp1->i_ino, first_block,
- dfops, spaceres);
+ dp1->i_ino, spaceres);
if (error)
goto out_trans_abort;
@@ -2845,8 +2777,7 @@ xfs_cross_rename(
if (S_ISDIR(VFS_I(ip1)->i_mode)) {
error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
- dp2->i_ino, first_block,
- dfops, spaceres);
+ dp2->i_ino, spaceres);
if (error)
goto out_trans_abort;
@@ -2885,10 +2816,9 @@ xfs_cross_rename(
}
xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
- return xfs_finish_rename(tp, dfops);
+ return xfs_finish_rename(tp);
out_trans_abort:
- xfs_defer_cancel(dfops);
xfs_trans_cancel(tp);
return error;
}
@@ -2943,8 +2873,6 @@ xfs_rename(
{
struct xfs_mount *mp = src_dp->i_mount;
struct xfs_trans *tp;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
struct xfs_inode *wip = NULL; /* whiteout inode */
struct xfs_inode *inodes[__XFS_SORT_INODES];
int num_inodes = __XFS_SORT_INODES;
@@ -3026,14 +2954,11 @@ xfs_rename(
goto out_trans_cancel;
}
- xfs_defer_init(&dfops, &first_block);
- tp->t_agfl_dfops = &dfops;
-
/* RENAME_EXCHANGE is unique from here on. */
if (flags & RENAME_EXCHANGE)
return xfs_cross_rename(tp, src_dp, src_name, src_ip,
target_dp, target_name, target_ip,
- &dfops, &first_block, spaceres);
+ spaceres);
/*
* Set up the target.
@@ -3054,10 +2979,9 @@ xfs_rename(
* to account for the ".." reference from the new entry.
*/
error = xfs_dir_createname(tp, target_dp, target_name,
- src_ip->i_ino, &first_block,
- &dfops, spaceres);
+ src_ip->i_ino, spaceres);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
xfs_trans_ichgtime(tp, target_dp,
XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -3065,7 +2989,7 @@ xfs_rename(
if (new_parent && src_is_directory) {
error = xfs_bumplink(tp, target_dp);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
}
} else { /* target_ip != NULL */
/*
@@ -3094,10 +3018,9 @@ xfs_rename(
* name at the destination directory, remove it first.
*/
error = xfs_dir_replace(tp, target_dp, target_name,
- src_ip->i_ino,
- &first_block, &dfops, spaceres);
+ src_ip->i_ino, spaceres);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
xfs_trans_ichgtime(tp, target_dp,
XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -3108,7 +3031,7 @@ xfs_rename(
*/
error = xfs_droplink(tp, target_ip);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
if (src_is_directory) {
/*
@@ -3116,7 +3039,7 @@ xfs_rename(
*/
error = xfs_droplink(tp, target_ip);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
}
} /* target_ip != NULL */
@@ -3129,11 +3052,10 @@ xfs_rename(
* directory.
*/
error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
- target_dp->i_ino,
- &first_block, &dfops, spaceres);
+ target_dp->i_ino, spaceres);
ASSERT(error != -EEXIST);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
}
/*
@@ -3159,7 +3081,7 @@ xfs_rename(
*/
error = xfs_droplink(tp, src_dp);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
}
/*
@@ -3169,12 +3091,12 @@ xfs_rename(
*/
if (wip) {
error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
- &first_block, &dfops, spaceres);
+ spaceres);
} else
error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
- &first_block, &dfops, spaceres);
+ spaceres);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
/*
* For whiteouts, we need to bump the link count on the whiteout inode.
@@ -3188,10 +3110,10 @@ xfs_rename(
ASSERT(VFS_I(wip)->i_nlink == 0);
error = xfs_bumplink(tp, wip);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
error = xfs_iunlink_remove(tp, wip);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
/*
@@ -3207,18 +3129,16 @@ xfs_rename(
if (new_parent)
xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
- error = xfs_finish_rename(tp, &dfops);
+ error = xfs_finish_rename(tp);
if (wip)
- IRELE(wip);
+ xfs_irele(wip);
return error;
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
out_release_wip:
if (wip)
- IRELE(wip);
+ xfs_irele(wip);
return error;
}
@@ -3674,3 +3594,12 @@ xfs_iflush_int(
corrupt_out:
return -EFSCORRUPTED;
}
+
+/* Release an inode. */
+void
+xfs_irele(
+ struct xfs_inode *ip)
+{
+ trace_xfs_irele(ip, _RET_IP_);
+ iput(VFS_I(ip));
+}
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 2ed63a49e890..be2014520155 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -15,7 +15,6 @@
struct xfs_dinode;
struct xfs_inode;
struct xfs_buf;
-struct xfs_defer_ops;
struct xfs_bmbt_irec;
struct xfs_inode_log_item;
struct xfs_mount;
@@ -34,9 +33,9 @@ typedef struct xfs_inode {
struct xfs_imap i_imap; /* location for xfs_imap() */
/* Extent information. */
- xfs_ifork_t *i_afp; /* attribute fork pointer */
- xfs_ifork_t *i_cowfp; /* copy on write extents */
- xfs_ifork_t i_df; /* data fork */
+ struct xfs_ifork *i_afp; /* attribute fork pointer */
+ struct xfs_ifork *i_cowfp; /* copy on write extents */
+ struct xfs_ifork i_df; /* data fork */
/* operations vectors */
const struct xfs_dir_ops *d_ops; /* directory ops vector */
@@ -199,6 +198,15 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
}
/*
+ * Check if an inode has any data in the COW fork. This may often be false
+ * even for inodes with the reflink flag, when there is no pending COW operation.
+ */
+static inline bool xfs_inode_has_cow_data(struct xfs_inode *ip)
+{
+ return ip->i_cowfp && ip->i_cowfp->if_bytes;
+}
+
+/*
* In-core inode flags.
*/
#define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */
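
The new xfs_inode_has_cow_data() helper above replaces open-coded checks such as the cow_ifp->if_bytes test removed from xfs_inactive() earlier in this diff, and unlike those it is also safe when i_cowfp is NULL. A standalone restatement with made-up model types:

    #include <stdio.h>

    struct ifork { long if_bytes; };
    struct inode_model { struct ifork *i_cowfp; };

    /* mirrors the helper: NULL-safe, true only when bytes are queued */
    static int has_cow_data(const struct inode_model *ip)
    {
            return ip->i_cowfp && ip->i_cowfp->if_bytes != 0;
    }

    int main(void)
    {
            struct ifork cow = { .if_bytes = 16 };
            struct inode_model plain = { NULL }, reflinked = { &cow };

            printf("%d %d\n", has_cow_data(&plain), has_cow_data(&reflinked));
            return 0;
    }
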
@@ -415,8 +423,7 @@ uint xfs_ilock_data_map_shared(struct xfs_inode *);
uint xfs_ilock_attr_map_shared(struct xfs_inode *);
uint xfs_ip2xflags(struct xfs_inode *);
-int xfs_ifree(struct xfs_trans *, xfs_inode_t *,
- struct xfs_defer_ops *);
+int xfs_ifree(struct xfs_trans *, struct xfs_inode *);
int xfs_itruncate_extents_flags(struct xfs_trans **,
struct xfs_inode *, int, xfs_fsize_t, int);
void xfs_iext_realloc(xfs_inode_t *, int, int);
@@ -484,18 +491,7 @@ static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
xfs_finish_inode_setup(ip);
}
-#define IHOLD(ip) \
-do { \
- ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
- ihold(VFS_I(ip)); \
- trace_xfs_ihold(ip, _THIS_IP_); \
-} while (0)
-
-#define IRELE(ip) \
-do { \
- trace_xfs_irele(ip, _THIS_IP_); \
- iput(VFS_I(ip)); \
-} while (0)
+void xfs_irele(struct xfs_inode *ip);
extern struct kmem_zone *xfs_inode_zone;
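
Replacing the IRELE() macro with the out-of-line xfs_irele() also switches the traced address from _THIS_IP_ to _RET_IP_, so the tracepoint now records the call site rather than the macro expansion. A userspace sketch of that benefit, assuming the gcc/clang __builtin_return_address() builtin:

    #include <stdio.h>

    struct inode { int refcount; };

    static void irele(struct inode *ip)
    {
            /* models trace_xfs_irele(ip, _RET_IP_): log who called us */
            printf("irele %p from caller %p\n",
                   (void *)ip, __builtin_return_address(0));
            ip->refcount--;         /* models iput(VFS_I(ip)) */
    }

    int main(void)
    {
            struct inode ino = { .refcount = 1 };

            irele(&ino);
            return 0;
    }
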
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 2389c34c172d..fa1c4fe2ffbf 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -194,8 +194,6 @@ xfs_inode_item_format_data_fork(
* to be there by xfs_idata_realloc().
*/
data_bytes = roundup(ip->i_df.if_bytes, 4);
- ASSERT(ip->i_df.if_real_bytes == 0 ||
- ip->i_df.if_real_bytes >= data_bytes);
ASSERT(ip->i_df.if_u1.if_data != NULL);
ASSERT(ip->i_d.di_size > 0);
xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_ILOCAL,
@@ -280,8 +278,6 @@ xfs_inode_item_format_attr_fork(
* to be there by xfs_idata_realloc().
*/
data_bytes = roundup(ip->i_afp->if_bytes, 4);
- ASSERT(ip->i_afp->if_real_bytes == 0 ||
- ip->i_afp->if_real_bytes >= data_bytes);
ASSERT(ip->i_afp->if_u1.if_data != NULL);
xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_LOCAL,
ip->i_afp->if_u1.if_data,
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 55876dd02f0c..6320aca39f39 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
- * Copyright (c) 2016 Christoph Hellwig.
+ * Copyright (c) 2016-2018 Christoph Hellwig.
* All Rights Reserved.
*/
#include <linux/iomap.h>
@@ -152,13 +152,11 @@ xfs_iomap_write_direct(
xfs_fileoff_t offset_fsb;
xfs_fileoff_t last_fsb;
xfs_filblks_t count_fsb, resaligned;
- xfs_fsblock_t firstfsb;
xfs_extlen_t extsz;
int nimaps;
int quota_flag;
int rt;
xfs_trans_t *tp;
- struct xfs_defer_ops dfops;
uint qblocks, resblks, resrtextents;
int error;
int lockmode;
@@ -254,21 +252,15 @@ xfs_iomap_write_direct(
* From this point onwards we overwrite the imap pointer that the
* caller gave to us.
*/
- xfs_defer_init(&dfops, &firstfsb);
nimaps = 1;
error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
- bmapi_flags, &firstfsb, resblks, imap,
- &nimaps, &dfops);
+ bmapi_flags, resblks, imap, &nimaps);
if (error)
- goto out_bmap_cancel;
+ goto out_res_cancel;
/*
* Complete the transaction
*/
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
-
error = xfs_trans_commit(tp);
if (error)
goto out_unlock;
@@ -288,8 +280,7 @@ out_unlock:
xfs_iunlock(ip, lockmode);
return error;
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
+out_res_cancel:
xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
xfs_trans_cancel(tp);
@@ -626,7 +617,7 @@ retry:
* Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
* them out if the write happens to fail.
*/
- iomap->flags = IOMAP_F_NEW;
+ iomap->flags |= IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
if (isnullstartblock(got.br_startblock))
@@ -660,13 +651,13 @@ xfs_iomap_write_allocate(
xfs_inode_t *ip,
int whichfork,
xfs_off_t offset,
- xfs_bmbt_irec_t *imap)
+ xfs_bmbt_irec_t *imap,
+ unsigned int *cow_seq)
{
xfs_mount_t *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
xfs_fileoff_t offset_fsb, last_block;
xfs_fileoff_t end_fsb, map_start_fsb;
- xfs_fsblock_t first_block;
- struct xfs_defer_ops dfops;
xfs_filblks_t count_fsb;
xfs_trans_t *tp;
int nimaps;
@@ -716,8 +707,6 @@ xfs_iomap_write_allocate(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
- xfs_defer_init(&dfops, &first_block);
-
/*
* it is possible that the extents have changed since
* we did the read call as we dropped the ilock for a
@@ -770,13 +759,8 @@ xfs_iomap_write_allocate(
* pointer that the caller gave to us.
*/
error = xfs_bmapi_write(tp, ip, map_start_fsb,
- count_fsb, flags, &first_block,
- nres, imap, &nimaps,
- &dfops);
- if (error)
- goto trans_cancel;
-
- error = xfs_defer_finish(&tp, &dfops);
+ count_fsb, flags, nres, imap,
+ &nimaps);
if (error)
goto trans_cancel;
@@ -784,6 +768,8 @@ xfs_iomap_write_allocate(
if (error)
goto error0;
+ if (whichfork == XFS_COW_FORK)
+ *cow_seq = READ_ONCE(ifp->if_seq);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
@@ -810,7 +796,6 @@ xfs_iomap_write_allocate(
}
trans_cancel:
- xfs_defer_cancel(&dfops);
xfs_trans_cancel(tp);
error0:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -828,11 +813,9 @@ xfs_iomap_write_unwritten(
xfs_fileoff_t offset_fsb;
xfs_filblks_t count_fsb;
xfs_filblks_t numblks_fsb;
- xfs_fsblock_t firstfsb;
int nimaps;
xfs_trans_t *tp;
xfs_bmbt_irec_t imap;
- struct xfs_defer_ops dfops;
struct inode *inode = VFS_I(ip);
xfs_fsize_t i_size;
uint resblks;
@@ -877,11 +860,10 @@ xfs_iomap_write_unwritten(
/*
* Modify the unwritten extent state of the buffer.
*/
- xfs_defer_init(&dfops, &firstfsb);
nimaps = 1;
error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
- XFS_BMAPI_CONVERT, &firstfsb, resblks,
- &imap, &nimaps, &dfops);
+ XFS_BMAPI_CONVERT, resblks, &imap,
+ &nimaps);
if (error)
goto error_on_bmapi_transaction;
@@ -901,10 +883,6 @@ xfs_iomap_write_unwritten(
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto error_on_bmapi_transaction;
-
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
@@ -928,7 +906,6 @@ xfs_iomap_write_unwritten(
return 0;
error_on_bmapi_transaction:
- xfs_defer_cancel(&dfops);
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
@@ -1132,7 +1109,7 @@ xfs_file_iomap_begin(
if (error)
return error;
- iomap->flags = IOMAP_F_NEW;
+ iomap->flags |= IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
out_finish:
@@ -1202,11 +1179,8 @@ xfs_file_iomap_end_delalloc(
truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
XFS_FSB_TO_B(mp, end_fsb) - 1);
- xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
end_fsb - start_fsb);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
if (error && !XFS_FORCED_SHUTDOWN(mp)) {
xfs_alert(mp, "%s: unable to clean up ino %lld",
__func__, ip->i_ino);
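
Besides dropping the defer-ops plumbing, the two iomap hunks above turn "iomap->flags = IOMAP_F_NEW" into an or-in, so flags set earlier on the same iomap are no longer clobbered. A toy illustration; the flag values below are illustrative, not the real include/linux/iomap.h constants:

    #include <stdio.h>

    #define IOMAP_F_SHARED  0x1     /* illustrative values only */
    #define IOMAP_F_NEW     0x2

    int main(void)
    {
            unsigned int flags = IOMAP_F_SHARED;    /* set earlier in the call chain */

            flags = IOMAP_F_NEW;                    /* old code: SHARED is lost */
            printf("assign: %#x\n", flags);

            flags = IOMAP_F_SHARED;
            flags |= IOMAP_F_NEW;                   /* new code: both survive */
            printf("or-in:  %#x\n", flags);
            return 0;
    }
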
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 83474c9cede9..c6170548831b 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -14,7 +14,7 @@ struct xfs_bmbt_irec;
int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
struct xfs_bmbt_irec *, int);
int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t,
- struct xfs_bmbt_irec *);
+ struct xfs_bmbt_irec *, unsigned int *);
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
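
The new cow_seq out parameter in the prototype above is filled from READ_ONCE(ifp->if_seq) after each COW-fork allocation; presumably the writeback caller compares it with a cached value to detect that the fork changed underneath it. A toy model of that handshake, under that assumption:

    #include <stdio.h>

    struct fork { unsigned if_seq; };

    /* any modification of the extent list bumps the sequence number */
    static void fork_bump(struct fork *ifp) { ifp->if_seq++; }

    /* models the tail of xfs_iomap_write_allocate(): republish the
     * sequence the allocator saw so the caller's cache can be checked */
    static void allocate(struct fork *ifp, unsigned *cow_seq)
    {
            fork_bump(ifp);
            *cow_seq = ifp->if_seq; /* models *cow_seq = READ_ONCE(ifp->if_seq) */
    }

    int main(void)
    {
            struct fork cow = { 0 };
            unsigned cached_seq = cow.if_seq;

            allocate(&cow, &cached_seq);
            fork_bump(&cow);        /* a racing change invalidates the cache */
            printf("cached mapping %s\n",
                   cached_seq == cow.if_seq ? "still valid" : "stale, re-look-up");
            return 0;
    }
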
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 0fa29f39d658..c3e74f9128e8 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -26,6 +26,7 @@
#include "xfs_dir2.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
+#include "xfs_defer.h"
#include <linux/capability.h>
#include <linux/xattr.h>
@@ -208,7 +209,7 @@ xfs_generic_create(
xfs_finish_inode_setup(ip);
if (!tmpfile)
xfs_cleanup_inode(dir, inode, dentry);
- iput(inode);
+ xfs_irele(ip);
goto out_free_acl;
}
@@ -390,7 +391,7 @@ xfs_vn_symlink(
out_cleanup_inode:
xfs_finish_inode_setup(cip);
xfs_cleanup_inode(dir, inode, dentry);
- iput(inode);
+ xfs_irele(cip);
out:
return error;
}
@@ -1253,7 +1254,7 @@ xfs_setup_inode(
inode_sb_list_add(inode);
/* make the inode look hashed for the writeback code */
- hlist_add_fake(&inode->i_hash);
+ inode_fake_hash(inode);
inode->i_uid = xfs_uid_to_kuid(ip->i_d.di_uid);
inode->i_gid = xfs_gid_to_kgid(ip->i_d.di_gid);
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 24f4f1c555b5..e9508ba01ed1 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -114,7 +114,7 @@ xfs_bulkstat_one_int(
break;
}
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- IRELE(ip);
+ xfs_irele(ip);
error = formatter(buffer, ubsize, ubused, buf);
if (!error)
@@ -458,8 +458,7 @@ xfs_bulkstat(
* pending error, then we are done.
*/
del_cursor:
- xfs_btree_del_cursor(cur, error ?
- XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
xfs_buf_relse(agbp);
if (error)
break;
@@ -632,8 +631,7 @@ next_ag:
kmem_free(buffer);
if (cur)
- xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
- XFS_BTREE_NOERROR));
+ xfs_btree_del_cursor(cur, error);
if (agbp)
xfs_buf_relse(agbp);
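
The two xfs_btree_del_cursor() hunks above (and a matching one in xfs_reflink.c further down) drop the XFS_BTREE_ERROR/XFS_BTREE_NOERROR ternary: the teardown now derives the disposition from the error code itself. A trivial model of the calling-convention change:

    #include <stdio.h>

    struct cursor { int level; };

    /* the error code doubles as the error/no-error disposition */
    static void del_cursor(struct cursor *cur, int error)
    {
            printf("cursor teardown: %s\n", error ? "error path" : "clean");
    }

    int main(void)
    {
            struct cursor cur = { 0 };

            del_cursor(&cur, 0);    /* was del_cursor(cur, XFS_BTREE_NOERROR) */
            del_cursor(&cur, -5);   /* was del_cursor(cur, XFS_BTREE_ERROR) */
            return 0;
    }
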
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 5e56f3b93d4b..c3b610b687d1 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -410,7 +410,7 @@ out_error:
}
/*
- * Reserve log space and return a ticket corresponding the reservation.
+ * Reserve log space and return a ticket corresponding to the reservation.
*
* Each reservation is going to reserve extra space for a log record header.
* When writes happen to the on-disk log, we don't subtract the length of the
@@ -826,6 +826,88 @@ xfs_log_mount_cancel(
* deallocation must not be done until source-end.
*/
+/* Actually write the unmount record to disk. */
+static void
+xfs_log_write_unmount_record(
+ struct xfs_mount *mp)
+{
+ /* the data section must be 32 bit size aligned */
+ struct xfs_unmount_log_format magic = {
+ .magic = XLOG_UNMOUNT_TYPE,
+ };
+ struct xfs_log_iovec reg = {
+ .i_addr = &magic,
+ .i_len = sizeof(magic),
+ .i_type = XLOG_REG_TYPE_UNMOUNT,
+ };
+ struct xfs_log_vec vec = {
+ .lv_niovecs = 1,
+ .lv_iovecp = &reg,
+ };
+ struct xlog *log = mp->m_log;
+ struct xlog_in_core *iclog;
+ struct xlog_ticket *tic = NULL;
+ xfs_lsn_t lsn;
+ uint flags = XLOG_UNMOUNT_TRANS;
+ int error;
+
+ error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
+ if (error)
+ goto out_err;
+
+ /*
+ * If we think the summary counters are bad, clear the unmount header
+ * flag in the unmount record so that the summary counters will be
+ * recalculated during log recovery at next mount. Refer to
+ * xlog_check_unmount_rec for more details.
+ */
+ if (XFS_TEST_ERROR((mp->m_flags & XFS_MOUNT_BAD_SUMMARY), mp,
+ XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
+ xfs_alert(mp, "%s: will fix summary counters at next mount",
+ __func__);
+ flags &= ~XLOG_UNMOUNT_TRANS;
+ }
+
+ /* remove inited flag, and account for space used */
+ tic->t_flags = 0;
+ tic->t_curr_res -= sizeof(magic);
+ error = xlog_write(log, &vec, tic, &lsn, NULL, flags);
+ /*
+ * At this point, we're umounting anyway, so there's no point in
+ * transitioning log state to IOERROR. Just continue...
+ */
+out_err:
+ if (error)
+ xfs_alert(mp, "%s: unmount record failed", __func__);
+
+ spin_lock(&log->l_icloglock);
+ iclog = log->l_iclog;
+ atomic_inc(&iclog->ic_refcnt);
+ xlog_state_want_sync(log, iclog);
+ spin_unlock(&log->l_icloglock);
+ error = xlog_state_release_iclog(log, iclog);
+
+ spin_lock(&log->l_icloglock);
+ switch (iclog->ic_state) {
+ default:
+ if (!XLOG_FORCED_SHUTDOWN(log)) {
+ xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
+ break;
+ }
+ /* fall through */
+ case XLOG_STATE_ACTIVE:
+ case XLOG_STATE_DIRTY:
+ spin_unlock(&log->l_icloglock);
+ break;
+ }
+
+ if (tic) {
+ trace_xfs_log_umount_write(log, tic);
+ xlog_ungrant_log_space(log, tic);
+ xfs_log_ticket_put(tic);
+ }
+}
+
/*
* Unmount record used to have a string "Unmount filesystem--" in the
* data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
@@ -842,8 +924,6 @@ xfs_log_unmount_write(xfs_mount_t *mp)
#ifdef DEBUG
xlog_in_core_t *first_iclog;
#endif
- xlog_ticket_t *tic = NULL;
- xfs_lsn_t lsn;
int error;
/*
@@ -870,66 +950,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
} while (iclog != first_iclog);
#endif
if (! (XLOG_FORCED_SHUTDOWN(log))) {
- error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
- if (!error) {
- /* the data section must be 32 bit size aligned */
- struct {
- uint16_t magic;
- uint16_t pad1;
- uint32_t pad2; /* may as well make it 64 bits */
- } magic = {
- .magic = XLOG_UNMOUNT_TYPE,
- };
- struct xfs_log_iovec reg = {
- .i_addr = &magic,
- .i_len = sizeof(magic),
- .i_type = XLOG_REG_TYPE_UNMOUNT,
- };
- struct xfs_log_vec vec = {
- .lv_niovecs = 1,
- .lv_iovecp = &reg,
- };
-
- /* remove inited flag, and account for space used */
- tic->t_flags = 0;
- tic->t_curr_res -= sizeof(magic);
- error = xlog_write(log, &vec, tic, &lsn,
- NULL, XLOG_UNMOUNT_TRANS);
- /*
- * At this point, we're umounting anyway,
- * so there's no point in transitioning log state
- * to IOERROR. Just continue...
- */
- }
-
- if (error)
- xfs_alert(mp, "%s: unmount record failed", __func__);
-
-
- spin_lock(&log->l_icloglock);
- iclog = log->l_iclog;
- atomic_inc(&iclog->ic_refcnt);
- xlog_state_want_sync(log, iclog);
- spin_unlock(&log->l_icloglock);
- error = xlog_state_release_iclog(log, iclog);
-
- spin_lock(&log->l_icloglock);
- if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
- iclog->ic_state == XLOG_STATE_DIRTY)) {
- if (!XLOG_FORCED_SHUTDOWN(log)) {
- xlog_wait(&iclog->ic_force_wait,
- &log->l_icloglock);
- } else {
- spin_unlock(&log->l_icloglock);
- }
- } else {
- spin_unlock(&log->l_icloglock);
- }
- if (tic) {
- trace_xfs_log_umount_write(log, tic);
- xlog_ungrant_log_space(log, tic);
- xfs_log_ticket_put(tic);
- }
+ xfs_log_write_unmount_record(mp);
} else {
/*
* We're already in forced_shutdown mode, couldn't
@@ -4083,3 +4104,12 @@ xfs_log_check_lsn(
return valid;
}
+
+bool
+xfs_log_in_recovery(
+ struct xfs_mount *mp)
+{
+ struct xlog *log = mp->m_log;
+
+ return log->l_flags & XLOG_ACTIVE_RECOVERY;
+}
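
The refactored unmount-record path above swaps an on-stack anonymous struct for the named struct xfs_unmount_log_format (defined elsewhere in this series) while keeping the rule that the data section must be 32 bit size aligned. If one wanted to enforce that rule at compile time, a hypothetical hardening, mirroring the removed anonymous layout, could look like:

    #include <stdint.h>

    struct xfs_unmount_log_format {
            uint16_t        magic;  /* XLOG_UNMOUNT_TYPE */
            uint16_t        pad1;
            uint32_t        pad2;   /* may as well make it 64 bits */
    };

    /* hypothetical check, not part of the patch */
    _Static_assert(sizeof(struct xfs_unmount_log_format) % 4 == 0,
                   "unmount record payload must be 32-bit size aligned");
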
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 3c1f6a8b4b70..73a64bf32f6f 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -153,5 +153,6 @@ bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
void xfs_log_work_queue(struct xfs_mount *mp);
void xfs_log_quiesce(struct xfs_mount *mp);
bool xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);
+bool xfs_log_in_recovery(struct xfs_mount *);
#endif /* __XFS_LOG_H__ */
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index b181b5f57a19..a21dc61ec09e 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -196,7 +196,7 @@ xlog_bread_noalign(
bp->b_io_length = nbblks;
bp->b_error = 0;
- error = xfs_buf_submit_wait(bp);
+ error = xfs_buf_submit(bp);
if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
xfs_buf_ioerror_alert(bp, __func__);
return error;
@@ -4733,10 +4733,9 @@ xlog_recover_cancel_rui(
/* Recover the CUI if necessary. */
STATIC int
xlog_recover_process_cui(
- struct xfs_mount *mp,
+ struct xfs_trans *parent_tp,
struct xfs_ail *ailp,
- struct xfs_log_item *lip,
- struct xfs_defer_ops *dfops)
+ struct xfs_log_item *lip)
{
struct xfs_cui_log_item *cuip;
int error;
@@ -4749,7 +4748,7 @@ xlog_recover_process_cui(
return 0;
spin_unlock(&ailp->ail_lock);
- error = xfs_cui_recover(mp, cuip, dfops);
+ error = xfs_cui_recover(parent_tp, cuip);
spin_lock(&ailp->ail_lock);
return error;
@@ -4774,10 +4773,9 @@ xlog_recover_cancel_cui(
/* Recover the BUI if necessary. */
STATIC int
xlog_recover_process_bui(
- struct xfs_mount *mp,
+ struct xfs_trans *parent_tp,
struct xfs_ail *ailp,
- struct xfs_log_item *lip,
- struct xfs_defer_ops *dfops)
+ struct xfs_log_item *lip)
{
struct xfs_bui_log_item *buip;
int error;
@@ -4790,7 +4788,7 @@ xlog_recover_process_bui(
return 0;
spin_unlock(&ailp->ail_lock);
- error = xfs_bui_recover(mp, buip, dfops);
+ error = xfs_bui_recover(parent_tp, buip);
spin_lock(&ailp->ail_lock);
return error;
@@ -4829,9 +4827,9 @@ static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
/* Take all the collected deferred ops and finish them in order. */
static int
xlog_finish_defer_ops(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops)
+ struct xfs_trans *parent_tp)
{
+ struct xfs_mount *mp = parent_tp->t_mountp;
struct xfs_trans *tp;
int64_t freeblks;
uint resblks;
@@ -4854,16 +4852,10 @@ xlog_finish_defer_ops(
0, XFS_TRANS_RESERVE, &tp);
if (error)
return error;
-
- error = xfs_defer_finish(&tp, dfops);
- if (error)
- goto out_cancel;
+ /* transfer all collected dfops to this transaction */
+ xfs_defer_move(tp, parent_tp);
return xfs_trans_commit(tp);
-
-out_cancel:
- xfs_trans_cancel(tp);
- return error;
}
/*
@@ -4886,23 +4878,34 @@ STATIC int
xlog_recover_process_intents(
struct xlog *log)
{
- struct xfs_defer_ops dfops;
+ struct xfs_trans *parent_tp;
struct xfs_ail_cursor cur;
struct xfs_log_item *lip;
struct xfs_ail *ailp;
- xfs_fsblock_t firstfsb;
- int error = 0;
+ int error;
#if defined(DEBUG) || defined(XFS_WARN)
xfs_lsn_t last_lsn;
#endif
+ /*
+ * The intent recovery handlers commit transactions to complete recovery
+ * for individual intents, but any new deferred operations that are
+ * queued during that process are held off until the very end. The
+ * purpose of this transaction is to serve as a container for deferred
+ * operations. Each intent recovery handler must transfer dfops here
+ * before its local transaction commits, and we'll finish the entire
+ * list below.
+ */
+ error = xfs_trans_alloc_empty(log->l_mp, &parent_tp);
+ if (error)
+ return error;
+
ailp = log->l_ailp;
spin_lock(&ailp->ail_lock);
lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
#if defined(DEBUG) || defined(XFS_WARN)
last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
#endif
- xfs_defer_init(&dfops, &firstfsb);
while (lip != NULL) {
/*
* We're done when we see something other than an intent.
@@ -4937,12 +4940,10 @@ xlog_recover_process_intents(
error = xlog_recover_process_rui(log->l_mp, ailp, lip);
break;
case XFS_LI_CUI:
- error = xlog_recover_process_cui(log->l_mp, ailp, lip,
- &dfops);
+ error = xlog_recover_process_cui(parent_tp, ailp, lip);
break;
case XFS_LI_BUI:
- error = xlog_recover_process_bui(log->l_mp, ailp, lip,
- &dfops);
+ error = xlog_recover_process_bui(parent_tp, ailp, lip);
break;
}
if (error)
@@ -4952,10 +4953,9 @@ xlog_recover_process_intents(
out:
xfs_trans_ail_cursor_done(&cur);
spin_unlock(&ailp->ail_lock);
- if (error)
- xfs_defer_cancel(&dfops);
- else
- error = xlog_finish_defer_ops(log->l_mp, &dfops);
+ if (!error)
+ error = xlog_finish_defer_ops(parent_tp);
+ xfs_trans_cancel(parent_tp);
return error;
}
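
The parent_tp container introduced above lets each intent-recovery handler splice its queued deferred ops into one long-lived empty transaction via xfs_defer_move(), and xlog_finish_defer_ops() then drains them all in order. A userspace sketch of that splice, with hypothetical names:

    #include <stdio.h>

    struct op { const char *name; struct op *next; };
    struct trans { struct op *dfops; };

    /* models xfs_defer_move(dst, src): steal src's deferred ops */
    static void defer_move(struct trans *dst, struct trans *src)
    {
            struct op **tail = &dst->dfops;

            while (*tail)
                    tail = &(*tail)->next;
            *tail = src->dfops;
            src->dfops = NULL;
    }

    int main(void)
    {
            struct op o1 = { "refcount update", NULL };
            struct trans parent = { NULL }, child = { &o1 };

            defer_move(&parent, &child);    /* handler hands work to the parent */
            for (struct op *o = parent.dfops; o; o = o->next)
                    printf("parent finishes: %s\n", o->name);
            return 0;
    }
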
@@ -5094,11 +5094,11 @@ xlog_recover_process_one_iunlink(
*/
ip->i_d.di_dmevmask = 0;
- IRELE(ip);
+ xfs_irele(ip);
return agino;
fail_iput:
- IRELE(ip);
+ xfs_irele(ip);
fail:
/*
* We can't read in the inode this bucket points to, or this inode
@@ -5707,7 +5707,7 @@ xlog_do_recover(
bp->b_flags |= XBF_READ;
bp->b_ops = &xfs_sb_buf_ops;
- error = xfs_buf_submit_wait(bp);
+ error = xfs_buf_submit(bp);
if (error) {
if (!XFS_FORCED_SHUTDOWN(mp)) {
xfs_buf_ioerror_alert(bp, __func__);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index a3378252baa1..99db27d6ac8a 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -207,6 +207,9 @@ xfs_initialize_perag(
if (xfs_buf_hash_init(pag))
goto out_free_pag;
init_waitqueue_head(&pag->pagb_wait);
+ spin_lock_init(&pag->pagb_lock);
+ pag->pagb_count = 0;
+ pag->pagb_tree = RB_ROOT;
if (radix_tree_preload(GFP_NOFS))
goto out_hash_destroy;
@@ -606,6 +609,56 @@ xfs_default_resblks(xfs_mount_t *mp)
return resblks;
}
+/* Ensure the summary counts are correct. */
+STATIC int
+xfs_check_summary_counts(
+ struct xfs_mount *mp)
+{
+ /*
+ * The AG0 superblock verifier rejects in-progress filesystems,
+ * so we should never see the flag set this far into mounting.
+ */
+ if (mp->m_sb.sb_inprogress) {
+ xfs_err(mp, "sb_inprogress set after log recovery??");
+ WARN_ON(1);
+ return -EFSCORRUPTED;
+ }
+
+ /*
+ * Now the log is mounted, we know if it was an unclean shutdown or
+ * not. If it was, then once the first phase of recovery has completed, we
+ * have consistent AG blocks on disk. We have not recovered EFIs yet,
+ * but they are recovered transactionally in the second recovery phase
+ * later.
+ *
+ * If the log was clean when we mounted, we can check the summary
+ * counters. If any of them are obviously incorrect, we can recompute
+ * them from the AGF headers in the next step.
+ */
+ if (XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
+ (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks ||
+ mp->m_sb.sb_ifree > mp->m_sb.sb_icount))
+ mp->m_flags |= XFS_MOUNT_BAD_SUMMARY;
+
+ /*
+ * We can safely re-initialise incore superblock counters from the
+ * per-ag data. These may not be correct if the filesystem was not
+ * cleanly unmounted, so we waited for recovery to finish before doing
+ * this.
+ *
+ * If the filesystem was cleanly unmounted or the previous check did
+ * not flag anything weird, then we can trust the values in the
+ * superblock to be correct and we don't need to do anything here.
+ * Otherwise, recalculate the summary counters.
+ */
+ if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) ||
+ XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) &&
+ !(mp->m_flags & XFS_MOUNT_BAD_SUMMARY))
+ return 0;
+
+ return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
+}
+
/*
* This function does the following on an initial mount of a file system:
* - reads the superblock from disk and init the mount struct
@@ -831,32 +884,10 @@ xfs_mountfs(
goto out_fail_wait;
}
- /*
- * Now the log is mounted, we know if it was an unclean shutdown or
- * not. If it was, with the first phase of recovery has completed, we
- * have consistent AG blocks on disk. We have not recovered EFIs yet,
- * but they are recovered transactionally in the second recovery phase
- * later.
- *
- * Hence we can safely re-initialise incore superblock counters from
- * the per-ag data. These may not be correct if the filesystem was not
- * cleanly unmounted, so we need to wait for recovery to finish before
- * doing this.
- *
- * If the filesystem was cleanly unmounted, then we can trust the
- * values in the superblock to be correct and we don't need to do
- * anything here.
- *
- * If we are currently making the filesystem, the initialisation will
- * fail as the perag data is in an undefined state.
- */
- if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
- !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
- !mp->m_sb.sb_inprogress) {
- error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
- if (error)
- goto out_log_dealloc;
- }
+ /* Make sure the summary counts are ok. */
+ error = xfs_check_summary_counts(mp);
+ if (error)
+ goto out_log_dealloc;
/*
* Get and sanity-check the root inode.
@@ -1011,7 +1042,7 @@ xfs_mountfs(
out_rtunmount:
xfs_rtunmount_inodes(mp);
out_rele_rip:
- IRELE(rip);
+ xfs_irele(rip);
/* Clean out dquots that might be in memory after quotacheck. */
xfs_qm_unmount(mp);
/*
@@ -1067,7 +1098,7 @@ xfs_unmountfs(
xfs_fs_unreserve_ag_blocks(mp);
xfs_qm_unmount_quotas(mp);
xfs_rtunmount_inodes(mp);
- IRELE(mp->m_rootip);
+ xfs_irele(mp->m_rootip);
/*
* We can potentially deadlock here if we have an inode cluster
@@ -1395,3 +1426,16 @@ xfs_dev_is_read_only(
}
return 0;
}
+
+/* Force the summary counters to be recalculated at next mount. */
+void
+xfs_force_summary_recalc(
+ struct xfs_mount *mp)
+{
+ if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
+ return;
+
+ spin_lock(&mp->m_sb_lock);
+ mp->m_flags |= XFS_MOUNT_BAD_SUMMARY;
+ spin_unlock(&mp->m_sb_lock);
+}
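
xfs_check_summary_counts() above treats fdblocks > dblocks or ifree > icount as proof that the summary counters are bad, since a free count can never exceed its total; xfs_force_summary_recalc() then gives other code a way to request the same rebuild at the next mount. The predicate, restated standalone:

    #include <stdbool.h>
    #include <stdio.h>

    struct sb { unsigned long fdblocks, dblocks, ifree, icount; };

    /* free counts exceeding totals are obviously incorrect */
    static bool summary_counts_bad(const struct sb *sb)
    {
            return sb->fdblocks > sb->dblocks || sb->ifree > sb->icount;
    }

    int main(void)
    {
            struct sb sb = { .fdblocks = 10, .dblocks = 8, .ifree = 0, .icount = 4 };

            printf("recalculate at mount: %s\n",
                   summary_counts_bad(&sb) ? "yes" : "no");
            return 0;
    }
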
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 245349d1e23f..7964513c3128 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -202,6 +202,7 @@ typedef struct xfs_mount {
must be synchronous except
for space allocations */
#define XFS_MOUNT_UNMOUNTING (1ULL << 1) /* filesystem is unmounting */
+#define XFS_MOUNT_BAD_SUMMARY (1ULL << 2) /* summary counters are bad */
#define XFS_MOUNT_WAS_CLEAN (1ULL << 3)
#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem
operations, typically for
@@ -216,7 +217,6 @@ typedef struct xfs_mount {
#define XFS_MOUNT_SMALL_INUMS (1ULL << 14) /* user wants 32bit inodes */
#define XFS_MOUNT_32BITINODES (1ULL << 15) /* inode32 allocator active */
#define XFS_MOUNT_NOUUID (1ULL << 16) /* ignore uuid during mount */
-#define XFS_MOUNT_BARRIER (1ULL << 17)
#define XFS_MOUNT_IKEEP (1ULL << 18) /* keep empty inode clusters*/
#define XFS_MOUNT_SWALLOC (1ULL << 19) /* turn on stripe width
* allocation */
@@ -434,5 +434,6 @@ int xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
int error_class, int error);
+void xfs_force_summary_recalc(struct xfs_mount *mp);
#endif /* __XFS_MOUNT_H__ */
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 9ceb85cce33a..52ed7904df10 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -231,15 +231,15 @@ xfs_qm_unmount_quotas(
*/
if (mp->m_quotainfo) {
if (mp->m_quotainfo->qi_uquotaip) {
- IRELE(mp->m_quotainfo->qi_uquotaip);
+ xfs_irele(mp->m_quotainfo->qi_uquotaip);
mp->m_quotainfo->qi_uquotaip = NULL;
}
if (mp->m_quotainfo->qi_gquotaip) {
- IRELE(mp->m_quotainfo->qi_gquotaip);
+ xfs_irele(mp->m_quotainfo->qi_gquotaip);
mp->m_quotainfo->qi_gquotaip = NULL;
}
if (mp->m_quotainfo->qi_pquotaip) {
- IRELE(mp->m_quotainfo->qi_pquotaip);
+ xfs_irele(mp->m_quotainfo->qi_pquotaip);
mp->m_quotainfo->qi_pquotaip = NULL;
}
}
@@ -1200,12 +1200,12 @@ xfs_qm_dqusage_adjust(
goto error0;
}
- IRELE(ip);
+ xfs_irele(ip);
*res = BULKSTAT_RV_DIDONE;
return 0;
error0:
- IRELE(ip);
+ xfs_irele(ip);
*res = BULKSTAT_RV_GIVEUP;
return error;
}
@@ -1575,11 +1575,11 @@ xfs_qm_init_quotainos(
error_rele:
if (uip)
- IRELE(uip);
+ xfs_irele(uip);
if (gip)
- IRELE(gip);
+ xfs_irele(gip);
if (pip)
- IRELE(pip);
+ xfs_irele(pip);
return error;
}
@@ -1588,15 +1588,15 @@ xfs_qm_destroy_quotainos(
xfs_quotainfo_t *qi)
{
if (qi->qi_uquotaip) {
- IRELE(qi->qi_uquotaip);
+ xfs_irele(qi->qi_uquotaip);
qi->qi_uquotaip = NULL; /* paranoia */
}
if (qi->qi_gquotaip) {
- IRELE(qi->qi_gquotaip);
+ xfs_irele(qi->qi_gquotaip);
qi->qi_gquotaip = NULL;
}
if (qi->qi_pquotaip) {
- IRELE(qi->qi_pquotaip);
+ xfs_irele(qi->qi_pquotaip);
qi->qi_pquotaip = NULL;
}
}
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index abc8a21e3a82..b3190890f096 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -22,6 +22,7 @@
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
+#include "xfs_defer.h"
STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
@@ -189,15 +190,15 @@ xfs_qm_scall_quotaoff(
* Release our quotainode references if we don't need them anymore.
*/
if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
- IRELE(q->qi_uquotaip);
+ xfs_irele(q->qi_uquotaip);
q->qi_uquotaip = NULL;
}
if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
- IRELE(q->qi_gquotaip);
+ xfs_irele(q->qi_gquotaip);
q->qi_gquotaip = NULL;
}
if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
- IRELE(q->qi_pquotaip);
+ xfs_irele(q->qi_pquotaip);
q->qi_pquotaip = NULL;
}
@@ -250,7 +251,7 @@ xfs_qm_scall_trunc_qfile(
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
- IRELE(ip);
+ xfs_irele(ip);
return error;
}
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 205fbb2a77e4..a7c0c657dfaf 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -45,7 +45,7 @@ xfs_qm_fill_state(
tstate->ino_warnlimit = q->qi_iwarnlimit;
tstate->rt_spc_warnlimit = q->qi_rtbwarnlimit;
if (tempqip)
- IRELE(ip);
+ xfs_irele(ip);
}
/*
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index 472a73e9d331..fce38b56b962 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -380,9 +380,8 @@ xfs_cud_init(
*/
int
xfs_cui_recover(
- struct xfs_mount *mp,
- struct xfs_cui_log_item *cuip,
- struct xfs_defer_ops *dfops)
+ struct xfs_trans *parent_tp,
+ struct xfs_cui_log_item *cuip)
{
int i;
int error = 0;
@@ -398,6 +397,7 @@ xfs_cui_recover(
xfs_extlen_t new_len;
struct xfs_bmbt_irec irec;
bool requeue_only = false;
+ struct xfs_mount *mp = parent_tp->t_mountp;
ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags));
@@ -452,6 +452,12 @@ xfs_cui_recover(
mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
if (error)
return error;
+ /*
+ * Recovery stashes all deferred ops during intent processing and
+ * finishes them on completion. Transfer current dfops state to this
+ * transaction and transfer the result back before we return.
+ */
+ xfs_defer_move(tp, parent_tp);
cudp = xfs_trans_get_cud(tp, cuip);
for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
@@ -473,7 +479,7 @@ xfs_cui_recover(
new_len = refc->pe_len;
} else
error = xfs_trans_log_finish_refcount_update(tp, cudp,
- dfops, type, refc->pe_startblock, refc->pe_len,
+ type, refc->pe_startblock, refc->pe_len,
&new_fsb, &new_len, &rcur);
if (error)
goto abort_error;
@@ -484,22 +490,18 @@ xfs_cui_recover(
irec.br_blockcount = new_len;
switch (type) {
case XFS_REFCOUNT_INCREASE:
- error = xfs_refcount_increase_extent(
- tp->t_mountp, dfops, &irec);
+ error = xfs_refcount_increase_extent(tp, &irec);
break;
case XFS_REFCOUNT_DECREASE:
- error = xfs_refcount_decrease_extent(
- tp->t_mountp, dfops, &irec);
+ error = xfs_refcount_decrease_extent(tp, &irec);
break;
case XFS_REFCOUNT_ALLOC_COW:
- error = xfs_refcount_alloc_cow_extent(
- tp->t_mountp, dfops,
+ error = xfs_refcount_alloc_cow_extent(tp,
irec.br_startblock,
irec.br_blockcount);
break;
case XFS_REFCOUNT_FREE_COW:
- error = xfs_refcount_free_cow_extent(
- tp->t_mountp, dfops,
+ error = xfs_refcount_free_cow_extent(tp,
irec.br_startblock,
irec.br_blockcount);
break;
@@ -514,11 +516,13 @@ xfs_cui_recover(
xfs_refcount_finish_one_cleanup(tp, rcur, error);
set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
+ xfs_defer_move(parent_tp, tp);
error = xfs_trans_commit(tp);
return error;
abort_error:
xfs_refcount_finish_one_cleanup(tp, rcur, error);
+ xfs_defer_move(parent_tp, tp);
xfs_trans_cancel(tp);
return error;
}
diff --git a/fs/xfs/xfs_refcount_item.h b/fs/xfs/xfs_refcount_item.h
index dd830b69cd1e..3896dcc2368f 100644
--- a/fs/xfs/xfs_refcount_item.h
+++ b/fs/xfs/xfs_refcount_item.h
@@ -82,7 +82,6 @@ struct xfs_cud_log_item *xfs_cud_init(struct xfs_mount *,
struct xfs_cui_log_item *);
void xfs_cui_item_free(struct xfs_cui_log_item *);
void xfs_cui_release(struct xfs_cui_log_item *);
-int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip,
- struct xfs_defer_ops *dfops);
+int xfs_cui_recover(struct xfs_trans *parent_tp, struct xfs_cui_log_item *cuip);
#endif /* __XFS_REFCOUNT_ITEM_H__ */
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 592fb2071a03..38f405415b88 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -157,12 +157,12 @@ xfs_reflink_find_shared(
if (!agbp)
return -ENOMEM;
- cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
+ cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
find_end_of_shared);
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
xfs_trans_brelse(tp, agbp);
return error;
@@ -312,10 +312,8 @@ xfs_reflink_convert_cow_extent(
struct xfs_inode *ip,
struct xfs_bmbt_irec *imap,
xfs_fileoff_t offset_fsb,
- xfs_filblks_t count_fsb,
- struct xfs_defer_ops *dfops)
+ xfs_filblks_t count_fsb)
{
- xfs_fsblock_t first_block = NULLFSBLOCK;
int nimaps = 1;
if (imap->br_state == XFS_EXT_NORM)
@@ -326,8 +324,8 @@ xfs_reflink_convert_cow_extent(
if (imap->br_blockcount == 0)
return 0;
return xfs_bmapi_write(NULL, ip, imap->br_startoff, imap->br_blockcount,
- XFS_BMAPI_COWFORK | XFS_BMAPI_CONVERT, &first_block,
- 0, imap, &nimaps, dfops);
+ XFS_BMAPI_COWFORK | XFS_BMAPI_CONVERT, 0, imap,
+ &nimaps);
}
/* Convert all of the unwritten CoW extents in a file's range to real ones. */
@@ -342,8 +340,6 @@ xfs_reflink_convert_cow(
xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count);
xfs_filblks_t count_fsb = end_fsb - offset_fsb;
struct xfs_bmbt_irec imap;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block = NULLFSBLOCK;
int nimaps = 1, error = 0;
ASSERT(count != 0);
@@ -351,8 +347,7 @@ xfs_reflink_convert_cow(
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_bmapi_write(NULL, ip, offset_fsb, count_fsb,
XFS_BMAPI_COWFORK | XFS_BMAPI_CONVERT |
- XFS_BMAPI_CONVERT_ONLY, &first_block, 0, &imap, &nimaps,
- &dfops);
+ XFS_BMAPI_CONVERT_ONLY, 0, &imap, &nimaps);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
@@ -369,9 +364,7 @@ xfs_reflink_allocate_cow(
xfs_fileoff_t offset_fsb = imap->br_startoff;
xfs_filblks_t count_fsb = imap->br_blockcount;
struct xfs_bmbt_irec got;
- struct xfs_defer_ops dfops;
struct xfs_trans *tp = NULL;
- xfs_fsblock_t first_block;
int nimaps, error = 0;
bool trimmed;
xfs_filblks_t resaligned;
@@ -430,23 +423,18 @@ retry:
xfs_trans_ijoin(tp, ip, 0);
- xfs_defer_init(&dfops, &first_block);
nimaps = 1;
/* Allocate the entire reservation as unwritten blocks. */
error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
- XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, &first_block,
- resblks, imap, &nimaps, &dfops);
+ XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC,
+ resblks, imap, &nimaps);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
xfs_inode_set_cowblocks_tag(ip);
/* Finish up. */
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
-
error = xfs_trans_commit(tp);
if (error)
return error;
@@ -458,10 +446,8 @@ retry:
if (nimaps == 0)
return -ENOSPC;
convert:
- return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb,
- &dfops);
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
+ return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb);
+out_trans_cancel:
xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
XFS_QMOPT_RES_REGBLKS);
out:
@@ -471,69 +457,6 @@ out:
}
/*
- * Find the CoW reservation for a given byte offset of a file.
- */
-bool
-xfs_reflink_find_cow_mapping(
- struct xfs_inode *ip,
- xfs_off_t offset,
- struct xfs_bmbt_irec *imap)
-{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
- xfs_fileoff_t offset_fsb;
- struct xfs_bmbt_irec got;
- struct xfs_iext_cursor icur;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
-
- if (!xfs_is_reflink_inode(ip))
- return false;
- offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
- if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
- return false;
- if (got.br_startoff > offset_fsb)
- return false;
-
- trace_xfs_reflink_find_cow_mapping(ip, offset, 1, XFS_IO_OVERWRITE,
- &got);
- *imap = got;
- return true;
-}
-
-/*
- * Trim an extent to end at the next CoW reservation past offset_fsb.
- */
-void
-xfs_reflink_trim_irec_to_next_cow(
- struct xfs_inode *ip,
- xfs_fileoff_t offset_fsb,
- struct xfs_bmbt_irec *imap)
-{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
- struct xfs_bmbt_irec got;
- struct xfs_iext_cursor icur;
-
- if (!xfs_is_reflink_inode(ip))
- return;
-
- /* Find the extent in the CoW fork. */
- if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
- return;
-
- /* This is the extent before; try sliding up one. */
- if (got.br_startoff < offset_fsb) {
- if (!xfs_iext_next_extent(ifp, &icur, &got))
- return;
- }
-
- if (got.br_startoff >= imap->br_startoff + imap->br_blockcount)
- return;
-
- imap->br_blockcount = got.br_startoff - imap->br_startoff;
- trace_xfs_reflink_trim_irec(ip, imap);
-}
-
-/*
* Cancel CoW reservations for some block range of an inode.
*
* If cancel_real is true this function cancels all COW fork extents for the
@@ -553,11 +476,9 @@ xfs_reflink_cancel_cow_blocks(
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
struct xfs_bmbt_irec got, del;
struct xfs_iext_cursor icur;
- xfs_fsblock_t firstfsb;
- struct xfs_defer_ops dfops;
int error = 0;
- if (!xfs_is_reflink_inode(ip))
+ if (!xfs_inode_has_cow_data(ip))
return 0;
if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
return 0;
@@ -581,26 +502,21 @@ xfs_reflink_cancel_cow_blocks(
if (error)
break;
} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
- xfs_defer_init(&dfops, &firstfsb);
+ ASSERT((*tpp)->t_firstblock == NULLFSBLOCK);
/* Free the CoW orphan record. */
- error = xfs_refcount_free_cow_extent(ip->i_mount,
- &dfops, del.br_startblock,
- del.br_blockcount);
+ error = xfs_refcount_free_cow_extent(*tpp,
+ del.br_startblock, del.br_blockcount);
if (error)
break;
- xfs_bmap_add_free(ip->i_mount, &dfops,
- del.br_startblock, del.br_blockcount,
- NULL);
+ xfs_bmap_add_free(*tpp, del.br_startblock,
+ del.br_blockcount, NULL);
/* Roll the transaction */
- xfs_defer_ijoin(&dfops, ip);
- error = xfs_defer_finish(tpp, &dfops);
- if (error) {
- xfs_defer_cancel(&dfops);
+ error = xfs_defer_finish(tpp);
+ if (error)
break;
- }
/* Remove the mapping from the CoW fork. */
xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
@@ -623,7 +539,6 @@ next_extent:
/* clear tag if cow fork is emptied */
if (!ifp->if_bytes)
xfs_inode_clear_cowblocks_tag(ip);
-
return error;
}
@@ -696,8 +611,6 @@ xfs_reflink_end_cow(
struct xfs_trans *tp;
xfs_fileoff_t offset_fsb;
xfs_fileoff_t end_fsb;
- xfs_fsblock_t firstfsb;
- struct xfs_defer_ops dfops;
int error;
unsigned int resblks;
xfs_filblks_t rlen;
@@ -764,12 +677,11 @@ xfs_reflink_end_cow(
goto prev_extent;
/* Unmap the old blocks in the data fork. */
- xfs_defer_init(&dfops, &firstfsb);
+ ASSERT(tp->t_firstblock == NULLFSBLOCK);
rlen = del.br_blockcount;
- error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1,
- &firstfsb, &dfops);
+ error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1);
if (error)
- goto out_defer;
+ goto out_cancel;
/* Trim the extent to whatever got unmapped. */
if (rlen) {
@@ -779,15 +691,15 @@ xfs_reflink_end_cow(
trace_xfs_reflink_cow_remap(ip, &del);
/* Free the CoW orphan record. */
- error = xfs_refcount_free_cow_extent(tp->t_mountp, &dfops,
- del.br_startblock, del.br_blockcount);
+ error = xfs_refcount_free_cow_extent(tp, del.br_startblock,
+ del.br_blockcount);
if (error)
- goto out_defer;
+ goto out_cancel;
/* Map the new blocks into the data fork. */
- error = xfs_bmap_map_extent(tp->t_mountp, &dfops, ip, &del);
+ error = xfs_bmap_map_extent(tp, ip, &del);
if (error)
- goto out_defer;
+ goto out_cancel;
/* Charge this new data fork mapping to the on-disk quota. */
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
@@ -796,10 +708,9 @@ xfs_reflink_end_cow(
/* Remove the mapping from the CoW fork. */
xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
- xfs_defer_ijoin(&dfops, ip);
- error = xfs_defer_finish(&tp, &dfops);
+ error = xfs_defer_finish(&tp);
if (error)
- goto out_defer;
+ goto out_cancel;
if (!xfs_iext_get_extent(ifp, &icur, &got))
break;
continue;
@@ -814,8 +725,6 @@ prev_extent:
goto out;
return 0;
-out_defer:
- xfs_defer_cancel(&dfops);
out_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -1070,9 +979,7 @@ xfs_reflink_remap_extent(
struct xfs_mount *mp = ip->i_mount;
bool real_extent = xfs_bmap_is_real_extent(irec);
struct xfs_trans *tp;
- xfs_fsblock_t firstfsb;
unsigned int resblks;
- struct xfs_defer_ops dfops;
struct xfs_bmbt_irec uirec;
xfs_filblks_t rlen;
xfs_filblks_t unmap_len;
@@ -1113,11 +1020,10 @@ xfs_reflink_remap_extent(
/* Unmap the old blocks in the data fork. */
rlen = unmap_len;
while (rlen) {
- xfs_defer_init(&dfops, &firstfsb);
- error = __xfs_bunmapi(tp, ip, destoff, &rlen, 0, 1,
- &firstfsb, &dfops);
+ ASSERT(tp->t_firstblock == NULLFSBLOCK);
+ error = __xfs_bunmapi(tp, ip, destoff, &rlen, 0, 1);
if (error)
- goto out_defer;
+ goto out_cancel;
/*
* Trim the extent to whatever got unmapped.
@@ -1136,14 +1042,14 @@ xfs_reflink_remap_extent(
uirec.br_blockcount, uirec.br_startblock);
/* Update the refcount tree */
- error = xfs_refcount_increase_extent(mp, &dfops, &uirec);
+ error = xfs_refcount_increase_extent(tp, &uirec);
if (error)
- goto out_defer;
+ goto out_cancel;
/* Map the new blocks into the data fork. */
- error = xfs_bmap_map_extent(mp, &dfops, ip, &uirec);
+ error = xfs_bmap_map_extent(tp, ip, &uirec);
if (error)
- goto out_defer;
+ goto out_cancel;
/* Update quota accounting. */
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT,
@@ -1162,10 +1068,9 @@ xfs_reflink_remap_extent(
next_extent:
/* Process all the deferred stuff. */
- xfs_defer_ijoin(&dfops, ip);
- error = xfs_defer_finish(&tp, &dfops);
+ error = xfs_defer_finish(&tp);
if (error)
- goto out_defer;
+ goto out_cancel;
}
error = xfs_trans_commit(tp);
@@ -1174,8 +1079,6 @@ next_extent:
goto out;
return 0;
-out_defer:
- xfs_defer_cancel(&dfops);
out_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 1532827ba911..c585ad9552b2 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -18,10 +18,6 @@ extern int xfs_reflink_allocate_cow(struct xfs_inode *ip,
struct xfs_bmbt_irec *imap, bool *shared, uint *lockmode);
extern int xfs_reflink_convert_cow(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t count);
-extern bool xfs_reflink_find_cow_mapping(struct xfs_inode *ip, xfs_off_t offset,
- struct xfs_bmbt_irec *imap);
-extern void xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip,
- xfs_fileoff_t offset_fsb, struct xfs_bmbt_irec *imap);
extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip,
struct xfs_trans **tpp, xfs_fileoff_t offset_fsb,
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 329d4d26c13e..926ed314ffba 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -761,8 +761,6 @@ xfs_growfs_rt_alloc(
struct xfs_buf *bp; /* temporary buffer for zeroing */
xfs_daddr_t d; /* disk block address */
int error; /* error return value */
- xfs_fsblock_t firstblock;/* first block allocated in xaction */
- struct xfs_defer_ops dfops; /* list of freed blocks */
xfs_fsblock_t fsbno; /* filesystem block for bno */
struct xfs_bmbt_irec map; /* block map output */
int nmap; /* number of block maps */
@@ -787,24 +785,20 @@ xfs_growfs_rt_alloc(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_defer_init(&dfops, &firstblock);
/*
* Allocate blocks to the bitmap file.
*/
nmap = 1;
error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
- XFS_BMAPI_METADATA, &firstblock,
- resblks, &map, &nmap, &dfops);
+ XFS_BMAPI_METADATA, resblks, &map,
+ &nmap);
if (!error && nmap < 1)
error = -ENOSPC;
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
/*
* Free any blocks freed up in the transaction, then commit.
*/
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
error = xfs_trans_commit(tp);
if (error)
return error;
@@ -854,8 +848,6 @@ xfs_growfs_rt_alloc(
return 0;
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
return error;
@@ -1215,7 +1207,7 @@ xfs_rtmount_inodes(
ASSERT(sbp->sb_rsumino != NULLFSINO);
error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip);
if (error) {
- IRELE(mp->m_rbmip);
+ xfs_irele(mp->m_rbmip);
return error;
}
ASSERT(mp->m_rsumip != NULL);
@@ -1227,9 +1219,9 @@ xfs_rtunmount_inodes(
struct xfs_mount *mp)
{
if (mp->m_rbmip)
- IRELE(mp->m_rbmip);
+ xfs_irele(mp->m_rbmip);
if (mp->m_rsumip)
- IRELE(mp->m_rsumip);
+ xfs_irele(mp->m_rsumip);
}
/*
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 9d791f158dfe..207ee302b1bb 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -65,11 +65,10 @@ enum {
Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, Opt_biosize,
Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
Opt_mtpt, Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
- Opt_allocsize, Opt_norecovery, Opt_barrier, Opt_nobarrier,
- Opt_inode64, Opt_inode32, Opt_ikeep, Opt_noikeep,
- Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2, Opt_filestreams,
- Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota, Opt_prjquota,
- Opt_uquota, Opt_gquota, Opt_pquota,
+ Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
+ Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
+ Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
+ Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
Opt_discard, Opt_nodiscard, Opt_dax, Opt_err,
};
@@ -118,14 +117,7 @@ static const match_table_t tokens = {
{Opt_qnoenforce, "qnoenforce"}, /* same as uqnoenforce */
{Opt_discard, "discard"}, /* Discard unused blocks */
{Opt_nodiscard, "nodiscard"}, /* Do not discard unused blocks */
-
{Opt_dax, "dax"}, /* Enable direct access to bdev pages */
-
- /* Deprecated mount options scheduled for removal */
- {Opt_barrier, "barrier"}, /* use writer barriers for log write and
- * unwritten extent conversion */
- {Opt_nobarrier, "nobarrier"}, /* .. disable */
-
{Opt_err, NULL},
};
@@ -209,7 +201,6 @@ xfs_parseargs(
* Set some default flags that could be cleared by the mount option
* parsing.
*/
- mp->m_flags |= XFS_MOUNT_BARRIER;
mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
/*
@@ -362,14 +353,6 @@ xfs_parseargs(
mp->m_flags |= XFS_MOUNT_DAX;
break;
#endif
- case Opt_barrier:
- xfs_warn(mp, "%s option is deprecated, ignoring.", p);
- mp->m_flags |= XFS_MOUNT_BARRIER;
- break;
- case Opt_nobarrier:
- xfs_warn(mp, "%s option is deprecated, ignoring.", p);
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
- break;
default:
xfs_warn(mp, "unknown mount option [%s].", p);
return -EINVAL;
@@ -487,7 +470,6 @@ xfs_showargs(
static struct proc_xfs_info xfs_info_unset[] = {
/* the few simple ones we can get from the mount struct */
{ XFS_MOUNT_COMPAT_IOSIZE, ",largeio" },
- { XFS_MOUNT_BARRIER, ",nobarrier" },
{ XFS_MOUNT_SMALL_INUMS, ",inode64" },
{ 0, NULL }
};
@@ -1278,14 +1260,6 @@ xfs_fs_remount(
token = match_token(p, tokens, args);
switch (token) {
- case Opt_barrier:
- xfs_warn(mp, "%s option is deprecated, ignoring.", p);
- mp->m_flags |= XFS_MOUNT_BARRIER;
- break;
- case Opt_nobarrier:
- xfs_warn(mp, "%s option is deprecated, ignoring.", p);
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
- break;
case Opt_inode64:
mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
@@ -1860,7 +1834,7 @@ MODULE_ALIAS_FS("xfs");
STATIC int __init
xfs_init_zones(void)
{
- if (bioset_init(&xfs_ioend_bioset, 4 * MAX_BUF_PER_PAGE,
+ if (bioset_init(&xfs_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
offsetof(struct xfs_ioend, io_inline_bio),
BIOSET_NEED_BVECS))
goto out;
@@ -1886,7 +1860,7 @@ xfs_init_zones(void)
if (!xfs_da_state_zone)
goto out_destroy_btree_cur_zone;
- xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
+ xfs_ifork_zone = kmem_zone_init(sizeof(struct xfs_ifork), "xfs_ifork");
if (!xfs_ifork_zone)
goto out_destroy_da_state_zone;
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 3783afcb68d2..a3e98c64b6e3 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -163,8 +163,6 @@ xfs_symlink(
struct xfs_inode *ip = NULL;
int error = 0;
int pathlen;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
bool unlock_dp_on_error = false;
xfs_fileoff_t first_fsb;
xfs_filblks_t fs_blocks;
@@ -243,13 +241,6 @@ xfs_symlink(
goto out_trans_cancel;
/*
- * Initialize the bmap freelist prior to calling either
- * bmapi or the directory create code.
- */
- xfs_defer_init(&dfops, &first_block);
- tp->t_agfl_dfops = &dfops;
-
- /*
* Allocate an inode for the symlink.
*/
error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
@@ -290,10 +281,9 @@ xfs_symlink(
nmaps = XFS_SYMLINK_MAPS;
error = xfs_bmapi_write(tp, ip, first_fsb, fs_blocks,
- XFS_BMAPI_METADATA, &first_block, resblks,
- mval, &nmaps, &dfops);
+ XFS_BMAPI_METADATA, resblks, mval, &nmaps);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
if (resblks)
resblks -= fs_blocks;
@@ -311,7 +301,7 @@ xfs_symlink(
BTOBB(byte_cnt), 0);
if (!bp) {
error = -ENOMEM;
- goto out_bmap_cancel;
+ goto out_trans_cancel;
}
bp->b_ops = &xfs_symlink_buf_ops;
@@ -338,10 +328,9 @@ xfs_symlink(
/*
* Create the directory entry for the symlink.
*/
- error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
- &first_block, &dfops, resblks);
+ error = xfs_dir_createname(tp, dp, link_name, ip->i_ino, resblks);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
@@ -354,10 +343,6 @@ xfs_symlink(
xfs_trans_set_sync(tp);
}
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
-
error = xfs_trans_commit(tp);
if (error)
goto out_release_inode;
@@ -369,8 +354,6 @@ xfs_symlink(
*ipp = ip;
return 0;
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
out_release_inode:
@@ -381,7 +364,7 @@ out_release_inode:
*/
if (ip) {
xfs_finish_inode_setup(ip);
- IRELE(ip);
+ xfs_irele(ip);
}
xfs_qm_dqrele(udqp);
@@ -403,8 +386,6 @@ xfs_inactive_symlink_rmt(
xfs_buf_t *bp;
int done;
int error;
- xfs_fsblock_t first_block;
- struct xfs_defer_ops dfops;
int i;
xfs_mount_t *mp;
xfs_bmbt_irec_t mval[XFS_SYMLINK_MAPS];
@@ -443,7 +424,6 @@ xfs_inactive_symlink_rmt(
* Find the block(s) so we can inval and unmap them.
*/
done = 0;
- xfs_defer_init(&dfops, &first_block);
nmaps = ARRAY_SIZE(mval);
error = xfs_bmapi_read(ip, 0, xfs_symlink_blocks(mp, size),
mval, &nmaps, 0);
@@ -458,28 +438,21 @@ xfs_inactive_symlink_rmt(
XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
if (!bp) {
error = -ENOMEM;
- goto error_bmap_cancel;
+ goto error_trans_cancel;
}
xfs_trans_binval(tp, bp);
}
/*
* Unmap the dead block(s) to the dfops.
*/
- error = xfs_bunmapi(tp, ip, 0, size, 0, nmaps,
- &first_block, &dfops, &done);
+ error = xfs_bunmapi(tp, ip, 0, size, 0, nmaps, &done);
if (error)
- goto error_bmap_cancel;
+ goto error_trans_cancel;
ASSERT(done);
- /*
- * Commit the first transaction. This logs the EFI and the inode.
- */
- xfs_defer_ijoin(&dfops, ip);
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto error_bmap_cancel;
/*
- * Commit the transaction containing extent freeing and EFDs.
+ * Commit the transaction. This first logs the EFI and the inode, then
+ * rolls and commits the transaction that frees the extents.
*/
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
error = xfs_trans_commit(tp);
@@ -498,8 +471,6 @@ xfs_inactive_symlink_rmt(
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return 0;
-error_bmap_cancel:
- xfs_defer_cancel(&dfops);
error_trans_cancel:
xfs_trans_cancel(tp);
error_unlock:
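
The error-path simplification above works because deferred ops now travel
with the transaction: xfs_trans_cancel() cancels anything left on
tp->t_dfops and xfs_trans_commit() finishes it, so the separate
out_bmap_cancel/error_bmap_cancel labels become redundant. A minimal sketch
of the new caller pattern; do_symlink_work() is a hypothetical stand-in for
the bmapi and directory calls above:

    /*
     * Sketch: deferred ops queued by xfs_bmapi_write() and
     * xfs_dir_createname() ride along on tp->t_dfops. Commit finishes
     * them; cancel cleans them up.
     */
    static int symlink_commit_sketch(struct xfs_trans *tp)
    {
        int error;

        error = do_symlink_work(tp);    /* queues deferred ops on tp */
        if (error) {
            xfs_trans_cancel(tp);       /* also cancels tp->t_dfops */
            return error;
        }
        return xfs_trans_commit(tp);    /* finishes tp->t_dfops first */
    }
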
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 972d45d28097..ad315e83bc02 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -310,7 +310,6 @@ DEFINE_BUF_EVENT(xfs_buf_hold);
DEFINE_BUF_EVENT(xfs_buf_rele);
DEFINE_BUF_EVENT(xfs_buf_iodone);
DEFINE_BUF_EVENT(xfs_buf_submit);
-DEFINE_BUF_EVENT(xfs_buf_submit_wait);
DEFINE_BUF_EVENT(xfs_buf_lock);
DEFINE_BUF_EVENT(xfs_buf_lock_done);
DEFINE_BUF_EVENT(xfs_buf_trylock_fail);
@@ -1153,33 +1152,23 @@ DECLARE_EVENT_CLASS(xfs_page_class,
__field(loff_t, size)
__field(unsigned long, offset)
__field(unsigned int, length)
- __field(int, delalloc)
- __field(int, unwritten)
),
TP_fast_assign(
- int delalloc = -1, unwritten = -1;
-
- if (page_has_buffers(page))
- xfs_count_page_state(page, &delalloc, &unwritten);
__entry->dev = inode->i_sb->s_dev;
__entry->ino = XFS_I(inode)->i_ino;
__entry->pgoff = page_offset(page);
__entry->size = i_size_read(inode);
__entry->offset = off;
__entry->length = len;
- __entry->delalloc = delalloc;
- __entry->unwritten = unwritten;
),
TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
- "length %x delalloc %d unwritten %d",
+ "length %x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->pgoff,
__entry->size,
__entry->offset,
- __entry->length,
- __entry->delalloc,
- __entry->unwritten)
+ __entry->length)
)
#define DEFINE_PAGE_EVENT(name) \
@@ -1263,9 +1252,6 @@ DEFINE_EVENT(xfs_imap_class, name, \
TP_ARGS(ip, offset, count, type, irec))
DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
-DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
-DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
-DEFINE_IOMAP_EVENT(xfs_get_blocks_map_direct);
DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
DEFINE_IOMAP_EVENT(xfs_iomap_found);
@@ -1304,7 +1290,6 @@ DEFINE_EVENT(xfs_simple_io_class, name, \
TP_ARGS(ip, offset, count))
DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
-DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
DEFINE_SIMPLE_IO_EVENT(xfs_zero_eof);
DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write);
@@ -1604,7 +1589,7 @@ DECLARE_EVENT_CLASS(xfs_alloc_class,
__entry->wasfromfl = args->wasfromfl;
__entry->resv = args->resv;
__entry->datatype = args->datatype;
- __entry->firstblock = args->firstblock;
+ __entry->firstblock = args->tp->t_firstblock;
),
TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u "
"prod %u minleft %u total %u alignment %u minalignslop %u "
@@ -2228,67 +2213,54 @@ DEFINE_BTREE_CUR_EVENT(xfs_btree_overlapped_query_range);
/* deferred ops */
struct xfs_defer_pending;
-struct xfs_defer_ops;
DECLARE_EVENT_CLASS(xfs_defer_class,
- TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop,
- unsigned long caller_ip),
- TP_ARGS(mp, dop, caller_ip),
+ TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip),
+ TP_ARGS(tp, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(void *, dop)
+ __field(struct xfs_trans *, tp)
__field(char, committed)
- __field(char, low)
__field(unsigned long, caller_ip)
),
TP_fast_assign(
- __entry->dev = mp ? mp->m_super->s_dev : 0;
- __entry->dop = dop;
- __entry->committed = dop->dop_committed;
- __entry->low = dop->dop_low;
+ __entry->dev = tp->t_mountp->m_super->s_dev;
+ __entry->tp = tp;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ops %p committed %d low %d, caller %pS",
+ TP_printk("dev %d:%d tp %p caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->dop,
- __entry->committed,
- __entry->low,
+ __entry->tp,
(char *)__entry->caller_ip)
)
#define DEFINE_DEFER_EVENT(name) \
DEFINE_EVENT(xfs_defer_class, name, \
- TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, \
- unsigned long caller_ip), \
- TP_ARGS(mp, dop, caller_ip))
+ TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip), \
+ TP_ARGS(tp, caller_ip))
DECLARE_EVENT_CLASS(xfs_defer_error_class,
- TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, int error),
- TP_ARGS(mp, dop, error),
+ TP_PROTO(struct xfs_trans *tp, int error),
+ TP_ARGS(tp, error),
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(void *, dop)
+ __field(struct xfs_trans *, tp)
__field(char, committed)
- __field(char, low)
__field(int, error)
),
TP_fast_assign(
- __entry->dev = mp ? mp->m_super->s_dev : 0;
- __entry->dop = dop;
- __entry->committed = dop->dop_committed;
- __entry->low = dop->dop_low;
+ __entry->dev = tp->t_mountp->m_super->s_dev;
+ __entry->tp = tp;
__entry->error = error;
),
- TP_printk("dev %d:%d ops %p committed %d low %d err %d",
+ TP_printk("dev %d:%d tp %p err %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->dop,
- __entry->committed,
- __entry->low,
+ __entry->tp,
__entry->error)
)
#define DEFINE_DEFER_ERROR_EVENT(name) \
DEFINE_EVENT(xfs_defer_error_class, name, \
- TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, int error), \
- TP_ARGS(mp, dop, error))
+ TP_PROTO(struct xfs_trans *tp, int error), \
+ TP_ARGS(tp, error))
DECLARE_EVENT_CLASS(xfs_defer_pending_class,
TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp),
@@ -2407,7 +2379,6 @@ DEFINE_EVENT(xfs_map_extent_deferred_class, name, \
xfs_exntst_t state), \
TP_ARGS(mp, agno, op, agbno, ino, whichfork, offset, len, state))
-DEFINE_DEFER_EVENT(xfs_defer_init);
DEFINE_DEFER_EVENT(xfs_defer_cancel);
DEFINE_DEFER_EVENT(xfs_defer_trans_roll);
DEFINE_DEFER_EVENT(xfs_defer_trans_abort);
@@ -2417,9 +2388,8 @@ DEFINE_DEFER_EVENT(xfs_defer_finish_done);
DEFINE_DEFER_ERROR_EVENT(xfs_defer_trans_roll_error);
DEFINE_DEFER_ERROR_EVENT(xfs_defer_finish_error);
-DEFINE_DEFER_PENDING_EVENT(xfs_defer_intake_work);
-DEFINE_DEFER_PENDING_EVENT(xfs_defer_intake_cancel);
-DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_cancel);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_create_intent);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_cancel_list);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_finish);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_abort);
@@ -3215,8 +3185,6 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_convert_cow);
DEFINE_RW_EVENT(xfs_reflink_reserve_cow);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_bounce_dio_write);
-DEFINE_IOMAP_EVENT(xfs_reflink_find_cow_mapping);
-DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_irec);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow);
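
With the conversion above, the defer trace classes derive the device from
the transaction, so call sites shrink to two arguments. Illustrative call
shapes, assuming the usual tracepoint naming convention:

    /* before: trace_xfs_defer_cancel(mp, dop, _RET_IP_); */
    trace_xfs_defer_cancel(tp, _RET_IP_);

    /* before: trace_xfs_defer_finish_error(mp, dop, error); */
    trace_xfs_defer_finish_error(tp, error);
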
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 524f543c5b82..bedc5a5133a5 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -100,6 +100,8 @@ xfs_trans_dup(
ntp->t_mountp = tp->t_mountp;
INIT_LIST_HEAD(&ntp->t_items);
INIT_LIST_HEAD(&ntp->t_busy);
+ INIT_LIST_HEAD(&ntp->t_dfops);
+ ntp->t_firstblock = NULLFSBLOCK;
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
ASSERT(tp->t_ticket != NULL);
@@ -118,7 +120,9 @@ xfs_trans_dup(
ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
tp->t_rtx_res = tp->t_rtx_res_used;
ntp->t_pflags = tp->t_pflags;
- ntp->t_agfl_dfops = tp->t_agfl_dfops;
+
+ /* move deferred ops over to the new tp */
+ xfs_defer_move(ntp, tp);
xfs_trans_dup_dqinfo(tp, ntp);
@@ -273,6 +277,8 @@ xfs_trans_alloc(
tp->t_mountp = mp;
INIT_LIST_HEAD(&tp->t_items);
INIT_LIST_HEAD(&tp->t_busy);
+ INIT_LIST_HEAD(&tp->t_dfops);
+ tp->t_firstblock = NULLFSBLOCK;
error = xfs_trans_reserve(tp, resp, blocks, rtextents);
if (error) {
@@ -914,12 +920,21 @@ __xfs_trans_commit(
int error = 0;
int sync = tp->t_flags & XFS_TRANS_SYNC;
- ASSERT(!tp->t_agfl_dfops ||
- !xfs_defer_has_unfinished_work(tp->t_agfl_dfops) || regrant);
-
trace_xfs_trans_commit(tp, _RET_IP_);
/*
+ * Finish deferred items on final commit. Only permanent transactions
+ * should ever have deferred ops.
+ */
+ WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
+ !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
+ if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
+ error = xfs_defer_finish_noroll(&tp);
+ if (error)
+ goto out_unreserve;
+ }
+
+ /*
* If there is nothing to be logged by the transaction,
* then unlock all of the items associated with the
* transaction and free the transaction structure.
@@ -1008,6 +1023,9 @@ xfs_trans_cancel(
trace_xfs_trans_cancel(tp, _RET_IP_);
+ if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
+ xfs_defer_cancel(tp);
+
/*
* See if the caller is relying on us to shut down the
* filesystem. This happens in paths where we detect
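
xfs_defer_move() itself is not part of this hunk; judging only from the
fields introduced here (t_dfops, t_firstblock), a plausible minimal shape
would be the following sketch. The real helper lives in
fs/xfs/libxfs/xfs_defer.c and may do additional bookkeeping:

    /* Hypothetical sketch of moving pending deferred work on a roll. */
    void xfs_defer_move_sketch(struct xfs_trans *dtp, struct xfs_trans *stp)
    {
        /* splice the pending ops onto the duplicate transaction */
        list_splice_init(&stp->t_dfops, &dtp->t_dfops);

        /* carry the allocator's firstblock hint across the roll */
        dtp->t_firstblock = stp->t_firstblock;
        stp->t_firstblock = NULLFSBLOCK;
    }
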
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 6526314f0b8f..c3d278e96ad1 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -24,7 +24,6 @@ struct xfs_rui_log_item;
struct xfs_btree_cur;
struct xfs_cui_log_item;
struct xfs_cud_log_item;
-struct xfs_defer_ops;
struct xfs_bui_log_item;
struct xfs_bud_log_item;
@@ -90,6 +89,11 @@ void xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item,
#define XFS_ITEM_LOCKED 2
#define XFS_ITEM_FLUSHING 3
+/*
+ * Deferred operation item relogging limits.
+ */
+#define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */
+#define XFS_DEFER_OPS_NR_BUFS 2 /* join up to two buffers */
/*
* This is the structure maintained for every active transaction.
@@ -102,11 +106,11 @@ typedef struct xfs_trans {
unsigned int t_blk_res_used; /* # of resvd blocks used */
unsigned int t_rtx_res; /* # of rt extents resvd */
unsigned int t_rtx_res_used; /* # of resvd rt extents used */
+ unsigned int t_flags; /* misc flags */
+ xfs_fsblock_t t_firstblock; /* first block allocated */
struct xlog_ticket *t_ticket; /* log mgr ticket */
struct xfs_mount *t_mountp; /* ptr to fs mount struct */
struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */
- struct xfs_defer_ops *t_agfl_dfops; /* optional agfl fixup dfops */
- unsigned int t_flags; /* misc flags */
int64_t t_icount_delta; /* superblock icount change */
int64_t t_ifree_delta; /* superblock ifree change */
int64_t t_fdblocks_delta; /* superblock fdblocks chg */
@@ -128,6 +132,7 @@ typedef struct xfs_trans {
int64_t t_rextslog_delta;/* superblocks rextslog chg */
struct list_head t_items; /* log item descriptors */
struct list_head t_busy; /* list of busy extents */
+ struct list_head t_dfops; /* deferred operations */
unsigned long t_pflags; /* saved process flags state */
} xfs_trans_t;
@@ -258,7 +263,7 @@ void xfs_refcount_update_init_defer_op(void);
struct xfs_cud_log_item *xfs_trans_get_cud(struct xfs_trans *tp,
struct xfs_cui_log_item *cuip);
int xfs_trans_log_finish_refcount_update(struct xfs_trans *tp,
- struct xfs_cud_log_item *cudp, struct xfs_defer_ops *dfops,
+ struct xfs_cud_log_item *cudp,
enum xfs_refcount_intent_type type, xfs_fsblock_t startblock,
xfs_extlen_t blockcount, xfs_fsblock_t *new_fsb,
xfs_extlen_t *new_len, struct xfs_btree_cur **pcur);
@@ -270,9 +275,9 @@ void xfs_bmap_update_init_defer_op(void);
struct xfs_bud_log_item *xfs_trans_get_bud(struct xfs_trans *tp,
struct xfs_bui_log_item *buip);
int xfs_trans_log_finish_bmap_update(struct xfs_trans *tp,
- struct xfs_bud_log_item *rudp, struct xfs_defer_ops *dfops,
- enum xfs_bmap_intent_type type, struct xfs_inode *ip,
- int whichfork, xfs_fileoff_t startoff, xfs_fsblock_t startblock,
- xfs_filblks_t *blockcount, xfs_exntst_t state);
+ struct xfs_bud_log_item *rudp, enum xfs_bmap_intent_type type,
+ struct xfs_inode *ip, int whichfork, xfs_fileoff_t startoff,
+ xfs_fsblock_t startblock, xfs_filblks_t *blockcount,
+ xfs_exntst_t state);
#endif /* __XFS_TRANS_H__ */
diff --git a/fs/xfs/xfs_trans_bmap.c b/fs/xfs/xfs_trans_bmap.c
index a15a5cd867f9..741c558b2179 100644
--- a/fs/xfs/xfs_trans_bmap.c
+++ b/fs/xfs/xfs_trans_bmap.c
@@ -43,7 +43,6 @@ int
xfs_trans_log_finish_bmap_update(
struct xfs_trans *tp,
struct xfs_bud_log_item *budp,
- struct xfs_defer_ops *dop,
enum xfs_bmap_intent_type type,
struct xfs_inode *ip,
int whichfork,
@@ -54,7 +53,7 @@ xfs_trans_log_finish_bmap_update(
{
int error;
- error = xfs_bmap_finish_one(tp, dop, ip, type, whichfork, startoff,
+ error = xfs_bmap_finish_one(tp, ip, type, whichfork, startoff,
startblock, blockcount, state);
/*
@@ -176,7 +175,6 @@ xfs_bmap_update_create_done(
STATIC int
xfs_bmap_update_finish_item(
struct xfs_trans *tp,
- struct xfs_defer_ops *dop,
struct list_head *item,
void *done_item,
void **state)
@@ -187,7 +185,7 @@ xfs_bmap_update_finish_item(
bmap = container_of(item, struct xfs_bmap_intent, bi_list);
count = bmap->bi_bmap.br_blockcount;
- error = xfs_trans_log_finish_bmap_update(tp, done_item, dop,
+ error = xfs_trans_log_finish_bmap_update(tp, done_item,
bmap->bi_type,
bmap->bi_owner, bmap->bi_whichfork,
bmap->bi_bmap.br_startoff,
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index bd66c76f55e6..855c0b651fd4 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -171,7 +171,6 @@ xfs_extent_free_create_done(
STATIC int
xfs_extent_free_finish_item(
struct xfs_trans *tp,
- struct xfs_defer_ops *dop,
struct list_head *item,
void *done_item,
void **state)
@@ -226,7 +225,6 @@ static const struct xfs_defer_op_type xfs_extent_free_defer_type = {
STATIC int
xfs_agfl_free_finish_item(
struct xfs_trans *tp,
- struct xfs_defer_ops *dop,
struct list_head *item,
void *done_item,
void **state)
diff --git a/fs/xfs/xfs_trans_refcount.c b/fs/xfs/xfs_trans_refcount.c
index 46dd4fca8aa7..523c55663954 100644
--- a/fs/xfs/xfs_trans_refcount.c
+++ b/fs/xfs/xfs_trans_refcount.c
@@ -42,7 +42,6 @@ int
xfs_trans_log_finish_refcount_update(
struct xfs_trans *tp,
struct xfs_cud_log_item *cudp,
- struct xfs_defer_ops *dop,
enum xfs_refcount_intent_type type,
xfs_fsblock_t startblock,
xfs_extlen_t blockcount,
@@ -52,7 +51,7 @@ xfs_trans_log_finish_refcount_update(
{
int error;
- error = xfs_refcount_finish_one(tp, dop, type, startblock,
+ error = xfs_refcount_finish_one(tp, type, startblock,
blockcount, new_fsb, new_len, pcur);
/*
@@ -169,7 +168,6 @@ xfs_refcount_update_create_done(
STATIC int
xfs_refcount_update_finish_item(
struct xfs_trans *tp,
- struct xfs_defer_ops *dop,
struct list_head *item,
void *done_item,
void **state)
@@ -180,7 +178,7 @@ xfs_refcount_update_finish_item(
int error;
refc = container_of(item, struct xfs_refcount_intent, ri_list);
- error = xfs_trans_log_finish_refcount_update(tp, done_item, dop,
+ error = xfs_trans_log_finish_refcount_update(tp, done_item,
refc->ri_type,
refc->ri_startblock,
refc->ri_blockcount,
diff --git a/fs/xfs/xfs_trans_rmap.c b/fs/xfs/xfs_trans_rmap.c
index 726d8e2c0558..05b00e40251f 100644
--- a/fs/xfs/xfs_trans_rmap.c
+++ b/fs/xfs/xfs_trans_rmap.c
@@ -193,7 +193,6 @@ xfs_rmap_update_create_done(
STATIC int
xfs_rmap_update_finish_item(
struct xfs_trans *tp,
- struct xfs_defer_ops *dop,
struct list_head *item,
void *done_item,
void **state)
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 48d84f0d9547..88072c92ace2 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20180531
+#define ACPI_CA_VERSION 0x20180629
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index 1624e2be485c..82cb4eb225a4 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -118,6 +118,10 @@ static inline void *acpi_hest_get_next(struct acpi_hest_generic_data *gdata)
(void *)section - (void *)(estatus + 1) < estatus->data_length; \
section = acpi_hest_get_next(section))
+#ifdef CONFIG_ACPI_APEI_SEA
int ghes_notify_sea(void);
+#else
+static inline int ghes_notify_sea(void) { return -ENOENT; }
+#endif
#endif /* GHES_H */
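
The new stub means callers can invoke ghes_notify_sea() unconditionally;
with CONFIG_ACPI_APEI_SEA disabled it reports -ENOENT, i.e. no GHES source
claimed the error. Illustrative caller; the arm64 synchronous external
abort handler is the real user, and the names below are made up:

    static int handle_sea_sketch(void)
    {
        /* the inline stub returns -ENOENT when APEI SEA is compiled out */
        if (ghes_notify_sea() == 0)
            return 0;                   /* handled via GHES */

        return default_sea_handling();  /* hypothetical fallback */
    }
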
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index ec07f23678ea..0d4b1d3dbc1e 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -84,42 +84,59 @@ static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 ne
}
#endif
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+#ifdef arch_atomic_fetch_add_unless
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
kasan_check_write(v, sizeof(*v));
- return __arch_atomic_add_unless(v, a, u);
+ return arch_atomic_fetch_add_unless(v, a, u);
}
+#endif
-
-static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+#ifdef arch_atomic64_fetch_add_unless
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_unless(v, a, u);
+ return arch_atomic64_fetch_add_unless(v, a, u);
}
+#endif
+#ifdef arch_atomic_inc
+#define atomic_inc atomic_inc
static __always_inline void atomic_inc(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_inc(v);
}
+#endif
+#ifdef arch_atomic64_inc
+#define atomic64_inc atomic64_inc
static __always_inline void atomic64_inc(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_inc(v);
}
+#endif
+#ifdef arch_atomic_dec
+#define atomic_dec atomic_dec
static __always_inline void atomic_dec(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_dec(v);
}
+#endif
+#ifdef arch_atomic64_dec
+#define atomic64_dec atomic64_dec
static __always_inline void atomic64_dec(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_dec(v);
}
+#endif
static __always_inline void atomic_add(int i, atomic_t *v)
{
@@ -181,65 +198,95 @@ static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
arch_atomic64_xor(i, v);
}
+#ifdef arch_atomic_inc_return
+#define atomic_inc_return atomic_inc_return
static __always_inline int atomic_inc_return(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_inc_return(v);
}
+#endif
+#ifdef arch_atomic64_inc_return
+#define atomic64_inc_return atomic64_inc_return
static __always_inline s64 atomic64_inc_return(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_return(v);
}
+#endif
+#ifdef arch_atomic_dec_return
+#define atomic_dec_return atomic_dec_return
static __always_inline int atomic_dec_return(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_dec_return(v);
}
+#endif
+#ifdef arch_atomic64_dec_return
+#define atomic64_dec_return atomic64_dec_return
static __always_inline s64 atomic64_dec_return(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_return(v);
}
+#endif
-static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v)
+#ifdef arch_atomic64_inc_not_zero
+#define atomic64_inc_not_zero atomic64_inc_not_zero
+static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_not_zero(v);
}
+#endif
+#ifdef arch_atomic64_dec_if_positive
+#define atomic64_dec_if_positive atomic64_dec_if_positive
static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_if_positive(v);
}
+#endif
+#ifdef arch_atomic_dec_and_test
+#define atomic_dec_and_test atomic_dec_and_test
static __always_inline bool atomic_dec_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
+#endif
+#ifdef arch_atomic64_dec_and_test
+#define atomic64_dec_and_test atomic64_dec_and_test
static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
+#endif
+#ifdef arch_atomic_inc_and_test
+#define atomic_inc_and_test atomic_inc_and_test
static __always_inline bool atomic_inc_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
+#endif
+#ifdef arch_atomic64_inc_and_test
+#define atomic64_inc_and_test atomic64_inc_and_test
static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
+#endif
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
@@ -325,152 +372,96 @@ static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
return arch_atomic64_fetch_xor(i, v);
}
+#ifdef arch_atomic_sub_and_test
+#define atomic_sub_and_test atomic_sub_and_test
static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
+#endif
+#ifdef arch_atomic64_sub_and_test
+#define atomic64_sub_and_test atomic64_sub_and_test
static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
+#endif
+#ifdef arch_atomic_add_negative
+#define atomic_add_negative atomic_add_negative
static __always_inline bool atomic_add_negative(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
+#endif
+#ifdef arch_atomic64_add_negative
+#define atomic64_add_negative atomic64_add_negative
static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
+#endif
-static __always_inline unsigned long
-cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
+#define xchg(ptr, new) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg(__ai_ptr, (new)); \
+})
#define cmpxchg(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old), \
- (unsigned long)(new), sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg(__ai_ptr, (old), (new)); \
})
-static __always_inline unsigned long
-sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
- int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
-
#define sync_cmpxchg(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))sync_cmpxchg_size((ptr), \
- (unsigned long)(old), (unsigned long)(new), \
- sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_sync_cmpxchg(__ai_ptr, (old), (new)); \
})
-static __always_inline unsigned long
-cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
- int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
-
#define cmpxchg_local(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg_local_size((ptr), \
- (unsigned long)(old), (unsigned long)(new), \
- sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_local(__ai_ptr, (old), (new)); \
})
-static __always_inline u64
-cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
-{
- kasan_check_write(ptr, sizeof(*ptr));
- return arch_cmpxchg64(ptr, old, new);
-}
-
#define cmpxchg64(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old), \
- (u64)(new))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64(__ai_ptr, (old), (new)); \
})
-static __always_inline u64
-cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
-{
- kasan_check_write(ptr, sizeof(*ptr));
- return arch_cmpxchg64_local(ptr, old, new);
-}
-
#define cmpxchg64_local(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old), \
- (u64)(new))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_local(__ai_ptr, (old), (new)); \
})
-/*
- * Originally we had the following code here:
- * __typeof__(p1) ____p1 = (p1);
- * kasan_check_write(____p1, 2 * sizeof(*____p1));
- * arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
- * But it leads to compilation failures (see gcc issue 72873).
- * So for now it's left non-instrumented.
- * There are few callers of cmpxchg_double(), so it's not critical.
- */
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
- arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2)); \
+ typeof(p1) __ai_p1 = (p1); \
+ kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
+ arch_cmpxchg_double(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
})
-#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
-({ \
- arch_cmpxchg_double_local((p1), (p2), (o1), (o2), (n1), (n2)); \
+#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
+({ \
+ typeof(p1) __ai_p1 = (p1); \
+ kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
+ arch_cmpxchg_double_local(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
})
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
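
Every conditional wrapper above follows the same opt-in pattern: the
architecture defines arch_<op> plus a self-referential macro so the
preprocessor can see it, and this header then emits the KASAN-checked
wrapper under the matching name. A sketch of the arch side (illustrative;
real architectures do this in their asm/atomic.h):

    /* Arch header sketch: opting in to an instrumented atomic_inc(). */
    static __always_inline void arch_atomic_inc(atomic_t *v)
    {
        arch_atomic_add(1, v);      /* or a dedicated inc instruction */
    }
    #define arch_atomic_inc arch_atomic_inc /* satisfies the #ifdef above */
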
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index abe6dd9ca2a8..13324aa828eb 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -186,11 +186,6 @@ ATOMIC_OP(xor, ^)
#include <linux/irqflags.h>
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-
static inline void atomic_add(int i, atomic_t *v)
{
atomic_add_return(i, v);
@@ -201,35 +196,7 @@ static inline void atomic_sub(int i, atomic_t *v)
atomic_sub_return(i, v);
}
-static inline void atomic_inc(atomic_t *v)
-{
- atomic_add_return(1, v);
-}
-
-static inline void atomic_dec(atomic_t *v)
-{
- atomic_sub_return(1, v);
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#ifndef __atomic_add_unless
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
- c = old;
- return c;
-}
-#endif
-
#endif /* __ASM_GENERIC_ATOMIC_H */
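
The __atomic_add_unless() loop removed above survives under a new name:
the generic fetch_add_unless fallback is the same compare-and-exchange
loop, now returning the old value. Reconstructed for reference:

    /*
     * Sketch of the renamed generic fallback (the kernel provides it
     * via linux/atomic.h); logic identical to the loop deleted above.
     */
    static inline int atomic_fetch_add_unless_sketch(atomic_t *v, int a, int u)
    {
        int c = atomic_read(v);

        while (c != u) {
            int old = atomic_cmpxchg(v, c, c + a);

            if (old == c)
                break;      /* we installed c + a */
            c = old;        /* lost a race; retry */
        }
        return c;
    }
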
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 8d28eb010d0d..97b28b7f1f29 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -11,6 +11,7 @@
*/
#ifndef _ASM_GENERIC_ATOMIC64_H
#define _ASM_GENERIC_ATOMIC64_H
+#include <linux/types.h>
typedef struct {
long long counter;
@@ -50,18 +51,10 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OP
extern long long atomic64_dec_if_positive(atomic64_t *v);
+#define atomic64_dec_if_positive atomic64_dec_if_positive
extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif /* _ASM_GENERIC_ATOMIC64_H */
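
The helpers deleted above can be rebuilt from the remaining primitives; for
example the old atomic64_inc_not_zero() falls out of the new
atomic64_fetch_add_unless() directly:

    /* Sketch: inc_not_zero in terms of the new fetch_add_unless. */
    static inline bool atomic64_inc_not_zero_sketch(atomic64_t *v)
    {
        return atomic64_fetch_add_unless(v, 1LL, 0LL) != 0LL;
    }
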
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 04deffaf5f7d..dd90c9792909 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -2,189 +2,67 @@
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_
-#include <asm/types.h>
-#include <linux/irqflags.h>
-
-#ifdef CONFIG_SMP
-#include <asm/spinlock.h>
-#include <asm/cache.h> /* we use L1_CACHE_BYTES */
-
-/* Use an array of spinlocks for our atomic_ts.
- * Hash function to index into a different SPINLOCK.
- * Since "a" is usually an address, use one spinlock per cacheline.
- */
-# define ATOMIC_HASH_SIZE 4
-# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-
-extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
-
-/* Can't use raw_spin_lock_irq because of #include problems, so
- * this is the substitute */
-#define _atomic_spin_lock_irqsave(l,f) do { \
- arch_spinlock_t *s = ATOMIC_HASH(l); \
- local_irq_save(f); \
- arch_spin_lock(s); \
-} while(0)
-
-#define _atomic_spin_unlock_irqrestore(l,f) do { \
- arch_spinlock_t *s = ATOMIC_HASH(l); \
- arch_spin_unlock(s); \
- local_irq_restore(f); \
-} while(0)
-
-
-#else
-# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
-# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
-#endif
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
/*
- * NMI events can occur at any time, including when interrupts have been
- * disabled by *_irqsave(). So you can get NMI events occurring while a
- * *_bit function is holding a spin lock. If the NMI handler also wants
- * to do bit manipulation (and they do) then you can get a deadlock
- * between the original caller of *_bit() and the NMI handler.
- *
- * by Keith Owens
+ * Implementation of atomic bitops using atomic-fetch ops.
+ * See Documentation/atomic_bitops.txt for details.
*/
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile unsigned long *addr)
+static inline void set_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p |= mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p &= ~mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on other architectures than x86.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p ^= mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It may be reordered on other architectures than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old | mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ if (READ_ONCE(*p) & mask)
+ return 1;
- return (old & mask) != 0;
+ old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It can be reorderdered on other architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old & ~mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ if (!(READ_ONCE(*p) & mask))
+ return 0;
- return (old & mask) != 0;
+ old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old ^ mask;
- _atomic_spin_unlock_irqrestore(p, flags);
- return (old & mask) != 0;
+ p += BIT_WORD(nr);
+ old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
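
The rewrite replaces the hashed-spinlock scheme with atomic_long fetch ops,
plus a plain READ_ONCE() fast path in the test-and-set/clear variants when
the bit already has the target value. Callers are unaffected; a small usage
sketch:

    static unsigned long flags[2];  /* bit numbers may span words */

    static bool claim_slot_sketch(unsigned int nr)
    {
        /* true if this caller flipped the bit 0 -> 1 */
        return !test_and_set_bit(nr, flags);
    }

    static void release_slot_sketch(unsigned int nr)
    {
        clear_bit(nr, flags);   /* atomic_long_andnot on the right word */
    }
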
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 67ab280ad134..3ae021368f48 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -2,6 +2,10 @@
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
/**
* test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
@@ -11,7 +15,20 @@
* the returned value is 0.
* It can be used to implement bit locks.
*/
-#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr)
+static inline int test_and_set_bit_lock(unsigned int nr,
+ volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ if (READ_ONCE(*p) & mask)
+ return 1;
+
+ old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+ return !!(old & mask);
+}
+
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
@@ -20,11 +37,11 @@
*
* This operation is atomic and provides release barrier semantics.
*/
-#define clear_bit_unlock(nr, addr) \
-do { \
- smp_mb__before_atomic(); \
- clear_bit(nr, addr); \
-} while (0)
+static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+{
+ p += BIT_WORD(nr);
+ atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+}
/**
* __clear_bit_unlock - Clear a bit in memory, for unlock
@@ -37,11 +54,38 @@ do { \
*
* See for example x86's implementation.
*/
-#define __clear_bit_unlock(nr, addr) \
-do { \
- smp_mb__before_atomic(); \
- clear_bit(nr, addr); \
-} while (0)
+static inline void __clear_bit_unlock(unsigned int nr,
+ volatile unsigned long *p)
+{
+ unsigned long old;
-#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
+ p += BIT_WORD(nr);
+ old = READ_ONCE(*p);
+ old &= ~BIT_MASK(nr);
+ atomic_long_set_release((atomic_long_t *)p, old);
+}
+
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ * byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This is a bit of a one-trick-pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters.
+ */
+#ifndef clear_bit_unlock_is_negative_byte
+static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
+ volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+ return !!(old & BIT(7));
+}
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
+#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
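
As the test_and_set_bit_lock() kerneldoc says, the acquire/release pair is
meant for building bit locks. A minimal sketch on top of the functions
above; the kernel's production version is bit_spin_lock() in
linux/bit_spinlock.h:

    static inline void bit_lock_sketch(unsigned int nr,
                                       volatile unsigned long *addr)
    {
        while (test_and_set_bit_lock(nr, addr))
            cpu_relax();        /* spin until the acquire wins */
    }

    static inline void bit_unlock_sketch(unsigned int nr,
                                         volatile unsigned long *addr)
    {
        clear_bit_unlock(nr, addr);     /* release semantics */
    }
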
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f59639afaa39..a75cb371cd19 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1019,8 +1019,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
-int pud_free_pmd_page(pud_t *pud);
-int pmd_free_pte_page(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
@@ -1046,11 +1046,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
{
return 0;
}
-static inline int pud_free_pmd_page(pud_t *pud)
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
return 0;
}
-static inline int pmd_free_pte_page(pmd_t *pmd)
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
return 0;
}
@@ -1083,6 +1083,18 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
static inline void init_espfix_bsp(void) { }
#endif
+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+ return true;
+}
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+ return false;
+}
+#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
+
#endif /* !__ASSEMBLY__ */
#ifndef io_remap_pfn_range
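
pfn_modify_allowed() is a new arch hook (used by the L1TF mitigation work
elsewhere in this series); the generic stubs above permit everything. An
architecture opting in defines the guard and supplies a policy, shaped
roughly like this sketch; the limit below is hypothetical, not the real
x86 check:

    #define __HAVE_ARCH_PFN_MODIFY_ALLOWED

    static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
    {
        /* e.g. refuse PFNs beyond what the CPU may safely map */
        return pfn <= arch_max_safe_pfn;    /* hypothetical limit */
    }

    static inline bool arch_has_pfn_modify_check(void)
    {
        return true;    /* tells generic code the hook is meaningful */
    }
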
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 3063125197ad..e811ef7b8350 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -303,4 +303,14 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_migrate_finish(mm) do {} while (0)
+/*
+ * Used to flush the TLB when page tables are removed, when lazy
+ * TLB mode may cause a CPU to retain intermediate translations
+ * pointing to about-to-be-freed page table memory.
+ */
+#ifndef HAVE_TLB_FLUSH_REMOVE_TABLES
+#define tlb_flush_remove_tables(mm) do {} while (0)
+#define tlb_flush_remove_tables_local(mm) do {} while (0)
+#endif
+
#endif /* _ASM_GENERIC__TLB_H */
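
Architectures that need real flushes here define
HAVE_TLB_FLUSH_REMOVE_TABLES and provide both functions; the defaults above
are no-ops. Opt-in sketch, illustrative shape only and not the actual x86
implementation:

    #define HAVE_TLB_FLUSH_REMOVE_TABLES

    void tlb_flush_remove_tables(struct mm_struct *mm)
    {
        /*
         * Make CPUs in lazy TLB mode drop stale intermediate
         * translations before the page tables are freed.
         */
        flush_tlb_mm(mm);       /* hypothetical, heavy-handed */
    }

    void tlb_flush_remove_tables_local(struct mm_struct *mm)
    {
        local_flush_tlb();      /* hypothetical local variant */
    }
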
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index 71e1bb24d79f..7e0dad94cb2b 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -29,17 +29,21 @@
*
* @key: Private DH key
* @p: Diffie-Hellman parameter P
+ * @q: Diffie-Hellman parameter Q
* @g: Diffie-Hellman generator G
* @key_size: Size of the private DH key
* @p_size: Size of DH parameter P
+ * @q_size: Size of DH parameter Q
* @g_size: Size of DH generator G
*/
struct dh {
void *key;
void *p;
+ void *q;
void *g;
unsigned int key_size;
unsigned int p_size;
+ unsigned int q_size;
unsigned int g_size;
};
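
The new q member carries the optional subgroup order, needed for FIPS-style
parameter validation; q may be left NULL with q_size zero. Illustrative
setup of the structure (buffers come from the caller; the packed wire
format still goes through the dh helpers such as crypto_dh_encode_key()):

    /* Sketch: populate struct dh including the new Q parameter. */
    static void dh_fill_params_sketch(struct dh *params,
                                      void *key, unsigned int key_len,
                                      void *p, unsigned int p_len,
                                      void *q, unsigned int q_len,
                                      void *g, unsigned int g_len)
    {
        params->key = key;  params->key_size = key_len;
        params->p = p;      params->p_size = p_len;
        params->q = q;      params->q_size = q_len;     /* optional */
        params->g = g;      params->g_size = g_len;
    }
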
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 8f941102af36..3fb581bf3b87 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -122,11 +122,10 @@ struct drbg_state {
struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */
struct skcipher_request *ctr_req; /* CTR mode request handle */
- __u8 *ctr_null_value_buf; /* CTR mode unaligned buffer */
- __u8 *ctr_null_value; /* CTR mode aligned zero buf */
__u8 *outscratchpadbuf; /* CTR mode output scratchpad */
__u8 *outscratchpad; /* CTR mode aligned outbuf */
struct crypto_wait ctr_wait; /* CTR mode async wait obj */
+ struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
bool seeded; /* DRBG fully seeded? */
bool pr; /* Prediction resistance enabled? */
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 880e6be9e95e..a66c127a20ed 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -22,27 +22,14 @@
#include <linux/scatterlist.h>
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
- struct scatterlist *sg,
- int chain, int num)
+ struct scatterlist *sg, int num)
{
- if (chain) {
- head->length += sg->length;
- sg = sg_next(sg);
- }
-
if (sg)
sg_chain(head, num, sg);
else
sg_mark_end(head);
}
-static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
- struct scatter_walk *walk_out)
-{
- return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) +
- (int)(walk_in->offset - walk_out->offset));
-}
-
static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
{
unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index 0555b571dd34..8a46202b1857 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -71,6 +71,10 @@ extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE];
extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];
+extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
+
+extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
+
struct sha1_state {
u32 state[SHA1_DIGEST_SIZE / 4];
u64 count;
diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
deleted file mode 100644
index 6b700c7b2fe1..000000000000
--- a/include/crypto/vmac.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Modified to interface to the Linux kernel
- * Copyright (c) 2009, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#ifndef __CRYPTO_VMAC_H
-#define __CRYPTO_VMAC_H
-
-/* --------------------------------------------------------------------------
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Please send bug reports to the authors.
- * Last modified: 17 APR 08, 1700 PDT
- * ----------------------------------------------------------------------- */
-
-/*
- * User definable settings.
- */
-#define VMAC_TAG_LEN 64
-#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
-#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
-#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
-
-/*
- * This implementation uses u32 and u64 as names for unsigned 32-
- * and 64-bit integer types. These are defined in C99 stdint.h. The
- * following may need adaptation if you are not running a C99 or
- * Microsoft C environment.
- */
-struct vmac_ctx {
- u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
- u64 polykey[2*VMAC_TAG_LEN/64];
- u64 l3key[2*VMAC_TAG_LEN/64];
- u64 polytmp[2*VMAC_TAG_LEN/64];
- u64 cached_nonce[2];
- u64 cached_aes[2];
- int first_block_processed;
-};
-
-typedef u64 vmac_t;
-
-struct vmac_ctx_t {
- struct crypto_cipher *child;
- struct vmac_ctx __vmac_ctx;
- u8 partial[VMAC_NHBYTES]; /* partial block */
- int partial_size; /* size of the partial block */
-};
-
-#endif /* __CRYPTO_VMAC_H */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index f5099c12c6a6..f7a19c2a7a80 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -97,29 +97,11 @@ struct pci_controller;
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-/**
- * drm_drv_uses_atomic_modeset - check if the driver implements
- * atomic_commit()
- * @dev: DRM device
- *
- * This check is useful if drivers do not have DRIVER_ATOMIC set but
- * have atomic modesetting internally implemented.
- */
-static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
-{
- return dev->mode_config.funcs->atomic_commit != NULL;
-}
-
#define DRM_SWITCH_POWER_ON 0
#define DRM_SWITCH_POWER_OFF 1
#define DRM_SWITCH_POWER_CHANGING 2
#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
-static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
-{
- return dev->driver->driver_features & feature;
-}
-
/* returns true if currently okay to sleep */
static inline bool drm_can_sleep(void)
{
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index a57a8aa90ffb..da9d95a19580 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -160,6 +160,14 @@ struct __drm_crtcs_state {
struct __drm_connnectors_state {
struct drm_connector *ptr;
struct drm_connector_state *state, *old_state, *new_state;
+ /**
+ * @out_fence_ptr:
+ *
+ * User-provided pointer which the kernel uses to return a sync_file
+ * file descriptor. Used by writeback connectors to signal completion of
+ * the writeback.
+ */
+ s32 __user *out_fence_ptr;
};
struct drm_private_obj;
@@ -594,6 +602,9 @@ void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
int __must_check
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc);
+int drm_atomic_set_writeback_fb_for_connector(
+ struct drm_connector_state *conn_state,
+ struct drm_framebuffer *fb);
int __must_check
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
struct drm_crtc *crtc);
@@ -601,9 +612,6 @@ int __must_check
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc);
-void
-drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
-
int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
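
Together, @out_fence_ptr and drm_atomic_set_writeback_fb_for_connector()
form the kernel-side plumbing for writeback connectors: userspace supplies
a framebuffer to write into and, optionally, a pointer through which a
completion sync_file fd is returned. Hedged sketch of the state setup:

    /* Sketch only: property decoding and error unwinding elided. */
    static int setup_writeback_sketch(struct drm_connector_state *conn_state,
                                      struct drm_framebuffer *fb,
                                      s32 __user *fence_ptr)
    {
        conn_state->out_fence_ptr = fence_ptr; /* fence fd returned here */
        return drm_atomic_set_writeback_fb_for_connector(conn_state, fb);
    }
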
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 26aaba58d6ce..99e2a5297c69 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -100,6 +100,7 @@ int __must_check drm_atomic_helper_swap_state(struct drm_atomic_state *state,
int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
bool nonblock);
void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state);
+void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state);
void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state);
void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state);
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
new file mode 100644
index 000000000000..4923b00328c1
--- /dev/null
+++ b/include/drm/drm_audio_component.h
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: MIT
+// Copyright © 2014 Intel Corporation
+
+#ifndef _DRM_AUDIO_COMPONENT_H_
+#define _DRM_AUDIO_COMPONENT_H_
+
+struct drm_audio_component;
+
+/**
+ * struct drm_audio_component_ops - Ops implemented by DRM driver, called by hda driver
+ */
+struct drm_audio_component_ops {
+ /**
+ * @owner: drm module to pin down
+ */
+ struct module *owner;
+ /**
+ * @get_power: get the POWER_DOMAIN_AUDIO power well
+ *
+ * Request the power well to be turned on.
+ */
+ void (*get_power)(struct device *);
+ /**
+ * @put_power: put the POWER_DOMAIN_AUDIO power well
+ *
+ * Allow the power well to be turned off.
+ */
+ void (*put_power)(struct device *);
+ /**
+ * @codec_wake_override: Enable/disable codec wake signal
+ */
+ void (*codec_wake_override)(struct device *, bool enable);
+ /**
+ * @get_cdclk_freq: Get the Core Display Clock in kHz
+ */
+ int (*get_cdclk_freq)(struct device *);
+ /**
+ * @sync_audio_rate: set n/cts based on the sample rate
+ *
+ * Called from the audio driver. After the audio driver sets the
+ * sample rate, it calls this function to set n/cts.
+ */
+ int (*sync_audio_rate)(struct device *, int port, int pipe, int rate);
+ /**
+ * @get_eld: fill the audio state and ELD bytes for the given port
+ *
+ * Called from the audio driver to get the HDMI/DP audio state of the
+ * given digital port, and to copy the ELD bytes to the given buffer.
+ *
+ * It returns the byte size of the original ELD (not the actually
+ * copied size), zero for an invalid ELD, or a negative error code.
+ *
+ * Note that the returned size may exceed @max_bytes; in that case
+ * only part of the ELD has been copied to the buffer.
+ */
+ int (*get_eld)(struct device *, int port, int pipe, bool *enabled,
+ unsigned char *buf, int max_bytes);
+};
+
+/**
+ * struct drm_audio_component_audio_ops - Ops implemented by hda driver, called by DRM driver
+ */
+struct drm_audio_component_audio_ops {
+ /**
+ * @audio_ptr: Pointer to be used in call to pin_eld_notify
+ */
+ void *audio_ptr;
+ /**
+ * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed
+ *
+ * Called when the DRM driver has set up the audio pipeline or has just
+ * begun to tear it down. This allows the HDA driver to update its
+ * status accordingly (even when the HDA controller is in power save
+ * mode).
+ */
+ void (*pin_eld_notify)(void *audio_ptr, int port, int pipe);
+ /**
+ * @pin2port: Check and convert from pin node to port number
+ *
+ * Called by the HDA driver to check and convert from the pin widget
+ * node number to a port number on the graphics side.
+ */
+ int (*pin2port)(void *audio_ptr, int pin);
+ /**
+ * @master_bind: (Optional) component master bind callback
+ *
+ * Called when binding the master component, for HDA codec-specific
+ * handling of dynamic binding.
+ */
+ int (*master_bind)(struct device *dev, struct drm_audio_component *);
+ /**
+ * @master_unbind: (Optional) component master unbind callback
+ *
+ * Called when unbinding the master component, for HDA codec-specific
+ * handling of dynamic unbinding.
+ */
+ void (*master_unbind)(struct device *dev, struct drm_audio_component *);
+};
+
+/**
+ * struct drm_audio_component - Used for direct communication between DRM and hda drivers
+ */
+struct drm_audio_component {
+ /**
+ * @dev: DRM device, used as parameter for ops
+ */
+ struct device *dev;
+ /**
+ * @ops: Ops implemented by DRM driver, called by hda driver
+ */
+ const struct drm_audio_component_ops *ops;
+ /**
+ * @audio_ops: Ops implemented by hda driver, called by DRM driver
+ */
+ const struct drm_audio_component_audio_ops *audio_ops;
+};
+
+#endif /* _DRM_AUDIO_COMPONENT_H_ */
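
A DRM driver participates by publishing a drm_audio_component_ops table and
pointing the shared component at itself; the HDA driver fills in @audio_ops
from its side. Sketch of the DRM-side wiring; names and the registration
glue are illustrative:

    static void example_get_power(struct device *dev) { /* power up */ }
    static void example_put_power(struct device *dev) { /* power down */ }

    static const struct drm_audio_component_ops example_ops = {
        .owner      = THIS_MODULE,
        .get_power  = example_get_power,
        .put_power  = example_put_power,
    };

    static void example_bind_sketch(struct drm_audio_component *acomp,
                                    struct device *drm_dev)
    {
        acomp->dev = drm_dev;
        acomp->ops = &example_ops;  /* audio_ops is set by the HDA side */
    }
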
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 3270fec46979..bd850747ce54 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -97,7 +97,7 @@ struct drm_bridge_funcs {
/**
* @mode_fixup:
*
- * This callback is used to validate and adjust a mode. The paramater
+ * This callback is used to validate and adjust a mode. The parameter
* mode is the display mode that should be fed to the next element in
* the display chain, either the final &drm_connector or the next
* &drm_bridge. The parameter adjusted_mode is the input mode the bridge
@@ -178,6 +178,22 @@ struct drm_bridge_funcs {
* then this would be &drm_encoder_helper_funcs.mode_set. The display
* pipe (i.e. clocks and timing signals) is off when this function is
* called.
+ *
+ * The adjusted_mode parameter is the mode output by the CRTC for the
+ * first bridge in the chain. It can be different from the mode
+ * parameter that contains the desired mode for the connector at the end
+ * of the bridges chain, for instance when the first bridge in the chain
+ * performs scaling. The adjusted mode is mostly useful for the first
+ * bridge in the chain and is likely irrelevant for the other bridges.
+ *
+ * For atomic drivers the adjusted_mode is the mode stored in
+ * &drm_crtc_state.adjusted_mode.
+ *
+ * NOTE:
+ *
+ * If a need arises to store and access modes adjusted for other
+ * locations than the connection between the CRTC and the first bridge,
+ * the DRM framework will have to be extended with DRM bridge states.
*/
void (*mode_set)(struct drm_bridge *bridge,
struct drm_display_mode *mode,
@@ -254,27 +270,29 @@ struct drm_bridge_timings {
/**
* struct drm_bridge - central DRM bridge control structure
- * @dev: DRM device this bridge belongs to
- * @encoder: encoder to which this bridge is connected
- * @next: the next bridge in the encoder chain
- * @of_node: device node pointer to the bridge
- * @list: to keep track of all added bridges
- * @timings: the timing specification for the bridge, if any (may
- * be NULL)
- * @funcs: control functions
- * @driver_private: pointer to the bridge driver's internal context
*/
struct drm_bridge {
+ /** @dev: DRM device this bridge belongs to */
struct drm_device *dev;
+ /** @encoder: encoder to which this bridge is connected */
struct drm_encoder *encoder;
+ /** @next: the next bridge in the encoder chain */
struct drm_bridge *next;
#ifdef CONFIG_OF
+ /** @of_node: device node pointer to the bridge */
struct device_node *of_node;
#endif
+ /** @list: to keep track of all added bridges */
struct list_head list;
+ /**
+ * @timings:
+ *
+ * the timing specification for the bridge, if any (may be NULL)
+ */
const struct drm_bridge_timings *timings;
-
+ /** @funcs: control functions */
const struct drm_bridge_funcs *funcs;
+ /** @driver_private: pointer to the bridge driver's internal context */
void *driver_private;
};
@@ -285,15 +303,15 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
struct drm_bridge *previous);
bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode);
void drm_bridge_disable(struct drm_bridge *bridge);
void drm_bridge_post_disable(struct drm_bridge *bridge);
void drm_bridge_mode_set(struct drm_bridge *bridge,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
void drm_bridge_pre_enable(struct drm_bridge *bridge);
void drm_bridge_enable(struct drm_bridge *bridge);
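
To illustrate the @mode_set semantics documented above: a bridge that is first in the chain would program its timings from adjusted_mode, while later bridges normally consult mode. This is a sketch only; "program_timings" is a hypothetical hardware helper.

	static void my_bridge_mode_set(struct drm_bridge *bridge,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *adjusted_mode)
	{
		/* First bridge in the chain: use the CRTC output timings. */
		program_timings(bridge, adjusted_mode);	/* hypothetical */
	}

	static const struct drm_bridge_funcs my_bridge_funcs = {
		.mode_set = my_bridge_mode_set,
	};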
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
new file mode 100644
index 000000000000..989f8e52864d
--- /dev/null
+++ b/include/drm/drm_client.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DRM_CLIENT_H_
+#define _DRM_CLIENT_H_
+
+#include <linux/types.h>
+
+struct drm_client_dev;
+struct drm_device;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_gem_object;
+struct drm_minor;
+struct module;
+
+/**
+ * struct drm_client_funcs - DRM client callbacks
+ */
+struct drm_client_funcs {
+ /**
+ * @owner: The module owner
+ */
+ struct module *owner;
+
+ /**
+ * @unregister:
+ *
+ * Called when &drm_device is unregistered. The client should respond by
+ * releasing its resources using drm_client_release().
+ *
+ * This callback is optional.
+ */
+ void (*unregister)(struct drm_client_dev *client);
+
+ /**
+ * @restore:
+ *
+ * Called on drm_lastclose(). The first client instance in the list that
+ * returns zero gets the privilege to restore and no more clients are
+ * called. This callback is not called after @unregister has been called.
+ *
+ * This callback is optional.
+ */
+ int (*restore)(struct drm_client_dev *client);
+
+ /**
+ * @hotplug:
+ *
+ * Called on drm_kms_helper_hotplug_event().
+ * This callback is not called after @unregister has been called.
+ *
+ * This callback is optional.
+ */
+ int (*hotplug)(struct drm_client_dev *client);
+};
+
+/**
+ * struct drm_client_dev - DRM client instance
+ */
+struct drm_client_dev {
+ /**
+ * @dev: DRM device
+ */
+ struct drm_device *dev;
+
+ /**
+ * @name: Name of the client.
+ */
+ const char *name;
+
+ /**
+ * @list:
+ *
+ * List of all clients of a DRM device, linked into
+ * &drm_device.clientlist. Protected by &drm_device.clientlist_mutex.
+ */
+ struct list_head list;
+
+ /**
+ * @funcs: DRM client functions (optional)
+ */
+ const struct drm_client_funcs *funcs;
+
+ /**
+ * @file: DRM file
+ */
+ struct drm_file *file;
+};
+
+int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
+ const char *name, const struct drm_client_funcs *funcs);
+void drm_client_release(struct drm_client_dev *client);
+
+void drm_client_dev_unregister(struct drm_device *dev);
+void drm_client_dev_hotplug(struct drm_device *dev);
+void drm_client_dev_restore(struct drm_device *dev);
+
+/**
+ * struct drm_client_buffer - DRM client buffer
+ */
+struct drm_client_buffer {
+ /**
+ * @client: DRM client
+ */
+ struct drm_client_dev *client;
+
+ /**
+ * @handle: Buffer handle
+ */
+ u32 handle;
+
+ /**
+ * @pitch: Buffer pitch
+ */
+ u32 pitch;
+
+ /**
+ * @gem: GEM object backing this buffer
+ */
+ struct drm_gem_object *gem;
+
+ /**
+ * @vaddr: Virtual address for the buffer
+ */
+ void *vaddr;
+
+ /**
+ * @fb: DRM framebuffer
+ */
+ struct drm_framebuffer *fb;
+};
+
+struct drm_client_buffer *
+drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
+void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
+
+int drm_client_debugfs_init(struct drm_minor *minor);
+
+#endif
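
A minimal sketch of an in-kernel client built on the API above, assuming the header's drm_client_new() signature; the hotplug body is a placeholder and error handling is trimmed.

	static int my_client_hotplug(struct drm_client_dev *client)
	{
		/* Re-probe outputs; a framebuffer could be (re)created via
		 * drm_client_framebuffer_create(). */
		return 0;
	}

	static const struct drm_client_funcs my_client_funcs = {
		.owner	 = THIS_MODULE,
		.hotplug = my_client_hotplug,
	};

	static struct drm_client_dev my_client;

	int my_client_setup(struct drm_device *dev)
	{
		return drm_client_new(dev, &my_client, "my-client",
				      &my_client_funcs);
	}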
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 675cc3f8cf85..97ea41dc678f 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -290,6 +290,10 @@ struct drm_display_info {
#define DRM_BUS_FLAG_DATA_MSB_TO_LSB (1<<4)
/* data is transmitted LSB to MSB on the bus */
#define DRM_BUS_FLAG_DATA_LSB_TO_MSB (1<<5)
+/* drive sync on pos. edge */
+#define DRM_BUS_FLAG_SYNC_POSEDGE (1<<6)
+/* drive sync on neg. edge */
+#define DRM_BUS_FLAG_SYNC_NEGEDGE (1<<7)
/**
* @bus_flags: Additional information (like pixel signal polarity) for
@@ -374,12 +378,9 @@ struct drm_tv_connector_state {
/**
* struct drm_connector_state - mutable connector state
- * @connector: backpointer to the connector
- * @best_encoder: can be used by helpers and drivers to select the encoder
- * @state: backpointer to global drm_atomic_state
- * @tv: TV connector state
*/
struct drm_connector_state {
+ /** @connector: backpointer to the connector */
struct drm_connector *connector;
/**
@@ -390,6 +391,13 @@ struct drm_connector_state {
*/
struct drm_crtc *crtc;
+ /**
+ * @best_encoder:
+ *
+ * Used by the atomic helpers to select the encoder, through the
+ * &drm_connector_helper_funcs.atomic_best_encoder or
+ * &drm_connector_helper_funcs.best_encoder callbacks.
+ */
struct drm_encoder *best_encoder;
/**
@@ -398,6 +406,7 @@ struct drm_connector_state {
*/
enum drm_link_status link_status;
+ /** @state: backpointer to global drm_atomic_state */
struct drm_atomic_state *state;
/**
@@ -407,6 +416,7 @@ struct drm_connector_state {
*/
struct drm_crtc_commit *commit;
+ /** @tv: TV connector state */
struct drm_tv_connector_state tv;
/**
@@ -419,6 +429,14 @@ struct drm_connector_state {
enum hdmi_picture_aspect picture_aspect_ratio;
/**
+ * @content_type: Connector property to control the
+ * HDMI infoframe content type setting. The
+ * %DRM_MODE_CONTENT_TYPE_\* values must match the values used in
+ * the HDMI AVI infoframe.
+ */
+ unsigned int content_type;
+
+ /**
* @scaling_mode: Connector property to control the
* upscaling, mostly used for built-in panels.
*/
@@ -429,6 +447,19 @@ struct drm_connector_state {
* protection. This is most commonly used for HDCP.
*/
unsigned int content_protection;
+
+ /**
+ * @writeback_job: Writeback job for writeback connectors
+ *
+ * Holds the framebuffer and out-fence for a writeback connector. As
+ * the writeback completion may be asynchronous to the normal commit
+ * cycle, the writeback job lifetime is managed separately from the
+ * normal atomic state by this object.
+ *
+ * See also: drm_writeback_queue_job() and
+ * drm_writeback_signal_completion()
+ */
+ struct drm_writeback_job *writeback_job;
};
/**
@@ -530,8 +561,7 @@ struct drm_connector_funcs {
* received for this output connector->edid must be NULL.
*
* Drivers using the probe helpers should use
- * drm_helper_probe_single_connector_modes() or
- * drm_helper_probe_single_connector_modes_nomerge() to implement this
+ * drm_helper_probe_single_connector_modes() to implement this
* function.
*
* RETURNS:
@@ -608,6 +638,8 @@ struct drm_connector_funcs {
* cleaned up by calling the @atomic_destroy_state hook in this
* structure.
*
+ * This callback is mandatory for atomic drivers.
+ *
* Atomic drivers which don't subclass &struct drm_connector_state should use
* drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
@@ -634,6 +666,8 @@ struct drm_connector_funcs {
*
* Destroy a state duplicated with @atomic_duplicate_state and release
* or unreference all resources it references
+ *
+ * This callback is mandatory for atomic drivers.
*/
void (*atomic_destroy_state)(struct drm_connector *connector,
struct drm_connector_state *state);
@@ -738,45 +772,6 @@ struct drm_cmdline_mode {
/**
* struct drm_connector - central DRM connector control structure
- * @dev: parent DRM device
- * @kdev: kernel device for sysfs attributes
- * @attr: sysfs attributes
- * @head: list management
- * @base: base KMS object
- * @name: human readable name, can be overwritten by the driver
- * @connector_type: one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
- * @connector_type_id: index into connector type enum
- * @interlace_allowed: can this connector handle interlaced modes?
- * @doublescan_allowed: can this connector handle doublescan?
- * @stereo_allowed: can this connector handle stereo modes?
- * @funcs: connector control functions
- * @edid_blob_ptr: DRM property containing EDID if present
- * @properties: property tracking for this connector
- * @dpms: current dpms state
- * @helper_private: mid-layer private data
- * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
- * @force: a DRM_FORCE_<foo> state for forced mode sets
- * @override_edid: has the EDID been overwritten through debugfs for testing?
- * @encoder_ids: valid encoders for this connector
- * @eld: EDID-like data, if present
- * @latency_present: AV delay info from ELD, if found
- * @video_latency: video latency info from ELD, if found
- * @audio_latency: audio latency info from ELD, if found
- * @null_edid_counter: track sinks that give us all zeros for the EDID
- * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
- * @edid_corrupt: indicates whether the last read EDID was corrupt
- * @debugfs_entry: debugfs directory for this connector
- * @has_tile: is this connector connected to a tiled monitor
- * @tile_group: tile group for the connected monitor
- * @tile_is_single_monitor: whether the tile is one monitor housing
- * @num_h_tile: number of horizontal tiles in the tile group
- * @num_v_tile: number of vertical tiles in the tile group
- * @tile_h_loc: horizontal location of this tile
- * @tile_v_loc: vertical location of this tile
- * @tile_h_size: horizontal size of this tile.
- * @tile_v_size: vertical size of this tile.
- * @scaling_mode_property: Optional atomic property to control the upscaling.
- * @content_protection_property: Optional property to control content protection
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
* another connector if they can share a CRTC. Each connector also has a specific
@@ -784,13 +779,27 @@ struct drm_cmdline_mode {
* span multiple monitors).
*/
struct drm_connector {
+ /** @dev: parent DRM device */
struct drm_device *dev;
+ /** @kdev: kernel device for sysfs attributes */
struct device *kdev;
+ /** @attr: sysfs attributes */
struct device_attribute *attr;
+
+ /**
+ * @head:
+ *
+ * List of all connectors on a @dev, linked from
+ * &drm_mode_config.connector_list. Protected by
+ * &drm_mode_config.connector_list_lock, but please only use
+ * &drm_connector_list_iter to walk this list.
+ */
struct list_head head;
+ /** @base: base KMS object */
struct drm_mode_object base;
+ /** @name: human readable name, can be overwritten by the driver */
char *name;
/**
@@ -808,10 +817,30 @@ struct drm_connector {
*/
unsigned index;
+ /**
+ * @connector_type:
+ * one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ */
int connector_type;
+ /** @connector_type_id: index into connector type enum */
int connector_type_id;
+ /**
+ * @interlace_allowed:
+ * Can this connector handle interlaced modes? Only used by
+ * drm_helper_probe_single_connector_modes() for mode filtering.
+ */
bool interlace_allowed;
+ /**
+ * @doublescan_allowed:
+ * Can this connector handle doublescan? Only used by
+ * drm_helper_probe_single_connector_modes() for mode filtering.
+ */
bool doublescan_allowed;
+ /**
+ * @stereo_allowed:
+ * Can this connector handle stereo modes? Only used by
+ * drm_helper_probe_single_connector_modes() for mode filtering.
+ */
bool stereo_allowed;
/**
@@ -860,45 +889,42 @@ struct drm_connector {
* Protected by &drm_mode_config.mutex.
*/
struct drm_display_info display_info;
+
+ /** @funcs: connector control functions */
const struct drm_connector_funcs *funcs;
+ /**
+ * @edid_blob_ptr: DRM property containing EDID if present. Protected by
+ * &drm_mode_config.mutex. This should be updated only by calling
+ * drm_connector_update_edid_property().
+ */
struct drm_property_blob *edid_blob_ptr;
+
+ /** @properties: property tracking for this connector */
struct drm_object_properties properties;
+ /**
+ * @scaling_mode_property: Optional atomic property to control the
+ * upscaling. See drm_connector_attach_scaling_mode_property().
+ */
struct drm_property *scaling_mode_property;
/**
* @content_protection_property: DRM ENUM property for content
- * protection
+ * protection. See drm_connector_attach_content_protection_property().
*/
struct drm_property *content_protection_property;
/**
* @path_blob_ptr:
*
- * DRM blob property data for the DP MST path property.
+ * DRM blob property data for the DP MST path property. This should only
+ * be updated by calling drm_connector_set_path_property().
*/
struct drm_property_blob *path_blob_ptr;
- /**
- * @tile_blob_ptr:
- *
- * DRM blob property data for the tile property (used mostly by DP MST).
- * This is meant for screens which are driven through separate display
- * pipelines represented by &drm_crtc, which might not be running with
- * genlocked clocks. For tiled panels which are genlocked, like
- * dual-link LVDS or dual-link DSI, the driver should try to not expose
- * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
- */
- struct drm_property_blob *tile_blob_ptr;
-
-/* should we poll this connector for connects and disconnects */
-/* hot plug detectable */
#define DRM_CONNECTOR_POLL_HPD (1 << 0)
-/* poll for connections */
#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
-/* can cleanly poll for disconnections without flickering the screen */
-/* DACs should rarely do this without a lot of testing */
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
/**
@@ -915,25 +941,40 @@ struct drm_connector {
* Periodically poll the connector for connection.
*
* DRM_CONNECTOR_POLL_DISCONNECT
- * Periodically poll the connector for disconnection.
+ * Periodically poll the connector for disconnection, without
+ * causing flickering even when the connector is in use. DACs should
+ * rarely do this without a lot of testing.
*
* Set to 0 for connectors that don't support connection status
* discovery.
*/
uint8_t polled;
- /* requested DPMS state */
+ /**
+ * @dpms: Current dpms state. For legacy drivers the
+ * &drm_connector_funcs.dpms callback must update this. For atomic
+ * drivers, this is handled by the core atomic code, and drivers must
+ * only take &drm_crtc_state.active into account.
+ */
int dpms;
+ /** @helper_private: mid-layer private data */
const struct drm_connector_helper_funcs *helper_private;
- /* forced on connector */
+ /** @cmdline_mode: mode line parsed from the kernel cmdline for this connector */
struct drm_cmdline_mode cmdline_mode;
+ /** @force: a DRM_FORCE_<foo> state for forced mode sets */
enum drm_connector_force force;
+ /** @override_edid: has the EDID been overwritten through debugfs for testing? */
bool override_edid;
#define DRM_CONNECTOR_MAX_ENCODER 3
+ /**
+ * @encoder_ids: Valid encoders for this connector. Please only use
+ * drm_connector_for_each_possible_encoder() to enumerate these.
+ */
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+
/**
* @encoder: Currently bound encoder driving this connector, if any.
* Only really meaningful for non-atomic drivers. Atomic drivers should
@@ -943,19 +984,37 @@ struct drm_connector {
struct drm_encoder *encoder;
#define MAX_ELD_BYTES 128
- /* EDID bits */
+ /** @eld: EDID-like data, if present */
uint8_t eld[MAX_ELD_BYTES];
+ /** @latency_present: AV delay info from ELD, if found */
bool latency_present[2];
- int video_latency[2]; /* [0]: progressive, [1]: interlaced */
+ /**
+ * @video_latency: Video latency info from ELD, if found.
+ * [0]: progressive, [1]: interlaced
+ */
+ int video_latency[2];
+ /**
+ * @audio_latency: audio latency info from ELD, if found
+ * [0]: progressive, [1]: interlaced
+ */
int audio_latency[2];
- int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
+ /**
+ * @null_edid_counter: track sinks that give us all zeros for the EDID.
+ * Needed to workaround some HW bugs where we get all 0s
+ */
+ int null_edid_counter;
+
+ /** @bad_edid_counter: track sinks that give us an EDID with invalid checksum */
unsigned bad_edid_counter;
- /* Flag for raw EDID header corruption - used in Displayport
- * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
+ /**
+ * @edid_corrupt: Indicates whether the last read EDID was corrupt. Used
+ * in Displayport compliance testing - Displayport Link CTS Core 1.2
+ * rev1.1 4.2.2.6
*/
bool edid_corrupt;
+ /** @debugfs_entry: debugfs directory for this connector */
struct dentry *debugfs_entry;
/**
@@ -963,7 +1022,7 @@ struct drm_connector {
*
* Current atomic state for this connector.
*
- * This is protected by @drm_mode_config.connection_mutex. Note that
+ * This is protected by &drm_mode_config.connection_mutex. Note that
* nonblocking atomic commits access the current connector state without
* taking locks. Either by going through the &struct drm_atomic_state
* pointers, see for_each_oldnew_connector_in_state(),
@@ -974,19 +1033,44 @@ struct drm_connector {
*/
struct drm_connector_state *state;
- /* DisplayID bits */
+ /* DisplayID bits. FIXME: Extract into a substruct? */
+
+ /**
+ * @tile_blob_ptr:
+ *
+ * DRM blob property data for the tile property (used mostly by DP MST).
+ * This is meant for screens which are driven through separate display
+ * pipelines represented by &drm_crtc, which might not be running with
+ * genlocked clocks. For tiled panels which are genlocked, like
+ * dual-link LVDS or dual-link DSI, the driver should try to not expose
+ * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
+ *
+ * This should only be updated by calling
+ * drm_connector_set_tile_property().
+ */
+ struct drm_property_blob *tile_blob_ptr;
+
+ /** @has_tile: is this connector connected to a tiled monitor */
bool has_tile;
+ /** @tile_group: tile group for the connected monitor */
struct drm_tile_group *tile_group;
+ /** @tile_is_single_monitor: whether the tile group is within a single monitor housing */
bool tile_is_single_monitor;
+ /** @num_h_tile: number of horizontal tiles in the tile group */
+ /** @num_v_tile: number of vertical tiles in the tile group */
uint8_t num_h_tile, num_v_tile;
+ /** @tile_h_loc: horizontal location of this tile */
+ /** @tile_v_loc: vertical location of this tile */
uint8_t tile_h_loc, tile_v_loc;
+ /** @tile_h_size: horizontal size of this tile. */
+ /** @tile_v_size: vertical size of this tile. */
uint16_t tile_h_size, tile_v_size;
/**
* @free_node:
*
- * List used only by &drm_connector_iter to be able to clean up a
+ * List used only by &drm_connector_list_iter to be able to clean up a
* connector from any context, in conjunction with
* &drm_mode_config.connector_free_work.
*/
@@ -1001,15 +1085,21 @@ int drm_connector_init(struct drm_device *dev,
int connector_type);
int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+int drm_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
void drm_connector_cleanup(struct drm_connector *connector);
-static inline unsigned drm_connector_index(struct drm_connector *connector)
+
+static inline unsigned int drm_connector_index(const struct drm_connector *connector)
{
return connector->index;
}
+static inline u32 drm_connector_mask(const struct drm_connector *connector)
+{
+ return 1 << connector->index;
+}
+
/**
* drm_connector_lookup - lookup connector object
* @dev: DRM device
@@ -1089,20 +1179,25 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
unsigned int num_modes,
const char * const modes[]);
int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+int drm_connector_attach_content_type_property(struct drm_connector *dev);
int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
int drm_connector_attach_content_protection_property(
struct drm_connector *connector);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
+int drm_mode_create_content_type_property(struct drm_device *dev);
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
-int drm_mode_connector_set_path_property(struct drm_connector *connector,
- const char *path);
-int drm_mode_connector_set_tile_property(struct drm_connector *connector);
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- const struct edid *edid);
-void drm_mode_connector_set_link_status_property(struct drm_connector *connector,
- uint64_t link_status);
+int drm_connector_set_path_property(struct drm_connector *connector,
+ const char *path);
+int drm_connector_set_tile_property(struct drm_connector *connector);
+int drm_connector_update_edid_property(struct drm_connector *connector,
+ const struct edid *edid);
+void drm_connector_set_link_status_property(struct drm_connector *connector,
+ uint64_t link_status);
int drm_connector_init_panel_orientation_property(
struct drm_connector *connector, int width, int height);
@@ -1151,6 +1246,9 @@ struct drm_connector *
drm_connector_list_iter_next(struct drm_connector_list_iter *iter);
void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
+bool drm_connector_has_possible_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder);
+
/**
* drm_for_each_connector_iter - connector_list iterator macro
* @connector: &struct drm_connector pointer used as cursor
@@ -1163,4 +1261,17 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
#define drm_for_each_connector_iter(connector, iter) \
while ((connector = drm_connector_list_iter_next(iter)))
+/**
+ * drm_connector_for_each_possible_encoder - iterate connector's possible encoders
+ * @connector: &struct drm_connector pointer
+ * @encoder: &struct drm_encoder pointer used as cursor
+ * @__i: int iteration cursor, for macro-internal use
+ */
+#define drm_connector_for_each_possible_encoder(connector, encoder, __i) \
+ for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \
+ (connector)->encoder_ids[(__i)] != 0; (__i)++) \
+ for_each_if((encoder) = \
+ drm_encoder_find((connector)->dev, NULL, \
+ (connector)->encoder_ids[(__i)])) \
+
#endif
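
For illustration, a sketch of walking a connector's valid encoders with the new macro; the selection policy here (first idle encoder) is just an example.

	static struct drm_encoder *pick_encoder(struct drm_connector *connector)
	{
		struct drm_encoder *encoder;
		int i;

		drm_connector_for_each_possible_encoder(connector, encoder, i)
			if (!encoder->crtc)
				return encoder;

		return NULL;
	}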
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index a2d81d2907a9..92e7fc7f05a4 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -77,21 +77,6 @@ struct drm_plane_helper_funcs;
/**
* struct drm_crtc_state - mutable CRTC state
- * @crtc: backpointer to the CRTC
- * @enable: whether the CRTC should be enabled, gates all other state
- * @active: whether the CRTC is actively displaying (used for DPMS)
- * @planes_changed: planes on this crtc are updated
- * @mode_changed: @mode or @enable has been changed
- * @active_changed: @active has been toggled.
- * @connectors_changed: connectors to this crtc have been updated
- * @zpos_changed: zpos values of planes on this crtc have been updated
- * @color_mgmt_changed: color management properties have changed (degamma or
- * gamma LUT or CSC matrix)
- * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
- * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
- * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders
- * @mode_blob: &drm_property_blob for @mode
- * @state: backpointer to global drm_atomic_state
*
 * Note that the distinction between @enable and @active is rather subtle:
* Flipping @active while @enable is set without changing anything else may
@@ -102,31 +87,127 @@ struct drm_plane_helper_funcs;
*
* The three booleans active_changed, connectors_changed and mode_changed are
* intended to indicate whether a full modeset is needed, rather than strictly
- * describing what has changed in a commit.
- * See also: drm_atomic_crtc_needs_modeset()
+ * describing what has changed in a commit. See also:
+ * drm_atomic_crtc_needs_modeset()
+ *
+ * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or
+ * drm_helper_crtc_mode_set_base()) do not maintain much of the derived control
+ * state like @plane_mask so drivers not converted over to atomic helpers should
+ * not rely on these being accurate!
*/
struct drm_crtc_state {
+ /** @crtc: backpointer to the CRTC */
struct drm_crtc *crtc;
+ /**
+ * @enable: Whether the CRTC should be enabled, gates all other state.
+ * This controls reservations of shared resources. Actual hardware state
+ * is controlled by @active.
+ */
bool enable;
+
+ /**
+ * @active: Whether the CRTC is actively displaying (used for DPMS).
+ * Implies that @enable is set. The driver must not release any shared
+ * resources if @active is set to false but @enable still true, because
+ * userspace expects that a DPMS ON always succeeds.
+ *
+ * Hence drivers must not consult @active in their various
+ * &drm_mode_config_funcs.atomic_check callback to reject an atomic
+ * commit. They can consult it to aid in the computation of derived
+ * hardware state, since even in the DPMS OFF state the display hardware
+ * should be as much powered down as when the CRTC is completely
+ * disabled through setting @enable to false.
+ */
bool active;
- /* computed state bits used by helpers and drivers */
+ /**
+ * @planes_changed: Planes on this crtc are updated. Used by the atomic
+ * helpers and drivers to steer the atomic commit control flow.
+ */
bool planes_changed : 1;
+
+ /**
+ * @mode_changed: @mode or @enable has been changed. Used by the atomic
+ * helpers and drivers to steer the atomic commit control flow. See also
+ * drm_atomic_crtc_needs_modeset().
+ *
+ * Drivers are supposed to set this for any CRTC state changes that
+ * require a full modeset. They can also reset it to false if e.g. a
+ * @mode change can be done without a full modeset by only changing
+ * scaler settings.
+ */
bool mode_changed : 1;
+
+ /**
+ * @active_changed: @active has been toggled. Used by the atomic
+ * helpers and drivers to steer the atomic commit control flow. See also
+ * drm_atomic_crtc_needs_modeset().
+ */
bool active_changed : 1;
+
+ /**
+ * @connectors_changed: Connectors to this crtc have been updated,
+ * either in their state or routing. Used by the atomic
+ * helpers and drivers to steer the atomic commit control flow. See also
+ * drm_atomic_crtc_needs_modeset().
+ *
+ * Drivers are supposed to set this as-needed from their own atomic
+ * check code, e.g. from &drm_encoder_helper_funcs.atomic_check
+ */
bool connectors_changed : 1;
+ /**
+ * @zpos_changed: zpos values of planes on this crtc have been updated.
+ * Used by the atomic helpers and drivers to steer the atomic commit
+ * control flow.
+ */
bool zpos_changed : 1;
+ /**
+ * @color_mgmt_changed: Color management properties have changed
+ * (@gamma_lut, @degamma_lut or @ctm). Used by the atomic helpers and
+ * drivers to steer the atomic commit control flow.
+ */
bool color_mgmt_changed : 1;
- /* attached planes bitmask:
- * WARNING: transitional helpers do not maintain plane_mask so
- * drivers not converted over to atomic helpers should not rely
- * on plane_mask being accurate!
+ /**
+ * @no_vblank:
+ *
+ * Reflects the ability of a CRTC to send VBLANK events. This state
+ * usually depends on the pipeline configuration, and the main usage
+ * is CRTCs feeding a writeback connector operating in oneshot mode.
+ * In this case the VBLANK event is only generated when a job is queued
+ * to the writeback connector, and we want the core to fake VBLANK
+ * events when this part of the pipeline hasn't changed but others had
+ * or when the CRTC and connectors are being disabled.
+ *
+ * __drm_atomic_helper_crtc_duplicate_state() will not reset the value
+ * from the current state, the CRTC driver is then responsible for
+ * updating this field when needed.
+ *
+ * Note that the combination of &drm_crtc_state.event == NULL and
+ * &drm_crtc_state.no_vblank == true is valid and usually used when the
+ * writeback connector attached to the CRTC has a new job queued. In
+ * this case the driver will send the VBLANK event on its own when the
+ * writeback job is complete.
+ */
+ bool no_vblank : 1;
+
+ /**
+ * @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to
+ * this CRTC.
*/
u32 plane_mask;
+ /**
+ * @connector_mask: Bitmask of drm_connector_mask(connector) of
+ * connectors attached to this CRTC.
+ */
u32 connector_mask;
+
+ /**
+ * @encoder_mask: Bitmask of drm_encoder_mask(encoder) of encoders
+ * attached to this CRTC.
+ */
u32 encoder_mask;
/**
@@ -134,10 +215,13 @@ struct drm_crtc_state {
*
* Internal display timings which can be used by the driver to handle
* differences between the mode requested by userspace in @mode and what
- * is actually programmed into the hardware. It is purely driver
- * implementation defined what exactly this adjusted mode means. Usually
- * it is used to store the hardware display timings used between the
- * CRTC and encoder blocks.
+ * is actually programmed into the hardware.
+ *
+ * For drivers using &drm_bridge, this stores hardware display timings
+ * used between the CRTC and the first bridge. For other drivers, the
+ * meaning of the adjusted_mode field is purely driver implementation
+ * defined information, and will usually be used to store the hardware
+ * display timings used between the CRTC and encoder blocks.
*/
struct drm_display_mode adjusted_mode;
@@ -158,7 +242,10 @@ struct drm_crtc_state {
*/
struct drm_display_mode mode;
- /* blob property to expose current mode to atomic userspace */
+ /**
+ * @mode_blob: &drm_property_blob for @mode, for exposing the mode to
+ * atomic userspace.
+ */
struct drm_property_blob *mode_blob;
/**
@@ -262,6 +349,7 @@ struct drm_crtc_state {
*/
struct drm_crtc_commit *commit;
+ /** @state: backpointer to global drm_atomic_state */
struct drm_atomic_state *state;
};
@@ -503,6 +591,8 @@ struct drm_crtc_funcs {
* cleaned up by calling the @atomic_destroy_state hook in this
* structure.
*
+ * This callback is mandatory for atomic drivers.
+ *
* Atomic drivers which don't subclass &struct drm_crtc_state should use
* drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
@@ -529,6 +619,8 @@ struct drm_crtc_funcs {
*
* Destroy a state duplicated with @atomic_duplicate_state and release
* or unreference all resources it references
+ *
+ * This callback is mandatory for atomic drivers.
*/
void (*atomic_destroy_state)(struct drm_crtc *crtc,
struct drm_crtc_state *state);
@@ -717,35 +809,25 @@ struct drm_crtc_funcs {
/**
* struct drm_crtc - central CRTC control structure
- * @dev: parent DRM device
- * @port: OF node used by drm_of_find_possible_crtcs()
- * @head: list management
- * @name: human readable name, can be overwritten by the driver
- * @mutex: per-CRTC locking
- * @base: base KMS object for ID tracking etc.
- * @primary: primary plane for this CRTC
- * @cursor: cursor plane for this CRTC
- * @cursor_x: current x position of the cursor, used for universal cursor planes
- * @cursor_y: current y position of the cursor, used for universal cursor planes
- * @enabled: is this CRTC enabled?
- * @mode: current mode timings
- * @hwmode: mode timings as programmed to hw regs
- * @x: x position on screen
- * @y: y position on screen
- * @funcs: CRTC control functions
- * @gamma_size: size of gamma ramp
- * @gamma_store: gamma ramp values
- * @helper_private: mid-layer private data
- * @properties: property tracking for this CRTC
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
*/
struct drm_crtc {
+ /** @dev: parent DRM device */
struct drm_device *dev;
+ /** @port: OF node used by drm_of_find_possible_crtcs(). */
struct device_node *port;
+ /**
+ * @head:
+ *
+ * List of all CRTCs on @dev, linked from &drm_mode_config.crtc_list.
+ * Invariant over the lifetime of @dev and therefore does not need
+ * locking.
+ */
struct list_head head;
+ /** @name: human readable name, can be overwritten by the driver */
char *name;
/**
@@ -760,10 +842,25 @@ struct drm_crtc {
*/
struct drm_modeset_lock mutex;
+ /** @base: base KMS object for ID tracking etc. */
struct drm_mode_object base;
- /* primary and cursor planes for CRTC */
+ /**
+ * @primary:
+ * Primary plane for this CRTC. Note that this is only
+ * relevant for legacy IOCTLs; it specifies the plane implicitly used by
+ * the SETCRTC and PAGE_FLIP IOCTLs. It does not have any significance
+ * beyond that.
+ */
struct drm_plane *primary;
+
+ /**
+ * @cursor:
+ * Cursor plane for this CRTC. Note that this is only relevant for
+ * legacy IOCTLs; it specifies the plane implicitly used by the SETCURSOR
+ * and SETCURSOR2 IOCTLs. It does not have any significance
+ * beyond that.
+ */
struct drm_plane *cursor;
/**
@@ -772,30 +869,94 @@ struct drm_crtc {
*/
unsigned index;
- /* position of cursor plane on crtc */
+ /**
+ * @cursor_x: Current x position of the cursor, used for universal
+ * cursor planes because the SETCURSOR IOCTL only can update the
+ * framebuffer without supplying the coordinates. Drivers should not use
+ * this directly, atomic drivers should look at &drm_plane_state.crtc_x
+ * of the cursor plane instead.
+ */
int cursor_x;
+ /**
+ * @cursor_y: Current y position of the cursor, used for universal
+ * cursor planes because the SETCURSOR IOCTL only can update the
+ * framebuffer without supplying the coordinates. Drivers should not use
+ * this directly, atomic drivers should look at &drm_plane_state.crtc_y
+ * of the cursor plane instead.
+ */
int cursor_y;
+ /**
+ * @enabled:
+ *
+ * Is this CRTC enabled? Should only be used by legacy drivers, atomic
+ * drivers should instead consult &drm_crtc_state.enable and
+ * &drm_crtc_state.active. Atomic drivers can update this by calling
+ * drm_atomic_helper_update_legacy_modeset_state().
+ */
bool enabled;
- /* Requested mode from modesetting. */
+ /**
+ * @mode:
+ *
+ * Current mode timings. Should only be used by legacy drivers, atomic
+ * drivers should instead consult &drm_crtc_state.mode. Atomic drivers
+ * can update this by calling
+ * drm_atomic_helper_update_legacy_modeset_state().
+ */
struct drm_display_mode mode;
- /* Programmed mode in hw, after adjustments for encoders,
- * crtc, panel scaling etc. Needed for timestamping etc.
+ /**
+ * @hwmode:
+ *
+ * Programmed mode in hw, after adjustments for encoders, crtc, panel
+ * scaling etc. Should only be used by legacy drivers, for high
+ * precision vblank timestamps in
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ *
+ * Note that atomic drivers should not use this, but instead use
+ * &drm_crtc_state.adjusted_mode. And for high-precision timestamps
+ * drm_calc_vbltimestamp_from_scanoutpos() uses &drm_vblank_crtc.hwmode,
+ * which is filled out by calling drm_calc_timestamping_constants().
*/
struct drm_display_mode hwmode;
- int x, y;
+ /**
+ * @x:
+ * x position on screen. Should only be used by legacy drivers, atomic
+ * drivers should look at &drm_plane_state.crtc_x of the primary plane
+ * instead. Updated by calling
+ * drm_atomic_helper_update_legacy_modeset_state().
+ */
+ int x;
+ /**
+ * @y:
+ * y position on screen. Should only be used by legacy drivers, atomic
+ * drivers should look at &drm_plane_state.crtc_y of the primary plane
+ * instead. Updated by calling
+ * drm_atomic_helper_update_legacy_modeset_state().
+ */
+ int y;
+
+ /** @funcs: CRTC control functions */
const struct drm_crtc_funcs *funcs;
- /* Legacy FB CRTC gamma size for reporting to userspace */
+ /**
+ * @gamma_size: Size of legacy gamma ramp reported to userspace. Set up
+ * by calling drm_mode_crtc_set_gamma_size().
+ */
uint32_t gamma_size;
+
+ /**
+ * @gamma_store: Gamma ramp values used by the legacy SETGAMMA and
+ * GETGAMMA IOCTLs. Set up by calling drm_mode_crtc_set_gamma_size().
+ */
uint16_t *gamma_store;
- /* if you are using the helper */
+ /** @helper_private: mid-layer private data */
const struct drm_crtc_helper_funcs *helper_private;
+ /** @properties: property tracking for this CRTC */
struct drm_object_properties properties;
/**
@@ -865,7 +1026,6 @@ struct drm_crtc {
*
* spinlock to protect the fences in the fence_context.
*/
-
spinlock_t fence_lock;
/**
* @fence_seqno:
@@ -935,8 +1095,8 @@ static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc)
* drm_crtc_mask - find the mask of a registered CRTC
* @crtc: CRTC to find mask for
*
- * Given a registered CRTC, return the mask bit of that CRTC for an
- * encoder's possible_crtcs field.
+ * Given a registered CRTC, return the mask bit of that CRTC for the
+ * &drm_encoder.possible_crtcs and &drm_plane.possible_crtcs fields.
*/
static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc)
{
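
The expanded drm_crtc_mask() kerneldoc now names both consumers; a driver wiring a fixed pipe might use it like this (sketch, assuming "crtc", "encoder" and "plane" are the driver's initialized objects):

	/* Restrict an encoder and a plane to one known CRTC. */
	encoder->possible_crtcs = drm_crtc_mask(crtc);
	plane->possible_crtcs = drm_crtc_mask(crtc);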
diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h
index 7d63b1d4adb9..b225eeb30d05 100644
--- a/include/drm/drm_debugfs_crc.h
+++ b/include/drm/drm_debugfs_crc.h
@@ -43,6 +43,7 @@ struct drm_crtc_crc_entry {
* @lock: protects the fields in this struct
* @source: name of the currently configured source of CRCs
* @opened: whether userspace has opened the data file for reading
+ * @overflow: whether an overflow occurred.
* @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR
* @head: head of circular queue
* @tail: tail of circular queue
@@ -52,7 +53,7 @@ struct drm_crtc_crc_entry {
struct drm_crtc_crc {
spinlock_t lock;
const char *source;
- bool opened;
+ bool opened, overflow;
struct drm_crtc_crc_entry *entries;
int head, tail;
size_t values_cnt;
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 858ba19a3e29..f9c6e0e3aec7 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -74,6 +74,27 @@ struct drm_device {
struct mutex filelist_mutex;
struct list_head filelist;
+ /**
+ * @filelist_internal:
+ *
+ * List of open DRM files for in-kernel clients. Protected by @filelist_mutex.
+ */
+ struct list_head filelist_internal;
+
+ /**
+ * @clientlist_mutex:
+ *
+ * Protects @clientlist access.
+ */
+ struct mutex clientlist_mutex;
+
+ /**
+ * @clientlist:
+ *
+ * List of in-kernel clients. Protected by @clientlist_mutex.
+ */
+ struct list_head clientlist;
+
/** \name Memory management */
/*@{ */
struct list_head maplist; /**< Linked list of regions */
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index c01564991a9f..05cc31b5db16 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1078,6 +1078,25 @@ struct drm_dp_aux_msg {
size_t size;
};
+struct cec_adapter;
+struct edid;
+
+/**
+ * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
+ * @lock: mutex protecting this struct
+ * @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
+ * @name: name of the CEC adapter
+ * @parent: parent device of the CEC adapter
+ * @unregister_work: delayed work to unregister the CEC adapter
+ */
+struct drm_dp_aux_cec {
+ struct mutex lock;
+ struct cec_adapter *adap;
+ const char *name;
+ struct device *parent;
+ struct delayed_work unregister_work;
+};
+
/**
* struct drm_dp_aux - DisplayPort AUX channel
* @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
@@ -1136,6 +1155,10 @@ struct drm_dp_aux {
* @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
*/
unsigned i2c_defer_count;
+ /**
+ * @cec: struct containing fields used for CEC-Tunneling-over-AUX.
+ */
+ struct drm_dp_aux_cec cec;
};
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
@@ -1258,4 +1281,37 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
return desc->quirks & BIT(quirk);
}
+#ifdef CONFIG_DRM_DP_CEC
+void drm_dp_cec_irq(struct drm_dp_aux *aux);
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
+ struct device *parent);
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
+#else
+static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ const char *name,
+ struct device *parent)
+{
+}
+
+static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
+ const struct edid *edid)
+{
+}
+
+static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+}
+
+#endif
+
#endif /* _DRM_DP_HELPER_H_ */
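
A sketch of the CEC-Tunneling-over-AUX lifetime, assuming "aux" is the connector's initialized &drm_dp_aux and "edid" the EDID read in the probe path; the stubs above make each call a no-op when CONFIG_DRM_DP_CEC is disabled.

	/* once, after the connector is registered: */
	drm_dp_cec_register_connector(&aux, connector->name, dev->dev);

	/* on EDID (re)read: */
	drm_dp_cec_set_edid(&aux, edid);

	/* on disconnect: */
	drm_dp_cec_unset_edid(&aux);

	/* on connector teardown: */
	drm_dp_cec_unregister_connector(&aux);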
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 7e545f5f94d3..46a8009784df 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -649,6 +649,35 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
return true;
}
+/**
+ * drm_core_check_feature - check driver feature flags
+ * @dev: DRM device to check
+ * @feature: feature flag
+ *
+ * This checks @dev for driver features, see &drm_driver.driver_features and the
+ * various DRIVER_\* flags.
+ *
+ * Returns true if the @feature is supported, false otherwise.
+ */
+static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
+{
+ return dev->driver->driver_features & feature;
+}
+
+/**
+ * drm_drv_uses_atomic_modeset - check if the driver implements
+ * atomic_commit()
+ * @dev: DRM device
+ *
+ * This check is useful if drivers do not have DRIVER_ATOMIC set but
+ * implement atomic modesetting internally.
+ */
+static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
+{
+ return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
+ dev->mode_config.funcs->atomic_commit != NULL;
+}
+
int drm_dev_set_unique(struct drm_device *dev, const char *name);
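
The two inline helpers above are typically used as early-out guards; a sketch:

	/* Reject a legacy entry point on non-atomic drivers. */
	if (!drm_drv_uses_atomic_modeset(dev))
		return -EINVAL;

	/* Or gate on an explicit feature flag: */
	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;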
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index fb299696c7c4..4f597c0730b4 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -191,12 +191,24 @@ int drm_encoder_init(struct drm_device *dev,
* Given a registered encoder, return the index of that encoder within a DRM
* device's list of encoders.
*/
-static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
+static inline unsigned int drm_encoder_index(const struct drm_encoder *encoder)
{
return encoder->index;
}
/**
+ * drm_encoder_mask - find the mask of a registered encoder
+ * @encoder: encoder to find mask for
+ *
+ * Given a registered encoder, return the mask bit of that encoder for an
+ * encoder's possible_clones field.
+ */
+static inline u32 drm_encoder_mask(const struct drm_encoder *encoder)
+{
+ return 1 << drm_encoder_index(encoder);
+}
+
+/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
* @encoder: encoder to test
* @crtc: crtc to test
@@ -241,7 +253,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder);
*/
#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
- for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
+ for_each_if ((encoder_mask) & drm_encoder_mask(encoder))
/**
* drm_for_each_encoder - iterate over all encoders
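
A sketch of drm_encoder_mask() used for the possible_clones field it documents, assuming "enc_a" and "enc_b" are two registered encoders that can be driven simultaneously:

	/* Declare two encoders as mutually cloneable. */
	enc_a->possible_clones = drm_encoder_mask(enc_a) |
				 drm_encoder_mask(enc_b);
	enc_b->possible_clones = enc_a->possible_clones;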
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index d532f88a8d55..96e26e3b9a0c 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -16,16 +16,10 @@ struct drm_mode_fb_cmd2;
struct drm_plane;
struct drm_plane_state;
-int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count,
- const struct drm_framebuffer_funcs *funcs);
int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
unsigned int max_conn_count);
void drm_fb_cma_fbdev_fini(struct drm_device *dev);
-struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count,
- const struct drm_framebuffer_funcs *funcs);
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int max_conn_count);
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index b069433e7fc1..5db08c8f1d25 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -32,6 +32,7 @@
struct drm_fb_helper;
+#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <linux/kgdb.h>
@@ -154,6 +155,20 @@ struct drm_fb_helper_connector {
* operations.
*/
struct drm_fb_helper {
+ /**
+ * @client:
+ *
+ * DRM client used by the generic fbdev emulation.
+ */
+ struct drm_client_dev client;
+
+ /**
+ * @buffer:
+ *
+ * Framebuffer used by the generic fbdev emulation.
+ */
+ struct drm_client_buffer *buffer;
+
struct drm_framebuffer *fb;
struct drm_device *dev;
int crtc_count;
@@ -234,6 +249,12 @@ struct drm_fb_helper {
int preferred_bpp;
};
+static inline struct drm_fb_helper *
+drm_fb_helper_from_client(struct drm_client_dev *client)
+{
+ return container_of(client, struct drm_fb_helper, client);
+}
+
/**
* define DRM_FB_HELPER_DEFAULT_OPS - helper define for drm drivers
*
@@ -330,6 +351,10 @@ void drm_fb_helper_fbdev_teardown(struct drm_device *dev);
void drm_fb_helper_lastclose(struct drm_device *dev);
void drm_fb_helper_output_poll_changed(struct drm_device *dev);
+
+int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes);
+int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
@@ -564,6 +589,19 @@ static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
{
}
+static inline int
+drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ return 0;
+}
+
+static inline int
+drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+{
+ return 0;
+}
+
#endif
static inline int
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 027ac16da3d1..26485acc51d7 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -193,6 +193,13 @@ struct drm_file {
unsigned aspect_ratio_allowed:1;
/**
+ * @writeback_connectors:
+ *
+ * True if client understands writeback connectors
+ */
+ unsigned writeback_connectors:1;
+
+ /**
* @is_master:
*
* This client is the creator of @master. Protected by struct
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 3e86408dac9f..f9c15845f465 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -39,6 +39,7 @@ struct drm_mode_fb_cmd2;
* @hsub: Horizontal chroma subsampling factor
* @vsub: Vertical chroma subsampling factor
* @has_alpha: Does the format embeds an alpha component?
+ * @is_yuv: Is it a YUV format?
*/
struct drm_format_info {
u32 format;
@@ -48,6 +49,7 @@ struct drm_format_info {
u8 hsub;
u8 vsub;
bool has_alpha;
+ bool is_yuv;
};
/**
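
With the new @is_yuv flag, a plane update can test the framebuffer's format info directly instead of open-coding a fourcc list; a sketch, where "enable_csc" is a hypothetical hardware hook:

	if (fb->format->is_yuv)
		enable_csc(plane);	/* set up the YUV->RGB conversion */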
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 101f566ae43d..2c3bbb43c7d1 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -109,6 +109,38 @@ enum drm_mm_insert_mode {
* Allocates the node from the bottom of the found hole.
*/
DRM_MM_INSERT_EVICT,
+
+ /**
+ * @DRM_MM_INSERT_ONCE:
+ *
+ * Only check the first hole for suitability and report -ENOSPC
+ * immediately otherwise, rather than check every hole until a
+ * suitable one is found. Can only be used in conjunction with another
+ * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW.
+ */
+ DRM_MM_INSERT_ONCE = BIT(31),
+
+ /**
+ * @DRM_MM_INSERT_HIGHEST:
+ *
+ * Only check the highest hole (the hole with the largest address) and
+ * insert the node at the top of the hole or report -ENOSPC if
+ * unsuitable.
+ *
+ * Does not search all holes.
+ */
+ DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE,
+
+ /**
+ * @DRM_MM_INSERT_LOWEST:
+ *
+ * Only check the lowest hole (the hole with the smallest address) and
+ * insert the node at the bottom of the hole or report -ENOSPC if
+ * unsuitable.
+ *
+ * Does not search all holes.
+ */
+ DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
};
/**
@@ -173,7 +205,7 @@ struct drm_mm {
struct drm_mm_node head_node;
/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
struct rb_root_cached interval_tree;
- struct rb_root holes_size;
+ struct rb_root_cached holes_size;
struct rb_root holes_addr;
unsigned long scan_active;
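
A sketch of the new combined mode, assuming "mm", "node" and "size" are set up as for any range allocation: place the node in the topmost hole only, failing fast with -ENOSPC instead of scanning every hole.

	err = drm_mm_insert_node_in_range(mm, node, size, 0, 0,
					  0, U64_MAX,
					  DRM_MM_INSERT_HIGHEST);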
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 33b3a96d66d0..a0b202e1d69a 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -329,10 +329,10 @@ struct drm_mode_config_funcs {
/**
* struct drm_mode_config - Mode configuration control structure
- * @min_width: minimum pixel width on this device
- * @min_height: minimum pixel height on this device
- * @max_width: maximum pixel width on this device
- * @max_height: maximum pixel height on this device
+ * @min_width: minimum fb pixel width on this device
+ * @min_height: minimum fb pixel height on this device
+ * @max_width: maximum fb pixel width on this device
+ * @max_height: maximum fb pixel height on this device
* @funcs: core driver provided mode setting functions
* @fb_base: base address of the framebuffer
* @poll_enabled: track polling support for this device
@@ -727,6 +727,11 @@ struct drm_mode_config {
*/
struct drm_property *aspect_ratio_property;
/**
+ * @content_type_property: Optional connector property to control the
+ * HDMI infoframe content type setting.
+ */
+ struct drm_property *content_type_property;
+ /**
* @degamma_lut_property: Optional CRTC property to set the LUT used to
* convert the framebuffer's colors to linear gamma.
*/
@@ -779,6 +784,29 @@ struct drm_mode_config {
*/
struct drm_property *panel_orientation_property;
+ /**
+ * @writeback_fb_id_property: Property for writeback connectors, storing
+ * the ID of the output framebuffer.
+ * See also: drm_writeback_connector_init()
+ */
+ struct drm_property *writeback_fb_id_property;
+
+ /**
+ * @writeback_pixel_formats_property: Property for writeback connectors,
+ * storing an array of the supported pixel formats for the writeback
+ * engine (read-only).
+ * See also: drm_writeback_connector_init()
+ */
+ struct drm_property *writeback_pixel_formats_property;
+ /**
+ * @writeback_out_fence_ptr_property: Property for writeback connectors,
+ * fd pointer representing the outgoing fences for a writeback
+ * connector. Userspace should provide a pointer to a value of type s32,
+ * and then cast that pointer to u64.
+ * See also: drm_writeback_connector_init()
+ */
+ struct drm_property *writeback_out_fence_ptr_property;
+
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index b159fe07fcf9..baded6514456 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -530,7 +530,7 @@ drm_mode_validate_ycbcr420(const struct drm_display_mode *mode,
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
void drm_mode_sort(struct list_head *mode_list);
-void drm_mode_connector_list_update(struct drm_connector *connector);
+void drm_connector_list_update(struct drm_connector *connector);
/* parsing cmdline modes */
bool
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 35e2a3a79fc5..61142aa0ab23 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -785,7 +785,7 @@ struct drm_connector_helper_funcs {
*
* This function should fill in all modes currently valid for the sink
* into the &drm_connector.probed_modes list. It should also update the
- * EDID property by calling drm_mode_connector_update_edid_property().
+ * EDID property by calling drm_connector_update_edid_property().
*
* The usual way to implement this is to cache the EDID retrieved in the
* probe callback somewhere in the driver-private connector structure.
@@ -974,6 +974,21 @@ struct drm_connector_helper_funcs {
*/
int (*atomic_check)(struct drm_connector *connector,
struct drm_connector_state *state);
+
+ /**
+ * @atomic_commit:
+ *
+ * This hook is to be used by drivers implementing writeback connectors
+ * that need a well-defined point in the commit sequence at which to
+ * commit the writeback job to the hardware.
+ * The writeback_job to commit is available in
+ * &drm_connector_state.writeback_job.
+ *
+ * This hook is optional.
+ *
+ * This callback is used by the atomic modeset helpers.
+ */
+ void (*atomic_commit)(struct drm_connector *connector,
+ struct drm_connector_state *state);
};
/**
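
A sketch of the @atomic_commit hook for a writeback connector; the hardware programming is a placeholder, and the container_of() assumes the connector is embedded as the "base" member of a &drm_writeback_connector.

	static void my_wb_atomic_commit(struct drm_connector *connector,
					struct drm_connector_state *state)
	{
		struct drm_writeback_connector *wb_connector =
			container_of(connector,
				     struct drm_writeback_connector, base);

		if (!state->writeback_job || !state->writeback_job->fb)
			return;

		/* Program the writeback engine from
		 * state->writeback_job->fb (hardware-specific), then hand
		 * the job to the core so it can signal the out-fence on
		 * completion: */
		drm_writeback_queue_job(wb_connector, state->writeback_job);
		state->writeback_job = NULL;
	}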
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index b93c239afb60..ead34ab5ca4e 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -17,6 +17,8 @@ struct drm_bridge;
struct device_node;
#ifdef CONFIG_OF
+uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+ struct device_node *port);
uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port);
void drm_of_component_match_add(struct device *master,
@@ -34,6 +36,12 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
struct drm_panel **panel,
struct drm_bridge **bridge);
#else
+static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+ struct device_node *port)
+{
+ return 0;
+}
+
static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port)
{
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 14ac240a1f64..582a0ec0aa70 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -89,6 +89,7 @@ struct drm_panel {
struct drm_device *drm;
struct drm_connector *connector;
struct device *dev;
+ struct device_link *link;
const struct drm_panel_funcs *funcs;
@@ -199,7 +200,7 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np);
#else
static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
#endif
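
Since the stub now returns an ERR_PTR instead of NULL, callers check the result with IS_ERR(); a sketch:

	panel = of_drm_find_panel(np);
	if (IS_ERR(panel))
		return PTR_ERR(panel);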
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
index 674599025d7d..8181e9e7cf1d 100644
--- a/include/drm/drm_pci.h
+++ b/include/drm/drm_pci.h
@@ -58,11 +58,4 @@ static inline int drm_get_pci_dev(struct pci_dev *pdev,
}
#endif
-#define DRM_PCIE_SPEED_25 1
-#define DRM_PCIE_SPEED_50 2
-#define DRM_PCIE_SPEED_80 4
-
-int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
-int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw);
-
#endif /* _DRM_PCI_H_ */
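
The removed DRM_PCIE_SPEED_\* helpers have counterparts in the PCI core; a driver that needed the link capabilities would now query them directly (sketch, assuming "pdev" is the device's struct pci_dev):

	enum pci_bus_speed speed = pcie_get_speed_cap(pdev);
	enum pcie_link_width width = pcie_get_width_cap(pdev);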
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 26fa50c2a50e..8a152dc16ea5 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -34,31 +34,15 @@ struct drm_modeset_acquire_ctx;
/**
* struct drm_plane_state - mutable plane state
- * @plane: backpointer to the plane
- * @crtc_w: width of visible portion of plane on crtc
- * @crtc_h: height of visible portion of plane on crtc
- * @src_x: left position of visible portion of plane within
- * plane (in 16.16)
- * @src_y: upper position of visible portion of plane within
- * plane (in 16.16)
- * @src_w: width of visible portion of plane (in 16.16)
- * @src_h: height of visible portion of plane (in 16.16)
- * @alpha: opacity of the plane
- * @rotation: rotation of the plane
- * @zpos: priority of the given plane on crtc (optional)
- * Note that multiple active planes on the same crtc can have an identical
- * zpos value. The rule to solving the conflict is to compare the plane
- * object IDs; the plane with a higher ID must be stacked on top of a
- * plane with a lower ID.
- * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
- * where N is the number of active planes for given crtc. Note that
- * the driver must set drm_mode_config.normalize_zpos or call
- * drm_atomic_normalize_zpos() to update this before it can be trusted.
- * @src: clipped source coordinates of the plane (in 16.16)
- * @dst: clipped destination coordinates of the plane
- * @state: backpointer to global drm_atomic_state
+ *
+ * Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
+ * @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the
+ * raw coordinates provided by userspace. Drivers should use
+ * drm_atomic_helper_check_plane_state() and only use the derived rectangles in
+ * @src and @dst to program the hardware.
*/
struct drm_plane_state {
+ /** @plane: backpointer to the plane */
struct drm_plane *plane;
/**
@@ -87,7 +71,7 @@ struct drm_plane_state {
* preserved.
*
* Drivers should store any implicit fence in this from their
- * &drm_plane_helper.prepare_fb callback. See drm_gem_fb_prepare_fb()
+ * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb()
* and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers.
*/
struct dma_fence *fence;
@@ -108,20 +92,60 @@ struct drm_plane_state {
*/
int32_t crtc_y;
+ /** @crtc_w: width of visible portion of plane on crtc */
+ /** @crtc_h: height of visible portion of plane on crtc */
uint32_t crtc_w, crtc_h;
- /* Source values are 16.16 fixed point */
- uint32_t src_x, src_y;
+ /**
+ * @src_x: left position of visible portion of plane within plane (in
+ * 16.16 fixed point).
+ */
+ uint32_t src_x;
+ /**
+ * @src_y: upper position of visible portion of plane within plane (in
+ * 16.16 fixed point).
+ */
+ uint32_t src_y;
+ /** @src_w: width of visible portion of plane (in 16.16) */
+ /** @src_h: height of visible portion of plane (in 16.16) */
uint32_t src_h, src_w;
- /* Plane opacity */
+ /**
+ * @alpha:
+ * Opacity of the plane with 0 as completely transparent and 0xffff as
+ * completely opaque. See drm_plane_create_alpha_property() for more
+ * details.
+ */
u16 alpha;
- /* Plane rotation */
+ /**
+ * @rotation:
+ * Rotation of the plane. See drm_plane_create_rotation_property() for
+ * more details.
+ */
unsigned int rotation;
- /* Plane zpos */
+ /**
+ * @zpos:
+ * Priority of the given plane on crtc (optional).
+ *
+ * Note that multiple active planes on the same crtc can have an
+ * identical zpos value. The rule to solving the conflict is to compare
+ * the plane object IDs; the plane with a higher ID must be stacked on
+ * top of a plane with a lower ID.
+ *
+ * See drm_plane_create_zpos_property() and
+ * drm_plane_create_zpos_immutable_property() for more details.
+ */
unsigned int zpos;
+
+ /**
+ * @normalized_zpos:
+ * Normalized value of zpos: unique, range from 0 to N-1 where N is the
+ * number of active planes for given crtc. Note that the driver must set
+ * &drm_mode_config.normalize_zpos or call drm_atomic_normalize_zpos() to
+ * update this before it can be trusted.
+ */
unsigned int normalized_zpos;
/**
@@ -138,7 +162,8 @@ struct drm_plane_state {
*/
enum drm_color_range color_range;
- /* Clipped coordinates */
+ /** @src: clipped source coordinates of the plane (in 16.16) */
+ /** @dst: clipped destination coordinates of the plane */
struct drm_rect src, dst;
/**
@@ -157,6 +182,7 @@ struct drm_plane_state {
*/
struct drm_crtc_commit *commit;
+ /** @state: backpointer to global drm_atomic_state */
struct drm_atomic_state *state;
};
@@ -288,6 +314,8 @@ struct drm_plane_funcs {
* cleaned up by calling the @atomic_destroy_state hook in this
* structure.
*
+ * This callback is mandatory for atomic drivers.
+ *
* Atomic drivers which don't subclass &struct drm_plane_state should use
* drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
@@ -314,6 +342,8 @@ struct drm_plane_funcs {
*
* Destroy a state duplicated with @atomic_duplicate_state and release
* or unreference all resources it references
+ *
+ * This callback is mandatory for atomic drivers.
*/
void (*atomic_destroy_state)(struct drm_plane *plane,
struct drm_plane_state *state);
@@ -431,7 +461,10 @@ struct drm_plane_funcs {
* This optional hook is used for the DRM to determine if the given
* format/modifier combination is valid for the plane. This allows the
* DRM to generate the correct format bitmask (which formats apply to
- * which modifier).
+ * which modifier), and to validate modifiers at atomic_check time.
+ *
+ * If not present, then any modifier in the plane's modifier
+ * list is allowed with any of the plane's formats.
*
* Returns:
*
@@ -492,30 +525,27 @@ enum drm_plane_type {
/**
* struct drm_plane - central DRM plane control structure
- * @dev: DRM device this plane belongs to
- * @head: for list management
- * @name: human readable name, can be overwritten by the driver
- * @base: base mode object
- * @possible_crtcs: pipes this plane can be bound to
- * @format_types: array of formats supported by this plane
- * @format_count: number of formats supported
- * @format_default: driver hasn't supplied supported formats for the plane
- * @modifiers: array of modifiers supported by this plane
- * @modifier_count: number of modifiers supported
- * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
- * drm_mode_set_config_internal() to implement correct refcounting.
- * @funcs: helper functions
- * @properties: property tracking for this plane
- * @type: type of plane (overlay, primary, cursor)
- * @alpha_property: alpha property for this plane
- * @zpos_property: zpos property for this plane
- * @rotation_property: rotation property for this plane
- * @helper_private: mid-layer private data
+ *
+ * Planes represent the scanout hardware of a display block. They receive their
+ * input data from a &drm_framebuffer and feed it to a &drm_crtc. Planes control
+ * how their contents are composited onto the CRTC, see `Plane Composition
+ * Properties`_ for more details, and are also involved in the color conversion
+ * of input pixels, see `Color Management Properties`_ for details on that.
*/
struct drm_plane {
+ /** @dev: DRM device this plane belongs to */
struct drm_device *dev;
+
+ /**
+ * @head:
+ *
+ * List of all planes on @dev, linked from &drm_mode_config.plane_list.
+ * Invariant over the lifetime of @dev and therefore does not need
+ * locking.
+ */
struct list_head head;
+ /** @name: human readable name, can be overwritten by the driver */
char *name;
/**
@@ -529,35 +559,62 @@ struct drm_plane {
*/
struct drm_modeset_lock mutex;
+ /** @base: base mode object */
struct drm_mode_object base;
+ /**
+ * @possible_crtcs: bitmask of pipes this plane can be bound to,
+ * constructed from drm_crtc_mask().
+ */
uint32_t possible_crtcs;
+ /** @format_types: array of formats supported by this plane */
uint32_t *format_types;
+ /** @format_count: Size of the array pointed at by @format_types. */
unsigned int format_count;
+ /**
+ * @format_default: driver hasn't supplied supported formats for the
+ * plane. Used by the drm_plane_init compatibility wrapper only.
+ */
bool format_default;
+ /** @modifiers: array of modifiers supported by this plane */
uint64_t *modifiers;
+ /** @modifier_count: Size of the array pointed at by @modifiers. */
unsigned int modifier_count;
/**
- * @crtc: Currently bound CRTC, only really meaningful for non-atomic
- * drivers. Atomic drivers should instead check &drm_plane_state.crtc.
+ * @crtc:
+ *
+ * Currently bound CRTC, only meaningful for non-atomic drivers. For
+ * atomic drivers this is forced to be NULL, atomic drivers should
+ * instead check &drm_plane_state.crtc.
*/
struct drm_crtc *crtc;
/**
- * @fb: Currently bound framebuffer, only really meaningful for
- * non-atomic drivers. Atomic drivers should instead check
- * &drm_plane_state.fb.
+ * @fb:
+ *
+ * Currently bound framebuffer, only meaningful for non-atomic drivers.
+ * For atomic drivers this is forced to be NULL, atomic drivers should
+ * instead check &drm_plane_state.fb.
*/
struct drm_framebuffer *fb;
+ /**
+ * @old_fb:
+ *
+ * Temporary tracking of the old fb while a modeset is ongoing. Only
+ * used by non-atomic drivers, forced to be NULL for atomic drivers.
+ */
struct drm_framebuffer *old_fb;
+ /** @funcs: plane control functions */
const struct drm_plane_funcs *funcs;
+ /** @properties: property tracking for this plane */
struct drm_object_properties properties;
+ /** @type: Type of plane, see &enum drm_plane_type for details. */
enum drm_plane_type type;
/**
@@ -566,6 +623,7 @@ struct drm_plane {
*/
unsigned index;
+ /** @helper_private: mid-layer private data */
const struct drm_plane_helper_funcs *helper_private;
/**
@@ -583,8 +641,23 @@ struct drm_plane {
*/
struct drm_plane_state *state;
+ /**
+ * @alpha_property:
+ * Optional alpha property for this plane. See
+ * drm_plane_create_alpha_property().
+ */
struct drm_property *alpha_property;
+ /**
+ * @zpos_property:
+ * Optional zpos property for this plane. See
+ * drm_plane_create_zpos_property().
+ */
struct drm_property *zpos_property;
+ /**
+ * @rotation_property:
+ * Optional rotation property for this plane. See
+ * drm_plane_create_rotation_property().
+ */
struct drm_property *rotation_property;
/**
@@ -632,10 +705,20 @@ void drm_plane_cleanup(struct drm_plane *plane);
* Given a registered plane, return the index of that plane within a DRM
* device's list of planes.
*/
-static inline unsigned int drm_plane_index(struct drm_plane *plane)
+static inline unsigned int drm_plane_index(const struct drm_plane *plane)
{
return plane->index;
}
+
+/**
+ * drm_plane_mask - find the mask of a registered plane
+ * @plane: plane to find mask for
+ */
+static inline u32 drm_plane_mask(const struct drm_plane *plane)
+{
+ return 1 << drm_plane_index(plane);
+}
+
struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
void drm_plane_force_disable(struct drm_plane *plane);
@@ -671,7 +754,7 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
*/
#define drm_for_each_plane_mask(plane, dev, plane_mask) \
list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
- for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
+ for_each_if ((plane_mask) & drm_plane_mask(plane))
/**
* drm_for_each_legacy_plane - iterate over all planes for legacy userspace
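As a usage sketch (the foo_handle_plane() helper is hypothetical), the new drm_plane_mask() keeps callers from open-coding the shift when building and then walking a plane mask:

	u32 mask = 0;
	struct drm_plane *plane;

	/* Collect the primary planes, then iterate exactly that subset. */
	drm_for_each_plane(plane, dev)
		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			mask |= drm_plane_mask(plane);

	drm_for_each_plane_mask(plane, dev, mask)
		foo_handle_plane(plane);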
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 28d7ce620729..26cee2934781 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -67,8 +67,10 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
-int drm_plane_helper_disable(struct drm_plane *plane);
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_plane_helper_disable(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx);
/* For use by drm_crtc_helper.c */
int drm_plane_helper_commit(struct drm_plane *plane,
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 4d5f5d6cf6a6..d716d653b096 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -82,7 +82,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info);
void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
-int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach);
void drm_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach);
@@ -93,10 +93,6 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir);
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num);
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr);
void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
void *addr);
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index e1a46e9991cc..f3e6eed3e79c 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -69,16 +69,21 @@
struct drm_printer {
/* private: */
void (*printfn)(struct drm_printer *p, struct va_format *vaf);
+ void (*puts)(struct drm_printer *p, const char *str);
void *arg;
const char *prefix;
};
+void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf);
+void __drm_puts_coredump(struct drm_printer *p, const char *str);
void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
+void __drm_puts_seq_file(struct drm_printer *p, const char *str);
void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
__printf(2, 3)
void drm_printf(struct drm_printer *p, const char *f, ...);
+void drm_puts(struct drm_printer *p, const char *str);
__printf(2, 0)
/**
@@ -105,6 +110,71 @@ drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__)
/**
+ * struct drm_print_iterator - local struct used with drm_coredump_printer()
+ * @data: Pointer to the devcoredump output buffer
+ * @start: The offset within the buffer to start writing
+ * @remain: The number of bytes to write for this iteration
+ */
+struct drm_print_iterator {
+ void *data;
+ ssize_t start;
+ ssize_t remain;
+ /* private: */
+ ssize_t offset;
+};
+
+/**
+ * drm_coredump_printer - construct a &drm_printer that can output to a buffer
+ * from the read function for devcoredump
+ * @iter: A pointer to a struct drm_print_iterator for the read instance
+ *
+ * This wrapper extends drm_printf() to work with a dev_coredumpm() callback
+ * function. The passed in drm_print_iterator struct contains the buffer
+ * pointer, size and offset as passed in from devcoredump.
+ *
+ * For example::
+ *
+ * void coredump_read(char *buffer, loff_t offset, size_t count,
+ * void *data, size_t datalen)
+ * {
+ * struct drm_print_iterator iter;
+ * struct drm_printer p;
+ *
+ * iter.data = buffer;
+ * iter.start = offset;
+ * iter.remain = count;
+ *
+ * p = drm_coredump_printer(&iter);
+ *
+ * drm_printf(&p, "foo=%d\n", foo);
+ * }
+ *
+ * void makecoredump(...)
+ * {
+ * ...
+ * dev_coredumpm(dev, THIS_MODULE, data, 0, GFP_KERNEL,
+ * coredump_read, ...)
+ * }
+ *
+ * RETURNS:
+ * The &drm_printer object
+ */
+static inline struct drm_printer
+drm_coredump_printer(struct drm_print_iterator *iter)
+{
+ struct drm_printer p = {
+ .printfn = __drm_printfn_coredump,
+ .puts = __drm_puts_coredump,
+ .arg = iter,
+ };
+
+ /* Set the internal offset of the iterator to zero */
+ iter->offset = 0;
+
+ return p;
+}
+
+/**
* drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
* @f: the &struct seq_file to output to
*
@@ -115,6 +185,7 @@ static inline struct drm_printer drm_seq_file_printer(struct seq_file *f)
{
struct drm_printer p = {
.printfn = __drm_printfn_seq_file,
+ .puts = __drm_puts_seq_file,
.arg = f,
};
return p;
@@ -195,6 +266,7 @@ static inline struct drm_printer drm_debug_printer(const char *prefix)
#define DRM_UT_VBL 0x20
#define DRM_UT_STATE 0x40
#define DRM_UT_LEASE 0x80
+#define DRM_UT_DP 0x100
__printf(3, 4)
void drm_dev_printk(const struct device *dev, const char *level,
@@ -307,6 +379,11 @@ void drm_err(const char *format, ...);
#define DRM_DEBUG_LEASE(fmt, ...) \
drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_DP(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
+#define DRM_DEBUG_DP(fmt, ...) \
+ drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
+
#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
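The new puts path can be exercised through any printer that provides it; a minimal debugfs-style sketch (foo_debugfs_show and its strings are illustrative, only drm_seq_file_printer(), drm_puts() and drm_printf() come from this header):

	static int foo_debugfs_show(struct seq_file *m, void *data)
	{
		struct drm_printer p = drm_seq_file_printer(m);

		/* Constant strings skip format parsing via the puts hook. */
		drm_puts(&p, "header line, no format parsing\n");
		drm_printf(&p, "value: %d\n", 42);

		return 0;
	}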
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 1d5c0b2a8956..c030f6ccab99 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -147,10 +147,10 @@ struct drm_property {
* properties are not exposed to legacy userspace.
*
* DRM_MODE_PROP_IMMUTABLE
- * Set for properties where userspace cannot be changed by
+ * Set for properties whose values cannot be changed by
* userspace. The kernel is allowed to update the value of these
* properties. This is generally used to expose probe state to
- * usersapce, e.g. the EDID, or the connector path property on DP
+ * userspace, e.g. the EDID, or the connector path property on DP
* MST sinks.
*/
uint32_t flags;
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 8758df94e9a0..c7987daeaed0 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -41,6 +41,7 @@ struct drm_vma_offset_node {
rwlock_t vm_lock;
struct drm_mm_node vm_node;
struct rb_root vm_files;
+ bool readonly:1;
};
struct drm_vma_offset_manager {
diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h
new file mode 100644
index 000000000000..23df9d463003
--- /dev/null
+++ b/include/drm/drm_writeback.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+#ifndef __DRM_WRITEBACK_H__
+#define __DRM_WRITEBACK_H__
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <linux/workqueue.h>
+
+struct drm_writeback_connector {
+ struct drm_connector base;
+
+ /**
+ * @encoder: Internal encoder used by the connector to fulfill
+ * the DRM framework requirements. Users of the
+ * &drm_writeback_connector control the behaviour of the @encoder
+ * by passing the enc_helper_funcs parameter to the
+ * drm_writeback_connector_init() function.
+ */
+ struct drm_encoder encoder;
+
+ /**
+ * @pixel_formats_blob_ptr:
+ *
+ * DRM blob property data for the pixel formats list on writeback
+ * connectors
+ * See also drm_writeback_connector_init()
+ */
+ struct drm_property_blob *pixel_formats_blob_ptr;
+
+ /** @job_lock: Protects job_queue */
+ spinlock_t job_lock;
+
+ /**
+ * @job_queue:
+ *
+ * Holds a list of a connector's writeback jobs; the last item is the
+ * most recent. The first item may be either waiting for the hardware
+ * to begin writing, or currently being written.
+ *
+ * See also: drm_writeback_queue_job() and
+ * drm_writeback_signal_completion()
+ */
+ struct list_head job_queue;
+
+ /**
+ * @fence_context:
+ *
+ * timeline context used for fence operations.
+ */
+ unsigned int fence_context;
+ /**
+ * @fence_lock:
+ *
+ * spinlock to protect the fences in the fence_context.
+ */
+ spinlock_t fence_lock;
+ /**
+ * @fence_seqno:
+ *
+ * Seqno variable used as monotonic counter for the fences
+ * created on the connector's timeline.
+ */
+ unsigned long fence_seqno;
+ /**
+ * @timeline_name:
+ *
+ * The name of the connector's fence timeline.
+ */
+ char timeline_name[32];
+};
+
+struct drm_writeback_job {
+ /**
+ * @cleanup_work:
+ *
+ * Used to allow drm_writeback_signal_completion to defer dropping the
+ * framebuffer reference to a workqueue
+ */
+ struct work_struct cleanup_work;
+
+ /**
+ * @list_entry:
+ *
+ * List item for the writeback connector's @job_queue
+ */
+ struct list_head list_entry;
+
+ /**
+ * @fb:
+ *
+ * Framebuffer to be written to by the writeback connector. Do not set
+ * directly, use drm_atomic_set_writeback_fb_for_connector()
+ */
+ struct drm_framebuffer *fb;
+
+ /**
+ * @out_fence:
+ *
+ * Fence which will signal once the writeback has completed
+ */
+ struct dma_fence *out_fence;
+};
+
+static inline struct drm_writeback_connector *
+drm_connector_to_writeback(struct drm_connector *connector)
+{
+ return container_of(connector, struct drm_writeback_connector, base);
+}
+
+int drm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ const struct drm_connector_funcs *con_funcs,
+ const struct drm_encoder_helper_funcs *enc_helper_funcs,
+ const u32 *formats, int n_formats);
+
+void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
+ struct drm_writeback_job *job);
+
+void drm_writeback_cleanup_job(struct drm_writeback_job *job);
+
+void
+drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
+ int status);
+
+struct dma_fence *
+drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector);
+#endif
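A rough initialization sketch, assuming a driver-private foo struct and foo_* funcs tables (none of which are defined by this header):

	static const u32 foo_wb_formats[] = {
		DRM_FORMAT_XRGB8888,
	};

	ret = drm_writeback_connector_init(drm, &foo->wb_connector,
					   &foo_wb_connector_funcs,
					   &foo_wb_encoder_helper_funcs,
					   foo_wb_formats,
					   ARRAY_SIZE(foo_wb_formats));

Completed jobs are then retired with drm_writeback_signal_completion(&foo->wb_connector, 0), typically from the write-done interrupt.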
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index dec655894d08..21c648b0b2a1 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -27,6 +27,8 @@
#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
+#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+
struct drm_gpu_scheduler;
struct drm_sched_rq;
@@ -43,18 +45,37 @@ enum drm_sched_priority {
};
/**
- * drm_sched_entity - A wrapper around a job queue (typically attached
- * to the DRM file_priv).
+ * struct drm_sched_entity - A wrapper around a job queue (typically
+ * attached to the DRM file_priv).
+ *
+ * @list: used to append this struct to the list of entities in the
+ * runqueue.
+ * @rq: runqueue to which this entity belongs.
+ * @rq_lock: lock to modify the runqueue to which this entity belongs.
+ * @job_queue: the list of jobs of this entity.
+ * @fence_seq: a linearly increasing seqno incremented with each
+ * new &drm_sched_fence which is part of the entity.
+ * @fence_context: a unique context for all the fences which belong
+ * to this entity.
+ * The &drm_sched_fence.scheduled uses the
+ * fence_context but &drm_sched_fence.finished uses
+ * fence_context + 1.
+ * @dependency: the dependency fence of the job which is on the top
+ * of the job queue.
+ * @cb: callback for the dependency fence above.
+ * @guilty: points to ctx's guilty.
+ * @last_scheduled: points to the finished fence of the last scheduled job.
+ * @last_user: last group leader pushing a job into the entity.
*
* Entities will emit jobs in order to their corresponding hardware
* ring, and the scheduler will alternate between entities based on
* scheduling policy.
-*/
+ */
struct drm_sched_entity {
struct list_head list;
struct drm_sched_rq *rq;
spinlock_t rq_lock;
- struct drm_gpu_scheduler *sched;
struct spsc_queue job_queue;
@@ -63,47 +84,98 @@ struct drm_sched_entity {
struct dma_fence *dependency;
struct dma_fence_cb cb;
- atomic_t *guilty; /* points to ctx's guilty */
- int fini_status;
- struct dma_fence *last_scheduled;
+ atomic_t *guilty;
+ struct dma_fence *last_scheduled;
+ struct task_struct *last_user;
};
/**
+ * struct drm_sched_rq - queue of entities to be scheduled.
+ *
+ * @lock: to modify the entities list.
+ * @sched: the scheduler to which this rq belongs to.
+ * @entities: list of the entities to be scheduled.
+ * @current_entity: the entity which is to be scheduled.
+ *
* Run queue is a set of entities scheduling command submissions for
* one specific ring. It implements the scheduling policy that selects
* the next entity to emit commands from.
-*/
+ */
struct drm_sched_rq {
spinlock_t lock;
+ struct drm_gpu_scheduler *sched;
struct list_head entities;
struct drm_sched_entity *current_entity;
};
+/**
+ * struct drm_sched_fence - fences corresponding to the scheduling of a job.
+ */
struct drm_sched_fence {
+ /**
+ * @scheduled: this fence is what will be signaled by the scheduler
+ * when the job is scheduled.
+ */
struct dma_fence scheduled;
- /* This fence is what will be signaled by the scheduler when
- * the job is completed.
- *
- * When setting up an out fence for the job, you should use
- * this, since it's available immediately upon
- * drm_sched_job_init(), and the fence returned by the driver
- * from run_job() won't be created until the dependencies have
- * resolved.
- */
+ /**
+ * @finished: this fence is what will be signaled by the scheduler
+ * when the job is completed.
+ *
+ * When setting up an out fence for the job, you should use
+ * this, since it's available immediately upon
+ * drm_sched_job_init(), and the fence returned by the driver
+ * from run_job() won't be created until the dependencies have
+ * resolved.
+ */
struct dma_fence finished;
+ /**
+ * @cb: the callback for the parent fence below.
+ */
struct dma_fence_cb cb;
+ /**
+ * @parent: the fence returned by &drm_sched_backend_ops.run_job
+ * when scheduling the job on hardware. We signal the
+ * &drm_sched_fence.finished fence once parent is signalled.
+ */
struct dma_fence *parent;
+ /**
+ * @sched: the scheduler instance to which the job having this struct
+ * belongs to.
+ */
struct drm_gpu_scheduler *sched;
+ /**
+ * @lock: the lock used by the scheduled and the finished fences.
+ */
spinlock_t lock;
+ /**
+ * @owner: job owner for debugging
+ */
void *owner;
};
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
/**
- * drm_sched_job - A job to be run by an entity.
+ * struct drm_sched_job - A job to be run by an entity.
+ *
+ * @queue_node: used to append this struct to the queue of jobs in an entity.
+ * @sched: the scheduler instance on which this job is scheduled.
+ * @s_fence: contains the fences for the scheduling of job.
+ * @finish_cb: the callback for the finished fence.
+ * @finish_work: schedules the function drm_sched_job_finish() once the job has
+ * finished, to remove the job from the
+ * &drm_gpu_scheduler.ring_mirror_list.
+ * @node: used to append this struct to the &drm_gpu_scheduler.ring_mirror_list.
+ * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the
+ * timeout interval is over.
+ * @id: a unique id assigned to each job scheduled on the scheduler.
+ * @karma: increment on every hang caused by this job. If this exceeds the hang
+ * limit of the scheduler then the job is marked guilty and will not
+ * be scheduled further.
+ * @s_priority: the priority of the job.
+ * @entity: the entity to which this job belongs.
*
* A job is created by the driver using drm_sched_job_init(), and
* should call drm_sched_entity_push_job() once it wants the scheduler
@@ -130,38 +202,64 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
}
/**
+ * struct drm_sched_backend_ops
+ *
* Define the backend operations called by the scheduler,
- * these functions should be implemented in driver side
-*/
+ * these functions should be implemented by the driver.
+ */
struct drm_sched_backend_ops {
- /* Called when the scheduler is considering scheduling this
- * job next, to get another struct dma_fence for this job to
+ /**
+ * @dependency: Called when the scheduler is considering scheduling
+ * this job next, to get another struct dma_fence for this job to
* block on. Once it returns NULL, run_job() may be called.
*/
struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity);
- /* Called to execute the job once all of the dependencies have
- * been resolved. This may be called multiple times, if
+ /**
+ * @run_job: Called to execute the job once all of the dependencies
+ * have been resolved. This may be called multiple times, if
* timedout_job() has happened and drm_sched_job_recovery()
* decides to try it again.
*/
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
- /* Called when a job has taken too long to execute, to trigger
- * GPU recovery.
+ /**
+ * @timedout_job: Called when a job has taken too long to execute,
+ * to trigger GPU recovery.
*/
void (*timedout_job)(struct drm_sched_job *sched_job);
- /* Called once the job's finished fence has been signaled and
- * it's time to clean it up.
+ /**
+ * @free_job: Called once the job's finished fence has been signaled
+ * and it's time to clean it up.
*/
void (*free_job)(struct drm_sched_job *sched_job);
};
/**
- * One scheduler is implemented for each hardware ring
-*/
+ * struct drm_gpu_scheduler
+ *
+ * @ops: backend operations provided by the driver.
+ * @hw_submission_limit: the max size of the hardware queue.
+ * @timeout: the time after which a job is removed from the scheduler.
+ * @name: name of the ring for which this scheduler is being used.
+ * @sched_rq: priority wise array of run queues.
+ * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
+ * is ready to be scheduled.
+ * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
+ * waits on this wait queue until all the scheduled jobs are
+ * finished.
+ * @hw_rq_count: the number of jobs currently in the hardware queue.
+ * @job_id_count: used to assign a unique id to each job.
+ * @thread: the kthread on which the scheduler runs.
+ * @ring_mirror_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the ring_mirror_list.
+ * @hang_limit: once the number of hangs caused by a job crosses this limit,
+ * the job is marked guilty and will not be considered for scheduling
+ * further.
+ *
+ * One scheduler is implemented for each hardware ring.
+ */
struct drm_gpu_scheduler {
const struct drm_sched_backend_ops *ops;
uint32_t hw_submission_limit;
@@ -184,16 +282,13 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity,
- struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+ struct drm_sched_rq **rq_list,
+ unsigned int num_rq_list,
atomic_t *guilty);
-void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity);
-void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity);
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity);
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
+void drm_sched_entity_fini(struct drm_sched_entity *entity);
+void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity);
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
@@ -204,7 +299,6 @@ struct drm_sched_fence *drm_sched_fence_create(
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
int drm_sched_job_init(struct drm_sched_job *job,
- struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
void *owner);
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
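The reworked API drops the explicit scheduler argument: entities bind to one or more run queues, and jobs inherit the scheduler from their entity. A sketch under those assumptions (the entity, job, sched and owner variables live in driver state; error handling is elided):

	struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	/* The entity now names its run queue(s) directly ... */
	r = drm_sched_entity_init(&entity, &rq, 1, NULL);
	if (r)
		return r;

	/* ... and the job derives its scheduler from the entity. */
	r = drm_sched_job_init(&job, &entity, owner);
	if (!r)
		drm_sched_entity_push_job(&job, &entity);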
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 346b1f5cb180..fca22d463e1b 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -24,101 +24,26 @@
#ifndef _I915_COMPONENT_H_
#define _I915_COMPONENT_H_
+#include "drm_audio_component.h"
+
/* MAX_PORT is the number of port
* It must be sync with I915_MAX_PORTS defined i915_drv.h
*/
#define MAX_PORTS 6
/**
- * struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver
- */
-struct i915_audio_component_ops {
- /**
- * @owner: i915 module
- */
- struct module *owner;
- /**
- * @get_power: get the POWER_DOMAIN_AUDIO power well
- *
- * Request the power well to be turned on.
- */
- void (*get_power)(struct device *);
- /**
- * @put_power: put the POWER_DOMAIN_AUDIO power well
- *
- * Allow the power well to be turned off.
- */
- void (*put_power)(struct device *);
- /**
- * @codec_wake_override: Enable/disable codec wake signal
- */
- void (*codec_wake_override)(struct device *, bool enable);
- /**
- * @get_cdclk_freq: Get the Core Display Clock in kHz
- */
- int (*get_cdclk_freq)(struct device *);
- /**
- * @sync_audio_rate: set n/cts based on the sample rate
- *
- * Called from audio driver. After audio driver sets the
- * sample rate, it will call this function to set n/cts
- */
- int (*sync_audio_rate)(struct device *, int port, int pipe, int rate);
- /**
- * @get_eld: fill the audio state and ELD bytes for the given port
- *
- * Called from audio driver to get the HDMI/DP audio state of the given
- * digital port, and also fetch ELD bytes to the given pointer.
- *
- * It returns the byte size of the original ELD (not the actually
- * copied size), zero for an invalid ELD, or a negative error code.
- *
- * Note that the returned size may be over @max_bytes. Then it
- * implies that only a part of ELD has been copied to the buffer.
- */
- int (*get_eld)(struct device *, int port, int pipe, bool *enabled,
- unsigned char *buf, int max_bytes);
-};
-
-/**
- * struct i915_audio_component_audio_ops - Ops implemented by hda driver, called by i915 driver
- */
-struct i915_audio_component_audio_ops {
- /**
- * @audio_ptr: Pointer to be used in call to pin_eld_notify
- */
- void *audio_ptr;
- /**
- * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed
- *
- * Called when the i915 driver has set up audio pipeline or has just
- * begun to tear it down. This allows the HDA driver to update its
- * status accordingly (even when the HDA controller is in power save
- * mode).
- */
- void (*pin_eld_notify)(void *audio_ptr, int port, int pipe);
-};
-
-/**
* struct i915_audio_component - Used for direct communication between i915 and hda drivers
*/
struct i915_audio_component {
/**
- * @dev: i915 device, used as parameter for ops
+ * @base: the drm_audio_component base class
*/
- struct device *dev;
+ struct drm_audio_component base;
+
/**
* @aud_sample_rate: the array of audio sample rate per port
*/
int aud_sample_rate[MAX_PORTS];
- /**
- * @ops: Ops implemented by i915 driver, called by hda driver
- */
- const struct i915_audio_component_ops *ops;
- /**
- * @audio_ops: Ops implemented by hda driver, called by i915 driver
- */
- const struct i915_audio_component_audio_ops *audio_ops;
};
#endif /* _I915_COMPONENT_H_ */
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index c9e5a6621b95..c44703f471b3 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -95,7 +95,9 @@ extern struct resource intel_graphics_stolen_res;
#define I845_TSEG_SIZE_512K (2 << 1)
#define I845_TSEG_SIZE_1M (3 << 1)
-#define INTEL_BSM 0x5c
+#define INTEL_BSM 0x5c
+#define INTEL_GEN11_BSM_DW0 0xc0
+#define INTEL_GEN11_BSM_DW1 0xc4
#define INTEL_BSM_MASK (-(1u << 20))
#endif /* _I915_DRM_H_ */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index bab70ff6e78b..fbf5cfc9b352 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -349,7 +349,6 @@
#define INTEL_KBL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
- INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
@@ -365,11 +364,17 @@
#define INTEL_KBL_GT4_IDS(info) \
INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
+/* AML/KBL Y GT2 */
+#define INTEL_AML_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
+ INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
+
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
INTEL_KBL_GT2_IDS(info), \
INTEL_KBL_GT3_IDS(info), \
- INTEL_KBL_GT4_IDS(info)
+ INTEL_KBL_GT4_IDS(info), \
+ INTEL_AML_GT2_IDS(info)
/* CFL S */
#define INTEL_CFL_S_GT1_IDS(info) \
@@ -388,32 +393,40 @@
INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
-/* CFL U GT1 */
-#define INTEL_CFL_U_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA1, info), \
- INTEL_VGA_DEVICE(0x3EA4, info)
-
/* CFL U GT2 */
#define INTEL_CFL_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA0, info), \
- INTEL_VGA_DEVICE(0x3EA3, info), \
INTEL_VGA_DEVICE(0x3EA9, info)
/* CFL U GT3 */
#define INTEL_CFL_U_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA2, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */
+/* WHL/CFL U GT1 */
+#define INTEL_WHL_U_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA1, info)
+
+/* WHL/CFL U GT2 */
+#define INTEL_WHL_U_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA0, info)
+
+/* WHL/CFL U GT3 */
+#define INTEL_WHL_U_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA2, info), \
+ INTEL_VGA_DEVICE(0x3EA3, info), \
+ INTEL_VGA_DEVICE(0x3EA4, info)
+
#define INTEL_CFL_IDS(info) \
INTEL_CFL_S_GT1_IDS(info), \
INTEL_CFL_S_GT2_IDS(info), \
INTEL_CFL_H_GT2_IDS(info), \
- INTEL_CFL_U_GT1_IDS(info), \
INTEL_CFL_U_GT2_IDS(info), \
- INTEL_CFL_U_GT3_IDS(info)
+ INTEL_CFL_U_GT3_IDS(info), \
+ INTEL_WHL_U_GT1_IDS(info), \
+ INTEL_WHL_U_GT2_IDS(info), \
+ INTEL_WHL_U_GT3_IDS(info)
/* CNL */
#define INTEL_CNL_IDS(info) \
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index 56e4a916b5e8..fe9827d0ca8a 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -16,16 +16,31 @@
/**
* struct tinydrm_device - tinydrm device
- * @drm: DRM device
- * @pipe: Display pipe structure
- * @dirty_lock: Serializes framebuffer flushing
- * @fb_funcs: Framebuffer functions used when creating framebuffers
*/
struct tinydrm_device {
+ /**
+ * @drm: DRM device
+ */
struct drm_device *drm;
+
+ /**
+ * @pipe: Display pipe structure
+ */
struct drm_simple_display_pipe pipe;
+
+ /**
+ * @dirty_lock: Serializes framebuffer flushing
+ */
struct mutex dirty_lock;
+
+ /**
+ * @fb_funcs: Framebuffer functions used when creating framebuffers
+ */
const struct drm_framebuffer_funcs *fb_funcs;
+
+ /**
+ * @fb_dirty: Framebuffer dirty callback
+ */
int (*fb_dirty)(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv, unsigned flags,
unsigned color, struct drm_clip_rect *clips,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index c67977aa1a0e..a01ba2032f0e 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -284,17 +284,29 @@ struct ttm_operation_ctx {
#define TTM_OPT_FLAG_FORCE_ALLOC 0x2
/**
+ * ttm_bo_get - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ */
+static inline void ttm_bo_get(struct ttm_buffer_object *bo)
+{
+ kref_get(&bo->kref);
+}
+
+/**
* ttm_bo_reference - reference a struct ttm_buffer_object
*
* @bo: The buffer object.
*
* Returns a refcounted pointer to a buffer object.
+ *
+ * This function is deprecated. Use ttm_bo_get() instead.
*/
static inline struct ttm_buffer_object *
ttm_bo_reference(struct ttm_buffer_object *bo)
{
- kref_get(&bo->kref);
+ ttm_bo_get(bo);
return bo;
}
@@ -346,11 +358,22 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx);
/**
+ * ttm_bo_put
+ *
+ * @bo: The buffer object.
+ *
+ * Unreference a buffer object.
+ */
+void ttm_bo_put(struct ttm_buffer_object *bo);
+
+/**
* ttm_bo_unref
*
* @bo: The buffer object.
*
* Unreference and clear a pointer to a buffer object.
+ *
+ * This function is deprecated. Use ttm_bo_put() instead.
*/
void ttm_bo_unref(struct ttm_buffer_object **bo);
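A migration sketch for the deprecations above; bo is some already-referenced buffer object:

	/* Old pattern: extra reference via the return value, unref clears
	 * the pointer. */
	struct ttm_buffer_object *ref = ttm_bo_reference(bo);
	ttm_bo_unref(&ref);

	/* New pattern, mirroring the kref_get()/kref_put() naming. */
	ttm_bo_get(bo);
	ttm_bo_put(bo);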
diff --git a/include/drm/ttm/ttm_set_memory.h b/include/drm/ttm/ttm_set_memory.h
new file mode 100644
index 000000000000..7c492b49e38c
--- /dev/null
+++ b/include/drm/ttm/ttm_set_memory.h
@@ -0,0 +1,150 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2018 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Huang Rui <ray.huang@amd.com>
+ */
+
+#ifndef TTM_SET_MEMORY
+#define TTM_SET_MEMORY
+
+#include <linux/mm.h>
+
+#ifdef CONFIG_X86
+
+#include <asm/set_memory.h>
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+ return set_pages_array_wb(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+ return set_pages_array_wc(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ return set_pages_array_uc(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+ return set_pages_wb(page, numpages);
+}
+
+static inline int ttm_set_pages_wc(struct page *page, int numpages)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+
+ return set_memory_wc(addr, numpages);
+}
+
+static inline int ttm_set_pages_uc(struct page *page, int numpages)
+{
+ return set_pages_uc(page, numpages);
+}
+
+#else /* for CONFIG_X86 */
+
+#if IS_ENABLED(CONFIG_AGP)
+
+#include <asm/agp.h>
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ unmap_page_from_agp(pages[i]);
+ return 0;
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+ return 0;
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+ return 0;
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+ int i;
+
+ for (i = 0; i < numpages; i++)
+ unmap_page_from_agp(page++);
+ return 0;
+}
+
+#else /* for CONFIG_AGP */
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+ return 0;
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+ return 0;
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ return 0;
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+ return 0;
+}
+
+#endif /* for CONFIG_AGP */
+
+static inline int ttm_set_pages_wc(struct page *page, int numpages)
+{
+ return 0;
+}
+
+static inline int ttm_set_pages_uc(struct page *page, int numpages)
+{
+ return 0;
+}
+
+#endif /* for CONFIG_X86 */
+
+#endif
diff --git a/include/dt-bindings/clock/actions,s700-cmu.h b/include/dt-bindings/clock/actions,s700-cmu.h
new file mode 100644
index 000000000000..3e1942996724
--- /dev/null
+++ b/include/dt-bindings/clock/actions,s700-cmu.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Device Tree binding constants for Actions Semi S700 Clock Management Unit
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Author: Pathiban Nallathambi <pn@denx.de>
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_S700_H
+#define __DT_BINDINGS_CLOCK_S700_H
+
+#define CLK_NONE 0
+
+/* pll clocks */
+#define CLK_CORE_PLL 1
+#define CLK_DEV_PLL 2
+#define CLK_DDR_PLL 3
+#define CLK_NAND_PLL 4
+#define CLK_DISPLAY_PLL 5
+#define CLK_TVOUT_PLL 6
+#define CLK_CVBS_PLL 7
+#define CLK_AUDIO_PLL 8
+#define CLK_ETHERNET_PLL 9
+
+/* system clock */
+#define CLK_CPU 10
+#define CLK_DEV 11
+#define CLK_AHB 12
+#define CLK_APB 13
+#define CLK_DMAC 14
+#define CLK_NOC0_CLK_MUX 15
+#define CLK_NOC1_CLK_MUX 16
+#define CLK_HP_CLK_MUX 17
+#define CLK_HP_CLK_DIV 18
+#define CLK_NOC1_CLK_DIV 19
+#define CLK_NOC0 20
+#define CLK_NOC1 21
+#define CLK_SENOR_SRC 22
+
+/* peripheral device clock */
+#define CLK_GPIO 23
+#define CLK_TIMER 24
+#define CLK_DSI 25
+#define CLK_CSI 26
+#define CLK_SI 27
+#define CLK_DE 28
+#define CLK_HDE 29
+#define CLK_VDE 30
+#define CLK_VCE 31
+#define CLK_NAND 32
+#define CLK_SD0 33
+#define CLK_SD1 34
+#define CLK_SD2 35
+
+#define CLK_UART0 36
+#define CLK_UART1 37
+#define CLK_UART2 38
+#define CLK_UART3 39
+#define CLK_UART4 40
+#define CLK_UART5 41
+#define CLK_UART6 42
+
+#define CLK_PWM0 43
+#define CLK_PWM1 44
+#define CLK_PWM2 45
+#define CLK_PWM3 46
+#define CLK_PWM4 47
+#define CLK_PWM5 48
+#define CLK_GPU3D 49
+
+#define CLK_I2C0 50
+#define CLK_I2C1 51
+#define CLK_I2C2 52
+#define CLK_I2C3 53
+
+#define CLK_SPI0 54
+#define CLK_SPI1 55
+#define CLK_SPI2 56
+#define CLK_SPI3 57
+
+#define CLK_USB3_480MPLL0 58
+#define CLK_USB3_480MPHY0 59
+#define CLK_USB3_5GPHY 60
+#define CLK_USB3_CCE 61
+#define CLK_USB3_MAC 62
+
+#define CLK_LCD 63
+#define CLK_HDMI_AUDIO 64
+#define CLK_I2SRX 65
+#define CLK_I2STX 66
+
+#define CLK_SENSOR0 67
+#define CLK_SENSOR1 68
+
+#define CLK_HDMI_DEV 69
+
+#define CLK_ETHERNET 70
+#define CLK_RMII_REF 71
+
+#define CLK_USB2H0_PLLEN 72
+#define CLK_USB2H0_PHY 73
+#define CLK_USB2H0_CCE 74
+#define CLK_USB2H1_PLLEN 75
+#define CLK_USB2H1_PHY 76
+#define CLK_USB2H1_CCE 77
+
+#define CLK_TVOUT 78
+
+#define CLK_THERMAL_SENSOR 79
+
+#define CLK_IRC_SWITCH 80
+#define CLK_PCM1 81
+#define CLK_NR_CLKS (CLK_PCM1 + 1)
+
+#endif /* __DT_BINDINGS_CLOCK_S700_H */
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
index 44761849fcbe..f43738607d77 100644
--- a/include/dt-bindings/clock/aspeed-clock.h
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -25,7 +25,7 @@
#define ASPEED_CLK_GATE_RSACLK 19
#define ASPEED_CLK_GATE_UART3CLK 20
#define ASPEED_CLK_GATE_UART4CLK 21
-#define ASPEED_CLK_GATE_SDCLKCLK 22
+#define ASPEED_CLK_GATE_SDCLK 22
#define ASPEED_CLK_GATE_LHCCLK 23
#define ASPEED_CLK_HPLL 24
#define ASPEED_CLK_AHB 25
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
new file mode 100644
index 000000000000..fd9c362099d9
--- /dev/null
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2018 Baylibre SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __AXG_AUDIO_CLKC_BINDINGS_H
+#define __AXG_AUDIO_CLKC_BINDINGS_H
+
+#define AUD_CLKID_SLV_SCLK0 9
+#define AUD_CLKID_SLV_SCLK1 10
+#define AUD_CLKID_SLV_SCLK2 11
+#define AUD_CLKID_SLV_SCLK3 12
+#define AUD_CLKID_SLV_SCLK4 13
+#define AUD_CLKID_SLV_SCLK5 14
+#define AUD_CLKID_SLV_SCLK6 15
+#define AUD_CLKID_SLV_SCLK7 16
+#define AUD_CLKID_SLV_SCLK8 17
+#define AUD_CLKID_SLV_SCLK9 18
+#define AUD_CLKID_SLV_LRCLK0 19
+#define AUD_CLKID_SLV_LRCLK1 20
+#define AUD_CLKID_SLV_LRCLK2 21
+#define AUD_CLKID_SLV_LRCLK3 22
+#define AUD_CLKID_SLV_LRCLK4 23
+#define AUD_CLKID_SLV_LRCLK5 24
+#define AUD_CLKID_SLV_LRCLK6 25
+#define AUD_CLKID_SLV_LRCLK7 26
+#define AUD_CLKID_SLV_LRCLK8 27
+#define AUD_CLKID_SLV_LRCLK9 28
+#define AUD_CLKID_DDR_ARB 29
+#define AUD_CLKID_PDM 30
+#define AUD_CLKID_TDMIN_A 31
+#define AUD_CLKID_TDMIN_B 32
+#define AUD_CLKID_TDMIN_C 33
+#define AUD_CLKID_TDMIN_LB 34
+#define AUD_CLKID_TDMOUT_A 35
+#define AUD_CLKID_TDMOUT_B 36
+#define AUD_CLKID_TDMOUT_C 37
+#define AUD_CLKID_FRDDR_A 38
+#define AUD_CLKID_FRDDR_B 39
+#define AUD_CLKID_FRDDR_C 40
+#define AUD_CLKID_TODDR_A 41
+#define AUD_CLKID_TODDR_B 42
+#define AUD_CLKID_TODDR_C 43
+#define AUD_CLKID_LOOPBACK 44
+#define AUD_CLKID_SPDIFIN 45
+#define AUD_CLKID_SPDIFOUT 46
+#define AUD_CLKID_RESAMPLE 47
+#define AUD_CLKID_POWER_DETECT 48
+#define AUD_CLKID_MST_A_MCLK 49
+#define AUD_CLKID_MST_B_MCLK 50
+#define AUD_CLKID_MST_C_MCLK 51
+#define AUD_CLKID_MST_D_MCLK 52
+#define AUD_CLKID_MST_E_MCLK 53
+#define AUD_CLKID_MST_F_MCLK 54
+#define AUD_CLKID_SPDIFOUT_CLK 55
+#define AUD_CLKID_SPDIFIN_CLK 56
+#define AUD_CLKID_PDM_DCLK 57
+#define AUD_CLKID_PDM_SYSCLK 58
+#define AUD_CLKID_MST_A_SCLK 79
+#define AUD_CLKID_MST_B_SCLK 80
+#define AUD_CLKID_MST_C_SCLK 81
+#define AUD_CLKID_MST_D_SCLK 82
+#define AUD_CLKID_MST_E_SCLK 83
+#define AUD_CLKID_MST_F_SCLK 84
+#define AUD_CLKID_MST_A_LRCLK 86
+#define AUD_CLKID_MST_B_LRCLK 87
+#define AUD_CLKID_MST_C_LRCLK 88
+#define AUD_CLKID_MST_D_LRCLK 89
+#define AUD_CLKID_MST_E_LRCLK 90
+#define AUD_CLKID_MST_F_LRCLK 91
+#define AUD_CLKID_TDMIN_A_SCLK_SEL 116
+#define AUD_CLKID_TDMIN_B_SCLK_SEL 117
+#define AUD_CLKID_TDMIN_C_SCLK_SEL 118
+#define AUD_CLKID_TDMIN_LB_SCLK_SEL 119
+#define AUD_CLKID_TDMOUT_A_SCLK_SEL 120
+#define AUD_CLKID_TDMOUT_B_SCLK_SEL 121
+#define AUD_CLKID_TDMOUT_C_SCLK_SEL 122
+#define AUD_CLKID_TDMIN_A_SCLK 123
+#define AUD_CLKID_TDMIN_B_SCLK 124
+#define AUD_CLKID_TDMIN_C_SCLK 125
+#define AUD_CLKID_TDMIN_LB_SCLK 126
+#define AUD_CLKID_TDMOUT_A_SCLK 127
+#define AUD_CLKID_TDMOUT_B_SCLK 128
+#define AUD_CLKID_TDMOUT_C_SCLK 129
+#define AUD_CLKID_TDMIN_A_LRCLK 130
+#define AUD_CLKID_TDMIN_B_LRCLK 131
+#define AUD_CLKID_TDMIN_C_LRCLK 132
+#define AUD_CLKID_TDMIN_LB_LRCLK 133
+#define AUD_CLKID_TDMOUT_A_LRCLK 134
+#define AUD_CLKID_TDMOUT_B_LRCLK 135
+#define AUD_CLKID_TDMOUT_C_LRCLK 136
+
+#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h
index 555937a25504..fd1f938c38d1 100644
--- a/include/dt-bindings/clock/axg-clkc.h
+++ b/include/dt-bindings/clock/axg-clkc.h
@@ -68,5 +68,9 @@
#define CLKID_SD_EMMC_B_CLK0 59
#define CLKID_SD_EMMC_C_CLK0 60
#define CLKID_HIFI_PLL 69
+#define CLKID_PCIE_CML_EN0 79
+#define CLKID_PCIE_CML_EN1 80
+#define CLKID_MIPI_ENABLE 81
+#define CLKID_GEN_CLK 84
#endif /* __AXG_CLKC_H */
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 7a892be90549..3979d48c025f 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -127,5 +127,6 @@
#define CLKID_VAPB 140
#define CLKID_VDEC_1 153
#define CLKID_VDEC_HEVC 156
+#define CLKID_GEN_CLK 159
#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/imx6sll-clock.h b/include/dt-bindings/clock/imx6sll-clock.h
index 151111e68f4f..1036475f997d 100644
--- a/include/dt-bindings/clock/imx6sll-clock.h
+++ b/include/dt-bindings/clock/imx6sll-clock.h
@@ -197,6 +197,13 @@
#define IMX6SLL_CLK_EXTERN_AUDIO_PODF 171
#define IMX6SLL_CLK_EXTERN_AUDIO 172
-#define IMX6SLL_CLK_END 173
+#define IMX6SLL_CLK_GPIO1 173
+#define IMX6SLL_CLK_GPIO2 174
+#define IMX6SLL_CLK_GPIO3 175
+#define IMX6SLL_CLK_GPIO4 176
+#define IMX6SLL_CLK_GPIO5 177
+#define IMX6SLL_CLK_GPIO6 178
+
+#define IMX6SLL_CLK_END 179
#endif /* __DT_BINDINGS_CLOCK_IMX6SLL_H */
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
index 0aa1d9c3e0b9..f8e0476a3a0e 100644
--- a/include/dt-bindings/clock/imx6ul-clock.h
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -254,6 +254,12 @@
#define IMX6UL_CLK_CKO2_PODF 241
#define IMX6UL_CLK_CKO2 242
#define IMX6UL_CLK_CKO 243
-#define IMX6UL_CLK_END 244
+#define IMX6UL_CLK_GPIO1 244
+#define IMX6UL_CLK_GPIO2 245
+#define IMX6UL_CLK_GPIO3 246
+#define IMX6UL_CLK_GPIO4 247
+#define IMX6UL_CLK_GPIO5 248
+
+#define IMX6UL_CLK_END 249
#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/maxim,max9485.h b/include/dt-bindings/clock/maxim,max9485.h
new file mode 100644
index 000000000000..185b09ce1869
--- /dev/null
+++ b/include/dt-bindings/clock/maxim,max9485.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2018 Daniel Mack
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DT_BINDINGS_MAX9485_CLK_H
+#define __DT_BINDINGS_MAX9485_CLK_H
+
+#define MAX9485_MCLKOUT 0
+#define MAX9485_CLKOUT 1
+#define MAX9485_CLKOUT1 2
+#define MAX9485_CLKOUT2 3
+
+#endif /* __DT_BINDINGS_MAX9485_CLK_H */
diff --git a/include/dt-bindings/clock/px30-cru.h b/include/dt-bindings/clock/px30-cru.h
new file mode 100644
index 000000000000..00101479f7c4
--- /dev/null
+++ b/include/dt-bindings/clock/px30-cru.h
@@ -0,0 +1,389 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_PX30_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_PX30_H
+
+/* core clocks */
+#define PLL_APLL 1
+#define PLL_DPLL 2
+#define PLL_CPLL 3
+#define PLL_NPLL 4
+#define APLL_BOOST_H 5
+#define APLL_BOOST_L 6
+#define ARMCLK 7
+
+/* sclk gates (special clocks) */
+#define USB480M 14
+#define SCLK_PDM 15
+#define SCLK_I2S0_TX 16
+#define SCLK_I2S0_TX_OUT 17
+#define SCLK_I2S0_RX 18
+#define SCLK_I2S0_RX_OUT 19
+#define SCLK_I2S1 20
+#define SCLK_I2S1_OUT 21
+#define SCLK_I2S2 22
+#define SCLK_I2S2_OUT 23
+#define SCLK_UART1 24
+#define SCLK_UART2 25
+#define SCLK_UART3 26
+#define SCLK_UART4 27
+#define SCLK_UART5 28
+#define SCLK_I2C0 29
+#define SCLK_I2C1 30
+#define SCLK_I2C2 31
+#define SCLK_I2C3 32
+#define SCLK_I2C4 33
+#define SCLK_PWM0 34
+#define SCLK_PWM1 35
+#define SCLK_SPI0 36
+#define SCLK_SPI1 37
+#define SCLK_TIMER0 38
+#define SCLK_TIMER1 39
+#define SCLK_TIMER2 40
+#define SCLK_TIMER3 41
+#define SCLK_TIMER4 42
+#define SCLK_TIMER5 43
+#define SCLK_TSADC 44
+#define SCLK_SARADC 45
+#define SCLK_OTP 46
+#define SCLK_OTP_USR 47
+#define SCLK_CRYPTO 48
+#define SCLK_CRYPTO_APK 49
+#define SCLK_DDRC 50
+#define SCLK_ISP 51
+#define SCLK_CIF_OUT 52
+#define SCLK_RGA_CORE 53
+#define SCLK_VOPB_PWM 54
+#define SCLK_NANDC 55
+#define SCLK_SDIO 56
+#define SCLK_EMMC 57
+#define SCLK_SFC 58
+#define SCLK_SDMMC 59
+#define SCLK_OTG_ADP 60
+#define SCLK_GMAC_SRC 61
+#define SCLK_GMAC 62
+#define SCLK_GMAC_RX_TX 63
+#define SCLK_MAC_REF 64
+#define SCLK_MAC_REFOUT 65
+#define SCLK_MAC_OUT 66
+#define SCLK_SDMMC_DRV 67
+#define SCLK_SDMMC_SAMPLE 68
+#define SCLK_SDIO_DRV 69
+#define SCLK_SDIO_SAMPLE 70
+#define SCLK_EMMC_DRV 71
+#define SCLK_EMMC_SAMPLE 72
+#define SCLK_GPU 73
+#define SCLK_PVTM 74
+#define SCLK_CORE_VPU 75
+#define SCLK_GMAC_RMII 76
+#define SCLK_UART2_SRC 77
+#define SCLK_NANDC_DIV 78
+#define SCLK_NANDC_DIV50 79
+#define SCLK_SDIO_DIV 80
+#define SCLK_SDIO_DIV50 81
+#define SCLK_EMMC_DIV 82
+#define SCLK_EMMC_DIV50 83
+#define SCLK_DDRCLK 84
+#define SCLK_UART1_SRC 85
+
+/* dclk gates */
+#define DCLK_VOPB 150
+#define DCLK_VOPL 151
+
+/* aclk gates */
+#define ACLK_GPU 170
+#define ACLK_BUS_PRE 171
+#define ACLK_CRYPTO 172
+#define ACLK_VI_PRE 173
+#define ACLK_VO_PRE 174
+#define ACLK_VPU 175
+#define ACLK_PERI_PRE 176
+#define ACLK_GMAC 178
+#define ACLK_CIF 179
+#define ACLK_ISP 180
+#define ACLK_VOPB 181
+#define ACLK_VOPL 182
+#define ACLK_RGA 183
+#define ACLK_GIC 184
+#define ACLK_DCF 186
+#define ACLK_DMAC 187
+#define ACLK_BUS_SRC 188
+#define ACLK_PERI_SRC 189
+
+/* hclk gates */
+#define HCLK_BUS_PRE 240
+#define HCLK_CRYPTO 241
+#define HCLK_VI_PRE 242
+#define HCLK_VO_PRE 243
+#define HCLK_VPU 244
+#define HCLK_PERI_PRE 245
+#define HCLK_MMC_NAND 246
+#define HCLK_SDMMC 247
+#define HCLK_USB 248
+#define HCLK_CIF 249
+#define HCLK_ISP 250
+#define HCLK_VOPB 251
+#define HCLK_VOPL 252
+#define HCLK_RGA 253
+#define HCLK_NANDC 254
+#define HCLK_SDIO 255
+#define HCLK_EMMC 256
+#define HCLK_SFC 257
+#define HCLK_OTG 258
+#define HCLK_HOST 259
+#define HCLK_HOST_ARB 260
+#define HCLK_PDM 261
+#define HCLK_I2S0 262
+#define HCLK_I2S1 263
+#define HCLK_I2S2 264
+
+/* pclk gates */
+#define PCLK_BUS_PRE 320
+#define PCLK_DDR 321
+#define PCLK_VO_PRE 322
+#define PCLK_GMAC 323
+#define PCLK_MIPI_DSI 324
+#define PCLK_MIPIDSIPHY 325
+#define PCLK_MIPICSIPHY 326
+#define PCLK_USB_GRF 327
+#define PCLK_DCF 328
+#define PCLK_UART1 329
+#define PCLK_UART2 330
+#define PCLK_UART3 331
+#define PCLK_UART4 332
+#define PCLK_UART5 333
+#define PCLK_I2C0 334
+#define PCLK_I2C1 335
+#define PCLK_I2C2 336
+#define PCLK_I2C3 337
+#define PCLK_I2C4 338
+#define PCLK_PWM0 339
+#define PCLK_PWM1 340
+#define PCLK_SPI0 341
+#define PCLK_SPI1 342
+#define PCLK_SARADC 343
+#define PCLK_TSADC 344
+#define PCLK_TIMER 345
+#define PCLK_OTP_NS 346
+#define PCLK_WDT_NS 347
+#define PCLK_GPIO1 348
+#define PCLK_GPIO2 349
+#define PCLK_GPIO3 350
+#define PCLK_ISP 351
+#define PCLK_CIF 352
+#define PCLK_OTP_PHY 353
+
+#define CLK_NR_CLKS (PCLK_OTP_PHY + 1)
+
+/* pmu-clocks indices */
+
+#define PLL_GPLL 1
+
+#define SCLK_RTC32K_PMU 4
+#define SCLK_WIFI_PMU 5
+#define SCLK_UART0_PMU 6
+#define SCLK_PVTM_PMU 7
+#define PCLK_PMU_PRE 8
+#define SCLK_REF24M_PMU 9
+#define SCLK_USBPHY_REF 10
+#define SCLK_MIPIDSIPHY_REF 11
+
+#define XIN24M_DIV 12
+
+#define PCLK_GPIO0_PMU 20
+#define PCLK_UART0_PMU 21
+
+#define CLKPMU_NR_CLKS (PCLK_UART0_PMU + 1)
+
+/* soft-reset indices */
+#define SRST_CORE0_PO 0
+#define SRST_CORE1_PO 1
+#define SRST_CORE2_PO 2
+#define SRST_CORE3_PO 3
+#define SRST_CORE0 4
+#define SRST_CORE1 5
+#define SRST_CORE2 6
+#define SRST_CORE3 7
+#define SRST_CORE0_DBG 8
+#define SRST_CORE1_DBG 9
+#define SRST_CORE2_DBG 10
+#define SRST_CORE3_DBG 11
+#define SRST_TOPDBG 12
+#define SRST_CORE_NOC 13
+#define SRST_STRC_A 14
+#define SRST_L2C 15
+
+#define SRST_DAP 16
+#define SRST_CORE_PVTM 17
+#define SRST_GPU 18
+#define SRST_GPU_NIU 19
+#define SRST_UPCTL2 20
+#define SRST_UPCTL2_A 21
+#define SRST_UPCTL2_P 22
+#define SRST_MSCH 23
+#define SRST_MSCH_P 24
+#define SRST_DDRMON_P 25
+#define SRST_DDRSTDBY_P 26
+#define SRST_DDRSTDBY 27
+#define SRST_DDRGRF_P 28
+#define SRST_AXI_SPLIT_A 29
+#define SRST_AXI_CMD_A 30
+#define SRST_AXI_CMD_P 31
+
+#define SRST_DDRPHY 32
+#define SRST_DDRPHYDIV 33
+#define SRST_DDRPHY_P 34
+#define SRST_VPU_A 36
+#define SRST_VPU_NIU_A 37
+#define SRST_VPU_H 38
+#define SRST_VPU_NIU_H 39
+#define SRST_VI_NIU_A 40
+#define SRST_VI_NIU_H 41
+#define SRST_ISP_H 42
+#define SRST_ISP 43
+#define SRST_CIF_A 44
+#define SRST_CIF_H 45
+#define SRST_CIF_PCLKIN 46
+#define SRST_MIPICSIPHY_P 47
+
+#define SRST_VO_NIU_A 48
+#define SRST_VO_NIU_H 49
+#define SRST_VO_NIU_P 50
+#define SRST_VOPB_A 51
+#define SRST_VOPB_H 52
+#define SRST_VOPB 53
+#define SRST_PWM_VOPB 54
+#define SRST_VOPL_A 55
+#define SRST_VOPL_H 56
+#define SRST_VOPL 57
+#define SRST_RGA_A 58
+#define SRST_RGA_H 59
+#define SRST_RGA 60
+#define SRST_MIPIDSI_HOST_P 61
+#define SRST_MIPIDSIPHY_P 62
+#define SRST_VPU_CORE 63
+
+#define SRST_PERI_NIU_A 64
+#define SRST_USB_NIU_H 65
+#define SRST_USB2OTG_H 66
+#define SRST_USB2OTG 67
+#define SRST_USB2OTG_ADP 68
+#define SRST_USB2HOST_H 69
+#define SRST_USB2HOST_ARB_H 70
+#define SRST_USB2HOST_AUX_H 71
+#define SRST_USB2HOST_EHCI 72
+#define SRST_USB2HOST 73
+#define SRST_USBPHYPOR 74
+#define SRST_USBPHY_OTG_PORT 75
+#define SRST_USBPHY_HOST_PORT 76
+#define SRST_USBPHY_GRF 77
+#define SRST_CPU_BOOST_P 78
+#define SRST_CPU_BOOST 79
+
+#define SRST_MMC_NAND_NIU_H 80
+#define SRST_SDIO_H 81
+#define SRST_EMMC_H 82
+#define SRST_SFC_H 83
+#define SRST_SFC 84
+#define SRST_SDCARD_NIU_H 85
+#define SRST_SDMMC_H 86
+#define SRST_NANDC_H 89
+#define SRST_NANDC 90
+#define SRST_GMAC_NIU_A 92
+#define SRST_GMAC_NIU_P 93
+#define SRST_GMAC_A 94
+
+#define SRST_PMU_NIU_P 96
+#define SRST_PMU_SGRF_P 97
+#define SRST_PMU_GRF_P 98
+#define SRST_PMU 99
+#define SRST_PMU_MEM_P 100
+#define SRST_PMU_GPIO0_P 101
+#define SRST_PMU_UART0_P 102
+#define SRST_PMU_CRU_P 103
+#define SRST_PMU_PVTM 104
+#define SRST_PMU_UART 105
+#define SRST_PMU_NIU_H 106
+#define SRST_PMU_DDR_FAIL_SAVE 107
+#define SRST_PMU_CORE_PERF_A 108
+#define SRST_PMU_CORE_GRF_P 109
+#define SRST_PMU_GPU_PERF_A 110
+#define SRST_PMU_GPU_GRF_P 111
+
+#define SRST_CRYPTO_NIU_A 112
+#define SRST_CRYPTO_NIU_H 113
+#define SRST_CRYPTO_A 114
+#define SRST_CRYPTO_H 115
+#define SRST_CRYPTO 116
+#define SRST_CRYPTO_APK 117
+#define SRST_BUS_NIU_H 120
+#define SRST_USB_NIU_P 121
+#define SRST_BUS_TOP_NIU_P 122
+#define SRST_INTMEM_A 123
+#define SRST_GIC_A 124
+#define SRST_ROM_H 126
+#define SRST_DCF_A 127
+
+#define SRST_DCF_P 128
+#define SRST_PDM_H 129
+#define SRST_PDM 130
+#define SRST_I2S0_H 131
+#define SRST_I2S0_TX 132
+#define SRST_I2S1_H 133
+#define SRST_I2S1 134
+#define SRST_I2S2_H 135
+#define SRST_I2S2 136
+#define SRST_UART1_P 137
+#define SRST_UART1 138
+#define SRST_UART2_P 139
+#define SRST_UART2 140
+#define SRST_UART3_P 141
+#define SRST_UART3 142
+#define SRST_UART4_P 143
+
+#define SRST_UART4 144
+#define SRST_UART5_P 145
+#define SRST_UART5 146
+#define SRST_I2C0_P 147
+#define SRST_I2C0 148
+#define SRST_I2C1_P 149
+#define SRST_I2C1 150
+#define SRST_I2C2_P 151
+#define SRST_I2C2 152
+#define SRST_I2C3_P 153
+#define SRST_I2C3 154
+#define SRST_PWM0_P 157
+#define SRST_PWM0 158
+#define SRST_PWM1_P 159
+
+#define SRST_PWM1 160
+#define SRST_SPI0_P 161
+#define SRST_SPI0 162
+#define SRST_SPI1_P 163
+#define SRST_SPI1 164
+#define SRST_SARADC_P 165
+#define SRST_SARADC 166
+#define SRST_TSADC_P 167
+#define SRST_TSADC 168
+#define SRST_TIMER_P 169
+#define SRST_TIMER0 170
+#define SRST_TIMER1 171
+#define SRST_TIMER2 172
+#define SRST_TIMER3 173
+#define SRST_TIMER4 174
+#define SRST_TIMER5 175
+
+#define SRST_OTP_NS_P 176
+#define SRST_OTP_NS_SBPI 177
+#define SRST_OTP_NS_USR 178
+#define SRST_OTP_PHY_P 179
+#define SRST_OTP_PHY 180
+#define SRST_WDT_NS_P 181
+#define SRST_GPIO1_P 182
+#define SRST_GPIO2_P 183
+#define SRST_GPIO3_P 184
+#define SRST_SGRF_P 185
+#define SRST_GRF_P 186
+#define SRST_I2S0_RX 191
+
+#endif
diff --git a/include/dt-bindings/clock/pxa-clock.h b/include/dt-bindings/clock/pxa-clock.h
index e65803b1dc7e..0b0fd2b01538 100644
--- a/include/dt-bindings/clock/pxa-clock.h
+++ b/include/dt-bindings/clock/pxa-clock.h
@@ -72,6 +72,7 @@
#define CLK_USIM 58
#define CLK_USIM1 59
#define CLK_USMI0 60
-#define CLK_MAX 61
+#define CLK_OSC32k768 61
+#define CLK_MAX 62
#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
new file mode 100644
index 000000000000..11eed4bc9646
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H
+
+/* DISP_CC clock registers */
+#define DISP_CC_MDSS_AHB_CLK 0
+#define DISP_CC_MDSS_AXI_CLK 1
+#define DISP_CC_MDSS_BYTE0_CLK 2
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 4
+#define DISP_CC_MDSS_BYTE1_CLK 5
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK 8
+#define DISP_CC_MDSS_ESC0_CLK_SRC 9
+#define DISP_CC_MDSS_ESC1_CLK 10
+#define DISP_CC_MDSS_ESC1_CLK_SRC 11
+#define DISP_CC_MDSS_MDP_CLK 12
+#define DISP_CC_MDSS_MDP_CLK_SRC 13
+#define DISP_CC_MDSS_MDP_LUT_CLK 14
+#define DISP_CC_MDSS_PCLK0_CLK 15
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 16
+#define DISP_CC_MDSS_PCLK1_CLK 17
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 18
+#define DISP_CC_MDSS_ROT_CLK 19
+#define DISP_CC_MDSS_ROT_CLK_SRC 20
+#define DISP_CC_MDSS_RSCC_AHB_CLK 21
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 22
+#define DISP_CC_MDSS_VSYNC_CLK 23
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 24
+#define DISP_CC_PLL0 25
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 26
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 27
+
+/* DISP_CC Reset */
+#define DISP_CC_MDSS_RSCC_BCR 0
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index aca61264f12c..f96fc2dbf60e 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -192,6 +192,8 @@
#define GCC_VS_CTRL_CLK_SRC 182
#define GCC_VSENSOR_CLK_SRC 183
#define GPLL4 184
+#define GCC_CPUSS_DVM_BUS_CLK 185
+#define GCC_CPUSS_GNOC_CLK 186
/* GCC Resets */
#define GCC_MMSS_BCR 0
diff --git a/include/dt-bindings/clock/r9a06g032-sysctrl.h b/include/dt-bindings/clock/r9a06g032-sysctrl.h
new file mode 100644
index 000000000000..90c0f3dc1ba1
--- /dev/null
+++ b/include/dt-bindings/clock/r9a06g032-sysctrl.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * R9A06G032 sysctrl IDs
+ *
+ * Copyright (C) 2018 Renesas Electronics Europe Limited
+ *
+ * Michel Pollet <michel.pollet@bp.renesas.com>, <buserror@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_R9A06G032_SYSCTRL_H__
+#define __DT_BINDINGS_R9A06G032_SYSCTRL_H__
+
+#define R9A06G032_CLK_PLL_USB 1
+#define R9A06G032_CLK_48 1 /* AKA CLK_PLL_USB */
+#define R9A06G032_MSEBIS_CLK 3 /* AKA CLKOUT_D16 */
+#define R9A06G032_MSEBIM_CLK 3 /* AKA CLKOUT_D16 */
+#define R9A06G032_CLK_DDRPHY_PLLCLK 5 /* AKA CLKOUT_D1OR2 */
+#define R9A06G032_CLK50 6 /* AKA CLKOUT_D20 */
+#define R9A06G032_CLK25 7 /* AKA CLKOUT_D40 */
+#define R9A06G032_CLK125 9 /* AKA CLKOUT_D8 */
+#define R9A06G032_CLK_P5_PG1 17 /* AKA DIV_P5_PG */
+#define R9A06G032_CLK_REF_SYNC 21 /* AKA DIV_REF_SYNC */
+#define R9A06G032_CLK_25_PG4 26
+#define R9A06G032_CLK_25_PG5 27
+#define R9A06G032_CLK_25_PG6 28
+#define R9A06G032_CLK_25_PG7 29
+#define R9A06G032_CLK_25_PG8 30
+#define R9A06G032_CLK_ADC 31
+#define R9A06G032_CLK_ECAT100 32
+#define R9A06G032_CLK_HSR100 33
+#define R9A06G032_CLK_I2C0 34
+#define R9A06G032_CLK_I2C1 35
+#define R9A06G032_CLK_MII_REF 36
+#define R9A06G032_CLK_NAND 37
+#define R9A06G032_CLK_NOUSBP2_PG6 38
+#define R9A06G032_CLK_P1_PG2 39
+#define R9A06G032_CLK_P1_PG3 40
+#define R9A06G032_CLK_P1_PG4 41
+#define R9A06G032_CLK_P4_PG3 42
+#define R9A06G032_CLK_P4_PG4 43
+#define R9A06G032_CLK_P6_PG1 44
+#define R9A06G032_CLK_P6_PG2 45
+#define R9A06G032_CLK_P6_PG3 46
+#define R9A06G032_CLK_P6_PG4 47
+#define R9A06G032_CLK_PCI_USB 48
+#define R9A06G032_CLK_QSPI0 49
+#define R9A06G032_CLK_QSPI1 50
+#define R9A06G032_CLK_RGMII_REF 51
+#define R9A06G032_CLK_RMII_REF 52
+#define R9A06G032_CLK_SDIO0 53
+#define R9A06G032_CLK_SDIO1 54
+#define R9A06G032_CLK_SERCOS100 55
+#define R9A06G032_CLK_SLCD 56
+#define R9A06G032_CLK_SPI0 57
+#define R9A06G032_CLK_SPI1 58
+#define R9A06G032_CLK_SPI2 59
+#define R9A06G032_CLK_SPI3 60
+#define R9A06G032_CLK_SPI4 61
+#define R9A06G032_CLK_SPI5 62
+#define R9A06G032_CLK_SWITCH 63
+#define R9A06G032_HCLK_ECAT125 65
+#define R9A06G032_HCLK_PINCONFIG 66
+#define R9A06G032_HCLK_SERCOS 67
+#define R9A06G032_HCLK_SGPIO2 68
+#define R9A06G032_HCLK_SGPIO3 69
+#define R9A06G032_HCLK_SGPIO4 70
+#define R9A06G032_HCLK_TIMER0 71
+#define R9A06G032_HCLK_TIMER1 72
+#define R9A06G032_HCLK_USBF 73
+#define R9A06G032_HCLK_USBH 74
+#define R9A06G032_HCLK_USBPM 75
+#define R9A06G032_CLK_48_PG_F 76
+#define R9A06G032_CLK_48_PG4 77
+#define R9A06G032_CLK_DDRPHY_PCLK 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_FW 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_CRYPTO 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_A7MP 84 /* AKA DIV_CA7 */
+#define R9A06G032_HCLK_CAN0 85
+#define R9A06G032_HCLK_CAN1 86
+#define R9A06G032_HCLK_DELTASIGMA 87
+#define R9A06G032_HCLK_PWMPTO 88
+#define R9A06G032_HCLK_RSV 89
+#define R9A06G032_HCLK_SGPIO0 90
+#define R9A06G032_HCLK_SGPIO1 91
+#define R9A06G032_RTOS_MDC 92
+#define R9A06G032_CLK_CM3 93
+#define R9A06G032_CLK_DDRC 94
+#define R9A06G032_CLK_ECAT25 95
+#define R9A06G032_CLK_HSR50 96
+#define R9A06G032_CLK_HW_RTOS 97
+#define R9A06G032_CLK_SERCOS50 98
+#define R9A06G032_HCLK_ADC 99
+#define R9A06G032_HCLK_CM3 100
+#define R9A06G032_HCLK_CRYPTO_EIP150 101
+#define R9A06G032_HCLK_CRYPTO_EIP93 102
+#define R9A06G032_HCLK_DDRC 103
+#define R9A06G032_HCLK_DMA0 104
+#define R9A06G032_HCLK_DMA1 105
+#define R9A06G032_HCLK_GMAC0 106
+#define R9A06G032_HCLK_GMAC1 107
+#define R9A06G032_HCLK_GPIO0 108
+#define R9A06G032_HCLK_GPIO1 109
+#define R9A06G032_HCLK_GPIO2 110
+#define R9A06G032_HCLK_HSR 111
+#define R9A06G032_HCLK_I2C0 112
+#define R9A06G032_HCLK_I2C1 113
+#define R9A06G032_HCLK_LCD 114
+#define R9A06G032_HCLK_MSEBI_M 115
+#define R9A06G032_HCLK_MSEBI_S 116
+#define R9A06G032_HCLK_NAND 117
+#define R9A06G032_HCLK_PG_I 118
+#define R9A06G032_HCLK_PG19 119
+#define R9A06G032_HCLK_PG20 120
+#define R9A06G032_HCLK_PG3 121
+#define R9A06G032_HCLK_PG4 122
+#define R9A06G032_HCLK_QSPI0 123
+#define R9A06G032_HCLK_QSPI1 124
+#define R9A06G032_HCLK_ROM 125
+#define R9A06G032_HCLK_RTC 126
+#define R9A06G032_HCLK_SDIO0 127
+#define R9A06G032_HCLK_SDIO1 128
+#define R9A06G032_HCLK_SEMAP 129
+#define R9A06G032_HCLK_SPI0 130
+#define R9A06G032_HCLK_SPI1 131
+#define R9A06G032_HCLK_SPI2 132
+#define R9A06G032_HCLK_SPI3 133
+#define R9A06G032_HCLK_SPI4 134
+#define R9A06G032_HCLK_SPI5 135
+#define R9A06G032_HCLK_SWITCH 136
+#define R9A06G032_HCLK_SWITCH_RG 137
+#define R9A06G032_HCLK_UART0 138
+#define R9A06G032_HCLK_UART1 139
+#define R9A06G032_HCLK_UART2 140
+#define R9A06G032_HCLK_UART3 141
+#define R9A06G032_HCLK_UART4 142
+#define R9A06G032_HCLK_UART5 143
+#define R9A06G032_HCLK_UART6 144
+#define R9A06G032_HCLK_UART7 145
+#define R9A06G032_CLK_UART0 146
+#define R9A06G032_CLK_UART1 147
+#define R9A06G032_CLK_UART2 148
+#define R9A06G032_CLK_UART3 149
+#define R9A06G032_CLK_UART4 150
+#define R9A06G032_CLK_UART5 151
+#define R9A06G032_CLK_UART6 152
+#define R9A06G032_CLK_UART7 153
+
+#endif /* __DT_BINDINGS_R9A06G032_SYSCTRL_H__ */
diff --git a/include/dt-bindings/clock/rk3399-ddr.h b/include/dt-bindings/clock/rk3399-ddr.h
new file mode 100644
index 000000000000..ed2280844963
--- /dev/null
+++ b/include/dt-bindings/clock/rk3399-ddr.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+
+#ifndef DT_BINDINGS_DDR_H
+#define DT_BINDINGS_DDR_H
+
+/*
+ * DDR3 SDRAM Standard Speed Bins include tCK, tRCD, tRP, tRAS and tRC for
+ * each corresponding bin.
+ */
+
+/* DDR3-800 (5-5-5) */
+#define DDR3_800D 0
+/* DDR3-800 (6-6-6) */
+#define DDR3_800E 1
+/* DDR3-1066 (6-6-6) */
+#define DDR3_1066E 2
+/* DDR3-1066 (7-7-7) */
+#define DDR3_1066F 3
+/* DDR3-1066 (8-8-8) */
+#define DDR3_1066G 4
+/* DDR3-1333 (7-7-7) */
+#define DDR3_1333F 5
+/* DDR3-1333 (8-8-8) */
+#define DDR3_1333G 6
+/* DDR3-1333 (9-9-9) */
+#define DDR3_1333H 7
+/* DDR3-1333 (10-10-10) */
+#define DDR3_1333J 8
+/* DDR3-1600 (8-8-8) */
+#define DDR3_1600G 9
+/* DDR3-1600 (9-9-9) */
+#define DDR3_1600H 10
+/* DDR3-1600 (10-10-10) */
+#define DDR3_1600J 11
+/* DDR3-1600 (11-11-11) */
+#define DDR3_1600K 12
+/* DDR3-1866 (10-10-10) */
+#define DDR3_1866J 13
+/* DDR3-1866 (11-11-11) */
+#define DDR3_1866K 14
+/* DDR3-1866 (12-12-12) */
+#define DDR3_1866L 15
+/* DDR3-1866 (13-13-13) */
+#define DDR3_1866M 16
+/* DDR3-2133 (11-11-11) */
+#define DDR3_2133K 17
+/* DDR3-2133 (12-12-12) */
+#define DDR3_2133L 18
+/* DDR3-2133 (13-13-13) */
+#define DDR3_2133M 19
+/* DDR3-2133 (14-14-14) */
+#define DDR3_2133N 20
+/* DDR3 ATF default */
+#define DDR3_DEFAULT 21
+
+#endif
diff --git a/include/dt-bindings/clock/sun8i-r40-ccu.h b/include/dt-bindings/clock/sun8i-r40-ccu.h
index 4fa5f69fc297..f9e15a235626 100644
--- a/include/dt-bindings/clock/sun8i-r40-ccu.h
+++ b/include/dt-bindings/clock/sun8i-r40-ccu.h
@@ -43,6 +43,10 @@
#ifndef _DT_BINDINGS_CLK_SUN8I_R40_H_
#define _DT_BINDINGS_CLK_SUN8I_R40_H_
+#define CLK_PLL_VIDEO0 7
+
+#define CLK_PLL_VIDEO1 16
+
#define CLK_CPU 24
#define CLK_BUS_MIPI_DSI 29
diff --git a/include/dt-bindings/clock/sun8i-tcon-top.h b/include/dt-bindings/clock/sun8i-tcon-top.h
new file mode 100644
index 000000000000..25164d767835
--- /dev/null
+++ b/include/dt-bindings/clock/sun8i-tcon-top.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/* Copyright (C) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#ifndef _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_
+#define _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_
+
+#define CLK_TCON_TOP_TV0 0
+#define CLK_TCON_TOP_TV1 1
+#define CLK_TCON_TOP_DSI 2
+
+#endif /* _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_ */
diff --git a/include/dt-bindings/gce/mt8173-gce.h b/include/dt-bindings/gce/mt8173-gce.h
new file mode 100644
index 000000000000..ffcf94ba96c6
--- /dev/null
+++ b/include/dt-bindings/gce/mt8173-gce.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT8173_H
+#define _DT_BINDINGS_GCE_MT8173_H
+
+/* GCE HW thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_HIGHEST 1
+
+/* GCE SUBSYS */
+#define SUBSYS_1400XXXX 1
+#define SUBSYS_1401XXXX 2
+#define SUBSYS_1402XXXX 3
+
+/* GCE HW EVENT */
+#define CMDQ_EVENT_DISP_OVL0_SOF 11
+#define CMDQ_EVENT_DISP_OVL1_SOF 12
+#define CMDQ_EVENT_DISP_RDMA0_SOF 13
+#define CMDQ_EVENT_DISP_RDMA1_SOF 14
+#define CMDQ_EVENT_DISP_RDMA2_SOF 15
+#define CMDQ_EVENT_DISP_WDMA0_SOF 16
+#define CMDQ_EVENT_DISP_WDMA1_SOF 17
+#define CMDQ_EVENT_DISP_OVL0_EOF 39
+#define CMDQ_EVENT_DISP_OVL1_EOF 40
+#define CMDQ_EVENT_DISP_RDMA0_EOF 41
+#define CMDQ_EVENT_DISP_RDMA1_EOF 42
+#define CMDQ_EVENT_DISP_RDMA2_EOF 43
+#define CMDQ_EVENT_DISP_WDMA0_EOF 44
+#define CMDQ_EVENT_DISP_WDMA1_EOF 45
+#define CMDQ_EVENT_MUTEX0_STREAM_EOF 53
+#define CMDQ_EVENT_MUTEX1_STREAM_EOF 54
+#define CMDQ_EVENT_MUTEX2_STREAM_EOF 55
+#define CMDQ_EVENT_MUTEX3_STREAM_EOF 56
+#define CMDQ_EVENT_MUTEX4_STREAM_EOF 57
+#define CMDQ_EVENT_DISP_RDMA0_UNDERRUN 63
+#define CMDQ_EVENT_DISP_RDMA1_UNDERRUN 64
+#define CMDQ_EVENT_DISP_RDMA2_UNDERRUN 65
+
+#endif
diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h
index 2732d6c0fb39..eb81867eac77 100644
--- a/include/dt-bindings/pinctrl/at91.h
+++ b/include/dt-bindings/pinctrl/at91.h
@@ -39,4 +39,8 @@
#define AT91_PERIPH_C 3
#define AT91_PERIPH_D 4
+#define ATMEL_PIO_DRVSTR_LO 1
+#define ATMEL_PIO_DRVSTR_ME 2
+#define ATMEL_PIO_DRVSTR_HI 3
+
#endif /* __DT_BINDINGS_AT91_PINCTRL_H__ */
diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h
index ceb672305f59..b1832506b923 100644
--- a/include/dt-bindings/pinctrl/samsung.h
+++ b/include/dt-bindings/pinctrl/samsung.h
@@ -1,14 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Samsung's Exynos pinctrl bindings
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
* http://www.samsung.com
* Author: Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__
#define __DT_BINDINGS_PINCTRL_SAMSUNG_H__
diff --git a/include/dt-bindings/regulator/maxim,max77802.h b/include/dt-bindings/regulator/maxim,max77802.h
index cf28631d7109..d0baba1973d4 100644
--- a/include/dt-bindings/regulator/maxim,max77802.h
+++ b/include/dt-bindings/regulator/maxim,max77802.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for the Maxim 77802 PMIC regulators
*/
diff --git a/include/dt-bindings/regulator/qcom,rpmh-regulator.h b/include/dt-bindings/regulator/qcom,rpmh-regulator.h
new file mode 100644
index 000000000000..86713dcf9e02
--- /dev/null
+++ b/include/dt-bindings/regulator/qcom,rpmh-regulator.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+
+#ifndef __QCOM_RPMH_REGULATOR_H
+#define __QCOM_RPMH_REGULATOR_H
+
+/*
+ * These mode constants may be used to specify modes for various RPMh regulator
+ * device tree properties (e.g. regulator-initial-mode). Each type of regulator
+ * supports a subset of the possible modes.
+ *
+ * %RPMH_REGULATOR_MODE_RET: Retention mode in which only an extremely small
+ * load current is allowed. This mode is supported
+ * by LDO and SMPS type regulators.
+ * %RPMH_REGULATOR_MODE_LPM: Low power mode in which a small load current is
+ * allowed. This mode corresponds to PFM for SMPS
+ * and BOB type regulators. This mode is supported
+ * by LDO, HFSMPS, BOB, and PMIC4 FTSMPS type
+ * regulators.
+ * %RPMH_REGULATOR_MODE_AUTO: Auto mode in which the regulator hardware
+ * automatically switches between LPM and HPM based
+ * upon the real-time load current. This mode is
+ * supported by HFSMPS, BOB, and PMIC4 FTSMPS type
+ * regulators.
+ * %RPMH_REGULATOR_MODE_HPM: High power mode in which the full rated current
+ * of the regulator is allowed. This mode
+ * corresponds to PWM for SMPS and BOB type
+ * regulators. This mode is supported by all types
+ * of regulators.
+ */
+#define RPMH_REGULATOR_MODE_RET 0
+#define RPMH_REGULATOR_MODE_LPM 1
+#define RPMH_REGULATOR_MODE_AUTO 2
+#define RPMH_REGULATOR_MODE_HPM 3
+
+#endif
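
For illustration, one plausible way a regulator driver could translate these DT constants into the regulator framework's operating modes; the mapping below is a hedged sketch with an invented helper, not the driver's actual table:

#include <linux/regulator/consumer.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>

/* Hedged sketch: map RPMh DT mode constants to framework modes. */
static unsigned int demo_map_mode(unsigned int rpmh_mode)
{
	switch (rpmh_mode) {
	case RPMH_REGULATOR_MODE_RET:
		return REGULATOR_MODE_STANDBY;	/* retention: minimal load */
	case RPMH_REGULATOR_MODE_LPM:
		return REGULATOR_MODE_IDLE;	/* PFM, small load */
	case RPMH_REGULATOR_MODE_AUTO:
		return REGULATOR_MODE_NORMAL;	/* HW picks LPM/HPM */
	case RPMH_REGULATOR_MODE_HPM:
		return REGULATOR_MODE_FAST;	/* PWM, full rated load */
	default:
		return REGULATOR_MODE_INVALID;
	}
}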
diff --git a/include/dt-bindings/soc/qcom,rpmh-rsc.h b/include/dt-bindings/soc/qcom,rpmh-rsc.h
new file mode 100644
index 000000000000..868f998ea998
--- /dev/null
+++ b/include/dt-bindings/soc/qcom,rpmh-rsc.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_QCOM_RPMH_RSC_H__
+#define __DT_QCOM_RPMH_RSC_H__
+
+#define SLEEP_TCS 0
+#define WAKE_TCS 1
+#define ACTIVE_TCS 2
+#define CONTROL_TCS 3
+
+#endif /* __DT_QCOM_RPMH_RSC_H__ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index e54f40974eb0..de8d3d3fa651 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1058,27 +1058,20 @@ static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
/* Device properties */
-#define MAX_ACPI_REFERENCE_ARGS 8
-struct acpi_reference_args {
- struct acpi_device *adev;
- size_t nargs;
- u64 args[MAX_ACPI_REFERENCE_ARGS];
-};
-
#ifdef CONFIG_ACPI
int acpi_dev_get_property(const struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj);
int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index, size_t num_args,
- struct acpi_reference_args *args);
+ struct fwnode_reference_args *args);
static inline int acpi_node_get_property_reference(
const struct fwnode_handle *fwnode,
const char *name, size_t index,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
return __acpi_node_get_property_reference(fwnode, name, index,
- MAX_ACPI_REFERENCE_ARGS, args);
+ NR_FWNODE_REFERENCE_ARGS, args);
}
int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
@@ -1096,14 +1089,6 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode);
-struct fwnode_handle *
-acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
- struct fwnode_handle *prev);
-int acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode,
- struct fwnode_handle **remote,
- struct fwnode_handle **port,
- struct fwnode_handle **endpoint);
-
struct acpi_probe_entry;
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
struct acpi_probe_entry *);
@@ -1169,7 +1154,7 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
static inline int
__acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index, size_t num_args,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
return -ENXIO;
}
@@ -1177,7 +1162,7 @@ __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
static inline int
acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
return -ENXIO;
}
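
A hedged sketch of a caller after this conversion; the property name "demo-device" and the helper itself are invented:

#include <linux/acpi.h>
#include <linux/property.h>

static int demo_get_reference(const struct fwnode_handle *fwnode)
{
	struct fwnode_reference_args args;
	int ret;

	ret = acpi_node_get_property_reference(fwnode, "demo-device", 0, &args);
	if (ret)
		return ret;

	/* args.fwnode, args.nargs and args.args[] take over from the
	 * adev/nargs/args fields of the removed acpi_reference_args. */
	fwnode_handle_put(args.fwnode);
	return 0;
}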
diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h
new file mode 100644
index 000000000000..4cc40201273e
--- /dev/null
+++ b/include/linux/ascii85.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2008 Intel Corporation
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ASCII85_H_
+#define _ASCII85_H_
+
+#include <linux/kernel.h>
+
+#define ASCII85_BUFSZ 6
+
+static inline long
+ascii85_encode_len(long len)
+{
+ return DIV_ROUND_UP(len, 4);
+}
+
+static inline const char *
+ascii85_encode(u32 in, char *out)
+{
+ int i;
+
+ if (in == 0)
+ return "z";
+
+ out[5] = '\0';
+ for (i = 5; i--; ) {
+ out[i] = '!' + in % 85;
+ in /= 85;
+ }
+
+ return out;
+}
+
+#endif
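
For illustration, a minimal sketch of a caller of these helpers; demo_dump_words() and its payload are invented:

#include <linux/ascii85.h>
#include <linux/printk.h>

static void demo_dump_words(const u32 *words, long nwords)
{
	char out[ASCII85_BUFSZ];
	long i;

	/* Each u32 encodes to at most five characters, with "z" as the
	 * shorthand for an all-zero word; out[] is reused per word. */
	for (i = 0; i < nwords; i++)
		pr_cont("%s", ascii85_encode(words[i], out));
}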
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 01ce3997cb42..1e8e88bdaf09 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,8 @@
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
+#include <linux/types.h>
+
#include <asm/atomic.h>
#include <asm/barrier.h>
@@ -36,40 +38,46 @@
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*
- * Besides, if an arch has a special barrier for acquire/release, it could
- * implement its own __atomic_op_* and use the same framework for building
- * variants
- *
- * If an architecture overrides __atomic_op_acquire() it will probably want
- * to define smp_mb__after_spinlock().
+ * If an architecture overrides __atomic_acquire_fence() it will probably
+ * want to define smp_mb__after_spinlock().
*/
-#ifndef __atomic_op_acquire
+#ifndef __atomic_acquire_fence
+#define __atomic_acquire_fence smp_mb__after_atomic
+#endif
+
+#ifndef __atomic_release_fence
+#define __atomic_release_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_pre_full_fence
+#define __atomic_pre_full_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_post_full_fence
+#define __atomic_post_full_fence smp_mb__after_atomic
+#endif
+
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
- smp_mb__after_atomic(); \
+ __atomic_acquire_fence(); \
__ret; \
})
-#endif
-#ifndef __atomic_op_release
#define __atomic_op_release(op, args...) \
({ \
- smp_mb__before_atomic(); \
+ __atomic_release_fence(); \
op##_relaxed(args); \
})
-#endif
-#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
- smp_mb__before_atomic(); \
+ __atomic_pre_full_fence(); \
__ret = op##_relaxed(args); \
- smp_mb__after_atomic(); \
+ __atomic_post_full_fence(); \
__ret; \
})
-#endif
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
@@ -95,11 +103,23 @@
#endif
#endif /* atomic_add_return_relaxed */
+#ifndef atomic_inc
+#define atomic_inc(v) atomic_add(1, (v))
+#endif
+
/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
+
+#ifndef atomic_inc_return
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
+#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v))
+#define atomic_inc_return_release(v) atomic_add_return_release(1, (v))
+#else /* atomic_inc_return */
#define atomic_inc_return_relaxed atomic_inc_return
#define atomic_inc_return_acquire atomic_inc_return
#define atomic_inc_return_release atomic_inc_return
+#endif /* atomic_inc_return */
#else /* atomic_inc_return_relaxed */
@@ -143,11 +163,23 @@
#endif
#endif /* atomic_sub_return_relaxed */
+#ifndef atomic_dec
+#define atomic_dec(v) atomic_sub(1, (v))
+#endif
+
/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
+
+#ifndef atomic_dec_return
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
+#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v))
+#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v))
+#else /* atomic_dec_return */
#define atomic_dec_return_relaxed atomic_dec_return
#define atomic_dec_return_acquire atomic_dec_return
#define atomic_dec_return_release atomic_dec_return
+#endif /* atomic_dec_return */
#else /* atomic_dec_return_relaxed */
@@ -328,12 +360,22 @@
#endif
#endif /* atomic_fetch_and_relaxed */
-#ifdef atomic_andnot
-/* atomic_fetch_andnot_relaxed */
+#ifndef atomic_andnot
+#define atomic_andnot(i, v) atomic_and(~(int)(i), (v))
+#endif
+
#ifndef atomic_fetch_andnot_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot
-#define atomic_fetch_andnot_release atomic_fetch_andnot
+
+#ifndef atomic_fetch_andnot
+#define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v))
+#define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v))
+#define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v))
+#define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v))
+#else /* atomic_fetch_andnot */
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot
+#define atomic_fetch_andnot_release atomic_fetch_andnot
+#endif /* atomic_fetch_andnot */
#else /* atomic_fetch_andnot_relaxed */
@@ -352,7 +394,6 @@
__atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
#endif
#endif /* atomic_fetch_andnot_relaxed */
-#endif /* atomic_andnot */
/* atomic_fetch_xor_relaxed */
#ifndef atomic_fetch_xor_relaxed
@@ -520,112 +561,140 @@
#endif /* xchg_relaxed */
/**
+ * atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic_fetch_add_unless
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!atomic_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#endif
+
+/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
*/
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline bool atomic_add_unless(atomic_t *v, int a, int u)
{
- return __atomic_add_unless(v, a, u) != u;
+ return atomic_fetch_add_unless(v, a, u) != u;
}
/**
* atomic_inc_not_zero - increment unless the number is zero
* @v: pointer of type atomic_t
*
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
*/
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
-#ifndef atomic_andnot
-static inline void atomic_andnot(int i, atomic_t *v)
-{
- atomic_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot(int i, atomic_t *v)
-{
- return atomic_fetch_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic_inc_and_test
+static inline bool atomic_inc_and_test(atomic_t *v)
{
- return atomic_fetch_and_relaxed(~i, v);
+ return atomic_inc_return(v) == 0;
}
+#endif
-static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v)
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#ifndef atomic_dec_and_test
+static inline bool atomic_dec_and_test(atomic_t *v)
{
- return atomic_fetch_and_acquire(~i, v);
+ return atomic_dec_return(v) == 0;
}
+#endif
-static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic_sub_and_test
+static inline bool atomic_sub_and_test(int i, atomic_t *v)
{
- return atomic_fetch_and_release(~i, v);
+ return atomic_sub_return(i, v) == 0;
}
#endif
/**
- * atomic_inc_not_zero_hint - increment if not null
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
* @v: pointer of type atomic_t
- * @hint: probable value of the atomic before the increment
- *
- * This version of atomic_inc_not_zero() gives a hint of probable
- * value of the atomic. This helps processor to not read the memory
- * before doing the atomic read/modify/write cycle, lowering
- * number of bus transactions on some arches.
*
- * Returns: 0 if increment was not done, 1 otherwise.
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
*/
-#ifndef atomic_inc_not_zero_hint
-static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
+#ifndef atomic_add_negative
+static inline bool atomic_add_negative(int i, atomic_t *v)
{
- int val, c = hint;
-
- /* sanity test, should be removed by compiler if hint is a constant */
- if (!hint)
- return atomic_inc_not_zero(v);
-
- do {
- val = atomic_cmpxchg(v, c, c + 1);
- if (val == c)
- return 1;
- c = val;
- } while (c);
-
- return 0;
+ return atomic_add_return(i, v) < 0;
}
#endif
#ifndef atomic_inc_unless_negative
-static inline int atomic_inc_unless_negative(atomic_t *p)
+static inline bool atomic_inc_unless_negative(atomic_t *v)
{
- int v, v1;
- for (v = 0; v >= 0; v = v1) {
- v1 = atomic_cmpxchg(p, v, v + 1);
- if (likely(v1 == v))
- return 1;
- }
- return 0;
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
}
#endif
#ifndef atomic_dec_unless_positive
-static inline int atomic_dec_unless_positive(atomic_t *p)
+static inline bool atomic_dec_unless_positive(atomic_t *v)
{
- int v, v1;
- for (v = 0; v <= 0; v = v1) {
- v1 = atomic_cmpxchg(p, v, v - 1);
- if (likely(v1 == v))
- return 1;
- }
- return 0;
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!atomic_try_cmpxchg(v, &c, c - 1));
+
+ return true;
}
#endif
@@ -639,17 +708,14 @@ static inline int atomic_dec_unless_positive(atomic_t *p)
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
- int c, old, dec;
- c = atomic_read(v);
- for (;;) {
+ int dec, c = atomic_read(v);
+
+ do {
dec = c - 1;
if (unlikely(dec < 0))
break;
- old = atomic_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
+ } while (!atomic_try_cmpxchg(v, &c, dec));
+
return dec;
}
#endif
@@ -693,11 +759,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_add_return_relaxed */
+#ifndef atomic64_inc
+#define atomic64_inc(v) atomic64_add(1, (v))
+#endif
+
/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
+
+#ifndef atomic64_inc_return
+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
+#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
+#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
+#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
+#else /* atomic64_inc_return */
#define atomic64_inc_return_relaxed atomic64_inc_return
#define atomic64_inc_return_acquire atomic64_inc_return
#define atomic64_inc_return_release atomic64_inc_return
+#endif /* atomic64_inc_return */
#else /* atomic64_inc_return_relaxed */
@@ -742,11 +820,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_sub_return_relaxed */
+#ifndef atomic64_dec
+#define atomic64_dec(v) atomic64_sub(1, (v))
+#endif
+
/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
+
+#ifndef atomic64_dec_return
+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
+#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
+#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
+#else /* atomic64_dec_return */
#define atomic64_dec_return_relaxed atomic64_dec_return
#define atomic64_dec_return_acquire atomic64_dec_return
#define atomic64_dec_return_release atomic64_dec_return
+#endif /* atomic64_dec_return */
#else /* atomic64_dec_return_relaxed */
@@ -927,12 +1017,22 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_fetch_and_relaxed */
-#ifdef atomic64_andnot
-/* atomic64_fetch_andnot_relaxed */
+#ifndef atomic64_andnot
+#define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v))
+#endif
+
#ifndef atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot
+
+#ifndef atomic64_fetch_andnot
+#define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v))
+#define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v))
+#define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v))
+#define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v))
+#else /* atomic64_fetch_andnot */
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot
+#endif /* atomic64_fetch_andnot */
#else /* atomic64_fetch_andnot_relaxed */
@@ -951,7 +1051,6 @@ static inline int atomic_dec_if_positive(atomic_t *v)
__atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
#endif
#endif /* atomic64_fetch_andnot_relaxed */
-#endif /* atomic64_andnot */
/* atomic64_fetch_xor_relaxed */
#ifndef atomic64_fetch_xor_relaxed
@@ -1049,30 +1148,164 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
#endif /* atomic64_try_cmpxchg */
-#ifndef atomic64_andnot
-static inline void atomic64_andnot(long long i, atomic64_t *v)
+/**
+ * atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic64_fetch_add_unless
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+ long long u)
{
- atomic64_and(~i, v);
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!atomic64_try_cmpxchg(v, &c, c + a));
+
+ return c;
}
+#endif
-static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v)
+/**
+ * atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
- return atomic64_fetch_and(~i, v);
+ return atomic64_fetch_add_unless(v, a, u) != u;
}
-static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v)
+/**
+ * atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+#ifndef atomic64_inc_not_zero
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#endif
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic64_inc_and_test
+static inline bool atomic64_inc_and_test(atomic64_t *v)
{
- return atomic64_fetch_and_relaxed(~i, v);
+ return atomic64_inc_return(v) == 0;
}
+#endif
-static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v)
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#ifndef atomic64_dec_and_test
+static inline bool atomic64_dec_and_test(atomic64_t *v)
{
- return atomic64_fetch_and_acquire(~i, v);
+ return atomic64_dec_return(v) == 0;
}
+#endif
-static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic64_sub_and_test
+static inline bool atomic64_sub_and_test(long long i, atomic64_t *v)
+{
+ return atomic64_sub_return(i, v) == 0;
+}
+#endif
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#ifndef atomic64_add_negative
+static inline bool atomic64_add_negative(long long i, atomic64_t *v)
{
- return atomic64_fetch_and_release(~i, v);
+ return atomic64_add_return(i, v) < 0;
+}
+#endif
+
+#ifndef atomic64_inc_unless_negative
+static inline bool atomic64_inc_unless_negative(atomic64_t *v)
+{
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!atomic64_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#endif
+
+#ifndef atomic64_dec_unless_positive
+static inline bool atomic64_dec_unless_positive(atomic64_t *v)
+{
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!atomic64_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#endif
+
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic64 variable, v, was not decremented.
+ */
+#ifndef atomic64_dec_if_positive
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long dec, c = atomic64_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!atomic64_try_cmpxchg(v, &c, dec));
+
+ return dec;
}
#endif
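
For illustration, the atomic_try_cmpxchg() retry idiom that these generic helpers now share, applied to an invented atomic_inc_below() primitive:

#include <linux/atomic.h>

static inline bool atomic_inc_below(atomic_t *v, int limit)
{
	int c = atomic_read(v);

	do {
		if (unlikely(c >= limit))
			return false;
		/* On failure, atomic_try_cmpxchg() refreshes c with the
		 * current value, so no extra atomic_read() is needed. */
	} while (!atomic_try_cmpxchg(v, &c, c + 1));

	return true;
}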
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 69c78477590b..9334fbef7bae 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -117,6 +117,9 @@ struct filename;
extern void audit_log_session_info(struct audit_buffer *ab);
+#define AUDIT_OFF 0
+#define AUDIT_ON 1
+#define AUDIT_LOCKED 2
#ifdef CONFIG_AUDIT
/* These are defined in audit.c */
/* Public API */
@@ -202,7 +205,7 @@ static inline int audit_log_task_context(struct audit_buffer *ab)
static inline void audit_log_task_info(struct audit_buffer *ab,
struct task_struct *tsk)
{ }
-#define audit_enabled 0
+#define audit_enabled AUDIT_OFF
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_AUDIT_COMPAT_GENERIC
diff --git a/include/linux/bio.h b/include/linux/bio.h
index f08f5fe7bd08..51371740d2a8 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -429,7 +429,6 @@ extern void bio_put(struct bio *);
extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
-extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
extern struct bio_set fs_bio_set;
@@ -443,12 +442,6 @@ static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}
-static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
-{
- return bio_clone_bioset(bio, gfp_mask, NULL);
-
-}
-
extern blk_qc_t submit_bio(struct bio *);
extern void bio_endio(struct bio *);
@@ -496,9 +489,9 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
-void generic_start_io_acct(struct request_queue *q, int rw,
+void generic_start_io_acct(struct request_queue *q, int op,
unsigned long sectors, struct hd_struct *part);
-void generic_end_io_acct(struct request_queue *q, int rw,
+void generic_end_io_acct(struct request_queue *q, int op,
struct hd_struct *part,
unsigned long start_time);
@@ -553,8 +546,16 @@ do { \
#define bio_dev(bio) \
disk_devt((bio)->bi_disk)
+#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
+int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
+#else
+static inline int bio_associate_blkcg_from_page(struct bio *bio,
+ struct page *page) { return 0; }
+#endif
+
#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
+int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
void bio_disassociate_task(struct bio *bio);
void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
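
A hedged sketch of the accounting hooks after the rename: callers pass the request op rather than a 0/1 read/write flag. demo_acct() is invented:

#include <linux/bio.h>

static void demo_acct(struct request_queue *q, struct bio *bio,
		      struct hd_struct *part, unsigned long start)
{
	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), part);
	/* ... I/O runs ... */
	generic_end_io_acct(q, bio_op(bio), part, start);
}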
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index cf2588d81148..65a6981eef7b 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -104,7 +104,7 @@
(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
})
-extern void __compiletime_warning("value doesn't fit into mask")
+extern void __compiletime_error("value doesn't fit into mask")
__field_overflow(void);
extern void __compiletime_error("bad bitfield mask")
__bad_mask(void);
@@ -121,8 +121,8 @@ static __always_inline u64 field_mask(u64 field)
#define ____MAKE_OP(type,base,to,from) \
static __always_inline __##type type##_encode_bits(base v, base field) \
{ \
- if (__builtin_constant_p(v) && (v & ~field_multiplier(field))) \
- __field_overflow(); \
+ if (__builtin_constant_p(v) && (v & ~field_mask(field))) \
+ __field_overflow(); \
return to((v & field_mask(field)) * field_multiplier(field)); \
} \
static __always_inline __##type type##_replace_bits(__##type old, \
@@ -143,6 +143,7 @@ static __always_inline base type##_get_bits(__##type v, base field) \
____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
____MAKE_OP(u##size,u##size,,)
+____MAKE_OP(u8,u8,,)
__MAKE_OP(16)
__MAKE_OP(32)
__MAKE_OP(64)
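
For illustration, the newly generated u8 helpers in use; the 4-bit "demo" field at bits 7:4 is invented:

#include <linux/bitfield.h>

static inline u8 demo_pack_nibble(u8 code)
{
	/* Packs the low nibble of @code into bits 7:4. A constant value
	 * wider than the field is now a hard __compiletime_error(). */
	return u8_encode_bits(code, 0xf0);
}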
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 4cac4e1a72ff..af419012d77d 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -2,29 +2,9 @@
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>
+#include <linux/bits.h>
-#ifdef __KERNEL__
-#define BIT(nr) (1UL << (nr))
-#define BIT_ULL(nr) (1ULL << (nr))
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
-#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
-#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#endif
-
-/*
- * Create a contiguous bitmask starting at bit position @l and ending at
- * position @h. For example
- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(h, l) \
- (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-
-#define GENMASK_ULL(h, l) \
- (((~0ULL) - (1ULL << (l)) + 1) & \
- (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
diff --git a/include/linux/bits.h b/include/linux/bits.h
new file mode 100644
index 000000000000..2b7b532c1d51
--- /dev/null
+++ b/include/linux/bits.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE 8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif /* __LINUX_BITS_H */
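
The split lets headers pick up BIT()/GENMASK() without pulling in the rest of bitops.h; a hedged illustration with invented fields:

#include <linux/bits.h>

#define DEMO_IRQ_ENABLE		BIT(5)			/* 0x20 */
#define DEMO_PRIO_MASK		GENMASK(7, 4)		/* 0xf0 */
#define DEMO_ADDR_MASK		GENMASK_ULL(39, 21)	/* 0x000000ffffe00000 */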
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 6c666fd7de3c..34aec30e06c7 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -35,6 +35,7 @@ enum blkg_rwstat_type {
BLKG_RWSTAT_WRITE,
BLKG_RWSTAT_SYNC,
BLKG_RWSTAT_ASYNC,
+ BLKG_RWSTAT_DISCARD,
BLKG_RWSTAT_NR,
BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
@@ -136,6 +137,12 @@ struct blkcg_gq {
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
struct rcu_head rcu_head;
+
+ atomic_t use_delay;
+ atomic64_t delay_nsec;
+ atomic64_t delay_start;
+ u64 last_delay;
+ int last_use;
};
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
@@ -148,6 +155,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
+typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
+ size_t size);
struct blkcg_policy {
int plid;
@@ -167,6 +176,7 @@ struct blkcg_policy {
blkcg_pol_offline_pd_fn *pd_offline_fn;
blkcg_pol_free_pd_fn *pd_free_fn;
blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
+ blkcg_pol_stat_pd_fn *pd_stat_fn;
};
extern struct blkcg blkcg_root;
@@ -238,6 +248,42 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
return css_to_blkcg(task_css(current, io_cgrp_id));
}
+static inline bool blk_cgroup_congested(void)
+{
+ struct cgroup_subsys_state *css;
+ bool ret = false;
+
+ rcu_read_lock();
+ css = kthread_blkcg();
+ if (!css)
+ css = task_css(current, io_cgrp_id);
+ while (css) {
+ if (atomic_read(&css->cgroup->congestion_count)) {
+ ret = true;
+ break;
+ }
+ css = css->parent;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
+ * @return: true if this bio needs to be submitted with the root blkg context.
+ *
+ * In order to avoid priority inversions we sometimes need to issue a bio as if
+ * it were attached to the root blkg, and then backcharge to the actual owning
+ * blkg. The idea is we do bio_blkcg() to look up the actual context for the
+ * bio and attach the appropriate blkg to the bio. Then we call this helper and
+ * if it is true run with the root blkg for that queue and then do any
+ * backcharging to the originating cgroup once the io is complete.
+ */
+static inline bool bio_issue_as_root_blkg(struct bio *bio)
+{
+ return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
+}
+
/**
* blkcg_parent - get the parent of a blkcg
* @blkcg: blkcg of interest
@@ -296,6 +342,17 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
}
/**
+ * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for @q at the root level. See also blkg_lookup().
+ */
+static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
+{
+ return q->root_blkg;
+}
+
+/**
* blkg_to_pdata - get policy private data
* @blkg: blkg of interest
* @pol: policy of interest
@@ -355,6 +412,21 @@ static inline void blkg_get(struct blkcg_gq *blkg)
atomic_inc(&blkg->refcnt);
}
+/**
+ * blkg_try_get - try and get a blkg reference
+ * @blkg: blkg to get
+ *
+ * This is for use when doing an RCU lookup of the blkg. We may be in the midst
+ * of freeing this blkg, so we can only use it if the refcnt is not zero.
+ */
+static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
+{
+ if (atomic_inc_not_zero(&blkg->refcnt))
+ return blkg;
+ return NULL;
+}
+
+
void __blkg_release_rcu(struct rcu_head *rcu);
/**
@@ -589,7 +661,9 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
{
struct percpu_counter *cnt;
- if (op_is_write(op))
+ if (op_is_discard(op))
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
+ else if (op_is_write(op))
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
else
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
@@ -706,8 +780,14 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
if (!throtl) {
blkg = blkg ?: q->root_blkg;
- blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
- bio->bi_iter.bi_size);
+ /*
+ * If the bio is flagged with BIO_QUEUE_ENTERED it means this
+ * is a split bio and we would have already accounted for the
+ * size of the bio.
+ */
+ if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
+ blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
+ bio->bi_iter.bi_size);
blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
}
@@ -715,6 +795,59 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
return !throtl;
}
+static inline void blkcg_use_delay(struct blkcg_gq *blkg)
+{
+ if (atomic_add_return(1, &blkg->use_delay) == 1)
+ atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
+}
+
+static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
+{
+ int old = atomic_read(&blkg->use_delay);
+
+ if (old == 0)
+ return 0;
+
+ /*
+ * We do this song and dance because we can race with somebody else
+ * adding or removing delay. If we just did an atomic_dec we'd end up
+ * negative and we'd already be in trouble. We need to subtract 1 and
+ * then check to see if we were the last delay so we can drop the
+ * congestion count on the cgroup.
+ */
+ while (old) {
+ int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
+ if (cur == old)
+ break;
+ old = cur;
+ }
+
+ if (old == 0)
+ return 0;
+ if (old == 1)
+ atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
+ return 1;
+}
+
+static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
+{
+ int old = atomic_read(&blkg->use_delay);
+ if (!old)
+ return;
+ /* We only want 1 person clearing the congestion count for this blkg. */
+ while (old) {
+ int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
+ if (cur == old) {
+ atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
+ break;
+ }
+ old = cur;
+ }
+}
+
+void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
+void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
+void blkcg_maybe_throttle_current(void);
#else /* CONFIG_BLK_CGROUP */
struct blkcg {
@@ -734,9 +867,16 @@ struct blkcg_policy {
#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
+static inline void blkcg_maybe_throttle_current(void) { }
+static inline bool blk_cgroup_congested(void) { return false; }
+
#ifdef CONFIG_BLOCK
+static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
+
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
+{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
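
For illustration, how a policy might pair the new delay helpers; demo_throttle() and its trigger condition are invented:

#include <linux/blk-cgroup.h>

static void demo_throttle(struct blkcg_gq *blkg, bool congested, u64 now)
{
	if (congested) {
		/* First user bumps the cgroup's congestion_count. */
		blkcg_use_delay(blkg);
		blkcg_add_delay(blkg, now, NSEC_PER_MSEC);
	} else {
		/* Last user drops the congestion_count again. */
		blkcg_unuse_delay(blkg);
	}
}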
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index ca3f2c2edd85..1da59c16f637 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -35,10 +35,12 @@ struct blk_mq_hw_ctx {
struct sbitmap ctx_map;
struct blk_mq_ctx *dispatch_from;
+ unsigned int dispatch_busy;
- struct blk_mq_ctx **ctxs;
unsigned int nr_ctx;
+ struct blk_mq_ctx **ctxs;
+ spinlock_t dispatch_wait_lock;
wait_queue_entry_t dispatch_wait;
atomic_t wait_index;
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 3c4f390aea4b..f6dfb30737d8 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -179,11 +179,9 @@ struct bio {
*/
struct io_context *bi_ioc;
struct cgroup_subsys_state *bi_css;
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- void *bi_cg_private;
+ struct blkcg_gq *bi_blkg;
struct bio_issue bi_issue;
#endif
-#endif
union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload *bi_integrity; /* data integrity */
@@ -329,7 +327,7 @@ enum req_flag_bits {
/* for driver use */
__REQ_DRV,
-
+ __REQ_SWAP, /* swapping request. */
__REQ_NR_BITS, /* stops here */
};
@@ -351,6 +349,7 @@ enum req_flag_bits {
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
#define REQ_DRV (1ULL << __REQ_DRV)
+#define REQ_SWAP (1ULL << __REQ_SWAP)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -358,6 +357,14 @@ enum req_flag_bits {
#define REQ_NOMERGE_FLAGS \
(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
+enum stat_group {
+ STAT_READ,
+ STAT_WRITE,
+ STAT_DISCARD,
+
+ NR_STAT_GROUPS
+};
+
#define bio_op(bio) \
((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
@@ -395,6 +402,18 @@ static inline bool op_is_sync(unsigned int op)
(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
+static inline bool op_is_discard(unsigned int op)
+{
+ return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
+}
+
+static inline int op_stat_group(unsigned int op)
+{
+ if (op_is_discard(op))
+ return STAT_DISCARD;
+ return op_is_write(op);
+}
+
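op_stat_group() folds an op into one of the three buckets above: discards get their own bucket, and for everything else op_is_write() already yields 0 (STAT_READ) or 1 (STAT_WRITE). A hedged sketch of the accounting call site this enables (the part_stat usage here is illustrative, not lifted from this patch):

/* Illustrative I/O accounting: bucket a request by its stat group. */
const int sgrp = op_stat_group(req_op(req));

part_stat_inc(cpu, part, ios[sgrp]);    /* ios[] is NR_STAT_GROUPS wide */
part_stat_add(cpu, part, sectors[sgrp], blk_rq_sectors(req));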
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
#define BLK_QC_T_SHIFT 16
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 79226ca8f80f..d6869e0e2b64 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -27,8 +27,6 @@
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
-#include <linux/seqlock.h>
-#include <linux/u64_stats_sync.h>
struct module;
struct scsi_ioctl_command;
@@ -42,7 +40,7 @@ struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
-struct rq_wb;
+struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
@@ -442,10 +440,8 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
- atomic_t shared_hctx_restart;
-
struct blk_queue_stats *stats;
- struct rq_wb *rq_wb;
+ struct rq_qos *rq_qos;
/*
* If blkcg is not used, @q->root_rl serves all requests. If blkcg
@@ -592,6 +588,7 @@ struct request_queue {
struct queue_limits limits;
+#ifdef CONFIG_BLK_DEV_ZONED
/*
* Zoned block device information for request dispatch control.
* nr_zones is the total number of zones of the device. This is always
@@ -612,6 +609,7 @@ struct request_queue {
unsigned int nr_zones;
unsigned long *seq_zones_bitmap;
unsigned long *seq_zones_wlock;
+#endif /* CONFIG_BLK_DEV_ZONED */
/*
* sg stuff
@@ -800,11 +798,7 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
-static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
-{
- return q->nr_zones;
-}
-
+#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_queue_zone_no(struct request_queue *q,
sector_t sector)
{
@@ -820,6 +814,7 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q,
return false;
return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
}
+#endif /* CONFIG_BLK_DEV_ZONED */
static inline bool rq_is_sync(struct request *rq)
{
@@ -1070,6 +1065,7 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}
+#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
@@ -1079,6 +1075,7 @@ static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
}
+#endif /* CONFIG_BLK_DEV_ZONED */
/*
* Some commands like WRITE SAME have a payload or data transfer size which
@@ -1437,8 +1434,6 @@ enum blk_default_limits {
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
-#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
-
static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
return q->limits.seg_boundary_mask;
@@ -1639,15 +1634,6 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
return 0;
}
-static inline unsigned int bdev_nr_zones(struct block_device *bdev)
-{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return blk_queue_nr_zones(q);
- return 0;
-}
-
static inline int queue_dma_alignment(struct request_queue *q)
{
return q ? q->dma_alignment : 511;
@@ -1877,6 +1863,28 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
bip_next->bip_vec[0].bv_offset);
}
+/**
+ * bio_integrity_intervals - Return number of integrity intervals for a bio
+ * @bi: blk_integrity profile for device
+ * @sectors: Size of the bio in 512-byte sectors
+ *
+ * Description: The block layer calculates everything in 512-byte
+ * sectors, but integrity metadata is done in terms of the data integrity
+ * interval size of the storage device. Convert the block layer sectors
+ * to the appropriate number of integrity intervals.
+ */
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return sectors >> (bi->interval_exp - 9);
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
+}
+
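A worked example of the conversion, with assumed device parameters: a 4096-byte protection interval means bi->interval_exp is 12, so the shift is 3; with 8-byte PI tuples:

/* Assumed device: 4 KiB integrity interval, 8-byte PI tuples. */
unsigned int sectors = 32;                  /* a 16 KiB bio         */
unsigned int ivals   = sectors >> (12 - 9); /* 32 / 8 = 4 intervals */
unsigned int bytes   = ivals * 8;           /* 32 bytes of metadata */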
#else /* CONFIG_BLK_DEV_INTEGRITY */
struct bio;
@@ -1950,12 +1958,24 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
return false;
}
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+
#endif /* CONFIG_BLK_DEV_INTEGRITY */
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
- int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
+ int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
unsigned int (*check_events) (struct gendisk *disk,
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 7942a96b1a9d..42515195d7d8 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -27,9 +27,20 @@ extern unsigned long max_pfn;
extern unsigned long long max_possible_pfn;
#ifndef CONFIG_NO_BOOTMEM
-/*
- * node_bootmem_map is a map pointer - the bits represent all physical
- * memory pages (including holes) on the node.
+/**
+ * struct bootmem_data - per-node information used by the bootmem allocator
+ * @node_min_pfn: the starting page frame number of the node's memory
+ * @node_low_pfn: the page frame number of the end of the directly
+ * addressable memory
+ * @node_bootmem_map: a bitmap pointer - the bits represent all physical
+ * memory pages (including holes) on the node
+ * @last_end_off: the offset within the page of the end of the last allocation;
+ * if 0, the page used is full
+ * @hint_idx: the PFN of the page used with the last allocation; together
+ * with @last_end_off, this lets the next allocation test whether it
+ * can be merged into the partially used page of the last allocation
+ * rather than using up a full new page.
+ * @list: list entry in the linked list ordered by the memory addresses
*/
typedef struct bootmem_data {
unsigned long node_min_pfn;
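The @last_end_off/@hint_idx merge described in the comment boils down to a test like the following (a simplified sketch of the logic in mm/bootmem.c; sidx is the candidate start page index within the node):

/* Reuse the tail of the page that ended the previous allocation if
 * that page is only partly used and we would start right after it.
 */
if (bdata->last_end_off & (PAGE_SIZE - 1) &&
    PFN_DOWN(bdata->last_end_off) + 1 == sidx)
    start_off = ALIGN(bdata->last_end_off, align);
else
    start_off = PFN_PHYS(sidx);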
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index d50c2f0a655a..f91b0f8ff3a9 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -4,22 +4,46 @@
#include <linux/errno.h>
#include <linux/jump_label.h>
+#include <linux/percpu.h>
+#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>
struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
+struct bpf_map;
+struct bpf_prog;
struct bpf_sock_ops_kern;
+struct bpf_cgroup_storage;
#ifdef CONFIG_CGROUP_BPF
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+DECLARE_PER_CPU(void*, bpf_cgroup_storage);
+
+struct bpf_cgroup_storage_map;
+
+struct bpf_storage_buffer {
+ struct rcu_head rcu;
+ char data[0];
+};
+
+struct bpf_cgroup_storage {
+ struct bpf_storage_buffer *buf;
+ struct bpf_cgroup_storage_map *map;
+ struct bpf_cgroup_storage_key key;
+ struct list_head list;
+ struct rb_node node;
+ struct rcu_head rcu;
+};
+
struct bpf_prog_list {
struct list_head node;
struct bpf_prog *prog;
+ struct bpf_cgroup_storage *storage;
};
struct bpf_prog_array;
@@ -77,6 +101,26 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
short access, enum bpf_attach_type type);
+static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage)
+{
+ struct bpf_storage_buffer *buf;
+
+ if (!storage)
+ return;
+
+ buf = READ_ONCE(storage->buf);
+ this_cpu_write(bpf_cgroup_storage, &buf->data[0]);
+}
+
+struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog);
+void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
+void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
+ struct cgroup *cgroup,
+ enum bpf_attach_type type);
+void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
+int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
+void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
+
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
@@ -221,6 +265,16 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
return -EINVAL;
}
+static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {}
+static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
+ struct bpf_map *map) { return 0; }
+static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
+ struct bpf_map *map) {}
+static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
+ struct bpf_prog *prog) { return 0; }
+static inline void bpf_cgroup_storage_free(
+ struct bpf_cgroup_storage *storage) {}
+
#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8827e797ff97..523481a3471b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -23,7 +23,7 @@ struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
-struct btf;
+struct btf_type;
/* map is generic key/value storage optionally accesible by eBPF programs */
struct bpf_map_ops {
@@ -48,8 +48,9 @@ struct bpf_map_ops {
u32 (*map_fd_sys_lookup_elem)(void *ptr);
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
struct seq_file *m);
- int (*map_check_btf)(const struct bpf_map *map, const struct btf *btf,
- u32 key_type_id, u32 value_type_id);
+ int (*map_check_btf)(const struct bpf_map *map,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
};
struct bpf_map {
@@ -85,6 +86,7 @@ struct bpf_map {
char name[BPF_OBJ_NAME_LEN];
};
+struct bpf_offload_dev;
struct bpf_offloaded_map;
struct bpf_map_dev_ops {
@@ -117,9 +119,13 @@ static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
- return map->ops->map_seq_show_elem && map->ops->map_check_btf;
+ return map->btf && map->ops->map_seq_show_elem;
}
+int map_check_no_btf(const struct bpf_map *map,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
+
extern const struct bpf_map_ops bpf_map_offload_ops;
/* function argument constraints */
@@ -154,6 +160,7 @@ enum bpf_arg_type {
enum bpf_return_type {
RET_INTEGER, /* function returns integer */
RET_VOID, /* function doesn't return anything */
+ RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
};
@@ -281,6 +288,7 @@ struct bpf_prog_aux {
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
+ struct bpf_map *cgroup_storage;
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
void *security;
@@ -347,12 +355,17 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
* The 'struct bpf_prog_array *' should only be replaced with xchg()
* since other cpus are walking the array of pointers in parallel.
*/
+struct bpf_prog_array_item {
+ struct bpf_prog *prog;
+ struct bpf_cgroup_storage *cgroup_storage;
+};
+
struct bpf_prog_array {
struct rcu_head rcu;
- struct bpf_prog *progs[0];
+ struct bpf_prog_array_item items[0];
};
-struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
+struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
@@ -370,7 +383,8 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
({ \
- struct bpf_prog **_prog, *__prog; \
+ struct bpf_prog_array_item *_item; \
+ struct bpf_prog *_prog; \
struct bpf_prog_array *_array; \
u32 _ret = 1; \
preempt_disable(); \
@@ -378,10 +392,11 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
_array = rcu_dereference(array); \
if (unlikely(check_non_null && !_array))\
goto _out; \
- _prog = _array->progs; \
- while ((__prog = READ_ONCE(*_prog))) { \
- _ret &= func(__prog, ctx); \
- _prog++; \
+ _item = &_array->items[0]; \
+ while ((_prog = READ_ONCE(_item->prog))) { \
+ bpf_cgroup_storage_set(_item->cgroup_storage); \
+ _ret &= func(_prog, ctx); \
+ _item++; \
} \
_out: \
rcu_read_unlock(); \
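Open-coded, the rewritten macro walks (prog, storage) item pairs instead of a bare program array; roughly (with BPF_PROG_RUN standing in for the func argument):

u32 ret = 1;
struct bpf_prog_array_item *item;
struct bpf_prog *prog;

preempt_disable();
rcu_read_lock();
item = &rcu_dereference(array)->items[0];
while ((prog = READ_ONCE(item->prog))) {
    /* make this prog's per-cgroup storage current on this CPU */
    bpf_cgroup_storage_set(item->cgroup_storage);
    ret &= BPF_PROG_RUN(prog, ctx);
    item++;
}
rcu_read_unlock();
preempt_enable();

The NULL prog sentinel still terminates the walk; only the element type changed.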
@@ -434,6 +449,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
+int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
+void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
@@ -512,6 +529,7 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
}
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
+int array_map_alloc_check(union bpf_attr *attr);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
@@ -648,7 +666,15 @@ int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
void *key, void *next_key);
-bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);
+bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
+
+struct bpf_offload_dev *bpf_offload_dev_create(void);
+void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
+int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
+ struct net_device *netdev);
+void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
+ struct net_device *netdev);
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
@@ -749,6 +775,33 @@ static inline void __xsk_map_flush(struct bpf_map *map)
}
#endif
+#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
+void bpf_sk_reuseport_detach(struct sock *sk);
+int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
+ void *value);
+int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags);
+#else
+static inline void bpf_sk_reuseport_detach(struct sock *sk)
+{
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
+ void *key, void *value)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
+ void *key, void *value,
+ u64 map_flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_BPF_SYSCALL */
+#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
+
/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
@@ -768,6 +821,8 @@ extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_get_local_storage_proto;
+
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index c5700c2d5549..cd26c090e7c0 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -29,6 +29,9 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
#ifdef CONFIG_BPF_LIRC_MODE2
BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2)
#endif
+#ifdef CONFIG_INET
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport)
+#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
@@ -37,6 +40,9 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
#ifdef CONFIG_CGROUPS
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
#endif
+#ifdef CONFIG_CGROUP_BPF
+BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
+#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
@@ -57,4 +63,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
#if defined(CONFIG_XDP_SOCKETS)
BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
#endif
+#ifdef CONFIG_INET
+BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
+#endif
#endif
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index daa9234a9baf..949e9af8d9d6 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -45,6 +45,7 @@
#define PHY_ID_BCM7445 0x600d8510
#define PHY_ID_BCM_CYGNUS 0xae025200
+#define PHY_ID_BCM_OMEGA 0xae025100
#define PHY_BCM_OUI_MASK 0xfffffc00
#define PHY_BCM_OUI_1 0x00206000
diff --git a/include/linux/build-salt.h b/include/linux/build-salt.h
new file mode 100644
index 000000000000..bb007bd05e7a
--- /dev/null
+++ b/include/linux/build-salt.h
@@ -0,0 +1,20 @@
+#ifndef __BUILD_SALT_H
+#define __BUILD_SALT_H
+
+#include <linux/elfnote.h>
+
+#define LINUX_ELFNOTE_BUILD_SALT 0x100
+
+#ifdef __ASSEMBLER__
+
+#define BUILD_SALT \
+ ELFNOTE(Linux, LINUX_ELFNOTE_BUILD_SALT, .asciz CONFIG_BUILD_SALT)
+
+#else
+
+#define BUILD_SALT \
+ ELFNOTE32("Linux", LINUX_ELFNOTE_BUILD_SALT, CONFIG_BUILD_SALT)
+
+#endif
+
+#endif /* __BUILD_SALT_H */
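Usage is a single statement at file scope in any object whose ELF notes should carry the salt (a sketch; vmlinux and kernel modules are the intended users):

#include <linux/build-salt.h>

BUILD_SALT;    /* emits a "Linux" ELF note containing CONFIG_BUILD_SALT */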
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 055aaf5ed9af..a83e1f632eb7 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -143,7 +143,12 @@ u8 can_dlc2len(u8 can_dlc);
/* map the sanitized data length to an appropriate data length code */
u8 can_len2dlc(u8 len);
-struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
+struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+ unsigned int txqs, unsigned int rxqs);
+#define alloc_candev(sizeof_priv, echo_skb_max) \
+ alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1)
+#define alloc_candev_mq(sizeof_priv, echo_skb_max, count) \
+ alloc_candev_mqs(sizeof_priv, echo_skb_max, count, count)
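A hedged driver-side sketch of the new allocator (struct my_can_priv and the queue counts are invented for illustration):

/* 4 TX queues, 1 RX queue, room for 8 echo skbs. */
struct net_device *ndev;

ndev = alloc_candev_mqs(sizeof(struct my_can_priv), 8, 4, 1);
if (!ndev)
    return -ENOMEM;

Existing single-queue drivers are unaffected: alloc_candev() now simply expands to alloc_candev_mqs(..., 1, 1).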
void free_candev(struct net_device *dev);
/* a candev safe wrapper around netdev_priv */
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index e75dfd1f1dec..528271c60018 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -13,6 +13,7 @@
#include <linux/fs.h> /* not really needed, later.. */
#include <linux/list.h>
+#include <scsi/scsi_common.h>
#include <uapi/linux/cdrom.h>
struct packet_command
@@ -21,7 +22,7 @@ struct packet_command
unsigned char *buffer;
unsigned int buflen;
int stat;
- struct request_sense *sense;
+ struct scsi_sense_hdr *sshdr;
unsigned char data_direction;
int quiet;
int timeout;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index c0e68f903011..ff20b677fb9f 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -438,6 +438,9 @@ struct cgroup {
/* used to store eBPF programs */
struct cgroup_bpf bpf;
+ /* If there is block congestion on this cgroup. */
+ atomic_t congestion_count;
+
/* ids of the ancestors at each level including self */
int ancestor_ids[];
};
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c9fdf6f57913..32c553556bbd 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -554,6 +554,36 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
}
/**
+ * cgroup_ancestor - find ancestor of cgroup
+ * @cgrp: cgroup to find ancestor of
+ * @ancestor_level: level of ancestor to find starting from root
+ *
+ * Find ancestor of cgroup at specified level starting from root if it exists
+ * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
+ * @ancestor_level.
+ *
+ * This function is safe to call as long as @cgrp is accessible.
+ */
+static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
+ int ancestor_level)
+{
+ struct cgroup *ptr;
+
+ if (cgrp->level < ancestor_level)
+ return NULL;
+
+ for (ptr = cgrp;
+ ptr && ptr->level > ancestor_level;
+ ptr = cgroup_parent(ptr))
+ ;
+
+ if (ptr && ptr->level == ancestor_level)
+ return ptr;
+
+ return NULL;
+}
+
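For illustration, the membership check a caller might build on this helper (names here are only illustrative; compare the existing cgroup_is_descendant()):

/* Sketch: does @cgrp live under @ancestor in the hierarchy? */
static bool under(struct cgroup *cgrp, struct cgroup *ancestor)
{
    return cgroup_ancestor(cgrp, ancestor->level) == ancestor;
}

Boundary cases fall out of the walk: cgroup_ancestor(cgrp, cgrp->level) is @cgrp itself, level 0 yields the hierarchy root, and any level deeper than @cgrp yields NULL.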
+/**
* task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
* @task: the task to be tested
* @ancestor: possible ancestor of @task's cgroup
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index b7cfa037e593..08b1aa70a38d 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -38,6 +38,8 @@
#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */
/* parents need enable during gate/ungate, set rate and re-parent */
#define CLK_OPS_PARENT_ENABLE BIT(12)
+/* duty cycle call may be forwarded to the parent clock */
+#define CLK_DUTY_CYCLE_PARENT BIT(13)
struct clk;
struct clk_hw;
@@ -67,6 +69,17 @@ struct clk_rate_request {
};
/**
+ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
+ *
+ * @num: Numerator of the duty cycle ratio
+ * @den: Denominator of the duty cycle ratio
+ */
+struct clk_duty {
+ unsigned int num;
+ unsigned int den;
+};
+
+/**
* struct clk_ops - Callback operations for hardware clocks; these are to
* be provided by the clock implementation, and will be called by drivers
* through the clk_* api.
@@ -169,6 +182,15 @@ struct clk_rate_request {
* by the second argument. Valid values for degrees are
* 0-359. Return 0 on success, otherwise -EERROR.
*
+ * @get_duty_cycle: Queries the hardware to get the current duty cycle ratio
+ * of a clock. Returned values denominator cannot be 0 and must be
+ * superior or equal to the numerator.
+ *
+ * @set_duty_cycle: Apply the duty cycle ratio to this clock signal specified by
+ * the numerator (2nd argument) and denominator (3rd argument).
+ * The arguments must form a valid ratio (denominator > 0
+ * and >= numerator). Return 0 on success, otherwise -EERROR.
+ *
* @init: Perform platform-specific initialization magic.
* This is not used by any of the basic clock types.
* Please consider other ways of solving initialization problems
@@ -218,6 +240,10 @@ struct clk_ops {
unsigned long parent_accuracy);
int (*get_phase)(struct clk_hw *hw);
int (*set_phase)(struct clk_hw *hw, int degrees);
+ int (*get_duty_cycle)(struct clk_hw *hw,
+ struct clk_duty *duty);
+ int (*set_duty_cycle)(struct clk_hw *hw,
+ struct clk_duty *duty);
void (*init)(struct clk_hw *hw);
void (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
};
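A minimal provider-side sketch of the two new hooks, for a hypothetical clock whose duty cycle is a pair of high-count/period-count registers (all foo_* names and register offsets are invented):

static int foo_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
    struct foo_clk *foo = container_of(hw, struct foo_clk, hw);

    duty->num = readl(foo->base + FOO_HIGH_CNT);
    duty->den = readl(foo->base + FOO_PERIOD_CNT);
    return duty->den ? 0 : -EINVAL;  /* denominator must not be 0 */
}

static int foo_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
    struct foo_clk *foo = container_of(hw, struct foo_clk, hw);

    /* the core only passes valid ratios: den > 0 and num <= den */
    writel(duty->num, foo->base + FOO_HIGH_CNT);
    writel(duty->den, foo->base + FOO_PERIOD_CNT);
    return 0;
}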
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 0dbd0885b2c2..4f750c481b82 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -142,6 +142,27 @@ int clk_set_phase(struct clk *clk, int degrees);
int clk_get_phase(struct clk *clk);
/**
+ * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @num: numerator of the duty cycle ratio to be applied
+ * @den: denominator of the duty cycle ratio to be applied
+ *
+ * Adjust the duty cycle of a clock signal to the specified ratio. Returns 0 on
+ * success, -EERROR otherwise.
+ */
+int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);
+
+/**
+ * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @scale: scaling factor to be applied to represent the ratio as an integer
+ *
+ * Returns the duty cycle ratio multiplied by the scale provided, otherwise
+ * returns -EERROR.
+ */
+int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
+
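Consumer-side, the pairing looks like this (a sketch; the 50% ratio and the scale of 100 are arbitrary):

/* Request a 50% duty cycle, then read it back as a percentage. */
int ret = clk_set_duty_cycle(clk, 1, 2);

if (!ret)
    pr_info("duty cycle: %d%%\n", clk_get_scaled_duty_cycle(clk, 100));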
+/**
* clk_is_match - check if two clk's point to the same hardware clock
* @p: clk compared against q
* @q: clk compared against p
@@ -183,6 +204,18 @@ static inline long clk_get_phase(struct clk *clk)
return -ENOTSUPP;
}
+static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
+ unsigned int den)
+{
+ return -ENOTSUPP;
+}
+
+static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
+ unsigned int scale)
+{
+ return 0;
+}
+
static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
return p == q;
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 7dff1963c185..308918928767 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -194,6 +194,9 @@ extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
+extern void
+clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles);
+extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now);
extern u64
clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index c68acc47da57..1a3c4f37e908 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -115,11 +115,6 @@ typedef compat_ulong_t compat_aio_context_t;
struct compat_sel_arg_struct;
struct rusage;
-struct compat_itimerspec {
- struct compat_timespec it_interval;
- struct compat_timespec it_value;
-};
-
struct compat_utimbuf {
compat_time_t actime;
compat_time_t modtime;
@@ -300,10 +295,6 @@ extern int compat_get_timespec(struct timespec *, const void __user *);
extern int compat_put_timespec(const struct timespec *, void __user *);
extern int compat_get_timeval(struct timeval *, const void __user *);
extern int compat_put_timeval(const struct timeval *, void __user *);
-extern int get_compat_itimerspec64(struct itimerspec64 *its,
- const struct compat_itimerspec __user *uits);
-extern int put_compat_itimerspec64(const struct itimerspec64 *its,
- struct compat_itimerspec __user *uits);
struct compat_iovec {
compat_uptr_t iov_base;
@@ -1028,6 +1019,17 @@ static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
return ctv;
}
+/*
+ * Kernel code should not call compat syscalls (i.e., compat_sys_xyzyyz())
+ * directly. Instead, use one of the functions which work equivalently, such
+ * as the kcompat_sys_xyzyyz() functions prototyped below.
+ */
+
+int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
+ struct compat_statfs64 __user * buf);
+int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
+ struct compat_statfs64 __user * buf);
+
#else /* !CONFIG_COMPAT */
#define is_compat_task() (0)
diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h
index 31f2774f1994..e70bfd1d2c3f 100644
--- a/include/linux/compat_time.h
+++ b/include/linux/compat_time.h
@@ -17,7 +17,16 @@ struct compat_timeval {
s32 tv_usec;
};
+struct compat_itimerspec {
+ struct compat_timespec it_interval;
+ struct compat_timespec it_value;
+};
+
extern int compat_get_timespec64(struct timespec64 *, const void __user *);
extern int compat_put_timespec64(const struct timespec64 *, void __user *);
+extern int get_compat_itimerspec64(struct itimerspec64 *its,
+ const struct compat_itimerspec __user *uits);
+extern int put_compat_itimerspec64(const struct itimerspec64 *its,
+ struct compat_itimerspec __user *uits);
#endif /* _LINUX_COMPAT_TIME_H */
diff --git a/include/linux/console.h b/include/linux/console.h
index dfd6b0e97855..f59f3dbca65c 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -21,6 +21,7 @@ struct console_font_op;
struct console_font;
struct module;
struct tty_struct;
+struct notifier_block;
/*
* this is what the terminal answers to a ESC-Z or csi0c query.
@@ -220,4 +221,8 @@ static inline bool vgacon_text_force(void) { return false; }
extern void console_init(void);
+/* For deferred console takeover */
+void dummycon_register_output_notifier(struct notifier_block *nb);
+void dummycon_unregister_output_notifier(struct notifier_block *nb);
+
#endif /* _LINUX_CONSOLE_H */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 3233fbe23594..218df7f4d3e1 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -55,6 +55,8 @@ extern ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_l1tf(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -103,6 +105,7 @@ extern void cpus_write_lock(void);
extern void cpus_write_unlock(void);
extern void cpus_read_lock(void);
extern void cpus_read_unlock(void);
+extern int cpus_read_trylock(void);
extern void lockdep_assert_cpus_held(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
@@ -115,6 +118,7 @@ static inline void cpus_write_lock(void) { }
static inline void cpus_write_unlock(void) { }
static inline void cpus_read_lock(void) { }
static inline void cpus_read_unlock(void) { }
+static inline int cpus_read_trylock(void) { return true; }
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
@@ -166,4 +170,23 @@ void cpuhp_report_idle_dead(void);
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+enum cpuhp_smt_control {
+ CPU_SMT_ENABLED,
+ CPU_SMT_DISABLED,
+ CPU_SMT_FORCE_DISABLED,
+ CPU_SMT_NOT_SUPPORTED,
+};
+
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+extern enum cpuhp_smt_control cpu_smt_control;
+extern void cpu_smt_disable(bool force);
+extern void cpu_smt_check_topology_early(void);
+extern void cpu_smt_check_topology(void);
+#else
+# define cpu_smt_control (CPU_SMT_ENABLED)
+static inline void cpu_smt_disable(bool force) { }
+static inline void cpu_smt_check_topology_early(void) { }
+static inline void cpu_smt_check_topology(void) { }
+#endif
+
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 8796ba387152..4cf06a64bc02 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -164,6 +164,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
+ CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_ONLINE_DYN,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index bf53d893ad02..147bdec42215 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -115,12 +115,17 @@ extern struct cpumask __cpu_active_mask;
#define cpu_active(cpu) ((cpu) == 0)
#endif
-/* verify cpu argument to cpumask_* operators */
-static inline unsigned int cpumask_check(unsigned int cpu)
+static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
- WARN_ON_ONCE(cpu >= nr_cpumask_bits);
+ WARN_ON_ONCE(cpu >= bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+}
+
+/* verify cpu argument to cpumask_* operators */
+static inline unsigned int cpumask_check(unsigned int cpu)
+{
+ cpu_max_bits_warn(cpu, nr_cpumask_bits);
return cpu;
}
@@ -154,6 +159,13 @@ static inline unsigned int cpumask_next_and(int n,
return n+1;
}
+static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
+ int start, bool wrap)
+{
+ /* cpu0 until the stop condition (wrap set and back at cpu0), then nr_cpumask_bits to end the iteration */
+ return (wrap && n == 0);
+}
+
/* cpu must be a valid cpu, ie 0, so there's no other choice. */
static inline unsigned int cpumask_any_but(const struct cpumask *mask,
unsigned int cpu)
diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h
new file mode 100644
index 000000000000..62c4b7790a28
--- /dev/null
+++ b/include/linux/crc32poly.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CRC32_POLY_H
+#define _LINUX_CRC32_POLY_H
+
+/*
+ * There are multiple CRC polynomials in common use, but this is *the*
+ * standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRC32_POLY_LE 0xedb88320
+#define CRC32_POLY_BE 0x04c11db7
+
+/*
+ * This is the CRC32c polynomial, as outlined by Castagnoli.
+ * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
+ * x^8+x^6+x^0
+ */
+#define CRC32C_POLY_LE 0x82F63B78
+
+#endif /* _LINUX_CRC32_POLY_H */
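The LE constant drops straight into the textbook bit-at-a-time loop; a self-contained sketch (the kernel's lib/crc32.c is table-driven, this is only the reference form):

#include <linux/types.h>
#include <linux/crc32poly.h>

static u32 crc32_le_bitwise(u32 crc, const u8 *p, size_t len)
{
    int i;

    while (len--) {
        crc ^= *p++;
        for (i = 0; i < 8; i++)
            crc = (crc >> 1) ^ ((crc & 1) ? CRC32_POLY_LE : 0);
    }
    return crc;
}

Callers conventionally seed with ~0 and invert the result.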
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 631286535d0f..7eed6101c791 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -65,6 +65,12 @@ extern void groups_free(struct group_info *);
extern int in_group_p(kgid_t);
extern int in_egroup_p(kgid_t);
+extern int groups_search(const struct group_info *, kgid_t);
+
+extern int set_current_groups(struct group_info *);
+extern void set_groups(struct cred *, struct group_info *);
+extern bool may_setgroups(void);
+extern void groups_sort(struct group_info *);
#else
static inline void groups_free(struct group_info *group_info)
{
@@ -78,12 +84,11 @@ static inline int in_egroup_p(kgid_t grp)
{
return 1;
}
+static inline int groups_search(const struct group_info *group_info, kgid_t grp)
+{
+ return 1;
+}
#endif
-extern int set_current_groups(struct group_info *);
-extern void set_groups(struct cred *, struct group_info *);
-extern int groups_search(const struct group_info *, kgid_t);
-extern bool may_setgroups(void);
-extern void groups_sort(struct group_info *);
/*
* The security context of a task
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 6eb06101089f..e8839d3a7559 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -113,6 +113,11 @@
#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
/*
+ * Don't trigger module loading
+ */
+#define CRYPTO_NOLOAD 0x00008000
+
+/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_NEED_KEY 0x00000001
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 66c6e17e61e5..d32957b423d5 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -227,7 +227,6 @@ extern void d_instantiate(struct dentry *, struct inode *);
extern void d_instantiate_new(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
-extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
@@ -271,8 +270,6 @@ extern void d_rehash(struct dentry *);
extern void d_add(struct dentry *, struct inode *);
-extern void dentry_update_name_case(struct dentry *, const struct qstr *);
-
/* used for rename() and baskets */
extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);
diff --git a/include/linux/device.h b/include/linux/device.h
index 055a69dbcd18..2a562f4ded07 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -90,7 +90,7 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @num_vf: Called to find out how many virtual functions a device on this
* bus supports.
* @dma_configure: Called to setup DMA configuration on a device on
- this bus.
+ * this bus.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
@@ -384,6 +384,9 @@ int subsys_virtual_register(struct bus_type *subsys,
* @shutdown_pre: Called at shut-down time before driver shutdown.
* @ns_type: Callbacks so sysfs can determine namespaces.
* @namespace: Namespace the device belongs to within this class.
+ * @get_ownership: Allows class to specify uid/gid of the sysfs directories
+ * for the devices belonging to the class. Usually tied to
+ * device's namespace.
* @pm: The default device power management operations of this class.
* @p: The private data of the driver core, no one other than the
* driver core can touch this.
@@ -413,6 +416,8 @@ struct class {
const struct kobj_ns_type_operations *ns_type;
const void *(*namespace)(struct device *dev);
+ void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid);
+
const struct dev_pm_ops *pm;
struct subsys_private *p;
@@ -784,14 +789,16 @@ enum device_link_state {
* Device link flags.
*
* STATELESS: The core won't track the presence of supplier/consumer drivers.
- * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
+ * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
* PM_RUNTIME: If set, the runtime PM framework will use this link.
* RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
+ * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
*/
-#define DL_FLAG_STATELESS BIT(0)
-#define DL_FLAG_AUTOREMOVE BIT(1)
-#define DL_FLAG_PM_RUNTIME BIT(2)
-#define DL_FLAG_RPM_ACTIVE BIT(3)
+#define DL_FLAG_STATELESS BIT(0)
+#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
+#define DL_FLAG_PM_RUNTIME BIT(2)
+#define DL_FLAG_RPM_ACTIVE BIT(3)
+#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4)
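With the rename, a consumer that wants its link dropped at its own unbind now spells that out explicitly; a sketch (dev and supplier are whatever the caller already holds):

struct device_link *link;

/* Track supplier runtime PM; auto-drop when this consumer unbinds. */
link = device_link_add(dev, supplier,
                       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
if (!link)
    return -ENODEV;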
/**
* struct device_link - Device link representation.
@@ -886,6 +893,8 @@ struct dev_links_info {
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
* hardware supports 64-bit addresses for consistent allocations
* of such descriptors.
+ * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA
+ * limit than the device itself supports.
* @dma_pfn_offset: offset of the DMA memory range relative to RAM
* @dma_parms: A low level driver may set these to teach IOMMU code about
* segment limitations.
@@ -912,8 +921,6 @@ struct dev_links_info {
* @offline: Set after successful invocation of bus type's .offline().
* @of_node_reused: Set if the device-tree node is shared with an ancestor
* device.
- * @dma_32bit_limit: bridge limited to 32bit DMA even if the device itself
- * indicates support for a higher limit in the dma_mask field.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -967,6 +974,7 @@ struct device {
not all hardware supports
64 bit addresses for consistent
allocations of such descriptors. */
+ u64 bus_dma_mask; /* upstream dma_mask constraint */
unsigned long dma_pfn_offset;
struct device_dma_parameters *dma_parms;
@@ -1002,7 +1010,6 @@ struct device {
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
- bool dma_32bit_limit:1;
};
static inline struct device *kobj_to_dev(struct kobject *kobj)
@@ -1316,6 +1323,7 @@ extern const char *dev_driver_string(const struct device *dev);
struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags);
void device_link_del(struct device_link *link);
+void device_link_remove(void *consumer, struct device *supplier);
#ifdef CONFIG_PRINTK
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 085db2fee2d7..58725f890b5b 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -39,12 +39,12 @@ struct dma_buf_attachment;
/**
* struct dma_buf_ops - operations possible on struct dma_buf
- * @map_atomic: maps a page from the buffer into kernel address
+ * @map_atomic: [optional] maps a page from the buffer into kernel address
* space, users may not block until the subsequent unmap call.
* This callback must not sleep.
* @unmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
* This callback must not sleep.
- * @map: maps a page from the buffer into kernel address space.
+ * @map: [optional] maps a page from the buffer into kernel address space.
* @unmap: [optional] unmaps a page from the buffer.
* @vmap: [optional] creates a virtual mapping for the buffer into kernel
* address space. Same restrictions as for vmap and friends apply.
@@ -55,11 +55,11 @@ struct dma_buf_ops {
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
- * &device can access the provided &dma_buf. Exporters which support
- * buffer objects in special locations like VRAM or device-specific
- * carveout areas should check whether the buffer could be move to
- * system memory (or directly accessed by the provided device), and
- * otherwise need to fail the attach operation.
+ * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
+ * which support buffer objects in special locations like VRAM or
+ * device-specific carveout areas should check whether the buffer could
+ * be moved to system memory (or directly accessed by the provided
+ * device), and otherwise need to fail the attach operation.
*
* The exporter should also in general check whether the current
* allocation fulfills the DMA constraints of the new device. If this
@@ -77,8 +77,7 @@ struct dma_buf_ops {
* to signal that backing storage is already allocated and incompatible
* with the requirements of requesting device.
*/
- int (*attach)(struct dma_buf *, struct device *,
- struct dma_buf_attachment *);
+ int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
/**
* @detach:
@@ -206,8 +205,6 @@ struct dma_buf_ops {
* to be restarted.
*/
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
- void *(*map_atomic)(struct dma_buf *, unsigned long);
- void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*map)(struct dma_buf *, unsigned long);
void (*unmap)(struct dma_buf *, unsigned long, void *);
@@ -395,8 +392,6 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
-void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
-void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
diff --git a/include/linux/dma-direction.h b/include/linux/dma-direction.h
index 3649a031893a..9c96e30e6a0b 100644
--- a/include/linux/dma-direction.h
+++ b/include/linux/dma-direction.h
@@ -1,14 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_DIRECTION_H
#define _LINUX_DMA_DIRECTION_H
-/*
- * These definitions mirror those in pci.h, so they can be used
- * interchangeably with their PCI_ counterparts.
- */
+
enum dma_data_direction {
DMA_BIDIRECTIONAL = 0,
DMA_TO_DEVICE = 1,
DMA_FROM_DEVICE = 2,
DMA_NONE = 3,
};
+
#endif
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index eb9b05aa5aea..02dba8cd033d 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -166,7 +166,8 @@ struct dma_fence_ops {
* released when the fence is signalled (through e.g. the interrupt
* handler).
*
- * This callback is mandatory.
+ * This callback is optional. If this callback is not present, then the
+ * driver must always have signaling enabled.
*/
bool (*enable_signaling)(struct dma_fence *fence);
@@ -190,11 +191,14 @@ struct dma_fence_ops {
/**
* @wait:
*
- * Custom wait implementation, or dma_fence_default_wait.
+ * Custom wait implementation, defaults to dma_fence_default_wait() if
+ * not set.
*
- * Must not be NULL, set to dma_fence_default_wait for default implementation.
- * the dma_fence_default_wait implementation should work for any fence, as long
- * as enable_signaling works correctly.
+ * The dma_fence_default_wait implementation should work for any fence, as long
+ * as @enable_signaling works correctly. This hook allows drivers to
+ * have an optimized version for the case where a process context is
+ * already available, e.g. if @enable_signaling for the general case
+ * needs to set up a worker thread.
*
* Must return -ERESTARTSYS if the wait is intr = true and the wait was
* interrupted, and remaining jiffies if fence has signaled, or 0 if wait
@@ -202,7 +206,7 @@ struct dma_fence_ops {
* which should be treated as if the fence is signaled. For example a hardware
* lockup could be reported like that.
*
- * This callback is mandatory.
+ * This callback is optional.
*/
signed long (*wait)(struct dma_fence *fence,
bool intr, signed long timeout);
@@ -218,17 +222,6 @@ struct dma_fence_ops {
void (*release)(struct dma_fence *fence);
/**
- * @fill_driver_data:
- *
- * Callback to fill in free-form debug info.
- *
- * Returns amount of bytes filled, or negative error on failure.
- *
- * This callback is optional.
- */
- int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
-
- /**
* @fence_value_str:
*
* Callback to fill in free-form debug info specific to this fence, like
@@ -242,8 +235,9 @@ struct dma_fence_ops {
* @timeline_value_str:
*
* Fills in the current value of the timeline as a string, like the
- * sequence number. This should match what @fill_driver_data prints for
- * the most recently signalled fence (assuming no delayed signalling).
+ * sequence number. Note that the specific fence passed to this function
+ * should not matter, drivers should only use it to look up the
+ * corresponding timeline structures.
*/
void (*timeline_value_str)(struct dma_fence *fence,
char *str, int size);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f9cc309507d9..1db6a6b46d0d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -538,10 +538,17 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
- WARN_ON(irqs_disabled());
if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
return;
+ /*
+ * On non-coherent platforms which implement DMA-coherent buffers via
+ * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
+ * this far in IRQ context is a) at risk of a BUG_ON() or trying to
+ * sleep on some machines, and b) an indication that the driver is
+ * probably misusing the coherent API anyway.
+ */
+ WARN_ON(irqs_disabled());
if (!ops->free || !cpu_addr)
return;
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 10b2654d549b..a0aa00cc909d 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -44,4 +44,12 @@ static inline void arch_sync_dma_for_cpu(struct device *dev,
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+void arch_sync_dma_for_cpu_all(struct device *dev);
+#else
+static inline void arch_sync_dma_for_cpu_all(struct device *dev)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
+
#endif /* _LINUX_DMA_NONCOHERENT_H */
diff --git a/include/linux/dma/pxa-dma.h b/include/linux/dma/pxa-dma.h
index e56ec7af4fd7..9fc594f69eff 100644
--- a/include/linux/dma/pxa-dma.h
+++ b/include/linux/dma/pxa-dma.h
@@ -9,6 +9,15 @@ enum pxad_chan_prio {
PXAD_PRIO_LOWEST,
};
+/**
+ * struct pxad_param - dma channel request parameters
+ * @drcmr: requestor line number
+ * @prio: minimal mandatory priority of the channel
+ *
+ * If a requested channel is granted, its priority will be at least @prio,
+ * i.e. if PXAD_PRIO_LOW is required, the requested channel will be either
+ * PXAD_PRIO_LOW, PXAD_PRIO_NORMAL or PXAD_PRIO_HIGHEST.
+ */
struct pxad_param {
unsigned int drcmr;
enum pxad_chan_prio prio;
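A requester-side sketch (the requestor line number and the "tx" channel name are invented; pxad_filter_fn is assumed to be the pxa driver's filter hook):

dma_cap_mask_t mask;
struct pxad_param param = {
    .drcmr = 68,               /* invented requestor line */
    .prio  = PXAD_PRIO_NORMAL, /* may be granted NORMAL or HIGHEST */
};
struct dma_chan *chan;

dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
                                        &param, dev, "tx");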
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 56add823f190..401e4b254e30 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -894,6 +894,16 @@ typedef struct _efi_file_handle {
void *flush;
} efi_file_handle_t;
+typedef struct {
+ u64 revision;
+ u32 open_volume;
+} efi_file_io_interface_32_t;
+
+typedef struct {
+ u64 revision;
+ u64 open_volume;
+} efi_file_io_interface_64_t;
+
typedef struct _efi_file_io_interface {
u64 revision;
int (*open_volume)(struct _efi_file_io_interface *,
@@ -988,14 +998,12 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timespec64 *ts);
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
#ifdef CONFIG_X86
-extern void efi_late_init(void);
extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
bool nonblocking);
extern void efi_find_mirror(void);
#else
-static inline void efi_late_init(void) {}
static inline void efi_free_boot_services(void) {}
static inline efi_status_t efi_query_variable_store(u32 attributes,
@@ -1651,4 +1659,7 @@ struct linux_efi_tpm_eventlog {
extern int efi_tpm_eventlog_init(void);
+/* Workqueue to queue EFI Runtime Services */
+extern struct workqueue_struct *efi_rts_wq;
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 79563840c295..572e11bb8696 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -59,8 +59,7 @@ struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
unsigned int rxqs);
#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
-struct sk_buff **eth_gro_receive(struct sk_buff **head,
- struct sk_buff *skb);
+struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
int eth_gro_complete(struct sk_buff *skb, int nhoff);
/* Reserved Ethernet Addresses per IEEE 802.1Q */
diff --git a/include/linux/file.h b/include/linux/file.h
index 279720db984a..6b2fb032416c 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -17,9 +17,12 @@ extern void fput(struct file *);
struct file_operations;
struct vfsmount;
struct dentry;
+struct inode;
struct path;
-extern struct file *alloc_file(const struct path *, fmode_t mode,
- const struct file_operations *fop);
+extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *,
+ const char *, int flags, const struct file_operations *);
+extern struct file *alloc_file_clone(struct file *, int flags,
+ const struct file_operations *);
static inline void fput_light(struct file *file, int fput_needed)
{
@@ -78,7 +81,6 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
extern bool get_close_on_exec(unsigned int fd);
-extern void put_filp(struct file *);
extern int get_unused_fd_flags(unsigned flags);
extern void put_unused_fd(unsigned int fd);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index c73dd7396886..5d565c50bcb2 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -32,6 +32,7 @@ struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;
struct xdp_buff;
+struct sock_reuseport;
/* ArgX, context and stack frame pointer register positions. Note,
* Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -537,6 +538,20 @@ struct sk_msg_buff {
struct list_head list;
};
+struct bpf_redirect_info {
+ u32 ifindex;
+ u32 flags;
+ struct bpf_map *map;
+ struct bpf_map *map_to_flush;
+ unsigned long map_owner;
+ u32 kern_flags;
+};
+
+DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
+
+/* flags for bpf_redirect_info kern_flags */
+#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
+
/* Compute the linear packet data range [data, data_end) which
* will be accessed by various program types (cls_bpf, act_bpf,
* lwt, ...). Subsystems allowing direct data access must (!)
@@ -738,6 +753,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
+void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
unsigned int len);
@@ -765,6 +781,27 @@ static inline bool bpf_dump_raw_ok(void)
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
+static inline bool xdp_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline void xdp_set_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline void xdp_clear_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
+}
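These three helpers bracket code that may free XDP frames from outside the owning NAPI context; the intended pattern is roughly:

xdp_set_return_frame_no_direct();
/* ... run/flush XDP programs; any xdp_return_frame() issued in
 * this window sees the flag and skips the napi_direct recycling
 * fast path ...
 */
xdp_clear_return_frame_no_direct();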
+
static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
unsigned int pktlen)
{
@@ -798,6 +835,20 @@ void bpf_warn_invalid_xdp_action(u32 act);
struct sock *do_sk_redirect_map(struct sk_buff *skb);
struct sock *do_msg_redirect_map(struct sk_msg_buff *md);
+#ifdef CONFIG_INET
+struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
+ struct bpf_prog *prog, struct sk_buff *skb,
+ u32 hash);
+#else
+static inline struct sock *
+bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
+ struct bpf_prog *prog, struct sk_buff *skb,
+ u32 hash)
+{
+ return NULL;
+}
+#endif
+
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 805bf22898cf..1ec33fd0423f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -148,6 +148,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* Has write method(s) */
#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
+#define FMODE_OPENED ((__force fmode_t)0x80000)
+#define FMODE_CREATED ((__force fmode_t)0x100000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
@@ -275,6 +278,7 @@ struct writeback_control;
/*
* Write life time hint values.
+ * Stored in struct inode as u8.
*/
enum rw_hint {
WRITE_LIFE_NOT_SET = 0,
@@ -609,8 +613,8 @@ struct inode {
struct timespec64 i_ctime;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
- unsigned int i_blkbits;
- enum rw_hint i_write_hint;
+ u8 i_blkbits;
+ u8 i_write_hint;
blkcnt_t i_blocks;
#ifdef __NEED_I_SIZE_ORDERED
@@ -685,6 +689,17 @@ static inline int inode_unhashed(struct inode *inode)
}
/*
+ * __mark_inode_dirty expects inodes to be hashed. Since we don't
+ * want special inodes in the fileset inode space, we make them
* appear hashed, but do not put them on any lists. hlist_del()
+ * will work fine and require no locking.
+ */
+static inline void inode_fake_hash(struct inode *inode)
+{
+ hlist_add_fake(&inode->i_hash);
+}
+
+/*
* inode->i_mutex nesting subclasses for the lock validator:
*
* 0: the object of the current VFS operation
@@ -1776,7 +1791,7 @@ struct inode_operations {
int (*update_time)(struct inode *, struct timespec64 *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
- umode_t create_mode, int *opened);
+ umode_t create_mode);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
int (*set_acl)(struct inode *, struct posix_acl *, int);
} ____cacheline_aligned;
@@ -2014,6 +2029,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
* I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
* and work dirs among overlayfs mounts.
*
+ * I_CREATING New object's inode in the middle of setting up.
+ *
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
#define I_DIRTY_SYNC (1 << 0)
@@ -2034,7 +2051,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
#define __I_DIRTY_TIME_EXPIRED 12
#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
#define I_WB_SWITCH (1 << 13)
-#define I_OVL_INUSE (1 << 14)
+#define I_OVL_INUSE (1 << 14)
+#define I_CREATING (1 << 15)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2420,7 +2438,10 @@ extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
const char *, int, umode_t);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
-extern struct file *filp_clone_open(struct file *);
+static inline struct file *file_clone_open(struct file *file)
+{
+ return dentry_open(&file->f_path, file->f_flags, file->f_cred);
+}
extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname_flags(const char __user *, int, int *);
@@ -2428,13 +2449,8 @@ extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
extern void putname(struct filename *name);
-enum {
- FILE_CREATED = 1,
- FILE_OPENED = 2
-};
extern int finish_open(struct file *file, struct dentry *dentry,
- int (*open)(struct inode *, struct file *),
- int *opened);
+ int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);
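Putting the pieces of this hunk together: a filesystem's ->atomic_open() no longer gets the int *opened out-parameter. Creation is reported by setting FMODE_CREATED on the file (replacing FILE_CREATED), and FMODE_OPENED is set by the VFS once finish_open() succeeds. A minimal sketch, where example_lookup_or_create() and example_open_cb() are hypothetical:

static int example_atomic_open(struct inode *dir, struct dentry *dentry,
			       struct file *file, unsigned open_flag,
			       umode_t create_mode)
{
	bool created = false;
	struct inode *inode;

	/* Look up the name, or create it on O_CREAT, setting 'created' */
	inode = example_lookup_or_create(dir, dentry, open_flag,
					 create_mode, &created);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (created)
		file->f_mode |= FMODE_CREATED;	/* replaces *opened |= FILE_CREATED */

	return finish_open(file, dentry, example_open_cb);
}
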
/* fs/ioctl.c */
@@ -2622,8 +2638,6 @@ static inline int filemap_fdatawait(struct address_space *mapping)
extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
loff_t lend);
-extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
- loff_t lend);
extern int filemap_write_and_wait(struct address_space *mapping);
extern int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
@@ -2918,6 +2932,7 @@ extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
#endif
extern void unlock_new_inode(struct inode *);
+extern void discard_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
extern void evict_inodes(struct super_block *sb);
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index b462d9ea8007..c1f003aadcce 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -11,9 +11,8 @@
/*
* qoriq ptp registers
- * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
*/
-struct qoriq_ptp_registers {
+struct ctrl_regs {
u32 tmr_ctrl; /* Timer control register */
u32 tmr_tevent; /* Timestamp event register */
u32 tmr_temask; /* Timer event mask register */
@@ -28,22 +27,47 @@ struct qoriq_ptp_registers {
u8 res1[4];
u32 tmroff_h; /* Timer offset high */
u32 tmroff_l; /* Timer offset low */
- u8 res2[8];
+};
+
+struct alarm_regs {
u32 tmr_alarm1_h; /* Timer alarm 1 high register */
u32 tmr_alarm1_l; /* Timer alarm 1 low register */
u32 tmr_alarm2_h; /* Timer alarm 2 high register */
u32 tmr_alarm2_l; /* Timer alarm 2 high register */
- u8 res3[48];
+};
+
+struct fiper_regs {
u32 tmr_fiper1; /* Timer fixed period interval */
u32 tmr_fiper2; /* Timer fixed period interval */
u32 tmr_fiper3; /* Timer fixed period interval */
- u8 res4[20];
+};
+
+struct etts_regs {
u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
};
+struct qoriq_ptp_registers {
+ struct ctrl_regs __iomem *ctrl_regs;
+ struct alarm_regs __iomem *alarm_regs;
+ struct fiper_regs __iomem *fiper_regs;
+ struct etts_regs __iomem *etts_regs;
+};
+
+/* Offset definitions for the four register groups */
+#define CTRL_REGS_OFFSET 0x0
+#define ALARM_REGS_OFFSET 0x40
+#define FIPER_REGS_OFFSET 0x80
+#define ETTS_REGS_OFFSET 0xa0
+
+#define FMAN_CTRL_REGS_OFFSET 0x80
+#define FMAN_ALARM_REGS_OFFSET 0xb8
+#define FMAN_FIPER_REGS_OFFSET 0xd0
+#define FMAN_ETTS_REGS_OFFSET 0xe0
+
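With qoriq_ptp_registers now holding per-group pointers instead of one flat layout, the driver's probe path presumably assigns each pointer from the mapped base using the offsets above (plain eTSEC layout vs. the FMan one). A sketch under that assumption:

static void example_qoriq_ptp_init_regs(struct qoriq_ptp *qoriq_ptp,
					bool is_fman)
{
	void __iomem *base = qoriq_ptp->base;

	if (is_fman) {
		qoriq_ptp->regs.ctrl_regs  = base + FMAN_CTRL_REGS_OFFSET;
		qoriq_ptp->regs.alarm_regs = base + FMAN_ALARM_REGS_OFFSET;
		qoriq_ptp->regs.fiper_regs = base + FMAN_FIPER_REGS_OFFSET;
		qoriq_ptp->regs.etts_regs  = base + FMAN_ETTS_REGS_OFFSET;
	} else {
		qoriq_ptp->regs.ctrl_regs  = base + CTRL_REGS_OFFSET;
		qoriq_ptp->regs.alarm_regs = base + ALARM_REGS_OFFSET;
		qoriq_ptp->regs.fiper_regs = base + FIPER_REGS_OFFSET;
		qoriq_ptp->regs.etts_regs  = base + ETTS_REGS_OFFSET;
	}
}
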
/* Bit definitions for the TMR_CTRL register */
#define ALM1P (1<<31) /* Alarm1 output polarity */
#define ALM2P (1<<30) /* Alarm2 output polarity */
@@ -103,12 +127,16 @@ struct qoriq_ptp_registers {
#define DRIVER "ptp_qoriq"
-#define DEFAULT_CKSEL 1
#define N_EXT_TS 2
-#define REG_SIZE sizeof(struct qoriq_ptp_registers)
+
+#define DEFAULT_CKSEL 1
+#define DEFAULT_TMR_PRSC 2
+#define DEFAULT_FIPER1_PERIOD 1000000000
+#define DEFAULT_FIPER2_PERIOD 100000
struct qoriq_ptp {
- struct qoriq_ptp_registers __iomem *regs;
+ void __iomem *base;
+ struct qoriq_ptp_registers regs;
spinlock_t lock; /* protects regs */
struct ptp_clock *clock;
struct ptp_clock_info caps;
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 4fe8f289b3f6..faebf0ca0686 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -45,7 +45,7 @@ struct fwnode_endpoint {
struct fwnode_reference_args {
struct fwnode_handle *fwnode;
unsigned int nargs;
- unsigned int args[NR_FWNODE_REFERENCE_ARGS];
+ u64 args[NR_FWNODE_REFERENCE_ARGS];
};
/**
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 6cb8a5789668..57864422a2c8 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/uuid.h>
+#include <linux/blk_types.h>
#ifdef CONFIG_BLOCK
@@ -82,10 +83,10 @@ struct partition {
} __attribute__((packed));
struct disk_stats {
- unsigned long sectors[2]; /* READs and WRITEs */
- unsigned long ios[2];
- unsigned long merges[2];
- unsigned long ticks[2];
+ unsigned long sectors[NR_STAT_GROUPS];
+ unsigned long ios[NR_STAT_GROUPS];
+ unsigned long merges[NR_STAT_GROUPS];
+ unsigned long ticks[NR_STAT_GROUPS];
unsigned long io_ticks;
unsigned long time_in_queue;
};
@@ -353,6 +354,11 @@ static inline void free_part_stats(struct hd_struct *part)
#endif /* CONFIG_SMP */
+#define part_stat_read_accum(part, field) \
+ (part_stat_read(part, field[STAT_READ]) + \
+ part_stat_read(part, field[STAT_WRITE]) + \
+ part_stat_read(part, field[STAT_DISCARD]))
+
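For illustration, the accumulator sums one counter over the READ, WRITE and DISCARD groups; part is a hypothetical struct hd_struct *:

/* Total sectors across all three stat groups, diskstats-style */
unsigned long total = part_stat_read_accum(part, sectors);
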
#define part_stat_add(cpu, part, field, addnd) do { \
__part_stat_add((cpu), (part), field, addnd); \
if ((part)->partno) \
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 91ed23468530..39745b8bdd65 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -14,7 +14,7 @@
#include <linux/errno.h>
-/* see Documentation/gpio/gpio-legacy.txt */
+/* see Documentation/driver-api/gpio/legacy.rst */
/* make these flag values available regardless of GPIO kconfig options */
#define GPIOF_DIR_OUT (0 << 0)
diff --git a/include/linux/gpio/aspeed.h b/include/linux/gpio/aspeed.h
new file mode 100644
index 000000000000..1bfb3cdc86d0
--- /dev/null
+++ b/include/linux/gpio/aspeed.h
@@ -0,0 +1,15 @@
+#ifndef __GPIO_ASPEED_H
+#define __GPIO_ASPEED_H
+
+struct gpio_desc;
+
+struct aspeed_gpio_copro_ops {
+ int (*request_access)(void *data);
+ int (*release_access)(void *data);
+};
+
+int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
+ u16 *vreg_offset, u16 *dreg_offset, u8 *bit);
+int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc);
+int aspeed_gpio_copro_set_ops(const struct aspeed_gpio_copro_ops *ops, void *data);
+
+#endif /* __GPIO_ASPEED_H */
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 243112c7fa7d..21ddbe440030 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -41,11 +41,8 @@ enum gpiod_flags {
GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT,
GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
GPIOD_FLAGS_BIT_DIR_VAL,
- GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_FLAGS_BIT_DIR_SET |
- GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_OPEN_DRAIN,
- GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_FLAGS_BIT_DIR_SET |
- GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_DIR_VAL |
- GPIOD_FLAGS_BIT_OPEN_DRAIN,
+ GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_OPEN_DRAIN,
+ GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_OPEN_DRAIN,
};
#ifdef CONFIG_GPIOLIB
@@ -145,6 +142,7 @@ int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
int gpiod_to_irq(const struct gpio_desc *desc);
+void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
/* Convert between the old gpio_ and new gpiod_ interfaces */
struct gpio_desc *gpio_to_desc(unsigned gpio);
@@ -467,6 +465,12 @@ static inline int gpiod_to_irq(const struct gpio_desc *desc)
return -EINVAL;
}
+static inline void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
{
return ERR_PTR(-EINVAL);
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 5382b5183b7e..0ea328e71ec9 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -201,6 +201,8 @@ static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
* @reg_set: output set register (out=high) for generic GPIO
* @reg_clr: output clear register (out=low) for generic GPIO
* @reg_dir: direction setting register for generic GPIO
+ * @bgpio_dir_inverted: indicates that the direction register is inverted
+ * (gpiolib private state variable)
* @bgpio_bits: number of register bits used for a generic GPIO i.e.
* <register width> * 8
* @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep
@@ -267,6 +269,7 @@ struct gpio_chip {
void __iomem *reg_set;
void __iomem *reg_clr;
void __iomem *reg_dir;
+ bool bgpio_dir_inverted;
int bgpio_bits;
spinlock_t bgpio_lock;
unsigned long bgpio_data;
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index e5fd2707b6df..9493d4a388db 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -93,6 +93,7 @@ enum hwmon_temp_attributes {
#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm)
#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm)
#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm)
+#define HWMON_T_LCRIT_ALARM BIT(hwmon_temp_lcrit_alarm)
#define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm)
#define HWMON_T_FAULT BIT(hwmon_temp_fault)
#define HWMON_T_OFFSET BIT(hwmon_temp_offset)
@@ -187,12 +188,16 @@ enum hwmon_power_attributes {
hwmon_power_cap_hyst,
hwmon_power_cap_max,
hwmon_power_cap_min,
+ hwmon_power_min,
hwmon_power_max,
hwmon_power_crit,
+ hwmon_power_lcrit,
hwmon_power_label,
hwmon_power_alarm,
hwmon_power_cap_alarm,
+ hwmon_power_min_alarm,
hwmon_power_max_alarm,
+ hwmon_power_lcrit_alarm,
hwmon_power_crit_alarm,
};
@@ -213,12 +218,16 @@ enum hwmon_power_attributes {
#define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst)
#define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max)
#define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min)
+#define HWMON_P_MIN BIT(hwmon_power_min)
#define HWMON_P_MAX BIT(hwmon_power_max)
+#define HWMON_P_LCRIT BIT(hwmon_power_lcrit)
#define HWMON_P_CRIT BIT(hwmon_power_crit)
#define HWMON_P_LABEL BIT(hwmon_power_label)
#define HWMON_P_ALARM BIT(hwmon_power_alarm)
#define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm)
+#define HWMON_P_MIN_ALARM BIT(hwmon_power_min_alarm)
#define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm)
+#define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm)
#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)
enum hwmon_energy_attributes {
@@ -389,4 +398,27 @@ devm_hwmon_device_register_with_info(struct device *dev,
void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);
+/**
+ * hwmon_is_bad_char - Is the char invalid in a hwmon name
+ * @ch: the char to be considered
+ *
+ * hwmon_is_bad_char() can be used to determine if the given character
+ * may not be used in a hwmon name.
+ *
+ * Returns true if the char is invalid, false otherwise.
+ */
+static inline bool hwmon_is_bad_char(const char ch)
+{
+ switch (ch) {
+ case '-':
+ case '*':
+ case ' ':
+ case '\t':
+ case '\n':
+ return true;
+ default:
+ return false;
+ }
+}
+
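A short sketch of the intended use: a driver scrubbing a chip-supplied string before registering it as a hwmon name (replacing with '_' is an arbitrary choice here):

static void example_hwmon_sanitize(char *name)
{
	char *p;

	/* Replace characters hwmon refuses in device names */
	for (p = name; *p; p++)
		if (hwmon_is_bad_char(*p))
			*p = '_';
}
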
#endif
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 254cd34eeae2..465afb092fa7 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -140,9 +140,14 @@ extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
and probably just as fast.
Note that we use i2c_adapter here, because you do not need a specific
smbus adapter to call this function. */
-extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
- unsigned short flags, char read_write, u8 command,
- int size, union i2c_smbus_data *data);
+s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int protocol, union i2c_smbus_data *data);
+
+/* Unlocked flavor */
+s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int protocol, union i2c_smbus_data *data);
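The unlocked flavor is for callers that already hold the adapter lock, e.g. to keep several dependent SMBus transfers from being interleaved. A hedged sketch using i2c_lock_bus()/i2c_unlock_bus() with I2C_LOCK_SEGMENT; the helper name is hypothetical:

static s32 example_read_byte_locked(struct i2c_adapter *adapter, u16 addr,
				    u8 reg)
{
	union i2c_smbus_data data;
	s32 status;

	i2c_lock_bus(adapter, I2C_LOCK_SEGMENT);
	status = __i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_READ,
				  reg, I2C_SMBUS_BYTE_DATA, &data);
	/* ...further transfers that must not be interleaved... */
	i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);

	return status < 0 ? status : data.byte;
}
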
/* Now follow the 'nice' access routines. These also document the calling
conventions of i2c_smbus_xfer. */
diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h
new file mode 100644
index 000000000000..bdc0293fb6cb
--- /dev/null
+++ b/include/linux/idle_inject.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Linaro Ltd
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ */
+#ifndef __IDLE_INJECT_H__
+#define __IDLE_INJECT_H__
+
+/* private idle injection device structure */
+struct idle_inject_device;
+
+struct idle_inject_device *idle_inject_register(struct cpumask *cpumask);
+
+void idle_inject_unregister(struct idle_inject_device *ii_dev);
+
+int idle_inject_start(struct idle_inject_device *ii_dev);
+
+void idle_inject_stop(struct idle_inject_device *ii_dev);
+
+void idle_inject_set_duration(struct idle_inject_device *ii_dev,
+ unsigned int run_duration_ms,
+ unsigned int idle_duration_ms);
+
+void idle_inject_get_duration(struct idle_inject_device *ii_dev,
+ unsigned int *run_duration_ms,
+ unsigned int *idle_duration_ms);
+#endif /* __IDLE_INJECT_H__ */
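A hedged end-to-end sketch of this API: register a set of CPUs, configure the duty cycle, start, and later tear down. The example_ names are hypothetical and cpu_online_mask is just one possible mask (the cast drops its const qualifier):

static struct idle_inject_device *ii_dev;

static int example_idle_inject_setup(void)
{
	ii_dev = idle_inject_register((struct cpumask *)cpu_online_mask);
	if (!ii_dev)
		return -ENOMEM;

	/* Run 50 ms, then force 10 ms of idle, repeatedly */
	idle_inject_set_duration(ii_dev, 50, 10);

	return idle_inject_start(ii_dev);
}

static void example_idle_inject_teardown(void)
{
	idle_inject_stop(ii_dev);
	idle_inject_unregister(ii_dev);
}
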
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 8fe7e4306816..9c03a7d5e400 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1433,11 +1433,13 @@ struct ieee80211_ht_operation {
#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
/*
- * A-PMDU buffer sizes
- * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
+ * A-MPDU buffer sizes
+ * According to the HT spec the size varies from 8 to 64 frames;
+ * HE adds the ability to have up to 256 frames.
*/
-#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
+#define IEEE80211_MIN_AMPDU_BUF 0x8
+#define IEEE80211_MAX_AMPDU_BUF_HT 0x40
+#define IEEE80211_MAX_AMPDU_BUF 0x100
/* Spatial Multiplexing Power Save Modes (for capability) */
@@ -1539,6 +1541,106 @@ struct ieee80211_vht_operation {
__le16 basic_mcs_set;
} __packed;
+/**
+ * struct ieee80211_he_cap_elem - HE capabilities element
+ *
+ * This structure is the "HE capabilities element" fixed fields as
+ * described in P802.11ax_D2.0 sections 9.4.2.237.2 and 9.4.2.237.3
+ */
+struct ieee80211_he_cap_elem {
+ u8 mac_cap_info[5];
+ u8 phy_cap_info[9];
+} __packed;
+
+#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
+
+/**
+ * enum ieee80211_he_mcs_support - HE MCS support definitions
+ * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
+ * number of streams
+ * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported
+ * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported
+ * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported
+ *
+ * These definitions are used in each 2-bit subfield of the rx_mcs_*
+ * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are
+ * both split into 8 subfields by number of streams. These values indicate
+ * which MCSes are supported for the number of streams the value appears
+ * for.
+ */
+enum ieee80211_he_mcs_support {
+ IEEE80211_HE_MCS_SUPPORT_0_7 = 0,
+ IEEE80211_HE_MCS_SUPPORT_0_9 = 1,
+ IEEE80211_HE_MCS_SUPPORT_0_11 = 2,
+ IEEE80211_HE_MCS_NOT_SUPPORTED = 3,
+};
+
+/**
+ * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field
+ *
+ * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field
+ * described in P802.11ax_D2.0 section 9.4.2.237.4
+ *
+ * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ * widths less than 80MHz.
+ * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ * widths less than 80MHz.
+ * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ * width 160MHz.
+ * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ * width 160MHz.
+ * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for
+ * channel width 80p80MHz.
+ * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for
+ * channel width 80p80MHz.
+ */
+struct ieee80211_he_mcs_nss_supp {
+ __le16 rx_mcs_80;
+ __le16 tx_mcs_80;
+ __le16 rx_mcs_160;
+ __le16 tx_mcs_160;
+ __le16 rx_mcs_80p80;
+ __le16 tx_mcs_80p80;
+} __packed;
+
+/**
+ * struct ieee80211_he_operation - HE operation element
+ *
+ * This structure is the "HE operation element" fields as
+ * described in P802.11ax_D2.0 section 9.4.2.238
+ */
+struct ieee80211_he_operation {
+ __le32 he_oper_params;
+ __le16 he_mcs_nss_set;
+ /* Optional 0,1,3 or 4 bytes: depends on @he_oper_params */
+ u8 optional[0];
+} __packed;
+
+/**
+ * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
+ *
+ * This structure is the "MU AC Parameter Record" fields as
+ * described in P802.11ax_D2.0 section 9.4.2.240
+ */
+struct ieee80211_he_mu_edca_param_ac_rec {
+ u8 aifsn;
+ u8 ecw_min_max;
+ u8 mu_edca_timer;
+} __packed;
+
+/**
+ * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
+ *
+ * This structure is the "MU EDCA Parameter Set element" fields as
+ * described in P802.11ax_D2.0 section 9.4.2.240
+ */
+struct ieee80211_mu_edca_param_set {
+ u8 mu_qos_info;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_be;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_bk;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_vi;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
+} __packed;
/* 802.11ac VHT Capabilities */
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
@@ -1577,6 +1679,328 @@ struct ieee80211_vht_operation {
#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
+/* 802.11ax HE MAC capabilities */
+#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
+#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02
+#define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0
+
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1 0x00
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2 0x10
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3 0x20
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4 0x30
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5 0x40
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6 0x50
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7 0x60
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8 0x70
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK 0x70
+
+/* Link adaptation is split between byte HE_MAC_CAP1 and
+ * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE
+ * is set, in which case the following values apply:
+ * 0 = No feedback.
+ * 1 = reserved.
+ * 2 = Unsolicited feedback.
+ * 3 = both
+ */
+#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80
+
+#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01
+#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
+#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED 0x04
+#define IEEE80211_HE_MAC_CAP2_BSR 0x08
+#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10
+#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20
+#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40
+#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80
+
+#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU 0x01
+#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02
+#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04
+
+/* The maximum length of an A-MPDU is defined by the combination of the Maximum
+ * A-MPDU Length Exponent field in the HT capabilities, VHT capabilities and the
+ * same field in the HE capabilities.
+ */
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT 0x00
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1 0x08
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2 0x10
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED 0x18
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK 0x18
+#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG 0x20
+#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
+#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
+
+#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
+#define IEEE80211_HE_MAC_CAP4_QTP 0x02
+#define IEEE80211_HE_MAC_CAP4_BQR 0x04
+#define IEEE80211_HE_MAC_CAP4_SR_RESP 0x08
+#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
+#define IEEE80211_HE_MAC_CAP4_OPS 0x20
+#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40
+
+/* 802.11ax HE PHY capabilities */
+#define IEEE80211_HE_PHY_CAP0_DUAL_BAND 0x01
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe
+
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f
+#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10
+#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
+#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
+/* Midamble RX Max NSTS is split between byte #2 and byte #3 */
+#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS 0x80
+
+#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS 0x01
+#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
+#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04
+#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20
+
+/* Note that the meaning of UL MU below is different between an AP and a non-AP
+ * sta, where in the AP case it indicates support for Rx and in the non-AP sta
+ * case it indicates support for Tx.
+ */
+#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40
+#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80
+
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20
+#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA 0x40
+#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80
+
+#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01
+#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02
+
+/* Minimal allowed value of Max STS under 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c
+
+/* Minimal allowed value of Max STS above 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38
+
+#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40
+#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80
+
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02
+#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB 0x04
+#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB 0x08
+#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40
+#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80
+
+#define IEEE80211_HE_PHY_CAP7_SRP_BASED_SR 0x01
+#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR 0x02
+#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38
+#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40
+#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80
+
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04
+#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
+#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF 0x20
+
+/* 802.11ax HE TX/RX MCS NSS Support */
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800
+
+/* TX/RX HE MCS Support field Highest MCS subfield encoding */
+enum ieee80211_he_highest_mcs_supported_subfield_enc {
+ HIGHEST_MCS_SUPPORTED_MCS7 = 0,
+ HIGHEST_MCS_SUPPORTED_MCS8,
+ HIGHEST_MCS_SUPPORTED_MCS9,
+ HIGHEST_MCS_SUPPORTED_MCS10,
+ HIGHEST_MCS_SUPPORTED_MCS11,
+};
+
+/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */
+static inline u8
+ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
+{
+ u8 count = 4;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ count += 4;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ count += 4;
+
+ return count;
+}
+
+/* 802.11ax HE PPE Thresholds */
+#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1)
+#define IEEE80211_PPE_THRES_NSS_POS (0)
+#define IEEE80211_PPE_THRES_NSS_MASK (7)
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \
+ (BIT(5) | BIT(6))
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3)
+#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3)
+
+/*
+ * Calculate 802.11ax HE capabilities IE PPE field size
+ * Input: Header byte of ppe_thres (first byte), and HE capa IE's PHY cap u8*
+ */
+static inline u8
+ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+ u8 n;
+
+ if ((phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0)
+ return 0;
+
+ n = hweight8(ppe_thres_hdr &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >>
+ IEEE80211_PPE_THRES_NSS_POS));
+
+ /*
+ * Each pair is 6 bits, and we need to add the 7 "header" bits to the
+ * total size.
+ */
+ n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7;
+ n = DIV_ROUND_UP(n, 8);
+
+ return n;
+}
+
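As a worked example of the calculation above: a header byte of 0x09 encodes an NSS field of 1 (two spatial streams) and a RU index bitmask with one bit set, so n = 1 * 2 = 2 PPET pairs, i.e. 2 * 3 * 2 + 7 = 19 bits, and DIV_ROUND_UP(19, 8) = 3 bytes of PPE threshold data.
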
+/* HE Operation defines */
+#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x0000003f
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x000001c0
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET 6
+#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10
+#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x00100000
+#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00200000
+#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000
+#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000
+
+/*
+ * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
+ * @he_oper_ie: byte data of the HE Operation IE, starting from the byte
+ * after the ext ID byte. It is assumed that he_oper_ie has at least
+ * sizeof(struct ieee80211_he_operation) bytes, checked already in
+ * ieee802_11_parse_elems_crc()
+ * @return the actual size of the IE data (not including header), or 0 on error
+ */
+static inline u8
+ieee80211_he_oper_size(const u8 *he_oper_ie)
+{
+ struct ieee80211_he_operation *he_oper = (void *)he_oper_ie;
+ u8 oper_len = sizeof(struct ieee80211_he_operation);
+ u32 he_oper_params;
+
+ /* Make sure the input is not NULL */
+ if (!he_oper_ie)
+ return 0;
+
+ /* Calc required length */
+ he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+ if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
+ oper_len += 3;
+ if (he_oper_params & IEEE80211_HE_OPERATION_MULTI_BSSID_AP)
+ oper_len++;
+
+ /* Add the first byte (extension ID) to the total length */
+ oper_len++;
+
+ return oper_len;
+}
+
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
@@ -1992,6 +2416,11 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_FILS_WRAPPED_DATA = 8,
WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
WLAN_EID_EXT_FILS_NONCE = 13,
+ WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14,
+ WLAN_EID_EXT_HE_CAPABILITY = 35,
+ WLAN_EID_EXT_HE_OPERATION = 36,
+ WLAN_EID_EXT_UORA = 37,
+ WLAN_EID_EXT_HE_MU_EDCA = 38,
};
/* Action category code */
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index d95cae09dea0..ac42da56f7a2 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -74,6 +74,11 @@ struct team_port {
long mode_priv[0];
};
+static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->rx_handler_data);
+}
+
static inline bool team_port_enabled(struct team_port *port)
{
return port->index != -1;
@@ -84,6 +89,19 @@ static inline bool team_port_txable(struct team_port *port)
return port->linkup && team_port_enabled(port);
}
+static inline bool team_port_dev_txable(const struct net_device *port_dev)
+{
+ struct team_port *port;
+ bool txable;
+
+ rcu_read_lock();
+ port = team_port_get_rcu(port_dev);
+ txable = port ? team_port_txable(port) : false;
+ rcu_read_unlock();
+
+ return txable;
+}
+
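A sketch of the intended caller: LAG-aware code picking a transmit-capable team slave without taking any team locks (the slave array is hypothetical; the helper takes rcu_read_lock() internally):

static struct net_device *example_pick_txable(struct net_device **slaves,
					      int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (team_port_dev_txable(slaves[i]))
			return slaves[i];

	return NULL;
}
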
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void team_netpoll_send_skb(struct team_port *port,
struct sk_buff *skb)
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0e4647e0eb60..97914a2833d1 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -11,14 +11,16 @@
#define _LINUX_IMA_H
#include <linux/fs.h>
+#include <linux/security.h>
#include <linux/kexec.h>
struct linux_binprm;
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_file_check(struct file *file, int mask, int opened);
+extern int ima_file_check(struct file *file, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
+extern int ima_load_data(enum kernel_load_data_id id);
extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
@@ -34,7 +36,7 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
return 0;
}
-static inline int ima_file_check(struct file *file, int mask, int opened)
+static inline int ima_file_check(struct file *file, int mask)
{
return 0;
}
@@ -49,6 +51,11 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
return 0;
}
+static inline int ima_load_data(enum kernel_load_data_id id)
+{
+ return 0;
+}
+
static inline int ima_read_file(struct file *file, enum kernel_read_file_id id)
{
return 0;
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 27650f1bff3d..c759d1cbcedd 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -93,6 +93,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
+#define IN_DEV_BFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), BC_FORWARDING)
#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK)
#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index 858d3f4a2241..54c853ec2fd1 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -44,4 +44,17 @@ static inline void integrity_load_keys(void)
}
#endif /* CONFIG_INTEGRITY */
+#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
+
+extern int integrity_kernel_module_request(char *kmod_name);
+
+#else
+
+static inline int integrity_kernel_module_request(char *kmod_name)
+{
+ return 0;
+}
+
+#endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */
+
#endif /* _LINUX_INTEGRITY_H */
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index a044a824da85..3555d54bf79a 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -2,6 +2,9 @@
#ifndef LINUX_IOMAP_H
#define LINUX_IOMAP_H 1
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/mm.h>
#include <linux/types.h>
struct address_space;
@@ -9,6 +12,7 @@ struct fiemap_extent_info;
struct inode;
struct iov_iter;
struct kiocb;
+struct page;
struct vm_area_struct;
struct vm_fault;
@@ -29,6 +33,7 @@ struct vm_fault;
*/
#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
#define IOMAP_F_DIRTY 0x02 /* uncommitted metadata */
+#define IOMAP_F_BUFFER_HEAD 0x04 /* file system requires buffer heads */
/*
* Flags that only need to be reported for IOMAP_REPORT requests:
@@ -55,6 +60,16 @@ struct iomap {
u16 flags; /* flags for mapping */
struct block_device *bdev; /* block device for I/O */
struct dax_device *dax_dev; /* dax_dev for dax operations */
+ void *inline_data;
+ void *private; /* filesystem private */
+
+ /*
+ * Called when finished processing a page in the mapping returned in
+ * this iomap. At least for now this is only supported in the buffered
+ * write path.
+ */
+ void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
+ struct page *page, struct iomap *iomap);
};
/*
@@ -86,8 +101,40 @@ struct iomap_ops {
ssize_t written, unsigned flags, struct iomap *iomap);
};
+/*
+ * Structure allocated for each page when block size < PAGE_SIZE to track
+ * sub-page uptodate status and I/O completions.
+ */
+struct iomap_page {
+ atomic_t read_count;
+ atomic_t write_count;
+ DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
+};
+
+static inline struct iomap_page *to_iomap_page(struct page *page)
+{
+ if (page_has_private(page))
+ return (struct iomap_page *)page_private(page);
+ return NULL;
+}
+
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
+int iomap_readpage(struct page *page, const struct iomap_ops *ops);
+int iomap_readpages(struct address_space *mapping, struct list_head *pages,
+ unsigned nr_pages, const struct iomap_ops *ops);
+int iomap_set_page_dirty(struct page *page);
+int iomap_is_partially_uptodate(struct page *page, unsigned long from,
+ unsigned long count);
+int iomap_releasepage(struct page *page, gfp_t gfp_mask);
+void iomap_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int len);
+#ifdef CONFIG_MIGRATION
+int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
+ struct page *page, enum migrate_mode mode);
+#else
+#define iomap_migrate_page NULL
+#endif
int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 6cc2df7f7ac9..e1c9eea6015b 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -4,7 +4,7 @@
#include <linux/spinlock.h>
#include <linux/uidgid.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#include <uapi/linux/ipc.h>
#include <linux/refcount.h>
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index b5630c8eb2f3..6cea726612b7 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -9,7 +9,7 @@
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/refcount.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
struct user_namespace;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index cbb872c1b607..9d2ea3e907d0 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -73,6 +73,7 @@
#define GICD_TYPER_MBIS (1U << 16)
#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1)
#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
@@ -576,8 +577,8 @@ struct rdists {
phys_addr_t phys_base;
} __percpu *rdist;
struct page *prop_page;
- int id_bits;
u64 flags;
+ u32 gicd_typer;
bool has_vlpis;
bool has_direct_lpi;
};
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index b46b541c67c4..1a0b6f17a5d6 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -299,12 +299,18 @@ struct static_key_false {
#define DEFINE_STATIC_KEY_TRUE(name) \
struct static_key_true name = STATIC_KEY_TRUE_INIT
+#define DEFINE_STATIC_KEY_TRUE_RO(name) \
+ struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT
+
#define DECLARE_STATIC_KEY_TRUE(name) \
extern struct static_key_true name
#define DEFINE_STATIC_KEY_FALSE(name) \
struct static_key_false name = STATIC_KEY_FALSE_INIT
+#define DEFINE_STATIC_KEY_FALSE_RO(name) \
+ struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT
+
#define DECLARE_STATIC_KEY_FALSE(name) \
extern struct static_key_false name
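For illustration, a key that is decided once during early boot and never toggled again can now live in __ro_after_init memory; the key name and boot parameter below are hypothetical, and the flip must happen before the kernel write-protects that section:

DEFINE_STATIC_KEY_FALSE_RO(example_mitigation_key);

static int __init example_setup(char *str)
{
	/* Must run before mark_rodata_ro(); afterwards the key is
	 * write-protected along with the rest of __ro_after_init data.
	 */
	static_branch_enable(&example_mitigation_key);
	return 0;
}
early_param("example_mitigation", example_setup);
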
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index ab25c8b6d9e3..814643f7ee52 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -15,6 +15,7 @@
#include <linux/lockdep.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
+#include <linux/uidgid.h>
#include <linux/wait.h>
struct file;
@@ -325,12 +326,14 @@ void kernfs_destroy_root(struct kernfs_root *root);
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
void *priv, const void *ns);
struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
const char *name);
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
- const char *name,
- umode_t mode, loff_t size,
+ const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
+ loff_t size,
const struct kernfs_ops *ops,
void *priv, const void *ns,
struct lock_class_key *key);
@@ -415,12 +418,14 @@ static inline void kernfs_destroy_root(struct kernfs_root *root) { }
static inline struct kernfs_node *
kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
- umode_t mode, void *priv, const void *ns)
+ umode_t mode, kuid_t uid, kgid_t gid,
+ void *priv, const void *ns)
{ return ERR_PTR(-ENOSYS); }
static inline struct kernfs_node *
__kernfs_create_file(struct kernfs_node *parent, const char *name,
- umode_t mode, loff_t size, const struct kernfs_ops *ops,
+ umode_t mode, kuid_t uid, kgid_t gid,
+ loff_t size, const struct kernfs_ops *ops,
void *priv, const void *ns, struct lock_class_key *key)
{ return ERR_PTR(-ENOSYS); }
@@ -498,12 +503,15 @@ static inline struct kernfs_node *
kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
void *priv)
{
- return kernfs_create_dir_ns(parent, name, mode, priv, NULL);
+ return kernfs_create_dir_ns(parent, name, mode,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ priv, NULL);
}
static inline struct kernfs_node *
kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
- umode_t mode, loff_t size, const struct kernfs_ops *ops,
+ umode_t mode, kuid_t uid, kgid_t gid,
+ loff_t size, const struct kernfs_ops *ops,
void *priv, const void *ns)
{
struct lock_class_key *key = NULL;
@@ -511,15 +519,17 @@ kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
key = (struct lock_class_key *)&ops->lockdep_key;
#endif
- return __kernfs_create_file(parent, name, mode, size, ops, priv, ns,
- key);
+ return __kernfs_create_file(parent, name, mode, uid, gid,
+ size, ops, priv, ns, key);
}
static inline struct kernfs_node *
kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode,
loff_t size, const struct kernfs_ops *ops, void *priv)
{
- return kernfs_create_file_ns(parent, name, mode, size, ops, priv, NULL);
+ return kernfs_create_file_ns(parent, name, mode,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ size, ops, priv, NULL);
}
static inline int kernfs_remove_by_name(struct kernfs_node *parent,
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 7f6f93c3df9c..b49ff230beba 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -26,6 +26,7 @@
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
+#include <linux/uidgid.h>
#define UEVENT_HELPER_PATH_LEN 256
#define UEVENT_NUM_ENVP 32 /* number of env pointers */
@@ -114,6 +115,8 @@ extern struct kobject * __must_check kobject_get_unless_zero(
extern void kobject_put(struct kobject *kobj);
extern const void *kobject_namespace(struct kobject *kobj);
+extern void kobject_get_ownership(struct kobject *kobj,
+ kuid_t *uid, kgid_t *gid);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
struct kobj_type {
@@ -122,6 +125,7 @@ struct kobj_type {
struct attribute **default_attrs;
const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
const void *(*namespace)(struct kobject *kobj);
+ void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid);
};
struct kobj_uevent_env {
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 9440a2fc8893..e909413e4e38 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -63,7 +63,6 @@ struct pt_regs;
struct kretprobe;
struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
-typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
unsigned long flags);
typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
@@ -101,12 +100,6 @@ struct kprobe {
*/
kprobe_fault_handler_t fault_handler;
- /*
- * ... called if breakpoint trap occurs in probe handler.
- * Return 1 if it handled break, otherwise kernel will see it.
- */
- kprobe_break_handler_t break_handler;
-
/* Saved opcode (which has been replaced with breakpoint) */
kprobe_opcode_t opcode;
@@ -155,24 +148,6 @@ static inline int kprobe_ftrace(struct kprobe *p)
}
/*
- * Special probe type that uses setjmp-longjmp type tricks to resume
- * execution at a specified entry with a matching prototype corresponding
- * to the probed function - a trick to enable arguments to become
- * accessible seamlessly by probe handling logic.
- * Note:
- * Because of the way compilers allocate stack space for local variables
- * etc upfront, regardless of sub-scopes within a function, this mirroring
- * principle currently works only for probes placed on function entry points.
- */
-struct jprobe {
- struct kprobe kp;
- void *entry; /* probe handling code to jump to */
-};
-
-/* For backward compatibility with old code using JPROBE_ENTRY() */
-#define JPROBE_ENTRY(handler) (handler)
-
-/*
* Function-return probe -
* Note:
* User needs to provide a handler function, and initialize maxactive.
@@ -389,9 +364,6 @@ int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
-int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
-int longjmp_break_handler(struct kprobe *, struct pt_regs *);
-void jprobe_return(void);
unsigned long arch_deref_entry_point(void *);
int register_kretprobe(struct kretprobe *rp);
@@ -439,9 +411,6 @@ static inline void unregister_kprobe(struct kprobe *p)
static inline void unregister_kprobes(struct kprobe **kps, int num)
{
}
-static inline void jprobe_return(void)
-{
-}
static inline int register_kretprobe(struct kretprobe *rp)
{
return -ENOSYS;
@@ -468,20 +437,6 @@ static inline int enable_kprobe(struct kprobe *kp)
return -ENOSYS;
}
#endif /* CONFIG_KPROBES */
-static inline int register_jprobe(struct jprobe *p)
-{
- return -ENOSYS;
-}
-static inline int register_jprobes(struct jprobe **jps, int num)
-{
- return -ENOSYS;
-}
-static inline void unregister_jprobe(struct jprobe *p)
-{
-}
-static inline void unregister_jprobes(struct jprobe **jps, int num)
-{
-}
static inline int disable_kretprobe(struct kretprobe *rp)
{
return disable_kprobe(&rp->kp);
@@ -490,14 +445,6 @@ static inline int enable_kretprobe(struct kretprobe *rp)
{
return enable_kprobe(&rp->kp);
}
-static inline int disable_jprobe(struct jprobe *jp)
-{
- return -ENOSYS;
-}
-static inline int enable_jprobe(struct jprobe *jp)
-{
- return -ENOSYS;
-}
#ifndef CONFIG_KPROBES
static inline bool is_kprobe_insn_slot(unsigned long addr)
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 5b9fddbaac41..b2bb44f87f5a 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -93,8 +93,11 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
/* Map the ktime_t to timeval conversion to ns_to_timeval function */
#define ktime_to_timeval(kt) ns_to_timeval((kt))
-/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
-#define ktime_to_ns(kt) (kt)
+/* Convert ktime_t to nanoseconds */
+static inline s64 ktime_to_ns(const ktime_t kt)
+{
+ return kt;
+}
/**
* ktime_compare - Compares two ktime_t variables for less, greater or equal
diff --git a/include/linux/leds.h b/include/linux/leds.h
index b7e82550e655..834683d603f9 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -253,7 +253,7 @@ static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev)
struct led_trigger {
/* Trigger Properties */
const char *name;
- void (*activate)(struct led_classdev *led_cdev);
+ int (*activate)(struct led_classdev *led_cdev);
void (*deactivate)(struct led_classdev *led_cdev);
/* LEDs under control by this trigger (for simple triggers) */
@@ -262,8 +262,19 @@ struct led_trigger {
/* Link to next registered trigger */
struct list_head next_trig;
+
+ const struct attribute_group **groups;
};
+/*
+ * Currently the attributes in struct led_trigger::groups are added directly to
+ * the LED device. As this might change in the future, the following
+ * macros abstract getting the LED device and its trigger_data from the dev
+ * parameter passed to the attribute accessor functions.
+ */
+#define led_trigger_get_led(dev) ((struct led_classdev *)dev_get_drvdata((dev)))
+#define led_trigger_get_drvdata(dev) (led_get_trigger_data(led_trigger_get_led(dev)))
+
ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
@@ -288,10 +299,16 @@ extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
unsigned long *delay_off,
int invert);
extern void led_trigger_set_default(struct led_classdev *led_cdev);
-extern void led_trigger_set(struct led_classdev *led_cdev,
- struct led_trigger *trigger);
+extern int led_trigger_set(struct led_classdev *led_cdev,
+ struct led_trigger *trigger);
extern void led_trigger_remove(struct led_classdev *led_cdev);
+static inline void led_set_trigger_data(struct led_classdev *led_cdev,
+ void *trigger_data)
+{
+ led_cdev->trigger_data = trigger_data;
+}
+
static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
{
return led_cdev->trigger_data;
@@ -315,6 +332,10 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
extern void led_trigger_rename_static(const char *name,
struct led_trigger *trig);
+#define module_led_trigger(__led_trigger) \
+ module_driver(__led_trigger, led_trigger_register, \
+ led_trigger_unregister)
+
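A hedged sketch of a minimal trigger module built on the new hooks: activate() now returns an error code, trigger data is attached with led_set_trigger_data(), and module_led_trigger() supplies the init/exit boilerplate. All example_ names are hypothetical; kzalloc/kfree come from <linux/slab.h>.

struct example_trig_data {
	unsigned long count;
};

static int example_trig_activate(struct led_classdev *led_cdev)
{
	struct example_trig_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	led_set_trigger_data(led_cdev, data);
	return 0;
}

static void example_trig_deactivate(struct led_classdev *led_cdev)
{
	kfree(led_get_trigger_data(led_cdev));
}

static struct led_trigger example_trigger = {
	.name       = "example",
	.activate   = example_trig_activate,
	.deactivate = example_trig_deactivate,
	/* .groups = ..., for per-trigger sysfs attributes */
};

module_led_trigger(example_trigger);
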
#else
/* Trigger has no members */
@@ -334,9 +355,14 @@ static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
unsigned long *delay_off,
int invert) {}
static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
-static inline void led_trigger_set(struct led_classdev *led_cdev,
- struct led_trigger *trigger) {}
+static inline int led_trigger_set(struct led_classdev *led_cdev,
+ struct led_trigger *trigger)
+{
+ return 0;
+}
+
static inline void led_trigger_remove(struct led_classdev *led_cdev) {}
+static inline void led_set_trigger_data(struct led_classdev *led_cdev,
+ void *trigger_data) {}
static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
{
return NULL;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 32f247cb5e9e..bc4f87cbe7f4 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1111,6 +1111,8 @@ extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi, int n_ports);
extern int ata_slave_link_init(struct ata_port *ap);
+extern void ata_host_get(struct ata_host *host);
+extern void ata_host_put(struct ata_host *host);
extern int ata_host_start(struct ata_host *host);
extern int ata_host_register(struct ata_host *host,
struct scsi_host_template *sht);
diff --git a/include/linux/list.h b/include/linux/list.h
index 4b129df4d46b..de04cc5ed536 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -285,6 +285,36 @@ static inline void list_cut_position(struct list_head *list,
__list_cut_position(list, head, entry);
}
+/**
+ * list_cut_before - cut a list into two, before given entry
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *
+ * This helper moves the initial part of @head, up to but
+ * excluding @entry, from @head to @list. You should pass
+ * in @entry an element you know is on @head. @list should
+ * be an empty list or a list you do not care about losing
+ * its data.
+ * If @entry == @head, all entries on @head are moved to
+ * @list.
+ */
+static inline void list_cut_before(struct list_head *list,
+ struct list_head *head,
+ struct list_head *entry)
+{
+ if (head->next == entry) {
+ INIT_LIST_HEAD(list);
+ return;
+ }
+ list->next = head->next;
+ list->next->prev = list;
+ list->prev = entry->prev;
+ list->prev->next = list;
+ head->next = entry;
+ entry->prev = head;
+}
+
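A short usage sketch, assuming LIST_HEAD(head) populated with items linked via a ->node member and c pointing at one of them: everything queued before the cursor moves to a private batch list, while the cursor and its successors stay on head.

LIST_HEAD(batch);

/* head: A -> B -> C -> D; cutting before C yields
 * batch: A -> B and head: C -> D
 */
list_cut_before(&batch, &head, &c->node);
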
static inline void __list_splice(const struct list_head *list,
struct list_head *prev,
struct list_head *next)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 8f1131c8dd54..97a020c616ad 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -576,6 +576,10 @@
* userspace to load a kernel module with the given name.
* @kmod_name name of the module requested by the kernel
* Return 0 if successful.
+ * @kernel_load_data:
+ * Load data provided by userspace.
+ * @id kernel load data identifier
+ * Return 0 if permission is granted.
* @kernel_read_file:
* Read a file specified by userspace.
* @file contains the file structure pointing to the file being read
@@ -1569,7 +1573,7 @@ union security_list_options {
int (*file_send_sigiotask)(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int (*file_receive)(struct file *file);
- int (*file_open)(struct file *file, const struct cred *cred);
+ int (*file_open)(struct file *file);
int (*task_alloc)(struct task_struct *task, unsigned long clone_flags);
void (*task_free)(struct task_struct *task);
@@ -1582,6 +1586,7 @@ union security_list_options {
int (*kernel_act_as)(struct cred *new, u32 secid);
int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
int (*kernel_module_request)(char *kmod_name);
+ int (*kernel_load_data)(enum kernel_load_data_id id);
int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id);
int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
@@ -1872,6 +1877,7 @@ struct security_hook_heads {
struct hlist_head cred_getsecid;
struct hlist_head kernel_act_as;
struct hlist_head kernel_create_files_as;
+ struct hlist_head kernel_load_data;
struct hlist_head kernel_read_file;
struct hlist_head kernel_post_read_file;
struct hlist_head kernel_module_request;
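A minimal sketch of an LSM wiring up the new kernel_load_data hook with the
LSM_HOOK_INIT() pattern this header already provides (the "example" LSM and
its allow-everything policy are hypothetical):

    #include <linux/lsm_hooks.h>

    /* Hypothetical policy: permit all kernel data loads. */
    static int example_load_data(enum kernel_load_data_id id)
    {
            return 0;
    }

    static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
            LSM_HOOK_INIT(kernel_load_data, example_load_data),
    };

    /* at init time:
     *      security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
     *                         "example");
     */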
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
new file mode 100644
index 000000000000..ccb73422c2fa
--- /dev/null
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ *
+ */
+
+#ifndef __MTK_CMDQ_MAILBOX_H__
+#define __MTK_CMDQ_MAILBOX_H__
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define CMDQ_INST_SIZE 8 /* instruction is 64-bit */
+#define CMDQ_SUBSYS_SHIFT 16
+#define CMDQ_OP_CODE_SHIFT 24
+#define CMDQ_JUMP_PASS CMDQ_INST_SIZE
+
+#define CMDQ_WFE_UPDATE BIT(31)
+#define CMDQ_WFE_WAIT BIT(15)
+#define CMDQ_WFE_WAIT_VALUE 0x1
+
+/*
+ * CMDQ_CODE_MASK:
+ * set write mask
+ * format: op mask
+ * CMDQ_CODE_WRITE:
+ * write value into target register
+ * format: op subsys address value
+ * CMDQ_CODE_JUMP:
+ * jump by offset
+ * format: op offset
+ * CMDQ_CODE_WFE:
+ * wait for event and clear it
+ * without wait, it only clears the event
+ * format: [wait] op event update:1 to_wait:1 wait:1
+ * [clear] op event update:1 to_wait:0 wait:0
+ * CMDQ_CODE_EOC:
+ * end of command
+ * format: op irq_flag
+ */
+enum cmdq_code {
+ CMDQ_CODE_MASK = 0x02,
+ CMDQ_CODE_WRITE = 0x04,
+ CMDQ_CODE_JUMP = 0x10,
+ CMDQ_CODE_WFE = 0x20,
+ CMDQ_CODE_EOC = 0x40,
+};
+
+enum cmdq_cb_status {
+ CMDQ_CB_NORMAL = 0,
+ CMDQ_CB_ERROR
+};
+
+struct cmdq_cb_data {
+ enum cmdq_cb_status sta;
+ void *data;
+};
+
+typedef void (*cmdq_async_flush_cb)(struct cmdq_cb_data data);
+
+struct cmdq_task_cb {
+ cmdq_async_flush_cb cb;
+ void *data;
+};
+
+struct cmdq_pkt {
+ void *va_base;
+ dma_addr_t pa_base;
+ size_t cmd_buf_size; /* command occupied size */
+ size_t buf_size; /* real buffer size */
+ struct cmdq_task_cb cb;
+ struct cmdq_task_cb async_cb;
+ void *cl;
+};
+
+#endif /* __MTK_CMDQ_MAILBOX_H__ */
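The enum comment above describes each 64-bit instruction as an opcode plus
arguments. A rough sketch of how the shift macros could combine for a WRITE
instruction; this packing is inferred from the macro names, not from a
documented layout:

    #include <linux/mailbox/mtk-cmdq-mailbox.h>

    /* Assumed: op in bits 31:24 and subsys in bits 23:16 of the high word,
     * register offset in its low 16 bits, value in the low word.
     */
    static u64 example_write_inst(u8 subsys, u16 offset, u32 value)
    {
            u32 hi = (CMDQ_CODE_WRITE << CMDQ_OP_CODE_SHIFT) |
                     ((u32)subsys << CMDQ_SUBSYS_SHIFT) | offset;

            return ((u64)hi << 32) | value;
    }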
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index ca59883c8364..516920549378 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -20,31 +20,60 @@
#define INIT_MEMBLOCK_REGIONS 128
#define INIT_PHYSMEM_REGIONS 4
-/* Definition of memblock flags. */
-enum {
+/**
+ * enum memblock_flags - definition of memory region attributes
+ * @MEMBLOCK_NONE: no special request
+ * @MEMBLOCK_HOTPLUG: hotpluggable region
+ * @MEMBLOCK_MIRROR: mirrored region
+ * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
+ */
+enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
MEMBLOCK_MIRROR = 0x2, /* mirrored region */
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
};
+/**
+ * struct memblock_region - represents a memory region
+ * @base: physical address of the region
+ * @size: size of the region
+ * @flags: memory region attributes
+ * @nid: NUMA node id
+ */
struct memblock_region {
phys_addr_t base;
phys_addr_t size;
- unsigned long flags;
+ enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int nid;
#endif
};
+/**
+ * struct memblock_type - collection of memory regions of certain type
+ * @cnt: number of regions
+ * @max: size of the allocated array
+ * @total_size: size of all regions
+ * @regions: array of regions
+ * @name: the memory type symbolic name
+ */
struct memblock_type {
- unsigned long cnt; /* number of regions */
- unsigned long max; /* size of the allocated array */
- phys_addr_t total_size; /* size of all regions */
+ unsigned long cnt;
+ unsigned long max;
+ phys_addr_t total_size;
struct memblock_region *regions;
char *name;
};
+/**
+ * struct memblock - memblock allocator metadata
+ * @bottom_up: is bottom up direction?
+ * @current_limit: physical address of the current allocation limit
+ * @memory: usable memory regions
+ * @reserved: reserved memory regions
+ * @physmem: all physical memory
+ */
struct memblock {
bool bottom_up; /* is bottom up direction? */
phys_addr_t current_limit;
@@ -72,7 +101,7 @@ void memblock_discard(void);
phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
- int nid, ulong flags);
+ int nid, enum memblock_flags flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
@@ -89,19 +118,19 @@ int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
-ulong choose_memblock_flags(void);
+enum memblock_flags choose_memblock_flags(void);
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
phys_addr_t base, phys_addr_t size,
- int nid, unsigned long flags);
+ int nid, enum memblock_flags flags);
-void __next_mem_range(u64 *idx, int nid, ulong flags,
+void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
@@ -239,7 +268,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
/**
* for_each_resv_unavail_range - iterate through reserved and unavailable memory
* @i: u64 used as loop variable
- * @flags: pick from blocks based on memory attributes
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
*
@@ -253,13 +281,13 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)
static inline void memblock_set_region_flags(struct memblock_region *r,
- unsigned long flags)
+ enum memblock_flags flags)
{
r->flags |= flags;
}
static inline void memblock_clear_region_flags(struct memblock_region *r,
- unsigned long flags)
+ enum memblock_flags flags)
{
r->flags &= ~flags;
}
@@ -317,10 +345,10 @@ static inline bool memblock_bottom_up(void)
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
- ulong flags);
+ enum memblock_flags flags);
phys_addr_t memblock_alloc_base_nid(phys_addr_t size,
phys_addr_t align, phys_addr_t max_addr,
- int nid, ulong flags);
+ int nid, enum memblock_flags flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
@@ -367,8 +395,10 @@ phys_addr_t memblock_get_current_limit(void);
*/
/**
- * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
+ * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
* @reg: memblock_region structure
+ *
+ * Return: the lowest pfn intersecting with the memory region
*/
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
@@ -376,8 +406,10 @@ static inline unsigned long memblock_region_memory_base_pfn(const struct membloc
}
/**
- * memblock_region_memory_end_pfn - Return the end_pfn this region
+ * memblock_region_memory_end_pfn - get the end pfn of the memory region
* @reg: memblock_region structure
+ *
+ * Return: the end_pfn of the memory region
*/
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
@@ -385,8 +417,10 @@ static inline unsigned long memblock_region_memory_end_pfn(const struct memblock
}
/**
- * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
+ * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
* @reg: memblock_region structure
+ *
+ * Return: the lowest pfn intersecting with the reserved region
*/
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
@@ -394,8 +428,10 @@ static inline unsigned long memblock_region_reserved_base_pfn(const struct membl
}
/**
- * memblock_region_reserved_end_pfn - Return the end_pfn this region
+ * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
* @reg: memblock_region structure
+ *
+ * Return: the end_pfn of the reserved region
*/
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
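The switch from ulong to enum memblock_flags is purely a typing change: the
enumerators are still single-bit values that combine as a mask. A brief
hedged sketch of how the typed flags flow into the lookup helpers declared
above (the 4 KiB request is arbitrary):

    #include <linux/memblock.h>
    #include <linux/sizes.h>

    static phys_addr_t example_find_page(void)
    {
            enum memblock_flags flags = choose_memblock_flags();

            return memblock_find_in_range_node(SZ_4K, SZ_4K, 0,
                                               MEMBLOCK_ALLOC_ACCESSIBLE,
                                               NUMA_NO_NODE, flags);
    }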
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6c6fb116e925..680d3395fc83 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -317,6 +317,9 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, struct mem_cgroup **memcgp,
bool compound);
+int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask, struct mem_cgroup **memcgp,
+ bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
@@ -789,6 +792,16 @@ static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
return 0;
}
+static inline int mem_cgroup_try_charge_delay(struct page *page,
+ struct mm_struct *mm,
+ gfp_t gfp_mask,
+ struct mem_cgroup **memcgp,
+ bool compound)
+{
+ *memcgp = NULL;
+ return 0;
+}
+
static inline void mem_cgroup_commit_charge(struct page *page,
struct mem_cgroup *memcg,
bool lrucare, bool compound)
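mem_cgroup_try_charge_delay() slots into the same three-step protocol as
mem_cgroup_try_charge(): try, then either commit or cancel. A condensed
sketch (install_page() stands in for whatever step actually publishes the
page):

    #include <linux/memcontrol.h>

    static int example_charge(struct page *page, struct mm_struct *mm)
    {
            struct mem_cgroup *memcg;
            int ret;

            ret = mem_cgroup_try_charge_delay(page, mm, GFP_KERNEL,
                                              &memcg, false);
            if (ret)
                    return ret;

            ret = install_page(page); /* hypothetical */
            if (ret)
                    mem_cgroup_cancel_charge(page, memcg, false);
            else
                    mem_cgroup_commit_charge(page, memcg, false, false);
            return ret;
    }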
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 122e7e9d3091..dca6ab4eaa99 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -630,6 +630,7 @@ struct mlx4_caps {
u32 vf_caps;
bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
+ u32 health_buffer_addrs;
};
struct mlx4_buf_list {
@@ -851,6 +852,12 @@ struct mlx4_vf_dev {
u8 n_ports;
};
+struct mlx4_fw_crdump {
+ bool snapshot_enable;
+ struct devlink_region *region_crspace;
+ struct devlink_region *region_fw_health;
+};
+
enum mlx4_pci_status {
MLX4_PCI_STATUS_DISABLED,
MLX4_PCI_STATUS_ENABLED,
@@ -871,6 +878,7 @@ struct mlx4_dev_persistent {
u8 interface_state;
struct mutex pci_status_mutex; /* sync pci state */
enum mlx4_pci_status pci_status;
+ struct mlx4_fw_crdump crdump;
};
struct mlx4_dev {
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0566c6a94805..11fa4e66afc5 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -332,6 +332,13 @@ enum mlx5_event {
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
+
+ MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,
+};
+
+enum {
+ MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
+ MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
};
enum {
@@ -939,9 +946,9 @@ enum {
};
enum {
- MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
- MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
- MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
+ MLX5_VPORT_ADMIN_STATE_DOWN = 0x0,
+ MLX5_VPORT_ADMIN_STATE_UP = 0x1,
+ MLX5_VPORT_ADMIN_STATE_AUTO = 0x2,
};
enum {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 3723f6d4e2b7..7a452716de4b 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -817,6 +817,9 @@ struct mlx5_clock {
struct mlx5_pps pps_info;
};
+struct mlx5_fw_tracer;
+struct mlx5_vxlan;
+
struct mlx5_core_dev {
struct pci_dev *pdev;
/* sync pci state */
@@ -848,6 +851,7 @@ struct mlx5_core_dev {
atomic_t num_qps;
u32 issi;
struct mlx5e_resources mlx5e_res;
+ struct mlx5_vxlan *vxlan;
struct {
struct mlx5_rsvd_gids reserved_gids;
u32 roce_en;
@@ -861,6 +865,7 @@ struct mlx5_core_dev {
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
struct page *clock_info_page;
+ struct mlx5_fw_tracer *tracer;
};
struct mlx5_db {
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index af0592400499..804516e4f483 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -178,7 +178,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
- int dest_num);
+ int num_dest);
void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index c2a5b480b156..f043d65b9bac 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -672,7 +672,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 swp[0x1];
u8 swp_csum[0x1];
u8 swp_lso[0x1];
- u8 reserved_at_23[0x1b];
+ u8 reserved_at_23[0xd];
+ u8 max_vxlan_udp_ports[0x8];
+ u8 reserved_at_38[0x6];
u8 max_geneve_opt_len[0x1];
u8 tunnel_stateless_geneve_rx[0x1];
@@ -1135,7 +1137,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 general_obj_types[0x40];
- u8 reserved_at_440[0x40];
+ u8 reserved_at_440[0x20];
+
+ u8 reserved_at_460[0x10];
+ u8 max_num_eqs[0x10];
u8 reserved_at_480[0x3];
u8 log_max_l2_table[0x5];
@@ -3764,8 +3769,8 @@ struct mlx5_ifc_query_vport_state_out_bits {
};
enum {
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1,
+ MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
};
struct mlx5_ifc_query_vport_state_in_bits {
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 64d0f40d4cc3..37e065a80a43 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -576,6 +576,7 @@ struct mlx5_ifc_fpga_ipsec_sa {
enum fpga_tls_cmds {
CMD_SETUP_STREAM = 0x1001,
CMD_TEARDOWN_STREAM = 0x1002,
+ CMD_RESYNC_RX = 0x1003,
};
#define MLX5_TLS_1_2 (0)
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 9208cb8809ac..7e7c6dfcfb09 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -43,8 +43,6 @@ enum {
};
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
-u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
- u16 vport);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state);
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 99ce070e7dcb..efdc24dd9e97 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -335,176 +335,183 @@ struct core_state {
struct kioctx_table;
struct mm_struct {
- struct vm_area_struct *mmap; /* list of VMAs */
- struct rb_root mm_rb;
- u32 vmacache_seqnum; /* per-thread vmacache */
+ struct {
+ struct vm_area_struct *mmap; /* list of VMAs */
+ struct rb_root mm_rb;
+ u32 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
- unsigned long (*get_unmapped_area) (struct file *filp,
+ unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
#endif
- unsigned long mmap_base; /* base of mmap area */
- unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
+ unsigned long mmap_base; /* base of mmap area */
+ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
- /* Base adresses for compatible mmap() */
- unsigned long mmap_compat_base;
- unsigned long mmap_compat_legacy_base;
+ /* Base addresses for compatible mmap() */
+ unsigned long mmap_compat_base;
+ unsigned long mmap_compat_legacy_base;
#endif
- unsigned long task_size; /* size of task vm space */
- unsigned long highest_vm_end; /* highest vma end address */
- pgd_t * pgd;
-
- /**
- * @mm_users: The number of users including userspace.
- *
- * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
- * to 0 (i.e. when the task exits and there are no other temporary
- * reference holders), we also release a reference on @mm_count
- * (which may then free the &struct mm_struct if @mm_count also
- * drops to 0).
- */
- atomic_t mm_users;
-
- /**
- * @mm_count: The number of references to &struct mm_struct
- * (@mm_users count as 1).
- *
- * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
- * &struct mm_struct is freed.
- */
- atomic_t mm_count;
+ unsigned long task_size; /* size of task vm space */
+ unsigned long highest_vm_end; /* highest vma end address */
+ pgd_t * pgd;
+
+ /**
+ * @mm_users: The number of users including userspace.
+ *
+ * Use mmget()/mmget_not_zero()/mmput() to modify. When this
+ * drops to 0 (i.e. when the task exits and there are no other
+ * temporary reference holders), we also release a reference on
+ * @mm_count (which may then free the &struct mm_struct if
+ * @mm_count also drops to 0).
+ */
+ atomic_t mm_users;
+
+ /**
+ * @mm_count: The number of references to &struct mm_struct
+ * (@mm_users count as 1).
+ *
+ * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
+ * &struct mm_struct is freed.
+ */
+ atomic_t mm_count;
#ifdef CONFIG_MMU
- atomic_long_t pgtables_bytes; /* PTE page table pages */
+ atomic_long_t pgtables_bytes; /* PTE page table pages */
#endif
- int map_count; /* number of VMAs */
+ int map_count; /* number of VMAs */
- spinlock_t page_table_lock; /* Protects page tables and some counters */
- struct rw_semaphore mmap_sem;
+ spinlock_t page_table_lock; /* Protects page tables and some
+ * counters
+ */
+ struct rw_semaphore mmap_sem;
- struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
- * together off init_mm.mmlist, and are protected
- * by mmlist_lock
- */
+ struct list_head mmlist; /* List of maybe swapped mm's. These
+ * are globally strung together off
+ * init_mm.mmlist, and are protected
+ * by mmlist_lock
+ */
- unsigned long hiwater_rss; /* High-watermark of RSS usage */
- unsigned long hiwater_vm; /* High-water virtual memory usage */
+ unsigned long hiwater_rss; /* High-watermark of RSS usage */
+ unsigned long hiwater_vm; /* High-water virtual memory usage */
- unsigned long total_vm; /* Total pages mapped */
- unsigned long locked_vm; /* Pages that have PG_mlocked set */
- unsigned long pinned_vm; /* Refcount permanently increased */
- unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
- unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
- unsigned long stack_vm; /* VM_STACK */
- unsigned long def_flags;
+ unsigned long total_vm; /* Total pages mapped */
+ unsigned long locked_vm; /* Pages that have PG_mlocked set */
+ unsigned long pinned_vm; /* Refcount permanently increased */
+ unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
+ unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
+ unsigned long stack_vm; /* VM_STACK */
+ unsigned long def_flags;
- spinlock_t arg_lock; /* protect the below fields */
- unsigned long start_code, end_code, start_data, end_data;
- unsigned long start_brk, brk, start_stack;
- unsigned long arg_start, arg_end, env_start, env_end;
+ spinlock_t arg_lock; /* protect the below fields */
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack;
+ unsigned long arg_start, arg_end, env_start, env_end;
- unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
+ unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
- /*
- * Special counters, in some configurations protected by the
- * page_table_lock, in other configurations by being atomic.
- */
- struct mm_rss_stat rss_stat;
-
- struct linux_binfmt *binfmt;
+ /*
+ * Special counters, in some configurations protected by the
+ * page_table_lock, in other configurations by being atomic.
+ */
+ struct mm_rss_stat rss_stat;
- cpumask_var_t cpu_vm_mask_var;
+ struct linux_binfmt *binfmt;
- /* Architecture-specific MM context */
- mm_context_t context;
+ /* Architecture-specific MM context */
+ mm_context_t context;
- unsigned long flags; /* Must use atomic bitops to access the bits */
+ unsigned long flags; /* Must use atomic bitops to access */
- struct core_state *core_state; /* coredumping support */
+ struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_MEMBARRIER
- atomic_t membarrier_state;
+ atomic_t membarrier_state;
#endif
#ifdef CONFIG_AIO
- spinlock_t ioctx_lock;
- struct kioctx_table __rcu *ioctx_table;
+ spinlock_t ioctx_lock;
+ struct kioctx_table __rcu *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
- /*
- * "owner" points to a task that is regarded as the canonical
- * user/owner of this mm. All of the following must be true in
- * order for it to be changed:
- *
- * current == mm->owner
- * current->mm != mm
- * new_owner->mm == mm
- * new_owner->alloc_lock is held
- */
- struct task_struct __rcu *owner;
+ /*
+ * "owner" points to a task that is regarded as the canonical
+ * user/owner of this mm. All of the following must be true in
+ * order for it to be changed:
+ *
+ * current == mm->owner
+ * current->mm != mm
+ * new_owner->mm == mm
+ * new_owner->alloc_lock is held
+ */
+ struct task_struct __rcu *owner;
#endif
- struct user_namespace *user_ns;
+ struct user_namespace *user_ns;
- /* store ref to file /proc/<pid>/exe symlink points to */
- struct file __rcu *exe_file;
+ /* store ref to file /proc/<pid>/exe symlink points to */
+ struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
- struct mmu_notifier_mm *mmu_notifier_mm;
+ struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
- pgtable_t pmd_huge_pte; /* protected by page_table_lock */
-#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
- struct cpumask cpumask_allocation;
+ pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
- /*
- * numa_next_scan is the next time that the PTEs will be marked
- * pte_numa. NUMA hinting faults will gather statistics and migrate
- * pages to new nodes if necessary.
- */
- unsigned long numa_next_scan;
+ /*
+ * numa_next_scan is the next time that the PTEs will be marked
+ * pte_numa. NUMA hinting faults will gather statistics and
+ * migrate pages to new nodes if necessary.
+ */
+ unsigned long numa_next_scan;
- /* Restart point for scanning and setting pte_numa */
- unsigned long numa_scan_offset;
+ /* Restart point for scanning and setting pte_numa */
+ unsigned long numa_scan_offset;
- /* numa_scan_seq prevents two threads setting pte_numa */
- int numa_scan_seq;
+ /* numa_scan_seq prevents two threads setting pte_numa */
+ int numa_scan_seq;
#endif
- /*
- * An operation with batched TLB flushing is going on. Anything that
- * can move process memory needs to flush the TLB when moving a
- * PROT_NONE or PROT_NUMA mapped page.
- */
- atomic_t tlb_flush_pending;
+ /*
+ * An operation with batched TLB flushing is going on. Anything
+ * that can move process memory needs to flush the TLB when
+ * moving a PROT_NONE or PROT_NUMA mapped page.
+ */
+ atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
- /* See flush_tlb_batched_pending() */
- bool tlb_flush_batched;
+ /* See flush_tlb_batched_pending() */
+ bool tlb_flush_batched;
#endif
- struct uprobes_state uprobes_state;
+ struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
- atomic_long_t hugetlb_usage;
+ atomic_long_t hugetlb_usage;
#endif
- struct work_struct async_put_work;
+ struct work_struct async_put_work;
#if IS_ENABLED(CONFIG_HMM)
- /* HMM needs to track a few things per mm */
- struct hmm *hmm;
+ /* HMM needs to track a few things per mm */
+ struct hmm *hmm;
#endif
-} __randomize_layout;
+ } __randomize_layout;
+
+ /*
+ * The mm_cpumask needs to be at the end of mm_struct, because it
+ * is dynamically sized based on nr_cpu_ids.
+ */
+ unsigned long cpu_bitmap[];
+};
extern struct mm_struct init_mm;
+/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
-#ifdef CONFIG_CPUMASK_OFFSTACK
- mm->cpu_vm_mask_var = &mm->cpumask_allocation;
-#endif
- cpumask_clear(mm->cpu_vm_mask_var);
+ unsigned long cpu_bitmap = (unsigned long)mm;
+
+ cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ cpumask_clear((struct cpumask *)cpu_bitmap);
}
/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
- return mm->cpu_vm_mask_var;
+ return (struct cpumask *)&mm->cpu_bitmap;
}
struct mmu_gather;
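Because cpu_bitmap[] is now a flexible trailing array, every allocation of
struct mm_struct must reserve room for it. A hedged sketch of the sizing
arithmetic (the in-tree allocator is a slab cache sized the same way; the
open-coded kzalloc() here is only for illustration):

    #include <linux/mm_types.h>
    #include <linux/cpumask.h>
    #include <linux/slab.h>

    static struct mm_struct *example_alloc_mm(void)
    {
            struct mm_struct *mm;

            /* cpumask_size() covers nr_cpu_ids bits, as the comment says. */
            mm = kzalloc(sizeof(*mm) + cpumask_size(), GFP_KERNEL);
            if (mm)
                    mm_init_cpumask(mm);
            return mm;
    }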
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index d633f737b3c6..6675b9f81979 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -2,7 +2,7 @@
#define __LINUX_MROUTE_BASE_H
#include <linux/netdevice.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -254,6 +254,7 @@ struct mr_table {
atomic_t cache_resolve_queue_len;
bool mroute_do_assert;
bool mroute_do_pim;
+ bool mroute_do_wrvifwhole;
int mroute_reg_vif_num;
};
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index a86c4fa93115..cd0be91bdefa 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -67,9 +67,11 @@ struct mtd_erase_region_info {
* @datbuf: data buffer - if NULL only oob data are read/written
* @oobbuf: oob data buffer
*
- * Note, it is allowed to read more than one OOB area at one go, but not write.
- * The interface assumes that the OOB write requests program only one page's
- * OOB area.
+ * Note, some MTD drivers do not allow you to write more than one OOB area in
+ * one go. If you try to do that on such an MTD device, -EINVAL will be
+ * returned. If you want your implementation to be portable across all kinds
+ * of MTD devices, you should split the write request into several
+ * sub-requests when the request crosses a page boundary.
*/
struct mtd_oob_ops {
unsigned int mode;
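A hedged sketch of the per-page splitting the updated comment recommends,
using only helpers this API already exposes (example_write_oob() and the
MTD_OPS_AUTO_OOB choice are illustrative):

    #include <linux/mtd/mtd.h>

    static int example_write_oob(struct mtd_info *mtd, loff_t to,
                                 u8 *oob, size_t len)
    {
            while (len) {
                    size_t chunk = min_t(size_t, len, mtd->oobavail);
                    struct mtd_oob_ops ops = {
                            .mode = MTD_OPS_AUTO_OOB,
                            .ooblen = chunk,
                            .oobbuf = oob,
                    };
                    int ret = mtd_write_oob(mtd, to, &ops);

                    if (ret)
                            return ret;
                    to += mtd->writesize; /* one page's OOB per request */
                    oob += chunk;
                    len -= chunk;
            }
            return 0;
    }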
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 3e8ec3b8a39c..efb2345359bb 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -21,11 +21,10 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/bbm.h>
+#include <linux/of.h>
#include <linux/types.h>
-struct mtd_info;
struct nand_flash_dev;
-struct device_node;
/* Scan and identify a NAND device */
int nand_scan_with_ids(struct mtd_info *mtd, int max_chips,
@@ -36,17 +35,6 @@ static inline int nand_scan(struct mtd_info *mtd, int max_chips)
return nand_scan_with_ids(mtd, max_chips, NULL);
}
-/*
- * Separate phases of nand_scan(), allowing board driver to intervene
- * and override command or ECC setup according to flash type.
- */
-int nand_scan_ident(struct mtd_info *mtd, int max_chips,
- struct nand_flash_dev *table);
-int nand_scan_tail(struct mtd_info *mtd);
-
-/* Unregister the MTD device and free resources held by the NAND device */
-void nand_release(struct mtd_info *mtd);
-
/* Internal helper for board drivers which need to override command function */
void nand_wait_ready(struct mtd_info *mtd);
@@ -121,6 +109,7 @@ enum nand_ecc_algo {
NAND_ECC_UNKNOWN,
NAND_ECC_HAMMING,
NAND_ECC_BCH,
+ NAND_ECC_RS,
};
/*
@@ -218,6 +207,12 @@ enum nand_ecc_algo {
*/
#define NAND_WAIT_TCCS 0x00200000
+/*
+ * Whether the NAND chip is a boot medium. Drivers might use this information
+ * to select ECC algorithms supported by the boot ROM, or to apply similar
+ * restrictions.
+ */
+#define NAND_IS_BOOT_MEDIUM 0x00400000
+
/* Options set by nand scan */
/* Nand scan has allocated controller struct */
#define NAND_CONTROLLER_ALLOC 0x80000000
@@ -230,6 +225,17 @@ enum nand_ecc_algo {
/* Keep gcc happy */
struct nand_chip;
+/* ONFI version bits */
+#define ONFI_VERSION_1_0 BIT(1)
+#define ONFI_VERSION_2_0 BIT(2)
+#define ONFI_VERSION_2_1 BIT(3)
+#define ONFI_VERSION_2_2 BIT(4)
+#define ONFI_VERSION_2_3 BIT(5)
+#define ONFI_VERSION_3_0 BIT(6)
+#define ONFI_VERSION_3_1 BIT(7)
+#define ONFI_VERSION_3_2 BIT(8)
+#define ONFI_VERSION_4_0 BIT(9)
+
/* ONFI features */
#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
@@ -470,13 +476,13 @@ struct onfi_params {
*/
struct nand_parameters {
/* Generic parameters */
- char model[100];
+ const char *model;
bool supports_set_get_features;
DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
/* ONFI parameters */
- struct onfi_params onfi;
+ struct onfi_params *onfi;
};
/* The maximum expected count of bytes in the NAND ID sequence */
@@ -493,20 +499,42 @@ struct nand_id {
};
/**
- * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
+ * struct nand_controller_ops - Controller operations
+ *
+ * @attach_chip: this method is called after the NAND detection phase, once
+ * the flash ID and MTD fields such as erase size, page size and
+ * OOB size have been set up. ECC requirements are available if
+ * provided by the NAND chip or device tree. Typically used to
+ * choose the appropriate ECC configuration and allocate
+ * associated resources.
+ * This hook is optional.
+ * @detach_chip: free all resources allocated/claimed in
+ * nand_controller_ops->attach_chip().
+ * This hook is optional.
+ */
+struct nand_controller_ops {
+ int (*attach_chip)(struct nand_chip *chip);
+ void (*detach_chip)(struct nand_chip *chip);
+};
+
+/**
+ * struct nand_controller - Structure used to describe a NAND controller
+ *
* @lock: protection lock
* @active: the mtd device which holds the controller currently
* @wq: wait queue to sleep on if a NAND operation is in
* progress used instead of the per chip wait queue
* when a hw controller is available.
+ * @ops: NAND controller operations.
*/
-struct nand_hw_control {
+struct nand_controller {
spinlock_t lock;
struct nand_chip *active;
wait_queue_head_t wq;
+ const struct nand_controller_ops *ops;
};
-static inline void nand_hw_control_init(struct nand_hw_control *nfc)
+static inline void nand_controller_init(struct nand_controller *nfc)
{
nfc->active = NULL;
spin_lock_init(&nfc->lock);
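A short sketch of how a driver might use the renamed controller structure
and the new ops; attach_chip() runs between detection and the final scan
steps, and the ECC policy shown here is made up:

    static int example_attach_chip(struct nand_chip *chip)
    {
            /* Hypothetical policy: prefer BCH for hardware ECC. */
            if (chip->ecc.mode == NAND_ECC_HW)
                    chip->ecc.algo = NAND_ECC_BCH;
            return 0;
    }

    static const struct nand_controller_ops example_ops = {
            .attach_chip = example_attach_chip,
    };

    /* in probe:
     *      nand_controller_init(&ctrl);
     *      ctrl.ops = &example_ops;
     */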
@@ -778,11 +806,15 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
* implementation) if any.
* @cleanup: the ->init() function may have allocated resources, ->cleanup()
* is here to let vendor specific code release those resources.
+ * @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter
+ * page. This is called after the checksum is verified.
*/
struct nand_manufacturer_ops {
void (*detect)(struct nand_chip *chip);
int (*init)(struct nand_chip *chip);
void (*cleanup)(struct nand_chip *chip);
+ void (*fixup_onfi_param_page)(struct nand_chip *chip,
+ struct nand_onfi_params *p);
};
/**
@@ -986,14 +1018,14 @@ struct nand_subop {
unsigned int last_instr_end_off;
};
-int nand_subop_get_addr_start_off(const struct nand_subop *subop,
- unsigned int op_id);
-int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
- unsigned int op_id);
-int nand_subop_get_data_start_off(const struct nand_subop *subop,
- unsigned int op_id);
-int nand_subop_get_data_len(const struct nand_subop *subop,
- unsigned int op_id);
+unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int op_id);
/**
* struct nand_op_parser_addr_constraints - Constraints for address instructions
@@ -1176,9 +1208,9 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
* @buf_align: minimum buffer alignment required by a platform
- * @hwcontrol: platform-specific hardware control structure
+ * @dummy_controller: dummy controller implementation for drivers that can
+ * only control a single chip
* @erase: [REPLACEABLE] erase function
- * @scan_bbt: [REPLACEABLE] function to scan bad block table
* @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring
* data from array to read regs (tR).
* @state: [INTERN] the current state of the NAND device
@@ -1271,7 +1303,6 @@ struct nand_chip {
const struct nand_operation *op,
bool check_only);
int (*erase)(struct mtd_info *mtd, int page);
- int (*scan_bbt)(struct mtd_info *mtd);
int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip,
int feature_addr, uint8_t *subfeature_para);
int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -1314,11 +1345,11 @@ struct nand_chip {
flstate_t state;
uint8_t *oob_poi;
- struct nand_hw_control *controller;
+ struct nand_controller *controller;
struct nand_ecc_ctrl ecc;
unsigned long buf_align;
- struct nand_hw_control hwcontrol;
+ struct nand_controller dummy_controller;
uint8_t *bbt;
struct nand_bbt_descr *bbt_td;
@@ -1517,14 +1548,12 @@ extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
-int nand_default_bbt(struct mtd_info *mtd);
+int nand_create_bbt(struct nand_chip *chip);
int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
int allowbbt);
-int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, uint8_t *buf);
/**
* struct platform_nand_chip - chip level device structure
@@ -1555,14 +1584,12 @@ struct platform_device;
* struct platform_nand_ctrl - controller level device structure
* @probe: platform specific function to probe/setup hardware
* @remove: platform specific function to remove/teardown hardware
- * @hwcontrol: platform specific hardware control structure
* @dev_ready: platform specific function to read ready/busy pin
* @select_chip: platform specific chip select function
* @cmd_ctrl: platform specific function for controlling
* ALE/CLE/nCE. Also used to write command and address
* @write_buf: platform specific function for write buffer
* @read_buf: platform specific function for read buffer
- * @read_byte: platform specific function to read one byte from chip
* @priv: private data to transport driver specific settings
*
* All fields are optional and depend on the hardware driver requirements
@@ -1570,13 +1597,11 @@ struct platform_device;
struct platform_nand_ctrl {
int (*probe)(struct platform_device *pdev);
void (*remove)(struct platform_device *pdev);
- void (*hwcontrol)(struct mtd_info *mtd, int cmd);
int (*dev_ready)(struct mtd_info *mtd);
void (*select_chip)(struct mtd_info *mtd, int chip);
void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
- unsigned char (*read_byte)(struct mtd_info *mtd);
void *priv;
};
@@ -1593,10 +1618,10 @@ struct platform_nand_data {
/* return the supported asynchronous timing mode. */
static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
{
- if (!chip->parameters.onfi.version)
+ if (!chip->parameters.onfi)
return ONFI_TIMING_MODE_UNKNOWN;
- return chip->parameters.onfi.async_timing_mode;
+ return chip->parameters.onfi->async_timing_mode;
}
int onfi_fill_data_interface(struct nand_chip *chip,
@@ -1641,14 +1666,8 @@ int nand_check_erased_ecc_chunk(void *data, int datalen,
void *extraoob, int extraooblen,
int threshold);
-int nand_check_ecc_caps(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail);
-
-int nand_match_ecc_req(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail);
-
-int nand_maximize_ecc(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail);
+int nand_ecc_choose_conf(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail);
/* Default write_oob implementation */
int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
@@ -1674,10 +1693,14 @@ int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
/* Default read_page_raw implementation */
int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page);
+int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *buf, int oob_required, int page);
/* Default write_page_raw implementation */
int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page);
+int nand_write_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int oob_required, int page);
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
@@ -1711,8 +1734,13 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit);
-/* Free resources held by the NAND device */
+/*
+ * Free resources held by the NAND device; must be called in error paths
+ * entered after a successful nand_scan().
+ */
void nand_cleanup(struct nand_chip *chip);
+/* Unregister the MTD device and call nand_cleanup() */
+void nand_release(struct mtd_info *mtd);
/* Default extended ID decoding function */
void nand_decode_ext_id(struct nand_chip *chip);
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index e60da0d34cc1..c922e97f205a 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -235,6 +235,7 @@ enum spi_nor_option_flags {
SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
SNOR_F_READY_XSR_RDY = BIT(4),
SNOR_F_USE_CLSR = BIT(5),
+ SNOR_F_BROKEN_RESET = BIT(6),
};
/**
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
new file mode 100644
index 000000000000..088ff96c3eb6
--- /dev/null
+++ b/include/linux/mtd/spinand.h
@@ -0,0 +1,421 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2017 Micron Technology, Inc.
+ *
+ * Authors:
+ * Peter Pan <peterpandong@micron.com>
+ */
+#ifndef __LINUX_MTD_SPINAND_H
+#define __LINUX_MTD_SPINAND_H
+
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/**
+ * Standard SPI NAND flash operations
+ */
+
+#define SPINAND_RESET_OP \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_WR_EN_DIS_OP(enable) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_READID_OP(ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_SET_FEATURE_OP(reg, valptr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \
+ SPI_MEM_OP_ADDR(1, reg, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(1, valptr, 1))
+
+#define SPINAND_GET_FEATURE_OP(reg, valptr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \
+ SPI_MEM_OP_ADDR(1, reg, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_IN(1, valptr, 1))
+
+#define SPINAND_BLK_ERASE_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 2))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 2), \
+ SPI_MEM_OP_DUMMY(ndummy, 2), \
+ SPI_MEM_OP_DATA_IN(len, buf, 2))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 4), \
+ SPI_MEM_OP_DUMMY(ndummy, 4), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4))
+
+#define SPINAND_PROG_EXEC_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PROG_LOAD(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 1))
+
+#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 4))
+
+/**
+ * Standard SPI NAND flash commands
+ */
+#define SPINAND_CMD_PROG_LOAD_X4 0x32
+#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34
+
+/* feature register */
+#define REG_BLOCK_LOCK 0xa0
+#define BL_ALL_UNLOCKED 0x00
+
+/* configuration register */
+#define REG_CFG 0xb0
+#define CFG_OTP_ENABLE BIT(6)
+#define CFG_ECC_ENABLE BIT(4)
+#define CFG_QUAD_ENABLE BIT(0)
+
+/* status register */
+#define REG_STATUS 0xc0
+#define STATUS_BUSY BIT(0)
+#define STATUS_ERASE_FAILED BIT(2)
+#define STATUS_PROG_FAILED BIT(3)
+#define STATUS_ECC_MASK GENMASK(5, 4)
+#define STATUS_ECC_NO_BITFLIPS (0 << 4)
+#define STATUS_ECC_HAS_BITFLIPS (1 << 4)
+#define STATUS_ECC_UNCOR_ERROR (2 << 4)
+
+struct spinand_op;
+struct spinand_device;
+
+#define SPINAND_MAX_ID_LEN 4
+
+/**
+ * struct spinand_id - SPI NAND id structure
+ * @data: buffer containing the id bytes. Currently 4 bytes large, but can
+ * be extended if required
+ * @len: ID length
+ *
+ * struct_spinand_id->data contains all bytes returned after a READ_ID command,
+ * including dummy bytes if the chip does not emit ID bytes right after the
+ * READ_ID command. The responsibility to extract real ID bytes is left to
+ * spinand_manufacturer_ops->detect().
+ */
+struct spinand_id {
+ u8 data[SPINAND_MAX_ID_LEN];
+ int len;
+};
+
+/**
+ * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations
+ * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed
+ * the core calls the spinand_manufacturer_ops->detect() hook of each
+ * registered manufacturer until one of them returns 1. Note that
+ * the first thing to check in this hook is that the manufacturer ID
+ * in struct_spinand_device->id matches the manufacturer whose
+ * ->detect() hook has been called. Should return 1 if there's a
+ * match, 0 if the manufacturer ID does not match and a negative
+ * error code otherwise. When 1 is returned, the core assumes
+ * that properties of the NAND chip (spinand->base.memorg and
+ * spinand->base.eccreq) have been filled.
+ * @init: initialize a SPI NAND device
+ * @cleanup: cleanup a SPI NAND device
+ *
+ * Each SPI NAND manufacturer driver should implement this interface so that
+ * NAND chips coming from this vendor can be detected and initialized properly.
+ */
+struct spinand_manufacturer_ops {
+ int (*detect)(struct spinand_device *spinand);
+ int (*init)(struct spinand_device *spinand);
+ void (*cleanup)(struct spinand_device *spinand);
+};
+
+/**
+ * struct spinand_manufacturer - SPI NAND manufacturer instance
+ * @id: manufacturer ID
+ * @name: manufacturer name
+ * @ops: manufacturer operations
+ */
+struct spinand_manufacturer {
+ u8 id;
+ char *name;
+ const struct spinand_manufacturer_ops *ops;
+};
+
+/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer macronix_spinand_manufacturer;
+extern const struct spinand_manufacturer micron_spinand_manufacturer;
+extern const struct spinand_manufacturer winbond_spinand_manufacturer;
+
+/**
+ * struct spinand_op_variants - SPI NAND operation variants
+ * @ops: the list of variants for a given operation
+ * @nops: the number of variants
+ *
+ * Some operations like read-from-cache/write-to-cache have several variants
+ * depending on the number of IO lines you use to transfer data or address
+ * cycles. This structure is a way to describe the different variants supported
+ * by a chip and let the core pick the best one based on the SPI mem controller
+ * capabilities.
+ */
+struct spinand_op_variants {
+ const struct spi_mem_op *ops;
+ unsigned int nops;
+};
+
+#define SPINAND_OP_VARIANTS(name, ...) \
+ const struct spinand_op_variants name = { \
+ .ops = (struct spi_mem_op[]) { __VA_ARGS__ }, \
+ .nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) / \
+ sizeof(struct spi_mem_op), \
+ }
+
+/**
+ * spinand_ecc_info - description of the on-die ECC implemented by a SPI NAND
+ * chip
+ * @get_status: get the ECC status. Should return a positive number encoding
+ * the number of corrected bitflips if correction was possible or
+ * -EBADMSG if there are uncorrectable errors. It can also return
+ * other negative error codes if the error is not caused by
+ * uncorrectable bitflips.
+ * @ooblayout: the OOB layout used by the on-die ECC implementation
+ */
+struct spinand_ecc_info {
+ int (*get_status)(struct spinand_device *spinand, u8 status);
+ const struct mtd_ooblayout_ops *ooblayout;
+};
+
+#define SPINAND_HAS_QE_BIT BIT(0)
+
+/**
+ * struct spinand_info - Structure used to describe SPI NAND chips
+ * @model: model name
+ * @devid: device ID
+ * @flags: OR-ing of the SPINAND_XXX flags
+ * @memorg: memory organization
+ * @eccreq: ECC requirements
+ * @eccinfo: on-die ECC info
+ * @op_variants: operations variants
+ * @op_variants.read_cache: variants of the read-cache operation
+ * @op_variants.write_cache: variants of the write-cache operation
+ * @op_variants.update_cache: variants of the update-cache operation
+ * @select_target: function used to select a target/die. Required only for
+ * multi-die chips
+ *
+ * Each SPI NAND manufacturer driver should have a spinand_info table
+ * describing all the chips supported by the driver.
+ */
+struct spinand_info {
+ const char *model;
+ u8 devid;
+ u32 flags;
+ struct nand_memory_organization memorg;
+ struct nand_ecc_req eccreq;
+ struct spinand_ecc_info eccinfo;
+ struct {
+ const struct spinand_op_variants *read_cache;
+ const struct spinand_op_variants *write_cache;
+ const struct spinand_op_variants *update_cache;
+ } op_variants;
+ int (*select_target)(struct spinand_device *spinand,
+ unsigned int target);
+};
+
+#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \
+ { \
+ .read_cache = __read, \
+ .write_cache = __write, \
+ .update_cache = __update, \
+ }
+
+#define SPINAND_ECCINFO(__ooblayout, __get_status) \
+ .eccinfo = { \
+ .ooblayout = __ooblayout, \
+ .get_status = __get_status, \
+ }
+
+#define SPINAND_SELECT_TARGET(__func) \
+ .select_target = __func,
+
+#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
+ __flags, ...) \
+ { \
+ .model = __model, \
+ .devid = __id, \
+ .memorg = __memorg, \
+ .eccreq = __eccreq, \
+ .op_variants = __op_variants, \
+ .flags = __flags, \
+ __VA_ARGS__ \
+ }
+
+/**
+ * struct spinand_device - SPI NAND device instance
+ * @base: NAND device instance
+ * @spimem: pointer to the SPI mem object
+ * @lock: lock used to serialize accesses to the NAND
+ * @id: NAND ID as returned by READ_ID
+ * @flags: NAND flags
+ * @op_templates: various SPI mem op templates
+ * @op_templates.read_cache: read cache op template
+ * @op_templates.write_cache: write cache op template
+ * @op_templates.update_cache: update cache op template
+ * @select_target: select a specific target/die. Usually called before sending
+ * a command addressing a page or an eraseblock embedded in
+ * this die. Only required if your chip exposes several dies
+ * @cur_target: currently selected target/die
+ * @eccinfo: on-die ECC information
+ * @cfg_cache: config register cache. One entry per die
+ * @databuf: bounce buffer for data
+ * @oobbuf: bounce buffer for OOB data
+ * @scratchbuf: buffer used for everything but page accesses. This is needed
+ * because the spi-mem interface explicitly requests that buffers
+ * passed in spi_mem_op be DMA-able, so we can't place the
+ * buffers on the stack
+ * @manufacturer: SPI NAND manufacturer information
+ * @priv: manufacturer private data
+ */
+struct spinand_device {
+ struct nand_device base;
+ struct spi_mem *spimem;
+ struct mutex lock;
+ struct spinand_id id;
+ u32 flags;
+
+ struct {
+ const struct spi_mem_op *read_cache;
+ const struct spi_mem_op *write_cache;
+ const struct spi_mem_op *update_cache;
+ } op_templates;
+
+ int (*select_target)(struct spinand_device *spinand,
+ unsigned int target);
+ unsigned int cur_target;
+
+ struct spinand_ecc_info eccinfo;
+
+ u8 *cfg_cache;
+ u8 *databuf;
+ u8 *oobbuf;
+ u8 *scratchbuf;
+ const struct spinand_manufacturer *manufacturer;
+ void *priv;
+};
+
+/**
+ * mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance
+ * @mtd: MTD instance
+ *
+ * Return: the SPI NAND device attached to @mtd.
+ */
+static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nanddev(mtd), struct spinand_device, base);
+}
+
+/**
+ * spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device
+ * @spinand: SPI NAND device
+ *
+ * Return: the MTD device embedded in @spinand.
+ */
+static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand)
+{
+ return nanddev_to_mtd(&spinand->base);
+}
+
+/**
+ * nand_to_spinand() - Get the SPI NAND device embedding a NAND object
+ * @nand: NAND object
+ *
+ * Return: the SPI NAND device embedding @nand.
+ */
+static inline struct spinand_device *nand_to_spinand(struct nand_device *nand)
+{
+ return container_of(nand, struct spinand_device, base);
+}
+
+/**
+ * spinand_to_nand() - Get the NAND device embedded in a SPI NAND object
+ * @spinand: SPI NAND device
+ *
+ * Return: the NAND device embedded in @spinand.
+ */
+static inline struct nand_device *
+spinand_to_nand(struct spinand_device *spinand)
+{
+ return &spinand->base;
+}
+
+/**
+ * spinand_set_of_node - Attach a DT node to a SPI NAND device
+ * @spinand: SPI NAND device
+ * @np: DT node
+ *
+ * Attach a DT node to a SPI NAND device.
+ */
+static inline void spinand_set_of_node(struct spinand_device *spinand,
+ struct device_node *np)
+{
+ nanddev_set_of_node(&spinand->base, np);
+}
+
+int spinand_match_and_init(struct spinand_device *dev,
+ const struct spinand_info *table,
+ unsigned int table_size, u8 devid);
+
+int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
+int spinand_select_target(struct spinand_device *spinand, unsigned int target);
+
+#endif /* __LINUX_MTD_SPINAND_H */
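To make the macro layering above concrete, a hedged sketch of how a
manufacturer driver could describe its read-from-cache variants (the chip
and its dummy-cycle counts are invented; the core picks the best variant the
SPI mem controller can execute, as the spinand_op_variants kernel-doc says):

    static SPINAND_OP_VARIANTS(read_cache_variants,
                    SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
                    SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
                    SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
                    SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 0, NULL, 0));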
diff --git a/include/linux/net.h b/include/linux/net.h
index 6554d3ba4396..e0930678c8bf 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -114,7 +114,7 @@ struct socket {
unsigned long flags;
- struct socket_wq __rcu *wq;
+ struct socket_wq *wq;
struct file *file;
struct sock *sk;
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 623bb8ced060..2b2a6dce1630 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -79,6 +79,7 @@ enum {
NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */
+ NETIF_F_HW_TLS_RX_BIT, /* Hardware TLS RX offload */
NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
@@ -151,6 +152,7 @@ enum {
#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD)
#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4)
#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
+#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
#define for_each_netdev_feature(mask_addr, bit) \
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
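Like every other NETIF_F_* bit, the new RX flag is tested against
netdev->features; a one-line hedged sketch:

    /* Illustrative only: gate TLS RX offload setup on the new bit. */
    static bool example_can_offload_tls_rx(struct net_device *netdev)
    {
            return netdev->features & NETIF_F_HW_TLS_RX;
    }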
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3d0cc0b5cec2..ca5ab98053c8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -302,6 +302,17 @@ struct netdev_boot_setup {
int __init netdev_boot_setup(char *str);
+struct gro_list {
+ struct list_head list;
+ int count;
+};
+
+/*
+ * number of GRO hash buckets; must be less than the number of bits in
+ * napi_struct::gro_bitmask
+ */
+#define GRO_HASH_BUCKETS 8
+
/*
* Structure for NAPI scheduling similar to tasklet but with weighting
*/
@@ -316,13 +327,13 @@ struct napi_struct {
unsigned long state;
int weight;
- unsigned int gro_count;
+ unsigned long gro_bitmask;
int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
int poll_owner;
#endif
struct net_device *dev;
- struct sk_buff *gro_list;
+ struct gro_list gro_hash[GRO_HASH_BUCKETS];
struct sk_buff *skb;
struct hrtimer timer;
struct list_head dev_list;
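A hedged sketch of the bucket bookkeeping implied by the new fields: packets
hash into one of GRO_HASH_BUCKETS gro_hash lists, and gro_bitmask keeps one
bit per non-empty bucket (skb->list here assumes the sk_buff list_head used
by list-based GRO):

    static void example_gro_enqueue(struct napi_struct *napi,
                                    struct sk_buff *skb)
    {
            u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
            struct gro_list *gl = &napi->gro_hash[hash];

            list_add(&skb->list, &gl->list);
            gl->count++;
            napi->gro_bitmask |= BIT(hash);
    }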
@@ -569,6 +580,9 @@ struct netdev_queue {
* (/sys/class/net/DEV/Q/trans_timeout)
*/
unsigned long trans_timeout;
+
+ /* Subordinate device that the queue has been assigned to */
+ struct net_device *sb_dev;
/*
* write-mostly part
*/
@@ -730,10 +744,15 @@ struct xps_map {
*/
struct xps_dev_maps {
struct rcu_head rcu;
- struct xps_map __rcu *cpu_map[0];
+ struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
};
-#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
+
+#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
+
+#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
+ (_rxqs * (_tcs) * sizeof(struct xps_map *)))
+
#endif /* CONFIG_XPS */
#define TC_MAX_QUEUE 16
@@ -779,7 +798,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
}
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ struct net_device *sb_dev);
enum tc_setup_type {
TC_SETUP_QDISC_MQPRIO,
@@ -792,6 +812,7 @@ enum tc_setup_type {
TC_SETUP_QDISC_RED,
TC_SETUP_QDISC_PRIO,
TC_SETUP_QDISC_MQ,
+ TC_SETUP_QDISC_ETF,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -807,11 +828,8 @@ enum bpf_netdev_command {
*/
XDP_SETUP_PROG,
XDP_SETUP_PROG_HW,
- /* Check if a bpf program is set on the device. The callee should
- * set @prog_attached to one of XDP_ATTACHED_* values, note that "true"
- * is equivalent to XDP_ATTACHED_DRV.
- */
XDP_QUERY_PROG,
+ XDP_QUERY_PROG_HW,
/* BPF program for offload callbacks, invoked at program load time. */
BPF_OFFLOAD_VERIFIER_PREP,
BPF_OFFLOAD_TRANSLATE,
@@ -835,9 +853,8 @@ struct netdev_bpf {
struct bpf_prog *prog;
struct netlink_ext_ack *extack;
};
- /* XDP_QUERY_PROG */
+ /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
struct {
- u8 prog_attached;
u32 prog_id;
/* flags with which program was installed */
u32 prog_flags;
@@ -855,10 +872,10 @@ struct netdev_bpf {
struct {
struct bpf_offloaded_map *offmap;
};
- /* XDP_SETUP_XSK_UMEM */
+ /* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
struct {
- struct xdp_umem *umem;
- u16 queue_id;
+ struct xdp_umem *umem; /* out for query */
+ u16 queue_id; /* in for query */
} xsk;
};
};
@@ -891,6 +908,8 @@ struct tlsdev_ops {
void (*tls_dev_del)(struct net_device *netdev,
struct tls_context *ctx,
enum tls_offload_ctx_dir direction);
+ void (*tls_dev_resync_rx)(struct net_device *netdev,
+ struct sock *sk, u32 seq, u64 rcd_sn);
};
#endif
@@ -942,7 +961,8 @@ struct dev_ifalias {
* those the driver believes to be appropriate.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- * void *accel_priv, select_queue_fallback_t fallback);
+ * struct net_device *sb_dev,
+ * select_queue_fallback_t fallback);
* Called to decide which queue to use when device supports multiple
* transmit queues.
*
@@ -1214,7 +1234,7 @@ struct net_device_ops {
netdev_features_t features);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
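Drivers implementing ndo_select_queue() now receive the subordinate device
instead of an opaque accel_priv, and the fallback takes it too. A hedged
sketch of a conforming implementation (the priority special-case is
invented):

    static u16 example_select_queue(struct net_device *dev,
                                    struct sk_buff *skb,
                                    struct net_device *sb_dev,
                                    select_queue_fallback_t fallback)
    {
            if (skb->priority == TC_PRIO_CONTROL)
                    return 0;
            return fallback(dev, skb, sb_dev);
    }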
@@ -1909,7 +1929,8 @@ struct net_device {
int watchdog_timeo;
#ifdef CONFIG_XPS
- struct xps_dev_maps __rcu *xps_maps;
+ struct xps_dev_maps __rcu *xps_cpus_map;
+ struct xps_dev_maps __rcu *xps_rxqs_map;
#endif
#ifdef CONFIG_NET_CLS_ACT
struct mini_Qdisc __rcu *miniq_egress;
@@ -1978,7 +1999,7 @@ struct net_device {
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
- u8 num_tc;
+ s16 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
u8 prio_tc_map[TC_BITMASK + 1];
@@ -2032,6 +2053,17 @@ int netdev_get_num_tc(struct net_device *dev)
return dev->num_tc;
}
+void netdev_unbind_sb_channel(struct net_device *dev,
+ struct net_device *sb_dev);
+int netdev_bind_sb_channel_queue(struct net_device *dev,
+ struct net_device *sb_dev,
+ u8 tc, u16 count, u16 offset);
+int netdev_set_sb_channel(struct net_device *dev, u16 channel);
+static inline int netdev_get_sb_channel(struct net_device *dev)
+{
+ return max_t(int, -dev->num_tc, 0);
+}
+
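A minimal usage sketch of the new subordinate-channel helpers (the driver flow and names here are hypothetical): mark an upper device as subordinate channel 1, then dedicate four of the lower device's TC-0 queues, starting at offset 8, to it.

static int foo_assign_macvlan_queues(struct net_device *dev,
				     struct net_device *sb_dev)
{
	/* Tag the upper device as subordinate channel 1 ... */
	int err = netdev_set_sb_channel(sb_dev, 1);

	if (err)
		return err;
	/* ... then bind a block of the lower device's queues to it. */
	return netdev_bind_sb_channel_queue(dev, sb_dev, 0, 4, 8);
}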
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
unsigned int index)
@@ -2076,7 +2108,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv);
+ struct net_device *sb_dev);
/* returns the headroom that the master device needs to take in account
* when forwarding to this dev
@@ -2255,10 +2287,10 @@ static inline int gro_recursion_inc_test(struct sk_buff *skb)
return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}
-typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
-static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
- struct sk_buff **head,
- struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
+static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
+ struct list_head *head,
+ struct sk_buff *skb)
{
if (unlikely(gro_recursion_inc_test(skb))) {
NAPI_GRO_CB(skb)->flush |= 1;
@@ -2268,12 +2300,12 @@ static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
return cb(head, skb);
}
-typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
- struct sk_buff *);
-static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
- struct sock *sk,
- struct sk_buff **head,
- struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
+ struct sk_buff *);
+static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
+ struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
{
if (unlikely(gro_recursion_inc_test(skb))) {
NAPI_GRO_CB(skb)->flush |= 1;
@@ -2290,6 +2322,9 @@ struct packet_type {
struct net_device *,
struct packet_type *,
struct net_device *);
+ void (*list_func) (struct list_head *,
+ struct packet_type *,
+ struct net_device *);
bool (*id_match)(struct packet_type *ptype,
struct sock *sk);
void *af_packet_priv;
@@ -2299,8 +2334,8 @@ struct packet_type {
struct offload_callbacks {
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
netdev_features_t features);
- struct sk_buff **(*gro_receive)(struct sk_buff **head,
- struct sk_buff *skb);
+ struct sk_buff *(*gro_receive)(struct list_head *head,
+ struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb, int nhoff);
};
@@ -2537,8 +2572,14 @@ void dev_close(struct net_device *dev);
void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
@@ -2568,7 +2609,7 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
-int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
@@ -2784,13 +2825,13 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
}
#ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
if (PTR_ERR(pp) != -EINPROGRESS)
NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
- struct sk_buff **pp,
+ struct sk_buff *pp,
int flush,
struct gro_remcsum *grc)
{
@@ -2801,12 +2842,12 @@ static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
}
}
#else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
- struct sk_buff **pp,
+ struct sk_buff *pp,
int flush,
struct gro_remcsum *grc)
{
@@ -3278,6 +3319,92 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
u16 index);
+int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
+ u16 index, bool is_rxqs_map);
+
+/**
+ * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
+ * @j: CPU/Rx queue index
+ * @mask: bitmask of all CPUs/Rx queues
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
+ */
+static inline bool netif_attr_test_mask(unsigned long j,
+ const unsigned long *mask,
+ unsigned int nr_bits)
+{
+ cpu_max_bits_warn(j, nr_bits);
+ return test_bit(j, mask);
+}
+
+/**
+ * netif_attr_test_online - Test for online CPU/Rx queue
+ * @j: CPU/Rx queue index
+ * @online_mask: bitmask for CPUs/Rx queues that are online
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns true if a CPU/Rx queue is online.
+ */
+static inline bool netif_attr_test_online(unsigned long j,
+ const unsigned long *online_mask,
+ unsigned int nr_bits)
+{
+ cpu_max_bits_warn(j, nr_bits);
+
+ if (online_mask)
+ return test_bit(j, online_mask);
+
+ return (j < nr_bits);
+}
+
+/**
+ * netif_attrmask_next - get the next CPU/Rx queue in a CPU/Rx queues mask
+ * @n: CPU/Rx queue index
+ * @srcp: the cpumask/Rx queue mask pointer
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues are set.
+ */
+static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
+ unsigned int nr_bits)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpu_max_bits_warn(n, nr_bits);
+
+ if (srcp)
+ return find_next_bit(srcp, nr_bits, n + 1);
+
+ return n + 1;
+}
+
+/**
+ * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
+ * @n: CPU/Rx queue index
+ * @src1p: the first CPUs/Rx queues mask pointer
+ * @src2p: the second CPUs/Rx queues mask pointer
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues are set in both masks.
+ */
+static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
+ const unsigned long *src2p,
+ unsigned int nr_bits)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpu_max_bits_warn(n, nr_bits);
+
+ if (src1p && src2p)
+ return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
+ else if (src1p)
+ return find_next_bit(src1p, nr_bits, n + 1);
+ else if (src2p)
+ return find_next_bit(src2p, nr_bits, n + 1);
+
+ return n + 1;
+}
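A minimal sketch of walking the intersection of two attribute masks with the helpers above (mask, online and nr_bits are hypothetical inputs, not part of this patch):

static void foo_walk_attrs(const unsigned long *mask,
			   const unsigned long *online, unsigned int nr_bits)
{
	int j = -1;	/* -1 is the documented legal starting index */

	/* The walk ends once the returned index reaches nr_bits. */
	while ((j = netif_attrmask_next_and(j, mask, online, nr_bits)) < nr_bits)
		pr_info("attr %d set in both masks\n", j);
}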
#else
static inline int netif_set_xps_queue(struct net_device *dev,
const struct cpumask *mask,
@@ -3285,6 +3412,13 @@ static inline int netif_set_xps_queue(struct net_device *dev,
{
return 0;
}
+
+static inline int __netif_set_xps_queue(struct net_device *dev,
+ const unsigned long *mask,
+ u16 index, bool is_rxqs_map)
+{
+ return 0;
+}
#endif
/**
@@ -3304,8 +3438,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
- unsigned int rxq)
+ unsigned int rxqs)
{
+ dev->real_num_rx_queues = rxqs;
return 0;
}
#endif
@@ -3384,6 +3519,7 @@ int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
+void netif_receive_skb_list(struct list_head *head);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
@@ -3418,6 +3554,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int __dev_set_mtu(struct net_device *, int);
+int dev_set_mtu_ext(struct net_device *dev, int mtu,
+ struct netlink_ext_ack *extack);
int dev_set_mtu(struct net_device *, int);
int dev_change_tx_queue_len(struct net_device *, unsigned long);
void dev_set_group(struct net_device *, int);
@@ -3435,8 +3573,9 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, u32 flags);
-void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
- struct netdev_bpf *xdp);
+u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
+ enum bpf_netdev_command cmd);
+int xdp_umem_query(struct net_device *dev, u16 queue_id);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
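To illustrate the reworked tx-path signatures, a minimal sketch of a driver's ndo_select_queue that simply defers to the core fallback (the driver name is hypothetical):

static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	/* The fallback now takes sb_dev and honours any
	 * subordinate-device queue mapping.
	 */
	return fallback(dev, skb, sb_dev);
}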
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index dd2052f0efb7..07efffd0c759 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -288,6 +288,24 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct
return ret;
}
+static inline void
+NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+ struct list_head *head, struct net_device *in, struct net_device *out,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
+{
+ struct sk_buff *skb, *next;
+ struct list_head sublist;
+
+ INIT_LIST_HEAD(&sublist);
+ list_for_each_entry_safe(skb, next, head, list) {
+ list_del(&skb->list);
+ if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
+ list_add_tail(&skb->list, &sublist);
+ }
+ /* Put passed packets back on main list */
+ list_splice(&sublist, head);
+}
+
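A minimal sketch of how a list-based receive path might drive NF_HOOK_LIST(), modelled on IPv4 PRE_ROUTING (using ip_rcv_finish as the ok function is an assumption of this sketch):

static void foo_rcv_list(struct net *net, struct list_head *head,
			 struct net_device *dev)
{
	struct sk_buff *skb, *next;

	/* Filter the whole batch; dropped packets leave the list. */
	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, ip_rcv_finish);

	/* Hand the survivors to the ok function one by one. */
	list_for_each_entry_safe(skb, next, head, list) {
		list_del(&skb->list);
		ip_rcv_finish(net, NULL, skb);
	}
}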
/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
unsigned int len);
@@ -369,6 +387,14 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
return okfn(net, sk, skb);
}
+static inline void
+NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+ struct list_head *head, struct net_device *in, struct net_device *out,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
+{
+ /* nothing to do */
+}
+
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
struct sock *sk, struct sk_buff *skb,
struct net_device *indev, struct net_device *outdev,
@@ -388,8 +414,17 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+struct nf_conntrack_tuple;
+bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+ const struct sk_buff *skb);
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+struct nf_conntrack_tuple;
+static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+ const struct sk_buff *skb)
+{
+ return false;
+}
#endif
struct nf_conn;
@@ -398,6 +433,8 @@ enum ip_conntrack_info;
struct nf_ct_hook {
int (*update)(struct net *net, struct sk_buff *skb);
void (*destroy)(struct nf_conntrack *);
+ bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
+ const struct sk_buff *);
};
extern struct nf_ct_hook __rcu *nf_ct_hook;
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 3ecc3050be0e..4a520d3304a2 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -29,6 +29,7 @@ struct nfnetlink_subsystem {
__u8 subsys_id; /* nfnetlink subsystem ID */
__u8 cb_count; /* number of callbacks */
const struct nfnl_callback *cb; /* callback for individual types */
+ struct module *owner;
int (*commit)(struct net *net, struct sk_buff *skb);
int (*abort)(struct net *net, struct sk_buff *skb);
void (*cleanup)(struct net *net);
diff --git a/include/linux/netfilter/nf_osf.h b/include/linux/netfilter/nfnetlink_osf.h
index 0e114c492fb8..ecf7dab81e9e 100644
--- a/include/linux/netfilter/nf_osf.h
+++ b/include/linux/netfilter/nfnetlink_osf.h
@@ -1,16 +1,8 @@
-#include <uapi/linux/netfilter/nf_osf.h>
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NFOSF_H
+#define _NFOSF_H
-/* Initial window size option state machine: multiple of mss, mtu or
- * plain numeric value. Can also be made as plain numeric value which
- * is not a multiple of specified value.
- */
-enum nf_osf_window_size_options {
- OSF_WSS_PLAIN = 0,
- OSF_WSS_MSS,
- OSF_WSS_MTU,
- OSF_WSS_MODULO,
- OSF_WSS_MAX,
-};
+#include <uapi/linux/netfilter/nfnetlink_osf.h>
enum osf_fmatch_states {
/* Packet does not match the fingerprint */
@@ -21,6 +13,8 @@ enum osf_fmatch_states {
FMATCH_OPT_WRONG,
};
+extern struct list_head nf_osf_fingers[2];
+
struct nf_osf_finger {
struct rcu_head rcu_head;
struct list_head finger_entry;
@@ -31,3 +25,8 @@ bool nf_osf_match(const struct sk_buff *skb, u_int8_t family,
int hooknum, struct net_device *in, struct net_device *out,
const struct nf_osf_info *info, struct net *net,
const struct list_head *nf_osf_fingers);
+
+const char *nf_osf_find(const struct sk_buff *skb,
+ const struct list_head *nf_osf_fingers);
+
+#endif /* _NFOSF_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index b671fdfd212b..fa0686500970 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -5,17 +5,6 @@
#include <uapi/linux/netfilter_bridge.h>
#include <linux/skbuff.h>
-enum nf_br_hook_priorities {
- NF_BR_PRI_FIRST = INT_MIN,
- NF_BR_PRI_NAT_DST_BRIDGED = -300,
- NF_BR_PRI_FILTER_BRIDGED = -200,
- NF_BR_PRI_BRNF = 0,
- NF_BR_PRI_NAT_DST_OTHER = 100,
- NF_BR_PRI_FILTER_OTHER = 200,
- NF_BR_PRI_NAT_SRC = 300,
- NF_BR_PRI_LAST = INT_MAX,
-};
-
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index b31dabfdb453..95ab5cc64422 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -23,9 +23,6 @@ struct nf_queue_entry;
#ifdef CONFIG_INET
__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
-__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, unsigned int len,
- u_int8_t protocol);
int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict);
int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry);
@@ -35,14 +32,6 @@ static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
{
return 0;
}
-static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb,
- unsigned int hook,
- unsigned int dataoff,
- unsigned int len,
- u_int8_t protocol)
-{
- return 0;
-}
static inline int nf_ip_route(struct net *net, struct dst_entry **dst,
struct flowi *fl, bool strict)
{
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 288c597e75b3..c0dc4dd78887 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -30,11 +30,6 @@ struct nf_ipv6_ops {
void (*route_input)(struct sk_buff *skb);
int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
- __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
- __sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, unsigned int len,
- u_int8_t protocol);
int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict);
int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index f3075d6c7e82..71f121b66ca8 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -170,7 +170,6 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
struct netlink_callback {
struct sk_buff *skb;
const struct nlmsghdr *nlh;
- int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff * skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index b8d868d23e79..08f9247e9827 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -45,12 +45,18 @@ extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;
-#else
+
+extern int lockup_detector_online_cpu(unsigned int cpu);
+extern int lockup_detector_offline_cpu(unsigned int cpu);
+#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }
-#endif
+
+#define lockup_detector_online_cpu NULL
+#define lockup_detector_offline_cpu NULL
+#endif /* CONFIG_SOFTLOCKUP_DETECTOR */
#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 2950ce957656..68e91ef5494c 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -242,7 +242,12 @@ struct nvme_id_ctrl {
__le32 sanicap;
__le32 hmminds;
__le16 hmmaxd;
- __u8 rsvd338[174];
+ __u8 rsvd338[4];
+ __u8 anatt;
+ __u8 anacap;
+ __le32 anagrpmax;
+ __le32 nanagrpid;
+ __u8 rsvd352[160];
__u8 sqes;
__u8 cqes;
__le16 maxcmd;
@@ -254,11 +259,12 @@ struct nvme_id_ctrl {
__le16 awun;
__le16 awupf;
__u8 nvscc;
- __u8 rsvd531;
+ __u8 nwpc;
__le16 acwu;
__u8 rsvd534[2];
__le32 sgls;
- __u8 rsvd540[228];
+ __le32 mnan;
+ __u8 rsvd544[224];
char subnqn[256];
__u8 rsvd1024[768];
__le32 ioccsz;
@@ -312,7 +318,11 @@ struct nvme_id_ns {
__le16 nabspf;
__le16 noiob;
__u8 nvmcap[16];
- __u8 rsvd64[40];
+ __u8 rsvd64[28];
+ __le32 anagrpid;
+ __u8 rsvd96[3];
+ __u8 nsattr;
+ __u8 rsvd100[4];
__u8 nguid[16];
__u8 eui64[8];
struct nvme_lbaf lbaf[16];
@@ -425,6 +435,32 @@ struct nvme_effects_log {
__u8 resv[2048];
};
+enum nvme_ana_state {
+ NVME_ANA_OPTIMIZED = 0x01,
+ NVME_ANA_NONOPTIMIZED = 0x02,
+ NVME_ANA_INACCESSIBLE = 0x03,
+ NVME_ANA_PERSISTENT_LOSS = 0x04,
+ NVME_ANA_CHANGE = 0x0f,
+};
+
+struct nvme_ana_group_desc {
+ __le32 grpid;
+ __le32 nnsids;
+ __le64 chgcnt;
+ __u8 state;
+ __u8 rsvd17[15];
+ __le32 nsids[];
+};
+
+/* flag for the log specific field of the ANA log */
+#define NVME_ANA_LOG_RGO (1 << 0)
+
+struct nvme_ana_rsp_hdr {
+ __le64 chgcnt;
+ __le16 ngrps;
+ __le16 rsvd10[3];
+};
+
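A minimal sketch of walking an ANA log buffer built from these structures (assumes the log was fetched without NVME_ANA_LOG_RGO, so each group descriptor carries its namespace IDs):

static void foo_parse_ana_log(struct nvme_ana_rsp_hdr *hdr)
{
	/* Descriptors follow the header back to back. */
	struct nvme_ana_group_desc *desc = (void *)(hdr + 1);
	u16 i;

	for (i = 0; i < le16_to_cpu(hdr->ngrps); i++) {
		u32 nnsids = le32_to_cpu(desc->nnsids);

		pr_info("ANA group %u state %#x, %u namespaces\n",
			le32_to_cpu(desc->grpid), desc->state, nnsids);
		/* Next descriptor starts right after the nsids[] array. */
		desc = (void *)(desc->nsids + nnsids);
	}
}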
enum {
NVME_SMART_CRIT_SPARE = 1 << 0,
NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
@@ -444,11 +480,13 @@ enum {
enum {
NVME_AER_NOTICE_NS_CHANGED = 0x00,
NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
+ NVME_AER_NOTICE_ANA = 0x03,
};
enum {
NVME_AEN_CFG_NS_ATTR = 1 << 8,
NVME_AEN_CFG_FW_ACT = 1 << 9,
+ NVME_AEN_CFG_ANA_CHANGE = 1 << 11,
};
struct nvme_lba_range_type {
@@ -749,15 +787,22 @@ enum {
NVME_FEAT_HOST_MEM_BUF = 0x0d,
NVME_FEAT_TIMESTAMP = 0x0e,
NVME_FEAT_KATO = 0x0f,
+ NVME_FEAT_HCTM = 0x10,
+ NVME_FEAT_NOPSC = 0x11,
+ NVME_FEAT_RRL = 0x12,
+ NVME_FEAT_PLM_CONFIG = 0x13,
+ NVME_FEAT_PLM_WINDOW = 0x14,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
NVME_FEAT_RESV_PERSIST = 0x83,
+ NVME_FEAT_WRITE_PROTECT = 0x84,
NVME_LOG_ERROR = 0x01,
NVME_LOG_SMART = 0x02,
NVME_LOG_FW_SLOT = 0x03,
NVME_LOG_CHANGED_NS = 0x04,
NVME_LOG_CMD_EFFECTS = 0x05,
+ NVME_LOG_ANA = 0x0c,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
NVME_FWACT_REPL = (0 << 3),
@@ -765,6 +810,14 @@ enum {
NVME_FWACT_ACTV = (2 << 3),
};
+/* NVMe Namespace Write Protect State */
+enum {
+ NVME_NS_NO_WRITE_PROTECT = 0,
+ NVME_NS_WRITE_PROTECT,
+ NVME_NS_WRITE_PROTECT_POWER_CYCLE,
+ NVME_NS_WRITE_PROTECT_PERMANENT,
+};
+
#define NVME_MAX_CHANGED_NAMESPACES 1024
struct nvme_identify {
@@ -880,7 +933,7 @@ struct nvme_get_log_page_command {
__u64 rsvd2[2];
union nvme_data_ptr dptr;
__u8 lid;
- __u8 rsvd10;
+ __u8 lsp; /* upper 4 bits reserved */
__le16 numdl;
__le16 numdu;
__u16 rsvd11;
@@ -1111,6 +1164,8 @@ enum {
NVME_SC_SGL_INVALID_OFFSET = 0x16,
NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
+ NVME_SC_NS_WRITE_PROTECTED = 0x20,
+
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
NVME_SC_NS_NOT_READY = 0x82,
@@ -1180,6 +1235,13 @@ enum {
NVME_SC_ACCESS_DENIED = 0x286,
NVME_SC_UNWRITTEN_BLOCK = 0x287,
+ /*
+ * Path-related Errors:
+ */
+ NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
+ NVME_SC_ANA_INACCESSIBLE = 0x302,
+ NVME_SC_ANA_TRANSITION = 0x303,
+
NVME_SC_DNR = 0x4000,
};
diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h
index c726bd833761..6dbcd2da0332 100644
--- a/include/linux/omap-mailbox.h
+++ b/include/linux/omap-mailbox.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* omap-mailbox: interprocessor communication module for OMAP
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef OMAP_MAILBOX_H
diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h
index e6b240b6196c..379affc63e24 100644
--- a/include/linux/openvswitch.h
+++ b/include/linux/openvswitch.h
@@ -21,4 +21,9 @@
#include <uapi/linux/openvswitch.h>
+#define OVS_CLONE_ATTR_EXEC 0 /* Specify a u32 value. When nonzero,
* actions in the clone will not change
* flow keys; when zero, they may.
*/
+
#endif /* _LINUX_OPENVSWITCH_H */
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
index 0dd1a3f7b309..c3f1b44ade29 100644
--- a/include/linux/pci-dma-compat.h
+++ b/include/linux/pci-dma-compat.h
@@ -8,10 +8,10 @@
#include <linux/dma-mapping.h>
/* This defines the direction arg to the DMA mapping routines. */
-#define PCI_DMA_BIDIRECTIONAL 0
-#define PCI_DMA_TODEVICE 1
-#define PCI_DMA_FROMDEVICE 2
-#define PCI_DMA_NONE 3
+#define PCI_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
+#define PCI_DMA_TODEVICE DMA_TO_DEVICE
+#define PCI_DMA_FROMDEVICE DMA_FROM_DEVICE
+#define PCI_DMA_NONE DMA_NONE
static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 243eaa5a66ff..37dab8116901 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -17,6 +17,7 @@ enum pci_epc_irq_type {
PCI_EPC_IRQ_UNKNOWN,
PCI_EPC_IRQ_LEGACY,
PCI_EPC_IRQ_MSI,
+ PCI_EPC_IRQ_MSIX,
};
/**
@@ -30,7 +31,11 @@ enum pci_epc_irq_type {
* capability register
* @get_msi: ops to get the number of MSI interrupts allocated by the RC from
* the MSI capability register
- * @raise_irq: ops to raise a legacy or MSI interrupt
+ * @set_msix: ops to set the requested number of MSI-X interrupts in the
+ * MSI-X capability register
+ * @get_msix: ops to get the number of MSI-X interrupts allocated by the RC
+ * from the MSI-X capability register
+ * @raise_irq: ops to raise a legacy, MSI or MSI-X interrupt
* @start: ops to start the PCI link
* @stop: ops to stop the PCI link
* @owner: the module owner containing the ops
@@ -48,8 +53,10 @@ struct pci_epc_ops {
phys_addr_t addr);
int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
int (*get_msi)(struct pci_epc *epc, u8 func_no);
+ int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts);
+ int (*get_msix)(struct pci_epc *epc, u8 func_no);
int (*raise_irq)(struct pci_epc *epc, u8 func_no,
- enum pci_epc_irq_type type, u8 interrupt_num);
+ enum pci_epc_irq_type type, u16 interrupt_num);
int (*start)(struct pci_epc *epc);
void (*stop)(struct pci_epc *epc);
struct module *owner;
@@ -95,6 +102,7 @@ struct pci_epc {
#define EPC_FEATURE_NO_LINKUP_NOTIFIER BIT(0)
#define EPC_FEATURE_BAR_MASK (BIT(1) | BIT(2) | BIT(3))
+#define EPC_FEATURE_MSIX_AVAILABLE BIT(4)
#define EPC_FEATURE_SET_BAR(features, bar) \
(features |= (EPC_FEATURE_BAR_MASK & (bar << 1)))
#define EPC_FEATURE_GET_BAR(features) \
@@ -144,8 +152,10 @@ void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr);
int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts);
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no);
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
- enum pci_epc_irq_type type, u8 interrupt_num);
+ enum pci_epc_irq_type type, u16 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
struct pci_epc *pci_epc_get(const char *epc_name);
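A minimal sketch of an endpoint function driver using the new MSI-X entry points: request a vector count, then raise a specific vector (epc and func_no come from the EPF core; the driver flow is hypothetical).

static int foo_epf_setup_irqs(struct pci_epc *epc, u8 func_no)
{
	int ret;

	ret = pci_epc_set_msix(epc, func_no, 16);	/* request 16 vectors */
	if (ret)
		return ret;

	/* Raise MSI-X vector 1; interrupt_num is now u16-wide. */
	return pci_epc_raise_irq(epc, func_no, PCI_EPC_IRQ_MSIX, 1);
}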
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 4e7764935fa8..ec02f58758c8 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -119,6 +119,7 @@ struct pci_epf {
struct pci_epf_header *header;
struct pci_epf_bar bar[6];
u8 msi_interrupts;
+ u16 msix_interrupts;
u8 func_no;
struct pci_epc *epc;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c133ccfa002e..9b87f1936906 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -261,6 +261,9 @@ enum pci_bus_speed {
PCI_SPEED_UNKNOWN = 0xff,
};
+enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
+enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
+
struct pci_cap_saved_data {
u16 cap_nr;
bool cap_extended;
@@ -299,6 +302,7 @@ struct pci_dev {
u8 hdr_type; /* PCI header type (`multi' flag masked out) */
#ifdef CONFIG_PCIEAER
u16 aer_cap; /* AER capability offset */
+ struct aer_stats *aer_stats; /* AER stats for this device */
#endif
u8 pcie_cap; /* PCIe capability offset */
u8 msi_cap; /* MSI capability offset */
@@ -350,6 +354,7 @@ struct pci_dev {
unsigned int ltr_path:1; /* Latency Tolerance Reporting
supported from root to here */
#endif
+ unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */
pci_channel_state_t error_state; /* Current connectivity state */
struct device dev; /* Generic device interface */
@@ -387,6 +392,7 @@ struct pci_dev {
unsigned int is_virtfn:1;
unsigned int reset_fn:1;
unsigned int is_hotplug_bridge:1;
+ unsigned int shpc_managed:1; /* SHPC owned by shpchp */
unsigned int is_thunderbolt:1; /* Thunderbolt controller */
unsigned int __aer_firmware_first_valid:1;
unsigned int __aer_firmware_first:1;
@@ -818,6 +824,21 @@ struct pci_driver {
.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
+/**
+ * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
+ * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
+ * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
+ * @data: the driver data to be filled
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific PCI device. The subvendor and subdevice fields will be set
+ * to PCI_ANY_ID.
+ */
+#define PCI_DEVICE_DATA(vend, dev, data) \
+ .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
+ .driver_data = (kernel_ulong_t)(data)
+
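A minimal usage sketch; FOO, BAR and foo_info are hypothetical, but any existing PCI_VENDOR_ID_ / PCI_DEVICE_ID_ pair works the same way:

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE_DATA(FOO, BAR, &foo_info) },	/* driver_data = &foo_info */
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(pci, foo_ids);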
enum {
PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */
PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */
@@ -1088,20 +1109,17 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
enum pci_bus_speed *speed,
enum pcie_link_width *width);
void pcie_print_link_status(struct pci_dev *dev);
+bool pcie_has_flr(struct pci_dev *dev);
int pcie_flr(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
int pci_reset_function_locked(struct pci_dev *dev);
int pci_try_reset_function(struct pci_dev *dev);
int pci_probe_reset_slot(struct pci_slot *slot);
-int pci_reset_slot(struct pci_slot *slot);
-int pci_try_reset_slot(struct pci_slot *slot);
int pci_probe_reset_bus(struct pci_bus *bus);
-int pci_reset_bus(struct pci_bus *bus);
-int pci_try_reset_bus(struct pci_bus *bus);
+int pci_reset_bus(struct pci_dev *dev);
void pci_reset_secondary_bus(struct pci_dev *dev);
void pcibios_reset_secondary_bus(struct pci_dev *dev);
-int pci_reset_bridge_secondary_bus(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
@@ -1121,7 +1139,6 @@ int pci_enable_rom(struct pci_dev *pdev);
void pci_disable_rom(struct pci_dev *pdev);
void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
-size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
/* Power management related routines */
@@ -1469,13 +1486,9 @@ static inline bool pcie_aspm_support_enabled(void) { return false; }
#endif
#ifdef CONFIG_PCIEAER
-void pci_no_aer(void);
bool pci_aer_available(void);
-int pci_aer_init(struct pci_dev *dev);
#else
-static inline void pci_no_aer(void) { }
static inline bool pci_aer_available(void) { return false; }
-static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
#endif
#ifdef CONFIG_PCIE_ECRC
@@ -1877,20 +1890,9 @@ enum pci_fixup_pass {
#ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
-int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
-int pci_dev_specific_enable_acs(struct pci_dev *dev);
#else
static inline void pci_fixup_device(enum pci_fixup_pass pass,
struct pci_dev *dev) { }
-static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
- u16 acs_flags)
-{
- return -ENOTTY;
-}
-static inline int pci_dev_specific_enable_acs(struct pci_dev *dev)
-{
- return -ENOTTY;
-}
#endif
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index cf5e22103f68..a6d6650a0490 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -80,15 +80,12 @@ struct hotplug_slot_info {
* @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
* @info: pointer to the &struct hotplug_slot_info for the initial values for
* this slot.
- * @release: called during pci_hp_deregister to free memory allocated in a
- * hotplug_slot structure.
* @private: used by the hotplug pci controller driver to store whatever it
* needs.
*/
struct hotplug_slot {
struct hotplug_slot_ops *ops;
struct hotplug_slot_info *info;
- void (*release) (struct hotplug_slot *slot);
void *private;
/* Variables below this are for use only by the hotplug pci core. */
@@ -104,13 +101,23 @@ static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *pbus, int nr,
const char *name, struct module *owner,
const char *mod_name);
-int pci_hp_deregister(struct hotplug_slot *slot);
+int __pci_hp_initialize(struct hotplug_slot *slot, struct pci_bus *bus, int nr,
+ const char *name, struct module *owner,
+ const char *mod_name);
+int pci_hp_add(struct hotplug_slot *slot);
+
+void pci_hp_del(struct hotplug_slot *slot);
+void pci_hp_destroy(struct hotplug_slot *slot);
+void pci_hp_deregister(struct hotplug_slot *slot);
+
int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot,
struct hotplug_slot_info *info);
/* use a define to avoid include chaining to get THIS_MODULE & friends */
#define pci_hp_register(slot, pbus, devnr, name) \
__pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME)
+#define pci_hp_initialize(slot, bus, nr, name) \
+ __pci_hp_initialize(slot, bus, nr, name, THIS_MODULE, KBUILD_MODNAME)
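As a sketch of the split registration this enables (controller driver flow hypothetical, error handling abbreviated): initialize the slot early, publish it in sysfs only once the controller is ready, and note that pci_hp_deregister() still performs both teardown steps.

static int foo_hp_probe(struct hotplug_slot *slot, struct pci_bus *bus,
			const char *name)
{
	int ret = pci_hp_initialize(slot, bus, 0, name); /* not in sysfs yet */

	if (ret)
		return ret;
	/* ... bring the controller up, then publish the slot ... */
	return pci_hp_add(slot);	/* slot becomes visible in sysfs */
}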
/* PCI Setting Record (Type 0) */
struct hpp_type0 {
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 29502238e510..99d366cb0e9f 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1668,6 +1668,7 @@
#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112
#define PCI_VENDOR_ID_PMC_Sierra 0x11f8
+#define PCI_VENDOR_ID_MICROSEMI 0x11f8
#define PCI_VENDOR_ID_RP 0x11fe
#define PCI_DEVICE_ID_RP32INTF 0x0001
@@ -2541,6 +2542,7 @@
#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200
#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240
#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000
+#define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000
#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000
#define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
deleted file mode 100644
index 07d78e4653bc..000000000000
--- a/include/linux/percpu_ida.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PERCPU_IDA_H__
-#define __PERCPU_IDA_H__
-
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/spinlock_types.h>
-#include <linux/wait.h>
-#include <linux/cpumask.h>
-
-struct percpu_ida_cpu;
-
-struct percpu_ida {
- /*
- * number of tags available to be allocated, as passed to
- * percpu_ida_init()
- */
- unsigned nr_tags;
- unsigned percpu_max_size;
- unsigned percpu_batch_size;
-
- struct percpu_ida_cpu __percpu *tag_cpu;
-
- /*
- * Bitmap of cpus that (may) have tags on their percpu freelists:
- * steal_tags() uses this to decide when to steal tags, and which cpus
- * to try stealing from.
- *
- * It's ok for a freelist to be empty when its bit is set - steal_tags()
- * will just keep looking - but the bitmap _must_ be set whenever a
- * percpu freelist does have tags.
- */
- cpumask_t cpus_have_tags;
-
- struct {
- spinlock_t lock;
- /*
- * When we go to steal tags from another cpu (see steal_tags()),
- * we want to pick a cpu at random. Cycling through them every
- * time we steal is a bit easier and more or less equivalent:
- */
- unsigned cpu_last_stolen;
-
- /* For sleeping on allocation failure */
- wait_queue_head_t wait;
-
- /*
- * Global freelist - it's a stack where nr_free points to the
- * top
- */
- unsigned nr_free;
- unsigned *freelist;
- } ____cacheline_aligned_in_smp;
-};
-
-/*
- * Number of tags we move between the percpu freelist and the global freelist at
- * a time
- */
-#define IDA_DEFAULT_PCPU_BATCH_MOVE 32U
-/* Max size of percpu freelist, */
-#define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
-
-int percpu_ida_alloc(struct percpu_ida *pool, int state);
-void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
-
-void percpu_ida_destroy(struct percpu_ida *pool);
-int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
- unsigned long max_size, unsigned long batch_size);
-static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
-{
- return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
- IDA_DEFAULT_PCPU_BATCH_MOVE);
-}
-
-typedef int (*percpu_ida_cb)(unsigned, void *);
-int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
- void *data);
-
-unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
-#endif /* __PERCPU_IDA_H__ */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index ad5444491975..10f92e1d8e7b 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -25,6 +25,12 @@
*/
#define ARMPMU_MAX_HWEVENTS 32
+/*
+ * ARM PMU hw_event flags
+ */
+/* Event uses a 64-bit counter */
+#define ARMPMU_EVT_64BIT 1
+
#define HW_OP_UNSUPPORTED 0xFFFF
#define C(_x) PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED 0xFFFF
@@ -87,14 +93,13 @@ struct arm_pmu {
struct perf_event *event);
int (*set_event_filter)(struct hw_perf_event *evt,
struct perf_event_attr *attr);
- u32 (*read_counter)(struct perf_event *event);
- void (*write_counter)(struct perf_event *event, u32 val);
+ u64 (*read_counter)(struct perf_event *event);
+ void (*write_counter)(struct perf_event *event, u64 val);
void (*start)(struct arm_pmu *);
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
int num_events;
- u64 max_period;
bool secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 87f6db437e4a..53c500f0ca79 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -490,7 +490,7 @@ struct perf_addr_filters_head {
};
/**
- * enum perf_event_state - the states of a event
+ * enum perf_event_state - the states of an event:
*/
enum perf_event_state {
PERF_EVENT_STATE_DEAD = -4,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6cd09098427c..cd6f637cbbfb 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -825,6 +825,16 @@ static inline bool phy_interrupt_is_valid(struct phy_device *phydev)
}
/**
+ * phy_polling_mode - Convenience function for testing whether polling is
+ * used to detect PHY status changes
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_polling_mode(struct phy_device *phydev)
+{
+ return phydev->irq == PHY_POLL;
+}
+
+/**
* phy_is_internal - Convenience function for testing if a PHY is internal
* @phydev: the phy_device struct
*/
@@ -942,6 +952,8 @@ void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
int phy_aneg_done(struct phy_device *phydev);
+int phy_speed_down(struct phy_device *phydev, bool sync);
+int phy_speed_up(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
int phy_restart_aneg(struct phy_device *phydev);
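A minimal sketch of the intended pairing for the new helpers (driver callbacks hypothetical): drop to the lowest speed both link partners support before suspending with WoL armed, and restore it on resume.

static int foo_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	/* sync=true: wait for the slower link to come up before sleeping */
	return phy_speed_down(ndev->phydev, true);
}

static int foo_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	return phy_speed_up(ndev->phydev);
}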
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 50eeae025f1e..021fc6595856 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -234,5 +234,6 @@ int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode)
void phylink_set_port_modes(unsigned long *bits);
+void phylink_helper_basex_speed(struct phylink_link_state *state);
#endif
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
index 09eb80f2574a..8dd85d302b90 100644
--- a/include/linux/pinctrl/pinconf.h
+++ b/include/linux/pinctrl/pinconf.h
@@ -28,7 +28,8 @@ struct seq_file;
* is not available on this controller this should return -ENOTSUPP
* and if it is available but disabled it should return -EINVAL
* @pin_config_set: configure an individual pin
- * @pin_config_group_get: get configurations for an entire pin group
+ * @pin_config_group_get: get configurations for an entire pin group; should
+ * return -ENOTSUPP and -EINVAL using the same rules as pin_config_get.
* @pin_config_group_set: configure all pins in a group
* @pin_config_dbg_parse_modify: optional debugfs to modify a pin configuration
* @pin_config_dbg_show: optional debugfs display hook that will provide
diff --git a/include/linux/platform_data/bt-nokia-h4p.h b/include/linux/platform_data/bt-nokia-h4p.h
deleted file mode 100644
index 30d169dfadf3..000000000000
--- a/include/linux/platform_data/bt-nokia-h4p.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * This file is part of Nokia H4P bluetooth driver
- *
- * Copyright (C) 2010 Nokia Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-
-/**
- * struct hci_h4p_platform data - hci_h4p Platform data structure
- */
-struct hci_h4p_platform_data {
- int chip_type;
- int bt_sysclk;
- unsigned int bt_wakeup_gpio;
- unsigned int host_wakeup_gpio;
- unsigned int reset_gpio;
- int reset_gpio_shared;
- unsigned int uart_irq;
- phys_addr_t uart_base;
- const char *uart_iclk;
- const char *uart_fclk;
- void (*set_pm_limits)(struct device *dev, bool set);
-};
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index 90ae19ca828f..57a5a35e0073 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -22,6 +22,7 @@
#include <asm-generic/gpio.h>
#define MAX_REGS_BANKS 5
+#define MAX_INT_PER_BANK 32
struct davinci_gpio_platform_data {
u32 ngpio;
@@ -41,7 +42,7 @@ struct davinci_gpio_controller {
spinlock_t lock;
void __iomem *regs[MAX_REGS_BANKS];
int gpio_unbanked;
- unsigned int base_irq;
+ int irqs[MAX_INT_PER_BANK];
unsigned int base;
};
diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h b/include/linux/platform_data/jz4740/jz4740_nand.h
index f381d465e768..bc571f6d5ced 100644
--- a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
+++ b/include/linux/platform_data/jz4740/jz4740_nand.h
@@ -13,8 +13,8 @@
*
*/
-#ifndef __ASM_MACH_JZ4740_JZ4740_NAND_H__
-#define __ASM_MACH_JZ4740_JZ4740_NAND_H__
+#ifndef __JZ4740_NAND_H__
+#define __JZ4740_NAND_H__
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
diff --git a/include/linux/platform_data/media/sii9234.h b/include/linux/platform_data/media/sii9234.h
deleted file mode 100644
index 6a4a809fe9a3..000000000000
--- a/include/linux/platform_data/media/sii9234.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Driver header for SII9234 MHL converter chip.
- *
- * Copyright (c) 2011 Samsung Electronics, Co. Ltd
- * Contact: Tomasz Stanislawski <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef SII9234_H
-#define SII9234_H
-
-/**
- * @gpio_n_reset: GPIO driving nRESET pin
- */
-
-struct sii9234_platform_data {
- int gpio_n_reset;
-};
-
-#endif /* SII9234_H */
diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
index d1397c8ed94e..6397b9c8149a 100644
--- a/include/linux/platform_data/mmp_dma.h
+++ b/include/linux/platform_data/mmp_dma.h
@@ -12,9 +12,13 @@
#ifndef MMP_DMA_H
#define MMP_DMA_H
+struct dma_slave_map;
+
struct mmp_dma_platdata {
int dma_channels;
int nb_requestors;
+ int slave_map_cnt;
+ const struct dma_slave_map *slave_map;
};
#endif /* MMP_DMA_H */
diff --git a/include/linux/platform_data/mtd-orion_nand.h b/include/linux/platform_data/mtd-orion_nand.h
index a7ce77c7c1a8..34828eb85982 100644
--- a/include/linux/platform_data/mtd-orion_nand.h
+++ b/include/linux/platform_data/mtd-orion_nand.h
@@ -12,7 +12,6 @@
*/
struct orion_nand_data {
struct mtd_partition *parts;
- int (*dev_ready)(struct mtd_info *mtd);
u32 nr_parts;
u8 ale; /* address line number connected to ALE */
u8 cle; /* address line number connected to CLE */
diff --git a/arch/mips/include/asm/txx9/ndfmc.h b/include/linux/platform_data/txx9/ndfmc.h
index fa67f3df78fc..fc172627d54e 100644
--- a/arch/mips/include/asm/txx9/ndfmc.h
+++ b/include/linux/platform_data/txx9/ndfmc.h
@@ -5,8 +5,8 @@
*
* (C) Copyright TOSHIBA CORPORATION 2007
*/
-#ifndef __ASM_TXX9_NDFMC_H
-#define __ASM_TXX9_NDFMC_H
+#ifndef __TXX9_NDFMC_H
+#define __TXX9_NDFMC_H
#define NDFMC_PLAT_FLAG_USE_BSPRT 0x01
#define NDFMC_PLAT_FLAG_NO_RSTR 0x02
@@ -27,4 +27,4 @@ struct txx9ndfmc_platform_data {
void txx9_ndfmc_init(unsigned long baseaddr,
const struct txx9ndfmc_platform_data *plat_data);
-#endif /* __ASM_TXX9_NDFMC_H */
+#endif /* __TXX9_NDFMC_H */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index cb8d84090cfb..776c546d581a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -239,6 +239,8 @@ unsigned int of_genpd_opp_to_performance_state(struct device *dev,
int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
unsigned int index);
+struct device *genpd_dev_pm_attach_by_name(struct device *dev,
+ char *name);
#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
static inline int of_genpd_add_provider_simple(struct device_node *np,
struct generic_pm_domain *genpd)
@@ -290,6 +292,12 @@ static inline struct device *genpd_dev_pm_attach_by_id(struct device *dev,
return NULL;
}
+static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev,
+ char *name)
+{
+ return NULL;
+}
+
static inline
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
@@ -301,6 +309,8 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
int dev_pm_domain_attach(struct device *dev, bool power_on);
struct device *dev_pm_domain_attach_by_id(struct device *dev,
unsigned int index);
+struct device *dev_pm_domain_attach_by_name(struct device *dev,
+ char *name);
void dev_pm_domain_detach(struct device *dev, bool power_off);
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
#else
@@ -313,6 +323,11 @@ static inline struct device *dev_pm_domain_attach_by_id(struct device *dev,
{
return NULL;
}
+static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
+ char *name)
+{
+ return NULL;
+}
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
static inline void dev_pm_domain_set(struct device *dev,
struct dev_pm_domain *pd) {}
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index c85704fcdbd2..ee7e987ea1b4 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -95,8 +95,8 @@ struct k_itimer {
clockid_t it_clock;
timer_t it_id;
int it_active;
- int it_overrun;
- int it_overrun_last;
+ s64 it_overrun;
+ s64 it_overrun_last;
int it_requeue_pending;
int it_sigev_notify;
ktime_t it_interval;
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 6d7e800affd8..cf3eccfe1543 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -50,15 +50,15 @@ static inline const char *printk_skip_headers(const char *buffer)
/* We show everything that is MORE important than this.. */
#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */
#define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */
-#define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */
#define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */
#define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */
/*
- * Default used to be hard-coded at 7, we're now allowing it to be set from
- * kernel config.
+ * Default used to be hard-coded at 7 and quiet at 4; both can now be set
+ * from the kernel config.
*/
#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT
+#define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET
extern int console_printk[];
@@ -148,9 +148,13 @@ void early_printk(const char *s, ...) { }
#ifdef CONFIG_PRINTK_NMI
extern void printk_nmi_enter(void);
extern void printk_nmi_exit(void);
+extern void printk_nmi_direct_enter(void);
+extern void printk_nmi_direct_exit(void);
#else
static inline void printk_nmi_enter(void) { }
static inline void printk_nmi_exit(void) { }
+static inline void printk_nmi_direct_enter(void) { }
+static inline void printk_nmi_direct_exit(void) { }
#endif /* PRINTK_NMI */
#ifdef CONFIG_PRINTK
diff --git a/include/linux/pti.h b/include/linux/pti.h
index 0174883a935a..1a941efcaa62 100644
--- a/include/linux/pti.h
+++ b/include/linux/pti.h
@@ -6,6 +6,7 @@
#include <asm/pti.h>
#else
static inline void pti_init(void) { }
+static inline void pti_finalize(void) { }
#endif
#endif
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 8461b18e4608..13b4244d44c1 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -171,6 +171,14 @@
#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */
#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */
+#define SSACD_ACDS_1 (0)
+#define SSACD_ACDS_2 (1)
+#define SSACD_ACDS_4 (2)
+#define SSACD_ACDS_8 (3)
+#define SSACD_ACDS_16 (4)
+#define SSACD_ACDS_32 (5)
+#define SSACD_SCDB_4X (0)
+#define SSACD_SCDB_1X (1)
#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */
/* LPSS SSP */
@@ -212,8 +220,6 @@ struct ssp_device {
int type;
int use_count;
int irq;
- int drcmr_rx;
- int drcmr_tx;
struct device_node *of_node;
};
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index b401b962afff..5d65521260b3 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -87,6 +87,10 @@ static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
static inline int
qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; }
static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
+static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+ unsigned int *src,
+ struct qcom_scm_vmperm *newvm,
+ int dest_cnt) { return -ENODEV; }
static inline void qcom_scm_cpu_power_down(u32 flags) {}
static inline u32 qcom_scm_get_version(void) { return 0; }
static inline u32
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 2978fa4add42..a1310482c4ed 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -39,6 +39,10 @@
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_iov_if.h>
+/* 64 max queues * (1 rx + 4 tx-cos + 1 xdp) */
+#define QED_MIN_L2_CONS (2 + NUM_PHYS_TCS_4PORT_K2)
+#define QED_MAX_L2_CONS (64 * (QED_MIN_L2_CONS))
+
struct qed_queue_start_common_params {
/* Should always be relative to entity sending this. */
u8 vport_id;
@@ -49,6 +53,8 @@ struct qed_queue_start_common_params {
struct qed_sb_info *p_sb;
u8 sb_idx;
+
+ u8 tc;
};
struct qed_rxq_start_ret_params {
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index b4040023cbfb..8cd34645e892 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -759,6 +759,9 @@ struct qed_generic_tlvs {
u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
};
+#define QED_I2C_DEV_ADDR_A0 0xA0
+#define QED_I2C_DEV_ADDR_A2 0xA2
+
#define QED_NVM_SIGNATURE 0x12435687
enum qed_nvm_flash_cmd {
@@ -1026,6 +1029,18 @@ struct qed_common_ops {
* @param enabled - true iff WoL should be enabled.
*/
int (*update_wol) (struct qed_dev *cdev, bool enabled);
+
+/**
+ * @brief read_module_eeprom
+ *
+ * @param cdev
+ * @param buf - buffer
+ * @param dev_addr - PHY device memory region
+ * @param offset - offset into eeprom contents to be read
+ * @param len - buffer length, i.e., max bytes to be read
+ */
+ int (*read_module_eeprom)(struct qed_dev *cdev,
+ char *buf, u8 dev_addr, u32 offset, u32 len);
};
#define MASK_FIELD(_name, _value) \
diff --git a/include/linux/random.h b/include/linux/random.h
index 2ddf13b4281e..445a0ea4ff49 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -36,9 +36,10 @@ extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
extern void get_random_bytes(void *buf, int nbytes);
extern int wait_for_random_bytes(void);
+extern bool rng_is_initialized(void);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
extern void del_random_ready_callback(struct random_ready_callback *rdy);
-extern void get_random_bytes_arch(void *buf, int nbytes);
+extern int __must_check get_random_bytes_arch(void *buf, int nbytes);
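Since the arch variant can now report a short fill, a minimal sketch of the checked pattern (assuming the return value is the number of bytes the arch RNG actually supplied):

static void foo_fill_random(void *buf, int len)
{
	int got = get_random_bytes_arch(buf, len);

	if (got < len)	/* arch RNG unavailable or short: top up */
		get_random_bytes((u8 *)buf + got, len - got);
}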
#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 36df6ccbc874..4786c2235b98 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -396,7 +396,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @member: the name of the list_head within the struct.
*
* Continue to iterate over list of given type, continuing after
- * the current position.
+ * the current position, which must have been in the list when the RCU read
+ * lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_from_rcu() except
+ * this starts after the given position and that one starts at the given
+ * position.
*/
#define list_for_each_entry_continue_rcu(pos, head, member) \
for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
@@ -411,6 +420,14 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
*
* Iterate over the tail of a list starting from a given position,
* which must have been in the list when the RCU read lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_continue_rcu() except
+ * this starts from the given position and that one starts from the position
+ * after the given position.
*/
#define list_for_each_entry_from_rcu(pos, head, member) \
for (; &(pos)->member != (head); \
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 65163aa0bb04..75e5b393cf44 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -64,7 +64,6 @@ void rcu_barrier_tasks(void);
void __rcu_read_lock(void);
void __rcu_read_unlock(void);
-void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);
/*
@@ -159,11 +158,11 @@ static inline void rcu_init_nohz(void) { }
} while (0)
/*
- * Note a voluntary context switch for RCU-tasks benefit. This is a
- * macro rather than an inline function to avoid #include hell.
+ * Note a quasi-voluntary context switch for RCU-tasks' benefit.
+ * This is a macro rather than an inline function to avoid #include hell.
*/
#ifdef CONFIG_TASKS_RCU
-#define rcu_note_voluntary_context_switch_lite(t) \
+#define rcu_tasks_qs(t) \
do { \
if (READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
@@ -171,14 +170,14 @@ static inline void rcu_init_nohz(void) { }
#define rcu_note_voluntary_context_switch(t) \
do { \
rcu_all_qs(); \
- rcu_note_voluntary_context_switch_lite(t); \
+ rcu_tasks_qs(t); \
} while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
+#define rcu_tasks_qs(t) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#define call_rcu_tasks call_rcu_sched
#define synchronize_rcu_tasks synchronize_sched
@@ -195,8 +194,8 @@ static inline void exit_tasks_rcu_finish(void) { }
*/
#define cond_resched_tasks_rcu_qs() \
do { \
- if (!cond_resched()) \
- rcu_note_voluntary_context_switch_lite(current); \
+ rcu_tasks_qs(current); \
+ cond_resched(); \
} while (0)
/*
@@ -567,8 +566,8 @@ static inline void rcu_preempt_sleep_check(void) { }
* This is simply an identity function, but it documents where a pointer
* is handed off from RCU to some other synchronization mechanism, for
* example, reference counting or locking. In C11, it would map to
- * kill_dependency(). It could be used as follows:
- * ``
+ * kill_dependency(). It could be used as follows::
+ *
* rcu_read_lock();
* p = rcu_dereference(gp);
* long_lived = is_long_lived(p);
@@ -579,7 +578,6 @@ static inline void rcu_preempt_sleep_check(void) { }
* p = rcu_pointer_handoff(p);
* }
* rcu_read_unlock();
- *``
*/
#define rcu_pointer_handoff(p) (p)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 7b3c82e8a625..8d9a0ea8f0b5 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
#define rcu_note_context_switch(preempt) \
do { \
rcu_sched_qs(); \
- rcu_note_voluntary_context_switch_lite(current); \
+ rcu_tasks_qs(current); \
} while (0)
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
diff --git a/include/linux/reciprocal_div.h b/include/linux/reciprocal_div.h
index e031e9f2f9d8..585ce89c0f33 100644
--- a/include/linux/reciprocal_div.h
+++ b/include/linux/reciprocal_div.h
@@ -25,6 +25,9 @@ struct reciprocal_value {
u8 sh1, sh2;
};
+/* "reciprocal_value" and "reciprocal_divide" together implement the basic
+ * version of the algorithm described in Figure 4.1 of the paper.
+ */
struct reciprocal_value reciprocal_value(u32 d);
static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
@@ -33,4 +36,69 @@ static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
return (t + ((a - t) >> R.sh1)) >> R.sh2;
}
+struct reciprocal_value_adv {
+ u32 m;
+ u8 sh, exp;
+ bool is_wide_m;
+};
+
+/* "reciprocal_value_adv" implements the advanced version of the algorithm
+ * described in Figure 4.2 of the paper except when "divisor > (1U << 31)" whose
+ * ceil(log2(d)) result will be 32 which then requires u128 divide on host. The
+ * exception case could be easily handled before calling "reciprocal_value_adv".
+ *
+ * The advanced version requires more complex calculation to get the reciprocal
+ * multiplier and other control variables, but then could reduce the required
+ * emulation operations.
+ *
+ * It makes no sense to use this advanced version for host divide emulation;
+ * the extra complexity of calculating the multiplier and the other control
+ * variables could completely cancel out the savings on emulation operations.
+ *
+ * However, it does make sense for JIT divide code generation, where we are
+ * willing to spend extra effort on the host in exchange for faster JITed
+ * code. As shown by the following pseudo code, the required emulation
+ * operations go down from 6 (the basic version) to 3 or 4.
+ *
+ * To use the result of "reciprocal_value_adv", suppose we want to calculate
+ * n/d; the pseudo C code is:
+ *
+ * struct reciprocal_value_adv rvalue;
+ * u8 pre_shift, exp;
+ *
+ * // handle exception case.
+ * if (d >= (1U << 31)) {
+ * result = n >= d;
+ * return;
+ * }
+ *
+ *	rvalue = reciprocal_value_adv(d, 32);
+ * exp = rvalue.exp;
+ * if (rvalue.is_wide_m && !(d & 1)) {
+ *		// floor(log2(d & (2^32 - d)))
+ * pre_shift = fls(d & -d) - 1;
+ * rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift);
+ * } else {
+ * pre_shift = 0;
+ * }
+ *
+ * // code generation starts.
+ *	if (d == 1U << exp) {
+ * result = n >> exp;
+ * } else if (rvalue.is_wide_m) {
+ * // pre_shift must be zero when reached here.
+ *		t = ((u64)n * rvalue.m) >> 32;
+ * result = n - t;
+ * result >>= 1;
+ * result += t;
+ * result >>= rvalue.sh - 1;
+ * } else {
+ *		if (pre_shift)
+ *			result = n >> pre_shift;
+ *		else
+ *			result = n;
+ * result = ((u64)result * rvalue.m) >> 32;
+ * result >>= rvalue.sh;
+ * }
+ */
+struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec);
+
#endif /* _LINUX_RECIPROCAL_DIV_H */
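For the basic version, a short sketch of the intended pattern, amortizing one true divide over many divisions by the same runtime divisor; the function name is illustrative:

#include <linux/reciprocal_div.h>

static u32 sum_div(const u32 *vals, size_t n, u32 d)
{
	struct reciprocal_value R = reciprocal_value(d);	/* one real divide */
	u32 sum = 0;
	size_t i;

	for (i = 0; i < n; i++)
		sum += reciprocal_divide(vals[i], R);	/* multiply + shifts */
	return sum;
}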
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index a685da2c4522..e28cce21bad6 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -3,9 +3,10 @@
#define _LINUX_REFCOUNT_H
#include <linux/atomic.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/spinlock_types.h>
+
+struct mutex;
/**
* struct refcount_t - variant of atomic_t specialized for reference counts
@@ -42,17 +43,30 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
+extern __must_check bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r);
+extern void refcount_add_checked(unsigned int i, refcount_t *r);
+
+extern __must_check bool refcount_inc_not_zero_checked(refcount_t *r);
+extern void refcount_inc_checked(refcount_t *r);
+
+extern __must_check bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r);
+
+extern __must_check bool refcount_dec_and_test_checked(refcount_t *r);
+extern void refcount_dec_checked(refcount_t *r);
+
#ifdef CONFIG_REFCOUNT_FULL
-extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
-extern void refcount_add(unsigned int i, refcount_t *r);
-extern __must_check bool refcount_inc_not_zero(refcount_t *r);
-extern void refcount_inc(refcount_t *r);
+#define refcount_add_not_zero refcount_add_not_zero_checked
+#define refcount_add refcount_add_checked
+
+#define refcount_inc_not_zero refcount_inc_not_zero_checked
+#define refcount_inc refcount_inc_checked
+
+#define refcount_sub_and_test refcount_sub_and_test_checked
-extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
+#define refcount_dec_and_test refcount_dec_and_test_checked
+#define refcount_dec refcount_dec_checked
-extern __must_check bool refcount_dec_and_test(refcount_t *r);
-extern void refcount_dec(refcount_t *r);
#else
# ifdef CONFIG_ARCH_HAS_REFCOUNT
# include <asm/refcount.h>
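A minimal sketch of the lookup/put pattern these helpers target; "struct obj" and obj_free() are hypothetical. Under CONFIG_REFCOUNT_FULL the calls below now resolve to the always-checked *_checked implementations:

struct obj {
	refcount_t ref;
	/* ... payload ... */
};

static struct obj *obj_get(struct obj *o)
{
	/* Refuses to resurrect an object whose count already hit zero. */
	return refcount_inc_not_zero(&o->ref) ? o : NULL;
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref))
		obj_free(o);	/* hypothetical release */
}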
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 4f38068ffb71..379505a53722 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -268,6 +268,13 @@ typedef void (*regmap_unlock)(void *);
* field is NULL but precious_table (see below) is not, the
* check is performed on such table (a register is precious if
* it belongs to one of the ranges specified by precious_table).
+ * @readable_noinc_reg: Optional callback returning true if the register
+ * supports multiple read operations without incrementing
+ * the register number. If this field is NULL but
+ * rd_noinc_table (see below) is not, the check is
+ *			  performed on that table (a register supports
+ *			  non-incrementing reads if it belongs to one of the
+ *			  ranges specified by rd_noinc_table).
* @disable_locking: This regmap is either protected by external means or
 *			  is guaranteed not to be accessed from multiple threads.
* Don't use any locking mechanisms.
@@ -295,6 +302,7 @@ typedef void (*regmap_unlock)(void *);
* @rd_table: As above, for read access.
* @volatile_table: As above, for volatile registers.
* @precious_table: As above, for precious registers.
+ * @rd_noinc_table:	As above, for non-incrementing readable registers.
* @reg_defaults: Power on reset values for registers (for use with
* register cache support).
* @num_reg_defaults: Number of elements in reg_defaults.
@@ -344,6 +352,7 @@ struct regmap_config {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
bool disable_locking;
regmap_lock lock;
@@ -360,6 +369,7 @@ struct regmap_config {
const struct regmap_access_table *rd_table;
const struct regmap_access_table *volatile_table;
const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *rd_noinc_table;
const struct reg_default *reg_defaults;
unsigned int num_reg_defaults;
enum regcache_type cache_type;
@@ -514,6 +524,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -558,6 +572,10 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init_spi(struct spi_device *dev,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -646,6 +664,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
i2c, config)
/**
+ * regmap_init_sccb() - Initialise register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sccb(i2c, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sccb, #config, \
+ i2c, config)
+
+/**
* regmap_init_slimbus() - Initialise register map
*
* @slimbus: Device that will be interacted with
@@ -798,6 +829,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
i2c, config)
/**
+ * devm_regmap_init_sccb() - Initialise managed register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sccb(i2c, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sccb, #config, \
+ i2c, config)
+
+/**
* devm_regmap_init_spi() - Initialise register map
*
* @dev: Device that will be interacted with
@@ -946,6 +991,8 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
int regmap_raw_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len);
+int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len);
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
@@ -1196,6 +1243,13 @@ static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_bulk_read(struct regmap *map, unsigned int reg,
void *val, size_t val_count)
{
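A sketch of describing and draining a non-incrementing FIFO register with the new hooks; the chip, register address, and widths are hypothetical:

#define CHIP_FIFO_REG	0x10	/* hypothetical FIFO data register */

static bool chip_readable_noinc(struct device *dev, unsigned int reg)
{
	return reg == CHIP_FIFO_REG;
}

static const struct regmap_config chip_regmap_cfg = {
	.reg_bits		= 8,
	.val_bits		= 8,
	.readable_noinc_reg	= chip_readable_noinc,
};

static int chip_read_fifo(struct regmap *map, u8 *buf, size_t len)
{
	/* The register number is not incremented between reads. */
	return regmap_noinc_read(map, CHIP_FIFO_REG, buf, len);
}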
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index fc2dc8df476f..0fd8fbb74763 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -46,7 +46,7 @@ enum regulator_status {
/**
* struct regulator_linear_range - specify linear voltage ranges
*
- * Specify a range of voltages for regulator_map_linar_range() and
+ * Specify a range of voltages for regulator_map_linear_range() and
* regulator_list_linear_range().
*
* @min_uV: Lowest voltage in range
@@ -220,7 +220,7 @@ struct regulator_ops {
/* set regulator suspend operating mode (defined in consumer.h) */
int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
- int (*resume_early)(struct regulator_dev *rdev);
+ int (*resume)(struct regulator_dev *rdev);
int (*set_pull_down) (struct regulator_dev *);
};
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index e0ccf46f66cf..cb5aecd40f07 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -64,6 +64,17 @@
#define PFUZE3000_VLDO3 11
#define PFUZE3000_VLDO4 12
+#define PFUZE3001_SW1 0
+#define PFUZE3001_SW2 1
+#define PFUZE3001_SW3 2
+#define PFUZE3001_VSNVS 3
+#define PFUZE3001_VLDO1 4
+#define PFUZE3001_VLDO2 5
+#define PFUZE3001_VCCSD 6
+#define PFUZE3001_V33 7
+#define PFUZE3001_VLDO3 8
+#define PFUZE3001_VLDO4 9
+
struct regulator_init_data;
struct pfuze_regulator_platform_data {
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index e6a0031d1b1f..8ad2487a86d5 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -66,7 +66,7 @@ struct rfkill_ops {
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
/**
- * rfkill_alloc - allocate rfkill structure
+ * rfkill_alloc - Allocate rfkill structure
* @name: name of the struct -- the string is not copied internally
* @parent: device that has rf switch on it
* @type: type of the switch (RFKILL_TYPE_*)
@@ -112,7 +112,7 @@ void rfkill_pause_polling(struct rfkill *rfkill);
/**
* rfkill_resume_polling(struct rfkill *rfkill)
*
- * Pause polling -- say transmitter is off for other reasons.
+ * Resume polling
* NOTE: not necessary for suspend/resume -- in that case the
* core stops polling anyway
*/
@@ -130,7 +130,7 @@ void rfkill_resume_polling(struct rfkill *rfkill);
void rfkill_unregister(struct rfkill *rfkill);
/**
- * rfkill_destroy - free rfkill structure
+ * rfkill_destroy - Free rfkill structure
* @rfkill: rfkill structure to be destroyed
*
* Destroys the rfkill structure.
@@ -140,7 +140,7 @@ void rfkill_destroy(struct rfkill *rfkill);
/**
* rfkill_set_hw_state - Set the internal rfkill hardware block state
* @rfkill: pointer to the rfkill class to modify.
- * @state: the current hardware block state to set
+ * @blocked: the current hardware block state to set
*
* rfkill drivers that get events when the hard-blocked state changes
* use this function to notify the rfkill core (and through that also
@@ -161,7 +161,7 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
/**
* rfkill_set_sw_state - Set the internal rfkill software block state
* @rfkill: pointer to the rfkill class to modify.
- * @state: the current software block state to set
+ * @blocked: the current software block state to set
*
* rfkill drivers that get events when the soft-blocked state changes
* (yes, some platforms directly act on input but allow changing again)
@@ -183,7 +183,7 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked);
/**
* rfkill_init_sw_state - Initialize persistent software block state
* @rfkill: pointer to the rfkill class to modify.
- * @state: the current software block state to set
+ * @blocked: the current software block state to set
*
* rfkill drivers that preserve their software block state over power off
* use this function to notify the rfkill core (and through that also
@@ -208,17 +208,17 @@ void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked);
void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
/**
- * rfkill_blocked - query rfkill block
+ * rfkill_blocked - Query rfkill block state
*
* @rfkill: rfkill struct to query
*/
bool rfkill_blocked(struct rfkill *rfkill);
/**
- * rfkill_find_type - Helpper for finding rfkill type by name
+ * rfkill_find_type - Helper for finding rfkill type by name
* @name: the name of the type
*
- * Returns enum rfkill_type that conrresponds the name.
+ * Returns enum rfkill_type that corresponds to the name.
*/
enum rfkill_type rfkill_find_type(const char *name);
@@ -296,7 +296,7 @@ static inline enum rfkill_type rfkill_find_type(const char *name)
const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
/**
- * rfkill_set_led_trigger_name -- set the LED trigger name
+ * rfkill_set_led_trigger_name - Set the LED trigger name
* @rfkill: rfkill struct
* @name: LED trigger name
*
diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h
new file mode 100644
index 000000000000..763d613ce2c2
--- /dev/null
+++ b/include/linux/rhashtable-types.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Simple structures that might be needed in include
+ * files.
+ */
+
+#ifndef _LINUX_RHASHTABLE_TYPES_H
+#define _LINUX_RHASHTABLE_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+struct rhash_head {
+ struct rhash_head __rcu *next;
+};
+
+struct rhlist_head {
+ struct rhash_head rhead;
+ struct rhlist_head __rcu *next;
+};
+
+struct bucket_table;
+
+/**
+ * struct rhashtable_compare_arg - Key for the function rhashtable_compare
+ * @ht: Hash table
+ * @key: Key to compare against
+ */
+struct rhashtable_compare_arg {
+ struct rhashtable *ht;
+ const void *key;
+};
+
+typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
+ const void *obj);
+
+/**
+ * struct rhashtable_params - Hash table construction parameters
+ * @nelem_hint: Hint on number of elements, should be 75% of desired size
+ * @key_len: Length of key
+ * @key_offset: Offset of key in struct to be hashed
+ * @head_offset: Offset of rhash_head in struct to be hashed
+ * @max_size: Maximum size while expanding
+ * @min_size: Minimum size while shrinking
+ * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
+ * @automatic_shrinking: Enable automatic shrinking of tables
+ * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
+ * @obj_hashfn: Function to hash object
+ * @obj_cmpfn: Function to compare key with object
+ */
+struct rhashtable_params {
+ u16 nelem_hint;
+ u16 key_len;
+ u16 key_offset;
+ u16 head_offset;
+ unsigned int max_size;
+ u16 min_size;
+ bool automatic_shrinking;
+ u8 locks_mul;
+ rht_hashfn_t hashfn;
+ rht_obj_hashfn_t obj_hashfn;
+ rht_obj_cmpfn_t obj_cmpfn;
+};
+
+/**
+ * struct rhashtable - Hash table handle
+ * @tbl: Bucket table
+ * @key_len: Key length for hashfn
+ * @max_elems: Maximum number of elements in table
+ * @p: Configuration parameters
+ * @rhlist: True if this is an rhltable
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
+ */
+struct rhashtable {
+ struct bucket_table __rcu *tbl;
+ unsigned int key_len;
+ unsigned int max_elems;
+ struct rhashtable_params p;
+ bool rhlist;
+ struct work_struct run_work;
+ struct mutex mutex;
+ spinlock_t lock;
+ atomic_t nelems;
+};
+
+/**
+ * struct rhltable - Hash table with duplicate objects in a list
+ * @ht: Underlying rhashtable
+ */
+struct rhltable {
+ struct rhashtable ht;
+};
+
+/**
+ * struct rhashtable_walker - Hash table walker
+ * @list: List entry on list of walkers
+ * @tbl: The table that we were walking over
+ */
+struct rhashtable_walker {
+ struct list_head list;
+ struct bucket_table *tbl;
+};
+
+/**
+ * struct rhashtable_iter - Hash table iterator
+ * @ht: Table to iterate through
+ * @p: Current pointer
+ * @list: Current hash list pointer
+ * @walker: Associated rhashtable walker
+ * @slot: Current slot
+ * @skip: Number of entries to skip in slot
+ */
+struct rhashtable_iter {
+ struct rhashtable *ht;
+ struct rhash_head *p;
+ struct rhlist_head *list;
+ struct rhashtable_walker walker;
+ unsigned int slot;
+ unsigned int skip;
+ bool end_of_table;
+};
+
+int rhashtable_init(struct rhashtable *ht,
+ const struct rhashtable_params *params);
+int rhltable_init(struct rhltable *hlt,
+ const struct rhashtable_params *params);
+
+#endif /* _LINUX_RHASHTABLE_TYPES_H */
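A minimal sketch of wiring a hypothetical object into a table with these parameters; insertion and lookup would then go through the usual rhashtable_insert_fast()/rhashtable_lookup_fast() helpers:

struct obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params obj_params = {
	.key_len		= sizeof(u32),
	.key_offset		= offsetof(struct obj, key),
	.head_offset		= offsetof(struct obj, node),
	.automatic_shrinking	= true,
};

static struct rhashtable obj_table;

static int __init obj_table_setup(void)
{
	return rhashtable_init(&obj_table, &obj_params);
}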
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 4e1f535c2034..eb7111039247 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Resizable, Scalable, Concurrent Hash Table
*
@@ -17,37 +18,18 @@
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H
-#include <linux/atomic.h>
-#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
-#include <linux/mutex.h>
#include <linux/rculist.h>
+#include <linux/rhashtable-types.h>
/*
 * The end of the chain is marked with a special nulls marker which has
- * the following format:
- *
- * +-------+-----------------------------------------------------+-+
- * | Base | Hash |1|
- * +-------+-----------------------------------------------------+-+
- *
- * Base (4 bits) : Reserved to distinguish between multiple tables.
- * Specified via &struct rhashtable_params.nulls_base.
- * Hash (27 bits): Full hash (unmasked) of first element added to bucket
- * 1 (1 bit) : Nulls marker (always set)
- *
- * The remaining bits of the next pointer remain unused for now.
+ * the least significant bit set.
*/
-#define RHT_BASE_BITS 4
-#define RHT_HASH_BITS 27
-#define RHT_BASE_SHIFT RHT_HASH_BITS
-
-/* Base bits plus 1 bit for nulls marker */
-#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
/* Maximum chain length before rehash
*
@@ -64,15 +46,6 @@
*/
#define RHT_ELASTICITY 16u
-struct rhash_head {
- struct rhash_head __rcu *next;
-};
-
-struct rhlist_head {
- struct rhash_head rhead;
- struct rhlist_head __rcu *next;
-};
-
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
@@ -102,132 +75,14 @@ struct bucket_table {
struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
-/**
- * struct rhashtable_compare_arg - Key for the function rhashtable_compare
- * @ht: Hash table
- * @key: Key to compare against
- */
-struct rhashtable_compare_arg {
- struct rhashtable *ht;
- const void *key;
-};
-
-typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
- const void *obj);
-
-struct rhashtable;
-
-/**
- * struct rhashtable_params - Hash table construction parameters
- * @nelem_hint: Hint on number of elements, should be 75% of desired size
- * @key_len: Length of key
- * @key_offset: Offset of key in struct to be hashed
- * @head_offset: Offset of rhash_head in struct to be hashed
- * @max_size: Maximum size while expanding
- * @min_size: Minimum size while shrinking
- * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
- * @automatic_shrinking: Enable automatic shrinking of tables
- * @nulls_base: Base value to generate nulls marker
- * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
- * @obj_hashfn: Function to hash object
- * @obj_cmpfn: Function to compare key with object
- */
-struct rhashtable_params {
- u16 nelem_hint;
- u16 key_len;
- u16 key_offset;
- u16 head_offset;
- unsigned int max_size;
- u16 min_size;
- bool automatic_shrinking;
- u8 locks_mul;
- u32 nulls_base;
- rht_hashfn_t hashfn;
- rht_obj_hashfn_t obj_hashfn;
- rht_obj_cmpfn_t obj_cmpfn;
-};
-
-/**
- * struct rhashtable - Hash table handle
- * @tbl: Bucket table
- * @key_len: Key length for hashfn
- * @max_elems: Maximum number of elements in table
- * @p: Configuration parameters
- * @rhlist: True if this is an rhltable
- * @run_work: Deferred worker to expand/shrink asynchronously
- * @mutex: Mutex to protect current/future table swapping
- * @lock: Spin lock to protect walker list
- * @nelems: Number of elements in table
- */
-struct rhashtable {
- struct bucket_table __rcu *tbl;
- unsigned int key_len;
- unsigned int max_elems;
- struct rhashtable_params p;
- bool rhlist;
- struct work_struct run_work;
- struct mutex mutex;
- spinlock_t lock;
- atomic_t nelems;
-};
-
-/**
- * struct rhltable - Hash table with duplicate objects in a list
- * @ht: Underlying rhtable
- */
-struct rhltable {
- struct rhashtable ht;
-};
-
-/**
- * struct rhashtable_walker - Hash table walker
- * @list: List entry on list of walkers
- * @tbl: The table that we were walking over
- */
-struct rhashtable_walker {
- struct list_head list;
- struct bucket_table *tbl;
-};
-
-/**
- * struct rhashtable_iter - Hash table iterator
- * @ht: Table to iterate through
- * @p: Current pointer
- * @list: Current hash list pointer
- * @walker: Associated rhashtable walker
- * @slot: Current slot
- * @skip: Number of entries to skip in slot
- */
-struct rhashtable_iter {
- struct rhashtable *ht;
- struct rhash_head *p;
- struct rhlist_head *list;
- struct rhashtable_walker walker;
- unsigned int slot;
- unsigned int skip;
- bool end_of_table;
-};
-
-static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
-{
- return NULLS_MARKER(ht->p.nulls_base + hash);
-}
-
-#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
- ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
+#define INIT_RHT_NULLS_HEAD(ptr) \
+ ((ptr) = (typeof(ptr)) NULLS_MARKER(0))
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
return ((unsigned long) ptr & 1);
}
-static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
-{
- return ((unsigned long) ptr) >> 1;
-}
-
static inline void *rht_obj(const struct rhashtable *ht,
const struct rhash_head *he)
{
@@ -237,7 +92,7 @@ static inline void *rht_obj(const struct rhashtable *ht,
static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
unsigned int hash)
{
- return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
+ return hash & (tbl->size - 1);
}
static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
@@ -376,11 +231,6 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
}
#endif /* CONFIG_PROVE_LOCKING */
-int rhashtable_init(struct rhashtable *ht,
- const struct rhashtable_params *params);
-int rhltable_init(struct rhltable *hlt,
- const struct rhashtable_params *params);
-
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
struct rhash_head *obj);
@@ -745,7 +595,7 @@ static inline void *__rhashtable_insert_fast(
lock = rht_bucket_lock(tbl, hash);
spin_lock_bh(lock);
- if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
+ if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
spin_unlock_bh(lock);
rcu_read_unlock();
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index e6539536dea9..804a50983ec5 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -23,6 +23,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+struct seq_file;
+
/**
* struct sbitmap_word - Word in a &struct sbitmap.
*/
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 43731fe51c97..95a5018c338e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -167,8 +167,8 @@ struct task_group;
* need_sleep = false;
* wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
- * Where wake_up_state() (and all other wakeup primitives) imply enough
- * barriers to order the store of the variable against wakeup.
+ * where wake_up_state() executes a full memory barrier before accessing the
+ * task state.
*
* Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
@@ -734,6 +734,10 @@ struct task_struct {
/* disallow userland-initiated cgroup migration */
unsigned no_cgroup_migration:1;
#endif
+#ifdef CONFIG_BLK_CGROUP
+ /* to be used once the psi infrastructure lands upstream. */
+ unsigned use_memdelay:1;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
@@ -1017,7 +1021,6 @@ struct task_struct {
u64 last_sum_exec_runtime;
struct callback_head numa_work;
- struct list_head numa_entry;
struct numa_group *numa_group;
/*
@@ -1151,6 +1154,10 @@ struct task_struct {
unsigned int memcg_nr_pages_over_high;
#endif
+#ifdef CONFIG_BLK_CGROUP
+ struct request_queue *throttle_queue;
+#endif
+
#ifdef CONFIG_UPROBES
struct uprobe_task *utask;
#endif
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 1c1a1512ec55..913488d828cb 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -40,7 +40,6 @@ extern unsigned int sysctl_numa_balancing_scan_size;
#ifdef CONFIG_SCHED_DEBUG
extern __read_mostly unsigned int sysctl_sched_migration_cost;
extern __read_mostly unsigned int sysctl_sched_nr_migrate;
-extern __read_mostly unsigned int sysctl_sched_time_avg;
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index 411b52e424e1..abe28d5cb3f4 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -9,17 +9,16 @@
#define LINUX_SCHED_CLOCK
#ifdef CONFIG_GENERIC_SCHED_CLOCK
-extern void sched_clock_postinit(void);
+extern void generic_sched_clock_init(void);
extern void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate);
#else
-static inline void sched_clock_postinit(void) { }
+static inline void generic_sched_clock_init(void) { }
static inline void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate)
{
- ;
}
#endif
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index b36c76635f18..83d94341e003 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -801,4 +801,11 @@ struct sctp_strreset_resptsn {
__be32 receivers_next_tsn;
};
+enum {
+ SCTP_DSCP_SET_MASK = 0x1,
+ SCTP_DSCP_VAL_MASK = 0xfc,
+ SCTP_FLOWLABEL_SET_MASK = 0x100000,
+ SCTP_FLOWLABEL_VAL_MASK = 0xfffff
+};
+
#endif /* __LINUX_SCTP_H__ */
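A sketch of how the mask pairs are meant to compose; the helper names and the shift convention for the 6-bit DSCP value are illustrative assumptions:

/* Bit 0 flags "set DSCP"; bits 2-7 carry the DSCP value itself. */
static u32 sctp_encode_dscp(u8 dscp)
{
	return SCTP_DSCP_SET_MASK | ((dscp << 2) & SCTP_DSCP_VAL_MASK);
}

static bool sctp_dscp_requested(u32 val)
{
	return val & SCTP_DSCP_SET_MASK;
}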
diff --git a/include/linux/security.h b/include/linux/security.h
index 63030c85ee19..75f4156c84d7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -159,6 +159,27 @@ extern int mmap_min_addr_handler(struct ctl_table *table, int write,
typedef int (*initxattrs) (struct inode *inode,
const struct xattr *xattr_array, void *fs_data);
+
+/* Keep the kernel_load_data_id enum in sync with kernel_read_file_id */
+#define __data_id_enumify(ENUM, dummy) LOADING_ ## ENUM,
+#define __data_id_stringify(dummy, str) #str,
+
+enum kernel_load_data_id {
+ __kernel_read_file_id(__data_id_enumify)
+};
+
+static const char * const kernel_load_data_str[] = {
+ __kernel_read_file_id(__data_id_stringify)
+};
+
+static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id)
+{
+ if ((unsigned)id >= LOADING_MAX_ID)
+ return kernel_load_data_str[LOADING_UNKNOWN];
+
+ return kernel_load_data_str[id];
+}
+
#ifdef CONFIG_SECURITY
struct security_mnt_opts {
@@ -309,7 +330,7 @@ void security_file_set_fowner(struct file *file);
int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
-int security_file_open(struct file *file, const struct cred *cred);
+int security_file_open(struct file *file);
int security_task_alloc(struct task_struct *task, unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -320,6 +341,7 @@ void security_cred_getsecid(const struct cred *c, u32 *secid);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
+int security_kernel_load_data(enum kernel_load_data_id id);
int security_kernel_read_file(struct file *file, enum kernel_read_file_id id);
int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
@@ -858,8 +880,7 @@ static inline int security_file_receive(struct file *file)
return 0;
}
-static inline int security_file_open(struct file *file,
- const struct cred *cred)
+static inline int security_file_open(struct file *file)
{
return 0;
}
@@ -909,6 +930,11 @@ static inline int security_kernel_module_request(char *kmod_name)
return 0;
}
+static inline int security_kernel_load_data(enum kernel_load_data_id id)
+{
+ return 0;
+}
+
static inline int security_kernel_read_file(struct file *file,
enum kernel_read_file_id id)
{
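A sketch of a load path using the new hook, for data handed in without a struct file to show the LSM; LOADING_FIRMWARE here stands for whichever id the __kernel_read_file_id list generates for the caller:

static int load_blob(const void *data, size_t size)
{
	int ret;

	ret = security_kernel_load_data(LOADING_FIRMWARE);
	if (ret)
		return ret;	/* an LSM rejected the load */

	/* ... consume the blob ... */
	return 0;
}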
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index ebce9e24906a..d37518e89db2 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -231,6 +231,50 @@ struct sfp_eeprom_id {
struct sfp_eeprom_ext ext;
} __packed;
+struct sfp_diag {
+ __be16 temp_high_alarm;
+ __be16 temp_low_alarm;
+ __be16 temp_high_warn;
+ __be16 temp_low_warn;
+ __be16 volt_high_alarm;
+ __be16 volt_low_alarm;
+ __be16 volt_high_warn;
+ __be16 volt_low_warn;
+ __be16 bias_high_alarm;
+ __be16 bias_low_alarm;
+ __be16 bias_high_warn;
+ __be16 bias_low_warn;
+ __be16 txpwr_high_alarm;
+ __be16 txpwr_low_alarm;
+ __be16 txpwr_high_warn;
+ __be16 txpwr_low_warn;
+ __be16 rxpwr_high_alarm;
+ __be16 rxpwr_low_alarm;
+ __be16 rxpwr_high_warn;
+ __be16 rxpwr_low_warn;
+ __be16 laser_temp_high_alarm;
+ __be16 laser_temp_low_alarm;
+ __be16 laser_temp_high_warn;
+ __be16 laser_temp_low_warn;
+ __be16 tec_cur_high_alarm;
+ __be16 tec_cur_low_alarm;
+ __be16 tec_cur_high_warn;
+ __be16 tec_cur_low_warn;
+ __be32 cal_rxpwr4;
+ __be32 cal_rxpwr3;
+ __be32 cal_rxpwr2;
+ __be32 cal_rxpwr1;
+ __be32 cal_rxpwr0;
+ __be16 cal_txi_slope;
+ __be16 cal_txi_offset;
+ __be16 cal_txpwr_slope;
+ __be16 cal_txpwr_offset;
+ __be16 cal_t_slope;
+ __be16 cal_t_offset;
+ __be16 cal_v_slope;
+ __be16 cal_v_offset;
+} __packed;
+
/* SFP EEPROM registers */
enum {
SFP_PHYS_ID = 0x00,
@@ -384,7 +428,33 @@ enum {
SFP_TEC_CUR = 0x6c,
SFP_STATUS = 0x6e,
- SFP_ALARM = 0x70,
+ SFP_ALARM0 = 0x70,
+ SFP_ALARM0_TEMP_HIGH = BIT(7),
+ SFP_ALARM0_TEMP_LOW = BIT(6),
+ SFP_ALARM0_VCC_HIGH = BIT(5),
+ SFP_ALARM0_VCC_LOW = BIT(4),
+ SFP_ALARM0_TX_BIAS_HIGH = BIT(3),
+ SFP_ALARM0_TX_BIAS_LOW = BIT(2),
+ SFP_ALARM0_TXPWR_HIGH = BIT(1),
+ SFP_ALARM0_TXPWR_LOW = BIT(0),
+
+ SFP_ALARM1 = 0x71,
+ SFP_ALARM1_RXPWR_HIGH = BIT(7),
+ SFP_ALARM1_RXPWR_LOW = BIT(6),
+
+ SFP_WARN0 = 0x74,
+ SFP_WARN0_TEMP_HIGH = BIT(7),
+ SFP_WARN0_TEMP_LOW = BIT(6),
+ SFP_WARN0_VCC_HIGH = BIT(5),
+ SFP_WARN0_VCC_LOW = BIT(4),
+ SFP_WARN0_TX_BIAS_HIGH = BIT(3),
+ SFP_WARN0_TX_BIAS_LOW = BIT(2),
+ SFP_WARN0_TXPWR_HIGH = BIT(1),
+ SFP_WARN0_TXPWR_LOW = BIT(0),
+
+ SFP_WARN1 = 0x75,
+ SFP_WARN1_RXPWR_HIGH = BIT(7),
+ SFP_WARN1_RXPWR_LOW = BIT(6),
SFP_EXT_STATUS = 0x76,
SFP_VSL = 0x78,
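A sketch of decoding the split alarm registers, assuming both bytes were already read from the module's diagnostic page:

static void sfp_report_alarms(u8 alarm0, u8 alarm1)
{
	if (alarm0 & (SFP_ALARM0_TEMP_HIGH | SFP_ALARM0_TEMP_LOW))
		pr_warn("sfp: temperature alarm\n");
	if (alarm0 & (SFP_ALARM0_TXPWR_HIGH | SFP_ALARM0_TXPWR_LOW))
		pr_warn("sfp: tx power alarm\n");
	if (alarm1 & (SFP_ALARM1_RXPWR_HIGH | SFP_ALARM1_RXPWR_LOW))
		pr_warn("sfp: rx power alarm\n");
}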
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 610a201126ee..17a13e4785fc 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -641,6 +641,7 @@ typedef unsigned char *sk_buff_data_t;
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
* @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
* @dst_pending_confirm: need to confirm neighbour
+ * @decrypted: Decrypted SKB
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
* @mark: Generic packet mark
@@ -675,12 +676,16 @@ struct sk_buff {
* UDP receive path is one user.
*/
unsigned long dev_scratch;
- int ip_defrag_offset;
};
};
- struct rb_node rbnode; /* used in netem & tcp stack */
+ struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
+ struct list_head list;
+ };
+
+ union {
+ struct sock *sk;
+ int ip_defrag_offset;
};
- struct sock *sk;
union {
ktime_t tstamp;
@@ -791,6 +796,9 @@ struct sk_buff {
__u8 tc_redirected:1;
__u8 tc_from_ingress:1;
#endif
+#ifdef CONFIG_TLS_DEVICE
+ __u8 decrypted:1;
+#endif
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
@@ -1030,6 +1038,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
}
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+void skb_headers_offset_update(struct sk_buff *skb, int off);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
@@ -2354,7 +2363,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
if (skb_transport_header_was_set(skb))
return;
- if (skb_flow_dissect_flow_keys_basic(skb, &keys, 0, 0, 0, 0, 0))
+ if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
skb_set_transport_header(skb, keys.control.thoff);
else
skb_set_transport_header(skb, offset_hint);
@@ -2580,7 +2589,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
kfree_skb(skb);
}
-void skb_rbtree_purge(struct rb_root *root);
+unsigned int skb_rbtree_purge(struct rb_root *root);
void *netdev_alloc_frag(unsigned int fragsz);
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index c174844cf663..d0884b525001 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -25,8 +25,6 @@ struct smpboot_thread_data;
* parked (cpu offline)
* @unpark: Optional unpark function, called when the thread is
* unparked (cpu online)
- * @cpumask: Internal state. To update which threads are unparked,
- * call smpboot_update_cpumask_percpu_thread().
* @selfparking: Thread is not parked by the park function.
* @thread_comm: The base name of the thread
*/
@@ -40,23 +38,12 @@ struct smp_hotplug_thread {
void (*cleanup)(unsigned int cpu, bool online);
void (*park)(unsigned int cpu);
void (*unpark)(unsigned int cpu);
- cpumask_var_t cpumask;
bool selfparking;
const char *thread_comm;
};
-int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *cpumask);
-
-static inline int
-smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
-{
- return smpboot_register_percpu_thread_cpumask(plug_thread,
- cpu_possible_mask);
-}
+int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *);
#endif
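A sketch of registering a per-CPU thread against the simplified API; the callbacks are hypothetical, and the thread set now always spans cpu_possible_mask:

static DEFINE_PER_CPU(struct task_struct *, my_task);

static int my_should_run(unsigned int cpu)
{
	return 0;	/* hypothetical work-pending test */
}

static void my_thread_fn(unsigned int cpu)
{
	/* bound to @cpu, runs while my_should_run() says so */
}

static struct smp_hotplug_thread my_threads = {
	.store			= &my_task,
	.thread_should_run	= my_should_run,
	.thread_fn		= my_thread_fn,
	.thread_comm		= "mythread/%u",
};

static int __init my_threads_init(void)
{
	return smpboot_register_percpu_thread(&my_threads);
}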
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
new file mode 100644
index 000000000000..7e3b9c605ab2
--- /dev/null
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#ifndef __LLCC_QCOM__
+#define __LLCC_QCOM__
+
+#include <linux/platform_device.h>
+
+#define LLCC_CPUSS 1
+#define LLCC_VIDSC0 2
+#define LLCC_VIDSC1 3
+#define LLCC_ROTATOR 4
+#define LLCC_VOICE 5
+#define LLCC_AUDIO 6
+#define LLCC_MDMHPGRW 7
+#define LLCC_MDM 8
+#define LLCC_CMPT 10
+#define LLCC_GPUHTW 11
+#define LLCC_GPU 12
+#define LLCC_MMUHWT 13
+#define LLCC_CMPTDMA 15
+#define LLCC_DISP 16
+#define LLCC_VIDFW 17
+#define LLCC_MDMHPFX 20
+#define LLCC_MDMPNG 21
+#define LLCC_AUDHW 22
+
+/**
+ * llcc_slice_desc - Cache slice descriptor
+ * @slice_id: llcc slice id
+ * @slice_size: Size allocated for the llcc slice
+ */
+struct llcc_slice_desc {
+ u32 slice_id;
+ size_t slice_size;
+};
+
+/**
+ * llcc_slice_config - Data associated with the llcc slice
+ * @usecase_id: usecase id for which the llcc slice is used
+ * @slice_id: llcc slice id assigned to each slice
+ * @max_cap: maximum capacity of the llcc slice
+ * @priority: priority of the llcc slice
+ * @fixed_size: when true, the llcc slice cannot grow beyond its size
+ * @bonus_ways: bonus ways associated with llcc slice
+ * @res_ways: reserved ways associated with llcc slice
+ * @cache_mode: mode of the llcc slice
+ * @probe_target_ways: Probe only reserved and bonus ways on a cache miss
+ * @dis_cap_alloc: Disable capacity based allocation
+ * @retain_on_pc: Retain through power collapse
+ * @activate_on_init: activate the slice on init
+ */
+struct llcc_slice_config {
+ u32 usecase_id;
+ u32 slice_id;
+ u32 max_cap;
+ u32 priority;
+ bool fixed_size;
+ u32 bonus_ways;
+ u32 res_ways;
+ u32 cache_mode;
+ u32 probe_target_ways;
+ bool dis_cap_alloc;
+ bool retain_on_pc;
+ bool activate_on_init;
+};
+
+/**
+ * llcc_drv_data - Data associated with the llcc driver
+ * @regmap: regmap associated with the llcc device
+ * @cfg: pointer to the data structure for slice configuration
+ * @lock: mutex associated with each slice
+ * @cfg_size: size of the config data table
+ * @max_slices: max slices as read from device tree
+ * @bcast_off: Offset of the broadcast bank
+ * @num_banks: Number of llcc banks
+ * @bitmap: Bit map to track the active slice ids
+ * @offsets: Pointer to the bank offsets array
+ */
+struct llcc_drv_data {
+ struct regmap *regmap;
+ const struct llcc_slice_config *cfg;
+ struct mutex lock;
+ u32 cfg_size;
+ u32 max_slices;
+ u32 bcast_off;
+ u32 num_banks;
+ unsigned long *bitmap;
+ u32 *offsets;
+};
+
+#if IS_ENABLED(CONFIG_QCOM_LLCC)
+/**
+ * llcc_slice_getd - get llcc slice descriptor
+ * @uid: usecase_id of the client
+ */
+struct llcc_slice_desc *llcc_slice_getd(u32 uid);
+
+/**
+ * llcc_slice_putd - put llcc slice descriptor
+ * @desc: Pointer to llcc slice descriptor
+ */
+void llcc_slice_putd(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_get_slice_id - get slice id
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_get_slice_id(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_get_slice_size - llcc slice size
+ * @desc: Pointer to llcc slice descriptor
+ */
+size_t llcc_get_slice_size(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_slice_activate - Activate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_slice_activate(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_slice_deactivate - Deactivate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_slice_deactivate(struct llcc_slice_desc *desc);
+
+/**
+ * qcom_llcc_probe - program the sct table
+ * @pdev: platform device pointer
+ * @table: soc sct table
+ * @sz: Size of the config table
+ */
+int qcom_llcc_probe(struct platform_device *pdev,
+ const struct llcc_slice_config *table, u32 sz);
+#else
+static inline struct llcc_slice_desc *llcc_slice_getd(u32 uid)
+{
+ return NULL;
+}
+
+static inline void llcc_slice_putd(struct llcc_slice_desc *desc)
+{
+}
+
+static inline int llcc_get_slice_id(struct llcc_slice_desc *desc)
+{
+ return -EINVAL;
+}
+
+static inline size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
+{
+ return 0;
+}
+
+static inline int llcc_slice_activate(struct llcc_slice_desc *desc)
+{
+ return -EINVAL;
+}
+
+static inline int llcc_slice_deactivate(struct llcc_slice_desc *desc)
+{
+ return -EINVAL;
+}
+
+static inline int qcom_llcc_probe(struct platform_device *pdev,
+ const struct llcc_slice_config *table, u32 sz)
+{
+ return -ENODEV;
+}
+
+static inline int qcom_llcc_remove(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
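A sketch of the intended client flow, assuming a GPU use case and minimal error handling:

static int gpu_llcc_attach(void)
{
	struct llcc_slice_desc *slice;
	int ret;

	slice = llcc_slice_getd(LLCC_GPU);
	if (IS_ERR_OR_NULL(slice))
		return slice ? PTR_ERR(slice) : -ENODEV;

	ret = llcc_slice_activate(slice);
	if (ret) {
		llcc_slice_putd(slice);
		return ret;
	}

	pr_debug("llcc slice %d, %zu bytes\n",
		 llcc_get_slice_id(slice), llcc_get_slice_size(slice));
	return 0;
}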
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index 66dcb9ec273a..5addaf5ccbce 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -42,7 +42,9 @@
#define EXYNOS_SWRESET 0x0400
#define S5P_WAKEUP_STAT 0x0600
-#define S5P_EINT_WAKEUP_MASK 0x0604
+/* Value for EXYNOS_EINT_WAKEUP_MASK disabling all external wakeup interrupts */
+#define EXYNOS_EINT_WAKEUP_MASK_DISABLED 0xffffffff
+#define EXYNOS_EINT_WAKEUP_MASK 0x0604
#define S5P_WAKEUP_MASK 0x0608
#define S5P_WAKEUP_MASK2 0x0614
@@ -180,6 +182,9 @@
#define S5P_CORE_WAKEUP_FROM_LOCAL_CFG (0x3 << 8)
#define S5P_CORE_AUTOWAKEUP_EN (1 << 31)
+/* Only for S5Pv210 */
+#define S5PV210_EINT_WAKEUP_MASK 0xC004
+
/* Only for EXYNOS4210 */
#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154
#define S5P_CMU_RESET_LCD1_LOWPWR 0x1174
@@ -641,6 +646,7 @@
| EXYNOS5420_KFC_USE_STANDBY_WFI3)
/* For EXYNOS5433 */
+#define EXYNOS5433_EINT_WAKEUP_MASK (0x060C)
#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x0728)
#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028)
#define EXYNOS5433_PAD_RETENTION_MMC2_OPTION (0x30C8)
diff --git a/include/linux/spi/adi_spi3.h b/include/linux/spi/adi_spi3.h
deleted file mode 100644
index c84123aa1d06..000000000000
--- a/include/linux/spi/adi_spi3.h
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Analog Devices SPI3 controller driver
- *
- * Copyright (c) 2014 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _ADI_SPI3_H_
-#define _ADI_SPI3_H_
-
-#include <linux/types.h>
-
-/* SPI_CONTROL */
-#define SPI_CTL_EN 0x00000001 /* Enable */
-#define SPI_CTL_MSTR 0x00000002 /* Master/Slave */
-#define SPI_CTL_PSSE 0x00000004 /* controls modf error in master mode */
-#define SPI_CTL_ODM 0x00000008 /* Open Drain Mode */
-#define SPI_CTL_CPHA 0x00000010 /* Clock Phase */
-#define SPI_CTL_CPOL 0x00000020 /* Clock Polarity */
-#define SPI_CTL_ASSEL 0x00000040 /* Slave Select Pin Control */
-#define SPI_CTL_SELST 0x00000080 /* Slave Select Polarity in-between transfers */
-#define SPI_CTL_EMISO 0x00000100 /* Enable MISO */
-#define SPI_CTL_SIZE 0x00000600 /* Word Transfer Size */
-#define SPI_CTL_SIZE08 0x00000000 /* SIZE: 8 bits */
-#define SPI_CTL_SIZE16 0x00000200 /* SIZE: 16 bits */
-#define SPI_CTL_SIZE32 0x00000400 /* SIZE: 32 bits */
-#define SPI_CTL_LSBF 0x00001000 /* LSB First */
-#define SPI_CTL_FCEN 0x00002000 /* Flow-Control Enable */
-#define SPI_CTL_FCCH 0x00004000 /* Flow-Control Channel Selection */
-#define SPI_CTL_FCPL 0x00008000 /* Flow-Control Polarity */
-#define SPI_CTL_FCWM 0x00030000 /* Flow-Control Water-Mark */
-#define SPI_CTL_FIFO0 0x00000000 /* FCWM: TFIFO empty or RFIFO Full */
-#define SPI_CTL_FIFO1 0x00010000 /* FCWM: TFIFO 75% or more empty or RFIFO 75% or more full */
-#define SPI_CTL_FIFO2 0x00020000 /* FCWM: TFIFO 50% or more empty or RFIFO 50% or more full */
-#define SPI_CTL_FMODE 0x00040000 /* Fast-mode Enable */
-#define SPI_CTL_MIOM 0x00300000 /* Multiple I/O Mode */
-#define SPI_CTL_MIO_DIS 0x00000000 /* MIOM: Disable */
-#define SPI_CTL_MIO_DUAL 0x00100000 /* MIOM: Enable DIOM (Dual I/O Mode) */
-#define SPI_CTL_MIO_QUAD 0x00200000 /* MIOM: Enable QUAD (Quad SPI Mode) */
-#define SPI_CTL_SOSI 0x00400000 /* Start on MOSI */
-/* SPI_RX_CONTROL */
-#define SPI_RXCTL_REN 0x00000001 /* Receive Channel Enable */
-#define SPI_RXCTL_RTI 0x00000004 /* Receive Transfer Initiate */
-#define SPI_RXCTL_RWCEN 0x00000008 /* Receive Word Counter Enable */
-#define SPI_RXCTL_RDR 0x00000070 /* Receive Data Request */
-#define SPI_RXCTL_RDR_DIS 0x00000000 /* RDR: Disabled */
-#define SPI_RXCTL_RDR_NE 0x00000010 /* RDR: RFIFO not empty */
-#define SPI_RXCTL_RDR_25 0x00000020 /* RDR: RFIFO 25% full */
-#define SPI_RXCTL_RDR_50 0x00000030 /* RDR: RFIFO 50% full */
-#define SPI_RXCTL_RDR_75 0x00000040 /* RDR: RFIFO 75% full */
-#define SPI_RXCTL_RDR_FULL 0x00000050 /* RDR: RFIFO full */
-#define SPI_RXCTL_RDO 0x00000100 /* Receive Data Over-Run */
-#define SPI_RXCTL_RRWM 0x00003000 /* FIFO Regular Water-Mark */
-#define SPI_RXCTL_RWM_0 0x00000000 /* RRWM: RFIFO Empty */
-#define SPI_RXCTL_RWM_25 0x00001000 /* RRWM: RFIFO 25% full */
-#define SPI_RXCTL_RWM_50 0x00002000 /* RRWM: RFIFO 50% full */
-#define SPI_RXCTL_RWM_75 0x00003000 /* RRWM: RFIFO 75% full */
-#define SPI_RXCTL_RUWM 0x00070000 /* FIFO Urgent Water-Mark */
-#define SPI_RXCTL_UWM_DIS 0x00000000 /* RUWM: Disabled */
-#define SPI_RXCTL_UWM_25 0x00010000 /* RUWM: RFIFO 25% full */
-#define SPI_RXCTL_UWM_50 0x00020000 /* RUWM: RFIFO 50% full */
-#define SPI_RXCTL_UWM_75 0x00030000 /* RUWM: RFIFO 75% full */
-#define SPI_RXCTL_UWM_FULL 0x00040000 /* RUWM: RFIFO full */
-/* SPI_TX_CONTROL */
-#define SPI_TXCTL_TEN 0x00000001 /* Transmit Channel Enable */
-#define SPI_TXCTL_TTI 0x00000004 /* Transmit Transfer Initiate */
-#define SPI_TXCTL_TWCEN 0x00000008 /* Transmit Word Counter Enable */
-#define SPI_TXCTL_TDR 0x00000070 /* Transmit Data Request */
-#define SPI_TXCTL_TDR_DIS 0x00000000 /* TDR: Disabled */
-#define SPI_TXCTL_TDR_NF 0x00000010 /* TDR: TFIFO not full */
-#define SPI_TXCTL_TDR_25 0x00000020 /* TDR: TFIFO 25% empty */
-#define SPI_TXCTL_TDR_50 0x00000030 /* TDR: TFIFO 50% empty */
-#define SPI_TXCTL_TDR_75 0x00000040 /* TDR: TFIFO 75% empty */
-#define SPI_TXCTL_TDR_EMPTY 0x00000050 /* TDR: TFIFO empty */
-#define SPI_TXCTL_TDU 0x00000100 /* Transmit Data Under-Run */
-#define SPI_TXCTL_TRWM 0x00003000 /* FIFO Regular Water-Mark */
-#define SPI_TXCTL_RWM_FULL 0x00000000 /* TRWM: TFIFO full */
-#define SPI_TXCTL_RWM_25 0x00001000 /* TRWM: TFIFO 25% empty */
-#define SPI_TXCTL_RWM_50 0x00002000 /* TRWM: TFIFO 50% empty */
-#define SPI_TXCTL_RWM_75 0x00003000 /* TRWM: TFIFO 75% empty */
-#define SPI_TXCTL_TUWM 0x00070000 /* FIFO Urgent Water-Mark */
-#define SPI_TXCTL_UWM_DIS 0x00000000 /* TUWM: Disabled */
-#define SPI_TXCTL_UWM_25 0x00010000 /* TUWM: TFIFO 25% empty */
-#define SPI_TXCTL_UWM_50 0x00020000 /* TUWM: TFIFO 50% empty */
-#define SPI_TXCTL_UWM_75 0x00030000 /* TUWM: TFIFO 75% empty */
-#define SPI_TXCTL_UWM_EMPTY 0x00040000 /* TUWM: TFIFO empty */
-/* SPI_CLOCK */
-#define SPI_CLK_BAUD 0x0000FFFF /* Baud Rate */
-/* SPI_DELAY */
-#define SPI_DLY_STOP 0x000000FF /* Transfer delay time in multiples of SCK period */
-#define SPI_DLY_LEADX 0x00000100 /* Extended (1 SCK) LEAD Control */
-#define SPI_DLY_LAGX 0x00000200 /* Extended (1 SCK) LAG control */
-/* SPI_SSEL */
-#define SPI_SLVSEL_SSE1 0x00000002 /* SPISSEL1 Enable */
-#define SPI_SLVSEL_SSE2 0x00000004 /* SPISSEL2 Enable */
-#define SPI_SLVSEL_SSE3 0x00000008 /* SPISSEL3 Enable */
-#define SPI_SLVSEL_SSE4 0x00000010 /* SPISSEL4 Enable */
-#define SPI_SLVSEL_SSE5 0x00000020 /* SPISSEL5 Enable */
-#define SPI_SLVSEL_SSE6 0x00000040 /* SPISSEL6 Enable */
-#define SPI_SLVSEL_SSE7 0x00000080 /* SPISSEL7 Enable */
-#define SPI_SLVSEL_SSEL1 0x00000200 /* SPISSEL1 Value */
-#define SPI_SLVSEL_SSEL2 0x00000400 /* SPISSEL2 Value */
-#define SPI_SLVSEL_SSEL3 0x00000800 /* SPISSEL3 Value */
-#define SPI_SLVSEL_SSEL4 0x00001000 /* SPISSEL4 Value */
-#define SPI_SLVSEL_SSEL5 0x00002000 /* SPISSEL5 Value */
-#define SPI_SLVSEL_SSEL6 0x00004000 /* SPISSEL6 Value */
-#define SPI_SLVSEL_SSEL7 0x00008000 /* SPISSEL7 Value */
-/* SPI_RWC */
-#define SPI_RWC_VALUE 0x0000FFFF /* Received Word-Count */
-/* SPI_RWCR */
-#define SPI_RWCR_VALUE 0x0000FFFF /* Received Word-Count Reload */
-/* SPI_TWC */
-#define SPI_TWC_VALUE 0x0000FFFF /* Transmitted Word-Count */
-/* SPI_TWCR */
-#define SPI_TWCR_VALUE 0x0000FFFF /* Transmitted Word-Count Reload */
-/* SPI_IMASK */
-#define SPI_IMSK_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
-#define SPI_IMSK_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
-#define SPI_IMSK_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
-#define SPI_IMSK_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
-#define SPI_IMSK_RSM 0x00000100 /* Receive Start Interrupt Mask */
-#define SPI_IMSK_TSM 0x00000200 /* Transmit Start Interrupt Mask */
-#define SPI_IMSK_RFM 0x00000400 /* Receive Finish Interrupt Mask */
-#define SPI_IMSK_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
-/* SPI_IMASKCL */
-#define SPI_IMSK_CLR_RUW 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_CLR_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_CLR_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
-#define SPI_IMSK_CLR_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
-#define SPI_IMSK_CLR_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
-#define SPI_IMSK_CLR_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
-#define SPI_IMSK_CLR_RSM 0x00000100 /* Receive Start Interrupt Mask */
-#define SPI_IMSK_CLR_TSM 0x00000200 /* Transmit Start Interrupt Mask */
-#define SPI_IMSK_CLR_RFM 0x00000400 /* Receive Finish Interrupt Mask */
-#define SPI_IMSK_CLR_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
-/* SPI_IMASKST */
-#define SPI_IMSK_SET_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_SET_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_SET_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
-#define SPI_IMSK_SET_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
-#define SPI_IMSK_SET_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
-#define SPI_IMSK_SET_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
-#define SPI_IMSK_SET_RSM 0x00000100 /* Receive Start Interrupt Mask */
-#define SPI_IMSK_SET_TSM 0x00000200 /* Transmit Start Interrupt Mask */
-#define SPI_IMSK_SET_RFM 0x00000400 /* Receive Finish Interrupt Mask */
-#define SPI_IMSK_SET_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
-/* SPI_STATUS */
-#define SPI_STAT_SPIF 0x00000001 /* SPI Finished */
-#define SPI_STAT_RUWM 0x00000002 /* Receive Urgent Water-Mark Breached */
-#define SPI_STAT_TUWM 0x00000004 /* Transmit Urgent Water-Mark Breached */
-#define SPI_STAT_ROE 0x00000010 /* Receive Over-Run Error Indication */
-#define SPI_STAT_TUE 0x00000020 /* Transmit Under-Run Error Indication */
-#define SPI_STAT_TCE 0x00000040 /* Transmit Collision Error Indication */
-#define SPI_STAT_MODF 0x00000080 /* Mode Fault Error Indication */
-#define SPI_STAT_RS 0x00000100 /* Receive Start Indication */
-#define SPI_STAT_TS 0x00000200 /* Transmit Start Indication */
-#define SPI_STAT_RF 0x00000400 /* Receive Finish Indication */
-#define SPI_STAT_TF 0x00000800 /* Transmit Finish Indication */
-#define SPI_STAT_RFS 0x00007000 /* SPI_RFIFO status */
-#define SPI_STAT_RFIFO_EMPTY 0x00000000 /* RFS: RFIFO Empty */
-#define SPI_STAT_RFIFO_25 0x00001000 /* RFS: RFIFO 25% Full */
-#define SPI_STAT_RFIFO_50 0x00002000 /* RFS: RFIFO 50% Full */
-#define SPI_STAT_RFIFO_75 0x00003000 /* RFS: RFIFO 75% Full */
-#define SPI_STAT_RFIFO_FULL 0x00004000 /* RFS: RFIFO Full */
-#define SPI_STAT_TFS 0x00070000 /* SPI_TFIFO status */
-#define SPI_STAT_TFIFO_FULL 0x00000000 /* TFS: TFIFO full */
-#define SPI_STAT_TFIFO_25 0x00010000 /* TFS: TFIFO 25% empty */
-#define SPI_STAT_TFIFO_50 0x00020000 /* TFS: TFIFO 50% empty */
-#define SPI_STAT_TFIFO_75 0x00030000 /* TFS: TFIFO 75% empty */
-#define SPI_STAT_TFIFO_EMPTY 0x00040000 /* TFS: TFIFO empty */
-#define SPI_STAT_FCS 0x00100000 /* Flow-Control Stall Indication */
-#define SPI_STAT_RFE 0x00400000 /* SPI_RFIFO Empty */
-#define SPI_STAT_TFF 0x00800000 /* SPI_TFIFO Full */
-/* SPI_ILAT */
-#define SPI_ILAT_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */
-#define SPI_ILAT_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */
-#define SPI_ILAT_ROI 0x00000010 /* Receive Over-Run Error Indication */
-#define SPI_ILAT_TUI 0x00000020 /* Transmit Under-Run Error Indication */
-#define SPI_ILAT_TCI 0x00000040 /* Transmit Collision Error Indication */
-#define SPI_ILAT_MFI 0x00000080 /* Mode Fault Error Indication */
-#define SPI_ILAT_RSI 0x00000100 /* Receive Start Indication */
-#define SPI_ILAT_TSI 0x00000200 /* Transmit Start Indication */
-#define SPI_ILAT_RFI 0x00000400 /* Receive Finish Indication */
-#define SPI_ILAT_TFI 0x00000800 /* Transmit Finish Indication */
-/* SPI_ILATCL */
-#define SPI_ILAT_CLR_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */
-#define SPI_ILAT_CLR_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */
-#define SPI_ILAT_CLR_ROI 0x00000010 /* Receive Over-Run Error Indication */
-#define SPI_ILAT_CLR_TUI 0x00000020 /* Transmit Under-Run Error Indication */
-#define SPI_ILAT_CLR_TCI 0x00000040 /* Transmit Collision Error Indication */
-#define SPI_ILAT_CLR_MFI 0x00000080 /* Mode Fault Error Indication */
-#define SPI_ILAT_CLR_RSI 0x00000100 /* Receive Start Indication */
-#define SPI_ILAT_CLR_TSI 0x00000200 /* Transmit Start Indication */
-#define SPI_ILAT_CLR_RFI 0x00000400 /* Receive Finish Indication */
-#define SPI_ILAT_CLR_TFI 0x00000800 /* Transmit Finish Indication */
-
-/*
- * adi spi3 registers layout
- */
-struct adi_spi_regs {
- u32 revid;
- u32 control;
- u32 rx_control;
- u32 tx_control;
- u32 clock;
- u32 delay;
- u32 ssel;
- u32 rwc;
- u32 rwcr;
- u32 twc;
- u32 twcr;
- u32 reserved0;
- u32 emask;
- u32 emaskcl;
- u32 emaskst;
- u32 reserved1;
- u32 status;
- u32 elat;
- u32 elatcl;
- u32 reserved2;
- u32 rfifo;
- u32 reserved3;
- u32 tfifo;
-};
-
-#define MAX_CTRL_CS 8 /* cs in spi controller */
-
-/* device.platform_data for SSP controller devices */
-struct adi_spi3_master {
- u16 num_chipselect;
- u16 pin_req[7];
-};
-
-/* spi_board_info.controller_data for SPI slave devices,
- * copied to spi_device.platform_data ... mostly for dma tuning
- */
-struct adi_spi3_chip {
- u32 control;
- u16 cs_chg_udelay; /* Some devices require 16-bit delays */
- u32 tx_dummy_val; /* tx value for rx only transfer */
- bool enable_dma;
-};
-
-#endif /* _ADI_SPI3_H_ */
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index bb4bd15ae1f6..b2bd4b4127c4 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -3,7 +3,9 @@
* Copyright (C) 2018 Exceet Electronics GmbH
* Copyright (C) 2018 Bootlin
*
- * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ * Author:
+ * Peter Pan <peterpandong@micron.com>
+ * Boris Brezillon <boris.brezillon@bootlin.com>
*/
#ifndef __LINUX_SPI_MEM_H
@@ -122,7 +124,8 @@ struct spi_mem_op {
/**
* struct spi_mem - describes a SPI memory device
* @spi: the underlying SPI device
- * @drvpriv: spi_mem_drviver private data
+ * @drvpriv: spi_mem_driver private data
+ * @name: name of the SPI memory device
*
 * Extra information that describes the SPI memory device and may be needed by
* the controller to properly handle this device should be placed here.
@@ -133,6 +136,7 @@ struct spi_mem_op {
struct spi_mem {
struct spi_device *spi;
void *drvpriv;
+ const char *name;
};
/**
@@ -165,6 +169,13 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* limitations)
* @supports_op: check if an operation is supported by the controller
* @exec_op: execute a SPI memory operation
+ * @get_name: get a custom name for the SPI mem device from the controller.
+ * This might be needed if the controller driver has been ported
+ * to use the SPI mem layer and a custom name is used to keep
+ * mtdparts compatible.
+ * Note that if the implementation of this function allocates memory
+ * dynamically, then it should do so with devm_xxx(), as we don't
+ * have a ->free_name() function.
*
 * This interface should be implemented by SPI controllers providing a
 * high-level interface to execute SPI memory operations, which is usually the
@@ -176,6 +187,7 @@ struct spi_controller_mem_ops {
const struct spi_mem_op *op);
int (*exec_op)(struct spi_mem *mem,
const struct spi_mem_op *op);
+ const char *(*get_name)(struct spi_mem *mem);
};
/**
@@ -234,6 +246,8 @@ bool spi_mem_supports_op(struct spi_mem *mem,
int spi_mem_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op);
+const char *spi_mem_get_name(struct spi_mem *mem);
+
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
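
(Illustration, not part of the patch: a controller driver might wire up the new ->get_name() hook roughly as below. The my_ctlr_* names and the "<parent>.<chip-select>" format are assumptions; the devm_kasprintf() allocation follows the kerneldoc note that there is no ->free_name().)

/* Sketch only: supply a stable name so existing mtdparts keep matching. */
static bool my_ctlr_supports_op(struct spi_mem *mem, const struct spi_mem_op *op);
static int my_ctlr_exec_op(struct spi_mem *mem, const struct spi_mem_op *op);

static const char *my_ctlr_get_name(struct spi_mem *mem)
{
	struct device *parent = mem->spi->controller->dev.parent;

	/* devm_*() allocation: the core never frees this name itself. */
	return devm_kasprintf(parent, GFP_KERNEL, "%s.%d",
			      dev_name(parent), mem->spi->chip_select);
}

static const struct spi_controller_mem_ops my_ctlr_mem_ops = {
	.supports_op	= my_ctlr_supports_op,
	.exec_op	= my_ctlr_exec_op,
	.get_name	= my_ctlr_get_name,
};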
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index 51d8c060e513..b7e021b274dc 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -8,7 +8,7 @@ struct spi_bitbang {
struct mutex lock;
u8 busy;
u8 use_dma;
- u8 flags; /* extra spi->mode support */
+ u16 flags; /* extra spi->mode support */
struct spi_master *master;
@@ -30,7 +30,8 @@ struct spi_bitbang {
/* txrx_word[SPI_MODE_*]() just looks like a shift register */
u32 (*txrx_word[4])(struct spi_device *spi,
unsigned nsecs,
- u32 word, u8 bits);
+ u32 word, u8 bits, unsigned flags);
+ int (*set_line_direction)(struct spi_device *spi, bool output);
};
/* you can call these default bitbang->master methods from your custom
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index fd57888d4942..3190997df9ca 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -114,29 +114,48 @@ do { \
#endif /*arch_spin_is_contended*/
/*
- * This barrier must provide two things:
+ * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
+ * between program-order earlier lock acquisitions and program-order later
+ * memory accesses.
*
- * - it must guarantee a STORE before the spin_lock() is ordered against a
- * LOAD after it, see the comments at its two usage sites.
+ * This guarantees that the following two properties hold:
*
- * - it must ensure the critical section is RCsc.
+ * 1) Given the snippet:
*
- * The latter is important for cases where we observe values written by other
- * CPUs in spin-loops, without barriers, while being subject to scheduling.
+ * { X = 0; Y = 0; }
*
- * CPU0 CPU1 CPU2
+ * CPU0 CPU1
*
- * for (;;) {
- * if (READ_ONCE(X))
- * break;
- * }
- * X=1
- * <sched-out>
- * <sched-in>
- * r = X;
+ * WRITE_ONCE(X, 1); WRITE_ONCE(Y, 1);
+ * spin_lock(S); smp_mb();
+ * smp_mb__after_spinlock(); r1 = READ_ONCE(X);
+ * r0 = READ_ONCE(Y);
+ * spin_unlock(S);
*
- * without transitivity it could be that CPU1 observes X!=0 breaks the loop,
- * we get migrated and CPU2 sees X==0.
+ * it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
+ * and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
+ * preceding the call to smp_mb__after_spinlock() in __schedule() and in
+ * try_to_wake_up().
+ *
+ * 2) Given the snippet:
+ *
+ * { X = 0; Y = 0; }
+ *
+ * CPU0 CPU1 CPU2
+ *
+ * spin_lock(S); spin_lock(S); r1 = READ_ONCE(Y);
+ * WRITE_ONCE(X, 1); smp_mb__after_spinlock(); smp_rmb();
+ * spin_unlock(S); r0 = READ_ONCE(X); r2 = READ_ONCE(X);
+ * WRITE_ONCE(Y, 1);
+ * spin_unlock(S);
+ *
+ * it is forbidden that CPU0's critical section executes before CPU1's
+ * critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
+ * and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
+ * preceding the calls to smp_rmb() in try_to_wake_up() for similar
+ * snippets but "projected" onto two CPUs.
+ *
+ * Property (2) upgrades the lock to an RCsc lock.
*
* Since most load-store architectures implement ACQUIRE with an smp_mb() after
* the LL/SC loop, they need no further barriers. Similarly all our TSO
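
(Illustration, not part of the patch: property (1) above as a minimal pairing sketch; the in-tree users are __schedule() and try_to_wake_up(), the names below are made up.)

static DEFINE_SPINLOCK(s);
static int x, y, r0, r1;

static void cpu0_side(void)
{
	WRITE_ONCE(x, 1);
	spin_lock(&s);
	smp_mb__after_spinlock();	/* upgrade the ACQUIRE to a full barrier */
	r0 = READ_ONCE(y);
	spin_unlock(&s);
}

static void cpu1_side(void)
{
	WRITE_ONCE(y, 1);
	smp_mb();
	r1 = READ_ONCE(x);
}
/* Forbidden outcome once both have run: r0 == 0 && r1 == 0. */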
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 91494d7e8e41..3e72a291c401 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -195,6 +195,16 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
return retval;
}
+/* Used by tracing, cannot be traced and cannot invoke lockdep. */
+static inline notrace int
+srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+{
+ int retval;
+
+ retval = __srcu_read_lock(sp);
+ return retval;
+}
+
/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
* @sp: srcu_struct in which to unregister the old reader.
@@ -209,6 +219,13 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
__srcu_read_unlock(sp, idx);
}
+/* Used by tracing, cannot be traced and cannot call lockdep. */
+static inline notrace void
+srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+{
+ __srcu_read_unlock(sp, idx);
+}
+
/**
* smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
*
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 3b43655cabe6..0d5a2691e7e9 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -499,11 +499,9 @@ struct ssb_bus {
/* Internal-only stuff follows. Do not touch. */
struct list_head list;
-#ifdef CONFIG_SSB_DEBUG
/* Is the bus already powered up? */
bool powered_up;
int power_warn_count;
-#endif /* DEBUG */
};
enum ssb_quirks {
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 32feac5bbd75..c43e9a01b892 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -190,5 +190,6 @@ struct plat_stmmacenet_data {
bool tso_en;
int mac_port_sel_speed;
bool en_tx_lpi_clockgating;
+ int has_xgmac;
};
#endif
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 440b62f7502e..5a28ac9284f0 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -414,7 +414,7 @@ static inline bool hibernation_available(void) { return false; }
#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE 0x0006 /* Restore failed */
-extern struct mutex pm_mutex;
+extern struct mutex system_transition_mutex;
#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
diff --git a/include/linux/swait.h b/include/linux/swait.h
index bf8cb0dee23c..73e06e9986d4 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -16,7 +16,7 @@
* wait-queues, but the semantics are actually completely different, and
* every single user we have ever had has been buggy (or pointless).
*
- * A "swake_up()" only wakes up _one_ waiter, which is not at all what
+ * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what
* "wake_up()" does, and has led to problems. In other cases, it has
* been fine, because there's only ever one waiter (kvm), but in that
 * case the whole "simple" wait-queue is just pointless to begin with,
@@ -38,8 +38,8 @@
* all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
* sleeper state.
*
- * - the exclusive mode; because this requires preserving the list order
- * and this is hard.
+ * - the !exclusive mode; because that leads to O(n) wakeups, everything is
+ * exclusive.
*
* - custom wake callback functions; because you cannot give any guarantees
* about random code. This also allows swait to be used in RT, such that
@@ -115,7 +115,7 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name
* CPU0 - waker CPU1 - waiter
*
* for (;;) {
- * @cond = true; prepare_to_swait(&wq_head, &wait, state);
+ * @cond = true; prepare_to_swait_exclusive(&wq_head, &wait, state);
* smp_mb(); // smp_mb() from set_current_state()
* if (swait_active(wq_head)) if (@cond)
* wake_up(wq_head); break;
@@ -157,20 +157,20 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
return swait_active(wq);
}
-extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);
-extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
-extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
-/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
+/* as per ___wait_event() but for swait, therefore "exclusive == 1" */
#define ___swait_event(wq, condition, state, ret, cmd) \
({ \
+ __label__ __out; \
struct swait_queue __wait; \
long __ret = ret; \
\
@@ -183,20 +183,20 @@ extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
\
if (___wait_is_interruptible(state) && __int) { \
__ret = __int; \
- break; \
+ goto __out; \
} \
\
cmd; \
} \
finish_swait(&wq, &__wait); \
- __ret; \
+__out: __ret; \
})
#define __swait_event(wq, condition) \
(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
schedule())
-#define swait_event(wq, condition) \
+#define swait_event_exclusive(wq, condition) \
do { \
if (condition) \
break; \
@@ -208,7 +208,7 @@ do { \
TASK_UNINTERRUPTIBLE, timeout, \
__ret = schedule_timeout(__ret))
-#define swait_event_timeout(wq, condition, timeout) \
+#define swait_event_timeout_exclusive(wq, condition, timeout) \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
@@ -220,7 +220,7 @@ do { \
___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \
schedule())
-#define swait_event_interruptible(wq, condition) \
+#define swait_event_interruptible_exclusive(wq, condition) \
({ \
int __ret = 0; \
if (!(condition)) \
@@ -233,7 +233,7 @@ do { \
TASK_INTERRUPTIBLE, timeout, \
__ret = schedule_timeout(__ret))
-#define swait_event_interruptible_timeout(wq, condition, timeout) \
+#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
@@ -246,7 +246,7 @@ do { \
(void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
/**
- * swait_event_idle - wait without system load contribution
+ * swait_event_idle_exclusive - wait without system load contribution
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
@@ -257,7 +257,7 @@ do { \
* condition and doesn't want to contribute to system load. Signals are
* ignored.
*/
-#define swait_event_idle(wq, condition) \
+#define swait_event_idle_exclusive(wq, condition) \
do { \
if (condition) \
break; \
@@ -270,7 +270,7 @@ do { \
__ret = schedule_timeout(__ret))
/**
- * swait_event_idle_timeout - wait up to timeout without load contribution
+ * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout at which we'll give up in jiffies
@@ -288,7 +288,7 @@ do { \
* or the remaining jiffies (at least 1) if the @condition evaluated
* to %true before the @timeout elapsed.
*/
-#define swait_event_idle_timeout(wq, condition, timeout) \
+#define swait_event_idle_timeout_exclusive(wq, condition, timeout) \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
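
(Illustration, not part of the patch: the renamed exclusive-only API in its simplest form; the queue and flag below are made up.)

static DECLARE_SWAIT_QUEUE_HEAD(my_swq);
static bool my_done;

static void my_waiter(void)
{
	/* Sleeps in TASK_UNINTERRUPTIBLE until my_done becomes true. */
	swait_event_exclusive(my_swq, READ_ONCE(my_done));
}

static void my_waker(void)
{
	WRITE_ONCE(my_done, true);
	swake_up_one(&my_swq);	/* wakes exactly one exclusive waiter */
}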
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c063443d8638..1a8bd05a335e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -629,7 +629,6 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
return memcg->swappiness;
}
-
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
@@ -637,6 +636,16 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
}
#endif
+#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
+extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
+ gfp_t gfp_mask);
+#else
+static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,
+ int node, gfp_t gfp_mask)
+{
+}
+#endif
+
#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 06bd7b096167..e06febf62978 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -10,5 +10,7 @@ extern spinlock_t swap_lock;
extern struct plist_head swap_active_head;
extern struct swap_info_struct *swap_info[];
extern int try_to_unuse(unsigned int, bool, unsigned long);
+extern unsigned long generic_max_swapfile_size(void);
+extern unsigned long max_swapfile_size(void);
#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index ec93e93371fa..ab400af6f0ce 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -19,10 +19,6 @@
#include <linux/pci.h>
#include <linux/cdev.h>
-#define MICROSEMI_VENDOR_ID 0x11f8
-#define MICROSEMI_NTB_CLASSCODE 0x068000
-#define MICROSEMI_MGMT_CLASSCODE 0x058000
-
#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
#define SWITCHTEC_MAX_PFF_CSR 48
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 5c1a0933768e..2ff814c92f7f 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -81,6 +81,7 @@ union bpf_attr;
#include <linux/unistd.h>
#include <linux/quota.h>
#include <linux/key.h>
+#include <linux/personality.h>
#include <trace/syscall.h>
#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
@@ -506,9 +507,9 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
/* fs/timerfd.c */
asmlinkage long sys_timerfd_create(int clockid, int flags);
asmlinkage long sys_timerfd_settime(int ufd, int flags,
- const struct itimerspec __user *utmr,
- struct itimerspec __user *otmr);
-asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
+ const struct __kernel_itimerspec __user *utmr,
+ struct __kernel_itimerspec __user *otmr);
+asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr);
/* fs/utimes.c */
asmlinkage long sys_utimensat(int dfd, const char __user *filename,
@@ -573,10 +574,10 @@ asmlinkage long sys_timer_create(clockid_t which_clock,
struct sigevent __user *timer_event_spec,
timer_t __user * created_timer_id);
asmlinkage long sys_timer_gettime(timer_t timer_id,
- struct itimerspec __user *setting);
+ struct __kernel_itimerspec __user *setting);
asmlinkage long sys_timer_getoverrun(timer_t timer_id);
asmlinkage long sys_timer_settime(timer_t timer_id, int flags,
- const struct itimerspec __user *new_setting,
+ const struct __kernel_itimerspec __user *new_setting,
struct itimerspec __user *old_setting);
asmlinkage long sys_timer_delete(timer_t timer_id);
asmlinkage long sys_clock_settime(clockid_t which_clock,
@@ -1282,4 +1283,14 @@ static inline long ksys_truncate(const char __user *pathname, loff_t length)
return do_sys_truncate(pathname, length);
}
+static inline unsigned int ksys_personality(unsigned int personality)
+{
+ unsigned int old = current->personality;
+
+ if (personality != 0xffffffff)
+ set_personality(personality);
+
+ return old;
+}
+
#endif
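
(Illustration, not part of the patch: a plausible call site for the new helper, inferred from its shape; whether personality(2) is implemented exactly this way is an assumption.)

SYSCALL_DEFINE1(personality, unsigned int, personality)
{
	/* 0xffffffff conventionally queries without changing anything. */
	return ksys_personality(personality);
}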
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index b8bfdc173ec0..3c12198c0103 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -237,6 +237,9 @@ int __must_check sysfs_create_files(struct kobject *kobj,
const struct attribute **attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
+struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr);
+void sysfs_unbreak_active_protection(struct kernfs_node *kn);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns);
bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
@@ -350,6 +353,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj,
return 0;
}
+static inline struct kernfs_node *
+sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ return NULL;
+}
+
+static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn)
+{
+}
+
static inline void sysfs_remove_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns)
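
(Illustration, not part of the patch: the classic use case for the new pair is a store() callback that removes its own device, which would otherwise deadlock against sysfs's active-reference draining. my_device_remove() is an assumed helper.)

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct kernfs_node *kn;

	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
	if (kn) {
		my_device_remove(dev);			/* assumed helper */
		sysfs_unbreak_active_protection(kn);	/* drops kn reference */
	}
	return count;
}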
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index c6aa8a3c42ed..b9626aa7e90c 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -37,9 +37,33 @@ struct t10_pi_tuple {
#define T10_PI_APP_ESCAPE cpu_to_be16(0xffff)
#define T10_PI_REF_ESCAPE cpu_to_be32(0xffffffff)
+static inline u32 t10_pi_ref_tag(struct request *rq)
+{
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ return blk_rq_pos(rq) >>
+ (rq->q->integrity.interval_exp - 9) & 0xffffffff;
+#else
+ return -1U;
+#endif
+}
+
extern const struct blk_integrity_profile t10_pi_type1_crc;
extern const struct blk_integrity_profile t10_pi_type1_ip;
extern const struct blk_integrity_profile t10_pi_type3_crc;
extern const struct blk_integrity_profile t10_pi_type3_ip;
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+extern void t10_pi_prepare(struct request *rq, u8 protection_type);
+extern void t10_pi_complete(struct request *rq, u8 protection_type,
+ unsigned int intervals);
+#else
+static inline void t10_pi_complete(struct request *rq, u8 protection_type,
+ unsigned int intervals)
+{
+}
+static inline void t10_pi_prepare(struct request *rq, u8 protection_type)
+{
+}
+#endif
+
#endif
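
(Illustration, not part of the patch: a hedged sketch of how the prepare/complete pair might bracket a request in a protection-aware driver; Type 1 protection and 512-byte intervals are assumptions.)

static void my_dispatch(struct request *rq)
{
	t10_pi_prepare(rq, T10_PI_TYPE1_PROTECTION);	/* remap ref tags */
	/* ... hand @rq to the hardware ... */
}

static void my_complete(struct request *rq, unsigned int good_bytes)
{
	/* Undo the remapping for the intervals that actually transferred. */
	t10_pi_complete(rq, T10_PI_TYPE1_PROTECTION, good_bytes >> 9);
}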
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 72705eaf4b84..263e37271afd 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -89,7 +89,7 @@ struct tcp_sack_block {
struct tcp_options_received {
/* PAWS/RTTM data */
- long ts_recent_stamp;/* Time we stored ts_recent (for aging) */
+ int ts_recent_stamp;/* Time we stored ts_recent (for aging) */
u32 ts_recent; /* Time stamp to echo next */
u32 rcv_tsval; /* Time stamp value */
u32 rcv_tsecr; /* Time stamp echo reply */
@@ -181,10 +181,16 @@ struct tcp_sock {
u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
* total number of data segments sent.
*/
+ u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut
+ * total number of data bytes sent.
+ */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
* sum(delta(snd_una)), or how many bytes
* were acked.
*/
+ u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
+ * total number of DSACK blocks received
+ */
u32 snd_una; /* First byte we want an ack for */
u32 snd_sml; /* Last byte of the most recently transmitted small packet */
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
@@ -214,8 +220,7 @@ struct tcp_sock {
#define TCP_RACK_RECOVERY_THRESH 16
u8 reo_wnd_persist:5, /* No. of recovery since last adj */
dsack_seen:1, /* Whether DSACK seen after last adj */
- advanced:1, /* mstamp advanced since last lost marking */
- reord:1; /* reordering detected */
+ advanced:1; /* mstamp advanced since last lost marking */
} rack;
u16 advmss; /* Advertised MSS */
u8 compressed_ack;
@@ -261,6 +266,7 @@ struct tcp_sock {
u8 ecn_flags; /* ECN status bits. */
u8 keepalive_probes; /* num of allowed keep alive probes */
u32 reordering; /* Packet reordering metric. */
+ u32 reord_seen; /* number of data packet reordering events */
u32 snd_up; /* Urgent pointer */
/*
@@ -330,6 +336,9 @@ struct tcp_sock {
* the first SYN. */
u32 undo_marker; /* snd_una upon a new recovery episode. */
int undo_retrans; /* number of undoable retransmissions. */
+ u64 bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans
+ * Total data bytes retransmitted
+ */
u32 total_retrans; /* Total retransmits for entire connection */
u32 urg_seq; /* Seq of received urgent pointer */
@@ -350,6 +359,7 @@ struct tcp_sock {
#endif
/* Receiver side RTT estimation */
+ u32 rcv_rtt_last_tsecr;
struct {
u32 rtt_us;
u32 seq;
@@ -425,7 +435,7 @@ struct tcp_timewait_sock {
/* The time we sent the last out-of-window ACK: */
u32 tw_last_oow_ack_time;
- long tw_ts_recent_stamp;
+ int tw_ts_recent_stamp;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tw_md5_key;
#endif
diff --git a/include/linux/time.h b/include/linux/time.h
index aed74463592d..27d83fd2ae61 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -14,9 +14,9 @@ int get_timespec64(struct timespec64 *ts,
int put_timespec64(const struct timespec64 *ts,
struct __kernel_timespec __user *uts);
int get_itimerspec64(struct itimerspec64 *it,
- const struct itimerspec __user *uit);
+ const struct __kernel_itimerspec __user *uit);
int put_itimerspec64(const struct itimerspec64 *it,
- struct itimerspec __user *uit);
+ struct __kernel_itimerspec __user *uit);
extern time64_t mktime64(const unsigned int year, const unsigned int mon,
const unsigned int day, const unsigned int hour,
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 0a7b2f79cec7..05634afba0db 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -12,6 +12,7 @@ typedef __u64 timeu64_t;
*/
#ifndef CONFIG_64BIT_TIME
#define __kernel_timespec timespec
+#define __kernel_itimerspec itimerspec
#endif
#include <uapi/linux/time.h>
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 86bc2026efce..5d738804e3d6 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -21,6 +21,21 @@ extern int do_sys_settimeofday64(const struct timespec64 *tv,
const struct timezone *tz);
/*
+ * ktime_get() family: read the current time in a multitude of ways,
+ *
+ * The default time reference is CLOCK_MONOTONIC, starting at
+ * boot time but not counting the time spent in suspend.
+ * For other references, use the functions with "real", "clocktai",
+ * "boottime" and "raw" suffixes.
+ *
+ * To get the time in a different format, use the ones with the
+ * "ns", "ts64" and "seconds" suffixes.
+ *
+ * See Documentation/core-api/timekeeping.rst for more details.
+ */
+
+/*
* timespec64 based interfaces
*/
extern void ktime_get_raw_ts64(struct timespec64 *ts);
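
(Illustration, not part of the patch: the suffix scheme described above, spelled out; the values are unused and the function is made up.)

static void show_ktime_get_family(void)
{
	ktime_t now = ktime_get();			/* CLOCK_MONOTONIC, ktime_t */
	u64 now_ns = ktime_get_ns();			/* same clock, nanoseconds */
	time64_t real = ktime_get_real_seconds();	/* CLOCK_REALTIME, seconds */
	struct timespec64 raw;

	ktime_get_raw_ts64(&raw);			/* raw clock, timespec64 */
}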
@@ -177,7 +192,7 @@ static inline time64_t ktime_get_clocktai_seconds(void)
extern bool timekeeping_rtc_skipsuspend(void);
extern bool timekeeping_rtc_skipresume(void);
-extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
+extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
/*
* struct system_time_snapshot - simultaneous raw/real time capture with
@@ -243,7 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
extern int persistent_clock_is_local;
extern void read_persistent_clock64(struct timespec64 *ts);
-extern void read_boot_clock64(struct timespec64 *ts);
+void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
+ struct timespec64 *boot_offset);
extern int update_persistent_clock64(struct timespec64 now);
/*
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 66272862070b..61dfd93b6ee4 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -64,6 +64,8 @@ struct torture_random_state {
long trs_count;
};
#define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
+#define DEFINE_TORTURE_RANDOM_PERCPU(name) \
+ DEFINE_PER_CPU(struct torture_random_state, name)
unsigned long torture_random(struct torture_random_state *trsp);
/* Task shuffler, which causes CPUs to occasionally go idle. */
@@ -79,7 +81,7 @@ void stutter_wait(const char *title);
int torture_stutter_init(int s);
/* Initialization and cleanup. */
-bool torture_init_begin(char *ttype, bool v);
+bool torture_init_begin(char *ttype, int v);
void torture_init_end(void);
bool torture_cleanup_begin(void);
void torture_cleanup_end(void);
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 06639fb6ab85..4609b94142d4 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -43,6 +43,8 @@ struct tpm_class_ops {
u8 (*status) (struct tpm_chip *chip);
bool (*update_timeouts)(struct tpm_chip *chip,
unsigned long *timeout_cap);
+ int (*go_idle)(struct tpm_chip *chip);
+ int (*cmd_ready)(struct tpm_chip *chip);
int (*request_locality)(struct tpm_chip *chip, int loc);
int (*relinquish_locality)(struct tpm_chip *chip, int loc);
void (*clk_enable)(struct tpm_chip *chip, bool value);
@@ -61,6 +63,7 @@ extern int tpm_seal_trusted(struct tpm_chip *chip,
extern int tpm_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
+extern struct tpm_chip *tpm_default_chip(void);
#else
static inline int tpm_is_tpm2(struct tpm_chip *chip)
{
@@ -96,5 +99,9 @@ static inline int tpm_unseal_trusted(struct tpm_chip *chip,
{
return -ENODEV;
}
+static inline struct tpm_chip *tpm_default_chip(void)
+{
+ return NULL;
+}
#endif
#endif
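
(Illustration, not part of the patch: a caller picking up the system's default chip; reference handling is elided here, which is an assumption to check against the implementation.)

static int my_tpm_version(void)
{
	struct tpm_chip *chip = tpm_default_chip();

	if (!chip)
		return -ENODEV;	/* no TPM, or TPM support disabled */

	return tpm_is_tpm2(chip) ? 2 : 1;
}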
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 4a8841963c2e..05589a3e37f4 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -51,6 +51,7 @@
#include <linux/security.h>
#include <linux/task_work.h>
#include <linux/memcontrol.h>
+#include <linux/blk-cgroup.h>
struct linux_binprm;
/*
@@ -192,6 +193,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
task_work_run();
mem_cgroup_handle_over_high();
+ blkcg_maybe_throttle_current();
}
#endif /* <linux/tracehook.h> */
diff --git a/include/linux/udp.h b/include/linux/udp.h
index ca840345571b..320d49d85484 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -74,8 +74,8 @@ struct udp_sock {
void (*encap_destroy)(struct sock *sk);
/* GRO functions for UDP socket */
- struct sk_buff ** (*gro_receive)(struct sock *sk,
- struct sk_buff **head,
+ struct sk_buff * (*gro_receive)(struct sock *sk,
+ struct list_head *head,
struct sk_buff *skb);
int (*gro_complete)(struct sock *sk,
struct sk_buff *skb,
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
index a710e28b5215..6b708434b7f9 100644
--- a/include/linux/usb/audio-v3.h
+++ b/include/linux/usb/audio-v3.h
@@ -387,6 +387,12 @@ struct uac3_interrupt_data_msg {
#define UAC3_CONNECTORS 0x0f
#define UAC3_POWER_DOMAIN 0x10
+/* A.20 PROCESSING UNIT PROCESS TYPES */
+#define UAC3_PROCESS_UNDEFINED 0x00
+#define UAC3_PROCESS_UP_DOWNMIX 0x01
+#define UAC3_PROCESS_STEREO_EXTENDER 0x02
+#define UAC3_PROCESS_MULTI_FUNCTION 0x03
+
/* A.22 AUDIO CLASS-SPECIFIC REQUEST CODES */
/* see audio-v2.h for the rest, which is identical to v2 */
#define UAC3_CS_REQ_INTEN 0x04
@@ -406,6 +412,15 @@ struct uac3_interrupt_data_msg {
#define UAC3_TE_OVERFLOW 0x04
#define UAC3_TE_LATENCY 0x05
+/* A.23.10 PROCESSING UNITS CONTROL SELECTORS */
+
+/* Up/Down Mixer */
+#define UAC3_UD_MODE_SELECT 0x01
+
+/* Stereo Extender */
+#define UAC3_EXT_WIDTH_CONTROL 0x01
+
/* BADD predefined Unit/Terminal values */
#define UAC3_BADD_IT_ID1 1 /* Input Terminal ID1: bTerminalID = 1 */
#define UAC3_BADD_FU_ID2 2 /* Feature Unit ID2: bUnitID = 2 */
@@ -432,4 +447,8 @@ struct uac3_interrupt_data_msg {
/* BADD sample rate is always fixed to 48kHz */
#define UAC3_BADD_SAMPLING_RATE 48000
+/* BADD power domains recovery times in 50us increments */
+#define UAC3_BADD_PD_RECOVER_D1D0 0x0258 /* 30ms */
+#define UAC3_BADD_PD_RECOVER_D2D0 0x1770 /* 300ms */
+
#endif /* __LINUX_USB_AUDIO_V3_H */
diff --git a/include/linux/verification.h b/include/linux/verification.h
index a10549a6c7cd..cfa4730d607a 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -13,6 +13,12 @@
#define _LINUX_VERIFICATION_H
/*
+ * Indicate that both builtin trusted keys and secondary trusted keys
+ * should be used.
+ */
+#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)
+
+/*
* The use to which an asymmetric key is being put.
*/
enum key_being_used_for {
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 77f0f0af3a71..a34539b7f750 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -84,8 +84,8 @@ enum vga_switcheroo_state {
* Client identifier. Audio clients use the same identifier & 0x100.
*/
enum vga_switcheroo_client_id {
- VGA_SWITCHEROO_UNKNOWN_ID = -1,
- VGA_SWITCHEROO_IGD,
+ VGA_SWITCHEROO_UNKNOWN_ID = 0x1000,
+ VGA_SWITCHEROO_IGD = 0,
VGA_SWITCHEROO_DIS,
VGA_SWITCHEROO_MAX_CLIENTS,
};
@@ -151,7 +151,7 @@ int vga_switcheroo_register_client(struct pci_dev *dev,
bool driver_power_control);
int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- enum vga_switcheroo_client_id id);
+ struct pci_dev *vga_dev);
void vga_switcheroo_client_fb_set(struct pci_dev *dev,
struct fb_info *info);
@@ -180,7 +180,7 @@ static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_ha
enum vga_switcheroo_handler_flags_t handler_flags) { return 0; }
static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- enum vga_switcheroo_client_id id) { return 0; }
+ struct pci_dev *vga_dev) { return 0; }
static inline void vga_switcheroo_unregister_handler(void) {}
static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; }
static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 5559a2d31c46..32baf8e26735 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -79,7 +79,8 @@ struct virtio_config_ops {
u64 (*get_features)(struct virtio_device *vdev);
int (*finalize_features)(struct virtio_device *vdev);
const char *(*bus_name)(struct virtio_device *vdev);
- int (*set_vq_affinity)(struct virtqueue *vq, int cpu);
+ int (*set_vq_affinity)(struct virtqueue *vq,
+ const struct cpumask *cpu_mask);
const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
int index);
};
@@ -236,11 +237,11 @@ const char *virtio_bus_name(struct virtio_device *vdev)
*
*/
static inline
-int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
+int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
struct virtio_device *vdev = vq->vdev;
if (vdev->config->set_vq_affinity)
- return vdev->config->set_vq_affinity(vq, cpu);
+ return vdev->config->set_vq_affinity(vq, cpu_mask);
return 0;
}
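
(Illustration, not part of the patch: a driver spreading its virtqueues across CPUs with the new mask-based signature; the array and count are made up.)

static void my_spread_vq_affinity(struct virtqueue *vqs[], unsigned int nvqs)
{
	unsigned int i = 0, cpu;

	for_each_online_cpu(cpu) {
		if (i == nvqs)
			break;
		/* cpumask_of(cpu) replaces the old plain "int cpu" argument. */
		virtqueue_set_affinity(vqs[i++], cpumask_of(cpu));
	}
}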
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 39fda195bf78..3af7c0e03be5 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -6,8 +6,10 @@
*
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
- * Wound/wait implementation:
+ * Wait/Die implementation:
* Copyright (C) 2013 Canonical Ltd.
+ * Choice of algorithm:
+ * Copyright (C) 2018 VMware Inc.
*
* This file contains the main data structure and API definitions.
*/
@@ -23,14 +25,17 @@ struct ww_class {
struct lock_class_key mutex_key;
const char *acquire_name;
const char *mutex_name;
+ unsigned int is_wait_die;
};
struct ww_acquire_ctx {
struct task_struct *task;
unsigned long stamp;
- unsigned acquired;
+ unsigned int acquired;
+ unsigned short wounded;
+ unsigned short is_wait_die;
#ifdef CONFIG_DEBUG_MUTEXES
- unsigned done_acquire;
+ unsigned int done_acquire;
struct ww_class *ww_class;
struct ww_mutex *contending_lock;
#endif
@@ -38,8 +43,8 @@ struct ww_acquire_ctx {
struct lockdep_map dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
- unsigned deadlock_inject_interval;
- unsigned deadlock_inject_countdown;
+ unsigned int deadlock_inject_interval;
+ unsigned int deadlock_inject_countdown;
#endif
};
@@ -58,17 +63,21 @@ struct ww_mutex {
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
#endif
-#define __WW_CLASS_INITIALIZER(ww_class) \
+#define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \
{ .stamp = ATOMIC_LONG_INIT(0) \
, .acquire_name = #ww_class "_acquire" \
- , .mutex_name = #ww_class "_mutex" }
+ , .mutex_name = #ww_class "_mutex" \
+ , .is_wait_die = _is_wait_die }
#define __WW_MUTEX_INITIALIZER(lockname, class) \
{ .base = __MUTEX_INITIALIZER(lockname.base) \
__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
+#define DEFINE_WD_CLASS(classname) \
+ struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1)
+
#define DEFINE_WW_CLASS(classname) \
- struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
+ struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0)
#define DEFINE_WW_MUTEX(mutexname, ww_class) \
struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
@@ -102,7 +111,7 @@ static inline void ww_mutex_init(struct ww_mutex *lock,
*
* Context-based w/w mutex acquiring can be done in any order whatsoever within
* a given lock class. Deadlocks will be detected and handled with the
- * wait/wound logic.
+ * wait/die logic.
*
* Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
* result in undetected deadlocks and is so forbidden. Mixing different contexts
@@ -123,6 +132,8 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
ctx->task = current;
ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
ctx->acquired = 0;
+ ctx->wounded = false;
+ ctx->is_wait_die = ww_class->is_wait_die;
#ifdef CONFIG_DEBUG_MUTEXES
ctx->ww_class = ww_class;
ctx->done_acquire = 0;
@@ -195,13 +206,13 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
* Lock the w/w mutex exclusively for this task.
*
* Deadlocks within a given w/w class of locks are detected and handled with the
- * wait/wound algorithm. If the lock isn't immediately avaiable this function
+ * wait/die algorithm. If the lock isn't immediately available this function
 * will either sleep until it is (wait case), or it selects the current context
- * for backing off by returning -EDEADLK (wound case). Trying to acquire the
+ * for backing off by returning -EDEADLK (die case). Trying to acquire the
* same lock with the same context twice is also detected and signalled by
* returning -EALREADY. Returns 0 if the mutex was successfully acquired.
*
- * In the wound case the caller must release all currently held w/w mutexes for
+ * In the die case the caller must release all currently held w/w mutexes for
* the given context and then wait for this contending lock to be available by
* calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
* lock and proceed with trying to acquire further w/w mutexes (e.g. when
@@ -226,14 +237,14 @@ extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acq
* Lock the w/w mutex exclusively for this task.
*
* Deadlocks within a given w/w class of locks are detected and handled with the
- * wait/wound algorithm. If the lock isn't immediately avaiable this function
+ * wait/die algorithm. If the lock isn't immediately available this function
 * will either sleep until it is (wait case), or it selects the current context
- * for backing off by returning -EDEADLK (wound case). Trying to acquire the
+ * for backing off by returning -EDEADLK (die case). Trying to acquire the
* same lock with the same context twice is also detected and signalled by
* returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
* signal arrives while waiting for the lock then this function returns -EINTR.
*
- * In the wound case the caller must release all currently held w/w mutexes for
+ * In the die case the caller must release all currently held w/w mutexes for
* the given context and then wait for this contending lock to be available by
* calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
* not acquire this lock and proceed with trying to acquire further w/w mutexes
@@ -256,7 +267,7 @@ extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
- * Acquires a w/w mutex with the given context after a wound case. This function
+ * Acquires a w/w mutex with the given context after a die case. This function
* will sleep until the lock becomes available.
*
* The caller must have released all w/w mutexes already acquired with the
@@ -290,7 +301,7 @@ ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
- * Acquires a w/w mutex with the given context after a wound case. This function
+ * Acquires a w/w mutex with the given context after a die case. This function
* will sleep until the lock becomes available and returns 0 when the lock has
* been acquired. If a signal arrives while waiting for the lock then this
* function returns -EINTR.
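
(Illustration, not part of the patch: the wait/die protocol described above as a two-lock sketch; my_ww_class and the surrounding function are made up.)

static DEFINE_WW_CLASS(my_ww_class);

static void lock_both(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &my_ww_class);
	ww_mutex_lock(a, &ctx);		/* the first lock of a context cannot die */
retry:
	ret = ww_mutex_lock(b, &ctx);
	if (ret == -EDEADLK) {
		/* die case: back off fully, then sleep on the contended lock */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		swap(a, b);		/* @b is now the one lock still missing */
		goto retry;
	}
	ww_acquire_done(&ctx);		/* no further locks will be taken */

	/* ... both locks held: do the work ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}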
diff --git a/include/media/cec-pin.h b/include/media/cec-pin.h
index ed16c6dde0ba..604e79cb6cbf 100644
--- a/include/media/cec-pin.h
+++ b/include/media/cec-pin.h
@@ -25,6 +25,9 @@
* @read_hpd: read the HPD pin. Return true if high, false if low or
* an error if negative. If NULL or -ENOTTY is returned,
* then this is not supported.
+ * @read_5v: read the 5V pin. Return true if high, false if low or
+ * an error if negative. If NULL or -ENOTTY is returned,
+ * then this is not supported.
*
* These operations are used by the cec pin framework to manipulate
* the CEC pin.
@@ -38,6 +41,7 @@ struct cec_pin_ops {
void (*free)(struct cec_adapter *adap);
void (*status)(struct cec_adapter *adap, struct seq_file *file);
int (*read_hpd)(struct cec_adapter *adap);
+ int (*read_5v)(struct cec_adapter *adap);
};
/**
diff --git a/include/media/cec.h b/include/media/cec.h
index 580ab1042898..ff9847f7f99d 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -79,7 +79,7 @@ struct cec_event_entry {
};
#define CEC_NUM_CORE_EVENTS 2
-#define CEC_NUM_EVENTS CEC_EVENT_PIN_HPD_HIGH
+#define CEC_NUM_EVENTS CEC_EVENT_PIN_5V_HIGH
struct cec_fh {
struct list_head list;
@@ -309,6 +309,16 @@ void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high,
void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts);
/**
+ * cec_queue_pin_5v_event() - queue a pin event with a given timestamp.
+ *
+ * @adap: pointer to the cec adapter
+ * @is_high: when true the 5V pin is high, otherwise it is low
+ * @ts: the timestamp for this event
+ *
+ */
+void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts);
+
+/**
* cec_get_edid_phys_addr() - find and return the physical address
*
* @edid: pointer to the EDID data
diff --git a/include/media/dvb_frontend.h b/include/media/dvb_frontend.h
index 331c8269c00e..6f7a85ab3541 100644
--- a/include/media/dvb_frontend.h
+++ b/include/media/dvb_frontend.h
@@ -52,6 +52,10 @@
*/
#define MAX_DELSYS 8
+/* Helper definitions to be used at frontend drivers */
+#define kHz 1000UL
+#define MHz 1000000UL
+
/**
* struct dvb_frontend_tune_settings - parameters to adjust frontend tuning
*
@@ -73,22 +77,19 @@ struct dvb_frontend;
* struct dvb_tuner_info - Frontend name and min/max ranges/bandwidths
*
* @name: name of the Frontend
- * @frequency_min: minimal frequency supported
- * @frequency_max: maximum frequency supported
- * @frequency_step: frequency step
+ * @frequency_min_hz: minimal frequency supported in Hz
+ * @frequency_max_hz: maximum frequency supported in Hz
+ * @frequency_step_hz: frequency step in Hz
* @bandwidth_min: minimal frontend bandwidth supported
* @bandwidth_max: maximum frontend bandwidth supported
* @bandwidth_step: frontend bandwidth step
- *
- * NOTE: frequency parameters are in Hz, for terrestrial/cable or kHz for
- * satellite.
*/
struct dvb_tuner_info {
char name[128];
- u32 frequency_min;
- u32 frequency_max;
- u32 frequency_step;
+ u32 frequency_min_hz;
+ u32 frequency_max_hz;
+ u32 frequency_step_hz;
u32 bandwidth_min;
u32 bandwidth_max;
@@ -316,6 +317,34 @@ struct analog_demod_ops {
struct dtv_frontend_properties;
+/**
+ * struct dvb_frontend_internal_info - Frontend properties and capabilities
+ *
+ * @name: Name of the frontend
+ * @frequency_min_hz: Minimal frequency supported by the frontend.
+ * @frequency_max_hz:	Maximal frequency supported by the frontend.
+ * @frequency_stepsize_hz: All frequencies are multiple of this value.
+ * @frequency_tolerance_hz: Frequency tolerance.
+ * @symbol_rate_min: Minimal symbol rate, in bauds
+ * (for Cable/Satellite systems).
+ * @symbol_rate_max: Maximal symbol rate, in bauds
+ * (for Cable/Satellite systems).
+ * @symbol_rate_tolerance: Maximal symbol rate tolerance, in ppm
+ * (for Cable/Satellite systems).
+ * @caps: Capabilities supported by the frontend,
+ * as specified in &enum fe_caps.
+ */
+struct dvb_frontend_internal_info {
+ char name[128];
+ u32 frequency_min_hz;
+ u32 frequency_max_hz;
+ u32 frequency_stepsize_hz;
+ u32 frequency_tolerance_hz;
+ u32 symbol_rate_min;
+ u32 symbol_rate_max;
+ u32 symbol_rate_tolerance;
+ enum fe_caps caps;
+};
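
(Illustration, not part of the patch: the new struct together with the kHz/MHz helpers defined above; all values below are made up.)

static const struct dvb_frontend_internal_info example_fe_info = {
	.name			= "example demod",
	.frequency_min_hz	=  42 * MHz,	/* helpers keep units explicit */
	.frequency_max_hz	= 870 * MHz,
	.frequency_stepsize_hz	= 62500,
};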
/**
* struct dvb_frontend_ops - Demodulation information and callbacks for
@@ -403,7 +432,7 @@ struct dtv_frontend_properties;
* @analog_ops: pointer to &struct analog_demod_ops
*/
struct dvb_frontend_ops {
- struct dvb_frontend_info info;
+ struct dvb_frontend_internal_info info;
u8 delsys[MAX_DELSYS];
diff --git a/include/media/i2c/lm3560.h b/include/media/i2c/lm3560.h
index a5bd310c9e1e..0e2b1c751a5d 100644
--- a/include/media/i2c/lm3560.h
+++ b/include/media/i2c/lm3560.h
@@ -22,6 +22,7 @@
#include <media/v4l2-subdev.h>
+#define LM3559_NAME "lm3559"
#define LM3560_NAME "lm3560"
#define LM3560_I2C_ADDR (0x53)
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 160bca96d524..cdc87ec61e54 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -338,7 +338,7 @@ void v4l_bound_align_image(unsigned int *width, unsigned int wmin,
({ \
BUILD_BUG_ON(sizeof((array)->width_field) != sizeof(u32) || \
sizeof((array)->height_field) != sizeof(u32)); \
- (typeof(&(*(array))))__v4l2_find_nearest_size( \
+ (typeof(&(array)[0]))__v4l2_find_nearest_size( \
(array), array_size, sizeof(*(array)), \
offsetof(typeof(*(array)), width_field), \
offsetof(typeof(*(array)), height_field), \
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 5b445b5654f7..f615ba1b29dd 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -181,10 +181,10 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* not freed when the control is deleted. Should this be needed
* then a new internal bitfield can be added to tell the framework
* to free this pointer.
- * @p_cur: The control's current value represented via a union with
+ * @p_cur: The control's current value represented via a union which
* provides a standard way of accessing control types
* through a pointer.
- * @p_new: The control's new value represented via a union with provides
+ * @p_new: The control's new value represented via a union which provides
* a standard way of accessing control types
* through a pointer.
*/
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index a8dbf5b54b5c..5848d92c30da 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -621,7 +621,7 @@ const char *v4l2_norm_to_name(v4l2_std_id id);
* v4l2_video_std_frame_period - Ancillary routine that fills a
* struct &v4l2_fract pointer with the default framerate fraction.
*
- * @id: analog TV sdandard ID.
+ * @id: analog TV standard ID.
* @frameperiod: struct &v4l2_fract pointer to be filled
*
*/
@@ -632,7 +632,7 @@ void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod);
* a &v4l2_standard structure according to the @id parameter.
*
* @vs: struct &v4l2_standard pointer to be filled
- * @id: analog TV sdandard ID.
+ * @id: analog TV standard ID.
* @name: name of the standard to be used
*
* .. note::
@@ -643,6 +643,17 @@ int v4l2_video_std_construct(struct v4l2_standard *vs,
int id, const char *name);
/**
+ * v4l_video_std_enumstd - Ancillary routine that fills in the fields of
+ * a &v4l2_standard structure according to the @id and @vs->index
+ * parameters.
+ *
+ * @vs: struct &v4l2_standard pointer to be filled.
+ * @id: analog TV standard ID.
+ *
+ */
+int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id);
+
+/**
* v4l_printk_ioctl - Ancillary routine that prints the ioctl in a
* human-readable format.
*
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 4d8626c468bc..4bbb5f3d2b02 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -45,6 +45,8 @@
/* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively. */
#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH BIT(12)
#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW BIT(13)
+#define V4L2_MBUS_DATA_ENABLE_HIGH BIT(14)
+#define V4L2_MBUS_DATA_ENABLE_LOW BIT(15)
/* Serial flags */
/* How many lanes the client can use */
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 3d07ba3a8262..d655720e16a1 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -32,7 +32,7 @@
* assumed that one source and one destination buffer are all
* that is required for the driver to perform one full transaction.
* This method may not sleep.
- * @job_abort: required. Informs the driver that it has to abort the currently
+ * @job_abort: optional. Informs the driver that it has to abort the currently
* running transaction as soon as possible (i.e. as soon as it can
* stop the device safely; e.g. in the next interrupt handler),
* even if the transaction would not have been finished by then.
@@ -40,19 +40,14 @@
* v4l2_m2m_job_finish() (as if the transaction ended normally).
* This function does not have to (and will usually not) wait
* until the device enters a state when it can be stopped.
- * @lock: optional. Define a driver's own lock callback, instead of using
- * &v4l2_m2m_ctx->q_lock.
- * @unlock: optional. Define a driver's own unlock callback, instead of
- * using &v4l2_m2m_ctx->q_lock.
*/
struct v4l2_m2m_ops {
void (*device_run)(void *priv);
int (*job_ready)(void *priv);
void (*job_abort)(void *priv);
- void (*lock)(void *priv);
- void (*unlock)(void *priv);
};
+struct video_device;
struct v4l2_m2m_dev;
/**
@@ -328,6 +323,24 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
*/
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
+int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ struct video_device *vdev, int function);
+#else
+static inline void
+v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
+{
+}
+
+static inline int
+v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ struct video_device *vdev, int function)
+{
+ return 0;
+}
+#endif
+
/**
* v4l2_m2m_release() - cleans up and frees a m2m_dev structure
*
@@ -437,6 +450,35 @@ static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
}
/**
+ * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
+ *
+ * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
+ */
+void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);
+
+/**
+ * v4l2_m2m_last_src_buf() - return last source buffer from the list of
+ * ready buffers
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ */
+static inline void *v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
+}
+
+/**
+ * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
+ * ready buffers
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ */
+static inline void *v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
+}
+
+/**
* v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
* buffers
*
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 678c24de1ac6..3093b9cb9067 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -25,6 +25,7 @@ int vsp1_du_init(struct device *dev);
* struct vsp1_du_lif_config - VSP LIF configuration
* @width: output frame width
* @height: output frame height
+ * @interlaced: true for interlaced pipelines
* @callback: frame completion callback function (optional). When a callback
* is provided, the VSP driver guarantees that it will be called once
* and only once for each vsp1_du_atomic_flush() call.
@@ -33,6 +34,7 @@ int vsp1_du_init(struct device *dev);
struct vsp1_du_lif_config {
unsigned int width;
unsigned int height;
+ bool interlaced;
void (*callback)(void *data, bool completed, u32 crc);
void *callback_data;
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 9e59ebfded62..1ad5b19e83a9 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -6,6 +6,7 @@
* Public action API for classifiers/qdiscs
*/
+#include <linux/refcount.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
@@ -26,8 +27,8 @@ struct tc_action {
struct tcf_idrinfo *idrinfo;
u32 tcfa_index;
- int tcfa_refcnt;
- int tcfa_bindcnt;
+ refcount_t tcfa_refcnt;
+ atomic_t tcfa_bindcnt;
u32 tcfa_capab;
int tcfa_action;
struct tcf_t tcfa_tm;
@@ -37,7 +38,7 @@ struct tc_action {
spinlock_t tcfa_lock;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
- struct tc_cookie *act_cookie;
+ struct tc_cookie __rcu *act_cookie;
struct tcf_chain *goto_chain;
};
#define tcf_index common.tcfa_index
@@ -84,14 +85,15 @@ struct tc_action_ops {
size_t size;
struct module *owner;
int (*act)(struct sk_buff *, const struct tc_action *,
- struct tcf_result *);
+ struct tcf_result *); /* called under RCU BH lock */
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
void (*cleanup)(struct tc_action *);
int (*lookup)(struct net *net, struct tc_action **a, u32 index,
struct netlink_ext_ack *extack);
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act, int ovr,
- int bind, struct netlink_ext_ack *extack);
+ int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack);
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int,
const struct tc_action_ops *,
@@ -99,6 +101,8 @@ struct tc_action_ops {
void (*stats_update)(struct tc_action *, u64, u32, u64);
size_t (*get_fill_size)(const struct tc_action *act);
struct net_device *(*get_dev)(const struct tc_action *a);
+ void (*put_dev)(struct net_device *dev);
+ int (*delete)(struct net *net, u32 index);
};
struct tc_action_net {
@@ -151,6 +155,10 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
int bind, bool cpustats);
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
+void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
+int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+ struct tc_action **a, int bind);
+int tcf_idr_delete_index(struct tc_action_net *tn, u32 index);
int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
static inline int tcf_idr_release(struct tc_action *a, bool bind)
@@ -161,18 +169,20 @@ static inline int tcf_idr_release(struct tc_action *a, bool bind)
int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
int tcf_unregister_action(struct tc_action_ops *a,
struct pernet_operations *ops);
-int tcf_action_destroy(struct list_head *actions, int bind);
+int tcf_action_destroy(struct tc_action *actions[], int bind);
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res);
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
struct nlattr *est, char *name, int ovr, int bind,
- struct list_head *actions, size_t *attr_size,
- struct netlink_ext_ack *extack);
+ struct tc_action *actions[], size_t *attr_size,
+ bool rtnl_held, struct netlink_ext_ack *extack);
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
struct nlattr *nla, struct nlattr *est,
char *name, int ovr, int bind,
+ bool rtnl_held,
struct netlink_ext_ack *extack);
-int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int);
+int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
+ int ref);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
@@ -190,9 +200,6 @@ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
#endif
}
-typedef int tc_setup_cb_t(enum tc_setup_type type,
- void *type_data, void *cb_priv);
-
#ifdef CONFIG_NET_CLS_ACT
int tc_setup_cb_egdev_register(const struct net_device *dev,
tc_setup_cb_t *cb, void *cb_priv);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 5f43f7a70fe6..6def0351bcc3 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -108,6 +108,7 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
u32 banned_flags);
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
bool match_wildcard);
+bool inet_rcv_saddr_any(const struct sock *sk);
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h
index a5563d27a3eb..8003a9f6eb43 100644
--- a/include/net/af_ieee802154.h
+++ b/include/net/af_ieee802154.h
@@ -56,6 +56,7 @@ struct sockaddr_ieee802154 {
#define WPAN_WANTACK 0
#define WPAN_SECURITY 1
#define WPAN_SECURITY_LEVEL 2
+#define WPAN_WANTLQI 3
#define WPAN_SECURITY_DEFAULT 0
#define WPAN_SECURITY_OFF 1
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 8ae8ee004258..f53edb3754bc 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -61,7 +61,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
struct msghdr *, size_t,
rxrpc_notify_end_tx_t);
int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
- void *, size_t, size_t *, bool, u32 *, u16 *);
+ struct iov_iter *, bool, u32 *, u16 *);
bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
u32, int, const char *);
void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 1668211297a9..cdd9f1fe7cfa 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -183,6 +183,15 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_NON_PERSISTENT_DIAG,
+
+ /* When this quirk is set, setup() is run after every
+ * open() and not just after the first open().
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ *
+ */
+ HCI_QUIRK_NON_PERSISTENT_SETUP,
};
/* HCI device flags */
@@ -260,6 +269,7 @@ enum {
HCI_VENDOR_DIAG,
HCI_FORCE_BREDR_SMP,
HCI_FORCE_STATIC_ADDR,
+ HCI_LL_RPA_RESOLUTION,
__HCI_NUM_FLAGS,
};
@@ -291,6 +301,14 @@ enum {
#define HCI_DH3 0x0800
#define HCI_DH5 0x8000
+/* HCI packet types inverted masks */
+#define HCI_2DH1 0x0002
+#define HCI_3DH1 0x0004
+#define HCI_2DH3 0x0100
+#define HCI_3DH3 0x0200
+#define HCI_2DH5 0x1000
+#define HCI_3DH5 0x2000
+
#define HCI_HV1 0x0020
#define HCI_HV2 0x0040
#define HCI_HV3 0x0080
@@ -354,6 +372,8 @@ enum {
#define LMP_PCONTROL 0x04
#define LMP_TRANSPARENT 0x08
+#define LMP_EDR_2M 0x02
+#define LMP_EDR_3M 0x04
#define LMP_RSSI_INQ 0x40
#define LMP_ESCO 0x80
@@ -361,7 +381,9 @@ enum {
#define LMP_EV5 0x02
#define LMP_NO_BREDR 0x20
#define LMP_LE 0x40
+#define LMP_EDR_3SLOT 0x80
+#define LMP_EDR_5SLOT 0x01
#define LMP_SNIFF_SUBR 0x02
#define LMP_PAUSE_ENC 0x04
#define LMP_EDR_ESCO_2M 0x20
@@ -398,7 +420,12 @@ enum {
#define HCI_LE_SLAVE_FEATURES 0x08
#define HCI_LE_PING 0x10
#define HCI_LE_DATA_LEN_EXT 0x20
+#define HCI_LE_PHY_2M 0x01
+#define HCI_LE_PHY_CODED 0x08
+#define HCI_LE_EXT_ADV 0x10
#define HCI_LE_EXT_SCAN_POLICY 0x80
#define HCI_LE_CHAN_SEL_ALG2 0x40
/* Connection modes */
@@ -1490,6 +1517,16 @@ struct hci_cp_le_write_def_data_len {
__le16 tx_time;
} __packed;
+#define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029
+
+#define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a
+struct hci_rp_le_read_resolv_list_size {
+ __u8 status;
+ __u8 size;
+} __packed;
+
+#define HCI_OP_LE_SET_ADDR_RESOLV_ENABLE 0x202d
+
#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
struct hci_rp_le_read_max_data_len {
__u8 status;
@@ -1506,6 +1543,134 @@ struct hci_cp_le_set_default_phy {
__u8 rx_phys;
} __packed;
+#define HCI_LE_SET_PHY_1M 0x01
+#define HCI_LE_SET_PHY_2M 0x02
+#define HCI_LE_SET_PHY_CODED 0x04
+
+#define HCI_OP_LE_SET_EXT_SCAN_PARAMS 0x2041
+struct hci_cp_le_set_ext_scan_params {
+ __u8 own_addr_type;
+ __u8 filter_policy;
+ __u8 scanning_phys;
+ __u8 data[0];
+} __packed;
+
+#define LE_SCAN_PHY_1M 0x01
+#define LE_SCAN_PHY_2M 0x02
+#define LE_SCAN_PHY_CODED 0x04
+
+struct hci_cp_le_scan_phy_params {
+ __u8 type;
+ __le16 interval;
+ __le16 window;
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_SCAN_ENABLE 0x2042
+struct hci_cp_le_set_ext_scan_enable {
+ __u8 enable;
+ __u8 filter_dup;
+ __le16 duration;
+ __le16 period;
+} __packed;
+
+#define HCI_OP_LE_EXT_CREATE_CONN 0x2043
+struct hci_cp_le_ext_create_conn {
+ __u8 filter_policy;
+ __u8 own_addr_type;
+ __u8 peer_addr_type;
+ bdaddr_t peer_addr;
+ __u8 phys;
+ __u8 data[0];
+} __packed;
+
+struct hci_cp_le_ext_conn_param {
+ __le16 scan_interval;
+ __le16 scan_window;
+ __le16 conn_interval_min;
+ __le16 conn_interval_max;
+ __le16 conn_latency;
+ __le16 supervision_timeout;
+ __le16 min_ce_len;
+ __le16 max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS 0x203b
+struct hci_rp_le_read_num_supported_adv_sets {
+ __u8 status;
+ __u8 num_of_sets;
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_ADV_PARAMS 0x2036
+struct hci_cp_le_set_ext_adv_params {
+ __u8 handle;
+ __le16 evt_properties;
+ __u8 min_interval[3];
+ __u8 max_interval[3];
+ __u8 channel_map;
+ __u8 own_addr_type;
+ __u8 peer_addr_type;
+ bdaddr_t peer_addr;
+ __u8 filter_policy;
+ __u8 tx_power;
+ __u8 primary_phy;
+ __u8 secondary_max_skip;
+ __u8 secondary_phy;
+ __u8 sid;
+ __u8 notif_enable;
+} __packed;
+
+#define HCI_ADV_PHY_1M 0x01
+#define HCI_ADV_PHY_2M 0x02
+#define HCI_ADV_PHY_CODED 0x03
+
+struct hci_rp_le_set_ext_adv_params {
+ __u8 status;
+ __u8 tx_power;
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039
+struct hci_cp_le_set_ext_adv_enable {
+ __u8 enable;
+ __u8 num_of_sets;
+ __u8 data[0];
+} __packed;
+
+struct hci_cp_ext_adv_set {
+ __u8 handle;
+ __le16 duration;
+ __u8 max_events;
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037
+struct hci_cp_le_set_ext_adv_data {
+ __u8 handle;
+ __u8 operation;
+ __u8 frag_pref;
+ __u8 length;
+ __u8 data[HCI_MAX_AD_LENGTH];
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
+struct hci_cp_le_set_ext_scan_rsp_data {
+ __u8 handle;
+ __u8 operation;
+ __u8 frag_pref;
+ __u8 length;
+ __u8 data[HCI_MAX_AD_LENGTH];
+} __packed;
+
+#define LE_SET_ADV_DATA_OP_COMPLETE 0x03
+
+#define LE_SET_ADV_DATA_NO_FRAG 0x01
+
+#define HCI_OP_LE_CLEAR_ADV_SETS 0x203d
+
+#define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035
+struct hci_cp_le_set_adv_set_rand_addr {
+ __u8 handle;
+ bdaddr_t bdaddr;
+} __packed;
+
/* ---- HCI Events ---- */
#define HCI_EV_INQUIRY_COMPLETE 0x01
@@ -1893,6 +2058,23 @@ struct hci_ev_le_conn_complete {
#define LE_ADV_SCAN_IND 0x02
#define LE_ADV_NONCONN_IND 0x03
#define LE_ADV_SCAN_RSP 0x04
+#define LE_ADV_INVALID 0x05
+
+/* Legacy event types in extended adv report */
+#define LE_LEGACY_ADV_IND 0x0013
+#define LE_LEGACY_ADV_DIRECT_IND 0x0015
+#define LE_LEGACY_ADV_SCAN_IND 0x0012
+#define LE_LEGACY_NONCONN_IND 0x0010
+#define LE_LEGACY_SCAN_RSP_ADV 0x001b
+#define LE_LEGACY_SCAN_RSP_ADV_SCAN 0x001a
+
+/* Extended Advertising event types */
+#define LE_EXT_ADV_NON_CONN_IND 0x0000
+#define LE_EXT_ADV_CONN_IND 0x0001
+#define LE_EXT_ADV_SCAN_IND 0x0002
+#define LE_EXT_ADV_DIRECT_IND 0x0004
+#define LE_EXT_ADV_SCAN_RSP 0x0008
+#define LE_EXT_ADV_LEGACY_PDU 0x0010
#define ADDR_LE_DEV_PUBLIC 0x00
#define ADDR_LE_DEV_RANDOM 0x01
@@ -1957,6 +2139,48 @@ struct hci_ev_le_direct_adv_info {
__s8 rssi;
} __packed;
+#define HCI_EV_LE_EXT_ADV_REPORT 0x0d
+struct hci_ev_le_ext_adv_report {
+ __le16 evt_type;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 primary_phy;
+ __u8 secondary_phy;
+ __u8 sid;
+ __u8 tx_power;
+ __s8 rssi;
+ __le16 interval;
+ __u8 direct_addr_type;
+ bdaddr_t direct_addr;
+ __u8 length;
+ __u8 data[0];
+} __packed;
+
+#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a
+struct hci_ev_le_enh_conn_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 role;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ bdaddr_t local_rpa;
+ bdaddr_t peer_rpa;
+ __le16 interval;
+ __le16 latency;
+ __le16 supervision_timeout;
+ __u8 clk_accurancy;
+} __packed;
+
+#define HCI_EV_LE_EXT_ADV_SET_TERM 0x12
+struct hci_evt_le_ext_adv_set_term {
+ __u8 status;
+ __u8 handle;
+ __le16 conn_handle;
+ __u8 num_evts;
+} __packed;
+
+#define HCI_EV_VENDOR 0xff
+
/* Internal events generated by Bluetooth stack */
#define HCI_EV_STACK_INTERNAL 0xfd
struct hci_ev_stack_internal {
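
The data[0] members make the new advertising commands variable-length: the enable header is followed by one hci_cp_ext_adv_set record per advertising set. A sketch of how such a command could be assembled, assuming an hci_request context (req) and an illustrative instance handle:

        u8 instance = 0x01;                     /* illustrative adv set handle */
        struct {
                struct hci_cp_le_set_ext_adv_enable cp;
                struct hci_cp_ext_adv_set set;
        } __packed enable_cp;

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.cp.enable = 0x01;
        enable_cp.cp.num_of_sets = 0x01;
        enable_cp.set.handle = instance;
        enable_cp.set.duration = cpu_to_le16(0);  /* advertise until disabled */
        enable_cp.set.max_events = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
                    sizeof(enable_cp), &enable_cp);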
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 893bbbb5d2fa..0db1b9b428b7 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -171,6 +171,10 @@ struct adv_info {
__u8 adv_data[HCI_MAX_AD_LENGTH];
__u16 scan_rsp_len;
__u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+ __s8 tx_power;
+ bdaddr_t random_addr;
+ bool rpa_expired;
+ struct delayed_work rpa_expired_cb;
};
#define HCI_MAX_ADV_INSTANCES 5
@@ -221,6 +225,8 @@ struct hci_dev {
__u8 features[HCI_MAX_PAGES][8];
__u8 le_features[8];
__u8 le_white_list_size;
+ __u8 le_resolv_list_size;
+ __u8 le_num_of_adv_sets;
__u8 le_states[8];
__u8 commands[64];
__u8 hci_ver;
@@ -314,6 +320,9 @@ struct hci_dev {
unsigned long sco_last_tx;
unsigned long le_last_tx;
+ __u8 le_tx_def_phys;
+ __u8 le_rx_def_phys;
+
struct workqueue_struct *workqueue;
struct workqueue_struct *req_workqueue;
@@ -367,6 +376,7 @@ struct hci_dev {
struct list_head identity_resolving_keys;
struct list_head remote_oob_data;
struct list_head le_white_list;
+ struct list_head le_resolv_list;
struct list_head le_conn_params;
struct list_head pend_le_conns;
struct list_head pend_le_reports;
@@ -1106,6 +1116,7 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
u16 scan_rsp_len, u8 *scan_rsp_data,
u16 timeout, u16 duration);
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
+void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
@@ -1136,6 +1147,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT)
+#define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & LMP_EDR_2M)
+#define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M)
+#define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT)
+#define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT)
/* ----- Extended LMP capabilities ----- */
#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER)
@@ -1156,6 +1171,24 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
hci_dev_test_flag(dev, HCI_SC_ENABLED))
+#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
+ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
+
+#define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \
+ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))
+
+#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
+ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
+
+/* Use ext scanning if both ext scan param and ext scan enable are supported */
+#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
+ ((dev)->commands[37] & 0x40))
+/* Use ext create connection if command is supported */
+#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
+
+/* Extended advertising support */
+#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
+
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
@@ -1529,6 +1562,7 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
u8 instance);
+int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
u16 to_multiplier);
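
A sketch of how the new capability macros could drive code-path selection; the example_* helpers are hypothetical:

        /* hdev is a struct hci_dev * */
        if (use_ext_scan(hdev))
                example_start_ext_scan(hdev);   /* HCI_OP_LE_SET_EXT_SCAN_* */
        else
                example_start_legacy_scan(hdev);

        if (ext_adv_capable(hdev))
                /* controller exposes up to hdev->le_num_of_adv_sets sets */
                example_setup_ext_adv(hdev);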
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index e7303eee65cd..9cee7ddc6741 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -101,6 +101,7 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_PRIVACY 0x00002000
#define MGMT_SETTING_CONFIGURATION 0x00004000
#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
+#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
@@ -561,6 +562,12 @@ struct mgmt_rp_add_advertising {
#define MGMT_ADV_FLAG_TX_POWER BIT(4)
#define MGMT_ADV_FLAG_APPEARANCE BIT(5)
#define MGMT_ADV_FLAG_LOCAL_NAME BIT(6)
+#define MGMT_ADV_FLAG_SEC_1M BIT(7)
+#define MGMT_ADV_FLAG_SEC_2M BIT(8)
+#define MGMT_ADV_FLAG_SEC_CODED BIT(9)
+
+#define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \
+ MGMT_ADV_FLAG_SEC_CODED)
#define MGMT_OP_REMOVE_ADVERTISING 0x003F
struct mgmt_cp_remove_advertising {
@@ -604,6 +611,49 @@ struct mgmt_cp_set_appearance {
} __packed;
#define MGMT_SET_APPEARANCE_SIZE 2
+#define MGMT_OP_GET_PHY_CONFIGURATION 0x0044
+struct mgmt_rp_get_phy_confguration {
+ __le32 supported_phys;
+ __le32 configurable_phys;
+ __le32 selected_phys;
+} __packed;
+#define MGMT_GET_PHY_CONFIGURATION_SIZE 0
+
+#define MGMT_PHY_BR_1M_1SLOT 0x00000001
+#define MGMT_PHY_BR_1M_3SLOT 0x00000002
+#define MGMT_PHY_BR_1M_5SLOT 0x00000004
+#define MGMT_PHY_EDR_2M_1SLOT 0x00000008
+#define MGMT_PHY_EDR_2M_3SLOT 0x00000010
+#define MGMT_PHY_EDR_2M_5SLOT 0x00000020
+#define MGMT_PHY_EDR_3M_1SLOT 0x00000040
+#define MGMT_PHY_EDR_3M_3SLOT 0x00000080
+#define MGMT_PHY_EDR_3M_5SLOT 0x00000100
+#define MGMT_PHY_LE_1M_TX 0x00000200
+#define MGMT_PHY_LE_1M_RX 0x00000400
+#define MGMT_PHY_LE_2M_TX 0x00000800
+#define MGMT_PHY_LE_2M_RX 0x00001000
+#define MGMT_PHY_LE_CODED_TX 0x00002000
+#define MGMT_PHY_LE_CODED_RX 0x00004000
+
+#define MGMT_PHY_BREDR_MASK (MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | \
+ MGMT_PHY_BR_1M_5SLOT | MGMT_PHY_EDR_2M_1SLOT | \
+ MGMT_PHY_EDR_2M_3SLOT | MGMT_PHY_EDR_2M_5SLOT | \
+ MGMT_PHY_EDR_3M_1SLOT | MGMT_PHY_EDR_3M_3SLOT | \
+ MGMT_PHY_EDR_3M_5SLOT)
+#define MGMT_PHY_LE_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX | \
+ MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX | \
+ MGMT_PHY_LE_CODED_TX | MGMT_PHY_LE_CODED_RX)
+#define MGMT_PHY_LE_TX_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_2M_TX | \
+ MGMT_PHY_LE_CODED_TX)
+#define MGMT_PHY_LE_RX_MASK (MGMT_PHY_LE_1M_RX | MGMT_PHY_LE_2M_RX | \
+ MGMT_PHY_LE_CODED_RX)
+
+#define MGMT_OP_SET_PHY_CONFIGURATION 0x0045
+struct mgmt_cp_set_phy_confguration {
+ __le32 selected_phys;
+} __packed;
+#define MGMT_SET_PHY_CONFIGURATION_SIZE 4
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -824,3 +874,8 @@ struct mgmt_ev_ext_info_changed {
__le16 eir_len;
__u8 eir[0];
} __packed;
+
+#define MGMT_EV_PHY_CONFIGURATION_CHANGED 0x0026
+struct mgmt_ev_phy_configuration_changed {
+ __le32 selected_phys;
+} __packed;
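
A sketch of the kind of validation these masks allow on MGMT_OP_SET_PHY_CONFIGURATION input; cp is assumed to point at the command payload:

        u32 selected_phys = __le32_to_cpu(cp->selected_phys);

        /* reject any bit outside the defined BR/EDR and LE PHYs */
        if (selected_phys & ~(MGMT_PHY_BREDR_MASK | MGMT_PHY_LE_MASK))
                return -EINVAL;

        /* an LE selection must include at least one TX PHY */
        if ((selected_phys & MGMT_PHY_LE_MASK) &&
            !(selected_phys & MGMT_PHY_LE_TX_MASK))
                return -EINVAL;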
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index f358ad5e4214..fc3111515f5c 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -283,7 +283,7 @@ static inline const char *bond_3ad_churn_desc(churn_state_t state)
"none",
"unknown"
};
- int max_size = sizeof(churn_description) / sizeof(churn_description[0]);
+ int max_size = ARRAY_SIZE(churn_description);
if (state >= max_size)
state = max_size - 1;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 808f1d167349..a2d058170ea3 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -411,6 +411,19 @@ static inline bool bond_slave_can_tx(struct slave *slave)
bond_is_active_slave(slave);
}
+static inline bool bond_is_active_slave_dev(const struct net_device *slave_dev)
+{
+ struct slave *slave;
+ bool active;
+
+ rcu_read_lock();
+ slave = bond_slave_get_rcu(slave_dev);
+ active = bond_is_active_slave(slave);
+ rcu_read_unlock();
+
+ return active;
+}
+
static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len)
{
if (len == ETH_ALEN) {
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index c5187438af38..ba61cdd09eaa 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -121,21 +121,6 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
#endif
}
-static inline void sock_poll_busy_loop(struct socket *sock, __poll_t events)
-{
- if (sk_can_busy_loop(sock->sk) &&
- events && (events & POLL_BUSY_LOOP)) {
- /* once, only if requested by syscall */
- sk_busy_loop(sock->sk, 1);
- }
-}
-
-/* if this socket can poll_ll, tell the system call */
-static inline __poll_t sock_poll_busy_flag(struct socket *sock)
-{
- return sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0;
-}
-
/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)
@@ -151,6 +136,7 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
#ifdef CONFIG_NET_RX_BUSY_POLL
sk->sk_napi_id = skb->napi_id;
#endif
+ sk_rx_queue_set(sk, skb);
}
/* variant used for unconnected sockets */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 1beb3ead0385..9a850973e09a 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -285,6 +285,41 @@ struct ieee80211_sta_vht_cap {
struct ieee80211_vht_mcs_info vht_mcs;
};
+#define IEEE80211_HE_PPE_THRES_MAX_LEN 25
+
+/**
+ * struct ieee80211_sta_he_cap - STA's HE capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11ax HE capabilities for a STA.
+ *
+ * @has_he: true iff HE data is valid.
+ * @he_cap_elem: Fixed portion of the HE capabilities element.
+ * @he_mcs_nss_supp: The supported NSS/MCS combinations.
+ * @ppe_thres: Holds the PPE Thresholds data.
+ */
+struct ieee80211_sta_he_cap {
+ bool has_he;
+ struct ieee80211_he_cap_elem he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp;
+ u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN];
+};
+
+/**
+ * struct ieee80211_sband_iftype_data
+ *
+ * This structure encapsulates sband data that is relevant for the
+ * interface types defined in @types_mask. Each type in the
+ * @types_mask must be unique across all instances of iftype_data.
+ *
+ * @types_mask: interface types mask
+ * @he_cap: holds the HE capabilities
+ */
+struct ieee80211_sband_iftype_data {
+ u16 types_mask;
+ struct ieee80211_sta_he_cap he_cap;
+};
+
/**
* struct ieee80211_supported_band - frequency band definition
*
@@ -301,6 +336,11 @@ struct ieee80211_sta_vht_cap {
* @n_bitrates: Number of bitrates in @bitrates
* @ht_cap: HT capabilities in this band
* @vht_cap: VHT capabilities in this band
+ * @n_iftype_data: number of iftype data entries
+ * @iftype_data: interface type data entries. Note that the bits in
+ * @types_mask inside this structure cannot overlap (i.e. only
+ * one occurrence of each type is allowed across all instances of
+ * iftype_data).
*/
struct ieee80211_supported_band {
struct ieee80211_channel *channels;
@@ -310,9 +350,56 @@ struct ieee80211_supported_band {
int n_bitrates;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
+ u16 n_iftype_data;
+ const struct ieee80211_sband_iftype_data *iftype_data;
};
/**
+ * ieee80211_get_sband_iftype_data - return sband data for a given iftype
+ * @sband: the sband to search for the iftype data in
+ * @iftype: enum nl80211_iftype
+ *
+ * Return: pointer to struct ieee80211_sband_iftype_data, or NULL if none found
+ */
+static inline const struct ieee80211_sband_iftype_data *
+ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
+ u8 iftype)
+{
+ int i;
+
+ if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
+ return NULL;
+
+ for (i = 0; i < sband->n_iftype_data; i++) {
+ const struct ieee80211_sband_iftype_data *data =
+ &sband->iftype_data[i];
+
+ if (data->types_mask & BIT(iftype))
+ return data;
+ }
+
+ return NULL;
+}
+
+/**
+ * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA
+ * @sband: the sband to search for the STA on
+ *
+ * Return: pointer to the struct ieee80211_sta_he_cap, or NULL if none found
+ */
+static inline const struct ieee80211_sta_he_cap *
+ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband)
+{
+ const struct ieee80211_sband_iftype_data *data =
+ ieee80211_get_sband_iftype_data(sband, NL80211_IFTYPE_STATION);
+
+ if (data && data->he_cap.has_he)
+ return &data->he_cap;
+
+ return NULL;
+}
+
+/**
* wiphy_read_of_freq_limits - read frequency limits from device tree
*
* @wiphy: the wireless device to get extra limits for
@@ -899,6 +986,8 @@ enum station_parameters_apply_mask {
* @opmode_notif: operating mode field from Operating Mode Notification
* @opmode_notif_used: information if operating mode field is used
* @support_p2p_ps: information if station supports P2P PS mechanism
+ * @he_capa: HE capabilities of station
+ * @he_capa_len: the length of the HE capabilities
*/
struct station_parameters {
const u8 *supported_rates;
@@ -926,6 +1015,8 @@ struct station_parameters {
u8 opmode_notif;
bool opmode_notif_used;
int support_p2p_ps;
+ const struct ieee80211_he_cap_elem *he_capa;
+ u8 he_capa_len;
};
/**
@@ -1000,12 +1091,14 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
* @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
* @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
* @RATE_INFO_FLAGS_60G: 60GHz MCS
+ * @RATE_INFO_FLAGS_HE_MCS: HE MCS information
*/
enum rate_info_flags {
RATE_INFO_FLAGS_MCS = BIT(0),
RATE_INFO_FLAGS_VHT_MCS = BIT(1),
RATE_INFO_FLAGS_SHORT_GI = BIT(2),
RATE_INFO_FLAGS_60G = BIT(3),
+ RATE_INFO_FLAGS_HE_MCS = BIT(4),
};
/**
@@ -1019,6 +1112,7 @@ enum rate_info_flags {
* @RATE_INFO_BW_40: 40 MHz bandwidth
* @RATE_INFO_BW_80: 80 MHz bandwidth
* @RATE_INFO_BW_160: 160 MHz bandwidth
+ * @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation
*/
enum rate_info_bw {
RATE_INFO_BW_20 = 0,
@@ -1027,6 +1121,7 @@ enum rate_info_bw {
RATE_INFO_BW_40,
RATE_INFO_BW_80,
RATE_INFO_BW_160,
+ RATE_INFO_BW_HE_RU,
};
/**
@@ -1035,10 +1130,14 @@ enum rate_info_bw {
* Information about a receiving or transmitting bitrate
*
* @flags: bitflag of flags from &enum rate_info_flags
- * @mcs: mcs index if struct describes a 802.11n bitrate
+ * @mcs: mcs index if struct describes an HT/VHT/HE rate
* @legacy: bitrate in 100kbit/s for 802.11abg
- * @nss: number of streams (VHT only)
+ * @nss: number of streams (VHT & HE only)
* @bw: bandwidth (from &enum rate_info_bw)
+ * @he_gi: HE guard interval (from &enum nl80211_he_gi)
+ * @he_dcm: HE DCM value
+ * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc,
+ * only valid if bw is %RATE_INFO_BW_HE_RU)
*/
struct rate_info {
u8 flags;
@@ -1046,6 +1145,9 @@ struct rate_info {
u16 legacy;
u8 nss;
u8 bw;
+ u8 he_gi;
+ u8 he_dcm;
+ u8 he_ru_alloc;
};
/**
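
A sketch of a driver filling the extended rate_info for an HE reception; all values are illustrative:

        struct rate_info rinfo = {
                .flags = RATE_INFO_FLAGS_HE_MCS,
                .mcs = 11,                        /* HE-MCS index */
                .nss = 2,
                .bw = RATE_INFO_BW_HE_RU,         /* bandwidth given by the RU */
                .he_gi = NL80211_RATE_INFO_HE_GI_0_8,
                .he_dcm = 0,
                .he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_242,
        };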
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index 0e5e91be2d30..e22a8a3c089b 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -34,6 +34,19 @@ int dcb_ieee_setapp(struct net_device *, struct dcb_app *);
int dcb_ieee_delapp(struct net_device *, struct dcb_app *);
u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *);
+struct dcb_ieee_app_prio_map {
+ u64 map[IEEE_8021QAZ_MAX_TCS];
+};
+void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
+ struct dcb_ieee_app_prio_map *p_map);
+
+struct dcb_ieee_app_dscp_map {
+ u8 map[64];
+};
+void dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
+ struct dcb_ieee_app_dscp_map *p_map);
+u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev);
+
int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
u32 seq, u32 pid);
int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
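
A sketch of a driver consuming the new DSCP-to-priority map; example_set_dscp_prio() is hypothetical:

        struct dcb_ieee_app_dscp_map dscp_map;
        int dscp;

        dcb_ieee_getapp_dscp_prio_mask_map(dev, &dscp_map);
        for (dscp = 0; dscp < 64; dscp++) {
                /* each entry is a bitmask of priorities mapped to this
                 * DSCP value; pick the highest set bit, if any
                 */
                u8 prio_mask = dscp_map.map[dscp];

                if (prio_mask)
                        example_set_dscp_prio(dev, dscp, fls(prio_mask) - 1);
        }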
diff --git a/include/net/devlink.h b/include/net/devlink.h
index e336ea9c73df..b9b89d6604d4 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -27,6 +27,9 @@ struct devlink {
struct list_head sb_list;
struct list_head dpipe_table_list;
struct list_head resource_list;
+ struct list_head param_list;
+ struct list_head region_list;
+ u32 snapshot_id;
struct devlink_dpipe_headers *dpipe_headers;
const struct devlink_ops *ops;
struct device *dev;
@@ -295,6 +298,115 @@ struct devlink_resource {
#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
+#define DEVLINK_PARAM_MAX_STRING_VALUE 32
+enum devlink_param_type {
+ DEVLINK_PARAM_TYPE_U8,
+ DEVLINK_PARAM_TYPE_U16,
+ DEVLINK_PARAM_TYPE_U32,
+ DEVLINK_PARAM_TYPE_STRING,
+ DEVLINK_PARAM_TYPE_BOOL,
+};
+
+union devlink_param_value {
+ u8 vu8;
+ u16 vu16;
+ u32 vu32;
+ const char *vstr;
+ bool vbool;
+};
+
+struct devlink_param_gset_ctx {
+ union devlink_param_value val;
+ enum devlink_param_cmode cmode;
+};
+
+/**
+ * struct devlink_param - devlink configuration parameter data
+ * @id: parameter ID
+ * @name: name of the parameter
+ * @generic: indicates if the parameter is generic or driver specific
+ * @type: parameter type
+ * @supported_cmodes: bitmap of supported configuration modes
+ * @get: get parameter value, used for runtime and permanent
+ * configuration modes
+ * @set: set parameter value, used for runtime and permanent
+ * configuration modes
+ * @validate: validate that an input value is applicable (within value range, etc.)
+ *
+ * This struct should be used by the driver to fill the data for
+ * a parameter it registers.
+ */
+struct devlink_param {
+ u32 id;
+ const char *name;
+ bool generic;
+ enum devlink_param_type type;
+ unsigned long supported_cmodes;
+ int (*get)(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+ int (*set)(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+ int (*validate)(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack);
+};
+
+struct devlink_param_item {
+ struct list_head list;
+ const struct devlink_param *param;
+ union devlink_param_value driverinit_value;
+ bool driverinit_value_valid;
+};
+
+enum devlink_param_generic_id {
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+
+ /* add new param generic ids above here */
+ __DEVLINK_PARAM_GENERIC_ID_MAX,
+ DEVLINK_PARAM_GENERIC_ID_MAX = __DEVLINK_PARAM_GENERIC_ID_MAX - 1,
+};
+
+#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME "internal_error_reset"
+#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_MAX_MACS_NAME "max_macs"
+#define DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME "enable_sriov"
+#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME "region_snapshot_enable"
+#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
+{ \
+ .id = DEVLINK_PARAM_GENERIC_ID_##_id, \
+ .name = DEVLINK_PARAM_GENERIC_##_id##_NAME, \
+ .type = DEVLINK_PARAM_GENERIC_##_id##_TYPE, \
+ .generic = true, \
+ .supported_cmodes = _cmodes, \
+ .get = _get, \
+ .set = _set, \
+ .validate = _validate, \
+}
+
+#define DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, _get, _set, _validate) \
+{ \
+ .id = _id, \
+ .name = _name, \
+ .type = _type, \
+ .supported_cmodes = _cmodes, \
+ .get = _get, \
+ .set = _set, \
+ .validate = _validate, \
+}
+
+struct devlink_region;
+
+typedef void devlink_snapshot_data_dest_t(const void *data);
+
struct devlink_ops {
int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack);
int (*port_type_set)(struct devlink_port *devlink_port,
@@ -430,6 +542,26 @@ void devlink_resource_occ_get_register(struct devlink *devlink,
void *occ_get_priv);
void devlink_resource_occ_get_unregister(struct devlink *devlink,
u64 resource_id);
+int devlink_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count);
+void devlink_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count);
+int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *init_val);
+int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val);
+void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
+struct devlink_region *devlink_region_create(struct devlink *devlink,
+ const char *region_name,
+ u32 region_max_snapshots,
+ u64 region_size);
+void devlink_region_destroy(struct devlink_region *region);
+u32 devlink_region_shapshot_id_get(struct devlink *devlink);
+int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
+ u8 *data, u32 snapshot_id,
+ devlink_snapshot_data_dest_t *data_destructor);
#else
@@ -622,6 +754,69 @@ devlink_resource_occ_get_unregister(struct devlink *devlink,
{
}
+static inline int
+devlink_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ return 0;
+}
+
+static inline void
+devlink_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+
+}
+
+static inline int
+devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *init_val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+devlink_param_value_changed(struct devlink *devlink, u32 param_id)
+{
+}
+
+static inline struct devlink_region *
+devlink_region_create(struct devlink *devlink,
+ const char *region_name,
+ u32 region_max_snapshots,
+ u64 region_size)
+{
+ return NULL;
+}
+
+static inline void
+devlink_region_destroy(struct devlink_region *region)
+{
+}
+
+static inline u32
+devlink_region_shapshot_id_get(struct devlink *devlink)
+{
+ return 0;
+}
+
+static inline int
+devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
+ u8 *data, u32 snapshot_id,
+ devlink_snapshot_data_dest_t *data_destructor)
+{
+ return 0;
+}
+
#endif
#endif /* _NET_DEVLINK_H_ */
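
A sketch of a driver registering one generic parameter and later reading back its driverinit value; the callback slots and the error label are illustrative:

        static const struct devlink_param example_params[] = {
                DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
                                      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
                                      NULL, NULL, NULL),
        };

        err = devlink_params_register(devlink, example_params,
                                      ARRAY_SIZE(example_params));
        if (err)
                goto err_params;

        /* read the value the user chose for driver initialization */
        union devlink_param_value val;

        if (!devlink_param_driverinit_value_get(devlink,
                        DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, &val))
                example_apply_sriov(val.vbool);   /* hypothetical helper */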
diff --git a/include/net/dsa.h b/include/net/dsa.h
index fdbd6082945d..461e8a7661b7 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -259,6 +259,9 @@ struct dsa_switch {
/* Number of switch port queues */
unsigned int num_tx_queues;
+ unsigned long *bitmap;
+ unsigned long _bitmap;
+
/* Dynamically allocated ports, keep last */
size_t num_ports;
struct dsa_port ports[];
diff --git a/include/net/dst.h b/include/net/dst.h
index b3219cd8a5a1..7f735e76ca73 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -475,6 +475,14 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
return dst_orig;
}
+static inline struct dst_entry *
+xfrm_lookup_with_ifid(struct net *net, struct dst_entry *dst_orig,
+ const struct flowi *fl, const struct sock *sk,
+ int flags, u32 if_id)
+{
+ return dst_orig;
+}
+
static inline struct dst_entry *xfrm_lookup_route(struct net *net,
struct dst_entry *dst_orig,
const struct flowi *fl,
@@ -494,6 +502,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, const struct sock *sk,
int flags);
+struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
+ struct dst_entry *dst_orig,
+ const struct flowi *fl,
+ const struct sock *sk, int flags,
+ u32 if_id);
+
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, const struct sock *sk,
int flags);
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index adc24df56b90..6a4586dcdede 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -47,7 +47,7 @@ struct flow_dissector_key_tags {
struct flow_dissector_key_vlan {
u16 vlan_id:12,
vlan_priority:3;
- u16 padding;
+ __be16 vlan_tpid;
};
struct flow_dissector_key_mpls {
@@ -57,6 +57,21 @@ struct flow_dissector_key_mpls {
mpls_label:20;
};
+#define FLOW_DIS_TUN_OPTS_MAX 255
+/**
+ * struct flow_dissector_key_enc_opts:
+ * @data: tunnel option data
+ * @len: length of tunnel option data
+ * @dst_opt_type: tunnel option type
+ */
+struct flow_dissector_key_enc_opts {
+ u8 data[FLOW_DIS_TUN_OPTS_MAX]; /* Using IP_TUNNEL_OPTS_MAX is desired
+ * here but seems difficult to #include
+ */
+ u8 len;
+ __be16 dst_opt_type;
+};
+
struct flow_dissector_key_keyid {
__be32 keyid;
};
@@ -206,6 +221,9 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */
FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */
FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */
+ FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_vlan */
+ FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */
+ FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */
FLOW_DISSECTOR_KEY_MAX,
};
@@ -237,6 +255,7 @@ struct flow_keys {
struct flow_dissector_key_basic basic;
struct flow_dissector_key_tags tags;
struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_vlan cvlan;
struct flow_dissector_key_keyid keyid;
struct flow_dissector_key_ports ports;
struct flow_dissector_key_addrs addrs;
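
A sketch of declaring the new CVLAN key to the dissector alongside the existing VLAN key; the dissector variable is assumed to be owned by the caller:

        static const struct flow_dissector_key example_keys[] = {
                {
                        .key_id = FLOW_DISSECTOR_KEY_VLAN,
                        .offset = offsetof(struct flow_keys, vlan),
                },
                {
                        /* inner (customer) VLAN tag */
                        .key_id = FLOW_DISSECTOR_KEY_CVLAN,
                        .offset = offsetof(struct flow_keys, cvlan),
                },
        };

        skb_flow_dissector_init(&dissector, example_keys,
                                ARRAY_SIZE(example_keys));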
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 0304ba2ae353..883bb9085f15 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -59,13 +59,13 @@ int gnet_stats_finish_copy(struct gnet_dump *d);
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
- spinlock_t *stats_lock,
+ spinlock_t *lock,
seqcount_t *running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **ptr,
- spinlock_t *stats_lock,
+ spinlock_t *lock,
seqcount_t *running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 960236fb1681..feef706e1158 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -72,6 +73,8 @@ enum ieee80211_radiotap_presence {
IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
IEEE80211_RADIOTAP_VHT = 21,
IEEE80211_RADIOTAP_TIMESTAMP = 22,
+ IEEE80211_RADIOTAP_HE = 23,
+ IEEE80211_RADIOTAP_HE_MU = 24,
/* valid in every it_present bitmap, even vendor namespaces */
IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -202,6 +205,126 @@ enum ieee80211_radiotap_timestamp_flags {
IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 0x02,
};
+struct ieee80211_radiotap_he {
+ __le16 data1, data2, data3, data4, data5, data6;
+};
+
+enum ieee80211_radiotap_he_bits {
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK = 3,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU = 0,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU = 1,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU = 2,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG = 3,
+
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN = 0x0004,
+ IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN = 0x0008,
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN = 0x0010,
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN = 0x0020,
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN = 0x0040,
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN = 0x0080,
+ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN = 0x0100,
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN = 0x0200,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN = 0x0400,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN = 0x0800,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN = 0x1000,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN = 0x2000,
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN = 0x0001,
+ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN = 0x0002,
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN = 0x0004,
+ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN = 0x0008,
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN = 0x0010,
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN = 0x0020,
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN = 0x0040,
+ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN = 0x0080,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET = 0x3f00,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR = 0x003f,
+ IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE = 0x0040,
+ IEEE80211_RADIOTAP_HE_DATA3_UL_DL = 0x0080,
+ IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS = 0x0f00,
+ IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM = 0x1000,
+ IEEE80211_RADIOTAP_HE_DATA3_CODING = 0x2000,
+ IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA3_STBC = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID = 0x7ff0,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1 = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2 = 0x00f0,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3 = 0x0f00,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4 = 0xf000,
+
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ = 0,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ = 1,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ = 2,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ = 3,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_26T = 4,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_52T = 5,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_106T = 6,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_242T = 7,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_484T = 8,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_996T = 9,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_2x996T = 10,
+
+ IEEE80211_RADIOTAP_HE_DATA5_GI = 0x0030,
+ IEEE80211_RADIOTAP_HE_DATA5_GI_0_8 = 0,
+ IEEE80211_RADIOTAP_HE_DATA5_GI_1_6 = 1,
+ IEEE80211_RADIOTAP_HE_DATA5_GI_3_2 = 2,
+
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE = 0x00c0,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN = 0,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X = 1,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X = 2,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X = 3,
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS = 0x0700,
+ IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD = 0x3000,
+ IEEE80211_RADIOTAP_HE_DATA5_TXBF = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA6_NSTS = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA6_DOPPLER = 0x0010,
+ IEEE80211_RADIOTAP_HE_DATA6_TXOP = 0x7f00,
+ IEEE80211_RADIOTAP_HE_DATA6_MIDAMBLE_PDCTY = 0x8000,
+};
+
+struct ieee80211_radiotap_he_mu {
+ __le16 flags1, flags2;
+ u8 ru_ch1[4];
+ u8 ru_ch2[4];
+};
+
+enum ieee80211_radiotap_he_mu_bits {
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS = 0x000f,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN = 0x0010,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM = 0x0020,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN = 0x0040,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN = 0x0080,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN = 0x0100,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN = 0x0200,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN = 0x1000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU = 0x2000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN = 0x4000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW = 0x0003,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_20MHZ = 0x0000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_40MHZ = 0x0001,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_80MHZ = 0x0002,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_160MHZ = 0x0003,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN = 0x0004,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP = 0x0008,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS = 0x00f0,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW = 0x0300,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN= 0x0400,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800,
+};
+
/**
* ieee80211_get_radiotap_len - get radiotap header length
*/
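
A sketch of a driver pre-filling the HE radiotap header it prepends to a frame; per the mac80211 change later in this patch, the stack fills the rate-related fields itself. The BSS color value here is illustrative:

        struct ieee80211_radiotap_he he = {
                .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU |
                                     IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN),
                /* BSS color lives in data3 */
                .data3 = cpu_to_le16(42 & IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR),
        };

        /* prepend to the frame; mac80211 consumes it when
         * RX_FLAG_RADIOTAP_HE is set in the RX status flags
         */
        memcpy(skb_push(skb, sizeof(he)), &he, sizeof(he));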
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 384b90c62c0b..3ca969cbd161 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -43,7 +43,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
int *addr_len);
-struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb);
int inet_gro_complete(struct sk_buff *skb, int nhoff);
struct sk_buff *inet_gso_segment(struct sk_buff *skb,
netdev_features_t features);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 0a6c9e0f2b5a..371b3b45fd5c 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/poll.h>
+#include <linux/kernel.h>
#include <net/inet_sock.h>
#include <net/request_sock.h>
@@ -167,7 +168,8 @@ enum inet_csk_ack_state_t {
ICSK_ACK_SCHED = 1,
ICSK_ACK_TIMER = 2,
ICSK_ACK_PUSHED = 4,
- ICSK_ACK_PUSHED2 = 8
+ ICSK_ACK_PUSHED2 = 8,
+ ICSK_ACK_NOW = 16 /* Send the next ACK immediately (once) */
};
void inet_csk_init_xmit_timers(struct sock *sk,
@@ -224,7 +226,7 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
if (when > max_when) {
pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
- sk, what, when, current_text_addr());
+ sk, what, when, (void *)_THIS_IP_);
when = max_when;
}
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index ed07e3786d98..1662cbc0b46b 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -2,7 +2,7 @@
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
struct netns_frags {
/* sysctls */
@@ -57,7 +57,9 @@ struct frag_v6_compare_key {
* @lock: spinlock protecting this frag
* @refcnt: reference count of the queue
* @fragments: received fragments head
+ * @rb_fragments: received fragments rb-tree root
* @fragments_tail: received fragments tail
+ * @last_run_head: the head of the last "run"; see ip_fragment.c
* @stamp: timestamp of the last received fragment
* @len: total length of the original datagram
* @meat: length of received fragments so far
@@ -75,8 +77,10 @@ struct inet_frag_queue {
struct timer_list timer;
spinlock_t lock;
refcount_t refcnt;
- struct sk_buff *fragments;
+ struct sk_buff *fragments; /* Used in IPv6. */
+ struct rb_root rb_fragments; /* Used in IPv4. */
struct sk_buff *fragments_tail;
+ struct sk_buff *last_run_head;
ktime_t stamp;
int len;
int meat;
@@ -112,6 +116,9 @@ void inet_frag_kill(struct inet_frag_queue *q);
void inet_frag_destroy(struct inet_frag_queue *q);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
+/* Free all skbs in the queue; return the sum of their truesizes. */
+unsigned int inet_frag_rbtree_purge(struct rb_root *root);
+
static inline void inet_frag_put(struct inet_frag_queue *q)
{
if (refcount_dec_and_test(&q->refcnt))
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 83d5b3c2ac42..e03b93360f33 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -148,6 +148,7 @@ struct inet_cork {
__s16 tos;
char priority;
__u16 gso_size;
+ u64 transmit_time;
};
struct inet_cork_full {
@@ -358,4 +359,12 @@ static inline bool inet_get_convert_csum(struct sock *sk)
return !!inet_sk(sk)->convert_csum;
}
+
+static inline bool inet_can_nonlocal_bind(struct net *net,
+ struct inet_sock *inet)
+{
+ return net->ipv4.sysctl_ip_nonlocal_bind ||
+ inet->freebind || inet->transparent;
+}
+
#endif /* _INET_SOCK_H */
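
For reference, a simplified sketch of the bind()-path check this helper centralizes (the full check in __inet_bind also accepts multicast and broadcast addresses); addr and chk_addr_ret are assumed from the surrounding bind code:

        if (!inet_can_nonlocal_bind(net, inet) &&
            addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
            chk_addr_ret != RTN_LOCAL)
                return -EADDRNOTAVAIL;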
diff --git a/include/net/ip.h b/include/net/ip.h
index 0d2281b4b27a..e44b1a44f67a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -72,13 +72,27 @@ struct ipcm_cookie {
__be32 addr;
int oif;
struct ip_options_rcu *opt;
- __u8 tx_flags;
__u8 ttl;
__s16 tos;
char priority;
__u16 gso_size;
};
+static inline void ipcm_init(struct ipcm_cookie *ipcm)
+{
+ *ipcm = (struct ipcm_cookie) { .tos = -1 };
+}
+
+static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
+ const struct inet_sock *inet)
+{
+ ipcm_init(ipcm);
+
+ ipcm->sockc.tsflags = inet->sk.sk_tsflags;
+ ipcm->oif = inet->sk.sk_bound_dev_if;
+ ipcm->addr = inet->inet_saddr;
+}
+
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
@@ -138,6 +152,8 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
struct net_device *orig_dev);
+void ip_list_rcv(struct list_head *head, struct packet_type *pt,
+ struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -148,7 +164,8 @@ void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
-int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
+int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
+ __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
int getfrag(void *from, char *to, int offset, int len,
@@ -174,6 +191,12 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
struct ipcm_cookie *ipc, struct rtable **rtp,
struct inet_cork *cork, unsigned int flags);
+static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
+ struct flowi *fl)
+{
+ return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
+}
+
static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
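
A sketch of a sendmsg-style caller using the new initializer instead of open-coding each cookie field; sk, inet and msg are assumed from the surrounding code:

        struct ipcm_cookie ipc;

        /* tos = -1; tsflags, oif and source address taken from the socket */
        ipcm_init_sk(&ipc, inet);

        if (msg->msg_controllen) {
                err = ip_cmsg_send(sk, msg, &ipc, false);
                if (err)
                        return err;
        }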
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 90ff430f5e9d..b0d022ff6ea1 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -466,10 +466,12 @@ static inline void ip_tunnel_info_opts_get(void *to,
}
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
- const void *from, int len)
+ const void *from, int len,
+ __be16 flags)
{
memcpy(ip_tunnel_info_opts(info), from, len);
info->options_len = len;
+ info->key.tun_flags |= flags;
}
static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
@@ -511,9 +513,11 @@ static inline void ip_tunnel_info_opts_get(void *to,
}
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
- const void *from, int len)
+ const void *from, int len,
+ __be16 flags)
{
info->options_len = 0;
+ info->key.tun_flags |= flags;
}
#endif /* CONFIG_INET */
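
Callers now pass the matching tunnel-option flag together with the data, e.g. for GENEVE options (opts and opts_len are assumed from the caller):

        ip_tunnel_info_opts_set(info, opts, opts_len,
                                TUNNEL_GENEVE_OPT);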
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index a0bec23c6d5e..a0d2e0bb9a94 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -335,6 +335,11 @@ enum ip_vs_sctp_states {
IP_VS_SCTP_S_LAST
};
+/* Connection templates use bits from state */
+#define IP_VS_CTPL_S_NONE 0x0000
+#define IP_VS_CTPL_S_ASSURED 0x0001
+#define IP_VS_CTPL_S_LAST 0x0002
+
/* Delta sequence info structure
* Each ip_vs_conn has 2 (output AND input seq. changes).
* Only used in the VS/NAT.
@@ -1221,7 +1226,7 @@ struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
struct ip_vs_dest *dest, __u32 fwmark);
void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
-const char *ip_vs_state_name(__u16 proto, int state);
+const char *ip_vs_state_name(const struct ip_vs_conn *cp);
void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
@@ -1289,6 +1294,17 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
atomic_inc(&ctl_cp->n_control);
}
+/* Mark our template as assured */
+static inline void
+ip_vs_control_assure_ct(struct ip_vs_conn *cp)
+{
+ struct ip_vs_conn *ct = cp->control;
+
+ if (ct && !(ct->state & IP_VS_CTPL_S_ASSURED) &&
+ (ct->flags & IP_VS_CONN_F_TEMPLATE))
+ ct->state |= IP_VS_CTPL_S_ASSURED;
+}
+
/* IPVS netns init & cleanup functions */
int ip_vs_estimator_net_init(struct netns_ipvs *ipvs);
int ip_vs_control_net_init(struct netns_ipvs *ipvs);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 8f73be494503..ff33f498c137 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -294,6 +294,7 @@ struct ipv6_fl_socklist {
};
struct ipcm6_cookie {
+ struct sockcm_cookie sockc;
__s16 hlimit;
__s16 tclass;
__s8 dontfrag;
@@ -301,6 +302,25 @@ struct ipcm6_cookie {
__u16 gso_size;
};
+static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
+{
+ *ipc6 = (struct ipcm6_cookie) {
+ .hlimit = -1,
+ .tclass = -1,
+ .dontfrag = -1,
+ };
+}
+
+static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
+ const struct ipv6_pinfo *np)
+{
+ *ipc6 = (struct ipcm6_cookie) {
+ .hlimit = -1,
+ .tclass = np->tclass,
+ .dontfrag = np->dontfrag,
+ };
+}
+
static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
{
struct ipv6_txoptions *opt;
@@ -554,34 +574,6 @@ static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
}
#endif
-struct inet_frag_queue;
-
-enum ip6_defrag_users {
- IP6_DEFRAG_LOCAL_DELIVER,
- IP6_DEFRAG_CONNTRACK_IN,
- __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
- IP6_DEFRAG_CONNTRACK_OUT,
- __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
- IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
- __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
-};
-
-void ip6_frag_init(struct inet_frag_queue *q, const void *a);
-extern const struct rhashtable_params ip6_rhash_params;
-
-/*
- * Equivalent of ipv4 struct ip
- */
-struct frag_queue {
- struct inet_frag_queue q;
-
- int iif;
- __u16 nhoffset;
- u8 ecn;
-};
-
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
-
static inline bool ipv6_addr_any(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
@@ -790,6 +782,13 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
#if IS_ENABLED(CONFIG_IPV6)
+static inline bool ipv6_can_nonlocal_bind(struct net *net,
+ struct inet_sock *inet)
+{
+ return net->ipv6.sysctl.ip_nonlocal_bind ||
+ inet->freebind || inet->transparent;
+}
+
/* Sysctl settings for net ipv6.auto_flowlabels */
#define IP6_AUTO_FLOW_LABEL_OFF 0
#define IP6_AUTO_FLOW_LABEL_OPTOUT 1
@@ -915,6 +914,8 @@ static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6)
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
+void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
+ struct net_device *orig_dev);
int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -931,8 +932,7 @@ int ip6_append_data(struct sock *sk,
int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
- struct rt6_info *rt, unsigned int flags,
- const struct sockcm_cookie *sockc);
+ struct rt6_info *rt, unsigned int flags);
int ip6_push_pending_frames(struct sock *sk);
@@ -949,8 +949,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
void *from, int length, int transhdrlen,
struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
struct rt6_info *rt, unsigned int flags,
- struct inet_cork_full *cork,
- const struct sockcm_cookie *sockc);
+ struct inet_cork_full *cork);
static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
{
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
new file mode 100644
index 000000000000..6ced1e6899b6
--- /dev/null
+++ b/include/net/ipv6_frag.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IPV6_FRAG_H
+#define _IPV6_FRAG_H
+#include <linux/kernel.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+#include <net/inet_frag.h>
+
+enum ip6_defrag_users {
+ IP6_DEFRAG_LOCAL_DELIVER,
+ IP6_DEFRAG_CONNTRACK_IN,
+ __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
+ IP6_DEFRAG_CONNTRACK_OUT,
+ __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
+ IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
+ __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
+};
+
+/*
+ * Equivalent of ipv4 struct ip
+ */
+struct frag_queue {
+ struct inet_frag_queue q;
+
+ int iif;
+ __u16 nhoffset;
+ u8 ecn;
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
+{
+ struct frag_queue *fq = container_of(q, struct frag_queue, q);
+ const struct frag_v6_compare_key *key = a;
+
+ q->key.v6 = *key;
+ fq->ecn = 0;
+}
+
+static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ return jhash2(data,
+ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct inet_frag_queue *fq = data;
+
+ return jhash2((const u32 *)&fq->key.v6,
+ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static inline int
+ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+ const struct frag_v6_compare_key *key = arg->key;
+ const struct inet_frag_queue *fq = ptr;
+
+ return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+static inline void
+ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
+{
+ struct net_device *dev = NULL;
+ struct sk_buff *head;
+
+ rcu_read_lock();
+ spin_lock(&fq->q.lock);
+
+ if (fq->q.flags & INET_FRAG_COMPLETE)
+ goto out;
+
+ inet_frag_kill(&fq->q);
+
+ dev = dev_get_by_index_rcu(net, fq->iif);
+ if (!dev)
+ goto out;
+
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+
+ /* Don't send error if the first segment did not arrive. */
+ head = fq->q.fragments;
+ if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
+ goto out;
+
+ head->dev = dev;
+ skb_get(head);
+ spin_unlock(&fq->q.lock);
+
+ icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
+ kfree_skb(head);
+ goto out_rcu_unlock;
+
+out:
+ spin_unlock(&fq->q.lock);
+out_rcu_unlock:
+ rcu_read_unlock();
+ inet_frag_put(&fq->q);
+}
+#endif
+#endif
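
A sketch of how these shared helpers plug into rhashtable parameters, mirroring what ip6_rhash_params does; shown for illustration only:

        static const struct rhashtable_params example_rhash_params = {
                .head_offset            = offsetof(struct inet_frag_queue, node),
                .hashfn                 = ip6frag_key_hashfn,
                .obj_hashfn             = ip6frag_obj_hashfn,
                .obj_cmpfn              = ip6frag_obj_cmpfn,
                .automatic_shrinking    = true,
        };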
diff --git a/include/net/lag.h b/include/net/lag.h
new file mode 100644
index 000000000000..95b880e6fdde
--- /dev/null
+++ b/include/net/lag.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IF_LAG_H
+#define _LINUX_IF_LAG_H
+
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+#include <net/bonding.h>
+
+static inline bool net_lag_port_dev_txable(const struct net_device *port_dev)
+{
+ if (netif_is_team_port(port_dev))
+ return team_port_dev_txable(port_dev);
+ else
+ return bond_is_active_slave_dev(port_dev);
+}
+
+#endif /* _LINUX_IF_LAG_H */
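
A sketch of a caller using the new LAG helper; netif_is_lag_port() is the existing netdevice test, and both branches above take RCU internally:

        if (netif_is_lag_port(port_dev) &&
            !net_lag_port_dev_txable(port_dev))
                return;         /* skip inactive LAG member */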
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 851a5e19ae32..5790f55c241d 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -23,6 +23,7 @@
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include <net/codel.h>
+#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>
/**
@@ -162,6 +163,8 @@ enum ieee80211_ac_numbers {
* @txop: maximum burst time in units of 32 usecs, 0 meaning disabled
* @acm: is mandatory admission control required for the access category
* @uapsd: is U-APSD mode enabled for the queue
+ * @mu_edca: is the MU EDCA configured
+ * @mu_edca_param_rec: MU EDCA Parameter Record for HE
*/
struct ieee80211_tx_queue_params {
u16 txop;
@@ -170,6 +173,8 @@ struct ieee80211_tx_queue_params {
u8 aifs;
bool acm;
bool uapsd;
+ bool mu_edca;
+ struct ieee80211_he_mu_edca_param_ac_rec mu_edca_param_rec;
};
struct ieee80211_low_level_stats {
@@ -463,6 +468,15 @@ struct ieee80211_mu_group_data {
* This structure keeps information about a BSS (and an association
* to that BSS) that can change during the lifetime of the BSS.
*
+ * @bss_color: 6-bit value to mark inter-BSS frame, if BSS supports HE
+ * @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
+ * @multi_sta_back_32bit: supports BA bitmap of 32-bits in Multi-STA BACK
+ * @uora_exists: is the UORA element advertised by AP
+ * @ack_enabled: indicates support for receiving a multi-TID frame that
+ * solicits either ACK, BACK or both
+ * @uora_ocw_range: UORA element's OCW Range field
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @he_support: does this BSS support HE
* @assoc: association status
* @ibss_joined: indicates whether this station is part of an IBSS
* or not
@@ -550,6 +564,14 @@ struct ieee80211_mu_group_data {
*/
struct ieee80211_bss_conf {
const u8 *bssid;
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ bool multi_sta_back_32bit;
+ bool uora_exists;
+ bool ack_enabled;
+ u8 uora_ocw_range;
+ u16 frame_time_rts_th;
+ bool he_support;
/* association related data */
bool assoc, ibss_joined;
bool ibss_creator;
@@ -1106,6 +1128,18 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_AMPDU_EOF_BIT: Value of the EOF bit in the A-MPDU delimiter for this
* frame
* @RX_FLAG_AMPDU_EOF_BIT_KNOWN: The EOF value is known
+ * @RX_FLAG_RADIOTAP_HE: HE radiotap data is present
+ * (&struct ieee80211_radiotap_he, mac80211 will fill in
+ * - DATA3_DATA_MCS
+ * - DATA3_DATA_DCM
+ * - DATA3_CODING
+ * - DATA5_GI
+ * - DATA5_DATA_BW_RU_ALLOC
+ * - DATA6_NSTS
+ * - DATA3_STBC
+ * from the RX info data, so leave those zeroed when building this data)
+ * @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present
+ * (&struct ieee80211_radiotap_he_mu)
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -1134,6 +1168,8 @@ enum mac80211_rx_flags {
RX_FLAG_ICV_STRIPPED = BIT(23),
RX_FLAG_AMPDU_EOF_BIT = BIT(24),
RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25),
+ RX_FLAG_RADIOTAP_HE = BIT(26),
+ RX_FLAG_RADIOTAP_HE_MU = BIT(27),
};
/**
@@ -1164,6 +1200,7 @@ enum mac80211_rx_encoding {
RX_ENC_LEGACY = 0,
RX_ENC_HT,
RX_ENC_VHT,
+ RX_ENC_HE,
};
/**
@@ -1198,6 +1235,9 @@ enum mac80211_rx_encoding {
* @encoding: &enum mac80211_rx_encoding
* @bw: &enum rate_info_bw
* @enc_flags: uses bits from &enum mac80211_rx_encoding_flags
+ * @he_ru: HE RU, from &enum nl80211_he_ru_alloc
+ * @he_gi: HE GI, from &enum nl80211_he_gi
+ * @he_dcm: HE DCM value
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
@@ -1211,7 +1251,8 @@ struct ieee80211_rx_status {
u32 flag;
u16 freq;
u8 enc_flags;
- u8 encoding:2, bw:3;
+ u8 encoding:2, bw:3, he_ru:3;
+ u8 he_gi:2, he_dcm:1;
u8 rate_idx;
u8 nss;
u8 rx_flags;
@@ -1770,6 +1811,7 @@ struct ieee80211_sta_rates {
* @supp_rates: Bitmap of supported rates (per band)
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
+ * @he_cap: HE capabilities of this STA
 * @max_rx_aggregation_subframes: maximum number of frames in a single A-MPDU
* that this station is allowed to transmit to us.
* Can be modified by driver.
@@ -1805,7 +1847,8 @@ struct ieee80211_sta {
u16 aid;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
- u8 max_rx_aggregation_subframes;
+ struct ieee80211_sta_he_cap he_cap;
+ u16 max_rx_aggregation_subframes;
bool wme;
u8 uapsd_queues;
u8 max_sp;
@@ -2196,10 +2239,11 @@ enum ieee80211_hw_flags {
* it shouldn't be set.
*
* @max_tx_aggregation_subframes: maximum number of subframes in an
- * aggregate an HT driver will transmit. Though ADDBA will advertise
- * a constant value of 64 as some older APs can crash if the window
- * size is smaller (an example is LinkSys WRT120N with FW v1.0.07
- * build 002 Jun 18 2012).
+ * aggregate an HT/HE device will transmit. In HT AddBA we'll
+ * advertise a constant value of 64 as some older APs crash if
+ * the window size is smaller (an example is LinkSys WRT120N
+ * with FW v1.0.07 build 002 Jun 18 2012).
+ * For AddBA to HE capable peers this value will be used.
*
* @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum
* of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list.
@@ -2216,6 +2260,8 @@ enum ieee80211_hw_flags {
* the default is _GI | _BANDWIDTH.
* Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
*
+ * @radiotap_he: HE radiotap validity flags
+ *
* @radiotap_timestamp: Information for the radiotap timestamp field; if the
* 'units_pos' member is set to a non-negative value it must be set to
 * a combination of an IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a
@@ -2263,8 +2309,8 @@ struct ieee80211_hw {
u8 max_rates;
u8 max_report_rates;
u8 max_rate_tries;
- u8 max_rx_aggregation_subframes;
- u8 max_tx_aggregation_subframes;
+ u16 max_rx_aggregation_subframes;
+ u16 max_tx_aggregation_subframes;
u8 max_tx_fragments;
u8 offchannel_tx_hw_queue;
u8 radiotap_mcs_details;
@@ -2904,7 +2950,7 @@ struct ieee80211_ampdu_params {
struct ieee80211_sta *sta;
u16 tid;
u16 ssn;
- u8 buf_size;
+ u16 buf_size;
bool amsdu;
u16 timeout;
};
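
A minimal sketch of the driver side of these HE RX additions, with values taken from an imaginary RX descriptor (the numbers are hypothetical, only the field and flag names come from the hunks above):

/* sketch: driver RX path filling the new HE status fields */
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

status->encoding = RX_ENC_HE;
status->nss      = 2;				/* hypothetical */
status->rate_idx = 7;				/* HE-MCS 7 */
status->he_gi    = NL80211_RATE_INFO_HE_GI_1_6;
status->he_ru    = NL80211_RATE_INFO_HE_RU_ALLOC_106;
status->he_dcm   = 0;

/* Only if a struct ieee80211_radiotap_he was prepended to the frame
 * data: mac80211 fills MCS/DCM/coding/GI/BW-RU/NSTS/STBC from the
 * fields above, so those stay zeroed in the prepended struct.
 */
status->flag |= RX_FLAG_RADIOTAP_HE;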
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index a71264d75d7f..9b5fdc50519a 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -10,6 +10,7 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/sysctl.h>
+#include <linux/uidgid.h>
#include <net/flow.h>
#include <net/netns/core.h>
@@ -170,6 +171,8 @@ extern struct net init_net;
struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
struct net *old_net);
+void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
+
void net_ns_barrier(void);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
@@ -182,6 +185,13 @@ static inline struct net *copy_net_ns(unsigned long flags,
return old_net;
}
+static inline void net_ns_get_ownership(const struct net *net,
+ kuid_t *uid, kgid_t *gid)
+{
+ *uid = GLOBAL_ROOT_UID;
+ *gid = GLOBAL_ROOT_GID;
+}
+
static inline void net_ns_barrier(void) {}
#endif /* CONFIG_NET_NS */
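
net_ns_get_ownership() resolves the uid/gid that should own per-namespace objects, with the !CONFIG_NET_NS stub above falling back to global root. A hedged fragment of the intended use (netdev is assumed to be in scope):

kuid_t uid;
kgid_t gid;

/* owner of the device's netns; global root if the mapping fails */
net_ns_get_ownership(dev_net(netdev), &uid, &gid);
/* uid/gid can then be handed to sysfs/kobject ownership callbacks */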
diff --git a/include/net/netevent.h b/include/net/netevent.h
index d9918261701c..4107016c3bb4 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -28,6 +28,7 @@ enum netevent_notif_type {
NETEVENT_DELAY_PROBE_TIME_UPDATE, /* arg is struct neigh_parms ptr */
NETEVENT_IPV4_MPATH_HASH_UPDATE, /* arg is struct net ptr */
NETEVENT_IPV6_MPATH_HASH_UPDATE, /* arg is struct net ptr */
+ NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE, /* arg is struct net ptr */
};
int register_netevent_notifier(struct notifier_block *nb);
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 73f825732326..c84b51682f08 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -10,9 +10,6 @@
#ifndef _NF_CONNTRACK_IPV4_H
#define _NF_CONNTRACK_IPV4_H
-
-const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
-
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 062dc19b5840..7e012312cd61 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -41,6 +41,11 @@ union nf_conntrack_expect_proto {
/* insert expect proto private data here */
};
+struct nf_conntrack_net {
+ unsigned int users4;
+ unsigned int users6;
+};
+
#include <linux/types.h>
#include <linux/skbuff.h>
@@ -171,8 +176,6 @@ void nf_ct_netns_put(struct net *net, u8 nfproto);
*/
void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
-void nf_ct_free_hashtable(void *hash, unsigned int size);
-
int nf_conntrack_hash_check_insert(struct nf_conn *ct);
bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 9b5e7634713e..2a3e0974a6af 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -14,7 +14,6 @@
#define _NF_CONNTRACK_CORE_H
#include <linux/netfilter.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
@@ -40,16 +39,8 @@ void nf_conntrack_cleanup_start(void);
void nf_conntrack_init_end(void);
void nf_conntrack_cleanup_end(void);
-bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff,
- unsigned int dataoff, u_int16_t l3num, u_int8_t protonum,
- struct net *net,
- struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l3proto *l3proto,
- const struct nf_conntrack_l4proto *l4proto);
-
bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig,
- const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto);
/* Find a connection corresponding to a tuple. */
@@ -75,10 +66,8 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
return ret;
}
-void
-print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l3proto *l3proto,
- const struct nf_conntrack_l4proto *proto);
+void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_l4proto *proto);
#define CONNTRACK_LOCKS 1024
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index 3a188a0923a3..4b2b2baf8ab4 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -1,8 +1,23 @@
#ifndef _NF_CONNTRACK_COUNT_H
#define _NF_CONNTRACK_COUNT_H
+#include <linux/list.h>
+
struct nf_conncount_data;
+enum nf_conncount_list_add {
+ NF_CONNCOUNT_ADDED, /* list add was ok */
+ NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */
+ NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */
+};
+
+struct nf_conncount_list {
+ spinlock_t list_lock;
+ struct list_head head; /* connections with the same filtering key */
+ unsigned int count; /* length of list */
+ bool dead;
+};
+
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
unsigned int keylen);
void nf_conncount_destroy(struct net *net, unsigned int family,
@@ -14,15 +29,21 @@ unsigned int nf_conncount_count(struct net *net,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone);
-unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone,
- bool *addit);
+void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone,
+ bool *addit);
+
+void nf_conncount_list_init(struct nf_conncount_list *list);
+
+enum nf_conncount_list_add
+nf_conncount_add(struct nf_conncount_list *list,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone);
-bool nf_conncount_add(struct hlist_head *head,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone);
+bool nf_conncount_gc_list(struct net *net,
+ struct nf_conncount_list *list);
-void nf_conncount_cache_free(struct hlist_head *hhead);
+void nf_conncount_cache_free(struct nf_conncount_list *list);
#endif
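
A minimal sketch of the reworked conncount call sequence with the new self-locking list type; the limit policy and error codes are illustrative, and NF_CONNCOUNT_SKIP handling is elided:

static int my_count_and_limit(struct net *net, struct nf_conncount_list *list,
			      const struct nf_conntrack_tuple *tuple,
			      const struct nf_conntrack_zone *zone,
			      unsigned int limit)
{
	bool addit;

	/* list was set up once earlier with nf_conncount_list_init() */
	nf_conncount_lookup(net, list, tuple, zone, &addit);

	if (addit && nf_conncount_add(list, tuple, zone) == NF_CONNCOUNT_ERR)
		return -ENOMEM;	/* allocation failed, must drop the skb */

	return list->count > limit ? -EDQUOT : 0;
}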
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 32c2a94a219d..2492120b8097 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -103,9 +103,7 @@ int nf_conntrack_helpers_register(struct nf_conntrack_helper *, unsigned int);
void nf_conntrack_helpers_unregister(struct nf_conntrack_helper *,
unsigned int);
-struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct,
- struct nf_conntrack_helper *helper,
- gfp_t gfp);
+struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);
int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
gfp_t flags);
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
deleted file mode 100644
index d5808f3e2715..000000000000
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C)2003,2004 USAGI/WIDE Project
- *
- * Header for use in defining a given L3 protocol for connection tracking.
- *
- * Author:
- * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *
- * Derived from include/netfilter_ipv4/ip_conntrack_protocol.h
- */
-
-#ifndef _NF_CONNTRACK_L3PROTO_H
-#define _NF_CONNTRACK_L3PROTO_H
-#include <linux/netlink.h>
-#include <net/netlink.h>
-#include <linux/seq_file.h>
-#include <net/netfilter/nf_conntrack.h>
-
-struct nf_conntrack_l3proto {
- /* L3 Protocol Family number. ex) PF_INET */
- u_int16_t l3proto;
-
- /* size of tuple nlattr, fills a hole */
- u16 nla_size;
-
- /*
- * Try to fill in the third arg: nhoff is offset of l3 proto
- * hdr. Return true if possible.
- */
- bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff,
- struct nf_conntrack_tuple *tuple);
-
- /*
- * Invert the per-proto part of the tuple: ie. turn xmit into reply.
- * Some packets can't be inverted: return 0 in that case.
- */
- bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig);
-
- /*
- * Called before tracking.
- * *dataoff: offset of protocol header (TCP, UDP,...) in skb
- * *protonum: protocol number
- */
- int (*get_l4proto)(const struct sk_buff *skb, unsigned int nhoff,
- unsigned int *dataoff, u_int8_t *protonum);
-
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- int (*tuple_to_nlattr)(struct sk_buff *skb,
- const struct nf_conntrack_tuple *t);
- int (*nlattr_to_tuple)(struct nlattr *tb[],
- struct nf_conntrack_tuple *t);
- const struct nla_policy *nla_policy;
-#endif
-
- /* Called when netns wants to use connection tracking */
- int (*net_ns_get)(struct net *);
- void (*net_ns_put)(struct net *);
-
- /* Module (if any) which this is connected to. */
- struct module *me;
-};
-
-extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[NFPROTO_NUMPROTO];
-
-/* Protocol global registration. */
-int nf_ct_l3proto_register(const struct nf_conntrack_l3proto *proto);
-void nf_ct_l3proto_unregister(const struct nf_conntrack_l3proto *proto);
-
-const struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
-
-/* Existing built-in protocols */
-extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
-
-static inline struct nf_conntrack_l3proto *
-__nf_ct_l3proto_find(u_int16_t l3proto)
-{
- if (unlikely(l3proto >= NFPROTO_NUMPROTO))
- return &nf_conntrack_l3proto_generic;
- return rcu_dereference(nf_ct_l3protos[l3proto]);
-}
-
-#endif /*_NF_CONNTRACK_L3PROTO_H*/
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index a7220eef9aee..8465263b297d 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -36,7 +36,7 @@ struct nf_conntrack_l4proto {
struct net *net, struct nf_conntrack_tuple *tuple);
/* Invert the per-proto part of the tuple: ie. turn xmit into reply.
- * Some packets can't be inverted: return 0 in that case.
+ * Only used by icmp; most protocols use a generic version.
*/
bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);
@@ -45,13 +45,12 @@ struct nf_conntrack_l4proto {
int (*packet)(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- unsigned int *timeouts);
+ enum ip_conntrack_info ctinfo);
 /* Called when a new connection for this protocol is found;
 * returns TRUE if it's OK. If so, packet() is called next. */
bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff, unsigned int *timeouts);
+ unsigned int dataoff);
/* Called when a conntrack entry is destroyed */
void (*destroy)(struct nf_conn *ct);
@@ -63,9 +62,6 @@ struct nf_conntrack_l4proto {
/* called by gc worker if table is full */
bool (*can_early_drop)(const struct nf_conn *ct);
- /* Return the array of timeouts for this protocol. */
- unsigned int *(*get_timeouts)(struct net *net);
-
/* convert protoinfo to nfnetink attributes */
int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct);
@@ -81,7 +77,6 @@ struct nf_conntrack_l4proto {
struct nf_conntrack_tuple *t);
const struct nla_policy *nla_policy;
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
struct {
int (*nlattr_to_obj)(struct nlattr *tb[],
struct net *net, void *data);
@@ -91,7 +86,6 @@ struct nf_conntrack_l4proto {
u16 nlattr_max;
const struct nla_policy *nla_policy;
} ctnl_timeout;
-#endif
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
void (*print_conntrack)(struct seq_file *s, struct nf_conn *);
@@ -134,10 +128,6 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
/* Protocol global registration. */
int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto);
void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto);
-int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const proto[],
- unsigned int num_proto);
-void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const proto[],
- unsigned int num_proto);
/* Generic netlink helpers */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 9468ab4ad12d..d5f62cc6c2ae 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -11,24 +11,28 @@
#define CTNL_TIMEOUT_NAME_MAX 32
+struct nf_ct_timeout {
+ __u16 l3num;
+ const struct nf_conntrack_l4proto *l4proto;
+ char data[0];
+};
+
struct ctnl_timeout {
struct list_head head;
struct rcu_head rcu_head;
refcount_t refcnt;
char name[CTNL_TIMEOUT_NAME_MAX];
- __u16 l3num;
- const struct nf_conntrack_l4proto *l4proto;
- char data[0];
+ struct nf_ct_timeout timeout;
};
struct nf_conn_timeout {
- struct ctnl_timeout __rcu *timeout;
+ struct nf_ct_timeout __rcu *timeout;
};
static inline unsigned int *
nf_ct_timeout_data(struct nf_conn_timeout *t)
{
- struct ctnl_timeout *timeout;
+ struct nf_ct_timeout *timeout;
timeout = rcu_dereference(t->timeout);
if (timeout == NULL)
@@ -49,7 +53,7 @@ struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct)
static inline
struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
- struct ctnl_timeout *timeout,
+ struct nf_ct_timeout *timeout,
gfp_t gfp)
{
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
@@ -67,32 +71,23 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
#endif
};
-static inline unsigned int *
-nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
- const struct nf_conntrack_l4proto *l4proto)
+static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct)
{
+ unsigned int *timeouts = NULL;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
struct nf_conn_timeout *timeout_ext;
- unsigned int *timeouts;
timeout_ext = nf_ct_timeout_find(ct);
- if (timeout_ext) {
+ if (timeout_ext)
timeouts = nf_ct_timeout_data(timeout_ext);
- if (unlikely(!timeouts))
- timeouts = l4proto->get_timeouts(net);
- } else {
- timeouts = l4proto->get_timeouts(net);
- }
-
- return timeouts;
-#else
- return l4proto->get_timeouts(net);
#endif
+ return timeouts;
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
int nf_conntrack_timeout_init(void);
void nf_conntrack_timeout_fini(void);
+void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout);
#else
static inline int nf_conntrack_timeout_init(void)
{
@@ -106,8 +101,8 @@ static inline void nf_conntrack_timeout_fini(void)
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-extern struct ctnl_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name);
-extern void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout);
+extern struct nf_ct_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name);
+extern void (*nf_ct_timeout_put_hook)(struct nf_ct_timeout *timeout);
#endif
#endif /* _NF_CONNTRACK_TIMEOUT_H */
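
With the get_timeouts() callback gone (see the l4proto hunk above), nf_ct_timeout_lookup() returns NULL unless an nfnetlink timeout policy is attached, and each tracker supplies its own per-netns defaults. A sketch of the expected ->packet() pattern; my_proto_default_timeouts() and MY_PROTO_REPLIED are hypothetical:

static int my_proto_packet(struct nf_conn *ct, const struct sk_buff *skb,
			   unsigned int dataoff, enum ip_conntrack_info ctinfo)
{
	unsigned int *timeouts = nf_ct_timeout_lookup(ct);

	if (!timeouts)	/* no nfnetlink policy attached */
		timeouts = my_proto_default_timeouts(nf_ct_net(ct));

	nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[MY_PROTO_REPLIED]);
	return NF_ACCEPT;
}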
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index ba9fa4592f2b..0e355f4a3d76 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -4,7 +4,7 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#include <linux/rcupdate.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/dst.h>
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index e811ac07ea94..0d3920896d50 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -106,7 +106,8 @@ int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
u8 proto, int fragment, unsigned int offset,
unsigned int logflags);
-void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk);
+void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
+ struct sock *sk);
void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index a05134507e7b..8da837d2aaf9 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -71,4 +71,11 @@ extern struct nft_set_type nft_set_hash_fast_type;
extern struct nft_set_type nft_set_rbtree_type;
extern struct nft_set_type nft_set_bitmap_type;
+struct nft_expr;
+struct nft_regs;
+struct nft_pktinfo;
+void nft_meta_get_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_lookup_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
index 4cc64c8446eb..82d0e41b76f2 100644
--- a/include/net/netfilter/nf_tproxy.h
+++ b/include/net/netfilter/nf_tproxy.h
@@ -17,6 +17,14 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
return false;
}
+/* assign a socket to the skb -- consumes sk */
+static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+}
+
__be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr);
/**
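
nf_tproxy_assign_sock() factors out the skb-to-socket handover the TPROXY target performs once a transparent socket has been found; it consumes the socket reference via the sock_edemux destructor. A hedged sketch, where the lookup call paraphrases the nf_tproxy_get_sock_v4() declaration in this header:

struct sock *sk;

sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
			   iph->saddr, iph->daddr,
			   hp->source, hp->dest,
			   skb->dev, NF_TPROXY_LOOKUP_LISTENER);
if (sk && nf_tproxy_sk_is_transparent(sk))
	nf_tproxy_assign_sock(skb, sk);	/* consumes the sk reference */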
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index 24c78183a4c2..16a842456189 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -9,12 +9,7 @@ struct net;
static inline u32 net_hash_mix(const struct net *net)
{
#ifdef CONFIG_NET_NS
- /*
- * shift this right to eliminate bits, that are
- * always zeroed
- */
-
- return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
+ return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
#else
return 0;
#endif
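
The new shift discards only the low bits guaranteed zero by the size of struct net, instead of assuming L1_CACHE_SHIFT alignment, so more of the pointer survives into hash mixing. A sketch of how the value is typically consumed, as a jhash seed (the flow-hash wrapper itself is illustrative):

#include <linux/jhash.h>

static u32 my_flow_hash(const struct net *net, __be32 saddr, __be32 daddr,
			u32 ports)
{
	/* fold the netns identity into the hash as the seed */
	return jhash_3words((__force u32)saddr, (__force u32)daddr,
			    ports, net_hash_mix(net));
}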
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 661348f23ea5..e47503b4e4d1 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -98,6 +98,7 @@ struct netns_ipv4 {
int sysctl_ip_default_ttl;
int sysctl_ip_no_pmtu_disc;
int sysctl_ip_fwd_use_pmtu;
+ int sysctl_ip_fwd_update_priority;
int sysctl_ip_nonlocal_bind;
/* Shall we try to damage output packets if routing dev changes? */
int sysctl_ip_dynaddr;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 762ac9931b62..f0e396ab9bec 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -32,6 +32,7 @@ struct netns_sysctl_ipv6 {
int flowlabel_consistency;
int auto_flowlabels;
int icmpv6_time;
+ int icmpv6_echo_ignore_all;
int anycast_src_echo_reply;
int ip_nonlocal_bind;
int fwmark_reflect;
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 94767ea3a490..286fd960896f 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -7,6 +7,7 @@
struct netns_nftables {
struct list_head tables;
struct list_head commit_list;
+ struct mutex commit_mutex;
unsigned int base_seq;
u8 gencursor;
u8 validate_state;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 20b059574e60..ef727f71336e 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -7,12 +7,16 @@
#include <net/sch_generic.h>
#include <net/act_api.h>
+/* TC action not accessible from user space */
+#define TC_ACT_REINSERT (TC_ACT_VALUE_MAX + 1)
+
/* Basic packet classifier frontend definitions. */
struct tcf_walker {
int stop;
int skip;
int count;
+ unsigned long cookie;
int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
@@ -36,9 +40,9 @@ struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
#ifdef CONFIG_NET_CLS
-struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
- bool create);
-void tcf_chain_put(struct tcf_chain *chain);
+struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
+ u32 chain_index);
+void tcf_chain_put_by_act(struct tcf_chain *chain);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
@@ -73,11 +77,13 @@ void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident,
- void *cb_priv);
+ void *cb_priv,
+ struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident,
- void *cb_priv);
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
+ void *cb_priv, struct netlink_ext_ack *extack);
+void __tcf_block_cb_unregister(struct tcf_block *block,
+ struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident);
@@ -111,11 +117,6 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
{
}
-static inline bool tcf_block_shared(struct tcf_block *block)
-{
- return false;
-}
-
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
return NULL;
@@ -166,7 +167,8 @@ unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident,
- void *cb_priv)
+ void *cb_priv,
+ struct netlink_ext_ack *extack)
{
return NULL;
}
@@ -174,13 +176,14 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
static inline
int tcf_block_cb_register(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident,
- void *cb_priv)
+ void *cb_priv, struct netlink_ext_ack *extack)
{
return 0;
}
static inline
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
+void __tcf_block_cb_unregister(struct tcf_block *block,
+ struct tcf_block_cb *block_cb)
{
}
@@ -601,6 +604,7 @@ struct tc_block_offload {
enum tc_block_command command;
enum tcf_block_binder_type binder_type;
struct tcf_block *block;
+ struct netlink_ext_ack *extack;
};
struct tc_cls_common_offload {
@@ -720,6 +724,8 @@ enum tc_fl_command {
TC_CLSFLOWER_REPLACE,
TC_CLSFLOWER_DESTROY,
TC_CLSFLOWER_STATS,
+ TC_CLSFLOWER_TMPLT_CREATE,
+ TC_CLSFLOWER_TMPLT_DESTROY,
};
struct tc_cls_flower_offload {
@@ -776,6 +782,7 @@ struct tc_mqprio_qopt_offload {
struct tc_cookie {
u8 *data;
u32 len;
+ struct rcu_head rcu;
};
struct tc_qopt_offload_stats {
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 815b92a23936..7dc769e5452b 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -72,6 +72,8 @@ struct qdisc_watchdog {
struct Qdisc *qdisc;
};
+void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
+ clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
@@ -153,4 +155,9 @@ struct tc_cbs_qopt_offload {
s32 sendslope;
};
+struct tc_etf_qopt_offload {
+ u8 enable;
+ s32 queue;
+};
+
#endif
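
qdisc_watchdog_init_clockid() lets a qdisc arm its watchdog on a clock other than the default, which is what a time-based scheduler such as the ETF offload above needs. A hedged init sketch; my_etf_sched_data is hypothetical:

static int my_etf_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct my_etf_sched_data *q = qdisc_priv(sch);

	/* fire the watchdog on the clock the packets are timestamped in */
	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_TAI);
	return 0;
}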
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6488daa32f82..a6d00093f35e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -20,6 +20,9 @@ struct qdisc_walker;
struct tcf_walker;
struct module;
+typedef int tc_setup_cb_t(enum tc_setup_type type,
+ void *type_data, void *cb_priv);
+
struct qdisc_rate_table {
struct tc_ratespec rate;
u32 data[256];
@@ -232,9 +235,17 @@ struct tcf_result {
u32 classid;
};
const struct tcf_proto *goto_tp;
+
+ /* used by the TC_ACT_REINSERT action */
+ struct {
+ bool ingress;
+ struct gnet_stats_queue *qstats;
+ };
};
};
+struct tcf_chain;
+
struct tcf_proto_ops {
struct list_head head;
char kind[IFNAMSIZ];
@@ -256,11 +267,22 @@ struct tcf_proto_ops {
bool *last,
struct netlink_ext_ack *);
void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
+ int (*reoffload)(struct tcf_proto *tp, bool add,
+ tc_setup_cb_t *cb, void *cb_priv,
+ struct netlink_ext_ack *extack);
void (*bind_class)(void *, u32, unsigned long);
+ void * (*tmplt_create)(struct net *net,
+ struct tcf_chain *chain,
+ struct nlattr **tca,
+ struct netlink_ext_ack *extack);
+ void (*tmplt_destroy)(void *tmplt_priv);
/* rtnetlink specific */
int (*dump)(struct net*, struct tcf_proto*, void *,
struct sk_buff *skb, struct tcmsg*);
+ int (*tmplt_dump)(struct sk_buff *skb,
+ struct net *net,
+ void *tmplt_priv);
struct module *owner;
};
@@ -269,6 +291,8 @@ struct tcf_proto {
/* Fast access part */
struct tcf_proto __rcu *next;
void __rcu *root;
+
+ /* called under RCU BH lock */
int (*classify)(struct sk_buff *,
const struct tcf_proto *,
struct tcf_result *);
@@ -294,11 +318,14 @@ typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
struct tcf_chain {
struct tcf_proto __rcu *filter_chain;
- struct list_head filter_chain_list;
struct list_head list;
struct tcf_block *block;
u32 index; /* chain index */
unsigned int refcnt;
+ unsigned int action_refcnt;
+ bool explicitly_created;
+ const struct tcf_proto_ops *tmplt_ops;
+ void *tmplt_priv;
};
struct tcf_block {
@@ -312,6 +339,10 @@ struct tcf_block {
bool keep_dst;
 unsigned int offloadcnt; /* Number of offloaded filters */
unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
+ struct {
+ struct tcf_chain *chain;
+ struct list_head filter_chain_list;
+ } chain0;
};
static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
@@ -330,6 +361,21 @@ static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
block->offloadcnt--;
}
+static inline void
+tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
+ u32 *flags, bool add)
+{
+ if (add) {
+ if (!*cnt)
+ tcf_block_offload_inc(block, flags);
+ (*cnt)++;
+ } else {
+ (*cnt)--;
+ if (!*cnt)
+ tcf_block_offload_dec(block, flags);
+ }
+}
+
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
struct qdisc_skb_cb *qcb;
@@ -529,6 +575,15 @@ static inline void skb_reset_tc(struct sk_buff *skb)
#endif
}
+static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return skb->tc_redirected;
+#else
+ return false;
+#endif
+}
+
static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
@@ -1068,4 +1123,17 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc __rcu **p_miniq);
+static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
+{
+ struct gnet_stats_queue *stats = res->qstats;
+ int ret;
+
+ if (res->ingress)
+ ret = netif_receive_skb(skb);
+ else
+ ret = dev_queue_xmit(skb);
+ if (ret && stats)
+ qstats_overlimit_inc(res->qstats);
+}
+
#endif
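
tc_cls_offload_cnt_update() keeps a filter's own hardware reference count and the block-wide offloadcnt consistent, which matters once the new ->reoffload() hook can replay filters to drivers. A small sketch; struct my_filter with in_hw_count/flags members is hypothetical:

static void my_filter_hw_update(struct tcf_block *block, struct my_filter *f,
				bool add)
{
	/* first add bumps block->offloadcnt, last remove drops it */
	tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags, add);
}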
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index dbe1b911a24d..28a7c8e44636 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -48,7 +48,7 @@
#define __sctp_structs_h__
#include <linux/ktime.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#include <linux/socket.h> /* linux/in.h needs this!! */
#include <linux/in.h> /* We get struct sockaddr_in. */
#include <linux/in6.h> /* We get struct in6_addr */
@@ -57,6 +57,7 @@
#include <linux/atomic.h> /* This gets us atomic counters. */
#include <linux/skbuff.h> /* We need sk_buff_head. */
#include <linux/workqueue.h> /* We need tq_struct. */
+#include <linux/flex_array.h> /* We need flex_array. */
#include <linux/sctp.h> /* We need sctp* header structs. */
#include <net/sctp/auth.h> /* We need auth specific structs */
#include <net/ip.h> /* For inet_skb_parm */
@@ -193,6 +194,9 @@ struct sctp_sock {
/* This is the max_retrans value for new associations. */
__u16 pathmaxrxt;
+ __u32 flowlabel;
+ __u8 dscp;
+
/* The initial Path MTU to use for new associations. */
__u32 pathmtu;
@@ -220,6 +224,7 @@ struct sctp_sock {
__u32 adaptation_ind;
__u32 pd_point;
__u16 nodelay:1,
+ reuse:1,
disable_fragments:1,
v4mapped:1,
frag_interleave:1,
@@ -394,37 +399,35 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
/* What is the current SSN number for this stream? */
#define sctp_ssn_peek(stream, type, sid) \
- ((stream)->type[sid].ssn)
+ (sctp_stream_##type((stream), (sid))->ssn)
/* Return the next SSN number for this stream. */
#define sctp_ssn_next(stream, type, sid) \
- ((stream)->type[sid].ssn++)
+ (sctp_stream_##type((stream), (sid))->ssn++)
/* Skip over this ssn and all below. */
#define sctp_ssn_skip(stream, type, sid, ssn) \
- ((stream)->type[sid].ssn = ssn + 1)
+ (sctp_stream_##type((stream), (sid))->ssn = ssn + 1)
/* What is the current MID number for this stream? */
#define sctp_mid_peek(stream, type, sid) \
- ((stream)->type[sid].mid)
+ (sctp_stream_##type((stream), (sid))->mid)
/* Return the next MID number for this stream. */
#define sctp_mid_next(stream, type, sid) \
- ((stream)->type[sid].mid++)
+ (sctp_stream_##type((stream), (sid))->mid++)
/* Skip over this mid and all below. */
#define sctp_mid_skip(stream, type, sid, mid) \
- ((stream)->type[sid].mid = mid + 1)
-
-#define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid])
+ (sctp_stream_##type((stream), (sid))->mid = mid + 1)
/* What is the current MID_uo number for this stream? */
#define sctp_mid_uo_peek(stream, type, sid) \
- ((stream)->type[sid].mid_uo)
+ (sctp_stream_##type((stream), (sid))->mid_uo)
/* Return the next MID_uo number for this stream. */
#define sctp_mid_uo_next(stream, type, sid) \
- ((stream)->type[sid].mid_uo++)
+ (sctp_stream_##type((stream), (sid))->mid_uo++)
/*
* Pointers to address related SCTP functions.
@@ -894,6 +897,9 @@ struct sctp_transport {
*/
__u16 pathmaxrxt;
+ __u32 flowlabel;
+ __u8 dscp;
+
/* This is the partially failed retrans value for the transport
* and will be initialized from the assocs value. This can be changed
* using the SCTP_PEER_ADDR_THLDS socket option
@@ -1433,8 +1439,8 @@ struct sctp_stream_in {
};
struct sctp_stream {
- struct sctp_stream_out *out;
- struct sctp_stream_in *in;
+ struct flex_array *out;
+ struct flex_array *in;
__u16 outcnt;
__u16 incnt;
/* Current stream being sent, if any */
@@ -1456,6 +1462,23 @@ struct sctp_stream {
struct sctp_stream_interleave *si;
};
+static inline struct sctp_stream_out *sctp_stream_out(
+ const struct sctp_stream *stream,
+ __u16 sid)
+{
+ return flex_array_get(stream->out, sid);
+}
+
+static inline struct sctp_stream_in *sctp_stream_in(
+ const struct sctp_stream *stream,
+ __u16 sid)
+{
+ return flex_array_get(stream->in, sid);
+}
+
+#define SCTP_SO(s, i) sctp_stream_out((s), (i))
+#define SCTP_SI(s, i) sctp_stream_in((s), (i))
+
#define SCTP_STREAM_CLOSED 0x00
#define SCTP_STREAM_OPEN 0x01
@@ -1771,6 +1794,9 @@ struct sctp_association {
*/
__u16 pathmaxrxt;
+ __u32 flowlabel;
+ __u8 dscp;
+
/* Flag that path mtu update is pending */
__u8 pmtu_pending;
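
Since the stream arrays are now flex_arrays, direct indexing such as stream->out[sid] no longer compiles; all users go through the accessors (or the SCTP_SO()/SCTP_SI() shorthands) introduced above, with flex_array_get() resolving the per-element lookup. A hedged fragment, assuming asoc and sid in scope:

struct sctp_stream *stream = &asoc->stream;
struct sctp_stream_out *soute = SCTP_SO(stream, sid);	/* was stream->out[sid] */
struct sctp_stream_in  *sin   = SCTP_SI(stream, sid);	/* was stream->in[sid] */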
diff --git a/include/net/seg6.h b/include/net/seg6.h
index e029e301faa5..2567941a2f32 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -18,7 +18,7 @@
#include <linux/ipv6.h>
#include <net/lwtunnel.h>
#include <linux/seg6.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
static inline void update_csum_diff4(struct sk_buff *skb, __be32 from,
__be32 to)
diff --git a/include/net/seg6_hmac.h b/include/net/seg6_hmac.h
index 69c3a106056b..7fda469e2758 100644
--- a/include/net/seg6_hmac.h
+++ b/include/net/seg6_hmac.h
@@ -22,7 +22,7 @@
#include <linux/route.h>
#include <net/seg6.h>
#include <linux/seg6_hmac.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#define SEG6_HMAC_MAX_DIGESTSIZE 160
#define SEG6_HMAC_RING_SIZE 256
diff --git a/include/net/seg6_local.h b/include/net/seg6_local.h
index 661fd5b4d3e0..08359e2d8b35 100644
--- a/include/net/seg6_local.h
+++ b/include/net/seg6_local.h
@@ -21,10 +21,12 @@
extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
u32 tbl_id);
+extern bool seg6_bpf_has_valid_srh(struct sk_buff *skb);
struct seg6_bpf_srh_state {
- bool valid;
+ struct ipv6_sr_hdr *srh;
u16 hdrlen;
+ bool valid;
};
DECLARE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);
diff --git a/include/net/smc.h b/include/net/smc.h
index 8381d163fefa..9ef49f8b1002 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -11,6 +11,8 @@
#ifndef _SMC_H
#define _SMC_H
+#define SMC_MAX_PNETID_LEN 16 /* Max. length of PNET id */
+
struct smc_hashinfo {
rwlock_t lock;
struct hlist_head ht;
@@ -18,4 +20,67 @@ struct smc_hashinfo {
int smc_hash_sk(struct sock *sk);
void smc_unhash_sk(struct sock *sk);
+
+/* SMCD/ISM device driver interface */
+struct smcd_dmb {
+ u64 dmb_tok;
+ u64 rgid;
+ u32 dmb_len;
+ u32 sba_idx;
+ u32 vlan_valid;
+ u32 vlan_id;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+};
+
+#define ISM_EVENT_DMB 0
+#define ISM_EVENT_GID 1
+#define ISM_EVENT_SWR 2
+
+struct smcd_event {
+ u32 type;
+ u32 code;
+ u64 tok;
+ u64 time;
+ u64 info;
+};
+
+struct smcd_dev;
+
+struct smcd_ops {
+ int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid,
+ u32 vid);
+ int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
+ int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
+ int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
+ int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
+ int (*set_vlan_required)(struct smcd_dev *dev);
+ int (*reset_vlan_required)(struct smcd_dev *dev);
+ int (*signal_event)(struct smcd_dev *dev, u64 rgid, u32 trigger_irq,
+ u32 event_code, u64 info);
+ int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data,
+ unsigned int size);
+};
+
+struct smcd_dev {
+ const struct smcd_ops *ops;
+ struct device dev;
+ void *priv;
+ u64 local_gid;
+ struct list_head list;
+ spinlock_t lock;
+ struct smc_connection **conn;
+ struct list_head vlan;
+ struct workqueue_struct *event_wq;
+ u8 pnetid[SMC_MAX_PNETID_LEN];
+};
+
+struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
+ const struct smcd_ops *ops, int max_dmbs);
+int smcd_register_dev(struct smcd_dev *smcd);
+void smcd_unregister_dev(struct smcd_dev *smcd);
+void smcd_free_dev(struct smcd_dev *smcd);
+void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event);
+void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit);
#endif /* _SMC_H */
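
The SMCD interface above lets a platform driver expose an ISM ("internal shared memory") device to the SMC stack. A hedged registration sketch; the ops implementations and MY_MAX_DMBS are hypothetical:

static const struct smcd_ops my_ism_ops = {
	.register_dmb	= my_register_dmb,
	.unregister_dmb	= my_unregister_dmb,
	.signal_event	= my_signal_event,
	/* remaining callbacks omitted in this sketch */
};

smcd = smcd_alloc_dev(&pdev->dev, "my_ism", &my_ism_ops, MY_MAX_DMBS);
if (!smcd)
	return -ENOMEM;
rc = smcd_register_dev(smcd);
if (rc)
	smcd_free_dev(smcd);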
diff --git a/include/net/sock.h b/include/net/sock.h
index b3b75419eafe..433f45fc2d68 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -139,6 +139,7 @@ typedef __u64 __bitwise __addrpair;
* @skc_node: main hash linkage for various protocol lookup tables
* @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
* @skc_tx_queue_mapping: tx queue number for this connection
+ * @skc_rx_queue_mapping: rx queue number for this connection
* @skc_flags: place holder for sk_flags
* %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
* %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -214,7 +215,10 @@ struct sock_common {
struct hlist_node skc_node;
struct hlist_nulls_node skc_nulls_node;
};
- int skc_tx_queue_mapping;
+ unsigned short skc_tx_queue_mapping;
+#ifdef CONFIG_XPS
+ unsigned short skc_rx_queue_mapping;
+#endif
union {
int skc_incoming_cpu;
u32 skc_rcv_wnd;
@@ -315,6 +319,9 @@ struct sock_common {
* @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
* @sk_reuseport_cb: reuseport group container
* @sk_rcu: used during RCU grace period
+ * @sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
+ * @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
+ * @sk_txtime_unused: unused txtime flags
*/
struct sock {
/*
@@ -326,6 +333,9 @@ struct sock {
#define sk_nulls_node __sk_common.skc_nulls_node
#define sk_refcnt __sk_common.skc_refcnt
#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
+#ifdef CONFIG_XPS
+#define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping
+#endif
#define sk_dontcopy_begin __sk_common.skc_dontcopy_begin
#define sk_dontcopy_end __sk_common.skc_dontcopy_end
@@ -468,6 +478,12 @@ struct sock {
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
+
+ u8 sk_clockid;
+ u8 sk_txtime_deadline_mode : 1,
+ sk_txtime_report_errors : 1,
+ sk_txtime_unused : 6;
+
struct socket *sk_socket;
void *sk_user_data;
#ifdef CONFIG_SECURITY
@@ -783,6 +799,7 @@ enum sock_flags {
SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
+ SOCK_TXTIME,
};
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
@@ -1578,10 +1595,17 @@ void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);
struct sockcm_cookie {
+ u64 transmit_time;
u32 mark;
u16 tsflags;
};
+static inline void sockcm_init(struct sockcm_cookie *sockc,
+ const struct sock *sk)
+{
+ *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
+}
+
int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
struct sockcm_cookie *sockc);
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
@@ -1681,19 +1705,58 @@ static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
+ /* sk_tx_queue_mapping accepts only up to a 16-bit value */
+ if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
+ return;
sk->sk_tx_queue_mapping = tx_queue;
}
+#define NO_QUEUE_MAPPING USHRT_MAX
+
static inline void sk_tx_queue_clear(struct sock *sk)
{
- sk->sk_tx_queue_mapping = -1;
+ sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
}
static inline int sk_tx_queue_get(const struct sock *sk)
{
- return sk ? sk->sk_tx_queue_mapping : -1;
+ if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
+ return sk->sk_tx_queue_mapping;
+
+ return -1;
}
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+ if (skb_rx_queue_recorded(skb)) {
+ u16 rx_queue = skb_get_rx_queue(skb);
+
+ if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
+ return;
+
+ sk->sk_rx_queue_mapping = rx_queue;
+ }
+#endif
+}
+
+static inline void sk_rx_queue_clear(struct sock *sk)
+{
+#ifdef CONFIG_XPS
+ sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
+#endif
+}
+
+#ifdef CONFIG_XPS
+static inline int sk_rx_queue_get(const struct sock *sk)
+{
+ if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
+ return sk->sk_rx_queue_mapping;
+
+ return -1;
+}
+#endif
+
static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
sk_tx_queue_clear(sk);
@@ -1725,7 +1788,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
{
WARN_ON(parent->sk);
write_lock_bh(&sk->sk_callback_lock);
- sk->sk_wq = parent->wq;
+ rcu_assign_pointer(sk->sk_wq, parent->wq);
parent->sk = sk;
sk_set_socket(sk, parent);
sk->sk_uid = SOCK_INODE(parent)->i_uid;
@@ -1994,16 +2057,16 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
/**
* sock_poll_wait - place memory barrier behind the poll_wait call.
* @filp: file
- * @wait_address: socket wait queue
* @p: poll_table
*
* See the comments in the wq_has_sleeper function.
*/
-static inline void sock_poll_wait(struct file *filp,
- wait_queue_head_t *wait_address, poll_table *p)
+static inline void sock_poll_wait(struct file *filp, poll_table *p)
{
- if (!poll_does_not_wait(p) && wait_address) {
- poll_wait(filp, wait_address, p);
+ struct socket *sock = filp->private_data;
+
+ if (!poll_does_not_wait(p)) {
+ poll_wait(filp, &sock->wq->wait, p);
/* We need to be sure we are in sync with the
* socket flags modification.
*
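
sockcm_init() gives every protocol the same starting cookie (tsflags inherited from the socket) so that SO_TXTIME's transmit_time is picked up uniformly by sock_cmsg_send(). A sendmsg-path sketch; my_do_transmit() is hypothetical:

static int my_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sockcm_cookie sockc;
	int err;

	sockcm_init(&sockc, sk);	/* starts from sk->sk_tsflags */
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			return err;
	}
	/* sockc.transmit_time now holds the SCM_TXTIME value, if any */
	return my_do_transmit(sk, msg, len, &sockc);
}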
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 0054b3a9b923..8a5f70c7cdf2 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -5,25 +5,36 @@
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/types.h>
+#include <linux/spinlock.h>
#include <net/sock.h>
+extern spinlock_t reuseport_lock;
+
struct sock_reuseport {
struct rcu_head rcu;
u16 max_socks; /* length of socks */
u16 num_socks; /* elements in socks */
+ /* The last synq overflow event timestamp of this
+ * reuse->socks[] group.
+ */
+ unsigned int synq_overflow_ts;
+ /* ID stays the same even after the size of socks[] grows. */
+ unsigned int reuseport_id;
+ bool bind_inany;
struct bpf_prog __rcu *prog; /* optional BPF sock selector */
struct sock *socks[0]; /* array of sock pointers */
};
-extern int reuseport_alloc(struct sock *sk);
-extern int reuseport_add_sock(struct sock *sk, struct sock *sk2);
+extern int reuseport_alloc(struct sock *sk, bool bind_inany);
+extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
+ bool bind_inany);
extern void reuseport_detach_sock(struct sock *sk);
extern struct sock *reuseport_select_sock(struct sock *sk,
u32 hash,
struct sk_buff *skb,
int hdr_len);
-extern struct bpf_prog *reuseport_attach_prog(struct sock *sk,
- struct bpf_prog *prog);
+extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
+int reuseport_get_id(struct sock_reuseport *reuse);
#endif /* _SOCK_REUSEPORT_H */
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index 227a6f1d02f4..fac3ad4a86de 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -17,6 +17,7 @@ struct tcf_pedit {
struct tc_pedit_key *tcfp_keys;
struct tcf_pedit_key_ex *tcfp_keys_ex;
};
+
#define to_pedit(a) ((struct tcf_pedit *)a)
static inline bool is_tcf_pedit(const struct tc_action *a)
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index 19cd3d345804..911bbac838a2 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -22,14 +22,19 @@
#include <net/act_api.h>
#include <linux/tc_act/tc_skbedit.h>
+struct tcf_skbedit_params {
+ u32 flags;
+ u32 priority;
+ u32 mark;
+ u32 mask;
+ u16 queue_mapping;
+ u16 ptype;
+ struct rcu_head rcu;
+};
+
struct tcf_skbedit {
- struct tc_action common;
- u32 flags;
- u32 priority;
- u32 mark;
- u32 mask;
- u16 queue_mapping;
- u16 ptype;
+ struct tc_action common;
+ struct tcf_skbedit_params __rcu *params;
};
#define to_skbedit(a) ((struct tcf_skbedit *)a)
@@ -37,15 +42,27 @@ struct tcf_skbedit {
static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->type == TCA_ACT_SKBEDIT)
- return to_skbedit(a)->flags == SKBEDIT_F_MARK;
+ u32 flags;
+
+ if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) {
+ rcu_read_lock();
+ flags = rcu_dereference(to_skbedit(a)->params)->flags;
+ rcu_read_unlock();
+ return flags == SKBEDIT_F_MARK;
+ }
#endif
return false;
}
static inline u32 tcf_skbedit_mark(const struct tc_action *a)
{
- return to_skbedit(a)->mark;
+ u32 mark;
+
+ rcu_read_lock();
+ mark = rcu_dereference(to_skbedit(a)->params)->mark;
+ rcu_read_unlock();
+
+ return mark;
}
#endif /* __NET_TC_SKBEDIT_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cd3ecda9386a..d196901c9dba 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -36,6 +36,7 @@
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
+#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
@@ -473,19 +474,45 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
*/
static inline void tcp_synq_overflow(const struct sock *sk)
{
- unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
- unsigned long now = jiffies;
+ unsigned int last_overflow;
+ unsigned int now = jiffies;
- if (time_after(now, last_overflow + HZ))
+ if (sk->sk_reuseport) {
+ struct sock_reuseport *reuse;
+
+ reuse = rcu_dereference(sk->sk_reuseport_cb);
+ if (likely(reuse)) {
+ last_overflow = READ_ONCE(reuse->synq_overflow_ts);
+ if (time_after32(now, last_overflow + HZ))
+ WRITE_ONCE(reuse->synq_overflow_ts, now);
+ return;
+ }
+ }
+
+ last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+ if (time_after32(now, last_overflow + HZ))
tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
}
/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
- unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+ unsigned int last_overflow;
+ unsigned int now = jiffies;
+
+ if (sk->sk_reuseport) {
+ struct sock_reuseport *reuse;
+
+ reuse = rcu_dereference(sk->sk_reuseport_cb);
+ if (likely(reuse)) {
+ last_overflow = READ_ONCE(reuse->synq_overflow_ts);
+ return time_after32(now, last_overflow +
+ TCP_SYNCOOKIE_VALID);
+ }
+ }
- return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
+ last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+ return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
}
static inline u32 tcp_cookie_time(void)
@@ -963,6 +990,8 @@ struct rate_sample {
u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
s32 delivered; /* number of packets delivered over interval */
long interval_us; /* time for tp->delivered to incr "delivered" */
+ u32 snd_interval_us; /* snd interval for delivered packets */
+ u32 rcv_interval_us; /* rcv interval for delivered packets */
long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
int losses; /* number of packets marked lost upon ACK */
u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
@@ -1194,6 +1223,17 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
return tp->is_cwnd_limited;
}
+/* BBR congestion control needs pacing.
+ * The same applies to SO_MAX_PACING_RATE.
+ * The sch_fq packet scheduler handles pacing efficiently,
+ * but it is not always installed/used.
+ * Return true if the TCP stack should pace packets itself.
+ */
+static inline bool tcp_needs_internal_pacing(const struct sock *sk)
+{
+ return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
+}
+
/* Something is really bad, we could not queue an additional packet,
* because qdisc is full or receiver sent a 0 window.
* We do not want to add fuel to the fire, or abort too early,
@@ -1371,7 +1411,8 @@ static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
{
if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
return true;
- if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
+ if (unlikely(!time_before32(ktime_get_seconds(),
+ rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
return true;
/*
* Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
@@ -1401,7 +1442,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
However, we can relax time bounds for RST segments to MSL.
*/
- if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
+ if (rst && !time_before32(ktime_get_seconds(),
+ rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
return false;
return true;
}
@@ -1787,7 +1829,7 @@ void tcp_v4_destroy_sock(struct sock *sk);
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features);
-struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
int tcp_gro_complete(struct sk_buff *skb);
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
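
The overflow stamp can now live in the shared, 32-bit reuse->synq_overflow_ts, so the comparisons switch from time_after() on unsigned long to the wrap-safe time_after32(). A small illustration of why that matters near the 32-bit wrap (values hypothetical):

u32 then = 0xfffffff0;	/* stamp recorded just before the wrap */
u32 now  = 0x00000010;	/* jiffies shortly after the wrap */

/* signed 32-bit difference: still correctly "after" across the wrap */
WARN_ON(!time_after32(now, then));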
diff --git a/include/net/tls.h b/include/net/tls.h
index 70c273777fe9..d5c683e8bb22 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -83,6 +83,16 @@ struct tls_device {
void (*unhash)(struct tls_device *device, struct sock *sk);
};
+enum {
+ TLS_BASE,
+ TLS_SW,
+#ifdef CONFIG_TLS_DEVICE
+ TLS_HW,
+#endif
+ TLS_HW_RECORD,
+ TLS_NUM_CONFIG,
+};
+
struct tls_sw_context_tx {
struct crypto_aead *aead_send;
struct crypto_wait async_wait;
@@ -114,10 +124,6 @@ struct tls_sw_context_rx {
struct sk_buff *recv_pkt;
u8 control;
bool decrypted;
-
- char rx_aad_ciphertext[TLS_AAD_SPACE_SIZE];
- char rx_aad_plaintext[TLS_AAD_SPACE_SIZE];
-
};
struct tls_record_info {
@@ -128,7 +134,7 @@ struct tls_record_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
-struct tls_offload_context {
+struct tls_offload_context_tx {
struct crypto_aead *aead_send;
spinlock_t lock; /* protects records list */
struct list_head records_list;
@@ -147,8 +153,8 @@ struct tls_offload_context {
#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
};
-#define TLS_OFFLOAD_CONTEXT_SIZE \
- (ALIGN(sizeof(struct tls_offload_context), sizeof(void *)) + \
+#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
+ (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
TLS_DRIVER_STATE_SIZE)
enum {
@@ -197,6 +203,7 @@ struct tls_context {
int (*push_pending_record)(struct sock *sk, int flags);
void (*sk_write_space)(struct sock *sk);
+ void (*sk_destruct)(struct sock *sk);
void (*sk_proto_close)(struct sock *sk, long timeout);
int (*setsockopt)(struct sock *sk, int level,
@@ -209,13 +216,27 @@ struct tls_context {
void (*unhash)(struct sock *sk);
};
+struct tls_offload_context_rx {
+ /* sw must be the first member of tls_offload_context_rx */
+ struct tls_sw_context_rx sw;
+ atomic64_t resync_req;
+ u8 driver_state[];
+ /* The TLS layer reserves room for driver-specific state.
+ * Currently the belief is that there is not enough
+ * driver-specific state to justify another layer of indirection.
+ */
+};
+
+#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
+ (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
+ TLS_DRIVER_STATE_SIZE)
+
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
-
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
@@ -223,6 +244,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
void tls_sw_close(struct sock *sk, long timeout);
void tls_sw_free_resources_tx(struct sock *sk);
void tls_sw_free_resources_rx(struct sock *sk);
+void tls_sw_release_resources_rx(struct sock *sk);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
unsigned int tls_sw_poll(struct file *file, struct socket *sock,
@@ -239,7 +261,7 @@ void tls_device_sk_destruct(struct sock *sk);
void tls_device_init(void);
void tls_device_cleanup(void);
-struct tls_record_info *tls_get_record(struct tls_offload_context *context,
+struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn);
static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
@@ -289,11 +311,19 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
return tls_ctx->pending_open_record_frags;
}
+struct sk_buff *
+tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
+ struct sk_buff *skb);
+
static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
- return sk_fullsock(sk) &&
- /* matches smp_store_release in tls_set_device_offload */
- smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct;
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ return sk_fullsock(sk) &&
+ (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
+ &tls_validate_xmit_skb);
+#else
+ return false;
+#endif
}
static inline void tls_err_abort(struct sock *sk, int err)
@@ -380,23 +410,47 @@ static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}
-static inline struct tls_offload_context *tls_offload_ctx(
- const struct tls_context *tls_ctx)
+static inline struct tls_offload_context_tx *
+tls_offload_ctx_tx(const struct tls_context *tls_ctx)
+{
+ return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
+}
+
+static inline struct tls_offload_context_rx *
+tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
- return (struct tls_offload_context *)tls_ctx->priv_ctx_tx;
+ return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}
+/* The TLS context is valid until sk_destruct is called */
+static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+ atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
+}
+
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type);
void tls_register_device(struct tls_device *device);
void tls_unregister_device(struct tls_device *device);
+int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
+int decrypt_skb(struct sock *sk, struct sk_buff *skb,
+ struct scatterlist *sgout);
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
struct net_device *dev,
struct sk_buff *skb);
int tls_sw_fallback_init(struct sock *sk,
- struct tls_offload_context *offload_ctx,
+ struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info);
+int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
+
+void tls_device_offload_cleanup_rx(struct sock *sk);
+void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
+
#endif /* _TLS_OFFLOAD_H */
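
tls_offload_rx_resync_request() is the driver's entry point when its RX engine loses TLS record synchronization: the TCP sequence number and a request bit are packed into resync_req in one atomic64_set(), to be consumed later by the resync handler. A hedged driver-side sketch; the descriptor check and its fields are hypothetical:

/* NIC reports a TLS record it could not track at this TCP sequence */
if (my_nic_tls_record_out_of_sync(rxd))
	tls_offload_rx_resync_request(sk, rxd->tcp_seq);	/* __be32 seq */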
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index f6a3543e5247..a8f6020f1196 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -42,8 +42,7 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb);
int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
- struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
- struct sockcm_cookie *sockc);
+ struct flowi6 *fl6, struct ipcm6_cookie *ipc6);
void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
__u16 srcp, __u16 destp, int rqueue, int bucket);
diff --git a/include/net/udp.h b/include/net/udp.h
index 81afdacd4fff..8482a990b0bb 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -170,8 +170,8 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
__be16 dport);
-struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
- struct udphdr *uh, udp_lookup_t lookup);
+struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ struct udphdr *uh, udp_lookup_t lookup);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
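
With the conversion above, GRO holds pending packets on a list_head instead of an sk_buff chain, so callbacks walk sk_buff::list with the regular list iterators. A sketch of the new shape, matching the udp_tunnel_gro_receive_t signature in the udp_tunnel.h hunk below; mytun_* is a made-up name:

static struct sk_buff *mytun_gro_receive(struct sock *sk,
					 struct list_head *head,
					 struct sk_buff *skb)
{
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;
		/* ... compare tunnel headers; clear same_flow on mismatch ... */
	}
	return NULL;	/* NULL: keep aggregating; non-NULL: flush that skb */
}
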
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index b95a6927c718..fe680ab6b15a 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -65,9 +65,9 @@ static inline int udp_sock_create(struct net *net,
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
-typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk,
- struct sk_buff **head,
- struct sk_buff *skb);
+typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
int nhoff);
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 2deea7166a34..76b95256c266 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -84,6 +84,13 @@ struct xdp_frame {
struct net_device *dev_rx; /* used by cpumap */
};
+/* Clear kernel pointers in xdp_frame */
+static inline void xdp_scrub_frame(struct xdp_frame *frame)
+{
+ frame->data = NULL;
+ frame->dev_rx = NULL;
+}
+
/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
@@ -144,4 +151,17 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
return unlikely(xdp->data_meta > xdp->data);
}
+struct xdp_attachment_info {
+ struct bpf_prog *prog;
+ u32 flags;
+};
+
+struct netdev_bpf;
+int xdp_attachment_query(struct xdp_attachment_info *info,
+ struct netdev_bpf *bpf);
+bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
+ struct netdev_bpf *bpf);
+void xdp_attachment_setup(struct xdp_attachment_info *info,
+ struct netdev_bpf *bpf);
+
#endif /* __LINUX_NET_XDP_H__ */
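
The three xdp_attachment helpers above factor out bookkeeping that every driver's ndo_bpf used to duplicate. A minimal sketch of the intended call pattern; the mydrv_* names, the priv->xdp field, and the program-swap helper are assumptions, not part of this patch:

static int mydrv_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
			return -EBUSY;	/* flags changed under a live prog */
		mydrv_swap_prog(priv, bpf->prog);	/* assumed helper */
		xdp_attachment_setup(&priv->xdp, bpf);	/* record prog + flags */
		return 0;
	case XDP_QUERY_PROG:
		return xdp_attachment_query(&priv->xdp, bpf);
	default:
		return -EINVAL;
	}
}
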
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 557122846e0e..0eb390c205af 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -23,6 +23,7 @@
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/flow.h>
+#include <net/gro_cells.h>
#include <linux/interrupt.h>
@@ -147,6 +148,7 @@ struct xfrm_state {
struct xfrm_id id;
struct xfrm_selector sel;
struct xfrm_mark mark;
+ u32 if_id;
u32 tfcpad;
u32 genid;
@@ -166,7 +168,7 @@ struct xfrm_state {
int header_len;
int trailer_len;
u32 extra_flags;
- u32 output_mark;
+ struct xfrm_mark smark;
} props;
struct xfrm_lifetime_cfg lft;
@@ -225,7 +227,7 @@ struct xfrm_state {
long saved_tmo;
/* Last used time */
- unsigned long lastused;
+ time64_t lastused;
struct page_frag xfrag;
@@ -292,6 +294,13 @@ struct xfrm_replay {
int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
};
+struct xfrm_if_cb {
+ struct xfrm_if *(*decode_session)(struct sk_buff *skb);
+};
+
+void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
+void xfrm_if_unregister_cb(void);
+
struct net_device;
struct xfrm_type;
struct xfrm_dst;
@@ -323,7 +332,6 @@ int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int fam
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
void km_policy_notify(struct xfrm_policy *xp, int dir,
const struct km_event *c);
-void xfrm_policy_cache_flush(void);
void km_state_notify(struct xfrm_state *x, const struct km_event *c);
struct xfrm_tmpl;
@@ -574,6 +582,7 @@ struct xfrm_policy {
atomic_t genid;
u32 priority;
u32 index;
+ u32 if_id;
struct xfrm_mark mark;
struct xfrm_selector selector;
struct xfrm_lifetime_cfg lft;
@@ -735,7 +744,7 @@ static inline struct audit_buffer *xfrm_audit_start(const char *op)
{
struct audit_buffer *audit_buf = NULL;
- if (audit_enabled == 0)
+ if (audit_enabled == AUDIT_OFF)
return NULL;
audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
AUDIT_MAC_IPSEC_EVENT);
@@ -1037,6 +1046,22 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
+struct xfrm_if_parms {
+ char name[IFNAMSIZ]; /* name of XFRM device */
+ int link; /* ifindex of underlying L2 interface */
+	u32 if_id;		/* interface identifier */
+};
+
+struct xfrm_if {
+ struct xfrm_if __rcu *next; /* next interface in list */
+ struct net_device *dev; /* virtual device associated with interface */
+ struct net_device *phydev; /* physical device */
+ struct net *net; /* netns for packet i/o */
+ struct xfrm_if_parms p; /* interface parms */
+
+ struct gro_cells gro_cells;
+};
+
struct xfrm_offload {
/* Output sequence number for replay protection on offloading. */
struct {
@@ -1532,8 +1557,8 @@ struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
const struct flowi *fl,
struct xfrm_tmpl *tmpl,
struct xfrm_policy *pol, int *err,
- unsigned short family);
-struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
+ unsigned short family, u32 if_id);
+struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
xfrm_address_t *daddr,
xfrm_address_t *saddr,
unsigned short family,
@@ -1690,20 +1715,20 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
void *);
void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
+struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
u8 type, int dir,
struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete,
int *err);
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
- u32 id, int delete, int *err);
+struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
+ int dir, u32 id, int delete, int *err);
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
void xfrm_policy_hash_rebuild(struct net *net);
u32 xfrm_get_acqseq(void);
int verify_spi_info(u8 proto, u32 min, u32 max);
int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
- u8 mode, u32 reqid, u8 proto,
+ u8 mode, u32 reqid, u32 if_id, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create,
unsigned short family);
@@ -2012,6 +2037,22 @@ static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
return ret;
}
+static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
+{
+ struct xfrm_mark *m = &x->props.smark;
+
+ return (m->v & m->m) | (mark & ~m->m);
+}
+
+static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
+{
+ int ret = 0;
+
+ if (if_id)
+ ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
+ return ret;
+}
+
static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
unsigned int family)
{
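
A worked example of the smark merge in xfrm_smark_get() above: the state's mask m selects which bits come from the state's value v, and the remaining bits pass through from the flow's mark (values are illustrative only):

	struct xfrm_mark m = { .v = 0x0012, .m = 0x00ff };
	__u32 mark = 0xab00;
	__u32 out = (m.v & m.m) | (mark & ~m.m);	/* 0x12 | 0xab00 = 0xab12 */
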
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 225ab7783dfd..3de3b10da19a 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -161,7 +161,7 @@ struct sata_device {
u8 port_no; /* port number, if this is a PM (Port) */
struct ata_port *ap;
- struct ata_host ata_host;
+ struct ata_host *ata_host;
struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
u8 fis[ATA_RESP_FIS_SIZE];
};
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index aaf1e971c6a3..c891ada3c5c2 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -4,6 +4,7 @@
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
+#include <linux/t10-pi.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/timer.h>
@@ -14,8 +15,6 @@
struct Scsi_Host;
struct scsi_driver;
-#include <scsi/scsi_device.h>
-
/*
* MAX_COMMAND_SIZE is:
* The longest fixed-length SCSI CDB as per the SCSI standard.
@@ -120,11 +119,11 @@ struct scsi_cmnd {
struct request *request; /* The command we are
working on */
-#define SCSI_SENSE_BUFFERSIZE 96
unsigned char *sense_buffer;
/* obtained by REQUEST SENSE when
* CHECK CONDITION is received on original
- * command (auto-sense) */
+ * command (auto-sense). Length must be
+ * SCSI_SENSE_BUFFERSIZE bytes. */
/* Low-level done function - can be used by low-level driver to point
* to completion function. Not used by mid/upper level code. */
@@ -313,12 +312,6 @@ static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
return scmd->device->sector_size;
}
-static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
-{
- return blk_rq_pos(scmd->request) >>
- (ilog2(scsi_prot_interval(scmd)) - 9) & 0xffffffff;
-}
-
static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
{
return cmd->prot_sdb ? cmd->prot_sdb->table.nents : 0;
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 4c36af6edd79..202f4d6a4342 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -17,6 +17,8 @@ struct scsi_sense_hdr;
typedef __u64 __bitwise blist_flags_t;
+#define SCSI_SENSE_BUFFERSIZE 96
+
struct scsi_mode_data {
__u32 length;
__u16 block_descriptor_length;
@@ -426,11 +428,21 @@ extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);
extern void scsi_sanitize_inquiry_string(unsigned char *s, int len);
-extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+extern int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, struct scsi_sense_hdr *sshdr,
int timeout, int retries, u64 flags,
req_flags_t rq_flags, int *resid);
+/* Make sure any sense buffer is the correct size. */
+#define scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense, \
+ sshdr, timeout, retries, flags, rq_flags, resid) \
+({ \
+ BUILD_BUG_ON((sense) != NULL && \
+ sizeof(sense) != SCSI_SENSE_BUFFERSIZE); \
+ __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, \
+ sense, sshdr, timeout, retries, flags, rq_flags, \
+ resid); \
+})
static inline int scsi_execute_req(struct scsi_device *sdev,
const unsigned char *cmd, int data_direction, void *buffer,
unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
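
The scsi_execute() wrapper above works because sizeof() sees the declared type of the sense argument: a fixed-size array of the wrong size fails BUILD_BUG_ON at compile time, while a NULL argument short-circuits the check. A sketch of a conforming caller (the timeout and retry count are illustrative):

	unsigned char sense[SCSI_SENSE_BUFFERSIZE];	/* right-sized array */
	struct scsi_sense_hdr sshdr;
	int ret;

	ret = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, sense, &sshdr,
			   30 * HZ, 3, 0, 0, NULL);
	/* unsigned char sense[64] here would trip the BUILD_BUG_ON above */
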
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 53b485fe9b67..5ea06d310a25 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -758,6 +758,7 @@ extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
+extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h
new file mode 100644
index 000000000000..619e07c75da9
--- /dev/null
+++ b/include/soc/qcom/rpmh.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SOC_QCOM_RPMH_H__
+#define __SOC_QCOM_RPMH_H__
+
+#include <soc/qcom/tcs.h>
+#include <linux/platform_device.h>
+
+
+#if IS_ENABLED(CONFIG_QCOM_RPMH)
+int rpmh_write(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n);
+
+int rpmh_write_async(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n);
+
+int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 *n);
+
+int rpmh_flush(const struct device *dev);
+
+int rpmh_invalidate(const struct device *dev);
+
+#else
+
+static inline int rpmh_write(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{ return -ENODEV; }
+
+static inline int rpmh_write_async(const struct device *dev,
+ enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{ return -ENODEV; }
+
+static inline int rpmh_write_batch(const struct device *dev,
+ enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 *n)
+{ return -ENODEV; }
+
+static inline int rpmh_flush(const struct device *dev)
+{ return -ENODEV; }
+
+static inline int rpmh_invalidate(const struct device *dev)
+{ return -ENODEV; }
+
+#endif /* CONFIG_QCOM_RPMH */
+
+#endif /* __SOC_QCOM_RPMH_H__ */
diff --git a/include/soc/qcom/tcs.h b/include/soc/qcom/tcs.h
new file mode 100644
index 000000000000..262876a59e86
--- /dev/null
+++ b/include/soc/qcom/tcs.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SOC_QCOM_TCS_H__
+#define __SOC_QCOM_TCS_H__
+
+#define MAX_RPMH_PAYLOAD 16
+
+/**
+ * rpmh_state: state for the request
+ *
+ * RPMH_SLEEP_STATE: State of the resource when the processor subsystem
+ * is powered down. There is no client using the
+ * resource actively.
+ * RPMH_WAKE_ONLY_STATE: Resume resource state to the value previously
+ * requested before the processor was powered down.
+ * RPMH_ACTIVE_ONLY_STATE: Active or AMC mode requests. Resource state
+ * is aggregated immediately.
+ */
+enum rpmh_state {
+ RPMH_SLEEP_STATE,
+ RPMH_WAKE_ONLY_STATE,
+ RPMH_ACTIVE_ONLY_STATE,
+};
+
+/**
+ * struct tcs_cmd: an individual request to RPMH.
+ *
+ * @addr: the address of the resource: slv_id[18:16] | offset[15:0]
+ * @data: the resource state request
+ * @wait: wait for this request to be complete before sending the next
+ */
+struct tcs_cmd {
+ u32 addr;
+ u32 data;
+ u32 wait;
+};
+
+/**
+ * struct tcs_request: A set of tcs_cmds sent together in a TCS
+ *
+ * @state: state for the request.
+ * @wait_for_compl: wait until we get a response from the h/w accelerator
+ * @num_cmds: the number of @cmds in this request
+ * @cmds: an array of tcs_cmds
+ */
+struct tcs_request {
+ enum rpmh_state state;
+ u32 wait_for_compl;
+ u32 num_cmds;
+ struct tcs_cmd *cmds;
+};
+
+#endif /* __SOC_QCOM_TCS_H__ */
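
A hypothetical user of the two new headers: build one tcs_cmd and request the same level in both the active and sleep sets. The address and data are made up for illustration; on real platforms resource addresses come from the command DB, and dev must be a child of the RPMH controller:

	struct tcs_cmd cmd = {
		.addr = 0x30010,	/* slv_id | offset; illustrative only */
		.data = 0x1,
		.wait = 1,		/* complete before the next command */
	};
	int ret;

	ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
	if (!ret)
		ret = rpmh_write(dev, RPMH_SLEEP_STATE, &cmd, 1);
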
diff --git a/include/sound/ac97/codec.h b/include/sound/ac97/codec.h
index ec04be9ab119..9792d25fa369 100644
--- a/include/sound/ac97/codec.h
+++ b/include/sound/ac97/codec.h
@@ -1,10 +1,8 @@
-/*
- * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+/* SPDX-License-Identifier: GPL-2.0
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
*/
+
#ifndef __SOUND_AC97_CODEC2_H
#define __SOUND_AC97_CODEC2_H
diff --git a/include/sound/ac97/compat.h b/include/sound/ac97/compat.h
index 1351cba40048..57e19afa31ab 100644
--- a/include/sound/ac97/compat.h
+++ b/include/sound/ac97/compat.h
@@ -1,14 +1,11 @@
-/*
- * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+/* SPDX-License-Identifier: GPL-2.0
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
*
* This file is for backward compatibility with snd_ac97 structure and its
* multiple usages, such as the snd_ac97_bus and snd_ac97_build_ops.
- *
*/
+
#ifndef AC97_COMPAT_H
#define AC97_COMPAT_H
diff --git a/include/sound/ac97/controller.h b/include/sound/ac97/controller.h
index b36ecdd64f14..06b5afb7fa6b 100644
--- a/include/sound/ac97/controller.h
+++ b/include/sound/ac97/controller.h
@@ -1,10 +1,8 @@
-/*
- * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+/* SPDX-License-Identifier: GPL-2.0
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
*/
+
#ifndef AC97_CONTROLLER_H
#define AC97_CONTROLLER_H
diff --git a/include/sound/ac97/regs.h b/include/sound/ac97/regs.h
index 9a4fa0c3264a..843f73f3705a 100644
--- a/include/sound/ac97/regs.h
+++ b/include/sound/ac97/regs.h
@@ -1,27 +1,11 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Universal interface for Audio Codec '97
*
* For more details look to AC '97 component specification revision 2.1
* by Intel Corporation (http://developer.intel.com).
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
-
/*
* AC'97 codec registers
*/
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 89d311a503d3..cc383991c0fe 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -1,30 +1,15 @@
-#ifndef __SOUND_AC97_CODEC_H
-#define __SOUND_AC97_CODEC_H
-
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Universal interface for Audio Codec '97
*
* For more details look to AC '97 component specification revision 2.1
* by Intel Corporation (http://developer.intel.com).
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
+#ifndef __SOUND_AC97_CODEC_H
+#define __SOUND_AC97_CODEC_H
+
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/workqueue.h>
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 9924bc9cbc7c..ea8c93bbb0e0 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -1,27 +1,12 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
 * compress_driver.h - compress offload driver definitions
*
* Copyright (C) 2011 Intel Corporation
* Authors: Vinod Koul <vinod.koul@linux.intel.com>
* Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
*/
+
#ifndef __COMPRESS_DRIVER_H
#define __COMPRESS_DRIVER_H
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index e3481eebdd98..2c4cfaa135a6 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -1,17 +1,9 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright (C) 2012, Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
+
#ifndef __SOUND_DMAENGINE_PCM_H__
#define __SOUND_DMAENGINE_PCM_H__
diff --git a/include/sound/hda_component.h b/include/sound/hda_component.h
new file mode 100644
index 000000000000..78626cde7081
--- /dev/null
+++ b/include/sound/hda_component.h
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+// HD-Audio helpers to sync with DRM driver
+
+#ifndef __SOUND_HDA_COMPONENT_H
+#define __SOUND_HDA_COMPONENT_H
+
+#include <drm/drm_audio_component.h>
+
+#ifdef CONFIG_SND_HDA_COMPONENT
+int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
+int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
+ int dev_id, int rate);
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
+ bool *audio_enabled, char *buffer, int max_bytes);
+int snd_hdac_acomp_init(struct hdac_bus *bus,
+ const struct drm_audio_component_audio_ops *aops,
+ int (*match_master)(struct device *, void *),
+ size_t extra_size);
+int snd_hdac_acomp_exit(struct hdac_bus *bus);
+int snd_hdac_acomp_register_notifier(struct hdac_bus *bus,
+ const struct drm_audio_component_audio_ops *ops);
+#else
+static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
+{
+ return 0;
+}
+static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
+{
+ return 0;
+}
+static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
+ hda_nid_t nid, int dev_id, int rate)
+{
+ return 0;
+}
+static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
+ int dev_id, bool *audio_enabled,
+ char *buffer, int max_bytes)
+{
+ return -ENODEV;
+}
+static inline int snd_hdac_acomp_init(struct hdac_bus *bus,
+ const struct drm_audio_component_audio_ops *aops,
+ int (*match_master)(struct device *, void *),
+ size_t extra_size)
+{
+ return -ENODEV;
+}
+static inline int snd_hdac_acomp_exit(struct hdac_bus *bus)
+{
+ return 0;
+}
+static inline int snd_hdac_acomp_register_notifier(struct hdac_bus *bus,
+ const struct drm_audio_component_audio_ops *ops)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __SOUND_HDA_COMPONENT_H */
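
A sketch of the usual bracketing with the generalized helpers above, which now ride on drm_audio_component rather than the i915-only interface; mydrv_probe_codecs() is an assumed driver helper and error handling is trimmed:

	int err;

	snd_hdac_display_power(bus, true);	/* wake display HW for codec verbs */
	err = mydrv_probe_codecs(bus);
	snd_hdac_display_power(bus, false);
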
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index a94f5b6f92ac..6b79614a893b 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -5,54 +5,23 @@
#ifndef __SOUND_HDA_I915_H
#define __SOUND_HDA_I915_H
-#include <drm/i915_component.h>
+#include "hda_component.h"
#ifdef CONFIG_SND_HDA_I915
-int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
-int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
-int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
- int dev_id, int rate);
-int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
- bool *audio_enabled, char *buffer, int max_bytes);
int snd_hdac_i915_init(struct hdac_bus *bus);
-int snd_hdac_i915_exit(struct hdac_bus *bus);
-int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *);
#else
-static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
-{
- return 0;
-}
-static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
-{
- return 0;
-}
static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
}
-static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
- hda_nid_t nid, int dev_id, int rate)
-{
- return 0;
-}
-static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
- int dev_id, bool *audio_enabled,
- char *buffer, int max_bytes)
-{
- return -ENODEV;
-}
static inline int snd_hdac_i915_init(struct hdac_bus *bus)
{
return -ENODEV;
}
+#endif
static inline int snd_hdac_i915_exit(struct hdac_bus *bus)
{
- return 0;
+ return snd_hdac_acomp_exit(bus);
}
-static inline int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *ops)
-{
- return -ENODEV;
-}
-#endif
#endif /* __SOUND_HDA_I915_H */
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index c052afc27547..6f1e1f3b3063 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -8,8 +8,10 @@
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <linux/timecounter.h>
#include <sound/core.h>
+#include <sound/pcm.h>
#include <sound/memalloc.h>
#include <sound/hda_verbs.h>
#include <drm/i915_component.h>
@@ -132,7 +134,7 @@ int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid,
hda_nid_t *start_id);
unsigned int snd_hdac_calc_stream_format(unsigned int rate,
unsigned int channels,
- unsigned int format,
+ snd_pcm_format_t format,
unsigned int maxbps,
unsigned short spdif_ctls);
int snd_hdac_query_supported_pcm(struct hdac_device *codec, hda_nid_t nid,
@@ -171,12 +173,38 @@ int snd_hdac_power_down(struct hdac_device *codec);
int snd_hdac_power_up_pm(struct hdac_device *codec);
int snd_hdac_power_down_pm(struct hdac_device *codec);
int snd_hdac_keep_power_up(struct hdac_device *codec);
+
+/* call this when entering the suspend/resume callbacks in the codec driver */
+static inline void snd_hdac_enter_pm(struct hdac_device *codec)
+{
+ atomic_inc(&codec->in_pm);
+}
+
+/* call this when leaving the suspend/resume callbacks in the codec driver */
+static inline void snd_hdac_leave_pm(struct hdac_device *codec)
+{
+ atomic_dec(&codec->in_pm);
+}
+
+static inline bool snd_hdac_is_in_pm(struct hdac_device *codec)
+{
+ return atomic_read(&codec->in_pm);
+}
+
+static inline bool snd_hdac_is_power_on(struct hdac_device *codec)
+{
+ return !pm_runtime_suspended(&codec->dev);
+}
#else
static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; }
+static inline void snd_hdac_enter_pm(struct hdac_device *codec) {}
+static inline void snd_hdac_leave_pm(struct hdac_device *codec) {}
+static inline bool snd_hdac_is_in_pm(struct hdac_device *codec) { return false; }
+static inline bool snd_hdac_is_power_on(struct hdac_device *codec) { return true; }
#endif
/*
@@ -188,6 +216,11 @@ struct hdac_driver {
const struct hda_device_id *id_table;
int (*match)(struct hdac_device *dev, struct hdac_driver *drv);
void (*unsol_event)(struct hdac_device *dev, unsigned int event);
+
+ /* fields used by ext bus APIs */
+ int (*probe)(struct hdac_device *dev);
+ int (*remove)(struct hdac_device *dev);
+ void (*shutdown)(struct hdac_device *dev);
};
#define drv_to_hdac_driver(_drv) container_of(_drv, struct hdac_driver, driver)
@@ -209,6 +242,14 @@ struct hdac_bus_ops {
};
/*
+ * ops used for ASoC HDA codec drivers
+ */
+struct hdac_ext_bus_ops {
+ int (*hdev_attach)(struct hdac_device *hdev);
+ int (*hdev_detach)(struct hdac_device *hdev);
+};
+
+/*
* Lowlevel I/O operators
*/
struct hdac_io_ops {
@@ -250,11 +291,17 @@ struct hdac_rb {
* @mlcap: MultiLink capabilities pointer
* @gtscap: gts capabilities pointer
* @drsmcap: dma resume capabilities pointer
+ * @num_streams: streams supported
+ * @idx: HDA link index
+ * @hlink_list: link list of HDA links
+ * @lock: lock for link mgmt
+ * @cmd_dma_state: state of cmd DMAs: CORB and RIRB
*/
struct hdac_bus {
struct device *dev;
const struct hdac_bus_ops *ops;
const struct hdac_io_ops *io_ops;
+ const struct hdac_ext_bus_ops *ext_ops;
/* h/w resources */
unsigned long addr;
@@ -314,9 +361,19 @@ struct hdac_bus {
spinlock_t reg_lock;
struct mutex cmd_mutex;
- /* i915 component interface */
- struct i915_audio_component *audio_component;
- int i915_power_refcount;
+ /* DRM component interface */
+ struct drm_audio_component *audio_component;
+ int drm_power_refcount;
+
+ /* parameters required for enhanced capabilities */
+ int num_streams;
+ int idx;
+
+ struct list_head hlink_list;
+
+ struct mutex lock;
+ bool cmd_dma_state;
+
};
int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
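
The enter/leave pair added above is meant to bracket a codec driver's suspend/resume paths so that snd_hdac_is_in_pm() readers can skip runtime-PM re-entry. A minimal sketch, with mydrv_save_state() as an assumed helper:

static int mydrv_codec_suspend(struct hdac_device *codec)
{
	int ret;

	snd_hdac_enter_pm(codec);
	ret = mydrv_save_state(codec);	/* assumed helper */
	snd_hdac_leave_pm(codec);
	return ret;
}
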
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 9c14e21dda85..f34aced69ca8 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -4,38 +4,16 @@
#include <sound/hdaudio.h>
-/**
- * hdac_ext_bus: HDAC extended bus for extended HDA caps
- *
- * @bus: hdac bus
- * @num_streams: streams supported
- * @hlink_list: link list of HDA links
- * @lock: lock for link mgmt
- * @cmd_dma_state: state of cmd DMAs: CORB and RIRB
- */
-struct hdac_ext_bus {
- struct hdac_bus bus;
- int num_streams;
- int idx;
-
- struct list_head hlink_list;
-
- struct mutex lock;
- bool cmd_dma_state;
-};
-
-int snd_hdac_ext_bus_init(struct hdac_ext_bus *sbus, struct device *dev,
+int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
const struct hdac_bus_ops *ops,
- const struct hdac_io_ops *io_ops);
+ const struct hdac_io_ops *io_ops,
+ const struct hdac_ext_bus_ops *ext_ops);
-void snd_hdac_ext_bus_exit(struct hdac_ext_bus *sbus);
-int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *sbus, int addr);
+void snd_hdac_ext_bus_exit(struct hdac_bus *bus);
+int snd_hdac_ext_bus_device_init(struct hdac_bus *bus, int addr,
+ struct hdac_device *hdev);
void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
-void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
-
-#define ebus_to_hbus(ebus) (&(ebus)->bus)
-#define hbus_to_ebus(_bus) \
- container_of(_bus, struct hdac_ext_bus, bus)
+void snd_hdac_ext_bus_device_remove(struct hdac_bus *bus);
#define HDA_CODEC_REV_EXT_ENTRY(_vid, _rev, _name, drv_data) \
{ .vendor_id = (_vid), .rev_id = (_rev), .name = (_name), \
@@ -44,14 +22,14 @@ void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
#define HDA_CODEC_EXT_ENTRY(_vid, _revid, _name, _drv_data) \
HDA_CODEC_REV_EXT_ENTRY(_vid, _revid, _name, _drv_data)
-void snd_hdac_ext_bus_ppcap_enable(struct hdac_ext_bus *chip, bool enable);
-void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_ext_bus *chip, bool enable);
+void snd_hdac_ext_bus_ppcap_enable(struct hdac_bus *chip, bool enable);
+void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_bus *chip, bool enable);
-void snd_hdac_ext_stream_spbcap_enable(struct hdac_ext_bus *chip,
+void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *chip,
bool enable, int index);
-int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_ext_bus *bus);
-struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_ext_bus *bus,
+int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_bus *bus);
+struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_bus *bus,
const char *codec_name);
enum hdac_ext_stream_type {
@@ -100,28 +78,28 @@ struct hdac_ext_stream {
#define stream_to_hdac_ext_stream(s) \
container_of(s, struct hdac_ext_stream, hstream)
-void snd_hdac_ext_stream_init(struct hdac_ext_bus *bus,
+void snd_hdac_ext_stream_init(struct hdac_bus *bus,
struct hdac_ext_stream *stream, int idx,
int direction, int tag);
-int snd_hdac_ext_stream_init_all(struct hdac_ext_bus *ebus, int start_idx,
+int snd_hdac_ext_stream_init_all(struct hdac_bus *bus, int start_idx,
int num_stream, int dir);
-void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus);
-void snd_hdac_link_free_all(struct hdac_ext_bus *ebus);
-struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_ext_bus *bus,
+void snd_hdac_stream_free_all(struct hdac_bus *bus);
+void snd_hdac_link_free_all(struct hdac_bus *bus);
+struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream,
int type);
void snd_hdac_ext_stream_release(struct hdac_ext_stream *azx_dev, int type);
-void snd_hdac_ext_stream_decouple(struct hdac_ext_bus *bus,
+void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
struct hdac_ext_stream *azx_dev, bool decouple);
-void snd_hdac_ext_stop_streams(struct hdac_ext_bus *sbus);
+void snd_hdac_ext_stop_streams(struct hdac_bus *bus);
-int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus,
+int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus,
struct hdac_ext_stream *stream, u32 value);
-int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus,
+int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_bus *bus,
struct hdac_ext_stream *stream);
-void snd_hdac_ext_stream_drsm_enable(struct hdac_ext_bus *ebus,
+void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
bool enable, int index);
-int snd_hdac_ext_stream_set_dpibr(struct hdac_ext_bus *ebus,
+int snd_hdac_ext_stream_set_dpibr(struct hdac_bus *bus,
struct hdac_ext_stream *stream, u32 value);
int snd_hdac_ext_stream_set_lpib(struct hdac_ext_stream *stream, u32 value);
@@ -144,17 +122,15 @@ struct hdac_ext_link {
int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link);
int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link);
-int snd_hdac_ext_bus_link_power_up_all(struct hdac_ext_bus *ebus);
-int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus);
+int snd_hdac_ext_bus_link_power_up_all(struct hdac_bus *bus);
+int snd_hdac_ext_bus_link_power_down_all(struct hdac_bus *bus);
void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
int stream);
void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
int stream);
-int snd_hdac_ext_bus_link_get(struct hdac_ext_bus *ebus,
- struct hdac_ext_link *link);
-int snd_hdac_ext_bus_link_put(struct hdac_ext_bus *ebus,
- struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_get(struct hdac_bus *bus, struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *link);
/* update register macro */
#define snd_hdac_updatel(addr, reg, mask, val) \
@@ -181,53 +157,12 @@ struct hda_dai_map {
u32 maxbps;
};
-#define HDA_MAX_NIDS 16
-
-/**
- * struct hdac_ext_device - HDAC Ext device
- *
- * @hdac: hdac core device
- * @nid_list - the dai map which matches the dai-name with the nid
- * @map_cur_idx - the idx in use in dai_map
- * @ops - the hda codec ops common to all codec drivers
- * @pvt_data - private data, for asoc contains asoc codec object
- */
-struct hdac_ext_device {
- struct hdac_device hdev;
- struct hdac_ext_bus *ebus;
-
- /* soc-dai to nid map */
- struct hda_dai_map nid_list[HDA_MAX_NIDS];
- unsigned int map_cur_idx;
-
- /* codec ops */
- struct hdac_ext_codec_ops ops;
-
- struct snd_card *card;
- void *scodec;
- void *private_data;
-};
-
struct hdac_ext_dma_params {
u32 format;
u8 stream_tag;
};
-#define to_ehdac_device(dev) (container_of((dev), \
- struct hdac_ext_device, hdev))
-/*
- * HD-audio codec base driver
- */
-struct hdac_ext_driver {
- struct hdac_driver hdac;
-
- int (*probe)(struct hdac_ext_device *dev);
- int (*remove)(struct hdac_ext_device *dev);
- void (*shutdown)(struct hdac_ext_device *dev);
-};
-
-int snd_hda_ext_driver_register(struct hdac_ext_driver *drv);
-void snd_hda_ext_driver_unregister(struct hdac_ext_driver *drv);
-#define to_ehdac_driver(_drv) container_of(_drv, struct hdac_ext_driver, hdac)
+int snd_hda_ext_driver_register(struct hdac_driver *drv);
+void snd_hda_ext_driver_unregister(struct hdac_driver *drv);
#endif /* __SOUND_HDAUDIO_EXT_H */
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 9c3db3dce32b..67561b997915 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -24,6 +24,8 @@
#ifndef __SOUND_MEMALLOC_H
#define __SOUND_MEMALLOC_H
+#include <asm/page.h>
+
struct device;
/*
@@ -67,6 +69,14 @@ struct snd_dma_buffer {
void *private_data; /* private for allocator; don't touch */
};
+/*
+ * return the pages matching with the given byte size
+ */
+static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
+{
+ return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+}
+
#ifdef CONFIG_SND_DMA_SGBUF
/*
* Scatter-Gather generic device pages
@@ -91,14 +101,6 @@ struct snd_sg_buf {
};
/*
- * return the pages matching with the given byte size
- */
-static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
-{
- return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-}
-
-/*
* return the physical address at the corresponding offset
*/
static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
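
Moving snd_sgbuf_aligned_pages() out of the CONFIG_SND_DMA_SGBUF block makes this plain round-up-to-pages helper usable by non-SG code as well; e.g. with 4 KiB pages:

	snd_sgbuf_aligned_pages(1);	/* -> 1 */
	snd_sgbuf_aligned_pages(4096);	/* -> 1 */
	snd_sgbuf_aligned_pages(4097);	/* -> 2 */
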
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index e054c583d3b3..d6bd3caf6878 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -462,6 +462,7 @@ struct snd_pcm_substream {
/* -- timer section -- */
struct snd_timer *timer; /* timer */
 unsigned timer_running: 1; /* timer is running */
+ long wait_time; /* time in ms for R/W to wait for avail */
/* -- next substream -- */
struct snd_pcm_substream *next;
/* -- linked substreams -- */
@@ -1089,14 +1090,14 @@ static inline snd_pcm_sframes_t
snd_pcm_lib_write(struct snd_pcm_substream *substream,
const void __user *buf, snd_pcm_uframes_t frames)
{
- return __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, false);
+ return __snd_pcm_lib_xfer(substream, (void __force *)buf, true, frames, false);
}
static inline snd_pcm_sframes_t
snd_pcm_lib_read(struct snd_pcm_substream *substream,
void __user *buf, snd_pcm_uframes_t frames)
{
- return __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, false);
+ return __snd_pcm_lib_xfer(substream, (void __force *)buf, true, frames, false);
}
static inline snd_pcm_sframes_t
@@ -1341,8 +1342,6 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s
#define snd_pcm_lib_mmap_iomem NULL
#endif
-#define snd_pcm_lib_mmap_vmalloc NULL
-
/**
* snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer
* @dma: DMA number
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index c704357775fc..2dd37cada7c0 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -87,6 +87,13 @@ static inline void snd_mask_set(struct snd_mask *mask, unsigned int val)
mask->bits[MASK_OFS(val)] |= MASK_BIT(val);
}
+/* Most drivers need only this one */
+static inline void snd_mask_set_format(struct snd_mask *mask,
+ snd_pcm_format_t format)
+{
+ snd_mask_set(mask, (__force unsigned int)format);
+}
+
static inline void snd_mask_reset(struct snd_mask *mask, unsigned int val)
{
mask->bits[MASK_OFS(val)] &= ~MASK_BIT(val);
@@ -369,8 +376,7 @@ static inline int params_physical_width(const struct snd_pcm_hw_params *p)
static inline void
params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt)
{
- snd_mask_set(hw_param_mask(p, SNDRV_PCM_HW_PARAM_FORMAT),
- (__force int)fmt);
+ snd_mask_set_format(hw_param_mask(p, SNDRV_PCM_HW_PARAM_FORMAT), fmt);
}
#endif /* __SOUND_PCM_PARAMS_H */
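
A sketch of the typed helper above in a hw_params fixup that pins a single format, avoiding the open-coded __force cast; mydrv_* is a made-up name and S24_LE an arbitrary choice:

static int mydrv_hw_params_fixup(struct snd_pcm_hw_params *params)
{
	struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);

	snd_mask_none(fmt);				/* clear all formats */
	snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
	return 0;
}
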
diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h
index 63f75450d3db..6758fc12fa84 100644
--- a/include/sound/pxa2xx-lib.h
+++ b/include/sound/pxa2xx-lib.h
@@ -8,20 +8,23 @@
/* PCM */
struct snd_pcm_substream;
struct snd_pcm_hw_params;
+struct snd_soc_pcm_runtime;
struct snd_pcm;
-extern int __pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
+extern int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
-extern int __pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream);
+extern int pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream);
extern int pxa2xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
extern snd_pcm_uframes_t pxa2xx_pcm_pointer(struct snd_pcm_substream *substream);
-extern int __pxa2xx_pcm_prepare(struct snd_pcm_substream *substream);
-extern int __pxa2xx_pcm_open(struct snd_pcm_substream *substream);
-extern int __pxa2xx_pcm_close(struct snd_pcm_substream *substream);
+extern int pxa2xx_pcm_prepare(struct snd_pcm_substream *substream);
+extern int pxa2xx_pcm_open(struct snd_pcm_substream *substream);
+extern int pxa2xx_pcm_close(struct snd_pcm_substream *substream);
extern int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma);
extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream);
extern void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm);
+extern int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd);
+extern const struct snd_pcm_ops pxa2xx_pcm_ops;
/* AC97 */
diff --git a/include/sound/rt5682.h b/include/sound/rt5682.h
new file mode 100644
index 000000000000..0251797ab438
--- /dev/null
+++ b/include/sound/rt5682.h
@@ -0,0 +1,40 @@
+/*
+ * linux/sound/rt5682.h -- Platform data for RT5682
+ *
+ * Copyright 2018 Realtek Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT5682_H
+#define __LINUX_SND_RT5682_H
+
+enum rt5682_dmic1_data_pin {
+ RT5682_DMIC1_NULL,
+ RT5682_DMIC1_DATA_GPIO2,
+ RT5682_DMIC1_DATA_GPIO5,
+};
+
+enum rt5682_dmic1_clk_pin {
+ RT5682_DMIC1_CLK_GPIO1,
+ RT5682_DMIC1_CLK_GPIO3,
+};
+
+enum rt5682_jd_src {
+ RT5682_JD_NULL,
+ RT5682_JD1,
+};
+
+struct rt5682_platform_data {
+
+ int ldo1_en; /* GPIO for LDO1_EN */
+
+ enum rt5682_dmic1_data_pin dmic1_data_pin;
+ enum rt5682_dmic1_clk_pin dmic1_clk_pin;
+ enum rt5682_jd_src jd_src;
+};
+
+#endif
+
diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
index c7c7788005e4..7817e88bd08d 100644
--- a/include/sound/sb16_csp.h
+++ b/include/sound/sb16_csp.h
@@ -46,7 +46,7 @@ enum {
struct snd_sb_csp_ops {
int (*csp_use) (struct snd_sb_csp * p);
int (*csp_unuse) (struct snd_sb_csp * p);
- int (*csp_autoload) (struct snd_sb_csp * p, int pcm_sfmt, int play_rec_mode);
+ int (*csp_autoload) (struct snd_sb_csp * p, snd_pcm_format_t pcm_sfmt, int play_rec_mode);
int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
int (*csp_stop) (struct snd_sb_csp * p);
int (*csp_qsound_transfer) (struct snd_sb_csp * p);
diff --git a/include/sound/seq_midi_event.h b/include/sound/seq_midi_event.h
index e40f43e6fc7b..2f135bccf457 100644
--- a/include/sound/seq_midi_event.h
+++ b/include/sound/seq_midi_event.h
@@ -43,10 +43,8 @@ void snd_midi_event_free(struct snd_midi_event *dev);
void snd_midi_event_reset_encode(struct snd_midi_event *dev);
void snd_midi_event_reset_decode(struct snd_midi_event *dev);
void snd_midi_event_no_status(struct snd_midi_event *dev, int on);
-/* encode from byte stream - return number of written bytes if success */
-long snd_midi_event_encode(struct snd_midi_event *dev, unsigned char *buf, long count,
- struct snd_seq_event *ev);
-int snd_midi_event_encode_byte(struct snd_midi_event *dev, int c, struct snd_seq_event *ev);
+bool snd_midi_event_encode_byte(struct snd_midi_event *dev, unsigned char c,
+ struct snd_seq_event *ev);
/* decode from event to bytes - return number of written bytes if success */
long snd_midi_event_decode(struct snd_midi_event *dev, unsigned char *buf, long count,
struct snd_seq_event *ev);
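
The bool return of the reworked snd_midi_event_encode_byte() signals a completed event, which makes the byte-at-a-time feed loop straightforward; mydrv_dispatch() is an assumed consumer:

	struct snd_seq_event ev;

	while (count--) {
		if (snd_midi_event_encode_byte(dev, *buf++, &ev))
			mydrv_dispatch(&ev);	/* event fully assembled */
	}
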
diff --git a/include/sound/seq_virmidi.h b/include/sound/seq_virmidi.h
index 695257ae64ac..796ce7772213 100644
--- a/include/sound/seq_virmidi.h
+++ b/include/sound/seq_virmidi.h
@@ -36,11 +36,12 @@ struct snd_virmidi {
int seq_mode;
int client;
int port;
- unsigned int trigger: 1;
+ bool trigger;
struct snd_midi_event *parser;
struct snd_seq_event event;
struct snd_virmidi_dev *rdev;
struct snd_rawmidi_substream *substream;
+ struct work_struct output_work;
};
#define SNDRV_VIRMIDI_SUBSCRIBE (1<<0)
diff --git a/include/sound/sh_fsi.h b/include/sound/sh_fsi.h
index 7a9710b4b799..89eafe23ef88 100644
--- a/include/sound/sh_fsi.h
+++ b/include/sound/sh_fsi.h
@@ -1,16 +1,13 @@
-#ifndef __SOUND_FSI_H
-#define __SOUND_FSI_H
-
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* Fifo-attached Serial Interface (FSI) support for SH7724
*
* Copyright (C) 2009 Renesas Solutions Corp.
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+#ifndef __SOUND_FSI_H
+#define __SOUND_FSI_H
+
#include <linux/clk.h>
#include <sound/soc.h>
diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h
index a6a2e1547092..d264e5463f22 100644
--- a/include/sound/simple_card.h
+++ b/include/sound/simple_card.h
@@ -1,12 +1,9 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* ASoC simple sound card support
*
* Copyright (C) 2012 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __SIMPLE_CARD_H
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 7e25afce6566..8bc5e2d8b13c 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -1,17 +1,20 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* simple_card_utils.h
*
* Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+
#ifndef __SIMPLE_CARD_UTILS_H
#define __SIMPLE_CARD_UTILS_H
#include <sound/soc.h>
+#define asoc_simple_card_init_hp(card, sjack, prefix) \
+ asoc_simple_card_init_jack(card, sjack, 1, prefix)
+#define asoc_simple_card_init_mic(card, sjack, prefix) \
+ asoc_simple_card_init_jack(card, sjack, 0, prefix)
+
struct asoc_simple_dai {
const char *name;
unsigned int sysclk;
@@ -28,6 +31,12 @@ struct asoc_simple_card_data {
u32 convert_channels;
};
+struct asoc_simple_jack {
+ struct snd_soc_jack jack;
+ struct snd_soc_jack_pin pin;
+ struct snd_soc_jack_gpio gpio;
+};
+
int asoc_simple_card_parse_daifmt(struct device *dev,
struct device_node *node,
struct device_node *codec,
@@ -107,4 +116,8 @@ int asoc_simple_card_of_parse_routing(struct snd_soc_card *card,
int asoc_simple_card_of_parse_widgets(struct snd_soc_card *card,
char *prefix);
+int asoc_simple_card_init_jack(struct snd_soc_card *card,
+ struct asoc_simple_jack *sjack,
+ int is_hp, char *prefix);
+
#endif /* __SIMPLE_CARD_UTILS_H */
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 9da6388c20a1..bb1d24b703fb 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -1,16 +1,6 @@
-
-/*
- * Copyright (C) 2017, Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+/* SPDX-License-Identifier: GPL-2.0
*
+ * Copyright (C) 2017, Intel Corporation. All rights reserved.
*/
#ifndef __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
@@ -29,5 +19,10 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_legacy_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_skl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[];
#endif
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 082224275f52..e45b2330d16a 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -1,15 +1,6 @@
-/*
- * Copyright (C) 2013-15, Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+/* SPDX-License-Identifier: GPL-2.0
*
+ * Copyright (C) 2013-15, Intel Corporation. All rights reserved.
*/
#ifndef __LINUX_SND_SOC_ACPI_H
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index e6f8c40ed43c..f5d70041108f 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -1,12 +1,9 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc-dai.h -- ALSA SoC Layer
*
* Copyright: 2005-2008 Wolfson Microelectronics. PLC.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Digital Audio Interface (DAI) API.
*/
@@ -141,6 +138,11 @@ int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate);
int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
int direction);
+
+int snd_soc_dai_get_channel_map(struct snd_soc_dai *dai,
+ unsigned int *tx_num, unsigned int *tx_slot,
+ unsigned int *rx_num, unsigned int *rx_slot);
+
int snd_soc_dai_is_dummy(struct snd_soc_dai *dai);
struct snd_soc_dai_ops {
@@ -168,6 +170,9 @@ struct snd_soc_dai_ops {
int (*set_channel_map)(struct snd_soc_dai *dai,
unsigned int tx_num, unsigned int *tx_slot,
unsigned int rx_num, unsigned int *rx_slot);
+ int (*get_channel_map)(struct snd_soc_dai *dai,
+ unsigned int *tx_num, unsigned int *tx_slot,
+ unsigned int *rx_num, unsigned int *rx_slot);
int (*set_tristate)(struct snd_soc_dai *dai, int tristate);
int (*set_sdw_stream)(struct snd_soc_dai *dai,
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index a6ce2de4e20a..af9ef16cc34d 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -1,13 +1,10 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc-dapm.h -- ALSA SoC Dynamic Audio Power Management
*
- * Author: Liam Girdwood
- * Created: Aug 11th 2005
+ * Author: Liam Girdwood
+ * Created: Aug 11th 2005
* Copyright: Wolfson Microelectronics. PLC.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_SND_SOC_DAPM_H
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 806059052bfc..9bb92f187af8 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc-dpcm.h -- ALSA SoC Dynamic PCM Support
*
* Author: Liam Girdwood <lrg@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_SND_SOC_DPCM_H
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index f552c3f56368..fa4b8413d2e2 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -1,13 +1,10 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc-topology.h -- ALSA SoC Firmware Controls and DAPM
*
* Copyright (C) 2012 Texas Instruments Inc.
* Copyright (C) 2015 Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Simple file API to load FW that includes mixers, coefficients, DAPM graphs,
* algorithms, equalisers, DAIs, widgets, FE caps, BE caps, codec link caps etc.
*/
@@ -30,6 +27,9 @@ struct snd_soc_dapm_context;
struct snd_soc_card;
struct snd_kcontrol_new;
struct snd_soc_dai_link;
+struct snd_soc_dai_driver;
+struct snd_soc_dai;
+struct snd_soc_dapm_route;
/* objects can be loaded and unloaded in groups with identifying indexes */
#define SND_SOC_TPLG_INDEX_ALL 0 /* ID that matches all FW objects */
@@ -109,35 +109,44 @@ struct snd_soc_tplg_widget_events {
struct snd_soc_tplg_ops {
/* external kcontrol init - used for any driver specific init */
- int (*control_load)(struct snd_soc_component *,
+ int (*control_load)(struct snd_soc_component *, int index,
struct snd_kcontrol_new *, struct snd_soc_tplg_ctl_hdr *);
int (*control_unload)(struct snd_soc_component *,
struct snd_soc_dobj *);
+ /* DAPM graph route element loading and unloading */
+ int (*dapm_route_load)(struct snd_soc_component *, int index,
+ struct snd_soc_dapm_route *route);
+ int (*dapm_route_unload)(struct snd_soc_component *,
+ struct snd_soc_dobj *);
+
/* external widget init - used for any driver specific init */
- int (*widget_load)(struct snd_soc_component *,
+ int (*widget_load)(struct snd_soc_component *, int index,
struct snd_soc_dapm_widget *,
struct snd_soc_tplg_dapm_widget *);
- int (*widget_ready)(struct snd_soc_component *,
+ int (*widget_ready)(struct snd_soc_component *, int index,
struct snd_soc_dapm_widget *,
struct snd_soc_tplg_dapm_widget *);
int (*widget_unload)(struct snd_soc_component *,
struct snd_soc_dobj *);
/* FE DAI - used for any driver specific init */
- int (*dai_load)(struct snd_soc_component *,
- struct snd_soc_dai_driver *dai_drv);
+ int (*dai_load)(struct snd_soc_component *, int index,
+ struct snd_soc_dai_driver *dai_drv,
+ struct snd_soc_tplg_pcm *pcm, struct snd_soc_dai *dai);
+
int (*dai_unload)(struct snd_soc_component *,
struct snd_soc_dobj *);
/* DAI link - used for any driver specific init */
- int (*link_load)(struct snd_soc_component *,
- struct snd_soc_dai_link *link);
+ int (*link_load)(struct snd_soc_component *, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg);
int (*link_unload)(struct snd_soc_component *,
struct snd_soc_dobj *);
/* callback to handle vendor bespoke data */
- int (*vendor_load)(struct snd_soc_component *,
+ int (*vendor_load)(struct snd_soc_component *, int index,
struct snd_soc_tplg_hdr *);
int (*vendor_unload)(struct snd_soc_component *,
struct snd_soc_tplg_hdr *);
@@ -146,7 +155,7 @@ struct snd_soc_tplg_ops {
void (*complete)(struct snd_soc_component *);
/* manifest - optional to inform component of manifest */
- int (*manifest)(struct snd_soc_component *,
+ int (*manifest)(struct snd_soc_component *, int index,
struct snd_soc_tplg_manifest *);
/* vendor specific kcontrol handlers available for binding */
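
All topology callbacks above gain an int index so a component can tell which topology load a callback belongs to. A sketch of a link_load implementation under the new signature; MYDRV_TPLG_INDEX and the nonatomic fixup are illustrative:

static int mydrv_link_load(struct snd_soc_component *comp, int index,
			   struct snd_soc_dai_link *link,
			   struct snd_soc_tplg_link_config *cfg)
{
	if (index != MYDRV_TPLG_INDEX)	/* assumed driver constant */
		return 0;
	link->nonatomic = true;		/* driver-specific fixup */
	return 0;
}
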
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 1378dcd2128a..41cec42fb456 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1,13 +1,10 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc.h -- ALSA SoC Layer
*
- * Author: Liam Girdwood
- * Created: Aug 11th 2005
+ * Author: Liam Girdwood
+ * Created: Aug 11th 2005
* Copyright: Wolfson Microelectronics. PLC.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_SND_SOC_H
@@ -806,6 +803,14 @@ struct snd_soc_component_driver {
unsigned int use_pmdown_time:1; /* care pmdown_time at stop */
unsigned int endianness:1;
unsigned int non_legacy_dai_naming:1;
+
+	/* this component uses topology and ignores machine driver FEs */
+ const char *ignore_machine;
+ const char *topology_name_prefix;
+ int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params);
+ bool use_dai_pcm_id; /* use the DAI link PCM ID as PCM device number */
+ int be_pcm_base; /* base device ID for all BE PCMs */
};
struct snd_soc_component {
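A hedged sketch of how a component driver might use the new topology-override fields; the names and values below are illustrative assumptions, not taken from this patch:

	static const struct snd_soc_component_driver my_component_drv = {
		.name			= "my-dsp",
		/* take over FE DAI links from this machine driver */
		.ignore_machine		= "my_machine_module",
		.topology_name_prefix	= "my",
		/* number PCM devices from the DAI link PCM ID ... */
		.use_dai_pcm_id		= true,
		/* ... and start BE PCM devices at 16 */
		.be_pcm_base		= 16,
	};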
@@ -957,10 +962,17 @@ struct snd_soc_dai_link {
/* DPCM used FE & BE merged format */
unsigned int dpcm_merged_format:1;
+ /* DPCM used FE & BE merged channel */
+ unsigned int dpcm_merged_chan:1;
+ /* DPCM used FE & BE merged rate */
+ unsigned int dpcm_merged_rate:1;
/* pmdown_time is ignored at stop */
unsigned int ignore_pmdown_time:1;
+ /* Do not create a PCM for this DAI link (Backend link) */
+ unsigned int ignore:1;
+
struct list_head list; /* DAI link list of the soc card */
struct snd_soc_dobj dobj; /* For topology */
};
@@ -1000,6 +1012,7 @@ struct snd_soc_card {
const char *long_name;
const char *driver_name;
char dmi_longname[80];
+ char topology_shortname[32];
struct device *dev;
struct snd_card *snd_card;
@@ -1009,6 +1022,7 @@ struct snd_soc_card {
struct mutex dapm_mutex;
bool instantiated;
+ bool topology_shortname_created;
int (*probe)(struct snd_soc_card *card);
int (*late_probe)(struct snd_soc_card *card);
@@ -1412,6 +1426,9 @@ int snd_soc_of_parse_card_name(struct snd_soc_card *card,
const char *propname);
int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
const char *propname);
+int snd_soc_of_get_slot_mask(struct device_node *np,
+ const char *prop_name,
+ unsigned int *mask);
int snd_soc_of_parse_tdm_slot(struct device_node *np,
unsigned int *tx_mask,
unsigned int *rx_mask,
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index cf5f3fff1f1a..f2e6abea8490 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -4,6 +4,7 @@
#include <linux/dma-direction.h> /* enum dma_data_direction */
#include <linux/list.h> /* struct list_head */
+#include <linux/sched.h>
#include <linux/socket.h> /* struct sockaddr_storage */
#include <linux/types.h> /* u8 */
#include <scsi/iscsi_proto.h> /* itt_t */
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 34a15d59ed88..51b6f50eabee 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -106,13 +106,15 @@ bool target_lun_is_rdonly(struct se_cmd *);
sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
-struct se_device *target_find_device(int id, bool do_depend);
-
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
struct request_queue *q);
+static inline bool target_dev_configured(struct se_device *se_dev)
+{
+ return !!(se_dev->dev_flags & DF_CONFIGURED);
+}
/* Only use get_unaligned_be24() if reading p - 1 is allowed. */
static inline uint32_t get_unaligned_be24(const uint8_t *const p)
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 922a39f45abc..7a4ee7852ca4 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -4,7 +4,7 @@
#include <linux/configfs.h> /* struct config_group */
#include <linux/dma-direction.h> /* enum dma_data_direction */
-#include <linux/percpu_ida.h> /* struct percpu_ida */
+#include <linux/sbitmap.h>
#include <linux/percpu-refcount.h>
#include <linux/semaphore.h> /* struct semaphore */
#include <linux/completion.h>
@@ -443,7 +443,6 @@ struct se_cmd {
u8 scsi_asc;
u8 scsi_ascq;
u16 scsi_sense_length;
- unsigned cmd_wait_set:1;
unsigned unknown_data_length:1;
bool state_active:1;
u64 tag; /* SAM command identifier aka task tag */
@@ -455,6 +454,7 @@ struct se_cmd {
int sam_task_attr;
/* Used for se_sess->sess_tag_pool */
unsigned int map_tag;
+ int map_cpu;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
/* See se_cmd_flags_table */
@@ -475,7 +475,7 @@ struct se_cmd {
struct se_session *se_sess;
struct se_tmr_req *se_tmr_req;
struct list_head se_cmd_list;
- struct completion cmd_wait_comp;
+ struct completion *compl;
const struct target_core_fabric_ops *se_tfo;
sense_reason_t (*execute_cmd)(struct se_cmd *);
sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
@@ -605,10 +605,10 @@ struct se_session {
struct list_head sess_list;
struct list_head sess_acl_list;
struct list_head sess_cmd_list;
- struct list_head sess_wait_list;
spinlock_t sess_cmd_lock;
+ wait_queue_head_t cmd_list_wq;
void *sess_cmd_map;
- struct percpu_ida sess_tag_pool;
+ struct sbitmap_queue sess_tag_pool;
};
struct se_device;
@@ -638,7 +638,6 @@ struct se_dev_entry {
atomic_long_t total_cmds;
atomic_long_t read_bytes;
atomic_long_t write_bytes;
- atomic_t ua_count;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
struct kref pr_kref;
struct completion pr_comp;
@@ -934,4 +933,9 @@ static inline void atomic_dec_mb(atomic_t *v)
smp_mb__after_atomic();
}
+static inline void target_free_tag(struct se_session *sess, struct se_cmd *cmd)
+{
+ sbitmap_queue_clear(&sess->sess_tag_pool, cmd->map_tag, cmd->map_cpu);
+}
+
#endif /* TARGET_CORE_BASE_H */
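target_free_tag() is the free half of the tag-pool API after the percpu_ida to sbitmap conversion above. A hedged sketch of the matching allocation side, as a fabric driver might write it (previously a percpu_ida_alloc() call; error handling trimmed):

	unsigned int cpu;
	int tag;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return NULL;			/* tag pool exhausted */

	cmd->map_tag = tag;
	cmd->map_cpu = cpu;	/* remembered so the free hits the same cache */
	/* ... submit cmd; on completion: */
	target_free_tag(se_sess, cmd);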
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index b297aa0d9651..f4147b398431 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -79,7 +79,7 @@ struct target_core_fabric_ops {
void (*fabric_drop_wwn)(struct se_wwn *);
void (*add_wwn_groups)(struct se_wwn *);
struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
- struct config_group *, const char *);
+ const char *);
void (*fabric_drop_tpg)(struct se_portal_group *);
int (*fabric_post_link)(struct se_portal_group *,
struct se_lun *);
@@ -109,17 +109,17 @@ void target_unregister_template(const struct target_core_fabric_ops *fo);
int target_depend_item(struct config_item *item);
void target_undepend_item(struct config_item *item);
-struct se_session *target_alloc_session(struct se_portal_group *,
+struct se_session *target_setup_session(struct se_portal_group *,
unsigned int, unsigned int, enum target_prot_op prot_op,
const char *, void *,
int (*callback)(struct se_portal_group *,
struct se_session *, void *));
+void target_remove_session(struct se_session *);
-struct se_session *transport_init_session(enum target_prot_op);
+void transport_init_session(struct se_session *);
+struct se_session *transport_alloc_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
unsigned int);
-struct se_session *transport_init_session_tags(unsigned int, unsigned int,
- enum target_prot_op);
void __transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
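A hedged before/after sketch of the session API rename for fabric drivers (argument names are illustrative):

	/* old: sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	 *      followed by a separate registration step
	 */
	sess = target_setup_session(tpg, tag_num, tag_size, prot_op,
				    initiator_name, fabric_priv, NULL);
	if (IS_ERR(sess))
		return PTR_ERR(sess);
	/* ... */
	target_remove_session(sess);	/* deregister and free in one call */

target_setup_session() also accepts an optional callback that runs between allocation and registration, in place of the open-coded init sequence fabrics carried before.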
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 39b94ec965be..b401c4e36394 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -374,7 +374,7 @@ DECLARE_EVENT_CLASS(
__entry->extent_type = btrfs_file_extent_type(l, fi);
__entry->compression = btrfs_file_extent_compression(l, fi);
__entry->extent_start = start;
- __entry->extent_end = (start + btrfs_file_extent_inline_len(l, slot, fi));
+ __entry->extent_end = (start + btrfs_file_extent_ram_bytes(l, fi));
),
TP_printk_btrfs(
@@ -433,7 +433,6 @@ DEFINE_EVENT(
{ (1 << BTRFS_ORDERED_DIRECT), "DIRECT" }, \
{ (1 << BTRFS_ORDERED_IOERR), "IOERR" }, \
{ (1 << BTRFS_ORDERED_UPDATED_ISIZE), "UPDATED_ISIZE" }, \
- { (1 << BTRFS_ORDERED_LOGGED_CSUM), "LOGGED_CSUM" }, \
{ (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" })
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index 2cd449328aee..9004ffff7f32 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -192,6 +192,42 @@ DEFINE_EVENT(clk_phase, clk_set_phase_complete,
TP_ARGS(core, phase)
);
+DECLARE_EVENT_CLASS(clk_duty_cycle,
+
+ TP_PROTO(struct clk_core *core, struct clk_duty *duty),
+
+ TP_ARGS(core, duty),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ __field( unsigned int, num )
+ __field( unsigned int, den )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ __entry->num = duty->num;
+ __entry->den = duty->den;
+ ),
+
+ TP_printk("%s %u/%u", __get_str(name), (unsigned int)__entry->num,
+ (unsigned int)__entry->den)
+);
+
+DEFINE_EVENT(clk_duty_cycle, clk_set_duty_cycle,
+
+ TP_PROTO(struct clk_core *core, struct clk_duty *duty),
+
+ TP_ARGS(core, duty)
+);
+
+DEFINE_EVENT(clk_duty_cycle, clk_set_duty_cycle_complete,
+
+ TP_PROTO(struct clk_core *core, struct clk_duty *duty),
+
+ TP_ARGS(core, duty)
+);
+
#endif /* _TRACE_CLK_H */
/* This part must be outside protection */
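A hedged sketch of how the clk core might emit the new duty-cycle event pair around a provider callback, assuming a struct clk_core *core supplied by the framework:

	struct clk_duty duty = { .num = 1, .den = 4 };	/* 25% duty cycle */

	trace_clk_set_duty_cycle(core, &duty);
	/* ... provider's duty-cycle op applies the num/den ratio ... */
	trace_clk_set_duty_cycle_complete(core, &duty);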
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index 9763cddd0594..6271bab63bfb 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -22,6 +22,7 @@ TRACE_EVENT(fib_table_lookup,
__field( int, err )
__field( int, oif )
__field( int, iif )
+ __field( u8, proto )
__field( __u8, tos )
__field( __u8, scope )
__field( __u8, flags )
@@ -31,7 +32,6 @@ TRACE_EVENT(fib_table_lookup,
__array( __u8, saddr, 4 )
__field( u16, sport )
__field( u16, dport )
- __field( u8, proto )
__dynamic_array(char, name, IFNAMSIZ )
),
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index d1faf3597b9d..68b17c116907 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -112,8 +112,11 @@ DEFINE_EVENT(filelock_lock, locks_remove_posix,
TP_PROTO(struct inode *inode, struct file_lock *fl, int ret),
TP_ARGS(inode, fl, ret));
-DECLARE_EVENT_CLASS(filelock_lease,
+DEFINE_EVENT(filelock_lock, flock_lock_inode,
+ TP_PROTO(struct inode *inode, struct file_lock *fl, int ret),
+ TP_ARGS(inode, fl, ret));
+DECLARE_EVENT_CLASS(filelock_lease,
TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl),
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 9c886739246a..00aa72ce0e7c 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -223,6 +223,13 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry,
TP_ARGS(skb)
);
+DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_list_entry,
+
+ TP_PROTO(const struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+
DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,
TP_PROTO(const struct sk_buff *skb),
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 908977d69783..f7aece721aed 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -5,6 +5,7 @@
#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_POWER_H
+#include <linux/cpufreq.h>
#include <linux/ktime.h>
#include <linux/pm_qos.h>
#include <linux/tracepoint.h>
@@ -148,6 +149,30 @@ DEFINE_EVENT(cpu, cpu_frequency,
TP_ARGS(frequency, cpu_id)
);
+TRACE_EVENT(cpu_frequency_limits,
+
+ TP_PROTO(struct cpufreq_policy *policy),
+
+ TP_ARGS(policy),
+
+ TP_STRUCT__entry(
+ __field(u32, min_freq)
+ __field(u32, max_freq)
+ __field(u32, cpu_id)
+ ),
+
+ TP_fast_assign(
+ __entry->min_freq = policy->min;
+ __entry->max_freq = policy->max;
+ __entry->cpu_id = policy->cpu;
+ ),
+
+ TP_printk("min=%lu max=%lu cpu_id=%lu",
+ (unsigned long)__entry->min_freq,
+ (unsigned long)__entry->max_freq,
+ (unsigned long)__entry->cpu_id)
+);
+
TRACE_EVENT(device_pm_callback_start,
TP_PROTO(struct device *dev, const char *pm_ops, int event),
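The new cpu_frequency_limits event fires when a policy's bounds change rather than on every frequency transition. A hedged sketch of a call site, roughly where cpufreq would apply a new policy:

	policy->min = new_policy->min;	/* kHz */
	policy->max = new_policy->max;
	trace_cpu_frequency_limits(policy);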
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 5936aac357ab..a8d07feff6a0 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -52,6 +52,7 @@ TRACE_EVENT(rcu_utilization,
* "cpuqs": CPU passes through a quiescent state.
* "cpuonl": CPU comes online.
* "cpuofl": CPU goes offline.
+ * "cpuofl-bgp": CPU goes offline while blocking a grace period.
* "reqwait": GP kthread sleeps waiting for grace-period request.
* "reqwaitsig": GP kthread awakened by signal from reqwait state.
* "fqswait": GP kthread waiting until time to force quiescent states.
@@ -63,24 +64,24 @@ TRACE_EVENT(rcu_utilization,
*/
TRACE_EVENT(rcu_grace_period,
- TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
- TP_ARGS(rcuname, gpnum, gpevent),
+ TP_ARGS(rcuname, gp_seq, gpevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(const char *, gpevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->gpevent = gpevent;
),
TP_printk("%s %lu %s",
- __entry->rcuname, __entry->gpnum, __entry->gpevent)
+ __entry->rcuname, __entry->gp_seq, __entry->gpevent)
);
/*
@@ -90,8 +91,8 @@ TRACE_EVENT(rcu_grace_period,
*
* "Startleaf": Request a grace period based on leaf-node data.
* "Prestarted": Someone beat us to the request
- * "Startedleaf": Leaf-node start proved sufficient.
- * "Startedleafroot": Leaf-node start proved sufficient after checking root.
+ * "Startedleaf": Leaf node marked for future GP.
+ * "Startedleafroot": All nodes from leaf to root marked for future GP.
* "Startedroot": Requested a nocb grace period based on root-node data.
* "NoGPkthread": The RCU grace-period kthread has not yet started.
* "StartWait": Start waiting for the requested grace period.
@@ -102,17 +103,16 @@ TRACE_EVENT(rcu_grace_period,
*/
TRACE_EVENT(rcu_future_grace_period,
- TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed,
- unsigned long c, u8 level, int grplo, int grphi,
+ TP_PROTO(const char *rcuname, unsigned long gp_seq,
+ unsigned long gp_seq_req, u8 level, int grplo, int grphi,
const char *gpevent),
- TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent),
+ TP_ARGS(rcuname, gp_seq, gp_seq_req, level, grplo, grphi, gpevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
- __field(unsigned long, completed)
- __field(unsigned long, c)
+ __field(unsigned long, gp_seq)
+ __field(unsigned long, gp_seq_req)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
@@ -121,19 +121,17 @@ TRACE_EVENT(rcu_future_grace_period,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
- __entry->completed = completed;
- __entry->c = c;
+ __entry->gp_seq = gp_seq;
+ __entry->gp_seq_req = gp_seq_req;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
__entry->gpevent = gpevent;
),
- TP_printk("%s %lu %lu %lu %u %d %d %s",
- __entry->rcuname, __entry->gpnum, __entry->completed,
- __entry->c, __entry->level, __entry->grplo, __entry->grphi,
- __entry->gpevent)
+ TP_printk("%s %lu %lu %u %d %d %s",
+ __entry->rcuname, __entry->gp_seq, __entry->gp_seq_req, __entry->level,
+ __entry->grplo, __entry->grphi, __entry->gpevent)
);
/*
@@ -145,14 +143,14 @@ TRACE_EVENT(rcu_future_grace_period,
*/
TRACE_EVENT(rcu_grace_period_init,
- TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
int grplo, int grphi, unsigned long qsmask),
- TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
+ TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
@@ -161,7 +159,7 @@ TRACE_EVENT(rcu_grace_period_init,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
@@ -169,7 +167,7 @@ TRACE_EVENT(rcu_grace_period_init,
),
TP_printk("%s %lu %u %d %d %lx",
- __entry->rcuname, __entry->gpnum, __entry->level,
+ __entry->rcuname, __entry->gp_seq, __entry->level,
__entry->grplo, __entry->grphi, __entry->qsmask)
);
@@ -301,24 +299,24 @@ TRACE_EVENT(rcu_nocb_wake,
*/
TRACE_EVENT(rcu_preempt_task,
- TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),
+ TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
- TP_ARGS(rcuname, pid, gpnum),
+ TP_ARGS(rcuname, pid, gp_seq),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(int, pid)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->pid = pid;
),
TP_printk("%s %lu %d",
- __entry->rcuname, __entry->gpnum, __entry->pid)
+ __entry->rcuname, __entry->gp_seq, __entry->pid)
);
/*
@@ -328,23 +326,23 @@ TRACE_EVENT(rcu_preempt_task,
*/
TRACE_EVENT(rcu_unlock_preempted_task,
- TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
- TP_ARGS(rcuname, gpnum, pid),
+ TP_ARGS(rcuname, gp_seq, pid),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(int, pid)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->pid = pid;
),
- TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid)
+ TP_printk("%s %lu %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
);
/*
@@ -357,15 +355,15 @@ TRACE_EVENT(rcu_unlock_preempted_task,
*/
TRACE_EVENT(rcu_quiescent_state_report,
- TP_PROTO(const char *rcuname, unsigned long gpnum,
+ TP_PROTO(const char *rcuname, unsigned long gp_seq,
unsigned long mask, unsigned long qsmask,
u8 level, int grplo, int grphi, int gp_tasks),
- TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
+ TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(unsigned long, mask)
__field(unsigned long, qsmask)
__field(u8, level)
@@ -376,7 +374,7 @@ TRACE_EVENT(rcu_quiescent_state_report,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->mask = mask;
__entry->qsmask = qsmask;
__entry->level = level;
@@ -386,41 +384,41 @@ TRACE_EVENT(rcu_quiescent_state_report,
),
TP_printk("%s %lu %lx>%lx %u %d %d %u",
- __entry->rcuname, __entry->gpnum,
+ __entry->rcuname, __entry->gp_seq,
__entry->mask, __entry->qsmask, __entry->level,
__entry->grplo, __entry->grphi, __entry->gp_tasks)
);
/*
* Tracepoint for quiescent states detected by force_quiescent_state().
- * These trace events include the type of RCU, the grace-period number that
- * was blocked by the CPU, the CPU itself, and the type of quiescent state,
- * which can be "dti" for dyntick-idle mode, "ofl" for CPU offline, "kick"
- * when kicking a CPU that has been in dyntick-idle mode for too long, or
- * "rqc" if the CPU got a quiescent state via its rcu_qs_ctr.
+ * These trace events include the type of RCU, the grace-period number
+ * that was blocked by the CPU, the CPU itself, and the type of quiescent
+ * state, which can be "dti" for dyntick-idle mode, "kick" when kicking
+ * a CPU that has been in dyntick-idle mode for too long, or "rqc" if the
+ * CPU got a quiescent state via its rcu_qs_ctr.
*/
TRACE_EVENT(rcu_fqs,
- TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
- TP_ARGS(rcuname, gpnum, cpu, qsevent),
+ TP_ARGS(rcuname, gp_seq, cpu, qsevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(int, cpu)
__field(const char *, qsevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->cpu = cpu;
__entry->qsevent = qsevent;
),
TP_printk("%s %lu %d %s",
- __entry->rcuname, __entry->gpnum,
+ __entry->rcuname, __entry->gp_seq,
__entry->cpu, __entry->qsevent)
);
@@ -753,23 +751,23 @@ TRACE_EVENT(rcu_barrier,
#else /* #ifdef CONFIG_RCU_TRACE */
-#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
+#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
+#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
level, grplo, grphi, event) \
do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
qsmask) do { } while (0)
#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
do { } while (0)
#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
do { } while (0)
#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
-#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
-#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
-#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
+#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
+#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
grplo, grphi, gp_tasks) do { } \
while (0)
-#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
+#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 4fff00e9da8a..196587b8f204 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -211,18 +211,18 @@ enum rxrpc_congest_change {
rxrpc_cong_saw_nack,
};
-enum rxrpc_tx_fail_trace {
- rxrpc_tx_fail_call_abort,
- rxrpc_tx_fail_call_ack,
- rxrpc_tx_fail_call_data_frag,
- rxrpc_tx_fail_call_data_nofrag,
- rxrpc_tx_fail_call_final_resend,
- rxrpc_tx_fail_conn_abort,
- rxrpc_tx_fail_conn_challenge,
- rxrpc_tx_fail_conn_response,
- rxrpc_tx_fail_reject,
- rxrpc_tx_fail_version_keepalive,
- rxrpc_tx_fail_version_reply,
+enum rxrpc_tx_point {
+ rxrpc_tx_point_call_abort,
+ rxrpc_tx_point_call_ack,
+ rxrpc_tx_point_call_data_frag,
+ rxrpc_tx_point_call_data_nofrag,
+ rxrpc_tx_point_call_final_resend,
+ rxrpc_tx_point_conn_abort,
+ rxrpc_tx_point_rxkad_challenge,
+ rxrpc_tx_point_rxkad_response,
+ rxrpc_tx_point_reject,
+ rxrpc_tx_point_version_keepalive,
+ rxrpc_tx_point_version_reply,
};
#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
@@ -396,7 +396,7 @@ enum rxrpc_tx_fail_trace {
#define rxrpc_propose_ack_outcomes \
EM(rxrpc_propose_ack_subsume, " Subsume") \
EM(rxrpc_propose_ack_update, " Update") \
- E_(rxrpc_propose_ack_use, "")
+ E_(rxrpc_propose_ack_use, " New")
#define rxrpc_congest_modes \
EM(RXRPC_CALL_CONGEST_AVOIDANCE, "CongAvoid") \
@@ -452,18 +452,18 @@ enum rxrpc_tx_fail_trace {
EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \
E_(RXRPC_CALL_NETWORK_ERROR, "NetError")
-#define rxrpc_tx_fail_traces \
- EM(rxrpc_tx_fail_call_abort, "CallAbort") \
- EM(rxrpc_tx_fail_call_ack, "CallAck") \
- EM(rxrpc_tx_fail_call_data_frag, "CallDataFrag") \
- EM(rxrpc_tx_fail_call_data_nofrag, "CallDataNofrag") \
- EM(rxrpc_tx_fail_call_final_resend, "CallFinalResend") \
- EM(rxrpc_tx_fail_conn_abort, "ConnAbort") \
- EM(rxrpc_tx_fail_conn_challenge, "ConnChall") \
- EM(rxrpc_tx_fail_conn_response, "ConnResp") \
- EM(rxrpc_tx_fail_reject, "Reject") \
- EM(rxrpc_tx_fail_version_keepalive, "VerKeepalive") \
- E_(rxrpc_tx_fail_version_reply, "VerReply")
+#define rxrpc_tx_points \
+ EM(rxrpc_tx_point_call_abort, "CallAbort") \
+ EM(rxrpc_tx_point_call_ack, "CallAck") \
+ EM(rxrpc_tx_point_call_data_frag, "CallDataFrag") \
+ EM(rxrpc_tx_point_call_data_nofrag, "CallDataNofrag") \
+ EM(rxrpc_tx_point_call_final_resend, "CallFinalResend") \
+ EM(rxrpc_tx_point_conn_abort, "ConnAbort") \
+ EM(rxrpc_tx_point_reject, "Reject") \
+ EM(rxrpc_tx_point_rxkad_challenge, "RxkadChall") \
+ EM(rxrpc_tx_point_rxkad_response, "RxkadResp") \
+ EM(rxrpc_tx_point_version_keepalive, "VerKeepalive") \
+ E_(rxrpc_tx_point_version_reply, "VerReply")
/*
* Export enum symbols via userspace.
@@ -488,7 +488,7 @@ rxrpc_propose_ack_traces;
rxrpc_propose_ack_outcomes;
rxrpc_congest_modes;
rxrpc_congest_changes;
-rxrpc_tx_fail_traces;
+rxrpc_tx_points;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -801,7 +801,7 @@ TRACE_EVENT(rxrpc_transmit,
);
TRACE_EVENT(rxrpc_rx_data,
- TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ TP_PROTO(unsigned int call, rxrpc_seq_t seq,
rxrpc_serial_t serial, u8 flags, u8 anno),
TP_ARGS(call, seq, serial, flags, anno),
@@ -815,7 +815,7 @@ TRACE_EVENT(rxrpc_rx_data,
),
TP_fast_assign(
- __entry->call = call->debug_id;
+ __entry->call = call;
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
@@ -918,6 +918,37 @@ TRACE_EVENT(rxrpc_rx_rwind_change,
__entry->wake ? " wake" : "")
);
+TRACE_EVENT(rxrpc_tx_packet,
+ TP_PROTO(unsigned int call_id, struct rxrpc_wire_header *whdr,
+ enum rxrpc_tx_point where),
+
+ TP_ARGS(call_id, whdr, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(enum rxrpc_tx_point, where )
+ __field_struct(struct rxrpc_wire_header, whdr )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call_id;
+ memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr));
+ ),
+
+ TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s",
+ __entry->call,
+ ntohl(__entry->whdr.epoch),
+ ntohl(__entry->whdr.cid),
+ ntohl(__entry->whdr.callNumber),
+ ntohs(__entry->whdr.serviceId),
+ ntohl(__entry->whdr.serial),
+ ntohl(__entry->whdr.seq),
+ __entry->whdr.type, __entry->whdr.flags,
+ __entry->whdr.type <= 15 ?
+ __print_symbolic(__entry->whdr.type, rxrpc_pkts) : "?UNK",
+ __print_symbolic(__entry->where, rxrpc_tx_points))
+ );
+
TRACE_EVENT(rxrpc_tx_data,
TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
@@ -928,6 +959,8 @@ TRACE_EVENT(rxrpc_tx_data,
__field(unsigned int, call )
__field(rxrpc_seq_t, seq )
__field(rxrpc_serial_t, serial )
+ __field(u32, cid )
+ __field(u32, call_id )
__field(u8, flags )
__field(bool, retrans )
__field(bool, lose )
@@ -935,6 +968,8 @@ TRACE_EVENT(rxrpc_tx_data,
TP_fast_assign(
__entry->call = call->debug_id;
+ __entry->cid = call->cid;
+ __entry->call_id = call->call_id;
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
@@ -942,8 +977,10 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->lose = lose;
),
- TP_printk("c=%08x DATA %08x q=%08x fl=%02x%s%s",
+ TP_printk("c=%08x DATA %08x:%08x %08x q=%08x fl=%02x%s%s",
__entry->call,
+ __entry->cid,
+ __entry->call_id,
__entry->serial,
__entry->seq,
__entry->flags,
@@ -952,7 +989,7 @@ TRACE_EVENT(rxrpc_tx_data,
);
TRACE_EVENT(rxrpc_tx_ack,
- TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+ TP_PROTO(unsigned int call, rxrpc_serial_t serial,
rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
u8 reason, u8 n_acks),
@@ -968,7 +1005,7 @@ TRACE_EVENT(rxrpc_tx_ack,
),
TP_fast_assign(
- __entry->call = call ? call->debug_id : 0;
+ __entry->call = call;
__entry->serial = serial;
__entry->ack_first = ack_first;
__entry->ack_serial = ack_serial;
@@ -1434,29 +1471,29 @@ TRACE_EVENT(rxrpc_rx_icmp,
TRACE_EVENT(rxrpc_tx_fail,
TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, int ret,
- enum rxrpc_tx_fail_trace what),
+ enum rxrpc_tx_point where),
- TP_ARGS(debug_id, serial, ret, what),
+ TP_ARGS(debug_id, serial, ret, where),
TP_STRUCT__entry(
__field(unsigned int, debug_id )
__field(rxrpc_serial_t, serial )
__field(int, ret )
- __field(enum rxrpc_tx_fail_trace, what )
+ __field(enum rxrpc_tx_point, where )
),
TP_fast_assign(
__entry->debug_id = debug_id;
__entry->serial = serial;
__entry->ret = ret;
- __entry->what = what;
+ __entry->where = where;
),
TP_printk("c=%08x r=%x ret=%d %s",
__entry->debug_id,
__entry->serial,
__entry->ret,
- __print_symbolic(__entry->what, rxrpc_tx_fail_traces))
+ __print_symbolic(__entry->where, rxrpc_tx_points))
);
TRACE_EVENT(rxrpc_call_reset,
@@ -1491,6 +1528,26 @@ TRACE_EVENT(rxrpc_call_reset,
__entry->tx_seq, __entry->rx_seq)
);
+TRACE_EVENT(rxrpc_notify_socket,
+ TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial),
+
+ TP_ARGS(debug_id, serial),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, debug_id )
+ __field(rxrpc_serial_t, serial )
+ ),
+
+ TP_fast_assign(
+ __entry->debug_id = debug_id;
+ __entry->serial = serial;
+ ),
+
+ TP_printk("c=%08x r=%08x",
+ __entry->debug_id,
+ __entry->serial)
+ );
+
#endif /* _TRACE_RXRPC_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index 3176a3931107..a0c4b8a30966 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -35,6 +35,10 @@
EM(TCP_CLOSING) \
EMe(TCP_NEW_SYN_RECV)
+#define skmem_kind_names \
+ EM(SK_MEM_SEND) \
+ EMe(SK_MEM_RECV)
+
/* enums need to be exported to user space */
#undef EM
#undef EMe
@@ -44,6 +48,7 @@
family_names
inet_protocol_names
tcp_state_names
+skmem_kind_names
#undef EM
#undef EMe
@@ -59,6 +64,9 @@ tcp_state_names
#define show_tcp_state_name(val) \
__print_symbolic(val, tcp_state_names)
+#define show_skmem_kind_names(val) \
+ __print_symbolic(val, skmem_kind_names)
+
TRACE_EVENT(sock_rcvqueue_full,
TP_PROTO(struct sock *sk, struct sk_buff *skb),
@@ -83,9 +91,9 @@ TRACE_EVENT(sock_rcvqueue_full,
TRACE_EVENT(sock_exceed_buf_limit,
- TP_PROTO(struct sock *sk, struct proto *prot, long allocated),
+ TP_PROTO(struct sock *sk, struct proto *prot, long allocated, int kind),
- TP_ARGS(sk, prot, allocated),
+ TP_ARGS(sk, prot, allocated, kind),
TP_STRUCT__entry(
__array(char, name, 32)
@@ -93,6 +101,10 @@ TRACE_EVENT(sock_exceed_buf_limit,
__field(long, allocated)
__field(int, sysctl_rmem)
__field(int, rmem_alloc)
+ __field(int, sysctl_wmem)
+ __field(int, wmem_alloc)
+ __field(int, wmem_queued)
+ __field(int, kind)
),
TP_fast_assign(
@@ -101,17 +113,25 @@ TRACE_EVENT(sock_exceed_buf_limit,
__entry->allocated = allocated;
__entry->sysctl_rmem = sk_get_rmem0(sk, prot);
__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
+ __entry->sysctl_wmem = sk_get_wmem0(sk, prot);
+ __entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc);
+ __entry->wmem_queued = sk->sk_wmem_queued;
+ __entry->kind = kind;
),
- TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld "
- "sysctl_rmem=%d rmem_alloc=%d",
+ TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld sysctl_rmem=%d rmem_alloc=%d sysctl_wmem=%d wmem_alloc=%d wmem_queued=%d kind=%s",
__entry->name,
__entry->sysctl_mem[0],
__entry->sysctl_mem[1],
__entry->sysctl_mem[2],
__entry->allocated,
__entry->sysctl_rmem,
- __entry->rmem_alloc)
+ __entry->rmem_alloc,
+ __entry->sysctl_wmem,
+ __entry->wmem_alloc,
+ __entry->wmem_queued,
+ show_skmem_kind_names(__entry->kind)
+ )
);
TRACE_EVENT(inet_sock_set_state,
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 0ae758c90e54..a12692e5f7a8 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -107,4 +107,7 @@
#define SO_ZEROCOPY 60
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
#endif /* __ASM_GENERIC_SOCKET_H */
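A hedged userspace sketch of opting a socket into the new SO_TXTIME option; struct sock_txtime and its fields come from <linux/net_tstamp.h> in this same kernel series and are an assumption if your headers differ:

	#include <linux/net_tstamp.h>
	#include <sys/socket.h>
	#include <time.h>

	static int enable_txtime(int fd)
	{
		struct sock_txtime cfg = {
			.clockid = CLOCK_TAI,	/* clock the per-packet times use */
			.flags	 = 0,
		};

		/* per-packet transmit times then travel in SCM_TXTIME cmsgs */
		return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
	}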
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 42990676a55e..df4bedb9b01c 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -734,9 +734,11 @@ __SYSCALL(__NR_pkey_free, sys_pkey_free)
__SYSCALL(__NR_statx, sys_statx)
#define __NR_io_pgetevents 292
__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+#define __NR_rseq 293
+__SYSCALL(__NR_rseq, sys_rseq)
#undef __NR_syscalls
-#define __NR_syscalls 293
+#define __NR_syscalls 294
/*
* 32 bit systems traditionally used different
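A hedged sketch of registering with the new rseq syscall from userspace; the struct rseq layout, its 32-byte alignment requirement, and RSEQ_FLAG_UNREGISTER come from <linux/rseq.h> and are assumptions if your headers differ:

	#include <linux/rseq.h>
	#include <stdint.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* must stay valid and 32-byte aligned for the thread's lifetime */
	static __thread struct rseq rs __attribute__((aligned(32)));

	static int register_rseq(uint32_t sig)
	{
		/* flags == 0 registers; RSEQ_FLAG_UNREGISTER undoes it */
		return syscall(__NR_rseq, &rs, sizeof(rs), 0, sig);
	}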
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 78b4dd89fcb4..1ceec56de015 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -72,6 +72,29 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
+/**
+ * DOC: memory domains
+ *
+ * %AMDGPU_GEM_DOMAIN_CPU System memory that is not GPU accessible.
+ * Memory in this pool could be swapped out to disk if there is pressure.
+ *
+ * %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the
+ * GPU's virtual address space via GART. The GART linearizes non-contiguous
+ * pages of system memory, allowing the GPU to access them in a
+ * linearized fashion.
+ *
+ * %AMDGPU_GEM_DOMAIN_VRAM Local video memory. For APUs, it is memory
+ * carved out by the BIOS.
+ *
+ * %AMDGPU_GEM_DOMAIN_GDS Global on-chip data storage used to share data
+ * across shader threads.
+ *
+ * %AMDGPU_GEM_DOMAIN_GWS Global wave sync, used to synchronize the
+ * execution of all the waves on a device.
+ *
+ * %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
+ * for appending data.
+ */
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
#define AMDGPU_GEM_DOMAIN_VRAM 0x4
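A hedged sketch of selecting one of the documented domains when creating a buffer object; union drm_amdgpu_gem_create and DRM_IOCTL_AMDGPU_GEM_CREATE are from this same uapi header, but the field usage here is an assumption, not an excerpt from this patch:

	union drm_amdgpu_gem_create args = {0};

	args.in.bo_size   = 1 << 20;			/* 1 MiB */
	args.in.alignment = 4096;
	args.in.domains   = AMDGPU_GEM_DOMAIN_GTT;	/* GPU-visible sysmem */

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args) == 0)
		handle = args.out.handle;		/* names the new BO */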
@@ -483,7 +506,8 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_HW_IP_UVD_ENC 5
#define AMDGPU_HW_IP_VCN_DEC 6
#define AMDGPU_HW_IP_VCN_ENC 7
-#define AMDGPU_HW_IP_NUM 8
+#define AMDGPU_HW_IP_VCN_JPEG 8
+#define AMDGPU_HW_IP_NUM 9
#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
@@ -492,6 +516,7 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
+#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 9c660e1688ab..300f336633f2 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -687,6 +687,15 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_ASPECT_RATIO 4
+/**
+ * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
+ *
+ * If set to 1, the DRM core will expose special connectors to be used for
+ * writing back to memory the scene set up in the commit. Depends on the
+ * client also supporting DRM_CLIENT_CAP_ATOMIC.
+ */
+#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
+
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
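A hedged userspace sketch of enabling the new cap through libdrm's drmSetClientCap(); per the comment above, the atomic cap must be acknowledged as well before writeback connectors show up:

	#include <xf86drm.h>

	if (drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1) ||
	    drmSetClientCap(fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1))
		return -1;	/* kernel or driver lacks writeback support */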
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index e04613d30a13..721ab7e54d96 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -183,6 +183,7 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
+#define DRM_FORMAT_MOD_VENDOR_ARM 0x08
/* add more to the end as needed */
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
@@ -298,6 +299,19 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
+/*
+ * Qualcomm Compressed Format
+ *
+ * Refers to a compressed variant of the base format.
+ * The implementation may be platform- and base-format-specific.
+ *
+ * Each macrotile consists of m x n (mostly 4 x 4) tiles.
+ * Pixel data pitch/stride is aligned with macrotile width.
+ * Pixel data height is aligned with macrotile height.
+ * Entire pixel data buffer is aligned with 4k(bytes).
+ */
+#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1)
+
/* Vivante framebuffer modifiers */
/*
@@ -385,6 +399,23 @@ extern "C" {
fourcc_mod_code(NVIDIA, 0x15)
/*
+ * Some Broadcom modifiers take parameters, for example the number of
+ * vertical lines in the image. Reserve the lower 8 bits for the
+ * modifier type, and the next 48 bits for parameters. The top 8 bits
+ * are the vendor code.
+ */
+#define __fourcc_mod_broadcom_param_shift 8
+#define __fourcc_mod_broadcom_param_bits 48
+#define fourcc_mod_broadcom_code(val, params) \
+ fourcc_mod_code(BROADCOM, ((((__u64)params) << __fourcc_mod_broadcom_param_shift) | val))
+#define fourcc_mod_broadcom_param(m) \
+ ((int)(((m) >> __fourcc_mod_broadcom_param_shift) & \
+ ((1ULL << __fourcc_mod_broadcom_param_bits) - 1)))
+#define fourcc_mod_broadcom_mod(m) \
+ ((m) & ~(((1ULL << __fourcc_mod_broadcom_param_bits) - 1) << \
+ __fourcc_mod_broadcom_param_shift))
+
+/*
* Broadcom VC4 "T" format
*
* This is the primary layout that the V3D GPU can texture from (it
@@ -405,6 +436,151 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED fourcc_mod_code(BROADCOM, 1)
+/*
+ * Broadcom SAND format
+ *
+ * This is the native format that the H.264 codec block uses. For VC4
+ * HVS, it is only valid for H.264 (NV12/21) and RGBA modes.
+ *
+ * The image can be considered to be split into columns, and the
+ * columns are placed consecutively into memory. The width of those
+ * columns can be either 32, 64, 128, or 256 pixels, but in practice
+ * only 128 pixel columns are used.
+ *
+ * The pitch between the start of each column is set to optimally
+ * switch between SDRAM banks. This is passed as the number of lines
+ * of column width in the modifier (we can't use the stride value due
+ * to various core checks that look at it, so you should set the
+ * stride to width*cpp).
+ *
+ * Note that the column height for this format modifier is the same
+ * for all of the planes, assuming that each column contains both Y
+ * and UV. Some SAND-using hardware stores UV in a separate tiled
+ * image from Y to reduce the column height, which is not supported
+ * with these modifiers.
+ */
+
+#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(2, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(3, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(4, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(5, v)
+
+#define DRM_FORMAT_MOD_BROADCOM_SAND32 \
+ DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND64 \
+ DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND128 \
+ DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND256 \
+ DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(0)
+
+/* Broadcom UIF format
+ *
+ * This is the common format for the current Broadcom multimedia
+ * blocks, including V3D 3.x and newer, newer video codecs, and
+ * displays.
+ *
+ * The image consists of utiles (64b blocks), UIF blocks (2x2 utiles),
+ * and macroblocks (4x4 UIF blocks). Those 4x4 UIF block groups are
+ * stored in columns, with padding between the columns to ensure that
+ * moving from one column to the next doesn't hit the same SDRAM page
+ * bank.
+ *
+ * To calculate the padding, it is assumed that each hardware block
+ * and the software driving it knows the platform's SDRAM page size,
+ * number of banks, and XOR address, and that it's identical between
+ * all blocks using the format. This tiling modifier will use XOR as
+ * necessary to reduce the padding. If a hardware block can't do XOR,
+ * the assumption is that a no-XOR tiling modifier will be created.
+ */
+#define DRM_FORMAT_MOD_BROADCOM_UIF fourcc_mod_code(BROADCOM, 6)
+
+/*
+ * Arm Framebuffer Compression (AFBC) modifiers
+ *
+ * AFBC is a proprietary lossless image compression protocol and format.
+ * It provides fine-grained random access and minimizes the amount of data
+ * transferred between IP blocks.
+ *
+ * AFBC has several features which may be supported and/or used, which are
+ * represented using bits in the modifier. Not all combinations are valid,
+ * and different devices or use-cases may support different combinations.
+ */
+#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) fourcc_mod_code(ARM, __afbc_mode)
+
+/*
+ * AFBC superblock size
+ *
+ * Indicates the superblock size(s) used for the AFBC buffer. The buffer
+ * size (in pixels) must be aligned to a multiple of the superblock size.
+ * The four least significant bits (LSBs) are reserved for the block size.
+ */
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_MASK 0xf
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 (1ULL)
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 (2ULL)
+
+/*
+ * AFBC lossless colorspace transform
+ *
+ * Indicates that the buffer makes use of the AFBC lossless colorspace
+ * transform.
+ */
+#define AFBC_FORMAT_MOD_YTR (1ULL << 4)
+
+/*
+ * AFBC block-split
+ *
+ * Indicates that the payload of each superblock is split. The second
+ * half of the payload is positioned at a predefined offset from the start
+ * of the superblock payload.
+ */
+#define AFBC_FORMAT_MOD_SPLIT (1ULL << 5)
+
+/*
+ * AFBC sparse layout
+ *
+ * This flag indicates that the payload of each superblock must be stored at a
+ * predefined position relative to the other superblocks in the same AFBC
+ * buffer. This order is the same order used by the header buffer. In this mode
+ * each superblock is given the same amount of space as an uncompressed
+ * superblock of the particular format would require, rounding up to the next
+ * multiple of 128 bytes in size.
+ */
+#define AFBC_FORMAT_MOD_SPARSE (1ULL << 6)
+
+/*
+ * AFBC copy-block restrict
+ *
+ * Buffers with this flag must obey the copy-block restriction. The restriction
+ * is such that there are no copy-blocks referring across the border of 8x8
+ * blocks. For the subsampled data the 8x8 limitation is also subsampled.
+ */
+#define AFBC_FORMAT_MOD_CBR (1ULL << 7)
+
+/*
+ * AFBC tiled layout
+ *
+ * The tiled layout groups superblocks in 8x8 or 4x4 tiles, where all
+ * superblocks inside a tile are stored together in memory. 8x8 tiles are used
+ * for pixel formats up to and including 32 bpp while 4x4 tiles are used for
+ * larger bpp formats. The order between the tiles is scan line.
+ * When the tiled layout is used, the buffer size (in pixels) must be aligned
+ * to the tile size.
+ */
+#define AFBC_FORMAT_MOD_TILED (1ULL << 8)
+
+/*
+ * AFBC solid color blocks
+ *
+ * Indicates that the buffer makes use of solid-color blocks, whereby bandwidth
+ * can be reduced if a whole superblock is a single color.
+ */
+#define AFBC_FORMAT_MOD_SC (1ULL << 9)
+
#if defined(__cplusplus)
}
#endif
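A hedged sketch of composing and decomposing the new parameterized and AFBC modifiers with the macros defined above; the assert round-trip just illustrates the bit layout:

	#include <assert.h>
	#include <drm/drm_fourcc.h>

	/* SAND128 with a 96-line column-height parameter */
	__u64 sand = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(96);

	assert(fourcc_mod_broadcom_mod(sand) == DRM_FORMAT_MOD_BROADCOM_SAND128);
	assert(fourcc_mod_broadcom_param(sand) == 96);

	/* AFBC: OR feature bits into the low bits of the Arm modifier */
	__u64 afbc = DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
					     AFBC_FORMAT_MOD_YTR |
					     AFBC_FORMAT_MOD_SPARSE);

Either value would then be passed to the kernel through a modifier-aware framebuffer creation path such as libdrm's drmModeAddFB2WithModifiers().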
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 4b3a1bb58e68..8d67243952f4 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -96,6 +96,13 @@ extern "C" {
#define DRM_MODE_PICTURE_ASPECT_64_27 3
#define DRM_MODE_PICTURE_ASPECT_256_135 4
+/* Content type options */
+#define DRM_MODE_CONTENT_TYPE_NO_DATA 0
+#define DRM_MODE_CONTENT_TYPE_GRAPHICS 1
+#define DRM_MODE_CONTENT_TYPE_PHOTO 2
+#define DRM_MODE_CONTENT_TYPE_CINEMA 3
+#define DRM_MODE_CONTENT_TYPE_GAME 4
+
/* Aspect ratio flag bitmask (4 bits 22:19) */
#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
#define DRM_MODE_FLAG_PIC_AR_NONE \
@@ -344,6 +351,7 @@ enum drm_mode_subconnector {
#define DRM_MODE_CONNECTOR_VIRTUAL 15
#define DRM_MODE_CONNECTOR_DSI 16
#define DRM_MODE_CONNECTOR_DPI 17
+#define DRM_MODE_CONNECTOR_WRITEBACK 18
struct drm_mode_get_connector {
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 0bc784f5e0db..399f58317cff 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -40,6 +40,7 @@ extern "C" {
#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
+#define DRM_VMW_ALLOC_BO 1
#define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_HANDLE_CLOSE 2
#define DRM_VMW_CURSOR_BYPASS 3
@@ -68,6 +69,8 @@ extern "C" {
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
+#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
+#define DRM_VMW_GB_SURFACE_REF_EXT 28
/*************************************************************************/
/**
@@ -79,6 +82,9 @@ extern "C" {
*
* DRM_VMW_PARAM_OVERLAY_IOCTL:
* Does the driver support the overlay ioctl.
+ *
+ * DRM_VMW_PARAM_SM4_1:
+ * SM4_1 support is enabled.
*/
#define DRM_VMW_PARAM_NUM_STREAMS 0
@@ -94,6 +100,8 @@ extern "C" {
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
#define DRM_VMW_PARAM_SCREEN_TARGET 11
#define DRM_VMW_PARAM_DX 12
+#define DRM_VMW_PARAM_HW_CAPS2 13
+#define DRM_VMW_PARAM_SM4_1 14
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
@@ -356,9 +364,9 @@ struct drm_vmw_fence_rep {
/*************************************************************************/
/**
- * DRM_VMW_ALLOC_DMABUF
+ * DRM_VMW_ALLOC_BO
*
- * Allocate a DMA buffer that is visible also to the host.
+ * Allocate a buffer object that is visible also to the host.
* NOTE: The buffer is
* identified by a handle and an offset, which are private to the guest, but
* useable in the command stream. The guest kernel may translate these
@@ -366,27 +374,28 @@ struct drm_vmw_fence_rep {
* be zero at all times, or it may disappear from the interface before it is
* fixed.
*
- * The DMA buffer may stay user-space mapped in the guest at all times,
+ * The buffer object may stay user-space mapped in the guest at all times,
* and is thus suitable for sub-allocation.
*
- * DMA buffers are mapped using the mmap() syscall on the drm device.
+ * Buffer objects are mapped using the mmap() syscall on the drm device.
*/
/**
- * struct drm_vmw_alloc_dmabuf_req
+ * struct drm_vmw_alloc_bo_req
*
* @size: Required minimum size of the buffer.
*
- * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Input data to the DRM_VMW_ALLOC_BO Ioctl.
*/
-struct drm_vmw_alloc_dmabuf_req {
+struct drm_vmw_alloc_bo_req {
__u32 size;
__u32 pad64;
};
+#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req
/**
- * struct drm_vmw_dmabuf_rep
+ * struct drm_vmw_bo_rep
*
* @map_handle: Offset to use in the mmap() call used to map the buffer.
* @handle: Handle unique to this buffer. Used for unreferencing.
@@ -395,50 +404,32 @@ struct drm_vmw_alloc_dmabuf_req {
* @cur_gmr_offset: Offset to use in the command stream when this buffer is
* referenced. See note above.
*
- * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Output data from the DRM_VMW_ALLOC_BO Ioctl.
*/
-struct drm_vmw_dmabuf_rep {
+struct drm_vmw_bo_rep {
__u64 map_handle;
__u32 handle;
__u32 cur_gmr_id;
__u32 cur_gmr_offset;
__u32 pad64;
};
+#define drm_vmw_dmabuf_rep drm_vmw_bo_rep
/**
- * union drm_vmw_dmabuf_arg
+ * union drm_vmw_alloc_bo_arg
*
* @req: Input data as described above.
* @rep: Output data as described above.
*
- * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Argument to the DRM_VMW_ALLOC_BO Ioctl.
*/
-union drm_vmw_alloc_dmabuf_arg {
- struct drm_vmw_alloc_dmabuf_req req;
- struct drm_vmw_dmabuf_rep rep;
-};
-
-/*************************************************************************/
-/**
- * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
- *
- */
-
-/**
- * struct drm_vmw_unref_dmabuf_arg
- *
- * @handle: Handle indicating what buffer to free. Obtained from the
- * DRM_VMW_ALLOC_DMABUF Ioctl.
- *
- * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
- */
-
-struct drm_vmw_unref_dmabuf_arg {
- __u32 handle;
- __u32 pad64;
+union drm_vmw_alloc_bo_arg {
+ struct drm_vmw_alloc_bo_req req;
+ struct drm_vmw_bo_rep rep;
};
+#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
/*************************************************************************/
/**
@@ -1103,9 +1094,8 @@ union drm_vmw_extended_context_arg {
* DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
* underlying resource.
*
- * Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl.
- * The ioctl arguments therefore need to be identical in layout.
- *
+ * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
+ * Ioctl.
*/
/**
@@ -1119,7 +1109,107 @@ struct drm_vmw_handle_close_arg {
__u32 handle;
__u32 pad64;
};
+#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
+ *
+ * Allocates a surface handle and queues a create surface command
+ * for the host on the first use of the surface. The surface ID can
+ * be used as the surface ID in commands referencing the surface.
+ *
+ * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding version
+ * parameter and 64 bit svga flag.
+ */
+
+/**
+ * enum drm_vmw_surface_version
+ *
+ * @drm_vmw_gb_surface_v1: Corresponds to the current gb surface format with
+ * svga3d surface flags split into 2, upper half and lower half.
+ */
+enum drm_vmw_surface_version {
+ drm_vmw_gb_surface_v1
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_ext_req
+ *
+ * @base: Surface create parameters.
+ * @version: Version of surface create ioctl.
+ * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
+ * @multisample_pattern: Multisampling pattern when msaa is supported.
+ * @quality_level: Precision settings for each sample.
+ * @must_be_zero: Reserved for future usage.
+ *
+ * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
+ */
+struct drm_vmw_gb_surface_create_ext_req {
+ struct drm_vmw_gb_surface_create_req base;
+ enum drm_vmw_surface_version version;
+ uint32_t svga3d_flags_upper_32_bits;
+ SVGA3dMSPattern multisample_pattern;
+ SVGA3dMSQualityLevel quality_level;
+ uint64_t must_be_zero;
+};
+
+/**
+ * union drm_vmw_gb_surface_create_ext_arg
+ *
+ * @req: Input argument as described above.
+ * @rep: Output argument as described above.
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ */
+union drm_vmw_gb_surface_create_ext_arg {
+ struct drm_vmw_gb_surface_create_rep rep;
+ struct drm_vmw_gb_surface_create_ext_req req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
+ *
+ * Puts a reference on a host surface with a given handle, as previously
+ * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ * A reference will make sure the surface isn't destroyed while we hold
+ * it and will allow the calling client to use the surface handle in
+ * the command stream.
+ *
+ * On successful return, the Ioctl returns the surface information given
+ * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ */
+/**
+ * struct drm_vmw_gb_surface_ref_ext_rep
+ *
+ * @creq: The data used as input when the surface was created, as described
+ * above at "struct drm_vmw_gb_surface_create_ext_req"
+ * @crep: Additional data output when the surface was created, as described
+ * above at "struct drm_vmw_gb_surface_create_rep"
+ *
+ * Output Argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
+ */
+struct drm_vmw_gb_surface_ref_ext_rep {
+ struct drm_vmw_gb_surface_create_ext_req creq;
+ struct drm_vmw_gb_surface_create_rep crep;
+};
+
+/**
+ * union drm_vmw_gb_surface_reference_ext_arg
+ *
+ * @req: Input data as described above at "struct drm_vmw_surface_arg"
+ * @rep: Output data as described above at
+ * "struct drm_vmw_gb_surface_ref_ext_rep"
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
+ */
+union drm_vmw_gb_surface_reference_ext_arg {
+ struct drm_vmw_gb_surface_ref_ext_rep rep;
+ struct drm_vmw_surface_arg req;
+};
#if defined(__cplusplus)
}
diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h
index d4593a6062ef..ce43d340f010 100644
--- a/include/uapi/linux/aio_abi.h
+++ b/include/uapi/linux/aio_abi.h
@@ -38,10 +38,8 @@ enum {
IOCB_CMD_PWRITE = 1,
IOCB_CMD_FSYNC = 2,
IOCB_CMD_FDSYNC = 3,
- /* These two are experimental.
- * IOCB_CMD_PREADX = 4,
- * IOCB_CMD_POLL = 5,
- */
+ /* 4 was the experimental IOCB_CMD_PREADX */
+ IOCB_CMD_POLL = 5,
IOCB_CMD_NOOP = 6,
IOCB_CMD_PREADV = 7,
IOCB_CMD_PWRITEV = 8,
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index c35aee9ad4a6..818ae690ab79 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -148,6 +148,7 @@
#define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */
#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */
#define AUDIT_INTEGRITY_EVM_XATTR 1806 /* New EVM-covered xattr */
+#define AUDIT_INTEGRITY_POLICY_RULE 1807 /* IMA policy rules */
#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */
@@ -157,7 +158,8 @@
#define AUDIT_FILTER_ENTRY 0x02 /* Apply rule at syscall entry */
#define AUDIT_FILTER_WATCH 0x03 /* Apply rule to file system watches */
#define AUDIT_FILTER_EXIT 0x04 /* Apply rule at syscall exit */
-#define AUDIT_FILTER_TYPE 0x05 /* Apply rule at audit_log_start */
+#define AUDIT_FILTER_EXCLUDE 0x05 /* Apply rule before record creation */
+#define AUDIT_FILTER_TYPE AUDIT_FILTER_EXCLUDE /* obsolete misleading naming */
#define AUDIT_FILTER_FS 0x06 /* Apply rule at __audit_inode_child */
#define AUDIT_NR_FILTERS 7
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
index 821f71a2e48f..8d19e02d752a 100644
--- a/include/uapi/linux/bcache.h
+++ b/include/uapi/linux/bcache.h
@@ -195,7 +195,7 @@ struct cache_sb {
};
};
- __u32 last_mount; /* time_t */
+ __u32 last_mount; /* time overflow in y2106 */
__u16 first_bucket;
union {
@@ -318,7 +318,7 @@ struct uuid_entry {
struct {
__u8 uuid[16];
__u8 label[32];
- __u32 first_reg;
+ __u32 first_reg; /* time overflow in y2106 */
__u32 last_reg;
__u32 invalidated;
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index e3c70fe6bf0f..ff5a5db8906a 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -117,7 +117,7 @@ struct blk_zone_report {
__u32 nr_zones;
__u8 reserved[4];
struct blk_zone zones[0];
-} __packed;
+};
/**
* struct blk_zone_range - BLKRESETZONE ioctl request
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index b7db3261c62d..66917a4eba27 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -75,6 +75,11 @@ struct bpf_lpm_trie_key {
__u8 data[0]; /* Arbitrary size */
};
+struct bpf_cgroup_storage_key {
+ __u64 cgroup_inode_id; /* cgroup inode id */
+ __u32 attach_type; /* program attach type */
+};
+
/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
BPF_MAP_CREATE,
@@ -120,6 +125,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_CPUMAP,
BPF_MAP_TYPE_XSKMAP,
BPF_MAP_TYPE_SOCKHASH,
+ BPF_MAP_TYPE_CGROUP_STORAGE,
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
};
enum bpf_prog_type {
@@ -144,6 +151,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
BPF_PROG_TYPE_LWT_SEG6LOCAL,
BPF_PROG_TYPE_LIRC_MODE2,
+ BPF_PROG_TYPE_SK_REUSEPORT,
};
enum bpf_attach_type {
@@ -1371,6 +1379,20 @@ union bpf_attr {
* An 8-byte long non-decreasing number on success, or 0 if the
* socket field is missing inside *skb*.
*
+ * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
+ * Description
+ * Equivalent to bpf_get_socket_cookie() helper that accepts
+ * *skb*, but gets socket from **struct bpf_sock_addr** context.
+ * Return
+ * An 8-byte long non-decreasing number.
+ *
+ * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
+ * Description
+ * Equivalent to bpf_get_socket_cookie() helper that accepts
+ * *skb*, but gets socket from **struct bpf_sock_ops** context.
+ * Return
+ * An 8-byte long non-decreasing number.
+ *
* u32 bpf_get_socket_uid(struct sk_buff *skb)
* Return
* The owner UID of the socket associated to *skb*. If the socket
@@ -1826,7 +1848,7 @@ union bpf_attr {
* A non-negative value equal to or less than *size* on success,
* or a negative error in case of failure.
*
- * int skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
* This helper is similar to **bpf_skb_load_bytes**\ () in that
* it provides an easy way to load *len* bytes from *offset*
@@ -1877,7 +1899,7 @@ union bpf_attr {
* * < 0 if any input argument is invalid
* * 0 on success (packet is forwarded, nexthop neighbor exists)
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
- * * packet is not forwarded or needs assist from full stack
+ * packet is not forwarded or needs assist from full stack
*
* int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
* Description
@@ -2033,7 +2055,6 @@ union bpf_attr {
* This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
- *
* Return
* 0
*
@@ -2053,7 +2074,6 @@ union bpf_attr {
* This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
- *
* Return
* 0
*
@@ -2073,10 +2093,54 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ * Description
+ * Return id of cgroup v2 that is ancestor of cgroup associated
+ * with the *skb* at the *ancestor_level*. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* equals the level of
+ * the cgroup associated with *skb*, the return value is the same
+ * as that of **bpf_skb_cgroup_id**\ ().
+ *
+ * The helper is useful for implementing policies based on cgroups
+ * that are higher in the hierarchy than the immediate cgroup
+ * associated with *skb*.
+ *
+ * The format of the returned id and the helper limitations are the
+ * same as in **bpf_skb_cgroup_id**\ ().
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
* u64 bpf_get_current_cgroup_id(void)
* Return
* A 64-bit integer containing the current cgroup id based
* on the cgroup within which the current task is running.
+ *
+ * void *bpf_get_local_storage(void *map, u64 flags)
+ * Description
+ * Get the pointer to the local storage area.
+ * The type and the size of the local storage is defined
+ * by the *map* argument.
+ * The *flags* meaning is specific for each map type,
+ * and has to be 0 for cgroup local storage.
+ *
+ * Depending on the BPF program type, a local storage area
+ * can be shared between multiple instances of the BPF program
+ * running simultaneously.
+ *
+ * The user is responsible for synchronization, for example
+ * by using the BPF_STX_XADD instruction to alter
+ * the shared data.
+ * Return
+ * Pointer to the local storage area.
+ *
+ * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
+ * Description
+ * Select a SO_REUSEPORT sk from a BPF_MAP_TYPE_REUSEPORT_ARRAY map.
+ * It checks that the selected sk matches the incoming
+ * request in the skb.
+ * Return
+ * 0 on success, or a negative error in case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2159,7 +2223,10 @@ union bpf_attr {
FN(rc_repeat), \
FN(rc_keydown), \
FN(skb_cgroup_id), \
- FN(get_current_cgroup_id),
+ FN(get_current_cgroup_id), \
+ FN(get_local_storage), \
+ FN(sk_select_reuseport), \
+ FN(skb_ancestor_cgroup_id),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2376,6 +2443,30 @@ struct sk_msg_md {
__u32 local_port; /* stored in host byte order */
};
+struct sk_reuseport_md {
+ /*
+ * Start of directly accessible data. It begins from
+ * the tcp/udp header.
+ */
+ void *data;
+ void *data_end; /* End of directly accessible data */
+ /*
+ * Total length of packet (starting from the tcp/udp header).
+ * Note that the directly accessible bytes (data_end - data)
+ * could be less than this "len". Those bytes could be
+ * indirectly read by a helper "bpf_skb_load_bytes()".
+ */
+ __u32 len;
+ /*
+ * Eth protocol in the mac header (network byte order). e.g.
+ * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
+ */
+ __u32 eth_protocol;
+ __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
+ __u32 bind_inany; /* Is sock bound to an INANY address? */
+ __u32 hash; /* A hash of the packet 4 tuples */
+};
+
#define BPF_TAG_SIZE 8
struct bpf_prog_info {
@@ -2557,6 +2648,9 @@ enum {
* Arg1: old_state
* Arg2: new_state
*/
+ BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after
+ * socket transition to LISTEN state.
+ */
};
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
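
To show how the new BPF_MAP_TYPE_CGROUP_STORAGE and struct bpf_cgroup_storage_key fit together, here is a minimal sketch of creating such a map with a raw bpf(2) call; real programs would normally go through libbpf instead:

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    /* Create a cgroup-storage map keyed by the new bpf_cgroup_storage_key. */
    static int create_cgroup_storage_map(unsigned int value_size)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.map_type = BPF_MAP_TYPE_CGROUP_STORAGE;
            attr.key_size = sizeof(struct bpf_cgroup_storage_key);
            attr.value_size = value_size;
            attr.max_entries = 0;   /* must be 0: entries are managed per cgroup */

            return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
    }

A BPF program attached to a cgroup can then reach its per-cgroup area through the get_local_storage helper documented above.
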
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index d7f97ac197a9..0afb7d8e867f 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -77,7 +77,7 @@ typedef __u32 canid_t;
/*
* Controller Area Network Error Message Frame Mask structure
*
- * bit 0-28 : error class mask (see include/linux/can/error.h)
+ * bit 0-28 : error class mask (see include/uapi/linux/can/error.h)
* bit 29-31 : set to zero
*/
typedef __u32 can_err_mask_t;
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index 20fe091b7e96..097fcd812471 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -384,6 +384,8 @@ struct cec_log_addrs {
#define CEC_EVENT_PIN_CEC_HIGH 4
#define CEC_EVENT_PIN_HPD_LOW 5
#define CEC_EVENT_PIN_HPD_HIGH 6
+#define CEC_EVENT_PIN_5V_LOW 7
+#define CEC_EVENT_PIN_5V_HIGH 8
#define CEC_EVENT_FL_INITIAL_STATE (1 << 0)
#define CEC_EVENT_FL_DROPPED_EVENTS (1 << 1)
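
A short sketch of consuming the new 5V pin events (the /dev/cec0 node is an assumption; CEC_DQEVENT blocks unless the fd is non-blocking):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/cec.h>

    int main(void)
    {
            struct cec_event ev;
            int fd = open("/dev/cec0", O_RDWR);     /* assumed CEC adapter */

            if (fd < 0)
                    return 1;
            if (ioctl(fd, CEC_DQEVENT, &ev) < 0)    /* wait for next event */
                    return 1;
            if (ev.event == CEC_EVENT_PIN_5V_LOW)
                    printf("5V pin went low\n");
            else if (ev.event == CEC_EVENT_PIN_5V_HIGH)
                    printf("5V pin went high\n");
            return 0;
    }
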
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index 60aa2e446698..69df19aa8e72 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -233,7 +233,8 @@ struct cee_pfc {
* 2 Well known port number over TCP or SCTP
* 3 Well known port number over UDP or DCCP
* 4 Well known port number over TCP, SCTP, UDP, or DCCP
- * 5-7 Reserved
+ * 5 Differentiated Services Code Point (DSCP) value
+ * 6-7 Reserved
*
* Selector field values for CEE
* 0 Ethertype
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 75cb5450c851..79407bbd296d 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -78,6 +78,17 @@ enum devlink_command {
*/
DEVLINK_CMD_RELOAD,
+ DEVLINK_CMD_PARAM_GET, /* can dump */
+ DEVLINK_CMD_PARAM_SET,
+ DEVLINK_CMD_PARAM_NEW,
+ DEVLINK_CMD_PARAM_DEL,
+
+ DEVLINK_CMD_REGION_GET,
+ DEVLINK_CMD_REGION_SET,
+ DEVLINK_CMD_REGION_NEW,
+ DEVLINK_CMD_REGION_DEL,
+ DEVLINK_CMD_REGION_READ,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -142,6 +153,16 @@ enum devlink_port_flavour {
*/
};
+enum devlink_param_cmode {
+ DEVLINK_PARAM_CMODE_RUNTIME,
+ DEVLINK_PARAM_CMODE_DRIVERINIT,
+ DEVLINK_PARAM_CMODE_PERMANENT,
+
+ /* Add new configuration modes above */
+ __DEVLINK_PARAM_CMODE_MAX,
+ DEVLINK_PARAM_CMODE_MAX = __DEVLINK_PARAM_CMODE_MAX - 1
+};
+
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
@@ -238,6 +259,27 @@ enum devlink_attr {
DEVLINK_ATTR_PORT_NUMBER, /* u32 */
DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER, /* u32 */
+ DEVLINK_ATTR_PARAM, /* nested */
+ DEVLINK_ATTR_PARAM_NAME, /* string */
+ DEVLINK_ATTR_PARAM_GENERIC, /* flag */
+ DEVLINK_ATTR_PARAM_TYPE, /* u8 */
+ DEVLINK_ATTR_PARAM_VALUES_LIST, /* nested */
+ DEVLINK_ATTR_PARAM_VALUE, /* nested */
+ DEVLINK_ATTR_PARAM_VALUE_DATA, /* dynamic */
+ DEVLINK_ATTR_PARAM_VALUE_CMODE, /* u8 */
+
+ DEVLINK_ATTR_REGION_NAME, /* string */
+ DEVLINK_ATTR_REGION_SIZE, /* u64 */
+ DEVLINK_ATTR_REGION_SNAPSHOTS, /* nested */
+ DEVLINK_ATTR_REGION_SNAPSHOT, /* nested */
+ DEVLINK_ATTR_REGION_SNAPSHOT_ID, /* u32 */
+
+ DEVLINK_ATTR_REGION_CHUNKS, /* nested */
+ DEVLINK_ATTR_REGION_CHUNK, /* nested */
+ DEVLINK_ATTR_REGION_CHUNK_DATA, /* binary */
+ DEVLINK_ATTR_REGION_CHUNK_ADDR, /* u64 */
+ DEVLINK_ATTR_REGION_CHUNK_LEN, /* u64 */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/dvb/audio.h b/include/uapi/linux/dvb/audio.h
index 69f7a85d81b1..afeae063e640 100644
--- a/include/uapi/linux/dvb/audio.h
+++ b/include/uapi/linux/dvb/audio.h
@@ -67,27 +67,6 @@ typedef struct audio_status {
} audio_status_t; /* separate decoder hardware */
-typedef
-struct audio_karaoke { /* if Vocal1 or Vocal2 are non-zero, they get mixed */
- int vocal1; /* into left and right t at 70% each */
- int vocal2; /* if both, Vocal1 and Vocal2 are non-zero, Vocal1 gets*/
- int melody; /* mixed into the left channel and */
- /* Vocal2 into the right channel at 100% each. */
- /* if Melody is non-zero, the melody channel gets mixed*/
-} audio_karaoke_t; /* into left and right */
-
-
-typedef __u16 audio_attributes_t;
-/* bits: descr. */
-/* 15-13 audio coding mode (0=ac3, 2=mpeg1, 3=mpeg2ext, 4=LPCM, 6=DTS, */
-/* 12 multichannel extension */
-/* 11-10 audio type (0=not spec, 1=language included) */
-/* 9- 8 audio application mode (0=not spec, 1=karaoke, 2=surround) */
-/* 7- 6 Quantization / DRC (mpeg audio: 1=DRC exists)(lpcm: 0=16bit, */
-/* 5- 4 Sample frequency fs (0=48kHz, 1=96kHz) */
-/* 2- 0 number of audio channels (n+1 channels) */
-
-
/* for GET_CAPABILITIES and SET_FORMAT, the latter should only set one bit */
#define AUDIO_CAP_DTS 1
#define AUDIO_CAP_LPCM 2
@@ -115,22 +94,6 @@ typedef __u16 audio_attributes_t;
#define AUDIO_SET_ID _IO('o', 13)
#define AUDIO_SET_MIXER _IOW('o', 14, audio_mixer_t)
#define AUDIO_SET_STREAMTYPE _IO('o', 15)
-#define AUDIO_SET_EXT_ID _IO('o', 16)
-#define AUDIO_SET_ATTRIBUTES _IOW('o', 17, audio_attributes_t)
-#define AUDIO_SET_KARAOKE _IOW('o', 18, audio_karaoke_t)
-
-/**
- * AUDIO_GET_PTS
- *
- * Read the 33 bit presentation time stamp as defined
- * in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
- *
- * The PTS should belong to the currently played
- * frame if possible, but may also be a value close to it
- * like the PTS of the last decoded frame or the last PTS
- * extracted by the PES parser.
- */
-#define AUDIO_GET_PTS _IOR('o', 19, __u64)
#define AUDIO_BILINGUAL_CHANNEL_SELECT _IO('o', 20)
#endif /* _DVBAUDIO_H_ */
diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h
index df3d7028c807..43ba8b0a3d14 100644
--- a/include/uapi/linux/dvb/video.h
+++ b/include/uapi/linux/dvb/video.h
@@ -38,18 +38,6 @@ typedef enum {
typedef enum {
- VIDEO_SYSTEM_PAL,
- VIDEO_SYSTEM_NTSC,
- VIDEO_SYSTEM_PALN,
- VIDEO_SYSTEM_PALNc,
- VIDEO_SYSTEM_PALM,
- VIDEO_SYSTEM_NTSC60,
- VIDEO_SYSTEM_PAL60,
- VIDEO_SYSTEM_PALM60
-} video_system_t;
-
-
-typedef enum {
VIDEO_PAN_SCAN, /* use pan and scan format */
VIDEO_LETTER_BOX, /* use letterbox format */
VIDEO_CENTER_CUT_OUT /* use center cut out format */
@@ -160,44 +148,6 @@ struct video_still_picture {
};
-typedef
-struct video_highlight {
- int active; /* 1=show highlight, 0=hide highlight */
- __u8 contrast1; /* 7- 4 Pattern pixel contrast */
- /* 3- 0 Background pixel contrast */
- __u8 contrast2; /* 7- 4 Emphasis pixel-2 contrast */
- /* 3- 0 Emphasis pixel-1 contrast */
- __u8 color1; /* 7- 4 Pattern pixel color */
- /* 3- 0 Background pixel color */
- __u8 color2; /* 7- 4 Emphasis pixel-2 color */
- /* 3- 0 Emphasis pixel-1 color */
- __u32 ypos; /* 23-22 auto action mode */
- /* 21-12 start y */
- /* 9- 0 end y */
- __u32 xpos; /* 23-22 button color number */
- /* 21-12 start x */
- /* 9- 0 end x */
-} video_highlight_t;
-
-
-typedef struct video_spu {
- int active;
- int stream_id;
-} video_spu_t;
-
-
-typedef struct video_spu_palette { /* SPU Palette information */
- int length;
- __u8 __user *palette;
-} video_spu_palette_t;
-
-
-typedef struct video_navi_pack {
- int length; /* 0 ... 1024 */
- __u8 data[1024];
-} video_navi_pack_t;
-
-
typedef __u16 video_attributes_t;
/* bits: descr. */
/* 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
@@ -242,17 +192,9 @@ typedef __u16 video_attributes_t;
#define VIDEO_SLOWMOTION _IO('o', 32)
#define VIDEO_GET_CAPABILITIES _IOR('o', 33, unsigned int)
#define VIDEO_CLEAR_BUFFER _IO('o', 34)
-#define VIDEO_SET_ID _IO('o', 35)
#define VIDEO_SET_STREAMTYPE _IO('o', 36)
#define VIDEO_SET_FORMAT _IO('o', 37)
-#define VIDEO_SET_SYSTEM _IO('o', 38)
-#define VIDEO_SET_HIGHLIGHT _IOW('o', 39, video_highlight_t)
-#define VIDEO_SET_SPU _IOW('o', 50, video_spu_t)
-#define VIDEO_SET_SPU_PALETTE _IOW('o', 51, video_spu_palette_t)
-#define VIDEO_GET_NAVI _IOR('o', 52, video_navi_pack_t)
-#define VIDEO_SET_ATTRIBUTES _IO('o', 53)
#define VIDEO_GET_SIZE _IOR('o', 55, video_size_t)
-#define VIDEO_GET_FRAME_RATE _IOR('o', 56, unsigned int)
/**
* VIDEO_GET_PTS
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 4e12c423b9fe..c5358e0ae7c5 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -422,6 +422,8 @@ typedef struct elf64_shdr {
#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
+#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
+#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h
index dc64cfaf13da..c0151200f7d1 100644
--- a/include/uapi/linux/errqueue.h
+++ b/include/uapi/linux/errqueue.h
@@ -20,12 +20,16 @@ struct sock_extended_err {
#define SO_EE_ORIGIN_ICMP6 3
#define SO_EE_ORIGIN_TXSTATUS 4
#define SO_EE_ORIGIN_ZEROCOPY 5
+#define SO_EE_ORIGIN_TXTIME 6
#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS
#define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1))
#define SO_EE_CODE_ZEROCOPY_COPIED 1
+#define SO_EE_CODE_TXTIME_INVALID_PARAM 1
+#define SO_EE_CODE_TXTIME_MISSED 2
+
/**
* struct scm_timestamping - timestamps exposed through cmsg
*
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 7363f18e65a5..dc69391d2bba 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -870,7 +870,8 @@ struct ethtool_flow_ext {
* includes the %FLOW_EXT or %FLOW_MAC_EXT flag
* (see &struct ethtool_flow_ext description).
* @ring_cookie: RX ring/queue index to deliver to, or %RX_CLS_FLOW_DISC
- * if packets should be discarded
+ * if packets should be discarded, or %RX_CLS_FLOW_WAKE if the
+ * packets should be used for Wake-on-LAN with %WAKE_FILTER
* @location: Location of rule in the table. Locations must be
* numbered such that a flow matching multiple rules will be
* classified according to the first (lowest numbered) rule.
@@ -902,13 +903,13 @@ struct ethtool_rx_flow_spec {
static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
{
return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
-};
+}
static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
{
return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
-};
+}
/**
* struct ethtool_rxnfc - command to get or set RX flow classification rules
@@ -1634,6 +1635,7 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define WAKE_ARP (1 << 4)
#define WAKE_MAGIC (1 << 5)
#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
+#define WAKE_FILTER (1 << 7)
/* L2-L4 network traffic flow types */
#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
@@ -1671,6 +1673,7 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define RXH_DISCARD (1 << 31)
#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
+#define RX_CLS_FLOW_WAKE 0xfffffffffffffffeULL
/* Special RX classification rule insert location values */
#define RX_CLS_LOC_SPECIAL 0x80000000 /* flag */
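
To tie WAKE_FILTER and RX_CLS_FLOW_WAKE together, a hedged sketch of inserting a wake-up classification rule; the interface name and destination port are assumptions, and the driver must actually advertise WAKE_FILTER for the kernel to accept this:

    #include <string.h>
    #include <arpa/inet.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    /* Mark a TCP/IPv4 destination-port flow as a wake-up filter. */
    static int add_wake_filter(int sock, const char *ifname)
    {
            struct ethtool_rxnfc nfc;
            struct ifreq ifr;

            memset(&nfc, 0, sizeof(nfc));
            nfc.cmd = ETHTOOL_SRXCLSRLINS;
            nfc.fs.flow_type = TCP_V4_FLOW;
            nfc.fs.h_u.tcp_ip4_spec.pdst = htons(9);   /* example port */
            nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;     /* match it exactly */
            nfc.fs.ring_cookie = RX_CLS_FLOW_WAKE;     /* wake, don't deliver */
            nfc.fs.location = 0;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&nfc;
            return ioctl(sock, SIOCETHTOOL, &ifr);
    }
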
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index cf01b6824244..43391e2d1153 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -164,6 +164,8 @@ enum {
IFLA_CARRIER_UP_COUNT,
IFLA_CARRIER_DOWN_COUNT,
IFLA_NEW_IFINDEX,
+ IFLA_MIN_MTU,
+ IFLA_MAX_MTU,
__IFLA_MAX
};
@@ -334,6 +336,7 @@ enum {
IFLA_BRPORT_GROUP_FWD_MASK,
IFLA_BRPORT_NEIGH_SUPPRESS,
IFLA_BRPORT_ISOLATED,
+ IFLA_BRPORT_BACKUP_PORT,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -459,6 +462,16 @@ enum {
#define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1)
+/* XFRM section */
+enum {
+ IFLA_XFRM_UNSPEC,
+ IFLA_XFRM_LINK,
+ IFLA_XFRM_IF_ID,
+ __IFLA_XFRM_MAX
+};
+
+#define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1)
+
enum macsec_validation_type {
MACSEC_VALIDATE_DISABLED = 0,
MACSEC_VALIDATE_CHECK = 1,
@@ -920,6 +933,7 @@ enum {
XDP_ATTACHED_DRV,
XDP_ATTACHED_SKB,
XDP_ATTACHED_HW,
+ XDP_ATTACHED_MULTI,
};
enum {
@@ -928,6 +942,9 @@ enum {
IFLA_XDP_ATTACHED,
IFLA_XDP_FLAGS,
IFLA_XDP_PROG_ID,
+ IFLA_XDP_DRV_PROG_ID,
+ IFLA_XDP_SKB_PROG_ID,
+ IFLA_XDP_HW_PROG_ID,
__IFLA_XDP_MAX,
};
diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h
index 483b77af4eb8..db45d3e49a12 100644
--- a/include/uapi/linux/ila.h
+++ b/include/uapi/linux/ila.h
@@ -30,6 +30,7 @@ enum {
ILA_CMD_ADD,
ILA_CMD_DEL,
ILA_CMD_GET,
+ ILA_CMD_FLUSH,
__ILA_CMD_MAX,
};
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index b24a742beae5..e42d13b55cf3 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -168,6 +168,7 @@ enum
IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
IPV4_DEVCONF_DROP_GRATUITOUS_ARP,
+ IPV4_DEVCONF_BC_FORWARDING,
__IPV4_DEVCONF_MAX
};
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index b4f5073dbac2..01674b56e14f 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -76,6 +76,12 @@ struct kfd_ioctl_update_queue_args {
__u32 queue_priority; /* to KFD */
};
+struct kfd_ioctl_set_cu_mask_args {
+ __u32 queue_id; /* to KFD */
+ __u32 num_cu_mask; /* to KFD */
+ __u64 cu_mask_ptr; /* to KFD */
+};
+
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
@@ -189,6 +195,15 @@ struct kfd_ioctl_dbg_wave_control_args {
#define KFD_SIGNAL_EVENT_LIMIT 4096
+/* For kfd_event_data.hw_exception_data.reset_type. */
+#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
+#define KFD_HW_EXCEPTION_PER_ENGINE_RESET 1
+
+/* For kfd_event_data.hw_exception_data.reset_cause. */
+#define KFD_HW_EXCEPTION_GPU_HANG 0
+#define KFD_HW_EXCEPTION_ECC 1
+
+
struct kfd_ioctl_create_event_args {
__u64 event_page_offset; /* from KFD */
__u32 event_trigger_data; /* from KFD - signal events only */
@@ -219,7 +234,7 @@ struct kfd_memory_exception_failure {
__u32 NotPresent; /* Page not present or supervisor privilege */
__u32 ReadOnly; /* Write access to a read-only page */
__u32 NoExecute; /* Execute access to a page marked NX */
- __u32 pad;
+ __u32 imprecise; /* Can't determine the exact fault address */
};
/* memory exception data*/
@@ -230,10 +245,19 @@ struct kfd_hsa_memory_exception_data {
__u32 pad;
};
-/* Event data*/
+/* hw exception data */
+struct kfd_hsa_hw_exception_data {
+ uint32_t reset_type;
+ uint32_t reset_cause;
+ uint32_t memory_lost;
+ uint32_t gpu_id;
+};
+
+/* Event data */
struct kfd_event_data {
union {
struct kfd_hsa_memory_exception_data memory_exception_data;
+ struct kfd_hsa_hw_exception_data hw_exception_data;
}; /* From KFD */
__u64 kfd_event_data_ext; /* pointer to an extension structure
for future exception types */
@@ -448,7 +472,10 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU \
AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
+#define AMDKFD_IOC_SET_CU_MASK \
+ AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x1A
+#define AMDKFD_COMMAND_END 0x1B
#endif
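
A minimal sketch of the new AMDKFD_IOC_SET_CU_MASK ioctl; the fd is assumed to come from opening /dev/kfd and queue_id from an earlier create-queue call, and the mask value is an arbitrary example:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kfd_ioctl.h>

    /* Restrict a queue to the compute units named by a 64-bit mask.
     * num_cu_mask is the mask size in bits (a multiple of 32). */
    static int set_cu_mask(int kfd_fd, uint32_t queue_id)
    {
            uint32_t mask[2] = { 0x0000ffff, 0x00000000 };  /* first 16 CUs */
            struct kfd_ioctl_set_cu_mask_args args = {
                    .queue_id = queue_id,
                    .num_cu_mask = 64,
                    .cu_mask_ptr = (uintptr_t)mask,
            };

            return ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
    }
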
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index b6270a3b38e9..b955b986b341 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -949,6 +949,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_GET_MSR_FEATURES 153
#define KVM_CAP_HYPERV_EVENTFD 154
#define KVM_CAP_HYPERV_TLBFLUSH 155
+#define KVM_CAP_S390_HPAGE_1M 156
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 7d570c7bd117..61158f5a1a5b 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -60,14 +60,14 @@ struct sockaddr_l2tpip6 {
/*
* Commands.
* Valid TLVs of each command are:-
- * TUNNEL_CREATE - CONN_ID, pw_type, netns, ifname, ipinfo, udpinfo, udpcsum, vlanid
+ * TUNNEL_CREATE - CONN_ID, pw_type, netns, ifname, ipinfo, udpinfo, udpcsum
* TUNNEL_DELETE - CONN_ID
* TUNNEL_MODIFY - CONN_ID, udpcsum
* TUNNEL_GETSTATS - CONN_ID, (stats)
* TUNNEL_GET - CONN_ID, (...)
- * SESSION_CREATE - SESSION_ID, PW_TYPE, data_seq, cookie, peer_cookie, l2spec
+ * SESSION_CREATE - SESSION_ID, PW_TYPE, cookie, peer_cookie, l2spec
* SESSION_DELETE - SESSION_ID
- * SESSION_MODIFY - SESSION_ID, data_seq
+ * SESSION_MODIFY - SESSION_ID
* SESSION_GET - SESSION_ID, (...)
* SESSION_GETSTATS - SESSION_ID, (stats)
*
@@ -95,7 +95,7 @@ enum {
L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */
L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */
L2TP_ATTR_OFFSET, /* u16 (not used) */
- L2TP_ATTR_DATA_SEQ, /* u16 */
+ L2TP_ATTR_DATA_SEQ, /* u16 (not used) */
L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */
L2TP_ATTR_L2SPEC_LEN, /* u8 (not used) */
L2TP_ATTR_PROTO_VERSION, /* u8 */
@@ -105,7 +105,7 @@ enum {
L2TP_ATTR_SESSION_ID, /* u32 */
L2TP_ATTR_PEER_SESSION_ID, /* u32 */
L2TP_ATTR_UDP_CSUM, /* u8 */
- L2TP_ATTR_VLAN_ID, /* u16 */
+ L2TP_ATTR_VLAN_ID, /* u16 (not used) */
L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */
L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */
L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */
@@ -119,8 +119,8 @@ enum {
L2TP_ATTR_IP_DADDR, /* u32 */
L2TP_ATTR_UDP_SPORT, /* u16 */
L2TP_ATTR_UDP_DPORT, /* u16 */
- L2TP_ATTR_MTU, /* u16 */
- L2TP_ATTR_MRU, /* u16 */
+ L2TP_ATTR_MTU, /* u16 (not used) */
+ L2TP_ATTR_MRU, /* u16 (not used) */
L2TP_ATTR_STATS, /* nested */
L2TP_ATTR_IP6_SADDR, /* struct in6_addr */
L2TP_ATTR_IP6_DADDR, /* struct in6_addr */
@@ -169,6 +169,7 @@ enum l2tp_encap_type {
L2TP_ENCAPTYPE_IP,
};
+/* For L2TP_ATTR_DATA_SEQ. Unused. */
enum l2tp_seqmode {
L2TP_SEQ_NONE = 0,
L2TP_SEQ_IP = 1,
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 9e3511742fdc..d6a5a3bfe6c4 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -62,7 +62,7 @@
#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
-/* YUV (including grey) - next is 0x202c */
+/* YUV (including grey) - next is 0x202d */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -74,6 +74,7 @@
#define MEDIA_BUS_FMT_YUYV8_2X8 0x2008
#define MEDIA_BUS_FMT_YVYU8_2X8 0x2009
#define MEDIA_BUS_FMT_Y10_1X10 0x200a
+#define MEDIA_BUS_FMT_Y10_2X8_PADHI_LE 0x202c
#define MEDIA_BUS_FMT_UYVY10_2X10 0x2018
#define MEDIA_BUS_FMT_VYUY10_2X10 0x2019
#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index c7e9a5cba24e..36f76e777ef9 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -25,7 +25,6 @@
#endif
#include <linux/ioctl.h>
#include <linux/types.h>
-#include <linux/version.h>
struct media_device_info {
char driver[16];
@@ -90,12 +89,6 @@ struct media_device_info {
#define MEDIA_ENT_F_LENS (MEDIA_ENT_F_OLD_SUBDEV_BASE + 3)
/*
- * Video decoder functions
- */
-#define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4)
-#define MEDIA_ENT_F_DTV_DECODER (MEDIA_ENT_F_BASE + 0x6001)
-
-/*
* Digital TV, analog TV, radio and/or software defined radio tuner functions.
*
* It is a responsibility of the master/bridge drivers to add connectors
@@ -132,6 +125,8 @@ struct media_device_info {
#define MEDIA_ENT_F_PROC_VIDEO_LUT (MEDIA_ENT_F_BASE + 0x4004)
#define MEDIA_ENT_F_PROC_VIDEO_SCALER (MEDIA_ENT_F_BASE + 0x4005)
#define MEDIA_ENT_F_PROC_VIDEO_STATISTICS (MEDIA_ENT_F_BASE + 0x4006)
+#define MEDIA_ENT_F_PROC_VIDEO_ENCODER (MEDIA_ENT_F_BASE + 0x4007)
+#define MEDIA_ENT_F_PROC_VIDEO_DECODER (MEDIA_ENT_F_BASE + 0x4008)
/*
* Switch and bridge entity functions
@@ -139,6 +134,13 @@ struct media_device_info {
#define MEDIA_ENT_F_VID_MUX (MEDIA_ENT_F_BASE + 0x5001)
#define MEDIA_ENT_F_VID_IF_BRIDGE (MEDIA_ENT_F_BASE + 0x5002)
+/*
+ * Video decoder/encoder functions
+ */
+#define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4)
+#define MEDIA_ENT_F_DV_DECODER (MEDIA_ENT_F_BASE + 0x6001)
+#define MEDIA_ENT_F_DV_ENCODER (MEDIA_ENT_F_BASE + 0x6002)
+
/* Entity flags */
#define MEDIA_ENT_FL_DEFAULT (1 << 0)
#define MEDIA_ENT_FL_CONNECTOR (1 << 1)
@@ -280,11 +282,21 @@ struct media_links_enum {
* MC next gen API definitions
*/
+/*
+ * Appeared in 4.19.0.
+ *
+ * The media_version argument comes from the media_version field in
+ * struct media_device_info.
+ */
+#define MEDIA_V2_ENTITY_HAS_FLAGS(media_version) \
+ ((media_version) >= ((4 << 16) | (19 << 8) | 0))
+
struct media_v2_entity {
__u32 id;
char name[64];
__u32 function; /* Main function of the entity */
- __u32 reserved[6];
+ __u32 flags;
+ __u32 reserved[5];
} __attribute__ ((packed));
/* Should match the specific fields at media_intf_devnode */
@@ -305,11 +317,21 @@ struct media_v2_interface {
};
} __attribute__ ((packed));
+/*
+ * Appeared in 4.19.0.
+ *
+ * The media_version argument comes from the media_version field in
+ * struct media_device_info.
+ */
+#define MEDIA_V2_PAD_HAS_INDEX(media_version) \
+ ((media_version) >= ((4 << 16) | (19 << 8) | 0))
+
struct media_v2_pad {
__u32 id;
__u32 entity_id;
__u32 flags;
- __u32 reserved[5];
+ __u32 index;
+ __u32 reserved[4];
} __attribute__ ((packed));
struct media_v2_link {
@@ -348,7 +370,7 @@ struct media_v2_topology {
#define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc)
#define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology)
-#if !defined(__KERNEL__) || defined(__NEED_MEDIA_LEGACY_API)
+#ifndef __KERNEL__
/*
* Legacy symbols used to avoid userspace compilation breakages.
@@ -380,6 +402,8 @@ struct media_v2_topology {
#define MEDIA_ENT_T_V4L2_SUBDEV_DECODER MEDIA_ENT_F_ATV_DECODER
#define MEDIA_ENT_T_V4L2_SUBDEV_TUNER MEDIA_ENT_F_TUNER
+#define MEDIA_ENT_F_DTV_DECODER MEDIA_ENT_F_DV_DECODER
+
/*
* There is still no ALSA support in the media controller. These
* defines should not have been added and we leave them here only
@@ -396,7 +420,7 @@ struct media_v2_topology {
#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7)
/* Obsolete symbol for media_version, no longer used in the kernel */
-#define MEDIA_API_VERSION KERNEL_VERSION(0, 1, 0)
+#define MEDIA_API_VERSION ((0 << 16) | (1 << 8) | 0)
#endif
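
The two new version-check macros are meant to be used against the media_version reported by the device; a minimal sketch (the /dev/media0 node is an assumption):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/media.h>

    int main(void)
    {
            struct media_device_info info;
            int fd = open("/dev/media0", O_RDWR);   /* assumed media node */

            if (fd < 0 || ioctl(fd, MEDIA_IOC_DEVICE_INFO, &info) < 0)
                    return 1;

            /* Only 4.19+ fills media_v2_entity.flags and media_v2_pad.index. */
            if (MEDIA_V2_ENTITY_HAS_FLAGS(info.media_version))
                    printf("entity flags are valid\n");
            if (MEDIA_V2_PAD_HAS_INDEX(info.media_version))
                    printf("pad index is valid\n");
            return 0;
    }
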
diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h
index b5c2fdcf23fd..a506216591d6 100644
--- a/include/uapi/linux/mii.h
+++ b/include/uapi/linux/mii.h
@@ -136,6 +136,7 @@
#define CTL1000_ENABLE_MASTER 0x1000
/* 1000BASE-T Status register */
+#define LPA_1000MSFAIL 0x8000 /* Master/Slave resolution failure */
#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
#define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */
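
A sketch of testing the new LPA_1000MSFAIL bit from userspace via the MII ioctls; this assumes a driver with MDIO ioctl support, and sock is any ordinary AF_INET datagram socket:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/mii.h>
    #include <linux/sockios.h>

    /* Return 1 if 1000BASE-T master/slave resolution failed, 0 if not,
     * -1 on error. */
    static int ms_resolution_failed(int sock, const char *ifname)
    {
            struct ifreq ifr;
            struct mii_ioctl_data *mii =
                    (struct mii_ioctl_data *)&ifr.ifr_data;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            if (ioctl(sock, SIOCGMIIPHY, &ifr) < 0)   /* fills mii->phy_id */
                    return -1;

            mii->reg_num = MII_STAT1000;              /* 1000BASE-T status */
            if (ioctl(sock, SIOCGMIIREG, &ifr) < 0)
                    return -1;
            return !!(mii->val_out & LPA_1000MSFAIL);
    }
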
diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h
index 10f9ff9426a2..5d37a9ccce63 100644
--- a/include/uapi/linux/mroute.h
+++ b/include/uapi/linux/mroute.h
@@ -120,6 +120,7 @@ enum {
IPMRA_TABLE_MROUTE_DO_ASSERT,
IPMRA_TABLE_MROUTE_DO_PIM,
IPMRA_TABLE_VIFS,
+ IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
__IPMRA_TABLE_MAX
};
#define IPMRA_TABLE_MAX (__IPMRA_TABLE_MAX - 1)
@@ -173,5 +174,6 @@ enum {
#define IGMPMSG_NOCACHE 1 /* Kern cache fill request to mrouted */
#define IGMPMSG_WRONGVIF 2 /* For PIM assert processing (unused) */
#define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */
+#define IGMPMSG_WRVIFWHOLE 4 /* For PIM Register and assert processing */
#endif /* _UAPI__LINUX_MROUTE_H */
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 4fe104b2411f..97ff3c17ec4d 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -141,4 +141,22 @@ struct scm_ts_pktinfo {
__u32 reserved[2];
};
+/*
+ * SO_TXTIME gets a struct sock_txtime with flags being an integer bit
+ * field comprised of these values.
+ */
+enum txtime_flags {
+ SOF_TXTIME_DEADLINE_MODE = (1 << 0),
+ SOF_TXTIME_REPORT_ERRORS = (1 << 1),
+
+ SOF_TXTIME_FLAGS_LAST = SOF_TXTIME_REPORT_ERRORS,
+ SOF_TXTIME_FLAGS_MASK = (SOF_TXTIME_FLAGS_LAST - 1) |
+ SOF_TXTIME_FLAGS_LAST
+};
+
+struct sock_txtime {
+ clockid_t clockid; /* reference clockid */
+ __u32 flags; /* as defined by enum txtime_flags */
+};
+
#endif /* _NET_TIMESTAMPING_H */
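
Putting the new struct sock_txtime to use is a single setsockopt; a minimal sketch, assuming the libc headers already expose the SO_TXTIME socket option and CLOCK_TAI. With SOF_TXTIME_REPORT_ERRORS set, failures surface on the error queue as SO_EE_ORIGIN_TXTIME with the SO_EE_CODE_TXTIME_* codes added to errqueue.h above:

    #include <sys/socket.h>
    #include <time.h>
    #include <linux/net_tstamp.h>

    /* Opt a socket into transmit-time control with deadline mode and
     * error reporting enabled. */
    static int enable_txtime(int sock)
    {
            struct sock_txtime txt = {
                    .clockid = CLOCK_TAI,
                    .flags   = SOF_TXTIME_DEADLINE_MODE |
                               SOF_TXTIME_REPORT_ERRORS,
            };

            return setsockopt(sock, SOL_SOCKET, SO_TXTIME,
                              &txt, sizeof(txt));
    }
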
diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h
index c84fcdfca862..fac4edd55379 100644
--- a/include/uapi/linux/netconf.h
+++ b/include/uapi/linux/netconf.h
@@ -18,6 +18,7 @@ enum {
NETCONFA_PROXY_NEIGH,
NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
NETCONFA_INPUT,
+ NETCONFA_BC_FORWARDING,
__NETCONFA_MAX
};
#define NETCONFA_MAX (__NETCONFA_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 89438e68dc03..e23290ffdc77 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -8,6 +8,7 @@
#define NFT_SET_MAXNAMELEN NFT_NAME_MAXLEN
#define NFT_OBJ_MAXNAMELEN NFT_NAME_MAXLEN
#define NFT_USERDATA_MAXLEN 256
+#define NFT_OSF_MAXGENRELEN 16
/**
* enum nft_registers - nf_tables registers
@@ -921,10 +922,12 @@ enum nft_socket_attributes {
/*
* enum nft_socket_keys - nf_tables socket expression keys
*
- * @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option_
+ * @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option
+ * @NFT_SOCKET_MARK: Value of the socket mark
*/
enum nft_socket_keys {
NFT_SOCKET_TRANSPARENT,
+ NFT_SOCKET_MARK,
__NFT_SOCKET_MAX
};
#define NFT_SOCKET_MAX (__NFT_SOCKET_MAX - 1)
@@ -955,6 +958,7 @@ enum nft_socket_keys {
* @NFT_CT_DST_IP: conntrack layer 3 protocol destination (IPv4 address)
* @NFT_CT_SRC_IP6: conntrack layer 3 protocol source (IPv6 address)
* @NFT_CT_DST_IP6: conntrack layer 3 protocol destination (IPv6 address)
+ * @NFT_CT_TIMEOUT: connection tracking timeout policy assigned to conntrack
*/
enum nft_ct_keys {
NFT_CT_STATE,
@@ -980,6 +984,7 @@ enum nft_ct_keys {
NFT_CT_DST_IP,
NFT_CT_SRC_IP6,
NFT_CT_DST_IP6,
+ NFT_CT_TIMEOUT,
__NFT_CT_MAX
};
#define NFT_CT_MAX (__NFT_CT_MAX - 1)
@@ -1251,6 +1256,22 @@ enum nft_nat_attributes {
#define NFTA_NAT_MAX (__NFTA_NAT_MAX - 1)
/**
+ * enum nft_tproxy_attributes - nf_tables tproxy expression netlink attributes
+ *
+ * NFTA_TPROXY_FAMILY: Target address family (NLA_U32: nft_registers)
+ * NFTA_TPROXY_REG_ADDR: Target address register (NLA_U32: nft_registers)
+ * NFTA_TPROXY_REG_PORT: Target port register (NLA_U32: nft_registers)
+ */
+enum nft_tproxy_attributes {
+ NFTA_TPROXY_UNSPEC,
+ NFTA_TPROXY_FAMILY,
+ NFTA_TPROXY_REG_ADDR,
+ NFTA_TPROXY_REG_PORT,
+ __NFTA_TPROXY_MAX
+};
+#define NFTA_TPROXY_MAX (__NFTA_TPROXY_MAX - 1)
+
+/**
* enum nft_masq_attributes - nf_tables masquerade expression attributes
*
* @NFTA_MASQ_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32)
@@ -1392,13 +1413,24 @@ enum nft_ct_helper_attributes {
};
#define NFTA_CT_HELPER_MAX (__NFTA_CT_HELPER_MAX - 1)
+enum nft_ct_timeout_timeout_attributes {
+ NFTA_CT_TIMEOUT_UNSPEC,
+ NFTA_CT_TIMEOUT_L3PROTO,
+ NFTA_CT_TIMEOUT_L4PROTO,
+ NFTA_CT_TIMEOUT_DATA,
+ __NFTA_CT_TIMEOUT_MAX,
+};
+#define NFTA_CT_TIMEOUT_MAX (__NFTA_CT_TIMEOUT_MAX - 1)
+
#define NFT_OBJECT_UNSPEC 0
#define NFT_OBJECT_COUNTER 1
#define NFT_OBJECT_QUOTA 2
#define NFT_OBJECT_CT_HELPER 3
#define NFT_OBJECT_LIMIT 4
#define NFT_OBJECT_CONNLIMIT 5
-#define __NFT_OBJECT_MAX 6
+#define NFT_OBJECT_TUNNEL 6
+#define NFT_OBJECT_CT_TIMEOUT 7
+#define __NFT_OBJECT_MAX 8
#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1)
/**
@@ -1461,6 +1493,13 @@ enum nft_flowtable_hook_attributes {
};
#define NFTA_FLOWTABLE_HOOK_MAX (__NFTA_FLOWTABLE_HOOK_MAX - 1)
+enum nft_osf_attributes {
+ NFTA_OSF_UNSPEC,
+ NFTA_OSF_DREG,
+ __NFTA_OSF_MAX,
+};
+#define NFTA_OSF_MAX (__NFTA_OSF_MAX - 1)
+
/**
* enum nft_device_attributes - nf_tables device netlink attributes
*
@@ -1555,4 +1594,85 @@ enum nft_ng_types {
};
#define NFT_NG_MAX (__NFT_NG_MAX - 1)
+enum nft_tunnel_key_ip_attributes {
+ NFTA_TUNNEL_KEY_IP_UNSPEC,
+ NFTA_TUNNEL_KEY_IP_SRC,
+ NFTA_TUNNEL_KEY_IP_DST,
+ __NFTA_TUNNEL_KEY_IP_MAX
+};
+#define NFTA_TUNNEL_KEY_IP_MAX (__NFTA_TUNNEL_KEY_IP_MAX - 1)
+
+enum nft_tunnel_ip6_attributes {
+ NFTA_TUNNEL_KEY_IP6_UNSPEC,
+ NFTA_TUNNEL_KEY_IP6_SRC,
+ NFTA_TUNNEL_KEY_IP6_DST,
+ NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
+ __NFTA_TUNNEL_KEY_IP6_MAX
+};
+#define NFTA_TUNNEL_KEY_IP6_MAX (__NFTA_TUNNEL_KEY_IP6_MAX - 1)
+
+enum nft_tunnel_opts_attributes {
+ NFTA_TUNNEL_KEY_OPTS_UNSPEC,
+ NFTA_TUNNEL_KEY_OPTS_VXLAN,
+ NFTA_TUNNEL_KEY_OPTS_ERSPAN,
+ __NFTA_TUNNEL_KEY_OPTS_MAX
+};
+#define NFTA_TUNNEL_KEY_OPTS_MAX (__NFTA_TUNNEL_KEY_OPTS_MAX - 1)
+
+enum nft_tunnel_opts_vxlan_attributes {
+ NFTA_TUNNEL_KEY_VXLAN_UNSPEC,
+ NFTA_TUNNEL_KEY_VXLAN_GBP,
+ __NFTA_TUNNEL_KEY_VXLAN_MAX
+};
+#define NFTA_TUNNEL_KEY_VXLAN_MAX (__NFTA_TUNNEL_KEY_VXLAN_MAX - 1)
+
+enum nft_tunnel_opts_erspan_attributes {
+ NFTA_TUNNEL_KEY_ERSPAN_UNSPEC,
+ NFTA_TUNNEL_KEY_ERSPAN_VERSION,
+ NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
+ NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
+ NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
+ __NFTA_TUNNEL_KEY_ERSPAN_MAX
+};
+#define NFTA_TUNNEL_KEY_ERSPAN_MAX (__NFTA_TUNNEL_KEY_ERSPAN_MAX - 1)
+
+enum nft_tunnel_flags {
+ NFT_TUNNEL_F_ZERO_CSUM_TX = (1 << 0),
+ NFT_TUNNEL_F_DONT_FRAGMENT = (1 << 1),
+ NFT_TUNNEL_F_SEQ_NUMBER = (1 << 2),
+};
+#define NFT_TUNNEL_F_MASK (NFT_TUNNEL_F_ZERO_CSUM_TX | \
+ NFT_TUNNEL_F_DONT_FRAGMENT | \
+ NFT_TUNNEL_F_SEQ_NUMBER)
+
+enum nft_tunnel_key_attributes {
+ NFTA_TUNNEL_KEY_UNSPEC,
+ NFTA_TUNNEL_KEY_ID,
+ NFTA_TUNNEL_KEY_IP,
+ NFTA_TUNNEL_KEY_IP6,
+ NFTA_TUNNEL_KEY_FLAGS,
+ NFTA_TUNNEL_KEY_TOS,
+ NFTA_TUNNEL_KEY_TTL,
+ NFTA_TUNNEL_KEY_SPORT,
+ NFTA_TUNNEL_KEY_DPORT,
+ NFTA_TUNNEL_KEY_OPTS,
+ __NFTA_TUNNEL_KEY_MAX
+};
+#define NFTA_TUNNEL_KEY_MAX (__NFTA_TUNNEL_KEY_MAX - 1)
+
+enum nft_tunnel_keys {
+ NFT_TUNNEL_PATH,
+ NFT_TUNNEL_ID,
+ __NFT_TUNNEL_MAX
+};
+#define NFT_TUNNEL_MAX (__NFT_TUNNEL_MAX - 1)
+
+enum nft_tunnel_attributes {
+ NFTA_TUNNEL_UNSPEC,
+ NFTA_TUNNEL_KEY,
+ NFTA_TUNNEL_DREG,
+ __NFTA_TUNNEL_MAX
+};
+#define NFTA_TUNNEL_MAX (__NFTA_TUNNEL_MAX - 1)
+
#endif /* _LINUX_NF_TABLES_H */
diff --git a/include/uapi/linux/netfilter/nf_osf.h b/include/uapi/linux/netfilter/nfnetlink_osf.h
index 8f2f2f403183..76a3527df5dd 100644
--- a/include/uapi/linux/netfilter/nf_osf.h
+++ b/include/uapi/linux/netfilter/nfnetlink_osf.h
@@ -16,9 +16,14 @@
#define NF_OSF_TTL_TRUE 0 /* True ip and fingerprint TTL comparison */
+/* Check if ip TTL is less than fingerprint one */
+#define NF_OSF_TTL_LESS 1
+
/* Do not compare ip and fingerprint TTL at all */
#define NF_OSF_TTL_NOCHECK 2
+#define NF_OSF_FLAGMASK (NF_OSF_GENRE | NF_OSF_TTL | \
+ NF_OSF_LOG | NF_OSF_INVERT)
/* Wildcard MSS (kind of).
* It is used to implement a state machine for the different wildcard values
* of the MSS and window sizes.
@@ -83,4 +88,31 @@ enum iana_options {
OSFOPT_EMPTY = 255,
};
+/* Initial window size option state machine: the window size can be a
+ * multiple of the MSS or MTU, or a plain numeric value. It can also be
+ * a plain numeric value which is not a multiple of the specified value.
+ */
+enum nf_osf_window_size_options {
+ OSF_WSS_PLAIN = 0,
+ OSF_WSS_MSS,
+ OSF_WSS_MTU,
+ OSF_WSS_MODULO,
+ OSF_WSS_MAX,
+};
+
+enum nf_osf_attr_type {
+ OSF_ATTR_UNSPEC,
+ OSF_ATTR_FINGER,
+ OSF_ATTR_MAX,
+};
+
+/*
+ * Add/remove fingerprint from the kernel.
+ */
+enum nf_osf_msg_types {
+ OSF_MSG_ADD,
+ OSF_MSG_REMOVE,
+ OSF_MSG_MAX,
+};
+
#endif /* _NF_OSF_H */
diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h
index 72956eceeb09..24102b5286ec 100644
--- a/include/uapi/linux/netfilter/xt_osf.h
+++ b/include/uapi/linux/netfilter/xt_osf.h
@@ -23,7 +23,7 @@
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/tcp.h>
-#include <linux/netfilter/nf_osf.h>
+#include <linux/netfilter/nfnetlink_osf.h>
#define XT_OSF_GENRE NF_OSF_GENRE
#define XT_OSF_INVERT NF_OSF_INVERT
@@ -37,8 +37,7 @@
#define XT_OSF_TTL_TRUE NF_OSF_TTL_TRUE
#define XT_OSF_TTL_NOCHECK NF_OSF_TTL_NOCHECK
-
-#define XT_OSF_TTL_LESS 1 /* Check if ip TTL is less than fingerprint one */
+#define XT_OSF_TTL_LESS NF_OSF_TTL_LESS
#define xt_osf_wc nf_osf_wc
#define xt_osf_opt nf_osf_opt
@@ -47,19 +46,8 @@
#define xt_osf_finger nf_osf_finger
#define xt_osf_nlmsg nf_osf_nlmsg
-/*
- * Add/remove fingerprint from the kernel.
- */
-enum xt_osf_msg_types {
- OSF_MSG_ADD,
- OSF_MSG_REMOVE,
- OSF_MSG_MAX,
-};
-
-enum xt_osf_attr_type {
- OSF_ATTR_UNSPEC,
- OSF_ATTR_FINGER,
- OSF_ATTR_MAX,
-};
+#define xt_osf_window_size_options nf_osf_window_size_options
+#define xt_osf_attr_type nf_osf_attr_type
+#define xt_osf_msg_types nf_osf_msg_types
#endif /* _XT_OSF_H */
diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h
index 12fb77633f83..156ccd089df1 100644
--- a/include/uapi/linux/netfilter_bridge.h
+++ b/include/uapi/linux/netfilter_bridge.h
@@ -26,4 +26,15 @@
#define NF_BR_BROUTING 5
#define NF_BR_NUMHOOKS 6
+enum nf_br_hook_priorities {
+ NF_BR_PRI_FIRST = INT_MIN,
+ NF_BR_PRI_NAT_DST_BRIDGED = -300,
+ NF_BR_PRI_FILTER_BRIDGED = -200,
+ NF_BR_PRI_BRNF = 0,
+ NF_BR_PRI_NAT_DST_OTHER = 100,
+ NF_BR_PRI_FILTER_OTHER = 200,
+ NF_BR_PRI_NAT_SRC = 300,
+ NF_BR_PRI_LAST = INT_MAX,
+};
+
#endif /* _UAPI__LINUX_BRIDGE_NETFILTER_H */
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 27e4e441caac..7acc16f34942 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2237,6 +2237,9 @@ enum nl80211_commands {
* enforced.
* @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes
* a flow is assigned on each round of the DRR scheduler.
+ * @NL80211_ATTR_HE_CAPABILITY: HE Capability information element (from
+ * association request when used with NL80211_CMD_NEW_STATION). Can be set
+ * only if %NL80211_STA_FLAG_WME is set.
*
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2677,6 +2680,8 @@ enum nl80211_attrs {
NL80211_ATTR_TXQ_MEMORY_LIMIT,
NL80211_ATTR_TXQ_QUANTUM,
+ NL80211_ATTR_HE_CAPABILITY,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2726,7 +2731,8 @@ enum nl80211_attrs {
#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24
#define NL80211_HT_CAPABILITY_LEN 26
#define NL80211_VHT_CAPABILITY_LEN 12
-
+#define NL80211_HE_MIN_CAPABILITY_LEN 16
+#define NL80211_HE_MAX_CAPABILITY_LEN 51
#define NL80211_MAX_NR_CIPHER_SUITES 5
#define NL80211_MAX_NR_AKM_SUITES 2
@@ -2854,6 +2860,38 @@ struct nl80211_sta_flag_update {
} __attribute__((packed));
/**
+ * enum nl80211_he_gi - HE guard interval
+ * @NL80211_RATE_INFO_HE_GI_0_8: 0.8 usec
+ * @NL80211_RATE_INFO_HE_GI_1_6: 1.6 usec
+ * @NL80211_RATE_INFO_HE_GI_3_2: 3.2 usec
+ */
+enum nl80211_he_gi {
+ NL80211_RATE_INFO_HE_GI_0_8,
+ NL80211_RATE_INFO_HE_GI_1_6,
+ NL80211_RATE_INFO_HE_GI_3_2,
+};
+
+/**
+ * enum nl80211_he_ru_alloc - HE RU allocation values
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_26: 26-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_52: 52-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_106: 106-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_242: 242-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_484: 484-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_996: 996-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_2x996: 2x996-tone RU allocation
+ */
+enum nl80211_he_ru_alloc {
+ NL80211_RATE_INFO_HE_RU_ALLOC_26,
+ NL80211_RATE_INFO_HE_RU_ALLOC_52,
+ NL80211_RATE_INFO_HE_RU_ALLOC_106,
+ NL80211_RATE_INFO_HE_RU_ALLOC_242,
+ NL80211_RATE_INFO_HE_RU_ALLOC_484,
+ NL80211_RATE_INFO_HE_RU_ALLOC_996,
+ NL80211_RATE_INFO_HE_RU_ALLOC_2x996,
+};
+
+/**
* enum nl80211_rate_info - bitrate information
*
* These attribute types are used with %NL80211_STA_INFO_TXRATE
@@ -2885,6 +2923,13 @@ struct nl80211_sta_flag_update {
* @NL80211_RATE_INFO_5_MHZ_WIDTH: 5 MHz width - note that this is
* a legacy rate and will be reported as the actual bitrate, i.e.
* a quarter of the base (20 MHz) rate
+ * @NL80211_RATE_INFO_HE_MCS: HE MCS index (u8, 0-11)
+ * @NL80211_RATE_INFO_HE_NSS: HE NSS value (u8, 1-8)
+ * @NL80211_RATE_INFO_HE_GI: HE guard interval identifier
+ * (u8, see &enum nl80211_he_gi)
+ * @NL80211_RATE_INFO_HE_DCM: HE DCM value (u8, 0/1)
+ * @NL80211_RATE_INFO_RU_ALLOC: HE RU allocation, if not present then
+ * non-OFDMA was used (u8, see &enum nl80211_he_ru_alloc)
* @__NL80211_RATE_INFO_AFTER_LAST: internal use
*/
enum nl80211_rate_info {
@@ -2901,6 +2946,11 @@ enum nl80211_rate_info {
NL80211_RATE_INFO_160_MHZ_WIDTH,
NL80211_RATE_INFO_10_MHZ_WIDTH,
NL80211_RATE_INFO_5_MHZ_WIDTH,
+ NL80211_RATE_INFO_HE_MCS,
+ NL80211_RATE_INFO_HE_NSS,
+ NL80211_RATE_INFO_HE_GI,
+ NL80211_RATE_INFO_HE_DCM,
+ NL80211_RATE_INFO_HE_RU_ALLOC,
/* keep last */
__NL80211_RATE_INFO_AFTER_LAST,
@@ -3167,6 +3217,38 @@ enum nl80211_mpath_info {
};
/**
+ * enum nl80211_band_iftype_attr - Interface type data attributes
+ *
+ * @__NL80211_BAND_IFTYPE_ATTR_INVALID: attribute number 0 is reserved
+ * @NL80211_BAND_IFTYPE_ATTR_IFTYPES: nested attribute containing a flag attribute
+ * for each interface type that supports the band data
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC: HE MAC capabilities as in HE
+ * capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY: HE PHY capabilities as in HE
+ * capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET: HE supported NSS/MCS as in HE
+ * capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE: HE PPE thresholds information as
+ * defined in HE capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band HE capability attribute currently
+ * defined
+ * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_band_iftype_attr {
+ __NL80211_BAND_IFTYPE_ATTR_INVALID,
+
+ NL80211_BAND_IFTYPE_ATTR_IFTYPES,
+ NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC,
+ NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY,
+ NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET,
+ NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE,
+
+ /* keep last */
+ __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST,
+ NL80211_BAND_IFTYPE_ATTR_MAX = __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST - 1
+};
+
+/**
* enum nl80211_band_attr - band attributes
* @__NL80211_BAND_ATTR_INVALID: attribute number 0 is reserved
* @NL80211_BAND_ATTR_FREQS: supported frequencies in this band,
@@ -3181,6 +3263,8 @@ enum nl80211_mpath_info {
* @NL80211_BAND_ATTR_VHT_MCS_SET: 32-byte attribute containing the MCS set as
* defined in 802.11ac
* @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the HT information IE
+ * @NL80211_BAND_ATTR_IFTYPE_DATA: nested array attribute, with each entry using
+ * attributes from &enum nl80211_band_iftype_attr
* @NL80211_BAND_ATTR_MAX: highest band attribute currently defined
* @__NL80211_BAND_ATTR_AFTER_LAST: internal use
*/
@@ -3196,6 +3280,7 @@ enum nl80211_band_attr {
NL80211_BAND_ATTR_VHT_MCS_SET,
NL80211_BAND_ATTR_VHT_CAPA,
+ NL80211_BAND_ATTR_IFTYPE_DATA,
/* keep last */
__NL80211_BAND_ATTR_AFTER_LAST,
@@ -5133,6 +5218,11 @@ enum nl80211_feature_flags {
* support to nl80211.
* @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate
* TXQs.
+ * @NL80211_EXT_FEATURE_SCAN_RANDOM_SN: Driver/device supports randomizing the
+ * SN in probe request frames if requested by %NL80211_SCAN_FLAG_RANDOM_SN.
+ * @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data
+ * except for supported rates from the probe request content if requested
+ * by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -5167,6 +5257,8 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211,
NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT,
NL80211_EXT_FEATURE_TXQS,
+ NL80211_EXT_FEATURE_SCAN_RANDOM_SN,
+ NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -5272,6 +5364,12 @@ enum nl80211_timeout_reason {
* possible scan results. This flag hints the driver to use the best
* possible scan configuration to improve the accuracy in scanning.
* Latency and power use may get impacted with this flag.
+ * @NL80211_SCAN_FLAG_RANDOM_SN: randomize the sequence number in probe
+ * request frames from this scan to avoid correlation/tracking being
+ * possible.
+ * @NL80211_SCAN_FLAG_MIN_PREQ_CONTENT: minimize probe request content to
+ * only have supported rates and no additional capabilities (unless
+ * added by userspace explicitly).
*/
enum nl80211_scan_flags {
NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0,
@@ -5285,6 +5383,8 @@ enum nl80211_scan_flags {
NL80211_SCAN_FLAG_LOW_SPAN = 1<<8,
NL80211_SCAN_FLAG_LOW_POWER = 1<<9,
NL80211_SCAN_FLAG_HIGH_ACCURACY = 1<<10,
+ NL80211_SCAN_FLAG_RANDOM_SN = 1<<11,
+ NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 1<<12,
};
/**
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 863aabaa5cc9..dbe0cbe4f1b7 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -840,6 +840,8 @@ struct ovs_action_push_eth {
* @OVS_ACTION_ATTR_POP_NSH: pop the outermost NSH header off the packet.
* @OVS_ACTION_ATTR_METER: Run packet through a meter, which may drop the
* packet, or modify the packet (e.g., change the DSCP field).
+ * @OVS_ACTION_ATTR_CLONE: make a copy of the packet and execute a list of
+ * actions without affecting the original packet and key.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -873,6 +875,7 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_PUSH_NSH, /* Nested OVS_NSH_KEY_ATTR_*. */
OVS_ACTION_ATTR_POP_NSH, /* No argument. */
OVS_ACTION_ATTR_METER, /* u32 meter ID. */
+ OVS_ACTION_ATTR_CLONE, /* Nested OVS_CLONE_ATTR_*. */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 4da87e2ef8a8..ee556ccc93f4 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -636,6 +636,7 @@
#define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
+#define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */
#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */
@@ -960,8 +961,9 @@
#define PCI_REBAR_CTRL 8 /* control register */
#define PCI_REBAR_CTRL_BAR_IDX 0x00000007 /* BAR index */
#define PCI_REBAR_CTRL_NBAR_MASK 0x000000E0 /* # of resizable BARs */
-#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # of BARs */
+#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # of BARs */
#define PCI_REBAR_CTRL_BAR_SIZE 0x00001F00 /* BAR size */
+#define PCI_REBAR_CTRL_BAR_SHIFT 8 /* shift for BAR size */
/* Dynamic Power Allocation */
#define PCI_DPA_CAP 4 /* capability register */
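
The new PCI_REBAR_CTRL_BAR_SHIFT pairs with the existing size mask; the 5-bit field encodes log2(size) - 20, so 0 means 1 MB. A one-function sketch of decoding it:

    #include <stdint.h>
    #include <linux/pci_regs.h>

    /* Decode the current BAR size from a Resizable BAR control value. */
    static uint64_t rebar_size_bytes(uint32_t rebar_ctrl)
    {
            uint32_t field = (rebar_ctrl & PCI_REBAR_CTRL_BAR_SIZE) >>
                             PCI_REBAR_CTRL_BAR_SHIFT;

            return 1ULL << (field + 20);    /* field 0 == 1 MB */
    }
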
diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h
index 953cf036cb26..cbf422e56696 100644
--- a/include/uapi/linux/pcitest.h
+++ b/include/uapi/linux/pcitest.h
@@ -16,5 +16,8 @@
#define PCITEST_WRITE _IOW('P', 0x4, unsigned long)
#define PCITEST_READ _IOW('P', 0x5, unsigned long)
#define PCITEST_COPY _IOW('P', 0x6, unsigned long)
+#define PCITEST_MSIX _IOW('P', 0x7, int)
+#define PCITEST_SET_IRQTYPE _IOW('P', 0x8, int)
+#define PCITEST_GET_IRQTYPE _IO('P', 0x9)
#endif /* __UAPI_LINUX_PCITEST_H */
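
A sketch of the new IRQ-type ioctls against the character device created by the pci_endpoint_test driver; the node name and the 0/1/2 legacy/MSI/MSI-X encoding follow that driver:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/pcitest.h>

    int main(void)
    {
            int fd = open("/dev/pci-endpoint-test.0", O_RDWR);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, PCITEST_SET_IRQTYPE, 2) < 0)  /* 2 == MSI-X */
                    return 1;
            printf("irq type now %ld\n", (long)ioctl(fd, PCITEST_GET_IRQTYPE));
            return 0;
    }
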
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 84e4c1d0f874..be382fb0592d 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -45,6 +45,7 @@ enum {
* the skb and act like everything
* is alright.
*/
+#define TC_ACT_VALUE_MAX TC_ACT_TRAP
/* There is a special kind of actions called "extended actions",
* which need a value parameter. These have a local opcode located in
@@ -55,11 +56,12 @@ enum {
#define __TC_ACT_EXT_SHIFT 28
#define __TC_ACT_EXT(local) ((local) << __TC_ACT_EXT_SHIFT)
#define TC_ACT_EXT_VAL_MASK ((1 << __TC_ACT_EXT_SHIFT) - 1)
-#define TC_ACT_EXT_CMP(combined, opcode) \
- (((combined) & (~TC_ACT_EXT_VAL_MASK)) == opcode)
+#define TC_ACT_EXT_OPCODE(combined) ((combined) & (~TC_ACT_EXT_VAL_MASK))
+#define TC_ACT_EXT_CMP(combined, opcode) (TC_ACT_EXT_OPCODE(combined) == opcode)
#define TC_ACT_JUMP __TC_ACT_EXT(1)
#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2)
+#define TC_ACT_EXT_OPCODE_MAX TC_ACT_GOTO_CHAIN
/* Action type identifiers*/
enum {
@@ -469,12 +471,47 @@ enum {
TCA_FLOWER_KEY_IP_TTL, /* u8 */
TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */
+ TCA_FLOWER_KEY_CVLAN_ID, /* be16 */
+ TCA_FLOWER_KEY_CVLAN_PRIO, /* u8 */
+ TCA_FLOWER_KEY_CVLAN_ETH_TYPE, /* be16 */
+
+ TCA_FLOWER_KEY_ENC_IP_TOS, /* u8 */
+ TCA_FLOWER_KEY_ENC_IP_TOS_MASK, /* u8 */
+ TCA_FLOWER_KEY_ENC_IP_TTL, /* u8 */
+ TCA_FLOWER_KEY_ENC_IP_TTL_MASK, /* u8 */
+
+ TCA_FLOWER_KEY_ENC_OPTS,
+ TCA_FLOWER_KEY_ENC_OPTS_MASK,
+
__TCA_FLOWER_MAX,
};
#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
enum {
+ TCA_FLOWER_KEY_ENC_OPTS_UNSPEC,
+ TCA_FLOWER_KEY_ENC_OPTS_GENEVE, /* Nested
+ * TCA_FLOWER_KEY_ENC_OPT_GENEVE_
+ * attributes
+ */
+ __TCA_FLOWER_KEY_ENC_OPTS_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPTS_MAX (__TCA_FLOWER_KEY_ENC_OPTS_MAX - 1)
+
+enum {
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC,
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, /* u16 */
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */
+
+ __TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX \
+ (__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1)
+
+enum {
TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
};
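
The TC_ACT_EXT_OPCODE() helper added above makes splitting an extended verdict into opcode and value explicit; a small sketch:

    #include <linux/pkt_cls.h>

    /* For TC_ACT_GOTO_CHAIN the value part is the target chain index. */
    static int is_goto_chain(int verdict, unsigned int *chain)
    {
            if (TC_ACT_EXT_CMP(verdict, TC_ACT_GOTO_CHAIN)) {
                    *chain = verdict & TC_ACT_EXT_VAL_MASK;
                    return 1;
            }
            return 0;
    }
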
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 37b5096ae97b..8975fd1a1421 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -124,6 +124,21 @@ struct tc_fifo_qopt {
__u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
};
+/* SKBPRIO section */
+
+/*
+ * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
+ * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able
+ * to map one to one the DS field of IPV4 and IPV6 headers.
+ * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
+ */
+
+#define SKBPRIO_MAX_PRIORITY 64
+
+struct tc_skbprio_qopt {
+ __u32 limit; /* Queue length in packets. */
+};
+
/* PRIO section */
#define TCQ_PRIO_BANDS 16
@@ -539,6 +554,7 @@ enum {
TCA_NETEM_LATENCY64,
TCA_NETEM_JITTER64,
TCA_NETEM_SLOT,
+ TCA_NETEM_SLOT_DIST,
__TCA_NETEM_MAX,
};
@@ -581,6 +597,8 @@ struct tc_netem_slot {
__s64 max_delay;
__s32 max_packets;
__s32 max_bytes;
+ __s64 dist_delay; /* nsec */
+ __s64 dist_jitter; /* nsec */
};
enum {
@@ -934,4 +952,136 @@ enum {
#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
+
+/* ETF */
+struct tc_etf_qopt {
+ __s32 delta;
+ __s32 clockid;
+ __u32 flags;
+#define TC_ETF_DEADLINE_MODE_ON BIT(0)
+#define TC_ETF_OFFLOAD_ON BIT(1)
+};
+
+enum {
+ TCA_ETF_UNSPEC,
+ TCA_ETF_PARMS,
+ __TCA_ETF_MAX,
+};
+
+#define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
+
+
+/* CAKE */
+enum {
+ TCA_CAKE_UNSPEC,
+ TCA_CAKE_PAD,
+ TCA_CAKE_BASE_RATE64,
+ TCA_CAKE_DIFFSERV_MODE,
+ TCA_CAKE_ATM,
+ TCA_CAKE_FLOW_MODE,
+ TCA_CAKE_OVERHEAD,
+ TCA_CAKE_RTT,
+ TCA_CAKE_TARGET,
+ TCA_CAKE_AUTORATE,
+ TCA_CAKE_MEMORY,
+ TCA_CAKE_NAT,
+ TCA_CAKE_RAW,
+ TCA_CAKE_WASH,
+ TCA_CAKE_MPU,
+ TCA_CAKE_INGRESS,
+ TCA_CAKE_ACK_FILTER,
+ TCA_CAKE_SPLIT_GSO,
+ __TCA_CAKE_MAX
+};
+#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1)
+
+enum {
+ __TCA_CAKE_STATS_INVALID,
+ TCA_CAKE_STATS_PAD,
+ TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
+ TCA_CAKE_STATS_MEMORY_LIMIT,
+ TCA_CAKE_STATS_MEMORY_USED,
+ TCA_CAKE_STATS_AVG_NETOFF,
+ TCA_CAKE_STATS_MIN_NETLEN,
+ TCA_CAKE_STATS_MAX_NETLEN,
+ TCA_CAKE_STATS_MIN_ADJLEN,
+ TCA_CAKE_STATS_MAX_ADJLEN,
+ TCA_CAKE_STATS_TIN_STATS,
+ TCA_CAKE_STATS_DEFICIT,
+ TCA_CAKE_STATS_COBALT_COUNT,
+ TCA_CAKE_STATS_DROPPING,
+ TCA_CAKE_STATS_DROP_NEXT_US,
+ TCA_CAKE_STATS_P_DROP,
+ TCA_CAKE_STATS_BLUE_TIMER_US,
+ __TCA_CAKE_STATS_MAX
+};
+#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
+
+enum {
+ __TCA_CAKE_TIN_STATS_INVALID,
+ TCA_CAKE_TIN_STATS_PAD,
+ TCA_CAKE_TIN_STATS_SENT_PACKETS,
+ TCA_CAKE_TIN_STATS_SENT_BYTES64,
+ TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
+ TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
+ TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
+ TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
+ TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
+ TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
+ TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
+ TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
+ TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
+ TCA_CAKE_TIN_STATS_TARGET_US,
+ TCA_CAKE_TIN_STATS_INTERVAL_US,
+ TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
+ TCA_CAKE_TIN_STATS_WAY_MISSES,
+ TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
+ TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
+ TCA_CAKE_TIN_STATS_AVG_DELAY_US,
+ TCA_CAKE_TIN_STATS_BASE_DELAY_US,
+ TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
+ TCA_CAKE_TIN_STATS_BULK_FLOWS,
+ TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
+ TCA_CAKE_TIN_STATS_MAX_SKBLEN,
+ TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
+ __TCA_CAKE_TIN_STATS_MAX
+};
+#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
+#define TC_CAKE_MAX_TINS (8)
+
+enum {
+ CAKE_FLOW_NONE = 0,
+ CAKE_FLOW_SRC_IP,
+ CAKE_FLOW_DST_IP,
+ CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
+ CAKE_FLOW_FLOWS,
+ CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_MAX,
+};
+
+enum {
+ CAKE_DIFFSERV_DIFFSERV3 = 0,
+ CAKE_DIFFSERV_DIFFSERV4,
+ CAKE_DIFFSERV_DIFFSERV8,
+ CAKE_DIFFSERV_BESTEFFORT,
+ CAKE_DIFFSERV_PRECEDENCE,
+ CAKE_DIFFSERV_MAX
+};
+
+enum {
+ CAKE_ACK_NONE = 0,
+ CAKE_ACK_FILTER,
+ CAKE_ACK_AGGRESSIVE,
+ CAKE_ACK_MAX
+};
+
+enum {
+ CAKE_ATM_NONE = 0,
+ CAKE_ATM_ATM,
+ CAKE_ATM_PTM,
+ CAKE_ATM_MAX
+};
+
#endif
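
A sketch of filling the new ETF qdisc options the way a tc-like tool would before packing them into a TCA_ETF_PARMS netlink attribute (netlink plumbing omitted; the 300 us delta is an arbitrary example). Note the TC_ETF_* flags use BIT() unguarded, so userspace must provide it:

    #include <time.h>
    #include <linux/pkt_sched.h>

    #ifndef BIT
    #define BIT(n) (1U << (n))      /* header uses BIT() without defining it */
    #endif

    static struct tc_etf_qopt etf_deadline_opts(void)
    {
            struct tc_etf_qopt opt = {
                    .delta   = 300000,      /* fudge window in nanoseconds */
                    .clockid = CLOCK_TAI,   /* must match SO_TXTIME clockid */
                    .flags   = TC_ETF_DEADLINE_MODE_ON,
            };

            return opt;
    }
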
diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h
index 784c2e3e572e..88b5f9990320 100644
--- a/include/uapi/linux/ppp-ioctl.h
+++ b/include/uapi/linux/ppp-ioctl.h
@@ -68,7 +68,7 @@ struct ppp_option_data {
struct pppol2tp_ioc_stats {
__u16 tunnel_id; /* redundant */
__u16 session_id; /* if zero, get tunnel stats */
- __u32 using_ipsec:1; /* valid only for session_id == 0 */
+ __u32 using_ipsec:1;
__aligned_u64 tx_packets;
__aligned_u64 tx_bytes;
__aligned_u64 tx_errors;
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index 20c6bd0b0007..dc520e1a4123 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
/*
- * Copyright (c) 2008 Oracle. All rights reserved.
+ * Copyright (c) 2008, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -118,7 +118,17 @@
#define RDS_INFO_IB_CONNECTIONS 10008
#define RDS_INFO_CONNECTION_STATS 10009
#define RDS_INFO_IWARP_CONNECTIONS 10010
-#define RDS_INFO_LAST 10010
+
+/* PF_RDS6 options */
+#define RDS6_INFO_CONNECTIONS 10011
+#define RDS6_INFO_SEND_MESSAGES 10012
+#define RDS6_INFO_RETRANS_MESSAGES 10013
+#define RDS6_INFO_RECV_MESSAGES 10014
+#define RDS6_INFO_SOCKETS 10015
+#define RDS6_INFO_TCP_SOCKETS 10016
+#define RDS6_INFO_IB_CONNECTIONS 10017
+
+#define RDS_INFO_LAST 10017
struct rds_info_counter {
__u8 name[32];
@@ -140,6 +150,15 @@ struct rds_info_connection {
__u8 flags;
} __attribute__((packed));
+struct rds6_info_connection {
+ __u64 next_tx_seq;
+ __u64 next_rx_seq;
+ struct in6_addr laddr;
+ struct in6_addr faddr;
+ __u8 transport[TRANSNAMSIZ]; /* null term ascii */
+ __u8 flags;
+} __attribute__((packed));
+
#define RDS_INFO_MESSAGE_FLAG_ACK 0x01
#define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02
@@ -153,6 +172,17 @@ struct rds_info_message {
__u8 flags;
} __attribute__((packed));
+struct rds6_info_message {
+ __u64 seq;
+ __u32 len;
+ struct in6_addr laddr;
+ struct in6_addr faddr;
+ __be16 lport;
+ __be16 fport;
+ __u8 flags;
+ __u8 tos;
+} __attribute__((packed));
+
struct rds_info_socket {
__u32 sndbuf;
__be32 bound_addr;
@@ -163,6 +193,16 @@ struct rds_info_socket {
__u64 inum;
} __attribute__((packed));
+struct rds6_info_socket {
+ __u32 sndbuf;
+ struct in6_addr bound_addr;
+ struct in6_addr connected_addr;
+ __be16 bound_port;
+ __be16 connected_port;
+ __u32 rcvbuf;
+ __u64 inum;
+} __attribute__((packed));
+
struct rds_info_tcp_socket {
__be32 local_addr;
__be16 local_port;
@@ -175,6 +215,18 @@ struct rds_info_tcp_socket {
__u32 last_seen_una;
} __attribute__((packed));
+struct rds6_info_tcp_socket {
+ struct in6_addr local_addr;
+ __be16 local_port;
+ struct in6_addr peer_addr;
+ __be16 peer_port;
+ __u64 hdr_rem;
+ __u64 data_rem;
+ __u32 last_sent_nxt;
+ __u32 last_expected_una;
+ __u32 last_seen_una;
+} __attribute__((packed));
+
#define RDS_IB_GID_LEN 16
struct rds_info_rdma_connection {
__be32 src_addr;
@@ -189,6 +241,19 @@ struct rds_info_rdma_connection {
__u32 rdma_mr_size;
};
+struct rds6_info_rdma_connection {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ __u8 src_gid[RDS_IB_GID_LEN];
+ __u8 dst_gid[RDS_IB_GID_LEN];
+
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 rdma_mr_max;
+ __u32 rdma_mr_size;
+};
+
/* RDS message Receive Path Latency points */
enum rds_message_rxpath_latency {
RDS_MSG_RX_HDR_TO_DGRAM_START = 0,
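
The RDS6_INFO_* sources mirror their IPv4 counterparts but return the in6_addr-based records defined above. RDS info sources are read through getsockopt() on a PF_RDS socket; the rds-info tool grows its buffer on ENOSPC until the kernel's reported length fits. A hedged sketch of that pattern (treat the exact return-value semantics as an assumption):

#include <errno.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/rds.h>

/* Sketch: fetch rds6_info_connection records; returns the record count
 * and hands the malloc'd buffer to the caller via *out. */
static int rds6_get_connections(int fd, struct rds6_info_connection **out)
{
        socklen_t len = 0;
        void *buf = NULL, *tmp;

        while (getsockopt(fd, SOL_RDS, RDS6_INFO_CONNECTIONS, buf, &len) < 0) {
                if (errno != ENOSPC) {
                        free(buf);
                        return -1;
                }
                tmp = realloc(buf, len);   /* kernel wrote the needed size */
                if (!tmp) {
                        free(buf);
                        return -1;
                }
                buf = tmp;
        }
        *out = buf;
        return len / sizeof(struct rds6_info_connection);
}
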
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 7d8502313c99..46399367627f 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -150,6 +150,13 @@ enum {
RTM_NEWCACHEREPORT = 96,
#define RTM_NEWCACHEREPORT RTM_NEWCACHEREPORT
+ RTM_NEWCHAIN = 100,
+#define RTM_NEWCHAIN RTM_NEWCHAIN
+ RTM_DELCHAIN,
+#define RTM_DELCHAIN RTM_DELCHAIN
+ RTM_GETCHAIN,
+#define RTM_GETCHAIN RTM_GETCHAIN
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
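
RTM_NEWCHAIN/RTM_DELCHAIN/RTM_GETCHAIN make tc filter chains first-class rtnetlink objects (the numbering jumps to 100 because RTM types are grouped in fours). A minimal dump-request sketch, assuming these messages carry a struct tcmsg header like the other tc commands:

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Sketch: ask the kernel to dump all filter chains on one device. */
static int request_chain_dump(int nlfd, int ifindex)
{
        struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
        struct {
                struct nlmsghdr nlh;
                struct tcmsg tcm;
        } req;

        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
        req.nlh.nlmsg_type = RTM_GETCHAIN;
        req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        req.tcm.tcm_family = AF_UNSPEC;
        req.tcm.tcm_ifindex = ifindex;

        return sendto(nlfd, &req, req.nlh.nlmsg_len, 0,
                      (struct sockaddr *)&sa, sizeof(sa)) < 0 ? -1 : 0;
}
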
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index b64d583bf053..b479db5c71d9 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -100,6 +100,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_RECVNXTINFO 33
#define SCTP_DEFAULT_SNDINFO 34
#define SCTP_AUTH_DEACTIVATE_KEY 35
+#define SCTP_REUSE_PORT 36
/* Internal Socket Options. Some of the sctp library functions are
* implemented using these socket options.
@@ -762,6 +763,8 @@ enum sctp_spp_flags {
SPP_SACKDELAY_DISABLE = 1<<6, /*Disable SACK*/
SPP_SACKDELAY = SPP_SACKDELAY_ENABLE | SPP_SACKDELAY_DISABLE,
SPP_HB_TIME_IS_ZERO = 1<<7, /* Set HB delay to 0 */
+ SPP_IPV6_FLOWLABEL = 1<<8,
+ SPP_DSCP = 1<<9,
};
struct sctp_paddrparams {
@@ -772,6 +775,8 @@ struct sctp_paddrparams {
__u32 spp_pathmtu;
__u32 spp_sackdelay;
__u32 spp_flags;
+ __u32 spp_ipv6_flowlabel;
+ __u8 spp_dscp;
} __attribute__((packed, aligned(4)));
/*
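
SPP_IPV6_FLOWLABEL and SPP_DSCP gate the two new fields at the end of sctp_paddrparams, so existing binaries that pass the old, shorter struct keep working. A hedged usage sketch, setting DSCP for a whole association through the usual SCTP_PEER_ADDR_PARAMS option (spp_address is left zeroed to address the association, as with the other spp_ fields; whether spp_dscp takes the raw DSCP or the TOS-shifted value is an assumption here):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sctp.h>

/* Sketch: request DSCP "EF" on all paths of an association. */
static int sctp_set_dscp(int fd, sctp_assoc_t assoc_id)
{
        struct sctp_paddrparams p;

        memset(&p, 0, sizeof(p));
        p.spp_assoc_id = assoc_id;
        p.spp_flags = SPP_DSCP;
        p.spp_dscp = 46 << 2;    /* assumption: TOS-style placement */

        return setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
                          &p, sizeof(p));
}
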
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
index 0ae5d4685ba3..ac9e8c96d9bd 100644
--- a/include/uapi/linux/smc_diag.h
+++ b/include/uapi/linux/smc_diag.h
@@ -20,7 +20,7 @@ struct smc_diag_req {
struct smc_diag_msg {
__u8 diag_family;
__u8 diag_state;
- __u8 diag_fallback;
+ __u8 diag_mode;
__u8 diag_shutdown;
struct inet_diag_sockid id;
@@ -28,6 +28,13 @@ struct smc_diag_msg {
__u64 diag_inode;
};
+/* Mode of a connection */
+enum {
+ SMC_DIAG_MODE_SMCR,
+ SMC_DIAG_MODE_FALLBACK_TCP,
+ SMC_DIAG_MODE_SMCD,
+};
+
/* Extensions */
enum {
@@ -35,6 +42,8 @@ enum {
SMC_DIAG_CONNINFO,
SMC_DIAG_LGRINFO,
SMC_DIAG_SHUTDOWN,
+ SMC_DIAG_DMBINFO,
+ SMC_DIAG_FALLBACK,
__SMC_DIAG_MAX,
};
@@ -83,4 +92,18 @@ struct smc_diag_lgrinfo {
struct smc_diag_linkinfo lnk[1];
__u8 role;
};
+
+struct smc_diag_fallback {
+ __u32 reason;
+ __u32 peer_diagnosis;
+};
+
+struct smcd_diag_dmbinfo { /* SMC-D Socket internals */
+ __u32 linkid; /* Link identifier */
+ __u64 peer_gid; /* Peer GID */
+ __u64 my_gid; /* My GID */
+ __u64 token; /* Token of DMB */
+ __u64 peer_token; /* Token of remote DMBE */
+};
+
#endif /* _UAPI_SMC_DIAG_H_ */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 750d89120335..f80135e5feaa 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -56,6 +56,7 @@ enum
IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
IPSTATS_MIB_CEPKTS, /* InCEPkts */
+ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
__IPSTATS_MIB_MAX
};
@@ -279,6 +280,8 @@ enum
LINUX_MIB_TCPDELIVERED, /* TCPDelivered */
LINUX_MIB_TCPDELIVEREDCE, /* TCPDeliveredCE */
LINUX_MIB_TCPACKCOMPRESSED, /* TCPAckCompressed */
+ LINUX_MIB_TCPZEROWINDOWDROP, /* TCPZeroWindowDrop */
+ LINUX_MIB_TCPRCVQDROP, /* TCPRcvQDrop */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 6b58371b1f0d..d71013fffaf6 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -575,7 +575,8 @@ enum {
/* /proc/sys/net/ipv6/icmp */
enum {
- NET_IPV6_ICMP_RATELIMIT=1
+ NET_IPV6_ICMP_RATELIMIT = 1,
+ NET_IPV6_ICMP_ECHO_IGNORE_ALL = 2
};
/* /proc/sys/net/<protocol>/neigh/<dev> */
diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h
index 162d1094c41c..24ec792dacc1 100644
--- a/include/uapi/linux/tc_act/tc_pedit.h
+++ b/include/uapi/linux/tc_act/tc_pedit.h
@@ -17,13 +17,15 @@ enum {
TCA_PEDIT_KEY_EX,
__TCA_PEDIT_MAX
};
+
#define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1)
-
+
enum {
TCA_PEDIT_KEY_EX_HTYPE = 1,
TCA_PEDIT_KEY_EX_CMD = 2,
__TCA_PEDIT_KEY_EX_MAX
};
+
#define TCA_PEDIT_KEY_EX_MAX (__TCA_PEDIT_KEY_EX_MAX - 1)
/* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is a special case for legacy users. It
@@ -38,6 +40,7 @@ enum pedit_header_type {
TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5,
__PEDIT_HDR_TYPE_MAX,
};
+
#define TCA_PEDIT_HDR_TYPE_MAX (__PEDIT_HDR_TYPE_MAX - 1)
enum pedit_cmd {
@@ -45,6 +48,7 @@ enum pedit_cmd {
TCA_PEDIT_KEY_EX_CMD_ADD = 1,
__PEDIT_CMD_MAX,
};
+
#define TCA_PEDIT_CMD_MAX (__PEDIT_CMD_MAX - 1)
struct tc_pedit_key {
@@ -55,13 +59,14 @@ struct tc_pedit_key {
__u32 offmask;
__u32 shift;
};
-
+
struct tc_pedit_sel {
tc_gen;
unsigned char nkeys;
unsigned char flags;
struct tc_pedit_key keys[0];
};
+
#define tc_pedit tc_pedit_sel
#endif
diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h
index fbcfe27a4e6c..6de6071ebed6 100644
--- a/include/uapi/linux/tc_act/tc_skbedit.h
+++ b/include/uapi/linux/tc_act/tc_skbedit.h
@@ -30,6 +30,7 @@
#define SKBEDIT_F_MARK 0x4
#define SKBEDIT_F_PTYPE 0x8
#define SKBEDIT_F_MASK 0x10
+#define SKBEDIT_F_INHERITDSFIELD 0x20
struct tc_skbedit {
tc_gen;
@@ -45,6 +46,7 @@ enum {
TCA_SKBEDIT_PAD,
TCA_SKBEDIT_PTYPE,
TCA_SKBEDIT_MASK,
+ TCA_SKBEDIT_FLAGS,
__TCA_SKBEDIT_MAX
};
#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
index 72bbefe5d1d1..be384d63e1b5 100644
--- a/include/uapi/linux/tc_act/tc_tunnel_key.h
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -36,9 +36,37 @@ enum {
TCA_TUNNEL_KEY_PAD,
TCA_TUNNEL_KEY_ENC_DST_PORT, /* be16 */
TCA_TUNNEL_KEY_NO_CSUM, /* u8 */
+ TCA_TUNNEL_KEY_ENC_OPTS, /* Nested TCA_TUNNEL_KEY_ENC_OPTS_
+ * attributes
+ */
+ TCA_TUNNEL_KEY_ENC_TOS, /* u8 */
+ TCA_TUNNEL_KEY_ENC_TTL, /* u8 */
__TCA_TUNNEL_KEY_MAX,
};
#define TCA_TUNNEL_KEY_MAX (__TCA_TUNNEL_KEY_MAX - 1)
+enum {
+ TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC,
+ TCA_TUNNEL_KEY_ENC_OPTS_GENEVE, /* Nested
+ * TCA_TUNNEL_KEY_ENC_OPTS_
+ * attributes
+ */
+ __TCA_TUNNEL_KEY_ENC_OPTS_MAX,
+};
+
+#define TCA_TUNNEL_KEY_ENC_OPTS_MAX (__TCA_TUNNEL_KEY_ENC_OPTS_MAX - 1)
+
+enum {
+ TCA_TUNNEL_KEY_ENC_OPT_GENEVE_UNSPEC,
+ TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS, /* be16 */
+ TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */
+ TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */
+
+ __TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
+};
+
+#define TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX \
+ (__TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX - 1)
+
#endif
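
The Geneve options travel as a three-level nest: TCA_TUNNEL_KEY_ENC_OPTS wraps TCA_TUNNEL_KEY_ENC_OPTS_GENEVE, which wraps the CLASS/TYPE/DATA leaves. A sketch of emitting that layout with raw struct nlattr arithmetic (the helpers are hypothetical; real callers would use the nesting primitives of libnl or libmnl):

#include <string.h>
#include <linux/netlink.h>
#include <linux/tc_act/tc_tunnel_key.h>

/* Hypothetical minimal attribute writer: emits one attribute and
 * returns a pointer just past it (NLA_ALIGNed). */
static void *nla_put_raw(void *p, unsigned short type,
                         const void *data, int len)
{
        struct nlattr *a = p;

        a->nla_type = type;
        a->nla_len = NLA_HDRLEN + len;
        memcpy((char *)p + NLA_HDRLEN, data, len);
        return (char *)p + NLA_ALIGN(a->nla_len);
}

/* Sketch: emit ENC_OPTS > OPTS_GENEVE > {CLASS, TYPE, DATA} into @buf.
 * @opt_class is expected in network byte order; @data_len must be a
 * multiple of 4 in [4, 128]. Returns the number of bytes written. */
static int put_geneve_opt(void *buf, __be16 opt_class, __u8 opt_type,
                          const void *data, int data_len)
{
        struct nlattr *opts = buf, *gnv;
        void *p;

        opts->nla_type = TCA_TUNNEL_KEY_ENC_OPTS | NLA_F_NESTED;
        gnv = (struct nlattr *)((char *)buf + NLA_HDRLEN);
        gnv->nla_type = TCA_TUNNEL_KEY_ENC_OPTS_GENEVE | NLA_F_NESTED;
        p = (char *)gnv + NLA_HDRLEN;
        p = nla_put_raw(p, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
                        &opt_class, sizeof(opt_class));
        p = nla_put_raw(p, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
                        &opt_type, sizeof(opt_type));
        p = nla_put_raw(p, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
                        data, data_len);
        gnv->nla_len = (char *)p - (char *)gnv;
        opts->nla_len = (char *)p - (char *)buf;
        return opts->nla_len;
}
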
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index e3f6ed8a7064..e02d31986ff9 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -235,6 +235,11 @@ struct tcp_info {
__u32 tcpi_delivered;
__u32 tcpi_delivered_ce;
+
+ __u64 tcpi_bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut */
+ __u64 tcpi_bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans */
+ __u32 tcpi_dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups */
+ __u32 tcpi_reord_seen; /* reordering events seen */
};
/* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */
@@ -257,7 +262,10 @@ enum {
TCP_NLA_SND_SSTHRESH, /* Slow start size threshold */
TCP_NLA_DELIVERED, /* Data pkts delivered incl. out-of-order */
TCP_NLA_DELIVERED_CE, /* Like above but only ones w/ CE marks */
-
+ TCP_NLA_BYTES_SENT, /* Data bytes sent including retransmission */
+ TCP_NLA_BYTES_RETRANS, /* Data bytes retransmitted */
+ TCP_NLA_DSACK_DUPS, /* DSACK blocks received */
+ TCP_NLA_REORD_SEEN, /* reordering events seen */
};
/* for TCP_MD5SIG socket option */
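
The four new tcp_info fields expose RFC 4898-style retransmission and reordering counters to TCP_INFO readers. A minimal sketch, assuming headers new enough to carry the fields (older kernels simply return a shorter struct, which the length argument reveals):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

/* Sketch: print the new counters for a connected TCP socket. */
static void print_tcp_delivery_stats(int fd)
{
        struct tcp_info ti;
        socklen_t len = sizeof(ti);

        memset(&ti, 0, sizeof(ti));
        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0)
                return;
        if (len < sizeof(ti))       /* kernel predates these fields */
                return;
        printf("sent=%llu retrans=%llu dsack_dups=%u reord_seen=%u\n",
               (unsigned long long)ti.tcpi_bytes_sent,
               (unsigned long long)ti.tcpi_bytes_retrans,
               ti.tcpi_dsack_dups, ti.tcpi_reord_seen);
}
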
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index fcf936656493..6b56a2208be7 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -49,6 +49,13 @@ struct __kernel_timespec {
};
#endif
+#ifndef __kernel_itimerspec
+struct __kernel_itimerspec {
+ struct __kernel_timespec it_interval; /* timer period */
+ struct __kernel_timespec it_value; /* timer expiration */
+};
+#endif
+
/*
* legacy timeval structure, only embedded in structures that
* traditionally used 'timeval' to pass time intervals (not absolute
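
struct __kernel_itimerspec pairs two __kernel_timespec values so the 2038-safe timer syscalls can pass the period and first expiry in one object; a libc built with 64-bit time_t maps its plain struct itimerspec straight onto it. A usage sketch with timerfd:

#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/timerfd.h>

/* Sketch: arm a periodic 250 ms timer and return its fd. */
static int make_periodic_timer(void)
{
        struct itimerspec its;
        int fd = timerfd_create(CLOCK_MONOTONIC, 0);

        if (fd < 0)
                return -1;
        memset(&its, 0, sizeof(its));
        its.it_value.tv_nsec = 250 * 1000 * 1000;       /* first expiry */
        its.it_interval.tv_nsec = 250 * 1000 * 1000;    /* period */
        if (timerfd_settime(fd, 0, &its, NULL) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}
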
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 85c11982c89b..0ebe02ef1a86 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -121,6 +121,7 @@ enum {
TIPC_NLA_SOCK_TIPC_STATE, /* u32 */
TIPC_NLA_SOCK_COOKIE, /* u64 */
TIPC_NLA_SOCK_PAD, /* flag */
+ TIPC_NLA_SOCK_GROUP, /* nest */
__TIPC_NLA_SOCK_MAX,
TIPC_NLA_SOCK_MAX = __TIPC_NLA_SOCK_MAX - 1
@@ -233,6 +234,19 @@ enum {
TIPC_NLA_MON_PEER_MAX = __TIPC_NLA_MON_PEER_MAX - 1
};
+/* Nest, socket group info */
+enum {
+ TIPC_NLA_SOCK_GROUP_ID, /* u32 */
+ TIPC_NLA_SOCK_GROUP_OPEN, /* flag */
+ TIPC_NLA_SOCK_GROUP_NODE_SCOPE, /* flag */
+ TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE, /* flag */
+ TIPC_NLA_SOCK_GROUP_INSTANCE, /* u32 */
+ TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT, /* u32 */
+
+ __TIPC_NLA_SOCK_GROUP_MAX,
+ TIPC_NLA_SOCK_GROUP_MAX = __TIPC_NLA_SOCK_GROUP_MAX - 1
+};
+
/* Nest, connection info */
enum {
TIPC_NLA_CON_UNSPEC,
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index 74e520fb944f..ddc5396800aa 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -390,33 +390,64 @@ static inline __u8 uac_processing_unit_iChannelNames(struct uac_processing_unit_
static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_descriptor *desc,
int protocol)
{
- return (protocol == UAC_VERSION_1) ?
- desc->baSourceID[desc->bNrInPins + 4] :
- 2; /* in UAC2, this value is constant */
+ switch (protocol) {
+ case UAC_VERSION_1:
+ return desc->baSourceID[desc->bNrInPins + 4];
+ case UAC_VERSION_2:
+ return 2; /* in UAC2, this value is constant */
+ case UAC_VERSION_3:
+ return 4; /* in UAC3, this value is constant */
+ default:
+ return 1;
+ }
}
static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc,
int protocol)
{
- return (protocol == UAC_VERSION_1) ?
- &desc->baSourceID[desc->bNrInPins + 5] :
- &desc->baSourceID[desc->bNrInPins + 6];
+ switch (protocol) {
+ case UAC_VERSION_1:
+ return &desc->baSourceID[desc->bNrInPins + 5];
+ case UAC_VERSION_2:
+ return &desc->baSourceID[desc->bNrInPins + 6];
+ case UAC_VERSION_3:
+ return &desc->baSourceID[desc->bNrInPins + 2];
+ default:
+ return NULL;
+ }
}
static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc,
int protocol)
{
__u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
- return *(uac_processing_unit_bmControls(desc, protocol)
- + control_size);
+
+ switch (protocol) {
+ case UAC_VERSION_1:
+ case UAC_VERSION_2:
+ default:
+ return *(uac_processing_unit_bmControls(desc, protocol)
+ + control_size);
+ case UAC_VERSION_3:
+ return 0; /* UAC3 does not have this field */
+ }
}
static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc,
int protocol)
{
__u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
- return uac_processing_unit_bmControls(desc, protocol)
+
+ switch (protocol) {
+ case UAC_VERSION_1:
+ case UAC_VERSION_2:
+ default:
+ return uac_processing_unit_bmControls(desc, protocol)
+ control_size + 1;
+ case UAC_VERSION_3:
+ return uac_processing_unit_bmControls(desc, protocol)
+ + control_size;
+ }
}
/* 4.5.2 Class-Specific AS Interface Descriptor */
diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h
index 020714d2c5bd..f80f05b3c423 100644
--- a/include/uapi/linux/uvcvideo.h
+++ b/include/uapi/linux/uvcvideo.h
@@ -28,6 +28,8 @@
#define UVC_CTRL_FLAG_RESTORE (1 << 6)
/* Control can be updated by the camera. */
#define UVC_CTRL_FLAG_AUTO_UPDATE (1 << 7)
+/* Control supports asynchronous reporting */
+#define UVC_CTRL_FLAG_ASYNCHRONOUS (1 << 8)
#define UVC_CTRL_FLAG_GET_RANGE \
(UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_MIN | \
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 8d473c979b61..e4ee10ee917d 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -188,7 +188,7 @@ enum v4l2_colorfx {
/* The base for the imx driver controls.
* We reserve 16 controls for this driver. */
-#define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x1090)
+#define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x10b0)
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
@@ -587,7 +587,23 @@ enum v4l2_vp8_golden_frame_sel {
#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (V4L2_CID_MPEG_BASE+508)
#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (V4L2_CID_MPEG_BASE+509)
#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (V4L2_CID_MPEG_BASE+510)
-#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE (V4L2_CID_MPEG_BASE+511)
+
+#define V4L2_CID_MPEG_VIDEO_VP8_PROFILE (V4L2_CID_MPEG_BASE+511)
+enum v4l2_mpeg_video_vp8_profile {
+ V4L2_MPEG_VIDEO_VP8_PROFILE_0 = 0,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_1 = 1,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_2 = 2,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_3 = 3,
+};
+/* Deprecated alias for compatibility reasons. */
+#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE V4L2_CID_MPEG_VIDEO_VP8_PROFILE
+#define V4L2_CID_MPEG_VIDEO_VP9_PROFILE (V4L2_CID_MPEG_BASE+512)
+enum v4l2_mpeg_video_vp9_profile {
+ V4L2_MPEG_VIDEO_VP9_PROFILE_0 = 0,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_1 = 1,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_2 = 2,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_3 = 3,
+};
/* CIDs for HEVC encoding. */
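
The old integer V4L2_CID_MPEG_VIDEO_VPX_PROFILE covered both VP8 and VP9; it now survives only as an alias for the VP8 control, while VP9 gets its own ID and enum. A sketch of selecting a profile through the classic control interface:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Sketch: request VP9 profile 2 from a codec device. */
static int set_vp9_profile(int fd)
{
        struct v4l2_control ctrl;

        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE;
        ctrl.value = V4L2_MPEG_VIDEO_VP9_PROFILE_2;

        return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}
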
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index c95a53e6743c..03970ce30741 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -170,8 +170,12 @@ struct v4l2_subdev_selection {
#define VIDIOC_SUBDEV_G_SELECTION _IOWR('V', 61, struct v4l2_subdev_selection)
#define VIDIOC_SUBDEV_S_SELECTION _IOWR('V', 62, struct v4l2_subdev_selection)
/* The following ioctls are identical to the ioctls in videodev2.h */
+#define VIDIOC_SUBDEV_G_STD _IOR('V', 23, v4l2_std_id)
+#define VIDIOC_SUBDEV_S_STD _IOW('V', 24, v4l2_std_id)
+#define VIDIOC_SUBDEV_ENUMSTD _IOWR('V', 25, struct v4l2_standard)
#define VIDIOC_SUBDEV_G_EDID _IOWR('V', 40, struct v4l2_edid)
#define VIDIOC_SUBDEV_S_EDID _IOWR('V', 41, struct v4l2_edid)
+#define VIDIOC_SUBDEV_QUERYSTD _IOR('V', 63, v4l2_std_id)
#define VIDIOC_SUBDEV_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
#define VIDIOC_SUBDEV_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
#define VIDIOC_SUBDEV_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings)
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index c51f8e5cc608..b1e22c40c4b6 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -65,6 +65,7 @@ struct vhost_iotlb_msg {
};
#define VHOST_IOTLB_MSG 0x1
+#define VHOST_IOTLB_MSG_V2 0x2
struct vhost_msg {
int type;
@@ -74,6 +75,15 @@ struct vhost_msg {
};
};
+struct vhost_msg_v2 {
+ __u32 type;
+ __u32 reserved;
+ union {
+ struct vhost_iotlb_msg iotlb;
+ __u8 padding[64];
+ };
+};
+
struct vhost_memory_region {
__u64 guest_phys_addr;
__u64 memory_size; /* bytes */
@@ -160,6 +170,14 @@ struct vhost_memory {
#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \
struct vhost_vring_state)
+/* Set or get vhost backend capability */
+
+/* Use message type V2 */
+#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+
+#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64)
+
/* VHOST_NET specific defines */
/* Attach virtio net ring to a raw socket, or tap device.
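
Userspace opts into the v2 IOTLB message layout by negotiating backend features: read the kernel's bits, mask them down to what the process supports, write them back, and only then exchange struct vhost_msg_v2 on the device fd. A sketch, assuming an already-opened /dev/vhost-* descriptor:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Sketch: enable VHOST_BACKEND_F_IOTLB_MSG_V2 when the kernel offers it.
 * Returns 1 if v2 messages are now in effect, 0 if not, -1 on error. */
static int negotiate_iotlb_v2(int vhost_fd)
{
        uint64_t features = 0;

        if (ioctl(vhost_fd, VHOST_GET_BACKEND_FEATURES, &features) < 0)
                return -1;
        features &= VHOST_BACKEND_F_IOTLB_MSG_V2;
        if (ioctl(vhost_fd, VHOST_SET_BACKEND_FEATURES, &features) < 0)
                return -1;
        return features ? 1 : 0;
}
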
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 600877be5c22..5d1a3685bea9 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -522,6 +522,7 @@ struct v4l2_pix_format {
/* Grey bit-packed formats */
#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */
+#define V4L2_PIX_FMT_Y10P v4l2_fourcc('Y', '1', '0', 'P') /* 10 Greyscale, MIPI RAW10 packed */
/* Palette formats */
#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */
@@ -609,6 +610,11 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C')
#define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C')
#define V4L2_PIX_FMT_SRGGB12P v4l2_fourcc('p', 'R', 'C', 'C')
+ /* 14bit raw bayer packed, 7 bytes for every 4 pixels */
+#define V4L2_PIX_FMT_SBGGR14P v4l2_fourcc('p', 'B', 'E', 'E')
+#define V4L2_PIX_FMT_SGBRG14P v4l2_fourcc('p', 'G', 'E', 'E')
+#define V4L2_PIX_FMT_SGRBG14P v4l2_fourcc('p', 'g', 'E', 'E')
+#define V4L2_PIX_FMT_SRGGB14P v4l2_fourcc('p', 'R', 'E', 'E')
#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG16 v4l2_fourcc('G', 'B', '1', '6') /* 16 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG16 v4l2_fourcc('G', 'R', '1', '6') /* 16 GRGR.. BGBG.. */
@@ -636,6 +642,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* HEVC aka H.265 */
+#define V4L2_PIX_FMT_FWHT v4l2_fourcc('F', 'W', 'H', 'T') /* Fast Walsh Hadamard Transform (vicodec) */
/* Vendor-specific formats */
#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -2310,7 +2317,6 @@ struct v4l2_create_buffers {
*
*/
#define VIDIOC_QUERYCAP _IOR('V', 0, struct v4l2_capability)
-#define VIDIOC_RESERVED _IO('V', 1)
#define VIDIOC_ENUM_FMT _IOWR('V', 2, struct v4l2_fmtdesc)
#define VIDIOC_G_FMT _IOWR('V', 4, struct v4l2_format)
#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format)
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index e3af2859188b..5f3b9fec7b5f 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -305,9 +305,12 @@ enum xfrm_attr_type_t {
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
XFRMA_PAD,
XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */
- XFRMA_OUTPUT_MARK, /* __u32 */
+ XFRMA_SET_MARK, /* __u32 */
+ XFRMA_SET_MARK_MASK, /* __u32 */
+ XFRMA_IF_ID, /* __u32 */
__XFRMA_MAX
+#define XFRMA_OUTPUT_MARK XFRMA_SET_MARK /* Compatibility */
#define XFRMA_MAX (__XFRMA_MAX - 1)
};
diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h
index 6d1163456c03..fe4423e518c6 100644
--- a/include/uapi/xen/gntdev.h
+++ b/include/uapi/xen/gntdev.h
@@ -5,6 +5,7 @@
* Interface to /dev/xen/gntdev.
*
* Copyright (c) 2007, D G Murray
+ * Copyright (c) 2018, Oleksandr Andrushchenko, EPAM Systems Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
@@ -200,4 +201,109 @@ struct ioctl_gntdev_grant_copy {
/* Send an interrupt on the indicated event channel */
#define UNMAP_NOTIFY_SEND_EVENT 0x2
+/*
+ * Flags to be used while requesting memory mapping's backing storage
+ * to be allocated with DMA API.
+ */
+
+/*
+ * The buffer is backed with memory allocated with dma_alloc_wc.
+ */
+#define GNTDEV_DMA_FLAG_WC (1 << 0)
+
+/*
+ * The buffer is backed with memory allocated with dma_alloc_coherent.
+ */
+#define GNTDEV_DMA_FLAG_COHERENT (1 << 1)
+
+/*
+ * Create a dma-buf [1] from grant references @refs of count @count provided
+ * by the foreign domain @domid with flags @flags.
+ *
+ * By default dma-buf is backed by system memory pages, but by providing
+ * one of the GNTDEV_DMA_FLAG_XXX flags it can also be created as
+ * a DMA write-combine or coherent buffer, e.g. allocated with dma_alloc_wc/
+ * dma_alloc_coherent.
+ *
+ * On success, returns 0 and stores the corresponding dma-buf's
+ * file descriptor in @fd.
+ *
+ * [1] Documentation/driver-api/dma-buf.rst
+ */
+
+#define IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS \
+ _IOC(_IOC_NONE, 'G', 9, \
+ sizeof(struct ioctl_gntdev_dmabuf_exp_from_refs))
+struct ioctl_gntdev_dmabuf_exp_from_refs {
+ /* IN parameters. */
+ /* Specific options for this dma-buf: see GNTDEV_DMA_FLAG_XXX. */
+ __u32 flags;
+ /* Number of grant references in @refs array. */
+ __u32 count;
+ /* OUT parameters. */
+ /* File descriptor of the dma-buf. */
+ __u32 fd;
+ /* The domain ID of the grant references to be mapped. */
+ __u32 domid;
+ /* Variable IN parameter. */
+ /* Array of grant references of size @count. */
+ __u32 refs[1];
+};
+
+/*
+ * This will block until the dma-buf with the file descriptor @fd is
+ * released. This is only valid for buffers created with
+ * IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS.
+ *
+ * If the buffer is not released within @wait_to_ms milliseconds,
+ * -ETIMEDOUT is returned.
+ * If the buffer with the file descriptor @fd does not exist or has already
+ * been released, -ENOENT is returned. For valid file descriptors
+ * this must not be treated as an error.
+ */
+#define IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED \
+ _IOC(_IOC_NONE, 'G', 10, \
+ sizeof(struct ioctl_gntdev_dmabuf_exp_wait_released))
+struct ioctl_gntdev_dmabuf_exp_wait_released {
+ /* IN parameters */
+ __u32 fd;
+ __u32 wait_to_ms;
+};
+
+/*
+ * Import a dma-buf with file descriptor @fd and export granted references
+ * to the pages of that dma-buf into array @refs of size @count.
+ */
+#define IOCTL_GNTDEV_DMABUF_IMP_TO_REFS \
+ _IOC(_IOC_NONE, 'G', 11, \
+ sizeof(struct ioctl_gntdev_dmabuf_imp_to_refs))
+struct ioctl_gntdev_dmabuf_imp_to_refs {
+ /* IN parameters. */
+ /* File descriptor of the dma-buf. */
+ __u32 fd;
+ /* Number of grant references in @refs array. */
+ __u32 count;
+ /* The domain ID for which the references are to be granted. */
+ __u32 domid;
+ /* Reserved - must be zero. */
+ __u32 reserved;
+ /* OUT parameters. */
+ /* Array of grant references of size @count. */
+ __u32 refs[1];
+};
+
+/*
+ * This will close all references to the imported buffer with file descriptor
+ * @fd, so it can be released by the owner. This is only valid for buffers
+ * created with IOCTL_GNTDEV_DMABUF_IMP_TO_REFS.
+ */
+#define IOCTL_GNTDEV_DMABUF_IMP_RELEASE \
+ _IOC(_IOC_NONE, 'G', 12, \
+ sizeof(struct ioctl_gntdev_dmabuf_imp_release))
+struct ioctl_gntdev_dmabuf_imp_release {
+ /* IN parameters */
+ __u32 fd;
+ __u32 reserved;
+};
+
#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
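
ioctl_gntdev_dmabuf_exp_from_refs ends in a variable-length refs[] tail, so callers size the allocation by count rather than by sizeof alone. A hedged sketch of exporting a dma-buf (assumes count >= 1 and the uapi header installed as xen/gntdev.h):

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xen/gntdev.h>

/* Sketch: export @count grant references from @domid as a dma-buf;
 * returns the new dma-buf fd or -1. */
static int export_dmabuf(int gntdev_fd, unsigned int domid,
                         const unsigned int *refs, unsigned int count)
{
        struct ioctl_gntdev_dmabuf_exp_from_refs *op;
        size_t sz = sizeof(*op) + (count - 1) * sizeof(op->refs[0]);
        int fd = -1;

        op = calloc(1, sz);
        if (!op)
                return -1;
        op->flags = GNTDEV_DMA_FLAG_WC;   /* write-combine backing */
        op->count = count;
        op->domid = domid;
        memcpy(op->refs, refs, count * sizeof(op->refs[0]));
        if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
                fd = op->fd;
        free(op);
        return fd;
}
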
diff --git a/include/video/mipi_display.h b/include/video/mipi_display.h
index 19aa65a35546..49a53ef8da96 100644
--- a/include/video/mipi_display.h
+++ b/include/video/mipi_display.h
@@ -38,6 +38,9 @@ enum {
MIPI_DSI_DCS_READ = 0x06,
+ MIPI_DSI_DCS_COMPRESSION_MODE = 0x07,
+ MIPI_DSI_PPS_LONG_WRITE = 0x0A,
+
MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE = 0x37,
MIPI_DSI_END_OF_TRANSMISSION = 0x08,
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 2e37741f6b8d..9bc5bc07d4d3 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -198,6 +198,27 @@ void gnttab_free_auto_xlat_frames(void);
int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages);
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+struct gnttab_dma_alloc_args {
+ /* Device for which DMA memory will be/was allocated. */
+ struct device *dev;
+ /* If set, the DMA buffer is coherent; otherwise it is write-combine. */
+ bool coherent;
+
+ int nr_pages;
+ struct page **pages;
+ xen_pfn_t *frames;
+ void *vaddr;
+ dma_addr_t dev_bus_addr;
+};
+
+int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
+int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
+#endif
+
+int gnttab_pages_set_private(int nr_pages, struct page **pages);
+void gnttab_pages_clear_private(int nr_pages, struct page **pages);
+
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
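
gnttab_dma_alloc_pages() gathers its many in/out parameters in the args struct above: the caller fills dev, coherent, nr_pages and the preallocated pages/frames arrays, and the grant code hands back vaddr and dev_bus_addr. A minimal in-kernel sketch (error handling elided):

#include <linux/string.h>
#include <xen/grant_table.h>

/* Sketch (kernel code): allocate @n DMA-able, grantable pages for @dev. */
static int alloc_grant_dma(struct device *dev, int n,
                           struct page **pages, xen_pfn_t *frames,
                           struct gnttab_dma_alloc_args *args)
{
        memset(args, 0, sizeof(*args));
        args->dev = dev;
        args->coherent = true;          /* dma_alloc_coherent backing */
        args->nr_pages = n;
        args->pages = pages;            /* caller-provided arrays */
        args->frames = frames;

        return gnttab_dma_alloc_pages(args);
}
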
diff --git a/include/xen/mem-reservation.h b/include/xen/mem-reservation.h
new file mode 100644
index 000000000000..80b52b4945e9
--- /dev/null
+++ b/include/xen/mem-reservation.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Xen memory reservation utilities.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ * Copyright (c) 2010 Daniel Kiper
+ * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#ifndef _XENMEM_RESERVATION_H
+#define _XENMEM_RESERVATION_H
+
+#include <linux/highmem.h>
+
+#include <xen/page.h>
+
+static inline void xenmem_reservation_scrub_page(struct page *page)
+{
+#ifdef CONFIG_XEN_SCRUB_PAGES
+ clear_highpage(page);
+#endif
+}
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+void __xenmem_reservation_va_mapping_update(unsigned long count,
+ struct page **pages,
+ xen_pfn_t *frames);
+
+void __xenmem_reservation_va_mapping_reset(unsigned long count,
+ struct page **pages);
+#endif
+
+static inline void xenmem_reservation_va_mapping_update(unsigned long count,
+ struct page **pages,
+ xen_pfn_t *frames)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ __xenmem_reservation_va_mapping_update(count, pages, frames);
+#endif
+}
+
+static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
+ struct page **pages)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ __xenmem_reservation_va_mapping_reset(count, pages);
+#endif
+}
+
+int xenmem_reservation_increase(int count, xen_pfn_t *frames);
+
+int xenmem_reservation_decrease(int count, xen_pfn_t *frames);
+
+#endif
diff --git a/init/Kconfig b/init/Kconfig
index 041f3a022122..4dc783023e43 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -2,9 +2,9 @@ config DEFCONFIG_LIST
string
depends on !UML
option defconfig_list
- default "/lib/modules/$(shell,uname --release)/.config"
+ default "/lib/modules/$(shell,uname -r)/.config"
default "/etc/kernel-config"
- default "/boot/config-$(shell,uname --release)"
+ default "/boot/config-$(shell,uname -r)"
default ARCH_DEFCONFIG
default "arch/$(ARCH)/defconfig"
@@ -107,6 +107,15 @@ config LOCALVERSION_AUTO
which is done within the script "scripts/setlocalversion".)
+config BUILD_SALT
+ string "Build ID Salt"
+ default ""
+ help
+ The build ID is used to link binaries and their debug info. If set,
+ this value is included in the build ID calculation. This is mostly
+ useful for distributions which want to ensure that each build is
+ unique. It's safe to leave the default.
+
config HAVE_KERNEL_GZIP
bool
@@ -125,10 +134,13 @@ config HAVE_KERNEL_LZO
config HAVE_KERNEL_LZ4
bool
+config HAVE_KERNEL_UNCOMPRESSED
+ bool
+
choice
prompt "Kernel compression mode"
default KERNEL_GZIP
- depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4
+ depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4 || HAVE_KERNEL_UNCOMPRESSED
help
The linux kernel is a kind of self-extracting executable.
Several compression algorithms are available, which differ
@@ -207,6 +219,16 @@ config KERNEL_LZ4
is about 8% bigger than LZO. But the decompression speed is
faster than LZO.
+config KERNEL_UNCOMPRESSED
+ bool "None"
+ depends on HAVE_KERNEL_UNCOMPRESSED
+ help
+ Produce an uncompressed kernel image. This option is usually not what
+ you want. It is useful for debugging the kernel in slow simulation
+ environments, where decompressing and moving the kernel is awfully
+ slow. This option allows early boot code to skip the decompressor
+ and jump directly to the uncompressed kernel image.
+
endchoice
config DEFAULT_HOSTNAME
@@ -218,9 +240,16 @@ config DEFAULT_HOSTNAME
but you may wish to use a different default here to make a minimal
system more usable with less configuration.
+#
+# For some reason microblaze and nios2 hard code SWAP=n. Hopefully we can
+# add proper SWAP support to them, in which case this can be removed.
+#
+config ARCH_NO_SWAP
+ bool
+
config SWAP
bool "Support for paging of anonymous memory (swap)"
- depends on MMU && BLOCK
+ depends on MMU && BLOCK && !ARCH_NO_SWAP
default y
help
This option allows you to choose whether you want to have support
@@ -319,6 +348,7 @@ config AUDIT_TREE
source "kernel/irq/Kconfig"
source "kernel/time/Kconfig"
+source "kernel/Kconfig.preempt"
menu "CPU/Task time and stats accounting"
@@ -1714,10 +1744,10 @@ config PROFILING
config TRACEPOINTS
bool
-source "arch/Kconfig"
-
endmenu # General setup
+source "arch/Kconfig"
+
config RT_MUTEXES
bool
diff --git a/init/main.c b/init/main.c
index 5e13c544bbf4..38c68b593d0d 100644
--- a/init/main.c
+++ b/init/main.c
@@ -79,7 +79,7 @@
#include <linux/pti.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
-#include <linux/sched_clock.h>
+#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/context_tracking.h>
@@ -642,7 +642,6 @@ asmlinkage __visible void __init start_kernel(void)
softirq_init();
timekeeping_init();
time_init();
- sched_clock_postinit();
printk_safe_init();
perf_event_init();
profile_init();
@@ -697,6 +696,7 @@ asmlinkage __visible void __init start_kernel(void)
acpi_early_init();
if (late_time_init)
late_time_init();
+ sched_clock_init();
calibrate_delay();
pid_idr_init();
anon_vma_init();
@@ -1065,6 +1065,13 @@ static int __ref kernel_init(void *unused)
jump_label_invalidate_initmem();
free_initmem();
mark_readonly();
+
+ /*
+ * Kernel mappings are now finalized - update the userspace page-table
+ * to finalize PTI.
+ */
+ pti_finalize();
+
system_state = SYSTEM_RUNNING;
numa_default_policy();
diff --git a/init/version.c b/init/version.c
index bfb4e3f4955e..ef4012ec4375 100644
--- a/init/version.c
+++ b/init/version.c
@@ -7,6 +7,7 @@
*/
#include <generated/compile.h>
+#include <linux/build-salt.h>
#include <linux/export.h>
#include <linux/uts.h>
#include <linux/utsname.h>
@@ -49,3 +50,5 @@ const char linux_proc_banner[] =
"%s version %s"
" (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ")"
" (" LINUX_COMPILER ") %s\n";
+
+BUILD_SALT;
diff --git a/ipc/msg.c b/ipc/msg.c
index 3b6545302598..203281198079 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -38,6 +38,7 @@
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
+#include <linux/rhashtable.h>
#include <asm/current.h>
#include <linux/uaccess.h>
diff --git a/ipc/sem.c b/ipc/sem.c
index 76e95e4f3aa2..00ef2f743a62 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -86,6 +86,7 @@
#include <linux/ipc_namespace.h>
#include <linux/sched/wake_q.h>
#include <linux/nospec.h>
+#include <linux/rhashtable.h>
#include <linux/uaccess.h>
#include "util.h"
diff --git a/ipc/shm.c b/ipc/shm.c
index fefa00d310fb..b204feb38274 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -43,6 +43,7 @@
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
+#include <linux/rhashtable.h>
#include <linux/uaccess.h>
@@ -1366,15 +1367,14 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
struct shmid_kernel *shp;
unsigned long addr = (unsigned long)shmaddr;
unsigned long size;
- struct file *file;
+ struct file *file, *base;
int err;
unsigned long flags = MAP_SHARED;
unsigned long prot;
int acc_mode;
struct ipc_namespace *ns;
struct shm_file_data *sfd;
- struct path path;
- fmode_t f_mode;
+ int f_flags;
unsigned long populate = 0;
err = -EINVAL;
@@ -1407,11 +1407,11 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
if (shmflg & SHM_RDONLY) {
prot = PROT_READ;
acc_mode = S_IRUGO;
- f_mode = FMODE_READ;
+ f_flags = O_RDONLY;
} else {
prot = PROT_READ | PROT_WRITE;
acc_mode = S_IRUGO | S_IWUGO;
- f_mode = FMODE_READ | FMODE_WRITE;
+ f_flags = O_RDWR;
}
if (shmflg & SHM_EXEC) {
prot |= PROT_EXEC;
@@ -1447,46 +1447,44 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
goto out_unlock;
}
- path = shp->shm_file->f_path;
- path_get(&path);
+ /*
+ * We need to take a reference to the real shm file to prevent the
+ * pointer from becoming stale in cases where the lifetime of the outer
+ * file extends beyond that of the shm segment. It's not usually
+ * possible, but it can happen during remap_file_pages() emulation as
+ * that unmaps the memory, then does ->mmap() via file reference only.
+ * We'll deny the ->mmap() if the shm segment was since removed, but to
+ * detect shm ID reuse we need to compare the file pointers.
+ */
+ base = get_file(shp->shm_file);
shp->shm_nattch++;
- size = i_size_read(d_inode(path.dentry));
+ size = i_size_read(file_inode(base));
ipc_unlock_object(&shp->shm_perm);
rcu_read_unlock();
err = -ENOMEM;
sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
if (!sfd) {
- path_put(&path);
+ fput(base);
goto out_nattch;
}
- file = alloc_file(&path, f_mode,
- is_file_hugepages(shp->shm_file) ?
+ file = alloc_file_clone(base, f_flags,
+ is_file_hugepages(base) ?
&shm_file_operations_huge :
&shm_file_operations);
err = PTR_ERR(file);
if (IS_ERR(file)) {
kfree(sfd);
- path_put(&path);
+ fput(base);
goto out_nattch;
}
- file->private_data = sfd;
- file->f_mapping = shp->shm_file->f_mapping;
sfd->id = shp->shm_perm.id;
sfd->ns = get_ipc_ns(ns);
- /*
- * We need to take a reference to the real shm file to prevent the
- * pointer from becoming stale in cases where the lifetime of the outer
- * file extends beyond that of the shm segment. It's not usually
- * possible, but it can happen during remap_file_pages() emulation as
- * that unmaps the memory, then does ->mmap() via file reference only.
- * We'll deny the ->mmap() if the shm segment was since removed, but to
- * detect shm ID reuse we need to compare the file pointers.
- */
- sfd->file = get_file(shp->shm_file);
+ sfd->file = base;
sfd->vm_ops = NULL;
+ file->private_data = sfd;
err = security_mmap_file(file, prot, flags);
if (err)
diff --git a/ipc/util.c b/ipc/util.c
index 4e81182fa0ac..fdffff41f65b 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -63,6 +63,7 @@
#include <linux/rwsem.h>
#include <linux/memory.h>
#include <linux/ipc_namespace.h>
+#include <linux/rhashtable.h>
#include <asm/unistd.h>
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 3f9c97419f02..cd1655122ec0 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -18,6 +18,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_NO_PREEMPT
help
This option reduces the latency of the kernel by adding more
"explicit preemption points" to the kernel code. These new
@@ -35,6 +36,7 @@ config PREEMPT_VOLUNTARY
config PREEMPT
bool "Preemptible Kernel (Low-Latency Desktop)"
+ depends on !ARCH_NO_PREEMPT
select PREEMPT_COUNT
select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
help
diff --git a/kernel/Makefile b/kernel/Makefile
index 04bc07c2b42a..7a63d567fdb5 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -123,7 +123,7 @@ targets += config_data.gz
$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
$(call if_changed,gzip)
- filechk_ikconfiggz = (echo "static const char kernel_config_data[] __used = MAGIC_START"; cat $< | scripts/basic/bin2c; echo "MAGIC_END;")
+ filechk_ikconfiggz = (echo "static const char kernel_config_data[] __used = MAGIC_START"; cat $< | scripts/bin2c; echo "MAGIC_END;")
targets += config_data.h
$(obj)/config_data.h: $(obj)/config_data.gz FORCE
$(call filechk,ikconfiggz)
diff --git a/kernel/audit.c b/kernel/audit.c
index e7478cb58079..2a8058764aa6 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -83,9 +83,6 @@
#define AUDIT_INITIALIZED 1
static int audit_initialized;
-#define AUDIT_OFF 0
-#define AUDIT_ON 1
-#define AUDIT_LOCKED 2
u32 audit_enabled = AUDIT_OFF;
bool audit_ever_enabled = !!AUDIT_OFF;
@@ -1724,7 +1721,7 @@ static inline void audit_get_stamp(struct audit_context *ctx,
struct timespec64 *t, unsigned int *serial)
{
if (!ctx || !auditsc_get_stamp(ctx, t, serial)) {
- *t = current_kernel_time64();
+ ktime_get_coarse_real_ts64(t);
*serial = audit_serial();
}
}
@@ -1754,7 +1751,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
if (audit_initialized != AUDIT_INITIALIZED)
return NULL;
- if (unlikely(!audit_filter(type, AUDIT_FILTER_TYPE)))
+ if (unlikely(!audit_filter(type, AUDIT_FILTER_EXCLUDE)))
return NULL;
/* NOTE: don't ever fail/sleep on these two conditions:
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index c99ebaae5abc..9f6eaeb6919f 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -497,6 +497,8 @@ static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
struct audit_buffer *ab;
+ if (!audit_enabled)
+ return;
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
if (unlikely(!ab))
return;
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index c17c0c268436..787c7afdf829 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -238,20 +238,21 @@ out:
static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watch *w, char *op)
{
- if (audit_enabled) {
- struct audit_buffer *ab;
- ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
- if (unlikely(!ab))
- return;
- audit_log_format(ab, "auid=%u ses=%u op=%s",
- from_kuid(&init_user_ns, audit_get_loginuid(current)),
- audit_get_sessionid(current), op);
- audit_log_format(ab, " path=");
- audit_log_untrustedstring(ab, w->path);
- audit_log_key(ab, r->filterkey);
- audit_log_format(ab, " list=%d res=1", r->listnr);
- audit_log_end(ab);
- }
+ struct audit_buffer *ab;
+
+ if (!audit_enabled)
+ return;
+ ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
+ if (!ab)
+ return;
+ audit_log_format(ab, "auid=%u ses=%u op=%s",
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
+ audit_get_sessionid(current), op);
+ audit_log_format(ab, " path=");
+ audit_log_untrustedstring(ab, w->path);
+ audit_log_key(ab, r->filterkey);
+ audit_log_format(ab, " list=%d res=1", r->listnr);
+ audit_log_end(ab);
}
/* Update inode info in audit rules based on filesystem event. */
@@ -419,6 +420,13 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list)
struct path parent_path;
int h, ret = 0;
+ /*
+ * By the time audit_add_to_parent() is called, krule->watch may have
+ * been updated and the watch may have been freed.
+ * So we need to keep a reference to the watch.
+ */
+ audit_get_watch(watch);
+
mutex_unlock(&audit_filter_mutex);
/* Avoid calling path_lookup under audit_filter_mutex. */
@@ -427,8 +435,10 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list)
/* caller expects mutex locked */
mutex_lock(&audit_filter_mutex);
- if (ret)
+ if (ret) {
+ audit_put_watch(watch);
return ret;
+ }
/* either find an old parent or attach a new one */
parent = audit_find_parent(d_backing_inode(parent_path.dentry));
@@ -446,6 +456,7 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list)
*list = &audit_inode_hash[h];
error:
path_put(&parent_path);
+ audit_put_watch(watch);
return ret;
}
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index eaa320148d97..bf309f2592c4 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -264,7 +264,7 @@ static inline struct audit_entry *audit_to_entry_common(struct audit_rule_data *
case AUDIT_FILTER_TASK:
#endif
case AUDIT_FILTER_USER:
- case AUDIT_FILTER_TYPE:
+ case AUDIT_FILTER_EXCLUDE:
case AUDIT_FILTER_FS:
;
}
@@ -337,7 +337,7 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
{
switch(f->type) {
case AUDIT_MSGTYPE:
- if (entry->rule.listnr != AUDIT_FILTER_TYPE &&
+ if (entry->rule.listnr != AUDIT_FILTER_EXCLUDE &&
entry->rule.listnr != AUDIT_FILTER_USER)
return -EINVAL;
break;
@@ -428,8 +428,6 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
case AUDIT_EXE:
if (f->op != Audit_not_equal && f->op != Audit_equal)
return -EINVAL;
- if (entry->rule.listnr != AUDIT_FILTER_EXIT)
- return -EINVAL;
break;
}
return 0;
@@ -931,7 +929,7 @@ static inline int audit_add_rule(struct audit_entry *entry)
/* If any of these, don't count towards total */
switch(entry->rule.listnr) {
case AUDIT_FILTER_USER:
- case AUDIT_FILTER_TYPE:
+ case AUDIT_FILTER_EXCLUDE:
case AUDIT_FILTER_FS:
dont_count = 1;
}
@@ -1013,7 +1011,7 @@ int audit_del_rule(struct audit_entry *entry)
/* If any of these, don't count towards total */
switch(entry->rule.listnr) {
case AUDIT_FILTER_USER:
- case AUDIT_FILTER_TYPE:
+ case AUDIT_FILTER_EXCLUDE:
case AUDIT_FILTER_FS:
dont_count = 1;
}
@@ -1360,6 +1358,11 @@ int audit_filter(int msgtype, unsigned int listtype)
f->type, f->op, f->lsm_rule, NULL);
}
break;
+ case AUDIT_EXE:
+ result = audit_exe_compare(current, e->rule.exe);
+ if (f->op == Audit_not_equal)
+ result = !result;
+ break;
default:
goto unlock_and_return;
}
@@ -1369,7 +1372,7 @@ int audit_filter(int msgtype, unsigned int listtype)
break;
}
if (result > 0) {
- if (e->rule.action == AUDIT_NEVER || listtype == AUDIT_FILTER_TYPE)
+ if (e->rule.action == AUDIT_NEVER || listtype == AUDIT_FILTER_EXCLUDE)
ret = 0;
break;
}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 80d672a11088..b2d1f043f17f 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -494,20 +494,20 @@ static int audit_filter_rules(struct task_struct *tsk,
result = audit_gid_comparator(cred->gid, f->op, f->gid);
if (f->op == Audit_equal) {
if (!result)
- result = in_group_p(f->gid);
+ result = groups_search(cred->group_info, f->gid);
} else if (f->op == Audit_not_equal) {
if (result)
- result = !in_group_p(f->gid);
+ result = !groups_search(cred->group_info, f->gid);
}
break;
case AUDIT_EGID:
result = audit_gid_comparator(cred->egid, f->op, f->gid);
if (f->op == Audit_equal) {
if (!result)
- result = in_egroup_p(f->gid);
+ result = groups_search(cred->group_info, f->gid);
} else if (f->op == Audit_not_equal) {
if (result)
- result = !in_egroup_p(f->gid);
+ result = !groups_search(cred->group_info, f->gid);
}
break;
case AUDIT_SGID:
@@ -1544,10 +1544,10 @@ void __audit_syscall_entry(int major, unsigned long a1, unsigned long a2,
context->argv[2] = a3;
context->argv[3] = a4;
context->serial = 0;
- context->ctime = current_kernel_time64();
context->in_syscall = 1;
context->current_state = state;
context->ppid = 0;
+ ktime_get_coarse_real_ts64(&context->ctime);
}
/**
@@ -2466,7 +2466,7 @@ void audit_core_dumps(long signr)
if (signr == SIGQUIT) /* don't care for those */
return;
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
+ ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_ANOM_ABEND);
if (unlikely(!ab))
return;
audit_log_task(ab);
@@ -2490,7 +2490,7 @@ void audit_seccomp(unsigned long syscall, long signr, int code)
{
struct audit_buffer *ab;
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SECCOMP);
+ ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_SECCOMP);
if (unlikely(!ab))
return;
audit_log_task(ab);
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index f27f5496d6fe..0488b8258321 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -3,6 +3,7 @@ obj-y := core.o
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
+obj-$(CONFIG_BPF_SYSCALL) += local_storage.o
obj-$(CONFIG_BPF_SYSCALL) += disasm.o
obj-$(CONFIG_BPF_SYSCALL) += btf.o
ifeq ($(CONFIG_NET),y)
@@ -22,3 +23,6 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
endif
obj-$(CONFIG_CGROUP_BPF) += cgroup.o
+ifeq ($(CONFIG_INET),y)
+obj-$(CONFIG_BPF_SYSCALL) += reuseport_array.o
+endif
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 2aa55d030c77..0c17aab3ce5f 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -54,7 +54,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
}
/* Called from syscall */
-static int array_map_alloc_check(union bpf_attr *attr)
+int array_map_alloc_check(union bpf_attr *attr)
{
bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
int numa_node = bpf_map_attr_numa_node(attr);
@@ -358,27 +358,20 @@ static void array_map_seq_show_elem(struct bpf_map *map, void *key,
rcu_read_unlock();
}
-static int array_map_check_btf(const struct bpf_map *map, const struct btf *btf,
- u32 btf_key_id, u32 btf_value_id)
+static int array_map_check_btf(const struct bpf_map *map,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type)
{
- const struct btf_type *key_type, *value_type;
- u32 key_size, value_size;
u32 int_data;
- key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
- if (!key_type || BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
+ if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
return -EINVAL;
int_data = *(u32 *)(key_type + 1);
- /* bpf array can only take a u32 key. This check makes
- * sure that the btf matches the attr used during map_create.
+ /* bpf array can only take a u32 key. This check makes sure
+ * that the btf matches the attr used during map_create.
*/
- if (BTF_INT_BITS(int_data) != 32 || key_size != 4 ||
- BTF_INT_OFFSET(int_data))
- return -EINVAL;
-
- value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
- if (!value_type || value_size != map->value_size)
+ if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
return -EINVAL;
return 0;
@@ -405,6 +398,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
.map_lookup_elem = percpu_array_map_lookup_elem,
.map_update_elem = array_map_update_elem,
.map_delete_elem = array_map_delete_elem,
+ .map_check_btf = array_map_check_btf,
};
static int fd_array_map_alloc_check(union bpf_attr *attr)
@@ -546,6 +540,7 @@ const struct bpf_map_ops prog_array_map_ops = {
.map_fd_put_ptr = prog_fd_array_put_ptr,
.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
.map_release_uref = bpf_fd_array_map_clear,
+ .map_check_btf = map_check_no_btf,
};
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
@@ -634,6 +629,7 @@ const struct bpf_map_ops perf_event_array_map_ops = {
.map_fd_get_ptr = perf_event_fd_array_get_ptr,
.map_fd_put_ptr = perf_event_fd_array_put_ptr,
.map_release = perf_event_fd_array_release,
+ .map_check_btf = map_check_no_btf,
};
#ifdef CONFIG_CGROUPS
@@ -665,6 +661,7 @@ const struct bpf_map_ops cgroup_array_map_ops = {
.map_delete_elem = fd_array_map_delete_elem,
.map_fd_get_ptr = cgroup_fd_array_get_ptr,
.map_fd_put_ptr = cgroup_fd_array_put_ptr,
+ .map_check_btf = map_check_no_btf,
};
#endif
@@ -749,4 +746,5 @@ const struct bpf_map_ops array_of_maps_map_ops = {
.map_fd_put_ptr = bpf_map_fd_put_ptr,
.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
.map_gen_lookup = array_of_map_gen_lookup,
+ .map_check_btf = map_check_no_btf,
};
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 3d83ee7df381..6a7d931bbc55 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -34,6 +34,8 @@ void cgroup_bpf_put(struct cgroup *cgrp)
list_for_each_entry_safe(pl, tmp, progs, node) {
list_del(&pl->node);
bpf_prog_put(pl->prog);
+ bpf_cgroup_storage_unlink(pl->storage);
+ bpf_cgroup_storage_free(pl->storage);
kfree(pl);
static_branch_dec(&cgroup_bpf_enabled_key);
}
@@ -95,7 +97,7 @@ static int compute_effective_progs(struct cgroup *cgrp,
enum bpf_attach_type type,
struct bpf_prog_array __rcu **array)
{
- struct bpf_prog_array __rcu *progs;
+ struct bpf_prog_array *progs;
struct bpf_prog_list *pl;
struct cgroup *p = cgrp;
int cnt = 0;
@@ -115,18 +117,20 @@ static int compute_effective_progs(struct cgroup *cgrp,
cnt = 0;
p = cgrp;
do {
- if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
- list_for_each_entry(pl,
- &p->bpf.progs[type], node) {
- if (!pl->prog)
- continue;
- rcu_dereference_protected(progs, 1)->
- progs[cnt++] = pl->prog;
- }
- p = cgroup_parent(p);
- } while (p);
+ if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
+ continue;
+
+ list_for_each_entry(pl, &p->bpf.progs[type], node) {
+ if (!pl->prog)
+ continue;
- *array = progs;
+ progs->items[cnt].prog = pl->prog;
+ progs->items[cnt].cgroup_storage = pl->storage;
+ cnt++;
+ }
+ } while ((p = cgroup_parent(p)));
+
+ rcu_assign_pointer(*array, progs);
return 0;
}
@@ -173,6 +177,45 @@ cleanup:
return -ENOMEM;
}
+static int update_effective_progs(struct cgroup *cgrp,
+ enum bpf_attach_type type)
+{
+ struct cgroup_subsys_state *css;
+ int err;
+
+ /* allocate and recompute effective prog arrays */
+ css_for_each_descendant_pre(css, &cgrp->self) {
+ struct cgroup *desc = container_of(css, struct cgroup, self);
+
+ err = compute_effective_progs(desc, type, &desc->bpf.inactive);
+ if (err)
+ goto cleanup;
+ }
+
+ /* all allocations were successful. Activate all prog arrays */
+ css_for_each_descendant_pre(css, &cgrp->self) {
+ struct cgroup *desc = container_of(css, struct cgroup, self);
+
+ activate_effective_progs(desc, type, desc->bpf.inactive);
+ desc->bpf.inactive = NULL;
+ }
+
+ return 0;
+
+cleanup:
+ /* oom while computing effective. Free all computed effective arrays
+ * since they were not activated
+ */
+ css_for_each_descendant_pre(css, &cgrp->self) {
+ struct cgroup *desc = container_of(css, struct cgroup, self);
+
+ bpf_prog_array_free(desc->bpf.inactive);
+ desc->bpf.inactive = NULL;
+ }
+
+ return err;
+}
+
#define BPF_CGROUP_MAX_PROGS 64
/**
@@ -189,7 +232,7 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
{
struct list_head *progs = &cgrp->bpf.progs[type];
struct bpf_prog *old_prog = NULL;
- struct cgroup_subsys_state *css;
+ struct bpf_cgroup_storage *storage, *old_storage = NULL;
struct bpf_prog_list *pl;
bool pl_was_allocated;
int err;
@@ -211,72 +254,71 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
return -E2BIG;
+ storage = bpf_cgroup_storage_alloc(prog);
+ if (IS_ERR(storage))
+ return -ENOMEM;
+
if (flags & BPF_F_ALLOW_MULTI) {
- list_for_each_entry(pl, progs, node)
- if (pl->prog == prog)
+ list_for_each_entry(pl, progs, node) {
+ if (pl->prog == prog) {
/* disallow attaching the same prog twice */
+ bpf_cgroup_storage_free(storage);
return -EINVAL;
+ }
+ }
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
- if (!pl)
+ if (!pl) {
+ bpf_cgroup_storage_free(storage);
return -ENOMEM;
+ }
+
pl_was_allocated = true;
pl->prog = prog;
+ pl->storage = storage;
list_add_tail(&pl->node, progs);
} else {
if (list_empty(progs)) {
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
- if (!pl)
+ if (!pl) {
+ bpf_cgroup_storage_free(storage);
return -ENOMEM;
+ }
pl_was_allocated = true;
list_add_tail(&pl->node, progs);
} else {
pl = list_first_entry(progs, typeof(*pl), node);
old_prog = pl->prog;
+ old_storage = pl->storage;
+ bpf_cgroup_storage_unlink(old_storage);
pl_was_allocated = false;
}
pl->prog = prog;
+ pl->storage = storage;
}
cgrp->bpf.flags[type] = flags;
- /* allocate and recompute effective prog arrays */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
-
- err = compute_effective_progs(desc, type, &desc->bpf.inactive);
- if (err)
- goto cleanup;
- }
-
- /* all allocations were successful. Activate all prog arrays */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
-
- activate_effective_progs(desc, type, desc->bpf.inactive);
- desc->bpf.inactive = NULL;
- }
+ err = update_effective_progs(cgrp, type);
+ if (err)
+ goto cleanup;
static_branch_inc(&cgroup_bpf_enabled_key);
+ if (old_storage)
+ bpf_cgroup_storage_free(old_storage);
if (old_prog) {
bpf_prog_put(old_prog);
static_branch_dec(&cgroup_bpf_enabled_key);
}
+ bpf_cgroup_storage_link(storage, cgrp, type);
return 0;
cleanup:
- /* oom while computing effective. Free all computed effective arrays
- * since they were not activated
- */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
-
- bpf_prog_array_free(desc->bpf.inactive);
- desc->bpf.inactive = NULL;
- }
-
/* and cleanup the prog list */
pl->prog = old_prog;
+ bpf_cgroup_storage_free(pl->storage);
+ pl->storage = old_storage;
+ bpf_cgroup_storage_link(old_storage, cgrp, type);
if (pl_was_allocated) {
list_del(&pl->node);
kfree(pl);
@@ -299,7 +341,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
struct list_head *progs = &cgrp->bpf.progs[type];
u32 flags = cgrp->bpf.flags[type];
struct bpf_prog *old_prog = NULL;
- struct cgroup_subsys_state *css;
struct bpf_prog_list *pl;
int err;
@@ -338,25 +379,14 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
pl->prog = NULL;
}
- /* allocate and recompute effective prog arrays */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
-
- err = compute_effective_progs(desc, type, &desc->bpf.inactive);
- if (err)
- goto cleanup;
- }
-
- /* all allocations were successful. Activate all prog arrays */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
-
- activate_effective_progs(desc, type, desc->bpf.inactive);
- desc->bpf.inactive = NULL;
- }
+ err = update_effective_progs(cgrp, type);
+ if (err)
+ goto cleanup;
/* now can actually delete it from this cgroup list */
list_del(&pl->node);
+ bpf_cgroup_storage_unlink(pl->storage);
+ bpf_cgroup_storage_free(pl->storage);
kfree(pl);
if (list_empty(progs))
/* last program was detached, reset flags to zero */
@@ -367,16 +397,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
return 0;
cleanup:
- /* oom while computing effective. Free all computed effective arrays
- * since they were not activated
- */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
-
- bpf_prog_array_free(desc->bpf.inactive);
- desc->bpf.inactive = NULL;
- }
-
/* and restore back old_prog */
pl->prog = old_prog;
return err;
@@ -655,6 +675,8 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_map_delete_elem_proto;
case BPF_FUNC_get_current_uid_gid:
return &bpf_get_current_uid_gid_proto;
+ case BPF_FUNC_get_local_storage:
+ return &bpf_get_local_storage_proto;
case BPF_FUNC_trace_printk:
if (capable(CAP_SYS_ADMIN))
return bpf_get_trace_printk_proto();
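
bpf_get_local_storage is the program-side view of the per-(map, cgroup) storage that attach now allocates: the storage slot is bound at attach time and the helper returns a direct pointer, so no key lookup is needed. A hedged BPF-C sketch (libbpf-style map definition and section names are assumptions, not part of this patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* One __u64 packet counter per (map, attached cgroup) pair. */
struct {
        __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
        __type(key, struct bpf_cgroup_storage_key);
        __type(value, __u64);
} pkt_counter SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
        __u64 *val = bpf_get_local_storage(&pkt_counter, 0);

        __sync_fetch_and_add(val, 1);   /* helper never returns NULL */
        return 1;                       /* 1 == allow the packet */
}

char _license[] SEC("license") = "GPL";
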
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 1e5625d46414..4d09e610777f 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1538,11 +1538,12 @@ static struct {
.null_prog = NULL,
};
-struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
+struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
if (prog_cnt)
return kzalloc(sizeof(struct bpf_prog_array) +
- sizeof(struct bpf_prog *) * (prog_cnt + 1),
+ sizeof(struct bpf_prog_array_item) *
+ (prog_cnt + 1),
flags);
return &empty_prog_array.hdr;
@@ -1556,43 +1557,45 @@ void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
kfree_rcu(progs, rcu);
}
-int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
+int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
{
- struct bpf_prog **prog;
+ struct bpf_prog_array_item *item;
u32 cnt = 0;
rcu_read_lock();
- prog = rcu_dereference(progs)->progs;
- for (; *prog; prog++)
- if (*prog != &dummy_bpf_prog.prog)
+ item = rcu_dereference(array)->items;
+ for (; item->prog; item++)
+ if (item->prog != &dummy_bpf_prog.prog)
cnt++;
rcu_read_unlock();
return cnt;
}
-static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
+
+static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
u32 *prog_ids,
u32 request_cnt)
{
+ struct bpf_prog_array_item *item;
int i = 0;
- for (; *prog; prog++) {
- if (*prog == &dummy_bpf_prog.prog)
+ item = rcu_dereference(array)->items;
+ for (; item->prog; item++) {
+ if (item->prog == &dummy_bpf_prog.prog)
continue;
- prog_ids[i] = (*prog)->aux->id;
+ prog_ids[i] = item->prog->aux->id;
if (++i == request_cnt) {
- prog++;
+ item++;
break;
}
}
- return !!(*prog);
+ return !!(item->prog);
}
-int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
+int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
__u32 __user *prog_ids, u32 cnt)
{
- struct bpf_prog **prog;
unsigned long err = 0;
bool nospc;
u32 *ids;
@@ -1611,8 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
if (!ids)
return -ENOMEM;
rcu_read_lock();
- prog = rcu_dereference(progs)->progs;
- nospc = bpf_prog_array_copy_core(prog, ids, cnt);
+ nospc = bpf_prog_array_copy_core(array, ids, cnt);
rcu_read_unlock();
err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
kfree(ids);
@@ -1623,14 +1625,14 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
return 0;
}
-void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
+void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
struct bpf_prog *old_prog)
{
- struct bpf_prog **prog = progs->progs;
+ struct bpf_prog_array_item *item = array->items;
- for (; *prog; prog++)
- if (*prog == old_prog) {
- WRITE_ONCE(*prog, &dummy_bpf_prog.prog);
+ for (; item->prog; item++)
+ if (item->prog == old_prog) {
+ WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
break;
}
}
@@ -1641,7 +1643,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
struct bpf_prog_array **new_array)
{
int new_prog_cnt, carry_prog_cnt = 0;
- struct bpf_prog **existing_prog;
+ struct bpf_prog_array_item *existing;
struct bpf_prog_array *array;
bool found_exclude = false;
int new_prog_idx = 0;
@@ -1650,15 +1652,15 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
* the new array.
*/
if (old_array) {
- existing_prog = old_array->progs;
- for (; *existing_prog; existing_prog++) {
- if (*existing_prog == exclude_prog) {
+ existing = old_array->items;
+ for (; existing->prog; existing++) {
+ if (existing->prog == exclude_prog) {
found_exclude = true;
continue;
}
- if (*existing_prog != &dummy_bpf_prog.prog)
+ if (existing->prog != &dummy_bpf_prog.prog)
carry_prog_cnt++;
- if (*existing_prog == include_prog)
+ if (existing->prog == include_prog)
return -EEXIST;
}
}
@@ -1684,15 +1686,17 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
/* Fill in the new prog array */
if (carry_prog_cnt) {
- existing_prog = old_array->progs;
- for (; *existing_prog; existing_prog++)
- if (*existing_prog != exclude_prog &&
- *existing_prog != &dummy_bpf_prog.prog)
- array->progs[new_prog_idx++] = *existing_prog;
+ existing = old_array->items;
+ for (; existing->prog; existing++)
+ if (existing->prog != exclude_prog &&
+ existing->prog != &dummy_bpf_prog.prog) {
+ array->items[new_prog_idx++].prog =
+ existing->prog;
+ }
}
if (include_prog)
- array->progs[new_prog_idx++] = include_prog;
- array->progs[new_prog_idx] = NULL;
+ array->items[new_prog_idx++].prog = include_prog;
+ array->items[new_prog_idx].prog = NULL;
*new_array = array;
return 0;
}
@@ -1701,7 +1705,6 @@ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
u32 *prog_ids, u32 request_cnt,
u32 *prog_cnt)
{
- struct bpf_prog **prog;
u32 cnt = 0;
if (array)
@@ -1714,8 +1717,7 @@ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
return 0;
/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
- prog = rcu_dereference_check(array, 1)->progs;
- return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
+ return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
: 0;
}
@@ -1793,6 +1795,7 @@ const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_sock_map_update_proto __weak;
const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
+const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
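
The conversions above all follow the same convention: the items array is terminated by the first entry whose ->prog pointer is NULL, and dummy entries are skipped. A condensed sketch of that walk, mirroring bpf_prog_array_length():

/* Sketch of the item-based iteration used by the converted helpers;
 * the array ends at the first item with item->prog == NULL.
 */
static u32 count_active_progs(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;
	u32 cnt = 0;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			cnt++;
	return cnt;
}
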
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 46f5f29605d4..620bc5024d7d 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -555,6 +555,7 @@ const struct bpf_map_ops cpu_map_ops = {
.map_update_elem = cpu_map_update_elem,
.map_lookup_elem = cpu_map_lookup_elem,
.map_get_next_key = cpu_map_get_next_key,
+ .map_check_btf = map_check_no_btf,
};
static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 750d45edae79..ac1df79f3788 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -488,6 +488,7 @@ const struct bpf_map_ops dev_map_ops = {
.map_lookup_elem = dev_map_lookup_elem,
.map_update_elem = dev_map_update_elem,
.map_delete_elem = dev_map_delete_elem,
+ .map_check_btf = map_check_no_btf,
};
static int dev_map_notification(struct notifier_block *notifier,
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 513d9dfcf4ee..04b8eda94e7d 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -11,9 +11,11 @@
* General Public License for more details.
*/
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
+#include <uapi/linux/btf.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"
@@ -1162,6 +1164,27 @@ static void htab_map_free(struct bpf_map *map)
kfree(htab);
}
+static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
+ struct seq_file *m)
+{
+ void *value;
+
+ rcu_read_lock();
+
+ value = htab_map_lookup_elem(map, key);
+ if (!value) {
+ rcu_read_unlock();
+ return;
+ }
+
+ btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
+ seq_puts(m, ": ");
+ btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
+ seq_puts(m, "\n");
+
+ rcu_read_unlock();
+}
+
const struct bpf_map_ops htab_map_ops = {
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
@@ -1171,6 +1194,7 @@ const struct bpf_map_ops htab_map_ops = {
.map_update_elem = htab_map_update_elem,
.map_delete_elem = htab_map_delete_elem,
.map_gen_lookup = htab_map_gen_lookup,
+ .map_seq_show_elem = htab_map_seq_show_elem,
};
const struct bpf_map_ops htab_lru_map_ops = {
@@ -1182,6 +1206,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
.map_update_elem = htab_lru_map_update_elem,
.map_delete_elem = htab_lru_map_delete_elem,
.map_gen_lookup = htab_lru_map_gen_lookup,
+ .map_seq_show_elem = htab_map_seq_show_elem,
};
/* Called from eBPF program */
@@ -1408,4 +1433,5 @@ const struct bpf_map_ops htab_of_maps_map_ops = {
.map_fd_put_ptr = bpf_map_fd_put_ptr,
.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
.map_gen_lookup = htab_of_map_gen_lookup,
+ .map_check_btf = map_check_no_btf,
};
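
With map_seq_show_elem wired up, a BTF-annotated hash map pinned in bpffs can be dumped with plain read(2). A user-space sketch; the pin path here is hypothetical:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	/* hypothetical pin path, created earlier via BPF_OBJ_PIN */
	int fd = open("/sys/fs/bpf/my_htab", O_RDONLY);

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* "key: value" lines */
	close(fd);
	return 0;
}
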
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 73065e2d23c2..1991466b8327 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -193,4 +193,24 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
};
+
+DECLARE_PER_CPU(void*, bpf_cgroup_storage);
+
+BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
+{
+	/* The map and flags arguments are not used now, but provide
+	 * the ability to extend the API to other types of local storage.
+	 * The verifier checks that their values are correct.
+ */
+ return (unsigned long) this_cpu_read(bpf_cgroup_storage);
+}
+
+const struct bpf_func_proto bpf_get_local_storage_proto = {
+ .func = bpf_get_local_storage,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_MAP_VALUE,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_ANYTHING,
+};
#endif
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 76efe9a183f5..2ada5e21dfa6 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -196,19 +196,21 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
struct bpf_map *map = seq_file_to_map(m);
void *key = map_iter(m)->key;
+ void *prev_key;
if (map_iter(m)->done)
return NULL;
if (unlikely(v == SEQ_START_TOKEN))
- goto done;
+ prev_key = NULL;
+ else
+ prev_key = key;
- if (map->ops->map_get_next_key(map, key, key)) {
+ if (map->ops->map_get_next_key(map, prev_key, key)) {
map_iter(m)->done = true;
return NULL;
}
-done:
++(*pos);
return key;
}
@@ -332,7 +334,8 @@ static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
struct bpf_map *map = arg;
return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
- map->btf ? &bpffs_map_fops : &bpffs_obj_fops);
+ bpf_map_support_seq_show(map) ?
+ &bpffs_map_fops : &bpffs_obj_fops);
}
static struct dentry *
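
The fix mirrors the long-standing user-space iteration contract, where the first get-next-key call passes a NULL previous key. A sketch assuming libbpf's bpf_map_get_next_key() wrapper and u32 keys:

#include <bpf/bpf.h>
#include <linux/types.h>
#include <stddef.h>

static void walk_keys(int map_fd)
{
	__u32 key, next_key;
	__u32 *prev = NULL;	/* NULL on the first call: "give me the first key" */

	while (bpf_map_get_next_key(map_fd, prev, &next_key) == 0) {
		/* ... look up or print next_key here ... */
		key = next_key;
		prev = &key;
	}
}
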
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
new file mode 100644
index 000000000000..22ad967d1e5f
--- /dev/null
+++ b/kernel/bpf/local_storage.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf-cgroup.h>
+#include <linux/bpf.h>
+#include <linux/bug.h>
+#include <linux/filter.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+
+DEFINE_PER_CPU(void*, bpf_cgroup_storage);
+
+#ifdef CONFIG_CGROUP_BPF
+
+#define LOCAL_STORAGE_CREATE_FLAG_MASK \
+ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
+
+struct bpf_cgroup_storage_map {
+ struct bpf_map map;
+
+ spinlock_t lock;
+ struct bpf_prog *prog;
+ struct rb_root root;
+ struct list_head list;
+};
+
+static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
+{
+ return container_of(map, struct bpf_cgroup_storage_map, map);
+}
+
+static int bpf_cgroup_storage_key_cmp(
+ const struct bpf_cgroup_storage_key *key1,
+ const struct bpf_cgroup_storage_key *key2)
+{
+ if (key1->cgroup_inode_id < key2->cgroup_inode_id)
+ return -1;
+ else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
+ return 1;
+ else if (key1->attach_type < key2->attach_type)
+ return -1;
+ else if (key1->attach_type > key2->attach_type)
+ return 1;
+ return 0;
+}
+
+static struct bpf_cgroup_storage *cgroup_storage_lookup(
+ struct bpf_cgroup_storage_map *map, struct bpf_cgroup_storage_key *key,
+ bool locked)
+{
+ struct rb_root *root = &map->root;
+ struct rb_node *node;
+
+ if (!locked)
+ spin_lock_bh(&map->lock);
+
+ node = root->rb_node;
+ while (node) {
+ struct bpf_cgroup_storage *storage;
+
+ storage = container_of(node, struct bpf_cgroup_storage, node);
+
+ switch (bpf_cgroup_storage_key_cmp(key, &storage->key)) {
+ case -1:
+ node = node->rb_left;
+ break;
+ case 1:
+ node = node->rb_right;
+ break;
+ default:
+ if (!locked)
+ spin_unlock_bh(&map->lock);
+ return storage;
+ }
+ }
+
+ if (!locked)
+ spin_unlock_bh(&map->lock);
+
+ return NULL;
+}
+
+static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
+ struct bpf_cgroup_storage *storage)
+{
+ struct rb_root *root = &map->root;
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ while (*new) {
+ struct bpf_cgroup_storage *this;
+
+ this = container_of(*new, struct bpf_cgroup_storage, node);
+
+ parent = *new;
+ switch (bpf_cgroup_storage_key_cmp(&storage->key, &this->key)) {
+ case -1:
+ new = &((*new)->rb_left);
+ break;
+ case 1:
+ new = &((*new)->rb_right);
+ break;
+ default:
+ return -EEXIST;
+ }
+ }
+
+ rb_link_node(&storage->node, parent, new);
+ rb_insert_color(&storage->node, root);
+
+ return 0;
+}
+
+static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key)
+{
+ struct bpf_cgroup_storage_map *map = map_to_storage(_map);
+ struct bpf_cgroup_storage_key *key = _key;
+ struct bpf_cgroup_storage *storage;
+
+ storage = cgroup_storage_lookup(map, key, false);
+ if (!storage)
+ return NULL;
+
+ return &READ_ONCE(storage->buf)->data[0];
+}
+
+static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
+ void *value, u64 flags)
+{
+ struct bpf_cgroup_storage_key *key = _key;
+ struct bpf_cgroup_storage *storage;
+ struct bpf_storage_buffer *new;
+
+ if (flags & BPF_NOEXIST)
+ return -EINVAL;
+
+ storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
+ key, false);
+ if (!storage)
+ return -ENOENT;
+
+ new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
+ map->value_size, __GFP_ZERO | GFP_USER,
+ map->numa_node);
+ if (!new)
+ return -ENOMEM;
+
+ memcpy(&new->data[0], value, map->value_size);
+
+ new = xchg(&storage->buf, new);
+ kfree_rcu(new, rcu);
+
+ return 0;
+}
+
+static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key,
+ void *_next_key)
+{
+ struct bpf_cgroup_storage_map *map = map_to_storage(_map);
+ struct bpf_cgroup_storage_key *key = _key;
+ struct bpf_cgroup_storage_key *next = _next_key;
+ struct bpf_cgroup_storage *storage;
+
+ spin_lock_bh(&map->lock);
+
+ if (list_empty(&map->list))
+ goto enoent;
+
+ if (key) {
+ storage = cgroup_storage_lookup(map, key, true);
+ if (!storage)
+ goto enoent;
+
+ storage = list_next_entry(storage, list);
+ if (!storage)
+ goto enoent;
+ } else {
+ storage = list_first_entry(&map->list,
+ struct bpf_cgroup_storage, list);
+ }
+
+ spin_unlock_bh(&map->lock);
+ next->attach_type = storage->key.attach_type;
+ next->cgroup_inode_id = storage->key.cgroup_inode_id;
+ return 0;
+
+enoent:
+ spin_unlock_bh(&map->lock);
+ return -ENOENT;
+}
+
+static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
+{
+ int numa_node = bpf_map_attr_numa_node(attr);
+ struct bpf_cgroup_storage_map *map;
+
+ if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
+ return ERR_PTR(-EINVAL);
+
+ if (attr->value_size > PAGE_SIZE)
+ return ERR_PTR(-E2BIG);
+
+ if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK)
+ /* reserved bits should not be used */
+ return ERR_PTR(-EINVAL);
+
+ if (attr->max_entries)
+ /* max_entries is not used and enforced to be 0 */
+ return ERR_PTR(-EINVAL);
+
+ map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
+ __GFP_ZERO | GFP_USER, numa_node);
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+
+ map->map.pages = round_up(sizeof(struct bpf_cgroup_storage_map),
+ PAGE_SIZE) >> PAGE_SHIFT;
+
+ /* copy mandatory map attributes */
+ bpf_map_init_from_attr(&map->map, attr);
+
+ spin_lock_init(&map->lock);
+ map->root = RB_ROOT;
+ INIT_LIST_HEAD(&map->list);
+
+ return &map->map;
+}
+
+static void cgroup_storage_map_free(struct bpf_map *_map)
+{
+ struct bpf_cgroup_storage_map *map = map_to_storage(_map);
+
+ WARN_ON(!RB_EMPTY_ROOT(&map->root));
+ WARN_ON(!list_empty(&map->list));
+
+ kfree(map);
+}
+
+static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
+{
+ return -EINVAL;
+}
+
+const struct bpf_map_ops cgroup_storage_map_ops = {
+ .map_alloc = cgroup_storage_map_alloc,
+ .map_free = cgroup_storage_map_free,
+ .map_get_next_key = cgroup_storage_get_next_key,
+ .map_lookup_elem = cgroup_storage_lookup_elem,
+ .map_update_elem = cgroup_storage_update_elem,
+ .map_delete_elem = cgroup_storage_delete_elem,
+ .map_check_btf = map_check_no_btf,
+};
+
+int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
+{
+ struct bpf_cgroup_storage_map *map = map_to_storage(_map);
+ int ret = -EBUSY;
+
+ spin_lock_bh(&map->lock);
+
+ if (map->prog && map->prog != prog)
+ goto unlock;
+ if (prog->aux->cgroup_storage && prog->aux->cgroup_storage != _map)
+ goto unlock;
+
+ map->prog = prog;
+ prog->aux->cgroup_storage = _map;
+ ret = 0;
+unlock:
+ spin_unlock_bh(&map->lock);
+
+ return ret;
+}
+
+void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map)
+{
+ struct bpf_cgroup_storage_map *map = map_to_storage(_map);
+
+ spin_lock_bh(&map->lock);
+ if (map->prog == prog) {
+ WARN_ON(prog->aux->cgroup_storage != _map);
+ map->prog = NULL;
+ prog->aux->cgroup_storage = NULL;
+ }
+ spin_unlock_bh(&map->lock);
+}
+
+struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog)
+{
+ struct bpf_cgroup_storage *storage;
+ struct bpf_map *map;
+ u32 pages;
+
+ map = prog->aux->cgroup_storage;
+ if (!map)
+ return NULL;
+
+ pages = round_up(sizeof(struct bpf_cgroup_storage) +
+ sizeof(struct bpf_storage_buffer) +
+ map->value_size, PAGE_SIZE) >> PAGE_SHIFT;
+ if (bpf_map_charge_memlock(map, pages))
+ return ERR_PTR(-EPERM);
+
+ storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
+ __GFP_ZERO | GFP_USER, map->numa_node);
+ if (!storage) {
+ bpf_map_uncharge_memlock(map, pages);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ storage->buf = kmalloc_node(sizeof(struct bpf_storage_buffer) +
+ map->value_size, __GFP_ZERO | GFP_USER,
+ map->numa_node);
+ if (!storage->buf) {
+ bpf_map_uncharge_memlock(map, pages);
+ kfree(storage);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ storage->map = (struct bpf_cgroup_storage_map *)map;
+
+ return storage;
+}
+
+void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
+{
+ u32 pages;
+ struct bpf_map *map;
+
+ if (!storage)
+ return;
+
+ map = &storage->map->map;
+ pages = round_up(sizeof(struct bpf_cgroup_storage) +
+ sizeof(struct bpf_storage_buffer) +
+ map->value_size, PAGE_SIZE) >> PAGE_SHIFT;
+ bpf_map_uncharge_memlock(map, pages);
+
+ kfree_rcu(storage->buf, rcu);
+ kfree_rcu(storage, rcu);
+}
+
+void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
+ struct cgroup *cgroup,
+ enum bpf_attach_type type)
+{
+ struct bpf_cgroup_storage_map *map;
+
+ if (!storage)
+ return;
+
+ storage->key.attach_type = type;
+ storage->key.cgroup_inode_id = cgroup->kn->id.id;
+
+ map = storage->map;
+
+ spin_lock_bh(&map->lock);
+ WARN_ON(cgroup_storage_insert(map, storage));
+ list_add(&storage->list, &map->list);
+ spin_unlock_bh(&map->lock);
+}
+
+void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
+{
+ struct bpf_cgroup_storage_map *map;
+ struct rb_root *root;
+
+ if (!storage)
+ return;
+
+ map = storage->map;
+
+ spin_lock_bh(&map->lock);
+ root = &map->root;
+ rb_erase(&storage->node, root);
+
+ list_del(&storage->list);
+ spin_unlock_bh(&map->lock);
+}
+
+#endif
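
From user space, a cgroup storage map is created like any other map, subject to the constraints cgroup_storage_map_alloc() enforces above. A raw bpf(2) sketch, error handling trimmed:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_cgroup_storage(__u32 value_size)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_CGROUP_STORAGE;
	attr.key_size    = sizeof(struct bpf_cgroup_storage_key);
	attr.value_size  = value_size;	/* must be <= PAGE_SIZE */
	attr.max_entries = 0;		/* anything else is -EINVAL */

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
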
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 1603492c9cc7..9058317ba9de 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -10,11 +10,13 @@
*/
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <net/ipv6.h>
+#include <uapi/linux/btf.h>
/* Intermediate node */
#define LPM_TREE_NODE_FLAG_IM BIT(0)
@@ -686,6 +688,15 @@ free_stack:
return err;
}
+static int trie_check_btf(const struct bpf_map *map,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type)
+{
+ /* Keys must have struct bpf_lpm_trie_key embedded. */
+ return BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ?
+ -EINVAL : 0;
+}
+
const struct bpf_map_ops trie_map_ops = {
.map_alloc = trie_alloc,
.map_free = trie_free,
@@ -693,4 +704,5 @@ const struct bpf_map_ops trie_map_ops = {
.map_lookup_elem = trie_lookup_elem,
.map_update_elem = trie_update_elem,
.map_delete_elem = trie_delete_elem,
+ .map_check_btf = trie_check_btf,
};
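
Note that trie_check_btf() only verifies that the key is a struct; the layout the runtime actually expects is struct bpf_lpm_trie_key at the start of the key. An illustrative IPv4 key type:

/* Illustrative BTF key type that passes trie_check_btf(). The
 * embedded bpf_lpm_trie_key (prefixlen plus zero-length data[0])
 * comes from uapi/linux/bpf.h.
 */
struct ipv4_lpm_key {
	struct bpf_lpm_trie_key trie_key;	/* prefixlen, data[0] */
	__u32 addr;				/* the IPv4 address bits */
};
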
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index 1da574612bea..3bfbf4464416 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -23,7 +23,8 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
* is a runtime binding. Doing static check alone
* in the verifier is not enough.
*/
- if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
+ if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
+ inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE) {
fdput(f);
return ERR_PTR(-ENOTSUPP);
}
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index ac747d5cf7c6..177a52436394 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -18,19 +18,43 @@
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
+#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>
-/* Protects bpf_prog_offload_devs, bpf_map_offload_devs and offload members
+/* Protects offdevs, members of bpf_offload_netdev and offload members
* of all progs.
* RTNL lock cannot be taken when holding this lock.
*/
static DECLARE_RWSEM(bpf_devs_lock);
-static LIST_HEAD(bpf_prog_offload_devs);
-static LIST_HEAD(bpf_map_offload_devs);
+
+struct bpf_offload_dev {
+ struct list_head netdevs;
+};
+
+struct bpf_offload_netdev {
+ struct rhash_head l;
+ struct net_device *netdev;
+ struct bpf_offload_dev *offdev;
+ struct list_head progs;
+ struct list_head maps;
+ struct list_head offdev_netdevs;
+};
+
+static const struct rhashtable_params offdevs_params = {
+ .nelem_hint = 4,
+ .key_len = sizeof(struct net_device *),
+ .key_offset = offsetof(struct bpf_offload_netdev, netdev),
+ .head_offset = offsetof(struct bpf_offload_netdev, l),
+ .automatic_shrinking = true,
+};
+
+static struct rhashtable offdevs;
+static bool offdevs_inited;
static int bpf_dev_offload_check(struct net_device *netdev)
{
@@ -41,8 +65,19 @@ static int bpf_dev_offload_check(struct net_device *netdev)
return 0;
}
+static struct bpf_offload_netdev *
+bpf_offload_find_netdev(struct net_device *netdev)
+{
+ lockdep_assert_held(&bpf_devs_lock);
+
+ if (!offdevs_inited)
+ return NULL;
+ return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
+}
+
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
+ struct bpf_offload_netdev *ondev;
struct bpf_prog_offload *offload;
int err;
@@ -66,12 +101,13 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
goto err_maybe_put;
down_write(&bpf_devs_lock);
- if (offload->netdev->reg_state != NETREG_REGISTERED) {
+ ondev = bpf_offload_find_netdev(offload->netdev);
+ if (!ondev) {
err = -EINVAL;
goto err_unlock;
}
prog->aux->offload = offload;
- list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
+ list_add_tail(&offload->offloads, &ondev->progs);
dev_put(offload->netdev);
up_write(&bpf_devs_lock);
@@ -294,6 +330,7 @@ static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
struct net *net = current->nsproxy->net_ns;
+ struct bpf_offload_netdev *ondev;
struct bpf_offloaded_map *offmap;
int err;
@@ -316,11 +353,17 @@ struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
if (err)
goto err_unlock;
+ ondev = bpf_offload_find_netdev(offmap->netdev);
+ if (!ondev) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+
err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
if (err)
goto err_unlock;
- list_add_tail(&offmap->offloads, &bpf_map_offload_devs);
+ list_add_tail(&offmap->offloads, &ondev->maps);
up_write(&bpf_devs_lock);
rtnl_unlock();
@@ -468,77 +511,159 @@ int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
return 0;
}
-bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map)
+static bool __bpf_offload_dev_match(struct bpf_prog *prog,
+ struct net_device *netdev)
{
- struct bpf_offloaded_map *offmap;
+ struct bpf_offload_netdev *ondev1, *ondev2;
struct bpf_prog_offload *offload;
- bool ret;
if (!bpf_prog_is_dev_bound(prog->aux))
return false;
- if (!bpf_map_is_dev_bound(map))
- return bpf_map_offload_neutral(map);
- down_read(&bpf_devs_lock);
offload = prog->aux->offload;
- offmap = map_to_offmap(map);
+ if (!offload)
+ return false;
+ if (offload->netdev == netdev)
+ return true;
- ret = offload && offload->netdev == offmap->netdev;
+ ondev1 = bpf_offload_find_netdev(offload->netdev);
+ ondev2 = bpf_offload_find_netdev(netdev);
+
+ return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
+}
+
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
+{
+ bool ret;
+
+ down_read(&bpf_devs_lock);
+ ret = __bpf_offload_dev_match(prog, netdev);
up_read(&bpf_devs_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_match);
-static void bpf_offload_orphan_all_progs(struct net_device *netdev)
+bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
- struct bpf_prog_offload *offload, *tmp;
+ struct bpf_offloaded_map *offmap;
+ bool ret;
- list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads)
- if (offload->netdev == netdev)
- __bpf_prog_offload_destroy(offload->prog);
+ if (!bpf_map_is_dev_bound(map))
+ return bpf_map_offload_neutral(map);
+ offmap = map_to_offmap(map);
+
+ down_read(&bpf_devs_lock);
+ ret = __bpf_offload_dev_match(prog, offmap->netdev);
+ up_read(&bpf_devs_lock);
+
+ return ret;
}
-static void bpf_offload_orphan_all_maps(struct net_device *netdev)
+int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
+ struct net_device *netdev)
{
- struct bpf_offloaded_map *offmap, *tmp;
+ struct bpf_offload_netdev *ondev;
+ int err;
- list_for_each_entry_safe(offmap, tmp, &bpf_map_offload_devs, offloads)
- if (offmap->netdev == netdev)
- __bpf_map_offload_destroy(offmap);
+ ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
+ if (!ondev)
+ return -ENOMEM;
+
+ ondev->netdev = netdev;
+ ondev->offdev = offdev;
+ INIT_LIST_HEAD(&ondev->progs);
+ INIT_LIST_HEAD(&ondev->maps);
+
+ down_write(&bpf_devs_lock);
+ err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
+ if (err) {
+ netdev_warn(netdev, "failed to register for BPF offload\n");
+ goto err_unlock_free;
+ }
+
+ list_add(&ondev->offdev_netdevs, &offdev->netdevs);
+ up_write(&bpf_devs_lock);
+ return 0;
+
+err_unlock_free:
+ up_write(&bpf_devs_lock);
+ kfree(ondev);
+ return err;
}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);
-static int bpf_offload_notification(struct notifier_block *notifier,
- ulong event, void *ptr)
+void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
+ struct net_device *netdev)
{
- struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct bpf_offload_netdev *ondev, *altdev;
+ struct bpf_offloaded_map *offmap, *mtmp;
+ struct bpf_prog_offload *offload, *ptmp;
ASSERT_RTNL();
- switch (event) {
- case NETDEV_UNREGISTER:
- /* ignore namespace changes */
- if (netdev->reg_state != NETREG_UNREGISTERING)
- break;
-
- down_write(&bpf_devs_lock);
- bpf_offload_orphan_all_progs(netdev);
- bpf_offload_orphan_all_maps(netdev);
- up_write(&bpf_devs_lock);
- break;
- default:
- break;
+ down_write(&bpf_devs_lock);
+ ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
+ if (WARN_ON(!ondev))
+ goto unlock;
+
+ WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
+ list_del(&ondev->offdev_netdevs);
+
+ /* Try to move the objects to another netdev of the device */
+ altdev = list_first_entry_or_null(&offdev->netdevs,
+ struct bpf_offload_netdev,
+ offdev_netdevs);
+ if (altdev) {
+ list_for_each_entry(offload, &ondev->progs, offloads)
+ offload->netdev = altdev->netdev;
+ list_splice_init(&ondev->progs, &altdev->progs);
+
+ list_for_each_entry(offmap, &ondev->maps, offloads)
+ offmap->netdev = altdev->netdev;
+ list_splice_init(&ondev->maps, &altdev->maps);
+ } else {
+ list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
+ __bpf_prog_offload_destroy(offload->prog);
+ list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
+ __bpf_map_offload_destroy(offmap);
}
- return NOTIFY_OK;
-}
-static struct notifier_block bpf_offload_notifier = {
- .notifier_call = bpf_offload_notification,
-};
+ WARN_ON(!list_empty(&ondev->progs));
+ WARN_ON(!list_empty(&ondev->maps));
+ kfree(ondev);
+unlock:
+ up_write(&bpf_devs_lock);
+}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
-static int __init bpf_offload_init(void)
+struct bpf_offload_dev *bpf_offload_dev_create(void)
{
- register_netdevice_notifier(&bpf_offload_notifier);
- return 0;
+ struct bpf_offload_dev *offdev;
+ int err;
+
+ down_write(&bpf_devs_lock);
+ if (!offdevs_inited) {
+ err = rhashtable_init(&offdevs, &offdevs_params);
+ if (err)
+ return ERR_PTR(err);
+ offdevs_inited = true;
+ }
+ up_write(&bpf_devs_lock);
+
+ offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
+ if (!offdev)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&offdev->netdevs);
+
+ return offdev;
}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_create);
-subsys_initcall(bpf_offload_init);
+void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
+{
+ WARN_ON(!list_empty(&offdev->netdevs));
+ kfree(offdev);
+}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);
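
A driver would use the new API roughly as follows: one bpf_offload_dev per ASIC, with each of its netdevs registered against it so programs and maps can migrate between ports. A sketch with a hypothetical driver struct, error handling trimmed:

/* Hypothetical driver glue for the new offload-device API. */
static int drv_bpf_init(struct drv_priv *priv)
{
	priv->bpf_dev = bpf_offload_dev_create();
	if (IS_ERR(priv->bpf_dev))
		return PTR_ERR(priv->bpf_dev);

	return bpf_offload_dev_netdev_register(priv->bpf_dev,
					       priv->netdev);
}

static void drv_bpf_fini(struct drv_priv *priv)
{
	rtnl_lock();	/* the unregister path asserts RTNL */
	bpf_offload_dev_netdev_unregister(priv->bpf_dev, priv->netdev);
	rtnl_unlock();
	bpf_offload_dev_destroy(priv->bpf_dev);
}
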
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
new file mode 100644
index 000000000000..18e225de80ff
--- /dev/null
+++ b/kernel/bpf/reuseport_array.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Facebook
+ */
+#include <linux/bpf.h>
+#include <linux/err.h>
+#include <linux/sock_diag.h>
+#include <net/sock_reuseport.h>
+
+struct reuseport_array {
+ struct bpf_map map;
+ struct sock __rcu *ptrs[];
+};
+
+static struct reuseport_array *reuseport_array(struct bpf_map *map)
+{
+ return (struct reuseport_array *)map;
+}
+
+/* The caller must hold the reuseport_lock */
+void bpf_sk_reuseport_detach(struct sock *sk)
+{
+ struct sock __rcu **socks;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ socks = sk->sk_user_data;
+ if (socks) {
+ WRITE_ONCE(sk->sk_user_data, NULL);
+ /*
+ * Do not move this NULL assignment outside of
+ * sk->sk_callback_lock because there is
+ * a race with reuseport_array_free()
+ * which does not hold the reuseport_lock.
+ */
+ RCU_INIT_POINTER(*socks, NULL);
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static int reuseport_array_alloc_check(union bpf_attr *attr)
+{
+ if (attr->value_size != sizeof(u32) &&
+ attr->value_size != sizeof(u64))
+ return -EINVAL;
+
+ return array_map_alloc_check(attr);
+}
+
+static void *reuseport_array_lookup_elem(struct bpf_map *map, void *key)
+{
+ struct reuseport_array *array = reuseport_array(map);
+ u32 index = *(u32 *)key;
+
+ if (unlikely(index >= array->map.max_entries))
+ return NULL;
+
+ return rcu_dereference(array->ptrs[index]);
+}
+
+/* Called from syscall only */
+static int reuseport_array_delete_elem(struct bpf_map *map, void *key)
+{
+ struct reuseport_array *array = reuseport_array(map);
+ u32 index = *(u32 *)key;
+ struct sock *sk;
+ int err;
+
+ if (index >= map->max_entries)
+ return -E2BIG;
+
+ if (!rcu_access_pointer(array->ptrs[index]))
+ return -ENOENT;
+
+ spin_lock_bh(&reuseport_lock);
+
+ sk = rcu_dereference_protected(array->ptrs[index],
+ lockdep_is_held(&reuseport_lock));
+ if (sk) {
+ write_lock_bh(&sk->sk_callback_lock);
+ WRITE_ONCE(sk->sk_user_data, NULL);
+ RCU_INIT_POINTER(array->ptrs[index], NULL);
+ write_unlock_bh(&sk->sk_callback_lock);
+ err = 0;
+ } else {
+ err = -ENOENT;
+ }
+
+ spin_unlock_bh(&reuseport_lock);
+
+ return err;
+}
+
+static void reuseport_array_free(struct bpf_map *map)
+{
+ struct reuseport_array *array = reuseport_array(map);
+ struct sock *sk;
+ u32 i;
+
+ synchronize_rcu();
+
+ /*
+ * ops->map_*_elem() will not be able to access this
+ * array now. Hence, this function only races with
+ * bpf_sk_reuseport_detach(), which was triggered by
+ * close() or disconnect().
+ *
+ * This function and bpf_sk_reuseport_detach() are
+ * both removing sk from "array". Who removes it
+ * first does not matter.
+ *
+ * The only concern here is bpf_sk_reuseport_detach()
+ * may access "array" which is being freed here.
+ * bpf_sk_reuseport_detach() accesses this "array"
+ * through sk->sk_user_data _and_ with sk->sk_callback_lock
+ * held which is enough because this "array" is not freed
+ * until all sk->sk_user_data has stopped referencing this "array".
+ *
+ * Hence, due to the above, taking "reuseport_lock" is not
+ * needed here.
+ */
+
+ /*
+ * Since reuseport_lock is not taken, sk is accessed under
+ * rcu_read_lock()
+ */
+ rcu_read_lock();
+ for (i = 0; i < map->max_entries; i++) {
+ sk = rcu_dereference(array->ptrs[i]);
+ if (sk) {
+ write_lock_bh(&sk->sk_callback_lock);
+ /*
+ * No need for WRITE_ONCE(). At this point,
+ * no one is reading it without taking the
+ * sk->sk_callback_lock.
+ */
+ sk->sk_user_data = NULL;
+ write_unlock_bh(&sk->sk_callback_lock);
+ RCU_INIT_POINTER(array->ptrs[i], NULL);
+ }
+ }
+ rcu_read_unlock();
+
+ /*
+ * Once we reach here, no sk->sk_user_data is
+ * referencing this "array" any more. "array" can be freed now.
+ */
+ bpf_map_area_free(array);
+}
+
+static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
+{
+ int err, numa_node = bpf_map_attr_numa_node(attr);
+ struct reuseport_array *array;
+ u64 cost, array_size;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+
+ array_size = sizeof(*array);
+ array_size += (u64)attr->max_entries * sizeof(struct sock *);
+
+ /* make sure there is no u32 overflow later in round_up() */
+ cost = array_size;
+ if (cost >= U32_MAX - PAGE_SIZE)
+ return ERR_PTR(-ENOMEM);
+ cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+ err = bpf_map_precharge_memlock(cost);
+ if (err)
+ return ERR_PTR(err);
+
+ /* allocate all map elements and zero-initialize them */
+ array = bpf_map_area_alloc(array_size, numa_node);
+ if (!array)
+ return ERR_PTR(-ENOMEM);
+
+ /* copy mandatory map attributes */
+ bpf_map_init_from_attr(&array->map, attr);
+ array->map.pages = cost;
+
+ return &array->map;
+}
+
+int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
+ void *value)
+{
+ struct sock *sk;
+ int err;
+
+ if (map->value_size != sizeof(u64))
+ return -ENOSPC;
+
+ rcu_read_lock();
+ sk = reuseport_array_lookup_elem(map, key);
+ if (sk) {
+ *(u64 *)value = sock_gen_cookie(sk);
+ err = 0;
+ } else {
+ err = -ENOENT;
+ }
+ rcu_read_unlock();
+
+ return err;
+}
+
+static int
+reuseport_array_update_check(const struct reuseport_array *array,
+ const struct sock *nsk,
+ const struct sock *osk,
+ const struct sock_reuseport *nsk_reuse,
+ u32 map_flags)
+{
+ if (osk && map_flags == BPF_NOEXIST)
+ return -EEXIST;
+
+ if (!osk && map_flags == BPF_EXIST)
+ return -ENOENT;
+
+ if (nsk->sk_protocol != IPPROTO_UDP && nsk->sk_protocol != IPPROTO_TCP)
+ return -ENOTSUPP;
+
+ if (nsk->sk_family != AF_INET && nsk->sk_family != AF_INET6)
+ return -ENOTSUPP;
+
+ if (nsk->sk_type != SOCK_STREAM && nsk->sk_type != SOCK_DGRAM)
+ return -ENOTSUPP;
+
+ /*
+ * sk must be hashed (i.e. listening in the TCP case or bound
+ * in the UDP case) and
+ * it must also be a SO_REUSEPORT sk (i.e. reuse cannot be NULL).
+ *
+ * Also, sk will be used in bpf helper that is protected by
+ * rcu_read_lock().
+ */
+ if (!sock_flag(nsk, SOCK_RCU_FREE) || !sk_hashed(nsk) || !nsk_reuse)
+ return -EINVAL;
+
+ /* READ_ONCE because the sk->sk_callback_lock may not be held here */
+ if (READ_ONCE(nsk->sk_user_data))
+ return -EBUSY;
+
+ return 0;
+}
+
+/*
+ * Called from syscall only.
+ * The "nsk" in the fd refcnt.
+ * The "osk" and "reuse" are protected by reuseport_lock.
+ */
+int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags)
+{
+ struct reuseport_array *array = reuseport_array(map);
+ struct sock *free_osk = NULL, *osk, *nsk;
+ struct sock_reuseport *reuse;
+ u32 index = *(u32 *)key;
+ struct socket *socket;
+ int err, fd;
+
+ if (map_flags > BPF_EXIST)
+ return -EINVAL;
+
+ if (index >= map->max_entries)
+ return -E2BIG;
+
+ if (map->value_size == sizeof(u64)) {
+ u64 fd64 = *(u64 *)value;
+
+ if (fd64 > S32_MAX)
+ return -EINVAL;
+ fd = fd64;
+ } else {
+ fd = *(int *)value;
+ }
+
+ socket = sockfd_lookup(fd, &err);
+ if (!socket)
+ return err;
+
+ nsk = socket->sk;
+ if (!nsk) {
+ err = -EINVAL;
+ goto put_file;
+ }
+
+ /* Quick checks before taking reuseport_lock */
+ err = reuseport_array_update_check(array, nsk,
+ rcu_access_pointer(array->ptrs[index]),
+ rcu_access_pointer(nsk->sk_reuseport_cb),
+ map_flags);
+ if (err)
+ goto put_file;
+
+ spin_lock_bh(&reuseport_lock);
+ /*
+ * Some of the checks only need reuseport_lock,
+ * but they are done under sk_callback_lock as well
+ * for simplicity's sake.
+ */
+ write_lock_bh(&nsk->sk_callback_lock);
+
+ osk = rcu_dereference_protected(array->ptrs[index],
+ lockdep_is_held(&reuseport_lock));
+ reuse = rcu_dereference_protected(nsk->sk_reuseport_cb,
+ lockdep_is_held(&reuseport_lock));
+ err = reuseport_array_update_check(array, nsk, osk, reuse, map_flags);
+ if (err)
+ goto put_file_unlock;
+
+ /* Ensure reuse->reuseport_id is set */
+ err = reuseport_get_id(reuse);
+ if (err < 0)
+ goto put_file_unlock;
+
+ WRITE_ONCE(nsk->sk_user_data, &array->ptrs[index]);
+ rcu_assign_pointer(array->ptrs[index], nsk);
+ free_osk = osk;
+ err = 0;
+
+put_file_unlock:
+ write_unlock_bh(&nsk->sk_callback_lock);
+
+ if (free_osk) {
+ write_lock_bh(&free_osk->sk_callback_lock);
+ WRITE_ONCE(free_osk->sk_user_data, NULL);
+ write_unlock_bh(&free_osk->sk_callback_lock);
+ }
+
+ spin_unlock_bh(&reuseport_lock);
+put_file:
+ fput(socket->file);
+ return err;
+}
+
+/* Called from syscall */
+static int reuseport_array_get_next_key(struct bpf_map *map, void *key,
+ void *next_key)
+{
+ struct reuseport_array *array = reuseport_array(map);
+ u32 index = key ? *(u32 *)key : U32_MAX;
+ u32 *next = (u32 *)next_key;
+
+ if (index >= array->map.max_entries) {
+ *next = 0;
+ return 0;
+ }
+
+ if (index == array->map.max_entries - 1)
+ return -ENOENT;
+
+ *next = index + 1;
+ return 0;
+}
+
+const struct bpf_map_ops reuseport_array_ops = {
+ .map_alloc_check = reuseport_array_alloc_check,
+ .map_alloc = reuseport_array_alloc,
+ .map_free = reuseport_array_free,
+ .map_lookup_elem = reuseport_array_lookup_elem,
+ .map_get_next_key = reuseport_array_get_next_key,
+ .map_delete_elem = reuseport_array_delete_elem,
+};
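
Installing a socket into the array is a syscall-side operation: the value is the socket's fd, and the checks in reuseport_array_update_check() above must pass. A raw bpf(2) sketch:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sockarray_set(int map_fd, __u32 index, int sock_fd)
{
	union bpf_attr attr;
	__u64 value = sock_fd;	/* assumes the map was created with u64 values */

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (__u64)(unsigned long)&index;
	attr.value  = (__u64)(unsigned long)&value;
	attr.flags  = BPF_ANY;

	return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
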
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index c4d75c52b4fc..0c1a696b041b 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -725,11 +725,8 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
{
bool ingress = !!(md->flags & BPF_F_INGRESS);
struct smap_psock *psock;
- struct scatterlist *sg;
int err = 0;
- sg = md->sg_data;
-
rcu_read_lock();
psock = smap_psock_sk(sk);
if (unlikely(!psock))
@@ -2501,6 +2498,7 @@ const struct bpf_map_ops sock_map_ops = {
.map_update_elem = sock_map_update_elem,
.map_delete_elem = sock_map_delete_elem,
.map_release_uref = sock_map_release,
+ .map_check_btf = map_check_no_btf,
};
const struct bpf_map_ops sock_hash_ops = {
@@ -2511,6 +2509,7 @@ const struct bpf_map_ops sock_hash_ops = {
.map_update_elem = sock_hash_update_elem,
.map_delete_elem = sock_hash_delete_elem,
.map_release_uref = sock_map_release,
+ .map_check_btf = map_check_no_btf,
};
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index b675a3f3d141..8061a439ef18 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -607,6 +607,7 @@ const struct bpf_map_ops stack_map_ops = {
.map_lookup_elem = stack_map_lookup_elem,
.map_update_elem = stack_map_update_elem,
.map_delete_elem = stack_map_delete_elem,
+ .map_check_btf = map_check_no_btf,
};
static int __init stack_map_init(void)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index a31a1ba0f8ea..8339d81cba1d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -103,6 +103,7 @@ int bpf_check_uarg_tail_zero(void __user *uaddr,
const struct bpf_map_ops bpf_map_offload_ops = {
.map_alloc = bpf_map_offload_map_alloc,
.map_free = bpf_map_offload_map_free,
+ .map_check_btf = map_check_no_btf,
};
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
@@ -181,32 +182,60 @@ int bpf_map_precharge_memlock(u32 pages)
return 0;
}
-static int bpf_map_charge_memlock(struct bpf_map *map)
+static int bpf_charge_memlock(struct user_struct *user, u32 pages)
{
- struct user_struct *user = get_current_user();
- unsigned long memlock_limit;
+ unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
+ atomic_long_sub(pages, &user->locked_vm);
+ return -EPERM;
+ }
+ return 0;
+}
- atomic_long_add(map->pages, &user->locked_vm);
+static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
+{
+ atomic_long_sub(pages, &user->locked_vm);
+}
- if (atomic_long_read(&user->locked_vm) > memlock_limit) {
- atomic_long_sub(map->pages, &user->locked_vm);
+static int bpf_map_init_memlock(struct bpf_map *map)
+{
+ struct user_struct *user = get_current_user();
+ int ret;
+
+ ret = bpf_charge_memlock(user, map->pages);
+ if (ret) {
free_uid(user);
- return -EPERM;
+ return ret;
}
map->user = user;
- return 0;
+ return ret;
}
-static void bpf_map_uncharge_memlock(struct bpf_map *map)
+static void bpf_map_release_memlock(struct bpf_map *map)
{
struct user_struct *user = map->user;
-
- atomic_long_sub(map->pages, &user->locked_vm);
+ bpf_uncharge_memlock(user, map->pages);
free_uid(user);
}
+int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
+{
+ int ret;
+
+ ret = bpf_charge_memlock(map->user, pages);
+ if (ret)
+ return ret;
+ map->pages += pages;
+ return ret;
+}
+
+void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
+{
+ bpf_uncharge_memlock(map->user, pages);
+ map->pages -= pages;
+}
+
static int bpf_map_alloc_id(struct bpf_map *map)
{
int id;
@@ -256,7 +285,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
{
struct bpf_map *map = container_of(work, struct bpf_map, work);
- bpf_map_uncharge_memlock(map);
+ bpf_map_release_memlock(map);
security_bpf_map_free(map);
/* implementation dependent freeing */
map->ops->map_free(map);
@@ -427,6 +456,34 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
return 0;
}
+int map_check_no_btf(const struct bpf_map *map,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type)
+{
+ return -ENOTSUPP;
+}
+
+static int map_check_btf(const struct bpf_map *map, const struct btf *btf,
+ u32 btf_key_id, u32 btf_value_id)
+{
+ const struct btf_type *key_type, *value_type;
+ u32 key_size, value_size;
+ int ret = 0;
+
+ key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
+ if (!key_type || key_size != map->key_size)
+ return -EINVAL;
+
+ value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
+ if (!value_type || value_size != map->value_size)
+ return -EINVAL;
+
+ if (map->ops->map_check_btf)
+ ret = map->ops->map_check_btf(map, key_type, value_type);
+
+ return ret;
+}
+
#define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
@@ -461,8 +518,7 @@ static int map_create(union bpf_attr *attr)
atomic_set(&map->refcnt, 1);
atomic_set(&map->usercnt, 1);
- if (bpf_map_support_seq_show(map) &&
- (attr->btf_key_type_id || attr->btf_value_type_id)) {
+ if (attr->btf_key_type_id || attr->btf_value_type_id) {
struct btf *btf;
if (!attr->btf_key_type_id || !attr->btf_value_type_id) {
@@ -476,8 +532,8 @@ static int map_create(union bpf_attr *attr)
goto free_map_nouncharge;
}
- err = map->ops->map_check_btf(map, btf, attr->btf_key_type_id,
- attr->btf_value_type_id);
+ err = map_check_btf(map, btf, attr->btf_key_type_id,
+ attr->btf_value_type_id);
if (err) {
btf_put(btf);
goto free_map_nouncharge;
@@ -492,7 +548,7 @@ static int map_create(union bpf_attr *attr)
if (err)
goto free_map_nouncharge;
- err = bpf_map_charge_memlock(map);
+ err = bpf_map_init_memlock(map);
if (err)
goto free_map_sec;
@@ -515,7 +571,7 @@ static int map_create(union bpf_attr *attr)
return err;
free_map:
- bpf_map_uncharge_memlock(map);
+ bpf_map_release_memlock(map);
free_map_sec:
security_bpf_map_free(map);
free_map_nouncharge:
@@ -575,7 +631,7 @@ static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
{
int refold;
- refold = __atomic_add_unless(&map->refcnt, 1, 0);
+ refold = atomic_fetch_add_unless(&map->refcnt, 1, 0);
if (refold >= BPF_MAX_REFCNT) {
__bpf_map_put(map, false);
@@ -656,6 +712,8 @@ static int map_lookup_elem(union bpf_attr *attr)
err = bpf_fd_array_map_lookup_elem(map, key, value);
} else if (IS_FD_HASH(map)) {
err = bpf_fd_htab_map_lookup_elem(map, key, value);
+ } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
+ err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
} else {
rcu_read_lock();
ptr = map->ops->map_lookup_elem(map, key);
@@ -762,6 +820,10 @@ static int map_update_elem(union bpf_attr *attr)
err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
attr->flags);
rcu_read_unlock();
+ } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
+ /* rcu_read_lock() is not needed */
+ err = bpf_fd_reuseport_array_update_elem(map, key, value,
+ attr->flags);
} else {
rcu_read_lock();
err = map->ops->map_update_elem(map, key, value, attr->flags);
@@ -929,6 +991,9 @@ static void free_used_maps(struct bpf_prog_aux *aux)
{
int i;
+ if (aux->cgroup_storage)
+ bpf_cgroup_storage_release(aux->prog, aux->cgroup_storage);
+
for (i = 0; i < aux->used_map_cnt; i++)
bpf_map_put(aux->used_maps[i]);
@@ -1144,7 +1209,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
int refold;
- refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);
+ refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0);
if (refold >= BPF_MAX_REFCNT) {
__bpf_prog_put(prog, false);
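
With map_check_btf() now doing the generic key/value size checks, a map type either supplies its own ->map_check_btf() or opts out via the map_check_no_btf() stub. A hypothetical per-type hook, mirroring the LPM trie's struct-only rule:

#include <linux/bpf.h>
#include <linux/btf.h>
#include <uapi/linux/btf.h>

/* Hypothetical hook: accept only struct-typed keys. */
static int example_map_check_btf(const struct bpf_map *map,
				 const struct btf_type *key_type,
				 const struct btf_type *value_type)
{
	return BTF_INFO_KIND(key_type->info) == BTF_KIND_STRUCT ?
	       0 : -EINVAL;
}
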
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 63aaac52a265..ca90679a7fe5 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1310,6 +1310,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
case BPF_PROG_TYPE_LWT_SEG6LOCAL:
+ case BPF_PROG_TYPE_SK_REUSEPORT:
/* dst_input() and dst_output() can't write for now */
if (t == BPF_WRITE)
return false;
@@ -2127,6 +2128,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
func_id != BPF_FUNC_current_task_under_cgroup)
goto error;
break;
+ case BPF_MAP_TYPE_CGROUP_STORAGE:
+ if (func_id != BPF_FUNC_get_local_storage)
+ goto error;
+ break;
/* devmap returns a pointer to a live net_device ifindex that we cannot
* allow to be modified from bpf side. So do not allow lookup elements
* for now.
@@ -2162,6 +2167,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
func_id != BPF_FUNC_msg_redirect_hash)
goto error;
break;
+ case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+ if (func_id != BPF_FUNC_sk_select_reuseport)
+ goto error;
+ break;
default:
break;
}
@@ -2209,6 +2218,14 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
goto error;
break;
+ case BPF_FUNC_get_local_storage:
+ if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE)
+ goto error;
+ break;
+ case BPF_FUNC_sk_select_reuseport:
+ if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
+ goto error;
+ break;
default:
break;
}
@@ -2533,6 +2550,16 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
}
regs = cur_regs(env);
+
+ /* check that flags argument in get_local_storage(map, flags) is 0,
+ * this is required because get_local_storage() can't return an error.
+ */
+ if (func_id == BPF_FUNC_get_local_storage &&
+ !register_is_null(&regs[BPF_REG_2])) {
+ verbose(env, "get_local_storage() doesn't support non-zero flags\n");
+ return -EINVAL;
+ }
+
/* reset caller saved regs */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, regs, caller_saved[i]);
@@ -2545,8 +2572,12 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
mark_reg_unknown(env, regs, BPF_REG_0);
} else if (fn->ret_type == RET_VOID) {
regs[BPF_REG_0].type = NOT_INIT;
- } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
- regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
+ } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
+ fn->ret_type == RET_PTR_TO_MAP_VALUE) {
+ if (fn->ret_type == RET_PTR_TO_MAP_VALUE)
+ regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
+ else
+ regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
/* There is no offset yet applied, variable or fixed */
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].off = 0;
@@ -3238,8 +3269,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
}
}
- /* check dest operand */
- err = check_reg_arg(env, insn->dst_reg, DST_OP);
+ /* check dest operand, mark as required later */
+ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
@@ -3265,6 +3296,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
/* case: R = imm
* remember the value we stored into this reg
*/
+ /* clear any state __mark_reg_known doesn't set */
+ mark_reg_unknown(env, regs, insn->dst_reg);
regs[insn->dst_reg].type = SCALAR_VALUE;
if (BPF_CLASS(insn->code) == BPF_ALU64) {
__mark_reg_known(regs + insn->dst_reg,
@@ -5054,7 +5087,7 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
}
if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
- !bpf_offload_dev_match(prog, map)) {
+ !bpf_offload_prog_map_match(prog, map)) {
verbose(env, "offload device mismatch between prog and map\n");
return -EINVAL;
}
@@ -5152,6 +5185,14 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
}
env->used_maps[env->used_map_cnt++] = map;
+ if (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE &&
+ bpf_cgroup_storage_assign(env->prog, map)) {
+ verbose(env,
+ "only one cgroup storage is allowed\n");
+ fdput(f);
+ return -EBUSY;
+ }
+
fdput(f);
next_insn:
insn++;
@@ -5178,6 +5219,10 @@ static void release_maps(struct bpf_verifier_env *env)
{
int i;
+ if (env->prog->aux->cgroup_storage)
+ bpf_cgroup_storage_release(env->prog,
+ env->prog->aux->cgroup_storage);
+
for (i = 0; i < env->used_map_cnt; i++)
bpf_map_put(env->used_maps[i]);
}
diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index b3c557476a8d..4ddf61e158f6 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -227,6 +227,5 @@ const struct bpf_map_ops xsk_map_ops = {
.map_lookup_elem = xsk_map_lookup_elem,
.map_update_elem = xsk_map_update_elem,
.map_delete_elem = xsk_map_delete_elem,
+ .map_check_btf = map_check_no_btf,
};
-
-
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 077370bf8964..35cf3d71f8aa 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -3557,7 +3557,9 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
key = &cft->lockdep_key;
#endif
kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
- cgroup_file_mode(cft), 0, cft->kf_ops, cft,
+ cgroup_file_mode(cft),
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ 0, cft->kf_ops, cft,
NULL, key);
if (IS_ERR(kn))
return PTR_ERR(kn);
diff --git a/kernel/compat.c b/kernel/compat.c
index 702aa846ddac..8e40efc2928a 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -324,35 +324,6 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len,
return ret;
}
-/* Todo: Delete these extern declarations when get/put_compat_itimerspec64()
- * are moved to kernel/time/time.c .
- */
-extern int __compat_get_timespec64(struct timespec64 *ts64,
- const struct compat_timespec __user *cts);
-extern int __compat_put_timespec64(const struct timespec64 *ts64,
- struct compat_timespec __user *cts);
-
-int get_compat_itimerspec64(struct itimerspec64 *its,
- const struct compat_itimerspec __user *uits)
-{
-
- if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) ||
- __compat_get_timespec64(&its->it_value, &uits->it_value))
- return -EFAULT;
- return 0;
-}
-EXPORT_SYMBOL_GPL(get_compat_itimerspec64);
-
-int put_compat_itimerspec64(const struct itimerspec64 *its,
- struct compat_itimerspec __user *uits)
-{
- if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) ||
- __compat_put_timespec64(&its->it_value, &uits->it_value))
- return -EFAULT;
- return 0;
-}
-EXPORT_SYMBOL_GPL(put_compat_itimerspec64);
-
/*
* We currently only need the following fields from the sigevent
* structure: sigev_value, sigev_signo, sig_notify and (sometimes
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 2f8f338e77cf..ed44d7d34c2d 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -60,6 +60,7 @@ struct cpuhp_cpu_state {
bool rollback;
bool single;
bool bringup;
+ bool booted_once;
struct hlist_node *node;
struct hlist_node *last;
enum cpuhp_state cb_state;
@@ -290,6 +291,12 @@ void cpus_read_lock(void)
}
EXPORT_SYMBOL_GPL(cpus_read_lock);
+int cpus_read_trylock(void)
+{
+ return percpu_down_read_trylock(&cpu_hotplug_lock);
+}
+EXPORT_SYMBOL_GPL(cpus_read_trylock);
+
void cpus_read_unlock(void)
{
percpu_up_read(&cpu_hotplug_lock);
@@ -342,6 +349,85 @@ void cpu_hotplug_enable(void)
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif /* CONFIG_HOTPLUG_CPU */
+#ifdef CONFIG_HOTPLUG_SMT
+enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+EXPORT_SYMBOL_GPL(cpu_smt_control);
+
+static bool cpu_smt_available __read_mostly;
+
+void __init cpu_smt_disable(bool force)
+{
+ if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
+ cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+ return;
+
+ if (force) {
+ pr_info("SMT: Force disabled\n");
+ cpu_smt_control = CPU_SMT_FORCE_DISABLED;
+ } else {
+ cpu_smt_control = CPU_SMT_DISABLED;
+ }
+}
+
+/*
+ * The decision whether SMT is supported can only be done after the full
+ * CPU identification. Called from architecture code before non boot CPUs
+ * are brought up.
+ */
+void __init cpu_smt_check_topology_early(void)
+{
+ if (!topology_smt_supported())
+ cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+}
+
+/*
+ * If SMT was disabled by BIOS, detect it here, after the CPUs have been
+ * brought online. This ensures the smt/l1tf sysfs entries are consistent
+ * with reality. cpu_smt_available is set to true during the bringup of
+ * non-boot CPUs when an SMT sibling is detected. Note, this may overwrite
+ * cpu_smt_control's previous setting.
+ */
+void __init cpu_smt_check_topology(void)
+{
+ if (!cpu_smt_available)
+ cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+}
+
+static int __init smt_cmdline_disable(char *str)
+{
+ cpu_smt_disable(str && !strcmp(str, "force"));
+ return 0;
+}
+early_param("nosmt", smt_cmdline_disable);
+
+static inline bool cpu_smt_allowed(unsigned int cpu)
+{
+ if (topology_is_primary_thread(cpu))
+ return true;
+
+ /*
+ * If the CPU is not a 'primary' thread and the booted_once bit is
+ * set then the processor has SMT support. Store this information
+ * for the late check of SMT support in cpu_smt_check_topology().
+ */
+ if (per_cpu(cpuhp_state, cpu).booted_once)
+ cpu_smt_available = true;
+
+ if (cpu_smt_control == CPU_SMT_ENABLED)
+ return true;
+
+ /*
+ * On x86 it's required to boot all logical CPUs at least once so
+ * that the init code can get a chance to set CR4.MCE on each
+ * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
+ * core will shut down the machine.
+ */
+ return !per_cpu(cpuhp_state, cpu).booted_once;
+}
+#else
+static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
+#endif
+
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
@@ -422,6 +508,16 @@ static int bringup_wait_for_ap(unsigned int cpu)
stop_machine_unpark(cpu);
kthread_unpark(st->thread);
+ /*
+ * SMT soft disabling on X86 requires to bring the CPU out of the
+ * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
+ * CPU marked itself as booted_once in cpu_notify_starting() so the
+ * cpu_smt_allowed() check will now return false if this is not the
+ * primary sibling.
+ */
+ if (!cpu_smt_allowed(cpu))
+ return -ECANCELED;
+
if (st->target <= CPUHP_AP_ONLINE_IDLE)
return 0;
@@ -754,7 +850,6 @@ static int takedown_cpu(unsigned int cpu)
/* Park the smpboot threads */
kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
- smpboot_park_threads(cpu);
/*
* Prevent irq alloc/free while the dying cpu reorganizes the
@@ -907,20 +1002,19 @@ out:
return ret;
}
+static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
+{
+ if (cpu_hotplug_disabled)
+ return -EBUSY;
+ return _cpu_down(cpu, 0, target);
+}
+
static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
int err;
cpu_maps_update_begin();
-
- if (cpu_hotplug_disabled) {
- err = -EBUSY;
- goto out;
- }
-
- err = _cpu_down(cpu, 0, target);
-
-out:
+ err = cpu_down_maps_locked(cpu, target);
cpu_maps_update_done();
return err;
}
@@ -949,6 +1043,7 @@ void notify_cpu_starting(unsigned int cpu)
int ret;
rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
+ st->booted_once = true;
while (st->state < target) {
st->state++;
ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
@@ -1058,6 +1153,10 @@ static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
err = -EBUSY;
goto out;
}
+ if (!cpu_smt_allowed(cpu)) {
+ err = -EPERM;
+ goto out;
+ }
err = _cpu_up(cpu, 0, target);
out:
@@ -1274,7 +1373,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
* otherwise a RCU stall occurs.
*/
[CPUHP_TIMERS_PREPARE] = {
- .name = "timers:dead",
+ .name = "timers:prepare",
.startup.single = timers_prepare_cpu,
.teardown.single = timers_dead_cpu,
},
@@ -1332,7 +1431,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
[CPUHP_AP_SMPBOOT_THREADS] = {
.name = "smpboot/threads:online",
.startup.single = smpboot_unpark_threads,
- .teardown.single = NULL,
+ .teardown.single = smpboot_park_threads,
},
[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
.name = "irq/affinity:online",
@@ -1344,6 +1443,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
.startup.single = perf_event_init_cpu,
.teardown.single = perf_event_exit_cpu,
},
+ [CPUHP_AP_WATCHDOG_ONLINE] = {
+ .name = "lockup_detector:online",
+ .startup.single = lockup_detector_online_cpu,
+ .teardown.single = lockup_detector_offline_cpu,
+ },
[CPUHP_AP_WORKQUEUE_ONLINE] = {
.name = "workqueue:online",
.startup.single = workqueue_online_cpu,
@@ -1906,10 +2010,172 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = {
NULL
};
+#ifdef CONFIG_HOTPLUG_SMT
+
+static const char *smt_states[] = {
+ [CPU_SMT_ENABLED] = "on",
+ [CPU_SMT_DISABLED] = "off",
+ [CPU_SMT_FORCE_DISABLED] = "forceoff",
+ [CPU_SMT_NOT_SUPPORTED] = "notsupported",
+};
+
+static ssize_t
+show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
+}
+
+static void cpuhp_offline_cpu_device(unsigned int cpu)
+{
+ struct device *dev = get_cpu_device(cpu);
+
+ dev->offline = true;
+ /* Tell user space about the state change */
+ kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+}
+
+static void cpuhp_online_cpu_device(unsigned int cpu)
+{
+ struct device *dev = get_cpu_device(cpu);
+
+ dev->offline = false;
+ /* Tell user space about the state change */
+ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+}
+
+static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+{
+ int cpu, ret = 0;
+
+ cpu_maps_update_begin();
+ for_each_online_cpu(cpu) {
+ if (topology_is_primary_thread(cpu))
+ continue;
+ ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
+ if (ret)
+ break;
+ /*
+ * As this needs to hold the cpu maps lock, it's impossible
+ * to call device_offline() because that ends up calling
+ * cpu_down(), which takes the cpu maps lock. The cpu maps
+ * lock needs to be held as this might race against in-kernel
+ * abusers of the hotplug machinery (thermal management).
+ *
+ * So nothing would update device:offline state. That would
+ * leave the sysfs entry stale and prevent onlining after
+ * SMT control has been changed to 'on' again. This is
+ * called under the sysfs hotplug lock, so it is properly
+ * serialized against the regular offline usage.
+ */
+ cpuhp_offline_cpu_device(cpu);
+ }
+ if (!ret)
+ cpu_smt_control = ctrlval;
+ cpu_maps_update_done();
+ return ret;
+}
+
+static int cpuhp_smt_enable(void)
+{
+ int cpu, ret = 0;
+
+ cpu_maps_update_begin();
+ cpu_smt_control = CPU_SMT_ENABLED;
+ for_each_present_cpu(cpu) {
+ /* Skip online CPUs and CPUs on offline nodes */
+ if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+ continue;
+ ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
+ if (ret)
+ break;
+ /* See comment in cpuhp_smt_disable() */
+ cpuhp_online_cpu_device(cpu);
+ }
+ cpu_maps_update_done();
+ return ret;
+}
+
+static ssize_t
+store_smt_control(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ctrlval, ret;
+
+ if (sysfs_streq(buf, "on"))
+ ctrlval = CPU_SMT_ENABLED;
+ else if (sysfs_streq(buf, "off"))
+ ctrlval = CPU_SMT_DISABLED;
+ else if (sysfs_streq(buf, "forceoff"))
+ ctrlval = CPU_SMT_FORCE_DISABLED;
+ else
+ return -EINVAL;
+
+ if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
+ return -EPERM;
+
+ if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+ return -ENODEV;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ if (ctrlval != cpu_smt_control) {
+ switch (ctrlval) {
+ case CPU_SMT_ENABLED:
+ ret = cpuhp_smt_enable();
+ break;
+ case CPU_SMT_DISABLED:
+ case CPU_SMT_FORCE_DISABLED:
+ ret = cpuhp_smt_disable(ctrlval);
+ break;
+ }
+ }
+
+ unlock_device_hotplug();
+ return ret ? ret : count;
+}
+static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
+
+static ssize_t
+show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ bool active = topology_max_smt_threads() > 1;
+
+ return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
+}
+static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
+
+static struct attribute *cpuhp_smt_attrs[] = {
+ &dev_attr_control.attr,
+ &dev_attr_active.attr,
+ NULL
+};
+
+static const struct attribute_group cpuhp_smt_attr_group = {
+ .attrs = cpuhp_smt_attrs,
+ .name = "smt",
+};
+
+static int __init cpu_smt_state_init(void)
+{
+ return sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpuhp_smt_attr_group);
+}
+
+#else
+static inline int cpu_smt_state_init(void) { return 0; }
+#endif
+
static int __init cpuhp_sysfs_init(void)
{
int cpu, ret;
+ ret = cpu_smt_state_init();
+ if (ret)
+ return ret;
+
ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
&cpuhp_cpu_root_attr_group);
if (ret)
@@ -2012,5 +2278,8 @@ void __init boot_cpu_init(void)
*/
void __init boot_cpu_hotplug_init(void)
{
- per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
+#ifdef CONFIG_SMP
+ this_cpu_write(cpuhp_state.booted_once, true);
+#endif
+ this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}
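For illustration, the interface added above is driven from userspace through /sys/devices/system/cpu/smt/control, which accepts "on", "off" and "forceoff". A minimal sketch, assuming CONFIG_HOTPLUG_SMT=y and root privileges (not part of this patch):

/* Minimal userspace sketch; assumes CONFIG_HOTPLUG_SMT=y, run as root. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char state[32];
	ssize_t n;
	int fd;

	/* Writing "off" takes every non-primary SMT sibling offline. */
	fd = open("/sys/devices/system/cpu/smt/control", O_WRONLY);
	if (fd >= 0) {
		if (write(fd, "off", 3) != 3)
			perror("write smt/control");
		close(fd);
	}

	/* Reads back one of "on", "off", "forceoff" or "notsupported". */
	fd = open("/sys/devices/system/cpu/smt/control", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, state, sizeof(state) - 1);
		if (n > 0) {
			state[n] = '\0';
			printf("smt control: %s", state);
		}
		close(fd);
	}
	return 0;
}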
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 8be8106270c2..c2860c5a9e96 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -180,10 +180,10 @@ int dma_direct_supported(struct device *dev, u64 mask)
return 0;
#endif
/*
- * Various PCI/PCIe bridges have broken support for > 32bit DMA even
- * if the device itself might support it.
+ * Upstream PCI/PCIe bridges or SoC interconnects may not carry
+ * as many DMA address bits as the device itself supports.
*/
- if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32))
+ if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
return 0;
return 1;
}
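With the bus_dma_mask check above, dma_set_mask() can now fail when an upstream bridge carries fewer address bits than the device itself; the usual driver response is to retry with a narrower mask. A hedged sketch of that fallback (the probe helper name is invented):

/* Illustrative driver sketch only; example_probe_dma() is invented. */
#include <linux/dma-mapping.h>

static int example_probe_dma(struct device *dev)
{
	/*
	 * Try 64-bit addressing first; dma_direct_supported() above now
	 * rejects masks wider than dev->bus_dma_mask, so fall back to
	 * 32-bit addressing before giving up.
	 */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -EIO;
	return 0;
}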
diff --git a/kernel/dma/noncoherent.c b/kernel/dma/noncoherent.c
index 79e9a757387f..031fe235d958 100644
--- a/kernel/dma/noncoherent.c
+++ b/kernel/dma/noncoherent.c
@@ -49,11 +49,13 @@ static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
return nents;
}
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
+ arch_sync_dma_for_cpu_all(dev);
}
static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
@@ -64,6 +66,7 @@ static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
for_each_sg(sgl, sg, nents, i)
arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_cpu_all(dev);
}
static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
@@ -89,7 +92,8 @@ const struct dma_map_ops dma_noncoherent_ops = {
.sync_sg_for_device = dma_noncoherent_sync_sg_for_device,
.map_page = dma_noncoherent_map_page,
.map_sg = dma_noncoherent_map_sg,
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
.sync_single_for_cpu = dma_noncoherent_sync_single_for_cpu,
.sync_sg_for_cpu = dma_noncoherent_sync_sg_for_cpu,
.unmap_page = dma_noncoherent_unmap_page,
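The _ALL variant gives architectures a hook that runs once per sync/unmap, after the per-range cache maintenance. A hypothetical arch-side sketch is below; flush_global_io_buffer() is an invented stand-in for whatever system-level buffer the platform must drain, and the hook is only built when the architecture selects CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL:

/* Hypothetical arch sketch; flush_global_io_buffer() is invented. */
#include <linux/dma-noncoherent.h>

void arch_sync_dma_for_cpu_all(struct device *dev)
{
	/*
	 * Called after the per-range arch_sync_dma_for_cpu() pass:
	 * drain the platform's global I/O write buffer so the CPU
	 * observes all DMA'd data.
	 */
	flush_global_io_buffer();
}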
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 904541055792..4f8a6dbf0b60 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -17,6 +17,8 @@
* 08/12/11 beckyb Add highmem support
*/
+#define pr_fmt(fmt) "software IO TLB: " fmt
+
#include <linux/cache.h>
#include <linux/dma-direct.h>
#include <linux/mm.h>
@@ -162,20 +164,16 @@ static bool no_iotlb_memory;
void swiotlb_print_info(void)
{
unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
- unsigned char *vstart, *vend;
if (no_iotlb_memory) {
- pr_warn("software IO TLB: No low mem\n");
+ pr_warn("No low mem\n");
return;
}
- vstart = phys_to_virt(io_tlb_start);
- vend = phys_to_virt(io_tlb_end);
-
- printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
+ pr_info("mapped [mem %#010llx-%#010llx] (%luMB)\n",
(unsigned long long)io_tlb_start,
(unsigned long long)io_tlb_end,
- bytes >> 20, vstart, vend - 1);
+ bytes >> 20);
}
/*
@@ -275,7 +273,7 @@ swiotlb_init(int verbose)
if (io_tlb_start)
memblock_free_early(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
- pr_warn("Cannot allocate SWIOTLB buffer");
+ pr_warn("Cannot allocate buffer");
no_iotlb_memory = true;
}
@@ -317,8 +315,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
return -ENOMEM;
}
if (order != get_order(bytes)) {
- printk(KERN_WARNING "Warning: only able to allocate %ld MB "
- "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+ pr_warn("only able to allocate %ld MB\n",
+ (PAGE_SIZE << order) >> 20);
io_tlb_nslabs = SLABS_PER_PAGE << order;
}
rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
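The message trimming above works because pr_fmt(), when defined before the printk headers are pulled in, is prepended to every pr_*() format string in the file. A minimal sketch of the pattern:

/* Minimal sketch of the pr_fmt() pattern adopted above. */
#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/printk.h>

static void example(void)
{
	/* Emits: "software IO TLB: No low mem" */
	pr_warn("No low mem\n");
}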
diff --git a/kernel/events/core.c b/kernel/events/core.c
index eec2d5fb676b..80f456ec5d89 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1656,7 +1656,7 @@ perf_event_groups_next(struct perf_event *event)
typeof(*event), group_node))
/*
- * Add a event from the lists for its context.
+ * Add an event from the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
*/
static void
@@ -1844,7 +1844,7 @@ static void perf_group_attach(struct perf_event *event)
}
/*
- * Remove a event from the lists for its context.
+ * Remove an event from the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
*/
static void
@@ -2148,7 +2148,7 @@ static void __perf_event_disable(struct perf_event *event,
}
/*
- * Disable a event.
+ * Disable an event.
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
@@ -2677,7 +2677,7 @@ static void __perf_event_enable(struct perf_event *event,
}
/*
- * Enable a event.
+ * Enable an event.
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
@@ -2755,7 +2755,7 @@ static int __perf_event_stop(void *info)
* events will refuse to restart because of rb::aux_mmap_count==0,
* see comments in perf_aux_output_begin().
*
- * Since this is happening on a event-local CPU, no trace is lost
+ * Since this is happening on an event-local CPU, no trace is lost
* while restarting.
*/
if (sd->restart)
@@ -4827,7 +4827,7 @@ __perf_read(struct perf_event *event, char __user *buf, size_t count)
int ret;
/*
- * Return end-of-file for a read on a event that is in
+ * Return end-of-file for a read on an event that is in
* error state (i.e. because it was pinned but it couldn't be
* scheduled on to the CPU at some point).
*/
@@ -5246,8 +5246,8 @@ void perf_event_update_userpage(struct perf_event *event)
userpg = rb->user_page;
/*
- * Disable preemption so as to not let the corresponding user-space
- * spin too long if we get preempted.
+ * Disable preemption to guarantee consistent time stamps are stored to
+ * the user page.
*/
preempt_disable();
++userpg->lock;
@@ -5273,11 +5273,11 @@ unlock:
}
EXPORT_SYMBOL_GPL(perf_event_update_userpage);
-static int perf_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
{
struct perf_event *event = vmf->vma->vm_file->private_data;
struct ring_buffer *rb;
- int ret = VM_FAULT_SIGBUS;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
if (vmf->flags & FAULT_FLAG_MKWRITE) {
if (vmf->pgoff == 0)
@@ -9904,7 +9904,7 @@ enabled:
}
/*
- * Allocate and initialize a event structure
+ * Allocate and initialize an event structure
*/
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
@@ -11235,7 +11235,7 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
}
/*
- * Inherit a event from parent task to child task.
+ * Inherit an event from parent task to child task.
*
* Returns:
* - valid pointer on success
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 6e28d2866be5..b3814fce5ecb 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -345,13 +345,13 @@ void release_bp_slot(struct perf_event *bp)
mutex_unlock(&nr_bp_mutex);
}
-static int __modify_bp_slot(struct perf_event *bp, u64 old_type)
+static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
int err;
__release_bp_slot(bp, old_type);
- err = __reserve_bp_slot(bp, bp->attr.bp_type);
+ err = __reserve_bp_slot(bp, new_type);
if (err) {
/*
* Reserve the old_type slot back in case
@@ -367,12 +367,12 @@ static int __modify_bp_slot(struct perf_event *bp, u64 old_type)
return err;
}
-static int modify_bp_slot(struct perf_event *bp, u64 old_type)
+static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
int ret;
mutex_lock(&nr_bp_mutex);
- ret = __modify_bp_slot(bp, old_type);
+ ret = __modify_bp_slot(bp, old_type, new_type);
mutex_unlock(&nr_bp_mutex);
return ret;
}
@@ -400,16 +400,18 @@ int dbg_release_bp_slot(struct perf_event *bp)
return 0;
}
-static int validate_hw_breakpoint(struct perf_event *bp)
+static int hw_breakpoint_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- int ret;
+ int err;
- ret = arch_validate_hwbkpt_settings(bp);
- if (ret)
- return ret;
+ err = hw_breakpoint_arch_parse(bp, attr, hw);
+ if (err)
+ return err;
- if (arch_check_bp_in_kernelspace(bp)) {
- if (bp->attr.exclude_kernel)
+ if (arch_check_bp_in_kernelspace(hw)) {
+ if (attr->exclude_kernel)
return -EINVAL;
/*
* Don't let unprivileged users set a breakpoint in the trap
@@ -424,19 +426,22 @@ static int validate_hw_breakpoint(struct perf_event *bp)
int register_perf_hw_breakpoint(struct perf_event *bp)
{
- int ret;
-
- ret = reserve_bp_slot(bp);
- if (ret)
- return ret;
+ struct arch_hw_breakpoint hw;
+ int err;
- ret = validate_hw_breakpoint(bp);
+ err = reserve_bp_slot(bp);
+ if (err)
+ return err;
- /* if arch_validate_hwbkpt_settings() fails then release bp slot */
- if (ret)
+ err = hw_breakpoint_parse(bp, &bp->attr, &hw);
+ if (err) {
release_bp_slot(bp);
+ return err;
+ }
- return ret;
+ bp->hw.info = hw;
+
+ return 0;
}
/**
@@ -456,35 +461,44 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
+static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
+ struct perf_event_attr *from)
+{
+ to->bp_addr = from->bp_addr;
+ to->bp_type = from->bp_type;
+ to->bp_len = from->bp_len;
+ to->disabled = from->disabled;
+}
+
int
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
bool check)
{
- u64 old_addr = bp->attr.bp_addr;
- u64 old_len = bp->attr.bp_len;
- int old_type = bp->attr.bp_type;
- bool modify = attr->bp_type != old_type;
- int err = 0;
+ struct arch_hw_breakpoint hw;
+ int err;
- bp->attr.bp_addr = attr->bp_addr;
- bp->attr.bp_type = attr->bp_type;
- bp->attr.bp_len = attr->bp_len;
+ err = hw_breakpoint_parse(bp, attr, &hw);
+ if (err)
+ return err;
- if (check && memcmp(&bp->attr, attr, sizeof(*attr)))
- return -EINVAL;
+ if (check) {
+ struct perf_event_attr old_attr;
- err = validate_hw_breakpoint(bp);
- if (!err && modify)
- err = modify_bp_slot(bp, old_type);
+ old_attr = bp->attr;
+ hw_breakpoint_copy_attr(&old_attr, attr);
+ if (memcmp(&old_attr, attr, sizeof(*attr)))
+ return -EINVAL;
+ }
- if (err) {
- bp->attr.bp_addr = old_addr;
- bp->attr.bp_type = old_type;
- bp->attr.bp_len = old_len;
- return err;
+ if (bp->attr.bp_type != attr->bp_type) {
+ err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
+ if (err)
+ return err;
}
- bp->attr.disabled = attr->disabled;
+ hw_breakpoint_copy_attr(&bp->attr, attr);
+ bp->hw.info = hw;
+
return 0;
}
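Both registration and modification now funnel through hw_breakpoint_parse(), which fills a scratch arch_hw_breakpoint that is only committed to bp->hw.info on success. From a consumer's point of view nothing changes; a minimal in-kernel sketch using the exported API (handler and install helper names are invented):

/* Illustrative consumer sketch; demo_* names are invented. */
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

static void demo_hbp_handler(struct perf_event *bp,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	pr_info("write watchpoint at 0x%llx hit\n", bp->attr.bp_addr);
}

static struct perf_event *demo_install(void *addr, struct task_struct *tsk)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)addr;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	/* Ends up in hw_breakpoint_parse() via register_perf_hw_breakpoint(). */
	return register_user_hw_breakpoint(&attr, demo_hbp_handler, NULL, tsk);
}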
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index ccc579a7d32e..aed1ba569954 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -918,7 +918,7 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
EXPORT_SYMBOL_GPL(uprobe_register);
/*
- * uprobe_apply - unregister a already registered probe.
+ * uprobe_apply - unregister an already registered probe.
* @inode: the file in which the probe has to be removed.
* @offset: offset from the start of the file.
* @uc: consumer which wants to add more or remove some breakpoints
@@ -947,7 +947,7 @@ int uprobe_apply(struct inode *inode, loff_t offset,
}
/*
- * uprobe_unregister - unregister a already registered probe.
+ * uprobe_unregister - unregister an already registered probe.
* @inode: the file in which the probe has to be removed.
* @offset: offset from the start of the file.
* @uc: identify which probe if multiple probes are colocated.
@@ -1403,7 +1403,7 @@ static struct return_instance *free_ret_instance(struct return_instance *ri)
/*
* Called with no locks held.
- * Called in context of a exiting or a exec-ing thread.
+ * Called in context of an exiting or an exec-ing thread.
*/
void uprobe_free_utask(struct task_struct *t)
{
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
index 5349c91c2298..bc80a4e268c0 100644
--- a/kernel/fail_function.c
+++ b/kernel/fail_function.c
@@ -184,9 +184,6 @@ static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
if (should_fail(&fei_fault_attr, 1)) {
regs_set_return_value(regs, attr->retval);
override_function_with_return(regs);
- /* Kprobe specific fixup */
- reset_current_kprobe();
- preempt_enable_no_resched();
return 1;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 1b27babc4c78..33112315b5c0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -866,6 +866,11 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
tsk->fail_nth = 0;
#endif
+#ifdef CONFIG_BLK_CGROUP
+ tsk->throttle_queue = NULL;
+ tsk->use_memdelay = 0;
+#endif
+
return tsk;
free_stack:
@@ -2276,6 +2281,8 @@ static void sighand_ctor(void *data)
void __init proc_caches_init(void)
{
+ unsigned int mm_size;
+
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
@@ -2292,15 +2299,16 @@ void __init proc_caches_init(void)
sizeof(struct fs_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
NULL);
+
/*
- * FIXME! The "sizeof(struct mm_struct)" currently includes the
- * whole struct cpumask for the OFFSTACK case. We could change
- * this to *only* allocate as much of it as required by the
- * maximum number of CPU's we can ever have. The cpumask_allocation
- * is at the end of the structure, exactly for that reason.
+ * The mm_cpumask is located at the end of mm_struct, and is
+ * dynamically sized based on the maximum CPU number this system
+ * can have, taking hotplug into account (nr_cpu_ids).
*/
+ mm_size = sizeof(struct mm_struct) + cpumask_size();
+
mm_cachep = kmem_cache_create_usercopy("mm_struct",
- sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
+ mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
offsetof(struct mm_struct, saved_auxv),
sizeof_field(struct mm_struct, saved_auxv),
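The new sizing works because mm_cpumask() addresses bitmap storage at the end of mm_struct, of which only the first nr_cpu_ids bits are ever used. The same trailing-bitmap pattern in miniature (struct and helper are invented):

/* Minimal sketch of the trailing-cpumask pattern; names are invented. */
#include <linux/cpumask.h>
#include <linux/slab.h>

struct demo {
	int nr_users;
	/* Must stay last: sized at allocation time, like mm_cpumask. */
	unsigned long cpu_bitmap[];
};

static struct demo *demo_alloc(void)
{
	/* cpumask_size() is enough for every CPU this system can have. */
	return kzalloc(sizeof(struct demo) + cpumask_size(), GFP_KERNEL);
}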
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 6f56a9e219fa..b162b74611e4 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -15,7 +15,9 @@
atomic_t system_freezing_cnt = ATOMIC_INIT(0);
EXPORT_SYMBOL(system_freezing_cnt);
-/* indicate whether PM freezing is in effect, protected by pm_mutex */
+/*
+ * Indicate whether PM freezing is in effect, protected by
+ * system_transition_mutex.
+ */
bool pm_freezing;
bool pm_nosig_freezing;
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index c6766f326072..5f3e2baefca9 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -134,7 +134,6 @@ config GENERIC_IRQ_DEBUGFS
endmenu
config GENERIC_IRQ_MULTI_HANDLER
- depends on !MULTI_IRQ_HANDLER
bool
help
Allow to specify the low level IRQ handler at run time.
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index afc7f902d74a..578d0e5f1b5b 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -443,6 +443,7 @@ static void free_desc(unsigned int irq)
* We free the descriptor, masks and stat fields via RCU. That
* allows demultiplex interrupts to do rcu based management of
* the child interrupts.
+ * This also allows us to use rcu in kstat_irqs_usr().
*/
call_rcu(&desc->rcu, delayed_free_desc);
}
@@ -928,17 +929,17 @@ unsigned int kstat_irqs(unsigned int irq)
* kstat_irqs_usr - Get the statistics for an interrupt
* @irq: The interrupt number
*
- * Returns the sum of interrupt counts on all cpus since boot for
- * @irq. Contrary to kstat_irqs() this can be called from any
- * preemptible context. It's protected against concurrent removal of
- * an interrupt descriptor when sparse irqs are enabled.
+ * Returns the sum of interrupt counts on all cpus since boot for @irq.
+ * Contrary to kstat_irqs() this can be called from any context.
+ * It uses rcu to protect the lookup, since a concurrent removal of an
+ * interrupt descriptor observes an rcu grace period before
+ * delayed_free_desc()/irq_kobj_release().
*/
unsigned int kstat_irqs_usr(unsigned int irq)
{
unsigned int sum;
- irq_lock_sparse();
+ rcu_read_lock();
sum = kstat_irqs(irq);
- irq_unlock_sparse();
+ rcu_read_unlock();
return sum;
}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9a8b7ba9aa88..fb86146037a7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -790,9 +790,19 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
static int irq_wait_for_interrupt(struct irqaction *action)
{
- set_current_state(TASK_INTERRUPTIBLE);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
- while (!kthread_should_stop()) {
+ if (kthread_should_stop()) {
+ /* may need to run one last time */
+ if (test_and_clear_bit(IRQTF_RUNTHREAD,
+ &action->thread_flags)) {
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+ __set_current_state(TASK_RUNNING);
+ return -1;
+ }
if (test_and_clear_bit(IRQTF_RUNTHREAD,
&action->thread_flags)) {
@@ -800,10 +810,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
return 0;
}
schedule();
- set_current_state(TASK_INTERRUPTIBLE);
}
- __set_current_state(TASK_RUNNING);
- return -1;
}
/*
@@ -1024,11 +1031,8 @@ static int irq_thread(void *data)
/*
* This is the regular exit path. __free_irq() is stopping the
* thread via kthread_stop() after calling
- * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
- * oneshot mask bit can be set. We cannot verify that as we
- * cannot touch the oneshot mask at this point anymore as
- * __setup_irq() might have given out currents thread_mask
- * again.
+ * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
+ * oneshot mask bit can be set.
*/
task_work_cancel(current, irq_thread_dtor);
return 0;
@@ -1251,8 +1255,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
/*
* Protects against a concurrent __free_irq() call which might wait
- * for synchronize_irq() to complete without holding the optional
- * chip bus lock and desc->lock.
+ * for synchronize_hardirq() to complete without holding the optional
+ * chip bus lock and desc->lock. Also protects against handing out
+ * a recycled oneshot thread_mask bit while it's still in use by
+ * its previous owner.
*/
mutex_lock(&desc->request_mutex);
@@ -1571,9 +1577,6 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
- if (!desc)
- return NULL;
-
mutex_lock(&desc->request_mutex);
chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, flags);
@@ -1620,11 +1623,11 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
/*
* Drop bus_lock here so the changes which were done in the chip
* callbacks above are synced out to the irq chips which hang
- * behind a slow bus (I2C, SPI) before calling synchronize_irq().
+ * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
*
* Aside of that the bus_lock can also be taken from the threaded
* handler in irq_finalize_oneshot() which results in a deadlock
- * because synchronize_irq() would wait forever for the thread to
+ * because kthread_stop() would wait forever for the thread to
* complete, which is blocked on the bus lock.
*
* The still held desc->request_mutex() protects against a
@@ -1636,7 +1639,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
unregister_handler_proc(irq, action);
/* Make sure it's not being used on another CPU: */
- synchronize_irq(irq);
+ synchronize_hardirq(irq);
#ifdef CONFIG_DEBUG_SHIRQ
/*
@@ -1645,7 +1648,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
* is so by doing an extra call to the handler ....
*
* ( We do this after actually deregistering it, to make sure that a
- * 'real' IRQ doesn't run in * parallel with our fake. )
+ * 'real' IRQ doesn't run in parallel with our fake. )
*/
if (action->flags & IRQF_SHARED) {
local_irq_save(flags);
@@ -1654,6 +1657,12 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
}
#endif
+ /*
+ * The action has already been removed above, but the thread writes
+ * its oneshot mask bit when it completes. However, request_mutex is
+ * held across this, which prevents __setup_irq() from handing out
+ * the same bit to a newly requested action.
+ */
if (action->thread) {
kthread_stop(action->thread);
put_task_struct(action->thread);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 37eda10f5c36..da9addb8d655 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -475,22 +475,24 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
}
- irq_lock_sparse();
+ rcu_read_lock();
desc = irq_to_desc(i);
if (!desc)
goto outsparse;
- raw_spin_lock_irqsave(&desc->lock, flags);
- for_each_online_cpu(j)
- any_count |= kstat_irqs_cpu(i, j);
- action = desc->action;
- if ((!action || irq_desc_is_chained(desc)) && !any_count)
- goto out;
+ if (desc->kstat_irqs)
+ for_each_online_cpu(j)
+ any_count |= *per_cpu_ptr(desc->kstat_irqs, j);
+
+ if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
+ goto outsparse;
seq_printf(p, "%*d: ", prec, i);
for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+ seq_printf(p, "%10u ", desc->kstat_irqs ?
+ *per_cpu_ptr(desc->kstat_irqs, j) : 0);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->irq_data.chip) {
if (desc->irq_data.chip->irq_print_chip)
desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
@@ -511,6 +513,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (desc->name)
seq_printf(p, "-%-8s", desc->name);
+ action = desc->action;
if (action) {
seq_printf(p, " %s", action->name);
while ((action = action->next) != NULL)
@@ -518,10 +521,9 @@ int show_interrupts(struct seq_file *p, void *v)
}
seq_putc(p, '\n');
-out:
raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
- irq_unlock_sparse();
+ rcu_read_unlock();
return 0;
}
#endif
diff --git a/kernel/kexec.c b/kernel/kexec.c
index aed8fb2564b3..68559808fdfa 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -11,6 +11,7 @@
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
+#include <linux/security.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
@@ -195,10 +196,17 @@ out:
static inline int kexec_load_check(unsigned long nr_segments,
unsigned long flags)
{
+ int result;
+
/* We only trust the superuser with rebooting the system. */
if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
return -EPERM;
+ /* Permit LSMs and IMA to fail the kexec */
+ result = security_kernel_load_data(LOADING_KEXEC_IMAGE);
+ if (result < 0)
+ return result;
+
/*
* Verify we have a legal set of flags
* This leaves us room for future extensions.
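security_kernel_load_data() fans the decision out to the registered LSMs (and IMA). A hypothetical security module could use it to veto the unverifiable kexec_load() path while leaving kexec_file_load() to its own hooks; the sketch below is illustrative only, with the "demo" module scaffolding abbreviated:

/* Hypothetical LSM sketch; the "demo" module scaffolding is abbreviated. */
#include <linux/lsm_hooks.h>

static int demo_kernel_load_data(enum kernel_load_data_id id)
{
	/* Refuse the unverifiable kexec_load() syscall path. */
	if (id == LOADING_KEXEC_IMAGE)
		return -EPERM;
	return 0;
}

static struct security_hook_list demo_hooks[] = {
	LSM_HOOK_INIT(kernel_load_data, demo_kernel_load_data),
};

static int __init demo_lsm_init(void)
{
	security_add_hooks(demo_hooks, ARRAY_SIZE(demo_hooks), "demo");
	return 0;
}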
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ea619021d901..ab257be4d924 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -627,8 +627,8 @@ static void optimize_kprobe(struct kprobe *p)
(kprobe_disabled(p) || kprobes_all_disarmed))
return;
- /* Both of break_handler and post_handler are not supported. */
- if (p->break_handler || p->post_handler)
+ /* kprobes with post_handler can not be optimized */
+ if (p->post_handler)
return;
op = container_of(p, struct optimized_kprobe, kp);
@@ -710,9 +710,7 @@ static void reuse_unused_kprobe(struct kprobe *ap)
* there is still a relative jump) and disabled.
*/
op = container_of(ap, struct optimized_kprobe, kp);
- if (unlikely(list_empty(&op->list)))
- printk(KERN_WARNING "Warning: found a stray unused "
- "aggrprobe@%p\n", ap->addr);
+ WARN_ON_ONCE(list_empty(&op->list));
/* Enable the probe again */
ap->flags &= ~KPROBE_FLAG_DISABLED;
/* Optimize it again (remove from op->list) */
@@ -985,7 +983,8 @@ static int arm_kprobe_ftrace(struct kprobe *p)
ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
(unsigned long)p->addr, 0, 0);
if (ret) {
- pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+ pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
+ p->addr, ret);
return ret;
}
@@ -1025,7 +1024,8 @@ static int disarm_kprobe_ftrace(struct kprobe *p)
ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
(unsigned long)p->addr, 1, 0);
- WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+ WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
+ p->addr, ret);
return ret;
}
#else /* !CONFIG_KPROBES_ON_FTRACE */
@@ -1116,20 +1116,6 @@ static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
}
NOKPROBE_SYMBOL(aggr_fault_handler);
-static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe *cur = __this_cpu_read(kprobe_instance);
- int ret = 0;
-
- if (cur && cur->break_handler) {
- if (cur->break_handler(cur, regs))
- ret = 1;
- }
- reset_kprobe_instance();
- return ret;
-}
-NOKPROBE_SYMBOL(aggr_break_handler);
-
/* Walks the list and increments nmissed count for multiprobe case */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
@@ -1270,24 +1256,15 @@ static void cleanup_rp_inst(struct kretprobe *rp)
}
NOKPROBE_SYMBOL(cleanup_rp_inst);
-/*
-* Add the new probe to ap->list. Fail if this is the
-* second jprobe at the address - two jprobes can't coexist
-*/
+/* Add the new probe to ap->list */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
- if (p->break_handler || p->post_handler)
+ if (p->post_handler)
unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */
- if (p->break_handler) {
- if (ap->break_handler)
- return -EEXIST;
- list_add_tail_rcu(&p->list, &ap->list);
- ap->break_handler = aggr_break_handler;
- } else
- list_add_rcu(&p->list, &ap->list);
+ list_add_rcu(&p->list, &ap->list);
if (p->post_handler && !ap->post_handler)
ap->post_handler = aggr_post_handler;
@@ -1310,8 +1287,6 @@ static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
/* We don't care the kprobe which has gone. */
if (p->post_handler && !kprobe_gone(p))
ap->post_handler = aggr_post_handler;
- if (p->break_handler && !kprobe_gone(p))
- ap->break_handler = aggr_break_handler;
INIT_LIST_HEAD(&ap->list);
INIT_HLIST_NODE(&ap->hlist);
@@ -1706,8 +1681,6 @@ static int __unregister_kprobe_top(struct kprobe *p)
goto disarmed;
else {
/* If disabling probe has special handlers, update aggrprobe */
- if (p->break_handler && !kprobe_gone(p))
- ap->break_handler = NULL;
if (p->post_handler && !kprobe_gone(p)) {
list_for_each_entry_rcu(list_p, &ap->list, list) {
if ((list_p != p) && (list_p->post_handler))
@@ -1812,77 +1785,6 @@ unsigned long __weak arch_deref_entry_point(void *entry)
return (unsigned long)entry;
}
-#if 0
-int register_jprobes(struct jprobe **jps, int num)
-{
- int ret = 0, i;
-
- if (num <= 0)
- return -EINVAL;
-
- for (i = 0; i < num; i++) {
- ret = register_jprobe(jps[i]);
-
- if (ret < 0) {
- if (i > 0)
- unregister_jprobes(jps, i);
- break;
- }
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(register_jprobes);
-
-int register_jprobe(struct jprobe *jp)
-{
- unsigned long addr, offset;
- struct kprobe *kp = &jp->kp;
-
- /*
- * Verify probepoint as well as the jprobe handler are
- * valid function entry points.
- */
- addr = arch_deref_entry_point(jp->entry);
-
- if (kallsyms_lookup_size_offset(addr, NULL, &offset) && offset == 0 &&
- kprobe_on_func_entry(kp->addr, kp->symbol_name, kp->offset)) {
- kp->pre_handler = setjmp_pre_handler;
- kp->break_handler = longjmp_break_handler;
- return register_kprobe(kp);
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(register_jprobe);
-
-void unregister_jprobe(struct jprobe *jp)
-{
- unregister_jprobes(&jp, 1);
-}
-EXPORT_SYMBOL_GPL(unregister_jprobe);
-
-void unregister_jprobes(struct jprobe **jps, int num)
-{
- int i;
-
- if (num <= 0)
- return;
- mutex_lock(&kprobe_mutex);
- for (i = 0; i < num; i++)
- if (__unregister_kprobe_top(&jps[i]->kp) < 0)
- jps[i]->kp.addr = NULL;
- mutex_unlock(&kprobe_mutex);
-
- synchronize_sched();
- for (i = 0; i < num; i++) {
- if (jps[i]->kp.addr)
- __unregister_kprobe_bottom(&jps[i]->kp);
- }
-}
-EXPORT_SYMBOL_GPL(unregister_jprobes);
-#endif
-
#ifdef CONFIG_KRETPROBES
/*
* This kprobe pre_handler is registered with every kretprobe. When probe
@@ -1982,7 +1884,6 @@ int register_kretprobe(struct kretprobe *rp)
rp->kp.pre_handler = pre_handler_kretprobe;
rp->kp.post_handler = NULL;
rp->kp.fault_handler = NULL;
- rp->kp.break_handler = NULL;
/* Pre-allocate memory for max kretprobe instances */
if (rp->maxactive <= 0) {
@@ -2105,7 +2006,6 @@ static void kill_kprobe(struct kprobe *p)
list_for_each_entry_rcu(kp, &p->list, list)
kp->flags |= KPROBE_FLAG_GONE;
p->post_handler = NULL;
- p->break_handler = NULL;
kill_optimized_kprobe(p);
}
/*
@@ -2169,11 +2069,12 @@ out:
}
EXPORT_SYMBOL_GPL(enable_kprobe);
+/* Callers must NOT use this on the usual path. This is only for critical cases. */
void dump_kprobe(struct kprobe *kp)
{
- printk(KERN_WARNING "Dumping kprobe:\n");
- printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
- kp->symbol_name, kp->addr, kp->offset);
+ pr_err("Dumping kprobe:\n");
+ pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
+ kp->symbol_name, kp->offset, kp->addr);
}
NOKPROBE_SYMBOL(dump_kprobe);
@@ -2196,11 +2097,8 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
entry = arch_deref_entry_point((void *)*iter);
if (!kernel_text_address(entry) ||
- !kallsyms_lookup_size_offset(entry, &size, &offset)) {
- pr_err("Failed to find blacklist at %p\n",
- (void *)entry);
+ !kallsyms_lookup_size_offset(entry, &size, &offset))
continue;
- }
ent = kmalloc(sizeof(*ent), GFP_KERNEL);
if (!ent)
@@ -2326,21 +2224,23 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
const char *sym, int offset, char *modname, struct kprobe *pp)
{
char *kprobe_type;
+ void *addr = p->addr;
if (p->pre_handler == pre_handler_kretprobe)
kprobe_type = "r";
- else if (p->pre_handler == setjmp_pre_handler)
- kprobe_type = "j";
else
kprobe_type = "k";
+ if (!kallsyms_show_value())
+ addr = NULL;
+
if (sym)
- seq_printf(pi, "%p %s %s+0x%x %s ",
- p->addr, kprobe_type, sym, offset,
+ seq_printf(pi, "%px %s %s+0x%x %s ",
+ addr, kprobe_type, sym, offset,
(modname ? modname : " "));
- else
- seq_printf(pi, "%p %s %p ",
- p->addr, kprobe_type, p->addr);
+ else /* try to use %pS */
+ seq_printf(pi, "%px %s %pS ",
+ addr, kprobe_type, p->addr);
if (!pp)
pp = p;
@@ -2428,8 +2328,16 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
struct kprobe_blacklist_entry *ent =
list_entry(v, struct kprobe_blacklist_entry, list);
- seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
- (void *)ent->end_addr, (void *)ent->start_addr);
+ /*
+ * If /proc/kallsyms is not showing kernel addresses, we won't
+ * show them here either.
+ */
+ if (!kallsyms_show_value())
+ seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
+ (void *)ent->start_addr);
+ else
+ seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
+ (void *)ent->end_addr, (void *)ent->start_addr);
return 0;
}
@@ -2611,7 +2519,7 @@ static int __init debugfs_kprobe_init(void)
if (!dir)
return -ENOMEM;
- file = debugfs_create_file("list", 0444, dir, NULL,
+ file = debugfs_create_file("list", 0400, dir, NULL,
&debugfs_kprobes_operations);
if (!file)
goto error;
@@ -2621,7 +2529,7 @@ static int __init debugfs_kprobe_init(void)
if (!file)
goto error;
- file = debugfs_create_file("blacklist", 0444, dir, NULL,
+ file = debugfs_create_file("blacklist", 0400, dir, NULL,
&debugfs_kprobe_blacklist_ops);
if (!file)
goto error;
@@ -2637,6 +2545,3 @@ late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */
module_init(init_kprobes);
-
-/* defined in arch/.../kernel/kprobes.c */
-EXPORT_SYMBOL_GPL(jprobe_return);
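With jprobes gone and break_handler removed, a plain kprobe pre_handler is the remaining entry hook; note that the reporting above now prints probe addresses with %pS (or gates raw values on kallsyms_show_value()) instead of plain %p. A minimal registration sketch (the probe target is an assumption for illustration):

/* Illustrative kprobe sketch; the probe target is an assumption. */
#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* %pS matches the symbolic printing adopted above. */
	pr_info("hit %pS\n", (void *)instruction_pointer(regs));
	return 0;
}

static struct kprobe demo_kp = {
	.symbol_name	= "do_sys_open",	/* assumed target */
	.pre_handler	= demo_pre,
	/* no .break_handler: the field is removed by this series */
};

static int __init demo_kprobe_init(void)
{
	return register_kprobe(&demo_kp);
}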
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 486dedbd9af5..087d18d771b5 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -190,7 +190,7 @@ static void __kthread_parkme(struct kthread *self)
if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
break;
- complete_all(&self->parked);
+ complete(&self->parked);
schedule();
}
__set_current_state(TASK_RUNNING);
@@ -471,7 +471,6 @@ void kthread_unpark(struct task_struct *k)
if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
__kthread_bind(k, kthread->cpu, TASK_PARKED);
- reinit_completion(&kthread->parked);
clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
/*
* __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
@@ -499,6 +498,9 @@ int kthread_park(struct task_struct *k)
if (WARN_ON(k->flags & PF_EXITING))
return -ENOSYS;
+ if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
+ return -EBUSY;
+
set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
if (k != current) {
wake_up_process(k);
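kthread_park() now refuses a second park with -EBUSY and the parked completion is no longer reinitialized, so park/unpark calls must pair strictly. The canonical consumer pattern stays as in this sketch (names invented):

/* Illustrative park/unpark sketch; demo_* names are invented. */
#include <linux/kthread.h>

static int demo_thread(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();	/* completes ->parked once */
		/* ... one unit of work ... */
	}
	return 0;
}

static void demo_pause_then_resume(struct task_struct *t)
{
	/* A second kthread_park() without an unpark now fails with -EBUSY. */
	if (!kthread_park(t)) {
		/* Thread is parked here; safe to reconfigure it. */
		kthread_unpark(t);
	}
}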
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 8402b3349dca..7d0b0ed74404 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -21,6 +21,9 @@
* Davidlohr Bueso <dave@stgolabs.net>
* Based on kernel/rcu/torture.c.
*/
+
+#define pr_fmt(fmt) fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
@@ -57,7 +60,7 @@ torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60,
"Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
-torture_param(bool, verbose, true,
+torture_param(int, verbose, 1,
"Enable verbose debugging printk()s");
static char *torture_type = "spin_lock";
@@ -365,7 +368,7 @@ static struct lock_torture_ops mutex_lock_ops = {
};
#include <linux/ww_mutex.h>
-static DEFINE_WW_CLASS(torture_ww_class);
+static DEFINE_WD_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index f44f658ae629..1a81a1257b3f 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -174,6 +174,21 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
}
/*
+ * Add @waiter to a given location in the lock wait_list and set the
+ * FLAG_WAITERS flag if it's the first waiter.
+ */
+static void __sched
+__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ struct list_head *list)
+{
+ debug_mutex_add_waiter(lock, waiter, current);
+
+ list_add_tail(&waiter->list, list);
+ if (__mutex_waiter_is_first(lock, waiter))
+ __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
+}
+
+/*
* Give up ownership to a specific task, when @task = NULL, this is equivalent
* to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves
* WAITERS. Provides RELEASE semantics like a regular unlock, the
@@ -244,6 +259,22 @@ void __sched mutex_lock(struct mutex *lock)
EXPORT_SYMBOL(mutex_lock);
#endif
+/*
+ * Wait-Die:
+ * A newer transaction is killed when it makes a request for a lock
+ * being held by an older transaction.
+ *
+ * Wound-Wait:
+ * A newer transaction is wounded when an older transaction makes a
+ * request for a lock being held by the newer transaction.
+ */
+
+/*
+ * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
+ * it.
+ */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
@@ -282,26 +313,108 @@ ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
ww_ctx->acquired++;
+ ww->ctx = ww_ctx;
}
+/*
+ * Determine if context @a is 'after' context @b. IOW, @a is a younger
+ * transaction than @b and, depending on the algorithm, either needs to
+ * wait for @b or die.
+ */
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
- return a->stamp - b->stamp <= LONG_MAX &&
- (a->stamp != b->stamp || a > b);
+ return (signed long)(a->stamp - b->stamp) > 0;
+}
+
+/*
+ * Wait-Die; wake a younger waiter context (when it holds locks) such that
+ * it can die.
+ *
+ * Among waiters with context, only the first one can have other locks acquired
+ * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
+ * __ww_mutex_check_kill() wake any but the earliest context.
+ */
+static bool __sched
+__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
+ struct ww_acquire_ctx *ww_ctx)
+{
+ if (!ww_ctx->is_wait_die)
+ return false;
+
+ if (waiter->ww_ctx->acquired > 0 &&
+ __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
+ debug_mutex_wake_waiter(lock, waiter);
+ wake_up_process(waiter->task);
+ }
+
+ return true;
+}
+
+/*
+ * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
+ *
+ * Wound the lock holder if there are waiters with older transactions than
+ * the lock holder's. Even though multiple waiters may race to wound the
+ * lock holder, it's sufficient that only one does.
+ */
+static bool __ww_mutex_wound(struct mutex *lock,
+ struct ww_acquire_ctx *ww_ctx,
+ struct ww_acquire_ctx *hold_ctx)
+{
+ struct task_struct *owner = __mutex_owner(lock);
+
+ lockdep_assert_held(&lock->wait_lock);
+
+ /*
+ * Possible through __ww_mutex_add_waiter() when we race with
+ * ww_mutex_set_context_fastpath(). In that case we'll get here again
+ * through __ww_mutex_check_waiters().
+ */
+ if (!hold_ctx)
+ return false;
+
+ /*
+ * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
+ * it cannot go away because we'll have FLAG_WAITERS set and hold
+ * wait_lock.
+ */
+ if (!owner)
+ return false;
+
+ if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
+ hold_ctx->wounded = 1;
+
+ /*
+ * wake_up_process() paired with set_current_state()
+ * inserts sufficient barriers to make sure @owner either sees
+ * it's wounded in __ww_mutex_check_kill() or has a
+ * wakeup pending to re-read the wounded state.
+ */
+ if (owner != current)
+ wake_up_process(owner);
+
+ return true;
+ }
+
+ return false;
}
/*
- * Wake up any waiters that may have to back off when the lock is held by the
- * given context.
+ * We just acquired @lock under @ww_ctx. If there are later contexts waiting
+ * behind us on the wait-list, check if they need to die, or wound us.
*
- * Due to the invariants on the wait list, this can only affect the first
- * waiter with a context.
+ * See __ww_mutex_add_waiter() for the list-order construction; basically the
+ * list is ordered by stamp, smallest (oldest) first.
+ *
+ * This relies on never mixing wait-die/wound-wait on the same wait-list,
+ * which is currently ensured by that being a ww_class property.
*
* The current task must not be on the wait list.
*/
static void __sched
-__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
+__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
struct mutex_waiter *cur;
@@ -311,66 +424,51 @@ __ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
if (!cur->ww_ctx)
continue;
- if (cur->ww_ctx->acquired > 0 &&
- __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
- debug_mutex_wake_waiter(lock, cur);
- wake_up_process(cur->task);
- }
-
- break;
+ if (__ww_mutex_die(lock, cur, ww_ctx) ||
+ __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
+ break;
}
}
/*
- * After acquiring lock with fastpath or when we lost out in contested
- * slowpath, set ctx and wake up any waiters so they can recheck.
+ * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
+ * and wake up any waiters so they can recheck.
*/
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
ww_mutex_lock_acquired(lock, ctx);
- lock->ctx = ctx;
-
/*
* The lock->ctx update should be visible on all cores before
- * the atomic read is done, otherwise contended waiters might be
+ * the WAITERS check is done, otherwise contended waiters might be
* missed. The contended waiters will either see ww_ctx == NULL
* and keep spinning, or it will acquire wait_lock, add itself
* to waiter list and sleep.
*/
- smp_mb(); /* ^^^ */
+ smp_mb(); /* See comments above and below. */
/*
- * Check if lock is contended, if not there is nobody to wake up
+ * [W] ww->ctx = ctx [W] MUTEX_FLAG_WAITERS
+ * MB MB
+ * [R] MUTEX_FLAG_WAITERS [R] ww->ctx
+ *
+ * The memory barrier above pairs with the memory barrier in
+ * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
+ * and/or !empty list.
*/
if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
return;
/*
- * Uh oh, we raced in fastpath, wake up everyone in this case,
- * so they can see the new lock->ctx.
+ * Uh oh, we raced in fastpath, check if any of the waiters need to
+ * die or wound us.
*/
spin_lock(&lock->base.wait_lock);
- __ww_mutex_wakeup_for_backoff(&lock->base, ctx);
+ __ww_mutex_check_waiters(&lock->base, ctx);
spin_unlock(&lock->base.wait_lock);
}
-/*
- * After acquiring lock in the slowpath set ctx.
- *
- * Unlike for the fast path, the caller ensures that waiters are woken up where
- * necessary.
- *
- * Callers must hold the mutex wait_lock.
- */
-static __always_inline void
-ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
-{
- ww_mutex_lock_acquired(lock, ctx);
- lock->ctx = ctx;
-}
-
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
static inline
@@ -646,37 +744,83 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
}
EXPORT_SYMBOL(ww_mutex_unlock);
+
+static __always_inline int __sched
+__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
+{
+ if (ww_ctx->acquired > 0) {
+#ifdef CONFIG_DEBUG_MUTEXES
+ struct ww_mutex *ww;
+
+ ww = container_of(lock, struct ww_mutex, base);
+ DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
+ ww_ctx->contending_lock = ww;
+#endif
+ return -EDEADLK;
+ }
+
+ return 0;
+}
+
+/*
+ * Check the wound condition for the current lock acquire.
+ *
+ * Wound-Wait: If we're wounded, kill ourselves.
+ *
+ * Wait-Die: If we're trying to acquire a lock already held by an older
+ * context, kill ourselves.
+ *
+ * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
+ * look at waiters before us in the wait-list.
+ */
static inline int __sched
-__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
- struct ww_acquire_ctx *ctx)
+__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
+ struct ww_acquire_ctx *ctx)
{
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
struct mutex_waiter *cur;
+ if (ctx->acquired == 0)
+ return 0;
+
+ if (!ctx->is_wait_die) {
+ if (ctx->wounded)
+ return __ww_mutex_kill(lock, ctx);
+
+ return 0;
+ }
+
if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
- goto deadlock;
+ return __ww_mutex_kill(lock, ctx);
/*
* If there is a waiter in front of us that has a context, then its
- * stamp is earlier than ours and we must back off.
+ * stamp is earlier than ours and we must kill ourselves.
*/
cur = waiter;
list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
- if (cur->ww_ctx)
- goto deadlock;
+ if (!cur->ww_ctx)
+ continue;
+
+ return __ww_mutex_kill(lock, ctx);
}
return 0;
-
-deadlock:
-#ifdef CONFIG_DEBUG_MUTEXES
- DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
- ctx->contending_lock = ww;
-#endif
- return -EDEADLK;
}
+/*
+ * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
+ * first. Such that older contexts are preferred to acquire the lock over
+ * younger contexts.
+ *
+ * Waiters without context are interspersed in FIFO order.
+ *
+ * Furthermore, for Wait-Die, kill ourselves immediately when possible (there
+ * are older contexts already waiting) to avoid unnecessary waiting; for
+ * Wound-Wait, ensure we wound the owning context when it is younger.
+ */
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
struct mutex *lock,
@@ -684,16 +828,21 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
{
struct mutex_waiter *cur;
struct list_head *pos;
+ bool is_wait_die;
if (!ww_ctx) {
- list_add_tail(&waiter->list, &lock->wait_list);
+ __mutex_add_waiter(lock, waiter, &lock->wait_list);
return 0;
}
+ is_wait_die = ww_ctx->is_wait_die;
+
/*
* Add the waiter before the first waiter with a higher stamp.
* Waiters without a context are skipped to avoid starving
- * them.
+ * them. Wait-Die waiters may die here. Wound-Wait waiters
+ * never die here, but they are sorted in stamp order and
+ * may wound the lock holder.
*/
pos = &lock->wait_list;
list_for_each_entry_reverse(cur, &lock->wait_list, list) {
@@ -701,16 +850,16 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
continue;
if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
- /* Back off immediately if necessary. */
- if (ww_ctx->acquired > 0) {
-#ifdef CONFIG_DEBUG_MUTEXES
- struct ww_mutex *ww;
-
- ww = container_of(lock, struct ww_mutex, base);
- DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
- ww_ctx->contending_lock = ww;
-#endif
- return -EDEADLK;
+ /*
+ * Wait-Die: if we find an older context waiting, there
+ * is no point in queueing behind it, as we'd have to
+ * die the moment it would acquire the lock.
+ */
+ if (is_wait_die) {
+ int ret = __ww_mutex_kill(lock, ww_ctx);
+
+ if (ret)
+ return ret;
}
break;
@@ -718,17 +867,28 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
pos = &cur->list;
+ /* Wait-Die: ensure younger waiters die. */
+ __ww_mutex_die(lock, cur, ww_ctx);
+ }
+
+ __mutex_add_waiter(lock, waiter, pos);
+
+ /*
+ * Wound-Wait: if we're blocking on a mutex owned by a younger context,
+ * wound it so that we might proceed.
+ */
+ if (!is_wait_die) {
+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+
/*
- * Wake up the waiter so that it gets a chance to back
- * off.
+ * See ww_mutex_set_context_fastpath(). Orders setting
+ * MUTEX_FLAG_WAITERS vs the ww->ctx load,
+ * such that either we or the fastpath will wound @ww->ctx.
*/
- if (cur->ww_ctx->acquired > 0) {
- debug_mutex_wake_waiter(lock, cur);
- wake_up_process(cur->task);
- }
+ smp_mb();
+ __ww_mutex_wound(lock, ww_ctx, ww->ctx);
}
- list_add_tail(&waiter->list, pos);
return 0;
}
@@ -751,6 +911,14 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
if (use_ww_ctx && ww_ctx) {
if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
return -EALREADY;
+
+ /*
+ * Reset the wounded flag after a kill. No other process can
+ * race and wound us here since they can't have a valid owner
+ * pointer if we don't have any locks held.
+ */
+ if (ww_ctx->acquired == 0)
+ ww_ctx->wounded = 0;
}
preempt_disable();
@@ -772,7 +940,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
*/
if (__mutex_trylock(lock)) {
if (use_ww_ctx && ww_ctx)
- __ww_mutex_wakeup_for_backoff(lock, ww_ctx);
+ __ww_mutex_check_waiters(lock, ww_ctx);
goto skip_wait;
}
@@ -784,25 +952,26 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
if (!use_ww_ctx) {
/* add waiting tasks to the end of the waitqueue (FIFO): */
- list_add_tail(&waiter.list, &lock->wait_list);
+ __mutex_add_waiter(lock, &waiter, &lock->wait_list);
+
#ifdef CONFIG_DEBUG_MUTEXES
waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
} else {
- /* Add in stamp order, waking up waiters that must back off. */
+ /*
+ * Add in stamp order, waking up waiters that must kill
+ * themselves.
+ */
ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
if (ret)
- goto err_early_backoff;
+ goto err_early_kill;
waiter.ww_ctx = ww_ctx;
}
waiter.task = current;
- if (__mutex_waiter_is_first(lock, &waiter))
- __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
-
set_current_state(state);
for (;;) {
/*
@@ -815,7 +984,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
goto acquired;
/*
- * Check for signals and wound conditions while holding
+ * Check for signals and kill conditions while holding
* wait_lock. This ensures the lock cancellation is ordered
* against mutex_unlock() and wake-ups do not go missing.
*/
@@ -824,8 +993,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
goto err;
}
- if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
- ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
+ if (use_ww_ctx && ww_ctx) {
+ ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
if (ret)
goto err;
}
@@ -859,6 +1028,16 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
acquired:
__set_current_state(TASK_RUNNING);
+ if (use_ww_ctx && ww_ctx) {
+ /*
+ * Wound-Wait; we stole the lock (!first_waiter), check the
+ * waiters as anyone might want to wound us.
+ */
+ if (!ww_ctx->is_wait_die &&
+ !__mutex_waiter_is_first(lock, &waiter))
+ __ww_mutex_check_waiters(lock, ww_ctx);
+ }
+
mutex_remove_waiter(lock, &waiter, current);
if (likely(list_empty(&lock->wait_list)))
__mutex_clear_flag(lock, MUTEX_FLAGS);
@@ -870,7 +1049,7 @@ skip_wait:
lock_acquired(&lock->dep_map, ip);
if (use_ww_ctx && ww_ctx)
- ww_mutex_set_context_slowpath(ww, ww_ctx);
+ ww_mutex_lock_acquired(ww, ww_ctx);
spin_unlock(&lock->wait_lock);
preempt_enable();
@@ -879,7 +1058,7 @@ skip_wait:
err:
__set_current_state(TASK_RUNNING);
mutex_remove_waiter(lock, &waiter, current);
-err_early_backoff:
+err_early_kill:
spin_unlock(&lock->wait_lock);
debug_mutex_free_waiter(&waiter);
mutex_release(&lock->dep_map, 1, ip);
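For lock users, Wait-Die and Wound-Wait surface identically at the API level: ww_mutex_lock() may return -EDEADLK, after which the transaction must drop every lock it holds and sleep on the contended one with ww_mutex_lock_slow() before retrying. A two-mutex sketch of that dance (class and helper names invented; see also Documentation/locking/ww-mutex-design.txt):

/* Illustrative two-lock sketch; demo_* names are invented. */
#include <linux/kernel.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);	/* Wound-Wait after this series */

static void demo_lock_pair(struct ww_mutex *m1, struct ww_mutex *m2)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &demo_ww_class);

	ret = ww_mutex_lock(m1, &ctx);
	WARN_ON_ONCE(ret);	/* acquired == 0: cannot return -EDEADLK */

	while (ww_mutex_lock(m2, &ctx) == -EDEADLK) {
		/*
		 * Told to die (Wait-Die) or wounded (Wound-Wait): back off
		 * fully, sleep on the contended mutex, then retry with the
		 * roles swapped so the lock we now hold comes first.
		 */
		ww_mutex_unlock(m1);
		ww_mutex_lock_slow(m2, &ctx);
		swap(m1, m2);
	}
	ww_acquire_done(&ctx);

	/* ... both locks held: do the work ... */

	ww_mutex_unlock(m1);
	ww_mutex_unlock(m2);
	ww_acquire_fini(&ctx);
}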
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 0e4cd64ad2c0..5b915b370d5a 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -26,7 +26,7 @@
#include <linux/slab.h>
#include <linux/ww_mutex.h>
-static DEFINE_WW_CLASS(ww_class);
+static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;
struct test_mutex {
diff --git a/kernel/module.c b/kernel/module.c
index f475f30eed8c..a7615d661910 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2876,7 +2876,7 @@ static int copy_module_from_user(const void __user *umod, unsigned long len,
if (info->len < sizeof(*(info->hdr)))
return -ENOEXEC;
- err = security_kernel_read_file(NULL, READING_MODULE);
+ err = security_kernel_load_data(LOADING_MODULE);
if (err)
return err;
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 9c85c7822383..abef759de7c8 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -338,7 +338,7 @@ static int create_image(int platform_mode)
* hibernation_snapshot - Quiesce devices and create a hibernation image.
* @platform_mode: If set, use platform driver to prepare for the transition.
*
- * This routine must be called with pm_mutex held.
+ * This routine must be called with system_transition_mutex held.
*/
int hibernation_snapshot(int platform_mode)
{
@@ -500,8 +500,9 @@ static int resume_target_kernel(bool platform_mode)
* hibernation_restore - Quiesce devices and restore from a hibernation image.
* @platform_mode: If set, use platform driver to prepare for the transition.
*
- * This routine must be called with pm_mutex held. If it is successful, control
- * reappears in the restored target kernel in hibernation_snapshot().
+ * This routine must be called with system_transition_mutex held. If it is
+ * successful, control reappears in the restored target kernel in
+ * hibernation_snapshot().
*/
int hibernation_restore(int platform_mode)
{
@@ -638,6 +639,7 @@ static void power_down(void)
break;
case HIBERNATION_PLATFORM:
hibernation_platform_enter();
+ /* Fall through */
case HIBERNATION_SHUTDOWN:
if (pm_power_off)
kernel_power_off();
@@ -805,13 +807,13 @@ static int software_resume(void)
* name_to_dev_t() below takes a sysfs buffer mutex when sysfs
* is configured into the kernel. Since the regular hibernate
* trigger path is via sysfs which takes a buffer mutex before
- * calling hibernate functions (which take pm_mutex) this can
- * cause lockdep to complain about a possible ABBA deadlock
+ * calling hibernate functions (which take system_transition_mutex)
+ * this can cause lockdep to complain about a possible ABBA deadlock
* which cannot happen since we're in the boot code here and
* sysfs can't be invoked yet. Therefore, we use a subclass
* here to avoid lockdep complaining.
*/
- mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);
+ mutex_lock_nested(&system_transition_mutex, SINGLE_DEPTH_NESTING);
if (swsusp_resume_device)
goto Check_image;
@@ -899,7 +901,7 @@ static int software_resume(void)
atomic_inc(&snapshot_device_available);
/* For success case, the suspend path will release the lock */
Unlock:
- mutex_unlock(&pm_mutex);
+ mutex_unlock(&system_transition_mutex);
pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
return error;
Close_Finish:
diff --git a/kernel/power/main.c b/kernel/power/main.c
index d9706da10930..35b50823d83b 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -15,17 +15,16 @@
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/suspend.h>
#include "power.h"
-DEFINE_MUTEX(pm_mutex);
-
#ifdef CONFIG_PM_SLEEP
void lock_system_sleep(void)
{
current->flags |= PF_FREEZER_SKIP;
- mutex_lock(&pm_mutex);
+ mutex_lock(&system_transition_mutex);
}
EXPORT_SYMBOL_GPL(lock_system_sleep);
@@ -37,8 +36,9 @@ void unlock_system_sleep(void)
*
* Reason:
* Fundamentally, we just don't need it, because freezing condition
- * doesn't come into effect until we release the pm_mutex lock,
- * since the freezer always works with pm_mutex held.
+ * doesn't come into effect until we release the
+ * system_transition_mutex lock, since the freezer always works with
+ * system_transition_mutex held.
*
* More importantly, in the case of hibernation,
* unlock_system_sleep() gets called in snapshot_read() and
@@ -47,7 +47,7 @@ void unlock_system_sleep(void)
* enter the refrigerator, thus causing hibernation to lockup.
*/
current->flags &= ~PF_FREEZER_SKIP;
- mutex_unlock(&pm_mutex);
+ mutex_unlock(&system_transition_mutex);
}
EXPORT_SYMBOL_GPL(unlock_system_sleep);
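With the DEFINE_MUTEX(pm_mutex) definition gone, these helpers now wrap the globally defined system_transition_mutex. A sketch of the caller pattern they exist for (demo_reconfigure() is hypothetical):

#include <linux/suspend.h>

static void demo_reconfigure(void)
{
	lock_system_sleep();	/* also sets PF_FREEZER_SKIP on current */
	/* ... no suspend or hibernation transition can begin here ... */
	unlock_system_sleep();
}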
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 87331565e505..5342f6fc022e 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -92,7 +92,7 @@ static void s2idle_enter(void)
/* Push all the CPUs into the idle loop. */
wake_up_all_idle_cpus();
/* Make the current CPU wait so it can enter the idle loop too. */
- swait_event(s2idle_wait_head,
+ swait_event_exclusive(s2idle_wait_head,
s2idle_state == S2IDLE_STATE_WAKE);
cpuidle_pause();
@@ -160,7 +160,7 @@ void s2idle_wake(void)
raw_spin_lock_irqsave(&s2idle_lock, flags);
if (s2idle_state > S2IDLE_STATE_NONE) {
s2idle_state = S2IDLE_STATE_WAKE;
- swake_up(&s2idle_wait_head);
+ swake_up_one(&s2idle_wait_head);
}
raw_spin_unlock_irqrestore(&s2idle_lock, flags);
}
@@ -556,7 +556,7 @@ static int enter_state(suspend_state_t state)
} else if (!valid_state(state)) {
return -EINVAL;
}
- if (!mutex_trylock(&pm_mutex))
+ if (!mutex_trylock(&system_transition_mutex))
return -EBUSY;
if (state == PM_SUSPEND_TO_IDLE)
@@ -590,7 +590,7 @@ static int enter_state(suspend_state_t state)
pm_pr_dbg("Finishing wakeup.\n");
suspend_finish();
Unlock:
- mutex_unlock(&pm_mutex);
+ mutex_unlock(&system_transition_mutex);
return error;
}
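The swait renames in this file are mechanical, but they encode a real constraint: simple waitqueues support only exclusive waiters woken one at a time, and the new names finally say so at the call site. In miniature (demo_* names hypothetical):

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);
static bool demo_done;

static void demo_waiter(void)
{
	/* Queued as an exclusive waiter; sleeps until the condition holds. */
	swait_event_exclusive(demo_wq, READ_ONCE(demo_done));
}

static void demo_completer(void)
{
	WRITE_ONCE(demo_done, true);
	swake_up_one(&demo_wq);		/* wakes exactly one waiter */
}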
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index c2bcf97d24c8..d7f6c1a288d3 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -923,7 +923,7 @@ int swsusp_write(unsigned int flags)
}
memset(&snapshot, 0, sizeof(struct snapshot_handle));
error = snapshot_read_next(&snapshot);
- if (error < PAGE_SIZE) {
+ if (error < (int)PAGE_SIZE) {
if (error >= 0)
error = -EFAULT;
@@ -1483,7 +1483,7 @@ int swsusp_read(unsigned int *flags_p)
memset(&snapshot, 0, sizeof(struct snapshot_handle));
error = snapshot_write_next(&snapshot);
- if (error < PAGE_SIZE)
+ if (error < (int)PAGE_SIZE)
return error < 0 ? error : -EFAULT;
header = (struct swsusp_info *)data_of(snapshot);
error = get_swap_reader(&handle, flags_p);
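The (int) casts above matter because PAGE_SIZE expands to an unsigned long, so the usual arithmetic conversions make `error < PAGE_SIZE` an unsigned comparison in which a negative error code becomes a huge value. A stand-alone user-space demonstration (the PAGE_SIZE here is a local stand-in for the kernel macro):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* unsigned, like the kernel definition */

int main(void)
{
	int error = -14;	/* e.g. -EFAULT from snapshot_read_next() */

	/* error is converted to unsigned long; -14 becomes huge and the
	 * short-read check is silently skipped. */
	if (error < PAGE_SIZE)
		printf("unsigned compare: error handled\n");
	else
		printf("unsigned compare: error MISSED\n");

	/* Casting PAGE_SIZE to int keeps the comparison signed. */
	if (error < (int)PAGE_SIZE)
		printf("signed compare: error handled\n");
	return 0;
}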
diff --git a/kernel/power/user.c b/kernel/power/user.c
index abd225550271..2d8b60a3c86b 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -216,7 +216,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!mutex_trylock(&pm_mutex))
+ if (!mutex_trylock(&system_transition_mutex))
return -EBUSY;
lock_device_hotplug();
@@ -394,7 +394,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
}
unlock_device_hotplug();
- mutex_unlock(&pm_mutex);
+ mutex_unlock(&system_transition_mutex);
return error;
}
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 2a7d04049af4..0f1898820cba 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -19,11 +19,16 @@
#ifdef CONFIG_PRINTK
#define PRINTK_SAFE_CONTEXT_MASK 0x3fffffff
-#define PRINTK_NMI_DEFERRED_CONTEXT_MASK 0x40000000
+#define PRINTK_NMI_DIRECT_CONTEXT_MASK 0x40000000
#define PRINTK_NMI_CONTEXT_MASK 0x80000000
extern raw_spinlock_t logbuf_lock;
+__printf(5, 0)
+int vprintk_store(int facility, int level,
+ const char *dict, size_t dictlen,
+ const char *fmt, va_list args);
+
__printf(1, 0) int vprintk_default(const char *fmt, va_list args);
__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
__printf(1, 0) int vprintk_func(const char *fmt, va_list args);
@@ -54,6 +59,8 @@ void __printk_safe_exit(void);
local_irq_enable(); \
} while (0)
+void defer_console_output(void);
+
#else
__printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
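The __printf(5, 0) annotation on the new vprintk_store() prototype says argument 5 is a printf-style format string and, because the second number is 0, that the arguments arrive as a va_list rather than as checkable varargs. The convention in miniature (demo_* helpers hypothetical):

#include <linux/kernel.h>

__printf(2, 0)
static int demo_vlog(int level, const char *fmt, va_list args)
{
	(void)level;			/* ignored in this sketch */
	return vprintk(fmt, args);
}

__printf(2, 3)
static int demo_log(int level, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = demo_vlog(level, fmt, args);	/* format checked at demo_log() callers */
	va_end(args);
	return ret;
}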
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 247808333ba4..90b6ab01db59 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -349,7 +349,7 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
*/
enum log_flags {
- LOG_NOCONS = 1, /* already flushed, do not print to console */
+ LOG_NOCONS = 1, /* suppress console printing */
LOG_NEWLINE = 2, /* text ended with a newline */
LOG_PREFIX = 4, /* text started with a prefix */
LOG_CONT = 8, /* text is a fragment of a continuation line */
@@ -1352,71 +1352,68 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
{
char *text;
int len = 0;
+ u64 next_seq;
+ u64 seq;
+ u32 idx;
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
return -ENOMEM;
logbuf_lock_irq();
- if (buf) {
- u64 next_seq;
- u64 seq;
- u32 idx;
+ /*
+ * Find the first record that, together with all following
+ * records, fits into the user-provided buffer for this dump.
+ */
+ seq = clear_seq;
+ idx = clear_idx;
+ while (seq < log_next_seq) {
+ struct printk_log *msg = log_from_idx(idx);
- /*
- * Find first record that fits, including all following records,
- * into the user-provided buffer for this dump.
- */
- seq = clear_seq;
- idx = clear_idx;
- while (seq < log_next_seq) {
- struct printk_log *msg = log_from_idx(idx);
-
- len += msg_print_text(msg, true, NULL, 0);
- idx = log_next(idx);
- seq++;
- }
+ len += msg_print_text(msg, true, NULL, 0);
+ idx = log_next(idx);
+ seq++;
+ }
- /* move first record forward until length fits into the buffer */
- seq = clear_seq;
- idx = clear_idx;
- while (len > size && seq < log_next_seq) {
- struct printk_log *msg = log_from_idx(idx);
+ /* move first record forward until length fits into the buffer */
+ seq = clear_seq;
+ idx = clear_idx;
+ while (len > size && seq < log_next_seq) {
+ struct printk_log *msg = log_from_idx(idx);
- len -= msg_print_text(msg, true, NULL, 0);
- idx = log_next(idx);
- seq++;
- }
+ len -= msg_print_text(msg, true, NULL, 0);
+ idx = log_next(idx);
+ seq++;
+ }
- /* last message fitting into this dump */
- next_seq = log_next_seq;
+ /* last message fitting into this dump */
+ next_seq = log_next_seq;
- len = 0;
- while (len >= 0 && seq < next_seq) {
- struct printk_log *msg = log_from_idx(idx);
- int textlen;
+ len = 0;
+ while (len >= 0 && seq < next_seq) {
+ struct printk_log *msg = log_from_idx(idx);
+ int textlen;
- textlen = msg_print_text(msg, true, text,
- LOG_LINE_MAX + PREFIX_MAX);
- if (textlen < 0) {
- len = textlen;
- break;
- }
- idx = log_next(idx);
- seq++;
+ textlen = msg_print_text(msg, true, text,
+ LOG_LINE_MAX + PREFIX_MAX);
+ if (textlen < 0) {
+ len = textlen;
+ break;
+ }
+ idx = log_next(idx);
+ seq++;
- logbuf_unlock_irq();
- if (copy_to_user(buf + len, text, textlen))
- len = -EFAULT;
- else
- len += textlen;
- logbuf_lock_irq();
-
- if (seq < log_first_seq) {
- /* messages are gone, move to next one */
- seq = log_first_seq;
- idx = log_first_idx;
- }
+ logbuf_unlock_irq();
+ if (copy_to_user(buf + len, text, textlen))
+ len = -EFAULT;
+ else
+ len += textlen;
+ logbuf_lock_irq();
+
+ if (seq < log_first_seq) {
+ /* messages are gone, move to next one */
+ seq = log_first_seq;
+ idx = log_first_idx;
}
}
@@ -1430,6 +1427,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
return len;
}
+static void syslog_clear(void)
+{
+ logbuf_lock_irq();
+ clear_seq = log_next_seq;
+ clear_idx = log_next_idx;
+ logbuf_unlock_irq();
+}
+
int do_syslog(int type, char __user *buf, int len, int source)
{
bool clear = false;
@@ -1474,7 +1479,7 @@ int do_syslog(int type, char __user *buf, int len, int source)
break;
/* Clear ring buffer */
case SYSLOG_ACTION_CLEAR:
- syslog_print_all(NULL, 0, true);
+ syslog_clear();
break;
/* Disable logging to console */
case SYSLOG_ACTION_CONSOLE_OFF:
@@ -1824,28 +1829,16 @@ static size_t log_output(int facility, int level, enum log_flags lflags, const c
return log_store(facility, level, lflags, 0, dict, dictlen, text, text_len);
}
-asmlinkage int vprintk_emit(int facility, int level,
- const char *dict, size_t dictlen,
- const char *fmt, va_list args)
+/* Must be called under logbuf_lock. */
+int vprintk_store(int facility, int level,
+ const char *dict, size_t dictlen,
+ const char *fmt, va_list args)
{
static char textbuf[LOG_LINE_MAX];
char *text = textbuf;
size_t text_len;
enum log_flags lflags = 0;
- unsigned long flags;
- int printed_len;
- bool in_sched = false;
- if (level == LOGLEVEL_SCHED) {
- level = LOGLEVEL_DEFAULT;
- in_sched = true;
- }
-
- boot_delay_msec(level);
- printk_delay();
-
- /* This stops the holder of console_sem just where we want him */
- logbuf_lock_irqsave(flags);
/*
* The printf needs to come first; we need the syslog
* prefix which might be passed-in as a parameter.
@@ -1886,8 +1879,32 @@ asmlinkage int vprintk_emit(int facility, int level,
if (dict)
lflags |= LOG_PREFIX|LOG_NEWLINE;
- printed_len = log_output(facility, level, lflags, dict, dictlen, text, text_len);
+ if (suppress_message_printing(level))
+ lflags |= LOG_NOCONS;
+
+ return log_output(facility, level, lflags,
+ dict, dictlen, text, text_len);
+}
+
+asmlinkage int vprintk_emit(int facility, int level,
+ const char *dict, size_t dictlen,
+ const char *fmt, va_list args)
+{
+ int printed_len;
+ bool in_sched = false;
+ unsigned long flags;
+
+ if (level == LOGLEVEL_SCHED) {
+ level = LOGLEVEL_DEFAULT;
+ in_sched = true;
+ }
+ boot_delay_msec(level);
+ printk_delay();
+
+ /* This stops the holder of console_sem just where we want him */
+ logbuf_lock_irqsave(flags);
+ printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args);
logbuf_unlock_irqrestore(flags);
/* If called from the scheduler, we can not call up(). */
@@ -2013,7 +2030,6 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
const char *text, size_t len) {}
static size_t msg_print_text(const struct printk_log *msg,
bool syslog, char *buf, size_t size) { return 0; }
-static bool suppress_message_printing(int level) { return false; }
#endif /* CONFIG_PRINTK */
@@ -2243,6 +2259,7 @@ int is_console_locked(void)
{
return console_locked;
}
+EXPORT_SYMBOL(is_console_locked);
/*
* Check if we have any console that is capable of printing while cpu is
@@ -2349,11 +2366,10 @@ skip:
break;
msg = log_from_idx(console_idx);
- if (suppress_message_printing(msg->level)) {
+ if (msg->flags & LOG_NOCONS) {
/*
- * Skip record we have buffered and already printed
- * directly to the console when we received it, and
- * record that has level above the console loglevel.
+ * Skip records flagged LOG_NOCONS: their level was above the
+ * console loglevel (and ignore_loglevel was unset) at store time.
*/
console_idx = log_next(console_idx);
console_seq++;
@@ -2878,16 +2894,20 @@ void wake_up_klogd(void)
preempt_enable();
}
-int vprintk_deferred(const char *fmt, va_list args)
+void defer_console_output(void)
{
- int r;
-
- r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
-
preempt_disable();
__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
preempt_enable();
+}
+
+int vprintk_deferred(const char *fmt, va_list args)
+{
+ int r;
+
+ r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
+ defer_console_output();
return r;
}
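The EXPORT_SYMBOL(is_console_locked) added above lets modular drivers assert console-lock ownership directly; a sketch of the assertion it enables (demo_touch_vt_state() is hypothetical):

#include <linux/bug.h>
#include <linux/console.h>

static void demo_touch_vt_state(void)
{
	WARN_ON(!is_console_locked());
	/* ... safe to modify state protected by console_lock() ... */
}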
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index d7d091309054..a0a74c533e4b 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -308,24 +308,33 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
void printk_nmi_enter(void)
{
- /*
- * The size of the extra per-CPU buffer is limited. Use it only when
- * the main one is locked. If this CPU is not in the safe context,
- * the lock must be taken on another CPU and we could wait for it.
- */
- if ((this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK) &&
- raw_spin_is_locked(&logbuf_lock)) {
- this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
- } else {
- this_cpu_or(printk_context, PRINTK_NMI_DEFERRED_CONTEXT_MASK);
- }
+ this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
}
void printk_nmi_exit(void)
{
- this_cpu_and(printk_context,
- ~(PRINTK_NMI_CONTEXT_MASK |
- PRINTK_NMI_DEFERRED_CONTEXT_MASK));
+ this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
+}
+
+/*
+ * Marks the beginning of code that might produce many messages in
+ * NMI context, where the risk of losing them is more critical than
+ * eventual reordering.
+ *
+ * It has an effect only when called in NMI context. Then printk()
+ * will try to store the messages into the main logbuf directly
+ * and use the per-CPU buffers only as a fallback when the lock
+ * is not available.
+ */
+void printk_nmi_direct_enter(void)
+{
+ if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
+ this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
+}
+
+void printk_nmi_direct_exit(void)
+{
+ this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}
#else
@@ -363,6 +372,20 @@ void __printk_safe_exit(void)
__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
+ /*
+ * Try to use the main logbuf even in NMI. But avoid calling console
+ * drivers that might have their own locks.
+ */
+ if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
+ raw_spin_trylock(&logbuf_lock)) {
+ int len;
+
+ len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
+ raw_spin_unlock(&logbuf_lock);
+ defer_console_output();
+ return len;
+ }
+
/* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
return vprintk_nmi(fmt, args);
@@ -371,13 +394,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
return vprintk_safe(fmt, args);
- /*
- * Use the main logbuf when logbuf_lock is available in NMI.
- * But avoid calling console drivers that might have their own locks.
- */
- if (this_cpu_read(printk_context) & PRINTK_NMI_DEFERRED_CONTEXT_MASK)
- return vprintk_deferred(fmt, args);
-
/* No obstacles. */
return vprintk_default(fmt, args);
}
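The new direct mode is opt-in per code region: inside the pair, printk() from NMI tries the main logbuf via trylock and falls back to the per-CPU buffer only when the lock is contended. A sketch of the intended bracketing (demo_nmi_dump() is hypothetical; the in-tree user added alongside this series is the ftrace dump path):

#include <linux/printk.h>

static void demo_nmi_dump(void)
{
	printk_nmi_direct_enter();	/* prefer the main logbuf, trylock only */
	/* ... many printk()s, e.g. dumping a large trace buffer ... */
	printk_nmi_direct_exit();
}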
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 40cea6735c2d..4d04683c31b2 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -91,7 +91,17 @@ static inline void rcu_seq_end(unsigned long *sp)
WRITE_ONCE(*sp, rcu_seq_endval(sp));
}
-/* Take a snapshot of the update side's sequence number. */
+/*
+ * rcu_seq_snap - Take a snapshot of the update side's sequence number.
+ *
+ * This function returns the earliest value of the grace-period sequence number
+ * that will indicate that a full grace period has elapsed since the current
+ * time. Once the grace-period sequence number has reached this value, it will
+ * be safe to invoke all callbacks that have been registered prior to the
+ * current time. This value is the current grace-period number plus two to the
+ * power of the number of low-order bits reserved for state, then rounded up to
+ * the next value in which the state bits are all zero.
+ */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
unsigned long s;
@@ -108,6 +118,15 @@ static inline unsigned long rcu_seq_current(unsigned long *sp)
}
/*
+ * Given a snapshot from rcu_seq_snap(), determine whether or not the
+ * corresponding update-side operation has started.
+ */
+static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
+{
+ return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
+}
+
+/*
* Given a snapshot from rcu_seq_snap(), determine whether or not a
* full update-side operation has occurred.
*/
@@ -117,6 +136,45 @@ static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
}
/*
+ * Has a grace period completed since the time the old gp_seq was collected?
+ */
+static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
+{
+ return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
+}
+
+/*
+ * Has a grace period started since the time the old gp_seq was collected?
+ */
+static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
+{
+ return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
+ new);
+}
+
+/*
+ * Roughly how many full grace periods have elapsed between the collection
+ * of the two specified grace periods?
+ */
+static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
+{
+ unsigned long rnd_diff;
+
+ if (old == new)
+ return 0;
+ /*
+ * Compute the number of grace periods (still shifted up), plus
+ * one if either of new and old is not an exact grace period.
+ */
+ rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
+ ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
+ ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
+ if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
+ return 1; /* Definitely no grace period has elapsed. */
+ return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
+}
+
+/*
* debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
* by call_rcu() and rcu callback execution, and are therefore not part of the
* RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
@@ -276,6 +334,9 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)
+/* Is this rcu_node the last leaf? */
+#define rcu_is_last_leaf_node(rsp, rnp) ((rnp) == &(rsp)->node[rcu_num_nodes - 1])
+
/*
* Do a full breadth-first scan of the rcu_node structures for the
* specified rcu_state structure.
@@ -405,8 +466,7 @@ enum rcutorture_type {
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
- unsigned long *gpnum, unsigned long *completed);
-void rcutorture_record_test_transition(void);
+ unsigned long *gp_seq);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
struct rcu_head *rhp,
@@ -415,15 +475,11 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
- int *flags,
- unsigned long *gpnum,
- unsigned long *completed)
+ int *flags, unsigned long *gp_seq)
{
*flags = 0;
- *gpnum = 0;
- *completed = 0;
+ *gp_seq = 0;
}
-static inline void rcutorture_record_test_transition(void) { }
static inline void rcutorture_record_progress(unsigned long vernum) { }
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
@@ -441,31 +497,26 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
struct srcu_struct *sp, int *flags,
- unsigned long *gpnum,
- unsigned long *completed)
+ unsigned long *gp_seq)
{
if (test_type != SRCU_FLAVOR)
return;
*flags = 0;
- *completed = sp->srcu_idx;
- *gpnum = *completed;
+ *gp_seq = sp->srcu_idx;
}
#elif defined(CONFIG_TREE_SRCU)
void srcutorture_get_gp_data(enum rcutorture_type test_type,
struct srcu_struct *sp, int *flags,
- unsigned long *gpnum, unsigned long *completed);
+ unsigned long *gp_seq);
#endif
#ifdef CONFIG_TINY_RCU
-static inline unsigned long rcu_batches_started(void) { return 0; }
-static inline unsigned long rcu_batches_started_bh(void) { return 0; }
-static inline unsigned long rcu_batches_started_sched(void) { return 0; }
-static inline unsigned long rcu_batches_completed(void) { return 0; }
-static inline unsigned long rcu_batches_completed_bh(void) { return 0; }
-static inline unsigned long rcu_batches_completed_sched(void) { return 0; }
+static inline unsigned long rcu_get_gp_seq(void) { return 0; }
+static inline unsigned long rcu_bh_get_gp_seq(void) { return 0; }
+static inline unsigned long rcu_sched_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
static inline unsigned long
@@ -474,19 +525,16 @@ static inline void rcu_force_quiescent_state(void) { }
static inline void rcu_bh_force_quiescent_state(void) { }
static inline void rcu_sched_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
+static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
#else /* #ifdef CONFIG_TINY_RCU */
-extern unsigned long rcutorture_testseq;
-extern unsigned long rcutorture_vernum;
-unsigned long rcu_batches_started(void);
-unsigned long rcu_batches_started_bh(void);
-unsigned long rcu_batches_started_sched(void);
-unsigned long rcu_batches_completed(void);
-unsigned long rcu_batches_completed_bh(void);
-unsigned long rcu_batches_completed_sched(void);
+unsigned long rcu_get_gp_seq(void);
+unsigned long rcu_bh_get_gp_seq(void);
+unsigned long rcu_sched_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long rcu_exp_batches_completed_sched(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
+int rcu_get_gp_kthreads_prio(void);
void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
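The new helpers all lean on the gp_seq packing: the low RCU_SEQ_CTR_SHIFT bits (two, so RCU_SEQ_STATE_MASK is 3) carry grace-period state and the remaining bits count grace periods. A stand-alone user-space check of the rounding rcu_seq_snap() performs (constants mirror kernel/rcu/rcu.h):

#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT 2
#define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

/* Same rounding as the kernel's rcu_seq_snap(). */
static unsigned long seq_snap(unsigned long s)
{
	return (s + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
}

int main(void)
{
	unsigned long idle = 5UL << RCU_SEQ_CTR_SHIFT;	/* counter 5, idle */
	unsigned long busy = idle | 1;			/* counter 5, GP in flight */

	/* Idle: waiting out one full GP lands on counter 6. */
	printf("snap(%lu) = %lu\n", idle, seq_snap(idle));	/* 20 -> 24 */

	/* In flight: the current GP may predate us, so the one after it
	 * must also complete, landing on counter 7. */
	printf("snap(%lu) = %lu\n", busy, seq_snap(busy));	/* 21 -> 28 */
	return 0;
}

This matches the rcu_seq_snap() comment above: an idle snapshot costs one grace period, while a snapshot taken mid-GP must also wait out the successor.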
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index e232846516b3..34244523550e 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -19,6 +19,9 @@
*
* Authors: Paul E. McKenney <paulmck@us.ibm.com>
*/
+
+#define pr_fmt(fmt) fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -88,7 +91,7 @@ torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, !IS_ENABLED(MODULE),
"Shutdown at end of performance tests.");
-torture_param(bool, verbose, true, "Enable verbose debugging printk()s");
+torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
static char *perf_type = "rcu";
@@ -135,8 +138,8 @@ struct rcu_perf_ops {
void (*cleanup)(void);
int (*readlock)(void);
void (*readunlock)(int idx);
- unsigned long (*started)(void);
- unsigned long (*completed)(void);
+ unsigned long (*get_gp_seq)(void);
+ unsigned long (*gp_diff)(unsigned long new, unsigned long old);
unsigned long (*exp_completed)(void);
void (*async)(struct rcu_head *head, rcu_callback_t func);
void (*gp_barrier)(void);
@@ -176,8 +179,8 @@ static struct rcu_perf_ops rcu_ops = {
.init = rcu_sync_perf_init,
.readlock = rcu_perf_read_lock,
.readunlock = rcu_perf_read_unlock,
- .started = rcu_batches_started,
- .completed = rcu_batches_completed,
+ .get_gp_seq = rcu_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.exp_completed = rcu_exp_batches_completed,
.async = call_rcu,
.gp_barrier = rcu_barrier,
@@ -206,8 +209,8 @@ static struct rcu_perf_ops rcu_bh_ops = {
.init = rcu_sync_perf_init,
.readlock = rcu_bh_perf_read_lock,
.readunlock = rcu_bh_perf_read_unlock,
- .started = rcu_batches_started_bh,
- .completed = rcu_batches_completed_bh,
+ .get_gp_seq = rcu_bh_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.exp_completed = rcu_exp_batches_completed_sched,
.async = call_rcu_bh,
.gp_barrier = rcu_barrier_bh,
@@ -263,8 +266,8 @@ static struct rcu_perf_ops srcu_ops = {
.init = rcu_sync_perf_init,
.readlock = srcu_perf_read_lock,
.readunlock = srcu_perf_read_unlock,
- .started = NULL,
- .completed = srcu_perf_completed,
+ .get_gp_seq = srcu_perf_completed,
+ .gp_diff = rcu_seq_diff,
.exp_completed = srcu_perf_completed,
.async = srcu_call_rcu,
.gp_barrier = srcu_rcu_barrier,
@@ -292,8 +295,8 @@ static struct rcu_perf_ops srcud_ops = {
.cleanup = srcu_sync_perf_cleanup,
.readlock = srcu_perf_read_lock,
.readunlock = srcu_perf_read_unlock,
- .started = NULL,
- .completed = srcu_perf_completed,
+ .get_gp_seq = srcu_perf_completed,
+ .gp_diff = rcu_seq_diff,
.exp_completed = srcu_perf_completed,
.async = srcu_call_rcu,
.gp_barrier = srcu_rcu_barrier,
@@ -322,8 +325,8 @@ static struct rcu_perf_ops sched_ops = {
.init = rcu_sync_perf_init,
.readlock = sched_perf_read_lock,
.readunlock = sched_perf_read_unlock,
- .started = rcu_batches_started_sched,
- .completed = rcu_batches_completed_sched,
+ .get_gp_seq = rcu_sched_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.exp_completed = rcu_exp_batches_completed_sched,
.async = call_rcu_sched,
.gp_barrier = rcu_barrier_sched,
@@ -350,8 +353,8 @@ static struct rcu_perf_ops tasks_ops = {
.init = rcu_sync_perf_init,
.readlock = tasks_perf_read_lock,
.readunlock = tasks_perf_read_unlock,
- .started = rcu_no_completed,
- .completed = rcu_no_completed,
+ .get_gp_seq = rcu_no_completed,
+ .gp_diff = rcu_seq_diff,
.async = call_rcu_tasks,
.gp_barrier = rcu_barrier_tasks,
.sync = synchronize_rcu_tasks,
@@ -359,9 +362,11 @@ static struct rcu_perf_ops tasks_ops = {
.name = "tasks"
};
-static bool __maybe_unused torturing_tasks(void)
+static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
{
- return cur_ops == &tasks_ops;
+ if (!cur_ops->gp_diff)
+ return new - old;
+ return cur_ops->gp_diff(new, old);
}
/*
@@ -444,8 +449,7 @@ rcu_perf_writer(void *arg)
b_rcu_perf_writer_started =
cur_ops->exp_completed() / 2;
} else {
- b_rcu_perf_writer_started =
- cur_ops->completed();
+ b_rcu_perf_writer_started = cur_ops->get_gp_seq();
}
}
@@ -502,7 +506,7 @@ retry:
cur_ops->exp_completed() / 2;
} else {
b_rcu_perf_writer_finished =
- cur_ops->completed();
+ cur_ops->get_gp_seq();
}
if (shutdown) {
smp_mb(); /* Assign before wake. */
@@ -527,7 +531,7 @@ retry:
return 0;
}
-static inline void
+static void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
pr_alert("%s" PERF_FLAG
@@ -582,8 +586,8 @@ rcu_perf_cleanup(void)
t_rcu_perf_writer_finished -
t_rcu_perf_writer_started,
ngps,
- b_rcu_perf_writer_finished -
- b_rcu_perf_writer_started);
+ rcuperf_seq_diff(b_rcu_perf_writer_finished,
+ b_rcu_perf_writer_started));
for (i = 0; i < nrealwriters; i++) {
if (!writer_durations)
break;
@@ -671,12 +675,11 @@ rcu_perf_init(void)
break;
}
if (i == ARRAY_SIZE(perf_ops)) {
- pr_alert("rcu-perf: invalid perf type: \"%s\"\n",
- perf_type);
+ pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
pr_alert("rcu-perf types:");
for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
- pr_alert(" %s", perf_ops[i]->name);
- pr_alert("\n");
+ pr_cont(" %s", perf_ops[i]->name);
+ pr_cont("\n");
firsterr = -EINVAL;
goto unwind;
}
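The switch from pr_alert() to pr_cont() in the error path above is about record boundaries: every pr_alert() begins a new log record, so the list of types was printed one name per line. KERN_CONT appends to the current record instead. The idiom, sketched:

#include <linux/printk.h>

static void demo_print_names(const char *const names[], int n)
{
	int i;

	pr_alert("rcu-perf types:");		/* opens the record */
	for (i = 0; i < n; i++)
		pr_cont(" %s", names[i]);	/* appends to the same line */
	pr_cont("\n");				/* terminates it */
}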
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 42fcb7f05fac..c596c6f1e457 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -22,6 +22,9 @@
*
* See also: Documentation/RCU/torture.txt
*/
+
+#define pr_fmt(fmt) fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -52,6 +55,7 @@
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
+#include <linux/sched/sysctl.h>
#include "rcu.h"
@@ -59,6 +63,19 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
+/* Bits for ->extendables field, extendables param, and related definitions. */
+#define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */
+#define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1)
+#define RCUTORTURE_RDR_BH 0x1 /* Extend readers by disabling bh. */
+#define RCUTORTURE_RDR_IRQ 0x2 /* ... disabling interrupts. */
+#define RCUTORTURE_RDR_PREEMPT 0x4 /* ... disabling preemption. */
+#define RCUTORTURE_RDR_RCU 0x8 /* ... entering another RCU reader. */
+#define RCUTORTURE_RDR_NBITS 4 /* Number of bits defined above. */
+#define RCUTORTURE_MAX_EXTEND (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | \
+ RCUTORTURE_RDR_PREEMPT)
+#define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */
+ /* Must be power of two minus one. */
+
torture_param(int, cbflood_inter_holdoff, HZ,
"Holdoff between floods (jiffies)");
torture_param(int, cbflood_intra_holdoff, 1,
@@ -66,6 +83,8 @@ torture_param(int, cbflood_intra_holdoff, 1,
torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
torture_param(int, cbflood_n_per_burst, 20000,
"# callbacks per burst in flood");
+torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
+ "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
"Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
@@ -84,7 +103,7 @@ torture_param(int, object_debug, 0,
"Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
- "Time between CPU hotplugs (s), 0=disable");
+ "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
@@ -101,7 +120,7 @@ torture_param(int, test_boost_interval, 7,
"Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
"Test support for tickless idle CPUs");
-torture_param(bool, verbose, true,
+torture_param(int, verbose, 1,
"Enable verbose debugging printk()s");
static char *torture_type = "rcu";
@@ -148,9 +167,9 @@ static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
-static long n_rcu_torture_timers;
+static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
-static long n_barrier_successes;
+static long n_barrier_successes; /* did rcu_barrier test succeed? */
static atomic_long_t n_cbfloods;
static struct list_head rcu_torture_removed;
@@ -261,8 +280,8 @@ struct rcu_torture_ops {
int (*readlock)(void);
void (*read_delay)(struct torture_random_state *rrsp);
void (*readunlock)(int idx);
- unsigned long (*started)(void);
- unsigned long (*completed)(void);
+ unsigned long (*get_gp_seq)(void);
+ unsigned long (*gp_diff)(unsigned long new, unsigned long old);
void (*deferred_free)(struct rcu_torture *p);
void (*sync)(void);
void (*exp_sync)(void);
@@ -274,6 +293,8 @@ struct rcu_torture_ops {
void (*stats)(void);
int irq_capable;
int can_boost;
+ int extendables;
+ int ext_irq_conflict;
const char *name;
};
@@ -302,10 +323,10 @@ static void rcu_read_delay(struct torture_random_state *rrsp)
* force_quiescent_state. */
if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
- started = cur_ops->completed();
+ started = cur_ops->get_gp_seq();
ts = rcu_trace_clock_local();
mdelay(longdelay_ms);
- completed = cur_ops->completed();
+ completed = cur_ops->get_gp_seq();
do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
started, completed);
}
@@ -397,8 +418,8 @@ static struct rcu_torture_ops rcu_ops = {
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay,
.readunlock = rcu_torture_read_unlock,
- .started = rcu_batches_started,
- .completed = rcu_batches_completed,
+ .get_gp_seq = rcu_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.deferred_free = rcu_torture_deferred_free,
.sync = synchronize_rcu,
.exp_sync = synchronize_rcu_expedited,
@@ -439,8 +460,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
.readlock = rcu_bh_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_bh_torture_read_unlock,
- .started = rcu_batches_started_bh,
- .completed = rcu_batches_completed_bh,
+ .get_gp_seq = rcu_bh_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.deferred_free = rcu_bh_torture_deferred_free,
.sync = synchronize_rcu_bh,
.exp_sync = synchronize_rcu_bh_expedited,
@@ -449,6 +470,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
.fqs = rcu_bh_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
+ .extendables = (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ),
+ .ext_irq_conflict = RCUTORTURE_RDR_RCU,
.name = "rcu_bh"
};
@@ -483,8 +506,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_torture_read_unlock,
- .started = rcu_no_completed,
- .completed = rcu_no_completed,
+ .get_gp_seq = rcu_no_completed,
.deferred_free = rcu_busted_torture_deferred_free,
.sync = synchronize_rcu_busted,
.exp_sync = synchronize_rcu_busted,
@@ -572,8 +594,7 @@ static struct rcu_torture_ops srcu_ops = {
.readlock = srcu_torture_read_lock,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock,
- .started = NULL,
- .completed = srcu_torture_completed,
+ .get_gp_seq = srcu_torture_completed,
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
.exp_sync = srcu_torture_synchronize_expedited,
@@ -610,8 +631,7 @@ static struct rcu_torture_ops srcud_ops = {
.readlock = srcu_torture_read_lock,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock,
- .started = NULL,
- .completed = srcu_torture_completed,
+ .get_gp_seq = srcu_torture_completed,
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
.exp_sync = srcu_torture_synchronize_expedited,
@@ -622,6 +642,26 @@ static struct rcu_torture_ops srcud_ops = {
.name = "srcud"
};
+/* As above, but broken due to inappropriate reader extension. */
+static struct rcu_torture_ops busted_srcud_ops = {
+ .ttype = SRCU_FLAVOR,
+ .init = srcu_torture_init,
+ .cleanup = srcu_torture_cleanup,
+ .readlock = srcu_torture_read_lock,
+ .read_delay = rcu_read_delay,
+ .readunlock = srcu_torture_read_unlock,
+ .get_gp_seq = srcu_torture_completed,
+ .deferred_free = srcu_torture_deferred_free,
+ .sync = srcu_torture_synchronize,
+ .exp_sync = srcu_torture_synchronize_expedited,
+ .call = srcu_torture_call,
+ .cb_barrier = srcu_torture_barrier,
+ .stats = srcu_torture_stats,
+ .irq_capable = 1,
+ .extendables = RCUTORTURE_MAX_EXTEND,
+ .name = "busted_srcud"
+};
+
/*
* Definitions for sched torture testing.
*/
@@ -648,8 +688,8 @@ static struct rcu_torture_ops sched_ops = {
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
- .started = rcu_batches_started_sched,
- .completed = rcu_batches_completed_sched,
+ .get_gp_seq = rcu_sched_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.deferred_free = rcu_sched_torture_deferred_free,
.sync = synchronize_sched,
.exp_sync = synchronize_sched_expedited,
@@ -660,6 +700,7 @@ static struct rcu_torture_ops sched_ops = {
.fqs = rcu_sched_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
+ .extendables = RCUTORTURE_MAX_EXTEND,
.name = "sched"
};
@@ -687,8 +728,7 @@ static struct rcu_torture_ops tasks_ops = {
.readlock = tasks_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = tasks_torture_read_unlock,
- .started = rcu_no_completed,
- .completed = rcu_no_completed,
+ .get_gp_seq = rcu_no_completed,
.deferred_free = rcu_tasks_torture_deferred_free,
.sync = synchronize_rcu_tasks,
.exp_sync = synchronize_rcu_tasks,
@@ -700,6 +740,13 @@ static struct rcu_torture_ops tasks_ops = {
.name = "tasks"
};
+static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
+{
+ if (!cur_ops->gp_diff)
+ return new - old;
+ return cur_ops->gp_diff(new, old);
+}
+
static bool __maybe_unused torturing_tasks(void)
{
return cur_ops == &tasks_ops;
@@ -726,6 +773,44 @@ static void rcu_torture_boost_cb(struct rcu_head *head)
smp_store_release(&rbip->inflight, 0);
}
+static int old_rt_runtime = -1;
+
+static void rcu_torture_disable_rt_throttle(void)
+{
+ /*
+ * Disable RT throttling so that rcutorture's boost threads don't get
+ * throttled. Only possible if rcutorture is built-in; otherwise the
+ * user should do this manually by setting the sched_rt_period_us and
+ * sched_rt_runtime sysctls.
+ */
+ if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
+ return;
+
+ old_rt_runtime = sysctl_sched_rt_runtime;
+ sysctl_sched_rt_runtime = -1;
+}
+
+static void rcu_torture_enable_rt_throttle(void)
+{
+ if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
+ return;
+
+ sysctl_sched_rt_runtime = old_rt_runtime;
+ old_rt_runtime = -1;
+}
+
+static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
+{
+ if (end - start > test_boost_duration * HZ - HZ / 2) {
+ VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
+ n_rcu_torture_boost_failure++;
+
+ return true; /* failed */
+ }
+
+ return false; /* passed */
+}
+
static int rcu_torture_boost(void *arg)
{
unsigned long call_rcu_time;
@@ -746,6 +831,21 @@ static int rcu_torture_boost(void *arg)
init_rcu_head_on_stack(&rbi.rcu);
/* Each pass through the following loop does one boost-test cycle. */
do {
+ /* Track whether the test already failed in this interval. */
+ bool failed = false;
+
+ /* Increment n_rcu_torture_boosts once per boost-test */
+ while (!kthread_should_stop()) {
+ if (mutex_trylock(&boost_mutex)) {
+ n_rcu_torture_boosts++;
+ mutex_unlock(&boost_mutex);
+ break;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+ if (kthread_should_stop())
+ goto checkwait;
+
/* Wait for the next test interval. */
oldstarttime = boost_starttime;
while (ULONG_CMP_LT(jiffies, oldstarttime)) {
@@ -764,11 +864,10 @@ static int rcu_torture_boost(void *arg)
/* RCU core before ->inflight = 1. */
smp_store_release(&rbi.inflight, 1);
call_rcu(&rbi.rcu, rcu_torture_boost_cb);
- if (jiffies - call_rcu_time >
- test_boost_duration * HZ - HZ / 2) {
- VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
- n_rcu_torture_boost_failure++;
- }
+ /* Check if the boost test failed */
+ failed = failed ||
+ rcu_torture_boost_failed(call_rcu_time,
+ jiffies);
call_rcu_time = jiffies;
}
stutter_wait("rcu_torture_boost");
@@ -777,6 +876,14 @@ static int rcu_torture_boost(void *arg)
}
/*
+ * If boost never happened, then inflight will always be 1; in
+ * that case the boost check in the loop above never ran, so do
+ * one here.
+ */
+ if (!failed && smp_load_acquire(&rbi.inflight))
+ rcu_torture_boost_failed(call_rcu_time, jiffies);
+
+ /*
* Set the start time of the next test interval.
* Yes, this is vulnerable to long delays, but such
* delays simply cause a false negative for the next
@@ -788,7 +895,6 @@ static int rcu_torture_boost(void *arg)
if (mutex_trylock(&boost_mutex)) {
boost_starttime = jiffies +
test_boost_interval * HZ;
- n_rcu_torture_boosts++;
mutex_unlock(&boost_mutex);
break;
}
@@ -1010,7 +1116,7 @@ rcu_torture_writer(void *arg)
break;
}
}
- rcutorture_record_progress(++rcu_torture_current_version);
+ rcu_torture_current_version++;
/* Cycle through nesting levels of rcu_expedite_gp() calls. */
if (can_expedite &&
!(torture_random(&rand) & 0xff & (!!expediting - 1))) {
@@ -1084,27 +1190,133 @@ static void rcu_torture_timer_cb(struct rcu_head *rhp)
}
/*
- * RCU torture reader from timer handler. Dereferences rcu_torture_current,
- * incrementing the corresponding element of the pipeline array. The
- * counter in the element should never be greater than 1, otherwise, the
- * RCU implementation is broken.
+ * Do one extension of an RCU read-side critical section using the
+ * current reader state in readstate (set to zero for initial entry
+ * to extended critical section), set the new state as specified by
+ * newstate (set to zero for final exit from extended critical section),
+ * and random-number-generator state in trsp. If this is neither the
+ * beginning nor the end of the critical section and if there was
+ * actually a change, do a ->read_delay().
*/
-static void rcu_torture_timer(struct timer_list *unused)
+static void rcutorture_one_extend(int *readstate, int newstate,
+ struct torture_random_state *trsp)
+{
+ int idxnew = -1;
+ int idxold = *readstate;
+ int statesnew = ~*readstate & newstate;
+ int statesold = *readstate & ~newstate;
+
+ WARN_ON_ONCE(idxold < 0);
+ WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
+
+ /* First, put new protection in place to avoid critical-section gap. */
+ if (statesnew & RCUTORTURE_RDR_BH)
+ local_bh_disable();
+ if (statesnew & RCUTORTURE_RDR_IRQ)
+ local_irq_disable();
+ if (statesnew & RCUTORTURE_RDR_PREEMPT)
+ preempt_disable();
+ if (statesnew & RCUTORTURE_RDR_RCU)
+ idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
+
+ /* Next, remove old protection, irq first due to bh conflict. */
+ if (statesold & RCUTORTURE_RDR_IRQ)
+ local_irq_enable();
+ if (statesold & RCUTORTURE_RDR_BH)
+ local_bh_enable();
+ if (statesold & RCUTORTURE_RDR_PREEMPT)
+ preempt_enable();
+ if (statesold & RCUTORTURE_RDR_RCU)
+ cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
+
+ /* Delay if neither beginning nor end and there was a change. */
+ if ((statesnew || statesold) && *readstate && newstate)
+ cur_ops->read_delay(trsp);
+
+ /* Update the reader state. */
+ if (idxnew == -1)
+ idxnew = idxold & ~RCUTORTURE_RDR_MASK;
+ WARN_ON_ONCE(idxnew < 0);
+ WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
+ *readstate = idxnew | newstate;
+ WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
+ WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
+}
+
+/* Return the biggest extendables mask given current RCU and boot parameters. */
+static int rcutorture_extend_mask_max(void)
+{
+ int mask;
+
+ WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
+ mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
+ mask = mask | RCUTORTURE_RDR_RCU;
+ return mask;
+}
+
+/* Return a random protection state mask, but with at least one bit set. */
+static int
+rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
+{
+ int mask = rcutorture_extend_mask_max();
+ unsigned long randmask1 = torture_random(trsp) >> 8;
+ unsigned long randmask2 = randmask1 >> 1;
+
+ WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
+ /* Half the time lots of bits, half the time only one bit. */
+ if (randmask1 & 0x1)
+ mask = mask & randmask2;
+ else
+ mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
+ if ((mask & RCUTORTURE_RDR_IRQ) &&
+ !(mask & RCUTORTURE_RDR_BH) &&
+ (oldmask & RCUTORTURE_RDR_BH))
+ mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */
+ if ((mask & RCUTORTURE_RDR_IRQ) &&
+ !(mask & cur_ops->ext_irq_conflict) &&
+ (oldmask & cur_ops->ext_irq_conflict))
+ mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
+ return mask ?: RCUTORTURE_RDR_RCU;
+}
+
+/*
+ * Do a randomly selected number of extensions of an existing RCU read-side
+ * critical section.
+ */
+static void rcutorture_loop_extend(int *readstate,
+ struct torture_random_state *trsp)
+{
+ int i;
+ int mask = rcutorture_extend_mask_max();
+
+ WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
+ if (!((mask - 1) & mask))
+ return; /* Current RCU flavor not extendable. */
+ i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS;
+ while (i--) {
+ mask = rcutorture_extend_mask(*readstate, trsp);
+ rcutorture_one_extend(readstate, mask, trsp);
+ }
+}
+
+/*
+ * Do one read-side critical section, returning false if there was
+ * no data to read. Can be invoked both from process context and
+ * from a timer handler.
+ */
+static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
- int idx;
unsigned long started;
unsigned long completed;
- static DEFINE_TORTURE_RANDOM(rand);
- static DEFINE_SPINLOCK(rand_lock);
+ int newstate;
struct rcu_torture *p;
int pipe_count;
+ int readstate = 0;
unsigned long long ts;
- idx = cur_ops->readlock();
- if (cur_ops->started)
- started = cur_ops->started();
- else
- started = cur_ops->completed();
+ newstate = rcutorture_extend_mask(readstate, trsp);
+ rcutorture_one_extend(&readstate, newstate, trsp);
+ started = cur_ops->get_gp_seq();
ts = rcu_trace_clock_local();
p = rcu_dereference_check(rcu_torture_current,
rcu_read_lock_bh_held() ||
@@ -1112,39 +1324,50 @@ static void rcu_torture_timer(struct timer_list *unused)
srcu_read_lock_held(srcu_ctlp) ||
torturing_tasks());
if (p == NULL) {
- /* Leave because rcu_torture_writer is not yet underway */
- cur_ops->readunlock(idx);
- return;
+ /* Wait for rcu_torture_writer to get underway */
+ rcutorture_one_extend(&readstate, 0, trsp);
+ return false;
}
if (p->rtort_mbtest == 0)
atomic_inc(&n_rcu_torture_mberror);
- spin_lock(&rand_lock);
- cur_ops->read_delay(&rand);
- n_rcu_torture_timers++;
- spin_unlock(&rand_lock);
+ rcutorture_loop_extend(&readstate, trsp);
preempt_disable();
pipe_count = p->rtort_pipe_count;
if (pipe_count > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- completed = cur_ops->completed();
+ completed = cur_ops->get_gp_seq();
if (pipe_count > 1) {
- do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
- started, completed);
+ do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
+ ts, started, completed);
rcu_ftrace_dump(DUMP_ALL);
}
__this_cpu_inc(rcu_torture_count[pipe_count]);
- completed = completed - started;
- if (cur_ops->started)
- completed++;
+ completed = rcutorture_seq_diff(completed, started);
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
__this_cpu_inc(rcu_torture_batch[completed]);
preempt_enable();
- cur_ops->readunlock(idx);
+ rcutorture_one_extend(&readstate, 0, trsp);
+ WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
+ return true;
+}
+
+static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
+
+/*
+ * RCU torture reader from timer handler. Dereferences rcu_torture_current,
+ * incrementing the corresponding element of the pipeline array. The
+ * counter in the element should never be greater than 1, otherwise, the
+ * RCU implementation is broken.
+ */
+static void rcu_torture_timer(struct timer_list *unused)
+{
+ atomic_long_inc(&n_rcu_torture_timers);
+ (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
/* Test call_rcu() invocation from interrupt handler. */
if (cur_ops->call) {
@@ -1164,14 +1387,8 @@ static void rcu_torture_timer(struct timer_list *unused)
static int
rcu_torture_reader(void *arg)
{
- unsigned long started;
- unsigned long completed;
- int idx;
DEFINE_TORTURE_RANDOM(rand);
- struct rcu_torture *p;
- int pipe_count;
struct timer_list t;
- unsigned long long ts;
VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
set_user_nice(current, MAX_NICE);
@@ -1183,49 +1400,8 @@ rcu_torture_reader(void *arg)
if (!timer_pending(&t))
mod_timer(&t, jiffies + 1);
}
- idx = cur_ops->readlock();
- if (cur_ops->started)
- started = cur_ops->started();
- else
- started = cur_ops->completed();
- ts = rcu_trace_clock_local();
- p = rcu_dereference_check(rcu_torture_current,
- rcu_read_lock_bh_held() ||
- rcu_read_lock_sched_held() ||
- srcu_read_lock_held(srcu_ctlp) ||
- torturing_tasks());
- if (p == NULL) {
- /* Wait for rcu_torture_writer to get underway */
- cur_ops->readunlock(idx);
+ if (!rcu_torture_one_read(&rand))
schedule_timeout_interruptible(HZ);
- continue;
- }
- if (p->rtort_mbtest == 0)
- atomic_inc(&n_rcu_torture_mberror);
- cur_ops->read_delay(&rand);
- preempt_disable();
- pipe_count = p->rtort_pipe_count;
- if (pipe_count > RCU_TORTURE_PIPE_LEN) {
- /* Should not happen, but... */
- pipe_count = RCU_TORTURE_PIPE_LEN;
- }
- completed = cur_ops->completed();
- if (pipe_count > 1) {
- do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
- ts, started, completed);
- rcu_ftrace_dump(DUMP_ALL);
- }
- __this_cpu_inc(rcu_torture_count[pipe_count]);
- completed = completed - started;
- if (cur_ops->started)
- completed++;
- if (completed > RCU_TORTURE_PIPE_LEN) {
- /* Should not happen, but... */
- completed = RCU_TORTURE_PIPE_LEN;
- }
- __this_cpu_inc(rcu_torture_batch[completed]);
- preempt_enable();
- cur_ops->readunlock(idx);
stutter_wait("rcu_torture_reader");
} while (!torture_must_stop());
if (irqreader && cur_ops->irq_capable) {
@@ -1282,7 +1458,7 @@ rcu_torture_stats_print(void)
pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
n_rcu_torture_boost_failure,
n_rcu_torture_boosts,
- n_rcu_torture_timers);
+ atomic_long_read(&n_rcu_torture_timers));
torture_onoff_stats();
pr_cont("barrier: %ld/%ld:%ld ",
n_barrier_successes,
@@ -1324,18 +1500,16 @@ rcu_torture_stats_print(void)
if (rtcv_snap == rcu_torture_current_version &&
rcu_torture_current != NULL) {
int __maybe_unused flags = 0;
- unsigned long __maybe_unused gpnum = 0;
- unsigned long __maybe_unused completed = 0;
+ unsigned long __maybe_unused gp_seq = 0;
rcutorture_get_gp_data(cur_ops->ttype,
- &flags, &gpnum, &completed);
+ &flags, &gp_seq);
srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
- &flags, &gpnum, &completed);
+ &flags, &gp_seq);
wtp = READ_ONCE(writer_task);
- pr_alert("??? Writer stall state %s(%d) g%lu c%lu f%#x ->state %#lx cpu %d\n",
+ pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
rcu_torture_writer_state_getname(),
- rcu_torture_writer_state,
- gpnum, completed, flags,
+ rcu_torture_writer_state, gp_seq, flags,
wtp == NULL ? ~0UL : wtp->state,
wtp == NULL ? -1 : (int)task_cpu(wtp));
if (!splatted && wtp) {
@@ -1365,7 +1539,7 @@ rcu_torture_stats(void *arg)
return 0;
}
-static inline void
+static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
pr_alert("%s" TORTURE_FLAG
@@ -1397,6 +1571,7 @@ static int rcutorture_booster_cleanup(unsigned int cpu)
mutex_lock(&boost_mutex);
t = boost_tasks[cpu];
boost_tasks[cpu] = NULL;
+ rcu_torture_enable_rt_throttle();
mutex_unlock(&boost_mutex);
/* This must be outside of the mutex, otherwise deadlock! */
@@ -1413,6 +1588,7 @@ static int rcutorture_booster_init(unsigned int cpu)
/* Don't allow time recalculation while creating a new task. */
mutex_lock(&boost_mutex);
+ rcu_torture_disable_rt_throttle();
VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
cpu_to_node(cpu),
@@ -1446,7 +1622,7 @@ static int rcu_torture_stall(void *args)
VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
}
if (!kthread_should_stop()) {
- stop_at = get_seconds() + stall_cpu;
+ stop_at = ktime_get_seconds() + stall_cpu;
/* RCU CPU stall is expected behavior in following code. */
rcu_read_lock();
if (stall_cpu_irqsoff)
@@ -1455,7 +1631,8 @@ static int rcu_torture_stall(void *args)
preempt_disable();
pr_alert("rcu_torture_stall start on CPU %d.\n",
smp_processor_id());
- while (ULONG_CMP_LT(get_seconds(), stop_at))
+ while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
+ stop_at))
continue; /* Induce RCU CPU stall warning. */
if (stall_cpu_irqsoff)
local_irq_enable();
@@ -1546,8 +1723,9 @@ static int rcu_torture_barrier(void *arg)
atomic_read(&barrier_cbs_invoked),
n_barrier_cbs);
WARN_ON_ONCE(1);
+ } else {
+ n_barrier_successes++;
}
- n_barrier_successes++;
schedule_timeout_interruptible(HZ / 10);
} while (!torture_must_stop());
torture_kthread_stopping("rcu_torture_barrier");
@@ -1610,17 +1788,39 @@ static void rcu_torture_barrier_cleanup(void)
}
}
+static bool rcu_torture_can_boost(void)
+{
+ static int boost_warn_once;
+ int prio;
+
+ if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
+ return false;
+
+ prio = rcu_get_gp_kthreads_prio();
+ if (!prio)
+ return false;
+
+ if (prio < 2) {
+ if (boost_warn_once == 1)
+ return false;
+
+ pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
+ boost_warn_once = 1;
+ return false;
+ }
+
+ return true;
+}
+
static enum cpuhp_state rcutor_hp;
static void
rcu_torture_cleanup(void)
{
int flags = 0;
- unsigned long gpnum = 0;
- unsigned long completed = 0;
+ unsigned long gp_seq = 0;
int i;
- rcutorture_record_test_transition();
if (torture_cleanup_begin()) {
if (cur_ops->cb_barrier != NULL)
cur_ops->cb_barrier();
@@ -1648,17 +1848,15 @@ rcu_torture_cleanup(void)
fakewriter_tasks = NULL;
}
- rcutorture_get_gp_data(cur_ops->ttype, &flags, &gpnum, &completed);
- srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
- &flags, &gpnum, &completed);
- pr_alert("%s: End-test grace-period state: g%lu c%lu f%#x\n",
- cur_ops->name, gpnum, completed, flags);
+ rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
+ srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
+ pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
+ cur_ops->name, gp_seq, flags);
torture_stop_kthread(rcu_torture_stats, stats_task);
torture_stop_kthread(rcu_torture_fqs, fqs_task);
for (i = 0; i < ncbflooders; i++)
torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
- if ((test_boost == 1 && cur_ops->can_boost) ||
- test_boost == 2)
+ if (rcu_torture_can_boost())
cpuhp_remove_state(rcutor_hp);
/*
@@ -1746,7 +1944,7 @@ rcu_torture_init(void)
int firsterr = 0;
static struct rcu_torture_ops *torture_ops[] = {
&rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
- &sched_ops, &tasks_ops,
+ &busted_srcud_ops, &sched_ops, &tasks_ops,
};
if (!torture_init_begin(torture_type, verbose))
@@ -1763,8 +1961,8 @@ rcu_torture_init(void)
torture_type);
pr_alert("rcu-torture types:");
for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
- pr_alert(" %s", torture_ops[i]->name);
- pr_alert("\n");
+ pr_cont(" %s", torture_ops[i]->name);
+ pr_cont("\n");
firsterr = -EINVAL;
goto unwind;
}
@@ -1882,8 +2080,7 @@ rcu_torture_init(void)
test_boost_interval = 1;
if (test_boost_duration < 2)
test_boost_duration = 2;
- if ((test_boost == 1 && cur_ops->can_boost) ||
- test_boost == 2) {
+ if (rcu_torture_can_boost()) {
boost_starttime = jiffies + test_boost_interval * HZ;
@@ -1897,7 +2094,7 @@ rcu_torture_init(void)
firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
if (firsterr)
goto unwind;
- firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ);
+ firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
if (firsterr)
goto unwind;
firsterr = rcu_torture_stall_init();
@@ -1926,7 +2123,6 @@ rcu_torture_init(void)
goto unwind;
}
}
- rcutorture_record_test_transition();
torture_init_end();
return 0;
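Note the drop order in rcutorture_one_extend() above: irq before bh. That ordering is load-bearing, since local_bh_enable() may run pending softirqs, which is illegal while hardirqs are disabled. The rule distilled (demo function hypothetical):

#include <linux/bottom_half.h>
#include <linux/irqflags.h>

static void demo_nested_protection(void)
{
	local_bh_disable();
	local_irq_disable();
	/* ... section covered by both protections ... */
	local_irq_enable();	/* must come first ... */
	local_bh_enable();	/* ... so pending softirqs may run here */
}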
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 622792abe41a..04fc2ed71af8 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -110,7 +110,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
if (!newval && READ_ONCE(sp->srcu_gp_waiting))
- swake_up(&sp->srcu_wq);
+ swake_up_one(&sp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
@@ -140,7 +140,7 @@ void srcu_drive_gp(struct work_struct *wp)
idx = sp->srcu_idx;
WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
WRITE_ONCE(sp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
- swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
+ swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
/* Invoke the callbacks we removed above. */
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index b4123d7a2cec..6c9866a854b1 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) "rcu: " fmt
+
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
@@ -390,7 +392,8 @@ void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
}
if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
WARN_ON(srcu_readers_active(sp))) {
- pr_info("%s: Active srcu_struct %p state: %d\n", __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+ pr_info("%s: Active srcu_struct %p state: %d\n",
+ __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
return; /* Caller forgot to stop doing call_srcu()? */
}
free_percpu(sp->sda);
@@ -641,6 +644,9 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
* period s. Losers must either ensure that their desired grace-period
* number is recorded on at least their leaf srcu_node structure, or they
* must take steps to invoke their own callbacks.
+ *
+ * Note that this function also does the work of srcu_funnel_exp_start(),
+ * in some cases by directly invoking it.
*/
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
unsigned long s, bool do_norm)
@@ -823,17 +829,17 @@ static void srcu_leak_callback(struct rcu_head *rhp)
* more than one CPU, this means that when "func()" is invoked, each CPU
* is guaranteed to have executed a full memory barrier since the end of
* its last corresponding SRCU read-side critical section whose beginning
- * preceded the call to call_rcu(). It also means that each CPU executing
+ * preceded the call to call_srcu(). It also means that each CPU executing
* an SRCU read-side critical section that continues beyond the start of
- * "func()" must have executed a memory barrier after the call_rcu()
+ * "func()" must have executed a memory barrier after the call_srcu()
* but before the beginning of that SRCU read-side critical section.
* Note that these guarantees include CPUs that are offline, idle, or
* executing in user mode, as well as CPUs that are executing in the kernel.
*
- * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
* resulting SRCU callback function "func()", then both CPU A and CPU
* B are guaranteed to execute a full memory barrier during the time
- * interval between the call to call_rcu() and the invocation of "func()".
+ * interval between the call to call_srcu() and the invocation of "func()".
* This guarantee applies even if CPU A and CPU B are the same CPU (but
* again only if the system has more than one CPU).
*
@@ -1246,13 +1252,12 @@ static void process_srcu(struct work_struct *work)
void srcutorture_get_gp_data(enum rcutorture_type test_type,
struct srcu_struct *sp, int *flags,
- unsigned long *gpnum, unsigned long *completed)
+ unsigned long *gp_seq)
{
if (test_type != SRCU_FLAVOR)
return;
*flags = 0;
- *completed = rcu_seq_ctr(sp->srcu_gp_seq);
- *gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
+ *gp_seq = rcu_seq_current(&sp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
@@ -1263,16 +1268,17 @@ void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
unsigned long s0 = 0, s1 = 0;
idx = sp->srcu_idx & 0x1;
- pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", tt, tf, idx);
+ pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
+ tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
for_each_possible_cpu(cpu) {
unsigned long l0, l1;
unsigned long u0, u1;
long c0, c1;
- struct srcu_data *counts;
+ struct srcu_data *sdp;
- counts = per_cpu_ptr(sp->sda, cpu);
- u0 = counts->srcu_unlock_count[!idx];
- u1 = counts->srcu_unlock_count[idx];
+ sdp = per_cpu_ptr(sp->sda, cpu);
+ u0 = sdp->srcu_unlock_count[!idx];
+ u1 = sdp->srcu_unlock_count[idx];
/*
* Make sure that a lock is always counted if the corresponding
@@ -1280,12 +1286,13 @@ void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
*/
smp_rmb();
- l0 = counts->srcu_lock_count[!idx];
- l1 = counts->srcu_lock_count[idx];
+ l0 = sdp->srcu_lock_count[!idx];
+ l1 = sdp->srcu_lock_count[idx];
c0 = l0 - u0;
c1 = l1 - u1;
- pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
+ pr_cont(" %d(%ld,%ld %1p)",
+ cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
s0 += c0;
s1 += c1;
}
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index a64eee0db39e..befc9321a89c 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -122,10 +122,8 @@ void rcu_check_callbacks(int user)
{
if (user)
rcu_sched_qs();
- else if (!in_softirq())
+ if (user || !in_softirq())
rcu_bh_qs();
- if (user)
- rcu_note_voluntary_context_switch(current);
}
/*
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index aa7cade1b9f3..0b760c1369f7 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -27,6 +27,9 @@
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU
*/
+
+#define pr_fmt(fmt) "rcu: " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -95,13 +98,13 @@ struct rcu_state sname##_state = { \
.rda = &sname##_data, \
.call = cr, \
.gp_state = RCU_GP_IDLE, \
- .gpnum = 0UL - 300UL, \
- .completed = 0UL - 300UL, \
+ .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, \
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
.name = RCU_STATE_NAME(sname), \
.abbr = sabbr, \
.exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
.exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
+ .ofl_lock = __SPIN_LOCK_UNLOCKED(sname##_state.ofl_lock), \
}
RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
@@ -155,6 +158,9 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
*/
static int rcu_scheduler_fully_active __read_mostly;
+static void
+rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
+ struct rcu_node *rnp, unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
@@ -177,6 +183,13 @@ module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);
+/* Retrieve RCU kthreads priority for rcutorture */

+int rcu_get_gp_kthreads_prio(void)
+{
+ return kthread_prio;
+}
+EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
+
/*
* Number of grace periods between delays, normalized by the duration of
* the delay. The longer the delay, the more the grace periods between
@@ -189,18 +202,6 @@ module_param(gp_cleanup_delay, int, 0444);
#define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */
/*
- * Track the rcutorture test sequence number and the update version
- * number within a given test. The rcutorture_testseq is incremented
- * on every rcutorture module load and unload, so has an odd value
- * when a test is running. The rcutorture_vernum is set to zero
- * when rcutorture starts and is incremented on each rcutorture update.
- * These variables enable correlating rcutorture output with the
- * RCU tracing information.
- */
-unsigned long rcutorture_testseq;
-unsigned long rcutorture_vernum;
-
-/*
* Compute the mask of online CPUs for the specified rcu_node structure.
* This will not be stable unless the rcu_node structure's ->lock is
* held, but the bit corresponding to the current CPU will be stable
@@ -218,7 +219,7 @@ unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
*/
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
- return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
+ return rcu_seq_state(rcu_seq_current(&rsp->gp_seq));
}
/*
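rcu_gp_in_progress() now derives "grace period in progress" from the low state bits of ->gp_seq rather than comparing ->gpnum against ->completed. A standalone model of that encoding, patterned after (but not copied from) the rcu_seq_*() helpers in kernel/rcu/rcu.h, with illustrative values:

#include <assert.h>
#include <stdio.h>

#define SEQ_CTR_SHIFT  2
#define SEQ_STATE_MASK ((1UL << SEQ_CTR_SHIFT) - 1)

static unsigned long seq_ctr(unsigned long s)   { return s >> SEQ_CTR_SHIFT; }
static unsigned long seq_state(unsigned long s) { return s & SEQ_STATE_MASK; }
static void seq_start(unsigned long *sp) { *sp += 1; }  /* idle -> in progress */
static void seq_end(unsigned long *sp)   { *sp = (*sp | SEQ_STATE_MASK) + 1; } /* round up to idle */

int main(void)
{
	unsigned long gp_seq = (0UL - 300UL) << SEQ_CTR_SHIFT; /* as in the initializer */

	assert(seq_state(gp_seq) == 0);  /* idle: rcu_gp_in_progress() would say no */
	seq_start(&gp_seq);
	assert(seq_state(gp_seq) != 0);  /* in progress */
	seq_end(&gp_seq);
	assert(seq_state(gp_seq) == 0);  /* idle again, counter advanced by one */
	printf("grace periods completed (mod 2^62): %lu\n", seq_ctr(gp_seq));
	return 0;
}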
@@ -233,7 +234,7 @@ void rcu_sched_qs(void)
if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
return;
trace_rcu_grace_period(TPS("rcu_sched"),
- __this_cpu_read(rcu_sched_data.gpnum),
+ __this_cpu_read(rcu_sched_data.gp_seq),
TPS("cpuqs"));
__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
@@ -248,7 +249,7 @@ void rcu_bh_qs(void)
RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
trace_rcu_grace_period(TPS("rcu_bh"),
- __this_cpu_read(rcu_bh_data.gpnum),
+ __this_cpu_read(rcu_bh_data.gp_seq),
TPS("cpuqs"));
__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
}
@@ -380,20 +381,6 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
}
/*
- * Do a double-increment of the ->dynticks counter to emulate a
- * momentary idle-CPU quiescent state.
- */
-static void rcu_dynticks_momentary_idle(void)
-{
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
- &rdtp->dynticks);
-
- /* It is illegal to call this from idle state. */
- WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
-}
-
-/*
* Set the special (bottom) bit of the specified CPU so that it
* will take special action (such as flushing its TLB) on the
* next exit from an extended quiescent state. Returns true if
@@ -424,12 +411,17 @@ bool rcu_eqs_special_set(int cpu)
*
* We inform the RCU core by emulating a zero-duration dyntick-idle period.
*
- * The caller must have disabled interrupts.
+ * The caller must have disabled interrupts and must not be idle.
*/
static void rcu_momentary_dyntick_idle(void)
{
+ struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ int special;
+
raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
- rcu_dynticks_momentary_idle();
+ special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+ /* It is illegal to call this from idle state. */
+ WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
}
/*
@@ -451,7 +443,7 @@ void rcu_note_context_switch(bool preempt)
rcu_momentary_dyntick_idle();
this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
if (!preempt)
- rcu_note_voluntary_context_switch_lite(current);
+ rcu_tasks_qs(current);
out:
trace_rcu_utilization(TPS("End context switch"));
barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -513,8 +505,38 @@ static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
-module_param(jiffies_till_first_fqs, ulong, 0644);
-module_param(jiffies_till_next_fqs, ulong, 0644);
+static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
+{
+ ulong j;
+ int ret = kstrtoul(val, 0, &j);
+
+ if (!ret)
+ WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
+ return ret;
+}
+
+static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
+{
+ ulong j;
+ int ret = kstrtoul(val, 0, &j);
+
+ if (!ret)
+ WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
+ return ret;
+}
+
+static struct kernel_param_ops first_fqs_jiffies_ops = {
+ .set = param_set_first_fqs_jiffies,
+ .get = param_get_ulong,
+};
+
+static struct kernel_param_ops next_fqs_jiffies_ops = {
+ .set = param_set_next_fqs_jiffies,
+ .get = param_get_ulong,
+};
+
+module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
+module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);
/*
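The new param_set_*_fqs_jiffies() setters validate at write time, clamping to at most HZ (and at least 1 for the next-FQS interval), which lets the removed in-loop fixups in rcu_gp_kthread() further down go away. A userspace sketch of the same parse-and-clamp logic, with an illustrative HZ:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define HZ 250UL

static int set_next_fqs_jiffies(const char *val, unsigned long *out)
{
	char *end;
	unsigned long j;

	errno = 0;
	j = strtoul(val, &end, 0);
	if (errno || end == val || *end)
		return -EINVAL;              /* reject bad input, don't silently fix */
	*out = (j > HZ) ? HZ : (j ? j : 1);  /* clamp to [1, HZ] */
	return 0;
}

int main(void)
{
	unsigned long j;

	if (!set_next_fqs_jiffies("100000", &j))
		printf("clamped to %lu\n", j);  /* prints 250 */
	if (!set_next_fqs_jiffies("0", &j))
		printf("clamped to %lu\n", j);  /* prints 1 */
	return 0;
}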
@@ -529,58 +551,31 @@ static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(void);
/*
- * Return the number of RCU batches started thus far for debug & stats.
+ * Return the number of RCU GPs completed thus far for debug & stats.
*/
-unsigned long rcu_batches_started(void)
+unsigned long rcu_get_gp_seq(void)
{
- return rcu_state_p->gpnum;
+ return READ_ONCE(rcu_state_p->gp_seq);
}
-EXPORT_SYMBOL_GPL(rcu_batches_started);
+EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
/*
- * Return the number of RCU-sched batches started thus far for debug & stats.
+ * Return the number of RCU-sched GPs completed thus far for debug & stats.
*/
-unsigned long rcu_batches_started_sched(void)
+unsigned long rcu_sched_get_gp_seq(void)
{
- return rcu_sched_state.gpnum;
+ return READ_ONCE(rcu_sched_state.gp_seq);
}
-EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
+EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
/*
- * Return the number of RCU BH batches started thus far for debug & stats.
+ * Return the number of RCU-bh GPs completed thus far for debug & stats.
*/
-unsigned long rcu_batches_started_bh(void)
+unsigned long rcu_bh_get_gp_seq(void)
{
- return rcu_bh_state.gpnum;
+ return READ_ONCE(rcu_bh_state.gp_seq);
}
-EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
-
-/*
- * Return the number of RCU batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed(void)
-{
- return rcu_state_p->completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
-/*
- * Return the number of RCU-sched batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed_sched(void)
-{
- return rcu_sched_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
-
-/*
- * Return the number of RCU BH batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed_bh(void)
-{
- return rcu_bh_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
/*
* Return the number of RCU expedited batches completed thus far for
@@ -636,35 +631,42 @@ EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
*/
void show_rcu_gp_kthreads(void)
{
+ int cpu;
+ struct rcu_data *rdp;
+ struct rcu_node *rnp;
struct rcu_state *rsp;
for_each_rcu_flavor(rsp) {
pr_info("%s: wait state: %d ->state: %#lx\n",
rsp->name, rsp->gp_state, rsp->gp_kthread->state);
+ rcu_for_each_node_breadth_first(rsp, rnp) {
+ if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed))
+ continue;
+ pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
+ rnp->grplo, rnp->grphi, rnp->gp_seq,
+ rnp->gp_seq_needed);
+ if (!rcu_is_leaf_node(rnp))
+ continue;
+ for_each_leaf_node_possible_cpu(rnp, cpu) {
+ rdp = per_cpu_ptr(rsp->rda, cpu);
+ if (rdp->gpwrap ||
+ ULONG_CMP_GE(rsp->gp_seq,
+ rdp->gp_seq_needed))
+ continue;
+ pr_info("\tcpu %d ->gp_seq_needed %lu\n",
+ cpu, rdp->gp_seq_needed);
+ }
+ }
/* sched_show_task(rsp->gp_kthread); */
}
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
/*
- * Record the number of times rcutorture tests have been initiated and
- * terminated. This information allows the debugfs tracing stats to be
- * correlated to the rcutorture messages, even when the rcutorture module
- * is being repeatedly loaded and unloaded. In other words, we cannot
- * store this state in rcutorture itself.
- */
-void rcutorture_record_test_transition(void)
-{
- rcutorture_testseq++;
- rcutorture_vernum = 0;
-}
-EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
-
-/*
* Send along grace-period-related data for rcutorture diagnostics.
*/
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
- unsigned long *gpnum, unsigned long *completed)
+ unsigned long *gp_seq)
{
struct rcu_state *rsp = NULL;
@@ -684,23 +686,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
if (rsp == NULL)
return;
*flags = READ_ONCE(rsp->gp_flags);
- *gpnum = READ_ONCE(rsp->gpnum);
- *completed = READ_ONCE(rsp->completed);
+ *gp_seq = rcu_seq_current(&rsp->gp_seq);
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
/*
- * Record the number of writer passes through the current rcutorture test.
- * This is also used to correlate debugfs tracing stats with the rcutorture
- * messages.
- */
-void rcutorture_record_progress(unsigned long vernum)
-{
- rcutorture_vernum++;
-}
-EXPORT_SYMBOL_GPL(rcutorture_record_progress);
-
-/*
* Return the root node of the specified rcu_state structure.
*/
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
@@ -1059,41 +1049,41 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
/*
- * Is the current CPU online? Disable preemption to avoid false positives
- * that could otherwise happen due to the current CPU number being sampled,
- * this task being preempted, its old CPU being taken offline, resuming
- * on some other CPU, then determining that its old CPU is now offline.
- * It is OK to use RCU on an offline processor during initial boot, hence
- * the check for rcu_scheduler_fully_active. Note also that it is OK
- * for a CPU coming online to use RCU for one jiffy prior to marking itself
- * online in the cpu_online_mask. Similarly, it is OK for a CPU going
- * offline to continue to use RCU for one jiffy after marking itself
- * offline in the cpu_online_mask. This leniency is necessary given the
- * non-atomic nature of the online and offline processing, for example,
- * the fact that a CPU enters the scheduler after completing the teardown
- * of the CPU.
+ * Is the current CPU online as far as RCU is concerned?
*
- * This is also why RCU internally marks CPUs online during in the
- * preparation phase and offline after the CPU has been taken down.
+ * Disable preemption to avoid false positives that could otherwise
+ * happen due to the current CPU number being sampled, this task being
+ * preempted, its old CPU being taken offline, resuming on some other CPU,
+ * then determining that its old CPU is now offline. Because there are
+ * multiple flavors of RCU, and because this function can be called in the
+ * midst of updating the flavors while a given CPU coming online or going
+ * offline, it is necessary to check all flavors. If any of the flavors
+ * believe that given CPU is online, it is considered to be online.
*
- * Disable checking if in an NMI handler because we cannot safely report
- * errors from NMI handlers anyway.
+ * Disable checking if in an NMI handler because we cannot safely
+ * report errors from NMI handlers anyway. In addition, it is OK to use
+ * RCU on an offline processor during initial boot, hence the check for
+ * rcu_scheduler_fully_active.
*/
bool rcu_lockdep_current_cpu_online(void)
{
struct rcu_data *rdp;
struct rcu_node *rnp;
- bool ret;
+ struct rcu_state *rsp;
- if (in_nmi())
+ if (in_nmi() || !rcu_scheduler_fully_active)
return true;
preempt_disable();
- rdp = this_cpu_ptr(&rcu_sched_data);
- rnp = rdp->mynode;
- ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
- !rcu_scheduler_fully_active;
+ for_each_rcu_flavor(rsp) {
+ rdp = this_cpu_ptr(rsp->rda);
+ rnp = rdp->mynode;
+ if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) {
+ preempt_enable();
+ return true;
+ }
+ }
preempt_enable();
- return ret;
+ return false;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
@@ -1115,17 +1105,18 @@ static int rcu_is_cpu_rrupt_from_idle(void)
/*
* We are reporting a quiescent state on behalf of some other CPU, so
* it is our responsibility to check for and handle potential overflow
- * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
+ * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
* After all, the CPU might be in deep idle state, and thus executing no
* code whatsoever.
*/
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
raw_lockdep_assert_held_rcu_node(rnp);
- if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
+ if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
+ rnp->gp_seq))
WRITE_ONCE(rdp->gpwrap, true);
- if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
- rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
+ if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
+ rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}
/*
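rcu_gpnum_ovf() depends on wrap-tolerant counter comparison: the kernel's ULONG_CMP_LT()/ULONG_CMP_GE() treat unsigned longs as a circular space. A standalone demonstration of the same idiom, reproduced here for illustration:

#include <assert.h>
#include <limits.h>

#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long older = ULONG_MAX - 1;   /* counter about to wrap */
	unsigned long newer = older + 10;      /* wrapped: numerically tiny */

	assert(ULONG_CMP_LT(older, newer));    /* still ordered correctly */
	assert(!ULONG_CMP_LT(newer, older));
	return 0;
}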
@@ -1137,7 +1128,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
+ trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
rcu_gpnum_ovf(rdp->mynode, rdp);
return 1;
}
@@ -1159,7 +1150,7 @@ static void rcu_iw_handler(struct irq_work *iwp)
rnp = rdp->mynode;
raw_spin_lock_rcu_node(rnp);
if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
- rdp->rcu_iw_gpnum = rnp->gpnum;
+ rdp->rcu_iw_gp_seq = rnp->gp_seq;
rdp->rcu_iw_pending = false;
}
raw_spin_unlock_rcu_node(rnp);
@@ -1187,7 +1178,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
* of the current RCU grace period.
*/
if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
+ trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
rdp->dynticks_fqs++;
rcu_gpnum_ovf(rnp, rdp);
return 1;
@@ -1203,8 +1194,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
- READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
+ rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) {
+ trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("rqc"));
rcu_gpnum_ovf(rnp, rdp);
return 1;
} else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
@@ -1212,12 +1203,25 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
smp_store_release(ruqp, true);
}
- /* Check for the CPU being offline. */
- if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
- rdp->offline_fqs++;
- rcu_gpnum_ovf(rnp, rdp);
- return 1;
+ /* If waiting too long on an offline CPU, complain. */
+ if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
+ time_after(jiffies, rdp->rsp->gp_start + HZ)) {
+ bool onl;
+ struct rcu_node *rnp1;
+
+ WARN_ON(1); /* Offline CPUs are supposed to report QS! */
+ pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
+ __func__, rnp->grplo, rnp->grphi, rnp->level,
+ (long)rnp->gp_seq, (long)rnp->completedqs);
+ for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
+ pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
+ __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
+ onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
+ pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
+ __func__, rdp->cpu, ".o"[onl],
+ (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
+ (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
+ return 1; /* Break things loose after complaining. */
}
/*
@@ -1256,11 +1260,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
resched_cpu(rdp->cpu);
if (IS_ENABLED(CONFIG_IRQ_WORK) &&
- !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
+ !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
(rnp->ffmask & rdp->grpmask)) {
init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
rdp->rcu_iw_pending = true;
- rdp->rcu_iw_gpnum = rnp->gpnum;
+ rdp->rcu_iw_gp_seq = rnp->gp_seq;
irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
}
}
@@ -1274,9 +1278,9 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
unsigned long j1;
rsp->gp_start = j;
- smp_wmb(); /* Record start time before stall time. */
j1 = rcu_jiffies_till_stall_check();
- WRITE_ONCE(rsp->jiffies_stall, j + j1);
+ /* Record ->gp_start before ->jiffies_stall. */
+ smp_store_release(&rsp->jiffies_stall, j + j1); /* ^^^ */
rsp->jiffies_resched = j + j1 / 2;
rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
}
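The hunk above replaces a bare smp_wmb() with smp_store_release(), so the ->gp_start write is published before ->jiffies_stall; check_cpu_stall() reads them in the opposite order. A C11 analogue of that publish/consume pairing (the kernel itself uses smp_store_release()/smp_rmb(); this is only a model):

#include <stdatomic.h>

static atomic_ulong gp_start;
static atomic_ulong jiffies_stall;

void record_gp_stall_check_time(unsigned long j, unsigned long j1)
{
	atomic_store_explicit(&gp_start, j, memory_order_relaxed);
	atomic_store_explicit(&jiffies_stall, j + j1,
			      memory_order_release);   /* publishes gp_start */
}

unsigned long read_side(unsigned long *gps)
{
	unsigned long js = atomic_load_explicit(&jiffies_stall,
						memory_order_acquire);
	/* If js is the new value, gp_start is guaranteed to be new too. */
	*gps = atomic_load_explicit(&gp_start, memory_order_relaxed);
	return js;
}

int main(void)
{
	unsigned long gps;

	record_gp_stall_check_time(1000, 30);
	return (int)(read_side(&gps) - 1030);  /* exits 0 */
}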
@@ -1302,9 +1306,9 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
j = jiffies;
gpa = READ_ONCE(rsp->gp_activity);
if (j - gpa > 2 * HZ) {
- pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
+ pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
rsp->name, j - gpa,
- rsp->gpnum, rsp->completed,
+ (long)rcu_seq_current(&rsp->gp_seq),
rsp->gp_flags,
gp_state_getname(rsp->gp_state), rsp->gp_state,
rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
@@ -1359,16 +1363,15 @@ static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
}
}
-static inline void panic_on_rcu_stall(void)
+static void panic_on_rcu_stall(void)
{
if (sysctl_panic_on_rcu_stall)
panic("RCU Stall\n");
}
-static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
+static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
{
int cpu;
- long delta;
unsigned long flags;
unsigned long gpa;
unsigned long j;
@@ -1381,25 +1384,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
if (rcu_cpu_stall_suppress)
return;
- /* Only let one CPU complain about others per time interval. */
-
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- delta = jiffies - READ_ONCE(rsp->jiffies_stall);
- if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- return;
- }
- WRITE_ONCE(rsp->jiffies_stall,
- jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
/*
* OK, time to rat on our buddy...
* See Documentation/RCU/stallwarn.txt for info on how to debug
* RCU CPU stall warnings.
*/
- pr_err("INFO: %s detected stalls on CPUs/tasks:",
- rsp->name);
+ pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name);
print_cpu_stall_info_begin();
rcu_for_each_leaf_node(rsp, rnp) {
raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -1418,17 +1408,16 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
for_each_possible_cpu(cpu)
totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
cpu)->cblist);
- pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
+ pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
smp_processor_id(), (long)(jiffies - rsp->gp_start),
- (long)rsp->gpnum, (long)rsp->completed, totqlen);
+ (long)rcu_seq_current(&rsp->gp_seq), totqlen);
if (ndetected) {
rcu_dump_cpu_stacks(rsp);
/* Complain about tasks blocking the grace period. */
rcu_print_detail_task_stall(rsp);
} else {
- if (READ_ONCE(rsp->gpnum) != gpnum ||
- READ_ONCE(rsp->completed) == gpnum) {
+ if (rcu_seq_current(&rsp->gp_seq) != gp_seq) {
pr_err("INFO: Stall ended before state dump start\n");
} else {
j = jiffies;
@@ -1441,6 +1430,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
sched_show_task(current);
}
}
+ /* Rewrite if needed in case of slow consoles. */
+ if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
+ WRITE_ONCE(rsp->jiffies_stall,
+ jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
rcu_check_gp_kthread_starvation(rsp);
@@ -1476,15 +1469,16 @@ static void print_cpu_stall(struct rcu_state *rsp)
for_each_possible_cpu(cpu)
totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
cpu)->cblist);
- pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
+ pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
jiffies - rsp->gp_start,
- (long)rsp->gpnum, (long)rsp->completed, totqlen);
+ (long)rcu_seq_current(&rsp->gp_seq), totqlen);
rcu_check_gp_kthread_starvation(rsp);
rcu_dump_cpu_stacks(rsp);
raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ /* Rewrite if needed in case of slow consoles. */
if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
WRITE_ONCE(rsp->jiffies_stall,
jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
@@ -1504,10 +1498,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
- unsigned long completed;
- unsigned long gpnum;
+ unsigned long gs1;
+ unsigned long gs2;
unsigned long gps;
unsigned long j;
+ unsigned long jn;
unsigned long js;
struct rcu_node *rnp;
@@ -1520,43 +1515,46 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
/*
* Lots of memory barriers to reject false positives.
*
- * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
- * then rsp->gp_start, and finally rsp->completed. These values
- * are updated in the opposite order with memory barriers (or
- * equivalent) during grace-period initialization and cleanup.
- * Now, a false positive can occur if we get an new value of
- * rsp->gp_start and a old value of rsp->jiffies_stall. But given
- * the memory barriers, the only way that this can happen is if one
- * grace period ends and another starts between these two fetches.
- * Detect this by comparing rsp->completed with the previous fetch
- * from rsp->gpnum.
+ * The idea is to pick up rsp->gp_seq, then rsp->jiffies_stall,
+ * then rsp->gp_start, and finally another copy of rsp->gp_seq.
+ * These values are updated in the opposite order with memory
+ * barriers (or equivalent) during grace-period initialization
+ * and cleanup. Now, a false positive can occur if we get a new
+ * value of rsp->gp_start and an old value of rsp->jiffies_stall.
+ * But given the memory barriers, the only way that this can happen
+ * is if one grace period ends and another starts between these
+ * two fetches. This is detected by comparing the second fetch
+ * of rsp->gp_seq with the previous fetch from rsp->gp_seq.
*
* Given this check, comparisons of jiffies, rsp->jiffies_stall,
* and rsp->gp_start suffice to forestall false positives.
*/
- gpnum = READ_ONCE(rsp->gpnum);
- smp_rmb(); /* Pick up ->gpnum first... */
+ gs1 = READ_ONCE(rsp->gp_seq);
+ smp_rmb(); /* Pick up ->gp_seq first... */
js = READ_ONCE(rsp->jiffies_stall);
smp_rmb(); /* ...then ->jiffies_stall before the rest... */
gps = READ_ONCE(rsp->gp_start);
- smp_rmb(); /* ...and finally ->gp_start before ->completed. */
- completed = READ_ONCE(rsp->completed);
- if (ULONG_CMP_GE(completed, gpnum) ||
+ smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
+ gs2 = READ_ONCE(rsp->gp_seq);
+ if (gs1 != gs2 ||
ULONG_CMP_LT(j, js) ||
ULONG_CMP_GE(gps, js))
return; /* No stall or GP completed since entering function. */
rnp = rdp->mynode;
+ jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
if (rcu_gp_in_progress(rsp) &&
- (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
+ (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
+ cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
/* We haven't checked in, so go dump stack. */
print_cpu_stall(rsp);
} else if (rcu_gp_in_progress(rsp) &&
- ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
+ ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
+ cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
/* They had a few time units to dump stack, so complain. */
- print_other_cpu_stall(rsp, gpnum);
+ print_other_cpu_stall(rsp, gs2);
}
}
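The added cmpxchg() gates make claiming a stall report atomic: of all the CPUs that sampled the same ->jiffies_stall value js, only the one that successfully advances it to jn gets to print; the rest see the updated value and back off. A C11 analogue under stdatomic assumptions:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong jiffies_stall;

static bool claim_stall_report(unsigned long js, unsigned long jn)
{
	unsigned long expected = js;

	/* Succeeds for exactly one caller per sampled value of js. */
	return atomic_compare_exchange_strong(&jiffies_stall, &expected, jn);
}

int main(void)
{
	atomic_store(&jiffies_stall, 100);
	/* First claim wins; a second claim against the stale value loses. */
	return claim_stall_report(100, 400) && !claim_stall_report(100, 500) ? 0 : 1;
}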
@@ -1577,123 +1575,99 @@ void rcu_cpu_stall_reset(void)
WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
}
-/*
- * Determine the value that ->completed will have at the end of the
- * next subsequent grace period. This is used to tag callbacks so that
- * a CPU can invoke callbacks in a timely fashion even if that CPU has
- * been dyntick-idle for an extended period with callbacks under the
- * influence of RCU_FAST_NO_HZ.
- *
- * The caller must hold rnp->lock with interrupts disabled.
- */
-static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
- struct rcu_node *rnp)
-{
- raw_lockdep_assert_held_rcu_node(rnp);
-
- /*
- * If RCU is idle, we just wait for the next grace period.
- * But we can only be sure that RCU is idle if we are looking
- * at the root rcu_node structure -- otherwise, a new grace
- * period might have started, but just not yet gotten around
- * to initializing the current non-root rcu_node structure.
- */
- if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
- return rnp->completed + 1;
-
- /*
- * If the current rcu_node structure believes that RCU is
- * idle, and if the rcu_state structure does not yet reflect
- * the start of a new grace period, then the next grace period
- * will suffice. The memory barrier is needed to accurately
- * sample the rsp->gpnum, and pairs with the second lock
- * acquisition in rcu_gp_init(), which is augmented with
- * smp_mb__after_unlock_lock() for this purpose.
- */
- if (rnp->gpnum == rnp->completed) {
- smp_mb(); /* See above block comment. */
- if (READ_ONCE(rsp->gpnum) == rnp->completed)
- return rnp->completed + 1;
- }
-
- /*
- * Otherwise, wait for a possible partial grace period and
- * then the subsequent full grace period.
- */
- return rnp->completed + 2;
-}
-
/* Trace-event wrapper function for trace_rcu_future_grace_period. */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
- unsigned long c, const char *s)
+ unsigned long gp_seq_req, const char *s)
{
- trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
- rnp->completed, c, rnp->level,
- rnp->grplo, rnp->grphi, s);
+ trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, gp_seq_req,
+ rnp->level, rnp->grplo, rnp->grphi, s);
}
/*
+ * rcu_start_this_gp - Request the start of a particular grace period
+ * @rnp_start: The leaf node of the CPU from which to start.
+ * @rdp: The rcu_data corresponding to the CPU from which to start.
+ * @gp_seq_req: The gp_seq of the grace period to start.
+ *
* Start the specified grace period, as needed to handle newly arrived
* callbacks. The required future grace periods are recorded in each
- * rcu_node structure's ->need_future_gp[] field. Returns true if there
+ * rcu_node structure's ->gp_seq_needed field. Returns true if there
* is reason to awaken the grace-period kthread.
*
* The caller must hold the specified rcu_node structure's ->lock, which
* is why the caller is responsible for waking the grace-period kthread.
+ *
+ * Returns true if the GP kthread needs to be awakened, false otherwise.
*/
-static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
- unsigned long c)
+static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
+ unsigned long gp_seq_req)
{
bool ret = false;
struct rcu_state *rsp = rdp->rsp;
- struct rcu_node *rnp_root;
+ struct rcu_node *rnp;
/*
* Use funnel locking to either acquire the root rcu_node
* structure's lock or bail out if the need for this grace period
- * has already been recorded -- or has already started. If there
- * is already a grace period in progress in a non-leaf node, no
- * recording is needed because the end of the grace period will
- * scan the leaf rcu_node structures. Note that rnp->lock must
- * not be released.
+ * has already been recorded -- or if that grace period has in
+ * fact already started. If there is already a grace period in
+ * progress in a non-leaf node, no recording is needed because the
+ * end of the grace period will scan the leaf rcu_node structures.
+ * Note that rnp_start->lock must not be released.
*/
- raw_lockdep_assert_held_rcu_node(rnp);
- trace_rcu_this_gp(rnp, rdp, c, TPS("Startleaf"));
- for (rnp_root = rnp; 1; rnp_root = rnp_root->parent) {
- if (rnp_root != rnp)
- raw_spin_lock_rcu_node(rnp_root);
- WARN_ON_ONCE(ULONG_CMP_LT(rnp_root->gpnum +
- need_future_gp_mask(), c));
- if (need_future_gp_element(rnp_root, c) ||
- ULONG_CMP_GE(rnp_root->gpnum, c) ||
- (rnp != rnp_root &&
- rnp_root->gpnum != rnp_root->completed)) {
- trace_rcu_this_gp(rnp_root, rdp, c, TPS("Prestarted"));
+ raw_lockdep_assert_held_rcu_node(rnp_start);
+ trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
+ for (rnp = rnp_start; 1; rnp = rnp->parent) {
+ if (rnp != rnp_start)
+ raw_spin_lock_rcu_node(rnp);
+ if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
+ rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
+ (rnp != rnp_start &&
+ rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
+ trace_rcu_this_gp(rnp, rdp, gp_seq_req,
+ TPS("Prestarted"));
goto unlock_out;
}
- need_future_gp_element(rnp_root, c) = true;
- if (rnp_root != rnp && rnp_root->parent != NULL)
- raw_spin_unlock_rcu_node(rnp_root);
- if (!rnp_root->parent)
+ rnp->gp_seq_needed = gp_seq_req;
+ if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
+ /*
+ * We just marked the leaf or internal node, and a
+ * grace period is in progress, which means that
+ * rcu_gp_cleanup() will see the marking. Bail to
+ * reduce contention.
+ */
+ trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
+ TPS("Startedleaf"));
+ goto unlock_out;
+ }
+ if (rnp != rnp_start && rnp->parent != NULL)
+ raw_spin_unlock_rcu_node(rnp);
+ if (!rnp->parent)
break; /* At root, and perhaps also leaf. */
}
/* If GP already in progress, just leave, otherwise start one. */
- if (rnp_root->gpnum != rnp_root->completed) {
- trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedleafroot"));
+ if (rcu_gp_in_progress(rsp)) {
+ trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
goto unlock_out;
}
- trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedroot"));
+ trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
+ rsp->gp_req_activity = jiffies;
if (!rsp->gp_kthread) {
- trace_rcu_this_gp(rnp_root, rdp, c, TPS("NoGPkthread"));
+ trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
goto unlock_out;
}
- trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum), TPS("newreq"));
+ trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), TPS("newreq"));
ret = true; /* Caller must wake GP kthread. */
unlock_out:
- if (rnp != rnp_root)
- raw_spin_unlock_rcu_node(rnp_root);
+ /* Push furthest requested GP to leaf node and rcu_data structure. */
+ if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
+ rnp_start->gp_seq_needed = rnp->gp_seq_needed;
+ rdp->gp_seq_needed = rnp->gp_seq_needed;
+ }
+ if (rnp != rnp_start)
+ raw_spin_unlock_rcu_node(rnp);
return ret;
}
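rcu_start_this_gp() above is an instance of funnel locking: walk leaf-to-root, lock each level, record the request, and bail early once some level already covers it, so concurrent requests for the same grace period mostly collide low in the tree. A generic sketch of the pattern, with a hypothetical node type and pthread locks standing in for raw_spin_lock_rcu_node(); the kernel version additionally uses wrap-safe sequence comparisons:

#include <pthread.h>
#include <stdbool.h>

struct node {
	pthread_mutex_t lock;
	unsigned long seq_needed;   /* furthest grace period requested here */
	struct node *parent;
};

/* Caller holds start->lock, mirroring the rnp_start->lock requirement. */
static bool funnel_request(struct node *start, unsigned long seq_req)
{
	struct node *n;
	bool started = false;

	for (n = start; n; n = n->parent) {
		if (n != start)
			pthread_mutex_lock(&n->lock);
		if (n->seq_needed >= seq_req) {     /* already recorded: bail */
			if (n != start)
				pthread_mutex_unlock(&n->lock);
			return false;
		}
		n->seq_needed = seq_req;            /* record and keep climbing */
		if (!n->parent) {                   /* reached root: start GP */
			started = true;
			if (n != start)
				pthread_mutex_unlock(&n->lock);
			break;
		}
		if (n != start)
			pthread_mutex_unlock(&n->lock);
	}
	return started;
}

int main(void)
{
	struct node root = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };
	struct node leaf = { PTHREAD_MUTEX_INITIALIZER, 0, &root };
	bool started;

	pthread_mutex_lock(&leaf.lock);         /* caller-held, as required */
	started = funnel_request(&leaf, 4);
	pthread_mutex_unlock(&leaf.lock);
	return started ? 0 : 1;
}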
@@ -1703,13 +1677,13 @@ unlock_out:
*/
static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
- unsigned long c = rnp->completed;
bool needmore;
struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
- need_future_gp_element(rnp, c) = false;
- needmore = need_any_future_gp(rnp);
- trace_rcu_this_gp(rnp, rdp, c,
+ needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
+ if (!needmore)
+ rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
+ trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
needmore ? TPS("CleanupMore") : TPS("Cleanup"));
return needmore;
}
@@ -1727,25 +1701,25 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
!READ_ONCE(rsp->gp_flags) ||
!rsp->gp_kthread)
return;
- swake_up(&rsp->gp_wq);
+ swake_up_one(&rsp->gp_wq);
}
/*
- * If there is room, assign a ->completed number to any callbacks on
- * this CPU that have not already been assigned. Also accelerate any
- * callbacks that were previously assigned a ->completed number that has
- * since proven to be too conservative, which can happen if callbacks get
- * assigned a ->completed number while RCU is idle, but with reference to
- * a non-root rcu_node structure. This function is idempotent, so it does
- * not hurt to call it repeatedly. Returns an flag saying that we should
- * awaken the RCU grace-period kthread.
+ * If there is room, assign a ->gp_seq number to any callbacks on this
+ * CPU that have not already been assigned. Also accelerate any callbacks
+ * that were previously assigned a ->gp_seq number that has since proven
+ * to be too conservative, which can happen if callbacks get assigned a
+ * ->gp_seq number while RCU is idle, but with reference to a non-root
+ * rcu_node structure. This function is idempotent, so it does not hurt
+ * to call it repeatedly. Returns a flag saying that we should awaken
+ * the RCU grace-period kthread.
*
* The caller must hold rnp->lock with interrupts disabled.
*/
static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
struct rcu_data *rdp)
{
- unsigned long c;
+ unsigned long gp_seq_req;
bool ret = false;
raw_lockdep_assert_held_rcu_node(rnp);
@@ -1764,22 +1738,50 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
* accelerating callback invocation to an earlier grace-period
* number.
*/
- c = rcu_cbs_completed(rsp, rnp);
- if (rcu_segcblist_accelerate(&rdp->cblist, c))
- ret = rcu_start_this_gp(rnp, rdp, c);
+ gp_seq_req = rcu_seq_snap(&rsp->gp_seq);
+ if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
+ ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
/* Trace depending on how much we were able to accelerate. */
if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
- trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
+ trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccWaitCB"));
else
- trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
+ trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccReadyCB"));
return ret;
}
/*
+ * Similar to rcu_accelerate_cbs(), but does not require that the leaf
+ * rcu_node structure's ->lock be held. It consults the cached value
+ * of ->gp_seq_needed in the rcu_data structure, and if that indicates
+ * that a new grace-period request should be made, invokes rcu_accelerate_cbs()
+ * while holding the leaf rcu_node structure's ->lock.
+ */
+static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
+ struct rcu_node *rnp,
+ struct rcu_data *rdp)
+{
+ unsigned long c;
+ bool needwake;
+
+ lockdep_assert_irqs_disabled();
+ c = rcu_seq_snap(&rsp->gp_seq);
+ if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+ /* Old request still live, so mark recent callbacks. */
+ (void)rcu_segcblist_accelerate(&rdp->cblist, c);
+ return;
+ }
+ raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+ needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+ raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
+ if (needwake)
+ rcu_gp_kthread_wake(rsp);
+}
+
+/*
* Move any callbacks whose grace period has completed to the
* RCU_DONE_TAIL sublist, then compact the remaining sublists and
- * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
+ * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
* sublist. This function is idempotent, so it does not hurt to
* invoke it repeatedly. As long as it is not invoked -too- often...
* Returns true if the RCU grace-period kthread needs to be awakened.
@@ -1796,10 +1798,10 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
return false;
/*
- * Find all callbacks whose ->completed numbers indicate that they
+ * Find all callbacks whose ->gp_seq numbers indicate that they
* are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
*/
- rcu_segcblist_advance(&rdp->cblist, rnp->completed);
+ rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
/* Classify any remaining callbacks. */
return rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1819,39 +1821,38 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
raw_lockdep_assert_held_rcu_node(rnp);
- /* Handle the ends of any preceding grace periods first. */
- if (rdp->completed == rnp->completed &&
- !unlikely(READ_ONCE(rdp->gpwrap))) {
-
- /* No grace period end, so just accelerate recent callbacks. */
- ret = rcu_accelerate_cbs(rsp, rnp, rdp);
+ if (rdp->gp_seq == rnp->gp_seq)
+ return false; /* Nothing to do. */
+ /* Handle the ends of any preceding grace periods first. */
+ if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
+ unlikely(READ_ONCE(rdp->gpwrap))) {
+ ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */
+ trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend"));
} else {
-
- /* Advance callbacks. */
- ret = rcu_advance_cbs(rsp, rnp, rdp);
-
- /* Remember that we saw this grace-period completion. */
- rdp->completed = rnp->completed;
- trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
+ ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */
}
- if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
+ /* Now handle the beginnings of any new-to-this-CPU grace periods. */
+ if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
+ unlikely(READ_ONCE(rdp->gpwrap))) {
/*
* If the current grace period is waiting for this CPU,
* set up to detect a quiescent state, otherwise don't
* go looking for one.
*/
- rdp->gpnum = rnp->gpnum;
- trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
+ trace_rcu_grace_period(rsp->name, rnp->gp_seq, TPS("cpustart"));
need_gp = !!(rnp->qsmask & rdp->grpmask);
rdp->cpu_no_qs.b.norm = need_gp;
rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
rdp->core_needs_qs = need_gp;
zero_cpu_stall_ticks(rdp);
- WRITE_ONCE(rdp->gpwrap, false);
- rcu_gpnum_ovf(rnp, rdp);
}
+ rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
+ if (ULONG_CMP_GE(rnp->gp_seq_needed, rdp->gp_seq_needed) || rdp->gpwrap)
+ rdp->gp_seq_needed = rnp->gp_seq_needed;
+ WRITE_ONCE(rdp->gpwrap, false);
+ rcu_gpnum_ovf(rnp, rdp);
return ret;
}
@@ -1863,8 +1864,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
local_irq_save(flags);
rnp = rdp->mynode;
- if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
- rdp->completed == READ_ONCE(rnp->completed) &&
+ if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
!unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
!raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
local_irq_restore(flags);
@@ -1879,7 +1879,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
static void rcu_gp_slow(struct rcu_state *rsp, int delay)
{
if (delay > 0 &&
- !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
+ !(rcu_seq_ctr(rsp->gp_seq) %
+ (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
schedule_timeout_uninterruptible(delay);
}
@@ -1888,7 +1889,9 @@ static void rcu_gp_slow(struct rcu_state *rsp, int delay)
*/
static bool rcu_gp_init(struct rcu_state *rsp)
{
+ unsigned long flags;
unsigned long oldmask;
+ unsigned long mask;
struct rcu_data *rdp;
struct rcu_node *rnp = rcu_get_root(rsp);
@@ -1912,9 +1915,9 @@ static bool rcu_gp_init(struct rcu_state *rsp)
/* Advance to a new grace period and initialize state. */
record_gp_stall_check_time(rsp);
- /* Record GP times before starting GP, hence smp_store_release(). */
- smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
- trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
+ /* Record GP times before starting GP, hence rcu_seq_start(). */
+ rcu_seq_start(&rsp->gp_seq);
+ trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("start"));
raw_spin_unlock_irq_rcu_node(rnp);
/*
@@ -1923,13 +1926,15 @@ static bool rcu_gp_init(struct rcu_state *rsp)
* for subsequent online CPUs, and that quiescent-state forcing
* will handle subsequent offline CPUs.
*/
+ rsp->gp_state = RCU_GP_ONOFF;
rcu_for_each_leaf_node(rsp, rnp) {
- rcu_gp_slow(rsp, gp_preinit_delay);
+ spin_lock(&rsp->ofl_lock);
raw_spin_lock_irq_rcu_node(rnp);
if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
!rnp->wait_blkd_tasks) {
/* Nothing to do on this leaf rcu_node structure. */
raw_spin_unlock_irq_rcu_node(rnp);
+ spin_unlock(&rsp->ofl_lock);
continue;
}
@@ -1939,12 +1944,14 @@ static bool rcu_gp_init(struct rcu_state *rsp)
/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
if (!oldmask != !rnp->qsmaskinit) {
- if (!oldmask) /* First online CPU for this rcu_node. */
- rcu_init_new_rnp(rnp);
- else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
- rnp->wait_blkd_tasks = true;
- else /* Last offline CPU and can propagate. */
+ if (!oldmask) { /* First online CPU for rcu_node. */
+ if (!rnp->wait_blkd_tasks) /* Ever offline? */
+ rcu_init_new_rnp(rnp);
+ } else if (rcu_preempt_has_tasks(rnp)) {
+ rnp->wait_blkd_tasks = true; /* blocked tasks */
+ } else { /* Last offline CPU and can propagate. */
rcu_cleanup_dead_rnp(rnp);
+ }
}
/*
@@ -1953,18 +1960,19 @@ static bool rcu_gp_init(struct rcu_state *rsp)
* still offline, propagate up the rcu_node tree and
* clear ->wait_blkd_tasks. Otherwise, if one of this
* rcu_node structure's CPUs has since come back online,
- * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
- * checks for this, so just call it unconditionally).
+ * simply clear ->wait_blkd_tasks.
*/
if (rnp->wait_blkd_tasks &&
- (!rcu_preempt_has_tasks(rnp) ||
- rnp->qsmaskinit)) {
+ (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
rnp->wait_blkd_tasks = false;
- rcu_cleanup_dead_rnp(rnp);
+ if (!rnp->qsmaskinit)
+ rcu_cleanup_dead_rnp(rnp);
}
raw_spin_unlock_irq_rcu_node(rnp);
+ spin_unlock(&rsp->ofl_lock);
}
+ rcu_gp_slow(rsp, gp_preinit_delay); /* Races with CPU hotplug. */
/*
* Set the quiescent-state-needed bits in all the rcu_node
@@ -1978,22 +1986,27 @@ static bool rcu_gp_init(struct rcu_state *rsp)
* The grace period cannot complete until the initialization
* process finishes, because this kthread handles both.
*/
+ rsp->gp_state = RCU_GP_INIT;
rcu_for_each_node_breadth_first(rsp, rnp) {
rcu_gp_slow(rsp, gp_init_delay);
- raw_spin_lock_irq_rcu_node(rnp);
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
rdp = this_cpu_ptr(rsp->rda);
- rcu_preempt_check_blocked_tasks(rnp);
+ rcu_preempt_check_blocked_tasks(rsp, rnp);
rnp->qsmask = rnp->qsmaskinit;
- WRITE_ONCE(rnp->gpnum, rsp->gpnum);
- if (WARN_ON_ONCE(rnp->completed != rsp->completed))
- WRITE_ONCE(rnp->completed, rsp->completed);
+ WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
if (rnp == rdp->mynode)
(void)__note_gp_changes(rsp, rnp, rdp);
rcu_preempt_boost_start_gp(rnp);
- trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
+ trace_rcu_grace_period_init(rsp->name, rnp->gp_seq,
rnp->level, rnp->grplo,
rnp->grphi, rnp->qsmask);
- raw_spin_unlock_irq_rcu_node(rnp);
+ /* Quiescent states for tasks on any now-offline CPUs. */
+ mask = rnp->qsmask & ~rnp->qsmaskinitnext;
+ rnp->rcu_gp_init_mask = mask;
+ if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
+ rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+ else
+ raw_spin_unlock_irq_rcu_node(rnp);
cond_resched_tasks_rcu_qs();
WRITE_ONCE(rsp->gp_activity, jiffies);
}
@@ -2002,7 +2015,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
}
/*
- * Helper function for swait_event_idle() wakeup at force-quiescent-state
+ * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
* time.
*/
static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
@@ -2053,6 +2066,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
{
unsigned long gp_duration;
bool needgp = false;
+ unsigned long new_gp_seq;
struct rcu_data *rdp;
struct rcu_node *rnp = rcu_get_root(rsp);
struct swait_queue_head *sq;
@@ -2074,19 +2088,22 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
raw_spin_unlock_irq_rcu_node(rnp);
/*
- * Propagate new ->completed value to rcu_node structures so
- * that other CPUs don't have to wait until the start of the next
- * grace period to process their callbacks. This also avoids
- * some nasty RCU grace-period initialization races by forcing
- * the end of the current grace period to be completely recorded in
- * all of the rcu_node structures before the beginning of the next
- * grace period is recorded in any of the rcu_node structures.
+ * Propagate new ->gp_seq value to rcu_node structures so that
+ * other CPUs don't have to wait until the start of the next grace
+ * period to process their callbacks. This also avoids some nasty
+ * RCU grace-period initialization races by forcing the end of
+ * the current grace period to be completely recorded in all of
+ * the rcu_node structures before the beginning of the next grace
+ * period is recorded in any of the rcu_node structures.
*/
+ new_gp_seq = rsp->gp_seq;
+ rcu_seq_end(&new_gp_seq);
rcu_for_each_node_breadth_first(rsp, rnp) {
raw_spin_lock_irq_rcu_node(rnp);
- WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
+ if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
+ dump_blkd_tasks(rsp, rnp, 10);
WARN_ON_ONCE(rnp->qsmask);
- WRITE_ONCE(rnp->completed, rsp->gpnum);
+ WRITE_ONCE(rnp->gp_seq, new_gp_seq);
rdp = this_cpu_ptr(rsp->rda);
if (rnp == rdp->mynode)
needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
@@ -2100,26 +2117,28 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
rcu_gp_slow(rsp, gp_cleanup_delay);
}
rnp = rcu_get_root(rsp);
- raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
+ raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */
/* Declare grace period done. */
- WRITE_ONCE(rsp->completed, rsp->gpnum);
- trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
+ rcu_seq_end(&rsp->gp_seq);
+ trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("end"));
rsp->gp_state = RCU_GP_IDLE;
/* Check for GP requests since above loop. */
rdp = this_cpu_ptr(rsp->rda);
- if (need_any_future_gp(rnp)) {
- trace_rcu_this_gp(rnp, rdp, rsp->completed - 1,
+ if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
+ trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
TPS("CleanupMore"));
needgp = true;
}
/* Advance CBs to reduce false positives below. */
if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
- trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
+ rsp->gp_req_activity = jiffies;
+ trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq),
TPS("newreq"));
+ } else {
+ WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
}
- WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
raw_spin_unlock_irq_rcu_node(rnp);
}
@@ -2141,10 +2160,10 @@ static int __noreturn rcu_gp_kthread(void *arg)
/* Handle grace-period start. */
for (;;) {
trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gpnum),
+ READ_ONCE(rsp->gp_seq),
TPS("reqwait"));
rsp->gp_state = RCU_GP_WAIT_GPS;
- swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
+ swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
RCU_GP_FLAG_INIT);
rsp->gp_state = RCU_GP_DONE_GPS;
/* Locking provides needed memory barrier. */
@@ -2154,17 +2173,13 @@ static int __noreturn rcu_gp_kthread(void *arg)
WRITE_ONCE(rsp->gp_activity, jiffies);
WARN_ON(signal_pending(current));
trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gpnum),
+ READ_ONCE(rsp->gp_seq),
TPS("reqwaitsig"));
}
/* Handle quiescent-state forcing. */
first_gp_fqs = true;
j = jiffies_till_first_fqs;
- if (j > HZ) {
- j = HZ;
- jiffies_till_first_fqs = HZ;
- }
ret = 0;
for (;;) {
if (!ret) {
@@ -2173,10 +2188,10 @@ static int __noreturn rcu_gp_kthread(void *arg)
jiffies + 3 * j);
}
trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gpnum),
+ READ_ONCE(rsp->gp_seq),
TPS("fqswait"));
rsp->gp_state = RCU_GP_WAIT_FQS;
- ret = swait_event_idle_timeout(rsp->gp_wq,
+ ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
rcu_gp_fqs_check_wake(rsp, &gf), j);
rsp->gp_state = RCU_GP_DOING_FQS;
/* Locking provides needed memory barriers. */
@@ -2188,31 +2203,24 @@ static int __noreturn rcu_gp_kthread(void *arg)
if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
(gf & RCU_GP_FLAG_FQS)) {
trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gpnum),
+ READ_ONCE(rsp->gp_seq),
TPS("fqsstart"));
rcu_gp_fqs(rsp, first_gp_fqs);
first_gp_fqs = false;
trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gpnum),
+ READ_ONCE(rsp->gp_seq),
TPS("fqsend"));
cond_resched_tasks_rcu_qs();
WRITE_ONCE(rsp->gp_activity, jiffies);
ret = 0; /* Force full wait till next FQS. */
j = jiffies_till_next_fqs;
- if (j > HZ) {
- j = HZ;
- jiffies_till_next_fqs = HZ;
- } else if (j < 1) {
- j = 1;
- jiffies_till_next_fqs = 1;
- }
} else {
/* Deal with stray signal. */
cond_resched_tasks_rcu_qs();
WRITE_ONCE(rsp->gp_activity, jiffies);
WARN_ON(signal_pending(current));
trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gpnum),
+ READ_ONCE(rsp->gp_seq),
TPS("fqswaitsig"));
ret = 1; /* Keep old FQS timing. */
j = jiffies;
@@ -2256,8 +2264,12 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
* must be represented by the same rcu_node structure (which need not be a
* leaf rcu_node structure, though it often will be). The gps parameter
* is the grace-period snapshot, which means that the quiescent states
- * are valid only if rnp->gpnum is equal to gps. That structure's lock
+ * are valid only if rnp->gp_seq is equal to gps. That structure's lock
* must be held upon entry, and it is released before return.
+ *
+ * As a special case, if mask is zero, the bit-already-cleared check is
+ * disabled. This allows propagating quiescent state due to resumed tasks
+ * during grace-period initialization.
*/
static void
rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
@@ -2271,7 +2283,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
/* Walk up the rcu_node hierarchy. */
for (;;) {
- if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
+ if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
/*
* Our bit has already been cleared, or the
@@ -2284,7 +2296,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
rcu_preempt_blocked_readers_cgp(rnp));
rnp->qsmask &= ~mask;
- trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
+ trace_rcu_quiescent_state_report(rsp->name, rnp->gp_seq,
mask, rnp->qsmask, rnp->level,
rnp->grplo, rnp->grphi,
!!rnp->gp_tasks);
@@ -2294,6 +2306,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return;
}
+ rnp->completedqs = rnp->gp_seq;
mask = rnp->grpmask;
if (rnp->parent == NULL) {
@@ -2323,8 +2336,9 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
* irqs disabled, and this lock is released upon return, but irqs remain
* disabled.
*/
-static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
- struct rcu_node *rnp, unsigned long flags)
+static void __maybe_unused
+rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
+ struct rcu_node *rnp, unsigned long flags)
__releases(rnp->lock)
{
unsigned long gps;
@@ -2332,12 +2346,15 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
struct rcu_node *rnp_p;
raw_lockdep_assert_held_rcu_node(rnp);
- if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
- rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
+ if (WARN_ON_ONCE(rcu_state_p == &rcu_sched_state) ||
+ WARN_ON_ONCE(rsp != rcu_state_p) ||
+ WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
+ rnp->qsmask != 0) {
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return; /* Still need more quiescent states! */
}
+ rnp->completedqs = rnp->gp_seq;
rnp_p = rnp->parent;
if (rnp_p == NULL) {
/*
@@ -2348,8 +2365,8 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
return;
}
- /* Report up the rest of the hierarchy, tracking current ->gpnum. */
- gps = rnp->gpnum;
+ /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
+ gps = rnp->gp_seq;
mask = rnp->grpmask;
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
@@ -2370,8 +2387,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
rnp = rdp->mynode;
raw_spin_lock_irqsave_rcu_node(rnp, flags);
- if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum ||
- rnp->completed == rnp->gpnum || rdp->gpwrap) {
+ if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
+ rdp->gpwrap) {
/*
* The grace period in which this quiescent state was
@@ -2396,7 +2413,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
*/
needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
- rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+ rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
/* ^^^ Released rnp->lock */
if (needwake)
rcu_gp_kthread_wake(rsp);
@@ -2441,17 +2458,16 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
*/
static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
{
- RCU_TRACE(unsigned long mask;)
+ RCU_TRACE(bool blkd;)
RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
return;
- RCU_TRACE(mask = rdp->grpmask;)
- trace_rcu_grace_period(rsp->name,
- rnp->gpnum + 1 - !!(rnp->qsmask & mask),
- TPS("cpuofl"));
+ RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);)
+ trace_rcu_grace_period(rsp->name, rnp->gp_seq,
+ blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
}
/*
@@ -2463,7 +2479,7 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
* This function therefore goes up the tree of rcu_node structures,
* clearing the corresponding bits in the ->qsmaskinit fields. Note that
* the leaf rcu_node structure's ->qsmaskinit field has already been
- * updated
+ * updated.
*
* This function does check that the specified rcu_node structure has
* all CPUs offline and no blocked tasks, so it is OK to invoke it
@@ -2476,9 +2492,10 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
long mask;
struct rcu_node *rnp = rnp_leaf;
- raw_lockdep_assert_held_rcu_node(rnp);
+ raw_lockdep_assert_held_rcu_node(rnp_leaf);
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
- rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
+ WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
+ WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
return;
for (;;) {
mask = rnp->grpmask;
@@ -2487,7 +2504,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
break;
raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
rnp->qsmaskinit &= ~mask;
- rnp->qsmask &= ~mask;
+ /* Between grace periods, so better already be zero! */
+ WARN_ON_ONCE(rnp->qsmask);
if (rnp->qsmaskinit) {
raw_spin_unlock_rcu_node(rnp);
/* irqs remain disabled. */
@@ -2630,6 +2648,7 @@ void rcu_check_callbacks(int user)
rcu_sched_qs();
rcu_bh_qs();
+ rcu_note_voluntary_context_switch(current);
} else if (!in_softirq()) {
@@ -2645,8 +2664,7 @@ void rcu_check_callbacks(int user)
rcu_preempt_check_callbacks();
if (rcu_pending())
invoke_rcu_core();
- if (user)
- rcu_note_voluntary_context_switch(current);
+
trace_rcu_utilization(TPS("End scheduler-tick"));
}
@@ -2681,17 +2699,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
/* rcu_initiate_boost() releases rnp->lock */
continue;
}
- if (rnp->parent &&
- (rnp->parent->qsmask & rnp->grpmask)) {
- /*
- * Race between grace-period
- * initialization and task exiting RCU
- * read-side critical section: Report.
- */
- rcu_report_unblock_qs_rnp(rsp, rnp, flags);
- /* rcu_report_unblock_qs_rnp() rlses ->lock */
- continue;
- }
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ continue;
}
for_each_leaf_node_possible_cpu(rnp, cpu) {
unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
@@ -2701,8 +2710,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
}
}
if (mask != 0) {
- /* Idle/offline CPUs, report (releases rnp->lock. */
- rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+ /* Idle/offline CPUs, report (releases rnp->lock). */
+ rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
} else {
/* Nothing to do here, so just drop the lock. */
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2747,6 +2756,65 @@ static void force_quiescent_state(struct rcu_state *rsp)
}
/*
+ * This function checks for grace-period requests that fail to motivate
+ * RCU to come out of its idle mode.
+ */
+static void
+rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
+ struct rcu_data *rdp)
+{
+ const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
+ unsigned long flags;
+ unsigned long j;
+ struct rcu_node *rnp_root = rcu_get_root(rsp);
+ static atomic_t warned = ATOMIC_INIT(0);
+
+ if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress(rsp) ||
+ ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
+ return;
+ j = jiffies; /* Expensive access, and in common case don't get here. */
+ if (time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
+ time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
+ atomic_read(&warned))
+ return;
+
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ j = jiffies;
+ if (rcu_gp_in_progress(rsp) ||
+ ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+ time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
+ time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
+ atomic_read(&warned)) {
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ return;
+ }
+ /* Hold onto the leaf lock to make others see warned==1. */
+
+ if (rnp_root != rnp)
+ raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
+ j = jiffies;
+ if (rcu_gp_in_progress(rsp) ||
+ ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+ time_before(j, rsp->gp_req_activity + gpssdelay) ||
+ time_before(j, rsp->gp_activity + gpssdelay) ||
+ atomic_xchg(&warned, 1)) {
+ raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ return;
+ }
+ pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n",
+ __func__, (long)READ_ONCE(rsp->gp_seq),
+ (long)READ_ONCE(rnp_root->gp_seq_needed),
+ j - rsp->gp_req_activity, j - rsp->gp_activity,
+ rsp->gp_flags, rsp->gp_state, rsp->name,
+ rsp->gp_kthread ? rsp->gp_kthread->state : 0x1ffffL);
+ WARN_ON(1);
+ if (rnp_root != rnp)
+ raw_spin_unlock_rcu_node(rnp_root);
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+}
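The function above layers three checks: a lockless fastpath, a recheck under the leaf rcu_node lock, and a final recheck under the root lock, with atomic_xchg() electing exactly one reporter. A minimal sketch of the idiom, assuming hypothetical leaf_lock/root_lock spinlocks and a no_stall() predicate (irq handling omitted):

	static atomic_t warned;

	static void stall_check_sketch(void)
	{
		if (no_stall() || atomic_read(&warned))
			return;				/* Lockless fastpath. */
		spin_lock(&leaf_lock);
		if (no_stall() || atomic_read(&warned)) {
			spin_unlock(&leaf_lock);	/* Recheck under leaf lock. */
			return;
		}
		spin_lock(&root_lock);
		if (no_stall() || atomic_xchg(&warned, 1)) {
			spin_unlock(&root_lock);	/* Someone else won the race. */
			spin_unlock(&leaf_lock);
			return;
		}
		WARN_ON(1);				/* Exactly one CPU reports. */
		spin_unlock(&root_lock);
		spin_unlock(&leaf_lock);
	}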
+
+/*
* This does the RCU core processing work for the specified rcu_state
* and rcu_data structures. This may be called only from the CPU to
* whom the rdp belongs.
@@ -2755,9 +2823,8 @@ static void
__rcu_process_callbacks(struct rcu_state *rsp)
{
unsigned long flags;
- bool needwake;
struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
- struct rcu_node *rnp;
+ struct rcu_node *rnp = rdp->mynode;
WARN_ON_ONCE(!rdp->beenonline);
@@ -2768,18 +2835,13 @@ __rcu_process_callbacks(struct rcu_state *rsp)
if (!rcu_gp_in_progress(rsp) &&
rcu_segcblist_is_enabled(&rdp->cblist)) {
local_irq_save(flags);
- if (rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) {
- local_irq_restore(flags);
- } else {
- rnp = rdp->mynode;
- raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
- needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- if (needwake)
- rcu_gp_kthread_wake(rsp);
- }
+ if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
+ rcu_accelerate_cbs_unlocked(rsp, rnp, rdp);
+ local_irq_restore(flags);
}
+ rcu_check_gp_start_stall(rsp, rnp, rdp);
+
/* If there are callbacks ready, invoke them. */
if (rcu_segcblist_ready_cbs(&rdp->cblist))
invoke_rcu_callbacks(rsp, rdp);
@@ -2833,8 +2895,6 @@ static void invoke_rcu_core(void)
static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
struct rcu_head *head, unsigned long flags)
{
- bool needwake;
-
/*
* If called from an extended quiescent state, invoke the RCU
* core in order to force a re-evaluation of RCU's idleness.
@@ -2861,13 +2921,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
/* Start a new grace period if one not already started. */
if (!rcu_gp_in_progress(rsp)) {
- struct rcu_node *rnp = rdp->mynode;
-
- raw_spin_lock_rcu_node(rnp);
- needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
- raw_spin_unlock_rcu_node(rnp);
- if (needwake)
- rcu_gp_kthread_wake(rsp);
+ rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp);
} else {
/* Give the grace period a kick. */
rdp->blimit = LONG_MAX;
@@ -3037,7 +3091,7 @@ EXPORT_SYMBOL_GPL(kfree_call_rcu);
* when there was in fact only one the whole time, as this just adds
* some overhead: RCU still operates correctly.
*/
-static inline int rcu_blocking_is_gp(void)
+static int rcu_blocking_is_gp(void)
{
int ret;
@@ -3136,16 +3190,10 @@ unsigned long get_state_synchronize_rcu(void)
{
/*
* Any prior manipulation of RCU-protected data must happen
- * before the load from ->gpnum.
+ * before the load from ->gp_seq.
*/
smp_mb(); /* ^^^ */
-
- /*
- * Make sure this load happens before the purportedly
- * time-consuming work between get_state_synchronize_rcu()
- * and cond_synchronize_rcu().
- */
- return smp_load_acquire(&rcu_state_p->gpnum);
+ return rcu_seq_snap(&rcu_state_p->gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
@@ -3165,15 +3213,10 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
*/
void cond_synchronize_rcu(unsigned long oldstate)
{
- unsigned long newstate;
-
- /*
- * Ensure that this load happens before any RCU-destructive
- * actions the caller might carry out after we return.
- */
- newstate = smp_load_acquire(&rcu_state_p->completed);
- if (ULONG_CMP_GE(oldstate, newstate))
+ if (!rcu_seq_done(&rcu_state_p->gp_seq, oldstate))
synchronize_rcu();
+ else
+ smp_mb(); /* Ensure GP ends before subsequent accesses. */
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
@@ -3188,16 +3231,10 @@ unsigned long get_state_synchronize_sched(void)
{
/*
* Any prior manipulation of RCU-protected data must happen
- * before the load from ->gpnum.
+ * before the load from ->gp_seq.
*/
smp_mb(); /* ^^^ */
-
- /*
- * Make sure this load happens before the purportedly
- * time-consuming work between get_state_synchronize_sched()
- * and cond_synchronize_sched().
- */
- return smp_load_acquire(&rcu_sched_state.gpnum);
+ return rcu_seq_snap(&rcu_sched_state.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
@@ -3217,15 +3254,10 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
*/
void cond_synchronize_sched(unsigned long oldstate)
{
- unsigned long newstate;
-
- /*
- * Ensure that this load happens before any RCU-destructive
- * actions the caller might carry out after we return.
- */
- newstate = smp_load_acquire(&rcu_sched_state.completed);
- if (ULONG_CMP_GE(oldstate, newstate))
+ if (!rcu_seq_done(&rcu_sched_state.gp_seq, oldstate))
synchronize_sched();
+ else
+ smp_mb(); /* Ensure GP ends before subsequent accesses. */
}
EXPORT_SYMBOL_GPL(cond_synchronize_sched);
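All four conversions above preserve the cookie contract of this API: take a snapshot first, then wait only if a full grace period has not already elapsed since the snapshot. A hedged usage sketch, with do_expensive_work() standing in for arbitrary caller work:

	unsigned long cookie;

	cookie = get_state_synchronize_rcu();	/* Snapshot the GP sequence. */
	do_expensive_work();			/* Hypothetical caller work. */
	cond_synchronize_rcu(cookie);		/* GP already done? smp_mb() only.
						 * Otherwise full synchronize_rcu(). */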
@@ -3261,12 +3293,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
return 1;
- /* Has another RCU grace period completed? */
- if (READ_ONCE(rnp->completed) != rdp->completed) /* outside lock */
- return 1;
-
- /* Has a new RCU grace period started? */
- if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
+ /* Have RCU grace period completed or started? */
+ if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
return 1;
@@ -3298,7 +3326,7 @@ static int rcu_pending(void)
* non-NULL, store an indication of whether all callbacks are lazy.
* (If there are no callbacks, all of them are deemed to be lazy.)
*/
-static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
+static bool rcu_cpu_has_callbacks(bool *all_lazy)
{
bool al = true;
bool hc = false;
@@ -3484,17 +3512,22 @@ EXPORT_SYMBOL_GPL(rcu_barrier_sched);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
{
long mask;
+ long oldmask;
struct rcu_node *rnp = rnp_leaf;
- raw_lockdep_assert_held_rcu_node(rnp);
+ raw_lockdep_assert_held_rcu_node(rnp_leaf);
+ WARN_ON_ONCE(rnp->wait_blkd_tasks);
for (;;) {
mask = rnp->grpmask;
rnp = rnp->parent;
if (rnp == NULL)
return;
raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
+ oldmask = rnp->qsmaskinit;
rnp->qsmaskinit |= mask;
raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
+ if (oldmask)
+ return;
}
}
@@ -3511,6 +3544,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
+ rdp->rcu_ofl_gp_seq = rsp->gp_seq;
+ rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
+ rdp->rcu_onl_gp_seq = rsp->gp_seq;
+ rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
rdp->cpu = cpu;
rdp->rsp = rsp;
rcu_boot_init_nocb_percpu_data(rdp);
@@ -3518,9 +3555,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
/*
* Initialize a CPU's per-CPU RCU data. Note that only one online or
- * offline event can be happening at a given time. Note also that we
- * can accept some slop in the rsp->completed access due to the fact
- * that this CPU cannot possibly have any RCU callbacks in flight yet.
+ * offline event can be happening at a given time. Note also that we can
+ * accept some slop in the rsp->gp_seq access due to the fact that this
+ * CPU cannot possibly have any RCU callbacks in flight yet.
*/
static void
rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
@@ -3549,14 +3586,14 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
rnp = rdp->mynode;
raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
rdp->beenonline = true; /* We have now been online. */
- rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
- rdp->completed = rnp->completed;
+ rdp->gp_seq = rnp->gp_seq;
+ rdp->gp_seq_needed = rnp->gp_seq;
rdp->cpu_no_qs.b.norm = true;
rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
rdp->core_needs_qs = false;
rdp->rcu_iw_pending = false;
- rdp->rcu_iw_gpnum = rnp->gpnum - 1;
- trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
+ rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
+ trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuonl"));
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
@@ -3705,7 +3742,15 @@ void rcu_cpu_starting(unsigned int cpu)
nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
/* Allow lockless access for expedited grace periods. */
smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
+ rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
+ rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
+ if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
+ /* Report QS -after- changing ->qsmaskinitnext! */
+ rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+ } else {
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ }
}
smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
}
@@ -3713,7 +3758,7 @@ void rcu_cpu_starting(unsigned int cpu)
#ifdef CONFIG_HOTPLUG_CPU
/*
* The CPU is exiting the idle loop into the arch_cpu_idle_dead()
- * function. We now remove it from the rcu_node tree's ->qsmaskinit
+ * function. We now remove it from the rcu_node tree's ->qsmaskinitnext
* bit masks.
*/
static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
@@ -3725,9 +3770,18 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
mask = rdp->grpmask;
+ spin_lock(&rsp->ofl_lock);
raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
+ rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq);
+ rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
+ if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
+ /* Report quiescent state -before- changing ->qsmaskinitnext! */
+ rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ }
rnp->qsmaskinitnext &= ~mask;
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ spin_unlock(&rsp->ofl_lock);
}
/*
@@ -3839,12 +3893,16 @@ static int __init rcu_spawn_gp_kthread(void)
struct task_struct *t;
/* Force priority into range. */
- if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
+ if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
+ && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
+ kthread_prio = 2;
+ else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
kthread_prio = 1;
else if (kthread_prio < 0)
kthread_prio = 0;
else if (kthread_prio > 99)
kthread_prio = 99;
+
if (kthread_prio != kthread_prio_in)
pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
kthread_prio, kthread_prio_in);
@@ -3928,8 +3986,9 @@ static void __init rcu_init_one(struct rcu_state *rsp)
raw_spin_lock_init(&rnp->fqslock);
lockdep_set_class_and_name(&rnp->fqslock,
&rcu_fqs_class[i], fqs[i]);
- rnp->gpnum = rsp->gpnum;
- rnp->completed = rsp->completed;
+ rnp->gp_seq = rsp->gp_seq;
+ rnp->gp_seq_needed = rsp->gp_seq;
+ rnp->completedqs = rsp->gp_seq;
rnp->qsmask = 0;
rnp->qsmaskinit = 0;
rnp->grplo = j * cpustride;
@@ -3997,7 +4056,7 @@ static void __init rcu_init_geometry(void)
if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
nr_cpu_ids == NR_CPUS)
return;
- pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
+ pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
rcu_fanout_leaf, nr_cpu_ids);
/*
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 78e051dffc5b..4e74df768c57 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -81,18 +81,16 @@ struct rcu_node {
raw_spinlock_t __private lock; /* Root rcu_node's lock protects */
/* some rcu_state fields as well as */
/* following. */
- unsigned long gpnum; /* Current grace period for this node. */
- /* This will either be equal to or one */
- /* behind the root rcu_node's gpnum. */
- unsigned long completed; /* Last GP completed for this node. */
- /* This will either be equal to or one */
- /* behind the root rcu_node's gpnum. */
+ unsigned long gp_seq; /* Track rsp->rcu_gp_seq. */
+ unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed. */
+ unsigned long completedqs; /* All QSes done for this node. */
unsigned long qsmask; /* CPUs or groups that need to switch in */
/* order for current grace period to proceed.*/
/* In leaf rcu_node, each bit corresponds to */
/* an rcu_data structure, otherwise, each */
/* bit corresponds to a child rcu_node */
/* structure. */
+ unsigned long rcu_gp_init_mask; /* Mask of offline CPUs at GP init. */
unsigned long qsmaskinit;
/* Per-GP initial value for qsmask. */
/* Initialized from ->qsmaskinitnext at the */
@@ -158,7 +156,6 @@ struct rcu_node {
struct swait_queue_head nocb_gp_wq[2];
/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
- u8 need_future_gp[4]; /* Counts of upcoming GP requests. */
raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
@@ -168,22 +165,6 @@ struct rcu_node {
bool exp_need_flush; /* Need to flush workitem? */
} ____cacheline_internodealigned_in_smp;
-/* Accessors for ->need_future_gp[] array. */
-#define need_future_gp_mask() \
- (ARRAY_SIZE(((struct rcu_node *)NULL)->need_future_gp) - 1)
-#define need_future_gp_element(rnp, c) \
- ((rnp)->need_future_gp[(c) & need_future_gp_mask()])
-#define need_any_future_gp(rnp) \
-({ \
- int __i; \
- bool __nonzero = false; \
- \
- for (__i = 0; __i < ARRAY_SIZE((rnp)->need_future_gp); __i++) \
- __nonzero = __nonzero || \
- READ_ONCE((rnp)->need_future_gp[__i]); \
- __nonzero; \
-})
-
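The ->gp_seq counter that replaces the old gpnum/completed pair (and the need_future_gp[] array just removed) packs a grace-period count and a phase into one unsigned long: in this series the two low-order bits carry the phase and the remaining bits count grace periods. A hedged sketch modeled on the rcu_seq_*() helpers this code relies on:

	#define SEQ_CTR_SHIFT	2
	#define SEQ_STATE_MASK	((1 << SEQ_CTR_SHIFT) - 1)

	/* Grace-period count, ignoring the low-order phase bits. */
	static unsigned long seq_ctr_sketch(unsigned long s)
	{
		return s >> SEQ_CTR_SHIFT;
	}

	/* Has a full grace period elapsed since snapshot "s"? */
	static bool seq_done_sketch(unsigned long *sp, unsigned long s)
	{
		return ULONG_CMP_GE(READ_ONCE(*sp), s);
	}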
/*
* Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
* are indexed relative to this interval rather than the global CPU ID space.
@@ -206,16 +187,14 @@ union rcu_noqs {
/* Per-CPU data for read-copy update. */
struct rcu_data {
/* 1) quiescent-state and grace-period handling : */
- unsigned long completed; /* Track rsp->completed gp number */
- /* in order to detect GP end. */
- unsigned long gpnum; /* Highest gp number that this CPU */
- /* is aware of having started. */
+ unsigned long gp_seq; /* Track rsp->rcu_gp_seq counter. */
+ unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed ctr. */
unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
/* for rcu_all_qs() invocations. */
union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */
bool core_needs_qs; /* Core waits for quiesc state. */
bool beenonline; /* CPU online at least once. */
- bool gpwrap; /* Possible gpnum/completed wrap. */
+ bool gpwrap; /* Possible ->gp_seq wrap. */
struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
unsigned long grpmask; /* Mask to apply to leaf qsmask. */
unsigned long ticks_this_gp; /* The number of scheduling-clock */
@@ -239,7 +218,6 @@ struct rcu_data {
/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
- unsigned long offline_fqs; /* Kicked due to being offline. */
unsigned long cond_resched_completed;
/* Grace period that needs help */
/* from cond_resched(). */
@@ -278,12 +256,16 @@ struct rcu_data {
/* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
- /* 7) RCU CPU stall data. */
+ /* 7) Diagnostic data, including RCU CPU stall warnings. */
unsigned int softirq_snap; /* Snapshot of softirq activity. */
/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
struct irq_work rcu_iw; /* Check for non-irq activity. */
bool rcu_iw_pending; /* Is ->rcu_iw pending? */
- unsigned long rcu_iw_gpnum; /* ->gpnum associated with ->rcu_iw. */
+ unsigned long rcu_iw_gp_seq; /* ->gp_seq associated with ->rcu_iw. */
+ unsigned long rcu_ofl_gp_seq; /* ->gp_seq at last offline. */
+ short rcu_ofl_gp_flags; /* ->gp_flags at last offline. */
+ unsigned long rcu_onl_gp_seq; /* ->gp_seq at last online. */
+ short rcu_onl_gp_flags; /* ->gp_flags at last online. */
int cpu;
struct rcu_state *rsp;
@@ -340,8 +322,7 @@ struct rcu_state {
u8 boost ____cacheline_internodealigned_in_smp;
/* Subject to priority boost. */
- unsigned long gpnum; /* Current gp number. */
- unsigned long completed; /* # of last completed gp. */
+ unsigned long gp_seq; /* Grace-period sequence #. */
struct task_struct *gp_kthread; /* Task for grace periods. */
struct swait_queue_head gp_wq; /* Where GP task waits. */
short gp_flags; /* Commands for GP task. */
@@ -373,6 +354,8 @@ struct rcu_state {
/* but in jiffies. */
unsigned long gp_activity; /* Time of last GP kthread */
/* activity in jiffies. */
+ unsigned long gp_req_activity; /* Time of last GP request */
+ /* in jiffies. */
unsigned long jiffies_stall; /* Time at which to check */
/* for CPU stalls. */
unsigned long jiffies_resched; /* Time at which to resched */
@@ -384,6 +367,10 @@ struct rcu_state {
const char *name; /* Name of structure. */
char abbr; /* Abbreviated name. */
struct list_head flavors; /* List of RCU flavors. */
+
+ spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
+ /* Synchronize offline with */
+ /* GP pre-initialization. */
};
/* Values for rcu_state structure's gp_flags field. */
@@ -394,16 +381,20 @@ struct rcu_state {
#define RCU_GP_IDLE 0 /* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS 1 /* Wait for grace-period start. */
#define RCU_GP_DONE_GPS 2 /* Wait done for grace-period start. */
-#define RCU_GP_WAIT_FQS 3 /* Wait for force-quiescent-state time. */
-#define RCU_GP_DOING_FQS 4 /* Wait done for force-quiescent-state time. */
-#define RCU_GP_CLEANUP 5 /* Grace-period cleanup started. */
-#define RCU_GP_CLEANED 6 /* Grace-period cleanup complete. */
+#define RCU_GP_ONOFF 3 /* Grace-period initialization hotplug. */
+#define RCU_GP_INIT 4 /* Grace-period initialization. */
+#define RCU_GP_WAIT_FQS 5 /* Wait for force-quiescent-state time. */
+#define RCU_GP_DOING_FQS 6 /* Wait done for force-quiescent-state time. */
+#define RCU_GP_CLEANUP 7 /* Grace-period cleanup started. */
+#define RCU_GP_CLEANED 8 /* Grace-period cleanup complete. */
#ifndef RCU_TREE_NONCORE
static const char * const gp_state_names[] = {
"RCU_GP_IDLE",
"RCU_GP_WAIT_GPS",
"RCU_GP_DONE_GPS",
+ "RCU_GP_ONOFF",
+ "RCU_GP_INIT",
"RCU_GP_WAIT_FQS",
"RCU_GP_DOING_FQS",
"RCU_GP_CLEANUP",
@@ -449,10 +440,13 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
+static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp,
+ struct rcu_node *rnp);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, rcu_callback_t func);
static void __init __rcu_init_preempt(void);
+static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp,
+ int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
@@ -489,7 +483,6 @@ static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index d40708e8c5d6..0b2c2ad69629 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -212,7 +212,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
if (wake) {
smp_mb(); /* EGP done before wake_up(). */
- swake_up(&rsp->expedited_wq);
+ swake_up_one(&rsp->expedited_wq);
}
break;
}
@@ -472,6 +472,7 @@ retry_ipi:
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
smp_call_func_t func)
{
+ int cpu;
struct rcu_node *rnp;
trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
@@ -486,13 +487,20 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
rnp->rew.rew_func = func;
rnp->rew.rew_rsp = rsp;
if (!READ_ONCE(rcu_par_gp_wq) ||
- rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
- /* No workqueues yet. */
+ rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
+ rcu_is_last_leaf_node(rsp, rnp)) {
+ /* No workqueues yet or last leaf, do direct call. */
sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
continue;
}
INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
- queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
+ preempt_disable();
+ cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
+ /* If all offline, queue the work on an unbound CPU. */
+ if (unlikely(cpu > rnp->grphi))
+ cpu = WORK_CPU_UNBOUND;
+ queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
+ preempt_enable();
rnp->exp_need_flush = true;
}
@@ -518,7 +526,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
jiffies_start = jiffies;
for (;;) {
- ret = swait_event_timeout(
+ ret = swait_event_timeout_exclusive(
rsp->expedited_wq,
sync_rcu_preempt_exp_done_unlocked(rnp_root),
jiffies_stall);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 7fd12039e512..a97c20ea9bce 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -74,8 +74,8 @@ static void __init rcu_bootup_announce_oddness(void)
pr_info("\tRCU event tracing is enabled.\n");
if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
(!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
- pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
- RCU_FANOUT);
+ pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
+ RCU_FANOUT);
if (rcu_fanout_exact)
pr_info("\tHierarchical RCU autobalancing is disabled.\n");
if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
@@ -88,11 +88,13 @@ static void __init rcu_bootup_announce_oddness(void)
pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
RCU_FANOUT_LEAF);
if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
- pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
+ pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
+ rcu_fanout_leaf);
if (nr_cpu_ids != NR_CPUS)
pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_BOOST
- pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", kthread_prio, CONFIG_RCU_BOOST_DELAY);
+ pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
+ kthread_prio, CONFIG_RCU_BOOST_DELAY);
#endif
if (blimit != DEFAULT_RCU_BLIMIT)
pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
@@ -127,6 +129,7 @@ static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
bool wake);
+static void rcu_read_unlock_special(struct task_struct *t);
/*
* Tell them what RCU they are running.
@@ -183,6 +186,9 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
raw_lockdep_assert_held_rcu_node(rnp);
WARN_ON_ONCE(rdp->mynode != rnp);
WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
+ /* RCU better not be waiting on newly onlined CPUs! */
+ WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
+ rdp->grpmask);
/*
* Decide where to queue the newly blocked task. In theory,
@@ -260,8 +266,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
* ->exp_tasks pointers, respectively, to reference the newly
* blocked tasks.
*/
- if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
+ if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
rnp->gp_tasks = &t->rcu_node_entry;
+ WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
+ }
if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
rnp->exp_tasks = &t->rcu_node_entry;
WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
@@ -286,20 +294,24 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
}
/*
- * Record a preemptible-RCU quiescent state for the specified CPU. Note
- * that this just means that the task currently running on the CPU is
- * not in a quiescent state. There might be any number of tasks blocked
- * while in an RCU read-side critical section.
+ * Record a preemptible-RCU quiescent state for the specified CPU.
+ * Note that this does not necessarily mean that the task currently running
+ * on the CPU is in a quiescent state: Instead, it means that the current
+ * grace period need not wait on any RCU read-side critical section that
+ * starts later on this CPU. It also means that if the current task is
+ * in an RCU read-side critical section, it has already added itself to
+ * some leaf rcu_node structure's ->blkd_tasks list. In addition to the
+ * current task, there might be any number of other tasks blocked while
+ * in an RCU read-side critical section.
*
- * As with the other rcu_*_qs() functions, callers to this function
- * must disable preemption.
+ * Callers to this function must disable preemption.
*/
static void rcu_preempt_qs(void)
{
RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_qs() invoked with preemption enabled!!!\n");
if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
trace_rcu_grace_period(TPS("rcu_preempt"),
- __this_cpu_read(rcu_data_p->gpnum),
+ __this_cpu_read(rcu_data_p->gp_seq),
TPS("cpuqs"));
__this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false);
barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
@@ -348,8 +360,8 @@ static void rcu_preempt_note_context_switch(bool preempt)
trace_rcu_preempt_task(rdp->rsp->name,
t->pid,
(rnp->qsmask & rdp->grpmask)
- ? rnp->gpnum
- : rnp->gpnum + 1);
+ ? rnp->gp_seq
+ : rcu_seq_snap(&rnp->gp_seq));
rcu_preempt_ctxt_queue(rnp, rdp);
} else if (t->rcu_read_lock_nesting < 0 &&
t->rcu_read_unlock_special.s) {
@@ -456,7 +468,7 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
* notify RCU core processing or task having blocked during the RCU
* read-side critical section.
*/
-void rcu_read_unlock_special(struct task_struct *t)
+static void rcu_read_unlock_special(struct task_struct *t)
{
bool empty_exp;
bool empty_norm;
@@ -535,13 +547,15 @@ void rcu_read_unlock_special(struct task_struct *t)
WARN_ON_ONCE(rnp != t->rcu_blocked_node);
WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
+ WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
+ (!empty_norm || rnp->qsmask));
empty_exp = sync_rcu_preempt_exp_done(rnp);
smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
np = rcu_next_node_entry(t, rnp);
list_del_init(&t->rcu_node_entry);
t->rcu_blocked_node = NULL;
trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
- rnp->gpnum, t->pid);
+ rnp->gp_seq, t->pid);
if (&t->rcu_node_entry == rnp->gp_tasks)
rnp->gp_tasks = np;
if (&t->rcu_node_entry == rnp->exp_tasks)
@@ -562,7 +576,7 @@ void rcu_read_unlock_special(struct task_struct *t)
empty_exp_now = sync_rcu_preempt_exp_done(rnp);
if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
- rnp->gpnum,
+ rnp->gp_seq,
0, rnp->qsmask,
rnp->level,
rnp->grplo,
@@ -686,24 +700,27 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
* Check that the list of blocked tasks for the newly completed grace
* period is in fact empty. It is a serious bug to complete a grace
* period that still has RCU readers blocked! This function must be
- * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
+ * invoked -before- updating this rnp's ->gp_seq, and the rnp's ->lock
* must be held by the caller.
*
* Also, if there are blocked tasks on the list, they automatically
* block the newly created grace period, so set up ->gp_tasks accordingly.
*/
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+static void
+rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
{
struct task_struct *t;
RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
- WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
- if (rcu_preempt_has_tasks(rnp)) {
+ if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
+ dump_blkd_tasks(rsp, rnp, 10);
+ if (rcu_preempt_has_tasks(rnp) &&
+ (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
rnp->gp_tasks = rnp->blkd_tasks.next;
t = container_of(rnp->gp_tasks, struct task_struct,
rcu_node_entry);
trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
- rnp->gpnum, t->pid);
+ rnp->gp_seq, t->pid);
}
WARN_ON_ONCE(rnp->qsmask);
}
@@ -717,6 +734,7 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
*/
static void rcu_preempt_check_callbacks(void)
{
+ struct rcu_state *rsp = &rcu_preempt_state;
struct task_struct *t = current;
if (t->rcu_read_lock_nesting == 0) {
@@ -725,7 +743,9 @@ static void rcu_preempt_check_callbacks(void)
}
if (t->rcu_read_lock_nesting > 0 &&
__this_cpu_read(rcu_data_p->core_needs_qs) &&
- __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm))
+ __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm) &&
+ !t->rcu_read_unlock_special.b.need_qs &&
+ time_after(jiffies, rsp->gp_start + HZ))
t->rcu_read_unlock_special.b.need_qs = true;
}
@@ -841,6 +861,47 @@ void exit_rcu(void)
__rcu_read_unlock();
}
+/*
+ * Dump the blocked-tasks state, but limit the list dump to the
+ * specified number of elements.
+ */
+static void
+dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+{
+ int cpu;
+ int i;
+ struct list_head *lhp;
+ bool onl;
+ struct rcu_data *rdp;
+ struct rcu_node *rnp1;
+
+ raw_lockdep_assert_held_rcu_node(rnp);
+ pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
+ __func__, rnp->grplo, rnp->grphi, rnp->level,
+ (long)rnp->gp_seq, (long)rnp->completedqs);
+ for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
+ pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
+ __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
+ pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
+ __func__, rnp->gp_tasks, rnp->boost_tasks, rnp->exp_tasks);
+ pr_info("%s: ->blkd_tasks", __func__);
+ i = 0;
+ list_for_each(lhp, &rnp->blkd_tasks) {
+ pr_cont(" %p", lhp);
+ if (++i >= 10)
+ break;
+ }
+ pr_cont("\n");
+ for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
+ rdp = per_cpu_ptr(rsp->rda, cpu);
+ onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
+ pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
+ cpu, ".o"[onl],
+ (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
+ (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
+ }
+}
+
#else /* #ifdef CONFIG_PREEMPT_RCU */
static struct rcu_state *const rcu_state_p = &rcu_sched_state;
@@ -911,7 +972,8 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
* so there is no need to check for blocked tasks. So check only for
* bogus qsmask values.
*/
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+static void
+rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
{
WARN_ON_ONCE(rnp->qsmask);
}
@@ -949,6 +1011,15 @@ void exit_rcu(void)
{
}
+/*
+ * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
+ */
+static void
+dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+{
+ WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
+}
+
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
@@ -1433,7 +1504,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
* completed since we last checked and there are
* callbacks not yet ready to invoke.
*/
- if ((rdp->completed != rnp->completed ||
+ if ((rcu_seq_completed_gp(rdp->gp_seq,
+ rcu_seq_current(&rnp->gp_seq)) ||
unlikely(READ_ONCE(rdp->gpwrap))) &&
rcu_segcblist_pend_cbs(&rdp->cblist))
note_gp_changes(rsp, rdp);
@@ -1720,16 +1792,16 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
*/
touch_nmi_watchdog();
- if (rsp->gpnum == rdp->gpnum) {
+ ticks_value = rcu_seq_ctr(rsp->gp_seq - rdp->gp_seq);
+ if (ticks_value) {
+ ticks_title = "GPs behind";
+ } else {
ticks_title = "ticks this GP";
ticks_value = rdp->ticks_this_gp;
- } else {
- ticks_title = "GPs behind";
- ticks_value = rsp->gpnum - rdp->gpnum;
}
print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
- delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum;
- pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%ld softirq=%u/%u fqs=%ld %s\n",
+ delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
+ pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
cpu,
"O."[!!cpu_online(cpu)],
"o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
@@ -1817,7 +1889,7 @@ static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
- return &rnp->nocb_gp_wq[rnp->completed & 0x1];
+ return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
}
static void rcu_init_one_nocb(struct rcu_node *rnp)
@@ -1854,8 +1926,8 @@ static void __wake_nocb_leader(struct rcu_data *rdp, bool force,
WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
del_timer(&rdp->nocb_timer);
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
- smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
- swake_up(&rdp_leader->nocb_wq);
+ smp_mb(); /* ->nocb_leader_sleep before swake_up_one(). */
+ swake_up_one(&rdp_leader->nocb_wq);
} else {
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
}
@@ -2069,12 +2141,17 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
bool needwake;
struct rcu_node *rnp = rdp->mynode;
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- c = rcu_cbs_completed(rdp->rsp, rnp);
- needwake = rcu_start_this_gp(rnp, rdp, c);
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- if (needwake)
- rcu_gp_kthread_wake(rdp->rsp);
+ local_irq_save(flags);
+ c = rcu_seq_snap(&rdp->rsp->gp_seq);
+ if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+ local_irq_restore(flags);
+ } else {
+ raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+ needwake = rcu_start_this_gp(rnp, rdp, c);
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ if (needwake)
+ rcu_gp_kthread_wake(rdp->rsp);
+ }
/*
* Wait for the grace period. Do so interruptibly to avoid messing
@@ -2082,9 +2159,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
*/
trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
for (;;) {
- swait_event_interruptible(
- rnp->nocb_gp_wq[c & 0x1],
- (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
+ swait_event_interruptible_exclusive(
+ rnp->nocb_gp_wq[rcu_seq_ctr(c) & 0x1],
+ (d = rcu_seq_done(&rnp->gp_seq, c)));
if (likely(d))
break;
WARN_ON(signal_pending(current));
@@ -2111,7 +2188,7 @@ wait_again:
/* Wait for callbacks to appear. */
if (!rcu_nocb_poll) {
trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
- swait_event_interruptible(my_rdp->nocb_wq,
+ swait_event_interruptible_exclusive(my_rdp->nocb_wq,
!READ_ONCE(my_rdp->nocb_leader_sleep));
raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
my_rdp->nocb_leader_sleep = true;
@@ -2176,7 +2253,7 @@ wait_again:
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
/* List was empty, so wake up the follower. */
- swake_up(&rdp->nocb_wq);
+ swake_up_one(&rdp->nocb_wq);
}
}
@@ -2193,7 +2270,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
{
for (;;) {
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
- swait_event_interruptible(rdp->nocb_wq,
+ swait_event_interruptible_exclusive(rdp->nocb_wq,
READ_ONCE(rdp->nocb_follower_head));
if (smp_load_acquire(&rdp->nocb_follower_head)) {
/* ^^^ Ensure CB invocation follows _head test. */
@@ -2569,23 +2646,6 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/*
- * An adaptive-ticks CPU can potentially execute in kernel mode for an
- * arbitrarily long period of time with the scheduling-clock tick turned
- * off. RCU will be paying attention to this CPU because it is in the
- * kernel, but the CPU cannot be guaranteed to be executing the RCU state
- * machine because the scheduling-clock tick has been disabled. Therefore,
- * if an adaptive-ticks CPU is failing to respond to the current grace
- * period and has not be idle from an RCU perspective, kick it.
- */
-static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
-{
-#ifdef CONFIG_NO_HZ_FULL
- if (tick_nohz_full_cpu(cpu))
- smp_send_reschedule(cpu);
-#endif /* #ifdef CONFIG_NO_HZ_FULL */
-}
-
-/*
* Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
* grace-period kthread will do force_quiescent_state() processing?
* The idea is to avoid waking up RCU core processing on such a
@@ -2610,8 +2670,6 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
*/
static void rcu_bind_gp_kthread(void)
{
- int __maybe_unused cpu;
-
if (!tick_nohz_full_enabled())
return;
housekeeping_affine(current, HK_FLAG_RCU);
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 4c230a60ece4..39cb23d22109 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -507,14 +507,15 @@ early_initcall(check_cpu_stall_init);
#ifdef CONFIG_TASKS_RCU
/*
- * Simple variant of RCU whose quiescent states are voluntary context switch,
- * user-space execution, and idle. As such, grace periods can take one good
- * long time. There are no read-side primitives similar to rcu_read_lock()
- * and rcu_read_unlock() because this implementation is intended to get
- * the system into a safe state for some of the manipulations involved in
- * tracing and the like. Finally, this implementation does not support
- * high call_rcu_tasks() rates from multiple CPUs. If this is required,
- * per-CPU callback lists will be needed.
+ * Simple variant of RCU whose quiescent states are voluntary context
+ * switch, cond_resched_rcu_qs(), user-space execution, and idle.
+ * As such, grace periods can take one good long time. There are no
+ * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
+ * because this implementation is intended to get the system into a safe
+ * state for some of the manipulations involved in tracing and the like.
+ * Finally, this implementation does not support high call_rcu_tasks()
+ * rates from multiple CPUs. If this is required, per-CPU callback lists
+ * will be needed.
*/
/* Global list of callbacks and associated lock. */
@@ -542,11 +543,11 @@ static struct task_struct *rcu_tasks_kthread_ptr;
* period elapses, in other words after all currently executing RCU
* read-side critical sections have completed. call_rcu_tasks() assumes
* that the read-side critical sections end at a voluntary context
- * switch (not a preemption!), entry into idle, or transition to usermode
- * execution. As such, there are no read-side primitives analogous to
- * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
- * to determine that all tasks have passed through a safe state, not so
- * much for data-strcuture synchronization.
+ * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
+ * or transition to usermode execution. As such, there are no read-side
+ * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
+ * this primitive is intended to determine that all tasks have passed
+ * through a safe state, not so much for data-structure synchronization.
*
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
@@ -667,6 +668,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
struct rcu_head *list;
struct rcu_head *next;
LIST_HEAD(rcu_tasks_holdouts);
+ int fract;
/* Run on housekeeping CPUs by default. Sysadm can move if desired. */
housekeeping_affine(current, HK_FLAG_RCU);
@@ -748,13 +750,25 @@ static int __noreturn rcu_tasks_kthread(void *arg)
* holdouts. When the list is empty, we are done.
*/
lastreport = jiffies;
- while (!list_empty(&rcu_tasks_holdouts)) {
+
+ /* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
+ fract = 10;
+
+ for (;;) {
bool firstreport;
bool needreport;
int rtst;
struct task_struct *t1;
- schedule_timeout_interruptible(HZ);
+ if (list_empty(&rcu_tasks_holdouts))
+ break;
+
+ /* Slowly back off waiting for holdouts */
+ schedule_timeout_interruptible(HZ/fract);
+
+ if (fract > 1)
+ fract--;
+
rtst = READ_ONCE(rcu_task_stall_timeout);
needreport = rtst > 0 &&
time_after(jiffies, lastreport + rtst);
@@ -800,6 +814,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
list = next;
cond_resched();
}
+ /* Paranoid sleep to keep this from entering a tight loop */
schedule_timeout_uninterruptible(HZ/10);
}
}
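The rewritten holdout loop above replaces the fixed one-second sleep with a decaying backoff: waits of HZ/10, HZ/9, ..., HZ/2, then a steady HZ once fract reaches 1. A minimal sketch, with holdouts_remain() as a hypothetical stand-in for the list_empty() test:

	int fract = 10;

	while (holdouts_remain()) {
		schedule_timeout_interruptible(HZ / fract);
		if (fract > 1)
			fract--;	/* HZ/10, HZ/9, ..., then steady HZ. */
	}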
diff --git a/kernel/reboot.c b/kernel/reboot.c
index e4ced883d8de..8fb44dec9ad7 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -294,7 +294,7 @@ void kernel_power_off(void)
}
EXPORT_SYMBOL_GPL(kernel_power_off);
-static DEFINE_MUTEX(reboot_mutex);
+DEFINE_MUTEX(system_transition_mutex);
/*
* Reboot system call: for obvious reasons only root may call it,
@@ -338,7 +338,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
cmd = LINUX_REBOOT_CMD_HALT;
- mutex_lock(&reboot_mutex);
+ mutex_lock(&system_transition_mutex);
switch (cmd) {
case LINUX_REBOOT_CMD_RESTART:
kernel_restart(NULL);
@@ -389,7 +389,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
ret = -EINVAL;
break;
}
- mutex_unlock(&reboot_mutex);
+ mutex_unlock(&system_transition_mutex);
return ret;
}
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index d9a02b318108..7fe183404c38 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -20,7 +20,7 @@ obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle.o fair.o rt.o deadline.o
obj-y += wait.o wait_bit.o swait.o completion.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 10c83e73837a..e3e3b979f9bd 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -53,6 +53,7 @@
*
*/
#include "sched.h"
+#include <linux/sched_clock.h>
/*
* Scheduler clock - returns current time in nanosec units.
@@ -66,12 +67,7 @@ unsigned long long __weak sched_clock(void)
}
EXPORT_SYMBOL_GPL(sched_clock);
-__read_mostly int sched_clock_running;
-
-void sched_clock_init(void)
-{
- sched_clock_running = 1;
-}
+static DEFINE_STATIC_KEY_FALSE(sched_clock_running);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
@@ -195,17 +191,40 @@ void clear_sched_clock_stable(void)
smp_mb(); /* matches sched_clock_init_late() */
- if (sched_clock_running == 2)
+ if (static_key_count(&sched_clock_running.key) == 2)
__clear_sched_clock_stable();
}
+static void __sched_clock_gtod_offset(void)
+{
+ struct sched_clock_data *scd = this_scd();
+
+ __scd_stamp(scd);
+ __gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod;
+}
+
+void __init sched_clock_init(void)
+{
+ /*
+ * Set __gtod_offset such that once we mark sched_clock_running,
+ * sched_clock_tick() continues where sched_clock() left off.
+ *
+ * Even if TSC is buggered, we're still UP at this point so it
+ * can't really be out of sync.
+ */
+ local_irq_disable();
+ __sched_clock_gtod_offset();
+ local_irq_enable();
+
+ static_branch_inc(&sched_clock_running);
+}
/*
* We run this as late_initcall() such that it runs after all built-in drivers,
* notably: acpi_processor and intel_idle, which can mark the TSC as unstable.
*/
static int __init sched_clock_init_late(void)
{
- sched_clock_running = 2;
+ static_branch_inc(&sched_clock_running);
/*
* Ensure that it is impossible to not do a static_key update.
*
@@ -350,8 +369,8 @@ u64 sched_clock_cpu(int cpu)
if (sched_clock_stable())
return sched_clock() + __sched_clock_offset;
- if (unlikely(!sched_clock_running))
- return 0ull;
+ if (!static_branch_unlikely(&sched_clock_running))
+ return sched_clock();
preempt_disable_notrace();
scd = cpu_sdc(cpu);
@@ -373,7 +392,7 @@ void sched_clock_tick(void)
if (sched_clock_stable())
return;
- if (unlikely(!sched_clock_running))
+ if (!static_branch_unlikely(&sched_clock_running))
return;
lockdep_assert_irqs_disabled();
@@ -385,8 +404,6 @@ void sched_clock_tick(void)
void sched_clock_tick_stable(void)
{
- u64 gtod, clock;
-
if (!sched_clock_stable())
return;
@@ -398,9 +415,7 @@ void sched_clock_tick_stable(void)
* TSC to be unstable, any computation will be computing crap.
*/
local_irq_disable();
- gtod = ktime_get_ns();
- clock = sched_clock();
- __gtod_offset = (clock + __sched_clock_offset) - gtod;
+ __sched_clock_gtod_offset();
local_irq_enable();
}
@@ -434,9 +449,17 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+void __init sched_clock_init(void)
+{
+ static_branch_inc(&sched_clock_running);
+ local_irq_disable();
+ generic_sched_clock_init();
+ local_irq_enable();
+}
+
u64 sched_clock_cpu(int cpu)
{
- if (unlikely(!sched_clock_running))
+ if (!static_branch_unlikely(&sched_clock_running))
return 0;
return sched_clock();
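The conversion above trades the sched_clock_running integer for a static key: DEFINE_STATIC_KEY_FALSE() starts disabled, static_branch_inc() enables it (called twice, mirroring the old 1 and 2 values), and static_branch_unlikely() compiles to a patched jump rather than a load-and-test on the fast path. A minimal sketch of the pattern, with my_clock_key and read_clock() as hypothetical names:

	static DEFINE_STATIC_KEY_FALSE(my_clock_key);

	static u64 read_clock(void)
	{
		if (!static_branch_unlikely(&my_clock_key))
			return 0;			/* Key not yet enabled. */
		return sched_clock();
	}

	static int __init clock_init(void)
	{
		static_branch_inc(&my_clock_key);	/* Flip the fast path on. */
		return 0;
	}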
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index e426b0cb9ac6..a1ad5b7d5521 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -22,8 +22,8 @@
*
* See also complete_all(), wait_for_completion() and related routines.
*
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * If this function wakes up a task, it executes a full memory barrier before
+ * accessing the task state.
*/
void complete(struct completion *x)
{
@@ -44,8 +44,8 @@ EXPORT_SYMBOL(complete);
*
* This will wake up all threads waiting on this particular completion event.
*
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * If this function wakes up a task, it executes a full memory barrier before
+ * accessing the task state.
*
* Since complete_all() sets the completion of @x permanently to done
* to allow multiple waiters to finish, a call to reinit_completion()
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fe365c9a08e9..454adf9f8180 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -17,6 +17,8 @@
#include "../workqueue_internal.h"
#include "../smpboot.h"
+#include "pelt.h"
+
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
@@ -45,14 +47,6 @@ const_debug unsigned int sysctl_sched_features =
const_debug unsigned int sysctl_sched_nr_migrate = 32;
/*
- * period over which we average the RT time consumption, measured
- * in ms.
- *
- * default: 1s
- */
-const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
-
-/*
* period over which we measure -rt task CPU usage in us.
* default: 1s
*/
@@ -183,9 +177,9 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
rq->clock_task += delta;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef HAVE_SCHED_AVG_IRQ
if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
- sched_rt_avg_update(rq, irq_delta + steal);
+ update_irq_load_avg(rq, irq_delta + steal);
#endif
}
@@ -412,8 +406,8 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
* its already queued (either by us or someone else) and will get the
* wakeup due to that.
*
- * This cmpxchg() implies a full barrier, which pairs with the write
- * barrier implied by the wakeup in wake_up_q().
+ * This cmpxchg() executes a full barrier, which pairs with the full
+ * barrier executed by the wakeup in wake_up_q().
*/
if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
return;
@@ -441,8 +435,8 @@ void wake_up_q(struct wake_q_head *head)
task->wake_q.next = NULL;
/*
- * wake_up_process() implies a wmb() to pair with the queueing
- * in wake_q_add() so as not to miss wakeups.
+ * wake_up_process() executes a full barrier, which pairs with
+ * the queueing in wake_q_add() so as not to miss wakeups.
*/
wake_up_process(task);
put_task_struct(task);
@@ -649,23 +643,6 @@ bool sched_can_stop_tick(struct rq *rq)
return true;
}
#endif /* CONFIG_NO_HZ_FULL */
-
-void sched_avg_update(struct rq *rq)
-{
- s64 period = sched_avg_period();
-
- while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
- /*
- * Inline assembly required to prevent the compiler
- * optimising this loop into a divmod call.
- * See __iter_div_u64_rem() for another example of this.
- */
- asm("" : "+rm" (rq->age_stamp));
- rq->age_stamp += period;
- rq->rt_avg /= 2;
- }
-}
-
#endif /* CONFIG_SMP */
#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
@@ -1199,6 +1176,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
__set_task_cpu(p, new_cpu);
}
+#ifdef CONFIG_NUMA_BALANCING
static void __migrate_swap_task(struct task_struct *p, int cpu)
{
if (task_on_rq_queued(p)) {
@@ -1280,16 +1258,17 @@ unlock:
/*
* Cross migrate two tasks
*/
-int migrate_swap(struct task_struct *cur, struct task_struct *p)
+int migrate_swap(struct task_struct *cur, struct task_struct *p,
+ int target_cpu, int curr_cpu)
{
struct migration_swap_arg arg;
int ret = -EINVAL;
arg = (struct migration_swap_arg){
.src_task = cur,
- .src_cpu = task_cpu(cur),
+ .src_cpu = curr_cpu,
.dst_task = p,
- .dst_cpu = task_cpu(p),
+ .dst_cpu = target_cpu,
};
if (arg.src_cpu == arg.dst_cpu)
@@ -1314,6 +1293,7 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
out:
return ret;
}
+#endif /* CONFIG_NUMA_BALANCING */
/*
* wait_task_inactive - wait for a thread to unschedule.
@@ -1879,8 +1859,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
* rq(c1)->lock (if not at the same time, then in that order).
* C) LOCK of the rq(c1)->lock scheduling in task
*
- * Transitivity guarantees that B happens after A and C after B.
- * Note: we only require RCpc transitivity.
+ * Release/acquire chaining guarantees that B happens after A and C after B.
* Note: the CPU doing B need not be c0 or c1
*
* Example:
@@ -1942,16 +1921,9 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
* UNLOCK rq(0)->lock
*
*
- * However; for wakeups there is a second guarantee we must provide, namely we
- * must observe the state that lead to our wakeup. That is, not only must our
- * task observe its own prior state, it must also observe the stores prior to
- * its wakeup.
- *
- * This means that any means of doing remote wakeups must order the CPU doing
- * the wakeup against the CPU the task is going to end up running on. This,
- * however, is already required for the regular Program-Order guarantee above,
- * since the waking CPU is the one issueing the ACQUIRE (smp_cond_load_acquire).
- *
+ * However, for wakeups there is a second guarantee we must provide, namely we
+ * must ensure that CONDITION=1 done by the caller cannot be reordered with
+ * accesses to the task state; see try_to_wake_up() and set_current_state().
*/
/**
@@ -1967,6 +1939,9 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
* Atomic against schedule() which would dequeue a task, also see
* set_current_state().
*
+ * This function executes a full memory barrier before accessing the task
+ * state; see set_current_state().
+ *
* Return: %true if @p->state changes (an actual wakeup was done),
* %false otherwise.
*/
@@ -1998,21 +1973,20 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* be possible to, falsely, observe p->on_rq == 0 and get stuck
* in smp_cond_load_acquire() below.
*
- * sched_ttwu_pending() try_to_wake_up()
- * [S] p->on_rq = 1; [L] P->state
- * UNLOCK rq->lock -----.
- * \
- * +--- RMB
- * schedule() /
- * LOCK rq->lock -----'
- * UNLOCK rq->lock
+ * sched_ttwu_pending() try_to_wake_up()
+ * STORE p->on_rq = 1 LOAD p->state
+ * UNLOCK rq->lock
+ *
+ * __schedule() (switch to task 'p')
+ * LOCK rq->lock smp_rmb();
+ * smp_mb__after_spinlock();
+ * UNLOCK rq->lock
*
* [task p]
- * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq
+ * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
*
- * Pairs with the UNLOCK+LOCK on rq->lock from the
- * last wakeup of our task and the schedule that got our task
- * current.
+ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+ * __schedule(). See the comment for smp_mb__after_spinlock().
*/
smp_rmb();
if (p->on_rq && ttwu_remote(p, wake_flags))
@@ -2026,15 +2000,17 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* One must be running (->on_cpu == 1) in order to remove oneself
* from the runqueue.
*
- * [S] ->on_cpu = 1; [L] ->on_rq
- * UNLOCK rq->lock
- * RMB
- * LOCK rq->lock
- * [S] ->on_rq = 0; [L] ->on_cpu
+ * __schedule() (switch to task 'p') try_to_wake_up()
+ * STORE p->on_cpu = 1 LOAD p->on_rq
+ * UNLOCK rq->lock
+ *
+ * __schedule() (put 'p' to sleep)
+ * LOCK rq->lock smp_rmb();
+ * smp_mb__after_spinlock();
+ * STORE p->on_rq = 0 LOAD p->on_cpu
*
- * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
- * from the consecutive calls to schedule(); the first switching to our
- * task, the second putting it to sleep.
+ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+ * __schedule(). See the comment for smp_mb__after_spinlock().
*/
smp_rmb();
@@ -2140,8 +2116,7 @@ out:
*
* Return: 1 if the process was woken up, 0 if it was already running.
*
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * This function executes a full memory barrier before accessing the task state.
*/
int wake_up_process(struct task_struct *p)
{
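
A minimal sketch of the sleeper/waker pairing these comments describe;
CONDITION stands for any caller-defined wakeup condition:

	/* sleeper */
	set_current_state(TASK_UNINTERRUPTIBLE);	/* full barrier */
	if (!CONDITION)
		schedule();
	__set_current_state(TASK_RUNNING);

	/* waker */
	CONDITION = 1;
	wake_up_process(p);	/* full barrier before accessing p->state */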
@@ -2317,7 +2292,6 @@ static inline void init_schedstats(void) {}
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
unsigned long flags;
- int cpu = get_cpu();
__sched_fork(clone_flags, p);
/*
@@ -2353,14 +2327,12 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->sched_reset_on_fork = 0;
}
- if (dl_prio(p->prio)) {
- put_cpu();
+ if (dl_prio(p->prio))
return -EAGAIN;
- } else if (rt_prio(p->prio)) {
+ else if (rt_prio(p->prio))
p->sched_class = &rt_sched_class;
- } else {
+ else
p->sched_class = &fair_sched_class;
- }
init_entity_runnable_average(&p->se);
@@ -2376,7 +2348,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
* We're setting the CPU for the first time, we don't migrate,
* so use __set_task_cpu().
*/
- __set_task_cpu(p, cpu);
+ __set_task_cpu(p, smp_processor_id());
if (p->sched_class->task_fork)
p->sched_class->task_fork(p);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
@@ -2393,8 +2365,6 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
#endif
-
- put_cpu();
return 0;
}
@@ -5714,13 +5684,6 @@ void set_rq_offline(struct rq *rq)
}
}
-static void set_cpu_rq_start_time(unsigned int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
-
- rq->age_stamp = sched_clock_cpu(cpu);
-}
-
/*
* used to mark begin/end of suspend/resume:
*/
@@ -5774,6 +5737,18 @@ int sched_cpu_activate(unsigned int cpu)
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
+#ifdef CONFIG_SCHED_SMT
+ /*
+ * The sched_smt_present static key needs to be evaluated on every
+ * hotplug event because at boot time SMT might be disabled when
+ * the number of booted CPUs is limited.
+ *
+	 * If a sibling gets hotplugged later, the key would otherwise stay
+	 * off and SMT scheduling would never be functional.
+ */
+ if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
+ static_branch_enable_cpuslocked(&sched_smt_present);
+#endif
set_cpu_active(cpu, true);
if (sched_smp_initialized) {
@@ -5838,7 +5813,6 @@ static void sched_rq_cpu_starting(unsigned int cpu)
int sched_cpu_starting(unsigned int cpu)
{
- set_cpu_rq_start_time(cpu);
sched_rq_cpu_starting(cpu);
sched_tick_start(cpu);
return 0;
@@ -5871,22 +5845,6 @@ int sched_cpu_dying(unsigned int cpu)
}
#endif
-#ifdef CONFIG_SCHED_SMT
-DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-
-static void sched_init_smt(void)
-{
- /*
- * We've enumerated all CPUs and will assume that if any CPU
- * has SMT siblings, CPU0 will too.
- */
- if (cpumask_weight(cpu_smt_mask(0)) > 1)
- static_branch_enable(&sched_smt_present);
-}
-#else
-static inline void sched_init_smt(void) { }
-#endif
-
void __init sched_init_smp(void)
{
sched_init_numa();
@@ -5908,8 +5866,6 @@ void __init sched_init_smp(void)
init_sched_rt_class();
init_sched_dl_class();
- sched_init_smt();
-
sched_smp_initialized = true;
}
@@ -5954,7 +5910,6 @@ void __init sched_init(void)
int i, j;
unsigned long alloc_size = 0, ptr;
- sched_clock_init();
wait_bit_init();
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -6106,7 +6061,6 @@ void __init sched_init(void)
#ifdef CONFIG_SMP
idle_thread_set_boot_cpu();
- set_cpu_rq_start_time(smp_processor_id());
#endif
init_sched_fair_class();
@@ -6785,6 +6739,16 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
+ if (schedstat_enabled() && tg != &root_task_group) {
+ u64 ws = 0;
+ int i;
+
+ for_each_possible_cpu(i)
+ ws += schedstat_val(tg->se[i]->statistics.wait_sum);
+
+ seq_printf(sf, "wait_sum %llu\n", ws);
+ }
+
return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
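
With schedstats enabled, a non-root group's cpu.stat (cgroup v1 CPU
controller) now also reports the aggregated wait time; illustrative output,
path and values made up:

	$ cat /sys/fs/cgroup/cpu/mygroup/cpu.stat
	nr_periods 345
	nr_throttled 12
	throttled_time 283495872
	wait_sum 9283719283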
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index c907fde01eaa..3fffad3bc8a8 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -53,9 +53,7 @@ struct sugov_cpu {
unsigned int iowait_boost_max;
u64 last_update;
- /* The fields below are only needed when sharing a policy: */
- unsigned long util_cfs;
- unsigned long util_dl;
+ unsigned long bw_dl;
unsigned long max;
/* The field below is for single-CPU policies only: */
@@ -179,33 +177,90 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
return cpufreq_driver_resolve_freq(policy, freq);
}
-static void sugov_get_util(struct sugov_cpu *sg_cpu)
+/*
+ * This function computes an effective utilization for the given CPU, to be
+ * used for frequency selection given the linear relation: f = u * f_max.
+ *
+ * The scheduler tracks the following metrics:
+ *
+ * cpu_util_{cfs,rt,dl,irq}()
+ * cpu_bw_dl()
+ *
+ * Where the cfs, rt and dl util numbers are tracked with the same metric and
+ * synchronized windows and are thus directly comparable.
+ *
+ * The cfs, rt and dl utilizations are the running times measured with
+ * rq->clock_task, which excludes things like IRQ and steal time. The latter
+ * are then accrued in the irq utilization.
+ *
+ * The DL bandwidth number, on the other hand, is not a measured metric but a
+ * value computed from the task model parameters; it gives the minimal
+ * utilization required to meet deadlines.
+ */
+static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
struct rq *rq = cpu_rq(sg_cpu->cpu);
+ unsigned long util, irq, max;
- sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
- sg_cpu->util_cfs = cpu_util_cfs(rq);
- sg_cpu->util_dl = cpu_util_dl(rq);
-}
-
-static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
-{
- struct rq *rq = cpu_rq(sg_cpu->cpu);
+ sg_cpu->max = max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+ sg_cpu->bw_dl = cpu_bw_dl(rq);
if (rt_rq_is_runnable(&rq->rt))
- return sg_cpu->max;
+ return max;
+
+ /*
+	 * Early check to see if IRQ/steal time saturates the CPU; this can
+	 * happen because of inaccuracies in how we track these -- see
+ * update_irq_load_avg().
+ */
+ irq = cpu_util_irq(rq);
+ if (unlikely(irq >= max))
+ return max;
+
+ /*
+	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
+ * CFS tasks and we use the same metric to track the effective
+ * utilization (PELT windows are synchronized) we can directly add them
+ * to obtain the CPU's actual utilization.
+ */
+ util = cpu_util_cfs(rq);
+ util += cpu_util_rt(rq);
+
+ /*
+ * We do not make cpu_util_dl() a permanent part of this sum because we
+ * want to use cpu_bw_dl() later on, but we need to check if the
+	 * CFS+RT+DL sum is saturated (i.e. no idle time) such that we select
+ * f_max when there is no idle time.
+ *
+ * NOTE: numerical errors or stop class might cause us to not quite hit
+ * saturation when we should -- something for later.
+ */
+ if ((util + cpu_util_dl(rq)) >= max)
+ return max;
+
+ /*
+ * There is still idle time; further improve the number by using the
+ * irq metric. Because IRQ/steal time is hidden from the task clock we
+ * need to scale the task numbers:
+ *
+ * 1 - irq
+ * U' = irq + ------- * U
+ * max
+ */
+ util = scale_irq_capacity(util, irq, max);
+ util += irq;
/*
- * Utilization required by DEADLINE must always be granted while, for
- * FAIR, we use blocked utilization of IDLE CPUs as a mechanism to
- * gracefully reduce the frequency when no tasks show up for longer
+ * Bandwidth required by DEADLINE must always be granted while, for
+ * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
+ * to gracefully reduce the frequency when no tasks show up for longer
* periods of time.
*
- * Ideally we would like to set util_dl as min/guaranteed freq and
- * util_cfs + util_dl as requested freq. However, cpufreq is not yet
- * ready for such an interface. So, we only do the latter for now.
+ * Ideally we would like to set bw_dl as min/guaranteed freq and util +
+ * bw_dl as requested freq. However, cpufreq is not yet ready for such
+ * an interface. So, we only do the latter for now.
*/
- return min(sg_cpu->max, (sg_cpu->util_dl + sg_cpu->util_cfs));
+ return min(max, util + sg_cpu->bw_dl);
}
/**
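
A hedged sketch of the U' step above, assuming scale_irq_capacity() computes
util * (max - irq) / max; the numbers are illustrative:

	/* util = 400, irq = 128, max = 1024 */
	util = (400 * (1024 - 128)) / 1024;	/* = 350 */
	util += 128;				/* U' = 478 */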
@@ -360,7 +415,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
*/
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
- if (cpu_util_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->util_dl)
+ if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
sg_policy->need_freq_update = true;
}
@@ -383,9 +438,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
busy = sugov_cpu_is_busy(sg_cpu);
- sugov_get_util(sg_cpu);
+ util = sugov_get_util(sg_cpu);
max = sg_cpu->max;
- util = sugov_aggregate_util(sg_cpu);
sugov_iowait_apply(sg_cpu, time, &util, &max);
next_f = get_next_freq(sg_policy, util, max);
/*
@@ -424,9 +478,8 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
unsigned long j_util, j_max;
- sugov_get_util(j_sg_cpu);
+ j_util = sugov_get_util(j_sg_cpu);
j_max = j_sg_cpu->max;
- j_util = sugov_aggregate_util(j_sg_cpu);
sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
if (j_util * max > j_max * util) {
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b5fbdde6afa9..997ea7b839fa 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -16,6 +16,7 @@
* Fabio Checconi <fchecconi@gmail.com>
*/
#include "sched.h"
+#include "pelt.h"
struct dl_bandwidth def_dl_bandwidth;
@@ -1179,8 +1180,6 @@ static void update_curr_dl(struct rq *rq)
curr->se.exec_start = now;
cgroup_account_cputime(curr, delta_exec);
- sched_rt_avg_update(rq, delta_exec);
-
if (dl_entity_is_special(dl_se))
return;
@@ -1761,6 +1760,9 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
deadline_queue_push_tasks(rq);
+ if (rq->curr->sched_class != &dl_sched_class)
+ update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
return p;
}
@@ -1768,6 +1770,7 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
update_curr_dl(rq);
+ update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
}
@@ -1784,6 +1787,7 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
update_curr_dl(rq);
+ update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
/*
* Even when we have runtime, update_curr_dl() might have resulted in us
* not being the leftmost task anymore. In that case NEED_RESCHED will
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index e593b4118578..60caf1fb94e0 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -111,20 +111,19 @@ static int sched_feat_set(char *cmp)
cmp += 3;
}
- for (i = 0; i < __SCHED_FEAT_NR; i++) {
- if (strcmp(cmp, sched_feat_names[i]) == 0) {
- if (neg) {
- sysctl_sched_features &= ~(1UL << i);
- sched_feat_disable(i);
- } else {
- sysctl_sched_features |= (1UL << i);
- sched_feat_enable(i);
- }
- break;
- }
+ i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
+ if (i < 0)
+ return i;
+
+ if (neg) {
+ sysctl_sched_features &= ~(1UL << i);
+ sched_feat_disable(i);
+ } else {
+ sysctl_sched_features |= (1UL << i);
+ sched_feat_enable(i);
}
- return i;
+ return 0;
}
static ssize_t
@@ -133,7 +132,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
{
char buf[64];
char *cmp;
- int i;
+ int ret;
struct inode *inode;
if (cnt > 63)
@@ -148,10 +147,10 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
/* Ensure the static_key remains in a consistent state */
inode = file_inode(filp);
inode_lock(inode);
- i = sched_feat_set(cmp);
+ ret = sched_feat_set(cmp);
inode_unlock(inode);
- if (i == __SCHED_FEAT_NR)
- return -EINVAL;
+ if (ret < 0)
+ return ret;
*ppos += cnt;
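
A minimal sketch of the match_string() semantics relied on here (it returns
the index of the matching entry or -EINVAL); the feature names are just
examples:

	static const char * const names[] = { "GENTLE_FAIR_SLEEPERS", "START_DEBIT" };
	int i;

	i = match_string(names, ARRAY_SIZE(names), "START_DEBIT");	/* i == 1 */
	i = match_string(names, ARRAY_SIZE(names), "NO_SUCH_FEAT");	/* i == -EINVAL */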
@@ -623,8 +622,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
#undef PU
}
-extern __read_mostly int sched_clock_running;
-
static void print_cpu(struct seq_file *m, int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -843,8 +840,8 @@ void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
SEQ_printf(m, "numa_faults node=%d ", node);
- SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
- SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
+ SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
+ SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2f0a0be4d344..b39fb596f6c1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -255,9 +255,6 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
return cfs_rq->rq;
}
-/* An entity is a task if it doesn't "own" a runqueue */
-#define entity_is_task(se) (!se->my_q)
-
static inline struct task_struct *task_of(struct sched_entity *se)
{
SCHED_WARN_ON(!entity_is_task(se));
@@ -419,7 +416,6 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
return container_of(cfs_rq, struct rq, cfs);
}
-#define entity_is_task(se) 1
#define for_each_sched_entity(se) \
for (; se; se = NULL)
@@ -692,7 +688,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
}
#ifdef CONFIG_SMP
-
+#include "pelt.h"
#include "sched-pelt.h"
static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
@@ -735,11 +731,12 @@ static void attach_entity_cfs_rq(struct sched_entity *se);
* To solve this problem, we also cap the util_avg of successive tasks to
* only 1/2 of the left utilization budget:
*
- * util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
+ * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
*
- * where n denotes the nth task.
+ * where n denotes the nth task and cpu_scale the CPU capacity.
*
- * For example, a simplest series from the beginning would be like:
+ * For example, for a CPU with a capacity of 1024, the simplest series from
+ * the beginning would be like:
*
* task util_avg: 512, 256, 128, 64, 32, 16, 8, ...
* cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
@@ -751,7 +748,8 @@ void post_init_entity_util_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct sched_avg *sa = &se->avg;
- long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
+ long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
+ long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
if (cap > 0) {
if (cfs_rq->avg.util_avg != 0) {
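
Illustrative numbers for the capped series above on an asymmetric system:
assuming a CPU with cpu_scale = 512, each new task is capped at
(512 - cfs_rq util_avg) / 2, giving:

	task   util_avg: 256, 128,  64,  32, ...
	cfs_rq util_avg: 256, 384, 448, 480, ...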
@@ -1314,7 +1312,7 @@ static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
* of each group. Skip other nodes.
*/
if (sched_numa_topology_type == NUMA_BACKPLANE &&
- dist > maxdist)
+ dist >= maxdist)
continue;
/* Add up the faults from nearby nodes. */
@@ -1452,15 +1450,12 @@ static unsigned long capacity_of(int cpu);
/* Cached statistics for all CPUs within a node */
struct numa_stats {
- unsigned long nr_running;
unsigned long load;
/* Total compute capacity of CPUs on a node */
unsigned long compute_capacity;
- /* Approximate capacity in terms of runnable tasks on a node */
- unsigned long task_capacity;
- int has_free_capacity;
+ unsigned int nr_running;
};
/*
@@ -1487,8 +1482,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
* the @ns structure is NULL'ed and task_numa_compare() will
* not find this node attractive.
*
- * We'll either bail at !has_free_capacity, or we'll detect a huge
- * imbalance and bail there.
+ * We'll detect a huge imbalance and bail there.
*/
if (!cpus)
return;
@@ -1497,9 +1491,8 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
capacity = cpus / smt; /* cores */
- ns->task_capacity = min_t(unsigned, capacity,
+ capacity = min_t(unsigned, capacity,
DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
- ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
}
struct task_numa_env {
@@ -1548,28 +1541,12 @@ static bool load_too_imbalanced(long src_load, long dst_load,
src_capacity = env->src_stats.compute_capacity;
dst_capacity = env->dst_stats.compute_capacity;
- /* We care about the slope of the imbalance, not the direction. */
- if (dst_load < src_load)
- swap(dst_load, src_load);
+ imb = abs(dst_load * src_capacity - src_load * dst_capacity);
- /* Is the difference below the threshold? */
- imb = dst_load * src_capacity * 100 -
- src_load * dst_capacity * env->imbalance_pct;
- if (imb <= 0)
- return false;
-
- /*
- * The imbalance is above the allowed threshold.
- * Compare it with the old imbalance.
- */
orig_src_load = env->src_stats.load;
orig_dst_load = env->dst_stats.load;
- if (orig_dst_load < orig_src_load)
- swap(orig_dst_load, orig_src_load);
-
- old_imb = orig_dst_load * src_capacity * 100 -
- orig_src_load * dst_capacity * env->imbalance_pct;
+ old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
/* Would this change make things worse? */
return (imb > old_imb);
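
A worked example of the capacity-weighted comparison above, with made-up
numbers (src_capacity = 1024, dst_capacity = 512, src_load = 100,
dst_load = 80):

	imb = abs(80 * 1024 - 100 * 512);	/* |81920 - 51200| = 30720 */
	/* old_imb is computed the same way from the original loads;
	 * the candidate is rejected only if imb > old_imb. */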
@@ -1582,9 +1559,8 @@ static bool load_too_imbalanced(long src_load, long dst_load,
* be exchanged with the source task
*/
static void task_numa_compare(struct task_numa_env *env,
- long taskimp, long groupimp)
+ long taskimp, long groupimp, bool maymove)
{
- struct rq *src_rq = cpu_rq(env->src_cpu);
struct rq *dst_rq = cpu_rq(env->dst_cpu);
struct task_struct *cur;
long src_load, dst_load;
@@ -1605,97 +1581,73 @@ static void task_numa_compare(struct task_numa_env *env,
if (cur == env->p)
goto unlock;
+ if (!cur) {
+ if (maymove || imp > env->best_imp)
+ goto assign;
+ else
+ goto unlock;
+ }
+
/*
* "imp" is the fault differential for the source task between the
* source and destination node. Calculate the total differential for
* the source task and potential destination task. The more negative
- * the value is, the more rmeote accesses that would be expected to
+ * the value is, the more remote accesses that would be expected to
* be incurred if the tasks were swapped.
*/
- if (cur) {
- /* Skip this swap candidate if cannot move to the source CPU: */
- if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
- goto unlock;
+ /* Skip this swap candidate if cannot move to the source cpu */
+ if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
+ goto unlock;
+ /*
+ * If dst and source tasks are in the same NUMA group, or not
+ * in any group then look only at task weights.
+ */
+ if (cur->numa_group == env->p->numa_group) {
+ imp = taskimp + task_weight(cur, env->src_nid, dist) -
+ task_weight(cur, env->dst_nid, dist);
/*
- * If dst and source tasks are in the same NUMA group, or not
- * in any group then look only at task weights.
+ * Add some hysteresis to prevent swapping the
+ * tasks within a group over tiny differences.
*/
- if (cur->numa_group == env->p->numa_group) {
- imp = taskimp + task_weight(cur, env->src_nid, dist) -
- task_weight(cur, env->dst_nid, dist);
- /*
- * Add some hysteresis to prevent swapping the
- * tasks within a group over tiny differences.
- */
- if (cur->numa_group)
- imp -= imp/16;
- } else {
- /*
- * Compare the group weights. If a task is all by
- * itself (not part of a group), use the task weight
- * instead.
- */
- if (cur->numa_group)
- imp += group_weight(cur, env->src_nid, dist) -
- group_weight(cur, env->dst_nid, dist);
- else
- imp += task_weight(cur, env->src_nid, dist) -
- task_weight(cur, env->dst_nid, dist);
- }
+ if (cur->numa_group)
+ imp -= imp / 16;
+ } else {
+ /*
+ * Compare the group weights. If a task is all by itself
+ * (not part of a group), use the task weight instead.
+ */
+ if (cur->numa_group && env->p->numa_group)
+ imp += group_weight(cur, env->src_nid, dist) -
+ group_weight(cur, env->dst_nid, dist);
+ else
+ imp += task_weight(cur, env->src_nid, dist) -
+ task_weight(cur, env->dst_nid, dist);
}
- if (imp <= env->best_imp && moveimp <= env->best_imp)
+ if (imp <= env->best_imp)
goto unlock;
- if (!cur) {
- /* Is there capacity at our destination? */
- if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
- !env->dst_stats.has_free_capacity)
- goto unlock;
-
- goto balance;
- }
-
- /* Balance doesn't matter much if we're running a task per CPU: */
- if (imp > env->best_imp && src_rq->nr_running == 1 &&
- dst_rq->nr_running == 1)
+ if (maymove && moveimp > imp && moveimp > env->best_imp) {
+ imp = moveimp - 1;
+ cur = NULL;
goto assign;
+ }
/*
* In the overloaded case, try and keep the load balanced.
*/
-balance:
- load = task_h_load(env->p);
+ load = task_h_load(env->p) - task_h_load(cur);
+ if (!load)
+ goto assign;
+
dst_load = env->dst_stats.load + load;
src_load = env->src_stats.load - load;
- if (moveimp > imp && moveimp > env->best_imp) {
- /*
- * If the improvement from just moving env->p direction is
- * better than swapping tasks around, check if a move is
- * possible. Store a slightly smaller score than moveimp,
- * so an actually idle CPU will win.
- */
- if (!load_too_imbalanced(src_load, dst_load, env)) {
- imp = moveimp - 1;
- cur = NULL;
- goto assign;
- }
- }
-
- if (imp <= env->best_imp)
- goto unlock;
-
- if (cur) {
- load = task_h_load(cur);
- dst_load -= load;
- src_load += load;
- }
-
if (load_too_imbalanced(src_load, dst_load, env))
goto unlock;
+assign:
/*
* One idle CPU per node is evaluated for a task numa move.
* Call select_idle_sibling to maybe find a better one.
@@ -1711,7 +1663,6 @@ balance:
local_irq_enable();
}
-assign:
task_numa_assign(env, cur, imp);
unlock:
rcu_read_unlock();
@@ -1720,43 +1671,30 @@ unlock:
static void task_numa_find_cpu(struct task_numa_env *env,
long taskimp, long groupimp)
{
+ long src_load, dst_load, load;
+ bool maymove = false;
int cpu;
+ load = task_h_load(env->p);
+ dst_load = env->dst_stats.load + load;
+ src_load = env->src_stats.load - load;
+
+ /*
+	 * If the improvement from just moving env->p to the destination is
+	 * better than swapping tasks around, check if a move is possible.
+ */
+ maymove = !load_too_imbalanced(src_load, dst_load, env);
+
for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
/* Skip this CPU if the source task cannot migrate */
if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
continue;
env->dst_cpu = cpu;
- task_numa_compare(env, taskimp, groupimp);
+ task_numa_compare(env, taskimp, groupimp, maymove);
}
}
-/* Only move tasks to a NUMA node less busy than the current node. */
-static bool numa_has_capacity(struct task_numa_env *env)
-{
- struct numa_stats *src = &env->src_stats;
- struct numa_stats *dst = &env->dst_stats;
-
- if (src->has_free_capacity && !dst->has_free_capacity)
- return false;
-
- /*
- * Only consider a task move if the source has a higher load
- * than the destination, corrected for CPU capacity on each node.
- *
- * src->load dst->load
- * --------------------- vs ---------------------
- * src->compute_capacity dst->compute_capacity
- */
- if (src->load * dst->compute_capacity * env->imbalance_pct >
-
- dst->load * src->compute_capacity * 100)
- return true;
-
- return false;
-}
-
static int task_numa_migrate(struct task_struct *p)
{
struct task_numa_env env = {
@@ -1797,7 +1735,7 @@ static int task_numa_migrate(struct task_struct *p)
* elsewhere, so there is no point in (re)trying.
*/
if (unlikely(!sd)) {
- p->numa_preferred_nid = task_node(p);
+ sched_setnuma(p, task_node(p));
return -EINVAL;
}
@@ -1811,8 +1749,7 @@ static int task_numa_migrate(struct task_struct *p)
update_numa_stats(&env.dst_stats, env.dst_nid);
/* Try to find a spot on the preferred nid. */
- if (numa_has_capacity(&env))
- task_numa_find_cpu(&env, taskimp, groupimp);
+ task_numa_find_cpu(&env, taskimp, groupimp);
/*
* Look at other nodes in these cases:
@@ -1842,8 +1779,7 @@ static int task_numa_migrate(struct task_struct *p)
env.dist = dist;
env.dst_nid = nid;
update_numa_stats(&env.dst_stats, env.dst_nid);
- if (numa_has_capacity(&env))
- task_numa_find_cpu(&env, taskimp, groupimp);
+ task_numa_find_cpu(&env, taskimp, groupimp);
}
}
@@ -1856,15 +1792,13 @@ static int task_numa_migrate(struct task_struct *p)
* trying for a better one later. Do not set the preferred node here.
*/
if (p->numa_group) {
- struct numa_group *ng = p->numa_group;
-
if (env.best_cpu == -1)
nid = env.src_nid;
else
- nid = env.dst_nid;
+ nid = cpu_to_node(env.best_cpu);
- if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
- sched_setnuma(p, env.dst_nid);
+ if (nid != p->numa_preferred_nid)
+ sched_setnuma(p, nid);
}
/* No better CPU than the current one was found. */
@@ -1884,7 +1818,8 @@ static int task_numa_migrate(struct task_struct *p)
return ret;
}
- ret = migrate_swap(p, env.best_task);
+ ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
+
if (ret != 0)
trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
put_task_struct(env.best_task);
@@ -2144,8 +2079,8 @@ static int preferred_group_nid(struct task_struct *p, int nid)
static void task_numa_placement(struct task_struct *p)
{
- int seq, nid, max_nid = -1, max_group_nid = -1;
- unsigned long max_faults = 0, max_group_faults = 0;
+ int seq, nid, max_nid = -1;
+ unsigned long max_faults = 0;
unsigned long fault_types[2] = { 0, 0 };
unsigned long total_faults;
u64 runtime, period;
@@ -2224,33 +2159,30 @@ static void task_numa_placement(struct task_struct *p)
}
}
- if (faults > max_faults) {
- max_faults = faults;
+ if (!p->numa_group) {
+ if (faults > max_faults) {
+ max_faults = faults;
+ max_nid = nid;
+ }
+ } else if (group_faults > max_faults) {
+ max_faults = group_faults;
max_nid = nid;
}
-
- if (group_faults > max_group_faults) {
- max_group_faults = group_faults;
- max_group_nid = nid;
- }
}
- update_task_scan_period(p, fault_types[0], fault_types[1]);
-
if (p->numa_group) {
numa_group_count_active_nodes(p->numa_group);
spin_unlock_irq(group_lock);
- max_nid = preferred_group_nid(p, max_group_nid);
+ max_nid = preferred_group_nid(p, max_nid);
}
if (max_faults) {
/* Set the new preferred node */
if (max_nid != p->numa_preferred_nid)
sched_setnuma(p, max_nid);
-
- if (task_node(p) != p->numa_preferred_nid)
- numa_migrate_preferred(p);
}
+
+ update_task_scan_period(p, fault_types[0], fault_types[1]);
}
static inline int get_numa_group(struct numa_group *grp)
@@ -2450,14 +2382,14 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
numa_is_active_node(mem_node, ng))
local = 1;
- task_numa_placement(p);
-
/*
	 * Retry task to preferred node migration periodically, in case it
	 * previously failed, or the scheduler moved us.
*/
- if (time_after(jiffies, p->numa_migrate_retry))
+ if (time_after(jiffies, p->numa_migrate_retry)) {
+ task_numa_placement(p);
numa_migrate_preferred(p);
+ }
if (migrated)
p->numa_pages_migrated += pages;
@@ -2749,19 +2681,6 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
} while (0)
#ifdef CONFIG_SMP
-/*
- * XXX we want to get rid of these helpers and use the full load resolution.
- */
-static inline long se_weight(struct sched_entity *se)
-{
- return scale_load_down(se->load.weight);
-}
-
-static inline long se_runnable(struct sched_entity *se)
-{
- return scale_load_down(se->runnable_weight);
-}
-
static inline void
enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
@@ -3062,314 +2981,6 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
}
#ifdef CONFIG_SMP
-/*
- * Approximate:
- * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
- */
-static u64 decay_load(u64 val, u64 n)
-{
- unsigned int local_n;
-
- if (unlikely(n > LOAD_AVG_PERIOD * 63))
- return 0;
-
- /* after bounds checking we can collapse to 32-bit */
- local_n = n;
-
- /*
- * As y^PERIOD = 1/2, we can combine
- * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
- * With a look-up table which covers y^n (n<PERIOD)
- *
- * To achieve constant time decay_load.
- */
- if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
- val >>= local_n / LOAD_AVG_PERIOD;
- local_n %= LOAD_AVG_PERIOD;
- }
-
- val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
- return val;
-}
-
-static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
-{
- u32 c1, c2, c3 = d3; /* y^0 == 1 */
-
- /*
- * c1 = d1 y^p
- */
- c1 = decay_load((u64)d1, periods);
-
- /*
- * p-1
- * c2 = 1024 \Sum y^n
- * n=1
- *
- * inf inf
- * = 1024 ( \Sum y^n - \Sum y^n - y^0 )
- * n=0 n=p
- */
- c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;
-
- return c1 + c2 + c3;
-}
-
-/*
- * Accumulate the three separate parts of the sum; d1 the remainder
- * of the last (incomplete) period, d2 the span of full periods and d3
- * the remainder of the (incomplete) current period.
- *
- * d1 d2 d3
- * ^ ^ ^
- * | | |
- * |<->|<----------------->|<--->|
- * ... |---x---|------| ... |------|-----x (now)
- *
- * p-1
- * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
- * n=1
- *
- * = u y^p + (Step 1)
- *
- * p-1
- * d1 y^p + 1024 \Sum y^n + d3 y^0 (Step 2)
- * n=1
- */
-static __always_inline u32
-accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
- unsigned long load, unsigned long runnable, int running)
-{
- unsigned long scale_freq, scale_cpu;
- u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
- u64 periods;
-
- scale_freq = arch_scale_freq_capacity(cpu);
- scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
-
- delta += sa->period_contrib;
- periods = delta / 1024; /* A period is 1024us (~1ms) */
-
- /*
- * Step 1: decay old *_sum if we crossed period boundaries.
- */
- if (periods) {
- sa->load_sum = decay_load(sa->load_sum, periods);
- sa->runnable_load_sum =
- decay_load(sa->runnable_load_sum, periods);
- sa->util_sum = decay_load((u64)(sa->util_sum), periods);
-
- /*
- * Step 2
- */
- delta %= 1024;
- contrib = __accumulate_pelt_segments(periods,
- 1024 - sa->period_contrib, delta);
- }
- sa->period_contrib = delta;
-
- contrib = cap_scale(contrib, scale_freq);
- if (load)
- sa->load_sum += load * contrib;
- if (runnable)
- sa->runnable_load_sum += runnable * contrib;
- if (running)
- sa->util_sum += contrib * scale_cpu;
-
- return periods;
-}
-
-/*
- * We can represent the historical contribution to runnable average as the
- * coefficients of a geometric series. To do this we sub-divide our runnable
- * history into segments of approximately 1ms (1024us); label the segment that
- * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
- *
- * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
- * p0 p1 p2
- * (now) (~1ms ago) (~2ms ago)
- *
- * Let u_i denote the fraction of p_i that the entity was runnable.
- *
- * We then designate the fractions u_i as our co-efficients, yielding the
- * following representation of historical load:
- * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
- *
- * We choose y based on the with of a reasonably scheduling period, fixing:
- * y^32 = 0.5
- *
- * This means that the contribution to load ~32ms ago (u_32) will be weighted
- * approximately half as much as the contribution to load within the last ms
- * (u_0).
- *
- * When a period "rolls over" and we have new u_0`, multiplying the previous
- * sum again by y is sufficient to update:
- * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
- * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
- */
-static __always_inline int
-___update_load_sum(u64 now, int cpu, struct sched_avg *sa,
- unsigned long load, unsigned long runnable, int running)
-{
- u64 delta;
-
- delta = now - sa->last_update_time;
- /*
- * This should only happen when time goes backwards, which it
- * unfortunately does during sched clock init when we swap over to TSC.
- */
- if ((s64)delta < 0) {
- sa->last_update_time = now;
- return 0;
- }
-
- /*
- * Use 1024ns as the unit of measurement since it's a reasonable
- * approximation of 1us and fast to compute.
- */
- delta >>= 10;
- if (!delta)
- return 0;
-
- sa->last_update_time += delta << 10;
-
- /*
- * running is a subset of runnable (weight) so running can't be set if
- * runnable is clear. But there are some corner cases where the current
- * se has been already dequeued but cfs_rq->curr still points to it.
- * This means that weight will be 0 but not running for a sched_entity
- * but also for a cfs_rq if the latter becomes idle. As an example,
- * this happens during idle_balance() which calls
- * update_blocked_averages()
- */
- if (!load)
- runnable = running = 0;
-
- /*
- * Now we know we crossed measurement unit boundaries. The *_avg
- * accrues by two steps:
- *
- * Step 1: accumulate *_sum since last_update_time. If we haven't
- * crossed period boundaries, finish.
- */
- if (!accumulate_sum(delta, cpu, sa, load, runnable, running))
- return 0;
-
- return 1;
-}
-
-static __always_inline void
-___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)
-{
- u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
-
- /*
- * Step 2: update *_avg.
- */
- sa->load_avg = div_u64(load * sa->load_sum, divider);
- sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
- sa->util_avg = sa->util_sum / divider;
-}
-
-/*
- * When a task is dequeued, its estimated utilization should not be update if
- * its util_avg has not been updated at least once.
- * This flag is used to synchronize util_avg updates with util_est updates.
- * We map this information into the LSB bit of the utilization saved at
- * dequeue time (i.e. util_est.dequeued).
- */
-#define UTIL_AVG_UNCHANGED 0x1
-
-static inline void cfs_se_util_change(struct sched_avg *avg)
-{
- unsigned int enqueued;
-
- if (!sched_feat(UTIL_EST))
- return;
-
- /* Avoid store if the flag has been already set */
- enqueued = avg->util_est.enqueued;
- if (!(enqueued & UTIL_AVG_UNCHANGED))
- return;
-
- /* Reset flag to report util_avg has been updated */
- enqueued &= ~UTIL_AVG_UNCHANGED;
- WRITE_ONCE(avg->util_est.enqueued, enqueued);
-}
-
-/*
- * sched_entity:
- *
- * task:
- * se_runnable() == se_weight()
- *
- * group: [ see update_cfs_group() ]
- * se_weight() = tg->weight * grq->load_avg / tg->load_avg
- * se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
- *
- * load_sum := runnable_sum
- * load_avg = se_weight(se) * runnable_avg
- *
- * runnable_load_sum := runnable_sum
- * runnable_load_avg = se_runnable(se) * runnable_avg
- *
- * XXX collapse load_sum and runnable_load_sum
- *
- * cfq_rs:
- *
- * load_sum = \Sum se_weight(se) * se->avg.load_sum
- * load_avg = \Sum se->avg.load_avg
- *
- * runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
- * runnable_load_avg = \Sum se->avg.runable_load_avg
- */
-
-static int
-__update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
-{
- if (entity_is_task(se))
- se->runnable_weight = se->load.weight;
-
- if (___update_load_sum(now, cpu, &se->avg, 0, 0, 0)) {
- ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
- return 1;
- }
-
- return 0;
-}
-
-static int
-__update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- if (entity_is_task(se))
- se->runnable_weight = se->load.weight;
-
- if (___update_load_sum(now, cpu, &se->avg, !!se->on_rq, !!se->on_rq,
- cfs_rq->curr == se)) {
-
- ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
- cfs_se_util_change(&se->avg);
- return 1;
- }
-
- return 0;
-}
-
-static int
-__update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
-{
- if (___update_load_sum(now, cpu, &cfs_rq->avg,
- scale_load_down(cfs_rq->load.weight),
- scale_load_down(cfs_rq->runnable_weight),
- cfs_rq->curr != NULL)) {
-
- ___update_load_avg(&cfs_rq->avg, 1, 1);
- return 1;
- }
-
- return 0;
-}
-
#ifdef CONFIG_FAIR_GROUP_SCHED
/**
* update_tg_load_avg - update the tg's load avg
@@ -4037,12 +3648,6 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
#else /* CONFIG_SMP */
-static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
-{
- return 0;
-}
-
#define UPDATE_TG 0x0
#define SKIP_AGE_LOAD 0x0
#define DO_ATTACH 0x0
@@ -4726,7 +4331,6 @@ static inline int throttled_lb_pair(struct task_group *tg,
throttled_hierarchy(dest_cfs_rq);
}
-/* updated child weight may affect parent so we have to do this bottom up */
static int tg_unthrottle_up(struct task_group *tg, void *data)
{
struct rq *rq = data;
@@ -5653,8 +5257,6 @@ static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
}
-
- sched_avg_update(this_rq);
}
/* Used instead of source_load when we know the type == 0 */
@@ -6237,6 +5839,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
}
#ifdef CONFIG_SCHED_SMT
+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
static inline void set_idle_cores(int cpu, int val)
{
@@ -7294,8 +6897,8 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
{
struct numa_group *numa_group = rcu_dereference(p->numa_group);
- unsigned long src_faults, dst_faults;
- int src_nid, dst_nid;
+ unsigned long src_weight, dst_weight;
+ int src_nid, dst_nid, dist;
if (!static_branch_likely(&sched_numa_balancing))
return -1;
@@ -7322,18 +6925,19 @@ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
return 0;
/* Leaving a core idle is often worse than degrading locality. */
- if (env->idle != CPU_NOT_IDLE)
+ if (env->idle == CPU_IDLE)
return -1;
+ dist = node_distance(src_nid, dst_nid);
if (numa_group) {
- src_faults = group_faults(p, src_nid);
- dst_faults = group_faults(p, dst_nid);
+ src_weight = group_weight(p, src_nid, dist);
+ dst_weight = group_weight(p, dst_nid, dist);
} else {
- src_faults = task_faults(p, src_nid);
- dst_faults = task_faults(p, dst_nid);
+ src_weight = task_weight(p, src_nid, dist);
+ dst_weight = task_weight(p, dst_nid, dist);
}
- return dst_faults < src_faults;
+ return dst_weight < src_weight;
}
#else
@@ -7620,6 +7224,22 @@ static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
return false;
}
+static inline bool others_have_blocked(struct rq *rq)
+{
+ if (READ_ONCE(rq->avg_rt.util_avg))
+ return true;
+
+ if (READ_ONCE(rq->avg_dl.util_avg))
+ return true;
+
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+ if (READ_ONCE(rq->avg_irq.util_avg))
+ return true;
+#endif
+
+ return false;
+}
+
#ifdef CONFIG_FAIR_GROUP_SCHED
static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
@@ -7679,6 +7299,12 @@ static void update_blocked_averages(int cpu)
if (cfs_rq_has_blocked(cfs_rq))
done = false;
}
+ update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+ update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+ update_irq_load_avg(rq, 0);
+ /* Don't need periodic decay once load/util_avg are null */
+ if (others_have_blocked(rq))
+ done = false;
#ifdef CONFIG_NO_HZ_COMMON
rq->last_blocked_load_update_tick = jiffies;
@@ -7744,9 +7370,12 @@ static inline void update_blocked_averages(int cpu)
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+ update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+ update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+ update_irq_load_avg(rq, 0);
#ifdef CONFIG_NO_HZ_COMMON
rq->last_blocked_load_update_tick = jiffies;
- if (!cfs_rq_has_blocked(cfs_rq))
+ if (!cfs_rq_has_blocked(cfs_rq) && !others_have_blocked(rq))
rq->has_blocked_load = 0;
#endif
rq_unlock_irqrestore(rq, &rf);
@@ -7856,39 +7485,32 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
static unsigned long scale_rt_capacity(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- u64 total, used, age_stamp, avg;
- s64 delta;
+ unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
+ unsigned long used, free;
+ unsigned long irq;
- /*
- * Since we're reading these variables without serialization make sure
- * we read them once before doing sanity checks on them.
- */
- age_stamp = READ_ONCE(rq->age_stamp);
- avg = READ_ONCE(rq->rt_avg);
- delta = __rq_clock_broken(rq) - age_stamp;
+ irq = cpu_util_irq(rq);
- if (unlikely(delta < 0))
- delta = 0;
+ if (unlikely(irq >= max))
+ return 1;
- total = sched_avg_period() + delta;
+ used = READ_ONCE(rq->avg_rt.util_avg);
+ used += READ_ONCE(rq->avg_dl.util_avg);
- used = div_u64(avg, total);
+ if (unlikely(used >= max))
+ return 1;
- if (likely(used < SCHED_CAPACITY_SCALE))
- return SCHED_CAPACITY_SCALE - used;
+ free = max - used;
- return 1;
+ return scale_irq_capacity(free, irq, max);
}
static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
- unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
+ unsigned long capacity = scale_rt_capacity(cpu);
struct sched_group *sdg = sd->groups;
- cpu_rq(cpu)->cpu_capacity_orig = capacity;
-
- capacity *= scale_rt_capacity(cpu);
- capacity >>= SCHED_CAPACITY_SHIFT;
+ cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
if (!capacity)
capacity = 1;
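
Illustrative arithmetic for the rewritten scale_rt_capacity(), with made-up
values (max = 1024, irq utilization 100, rt+dl utilization 200):

	used = 200;				/* avg_rt + avg_dl */
	free = 1024 - used;			/* = 824 */
	capacity = (824 * (1024 - 100)) / 1024;	/* scale_irq_capacity() -> 743 */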
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
new file mode 100644
index 000000000000..35475c0c5419
--- /dev/null
+++ b/kernel/sched/pelt.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Per Entity Load Tracking
+ *
+ * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Interactivity improvements by Mike Galbraith
+ * (C) 2007 Mike Galbraith <efault@gmx.de>
+ *
+ * Various enhancements by Dmitry Adamushko.
+ * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
+ *
+ * Group scheduling enhancements by Srivatsa Vaddagiri
+ * Copyright IBM Corporation, 2007
+ * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
+ *
+ * Scaled math optimizations by Thomas Gleixner
+ * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
+ *
+ * Move PELT related code from fair.c into this pelt.c file
+ * Author: Vincent Guittot <vincent.guittot@linaro.org>
+ */
+
+#include <linux/sched.h>
+#include "sched.h"
+#include "sched-pelt.h"
+#include "pelt.h"
+
+/*
+ * Approximate:
+ * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
+ */
+static u64 decay_load(u64 val, u64 n)
+{
+ unsigned int local_n;
+
+ if (unlikely(n > LOAD_AVG_PERIOD * 63))
+ return 0;
+
+ /* after bounds checking we can collapse to 32-bit */
+ local_n = n;
+
+ /*
+ * As y^PERIOD = 1/2, we can combine
+ * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
+ * With a look-up table which covers y^n (n<PERIOD)
+ *
+ * To achieve constant time decay_load.
+ */
+ if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
+ val >>= local_n / LOAD_AVG_PERIOD;
+ local_n %= LOAD_AVG_PERIOD;
+ }
+
+ val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
+ return val;
+}
+
+static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
+{
+ u32 c1, c2, c3 = d3; /* y^0 == 1 */
+
+ /*
+ * c1 = d1 y^p
+ */
+ c1 = decay_load((u64)d1, periods);
+
+ /*
+ * p-1
+ * c2 = 1024 \Sum y^n
+ * n=1
+ *
+ * inf inf
+ * = 1024 ( \Sum y^n - \Sum y^n - y^0 )
+ * n=0 n=p
+ */
+ c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;
+
+ return c1 + c2 + c3;
+}
+
+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+
+/*
+ * Accumulate the three separate parts of the sum; d1 the remainder
+ * of the last (incomplete) period, d2 the span of full periods and d3
+ * the remainder of the (incomplete) current period.
+ *
+ * d1 d2 d3
+ * ^ ^ ^
+ * | | |
+ * |<->|<----------------->|<--->|
+ * ... |---x---|------| ... |------|-----x (now)
+ *
+ * p-1
+ * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
+ * n=1
+ *
+ * = u y^p + (Step 1)
+ *
+ * p-1
+ * d1 y^p + 1024 \Sum y^n + d3 y^0 (Step 2)
+ * n=1
+ */
+static __always_inline u32
+accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
+ unsigned long load, unsigned long runnable, int running)
+{
+ unsigned long scale_freq, scale_cpu;
+ u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
+ u64 periods;
+
+ scale_freq = arch_scale_freq_capacity(cpu);
+ scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+
+ delta += sa->period_contrib;
+ periods = delta / 1024; /* A period is 1024us (~1ms) */
+
+ /*
+ * Step 1: decay old *_sum if we crossed period boundaries.
+ */
+ if (periods) {
+ sa->load_sum = decay_load(sa->load_sum, periods);
+ sa->runnable_load_sum =
+ decay_load(sa->runnable_load_sum, periods);
+ sa->util_sum = decay_load((u64)(sa->util_sum), periods);
+
+ /*
+ * Step 2
+ */
+ delta %= 1024;
+ contrib = __accumulate_pelt_segments(periods,
+ 1024 - sa->period_contrib, delta);
+ }
+ sa->period_contrib = delta;
+
+ contrib = cap_scale(contrib, scale_freq);
+ if (load)
+ sa->load_sum += load * contrib;
+ if (runnable)
+ sa->runnable_load_sum += runnable * contrib;
+ if (running)
+ sa->util_sum += contrib * scale_cpu;
+
+ return periods;
+}
+
+/*
+ * We can represent the historical contribution to runnable average as the
+ * coefficients of a geometric series. To do this we sub-divide our runnable
+ * history into segments of approximately 1ms (1024us); label the segment that
+ * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
+ *
+ * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
+ * p0 p1 p2
+ * (now) (~1ms ago) (~2ms ago)
+ *
+ * Let u_i denote the fraction of p_i that the entity was runnable.
+ *
+ * We then designate the fractions u_i as our co-efficients, yielding the
+ * following representation of historical load:
+ * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
+ *
+ * We choose y based on the width of a reasonable scheduling period, fixing:
+ * y^32 = 0.5
+ *
+ * This means that the contribution to load ~32ms ago (u_32) will be weighted
+ * approximately half as much as the contribution to load within the last ms
+ * (u_0).
+ *
+ * When a period "rolls over" and we have new u_0`, multiplying the previous
+ * sum again by y is sufficient to update:
+ * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
+ * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
+ */
+static __always_inline int
+___update_load_sum(u64 now, int cpu, struct sched_avg *sa,
+ unsigned long load, unsigned long runnable, int running)
+{
+ u64 delta;
+
+ delta = now - sa->last_update_time;
+ /*
+ * This should only happen when time goes backwards, which it
+ * unfortunately does during sched clock init when we swap over to TSC.
+ */
+ if ((s64)delta < 0) {
+ sa->last_update_time = now;
+ return 0;
+ }
+
+ /*
+ * Use 1024ns as the unit of measurement since it's a reasonable
+ * approximation of 1us and fast to compute.
+ */
+ delta >>= 10;
+ if (!delta)
+ return 0;
+
+ sa->last_update_time += delta << 10;
+
+ /*
+ * running is a subset of runnable (weight) so running can't be set if
+ * runnable is clear. But there are some corner cases where the current
+	 * se has already been dequeued but cfs_rq->curr still points to it.
+	 * This means that weight will be 0 but not running for a sched_entity
+	 * but also for a cfs_rq if the latter becomes idle. As an example,
+	 * this happens during idle_balance() which calls
+	 * update_blocked_averages().
+ */
+ if (!load)
+ runnable = running = 0;
+
+ /*
+ * Now we know we crossed measurement unit boundaries. The *_avg
+ * accrues by two steps:
+ *
+ * Step 1: accumulate *_sum since last_update_time. If we haven't
+ * crossed period boundaries, finish.
+ */
+ if (!accumulate_sum(delta, cpu, sa, load, runnable, running))
+ return 0;
+
+ return 1;
+}
+
+static __always_inline void
+___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)
+{
+ u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
+
+ /*
+ * Step 2: update *_avg.
+ */
+ sa->load_avg = div_u64(load * sa->load_sum, divider);
+ sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
+ WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
+}
+
+/*
+ * sched_entity:
+ *
+ * task:
+ * se_runnable() == se_weight()
+ *
+ * group: [ see update_cfs_group() ]
+ * se_weight() = tg->weight * grq->load_avg / tg->load_avg
+ * se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
+ *
+ * load_sum := runnable_sum
+ * load_avg = se_weight(se) * runnable_avg
+ *
+ * runnable_load_sum := runnable_sum
+ * runnable_load_avg = se_runnable(se) * runnable_avg
+ *
+ * XXX collapse load_sum and runnable_load_sum
+ *
+ * cfs_rq:
+ *
+ * load_sum = \Sum se_weight(se) * se->avg.load_sum
+ * load_avg = \Sum se->avg.load_avg
+ *
+ * runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
+ * runnable_load_avg = \Sum se->avg.runnable_load_avg
+ */
+
+int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
+{
+ if (entity_is_task(se))
+ se->runnable_weight = se->load.weight;
+
+ if (___update_load_sum(now, cpu, &se->avg, 0, 0, 0)) {
+ ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+ return 1;
+ }
+
+ return 0;
+}
+
+int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ if (entity_is_task(se))
+ se->runnable_weight = se->load.weight;
+
+ if (___update_load_sum(now, cpu, &se->avg, !!se->on_rq, !!se->on_rq,
+ cfs_rq->curr == se)) {
+
+ ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+ cfs_se_util_change(&se->avg);
+ return 1;
+ }
+
+ return 0;
+}
+
+int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
+{
+ if (___update_load_sum(now, cpu, &cfs_rq->avg,
+ scale_load_down(cfs_rq->load.weight),
+ scale_load_down(cfs_rq->runnable_weight),
+ cfs_rq->curr != NULL)) {
+
+ ___update_load_avg(&cfs_rq->avg, 1, 1);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * rt_rq:
+ *
+ * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
+ * util_sum = cpu_scale * load_sum
+ * runnable_load_sum = load_sum
+ *
+ * load_avg and runnable_load_avg are not supported and meaningless.
+ *
+ */
+
+int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+ if (___update_load_sum(now, rq->cpu, &rq->avg_rt,
+ running,
+ running,
+ running)) {
+
+ ___update_load_avg(&rq->avg_rt, 1, 1);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * dl_rq:
+ *
+ * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
+ * util_sum = cpu_scale * load_sum
+ * runnable_load_sum = load_sum
+ *
+ */
+
+int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+ if (___update_load_sum(now, rq->cpu, &rq->avg_dl,
+ running,
+ running,
+ running)) {
+
+ ___update_load_avg(&rq->avg_dl, 1, 1);
+ return 1;
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+/*
+ * irq:
+ *
+ * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
+ * util_sum = cpu_scale * load_sum
+ * runnable_load_sum = load_sum
+ *
+ */
+
+int update_irq_load_avg(struct rq *rq, u64 running)
+{
+ int ret = 0;
+ /*
+	 * We know how much time has been spent in interrupt context since the
+	 * last update, but not when it happened. Be pessimistic and assume the
+	 * interrupt occurred just before this update. This is not far from
+	 * reality because an interrupt will most probably wake up a task and
+	 * trigger an update of the rq clock, during which the metric is
+	 * updated.
+	 * We start to decay with the normal context time and then add the
+	 * interrupt context time.
+	 * We can safely subtract running from rq->clock because
+	 * rq->clock += delta with delta >= running
+ */
+ ret = ___update_load_sum(rq->clock - running, rq->cpu, &rq->avg_irq,
+ 0,
+ 0,
+ 0);
+ ret += ___update_load_sum(rq->clock, rq->cpu, &rq->avg_irq,
+ 1,
+ 1,
+ 1);
+
+ if (ret)
+ ___update_load_avg(&rq->avg_irq, 1, 1);
+
+ return ret;
+}
+#endif
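
A self-contained user-space sketch (not kernel code) of the geometric series
pelt.c implements, using the documented y^32 = 0.5; the floating-point limit
(~47788) closely matches the kernel's integer LOAD_AVG_MAX of 47742:

	#include <stdio.h>
	#include <math.h>

	int main(void)
	{
		double y = pow(0.5, 1.0 / 32.0);	/* y^32 == 0.5 */
		double sum = 0.0;
		int ms;

		/* one fully-runnable 1024us segment per period, old sum decayed */
		for (ms = 0; ms < 1000; ms++)
			sum = sum * y + 1024.0;

		printf("%.0f\n", sum);			/* ~47788 */
		return 0;
	}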
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
new file mode 100644
index 000000000000..d2894db28955
--- /dev/null
+++ b/kernel/sched/pelt.h
@@ -0,0 +1,72 @@
+#ifdef CONFIG_SMP
+
+int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
+int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
+int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
+int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
+
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+int update_irq_load_avg(struct rq *rq, u64 running);
+#else
+static inline int
+update_irq_load_avg(struct rq *rq, u64 running)
+{
+ return 0;
+}
+#endif
+
+/*
+ * When a task is dequeued, its estimated utilization should not be updated if
+ * its util_avg has not been updated at least once.
+ * This flag is used to synchronize util_avg updates with util_est updates.
+ * We map this information into the LSB bit of the utilization saved at
+ * dequeue time (i.e. util_est.dequeued).
+ */
+#define UTIL_AVG_UNCHANGED 0x1
+
+static inline void cfs_se_util_change(struct sched_avg *avg)
+{
+ unsigned int enqueued;
+
+ if (!sched_feat(UTIL_EST))
+ return;
+
+ /* Avoid store if the flag has been already set */
+ enqueued = avg->util_est.enqueued;
+ if (!(enqueued & UTIL_AVG_UNCHANGED))
+ return;
+
+ /* Reset flag to report util_avg has been updated */
+ enqueued &= ~UTIL_AVG_UNCHANGED;
+ WRITE_ONCE(avg->util_est.enqueued, enqueued);
+}
+
+#else
+
+static inline int
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+{
+ return 0;
+}
+
+static inline int
+update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+ return 0;
+}
+
+static inline int
+update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+ return 0;
+}
+
+static inline int
+update_irq_load_avg(struct rq *rq, u64 running)
+{
+ return 0;
+}
+#endif
+
+
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index eaaec8364f96..2e2955a8cf8f 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -5,6 +5,8 @@
*/
#include "sched.h"
+#include "pelt.h"
+
int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
@@ -973,8 +975,6 @@ static void update_curr_rt(struct rq *rq)
curr->se.exec_start = now;
cgroup_account_cputime(curr, delta_exec);
- sched_rt_avg_update(rq, delta_exec);
-
if (!rt_bandwidth_enabled())
return;
@@ -1578,6 +1578,14 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
rt_queue_push_tasks(rq);
+ /*
+ * If the prev task was rt, put_prev_task() has already updated the
+ * utilization. We only care about the case where we start to schedule
+ * an rt task.
+ */
+ if (rq->curr->sched_class != &rt_sched_class)
+ update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+
return p;
}
@@ -1585,6 +1593,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
update_curr_rt(rq);
+ update_rt_rq_load_avg(rq_clock_task(rq), rq, 1);
+
/*
* The previous task needs to be made eligible for pushing
* if it is still active
@@ -2314,6 +2324,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
struct sched_rt_entity *rt_se = &p->rt;
update_curr_rt(rq);
+ update_rt_rq_load_avg(rq_clock_task(rq), rq, 1);
watchdog(rq, p);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c7742dcc136c..4a2e8cae63c4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -594,6 +594,7 @@ struct rt_rq {
unsigned long rt_nr_total;
int overloaded;
struct plist_head pushable_tasks;
+
#endif /* CONFIG_SMP */
int rt_queued;
@@ -673,7 +674,26 @@ struct dl_rq {
u64 bw_ratio;
};
+#ifdef CONFIG_FAIR_GROUP_SCHED
+/* An entity is a task if it doesn't "own" a runqueue */
+#define entity_is_task(se) (!se->my_q)
+#else
+#define entity_is_task(se) 1
+#endif
+
#ifdef CONFIG_SMP
+/*
+ * XXX we want to get rid of these helpers and use the full load resolution.
+ */
+static inline long se_weight(struct sched_entity *se)
+{
+ return scale_load_down(se->load.weight);
+}
+
+static inline long se_runnable(struct sched_entity *se)
+{
+ return scale_load_down(se->runnable_weight);
+}
static inline bool sched_asym_prefer(int a, int b)
{
@@ -833,8 +853,12 @@ struct rq {
struct list_head cfs_tasks;
- u64 rt_avg;
- u64 age_stamp;
+ struct sched_avg avg_rt;
+ struct sched_avg avg_dl;
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#define HAVE_SCHED_AVG_IRQ
+ struct sched_avg avg_irq;
+#endif
u64 idle_stamp;
u64 avg_idle;
@@ -1075,7 +1099,8 @@ enum numa_faults_stats {
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
-extern int migrate_swap(struct task_struct *, struct task_struct *);
+extern int migrate_swap(struct task_struct *p, struct task_struct *t,
+ int cpu, int scpu);
extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
#else
static inline void
@@ -1690,15 +1715,9 @@ extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
-extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;
-static inline u64 sched_avg_period(void)
-{
- return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
-}
-
#ifdef CONFIG_SCHED_HRTICK
/*
@@ -1735,8 +1754,6 @@ unsigned long arch_scale_freq_capacity(int cpu)
#endif
#ifdef CONFIG_SMP
-extern void sched_avg_update(struct rq *rq);
-
#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
@@ -1747,12 +1764,6 @@ unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
return SCHED_CAPACITY_SCALE;
}
#endif
-
-static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
-{
- rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
- sched_avg_update(rq);
-}
#else
#ifndef arch_scale_cpu_capacity
static __always_inline
@@ -1761,8 +1772,6 @@ unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
return SCHED_CAPACITY_SCALE;
}
#endif
-static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
-static inline void sched_avg_update(struct rq *rq) { }
#endif
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
@@ -2177,11 +2186,16 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#endif
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
-static inline unsigned long cpu_util_dl(struct rq *rq)
+static inline unsigned long cpu_bw_dl(struct rq *rq)
{
return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
}
+static inline unsigned long cpu_util_dl(struct rq *rq)
+{
+ return READ_ONCE(rq->avg_dl.util_avg);
+}
+
static inline unsigned long cpu_util_cfs(struct rq *rq)
{
unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
@@ -2193,4 +2207,37 @@ static inline unsigned long cpu_util_cfs(struct rq *rq)
return util;
}
+
+static inline unsigned long cpu_util_rt(struct rq *rq)
+{
+ return READ_ONCE(rq->avg_rt.util_avg);
+}
+#endif
+
+#ifdef HAVE_SCHED_AVG_IRQ
+static inline unsigned long cpu_util_irq(struct rq *rq)
+{
+ return rq->avg_irq.util_avg;
+}
+
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+ util *= (max - irq);
+ util /= max;
+
+ return util;
+}
+#else
+static inline unsigned long cpu_util_irq(struct rq *rq)
+{
+ return 0;
+}
+
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+ return util;
+}
#endif
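scale_irq_capacity() computes util' = util * (max - irq) / max, i.e. it shrinks a utilization by the fraction of capacity consumed in interrupt context. A quick standalone check of the arithmetic; the values are illustrative and 1024 mirrors SCHED_CAPACITY_SCALE:

    #include <stdio.h>

    static unsigned long scale_irq_capacity(unsigned long util,
                                            unsigned long irq,
                                            unsigned long max)
    {
        util *= (max - irq);
        util /= max;
        return util;
    }

    int main(void)
    {
        /* 10% irq pressure (102/1024) scales a CFS util of 512 to 461 */
        printf("%lu\n", scale_irq_capacity(512, 102, 1024));
        return 0;
    }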
diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
index b6fb2c3b3ff7..66b59ac77c22 100644
--- a/kernel/sched/swait.c
+++ b/kernel/sched/swait.c
@@ -32,7 +32,7 @@ void swake_up_locked(struct swait_queue_head *q)
}
EXPORT_SYMBOL(swake_up_locked);
-void swake_up(struct swait_queue_head *q)
+void swake_up_one(struct swait_queue_head *q)
{
unsigned long flags;
@@ -40,7 +40,7 @@ void swake_up(struct swait_queue_head *q)
swake_up_locked(q);
raw_spin_unlock_irqrestore(&q->lock, flags);
}
-EXPORT_SYMBOL(swake_up);
+EXPORT_SYMBOL(swake_up_one);
/*
* Does not allow usage from IRQ disabled, since we must be able to
@@ -69,14 +69,14 @@ void swake_up_all(struct swait_queue_head *q)
}
EXPORT_SYMBOL(swake_up_all);
-void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
+static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
wait->task = current;
if (list_empty(&wait->task_list))
- list_add(&wait->task_list, &q->task_list);
+ list_add_tail(&wait->task_list, &q->task_list);
}
-void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
+void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
unsigned long flags;
@@ -85,16 +85,28 @@ void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int
set_current_state(state);
raw_spin_unlock_irqrestore(&q->lock, flags);
}
-EXPORT_SYMBOL(prepare_to_swait);
+EXPORT_SYMBOL(prepare_to_swait_exclusive);
long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
- if (signal_pending_state(state, current))
- return -ERESTARTSYS;
+ unsigned long flags;
+ long ret = 0;
- prepare_to_swait(q, wait, state);
+ raw_spin_lock_irqsave(&q->lock, flags);
+ if (unlikely(signal_pending_state(state, current))) {
+ /*
+ * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
+ * must not see us.
+ */
+ list_del_init(&wait->task_list);
+ ret = -ERESTARTSYS;
+ } else {
+ __prepare_to_swait(q, wait);
+ set_current_state(state);
+ }
+ raw_spin_unlock_irqrestore(&q->lock, flags);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(prepare_to_swait_event);
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 928be527477e..870f97b313e3 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -134,8 +134,8 @@ static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int
* @nr_exclusive: how many wake-one or wake-many threads to wake up
* @key: is directly passed to the wakeup function
*
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * If this function wakes up a task, it executes a full memory barrier before
+ * accessing the task state.
*/
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
int nr_exclusive, void *key)
@@ -180,8 +180,8 @@ EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
*
* On UP it can prevent extra preemption.
*
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * If this function wakes up a task, it executes a full memory barrier before
+ * accessing the task state.
*/
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
int nr_exclusive, void *key)
@@ -392,35 +392,36 @@ static inline bool is_kthread_should_stop(void)
* if (condition)
* break;
*
- * p->state = mode; condition = true;
- * smp_mb(); // A smp_wmb(); // C
- * if (!wq_entry->flags & WQ_FLAG_WOKEN) wq_entry->flags |= WQ_FLAG_WOKEN;
- * schedule() try_to_wake_up();
- * p->state = TASK_RUNNING; ~~~~~~~~~~~~~~~~~~
- * wq_entry->flags &= ~WQ_FLAG_WOKEN; condition = true;
- * smp_mb() // B smp_wmb(); // C
- * wq_entry->flags |= WQ_FLAG_WOKEN;
- * }
- * remove_wait_queue(&wq_head, &wait);
+ * // in wait_woken() // in woken_wake_function()
*
+ * p->state = mode; wq_entry->flags |= WQ_FLAG_WOKEN;
+ * smp_mb(); // A try_to_wake_up():
+ * if (!(wq_entry->flags & WQ_FLAG_WOKEN)) <full barrier>
+ * schedule() if (p->state & mode)
+ * p->state = TASK_RUNNING; p->state = TASK_RUNNING;
+ * wq_entry->flags &= ~WQ_FLAG_WOKEN; ~~~~~~~~~~~~~~~~~~
+ * smp_mb(); // B condition = true;
+ * } smp_mb(); // C
+ * remove_wait_queue(&wq_head, &wait); wq_entry->flags |= WQ_FLAG_WOKEN;
*/
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
- set_current_state(mode); /* A */
/*
- * The above implies an smp_mb(), which matches with the smp_wmb() from
- * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
- * also observe all state before the wakeup.
+ * The below executes an smp_mb(), which matches with the full barrier
+ * executed by the try_to_wake_up() in woken_wake_function() such that
+ * either we see the store to wq_entry->flags in woken_wake_function()
+ * or woken_wake_function() sees our store to current->state.
*/
+ set_current_state(mode); /* A */
if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
timeout = schedule_timeout(timeout);
__set_current_state(TASK_RUNNING);
/*
- * The below implies an smp_mb(), it too pairs with the smp_wmb() from
- * woken_wake_function() such that we must either observe the wait
- * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
- * an event.
+ * The below executes an smp_mb(), which matches with the smp_mb() (C)
+ * in woken_wake_function() such that either we see the wait condition
+ * being true or the store to wq_entry->flags in woken_wake_function()
+ * follows ours in the coherence order.
*/
smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */
@@ -430,14 +431,8 @@ EXPORT_SYMBOL(wait_woken);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
- /*
- * Although this function is called under waitqueue lock, LOCK
- * doesn't imply write barrier and the users expects write
- * barrier semantics on wakeup functions. The following
- * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
- * and is paired with smp_store_mb() in wait_woken().
- */
- smp_wmb(); /* C */
+ /* Pairs with the smp_store_mb() in wait_woken(). */
+ smp_mb(); /* C */
wq_entry->flags |= WQ_FLAG_WOKEN;
return default_wake_function(wq_entry, mode, sync, key);
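The pairing documented above (barriers A and B in wait_woken() against C in woken_wake_function()) guarantees that either the waiter observes WQ_FLAG_WOKEN or the waker observes the sleeping state, so the condition write can never be lost. A minimal userspace analogue is sketched below; seq_cst atomics stand in for the kernel's full barriers, a spin loop stands in for schedule(), and the waitqueue lock, timeout and sync/key plumbing are deliberately elided:

    /* build with: cc woken_flag.c -pthread */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int sleeping;  /* stands in for p->state */
    static atomic_int woken;     /* stands in for WQ_FLAG_WOKEN */
    static int condition;        /* plain data published by the waker */

    static void *waker(void *arg)
    {
        condition = 1;               /* publish the event ...            */
        atomic_store(&woken, 1);     /* ... then set WOKEN (barrier C)   */
        if (atomic_load(&sleeping))  /* try_to_wake_up(): wake if asleep */
            atomic_store(&sleeping, 0);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        atomic_store(&sleeping, 1);  /* A: set_current_state(mode) */
        pthread_create(&t, NULL, waker, NULL);

        if (!atomic_load(&woken))        /* either we see WOKEN here ...    */
            while (atomic_load(&sleeping))
                ;                        /* ... or the waker sees us asleep */

        atomic_exchange(&woken, 0);      /* B: smp_store_mb() analogue */
        printf("condition=%d\n", condition); /* always 1: no lost wakeup */
        pthread_join(t, NULL);
        return 0;
    }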
diff --git a/kernel/smp.c b/kernel/smp.c
index 084c8b3a2681..d86eec5f51c1 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -584,6 +584,8 @@ void __init smp_init(void)
num_nodes, (num_nodes > 1 ? "s" : ""),
num_cpus, (num_cpus > 1 ? "s" : ""));
+ /* Final decision about SMT support */
+ cpu_smt_check_topology();
/* Any cleanup work */
smp_cpus_done(setup_max_cpus);
}
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 5043e7433f4b..c230c2dd48e1 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -238,8 +238,7 @@ int smpboot_unpark_threads(unsigned int cpu)
mutex_lock(&smpboot_threads_lock);
list_for_each_entry(cur, &hotplug_threads, list)
- if (cpumask_test_cpu(cpu, cur->cpumask))
- smpboot_unpark_thread(cur, cpu);
+ smpboot_unpark_thread(cur, cpu);
mutex_unlock(&smpboot_threads_lock);
return 0;
}
@@ -280,34 +279,26 @@ static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
}
/**
- * smpboot_register_percpu_thread_cpumask - Register a per_cpu thread related
+ * smpboot_register_percpu_thread - Register a per_cpu thread related
* to hotplug
* @plug_thread: Hotplug thread descriptor
- * @cpumask: The cpumask where threads run
*
* Creates and starts the threads on all online cpus.
*/
-int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *cpumask)
+int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
unsigned int cpu;
int ret = 0;
- if (!alloc_cpumask_var(&plug_thread->cpumask, GFP_KERNEL))
- return -ENOMEM;
- cpumask_copy(plug_thread->cpumask, cpumask);
-
get_online_cpus();
mutex_lock(&smpboot_threads_lock);
for_each_online_cpu(cpu) {
ret = __smpboot_create_thread(plug_thread, cpu);
if (ret) {
smpboot_destroy_threads(plug_thread);
- free_cpumask_var(plug_thread->cpumask);
goto out;
}
- if (cpumask_test_cpu(cpu, cpumask))
- smpboot_unpark_thread(plug_thread, cpu);
+ smpboot_unpark_thread(plug_thread, cpu);
}
list_add(&plug_thread->list, &hotplug_threads);
out:
@@ -315,7 +306,7 @@ out:
put_online_cpus();
return ret;
}
-EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread_cpumask);
+EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
/**
* smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
@@ -331,44 +322,9 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
smpboot_destroy_threads(plug_thread);
mutex_unlock(&smpboot_threads_lock);
put_online_cpus();
- free_cpumask_var(plug_thread->cpumask);
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
-/**
- * smpboot_update_cpumask_percpu_thread - Adjust which per_cpu hotplug threads stay parked
- * @plug_thread: Hotplug thread descriptor
- * @new: Revised mask to use
- *
- * The cpumask field in the smp_hotplug_thread must not be updated directly
- * by the client, but only by calling this function.
- * This function can only be called on a registered smp_hotplug_thread.
- */
-void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *new)
-{
- struct cpumask *old = plug_thread->cpumask;
- static struct cpumask tmp;
- unsigned int cpu;
-
- lockdep_assert_cpus_held();
- mutex_lock(&smpboot_threads_lock);
-
- /* Park threads that were exclusively enabled on the old mask. */
- cpumask_andnot(&tmp, old, new);
- for_each_cpu_and(cpu, &tmp, cpu_online_mask)
- smpboot_park_thread(plug_thread, cpu);
-
- /* Unpark threads that are exclusively enabled on the new mask. */
- cpumask_andnot(&tmp, new, old);
- for_each_cpu_and(cpu, &tmp, cpu_online_mask)
- smpboot_unpark_thread(plug_thread, cpu);
-
- cpumask_copy(old, new);
-
- mutex_unlock(&smpboot_threads_lock);
-}
-
static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
/*
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index e190d1ef3a23..067cb83f37ea 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -81,6 +81,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
unsigned long flags;
bool enabled;
+ preempt_disable();
raw_spin_lock_irqsave(&stopper->lock, flags);
enabled = stopper->enabled;
if (enabled)
@@ -90,6 +91,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
raw_spin_unlock_irqrestore(&stopper->lock, flags);
wake_up_q(&wakeq);
+ preempt_enable();
return enabled;
}
@@ -236,13 +238,24 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
DEFINE_WAKE_Q(wakeq);
int err;
+
retry:
+ /*
+ * The waking up of stopper threads has to happen in the same
+ * scheduling context as the queueing. Otherwise, there is a
+ * possibility of one of the above stoppers being woken up by another
+ * CPU, and preempting us. That would leave the other stopper unwoken
+ * forever.
+ */
+ preempt_disable();
raw_spin_lock_irq(&stopper1->lock);
raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
- err = -ENOENT;
- if (!stopper1->enabled || !stopper2->enabled)
+ if (!stopper1->enabled || !stopper2->enabled) {
+ err = -ENOENT;
goto unlock;
+ }
+
/*
* Ensure that if we race with __stop_cpus() the stoppers won't get
* queued up in reverse order leading to system deadlock.
@@ -253,36 +266,30 @@ retry:
* It can be falsely true but it is safe to spin until it is cleared,
* queue_stop_cpus_work() does everything under preempt_disable().
*/
- err = -EDEADLK;
- if (unlikely(stop_cpus_in_progress))
- goto unlock;
+ if (unlikely(stop_cpus_in_progress)) {
+ err = -EDEADLK;
+ goto unlock;
+ }
err = 0;
__cpu_stop_queue_work(stopper1, work1, &wakeq);
__cpu_stop_queue_work(stopper2, work2, &wakeq);
- /*
- * The waking up of stopper threads has to happen
- * in the same scheduling context as the queueing.
- * Otherwise, there is a possibility of one of the
- * above stoppers being woken up by another CPU,
- * and preempting us. This will cause us to n ot
- * wake up the other stopper forever.
- */
- preempt_disable();
+
unlock:
raw_spin_unlock(&stopper2->lock);
raw_spin_unlock_irq(&stopper1->lock);
if (unlikely(err == -EDEADLK)) {
+ preempt_enable();
+
while (stop_cpus_in_progress)
cpu_relax();
+
goto retry;
}
- if (!err) {
- wake_up_q(&wakeq);
- preempt_enable();
- }
+ wake_up_q(&wakeq);
+ preempt_enable();
return err;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 38509dc1f77b..e27b51d3facd 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2512,11 +2512,11 @@ static int do_sysinfo(struct sysinfo *info)
{
unsigned long mem_total, sav_total;
unsigned int mem_unit, bitcount;
- struct timespec tp;
+ struct timespec64 tp;
memset(info, 0, sizeof(struct sysinfo));
- get_monotonic_boottime(&tp);
+ ktime_get_boottime_ts64(&tp);
info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2d9837c0aff4..f22f76b7a138 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -368,14 +368,6 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
- .procname = "sched_time_avg_ms",
- .data = &sysctl_sched_time_avg,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &one,
- },
#ifdef CONFIG_SCHEDSTATS
{
.procname = "sched_schedstats",
diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index dd53e354f630..7bca480151b0 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -162,90 +162,6 @@ static int test_kprobes(void)
}
-#if 0
-static u32 jph_val;
-
-static u32 j_kprobe_target(u32 value)
-{
- if (preemptible()) {
- handler_errors++;
- pr_err("jprobe-handler is preemptible\n");
- }
- if (value != rand1) {
- handler_errors++;
- pr_err("incorrect value in jprobe handler\n");
- }
-
- jph_val = rand1;
- jprobe_return();
- return 0;
-}
-
-static struct jprobe jp = {
- .entry = j_kprobe_target,
- .kp.symbol_name = "kprobe_target"
-};
-
-static int test_jprobe(void)
-{
- int ret;
-
- ret = register_jprobe(&jp);
- if (ret < 0) {
- pr_err("register_jprobe returned %d\n", ret);
- return ret;
- }
-
- ret = target(rand1);
- unregister_jprobe(&jp);
- if (jph_val == 0) {
- pr_err("jprobe handler not called\n");
- handler_errors++;
- }
-
- return 0;
-}
-
-static struct jprobe jp2 = {
- .entry = j_kprobe_target,
- .kp.symbol_name = "kprobe_target2"
-};
-
-static int test_jprobes(void)
-{
- int ret;
- struct jprobe *jps[2] = {&jp, &jp2};
-
- /* addr and flags should be cleard for reusing kprobe. */
- jp.kp.addr = NULL;
- jp.kp.flags = 0;
- ret = register_jprobes(jps, 2);
- if (ret < 0) {
- pr_err("register_jprobes returned %d\n", ret);
- return ret;
- }
-
- jph_val = 0;
- ret = target(rand1);
- if (jph_val == 0) {
- pr_err("jprobe handler not called\n");
- handler_errors++;
- }
-
- jph_val = 0;
- ret = target2(rand1);
- if (jph_val == 0) {
- pr_err("jprobe handler2 not called\n");
- handler_errors++;
- }
- unregister_jprobes(jps, 2);
-
- return 0;
-}
-#else
-#define test_jprobe() (0)
-#define test_jprobes() (0)
-#endif
#ifdef CONFIG_KRETPROBES
static u32 krph_val;
@@ -383,16 +299,6 @@ int init_test_probes(void)
if (ret < 0)
errors++;
- num_tests++;
- ret = test_jprobe();
- if (ret < 0)
- errors++;
-
- num_tests++;
- ret = test_jprobes();
- if (ret < 0)
- errors++;
-
#ifdef CONFIG_KRETPROBES
num_tests++;
ret = test_kretprobe();
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 639321bf2e39..fa5de5e8de61 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -581,11 +581,11 @@ static void alarm_timer_rearm(struct k_itimer *timr)
* @timr: Pointer to the posixtimer data struct
* @now: Current time to forward the timer against
*/
-static int alarm_timer_forward(struct k_itimer *timr, ktime_t now)
+static s64 alarm_timer_forward(struct k_itimer *timr, ktime_t now)
{
struct alarm *alarm = &timr->it.alarm.alarmtimer;
- return (int) alarm_forward(alarm, timr->it_interval, now);
+ return alarm_forward(alarm, timr->it_interval, now);
}
/**
@@ -808,7 +808,8 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
/* Convert (if necessary) to absolute time */
if (flags != TIMER_ABSTIME) {
ktime_t now = alarm_bases[type].gettime();
- exp = ktime_add(now, exp);
+
+ exp = ktime_add_safe(now, exp);
}
ret = alarmtimer_do_nsleep(&alarm, exp, type);
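The move to ktime_add_safe() matters because userspace controls the relative timeout: now + exp can overflow s64 and wrap negative, turning a far-future alarm into an immediate expiry. ktime_add_safe() saturates at KTIME_MAX instead. A plain-C sketch of such a saturating add (relies on the GCC/Clang __builtin_add_overflow builtin):

    #include <stdint.h>
    #include <stdio.h>

    static int64_t sat_add64(int64_t a, int64_t b)
    {
        int64_t res;

        if (__builtin_add_overflow(a, b, &res))
            return b < 0 ? INT64_MIN : INT64_MAX;
        return res;
    }

    int main(void)
    {
        int64_t now = INT64_MAX - 10;    /* a ktime close to KTIME_MAX */
        int64_t timeout = 1000;

        /* the unchecked sum wraps negative (shown via defined
         * unsigned arithmetic, since signed overflow is UB) */
        printf("wrapped: %lld\n",
               (long long)(int64_t)((uint64_t)now + (uint64_t)timeout));
        printf("capped : %lld\n", (long long)sat_add64(now, timeout));
        return 0;
    }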
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 16c027e9cc73..8c0e4092f661 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -463,6 +463,12 @@ void clockevents_register_device(struct clock_event_device *dev)
dev->cpumask = cpumask_of(smp_processor_id());
}
+ if (dev->cpumask == cpu_all_mask) {
+ WARN(1, "%s cpumask == cpu_all_mask, using cpu_possible_mask instead\n",
+ dev->name);
+ dev->cpumask = cpu_possible_mask;
+ }
+
raw_spin_lock_irqsave(&clockevents_lock, flags);
list_add(&dev->list, &clockevent_devices);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index f89a78e2792b..f74fb00d8064 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -94,6 +94,8 @@ EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
/*[Clocksource internal variables]---------
* curr_clocksource:
* currently selected clocksource.
+ * suspend_clocksource:
+ * used to calculate the suspend time.
* clocksource_list:
* linked list with the registered clocksources
* clocksource_mutex:
@@ -102,10 +104,12 @@ EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
* Name of the user-specified clocksource.
*/
static struct clocksource *curr_clocksource;
+static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
+static u64 suspend_start;
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
@@ -447,6 +451,140 @@ static inline void clocksource_watchdog_unlock(unsigned long *flags) { }
#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
+static bool clocksource_is_suspend(struct clocksource *cs)
+{
+ return cs == suspend_clocksource;
+}
+
+static void __clocksource_suspend_select(struct clocksource *cs)
+{
+ /*
+ * Skip the clocksource which will be stopped in suspend state.
+ */
+ if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
+ return;
+
+ /*
+ * A nonstop clocksource can be selected as the suspend clocksource to
+ * calculate the suspend time, so it should not supply suspend/resume
+ * interfaces that would stop it while the system is suspended.
+ */
+ if (cs->suspend || cs->resume) {
+ pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
+ cs->name);
+ }
+
+ /* Pick the best rating. */
+ if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
+ suspend_clocksource = cs;
+}
+
+/**
+ * clocksource_suspend_select - Select the best clocksource for suspend timing
+ * @fallback: if select a fallback clocksource
+ */
+static void clocksource_suspend_select(bool fallback)
+{
+ struct clocksource *cs, *old_suspend;
+
+ old_suspend = suspend_clocksource;
+ if (fallback)
+ suspend_clocksource = NULL;
+
+ list_for_each_entry(cs, &clocksource_list, list) {
+ /* Skip current if we were requested for a fallback. */
+ if (fallback && cs == old_suspend)
+ continue;
+
+ __clocksource_suspend_select(cs);
+ }
+}
+
+/**
+ * clocksource_start_suspend_timing - Start measuring the suspend timing
+ * @cs: current clocksource from timekeeping
+ * @start_cycles: current cycles from timekeeping
+ *
+ * This function will save the start cycle values of suspend timer to calculate
+ * the suspend time when resuming system.
+ *
+ * This function is called late in the suspend process from timekeeping_suspend(),
+ * which means processes are frozen, and non-boot CPUs and interrupts are
+ * disabled. It is therefore possible to start the suspend timer without taking
+ * the clocksource mutex.
+ */
+void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
+{
+ if (!suspend_clocksource)
+ return;
+
+ /*
+ * If current clocksource is the suspend timer, we should use the
+ * tkr_mono.cycle_last value as suspend_start to avoid same reading
+ * from suspend timer.
+ */
+ if (clocksource_is_suspend(cs)) {
+ suspend_start = start_cycles;
+ return;
+ }
+
+ if (suspend_clocksource->enable &&
+ suspend_clocksource->enable(suspend_clocksource)) {
+ pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
+ return;
+ }
+
+ suspend_start = suspend_clocksource->read(suspend_clocksource);
+}
+
+/**
+ * clocksource_stop_suspend_timing - Stop measuring the suspend timing
+ * @cs: current clocksource from timekeeping
+ * @cycle_now: current cycles from timekeeping
+ *
+ * This function will calculate the suspend time from suspend timer.
+ *
+ * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
+ *
+ * This function is called early in the resume process from timekeeping_resume(),
+ * which means there is only one CPU, no processes are running and interrupts
+ * are disabled. It is therefore possible to stop the suspend timer without
+ * taking the clocksource mutex.
+ */
+u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
+{
+ u64 now, delta, nsec = 0;
+
+ if (!suspend_clocksource)
+ return 0;
+
+ /*
+ * If current clocksource is the suspend timer, we should use the
+ * tkr_mono.cycle_last value from timekeeping as current cycle to
+ * avoid same reading from suspend timer.
+ */
+ if (clocksource_is_suspend(cs))
+ now = cycle_now;
+ else
+ now = suspend_clocksource->read(suspend_clocksource);
+
+ if (now > suspend_start) {
+ delta = clocksource_delta(now, suspend_start,
+ suspend_clocksource->mask);
+ nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
+ suspend_clocksource->shift);
+ }
+
+ /*
+ * Disable the suspend timer to save power if current clocksource is
+ * not the suspend timer.
+ */
+ if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
+ suspend_clocksource->disable(suspend_clocksource);
+
+ return nsec;
+}
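The nsec computation above is the standard clocksource fixed-point conversion, nsec = (delta * mult) >> shift, where mult/shift approximate 1e9/freq. A standalone sketch for an assumed 19.2 MHz always-on timer; the 128-bit intermediate mirrors what mul_u64_u32_shr() provides and is a gcc/clang extension:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
    {
        return (uint64_t)(((unsigned __int128)a * mul) >> shift);
    }

    int main(void)
    {
        uint32_t freq = 19200000;        /* 19.2 MHz suspend timer */
        unsigned int shift = 24;
        uint32_t mult = (uint32_t)((((uint64_t)1000000000 << shift)
                                    + freq / 2) / freq);
        uint64_t delta = 19200000;       /* one second worth of cycles */

        /* prints 999999999: one second, off by <1 ns of rounding */
        printf("mult=%u shift=%u -> %llu ns\n", mult, shift,
               (unsigned long long)mul_u64_u32_shr(delta, mult, shift));
        return 0;
    }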
+
/**
* clocksource_suspend - suspend the clocksource(s)
*/
@@ -792,6 +930,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
clocksource_select();
clocksource_select_watchdog(false);
+ __clocksource_suspend_select(cs);
mutex_unlock(&clocksource_mutex);
return 0;
}
@@ -820,6 +959,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
clocksource_select();
clocksource_select_watchdog(false);
+ clocksource_suspend_select(false);
mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);
@@ -845,6 +985,15 @@ static int clocksource_unbind(struct clocksource *cs)
return -EBUSY;
}
+ if (clocksource_is_suspend(cs)) {
+ /*
+ * Select and try to install a replacement suspend clocksource.
+ * If no replacement suspend clocksource, we will just let the
+ * clocksource go and have no suspend clocksource.
+ */
+ clocksource_suspend_select(true);
+ }
+
clocksource_watchdog_lock(&flags);
clocksource_dequeue_watchdog(cs);
list_del_init(&cs->list);
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 3e93c54bd3a1..e1a549c9e399 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -718,8 +718,8 @@ static void hrtimer_switch_to_hres(void)
struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
if (tick_init_highres()) {
- printk(KERN_WARNING "Could not switch to high resolution "
- "mode on CPU %d\n", base->cpu);
+ pr_warn("Could not switch to high resolution mode on CPU %u\n",
+ base->cpu);
return;
}
base->hres_active = 1;
@@ -1573,8 +1573,7 @@ retry:
else
expires_next = ktime_add(now, delta);
tick_program_event(expires_next, 1);
- printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
- ktime_to_ns(delta));
+ pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
}
/* called with interrupts disabled */
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index a09ded765f6c..c5e0cba3b39c 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -502,7 +502,7 @@ static void sched_sync_hw_clock(struct timespec64 now,
{
struct timespec64 next;
- getnstimeofday64(&next);
+ ktime_get_real_ts64(&next);
if (!fail)
next.tv_sec = 659;
else {
@@ -537,7 +537,7 @@ static void sync_rtc_clock(void)
if (!IS_ENABLED(CONFIG_RTC_SYSTOHC))
return;
- getnstimeofday64(&now);
+ ktime_get_real_ts64(&now);
adjust = now;
if (persistent_clock_is_local)
@@ -591,7 +591,7 @@ static bool sync_cmos_clock(void)
* Architectures are strongly encouraged to use rtclib and not
* implement this legacy API.
*/
- getnstimeofday64(&now);
+ ktime_get_real_ts64(&now);
if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) {
if (persistent_clock_is_local)
adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
@@ -642,7 +642,7 @@ void ntp_notify_cmos_timer(void)
/*
* Propagate a new txc->status value into the NTP state:
*/
-static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
+static inline void process_adj_status(const struct timex *txc)
{
if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
time_state = TIME_OK;
@@ -665,12 +665,10 @@ static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
}
-static inline void process_adjtimex_modes(struct timex *txc,
- struct timespec64 *ts,
- s32 *time_tai)
+static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai)
{
if (txc->modes & ADJ_STATUS)
- process_adj_status(txc, ts);
+ process_adj_status(txc);
if (txc->modes & ADJ_NANO)
time_status |= STA_NANO;
@@ -718,7 +716,7 @@ static inline void process_adjtimex_modes(struct timex *txc,
* adjtimex mainly allows reading (and writing, if superuser) of
* kernel time-keeping variables. used by xntpd.
*/
-int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
+int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai)
{
int result;
@@ -735,7 +733,7 @@ int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
/* If there are input parameters, then process them: */
if (txc->modes)
- process_adjtimex_modes(txc, ts, time_tai);
+ process_adjtimex_modes(txc, time_tai);
txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
NTP_SCALE_SHIFT);
@@ -1022,12 +1020,11 @@ void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_t
static int __init ntp_tick_adj_setup(char *str)
{
- int rc = kstrtol(str, 0, (long *)&ntp_tick_adj);
-
+ int rc = kstrtos64(str, 0, &ntp_tick_adj);
if (rc)
return rc;
- ntp_tick_adj <<= NTP_SCALE_SHIFT;
+ ntp_tick_adj <<= NTP_SCALE_SHIFT;
return 1;
}
diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h
index 909bd1f1bfb1..c24b0e13f011 100644
--- a/kernel/time/ntp_internal.h
+++ b/kernel/time/ntp_internal.h
@@ -8,6 +8,6 @@ extern void ntp_clear(void);
extern u64 ntp_tick_length(void);
extern ktime_t ntp_get_next_leap(void);
extern int second_overflow(time64_t secs);
-extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
-extern void __hardpps(const struct timespec64 *, const struct timespec64 *);
+extern int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai);
+extern void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts);
#endif /* _LINUX_NTP_INTERNAL_H */
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 9cdf54b04ca8..294d7b65af33 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -85,7 +85,7 @@ static void bump_cpu_timer(struct k_itimer *timer, u64 now)
continue;
timer->it.cpu.expires += incr;
- timer->it_overrun += 1 << i;
+ timer->it_overrun += 1LL << i;
delta -= incr;
}
}
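The 1 << i to 1LL << i change fixes a width bug: for i >= 31 the int-typed shift overflows before the result ever reaches the 64-bit it_overrun accumulator. A two-line demonstration; the old expression is shown via its unsigned bit pattern, since 1 << 31 itself is undefined behaviour, and the sign extension assumes the usual two's-complement target:

    #include <stdio.h>

    int main(void)
    {
        int i = 31;

        printf("old: %lld\n", (long long)(int)(1u << i)); /* -2147483648 */
        printf("new: %lld\n", 1LL << i);                  /*  2147483648 */
        return 0;
    }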
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index 26aa9569e24a..2c6847d5d69b 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -81,7 +81,7 @@ int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp)
ktime_get_ts64(tp);
break;
case CLOCK_BOOTTIME:
- get_monotonic_boottime64(tp);
+ ktime_get_boottime_ts64(tp);
break;
default:
return -EINVAL;
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index e08ce3f27447..f23cc46ecf3e 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -86,15 +86,6 @@ static const struct k_clock clock_realtime, clock_monotonic;
#endif
/*
- * parisc wants ENOTSUP instead of EOPNOTSUPP
- */
-#ifndef ENOTSUP
-# define ENANOSLEEP_NOTSUP EOPNOTSUPP
-#else
-# define ENANOSLEEP_NOTSUP ENOTSUP
-#endif
-
-/*
* The timer ID is turned into a timer address by idr_find().
* Verifying a valid ID consists of:
*
@@ -228,21 +219,21 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp)
*/
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
{
- getrawmonotonic64(tp);
+ ktime_get_raw_ts64(tp);
return 0;
}
static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
{
- *tp = current_kernel_time64();
+ ktime_get_coarse_real_ts64(tp);
return 0;
}
static int posix_get_monotonic_coarse(clockid_t which_clock,
struct timespec64 *tp)
{
- *tp = get_monotonic_coarse64();
+ ktime_get_coarse_ts64(tp);
return 0;
}
@@ -254,13 +245,13 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *
static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
{
- get_monotonic_boottime64(tp);
+ ktime_get_boottime_ts64(tp);
return 0;
}
static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
{
- timekeeping_clocktai64(tp);
+ ktime_get_clocktai_ts64(tp);
return 0;
}
@@ -283,6 +274,17 @@ static __init int init_posix_timers(void)
}
__initcall(init_posix_timers);
+/*
+ * The siginfo si_overrun field and the return value of timer_getoverrun(2)
+ * are of type int. Clamp the overrun value to INT_MAX.
+ */
+static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
+{
+ s64 sum = timr->it_overrun_last + (s64)baseval;
+
+ return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
+}
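timer_overrun_to_int() is needed because the accumulator is now s64 while si_overrun and the timer_getoverrun(2) return value stay int; without the clamp, the narrowing conversion could yield negative or wrapped counts. A self-contained restatement with a quick check:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    static int timer_overrun_to_int(int64_t it_overrun_last, int baseval)
    {
        int64_t sum = it_overrun_last + (int64_t)baseval;

        return sum > (int64_t)INT_MAX ? INT_MAX : (int)sum;
    }

    int main(void)
    {
        printf("%d\n", timer_overrun_to_int(42, 0));           /* 42 */
        printf("%d\n", timer_overrun_to_int(5000000000LL, 7)); /* INT_MAX */
        return 0;
    }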
+
static void common_hrtimer_rearm(struct k_itimer *timr)
{
struct hrtimer *timer = &timr->it.real.timer;
@@ -290,9 +292,8 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
if (!timr->it_interval)
return;
- timr->it_overrun += (unsigned int) hrtimer_forward(timer,
- timer->base->get_time(),
- timr->it_interval);
+ timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
+ timr->it_interval);
hrtimer_restart(timer);
}
@@ -321,10 +322,10 @@ void posixtimer_rearm(struct siginfo *info)
timr->it_active = 1;
timr->it_overrun_last = timr->it_overrun;
- timr->it_overrun = -1;
+ timr->it_overrun = -1LL;
++timr->it_requeue_pending;
- info->si_overrun += timr->it_overrun_last;
+ info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
}
unlock_timer(timr, flags);
@@ -418,9 +419,8 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
now = ktime_add(now, kj);
}
#endif
- timr->it_overrun += (unsigned int)
- hrtimer_forward(timer, now,
- timr->it_interval);
+ timr->it_overrun += hrtimer_forward(timer, now,
+ timr->it_interval);
ret = HRTIMER_RESTART;
++timr->it_requeue_pending;
timr->it_active = 1;
@@ -524,7 +524,7 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event,
new_timer->it_id = (timer_t) new_timer_id;
new_timer->it_clock = which_clock;
new_timer->kclock = kc;
- new_timer->it_overrun = -1;
+ new_timer->it_overrun = -1LL;
if (event) {
rcu_read_lock();
@@ -645,11 +645,11 @@ static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
return __hrtimer_expires_remaining_adjusted(timer, now);
}
-static int common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
+static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
{
struct hrtimer *timer = &timr->it.real.timer;
- return (int)hrtimer_forward(timer, now, timr->it_interval);
+ return hrtimer_forward(timer, now, timr->it_interval);
}
/*
@@ -743,7 +743,7 @@ static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
- struct itimerspec __user *, setting)
+ struct __kernel_itimerspec __user *, setting)
{
struct itimerspec64 cur_setting;
@@ -755,7 +755,8 @@ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
return ret;
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
+
COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
struct compat_itimerspec __user *, setting)
{
@@ -768,6 +769,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
}
return ret;
}
+
#endif
/*
@@ -789,7 +791,7 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
if (!timr)
return -EINVAL;
- overrun = timr->it_overrun_last;
+ overrun = timer_overrun_to_int(timr, 0);
unlock_timer(timr, flags);
return overrun;
@@ -906,8 +908,8 @@ retry:
/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
- const struct itimerspec __user *, new_setting,
- struct itimerspec __user *, old_setting)
+ const struct __kernel_itimerspec __user *, new_setting,
+ struct __kernel_itimerspec __user *, old_setting)
{
struct itimerspec64 new_spec, old_spec;
struct itimerspec64 *rtn = old_setting ? &old_spec : NULL;
@@ -927,7 +929,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
return error;
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
struct compat_itimerspec __user *, new,
struct compat_itimerspec __user *, old)
@@ -1220,7 +1222,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
if (!kc)
return -EINVAL;
if (!kc->nsleep)
- return -ENANOSLEEP_NOTSUP;
+ return -EOPNOTSUPP;
if (get_timespec64(&t, rqtp))
return -EFAULT;
@@ -1247,7 +1249,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
if (!kc)
return -EINVAL;
if (!kc->nsleep)
- return -ENANOSLEEP_NOTSUP;
+ return -EOPNOTSUPP;
if (compat_get_timespec64(&t, rqtp))
return -EFAULT;
diff --git a/kernel/time/posix-timers.h b/kernel/time/posix-timers.h
index 151e28f5bf30..ddb21145211a 100644
--- a/kernel/time/posix-timers.h
+++ b/kernel/time/posix-timers.h
@@ -19,7 +19,7 @@ struct k_clock {
void (*timer_get)(struct k_itimer *timr,
struct itimerspec64 *cur_setting);
void (*timer_rearm)(struct k_itimer *timr);
- int (*timer_forward)(struct k_itimer *timr, ktime_t now);
+ s64 (*timer_forward)(struct k_itimer *timr, ktime_t now);
ktime_t (*timer_remaining)(struct k_itimer *timr, ktime_t now);
int (*timer_try_to_cancel)(struct k_itimer *timr);
void (*timer_arm)(struct k_itimer *timr, ktime_t expires,
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 2d8f05aad442..cbc72c2c1fca 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -237,7 +237,7 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
pr_debug("Registered %pF as sched_clock source\n", read);
}
-void __init sched_clock_postinit(void)
+void __init generic_sched_clock_init(void)
{
/*
* If no sched_clock() function has been provided at that point,
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index 58045eb976c3..a59641fb88b6 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -90,7 +90,7 @@ static struct clock_event_device ce_broadcast_hrtimer = {
.max_delta_ticks = ULONG_MAX,
.mult = 1,
.shift = 0,
- .cpumask = cpu_all_mask,
+ .cpumask = cpu_possible_mask,
};
static enum hrtimer_restart bc_handler(struct hrtimer *t)
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 2b41e8e2d31d..ccdb351277ee 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(sys_tz);
*/
SYSCALL_DEFINE1(time, time_t __user *, tloc)
{
- time_t i = get_seconds();
+ time_t i = (time_t)ktime_get_real_seconds();
if (tloc) {
if (put_user(i,tloc))
@@ -107,11 +107,9 @@ SYSCALL_DEFINE1(stime, time_t __user *, tptr)
/* compat_time_t is a 32 bit "long" and needs to get converted. */
COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
{
- struct timeval tv;
compat_time_t i;
- do_gettimeofday(&tv);
- i = tv.tv_sec;
+ i = (compat_time_t)ktime_get_real_seconds();
if (tloc) {
if (put_user(i,tloc))
@@ -931,7 +929,7 @@ int compat_put_timespec64(const struct timespec64 *ts, void __user *uts)
EXPORT_SYMBOL_GPL(compat_put_timespec64);
int get_itimerspec64(struct itimerspec64 *it,
- const struct itimerspec __user *uit)
+ const struct __kernel_itimerspec __user *uit)
{
int ret;
@@ -946,7 +944,7 @@ int get_itimerspec64(struct itimerspec64 *it,
EXPORT_SYMBOL_GPL(get_itimerspec64);
int put_itimerspec64(const struct itimerspec64 *it,
- struct itimerspec __user *uit)
+ struct __kernel_itimerspec __user *uit)
{
int ret;
@@ -959,3 +957,24 @@ int put_itimerspec64(const struct itimerspec64 *it,
return ret;
}
EXPORT_SYMBOL_GPL(put_itimerspec64);
+
+int get_compat_itimerspec64(struct itimerspec64 *its,
+ const struct compat_itimerspec __user *uits)
+{
+ if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) ||
+ __compat_get_timespec64(&its->it_value, &uits->it_value))
+ return -EFAULT;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(get_compat_itimerspec64);
+
+int put_compat_itimerspec64(const struct itimerspec64 *its,
+ struct compat_itimerspec __user *uits)
+{
+ if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) ||
+ __compat_put_timespec64(&its->it_value, &uits->it_value))
+ return -EFAULT;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(put_compat_itimerspec64);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 4786df904c22..f3b22f456fac 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -17,6 +17,7 @@
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
+#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
@@ -34,6 +35,14 @@
#define TK_MIRROR (1 << 1)
#define TK_CLOCK_WAS_SET (1 << 2)
+enum timekeeping_adv_mode {
+ /* Update timekeeper when a tick has passed */
+ TK_ADV_TICK,
+
+ /* Update timekeeper on a direct frequency change */
+ TK_ADV_FREQ
+};
+
/*
* The most important data for readout fits into a single 64 byte
* cache line.
@@ -97,7 +106,7 @@ static inline void tk_normalize_xtime(struct timekeeper *tk)
}
}
-static inline struct timespec64 tk_xtime(struct timekeeper *tk)
+static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
{
struct timespec64 ts;
@@ -154,7 +163,7 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
* a read of the fast-timekeeper tkrs (which is protected by its own locking
* and update logic).
*/
-static inline u64 tk_clock_read(struct tk_read_base *tkr)
+static inline u64 tk_clock_read(const struct tk_read_base *tkr)
{
struct clocksource *clock = READ_ONCE(tkr->clock);
@@ -203,7 +212,7 @@ static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
}
}
-static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
{
struct timekeeper *tk = &tk_core.timekeeper;
u64 now, last, mask, max, delta;
@@ -247,7 +256,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
-static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
{
u64 cycle_now, delta;
@@ -344,7 +353,7 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif
-static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
+static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
{
u64 nsec;
@@ -355,7 +364,7 @@ static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
return nsec + arch_gettimeoffset();
}
-static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
{
u64 delta;
@@ -363,7 +372,7 @@ static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
return timekeeping_delta_to_ns(tkr, delta);
}
-static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
+static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
{
u64 delta;
@@ -386,7 +395,8 @@ static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
* slightly wrong timestamp (a few nanoseconds). See
* @ktime_get_mono_fast_ns.
*/
-static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
+static void update_fast_timekeeper(const struct tk_read_base *tkr,
+ struct tk_fast *tkf)
{
struct tk_read_base *base = tkf->base;
@@ -541,10 +551,10 @@ EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
* number of cycles every time until timekeeping is resumed at which time the
* proper readout base for the fast timekeeper will be restored automatically.
*/
-static void halt_fast_timekeeper(struct timekeeper *tk)
+static void halt_fast_timekeeper(const struct timekeeper *tk)
{
static struct tk_read_base tkr_dummy;
- struct tk_read_base *tkr = &tk->tkr_mono;
+ const struct tk_read_base *tkr = &tk->tkr_mono;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
cycles_at_suspend = tk_clock_read(tkr);
@@ -1269,7 +1279,7 @@ EXPORT_SYMBOL(do_settimeofday64);
*
* Adds or subtracts an offset value from the current time.
*/
-static int timekeeping_inject_offset(struct timespec64 *ts)
+static int timekeeping_inject_offset(const struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long flags;
@@ -1496,22 +1506,39 @@ void __weak read_persistent_clock64(struct timespec64 *ts64)
}
/**
- * read_boot_clock64 - Return time of the system start.
+ * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
+ * from the boot.
*
* Weak dummy function for arches that do not yet support it.
- * Function to read the exact time the system has been started.
- * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
- *
- * XXX - Do be sure to remove it once all arches implement it.
+ * wall_time - current time as returned by persistent clock
+ * boot_offset - offset that is defined as wall_time - boot_time
+ * The default function calculates offset based on the current value of
+ * local_clock(). This way architectures that support sched_clock() but don't
+ * support dedicated boot time clock will provide the best estimate of the
+ * boot time.
*/
-void __weak read_boot_clock64(struct timespec64 *ts)
+void __weak __init
+read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
+ struct timespec64 *boot_offset)
{
- ts->tv_sec = 0;
- ts->tv_nsec = 0;
+ read_persistent_clock64(wall_time);
+ *boot_offset = ns_to_timespec64(local_clock());
}
-/* Flag for if timekeeping_resume() has injected sleeptime */
-static bool sleeptime_injected;
+/*
+ * Flag reflecting whether timekeeping_resume() has injected sleeptime.
+ *
+ * The flag starts off false and is only set when a suspend reaches
+ * timekeeping_suspend(). timekeeping_resume() clears it again when the
+ * timekeeper clocksource does not stop across suspend and has been used
+ * to update the sleep time. If the timekeeper clocksource has stopped,
+ * the flag stays true and is used by the RTC resume code to decide
+ * whether sleeptime must be injected; if so, the flag is cleared there.
+ *
+ * If a suspend fails before reaching timekeeping_resume() then the flag
+ * stays false and prevents erroneous sleeptime injection.
+ */
+static bool suspend_timing_needed;
/* Flag for if there is a persistent clock on this platform */
static bool persistent_clock_exists;
@@ -1521,28 +1548,29 @@ static bool persistent_clock_exists;
*/
void __init timekeeping_init(void)
{
+ struct timespec64 wall_time, boot_offset, wall_to_mono;
struct timekeeper *tk = &tk_core.timekeeper;
struct clocksource *clock;
unsigned long flags;
- struct timespec64 now, boot, tmp;
-
- read_persistent_clock64(&now);
- if (!timespec64_valid_strict(&now)) {
- pr_warn("WARNING: Persistent clock returned invalid value!\n"
- " Check your CMOS/BIOS settings.\n");
- now.tv_sec = 0;
- now.tv_nsec = 0;
- } else if (now.tv_sec || now.tv_nsec)
- persistent_clock_exists = true;
- read_boot_clock64(&boot);
- if (!timespec64_valid_strict(&boot)) {
- pr_warn("WARNING: Boot clock returned invalid value!\n"
- " Check your CMOS/BIOS settings.\n");
- boot.tv_sec = 0;
- boot.tv_nsec = 0;
+ read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
+ if (timespec64_valid_strict(&wall_time) &&
+ timespec64_to_ns(&wall_time) > 0) {
+ persistent_clock_exists = true;
+ } else if (timespec64_to_ns(&wall_time) != 0) {
+ pr_warn("Persistent clock returned invalid value");
+ wall_time = (struct timespec64){0};
}
+ if (timespec64_compare(&wall_time, &boot_offset) < 0)
+ boot_offset = (struct timespec64){0};
+
+ /*
+ * We want to set wall_to_mono, so that the following is true:
+ * wall time + wall_to_mono = boot time
+ */
+ wall_to_mono = timespec64_sub(boot_offset, wall_time);
+
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
ntp_init();
@@ -1552,13 +1580,10 @@ void __init timekeeping_init(void)
clock->enable(clock);
tk_setup_internals(tk, clock);
- tk_set_xtime(tk, &now);
+ tk_set_xtime(tk, &wall_time);
tk->raw_sec = 0;
- if (boot.tv_sec == 0 && boot.tv_nsec == 0)
- boot = tk_xtime(tk);
- set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
- tk_set_wall_to_mono(tk, tmp);
+ tk_set_wall_to_mono(tk, wall_to_mono);
timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
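The wall_to_mono value above is plain timespec arithmetic: wall_to_mono = boot_offset - wall_time, normalized so the nanosecond part stays in [0, 1e9), which makes wall time + wall_to_mono land on the boot-relative clock. A standalone sketch with made-up values; ts_sub mirrors what timespec64_sub does:

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000L

    struct ts64 { long long sec; long nsec; };

    static struct ts64 ts_sub(struct ts64 a, struct ts64 b)
    {
        struct ts64 r = { a.sec - b.sec, a.nsec - b.nsec };

        if (r.nsec < 0) {           /* normalize into [0, NSEC_PER_SEC) */
            r.sec -= 1;
            r.nsec += NSEC_PER_SEC;
        }
        return r;
    }

    int main(void)
    {
        struct ts64 wall = { 1533100000, 250000000 }; /* persistent clock */
        struct ts64 boot_offset = { 3, 500000000 };   /* local_clock() now */
        struct ts64 w2m = ts_sub(boot_offset, wall);
        struct ts64 chk = { wall.sec + w2m.sec, wall.nsec + w2m.nsec };

        if (chk.nsec >= NSEC_PER_SEC) {
            chk.sec += 1;
            chk.nsec -= NSEC_PER_SEC;
        }
        /* chk is 3.500000000: the time since boot, as intended */
        printf("wall+w2m = %lld.%09ld\n", chk.sec, chk.nsec);
        return 0;
    }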
@@ -1577,7 +1602,7 @@ static struct timespec64 timekeeping_suspend_time;
* adds the sleep offset to the timekeeping variables.
*/
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
- struct timespec64 *delta)
+ const struct timespec64 *delta)
{
if (!timespec64_valid_strict(delta)) {
printk_deferred(KERN_WARNING
@@ -1610,7 +1635,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
*/
bool timekeeping_rtc_skipresume(void)
{
- return sleeptime_injected;
+ return !suspend_timing_needed;
}
/**
@@ -1638,7 +1663,7 @@ bool timekeeping_rtc_skipsuspend(void)
* This function should only be called by rtc_resume(), and allows
* a suspend offset to be injected into the timekeeping values.
*/
-void timekeeping_inject_sleeptime64(struct timespec64 *delta)
+void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long flags;
@@ -1646,6 +1671,8 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta)
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
+ suspend_timing_needed = false;
+
timekeeping_forward_now(tk);
__timekeeping_inject_sleeptime(tk, delta);
@@ -1669,9 +1696,9 @@ void timekeeping_resume(void)
struct clocksource *clock = tk->tkr_mono.clock;
unsigned long flags;
struct timespec64 ts_new, ts_delta;
- u64 cycle_now;
+ u64 cycle_now, nsec;
+ bool inject_sleeptime = false;
- sleeptime_injected = false;
read_persistent_clock64(&ts_new);
clockevents_resume();
@@ -1693,22 +1720,19 @@ void timekeeping_resume(void)
* usable source. The rtc part is handled separately in rtc core code.
*/
cycle_now = tk_clock_read(&tk->tkr_mono);
- if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
- cycle_now > tk->tkr_mono.cycle_last) {
- u64 nsec, cyc_delta;
-
- cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
- tk->tkr_mono.mask);
- nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift);
+ nsec = clocksource_stop_suspend_timing(clock, cycle_now);
+ if (nsec > 0) {
ts_delta = ns_to_timespec64(nsec);
- sleeptime_injected = true;
+ inject_sleeptime = true;
} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
- sleeptime_injected = true;
+ inject_sleeptime = true;
}
- if (sleeptime_injected)
+ if (inject_sleeptime) {
+ suspend_timing_needed = false;
__timekeeping_inject_sleeptime(tk, &ts_delta);
+ }
/* Re-base the last cycle value */
tk->tkr_mono.cycle_last = cycle_now;
@@ -1732,6 +1756,8 @@ int timekeeping_suspend(void)
unsigned long flags;
struct timespec64 delta, delta_delta;
static struct timespec64 old_delta;
+ struct clocksource *curr_clock;
+ u64 cycle_now;
read_persistent_clock64(&timekeeping_suspend_time);
@@ -1743,11 +1769,22 @@ int timekeeping_suspend(void)
if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
persistent_clock_exists = true;
+ suspend_timing_needed = true;
+
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
timekeeping_forward_now(tk);
timekeeping_suspended = 1;
+ /*
+ * Since we've called forward_now, cycle_last stores the value
+ * just read from the current clocksource. Save this to potentially
+ * use in suspend timing.
+ */
+ curr_clock = tk->tkr_mono.clock;
+ cycle_now = tk->tkr_mono.cycle_last;
+ clocksource_start_suspend_timing(curr_clock, cycle_now);
+
if (persistent_clock_exists) {
/*
* To avoid drift caused by repeated suspend/resumes,
@@ -2021,11 +2058,11 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
return offset;
}
-/**
- * update_wall_time - Uses the current clocksource to increment the wall time
- *
+/*
+ * timekeeping_advance - Updates the timekeeper to the current time and
+ * current NTP tick length
*/
-void update_wall_time(void)
+static void timekeeping_advance(enum timekeeping_adv_mode mode)
{
struct timekeeper *real_tk = &tk_core.timekeeper;
struct timekeeper *tk = &shadow_timekeeper;
@@ -2042,14 +2079,17 @@ void update_wall_time(void)
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
offset = real_tk->cycle_interval;
+
+ if (mode != TK_ADV_TICK)
+ goto out;
#else
offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
-#endif
/* Check if there's really nothing to do */
- if (offset < real_tk->cycle_interval)
+ if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
goto out;
+#endif
/* Do some additional sanity checking */
timekeeping_check_update(tk, offset);
@@ -2106,6 +2146,15 @@ out:
}
/**
+ * update_wall_time - Uses the current clocksource to increment the wall time
+ *
+ */
+void update_wall_time(void)
+{
+ timekeeping_advance(TK_ADV_TICK);
+}
+
+/**
* getboottime64 - Return the real time of system boot.
* @ts: pointer to the timespec64 to be set
*
@@ -2220,7 +2269,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
/**
* timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
*/
-static int timekeeping_validate_timex(struct timex *txc)
+static int timekeeping_validate_timex(const struct timex *txc)
{
if (txc->modes & ADJ_ADJTIME) {
/* singleshot must not be used with any other mode bits */
@@ -2310,7 +2359,7 @@ int do_adjtimex(struct timex *txc)
return ret;
}
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
@@ -2327,6 +2376,10 @@ int do_adjtimex(struct timex *txc)
write_seqcount_end(&tk_core.seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+ /* Update the multiplier immediately if frequency was set directly */
+ if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
+ timekeeping_advance(TK_ADV_FREQ);
+
if (tai != orig_tai)
clock_was_set();
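The net effect of the refactor above: update_wall_time() stays a thin tick-mode wrapper, while do_adjtimex() can force an immediate update through TK_ADV_FREQ. A minimal stand-alone sketch of that dispatch, with illustrative values standing in for the real clocksource plumbing (read_offset() and the interval are made up for the example):

#include <stdio.h>

enum timekeeping_adv_mode { TK_ADV_TICK, TK_ADV_FREQ };

static unsigned long long cycle_interval = 1000000;	/* illustrative */

/* Stand-in for reading the clocksource delta since cycle_last. */
static unsigned long long read_offset(void) { return 250000; }

static void timekeeping_advance_sketch(enum timekeeping_adv_mode mode)
{
	unsigned long long offset = read_offset();

	/* Periodic ticks bail out if less than one full interval has
	 * elapsed; TK_ADV_FREQ skips that check so a freshly written
	 * frequency or tick length takes effect immediately. */
	if (offset < cycle_interval && mode == TK_ADV_TICK) {
		puts("nothing to do");
		return;
	}
	puts("accumulate and update timekeeper");
}

int main(void)
{
	timekeeping_advance_sketch(TK_ADV_TICK);	/* -> nothing to do */
	timekeeping_advance_sketch(TK_ADV_FREQ);	/* -> forced update */
	return 0;
}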
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
index 0754cadfa9e6..238e4be60229 100644
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -70,7 +70,7 @@ static int __init tk_debug_sleep_time_init(void)
}
late_initcall(tk_debug_sleep_time_init);
-void tk_debug_account_sleep_time(struct timespec64 *t)
+void tk_debug_account_sleep_time(const struct timespec64 *t)
{
/* Cap bin index so we don't overflow the array */
int bin = min(fls(t->tv_sec), NUM_BINS-1);
diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h
index cf5c0828ee31..bcbb52db2256 100644
--- a/kernel/time/timekeeping_internal.h
+++ b/kernel/time/timekeeping_internal.h
@@ -8,7 +8,7 @@
#include <linux/time.h>
#ifdef CONFIG_DEBUG_FS
-extern void tk_debug_account_sleep_time(struct timespec64 *t);
+extern void tk_debug_account_sleep_time(const struct timespec64 *t);
#else
#define tk_debug_account_sleep_time(x)
#endif
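Const-qualifying the read-only parameters above is a pure hardening change; any accidental store through the pointer now fails at compile time. A reduced stand-alone illustration (ts64 is a stand-in type, not the kernel struct):

struct ts64 { long long tv_sec; long tv_nsec; };	/* reduced stand-in */

static long long read_seconds(const struct ts64 *t)
{
	/* t->tv_sec = 0; */	/* would now be a compile-time error */
	return t->tv_sec;
}

int main(void)
{
	struct ts64 ts = { 42, 0 };

	return read_seconds(&ts) == 42 ? 0 : 1;
}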
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index cc2d23e6ff61..fa49cd753dea 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -581,7 +581,7 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
* wheel:
*/
base->next_expiry = timer->expires;
- wake_up_nohz_cpu(base->cpu);
+ wake_up_nohz_cpu(base->cpu);
}
static void
@@ -1657,6 +1657,22 @@ static inline void __run_timers(struct timer_base *base)
raw_spin_lock_irq(&base->lock);
+ /*
+ * timer_base::must_forward_clk must be cleared before running
+ * timers so that any timer functions that call mod_timer() will
+ * not try to forward the base. Idle tracking / clock forwarding
+ * logic is only used with BASE_STD timers.
+ *
+ * The must_forward_clk flag is also cleared unconditionally for
+ * the deferrable base. The deferrable base is not affected by idle
+ * tracking and is never forwarded, so clearing the flag is a NOOP.
+ *
+ * The fact that the deferrable base is never forwarded can cause
+ * large variations in granularity for deferrable timers, but they
+ * can be deferred for long periods due to idle anyway.
+ */
+ base->must_forward_clk = false;
+
while (time_after_eq(jiffies, base->clk)) {
levels = collect_expired_timers(base, heads);
@@ -1676,19 +1692,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
- /*
- * must_forward_clk must be cleared before running timers so that any
- * timer functions that call mod_timer will not try to forward the
- * base. idle trcking / clock forwarding logic is only used with
- * BASE_STD timers.
- *
- * The deferrable base does not do idle tracking at all, so we do
- * not forward it. This can result in very large variations in
- * granularity for deferrable timers, but they can be deferred for
- * long periods due to idle.
- */
- base->must_forward_clk = false;
-
__run_timers(base);
if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
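Moving the flag clear into __run_timers() puts it under base->lock, which run_timer_softirq() had not yet taken at the old clearing site. A user-space sketch of the corrected ordering, with a pthread mutex standing in for the raw spinlock:

#include <pthread.h>
#include <stdbool.h>

struct base_sketch {
	pthread_mutex_t lock;
	bool must_forward_clk;
};

static void run_timers_sketch(struct base_sketch *base)
{
	pthread_mutex_lock(&base->lock);
	/* Cleared while the lock is held: mod_timer(), which also takes
	 * base->lock, either runs entirely before this point and may
	 * forward the base, or entirely after and sees the flag clear. */
	base->must_forward_clk = false;
	/* ... collect and expire timers under the lock ... */
	pthread_mutex_unlock(&base->lock);
}

int main(void)
{
	struct base_sketch base = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.must_forward_clk = true,
	};

	run_timers_sketch(&base);
	return base.must_forward_clk;	/* 0 on success */
}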
diff --git a/kernel/torture.c b/kernel/torture.c
index 3de1efbecd6a..1ac24a826589 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -20,6 +20,9 @@
* Author: Paul E. McKenney <paulmck@us.ibm.com>
* Based on kernel/rcu/torture.c.
*/
+
+#define pr_fmt(fmt) fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -53,7 +56,7 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
static char *torture_type;
-static bool verbose;
+static int verbose;
/* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0 /* Normal operation. */
@@ -98,7 +101,7 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
return false;
- if (verbose)
+ if (verbose > 1)
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: offlining %d\n",
torture_type, cpu);
@@ -111,7 +114,7 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
"torture_onoff task: offline %d failed: errno %d\n",
torture_type, cpu, ret);
} else {
- if (verbose)
+ if (verbose > 1)
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: offlined %d\n",
torture_type, cpu);
@@ -147,7 +150,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
return false;
- if (verbose)
+ if (verbose > 1)
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: onlining %d\n",
torture_type, cpu);
@@ -160,7 +163,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
"torture_onoff task: online %d failed: errno %d\n",
torture_type, cpu, ret);
} else {
- if (verbose)
+ if (verbose > 1)
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: onlined %d\n",
torture_type, cpu);
@@ -647,7 +650,7 @@ static void torture_stutter_cleanup(void)
* The runnable parameter points to a flag that controls whether or not
* the test is currently runnable. If there is no such flag, pass in NULL.
*/
-bool torture_init_begin(char *ttype, bool v)
+bool torture_init_begin(char *ttype, int v)
{
mutex_lock(&fullstop_mutex);
if (torture_type != NULL) {
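With verbose now an int, 0 stays silent, 1 keeps the pre-existing messages, and the per-CPU hotplug chatter shown above requires level 2. The pattern in miniature (printf stands in for pr_alert):

#include <stdio.h>

static int verbose = 2;	/* module parameter in the real code */

static void torture_offline_sketch(int cpu)
{
	if (verbose > 1)	/* per-CPU chatter only at level 2+ */
		printf("torture_onoff task: offlining %d\n", cpu);
	/* ... attempt the actual offline ... */
}

int main(void)
{
	torture_offline_sketch(3);
	return 0;
}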
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index dcc0166d1997..9a27f146fa1c 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -521,7 +521,7 @@ config FUNCTION_PROFILER
in debugfs called function_profile_enabled which defaults to zero.
When a 1 is echoed into this file profiling begins, and when a
zero is entered, profiling stops. A "functions" file is created in
- the trace_stats directory; this file shows the list of functions that
+ the trace_stat directory; this file shows the list of functions that
have been hit and their counters.
If in doubt, say N.
@@ -605,7 +605,7 @@ config HIST_TRIGGERS
Inter-event tracing of quantities such as latencies is also
supported using hist triggers under this option.
- See Documentation/trace/histogram.txt.
+ See Documentation/trace/histogram.rst.
If in doubt, say N.
config MMIOTRACE_TEST
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 987d9a9ae283..b951aa1fac61 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -494,6 +494,9 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
if (!buts->buf_size || !buts->buf_nr)
return -EINVAL;
+ if (!blk_debugfs_root)
+ return -ENOENT;
+
strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
@@ -518,9 +521,6 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
ret = -ENOENT;
- if (!blk_debugfs_root)
- goto err;
-
dir = debugfs_lookup(buts->name, blk_debugfs_root);
if (!dir)
bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
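Checking blk_debugfs_root before any allocation means the -ENOENT case no longer runs through the error-unwind path. The general shape of the fix, with hypothetical names:

#include <errno.h>
#include <stdlib.h>

static void *debugfs_root;	/* stands in for blk_debugfs_root */

static int setup_sketch(void **out)
{
	void *buf;

	if (!debugfs_root)	/* cheap precondition first ... */
		return -ENOENT;

	buf = malloc(4096);	/* ... so a later failure path has no
				 * earlier allocation to unwind */
	if (!buf)
		return -ENOMEM;
	*out = buf;
	return 0;
}

int main(void)
{
	void *buf;

	return setup_sketch(&buf) == -ENOENT ? 0 : 1;	/* root unset */
}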
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 823687997b01..176debd3481b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -8288,6 +8288,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
tracing_off();
local_irq_save(flags);
+ printk_nmi_direct_enter();
/* Simulate the iterator */
trace_init_global_iter(&iter);
@@ -8367,7 +8368,8 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
for_each_tracing_cpu(cpu) {
atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
}
- atomic_dec(&dump_running);
+ atomic_dec(&dump_running);
+ printk_nmi_direct_exit();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 6b71860f3998..e9d99463e5df 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1228,16 +1228,11 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
/*
* We need to check and see if we modified the pc of the
- * pt_regs, and if so clear the kprobe and return 1 so that we
- * don't do the single stepping.
- * The ftrace kprobe handler leaves it up to us to re-enable
- * preemption here before returning if we've modified the ip.
+ * pt_regs, and if so return 1 so that we don't do the
+ * single stepping.
*/
- if (orig_ip != instruction_pointer(regs)) {
- reset_current_kprobe();
- preempt_enable_no_resched();
+ if (orig_ip != instruction_pointer(regs))
return 1;
- }
if (!ret)
return 0;
}
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 576d18045811..5470dce212c0 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -18,18 +18,14 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
-#include <linux/smpboot.h>
-#include <linux/sched/rt.h>
-#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
-#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
+#include <linux/stop_machine.h>
#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
-#include <linux/kthread.h>
static DEFINE_MUTEX(watchdog_mutex);
@@ -169,11 +165,10 @@ static void lockup_detector_update_enable(void)
unsigned int __read_mostly softlockup_panic =
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
-static bool softlockup_threads_initialized __read_mostly;
+static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
-static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
@@ -335,6 +330,27 @@ static void watchdog_interrupt_count(void)
__this_cpu_inc(hrtimer_interrupts);
}
+static DEFINE_PER_CPU(struct completion, softlockup_completion);
+static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
+
+/*
+ * The watchdog thread function - touches the timestamp.
+ *
+ * It only runs once every sample_period seconds (4 seconds by
+ * default) to reset the softlockup timestamp. If this gets delayed
+ * for more than 2*watchdog_thresh seconds then the debug-printout
+ * triggers in watchdog_timer_fn().
+ */
+static int softlockup_fn(void *data)
+{
+ __this_cpu_write(soft_lockup_hrtimer_cnt,
+ __this_cpu_read(hrtimer_interrupts));
+ __touch_watchdog();
+ complete(this_cpu_ptr(&softlockup_completion));
+
+ return 0;
+}
+
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
@@ -350,7 +366,12 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
watchdog_interrupt_count();
/* kick the softlockup detector */
- wake_up_process(__this_cpu_read(softlockup_watchdog));
+ if (completion_done(this_cpu_ptr(&softlockup_completion))) {
+ reinit_completion(this_cpu_ptr(&softlockup_completion));
+ stop_one_cpu_nowait(smp_processor_id(),
+ softlockup_fn, NULL,
+ this_cpu_ptr(&softlockup_stop_work));
+ }
/* .. and repeat */
hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
@@ -448,16 +469,15 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
return HRTIMER_RESTART;
}
-static void watchdog_set_prio(unsigned int policy, unsigned int prio)
-{
- struct sched_param param = { .sched_priority = prio };
-
- sched_setscheduler(current, policy, &param);
-}
-
static void watchdog_enable(unsigned int cpu)
{
struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
+ struct completion *done = this_cpu_ptr(&softlockup_completion);
+
+ WARN_ON_ONCE(cpu != smp_processor_id());
+
+ init_completion(done);
+ complete(done);
/*
* Start the timer first to prevent the NMI watchdog triggering
@@ -473,15 +493,14 @@ static void watchdog_enable(unsigned int cpu)
/* Enable the perf event */
if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
watchdog_nmi_enable(cpu);
-
- watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
}
static void watchdog_disable(unsigned int cpu)
{
struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
- watchdog_set_prio(SCHED_NORMAL, 0);
+ WARN_ON_ONCE(cpu != smp_processor_id());
+
/*
* Disable the perf event first. That prevents that a large delay
* between disabling the timer and disabling the perf event causes
@@ -489,79 +508,66 @@ static void watchdog_disable(unsigned int cpu)
*/
watchdog_nmi_disable(cpu);
hrtimer_cancel(hrtimer);
+ wait_for_completion(this_cpu_ptr(&softlockup_completion));
}
-static void watchdog_cleanup(unsigned int cpu, bool online)
+static int softlockup_stop_fn(void *data)
{
- watchdog_disable(cpu);
+ watchdog_disable(smp_processor_id());
+ return 0;
}
-static int watchdog_should_run(unsigned int cpu)
+static void softlockup_stop_all(void)
{
- return __this_cpu_read(hrtimer_interrupts) !=
- __this_cpu_read(soft_lockup_hrtimer_cnt);
+ int cpu;
+
+ if (!softlockup_initialized)
+ return;
+
+ for_each_cpu(cpu, &watchdog_allowed_mask)
+ smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);
+
+ cpumask_clear(&watchdog_allowed_mask);
}
-/*
- * The watchdog thread function - touches the timestamp.
- *
- * It only runs once every sample_period seconds (4 seconds by
- * default) to reset the softlockup timestamp. If this gets delayed
- * for more than 2*watchdog_thresh seconds then the debug-printout
- * triggers in watchdog_timer_fn().
- */
-static void watchdog(unsigned int cpu)
+static int softlockup_start_fn(void *data)
{
- __this_cpu_write(soft_lockup_hrtimer_cnt,
- __this_cpu_read(hrtimer_interrupts));
- __touch_watchdog();
+ watchdog_enable(smp_processor_id());
+ return 0;
}
-static struct smp_hotplug_thread watchdog_threads = {
- .store = &softlockup_watchdog,
- .thread_should_run = watchdog_should_run,
- .thread_fn = watchdog,
- .thread_comm = "watchdog/%u",
- .setup = watchdog_enable,
- .cleanup = watchdog_cleanup,
- .park = watchdog_disable,
- .unpark = watchdog_enable,
-};
-
-static void softlockup_update_smpboot_threads(void)
+static void softlockup_start_all(void)
{
- lockdep_assert_held(&watchdog_mutex);
-
- if (!softlockup_threads_initialized)
- return;
+ int cpu;
- smpboot_update_cpumask_percpu_thread(&watchdog_threads,
- &watchdog_allowed_mask);
+ cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
+ for_each_cpu(cpu, &watchdog_allowed_mask)
+ smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}
-/* Temporarily park all watchdog threads */
-static void softlockup_park_all_threads(void)
+int lockup_detector_online_cpu(unsigned int cpu)
{
- cpumask_clear(&watchdog_allowed_mask);
- softlockup_update_smpboot_threads();
+ watchdog_enable(cpu);
+ return 0;
}
-/* Unpark enabled threads */
-static void softlockup_unpark_threads(void)
+int lockup_detector_offline_cpu(unsigned int cpu)
{
- cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
- softlockup_update_smpboot_threads();
+ watchdog_disable(cpu);
+ return 0;
}
static void lockup_detector_reconfigure(void)
{
cpus_read_lock();
watchdog_nmi_stop();
- softlockup_park_all_threads();
+
+ softlockup_stop_all();
set_sample_period();
lockup_detector_update_enable();
if (watchdog_enabled && watchdog_thresh)
- softlockup_unpark_threads();
+ softlockup_start_all();
+
watchdog_nmi_start();
cpus_read_unlock();
/*
@@ -580,8 +586,6 @@ static void lockup_detector_reconfigure(void)
*/
static __init void lockup_detector_setup(void)
{
- int ret;
-
/*
* If sysctl is off and watchdog got disabled on the command line,
* nothing to do here.
@@ -592,24 +596,13 @@ static __init void lockup_detector_setup(void)
!(watchdog_enabled && watchdog_thresh))
return;
- ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
- &watchdog_allowed_mask);
- if (ret) {
- pr_err("Failed to initialize soft lockup detector threads\n");
- return;
- }
-
mutex_lock(&watchdog_mutex);
- softlockup_threads_initialized = true;
lockup_detector_reconfigure();
+ softlockup_initialized = true;
mutex_unlock(&watchdog_mutex);
}
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
-static inline int watchdog_park_threads(void) { return 0; }
-static inline void watchdog_unpark_threads(void) { }
-static inline int watchdog_enable_all_cpus(void) { return 0; }
-static inline void watchdog_disable_all_cpus(void) { }
static void lockup_detector_reconfigure(void)
{
cpus_read_lock();
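The hunks above replace the per-CPU smpboot watchdog threads with stop-machine work gated by a per-CPU completion: the hrtimer only dispatches a new run once the previous one has completed, so work can never stack up. A user-space analogue (pthreads and a flag stand in for the kernel's completion and cpu_stop APIs):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool work_done = true;		/* plays the completion's role */

static void *softlockup_worker(void *arg)
{
	/* ... touch the watchdog timestamp ... */
	pthread_mutex_lock(&lock);
	work_done = true;		/* complete() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void timer_tick(void)
{
	pthread_mutex_lock(&lock);
	if (work_done) {		/* completion_done() */
		pthread_t t;

		work_done = false;	/* reinit_completion() */
		pthread_create(&t, NULL, softlockup_worker, NULL);
		pthread_detach(t);	/* stop_one_cpu_nowait() analogue */
	} else {
		puts("previous run still pending, skip dispatch");
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	timer_tick();
	timer_tick();		/* may skip if the first run is pending */
	pthread_exit(NULL);	/* let detached workers finish */
}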
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index e449a23e9d59..1f7020d65d0a 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -175,8 +175,8 @@ static int hardlockup_detector_event_create(void)
evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
watchdog_overflow_callback, NULL);
if (IS_ERR(evt)) {
- pr_info("Perf event create on CPU %d failed with %ld\n", cpu,
- PTR_ERR(evt));
+ pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,
+ PTR_ERR(evt));
return PTR_ERR(evt);
}
this_cpu_write(watchdog_ev, evt);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8838d1158d19..c6e73904c5a5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1,3 +1,5 @@
+menu "Kernel hacking"
+
menu "printk and dmesg options"
config PRINTK_TIME
@@ -30,6 +32,17 @@ config CONSOLE_LOGLEVEL_DEFAULT
usage in the kernel. That is controlled by the MESSAGE_LOGLEVEL_DEFAULT
option.
+config CONSOLE_LOGLEVEL_QUIET
+ int "quiet console loglevel (1-15)"
+ range 1 15
+ default "4"
+ help
+ Loglevel to use when "quiet" is passed on the kernel command line.
+
+ When "quiet" is passed on the kernel command line, this loglevel
+ is used as the loglevel. In other words, passing "quiet" is
+ equivalent to passing "loglevel=<CONSOLE_LOGLEVEL_QUIET>".
+
config MESSAGE_LOGLEVEL_DEFAULT
int "Default message log level (1-7)"
range 1 7
@@ -1193,6 +1206,7 @@ config DEBUG_ATOMIC_SLEEP
bool "Sleep inside atomic section checking"
select PREEMPT_COUNT
depends on DEBUG_KERNEL
+ depends on !ARCH_NO_PREEMPT
help
If you say Y here, various routines which may sleep will become very
noisy if they are called inside atomic sections: when a spinlock is
@@ -1718,7 +1732,7 @@ config KPROBES_SANITY_TEST
default n
help
This option provides for testing basic kprobes functionality on
- boot. A sample kprobe, jprobe and kretprobe are inserted and
+ boot. Samples of kprobe and kretprobe are inserted and
verified for functionality.
Say N if you are unsure.
@@ -1802,6 +1816,13 @@ config TEST_BITMAP
If unsure, say N.
+config TEST_BITFIELD
+ tristate "Test bitfield functions at runtime"
+ help
+ Enable this option to test the bitfield functions at boot.
+
+ If unsure, say N.
+
config TEST_UUID
tristate "Test functions located in the uuid module at runtime"
@@ -2034,3 +2055,7 @@ config IO_STRICT_DEVMEM
if the driver using a given range cannot be disabled.
If in doubt, say Y.
+
+source "arch/$(SRCARCH)/Kconfig.debug"
+
+endmenu # Kernel hacking
diff --git a/lib/Makefile b/lib/Makefile
index 90dc5520b784..d95bb2525101 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -37,7 +37,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
+ percpu-refcount.o rhashtable.o reciprocal_div.o \
once.o refcount.o usercopy.o errseq.o bucket_locks.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
@@ -65,6 +65,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
@@ -116,6 +117,7 @@ obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
obj-$(CONFIG_BCH) += bch.o
+CFLAGS_bch.o := $(call cc-option,-Wframe-larger-than=4500)
obj-$(CONFIG_LZO_COMPRESS) += lzo/
obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
obj-$(CONFIG_LZ4_COMPRESS) += lz4/
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 53c2d5edc826..1d91e31eceec 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -178,18 +178,18 @@ long long atomic64_xchg(atomic64_t *v, long long new)
}
EXPORT_SYMBOL(atomic64_xchg);
-int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- int ret = 0;
+ long long val;
raw_spin_lock_irqsave(lock, flags);
- if (v->counter != u) {
+ val = v->counter;
+ if (val != u)
v->counter += a;
- ret = 1;
- }
raw_spin_unlock_irqrestore(lock, flags);
- return ret;
+
+ return val;
}
-EXPORT_SYMBOL(atomic64_add_unless);
+EXPORT_SYMBOL(atomic64_fetch_add_unless);
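The fetch_ variant returns the old counter value rather than a success flag; the old boolean semantics fall out trivially at the call site. Reduced to plain, non-atomic C to show the relationship (upstream this runs under the hashed spinlock):

long long fetch_add_unless(long long *v, long long a, long long u)
{
	long long old = *v;	/* done under the spinlock upstream */

	if (old != u)
		*v += a;
	return old;		/* the old value, not a success flag */
}

/* The former atomic64_add_unless() boolean falls out directly: */
int add_unless(long long *v, long long a, long long u)
{
	return fetch_add_unless(v, a, u) != u;
}

int main(void)
{
	long long v = 1;

	return (add_unless(&v, 1, 0) && v == 2) ? 0 : 1;
}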
diff --git a/lib/bch.c b/lib/bch.c
index bc89dfe4d1b3..7b0f2006698b 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -78,15 +78,22 @@
#define GF_M(_p) (CONFIG_BCH_CONST_M)
#define GF_T(_p) (CONFIG_BCH_CONST_T)
#define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1)
+#define BCH_MAX_M (CONFIG_BCH_CONST_M)
#else
#define GF_M(_p) ((_p)->m)
#define GF_T(_p) ((_p)->t)
#define GF_N(_p) ((_p)->n)
+#define BCH_MAX_M 15
#endif
+#define BCH_MAX_T (((1 << BCH_MAX_M) - 1) / BCH_MAX_M)
+
#define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32)
#define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8)
+#define BCH_ECC_MAX_WORDS DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32)
+#define BCH_ECC_MAX_BYTES DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 8)
+
#ifndef dbg
#define dbg(_fmt, args...) do {} while (0)
#endif
@@ -187,7 +194,8 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
const unsigned int l = BCH_ECC_WORDS(bch)-1;
unsigned int i, mlen;
unsigned long m;
- uint32_t w, r[l+1];
+ uint32_t w, r[BCH_ECC_MAX_WORDS];
+ const size_t r_bytes = BCH_ECC_WORDS(bch) * sizeof(*r);
const uint32_t * const tab0 = bch->mod8_tab;
const uint32_t * const tab1 = tab0 + 256*(l+1);
const uint32_t * const tab2 = tab1 + 256*(l+1);
@@ -198,7 +206,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
/* load ecc parity bytes into internal 32-bit buffer */
load_ecc8(bch, bch->ecc_buf, ecc);
} else {
- memset(bch->ecc_buf, 0, sizeof(r));
+ memset(bch->ecc_buf, 0, r_bytes);
}
/* process first unaligned data bytes */
@@ -215,7 +223,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
mlen = len/4;
data += 4*mlen;
len -= 4*mlen;
- memcpy(r, bch->ecc_buf, sizeof(r));
+ memcpy(r, bch->ecc_buf, r_bytes);
/*
* split each 32-bit word into 4 polynomials of weight 8 as follows:
@@ -241,7 +249,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
r[l] = p0[l]^p1[l]^p2[l]^p3[l];
}
- memcpy(bch->ecc_buf, r, sizeof(r));
+ memcpy(bch->ecc_buf, r, r_bytes);
/* process last unaligned bytes */
if (len)
@@ -434,7 +442,7 @@ static int solve_linear_system(struct bch_control *bch, unsigned int *rows,
{
const int m = GF_M(bch);
unsigned int tmp, mask;
- int rem, c, r, p, k, param[m];
+ int rem, c, r, p, k, param[BCH_MAX_M];
k = 0;
mask = 1 << m;
@@ -1114,7 +1122,7 @@ static int build_deg2_base(struct bch_control *bch)
{
const int m = GF_M(bch);
int i, j, r;
- unsigned int sum, x, y, remaining, ak = 0, xi[m];
+ unsigned int sum, x, y, remaining, ak = 0, xi[BCH_MAX_M];
/* find k s.t. Tr(a^k) = 1 and 0 <= k < m */
for (i = 0; i < m; i++) {
@@ -1254,7 +1262,6 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
struct bch_control *bch = NULL;
const int min_m = 5;
- const int max_m = 15;
/* default primitive polynomials */
static const unsigned int prim_poly_tab[] = {
@@ -1270,7 +1277,7 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
goto fail;
}
#endif
- if ((m < min_m) || (m > max_m))
+ if ((m < min_m) || (m > BCH_MAX_M))
/*
* values of m greater than 15 are not currently supported;
* supporting m > 15 would require changing table base type
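The r[], param[] and xi[] changes above all apply one pattern: replace a variable-length array with an array sized by the compile-time maximum plus an explicit live-byte count, making the stack frame statically bounded (which the new -Wframe-larger-than=4500 in lib/Makefile can then police). A minimal sketch with an illustrative bound:

#include <string.h>

#define MAX_WORDS 8			/* illustrative upper bound */

static void encode_sketch(unsigned int nwords)	/* nwords <= MAX_WORDS */
{
	unsigned int r[MAX_WORDS];	/* was: unsigned int r[nwords]; */
	size_t r_bytes = nwords * sizeof(*r);

	memset(r, 0, r_bytes);		/* touch only the live part */
	/* ... accumulate parity into r[0..nwords-1] ... */
}

int main(void)
{
	encode_sketch(4);
	return 0;
}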
diff --git a/lib/crc32.c b/lib/crc32.c
index 2ef20fe84b69..a6c9afafc8c8 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -27,6 +27,7 @@
/* see: Documentation/crc32.txt for a description of algorithms */
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
@@ -184,7 +185,7 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
#if CRC_LE_BITS == 1
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len, NULL, CRCPOLY_LE);
+ return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
}
u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
@@ -194,7 +195,7 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len,
- (const u32 (*)[256])crc32table_le, CRCPOLY_LE);
+ (const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
}
u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
@@ -268,7 +269,7 @@ static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
{
- return crc32_generic_shift(crc, len, CRCPOLY_LE);
+ return crc32_generic_shift(crc, len, CRC32_POLY_LE);
}
u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
@@ -330,13 +331,13 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
#if CRC_LE_BITS == 1
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_be_generic(crc, p, len, NULL, CRCPOLY_BE);
+ return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
}
#else
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
return crc32_be_generic(crc, p, len,
- (const u32 (*)[256])crc32table_be, CRCPOLY_BE);
+ (const u32 (*)[256])crc32table_be, CRC32_POLY_BE);
}
#endif
EXPORT_SYMBOL(crc32_be);
diff --git a/lib/crc32defs.h b/lib/crc32defs.h
index cb275a28a750..0c8fb5923e7e 100644
--- a/lib/crc32defs.h
+++ b/lib/crc32defs.h
@@ -1,18 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * There are multiple 16-bit CRC polynomials in common use, but this is
- * *the* standard CRC-32 polynomial, first popularized by Ethernet.
- * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
- */
-#define CRCPOLY_LE 0xedb88320
-#define CRCPOLY_BE 0x04c11db7
-
-/*
- * This is the CRC32c polynomial, as outlined by Castagnoli.
- * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
- * x^8+x^6+x^0
- */
-#define CRC32C_POLY_LE 0x82F63B78
/* Try to choose an implementation variant via Kconfig */
#ifdef CONFIG_CRC32_SLICEBY8
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 994be4805cec..70935ed91125 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -360,9 +360,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
limit++;
if (is_on_stack)
- pr_warn("object is on stack, but not annotated\n");
+ pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
+ task_stack_page(current));
else
- pr_warn("object is not on stack, but annotated\n");
+ pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
+ task_stack_page(current));
+
WARN_ON(1);
}
@@ -1185,8 +1188,7 @@ void __init debug_objects_mem_init(void)
if (!obj_cache || debug_objects_replace_static_objects()) {
debug_objects_enabled = 0;
- if (obj_cache)
- kmem_cache_destroy(obj_cache);
+ kmem_cache_destroy(obj_cache);
pr_warn("out of memory.\n");
} else
debug_objects_selftest();
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 0234361b24b8..7c4932eed748 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -51,6 +51,7 @@
#endif /* STATIC */
#include <linux/decompress/mm.h>
+#include <linux/crc32poly.h>
#ifndef INT_MAX
#define INT_MAX 0x7fffffff
@@ -654,7 +655,7 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
for (i = 0; i < 256; i++) {
c = i << 24;
for (j = 8; j; j--)
- c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1);
+ c = c&0x80000000 ? (c << 1)^(CRC32_POLY_BE) : (c << 1);
bd->crc32Table[i] = c;
}
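The loop now names the big-endian polynomial through the shared CRC32_POLY_BE constant rather than a magic 0x04c11db7. The table-entry computation, lifted from the loop body into a stand-alone function:

#include <stdint.h>
#include <stdio.h>

#define CRC32_POLY_BE 0x04c11db7u

static uint32_t crc32_be_entry(unsigned int i)
{
	uint32_t c = (uint32_t)i << 24;
	int j;

	for (j = 8; j; j--)
		c = (c & 0x80000000u) ? (c << 1) ^ CRC32_POLY_BE : (c << 1);
	return c;
}

int main(void)
{
	printf("0x%08x\n", crc32_be_entry(1));	/* prints 0x04c11db7 */
	return 0;
}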
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 8f26660ea10a..f755b997b967 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
+#include "../include/linux/crc32poly.h"
#include "../include/generated/autoconf.h"
#include "crc32defs.h"
#include <inttypes.h>
@@ -57,7 +58,7 @@ static void crc32init_le_generic(const uint32_t polynomial,
static void crc32init_le(void)
{
- crc32init_le_generic(CRCPOLY_LE, crc32table_le);
+ crc32init_le_generic(CRC32_POLY_LE, crc32table_le);
}
static void crc32cinit_le(void)
@@ -76,7 +77,7 @@ static void crc32init_be(void)
crc32table_be[0][0] = 0;
for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
- crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
+ crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
for (j = 0; j < i; j++)
crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
}
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 54e5bbaa3200..517f5853ffed 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
if (ioremap_pmd_enabled() &&
((next - addr) == PMD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
- pmd_free_pte_page(pmd)) {
+ pmd_free_pte_page(pmd, addr)) {
if (pmd_set_huge(pmd, phys_addr + addr, prot))
continue;
}
@@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
if (ioremap_pud_enabled() &&
((next - addr) == PUD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
- pud_free_pmd_page(pud)) {
+ pud_free_pmd_page(pud, addr)) {
if (pud_set_huge(pud, phys_addr + addr, prot))
continue;
}
diff --git a/lib/klist.c b/lib/klist.c
index 0507fa5d84c5..f6b547812fe3 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i)
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *prev;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct klist_iter *i)
prev = to_klist_node(prev->n_node.prev);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i)
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *next;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@ struct klist_node *klist_next(struct klist_iter *i)
next = to_klist_node(next->n_node.next);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
diff --git a/lib/kobject.c b/lib/kobject.c
index 18989b5b3b56..389829d3a1d1 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -35,6 +35,25 @@ const void *kobject_namespace(struct kobject *kobj)
return kobj->ktype->namespace(kobj);
}
+/**
+ * kobject_get_ownership - get sysfs ownership data for @kobj
+ * @kobj: kobject in question
+ * @uid: kernel user ID for sysfs objects
+ * @gid: kernel group ID for sysfs objects
+ *
+ * Returns the initial uid/gid pair that should be used when creating the
+ * sysfs representation of the given kobject. Normally used to adjust
+ * ownership of objects in a container.
+ */
+void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ *uid = GLOBAL_ROOT_UID;
+ *gid = GLOBAL_ROOT_GID;
+
+ if (kobj->ktype->get_ownership)
+ kobj->ktype->get_ownership(kobj, uid, gid);
+}
+
/*
* populate_dir - populate directory with attributes.
* @kobj: object we're working on.
@@ -868,9 +887,16 @@ static void kset_release(struct kobject *kobj)
kfree(kset);
}
+void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ if (kobj->parent)
+ kobject_get_ownership(kobj->parent, uid, gid);
+}
+
static struct kobj_type kset_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
- .release = kset_release,
+ .release = kset_release,
+ .get_ownership = kset_get_ownership,
};
/**
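kobject_get_ownership() is a defaults-plus-hook pattern: root ownership unless the kobj_type supplies an override, with ksets simply delegating to their parent. A reduced user-space sketch (plain ints stand in for kuid_t/kgid_t; the container hook is hypothetical):

#include <stdio.h>

struct type_ops { void (*get_ownership)(int *uid, int *gid); };
struct object { const struct type_ops *ktype; };

static void object_get_ownership(struct object *obj, int *uid, int *gid)
{
	*uid = 0;	/* GLOBAL_ROOT_UID */
	*gid = 0;	/* GLOBAL_ROOT_GID */

	if (obj->ktype && obj->ktype->get_ownership)
		obj->ktype->get_ownership(uid, gid);	/* per-type override */
}

static void container_ownership(int *uid, int *gid)
{
	*uid = 1000;	/* e.g. the container owner's ids */
	*gid = 1000;
}

int main(void)
{
	const struct type_ops ops = { container_ownership };
	struct object obj = { &ops };
	int uid, gid;

	object_get_ownership(&obj, &uid, &gid);
	printf("%d:%d\n", uid, gid);	/* 1000:1000 */
	return 0;
}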
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index b5c1293ce147..1e1bbf171eca 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -29,7 +29,7 @@
*/
static unsigned int debug_locks_verbose;
-static DEFINE_WW_CLASS(ww_lockdep);
+static DEFINE_WD_CLASS(ww_lockdep);
static int __init setup_debug_locks_verbose(char *str)
{
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 468fb7cd1221..a5c921e6d667 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -41,7 +41,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
mpi_ptr_t tspace = NULL;
mpi_ptr_t rp, ep, mp, bp;
mpi_size_t esize, msize, bsize, rsize;
- int esign, msign, bsign, rsign;
+ int msign, bsign, rsign;
mpi_size_t size;
int mod_shift_cnt;
int negative_result;
@@ -53,7 +53,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
esize = exp->nlimbs;
msize = mod->nlimbs;
size = 2 * msize;
- esign = exp->sign;
msign = mod->sign;
rp = res->d;
diff --git a/lib/nlattr.c b/lib/nlattr.c
index dfa55c873c13..e335bcafa9e4 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -253,8 +253,8 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
if (policy) {
err = validate_nla(nla, maxtype, policy);
if (err < 0) {
- if (extack)
- extack->bad_attr = nla;
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "Attribute failed policy validation");
goto errout;
}
}
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 61a6b5aab07e..15ca78e1c7d4 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -87,11 +87,9 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
- static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
int cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
- arch_spin_lock(&lock);
if (regs && cpu_in_idle(instruction_pointer(regs))) {
pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
cpu, (void *)instruction_pointer(regs));
@@ -102,7 +100,6 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
else
dump_stack();
}
- arch_spin_unlock(&lock);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true;
}
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
deleted file mode 100644
index beb14839b41a..000000000000
--- a/lib/percpu_ida.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Percpu IDA library
- *
- * Copyright (C) 2013 Datera, Inc. Kent Overstreet
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#include <linux/mm.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/bug.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/percpu.h>
-#include <linux/sched/signal.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/percpu_ida.h>
-
-struct percpu_ida_cpu {
- /*
- * Even though this is percpu, we need a lock for tag stealing by remote
- * CPUs:
- */
- spinlock_t lock;
-
- /* nr_free/freelist form a stack of free IDs */
- unsigned nr_free;
- unsigned freelist[];
-};
-
-static inline void move_tags(unsigned *dst, unsigned *dst_nr,
- unsigned *src, unsigned *src_nr,
- unsigned nr)
-{
- *src_nr -= nr;
- memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
- *dst_nr += nr;
-}
-
-/*
- * Try to steal tags from a remote cpu's percpu freelist.
- *
- * We first check how many percpu freelists have tags
- *
- * Then we iterate through the cpus until we find some tags - we don't attempt
- * to find the "best" cpu to steal from, to keep cacheline bouncing to a
- * minimum.
- */
-static inline void steal_tags(struct percpu_ida *pool,
- struct percpu_ida_cpu *tags)
-{
- unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
- struct percpu_ida_cpu *remote;
-
- for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
- cpus_have_tags; cpus_have_tags--) {
- cpu = cpumask_next(cpu, &pool->cpus_have_tags);
-
- if (cpu >= nr_cpu_ids) {
- cpu = cpumask_first(&pool->cpus_have_tags);
- if (cpu >= nr_cpu_ids)
- BUG();
- }
-
- pool->cpu_last_stolen = cpu;
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
-
- cpumask_clear_cpu(cpu, &pool->cpus_have_tags);
-
- if (remote == tags)
- continue;
-
- spin_lock(&remote->lock);
-
- if (remote->nr_free) {
- memcpy(tags->freelist,
- remote->freelist,
- sizeof(unsigned) * remote->nr_free);
-
- tags->nr_free = remote->nr_free;
- remote->nr_free = 0;
- }
-
- spin_unlock(&remote->lock);
-
- if (tags->nr_free)
- break;
- }
-}
-
-/*
- * Pop up to IDA_PCPU_BATCH_MOVE IDs off the global freelist, and push them onto
- * our percpu freelist:
- */
-static inline void alloc_global_tags(struct percpu_ida *pool,
- struct percpu_ida_cpu *tags)
-{
- move_tags(tags->freelist, &tags->nr_free,
- pool->freelist, &pool->nr_free,
- min(pool->nr_free, pool->percpu_batch_size));
-}
-
-/**
- * percpu_ida_alloc - allocate a tag
- * @pool: pool to allocate from
- * @state: task state for prepare_to_wait
- *
- * Returns a tag - an integer in the range [0..nr_tags) (passed to
- * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
- *
- * Safe to be called from interrupt context (assuming it isn't passed
- * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
- *
- * @gfp indicates whether or not to wait until a free id is available (it's not
- * used for internal memory allocations); thus if passed __GFP_RECLAIM we may sleep
- * however long it takes until another thread frees an id (same semantics as a
- * mempool).
- *
- * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE.
- */
-int percpu_ida_alloc(struct percpu_ida *pool, int state)
-{
- DEFINE_WAIT(wait);
- struct percpu_ida_cpu *tags;
- unsigned long flags;
- int tag = -ENOSPC;
-
- tags = raw_cpu_ptr(pool->tag_cpu);
- spin_lock_irqsave(&tags->lock, flags);
-
- /* Fastpath */
- if (likely(tags->nr_free)) {
- tag = tags->freelist[--tags->nr_free];
- spin_unlock_irqrestore(&tags->lock, flags);
- return tag;
- }
- spin_unlock_irqrestore(&tags->lock, flags);
-
- while (1) {
- spin_lock_irqsave(&pool->lock, flags);
- tags = this_cpu_ptr(pool->tag_cpu);
-
- /*
- * prepare_to_wait() must come before steal_tags(), in case
- * percpu_ida_free() on another cpu flips a bit in
- * cpus_have_tags
- *
- * global lock held and irqs disabled, don't need percpu lock
- */
- if (state != TASK_RUNNING)
- prepare_to_wait(&pool->wait, &wait, state);
-
- if (!tags->nr_free)
- alloc_global_tags(pool, tags);
- if (!tags->nr_free)
- steal_tags(pool, tags);
-
- if (tags->nr_free) {
- tag = tags->freelist[--tags->nr_free];
- if (tags->nr_free)
- cpumask_set_cpu(smp_processor_id(),
- &pool->cpus_have_tags);
- }
-
- spin_unlock_irqrestore(&pool->lock, flags);
-
- if (tag >= 0 || state == TASK_RUNNING)
- break;
-
- if (signal_pending_state(state, current)) {
- tag = -ERESTARTSYS;
- break;
- }
-
- schedule();
- }
- if (state != TASK_RUNNING)
- finish_wait(&pool->wait, &wait);
-
- return tag;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_alloc);
-
-/**
- * percpu_ida_free - free a tag
- * @pool: pool @tag was allocated from
- * @tag: a tag previously allocated with percpu_ida_alloc()
- *
- * Safe to be called from interrupt context.
- */
-void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
-{
- struct percpu_ida_cpu *tags;
- unsigned long flags;
- unsigned nr_free;
-
- BUG_ON(tag >= pool->nr_tags);
-
- tags = raw_cpu_ptr(pool->tag_cpu);
-
- spin_lock_irqsave(&tags->lock, flags);
- tags->freelist[tags->nr_free++] = tag;
-
- nr_free = tags->nr_free;
-
- if (nr_free == 1) {
- cpumask_set_cpu(smp_processor_id(),
- &pool->cpus_have_tags);
- wake_up(&pool->wait);
- }
- spin_unlock_irqrestore(&tags->lock, flags);
-
- if (nr_free == pool->percpu_max_size) {
- spin_lock_irqsave(&pool->lock, flags);
- spin_lock(&tags->lock);
-
- if (tags->nr_free == pool->percpu_max_size) {
- move_tags(pool->freelist, &pool->nr_free,
- tags->freelist, &tags->nr_free,
- pool->percpu_batch_size);
-
- wake_up(&pool->wait);
- }
- spin_unlock(&tags->lock);
- spin_unlock_irqrestore(&pool->lock, flags);
- }
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free);
-
-/**
- * percpu_ida_destroy - release a tag pool's resources
- * @pool: pool to free
- *
- * Frees the resources allocated by percpu_ida_init().
- */
-void percpu_ida_destroy(struct percpu_ida *pool)
-{
- free_percpu(pool->tag_cpu);
- free_pages((unsigned long) pool->freelist,
- get_order(pool->nr_tags * sizeof(unsigned)));
-}
-EXPORT_SYMBOL_GPL(percpu_ida_destroy);
-
-/**
- * percpu_ida_init - initialize a percpu tag pool
- * @pool: pool to initialize
- * @nr_tags: number of tags that will be available for allocation
- *
- * Initializes @pool so that it can be used to allocate tags - integers in the
- * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
- * preallocated array of tag structures.
- *
- * Allocation is percpu, but sharding is limited by nr_tags - for best
- * performance, the workload should not span more cpus than nr_tags / 128.
- */
-int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
- unsigned long max_size, unsigned long batch_size)
-{
- unsigned i, cpu, order;
-
- memset(pool, 0, sizeof(*pool));
-
- init_waitqueue_head(&pool->wait);
- spin_lock_init(&pool->lock);
- pool->nr_tags = nr_tags;
- pool->percpu_max_size = max_size;
- pool->percpu_batch_size = batch_size;
-
- /* Guard against overflow */
- if (nr_tags > (unsigned) INT_MAX + 1) {
- pr_err("percpu_ida_init(): nr_tags too large\n");
- return -EINVAL;
- }
-
- order = get_order(nr_tags * sizeof(unsigned));
- pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
- if (!pool->freelist)
- return -ENOMEM;
-
- for (i = 0; i < nr_tags; i++)
- pool->freelist[i] = i;
-
- pool->nr_free = nr_tags;
-
- pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
- pool->percpu_max_size * sizeof(unsigned),
- sizeof(unsigned));
- if (!pool->tag_cpu)
- goto err;
-
- for_each_possible_cpu(cpu)
- spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);
-
- return 0;
-err:
- percpu_ida_destroy(pool);
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(__percpu_ida_init);
-
-/**
- * percpu_ida_for_each_free - iterate free ids of a pool
- * @pool: pool to iterate
- * @fn: interate callback function
- * @data: parameter for @fn
- *
- * Note, this doesn't guarantee to iterate all free ids restrictly. Some free
- * ids might be missed, some might be iterated duplicated, and some might
- * be iterated and not free soon.
- */
-int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
- void *data)
-{
- unsigned long flags;
- struct percpu_ida_cpu *remote;
- unsigned cpu, i, err = 0;
-
- for_each_possible_cpu(cpu) {
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
- spin_lock_irqsave(&remote->lock, flags);
- for (i = 0; i < remote->nr_free; i++) {
- err = fn(remote->freelist[i], data);
- if (err)
- break;
- }
- spin_unlock_irqrestore(&remote->lock, flags);
- if (err)
- goto out;
- }
-
- spin_lock_irqsave(&pool->lock, flags);
- for (i = 0; i < pool->nr_free; i++) {
- err = fn(pool->freelist[i], data);
- if (err)
- break;
- }
- spin_unlock_irqrestore(&pool->lock, flags);
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
-
-/**
- * percpu_ida_free_tags - return free tags number of a specific cpu or global pool
- * @pool: pool related
- * @cpu: specific cpu or global pool if @cpu == nr_cpu_ids
- *
- * Note: this just returns a snapshot of free tags number.
- */
-unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
-{
- struct percpu_ida_cpu *remote;
- if (cpu == nr_cpu_ids)
- return pool->nr_free;
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
- return remote->nr_free;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index 140fa8bb5c23..914ebe98fc21 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -55,22 +55,24 @@ static inline void XOR(int x, int y, int z)
asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
}
-static inline void LOAD_DATA(int x, int n, u8 *ptr)
+static inline void LOAD_DATA(int x, u8 *ptr)
{
- typedef struct { u8 _[16*n]; } addrtype;
+ typedef struct { u8 _[16 * $#]; } addrtype;
register addrtype *__ptr asm("1") = (addrtype *) ptr;
asm volatile ("VLM %2,%3,0,%r1"
- : : "m" (*__ptr), "a" (__ptr), "i" (x), "i" (x + n - 1));
+ : : "m" (*__ptr), "a" (__ptr), "i" (x),
+ "i" (x + $# - 1));
}
-static inline void STORE_DATA(int x, int n, u8 *ptr)
+static inline void STORE_DATA(int x, u8 *ptr)
{
- typedef struct { u8 _[16*n]; } addrtype;
+ typedef struct { u8 _[16 * $#]; } addrtype;
register addrtype *__ptr asm("1") = (addrtype *) ptr;
asm volatile ("VSTM %2,%3,0,1"
- : "=m" (*__ptr) : "a" (__ptr), "i" (x), "i" (x + n - 1));
+ : "=m" (*__ptr) : "a" (__ptr), "i" (x),
+ "i" (x + $# - 1));
}
static inline void COPY_VEC(int x, int y)
@@ -93,19 +95,19 @@ static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
q = dptr[z0 + 2]; /* RS syndrome */
for (d = 0; d < bytes; d += $#*NSIZE) {
- LOAD_DATA(0,$#,&dptr[z0][d]);
+ LOAD_DATA(0,&dptr[z0][d]);
COPY_VEC(8+$$,0+$$);
for (z = z0 - 1; z >= 0; z--) {
MASK(16+$$,8+$$);
AND(16+$$,16+$$,25);
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
- LOAD_DATA(16,$#,&dptr[z][d]);
+ LOAD_DATA(16,&dptr[z][d]);
XOR(0+$$,0+$$,16+$$);
XOR(8+$$,8+$$,16+$$);
}
- STORE_DATA(0,$#,&p[d]);
- STORE_DATA(8,$#,&q[d]);
+ STORE_DATA(0,&p[d]);
+ STORE_DATA(8,&q[d]);
}
kernel_fpu_end(&vxstate, KERNEL_VXR);
}
@@ -127,14 +129,14 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
for (d = 0; d < bytes; d += $#*NSIZE) {
/* P/Q data pages */
- LOAD_DATA(0,$#,&dptr[z0][d]);
+ LOAD_DATA(0,&dptr[z0][d]);
COPY_VEC(8+$$,0+$$);
for (z = z0 - 1; z >= start; z--) {
MASK(16+$$,8+$$);
AND(16+$$,16+$$,25);
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
- LOAD_DATA(16,$#,&dptr[z][d]);
+ LOAD_DATA(16,&dptr[z][d]);
XOR(0+$$,0+$$,16+$$);
XOR(8+$$,8+$$,16+$$);
}
@@ -145,12 +147,12 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
}
- LOAD_DATA(16,$#,&p[d]);
+ LOAD_DATA(16,&p[d]);
XOR(16+$$,16+$$,0+$$);
- STORE_DATA(16,$#,&p[d]);
- LOAD_DATA(16,$#,&q[d]);
+ STORE_DATA(16,&p[d]);
+ LOAD_DATA(16,&q[d]);
XOR(16+$$,16+$$,8+$$);
- STORE_DATA(16,$#,&q[d]);
+ STORE_DATA(16,&q[d]);
}
kernel_fpu_end(&vxstate, KERNEL_VXR);
}
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c
index fcb4ce682c6f..bf043258fa00 100644
--- a/lib/reciprocal_div.c
+++ b/lib/reciprocal_div.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/div64.h>
#include <linux/reciprocal_div.h>
@@ -26,3 +27,43 @@ struct reciprocal_value reciprocal_value(u32 d)
return R;
}
EXPORT_SYMBOL(reciprocal_value);
+
+struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec)
+{
+ struct reciprocal_value_adv R;
+ u32 l, post_shift;
+ u64 mhigh, mlow;
+
+ /* ceil(log2(d)) */
+ l = fls(d - 1);
+ /* NOTE: mlow/mhigh could overflow u64 when l == 32. This case needs to
+ * be handled before calling "reciprocal_value_adv", please see the
+ * comment at include/linux/reciprocal_div.h.
+ */
+ WARN(l == 32,
+ "ceil(log2(0x%08x)) == 32, %s doesn't support such divisor",
+ d, __func__);
+ post_shift = l;
+ mlow = 1ULL << (32 + l);
+ do_div(mlow, d);
+ mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec));
+ do_div(mhigh, d);
+
+ for (; post_shift > 0; post_shift--) {
+ u64 lo = mlow >> 1, hi = mhigh >> 1;
+
+ if (lo >= hi)
+ break;
+
+ mlow = lo;
+ mhigh = hi;
+ }
+
+ R.m = (u32)mhigh;
+ R.sh = post_shift;
+ R.exp = l;
+ R.is_wide_m = mhigh > U32_MAX;
+
+ return R;
+}
+EXPORT_SYMBOL(reciprocal_value_adv);
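As a concrete check of the algorithm above: hand-deriving the steps for d = 6 with prec = 32 gives m = 2863311531, sh = 2 and is_wide_m false, in which case the quotient is (n * m) >> (32 + sh). These constants are worked out here, not taken from the kernel, so treat them as an assumption; the loop below verifies them:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t m = 2863311531u;	/* hand-derived for d = 6 */
	const unsigned int sh = 2;
	uint32_t n;

	for (n = 0; n < 1000000u; n++) {
		uint32_t q = (uint32_t)(((uint64_t)n * m) >> (32 + sh));

		if (q != n / 6) {
			printf("mismatch at n = %u\n", n);
			return 1;
		}
	}
	puts("ok");
	return 0;
}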
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index d8bb1a1eba72..e5fdc8b9e856 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -283,7 +283,7 @@ out:
* in index form
* @prim: primitive element to generate polynomial roots
* @nroots: RS code generator polynomial degree (number of roots)
- * @gfp: GFP_ flags for allocations
+ * @gfp: Memory allocation flags.
*/
struct rs_control *init_rs_gfp(int symsize, int gfpoly, int fcr, int prim,
int nroots, gfp_t gfp)
diff --git a/lib/refcount.c b/lib/refcount.c
index d3b81cefce91..ebcf8cd49e05 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -35,13 +35,13 @@
*
*/
+#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/spinlock.h>
#include <linux/bug.h>
-#ifdef CONFIG_REFCOUNT_FULL
-
/**
- * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
* @i: the value to add to the refcount
* @r: the refcount
*
@@ -58,7 +58,7 @@
*
* Return: false if the passed refcount is 0, true otherwise
*/
-bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -79,10 +79,10 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
return true;
}
-EXPORT_SYMBOL(refcount_add_not_zero);
+EXPORT_SYMBOL(refcount_add_not_zero_checked);
/**
- * refcount_add - add a value to a refcount
+ * refcount_add_checked - add a value to a refcount
* @i: the value to add to the refcount
* @r: the refcount
*
@@ -97,14 +97,14 @@ EXPORT_SYMBOL(refcount_add_not_zero);
* cases, refcount_inc(), or one of its variants, should instead be used to
* increment a reference count.
*/
-void refcount_add(unsigned int i, refcount_t *r)
+void refcount_add_checked(unsigned int i, refcount_t *r)
{
- WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+ WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
-EXPORT_SYMBOL(refcount_add);
+EXPORT_SYMBOL(refcount_add_checked);
/**
- * refcount_inc_not_zero - increment a refcount unless it is 0
+ * refcount_inc_not_zero_checked - increment a refcount unless it is 0
* @r: the refcount to increment
*
* Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
@@ -115,7 +115,7 @@ EXPORT_SYMBOL(refcount_add);
*
* Return: true if the increment was successful, false otherwise
*/
-bool refcount_inc_not_zero(refcount_t *r)
+bool refcount_inc_not_zero_checked(refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -134,10 +134,10 @@ bool refcount_inc_not_zero(refcount_t *r)
return true;
}
-EXPORT_SYMBOL(refcount_inc_not_zero);
+EXPORT_SYMBOL(refcount_inc_not_zero_checked);
/**
- * refcount_inc - increment a refcount
+ * refcount_inc_checked - increment a refcount
* @r: the refcount to increment
*
* Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
@@ -148,14 +148,14 @@ EXPORT_SYMBOL(refcount_inc_not_zero);
* Will WARN if the refcount is 0, as this represents a possible use-after-free
* condition.
*/
-void refcount_inc(refcount_t *r)
+void refcount_inc_checked(refcount_t *r)
{
- WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+ WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
}
-EXPORT_SYMBOL(refcount_inc);
+EXPORT_SYMBOL(refcount_inc_checked);
/**
- * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
* @i: amount to subtract from the refcount
* @r: the refcount
*
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(refcount_inc);
*
* Return: true if the resulting refcount is 0, false otherwise
*/
-bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -192,10 +192,10 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
return !new;
}
-EXPORT_SYMBOL(refcount_sub_and_test);
+EXPORT_SYMBOL(refcount_sub_and_test_checked);
/**
- * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
* @r: the refcount
*
* Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
@@ -207,14 +207,14 @@ EXPORT_SYMBOL(refcount_sub_and_test);
*
* Return: true if the resulting refcount is 0, false otherwise
*/
-bool refcount_dec_and_test(refcount_t *r)
+bool refcount_dec_and_test_checked(refcount_t *r)
{
- return refcount_sub_and_test(1, r);
+ return refcount_sub_and_test_checked(1, r);
}
-EXPORT_SYMBOL(refcount_dec_and_test);
+EXPORT_SYMBOL(refcount_dec_and_test_checked);
/**
- * refcount_dec - decrement a refcount
+ * refcount_dec_checked - decrement a refcount
* @r: the refcount
*
* Similar to atomic_dec(), it will WARN on underflow and fail to decrement
@@ -223,12 +223,11 @@ EXPORT_SYMBOL(refcount_dec_and_test);
* Provides release memory ordering, such that prior loads and stores are done
* before.
*/
-void refcount_dec(refcount_t *r)
+void refcount_dec_checked(refcount_t *r)
{
- WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+ WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
-EXPORT_SYMBOL(refcount_dec);
-#endif /* CONFIG_REFCOUNT_FULL */
+EXPORT_SYMBOL(refcount_dec_checked);
/**
* refcount_dec_if_one - decrement a refcount if it is 1
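All the _checked variants keep the documented semantics: saturate at UINT_MAX instead of wrapping, and warn rather than mutate when the count is already zero. Those two rules in plain C (a sketch of the semantics, not the lock-free implementation):

#include <stdio.h>
#include <limits.h>

static unsigned int refcount_add_sketch(unsigned int val, unsigned int i)
{
	if (val == 0) {
		fprintf(stderr, "refcount_t: addition on 0; use-after-free.\n");
		return 0;	/* refuse to resurrect */
	}
	if (UINT_MAX - val < i)	/* would overflow: saturate, not wrap */
		return UINT_MAX;
	return val + i;
}

int main(void)
{
	return refcount_add_sketch(UINT_MAX - 1, 5) == UINT_MAX ? 0 : 1;
}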
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index e5c8586cf717..ae4223e0f5bc 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -28,6 +28,7 @@
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/rhashtable.h>
#define HASH_DEFAULT_SIZE 64UL
#define HASH_MIN_SIZE 4U
@@ -115,8 +116,7 @@ static void bucket_table_free_rcu(struct rcu_head *head)
static union nested_table *nested_table_alloc(struct rhashtable *ht,
union nested_table __rcu **prev,
- unsigned int shifted,
- unsigned int nhash)
+ bool leaf)
{
union nested_table *ntbl;
int i;
@@ -127,10 +127,9 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
- if (ntbl && shifted) {
- for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
- INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
- (i << shifted) | nhash);
+ if (ntbl && leaf) {
+ for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
+ INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
}
rcu_assign_pointer(*prev, ntbl);
@@ -156,7 +155,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
return NULL;
if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
- 0, 0)) {
+ false)) {
kfree(tbl);
return NULL;
}
@@ -206,7 +205,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
tbl->hash_rnd = get_random_u32();
for (i = 0; i < nbuckets; i++)
- INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
+ INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
return tbl;
}
@@ -227,8 +226,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
- struct bucket_table *new_tbl = rhashtable_last_table(ht,
- rht_dereference_rcu(old_tbl->future_tbl, ht));
+ struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
int err = -EAGAIN;
struct rhash_head *head, *next, *entry;
@@ -298,21 +296,14 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
struct bucket_table *old_tbl,
struct bucket_table *new_tbl)
{
- /* Protect future_tbl using the first bucket lock. */
- spin_lock_bh(old_tbl->locks);
-
- /* Did somebody beat us to it? */
- if (rcu_access_pointer(old_tbl->future_tbl)) {
- spin_unlock_bh(old_tbl->locks);
- return -EEXIST;
- }
-
/* Make insertions go into the new, empty table right away. Deletions
* and lookups will be attempted in both tables until we synchronize.
+ * As cmpxchg() provides strong barriers, we do not need
+ * rcu_assign_pointer().
*/
- rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
- spin_unlock_bh(old_tbl->locks);
+ if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
+ return -EEXIST;
return 0;
}
@@ -475,7 +466,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
fail:
/* Do not fail the insert if someone else did a rehash. */
- if (likely(rcu_dereference_raw(tbl->future_tbl)))
+ if (likely(rcu_access_pointer(tbl->future_tbl)))
return 0;
/* Schedule async rehash to retry allocation in process context. */
@@ -548,7 +539,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
return ERR_CAST(data);
- new_tbl = rcu_dereference(tbl->future_tbl);
+ new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (new_tbl)
return new_tbl;
@@ -607,7 +598,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
break;
spin_unlock_bh(lock);
- tbl = rcu_dereference(tbl->future_tbl);
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
}
data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
@@ -1002,7 +993,6 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
* .key_offset = offsetof(struct test_obj, key),
* .key_len = sizeof(int),
* .hashfn = jhash,
- * .nulls_base = (1U << RHT_BASE_SHIFT),
* };
*
* Configuration Example 2: Variable length keys
@@ -1034,9 +1024,6 @@ int rhashtable_init(struct rhashtable *ht,
(params->obj_hashfn && !params->obj_cmpfn))
return -EINVAL;
- if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
- return -EINVAL;
-
memset(ht, 0, sizeof(*ht));
mutex_init(&ht->mutex);
spin_lock_init(&ht->lock);
@@ -1100,10 +1087,6 @@ int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
int err;
- /* No rhlist NULLs marking for now. */
- if (params->nulls_base)
- return -EINVAL;
-
err = rhashtable_init(&hlt->ht, params);
hlt->ht.rhlist = true;
return err;
@@ -1227,25 +1210,18 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
unsigned int index = hash & ((1 << tbl->nest) - 1);
unsigned int size = tbl->size >> tbl->nest;
union nested_table *ntbl;
- unsigned int shifted;
- unsigned int nhash;
ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
hash >>= tbl->nest;
- nhash = index;
- shifted = tbl->nest;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
- size <= (1 << shift) ? shifted : 0, nhash);
+ size <= (1 << shift));
while (ntbl && size > (1 << shift)) {
index = hash & ((1 << shift) - 1);
size >>= shift;
hash >>= shift;
- nhash |= index << shifted;
- shifted += shift;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
- size <= (1 << shift) ? shifted : 0,
- nhash);
+ size <= (1 << shift));
}
if (!ntbl)
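
The loop above descends a radix-tree-like hierarchy of nested bucket tables, consuming `shift` bits of the hash per level until the remaining size fits a single table. A rough stand-alone sketch of that descent, loosely modelled on the loop (constants and names invented):

#include <stdio.h>

#define SHIFT 4 /* each level indexes 2^4 = 16 slots */

/* Split a hash into per-level slot indices, SHIFT bits at a time. */
static void descend(unsigned int hash, unsigned int size)
{
	unsigned int level = 0;

	while (size > (1u << SHIFT)) {
		unsigned int index = hash & ((1u << SHIFT) - 1);

		printf("level %u: slot %u\n", level++, index);
		hash >>= SHIFT;
		size >>= SHIFT;
	}
	printf("leaf:    slot %u\n", hash & (size - 1));
}

int main(void)
{
	descend(0x3a7c, 4096); /* 4096 buckets -> three 16-way levels */
	return 0;
}
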
diff --git a/lib/test_bitfield.c b/lib/test_bitfield.c
new file mode 100644
index 000000000000..5b8f4108662d
--- /dev/null
+++ b/lib/test_bitfield.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for bitfield helpers.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+
+#define CHECK_ENC_GET_U(tp, v, field, res) do { \
+ { \
+ u##tp _res; \
+ \
+ _res = u##tp##_encode_bits(v, field); \
+ if (_res != res) { \
+ pr_warn("u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n",\
+ (u64)_res); \
+ return -EINVAL; \
+ } \
+ if (u##tp##_get_bits(_res, field) != v) \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET_LE(tp, v, field, res) do { \
+ { \
+ __le##tp _res; \
+ \
+ _res = le##tp##_encode_bits(v, field); \
+ if (_res != cpu_to_le##tp(res)) { \
+ pr_warn("le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
+ (u64)le##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ return -EINVAL; \
+ } \
+ if (le##tp##_get_bits(_res, field) != v) \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET_BE(tp, v, field, res) do { \
+ { \
+ __be##tp _res; \
+ \
+ _res = be##tp##_encode_bits(v, field); \
+ if (_res != cpu_to_be##tp(res)) { \
+ pr_warn("be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
+ (u64)be##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ return -EINVAL; \
+ } \
+ if (be##tp##_get_bits(_res, field) != v) \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET(tp, v, field, res) do { \
+ CHECK_ENC_GET_U(tp, v, field, res); \
+ CHECK_ENC_GET_LE(tp, v, field, res); \
+ CHECK_ENC_GET_BE(tp, v, field, res); \
+ } while (0)
+
+static int test_constants(void)
+{
+ /*
+ * NOTE
+ * This whole function compiles (or at least should, if everything
+ * is going according to plan) to nothing after optimisation.
+ */
+
+ CHECK_ENC_GET(16, 1, 0x000f, 0x0001);
+ CHECK_ENC_GET(16, 3, 0x00f0, 0x0030);
+ CHECK_ENC_GET(16, 5, 0x0f00, 0x0500);
+ CHECK_ENC_GET(16, 7, 0xf000, 0x7000);
+ CHECK_ENC_GET(16, 14, 0x000f, 0x000e);
+ CHECK_ENC_GET(16, 15, 0x00f0, 0x00f0);
+
+ CHECK_ENC_GET_U(8, 1, 0x0f, 0x01);
+ CHECK_ENC_GET_U(8, 3, 0xf0, 0x30);
+ CHECK_ENC_GET_U(8, 14, 0x0f, 0x0e);
+ CHECK_ENC_GET_U(8, 15, 0xf0, 0xf0);
+
+ CHECK_ENC_GET(32, 1, 0x00000f00, 0x00000100);
+ CHECK_ENC_GET(32, 3, 0x0000f000, 0x00003000);
+ CHECK_ENC_GET(32, 5, 0x000f0000, 0x00050000);
+ CHECK_ENC_GET(32, 7, 0x00f00000, 0x00700000);
+ CHECK_ENC_GET(32, 14, 0x0f000000, 0x0e000000);
+ CHECK_ENC_GET(32, 15, 0xf0000000, 0xf0000000);
+
+ CHECK_ENC_GET(64, 1, 0x00000f0000000000ull, 0x0000010000000000ull);
+ CHECK_ENC_GET(64, 3, 0x0000f00000000000ull, 0x0000300000000000ull);
+ CHECK_ENC_GET(64, 5, 0x000f000000000000ull, 0x0005000000000000ull);
+ CHECK_ENC_GET(64, 7, 0x00f0000000000000ull, 0x0070000000000000ull);
+ CHECK_ENC_GET(64, 14, 0x0f00000000000000ull, 0x0e00000000000000ull);
+ CHECK_ENC_GET(64, 15, 0xf000000000000000ull, 0xf000000000000000ull);
+
+ return 0;
+}
+
+#define CHECK(tp, mask) do { \
+ u64 v; \
+ \
+ for (v = 0; v < 1 << hweight32(mask); v++) \
+ if (tp##_encode_bits(v, mask) != v << __ffs64(mask)) \
+ return -EINVAL; \
+ } while (0)
+
+static int test_variables(void)
+{
+ CHECK(u8, 0x0f);
+ CHECK(u8, 0xf0);
+ CHECK(u8, 0x38);
+
+ CHECK(u16, 0x0038);
+ CHECK(u16, 0x0380);
+ CHECK(u16, 0x3800);
+ CHECK(u16, 0x8000);
+
+ CHECK(u32, 0x80000000);
+ CHECK(u32, 0x7f000000);
+ CHECK(u32, 0x07e00000);
+ CHECK(u32, 0x00018000);
+
+ CHECK(u64, 0x8000000000000000ull);
+ CHECK(u64, 0x7f00000000000000ull);
+ CHECK(u64, 0x0001800000000000ull);
+ CHECK(u64, 0x0000000080000000ull);
+ CHECK(u64, 0x000000007f000000ull);
+ CHECK(u64, 0x0000000018000000ull);
+ CHECK(u64, 0x0000001f8000000ull);
+
+ return 0;
+}
+
+static int __init test_bitfields(void)
+{
+ int ret = test_constants();
+
+ if (ret) {
+ pr_warn("constant tests failed!\n");
+ return ret;
+ }
+
+ ret = test_variables();
+ if (ret) {
+ pr_warn("variable tests failed!\n");
+ return ret;
+ }
+
+#ifdef TEST_BITFIELD_COMPILE
+ /* these should fail compilation */
+ CHECK_ENC_GET(16, 16, 0x0f00, 0x1000);
+ u32_encode_bits(7, 0x06000000);
+
+ /* this should at least give a warning */
+ u16_encode_bits(0, 0x60000);
+#endif
+
+ pr_info("tests passed\n");
+
+ return 0;
+}
+module_init(test_bitfields)
+
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_printf.c b/lib/test_printf.c
index cea592f402ed..53527ea822b5 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -206,6 +206,7 @@ test_string(void)
#define PTR_WIDTH 16
#define PTR ((void *)0xffff0123456789abUL)
#define PTR_STR "ffff0123456789ab"
+#define PTR_VAL_NO_CRNG "(____ptrval____)"
#define ZEROS "00000000" /* hex 32 zero bits */
static int __init
@@ -216,7 +217,16 @@ plain_format(void)
nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
- if (nchars != PTR_WIDTH || strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
+ if (nchars != PTR_WIDTH)
+ return -1;
+
+ if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
+ pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
+ PTR_VAL_NO_CRNG);
+ return 0;
+ }
+
+ if (strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
return -1;
return 0;
@@ -227,6 +237,7 @@ plain_format(void)
#define PTR_WIDTH 8
#define PTR ((void *)0x456789ab)
#define PTR_STR "456789ab"
+#define PTR_VAL_NO_CRNG "(ptrval)"
static int __init
plain_format(void)
@@ -245,7 +256,16 @@ plain_hash(void)
nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
- if (nchars != PTR_WIDTH || strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
+ if (nchars != PTR_WIDTH)
+ return -1;
+
+ if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
+ pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
+ PTR_VAL_NO_CRNG);
+ return 0;
+ }
+
+ if (strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
return -1;
return 0;
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index fb6968109113..82ac39ce5310 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -83,7 +83,7 @@ static u32 my_hashfn(const void *data, u32 len, u32 seed)
{
const struct test_obj_rhl *obj = data;
- return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
+ return (obj->value.id % 10);
}
static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
@@ -99,7 +99,6 @@ static struct rhashtable_params test_rht_params = {
.key_offset = offsetof(struct test_obj, value),
.key_len = sizeof(struct test_obj_val),
.hashfn = jhash,
- .nulls_base = (3U << RHT_BASE_SHIFT),
};
static struct rhashtable_params test_rht_params_dup = {
@@ -296,8 +295,6 @@ static int __init test_rhltable(unsigned int entries)
if (!obj_in_table)
goto out_free;
- /* nulls_base not supported in rhlist interface */
- test_rht_params.nulls_base = 0;
err = rhltable_init(&rhlt, &test_rht_params);
if (WARN_ON(err))
goto out_free;
@@ -501,6 +498,8 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
unsigned int i, cnt = 0;
ht = &rhlt->ht;
+ /* Take the mutex to avoid RCU warning */
+ mutex_lock(&ht->mutex);
tbl = rht_dereference(ht->tbl, ht);
for (i = 0; i < tbl->size; i++) {
struct rhash_head *pos, *next;
@@ -534,6 +533,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
}
}
printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
+ mutex_unlock(&ht->mutex);
return cnt;
}
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index a48aaa79d352..d5b3a3f95c01 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1651,6 +1651,17 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
return widen_string(buf, buf - buf_start, end, spec);
}
+/* Make pointers available for printing early in the boot sequence. */
+static int debug_boot_weak_hash __ro_after_init;
+
+static int __init debug_boot_weak_hash_enable(char *str)
+{
+ debug_boot_weak_hash = 1;
+ pr_info("debug_boot_weak_hash enabled\n");
+ return 0;
+}
+early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
+
static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
static siphash_key_t ptr_key __read_mostly;
@@ -1675,8 +1686,16 @@ static struct random_ready_callback random_ready = {
static int __init initialize_ptr_random(void)
{
- int ret = add_random_ready_callback(&random_ready);
+ int key_size = sizeof(ptr_key);
+ int ret;
+ /* Use hw RNG if available. */
+ if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
+ static_branch_disable(&not_filled_random_ptr_key);
+ return 0;
+ }
+
+ ret = add_random_ready_callback(&random_ready);
if (!ret) {
return 0;
} else if (ret == -EALREADY) {
@@ -1695,6 +1714,12 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
unsigned long hashval;
+ /* When debugging early boot use non-cryptographically secure hash. */
+ if (unlikely(debug_boot_weak_hash)) {
+ hashval = hash_long((unsigned long)ptr, 32);
+ return pointer_string(buf, end, (const void *)hashval, spec);
+ }
+
if (static_branch_unlikely(&not_filled_random_ptr_key)) {
spec.field_width = 2 * sizeof(ptr);
/* string length must be less than default_width */
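
The fallback added above hashes the pointer with hash_long() so that %p output is stable and matchable across log lines without leaking the raw address, even before the siphash key can be seeded. A rough user-space illustration of that weak multiplicative hash (the constant is the kernel's GOLDEN_RATIO_64; treat the sketch as an approximation of hash_long() on a 64-bit build, not the exact implementation):

#include <stdint.h>
#include <stdio.h>

/* 2^64 divided by the golden ratio, as used by the kernel's hash_64(). */
#define GOLDEN_RATIO_64 0x61C8864680B583EBull

/* Fold a pointer-sized value down to 32 hashed bits. */
static uint32_t weak_ptr_hash(uint64_t val)
{
	return (uint32_t)((val * GOLDEN_RATIO_64) >> 32);
}

int main(void)
{
	int x;
	uint64_t addr = (uint64_t)(uintptr_t)&x;

	/* Same input, same output: usable for correlating pointers. */
	printf("%%p output would resemble %08x\n", weak_ptr_hash(addr));
	printf("stable across calls:      %08x\n", weak_ptr_hash(addr));
	return 0;
}
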
@@ -1942,6 +1967,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'F':
return device_node_string(buf, end, ptr, spec, fmt + 1);
}
+ break;
case 'x':
return pointer_string(buf, end, ptr, spec);
}
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 34532d14fd4c..25a5d87e2e4c 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -15,6 +15,7 @@
* but they are bigger and use more memory for the lookup table.
*/
+#include <linux/crc32poly.h>
#include "xz_private.h"
/*
@@ -29,7 +30,7 @@ STATIC_RW_DATA uint32_t xz_crc32_table[256];
XZ_EXTERN void xz_crc32_init(void)
{
- const uint32_t poly = 0xEDB88320;
+ const uint32_t poly = CRC32_POLY_LE;
uint32_t i;
uint32_t j;
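
The only change above is spelling the constant through CRC32_POLY_LE; it is the same bit-reversed polynomial 0xEDB88320. For reference, a stand-alone sketch of the usual little-endian table construction and a byte-at-a-time CRC (reconstructed from the standard algorithm, not copied from xz):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CRC32_POLY_LE 0xEDB88320u

static uint32_t crc32_table[256];

static void crc32_init(void)
{
	uint32_t i, j, r;

	for (i = 0; i < 256; i++) {
		r = i;
		for (j = 0; j < 8; j++)
			r = (r >> 1) ^ ((r & 1) ? CRC32_POLY_LE : 0);
		crc32_table[i] = r;
	}
}

static uint32_t crc32(const uint8_t *buf, size_t len)
{
	uint32_t crc = ~0u;

	while (len--)
		crc = crc32_table[(crc ^ *buf++) & 0xFF] ^ (crc >> 8);
	return ~crc;
}

int main(void)
{
	crc32_init();
	/* Well-known check value: CRC-32 of "123456789" is 0xCBF43926. */
	printf("0x%08X\n", crc32((const uint8_t *)"123456789", 9));
	return 0;
}
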
diff --git a/mm/Kconfig b/mm/Kconfig
index ce95491abd6a..9ae1b6a8e30f 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1,3 +1,6 @@
+
+menu "Memory Management options"
+
config SELECT_MEMORY_MODEL
def_bool y
depends on ARCH_SELECT_MEMORY_MODEL
@@ -762,3 +765,5 @@ config GUP_BENCHMARK
config ARCH_HAS_PTE_SPECIAL
bool
+
+endmenu
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 9e197987b67d..97db0e8e362b 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -21,6 +21,53 @@
#include "internal.h"
+/**
+ * DOC: bootmem overview
+ *
+ * Bootmem is a boot-time physical memory allocator and configurator.
+ *
+ * It is used early in the boot process before the page allocator is
+ * set up.
+ *
+ * Bootmem is based on the most basic of allocators, a First Fit
+ * allocator which uses a bitmap to represent memory. If a bit is 1,
+ * the page is allocated; if 0, it is free. To satisfy allocations
+ * of sizes smaller than a page, the allocator records the Page Frame
+ * Number (PFN) of the last allocation and the offset the allocation
+ * ended at. Subsequent small allocations are merged together and
+ * stored on the same page.
+ *
+ * The information used by the bootmem allocator is represented by
+ * :c:type:`struct bootmem_data`. An array to hold up to %MAX_NUMNODES
+ * such structures is statically allocated and then it is discarded
+ * when the system initialization completes. Each entry in this array
+ * corresponds to a node with memory. For UMA systems only entry 0 is
+ * used.
+ *
+ * The bootmem allocator is initialized during early architecture
+ * specific setup. Each architecture is required to supply a
+ * :c:func:`setup_arch` function which, among other tasks, is
+ * responsible for acquiring the necessary parameters to initialise
+ * the boot memory allocator. These parameters define limits of usable
+ * physical memory:
+ *
+ * * @min_low_pfn - the lowest PFN that is available in the system
+ * * @max_low_pfn - the highest PFN that may be addressed by low
+ * memory (%ZONE_NORMAL)
+ * * @max_pfn - the last PFN available to the system.
+ *
+ * After those limits are determined, the :c:func:`init_bootmem` or
+ * :c:func:`init_bootmem_node` function should be called to initialize
+ * the bootmem allocator. The UMA case should use the `init_bootmem`
+ * function. It will initialize the ``contig_page_data`` structure that
+ * represents the only memory node in the system. In the NUMA case the
+ * `init_bootmem_node` function should be called to initialize the
+ * bootmem allocator for each node.
+ *
+ * Once the allocator is set up, it is possible to use either the
+ * single node or the NUMA variant of the allocation APIs.
+ */
+
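+ * To make the first-fit-on-a-bitmap idea above concrete, here is a toy
+ * user-space sketch (illustrative only: it spends a byte per page where
+ * bootmem uses a bit, and omits the last-allocation PFN/offset tracking
+ * that lets sub-page requests share a page):
+ *
+ *	#include <stdio.h>
+ *	#include <string.h>
+ *
+ *	#define NPAGES 64
+ *
+ *	static unsigned char bitmap[NPAGES]; // 1 = allocated, 0 = free
+ *
+ *	// First run of 'count' free pages; returns first PFN or -1.
+ *	static int first_fit(int count)
+ *	{
+ *		int pfn, run = 0;
+ *
+ *		for (pfn = 0; pfn < NPAGES; pfn++) {
+ *			run = bitmap[pfn] ? 0 : run + 1;
+ *			if (run == count) {
+ *				int start = pfn - count + 1;
+ *
+ *				memset(bitmap + start, 1, count);
+ *				return start;
+ *			}
+ *		}
+ *		return -1;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		printf("alloc 4 -> pfn %d\n", first_fit(4)); // 0
+ *		printf("alloc 2 -> pfn %d\n", first_fit(2)); // 4
+ *		bitmap[1] = 0;                               // free a page
+ *		printf("alloc 1 -> pfn %d\n", first_fit(1)); // 1
+ *		return 0;
+ *	}
+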
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
.bdata = &bootmem_node_data[0]
@@ -62,6 +109,8 @@ static unsigned long __init bootmap_bytes(unsigned long pages)
/**
* bootmem_bootmap_pages - calculate bitmap size in pages
* @pages: number of pages the bitmap has to represent
+ *
+ * Return: the number of pages needed to hold the bitmap.
*/
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
@@ -121,7 +170,7 @@ static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
* @startpfn: first pfn on the node
* @endpfn: first pfn after the node
*
- * Returns the number of bytes needed to hold the bitmap for this node.
+ * Return: the number of bytes needed to hold the bitmap for this node.
*/
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
unsigned long startpfn, unsigned long endpfn)
@@ -134,7 +183,7 @@ unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
* @start: pfn where the bitmap is to be placed
* @pages: number of available physical pages
*
- * Returns the number of bytes needed to hold the bitmap.
+ * Return: the number of bytes needed to hold the bitmap.
*/
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
@@ -143,15 +192,6 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
-/*
- * free_bootmem_late - free bootmem pages directly to page allocator
- * @addr: starting physical address of the range
- * @size: size of the range in bytes
- *
- * This is only useful when the bootmem allocator has already been torn
- * down, but we are still initializing the system. Pages are given directly
- * to the page allocator, no bootmem metadata is updated because it is gone.
- */
void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
{
unsigned long cursor, end;
@@ -264,11 +304,6 @@ void __init reset_all_zones_managed_pages(void)
reset_managed_pages_done = 1;
}
-/**
- * free_all_bootmem - release free pages to the buddy allocator
- *
- * Returns the number of pages actually released.
- */
unsigned long __init free_all_bootmem(void)
{
unsigned long total_pages = 0;
@@ -385,16 +420,6 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
BUG();
}
-/**
- * free_bootmem_node - mark a page range as usable
- * @pgdat: node the range resides on
- * @physaddr: starting address of the range
- * @size: size of the range in bytes
- *
- * Partial pages will be considered reserved and left as they are.
- *
- * The range must reside completely on the specified node.
- */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size)
{
@@ -408,15 +433,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}
-/**
- * free_bootmem - mark a page range as usable
- * @physaddr: starting physical address of the range
- * @size: size of the range in bytes
- *
- * Partial pages will be considered reserved and left as they are.
- *
- * The range must be contiguous but may span node boundaries.
- */
void __init free_bootmem(unsigned long physaddr, unsigned long size)
{
unsigned long start, end;
@@ -439,6 +455,8 @@ void __init free_bootmem(unsigned long physaddr, unsigned long size)
* Partial pages will be reserved.
*
* The range must reside completely on the specified node.
+ *
+ * Return: 0 on success, -errno on failure.
*/
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size, int flags)
@@ -460,6 +478,8 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
* Partial pages will be reserved.
*
* The range must be contiguous but may span node boundaries.
+ *
+ * Return: 0 on success, -errno on failure.
*/
int __init reserve_bootmem(unsigned long addr, unsigned long size,
int flags)
@@ -646,19 +666,6 @@ restart:
return NULL;
}
-/**
- * __alloc_bootmem_nopanic - allocate boot memory without panicking
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may happen on any node in the system.
- *
- * Returns NULL on failure.
- */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
unsigned long goal)
{
@@ -682,19 +689,6 @@ static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
return NULL;
}
-/**
- * __alloc_bootmem - allocate boot memory
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may happen on any node in the system.
- *
- * The function panics if the request can not be satisfied.
- */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
unsigned long goal)
{
@@ -754,21 +748,6 @@ void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
return NULL;
}
-/**
- * __alloc_bootmem_node - allocate boot memory from a specific node
- * @pgdat: node to allocate from
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may fall back to any node in the system if the specified node
- * can not hold the requested memory.
- *
- * The function panics if the request can not be satisfied.
- */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
@@ -807,19 +786,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
}
-/**
- * __alloc_bootmem_low - allocate low boot memory
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may happen on any node in the system.
- *
- * The function panics if the request can not be satisfied.
- */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
unsigned long goal)
{
@@ -834,21 +800,6 @@ void * __init __alloc_bootmem_low_nopanic(unsigned long size,
ARCH_LOW_ADDRESS_LIMIT);
}
-/**
- * __alloc_bootmem_low_node - allocate low boot memory from a specific node
- * @pgdat: node to allocate from
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may fall back to any node in the system if the specified node
- * can not hold the requested memory.
- *
- * The function panics if the request can not be satisfied.
- */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 25346bd99364..a9e1e093df51 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -552,7 +552,7 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
VM_BUG_ON_PAGE(!PageCompound(page), page);
- if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {
+ if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
put_page(page);
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
@@ -1142,7 +1142,7 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
vmf->address, page_to_nid(page));
if (unlikely(!pages[i] ||
- mem_cgroup_try_charge(pages[i], vma->vm_mm,
+ mem_cgroup_try_charge_delay(pages[i], vma->vm_mm,
GFP_KERNEL, &memcg, false))) {
if (pages[i])
put_page(pages[i]);
@@ -1312,7 +1312,7 @@ alloc:
goto out;
}
- if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
+ if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm,
huge_gfp, &memcg, true))) {
put_page(new_page);
split_huge_pmd(vma, vmf->pmd, vmf->address);
diff --git a/mm/init-mm.c b/mm/init-mm.c
index f0179c9c04c2..a787a319211e 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -15,6 +15,16 @@
#define INIT_MM_CONTEXT(name)
#endif
+/*
+ * For dynamically allocated mm_structs, there is a dynamically sized cpumask
+ * at the end of the structure, the size of which depends on the maximum CPU
+ * number the system can see. That way we allocate only as much memory for
+ * mm_cpumask() as needed for the hundreds or thousands of processes that
+ * a system typically runs.
+ *
+ * Since there is only one init_mm in the entire system, keep it simple
+ * and size this cpu_bitmask to NR_CPUS.
+ */
struct mm_struct init_mm = {
.mm_rb = RB_ROOT,
.pgd = swapper_pg_dir,
@@ -25,5 +35,6 @@ struct mm_struct init_mm = {
.arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
.user_ns = &init_user_ns,
+ .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
INIT_MM_CONTEXT(init_mm)
};
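
The comment covers the dynamic case; the designated initializer on .cpu_bitmap gives the statically defined init_mm a fixed NR_CPUS-sized mask, since a flexible array member otherwise has no storage. A hedged user-space sketch of the same trailing-bitmap technique (invented names; the kernel manipulates the mask through cpumask helpers rather than raw longs):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct mm_like {
	int users;
	/* Flexible array: sized for however many CPUs we really have. */
	unsigned long cpu_bitmap[];
};

static struct mm_like *mm_alloc(unsigned int nr_cpus)
{
	size_t words = BITS_TO_LONGS(nr_cpus);

	/* Allocate only as many bitmap words as this machine needs. */
	return calloc(1, sizeof(struct mm_like) +
			 words * sizeof(unsigned long));
}

int main(void)
{
	struct mm_like *mm = mm_alloc(6); /* pretend: 6 possible CPUs */

	if (!mm)
		return 1;
	mm->cpu_bitmap[0] |= 1UL << 3; /* mark CPU 3 in the mask */
	printf("word 0 = %#lx\n", mm->cpu_bitmap[0]);
	free(mm);
	return 0;
}
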
diff --git a/mm/memblock.c b/mm/memblock.c
index 4b5d245fafc1..b4ad05764745 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -27,6 +27,61 @@
#include "internal.h"
+/**
+ * DOC: memblock overview
+ *
+ * Memblock is a method of managing memory regions during the early
+ * boot period when the usual kernel memory allocators are not up and
+ * running.
+ *
+ * Memblock views the system memory as collections of contiguous
+ * regions. There are several types of these collections:
+ *
+ * * ``memory`` - describes the physical memory available to the
+ * kernel; this may differ from the actual physical memory installed
+ * in the system, for instance when the memory is restricted with
+ * ``mem=`` command line parameter
+ * * ``reserved`` - describes the regions that were allocated
+ * * ``physmap`` - describes the actual physical memory regardless of
+ * the possible restrictions; the ``physmap`` type is only available
+ * on some architectures.
+ *
+ * Each region is represented by :c:type:`struct memblock_region` that
+ * defines the region extents, its attributes and NUMA node id on NUMA
+ * systems. Every memory type is described by the :c:type:`struct
+ * memblock_type` which contains an array of memory regions along with
+ * the allocator metadata. The memory types are nicely wrapped with
+ * :c:type:`struct memblock`. This structure is statically initialized
+ * at build time. The region arrays for the "memory" and "reserved"
+ * types are initially sized to %INIT_MEMBLOCK_REGIONS and for the
+ * "physmap" type to %INIT_PHYSMEM_REGIONS.
+ * The :c:func:`memblock_allow_resize` enables automatic resizing of
+ * the region arrays during addition of new regions. This feature
+ * should be used with care so that memory allocated for the region
+ * array will not overlap with areas that should be reserved, for
+ * example initrd.
+ *
+ * The early architecture setup should tell memblock what the physical
+ * memory layout is by using :c:func:`memblock_add` or
+ * :c:func:`memblock_add_node` functions. The first function does not
+ * assign the region to a NUMA node and it is appropriate for UMA
+ * systems. Yet, it is possible to use it on NUMA systems as well and
+ * assign the region to a NUMA node later in the setup process using
+ * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
+ * function performs such an assignment directly.
+ *
+ * Once memblock is set up, the memory can be allocated using either
+ * memblock or bootmem APIs.
+ *
+ * As the system boot progresses, the architecture specific
+ * :c:func:`mem_init` function frees all the memory to the buddy page
+ * allocator.
+ *
+ * If an architecture enables %CONFIG_ARCH_DISCARD_MEMBLOCK, the
+ * memblock data structures will be discarded after the system
+ * initialization completes.
+ */
+
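
A compact sketch of the region-collection idea described in the overview above: a sorted array of {base, size} ranges with adjacent-region merging on insert. This is a greatly simplified stand-alone illustration; the real code also handles overlapping inserts, region flags and NUMA node ids, and can grow the array.

#include <stdint.h>
#include <stdio.h>

struct region { uint64_t base, size; };

static struct region memory[16];
static int cnt;

/* Insert a non-overlapping range, keeping the array sorted and merged. */
static void region_add(uint64_t base, uint64_t size)
{
	int i = 0, j;

	while (i < cnt && memory[i].base < base)
		i++;
	for (j = cnt; j > i; j--)
		memory[j] = memory[j - 1];
	memory[i] = (struct region){ base, size };
	cnt++;

	/* Merge neighbours that now touch. */
	for (i = 0; i + 1 < cnt; ) {
		if (memory[i].base + memory[i].size == memory[i + 1].base) {
			memory[i].size += memory[i + 1].size;
			for (j = i + 1; j + 1 < cnt; j++)
				memory[j] = memory[j + 1];
			cnt--;
		} else {
			i++;
		}
	}
}

int main(void)
{
	region_add(0x1000, 0x1000);
	region_add(0x3000, 0x1000);
	region_add(0x2000, 0x1000); /* bridges the two ranges above */
	for (int i = 0; i < cnt; i++)
		printf("[%#llx-%#llx)\n",
		       (unsigned long long)memory[i].base,
		       (unsigned long long)(memory[i].base + memory[i].size));
	return 0; /* prints the single merged range [0x1000-0x4000) */
}
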
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
@@ -61,7 +116,7 @@ static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;
-ulong __init_memblock choose_memblock_flags(void)
+enum memblock_flags __init_memblock choose_memblock_flags(void)
{
return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}
@@ -93,10 +148,11 @@ bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
return i < type->cnt;
}
-/*
+/**
* __memblock_find_range_bottom_up - find free area utility in bottom-up
* @start: start of candidate range
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
+ * %MEMBLOCK_ALLOC_ACCESSIBLE
* @size: size of free area to find
* @align: alignment of free area to find
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
@@ -104,13 +160,13 @@ bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
*
* Utility called from memblock_find_in_range_node(), find free area bottom-up.
*
- * RETURNS:
+ * Return:
* Found address on success, 0 on failure.
*/
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align, int nid,
- ulong flags)
+ enum memblock_flags flags)
{
phys_addr_t this_start, this_end, cand;
u64 i;
@@ -130,7 +186,8 @@ __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
/**
* __memblock_find_range_top_down - find free area utility, in top-down
* @start: start of candidate range
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
+ * %MEMBLOCK_ALLOC_ACCESSIBLE
* @size: size of free area to find
* @align: alignment of free area to find
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
@@ -138,13 +195,13 @@ __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
*
* Utility called from memblock_find_in_range_node(), find free area top-down.
*
- * RETURNS:
+ * Return:
* Found address on success, 0 on failure.
*/
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align, int nid,
- ulong flags)
+ enum memblock_flags flags)
{
phys_addr_t this_start, this_end, cand;
u64 i;
@@ -170,7 +227,8 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
* @size: size of free area to find
* @align: alignment of free area to find
* @start: start of candidate range
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
+ * %MEMBLOCK_ALLOC_ACCESSIBLE
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
* @flags: pick from blocks based on memory attributes
*
@@ -184,12 +242,13 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
*
* If bottom-up allocation failed, will try to allocate memory top-down.
*
- * RETURNS:
+ * Return:
* Found address on success, 0 on failure.
*/
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
phys_addr_t align, phys_addr_t start,
- phys_addr_t end, int nid, ulong flags)
+ phys_addr_t end, int nid,
+ enum memblock_flags flags)
{
phys_addr_t kernel_end, ret;
@@ -239,13 +298,14 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
/**
* memblock_find_in_range - find free area in given range
* @start: start of candidate range
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
+ * %MEMBLOCK_ALLOC_ACCESSIBLE
* @size: size of free area to find
* @align: alignment of free area to find
*
* Find @size free area aligned to @align in the specified range.
*
- * RETURNS:
+ * Return:
* Found address on success, 0 on failure.
*/
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
@@ -253,7 +313,7 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
phys_addr_t align)
{
phys_addr_t ret;
- ulong flags = choose_memblock_flags();
+ enum memblock_flags flags = choose_memblock_flags();
again:
ret = memblock_find_in_range_node(size, align, start, end,
@@ -289,7 +349,7 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
/**
- * Discard memory and reserved arrays if they were allocated
+ * memblock_discard - discard memory and reserved arrays if they were allocated
*/
void __init memblock_discard(void)
{
@@ -319,11 +379,11 @@ void __init memblock_discard(void)
*
* Double the size of the @type regions array. If memblock is being used to
* allocate memory for a new reserved regions array and there is a previously
- * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
+ * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
* waiting to be reserved, ensure the memory used by the new array does
* not overlap.
*
- * RETURNS:
+ * Return:
* 0 on success, -1 on failure.
*/
static int __init_memblock memblock_double_array(struct memblock_type *type,
@@ -468,13 +528,14 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
* @nid: node id of the new region
* @flags: flags of the new region
*
- * Insert new memblock region [@base,@base+@size) into @type at @idx.
+ * Insert new memblock region [@base, @base + @size) into @type at @idx.
* @type must already have extra room to accommodate the new region.
*/
static void __init_memblock memblock_insert_region(struct memblock_type *type,
int idx, phys_addr_t base,
phys_addr_t size,
- int nid, unsigned long flags)
+ int nid,
+ enum memblock_flags flags)
{
struct memblock_region *rgn = &type->regions[idx];
@@ -496,17 +557,17 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type,
* @nid: nid of the new region
* @flags: flags of the new region
*
- * Add new memblock region [@base,@base+@size) into @type. The new region
+ * Add new memblock region [@base, @base + @size) into @type. The new region
* is allowed to overlap with existing ones - overlaps don't affect already
* existing regions. @type is guaranteed to be minimal (all neighbouring
* compatible regions are merged) after the addition.
*
- * RETURNS:
+ * Return:
* 0 on success, -errno on failure.
*/
int __init_memblock memblock_add_range(struct memblock_type *type,
phys_addr_t base, phys_addr_t size,
- int nid, unsigned long flags)
+ int nid, enum memblock_flags flags)
{
bool insert = false;
phys_addr_t obase = base;
@@ -590,12 +651,35 @@ repeat:
}
}
+/**
+ * memblock_add_node - add new memblock region within a NUMA node
+ * @base: base address of the new region
+ * @size: size of the new region
+ * @nid: nid of the new region
+ *
+ * Add new memblock region [@base, @base + @size) to the "memory"
+ * type. See memblock_add_range() description for more details.
+ *
+ * Return:
+ * 0 on success, -errno on failure.
+ */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
int nid)
{
return memblock_add_range(&memblock.memory, base, size, nid, 0);
}
+/**
+ * memblock_add - add new memblock region
+ * @base: base address of the new region
+ * @size: size of the new region
+ *
+ * Add new memblock region [@base, @base + @size) to the "memory"
+ * type. See memblock_add_range() description for more details.
+ *
+ * Return:
+ * 0 on success, -errno on failure.
+ */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
phys_addr_t end = base + size - 1;
@@ -615,11 +699,11 @@ int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
* @end_rgn: out parameter for the end of isolated region
*
* Walk @type and ensure that regions don't cross the boundaries defined by
- * [@base,@base+@size). Crossing regions are split at the boundaries,
+ * [@base, @base + @size). Crossing regions are split at the boundaries,
* which may create at most two more regions. The index of the first
* region inside the range is returned in *@start_rgn and end in *@end_rgn.
*
- * RETURNS:
+ * Return:
* 0 on success, -errno on failure.
*/
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
@@ -730,10 +814,15 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
}
/**
+ * memblock_setclr_flag - set or clear flag for a memory region
+ * @base: base address of the region
+ * @size: size of the region
+ * @set: set or clear the flag
+ * @flag: the flag to update
*
* This function isolates region [@base, @base + @size), and sets/clears flag
*
- * Return 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
*/
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
phys_addr_t size, int set, int flag)
@@ -760,7 +849,7 @@ static int __init_memblock memblock_setclr_flag(phys_addr_t base,
* @base: the base phys addr of the region
* @size: the size of the region
*
- * Return 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
*/
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
@@ -772,7 +861,7 @@ int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
* @base: the base phys addr of the region
* @size: the size of the region
*
- * Return 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
*/
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
@@ -784,7 +873,7 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
* @base: the base phys addr of the region
* @size: the size of the region
*
- * Return 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
*/
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
@@ -798,7 +887,7 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
* @base: the base phys addr of the region
* @size: the size of the region
*
- * Return 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
*/
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
@@ -810,7 +899,7 @@ int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
* @base: the base phys addr of the region
* @size: the size of the region
*
- * Return 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
*/
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
@@ -875,7 +964,8 @@ void __init_memblock __next_reserved_mem_region(u64 *idx,
* As both region arrays are sorted, the function advances the two indices
* in lockstep and returns each intersection.
*/
-void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
+void __init_memblock __next_mem_range(u64 *idx, int nid,
+ enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b,
phys_addr_t *out_start,
@@ -970,9 +1060,6 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
/**
* __next_mem_range_rev - generic next function for for_each_*_range_rev()
*
- * Finds the next range from type_a which is not marked as unsuitable
- * in type_b.
- *
* @idx: pointer to u64 loop variable
* @nid: node selector, %NUMA_NO_NODE for all nodes
* @flags: pick from blocks based on memory attributes
@@ -982,9 +1069,13 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
* @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @out_nid: ptr to int for nid of the range, can be %NULL
*
+ * Finds the next range from type_a which is not marked as unsuitable
+ * in type_b.
+ *
* Reverse of __next_mem_range().
*/
-void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
+ enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b,
phys_addr_t *out_start,
@@ -1116,10 +1207,10 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
* @type: memblock type to set node ID for
* @nid: node ID to set
*
- * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
+ * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
* Regions which cross the area boundaries are split as necessary.
*
- * RETURNS:
+ * Return:
* 0 on success, -errno on failure.
*/
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
@@ -1142,7 +1233,8 @@ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
phys_addr_t align, phys_addr_t start,
- phys_addr_t end, int nid, ulong flags)
+ phys_addr_t end, int nid,
+ enum memblock_flags flags)
{
phys_addr_t found;
@@ -1164,7 +1256,7 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
- ulong flags)
+ enum memblock_flags flags)
{
return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
flags);
@@ -1172,14 +1264,14 @@ phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
phys_addr_t align, phys_addr_t max_addr,
- int nid, ulong flags)
+ int nid, enum memblock_flags flags)
{
return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
- ulong flags = choose_memblock_flags();
+ enum memblock_flags flags = choose_memblock_flags();
phys_addr_t ret;
again:
@@ -1243,7 +1335,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
* The allocation is performed from memory region limited by
* memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
*
- * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
+ * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0.
*
* The phys address of allocated boot memory block is converted to virtual and
* allocated memory is reset to 0.
@@ -1251,7 +1343,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
* In addition, function sets the min_count to 0 using kmemleak_alloc for
* allocated boot memory block, so that it is never reported as leaks.
*
- * RETURNS:
+ * Return:
* Virtual address of allocated memory block on success, NULL on failure.
*/
static void * __init memblock_virt_alloc_internal(
@@ -1261,7 +1353,7 @@ static void * __init memblock_virt_alloc_internal(
{
phys_addr_t alloc;
void *ptr;
- ulong flags = choose_memblock_flags();
+ enum memblock_flags flags = choose_memblock_flags();
if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
nid = NUMA_NO_NODE;
@@ -1336,7 +1428,7 @@ done:
* info), if enabled. Does not zero allocated memory, does not panic if request
* cannot be satisfied.
*
- * RETURNS:
+ * Return:
* Virtual address of allocated memory block on success, NULL on failure.
*/
void * __init memblock_virt_alloc_try_nid_raw(
@@ -1373,7 +1465,7 @@ void * __init memblock_virt_alloc_try_nid_raw(
* Public function, provides additional debug information (including caller
* info), if enabled. This function zeroes the allocated memory.
*
- * RETURNS:
+ * Return:
* Virtual address of allocated memory block on success, NULL on failure.
*/
void * __init memblock_virt_alloc_try_nid_nopanic(
@@ -1409,7 +1501,7 @@ void * __init memblock_virt_alloc_try_nid_nopanic(
* which provides debug information (including caller info), if enabled,
* and panics if the request can not be satisfied.
*
- * RETURNS:
+ * Return:
* Virtual address of allocated memory block on success, NULL on failure.
*/
void * __init memblock_virt_alloc_try_nid(
@@ -1453,9 +1545,9 @@ void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
memblock_remove_range(&memblock.reserved, base, size);
}
-/*
+/**
* __memblock_free_late - free bootmem block pages directly to buddy allocator
- * @addr: phys starting address of the boot memory block
+ * @base: phys starting address of the boot memory block
* @size: size of the boot memory block in bytes
*
* This is only useful when the bootmem allocator has already been torn
@@ -1667,9 +1759,9 @@ int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
* @base: base of region to check
* @size: size of region to check
*
- * Check if the region [@base, @base+@size) is a subset of a memory block.
+ * Check if the region [@base, @base + @size) is a subset of a memory block.
*
- * RETURNS:
+ * Return:
* 0 if false, non-zero if true
*/
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
@@ -1688,9 +1780,10 @@ bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t siz
* @base: base of region to check
* @size: size of region to check
*
- * Check if the region [@base, @base+@size) intersects a reserved memory block.
+ * Check if the region [@base, @base + @size) intersects a reserved
+ * memory block.
*
- * RETURNS:
+ * Return:
* True if they intersect, false if not.
*/
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
@@ -1737,7 +1830,7 @@ phys_addr_t __init_memblock memblock_get_current_limit(void)
static void __init_memblock memblock_dump(struct memblock_type *type)
{
phys_addr_t base, end, size;
- unsigned long flags;
+ enum memblock_flags flags;
int idx;
struct memblock_region *rgn;
@@ -1755,7 +1848,7 @@ static void __init_memblock memblock_dump(struct memblock_type *type)
snprintf(nid_buf, sizeof(nid_buf), " on node %d",
memblock_get_region_node(rgn));
#endif
- pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n",
+ pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
type->name, idx, &base, &end, &size, nid_buf, flags);
}
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b2173f7e5164..b836e7f00309 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5600,6 +5600,19 @@ out:
return ret;
}
+int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask, struct mem_cgroup **memcgp,
+ bool compound)
+{
+ struct mem_cgroup *memcg;
+ int ret;
+
+ ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound);
+ memcg = *memcgp;
+ mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
+ return ret;
+}
+
/**
* mem_cgroup_commit_charge - commit a page charge
* @page: page to charge
diff --git a/mm/memfd.c b/mm/memfd.c
index 27069518e3c5..2bb5e257080e 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -326,7 +326,7 @@ SYSCALL_DEFINE2(memfd_create,
goto err_fd;
}
file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
- file->f_flags |= O_RDWR | O_LARGEFILE;
+ file->f_flags |= O_LARGEFILE;
if (flags & MFD_ALLOW_SEALING) {
file_seals = memfd_file_seals_ptr(file);
diff --git a/mm/memory.c b/mm/memory.c
index c5e87a3a82ba..348279ff6e51 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -326,16 +326,20 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-/*
- * See the comment near struct mmu_table_batch.
- */
-
static void tlb_remove_table_smp_sync(void *arg)
{
- /* Simply deliver the interrupt */
+ struct mm_struct __maybe_unused *mm = arg;
+ /*
+ * On most architectures this does nothing. Simply delivering the
+ * interrupt is enough to prevent races with software page table
+ * walking like that done in get_user_pages_fast.
+ *
+ * See the comment near struct mmu_table_batch.
+ */
+ tlb_flush_remove_tables_local(mm);
}
-static void tlb_remove_table_one(void *table)
+static void tlb_remove_table_one(void *table, struct mmu_gather *tlb)
{
/*
* This isn't an RCU grace period and hence the page-tables cannot be
@@ -344,7 +348,7 @@ static void tlb_remove_table_one(void *table)
* It is however sufficient for software page-table walkers that rely on
* IRQ disabling. See the comment near struct mmu_table_batch.
*/
- smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+ smp_call_function(tlb_remove_table_smp_sync, tlb->mm, 1);
__tlb_remove_table(table);
}
@@ -365,6 +369,8 @@ void tlb_table_flush(struct mmu_gather *tlb)
{
struct mmu_table_batch **batch = &tlb->batch;
+ tlb_flush_remove_tables(tlb->mm);
+
if (*batch) {
call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL;
@@ -387,7 +393,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
if (*batch == NULL) {
*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
if (*batch == NULL) {
- tlb_remove_table_one(table);
+ tlb_remove_table_one(table, tlb);
return;
}
(*batch)->nr = 0;
@@ -1884,6 +1890,9 @@ int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
+ if (!pfn_modify_allowed(pfn, pgprot))
+ return -EACCES;
+
track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
@@ -1919,6 +1928,9 @@ static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
track_pfn_insert(vma, &pgprot, pfn);
+ if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
+ return -EACCES;
+
/*
* If we don't have pte special, then we have to use the pfn_valid()
* based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
@@ -1980,6 +1992,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
{
pte_t *pte;
spinlock_t *ptl;
+ int err = 0;
pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
@@ -1987,12 +2000,16 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
arch_enter_lazy_mmu_mode();
do {
BUG_ON(!pte_none(*pte));
+ if (!pfn_modify_allowed(pfn, prot)) {
+ err = -EACCES;
+ break;
+ }
set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);
- return 0;
+ return err;
}
static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -2001,6 +2018,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
{
pmd_t *pmd;
unsigned long next;
+ int err;
pfn -= addr >> PAGE_SHIFT;
pmd = pmd_alloc(mm, pud, addr);
@@ -2009,9 +2027,10 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
VM_BUG_ON(pmd_trans_huge(*pmd));
do {
next = pmd_addr_end(addr, end);
- if (remap_pte_range(mm, pmd, addr, next,
- pfn + (addr >> PAGE_SHIFT), prot))
- return -ENOMEM;
+ err = remap_pte_range(mm, pmd, addr, next,
+ pfn + (addr >> PAGE_SHIFT), prot);
+ if (err)
+ return err;
} while (pmd++, addr = next, addr != end);
return 0;
}
@@ -2022,6 +2041,7 @@ static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
{
pud_t *pud;
unsigned long next;
+ int err;
pfn -= addr >> PAGE_SHIFT;
pud = pud_alloc(mm, p4d, addr);
@@ -2029,9 +2049,10 @@ static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
- if (remap_pmd_range(mm, pud, addr, next,
- pfn + (addr >> PAGE_SHIFT), prot))
- return -ENOMEM;
+ err = remap_pmd_range(mm, pud, addr, next,
+ pfn + (addr >> PAGE_SHIFT), prot);
+ if (err)
+ return err;
} while (pud++, addr = next, addr != end);
return 0;
}
@@ -2042,6 +2063,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
{
p4d_t *p4d;
unsigned long next;
+ int err;
pfn -= addr >> PAGE_SHIFT;
p4d = p4d_alloc(mm, pgd, addr);
@@ -2049,9 +2071,10 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
return -ENOMEM;
do {
next = p4d_addr_end(addr, end);
- if (remap_pud_range(mm, p4d, addr, next,
- pfn + (addr >> PAGE_SHIFT), prot))
- return -ENOMEM;
+ err = remap_pud_range(mm, p4d, addr, next,
+ pfn + (addr >> PAGE_SHIFT), prot);
+ if (err)
+ return err;
} while (p4d++, addr = next, addr != end);
return 0;
}
@@ -2501,7 +2524,7 @@ static int wp_page_copy(struct vm_fault *vmf)
cow_user_page(new_page, old_page, vmf->address, vma);
}
- if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
+ if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
goto oom_free_new;
__SetPageUptodate(new_page);
@@ -3001,8 +3024,8 @@ int do_swap_page(struct vm_fault *vmf)
goto out_page;
}
- if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
- &memcg, false)) {
+ if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL,
+ &memcg, false)) {
ret = VM_FAULT_OOM;
goto out_page;
}
@@ -3163,7 +3186,8 @@ static int do_anonymous_page(struct vm_fault *vmf)
if (!page)
goto oom;
- if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
+ if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg,
+ false))
goto oom_free_page;
/*
@@ -3659,7 +3683,7 @@ static int do_cow_fault(struct vm_fault *vmf)
if (!vmf->cow_page)
return VM_FAULT_OOM;
- if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
+ if (mem_cgroup_try_charge_delay(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
&vmf->memcg, false)) {
put_page(vmf->cow_page);
return VM_FAULT_OOM;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 625608bc8962..6d331620b9e5 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -306,6 +306,42 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
return pages;
}
+static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
+ 0 : -EACCES;
+}
+
+static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
+ unsigned long addr, unsigned long next,
+ struct mm_walk *walk)
+{
+ return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
+ 0 : -EACCES;
+}
+
+static int prot_none_test(unsigned long addr, unsigned long next,
+ struct mm_walk *walk)
+{
+ return 0;
+}
+
+static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, unsigned long newflags)
+{
+ pgprot_t new_pgprot = vm_get_page_prot(newflags);
+ struct mm_walk prot_none_walk = {
+ .pte_entry = prot_none_pte_entry,
+ .hugetlb_entry = prot_none_hugetlb_entry,
+ .test_walk = prot_none_test,
+ .mm = current->mm,
+ .private = &new_pgprot,
+ };
+
+ return walk_page_range(start, end, &prot_none_walk);
+}
+
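
prot_none_walk() drives walk_page_range() with a small callback table; the first callback returning non-zero aborts the walk and its value becomes the result, which is how a single disallowed PFN turns into -EACCES before any mapping state is touched. A minimal sketch of that visitor pattern in general terms (generic code, not the mm API):

#include <stdio.h>

struct walk_ops {
	/* Return 0 to continue, non-zero to abort with that value. */
	int (*entry)(int item, void *private);
	void *private;
};

static int walk_range(int start, int end, const struct walk_ops *ops)
{
	int i, err;

	for (i = start; i < end; i++) {
		err = ops->entry(i, ops->private);
		if (err)
			return err; /* first failure wins, walk stops */
	}
	return 0;
}

static int reject_above(int item, void *private)
{
	int limit = *(int *)private;

	return item > limit ? -13 /* stand-in for -EACCES */ : 0;
}

int main(void)
{
	int limit = 7;
	struct walk_ops ops = { .entry = reject_above, .private = &limit };

	printf("walk 0..5  -> %d\n", walk_range(0, 5, &ops));  /* 0   */
	printf("walk 0..10 -> %d\n", walk_range(0, 10, &ops)); /* -13 */
	return 0;
}
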
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
unsigned long start, unsigned long end, unsigned long newflags)
@@ -324,6 +360,19 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
}
/*
+ * Do PROT_NONE PFN permission checks here when we can still
+ * bail out without undoing a lot of state. This is a rather
+ * uncommon case, so it doesn't need to be very optimized.
+ */
+ if (arch_has_pfn_modify_check() &&
+ (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
+ (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
+ error = prot_none_walk(vma, start, end, newflags);
+ if (error)
+ return error;
+ }
+
+ /*
* If we make a private mapping writable we increase our commit;
* but (without finer accounting) cannot reduce our commit if we
* make it unwritable again. hugetlb mapping were accounted for
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 9b02fda0886b..439af3b765a7 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -42,7 +42,7 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
{
void *ptr;
u64 addr;
- ulong flags = choose_memblock_flags();
+ enum memblock_flags flags = choose_memblock_flags();
if (limit > memblock.current_limit)
limit = memblock.current_limit;
@@ -72,7 +72,7 @@ again:
return ptr;
}
-/*
+/**
* free_bootmem_late - free bootmem pages directly to page allocator
* @addr: starting address of the range
* @size: size of the range in bytes
@@ -176,7 +176,7 @@ void __init reset_all_zones_managed_pages(void)
/**
* free_all_bootmem - release free pages to the buddy allocator
*
- * Returns the number of pages actually released.
+ * Return: the number of pages actually released.
*/
unsigned long __init free_all_bootmem(void)
{
@@ -193,7 +193,7 @@ unsigned long __init free_all_bootmem(void)
/**
* free_bootmem_node - mark a page range as usable
* @pgdat: node the range resides on
- * @physaddr: starting address of the range
+ * @physaddr: starting physical address of the range
* @size: size of the range in bytes
*
* Partial pages will be considered reserved and left as they are.
@@ -208,7 +208,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
/**
* free_bootmem - mark a page range as usable
- * @addr: starting address of the range
+ * @addr: starting physical address of the range
* @size: size of the range in bytes
*
* Partial pages will be considered reserved and left as they are.
@@ -256,7 +256,7 @@ restart:
*
* Allocation may happen on any node in the system.
*
- * Returns NULL on failure.
+ * Return: address of the allocated region or %NULL on failure.
*/
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
unsigned long goal)
@@ -293,6 +293,8 @@ static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
* Allocation may happen on any node in the system.
*
* The function panics if the request can not be satisfied.
+ *
+ * Return: address of the allocated region.
*/
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
unsigned long goal)
@@ -367,6 +369,8 @@ static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
* can not hold the requested memory.
*
* The function panics if the request can not be satisfied.
+ *
+ * Return: address of the allocated region.
*/
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
@@ -396,6 +400,8 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
* Allocation may happen on any node in the system.
*
* The function panics if the request cannot be satisfied.
+ *
+ * Return: address of the allocated region.
*/
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
unsigned long goal)
@@ -425,6 +431,8 @@ void * __init __alloc_bootmem_low_nopanic(unsigned long size,
* cannot hold the requested memory.
*
* The function panics if the request cannot be satisfied.
+ *
+ * Return: address of the allocated region.
*/
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
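
The hunks above convert free-form "Returns ..." prose into the kernel-doc "Return:" section, which the documentation build renders as a distinct field. A minimal kernel-doc comment in that style, for a hypothetical function (the name and parameter are invented for the example):

/**
 * example_bootmem_alloc - allocate an example region (hypothetical)
 * @size: size of the region in bytes
 *
 * The function panics if the request cannot be satisfied.
 *
 * Return: address of the allocated region or %NULL on failure.
 */
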
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a790ef4be74e..0922ef5d2e46 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -155,16 +155,17 @@ static inline void set_pcppage_migratetype(struct page *page, int migratetype)
* The following functions are used by the suspend/hibernate code to temporarily
* change gfp_allowed_mask in order to avoid using I/O during memory allocations
* while devices are suspended. To avoid races with the suspend/hibernate code,
- * they should always be called with pm_mutex held (gfp_allowed_mask also should
- * only be modified with pm_mutex held, unless the suspend/hibernate code is
- * guaranteed not to run in parallel with that modification).
+ * they should always be called with system_transition_mutex held
+ * (gfp_allowed_mask also should only be modified with system_transition_mutex
+ * held, unless the suspend/hibernate code is guaranteed not to run in parallel
+ * with that modification).
*/
static gfp_t saved_gfp_mask;
void pm_restore_gfp_mask(void)
{
- WARN_ON(!mutex_is_locked(&pm_mutex));
+ WARN_ON(!mutex_is_locked(&system_transition_mutex));
if (saved_gfp_mask) {
gfp_allowed_mask = saved_gfp_mask;
saved_gfp_mask = 0;
@@ -173,7 +174,7 @@ void pm_restore_gfp_mask(void)
void pm_restrict_gfp_mask(void)
{
- WARN_ON(!mutex_is_locked(&pm_mutex));
+ WARN_ON(!mutex_is_locked(&system_transition_mutex));
WARN_ON(saved_gfp_mask);
saved_gfp_mask = gfp_allowed_mask;
gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
@@ -6939,9 +6940,21 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
start = (void *)PAGE_ALIGN((unsigned long)start);
end = (void *)((unsigned long)end & PAGE_MASK);
for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
+ struct page *page = virt_to_page(pos);
+ void *direct_map_addr;
+
+ /*
+ * 'direct_map_addr' might be different from 'pos'
+ * because some architectures' virt_to_page()
+ * works with aliases. Getting the direct map
+ * address ensures that we get a _writeable_
+ * alias for the memset().
+ */
+ direct_map_addr = page_address(page);
if ((unsigned int)poison <= 0xFF)
- memset(pos, poison, PAGE_SIZE);
- free_reserved_page(virt_to_page(pos));
+ memset(direct_map_addr, poison, PAGE_SIZE);
+
+ free_reserved_page(page);
}
if (pages && s)
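
The comment in this hunk concerns virtual aliases: the address handed to free_reserved_area() may, on some architectures, be a read-only alias of the page, while the direct-map alias is writable. A small userspace analogy of resolving to the canonical writable alias before writing (everything here is invented for illustration; page_address(virt_to_page(pos)) is the real step in the patch):

#include <string.h>
#include <stdio.h>

static char backing[4096];			/* "direct map" */
static const char *ro_alias = backing;		/* read-only view */

static char *canonical(const char *alias)	/* ~ page_address() */
{
	return backing + (alias - ro_alias);
}

int main(void)
{
	/* write through the writable alias, not the caller's pointer */
	memset(canonical(ro_alias), 0xAA, sizeof(backing));
	printf("%02x\n", (unsigned char)backing[0]);
	return 0;
}
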
diff --git a/mm/page_io.c b/mm/page_io.c
index b41cf9644585..aafd19ec1db4 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -338,7 +338,8 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
ret = -ENOMEM;
goto out;
}
- bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
+ bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
+ bio_associate_blkcg_from_page(bio, page);
count_swpout_vm_event(page);
set_page_writeback(page);
unlock_page(page);
diff --git a/mm/readahead.c b/mm/readahead.c
index e273f0de3376..a59ea70527b9 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -19,6 +19,7 @@
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
+#include <linux/blk-cgroup.h>
#include "internal.h"
@@ -385,6 +386,7 @@ ondemand_readahead(struct address_space *mapping,
{
struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
unsigned long max_pages = ra->ra_pages;
+ unsigned long add_pages;
pgoff_t prev_offset;
/*
@@ -474,10 +476,17 @@ readit:
* Will this read hit the readahead marker made by itself?
* If so, trigger the readahead marker hit now, and merge
* the resulted next readahead window into the current one.
+ * Cap the merged window at the maximum allowed I/O pages, as above.
*/
if (offset == ra->start && ra->size == ra->async_size) {
- ra->async_size = get_next_ra_size(ra, max_pages);
- ra->size += ra->async_size;
+ add_pages = get_next_ra_size(ra, max_pages);
+ if (ra->size + add_pages <= max_pages) {
+ ra->async_size = add_pages;
+ ra->size += add_pages;
+ } else {
+ ra->size = max_pages;
+ ra->async_size = max_pages >> 1;
+ }
}
return ra_submit(ra, mapping, filp);
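
The hunk above stops the merged readahead window from exceeding the per-file maximum. A compilable model of the same arithmetic with made-up numbers: max_pages = 32, the current window is 24 pages, and get_next_ra_size() proposes 16 more, so instead of growing to 40 the window is clamped to 32 with async_size = 16.

#include <stdio.h>

struct ra_state { unsigned long size, async_size; };

static void merge_window(struct ra_state *ra, unsigned long add_pages,
			 unsigned long max_pages)
{
	if (ra->size + add_pages <= max_pages) {
		ra->async_size = add_pages;
		ra->size += add_pages;
	} else {
		/* cap at max_pages, keep half as the async portion */
		ra->size = max_pages;
		ra->async_size = max_pages >> 1;
	}
}

int main(void)
{
	struct ra_state ra = { .size = 24, .async_size = 24 };

	merge_window(&ra, 16, 32);
	printf("size=%lu async=%lu\n", ra.size, ra.async_size); /* 32 16 */
	return 0;
}
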
@@ -505,6 +514,9 @@ void page_cache_sync_readahead(struct address_space *mapping,
if (!ra->ra_pages)
return;
+ if (blk_cgroup_congested())
+ return;
+
/* be dumb */
if (filp && (filp->f_mode & FMODE_RANDOM)) {
force_page_cache_readahead(mapping, filp, offset, req_size);
@@ -555,6 +567,9 @@ page_cache_async_readahead(struct address_space *mapping,
if (inode_read_congested(mapping->host))
return;
+ if (blk_cgroup_congested())
+ return;
+
/* do read-ahead */
ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 41b9bbf24e16..06ebe17bb924 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1239,8 +1239,8 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
* the shmem_swaplist_mutex which might hold up shmem_writepage().
* Charged back to the user (not to caller) when swap account is used.
*/
- error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg,
- false);
+ error = mem_cgroup_try_charge_delay(page, current->mm, GFP_KERNEL,
+ &memcg, false);
if (error)
goto out;
/* No radix_tree_preload: swap entry keeps a place for page in tree */
@@ -1713,7 +1713,7 @@ repeat:
goto failed;
}
- error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg,
+ error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
false);
if (!error) {
error = shmem_add_to_page_cache(page, mapping, index,
@@ -1819,7 +1819,7 @@ alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, inode,
if (sgp == SGP_WRITE)
__SetPageReferenced(page);
- error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg,
+ error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
PageTransHuge(page));
if (error)
goto unacct;
@@ -2292,7 +2292,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
__SetPageSwapBacked(page);
__SetPageUptodate(page);
- ret = mem_cgroup_try_charge(page, dst_mm, gfp, &memcg, false);
+ ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
if (ret)
goto out_release;
@@ -3897,18 +3897,11 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
/* common code */
-static const struct dentry_operations anon_ops = {
- .d_dname = simple_dname
-};
-
static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
unsigned long flags, unsigned int i_flags)
{
- struct file *res;
struct inode *inode;
- struct path path;
- struct super_block *sb;
- struct qstr this;
+ struct file *res;
if (IS_ERR(mnt))
return ERR_CAST(mnt);
@@ -3919,41 +3912,21 @@ static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, l
if (shmem_acct_size(flags, size))
return ERR_PTR(-ENOMEM);
- res = ERR_PTR(-ENOMEM);
- this.name = name;
- this.len = strlen(name);
- this.hash = 0; /* will go */
- sb = mnt->mnt_sb;
- path.mnt = mntget(mnt);
- path.dentry = d_alloc_pseudo(sb, &this);
- if (!path.dentry)
- goto put_memory;
- d_set_d_op(path.dentry, &anon_ops);
-
- res = ERR_PTR(-ENOSPC);
- inode = shmem_get_inode(sb, NULL, S_IFREG | 0777, 0, flags);
- if (!inode)
- goto put_memory;
-
+ inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
+ flags);
+ if (unlikely(!inode)) {
+ shmem_unacct_size(flags, size);
+ return ERR_PTR(-ENOSPC);
+ }
inode->i_flags |= i_flags;
- d_instantiate(path.dentry, inode);
inode->i_size = size;
clear_nlink(inode); /* It is unlinked */
res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
+ if (!IS_ERR(res))
+ res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
+ &shmem_file_operations);
if (IS_ERR(res))
- goto put_path;
-
- res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
- &shmem_file_operations);
- if (IS_ERR(res))
- goto put_path;
-
- return res;
-
-put_memory:
- shmem_unacct_size(flags, size);
-put_path:
- path_put(&path);
+ iput(inode);
return res;
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2cc2972eedaf..8837b22c848d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2909,6 +2909,35 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
return 0;
}
+
+/*
+ * Find out how many pages are allowed for a single swap device. There
+ * are two limiting factors:
+ * 1) the number of bits for the swap offset in the swp_entry_t type, and
+ * 2) the number of bits in the swap pte, as defined by the different
+ * architectures.
+ *
+ * In order to find the largest possible bit mask, a swap entry with
+ * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
+ * decoded to a swp_entry_t again, and finally the swap offset is
+ * extracted.
+ *
+ * This will mask all the bits from the initial ~0UL mask that can't
+ * be encoded in either the swp_entry_t or the architecture definition
+ * of a swap pte.
+ */
+unsigned long generic_max_swapfile_size(void)
+{
+ return swp_offset(pte_to_swp_entry(
+ swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+}
+
+/* Can be overridden by an architecture for additional checks. */
+__weak unsigned long max_swapfile_size(void)
+{
+ return generic_max_swapfile_size();
+}
+
static unsigned long read_swap_header(struct swap_info_struct *p,
union swap_header *swap_header,
struct inode *inode)
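
generic_max_swapfile_size() relies on a round trip: encode the all-ones offset into a swap pte, decode it back, and whatever bits survive define the per-device limit. A userspace model, assuming for illustration an architecture whose swap pte keeps only 50 offset bits (the constant is invented; the real encode/decode are the arch's swp_entry_to_pte()/pte_to_swp_entry()):

#include <stdio.h>

#define ARCH_SWP_OFFSET_BITS 50	/* invented for this example */

static unsigned long swp_entry_to_pte(unsigned long off)
{
	/* bits the pte cannot hold are lost at encode time */
	return off & ((1UL << ARCH_SWP_OFFSET_BITS) - 1);
}

static unsigned long pte_to_swp_entry(unsigned long pte)
{
	return pte;	/* decoding keeps only what the pte preserved */
}

int main(void)
{
	unsigned long max = pte_to_swp_entry(swp_entry_to_pte(~0UL)) + 1;

	printf("max pages per swap device: %#lx\n", max);
	return 0;
}
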
@@ -2944,22 +2973,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
p->cluster_next = 1;
p->cluster_nr = 0;
- /*
- * Find out how many pages are allowed for a single swap
- * device. There are two limiting factors: 1) the number
- * of bits for the swap offset in the swp_entry_t type, and
- * 2) the number of bits in the swap pte as defined by the
- * different architectures. In order to find the
- * largest possible bit mask, a swap entry with swap type 0
- * and swap offset ~0UL is created, encoded to a swap pte,
- * decoded to a swp_entry_t again, and finally the swap
- * offset is extracted. This will mask all the bits from
- * the initial ~0UL mask that can't be encoded in either
- * the swp_entry_t or the architecture definition of a
- * swap pte.
- */
- maxpages = swp_offset(pte_to_swp_entry(
- swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+ maxpages = max_swapfile_size();
last_page = swap_header->info.last_page;
if (!last_page) {
pr_warn("Empty swap-file\n");
@@ -3731,6 +3745,37 @@ static void free_swap_count_continuations(struct swap_info_struct *si)
}
}
+#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
+void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
+ gfp_t gfp_mask)
+{
+ struct swap_info_struct *si, *next;
+ if (!(gfp_mask & __GFP_IO) || !memcg)
+ return;
+
+ if (!blk_cgroup_congested())
+ return;
+
+ /*
+ * We've already scheduled a throttle, avoid taking the global swap
+ * lock.
+ */
+ if (current->throttle_queue)
+ return;
+
+ spin_lock(&swap_avail_lock);
+ plist_for_each_entry_safe(si, next, &swap_avail_heads[node],
+ avail_lists[node]) {
+ if (si->bdev) {
+ blkcg_schedule_throttle(bdev_get_queue(si->bdev),
+ true);
+ break;
+ }
+ }
+ spin_unlock(&swap_avail_lock);
+}
+#endif
+
static int __init swapfile_init(void)
{
int nid;
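
The ordering in mem_cgroup_throttle_swaprate() matters: all the cheap, lockless bail-outs (no __GFP_IO, no memcg, not congested, throttle already scheduled) run before swap_avail_lock is taken. A minimal compilable model of that shape, with invented stub predicates:

#include <stdbool.h>
#include <stdio.h>

static bool io_allowed(void)        { return true; }
static bool in_cgroup(void)         { return true; }
static bool congested(void)         { return true; }
static bool already_throttled(void) { return false; }

static void throttle_model(void)
{
	/* cheap, lockless bail-outs first ... */
	if (!io_allowed() || !in_cgroup())
		return;
	if (!congested())
		return;
	if (already_throttled())
		return;
	/* ... only then pay for the global lock */
	printf("take swap_avail_lock, throttle on first device with a bdev\n");
}

int main(void)
{
	throttle_model();
	return 0;
}
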
diff --git a/mm/usercopy.c b/mm/usercopy.c
index e9e9325f7638..852eb4e53f06 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -20,6 +20,8 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
+#include <linux/atomic.h>
+#include <linux/jump_label.h>
#include <asm/sections.h>
/*
@@ -240,6 +242,8 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
}
}
+static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);
+
/*
* Validates that the given object is:
* - not bogus address
@@ -248,6 +252,9 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
*/
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
+ if (static_branch_unlikely(&bypass_usercopy_checks))
+ return;
+
/* Skip all tests if size is zero. */
if (!n)
return;
@@ -279,3 +286,21 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
+
+static bool enable_checks __initdata = true;
+
+static int __init parse_hardened_usercopy(char *str)
+{
+ return strtobool(str, &enable_checks);
+}
+
+__setup("hardened_usercopy=", parse_hardened_usercopy);
+
+static int __init set_hardened_usercopy(void)
+{
+ if (enable_checks == false)
+ static_branch_enable(&bypass_usercopy_checks);
+ return 1;
+}
+
+late_initcall(set_hardened_usercopy);
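
The new hardened_usercopy= boot parameter (parsed with strtobool, so values like 0/1/y/n work) flips a static key once at late_initcall time; after that the fast path of __check_object_size() costs a single patched branch when the checks are disabled. A userspace model of that bypass shape, where a boolean decided once at startup stands in for the static key (the command-line handling below is invented for the example):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool bypass_checks;	/* ~ bypass_usercopy_checks static key */

static void check_object_size(const void *ptr, unsigned long n)
{
	if (bypass_checks)
		return;		/* ~ static_branch_unlikely() early exit */
	if (!n)
		return;
	printf("validating %lu byte(s) at %p\n", n, ptr);
}

int main(int argc, char **argv)
{
	/* ~ hardened_usercopy=0 on the kernel command line */
	bypass_checks = argc > 1 && !strcmp(argv[1], "0");
	check_object_size("abc", 3);
	return 0;
}
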
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 6b1042e21656..52fad5dad9f7 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -770,6 +770,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
hdr.hop_limit, &hdr.daddr);
skb_push(skb, sizeof(hdr));
+ skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_copy_to_linear_data(skb, &hdr, sizeof(hdr));
diff --git a/net/8021q/Makefile b/net/8021q/Makefile
index 9b703454b93e..e05d4d7aab35 100644
--- a/net/8021q/Makefile
+++ b/net/8021q/Makefile
@@ -9,4 +9,3 @@ obj-$(CONFIG_VLAN_8021Q) += 8021q.o
8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o
8021q-$(CONFIG_VLAN_8021Q_MVRP) += vlan_mvrp.o
8021q-$(CONFIG_PROC_FS) += vlanproc.o
-
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8ccee3d01822..5e9950453955 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -647,13 +647,14 @@ out:
return err;
}
-static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *vlan_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
{
- struct sk_buff *p, **pp = NULL;
- struct vlan_hdr *vhdr;
- unsigned int hlen, off_vlan;
const struct packet_offload *ptype;
+ unsigned int hlen, off_vlan;
+ struct sk_buff *pp = NULL;
+ struct vlan_hdr *vhdr;
+ struct sk_buff *p;
__be16 type;
int flush = 1;
@@ -675,7 +676,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
flush = 0;
- for (p = *head; p; p = p->next) {
+ list_for_each_entry(p, head, list) {
struct vlan_hdr *vhdr2;
if (!NAPI_GRO_CB(p)->same_flow)
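
This conversion moves the GRO "held packets" chain from a hand-rolled struct sk_buff *next pointer to a doubly linked list_head, so handlers iterate with list_for_each_entry() instead of for (p = *head; p; p = p->next). A self-contained userspace sketch of that intrusive-list iteration (the pkt type and list macros below are minimal stand-ins for the kernel's, written for illustration; they rely on the GNU typeof extension as the kernel does):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

struct pkt { int id; struct list_head list; };

int main(void)
{
	struct pkt a = { .id = 1 }, b = { .id = 2 };
	struct list_head head = { &a.list, &b.list };

	a.list = (struct list_head){ &b.list, &head };
	b.list = (struct list_head){ &head, &a.list };

	struct pkt *p;
	list_for_each_entry(p, &head, list)
		printf("pkt %d\n", p->id);
	return 0;
}
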
diff --git a/net/9p/mod.c b/net/9p/mod.c
index eb9777f05755..253ba824a325 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -171,13 +171,11 @@ void v9fs_put_trans(struct p9_trans_module *m)
*/
static int __init init_p9(void)
{
- int ret = 0;
-
p9_error_init();
pr_info("Installing 9P2000 support\n");
p9_trans_fd_init();
- return ret;
+ return 0;
}
/**
diff --git a/net/Kconfig b/net/Kconfig
index f738a6f27665..228dfa382eec 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -12,7 +12,7 @@ menuconfig NET
The reason is that some programs need kernel networking support even
when running on a stand-alone machine that isn't connected to any
other computer.
-
+
If you are upgrading from an older kernel, you
should consider updating your networking tools too because changes
in the kernel and the tools often go hand in hand. The tools are
diff --git a/net/atm/common.c b/net/atm/common.c
index a7a68e509628..9f8cb0d2e71e 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -653,7 +653,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
struct atm_vcc *vcc;
__poll_t mask;
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, wait);
mask = 0;
vcc = ATM_SD(sock);
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index b93cc0f18292..46d6cd9a36ae 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -307,9 +307,3 @@ void mpc_proc_clean(void)
}
#endif /* CONFIG_PROC_FS */
-
-
-
-
-
-
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index af8c4b38b746..d84227d75717 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -244,7 +244,7 @@ static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
* the packet count limit, so...
*/
if (atm_may_send(pvcc->atmvcc, size) &&
- atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
+ atomic_inc_not_zero(&pvcc->inflight))
return 1;
/*
diff --git a/net/ax25/ax25_addr.c b/net/ax25/ax25_addr.c
index ac2542b7be88..a14cfa736b63 100644
--- a/net/ax25/ax25_addr.c
+++ b/net/ax25/ax25_addr.c
@@ -304,4 +304,3 @@ void ax25_digi_invert(const ax25_digi *in, ax25_digi *out)
}
}
}
-
diff --git a/net/ax25/ax25_ds_in.c b/net/ax25/ax25_ds_in.c
index 891596e74278..488fc2d7085a 100644
--- a/net/ax25/ax25_ds_in.c
+++ b/net/ax25/ax25_ds_in.c
@@ -299,4 +299,3 @@ int ax25_ds_frame_in(ax25_cb *ax25, struct sk_buff *skb, int type)
return queued;
}
-
diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c
index 28827e81ba2b..bc0329f43013 100644
--- a/net/ax25/ax25_ds_subr.c
+++ b/net/ax25/ax25_ds_subr.c
@@ -205,4 +205,3 @@ void ax25_dama_off(ax25_cb *ax25)
ax25->condition &= ~AX25_COND_DAMA_MODE;
ax25_dev_dama_off(ax25->ax25_dev);
}
-
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 183b1c583d56..70417e9b932d 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -249,4 +249,3 @@ const struct header_ops ax25_header_ops = {
EXPORT_SYMBOL(ax25_header_ops);
EXPORT_SYMBOL(ax25_ip_xmit);
-
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index b11a5f466fcc..3e5afc8dc93e 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -394,4 +394,3 @@ int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
}
return 0;
}
-
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index de8034d80623..361116f77cb9 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -24,7 +24,6 @@ config BATMAN_ADV
depends on NET
select CRC16
select LIBCRC32C
- default n
help
B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
a routing protocol for multi-hop ad-hoc mesh networks. The
@@ -33,7 +32,7 @@ config BATMAN_ADV
tools.
config BATMAN_ADV_BATMAN_V
- bool "B.A.T.M.A.N. V protocol (experimental)"
+ bool "B.A.T.M.A.N. V protocol"
depends on BATMAN_ADV && !(CFG80211=m && BATMAN_ADV=y)
default y
help
@@ -60,7 +59,7 @@ config BATMAN_ADV_BLA
config BATMAN_ADV_DAT
bool "Distributed ARP Table"
depends on BATMAN_ADV && INET
- default n
+ default y
help
This option enables DAT (Distributed ARP Table), a DHT based
mechanism that increases ARP reliability on sparse wireless
@@ -70,7 +69,6 @@ config BATMAN_ADV_DAT
config BATMAN_ADV_NC
bool "Network Coding"
depends on BATMAN_ADV
- default n
help
This option enables network coding, a mechanism that aims to
increase the overall network throughput by fusing multiple
@@ -84,7 +82,6 @@ config BATMAN_ADV_NC
config BATMAN_ADV_MCAST
bool "Multicast optimisation"
depends on BATMAN_ADV && INET && !(BRIDGE=m && BATMAN_ADV=y)
- default n
help
This option enables the multicast optimisation which aims to
reduce the air overhead while improving the reliability of
@@ -94,7 +91,6 @@ config BATMAN_ADV_DEBUGFS
bool "batman-adv debugfs entries"
depends on BATMAN_ADV
depends on DEBUG_FS
- default n
help
Enable this to export routing related debug tables via debugfs.
The information for each soft-interface and used hard-interface can be
diff --git a/net/batman-adv/bat_iv_ogm.h b/net/batman-adv/bat_iv_ogm.h
index 317cafd302cf..3dc6a7a43eb7 100644
--- a/net/batman-adv/bat_iv_ogm.h
+++ b/net/batman-adv/bat_iv_ogm.h
@@ -16,11 +16,11 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _BATMAN_ADV_BATADV_IV_OGM_H_
-#define _BATMAN_ADV_BATADV_IV_OGM_H_
+#ifndef _NET_BATMAN_ADV_BAT_IV_OGM_H_
+#define _NET_BATMAN_ADV_BAT_IV_OGM_H_
#include "main.h"
int batadv_iv_init(void);
-#endif /* _BATMAN_ADV_BATADV_IV_OGM_H_ */
+#endif /* _NET_BATMAN_ADV_BAT_IV_OGM_H_ */
diff --git a/net/batman-adv/bat_v_ogm.h b/net/batman-adv/bat_v_ogm.h
index ed36c5e79fde..e5be14c908c6 100644
--- a/net/batman-adv/bat_v_ogm.h
+++ b/net/batman-adv/bat_v_ogm.h
@@ -16,8 +16,8 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _BATMAN_ADV_BATADV_V_OGM_H_
-#define _BATMAN_ADV_BATADV_V_OGM_H_
+#ifndef _NET_BATMAN_ADV_BAT_V_OGM_H_
+#define _NET_BATMAN_ADV_BAT_V_OGM_H_
#include "main.h"
@@ -34,4 +34,4 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface);
int batadv_v_ogm_packet_recv(struct sk_buff *skb,
struct batadv_hard_iface *if_incoming);
-#endif /* _BATMAN_ADV_BATADV_V_OGM_H_ */
+#endif /* _NET_BATMAN_ADV_BAT_V_OGM_H_ */
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index a2de5a44bd41..ff9659af6b91 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1449,7 +1449,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
* detection frames. Set the locally administered bit to avoid
* collisions with users mac addresses.
*/
- random_ether_addr(bat_priv->bla.loopdetect_addr);
+ eth_random_addr(bat_priv->bla.loopdetect_addr);
bat_priv->bla.loopdetect_addr[0] = 0xba;
bat_priv->bla.loopdetect_addr[1] = 0xbe;
bat_priv->bla.loopdetect_lasttime = jiffies;
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 87479c60670e..3cb82378300b 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -118,7 +118,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
#ifdef CONFIG_BATMAN_ADV_DAT
/**
- * batadv_dat_cache_open() - Prepare file handler for reads from dat_chache
+ * batadv_dat_cache_open() - Prepare file handler for reads from dat_cache
* @inode: inode which was opened
* @file: file handle to be initialized
*
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 716e5b43acfa..1d295da3e342 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1339,7 +1339,11 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
return false;
}
-static void _batadv_purge_orig(struct batadv_priv *bat_priv)
+/**
+ * batadv_purge_orig_ref() - Purge all outdated originators
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node_tmp;
@@ -1385,21 +1389,12 @@ static void batadv_purge_orig(struct work_struct *work)
delayed_work = to_delayed_work(work);
bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
- _batadv_purge_orig(bat_priv);
+ batadv_purge_orig_ref(bat_priv);
queue_delayed_work(batadv_event_workqueue,
&bat_priv->orig_work,
msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}
-/**
- * batadv_purge_orig_ref() - Purge all outdated originators
- * @bat_priv: the bat priv with all the soft interface information
- */
-void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
-{
- _batadv_purge_orig(bat_priv);
-}
-
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 360357f83f20..343d304851a5 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -43,12 +43,13 @@ struct seq_file;
#ifdef CONFIG_BATMAN_ADV_DAT
/**
- * batadv_dat_addr_t - it is the type used for all DHT addresses. If it is
- * changed, BATADV_DAT_ADDR_MAX is changed as well.
+ * typedef batadv_dat_addr_t - type used for all DHT addresses
+ *
+ * If it is changed, BATADV_DAT_ADDR_MAX is changed as well.
*
* *Please be careful: batadv_dat_addr_t must be UNSIGNED*
*/
-#define batadv_dat_addr_t u16
+typedef u16 batadv_dat_addr_t;
#endif /* CONFIG_BATMAN_ADV_DAT */
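
Replacing the #define with a typedef keeps the same 16-bit representation but gives the compiler a real type to check and lets tools resolve the name. A trivial compilable illustration (standalone, outside the batman-adv tree):

#include <stdio.h>

typedef unsigned short batadv_dat_addr_t;	/* a real type, not text */

int main(void)
{
	batadv_dat_addr_t addr = 0xffff;

	/* sizeof() and diagnostics now follow the named type */
	printf("DAT addr max: %u (size %zu)\n", addr, sizeof(addr));
	return 0;
}
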
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 3264e1873219..deacc52d7ff1 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -159,7 +159,7 @@ void bt_accept_enqueue(struct sock *parent, struct sock *sk)
BT_DBG("parent %p, sk %p", parent, sk);
sock_hold(sk);
- lock_sock(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
bt_sk(sk)->parent = parent;
release_sock(sk);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 45ff5dc124cc..bd4978ce8c45 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -748,11 +748,30 @@ static bool conn_use_rpa(struct hci_conn *conn)
return hci_dev_test_flag(hdev, HCI_PRIVACY);
}
+static void set_ext_conn_params(struct hci_conn *conn,
+ struct hci_cp_le_ext_conn_param *p)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ memset(p, 0, sizeof(*p));
+
+ /* Set window to be the same value as the interval to
+ * enable continuous scanning.
+ */
+ p->scan_interval = cpu_to_le16(hdev->le_scan_interval);
+ p->scan_window = p->scan_interval;
+ p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
+ p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
+ p->conn_latency = cpu_to_le16(conn->le_conn_latency);
+ p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
+ p->min_ce_len = cpu_to_le16(0x0000);
+ p->max_ce_len = cpu_to_le16(0x0000);
+}
+
static void hci_req_add_le_create_conn(struct hci_request *req,
struct hci_conn *conn,
bdaddr_t *direct_rpa)
{
- struct hci_cp_le_create_conn cp;
struct hci_dev *hdev = conn->hdev;
u8 own_addr_type;
@@ -775,25 +794,71 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
return;
}
- memset(&cp, 0, sizeof(cp));
+ if (use_ext_conn(hdev)) {
+ struct hci_cp_le_ext_create_conn *cp;
+ struct hci_cp_le_ext_conn_param *p;
+ u8 data[sizeof(*cp) + sizeof(*p) * 3];
+ u32 plen;
- /* Set window to be the same value as the interval to enable
- * continuous scanning.
- */
- cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
- cp.scan_window = cp.scan_interval;
+ cp = (void *) data;
+ p = (void *) cp->data;
+
+ memset(cp, 0, sizeof(*cp));
- bacpy(&cp.peer_addr, &conn->dst);
- cp.peer_addr_type = conn->dst_type;
- cp.own_address_type = own_addr_type;
- cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
- cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
- cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
- cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
- cp.min_ce_len = cpu_to_le16(0x0000);
- cp.max_ce_len = cpu_to_le16(0x0000);
+ bacpy(&cp->peer_addr, &conn->dst);
+ cp->peer_addr_type = conn->dst_type;
+ cp->own_addr_type = own_addr_type;
- hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
+ plen = sizeof(*cp);
+
+ if (scan_1m(hdev)) {
+ cp->phys |= LE_SCAN_PHY_1M;
+ set_ext_conn_params(conn, p);
+
+ p++;
+ plen += sizeof(*p);
+ }
+
+ if (scan_2m(hdev)) {
+ cp->phys |= LE_SCAN_PHY_2M;
+ set_ext_conn_params(conn, p);
+
+ p++;
+ plen += sizeof(*p);
+ }
+
+ if (scan_coded(hdev)) {
+ cp->phys |= LE_SCAN_PHY_CODED;
+ set_ext_conn_params(conn, p);
+
+ plen += sizeof(*p);
+ }
+
+ hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data);
+
+ } else {
+ struct hci_cp_le_create_conn cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ /* Set window to be the same value as the interval to enable
+ * continuous scanning.
+ */
+ cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
+ cp.scan_window = cp.scan_interval;
+
+ bacpy(&cp.peer_addr, &conn->dst);
+ cp.peer_addr_type = conn->dst_type;
+ cp.own_address_type = own_addr_type;
+ cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
+ cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
+ cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
+ cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
+ cp.min_ce_len = cpu_to_le16(0x0000);
+ cp.max_ce_len = cpu_to_le16(0x0000);
+
+ hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
+ }
conn->state = BT_CONNECT;
clear_bit(HCI_CONN_SCANNING, &conn->flags);
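
In the extended branch above, data[] reserves room for one fixed header plus up to three per-PHY parameter blocks, and plen grows by one block for each PHY (1M, 2M, coded) the controller scans on. A compilable model of that length computation; the struct layouts here are invented placeholders (the real HCI structs are packed wire formats):

#include <stdio.h>

struct hdr   { unsigned char phys; /* ... */ };
struct param { unsigned short scan_interval, scan_window; /* ... */ };

int main(void)
{
	int phys_enabled = 2;	/* say, 1M + coded */
	unsigned long plen = sizeof(struct hdr)
			     + (unsigned long)phys_enabled * sizeof(struct param);

	printf("plen = %lu\n", plen);
	return 0;
}
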
@@ -803,35 +868,81 @@ static void hci_req_directed_advertising(struct hci_request *req,
struct hci_conn *conn)
{
struct hci_dev *hdev = req->hdev;
- struct hci_cp_le_set_adv_param cp;
u8 own_addr_type;
u8 enable;
- /* Clear the HCI_LE_ADV bit temporarily so that the
- * hci_update_random_address knows that it's safe to go ahead
- * and write a new random address. The flag will be set back on
- * as soon as the SET_ADV_ENABLE HCI command completes.
- */
- hci_dev_clear_flag(hdev, HCI_LE_ADV);
+ if (ext_adv_capable(hdev)) {
+ struct hci_cp_le_set_ext_adv_params cp;
+ bdaddr_t random_addr;
- /* Set require_privacy to false so that the remote device has a
- * chance of identifying us.
- */
- if (hci_update_random_address(req, false, conn_use_rpa(conn),
- &own_addr_type) < 0)
- return;
+ /* Set require_privacy to false so that the remote device has a
+ * chance of identifying us.
+ */
+ if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
+ &own_addr_type, &random_addr) < 0)
+ return;
- memset(&cp, 0, sizeof(cp));
- cp.type = LE_ADV_DIRECT_IND;
- cp.own_address_type = own_addr_type;
- cp.direct_addr_type = conn->dst_type;
- bacpy(&cp.direct_addr, &conn->dst);
- cp.channel_map = hdev->le_adv_channel_map;
+ memset(&cp, 0, sizeof(cp));
- hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
+ cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
+ cp.own_addr_type = own_addr_type;
+ cp.channel_map = hdev->le_adv_channel_map;
+ cp.tx_power = HCI_TX_POWER_INVALID;
+ cp.primary_phy = HCI_ADV_PHY_1M;
+ cp.secondary_phy = HCI_ADV_PHY_1M;
+ cp.handle = 0; /* Use instance 0 for directed adv */
+ cp.peer_addr_type = conn->dst_type;
+ bacpy(&cp.peer_addr, &conn->dst);
+
+ hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
+
+ if (own_addr_type == ADDR_LE_DEV_RANDOM &&
+ bacmp(&random_addr, BDADDR_ANY) &&
+ bacmp(&random_addr, &hdev->random_addr)) {
+ struct hci_cp_le_set_adv_set_rand_addr cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.handle = 0;
+ bacpy(&cp.bdaddr, &random_addr);
+
+ hci_req_add(req,
+ HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
+ sizeof(cp), &cp);
+ }
- enable = 0x01;
- hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+ __hci_req_enable_ext_advertising(req);
+ } else {
+ struct hci_cp_le_set_adv_param cp;
+
+ /* Clear the HCI_LE_ADV bit temporarily so that the
+ * hci_update_random_address knows that it's safe to go ahead
+ * and write a new random address. The flag will be set back on
+ * as soon as the SET_ADV_ENABLE HCI command completes.
+ */
+ hci_dev_clear_flag(hdev, HCI_LE_ADV);
+
+ /* Set require_privacy to false so that the remote device has a
+ * chance of identifying us.
+ */
+ if (hci_update_random_address(req, false, conn_use_rpa(conn),
+ &own_addr_type) < 0)
+ return;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.type = LE_ADV_DIRECT_IND;
+ cp.own_address_type = own_addr_type;
+ cp.direct_addr_type = conn->dst_type;
+ bacpy(&cp.direct_addr, &conn->dst);
+ cp.channel_map = hdev->le_adv_channel_map;
+
+ hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
+
+ enable = 0x01;
+ hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
+ &enable);
+ }
conn->state = BT_CONNECT;
}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index ee8ef1228263..74b29c7d841c 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -695,11 +695,42 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[35] & (0x20 | 0x40))
events[1] |= 0x08; /* LE PHY Update Complete */
+ /* If the controller supports LE Set Extended Scan Parameters
+ * and LE Set Extended Scan Enable commands, enable the
+ * corresponding event.
+ */
+ if (use_ext_scan(hdev))
+ events[1] |= 0x10; /* LE Extended Advertising
+ * Report
+ */
+
+ /* If the controller supports the LE Extended Create Connection
+ * command, enable the corresponding event.
+ */
+ if (use_ext_conn(hdev))
+ events[1] |= 0x02; /* LE Enhanced Connection
+ * Complete
+ */
+
+ /* If the controller supports the LE Extended Advertising
+ * command, enable the corresponding event.
+ */
+ if (ext_adv_capable(hdev))
+ events[2] |= 0x02; /* LE Advertising Set
+ * Terminated
+ */
+
hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
events);
- if (hdev->commands[25] & 0x40) {
- /* Read LE Advertising Channel TX Power */
+ /* Read LE Advertising Channel TX Power */
+ if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
+ /* The HCI spec forbids mixing legacy and extended
+ * advertising commands, and READ_ADV_TX_POWER is one of
+ * the affected commands. So do not call it if extended
+ * advertising is supported, otherwise the controller will
+ * return COMMAND_DISALLOWED for extended commands.
+ */
hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
}
@@ -714,6 +745,17 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
}
+ if (hdev->commands[34] & 0x40) {
+ /* Read LE Resolving List Size */
+ hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
+ 0, NULL);
+ }
+
+ if (hdev->commands[34] & 0x20) {
+ /* Clear LE Resolving List */
+ hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
+ }
+
if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
/* Read LE Maximum Data Length */
hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
@@ -722,6 +764,12 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
}
+ if (ext_adv_capable(hdev)) {
+ /* Read LE Number of Supported Advertising Sets */
+ hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
+ 0, NULL);
+ }
+
hci_set_le_support(req);
}
@@ -802,10 +850,9 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[35] & 0x20) {
struct hci_cp_le_set_default_phy cp;
- /* No transmitter PHY or receiver PHY preferences */
- cp.all_phys = 0x03;
- cp.tx_phys = 0;
- cp.rx_phys = 0;
+ cp.all_phys = 0x00;
+ cp.tx_phys = hdev->le_tx_def_phys;
+ cp.rx_phys = hdev->le_rx_def_phys;
hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
}
@@ -1368,7 +1415,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
atomic_set(&hdev->cmd_cnt, 1);
set_bit(HCI_INIT, &hdev->flags);
- if (hci_dev_test_flag(hdev, HCI_SETUP)) {
+ if (hci_dev_test_flag(hdev, HCI_SETUP) ||
+ test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
hci_sock_dev_event(hdev, HCI_DEV_SETUP);
if (hdev->setup)
@@ -1432,6 +1480,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
if (!ret) {
hci_dev_hold(hdev);
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
+ hci_adv_instances_set_rpa_expired(hdev, true);
set_bit(HCI_UP, &hdev->flags);
hci_sock_dev_event(hdev, HCI_DEV_UP);
hci_leds_update_powered(hdev, true);
@@ -1587,9 +1636,15 @@ int hci_dev_do_close(struct hci_dev *hdev)
if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
cancel_delayed_work(&hdev->service_cache);
- if (hci_dev_test_flag(hdev, HCI_MGMT))
+ if (hci_dev_test_flag(hdev, HCI_MGMT)) {
+ struct adv_info *adv_instance;
+
cancel_delayed_work_sync(&hdev->rpa_expired);
+ list_for_each_entry(adv_instance, &hdev->adv_instances, list)
+ cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
+ }
+
/* Avoid potential lockdep warnings from the *_flush() calls by
* ensuring the workqueue is empty up front.
*/
@@ -1897,7 +1952,11 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
break;
case HCISETPTYPE:
+ if (hdev->pkt_type == (__u16) dr.dev_opt)
+ break;
+
hdev->pkt_type = (__u16) dr.dev_opt;
+ mgmt_phy_configuration_changed(hdev, NULL);
break;
case HCISETACLMTU:
@@ -2661,6 +2720,8 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
hdev->cur_adv_instance = 0x00;
}
+ cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
+
list_del(&adv_instance->list);
kfree(adv_instance);
@@ -2669,6 +2730,14 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
return 0;
}
+void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
+{
+ struct adv_info *adv_instance, *n;
+
+ list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
+ adv_instance->rpa_expired = rpa_expired;
+}
+
/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
@@ -2680,6 +2749,7 @@ void hci_adv_instances_clear(struct hci_dev *hdev)
}
list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
+ cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
list_del(&adv_instance->list);
kfree(adv_instance);
}
@@ -2688,6 +2758,16 @@ void hci_adv_instances_clear(struct hci_dev *hdev)
hdev->cur_adv_instance = 0x00;
}
+static void adv_instance_rpa_expired(struct work_struct *work)
+{
+ struct adv_info *adv_instance = container_of(work, struct adv_info,
+ rpa_expired_cb.work);
+
+ BT_DBG("");
+
+ adv_instance->rpa_expired = true;
+}
+
/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
u16 adv_data_len, u8 *adv_data,
@@ -2736,6 +2816,11 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
else
adv_instance->duration = duration;
+ adv_instance->tx_power = HCI_TX_POWER_INVALID;
+
+ INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
+ adv_instance_rpa_expired);
+
BT_DBG("%s for %dMR", hdev->name, instance);
return 0;
@@ -2999,6 +3084,8 @@ struct hci_dev *hci_alloc_dev(void)
hdev->le_max_tx_time = 0x0148;
hdev->le_max_rx_len = 0x001b;
hdev->le_max_rx_time = 0x0148;
+ hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
+ hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
@@ -3017,6 +3104,7 @@ struct hci_dev *hci_alloc_dev(void)
INIT_LIST_HEAD(&hdev->identity_resolving_keys);
INIT_LIST_HEAD(&hdev->remote_oob_data);
INIT_LIST_HEAD(&hdev->le_white_list);
+ INIT_LIST_HEAD(&hdev->le_resolv_list);
INIT_LIST_HEAD(&hdev->le_conn_params);
INIT_LIST_HEAD(&hdev->pend_le_conns);
INIT_LIST_HEAD(&hdev->pend_le_reports);
@@ -3218,6 +3306,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_remote_oob_data_clear(hdev);
hci_adv_instances_clear(hdev);
hci_bdaddr_list_clear(&hdev->le_white_list);
+ hci_bdaddr_list_clear(&hdev->le_resolv_list);
hci_conn_params_clear_all(hdev);
hci_discovery_filter_clear(hdev);
hci_dev_unlock(hdev);
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index 0d8ab5b3c177..51f5b1efc3a5 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -694,6 +694,21 @@ static int white_list_show(struct seq_file *f, void *ptr)
DEFINE_SHOW_ATTRIBUTE(white_list);
+static int resolv_list_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct bdaddr_list *b;
+
+ hci_dev_lock(hdev);
+ list_for_each_entry(b, &hdev->le_resolv_list, list)
+ seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(resolv_list);
+
static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
struct hci_dev *hdev = f->private;
@@ -955,6 +970,10 @@ void hci_debugfs_create_le(struct hci_dev *hdev)
&hdev->le_white_list_size);
debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
&white_list_fops);
+ debugfs_create_u8("resolv_list_size", 0444, hdev->debugfs,
+ &hdev->le_resolv_list_size);
+ debugfs_create_file("resolv_list", 0444, hdev->debugfs, hdev,
+ &resolv_list_fops);
debugfs_create_file("identity_resolving_keys", 0400, hdev->debugfs,
hdev, &identity_resolving_keys_fops);
debugfs_create_file("long_term_keys", 0400, hdev->debugfs, hdev,
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 235b5aaab23d..f12555f23a49 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -221,6 +221,7 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
hdev->ssp_debug_mode = 0;
hci_bdaddr_list_clear(&hdev->le_white_list);
+ hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
@@ -1041,6 +1042,57 @@ static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
+static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+ struct hci_cp_le_set_default_phy *cp;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ hdev->le_tx_def_phys = cp->tx_phys;
+ hdev->le_rx_def_phys = cp->rx_phys;
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+ struct hci_cp_le_set_adv_set_rand_addr *cp;
+ struct adv_info *adv_instance;
+
+ if (status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ if (!hdev->cur_adv_instance) {
+ /* Store in hdev for instance 0 (Set adv and Directed advs) */
+ bacpy(&hdev->random_addr, &cp->bdaddr);
+ } else {
+ adv_instance = hci_find_adv_instance(hdev,
+ hdev->cur_adv_instance);
+ if (adv_instance)
+ bacpy(&adv_instance->random_addr, &cp->bdaddr);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 *sent, status = *((__u8 *) skb->data);
@@ -1076,6 +1128,40 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
+static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_cp_le_set_ext_adv_enable *cp;
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ if (cp->enable) {
+ struct hci_conn *conn;
+
+ hci_dev_set_flag(hdev, HCI_LE_ADV);
+
+ conn = hci_lookup_le_connect(hdev);
+ if (conn)
+ queue_delayed_work(hdev->workqueue,
+ &conn->le_conn_timeout,
+ conn->conn_timeout);
+ } else {
+ hci_dev_clear_flag(hdev, HCI_LE_ADV);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_cp_le_set_scan_param *cp;
@@ -1097,6 +1183,31 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
+static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_cp_le_set_ext_scan_params *cp;
+ __u8 status = *((__u8 *) skb->data);
+ struct hci_cp_le_scan_phy_params *phy_param;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
+ if (!cp)
+ return;
+
+ phy_param = (void *)cp->data;
+
+ hci_dev_lock(hdev);
+
+ hdev->le_scan_type = phy_param->type;
+
+ hci_dev_unlock(hdev);
+}
+
static bool has_pending_adv_report(struct hci_dev *hdev)
{
struct discovery_state *d = &hdev->discovery;
@@ -1126,24 +1237,11 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
d->last_adv_data_len = len;
}
-static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
- struct hci_cp_le_set_scan_enable *cp;
- __u8 status = *((__u8 *) skb->data);
-
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- if (status)
- return;
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
- if (!cp)
- return;
-
hci_dev_lock(hdev);
- switch (cp->enable) {
+ switch (enable) {
case LE_SCAN_ENABLE:
hci_dev_set_flag(hdev, HCI_LE_SCAN);
if (hdev->le_scan_type == LE_SCAN_ACTIVE)
@@ -1189,13 +1287,63 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
default:
bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
- cp->enable);
+ enable);
break;
}
hci_dev_unlock(hdev);
}
+static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_cp_le_set_scan_enable *cp;
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
+ if (!cp)
+ return;
+
+ le_set_scan_enable_complete(hdev, cp->enable);
+}
+
+static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_cp_le_set_ext_scan_enable *cp;
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
+ if (!cp)
+ return;
+
+ le_set_scan_enable_complete(hdev, cp->enable);
+}
+
+static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
+ rp->num_of_sets);
+
+ if (rp->status)
+ return;
+
+ hdev->le_num_of_adv_sets = rp->num_of_sets;
+}
+
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -1306,6 +1454,56 @@ static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}
+static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ hci_bdaddr_list_clear(&hdev->le_resolv_list);
+}
+
+static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
+
+ if (rp->status)
+ return;
+
+ hdev->le_resolv_list_size = rp->size;
+}
+
+static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ __u8 *sent, status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
+ if (!sent)
+ return;
+
+ hci_dev_lock(hdev);
+
+ if (*sent)
+ hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
+ else
+ hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
+
+ hci_dev_unlock(hdev);
+}
+
static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -1375,6 +1573,37 @@ static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
+static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
+ struct hci_cp_le_set_ext_adv_params *cp;
+ struct adv_info *adv_instance;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+ hdev->adv_addr_type = cp->own_addr_type;
+ if (!hdev->cur_adv_instance) {
+ /* Store in hdev for instance 0 */
+ hdev->adv_tx_power = rp->tx_power;
+ } else {
+ adv_instance = hci_find_adv_instance(hdev,
+ hdev->cur_adv_instance);
+ if (adv_instance)
+ adv_instance->tx_power = rp->tx_power;
+ }
+ /* Update adv data as tx power is known now */
+ hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
+ hci_dev_unlock(hdev);
+}
+
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_rssi *rp = (void *) skb->data;
@@ -1896,10 +2125,44 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
hci_dev_unlock(hdev);
}
+static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
+ u8 peer_addr_type, u8 own_address_type,
+ u8 filter_policy)
+{
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_le(hdev, peer_addr,
+ peer_addr_type);
+ if (!conn)
+ return;
+
+ /* Store the initiator and responder address information which
+ * is needed for SMP. These values will not change during the
+ * lifetime of the connection.
+ */
+ conn->init_addr_type = own_address_type;
+ if (own_address_type == ADDR_LE_DEV_RANDOM)
+ bacpy(&conn->init_addr, &hdev->random_addr);
+ else
+ bacpy(&conn->init_addr, &hdev->bdaddr);
+
+ conn->resp_addr_type = peer_addr_type;
+ bacpy(&conn->resp_addr, peer_addr);
+
+ /* We don't want the connection attempt to stick around
+ * indefinitely since LE doesn't have a page timeout concept
+ * like BR/EDR. Set a timer for any connection that doesn't use
+ * the white list for connecting.
+ */
+ if (filter_policy == HCI_LE_USE_PEER_ADDR)
+ queue_delayed_work(conn->hdev->workqueue,
+ &conn->le_conn_timeout,
+ conn->conn_timeout);
+}
+
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
struct hci_cp_le_create_conn *cp;
- struct hci_conn *conn;
BT_DBG("%s status 0x%2.2x", hdev->name, status);
@@ -1916,35 +2179,34 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
hci_dev_lock(hdev);
- conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr,
- cp->peer_addr_type);
- if (!conn)
- goto unlock;
+ cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
+ cp->own_address_type, cp->filter_policy);
- /* Store the initiator and responder address information which
- * is needed for SMP. These values will not change during the
- * lifetime of the connection.
- */
- conn->init_addr_type = cp->own_address_type;
- if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
- bacpy(&conn->init_addr, &hdev->random_addr);
- else
- bacpy(&conn->init_addr, &hdev->bdaddr);
+ hci_dev_unlock(hdev);
+}
- conn->resp_addr_type = cp->peer_addr_type;
- bacpy(&conn->resp_addr, &cp->peer_addr);
+static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
+{
+ struct hci_cp_le_ext_create_conn *cp;
- /* We don't want the connection attempt to stick around
- * indefinitely since LE doesn't have a page timeout concept
- * like BR/EDR. Set a timer for any connection that doesn't use
- * the white list for connecting.
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ /* All connection failure handling is taken care of by the
+ * hci_le_conn_failed function which is triggered by the HCI
+ * request completion callbacks used for connecting.
*/
- if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
- queue_delayed_work(conn->hdev->workqueue,
- &conn->le_conn_timeout,
- conn->conn_timeout);
+ if (status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
+ cp->own_addr_type, cp->filter_policy);
-unlock:
hci_dev_unlock(hdev);
}
@@ -2618,8 +2880,10 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
/* We should disregard the current RPA and generate a new one
* whenever the encryption procedure fails.
*/
- if (ev->status && conn->type == LE_LINK)
+ if (ev->status && conn->type == LE_LINK) {
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
+ hci_adv_instances_set_rpa_expired(hdev, true);
+ }
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
@@ -3015,6 +3279,18 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_le_write_def_data_len(hdev, skb);
break;
+ case HCI_OP_LE_CLEAR_RESOLV_LIST:
+ hci_cc_le_clear_resolv_list(hdev, skb);
+ break;
+
+ case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
+ hci_cc_le_read_resolv_list_size(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
+ hci_cc_le_set_addr_resolution_enable(hdev, skb);
+ break;
+
case HCI_OP_LE_READ_MAX_DATA_LEN:
hci_cc_le_read_max_data_len(hdev, skb);
break;
@@ -3039,6 +3315,34 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_write_ssp_debug_mode(hdev, skb);
break;
+ case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
+ hci_cc_le_set_ext_scan_param(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
+ hci_cc_le_set_ext_scan_enable(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_DEFAULT_PHY:
+ hci_cc_le_set_default_phy(hdev, skb);
+ break;
+
+ case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
+ hci_cc_le_read_num_adv_sets(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_EXT_ADV_PARAMS:
+ hci_cc_set_ext_adv_param(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_EXT_ADV_ENABLE:
+ hci_cc_le_set_ext_adv_enable(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
+ hci_cc_le_set_adv_set_random_addr(hdev, skb);
+ break;
+
default:
BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
break;
@@ -3134,6 +3438,10 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cs_le_start_enc(hdev, ev->status);
break;
+ case HCI_OP_LE_EXT_CREATE_CONN:
+ hci_cs_le_ext_create_conn(hdev, ev->status);
+ break;
+
default:
BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
break;
@@ -4460,16 +4768,15 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
}
#endif
-static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
+ u16 interval, u16 latency, u16 supervision_timeout)
{
- struct hci_ev_le_conn_complete *ev = (void *) skb->data;
struct hci_conn_params *params;
struct hci_conn *conn;
struct smp_irk *irk;
u8 addr_type;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
-
hci_dev_lock(hdev);
/* All controllers implicitly stop advertising in the event of a
@@ -4479,13 +4786,13 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn = hci_lookup_le_connect(hdev);
if (!conn) {
- conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
+ conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
if (!conn) {
bt_dev_err(hdev, "no memory for new connection");
goto unlock;
}
- conn->dst_type = ev->bdaddr_type;
+ conn->dst_type = bdaddr_type;
/* If we didn't have a hci_conn object previously
* but we're in master role this must be something
@@ -4496,8 +4803,8 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
* initiator address based on the HCI_PRIVACY flag.
*/
if (conn->out) {
- conn->resp_addr_type = ev->bdaddr_type;
- bacpy(&conn->resp_addr, &ev->bdaddr);
+ conn->resp_addr_type = bdaddr_type;
+ bacpy(&conn->resp_addr, bdaddr);
if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
conn->init_addr_type = ADDR_LE_DEV_RANDOM;
bacpy(&conn->init_addr, &hdev->rpa);
@@ -4516,13 +4823,18 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
* the advertising address type.
*/
conn->resp_addr_type = hdev->adv_addr_type;
- if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
- bacpy(&conn->resp_addr, &hdev->random_addr);
- else
+ if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
+ /* In case of ext adv, resp_addr will be updated in
+ * Adv Terminated event.
+ */
+ if (!ext_adv_capable(hdev))
+ bacpy(&conn->resp_addr, &hdev->random_addr);
+ } else {
bacpy(&conn->resp_addr, &hdev->bdaddr);
+ }
- conn->init_addr_type = ev->bdaddr_type;
- bacpy(&conn->init_addr, &ev->bdaddr);
+ conn->init_addr_type = bdaddr_type;
+ bacpy(&conn->init_addr, bdaddr);
/* For incoming connections, set the default minimum
* and maximum connection interval. They will be used
@@ -4548,8 +4860,8 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn->dst_type = irk->addr_type;
}
- if (ev->status) {
- hci_le_conn_failed(conn, ev->status);
+ if (status) {
+ hci_le_conn_failed(conn, status);
goto unlock;
}
@@ -4568,17 +4880,17 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
mgmt_device_connected(hdev, conn, 0, NULL, 0);
conn->sec_level = BT_SECURITY_LOW;
- conn->handle = __le16_to_cpu(ev->handle);
+ conn->handle = handle;
conn->state = BT_CONFIG;
- conn->le_conn_interval = le16_to_cpu(ev->interval);
- conn->le_conn_latency = le16_to_cpu(ev->latency);
- conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
+ conn->le_conn_interval = interval;
+ conn->le_conn_latency = latency;
+ conn->le_supv_timeout = supervision_timeout;
hci_debugfs_create_conn(conn);
hci_conn_add_sysfs(conn);
- if (!ev->status) {
+ if (!status) {
/* The remote features procedure is defined for master
* role only. So only in case of an initiated connection
* request the remote features.
@@ -4600,10 +4912,10 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_hold(conn);
} else {
conn->state = BT_CONNECTED;
- hci_connect_cfm(conn, ev->status);
+ hci_connect_cfm(conn, status);
}
} else {
- hci_connect_cfm(conn, ev->status);
+ hci_connect_cfm(conn, status);
}
params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
@@ -4622,6 +4934,61 @@ unlock:
hci_dev_unlock(hdev);
}
+static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_le_conn_complete *ev = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+
+ le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
+ ev->role, le16_to_cpu(ev->handle),
+ le16_to_cpu(ev->interval),
+ le16_to_cpu(ev->latency),
+ le16_to_cpu(ev->supervision_timeout));
+}
+
+static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+
+ le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
+ ev->role, le16_to_cpu(ev->handle),
+ le16_to_cpu(ev->interval),
+ le16_to_cpu(ev->latency),
+ le16_to_cpu(ev->supervision_timeout));
+}
+
+static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+
+ if (ev->status)
+ return;
+
+ conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
+ if (conn) {
+ struct adv_info *adv_instance;
+
+ if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
+ return;
+
+ if (!hdev->cur_adv_instance) {
+ bacpy(&conn->resp_addr, &hdev->random_addr);
+ return;
+ }
+
+ adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
+ if (adv_instance)
+ bacpy(&conn->resp_addr, &adv_instance->random_addr);
+ }
+}
+
static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -4957,6 +5324,78 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
+static u8 ext_evt_type_to_legacy(u16 evt_type)
+{
+ if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
+ switch (evt_type) {
+ case LE_LEGACY_ADV_IND:
+ return LE_ADV_IND;
+ case LE_LEGACY_ADV_DIRECT_IND:
+ return LE_ADV_DIRECT_IND;
+ case LE_LEGACY_ADV_SCAN_IND:
+ return LE_ADV_SCAN_IND;
+ case LE_LEGACY_NONCONN_IND:
+ return LE_ADV_NONCONN_IND;
+ case LE_LEGACY_SCAN_RSP_ADV:
+ case LE_LEGACY_SCAN_RSP_ADV_SCAN:
+ return LE_ADV_SCAN_RSP;
+ }
+
+ BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
+ evt_type);
+
+ return LE_ADV_INVALID;
+ }
+
+ if (evt_type & LE_EXT_ADV_CONN_IND) {
+ if (evt_type & LE_EXT_ADV_DIRECT_IND)
+ return LE_ADV_DIRECT_IND;
+
+ return LE_ADV_IND;
+ }
+
+ if (evt_type & LE_EXT_ADV_SCAN_RSP)
+ return LE_ADV_SCAN_RSP;
+
+ if (evt_type & LE_EXT_ADV_SCAN_IND)
+ return LE_ADV_SCAN_IND;
+
+ if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
+ evt_type & LE_EXT_ADV_DIRECT_IND)
+ return LE_ADV_NONCONN_IND;
+
+ BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
+ evt_type);
+
+ return LE_ADV_INVALID;
+}
+
+static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ u8 num_reports = skb->data[0];
+ void *ptr = &skb->data[1];
+
+ hci_dev_lock(hdev);
+
+ while (num_reports--) {
+ struct hci_ev_le_ext_adv_report *ev = ptr;
+ u8 legacy_evt_type;
+ u16 evt_type;
+
+ evt_type = __le16_to_cpu(ev->evt_type);
+ legacy_evt_type = ext_evt_type_to_legacy(evt_type);
+ if (legacy_evt_type != LE_ADV_INVALID) {
+ process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
+ ev->bdaddr_type, NULL, 0, ev->rssi,
+ ev->data, ev->length);
+ }
+
+ ptr += sizeof(*ev) + ev->length + 1;
+ }
+
+ hci_dev_unlock(hdev);
+}
+
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -5189,6 +5628,18 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_le_direct_adv_report_evt(hdev, skb);
break;
+ case HCI_EV_LE_EXT_ADV_REPORT:
+ hci_le_ext_adv_report_evt(hdev, skb);
+ break;
+
+ case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
+ hci_le_enh_conn_complete_evt(hdev, skb);
+ break;
+
+ case HCI_EV_LE_EXT_ADV_SET_TERM:
+ hci_le_ext_adv_term_evt(hdev, skb);
+ break;
+
default:
break;
}
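
The PDU-type collapse in ext_evt_type_to_legacy() above is easy to get wrong, so here is a standalone sketch of the same mapping that compiles in userspace. The flag and legacy-type values are assumptions modelled on the Bluetooth 5.0 extended advertising report format, not copied from hci.h.

/* Sketch of the ext -> legacy advertising type mapping; numeric
 * values below are assumed from the BT 5.0 spec, not from hci.h. */
#include <stdint.h>
#include <stdio.h>

#define EXT_ADV_CONN      0x0001	/* connectable */
#define EXT_ADV_SCANNABLE 0x0002	/* scannable */
#define EXT_ADV_DIRECT    0x0004	/* directed */
#define EXT_ADV_SCAN_RSP  0x0008	/* scan response */
#define EXT_ADV_LEGACY    0x0010	/* legacy advertising PDU */

enum { ADV_IND, ADV_DIRECT_IND, ADV_SCAN_IND, ADV_NONCONN_IND,
       ADV_SCAN_RSP, ADV_INVALID = 0xff };

static uint8_t to_legacy(uint16_t t)
{
	if (t & EXT_ADV_LEGACY) {
		/* legacy PDUs are matched exactly, as in the patch */
		switch (t) {
		case 0x0013: return ADV_IND;		/* conn|scan|legacy */
		case 0x0015: return ADV_DIRECT_IND;	/* conn|direct|legacy */
		case 0x0012: return ADV_SCAN_IND;	/* scan|legacy */
		case 0x0010: return ADV_NONCONN_IND;	/* legacy only */
		case 0x001a:				/* scan rsp variants */
		case 0x001b: return ADV_SCAN_RSP;
		}
		return ADV_INVALID;
	}
	if (t & EXT_ADV_CONN)
		return (t & EXT_ADV_DIRECT) ? ADV_DIRECT_IND : ADV_IND;
	if (t & EXT_ADV_SCAN_RSP)
		return ADV_SCAN_RSP;
	if (t & EXT_ADV_SCANNABLE)
		return ADV_SCAN_IND;
	if (t == 0x0000 || (t & EXT_ADV_DIRECT))
		return ADV_NONCONN_IND;
	return ADV_INVALID;
}

int main(void)
{
	printf("0x0013 -> %u (expect %u)\n", to_legacy(0x0013), ADV_IND);
	printf("0x0001 -> %u (expect %u)\n", to_legacy(0x0001), ADV_IND);
	printf("0x0000 -> %u (expect %u)\n", to_legacy(0x0000), ADV_NONCONN_IND);
	return 0;
}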
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index e44d34734834..e8c9ef1e1922 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -647,11 +647,22 @@ void __hci_req_update_eir(struct hci_request *req)
void hci_req_add_le_scan_disable(struct hci_request *req)
{
- struct hci_cp_le_set_scan_enable cp;
+ struct hci_dev *hdev = req->hdev;
- memset(&cp, 0, sizeof(cp));
- cp.enable = LE_SCAN_DISABLE;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+ if (use_ext_scan(hdev)) {
+ struct hci_cp_le_set_ext_scan_enable cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = LE_SCAN_DISABLE;
+ hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
+ &cp);
+ } else {
+ struct hci_cp_le_set_scan_enable cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = LE_SCAN_DISABLE;
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+ }
}
static void add_to_white_list(struct hci_request *req,
@@ -767,10 +778,86 @@ static bool scan_use_rpa(struct hci_dev *hdev)
return hci_dev_test_flag(hdev, HCI_PRIVACY);
}
+static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
+ u16 window, u8 own_addr_type, u8 filter_policy)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ /* Use ext scanning if both the set ext scan param and the set
+ * ext scan enable commands are supported.
+ */
+ if (use_ext_scan(hdev)) {
+ struct hci_cp_le_set_ext_scan_params *ext_param_cp;
+ struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
+ struct hci_cp_le_scan_phy_params *phy_params;
+ u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
+ u32 plen;
+
+ ext_param_cp = (void *)data;
+ phy_params = (void *)ext_param_cp->data;
+
+ memset(ext_param_cp, 0, sizeof(*ext_param_cp));
+ ext_param_cp->own_addr_type = own_addr_type;
+ ext_param_cp->filter_policy = filter_policy;
+
+ plen = sizeof(*ext_param_cp);
+
+ if (scan_1m(hdev) || scan_2m(hdev)) {
+ ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
+
+ memset(phy_params, 0, sizeof(*phy_params));
+ phy_params->type = type;
+ phy_params->interval = cpu_to_le16(interval);
+ phy_params->window = cpu_to_le16(window);
+
+ plen += sizeof(*phy_params);
+ phy_params++;
+ }
+
+ if (scan_coded(hdev)) {
+ ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
+
+ memset(phy_params, 0, sizeof(*phy_params));
+ phy_params->type = type;
+ phy_params->interval = cpu_to_le16(interval);
+ phy_params->window = cpu_to_le16(window);
+
+ plen += sizeof(*phy_params);
+ phy_params++;
+ }
+
+ hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
+ plen, ext_param_cp);
+
+ memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
+ ext_enable_cp.enable = LE_SCAN_ENABLE;
+ ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+
+ hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
+ sizeof(ext_enable_cp), &ext_enable_cp);
+ } else {
+ struct hci_cp_le_set_scan_param param_cp;
+ struct hci_cp_le_set_scan_enable enable_cp;
+
+ memset(&param_cp, 0, sizeof(param_cp));
+ param_cp.type = type;
+ param_cp.interval = cpu_to_le16(interval);
+ param_cp.window = cpu_to_le16(window);
+ param_cp.own_address_type = own_addr_type;
+ param_cp.filter_policy = filter_policy;
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
+ &param_cp);
+
+ memset(&enable_cp, 0, sizeof(enable_cp));
+ enable_cp.enable = LE_SCAN_ENABLE;
+ enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
+ &enable_cp);
+ }
+}
+
void hci_req_add_le_passive_scan(struct hci_request *req)
{
- struct hci_cp_le_set_scan_param param_cp;
- struct hci_cp_le_set_scan_enable enable_cp;
struct hci_dev *hdev = req->hdev;
u8 own_addr_type;
u8 filter_policy;
@@ -804,20 +891,26 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
(hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
filter_policy |= 0x02;
- memset(&param_cp, 0, sizeof(param_cp));
- param_cp.type = LE_SCAN_PASSIVE;
- param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
- param_cp.window = cpu_to_le16(hdev->le_scan_window);
- param_cp.own_address_type = own_addr_type;
- param_cp.filter_policy = filter_policy;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
- &param_cp);
+ hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
+ hdev->le_scan_window, own_addr_type, filter_policy);
+}
+
+static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
+{
+ struct adv_info *adv_instance;
+
+ /* Ignore instance 0 */
+ if (instance == 0x00)
+ return 0;
+
+ adv_instance = hci_find_adv_instance(hdev, instance);
+ if (!adv_instance)
+ return 0;
- memset(&enable_cp, 0, sizeof(enable_cp));
- enable_cp.enable = LE_SCAN_ENABLE;
- enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
- &enable_cp);
+ /* TODO: Take into account the "appearance" and "local-name" flags here.
+ * These are currently being ignored as they are not supported.
+ */
+ return adv_instance->scan_rsp_len;
}
static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
@@ -841,9 +934,19 @@ static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
void __hci_req_disable_advertising(struct hci_request *req)
{
- u8 enable = 0x00;
+ if (ext_adv_capable(req->hdev)) {
+ struct hci_cp_le_set_ext_adv_enable cp;
- hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+ cp.enable = 0x00;
+ /* Disable all sets since we only support one set at the moment */
+ cp.num_of_sets = 0x00;
+
+ hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
+ } else {
+ u8 enable = 0x00;
+
+ hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+ }
}
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
@@ -1081,29 +1184,58 @@ static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
struct hci_dev *hdev = req->hdev;
- struct hci_cp_le_set_scan_rsp_data cp;
u8 len;
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return;
- memset(&cp, 0, sizeof(cp));
+ if (ext_adv_capable(hdev)) {
+ struct hci_cp_le_set_ext_scan_rsp_data cp;
- if (instance)
- len = create_instance_scan_rsp_data(hdev, instance, cp.data);
- else
- len = create_default_scan_rsp_data(hdev, cp.data);
+ memset(&cp, 0, sizeof(cp));
- if (hdev->scan_rsp_data_len == len &&
- !memcmp(cp.data, hdev->scan_rsp_data, len))
- return;
+ if (instance)
+ len = create_instance_scan_rsp_data(hdev, instance,
+ cp.data);
+ else
+ len = create_default_scan_rsp_data(hdev, cp.data);
+
+ if (hdev->scan_rsp_data_len == len &&
+ !memcmp(cp.data, hdev->scan_rsp_data, len))
+ return;
- memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
- hdev->scan_rsp_data_len = len;
+ memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
+ hdev->scan_rsp_data_len = len;
- cp.length = len;
+ cp.handle = 0;
+ cp.length = len;
+ cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+
+ hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
+ &cp);
+ } else {
+ struct hci_cp_le_set_scan_rsp_data cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (instance)
+ len = create_instance_scan_rsp_data(hdev, instance,
+ cp.data);
+ else
+ len = create_default_scan_rsp_data(hdev, cp.data);
+
+ if (hdev->scan_rsp_data_len == len &&
+ !memcmp(cp.data, hdev->scan_rsp_data, len))
+ return;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
+ memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
+ hdev->scan_rsp_data_len = len;
+
+ cp.length = len;
+
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
+ }
}
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
@@ -1160,15 +1292,27 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
ptr += adv_instance->adv_data_len;
}
- /* Provide Tx Power only if we can provide a valid value for it */
- if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
- (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
- ptr[0] = 0x02;
- ptr[1] = EIR_TX_POWER;
- ptr[2] = (u8)hdev->adv_tx_power;
+ if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
+ s8 adv_tx_power;
- ad_len += 3;
- ptr += 3;
+ if (ext_adv_capable(hdev)) {
+ if (adv_instance)
+ adv_tx_power = adv_instance->tx_power;
+ else
+ adv_tx_power = hdev->adv_tx_power;
+ } else {
+ adv_tx_power = hdev->adv_tx_power;
+ }
+
+ /* Provide Tx Power only if we can provide a valid value for it */
+ if (adv_tx_power != HCI_TX_POWER_INVALID) {
+ ptr[0] = 0x02;
+ ptr[1] = EIR_TX_POWER;
+ ptr[2] = (u8)adv_tx_power;
+
+ ad_len += 3;
+ ptr += 3;
+ }
}
return ad_len;
@@ -1177,27 +1321,51 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
struct hci_dev *hdev = req->hdev;
- struct hci_cp_le_set_adv_data cp;
u8 len;
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return;
- memset(&cp, 0, sizeof(cp));
+ if (ext_adv_capable(hdev)) {
+ struct hci_cp_le_set_ext_adv_data cp;
- len = create_instance_adv_data(hdev, instance, cp.data);
+ memset(&cp, 0, sizeof(cp));
- /* There's nothing to do if the data hasn't changed */
- if (hdev->adv_data_len == len &&
- memcmp(cp.data, hdev->adv_data, len) == 0)
- return;
+ len = create_instance_adv_data(hdev, instance, cp.data);
+
+ /* There's nothing to do if the data hasn't changed */
+ if (hdev->adv_data_len == len &&
+ memcmp(cp.data, hdev->adv_data, len) == 0)
+ return;
+
+ memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+ hdev->adv_data_len = len;
+
+ cp.length = len;
+ cp.handle = 0;
+ cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+
+ hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
+ } else {
+ struct hci_cp_le_set_adv_data cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ len = create_instance_adv_data(hdev, instance, cp.data);
+
+ /* There's nothing to do if the data hasn't changed */
+ if (hdev->adv_data_len == len &&
+ memcmp(cp.data, hdev->adv_data, len) == 0)
+ return;
- memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
- hdev->adv_data_len = len;
+ memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+ hdev->adv_data_len = len;
- cp.length = len;
+ cp.length = len;
- hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
+ hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
+ }
}
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
@@ -1229,9 +1397,13 @@ void hci_req_reenable_advertising(struct hci_dev *hdev)
__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
true);
} else {
- __hci_req_update_adv_data(&req, 0x00);
- __hci_req_update_scan_rsp_data(&req, 0x00);
- __hci_req_enable_advertising(&req);
+ if (ext_adv_capable(hdev)) {
+ __hci_req_start_ext_adv(&req, 0x00);
+ } else {
+ __hci_req_update_adv_data(&req, 0x00);
+ __hci_req_update_scan_rsp_data(&req, 0x00);
+ __hci_req_enable_advertising(&req);
+ }
}
hci_req_run(&req, adv_enable_complete);
@@ -1268,6 +1440,245 @@ unlock:
hci_dev_unlock(hdev);
}
+int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
+ bool use_rpa, struct adv_info *adv_instance,
+ u8 *own_addr_type, bdaddr_t *rand_addr)
+{
+ int err;
+
+ bacpy(rand_addr, BDADDR_ANY);
+
+ /* If privacy is enabled use a resolvable private address. If
+ * the current RPA has expired then generate a new one.
+ */
+ if (use_rpa) {
+ int to;
+
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+
+ if (adv_instance) {
+ if (!adv_instance->rpa_expired &&
+ !bacmp(&adv_instance->random_addr, &hdev->rpa))
+ return 0;
+
+ adv_instance->rpa_expired = false;
+ } else {
+ if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
+ !bacmp(&hdev->random_addr, &hdev->rpa))
+ return 0;
+ }
+
+ err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
+ if (err < 0) {
+ BT_ERR("%s failed to generate new RPA", hdev->name);
+ return err;
+ }
+
+ bacpy(rand_addr, &hdev->rpa);
+
+ to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
+ if (adv_instance)
+ queue_delayed_work(hdev->workqueue,
+ &adv_instance->rpa_expired_cb, to);
+ else
+ queue_delayed_work(hdev->workqueue,
+ &hdev->rpa_expired, to);
+
+ return 0;
+ }
+
+ /* If privacy is required but a resolvable private address cannot
+ * be used, fall back to a non-resolvable private address. This is
+ * useful for non-connectable advertising.
+ */
+ if (require_privacy) {
+ bdaddr_t nrpa;
+
+ while (true) {
+ /* The non-resolvable private address is generated
+ * from six random bytes with the two most significant
+ * bits cleared.
+ */
+ get_random_bytes(&nrpa, 6);
+ nrpa.b[5] &= 0x3f;
+
+ /* The non-resolvable private address shall not be
+ * equal to the public address.
+ */
+ if (bacmp(&hdev->bdaddr, &nrpa))
+ break;
+ }
+
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+ bacpy(rand_addr, &nrpa);
+
+ return 0;
+ }
+
+ /* No privacy so use a public address. */
+ *own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+ return 0;
+}
+
+void __hci_req_clear_ext_adv_sets(struct hci_request *req)
+{
+ hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
+}
+
+int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
+{
+ struct hci_cp_le_set_ext_adv_params cp;
+ struct hci_dev *hdev = req->hdev;
+ bool connectable;
+ u32 flags;
+ bdaddr_t random_addr;
+ u8 own_addr_type;
+ int err;
+ struct adv_info *adv_instance;
+ bool secondary_adv;
+ /* In the ext adv set param command the interval is 3 octets */
+ const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
+
+ if (instance > 0) {
+ adv_instance = hci_find_adv_instance(hdev, instance);
+ if (!adv_instance)
+ return -EINVAL;
+ } else {
+ adv_instance = NULL;
+ }
+
+ flags = get_adv_instance_flags(hdev, instance);
+
+ /* If the "connectable" instance flag was not set, then choose between
+ * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
+ */
+ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
+ mgmt_get_connectable(hdev);
+
+ if (!is_advertising_allowed(hdev, connectable))
+ return -EPERM;
+
+ /* Set require_privacy to true only when non-connectable
+ * advertising is used. In that case it is fine to use a
+ * non-resolvable private address.
+ */
+ err = hci_get_random_address(hdev, !connectable,
+ adv_use_rpa(hdev, flags), adv_instance,
+ &own_addr_type, &random_addr);
+ if (err < 0)
+ return err;
+
+ memset(&cp, 0, sizeof(cp));
+
+ memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
+ memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
+
+ secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
+
+ if (connectable) {
+ if (secondary_adv)
+ cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
+ else
+ cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
+ } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
+ if (secondary_adv)
+ cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
+ else
+ cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
+ } else {
+ if (secondary_adv)
+ cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
+ else
+ cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
+ }
+
+ cp.own_addr_type = own_addr_type;
+ cp.channel_map = hdev->le_adv_channel_map;
+ cp.tx_power = 127;
+ cp.handle = 0;
+
+ if (flags & MGMT_ADV_FLAG_SEC_2M) {
+ cp.primary_phy = HCI_ADV_PHY_1M;
+ cp.secondary_phy = HCI_ADV_PHY_2M;
+ } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
+ cp.primary_phy = HCI_ADV_PHY_CODED;
+ cp.secondary_phy = HCI_ADV_PHY_CODED;
+ } else {
+ /* In all other cases use 1M */
+ cp.primary_phy = HCI_ADV_PHY_1M;
+ cp.secondary_phy = HCI_ADV_PHY_1M;
+ }
+
+ hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
+
+ if (own_addr_type == ADDR_LE_DEV_RANDOM &&
+ bacmp(&random_addr, BDADDR_ANY)) {
+ struct hci_cp_le_set_adv_set_rand_addr cp;
+
+ /* Check if the random address needs to be updated */
+ if (adv_instance) {
+ if (!bacmp(&random_addr, &adv_instance->random_addr))
+ return 0;
+ } else {
+ if (!bacmp(&random_addr, &hdev->random_addr))
+ return 0;
+ }
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.handle = 0;
+ bacpy(&cp.bdaddr, &random_addr);
+
+ hci_req_add(req,
+ HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
+ sizeof(cp), &cp);
+ }
+
+ return 0;
+}
+
+void __hci_req_enable_ext_advertising(struct hci_request *req)
+{
+ struct hci_cp_le_set_ext_adv_enable *cp;
+ struct hci_cp_ext_adv_set *adv_set;
+ u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
+
+ cp = (void *) data;
+ adv_set = (void *) cp->data;
+
+ memset(cp, 0, sizeof(*cp));
+
+ cp->enable = 0x01;
+ cp->num_of_sets = 0x01;
+
+ memset(adv_set, 0, sizeof(*adv_set));
+
+ adv_set->handle = 0;
+
+ hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
+ sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
+ data);
+}
+
+int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
+{
+ struct hci_dev *hdev = req->hdev;
+ int err;
+
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV))
+ __hci_req_disable_advertising(req);
+
+ err = __hci_req_setup_ext_adv_instance(req, instance);
+ if (err < 0)
+ return err;
+
+ __hci_req_update_scan_rsp_data(req, instance);
+ __hci_req_enable_ext_advertising(req);
+
+ return 0;
+}
+
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
bool force)
{
@@ -1321,9 +1732,13 @@ int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
return 0;
hdev->cur_adv_instance = instance;
- __hci_req_update_adv_data(req, instance);
- __hci_req_update_scan_rsp_data(req, instance);
- __hci_req_enable_advertising(req);
+ if (ext_adv_capable(hdev)) {
+ __hci_req_start_ext_adv(req, instance);
+ } else {
+ __hci_req_update_adv_data(req, instance);
+ __hci_req_update_scan_rsp_data(req, instance);
+ __hci_req_enable_advertising(req);
+ }
return 0;
}
@@ -1594,8 +2009,12 @@ static int connectable_update(struct hci_request *req, unsigned long opt)
/* Update the advertising parameters if necessary */
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
- !list_empty(&hdev->adv_instances))
- __hci_req_enable_advertising(req);
+ !list_empty(&hdev->adv_instances)) {
+ if (ext_adv_capable(hdev))
+ __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
+ else
+ __hci_req_enable_advertising(req);
+ }
__hci_update_background_scan(req);
@@ -1704,8 +2123,12 @@ static int discoverable_update(struct hci_request *req, unsigned long opt)
/* Discoverable mode affects the local advertising
* address in limited privacy mode.
*/
- if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
- __hci_req_enable_advertising(req);
+ if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
+ if (ext_adv_capable(hdev))
+ __hci_req_start_ext_adv(req, 0x00);
+ else
+ __hci_req_enable_advertising(req);
+ }
}
hci_dev_unlock(hdev);
@@ -1940,7 +2363,6 @@ discov_stopped:
static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
- struct hci_cp_le_set_scan_enable cp;
/* If controller is not scanning we are done. */
if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
@@ -1948,10 +2370,23 @@ static int le_scan_restart(struct hci_request *req, unsigned long opt)
hci_req_add_le_scan_disable(req);
- memset(&cp, 0, sizeof(cp));
- cp.enable = LE_SCAN_ENABLE;
- cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+ if (use_ext_scan(hdev)) {
+ struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
+
+ memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
+ ext_enable_cp.enable = LE_SCAN_ENABLE;
+ ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+
+ hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
+ sizeof(ext_enable_cp), &ext_enable_cp);
+ } else {
+ struct hci_cp_le_set_scan_enable cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = LE_SCAN_ENABLE;
+ cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+ }
return 0;
}
@@ -2010,8 +2445,6 @@ static int active_scan(struct hci_request *req, unsigned long opt)
{
uint16_t interval = opt;
struct hci_dev *hdev = req->hdev;
- struct hci_cp_le_set_scan_param param_cp;
- struct hci_cp_le_set_scan_enable enable_cp;
u8 own_addr_type;
int err;
@@ -2050,22 +2483,8 @@ static int active_scan(struct hci_request *req, unsigned long opt)
if (err < 0)
own_addr_type = ADDR_LE_DEV_PUBLIC;
- memset(&param_cp, 0, sizeof(param_cp));
- param_cp.type = LE_SCAN_ACTIVE;
- param_cp.interval = cpu_to_le16(interval);
- param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
- param_cp.own_address_type = own_addr_type;
-
- hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
- &param_cp);
-
- memset(&enable_cp, 0, sizeof(enable_cp));
- enable_cp.enable = LE_SCAN_ENABLE;
- enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
-
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
- &enable_cp);
-
+ hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
+ own_addr_type, 0);
return 0;
}
@@ -2302,11 +2721,26 @@ static int powered_update_hci(struct hci_request *req, unsigned long opt)
*/
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
list_empty(&hdev->adv_instances)) {
- __hci_req_update_adv_data(req, 0x00);
- __hci_req_update_scan_rsp_data(req, 0x00);
-
- if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
- __hci_req_enable_advertising(req);
+ int err;
+
+ if (ext_adv_capable(hdev)) {
+ err = __hci_req_setup_ext_adv_instance(req,
+ 0x00);
+ if (!err)
+ __hci_req_update_scan_rsp_data(req,
+ 0x00);
+ } else {
+ err = 0;
+ __hci_req_update_adv_data(req, 0x00);
+ __hci_req_update_scan_rsp_data(req, 0x00);
+ }
+
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
+ if (!ext_adv_capable(hdev))
+ __hci_req_enable_advertising(req);
+ else if (!err)
+ __hci_req_enable_ext_advertising(req);
+ }
} else if (!list_empty(&hdev->adv_instances)) {
struct adv_info *adv_instance;
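
The hci_req_start_scan() hunk above builds a variable-length command: a fixed header followed by one parameter block per enabled scanning PHY, with plen tracking the bytes actually filled in. A minimal userspace sketch of that assembly pattern, using simplified stand-in structs rather than the real hci_cp_* layouts and skipping the cpu_to_le16() conversions the kernel performs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct scan_phy_params {	/* stand-in, not the hci_cp_* layout */
	uint8_t  type;
	uint16_t interval;
	uint16_t window;
} __attribute__((packed));

struct ext_scan_params {
	uint8_t own_addr_type;
	uint8_t filter_policy;
	uint8_t scanning_phys;	/* bitmask of the PHYs described below */
	uint8_t data[];		/* per-PHY entries appended here */
} __attribute__((packed));

#define PHY_1M    0x01
#define PHY_CODED 0x04

static size_t build_scan_cmd(uint8_t *buf, int use_1m, int use_coded,
			     uint8_t type, uint16_t interval, uint16_t window)
{
	struct ext_scan_params *cp = (void *)buf;
	struct scan_phy_params *phy = (void *)cp->data;
	size_t plen = sizeof(*cp);

	memset(cp, 0, sizeof(*cp));
	if (use_1m) {
		cp->scanning_phys |= PHY_1M;
		*phy = (struct scan_phy_params){ type, interval, window };
		plen += sizeof(*phy);
		phy++;
	}
	if (use_coded) {
		cp->scanning_phys |= PHY_CODED;
		*phy = (struct scan_phy_params){ type, interval, window };
		plen += sizeof(*phy);
	}
	return plen;	/* what the patch passes as plen to hci_req_add() */
}

int main(void)
{
	uint8_t buf[sizeof(struct ext_scan_params) +
		    2 * sizeof(struct scan_phy_params)];

	printf("1M only:    %zu bytes\n", build_scan_cmd(buf, 1, 0, 1, 16, 16));
	printf("1M + coded: %zu bytes\n", build_scan_cmd(buf, 1, 1, 1, 16, 16));
	return 0;
}

The same header-plus-sets pattern recurs in __hci_req_enable_ext_advertising(), where num_of_sets entries follow the enable header.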
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index 702beb140d9f..692cc8b13368 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -80,6 +80,14 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
struct hci_request *req, u8 instance,
bool force);
+int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance);
+int __hci_req_start_ext_adv(struct hci_request *req, u8 instance);
+void __hci_req_enable_ext_advertising(struct hci_request *req);
+void __hci_req_clear_ext_adv_sets(struct hci_request *req);
+int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
+ bool use_rpa, struct adv_info *adv_instance,
+ u8 *own_addr_type, bdaddr_t *rand_addr);
+
void __hci_req_update_class(struct hci_request *req);
/* Returns true if HCI commands were queued */
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 1036e4fa1ea2..253975cce943 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_session *session)
del_timer(&session->timer);
}
-static void hidp_process_report(struct hidp_session *session,
- int type, const u8 *data, int len, int intr)
+static void hidp_process_report(struct hidp_session *session, int type,
+ const u8 *data, unsigned int len, int intr)
{
if (len > HID_MAX_BUFFER_SIZE)
len = HID_MAX_BUFFER_SIZE;
@@ -775,7 +775,7 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->version = req->version;
hid->country = req->country;
- strncpy(hid->name, req->name, sizeof(req->name) - 1);
+ strncpy(hid->name, req->name, sizeof(hid->name));
snprintf(hid->phys, sizeof(hid->phys), "%pMR",
&l2cap_pi(session->ctrl_sock->sk)->chan->src);
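
The one-line hidp change above fixes a bounded-copy bug: the strncpy() bound was taken from the source buffer, so a destination smaller than req->name could be overrun. A toy version of the idiom with hypothetical sizes; this sketch uses the more defensive size-minus-one variant with an explicit terminator, since strncpy() does not NUL-terminate on truncation:

#include <stdio.h>
#include <string.h>

struct request { char name[128]; };	/* hypothetical sizes */
struct device  { char name[64];  };

static void copy_name(struct device *d, const struct request *r)
{
	/* BAD:  strncpy(d->name, r->name, sizeof(r->name) - 1)
	 *       may write up to 127 bytes into a 64-byte field. */

	/* Safe: bound by the destination and terminate explicitly. */
	strncpy(d->name, r->name, sizeof(d->name) - 1);
	d->name[sizeof(d->name) - 1] = '\0';
}

int main(void)
{
	struct request r;
	struct device d;

	memset(r.name, 'A', sizeof(r.name) - 1);
	r.name[sizeof(r.name) - 1] = '\0';
	copy_name(&d, &r);
	printf("copied %zu chars\n", strlen(d.name));	/* 63 */
	return 0;
}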
diff --git a/net/bluetooth/leds.c b/net/bluetooth/leds.c
index cb670b5594eb..6d59a5023231 100644
--- a/net/bluetooth/leds.c
+++ b/net/bluetooth/leds.c
@@ -43,7 +43,7 @@ void hci_leds_update_powered(struct hci_dev *hdev, bool enabled)
led_trigger_event(bt_power_led_trigger, enabled ? LED_FULL : LED_OFF);
}
-static void power_activate(struct led_classdev *led_cdev)
+static int power_activate(struct led_classdev *led_cdev)
{
struct hci_basic_led_trigger *htrig;
bool powered;
@@ -52,10 +52,12 @@ static void power_activate(struct led_classdev *led_cdev)
powered = test_bit(HCI_UP, &htrig->hdev->flags);
led_trigger_event(led_cdev->trigger, powered ? LED_FULL : LED_OFF);
+
+ return 0;
}
static struct led_trigger *led_allocate_basic(struct hci_dev *hdev,
- void (*activate)(struct led_classdev *led_cdev),
+ int (*activate)(struct led_classdev *led_cdev),
const char *name)
{
struct hci_basic_led_trigger *htrig;
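
The leds.c hunks track an LED-trigger API change: activate() callbacks now return int so registration can propagate failure instead of silently succeeding. A simplified stand-in showing the shape of the new contract (the types here are illustrative, not the real led_classdev):

#include <stdio.h>

struct led_cdev { const char *name; };

struct led_trigger {
	int (*activate)(struct led_cdev *led);	/* was: void (*)(...) */
};

static int power_activate(struct led_cdev *led)
{
	printf("activating trigger for %s\n", led->name);
	return 0;		/* can now report -ENOMEM and friends */
}

static int trigger_set(struct led_cdev *led, const struct led_trigger *trig)
{
	int err = trig->activate(led);

	if (err)		/* the core can unwind on failure */
		fprintf(stderr, "activate failed: %d\n", err);
	return err;
}

int main(void)
{
	struct led_cdev led = { "hci0-power" };
	struct led_trigger trig = { power_activate };

	return trigger_set(&led, &trig);
}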
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 8a80d48d89c4..3bdc8f3ca259 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -617,6 +617,127 @@ static int read_config_info(struct sock *sk, struct hci_dev *hdev,
&rp, sizeof(rp));
}
+static u32 get_supported_phys(struct hci_dev *hdev)
+{
+ u32 supported_phys = 0;
+
+ if (lmp_bredr_capable(hdev)) {
+ supported_phys |= MGMT_PHY_BR_1M_1SLOT;
+
+ if (hdev->features[0][0] & LMP_3SLOT)
+ supported_phys |= MGMT_PHY_BR_1M_3SLOT;
+
+ if (hdev->features[0][0] & LMP_5SLOT)
+ supported_phys |= MGMT_PHY_BR_1M_5SLOT;
+
+ if (lmp_edr_2m_capable(hdev)) {
+ supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
+
+ if (lmp_edr_3slot_capable(hdev))
+ supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
+
+ if (lmp_edr_5slot_capable(hdev))
+ supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
+
+ if (lmp_edr_3m_capable(hdev)) {
+ supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
+
+ if (lmp_edr_3slot_capable(hdev))
+ supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
+
+ if (lmp_edr_5slot_capable(hdev))
+ supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
+ }
+ }
+ }
+
+ if (lmp_le_capable(hdev)) {
+ supported_phys |= MGMT_PHY_LE_1M_TX;
+ supported_phys |= MGMT_PHY_LE_1M_RX;
+
+ if (hdev->le_features[1] & HCI_LE_PHY_2M) {
+ supported_phys |= MGMT_PHY_LE_2M_TX;
+ supported_phys |= MGMT_PHY_LE_2M_RX;
+ }
+
+ if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
+ supported_phys |= MGMT_PHY_LE_CODED_TX;
+ supported_phys |= MGMT_PHY_LE_CODED_RX;
+ }
+ }
+
+ return supported_phys;
+}
+
+static u32 get_selected_phys(struct hci_dev *hdev)
+{
+ u32 selected_phys = 0;
+
+ if (lmp_bredr_capable(hdev)) {
+ selected_phys |= MGMT_PHY_BR_1M_1SLOT;
+
+ if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
+ selected_phys |= MGMT_PHY_BR_1M_3SLOT;
+
+ if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
+ selected_phys |= MGMT_PHY_BR_1M_5SLOT;
+
+ if (lmp_edr_2m_capable(hdev)) {
+ if (!(hdev->pkt_type & HCI_2DH1))
+ selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
+
+ if (lmp_edr_3slot_capable(hdev) &&
+ !(hdev->pkt_type & HCI_2DH3))
+ selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
+
+ if (lmp_edr_5slot_capable(hdev) &&
+ !(hdev->pkt_type & HCI_2DH5))
+ selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
+
+ if (lmp_edr_3m_capable(hdev)) {
+ if (!(hdev->pkt_type & HCI_3DH1))
+ selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
+
+ if (lmp_edr_3slot_capable(hdev) &&
+ !(hdev->pkt_type & HCI_3DH3))
+ selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
+
+ if (lmp_edr_5slot_capable(hdev) &&
+ !(hdev->pkt_type & HCI_3DH5))
+ selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
+ }
+ }
+ }
+
+ if (lmp_le_capable(hdev)) {
+ if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
+ selected_phys |= MGMT_PHY_LE_1M_TX;
+
+ if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
+ selected_phys |= MGMT_PHY_LE_1M_RX;
+
+ if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
+ selected_phys |= MGMT_PHY_LE_2M_TX;
+
+ if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
+ selected_phys |= MGMT_PHY_LE_2M_RX;
+
+ if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
+ selected_phys |= MGMT_PHY_LE_CODED_TX;
+
+ if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
+ selected_phys |= MGMT_PHY_LE_CODED_RX;
+ }
+
+ return selected_phys;
+}
+
+static u32 get_configurable_phys(struct hci_dev *hdev)
+{
+ return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
+ ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
+}
+
static u32 get_supported_settings(struct hci_dev *hdev)
{
u32 settings = 0;
@@ -654,6 +775,8 @@ static u32 get_supported_settings(struct hci_dev *hdev)
hdev->set_bdaddr)
settings |= MGMT_SETTING_CONFIGURATION;
+ settings |= MGMT_SETTING_PHY_CONFIGURATION;
+
return settings;
}
@@ -817,7 +940,10 @@ static void rpa_expired(struct work_struct *work)
* function.
*/
hci_req_init(&req, hdev);
- __hci_req_enable_advertising(&req);
+ if (ext_adv_capable(hdev))
+ __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
+ else
+ __hci_req_enable_advertising(&req);
hci_req_run(&req, NULL);
}
@@ -1721,10 +1847,17 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
*/
if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
struct hci_request req;
-
hci_req_init(&req, hdev);
- __hci_req_update_adv_data(&req, 0x00);
- __hci_req_update_scan_rsp_data(&req, 0x00);
+ if (ext_adv_capable(hdev)) {
+ int err;
+
+ err = __hci_req_setup_ext_adv_instance(&req, 0x00);
+ if (!err)
+ __hci_req_update_scan_rsp_data(&req, 0x00);
+ } else {
+ __hci_req_update_adv_data(&req, 0x00);
+ __hci_req_update_scan_rsp_data(&req, 0x00);
+ }
hci_req_run(&req, NULL);
hci_update_background_scan(hdev);
}
@@ -1823,6 +1956,9 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
} else {
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
__hci_req_disable_advertising(&req);
+
+ if (ext_adv_capable(hdev))
+ __hci_req_clear_ext_adv_sets(&req);
}
hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
@@ -3184,6 +3320,225 @@ static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
return err;
}
+static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_rp_get_phy_confguration rp;
+
+ BT_DBG("sock %p %s", sk, hdev->name);
+
+ hci_dev_lock(hdev);
+
+ memset(&rp, 0, sizeof(rp));
+
+ rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
+ rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
+ rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
+
+ hci_dev_unlock(hdev);
+
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
+ &rp, sizeof(rp));
+}
+
+int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
+{
+ struct mgmt_ev_phy_configuration_changed ev;
+
+ memset(&ev, 0, sizeof(ev));
+
+ ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
+
+ return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
+ sizeof(ev), skip);
+}
+
+static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode, struct sk_buff *skb)
+{
+ struct mgmt_pending_cmd *cmd;
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
+ if (!cmd)
+ goto unlock;
+
+ if (status) {
+ mgmt_cmd_status(cmd->sk, hdev->id,
+ MGMT_OP_SET_PHY_CONFIGURATION,
+ mgmt_status(status));
+ } else {
+ mgmt_cmd_complete(cmd->sk, hdev->id,
+ MGMT_OP_SET_PHY_CONFIGURATION, 0,
+ NULL, 0);
+
+ mgmt_phy_configuration_changed(hdev, cmd->sk);
+ }
+
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_set_phy_confguration *cp = data;
+ struct hci_cp_le_set_default_phy cp_phy;
+ struct mgmt_pending_cmd *cmd;
+ struct hci_request req;
+ u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
+ u16 pkt_type = (HCI_DH1 | HCI_DM1);
+ bool changed = false;
+ int err;
+
+ BT_DBG("sock %p %s", sk, hdev->name);
+
+ configurable_phys = get_configurable_phys(hdev);
+ supported_phys = get_supported_phys(hdev);
+ selected_phys = __le32_to_cpu(cp->selected_phys);
+
+ if (selected_phys & ~supported_phys)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_PHY_CONFIGURATION,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ unconfigure_phys = supported_phys & ~configurable_phys;
+
+ if ((selected_phys & unconfigure_phys) != unconfigure_phys)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_PHY_CONFIGURATION,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ if (selected_phys == get_selected_phys(hdev))
+ return mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_SET_PHY_CONFIGURATION,
+ 0, NULL, 0);
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_PHY_CONFIGURATION,
+ MGMT_STATUS_REJECTED);
+ goto unlock;
+ }
+
+ if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_PHY_CONFIGURATION,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
+ pkt_type |= (HCI_DH3 | HCI_DM3);
+ else
+ pkt_type &= ~(HCI_DH3 | HCI_DM3);
+
+ if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
+ pkt_type |= (HCI_DH5 | HCI_DM5);
+ else
+ pkt_type &= ~(HCI_DH5 | HCI_DM5);
+
+ if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
+ pkt_type &= ~HCI_2DH1;
+ else
+ pkt_type |= HCI_2DH1;
+
+ if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
+ pkt_type &= ~HCI_2DH3;
+ else
+ pkt_type |= HCI_2DH3;
+
+ if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
+ pkt_type &= ~HCI_2DH5;
+ else
+ pkt_type |= HCI_2DH5;
+
+ if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
+ pkt_type &= ~HCI_3DH1;
+ else
+ pkt_type |= HCI_3DH1;
+
+ if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
+ pkt_type &= ~HCI_3DH3;
+ else
+ pkt_type |= HCI_3DH3;
+
+ if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
+ pkt_type &= ~HCI_3DH5;
+ else
+ pkt_type |= HCI_3DH5;
+
+ if (pkt_type != hdev->pkt_type) {
+ hdev->pkt_type = pkt_type;
+ changed = true;
+ }
+
+ if ((selected_phys & MGMT_PHY_LE_MASK) ==
+ (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
+ if (changed)
+ mgmt_phy_configuration_changed(hdev, sk);
+
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_SET_PHY_CONFIGURATION,
+ 0, NULL, 0);
+
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
+ len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ hci_req_init(&req, hdev);
+
+ memset(&cp_phy, 0, sizeof(cp_phy));
+
+ if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
+ cp_phy.all_phys |= 0x01;
+
+ if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
+ cp_phy.all_phys |= 0x02;
+
+ if (selected_phys & MGMT_PHY_LE_1M_TX)
+ cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
+
+ if (selected_phys & MGMT_PHY_LE_2M_TX)
+ cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
+
+ if (selected_phys & MGMT_PHY_LE_CODED_TX)
+ cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
+
+ if (selected_phys & MGMT_PHY_LE_1M_RX)
+ cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
+
+ if (selected_phys & MGMT_PHY_LE_2M_RX)
+ cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
+
+ if (selected_phys & MGMT_PHY_LE_CODED_RX)
+ cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
+
+ hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
+
+ err = hci_req_run_skb(&req, set_default_phy_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb)
{
@@ -4037,9 +4392,14 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
* HCI_ADVERTISING flag is not yet set.
*/
hdev->cur_adv_instance = 0x00;
- __hci_req_update_adv_data(&req, 0x00);
- __hci_req_update_scan_rsp_data(&req, 0x00);
- __hci_req_enable_advertising(&req);
+
+ if (ext_adv_capable(hdev)) {
+ __hci_req_start_ext_adv(&req, 0x00);
+ } else {
+ __hci_req_update_adv_data(&req, 0x00);
+ __hci_req_update_scan_rsp_data(&req, 0x00);
+ __hci_req_enable_advertising(&req);
+ }
} else {
__hci_req_disable_advertising(&req);
}
@@ -4609,6 +4969,7 @@ static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
+ hci_adv_instances_set_rpa_expired(hdev, true);
if (cp->privacy == 0x02)
hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
else
@@ -4617,6 +4978,7 @@ static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
memset(hdev->irk, 0, sizeof(hdev->irk));
hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
+ hci_adv_instances_set_rpa_expired(hdev, false);
hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
}
@@ -5967,9 +6329,23 @@ static u32 get_supported_adv_flags(struct hci_dev *hdev)
flags |= MGMT_ADV_FLAG_APPEARANCE;
flags |= MGMT_ADV_FLAG_LOCAL_NAME;
- if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
+ /* With extended advertising the TX_POWER returned from
+ * Set Adv Param will always be valid.
+ */
+ if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
+ ext_adv_capable(hdev))
flags |= MGMT_ADV_FLAG_TX_POWER;
+ if (ext_adv_capable(hdev)) {
+ flags |= MGMT_ADV_FLAG_SEC_1M;
+
+ if (hdev->le_features[1] & HCI_LE_PHY_2M)
+ flags |= MGMT_ADV_FLAG_SEC_2M;
+
+ if (hdev->le_features[1] & HCI_LE_PHY_CODED)
+ flags |= MGMT_ADV_FLAG_SEC_CODED;
+ }
+
return flags;
}
@@ -6175,7 +6551,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
struct mgmt_cp_add_advertising *cp = data;
struct mgmt_rp_add_advertising rp;
u32 flags;
- u32 supported_flags;
+ u32 supported_flags, phy_flags;
u8 status;
u16 timeout, duration;
unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
@@ -6205,10 +6581,12 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
duration = __le16_to_cpu(cp->duration);
/* The current implementation only supports a subset of the specified
- * flags.
+ * flags. Also check that the secondary PHY flags are mutually exclusive.
*/
supported_flags = get_supported_adv_flags(hdev);
- if (flags & ~supported_flags)
+ phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
+ if (flags & ~supported_flags ||
+ ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
@@ -6544,6 +6922,8 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
HCI_MGMT_UNTRUSTED },
{ set_appearance, MGMT_SET_APPEARANCE_SIZE },
+ { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
+ { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
};
void mgmt_index_added(struct hci_dev *hdev)
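
The add_advertising() guard above rejects requests that set more than one secondary-PHY flag: x & -x isolates the lowest set bit, so x ^ (x & -x) is non-zero exactly when a second bit remains, which makes it equivalent to the classic x & (x - 1) power-of-two test. A quick exhaustive check:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int more_than_one_bit(uint32_t x)
{
	return x && (x ^ (x & -x));	/* the form used in the patch */
}

int main(void)
{
	for (uint32_t x = 0; x < 1 << 20; x++)
		assert(!!more_than_one_bit(x) == !!(x & (x - 1)));
	printf("x ^ (x & -x) agrees with x & (x - 1)\n");
	return 0;
}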
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 413b8ee49fec..8f0f9279eac9 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -393,7 +393,8 @@ static void sco_sock_cleanup_listen(struct sock *parent)
*/
static void sco_sock_kill(struct sock *sk)
{
- if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
+ if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket ||
+ sock_flag(sk, SOCK_DEAD))
return;
BT_DBG("sk %p state %d", sk, sk->sk_state);
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 22a78eedf4b1..f4078830ea50 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -11,12 +11,14 @@
#include <linux/filter.h>
#include <linux/sched/signal.h>
-static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx)
+static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
+ struct bpf_cgroup_storage *storage)
{
u32 ret;
preempt_disable();
rcu_read_lock();
+ bpf_cgroup_storage_set(storage);
ret = BPF_PROG_RUN(prog, ctx);
rcu_read_unlock();
preempt_enable();
@@ -26,14 +28,19 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx)
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
+ struct bpf_cgroup_storage *storage = NULL;
u64 time_start, time_spent = 0;
u32 ret = 0, i;
+ storage = bpf_cgroup_storage_alloc(prog);
+ if (IS_ERR(storage))
+ return PTR_ERR(storage);
+
if (!repeat)
repeat = 1;
time_start = ktime_get_ns();
for (i = 0; i < repeat; i++) {
- ret = bpf_test_run_one(prog, ctx);
+ ret = bpf_test_run_one(prog, ctx, storage);
if (need_resched()) {
if (signal_pending(current))
break;
@@ -46,6 +53,8 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
do_div(time_spent, repeat);
*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
+ bpf_cgroup_storage_free(storage);
+
return ret;
}
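
bpf_test_run() above now wires per-run cgroup storage into each invocation, but the timing harness is unchanged: run the program repeat times, average the elapsed nanoseconds, and clamp the report to 32 bits. A userspace analogue of that loop, with clock_gettime() standing in for ktime_get_ns() and the resched/signal handling omitted:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static uint32_t run_timed(uint32_t (*prog)(void *), void *ctx,
			  uint32_t repeat, uint32_t *avg_ns)
{
	uint64_t spent;
	uint32_t ret = 0, i;

	if (!repeat)
		repeat = 1;	/* same default as the kernel helper */

	spent = now_ns();
	for (i = 0; i < repeat; i++)
		ret = prog(ctx);
	spent = now_ns() - spent;

	spent /= repeat;	/* the kernel uses do_div() here */
	*avg_ns = spent > UINT32_MAX ? UINT32_MAX : (uint32_t)spent;
	return ret;		/* last return code, as in the kernel */
}

static uint32_t demo_prog(void *ctx)
{
	(void)ctx;
	return 42;
}

int main(void)
{
	uint32_t avg;
	uint32_t ret = run_timed(demo_prog, NULL, 1000000, &avg);

	printf("ret=%u avg=%u ns\n", ret, avg);
	return 0;
}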
diff --git a/net/bpfilter/Kconfig b/net/bpfilter/Kconfig
index 76deb6615883..e558b46596c4 100644
--- a/net/bpfilter/Kconfig
+++ b/net/bpfilter/Kconfig
@@ -13,4 +13,3 @@ config BPFILTER_UMH
help
This builds bpfilter kernel module with embedded user mode helper
endif
-
diff --git a/net/bpfilter/Makefile b/net/bpfilter/Makefile
index 39c6980b5d99..0947ee7f70d5 100644
--- a/net/bpfilter/Makefile
+++ b/net/bpfilter/Makefile
@@ -5,14 +5,14 @@
hostprogs-y := bpfilter_umh
bpfilter_umh-objs := main.o
-HOSTCFLAGS += -I. -Itools/include/ -Itools/include/uapi
+KBUILD_HOSTCFLAGS += -I. -Itools/include/ -Itools/include/uapi
HOSTCC := $(CC)
ifeq ($(CONFIG_BPFILTER_UMH), y)
# builtin bpfilter_umh should be compiled with -static
# since rootfs isn't mounted at the time the __init
# function is called and do_execv won't find the ELF interpreter
-HOSTLDFLAGS += -static
+KBUILD_HOSTLDFLAGS += -static
endif
$(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 9019f326fe81..5372e2042adf 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -142,7 +142,20 @@ static int deliver_clone(const struct net_bridge_port *prev,
void br_forward(const struct net_bridge_port *to,
struct sk_buff *skb, bool local_rcv, bool local_orig)
{
- if (to && should_deliver(to, skb)) {
+ if (unlikely(!to))
+ goto out;
+
+ /* redirect to backup link if the destination port is down */
+ if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) {
+ struct net_bridge_port *backup_port;
+
+ backup_port = rcu_dereference(to->backup_port);
+ if (unlikely(!backup_port))
+ goto out;
+ to = backup_port;
+ }
+
+ if (should_deliver(to, skb)) {
if (local_rcv)
deliver_clone(to, skb, local_orig);
else
@@ -150,6 +163,7 @@ void br_forward(const struct net_bridge_port *to,
return;
}
+out:
if (!local_rcv)
kfree_skb(skb);
}
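
The br_forward() change above adds a fallback path: when the destination port has lost carrier and a backup port is configured, the frame is redirected there before the usual should_deliver() check. A minimal model of that decision, with plain pointers where the kernel reads backup_port under RCU:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct port {
	const char  *name;
	bool         carrier_ok;
	struct port *backup;
};

static struct port *pick_dest(struct port *to)
{
	if (!to)
		return NULL;
	/* redirect to the backup link if the primary is down */
	if (!to->carrier_ok && to->backup)
		to = to->backup;
	return to;
}

int main(void)
{
	struct port backup  = { "eth1", true,  NULL };
	struct port primary = { "eth0", false, &backup };

	printf("deliver via %s\n", pick_dest(&primary)->name);	/* eth1 */
	return 0;
}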
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 05e42d86882d..0363f1bdc401 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -26,6 +26,7 @@
#include <net/sock.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
+#include <net/net_namespace.h>
#include "br_private.h"
@@ -169,6 +170,58 @@ void br_manage_promisc(struct net_bridge *br)
}
}
+int nbp_backup_change(struct net_bridge_port *p,
+ struct net_device *backup_dev)
+{
+ struct net_bridge_port *old_backup = rtnl_dereference(p->backup_port);
+ struct net_bridge_port *backup_p = NULL;
+
+ ASSERT_RTNL();
+
+ if (backup_dev) {
+ if (!br_port_exists(backup_dev))
+ return -ENOENT;
+
+ backup_p = br_port_get_rtnl(backup_dev);
+ if (backup_p->br != p->br)
+ return -EINVAL;
+ }
+
+ if (p == backup_p)
+ return -EINVAL;
+
+ if (old_backup == backup_p)
+ return 0;
+
+ /* if the backup link is already set, clear it */
+ if (old_backup)
+ old_backup->backup_redirected_cnt--;
+
+ if (backup_p)
+ backup_p->backup_redirected_cnt++;
+ rcu_assign_pointer(p->backup_port, backup_p);
+
+ return 0;
+}
+
+static void nbp_backup_clear(struct net_bridge_port *p)
+{
+ nbp_backup_change(p, NULL);
+ if (p->backup_redirected_cnt) {
+ struct net_bridge_port *cur_p;
+
+ list_for_each_entry(cur_p, &p->br->port_list, list) {
+ struct net_bridge_port *backup_p;
+
+ backup_p = rtnl_dereference(cur_p->backup_port);
+ if (backup_p == p)
+ nbp_backup_change(cur_p, NULL);
+ }
+ }
+
+ WARN_ON(rcu_access_pointer(p->backup_port) || p->backup_redirected_cnt);
+}
+
static void nbp_update_port_count(struct net_bridge *br)
{
struct net_bridge_port *p;
@@ -204,11 +257,19 @@ static void release_nbp(struct kobject *kobj)
kfree(p);
}
+static void brport_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ struct net_bridge_port *p = kobj_to_brport(kobj);
+
+ net_ns_get_ownership(dev_net(p->dev), uid, gid);
+}
+
static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
.sysfs_ops = &brport_sysfs_ops,
#endif
.release = release_nbp,
+ .get_ownership = brport_get_ownership,
};
static void destroy_nbp(struct net_bridge_port *p)
@@ -286,6 +347,7 @@ static void del_nbp(struct net_bridge_port *p)
nbp_vlan_flush(p);
br_fdb_delete_by_port(br, p, 0, 1);
switchdev_deferred_process();
+ nbp_backup_clear(p);
nbp_update_port_count(br);
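
nbp_backup_change() above keeps a backup_redirected_cnt on every port so that del_nbp() knows whether any other port still names the one being removed as its backup and a cleanup walk is needed. A small model of that bookkeeping:

#include <assert.h>
#include <stddef.h>

struct port {
	struct port *backup;
	unsigned int redirected_cnt;	/* ports using us as their backup */
};

static void backup_change(struct port *p, struct port *new_backup)
{
	if (p->backup == new_backup)
		return;
	if (p->backup)
		p->backup->redirected_cnt--;	/* drop the old reference */
	if (new_backup)
		new_backup->redirected_cnt++;
	p->backup = new_backup;
}

int main(void)
{
	struct port a = { NULL, 0 }, b = { NULL, 0 }, c = { NULL, 0 };

	backup_change(&a, &c);
	backup_change(&b, &c);
	assert(c.redirected_cnt == 2);
	backup_change(&a, NULL);	/* a drops its backup */
	assert(c.redirected_cnt == 1);
	return 0;
}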
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 920665dd92db..20ed7adcf1cc 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1423,10 +1423,10 @@ static void br_multicast_query_received(struct net_bridge *br,
br_multicast_mark_router(br, port);
}
-static int br_ip4_multicast_query(struct net_bridge *br,
- struct net_bridge_port *port,
- struct sk_buff *skb,
- u16 vid)
+static void br_ip4_multicast_query(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb,
+ u16 vid)
{
const struct iphdr *iph = ip_hdr(skb);
struct igmphdr *ih = igmp_hdr(skb);
@@ -1439,7 +1439,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
unsigned long now = jiffies;
unsigned int offset = skb_transport_offset(skb);
__be32 group;
- int err = 0;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
@@ -1498,7 +1497,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
out:
spin_unlock(&br->multicast_lock);
- return err;
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -1828,7 +1826,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
break;
case IGMP_HOST_MEMBERSHIP_QUERY:
- err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
+ br_ip4_multicast_query(br, port, skb_trimmed, vid);
break;
case IGMP_HOST_LEAVE_MESSAGE:
br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 9b16eaf33819..6e0dc6bcd32a 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -26,6 +26,7 @@
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
+#include <uapi/linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 9f5eb05b0373..ec2b58a09f76 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -169,13 +169,15 @@ static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
+ nla_total_size(br_get_link_af_size_filtered(dev,
- filter_mask)); /* IFLA_AF_SPEC */
+ filter_mask)) /* IFLA_AF_SPEC */
+ + nla_total_size(4); /* IFLA_BRPORT_BACKUP_PORT */
}
static int br_port_fill_attrs(struct sk_buff *skb,
const struct net_bridge_port *p)
{
u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
+ struct net_bridge_port *backup_p;
u64 timerval;
if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
@@ -237,6 +239,14 @@ static int br_port_fill_attrs(struct sk_buff *skb,
return -EMSGSIZE;
#endif
+ /* we might be called with only br->lock held */
+ rcu_read_lock();
+ backup_p = rcu_dereference(p->backup_port);
+ if (backup_p)
+ nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
+ backup_p->dev->ifindex);
+ rcu_read_unlock();
+
return 0;
}
@@ -663,6 +673,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
[IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
[IFLA_BRPORT_ISOLATED] = { .type = NLA_U8 },
+ [IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
};
/* Change the state of the port and notify spanning tree */
@@ -817,6 +828,23 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
if (err)
return err;
+ if (tb[IFLA_BRPORT_BACKUP_PORT]) {
+ struct net_device *backup_dev = NULL;
+ u32 backup_ifindex;
+
+ backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
+ if (backup_ifindex) {
+ backup_dev = __dev_get_by_index(dev_net(p->dev),
+ backup_ifindex);
+ if (!backup_dev)
+ return -ENOENT;
+ }
+
+ err = nbp_backup_change(p, backup_dev);
+ if (err)
+ return err;
+ }
+
br_port_flags_change(p, old_flags ^ p->flags);
return 0;
}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 5216a524b537..11ed2029985f 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -237,6 +237,7 @@ struct net_bridge_port {
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
struct net_bridge_vlan_group __rcu *vlgrp;
#endif
+ struct net_bridge_port __rcu *backup_port;
/* STP */
u8 priority;
@@ -281,8 +282,11 @@ struct net_bridge_port {
int offload_fwd_mark;
#endif
u16 group_fwd_mask;
+ u16 backup_redirected_cnt;
};
+#define kobj_to_brport(obj) container_of(obj, struct net_bridge_port, kobj)
+
#define br_auto_port(p) ((p)->flags & BR_AUTO_MASK)
#define br_promisc_port(p) ((p)->flags & BR_PROMISC)
@@ -595,6 +599,7 @@ netdev_features_t br_features_recompute(struct net_bridge *br,
netdev_features_t features);
void br_port_flags_change(struct net_bridge_port *port, unsigned long mask);
void br_manage_promisc(struct net_bridge *br);
+int nbp_backup_change(struct net_bridge_port *p, struct net_device *backup_dev);
/* br_input.c */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index f99c5bf5c906..7c87a2fe5248 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -25,6 +25,15 @@ struct brport_attribute {
struct attribute attr;
ssize_t (*show)(struct net_bridge_port *, char *);
int (*store)(struct net_bridge_port *, unsigned long);
+ int (*store_raw)(struct net_bridge_port *, char *);
+};
+
+#define BRPORT_ATTR_RAW(_name, _mode, _show, _store) \
+const struct brport_attribute brport_attr_##_name = { \
+ .attr = {.name = __stringify(_name), \
+ .mode = _mode }, \
+ .show = _show, \
+ .store_raw = _store, \
};
#define BRPORT_ATTR(_name, _mode, _show, _store) \
@@ -182,6 +191,38 @@ static int store_group_fwd_mask(struct net_bridge_port *p,
static BRPORT_ATTR(group_fwd_mask, 0644, show_group_fwd_mask,
store_group_fwd_mask);
+static ssize_t show_backup_port(struct net_bridge_port *p, char *buf)
+{
+ struct net_bridge_port *backup_p;
+ int ret = 0;
+
+ rcu_read_lock();
+ backup_p = rcu_dereference(p->backup_port);
+ if (backup_p)
+ ret = sprintf(buf, "%s\n", backup_p->dev->name);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static int store_backup_port(struct net_bridge_port *p, char *buf)
+{
+ struct net_device *backup_dev = NULL;
+ char *nl = strchr(buf, '\n');
+
+ if (nl)
+ *nl = '\0';
+
+ if (strlen(buf) > 0) {
+ backup_dev = __dev_get_by_name(dev_net(p->dev), buf);
+ if (!backup_dev)
+ return -ENOENT;
+ }
+
+ return nbp_backup_change(p, backup_dev);
+}
+static BRPORT_ATTR_RAW(backup_port, 0644, show_backup_port, store_backup_port);
+
BRPORT_ATTR_FLAG(hairpin_mode, BR_HAIRPIN_MODE);
BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD);
BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK);
@@ -245,17 +286,17 @@ static const struct brport_attribute *brport_attrs[] = {
&brport_attr_group_fwd_mask,
&brport_attr_neigh_suppress,
&brport_attr_isolated,
+ &brport_attr_backup_port,
NULL
};
#define to_brport_attr(_at) container_of(_at, struct brport_attribute, attr)
-#define to_brport(obj) container_of(obj, struct net_bridge_port, kobj)
static ssize_t brport_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct brport_attribute *brport_attr = to_brport_attr(attr);
- struct net_bridge_port *p = to_brport(kobj);
+ struct net_bridge_port *p = kobj_to_brport(kobj);
if (!brport_attr->show)
return -EINVAL;
@@ -268,29 +309,48 @@ static ssize_t brport_store(struct kobject *kobj,
const char *buf, size_t count)
{
struct brport_attribute *brport_attr = to_brport_attr(attr);
- struct net_bridge_port *p = to_brport(kobj);
+ struct net_bridge_port *p = kobj_to_brport(kobj);
ssize_t ret = -EINVAL;
- char *endp;
unsigned long val;
+ char *endp;
if (!ns_capable(dev_net(p->dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
- val = simple_strtoul(buf, &endp, 0);
- if (endp != buf) {
- if (!rtnl_trylock())
- return restart_syscall();
- if (p->dev && p->br && brport_attr->store) {
- spin_lock_bh(&p->br->lock);
- ret = brport_attr->store(p, val);
- spin_unlock_bh(&p->br->lock);
- if (!ret) {
- br_ifinfo_notify(RTM_NEWLINK, NULL, p);
- ret = count;
- }
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (!p->dev || !p->br)
+ goto out_unlock;
+
+ if (brport_attr->store_raw) {
+ char *buf_copy;
+
+ buf_copy = kstrndup(buf, count, GFP_KERNEL);
+ if (!buf_copy) {
+ ret = -ENOMEM;
+ goto out_unlock;
}
- rtnl_unlock();
+ spin_lock_bh(&p->br->lock);
+ ret = brport_attr->store_raw(p, buf_copy);
+ spin_unlock_bh(&p->br->lock);
+ kfree(buf_copy);
+ } else if (brport_attr->store) {
+ val = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ goto out_unlock;
+ spin_lock_bh(&p->br->lock);
+ ret = brport_attr->store(p, val);
+ spin_unlock_bh(&p->br->lock);
}
+
+ if (!ret) {
+ br_ifinfo_notify(RTM_NEWLINK, NULL, p);
+ ret = count;
+ }
+out_unlock:
+ rtnl_unlock();
+
return ret;
}
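
The brport_store() rewrite above introduces a second store path: string-valued attributes receive a mutable copy of the raw sysfs buffer (store_raw), while numeric attributes keep the strtoul path. A userspace model of the dispatch, with strndup() and strtoul() standing in for kstrndup() and simple_strtoul():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct attr {
	const char *name;
	int (*store)(unsigned long val);
	int (*store_raw)(char *buf);
};

static int store_flag(unsigned long val)
{
	printf("numeric store: %lu\n", val);
	return 0;
}

static int store_backup(char *buf)
{
	char *nl = strchr(buf, '\n');

	if (nl)
		*nl = '\0';	/* sysfs writes usually end in '\n' */
	printf("raw store: \"%s\"\n", buf);
	return 0;
}

static int dispatch(const struct attr *a, const char *buf, size_t count)
{
	int ret = -22;	/* -EINVAL */

	if (a->store_raw) {
		char *copy = strndup(buf, count);	/* kernel: kstrndup() */

		if (!copy)
			return -12;	/* -ENOMEM */
		ret = a->store_raw(copy);
		free(copy);
	} else if (a->store) {
		char *end;
		unsigned long val = strtoul(buf, &end, 0);

		if (end != buf)
			ret = a->store(val);
	}
	return ret;
}

int main(void)
{
	struct attr flag   = { "hairpin_mode", store_flag, NULL };
	struct attr backup = { "backup_port",  NULL, store_backup };

	dispatch(&flag, "1\n", 2);
	dispatch(&backup, "eth1\n", 5);
	return 0;
}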
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index c41da5fac84f..550324c516ee 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -9,6 +9,7 @@
*/
#include <linux/netfilter_bridge/ebtables.h>
+#include <uapi/linux/netfilter_bridge.h>
#include <linux/module.h>
#define FILTER_VALID_HOOKS ((1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | \
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 08df7406ecb3..c0fb3ca518af 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -9,6 +9,7 @@
*/
#include <linux/netfilter_bridge/ebtables.h>
+#include <uapi/linux/netfilter_bridge.h>
#include <linux/module.h>
#define NAT_VALID_HOOKS ((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT) | \
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 6de981270566..08cbed7d940e 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -89,8 +89,7 @@ static void nft_reject_br_send_v4_tcp_reset(struct net *net,
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
net->ipv4.sysctl_ip_default_ttl);
nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
- niph->ttl = net->ipv4.sysctl_ip_default_ttl;
- niph->tot_len = htons(nskb->len);
+ niph->tot_len = htons(nskb->len);
ip_send_check(niph);
nft_reject_br_push_etherhdr(oldskb, nskb);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index a6fb1b3bcad9..d18965f3291f 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -941,7 +941,7 @@ static __poll_t caif_poll(struct file *file,
__poll_t mask;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, wait);
mask = 0;
/* exceptional events? */
diff --git a/net/compat.c b/net/compat.c
index 7242cce5631b..3b2105f6549d 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -466,8 +466,7 @@ int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
ctv = (struct compat_timeval __user *) userstamp;
err = -ENOENT;
- if (!sock_flag(sk, SOCK_TIMESTAMP))
- sock_enable_timestamp(sk, SOCK_TIMESTAMP);
+ sock_enable_timestamp(sk, SOCK_TIMESTAMP);
tv = ktime_to_timeval(sk->sk_stamp);
if (tv.tv_sec == -1)
return err;
@@ -494,8 +493,7 @@ int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *usersta
ctv = (struct compat_timespec __user *) userstamp;
err = -ENOENT;
- if (!sock_flag(sk, SOCK_TIMESTAMP))
- sock_enable_timestamp(sk, SOCK_TIMESTAMP);
+ sock_enable_timestamp(sk, SOCK_TIMESTAMP);
ts = ktime_to_timespec(sk->sk_stamp);
if (ts.tv_sec == -1)
return err;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 9938952c5c78..9aac0d63d53e 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -837,7 +837,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
__poll_t mask;
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, wait);
mask = 0;
/* exceptional events? */
diff --git a/net/core/dev.c b/net/core/dev.c
index 559a91271f82..325fc5088370 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -149,7 +149,6 @@
#include "net-sysfs.h"
-/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8
/* This should be increased if a protocol with a bigger head is added. */
@@ -2068,11 +2067,13 @@ int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
int i;
+ /* walk through the TCs and see if it falls into any of them */
for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
if ((txq - tc->offset) < tc->count)
return i;
}
+ /* didn't find it, just return -1 to indicate no match */
return -1;
}
@@ -2081,6 +2082,10 @@ int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
EXPORT_SYMBOL(netdev_txq_to_tc);
#ifdef CONFIG_XPS
+struct static_key xps_needed __read_mostly;
+EXPORT_SYMBOL(xps_needed);
+struct static_key xps_rxqs_needed __read_mostly;
+EXPORT_SYMBOL(xps_rxqs_needed);
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P) \
rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
@@ -2092,7 +2097,7 @@ static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
int pos;
if (dev_maps)
- map = xmap_dereference(dev_maps->cpu_map[tci]);
+ map = xmap_dereference(dev_maps->attr_map[tci]);
if (!map)
return false;
@@ -2105,7 +2110,7 @@ static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
break;
}
- RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
+ RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
kfree_rcu(map, rcu);
return false;
}
@@ -2135,34 +2140,71 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
return active;
}
+static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
+ struct xps_dev_maps *dev_maps, unsigned int nr_ids,
+ u16 offset, u16 count, bool is_rxqs_map)
+{
+ bool active = false;
+ int i, j;
+
+ for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
+ j < nr_ids;)
+ active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
+ count);
+ if (!active) {
+ if (is_rxqs_map) {
+ RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
+ } else {
+ RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+
+ for (i = offset + (count - 1); count--; i--)
+ netdev_queue_numa_node_write(
+ netdev_get_tx_queue(dev, i),
+ NUMA_NO_NODE);
+ }
+ kfree_rcu(dev_maps, rcu);
+ }
+}
+
static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
u16 count)
{
+ const unsigned long *possible_mask = NULL;
struct xps_dev_maps *dev_maps;
- int cpu, i;
- bool active = false;
+ unsigned int nr_ids;
+
+ if (!static_key_false(&xps_needed))
+ return;
+ cpus_read_lock();
mutex_lock(&xps_map_mutex);
- dev_maps = xmap_dereference(dev->xps_maps);
+ if (static_key_false(&xps_rxqs_needed)) {
+ dev_maps = xmap_dereference(dev->xps_rxqs_map);
+ if (dev_maps) {
+ nr_ids = dev->num_rx_queues;
+ clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
+ offset, count, true);
+ }
+ }
+
+ dev_maps = xmap_dereference(dev->xps_cpus_map);
if (!dev_maps)
goto out_no_maps;
- for_each_possible_cpu(cpu)
- active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
- offset, count);
-
- if (!active) {
- RCU_INIT_POINTER(dev->xps_maps, NULL);
- kfree_rcu(dev_maps, rcu);
- }
-
- for (i = offset + (count - 1); count--; i--)
- netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
- NUMA_NO_NODE);
+ if (num_possible_cpus() > 1)
+ possible_mask = cpumask_bits(cpu_possible_mask);
+ nr_ids = nr_cpu_ids;
+ clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
+ false);
out_no_maps:
+ if (static_key_enabled(&xps_rxqs_needed))
+ static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
+
+ static_key_slow_dec_cpuslocked(&xps_needed);
mutex_unlock(&xps_map_mutex);
+ cpus_read_unlock();
}
static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
@@ -2170,8 +2212,8 @@ static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}
-static struct xps_map *expand_xps_map(struct xps_map *map,
- int cpu, u16 index)
+static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
+ u16 index, bool is_rxqs_map)
{
struct xps_map *new_map;
int alloc_len = XPS_MIN_MAP_ALLOC;
@@ -2183,7 +2225,7 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
return map;
}
- /* Need to add queue to this CPU's existing map */
+ /* Need to add tx-queue to this CPU's/rx-queue's existing map */
if (map) {
if (pos < map->alloc_len)
return map;
@@ -2191,9 +2233,14 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
alloc_len = map->alloc_len * 2;
}
- /* Need to allocate new map to store queue on this CPU's map */
- new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
- cpu_to_node(cpu));
+ /* Need to allocate a new map to store the tx-queue on this
+ * CPU's / rx-queue's map
+ */
+ if (is_rxqs_map)
+ new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
+ else
+ new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
+ cpu_to_node(attr_index));
if (!new_map)
return NULL;
@@ -2205,32 +2252,53 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
return new_map;
}
-int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
- u16 index)
+/* Must be called under cpus_read_lock */
+int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
+ u16 index, bool is_rxqs_map)
{
+ const unsigned long *online_mask = NULL, *possible_mask = NULL;
struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
- int i, cpu, tci, numa_node_id = -2;
+ int i, j, tci, numa_node_id = -2;
int maps_sz, num_tc = 1, tc = 0;
struct xps_map *map, *new_map;
bool active = false;
+ unsigned int nr_ids;
if (dev->num_tc) {
+ /* Do not allow XPS on subordinate device directly */
num_tc = dev->num_tc;
+ if (num_tc < 0)
+ return -EINVAL;
+
+ /* If queue belongs to subordinate dev use its map */
+ dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+
tc = netdev_txq_to_tc(dev, index);
if (tc < 0)
return -EINVAL;
}
- maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
- if (maps_sz < L1_CACHE_BYTES)
- maps_sz = L1_CACHE_BYTES;
-
mutex_lock(&xps_map_mutex);
+ if (is_rxqs_map) {
+ maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
+ dev_maps = xmap_dereference(dev->xps_rxqs_map);
+ nr_ids = dev->num_rx_queues;
+ } else {
+ maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
+ if (num_possible_cpus() > 1) {
+ online_mask = cpumask_bits(cpu_online_mask);
+ possible_mask = cpumask_bits(cpu_possible_mask);
+ }
+ dev_maps = xmap_dereference(dev->xps_cpus_map);
+ nr_ids = nr_cpu_ids;
+ }
- dev_maps = xmap_dereference(dev->xps_maps);
+ if (maps_sz < L1_CACHE_BYTES)
+ maps_sz = L1_CACHE_BYTES;
/* allocate memory for queue storage */
- for_each_cpu_and(cpu, cpu_online_mask, mask) {
+ for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
+ j < nr_ids;) {
if (!new_dev_maps)
new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
if (!new_dev_maps) {
@@ -2238,73 +2306,85 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
return -ENOMEM;
}
- tci = cpu * num_tc + tc;
- map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
+ tci = j * num_tc + tc;
+ map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
NULL;
- map = expand_xps_map(map, cpu, index);
+ map = expand_xps_map(map, j, index, is_rxqs_map);
if (!map)
goto error;
- RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
+ RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
}
if (!new_dev_maps)
goto out_no_new_maps;
- for_each_possible_cpu(cpu) {
+ static_key_slow_inc_cpuslocked(&xps_needed);
+ if (is_rxqs_map)
+ static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+
+ for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
+ j < nr_ids;) {
/* copy maps belonging to foreign traffic classes */
- for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
+ for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
/* fill in the new device map from the old device map */
- map = xmap_dereference(dev_maps->cpu_map[tci]);
- RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
+ map = xmap_dereference(dev_maps->attr_map[tci]);
+ RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
}
/* We need to explicitly update tci as the previous loop
* could break out early if dev_maps is NULL.
*/
- tci = cpu * num_tc + tc;
+ tci = j * num_tc + tc;
- if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
- /* add queue to CPU maps */
+ if (netif_attr_test_mask(j, mask, nr_ids) &&
+ netif_attr_test_online(j, online_mask, nr_ids)) {
+ /* add tx-queue to CPU/rx-queue maps */
int pos = 0;
- map = xmap_dereference(new_dev_maps->cpu_map[tci]);
+ map = xmap_dereference(new_dev_maps->attr_map[tci]);
while ((pos < map->len) && (map->queues[pos] != index))
pos++;
if (pos == map->len)
map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
- if (numa_node_id == -2)
- numa_node_id = cpu_to_node(cpu);
- else if (numa_node_id != cpu_to_node(cpu))
- numa_node_id = -1;
+ if (!is_rxqs_map) {
+ if (numa_node_id == -2)
+ numa_node_id = cpu_to_node(j);
+ else if (numa_node_id != cpu_to_node(j))
+ numa_node_id = -1;
+ }
#endif
} else if (dev_maps) {
/* fill in the new device map from the old device map */
- map = xmap_dereference(dev_maps->cpu_map[tci]);
- RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
+ map = xmap_dereference(dev_maps->attr_map[tci]);
+ RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
}
/* copy maps belonging to foreign traffic classes */
for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
/* fill in the new device map from the old device map */
- map = xmap_dereference(dev_maps->cpu_map[tci]);
- RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
+ map = xmap_dereference(dev_maps->attr_map[tci]);
+ RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
}
}
- rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+ if (is_rxqs_map)
+ rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
+ else
+ rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);
/* Cleanup old maps */
if (!dev_maps)
goto out_no_old_maps;
- for_each_possible_cpu(cpu) {
- for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
- new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
- map = xmap_dereference(dev_maps->cpu_map[tci]);
+ for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
+ j < nr_ids;) {
+ for (i = num_tc, tci = j * num_tc; i--; tci++) {
+ new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
+ map = xmap_dereference(dev_maps->attr_map[tci]);
if (map && map != new_map)
kfree_rcu(map, rcu);
}
@@ -2317,19 +2397,23 @@ out_no_old_maps:
active = true;
out_no_new_maps:
- /* update Tx queue numa node */
- netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
- (numa_node_id >= 0) ? numa_node_id :
- NUMA_NO_NODE);
+ if (!is_rxqs_map) {
+ /* update Tx queue numa node */
+ netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
+ (numa_node_id >= 0) ?
+ numa_node_id : NUMA_NO_NODE);
+ }
if (!dev_maps)
goto out_no_maps;
- /* removes queue from unused CPUs */
- for_each_possible_cpu(cpu) {
- for (i = tc, tci = cpu * num_tc; i--; tci++)
+ /* removes tx-queue from unused CPUs/rx-queues */
+ for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
+ j < nr_ids;) {
+ for (i = tc, tci = j * num_tc; i--; tci++)
active |= remove_xps_queue(dev_maps, tci, index);
- if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
+ if (!netif_attr_test_mask(j, mask, nr_ids) ||
+ !netif_attr_test_online(j, online_mask, nr_ids))
active |= remove_xps_queue(dev_maps, tci, index);
for (i = num_tc - tc, tci++; --i; tci++)
active |= remove_xps_queue(dev_maps, tci, index);
@@ -2337,7 +2421,10 @@ out_no_new_maps:
/* free map if not active */
if (!active) {
- RCU_INIT_POINTER(dev->xps_maps, NULL);
+ if (is_rxqs_map)
+ RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
+ else
+ RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
kfree_rcu(dev_maps, rcu);
}
@@ -2347,11 +2434,12 @@ out_no_maps:
return 0;
error:
/* remove any maps that we added */
- for_each_possible_cpu(cpu) {
- for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
- new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
+ for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
+ j < nr_ids;) {
+ for (i = num_tc, tci = j * num_tc; i--; tci++) {
+ new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
map = dev_maps ?
- xmap_dereference(dev_maps->cpu_map[tci]) :
+ xmap_dereference(dev_maps->attr_map[tci]) :
NULL;
if (new_map && new_map != map)
kfree(new_map);
@@ -2363,14 +2451,41 @@ error:
kfree(new_dev_maps);
return -ENOMEM;
}
+EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
+
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+ u16 index)
+{
+ int ret;
+
+ cpus_read_lock();
+ ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
+ cpus_read_unlock();
+
+ return ret;
+}
EXPORT_SYMBOL(netif_set_xps_queue);
#endif
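
netif_set_xps_queue() keeps its old prototype for the CPU-map case and is now a thin cpus_read_lock() wrapper around __netif_set_xps_queue(). A minimal driver-side sketch (hypothetical helper; error handling elided) that pins each Tx queue to the same-numbered CPU:

    static void example_setup_xps(struct net_device *dev)
    {
            int i;

            /* Map Tx queue i to CPU i; surplus queues get no XPS entry. */
            for (i = 0; i < dev->real_num_tx_queues && i < nr_cpu_ids; i++)
                    netif_set_xps_queue(dev, cpumask_of(i), i);
    }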
+static void netdev_unbind_all_sb_channels(struct net_device *dev)
+{
+ struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
+
+ /* Unbind any subordinate channels */
+ while (txq-- != &dev->_tx[0]) {
+ if (txq->sb_dev)
+ netdev_unbind_sb_channel(dev, txq->sb_dev);
+ }
+}
+
void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
netif_reset_xps_queues_gt(dev, 0);
#endif
+ netdev_unbind_all_sb_channels(dev);
+
+ /* Reset TC configuration of device */
dev->num_tc = 0;
memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
@@ -2399,11 +2514,77 @@ int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
#ifdef CONFIG_XPS
netif_reset_xps_queues_gt(dev, 0);
#endif
+ netdev_unbind_all_sb_channels(dev);
+
dev->num_tc = num_tc;
return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
+void netdev_unbind_sb_channel(struct net_device *dev,
+ struct net_device *sb_dev)
+{
+ struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
+
+#ifdef CONFIG_XPS
+ netif_reset_xps_queues_gt(sb_dev, 0);
+#endif
+ memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
+ memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
+
+ while (txq-- != &dev->_tx[0]) {
+ if (txq->sb_dev == sb_dev)
+ txq->sb_dev = NULL;
+ }
+}
+EXPORT_SYMBOL(netdev_unbind_sb_channel);
+
+int netdev_bind_sb_channel_queue(struct net_device *dev,
+ struct net_device *sb_dev,
+ u8 tc, u16 count, u16 offset)
+{
+ /* Make certain the sb_dev and dev are already configured */
+ if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
+ return -EINVAL;
+
+ /* We cannot hand out queues we don't have */
+ if ((offset + count) > dev->real_num_tx_queues)
+ return -EINVAL;
+
+ /* Record the mapping */
+ sb_dev->tc_to_txq[tc].count = count;
+ sb_dev->tc_to_txq[tc].offset = offset;
+
+ /* Provide a way for Tx queue to find the tc_to_txq map or
+ * XPS map for itself.
+ */
+ while (count--)
+ netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
+
+ return 0;
+}
+EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
+
+int netdev_set_sb_channel(struct net_device *dev, u16 channel)
+{
+ /* Do not use a multiqueue device to represent a subordinate channel */
+ if (netif_is_multiqueue(dev))
+ return -ENODEV;
+
+ /* We allow channels 1 - 32767 to be used for subordinate channels.
+ * Channel 0 is meant to be "native" mode and used only to represent
+ * the main root device. We allow writing 0 to reset the device back
+ * to normal mode after being used as a subordinate channel.
+ */
+ if (channel > S16_MAX)
+ return -EINVAL;
+
+ dev->num_tc = -channel;
+
+ return 0;
+}
+EXPORT_SYMBOL(netdev_set_sb_channel);
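
Taken together, the helpers above let an upper device (e.g. a macvlan being offloaded) own a slice of the lower device's Tx queues. A sketch of the expected call order (function name hypothetical; the lower device must already have traffic classes configured via netdev_set_num_tc()):

    static int example_bind_subordinate(struct net_device *lower,
                                        struct net_device *sb_dev)
    {
            int err;

            /* Mark sb_dev as subordinate channel 1 (num_tc becomes -1). */
            err = netdev_set_sb_channel(sb_dev, 1);
            if (err)
                    return err;

            /* Back sb_dev's TC 0 with 4 of lower's queues at offset 8. */
            return netdev_bind_sb_channel_queue(lower, sb_dev, 0, 4, 8);
    }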
+
/*
* Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
* greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
@@ -2615,24 +2796,26 @@ EXPORT_SYMBOL(netif_device_attach);
* Returns a Tx hash based on the given packet descriptor and the Tx queue
* count to be used as a distribution range.
*/
-static u16 skb_tx_hash(const struct net_device *dev, struct sk_buff *skb)
+static u16 skb_tx_hash(const struct net_device *dev,
+ const struct net_device *sb_dev,
+ struct sk_buff *skb)
{
u32 hash;
u16 qoffset = 0;
u16 qcount = dev->real_num_tx_queues;
+ if (dev->num_tc) {
+ u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+
+ qoffset = sb_dev->tc_to_txq[tc].offset;
+ qcount = sb_dev->tc_to_txq[tc].count;
+ }
+
if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb);
while (unlikely(hash >= qcount))
hash -= qcount;
- return hash;
- }
-
- if (dev->num_tc) {
- u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
-
- qoffset = dev->tc_to_txq[tc].offset;
- qcount = dev->tc_to_txq[tc].count;
+ return hash + qoffset;
}
return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
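
reciprocal_scale() (include/linux/kernel.h) maps a 32-bit hash onto [0, qcount) without a division: reciprocal_scale(x, n) == ((u64)x * n) >> 32. As a worked example, hash 0xC0000000 with qcount 4 gives (0xC0000000ULL * 4) >> 32 == 3; qoffset then shifts the result into the traffic class's slice of the queue range.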
@@ -3376,32 +3559,64 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
}
#endif /* CONFIG_NET_EGRESS */
-static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+#ifdef CONFIG_XPS
+static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
+ struct xps_dev_maps *dev_maps, unsigned int tci)
+{
+ struct xps_map *map;
+ int queue_index = -1;
+
+ if (dev->num_tc) {
+ tci *= dev->num_tc;
+ tci += netdev_get_prio_tc_map(dev, skb->priority);
+ }
+
+ map = rcu_dereference(dev_maps->attr_map[tci]);
+ if (map) {
+ if (map->len == 1)
+ queue_index = map->queues[0];
+ else
+ queue_index = map->queues[reciprocal_scale(
+ skb_get_hash(skb), map->len)];
+ if (unlikely(queue_index >= dev->real_num_tx_queues))
+ queue_index = -1;
+ }
+ return queue_index;
+}
+#endif
+
+static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
+ struct sk_buff *skb)
{
#ifdef CONFIG_XPS
struct xps_dev_maps *dev_maps;
- struct xps_map *map;
+ struct sock *sk = skb->sk;
int queue_index = -1;
+ if (!static_key_false(&xps_needed))
+ return -1;
+
rcu_read_lock();
- dev_maps = rcu_dereference(dev->xps_maps);
+ if (!static_key_false(&xps_rxqs_needed))
+ goto get_cpus_map;
+
+ dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
if (dev_maps) {
- unsigned int tci = skb->sender_cpu - 1;
+ int tci = sk_rx_queue_get(sk);
- if (dev->num_tc) {
- tci *= dev->num_tc;
- tci += netdev_get_prio_tc_map(dev, skb->priority);
- }
+ if (tci >= 0 && tci < dev->num_rx_queues)
+ queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
+ tci);
+ }
- map = rcu_dereference(dev_maps->cpu_map[tci]);
- if (map) {
- if (map->len == 1)
- queue_index = map->queues[0];
- else
- queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
- map->len)];
- if (unlikely(queue_index >= dev->real_num_tx_queues))
- queue_index = -1;
+get_cpus_map:
+ if (queue_index < 0) {
+ dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
+ if (dev_maps) {
+ unsigned int tci = skb->sender_cpu - 1;
+
+ queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
+ tci);
}
}
rcu_read_unlock();
@@ -3412,17 +3627,36 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
#endif
}
-static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
+{
+ return 0;
+}
+EXPORT_SYMBOL(dev_pick_tx_zero);
+
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
+{
+ return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
+}
+EXPORT_SYMBOL(dev_pick_tx_cpu_id);
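
Both helpers match the ndo_select_queue signature at this point (dev, skb, sb_dev, fallback), so a driver can plug one in directly instead of open-coding the policy. Sketch (ops table and xmit handler hypothetical):

    static const struct net_device_ops example_netdev_ops = {
            .ndo_start_xmit   = example_start_xmit,  /* hypothetical */
            .ndo_select_queue = dev_pick_tx_cpu_id,  /* spread Tx by CPU */
    };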
+
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
{
struct sock *sk = skb->sk;
int queue_index = sk_tx_queue_get(sk);
+ sb_dev = sb_dev ? : dev;
+
if (queue_index < 0 || skb->ooo_okay ||
queue_index >= dev->real_num_tx_queues) {
- int new_index = get_xps_queue(dev, skb);
+ int new_index = get_xps_queue(dev, sb_dev, skb);
if (new_index < 0)
- new_index = skb_tx_hash(dev, skb);
+ new_index = skb_tx_hash(dev, sb_dev, skb);
if (queue_index != new_index && sk &&
sk_fullsock(sk) &&
@@ -3437,7 +3671,7 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv)
+ struct net_device *sb_dev)
{
int queue_index = 0;
@@ -3452,10 +3686,10 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_select_queue)
- queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+ queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
__netdev_pick_tx);
else
- queue_index = __netdev_pick_tx(dev, skb);
+ queue_index = __netdev_pick_tx(dev, skb, sb_dev);
queue_index = netdev_cap_txqueue(dev, queue_index);
}
@@ -3467,7 +3701,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
/**
* __dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
- * @accel_priv: private data used for L2 forwarding offload
+ * @sb_dev: subordinate device used for L2 forwarding offload
*
* Queue a buffer for transmission to a network device. The caller must
* have set the device and priority and built the buffer before calling
@@ -3490,7 +3724,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
* the BH enable code must have IRQs enabled so that it will not deadlock.
* --BLG
*/
-static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
@@ -3529,7 +3763,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
else
skb_dst_force(skb);
- txq = netdev_pick_tx(dev, skb, accel_priv);
+ txq = netdev_pick_tx(dev, skb, sb_dev);
q = rcu_dereference_bh(txq->qdisc);
trace_net_dev_queue(skb);
@@ -3603,9 +3837,9 @@ int dev_queue_xmit(struct sk_buff *skb)
}
EXPORT_SYMBOL(dev_queue_xmit);
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
{
- return __dev_queue_xmit(skb, accel_priv);
+ return __dev_queue_xmit(skb, sb_dev);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);
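
With accel_priv gone, the L2-forwarding fast path passes the subordinate net_device itself, so __netdev_pick_tx() can steer by sb_dev's tc_to_txq and XPS maps. Sketch of a forwarding xmit path (helper names hypothetical):

    static int example_fwd_xmit(struct sk_buff *skb, struct net_device *sb_dev)
    {
            /* Transmit on the lower device, steering by sb_dev's maps. */
            skb->dev = example_get_lower_dev(sb_dev);  /* hypothetical */
            return dev_queue_xmit_accel(skb, sb_dev);
    }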
@@ -4028,7 +4262,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
/* Reinjected packets coming from act_mirred or similar should
* not get XDP generic processing.
*/
- if (skb_cloned(skb))
+ if (skb_cloned(skb) || skb_is_tc_redirected(skb))
return XDP_PASS;
/* XDP packets must be linear and must have sufficient headroom
@@ -4378,6 +4612,10 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
__skb_push(skb, skb->mac_len);
skb_do_redirect(skb);
return NULL;
+ case TC_ACT_REINSERT:
+ /* this does not scrub the packet, and updates stats on error */
+ skb_tc_reinsert(skb, &cl_res);
+ return NULL;
default:
break;
}
@@ -4494,7 +4732,8 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
return 0;
}
-static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
+static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
+ struct packet_type **ppt_prev)
{
struct packet_type *ptype, *pt_prev;
rx_handler_func_t *rx_handler;
@@ -4624,8 +4863,7 @@ skip_classify:
if (pt_prev) {
if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
goto drop;
- else
- ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+ *ppt_prev = pt_prev;
} else {
drop:
if (!deliver_exact)
@@ -4643,6 +4881,18 @@ out:
return ret;
}
+static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
+{
+ struct net_device *orig_dev = skb->dev;
+ struct packet_type *pt_prev = NULL;
+ int ret;
+
+ ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
+ if (pt_prev)
+ ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+ return ret;
+}
+
/**
* netif_receive_skb_core - special purpose version of netif_receive_skb
* @skb: buffer to process
@@ -4663,13 +4913,72 @@ int netif_receive_skb_core(struct sk_buff *skb)
int ret;
rcu_read_lock();
- ret = __netif_receive_skb_core(skb, false);
+ ret = __netif_receive_skb_one_core(skb, false);
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(netif_receive_skb_core);
+static inline void __netif_receive_skb_list_ptype(struct list_head *head,
+ struct packet_type *pt_prev,
+ struct net_device *orig_dev)
+{
+ struct sk_buff *skb, *next;
+
+ if (!pt_prev)
+ return;
+ if (list_empty(head))
+ return;
+ if (pt_prev->list_func != NULL)
+ pt_prev->list_func(head, pt_prev, orig_dev);
+ else
+ list_for_each_entry_safe(skb, next, head, list)
+ pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+}
+
+static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
+{
+ /* Fast-path assumptions:
+ * - There is no RX handler.
+ * - Only one packet_type matches.
+ * If either of these fails, we will end up doing some per-packet
+ * processing in-line, then handling the 'last ptype' for the whole
+ * sublist. This can't cause out-of-order delivery to any single ptype,
+ * because the 'last ptype' must be constant across the sublist, and all
+ * other ptypes are handled per-packet.
+ */
+ /* Current (common) ptype of sublist */
+ struct packet_type *pt_curr = NULL;
+ /* Current (common) orig_dev of sublist */
+ struct net_device *od_curr = NULL;
+ struct list_head sublist;
+ struct sk_buff *skb, *next;
+
+ INIT_LIST_HEAD(&sublist);
+ list_for_each_entry_safe(skb, next, head, list) {
+ struct net_device *orig_dev = skb->dev;
+ struct packet_type *pt_prev = NULL;
+
+ list_del(&skb->list);
+ __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
+ if (!pt_prev)
+ continue;
+ if (pt_curr != pt_prev || od_curr != orig_dev) {
+ /* dispatch old sublist */
+ __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
+ /* start new sublist */
+ INIT_LIST_HEAD(&sublist);
+ pt_curr = pt_prev;
+ od_curr = orig_dev;
+ }
+ list_add_tail(&skb->list, &sublist);
+ }
+
+ /* dispatch final sublist */
+ __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
+}
+
static int __netif_receive_skb(struct sk_buff *skb)
{
int ret;
@@ -4687,14 +4996,44 @@ static int __netif_receive_skb(struct sk_buff *skb)
* context down to all allocation sites.
*/
noreclaim_flag = memalloc_noreclaim_save();
- ret = __netif_receive_skb_core(skb, true);
+ ret = __netif_receive_skb_one_core(skb, true);
memalloc_noreclaim_restore(noreclaim_flag);
} else
- ret = __netif_receive_skb_core(skb, false);
+ ret = __netif_receive_skb_one_core(skb, false);
return ret;
}
+static void __netif_receive_skb_list(struct list_head *head)
+{
+ unsigned long noreclaim_flag = 0;
+ struct sk_buff *skb, *next;
+ bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
+
+ list_for_each_entry_safe(skb, next, head, list) {
+ if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
+ struct list_head sublist;
+
+ /* Handle the previous sublist */
+ list_cut_before(&sublist, head, &skb->list);
+ if (!list_empty(&sublist))
+ __netif_receive_skb_list_core(&sublist, pfmemalloc);
+ pfmemalloc = !pfmemalloc;
+ /* See comments in __netif_receive_skb */
+ if (pfmemalloc)
+ noreclaim_flag = memalloc_noreclaim_save();
+ else
+ memalloc_noreclaim_restore(noreclaim_flag);
+ }
+ }
+ /* Handle the remaining sublist */
+ if (!list_empty(head))
+ __netif_receive_skb_list_core(head, pfmemalloc);
+ /* Restore pflags */
+ if (pfmemalloc)
+ memalloc_noreclaim_restore(noreclaim_flag);
+}
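
The pfmemalloc split relies on list_cut_before(), which moves every entry strictly before the given one onto a new list. Illustration of that semantic (standard <linux/list.h> behaviour):

    /* head: A -> B -> C -> D; split before C:
     *
     *         list_cut_before(&sublist, head, &C->list);
     *
     * afterwards sublist holds A, B and head holds C, D, so the
     * already-scanned prefix is dispatched with the old pfmemalloc
     * setting while the rest stays queued.
     */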
+
static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
{
struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
@@ -4717,7 +5056,6 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
break;
case XDP_QUERY_PROG:
- xdp->prog_attached = !!old;
xdp->prog_id = old ? old->aux->id : 0;
break;
@@ -4769,6 +5107,55 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
return ret;
}
+static void netif_receive_skb_list_internal(struct list_head *head)
+{
+ struct bpf_prog *xdp_prog = NULL;
+ struct sk_buff *skb, *next;
+ struct list_head sublist;
+
+ INIT_LIST_HEAD(&sublist);
+ list_for_each_entry_safe(skb, next, head, list) {
+ net_timestamp_check(netdev_tstamp_prequeue, skb);
+ list_del(&skb->list);
+ if (!skb_defer_rx_timestamp(skb))
+ list_add_tail(&skb->list, &sublist);
+ }
+ list_splice_init(&sublist, head);
+
+ if (static_branch_unlikely(&generic_xdp_needed_key)) {
+ preempt_disable();
+ rcu_read_lock();
+ list_for_each_entry_safe(skb, next, head, list) {
+ xdp_prog = rcu_dereference(skb->dev->xdp_prog);
+ list_del(&skb->list);
+ if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
+ list_add_tail(&skb->list, &sublist);
+ }
+ rcu_read_unlock();
+ preempt_enable();
+ /* Put passed packets back on main list */
+ list_splice_init(&sublist, head);
+ }
+
+ rcu_read_lock();
+#ifdef CONFIG_RPS
+ if (static_key_false(&rps_needed)) {
+ list_for_each_entry_safe(skb, next, head, list) {
+ struct rps_dev_flow voidflow, *rflow = &voidflow;
+ int cpu = get_rps_cpu(skb->dev, skb, &rflow);
+
+ if (cpu >= 0) {
+ /* Will be handled, remove from list */
+ list_del(&skb->list);
+ enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+ }
+ }
+ }
+#endif
+ __netif_receive_skb_list(head);
+ rcu_read_unlock();
+}
+
/**
* netif_receive_skb - process receive buffer from network
* @skb: buffer to process
@@ -4792,6 +5179,28 @@ int netif_receive_skb(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_receive_skb);
+/**
+ * netif_receive_skb_list - process many receive buffers from network
+ * @head: list of skbs to process.
+ *
+ * Since the return value of netif_receive_skb() is normally ignored, and
+ * wouldn't be meaningful for a list, this function returns void.
+ *
+ * This function may only be called from softirq context and interrupts
+ * should be enabled.
+ */
+void netif_receive_skb_list(struct list_head *head)
+{
+ struct sk_buff *skb;
+
+ if (list_empty(head))
+ return;
+ list_for_each_entry(skb, head, list)
+ trace_netif_receive_skb_list_entry(skb);
+ netif_receive_skb_list_internal(head);
+}
+EXPORT_SYMBOL(netif_receive_skb_list);
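
A driver that batches RX completions can now hand the whole batch to the stack in one call. Minimal poll-loop sketch (example_fetch_skb() is a hypothetical per-driver dequeue helper):

    static int example_poll(struct napi_struct *napi, int budget)
    {
            struct sk_buff *skb;
            LIST_HEAD(rx_list);
            int work = 0;

            while (work < budget && (skb = example_fetch_skb(napi))) {
                    list_add_tail(&skb->list, &rx_list);
                    work++;
            }
            /* netif_receive_skb_list() handles the empty-list case itself. */
            netif_receive_skb_list(&rx_list);
            return work;
    }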
+
DEFINE_PER_CPU(struct work_struct, flush_works);
/* Network device is going away, flush any packets still pending */
@@ -4875,42 +5284,50 @@ out:
return netif_receive_skb_internal(skb);
}
-/* napi->gro_list contains packets ordered by age.
- * youngest packets at the head of it.
- * Complete skbs in reverse order to reduce latencies.
- */
-void napi_gro_flush(struct napi_struct *napi, bool flush_old)
+static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
+ bool flush_old)
{
- struct sk_buff *skb, *prev = NULL;
-
- /* scan list and build reverse chain */
- for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
- skb->prev = prev;
- prev = skb;
- }
-
- for (skb = prev; skb; skb = prev) {
- skb->next = NULL;
+ struct list_head *head = &napi->gro_hash[index].list;
+ struct sk_buff *skb, *p;
+ list_for_each_entry_safe_reverse(skb, p, head, list) {
if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
return;
-
- prev = skb->prev;
+ list_del(&skb->list);
+ skb->next = NULL;
napi_gro_complete(skb);
- napi->gro_count--;
+ napi->gro_hash[index].count--;
}
- napi->gro_list = NULL;
+ if (!napi->gro_hash[index].count)
+ __clear_bit(index, &napi->gro_bitmask);
+}
+
+/* napi->gro_hash[].list contains packets ordered by age.
+ * youngest packets at the head of it.
+ * Complete skbs in reverse order to reduce latencies.
+ */
+void napi_gro_flush(struct napi_struct *napi, bool flush_old)
+{
+ u32 i;
+
+ for (i = 0; i < GRO_HASH_BUCKETS; i++) {
+ if (test_bit(i, &napi->gro_bitmask))
+ __napi_gro_flush_chain(napi, i, flush_old);
+ }
}
EXPORT_SYMBOL(napi_gro_flush);
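
napi->gro_bitmask is a summary bitmap, one bit per hash bucket, kept in sync by dev_gro_receive() and __napi_gro_flush_chain() so flushes and the napi_complete_done()/napi_watchdog() checks can skip empty chains. An equivalent walk using the generic bit helpers (a sketch, not what the code above uses):

    unsigned long bits = napi->gro_bitmask;
    u32 i;

    for_each_set_bit(i, &bits, GRO_HASH_BUCKETS)
            __napi_gro_flush_chain(napi, i, flush_old);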
-static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
+static struct list_head *gro_list_prepare(struct napi_struct *napi,
+ struct sk_buff *skb)
{
- struct sk_buff *p;
unsigned int maclen = skb->dev->hard_header_len;
u32 hash = skb_get_hash_raw(skb);
+ struct list_head *head;
+ struct sk_buff *p;
- for (p = napi->gro_list; p; p = p->next) {
+ head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
+ list_for_each_entry(p, head, list) {
unsigned long diffs;
NAPI_GRO_CB(p)->flush = 0;
@@ -4933,6 +5350,8 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
maclen);
NAPI_GRO_CB(p)->same_flow = !diffs;
}
+
+ return head;
}
static void skb_gro_reset_offset(struct sk_buff *skb)
@@ -4975,20 +5394,41 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
}
}
+static void gro_flush_oldest(struct list_head *head)
+{
+ struct sk_buff *oldest;
+
+ oldest = list_last_entry(head, struct sk_buff, list);
+
+ /* We are called with head length >= MAX_GRO_SKBS, so the list
+ * cannot be empty and oldest cannot be NULL.
+ */
+ if (WARN_ON_ONCE(!oldest))
+ return;
+
+ /* Do not adjust napi->gro_hash[].count, caller is adding a new
+ * SKB to the chain.
+ */
+ list_del(&oldest->list);
+ napi_gro_complete(oldest);
+}
+
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
- struct sk_buff **pp = NULL;
+ u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
+ struct list_head *head = &offload_base;
struct packet_offload *ptype;
__be16 type = skb->protocol;
- struct list_head *head = &offload_base;
- int same_flow;
+ struct list_head *gro_head;
+ struct sk_buff *pp = NULL;
enum gro_result ret;
+ int same_flow;
int grow;
if (netif_elide_gro(skb->dev))
goto normal;
- gro_list_prepare(napi, skb);
+ gro_head = gro_list_prepare(napi, skb);
rcu_read_lock();
list_for_each_entry_rcu(ptype, head, list) {
@@ -5022,7 +5462,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
NAPI_GRO_CB(skb)->csum_valid = 0;
}
- pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
+ pp = ptype->callbacks.gro_receive(gro_head, skb);
break;
}
rcu_read_unlock();
@@ -5039,12 +5479,10 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
if (pp) {
- struct sk_buff *nskb = *pp;
-
- *pp = nskb->next;
- nskb->next = NULL;
- napi_gro_complete(nskb);
- napi->gro_count--;
+ list_del(&pp->list);
+ pp->next = NULL;
+ napi_gro_complete(pp);
+ napi->gro_hash[hash].count--;
}
if (same_flow)
@@ -5053,26 +5491,16 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
if (NAPI_GRO_CB(skb)->flush)
goto normal;
- if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
- struct sk_buff *nskb = napi->gro_list;
-
- /* locate the end of the list to select the 'oldest' flow */
- while (nskb->next) {
- pp = &nskb->next;
- nskb = *pp;
- }
- *pp = NULL;
- nskb->next = NULL;
- napi_gro_complete(nskb);
+ if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
+ gro_flush_oldest(gro_head);
} else {
- napi->gro_count++;
+ napi->gro_hash[hash].count++;
}
NAPI_GRO_CB(skb)->count = 1;
NAPI_GRO_CB(skb)->age = jiffies;
NAPI_GRO_CB(skb)->last = skb;
skb_shinfo(skb)->gso_size = skb_gro_len(skb);
- skb->next = napi->gro_list;
- napi->gro_list = skb;
+ list_add(&skb->list, gro_head);
ret = GRO_HELD;
pull:
@@ -5080,6 +5508,13 @@ pull:
if (grow > 0)
gro_pull_from_frag0(skb, grow);
ok:
+ if (napi->gro_hash[hash].count) {
+ if (!test_bit(hash, &napi->gro_bitmask))
+ __set_bit(hash, &napi->gro_bitmask);
+ } else if (test_bit(hash, &napi->gro_bitmask)) {
+ __clear_bit(hash, &napi->gro_bitmask);
+ }
+
return ret;
normal:
@@ -5478,7 +5913,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
NAPIF_STATE_IN_BUSY_POLL)))
return false;
- if (n->gro_list) {
+ if (n->gro_bitmask) {
unsigned long timeout = 0;
if (work_done)
@@ -5687,21 +6122,31 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
/* Note: we use a relaxed variant of napi_schedule_prep(), not setting
* NAPI_STATE_MISSED, since we do not react to a device IRQ.
*/
- if (napi->gro_list && !napi_disable_pending(napi) &&
+ if (napi->gro_bitmask && !napi_disable_pending(napi) &&
!test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
__napi_schedule_irqoff(napi);
return HRTIMER_NORESTART;
}
+static void init_gro_hash(struct napi_struct *napi)
+{
+ int i;
+
+ for (i = 0; i < GRO_HASH_BUCKETS; i++) {
+ INIT_LIST_HEAD(&napi->gro_hash[i].list);
+ napi->gro_hash[i].count = 0;
+ }
+ napi->gro_bitmask = 0;
+}
+
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight)
{
INIT_LIST_HEAD(&napi->poll_list);
hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
napi->timer.function = napi_watchdog;
- napi->gro_count = 0;
- napi->gro_list = NULL;
+ init_gro_hash(napi);
napi->skb = NULL;
napi->poll = poll;
if (weight > NAPI_POLL_WEIGHT)
@@ -5734,6 +6179,19 @@ void napi_disable(struct napi_struct *n)
}
EXPORT_SYMBOL(napi_disable);
+static void flush_gro_hash(struct napi_struct *napi)
+{
+ int i;
+
+ for (i = 0; i < GRO_HASH_BUCKETS; i++) {
+ struct sk_buff *skb, *n;
+
+ list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
+ kfree_skb(skb);
+ napi->gro_hash[i].count = 0;
+ }
+}
+
/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
@@ -5743,9 +6201,8 @@ void netif_napi_del(struct napi_struct *napi)
list_del_init(&napi->dev_list);
napi_free_frags(napi);
- kfree_skb_list(napi->gro_list);
- napi->gro_list = NULL;
- napi->gro_count = 0;
+ flush_gro_hash(napi);
+ napi->gro_bitmask = 0;
}
EXPORT_SYMBOL(netif_napi_del);
@@ -5787,7 +6244,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
goto out_unlock;
}
- if (n->gro_list) {
+ if (n->gro_bitmask) {
/* flush too old packets
* If HZ < 1000, flush all packets.
*/
@@ -7080,13 +7537,15 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
EXPORT_SYMBOL(__dev_set_mtu);
/**
- * dev_set_mtu - Change maximum transfer unit
+ * dev_set_mtu_ext - Change maximum transfer unit
* @dev: device
* @new_mtu: new transfer unit
+ * @extack: netlink extended ack
*
* Change the maximum transfer size of the network device.
*/
-int dev_set_mtu(struct net_device *dev, int new_mtu)
+int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
+ struct netlink_ext_ack *extack)
{
int err, orig_mtu;
@@ -7095,14 +7554,12 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
/* MTU must be positive, and in range */
if (new_mtu < 0 || new_mtu < dev->min_mtu) {
- net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
- dev->name, new_mtu, dev->min_mtu);
+ NL_SET_ERR_MSG(extack, "mtu less than device minimum");
return -EINVAL;
}
if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
- net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
- dev->name, new_mtu, dev->max_mtu);
+ NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
return -EINVAL;
}
@@ -7130,6 +7587,18 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
}
return err;
}
+
+int dev_set_mtu(struct net_device *dev, int new_mtu)
+{
+ struct netlink_ext_ack extack;
+ int err;
+
+ memset(&extack, 0, sizeof(extack));
+ err = dev_set_mtu_ext(dev, new_mtu, &extack);
+ if (err && extack._msg)
+ net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
+ return err;
+}
EXPORT_SYMBOL(dev_set_mtu);
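
Callers that already hold an extack (the rtnetlink path) can use dev_set_mtu_ext() so the failure reason reaches the requesting socket; plain dev_set_mtu() keeps the old behaviour by rate-limited logging of the same message. Sketch of an extack-aware caller (hypothetical function, RTNL assumed held):

    static int example_change_mtu(struct net_device *dev, int mtu,
                                  struct netlink_ext_ack *extack)
    {
            /* On failure, e.g. "mtu less than device minimum" is carried
             * back to userspace instead of only hitting the kernel log.
             */
            return dev_set_mtu_ext(dev, mtu, extack);
    }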
/**
@@ -7279,23 +7748,21 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
}
EXPORT_SYMBOL(dev_change_proto_down);
-void __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
- struct netdev_bpf *xdp)
+u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
+ enum bpf_netdev_command cmd)
{
- memset(xdp, 0, sizeof(*xdp));
- xdp->command = XDP_QUERY_PROG;
+ struct netdev_bpf xdp;
- /* Query must always succeed. */
- WARN_ON(bpf_op(dev, xdp) < 0);
-}
+ if (!bpf_op)
+ return 0;
-static u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op)
-{
- struct netdev_bpf xdp;
+ memset(&xdp, 0, sizeof(xdp));
+ xdp.command = cmd;
- __dev_xdp_query(dev, bpf_op, &xdp);
+ /* Query must always succeed. */
+ WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);
- return xdp.prog_attached;
+ return xdp.prog_id;
}
static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
@@ -7329,12 +7796,19 @@ static void dev_xdp_uninstall(struct net_device *dev)
if (!ndo_bpf)
return;
- __dev_xdp_query(dev, ndo_bpf, &xdp);
- if (xdp.prog_attached == XDP_ATTACHED_NONE)
- return;
+ memset(&xdp, 0, sizeof(xdp));
+ xdp.command = XDP_QUERY_PROG;
+ WARN_ON(ndo_bpf(dev, &xdp));
+ if (xdp.prog_id)
+ WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
+ NULL));
- /* Program removal should always succeed */
- WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, NULL));
+ /* Remove HW offload */
+ memset(&xdp, 0, sizeof(xdp));
+ xdp.command = XDP_QUERY_PROG_HW;
+ if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
+ WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
+ NULL));
}
/**
@@ -7350,12 +7824,15 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, u32 flags)
{
const struct net_device_ops *ops = dev->netdev_ops;
+ enum bpf_netdev_command query;
struct bpf_prog *prog = NULL;
bpf_op_t bpf_op, bpf_chk;
int err;
ASSERT_RTNL();
+ query = flags & XDP_FLAGS_HW_MODE ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
+
bpf_op = bpf_chk = ops->ndo_bpf;
if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
return -EOPNOTSUPP;
@@ -7365,10 +7842,11 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
bpf_chk = generic_xdp_install;
if (fd >= 0) {
- if (bpf_chk && __dev_xdp_attached(dev, bpf_chk))
+ if (__dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG) ||
+ __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG_HW))
return -EEXIST;
if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
- __dev_xdp_attached(dev, bpf_op))
+ __dev_xdp_query(dev, bpf_op, query))
return -EBUSY;
prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
@@ -8837,6 +9315,9 @@ static struct hlist_head * __net_init netdev_create_hash(void)
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
+ BUILD_BUG_ON(GRO_HASH_BUCKETS >
+ 8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));
+
if (net != &init_net)
INIT_LIST_HEAD(&net->dev_base_head);
@@ -9107,6 +9588,7 @@ static int __init net_dev_init(void)
sd->cpu = i;
#endif
+ init_gro_hash(&sd->backlog);
sd->backlog.poll = process_backlog;
sd->backlog.weight = weight_p;
}
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 50537ff961a7..90e8aa36881e 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -284,12 +284,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
case SIOCSIFTXQLEN:
if (ifr->ifr_qlen < 0)
return -EINVAL;
- if (dev->tx_queue_len ^ ifr->ifr_qlen) {
- err = dev_change_tx_queue_len(dev, ifr->ifr_qlen);
- if (err)
- return err;
- }
- return 0;
+ return dev_change_tx_queue_len(dev, ifr->ifr_qlen);
case SIOCSIFNAME:
ifr->ifr_newname[IFNAMSIZ-1] = '\0';
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 22099705cc41..65fc366a78a4 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -326,6 +326,57 @@ devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
pool_type, p_tc_index);
}
+struct devlink_region {
+ struct devlink *devlink;
+ struct list_head list;
+ const char *name;
+ struct list_head snapshot_list;
+ u32 max_snapshots;
+ u32 cur_snapshots;
+ u64 size;
+};
+
+struct devlink_snapshot {
+ struct list_head list;
+ struct devlink_region *region;
+ devlink_snapshot_data_dest_t *data_destructor;
+ u64 data_len;
+ u8 *data;
+ u32 id;
+};
+
+static struct devlink_region *
+devlink_region_get_by_name(struct devlink *devlink, const char *region_name)
+{
+ struct devlink_region *region;
+
+ list_for_each_entry(region, &devlink->region_list, list)
+ if (!strcmp(region->name, region_name))
+ return region;
+
+ return NULL;
+}
+
+static struct devlink_snapshot *
+devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id)
+{
+ struct devlink_snapshot *snapshot;
+
+ list_for_each_entry(snapshot, &region->snapshot_list, list)
+ if (snapshot->id == id)
+ return snapshot;
+
+ return NULL;
+}
+
+static void devlink_region_snapshot_del(struct devlink_snapshot *snapshot)
+{
+ snapshot->region->cur_snapshots--;
+ list_del(&snapshot->list);
+ (*snapshot->data_destructor)(snapshot->data);
+ kfree(snapshot);
+}
+
#define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0)
#define DEVLINK_NL_FLAG_NEED_PORT BIT(1)
#define DEVLINK_NL_FLAG_NEED_SB BIT(2)
@@ -2604,6 +2655,919 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
return devlink->ops->reload(devlink, info->extack);
}
+static const struct devlink_param devlink_param_generic[] = {
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ .name = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME,
+ .type = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ .name = DEVLINK_PARAM_GENERIC_MAX_MACS_NAME,
+ .type = DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ .name = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME,
+ .type = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE,
+ },
+};
+
+static int devlink_param_generic_verify(const struct devlink_param *param)
+{
+ /* verify it matches a generic parameter by id and name */
+ if (param->id > DEVLINK_PARAM_GENERIC_ID_MAX)
+ return -EINVAL;
+ if (strcmp(param->name, devlink_param_generic[param->id].name))
+ return -ENOENT;
+
+ WARN_ON(param->type != devlink_param_generic[param->id].type);
+
+ return 0;
+}
+
+static int devlink_param_driver_verify(const struct devlink_param *param)
+{
+ int i;
+
+ if (param->id <= DEVLINK_PARAM_GENERIC_ID_MAX)
+ return -EINVAL;
+ /* verify no such name in generic params */
+ for (i = 0; i <= DEVLINK_PARAM_GENERIC_ID_MAX; i++)
+ if (!strcmp(param->name, devlink_param_generic[i].name))
+ return -EEXIST;
+
+ return 0;
+}
+
+static struct devlink_param_item *
+devlink_param_find_by_name(struct list_head *param_list,
+ const char *param_name)
+{
+ struct devlink_param_item *param_item;
+
+ list_for_each_entry(param_item, param_list, list)
+ if (!strcmp(param_item->param->name, param_name))
+ return param_item;
+ return NULL;
+}
+
+static struct devlink_param_item *
+devlink_param_find_by_id(struct list_head *param_list, u32 param_id)
+{
+ struct devlink_param_item *param_item;
+
+ list_for_each_entry(param_item, param_list, list)
+ if (param_item->param->id == param_id)
+ return param_item;
+ return NULL;
+}
+
+static bool
+devlink_param_cmode_is_supported(const struct devlink_param *param,
+ enum devlink_param_cmode cmode)
+{
+ return test_bit(cmode, &param->supported_cmodes);
+}
+
+static int devlink_param_get(struct devlink *devlink,
+ const struct devlink_param *param,
+ struct devlink_param_gset_ctx *ctx)
+{
+ if (!param->get)
+ return -EOPNOTSUPP;
+ return param->get(devlink, param->id, ctx);
+}
+
+static int devlink_param_set(struct devlink *devlink,
+ const struct devlink_param *param,
+ struct devlink_param_gset_ctx *ctx)
+{
+ if (!param->set)
+ return -EOPNOTSUPP;
+ return param->set(devlink, param->id, ctx);
+}
+
+static int
+devlink_param_type_to_nla_type(enum devlink_param_type param_type)
+{
+ switch (param_type) {
+ case DEVLINK_PARAM_TYPE_U8:
+ return NLA_U8;
+ case DEVLINK_PARAM_TYPE_U16:
+ return NLA_U16;
+ case DEVLINK_PARAM_TYPE_U32:
+ return NLA_U32;
+ case DEVLINK_PARAM_TYPE_STRING:
+ return NLA_STRING;
+ case DEVLINK_PARAM_TYPE_BOOL:
+ return NLA_FLAG;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+devlink_nl_param_value_fill_one(struct sk_buff *msg,
+ enum devlink_param_type type,
+ enum devlink_param_cmode cmode,
+ union devlink_param_value val)
+{
+ struct nlattr *param_value_attr;
+
+ param_value_attr = nla_nest_start(msg, DEVLINK_ATTR_PARAM_VALUE);
+ if (!param_value_attr)
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_CMODE, cmode))
+ goto value_nest_cancel;
+
+ switch (type) {
+ case DEVLINK_PARAM_TYPE_U8:
+ if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu8))
+ goto value_nest_cancel;
+ break;
+ case DEVLINK_PARAM_TYPE_U16:
+ if (nla_put_u16(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu16))
+ goto value_nest_cancel;
+ break;
+ case DEVLINK_PARAM_TYPE_U32:
+ if (nla_put_u32(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu32))
+ goto value_nest_cancel;
+ break;
+ case DEVLINK_PARAM_TYPE_STRING:
+ if (nla_put_string(msg, DEVLINK_ATTR_PARAM_VALUE_DATA,
+ val.vstr))
+ goto value_nest_cancel;
+ break;
+ case DEVLINK_PARAM_TYPE_BOOL:
+ if (val.vbool &&
+ nla_put_flag(msg, DEVLINK_ATTR_PARAM_VALUE_DATA))
+ goto value_nest_cancel;
+ break;
+ }
+
+ nla_nest_end(msg, param_value_attr);
+ return 0;
+
+value_nest_cancel:
+ nla_nest_cancel(msg, param_value_attr);
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
+ struct devlink_param_item *param_item,
+ enum devlink_command cmd,
+ u32 portid, u32 seq, int flags)
+{
+ union devlink_param_value param_value[DEVLINK_PARAM_CMODE_MAX + 1];
+ const struct devlink_param *param = param_item->param;
+ struct devlink_param_gset_ctx ctx;
+ struct nlattr *param_values_list;
+ struct nlattr *param_attr;
+ int nla_type;
+ void *hdr;
+ int err;
+ int i;
+
+ /* Gather a value per supported cmode; driverinit comes from the cached copy, the rest from the driver */
+ for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
+ if (!devlink_param_cmode_is_supported(param, i))
+ continue;
+ if (i == DEVLINK_PARAM_CMODE_DRIVERINIT) {
+ if (!param_item->driverinit_value_valid)
+ return -EOPNOTSUPP;
+ param_value[i] = param_item->driverinit_value;
+ } else {
+ ctx.cmode = i;
+ err = devlink_param_get(devlink, param, &ctx);
+ if (err)
+ return err;
+ param_value[i] = ctx.val;
+ }
+ }
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto genlmsg_cancel;
+ param_attr = nla_nest_start(msg, DEVLINK_ATTR_PARAM);
+ if (!param_attr)
+ goto genlmsg_cancel;
+ if (nla_put_string(msg, DEVLINK_ATTR_PARAM_NAME, param->name))
+ goto param_nest_cancel;
+ if (param->generic && nla_put_flag(msg, DEVLINK_ATTR_PARAM_GENERIC))
+ goto param_nest_cancel;
+
+ nla_type = devlink_param_type_to_nla_type(param->type);
+ if (nla_type < 0)
+ goto param_nest_cancel;
+ if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_TYPE, nla_type))
+ goto param_nest_cancel;
+
+ param_values_list = nla_nest_start(msg, DEVLINK_ATTR_PARAM_VALUES_LIST);
+ if (!param_values_list)
+ goto param_nest_cancel;
+
+ for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
+ if (!devlink_param_cmode_is_supported(param, i))
+ continue;
+ err = devlink_nl_param_value_fill_one(msg, param->type,
+ i, param_value[i]);
+ if (err)
+ goto values_list_nest_cancel;
+ }
+
+ nla_nest_end(msg, param_values_list);
+ nla_nest_end(msg, param_attr);
+ genlmsg_end(msg, hdr);
+ return 0;
+
+values_list_nest_cancel:
+ nla_nest_end(msg, param_values_list);
+param_nest_cancel:
+ nla_nest_cancel(msg, param_attr);
+genlmsg_cancel:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static void devlink_param_notify(struct devlink *devlink,
+ struct devlink_param_item *param_item,
+ enum devlink_command cmd)
+{
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+ err = devlink_nl_param_fill(msg, devlink, param_item, cmd, 0, 0, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+ msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct devlink_param_item *param_item;
+ struct devlink *devlink;
+ int start = cb->args[0];
+ int idx = 0;
+ int err;
+
+ mutex_lock(&devlink_mutex);
+ list_for_each_entry(devlink, &devlink_list, list) {
+ if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+ continue;
+ mutex_lock(&devlink->lock);
+ list_for_each_entry(param_item, &devlink->param_list, list) {
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_param_fill(msg, devlink, param_item,
+ DEVLINK_CMD_PARAM_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI);
+ if (err) {
+ mutex_unlock(&devlink->lock);
+ goto out;
+ }
+ idx++;
+ }
+ mutex_unlock(&devlink->lock);
+ }
+out:
+ mutex_unlock(&devlink_mutex);
+
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+static int
+devlink_param_type_get_from_info(struct genl_info *info,
+ enum devlink_param_type *param_type)
+{
+ if (!info->attrs[DEVLINK_ATTR_PARAM_TYPE])
+ return -EINVAL;
+
+ switch (nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_TYPE])) {
+ case NLA_U8:
+ *param_type = DEVLINK_PARAM_TYPE_U8;
+ break;
+ case NLA_U16:
+ *param_type = DEVLINK_PARAM_TYPE_U16;
+ break;
+ case NLA_U32:
+ *param_type = DEVLINK_PARAM_TYPE_U32;
+ break;
+ case NLA_STRING:
+ *param_type = DEVLINK_PARAM_TYPE_STRING;
+ break;
+ case NLA_FLAG:
+ *param_type = DEVLINK_PARAM_TYPE_BOOL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+devlink_param_value_get_from_info(const struct devlink_param *param,
+ struct genl_info *info,
+ union devlink_param_value *value)
+{
+ if (param->type != DEVLINK_PARAM_TYPE_BOOL &&
+ !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])
+ return -EINVAL;
+
+ switch (param->type) {
+ case DEVLINK_PARAM_TYPE_U8:
+ value->vu8 = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+ break;
+ case DEVLINK_PARAM_TYPE_U16:
+ value->vu16 = nla_get_u16(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+ break;
+ case DEVLINK_PARAM_TYPE_U32:
+ value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+ break;
+ case DEVLINK_PARAM_TYPE_STRING:
+ if (nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) >
+ DEVLINK_PARAM_MAX_STRING_VALUE)
+ return -EINVAL;
+ value->vstr = nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+ break;
+ case DEVLINK_PARAM_TYPE_BOOL:
+ value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ?
+ true : false;
+ break;
+ }
+ return 0;
+}
+
+static struct devlink_param_item *
+devlink_param_get_from_info(struct devlink *devlink,
+ struct genl_info *info)
+{
+ char *param_name;
+
+ if (!info->attrs[DEVLINK_ATTR_PARAM_NAME])
+ return NULL;
+
+ param_name = nla_data(info->attrs[DEVLINK_ATTR_PARAM_NAME]);
+ return devlink_param_find_by_name(&devlink->param_list, param_name);
+}
+
+static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_param_item *param_item;
+ struct sk_buff *msg;
+ int err;
+
+ param_item = devlink_param_get_from_info(devlink, info);
+ if (!param_item)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_param_fill(msg, devlink, param_item,
+ DEVLINK_CMD_PARAM_GET,
+ info->snd_portid, info->snd_seq, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ enum devlink_param_type param_type;
+ struct devlink_param_gset_ctx ctx;
+ enum devlink_param_cmode cmode;
+ struct devlink_param_item *param_item;
+ const struct devlink_param *param;
+ union devlink_param_value value;
+ int err = 0;
+
+ param_item = devlink_param_get_from_info(devlink, info);
+ if (!param_item)
+ return -EINVAL;
+ param = param_item->param;
+ err = devlink_param_type_get_from_info(info, &param_type);
+ if (err)
+ return err;
+ if (param_type != param->type)
+ return -EINVAL;
+ err = devlink_param_value_get_from_info(param, info, &value);
+ if (err)
+ return err;
+ if (param->validate) {
+ err = param->validate(devlink, param->id, value, info->extack);
+ if (err)
+ return err;
+ }
+
+ if (!info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE])
+ return -EINVAL;
+ cmode = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE]);
+ if (!devlink_param_cmode_is_supported(param, cmode))
+ return -EOPNOTSUPP;
+
+ if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) {
+ param_item->driverinit_value = value;
+ param_item->driverinit_value_valid = true;
+ } else {
+ if (!param->set)
+ return -EOPNOTSUPP;
+ ctx.val = value;
+ ctx.cmode = cmode;
+ err = devlink_param_set(devlink, param, &ctx);
+ if (err)
+ return err;
+ }
+
+ devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
+ return 0;
+}
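For the runtime and permanent cmodes, the set path above lands in the driver's ->set() callback via devlink_param_set(). A minimal sketch of such driver-side callbacks, assuming the callback signatures from the include/net/devlink.h part of this series (all foo_* names are hypothetical):

static int foo_param_get(struct devlink *devlink, u32 id,
			 struct devlink_param_gset_ctx *ctx)
{
	struct foo *foo = devlink_priv(devlink);	/* driver private data */

	ctx->val.vbool = foo->feature_enabled;
	return 0;
}

static int foo_param_set(struct devlink *devlink, u32 id,
			 struct devlink_param_gset_ctx *ctx)
{
	struct foo *foo = devlink_priv(devlink);

	foo->feature_enabled = ctx->val.vbool;	/* apply at runtime */
	return 0;
}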
+
+static int devlink_param_register_one(struct devlink *devlink,
+ const struct devlink_param *param)
+{
+ struct devlink_param_item *param_item;
+
+ if (devlink_param_find_by_name(&devlink->param_list,
+ param->name))
+ return -EEXIST;
+
+ if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT))
+ WARN_ON(param->get || param->set);
+ else
+ WARN_ON(!param->get || !param->set);
+
+ param_item = kzalloc(sizeof(*param_item), GFP_KERNEL);
+ if (!param_item)
+ return -ENOMEM;
+ param_item->param = param;
+
+ list_add_tail(&param_item->list, &devlink->param_list);
+ devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
+ return 0;
+}
+
+static void devlink_param_unregister_one(struct devlink *devlink,
+ const struct devlink_param *param)
+{
+ struct devlink_param_item *param_item;
+
+ param_item = devlink_param_find_by_name(&devlink->param_list,
+ param->name);
+ WARN_ON(!param_item);
+ devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_DEL);
+ list_del(&param_item->list);
+ kfree(param_item);
+}
+
+static int devlink_nl_region_snapshot_id_put(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct devlink_snapshot *snapshot)
+{
+ struct nlattr *snap_attr;
+ int err;
+
+ snap_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_SNAPSHOT);
+ if (!snap_attr)
+ return -EINVAL;
+
+ err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID, snapshot->id);
+ if (err)
+ goto nla_put_failure;
+
+ nla_nest_end(msg, snap_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, snap_attr);
+ return err;
+}
+
+static int devlink_nl_region_snapshots_id_put(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct devlink_region *region)
+{
+ struct devlink_snapshot *snapshot;
+ struct nlattr *snapshots_attr;
+ int err;
+
+ snapshots_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_SNAPSHOTS);
+ if (!snapshots_attr)
+ return -EINVAL;
+
+ list_for_each_entry(snapshot, &region->snapshot_list, list) {
+ err = devlink_nl_region_snapshot_id_put(msg, devlink, snapshot);
+ if (err)
+ goto nla_put_failure;
+ }
+
+ nla_nest_end(msg, snapshots_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, snapshots_attr);
+ return err;
+}
+
+static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags,
+ struct devlink_region *region)
+{
+ void *hdr;
+ int err;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ err = devlink_nl_put_handle(msg, devlink);
+ if (err)
+ goto nla_put_failure;
+
+ err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->name);
+ if (err)
+ goto nla_put_failure;
+
+ err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
+ region->size,
+ DEVLINK_ATTR_PAD);
+ if (err)
+ goto nla_put_failure;
+
+ err = devlink_nl_region_snapshots_id_put(msg, devlink, region);
+ if (err)
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
+static void devlink_nl_region_notify(struct devlink_region *region,
+ struct devlink_snapshot *snapshot,
+ enum devlink_command cmd)
+{
+ struct devlink *devlink = region->devlink;
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+
+ WARN_ON(cmd != DEVLINK_CMD_REGION_NEW && cmd != DEVLINK_CMD_REGION_DEL);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ hdr = genlmsg_put(msg, 0, 0, &devlink_nl_family, 0, cmd);
+ if (!hdr)
+ goto out_free_msg;
+
+ err = devlink_nl_put_handle(msg, devlink);
+ if (err)
+ goto out_cancel_msg;
+
+ err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME,
+ region->name);
+ if (err)
+ goto out_cancel_msg;
+
+ if (snapshot) {
+ err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID,
+ snapshot->id);
+ if (err)
+ goto out_cancel_msg;
+ } else {
+ err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
+ region->size, DEVLINK_ATTR_PAD);
+ if (err)
+ goto out_cancel_msg;
+ }
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+ msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+
+ return;
+
+out_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+out_free_msg:
+ nlmsg_free(msg);
+}
+
+static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_region *region;
+ const char *region_name;
+ struct sk_buff *msg;
+ int err;
+
+ if (!info->attrs[DEVLINK_ATTR_REGION_NAME])
+ return -EINVAL;
+
+ region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
+ region = devlink_region_get_by_name(devlink, region_name);
+ if (!region)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_region_fill(msg, devlink, DEVLINK_CMD_REGION_GET,
+ info->snd_portid, info->snd_seq, 0,
+ region);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_region_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct devlink_region *region;
+ struct devlink *devlink;
+ int start = cb->args[0];
+ int idx = 0;
+ int err;
+
+ mutex_lock(&devlink_mutex);
+ list_for_each_entry(devlink, &devlink_list, list) {
+ if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+ continue;
+
+ mutex_lock(&devlink->lock);
+ list_for_each_entry(region, &devlink->region_list, list) {
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_region_fill(msg, devlink,
+ DEVLINK_CMD_REGION_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI, region);
+ if (err) {
+ mutex_unlock(&devlink->lock);
+ goto out;
+ }
+ idx++;
+ }
+ mutex_unlock(&devlink->lock);
+ }
+out:
+ mutex_unlock(&devlink_mutex);
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+static int devlink_nl_cmd_region_del(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_snapshot *snapshot;
+ struct devlink_region *region;
+ const char *region_name;
+ u32 snapshot_id;
+
+ if (!info->attrs[DEVLINK_ATTR_REGION_NAME] ||
+ !info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID])
+ return -EINVAL;
+
+ region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
+ snapshot_id = nla_get_u32(info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]);
+
+ region = devlink_region_get_by_name(devlink, region_name);
+ if (!region)
+ return -EINVAL;
+
+ snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
+ if (!snapshot)
+ return -EINVAL;
+
+ devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL);
+ devlink_region_snapshot_del(snapshot);
+ return 0;
+}
+
+static int devlink_nl_cmd_region_read_chunk_fill(struct sk_buff *msg,
+ struct devlink *devlink,
+ u8 *chunk, u32 chunk_size,
+ u64 addr)
+{
+ struct nlattr *chunk_attr;
+ int err;
+
+ chunk_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_CHUNK);
+ if (!chunk_attr)
+ return -EINVAL;
+
+ err = nla_put(msg, DEVLINK_ATTR_REGION_CHUNK_DATA, chunk_size, chunk);
+ if (err)
+ goto nla_put_failure;
+
+ err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_CHUNK_ADDR, addr,
+ DEVLINK_ATTR_PAD);
+ if (err)
+ goto nla_put_failure;
+
+ nla_nest_end(msg, chunk_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, chunk_attr);
+ return err;
+}
+
+#define DEVLINK_REGION_READ_CHUNK_SIZE 256
+
+static int devlink_nl_region_read_snapshot_fill(struct sk_buff *skb,
+ struct devlink *devlink,
+ struct devlink_region *region,
+ struct nlattr **attrs,
+ u64 start_offset,
+ u64 end_offset,
+ bool dump,
+ u64 *new_offset)
+{
+ struct devlink_snapshot *snapshot;
+ u64 curr_offset = start_offset;
+ u32 snapshot_id;
+ int err = 0;
+
+ *new_offset = start_offset;
+
+ snapshot_id = nla_get_u32(attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]);
+ snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
+ if (!snapshot)
+ return -EINVAL;
+
+ if (end_offset > snapshot->data_len || dump)
+ end_offset = snapshot->data_len;
+
+ while (curr_offset < end_offset) {
+ u32 data_size;
+ u8 *data;
+
+ if (end_offset - curr_offset < DEVLINK_REGION_READ_CHUNK_SIZE)
+ data_size = end_offset - curr_offset;
+ else
+ data_size = DEVLINK_REGION_READ_CHUNK_SIZE;
+
+ data = &snapshot->data[curr_offset];
+ err = devlink_nl_cmd_region_read_chunk_fill(skb, devlink,
+ data, data_size,
+ curr_offset);
+ if (err)
+ break;
+
+ curr_offset += data_size;
+ }
+ *new_offset = curr_offset;
+
+ return err;
+}
+
+static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ u64 ret_offset, start_offset, end_offset = 0;
+ struct nlattr *attrs[DEVLINK_ATTR_MAX + 1];
+ const struct genl_ops *ops = cb->data;
+ struct devlink_region *region;
+ struct nlattr *chunks_attr;
+ const char *region_name;
+ struct devlink *devlink;
+ bool dump = true;
+ void *hdr;
+ int err;
+
+ start_offset = *((u64 *)&cb->args[0]);
+
+ err = nlmsg_parse(cb->nlh, GENL_HDRLEN + devlink_nl_family.hdrsize,
+ attrs, DEVLINK_ATTR_MAX, ops->policy, NULL);
+ if (err)
+ goto out;
+
+ devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
+ if (IS_ERR(devlink))
+ goto out;
+
+ mutex_lock(&devlink_mutex);
+ mutex_lock(&devlink->lock);
+
+ if (!attrs[DEVLINK_ATTR_REGION_NAME] ||
+ !attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID])
+ goto out_unlock;
+
+ region_name = nla_data(attrs[DEVLINK_ATTR_REGION_NAME]);
+ region = devlink_region_get_by_name(devlink, region_name);
+ if (!region)
+ goto out_unlock;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI,
+ DEVLINK_CMD_REGION_READ);
+ if (!hdr)
+ goto out_unlock;
+
+ err = devlink_nl_put_handle(skb, devlink);
+ if (err)
+ goto nla_put_failure;
+
+ err = nla_put_string(skb, DEVLINK_ATTR_REGION_NAME, region_name);
+ if (err)
+ goto nla_put_failure;
+
+ chunks_attr = nla_nest_start(skb, DEVLINK_ATTR_REGION_CHUNKS);
+ if (!chunks_attr)
+ goto nla_put_failure;
+
+ if (attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR] &&
+ attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]) {
+ if (!start_offset)
+ start_offset =
+ nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
+
+ end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
+ end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]);
+ dump = false;
+ }
+
+ err = devlink_nl_region_read_snapshot_fill(skb, devlink,
+ region, attrs,
+ start_offset,
+ end_offset, dump,
+ &ret_offset);
+
+ if (err && err != -EMSGSIZE)
+ goto nla_put_failure;
+
+	/* Check whether any progress was made, to prevent an infinite loop */
+ if (ret_offset == start_offset)
+ goto nla_put_failure;
+
+ *((u64 *)&cb->args[0]) = ret_offset;
+
+ nla_nest_end(skb, chunks_attr);
+ genlmsg_end(skb, hdr);
+ mutex_unlock(&devlink->lock);
+ mutex_unlock(&devlink_mutex);
+
+ return skb->len;
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+out_unlock:
+ mutex_unlock(&devlink->lock);
+ mutex_unlock(&devlink_mutex);
+out:
+ return 0;
+}
+
static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
@@ -2624,6 +3588,11 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
[DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64},
[DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64},
+ [DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_PARAM_TYPE] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
};
static const struct genl_ops devlink_nl_ops[] = {
@@ -2807,6 +3776,43 @@ static const struct genl_ops devlink_nl_ops[] = {
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NO_LOCK,
},
+ {
+ .cmd = DEVLINK_CMD_PARAM_GET,
+ .doit = devlink_nl_cmd_param_get_doit,
+ .dumpit = devlink_nl_cmd_param_get_dumpit,
+ .policy = devlink_nl_policy,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_PARAM_SET,
+ .doit = devlink_nl_cmd_param_set_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
+ {
+ .cmd = DEVLINK_CMD_REGION_GET,
+ .doit = devlink_nl_cmd_region_get_doit,
+ .dumpit = devlink_nl_cmd_region_get_dumpit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
+ {
+ .cmd = DEVLINK_CMD_REGION_DEL,
+ .doit = devlink_nl_cmd_region_del,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
+ {
+ .cmd = DEVLINK_CMD_REGION_READ,
+ .dumpit = devlink_nl_cmd_region_read_dumpit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
};
static struct genl_family devlink_nl_family __ro_after_init = {
@@ -2845,6 +3851,8 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
INIT_LIST_HEAD(&devlink->sb_list);
INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
INIT_LIST_HEAD(&devlink->resource_list);
+ INIT_LIST_HEAD(&devlink->param_list);
+ INIT_LIST_HEAD(&devlink->region_list);
mutex_init(&devlink->lock);
return devlink;
}
@@ -3434,6 +4442,320 @@ out:
}
EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister);
+/**
+ * devlink_params_register - register configuration parameters
+ *
+ * @devlink: devlink
+ * @params: configuration parameters array
+ * @params_count: number of parameters provided
+ *
+ * Register the configuration parameters supported by the driver.
+ */
+int devlink_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ const struct devlink_param *param = params;
+ int i;
+ int err;
+
+ mutex_lock(&devlink->lock);
+ for (i = 0; i < params_count; i++, param++) {
+ if (!param || !param->name || !param->supported_cmodes) {
+ err = -EINVAL;
+ goto rollback;
+ }
+ if (param->generic) {
+ err = devlink_param_generic_verify(param);
+ if (err)
+ goto rollback;
+ } else {
+ err = devlink_param_driver_verify(param);
+ if (err)
+ goto rollback;
+ }
+ err = devlink_param_register_one(devlink, param);
+ if (err)
+ goto rollback;
+ }
+
+ mutex_unlock(&devlink->lock);
+ return 0;
+
+rollback:
+ if (!i)
+ goto unlock;
+ for (param--; i > 0; i--, param--)
+ devlink_param_unregister_one(devlink, param);
+unlock:
+ mutex_unlock(&devlink->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_params_register);
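A registration sketch, assuming the DEVLINK_PARAM_DRIVER() initializer and DEVLINK_PARAM_GENERIC_ID_MAX from the include/net/devlink.h side of this series (foo_* names hypothetical). A driverinit-only parameter must leave .get/.set NULL, matching the WARN_ON() in devlink_param_register_one():

enum {
	/* driver-specific IDs must be above the generic ID range */
	FOO_DEVLINK_PARAM_ID_QUEUE_COUNT = DEVLINK_PARAM_GENERIC_ID_MAX + 1,
};

static const struct devlink_param foo_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(FOO_DEVLINK_PARAM_ID_QUEUE_COUNT,
			     "queue_count", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			     NULL, NULL, NULL),
};

static int foo_params_init(struct devlink *devlink)
{
	/* fails with -EEXIST on duplicate names and rolls back cleanly */
	return devlink_params_register(devlink, foo_devlink_params,
				       ARRAY_SIZE(foo_devlink_params));
}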
+
+/**
+ * devlink_params_unregister - unregister configuration parameters
+ * @devlink: devlink
+ * @params: configuration parameters to unregister
+ * @params_count: number of parameters provided
+ */
+void devlink_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ const struct devlink_param *param = params;
+ int i;
+
+ mutex_lock(&devlink->lock);
+ for (i = 0; i < params_count; i++, param++)
+ devlink_param_unregister_one(devlink, param);
+ mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devlink_params_unregister);
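The matching teardown on the driver remove path, continuing the sketch above:

static void foo_params_fini(struct devlink *devlink)
{
	devlink_params_unregister(devlink, foo_devlink_params,
				  ARRAY_SIZE(foo_devlink_params));
}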
+
+/**
+ * devlink_param_driverinit_value_get - get configuration parameter
+ * value for driver initialization
+ *
+ * @devlink: devlink
+ * @param_id: parameter ID
+ * @init_val: value of parameter in driverinit configuration mode
+ *
+ * This function should be used by the driver to get driverinit
+ * configuration for initialization after the reload command.
+ */
+int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *init_val)
+{
+ struct devlink_param_item *param_item;
+
+ if (!devlink->ops || !devlink->ops->reload)
+ return -EOPNOTSUPP;
+
+ param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
+ if (!param_item)
+ return -EINVAL;
+
+ if (!param_item->driverinit_value_valid ||
+ !devlink_param_cmode_is_supported(param_item->param,
+ DEVLINK_PARAM_CMODE_DRIVERINIT))
+ return -EOPNOTSUPP;
+
+ *init_val = param_item->driverinit_value;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get);
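Sketch of the intended caller: the driver's (re)initialization path reads the driverinit value and falls back to a built-in default when devlink has none (FOO_* names hypothetical):

static u32 foo_queue_count(struct devlink *devlink)
{
	union devlink_param_value val;

	if (devlink_param_driverinit_value_get(devlink,
					       FOO_DEVLINK_PARAM_ID_QUEUE_COUNT,
					       &val))
		return FOO_QUEUE_COUNT_DEFAULT;	/* hypothetical default */
	return val.vu32;
}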
+
+/**
+ * devlink_param_driverinit_value_set - set value of configuration
+ * parameter for driverinit
+ * configuration mode
+ *
+ * @devlink: devlink
+ * @param_id: parameter ID
+ * @init_val: value of parameter to set for driverinit configuration mode
+ *
+ * This function should be used by the driver to set driverinit
+ * configuration mode's default value.
+ */
+int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val)
+{
+ struct devlink_param_item *param_item;
+
+ param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
+ if (!param_item)
+ return -EINVAL;
+
+ if (!devlink_param_cmode_is_supported(param_item->param,
+ DEVLINK_PARAM_CMODE_DRIVERINIT))
+ return -EOPNOTSUPP;
+
+ param_item->driverinit_value = init_val;
+ param_item->driverinit_value_valid = true;
+
+ devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set);
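Sketch of the complementary probe-time call, publishing the driver default so userspace sees a valid value before ever setting one:

static void foo_params_publish_defaults(struct devlink *devlink)
{
	union devlink_param_value val;

	val.vu32 = FOO_QUEUE_COUNT_DEFAULT;	/* hypothetical default */
	devlink_param_driverinit_value_set(devlink,
					   FOO_DEVLINK_PARAM_ID_QUEUE_COUNT,
					   val);
}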
+
+/**
+ * devlink_param_value_changed - notify devlink of a parameter's value change
+ *
+ * @devlink: devlink
+ * @param_id: parameter ID
+ *
+ * This function should be used by the driver to notify devlink of a
+ * value change, right after the change is made, excluding the
+ * driverinit configuration mode.
+ * For the driverinit configuration mode, the driver should use
+ * devlink_param_driverinit_value_set() instead.
+ */
+void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
+{
+ struct devlink_param_item *param_item;
+
+ param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
+ WARN_ON(!param_item);
+
+ devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
+}
+EXPORT_SYMBOL_GPL(devlink_param_value_changed);
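Sketch of a caller: when a runtime-cmode value changes behind devlink's back (e.g. through a legacy interface), the driver lets devlink emit DEVLINK_CMD_PARAM_NEW so listeners stay in sync (the runtime parameter ID is hypothetical):

static void foo_feature_toggled(struct devlink *devlink)
{
	/* FOO_DEVLINK_PARAM_ID_FEATURE: hypothetical runtime-cmode ID */
	devlink_param_value_changed(devlink, FOO_DEVLINK_PARAM_ID_FEATURE);
}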
+
+/**
+ * devlink_region_create - create a new address region
+ *
+ * @devlink: devlink
+ * @region_name: region name
+ * @region_max_snapshots: maximum number of snapshots supported by the region
+ * @region_size: size of region
+ */
+struct devlink_region *devlink_region_create(struct devlink *devlink,
+ const char *region_name,
+ u32 region_max_snapshots,
+ u64 region_size)
+{
+ struct devlink_region *region;
+ int err = 0;
+
+ mutex_lock(&devlink->lock);
+
+ if (devlink_region_get_by_name(devlink, region_name)) {
+ err = -EEXIST;
+ goto unlock;
+ }
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ region->devlink = devlink;
+ region->max_snapshots = region_max_snapshots;
+ region->name = region_name;
+ region->size = region_size;
+ INIT_LIST_HEAD(&region->snapshot_list);
+ list_add_tail(&region->list, &devlink->region_list);
+ devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
+
+ mutex_unlock(&devlink->lock);
+ return region;
+
+unlock:
+ mutex_unlock(&devlink->lock);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(devlink_region_create);
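Probe-time sketch, assuming hypothetical foo_* names and sizes:

static struct devlink_region *foo_cr_region;

static int foo_regions_init(struct devlink *devlink)
{
	foo_cr_region = devlink_region_create(devlink, "cr-space",
					      FOO_CR_MAX_SNAPSHOTS,
					      FOO_CR_SIZE);
	return PTR_ERR_OR_ZERO(foo_cr_region);
}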
+
+/**
+ * devlink_region_destroy - destroy address region
+ *
+ * @region: devlink region to destroy
+ */
+void devlink_region_destroy(struct devlink_region *region)
+{
+ struct devlink *devlink = region->devlink;
+ struct devlink_snapshot *snapshot, *ts;
+
+ mutex_lock(&devlink->lock);
+
+ /* Free all snapshots of region */
+ list_for_each_entry_safe(snapshot, ts, &region->snapshot_list, list)
+ devlink_region_snapshot_del(snapshot);
+
+ list_del(&region->list);
+
+ devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
+ mutex_unlock(&devlink->lock);
+ kfree(region);
+}
+EXPORT_SYMBOL_GPL(devlink_region_destroy);
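And the matching teardown:

static void foo_regions_fini(void)
{
	if (!IS_ERR_OR_NULL(foo_cr_region))
		devlink_region_destroy(foo_cr_region);
}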
+
+/**
+ * devlink_region_shapshot_id_get - get snapshot ID
+ *
+ * @devlink: devlink
+ *
+ * This function should be called when adding a new snapshot.
+ * The driver should use the same ID for snapshots taken on
+ * multiple regions at the same time or by the same trigger.
+ */
+u32 devlink_region_shapshot_id_get(struct devlink *devlink)
+{
+ u32 id;
+
+ mutex_lock(&devlink->lock);
+ id = ++devlink->snapshot_id;
+ mutex_unlock(&devlink->lock);
+
+ return id;
+}
+EXPORT_SYMBOL_GPL(devlink_region_shapshot_id_get);
+
+/**
+ * devlink_region_snapshot_create - create a new snapshot
+ *
+ * @region: devlink region of the snapshot
+ * @data_len: size of snapshot data
+ * @data: snapshot data
+ * @snapshot_id: snapshot ID to be created
+ * @data_destructor: pointer to destructor function to free data
+ *
+ * This will add a new snapshot of a region. The snapshot
+ * will be stored on the region struct and can be accessed
+ * from devlink. This is useful for future analyses of snapshots.
+ * Multiple snapshots can be created on a region.
+ * The @snapshot_id should be obtained using the getter function.
+ */
+int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
+ u8 *data, u32 snapshot_id,
+ devlink_snapshot_data_dest_t *data_destructor)
+{
+ struct devlink *devlink = region->devlink;
+ struct devlink_snapshot *snapshot;
+ int err;
+
+ mutex_lock(&devlink->lock);
+
+ /* check if region can hold one more snapshot */
+ if (region->cur_snapshots == region->max_snapshots) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ if (devlink_region_snapshot_get_by_id(region, snapshot_id)) {
+ err = -EEXIST;
+ goto unlock;
+ }
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
+ if (!snapshot) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ snapshot->id = snapshot_id;
+ snapshot->region = region;
+ snapshot->data = data;
+ snapshot->data_len = data_len;
+ snapshot->data_destructor = data_destructor;
+
+ list_add_tail(&snapshot->list, &region->snapshot_list);
+
+ region->cur_snapshots++;
+
+ devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_NEW);
+ mutex_unlock(&devlink->lock);
+ return 0;
+
+unlock:
+ mutex_unlock(&devlink->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_region_snapshot_create);
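A snapshot-taking sketch tying the two calls together (note the historical "shapshot" spelling of the exported ID getter; foo_read_cr_space() is a hypothetical hardware read):

static void foo_take_cr_snapshot(struct devlink *devlink)
{
	u32 id = devlink_region_shapshot_id_get(devlink);
	u8 *data = vmalloc(FOO_CR_SIZE);

	if (!data)
		return;
	foo_read_cr_space(data);
	/* on success, vfree() runs when the snapshot is deleted */
	if (devlink_region_snapshot_create(foo_cr_region, FOO_CR_SIZE,
					   data, id, &vfree))
		vfree(data);
}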
+
static int __init devlink_module_init(void)
{
return genl_register_family(&devlink_nl_family);
diff --git a/net/core/dst.c b/net/core/dst.c
index 2d9b37f8944a..81ccf20e2826 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -307,6 +307,7 @@ void metadata_dst_free(struct metadata_dst *md_dst)
#endif
kfree(md_dst);
}
+EXPORT_SYMBOL_GPL(metadata_dst_free);
struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index e677a20180cf..c9993c6c2fd4 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -111,6 +111,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
[NETIF_F_RX_UDP_TUNNEL_PORT_BIT] = "rx-udp_tunnel-port-offload",
[NETIF_F_HW_TLS_RECORD_BIT] = "tls-hw-record",
[NETIF_F_HW_TLS_TX_BIT] = "tls-hw-tx-offload",
+ [NETIF_F_HW_TLS_RX_BIT] = "tls-hw-rx-offload",
};
static const char
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index f64aa13811ea..0ff3953f64aa 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -924,8 +924,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
return 0;
errout:
- if (nlrule)
- kfree(nlrule);
+ kfree(nlrule);
rules_ops_put(ops);
return err;
}
diff --git a/net/core/filter.c b/net/core/filter.c
index 9dfd145eedcc..fd423ce3da34 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1453,30 +1453,6 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
return 0;
}
-static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk)
-{
- struct bpf_prog *old_prog;
- int err;
-
- if (bpf_prog_size(prog->len) > sysctl_optmem_max)
- return -ENOMEM;
-
- if (sk_unhashed(sk) && sk->sk_reuseport) {
- err = reuseport_alloc(sk);
- if (err)
- return err;
- } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
- /* The socket wasn't bound with SO_REUSEPORT */
- return -EINVAL;
- }
-
- old_prog = reuseport_attach_prog(sk, prog);
- if (old_prog)
- bpf_prog_destroy(old_prog);
-
- return 0;
-}
-
static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
@@ -1550,13 +1526,15 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
if (IS_ERR(prog))
return PTR_ERR(prog);
- err = __reuseport_attach_prog(prog, sk);
- if (err < 0) {
+ if (bpf_prog_size(prog->len) > sysctl_optmem_max)
+ err = -ENOMEM;
+ else
+ err = reuseport_attach_prog(sk, prog);
+
+ if (err)
__bpf_prog_release(prog);
- return err;
- }
- return 0;
+ return err;
}
static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
@@ -1586,19 +1564,58 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
- struct bpf_prog *prog = __get_bpf(ufd, sk);
+ struct bpf_prog *prog;
int err;
+ if (sock_flag(sk, SOCK_FILTER_LOCKED))
+ return -EPERM;
+
+ prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
+ if (IS_ERR(prog) && PTR_ERR(prog) == -EINVAL)
+ prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
if (IS_ERR(prog))
return PTR_ERR(prog);
- err = __reuseport_attach_prog(prog, sk);
- if (err < 0) {
- bpf_prog_put(prog);
- return err;
+ if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
+		/* Like other non-BPF_PROG_TYPE_SOCKET_FILTER
+		 * bpf progs (e.g. sockmap), this depends on the
+		 * limits imposed by bpf_prog_load().
+		 * Hence, sysctl_optmem_max is not checked.
+ */
+ if ((sk->sk_type != SOCK_STREAM &&
+ sk->sk_type != SOCK_DGRAM) ||
+ (sk->sk_protocol != IPPROTO_UDP &&
+ sk->sk_protocol != IPPROTO_TCP) ||
+ (sk->sk_family != AF_INET &&
+ sk->sk_family != AF_INET6)) {
+ err = -ENOTSUPP;
+ goto err_prog_put;
+ }
+ } else {
+ /* BPF_PROG_TYPE_SOCKET_FILTER */
+ if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
+ err = -ENOMEM;
+ goto err_prog_put;
+ }
}
- return 0;
+ err = reuseport_attach_prog(sk, prog);
+err_prog_put:
+ if (err)
+ bpf_prog_put(prog);
+
+ return err;
+}
+
+void sk_reuseport_prog_free(struct bpf_prog *prog)
+{
+ if (!prog)
+ return;
+
+ if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
+ bpf_prog_put(prog);
+ else
+ bpf_prog_destroy(prog);
}
struct bpf_scratchpad {
@@ -2082,19 +2099,12 @@ static const struct bpf_func_proto bpf_clone_redirect_proto = {
.arg3_type = ARG_ANYTHING,
};
-struct redirect_info {
- u32 ifindex;
- u32 flags;
- struct bpf_map *map;
- struct bpf_map *map_to_flush;
- unsigned long map_owner;
-};
-
-static DEFINE_PER_CPU(struct redirect_info, redirect_info);
+DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
+EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
- struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
if (unlikely(flags & ~(BPF_F_INGRESS)))
return TC_ACT_SHOT;
@@ -2107,7 +2117,7 @@ BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
int skb_do_redirect(struct sk_buff *skb)
{
- struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
struct net_device *dev;
dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
@@ -3200,7 +3210,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
void xdp_do_flush_map(void)
{
- struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
struct bpf_map *map = ri->map_to_flush;
ri->map_to_flush = NULL;
@@ -3245,7 +3255,7 @@ static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
- struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
unsigned long map_owner = ri->map_owner;
struct bpf_map *map = ri->map;
u32 index = ri->ifindex;
@@ -3285,7 +3295,7 @@ err:
int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
- struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
struct net_device *fwd;
u32 index = ri->ifindex;
int err;
@@ -3317,7 +3327,7 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
- struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
unsigned long map_owner = ri->map_owner;
struct bpf_map *map = ri->map;
u32 index = ri->ifindex;
@@ -3368,7 +3378,7 @@ err:
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
{
- struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
u32 index = ri->ifindex;
struct net_device *fwd;
int err = 0;
@@ -3399,7 +3409,7 @@ EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
{
- struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
if (unlikely(flags))
return XDP_ABORTED;
@@ -3423,7 +3433,7 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = {
BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
unsigned long, map_owner)
{
- struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
if (unlikely(flags))
return XDP_ABORTED;
@@ -3681,7 +3691,7 @@ BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
if (unlikely(size > IP_TUNNEL_OPTS_MAX))
return -ENOMEM;
- ip_tunnel_info_opts_set(info, from, size);
+ ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
return 0;
}
@@ -3768,6 +3778,32 @@ static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
+
+BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
+ ancestor_level)
+{
+ struct sock *sk = skb_to_full_sk(skb);
+ struct cgroup *ancestor;
+ struct cgroup *cgrp;
+
+ if (!sk || !sk_fullsock(sk))
+ return 0;
+
+ cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+ ancestor = cgroup_ancestor(cgrp, ancestor_level);
+ if (!ancestor)
+ return 0;
+
+ return ancestor->kn->id.id;
+}
+
+static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
+ .func = bpf_skb_ancestor_cgroup_id,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+};
#endif
static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
@@ -3814,6 +3850,30 @@ static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
+BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
+{
+ return sock_gen_cookie(ctx->sk);
+}
+
+static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
+ .func = bpf_get_socket_cookie_sock_addr,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
+{
+ return sock_gen_cookie(ctx->sk);
+}
+
+static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
+ .func = bpf_get_socket_cookie_sock_ops,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
{
struct sock *sk = sk_to_full_sk(skb->sk);
@@ -4544,26 +4604,28 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
{
struct seg6_bpf_srh_state *srh_state =
this_cpu_ptr(&seg6_bpf_srh_states);
+ struct ipv6_sr_hdr *srh = srh_state->srh;
void *srh_tlvs, *srh_end, *ptr;
- struct ipv6_sr_hdr *srh;
int srhoff = 0;
- if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
+ if (srh == NULL)
return -EINVAL;
- srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4));
srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen);
ptr = skb->data + offset;
if (ptr >= srh_tlvs && ptr + len <= srh_end)
- srh_state->valid = 0;
+ srh_state->valid = false;
else if (ptr < (void *)&srh->flags ||
ptr + len > (void *)&srh->segments)
return -EFAULT;
if (unlikely(bpf_try_make_writable(skb, offset + len)))
return -EFAULT;
+ if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
+ return -EINVAL;
+ srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
memcpy(skb->data + offset, from, len);
return 0;
@@ -4579,52 +4641,78 @@ static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
.arg4_type = ARG_CONST_SIZE
};
-BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
- u32, action, void *, param, u32, param_len)
+static void bpf_update_srh_state(struct sk_buff *skb)
{
struct seg6_bpf_srh_state *srh_state =
this_cpu_ptr(&seg6_bpf_srh_states);
- struct ipv6_sr_hdr *srh;
int srhoff = 0;
- int err;
- if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
- return -EINVAL;
- srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
-
- if (!srh_state->valid) {
- if (unlikely((srh_state->hdrlen & 7) != 0))
- return -EBADMSG;
-
- srh->hdrlen = (u8)(srh_state->hdrlen >> 3);
- if (unlikely(!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3)))
- return -EBADMSG;
-
- srh_state->valid = 1;
+ if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) {
+ srh_state->srh = NULL;
+ } else {
+ srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+ srh_state->hdrlen = srh_state->srh->hdrlen << 3;
+ srh_state->valid = true;
}
+}
+
+BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
+ u32, action, void *, param, u32, param_len)
+{
+ struct seg6_bpf_srh_state *srh_state =
+ this_cpu_ptr(&seg6_bpf_srh_states);
+ int hdroff = 0;
+ int err;
switch (action) {
case SEG6_LOCAL_ACTION_END_X:
+ if (!seg6_bpf_has_valid_srh(skb))
+ return -EBADMSG;
if (param_len != sizeof(struct in6_addr))
return -EINVAL;
return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
case SEG6_LOCAL_ACTION_END_T:
+ if (!seg6_bpf_has_valid_srh(skb))
+ return -EBADMSG;
+ if (param_len != sizeof(int))
+ return -EINVAL;
+ return seg6_lookup_nexthop(skb, NULL, *(int *)param);
+ case SEG6_LOCAL_ACTION_END_DT6:
+ if (!seg6_bpf_has_valid_srh(skb))
+ return -EBADMSG;
if (param_len != sizeof(int))
return -EINVAL;
+
+ if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0)
+ return -EBADMSG;
+ if (!pskb_pull(skb, hdroff))
+ return -EBADMSG;
+
+ skb_postpull_rcsum(skb, skb_network_header(skb), hdroff);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ skb->encapsulation = 0;
+
+ bpf_compute_data_pointers(skb);
+ bpf_update_srh_state(skb);
return seg6_lookup_nexthop(skb, NULL, *(int *)param);
case SEG6_LOCAL_ACTION_END_B6:
+ if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
+ return -EBADMSG;
err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
param, param_len);
if (!err)
- srh_state->hdrlen =
- ((struct ipv6_sr_hdr *)param)->hdrlen << 3;
+ bpf_update_srh_state(skb);
+
return err;
case SEG6_LOCAL_ACTION_END_B6_ENCAP:
+ if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
+ return -EBADMSG;
err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
param, param_len);
if (!err)
- srh_state->hdrlen =
- ((struct ipv6_sr_hdr *)param)->hdrlen << 3;
+ bpf_update_srh_state(skb);
+
return err;
default:
return -EINVAL;
@@ -4646,15 +4734,14 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
{
struct seg6_bpf_srh_state *srh_state =
this_cpu_ptr(&seg6_bpf_srh_states);
+ struct ipv6_sr_hdr *srh = srh_state->srh;
void *srh_end, *srh_tlvs, *ptr;
- struct ipv6_sr_hdr *srh;
struct ipv6hdr *hdr;
int srhoff = 0;
int ret;
- if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
+ if (unlikely(srh == NULL))
return -EINVAL;
- srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) +
((srh->first_segment + 1) << 4));
@@ -4684,8 +4771,11 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
hdr = (struct ipv6hdr *)skb->data;
hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
+ if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
+ return -EINVAL;
+ srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
srh_state->hdrlen += len;
- srh_state->valid = 0;
+ srh_state->valid = false;
return 0;
}
@@ -4753,6 +4843,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_trace_printk:
if (capable(CAP_SYS_ADMIN))
return bpf_get_trace_printk_proto();
+ /* else: fall through */
default:
return NULL;
}
@@ -4767,6 +4858,8 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
*/
case BPF_FUNC_get_current_uid_gid:
return &bpf_get_current_uid_gid_proto;
+ case BPF_FUNC_get_local_storage:
+ return &bpf_get_local_storage_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -4789,6 +4882,10 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
default:
return NULL;
}
+ case BPF_FUNC_get_socket_cookie:
+ return &bpf_get_socket_cookie_sock_addr_proto;
+ case BPF_FUNC_get_local_storage:
+ return &bpf_get_local_storage_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -4812,6 +4909,17 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
}
static const struct bpf_func_proto *
+cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ switch (func_id) {
+ case BPF_FUNC_get_local_storage:
+ return &bpf_get_local_storage_proto;
+ default:
+ return sk_filter_func_proto(func_id, prog);
+ }
+}
+
+static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
@@ -4884,6 +4992,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
#ifdef CONFIG_SOCK_CGROUP_DATA
case BPF_FUNC_skb_cgroup_id:
return &bpf_skb_cgroup_id_proto;
+ case BPF_FUNC_skb_ancestor_cgroup_id:
+ return &bpf_skb_ancestor_cgroup_id_proto;
#endif
default:
return bpf_base_func_proto(func_id);
@@ -4931,6 +5041,10 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sock_map_update_proto;
case BPF_FUNC_sock_hash_update:
return &bpf_sock_hash_update_proto;
+ case BPF_FUNC_get_socket_cookie:
+ return &bpf_get_socket_cookie_sock_ops_proto;
+ case BPF_FUNC_get_local_storage:
+ return &bpf_get_local_storage_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -4950,6 +5064,8 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_msg_cork_bytes_proto;
case BPF_FUNC_msg_pull_data:
return &bpf_msg_pull_data_proto;
+ case BPF_FUNC_get_local_storage:
+ return &bpf_get_local_storage_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -4977,6 +5093,8 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_redirect_map_proto;
case BPF_FUNC_sk_redirect_hash:
return &bpf_sk_redirect_hash_proto;
+ case BPF_FUNC_get_local_storage:
+ return &bpf_get_local_storage_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -6781,7 +6899,7 @@ const struct bpf_prog_ops xdp_prog_ops = {
};
const struct bpf_verifier_ops cg_skb_verifier_ops = {
- .get_func_proto = sk_filter_func_proto,
+ .get_func_proto = cg_skb_func_proto,
.is_valid_access = sk_filter_is_valid_access,
.convert_ctx_access = bpf_convert_ctx_access,
};
@@ -6940,3 +7058,271 @@ out:
release_sock(sk);
return ret;
}
+
+#ifdef CONFIG_INET
+struct sk_reuseport_kern {
+ struct sk_buff *skb;
+ struct sock *sk;
+ struct sock *selected_sk;
+ void *data_end;
+ u32 hash;
+ u32 reuseport_id;
+ bool bind_inany;
+};
+
+static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
+ struct sock_reuseport *reuse,
+ struct sock *sk, struct sk_buff *skb,
+ u32 hash)
+{
+ reuse_kern->skb = skb;
+ reuse_kern->sk = sk;
+ reuse_kern->selected_sk = NULL;
+ reuse_kern->data_end = skb->data + skb_headlen(skb);
+ reuse_kern->hash = hash;
+ reuse_kern->reuseport_id = reuse->reuseport_id;
+ reuse_kern->bind_inany = reuse->bind_inany;
+}
+
+struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
+ struct bpf_prog *prog, struct sk_buff *skb,
+ u32 hash)
+{
+ struct sk_reuseport_kern reuse_kern;
+ enum sk_action action;
+
+ bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash);
+ action = BPF_PROG_RUN(prog, &reuse_kern);
+
+ if (action == SK_PASS)
+ return reuse_kern.selected_sk;
+ else
+ return ERR_PTR(-ECONNREFUSED);
+}
+
+BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
+ struct bpf_map *, map, void *, key, u32, flags)
+{
+ struct sock_reuseport *reuse;
+ struct sock *selected_sk;
+
+ selected_sk = map->ops->map_lookup_elem(map, key);
+ if (!selected_sk)
+ return -ENOENT;
+
+ reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
+ if (!reuse)
+ /* selected_sk is unhashed (e.g. by close()) after the
+		 * above map_lookup_elem(). Treat selected_sk as having
+		 * already been removed from the map.
+ */
+ return -ENOENT;
+
+ if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
+ struct sock *sk;
+
+ if (unlikely(!reuse_kern->reuseport_id))
+ /* There is a small race between adding the
+ * sk to the map and setting the
+ * reuse_kern->reuseport_id.
+			 * Treat it as if the sk had not been added to
+			 * the bpf map yet.
+ */
+ return -ENOENT;
+
+ sk = reuse_kern->sk;
+ if (sk->sk_protocol != selected_sk->sk_protocol)
+ return -EPROTOTYPE;
+ else if (sk->sk_family != selected_sk->sk_family)
+ return -EAFNOSUPPORT;
+
+ /* Catch all. Likely bound to a different sockaddr. */
+ return -EBADFD;
+ }
+
+ reuse_kern->selected_sk = selected_sk;
+
+ return 0;
+}
+
+static const struct bpf_func_proto sk_select_reuseport_proto = {
+ .func = sk_select_reuseport,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_PTR_TO_MAP_KEY,
+ .arg4_type = ARG_ANYTHING,
+};
+
+BPF_CALL_4(sk_reuseport_load_bytes,
+ const struct sk_reuseport_kern *, reuse_kern, u32, offset,
+ void *, to, u32, len)
+{
+ return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len);
+}
+
+static const struct bpf_func_proto sk_reuseport_load_bytes_proto = {
+ .func = sk_reuseport_load_bytes,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg4_type = ARG_CONST_SIZE,
+};
+
+BPF_CALL_5(sk_reuseport_load_bytes_relative,
+ const struct sk_reuseport_kern *, reuse_kern, u32, offset,
+ void *, to, u32, len, u32, start_header)
+{
+ return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to,
+ len, start_header);
+}
+
+static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = {
+ .func = sk_reuseport_load_bytes_relative,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg4_type = ARG_CONST_SIZE,
+ .arg5_type = ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *
+sk_reuseport_func_proto(enum bpf_func_id func_id,
+ const struct bpf_prog *prog)
+{
+ switch (func_id) {
+ case BPF_FUNC_sk_select_reuseport:
+ return &sk_select_reuseport_proto;
+ case BPF_FUNC_skb_load_bytes:
+ return &sk_reuseport_load_bytes_proto;
+ case BPF_FUNC_skb_load_bytes_relative:
+ return &sk_reuseport_load_bytes_relative_proto;
+ default:
+ return bpf_base_func_proto(func_id);
+ }
+}
+
+static bool
+sk_reuseport_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ const u32 size_default = sizeof(__u32);
+
+ if (off < 0 || off >= sizeof(struct sk_reuseport_md) ||
+ off % size || type != BPF_READ)
+ return false;
+
+ switch (off) {
+ case offsetof(struct sk_reuseport_md, data):
+ info->reg_type = PTR_TO_PACKET;
+ return size == sizeof(__u64);
+
+ case offsetof(struct sk_reuseport_md, data_end):
+ info->reg_type = PTR_TO_PACKET_END;
+ return size == sizeof(__u64);
+
+ case offsetof(struct sk_reuseport_md, hash):
+ return size == size_default;
+
+ /* Fields that allow narrowing */
+ case offsetof(struct sk_reuseport_md, eth_protocol):
+ if (size < FIELD_SIZEOF(struct sk_buff, protocol))
+ return false;
+ /* fall through */
+ case offsetof(struct sk_reuseport_md, ip_protocol):
+ case offsetof(struct sk_reuseport_md, bind_inany):
+ case offsetof(struct sk_reuseport_md, len):
+ bpf_ctx_record_field_size(info, size_default);
+ return bpf_ctx_narrow_access_ok(off, size, size_default);
+
+ default:
+ return false;
+ }
+}
+
+#define SK_REUSEPORT_LOAD_FIELD(F) ({ \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \
+ si->dst_reg, si->src_reg, \
+ bpf_target_off(struct sk_reuseport_kern, F, \
+ FIELD_SIZEOF(struct sk_reuseport_kern, F), \
+ target_size)); \
+ })
+
+#define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD) \
+ SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \
+ struct sk_buff, \
+ skb, \
+ SKB_FIELD)
+
+#define SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(SK_FIELD, BPF_SIZE, EXTRA_OFF) \
+ SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct sk_reuseport_kern, \
+ struct sock, \
+ sk, \
+ SK_FIELD, BPF_SIZE, EXTRA_OFF)
+
+static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ switch (si->off) {
+ case offsetof(struct sk_reuseport_md, data):
+ SK_REUSEPORT_LOAD_SKB_FIELD(data);
+ break;
+
+ case offsetof(struct sk_reuseport_md, len):
+ SK_REUSEPORT_LOAD_SKB_FIELD(len);
+ break;
+
+ case offsetof(struct sk_reuseport_md, eth_protocol):
+ SK_REUSEPORT_LOAD_SKB_FIELD(protocol);
+ break;
+
+ case offsetof(struct sk_reuseport_md, ip_protocol):
+ BUILD_BUG_ON(hweight_long(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
+ SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
+ BPF_W, 0);
+ *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
+ SK_FL_PROTO_SHIFT);
+ /* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian
+ * aware. No further narrowing or masking is needed.
+ */
+ *target_size = 1;
+ break;
+
+ case offsetof(struct sk_reuseport_md, data_end):
+ SK_REUSEPORT_LOAD_FIELD(data_end);
+ break;
+
+ case offsetof(struct sk_reuseport_md, hash):
+ SK_REUSEPORT_LOAD_FIELD(hash);
+ break;
+
+ case offsetof(struct sk_reuseport_md, bind_inany):
+ SK_REUSEPORT_LOAD_FIELD(bind_inany);
+ break;
+ }
+
+ return insn - insn_buf;
+}
+
+const struct bpf_verifier_ops sk_reuseport_verifier_ops = {
+ .get_func_proto = sk_reuseport_func_proto,
+ .is_valid_access = sk_reuseport_is_valid_access,
+ .convert_ctx_access = sk_reuseport_convert_ctx_access,
+};
+
+const struct bpf_prog_ops sk_reuseport_prog_ops = {
+};
+#endif /* CONFIG_INET */
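For context, a minimal BPF_PROG_TYPE_SK_REUSEPORT program exercising the helpers wired up above, written in restricted C for clang/BPF in the style of this series' selftests (the map name, sizes, and helper-header path are assumptions):

#include <linux/bpf.h>
#include "bpf_helpers.h"	/* selftests-style helper declarations */

struct bpf_map_def SEC("maps") reuseport_array = {
	.type		= BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
	.key_size	= sizeof(__u32),
	.value_size	= sizeof(__u32),	/* socket fd on update */
	.max_entries	= 16,
};

SEC("sk_reuseport")
int _select_by_hash(struct sk_reuseport_md *reuse_md)
{
	__u32 key = reuse_md->hash % 16;

	/* On success the kernel delivers to the selected socket; on
	 * failure refuse the connection (see bpf_run_sk_reuseport()).
	 */
	if (bpf_sk_select_reuseport(reuse_md, &reuseport_array, &key, 0))
		return SK_DROP;
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";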
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 53f96e4f7bf5..ce9eeeb7c024 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -152,7 +152,11 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
!dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL) &&
!dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_ENC_PORTS))
+ FLOW_DISSECTOR_KEY_ENC_PORTS) &&
+ !dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_ENC_IP) &&
+ !dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_ENC_OPTS))
return;
info = skb_tunnel_info(skb);
@@ -212,6 +216,31 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
tp->src = key->tp_src;
tp->dst = key->tp_dst;
}
+
+ if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_dissector_key_ip *ip;
+
+ ip = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_ENC_IP,
+ target_container);
+ ip->tos = key->tos;
+ ip->ttl = key->ttl;
+ }
+
+ if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ struct flow_dissector_key_enc_opts *enc_opt;
+
+ enc_opt = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_ENC_OPTS,
+ target_container);
+
+ if (info->options_len) {
+ enc_opt->len = info->options_len;
+ ip_tunnel_info_opts_get(enc_opt->data, info);
+ enc_opt->dst_opt_type = info->key.tun_flags &
+ TUNNEL_OPTIONS_PRESENT;
+ }
+ }
}
EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
@@ -589,7 +618,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector_key_tags *key_tags;
struct flow_dissector_key_vlan *key_vlan;
enum flow_dissect_ret fdret;
- bool skip_vlan = false;
+ enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
int num_hdrs = 0;
u8 ip_proto = 0;
bool ret;
@@ -748,14 +777,14 @@ proto_again:
}
case htons(ETH_P_8021AD):
case htons(ETH_P_8021Q): {
- const struct vlan_hdr *vlan;
+ const struct vlan_hdr *vlan = NULL;
struct vlan_hdr _vlan;
- bool vlan_tag_present = skb && skb_vlan_tag_present(skb);
+ __be16 saved_vlan_tpid = proto;
- if (vlan_tag_present)
+ if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX &&
+ skb && skb_vlan_tag_present(skb)) {
proto = skb->protocol;
-
- if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
+ } else {
vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
data, hlen, &_vlan);
if (!vlan) {
@@ -765,20 +794,23 @@ proto_again:
proto = vlan->h_vlan_encapsulated_proto;
nhoff += sizeof(*vlan);
- if (skip_vlan) {
- fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
- break;
- }
}
- skip_vlan = true;
- if (dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_VLAN)) {
+ if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
+ dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
+ } else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
+ dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN;
+ } else {
+ fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
+ break;
+ }
+
+ if (dissector_uses_key(flow_dissector, dissector_vlan)) {
key_vlan = skb_flow_dissector_target(flow_dissector,
- FLOW_DISSECTOR_KEY_VLAN,
+ dissector_vlan,
target_container);
- if (vlan_tag_present) {
+ if (!vlan) {
key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
key_vlan->vlan_priority =
(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
@@ -789,6 +821,7 @@ proto_again:
(ntohs(vlan->h_vlan_TCI) &
VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
+ key_vlan->vlan_tpid = saved_vlan_tpid;
}
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 98fd12721221..e4e442d70c2d 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -112,7 +112,7 @@ static void est_timer(struct timer_list *t)
* @bstats: basic statistics
* @cpu_bstats: bstats per cpu
* @rate_est: rate estimator statistics
- * @stats_lock: statistics lock
+ * @lock: lock for statistics and control path
* @running: qdisc running seqcount
* @opt: rate estimator configuration TLV
*
@@ -128,7 +128,7 @@ static void est_timer(struct timer_list *t)
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
- spinlock_t *stats_lock,
+ spinlock_t *lock,
seqcount_t *running,
struct nlattr *opt)
{
@@ -154,19 +154,22 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
seqcount_init(&est->seq);
intvl_log = parm->interval + 2;
est->bstats = bstats;
- est->stats_lock = stats_lock;
+ est->stats_lock = lock;
est->running = running;
est->ewma_log = parm->ewma_log;
est->intvl_log = intvl_log;
est->cpu_bstats = cpu_bstats;
- if (stats_lock)
+ if (lock)
local_bh_disable();
est_fetch_counters(est, &b);
- if (stats_lock)
+ if (lock)
local_bh_enable();
est->last_bytes = b.bytes;
est->last_packets = b.packets;
+
+ if (lock)
+ spin_lock_bh(lock);
old = rcu_dereference_protected(*rate_est, 1);
if (old) {
del_timer_sync(&old->timer);
@@ -179,6 +182,8 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
mod_timer(&est->timer, est->next_jiffies);
rcu_assign_pointer(*rate_est, est);
+ if (lock)
+ spin_unlock_bh(lock);
if (old)
kfree_rcu(old, rcu);
return 0;
@@ -209,7 +214,7 @@ EXPORT_SYMBOL(gen_kill_estimator);
* @bstats: basic statistics
* @cpu_bstats: bstats per cpu
* @rate_est: rate estimator statistics
- * @stats_lock: statistics lock
+ * @lock: lock for statistics and control path
* @running: qdisc running seqcount (might be NULL)
* @opt: rate estimator configuration TLV
*
@@ -221,11 +226,11 @@ EXPORT_SYMBOL(gen_kill_estimator);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
- spinlock_t *stats_lock,
+ spinlock_t *lock,
seqcount_t *running, struct nlattr *opt)
{
return gen_new_estimator(bstats, cpu_bstats, rate_est,
- stats_lock, running, opt);
+ lock, running, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index e45098593dc0..3e85437f7106 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -50,10 +50,8 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
* mixing with BH RCU lock doesn't work.
*/
preempt_disable();
- rcu_read_lock();
bpf_compute_data_pointers(skb);
ret = bpf_prog_run_save_cb(lwt->prog, skb);
- rcu_read_unlock();
switch (ret) {
case BPF_OK:
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8e3fda9e725c..aa19d86937af 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1148,7 +1148,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
neigh->nud_state = new;
err = 0;
notify = old & NUD_VALID;
- if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
+ if (((old & (NUD_INCOMPLETE | NUD_PROBE)) ||
+ (flags & NEIGH_UPDATE_F_ADMIN)) &&
(new & NUD_FAILED)) {
neigh_invalidate(neigh);
notify = 1;
@@ -3273,4 +3274,3 @@ static int __init neigh_init(void)
}
subsys_initcall(neigh_init);
-
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index bb7e80f4ced3..bd67c4d0fcfd 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -26,6 +26,7 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/cpu.h>
#include "net-sysfs.h"
@@ -905,11 +906,20 @@ static const void *rx_queue_namespace(struct kobject *kobj)
return ns;
}
+static void rx_queue_get_ownership(struct kobject *kobj,
+ kuid_t *uid, kgid_t *gid)
+{
+ const struct net *net = rx_queue_namespace(kobj);
+
+ net_ns_get_ownership(net, uid, gid);
+}
+
static struct kobj_type rx_queue_ktype __ro_after_init = {
.sysfs_ops = &rx_queue_sysfs_ops,
.release = rx_queue_release,
.default_attrs = rx_queue_default_attrs,
- .namespace = rx_queue_namespace
+ .namespace = rx_queue_namespace,
+ .get_ownership = rx_queue_get_ownership,
};
static int rx_queue_add_kobject(struct net_device *dev, int index)
@@ -1047,13 +1057,30 @@ static ssize_t traffic_class_show(struct netdev_queue *queue,
char *buf)
{
struct net_device *dev = queue->dev;
- int index = get_netdev_queue_index(queue);
- int tc = netdev_txq_to_tc(dev, index);
+ int index;
+ int tc;
+
+ if (!netif_is_multiqueue(dev))
+ return -ENOENT;
+ index = get_netdev_queue_index(queue);
+
+ /* If queue belongs to subordinate dev use its TC mapping */
+ dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+
+ tc = netdev_txq_to_tc(dev, index);
if (tc < 0)
return -EINVAL;
- return sprintf(buf, "%u\n", tc);
+	/* We can report the traffic class one of two ways:
+	 * Subordinate device traffic classes are reported with the traffic
+	 * class first, then the subordinate device index, so TC0 on
+	 * subordinate device 2 is reported as "0-2". If the queue belongs
+	 * to the root device it is reported with just the traffic class,
+	 * e.g. "0" for TC0.
+	 */
+ return dev->num_tc < 0 ? sprintf(buf, "%u%d\n", tc, dev->num_tc) :
+ sprintf(buf, "%u\n", tc);
}
#ifdef CONFIG_XPS
@@ -1070,6 +1097,9 @@ static ssize_t tx_maxrate_store(struct netdev_queue *queue,
int err, index = get_netdev_queue_index(queue);
u32 rate = 0;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
err = kstrtou32(buf, 10, &rate);
if (err < 0)
return err;
@@ -1214,10 +1244,20 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
cpumask_var_t mask;
unsigned long index;
+ if (!netif_is_multiqueue(dev))
+ return -ENOENT;
+
index = get_netdev_queue_index(queue);
if (dev->num_tc) {
+ /* Do not allow XPS on subordinate device directly */
num_tc = dev->num_tc;
+ if (num_tc < 0)
+ return -EINVAL;
+
+ /* If queue belongs to subordinate dev use its map */
+ dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+
tc = netdev_txq_to_tc(dev, index);
if (tc < 0)
return -EINVAL;
@@ -1227,13 +1267,13 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
return -ENOMEM;
rcu_read_lock();
- dev_maps = rcu_dereference(dev->xps_maps);
+ dev_maps = rcu_dereference(dev->xps_cpus_map);
if (dev_maps) {
for_each_possible_cpu(cpu) {
int i, tci = cpu * num_tc + tc;
struct xps_map *map;
- map = rcu_dereference(dev_maps->cpu_map[tci]);
+ map = rcu_dereference(dev_maps->attr_map[tci]);
if (!map)
continue;
@@ -1260,6 +1300,9 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue,
cpumask_var_t mask;
int err;
+ if (!netif_is_multiqueue(dev))
+ return -ENOENT;
+
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -1283,6 +1326,91 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue,
static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
= __ATTR_RW(xps_cpus);
+
+static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
+{
+ struct net_device *dev = queue->dev;
+ struct xps_dev_maps *dev_maps;
+ unsigned long *mask, index;
+ int j, len, num_tc = 1, tc = 0;
+
+ index = get_netdev_queue_index(queue);
+
+ if (dev->num_tc) {
+ num_tc = dev->num_tc;
+ tc = netdev_txq_to_tc(dev, index);
+ if (tc < 0)
+ return -EINVAL;
+ }
+ mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long),
+ GFP_KERNEL);
+ if (!mask)
+ return -ENOMEM;
+
+ rcu_read_lock();
+ dev_maps = rcu_dereference(dev->xps_rxqs_map);
+ if (!dev_maps)
+ goto out_no_maps;
+
+ for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues),
+ j < dev->num_rx_queues;) {
+ int i, tci = j * num_tc + tc;
+ struct xps_map *map;
+
+ map = rcu_dereference(dev_maps->attr_map[tci]);
+ if (!map)
+ continue;
+
+ for (i = map->len; i--;) {
+ if (map->queues[i] == index) {
+ set_bit(j, mask);
+ break;
+ }
+ }
+ }
+out_no_maps:
+ rcu_read_unlock();
+
+ len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
+ kfree(mask);
+
+ return len < PAGE_SIZE ? len : -EINVAL;
+}
+
+static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
+ size_t len)
+{
+ struct net_device *dev = queue->dev;
+ struct net *net = dev_net(dev);
+ unsigned long *mask, index;
+ int err;
+
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
+
+ mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long),
+ GFP_KERNEL);
+ if (!mask)
+ return -ENOMEM;
+
+ index = get_netdev_queue_index(queue);
+
+ err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
+ if (err) {
+ kfree(mask);
+ return err;
+ }
+
+ cpus_read_lock();
+ err = __netif_set_xps_queue(dev, mask, index, true);
+ cpus_read_unlock();
+
+ kfree(mask);
+ return err ? : len;
+}
+
+static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init
+ = __ATTR_RW(xps_rxqs);
#endif /* CONFIG_XPS */
static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
@@ -1290,6 +1418,7 @@ static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
&queue_traffic_class.attr,
#ifdef CONFIG_XPS
&xps_cpus_attribute.attr,
+ &xps_rxqs_attribute.attr,
&queue_tx_maxrate.attr,
#endif
NULL
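
For illustration, a minimal userspace sketch that drives the new per-queue attribute by writing an rx-queue bitmask to tx-<n>/xps_rxqs; the device name, queue numbers and mask here are assumptions, and CAP_NET_ADMIN is required in the owning netns:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* Hypothetical device and queue; adjust to the system at hand. */
        const char *attr = "/sys/class/net/eth0/queues/tx-0/xps_rxqs";
        const char *mask = "1\n";   /* bit 0: pair tx-0 with rx queue 0 */
        int fd = open(attr, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, mask, strlen(mask)) < 0)
            perror("write");    /* needs CAP_NET_ADMIN */
        close(fd);
        return 0;
    }
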
@@ -1315,11 +1444,20 @@ static const void *netdev_queue_namespace(struct kobject *kobj)
return ns;
}
+static void netdev_queue_get_ownership(struct kobject *kobj,
+ kuid_t *uid, kgid_t *gid)
+{
+ const struct net *net = netdev_queue_namespace(kobj);
+
+ net_ns_get_ownership(net, uid, gid);
+}
+
static struct kobj_type netdev_queue_ktype __ro_after_init = {
.sysfs_ops = &netdev_queue_sysfs_ops,
.release = netdev_queue_release,
.default_attrs = netdev_queue_default_attrs,
.namespace = netdev_queue_namespace,
+ .get_ownership = netdev_queue_get_ownership,
};
static int netdev_queue_add_kobject(struct net_device *dev, int index)
@@ -1509,6 +1647,14 @@ static const void *net_namespace(struct device *d)
return dev_net(dev);
}
+static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid)
+{
+ struct net_device *dev = to_net_dev(d);
+ const struct net *net = dev_net(dev);
+
+ net_ns_get_ownership(net, uid, gid);
+}
+
static struct class net_class __ro_after_init = {
.name = "net",
.dev_release = netdev_release,
@@ -1516,6 +1662,7 @@ static struct class net_class __ro_after_init = {
.dev_uevent = netdev_uevent,
.ns_type = &net_ns_type_operations,
.namespace = net_namespace,
+ .get_ownership = net_get_ownership,
};
#ifdef CONFIG_OF_NET
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index a11e03f920d3..738871af5efa 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -17,6 +17,7 @@
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
+#include <linux/uidgid.h>
#include <net/sock.h>
#include <net/netlink.h>
@@ -448,6 +449,33 @@ dec_ucounts:
return net;
}
+/**
+ * net_ns_get_ownership - get sysfs ownership data for @net
+ * @net: network namespace in question (can be NULL)
+ * @uid: kernel user ID for sysfs objects
+ * @gid: kernel group ID for sysfs objects
+ *
+ * Returns the uid/gid pair of root in the user namespace associated with the
+ * given network namespace.
+ */
+void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
+{
+ if (net) {
+ kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
+ kgid_t ns_root_gid = make_kgid(net->user_ns, 0);
+
+ if (uid_valid(ns_root_uid))
+ *uid = ns_root_uid;
+
+ if (gid_valid(ns_root_gid))
+ *gid = ns_root_gid;
+ } else {
+ *uid = GLOBAL_ROOT_UID;
+ *gid = GLOBAL_ROOT_GID;
+ }
+}
+EXPORT_SYMBOL_GPL(net_ns_get_ownership);
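
Beyond the three get_ownership() callbacks above, any caller can resolve the same ownership directly; a hedged in-kernel sketch, where the logging wrapper is purely illustrative and not part of this change:

    /* Illustrative only: resolve and print the ownership a netns-scoped
     * sysfs node would receive, mapped into the initial user namespace.
     */
    static void log_ns_owner(const struct net *net)
    {
        kuid_t uid;
        kgid_t gid;

        net_ns_get_ownership(net, &uid, &gid);
        pr_info("sysfs owner: uid %u gid %u\n",
                from_kuid(&init_user_ns, uid),
                from_kgid(&init_user_ns, gid));
    }
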
+
static void unhash_nsid(struct net *net, struct net *last)
{
struct net *tmp;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 49368e21d228..7f6938405fa1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1265,7 +1265,7 @@ static ssize_t pktgen_if_write(struct file *file,
buf[len] = 0;
if (strcmp(buf, pkt_dev->dst_min) != 0) {
memset(pkt_dev->dst_min, 0, sizeof(pkt_dev->dst_min));
- strncpy(pkt_dev->dst_min, buf, len);
+ strcpy(pkt_dev->dst_min, buf);
pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
pkt_dev->cur_daddr = pkt_dev->daddr_min;
}
@@ -1280,14 +1280,12 @@ static ssize_t pktgen_if_write(struct file *file,
if (len < 0)
return len;
-
if (copy_from_user(buf, &user_buffer[i], len))
return -EFAULT;
-
buf[len] = 0;
if (strcmp(buf, pkt_dev->dst_max) != 0) {
memset(pkt_dev->dst_max, 0, sizeof(pkt_dev->dst_max));
- strncpy(pkt_dev->dst_max, buf, len);
+ strcpy(pkt_dev->dst_max, buf);
pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
pkt_dev->cur_daddr = pkt_dev->daddr_max;
}
@@ -1396,7 +1394,7 @@ static ssize_t pktgen_if_write(struct file *file,
buf[len] = 0;
if (strcmp(buf, pkt_dev->src_min) != 0) {
memset(pkt_dev->src_min, 0, sizeof(pkt_dev->src_min));
- strncpy(pkt_dev->src_min, buf, len);
+ strcpy(pkt_dev->src_min, buf);
pkt_dev->saddr_min = in_aton(pkt_dev->src_min);
pkt_dev->cur_saddr = pkt_dev->saddr_min;
}
@@ -1416,7 +1414,7 @@ static ssize_t pktgen_if_write(struct file *file,
buf[len] = 0;
if (strcmp(buf, pkt_dev->src_max) != 0) {
memset(pkt_dev->src_max, 0, sizeof(pkt_dev->src_max));
- strncpy(pkt_dev->src_max, buf, len);
+ strcpy(pkt_dev->src_max, buf);
pkt_dev->saddr_max = in_aton(pkt_dev->src_max);
pkt_dev->cur_saddr = pkt_dev->saddr_max;
}
@@ -2255,7 +2253,7 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
x = xfrm_state_lookup_byspi(pn->net, htonl(pkt_dev->spi), AF_INET);
} else {
/* slow path: we don't already have xfrm_state */
- x = xfrm_stateonly_find(pn->net, DUMMY_MARK,
+ x = xfrm_stateonly_find(pn->net, DUMMY_MARK, 0,
(xfrm_address_t *)&pkt_dev->cur_daddr,
(xfrm_address_t *)&pkt_dev->cur_saddr,
AF_INET,
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e3f743c141b3..24431e578310 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -964,7 +964,8 @@ static size_t rtnl_xdp_size(void)
{
size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
nla_total_size(1) + /* XDP_ATTACHED */
- nla_total_size(4); /* XDP_PROG_ID */
+ nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */
+ nla_total_size(4); /* XDP_<mode>_PROG_ID */
return xdp_size;
}
@@ -1014,6 +1015,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(4) /* IFLA_IF_NETNSID */
+ nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
+ nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
+ + nla_total_size(4) /* IFLA_MIN_MTU */
+ + nla_total_size(4) /* IFLA_MAX_MTU */
+ 0;
}
@@ -1353,27 +1356,51 @@ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
+static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
- const struct net_device_ops *ops = dev->netdev_ops;
const struct bpf_prog *generic_xdp_prog;
- struct netdev_bpf xdp;
ASSERT_RTNL();
- *prog_id = 0;
generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
- if (generic_xdp_prog) {
- *prog_id = generic_xdp_prog->aux->id;
- return XDP_ATTACHED_SKB;
- }
- if (!ops->ndo_bpf)
- return XDP_ATTACHED_NONE;
+ if (!generic_xdp_prog)
+ return 0;
+ return generic_xdp_prog->aux->id;
+}
- __dev_xdp_query(dev, ops->ndo_bpf, &xdp);
- *prog_id = xdp.prog_id;
+static u32 rtnl_xdp_prog_drv(struct net_device *dev)
+{
+ return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, XDP_QUERY_PROG);
+}
- return xdp.prog_attached;
+static u32 rtnl_xdp_prog_hw(struct net_device *dev)
+{
+ return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf,
+ XDP_QUERY_PROG_HW);
+}
+
+static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
+ u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
+ u32 (*get_prog_id)(struct net_device *dev))
+{
+ u32 curr_id;
+ int err;
+
+ curr_id = get_prog_id(dev);
+ if (!curr_id)
+ return 0;
+
+ *prog_id = curr_id;
+ err = nla_put_u32(skb, attr, curr_id);
+ if (err)
+ return err;
+
+ if (*mode != XDP_ATTACHED_NONE)
+ *mode = XDP_ATTACHED_MULTI;
+ else
+ *mode = tgt_mode;
+
+ return 0;
}
static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
@@ -1381,17 +1408,32 @@ static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
struct nlattr *xdp;
u32 prog_id;
int err;
+ u8 mode;
xdp = nla_nest_start(skb, IFLA_XDP);
if (!xdp)
return -EMSGSIZE;
- err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
- rtnl_xdp_attached_mode(dev, &prog_id));
+ prog_id = 0;
+ mode = XDP_ATTACHED_NONE;
+ err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
+ IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
+ if (err)
+ goto err_cancel;
+ err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
+ IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
+ if (err)
+ goto err_cancel;
+ err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
+ IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
+ if (err)
+ goto err_cancel;
+
+ err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
if (err)
goto err_cancel;
- if (prog_id) {
+ if (prog_id && mode != XDP_ATTACHED_MULTI) {
err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
if (err)
goto err_cancel;
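
On the userspace side, a hedged sketch of consuming the reworked reporting: when IFLA_XDP_ATTACHED carries XDP_ATTACHED_MULTI, one id is read per mode instead of the single IFLA_XDP_PROG_ID. The tb[] array is assumed to be filled by the caller's usual rtattr parsing:

    #include <stdio.h>
    #include <linux/if_link.h>
    #include <linux/rtnetlink.h>

    /* Only the decoding of the new per-mode attributes is shown. */
    static void print_xdp_state(struct rtattr *tb[IFLA_XDP_MAX + 1])
    {
        __u8 mode = tb[IFLA_XDP_ATTACHED] ?
                *(__u8 *)RTA_DATA(tb[IFLA_XDP_ATTACHED]) : XDP_ATTACHED_NONE;

        if (mode == XDP_ATTACHED_MULTI) {
            /* One id per attachment mode. */
            if (tb[IFLA_XDP_SKB_PROG_ID])
                printf("generic prog id %u\n",
                       *(__u32 *)RTA_DATA(tb[IFLA_XDP_SKB_PROG_ID]));
            if (tb[IFLA_XDP_DRV_PROG_ID])
                printf("driver prog id %u\n",
                       *(__u32 *)RTA_DATA(tb[IFLA_XDP_DRV_PROG_ID]));
            if (tb[IFLA_XDP_HW_PROG_ID])
                printf("hw prog id %u\n",
                       *(__u32 *)RTA_DATA(tb[IFLA_XDP_HW_PROG_ID]));
        } else if (tb[IFLA_XDP_PROG_ID]) {
            /* Single program: legacy attribute still carries the id. */
            printf("mode %u prog id %u\n", mode,
                   *(__u32 *)RTA_DATA(tb[IFLA_XDP_PROG_ID]));
        }
    }
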
@@ -1561,6 +1603,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+ nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
+ nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
nla_put_u32(skb, IFLA_GROUP, dev->group) ||
nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
@@ -1692,6 +1736,8 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_IF_NETNSID] = { .type = NLA_S32 },
[IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
+ [IFLA_MIN_MTU] = { .type = NLA_U32 },
+ [IFLA_MAX_MTU] = { .type = NLA_U32 },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -2336,7 +2382,7 @@ static int do_setlink(const struct sk_buff *skb,
}
if (tb[IFLA_MTU]) {
- err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+ err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
if (err < 0)
goto errout;
status |= DO_SETLINK_MODIFIED;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index fb35b62af272..c996c09d095f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1291,7 +1291,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
}
EXPORT_SYMBOL(skb_clone);
-static void skb_headers_offset_update(struct sk_buff *skb, int off)
+void skb_headers_offset_update(struct sk_buff *skb, int off)
{
/* Only adjust this if it actually is csum_start rather than csum */
if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -1305,6 +1305,7 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off)
skb->inner_network_header += off;
skb->inner_mac_header += off;
}
+EXPORT_SYMBOL(skb_headers_offset_update);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
{
@@ -1715,7 +1716,7 @@ void *skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data -= len;
skb->len += len;
- if (unlikely(skb->data<skb->head))
+ if (unlikely(skb->data < skb->head))
skb_under_panic(skb, len, __builtin_return_address(0));
return skb->data;
}
@@ -2858,23 +2859,27 @@ EXPORT_SYMBOL(skb_queue_purge);
/**
 * skb_rbtree_purge - empty a skb rbtree
 * @root: root of the rbtree to empty
 *
 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
 * the list and one reference dropped. This function does not take
 * any lock. Synchronization should be handled by the caller (e.g., TCP
 * out-of-order queue is protected by the socket lock).
+ * Return: the sum of the truesizes of all purged skbs.
 */
-void skb_rbtree_purge(struct rb_root *root)
+unsigned int skb_rbtree_purge(struct rb_root *root)
{
struct rb_node *p = rb_first(root);
+ unsigned int sum = 0;
while (p) {
struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
p = rb_next(p);
rb_erase(&skb->rbnode, root);
+ sum += skb->truesize;
kfree_skb(skb);
}
+ return sum;
}
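
A minimal caller sketch of what the new return value enables, assuming a socket whose receive memory should be uncharged by exactly the purged truesize; the function and its field usage are illustrative, not from this change:

    /* Illustrative caller: purge an out-of-order queue and uncharge
     * exactly the truesize that was freed. */
    static void flush_ofo_queue(struct sock *sk, struct rb_root *ofo)
    {
        unsigned int sum = skb_rbtree_purge(ofo);

        atomic_sub(sum, &sk->sk_rmem_alloc);
        sk_mem_reclaim(sk);
    }
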
/**
@@ -3816,14 +3821,14 @@ err:
}
EXPORT_SYMBOL_GPL(skb_segment);
-int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
unsigned int offset = skb_gro_offset(skb);
unsigned int headlen = skb_headlen(skb);
unsigned int len = skb_gro_len(skb);
- struct sk_buff *lp, *p = *head;
unsigned int delta_truesize;
+ struct sk_buff *lp;
if (unlikely(p->len + len >= 65536))
return -E2BIG;
@@ -4899,7 +4904,6 @@ EXPORT_SYMBOL(skb_try_coalesce);
*/
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
- skb->tstamp = 0;
skb->pkt_type = PACKET_HOST;
skb->skb_iif = 0;
skb->ignore_df = 0;
@@ -4912,8 +4916,8 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
return;
ipvs_reset(skb);
- skb_orphan(skb);
skb->mark = 0;
+ skb->tstamp = 0;
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);
diff --git a/net/core/sock.c b/net/core/sock.c
index bc2d7a37297f..3730eb855095 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -91,6 +91,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <asm/unaligned.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
@@ -249,58 +250,13 @@ static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
_sock_locks("k-clock-")
};
static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
- "rlock-AF_UNSPEC", "rlock-AF_UNIX" , "rlock-AF_INET" ,
- "rlock-AF_AX25" , "rlock-AF_IPX" , "rlock-AF_APPLETALK",
- "rlock-AF_NETROM", "rlock-AF_BRIDGE" , "rlock-AF_ATMPVC" ,
- "rlock-AF_X25" , "rlock-AF_INET6" , "rlock-AF_ROSE" ,
- "rlock-AF_DECnet", "rlock-AF_NETBEUI" , "rlock-AF_SECURITY" ,
- "rlock-AF_KEY" , "rlock-AF_NETLINK" , "rlock-AF_PACKET" ,
- "rlock-AF_ASH" , "rlock-AF_ECONET" , "rlock-AF_ATMSVC" ,
- "rlock-AF_RDS" , "rlock-AF_SNA" , "rlock-AF_IRDA" ,
- "rlock-AF_PPPOX" , "rlock-AF_WANPIPE" , "rlock-AF_LLC" ,
- "rlock-27" , "rlock-28" , "rlock-AF_CAN" ,
- "rlock-AF_TIPC" , "rlock-AF_BLUETOOTH", "rlock-AF_IUCV" ,
- "rlock-AF_RXRPC" , "rlock-AF_ISDN" , "rlock-AF_PHONET" ,
- "rlock-AF_IEEE802154", "rlock-AF_CAIF" , "rlock-AF_ALG" ,
- "rlock-AF_NFC" , "rlock-AF_VSOCK" , "rlock-AF_KCM" ,
- "rlock-AF_QIPCRTR", "rlock-AF_SMC" , "rlock-AF_XDP" ,
- "rlock-AF_MAX"
+ _sock_locks("rlock-")
};
static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
- "wlock-AF_UNSPEC", "wlock-AF_UNIX" , "wlock-AF_INET" ,
- "wlock-AF_AX25" , "wlock-AF_IPX" , "wlock-AF_APPLETALK",
- "wlock-AF_NETROM", "wlock-AF_BRIDGE" , "wlock-AF_ATMPVC" ,
- "wlock-AF_X25" , "wlock-AF_INET6" , "wlock-AF_ROSE" ,
- "wlock-AF_DECnet", "wlock-AF_NETBEUI" , "wlock-AF_SECURITY" ,
- "wlock-AF_KEY" , "wlock-AF_NETLINK" , "wlock-AF_PACKET" ,
- "wlock-AF_ASH" , "wlock-AF_ECONET" , "wlock-AF_ATMSVC" ,
- "wlock-AF_RDS" , "wlock-AF_SNA" , "wlock-AF_IRDA" ,
- "wlock-AF_PPPOX" , "wlock-AF_WANPIPE" , "wlock-AF_LLC" ,
- "wlock-27" , "wlock-28" , "wlock-AF_CAN" ,
- "wlock-AF_TIPC" , "wlock-AF_BLUETOOTH", "wlock-AF_IUCV" ,
- "wlock-AF_RXRPC" , "wlock-AF_ISDN" , "wlock-AF_PHONET" ,
- "wlock-AF_IEEE802154", "wlock-AF_CAIF" , "wlock-AF_ALG" ,
- "wlock-AF_NFC" , "wlock-AF_VSOCK" , "wlock-AF_KCM" ,
- "wlock-AF_QIPCRTR", "wlock-AF_SMC" , "wlock-AF_XDP" ,
- "wlock-AF_MAX"
+ _sock_locks("wlock-")
};
static const char *const af_family_elock_key_strings[AF_MAX+1] = {
- "elock-AF_UNSPEC", "elock-AF_UNIX" , "elock-AF_INET" ,
- "elock-AF_AX25" , "elock-AF_IPX" , "elock-AF_APPLETALK",
- "elock-AF_NETROM", "elock-AF_BRIDGE" , "elock-AF_ATMPVC" ,
- "elock-AF_X25" , "elock-AF_INET6" , "elock-AF_ROSE" ,
- "elock-AF_DECnet", "elock-AF_NETBEUI" , "elock-AF_SECURITY" ,
- "elock-AF_KEY" , "elock-AF_NETLINK" , "elock-AF_PACKET" ,
- "elock-AF_ASH" , "elock-AF_ECONET" , "elock-AF_ATMSVC" ,
- "elock-AF_RDS" , "elock-AF_SNA" , "elock-AF_IRDA" ,
- "elock-AF_PPPOX" , "elock-AF_WANPIPE" , "elock-AF_LLC" ,
- "elock-27" , "elock-28" , "elock-AF_CAN" ,
- "elock-AF_TIPC" , "elock-AF_BLUETOOTH", "elock-AF_IUCV" ,
- "elock-AF_RXRPC" , "elock-AF_ISDN" , "elock-AF_PHONET" ,
- "elock-AF_IEEE802154", "elock-AF_CAIF" , "elock-AF_ALG" ,
- "elock-AF_NFC" , "elock-AF_VSOCK" , "elock-AF_KCM" ,
- "elock-AF_QIPCRTR", "elock-AF_SMC" , "elock-AF_XDP" ,
- "elock-AF_MAX"
+ _sock_locks("elock-")
};
/*
@@ -697,6 +653,7 @@ EXPORT_SYMBOL(sk_mc_loop);
int sock_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
+ struct sock_txtime sk_txtime;
struct sock *sk = sock->sk;
int val;
int valbool;
@@ -1070,6 +1027,26 @@ set_rcvbuf:
}
break;
+ case SO_TXTIME:
+ if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ } else if (optlen != sizeof(struct sock_txtime)) {
+ ret = -EINVAL;
+ } else if (copy_from_user(&sk_txtime, optval,
+ sizeof(struct sock_txtime))) {
+ ret = -EFAULT;
+ } else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
+ ret = -EINVAL;
+ } else {
+ sock_valbool_flag(sk, SOCK_TXTIME, true);
+ sk->sk_clockid = sk_txtime.clockid;
+ sk->sk_txtime_deadline_mode =
+ !!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
+ sk->sk_txtime_report_errors =
+ !!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
+ }
+ break;
+
default:
ret = -ENOPROTOOPT;
break;
@@ -1115,6 +1092,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
u64 val64;
struct linger ling;
struct timeval tm;
+ struct sock_txtime txtime;
} v;
int lv = sizeof(int);
@@ -1403,6 +1381,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = sock_flag(sk, SOCK_ZEROCOPY);
break;
+ case SO_TXTIME:
+ lv = sizeof(v.txtime);
+ v.txtime.clockid = sk->sk_clockid;
+ v.txtime.flags |= sk->sk_txtime_deadline_mode ?
+ SOF_TXTIME_DEADLINE_MODE : 0;
+ v.txtime.flags |= sk->sk_txtime_report_errors ?
+ SOF_TXTIME_REPORT_ERRORS : 0;
+ break;
+
default:
/* We implement the SO_SNDLOWAT etc to not be settable
* (1003.1g 7).
@@ -2137,6 +2124,13 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
sockc->tsflags |= tsflags;
break;
+ case SCM_TXTIME:
+ if (!sock_flag(sk, SOCK_TXTIME))
+ return -EINVAL;
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
+ return -EINVAL;
+ sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
+ break;
/* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
case SCM_RIGHTS:
case SCM_CREDENTIALS:
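
Taken together with the SO_TXTIME setsockopt case above, a hedged userspace sketch of the intended flow: enable the option once, then stamp each packet with a u64 nanosecond transmit time via an SCM_TXTIME cmsg. Socket creation and error handling are elided, and the constants assume headers from a kernel with this change:

    #include <stdint.h>
    #include <string.h>
    #include <time.h>
    #include <sys/socket.h>
    #include <linux/net_tstamp.h>   /* struct sock_txtime, SOF_TXTIME_* */

    static int enable_txtime(int fd)
    {
        struct sock_txtime cfg = {
            .clockid = CLOCK_TAI,
            .flags = SOF_TXTIME_REPORT_ERRORS,
        };

        /* Requires CAP_NET_ADMIN, as enforced above. */
        return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
    }

    static ssize_t send_at(int fd, const void *data, size_t len, uint64_t t_ns)
    {
        char control[CMSG_SPACE(sizeof(uint64_t))] = {0};
        struct iovec iov = { .iov_base = (void *)data, .iov_len = len };
        struct msghdr msg = {
            .msg_iov = &iov, .msg_iovlen = 1,
            .msg_control = control, .msg_controllen = sizeof(control),
        };
        struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

        /* One u64 of nanoseconds, matching the CMSG_LEN check above. */
        cm->cmsg_level = SOL_SOCKET;
        cm->cmsg_type = SCM_TXTIME;
        cm->cmsg_len = CMSG_LEN(sizeof(uint64_t));
        memcpy(CMSG_DATA(cm), &t_ns, sizeof(t_ns));

        return sendmsg(fd, &msg, 0);
    }
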
@@ -2401,9 +2395,10 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
{
struct proto *prot = sk->sk_prot;
long allocated = sk_memory_allocated_add(sk, amt);
+ bool charged = true;
if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
- !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
+ !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
goto suppress_allocation;
/* Under limit. */
@@ -2461,7 +2456,8 @@ suppress_allocation:
return 1;
}
- trace_sock_exceed_buf_limit(sk, prot, allocated);
+ if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
+ trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
sk_memory_allocated_sub(sk, amt);
@@ -2818,6 +2814,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_pacing_rate = ~0U;
sk->sk_pacing_shift = 10;
sk->sk_incoming_cpu = -1;
+
+ sk_rx_queue_clear(sk);
/*
* Before updating sk_refcnt, we must commit prior changes to memory
* (Documentation/RCU/rculist_nulls.txt for details)
@@ -2902,8 +2900,8 @@ EXPORT_SYMBOL(lock_sock_fast);
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
struct timeval tv;
- if (!sock_flag(sk, SOCK_TIMESTAMP))
- sock_enable_timestamp(sk, SOCK_TIMESTAMP);
+
+ sock_enable_timestamp(sk, SOCK_TIMESTAMP);
tv = ktime_to_timeval(sk->sk_stamp);
if (tv.tv_sec == -1)
return -ENOENT;
@@ -2918,8 +2916,8 @@ EXPORT_SYMBOL(sock_get_timestamp);
int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
struct timespec ts;
- if (!sock_flag(sk, SOCK_TIMESTAMP))
- sock_enable_timestamp(sk, SOCK_TIMESTAMP);
+
+ sock_enable_timestamp(sk, SOCK_TIMESTAMP);
ts = ktime_to_timespec(sk->sk_stamp);
if (ts.tv_sec == -1)
return -ENOENT;
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index c37b5be7c5e4..3312a5849a97 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
+#include <linux/nospec.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
@@ -218,6 +219,7 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
if (req->sdiag_family >= AF_MAX)
return -EINVAL;
+ req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX);
if (sock_diag_handlers[req->sdiag_family] == NULL)
sock_load_diag_module(req->sdiag_family, 0);
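
For context, the same speculation barrier pattern in isolation: the bounds check alone does not stop a mispredicted speculative load, so the checked index is clamped before the table access. The handlers[] table below is a stand-in for sock_diag_handlers[]:

    #include <linux/nospec.h>
    #include <linux/socket.h>

    static const struct sock_diag_handler *handlers[AF_MAX];

    static const struct sock_diag_handler *get_handler(int family)
    {
        if (family >= AF_MAX)
            return NULL;
        /* Clamp the branch-checked index before the dependent load. */
        family = array_index_nospec(family, AF_MAX);
        return handlers[family];
    }
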
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index 064acb04be0f..ba5cba56f574 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -8,11 +8,34 @@
#include <net/sock_reuseport.h>
#include <linux/bpf.h>
+#include <linux/idr.h>
+#include <linux/filter.h>
#include <linux/rcupdate.h>
#define INIT_SOCKS 128
-static DEFINE_SPINLOCK(reuseport_lock);
+DEFINE_SPINLOCK(reuseport_lock);
+
+#define REUSEPORT_MIN_ID 1
+static DEFINE_IDA(reuseport_ida);
+
+int reuseport_get_id(struct sock_reuseport *reuse)
+{
+ int id;
+
+ if (reuse->reuseport_id)
+ return reuse->reuseport_id;
+
+ id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
+ /* Called under reuseport_lock */
+ GFP_ATOMIC);
+ if (id < 0)
+ return id;
+
+ reuse->reuseport_id = id;
+
+ return reuse->reuseport_id;
+}
static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
@@ -29,7 +52,7 @@ static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
return reuse;
}
-int reuseport_alloc(struct sock *sk)
+int reuseport_alloc(struct sock *sk, bool bind_inany)
{
struct sock_reuseport *reuse;
@@ -41,9 +64,17 @@ int reuseport_alloc(struct sock *sk)
/* Allocation attempts can occur concurrently via the setsockopt path
* and the bind/hash path. Nothing to do when we lose the race.
*/
- if (rcu_dereference_protected(sk->sk_reuseport_cb,
- lockdep_is_held(&reuseport_lock)))
+ reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
+ lockdep_is_held(&reuseport_lock));
+ if (reuse) {
+		/* Only set reuse->bind_inany when bind_inany is true;
+		 * otherwise we would clear a reuse->bind_inany that was
+		 * already set by the bind/hash path.
+		 */
+ if (bind_inany)
+ reuse->bind_inany = bind_inany;
goto out;
+ }
reuse = __reuseport_alloc(INIT_SOCKS);
if (!reuse) {
@@ -53,6 +84,7 @@ int reuseport_alloc(struct sock *sk)
reuse->socks[0] = sk;
reuse->num_socks = 1;
+ reuse->bind_inany = bind_inany;
rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
out:
@@ -78,9 +110,12 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
more_reuse->max_socks = more_socks_size;
more_reuse->num_socks = reuse->num_socks;
more_reuse->prog = reuse->prog;
+ more_reuse->reuseport_id = reuse->reuseport_id;
+ more_reuse->bind_inany = reuse->bind_inany;
memcpy(more_reuse->socks, reuse->socks,
reuse->num_socks * sizeof(struct sock *));
+ more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);
for (i = 0; i < reuse->num_socks; ++i)
rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
@@ -99,8 +134,9 @@ static void reuseport_free_rcu(struct rcu_head *head)
struct sock_reuseport *reuse;
reuse = container_of(head, struct sock_reuseport, rcu);
- if (reuse->prog)
- bpf_prog_destroy(reuse->prog);
+ sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
+ if (reuse->reuseport_id)
+ ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
kfree(reuse);
}
@@ -110,12 +146,12 @@ static void reuseport_free_rcu(struct rcu_head *head)
* @sk2: Socket belonging to the existing reuseport group.
* May return ENOMEM and not add socket to group under memory pressure.
*/
-int reuseport_add_sock(struct sock *sk, struct sock *sk2)
+int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
struct sock_reuseport *old_reuse, *reuse;
if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
- int err = reuseport_alloc(sk2);
+ int err = reuseport_alloc(sk2, bind_inany);
if (err)
return err;
@@ -160,6 +196,14 @@ void reuseport_detach_sock(struct sock *sk)
spin_lock_bh(&reuseport_lock);
reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
lockdep_is_held(&reuseport_lock));
+
+	/* At least one sk in this reuseport group was added to a bpf
+	 * map at some point, so notify the bpf side; the bpf map logic
+	 * will remove the sk if it is indeed still in a map.
+	 */
+ if (reuse->reuseport_id)
+ bpf_sk_reuseport_detach(sk);
+
rcu_assign_pointer(sk->sk_reuseport_cb, NULL);
for (i = 0; i < reuse->num_socks; i++) {
@@ -175,9 +219,9 @@ void reuseport_detach_sock(struct sock *sk)
}
EXPORT_SYMBOL(reuseport_detach_sock);
-static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
- struct bpf_prog *prog, struct sk_buff *skb,
- int hdr_len)
+static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
+ struct bpf_prog *prog, struct sk_buff *skb,
+ int hdr_len)
{
struct sk_buff *nskb = NULL;
u32 index;
@@ -238,9 +282,15 @@ struct sock *reuseport_select_sock(struct sock *sk,
/* paired with smp_wmb() in reuseport_add_sock() */
smp_rmb();
- if (prog && skb)
- sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);
+ if (!prog || !skb)
+ goto select_by_hash;
+
+ if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
+ sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
+ else
+ sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);
+select_by_hash:
/* no bpf or invalid bpf result: fall back to hash usage */
if (!sk2)
sk2 = reuse->socks[reciprocal_scale(hash, socks)];
@@ -252,12 +302,21 @@ out:
}
EXPORT_SYMBOL(reuseport_select_sock);
-struct bpf_prog *
-reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
+int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
struct sock_reuseport *reuse;
struct bpf_prog *old_prog;
+ if (sk_unhashed(sk) && sk->sk_reuseport) {
+ int err = reuseport_alloc(sk, false);
+
+ if (err)
+ return err;
+ } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
+ /* The socket wasn't bound with SO_REUSEPORT */
+ return -EINVAL;
+ }
+
spin_lock_bh(&reuseport_lock);
reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
lockdep_is_held(&reuseport_lock));
@@ -266,6 +325,7 @@ reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
rcu_assign_pointer(reuse->prog, prog);
spin_unlock_bh(&reuseport_lock);
- return old_prog;
+ sk_reuseport_prog_free(old_prog);
+ return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);
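
A hedged sketch of the BPF side this enables: a BPF_PROG_TYPE_SK_REUSEPORT program, compiled separately with clang, that picks a socket out of a REUSEPORT_SOCKARRAY map. The map layout macros assume a libbpf-style bpf_helpers.h, and the two-way hash split is illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
        __uint(max_entries, 2);
        __type(key, __u32);
        __type(value, __u64);
    } socks SEC(".maps");

    SEC("sk_reuseport")
    int select_sock(struct sk_reuseport_md *md)
    {
        __u32 key = md->hash % 2;   /* trivial two-way split */

        if (!bpf_sk_select_reuseport(md, &socks, &key, 0))
            return SK_PASS;
        return SK_DROP;
    }

    char _license[] SEC("license") = "GPL";
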
diff --git a/net/core/utils.c b/net/core/utils.c
index d47863b07a60..2a597ac7808e 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -397,7 +397,7 @@ int inet_pton_with_scope(struct net *net, __kernel_sa_family_t af,
break;
default:
pr_err("unexpected address family %d\n", af);
- };
+ }
return ret;
}
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 6771f1855b96..3dd99e1c04f5 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -3,8 +3,11 @@
* Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
* Released under terms in GPL version 2. See COPYING.
*/
+#include <linux/bpf.h>
+#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
+#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
@@ -45,8 +48,8 @@ static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id)
!= sizeof(u32));
- /* Use cyclic increasing ID as direct hash key, see rht_bucket_index */
- return key << RHT_HASH_RESERVED_SPACE;
+ /* Use cyclic increasing ID as direct hash key */
+ return key;
}
static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
@@ -327,10 +330,12 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
page = virt_to_head_page(data);
- if (xa)
+ if (xa) {
+ napi_direct &= !xdp_return_frame_no_direct();
page_pool_put_page(xa->page_pool, page, napi_direct);
- else
+ } else {
put_page(page);
+ }
rcu_read_unlock();
break;
case MEM_TYPE_PAGE_SHARED:
@@ -345,8 +350,7 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
rcu_read_lock();
/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
- if (!WARN_ON_ONCE(!xa))
- xa->zc_alloc->free(xa->zc_alloc, handle);
+ xa->zc_alloc->free(xa->zc_alloc, handle);
rcu_read_unlock();
default:
/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
@@ -371,3 +375,34 @@ void xdp_return_buff(struct xdp_buff *xdp)
__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);
+
+int xdp_attachment_query(struct xdp_attachment_info *info,
+ struct netdev_bpf *bpf)
+{
+ bpf->prog_id = info->prog ? info->prog->aux->id : 0;
+ bpf->prog_flags = info->prog ? info->flags : 0;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xdp_attachment_query);
+
+bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
+ struct netdev_bpf *bpf)
+{
+ if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
+ NL_SET_ERR_MSG(bpf->extack,
+ "program loaded with different flags");
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);
+
+void xdp_attachment_setup(struct xdp_attachment_info *info,
+ struct netdev_bpf *bpf)
+{
+ if (info->prog)
+ bpf_prog_put(info->prog);
+ info->prog = bpf->prog;
+ info->flags = bpf->flags;
+}
+EXPORT_SYMBOL_GPL(xdp_attachment_setup);
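
A hedged sketch of the calling pattern the three helpers are meant for inside a driver's ndo_bpf(); foo_priv and the embedded xdp_attachment_info are illustrative driver state:

    /* foo_priv is illustrative driver state embedding the new struct. */
    struct foo_priv {
        struct xdp_attachment_info xdp;
    };

    static int foo_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
    {
        struct foo_priv *priv = netdev_priv(dev);

        switch (bpf->command) {
        case XDP_SETUP_PROG:
            if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
                return -EBUSY;
            /* ... (re)program the datapath for bpf->prog here ... */
            xdp_attachment_setup(&priv->xdp, bpf);
            return 0;
        case XDP_QUERY_PROG:
            return xdp_attachment_query(&priv->xdp, bpf);
        default:
            return -EINVAL;
        }
    }
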
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 2589a6b78aa1..a556cd708885 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1786,7 +1786,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
if (itr->app.selector == app->selector &&
itr->app.protocol == app->protocol &&
itr->ifindex == ifindex &&
- (!prio || itr->app.priority == prio))
+ ((prio == -1) || itr->app.priority == prio))
return itr;
}
@@ -1821,7 +1821,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
u8 prio = 0;
spin_lock_bh(&dcb_lock);
- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+ itr = dcb_app_lookup(app, dev->ifindex, -1);
+ if (itr)
prio = itr->app.priority;
spin_unlock_bh(&dcb_lock);
@@ -1849,7 +1850,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
spin_lock_bh(&dcb_lock);
/* Search for existing match and replace */
- if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
+ itr = dcb_app_lookup(new, dev->ifindex, -1);
+ if (itr) {
if (new->priority)
itr->app.priority = new->priority;
else {
@@ -1882,7 +1884,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
u8 prio = 0;
spin_lock_bh(&dcb_lock);
- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+ itr = dcb_app_lookup(app, dev->ifindex, -1);
+ if (itr)
prio |= 1 << itr->app.priority;
spin_unlock_bh(&dcb_lock);
@@ -1955,6 +1958,92 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
}
EXPORT_SYMBOL(dcb_ieee_delapp);
+/**
+ * dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find the mapping
+ * from each priority to the DSCP values assigned to it. Initialize p_map
+ * such that each map element holds a bit mask of the DSCP values configured
+ * for that priority by APP entries.
+ */
+void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
+ struct dcb_ieee_app_prio_map *p_map)
+{
+ int ifindex = dev->ifindex;
+ struct dcb_app_type *itr;
+ u8 prio;
+
+ memset(p_map->map, 0, sizeof(p_map->map));
+
+ spin_lock_bh(&dcb_lock);
+ list_for_each_entry(itr, &dcb_app_list, list) {
+ if (itr->ifindex == ifindex &&
+ itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
+ itr->app.protocol < 64 &&
+ itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
+ prio = itr->app.priority;
+ p_map->map[prio] |= 1ULL << itr->app.protocol;
+ }
+ }
+ spin_unlock_bh(&dcb_lock);
+}
+EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map);
+
+/**
+ * dcb_ieee_getapp_dscp_prio_mask_map - For a given device, find the mapping
+ * from each DSCP value to the priorities assigned to it. Initialize p_map
+ * such that each map element holds a bit mask of the priorities configured
+ * for that DSCP value by APP entries.
+ */
+void
+dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
+ struct dcb_ieee_app_dscp_map *p_map)
+{
+ int ifindex = dev->ifindex;
+ struct dcb_app_type *itr;
+
+ memset(p_map->map, 0, sizeof(p_map->map));
+
+ spin_lock_bh(&dcb_lock);
+ list_for_each_entry(itr, &dcb_app_list, list) {
+ if (itr->ifindex == ifindex &&
+ itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
+ itr->app.protocol < 64 &&
+ itr->app.priority < IEEE_8021QAZ_MAX_TCS)
+ p_map->map[itr->app.protocol] |= 1 << itr->app.priority;
+ }
+ spin_unlock_bh(&dcb_lock);
+}
+EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map);
+
+/**
+ * dcb_ieee_getapp_default_prio_mask - For a given device, find all APP
+ * entries of the form {$PRIO, ETHERTYPE, 0} and construct a bit mask of all
+ * default priorities set by these entries.
+ *
+ * Per 802.1Q-2014, the selector value of 1 is used for matching on Ethernet
+ * type, with valid PID values >= 1536. A special meaning is then assigned to
+ * a protocol value of 0: "default priority. For use when priority is not
+ * otherwise specified".
+ */
+u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev)
+{
+ int ifindex = dev->ifindex;
+ struct dcb_app_type *itr;
+ u8 mask = 0;
+
+ spin_lock_bh(&dcb_lock);
+ list_for_each_entry(itr, &dcb_app_list, list) {
+ if (itr->ifindex == ifindex &&
+ itr->app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ itr->app.protocol == 0 &&
+ itr->app.priority < IEEE_8021QAZ_MAX_TCS)
+ mask |= 1 << itr->app.priority;
+ }
+ spin_unlock_bh(&dcb_lock);
+
+ return mask;
+}
+EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask);
+
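
A hedged driver-side sketch of resolving a packet priority with the new helpers; pick_prio() and its highest-set-bit fallback policy are illustrative, not part of this change:

    /* Illustrative policy: prefer a DSCP rule, fall back to a default
     * priority APP entry, taking the highest set bit in either mask. */
    static u8 pick_prio(const struct net_device *dev, u8 dscp)
    {
        struct dcb_ieee_app_dscp_map dscp_map;
        u8 mask;

        dcb_ieee_getapp_dscp_prio_mask_map(dev, &dscp_map);
        mask = dscp_map.map[dscp];
        if (mask)
            return fls(mask) - 1;

        mask = dcb_ieee_getapp_default_prio_mask(dev);
        return mask ? fls(mask) - 1 : 0;
    }
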
static int __init dcbnl_init(void)
{
INIT_LIST_HEAD(&dcb_app_list);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 0d56e36a6db7..875858c8b059 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -325,7 +325,7 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
__poll_t mask;
struct sock *sk = sock->sk;
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, wait);
if (sk->sk_state == DCCP_LISTEN)
return inet_csk_listen_poll(sk);
diff --git a/net/decnet/Kconfig b/net/decnet/Kconfig
index f3393e154f0f..dcc74956badd 100644
--- a/net/decnet/Kconfig
+++ b/net/decnet/Kconfig
@@ -40,4 +40,3 @@ config DECNET_ROUTER
to work.
See <file:Documentation/networking/decnet.txt> for more information.
-
diff --git a/net/decnet/Makefile b/net/decnet/Makefile
index 9e38122d942b..07b38e441b2d 100644
--- a/net/decnet/Makefile
+++ b/net/decnet/Makefile
@@ -8,4 +8,3 @@ decnet-$(CONFIG_DECNET_ROUTER) += dn_fib.o dn_rules.o dn_table.o
decnet-y += sysctl_net_decnet.o
obj-$(CONFIG_NETFILTER) += netfilter/
-
diff --git a/net/decnet/TODO b/net/decnet/TODO
index ebb5ac69d128..358e9eb49016 100644
--- a/net/decnet/TODO
+++ b/net/decnet/TODO
@@ -16,14 +16,14 @@ Steve's quick list of things that need finishing off:
o Verify errors etc. against POSIX 1003.1g (draft)
- o Using send/recvmsg() to get at connect/disconnect data (POSIX 1003.1g)
+ o Using send/recvmsg() to get at connect/disconnect data (POSIX 1003.1g)
[maybe this should be done at socket level... the control data in the
send/recvmsg() calls should simply be a vector of set/getsockopt()
calls]
o check MSG_CTRUNC is set where it should be.
- o Find all the commonality between DECnet and IPv4 routing code and extract
+ o Find all the commonality between DECnet and IPv4 routing code and extract
it into a small library of routines. [probably a project for 2.7.xx]
o Add perfect socket hashing - an idea suggested by Paul Koning. Currently
@@ -38,4 +38,3 @@ Steve's quick list of things that need finishing off:
o DECnet sendpages() function
o AIO for DECnet
-
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index fce94cbd4378..f78fe58eafc8 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -797,5 +797,3 @@ void __init dn_fib_init(void)
rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_DELROUTE,
dn_fib_rtm_delroute, NULL, 0);
}
-
-
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 1b2120645730..2fb5e055ba25 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -491,6 +491,7 @@ static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb)
break;
case DN_RUN:
sk->sk_shutdown |= SHUTDOWN_MASK;
+ /* fall through */
case DN_CC:
scp->state = DN_CN;
}
@@ -911,4 +912,3 @@ free_out:
return NET_RX_SUCCESS;
}
-
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 56a52a004c56..a1779de6bd9c 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -701,4 +701,3 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
dn_nsp_send(skb);
}
-
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index e74765024d88..1c002c0fb712 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -404,7 +404,7 @@ void dn_rt_cache_flush(int delay)
if (delay <= 0) {
spin_unlock_bh(&dn_rt_flush_lock);
- dn_run_flush(0);
+ dn_run_flush(NULL);
return;
}
@@ -1920,9 +1920,8 @@ void __init dn_route_init(void)
void __exit dn_route_cleanup(void)
{
del_timer(&dn_route_timer);
- dn_run_flush(0);
+ dn_run_flush(NULL);
remove_proc_entry("decnet_cache", init_net.proc_net);
dst_entries_destroy(&dn_dst_ops);
}
-
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 72236695db3d..4a4e3c17740c 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -256,5 +256,3 @@ void __exit dn_fib_rules_cleanup(void)
rtnl_unlock();
rcu_barrier();
}
-
-
diff --git a/net/decnet/netfilter/Makefile b/net/decnet/netfilter/Makefile
index 255c1ae9daeb..b579e52130aa 100644
--- a/net/decnet/netfilter/Makefile
+++ b/net/decnet/netfilter/Makefile
@@ -3,4 +3,3 @@
#
obj-$(CONFIG_DECNET_NF_GRABULATOR) += dn_rtmsg.o
-
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index ab395e55cd78..a4faacadd8a8 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -158,4 +158,3 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_DNRTMSG);
module_init(dn_rtmsg_init);
module_exit(dn_rtmsg_fini);
-
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 0c9478b91fa5..7f4534828f6c 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -320,4 +320,3 @@ static void __exit exit_dns_resolver(void)
module_init(init_dns_resolver)
module_exit(exit_dns_resolver)
MODULE_LICENSE("GPL");
-
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index dc5d9af3dc80..a1917025e155 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -775,6 +775,20 @@ struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n)
if (!ds)
return NULL;
+ /* We avoid allocating memory outside dsa_switch
+ * if it is not needed.
+ */
+ if (n <= sizeof(ds->_bitmap) * 8) {
+ ds->bitmap = &ds->_bitmap;
+ } else {
+ ds->bitmap = devm_kcalloc(dev,
+ BITS_TO_LONGS(n),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!ds->bitmap))
+ return NULL;
+ }
+
ds->dev = dev;
ds->num_ports = n;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 9864bcd3d317..962c4fd338ba 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -900,7 +900,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, cb, dev, dev);
+ return tcf_block_cb_register(f->block, cb, dev, dev, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, cb, dev);
return 0;
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index b93511726069..142b294d3446 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -136,21 +136,20 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds,
{
const struct switchdev_obj_port_mdb *mdb = info->mdb;
struct switchdev_trans *trans = info->trans;
- DECLARE_BITMAP(group, ds->num_ports);
int port;
/* Build a mask of Multicast group members */
- bitmap_zero(group, ds->num_ports);
+ bitmap_zero(ds->bitmap, ds->num_ports);
if (ds->index == info->sw_index)
- set_bit(info->port, group);
+ set_bit(info->port, ds->bitmap);
for (port = 0; port < ds->num_ports; port++)
if (dsa_is_dsa_port(ds, port))
- set_bit(port, group);
+ set_bit(port, ds->bitmap);
if (switchdev_trans_ph_prepare(trans))
- return dsa_switch_mdb_prepare_bitmap(ds, mdb, group);
+ return dsa_switch_mdb_prepare_bitmap(ds, mdb, ds->bitmap);
- dsa_switch_mdb_add_bitmap(ds, mdb, group);
+ dsa_switch_mdb_add_bitmap(ds, mdb, ds->bitmap);
return 0;
}
@@ -204,21 +203,20 @@ static int dsa_switch_vlan_add(struct dsa_switch *ds,
{
const struct switchdev_obj_port_vlan *vlan = info->vlan;
struct switchdev_trans *trans = info->trans;
- DECLARE_BITMAP(members, ds->num_ports);
int port;
/* Build a mask of VLAN members */
- bitmap_zero(members, ds->num_ports);
+ bitmap_zero(ds->bitmap, ds->num_ports);
if (ds->index == info->sw_index)
- set_bit(info->port, members);
+ set_bit(info->port, ds->bitmap);
for (port = 0; port < ds->num_ports; port++)
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
- set_bit(port, members);
+ set_bit(port, ds->bitmap);
if (switchdev_trans_ph_prepare(trans))
- return dsa_switch_vlan_prepare_bitmap(ds, vlan, members);
+ return dsa_switch_vlan_prepare_bitmap(ds, vlan, ds->bitmap);
- dsa_switch_vlan_add_bitmap(ds, vlan, members);
+ dsa_switch_vlan_add_bitmap(ds, vlan, ds->bitmap);
return 0;
}
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index ee28440f57c5..fd8faa0dfa61 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -427,13 +427,13 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
}
EXPORT_SYMBOL(sysfs_format_mac);
-struct sk_buff **eth_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
{
- struct sk_buff *p, **pp = NULL;
- struct ethhdr *eh, *eh2;
- unsigned int hlen, off_eth;
const struct packet_offload *ptype;
+ unsigned int hlen, off_eth;
+ struct sk_buff *pp = NULL;
+ struct ethhdr *eh, *eh2;
+ struct sk_buff *p;
__be16 type;
int flush = 1;
@@ -448,7 +448,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
flush = 0;
- for (p = *head; p; p = p->next) {
+ list_for_each_entry(p, head, list) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index 2cc224106b69..e7857a8ac86d 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -25,7 +25,7 @@
#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
-#include <net/ipv6.h>
+#include <net/ipv6_frag.h>
#include <net/inet_frag.h>
#include "6lowpan_i.h"
@@ -40,9 +40,6 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
const struct frag_lowpan_compare_key *key = a;
- struct lowpan_frag_queue *fq;
-
- fq = container_of(q, struct lowpan_frag_queue, q);
BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
memcpy(&q->key, key, sizeof(*key));
@@ -52,10 +49,8 @@ static void lowpan_frag_expire(struct timer_list *t)
{
struct inet_frag_queue *frag = from_timer(frag, t, timer);
struct frag_queue *fq;
- struct net *net;
fq = container_of(frag, struct frag_queue, q);
- net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);
spin_lock(&fq->q.lock);
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index e6ff5128e61a..ca53efa17be1 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -265,9 +265,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
/* We must take a copy of the skb before we modify/replace the ipv6
* header as the header could be used elsewhere
*/
- skb = skb_unshare(skb, GFP_ATOMIC);
- if (!skb)
- return NET_XMIT_DROP;
+ if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
+ skb_tailroom(skb) < ldev->needed_tailroom)) {
+ struct sk_buff *nskb;
+
+ nskb = skb_copy_expand(skb, ldev->needed_headroom,
+ ldev->needed_tailroom, GFP_ATOMIC);
+ if (likely(nskb)) {
+ consume_skb(skb);
+ skb = nskb;
+ } else {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+ } else {
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_XMIT_DROP;
+ }
ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
if (ret < 0) {
diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c
index cb7176cd4cd6..fe225d9a1877 100644
--- a/net/ieee802154/core.c
+++ b/net/ieee802154/core.c
@@ -400,4 +400,3 @@ module_exit(wpan_phy_class_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IEEE 802.15.4 configuration interface");
MODULE_AUTHOR("Dmitry Eremin-Solenikov");
-
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
index 35c432668454..78f6f1233194 100644
--- a/net/ieee802154/nl_policy.c
+++ b/net/ieee802154/nl_policy.c
@@ -75,4 +75,3 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] = { .type = NLA_U8, },
};
-
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index a60658c85a9a..bc6b912603f1 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -25,6 +25,7 @@
#include <linux/termios.h> /* For TIOCOUTQ/INQ */
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/socket.h>
#include <net/datalink.h>
#include <net/psnap.h>
#include <net/sock.h>
@@ -452,6 +453,7 @@ struct dgram_sock {
unsigned int bound:1;
unsigned int connected:1;
unsigned int want_ack:1;
+ unsigned int want_lqi:1;
unsigned int secen:1;
unsigned int secen_override:1;
unsigned int seclevel:3;
@@ -486,6 +488,7 @@ static int dgram_init(struct sock *sk)
struct dgram_sock *ro = dgram_sk(sk);
ro->want_ack = 1;
+ ro->want_lqi = 0;
return 0;
}
@@ -713,6 +716,7 @@ static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sk_buff *skb;
+ struct dgram_sock *ro = dgram_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_ieee802154 *, saddr, msg->msg_name);
skb = skb_recv_datagram(sk, flags, noblock, &err);
@@ -744,6 +748,13 @@ static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
*addr_len = sizeof(*saddr);
}
+ if (ro->want_lqi) {
+ err = put_cmsg(msg, SOL_IEEE802154, WPAN_WANTLQI,
+ sizeof(uint8_t), &(mac_cb(skb)->lqi));
+ if (err)
+ goto done;
+ }
+
if (flags & MSG_TRUNC)
copied = skb->len;
done:
@@ -847,6 +858,9 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname,
case WPAN_WANTACK:
val = ro->want_ack;
break;
+ case WPAN_WANTLQI:
+ val = ro->want_lqi;
+ break;
case WPAN_SECURITY:
if (!ro->secen_override)
val = WPAN_SECURITY_DEFAULT;
@@ -892,6 +906,9 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname,
case WPAN_WANTACK:
ro->want_ack = !!val;
break;
+ case WPAN_WANTLQI:
+ ro->want_lqi = !!val;
+ break;
case WPAN_SECURITY:
if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
!ns_capable(net->user_ns, CAP_NET_RAW)) {
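
A hedged userspace sketch of the new option: request per-packet LQI once, then pull it out of the control message on each receive. SOL_IEEE802154 and WPAN_WANTLQI are not exported through uapi headers, so userspace is assumed to copy their definitions from the kernel tree:

    #include <stdint.h>
    #include <sys/socket.h>

    static ssize_t recv_with_lqi(int fd, void *buf, size_t len, uint8_t *lqi)
    {
        char control[CMSG_SPACE(sizeof(uint8_t))];
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = {
            .msg_iov = &iov, .msg_iovlen = 1,
            .msg_control = control, .msg_controllen = sizeof(control),
        };
        struct cmsghdr *cm;
        int one = 1;
        ssize_t n;

        setsockopt(fd, SOL_IEEE802154, WPAN_WANTLQI, &one, sizeof(one));
        n = recvmsg(fd, &msg, 0);
        if (n < 0)
            return n;
        /* The kernel attaches the LQI with level SOL_IEEE802154 and
         * type WPAN_WANTLQI, as put_cmsg() above shows. */
        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
            if (cm->cmsg_level == SOL_IEEE802154 &&
                cm->cmsg_type == WPAN_WANTLQI)
                *lqi = *(uint8_t *)CMSG_DATA(cm);
        return n;
    }
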
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 80dad301361d..32cae39cdff6 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -430,7 +430,7 @@ config INET_DIAG
Support for INET (TCP, DCCP, etc) socket monitoring interface used by
native Linux tools such as ss. ss is included in iproute2, currently
downloadable at:
-
+
http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2
If unsure, say Y.
@@ -600,7 +600,7 @@ config TCP_CONG_VENO
distinguishing to circumvent the difficult judgment of the packet loss
type. TCP Veno cuts down less congestion window in response to random
loss packets.
- See <http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1177186>
+ See <http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1177186>
config TCP_CONG_YEAH
tristate "YeAH TCP"
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index eec9569ffa5c..7446b98661d8 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_INET_XFRM_MODE_TRANSPORT) += xfrm4_mode_transport.o
obj-$(CONFIG_INET_XFRM_MODE_TUNNEL) += xfrm4_mode_tunnel.o
obj-$(CONFIG_IP_PNP) += ipconfig.o
obj-$(CONFIG_NETFILTER) += netfilter.o netfilter/
-obj-$(CONFIG_INET_DIAG) += inet_diag.o
+obj-$(CONFIG_INET_DIAG) += inet_diag.o
obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
obj-$(CONFIG_INET_RAW_DIAG) += raw_diag.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index b403499fdabe..20fda8fb8ffd 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -229,6 +229,7 @@ int inet_listen(struct socket *sock, int backlog)
err = inet_csk_listen_start(sk, backlog);
if (err)
goto out;
+ tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
}
sk->sk_max_ack_backlog = backlog;
err = 0;
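
A hedged sketch of a BPF sock_ops program that consumes the new callback; it is compiled separately and attached to a cgroup, and the logging is illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("sockops")
    int log_listen(struct bpf_sock_ops *ops)
    {
        if (ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB)
            bpf_printk("tcp socket entered LISTEN\n");
        return 1;
    }

    char _license[] SEC("license") = "GPL";
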
@@ -485,8 +486,7 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
* is temporarily down)
*/
err = -EADDRNOTAVAIL;
- if (!net->ipv4.sysctl_ip_nonlocal_bind &&
- !(inet->freebind || inet->transparent) &&
+ if (!inet_can_nonlocal_bind(net, inet) &&
addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST &&
@@ -1384,12 +1384,12 @@ out:
}
EXPORT_SYMBOL(inet_gso_segment);
-struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
{
const struct net_offload *ops;
- struct sk_buff **pp = NULL;
- struct sk_buff *p;
+ struct sk_buff *pp = NULL;
const struct iphdr *iph;
+ struct sk_buff *p;
unsigned int hlen;
unsigned int off;
unsigned int id;
@@ -1425,7 +1425,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
id >>= 16;
- for (p = *head; p; p = p->next) {
+ list_for_each_entry(p, head, list) {
struct iphdr *iph2;
u16 flush_id;
@@ -1505,8 +1505,8 @@ out:
}
EXPORT_SYMBOL(inet_gro_receive);
-static struct sk_buff **ipip_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *ipip_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
{
if (NAPI_GRO_CB(skb)->encap_mark) {
NAPI_GRO_CB(skb)->flush = 1;
@@ -1801,6 +1801,7 @@ static __net_init int inet_init_net(struct net *net)
* We set them here, in case sysctl is not compiled.
*/
net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
+ net->ipv4.sysctl_ip_fwd_update_priority = 1;
net->ipv4.sysctl_ip_dynaddr = 0;
net->ipv4.sysctl_ip_early_demux = 1;
net->ipv4.sysctl_udp_early_demux = 1;
@@ -1882,6 +1883,7 @@ fs_initcall(ipv4_offload_init);
static struct packet_type ip_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_IP),
.func = ip_rcv,
+ .list_func = ip_list_rcv,
};
static int __init inet_init(void)
diff --git a/net/ipv4/bpfilter/Makefile b/net/ipv4/bpfilter/Makefile
index ce262d76cc48..e9e42f99725e 100644
--- a/net/ipv4/bpfilter/Makefile
+++ b/net/ipv4/bpfilter/Makefile
@@ -1,2 +1 @@
obj-$(CONFIG_BPFILTER) += sockopt.o
-
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index d7585ab1a77a..ea4bd8a52422 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1827,6 +1827,8 @@ static int inet_netconf_msgsize_devconf(int type)
size += nla_total_size(4);
if (all || type == NETCONFA_MC_FORWARDING)
size += nla_total_size(4);
+ if (all || type == NETCONFA_BC_FORWARDING)
+ size += nla_total_size(4);
if (all || type == NETCONFA_PROXY_NEIGH)
size += nla_total_size(4);
if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
@@ -1873,6 +1875,10 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
nla_put_s32(skb, NETCONFA_MC_FORWARDING,
IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
goto nla_put_failure;
+ if ((all || type == NETCONFA_BC_FORWARDING) &&
+ nla_put_s32(skb, NETCONFA_BC_FORWARDING,
+ IPV4_DEVCONF(*devconf, BC_FORWARDING)) < 0)
+ goto nla_put_failure;
if ((all || type == NETCONFA_PROXY_NEIGH) &&
nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
@@ -2143,6 +2149,10 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
if ((new_value == 0) && (old_value != 0))
rt_cache_flush(net);
+ if (i == IPV4_DEVCONF_BC_FORWARDING - 1 &&
+ new_value != old_value)
+ rt_cache_flush(net);
+
if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
new_value != old_value) {
ifindex = devinet_conf_ifindex(net, cnf);
@@ -2259,6 +2269,7 @@ static struct devinet_sysctl_table {
DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
devinet_sysctl_forward),
DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
+ DEVINET_SYSCTL_RW_ENTRY(BC_FORWARDING, "bc_forwarding"),
DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
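
A hedged userspace sketch of flipping the new per-device knob; the interface name is an assumption, and the write also triggers the route cache flush added above:

    #include <stdio.h>

    int main(void)
    {
        /* Interface name is illustrative. */
        FILE *f = fopen("/proc/sys/net/ipv4/conf/eth0/bc_forwarding", "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        fputs("1\n", f);    /* forward directed broadcasts on eth0 */
        fclose(f);
        return 0;
    }
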
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 7cf755ef9efb..58834a10c0be 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -28,8 +28,8 @@
#include <linux/spinlock.h>
#include <net/udp.h>
-static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *esp4_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
{
int offset = skb_gro_offset(skb);
struct xfrm_offload *xo;
@@ -135,8 +135,7 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
skb->encap_hdr_csum = 1;
- if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
- (x->xso.dev != skb->dev))
+ if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
esp_features = features & ~NETIF_F_CSUM_MASK;
@@ -179,8 +178,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
if (!xo)
return -EINVAL;
- if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
- (x->xso.dev != skb->dev)) {
+ if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
xo->flags |= CRYPTO_FALLBACK;
hw_offload = false;
}
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index c9ec1603666b..500a59906b87 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -224,14 +224,14 @@ drop:
return 0;
}
-static struct sk_buff **fou_gro_receive(struct sock *sk,
- struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *fou_gro_receive(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
{
- const struct net_offload *ops;
- struct sk_buff **pp = NULL;
u8 proto = fou_from_sock(sk)->protocol;
const struct net_offload **offloads;
+ const struct net_offload *ops;
+ struct sk_buff *pp = NULL;
/* We can clear the encap_mark for FOU as we are essentially doing
* one of two possible things. We are either adding an L4 tunnel
@@ -305,13 +305,13 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
return guehdr;
}
-static struct sk_buff **gue_gro_receive(struct sock *sk,
- struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *gue_gro_receive(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
{
const struct net_offload **offloads;
const struct net_offload *ops;
- struct sk_buff **pp = NULL;
+ struct sk_buff *pp = NULL;
struct sk_buff *p;
struct guehdr *guehdr;
size_t len, optlen, hdrlen, off;
@@ -397,7 +397,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
skb_gro_pull(skb, hdrlen);
- for (p = *head; p; p = p->next) {
+ list_for_each_entry(p, head, list) {
const struct guehdr *guehdr2;
if (!NAPI_GRO_CB(p)->same_flow)
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 6a7d980105f6..6c63524f598a 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -108,10 +108,10 @@ out:
return segs;
}
-static struct sk_buff **gre_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *gre_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
{
- struct sk_buff **pp = NULL;
+ struct sk_buff *pp = NULL;
struct sk_buff *p;
const struct gre_base_hdr *greh;
unsigned int hlen, grehlen;
@@ -182,7 +182,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
null_compute_pseudo);
}
- for (p = *head; p; p = p->next) {
+ list_for_each_entry(p, head, list) {
const struct gre_base_hdr *greh2;
if (!NAPI_GRO_CB(p)->same_flow)
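
The GRO receive handlers in esp4_offload.c, fou.c and gre_offload.c (plus ip_packet_type gaining .list_func above) all make the same conversion: the hand-rolled `struct sk_buff **` chain walked with `for (p = *head; p; p = p->next)` becomes an intrusive `struct list_head` walked with list_for_each_entry(). A self-contained sketch of the container_of-based iteration that macro expands to; the list helpers here are trimmed re-implementations, not the kernel headers:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each_entry(pos, head, member) \
	for (pos = container_of((head)->next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = container_of(pos->member.next, typeof(*pos), member))

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

struct pkt { int id; struct list_head list; };

int main(void)
{
	struct list_head head = { &head, &head };
	struct pkt p[3] = { { 1 }, { 2 }, { 3 } };
	struct pkt *pos;
	int i;

	for (i = 0; i < 3; i++)
		list_add_tail(&p[i].list, &head);
	/* Equivalent of the old "for (p = *head; p; p = p->next)" walk. */
	list_for_each_entry(pos, &head, list)
		printf("pkt %d\n", pos->id);
	return 0;
}
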
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 1617604c9284..695979b7ef6d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -429,14 +429,11 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
icmp_param->data.icmph.checksum = 0;
+ ipcm_init(&ipc);
inet->tos = ip_hdr(skb)->tos;
sk->sk_mark = mark;
daddr = ipc.addr = ip_hdr(skb)->saddr;
saddr = fib_compute_spec_dst(skb);
- ipc.opt = NULL;
- ipc.tx_flags = 0;
- ipc.ttl = 0;
- ipc.tos = -1;
if (icmp_param->replyopts.opt.opt.optlen) {
ipc.opt = &icmp_param->replyopts.opt;
@@ -710,11 +707,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
icmp_param.offset = skb_network_offset(skb_in);
inet_sk(sk)->tos = tos;
sk->sk_mark = mark;
+ ipcm_init(&ipc);
ipc.addr = iph->saddr;
ipc.opt = &icmp_param.replyopts.opt;
- ipc.tx_flags = 0;
- ipc.ttl = 0;
- ipc.tos = -1;
rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
type, code, &icmp_param);
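
icmp_reply() and icmp_send(), like the ping, raw and ip_send_unicast_reply() call sites below, drop their open-coded ipc field resets in favour of ipcm_init()/ipcm_init_sk(). A standalone model of the pattern; the struct here is trimmed to the fields whose initialization is visible in this diff, not the full ipcm_cookie:

#include <stdio.h>
#include <string.h>

/* Trimmed stand-in for struct ipcm_cookie: only the fields whose
 * open-coded init this diff removes are modelled.
 */
struct ipcm {
	unsigned int addr;
	void *opt;
	unsigned char tx_flags;
	unsigned char ttl;
	short tos;
};

/* One initializer instead of four assignments at every call site;
 * everything defaults to zero except tos, whose "unset" value is -1.
 */
static void ipcm_init(struct ipcm *ipc)
{
	memset(ipc, 0, sizeof(*ipc));
	ipc->tos = -1;
}

int main(void)
{
	struct ipcm ipc;

	ipcm_init(&ipc);
	printf("tos=%d ttl=%u\n", ipc.tos, ipc.ttl);
	return 0;
}
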
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 75151be21413..cf75f8944b05 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1288,7 +1288,7 @@ static void igmp_group_dropped(struct ip_mc_list *im)
#endif
}
-static void igmp_group_added(struct ip_mc_list *im, unsigned int mode)
+static void igmp_group_added(struct ip_mc_list *im)
{
struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
@@ -1320,7 +1320,7 @@ static void igmp_group_added(struct ip_mc_list *im, unsigned int mode)
* not send filter-mode change record as the mode should be from
* IN() to IN(A).
*/
- if (mode == MCAST_EXCLUDE)
+ if (im->sfmode == MCAST_EXCLUDE)
im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
igmp_ifc_event(in_dev);
@@ -1432,7 +1432,7 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
#ifdef CONFIG_IP_MULTICAST
igmpv3_del_delrec(in_dev, im);
#endif
- igmp_group_added(im, mode);
+ igmp_group_added(im);
if (!in_dev->dead)
ip_rt_multicast_event(in_dev);
out:
@@ -1699,7 +1699,7 @@ void ip_mc_remap(struct in_device *in_dev)
#ifdef CONFIG_IP_MULTICAST
igmpv3_del_delrec(in_dev, pmc);
#endif
- igmp_group_added(pmc, pmc->sfmode);
+ igmp_group_added(pmc);
}
}
@@ -1762,7 +1762,7 @@ void ip_mc_up(struct in_device *in_dev)
#ifdef CONFIG_IP_MULTICAST
igmpv3_del_delrec(in_dev, pmc);
#endif
- igmp_group_added(pmc, pmc->sfmode);
+ igmp_group_added(pmc);
}
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 33a88e045efd..dfd5009f96ef 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -107,6 +107,15 @@ bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
}
EXPORT_SYMBOL(inet_rcv_saddr_equal);
+bool inet_rcv_saddr_any(const struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
+#endif
+ return !sk->sk_rcv_saddr;
+}
+
void inet_get_local_port_range(struct net *net, int *low, int *high)
{
unsigned int seq;
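
inet_rcv_saddr_any() answers one question for both address families: is this socket bound to the wildcard address? The reuseport calls in inet_hashtables.c below pass the answer down so the group knows whether lookups may need an address-specific fallback. A userspace analogue of the same check, using getsockname() rather than kernel socket internals:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Userspace analogue of inet_rcv_saddr_any(): true when the socket's
 * local address is the wildcard (0.0.0.0 or ::).
 */
static bool bound_to_any(int fd)
{
	struct sockaddr_storage ss;
	socklen_t len = sizeof(ss);

	if (getsockname(fd, (struct sockaddr *)&ss, &len) < 0)
		return false;
	if (ss.ss_family == AF_INET6) {
		struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)&ss;

		return IN6_IS_ADDR_UNSPECIFIED(&s6->sin6_addr);
	}
	return ((struct sockaddr_in *)&ss)->sin_addr.s_addr == INADDR_ANY;
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in sin = { .sin_family = AF_INET };

	bind(fd, (struct sockaddr *)&sin, sizeof(sin)); /* any addr, port 0 */
	printf("wildcard: %d\n", bound_to_any(fd));
	close(fd);
	return 0;
}
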
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 0d70608cc2e1..bcb11f3a27c0 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -20,6 +20,7 @@
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
+#include <linux/rhashtable.h>
#include <net/sock.h>
#include <net/inet_frag.h>
@@ -136,12 +137,16 @@ void inet_frag_destroy(struct inet_frag_queue *q)
fp = q->fragments;
nf = q->net;
f = nf->f;
- while (fp) {
- struct sk_buff *xp = fp->next;
-
- sum_truesize += fp->truesize;
- kfree_skb(fp);
- fp = xp;
+ if (fp) {
+ do {
+ struct sk_buff *xp = fp->next;
+
+ sum_truesize += fp->truesize;
+ kfree_skb(fp);
+ fp = xp;
+ } while (fp);
+ } else {
+ sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
}
sum = sum_truesize + f->qsize;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 3647167c8fa3..f5c9ef2586de 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -328,7 +328,7 @@ struct sock *__inet_lookup_listener(struct net *net,
saddr, sport, daddr, hnum,
dif, sdif);
if (result)
- return result;
+ goto done;
/* Lookup lhash2 with INADDR_ANY */
@@ -337,9 +337,10 @@ struct sock *__inet_lookup_listener(struct net *net,
if (ilb2->count > ilb->count)
goto port_lookup;
- return inet_lhash2_lookup(net, ilb2, skb, doff,
- saddr, sport, daddr, hnum,
- dif, sdif);
+ result = inet_lhash2_lookup(net, ilb2, skb, doff,
+ saddr, sport, daddr, hnum,
+ dif, sdif);
+ goto done;
port_lookup:
sk_for_each_rcu(sk, &ilb->head) {
@@ -352,12 +353,15 @@ port_lookup:
result = reuseport_select_sock(sk, phash,
skb, doff);
if (result)
- return result;
+ goto done;
}
result = sk;
hiscore = score;
}
}
+done:
+ if (unlikely(IS_ERR(result)))
+ return NULL;
return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
@@ -567,10 +571,11 @@ static int inet_reuseport_add_sock(struct sock *sk,
inet_csk(sk2)->icsk_bind_hash == tb &&
sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
inet_rcv_saddr_equal(sk, sk2, false))
- return reuseport_add_sock(sk, sk2);
+ return reuseport_add_sock(sk, sk2,
+ inet_rcv_saddr_any(sk));
}
- return reuseport_alloc(sk);
+ return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}
int __inet_hash(struct sock *sk, struct sock *osk)
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index b54b948b0596..32662e9e5d21 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -143,7 +143,8 @@ int ip_forward(struct sk_buff *skb)
!skb_sec_path(skb))
ip_rt_send_redirect(skb);
- skb->priority = rt_tos2priority(iph->tos);
+ if (net->ipv4.sysctl_ip_fwd_update_priority)
+ skb->priority = rt_tos2priority(iph->tos);
return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
net, NULL, skb, skb->dev, rt->dst.dev,
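
Rewriting skb->priority on forward is now gated on the new knob, initialized to 1 in the first hunk above so the historical behaviour stays the default; setting it to 0 keeps the priority assigned at ingress. A small reader for the corresponding proc file, whose path follows from the ctl_table entry added in sysctl_net_ipv4.c below:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Report whether forwarded packets get skb->priority re-derived from
 * the IP TOS field (1, the default) or keep their ingress priority (0).
 */
int main(void)
{
	const char *p = "/proc/sys/net/ipv4/ip_forward_update_priority";
	char val[4] = "";
	int fd = open(p, O_RDONLY);

	if (fd < 0) {
		perror(p);
		return 1;
	}
	if (read(fd, val, sizeof(val) - 1) > 0)
		printf("ip_forward_update_priority = %s", val);
	close(fd);
	return 0;
}
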
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index d14d741fb05e..88281fbce88c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -57,6 +57,57 @@
*/
static const char ip_frag_cache_name[] = "ip4-frags";
+/* Use skb->cb to track consecutive/adjacent fragments coming at
+ * the end of the queue. Nodes in the rb-tree queue will
+ * contain "runs" of one or more adjacent fragments.
+ *
+ * Invariants:
+ * - next_frag is NULL at the tail of a "run";
+ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
+ */
+struct ipfrag_skb_cb {
+ struct inet_skb_parm h;
+ struct sk_buff *next_frag;
+ int frag_run_len;
+};
+
+#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
+
+static void ip4_frag_init_run(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
+
+ FRAG_CB(skb)->next_frag = NULL;
+ FRAG_CB(skb)->frag_run_len = skb->len;
+}
+
+/* Append skb to the last "run". */
+static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
+ struct sk_buff *skb)
+{
+ RB_CLEAR_NODE(&skb->rbnode);
+ FRAG_CB(skb)->next_frag = NULL;
+
+ FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
+ FRAG_CB(q->fragments_tail)->next_frag = skb;
+ q->fragments_tail = skb;
+}
+
+/* Create a new "run" with the skb. */
+static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
+{
+ if (q->last_run_head)
+ rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
+ &q->last_run_head->rbnode.rb_right);
+ else
+ rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
+ rb_insert_color(&skb->rbnode, &q->rb_fragments);
+
+ ip4_frag_init_run(skb);
+ q->fragments_tail = skb;
+ q->last_run_head = skb;
+}
+
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
struct inet_frag_queue q;
@@ -75,8 +126,8 @@ static u8 ip4_frag_ecn(u8 tos)
static struct inet_frags ip4_frags;
-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
- struct net_device *dev);
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev);
static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
@@ -136,7 +187,7 @@ static void ip_expire(struct timer_list *t)
{
struct inet_frag_queue *frag = from_timer(frag, t, timer);
const struct iphdr *iph;
- struct sk_buff *head;
+ struct sk_buff *head = NULL;
struct net *net;
struct ipq *qp;
int err;
@@ -152,14 +203,36 @@ static void ip_expire(struct timer_list *t)
ipq_kill(qp);
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
-
- head = qp->q.fragments;
-
__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
- if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !head)
+ if (!(qp->q.flags & INET_FRAG_FIRST_IN))
goto out;
+ /* sk_buff::dev and sk_buff::rbnode share a union. So we
+ * pull the head out of the tree in order to be able to
+ * deal with head->dev.
+ */
+ if (qp->q.fragments) {
+ head = qp->q.fragments;
+ qp->q.fragments = head->next;
+ } else {
+ head = skb_rb_first(&qp->q.rb_fragments);
+ if (!head)
+ goto out;
+ if (FRAG_CB(head)->next_frag)
+ rb_replace_node(&head->rbnode,
+ &FRAG_CB(head)->next_frag->rbnode,
+ &qp->q.rb_fragments);
+ else
+ rb_erase(&head->rbnode, &qp->q.rb_fragments);
+ memset(&head->rbnode, 0, sizeof(head->rbnode));
+ barrier();
+ }
+ if (head == qp->q.fragments_tail)
+ qp->q.fragments_tail = NULL;
+
+ sub_frag_mem_limit(qp->q.net, head->truesize);
+
head->dev = dev_get_by_index_rcu(net, qp->iif);
if (!head->dev)
goto out;
@@ -179,16 +252,16 @@ static void ip_expire(struct timer_list *t)
(skb_rtable(head)->rt_type != RTN_LOCAL))
goto out;
- skb_get(head);
spin_unlock(&qp->q.lock);
icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
- kfree_skb(head);
goto out_rcu_unlock;
out:
spin_unlock(&qp->q.lock);
out_rcu_unlock:
rcu_read_unlock();
+ if (head)
+ kfree_skb(head);
ipq_put(qp);
}
@@ -231,7 +304,7 @@ static int ip_frag_too_far(struct ipq *qp)
end = atomic_inc_return(&peer->rid);
qp->rid = end;
- rc = qp->q.fragments && (end - start) > max;
+ rc = qp->q.fragments_tail && (end - start) > max;
if (rc) {
struct net *net;
@@ -245,7 +318,6 @@ static int ip_frag_too_far(struct ipq *qp)
static int ip_frag_reinit(struct ipq *qp)
{
- struct sk_buff *fp;
unsigned int sum_truesize = 0;
if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
@@ -253,21 +325,16 @@ static int ip_frag_reinit(struct ipq *qp)
return -ETIMEDOUT;
}
- fp = qp->q.fragments;
- do {
- struct sk_buff *xp = fp->next;
-
- sum_truesize += fp->truesize;
- kfree_skb(fp);
- fp = xp;
- } while (fp);
+ sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
sub_frag_mem_limit(qp->q.net, sum_truesize);
qp->q.flags = 0;
qp->q.len = 0;
qp->q.meat = 0;
qp->q.fragments = NULL;
+ qp->q.rb_fragments = RB_ROOT;
qp->q.fragments_tail = NULL;
+ qp->q.last_run_head = NULL;
qp->iif = 0;
qp->ecn = 0;
@@ -277,7 +344,9 @@ static int ip_frag_reinit(struct ipq *qp)
/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
- struct sk_buff *prev, *next;
+ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+ struct rb_node **rbn, *parent;
+ struct sk_buff *skb1, *prev_tail;
struct net_device *dev;
unsigned int fragsize;
int flags, offset;
@@ -340,100 +409,61 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
if (err)
goto err;
- /* Find out which fragments are in front and at the back of us
- * in the chain of fragments so far. We must know where to put
- * this fragment, right?
- */
- prev = qp->q.fragments_tail;
- if (!prev || prev->ip_defrag_offset < offset) {
- next = NULL;
- goto found;
- }
- prev = NULL;
- for (next = qp->q.fragments; next != NULL; next = next->next) {
- if (next->ip_defrag_offset >= offset)
- break; /* bingo! */
- prev = next;
- }
+ /* Note : skb->rbnode and skb->dev share the same location. */
+ dev = skb->dev;
+ /* Makes sure compiler won't do silly aliasing games */
+ barrier();
-found:
- /* We found where to put this one. Check for overlap with
- * preceding fragment, and, if needed, align things so that
- * any overlaps are eliminated.
+ /* RFC5722, Section 4, amended by Errata ID : 3089
+ * When reassembling an IPv6 datagram, if
+ * one or more of its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments) MUST be silently discarded.
+ *
+ * We do the same here for IPv4 (and increment an snmp counter).
*/
- if (prev) {
- int i = (prev->ip_defrag_offset + prev->len) - offset;
- if (i > 0) {
- offset += i;
- err = -EINVAL;
- if (end <= offset)
- goto err;
- err = -ENOMEM;
- if (!pskb_pull(skb, i))
- goto err;
- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
-
- err = -ENOMEM;
-
- while (next && next->ip_defrag_offset < end) {
- int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */
-
- if (i < next->len) {
- int delta = -next->truesize;
-
- /* Eat head of the next overlapped fragment
- * and leave the loop. The next ones cannot overlap.
- */
- if (!pskb_pull(next, i))
- goto err;
- delta += next->truesize;
- if (delta)
- add_frag_mem_limit(qp->q.net, delta);
- next->ip_defrag_offset += i;
- qp->q.meat -= i;
- if (next->ip_summed != CHECKSUM_UNNECESSARY)
- next->ip_summed = CHECKSUM_NONE;
- break;
- } else {
- struct sk_buff *free_it = next;
-
- /* Old fragment is completely overridden with
- * new one drop it.
- */
- next = next->next;
-
- if (prev)
- prev->next = next;
- else
- qp->q.fragments = next;
-
- qp->q.meat -= free_it->len;
- sub_frag_mem_limit(qp->q.net, free_it->truesize);
- kfree_skb(free_it);
- }
+ /* Find out where to put this fragment. */
+ prev_tail = qp->q.fragments_tail;
+ if (!prev_tail)
+ ip4_frag_create_run(&qp->q, skb); /* First fragment. */
+ else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
+ /* This is the common case: skb goes to the end. */
+ /* Detect and discard overlaps. */
+ if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
+ goto discard_qp;
+ if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
+ ip4_frag_append_to_last_run(&qp->q, skb);
+ else
+ ip4_frag_create_run(&qp->q, skb);
+ } else {
+ /* Binary search. Note that skb can become the first fragment,
+ * but not the last (covered above).
+ */
+ rbn = &qp->q.rb_fragments.rb_node;
+ do {
+ parent = *rbn;
+ skb1 = rb_to_skb(parent);
+ if (end <= skb1->ip_defrag_offset)
+ rbn = &parent->rb_left;
+ else if (offset >= skb1->ip_defrag_offset +
+ FRAG_CB(skb1)->frag_run_len)
+ rbn = &parent->rb_right;
+ else /* Found an overlap with skb1. */
+ goto discard_qp;
+ } while (*rbn);
+ /* Here we have parent properly set, and rbn pointing to
+ * one of its NULL left/right children. Insert skb.
+ */
+ ip4_frag_init_run(skb);
+ rb_link_node(&skb->rbnode, parent, rbn);
+ rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
}
- /* Note : skb->ip_defrag_offset and skb->dev share the same location */
- dev = skb->dev;
if (dev)
qp->iif = dev->ifindex;
- /* Makes sure compiler wont do silly aliasing games */
- barrier();
skb->ip_defrag_offset = offset;
- /* Insert this fragment in the chain of fragments. */
- skb->next = next;
- if (!next)
- qp->q.fragments_tail = skb;
- if (prev)
- prev->next = skb;
- else
- qp->q.fragments = skb;
-
qp->q.stamp = skb->tstamp;
qp->q.meat += skb->len;
qp->ecn |= ecn;
@@ -455,7 +485,7 @@ found:
unsigned long orefdst = skb->_skb_refdst;
skb->_skb_refdst = 0UL;
- err = ip_frag_reasm(qp, prev, dev);
+ err = ip_frag_reasm(qp, skb, prev_tail, dev);
skb->_skb_refdst = orefdst;
return err;
}
@@ -463,20 +493,24 @@ found:
skb_dst_drop(skb);
return -EINPROGRESS;
+discard_qp:
+ inet_frag_kill(&qp->q);
+ err = -EINVAL;
+ __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
err:
kfree_skb(skb);
return err;
}
-
/* Build a new IP datagram from all its fragments. */
-
-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
- struct net_device *dev)
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev)
{
struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
struct iphdr *iph;
- struct sk_buff *fp, *head = qp->q.fragments;
+ struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
+ struct sk_buff **nextp; /* To build frag_list. */
+ struct rb_node *rbn;
int len;
int ihlen;
int err;
@@ -490,25 +524,26 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
goto out_fail;
}
/* Make the one we just received the head. */
- if (prev) {
- head = prev->next;
- fp = skb_clone(head, GFP_ATOMIC);
+ if (head != skb) {
+ fp = skb_clone(skb, GFP_ATOMIC);
if (!fp)
goto out_nomem;
-
- fp->next = head->next;
- if (!fp->next)
+ FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+ if (RB_EMPTY_NODE(&skb->rbnode))
+ FRAG_CB(prev_tail)->next_frag = fp;
+ else
+ rb_replace_node(&skb->rbnode, &fp->rbnode,
+ &qp->q.rb_fragments);
+ if (qp->q.fragments_tail == skb)
qp->q.fragments_tail = fp;
- prev->next = fp;
-
- skb_morph(head, qp->q.fragments);
- head->next = qp->q.fragments->next;
-
- consume_skb(qp->q.fragments);
- qp->q.fragments = head;
+ skb_morph(skb, head);
+ FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+ rb_replace_node(&head->rbnode, &skb->rbnode,
+ &qp->q.rb_fragments);
+ consume_skb(head);
+ head = skb;
}
- WARN_ON(!head);
WARN_ON(head->ip_defrag_offset != 0);
/* Allocate a new buffer for the datagram. */
@@ -533,35 +568,60 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
clone = alloc_skb(0, GFP_ATOMIC);
if (!clone)
goto out_nomem;
- clone->next = head->next;
- head->next = clone;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->len = clone->data_len = head->data_len - plen;
- head->data_len -= clone->len;
- head->len -= clone->len;
+ head->truesize += clone->truesize;
clone->csum = 0;
clone->ip_summed = head->ip_summed;
add_frag_mem_limit(qp->q.net, clone->truesize);
+ skb_shinfo(head)->frag_list = clone;
+ nextp = &clone->next;
+ } else {
+ nextp = &skb_shinfo(head)->frag_list;
}
- skb_shinfo(head)->frag_list = head->next;
skb_push(head, head->data - skb_network_header(head));
- for (fp=head->next; fp; fp = fp->next) {
- head->data_len += fp->len;
- head->len += fp->len;
- if (head->ip_summed != fp->ip_summed)
- head->ip_summed = CHECKSUM_NONE;
- else if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
+ /* Traverse the tree in order, to build frag_list. */
+ fp = FRAG_CB(head)->next_frag;
+ rbn = rb_next(&head->rbnode);
+ rb_erase(&head->rbnode, &qp->q.rb_fragments);
+ while (rbn || fp) {
+ /* fp points to the next sk_buff in the current run;
+ * rbn points to the next run.
+ */
+ /* Go through the current run. */
+ while (fp) {
+ *nextp = fp;
+ nextp = &fp->next;
+ fp->prev = NULL;
+ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+ head->data_len += fp->len;
+ head->len += fp->len;
+ if (head->ip_summed != fp->ip_summed)
+ head->ip_summed = CHECKSUM_NONE;
+ else if (head->ip_summed == CHECKSUM_COMPLETE)
+ head->csum = csum_add(head->csum, fp->csum);
+ head->truesize += fp->truesize;
+ fp = FRAG_CB(fp)->next_frag;
+ }
+ /* Move to the next run. */
+ if (rbn) {
+ struct rb_node *rbnext = rb_next(rbn);
+
+ fp = rb_to_skb(rbn);
+ rb_erase(rbn, &qp->q.rb_fragments);
+ rbn = rbnext;
+ }
}
sub_frag_mem_limit(qp->q.net, head->truesize);
+ *nextp = NULL;
head->next = NULL;
+ head->prev = NULL;
head->dev = dev;
head->tstamp = qp->q.stamp;
IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
@@ -589,7 +649,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
qp->q.fragments = NULL;
+ qp->q.rb_fragments = RB_ROOT;
qp->q.fragments_tail = NULL;
+ qp->q.last_run_head = NULL;
return 0;
out_nomem:
@@ -671,6 +733,28 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
}
EXPORT_SYMBOL(ip_check_defrag);
+unsigned int inet_frag_rbtree_purge(struct rb_root *root)
+{
+ struct rb_node *p = rb_first(root);
+ unsigned int sum = 0;
+
+ while (p) {
+ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, root);
+ while (skb) {
+ struct sk_buff *next = FRAG_CB(skb)->next_frag;
+
+ sum += skb->truesize;
+ kfree_skb(skb);
+ skb = next;
+ }
+ }
+ return sum;
+}
+EXPORT_SYMBOL(inet_frag_rbtree_purge);
+
#ifdef CONFIG_SYSCTL
static int dist_min;
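
The rewritten ip_frag_queue() keeps fragments in an rb-tree of "runs" keyed by offset and, per the RFC 5722 rule quoted above (now applied to IPv4 as well), discards the whole queue on any overlap. The descend-or-discard decision is an interval search; a standalone model of the same comparison over a sorted array of runs:

#include <stdbool.h>
#include <stdio.h>

struct run { int offset, len; };	/* covers [offset, offset + len) */

/* Mirror of the rb-tree descent in ip_frag_queue(): a new fragment
 * [offset, end) either falls strictly left or strictly right of each
 * run it is compared against, or it overlaps, in which case the whole
 * queue would be discarded.
 */
static bool overlaps(const struct run *runs, int n, int offset, int end)
{
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;
		const struct run *r = &runs[mid];

		if (end <= r->offset)			/* go left */
			hi = mid - 1;
		else if (offset >= r->offset + r->len)	/* go right */
			lo = mid + 1;
		else					/* overlap */
			return true;
	}
	return false;	/* lo would be the insertion point */
}

int main(void)
{
	struct run runs[] = { { 0, 1000 }, { 2000, 500 } };

	printf("%d\n", overlaps(runs, 2, 1000, 2000));	/* 0: fits the gap */
	printf("%d\n", overlaps(runs, 2, 900, 1100));	/* 1: overlap */
	return 0;
}
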
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 2d8efeecf619..51a5d06085ac 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -587,6 +587,8 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
goto err_free_skb;
key = &tun_info->key;
+ if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
+ goto err_free_rt;
md = ip_tunnel_info_opts(tun_info);
if (!md)
goto err_free_rt;
@@ -983,7 +985,6 @@ static void ipgre_tunnel_setup(struct net_device *dev)
static void __gre_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel;
- int t_hlen;
tunnel = netdev_priv(dev);
tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
@@ -991,8 +992,6 @@ static void __gre_tunnel_init(struct net_device *dev)
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
- t_hlen = tunnel->hlen + sizeof(struct iphdr);
-
dev->features |= GRE_FEATURES;
dev->hw_features |= GRE_FEATURES;
@@ -1302,13 +1301,11 @@ static const struct net_device_ops gre_tap_netdev_ops = {
static int erspan_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- int t_hlen;
tunnel->tun_hlen = 8;
tunnel->parms.iph.protocol = IPPROTO_GRE;
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
erspan_hdr_len(tunnel->erspan_ver);
- t_hlen = tunnel->hlen + sizeof(struct iphdr);
dev->features |= GRE_FEATURES;
dev->hw_features |= GRE_FEATURES;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 7582713dd18f..3196cf58f418 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -307,7 +307,8 @@ drop:
return true;
}
-static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+static int ip_rcv_finish_core(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);
int (*edemux)(struct sk_buff *skb);
@@ -315,13 +316,6 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
struct rtable *rt;
int err;
- /* if ingress device is enslaved to an L3 master device pass the
- * skb to its handler for processing
- */
- skb = l3mdev_ip_rcv(skb);
- if (!skb)
- return NET_RX_SUCCESS;
-
if (net->ipv4.sysctl_ip_early_demux &&
!skb_dst(skb) &&
!skb->sk &&
@@ -393,7 +387,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
goto drop;
}
- return dst_input(skb);
+ return NET_RX_SUCCESS;
drop:
kfree_skb(skb);
@@ -405,13 +399,29 @@ drop_error:
goto drop;
}
+static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ int ret;
+
+ /* if ingress device is enslaved to an L3 master device pass the
+ * skb to its handler for processing
+ */
+ skb = l3mdev_ip_rcv(skb);
+ if (!skb)
+ return NET_RX_SUCCESS;
+
+ ret = ip_rcv_finish_core(net, sk, skb);
+ if (ret != NET_RX_DROP)
+ ret = dst_input(skb);
+ return ret;
+}
+
/*
* Main IP Receive routine.
*/
-int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
{
const struct iphdr *iph;
- struct net *net;
u32 len;
/* When the interface is in promisc. mode, drop all the crap
@@ -421,7 +431,6 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
goto drop;
- net = dev_net(dev);
__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);
skb = skb_share_check(skb, GFP_ATOMIC);
@@ -489,9 +498,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
/* Must drop socket now because of tproxy. */
skb_orphan(skb);
- return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
- net, NULL, skb, dev, NULL,
- ip_rcv_finish);
+ return skb;
csum_error:
__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
@@ -500,5 +507,113 @@ inhdr_error:
drop:
kfree_skb(skb);
out:
- return NET_RX_DROP;
+ return NULL;
+}
+
+/*
+ * IP receive entry point
+ */
+int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+ struct net_device *orig_dev)
+{
+ struct net *net = dev_net(dev);
+
+ skb = ip_rcv_core(skb, net);
+ if (skb == NULL)
+ return NET_RX_DROP;
+ return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
+ net, NULL, skb, dev, NULL,
+ ip_rcv_finish);
+}
+
+static void ip_sublist_rcv_finish(struct list_head *head)
+{
+ struct sk_buff *skb, *next;
+
+ list_for_each_entry_safe(skb, next, head, list) {
+ list_del(&skb->list);
+ /* Handle ip{6}_forward case, as sch_direct_xmit has
+ * another kind of SKB-list usage (see validate_xmit_skb_list)
+ */
+ skb->next = NULL;
+ dst_input(skb);
+ }
+}
+
+static void ip_list_rcv_finish(struct net *net, struct sock *sk,
+ struct list_head *head)
+{
+ struct dst_entry *curr_dst = NULL;
+ struct sk_buff *skb, *next;
+ struct list_head sublist;
+
+ INIT_LIST_HEAD(&sublist);
+ list_for_each_entry_safe(skb, next, head, list) {
+ struct dst_entry *dst;
+
+ list_del(&skb->list);
+ /* if ingress device is enslaved to an L3 master device pass the
+ * skb to its handler for processing
+ */
+ skb = l3mdev_ip_rcv(skb);
+ if (!skb)
+ continue;
+ if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
+ continue;
+
+ dst = skb_dst(skb);
+ if (curr_dst != dst) {
+ /* dispatch old sublist */
+ if (!list_empty(&sublist))
+ ip_sublist_rcv_finish(&sublist);
+ /* start new sublist */
+ INIT_LIST_HEAD(&sublist);
+ curr_dst = dst;
+ }
+ list_add_tail(&skb->list, &sublist);
+ }
+ /* dispatch final sublist */
+ ip_sublist_rcv_finish(&sublist);
+}
+
+static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
+ struct net *net)
+{
+ NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
+ head, dev, NULL, ip_rcv_finish);
+ ip_list_rcv_finish(net, NULL, head);
+}
+
+/* Receive a list of IP packets */
+void ip_list_rcv(struct list_head *head, struct packet_type *pt,
+ struct net_device *orig_dev)
+{
+ struct net_device *curr_dev = NULL;
+ struct net *curr_net = NULL;
+ struct sk_buff *skb, *next;
+ struct list_head sublist;
+
+ INIT_LIST_HEAD(&sublist);
+ list_for_each_entry_safe(skb, next, head, list) {
+ struct net_device *dev = skb->dev;
+ struct net *net = dev_net(dev);
+
+ list_del(&skb->list);
+ skb = ip_rcv_core(skb, net);
+ if (skb == NULL)
+ continue;
+
+ if (curr_dev != dev || curr_net != net) {
+ /* dispatch old sublist */
+ if (!list_empty(&sublist))
+ ip_sublist_rcv(&sublist, curr_dev, curr_net);
+ /* start new sublist */
+ INIT_LIST_HEAD(&sublist);
+ curr_dev = dev;
+ curr_net = net;
+ }
+ list_add_tail(&skb->list, &sublist);
+ }
+ /* dispatch final sublist */
+ ip_sublist_rcv(&sublist, curr_dev, curr_net);
}
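
ip_list_rcv() and ip_list_rcv_finish() use the same trick at two levels: walk the batch once, and whenever the grouping key changes (dev/net in the outer loop, dst in the inner one), flush the accumulated sublist so every flush sees a homogeneous batch. A generic model of that split-and-dispatch loop:

#include <stdio.h>

/* Model of the sublist dispatch in ip_list_rcv(): consecutive items
 * sharing a key are batched, and each batch is flushed as one unit.
 */
static void dispatch(const int *batch, int n, int key)
{
	printf("flush %d item(s) with key %d (first %d)\n", n, key, batch[0]);
}

static void list_rcv(const int *keys, int n)
{
	int start = 0, i;

	for (i = 1; i <= n; i++) {
		/* A key change (or end of input) closes the sublist. */
		if (i == n || keys[i] != keys[start]) {
			dispatch(&keys[start], i - start, keys[start]);
			start = i;
		}
	}
}

int main(void)
{
	int keys[] = { 1, 1, 1, 2, 2, 1 };	/* e.g. ingress devices */

	list_rcv(keys, 6);	/* three flushes: 3x1, 2x2, 1x1 */
	return 0;
}
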
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 0e3edd25f881..9c4e72e9c60a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -423,7 +423,8 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
}
/* Note: skb->sk can be different from sk, in case of tunnels */
-int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
+int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
+ __u8 tos)
{
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
@@ -462,7 +463,7 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
inet->inet_dport,
inet->inet_sport,
sk->sk_protocol,
- RT_CONN_FLAGS(sk),
+ RT_CONN_FLAGS_TOS(sk, tos),
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
@@ -478,7 +479,7 @@ packet_routed:
skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
skb_reset_network_header(skb);
iph = ip_hdr(skb);
- *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
+ *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
iph->frag_off = htons(IP_DF);
else
@@ -511,7 +512,7 @@ no_route:
kfree_skb(skb);
return -EHOSTUNREACH;
}
-EXPORT_SYMBOL(ip_queue_xmit);
+EXPORT_SYMBOL(__ip_queue_xmit);
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
@@ -1147,14 +1148,15 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
cork->fragsize = ip_sk_use_pmtu(sk) ?
dst_mtu(&rt->dst) : rt->dst.dev->mtu;
- cork->gso_size = sk->sk_type == SOCK_DGRAM &&
- sk->sk_protocol == IPPROTO_UDP ? ipc->gso_size : 0;
+ cork->gso_size = ipc->gso_size;
cork->dst = &rt->dst;
cork->length = 0;
cork->ttl = ipc->ttl;
cork->tos = ipc->tos;
cork->priority = ipc->priority;
- cork->tx_flags = ipc->tx_flags;
+ cork->transmit_time = ipc->sockc.transmit_time;
+ cork->tx_flags = 0;
+ sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);
return 0;
}
@@ -1415,6 +1417,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
skb->mark = sk->sk_mark;
+ skb->tstamp = cork->transmit_time;
/*
* Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
* on dst refcount
@@ -1547,11 +1550,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
return;
+ ipcm_init(&ipc);
ipc.addr = daddr;
- ipc.opt = NULL;
- ipc.tx_flags = 0;
- ipc.ttl = 0;
- ipc.tos = -1;
if (replyopts.opt.opt.optlen) {
ipc.opt = &replyopts.opt;
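
The transmit_time carried from ipc.sockc into the cork and onto skb->tstamp (and set directly in raw_send_hdrinc() below) is the launch time supplied by the SO_TXTIME socket API from the same cycle. A hedged sketch of the userspace side; SO_TXTIME, SCM_TXTIME and struct sock_txtime are not defined in this diff, so they are guarded here as assumptions:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <time.h>

#ifndef SO_TXTIME
#define SO_TXTIME 61		/* assumption: constant new in this release */
#define SCM_TXTIME SO_TXTIME
struct sock_txtime {
	clockid_t clockid;
	uint32_t flags;
};
#endif

/* Attach a launch time (nanoseconds on the chosen clock) to one
 * datagram. Prerequisite, not shown: setsockopt(fd, SOL_SOCKET,
 * SO_TXTIME, &(struct sock_txtime){ .clockid = CLOCK_TAI }, ...).
 */
static ssize_t send_at(int fd, const void *buf, size_t len,
		       uint64_t txtime_ns)
{
	char control[CMSG_SPACE(sizeof(txtime_ns))] = { 0 };
	struct iovec iov = { (void *)buf, len };
	struct msghdr msg = { 0 };
	struct cmsghdr *cm;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);
	cm = CMSG_FIRSTHDR(&msg);
	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SCM_TXTIME;
	cm->cmsg_len = CMSG_LEN(sizeof(txtime_ns));
	memcpy(CMSG_DATA(cm), &txtime_ns, sizeof(txtime_ns));
	return sendmsg(fd, &msg, 0);
}
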
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9f79b9803a16..5660adcf7a04 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -60,6 +60,7 @@
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
+#include <linux/rhashtable.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
@@ -1051,7 +1052,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *skb;
int ret;
- if (assert == IGMPMSG_WHOLEPKT)
+ if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE)
skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
else
skb = alloc_skb(128, GFP_ATOMIC);
@@ -1059,7 +1060,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
if (!skb)
return -ENOBUFS;
- if (assert == IGMPMSG_WHOLEPKT) {
+ if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE) {
/* Ugly, but we have no choice with this interface.
* Duplicate old header, fix ihl, length etc.
* And all this only to mangle msg->im_msgtype and
@@ -1070,9 +1071,12 @@ static int ipmr_cache_report(struct mr_table *mrt,
skb_reset_transport_header(skb);
msg = (struct igmpmsg *)skb_network_header(skb);
memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
- msg->im_msgtype = IGMPMSG_WHOLEPKT;
+ msg->im_msgtype = assert;
msg->im_mbz = 0;
- msg->im_vif = mrt->mroute_reg_vif_num;
+ if (assert == IGMPMSG_WRVIFWHOLE)
+ msg->im_vif = vifi;
+ else
+ msg->im_vif = mrt->mroute_reg_vif_num;
ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
sizeof(struct iphdr));
@@ -1371,6 +1375,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
struct mr_table *mrt;
struct vifctl vif;
struct mfcctl mfc;
+ bool do_wrvifwhole;
u32 uval;
/* There's one exception to the lock - MRT_DONE which needs to unlock */
@@ -1501,10 +1506,12 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
break;
}
+ do_wrvifwhole = (val == IGMPMSG_WRVIFWHOLE);
val = !!val;
if (val != mrt->mroute_do_pim) {
mrt->mroute_do_pim = val;
mrt->mroute_do_assert = val;
+ mrt->mroute_do_wrvifwhole = do_wrvifwhole;
}
break;
case MRT_TABLE:
@@ -1982,6 +1989,9 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
MFC_ASSERT_THRESH)) {
c->_c.mfc_un.res.last_assert = jiffies;
ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
+ if (mrt->mroute_do_wrvifwhole)
+ ipmr_cache_report(mrt, skb, true_vifi,
+ IGMPMSG_WRVIFWHOLE);
}
goto dont_forward;
}
@@ -2658,7 +2668,9 @@ static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
mrt->mroute_reg_vif_num) ||
nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
mrt->mroute_do_assert) ||
- nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim))
+ nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim) ||
+ nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
+ mrt->mroute_do_wrvifwhole))
return false;
return true;
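
MRT_PIM effectively gains a third state here: passing IGMPMSG_WRVIFWHOLE instead of a plain 1 enables PIM and additionally requests whole-packet wrong-VIF reports. A sketch of a routing daemon opting in; the IGMPMSG_WRVIFWHOLE value is guarded since the constant is new in this series:

#include <linux/mroute.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IGMPMSG_WRVIFWHOLE
#define IGMPMSG_WRVIFWHOLE 4	/* assumption: added by this series */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
	int v = 1;

	if (fd < 0 || setsockopt(fd, IPPROTO_IP, MRT_INIT,
				 &v, sizeof(v)) < 0) {
		perror("MRT_INIT");	/* needs CAP_NET_ADMIN */
		return 1;
	}
	/* Any nonzero value enables PIM; this particular value also
	 * requests whole-packet IGMPMSG_WRVIFWHOLE wrong-VIF reports.
	 */
	v = IGMPMSG_WRVIFWHOLE;
	if (setsockopt(fd, IPPROTO_IP, MRT_PIM, &v, sizeof(v)) < 0)
		perror("MRT_PIM");
	close(fd);
	return 0;
}
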
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
index cafb0506c8c9..1ad9aa62a97b 100644
--- a/net/ipv4/ipmr_base.c
+++ b/net/ipv4/ipmr_base.c
@@ -2,6 +2,7 @@
* Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation
*/
+#include <linux/rhashtable.h>
#include <linux/mroute_base.h>
/* Sets everything common except 'dev', since that is done under locking */
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index e6774ccb7731..8d2e5dc9a827 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -98,59 +98,6 @@ int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
}
EXPORT_SYMBOL_GPL(nf_ip_reroute);
-__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol)
-{
- const struct iphdr *iph = ip_hdr(skb);
- __sum16 csum = 0;
-
- switch (skb->ip_summed) {
- case CHECKSUM_COMPLETE:
- if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
- break;
- if ((protocol == 0 && !csum_fold(skb->csum)) ||
- !csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - dataoff, protocol,
- skb->csum)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- }
- /* fall through */
- case CHECKSUM_NONE:
- if (protocol == 0)
- skb->csum = 0;
- else
- skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
- skb->len - dataoff,
- protocol, 0);
- csum = __skb_checksum_complete(skb);
- }
- return csum;
-}
-EXPORT_SYMBOL(nf_ip_checksum);
-
-__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, unsigned int len,
- u_int8_t protocol)
-{
- const struct iphdr *iph = ip_hdr(skb);
- __sum16 csum = 0;
-
- switch (skb->ip_summed) {
- case CHECKSUM_COMPLETE:
- if (len == skb->len - dataoff)
- return nf_ip_checksum(skb, hook, dataoff, protocol);
- /* fall through */
- case CHECKSUM_NONE:
- skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol,
- skb->len - dataoff, 0);
- skb->ip_summed = CHECKSUM_NONE;
- return __skb_checksum_complete_head(skb, dataoff + len);
- }
- return csum;
-}
-EXPORT_SYMBOL_GPL(nf_ip_checksum_partial);
-
int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict __always_unused)
{
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index bbfc356cb1b5..d9504adc47b3 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -9,22 +9,6 @@ config NF_DEFRAG_IPV4
tristate
default n
-config NF_CONNTRACK_IPV4
- tristate "IPv4 connection tracking support (required for NAT)"
- depends on NF_CONNTRACK
- default m if NETFILTER_ADVANCED=n
- select NF_DEFRAG_IPV4
- ---help---
- Connection tracking keeps a record of what packets have passed
- through your machine, in order to figure out how they are related
- into connections.
-
- This is IPv4 support on Layer 3 independent connection tracking.
- Layer 3 independent connection tracking is experimental scheme
- which generalize ip_conntrack to support other layer 3 protocols.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config NF_SOCKET_IPV4
tristate "IPv4 socket lookup support"
help
@@ -112,7 +96,7 @@ config NF_REJECT_IPV4
config NF_NAT_IPV4
tristate "IPv4 NAT"
- depends on NF_CONNTRACK_IPV4
+ depends on NF_CONNTRACK
default m if NETFILTER_ADVANCED=n
select NF_NAT
help
@@ -279,7 +263,7 @@ config IP_NF_TARGET_SYNPROXY
# NAT + specific targets: nf_conntrack
config IP_NF_NAT
tristate "iptables NAT support"
- depends on NF_CONNTRACK_IPV4
+ depends on NF_CONNTRACK
default m if NETFILTER_ADVANCED=n
select NF_NAT
select NF_NAT_IPV4
@@ -340,7 +324,7 @@ config IP_NF_MANGLE
config IP_NF_TARGET_CLUSTERIP
tristate "CLUSTERIP target support"
depends on IP_NF_MANGLE
- depends on NF_CONNTRACK_IPV4
+ depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
select NF_CONNTRACK_MARK
select NETFILTER_FAMILY_ARP
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 8394c17c269f..367993adf4d3 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -3,12 +3,6 @@
# Makefile for the netfilter modules on top of IPv4.
#
-# objects for l3 independent conntrack
-nf_conntrack_ipv4-y := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
-
-# connection tracking
-obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
-
nf_nat_ipv4-y := nf_nat_l3proto_ipv4.o nf_nat_proto_icmp.o
nf_nat_ipv4-$(CONFIG_NF_NAT_MASQUERADE_IPV4) += nf_nat_masquerade_ipv4.o
obj-$(CONFIG_NF_NAT_IPV4) += nf_nat_ipv4.o
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
deleted file mode 100644
index 9db988f9a4d7..000000000000
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ /dev/null
@@ -1,472 +0,0 @@
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/icmp.h>
-#include <linux/sysctl.h>
-#include <net/route.h>
-#include <net/ip.h>
-
-#include <linux/netfilter_ipv4.h>
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_helper.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
-#include <net/netfilter/nf_conntrack_zones.h>
-#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_conntrack_seqadj.h>
-#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
-#include <net/netfilter/nf_log.h>
-
-static int conntrack4_net_id __read_mostly;
-static DEFINE_MUTEX(register_ipv4_hooks);
-
-struct conntrack4_net {
- unsigned int users;
-};
-
-static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
- struct nf_conntrack_tuple *tuple)
-{
- const __be32 *ap;
- __be32 _addrs[2];
- ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr),
- sizeof(u_int32_t) * 2, _addrs);
- if (ap == NULL)
- return false;
-
- tuple->src.u3.ip = ap[0];
- tuple->dst.u3.ip = ap[1];
-
- return true;
-}
-
-static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_tuple *orig)
-{
- tuple->src.u3.ip = orig->dst.u3.ip;
- tuple->dst.u3.ip = orig->src.u3.ip;
-
- return true;
-}
-
-static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
- unsigned int *dataoff, u_int8_t *protonum)
-{
- const struct iphdr *iph;
- struct iphdr _iph;
-
- iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
- if (iph == NULL)
- return -NF_ACCEPT;
-
- /* Conntrack defragments packets, we might still see fragments
- * inside ICMP packets though. */
- if (iph->frag_off & htons(IP_OFFSET))
- return -NF_ACCEPT;
-
- *dataoff = nhoff + (iph->ihl << 2);
- *protonum = iph->protocol;
-
- /* Check bogus IP headers */
- if (*dataoff > skb->len) {
- pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
- "nhoff %u, ihl %u, skblen %u\n",
- nhoff, iph->ihl << 2, skb->len);
- return -NF_ACCEPT;
- }
-
- return NF_ACCEPT;
-}
-
-static unsigned int ipv4_helper(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- const struct nf_conn_help *help;
- const struct nf_conntrack_helper *helper;
-
- /* This is where we call the helper: as the packet goes out. */
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- return NF_ACCEPT;
-
- help = nfct_help(ct);
- if (!help)
- return NF_ACCEPT;
-
- /* rcu_read_lock()ed by nf_hook_thresh */
- helper = rcu_dereference(help->helper);
- if (!helper)
- return NF_ACCEPT;
-
- return helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
- ct, ctinfo);
-}
-
-static unsigned int ipv4_confirm(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
-
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- goto out;
-
- /* adjust seqs for loopback traffic only in outgoing direction */
- if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
- !nf_is_loopback_packet(skb)) {
- if (!nf_ct_seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) {
- NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
- return NF_DROP;
- }
- }
-out:
- /* We've seen it coming out the other side: confirm it */
- return nf_conntrack_confirm(skb);
-}
-
-static unsigned int ipv4_conntrack_in(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
-}
-
-static unsigned int ipv4_conntrack_local(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- if (ip_is_fragment(ip_hdr(skb))) { /* IP_NODEFRAG setsockopt set */
- enum ip_conntrack_info ctinfo;
- struct nf_conn *tmpl;
-
- tmpl = nf_ct_get(skb, &ctinfo);
- if (tmpl && nf_ct_is_template(tmpl)) {
- /* when skipping ct, clear templates to avoid fooling
- * later targets/matches
- */
- skb->_nfct = 0;
- nf_ct_put(tmpl);
- }
- return NF_ACCEPT;
- }
-
- return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
-}
-
-/* Connection tracking may drop packets, but never alters them, so
- make it the first hook. */
-static const struct nf_hook_ops ipv4_conntrack_ops[] = {
- {
- .hook = ipv4_conntrack_in,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_PRE_ROUTING,
- .priority = NF_IP_PRI_CONNTRACK,
- },
- {
- .hook = ipv4_conntrack_local,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_OUT,
- .priority = NF_IP_PRI_CONNTRACK,
- },
- {
- .hook = ipv4_helper,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_POST_ROUTING,
- .priority = NF_IP_PRI_CONNTRACK_HELPER,
- },
- {
- .hook = ipv4_confirm,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_POST_ROUTING,
- .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
- },
- {
- .hook = ipv4_helper,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP_PRI_CONNTRACK_HELPER,
- },
- {
- .hook = ipv4_confirm,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
- },
-};
-
-/* Fast function for those who don't want to parse /proc (and I don't
- blame them). */
-/* Reversing the socket's dst/src point of view gives us the reply
- mapping. */
-static int
-getorigdst(struct sock *sk, int optval, void __user *user, int *len)
-{
- const struct inet_sock *inet = inet_sk(sk);
- const struct nf_conntrack_tuple_hash *h;
- struct nf_conntrack_tuple tuple;
-
- memset(&tuple, 0, sizeof(tuple));
-
- lock_sock(sk);
- tuple.src.u3.ip = inet->inet_rcv_saddr;
- tuple.src.u.tcp.port = inet->inet_sport;
- tuple.dst.u3.ip = inet->inet_daddr;
- tuple.dst.u.tcp.port = inet->inet_dport;
- tuple.src.l3num = PF_INET;
- tuple.dst.protonum = sk->sk_protocol;
- release_sock(sk);
-
- /* We only do TCP and SCTP at the moment: is there a better way? */
- if (tuple.dst.protonum != IPPROTO_TCP &&
- tuple.dst.protonum != IPPROTO_SCTP) {
- pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
- return -ENOPROTOOPT;
- }
-
- if ((unsigned int) *len < sizeof(struct sockaddr_in)) {
- pr_debug("SO_ORIGINAL_DST: len %d not %zu\n",
- *len, sizeof(struct sockaddr_in));
- return -EINVAL;
- }
-
- h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
- if (h) {
- struct sockaddr_in sin;
- struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
-
- sin.sin_family = AF_INET;
- sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL]
- .tuple.dst.u.tcp.port;
- sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL]
- .tuple.dst.u3.ip;
- memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
-
- pr_debug("SO_ORIGINAL_DST: %pI4 %u\n",
- &sin.sin_addr.s_addr, ntohs(sin.sin_port));
- nf_ct_put(ct);
- if (copy_to_user(user, &sin, sizeof(sin)) != 0)
- return -EFAULT;
- else
- return 0;
- }
- pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n",
- &tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port),
- &tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port));
- return -ENOENT;
-}
-
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-
-#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/nfnetlink_conntrack.h>
-
-static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
- const struct nf_conntrack_tuple *tuple)
-{
- if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
- nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -1;
-}
-
-static const struct nla_policy ipv4_nla_policy[CTA_IP_MAX+1] = {
- [CTA_IP_V4_SRC] = { .type = NLA_U32 },
- [CTA_IP_V4_DST] = { .type = NLA_U32 },
-};
-
-static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
- struct nf_conntrack_tuple *t)
-{
- if (!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST])
- return -EINVAL;
-
- t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
- t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
-
- return 0;
-}
-#endif
-
-static struct nf_sockopt_ops so_getorigdst = {
- .pf = PF_INET,
- .get_optmin = SO_ORIGINAL_DST,
- .get_optmax = SO_ORIGINAL_DST+1,
- .get = getorigdst,
- .owner = THIS_MODULE,
-};
-
-static int ipv4_hooks_register(struct net *net)
-{
- struct conntrack4_net *cnet = net_generic(net, conntrack4_net_id);
- int err = 0;
-
- mutex_lock(&register_ipv4_hooks);
-
- cnet->users++;
- if (cnet->users > 1)
- goto out_unlock;
-
- err = nf_defrag_ipv4_enable(net);
- if (err) {
- cnet->users = 0;
- goto out_unlock;
- }
-
- err = nf_register_net_hooks(net, ipv4_conntrack_ops,
- ARRAY_SIZE(ipv4_conntrack_ops));
-
- if (err)
- cnet->users = 0;
- out_unlock:
- mutex_unlock(&register_ipv4_hooks);
- return err;
-}
-
-static void ipv4_hooks_unregister(struct net *net)
-{
- struct conntrack4_net *cnet = net_generic(net, conntrack4_net_id);
-
- mutex_lock(&register_ipv4_hooks);
- if (cnet->users && (--cnet->users == 0))
- nf_unregister_net_hooks(net, ipv4_conntrack_ops,
- ARRAY_SIZE(ipv4_conntrack_ops));
- mutex_unlock(&register_ipv4_hooks);
-}
-
-const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = {
- .l3proto = PF_INET,
- .pkt_to_tuple = ipv4_pkt_to_tuple,
- .invert_tuple = ipv4_invert_tuple,
- .get_l4proto = ipv4_get_l4proto,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .tuple_to_nlattr = ipv4_tuple_to_nlattr,
- .nlattr_to_tuple = ipv4_nlattr_to_tuple,
- .nla_policy = ipv4_nla_policy,
- .nla_size = NLA_ALIGN(NLA_HDRLEN + sizeof(u32)) + /* CTA_IP_V4_SRC */
- NLA_ALIGN(NLA_HDRLEN + sizeof(u32)), /* CTA_IP_V4_DST */
-#endif
- .net_ns_get = ipv4_hooks_register,
- .net_ns_put = ipv4_hooks_unregister,
- .me = THIS_MODULE,
-};
-
-module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
- &nf_conntrack_htable_size, 0600);
-
-MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
-MODULE_ALIAS("ip_conntrack");
-MODULE_LICENSE("GPL");
-
-static const struct nf_conntrack_l4proto * const builtin_l4proto4[] = {
- &nf_conntrack_l4proto_tcp4,
- &nf_conntrack_l4proto_udp4,
- &nf_conntrack_l4proto_icmp,
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- &nf_conntrack_l4proto_dccp4,
-#endif
-#ifdef CONFIG_NF_CT_PROTO_SCTP
- &nf_conntrack_l4proto_sctp4,
-#endif
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
- &nf_conntrack_l4proto_udplite4,
-#endif
-};
-
-static int ipv4_net_init(struct net *net)
-{
- return nf_ct_l4proto_pernet_register(net, builtin_l4proto4,
- ARRAY_SIZE(builtin_l4proto4));
-}
-
-static void ipv4_net_exit(struct net *net)
-{
- nf_ct_l4proto_pernet_unregister(net, builtin_l4proto4,
- ARRAY_SIZE(builtin_l4proto4));
-}
-
-static struct pernet_operations ipv4_net_ops = {
- .init = ipv4_net_init,
- .exit = ipv4_net_exit,
- .id = &conntrack4_net_id,
- .size = sizeof(struct conntrack4_net),
-};
-
-static int __init nf_conntrack_l3proto_ipv4_init(void)
-{
- int ret = 0;
-
- need_conntrack();
-
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- if (WARN_ON(nla_policy_len(ipv4_nla_policy, CTA_IP_MAX + 1) !=
- nf_conntrack_l3proto_ipv4.nla_size))
- return -EINVAL;
-#endif
- ret = nf_register_sockopt(&so_getorigdst);
- if (ret < 0) {
- pr_err("Unable to register netfilter socket option\n");
- return ret;
- }
-
- ret = register_pernet_subsys(&ipv4_net_ops);
- if (ret < 0) {
- pr_err("nf_conntrack_ipv4: can't register pernet ops\n");
- goto cleanup_sockopt;
- }
-
- ret = nf_ct_l4proto_register(builtin_l4proto4,
- ARRAY_SIZE(builtin_l4proto4));
- if (ret < 0)
- goto cleanup_pernet;
-
- ret = nf_ct_l3proto_register(&nf_conntrack_l3proto_ipv4);
- if (ret < 0) {
- pr_err("nf_conntrack_ipv4: can't register ipv4 proto.\n");
- goto cleanup_l4proto;
- }
-
- return ret;
-cleanup_l4proto:
- nf_ct_l4proto_unregister(builtin_l4proto4,
- ARRAY_SIZE(builtin_l4proto4));
- cleanup_pernet:
- unregister_pernet_subsys(&ipv4_net_ops);
- cleanup_sockopt:
- nf_unregister_sockopt(&so_getorigdst);
- return ret;
-}
-
-static void __exit nf_conntrack_l3proto_ipv4_fini(void)
-{
- synchronize_net();
- nf_ct_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
- nf_ct_l4proto_unregister(builtin_l4proto4,
- ARRAY_SIZE(builtin_l4proto4));
- unregister_pernet_subsys(&ipv4_net_ops);
- nf_unregister_sockopt(&so_getorigdst);
-}
-
-module_init(nf_conntrack_l3proto_ipv4_init);
-module_exit(nf_conntrack_l3proto_ipv4_fini);
diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
index 4388de0e5380..1e6f28c97d3a 100644
--- a/net/ipv4/netfilter/nf_log_ipv4.c
+++ b/net/ipv4/netfilter/nf_log_ipv4.c
@@ -35,7 +35,7 @@ static const struct nf_loginfo default_loginfo = {
};
/* One level of recursion won't kill us */
-static void dump_ipv4_packet(struct nf_log_buf *m,
+static void dump_ipv4_packet(struct net *net, struct nf_log_buf *m,
const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int iphoff)
{
@@ -183,7 +183,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m,
/* Max length: 3+maxlen */
if (!iphoff) { /* Only recurse once. */
nf_log_buf_add(m, "[");
- dump_ipv4_packet(m, info, skb,
+ dump_ipv4_packet(net, m, info, skb,
iphoff + ih->ihl*4+sizeof(_icmph));
nf_log_buf_add(m, "] ");
}
@@ -251,7 +251,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m,
/* Max length: 15 "UID=4294967295 " */
if ((logflags & NF_LOG_UID) && !iphoff)
- nf_log_dump_sk_uid_gid(m, skb->sk);
+ nf_log_dump_sk_uid_gid(net, m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */
if (!iphoff && skb->mark)
@@ -333,7 +333,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
if (in != NULL)
dump_ipv4_mac_header(m, loginfo, skb);
- dump_ipv4_packet(m, loginfo, skb, 0);
+ dump_ipv4_packet(net, m, loginfo, skb, 0);
nf_log_buf_close(m);
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 2ed64bca54e3..8d7aaf118a30 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -320,8 +320,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
chk_addr_ret = RTN_LOCAL;
- if ((net->ipv4.sysctl_ip_nonlocal_bind == 0 &&
- isk->freebind == 0 && isk->transparent == 0 &&
+ if ((!inet_can_nonlocal_bind(net, isk) &&
chk_addr_ret != RTN_LOCAL) ||
chk_addr_ret == RTN_MULTICAST ||
chk_addr_ret == RTN_BROADCAST)
@@ -361,8 +360,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
scoped);
rcu_read_unlock();
- if (!(net->ipv6.sysctl.ip_nonlocal_bind ||
- isk->freebind || isk->transparent || has_addr ||
+ if (!(ipv6_can_nonlocal_bind(net, isk) || has_addr ||
addr_type == IPV6_ADDR_ANY))
return -EADDRNOTAVAIL;
@@ -739,13 +737,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
/* no remote port */
}
- ipc.sockc.tsflags = sk->sk_tsflags;
- ipc.addr = inet->inet_saddr;
- ipc.opt = NULL;
- ipc.oif = sk->sk_bound_dev_if;
- ipc.tx_flags = 0;
- ipc.ttl = 0;
- ipc.tos = -1;
+ ipcm_init_sk(&ipc, inet);
if (msg->msg_controllen) {
err = ip_cmsg_send(sk, msg, &ipc, false);
@@ -769,8 +761,6 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
rcu_read_unlock();
}
- sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
-
saddr = ipc.addr;
ipc.addr = faddr = daddr;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 77350c1256ce..70289682a670 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -119,6 +119,7 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
+ SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
SNMP_MIB_SENTINEL
};
@@ -287,6 +288,8 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPDelivered", LINUX_MIB_TCPDELIVERED),
SNMP_MIB_ITEM("TCPDeliveredCE", LINUX_MIB_TCPDELIVEREDCE),
SNMP_MIB_ITEM("TCPAckCompressed", LINUX_MIB_TCPACKCOMPRESSED),
+ SNMP_MIB_ITEM("TCPZeroWindowDrop", LINUX_MIB_TCPZEROWINDOWDROP),
+ SNMP_MIB_ITEM("TCPRcvQDrop", LINUX_MIB_TCPRCVQDROP),
SNMP_MIB_SENTINEL
};
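
The three new counters land in /proc/net/netstat: IpExt ReasmOverlaps counts fragments dropped by the new overlap rule, while TcpExt TCPZeroWindowDrop and TCPRcvQDrop count the corresponding TCP receive-path drops. nstat(8) picks them up automatically; a quick standalone check:

#include <stdio.h>
#include <string.h>

/* Print the /proc/net/netstat header rows naming the new counters;
 * the value row follows each header in the file, and nstat(8) does
 * the pairing properly.
 */
int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/net/netstat", "r");

	if (!f) {
		perror("/proc/net/netstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "ReasmOverlaps") ||
		    strstr(line, "TCPZeroWindowDrop"))
			printf("%s", line);
	fclose(f);
	return 0;
}
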
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index abb3c9490c55..33df4d76db2d 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -381,6 +381,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
+ skb->tstamp = sockc->transmit_time;
skb_dst_set(skb, &rt->dst);
*rtp = NULL;
@@ -561,13 +562,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
daddr = inet->inet_daddr;
}
- ipc.sockc.tsflags = sk->sk_tsflags;
- ipc.addr = inet->inet_saddr;
- ipc.opt = NULL;
- ipc.tx_flags = 0;
- ipc.ttl = 0;
- ipc.tos = -1;
- ipc.oif = sk->sk_bound_dev_if;
+ ipcm_init_sk(&ipc, inet);
if (msg->msg_controllen) {
err = ip_cmsg_send(sk, msg, &ipc, false);
@@ -670,8 +665,6 @@ back_from_confirm:
&rt, msg->msg_flags, &ipc.sockc);
else {
- sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
-
if (!ipc.addr)
ipc.addr = fl4.daddr;
lock_sock(sk);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 1df6e97106d7..b678466da451 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1996,8 +1996,11 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
goto no_route;
}
- if (res->type == RTN_BROADCAST)
+ if (res->type == RTN_BROADCAST) {
+ if (IN_DEV_BFORWARD(in_dev))
+ goto make_route;
goto brd_input;
+ }
if (res->type == RTN_LOCAL) {
err = fib_validate_source(skb, saddr, daddr, tos,
@@ -2014,6 +2017,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (res->type != RTN_UNICAST)
goto martian_destination;
+make_route:
err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
out: return err;
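
With this change a broadcast destination no longer forces local delivery: if the ingress device has broadcast forwarding enabled, the packet takes the ordinary ip_mkroute_input() path via the new make_route label. IN_DEV_BFORWARD presumably follows the standard devconf accessor pattern — a sketch of the assumed shape:

	/* Assumption: mirrors the other IN_DEV_* helpers in
	 * include/linux/inetdevice.h, keyed on a new per-device
	 * bc_forwarding entry under /proc/sys/net/ipv4/conf/.
	 */
	#define IN_DEV_BFORWARD(in_dev)	IN_DEV_ANDCONF((in_dev), BC_FORWARDING)
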
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 5fa335fd3852..b92f422f2fa8 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -201,6 +201,23 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
return ret;
}
+static int ipv4_fwd_update_priority(struct ctl_table *table, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ struct net *net;
+ int ret;
+
+ net = container_of(table->data, struct net,
+ ipv4.sysctl_ip_fwd_update_priority);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (write && ret == 0)
+ call_netevent_notifiers(NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE,
+ net);
+
+ return ret;
+}
+
static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -664,6 +681,15 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "ip_forward_update_priority",
+ .data = &init_net.ipv4.sysctl_ip_fwd_update_priority,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = ipv4_fwd_update_priority,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
.procname = "ip_nonlocal_bind",
.data = &init_net.ipv4.sysctl_ip_nonlocal_bind,
.maxlen = sizeof(int),
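
Two idioms are worth noting in the new handler: per-netns sysctl registration duplicates this template table and rebases each .data pointer into the namespace's own struct net, which is why container_of() on table->data can recover the owning netns; and the netevent fires only on a successful write, so listeners (presumably drivers offloading IPv4 forwarding) resync exactly once per accepted change.
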
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4491faf83f4f..b8af2fec5ad5 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -507,7 +507,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
const struct tcp_sock *tp = tcp_sk(sk);
int state;
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, wait);
state = inet_sk_state_load(sk);
if (state == TCP_LISTEN)
@@ -817,8 +817,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
* This occurs when user tries to read
* from never connected socket.
*/
- if (!sock_flag(sk, SOCK_DONE))
- ret = -ENOTCONN;
+ ret = -ENOTCONN;
break;
}
if (!timeo) {
@@ -1241,7 +1240,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
/* 'common' sending to sendq */
}
- sockc.tsflags = sk->sk_tsflags;
+ sockcm_init(&sockc, sk);
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
if (unlikely(err)) {
@@ -1275,9 +1274,6 @@ restart:
int linear;
new_segment:
- /* Allocate new segment. If the interface is SG,
- * allocate skb fitting to single page.
- */
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
@@ -2042,13 +2038,10 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
break;
if (sk->sk_state == TCP_CLOSE) {
- if (!sock_flag(sk, SOCK_DONE)) {
- /* This occurs when user tries to read
- * from never connected socket.
- */
- copied = -ENOTCONN;
- break;
- }
+ /* This occurs when user tries to read
+ * from never connected socket.
+ */
+ copied = -ENOTCONN;
break;
}
@@ -2538,7 +2531,6 @@ int tcp_disconnect(struct sock *sk, int flags)
struct inet_sock *inet = inet_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- int err = 0;
int old_state = sk->sk_state;
if (old_state != TCP_CLOSE)
@@ -2576,6 +2568,7 @@ int tcp_disconnect(struct sock *sk, int flags)
sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);
tp->srtt_us = 0;
+ tp->rcv_rtt_last_tsecr = 0;
tp->write_seq += tp->max_window + 2;
if (tp->write_seq == 0)
tp->write_seq = 1;
@@ -2600,6 +2593,10 @@ int tcp_disconnect(struct sock *sk, int flags)
sk->sk_rx_dst = NULL;
tcp_saved_syn_free(tp);
tp->compressed_ack = 0;
+ tp->bytes_sent = 0;
+ tp->bytes_retrans = 0;
+ tp->dsack_dups = 0;
+ tp->reord_seen = 0;
/* Clean up fastopen related fields */
tcp_free_fastopen_req(tp);
@@ -2614,7 +2611,7 @@ int tcp_disconnect(struct sock *sk, int flags)
}
sk->sk_error_report(sk);
- return err;
+ return 0;
}
EXPORT_SYMBOL(tcp_disconnect);
@@ -2995,7 +2992,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
if (val < 0)
err = -EINVAL;
else
- icsk->icsk_user_timeout = msecs_to_jiffies(val);
+ icsk->icsk_user_timeout = val;
break;
case TCP_FASTOPEN:
@@ -3207,10 +3204,41 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_delivery_rate = rate64;
info->tcpi_delivered = tp->delivered;
info->tcpi_delivered_ce = tp->delivered_ce;
+ info->tcpi_bytes_sent = tp->bytes_sent;
+ info->tcpi_bytes_retrans = tp->bytes_retrans;
+ info->tcpi_dsack_dups = tp->dsack_dups;
+ info->tcpi_reord_seen = tp->reord_seen;
unlock_sock_fast(sk, slow);
}
EXPORT_SYMBOL_GPL(tcp_get_info);
+static size_t tcp_opt_stats_get_size(void)
+{
+ return
+ nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */
+ nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */
+ nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */
+ nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */
+ nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */
+ nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */
+ nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */
+ nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */
+ nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */
+ nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */
+ nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */
+ nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */
+ nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */
+ nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */
+ nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */
+ nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */
+ nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */
+ nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */
+ nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */
+ nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */
+ nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
+ 0;
+}
+
struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -3219,9 +3247,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
u64 rate64;
u32 rate;
- stats = alloc_skb(7 * nla_total_size_64bit(sizeof(u64)) +
- 7 * nla_total_size(sizeof(u32)) +
- 3 * nla_total_size(sizeof(u8)), GFP_ATOMIC);
+ stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC);
if (!stats)
return NULL;
@@ -3257,6 +3283,13 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);
+ nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent,
+ TCP_NLA_PAD);
+ nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans,
+ TCP_NLA_PAD);
+ nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
+ nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
+
return stats;
}
@@ -3451,7 +3484,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
break;
case TCP_USER_TIMEOUT:
- val = jiffies_to_msecs(icsk->icsk_user_timeout);
+ val = icsk->icsk_user_timeout;
break;
case TCP_FASTOPEN:
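
The TCP_USER_TIMEOUT get/set paths stop round-tripping through jiffies and keep the value in milliseconds end to end; the point-of-use conversions move into tcp_timer.c below. The userspace contract is unchanged — the option is specified in milliseconds either way:

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>

	static int set_user_timeout(int fd)
	{
		/* Abort the connection if transmitted data stays
		 * unacknowledged for more than 30 seconds.
		 */
		unsigned int tmo_ms = 30 * 1000;

		return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
				  &tmo_ms, sizeof(tmo_ms));
	}
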
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 4bfff3c87e8e..13d34427ca3d 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -205,7 +205,11 @@ static u32 bbr_bw(const struct sock *sk)
*/
static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
{
- rate *= tcp_mss_to_mtu(sk, tcp_sk(sk)->mss_cache);
+ unsigned int mss = tcp_sk(sk)->mss_cache;
+
+ if (!tcp_needs_internal_pacing(sk))
+ mss = tcp_mss_to_mtu(sk, mss);
+ rate *= mss;
rate *= gain;
rate >>= BBR_SCALE;
rate *= USEC_PER_SEC;
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 8b637f9f23a2..ca61e2a659e7 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -136,7 +136,7 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
*/
if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
__tcp_send_ack(sk, ca->prior_rcv_nxt);
- tcp_enter_quickack_mode(sk, 1);
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
}
ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -157,7 +157,7 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
*/
if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
__tcp_send_ack(sk, ca->prior_rcv_nxt);
- tcp_enter_quickack_mode(sk, 1);
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
}
ca->prior_rcv_nxt = tp->rcv_nxt;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f9dcb29be12d..4c2dd9f863f7 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -78,6 +78,7 @@
#include <linux/errqueue.h>
#include <trace/events/tcp.h>
#include <linux/static_key.h>
+#include <net/busy_poll.h>
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
@@ -244,16 +245,16 @@ static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}
-static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
+static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
{
if (tcp_hdr(skb)->cwr) {
- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+ tcp_sk(sk)->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
/* If the sender is telling us it has entered CWR, then its
* cwnd may be very low (even just 1 packet), so we should ACK
* immediately.
*/
- tcp_enter_quickack_mode((struct sock *)tp, 2);
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
}
}
@@ -590,9 +591,12 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
{
struct tcp_sock *tp = tcp_sk(sk);
- if (tp->rx_opt.rcv_tsecr &&
- (TCP_SKB_CB(skb)->end_seq -
- TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
+ if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr)
+ return;
+ tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;
+
+ if (TCP_SKB_CB(skb)->end_seq -
+ TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) {
u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
u32 delta_us;
@@ -877,6 +881,7 @@ static void tcp_dsack_seen(struct tcp_sock *tp)
{
tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
tp->rack.dsack_seen = 1;
+ tp->dsack_dups++;
}
/* It's reordering when higher sequence was delivered (i.e. sacked) before
@@ -908,8 +913,8 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
}
- tp->rack.reord = 1;
/* This exciting event is worth to be remembered. 8) */
+ tp->reord_seen++;
NET_INC_STATS(sock_net(sk),
ts ? LINUX_MIB_TCPTSREORDER : LINUX_MIB_TCPSACKREORDER);
}
@@ -1873,6 +1878,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
tp->reordering = min_t(u32, tp->packets_out + addend,
sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
+ tp->reord_seen++;
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
}
@@ -3466,7 +3472,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
static void tcp_store_ts_recent(struct tcp_sock *tp)
{
tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
- tp->rx_opt.ts_recent_stamp = get_seconds();
+ tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
}
static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
@@ -4347,6 +4353,11 @@ static bool tcp_try_coalesce(struct sock *sk,
if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
return false;
+#ifdef CONFIG_TLS_DEVICE
+ if (from->decrypted != to->decrypted)
+ return false;
+#endif
+
if (!skb_try_coalesce(to, from, fragstolen, &delta))
return false;
@@ -4642,8 +4653,10 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
skb->data_len = data_len;
skb->len = size;
- if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+ if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
goto err_free;
+ }
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
if (err)
@@ -4690,7 +4703,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
skb_dst_drop(skb);
__skb_pull(skb, tcp_hdr(skb)->doff * 4);
- tcp_ecn_accept_cwr(tp, skb);
+ tcp_ecn_accept_cwr(sk, skb);
tp->rx_opt.dsack = 0;
@@ -4699,18 +4712,21 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
* Out of sequence packets to the out_of_order_queue.
*/
if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
- if (tcp_receive_window(tp) == 0)
+ if (tcp_receive_window(tp) == 0) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
goto out_of_window;
+ }
/* Ok. In sequence. In window. */
queue_and_out:
if (skb_queue_len(&sk->sk_receive_queue) == 0)
sk_forced_mem_schedule(sk, skb->truesize);
- else if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+ else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
goto drop;
+ }
eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
- tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
if (skb->len)
tcp_event_data_recv(sk, skb);
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
@@ -4719,11 +4735,11 @@ queue_and_out:
if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
tcp_ofo_queue(sk);
- /* RFC2581. 4.2. SHOULD send immediate ACK, when
+ /* RFC5681. 4.2. SHOULD send immediate ACK, when
* gap in queue is filled.
*/
if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
- inet_csk(sk)->icsk_ack.pingpong = 0;
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
}
if (tp->rx_opt.num_sacks)
@@ -4766,8 +4782,10 @@ drop:
/* If window is closed, drop tail of packet. But after
* remembering D-SACK for its head made in previous line.
*/
- if (!tcp_receive_window(tp))
+ if (!tcp_receive_window(tp)) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
goto out_of_window;
+ }
goto queue_and_out;
}
@@ -4885,6 +4903,9 @@ restart:
break;
memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
+#ifdef CONFIG_TLS_DEVICE
+ nskb->decrypted = skb->decrypted;
+#endif
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
if (list)
__skb_queue_before(list, skb, nskb);
@@ -4912,6 +4933,10 @@ restart:
skb == tail ||
(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
goto end;
+#ifdef CONFIG_TLS_DEVICE
+ if (skb->decrypted != nskb->decrypted)
+ goto end;
+#endif
}
}
}
@@ -5154,7 +5179,9 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
(tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
__tcp_select_window(sk) >= tp->rcv_wnd)) ||
/* We ACK each frame or... */
- tcp_in_quickack_mode(sk)) {
+ tcp_in_quickack_mode(sk) ||
+ /* Protocol state mandates a one-time immediate ACK */
+ inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOW) {
send_now:
tcp_send_ack(sk);
return;
@@ -5530,6 +5557,11 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
tcp_ack(sk, skb, 0);
__kfree_skb(skb);
tcp_data_snd_check(sk);
+ /* When receiving pure ack in fast path, update
+ * last ts ecr directly instead of calling
+ * tcp_rcv_rtt_measure_ts()
+ */
+ tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;
return;
} else { /* Header too small */
TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
@@ -5631,6 +5663,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
if (skb) {
icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
security_inet_conn_established(sk, skb);
+ sk_mark_napi_id(sk, skb);
}
tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB);
@@ -6459,6 +6492,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
tcp_rsk(req)->snt_isn = isn;
tcp_rsk(req)->txhash = net_tx_rndhash();
tcp_openreq_init_rwin(req, sk, dst);
+ sk_rx_queue_set(req_to_sk(req), skb);
if (!want_cookie) {
tcp_reqsk_record_syn(sk, req, skb);
fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3b2711e33e4c..9e041fa5c545 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -155,7 +155,8 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
and use initial timestamp retrieved from peer table.
*/
if (tcptw->tw_ts_recent_stamp &&
- (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
+ (!twp || (reuse && time_after32(ktime_get_seconds(),
+ tcptw->tw_ts_recent_stamp)))) {
/* In case of repair and re-using TIME-WAIT sockets we still
* want to be sure that it is safe as above but honor the
* sequence numbers and time stamps set as part of the repair
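
tw_ts_recent_stamp moves off get_seconds() — an unsigned long that truncates on 32-bit architectures and is y2038-unsafe — onto ktime_get_seconds(), with the age test rewritten as a wraparound-safe comparison. For reference, a sketch of time_after32() semantics (the real macro lives in include/linux/time.h):

	/* True iff a is later than b on the u32 circle, the same trick
	 * time_after() uses for jiffies.
	 */
	#define time_after32(a, b)	((s32)((u32)(b) - (u32)(a)) < 0)
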
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 1dda1341a223..75ef332a7caf 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -144,7 +144,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
tw->tw_substate = TCP_TIME_WAIT;
tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if (tmp_opt.saw_tstamp) {
- tcptw->tw_ts_recent_stamp = get_seconds();
+ tcptw->tw_ts_recent_stamp = ktime_get_seconds();
tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
}
@@ -189,7 +189,7 @@ kill:
if (tmp_opt.saw_tstamp) {
tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
- tcptw->tw_ts_recent_stamp = get_seconds();
+ tcptw->tw_ts_recent_stamp = ktime_get_seconds();
}
inet_twsk_put(tw);
@@ -449,119 +449,122 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
struct sk_buff *skb)
{
struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
+ const struct inet_request_sock *ireq = inet_rsk(req);
+ struct tcp_request_sock *treq = tcp_rsk(req);
+ struct inet_connection_sock *newicsk;
+ struct tcp_sock *oldtp, *newtp;
- if (newsk) {
- const struct inet_request_sock *ireq = inet_rsk(req);
- struct tcp_request_sock *treq = tcp_rsk(req);
- struct inet_connection_sock *newicsk = inet_csk(newsk);
- struct tcp_sock *newtp = tcp_sk(newsk);
- struct tcp_sock *oldtp = tcp_sk(sk);
-
- smc_check_reset_syn_req(oldtp, req, newtp);
-
- /* Now setup tcp_sock */
- newtp->pred_flags = 0;
-
- newtp->rcv_wup = newtp->copied_seq =
- newtp->rcv_nxt = treq->rcv_isn + 1;
- newtp->segs_in = 1;
-
- newtp->snd_sml = newtp->snd_una =
- newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
-
- INIT_LIST_HEAD(&newtp->tsq_node);
- INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
-
- tcp_init_wl(newtp, treq->rcv_isn);
-
- newtp->srtt_us = 0;
- newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
- minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
- newicsk->icsk_rto = TCP_TIMEOUT_INIT;
- newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
-
- newtp->packets_out = 0;
- newtp->retrans_out = 0;
- newtp->sacked_out = 0;
- newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
- newtp->tlp_high_seq = 0;
- newtp->lsndtime = tcp_jiffies32;
- newsk->sk_txhash = treq->txhash;
- newtp->last_oow_ack_time = 0;
- newtp->total_retrans = req->num_retrans;
-
- /* So many TCP implementations out there (incorrectly) count the
- * initial SYN frame in their delayed-ACK and congestion control
- * algorithms that we must have the following bandaid to talk
- * efficiently to them. -DaveM
- */
- newtp->snd_cwnd = TCP_INIT_CWND;
- newtp->snd_cwnd_cnt = 0;
-
- /* There's a bubble in the pipe until at least the first ACK. */
- newtp->app_limited = ~0U;
-
- tcp_init_xmit_timers(newsk);
- newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
-
- newtp->rx_opt.saw_tstamp = 0;
-
- newtp->rx_opt.dsack = 0;
- newtp->rx_opt.num_sacks = 0;
-
- newtp->urg_data = 0;
-
- if (sock_flag(newsk, SOCK_KEEPOPEN))
- inet_csk_reset_keepalive_timer(newsk,
- keepalive_time_when(newtp));
-
- newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
- newtp->rx_opt.sack_ok = ireq->sack_ok;
- newtp->window_clamp = req->rsk_window_clamp;
- newtp->rcv_ssthresh = req->rsk_rcv_wnd;
- newtp->rcv_wnd = req->rsk_rcv_wnd;
- newtp->rx_opt.wscale_ok = ireq->wscale_ok;
- if (newtp->rx_opt.wscale_ok) {
- newtp->rx_opt.snd_wscale = ireq->snd_wscale;
- newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
- } else {
- newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
- newtp->window_clamp = min(newtp->window_clamp, 65535U);
- }
- newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
- newtp->rx_opt.snd_wscale);
- newtp->max_window = newtp->snd_wnd;
-
- if (newtp->rx_opt.tstamp_ok) {
- newtp->rx_opt.ts_recent = req->ts_recent;
- newtp->rx_opt.ts_recent_stamp = get_seconds();
- newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
- } else {
- newtp->rx_opt.ts_recent_stamp = 0;
- newtp->tcp_header_len = sizeof(struct tcphdr);
- }
- newtp->tsoffset = treq->ts_off;
+ if (!newsk)
+ return NULL;
+
+ newicsk = inet_csk(newsk);
+ newtp = tcp_sk(newsk);
+ oldtp = tcp_sk(sk);
+
+ smc_check_reset_syn_req(oldtp, req, newtp);
+
+ /* Now setup tcp_sock */
+ newtp->pred_flags = 0;
+
+ newtp->rcv_wup = newtp->copied_seq =
+ newtp->rcv_nxt = treq->rcv_isn + 1;
+ newtp->segs_in = 1;
+
+ newtp->snd_sml = newtp->snd_una =
+ newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
+
+ INIT_LIST_HEAD(&newtp->tsq_node);
+ INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
+
+ tcp_init_wl(newtp, treq->rcv_isn);
+
+ newtp->srtt_us = 0;
+ newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
+ minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
+ newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+ newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
+
+ newtp->packets_out = 0;
+ newtp->retrans_out = 0;
+ newtp->sacked_out = 0;
+ newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ newtp->tlp_high_seq = 0;
+ newtp->lsndtime = tcp_jiffies32;
+ newsk->sk_txhash = treq->txhash;
+ newtp->last_oow_ack_time = 0;
+ newtp->total_retrans = req->num_retrans;
+
+ /* So many TCP implementations out there (incorrectly) count the
+ * initial SYN frame in their delayed-ACK and congestion control
+ * algorithms that we must have the following bandaid to talk
+ * efficiently to them. -DaveM
+ */
+ newtp->snd_cwnd = TCP_INIT_CWND;
+ newtp->snd_cwnd_cnt = 0;
+
+ /* There's a bubble in the pipe until at least the first ACK. */
+ newtp->app_limited = ~0U;
+
+ tcp_init_xmit_timers(newsk);
+ newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
+
+ newtp->rx_opt.saw_tstamp = 0;
+
+ newtp->rx_opt.dsack = 0;
+ newtp->rx_opt.num_sacks = 0;
+
+ newtp->urg_data = 0;
+
+ if (sock_flag(newsk, SOCK_KEEPOPEN))
+ inet_csk_reset_keepalive_timer(newsk,
+ keepalive_time_when(newtp));
+
+ newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
+ newtp->rx_opt.sack_ok = ireq->sack_ok;
+ newtp->window_clamp = req->rsk_window_clamp;
+ newtp->rcv_ssthresh = req->rsk_rcv_wnd;
+ newtp->rcv_wnd = req->rsk_rcv_wnd;
+ newtp->rx_opt.wscale_ok = ireq->wscale_ok;
+ if (newtp->rx_opt.wscale_ok) {
+ newtp->rx_opt.snd_wscale = ireq->snd_wscale;
+ newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
+ } else {
+ newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
+ newtp->window_clamp = min(newtp->window_clamp, 65535U);
+ }
+ newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
+ newtp->max_window = newtp->snd_wnd;
+
+ if (newtp->rx_opt.tstamp_ok) {
+ newtp->rx_opt.ts_recent = req->ts_recent;
+ newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
+ newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
+ } else {
+ newtp->rx_opt.ts_recent_stamp = 0;
+ newtp->tcp_header_len = sizeof(struct tcphdr);
+ }
+ newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
- newtp->md5sig_info = NULL; /*XXX*/
- if (newtp->af_specific->md5_lookup(sk, newsk))
- newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
+ newtp->md5sig_info = NULL; /*XXX*/
+ if (newtp->af_specific->md5_lookup(sk, newsk))
+ newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
- if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
- newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
- newtp->rx_opt.mss_clamp = req->mss;
- tcp_ecn_openreq_child(newtp, req);
- newtp->fastopen_req = NULL;
- newtp->fastopen_rsk = NULL;
- newtp->syn_data_acked = 0;
- newtp->rack.mstamp = 0;
- newtp->rack.advanced = 0;
- newtp->rack.reo_wnd_steps = 1;
- newtp->rack.last_delivered = 0;
- newtp->rack.reo_wnd_persist = 0;
- newtp->rack.dsack_seen = 0;
-
- __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
- }
+ if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
+ newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
+ newtp->rx_opt.mss_clamp = req->mss;
+ tcp_ecn_openreq_child(newtp, req);
+ newtp->fastopen_req = NULL;
+ newtp->fastopen_rsk = NULL;
+ newtp->syn_data_acked = 0;
+ newtp->rack.mstamp = 0;
+ newtp->rack.advanced = 0;
+ newtp->rack.reo_wnd_steps = 1;
+ newtp->rack.last_delivered = 0;
+ newtp->rack.reo_wnd_persist = 0;
+ newtp->rack.dsack_seen = 0;
+
+ __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
+
return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
@@ -600,7 +603,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
* it can be estimated (approximately)
* from another data.
*/
- tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
+ tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
}
}
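
The large hunk above is essentially mechanical: tcp_create_openreq_child() trades its if (newsk) { ... } wrapper for an early return NULL, pulling the whole body back one indent level. The substantive change in this file is the get_seconds() → ktime_get_seconds() conversions, matching the timestamp-type switch in tcp_ipv4.c.
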
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 8cc7c3487330..870b0a335061 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -180,9 +180,9 @@ out:
return segs;
}
-struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
- struct sk_buff **pp = NULL;
+ struct sk_buff *pp = NULL;
struct sk_buff *p;
struct tcphdr *th;
struct tcphdr *th2;
@@ -220,7 +220,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
len = skb_gro_len(skb);
flags = tcp_flag_word(th);
- for (; (p = *head); head = &p->next) {
+ list_for_each_entry(p, head, list) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
@@ -233,7 +233,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
goto found;
}
-
+ p = NULL;
goto out_check_final;
found:
@@ -262,8 +262,11 @@ found:
flush |= (len - 1) >= mss;
flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
+#ifdef CONFIG_TLS_DEVICE
+ flush |= p->decrypted ^ skb->decrypted;
+#endif
- if (flush || skb_gro_receive(head, skb)) {
+ if (flush || skb_gro_receive(p, skb)) {
mss = 1;
goto out_check_final;
}
@@ -277,7 +280,7 @@ out_check_final:
TCP_FLAG_FIN));
if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
- pp = head;
+ pp = p;
out:
NAPI_GRO_CB(skb)->flush |= (flush != 0);
@@ -302,7 +305,7 @@ int tcp_gro_complete(struct sk_buff *skb)
}
EXPORT_SYMBOL(tcp_gro_complete);
-static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+static struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
/* Don't bother verifying checksum if we're going to flush anyway. */
if (!NAPI_GRO_CB(skb)->flush &&
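
The signature churn in this file is part of a tree-wide GRO conversion from hand-rolled sk_buff chains to standard list_head lists: receive handlers take a struct list_head *head, iterate with list_for_each_entry(), and report a flow to flush by returning the matched skb itself rather than a struct sk_buff ** slot (hence the explicit p = NULL when no flow matches). The shape of the change, schematically:

	/* Old style: singly linked via skb->next; result is a slot */
	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;
		/* ... match found ... */
	}
	pp = head;

	/* New style: list_head iteration; result is the skb itself */
	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;
		/* ... match found ... */
	}
	pp = p;
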
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index c4172c1fb198..597dbd749f05 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -977,17 +977,6 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-/* BBR congestion control needs pacing.
- * Same remark for SO_MAX_PACING_RATE.
- * sch_fq packet scheduler is efficiently handling pacing,
- * but is not always installed/used.
- * Return true if TCP stack should pace packets itself.
- */
-static bool tcp_needs_internal_pacing(const struct sock *sk)
-{
- return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
-}
-
static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
{
u64 len_ns;
@@ -999,9 +988,6 @@ static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
if (!rate || rate == ~0U)
return;
- /* Should account for header sizes as sch_fq does,
- * but lets make things simple.
- */
len_ns = (u64)skb->len * NSEC_PER_SEC;
do_div(len_ns, rate);
hrtimer_start(&tcp_sk(sk)->pacing_timer,
@@ -1150,6 +1136,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
if (skb->len != tcp_header_size) {
tcp_event_data_sent(tp, sk);
tp->data_segs_out += tcp_skb_pcount(skb);
+ tp->bytes_sent += skb->len - tcp_header_size;
tcp_internal_pacing(sk, skb);
}
@@ -2711,9 +2698,8 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *next_skb = skb_rb_next(skb);
- int skb_size, next_skb_size;
+ int next_skb_size;
- skb_size = skb->len;
next_skb_size = next_skb->len;
BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
@@ -2884,6 +2870,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
tp->total_retrans += segs;
+ tp->bytes_retrans += skb->len;
/* make sure skb->data is aligned on arches that require it
* and check if ack-trimming & collapsing extended the headroom
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index c61240e43923..4dff40dad4dc 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -146,6 +146,10 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
rs->prior_mstamp); /* ack phase */
rs->interval_us = max(snd_us, ack_us);
+ /* Record both segment send and ack receive intervals */
+ rs->snd_interval_us = snd_us;
+ rs->rcv_interval_us = ack_us;
+
/* Normally we expect interval_us >= min-rtt.
* Note that rate may still be over-estimated when a spuriously
 * retransmitted skb was first (s)acked because "interval_us"
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index 71593e4400ab..c81aadff769b 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -25,7 +25,7 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- if (!tp->rack.reord) {
+ if (!tp->reord_seen) {
/* If reordering has not been observed, be aggressive during
* the recovery or starting the recovery by DUPACK threshold.
*/
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 3b3611729928..7fdf222a0bdf 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -22,6 +22,35 @@
#include <linux/gfp.h>
#include <net/tcp.h>
+static u32 tcp_retransmit_stamp(const struct sock *sk)
+{
+ u32 start_ts = tcp_sk(sk)->retrans_stamp;
+
+ if (unlikely(!start_ts)) {
+ struct sk_buff *head = tcp_rtx_queue_head(sk);
+
+ if (!head)
+ return 0;
+ start_ts = tcp_skb_timestamp(head);
+ }
+ return start_ts;
+}
+
+static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ u32 elapsed, start_ts;
+
+ start_ts = tcp_retransmit_stamp(sk);
+ if (!icsk->icsk_user_timeout || !start_ts)
+ return icsk->icsk_rto;
+ elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
+ if (elapsed >= icsk->icsk_user_timeout)
+ return 1; /* user timeout has passed; fire ASAP */
+ else
+ return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(icsk->icsk_user_timeout - elapsed));
+}
+
/**
* tcp_write_err() - close socket and save error info
* @sk: The socket the error has appeared on.
@@ -166,14 +195,9 @@ static bool retransmits_timed_out(struct sock *sk,
if (!inet_csk(sk)->icsk_retransmits)
return false;
- start_ts = tcp_sk(sk)->retrans_stamp;
- if (unlikely(!start_ts)) {
- struct sk_buff *head = tcp_rtx_queue_head(sk);
-
- if (!head)
- return false;
- start_ts = tcp_skb_timestamp(head);
- }
+ start_ts = tcp_retransmit_stamp(sk);
+ if (!start_ts)
+ return false;
if (likely(timeout == 0)) {
linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
@@ -183,8 +207,9 @@ static bool retransmits_timed_out(struct sock *sk,
else
timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
+ timeout = jiffies_to_msecs(timeout);
}
- return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= jiffies_to_msecs(timeout);
+ return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= timeout;
}
/* A write timeout has occurred. Process the after effects. */
@@ -337,8 +362,7 @@ static void tcp_probe_timer(struct sock *sk)
if (!start_ts)
skb->skb_mstamp = tp->tcp_mstamp;
else if (icsk->icsk_user_timeout &&
- (s32)(tcp_time_stamp(tp) - start_ts) >
- jiffies_to_msecs(icsk->icsk_user_timeout))
+ (s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout)
goto abort;
max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
@@ -535,7 +559,8 @@ out_reset_timer:
/* Use normal (exponential) backoff */
icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
}
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+ tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
__sk_dst_reset(sk);
@@ -672,7 +697,7 @@ static void tcp_keepalive_timer (struct timer_list *t)
* to determine when to timeout instead.
*/
if ((icsk->icsk_user_timeout != 0 &&
- elapsed >= icsk->icsk_user_timeout &&
+ elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) &&
icsk->icsk_probes_out > 0) ||
(icsk->icsk_user_timeout == 0 &&
icsk->icsk_probes_out >= keepalive_probes(tp))) {
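
A quick worked example of the new clamp: with TCP_USER_TIMEOUT set to 5000 ms and the oldest outstanding skb sent 4800 ms ago, elapsed is 4800, so the retransmit timer is armed for min(icsk_rto, msecs_to_jiffies(200)) instead of a full exponentially backed-off RTO; once elapsed reaches 5000 ms the timer fires after one jiffy so the write-timeout path can abort close to the user's deadline. Note the keepalive path now converts in the other direction (msecs_to_jiffies()) because icsk_user_timeout is stored in milliseconds.
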
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 24e116ddae79..f4e35b2ff8b8 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -221,11 +221,12 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
(sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
inet_rcv_saddr_equal(sk, sk2, false)) {
- return reuseport_add_sock(sk, sk2);
+ return reuseport_add_sock(sk, sk2,
+ inet_rcv_saddr_any(sk));
}
}
- return reuseport_alloc(sk);
+ return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}
/**
@@ -498,6 +499,8 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
daddr, hnum, dif, sdif,
exact_dif, hslot2, skb);
}
+ if (unlikely(IS_ERR(result)))
+ return NULL;
return result;
}
begin:
@@ -512,6 +515,8 @@ begin:
saddr, sport);
result = reuseport_select_sock(sk, hash, skb,
sizeof(struct udphdr));
+ if (unlikely(IS_ERR(result)))
+ return NULL;
if (result)
return result;
}
@@ -926,11 +931,6 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
return -EOPNOTSUPP;
- ipc.opt = NULL;
- ipc.tx_flags = 0;
- ipc.ttl = 0;
- ipc.tos = -1;
-
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
fl4 = &inet->cork.fl.u.ip4;
@@ -977,9 +977,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
connected = 1;
}
- ipc.sockc.tsflags = sk->sk_tsflags;
- ipc.addr = inet->inet_saddr;
- ipc.oif = sk->sk_bound_dev_if;
+ ipcm_init_sk(&ipc, inet);
ipc.gso_size = up->gso_size;
if (msg->msg_controllen) {
@@ -1027,8 +1025,6 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
saddr = ipc.addr;
ipc.addr = faddr = daddr;
- sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
-
if (ipc.opt && ipc.opt->opt.srr) {
if (!daddr) {
err = -EINVAL;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 69c54540d5b4..0c0522b79b43 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -343,10 +343,11 @@ out:
return segs;
}
-struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
- struct udphdr *uh, udp_lookup_t lookup)
+struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ struct udphdr *uh, udp_lookup_t lookup)
{
- struct sk_buff *p, **pp = NULL;
+ struct sk_buff *pp = NULL;
+ struct sk_buff *p;
struct udphdr *uh2;
unsigned int off = skb_gro_offset(skb);
int flush = 1;
@@ -371,7 +372,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
unflush:
flush = 0;
- for (p = *head; p; p = p->next) {
+ list_for_each_entry(p, head, list) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
@@ -399,8 +400,8 @@ out:
}
EXPORT_SYMBOL(udp_gro_receive);
-static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *udp4_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
{
struct udphdr *uh = udp_gro_udphdr(skb);
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index b3885ca22d6f..613282c65a10 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -15,7 +15,7 @@ menuconfig IPV6
Documentation/networking/ipv6.txt and read the HOWTO at
<http://www.tldp.org/HOWTO/Linux+IPv6-HOWTO/>
- To compile this protocol support as a module, choose M here: the
+ To compile this protocol support as a module, choose M here: the
module will be called ipv6.
if IPV6
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f66a1cae3366..2fac4ad74867 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -385,8 +385,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
if (ndev->cnf.stable_secret.initialized)
ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
- else
- ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode;
ndev->cnf.mtu6 = dev->mtu;
ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
@@ -5211,7 +5209,9 @@ static inline size_t inet6_ifla6_size(void)
+ nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
+ nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
+ nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
- + nla_total_size(sizeof(struct in6_addr)); /* IFLA_INET6_TOKEN */
+ + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */
+ + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */
+ + 0;
}
static inline size_t inet6_if_nlmsg_size(void)
@@ -5893,32 +5893,31 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
loff_t *ppos)
{
int ret = 0;
- int new_val;
+ u32 new_val;
struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
struct net *net = (struct net *)ctl->extra2;
+ struct ctl_table tmp = {
+ .data = &new_val,
+ .maxlen = sizeof(new_val),
+ .mode = ctl->mode,
+ };
if (!rtnl_trylock())
return restart_syscall();
- ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+ new_val = *((u32 *)ctl->data);
- if (write) {
- new_val = *((int *)ctl->data);
+ ret = proc_douintvec(&tmp, write, buffer, lenp, ppos);
+ if (ret != 0)
+ goto out;
+ if (write) {
if (check_addr_gen_mode(new_val) < 0) {
ret = -EINVAL;
goto out;
}
- /* request for default */
- if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) {
- ipv6_devconf_dflt.addr_gen_mode = new_val;
-
- /* request for individual net device */
- } else {
- if (!idev)
- goto out;
-
+ if (idev) {
if (check_stable_privacy(idev, net, new_val) < 0) {
ret = -EINVAL;
goto out;
@@ -5928,7 +5927,21 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
idev->cnf.addr_gen_mode = new_val;
addrconf_dev_config(idev->dev);
}
+ } else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
+ struct net_device *dev;
+
+ net->ipv6.devconf_dflt->addr_gen_mode = new_val;
+ for_each_netdev(net, dev) {
+ idev = __in6_dev_get(dev);
+ if (idev &&
+ idev->cnf.addr_gen_mode != new_val) {
+ idev->cnf.addr_gen_mode = new_val;
+ addrconf_dev_config(idev->dev);
+ }
+ }
}
+
+ *((u32 *)ctl->data) = new_val;
}
out:
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 9ed0eae91758..673bba31eb18 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -322,8 +322,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
/* Reproduce AF_INET checks to make the bindings consistent */
v4addr = addr->sin6_addr.s6_addr32[3];
chk_addr_ret = inet_addr_type(net, v4addr);
- if (!net->ipv4.sysctl_ip_nonlocal_bind &&
- !(inet->freebind || inet->transparent) &&
+ if (!inet_can_nonlocal_bind(net, inet) &&
v4addr != htonl(INADDR_ANY) &&
chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST &&
@@ -362,8 +361,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
*/
v4addr = LOOPBACK4_IPV6;
if (!(addr_type & IPV6_ADDR_MULTICAST)) {
- if (!net->ipv6.sysctl.ip_nonlocal_bind &&
- !(inet->freebind || inet->transparent) &&
+ if (!ipv6_can_nonlocal_bind(net, inet) &&
!ipv6_chk_addr(net, &addr->sin6_addr,
dev, 0)) {
err = -EADDRNOTAVAIL;
@@ -764,6 +762,7 @@ EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
static struct packet_type ipv6_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_IPV6),
.func = ipv6_rcv,
+ .list_func = ipv6_list_rcv,
};
static int __init ipv6_packet_init(void)
@@ -833,6 +832,7 @@ static int __net_init inet6_net_init(struct net *net)
net->ipv6.sysctl.bindv6only = 0;
net->ipv6.sysctl.icmpv6_time = 1*HZ;
+ net->ipv6.sysctl.icmpv6_echo_ignore_all = 0;
net->ipv6.sysctl.flowlabel_consistency = 1;
net->ipv6.sysctl.auto_flowlabels = IP6_DEFAULT_AUTO_FLOW_LABELS;
net->ipv6.sysctl.idgen_retries = 3;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 1a1f876f8e28..1ede7a16a0be 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -739,7 +739,7 @@ EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
struct msghdr *msg, struct flowi6 *fl6,
- struct ipcm6_cookie *ipc6, struct sockcm_cookie *sockc)
+ struct ipcm6_cookie *ipc6)
{
struct in6_pktinfo *src_info;
struct cmsghdr *cmsg;
@@ -758,7 +758,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
}
if (cmsg->cmsg_level == SOL_SOCKET) {
- err = __sock_cmsg_send(sk, msg, cmsg, sockc);
+ err = __sock_cmsg_send(sk, msg, cmsg, &ipc6->sockc);
if (err)
return err;
continue;
@@ -803,7 +803,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
if (addr_type != IPV6_ADDR_ANY) {
int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
- if (!(inet_sk(sk)->freebind || inet_sk(sk)->transparent) &&
+ if (!ipv6_can_nonlocal_bind(net, inet_sk(sk)) &&
!ipv6_chk_addr_and_flags(net, &src_info->ipi6_addr,
dev, !strict, 0,
IFA_F_TENTATIVE) &&
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 27f59b61f70f..6177e2171171 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -49,8 +49,8 @@ static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
return 0;
}
-static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *esp6_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
{
int offset = skb_gro_offset(skb);
struct xfrm_offload *xo;
@@ -162,8 +162,7 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
skb->encap_hdr_csum = 1;
- if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
- (x->xso.dev != skb->dev))
+ if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
esp_features = features & ~NETIF_F_CSUM_MASK;
@@ -207,8 +206,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
if (!xo)
return -EINVAL;
- if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
- (x->xso.dev != skb->dev)) {
+ if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
xo->flags |= CRYPTO_FALLBACK;
hw_offload = false;
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index ef2505aefc15..c9c53ade55c3 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -92,7 +92,7 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct net *net = dev_net(skb->dev);
if (type == ICMPV6_PKT_TOOBIG)
- ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
+ ip6_update_pmtu(skb, net, info, skb->dev->ifindex, 0, sock_net_uid(net, NULL));
else if (type == NDISC_REDIRECT)
ip6_redirect(skb, net, skb->dev->ifindex, 0,
sock_net_uid(net, NULL));
@@ -431,7 +431,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
struct icmp6hdr tmp_hdr;
struct flowi6 fl6;
struct icmpv6_msg msg;
- struct sockcm_cookie sockc_unused = {0};
struct ipcm6_cookie ipc6;
int iif = 0;
int addr_type = 0;
@@ -546,7 +545,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
- ipc6.tclass = np->tclass;
+ ipcm6_init_sk(&ipc6, np);
fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
dst = icmpv6_route_lookup(net, skb, sk, &fl6);
@@ -554,8 +553,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
goto out;
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
- ipc6.dontfrag = np->dontfrag;
- ipc6.opt = NULL;
msg.skb = skb;
msg.offset = skb_network_offset(skb);
@@ -576,7 +573,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr),
&ipc6, &fl6, (struct rt6_info *)dst,
- MSG_DONTWAIT, &sockc_unused)) {
+ MSG_DONTWAIT)) {
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
} else {
@@ -680,7 +677,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
struct dst_entry *dst;
struct ipcm6_cookie ipc6;
u32 mark = IP6_REPLY_MARK(net, skb->mark);
- struct sockcm_cookie sockc_unused = {0};
saddr = &ipv6_hdr(skb)->daddr;
@@ -727,16 +723,14 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
msg.offset = 0;
msg.type = ICMPV6_ECHO_REPLY;
+ ipcm6_init_sk(&ipc6, np);
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
- ipc6.dontfrag = np->dontfrag;
- ipc6.opt = NULL;
if (ip6_append_data(sk, icmpv6_getfrag, &msg,
skb->len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr), &ipc6, &fl6,
- (struct rt6_info *)dst, MSG_DONTWAIT,
- &sockc_unused)) {
+ (struct rt6_info *)dst, MSG_DONTWAIT)) {
__ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
} else {
@@ -800,6 +794,7 @@ out:
static int icmpv6_rcv(struct sk_buff *skb)
{
+ struct net *net = dev_net(skb->dev);
struct net_device *dev = skb->dev;
struct inet6_dev *idev = __in6_dev_get(dev);
const struct in6_addr *saddr, *daddr;
@@ -849,7 +844,8 @@ static int icmpv6_rcv(struct sk_buff *skb)
switch (type) {
case ICMPV6_ECHO_REQUEST:
- icmpv6_echo_reply(skb);
+ if (!net->ipv6.sysctl.icmpv6_echo_ignore_all)
+ icmpv6_echo_reply(skb);
break;
case ICMPV6_ECHO_REPLY:
@@ -1110,6 +1106,13 @@ static struct ctl_table ipv6_icmp_table_template[] = {
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
+ {
+ .procname = "echo_ignore_all",
+ .data = &init_net.ipv6.sysctl.icmpv6_echo_ignore_all,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
{ },
};
@@ -1121,9 +1124,10 @@ struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
sizeof(ipv6_icmp_table_template),
GFP_KERNEL);
- if (table)
+ if (table) {
table[0].data = &net->ipv6.sysctl.icmpv6_time;
-
+ table[1].data = &net->ipv6.sysctl.icmpv6_echo_ignore_all;
+ }
return table;
}
#endif
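
The new echo_ignore_all knob is the IPv6 counterpart of IPv4's icmp_echo_ignore_all: with /proc/sys/net/ipv6/icmp/echo_ignore_all set to 1, inbound echo requests are still counted in the ICMPv6 statistics but no reply is generated. The per-netns table setup now has to rebase two .data pointers instead of one, hence the reshaped if (table) block.
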
diff --git a/net/ipv6/ila/Makefile b/net/ipv6/ila/Makefile
index 4b32e5921e5c..b7739aba6e68 100644
--- a/net/ipv6/ila/Makefile
+++ b/net/ipv6/ila/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_IPV6_ILA) += ila.o
-ila-objs := ila_common.o ila_lwt.o ila_xlat.o
+ila-objs := ila_main.o ila_common.o ila_lwt.o ila_xlat.o
diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h
index 3c7a11b62334..1f747bcbec29 100644
--- a/net/ipv6/ila/ila.h
+++ b/net/ipv6/ila/ila.h
@@ -19,6 +19,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/checksum.h>
+#include <net/genetlink.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <uapi/linux/ila.h>
@@ -104,9 +105,31 @@ void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p,
void ila_init_saved_csum(struct ila_params *p);
+struct ila_net {
+ struct {
+ struct rhashtable rhash_table;
+ spinlock_t *locks; /* Bucket locks for entry manipulation */
+ unsigned int locks_mask;
+ bool hooks_registered;
+ } xlat;
+};
+
int ila_lwt_init(void);
void ila_lwt_fini(void);
-int ila_xlat_init(void);
-void ila_xlat_fini(void);
+
+int ila_xlat_init_net(struct net *net);
+void ila_xlat_exit_net(struct net *net);
+
+int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info);
+int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info);
+int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info);
+int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info);
+int ila_xlat_nl_dump_start(struct netlink_callback *cb);
+int ila_xlat_nl_dump_done(struct netlink_callback *cb);
+int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb);
+
+extern unsigned int ila_net_id;
+
+extern struct genl_family ila_nl_family;
#endif /* __ILA_H */
diff --git a/net/ipv6/ila/ila_common.c b/net/ipv6/ila/ila_common.c
index 8c88ecf29b93..95e9146918cc 100644
--- a/net/ipv6/ila/ila_common.c
+++ b/net/ipv6/ila/ila_common.c
@@ -153,34 +153,3 @@ void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p,
/* Now change destination address */
iaddr->loc = p->locator;
}
-
-static int __init ila_init(void)
-{
- int ret;
-
- ret = ila_lwt_init();
-
- if (ret)
- goto fail_lwt;
-
- ret = ila_xlat_init();
- if (ret)
- goto fail_xlat;
-
- return 0;
-fail_xlat:
- ila_lwt_fini();
-fail_lwt:
- return ret;
-}
-
-static void __exit ila_fini(void)
-{
- ila_xlat_fini();
- ila_lwt_fini();
-}
-
-module_init(ila_init);
-module_exit(ila_fini);
-MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
-MODULE_LICENSE("GPL");
diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c
new file mode 100644
index 000000000000..18fac76b9520
--- /dev/null
+++ b/net/ipv6/ila/ila_main.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <net/genetlink.h>
+#include <net/ila.h>
+#include <net/netns/generic.h>
+#include <uapi/linux/genetlink.h>
+#include "ila.h"
+
+static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
+ [ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
+ [ILA_ATTR_LOCATOR_MATCH] = { .type = NLA_U64, },
+ [ILA_ATTR_IFINDEX] = { .type = NLA_U32, },
+ [ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, },
+ [ILA_ATTR_IDENT_TYPE] = { .type = NLA_U8, },
+};
+
+static const struct genl_ops ila_nl_ops[] = {
+ {
+ .cmd = ILA_CMD_ADD,
+ .doit = ila_xlat_nl_cmd_add_mapping,
+ .policy = ila_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = ILA_CMD_DEL,
+ .doit = ila_xlat_nl_cmd_del_mapping,
+ .policy = ila_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = ILA_CMD_FLUSH,
+ .doit = ila_xlat_nl_cmd_flush,
+ .policy = ila_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = ILA_CMD_GET,
+ .doit = ila_xlat_nl_cmd_get_mapping,
+ .start = ila_xlat_nl_dump_start,
+ .dumpit = ila_xlat_nl_dump,
+ .done = ila_xlat_nl_dump_done,
+ .policy = ila_nl_policy,
+ },
+};
+
+unsigned int ila_net_id;
+
+struct genl_family ila_nl_family __ro_after_init = {
+ .hdrsize = 0,
+ .name = ILA_GENL_NAME,
+ .version = ILA_GENL_VERSION,
+ .maxattr = ILA_ATTR_MAX,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .ops = ila_nl_ops,
+ .n_ops = ARRAY_SIZE(ila_nl_ops),
+};
+
+static __net_init int ila_init_net(struct net *net)
+{
+ int err;
+
+ err = ila_xlat_init_net(net);
+ if (err)
+ goto ila_xlat_init_fail;
+
+ return 0;
+
+ila_xlat_init_fail:
+ return err;
+}
+
+static __net_exit void ila_exit_net(struct net *net)
+{
+ ila_xlat_exit_net(net);
+}
+
+static struct pernet_operations ila_net_ops = {
+ .init = ila_init_net,
+ .exit = ila_exit_net,
+ .id = &ila_net_id,
+ .size = sizeof(struct ila_net),
+};
+
+static int __init ila_init(void)
+{
+ int ret;
+
+ ret = register_pernet_device(&ila_net_ops);
+ if (ret)
+ goto register_device_fail;
+
+ ret = genl_register_family(&ila_nl_family);
+ if (ret)
+ goto register_family_fail;
+
+ ret = ila_lwt_init();
+ if (ret)
+ goto fail_lwt;
+
+ return 0;
+
+fail_lwt:
+ genl_unregister_family(&ila_nl_family);
+register_family_fail:
+ unregister_pernet_device(&ila_net_ops);
+register_device_fail:
+ return ret;
+}
+
+static void __exit ila_fini(void)
+{
+ ila_lwt_fini();
+ genl_unregister_family(&ila_nl_family);
+ unregister_pernet_device(&ila_net_ops);
+}
+
+module_init(ila_init);
+module_exit(ila_fini);
+MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
+MODULE_LICENSE("GPL");
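
The new ila_main.c concentrates what used to be split between ila_common.c and ila_xlat.c: module init/exit, the genetlink family and policy, and — new here — pernet_operations with .size = sizeof(struct ila_net), so each network namespace gets its own xlat rhashtable and bucket locks. Registration order matters on the error paths: pernet device, then genetlink family, then LWT, unwound in reverse in ila_fini().
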
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 10ae13560b40..17c455ff69ff 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -22,36 +22,14 @@ struct ila_map {
struct rcu_head rcu;
};
-static unsigned int ila_net_id;
-
-struct ila_net {
- struct rhashtable rhash_table;
- spinlock_t *locks; /* Bucket locks for entry manipulation */
- unsigned int locks_mask;
- bool hooks_registered;
-};
-
+#define MAX_LOCKS 1024
#define LOCKS_PER_CPU 10
static int alloc_ila_locks(struct ila_net *ilan)
{
- unsigned int i, size;
- unsigned int nr_pcpus = num_possible_cpus();
-
- nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
- size = roundup_pow_of_two(nr_pcpus * LOCKS_PER_CPU);
-
- if (sizeof(spinlock_t) != 0) {
- ilan->locks = kvmalloc_array(size, sizeof(spinlock_t),
- GFP_KERNEL);
- if (!ilan->locks)
- return -ENOMEM;
- for (i = 0; i < size; i++)
- spin_lock_init(&ilan->locks[i]);
- }
- ilan->locks_mask = size - 1;
-
- return 0;
+ return alloc_bucket_spinlocks(&ilan->xlat.locks, &ilan->xlat.locks_mask,
+ MAX_LOCKS, LOCKS_PER_CPU,
+ GFP_KERNEL);
}
static u32 hashrnd __read_mostly;
@@ -71,7 +49,7 @@ static inline u32 ila_locator_hash(struct ila_locator loc)
static inline spinlock_t *ila_get_lock(struct ila_net *ilan,
struct ila_locator loc)
{
- return &ilan->locks[ila_locator_hash(loc) & ilan->locks_mask];
+ return &ilan->xlat.locks[ila_locator_hash(loc) & ilan->xlat.locks_mask];
}
static inline int ila_cmp_wildcards(struct ila_map *ila,
@@ -115,16 +93,6 @@ static const struct rhashtable_params rht_params = {
.obj_cmpfn = ila_cmpfn,
};
-static struct genl_family ila_nl_family;
-
-static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
- [ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
- [ILA_ATTR_LOCATOR_MATCH] = { .type = NLA_U64, },
- [ILA_ATTR_IFINDEX] = { .type = NLA_U32, },
- [ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, },
- [ILA_ATTR_IDENT_TYPE] = { .type = NLA_U8, },
-};
-
static int parse_nl_config(struct genl_info *info,
struct ila_xlat_params *xp)
{
@@ -162,7 +130,7 @@ static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr,
{
struct ila_map *ila;
- ila = rhashtable_lookup_fast(&ilan->rhash_table, &iaddr->loc,
+ ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &iaddr->loc,
rht_params);
while (ila) {
if (!ila_cmp_wildcards(ila, iaddr, ifindex))
@@ -179,7 +147,7 @@ static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp,
{
struct ila_map *ila;
- ila = rhashtable_lookup_fast(&ilan->rhash_table,
+ ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
&xp->ip.locator_match,
rht_params);
while (ila) {
@@ -196,9 +164,9 @@ static inline void ila_release(struct ila_map *ila)
kfree_rcu(ila, rcu);
}
-static void ila_free_cb(void *ptr, void *arg)
+static void ila_free_node(struct ila_map *ila)
{
- struct ila_map *ila = (struct ila_map *)ptr, *next;
+ struct ila_map *next;
/* Assume rcu_readlock held */
while (ila) {
@@ -208,6 +176,11 @@ static void ila_free_cb(void *ptr, void *arg)
}
}
+static void ila_free_cb(void *ptr, void *arg)
+{
+ ila_free_node((struct ila_map *)ptr);
+}
+
static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila);
static unsigned int
@@ -235,7 +208,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
int err = 0, order;
- if (!ilan->hooks_registered) {
+ if (!ilan->xlat.hooks_registered) {
/* We defer registering net hooks in the namespace until the
* first mapping is added.
*/
@@ -244,7 +217,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
if (err)
return err;
- ilan->hooks_registered = true;
+ ilan->xlat.hooks_registered = true;
}
ila = kzalloc(sizeof(*ila), GFP_KERNEL);
@@ -259,12 +232,12 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
spin_lock(lock);
- head = rhashtable_lookup_fast(&ilan->rhash_table,
+ head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
&xp->ip.locator_match,
rht_params);
if (!head) {
/* New entry for the rhash_table */
- err = rhashtable_lookup_insert_fast(&ilan->rhash_table,
+ err = rhashtable_lookup_insert_fast(&ilan->xlat.rhash_table,
&ila->node, rht_params);
} else {
struct ila_map *tila = head, *prev = NULL;
@@ -290,7 +263,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
} else {
/* Make this ila new head */
RCU_INIT_POINTER(ila->next, head);
- err = rhashtable_replace_fast(&ilan->rhash_table,
+ err = rhashtable_replace_fast(&ilan->xlat.rhash_table,
&head->node,
&ila->node, rht_params);
if (err)
@@ -316,7 +289,7 @@ static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp)
spin_lock(lock);
- head = rhashtable_lookup_fast(&ilan->rhash_table,
+ head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
&xp->ip.locator_match, rht_params);
ila = head;
@@ -346,15 +319,15 @@ static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp)
* table
*/
err = rhashtable_replace_fast(
- &ilan->rhash_table, &ila->node,
+ &ilan->xlat.rhash_table, &ila->node,
&head->node, rht_params);
if (err)
goto out;
} else {
/* Entry no longer used */
- err = rhashtable_remove_fast(&ilan->rhash_table,
- &ila->node,
- rht_params);
+ err = rhashtable_remove_fast(
+ &ilan->xlat.rhash_table,
+ &ila->node, rht_params);
}
}
@@ -369,7 +342,7 @@ out:
return err;
}
-static int ila_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
+int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = genl_info_net(info);
struct ila_xlat_params p;
@@ -382,7 +355,7 @@ static int ila_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
return ila_add_mapping(net, &p);
}
-static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
+int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = genl_info_net(info);
struct ila_xlat_params xp;
@@ -397,6 +370,59 @@ static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
return 0;
}
+static inline spinlock_t *lock_from_ila_map(struct ila_net *ilan,
+ struct ila_map *ila)
+{
+ return ila_get_lock(ilan, ila->xp.ip.locator_match);
+}
+
+int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct ila_net *ilan = net_generic(net, ila_net_id);
+ struct rhashtable_iter iter;
+ struct ila_map *ila;
+ spinlock_t *lock;
+ int ret;
+
+ ret = rhashtable_walk_init(&ilan->xlat.rhash_table, &iter, GFP_KERNEL);
+ if (ret)
+ goto done;
+
+ rhashtable_walk_start(&iter);
+
+ for (;;) {
+ ila = rhashtable_walk_next(&iter);
+
+ if (IS_ERR(ila)) {
+ if (PTR_ERR(ila) == -EAGAIN)
+ continue;
+ ret = PTR_ERR(ila);
+ goto done;
+ } else if (!ila) {
+ break;
+ }
+
+ lock = lock_from_ila_map(ilan, ila);
+
+ spin_lock(lock);
+
+ ret = rhashtable_remove_fast(&ilan->xlat.rhash_table,
+ &ila->node, rht_params);
+ if (!ret)
+ ila_free_node(ila);
+
+ spin_unlock(lock);
+
+ if (ret)
+ break;
+ }
+
+done:
+ rhashtable_walk_stop(&iter);
+ return ret;
+}
+
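The new flush command is a textbook rhashtable walk: initialize the iterator, start the walk, treat an -EAGAIN error pointer from rhashtable_walk_next() as a table resize that simply restarts iteration, and always pair the start with rhashtable_walk_stop(). A condensed kernel-style sketch of that protocol follows; demo_entry and demo_params are hypothetical stand-ins, not part of the patch, and the per-bucket locking done in the hunk above is omitted for brevity.

struct demo_entry {
	struct rhash_head node;
	u32 key;
	struct rcu_head rcu;
};

static const struct rhashtable_params demo_params = {
	.head_offset = offsetof(struct demo_entry, node),
	.key_offset = offsetof(struct demo_entry, key),
	.key_len = sizeof(u32),
};

/* Sketch only: remove and free every entry of a hypothetical table,
 * following the same walk calls as ila_xlat_nl_cmd_flush() above. */
static int demo_flush(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct demo_entry *e;
	int ret;

	ret = rhashtable_walk_init(ht, &iter, GFP_KERNEL);
	if (ret)
		return ret;

	rhashtable_walk_start(&iter);

	for (;;) {
		e = rhashtable_walk_next(&iter);
		if (IS_ERR(e)) {
			if (PTR_ERR(e) == -EAGAIN)
				continue;	/* table resized; iterator was reset */
			ret = PTR_ERR(e);
			break;
		}
		if (!e)
			break;			/* end of table */

		ret = rhashtable_remove_fast(ht, &e->node, demo_params);
		if (ret)
			break;
		kfree_rcu(e, rcu);
	}

	rhashtable_walk_stop(&iter);
	return ret;
}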
static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg)
{
if (nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR,
@@ -434,7 +460,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int ila_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
+int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = genl_info_net(info);
struct ila_net *ilan = net_generic(net, ila_net_id);
@@ -475,27 +501,34 @@ out_free:
struct ila_dump_iter {
struct rhashtable_iter rhiter;
+ int skip;
};
-static int ila_nl_dump_start(struct netlink_callback *cb)
+int ila_xlat_nl_dump_start(struct netlink_callback *cb)
{
struct net *net = sock_net(cb->skb->sk);
struct ila_net *ilan = net_generic(net, ila_net_id);
- struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
+ struct ila_dump_iter *iter;
+ int ret;
- if (!iter) {
- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
- if (!iter)
- return -ENOMEM;
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return -ENOMEM;
- cb->args[0] = (long)iter;
+ ret = rhashtable_walk_init(&ilan->xlat.rhash_table, &iter->rhiter,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(iter);
+ return ret;
}
- return rhashtable_walk_init(&ilan->rhash_table, &iter->rhiter,
- GFP_KERNEL);
+ iter->skip = 0;
+ cb->args[0] = (long)iter;
+
+ return ret;
}
-static int ila_nl_dump_done(struct netlink_callback *cb)
+int ila_xlat_nl_dump_done(struct netlink_callback *cb)
{
struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
@@ -506,24 +539,49 @@ static int ila_nl_dump_done(struct netlink_callback *cb)
return 0;
}
-static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
+int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
struct rhashtable_iter *rhiter = &iter->rhiter;
+ int skip = iter->skip;
struct ila_map *ila;
int ret;
rhashtable_walk_start(rhiter);
- for (;;) {
- ila = rhashtable_walk_next(rhiter);
+ /* Get first entry */
+ ila = rhashtable_walk_peek(rhiter);
+
+ if (ila && !IS_ERR(ila) && skip) {
+ /* Skip over visited entries */
+
+ while (ila && skip) {
+ /* Skip over any ila entries in this list that we
+ * have already dumped.
+ */
+ ila = rcu_access_pointer(ila->next);
+ skip--;
+ }
+ }
+ skip = 0;
+
+ for (;;) {
if (IS_ERR(ila)) {
- if (PTR_ERR(ila) == -EAGAIN)
- continue;
ret = PTR_ERR(ila);
- goto done;
+ if (ret == -EAGAIN) {
+ /* Table has changed and iter has reset. Return
+ * -EAGAIN to the application even if we have
+ * written data to the skb. The application
+ * needs to deal with this.
+ */
+
+ goto out_ret;
+ } else {
+ break;
+ }
} else if (!ila) {
+ ret = 0;
break;
}
@@ -532,90 +590,54 @@ static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->nlh->nlmsg_seq, NLM_F_MULTI,
skb, ILA_CMD_GET);
if (ret)
- goto done;
+ goto out;
+ skip++;
ila = rcu_access_pointer(ila->next);
}
+
+ skip = 0;
+ ila = rhashtable_walk_next(rhiter);
}
- ret = skb->len;
+out:
+ iter->skip = skip;
+ ret = (skb->len ? : ret);
-done:
+out_ret:
rhashtable_walk_stop(rhiter);
return ret;
}
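The rewritten dump keeps a per-dump skip count in the netlink callback state, so when the message buffer fills partway through a bucket chain, the next invocation fast-forwards past entries that were already emitted instead of restarting the chain. A standalone userspace sketch of that resume discipline, with hypothetical names (the kernel code additionally resets the count when it advances to the next hash bucket):

#include <stdio.h>

struct node { int val; struct node *next; };

/* Emit at most 'budget' values, skipping '*skip' already-dumped ones;
 * '*skip' is advanced so a later call resumes where this one stopped. */
static int dump_chain(const struct node *head, int *skip, int budget)
{
	const struct node *n = head;
	int emitted = 0, s = *skip;

	while (n && s--)
		n = n->next;		/* fast-forward past dumped entries */
	if (!n)
		return 0;		/* nothing left to dump */

	while (n && emitted < budget) {
		printf("%d\n", n->val);
		emitted++;
		(*skip)++;
		n = n->next;
	}
	return emitted;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	int skip = 0;

	while (dump_chain(&a, &skip, 2) > 0)	/* two entries per "message" */
		;
	return 0;
}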
-static const struct genl_ops ila_nl_ops[] = {
- {
- .cmd = ILA_CMD_ADD,
- .doit = ila_nl_cmd_add_mapping,
- .policy = ila_nl_policy,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = ILA_CMD_DEL,
- .doit = ila_nl_cmd_del_mapping,
- .policy = ila_nl_policy,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = ILA_CMD_GET,
- .doit = ila_nl_cmd_get_mapping,
- .start = ila_nl_dump_start,
- .dumpit = ila_nl_dump,
- .done = ila_nl_dump_done,
- .policy = ila_nl_policy,
- },
-};
-
-static struct genl_family ila_nl_family __ro_after_init = {
- .hdrsize = 0,
- .name = ILA_GENL_NAME,
- .version = ILA_GENL_VERSION,
- .maxattr = ILA_ATTR_MAX,
- .netnsok = true,
- .parallel_ops = true,
- .module = THIS_MODULE,
- .ops = ila_nl_ops,
- .n_ops = ARRAY_SIZE(ila_nl_ops),
-};
-
#define ILA_HASH_TABLE_SIZE 1024
-static __net_init int ila_init_net(struct net *net)
+int ila_xlat_init_net(struct net *net)
{
- int err;
struct ila_net *ilan = net_generic(net, ila_net_id);
+ int err;
err = alloc_ila_locks(ilan);
if (err)
return err;
- rhashtable_init(&ilan->rhash_table, &rht_params);
+ rhashtable_init(&ilan->xlat.rhash_table, &rht_params);
return 0;
}
-static __net_exit void ila_exit_net(struct net *net)
+void ila_xlat_exit_net(struct net *net)
{
struct ila_net *ilan = net_generic(net, ila_net_id);
- rhashtable_free_and_destroy(&ilan->rhash_table, ila_free_cb, NULL);
+ rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);
- kvfree(ilan->locks);
+ free_bucket_spinlocks(ilan->xlat.locks);
- if (ilan->hooks_registered)
+ if (ilan->xlat.hooks_registered)
nf_unregister_net_hooks(net, ila_nf_hook_ops,
ARRAY_SIZE(ila_nf_hook_ops));
}
-static struct pernet_operations ila_net_ops = {
- .init = ila_init_net,
- .exit = ila_exit_net,
- .id = &ila_net_id,
- .size = sizeof(struct ila_net),
-};
-
static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
{
struct ila_map *ila;
@@ -641,29 +663,3 @@ static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
return 0;
}
-
-int __init ila_xlat_init(void)
-{
- int ret;
-
- ret = register_pernet_device(&ila_net_ops);
- if (ret)
- goto exit;
-
- ret = genl_register_family(&ila_nl_family);
- if (ret < 0)
- goto unregister;
-
- return 0;
-
-unregister:
- unregister_pernet_device(&ila_net_ops);
-exit:
- return ret;
-}
-
-void ila_xlat_fini(void)
-{
- genl_unregister_family(&ila_nl_family);
- unregister_pernet_device(&ila_net_ops);
-}
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 595ad408dba0..3d7c7460a0c5 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -191,7 +191,7 @@ struct sock *inet6_lookup_listener(struct net *net,
saddr, sport, daddr, hnum,
dif, sdif);
if (result)
- return result;
+ goto done;
/* Lookup lhash2 with in6addr_any */
@@ -200,9 +200,10 @@ struct sock *inet6_lookup_listener(struct net *net,
if (ilb2->count > ilb->count)
goto port_lookup;
- return inet6_lhash2_lookup(net, ilb2, skb, doff,
- saddr, sport, daddr, hnum,
- dif, sdif);
+ result = inet6_lhash2_lookup(net, ilb2, skb, doff,
+ saddr, sport, daddr, hnum,
+ dif, sdif);
+ goto done;
port_lookup:
sk_for_each(sk, &ilb->head) {
@@ -214,12 +215,15 @@ port_lookup:
result = reuseport_select_sock(sk, phash,
skb, doff);
if (result)
- return result;
+ goto done;
}
result = sk;
hiscore = score;
}
}
+done:
+ if (unlikely(IS_ERR(result)))
+ return NULL;
return result;
}
EXPORT_SYMBOL_GPL(inet6_lookup_listener);
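The new done label exists because reuseport socket selection can hand back an error pointer rather than a real socket (the udp.c hunk later in this diff adds the same IS_ERR() guard); converting it to NULL at a single exit point keeps callers from ever dereferencing an encoded errno. The encoding is the kernel's ERR_PTR scheme, reproduced here as a runnable userspace sketch:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* errnos live in the last page of the address space */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *sk = ERR_PTR(-ENOMEM);	/* e.g. a failed reuseport pick */

	if (IS_ERR(sk))
		sk = NULL;		/* what the new 'done' label does */
	printf("result: %p\n", sk);
	return 0;
}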
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 3eee7637bdfe..cb54a8a3c273 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -373,7 +373,6 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
if (olen > 0) {
struct msghdr msg;
struct flowi6 flowi6;
- struct sockcm_cookie sockc_junk;
struct ipcm6_cookie ipc6;
err = -ENOMEM;
@@ -392,7 +391,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
memset(&flowi6, 0, sizeof(flowi6));
ipc6.opt = fl->opt;
- err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6, &sockc_junk);
+ err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6);
if (err)
goto done;
err = -EINVAL;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index cd2cfb04e5d8..18a3794b0f52 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -989,6 +989,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
dsfield = key->tos;
+ if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
+ goto tx_err;
md = ip_tunnel_info_opts(tun_info);
if (!md)
goto tx_err;
@@ -1127,7 +1129,7 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
return;
if (rt->dst.dev) {
- dev->hard_header_len = rt->dst.dev->hard_header_len +
+ dev->needed_headroom = rt->dst.dev->hard_header_len +
t_hlen;
if (set_mtu) {
@@ -1153,7 +1155,7 @@ static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
- tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+ tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
return t_hlen;
}
@@ -1823,7 +1825,7 @@ static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
erspan_hdr_len(tunnel->parms.erspan_ver);
t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
- tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+ tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
return t_hlen;
}
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index f08d34491ece..6242682be876 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -47,17 +47,11 @@
#include <net/inet_ecn.h>
#include <net/dst_metadata.h>
-int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
{
void (*edemux)(struct sk_buff *skb);
- /* if ingress device is enslaved to an L3 master device pass the
- * skb to its handler for processing
- */
- skb = l3mdev_ip6_rcv(skb);
- if (!skb)
- return NET_RX_SUCCESS;
-
if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
const struct inet6_protocol *ipprot;
@@ -67,20 +61,73 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
}
if (!skb_valid_dst(skb))
ip6_route_input(skb);
+}
+
+int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ /* if ingress device is enslaved to an L3 master device pass the
+ * skb to its handler for processing
+ */
+ skb = l3mdev_ip6_rcv(skb);
+ if (!skb)
+ return NET_RX_SUCCESS;
+ ip6_rcv_finish_core(net, sk, skb);
return dst_input(skb);
}
-int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+static void ip6_sublist_rcv_finish(struct list_head *head)
+{
+ struct sk_buff *skb, *next;
+
+ list_for_each_entry_safe(skb, next, head, list)
+ dst_input(skb);
+}
+
+static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
+ struct list_head *head)
+{
+ struct dst_entry *curr_dst = NULL;
+ struct sk_buff *skb, *next;
+ struct list_head sublist;
+
+ INIT_LIST_HEAD(&sublist);
+ list_for_each_entry_safe(skb, next, head, list) {
+ struct dst_entry *dst;
+
+ list_del(&skb->list);
+ /* if ingress device is enslaved to an L3 master device pass the
+ * skb to its handler for processing
+ */
+ skb = l3mdev_ip6_rcv(skb);
+ if (!skb)
+ continue;
+ ip6_rcv_finish_core(net, sk, skb);
+ dst = skb_dst(skb);
+ if (curr_dst != dst) {
+ /* dispatch old sublist */
+ if (!list_empty(&sublist))
+ ip6_sublist_rcv_finish(&sublist);
+ /* start new sublist */
+ INIT_LIST_HEAD(&sublist);
+ curr_dst = dst;
+ }
+ list_add_tail(&skb->list, &sublist);
+ }
+ /* dispatch final sublist */
+ ip6_sublist_rcv_finish(&sublist);
+}
+
+static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
+ struct net *net)
{
const struct ipv6hdr *hdr;
u32 pkt_len;
struct inet6_dev *idev;
- struct net *net = dev_net(skb->dev);
if (skb->pkt_type == PACKET_OTHERHOST) {
kfree_skb(skb);
- return NET_RX_DROP;
+ return NULL;
}
rcu_read_lock();
@@ -196,7 +243,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
if (ipv6_parse_hopopts(skb) < 0) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
rcu_read_unlock();
- return NET_RX_DROP;
+ return NULL;
}
}
@@ -205,15 +252,67 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
/* Must drop socket now because of tproxy. */
skb_orphan(skb);
- return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
- net, NULL, skb, dev, NULL,
- ip6_rcv_finish);
+ return skb;
err:
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
drop:
rcu_read_unlock();
kfree_skb(skb);
- return NET_RX_DROP;
+ return NULL;
+}
+
+int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct net *net = dev_net(skb->dev);
+
+ skb = ip6_rcv_core(skb, dev, net);
+ if (skb == NULL)
+ return NET_RX_DROP;
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
+ net, NULL, skb, dev, NULL,
+ ip6_rcv_finish);
+}
+
+static void ip6_sublist_rcv(struct list_head *head, struct net_device *dev,
+ struct net *net)
+{
+ NF_HOOK_LIST(NFPROTO_IPV6, NF_INET_PRE_ROUTING, net, NULL,
+ head, dev, NULL, ip6_rcv_finish);
+ ip6_list_rcv_finish(net, NULL, head);
+}
+
+/* Receive a list of IPv6 packets */
+void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
+ struct net_device *orig_dev)
+{
+ struct net_device *curr_dev = NULL;
+ struct net *curr_net = NULL;
+ struct sk_buff *skb, *next;
+ struct list_head sublist;
+
+ INIT_LIST_HEAD(&sublist);
+ list_for_each_entry_safe(skb, next, head, list) {
+ struct net_device *dev = skb->dev;
+ struct net *net = dev_net(dev);
+
+ list_del(&skb->list);
+ skb = ip6_rcv_core(skb, dev, net);
+ if (skb == NULL)
+ continue;
+
+ if (curr_dev != dev || curr_net != net) {
+ /* dispatch old sublist */
+ if (!list_empty(&sublist))
+ ip6_sublist_rcv(&sublist, curr_dev, curr_net);
+ /* start new sublist */
+ INIT_LIST_HEAD(&sublist);
+ curr_dev = dev;
+ curr_net = net;
+ }
+ list_add_tail(&skb->list, &sublist);
+ }
+ /* dispatch final sublist */
+ ip6_sublist_rcv(&sublist, curr_dev, curr_net);
}
/*
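ip6_list_rcv_finish() above batches by destination: consecutive skbs that resolve to the same dst_entry are parked on a sublist and dispatched together, so dst handling is amortized across the run. The split-and-flush rhythm, reduced to a runnable userspace sketch over integer keys (all names hypothetical):

#include <stdio.h>

struct pkt { int key; struct pkt *next; };

static void dispatch(const struct pkt *sub, int n)
{
	printf("dispatching %d packet(s) with key %d\n", n, sub->key);
}

/* Collect runs of equal keys and dispatch each run as one batch,
 * mirroring the sublist handling in ip6_list_rcv_finish(). */
static void list_rcv(const struct pkt *head)
{
	const struct pkt *run = NULL;
	int curr_key = 0, n = 0;

	for (const struct pkt *p = head; p; p = p->next) {
		if (!run || p->key != curr_key) {
			if (run)
				dispatch(run, n);	/* flush old sublist */
			run = p;			/* start new sublist */
			curr_key = p->key;
			n = 0;
		}
		n++;
	}
	if (run)
		dispatch(run, n);			/* final sublist */
}

int main(void)
{
	struct pkt d = { 7, NULL }, c = { 7, &d }, b = { 5, &c }, a = { 5, &b };

	list_rcv(&a);	/* two batches: 2 x key 5, then 2 x key 7 */
	return 0;
}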
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 5b3f2f89ef41..37ff4805b20c 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -163,11 +163,11 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph,
return len;
}
-static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *ipv6_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
{
const struct net_offload *ops;
- struct sk_buff **pp = NULL;
+ struct sk_buff *pp = NULL;
struct sk_buff *p;
struct ipv6hdr *iph;
unsigned int nlen;
@@ -214,7 +214,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
flush--;
nlen = skb_network_header_len(skb);
- for (p = *head; p; p = p->next) {
+ list_for_each_entry(p, head, list) {
const struct ipv6hdr *iph2;
__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */
@@ -263,8 +263,8 @@ out:
return pp;
}
-static struct sk_buff **sit_ip6ip6_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
{
/* Common GRO receive for SIT and IP6IP6 */
@@ -278,8 +278,8 @@ static struct sk_buff **sit_ip6ip6_gro_receive(struct sk_buff **head,
return ipv6_gro_receive(head, skb);
}
-static struct sk_buff **ip4ip6_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
{
/* Common GRO receive for SIT and IP6IP6 */
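These hunks are part of the tree-wide change of the GRO hold queue from an open-coded singly linked sk_buff ** chain to the generic struct list_head, so held packets are walked with list_for_each_entry() instead of manual ->next chasing. Both traversal styles in one runnable userspace sketch, using a stand-in item type and a hand-rolled container_of():

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item {
	int id;
	struct item *chain_next;	/* old style: hand-rolled chain */
	struct list_head list;		/* new style: embedded list node */
};

int main(void)
{
	struct item a = { .id = 1 }, b = { .id = 2 };
	struct list_head head = { &a.list, &b.list };

	/* old style, as ipv6_gro_receive() used to iterate */
	a.chain_next = &b;
	b.chain_next = NULL;
	for (struct item *p = &a; p; p = p->chain_next)
		printf("chain: %d\n", p->id);

	/* new style: circular list head -> a -> b -> head */
	a.list.next = &b.list; a.list.prev = &head;
	b.list.next = &head;   b.list.prev = &a.list;
	for (struct list_head *pos = head.next; pos != &head; pos = pos->next)
		printf("list: %d\n", container_of(pos, struct item, list)->id);
	return 0;
}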
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 3168847c30d1..16f200f06500 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1221,13 +1221,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
if (mtu < IPV6_MIN_MTU)
return -EINVAL;
cork->base.fragsize = mtu;
- cork->base.gso_size = sk->sk_type == SOCK_DGRAM &&
- sk->sk_protocol == IPPROTO_UDP ? ipc6->gso_size : 0;
+ cork->base.gso_size = ipc6->gso_size;
+ cork->base.tx_flags = 0;
+ sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags);
if (dst_allfrag(xfrm_dst_path(&rt->dst)))
cork->base.flags |= IPCORK_ALLFRAG;
cork->base.length = 0;
+ cork->base.transmit_time = ipc6->sockc.transmit_time;
+
return 0;
}
@@ -1240,8 +1243,7 @@ static int __ip6_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
- unsigned int flags, struct ipcm6_cookie *ipc6,
- const struct sockcm_cookie *sockc)
+ unsigned int flags, struct ipcm6_cookie *ipc6)
{
struct sk_buff *skb, *skb_prev = NULL;
unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
@@ -1251,7 +1253,6 @@ static int __ip6_append_data(struct sock *sk,
int copy;
int err;
int offset = 0;
- __u8 tx_flags = 0;
u32 tskey = 0;
struct rt6_info *rt = (struct rt6_info *)cork->dst;
struct ipv6_txoptions *opt = v6_cork->opt;
@@ -1270,6 +1271,10 @@ static int __ip6_append_data(struct sock *sk,
mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize;
orig_mtu = mtu;
+ if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
+ sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
+ tskey = sk->sk_tskey++;
+
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
@@ -1319,13 +1324,6 @@ emsgsize:
rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
csummode = CHECKSUM_PARTIAL;
- if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
- sock_tx_timestamp(sk, sockc->tsflags, &tx_flags);
- if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
- sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
- tskey = sk->sk_tskey++;
- }
-
/*
* Let's try using as much space as possible.
* Use MTU if total length of the message fits into the MTU.
@@ -1444,8 +1442,8 @@ alloc_new_skb:
dst_exthdrlen);
/* Only the initial fragment is time stamped */
- skb_shinfo(skb)->tx_flags = tx_flags;
- tx_flags = 0;
+ skb_shinfo(skb)->tx_flags = cork->tx_flags;
+ cork->tx_flags = 0;
skb_shinfo(skb)->tskey = tskey;
tskey = 0;
@@ -1562,8 +1560,7 @@ int ip6_append_data(struct sock *sk,
int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
- struct rt6_info *rt, unsigned int flags,
- const struct sockcm_cookie *sockc)
+ struct rt6_info *rt, unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -1591,7 +1588,7 @@ int ip6_append_data(struct sock *sk,
return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
&np->cork, sk_page_frag(sk), getfrag,
- from, length, transhdrlen, flags, ipc6, sockc);
+ from, length, transhdrlen, flags, ipc6);
}
EXPORT_SYMBOL_GPL(ip6_append_data);
@@ -1675,6 +1672,8 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
+ skb->tstamp = cork->base.transmit_time;
+
skb_dst_set(skb, dst_clone(&rt->dst));
IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
if (proto == IPPROTO_ICMPV6) {
@@ -1749,8 +1748,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
void *from, int length, int transhdrlen,
struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
struct rt6_info *rt, unsigned int flags,
- struct inet_cork_full *cork,
- const struct sockcm_cookie *sockc)
+ struct inet_cork_full *cork)
{
struct inet6_cork v6_cork;
struct sk_buff_head queue;
@@ -1778,7 +1776,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
err = __ip6_append_data(sk, fl6, &queue, &cork->base, &v6_cork,
&current->task_frag, getfrag, from,
length + exthdrlen, transhdrlen + exthdrlen,
- flags, ipc6, sockc);
+ flags, ipc6);
if (err) {
__ip6_flush_pending_frames(sk, &queue, cork, &v6_cork);
return ERR_PTR(err);
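Timestamp setup moves out of __ip6_append_data() and into ip6_setup_cork(): the tx flags and transmit time are computed once per cork, and the append path consumes the flags so that only the first fragment of a corked message carries the software-timestamp bits (the hunk zeroes cork->tx_flags right after stamping). A small runnable sketch of that consume-once discipline, with hypothetical names:

#include <stdio.h>

struct cork { unsigned char tx_flags; };

/* The first fragment takes the cork's flags; later fragments see zero
 * because the flags are consumed, as in __ip6_append_data() above. */
static unsigned char stamp_fragment(struct cork *c)
{
	unsigned char flags = c->tx_flags;

	c->tx_flags = 0;
	return flags;
}

int main(void)
{
	struct cork c = { .tx_flags = 0x4 };	/* e.g. a SW-tstamp bit */

	for (int frag = 0; frag < 3; frag++)
		printf("frag %d tx_flags=%#x\n", frag, stamp_fragment(&c));
	return 0;
}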
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 1cc9650af9fb..5df2a58d945c 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1113,7 +1113,7 @@ route_lookup:
dst = NULL;
goto tx_err_link_failure;
}
- if (t->parms.collect_md &&
+ if (t->parms.collect_md && ipv6_addr_any(&fl6->saddr) &&
ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
&fl6->daddr, 0, &fl6->saddr))
goto tx_err_link_failure;
@@ -1251,6 +1251,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
key = &tun_info->key;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_IPIP;
+ fl6.saddr = key->u.ipv6.src;
fl6.daddr = key->u.ipv6.dst;
fl6.flowlabel = key->label;
dsfield = key->tos;
@@ -1322,6 +1323,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
key = &tun_info->key;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_IPV6;
+ fl6.saddr = key->u.ipv6.src;
fl6.daddr = key->u.ipv6.dst;
fl6.flowlabel = key->label;
dsfield = key->tos;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 0d0f0053bb11..d0b7e0249c13 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -32,6 +32,7 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compat.h>
+#include <linux/rhashtable.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/raw.h>
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 568ca4187cd1..c0cac9cc3a28 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -500,7 +500,6 @@ sticky_done:
struct ipv6_txoptions *opt = NULL;
struct msghdr msg;
struct flowi6 fl6;
- struct sockcm_cookie sockc_junk;
struct ipcm6_cookie ipc6;
memset(&fl6, 0, sizeof(fl6));
@@ -533,7 +532,7 @@ sticky_done:
msg.msg_control = (void *)(opt+1);
ipc6.opt = opt;
- retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6, &sockc_junk);
+ retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6);
if (retv)
goto done;
update:
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index f60f310785fd..4ae54aaca373 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -660,7 +660,7 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
return rv;
}
-static void igmp6_group_added(struct ifmcaddr6 *mc, unsigned int mode)
+static void igmp6_group_added(struct ifmcaddr6 *mc)
{
struct net_device *dev = mc->idev->dev;
char buf[MAX_ADDR_LEN];
@@ -690,7 +690,7 @@ static void igmp6_group_added(struct ifmcaddr6 *mc, unsigned int mode)
* should not send filter-mode change record as the mode
* should be from IN() to IN(A).
*/
- if (mode == MCAST_EXCLUDE)
+ if (mc->mca_sfmode == MCAST_EXCLUDE)
mc->mca_crcount = mc->idev->mc_qrv;
mld_ifc_event(mc->idev);
@@ -931,7 +931,7 @@ static int __ipv6_dev_mc_inc(struct net_device *dev,
write_unlock_bh(&idev->lock);
mld_del_delrec(idev, mc);
- igmp6_group_added(mc, mode);
+ igmp6_group_added(mc);
ma_put(mc);
return 0;
}
@@ -2571,7 +2571,7 @@ void ipv6_mc_up(struct inet6_dev *idev)
ipv6_mc_reset(idev);
for (i = idev->mc_list; i; i = i->next) {
mld_del_delrec(idev, i);
- igmp6_group_added(i, i->mca_sfmode);
+ igmp6_group_added(i);
}
read_unlock_bh(&idev->lock);
}
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 531d6957af36..5ae8e1c51079 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -15,7 +15,6 @@
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/xfrm.h>
-#include <net/ip6_checksum.h>
#include <net/netfilter/nf_queue.h>
int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
@@ -106,71 +105,10 @@ static int nf_ip6_route(struct net *net, struct dst_entry **dst,
return err;
}
-__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol)
-{
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
- __sum16 csum = 0;
-
- switch (skb->ip_summed) {
- case CHECKSUM_COMPLETE:
- if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
- break;
- if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
- skb->len - dataoff, protocol,
- csum_sub(skb->csum,
- skb_checksum(skb, 0,
- dataoff, 0)))) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- }
- /* fall through */
- case CHECKSUM_NONE:
- skb->csum = ~csum_unfold(
- csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
- skb->len - dataoff,
- protocol,
- csum_sub(0,
- skb_checksum(skb, 0,
- dataoff, 0))));
- csum = __skb_checksum_complete(skb);
- }
- return csum;
-}
-EXPORT_SYMBOL(nf_ip6_checksum);
-
-static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, unsigned int len,
- u_int8_t protocol)
-{
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
- __wsum hsum;
- __sum16 csum = 0;
-
- switch (skb->ip_summed) {
- case CHECKSUM_COMPLETE:
- if (len == skb->len - dataoff)
- return nf_ip6_checksum(skb, hook, dataoff, protocol);
- /* fall through */
- case CHECKSUM_NONE:
- hsum = skb_checksum(skb, 0, dataoff, 0);
- skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
- &ip6h->daddr,
- skb->len - dataoff,
- protocol,
- csum_sub(0, hsum)));
- skb->ip_summed = CHECKSUM_NONE;
- return __skb_checksum_complete_head(skb, dataoff + len);
- }
- return csum;
-};
-
static const struct nf_ipv6_ops ipv6ops = {
.chk_addr = ipv6_chk_addr,
.route_input = ip6_route_input,
.fragment = ip6_fragment,
- .checksum = nf_ip6_checksum,
- .checksum_partial = nf_ip6_checksum_partial,
.route = nf_ip6_route,
.reroute = nf_ip6_reroute,
};
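The two checksum helpers deleted here appear to move into generic netfilter code, so they no longer need to be dispatched through nf_ipv6_ops. Underneath both is ordinary ones'-complement Internet checksum arithmetic; a runnable refresher on that arithmetic, independent of the kernel helpers, using the RFC 1071 sample data:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Ones'-complement sum with end-around carry, then a final inversion:
 * the arithmetic underlying csum_ipv6_magic() and csum_fold(). */
static uint16_t csum16(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)data[0] << 8 | data[1];
		data += 2;
		len -= 2;
	}
	if (len)				/* odd trailing byte */
		sum += (uint32_t)data[0] << 8;
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t buf[] = { 0x00, 0x01, 0xf2, 0x03, 0xf4, 0xf5, 0xf6, 0xf7 };

	printf("checksum: 0x%04x\n", csum16(buf, sizeof(buf)));	/* 0x220d */
	return 0;
}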
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 37b14dc9d863..339d0762b027 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -5,26 +5,6 @@
menu "IPv6: Netfilter Configuration"
depends on INET && IPV6 && NETFILTER
-config NF_DEFRAG_IPV6
- tristate
- default n
-
-config NF_CONNTRACK_IPV6
- tristate "IPv6 connection tracking support"
- depends on INET && IPV6 && NF_CONNTRACK
- default m if NETFILTER_ADVANCED=n
- select NF_DEFRAG_IPV6
- ---help---
- Connection tracking keeps a record of what packets have passed
- through your machine, in order to figure out how they are related
- into connections.
-
- This is IPv6 support on Layer 3 independent connection tracking.
- Layer 3 independent connection tracking is an experimental scheme
- which generalizes ip_conntrack to support other layer 3 protocols.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config NF_SOCKET_IPV6
tristate "IPv6 socket lookup support"
help
@@ -128,7 +108,7 @@ config NF_LOG_IPV6
config NF_NAT_IPV6
tristate "IPv6 NAT"
- depends on NF_CONNTRACK_IPV6
+ depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
select NF_NAT
help
@@ -328,7 +308,7 @@ config IP6_NF_SECURITY
config IP6_NF_NAT
tristate "ip6tables NAT support"
- depends on NF_CONNTRACK_IPV6
+ depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
select NF_NAT
select NF_NAT_IPV6
@@ -365,6 +345,7 @@ config IP6_NF_TARGET_NPT
endif # IP6_NF_NAT
endif # IP6_NF_IPTABLES
-
endmenu
+config NF_DEFRAG_IPV6
+ tristate
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 10a5a1c87320..200c0c235565 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -11,12 +11,6 @@ obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
obj-$(CONFIG_IP6_NF_NAT) += ip6table_nat.o
-# objects for l3 independent conntrack
-nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
-
-# l3 independent conntrack
-obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o
-
nf_nat_ipv6-y := nf_nat_l3proto_ipv6.o nf_nat_proto_icmpv6.o
nf_nat_ipv6-$(CONFIG_NF_NAT_MASQUERADE_IPV6) += nf_nat_masquerade_ipv6.o
obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
deleted file mode 100644
index 663827ee3cf8..000000000000
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ /dev/null
@@ -1,460 +0,0 @@
-/*
- * Copyright (C)2004 USAGI/WIDE Project
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Author:
- * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- */
-
-#include <linux/types.h>
-#include <linux/ipv6.h>
-#include <linux/in6.h>
-#include <linux/netfilter.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/icmp.h>
-#include <net/ipv6.h>
-#include <net/inet_frag.h>
-
-#include <linux/netfilter_bridge.h>
-#include <linux/netfilter_ipv6.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_helper.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
-#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_conntrack_zones.h>
-#include <net/netfilter/nf_conntrack_seqadj.h>
-#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
-#include <net/netfilter/nf_log.h>
-
-static int conntrack6_net_id;
-static DEFINE_MUTEX(register_ipv6_hooks);
-
-struct conntrack6_net {
- unsigned int users;
-};
-
-static bool ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
- struct nf_conntrack_tuple *tuple)
-{
- const u_int32_t *ap;
- u_int32_t _addrs[8];
-
- ap = skb_header_pointer(skb, nhoff + offsetof(struct ipv6hdr, saddr),
- sizeof(_addrs), _addrs);
- if (ap == NULL)
- return false;
-
- memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
- memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
-
- return true;
-}
-
-static bool ipv6_invert_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_tuple *orig)
-{
- memcpy(tuple->src.u3.ip6, orig->dst.u3.ip6, sizeof(tuple->src.u3.ip6));
- memcpy(tuple->dst.u3.ip6, orig->src.u3.ip6, sizeof(tuple->dst.u3.ip6));
-
- return true;
-}
-
-static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
- unsigned int *dataoff, u_int8_t *protonum)
-{
- unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
- __be16 frag_off;
- int protoff;
- u8 nexthdr;
-
- if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
- &nexthdr, sizeof(nexthdr)) != 0) {
- pr_debug("ip6_conntrack_core: can't get nexthdr\n");
- return -NF_ACCEPT;
- }
- protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
- /*
- * (protoff == skb->len) means the packet has no data, just
- * IPv6 and possibly extension headers, but it is tracked anyway
- */
- if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
- pr_debug("ip6_conntrack_core: can't find proto in pkt\n");
- return -NF_ACCEPT;
- }
-
- *dataoff = protoff;
- *protonum = nexthdr;
- return NF_ACCEPT;
-}
-
-static unsigned int ipv6_helper(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- struct nf_conn *ct;
- const struct nf_conn_help *help;
- const struct nf_conntrack_helper *helper;
- enum ip_conntrack_info ctinfo;
- __be16 frag_off;
- int protoff;
- u8 nexthdr;
-
- /* This is where we call the helper: as the packet goes out. */
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- return NF_ACCEPT;
-
- help = nfct_help(ct);
- if (!help)
- return NF_ACCEPT;
- /* rcu_read_lock()ed by nf_hook_thresh */
- helper = rcu_dereference(help->helper);
- if (!helper)
- return NF_ACCEPT;
-
- nexthdr = ipv6_hdr(skb)->nexthdr;
- protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
- &frag_off);
- if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
- pr_debug("proto header not found\n");
- return NF_ACCEPT;
- }
-
- return helper->help(skb, protoff, ct, ctinfo);
-}
-
-static unsigned int ipv6_confirm(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- unsigned char pnum = ipv6_hdr(skb)->nexthdr;
- int protoff;
- __be16 frag_off;
-
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- goto out;
-
- protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
- &frag_off);
- if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
- pr_debug("proto header not found\n");
- goto out;
- }
-
- /* adjust seqs for loopback traffic only in outgoing direction */
- if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
- !nf_is_loopback_packet(skb)) {
- if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
- NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
- return NF_DROP;
- }
- }
-out:
- /* We've seen it coming out the other side: confirm it */
- return nf_conntrack_confirm(skb);
-}
-
-static unsigned int ipv6_conntrack_in(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return nf_conntrack_in(state->net, PF_INET6, state->hook, skb);
-}
-
-static unsigned int ipv6_conntrack_local(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- return nf_conntrack_in(state->net, PF_INET6, state->hook, skb);
-}
-
-static const struct nf_hook_ops ipv6_conntrack_ops[] = {
- {
- .hook = ipv6_conntrack_in,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_PRE_ROUTING,
- .priority = NF_IP6_PRI_CONNTRACK,
- },
- {
- .hook = ipv6_conntrack_local,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_OUT,
- .priority = NF_IP6_PRI_CONNTRACK,
- },
- {
- .hook = ipv6_helper,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_POST_ROUTING,
- .priority = NF_IP6_PRI_CONNTRACK_HELPER,
- },
- {
- .hook = ipv6_confirm,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_POST_ROUTING,
- .priority = NF_IP6_PRI_LAST,
- },
- {
- .hook = ipv6_helper,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP6_PRI_CONNTRACK_HELPER,
- },
- {
- .hook = ipv6_confirm,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP6_PRI_LAST-1,
- },
-};
-
-static int
-ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
-{
- struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
- const struct ipv6_pinfo *inet6 = inet6_sk(sk);
- const struct inet_sock *inet = inet_sk(sk);
- const struct nf_conntrack_tuple_hash *h;
- struct sockaddr_in6 sin6;
- struct nf_conn *ct;
- __be32 flow_label;
- int bound_dev_if;
-
- lock_sock(sk);
- tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
- tuple.src.u.tcp.port = inet->inet_sport;
- tuple.dst.u3.in6 = sk->sk_v6_daddr;
- tuple.dst.u.tcp.port = inet->inet_dport;
- tuple.dst.protonum = sk->sk_protocol;
- bound_dev_if = sk->sk_bound_dev_if;
- flow_label = inet6->flow_label;
- release_sock(sk);
-
- if (tuple.dst.protonum != IPPROTO_TCP &&
- tuple.dst.protonum != IPPROTO_SCTP)
- return -ENOPROTOOPT;
-
- if (*len < 0 || (unsigned int) *len < sizeof(sin6))
- return -EINVAL;
-
- h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
- if (!h) {
- pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n",
- &tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port),
- &tuple.dst.u3.ip6, ntohs(tuple.dst.u.tcp.port));
- return -ENOENT;
- }
-
- ct = nf_ct_tuplehash_to_ctrack(h);
-
- sin6.sin6_family = AF_INET6;
- sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
- sin6.sin6_flowinfo = flow_label & IPV6_FLOWINFO_MASK;
- memcpy(&sin6.sin6_addr,
- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6,
- sizeof(sin6.sin6_addr));
-
- nf_ct_put(ct);
- sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, bound_dev_if);
- return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
-}
-
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-
-#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/nfnetlink_conntrack.h>
-
-static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
- const struct nf_conntrack_tuple *tuple)
-{
- if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
- nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -1;
-}
-
-static const struct nla_policy ipv6_nla_policy[CTA_IP_MAX+1] = {
- [CTA_IP_V6_SRC] = { .len = sizeof(u_int32_t)*4 },
- [CTA_IP_V6_DST] = { .len = sizeof(u_int32_t)*4 },
-};
-
-static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
- struct nf_conntrack_tuple *t)
-{
- if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST])
- return -EINVAL;
-
- t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
- t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
-
- return 0;
-}
-#endif
-
-static int ipv6_hooks_register(struct net *net)
-{
- struct conntrack6_net *cnet = net_generic(net, conntrack6_net_id);
- int err = 0;
-
- mutex_lock(&register_ipv6_hooks);
- cnet->users++;
- if (cnet->users > 1)
- goto out_unlock;
-
- err = nf_defrag_ipv6_enable(net);
- if (err < 0) {
- cnet->users = 0;
- goto out_unlock;
- }
-
- err = nf_register_net_hooks(net, ipv6_conntrack_ops,
- ARRAY_SIZE(ipv6_conntrack_ops));
- if (err)
- cnet->users = 0;
- out_unlock:
- mutex_unlock(&register_ipv6_hooks);
- return err;
-}
-
-static void ipv6_hooks_unregister(struct net *net)
-{
- struct conntrack6_net *cnet = net_generic(net, conntrack6_net_id);
-
- mutex_lock(&register_ipv6_hooks);
- if (cnet->users && (--cnet->users == 0))
- nf_unregister_net_hooks(net, ipv6_conntrack_ops,
- ARRAY_SIZE(ipv6_conntrack_ops));
- mutex_unlock(&register_ipv6_hooks);
-}
-
-const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = {
- .l3proto = PF_INET6,
- .pkt_to_tuple = ipv6_pkt_to_tuple,
- .invert_tuple = ipv6_invert_tuple,
- .get_l4proto = ipv6_get_l4proto,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .tuple_to_nlattr = ipv6_tuple_to_nlattr,
- .nlattr_to_tuple = ipv6_nlattr_to_tuple,
- .nla_policy = ipv6_nla_policy,
- .nla_size = NLA_ALIGN(NLA_HDRLEN + sizeof(u32[4])) +
- NLA_ALIGN(NLA_HDRLEN + sizeof(u32[4])),
-#endif
- .net_ns_get = ipv6_hooks_register,
- .net_ns_put = ipv6_hooks_unregister,
- .me = THIS_MODULE,
-};
-
-MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
-
-static struct nf_sockopt_ops so_getorigdst6 = {
- .pf = NFPROTO_IPV6,
- .get_optmin = IP6T_SO_ORIGINAL_DST,
- .get_optmax = IP6T_SO_ORIGINAL_DST + 1,
- .get = ipv6_getorigdst,
- .owner = THIS_MODULE,
-};
-
-static const struct nf_conntrack_l4proto * const builtin_l4proto6[] = {
- &nf_conntrack_l4proto_tcp6,
- &nf_conntrack_l4proto_udp6,
- &nf_conntrack_l4proto_icmpv6,
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- &nf_conntrack_l4proto_dccp6,
-#endif
-#ifdef CONFIG_NF_CT_PROTO_SCTP
- &nf_conntrack_l4proto_sctp6,
-#endif
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
- &nf_conntrack_l4proto_udplite6,
-#endif
-};
-
-static int ipv6_net_init(struct net *net)
-{
- return nf_ct_l4proto_pernet_register(net, builtin_l4proto6,
- ARRAY_SIZE(builtin_l4proto6));
-}
-
-static void ipv6_net_exit(struct net *net)
-{
- nf_ct_l4proto_pernet_unregister(net, builtin_l4proto6,
- ARRAY_SIZE(builtin_l4proto6));
-}
-
-static struct pernet_operations ipv6_net_ops = {
- .init = ipv6_net_init,
- .exit = ipv6_net_exit,
- .id = &conntrack6_net_id,
- .size = sizeof(struct conntrack6_net),
-};
-
-static int __init nf_conntrack_l3proto_ipv6_init(void)
-{
- int ret = 0;
-
- need_conntrack();
-
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- if (WARN_ON(nla_policy_len(ipv6_nla_policy, CTA_IP_MAX + 1) !=
- nf_conntrack_l3proto_ipv6.nla_size))
- return -EINVAL;
-#endif
-
- ret = nf_register_sockopt(&so_getorigdst6);
- if (ret < 0) {
- pr_err("Unable to register netfilter socket option\n");
- return ret;
- }
-
- ret = register_pernet_subsys(&ipv6_net_ops);
- if (ret < 0)
- goto cleanup_sockopt;
-
- ret = nf_ct_l4proto_register(builtin_l4proto6,
- ARRAY_SIZE(builtin_l4proto6));
- if (ret < 0)
- goto cleanup_pernet;
-
- ret = nf_ct_l3proto_register(&nf_conntrack_l3proto_ipv6);
- if (ret < 0) {
- pr_err("nf_conntrack_ipv6: can't register ipv6 proto.\n");
- goto cleanup_l4proto;
- }
- return ret;
-cleanup_l4proto:
- nf_ct_l4proto_unregister(builtin_l4proto6,
- ARRAY_SIZE(builtin_l4proto6));
- cleanup_pernet:
- unregister_pernet_subsys(&ipv6_net_ops);
- cleanup_sockopt:
- nf_unregister_sockopt(&so_getorigdst6);
- return ret;
-}
-
-static void __exit nf_conntrack_l3proto_ipv6_fini(void)
-{
- synchronize_net();
- nf_ct_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
- nf_ct_l4proto_unregister(builtin_l4proto6,
- ARRAY_SIZE(builtin_l4proto6));
- unregister_pernet_subsys(&ipv6_net_ops);
- nf_unregister_sockopt(&so_getorigdst6);
-}
-
-module_init(nf_conntrack_l3proto_ipv6_init);
-module_exit(nf_conntrack_l3proto_ipv6_fini);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index e4d9e6976d3c..2a14d8b65924 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -33,9 +33,8 @@
#include <net/sock.h>
#include <net/snmp.h>
-#include <net/inet_frag.h>
+#include <net/ipv6_frag.h>
-#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
@@ -151,7 +150,7 @@ static void nf_ct_frag6_expire(struct timer_list *t)
fq = container_of(frag, struct frag_queue, q);
net = container_of(fq->q.net, struct net, nf_frag.frags);
- ip6_expire_frag_queue(net, fq);
+ ip6frag_expire_frag_queue(net, fq);
}
/* Creation primitives. */
@@ -464,6 +463,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
head->csum);
fq->q.fragments = NULL;
+ fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
return true;
@@ -558,6 +558,10 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
hdr = ipv6_hdr(skb);
fhdr = (struct frag_hdr *)skb_transport_header(skb);
+ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
+ fhdr->frag_off & htons(IP6_MF))
+ return -EINVAL;
+
skb_orphan(skb);
fq = fq_find(net, fhdr->identification, user, hdr,
skb->dev ? skb->dev->ifindex : 0);
@@ -624,16 +628,24 @@ static struct pernet_operations nf_ct_net_ops = {
.exit = nf_ct_net_exit,
};
+static const struct rhashtable_params nfct_rhash_params = {
+ .head_offset = offsetof(struct inet_frag_queue, node),
+ .hashfn = ip6frag_key_hashfn,
+ .obj_hashfn = ip6frag_obj_hashfn,
+ .obj_cmpfn = ip6frag_obj_cmpfn,
+ .automatic_shrinking = true,
+};
+
int nf_ct_frag6_init(void)
{
int ret = 0;
- nf_frags.constructor = ip6_frag_init;
+ nf_frags.constructor = ip6frag_init;
nf_frags.destructor = NULL;
nf_frags.qsize = sizeof(struct frag_queue);
nf_frags.frag_expire = nf_ct_frag6_expire;
nf_frags.frags_cache_name = nf_frags_cache_name;
- nf_frags.rhash_params = ip6_rhash_params;
+ nf_frags.rhash_params = nfct_rhash_params;
ret = inet_frags_init(&nf_frags);
if (ret)
goto out;
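Both this file and reassembly.c below gain the same sanity check: a fragment that still has the more-fragments bit set must itself be at least IPV6_MIN_MTU (1280 bytes) long, since every IPv6 link must support a 1280-byte MTU and a legitimate sender therefore has no reason to cut non-final fragments smaller than that; undersized ones are rejected before entering the queue. The predicate as a runnable sketch, with the network-byte-order handling of frag_off simplified to host order:

#include <stdio.h>
#include <stdbool.h>

#define IPV6_MIN_MTU	1280
#define IP6_MF		0x0001	/* more-fragments bit (host order here) */

/* Mirror of the new check: a fragment with MF set that is shorter than
 * the IPv6 minimum MTU cannot be a legitimate non-final fragment. */
static bool frag_too_small(unsigned int pkt_len, unsigned int frag_off)
{
	return pkt_len < IPV6_MIN_MTU && (frag_off & IP6_MF);
}

int main(void)
{
	printf("%d\n", frag_too_small(512, IP6_MF));	/* 1: drop */
	printf("%d\n", frag_too_small(512, 0));		/* 0: final fragment */
	printf("%d\n", frag_too_small(1400, IP6_MF));	/* 0: large enough */
	return 0;
}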
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index c87b48359e8f..72dd3e202375 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -14,8 +14,7 @@
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/sysctl.h>
-#include <net/ipv6.h>
-#include <net/inet_frag.h>
+#include <net/ipv6_frag.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
@@ -23,7 +22,6 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#endif
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
index b397a8fe88b9..c6bf580d0f33 100644
--- a/net/ipv6/netfilter/nf_log_ipv6.c
+++ b/net/ipv6/netfilter/nf_log_ipv6.c
@@ -36,7 +36,7 @@ static const struct nf_loginfo default_loginfo = {
};
/* One level of recursion won't kill us */
-static void dump_ipv6_packet(struct nf_log_buf *m,
+static void dump_ipv6_packet(struct net *net, struct nf_log_buf *m,
const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int ip6hoff,
int recurse)
@@ -258,7 +258,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
/* Max length: 3+maxlen */
if (recurse) {
nf_log_buf_add(m, "[");
- dump_ipv6_packet(m, info, skb,
+ dump_ipv6_packet(net, m, info, skb,
ptr + sizeof(_icmp6h), 0);
nf_log_buf_add(m, "] ");
}
@@ -278,7 +278,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
/* Max length: 15 "UID=4294967295 " */
if ((logflags & NF_LOG_UID) && recurse)
- nf_log_dump_sk_uid_gid(m, skb->sk);
+ nf_log_dump_sk_uid_gid(net, m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */
if (recurse && skb->mark)
@@ -365,7 +365,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
if (in != NULL)
dump_ipv6_mac_header(m, loginfo, skb);
- dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
+ dump_ipv6_packet(net, m, loginfo, skb, skb_network_offset(skb), 1);
nf_log_buf_close(m);
}
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 96f56bf49a30..4c04bccc7417 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -62,7 +62,6 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct dst_entry *dst;
struct rt6_info *rt;
struct pingfakehdr pfh;
- struct sockcm_cookie junk = {0};
struct ipcm6_cookie ipc6;
pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
@@ -119,7 +118,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.fl6_icmp_code = user_icmph.icmp6_code;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- ipc6.tclass = np->tclass;
+ ipcm6_init_sk(&ipc6, np);
fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, false);
@@ -142,13 +141,11 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
pfh.family = AF_INET6;
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
- ipc6.dontfrag = np->dontfrag;
- ipc6.opt = NULL;
lock_sock(sk);
err = ip6_append_data(sk, ping_getfrag, &pfh, len,
0, &ipc6, &fl6, rt,
- MSG_DONTWAIT, &junk);
+ MSG_DONTWAIT);
if (err) {
ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index afc307c89d1a..413d98bf24f4 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -620,7 +620,7 @@ out:
static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
struct flowi6 *fl6, struct dst_entry **dstp,
- unsigned int flags)
+ unsigned int flags, const struct sockcm_cookie *sockc)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct net *net = sock_net(sk);
@@ -650,6 +650,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
+ skb->tstamp = sockc->transmit_time;
skb_dst_set(skb, &rt->dst);
*dstp = NULL;
@@ -766,7 +767,6 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct dst_entry *dst = NULL;
struct raw6_frag_vec rfv;
struct flowi6 fl6;
- struct sockcm_cookie sockc;
struct ipcm6_cookie ipc6;
int addr_len = msg->msg_namelen;
u16 proto;
@@ -790,10 +790,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowi6_mark = sk->sk_mark;
fl6.flowi6_uid = sk->sk_uid;
- ipc6.hlimit = -1;
- ipc6.tclass = -1;
- ipc6.dontfrag = -1;
- ipc6.opt = NULL;
+ ipcm6_init(&ipc6);
+ ipc6.sockc.tsflags = sk->sk_tsflags;
if (sin6) {
if (addr_len < SIN6_LEN_RFC2133)
@@ -847,14 +845,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (fl6.flowi6_oif == 0)
fl6.flowi6_oif = sk->sk_bound_dev_if;
- sockc.tsflags = sk->sk_tsflags;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
ipc6.opt = opt;
- err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6, &sockc);
+ err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
@@ -921,13 +918,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
back_from_confirm:
if (inet->hdrincl)
- err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags);
+ err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst,
+ msg->msg_flags, &ipc6.sockc);
else {
ipc6.opt = opt;
lock_sock(sk);
err = ip6_append_data(sk, raw6_getfrag, &rfv,
len, 0, &ipc6, &fl6, (struct rt6_info *)dst,
- msg->msg_flags, &sockc);
+ msg->msg_flags);
if (err)
ip6_flush_pending_frames(sk);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index b939b94e7e91..5c5b4f79296e 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -57,7 +57,7 @@
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
-#include <net/inet_frag.h>
+#include <net/ipv6_frag.h>
#include <net/inet_ecn.h>
static const char ip6_frag_cache_name[] = "ip6-frags";
@@ -72,61 +72,6 @@ static struct inet_frags ip6_frags;
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
struct net_device *dev);
-void ip6_frag_init(struct inet_frag_queue *q, const void *a)
-{
- struct frag_queue *fq = container_of(q, struct frag_queue, q);
- const struct frag_v6_compare_key *key = a;
-
- q->key.v6 = *key;
- fq->ecn = 0;
-}
-EXPORT_SYMBOL(ip6_frag_init);
-
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
-{
- struct net_device *dev = NULL;
- struct sk_buff *head;
-
- rcu_read_lock();
- spin_lock(&fq->q.lock);
-
- if (fq->q.flags & INET_FRAG_COMPLETE)
- goto out;
-
- inet_frag_kill(&fq->q);
-
- dev = dev_get_by_index_rcu(net, fq->iif);
- if (!dev)
- goto out;
-
- __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
- __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
-
- /* Don't send error if the first segment did not arrive. */
- head = fq->q.fragments;
- if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
- goto out;
-
- /* But use as source device on which LAST ARRIVED
- * segment was received. And do not use fq->dev
- * pointer directly, device might already disappeared.
- */
- head->dev = dev;
- skb_get(head);
- spin_unlock(&fq->q.lock);
-
- icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
- kfree_skb(head);
- goto out_rcu_unlock;
-
-out:
- spin_unlock(&fq->q.lock);
-out_rcu_unlock:
- rcu_read_unlock();
- inet_frag_put(&fq->q);
-}
-EXPORT_SYMBOL(ip6_expire_frag_queue);
-
static void ip6_frag_expire(struct timer_list *t)
{
struct inet_frag_queue *frag = from_timer(frag, t, timer);
@@ -136,7 +81,7 @@ static void ip6_frag_expire(struct timer_list *t)
fq = container_of(frag, struct frag_queue, q);
net = container_of(fq->q.net, struct net, ipv6.frags);
- ip6_expire_frag_queue(net, fq);
+ ip6frag_expire_frag_queue(net, fq);
}
static struct frag_queue *
@@ -460,6 +405,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
rcu_read_unlock();
fq->q.fragments = NULL;
+ fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
return 1;
@@ -510,6 +456,10 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
return 1;
}
+ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
+ fhdr->frag_off & htons(IP6_MF))
+ goto fail_hdr;
+
iif = skb->dev ? skb->dev->ifindex : 0;
fq = fq_find(net, fhdr->identification, hdr, iif);
if (fq) {
@@ -696,42 +646,19 @@ static struct pernet_operations ip6_frags_ops = {
.exit = ipv6_frags_exit_net,
};
-static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed)
-{
- return jhash2(data,
- sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
-}
-
-static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed)
-{
- const struct inet_frag_queue *fq = data;
-
- return jhash2((const u32 *)&fq->key.v6,
- sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
-}
-
-static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
-{
- const struct frag_v6_compare_key *key = arg->key;
- const struct inet_frag_queue *fq = ptr;
-
- return !!memcmp(&fq->key, key, sizeof(*key));
-}
-
-const struct rhashtable_params ip6_rhash_params = {
+static const struct rhashtable_params ip6_rhash_params = {
.head_offset = offsetof(struct inet_frag_queue, node),
- .hashfn = ip6_key_hashfn,
- .obj_hashfn = ip6_obj_hashfn,
- .obj_cmpfn = ip6_obj_cmpfn,
+ .hashfn = ip6frag_key_hashfn,
+ .obj_hashfn = ip6frag_obj_hashfn,
+ .obj_cmpfn = ip6frag_obj_cmpfn,
.automatic_shrinking = true,
};
-EXPORT_SYMBOL(ip6_rhash_params);
int __init ipv6_frag_init(void)
{
int ret;
- ip6_frags.constructor = ip6_frag_init;
+ ip6_frags.constructor = ip6frag_init;
ip6_frags.destructor = NULL;
ip6_frags.qsize = sizeof(struct frag_queue);
ip6_frags.frag_expire = ip6_frag_expire;
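The hashing callbacks deleted above do not disappear: both reassembly contexts, plain IPv6 here and the netfilter defrag path earlier in this diff, now share the ip6frag_key_hashfn(), ip6frag_obj_hashfn() and ip6frag_obj_cmpfn() helpers from the new <net/ipv6_frag.h>, each keeping its own static rhashtable_params. The underlying pattern, hashing the whole key struct as a flat u32 array, in a runnable userspace sketch (FNV-1a stands in for jhash2, and the key layout is a stand-in too):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct frag_key {			/* stand-in for frag_v6_compare_key */
	uint32_t saddr[4], daddr[4];
	uint32_t id;
	uint32_t user;
};

/* Hash the key as an array of u32 words, as ip6frag_key_hashfn() does
 * with jhash2(); FNV-1a is only a self-contained stand-in mixer. */
static uint32_t key_hash(const struct frag_key *k, uint32_t seed)
{
	uint32_t words[sizeof(*k) / sizeof(uint32_t)];
	uint32_t h = 2166136261u ^ seed;

	memcpy(words, k, sizeof(words));
	for (size_t i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
		h ^= words[i];
		h *= 16777619u;
	}
	return h;
}

static int key_cmp(const struct frag_key *a, const struct frag_key *b)
{
	return !!memcmp(a, b, sizeof(*a));	/* 0 means match */
}

int main(void)
{
	struct frag_key a = { .id = 42, .user = 1 }, b = a;

	printf("hash=%#x equal=%d\n", key_hash(&a, 0), !key_cmp(&a, &b));
	return 0;
}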
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 0fdf2a55e746..8d0ba757a46c 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -17,6 +17,7 @@
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/slab.h>
+#include <linux/rhashtable.h>
#include <net/ipv6.h>
#include <net/protocol.h>
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index 558fe8cc6d43..8546f94f30d4 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -22,6 +22,7 @@
#include <linux/icmpv6.h>
#include <linux/mroute6.h>
#include <linux/slab.h>
+#include <linux/rhashtable.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index cd6e4cab63f6..60325dbfe88b 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -459,36 +459,57 @@ drop:
DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);
+bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
+{
+ struct seg6_bpf_srh_state *srh_state =
+ this_cpu_ptr(&seg6_bpf_srh_states);
+ struct ipv6_sr_hdr *srh = srh_state->srh;
+
+ if (unlikely(srh == NULL))
+ return false;
+
+ if (unlikely(!srh_state->valid)) {
+ if ((srh_state->hdrlen & 7) != 0)
+ return false;
+
+ srh->hdrlen = (u8)(srh_state->hdrlen >> 3);
+ if (!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3))
+ return false;
+
+ srh_state->valid = true;
+ }
+
+ return true;
+}
+
static int input_action_end_bpf(struct sk_buff *skb,
struct seg6_local_lwt *slwt)
{
struct seg6_bpf_srh_state *srh_state =
this_cpu_ptr(&seg6_bpf_srh_states);
- struct seg6_bpf_srh_state local_srh_state;
struct ipv6_sr_hdr *srh;
- int srhoff = 0;
int ret;
srh = get_and_validate_srh(skb);
- if (!srh)
- goto drop;
+ if (!srh) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
/* preempt_disable is needed to protect the per-CPU buffer srh_state,
* which is also accessed by the bpf_lwt_seg6_* helpers
*/
preempt_disable();
+ srh_state->srh = srh;
srh_state->hdrlen = srh->hdrlen << 3;
- srh_state->valid = 1;
+ srh_state->valid = true;
rcu_read_lock();
bpf_compute_data_pointers(skb);
ret = bpf_prog_run_save_cb(slwt->bpf.prog, skb);
rcu_read_unlock();
- local_srh_state = *srh_state;
- preempt_enable();
-
switch (ret) {
case BPF_OK:
case BPF_REDIRECT:
@@ -500,24 +521,17 @@ static int input_action_end_bpf(struct sk_buff *skb,
goto drop;
}
- if (unlikely((local_srh_state.hdrlen & 7) != 0))
- goto drop;
-
- if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
- goto drop;
- srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
- srh->hdrlen = (u8)(local_srh_state.hdrlen >> 3);
-
- if (!local_srh_state.valid &&
- unlikely(!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3)))
+ if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
goto drop;
+ preempt_enable();
if (ret != BPF_REDIRECT)
seg6_lookup_nexthop(skb, NULL, 0);
return dst_input(skb);
drop:
+ preempt_enable();
kfree_skb(skb);
return -EINVAL;
}
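The BPF end action now stashes the SRH pointer and a validity bit in the per-CPU state and revalidates lazily: helpers that edit the header clear 'valid', and seg6_bpf_has_valid_srh() re-checks alignment and structure only when that bit is unset, caching a positive verdict. The dirty-bit shape of that, as a runnable userspace sketch with hypothetical types:

#include <stdio.h>
#include <stdbool.h>

struct hdr_state {
	int hdrlen;
	bool valid;		/* cleared whenever the header is edited */
};

static bool revalidate(struct hdr_state *st)
{
	if (!st->valid) {
		if (st->hdrlen & 7)		/* must be a multiple of 8 */
			return false;
		st->valid = true;		/* cache the verdict */
	}
	return true;
}

int main(void)
{
	struct hdr_state st = { .hdrlen = 16, .valid = true };

	printf("%d\n", revalidate(&st));	/* 1: already valid */
	st.hdrlen = 20;				/* a helper edited the header */
	st.valid = false;
	printf("%d\n", revalidate(&st));	/* 0: 20 is not 8-aligned */
	return 0;
}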
@@ -637,12 +651,10 @@ static int parse_nla_srh(struct nlattr **attrs, struct seg6_local_lwt *slwt)
if (!seg6_validate_srh(srh, len))
return -EINVAL;
- slwt->srh = kmalloc(len, GFP_KERNEL);
+ slwt->srh = kmemdup(srh, len, GFP_KERNEL);
if (!slwt->srh)
return -ENOMEM;
- memcpy(slwt->srh, srh, len);
-
slwt->headroom += len;
return 0;
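Here kmalloc() followed by memcpy() collapses into kmemdup(), which allocates and copies in one call and reads more clearly at error-handling sites. A userspace equivalent of what kmemdup() does:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace analogue of kmemdup(): allocate len bytes, copy src in. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char srh[] = "segment routing header bytes";
	char *copy = memdup(srh, sizeof(srh));

	if (!copy)
		return 1;
	printf("%s\n", copy);
	free(copy);
	return 0;
}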
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 278e49cd67d4..e72947c99454 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -15,8 +15,8 @@
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
-static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *tcp6_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
{
/* Don't bother verifying checksum if we're going to flush anyway. */
if (!NAPI_GRO_CB(skb)->flush &&
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e6645cae403e..83f4c77c79d8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -235,6 +235,8 @@ struct sock *__udp6_lib_lookup(struct net *net,
exact_dif, hslot2,
skb);
}
+ if (unlikely(IS_ERR(result)))
+ return NULL;
return result;
}
begin:
@@ -249,6 +251,8 @@ begin:
saddr, sport);
result = reuseport_select_sock(sk, hash, skb,
sizeof(struct udphdr));
+ if (unlikely(IS_ERR(result)))
+ return NULL;
if (result)
return result;
}
@@ -1141,13 +1145,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
int err;
int is_udplite = IS_UDPLITE(sk);
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
- struct sockcm_cookie sockc;
- ipc6.hlimit = -1;
- ipc6.tclass = -1;
- ipc6.dontfrag = -1;
+ ipcm6_init(&ipc6);
ipc6.gso_size = up->gso_size;
- sockc.tsflags = sk->sk_tsflags;
+ ipc6.sockc.tsflags = sk->sk_tsflags;
/* destination address check */
if (sin6) {
@@ -1282,7 +1283,7 @@ do_udp_sendmsg:
err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
if (err > 0)
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
- &ipc6, &sockc);
+ &ipc6);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
@@ -1376,7 +1377,7 @@ back_from_confirm:
skb = ip6_make_skb(sk, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc6,
&fl6, (struct rt6_info *)dst,
- msg->msg_flags, &cork, &sockc);
+ msg->msg_flags, &cork);
err = PTR_ERR(skb);
if (!IS_ERR_OR_NULL(skb))
err = udp_v6_send_skb(skb, &fl6, &cork.base);
@@ -1402,7 +1403,7 @@ do_append_data:
up->len += ulen;
err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
&ipc6, &fl6, (struct rt6_info *)dst,
- corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, &sockc);
+ corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
if (err)
udp_v6_flush_pending_frames(sk);
else if (!corkreq)
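
The two udp.c hunks above make __udp6_lib_lookup() tolerate reuseport_select_sock() handing back an error pointer. A hedged sketch of the screening idiom (pick_sock is a made-up name):

#include <linux/err.h>

static struct sock *pick_sock(struct sock *candidate)
{
	/* An ERR_PTR()-encoded error must never be dereferenced or
	 * returned as a valid socket; treat it as "no match".
	 */
	if (unlikely(IS_ERR(candidate)))
		return NULL;

	return candidate;
}
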
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 03a2ff3fe1e6..95dee9ca8d22 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -114,8 +114,8 @@ out:
return segs;
}
-static struct sk_buff **udp6_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+static struct sk_buff *udp6_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
{
struct udphdr *uh = udp_gro_udphdr(skb);
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
index 07d36573f50b..da28e4407b8f 100644
--- a/net/ipv6/xfrm6_mode_ro.c
+++ b/net/ipv6/xfrm6_mode_ro.c
@@ -55,7 +55,7 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
__skb_pull(skb, hdr_len);
memmove(ipv6_hdr(skb), iph, hdr_len);
- x->lastused = get_seconds();
+ x->lastused = ktime_get_real_seconds();
return 0;
}
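
The xfrm6_mode_ro change is part of the y2038 cleanup: get_seconds() returns an unsigned long, which is 32 bits wide on 32-bit architectures, while ktime_get_real_seconds() returns a time64_t on every architecture. A one-line sketch of the replacement, assuming <linux/timekeeping.h>:

#include <linux/timekeeping.h>

/* Before: unsigned long now = get_seconds();  32 bits on 32-bit arches */
time64_t now = ktime_get_real_seconds();       /* time64_t everywhere */
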
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 893a022f9620..a21d8ed0a325 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -150,7 +150,6 @@ static int afiucv_pm_freeze(struct device *dev)
{
struct iucv_sock *iucv;
struct sock *sk;
- int err = 0;
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "afiucv_pm_freeze\n");
@@ -175,7 +174,7 @@ static int afiucv_pm_freeze(struct device *dev)
skb_queue_purge(&iucv->backlog_skb_q);
}
read_unlock(&iucv_sk_list.lock);
- return err;
+ return 0;
}
/**
@@ -1494,7 +1493,7 @@ __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
__poll_t mask = 0;
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, wait);
if (sk->sk_state == IUCV_LISTEN)
return iucv_accept_poll(sk);
@@ -2515,4 +2514,3 @@ MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);
-
diff --git a/net/kcm/Kconfig b/net/kcm/Kconfig
index 87fca36e6c47..9ca83f2ade6f 100644
--- a/net/kcm/Kconfig
+++ b/net/kcm/Kconfig
@@ -8,4 +8,3 @@ config AF_KCM
KCM (Kernel Connection Multiplexor) sockets provide a method
for multiplexing messages of a message based application
protocol over kernel connections (e.g. TCP connections).

-
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index d3601d421571..571d824e4e24 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -2104,4 +2104,3 @@ module_exit(kcm_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_KCM);
-
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 5e1d2946ffbf..9d61266526e7 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1383,7 +1383,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
}
if (!x)
- x = xfrm_find_acq(net, &dummy_mark, mode, reqid, proto, xdaddr, xsaddr, 1, family);
+ x = xfrm_find_acq(net, &dummy_mark, mode, reqid, 0, proto, xdaddr, xsaddr, 1, family);
if (x == NULL)
return -ENOENT;
@@ -2414,7 +2414,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
return err;
}
- xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, XFRM_POLICY_TYPE_MAIN,
+ xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN,
pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
1, &err);
security_xfrm_policy_free(pol_ctx);
@@ -2663,7 +2663,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
return -EINVAL;
delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2);
- xp = xfrm_policy_byid(net, DUMMY_MARK, XFRM_POLICY_TYPE_MAIN,
+ xp = xfrm_policy_byid(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN,
dir, pol->sadb_x_policy_id, delete, &err);
if (xp == NULL)
return -ENOENT;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 40261cb68e83..82cdf9020b53 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -203,44 +203,44 @@ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
-/* Lookup a session. A new reference is held on the returned session. */
-struct l2tp_session *l2tp_session_get(const struct net *net,
- struct l2tp_tunnel *tunnel,
- u32 session_id)
+struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
+ u32 session_id)
{
struct hlist_head *session_list;
struct l2tp_session *session;
- if (!tunnel) {
- struct l2tp_net *pn = l2tp_pernet(net);
-
- session_list = l2tp_session_id_hash_2(pn, session_id);
+ session_list = l2tp_session_id_hash(tunnel, session_id);
- rcu_read_lock_bh();
- hlist_for_each_entry_rcu(session, session_list, global_hlist) {
- if (session->session_id == session_id) {
- l2tp_session_inc_refcount(session);
- rcu_read_unlock_bh();
+ read_lock_bh(&tunnel->hlist_lock);
+ hlist_for_each_entry(session, session_list, hlist)
+ if (session->session_id == session_id) {
+ l2tp_session_inc_refcount(session);
+ read_unlock_bh(&tunnel->hlist_lock);
- return session;
- }
+ return session;
}
- rcu_read_unlock_bh();
+ read_unlock_bh(&tunnel->hlist_lock);
- return NULL;
- }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_get_session);
- session_list = l2tp_session_id_hash(tunnel, session_id);
- read_lock_bh(&tunnel->hlist_lock);
- hlist_for_each_entry(session, session_list, hlist) {
+struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id)
+{
+ struct hlist_head *session_list;
+ struct l2tp_session *session;
+
+ session_list = l2tp_session_id_hash_2(l2tp_pernet(net), session_id);
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu(session, session_list, global_hlist)
if (session->session_id == session_id) {
l2tp_session_inc_refcount(session);
- read_unlock_bh(&tunnel->hlist_lock);
+ rcu_read_unlock_bh();
return session;
}
- }
- read_unlock_bh(&tunnel->hlist_lock);
+ rcu_read_unlock_bh();
return NULL;
}
@@ -322,8 +322,7 @@ int l2tp_session_register(struct l2tp_session *session,
if (tunnel->version == L2TP_HDR_VER_3) {
pn = l2tp_pernet(tunnel->l2tp_net);
- g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
- session->session_id);
+ g_head = l2tp_session_id_hash_2(pn, session->session_id);
spin_lock_bh(&pn->l2tp_session_hlist_lock);
@@ -620,7 +619,7 @@ discard:
*/
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
unsigned char *ptr, unsigned char *optr, u16 hdrflags,
- int length, int (*payload_hook)(struct sk_buff *skb))
+ int length)
{
struct l2tp_tunnel *tunnel = session->tunnel;
int offset;
@@ -741,13 +740,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
__skb_pull(skb, offset);
- /* If caller wants to process the payload before we queue the
- * packet, do so now.
- */
- if (payload_hook)
- if ((*payload_hook)(skb))
- goto discard;
-
/* Prepare skb for adding to the session's reorder_q. Hold
* packets for max reorder_timeout or 1 second if not
* reordering.
@@ -783,7 +775,7 @@ EXPORT_SYMBOL(l2tp_recv_common);
/* Drop skbs from the session's reorder_q
*/
-int l2tp_session_queue_purge(struct l2tp_session *session)
+static int l2tp_session_queue_purge(struct l2tp_session *session)
{
struct sk_buff *skb = NULL;
BUG_ON(!session);
@@ -794,7 +786,6 @@ int l2tp_session_queue_purge(struct l2tp_session *session)
}
return 0;
}
-EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
* here. The skb is not on a list when we get here.
@@ -802,8 +793,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
* Returns 1 if the packet was not a good data packet and could not be
* forwarded. All such packets are passed up to userspace to deal with.
*/
-static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
- int (*payload_hook)(struct sk_buff *skb))
+static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
{
struct l2tp_session *session = NULL;
unsigned char *ptr, *optr;
@@ -882,7 +872,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
}
/* Find the session context */
- session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id);
+ session = l2tp_tunnel_get_session(tunnel, session_id);
if (!session || !session->recv_skb) {
if (session)
l2tp_session_dec_refcount(session);
@@ -894,7 +884,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
goto error;
}
- l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+ l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
l2tp_session_dec_refcount(session);
return 0;
@@ -923,7 +913,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
tunnel->name, skb->len);
- if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
+ if (l2tp_udp_recv_core(tunnel, skb))
goto pass_up;
return 0;
@@ -1009,8 +999,8 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
return bufp - optr;
}
-static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
- struct flowi *fl, size_t data_len)
+static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
+ struct flowi *fl, size_t data_len)
{
struct l2tp_tunnel *tunnel = session->tunnel;
unsigned int len = skb->len;
@@ -1052,8 +1042,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
atomic_long_inc(&tunnel->stats.tx_errors);
atomic_long_inc(&session->stats.tx_errors);
}
-
- return 0;
}
/* If caller requires the skb to have a ppp header, the header must be
@@ -1110,7 +1098,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
/* Get routing info from the tunnel socket */
skb_dst_drop(skb);
- skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
+ skb_dst_set(skb, sk_dst_check(sk, 0));
inet = inet_sk(sk);
fl = &inet->cork.fl;
@@ -1193,7 +1181,7 @@ end:
/* When the tunnel is closed, all the attached sessions need to go too.
*/
-void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
int hash;
struct hlist_node *walk;
@@ -1242,7 +1230,6 @@ again:
}
write_unlock_bh(&tunnel->hlist_lock);
}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
/* Tunnel socket destroy hook for UDP encapsulation */
static void l2tp_udp_encap_destroy(struct sock *sk)
@@ -1687,8 +1674,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
if (cfg) {
session->pwtype = cfg->pw_type;
session->debug = cfg->debug;
- session->mtu = cfg->mtu;
- session->mru = cfg->mru;
session->send_seq = cfg->send_seq;
session->recv_seq = cfg->recv_seq;
session->lns_mode = cfg->lns_mode;
@@ -1800,4 +1785,3 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP core");
MODULE_LICENSE("GPL");
MODULE_VERSION(L2TP_DRV_VERSION);
-
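
After the l2tp_core.c refactor above there are two lookup paths: l2tp_tunnel_get_session() walks the tunnel's own hash under read_lock_bh(), and l2tp_session_get() walks the per-net hash under RCU. Both return the session with its refcount raised, so every caller must pair the lookup with l2tp_session_dec_refcount(). A hypothetical consumer (process_session is made up) might look like:

static void handle_packet_for(struct l2tp_tunnel *tunnel, u32 session_id)
{
	struct l2tp_session *session;

	session = l2tp_tunnel_get_session(tunnel, session_id);
	if (!session)
		return;

	process_session(session);            /* hypothetical work; refcount held */
	l2tp_session_dec_refcount(session);  /* release the lookup's reference */
}
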
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index c199020f8a8a..9c9afe94d389 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -12,6 +12,13 @@
#ifndef _L2TP_CORE_H_
#define _L2TP_CORE_H_
+#include <net/dst.h>
+#include <net/sock.h>
+
+#ifdef CONFIG_XFRM
+#include <net/xfrm.h>
+#endif
+
/* Just some random numbers */
#define L2TP_TUNNEL_MAGIC 0x42114DDA
#define L2TP_SESSION_MAGIC 0x0C04EB7D
@@ -45,10 +52,6 @@ struct l2tp_tunnel;
*/
struct l2tp_session_cfg {
enum l2tp_pwtype pw_type;
- unsigned int data_seq:2; /* data sequencing level
- * 0 => none, 1 => IP only,
- * 2 => all
- */
unsigned int recv_seq:1; /* expect receive packets with
* sequence numbers? */
unsigned int send_seq:1; /* send packets with sequence
@@ -58,7 +61,6 @@ struct l2tp_session_cfg {
* control of LNS. */
int debug; /* bitmask of debug message
* categories */
- u16 vlan_id; /* VLAN pseudowire only */
u16 l2specific_type; /* Layer 2 specific type */
u8 cookie[8]; /* optional cookie */
int cookie_len; /* 0, 4 or 8 bytes */
@@ -66,8 +68,6 @@ struct l2tp_session_cfg {
int peer_cookie_len; /* 0, 4 or 8 bytes */
int reorder_timeout; /* configured reorder timeout
* (in jiffies) */
- int mtu;
- int mru;
char *ifname;
};
@@ -99,10 +99,6 @@ struct l2tp_session {
char name[32]; /* for logging */
char ifname[IFNAMSIZ];
- unsigned int data_seq:2; /* data sequencing level
- * 0 => none, 1 => IP only,
- * 2 => all
- */
unsigned int recv_seq:1; /* expect receive packets with
* sequence numbers? */
unsigned int send_seq:1; /* send packets with sequence
@@ -115,8 +111,6 @@ struct l2tp_session {
int reorder_timeout; /* configured reorder timeout
* (in jiffies) */
int reorder_skip; /* set if skip to next nr */
- int mtu;
- int mru;
enum l2tp_pwtype pwtype;
struct l2tp_stats stats;
struct hlist_node global_hlist; /* Global hash list node */
@@ -124,9 +118,7 @@ struct l2tp_session {
int (*build_header)(struct l2tp_session *session, void *buf);
void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len);
void (*session_close)(struct l2tp_session *session);
-#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
void (*show)(struct seq_file *m, void *priv);
-#endif
uint8_t priv[0]; /* private data */
};
@@ -180,18 +172,12 @@ struct l2tp_tunnel {
struct net *l2tp_net; /* the net we belong to */
refcount_t ref_count;
-#ifdef CONFIG_DEBUG_FS
- void (*show)(struct seq_file *m, void *arg);
-#endif
- int (*recv_payload_hook)(struct sk_buff *skb);
void (*old_sk_destruct)(struct sock *);
struct sock *sock; /* Parent socket */
int fd; /* Parent fd, if tunnel socket
* was created by userspace */
struct work_struct del_work;
-
- uint8_t priv[0]; /* private data */
};
struct l2tp_nl_cmd_ops {
@@ -201,11 +187,6 @@ struct l2tp_nl_cmd_ops {
int (*session_delete)(struct l2tp_session *session);
};
-static inline void *l2tp_tunnel_priv(struct l2tp_tunnel *tunnel)
-{
- return &tunnel->priv[0];
-}
-
static inline void *l2tp_session_priv(struct l2tp_session *session)
{
return &session->priv[0];
@@ -213,12 +194,12 @@ static inline void *l2tp_session_priv(struct l2tp_session *session)
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth);
+struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
+ u32 session_id);
void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
-struct l2tp_session *l2tp_session_get(const struct net *net,
- struct l2tp_tunnel *tunnel,
- u32 session_id);
+struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id);
struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth);
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
const char *ifname);
@@ -229,7 +210,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
struct l2tp_tunnel_cfg *cfg);
-void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
struct l2tp_session *l2tp_session_create(int priv_size,
struct l2tp_tunnel *tunnel,
@@ -243,8 +223,7 @@ int l2tp_session_delete(struct l2tp_session *session);
void l2tp_session_free(struct l2tp_session *session);
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
unsigned char *ptr, unsigned char *optr, u16 hdrflags,
- int length, int (*payload_hook)(struct sk_buff *skb));
-int l2tp_session_queue_purge(struct l2tp_session *session);
+ int length);
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
void l2tp_session_set_header_len(struct l2tp_session *session, int version);
@@ -292,6 +271,36 @@ static inline int l2tp_get_l2specific_len(struct l2tp_session *session)
}
}
+static inline u32 l2tp_tunnel_dst_mtu(const struct l2tp_tunnel *tunnel)
+{
+ struct dst_entry *dst;
+ u32 mtu;
+
+ dst = sk_dst_get(tunnel->sock);
+ if (!dst)
+ return 0;
+
+ mtu = dst_mtu(dst);
+ dst_release(dst);
+
+ return mtu;
+}
+
+#ifdef CONFIG_XFRM
+static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel)
+{
+ struct sock *sk = tunnel->sock;
+
+ return sk && (rcu_access_pointer(sk->sk_policy[0]) ||
+ rcu_access_pointer(sk->sk_policy[1]));
+}
+#else
+static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel)
+{
+ return false;
+}
+#endif
+
#define l2tp_printk(ptr, type, func, fmt, ...) \
do { \
if (((ptr)->debug) & (type)) \
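
l2tp_tunnel_dst_mtu() above returns 0 when no route MTU is cached, so callers are expected to clamp or fall back. A sketch of the intended use, mirroring the l2tp_eth hunk further below (overhead stands for the caller's encapsulation cost and is an assumption here):

u32 mtu = l2tp_tunnel_dst_mtu(tunnel);

if (mtu > overhead)
	dev->mtu = mtu - overhead;
else
	dev->mtu = ETH_DATA_LEN - overhead;  /* no usable route MTU, assume 1500 */
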
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index e87686f7d63c..9821a1458555 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -177,9 +177,6 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
atomic_long_read(&tunnel->stats.rx_packets),
atomic_long_read(&tunnel->stats.rx_bytes),
atomic_long_read(&tunnel->stats.rx_errors));
-
- if (tunnel->show != NULL)
- tunnel->show(m, tunnel);
}
static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
@@ -194,12 +191,9 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
if (session->send_seq || session->recv_seq)
seq_printf(m, " nr %hu, ns %hu\n", session->nr, session->ns);
seq_printf(m, " refcnt %d\n", refcount_read(&session->ref_count));
- seq_printf(m, " config %d/%d/%c/%c/%s/%s %08x %u\n",
- session->mtu, session->mru,
+ seq_printf(m, " config 0/0/%c/%c/-/%s %08x %u\n",
session->recv_seq ? 'R' : '-',
session->send_seq ? 'S' : '-',
- session->data_seq == 1 ? "IPSEQ" :
- session->data_seq == 2 ? "DATASEQ" : "-",
session->lns_mode ? "LNS" : "LAC",
session->debug,
jiffies_to_msecs(session->reorder_timeout));
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 5c366ecfa1cb..8aadc4f3bb9e 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -199,7 +199,6 @@ static void l2tp_eth_delete(struct l2tp_session *session)
}
}
-#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
static void l2tp_eth_show(struct seq_file *m, void *arg)
{
struct l2tp_session *session = arg;
@@ -219,29 +218,25 @@ static void l2tp_eth_show(struct seq_file *m, void *arg)
dev_put(dev);
}
-#endif
static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel,
struct l2tp_session *session,
struct net_device *dev)
{
unsigned int overhead = 0;
- struct dst_entry *dst;
u32 l3_overhead = 0;
+ u32 mtu;
/* if the encap is UDP, account for UDP header size */
if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
overhead += sizeof(struct udphdr);
dev->needed_headroom += sizeof(struct udphdr);
}
- if (session->mtu != 0) {
- dev->mtu = session->mtu;
- dev->needed_headroom += session->hdr_len;
- return;
- }
+
lock_sock(tunnel->sock);
l3_overhead = kernel_sock_ip_overhead(tunnel->sock);
release_sock(tunnel->sock);
+
if (l3_overhead == 0) {
/* L3 Overhead couldn't be identified, this could be
* because tunnel->sock was NULL or the socket's
@@ -255,18 +250,12 @@ static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel,
*/
overhead += session->hdr_len + ETH_HLEN + l3_overhead;
- /* If PMTU discovery was enabled, use discovered MTU on L2TP device */
- dst = sk_dst_get(tunnel->sock);
- if (dst) {
- /* dst_mtu will use PMTU if found, else fallback to intf MTU */
- u32 pmtu = dst_mtu(dst);
+ mtu = l2tp_tunnel_dst_mtu(tunnel) - overhead;
+ if (mtu < dev->min_mtu || mtu > dev->max_mtu)
+ dev->mtu = ETH_DATA_LEN - overhead;
+ else
+ dev->mtu = mtu;
- if (pmtu != 0)
- dev->mtu = pmtu;
- dst_release(dst);
- }
- session->mtu = dev->mtu - overhead;
- dev->mtu = session->mtu;
dev->needed_headroom += session->hdr_len;
}
@@ -314,9 +303,8 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
session->recv_skb = l2tp_eth_dev_recv;
session->session_close = l2tp_eth_delete;
-#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
- session->show = l2tp_eth_show;
-#endif
+ if (IS_ENABLED(CONFIG_L2TP_DEBUGFS))
+ session->show = l2tp_eth_show;
spriv = l2tp_session_priv(session);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index a9c05b2bc1b0..35f6f86d4dcc 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -144,7 +144,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
}
/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_get(net, NULL, session_id);
+ session = l2tp_session_get(net, session_id);
if (!session)
goto discard;
@@ -165,7 +165,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
- l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+ l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
l2tp_session_dec_refcount(session);
return 0;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 957369192ca1..237f1a4a0b0c 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -157,7 +157,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
}
/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_get(net, NULL, session_id);
+ session = l2tp_session_get(net, session_id);
if (!session)
goto discard;
@@ -178,8 +178,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
- l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
- tunnel->recv_payload_hook);
+ l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
l2tp_session_dec_refcount(session);
return 0;
@@ -500,7 +499,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct ip6_flowlabel *flowlabel = NULL;
struct dst_entry *dst = NULL;
struct flowi6 fl6;
- struct sockcm_cookie sockc_unused = {0};
struct ipcm6_cookie ipc6;
int addr_len = msg->msg_namelen;
int transhdrlen = 4; /* zero session-id */
@@ -525,9 +523,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowi6_mark = sk->sk_mark;
fl6.flowi6_uid = sk->sk_uid;
- ipc6.hlimit = -1;
- ipc6.tclass = -1;
- ipc6.dontfrag = -1;
+ ipcm6_init(&ipc6);
if (lsa) {
if (addr_len < SIN6_LEN_RFC2133)
@@ -575,8 +571,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
opt->tot_len = sizeof(struct ipv6_txoptions);
ipc6.opt = opt;
- err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6,
- &sockc_unused);
+ err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
@@ -641,7 +636,7 @@ back_from_confirm:
err = ip6_append_data(sk, ip_generic_getfrag, msg,
ulen, transhdrlen, &ipc6,
&fl6, (struct rt6_info *)dst,
- msg->msg_flags, &sockc_unused);
+ msg->msg_flags);
if (err)
ip6_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE))
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 5b9900889e31..edbd5d1fbcde 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -66,7 +66,7 @@ static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info)
session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
tunnel = l2tp_tunnel_get(net, tunnel_id);
if (tunnel) {
- session = l2tp_session_get(net, tunnel, session_id);
+ session = l2tp_tunnel_get_session(tunnel, session_id);
l2tp_tunnel_dec_refcount(tunnel);
}
}
@@ -560,9 +560,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
}
if (tunnel->version > 2) {
- if (info->attrs[L2TP_ATTR_DATA_SEQ])
- cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
-
if (info->attrs[L2TP_ATTR_L2SPEC_TYPE]) {
cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]);
if (cfg.l2specific_type != L2TP_L2SPECTYPE_DEFAULT &&
@@ -594,9 +591,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
}
if (info->attrs[L2TP_ATTR_IFNAME])
cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
-
- if (info->attrs[L2TP_ATTR_VLAN_ID])
- cfg.vlan_id = nla_get_u16(info->attrs[L2TP_ATTR_VLAN_ID]);
}
if (info->attrs[L2TP_ATTR_DEBUG])
@@ -614,12 +608,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);
- if (info->attrs[L2TP_ATTR_MTU])
- cfg.mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]);
-
- if (info->attrs[L2TP_ATTR_MRU])
- cfg.mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);
-
#ifdef CONFIG_MODULES
if (l2tp_nl_cmd_ops[cfg.pw_type] == NULL) {
genl_unlock();
@@ -639,7 +627,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
&cfg);
if (ret >= 0) {
- session = l2tp_session_get(net, tunnel, session_id);
+ session = l2tp_tunnel_get_session(tunnel, session_id);
if (session) {
ret = l2tp_session_notify(&l2tp_nl_family, info, session,
L2TP_CMD_SESSION_CREATE);
@@ -693,9 +681,6 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
if (info->attrs[L2TP_ATTR_DEBUG])
session->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
- if (info->attrs[L2TP_ATTR_DATA_SEQ])
- session->data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
-
if (info->attrs[L2TP_ATTR_RECV_SEQ])
session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
@@ -710,12 +695,6 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
session->reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);
- if (info->attrs[L2TP_ATTR_MTU])
- session->mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]);
-
- if (info->attrs[L2TP_ATTR_MRU])
- session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);
-
ret = l2tp_session_notify(&l2tp_nl_family, info,
session, L2TP_CMD_SESSION_MODIFY);
@@ -731,9 +710,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
void *hdr;
struct nlattr *nest;
struct l2tp_tunnel *tunnel = session->tunnel;
- struct sock *sk = NULL;
-
- sk = tunnel->sock;
hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, cmd);
if (!hdr)
@@ -745,10 +721,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID,
session->peer_session_id) ||
nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
- nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) ||
- nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) ||
- (session->mru &&
- nla_put_u16(skb, L2TP_ATTR_MRU, session->mru)))
+ nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype))
goto nla_put_failure;
if ((session->ifname[0] &&
@@ -762,10 +735,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
-#ifdef CONFIG_XFRM
- (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) &&
+ (l2tp_tunnel_uses_xfrm(tunnel) &&
nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
-#endif
(session->reorder_timeout &&
nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT,
session->reorder_timeout, L2TP_ATTR_PAD)))
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index cf6cca260e7b..04d9946dcdba 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -93,10 +93,8 @@
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
-#include <net/dst.h>
#include <net/ip.h>
#include <net/udp.h>
-#include <net/xfrm.h>
#include <net/inet_common.h>
#include <asm/byteorder.h>
@@ -127,8 +125,6 @@ struct pppol2tp_session {
* PPPoX socket */
struct sock *__sk; /* Copy of .sk, for cleanup */
struct rcu_head rcu; /* For asynchronous release */
- int flags; /* accessed by PPPIOCGFLAGS.
- * Unused. */
};
static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
@@ -183,25 +179,6 @@ out:
* Receive data handling
*****************************************************************************/
-static int pppol2tp_recv_payload_hook(struct sk_buff *skb)
-{
- /* Skip PPP header, if present. In testing, Microsoft L2TP clients
- * don't send the PPP header (PPP header compression enabled), but
- * other clients can include the header. So we cope with both cases
- * here. The PPP header is always FF03 when using L2TP.
- *
- * Note that skb->data[] isn't dereferenced from a u16 ptr here since
- * the field may be unaligned.
- */
- if (!pskb_may_pull(skb, 2))
- return 1;
-
- if ((skb->data[0] == PPP_ALLSTATIONS) && (skb->data[1] == PPP_UI))
- skb_pull(skb, 2);
-
- return 0;
-}
-
/* Receive message. This is the recvmsg for the PPPoL2TP socket.
*/
static int pppol2tp_recvmsg(struct socket *sock, struct msghdr *msg,
@@ -248,6 +225,17 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
if (sk == NULL)
goto no_sock;
+ /* If the first two bytes are 0xFF03, consider that it is the PPP's
+ * Address and Control fields and skip them. The L2TP module has always
+ * worked this way, although, in theory, the use of these fields should
+ * be negotiated and handled at the PPP layer. These fields are
+ * constant: 0xFF is the All-Stations Address and 0x03 the Unnumbered
+ * Information command with Poll/Final bit set to zero (RFC 1662).
+ */
+ if (pskb_may_pull(skb, 2) && skb->data[0] == PPP_ALLSTATIONS &&
+ skb->data[1] == PPP_UI)
+ skb_pull(skb, 2);
+
if (sk->sk_state & PPPOX_BOUND) {
struct pppox_sock *po;
@@ -424,12 +412,6 @@ static void pppol2tp_put_sk(struct rcu_head *head)
sock_put(ps->__sk);
}
-/* Called by l2tp_core when a session socket is being closed.
- */
-static void pppol2tp_session_close(struct l2tp_session *session)
-{
-}
-
/* Really kill the session socket. (Called from sock_put() if
* refcnt == 0.)
*/
@@ -551,7 +533,6 @@ out:
return error;
}
-#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
static void pppol2tp_show(struct seq_file *m, void *arg)
{
struct l2tp_session *session = arg;
@@ -565,34 +546,118 @@ static void pppol2tp_show(struct seq_file *m, void *arg)
sock_put(sk);
}
}
-#endif
static void pppol2tp_session_init(struct l2tp_session *session)
{
struct pppol2tp_session *ps;
- struct dst_entry *dst;
session->recv_skb = pppol2tp_recv;
- session->session_close = pppol2tp_session_close;
-#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
- session->show = pppol2tp_show;
-#endif
+ if (IS_ENABLED(CONFIG_L2TP_DEBUGFS))
+ session->show = pppol2tp_show;
ps = l2tp_session_priv(session);
mutex_init(&ps->sk_lock);
ps->owner = current->pid;
+}
+
+struct l2tp_connect_info {
+ u8 version;
+ int fd;
+ u32 tunnel_id;
+ u32 peer_tunnel_id;
+ u32 session_id;
+ u32 peer_session_id;
+};
+
+static int pppol2tp_sockaddr_get_info(const void *sa, int sa_len,
+ struct l2tp_connect_info *info)
+{
+ switch (sa_len) {
+ case sizeof(struct sockaddr_pppol2tp):
+ {
+ const struct sockaddr_pppol2tp *sa_v2in4 = sa;
- /* If PMTU discovery was enabled, use the MTU that was discovered */
- dst = sk_dst_get(session->tunnel->sock);
- if (dst) {
- u32 pmtu = dst_mtu(dst);
+ if (sa_v2in4->sa_protocol != PX_PROTO_OL2TP)
+ return -EINVAL;
- if (pmtu) {
- session->mtu = pmtu - PPPOL2TP_HEADER_OVERHEAD;
- session->mru = pmtu - PPPOL2TP_HEADER_OVERHEAD;
- }
- dst_release(dst);
+ info->version = 2;
+ info->fd = sa_v2in4->pppol2tp.fd;
+ info->tunnel_id = sa_v2in4->pppol2tp.s_tunnel;
+ info->peer_tunnel_id = sa_v2in4->pppol2tp.d_tunnel;
+ info->session_id = sa_v2in4->pppol2tp.s_session;
+ info->peer_session_id = sa_v2in4->pppol2tp.d_session;
+
+ break;
}
+ case sizeof(struct sockaddr_pppol2tpv3):
+ {
+ const struct sockaddr_pppol2tpv3 *sa_v3in4 = sa;
+
+ if (sa_v3in4->sa_protocol != PX_PROTO_OL2TP)
+ return -EINVAL;
+
+ info->version = 3;
+ info->fd = sa_v3in4->pppol2tp.fd;
+ info->tunnel_id = sa_v3in4->pppol2tp.s_tunnel;
+ info->peer_tunnel_id = sa_v3in4->pppol2tp.d_tunnel;
+ info->session_id = sa_v3in4->pppol2tp.s_session;
+ info->peer_session_id = sa_v3in4->pppol2tp.d_session;
+
+ break;
+ }
+ case sizeof(struct sockaddr_pppol2tpin6):
+ {
+ const struct sockaddr_pppol2tpin6 *sa_v2in6 = sa;
+
+ if (sa_v2in6->sa_protocol != PX_PROTO_OL2TP)
+ return -EINVAL;
+
+ info->version = 2;
+ info->fd = sa_v2in6->pppol2tp.fd;
+ info->tunnel_id = sa_v2in6->pppol2tp.s_tunnel;
+ info->peer_tunnel_id = sa_v2in6->pppol2tp.d_tunnel;
+ info->session_id = sa_v2in6->pppol2tp.s_session;
+ info->peer_session_id = sa_v2in6->pppol2tp.d_session;
+
+ break;
+ }
+ case sizeof(struct sockaddr_pppol2tpv3in6):
+ {
+ const struct sockaddr_pppol2tpv3in6 *sa_v3in6 = sa;
+
+ if (sa_v3in6->sa_protocol != PX_PROTO_OL2TP)
+ return -EINVAL;
+
+ info->version = 3;
+ info->fd = sa_v3in6->pppol2tp.fd;
+ info->tunnel_id = sa_v3in6->pppol2tp.s_tunnel;
+ info->peer_tunnel_id = sa_v3in6->pppol2tp.d_tunnel;
+ info->session_id = sa_v3in6->pppol2tp.s_session;
+ info->peer_session_id = sa_v3in6->pppol2tp.d_session;
+
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Rough estimation of the maximum payload size a tunnel can transmit without
+ * fragmenting at the lower IP layer. Assumes L2TPv2 with sequence
+ * numbers and no IP option. Not quite accurate, but the result is mostly
+ * unused anyway.
+ */
+static int pppol2tp_tunnel_mtu(const struct l2tp_tunnel *tunnel)
+{
+ int mtu;
+
+ mtu = l2tp_tunnel_dst_mtu(tunnel);
+ if (mtu <= PPPOL2TP_HEADER_OVERHEAD)
+ return 1500 - PPPOL2TP_HEADER_OVERHEAD;
+
+ return mtu - PPPOL2TP_HEADER_OVERHEAD;
}
/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
@@ -601,34 +666,23 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
int sockaddr_len, int flags)
{
struct sock *sk = sock->sk;
- struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
struct pppox_sock *po = pppox_sk(sk);
struct l2tp_session *session = NULL;
+ struct l2tp_connect_info info;
struct l2tp_tunnel *tunnel;
struct pppol2tp_session *ps;
struct l2tp_session_cfg cfg = { 0, };
- int error = 0;
- u32 tunnel_id, peer_tunnel_id;
- u32 session_id, peer_session_id;
bool drop_refcnt = false;
bool drop_tunnel = false;
bool new_session = false;
bool new_tunnel = false;
- int ver = 2;
- int fd;
-
- lock_sock(sk);
-
- error = -EINVAL;
+ int error;
- if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
- sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
- sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
- sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
- goto end;
+ error = pppol2tp_sockaddr_get_info(uservaddr, sockaddr_len, &info);
+ if (error < 0)
+ return error;
- if (sp->sa_protocol != PX_PROTO_OL2TP)
- goto end;
+ lock_sock(sk);
/* Check for already bound sockets */
error = -EBUSY;
@@ -640,56 +694,12 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
if (sk->sk_user_data)
goto end; /* socket is already attached */
- /* Get params from socket address. Handle L2TPv2 and L2TPv3.
- * This is nasty because there are different sockaddr_pppol2tp
- * structs for L2TPv2, L2TPv3, over IPv4 and IPv6. We use
- * the sockaddr size to determine which structure the caller
- * is using.
- */
- peer_tunnel_id = 0;
- if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) {
- fd = sp->pppol2tp.fd;
- tunnel_id = sp->pppol2tp.s_tunnel;
- peer_tunnel_id = sp->pppol2tp.d_tunnel;
- session_id = sp->pppol2tp.s_session;
- peer_session_id = sp->pppol2tp.d_session;
- } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) {
- struct sockaddr_pppol2tpv3 *sp3 =
- (struct sockaddr_pppol2tpv3 *) sp;
- ver = 3;
- fd = sp3->pppol2tp.fd;
- tunnel_id = sp3->pppol2tp.s_tunnel;
- peer_tunnel_id = sp3->pppol2tp.d_tunnel;
- session_id = sp3->pppol2tp.s_session;
- peer_session_id = sp3->pppol2tp.d_session;
- } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpin6)) {
- struct sockaddr_pppol2tpin6 *sp6 =
- (struct sockaddr_pppol2tpin6 *) sp;
- fd = sp6->pppol2tp.fd;
- tunnel_id = sp6->pppol2tp.s_tunnel;
- peer_tunnel_id = sp6->pppol2tp.d_tunnel;
- session_id = sp6->pppol2tp.s_session;
- peer_session_id = sp6->pppol2tp.d_session;
- } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3in6)) {
- struct sockaddr_pppol2tpv3in6 *sp6 =
- (struct sockaddr_pppol2tpv3in6 *) sp;
- ver = 3;
- fd = sp6->pppol2tp.fd;
- tunnel_id = sp6->pppol2tp.s_tunnel;
- peer_tunnel_id = sp6->pppol2tp.d_tunnel;
- session_id = sp6->pppol2tp.s_session;
- peer_session_id = sp6->pppol2tp.d_session;
- } else {
- error = -EINVAL;
- goto end; /* bad socket address */
- }
-
/* Don't bind if tunnel_id is 0 */
error = -EINVAL;
- if (tunnel_id == 0)
+ if (!info.tunnel_id)
goto end;
- tunnel = l2tp_tunnel_get(sock_net(sk), tunnel_id);
+ tunnel = l2tp_tunnel_get(sock_net(sk), info.tunnel_id);
if (tunnel)
drop_tunnel = true;
@@ -697,7 +707,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
* peer_session_id is 0. Otherwise look up tunnel using supplied
* tunnel id.
*/
- if ((session_id == 0) && (peer_session_id == 0)) {
+ if (!info.session_id && !info.peer_session_id) {
if (tunnel == NULL) {
struct l2tp_tunnel_cfg tcfg = {
.encap = L2TP_ENCAPTYPE_UDP,
@@ -707,12 +717,16 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
/* Prevent l2tp_tunnel_register() from trying to set up
* a kernel socket.
*/
- if (fd < 0) {
+ if (info.fd < 0) {
error = -EBADF;
goto end;
}
- error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel);
+ error = l2tp_tunnel_create(sock_net(sk), info.fd,
+ info.version,
+ info.tunnel_id,
+ info.peer_tunnel_id, &tcfg,
+ &tunnel);
if (error < 0)
goto end;
@@ -737,13 +751,10 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
goto end;
}
- if (tunnel->recv_payload_hook == NULL)
- tunnel->recv_payload_hook = pppol2tp_recv_payload_hook;
-
if (tunnel->peer_tunnel_id == 0)
- tunnel->peer_tunnel_id = peer_tunnel_id;
+ tunnel->peer_tunnel_id = info.peer_tunnel_id;
- session = l2tp_session_get(sock_net(sk), tunnel, session_id);
+ session = l2tp_tunnel_get_session(tunnel, info.session_id);
if (session) {
drop_refcnt = true;
@@ -766,14 +777,11 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
goto end;
}
} else {
- /* Default MTU must allow space for UDP/L2TP/PPP headers */
- cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
- cfg.mru = cfg.mtu;
cfg.pw_type = L2TP_PWTYPE_PPP;
session = l2tp_session_create(sizeof(struct pppol2tp_session),
- tunnel, session_id,
- peer_session_id, &cfg);
+ tunnel, info.session_id,
+ info.peer_session_id, &cfg);
if (IS_ERR(session)) {
error = PTR_ERR(session);
goto end;
@@ -813,7 +821,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.private = sk;
po->chan.ops = &pppol2tp_chan_ops;
- po->chan.mtu = session->mtu;
+ po->chan.mtu = pppol2tp_tunnel_mtu(tunnel);
error = ppp_register_net_channel(sock_net(sk), &po->chan);
if (error) {
@@ -869,12 +877,6 @@ static int pppol2tp_session_create(struct net *net, struct l2tp_tunnel *tunnel,
goto err;
}
- /* Default MTU values. */
- if (cfg->mtu == 0)
- cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
- if (cfg->mru == 0)
- cfg->mru = cfg->mtu;
-
/* Allocate and initialize a new session context. */
session = l2tp_session_create(sizeof(struct pppol2tp_session),
tunnel, session_id,
@@ -1021,8 +1023,10 @@ end:
****************************************************************************/
static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,
- struct l2tp_stats *stats)
+ const struct l2tp_stats *stats)
{
+ memset(dest, 0, sizeof(*dest));
+
dest->tx_packets = atomic_long_read(&stats->tx_packets);
dest->tx_bytes = atomic_long_read(&stats->tx_bytes);
dest->tx_errors = atomic_long_read(&stats->tx_errors);
@@ -1033,256 +1037,107 @@ static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,
dest->rx_errors = atomic_long_read(&stats->rx_errors);
}
-/* Session ioctl helper.
- */
-static int pppol2tp_session_ioctl(struct l2tp_session *session,
- unsigned int cmd, unsigned long arg)
+static int pppol2tp_tunnel_copy_stats(struct pppol2tp_ioc_stats *stats,
+ struct l2tp_tunnel *tunnel)
{
- struct ifreq ifr;
- int err = 0;
- struct sock *sk;
- int val = (int) arg;
- struct pppol2tp_session *ps = l2tp_session_priv(session);
- struct l2tp_tunnel *tunnel = session->tunnel;
- struct pppol2tp_ioc_stats stats;
+ struct l2tp_session *session;
- l2tp_dbg(session, L2TP_MSG_CONTROL,
- "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
- session->name, cmd, arg);
+ if (!stats->session_id) {
+ pppol2tp_copy_stats(stats, &tunnel->stats);
+ return 0;
+ }
- sk = pppol2tp_session_get_sock(session);
- if (!sk)
+ /* If session_id is set, search the corresponding session in the
+ * context of this tunnel and record the session's statistics.
+ */
+ session = l2tp_tunnel_get_session(tunnel, stats->session_id);
+ if (!session)
return -EBADR;
- switch (cmd) {
- case SIOCGIFMTU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
- break;
- ifr.ifr_mtu = session->mtu;
- if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
- break;
-
- l2tp_info(session, L2TP_MSG_CONTROL, "%s: get mtu=%d\n",
- session->name, session->mtu);
- err = 0;
- break;
-
- case SIOCSIFMTU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
+ if (session->pwtype != L2TP_PWTYPE_PPP) {
+ l2tp_session_dec_refcount(session);
+ return -EBADR;
+ }
- err = -EFAULT;
- if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
- break;
+ pppol2tp_copy_stats(stats, &session->stats);
+ l2tp_session_dec_refcount(session);
- session->mtu = ifr.ifr_mtu;
+ return 0;
+}
- l2tp_info(session, L2TP_MSG_CONTROL, "%s: set mtu=%d\n",
- session->name, session->mtu);
- err = 0;
- break;
+static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ struct pppol2tp_ioc_stats stats;
+ struct l2tp_session *session;
+ int val;
+ switch (cmd) {
case PPPIOCGMRU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
+ case PPPIOCGFLAGS:
+ session = sock->sk->sk_user_data;
+ if (!session)
+ return -ENOTCONN;
- err = -EFAULT;
- if (put_user(session->mru, (int __user *) arg))
- break;
+ /* Not defined for tunnels */
+ if (!session->session_id && !session->peer_session_id)
+ return -ENOSYS;
- l2tp_info(session, L2TP_MSG_CONTROL, "%s: get mru=%d\n",
- session->name, session->mru);
- err = 0;
+ if (put_user(0, (int __user *)arg))
+ return -EFAULT;
break;
case PPPIOCSMRU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (get_user(val, (int __user *) arg))
- break;
-
- session->mru = val;
- l2tp_info(session, L2TP_MSG_CONTROL, "%s: set mru=%d\n",
- session->name, session->mru);
- err = 0;
- break;
-
- case PPPIOCGFLAGS:
- err = -EFAULT;
- if (put_user(ps->flags, (int __user *) arg))
- break;
-
- l2tp_info(session, L2TP_MSG_CONTROL, "%s: get flags=%d\n",
- session->name, ps->flags);
- err = 0;
- break;
-
case PPPIOCSFLAGS:
- err = -EFAULT;
- if (get_user(val, (int __user *) arg))
- break;
- ps->flags = val;
- l2tp_info(session, L2TP_MSG_CONTROL, "%s: set flags=%d\n",
- session->name, ps->flags);
- err = 0;
- break;
-
- case PPPIOCGL2TPSTATS:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
+ session = sock->sk->sk_user_data;
+ if (!session)
+ return -ENOTCONN;
- memset(&stats, 0, sizeof(stats));
- stats.tunnel_id = tunnel->tunnel_id;
- stats.session_id = session->session_id;
- pppol2tp_copy_stats(&stats, &session->stats);
- if (copy_to_user((void __user *) arg, &stats,
- sizeof(stats)))
- break;
- l2tp_info(session, L2TP_MSG_CONTROL, "%s: get L2TP stats\n",
- session->name);
- err = 0;
- break;
+ /* Not defined for tunnels */
+ if (!session->session_id && !session->peer_session_id)
+ return -ENOSYS;
- default:
- err = -ENOSYS;
+ if (get_user(val, (int __user *)arg))
+ return -EFAULT;
break;
- }
- sock_put(sk);
-
- return err;
-}
-
-/* Tunnel ioctl helper.
- *
- * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
- * specifies a session_id, the session ioctl handler is called. This allows an
- * application to retrieve session stats via a tunnel socket.
- */
-static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
- unsigned int cmd, unsigned long arg)
-{
- int err = 0;
- struct sock *sk;
- struct pppol2tp_ioc_stats stats;
-
- l2tp_dbg(tunnel, L2TP_MSG_CONTROL,
- "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
- tunnel->name, cmd, arg);
-
- sk = tunnel->sock;
- sock_hold(sk);
-
- switch (cmd) {
case PPPIOCGL2TPSTATS:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- if (copy_from_user(&stats, (void __user *) arg,
- sizeof(stats))) {
- err = -EFAULT;
- break;
+ session = sock->sk->sk_user_data;
+ if (!session)
+ return -ENOTCONN;
+
+ /* Session 0 represents the parent tunnel */
+ if (!session->session_id && !session->peer_session_id) {
+ u32 session_id;
+ int err;
+
+ if (copy_from_user(&stats, (void __user *)arg,
+ sizeof(stats)))
+ return -EFAULT;
+
+ session_id = stats.session_id;
+ err = pppol2tp_tunnel_copy_stats(&stats,
+ session->tunnel);
+ if (err < 0)
+ return err;
+
+ stats.session_id = session_id;
+ } else {
+ pppol2tp_copy_stats(&stats, &session->stats);
+ stats.session_id = session->session_id;
}
- if (stats.session_id != 0) {
- /* resend to session ioctl handler */
- struct l2tp_session *session =
- l2tp_session_get(sock_net(sk), tunnel,
- stats.session_id);
-
- if (!session) {
- err = -EBADR;
- break;
- }
- if (session->pwtype != L2TP_PWTYPE_PPP) {
- l2tp_session_dec_refcount(session);
- err = -EBADR;
- break;
- }
+ stats.tunnel_id = session->tunnel->tunnel_id;
+ stats.using_ipsec = l2tp_tunnel_uses_xfrm(session->tunnel);
- err = pppol2tp_session_ioctl(session, cmd, arg);
- l2tp_session_dec_refcount(session);
- break;
- }
-#ifdef CONFIG_XFRM
- stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
-#endif
- pppol2tp_copy_stats(&stats, &tunnel->stats);
- if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) {
- err = -EFAULT;
- break;
- }
- l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: get L2TP stats\n",
- tunnel->name);
- err = 0;
+ if (copy_to_user((void __user *)arg, &stats, sizeof(stats)))
+ return -EFAULT;
break;
default:
- err = -ENOSYS;
- break;
+ return -ENOIOCTLCMD;
}
- sock_put(sk);
-
- return err;
-}
-
-/* Main ioctl() handler.
- * Dispatch to tunnel or session helpers depending on the socket.
- */
-static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg)
-{
- struct sock *sk = sock->sk;
- struct l2tp_session *session;
- struct l2tp_tunnel *tunnel;
- int err;
-
- if (!sk)
- return 0;
-
- err = -EBADF;
- if (sock_flag(sk, SOCK_DEAD) != 0)
- goto end;
-
- err = -ENOTCONN;
- if ((sk->sk_user_data == NULL) ||
- (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
- goto end;
-
- /* Get session context from the socket */
- err = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto end;
-
- /* Special case: if session's session_id is zero, treat ioctl as a
- * tunnel ioctl
- */
- if ((session->session_id == 0) &&
- (session->peer_session_id == 0)) {
- tunnel = session->tunnel;
- err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
- goto end_put_sess;
- }
-
- err = pppol2tp_session_ioctl(session, cmd, arg);
-
-end_put_sess:
- sock_put(sk);
-end:
- return err;
+ return 0;
}
/*****************************************************************************
@@ -1722,8 +1577,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
tunnel->peer_tunnel_id,
session->peer_session_id,
state, user_data_ok);
- seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
- session->mtu, session->mru,
+ seq_printf(m, " 0/0/%c/%c/%s %08x %u\n",
session->recv_seq ? 'R' : '-',
session->send_seq ? 'S' : '-',
session->lns_mode ? "LNS" : "LAC",
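
With the consolidated pppol2tp_ioctl() above, PPPIOCGL2TPSTATS on a tunnel management socket (session id 0) returns tunnel totals, and a non-zero stats.session_id selects one session within that tunnel. A hedged userspace sketch (tunnel_fd is assumed to be a connected PPPoL2TP tunnel socket):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/if_pppol2tp.h>
#include <linux/ppp-ioctl.h>

static int show_tunnel_stats(int tunnel_fd)
{
	/* session_id == 0 asks for the parent tunnel's counters;
	 * a non-zero id selects one PPP session within the tunnel.
	 */
	struct pppol2tp_ioc_stats stats = { .session_id = 0 };

	if (ioctl(tunnel_fd, PPPIOCGL2TPSTATS, &stats) < 0)
		return -1;

	printf("tunnel %u: rx %llu packets, tx %llu packets\n",
	       (unsigned)stats.tunnel_id,
	       (unsigned long long)stats.rx_packets,
	       (unsigned long long)stats.tx_packets);
	return 0;
}
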
diff --git a/net/llc/Kconfig b/net/llc/Kconfig
index b91c65108162..176a6c1521a5 100644
--- a/net/llc/Kconfig
+++ b/net/llc/Kconfig
@@ -6,5 +6,5 @@ config LLC2
tristate "ANSI/IEEE 802.2 LLC type 2 Support"
select LLC
help
- This is a Logical Link Layer type 2, connection oriented support.
+ This is a Logical Link Layer type 2, connection oriented support.
Select this if you want to have support for PF_LLC sockets.
diff --git a/net/llc/Makefile b/net/llc/Makefile
index 4e260cff3c5d..5e0ef436daae 100644
--- a/net/llc/Makefile
+++ b/net/llc/Makefile
@@ -4,7 +4,7 @@
# Copyright (c) 1997 by Procom Technology,Inc.
# 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
#
-# This program can be redistributed or modified under the terms of the
+# This program can be redistributed or modified under the terms of the
# GNU General Public License as published by the Free Software Foundation.
# This program is distributed without any warranty or implied warranty
# of merchantability or fitness for a particular purpose.
diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c
index 6daf391b3e84..8db03c2d5440 100644
--- a/net/llc/llc_if.c
+++ b/net/llc/llc_if.c
@@ -151,4 +151,3 @@ out:
sock_put(sk);
return rc;
}
-
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index e3589ade62e0..bb707789ef2b 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -12,6 +12,7 @@ mac80211-y := \
scan.o offchannel.o \
ht.o agg-tx.o agg-rx.o \
vht.o \
+ he.o \
ibss.o \
iface.o \
rate.o \
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index e83c19d4c292..6a4f154c99f6 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -245,6 +245,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
};
int i, ret = -EOPNOTSUPP;
u16 status = WLAN_STATUS_REQUEST_DECLINED;
+ u16 max_buf_size;
if (tid >= IEEE80211_FIRST_TSPEC_TSID) {
ht_dbg(sta->sdata,
@@ -268,13 +269,18 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
goto end;
}
+ if (sta->sta.he_cap.has_he)
+ max_buf_size = IEEE80211_MAX_AMPDU_BUF;
+ else
+ max_buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
+
/* sanity check for incoming parameters:
* check if configuration can support the BA policy
* and if buffer size does not exceed max value */
/* XXX: check own ht delayed BA capability?? */
if (((ba_policy != 1) &&
(!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
- (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
+ (buf_size > max_buf_size)) {
status = WLAN_STATUS_INVALID_QOS_PARAM;
ht_dbg_ratelimited(sta->sdata,
"AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
@@ -283,7 +289,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
}
/* determine default buffer size */
if (buf_size == 0)
- buf_size = IEEE80211_MAX_AMPDU_BUF;
+ buf_size = max_buf_size;
/* make sure the size doesn't exceed the maximum supported by the hw */
if (buf_size > sta->sta.max_rx_aggregation_subframes)
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index ac4295296514..69e831bc317b 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -463,6 +463,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
.timeout = 0,
};
int ret;
+ u16 buf_size;
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
@@ -511,11 +512,22 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
sta->ampdu_mlme.addba_req_num[tid]++;
spin_unlock_bh(&sta->lock);
+ if (sta->sta.he_cap.has_he) {
+ buf_size = local->hw.max_tx_aggregation_subframes;
+ } else {
+ /*
+ * We really should use what the driver told us it will
+ * transmit as the maximum, but certain APs (e.g. the
+ * LinkSys WRT120N with FW v1.0.07 build 002 Jun 18 2012)
+ * will crash when we use a lower number.
+ */
+ buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
+ }
+
/* send AddBA request */
ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
tid_tx->dialog_token, params.ssn,
- IEEE80211_MAX_AMPDU_BUF,
- tid_tx->timeout);
+ buf_size, tid_tx->timeout);
}
/*
@@ -905,8 +917,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
{
struct tid_ampdu_tx *tid_tx;
struct ieee80211_txq *txq;
- u16 capab, tid;
- u8 buf_size;
+ u16 capab, tid, buf_size;
bool amsdu;
capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index bdf6fa78d0d2..d25da0e66da1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -495,7 +495,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
goto out_unlock;
}
- ieee80211_key_free(key, true);
+ ieee80211_key_free(key, sdata->vif.type == NL80211_IFTYPE_STATION);
ret = 0;
out_unlock:
@@ -1412,6 +1412,11 @@ static int sta_apply_parameters(struct ieee80211_local *local,
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
params->vht_capa, sta);
+ if (params->he_capa)
+ ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
+ (void *)params->he_capa,
+ params->he_capa_len, sta);
+
if (params->opmode_notif_used) {
/* returned value is only needed for rc update, but the
* rc isn't initialized here yet, so ignore it
@@ -3486,7 +3491,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
}
local_bh_disable();
- ieee80211_xmit(sdata, sta, skb);
+ ieee80211_xmit(sdata, sta, skb, 0);
local_bh_enable();
ret = 0;
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index 690c142a7a44..5ac743816b59 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -116,16 +116,16 @@ static void ieee80211_get_stats(struct net_device *dev,
data[i++] = sta->sta_state;
- if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE))
+ if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))
data[i] = 100000ULL *
cfg80211_calculate_bitrate(&sinfo.txrate);
i++;
- if (sinfo.filled & BIT(NL80211_STA_INFO_RX_BITRATE))
+ if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))
data[i] = 100000ULL *
cfg80211_calculate_bitrate(&sinfo.rxrate);
i++;
- if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))
+ if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))
data[i] = (u8)sinfo.signal_avg;
i++;
} else {
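
The ethtool.c hunks matter on 32-bit builds: sinfo.filled is a u64 bitmap, but BIT() expands to a shift of 1UL, so BIT(n) is undefined once n reaches the width of long. BIT_ULL() shifts 1ULL instead and is safe for all NL80211_STA_INFO_* indices. In short:

#include <linux/bitops.h>

u64 filled = 0;

filled |= BIT_ULL(40);    /* 1ULL << 40: well-defined everywhere */
/* filled |= BIT(40);        1UL << 40: undefined where long is 32 bits */
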
diff --git a/net/mac80211/he.c b/net/mac80211/he.c
new file mode 100644
index 000000000000..769078ed5a12
--- /dev/null
+++ b/net/mac80211/he.c
@@ -0,0 +1,55 @@
+/*
+ * HE handling
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "ieee80211_i.h"
+
+void
+ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ const u8 *he_cap_ie, u8 he_cap_len,
+ struct sta_info *sta)
+{
+ struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap;
+ struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
+ u8 he_ppe_size;
+ u8 mcs_nss_size;
+ u8 he_total_size;
+
+ memset(he_cap, 0, sizeof(*he_cap));
+
+ if (!he_cap_ie || !ieee80211_get_he_sta_cap(sband))
+ return;
+
+ /* Make sure size is OK */
+ mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem);
+ he_ppe_size =
+ ieee80211_he_ppe_size(he_cap_ie[sizeof(he_cap->he_cap_elem) +
+ mcs_nss_size],
+ he_cap_ie_elem->phy_cap_info);
+ he_total_size = sizeof(he_cap->he_cap_elem) + mcs_nss_size +
+ he_ppe_size;
+ if (he_cap_len < he_total_size)
+ return;
+
+ memcpy(&he_cap->he_cap_elem, he_cap_ie, sizeof(he_cap->he_cap_elem));
+
+ /* HE Tx/Rx HE MCS NSS Support Field */
+ memcpy(&he_cap->he_mcs_nss_supp,
+ &he_cap_ie[sizeof(he_cap->he_cap_elem)], mcs_nss_size);
+
+ /* Check if there are (optional) PPE Thresholds */
+ if (he_cap->he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
+ memcpy(he_cap->ppe_thres,
+ &he_cap_ie[sizeof(he_cap->he_cap_elem) + mcs_nss_size],
+ he_ppe_size);
+
+ he_cap->has_he = true;
+}
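
A rough view of the element layout the new he.c parser assumes; he_total_size is computed from the variable-length parts and checked against he_cap_len before any copy, so a truncated IE is ignored rather than over-read:

/*
 *  he_cap_ie
 *  +---------------------+----------------------+--------------------------+
 *  | he_cap_elem (fixed) | Tx/Rx MCS NSS (var.) | PPE thresholds (optional)|
 *  +---------------------+----------------------+--------------------------+
 *  |<------------------------- he_total_size -------------------------->|
 *  required: he_total_size <= he_cap_len
 */
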
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 26a7ba3b698f..f849ea814993 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -352,7 +352,7 @@ void ieee80211_ba_session_work(struct work_struct *work)
test_and_clear_bit(tid,
sta->ampdu_mlme.tid_rx_manage_offl))
___ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid,
- IEEE80211_MAX_AMPDU_BUF,
+ IEEE80211_MAX_AMPDU_BUF_HT,
false, true);
if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d1978aa1c15d..172aeae21ae9 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -165,6 +165,7 @@ typedef unsigned __bitwise ieee80211_tx_result;
#define TX_DROP ((__force ieee80211_tx_result) 1u)
#define TX_QUEUED ((__force ieee80211_tx_result) 2u)
+#define IEEE80211_TX_NO_SEQNO BIT(0)
#define IEEE80211_TX_UNICAST BIT(1)
#define IEEE80211_TX_PS_BUFFERED BIT(2)
@@ -364,6 +365,7 @@ enum ieee80211_sta_flags {
IEEE80211_STA_DISABLE_160MHZ = BIT(13),
IEEE80211_STA_DISABLE_WMM = BIT(14),
IEEE80211_STA_ENABLE_RRM = BIT(15),
+ IEEE80211_STA_DISABLE_HE = BIT(16),
};
struct ieee80211_mgd_auth_data {
@@ -1453,6 +1455,10 @@ struct ieee802_11_elems {
const struct ieee80211_vht_cap *vht_cap_elem;
const struct ieee80211_vht_operation *vht_operation;
const struct ieee80211_meshconf_ie *mesh_config;
+ const u8 *he_cap;
+ const struct ieee80211_he_operation *he_operation;
+ const struct ieee80211_mu_edca_param_set *mu_edca_param_set;
+ const u8 *uora_element;
const u8 *mesh_id;
const u8 *peering;
const __le16 *awake_window;
@@ -1482,6 +1488,7 @@ struct ieee802_11_elems {
u8 ext_supp_rates_len;
u8 wmm_info_len;
u8 wmm_param_len;
+ u8 he_cap_len;
u8 mesh_id_len;
u8 peering_len;
u8 preq_len;
@@ -1824,6 +1831,13 @@ void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
enum nl80211_chan_width
ieee80211_sta_rx_bw_to_chan_width(struct sta_info *sta);
+/* HE */
+void
+ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ const u8 *he_cap_ie, u8 he_cap_len,
+ struct sta_info *sta);
+
/* Spectrum management */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
@@ -1880,19 +1894,20 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
bool bss_notify, bool enable_qos);
void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta, struct sk_buff *skb);
+ struct sta_info *sta, struct sk_buff *skb,
+ u32 txdata_flags);
void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int tid,
- enum nl80211_band band);
+ enum nl80211_band band, u32 txdata_flags);
static inline void
ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int tid,
- enum nl80211_band band)
+ enum nl80211_band band, u32 txdata_flags)
{
rcu_read_lock();
- __ieee80211_tx_skb_tid_band(sdata, skb, tid, band);
+ __ieee80211_tx_skb_tid_band(sdata, skb, tid, band, txdata_flags);
rcu_read_unlock();
}
@@ -1910,7 +1925,7 @@ static inline void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
}
__ieee80211_tx_skb_tid_band(sdata, skb, tid,
- chanctx_conf->def.chan->band);
+ chanctx_conf->def.chan->band, 0);
rcu_read_unlock();
}
@@ -2031,26 +2046,27 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
const u8 *bssid, u16 stype, u16 reason,
bool send_frame, u8 *frame_buf);
+
+enum {
+ IEEE80211_PROBE_FLAG_DIRECTED = BIT(0),
+ IEEE80211_PROBE_FLAG_MIN_CONTENT = BIT(1),
+ IEEE80211_PROBE_FLAG_RANDOM_SN = BIT(2),
+};
+
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
size_t buffer_len,
struct ieee80211_scan_ies *ie_desc,
const u8 *ie, size_t ie_len,
u8 bands_used, u32 *rate_masks,
- struct cfg80211_chan_def *chandef);
+ struct cfg80211_chan_def *chandef,
+ u32 flags);
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
const u8 *src, const u8 *dst,
u32 ratemask,
struct ieee80211_channel *chan,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
- bool directed);
-void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata,
- const u8 *src, const u8 *dst,
- const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len,
- u32 ratemask, bool directed, u32 tx_flags,
- struct ieee80211_channel *channel, bool scan);
-
+ u32 flags);
u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum nl80211_band band, u32 *basic_rates);
@@ -2073,6 +2089,9 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
u32 cap);
u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
const struct cfg80211_chan_def *chandef);
+u8 *ieee80211_ie_build_he_cap(u8 *pos,
+ const struct ieee80211_sta_he_cap *he_cap,
+ u8 *end);
int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
const struct ieee80211_supported_band *sband,
const u8 *srates, int srates_len, u32 *rates);
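/*
 * Illustrative sketch (not part of the patch): the refactor above
 * replaces a string of bool parameters (directed, scan, ...) with one
 * u32 flags word built from the IEEE80211_PROBE_FLAG_* enum, so new
 * probe-request behaviours (random SN, minimal content) don't keep
 * widening every prototype. Minimal standalone example of the pattern;
 * all names are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define PROBE_FLAG_DIRECTED	(1U << 0)
#define PROBE_FLAG_MIN_CONTENT	(1U << 1)
#define PROBE_FLAG_RANDOM_SN	(1U << 2)

static void build_probe_req(uint32_t flags)
{
	if (flags & PROBE_FLAG_DIRECTED)
		printf("omit channel (directed probe)\n");
	if (flags & PROBE_FLAG_MIN_CONTENT)
		printf("skip optional IEs\n");
	if (flags & PROBE_FLAG_RANDOM_SN)
		printf("randomize the sequence number\n");
}

int main(void)
{
	/* one parameter carries all current and future options */
	build_probe_req(PROBE_FLAG_DIRECTED | PROBE_FLAG_RANDOM_SN);
	return 0;
}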
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 555e389b7dfa..5e6cf2cee965 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1130,7 +1130,7 @@ static void ieee80211_uninit(struct net_device *dev)
static u16 ieee80211_netdev_select_queue(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
@@ -1176,7 +1176,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
static u16 ieee80211_monitor_select_queue(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
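/*
 * Illustrative sketch (not part of the patch): the core netdev API
 * changed the third ndo_select_queue() argument from an opaque
 * void *accel_priv to struct net_device *sb_dev (the subordinate device
 * used for L2-forwarding offload), so every implementation has to
 * follow suit even when, as here, it ignores the argument. Standalone
 * mock of the new callback shape; the real types live in
 * linux/netdevice.h and these stand-ins are assumptions.
 */
#include <stdint.h>

struct net_device { int dummy; };
struct sk_buff { uint16_t queue_mapping; };
typedef uint16_t (*select_queue_fallback_t)(struct net_device *,
					    struct sk_buff *);

/* updated callback shape: sb_dev replaces the old void *accel_priv */
static uint16_t demo_select_queue(struct net_device *dev,
				  struct sk_buff *skb,
				  struct net_device *sb_dev,
				  select_queue_fallback_t fallback)
{
	(void)dev; (void)sb_dev; (void)fallback;
	return skb->queue_mapping;
}

int main(void)
{
	struct net_device dev = {0}, sb = {0};
	struct sk_buff skb = { .queue_mapping = 2 };

	return demo_select_queue(&dev, &skb, &sb, 0) == 2 ? 0 : 1;
}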
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index ee0d0cc8dc3b..c054ac85793c 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -656,11 +656,15 @@ int ieee80211_key_link(struct ieee80211_key *key,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_key *old_key;
- int idx, ret;
- bool pairwise;
-
- pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
- idx = key->conf.keyidx;
+ int idx = key->conf.keyidx;
+ bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ /*

+ * Delay tailroom updates only for station interfaces: there it
+ * speeds up roaming, while for other interface types it hurts and
+ * can cause warnings to appear.
+ */
+ bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION;
+ int ret;
mutex_lock(&sdata->local->key_mtx);
@@ -688,14 +692,14 @@ int ieee80211_key_link(struct ieee80211_key *key,
increment_tailroom_need_count(sdata);
ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
- ieee80211_key_destroy(old_key, true);
+ ieee80211_key_destroy(old_key, delay_tailroom);
ieee80211_debugfs_key_add(key);
if (!local->wowlan) {
ret = ieee80211_key_enable_hw_accel(key);
if (ret)
- ieee80211_key_free(key, true);
+ ieee80211_key_free(key, delay_tailroom);
} else {
ret = 0;
}
@@ -930,7 +934,8 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
ieee80211_key_replace(key->sdata, key->sta,
key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
key, NULL);
- __ieee80211_key_destroy(key, true);
+ __ieee80211_key_destroy(key, key->sdata->vif.type ==
+ NL80211_IFTYPE_STATION);
}
for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
@@ -940,7 +945,8 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
ieee80211_key_replace(key->sdata, key->sta,
key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
key, NULL);
- __ieee80211_key_destroy(key, true);
+ __ieee80211_key_destroy(key, key->sdata->vif.type ==
+ NL80211_IFTYPE_STATION);
}
mutex_unlock(&local->key_mtx);
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index ba0b507ea691..d6c66fc19716 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -52,13 +52,15 @@ void ieee80211_free_led_names(struct ieee80211_local *local)
kfree(local->radio_led.name);
}
-static void ieee80211_tx_led_activate(struct led_classdev *led_cdev)
+static int ieee80211_tx_led_activate(struct led_classdev *led_cdev)
{
struct ieee80211_local *local = container_of(led_cdev->trigger,
struct ieee80211_local,
tx_led);
atomic_inc(&local->tx_led_active);
+
+ return 0;
}
static void ieee80211_tx_led_deactivate(struct led_classdev *led_cdev)
@@ -70,13 +72,15 @@ static void ieee80211_tx_led_deactivate(struct led_classdev *led_cdev)
atomic_dec(&local->tx_led_active);
}
-static void ieee80211_rx_led_activate(struct led_classdev *led_cdev)
+static int ieee80211_rx_led_activate(struct led_classdev *led_cdev)
{
struct ieee80211_local *local = container_of(led_cdev->trigger,
struct ieee80211_local,
rx_led);
atomic_inc(&local->rx_led_active);
+
+ return 0;
}
static void ieee80211_rx_led_deactivate(struct led_classdev *led_cdev)
@@ -88,13 +92,15 @@ static void ieee80211_rx_led_deactivate(struct led_classdev *led_cdev)
atomic_dec(&local->rx_led_active);
}
-static void ieee80211_assoc_led_activate(struct led_classdev *led_cdev)
+static int ieee80211_assoc_led_activate(struct led_classdev *led_cdev)
{
struct ieee80211_local *local = container_of(led_cdev->trigger,
struct ieee80211_local,
assoc_led);
atomic_inc(&local->assoc_led_active);
+
+ return 0;
}
static void ieee80211_assoc_led_deactivate(struct led_classdev *led_cdev)
@@ -106,13 +112,15 @@ static void ieee80211_assoc_led_deactivate(struct led_classdev *led_cdev)
atomic_dec(&local->assoc_led_active);
}
-static void ieee80211_radio_led_activate(struct led_classdev *led_cdev)
+static int ieee80211_radio_led_activate(struct led_classdev *led_cdev)
{
struct ieee80211_local *local = container_of(led_cdev->trigger,
struct ieee80211_local,
radio_led);
atomic_inc(&local->radio_led_active);
+
+ return 0;
}
static void ieee80211_radio_led_deactivate(struct led_classdev *led_cdev)
@@ -124,13 +132,15 @@ static void ieee80211_radio_led_deactivate(struct led_classdev *led_cdev)
atomic_dec(&local->radio_led_active);
}
-static void ieee80211_tpt_led_activate(struct led_classdev *led_cdev)
+static int ieee80211_tpt_led_activate(struct led_classdev *led_cdev)
{
struct ieee80211_local *local = container_of(led_cdev->trigger,
struct ieee80211_local,
tpt_led);
atomic_inc(&local->tpt_led_active);
+
+ return 0;
}
static void ieee80211_tpt_led_deactivate(struct led_classdev *led_cdev)
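/*
 * Illustrative sketch (not part of the patch): the LED trigger core now
 * expects activate() to return an int so that activation can fail;
 * these mac80211 triggers cannot fail, hence the bare "return 0".
 * Minimal standalone mock of the pattern (the real struct lives in
 * linux/leds.h; this stand-in is an assumption):
 */
#include <stdio.h>

struct led_classdev { const char *name; };

/* old shape was: static void demo_activate(struct led_classdev *cdev) */
static int demo_activate(struct led_classdev *cdev)
{
	printf("activating trigger for %s\n", cdev->name);
	return 0;	/* nothing here can fail */
}

int main(void)
{
	struct led_classdev cdev = { .name = "demo-led" };

	return demo_activate(&cdev);
}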
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index fb73451ed85e..4fb2709cb527 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -3,6 +3,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -557,10 +558,19 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211);
- if (!ops->hw_scan)
+ if (!ops->hw_scan) {
wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_AP_SCAN;
-
+ /*
+ * If the driver correctly uses the probe request (template)
+ * from mac80211, then both of these should be supported even
+ * with hw scan - but let drivers opt in.
+ */
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_SCAN_RANDOM_SN);
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT);
+ }
if (!ops->set_key)
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -588,8 +598,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
local->hw.queues = 1;
local->hw.max_rates = 1;
local->hw.max_report_rates = 0;
- local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
- local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT;
+ local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT;
local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE;
local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
@@ -816,7 +826,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
int result, i;
enum nl80211_band band;
int channels, max_bitrates;
- bool supp_ht, supp_vht;
+ bool supp_ht, supp_vht, supp_he;
netdev_features_t feature_whitelist;
struct cfg80211_chan_def dflt_chandef = {};
@@ -896,6 +906,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
max_bitrates = 0;
supp_ht = false;
supp_vht = false;
+ supp_he = false;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband;
@@ -922,6 +933,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
supp_ht = supp_ht || sband->ht_cap.ht_supported;
supp_vht = supp_vht || sband->vht_cap.vht_supported;
+ if (!supp_he)
+ supp_he = !!ieee80211_get_he_sta_cap(sband);
+
if (!sband->ht_cap.ht_supported)
continue;
@@ -1011,6 +1025,18 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->scan_ies_len +=
2 + sizeof(struct ieee80211_vht_cap);
+ /* HE cap element is variable in size - set len to allow max size */
+ /*
+ * TODO: 1 is added at the end of the calculation to account for the
+ * HE capability element temporarily being carried in an Extension
+ * element. Remove the extra byte once the IE gets its final ID.
+ */
+ if (supp_he)
+ local->scan_ies_len +=
+ 2 + sizeof(struct ieee80211_he_cap_elem) +
+ sizeof(struct ieee80211_he_mcs_nss_supp) +
+ IEEE80211_HE_PPE_THRES_MAX_LEN + 1;
+
if (!local->ops->hw_scan) {
/* For hw_scan, driver needs to set these up. */
local->hw.wiphy->max_scan_ssids = 4;
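/*
 * Illustrative sketch (not part of the patch): the worst-case HE IE
 * budget added to local->scan_ies_len above. The sizes are assumptions
 * matching the 802.11ax draft structures (17-byte fixed caps, 12-byte
 * MCS/NSS set, 25-byte PPE thresholds); the authoritative values come
 * from include/linux/ieee80211.h.
 */
#include <stdio.h>

int main(void)
{
	int hdr = 2;		/* element ID + length octets */
	int fixed = 17;		/* he_cap_elem: 6 MAC + 11 PHY cap octets */
	int mcs_nss_max = 12;	/* 4 octets per width: 80, 160, 80+80 */
	int ppe_max = 25;	/* IEEE80211_HE_PPE_THRES_MAX_LEN */
	int ext_id = 1;		/* temporary: carried as an Extension IE */

	printf("max HE cap IE: %d octets\n",
	       hdr + fixed + mcs_nss_max + ppe_max + ext_id);
	return 0;
}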
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a59187c016e0..7fb9957359a3 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -149,6 +149,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel *channel,
const struct ieee80211_ht_operation *ht_oper,
const struct ieee80211_vht_operation *vht_oper,
+ const struct ieee80211_he_operation *he_oper,
struct cfg80211_chan_def *chandef, bool tracking)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -207,7 +208,27 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
}
vht_chandef = *chandef;
- if (!ieee80211_chandef_vht_oper(vht_oper, &vht_chandef)) {
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && he_oper &&
+ (le32_to_cpu(he_oper->he_oper_params) &
+ IEEE80211_HE_OPERATION_VHT_OPER_INFO)) {
+ struct ieee80211_vht_operation he_oper_vht_cap;
+
+ /*
+ * Set only the first 3 bytes (the other 2 aren't used by
+ * ieee80211_chandef_vht_oper() anyway)
+ */
+ memcpy(&he_oper_vht_cap, he_oper->optional, 3);
+ he_oper_vht_cap.basic_mcs_set = cpu_to_le16(0);
+
+ if (!ieee80211_chandef_vht_oper(&he_oper_vht_cap,
+ &vht_chandef)) {
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
+ sdata_info(sdata,
+ "HE AP VHT information is invalid, disable HE\n");
+ ret = IEEE80211_STA_DISABLE_HE;
+ goto out;
+ }
+ } else if (!ieee80211_chandef_vht_oper(vht_oper, &vht_chandef)) {
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
"AP VHT information is invalid, disable VHT\n");
@@ -300,12 +321,14 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_ht_cap *ht_cap,
const struct ieee80211_ht_operation *ht_oper,
const struct ieee80211_vht_operation *vht_oper,
+ const struct ieee80211_he_operation *he_oper,
const u8 *bssid, u32 *changed)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_supported_band *sband;
- struct ieee80211_channel *chan;
+ struct ieee80211_channel *chan = sdata->vif.bss_conf.chandef.chan;
+ struct ieee80211_supported_band *sband =
+ local->hw.wiphy->bands[chan->band];
struct cfg80211_chan_def chandef;
u16 ht_opmode;
u32 flags;
@@ -320,6 +343,11 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT)
vht_oper = NULL;
+ /* don't check HE if we associated as non-HE station */
+ if (ifmgd->flags & IEEE80211_STA_DISABLE_HE ||
+ !ieee80211_get_he_sta_cap(sband))
+ he_oper = NULL;
+
if (WARN_ON_ONCE(!sta))
return -EINVAL;
@@ -333,12 +361,9 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
}
- chan = sdata->vif.bss_conf.chandef.chan;
- sband = local->hw.wiphy->bands[chan->band];
-
- /* calculate new channel (type) based on HT/VHT operation IEs */
+ /* calculate new channel (type) based on HT/VHT/HE operation IEs */
flags = ieee80211_determine_chantype(sdata, sband, chan,
- ht_oper, vht_oper,
+ ht_oper, vht_oper, he_oper,
&chandef, true);
/*
@@ -582,6 +607,34 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
}
+/* This function determines HE capability flags for the association
+ * and builds the IE.
+ */
+static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb,
+ struct ieee80211_supported_band *sband)
+{
+ u8 *pos;
+ const struct ieee80211_sta_he_cap *he_cap = NULL;
+ u8 he_cap_size;
+
+ he_cap = ieee80211_get_he_sta_cap(sband);
+ if (!he_cap)
+ return;
+
+ /*
+ * TODO: the extra 1 is because the HE capability is temporarily carried
+ * inside an EXTENSION IE. Get rid of it once the IE moves to its own ID.
+ */
+ he_cap_size =
+ 2 + 1 + sizeof(he_cap->he_cap_elem) +
+ ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
+ ieee80211_he_ppe_size(he_cap->ppe_thres[0],
+ he_cap->he_cap_elem.phy_cap_info);
+ pos = skb_put(skb, he_cap_size);
+ ieee80211_ie_build_he_cap(pos, he_cap, pos + he_cap_size);
+}
+
static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
@@ -643,6 +696,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
2 + 2 * sband->n_channels + /* supported channels */
2 + sizeof(struct ieee80211_ht_cap) + /* HT */
2 + sizeof(struct ieee80211_vht_cap) + /* VHT */
+ 2 + 1 + sizeof(struct ieee80211_he_cap_elem) + /* HE */
+ sizeof(struct ieee80211_he_mcs_nss_supp) +
+ IEEE80211_HE_PPE_THRES_MAX_LEN +
assoc_data->ie_len + /* extra IEs */
(assoc_data->fils_kek_len ? 16 /* AES-SIV */ : 0) +
9, /* WMM */
@@ -827,11 +883,41 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
offset = noffset;
}
+ /* if present, add any custom IEs that go before HE */
+ if (assoc_data->ie_len) {
+ static const u8 before_he[] = {
+ /*
+ * no need to list the ones split off before VHT
+ * or generated here
+ */
+ WLAN_EID_OPMODE_NOTIF,
+ WLAN_EID_EXTENSION, WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE,
+ /* 11ai elements */
+ WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_SESSION,
+ WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_PUBLIC_KEY,
+ WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_KEY_CONFIRM,
+ WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_HLP_CONTAINER,
+ WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN,
+ /* TODO: add 11ah/11aj/11ak elements */
+ };
+
+ /* RIC already taken above, so no need to handle here anymore */
+ noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len,
+ before_he, ARRAY_SIZE(before_he),
+ offset);
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, assoc_data->ie + offset, noffset - offset);
+ offset = noffset;
+ }
+
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
ieee80211_add_vht_ie(sdata, skb, sband,
&assoc_data->ap_vht_cap);
- /* if present, add any custom non-vendor IEs that go after HT */
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
+ ieee80211_add_he_ie(sdata, skb, sband);
+
+ /* if present, add any custom non-vendor IEs that go after HE */
if (assoc_data->ie_len) {
noffset = ieee80211_ie_split_vendor(assoc_data->ie,
assoc_data->ie_len,
@@ -898,6 +984,11 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
struct ieee80211_hdr_3addr *nullfunc;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ /* Don't send NDPs when the STA is connected using HE */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
+ return;
+
skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif,
!ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP));
if (!skb)
@@ -929,6 +1020,10 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
return;
+ /* Don't send NDPs when connected using HE */
+ if (!(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
+ return;
+
skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
if (!skb)
return;
@@ -1700,9 +1795,11 @@ static void ieee80211_sta_handle_tspec_ac_params_wk(struct work_struct *work)
}
/* MLME */
-static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- const u8 *wmm_param, size_t wmm_param_len)
+static bool
+ieee80211_sta_wmm_params(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *wmm_param, size_t wmm_param_len,
+ const struct ieee80211_mu_edca_param_set *mu_edca)
{
struct ieee80211_tx_queue_params params[IEEE80211_NUM_ACS];
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -1749,6 +1846,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
sdata->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
uapsd = true;
+ params[ac].mu_edca = !!mu_edca;
+ if (mu_edca)
+ params[ac].mu_edca_param_rec = mu_edca->ac_bk;
break;
case 2: /* AC_VI */
ac = IEEE80211_AC_VI;
@@ -1756,6 +1856,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
sdata->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
uapsd = true;
+ params[ac].mu_edca = !!mu_edca;
+ if (mu_edca)
+ params[ac].mu_edca_param_rec = mu_edca->ac_vi;
break;
case 3: /* AC_VO */
ac = IEEE80211_AC_VO;
@@ -1763,6 +1866,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
sdata->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
uapsd = true;
+ params[ac].mu_edca = !!mu_edca;
+ if (mu_edca)
+ params[ac].mu_edca_param_rec = mu_edca->ac_vo;
break;
case 0: /* AC_BE */
default:
@@ -1771,6 +1877,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
sdata->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
uapsd = true;
+ params[ac].mu_edca = !!mu_edca;
+ if (mu_edca)
+ params[ac].mu_edca_param_rec = mu_edca->ac_be;
break;
}
@@ -2219,6 +2328,20 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
ieee80211_sta_reset_conn_monitor(sdata);
}
+static void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata,
+ const u8 *src, const u8 *dst,
+ const u8 *ssid, size_t ssid_len,
+ struct ieee80211_channel *channel)
+{
+ struct sk_buff *skb;
+
+ skb = ieee80211_build_probe_req(sdata, src, dst, (u32)-1, channel,
+ ssid, ssid_len, NULL, 0,
+ IEEE80211_PROBE_FLAG_DIRECTED);
+ if (skb)
+ ieee80211_tx_skb(sdata, skb);
+}
+
static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -2265,10 +2388,9 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
else
ssid_len = ssid[1];
- ieee80211_send_probe_req(sdata, sdata->vif.addr, dst,
- ssid + 2, ssid_len, NULL,
- 0, (u32) -1, true, 0,
- ifmgd->associated->channel, false);
+ ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst,
+ ssid + 2, ssid_len,
+ ifmgd->associated->channel);
rcu_read_unlock();
}
@@ -2370,7 +2492,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
skb = ieee80211_build_probe_req(sdata, sdata->vif.addr, cbss->bssid,
(u32) -1, cbss->channel,
ssid + 2, ssid_len,
- NULL, 0, true);
+ NULL, 0, IEEE80211_PROBE_FLAG_DIRECTED);
rcu_read_unlock();
return skb;
@@ -3008,6 +3130,25 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
goto out;
}
+ /*
+ * If the AP doesn't support HT, or is missing the mandatory HE IEs,
+ * mark HE as disabled. On the 5 GHz band, additionally require VHT
+ * support.
+ */
+ if (ifmgd->flags & IEEE80211_STA_DISABLE_HT ||
+ (sband->band == NL80211_BAND_5GHZ &&
+ ifmgd->flags & IEEE80211_STA_DISABLE_VHT) ||
+ (!elems.he_cap && !elems.he_operation))
+ ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
+ (!elems.he_cap || !elems.he_operation)) {
+ mutex_unlock(&sdata->local->sta_mtx);
+ sdata_info(sdata,
+ "HE AP is missing HE capability/operation\n");
+ ret = false;
+ goto out;
+ }
+
/* Set up internal HT/VHT capabilities */
if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
@@ -3017,6 +3158,48 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
elems.vht_cap_elem, sta);
+ if (elems.he_operation && !(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
+ elems.he_cap) {
+ ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
+ elems.he_cap,
+ elems.he_cap_len,
+ sta);
+
+ bss_conf->he_support = sta->sta.he_cap.has_he;
+ } else {
+ bss_conf->he_support = false;
+ }
+
+ if (bss_conf->he_support) {
+ u32 he_oper_params =
+ le32_to_cpu(elems.he_operation->he_oper_params);
+
+ bss_conf->bss_color = he_oper_params &
+ IEEE80211_HE_OPERATION_BSS_COLOR_MASK;
+ bss_conf->htc_trig_based_pkt_ext =
+ (he_oper_params &
+ IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK) >>
+ IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET;
+ bss_conf->frame_time_rts_th =
+ (he_oper_params &
+ IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK) >>
+ IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET;
+
+ bss_conf->multi_sta_back_32bit =
+ sta->sta.he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP;
+
+ bss_conf->ack_enabled =
+ sta->sta.he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_ACK_EN;
+
+ bss_conf->uora_exists = !!elems.uora_element;
+ if (elems.uora_element)
+ bss_conf->uora_ocw_range = elems.uora_element[0];
+
+ /* TODO: OPEN: what happens if BSS color disable is set? */
+ }
+
/*
* Some APs, e.g. Netgear WNDR3700, report invalid HT operation data
* in their association response, so ignore that data for our own
@@ -3076,7 +3259,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
if (ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
ieee80211_set_wmm_default(sdata, false, false);
} else if (!ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
- elems.wmm_param_len)) {
+ elems.wmm_param_len,
+ elems.mu_edca_param_set)) {
/* still enable QoS since we might have HT/VHT */
ieee80211_set_wmm_default(sdata, false, true);
/* set the disable-WMM flag in this case to disable
@@ -3590,7 +3774,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) &&
ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
- elems.wmm_param_len))
+ elems.wmm_param_len,
+ elems.mu_edca_param_set))
changed |= BSS_CHANGED_QOS;
/*
@@ -3629,7 +3814,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (ieee80211_config_bw(sdata, sta,
elems.ht_cap_elem, elems.ht_operation,
- elems.vht_operation, bssid, &changed)) {
+ elems.vht_operation, elems.he_operation,
+ bssid, &changed)) {
mutex_unlock(&local->sta_mtx);
sdata_info(sdata,
"failed to follow AP %pM bandwidth change, disconnect\n",
@@ -4266,6 +4452,68 @@ static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
return chains;
}
+static bool
+ieee80211_verify_sta_he_mcs_support(struct ieee80211_supported_band *sband,
+ const struct ieee80211_he_operation *he_op)
+{
+ const struct ieee80211_sta_he_cap *sta_he_cap =
+ ieee80211_get_he_sta_cap(sband);
+ u16 ap_min_req_set;
+ int i;
+
+ if (!sta_he_cap || !he_op)
+ return false;
+
+ ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
+
+ /* Need to go over 80 MHz, 160 MHz and 80+80 MHz */
+ for (i = 0; i < 3; i++) {
+ const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp =
+ &sta_he_cap->he_mcs_nss_supp;
+ u16 sta_mcs_map_rx =
+ le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]);
+ u16 sta_mcs_map_tx =
+ le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]);
+ u8 nss;
+ bool verified = true;
+
+ /*
+ * For each bandwidth there is a maximum of 8 spatial streams
+ * possible. Each of the sta_mcs_map_* is a 16-bit struct built
+ * of 2 bits per NSS (1-8), with the values defined in enum
+ * ieee80211_he_mcs_support. Need to make sure the STA's TX and
+ * RX capabilities aren't less than the AP's minimum requirements
+ * for this HE BSS, per spatial stream.
+ * It is enough for one such bandwidth to meet the requirements.
+ */
+ for (nss = 8; nss > 0; nss--) {
+ u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3;
+ u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3;
+ u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+
+ if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
+ continue;
+
+ /*
+ * Make sure the HE AP doesn't require MCSs that aren't
+ * supported by the client
+ */
+ if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) {
+ verified = false;
+ break;
+ }
+ }
+
+ if (verified)
+ return true;
+ }
+
+ /* If we get here, the STA doesn't meet the AP's minimum HE requirements */
+ return false;
+}
+
static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss)
{
@@ -4274,6 +4522,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_ht_cap *ht_cap = NULL;
const struct ieee80211_ht_operation *ht_oper = NULL;
const struct ieee80211_vht_operation *vht_oper = NULL;
+ const struct ieee80211_he_operation *he_oper = NULL;
struct ieee80211_supported_band *sband;
struct cfg80211_chan_def chandef;
int ret;
@@ -4329,6 +4578,24 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
}
}
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
+ ieee80211_get_he_sta_cap(sband)) {
+ const struct cfg80211_bss_ies *ies;
+ const u8 *he_oper_ie;
+
+ ies = rcu_dereference(cbss->ies);
+ he_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION,
+ ies->data, ies->len);
+ if (he_oper_ie &&
+ he_oper_ie[1] == ieee80211_he_oper_size(&he_oper_ie[3]))
+ he_oper = (void *)(he_oper_ie + 3);
+ else
+ he_oper = NULL;
+
+ if (!ieee80211_verify_sta_he_mcs_support(sband, he_oper))
+ ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+ }
+
/* Allow VHT if at least one channel on the sband supports 80 MHz */
have_80mhz = false;
for (i = 0; i < sband->n_channels; i++) {
@@ -4345,7 +4612,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
cbss->channel,
- ht_oper, vht_oper,
+ ht_oper, vht_oper, he_oper,
&chandef, false);
sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
@@ -4751,8 +5018,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+ ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
netdev_info(sdata->dev,
- "disabling HT/VHT due to WEP/TKIP use\n");
+ "disabling HE/HT/VHT due to WEP/TKIP use\n");
}
}
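/*
 * Illustrative sketch (not part of the patch): the per-NSS walk used by
 * ieee80211_verify_sta_he_mcs_support() above. Each 16-bit MCS map
 * packs 8 two-bit entries (one per spatial stream); the value 3 means
 * "NSS not supported". Standalone mock with made-up example maps:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HE_MCS_NOT_SUPPORTED 3

static bool sta_meets_ap_min(uint16_t sta_rx, uint16_t sta_tx,
			     uint16_t ap_min)
{
	for (int nss = 8; nss > 0; nss--) {
		uint8_t rx = (sta_rx >> (2 * (nss - 1))) & 3;
		uint8_t tx = (sta_tx >> (2 * (nss - 1))) & 3;
		uint8_t ap = (ap_min >> (2 * (nss - 1))) & 3;

		if (ap == HE_MCS_NOT_SUPPORTED)
			continue;	/* AP doesn't require this NSS */
		if (rx == HE_MCS_NOT_SUPPORTED ||
		    tx == HE_MCS_NOT_SUPPORTED || ap > rx || ap > tx)
			return false;
	}
	return true;
}

int main(void)
{
	/* AP requires MCS 0-7 (value 0) on one stream only */
	uint16_t ap_min = 0xfffc;
	/* STA supports MCS 0-11 (value 2) on two streams */
	uint16_t sta_map = 0xfffa;

	printf("meets min requirements: %s\n",
	       sta_meets_ap_min(sta_map, sta_map, ap_min) ? "yes" : "no");
	return 0;
}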
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index f1d40b6645ff..8ef4153cd299 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -262,7 +262,7 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
if (roc->mgmt_tx_cookie) {
if (!WARN_ON(!roc->frame)) {
ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7,
- roc->chan->band);
+ roc->chan->band, 0);
roc->frame = NULL;
}
} else {
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 76048b53c5b2..07fb219327d6 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -751,4 +751,3 @@ rc80211_minstrel_exit(void)
{
ieee80211_rate_control_unregister(&mac80211_minstrel);
}
-
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 932985ca4e66..64742f2765c4 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -175,6 +175,20 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
len += 12;
}
+ if (status->encoding == RX_ENC_HE &&
+ status->flag & RX_FLAG_RADIOTAP_HE) {
+ len = ALIGN(len, 2);
+ len += 12;
+ BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
+ }
+
+ if (status->encoding == RX_ENC_HE &&
+ status->flag & RX_FLAG_RADIOTAP_HE_MU) {
+ len = ALIGN(len, 2);
+ len += 12;
+ BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
+ }
+
if (status->chains) {
/* antenna and antenna signal fields */
len += 2 * hweight8(status->chains);
@@ -263,6 +277,19 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
int mpdulen, chain;
unsigned long chains = status->chains;
struct ieee80211_vendor_radiotap rtap = {};
+ struct ieee80211_radiotap_he he = {};
+ struct ieee80211_radiotap_he_mu he_mu = {};
+
+ if (status->flag & RX_FLAG_RADIOTAP_HE) {
+ he = *(struct ieee80211_radiotap_he *)skb->data;
+ skb_pull(skb, sizeof(he));
+ WARN_ON_ONCE(status->encoding != RX_ENC_HE);
+ }
+
+ if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
+ he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
+ skb_pull(skb, sizeof(he_mu));
+ }
if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
@@ -520,6 +547,89 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
*pos++ = flags;
}
+ if (status->encoding == RX_ENC_HE &&
+ status->flag & RX_FLAG_RADIOTAP_HE) {
+#define HE_PREP(f, val) cpu_to_le16(FIELD_PREP(IEEE80211_RADIOTAP_HE_##f, val))
+
+ if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
+ he.data6 |= HE_PREP(DATA6_NSTS,
+ FIELD_GET(RX_ENC_FLAG_STBC_MASK,
+ status->enc_flags));
+ he.data3 |= HE_PREP(DATA3_STBC, 1);
+ } else {
+ he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
+ }
+
+#define CHECK_GI(s) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
+ (int)NL80211_RATE_INFO_HE_GI_##s)
+
+ CHECK_GI(0_8);
+ CHECK_GI(1_6);
+ CHECK_GI(3_2);
+
+ he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
+ he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
+ he.data3 |= HE_PREP(DATA3_CODING,
+ !!(status->enc_flags & RX_ENC_FLAG_LDPC));
+
+ he.data5 |= HE_PREP(DATA5_GI, status->he_gi);
+
+ switch (status->bw) {
+ case RATE_INFO_BW_20:
+ he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
+ break;
+ case RATE_INFO_BW_40:
+ he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
+ break;
+ case RATE_INFO_BW_80:
+ he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
+ break;
+ case RATE_INFO_BW_160:
+ he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
+ break;
+ case RATE_INFO_BW_HE_RU:
+#define CHECK_RU_ALLOC(s) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
+ NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)
+
+ CHECK_RU_ALLOC(26);
+ CHECK_RU_ALLOC(52);
+ CHECK_RU_ALLOC(106);
+ CHECK_RU_ALLOC(242);
+ CHECK_RU_ALLOC(484);
+ CHECK_RU_ALLOC(996);
+ CHECK_RU_ALLOC(2x996);
+
+ he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
+ status->he_ru + 4);
+ break;
+ default:
+ WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
+ }
+
+ /* ensure 2 byte alignment */
+ while ((pos - (u8 *)rthdr) & 1)
+ pos++;
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
+ memcpy(pos, &he, sizeof(he));
+ pos += sizeof(he);
+ }
+
+ if (status->encoding == RX_ENC_HE &&
+ status->flag & RX_FLAG_RADIOTAP_HE_MU) {
+ /* ensure 2 byte alignment */
+ while ((pos - (u8 *)rthdr) & 1)
+ pos++;
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
+ memcpy(pos, &he_mu, sizeof(he_mu));
+ pos += sizeof(he_mu);
+ }
+
for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
*pos++ = status->chain_signal[chain];
*pos++ = chain;
@@ -613,6 +723,12 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
rcu_dereference(local->monitor_sdata);
bool only_monitor = false;
+ if (status->flag & RX_FLAG_RADIOTAP_HE)
+ rtap_space += sizeof(struct ieee80211_radiotap_he);
+
+ if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
+ rtap_space += sizeof(struct ieee80211_radiotap_he_mu);
+
if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;
@@ -3238,7 +3354,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
}
__ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
- status->band);
+ status->band, 0);
}
dev_kfree_skb(rx->skb);
return RX_QUEUED;
@@ -3383,8 +3499,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
status = IEEE80211_SKB_RXCB((rx->skb));
sband = rx->local->hw.wiphy->bands[status->band];
- if (!(status->encoding == RX_ENC_HT) &&
- !(status->encoding == RX_ENC_VHT))
+ if (status->encoding == RX_ENC_LEGACY)
rate = &sband->bitrates[status->rate_idx];
ieee80211_rx_cooked_monitor(rx, rate);
@@ -4383,6 +4498,14 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
status->rate_idx, status->nss))
goto drop;
break;
+ case RX_ENC_HE:
+ if (WARN_ONCE(status->rate_idx > 11 ||
+ !status->nss ||
+ status->nss > 8,
+ "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
+ status->rate_idx, status->nss))
+ goto drop;
+ break;
default:
WARN_ON_ONCE(1);
/* fall through */
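/*
 * Illustrative sketch (not part of the patch): what the HE_PREP() macro
 * above boils down to. FIELD_PREP() shifts a value into the position
 * described by a mask (the kernel versions in linux/bitfield.h add
 * compile-time checking, and HE_PREP() additionally converts to little
 * endian). Simplified userspace versions; the DATA5_GI mask value is an
 * assumption for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_PREP(mask, val) \
	(((uint16_t)(val) * ((mask) & -(mask))) & (mask))
#define FIELD_GET(mask, reg) \
	(((reg) & (mask)) / ((mask) & -(mask)))

#define DATA5_GI 0x0030	/* assumed 2-bit guard-interval field */

int main(void)
{
	uint16_t data5 = 0;

	data5 |= FIELD_PREP(DATA5_GI, 2);	/* 3.2 us GI */
	printf("data5=%#06x gi=%u\n", data5,
	       (unsigned)FIELD_GET(DATA5_GI, data5));
	return 0;
}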
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 2e917a6d239d..5d2a11777718 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -20,6 +20,7 @@
#include <net/sch_generic.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/random.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
@@ -293,6 +294,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
struct cfg80211_chan_def chandef;
u8 bands_used = 0;
int i, ielen, n_chans;
+ u32 flags = 0;
req = rcu_dereference_protected(local->scan_req,
lockdep_is_held(&local->mtx));
@@ -331,12 +333,16 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
local->hw_scan_req->req.n_channels = n_chans;
ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
+ if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
+ flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT;
+
ielen = ieee80211_build_preq_ies(local,
(u8 *)local->hw_scan_req->req.ie,
local->hw_scan_ies_bufsize,
&local->hw_scan_req->ies,
req->ie, req->ie_len,
- bands_used, req->rates, &chandef);
+ bands_used, req->rates, &chandef,
+ flags);
local->hw_scan_req->req.ie_len = ielen;
local->hw_scan_req->req.no_cck = req->no_cck;
ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr);
@@ -528,6 +534,35 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local)
round_jiffies_relative(0));
}
+static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
+ const u8 *src, const u8 *dst,
+ const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len,
+ u32 ratemask, u32 flags, u32 tx_flags,
+ struct ieee80211_channel *channel)
+{
+ struct sk_buff *skb;
+ u32 txdata_flags = 0;
+
+ skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel,
+ ssid, ssid_len,
+ ie, ie_len, flags);
+
+ if (skb) {
+ if (flags & IEEE80211_PROBE_FLAG_RANDOM_SN) {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u16 sn = get_random_u32();
+
+ txdata_flags |= IEEE80211_TX_NO_SEQNO;
+ hdr->seq_ctrl =
+ cpu_to_le16(IEEE80211_SN_TO_SEQ(sn));
+ }
+ IEEE80211_SKB_CB(skb)->flags |= tx_flags;
+ ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band,
+ txdata_flags);
+ }
+}
+
static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
unsigned long *next_delay)
{
@@ -535,7 +570,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata;
struct cfg80211_scan_request *scan_req;
enum nl80211_band band = local->hw.conf.chandef.chan->band;
- u32 tx_flags;
+ u32 flags = 0, tx_flags;
scan_req = rcu_dereference_protected(local->scan_req,
lockdep_is_held(&local->mtx));
@@ -543,17 +578,21 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
tx_flags = IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
if (scan_req->no_cck)
tx_flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
+ if (scan_req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
+ flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT;
+ if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_SN)
+ flags |= IEEE80211_PROBE_FLAG_RANDOM_SN;
sdata = rcu_dereference_protected(local->scan_sdata,
lockdep_is_held(&local->mtx));
for (i = 0; i < scan_req->n_ssids; i++)
- ieee80211_send_probe_req(
+ ieee80211_send_scan_probe_req(
sdata, local->scan_addr, scan_req->bssid,
scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len,
scan_req->ie, scan_req->ie_len,
- scan_req->rates[band], false,
- tx_flags, local->hw.conf.chandef.chan, true);
+ scan_req->rates[band], flags,
+ tx_flags, local->hw.conf.chandef.chan);
/*
* After sending probe requests, wait for probe responses
@@ -1141,6 +1180,7 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
u32 rate_masks[NUM_NL80211_BANDS] = {};
u8 bands_used = 0;
u8 *ie;
+ u32 flags = 0;
iebufsz = local->scan_ies_len + req->ie_len;
@@ -1157,6 +1197,9 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
}
}
+ if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
+ flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT;
+
ie = kcalloc(iebufsz, num_bands, GFP_KERNEL);
if (!ie) {
ret = -ENOMEM;
@@ -1167,7 +1210,8 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
ieee80211_build_preq_ies(local, ie, num_bands * iebufsz,
&sched_scan_ies, req->ie,
- req->ie_len, bands_used, rate_masks, &chandef);
+ req->ie_len, bands_used, rate_masks, &chandef,
+ flags);
ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
if (ret == 0) {
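/*
 * Illustrative sketch (not part of the patch): the sequence-control
 * field layout behind IEEE80211_SN_TO_SEQ() in the random-SN path
 * above - a 12-bit sequence number in the high bits and a 4-bit
 * fragment number in the low bits.
 */
#include <stdint.h>
#include <stdio.h>

#define SN_TO_SEQ(sn)	(((uint16_t)(sn) << 4) & 0xfff0)
#define SEQ_TO_SN(seq)	(((uint16_t)(seq) & 0xfff0) >> 4)

int main(void)
{
	uint16_t sn = 2077 & 0x0fff;	/* e.g. a random 12-bit SN */
	uint16_t seq_ctrl = SN_TO_SEQ(sn);

	printf("seq_ctrl=%#06x sn=%u frag=%u\n",
	       seq_ctrl, SEQ_TO_SN(seq_ctrl), seq_ctrl & 0x000f);
	return 0;
}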
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 6428f1ac37b6..f34202242d24 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1323,6 +1323,11 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid,
struct ieee80211_tx_info *info;
struct ieee80211_chanctx_conf *chanctx_conf;
+ /* Don't send NDPs when the STA is connected using HE */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
+ return;
+
if (qos) {
fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_QOS_NULLFUNC |
@@ -1391,7 +1396,7 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid,
}
info->band = chanctx_conf->def.chan->band;
- ieee80211_xmit(sdata, sta, skb);
+ ieee80211_xmit(sdata, sta, skb, 0);
rcu_read_unlock();
}
@@ -1968,7 +1973,7 @@ sta_get_last_rx_stats(struct sta_info *sta)
return stats;
}
-static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate,
+static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
struct rate_info *rinfo)
{
rinfo->bw = STA_STATS_GET(BW, rate);
@@ -2005,6 +2010,14 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate,
rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
break;
}
+ case STA_STATS_RATE_TYPE_HE:
+ rinfo->flags = RATE_INFO_FLAGS_HE_MCS;
+ rinfo->mcs = STA_STATS_GET(HE_MCS, rate);
+ rinfo->nss = STA_STATS_GET(HE_NSS, rate);
+ rinfo->he_gi = STA_STATS_GET(HE_GI, rate);
+ rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate);
+ rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate);
+ break;
}
}
@@ -2101,38 +2114,38 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
drv_sta_statistics(local, sdata, &sta->sta, sinfo);
- sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME) |
- BIT(NL80211_STA_INFO_STA_FLAGS) |
- BIT(NL80211_STA_INFO_BSS_PARAM) |
- BIT(NL80211_STA_INFO_CONNECTED_TIME) |
- BIT(NL80211_STA_INFO_RX_DROP_MISC);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
+ BIT_ULL(NL80211_STA_INFO_STA_FLAGS) |
+ BIT_ULL(NL80211_STA_INFO_BSS_PARAM) |
+ BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) |
+ BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count;
- sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_LOSS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS);
}
sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
sinfo->inactive_time =
jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta));
- if (!(sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES64) |
- BIT(NL80211_STA_INFO_TX_BYTES)))) {
+ if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) |
+ BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) {
sinfo->tx_bytes = 0;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
sinfo->tx_bytes += sta->tx_stats.bytes[ac];
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
}
- if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_PACKETS))) {
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) {
sinfo->tx_packets = 0;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
sinfo->tx_packets += sta->tx_stats.packets[ac];
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
}
- if (!(sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES64) |
- BIT(NL80211_STA_INFO_RX_BYTES)))) {
+ if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
+ BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
if (sta->pcpu_rx_stats) {
@@ -2144,10 +2157,10 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
}
}
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
}
- if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_PACKETS))) {
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) {
sinfo->rx_packets = sta->rx_stats.packets;
if (sta->pcpu_rx_stats) {
for_each_possible_cpu(cpu) {
@@ -2157,17 +2170,17 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
sinfo->rx_packets += cpurxs->packets;
}
}
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
}
- if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_RETRIES))) {
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) {
sinfo->tx_retries = sta->status_stats.retry_count;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_RETRIES);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
}
- if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_FAILED))) {
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) {
sinfo->tx_failed = sta->status_stats.retry_failed;
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
}
sinfo->rx_dropped_misc = sta->rx_stats.dropped;
@@ -2182,23 +2195,23 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
!(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX) |
- BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) |
+ BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif);
}
if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
- if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) {
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) {
sinfo->signal = (s8)last_rxstats->last_signal;
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
if (!sta->pcpu_rx_stats &&
- !(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
+ !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) {
sinfo->signal_avg =
-ewma_signal_read(&sta->rx_stats_avg.signal);
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
}
@@ -2207,11 +2220,11 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
* pcpu statistics
*/
if (last_rxstats->chains &&
- !(sinfo->filled & (BIT(NL80211_STA_INFO_CHAIN_SIGNAL) |
- BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
- sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL);
+ !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) |
+ BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
if (!sta->pcpu_rx_stats)
- sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
sinfo->chains = last_rxstats->chains;
@@ -2223,15 +2236,15 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
}
}
- if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_BITRATE))) {
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate,
&sinfo->txrate);
- sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
- if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_BITRATE))) {
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) {
if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0)
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
}
if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) {
@@ -2244,18 +2257,18 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
if (ieee80211_vif_is_mesh(&sdata->vif)) {
#ifdef CONFIG_MAC80211_MESH
- sinfo->filled |= BIT(NL80211_STA_INFO_LLID) |
- BIT(NL80211_STA_INFO_PLID) |
- BIT(NL80211_STA_INFO_PLINK_STATE) |
- BIT(NL80211_STA_INFO_LOCAL_PM) |
- BIT(NL80211_STA_INFO_PEER_PM) |
- BIT(NL80211_STA_INFO_NONPEER_PM);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) |
+ BIT_ULL(NL80211_STA_INFO_PLID) |
+ BIT_ULL(NL80211_STA_INFO_PLINK_STATE) |
+ BIT_ULL(NL80211_STA_INFO_LOCAL_PM) |
+ BIT_ULL(NL80211_STA_INFO_PEER_PM) |
+ BIT_ULL(NL80211_STA_INFO_NONPEER_PM);
sinfo->llid = sta->mesh->llid;
sinfo->plid = sta->mesh->plid;
sinfo->plink_state = sta->mesh->plink_state;
if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
- sinfo->filled |= BIT(NL80211_STA_INFO_T_OFFSET);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET);
sinfo->t_offset = sta->mesh->t_offset;
}
sinfo->local_pm = sta->mesh->local_pm;
@@ -2300,7 +2313,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
thr = sta_get_expected_throughput(sta);
if (thr != 0) {
- sinfo->filled |= BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
sinfo->expected_throughput = thr;
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 81b35f623792..9a04327d71d1 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -170,7 +170,7 @@ struct tid_ampdu_tx {
u8 dialog_token;
u8 stop_initiator;
bool tx_stop;
- u8 buf_size;
+ u16 buf_size;
u16 failed_bar_ssn;
bool bar_pending;
@@ -405,7 +405,7 @@ struct ieee80211_sta_rx_stats {
int last_signal;
u8 chains;
s8 chain_signal_last[IEEE80211_MAX_CHAINS];
- u16 last_rate;
+ u32 last_rate;
struct u64_stats_sync syncp;
u64 bytes;
u64 msdu[IEEE80211_NUM_TIDS + 1];
@@ -764,6 +764,7 @@ enum sta_stats_type {
STA_STATS_RATE_TYPE_LEGACY,
STA_STATS_RATE_TYPE_HT,
STA_STATS_RATE_TYPE_VHT,
+ STA_STATS_RATE_TYPE_HE,
};
#define STA_STATS_FIELD_HT_MCS GENMASK( 7, 0)
@@ -771,9 +772,14 @@ enum sta_stats_type {
#define STA_STATS_FIELD_LEGACY_BAND GENMASK( 7, 4)
#define STA_STATS_FIELD_VHT_MCS GENMASK( 3, 0)
#define STA_STATS_FIELD_VHT_NSS GENMASK( 7, 4)
+#define STA_STATS_FIELD_HE_MCS GENMASK( 3, 0)
+#define STA_STATS_FIELD_HE_NSS GENMASK( 7, 4)
#define STA_STATS_FIELD_BW GENMASK(11, 8)
#define STA_STATS_FIELD_SGI GENMASK(12, 12)
#define STA_STATS_FIELD_TYPE GENMASK(15, 13)
+#define STA_STATS_FIELD_HE_RU GENMASK(18, 16)
+#define STA_STATS_FIELD_HE_GI GENMASK(20, 19)
+#define STA_STATS_FIELD_HE_DCM GENMASK(21, 21)
#define STA_STATS_FIELD(_n, _v) FIELD_PREP(STA_STATS_FIELD_ ## _n, _v)
#define STA_STATS_GET(_n, _v) FIELD_GET(STA_STATS_FIELD_ ## _n, _v)
@@ -782,7 +788,7 @@ enum sta_stats_type {
static inline u32 sta_stats_encode_rate(struct ieee80211_rx_status *s)
{
- u16 r;
+ u32 r;
r = STA_STATS_FIELD(BW, s->bw);
@@ -804,6 +810,14 @@ static inline u32 sta_stats_encode_rate(struct ieee80211_rx_status *s)
r |= STA_STATS_FIELD(LEGACY_BAND, s->band);
r |= STA_STATS_FIELD(LEGACY_IDX, s->rate_idx);
break;
+ case RX_ENC_HE:
+ r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_HE);
+ r |= STA_STATS_FIELD(HE_NSS, s->nss);
+ r |= STA_STATS_FIELD(HE_MCS, s->rate_idx);
+ r |= STA_STATS_FIELD(HE_GI, s->he_gi);
+ r |= STA_STATS_FIELD(HE_RU, s->he_ru);
+ r |= STA_STATS_FIELD(HE_DCM, s->he_dcm);
+ break;
default:
WARN_ON(1);
return STA_STATS_RATE_INVALID;
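/*
 * Illustrative sketch (not part of the patch): why last_rate grows from
 * u16 to u32 - the new HE fields (RU, GI, DCM) are placed at bits 16-21,
 * past the end of the old 16-bit encoding. Simplified pack/unpack round
 * trip using the same bit positions as the STA_STATS_FIELD_* masks:
 */
#include <stdint.h>
#include <stdio.h>

#define F_HE_MCS(v)	(((uint32_t)(v) & 0xf) << 0)	/* bits  3..0  */
#define F_HE_NSS(v)	(((uint32_t)(v) & 0xf) << 4)	/* bits  7..4  */
#define F_BW(v)		(((uint32_t)(v) & 0xf) << 8)	/* bits 11..8  */
#define F_TYPE(v)	(((uint32_t)(v) & 0x7) << 13)	/* bits 15..13 */
#define F_HE_RU(v)	(((uint32_t)(v) & 0x7) << 16)	/* bits 18..16 */
#define F_HE_GI(v)	(((uint32_t)(v) & 0x3) << 19)	/* bits 20..19 */
#define F_HE_DCM(v)	(((uint32_t)(v) & 0x1) << 21)	/* bit  21     */

int main(void)
{
	/* the HE type value is whatever the enum assigns; 4 assumed here */
	uint32_t r = F_TYPE(4) | F_HE_MCS(11) | F_HE_NSS(2) |
		     F_HE_RU(4) | F_HE_GI(1) | F_HE_DCM(0);

	printf("encoded %#x: mcs=%u nss=%u ru=%u\n", r,
	       r & 0xf, (r >> 4) & 0xf, (r >> 16) & 0x7);
	return 0;
}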
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 80a7edf8d314..0ab69a1964f8 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -92,7 +92,7 @@
STA_ENTRY \
__field(u16, tid) \
__field(u16, ssn) \
- __field(u8, buf_size) \
+ __field(u16, buf_size) \
__field(bool, amsdu) \
__field(u16, timeout) \
__field(u16, action)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index fa1f1e63a264..cd332e3e1134 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -825,6 +825,8 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
*/
if (!ieee80211_is_data_qos(hdr->frame_control) ||
is_multicast_ether_addr(hdr->addr1)) {
+ if (tx->flags & IEEE80211_TX_NO_SEQNO)
+ return TX_CONTINUE;
/* driver should assign sequence number */
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
/* for pure STA mode without beacons, we can do it */
@@ -1247,7 +1249,7 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
return NULL;
- if (!ieee80211_is_data(hdr->frame_control))
+ if (!ieee80211_is_data_present(hdr->frame_control))
return NULL;
if (sta) {
@@ -1854,7 +1856,7 @@ EXPORT_SYMBOL(ieee80211_tx_prepare_skb);
*/
static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, struct sk_buff *skb,
- bool txpending)
+ bool txpending, u32 txdata_flags)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_data tx;
@@ -1872,6 +1874,8 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
led_len = skb->len;
res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb);
+ tx.flags |= txdata_flags;
+
if (unlikely(res_prepare == TX_DROP)) {
ieee80211_free_txskb(&local->hw, skb);
return true;
@@ -1933,7 +1937,8 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
}
void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta, struct sk_buff *skb)
+ struct sta_info *sta, struct sk_buff *skb,
+ u32 txdata_flags)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1968,7 +1973,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
}
ieee80211_set_qos_hdr(sdata, skb);
- ieee80211_tx(sdata, sta, skb, false);
+ ieee80211_tx(sdata, sta, skb, false, txdata_flags);
}
static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local,
@@ -2289,7 +2294,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
if (!ieee80211_parse_tx_radiotap(local, skb))
goto fail_rcu;
- ieee80211_xmit(sdata, NULL, skb);
+ ieee80211_xmit(sdata, NULL, skb, 0);
rcu_read_unlock();
return NETDEV_TX_OK;
@@ -3648,7 +3653,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
ieee80211_tx_stats(dev, skb->len);
- ieee80211_xmit(sdata, sta, skb);
+ ieee80211_xmit(sdata, sta, skb, 0);
}
goto out;
out_free:
@@ -3867,7 +3872,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
return true;
}
info->band = chanctx_conf->def.chan->band;
- result = ieee80211_tx(sdata, NULL, skb, true);
+ result = ieee80211_tx(sdata, NULL, skb, true, 0);
} else {
struct sk_buff_head skbs;
@@ -4783,7 +4788,7 @@ EXPORT_SYMBOL(ieee80211_unreserve_tid);
void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int tid,
- enum nl80211_band band)
+ enum nl80211_band band, u32 txdata_flags)
{
int ac = ieee80211_ac_from_tid(tid);
@@ -4800,7 +4805,7 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
*/
local_bh_disable();
IEEE80211_SKB_CB(skb)->band = band;
- ieee80211_xmit(sdata, NULL, skb);
+ ieee80211_xmit(sdata, NULL, skb, txdata_flags);
local_bh_enable();
}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d02fbfec3783..88efda7c9f8a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1095,6 +1095,21 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
if (elen >= sizeof(*elems->max_idle_period_ie))
elems->max_idle_period_ie = (void *)pos;
break;
+ case WLAN_EID_EXTENSION:
+ if (pos[0] == WLAN_EID_EXT_HE_MU_EDCA &&
+ elen >= (sizeof(*elems->mu_edca_param_set) + 1)) {
+ elems->mu_edca_param_set = (void *)&pos[1];
+ } else if (pos[0] == WLAN_EID_EXT_HE_CAPABILITY) {
+ elems->he_cap = (void *)&pos[1];
+ elems->he_cap_len = elen - 1;
+ } else if (pos[0] == WLAN_EID_EXT_HE_OPERATION &&
+ elen >= sizeof(*elems->he_operation) &&
+ elen >= ieee80211_he_oper_size(&pos[1])) {
+ elems->he_operation = (void *)&pos[1];
+ } else if (pos[0] == WLAN_EID_EXT_UORA && elen >= 1) {
+ elems->uora_element = (void *)&pos[1];
+ }
+ break;
default:
break;
}
@@ -1353,9 +1368,10 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
enum nl80211_band band,
u32 rate_mask,
struct cfg80211_chan_def *chandef,
- size_t *offset)
+ size_t *offset, u32 flags)
{
struct ieee80211_supported_band *sband;
+ const struct ieee80211_sta_he_cap *he_cap;
u8 *pos = buffer, *end = buffer + buffer_len;
size_t noffset;
int supp_rates_len, i;
@@ -1433,6 +1449,9 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
chandef->chan->center_freq);
}
+ if (flags & IEEE80211_PROBE_FLAG_MIN_CONTENT)
+ goto done;
+
/* insert custom IEs that go before HT */
if (ie && ie_len) {
static const u8 before_ht[] = {
@@ -1460,11 +1479,6 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
sband->ht_cap.cap);
}
- /*
- * If adding more here, adjust code in main.c
- * that calculates local->scan_ies_len.
- */
-
/* insert custom IEs that go before VHT */
if (ie && ie_len) {
static const u8 before_vht[] = {
@@ -1507,9 +1521,43 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
sband->vht_cap.cap);
}
+ /* insert custom IEs that go before HE */
+ if (ie && ie_len) {
+ static const u8 before_he[] = {
+ /*
+ * no need to list the ones split off before VHT
+ * or generated here
+ */
+ WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_REQ_PARAMS,
+ WLAN_EID_AP_CSN,
+ /* TODO: add 11ah/11aj/11ak elements */
+ };
+ noffset = ieee80211_ie_split(ie, ie_len,
+ before_he, ARRAY_SIZE(before_he),
+ *offset);
+ if (end - pos < noffset - *offset)
+ goto out_err;
+ memcpy(pos, ie + *offset, noffset - *offset);
+ pos += noffset - *offset;
+ *offset = noffset;
+ }
+
+ he_cap = ieee80211_get_he_sta_cap(sband);
+ if (he_cap) {
+ pos = ieee80211_ie_build_he_cap(pos, he_cap, end);
+ if (!pos)
+ goto out_err;
+ }
+
+ /*
+ * If adding more here, adjust code in main.c
+ * that calculates local->scan_ies_len.
+ */
+
return pos - buffer;
out_err:
WARN_ONCE(1, "not enough space for preq IEs\n");
+ done:
return pos - buffer;
}
@@ -1518,7 +1566,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
struct ieee80211_scan_ies *ie_desc,
const u8 *ie, size_t ie_len,
u8 bands_used, u32 *rate_masks,
- struct cfg80211_chan_def *chandef)
+ struct cfg80211_chan_def *chandef,
+ u32 flags)
{
size_t pos = 0, old_pos = 0, custom_ie_offset = 0;
int i;
@@ -1533,7 +1582,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
ie, ie_len, i,
rate_masks[i],
chandef,
- &custom_ie_offset);
+ &custom_ie_offset,
+ flags);
ie_desc->ies[i] = buffer + old_pos;
ie_desc->len[i] = pos - old_pos;
old_pos = pos;
@@ -1561,7 +1611,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel *chan,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
- bool directed)
+ u32 flags)
{
struct ieee80211_local *local = sdata->local;
struct cfg80211_chan_def chandef;
@@ -1577,7 +1627,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
* badly-behaved APs don't respond when this parameter is included.
*/
chandef.width = sdata->vif.bss_conf.chandef.width;
- if (directed)
+ if (flags & IEEE80211_PROBE_FLAG_DIRECTED)
chandef.chan = NULL;
else
chandef.chan = chan;
@@ -1591,7 +1641,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
ies_len = ieee80211_build_preq_ies(local, skb_tail_pointer(skb),
skb_tailroom(skb), &dummy_ie_desc,
ie, ie_len, BIT(chan->band),
- rate_masks, &chandef);
+ rate_masks, &chandef, flags);
skb_put(skb, ies_len);
if (dst) {
@@ -1605,27 +1655,6 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
return skb;
}
-void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata,
- const u8 *src, const u8 *dst,
- const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len,
- u32 ratemask, bool directed, u32 tx_flags,
- struct ieee80211_channel *channel, bool scan)
-{
- struct sk_buff *skb;
-
- skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel,
- ssid, ssid_len,
- ie, ie_len, directed);
- if (skb) {
- IEEE80211_SKB_CB(skb)->flags |= tx_flags;
- if (scan)
- ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band);
- else
- ieee80211_tx_skb(sdata, skb);
- }
-}
-
u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum nl80211_band band, u32 *basic_rates)
@@ -2413,6 +2442,72 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
return pos;
}
+u8 *ieee80211_ie_build_he_cap(u8 *pos,
+ const struct ieee80211_sta_he_cap *he_cap,
+ u8 *end)
+{
+ u8 n;
+ u8 ie_len;
+ u8 *orig_pos = pos;
+
+ /* Make sure we have room for the IE */
+ /*
+ * TODO: the 1 added is because this is temporarily under the EXTENSION
+ * IE. Get rid of it when it moves.
+ */
+ if (!he_cap)
+ return orig_pos;
+
+ n = ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem);
+ ie_len = 2 + 1 +
+ sizeof(he_cap->he_cap_elem) + n +
+ ieee80211_he_ppe_size(he_cap->ppe_thres[0],
+ he_cap->he_cap_elem.phy_cap_info);
+
+ if ((end - pos) < ie_len)
+ return orig_pos;
+
+ *pos++ = WLAN_EID_EXTENSION;
+ pos++; /* We'll set the size later below */
+ *pos++ = WLAN_EID_EXT_HE_CAPABILITY;
+
+ /* Fixed data */
+ memcpy(pos, &he_cap->he_cap_elem, sizeof(he_cap->he_cap_elem));
+ pos += sizeof(he_cap->he_cap_elem);
+
+ memcpy(pos, &he_cap->he_mcs_nss_supp, n);
+ pos += n;
+
+ /* Check if PPE Threshold should be present */
+ if ((he_cap->he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0)
+ goto end;
+
+ /*
+ * Calculate how many PPET16/PPET8 pairs are to come. Algorithm:
+ * (NSS_M1 + 1) x (num of 1 bits in RU_INDEX_BITMASK)
+ */
+ n = hweight8(he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n *= (1 + ((he_cap->ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) >>
+ IEEE80211_PPE_THRES_NSS_POS));
+
+ /*
+ * Each pair is 6 bits, and we need to add the 7 "header" bits to the
+ * total size.
+ */
+ n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7;
+ n = DIV_ROUND_UP(n, 8);
+
+ /* Copy PPE Thresholds */
+ memcpy(pos, &he_cap->ppe_thres, n);
+ pos += n;
+
+end:
+ orig_pos[1] = (pos - orig_pos) - 2;
+ return pos;
+}
+
u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
const struct cfg80211_chan_def *chandef,
u16 prot_mode, bool rifs_mode)
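The PPE Thresholds arithmetic in ieee80211_ie_build_he_cap() above is easy to check by hand. Below is a minimal userspace sketch of the same computation, assuming the 3-bit per-threshold width and 7 header bits (NSS_M1 plus RU_INDEX_BITMASK) described in the comments, and the bit layout of the kernel's IEEE80211_PPE_THRES_* masks; the constant names and ppe_thres_bytes() here are local stand-ins, not kernel symbols.

/* Illustrative only: re-derives the PPE Thresholds field length. */
#include <stdio.h>

#define PPET_SIZE    3 /* bits per PPET16/PPET8 threshold */
#define PPE_HDR_BITS 7 /* NSS_M1 (3 bits) + RU_INDEX_BITMASK (4 bits) */

static unsigned int popcount8(unsigned char v)
{
	unsigned int n = 0;

	for (; v; v &= v - 1) /* clear lowest set bit each iteration */
		n++;
	return n;
}

static unsigned int ppe_thres_bytes(unsigned char ppe0)
{
	unsigned int nss_m1 = ppe0 & 0x07;           /* bits 0-2 */
	unsigned int ru_bitmap = (ppe0 >> 3) & 0x0f; /* bits 3-6 */
	unsigned int pairs = (nss_m1 + 1) * popcount8(ru_bitmap);
	unsigned int bits = pairs * PPET_SIZE * 2 + PPE_HDR_BITS;

	return (bits + 7) / 8; /* DIV_ROUND_UP(bits, 8) */
}

int main(void)
{
	/* NSS_M1 = 1 (2 spatial streams), 2 RU index bits set:
	 * 4 pairs * 6 bits + 7 header bits = 31 bits -> 4 octets.
	 */
	printf("%u\n", ppe_thres_bytes(0x19));
	return 0;
}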
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 7e253455f9dd..bcd1a5e6ebf4 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -63,8 +63,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
int ret;
if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
- u16 crc = crc_ccitt(0, skb->data, skb->len);
+ struct sk_buff *nskb;
+ u16 crc;
+
+ if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
+ nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
+ GFP_ATOMIC);
+ if (likely(nskb)) {
+ consume_skb(skb);
+ skb = nskb;
+ } else {
+ goto err_tx;
+ }
+ }
+ crc = crc_ccitt(0, skb->data, skb->len);
put_unaligned_le16(crc, skb_put(skb, 2));
}
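The FCS appended above is plain CRC-CCITT seeded with 0, i.e. what crc_ccitt(0, ...) computes. A minimal userspace sketch of the same checksum, assuming the reflected 0x1021 polynomial (0x8408) used by IEEE 802.15.4; crc_ccitt() here is a local bitwise reimplementation, not the kernel's table-driven routine.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t crc_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++) /* LSB-first, poly 0x8408 */
			crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	}
	return crc;
}

int main(void)
{
	const uint8_t frame[] = { 0x01, 0x02, 0x03 };
	uint16_t fcs = crc_ccitt(0, frame, sizeof(frame));

	/* Stored little-endian, as put_unaligned_le16() does above. */
	printf("FCS: %02x %02x\n", fcs & 0xff, fcs >> 8);
	return 0;
}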
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 6e558a419f60..94f53a9b7d1a 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -224,7 +224,7 @@ static int mpls_fill_encap_info(struct sk_buff *skb,
struct lwtunnel_state *lwtstate)
{
struct mpls_iptunnel_encap *tun_encap_info;
-
+
tun_encap_info = mpls_lwtunnel_encap(lwtstate);
if (nla_put_labels(skb, MPLS_IPTUNNEL_DST, tun_encap_info->labels,
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index f0a1c536ef15..71709c104081 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -46,9 +46,19 @@ config NETFILTER_NETLINK_LOG
and is also scheduled to replace the old syslog-based ipt_LOG
and ip6t_LOG modules.
+config NETFILTER_NETLINK_OSF
+ tristate "Netfilter OSF over NFNETLINK interface"
+ depends on NETFILTER_ADVANCED
+ select NETFILTER_NETLINK
+ help
+ If this option is enabled, the kernel will include support
+ for passive OS fingerprinting via NFNETLINK.
+
config NF_CONNTRACK
tristate "Netfilter connection tracking support"
default m if NETFILTER_ADVANCED=n
+ select NF_DEFRAG_IPV4
+ select NF_DEFRAG_IPV6 if IPV6 != n
help
Connection tracking keeps a record of what packets have passed
through your machine, in order to figure out how they are related
@@ -96,7 +106,6 @@ config NF_CONNTRACK_SECMARK
config NF_CONNTRACK_ZONES
bool 'Connection tracking zones'
depends on NETFILTER_ADVANCED
- depends on NETFILTER_XT_TARGET_CT
help
This option enables support for connection tracking zones.
Normally, each connection needs to have a unique system wide
@@ -148,10 +157,11 @@ config NF_CONNTRACK_TIMESTAMP
If unsure, say `N'.
config NF_CONNTRACK_LABELS
- bool
+ bool "Connection tracking labels"
help
This option enables support for assigning user-defined flag bits
- to connection tracking entries. It selected by the connlabel match.
+ to connection tracking entries. It can be used with xtables connlabel
+ match and the nftables ct expression.
config NF_CT_PROTO_DCCP
bool 'DCCP protocol connection tracking support'
@@ -355,6 +365,7 @@ config NF_CT_NETLINK_TIMEOUT
tristate 'Connection tracking timeout tuning via Netlink'
select NETFILTER_NETLINK
depends on NETFILTER_ADVANCED
+ depends on NF_CONNTRACK_TIMEOUT
help
This option enables support for connection tracking timeout
fine-grain tuning. This allows you to attach specific timeout
@@ -440,9 +451,6 @@ config NETFILTER_SYNPROXY
endif # NF_CONNTRACK
-config NF_OSF
- tristate
-
config NF_TABLES
select NETFILTER_NETLINK
tristate "Netfilter nf_tables support"
@@ -551,6 +559,12 @@ config NFT_NAT
This option adds the "nat" expression that you can use to perform
typical Network Address Translation (NAT) packet transformations.
+config NFT_TUNNEL
+ tristate "Netfilter nf_tables tunnel module"
+ help
+ This option adds the "tunnel" expression that you can use to set
+ tunneling policies.
+
config NFT_OBJREF
tristate "Netfilter nf_tables stateful object reference module"
help
@@ -615,11 +629,28 @@ config NFT_SOCKET
tristate "Netfilter nf_tables socket match support"
depends on IPV6 || IPV6=n
select NF_SOCKET_IPV4
- select NF_SOCKET_IPV6 if IPV6
+ select NF_SOCKET_IPV6 if NF_TABLES_IPV6
help
This option allows matching for the presence or absence of a
corresponding socket and its attributes.
+config NFT_OSF
+ tristate "Netfilter nf_tables passive OS fingerprint support"
+ depends on NETFILTER_ADVANCED
+ select NETFILTER_NETLINK_OSF
+ help
+ This option allows matching packets from a specific OS.
+
+config NFT_TPROXY
+ tristate "Netfilter nf_tables tproxy support"
+ depends on IPV6 || IPV6=n
+ select NF_DEFRAG_IPV4
+ select NF_DEFRAG_IPV6 if NF_TABLES_IPV6
+ select NF_TPROXY_IPV4
+ select NF_TPROXY_IPV6 if NF_TABLES_IPV6
+ help
+ This makes transparent proxy support available in nftables.
+
if NF_TABLES_NETDEV
config NF_DUP_NETDEV
@@ -881,7 +912,7 @@ config NETFILTER_XT_TARGET_LOG
tristate "LOG target support"
select NF_LOG_COMMON
select NF_LOG_IPV4
- select NF_LOG_IPV6 if IPV6
+ select NF_LOG_IPV6 if IP6_NF_IPTABLES
default m if NETFILTER_ADVANCED=n
help
This option adds a `LOG' target, which allows you to create rules in
@@ -973,7 +1004,7 @@ config NETFILTER_XT_TARGET_TEE
depends on IPV6 || IPV6=n
depends on !NF_CONNTRACK || NF_CONNTRACK
select NF_DUP_IPV4
- select NF_DUP_IPV6 if IPV6
+ select NF_DUP_IPV6 if IP6_NF_IPTABLES
---help---
This option adds a "TEE" target with which a packet can be cloned and
this clone be rerouted to another nexthop.
@@ -1366,8 +1397,8 @@ config NETFILTER_XT_MATCH_NFACCT
config NETFILTER_XT_MATCH_OSF
tristate '"osf" Passive OS fingerprint match'
- depends on NETFILTER_ADVANCED && NETFILTER_NETLINK
- select NF_OSF
+ depends on NETFILTER_ADVANCED
+ select NETFILTER_NETLINK_OSF
help
This option selects the Passive OS Fingerprinting match module
that allows you to passively match the remote operating system by
@@ -1481,8 +1512,8 @@ config NETFILTER_XT_MATCH_SOCKET
depends on NETFILTER_ADVANCED
depends on IPV6 || IPV6=n
depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
- depends on NF_SOCKET_IPV4
- depends on NF_SOCKET_IPV6
+ select NF_SOCKET_IPV4
+ select NF_SOCKET_IPV6 if IP6_NF_IPTABLES
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
help
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 8a76dced974d..16895e045b66 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,7 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o utils.o
-nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o nf_conntrack_seqadj.o
+nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o \
+ nf_conntrack_proto.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o \
+ nf_conntrack_proto_icmp.o \
+ nf_conntrack_extend.o nf_conntrack_acct.o nf_conntrack_seqadj.o
+
+nf_conntrack-$(subst m,y,$(CONFIG_IPV6)) += nf_conntrack_proto_icmpv6.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
@@ -15,6 +20,7 @@ obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o
obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
+obj-$(CONFIG_NETFILTER_NETLINK_OSF) += nfnetlink_osf.o
# connection tracking
obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
@@ -95,6 +101,7 @@ obj-$(CONFIG_NFT_QUEUE) += nft_queue.o
obj-$(CONFIG_NFT_QUOTA) += nft_quota.o
obj-$(CONFIG_NFT_REJECT) += nft_reject.o
obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o
+obj-$(CONFIG_NFT_TUNNEL) += nft_tunnel.o
obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
obj-$(CONFIG_NFT_LOG) += nft_log.o
obj-$(CONFIG_NFT_MASQ) += nft_masq.o
@@ -103,8 +110,9 @@ obj-$(CONFIG_NFT_HASH) += nft_hash.o
obj-$(CONFIG_NFT_FIB) += nft_fib.o
obj-$(CONFIG_NFT_FIB_INET) += nft_fib_inet.o
obj-$(CONFIG_NFT_FIB_NETDEV) += nft_fib_netdev.o
-obj-$(CONFIG_NF_OSF) += nf_osf.o
obj-$(CONFIG_NFT_SOCKET) += nft_socket.o
+obj-$(CONFIG_NFT_OSF) += nft_osf.o
+obj-$(CONFIG_NFT_TPROXY) += nft_tproxy.o
# nf_tables netdev
obj-$(CONFIG_NFT_DUP_NETDEV) += nft_dup_netdev.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 168af54db975..dc240cb47ddf 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -603,6 +603,21 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
}
EXPORT_SYMBOL(nf_conntrack_destroy);
+bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+ const struct sk_buff *skb)
+{
+ struct nf_ct_hook *ct_hook;
+ bool ret = false;
+
+ rcu_read_lock();
+ ct_hook = rcu_dereference(nf_ct_hook);
+ if (ct_hook)
+ ret = ct_hook->get_tuple_skb(dst_tuple, skb);
+ rcu_read_unlock();
+ return ret;
+}
+EXPORT_SYMBOL(nf_ct_get_tuple_skb);
+
/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
.id = NF_CT_DEFAULT_ZONE_ID,
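A sketch of how the new hook might be consumed, based only on the nf_ct_get_tuple_skb() signature exported above; example_log_tuple() is hypothetical and not part of this patch.

#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static void example_log_tuple(const struct sk_buff *skb)
{
	struct nf_conntrack_tuple tuple;

	/* false if no conntrack hook is registered or no entry exists */
	if (!nf_ct_get_tuple_skb(&tuple, skb))
		return;

	pr_debug("proto %u dport %u\n", tuple.dst.protonum,
		 ntohs(tuple.dst.u.all));
}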
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 99e0aa350dc5..0edc62910ebf 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -825,12 +825,23 @@ static void ip_vs_conn_expire(struct timer_list *t)
/* Unlink conn if not referenced anymore */
if (likely(ip_vs_conn_unlink(cp))) {
+ struct ip_vs_conn *ct = cp->control;
+
/* delete the timer if it is activated by other users */
del_timer(&cp->timer);
/* does anybody control me? */
- if (cp->control)
+ if (ct) {
ip_vs_control_del(cp);
+ /* Drop CTL or non-assured TPL if not used anymore */
+ if (!cp->timeout && !atomic_read(&ct->n_control) &&
+ (!(ct->flags & IP_VS_CONN_F_TEMPLATE) ||
+ !(ct->state & IP_VS_CTPL_S_ASSURED))) {
+ IP_VS_DBG(4, "drop controlling connection\n");
+ ct->timeout = 0;
+ ip_vs_conn_expire_now(ct);
+ }
+ }
if ((cp->flags & IP_VS_CONN_F_NFCT) &&
!(cp->flags & IP_VS_CONN_F_ONE_PACKET)) {
@@ -872,6 +883,10 @@ static void ip_vs_conn_expire(struct timer_list *t)
/* Modify timer, so that it expires as soon as possible.
* Can be called without reference only if under RCU lock.
+ * We can have such a chain of conns linked with ->control: DATA->CTL->TPL
+ * - DATA (e.g. FTP) and TPL (persistence) can be present depending on setup
+ * - cp->timeout=0 indicates all conns from the chain should be dropped but
+ * TPL is not dropped if in assured state
*/
void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
{
@@ -1107,7 +1122,7 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
&cp->caddr.in6, ntohs(cp->cport),
&cp->vaddr.in6, ntohs(cp->vport),
dbuf, ntohs(cp->dport),
- ip_vs_state_name(cp->protocol, cp->state),
+ ip_vs_state_name(cp),
(cp->timer.expires-jiffies)/HZ, pe_data);
else
#endif
@@ -1118,7 +1133,7 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
ntohl(cp->caddr.ip), ntohs(cp->cport),
ntohl(cp->vaddr.ip), ntohs(cp->vport),
dbuf, ntohs(cp->dport),
- ip_vs_state_name(cp->protocol, cp->state),
+ ip_vs_state_name(cp),
(cp->timer.expires-jiffies)/HZ, pe_data);
}
return 0;
@@ -1169,7 +1184,7 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
&cp->caddr.in6, ntohs(cp->cport),
&cp->vaddr.in6, ntohs(cp->vport),
dbuf, ntohs(cp->dport),
- ip_vs_state_name(cp->protocol, cp->state),
+ ip_vs_state_name(cp),
ip_vs_origin_name(cp->flags),
(cp->timer.expires-jiffies)/HZ);
else
@@ -1181,7 +1196,7 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
ntohl(cp->caddr.ip), ntohs(cp->cport),
ntohl(cp->vaddr.ip), ntohs(cp->vport),
dbuf, ntohs(cp->dport),
- ip_vs_state_name(cp->protocol, cp->state),
+ ip_vs_state_name(cp),
ip_vs_origin_name(cp->flags),
(cp->timer.expires-jiffies)/HZ);
}
@@ -1197,8 +1212,11 @@ static const struct seq_operations ip_vs_conn_sync_seq_ops = {
#endif
-/*
- * Randomly drop connection entries before running out of memory
+/* Randomly drop connection entries before running out of memory
+ * Can be used for DATA and CTL conns. For TPL conns there are exceptions:
+ * - traffic for services in OPS mode increases ct->in_pkts, so it is supported
+ * - traffic for services not in OPS mode does not increase ct->in_pkts in
+ * all cases, so it is not supported
*/
static inline int todrop_entry(struct ip_vs_conn *cp)
{
@@ -1242,7 +1260,7 @@ static inline bool ip_vs_conn_ops_mode(struct ip_vs_conn *cp)
void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
{
int idx;
- struct ip_vs_conn *cp, *cp_c;
+ struct ip_vs_conn *cp;
rcu_read_lock();
/*
@@ -1254,13 +1272,15 @@ void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
if (cp->ipvs != ipvs)
continue;
+ if (atomic_read(&cp->n_control))
+ continue;
if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
- if (atomic_read(&cp->n_control) ||
- !ip_vs_conn_ops_mode(cp))
- continue;
- else
- /* connection template of OPS */
+ /* connection template of OPS */
+ if (ip_vs_conn_ops_mode(cp))
goto try_drop;
+ if (!(cp->state & IP_VS_CTPL_S_ASSURED))
+ goto drop;
+ continue;
}
if (cp->protocol == IPPROTO_TCP) {
switch(cp->state) {
@@ -1294,15 +1314,10 @@ try_drop:
continue;
}
- IP_VS_DBG(4, "del connection\n");
+drop:
+ IP_VS_DBG(4, "drop connection\n");
+ cp->timeout = 0;
ip_vs_conn_expire_now(cp);
- cp_c = cp->control;
- /* cp->control is valid only with reference to cp */
- if (cp_c && __ip_vs_conn_get(cp)) {
- IP_VS_DBG(4, "del conn template\n");
- ip_vs_conn_expire_now(cp_c);
- __ip_vs_conn_put(cp);
- }
}
cond_resched_rcu();
}
@@ -1325,15 +1340,19 @@ flush_again:
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
if (cp->ipvs != ipvs)
continue;
- IP_VS_DBG(4, "del connection\n");
- ip_vs_conn_expire_now(cp);
+ /* As timers are expired in LIFO order, restart
+ * the timer of controlling connection first, so
+ * that it is expired after us.
+ */
cp_c = cp->control;
/* cp->control is valid only with reference to cp */
if (cp_c && __ip_vs_conn_get(cp)) {
- IP_VS_DBG(4, "del conn template\n");
+ IP_VS_DBG(4, "del controlling connection\n");
ip_vs_conn_expire_now(cp_c);
__ip_vs_conn_put(cp);
}
+ IP_VS_DBG(4, "del connection\n");
+ ip_vs_conn_expire_now(cp);
}
cond_resched_rcu();
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index dd21782e2f12..62eefea48973 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -134,7 +134,7 @@ static void update_defense_level(struct netns_ipvs *ipvs)
} else {
atomic_set(&ipvs->dropentry, 0);
ipvs->sysctl_drop_entry = 1;
- };
+ }
break;
case 3:
atomic_set(&ipvs->dropentry, 1);
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index ca880a3ad033..54ee84adf0bd 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -42,6 +42,11 @@
static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];
+/* States for conn templates: NONE or words separated with ",", max 15 chars */
+static const char *ip_vs_ctpl_state_name_table[IP_VS_CTPL_S_LAST] = {
+ [IP_VS_CTPL_S_NONE] = "NONE",
+ [IP_VS_CTPL_S_ASSURED] = "ASSURED",
+};
/*
* register an ipvs protocol
@@ -193,12 +198,20 @@ ip_vs_create_timeout_table(int *table, int size)
}
-const char * ip_vs_state_name(__u16 proto, int state)
+const char *ip_vs_state_name(const struct ip_vs_conn *cp)
{
- struct ip_vs_protocol *pp = ip_vs_proto_get(proto);
+ unsigned int state = cp->state;
+ struct ip_vs_protocol *pp;
+
+ if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
+ if (state >= IP_VS_CTPL_S_LAST)
+ return "ERR!";
+ return ip_vs_ctpl_state_name_table[state] ? : "?";
+ }
+ pp = ip_vs_proto_get(cp->protocol);
if (pp == NULL || pp->state_name == NULL)
- return (IPPROTO_IP == proto) ? "NONE" : "ERR!";
+ return (cp->protocol == IPPROTO_IP) ? "NONE" : "ERR!";
return pp->state_name(state);
}
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 3250c4a1111e..b0cd7d08f2a7 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -461,6 +461,8 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
cp->flags &= ~IP_VS_CONN_F_INACTIVE;
}
}
+ if (next_state == IP_VS_SCTP_S_ESTABLISHED)
+ ip_vs_control_assure_ct(cp);
}
if (likely(pd))
cp->timeout = pd->timeout_table[cp->state = next_state];
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 80d10ad12a15..1770fc6ce960 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -569,6 +569,8 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
cp->flags &= ~IP_VS_CONN_F_INACTIVE;
}
}
+ if (new_state == IP_VS_TCP_S_ESTABLISHED)
+ ip_vs_control_assure_ct(cp);
}
if (likely(pd))
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index e0ef11c3691e..0f53c49025f8 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -460,6 +460,8 @@ udp_state_transition(struct ip_vs_conn *cp, int direction,
}
cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
+ if (direction == IP_VS_DIR_OUTPUT)
+ ip_vs_control_assure_ct(cp);
}
static int __udp_init(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 001501e25625..d4020c5e831d 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1003,12 +1003,9 @@ static void ip_vs_process_message_v0(struct netns_ipvs *ipvs, const char *buffer
continue;
}
} else {
- /* protocol in templates is not used for state/timeout */
- if (state > 0) {
- IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n",
- state);
- state = 0;
- }
+ if (state >= IP_VS_CTPL_S_LAST)
+ IP_VS_DBG(7, "BACKUP v0, Invalid tpl state %u\n",
+ state);
}
ip_vs_conn_fill_param(ipvs, AF_INET, s->protocol,
@@ -1166,12 +1163,9 @@ static inline int ip_vs_proc_sync_conn(struct netns_ipvs *ipvs, __u8 *p, __u8 *m
goto out;
}
} else {
- /* protocol in templates is not used for state/timeout */
- if (state > 0) {
- IP_VS_DBG(3, "BACKUP, Invalid template state %u\n",
- state);
- state = 0;
- }
+ if (state >= IP_VS_CTPL_S_LAST)
+ IP_VS_DBG(7, "BACKUP, Invalid tpl state %u\n",
+ state);
}
if (ip_vs_conn_fill_param_sync(ipvs, af, s, &param, pe_data,
pe_data_len, pe_name, pe_name_len)) {
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 510039862aa9..02ca7df793f5 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -44,17 +44,19 @@
/* we will save the tuples of all connections we care about */
struct nf_conncount_tuple {
- struct hlist_node node;
+ struct list_head node;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_zone zone;
int cpu;
u32 jiffies32;
+ struct rcu_head rcu_head;
};
struct nf_conncount_rb {
struct rb_node node;
- struct hlist_head hhead; /* connections/hosts in same subnet */
+ struct nf_conncount_list list;
u32 key[MAX_KEYLEN];
+ struct rcu_head rcu_head;
};
static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
@@ -62,6 +64,10 @@ static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_i
struct nf_conncount_data {
unsigned int keylen;
struct rb_root root[CONNCOUNT_SLOTS];
+ struct net *net;
+ struct work_struct gc_work;
+ unsigned long pending_trees[BITS_TO_LONGS(CONNCOUNT_SLOTS)];
+ unsigned int gc_tree;
};
static u_int32_t conncount_rnd __read_mostly;
@@ -82,26 +88,70 @@ static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
return memcmp(a, b, klen * sizeof(u32));
}
-bool nf_conncount_add(struct hlist_head *head,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone)
+enum nf_conncount_list_add
+nf_conncount_add(struct nf_conncount_list *list,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone)
{
struct nf_conncount_tuple *conn;
+ if (WARN_ON_ONCE(list->count > INT_MAX))
+ return NF_CONNCOUNT_ERR;
+
conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
if (conn == NULL)
- return false;
+ return NF_CONNCOUNT_ERR;
+
conn->tuple = *tuple;
conn->zone = *zone;
conn->cpu = raw_smp_processor_id();
conn->jiffies32 = (u32)jiffies;
- hlist_add_head(&conn->node, head);
- return true;
+ spin_lock(&list->list_lock);
+ if (list->dead == true) {
+ kmem_cache_free(conncount_conn_cachep, conn);
+ spin_unlock(&list->list_lock);
+ return NF_CONNCOUNT_SKIP;
+ }
+ list_add_tail(&conn->node, &list->head);
+ list->count++;
+ spin_unlock(&list->list_lock);
+ return NF_CONNCOUNT_ADDED;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);
+static void __conn_free(struct rcu_head *h)
+{
+ struct nf_conncount_tuple *conn;
+
+ conn = container_of(h, struct nf_conncount_tuple, rcu_head);
+ kmem_cache_free(conncount_conn_cachep, conn);
+}
+
+static bool conn_free(struct nf_conncount_list *list,
+ struct nf_conncount_tuple *conn)
+{
+ bool free_entry = false;
+
+ spin_lock(&list->list_lock);
+
+ if (list->count == 0) {
+ spin_unlock(&list->list_lock);
+ return free_entry;
+ }
+
+ list->count--;
+ list_del_rcu(&conn->node);
+ if (list->count == 0)
+ free_entry = true;
+
+ spin_unlock(&list->list_lock);
+ call_rcu(&conn->rcu_head, __conn_free);
+ return free_entry;
+}
+
static const struct nf_conntrack_tuple_hash *
-find_or_evict(struct net *net, struct nf_conncount_tuple *conn)
+find_or_evict(struct net *net, struct nf_conncount_list *list,
+ struct nf_conncount_tuple *conn, bool *free_entry)
{
const struct nf_conntrack_tuple_hash *found;
unsigned long a, b;
@@ -121,34 +171,37 @@ find_or_evict(struct net *net, struct nf_conncount_tuple *conn)
*/
age = a - b;
if (conn->cpu == cpu || age >= 2) {
- hlist_del(&conn->node);
- kmem_cache_free(conncount_conn_cachep, conn);
+ *free_entry = conn_free(list, conn);
return ERR_PTR(-ENOENT);
}
return ERR_PTR(-EAGAIN);
}
-unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone,
- bool *addit)
+void nf_conncount_lookup(struct net *net,
+ struct nf_conncount_list *list,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone,
+ bool *addit)
{
const struct nf_conntrack_tuple_hash *found;
- struct nf_conncount_tuple *conn;
+ struct nf_conncount_tuple *conn, *conn_n;
struct nf_conn *found_ct;
- struct hlist_node *n;
- unsigned int length = 0;
+ unsigned int collect = 0;
+ bool free_entry = false;
+ /* best effort only */
*addit = tuple ? true : false;
/* check the saved connections */
- hlist_for_each_entry_safe(conn, n, head, node) {
- found = find_or_evict(net, conn);
+ list_for_each_entry_safe(conn, conn_n, &list->head, node) {
+ if (collect > CONNCOUNT_GC_MAX_NODES)
+ break;
+
+ found = find_or_evict(net, list, conn, &free_entry);
if (IS_ERR(found)) {
/* Not found, but might be about to be confirmed */
if (PTR_ERR(found) == -EAGAIN) {
- length++;
if (!tuple)
continue;
@@ -156,7 +209,8 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
nf_ct_zone_id(zone, zone->dir))
*addit = false;
- }
+ } else if (PTR_ERR(found) == -ENOENT)
+ collect++;
continue;
}
@@ -165,9 +219,10 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
nf_ct_zone_equal(found_ct, zone, zone->dir)) {
/*
- * Just to be sure we have it only once in the list.
* We should not see tuples twice unless someone hooks
* this into a table without "-p tcp --syn".
+ *
+ * Attempt to avoid a re-add in this case.
*/
*addit = false;
} else if (already_closed(found_ct)) {
@@ -176,19 +231,75 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
* closed already -> ditch it
*/
nf_ct_put(found_ct);
- hlist_del(&conn->node);
- kmem_cache_free(conncount_conn_cachep, conn);
+ conn_free(list, conn);
+ collect++;
continue;
}
nf_ct_put(found_ct);
- length++;
}
-
- return length;
}
EXPORT_SYMBOL_GPL(nf_conncount_lookup);
+void nf_conncount_list_init(struct nf_conncount_list *list)
+{
+ spin_lock_init(&list->list_lock);
+ INIT_LIST_HEAD(&list->head);
+ list->count = 1;
+ list->dead = false;
+}
+EXPORT_SYMBOL_GPL(nf_conncount_list_init);
+
+/* Return true if the list is empty */
+bool nf_conncount_gc_list(struct net *net,
+ struct nf_conncount_list *list)
+{
+ const struct nf_conntrack_tuple_hash *found;
+ struct nf_conncount_tuple *conn, *conn_n;
+ struct nf_conn *found_ct;
+ unsigned int collected = 0;
+ bool free_entry = false;
+
+ list_for_each_entry_safe(conn, conn_n, &list->head, node) {
+ found = find_or_evict(net, list, conn, &free_entry);
+ if (IS_ERR(found)) {
+ if (PTR_ERR(found) == -ENOENT) {
+ if (free_entry)
+ return true;
+ collected++;
+ }
+ continue;
+ }
+
+ found_ct = nf_ct_tuplehash_to_ctrack(found);
+ if (already_closed(found_ct)) {
+ /*
+ * we do not care about connections which are
+ * closed already -> ditch it
+ */
+ nf_ct_put(found_ct);
+ if (conn_free(list, conn))
+ return true;
+ collected++;
+ continue;
+ }
+
+ nf_ct_put(found_ct);
+ if (collected > CONNCOUNT_GC_MAX_NODES)
+ return false;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(nf_conncount_gc_list);
+
+static void __tree_nodes_free(struct rcu_head *h)
+{
+ struct nf_conncount_rb *rbconn;
+
+ rbconn = container_of(h, struct nf_conncount_rb, rcu_head);
+ kmem_cache_free(conncount_rb_cachep, rbconn);
+}
+
static void tree_nodes_free(struct rb_root *root,
struct nf_conncount_rb *gc_nodes[],
unsigned int gc_count)
@@ -197,32 +308,46 @@ static void tree_nodes_free(struct rb_root *root,
while (gc_count) {
rbconn = gc_nodes[--gc_count];
- rb_erase(&rbconn->node, root);
- kmem_cache_free(conncount_rb_cachep, rbconn);
+ spin_lock(&rbconn->list.list_lock);
+ if (rbconn->list.count == 0 && rbconn->list.dead == false) {
+ rbconn->list.dead = true;
+ rb_erase(&rbconn->node, root);
+ call_rcu(&rbconn->rcu_head, __tree_nodes_free);
+ }
+ spin_unlock(&rbconn->list.list_lock);
}
}
+static void schedule_gc_worker(struct nf_conncount_data *data, int tree)
+{
+ set_bit(tree, data->pending_trees);
+ schedule_work(&data->gc_work);
+}
+
static unsigned int
-count_tree(struct net *net, struct rb_root *root,
- const u32 *key, u8 keylen,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone)
+insert_tree(struct net *net,
+ struct nf_conncount_data *data,
+ struct rb_root *root,
+ unsigned int hash,
+ const u32 *key,
+ u8 keylen,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone)
{
+ enum nf_conncount_list_add ret;
struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
struct rb_node **rbnode, *parent;
struct nf_conncount_rb *rbconn;
struct nf_conncount_tuple *conn;
- unsigned int gc_count;
- bool no_gc = false;
+ unsigned int count = 0, gc_count = 0;
+ bool node_found = false;
+
+ spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
- restart:
- gc_count = 0;
parent = NULL;
rbnode = &(root->rb_node);
while (*rbnode) {
int diff;
- bool addit;
-
rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);
parent = *rbnode;
@@ -232,33 +357,30 @@ count_tree(struct net *net, struct rb_root *root,
} else if (diff > 0) {
rbnode = &((*rbnode)->rb_right);
} else {
- /* same source network -> be counted! */
- unsigned int count;
-
- count = nf_conncount_lookup(net, &rbconn->hhead, tuple,
- zone, &addit);
-
- tree_nodes_free(root, gc_nodes, gc_count);
- if (!addit)
- return count;
-
- if (!nf_conncount_add(&rbconn->hhead, tuple, zone))
- return 0; /* hotdrop */
-
- return count + 1;
+ /* unlikely: other cpu added node already */
+ node_found = true;
+ ret = nf_conncount_add(&rbconn->list, tuple, zone);
+ if (ret == NF_CONNCOUNT_ERR) {
+ count = 0; /* hotdrop */
+ } else if (ret == NF_CONNCOUNT_ADDED) {
+ count = rbconn->list.count;
+ } else {
+ /* NF_CONNCOUNT_SKIP, rbconn is already
+ * reclaimed by gc, insert a new tree node
+ */
+ node_found = false;
+ }
+ break;
}
- if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
+ if (gc_count >= ARRAY_SIZE(gc_nodes))
continue;
- /* only used for GC on hhead, retval and 'addit' ignored */
- nf_conncount_lookup(net, &rbconn->hhead, tuple, zone, &addit);
- if (hlist_empty(&rbconn->hhead))
+ if (nf_conncount_gc_list(net, &rbconn->list))
gc_nodes[gc_count++] = rbconn;
}
if (gc_count) {
- no_gc = true;
tree_nodes_free(root, gc_nodes, gc_count);
/* tree_node_free before new allocation permits
* allocator to re-use newly free'd object.
@@ -266,58 +388,146 @@ count_tree(struct net *net, struct rb_root *root,
* This is a rare event; in most cases we will find
* existing node to re-use. (or gc_count is 0).
*/
- goto restart;
+
+ if (gc_count >= ARRAY_SIZE(gc_nodes))
+ schedule_gc_worker(data, hash);
}
- if (!tuple)
- return 0;
+ if (node_found)
+ goto out_unlock;
- /* no match, need to insert new node */
+ /* expected case: no match, insert new node */
rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
if (rbconn == NULL)
- return 0;
+ goto out_unlock;
conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
if (conn == NULL) {
kmem_cache_free(conncount_rb_cachep, rbconn);
- return 0;
+ goto out_unlock;
}
conn->tuple = *tuple;
conn->zone = *zone;
memcpy(rbconn->key, key, sizeof(u32) * keylen);
- INIT_HLIST_HEAD(&rbconn->hhead);
- hlist_add_head(&conn->node, &rbconn->hhead);
+ nf_conncount_list_init(&rbconn->list);
+ list_add(&conn->node, &rbconn->list.head);
+ count = 1;
rb_link_node(&rbconn->node, parent, rbnode);
rb_insert_color(&rbconn->node, root);
- return 1;
+out_unlock:
+ spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+ return count;
}
-/* Count and return number of conntrack entries in 'net' with particular 'key'.
- * If 'tuple' is not null, insert it into the accounting data structure.
- */
-unsigned int nf_conncount_count(struct net *net,
- struct nf_conncount_data *data,
- const u32 *key,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone)
+static unsigned int
+count_tree(struct net *net,
+ struct nf_conncount_data *data,
+ const u32 *key,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone)
{
+ enum nf_conncount_list_add ret;
struct rb_root *root;
- int count;
- u32 hash;
+ struct rb_node *parent;
+ struct nf_conncount_rb *rbconn;
+ unsigned int hash;
+ u8 keylen = data->keylen;
hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
root = &data->root[hash];
- spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+ parent = rcu_dereference_raw(root->rb_node);
+ while (parent) {
+ int diff;
+ bool addit;
- count = count_tree(net, root, key, data->keylen, tuple, zone);
+ rbconn = rb_entry(parent, struct nf_conncount_rb, node);
- spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+ diff = key_diff(key, rbconn->key, keylen);
+ if (diff < 0) {
+ parent = rcu_dereference_raw(parent->rb_left);
+ } else if (diff > 0) {
+ parent = rcu_dereference_raw(parent->rb_right);
+ } else {
+ /* same source network -> be counted! */
+ nf_conncount_lookup(net, &rbconn->list, tuple, zone,
+ &addit);
- return count;
+ if (!addit)
+ return rbconn->list.count;
+
+ ret = nf_conncount_add(&rbconn->list, tuple, zone);
+ if (ret == NF_CONNCOUNT_ERR) {
+ return 0; /* hotdrop */
+ } else if (ret == NF_CONNCOUNT_ADDED) {
+ return rbconn->list.count;
+ } else {
+ /* NF_CONNCOUNT_SKIP, rbconn is already
+ * reclaimed by gc, insert a new tree node
+ */
+ break;
+ }
+ }
+ }
+
+ if (!tuple)
+ return 0;
+
+ return insert_tree(net, data, root, hash, key, keylen, tuple, zone);
+}
+
+static void tree_gc_worker(struct work_struct *work)
+{
+ struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);
+ struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES], *rbconn;
+ struct rb_root *root;
+ struct rb_node *node;
+ unsigned int tree, next_tree, gc_count = 0;
+
+ tree = data->gc_tree % CONNCOUNT_LOCK_SLOTS;
+ root = &data->root[tree];
+
+ rcu_read_lock();
+ for (node = rb_first(root); node != NULL; node = rb_next(node)) {
+ rbconn = rb_entry(node, struct nf_conncount_rb, node);
+ if (nf_conncount_gc_list(data->net, &rbconn->list))
+ gc_nodes[gc_count++] = rbconn;
+ }
+ rcu_read_unlock();
+
+ spin_lock_bh(&nf_conncount_locks[tree]);
+
+ if (gc_count) {
+ tree_nodes_free(root, gc_nodes, gc_count);
+ }
+
+ clear_bit(tree, data->pending_trees);
+
+ next_tree = (tree + 1) % CONNCOUNT_SLOTS;
+ next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);
+
+ if (next_tree < CONNCOUNT_SLOTS) {
+ data->gc_tree = next_tree;
+ schedule_work(work);
+ }
+
+ spin_unlock_bh(&nf_conncount_locks[tree]);
+}
+
+/* Count and return number of conntrack entries in 'net' with particular 'key'.
+ * If 'tuple' is not null, insert it into the accounting data structure.
+ * Call with RCU read lock.
+ */
+unsigned int nf_conncount_count(struct net *net,
+ struct nf_conncount_data *data,
+ const u32 *key,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone)
+{
+ return count_tree(net, data, key, tuple, zone);
}
EXPORT_SYMBOL_GPL(nf_conncount_count);
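With the tree search now lock-free, the "Call with RCU read lock" requirement above falls on the caller. A minimal caller sketch; example_over_limit() and its limit parameter are illustrative placeholders, not part of this patch.

#include <linux/rcupdate.h>
#include <net/netfilter/nf_conntrack_count.h>

static bool example_over_limit(struct net *net,
			       struct nf_conncount_data *data,
			       const u32 *key,
			       const struct nf_conntrack_tuple *tuple,
			       const struct nf_conntrack_zone *zone,
			       unsigned int limit)
{
	unsigned int count;

	rcu_read_lock(); /* protects the lock-free rbtree walk */
	count = nf_conncount_count(net, data, key, tuple, zone);
	rcu_read_unlock();

	return count > limit;
}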
@@ -348,17 +558,18 @@ struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family
data->root[i] = RB_ROOT;
data->keylen = keylen / sizeof(u32);
+ data->net = net;
+ INIT_WORK(&data->gc_work, tree_gc_worker);
return data;
}
EXPORT_SYMBOL_GPL(nf_conncount_init);
-void nf_conncount_cache_free(struct hlist_head *hhead)
+void nf_conncount_cache_free(struct nf_conncount_list *list)
{
- struct nf_conncount_tuple *conn;
- struct hlist_node *n;
+ struct nf_conncount_tuple *conn, *conn_n;
- hlist_for_each_entry_safe(conn, n, hhead, node)
+ list_for_each_entry_safe(conn, conn_n, &list->head, node)
kmem_cache_free(conncount_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);
@@ -373,7 +584,7 @@ static void destroy_tree(struct rb_root *r)
rb_erase(node, r);
- nf_conncount_cache_free(&rbconn->hhead);
+ nf_conncount_cache_free(&rbconn->list);
kmem_cache_free(conncount_rb_cachep, rbconn);
}
@@ -384,6 +595,7 @@ void nf_conncount_destroy(struct net *net, unsigned int family,
{
unsigned int i;
+ cancel_work_sync(&data->gc_work);
nf_ct_netns_put(net, family);
for (i = 0; i < ARRAY_SIZE(data->root); ++i)
diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c
index a1086bdec242..5423b197d98a 100644
--- a/net/netfilter/nf_conntrack_broadcast.c
+++ b/net/netfilter/nf_conntrack_broadcast.c
@@ -32,7 +32,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
__be32 mask = 0;
/* we're only interested in locally generated packets */
- if (skb->sk == NULL)
+ if (skb->sk == NULL || !net_eq(nf_ct_net(ct), sock_net(skb->sk)))
goto out;
if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
goto out;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3d5280425027..a676d5f76bdc 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -37,7 +37,6 @@
#include <linux/rculist_nulls.h>
#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
@@ -55,6 +54,7 @@
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>
+#include <net/ip.h>
#include "nf_internals.h"
@@ -222,7 +222,7 @@ static u32 hash_conntrack(const struct net *net,
return scale_hash(hash_conntrack_raw(tuple, net));
}
-bool
+static bool
nf_ct_get_tuple(const struct sk_buff *skb,
unsigned int nhoff,
unsigned int dataoff,
@@ -230,37 +230,151 @@ nf_ct_get_tuple(const struct sk_buff *skb,
u_int8_t protonum,
struct net *net,
struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto)
{
+ unsigned int size;
+ const __be32 *ap;
+ __be32 _addrs[8];
+ struct {
+ __be16 sport;
+ __be16 dport;
+ } _inet_hdr, *inet_hdr;
+
memset(tuple, 0, sizeof(*tuple));
tuple->src.l3num = l3num;
- if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
+ switch (l3num) {
+ case NFPROTO_IPV4:
+ nhoff += offsetof(struct iphdr, saddr);
+ size = 2 * sizeof(__be32);
+ break;
+ case NFPROTO_IPV6:
+ nhoff += offsetof(struct ipv6hdr, saddr);
+ size = sizeof(_addrs);
+ break;
+ default:
+ return true;
+ }
+
+ ap = skb_header_pointer(skb, nhoff, size, _addrs);
+ if (!ap)
return false;
+ switch (l3num) {
+ case NFPROTO_IPV4:
+ tuple->src.u3.ip = ap[0];
+ tuple->dst.u3.ip = ap[1];
+ break;
+ case NFPROTO_IPV6:
+ memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
+ memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
+ break;
+ }
+
tuple->dst.protonum = protonum;
tuple->dst.dir = IP_CT_DIR_ORIGINAL;
- return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
+ if (unlikely(l4proto->pkt_to_tuple))
+ return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
+
+ /* Actually only need first 4 bytes to get ports. */
+ inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
+ if (!inet_hdr)
+ return false;
+
+ tuple->src.u.udp.port = inet_hdr->sport;
+ tuple->dst.u.udp.port = inet_hdr->dport;
+ return true;
+}
+
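The generic branch above can read the ports with a single four-byte skb_header_pointer() because TCP, UDP, UDP-Lite, SCTP and DCCP all begin their headers with a 16-bit source port followed by a 16-bit destination port. A userspace sketch of that overlay, with memcpy() standing in for skb_header_pointer():

#include <arpa/inet.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* The 4-byte prefix shared by TCP/UDP/UDP-Lite/SCTP/DCCP headers. */
struct inet_ports {
	uint16_t sport;
	uint16_t dport;
};

int main(void)
{
	/* First four bytes of a UDP header: sport 53, dport 4096. */
	const uint8_t l4hdr[] = { 0x00, 0x35, 0x10, 0x00 };
	struct inet_ports p;

	memcpy(&p, l4hdr, sizeof(p));
	printf("sport %u dport %u\n", ntohs(p.sport), ntohs(p.dport));
	return 0;
}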
+static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
+ u_int8_t *protonum)
+{
+ int dataoff = -1;
+ const struct iphdr *iph;
+ struct iphdr _iph;
+
+ iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+ if (!iph)
+ return -1;
+
+ /* Conntrack defragments packets; we might still see fragments
+ * inside ICMP packets though.
+ */
+ if (iph->frag_off & htons(IP_OFFSET))
+ return -1;
+
+ dataoff = nhoff + (iph->ihl << 2);
+ *protonum = iph->protocol;
+
+ /* Check bogus IP headers */
+ if (dataoff > skb->len) {
+ pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
+ nhoff, iph->ihl << 2, skb->len);
+ return -1;
+ }
+ return dataoff;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
+ u8 *protonum)
+{
+ int protoff = -1;
+ unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
+ __be16 frag_off;
+ u8 nexthdr;
+
+ if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
+ &nexthdr, sizeof(nexthdr)) != 0) {
+ pr_debug("can't get nexthdr\n");
+ return -1;
+ }
+ protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
+ /*
+ * (protoff == skb->len) means the packet has no data, just
+ * IPv6 and possibly extension headers, but it is tracked anyway
+ */
+ if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
+ pr_debug("can't find proto in pkt\n");
+ return -1;
+ }
+
+ *protonum = nexthdr;
+ return protoff;
+}
+#endif
+
+static int get_l4proto(const struct sk_buff *skb,
+ unsigned int nhoff, u8 pf, u8 *l4num)
+{
+ switch (pf) {
+ case NFPROTO_IPV4:
+ return ipv4_get_l4proto(skb, nhoff, l4num);
+#if IS_ENABLED(CONFIG_IPV6)
+ case NFPROTO_IPV6:
+ return ipv6_get_l4proto(skb, nhoff, l4num);
+#endif
+ default:
+ *l4num = 0;
+ break;
+ }
+ return -1;
}
-EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
u_int16_t l3num,
struct net *net, struct nf_conntrack_tuple *tuple)
{
- const struct nf_conntrack_l3proto *l3proto;
const struct nf_conntrack_l4proto *l4proto;
- unsigned int protoff;
- u_int8_t protonum;
+ u8 protonum;
+ int protoff;
int ret;
rcu_read_lock();
- l3proto = __nf_ct_l3proto_find(l3num);
- ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
- if (ret != NF_ACCEPT) {
+ protoff = get_l4proto(skb, nhoff, l3num, &protonum);
+ if (protoff <= 0) {
rcu_read_unlock();
return false;
}
@@ -268,7 +382,7 @@ bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
l4proto = __nf_ct_l4proto_find(l3num, protonum);
ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
- l3proto, l4proto);
+ l4proto);
rcu_read_unlock();
return ret;
@@ -278,19 +392,35 @@ EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig,
- const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto)
{
memset(inverse, 0, sizeof(*inverse));
inverse->src.l3num = orig->src.l3num;
- if (l3proto->invert_tuple(inverse, orig) == 0)
- return false;
+
+ switch (orig->src.l3num) {
+ case NFPROTO_IPV4:
+ inverse->src.u3.ip = orig->dst.u3.ip;
+ inverse->dst.u3.ip = orig->src.u3.ip;
+ break;
+ case NFPROTO_IPV6:
+ inverse->src.u3.in6 = orig->dst.u3.in6;
+ inverse->dst.u3.in6 = orig->src.u3.in6;
+ break;
+ default:
+ break;
+ }
inverse->dst.dir = !orig->dst.dir;
inverse->dst.protonum = orig->dst.protonum;
- return l4proto->invert_tuple(inverse, orig);
+
+ if (unlikely(l4proto->invert_tuple))
+ return l4proto->invert_tuple(inverse, orig);
+
+ inverse->src.u.all = orig->dst.u.all;
+ inverse->dst.u.all = orig->src.u.all;
+ return true;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
@@ -502,6 +632,18 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
net_eq(net, nf_ct_net(ct));
}
+static inline bool
+nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
+{
+ return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
+ nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
+ &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
+ nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
+ nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
+ net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
+}
+
/* caller must hold rcu readlock and none of the nf_conntrack_locks */
static void nf_ct_gc_expired(struct nf_conn *ct)
{
@@ -695,19 +837,21 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
/* This is the conntrack entry already in hashes that won race. */
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
const struct nf_conntrack_l4proto *l4proto;
+ enum ip_conntrack_info oldinfo;
+ struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
if (l4proto->allow_clash &&
- ((ct->status & IPS_NAT_DONE_MASK) == 0) &&
!nf_ct_is_dying(ct) &&
atomic_inc_not_zero(&ct->ct_general.use)) {
- enum ip_conntrack_info oldinfo;
- struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
-
- nf_ct_acct_merge(ct, ctinfo, loser_ct);
- nf_conntrack_put(&loser_ct->ct_general);
- nf_ct_set(skb, ct, oldinfo);
- return NF_ACCEPT;
+ if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
+ nf_ct_match(ct, loser_ct)) {
+ nf_ct_acct_merge(ct, ctinfo, loser_ct);
+ nf_conntrack_put(&loser_ct->ct_general);
+ nf_ct_set(skb, ct, oldinfo);
+ return NF_ACCEPT;
+ }
+ nf_ct_put(ct);
}
NF_CT_STAT_INC(net, drop);
return NF_DROP;
@@ -1195,7 +1339,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free);
static noinline struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto,
struct sk_buff *skb,
unsigned int dataoff, u32 hash)
@@ -1208,9 +1351,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
const struct nf_conntrack_zone *zone;
struct nf_conn_timeout *timeout_ext;
struct nf_conntrack_zone tmp;
- unsigned int *timeouts;
- if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
+ if (!nf_ct_invert_tuple(&repl_tuple, tuple, l4proto)) {
pr_debug("Can't invert tuple.\n");
return NULL;
}
@@ -1227,15 +1369,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
}
timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
- if (timeout_ext) {
- timeouts = nf_ct_timeout_data(timeout_ext);
- if (unlikely(!timeouts))
- timeouts = l4proto->get_timeouts(net);
- } else {
- timeouts = l4proto->get_timeouts(net);
- }
- if (!l4proto->new(ct, skb, dataoff, timeouts)) {
+ if (!l4proto->new(ct, skb, dataoff)) {
nf_conntrack_free(ct);
pr_debug("can't track with proto module\n");
return NULL;
@@ -1266,8 +1401,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
ct->master = exp->master;
if (exp->helper) {
- help = nf_ct_helper_ext_add(ct, exp->helper,
- GFP_ATOMIC);
+ help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (help)
rcu_assign_pointer(help->helper, exp->helper);
}
@@ -1307,7 +1441,6 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
unsigned int dataoff,
u_int16_t l3num,
u_int8_t protonum,
- const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto)
{
const struct nf_conntrack_zone *zone;
@@ -1319,8 +1452,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
u32 hash;
if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
- dataoff, l3num, protonum, net, &tuple, l3proto,
- l4proto)) {
+ dataoff, l3num, protonum, net, &tuple, l4proto)) {
pr_debug("Can't get tuple\n");
return 0;
}
@@ -1330,7 +1462,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
hash = hash_conntrack_raw(&tuple, net);
h = __nf_conntrack_find_get(net, zone, &tuple, hash);
if (!h) {
- h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
+ h = init_conntrack(net, tmpl, &tuple, l4proto,
skb, dataoff, hash);
if (!h)
return 0;
@@ -1363,14 +1495,11 @@ unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
struct sk_buff *skb)
{
- const struct nf_conntrack_l3proto *l3proto;
const struct nf_conntrack_l4proto *l4proto;
struct nf_conn *ct, *tmpl;
enum ip_conntrack_info ctinfo;
- unsigned int *timeouts;
- unsigned int dataoff;
u_int8_t protonum;
- int ret;
+ int dataoff, ret;
tmpl = nf_ct_get(skb, &ctinfo);
if (tmpl || ctinfo == IP_CT_UNTRACKED) {
@@ -1384,14 +1513,12 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
}
/* rcu_read_lock()ed by nf_hook_thresh */
- l3proto = __nf_ct_l3proto_find(pf);
- ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
- &dataoff, &protonum);
- if (ret <= 0) {
+ dataoff = get_l4proto(skb, skb_network_offset(skb), pf, &protonum);
+ if (dataoff <= 0) {
pr_debug("not prepared to track yet or error occurred\n");
NF_CT_STAT_INC_ATOMIC(net, error);
NF_CT_STAT_INC_ATOMIC(net, invalid);
- ret = -ret;
+ ret = NF_ACCEPT;
goto out;
}
@@ -1413,8 +1540,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
goto out;
}
repeat:
- ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
- l3proto, l4proto);
+ ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, l4proto);
if (ret < 0) {
/* Too stressed to deal. */
NF_CT_STAT_INC_ATOMIC(net, drop);
@@ -1430,10 +1556,7 @@ repeat:
goto out;
}
- /* Decide what timeout policy we want to apply to this flow. */
- timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
-
- ret = l4proto->packet(ct, skb, dataoff, ctinfo, timeouts);
+ ret = l4proto->packet(ct, skb, dataoff, ctinfo);
if (ret <= 0) {
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
@@ -1471,7 +1594,6 @@ bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
rcu_read_lock();
ret = nf_ct_invert_tuple(inverse, orig,
- __nf_ct_l3proto_find(orig->src.l3num),
__nf_ct_l4proto_find(orig->src.l3num,
orig->dst.protonum));
rcu_read_unlock();
@@ -1609,14 +1731,14 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
{
- const struct nf_conntrack_l3proto *l3proto;
const struct nf_conntrack_l4proto *l4proto;
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
enum ip_conntrack_info ctinfo;
struct nf_nat_hook *nat_hook;
- unsigned int dataoff, status;
+ unsigned int status;
struct nf_conn *ct;
+ int dataoff;
u16 l3num;
u8 l4num;
@@ -1625,16 +1747,15 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
return 0;
l3num = nf_ct_l3num(ct);
- l3proto = nf_ct_l3proto_find_get(l3num);
- if (l3proto->get_l4proto(skb, skb_network_offset(skb), &dataoff,
- &l4num) <= 0)
+ dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
+ if (dataoff <= 0)
return -1;
l4proto = nf_ct_l4proto_find_get(l3num, l4num);
if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
- l4num, net, &tuple, l3proto, l4proto))
+ l4num, net, &tuple, l4proto))
return -1;
if (ct->status & IPS_SRC_NAT) {
@@ -1683,6 +1804,41 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
return 0;
}
+static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+ const struct sk_buff *skb)
+{
+ const struct nf_conntrack_tuple *src_tuple;
+ const struct nf_conntrack_tuple_hash *hash;
+ struct nf_conntrack_tuple srctuple;
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct) {
+ src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+ memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
+ return true;
+ }
+
+ if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+ NFPROTO_IPV4, dev_net(skb->dev),
+ &srctuple))
+ return false;
+
+ hash = nf_conntrack_find_get(dev_net(skb->dev),
+ &nf_ct_zone_dflt,
+ &srctuple);
+ if (!hash)
+ return false;
+
+ ct = nf_ct_tuplehash_to_ctrack(hash);
+ src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+ memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
+ nf_ct_put(ct);
+
+ return true;
+}
+
/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
@@ -1866,16 +2022,6 @@ static int kill_all(struct nf_conn *i, void *data)
return net_eq(nf_ct_net(i), data);
}
-void nf_ct_free_hashtable(void *hash, unsigned int size)
-{
- if (is_vmalloc_addr(hash))
- vfree(hash);
- else
- free_pages((unsigned long)hash,
- get_order(sizeof(struct hlist_head) * size));
-}
-EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
-
void nf_conntrack_cleanup_start(void)
{
conntrack_gc_work.exiting = true;
@@ -1886,7 +2032,7 @@ void nf_conntrack_cleanup_end(void)
{
RCU_INIT_POINTER(nf_ct_hook, NULL);
cancel_delayed_work_sync(&conntrack_gc_work.dwork);
- nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
+ kvfree(nf_conntrack_hash);
nf_conntrack_proto_fini();
nf_conntrack_seqadj_fini();
@@ -1952,7 +2098,6 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
{
struct hlist_nulls_head *hash;
unsigned int nr_slots, i;
- size_t sz;
if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
return NULL;
@@ -1960,14 +2105,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
- if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
- return NULL;
-
- sz = nr_slots * sizeof(struct hlist_nulls_head);
- hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
- get_order(sz));
- if (!hash)
- hash = vzalloc(sz);
+ hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head),
+ GFP_KERNEL | __GFP_ZERO);
if (hash && nulls)
for (i = 0; i < nr_slots; i++)
@@ -1994,7 +2133,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
old_size = nf_conntrack_htable_size;
if (old_size == hashsize) {
- nf_ct_free_hashtable(hash, hashsize);
+ kvfree(hash);
return 0;
}
@@ -2030,7 +2169,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
local_bh_enable();
synchronize_net();
- nf_ct_free_hashtable(old_hash, old_size);
+ kvfree(old_hash);
return 0;
}
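
The hunks above complete the allocator conversion: the removed nf_ct_free_hashtable() open-coded a vmalloc-vs-page-allocator decision that kvfree() now makes internally, and nf_ct_alloc_hashtable() swaps the __get_free_pages()-then-vzalloc() fallback for kvmalloc_array(). Note the resize path still defers kvfree(old_hash) until after synchronize_net(), since lookups walk the buckets under RCU. A minimal sketch of the pattern in isolation (helper names hypothetical):

    #include <linux/list_nulls.h>
    #include <linux/mm.h>	/* kvmalloc_array(), kvfree() */

    /* Allocate a zeroed bucket array: physically contiguous when that
     * is cheap, transparently falling back to vmalloc when it is not.
     */
    static struct hlist_nulls_head *example_alloc_buckets(unsigned int n)
    {
        return kvmalloc_array(n, sizeof(struct hlist_nulls_head),
                              GFP_KERNEL | __GFP_ZERO);
    }

    static void example_free_buckets(struct hlist_nulls_head *h)
    {
        /* kvfree() picks the matching release path, so callers no
         * longer need the is_vmalloc_addr() test the removed helper
         * carried.
         */
        kvfree(h);
    }
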
@@ -2054,9 +2193,6 @@ int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
-module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
- &nf_conntrack_htable_size, 0600);
-
static __always_inline unsigned int total_extension_size(void)
{
/* remember to add new extensions below */
@@ -2197,13 +2333,14 @@ err_acct:
err_expect:
kmem_cache_destroy(nf_conntrack_cachep);
err_cachep:
- nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
+ kvfree(nf_conntrack_hash);
return ret;
}
static struct nf_ct_hook nf_conntrack_hook = {
.update = nf_conntrack_update,
.destroy = destroy_conntrack,
+ .get_tuple_skb = nf_conntrack_get_tuple_skb,
};
void nf_conntrack_init_end(void)
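
The hook-struct hunk above is the tie-in for nf_conntrack_get_tuple_skb(): it is published through the nf_ct_hook indirection (declared in <linux/netfilter.h>), so code built outside the conntrack module can derive a packet's tuple without a hard symbol dependency. A hedged sketch of how a caller might consume it (caller name hypothetical; the RCU pattern mirrors the nf_ct_hook handling visible in nf_conntrack_cleanup_end() above):

    static bool example_skb_tuple(struct nf_conntrack_tuple *tuple,
                                  const struct sk_buff *skb)
    {
        struct nf_ct_hook *ct_hook;
        bool ret = false;

        rcu_read_lock();
        ct_hook = rcu_dereference(nf_ct_hook);
        if (ct_hook && ct_hook->get_tuple_skb)
            ret = ct_hook->get_tuple_skb(tuple, skb);  /* false: no tuple */
        rcu_read_unlock();

        return ret;
    }
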
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 853b23206bb7..27b84231db10 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -610,7 +610,6 @@ static int exp_seq_show(struct seq_file *s, void *v)
expect->tuple.src.l3num,
expect->tuple.dst.protonum);
print_tuple(s, &expect->tuple,
- __nf_ct_l3proto_find(expect->tuple.src.l3num),
__nf_ct_l4proto_find(expect->tuple.src.l3num,
expect->tuple.dst.protonum));
@@ -713,5 +712,5 @@ void nf_conntrack_expect_fini(void)
{
rcu_barrier(); /* Wait for call_rcu() before destroy */
kmem_cache_destroy(nf_ct_expect_cachep);
- nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_hsize);
+ kvfree(nf_ct_expect_hash);
}
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index a75b11c39312..e24b762ffa1d 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -24,7 +24,6 @@
#include <linux/rtnetlink.h>
#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
@@ -193,8 +192,7 @@ void nf_conntrack_helper_put(struct nf_conntrack_helper *helper)
EXPORT_SYMBOL_GPL(nf_conntrack_helper_put);
struct nf_conn_help *
-nf_ct_helper_ext_add(struct nf_conn *ct,
- struct nf_conntrack_helper *helper, gfp_t gfp)
+nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
{
struct nf_conn_help *help;
@@ -263,7 +261,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
}
if (help == NULL) {
- help = nf_ct_helper_ext_add(ct, helper, flags);
+ help = nf_ct_helper_ext_add(ct, flags);
if (help == NULL)
return -ENOMEM;
} else {
@@ -564,12 +562,12 @@ int nf_conntrack_helper_init(void)
return 0;
out_extend:
- nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
+ kvfree(nf_ct_helper_hash);
return ret;
}
void nf_conntrack_helper_fini(void)
{
nf_ct_extend_unregister(&helper_extend);
- nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
+ kvfree(nf_ct_helper_hash);
}
diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c
deleted file mode 100644
index 397e6911214f..000000000000
--- a/net/netfilter/nf_conntrack_l3proto_generic.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
- *
- * Based largely upon the original ip_conntrack code which
- * had the following copyright information:
- *
- * (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Author:
- * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- */
-
-#include <linux/types.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/icmp.h>
-#include <linux/sysctl.h>
-#include <net/ip.h>
-
-#include <linux/netfilter_ipv4.h>
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
-#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
-
-static bool generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
- struct nf_conntrack_tuple *tuple)
-{
- memset(&tuple->src.u3, 0, sizeof(tuple->src.u3));
- memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3));
-
- return true;
-}
-
-static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_tuple *orig)
-{
- memset(&tuple->src.u3, 0, sizeof(tuple->src.u3));
- memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3));
-
- return true;
-}
-
-static int generic_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
- unsigned int *dataoff, u_int8_t *protonum)
-{
- /* Never track !!! */
- return -NF_ACCEPT;
-}
-
-
-struct nf_conntrack_l3proto nf_conntrack_l3proto_generic __read_mostly = {
- .l3proto = PF_UNSPEC,
- .pkt_to_tuple = generic_pkt_to_tuple,
- .invert_tuple = generic_invert_tuple,
- .get_l4proto = generic_get_l4proto,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 20a2e37c76d1..f981bfa8db72 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -38,7 +38,6 @@
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
@@ -81,9 +80,26 @@ nla_put_failure:
return -1;
}
+static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
+ const struct nf_conntrack_tuple *tuple)
+{
+ if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
+ nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
+ const struct nf_conntrack_tuple *tuple)
+{
+ if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
+ nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
+ return -EMSGSIZE;
+ return 0;
+}
+
static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l3proto *l3proto)
+ const struct nf_conntrack_tuple *tuple)
{
int ret = 0;
struct nlattr *nest_parms;
@@ -92,8 +108,14 @@ static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
if (!nest_parms)
goto nla_put_failure;
- if (likely(l3proto->tuple_to_nlattr))
- ret = l3proto->tuple_to_nlattr(skb, tuple);
+ switch (tuple->src.l3num) {
+ case NFPROTO_IPV4:
+ ret = ipv4_tuple_to_nlattr(skb, tuple);
+ break;
+ case NFPROTO_IPV6:
+ ret = ipv6_tuple_to_nlattr(skb, tuple);
+ break;
+ }
nla_nest_end(skb, nest_parms);
@@ -106,13 +128,11 @@ nla_put_failure:
static int ctnetlink_dump_tuples(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple)
{
- const struct nf_conntrack_l3proto *l3proto;
const struct nf_conntrack_l4proto *l4proto;
int ret;
rcu_read_lock();
- l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
- ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);
+ ret = ctnetlink_dump_tuples_ip(skb, tuple);
if (ret >= 0) {
l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
@@ -556,15 +576,20 @@ nla_put_failure:
return -1;
}
+static const struct nla_policy cta_ip_nla_policy[CTA_IP_MAX + 1] = {
+ [CTA_IP_V4_SRC] = { .type = NLA_U32 },
+ [CTA_IP_V4_DST] = { .type = NLA_U32 },
+ [CTA_IP_V6_SRC] = { .len = sizeof(__be32) * 4 },
+ [CTA_IP_V6_DST] = { .len = sizeof(__be32) * 4 },
+};
+
#if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS)
static size_t ctnetlink_proto_size(const struct nf_conn *ct)
{
- const struct nf_conntrack_l3proto *l3proto;
const struct nf_conntrack_l4proto *l4proto;
size_t len, len4 = 0;
- l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
- len = l3proto->nla_size;
+ len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
len *= 3u; /* ORIG, REPLY, MASTER */
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
@@ -936,29 +961,54 @@ out:
return skb->len;
}
+static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
+ struct nf_conntrack_tuple *t)
+{
+ if (!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST])
+ return -EINVAL;
+
+ t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
+ t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
+
+ return 0;
+}
+
+static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
+ struct nf_conntrack_tuple *t)
+{
+ if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST])
+ return -EINVAL;
+
+ t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
+ t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
+
+ return 0;
+}
+
static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
struct nf_conntrack_tuple *tuple)
{
struct nlattr *tb[CTA_IP_MAX+1];
- struct nf_conntrack_l3proto *l3proto;
int ret = 0;
ret = nla_parse_nested(tb, CTA_IP_MAX, attr, NULL, NULL);
if (ret < 0)
return ret;
- rcu_read_lock();
- l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
+ ret = nla_validate_nested(attr, CTA_IP_MAX,
+ cta_ip_nla_policy, NULL);
+ if (ret)
+ return ret;
- if (likely(l3proto->nlattr_to_tuple)) {
- ret = nla_validate_nested(attr, CTA_IP_MAX,
- l3proto->nla_policy, NULL);
- if (ret == 0)
- ret = l3proto->nlattr_to_tuple(tb, tuple);
+ switch (tuple->src.l3num) {
+ case NFPROTO_IPV4:
+ ret = ipv4_nlattr_to_tuple(tb, tuple);
+ break;
+ case NFPROTO_IPV6:
+ ret = ipv6_nlattr_to_tuple(tb, tuple);
+ break;
}
- rcu_read_unlock();
-
return ret;
}
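
These ctnetlink hunks replace the per-l3proto netlink callbacks (tuple_to_nlattr, nlattr_to_tuple, nla_policy, nla_size) with the fixed cta_ip_nla_policy table plus explicit switches on tuple->src.l3num, which also lets ctnetlink_proto_size() derive the IP-attribute footprint from the policy rather than a stored ->nla_size. A small sketch of that size derivation, assuming nla_policy_len() semantics (the sum of nla_total_size() over every attribute the policy admits):

    /* Worst-case payload for one nested CTA_IP block: both v4 u32
     * attributes plus both 16-byte v6 attributes, exactly what the
     * removed per-l3proto ->nla_size fields used to hard-code.
     */
    static size_t example_ip_attr_size(void)
    {
        return nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
    }
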
@@ -1897,7 +1947,7 @@ ctnetlink_create_conntrack(struct net *net,
} else {
struct nf_conn_help *help;
- help = nf_ct_helper_ext_add(ct, helper, GFP_ATOMIC);
+ help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (help == NULL) {
err = -ENOMEM;
goto err2;
@@ -2581,7 +2631,6 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple_mask *mask)
{
- const struct nf_conntrack_l3proto *l3proto;
const struct nf_conntrack_l4proto *l4proto;
struct nf_conntrack_tuple m;
struct nlattr *nest_parms;
@@ -2597,8 +2646,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
goto nla_put_failure;
rcu_read_lock();
- l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
- ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
+ ret = ctnetlink_dump_tuples_ip(skb, &m);
if (ret >= 0) {
l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
tuple->dst.protonum);
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index d88841fbc560..30070732ee50 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -1,14 +1,4 @@
-/* L3/L4 protocol support for nf_conntrack. */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
- * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/netfilter.h>
@@ -24,14 +14,36 @@
#include <linux/netdevice.h>
#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_log.h>
+#include <linux/ip.h>
+#include <linux/icmp.h>
+#include <linux/sysctl.h>
+#include <net/route.h>
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+
+#include <linux/ipv6.h>
+#include <linux/in6.h>
+#include <net/ipv6.h>
+#include <net/inet_frag.h>
+
+extern unsigned int nf_conntrack_net_id;
+
static struct nf_conntrack_l4proto __rcu **nf_ct_protos[NFPROTO_NUMPROTO] __read_mostly;
-struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[NFPROTO_NUMPROTO] __read_mostly;
-EXPORT_SYMBOL_GPL(nf_ct_l3protos);
static DEFINE_MUTEX(nf_ct_proto_mutex);
@@ -122,137 +134,6 @@ __nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto)
}
EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find);
-/* this is guaranteed to always return a valid protocol helper, since
- * it falls back to generic_protocol */
-const struct nf_conntrack_l3proto *
-nf_ct_l3proto_find_get(u_int16_t l3proto)
-{
- struct nf_conntrack_l3proto *p;
-
- rcu_read_lock();
- p = __nf_ct_l3proto_find(l3proto);
- if (!try_module_get(p->me))
- p = &nf_conntrack_l3proto_generic;
- rcu_read_unlock();
-
- return p;
-}
-EXPORT_SYMBOL_GPL(nf_ct_l3proto_find_get);
-
-int
-nf_ct_l3proto_try_module_get(unsigned short l3proto)
-{
- const struct nf_conntrack_l3proto *p;
- int ret;
-
-retry: p = nf_ct_l3proto_find_get(l3proto);
- if (p == &nf_conntrack_l3proto_generic) {
- ret = request_module("nf_conntrack-%d", l3proto);
- if (!ret)
- goto retry;
-
- return -EPROTOTYPE;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(nf_ct_l3proto_try_module_get);
-
-void nf_ct_l3proto_module_put(unsigned short l3proto)
-{
- struct nf_conntrack_l3proto *p;
-
- /* rcu_read_lock not necessary since the caller holds a reference, but
- * taken anyways to avoid lockdep warnings in __nf_ct_l3proto_find()
- */
- rcu_read_lock();
- p = __nf_ct_l3proto_find(l3proto);
- module_put(p->me);
- rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put);
-
-static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
-{
- const struct nf_conntrack_l3proto *l3proto;
- int ret;
-
- might_sleep();
-
- ret = nf_ct_l3proto_try_module_get(nfproto);
- if (ret < 0)
- return ret;
-
- /* we already have a reference, can't fail */
- rcu_read_lock();
- l3proto = __nf_ct_l3proto_find(nfproto);
- rcu_read_unlock();
-
- if (!l3proto->net_ns_get)
- return 0;
-
- ret = l3proto->net_ns_get(net);
- if (ret < 0)
- nf_ct_l3proto_module_put(nfproto);
-
- return ret;
-}
-
-int nf_ct_netns_get(struct net *net, u8 nfproto)
-{
- int err;
-
- if (nfproto == NFPROTO_INET) {
- err = nf_ct_netns_do_get(net, NFPROTO_IPV4);
- if (err < 0)
- goto err1;
- err = nf_ct_netns_do_get(net, NFPROTO_IPV6);
- if (err < 0)
- goto err2;
- } else {
- err = nf_ct_netns_do_get(net, nfproto);
- if (err < 0)
- goto err1;
- }
- return 0;
-
-err2:
- nf_ct_netns_put(net, NFPROTO_IPV4);
-err1:
- return err;
-}
-EXPORT_SYMBOL_GPL(nf_ct_netns_get);
-
-static void nf_ct_netns_do_put(struct net *net, u8 nfproto)
-{
- const struct nf_conntrack_l3proto *l3proto;
-
- might_sleep();
-
- /* same as nf_conntrack_netns_get(), reference assumed */
- rcu_read_lock();
- l3proto = __nf_ct_l3proto_find(nfproto);
- rcu_read_unlock();
-
- if (WARN_ON(!l3proto))
- return;
-
- if (l3proto->net_ns_put)
- l3proto->net_ns_put(net);
-
- nf_ct_l3proto_module_put(nfproto);
-}
-
-void nf_ct_netns_put(struct net *net, uint8_t nfproto)
-{
- if (nfproto == NFPROTO_INET) {
- nf_ct_netns_do_put(net, NFPROTO_IPV4);
- nf_ct_netns_do_put(net, NFPROTO_IPV6);
- } else
- nf_ct_netns_do_put(net, nfproto);
-}
-EXPORT_SYMBOL_GPL(nf_ct_netns_put);
-
const struct nf_conntrack_l4proto *
nf_ct_l4proto_find_get(u_int16_t l3num, u_int8_t l4num)
{
@@ -274,11 +155,6 @@ void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p)
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_put);
-static int kill_l3proto(struct nf_conn *i, void *data)
-{
- return nf_ct_l3num(i) == ((const struct nf_conntrack_l3proto *)data)->l3proto;
-}
-
static int kill_l4proto(struct nf_conn *i, void *data)
{
const struct nf_conntrack_l4proto *l4proto;
@@ -287,52 +163,6 @@ static int kill_l4proto(struct nf_conn *i, void *data)
nf_ct_l3num(i) == l4proto->l3proto;
}
-int nf_ct_l3proto_register(const struct nf_conntrack_l3proto *proto)
-{
- int ret = 0;
- struct nf_conntrack_l3proto *old;
-
- if (proto->l3proto >= NFPROTO_NUMPROTO)
- return -EBUSY;
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- if (proto->tuple_to_nlattr && proto->nla_size == 0)
- return -EINVAL;
-#endif
- mutex_lock(&nf_ct_proto_mutex);
- old = rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
- lockdep_is_held(&nf_ct_proto_mutex));
- if (old != &nf_conntrack_l3proto_generic) {
- ret = -EBUSY;
- goto out_unlock;
- }
-
- rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], proto);
-
-out_unlock:
- mutex_unlock(&nf_ct_proto_mutex);
- return ret;
-
-}
-EXPORT_SYMBOL_GPL(nf_ct_l3proto_register);
-
-void nf_ct_l3proto_unregister(const struct nf_conntrack_l3proto *proto)
-{
- BUG_ON(proto->l3proto >= NFPROTO_NUMPROTO);
-
- mutex_lock(&nf_ct_proto_mutex);
- BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
- lockdep_is_held(&nf_ct_proto_mutex)
- ) != proto);
- rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
- &nf_conntrack_l3proto_generic);
- mutex_unlock(&nf_ct_proto_mutex);
-
- synchronize_rcu();
- /* Remove all contrack entries for this protocol */
- nf_ct_iterate_destroy(kill_l3proto, (void*)proto);
-}
-EXPORT_SYMBOL_GPL(nf_ct_l3proto_unregister);
-
static struct nf_proto_net *nf_ct_l4proto_net(struct net *net,
const struct nf_conntrack_l4proto *l4proto)
{
@@ -499,8 +329,23 @@ void nf_ct_l4proto_pernet_unregister_one(struct net *net,
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister_one);
-int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[],
- unsigned int num_proto)
+static void
+nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const l4proto[],
+ unsigned int num_proto)
+{
+ mutex_lock(&nf_ct_proto_mutex);
+ while (num_proto-- != 0)
+ __nf_ct_l4proto_unregister_one(l4proto[num_proto]);
+ mutex_unlock(&nf_ct_proto_mutex);
+
+ synchronize_net();
+ /* Remove all contrack entries for this protocol */
+	/* Remove all conntrack entries for this protocol */
+}
+
+static int
+nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[],
+ unsigned int num_proto)
{
int ret = -EINVAL, ver;
unsigned int i;
@@ -518,7 +363,6 @@ int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[],
}
return ret;
}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_register);
int nf_ct_l4proto_pernet_register(struct net *net,
const struct nf_conntrack_l4proto *const l4proto[],
@@ -542,20 +386,6 @@ int nf_ct_l4proto_pernet_register(struct net *net,
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register);
-void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const l4proto[],
- unsigned int num_proto)
-{
- mutex_lock(&nf_ct_proto_mutex);
- while (num_proto-- != 0)
- __nf_ct_l4proto_unregister_one(l4proto[num_proto]);
- mutex_unlock(&nf_ct_proto_mutex);
-
- synchronize_net();
- /* Remove all contrack entries for this protocol */
- nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto);
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister);
-
void nf_ct_l4proto_pernet_unregister(struct net *net,
const struct nf_conntrack_l4proto *const l4proto[],
unsigned int num_proto)
@@ -565,6 +395,562 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister);
+static unsigned int ipv4_helper(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ const struct nf_conn_help *help;
+ const struct nf_conntrack_helper *helper;
+
+ /* This is where we call the helper: as the packet goes out. */
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct || ctinfo == IP_CT_RELATED_REPLY)
+ return NF_ACCEPT;
+
+ help = nfct_help(ct);
+ if (!help)
+ return NF_ACCEPT;
+
+ /* rcu_read_lock()ed by nf_hook_thresh */
+ helper = rcu_dereference(help->helper);
+ if (!helper)
+ return NF_ACCEPT;
+
+ return helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
+ ct, ctinfo);
+}
+
+static unsigned int ipv4_confirm(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct || ctinfo == IP_CT_RELATED_REPLY)
+ goto out;
+
+ /* adjust seqs for loopback traffic only in outgoing direction */
+ if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+ !nf_is_loopback_packet(skb)) {
+ if (!nf_ct_seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) {
+ NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
+ return NF_DROP;
+ }
+ }
+out:
+ /* We've seen it coming out the other side: confirm it */
+ return nf_conntrack_confirm(skb);
+}
+
+static unsigned int ipv4_conntrack_in(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
+}
+
+static unsigned int ipv4_conntrack_local(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ if (ip_is_fragment(ip_hdr(skb))) { /* IP_NODEFRAG setsockopt set */
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *tmpl;
+
+ tmpl = nf_ct_get(skb, &ctinfo);
+ if (tmpl && nf_ct_is_template(tmpl)) {
+ /* when skipping ct, clear templates to avoid fooling
+ * later targets/matches
+ */
+ skb->_nfct = 0;
+ nf_ct_put(tmpl);
+ }
+ return NF_ACCEPT;
+ }
+
+ return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
+}
+
+/* Connection tracking may drop packets, but never alters them, so
+ * make it the first hook.
+ */
+static const struct nf_hook_ops ipv4_conntrack_ops[] = {
+ {
+ .hook = ipv4_conntrack_in,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_PRE_ROUTING,
+ .priority = NF_IP_PRI_CONNTRACK,
+ },
+ {
+ .hook = ipv4_conntrack_local,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP_PRI_CONNTRACK,
+ },
+ {
+ .hook = ipv4_helper,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP_PRI_CONNTRACK_HELPER,
+ },
+ {
+ .hook = ipv4_confirm,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
+ },
+ {
+ .hook = ipv4_helper,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP_PRI_CONNTRACK_HELPER,
+ },
+ {
+ .hook = ipv4_confirm,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
+ },
+};
+
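
The ops table above encodes the classic ordering: classification runs early on PRE_ROUTING/LOCAL_OUT, helpers run late on POST_ROUTING/LOCAL_IN, and confirmation runs last of all so the entry is only committed once no other hook has dropped the packet. For orientation, the priority values this relies on, as I believe they stand in include/uapi/linux/netfilter_ipv4.h for this era (a sketch, renamed to avoid clashing with the real enum):

    #include <limits.h>

    enum example_ipv4_conntrack_prio {
        EXAMPLE_PRI_CONNTRACK         = -200,    /* NF_IP_PRI_CONNTRACK */
        EXAMPLE_PRI_CONNTRACK_HELPER  = 300,     /* NF_IP_PRI_CONNTRACK_HELPER */
        EXAMPLE_PRI_CONNTRACK_CONFIRM = INT_MAX, /* NF_IP_PRI_CONNTRACK_CONFIRM */
    };
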
+/* Fast function for those who don't want to parse /proc (and I don't
+ * blame them).
+ * Reversing the socket's dst/src point of view gives us the reply
+ * mapping.
+ */
+static int
+getorigdst(struct sock *sk, int optval, void __user *user, int *len)
+{
+ const struct inet_sock *inet = inet_sk(sk);
+ const struct nf_conntrack_tuple_hash *h;
+ struct nf_conntrack_tuple tuple;
+
+ memset(&tuple, 0, sizeof(tuple));
+
+ lock_sock(sk);
+ tuple.src.u3.ip = inet->inet_rcv_saddr;
+ tuple.src.u.tcp.port = inet->inet_sport;
+ tuple.dst.u3.ip = inet->inet_daddr;
+ tuple.dst.u.tcp.port = inet->inet_dport;
+ tuple.src.l3num = PF_INET;
+ tuple.dst.protonum = sk->sk_protocol;
+ release_sock(sk);
+
+ /* We only do TCP and SCTP at the moment: is there a better way? */
+ if (tuple.dst.protonum != IPPROTO_TCP &&
+ tuple.dst.protonum != IPPROTO_SCTP) {
+ pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
+ return -ENOPROTOOPT;
+ }
+
+ if ((unsigned int)*len < sizeof(struct sockaddr_in)) {
+ pr_debug("SO_ORIGINAL_DST: len %d not %zu\n",
+ *len, sizeof(struct sockaddr_in));
+ return -EINVAL;
+ }
+
+ h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
+ if (h) {
+ struct sockaddr_in sin;
+ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+ sin.sin_family = AF_INET;
+ sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL]
+ .tuple.dst.u.tcp.port;
+ sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL]
+ .tuple.dst.u3.ip;
+ memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
+
+ pr_debug("SO_ORIGINAL_DST: %pI4 %u\n",
+ &sin.sin_addr.s_addr, ntohs(sin.sin_port));
+ nf_ct_put(ct);
+ if (copy_to_user(user, &sin, sizeof(sin)) != 0)
+ return -EFAULT;
+ else
+ return 0;
+ }
+ pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n",
+ &tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port),
+ &tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port));
+ return -ENOENT;
+}
+
+static struct nf_sockopt_ops so_getorigdst = {
+ .pf = PF_INET,
+ .get_optmin = SO_ORIGINAL_DST,
+ .get_optmax = SO_ORIGINAL_DST + 1,
+ .get = getorigdst,
+ .owner = THIS_MODULE,
+};
+
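
getorigdst() above serves the SO_ORIGINAL_DST socket option long used by transparent proxies to recover the pre-NAT destination; the IPv6 variant below mirrors it as IP6T_SO_ORIGINAL_DST. A minimal userspace sketch of the consumer side, assuming fd is a TCP socket accepted behind an iptables REDIRECT rule:

    #include <stdio.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <linux/netfilter_ipv4.h>	/* SO_ORIGINAL_DST */

    static int print_original_dst(int fd)
    {
        struct sockaddr_in sin;
        socklen_t len = sizeof(sin);

        /* Asks conntrack for the ORIGINAL-direction destination,
         * i.e. the address the client actually dialed.
         */
        if (getsockopt(fd, SOL_IP, SO_ORIGINAL_DST, &sin, &len) < 0)
            return -1;

        printf("original destination: %s:%u\n",
               inet_ntoa(sin.sin_addr), ntohs(sin.sin_port));
        return 0;
    }
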
+#if IS_ENABLED(CONFIG_IPV6)
+static int
+ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
+{
+ struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
+ const struct ipv6_pinfo *inet6 = inet6_sk(sk);
+ const struct inet_sock *inet = inet_sk(sk);
+ const struct nf_conntrack_tuple_hash *h;
+ struct sockaddr_in6 sin6;
+ struct nf_conn *ct;
+ __be32 flow_label;
+ int bound_dev_if;
+
+ lock_sock(sk);
+ tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
+ tuple.src.u.tcp.port = inet->inet_sport;
+ tuple.dst.u3.in6 = sk->sk_v6_daddr;
+ tuple.dst.u.tcp.port = inet->inet_dport;
+ tuple.dst.protonum = sk->sk_protocol;
+ bound_dev_if = sk->sk_bound_dev_if;
+ flow_label = inet6->flow_label;
+ release_sock(sk);
+
+ if (tuple.dst.protonum != IPPROTO_TCP &&
+ tuple.dst.protonum != IPPROTO_SCTP)
+ return -ENOPROTOOPT;
+
+ if (*len < 0 || (unsigned int)*len < sizeof(sin6))
+ return -EINVAL;
+
+ h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
+ if (!h) {
+ pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n",
+ &tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port),
+ &tuple.dst.u3.ip6, ntohs(tuple.dst.u.tcp.port));
+ return -ENOENT;
+ }
+
+ ct = nf_ct_tuplehash_to_ctrack(h);
+
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
+ sin6.sin6_flowinfo = flow_label & IPV6_FLOWINFO_MASK;
+ memcpy(&sin6.sin6_addr,
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6,
+ sizeof(sin6.sin6_addr));
+
+ nf_ct_put(ct);
+ sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, bound_dev_if);
+ return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
+}
+
+static struct nf_sockopt_ops so_getorigdst6 = {
+ .pf = NFPROTO_IPV6,
+ .get_optmin = IP6T_SO_ORIGINAL_DST,
+ .get_optmax = IP6T_SO_ORIGINAL_DST + 1,
+ .get = ipv6_getorigdst,
+ .owner = THIS_MODULE,
+};
+
+static unsigned int ipv6_confirm(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ unsigned char pnum = ipv6_hdr(skb)->nexthdr;
+ int protoff;
+ __be16 frag_off;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct || ctinfo == IP_CT_RELATED_REPLY)
+ goto out;
+
+ protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
+ &frag_off);
+ if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
+ pr_debug("proto header not found\n");
+ goto out;
+ }
+
+ /* adjust seqs for loopback traffic only in outgoing direction */
+ if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+ !nf_is_loopback_packet(skb)) {
+ if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
+ NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
+ return NF_DROP;
+ }
+ }
+out:
+ /* We've seen it coming out the other side: confirm it */
+ return nf_conntrack_confirm(skb);
+}
+
+static unsigned int ipv6_conntrack_in(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ return nf_conntrack_in(state->net, PF_INET6, state->hook, skb);
+}
+
+static unsigned int ipv6_conntrack_local(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ return nf_conntrack_in(state->net, PF_INET6, state->hook, skb);
+}
+
+static unsigned int ipv6_helper(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct nf_conn *ct;
+ const struct nf_conn_help *help;
+ const struct nf_conntrack_helper *helper;
+ enum ip_conntrack_info ctinfo;
+ __be16 frag_off;
+ int protoff;
+ u8 nexthdr;
+
+ /* This is where we call the helper: as the packet goes out. */
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct || ctinfo == IP_CT_RELATED_REPLY)
+ return NF_ACCEPT;
+
+ help = nfct_help(ct);
+ if (!help)
+ return NF_ACCEPT;
+ /* rcu_read_lock()ed by nf_hook_thresh */
+ helper = rcu_dereference(help->helper);
+ if (!helper)
+ return NF_ACCEPT;
+
+ nexthdr = ipv6_hdr(skb)->nexthdr;
+ protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+ &frag_off);
+ if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
+ pr_debug("proto header not found\n");
+ return NF_ACCEPT;
+ }
+
+ return helper->help(skb, protoff, ct, ctinfo);
+}
+
+static const struct nf_hook_ops ipv6_conntrack_ops[] = {
+ {
+ .hook = ipv6_conntrack_in,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_PRE_ROUTING,
+ .priority = NF_IP6_PRI_CONNTRACK,
+ },
+ {
+ .hook = ipv6_conntrack_local,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP6_PRI_CONNTRACK,
+ },
+ {
+ .hook = ipv6_helper,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP6_PRI_CONNTRACK_HELPER,
+ },
+ {
+ .hook = ipv6_confirm,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP6_PRI_LAST,
+ },
+ {
+ .hook = ipv6_helper,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP6_PRI_CONNTRACK_HELPER,
+ },
+ {
+ .hook = ipv6_confirm,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP6_PRI_LAST - 1,
+ },
+};
+#endif
+
+static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
+{
+ struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ int err = 0;
+
+ mutex_lock(&nf_ct_proto_mutex);
+
+ switch (nfproto) {
+ case NFPROTO_IPV4:
+ cnet->users4++;
+ if (cnet->users4 > 1)
+ goto out_unlock;
+ err = nf_defrag_ipv4_enable(net);
+ if (err) {
+ cnet->users4 = 0;
+ goto out_unlock;
+ }
+
+ err = nf_register_net_hooks(net, ipv4_conntrack_ops,
+ ARRAY_SIZE(ipv4_conntrack_ops));
+ if (err)
+ cnet->users4 = 0;
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case NFPROTO_IPV6:
+ cnet->users6++;
+ if (cnet->users6 > 1)
+ goto out_unlock;
+ err = nf_defrag_ipv6_enable(net);
+ if (err < 0) {
+ cnet->users6 = 0;
+ goto out_unlock;
+ }
+
+ err = nf_register_net_hooks(net, ipv6_conntrack_ops,
+ ARRAY_SIZE(ipv6_conntrack_ops));
+ if (err)
+ cnet->users6 = 0;
+ break;
+#endif
+ default:
+ err = -EPROTO;
+ break;
+ }
+ out_unlock:
+ mutex_unlock(&nf_ct_proto_mutex);
+ return err;
+}
+
+static void nf_ct_netns_do_put(struct net *net, u8 nfproto)
+{
+ struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+
+ mutex_lock(&nf_ct_proto_mutex);
+ switch (nfproto) {
+ case NFPROTO_IPV4:
+ if (cnet->users4 && (--cnet->users4 == 0))
+ nf_unregister_net_hooks(net, ipv4_conntrack_ops,
+ ARRAY_SIZE(ipv4_conntrack_ops));
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case NFPROTO_IPV6:
+ if (cnet->users6 && (--cnet->users6 == 0))
+ nf_unregister_net_hooks(net, ipv6_conntrack_ops,
+ ARRAY_SIZE(ipv6_conntrack_ops));
+ break;
+#endif
+ }
+
+ mutex_unlock(&nf_ct_proto_mutex);
+}
+
+int nf_ct_netns_get(struct net *net, u8 nfproto)
+{
+ int err;
+
+ if (nfproto == NFPROTO_INET) {
+ err = nf_ct_netns_do_get(net, NFPROTO_IPV4);
+ if (err < 0)
+ goto err1;
+ err = nf_ct_netns_do_get(net, NFPROTO_IPV6);
+ if (err < 0)
+ goto err2;
+ } else {
+ err = nf_ct_netns_do_get(net, nfproto);
+ if (err < 0)
+ goto err1;
+ }
+ return 0;
+
+err2:
+ nf_ct_netns_put(net, NFPROTO_IPV4);
+err1:
+ return err;
+}
+EXPORT_SYMBOL_GPL(nf_ct_netns_get);
+
+void nf_ct_netns_put(struct net *net, uint8_t nfproto)
+{
+ if (nfproto == NFPROTO_INET) {
+ nf_ct_netns_do_put(net, NFPROTO_IPV4);
+ nf_ct_netns_do_put(net, NFPROTO_IPV6);
+ } else {
+ nf_ct_netns_do_put(net, nfproto);
+ }
+}
+EXPORT_SYMBOL_GPL(nf_ct_netns_put);
+
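
With the l3proto modules gone, nf_ct_netns_get()/nf_ct_netns_put() above now directly refcount the per-netns hook registration (users4/users6), so the ipv4/ipv6 ops arrays are installed only while something in that namespace needs conntrack. A sketch of the usual consumer pairing, here as a hypothetical xtables match:

    /* checkentry/destroy pairing: take the per-netns conntrack hooks
     * for the rule's family on insert, drop them on removal.
     */
    static int example_mt_check(const struct xt_mtchk_param *par)
    {
        return nf_ct_netns_get(par->net, par->family);
    }

    static void example_mt_destroy(const struct xt_mtdtor_param *par)
    {
        nf_ct_netns_put(par->net, par->family);
    }
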
+static const struct nf_conntrack_l4proto * const builtin_l4proto[] = {
+ &nf_conntrack_l4proto_tcp4,
+ &nf_conntrack_l4proto_udp4,
+ &nf_conntrack_l4proto_icmp,
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ &nf_conntrack_l4proto_dccp4,
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ &nf_conntrack_l4proto_sctp4,
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+ &nf_conntrack_l4proto_udplite4,
+#endif
+#if IS_ENABLED(CONFIG_IPV6)
+ &nf_conntrack_l4proto_tcp6,
+ &nf_conntrack_l4proto_udp6,
+ &nf_conntrack_l4proto_icmpv6,
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ &nf_conntrack_l4proto_dccp6,
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ &nf_conntrack_l4proto_sctp6,
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+ &nf_conntrack_l4proto_udplite6,
+#endif
+#endif /* CONFIG_IPV6 */
+};
+
+int nf_conntrack_proto_init(void)
+{
+ int ret = 0;
+
+ ret = nf_register_sockopt(&so_getorigdst);
+ if (ret < 0)
+ return ret;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ ret = nf_register_sockopt(&so_getorigdst6);
+ if (ret < 0)
+ goto cleanup_sockopt;
+#endif
+ ret = nf_ct_l4proto_register(builtin_l4proto,
+ ARRAY_SIZE(builtin_l4proto));
+ if (ret < 0)
+ goto cleanup_sockopt2;
+
+ return ret;
+cleanup_sockopt2:
+ nf_unregister_sockopt(&so_getorigdst);
+#if IS_ENABLED(CONFIG_IPV6)
+cleanup_sockopt:
+ nf_unregister_sockopt(&so_getorigdst6);
+#endif
+ return ret;
+}
+
+void nf_conntrack_proto_fini(void)
+{
+ unsigned int i;
+
+ nf_unregister_sockopt(&so_getorigdst);
+#if IS_ENABLED(CONFIG_IPV6)
+ nf_unregister_sockopt(&so_getorigdst6);
+#endif
+	/* No need to call nf_ct_l4proto_unregister(); the registration
+	 * tables are freed here anyway.
+	 */
+ for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++)
+ kfree(nf_ct_protos[i]);
+}
+
int nf_conntrack_proto_pernet_init(struct net *net)
{
int err;
@@ -581,6 +967,14 @@ int nf_conntrack_proto_pernet_init(struct net *net)
if (err < 0)
return err;
+ err = nf_ct_l4proto_pernet_register(net, builtin_l4proto,
+ ARRAY_SIZE(builtin_l4proto));
+ if (err < 0) {
+ nf_ct_l4proto_unregister_sysctl(net, pn,
+ &nf_conntrack_l4proto_generic);
+ return err;
+ }
+
pn->users++;
return 0;
}
@@ -590,25 +984,19 @@ void nf_conntrack_proto_pernet_fini(struct net *net)
struct nf_proto_net *pn = nf_ct_l4proto_net(net,
&nf_conntrack_l4proto_generic);
+ nf_ct_l4proto_pernet_unregister(net, builtin_l4proto,
+ ARRAY_SIZE(builtin_l4proto));
pn->users--;
nf_ct_l4proto_unregister_sysctl(net,
pn,
&nf_conntrack_l4proto_generic);
}
-int nf_conntrack_proto_init(void)
-{
- unsigned int i;
- for (i = 0; i < NFPROTO_NUMPROTO; i++)
- rcu_assign_pointer(nf_ct_l3protos[i],
- &nf_conntrack_l3proto_generic);
- return 0;
-}
-void nf_conntrack_proto_fini(void)
-{
- unsigned int i;
- /* free l3proto protocol tables */
- for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++)
- kfree(nf_ct_protos[i]);
-}
+module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
+ &nf_conntrack_htable_size, 0600);
+
+MODULE_ALIAS("ip_conntrack");
+MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
+MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
+MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 9ce6336d1e55..8c58f96b59e7 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -23,6 +23,7 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_log.h>
/* Timeouts are based on values from RFC4340:
@@ -388,31 +389,8 @@ static inline struct nf_dccp_net *dccp_pernet(struct net *net)
return &net->ct.nf_ct_proto.dccp;
}
-static bool dccp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
- struct net *net, struct nf_conntrack_tuple *tuple)
-{
- struct dccp_hdr _hdr, *dh;
-
- /* Actually only need first 4 bytes to get ports. */
- dh = skb_header_pointer(skb, dataoff, 4, &_hdr);
- if (dh == NULL)
- return false;
-
- tuple->src.u.dccp.port = dh->dccph_sport;
- tuple->dst.u.dccp.port = dh->dccph_dport;
- return true;
-}
-
-static bool dccp_invert_tuple(struct nf_conntrack_tuple *inv,
- const struct nf_conntrack_tuple *tuple)
-{
- inv->src.u.dccp.port = tuple->dst.u.dccp.port;
- inv->dst.u.dccp.port = tuple->src.u.dccp.port;
- return true;
-}
-
static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff, unsigned int *timeouts)
+ unsigned int dataoff)
{
struct net *net = nf_ct_net(ct);
struct nf_dccp_net *dn;
@@ -460,19 +438,14 @@ static u64 dccp_ack_seq(const struct dccp_hdr *dh)
ntohl(dhack->dccph_ack_nr_low);
}
-static unsigned int *dccp_get_timeouts(struct net *net)
-{
- return dccp_pernet(net)->dccp_timeout;
-}
-
static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff, enum ip_conntrack_info ctinfo,
- unsigned int *timeouts)
+ unsigned int dataoff, enum ip_conntrack_info ctinfo)
{
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct dccp_hdr _dh, *dh;
u_int8_t type, old_state, new_state;
enum ct_dccp_roles role;
+ unsigned int *timeouts;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
BUG_ON(dh == NULL);
@@ -546,6 +519,9 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
if (new_state != old_state)
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
+ timeouts = nf_ct_timeout_lookup(ct);
+ if (!timeouts)
+ timeouts = dccp_pernet(nf_ct_net(ct))->dccp_timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
return NF_ACCEPT;
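
This hunk is the template for the timeout rework repeated in every tracker below (generic, GRE, ICMP, ICMPv6, SCTP, TCP, UDP): instead of the core passing in a timeout array obtained via the removed ->get_timeouts callback, each ->packet handler resolves its own, preferring a per-conntrack timeout policy and falling back to the per-netns defaults. Reduced to its essentials (helper name hypothetical):

    /* Per-entry timeout policies (the conntrack timeout extension)
     * win; otherwise use the protocol's per-netns defaults.
     */
    static unsigned int *example_resolve_timeouts(struct nf_conn *ct,
                                                  unsigned int *netns_defaults)
    {
        unsigned int *timeouts = nf_ct_timeout_lookup(ct);

        return timeouts ? timeouts : netns_defaults;
    }
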
@@ -864,11 +840,8 @@ static struct nf_proto_net *dccp_get_net_proto(struct net *net)
const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
.l3proto = AF_INET,
.l4proto = IPPROTO_DCCP,
- .pkt_to_tuple = dccp_pkt_to_tuple,
- .invert_tuple = dccp_invert_tuple,
.new = dccp_new,
.packet = dccp_packet,
- .get_timeouts = dccp_get_timeouts,
.error = dccp_error,
.can_early_drop = dccp_can_early_drop,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
@@ -900,11 +873,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp4);
const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
.l3proto = AF_INET6,
.l4proto = IPPROTO_DCCP,
- .pkt_to_tuple = dccp_pkt_to_tuple,
- .invert_tuple = dccp_invert_tuple,
.new = dccp_new,
.packet = dccp_packet,
- .get_timeouts = dccp_get_timeouts,
.error = dccp_error,
.can_early_drop = dccp_can_early_drop,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 6c6896d21cd7..ac4a0b296dcd 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -11,6 +11,7 @@
#include <linux/timer.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
static const unsigned int nf_ct_generic_timeout = 600*HZ;
@@ -41,34 +42,24 @@ static bool generic_pkt_to_tuple(const struct sk_buff *skb,
return true;
}
-static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_tuple *orig)
-{
- tuple->src.u.all = 0;
- tuple->dst.u.all = 0;
-
- return true;
-}
-
-static unsigned int *generic_get_timeouts(struct net *net)
-{
- return &(generic_pernet(net)->timeout);
-}
-
/* Returns verdict for packet, or -1 for invalid. */
static int generic_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- unsigned int *timeout)
+ enum ip_conntrack_info ctinfo)
{
+ const unsigned int *timeout = nf_ct_timeout_lookup(ct);
+
+ if (!timeout)
+ timeout = &generic_pernet(nf_ct_net(ct))->timeout;
+
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
}
/* Called when a new connection for this protocol found. */
static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff, unsigned int *timeouts)
+ unsigned int dataoff)
{
bool ret;
@@ -87,8 +78,11 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
- unsigned int *timeout = data;
struct nf_generic_net *gn = generic_pernet(net);
+ unsigned int *timeout = data;
+
+ if (!timeout)
+ timeout = &gn->timeout;
if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT])
*timeout =
@@ -168,9 +162,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
.l3proto = PF_UNSPEC,
.l4proto = 255,
.pkt_to_tuple = generic_pkt_to_tuple,
- .invert_tuple = generic_invert_tuple,
.packet = generic_packet,
- .get_timeouts = generic_get_timeouts,
.new = generic_new,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index d049ea5a3770..d1632252bf5b 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -39,6 +39,7 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
@@ -179,15 +180,6 @@ EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_destroy);
/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */
-/* invert gre part of tuple */
-static bool gre_invert_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_tuple *orig)
-{
- tuple->dst.u.gre.key = orig->src.u.gre.key;
- tuple->src.u.gre.key = orig->dst.u.gre.key;
- return true;
-}
-
/* gre hdr info to tuple */
static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct net *net, struct nf_conntrack_tuple *tuple)
@@ -243,8 +235,7 @@ static unsigned int *gre_get_timeouts(struct net *net)
static int gre_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- unsigned int *timeouts)
+ enum ip_conntrack_info ctinfo)
{
/* If we've seen traffic both ways, this is a GRE connection.
* Extend timeout. */
@@ -263,8 +254,13 @@ static int gre_packet(struct nf_conn *ct,
/* Called when a new connection for this protocol found. */
static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff, unsigned int *timeouts)
+ unsigned int dataoff)
{
+ unsigned int *timeouts = nf_ct_timeout_lookup(ct);
+
+ if (!timeouts)
+ timeouts = gre_get_timeouts(nf_ct_net(ct));
+
pr_debug(": ");
nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
@@ -300,6 +296,8 @@ static int gre_timeout_nlattr_to_obj(struct nlattr *tb[],
unsigned int *timeouts = data;
struct netns_proto_gre *net_gre = gre_pernet(net);
+ if (!timeouts)
+ timeouts = gre_get_timeouts(net);
/* set default timeouts for GRE. */
timeouts[GRE_CT_UNREPLIED] = net_gre->gre_timeouts[GRE_CT_UNREPLIED];
timeouts[GRE_CT_REPLIED] = net_gre->gre_timeouts[GRE_CT_REPLIED];
@@ -356,11 +354,9 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
.l3proto = AF_INET,
.l4proto = IPPROTO_GRE,
.pkt_to_tuple = gre_pkt_to_tuple,
- .invert_tuple = gre_invert_tuple,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = gre_print_conntrack,
#endif
- .get_timeouts = gre_get_timeouts,
.packet = gre_packet,
.new = gre_new,
.destroy = gre_destroy,
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index 5c15beafa711..036670b38282 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -19,6 +19,7 @@
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_log.h>
@@ -80,12 +81,16 @@ static unsigned int *icmp_get_timeouts(struct net *net)
static int icmp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- unsigned int *timeout)
+ enum ip_conntrack_info ctinfo)
{
/* Do not immediately delete the connection after the first
successful reply to avoid excessive conntrackd traffic
and also to handle correctly ICMP echo reply duplicates. */
+ unsigned int *timeout = nf_ct_timeout_lookup(ct);
+
+ if (!timeout)
+ timeout = icmp_get_timeouts(nf_ct_net(ct));
+
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
@@ -93,7 +98,7 @@ static int icmp_packet(struct nf_conn *ct,
/* Called when a new connection for this protocol found. */
static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff, unsigned int *timeouts)
+ unsigned int dataoff)
{
static const u_int8_t valid_new[] = {
[ICMP_ECHO] = 1,
@@ -142,8 +147,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
/* Ordinarily, we'd expect the inverted tupleproto, but it's
been preserved inside the ICMP. */
- if (!nf_ct_invert_tuple(&innertuple, &origtuple,
- &nf_conntrack_l3proto_ipv4, innerproto)) {
+ if (!nf_ct_invert_tuple(&innertuple, &origtuple, innerproto)) {
pr_debug("icmp_error_message: no match\n");
return -NF_ACCEPT;
}
@@ -281,9 +285,11 @@ static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct nf_icmp_net *in = icmp_pernet(net);
if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
+ if (!timeout)
+ timeout = &in->timeout;
*timeout =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ;
- } else {
+ } else if (timeout) {
/* Set default ICMP timeout. */
*timeout = in->timeout;
}
@@ -358,7 +364,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
.pkt_to_tuple = icmp_pkt_to_tuple,
.invert_tuple = icmp_invert_tuple,
.packet = icmp_packet,
- .get_timeouts = icmp_get_timeouts,
.new = icmp_new,
.error = icmp_error,
.destroy = NULL,
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
index 2548e2c8aedd..bed07b998a10 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
@@ -23,6 +23,7 @@
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
#include <net/netfilter/nf_log.h>
@@ -93,9 +94,13 @@ static unsigned int *icmpv6_get_timeouts(struct net *net)
static int icmpv6_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- unsigned int *timeout)
+ enum ip_conntrack_info ctinfo)
{
+ unsigned int *timeout = nf_ct_timeout_lookup(ct);
+
+ if (!timeout)
+ timeout = icmpv6_get_timeouts(nf_ct_net(ct));
+
/* Do not immediately delete the connection after the first
successful reply to avoid excessive conntrackd traffic
and also to handle correctly ICMP echo reply duplicates. */
@@ -106,7 +111,7 @@ static int icmpv6_packet(struct nf_conn *ct,
/* Called when a new connection for this protocol found. */
static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff, unsigned int *timeouts)
+ unsigned int dataoff)
{
static const u_int8_t valid_new[] = {
[ICMPV6_ECHO_REQUEST - 128] = 1,
@@ -152,8 +157,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
/* Ordinarily, we'd expect the inverted tupleproto, but it's
been preserved inside the ICMP. */
- if (!nf_ct_invert_tuple(&intuple, &origtuple,
- &nf_conntrack_l3proto_ipv6, inproto)) {
+ if (!nf_ct_invert_tuple(&intuple, &origtuple, inproto)) {
pr_debug("icmpv6_error: Can't invert tuple\n");
return -NF_ACCEPT;
}
@@ -281,6 +285,8 @@ static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[],
unsigned int *timeout = data;
struct nf_icmp_net *in = icmpv6_pernet(net);
+ if (!timeout)
+ timeout = icmpv6_get_timeouts(net);
if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) {
*timeout =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ;
@@ -359,7 +365,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
.pkt_to_tuple = icmpv6_pkt_to_tuple,
.invert_tuple = icmpv6_invert_tuple,
.packet = icmpv6_packet,
- .get_timeouts = icmpv6_get_timeouts,
.new = icmpv6_new,
.error = icmpv6_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index fb9a35d16069..8d1e085fc14a 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -28,6 +28,7 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
closely. They're more complex. --RR
@@ -150,30 +151,6 @@ static inline struct nf_sctp_net *sctp_pernet(struct net *net)
return &net->ct.nf_ct_proto.sctp;
}
-static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
- struct net *net, struct nf_conntrack_tuple *tuple)
-{
- const struct sctphdr *hp;
- struct sctphdr _hdr;
-
- /* Actually only need first 4 bytes to get ports. */
- hp = skb_header_pointer(skb, dataoff, 4, &_hdr);
- if (hp == NULL)
- return false;
-
- tuple->src.u.sctp.port = hp->source;
- tuple->dst.u.sctp.port = hp->dest;
- return true;
-}
-
-static bool sctp_invert_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_tuple *orig)
-{
- tuple->src.u.sctp.port = orig->dst.u.sctp.port;
- tuple->dst.u.sctp.port = orig->src.u.sctp.port;
- return true;
-}
-
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
@@ -296,17 +273,11 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
return sctp_conntracks[dir][i][cur_state];
}
-static unsigned int *sctp_get_timeouts(struct net *net)
-{
- return sctp_pernet(net)->timeouts;
-}
-
/* Returns verdict for packet, or -NF_ACCEPT for invalid. */
static int sctp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- unsigned int *timeouts)
+ enum ip_conntrack_info ctinfo)
{
enum sctp_conntrack new_state, old_state;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@@ -315,6 +286,7 @@ static int sctp_packet(struct nf_conn *ct,
const struct sctp_chunkhdr *sch;
struct sctp_chunkhdr _sch;
u_int32_t offset, count;
+ unsigned int *timeouts;
unsigned long map[256 / sizeof(unsigned long)] = { 0 };
sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
@@ -403,6 +375,10 @@ static int sctp_packet(struct nf_conn *ct,
}
spin_unlock_bh(&ct->lock);
+ timeouts = nf_ct_timeout_lookup(ct);
+ if (!timeouts)
+ timeouts = sctp_pernet(nf_ct_net(ct))->timeouts;
+
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
@@ -423,7 +399,7 @@ out:
/* Called when a new connection for this protocol found. */
static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff, unsigned int *timeouts)
+ unsigned int dataoff)
{
enum sctp_conntrack new_state;
const struct sctphdr *sh;
@@ -780,13 +756,10 @@ static struct nf_proto_net *sctp_get_net_proto(struct net *net)
const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
.l3proto = PF_INET,
.l4proto = IPPROTO_SCTP,
- .pkt_to_tuple = sctp_pkt_to_tuple,
- .invert_tuple = sctp_invert_tuple,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = sctp_print_conntrack,
#endif
.packet = sctp_packet,
- .get_timeouts = sctp_get_timeouts,
.new = sctp_new,
.error = sctp_error,
.can_early_drop = sctp_can_early_drop,
@@ -817,13 +790,10 @@ EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp4);
const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
.l3proto = PF_INET6,
.l4proto = IPPROTO_SCTP,
- .pkt_to_tuple = sctp_pkt_to_tuple,
- .invert_tuple = sctp_invert_tuple,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = sctp_print_conntrack,
#endif
.packet = sctp_packet,
- .get_timeouts = sctp_get_timeouts,
.new = sctp_new,
.error = sctp_error,
.can_early_drop = sctp_can_early_drop,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 8e67910185a0..d80d322b9d8b 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -29,6 +29,7 @@
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
@@ -276,31 +277,6 @@ static inline struct nf_tcp_net *tcp_pernet(struct net *net)
return &net->ct.nf_ct_proto.tcp;
}
-static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
- struct net *net, struct nf_conntrack_tuple *tuple)
-{
- const struct tcphdr *hp;
- struct tcphdr _hdr;
-
- /* Actually only need first 4 bytes to get ports. */
- hp = skb_header_pointer(skb, dataoff, 4, &_hdr);
- if (hp == NULL)
- return false;
-
- tuple->src.u.tcp.port = hp->source;
- tuple->dst.u.tcp.port = hp->dest;
-
- return true;
-}
-
-static bool tcp_invert_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_tuple *orig)
-{
- tuple->src.u.tcp.port = orig->dst.u.tcp.port;
- tuple->dst.u.tcp.port = orig->src.u.tcp.port;
- return true;
-}
-
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
@@ -793,27 +769,21 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
return NF_ACCEPT;
}
-static unsigned int *tcp_get_timeouts(struct net *net)
-{
- return tcp_pernet(net)->timeouts;
-}
-
/* Returns verdict for packet, or -1 for invalid. */
static int tcp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- unsigned int *timeouts)
+ enum ip_conntrack_info ctinfo)
{
struct net *net = nf_ct_net(ct);
struct nf_tcp_net *tn = tcp_pernet(net);
struct nf_conntrack_tuple *tuple;
enum tcp_conntrack new_state, old_state;
+ unsigned int index, *timeouts;
enum ip_conntrack_dir dir;
const struct tcphdr *th;
struct tcphdr _tcph;
unsigned long timeout;
- unsigned int index;
th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
BUG_ON(th == NULL);
@@ -1046,6 +1016,10 @@ static int tcp_packet(struct nf_conn *ct,
&& new_state == TCP_CONNTRACK_FIN_WAIT)
ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
+ timeouts = nf_ct_timeout_lookup(ct);
+ if (!timeouts)
+ timeouts = tn->timeouts;
+
if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
@@ -1095,7 +1069,7 @@ static int tcp_packet(struct nf_conn *ct,
/* Called when a new connection for this protocol found. */
static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff, unsigned int *timeouts)
+ unsigned int dataoff)
{
enum tcp_conntrack new_state;
const struct tcphdr *th;
@@ -1313,10 +1287,12 @@ static unsigned int tcp_nlattr_tuple_size(void)
static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
- unsigned int *timeouts = data;
struct nf_tcp_net *tn = tcp_pernet(net);
+ unsigned int *timeouts = data;
int i;
+ if (!timeouts)
+ timeouts = tn->timeouts;
/* set default TCP timeouts. */
for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
timeouts[i] = tn->timeouts[i];
@@ -1559,13 +1535,10 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
{
.l3proto = PF_INET,
.l4proto = IPPROTO_TCP,
- .pkt_to_tuple = tcp_pkt_to_tuple,
- .invert_tuple = tcp_invert_tuple,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = tcp_print_conntrack,
#endif
.packet = tcp_packet,
- .get_timeouts = tcp_get_timeouts,
.new = tcp_new,
.error = tcp_error,
.can_early_drop = tcp_can_early_drop,
@@ -1597,13 +1570,10 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
{
.l3proto = PF_INET6,
.l4proto = IPPROTO_TCP,
- .pkt_to_tuple = tcp_pkt_to_tuple,
- .invert_tuple = tcp_invert_tuple,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = tcp_print_conntrack,
#endif
.packet = tcp_packet,
- .get_timeouts = tcp_get_timeouts,
.new = tcp_new,
.error = tcp_error,
.can_early_drop = tcp_can_early_drop,
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index fe7243970aa4..7a1b8988a931 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -22,6 +22,7 @@
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
@@ -36,33 +37,6 @@ static inline struct nf_udp_net *udp_pernet(struct net *net)
return &net->ct.nf_ct_proto.udp;
}
-static bool udp_pkt_to_tuple(const struct sk_buff *skb,
- unsigned int dataoff,
- struct net *net,
- struct nf_conntrack_tuple *tuple)
-{
- const struct udphdr *hp;
- struct udphdr _hdr;
-
- /* Actually only need first 4 bytes to get ports. */
- hp = skb_header_pointer(skb, dataoff, 4, &_hdr);
- if (hp == NULL)
- return false;
-
- tuple->src.u.udp.port = hp->source;
- tuple->dst.u.udp.port = hp->dest;
-
- return true;
-}
-
-static bool udp_invert_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_tuple *orig)
-{
- tuple->src.u.udp.port = orig->dst.u.udp.port;
- tuple->dst.u.udp.port = orig->src.u.udp.port;
- return true;
-}
-
static unsigned int *udp_get_timeouts(struct net *net)
{
return udp_pernet(net)->timeouts;
@@ -72,9 +46,14 @@ static unsigned int *udp_get_timeouts(struct net *net)
static int udp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- unsigned int *timeouts)
+ enum ip_conntrack_info ctinfo)
{
+ unsigned int *timeouts;
+
+ timeouts = nf_ct_timeout_lookup(ct);
+ if (!timeouts)
+ timeouts = udp_get_timeouts(nf_ct_net(ct));
+
/* If we've seen traffic both ways, this is some kind of UDP
stream. Extend timeout. */
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
@@ -92,7 +71,7 @@ static int udp_packet(struct nf_conn *ct,
/* Called when a new connection for this protocol found. */
static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff, unsigned int *timeouts)
+ unsigned int dataoff)
{
return true;
}
@@ -203,6 +182,9 @@ static int udp_timeout_nlattr_to_obj(struct nlattr *tb[],
unsigned int *timeouts = data;
struct nf_udp_net *un = udp_pernet(net);
+ if (!timeouts)
+ timeouts = un->timeouts;
+
/* set default timeouts for UDP. */
timeouts[UDP_CT_UNREPLIED] = un->timeouts[UDP_CT_UNREPLIED];
timeouts[UDP_CT_REPLIED] = un->timeouts[UDP_CT_REPLIED];
@@ -301,10 +283,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
.l3proto = PF_INET,
.l4proto = IPPROTO_UDP,
.allow_clash = true,
- .pkt_to_tuple = udp_pkt_to_tuple,
- .invert_tuple = udp_invert_tuple,
.packet = udp_packet,
- .get_timeouts = udp_get_timeouts,
.new = udp_new,
.error = udp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -333,10 +312,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
.l3proto = PF_INET,
.l4proto = IPPROTO_UDPLITE,
.allow_clash = true,
- .pkt_to_tuple = udp_pkt_to_tuple,
- .invert_tuple = udp_invert_tuple,
.packet = udp_packet,
- .get_timeouts = udp_get_timeouts,
.new = udp_new,
.error = udplite_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -365,10 +341,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
.l3proto = PF_INET6,
.l4proto = IPPROTO_UDP,
.allow_clash = true,
- .pkt_to_tuple = udp_pkt_to_tuple,
- .invert_tuple = udp_invert_tuple,
.packet = udp_packet,
- .get_timeouts = udp_get_timeouts,
.new = udp_new,
.error = udp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -397,10 +370,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
.l3proto = PF_INET6,
.l4proto = IPPROTO_UDPLITE,
.allow_clash = true,
- .pkt_to_tuple = udp_pkt_to_tuple,
- .invert_tuple = udp_invert_tuple,
.packet = udp_packet,
- .get_timeouts = udp_get_timeouts,
.new = udp_new,
.error = udplite_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
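The UDP tracker gets the same treatment, and its netlink conversion callback now also accepts data == NULL to mean "write straight into the per-netns defaults", which is what lets cttimeout_default_set() later in this series pass NULL instead of fetching the array through the removed hook. A sketch of that contract (my_pernet_udp_timeouts() is a hypothetical stand-in for udp_pernet(net)->timeouts):

    static unsigned int *my_pernet_udp_timeouts(struct net *net);

    static int example_udp_nlattr_to_obj(struct nlattr *tb[],
                                         struct net *net, void *data)
    {
            unsigned int *timeouts = data;

            /* NULL: update the per-netns default policy in place. */
            if (!timeouts)
                    timeouts = my_pernet_udp_timeouts(net);

            if (tb[CTA_TIMEOUT_UDP_REPLIED])
                    timeouts[UDP_CT_REPLIED] =
                            ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_REPLIED])) * HZ;
            return 0;
    }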
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index b642c0b2495c..13279f683da9 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -1,12 +1,4 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
+// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
@@ -24,7 +16,6 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
@@ -33,15 +24,14 @@
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <linux/rculist_nulls.h>
-MODULE_LICENSE("GPL");
+unsigned int nf_conntrack_net_id __read_mostly;
#ifdef CONFIG_NF_CONNTRACK_PROCFS
void
print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto)
{
- switch (l3proto->l3proto) {
+ switch (tuple->src.l3num) {
case NFPROTO_IPV4:
seq_printf(s, "src=%pI4 dst=%pI4 ",
&tuple->src.u3.ip, &tuple->dst.u3.ip);
@@ -282,7 +272,6 @@ static int ct_seq_show(struct seq_file *s, void *v)
{
struct nf_conntrack_tuple_hash *hash = v;
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
- const struct nf_conntrack_l3proto *l3proto;
const struct nf_conntrack_l4proto *l4proto;
struct net *net = seq_file_net(s);
int ret = 0;
@@ -303,14 +292,12 @@ static int ct_seq_show(struct seq_file *s, void *v)
if (!net_eq(nf_ct_net(ct), net))
goto release;
- l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
- WARN_ON(!l3proto);
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
WARN_ON(!l4proto);
ret = -ENOSPC;
seq_printf(s, "%-8s %u %-8s %u ",
- l3proto_name(l3proto->l3proto), nf_ct_l3num(ct),
+ l3proto_name(nf_ct_l3num(ct)), nf_ct_l3num(ct),
l4proto_name(l4proto->l4proto), nf_ct_protonum(ct));
if (!test_bit(IPS_OFFLOAD_BIT, &ct->status))
@@ -320,7 +307,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
l4proto->print_conntrack(s, ct);
print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
- l3proto, l4proto);
+ l4proto);
ct_show_zone(s, ct, NF_CT_ZONE_DIR_ORIG);
@@ -333,8 +320,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
seq_puts(s, "[UNREPLIED] ");
- print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
- l3proto, l4proto);
+ print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, l4proto);
ct_show_zone(s, ct, NF_CT_ZONE_DIR_REPL);
@@ -680,6 +666,8 @@ static void nf_conntrack_pernet_exit(struct list_head *net_exit_list)
static struct pernet_operations nf_conntrack_net_ops = {
.init = nf_conntrack_pernet_init,
.exit_batch = nf_conntrack_pernet_exit,
+ .id = &nf_conntrack_net_id,
+ .size = sizeof(struct nf_conntrack_net),
};
static int __init nf_conntrack_standalone_init(void)
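Setting .id and .size on the pernet_operations makes the core allocate and track a zeroed struct nf_conntrack_net for every network namespace; conntrack code can then reach it through net_generic() rather than through a dedicated pointer in struct net. A sketch of the lookup side, nothing beyond the standard idiom:

    #include <net/netns/generic.h>

    static struct nf_conntrack_net *nf_ct_pernet(const struct net *net)
    {
            /* nf_conntrack_net_id was assigned by register_pernet_subsys(). */
            return net_generic(net, nf_conntrack_net_id);
    }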
diff --git a/net/netfilter/nf_conntrack_timeout.c b/net/netfilter/nf_conntrack_timeout.c
index 46aee65f339b..91fbd183da2d 100644
--- a/net/netfilter/nf_conntrack_timeout.c
+++ b/net/netfilter/nf_conntrack_timeout.c
@@ -24,13 +24,30 @@
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_timeout.h>
-struct ctnl_timeout *
+struct nf_ct_timeout *
(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name) __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_timeout_find_get_hook);
-void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout) __read_mostly;
+void (*nf_ct_timeout_put_hook)(struct nf_ct_timeout *timeout) __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_timeout_put_hook);
+static int untimeout(struct nf_conn *ct, void *timeout)
+{
+ struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);
+
+ if (timeout_ext && (!timeout || timeout_ext->timeout == timeout))
+ RCU_INIT_POINTER(timeout_ext->timeout, NULL);
+
+ /* We do not intend to delete this conntrack. */
+ return 0;
+}
+
+void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout)
+{
+ nf_ct_iterate_cleanup_net(net, untimeout, timeout, 0, 0);
+}
+EXPORT_SYMBOL_GPL(nf_ct_untimeout);
+
static const struct nf_ct_ext_type timeout_extend = {
.len = sizeof(struct nf_conn_timeout),
.align = __alignof__(struct nf_conn_timeout),
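Moving untimeout() into nf_conntrack_timeout.c turns it into shared infrastructure: any owner of a timeout policy can detach it from live connections before freeing the object. A sketch of the expected destroy-path ordering, with the policy assumed to be embedded in some caller-owned structure:

    static void example_policy_destroy(struct net *net,
                                       struct nf_ct_timeout *timeout)
    {
            /* Clear every conntrack's RCU reference to the policy... */
            nf_ct_untimeout(net, timeout);

            /* ...then free it, honouring whatever RCU grace period the
             * caller's own lifetime rules require. */
    }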
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index eb0d1658ac05..d8125616edc7 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -107,11 +107,12 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
tcp->seen[1].td_maxwin = 0;
}
+#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
+#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)
+
static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
const struct nf_conntrack_l4proto *l4proto;
- struct net *net = nf_ct_net(ct);
- unsigned int *timeouts;
unsigned int timeout;
int l4num;
@@ -123,14 +124,10 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct)
if (!l4proto)
return;
- timeouts = l4proto->get_timeouts(net);
- if (!timeouts)
- return;
-
if (l4num == IPPROTO_TCP)
- timeout = timeouts[TCP_CONNTRACK_ESTABLISHED];
+ timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
else if (l4num == IPPROTO_UDP)
- timeout = timeouts[UDP_CT_REPLIED];
+ timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
else
return;
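Using fixed pickup timeouts decouples the flowtable from conntrack internals: when a flow is handed back, conntrack has seen none of the offloaded traffic, so the per-netns ESTABLISHED/REPLIED values say little anyway, and a generous fixed window is enough for state tracking to resynchronize. The re-arming itself, done a few lines below this hunk, amounts to:

    /* Sketch: give conntrack a fresh window once offload ends. */
    ct->timeout = nfct_time_stamp + NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;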
diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
index dc61399e30be..a8c5c846aec1 100644
--- a/net/netfilter/nf_log_common.c
+++ b/net/netfilter/nf_log_common.c
@@ -132,9 +132,10 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(nf_log_dump_tcp_header);
-void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk)
+void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
+ struct sock *sk)
{
- if (!sk || !sk_fullsock(sk))
+ if (!sk || !sk_fullsock(sk) || !net_eq(net, sock_net(sk)))
return;
read_lock_bh(&sk->sk_callback_lock);
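The extra net_eq() test is the cross-namespace guard this series applies in several places: an skb can carry a socket that belongs to a different netns, and logging that socket's UID/GID against the wrong namespace would leak information across containers. The guard in isolation:

    static bool sk_belongs_to_net(const struct net *net, const struct sock *sk)
    {
            /* Only trust full sockets owned by this namespace. */
            return sk && sk_fullsock(sk) && net_eq(net, sock_net(sk));
    }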
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 46f9df99d276..e2b196054dfc 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -28,7 +28,6 @@
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>
@@ -108,6 +107,7 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
struct flowi fl;
unsigned int hh_len;
struct dst_entry *dst;
+ struct sock *sk = skb->sk;
int err;
err = xfrm_decode_session(skb, &fl, family);
@@ -119,7 +119,10 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
dst = ((struct xfrm_dst *)dst)->route;
dst_hold(dst);
- dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
+ if (sk && !net_eq(net, sock_net(sk)))
+ sk = NULL;
+
+ dst = xfrm_lookup(net, dst, &fl, sk, 0);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -739,12 +742,6 @@ EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);
int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
- int err;
-
- err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
- if (err < 0)
- return err;
-
mutex_lock(&nf_nat_proto_mutex);
RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
&nf_nat_l4proto_tcp);
@@ -777,7 +774,6 @@ void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
synchronize_rcu();
nf_nat_l3proto_clean(l3proto->l3proto);
- nf_ct_l3proto_module_put(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);
@@ -1060,7 +1056,7 @@ static int __init nf_nat_init(void)
ret = nf_ct_extend_register(&nat_extend);
if (ret < 0) {
- nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
+ kvfree(nf_nat_bysource);
pr_err("Unable to register extension\n");
return ret;
}
@@ -1098,7 +1094,7 @@ static void __exit nf_nat_cleanup(void)
for (i = 0; i < NFPROTO_NUMPROTO; i++)
kfree(nf_nat_l4protos[i]);
synchronize_net();
- nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
+ kvfree(nf_nat_bysource);
unregister_pernet_subsys(&nat_net_ops);
}
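nf_ct_free_hashtable() existed because conntrack hash tables may be backed by either kmalloc or vmalloc depending on size; kvfree() handles both, so the wrapper can go. The allocation/free pairing this relies on:

    #include <linux/mm.h>

    static struct hlist_head *example_alloc_table(unsigned int nbuckets)
    {
            /* kvmalloc falls back to vmalloc for large allocations. */
            return kvmalloc_array(nbuckets, sizeof(struct hlist_head),
                                  GFP_KERNEL | __GFP_ZERO);
    }

    static void example_free_table(struct hlist_head *table)
    {
            kvfree(table);  /* correct for either backing store */
    }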
diff --git a/net/netfilter/nf_osf.c b/net/netfilter/nf_osf.c
deleted file mode 100644
index 5ba5c7bef2f9..000000000000
--- a/net/netfilter/nf_osf.c
+++ /dev/null
@@ -1,218 +0,0 @@
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-
-#include <linux/capability.h>
-#include <linux/if.h>
-#include <linux/inetdevice.h>
-#include <linux/ip.h>
-#include <linux/list.h>
-#include <linux/rculist.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/tcp.h>
-
-#include <net/ip.h>
-#include <net/tcp.h>
-
-#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/x_tables.h>
-#include <net/netfilter/nf_log.h>
-#include <linux/netfilter/nf_osf.h>
-
-static inline int nf_osf_ttl(const struct sk_buff *skb,
- const struct nf_osf_info *info,
- unsigned char f_ttl)
-{
- const struct iphdr *ip = ip_hdr(skb);
-
- if (info->flags & NF_OSF_TTL) {
- if (info->ttl == NF_OSF_TTL_TRUE)
- return ip->ttl == f_ttl;
- if (info->ttl == NF_OSF_TTL_NOCHECK)
- return 1;
- else if (ip->ttl <= f_ttl)
- return 1;
- else {
- struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
- int ret = 0;
-
- for_ifa(in_dev) {
- if (inet_ifa_match(ip->saddr, ifa)) {
- ret = (ip->ttl == f_ttl);
- break;
- }
- }
- endfor_ifa(in_dev);
-
- return ret;
- }
- }
-
- return ip->ttl == f_ttl;
-}
-
-bool
-nf_osf_match(const struct sk_buff *skb, u_int8_t family,
- int hooknum, struct net_device *in, struct net_device *out,
- const struct nf_osf_info *info, struct net *net,
- const struct list_head *nf_osf_fingers)
-{
- const unsigned char *optp = NULL, *_optp = NULL;
- unsigned int optsize = 0, check_WSS = 0;
- int fmatch = FMATCH_WRONG, fcount = 0;
- const struct iphdr *ip = ip_hdr(skb);
- const struct nf_osf_user_finger *f;
- unsigned char opts[MAX_IPOPTLEN];
- const struct nf_osf_finger *kf;
- u16 window, totlen, mss = 0;
- const struct tcphdr *tcp;
- struct tcphdr _tcph;
- bool df;
-
- tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph);
- if (!tcp)
- return false;
-
- if (!tcp->syn)
- return false;
-
- totlen = ntohs(ip->tot_len);
- df = ntohs(ip->frag_off) & IP_DF;
- window = ntohs(tcp->window);
-
- if (tcp->doff * 4 > sizeof(struct tcphdr)) {
- optsize = tcp->doff * 4 - sizeof(struct tcphdr);
-
- _optp = optp = skb_header_pointer(skb, ip_hdrlen(skb) +
- sizeof(struct tcphdr), optsize, opts);
- }
-
- list_for_each_entry_rcu(kf, &nf_osf_fingers[df], finger_entry) {
- int foptsize, optnum;
-
- f = &kf->finger;
-
- if (!(info->flags & NF_OSF_LOG) && strcmp(info->genre, f->genre))
- continue;
-
- optp = _optp;
- fmatch = FMATCH_WRONG;
-
- if (totlen != f->ss || !nf_osf_ttl(skb, info, f->ttl))
- continue;
-
- /*
- * Should not happen if userspace parser was written correctly.
- */
- if (f->wss.wc >= OSF_WSS_MAX)
- continue;
-
- /* Check options */
-
- foptsize = 0;
- for (optnum = 0; optnum < f->opt_num; ++optnum)
- foptsize += f->opt[optnum].length;
-
- if (foptsize > MAX_IPOPTLEN ||
- optsize > MAX_IPOPTLEN ||
- optsize != foptsize)
- continue;
-
- check_WSS = f->wss.wc;
-
- for (optnum = 0; optnum < f->opt_num; ++optnum) {
- if (f->opt[optnum].kind == (*optp)) {
- __u32 len = f->opt[optnum].length;
- const __u8 *optend = optp + len;
-
- fmatch = FMATCH_OK;
-
- switch (*optp) {
- case OSFOPT_MSS:
- mss = optp[3];
- mss <<= 8;
- mss |= optp[2];
-
- mss = ntohs((__force __be16)mss);
- break;
- case OSFOPT_TS:
- break;
- }
-
- optp = optend;
- } else
- fmatch = FMATCH_OPT_WRONG;
-
- if (fmatch != FMATCH_OK)
- break;
- }
-
- if (fmatch != FMATCH_OPT_WRONG) {
- fmatch = FMATCH_WRONG;
-
- switch (check_WSS) {
- case OSF_WSS_PLAIN:
- if (f->wss.val == 0 || window == f->wss.val)
- fmatch = FMATCH_OK;
- break;
- case OSF_WSS_MSS:
- /*
- * Some smart modems decrease mangle MSS to
- * SMART_MSS_2, so we check standard, decreased
- * and the one provided in the fingerprint MSS
- * values.
- */
-#define SMART_MSS_1 1460
-#define SMART_MSS_2 1448
- if (window == f->wss.val * mss ||
- window == f->wss.val * SMART_MSS_1 ||
- window == f->wss.val * SMART_MSS_2)
- fmatch = FMATCH_OK;
- break;
- case OSF_WSS_MTU:
- if (window == f->wss.val * (mss + 40) ||
- window == f->wss.val * (SMART_MSS_1 + 40) ||
- window == f->wss.val * (SMART_MSS_2 + 40))
- fmatch = FMATCH_OK;
- break;
- case OSF_WSS_MODULO:
- if ((window % f->wss.val) == 0)
- fmatch = FMATCH_OK;
- break;
- }
- }
-
- if (fmatch != FMATCH_OK)
- continue;
-
- fcount++;
-
- if (info->flags & NF_OSF_LOG)
- nf_log_packet(net, family, hooknum, skb,
- in, out, NULL,
- "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n",
- f->genre, f->version, f->subtype,
- &ip->saddr, ntohs(tcp->source),
- &ip->daddr, ntohs(tcp->dest),
- f->ttl - ip->ttl);
-
- if ((info->flags & NF_OSF_LOG) &&
- info->loglevel == NF_OSF_LOGLEVEL_FIRST)
- break;
- }
-
- if (!fcount && (info->flags & NF_OSF_LOG))
- nf_log_packet(net, family, hooknum, skb, in, out, NULL,
- "Remote OS is not known: %pI4:%u -> %pI4:%u\n",
- &ip->saddr, ntohs(tcp->source),
- &ip->daddr, ntohs(tcp->dest));
-
- if (fcount)
- fmatch = FMATCH_OK;
-
- return fmatch == FMATCH_OK;
-}
-EXPORT_SYMBOL_GPL(nf_osf_match);
-
-MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index f5745e4c6513..67cdd5c4f4f5 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -14,6 +14,7 @@
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/vmalloc.h>
+#include <linux/rhashtable.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nf_tables.h>
@@ -455,20 +456,59 @@ __nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family)
return NULL;
}
+/*
+ * Loading a module requires dropping the mutex that guards the
+ * transaction.
+ * We first need to abort any pending transactions, as once the
+ * mutex is unlocked a different client could start a new
+ * transaction. It must not see any 'future generation'
+ * changes, as these changes will never happen.
+ */
+#ifdef CONFIG_MODULES
+static int __nf_tables_abort(struct net *net);
+
+static void nft_request_module(struct net *net, const char *fmt, ...)
+{
+ char module_name[MODULE_NAME_LEN];
+ va_list args;
+ int ret;
+
+ __nf_tables_abort(net);
+
+ va_start(args, fmt);
+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
+ va_end(args);
+ if (WARN(ret >= MODULE_NAME_LEN, "truncated: '%s' (len %d)", module_name, ret))
+ return;
+
+ mutex_unlock(&net->nft.commit_mutex);
+ request_module("%s", module_name);
+ mutex_lock(&net->nft.commit_mutex);
+}
+#endif
+
+static void lockdep_nfnl_nft_mutex_not_held(void)
+{
+#ifdef CONFIG_PROVE_LOCKING
+ WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
+#endif
+}
+
static const struct nft_chain_type *
-nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family, bool autoload)
+nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
+ u8 family, bool autoload)
{
const struct nft_chain_type *type;
type = __nf_tables_chain_type_lookup(nla, family);
if (type != NULL)
return type;
+
+ lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (autoload) {
- nfnl_unlock(NFNL_SUBSYS_NFTABLES);
- request_module("nft-chain-%u-%.*s", family,
- nla_len(nla), (const char *)nla_data(nla));
- nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ nft_request_module(net, "nft-chain-%u-%.*s", family,
+ nla_len(nla), (const char *)nla_data(nla));
type = __nf_tables_chain_type_lookup(nla, family);
if (type != NULL)
return ERR_PTR(-EAGAIN);
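nft_request_module() makes module autoload safe under the new per-netns commit_mutex: abort the half-built transaction first, only then drop the mutex, and after reacquiring it report -EAGAIN even when the module turned up, so nfnetlink replays the whole batch under a fresh generation. The caller-side pattern, abridged from the lookup above:

    type = __nf_tables_chain_type_lookup(nla, family);
    if (!type && autoload) {
            nft_request_module(net, "nft-chain-%u-%.*s", family,
                               nla_len(nla), (const char *)nla_data(nla));
            /* The transaction was aborted; even a successful lookup
             * must force a batch replay rather than continue here. */
            if (__nf_tables_chain_type_lookup(nla, family))
                    return ERR_PTR(-EAGAIN);
    }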
@@ -772,6 +812,7 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
struct nft_ctx ctx;
int err;
+ lockdep_assert_held(&net->nft.commit_mutex);
attr = nla[NFTA_TABLE_NAME];
table = nft_table_lookup(net, attr, family, genmask);
if (IS_ERR(table)) {
@@ -1012,7 +1053,17 @@ nft_chain_lookup_byhandle(const struct nft_table *table, u64 handle, u8 genmask)
return ERR_PTR(-ENOENT);
}
-static struct nft_chain *nft_chain_lookup(struct nft_table *table,
+static bool lockdep_commit_lock_is_held(struct net *net)
+{
+#ifdef CONFIG_PROVE_LOCKING
+ return lockdep_is_held(&net->nft.commit_mutex);
+#else
+ return true;
+#endif
+}
+
+static struct nft_chain *nft_chain_lookup(struct net *net,
+ struct nft_table *table,
const struct nlattr *nla, u8 genmask)
{
char search[NFT_CHAIN_MAXNAMELEN + 1];
@@ -1025,7 +1076,7 @@ static struct nft_chain *nft_chain_lookup(struct nft_table *table,
nla_strlcpy(search, nla, sizeof(search));
WARN_ON(!rcu_read_lock_held() &&
- !lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
+ !lockdep_commit_lock_is_held(net));
chain = ERR_PTR(-ENOENT);
rcu_read_lock();
@@ -1265,7 +1316,7 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk,
return PTR_ERR(table);
}
- chain = nft_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask);
+ chain = nft_chain_lookup(net, table, nla[NFTA_CHAIN_NAME], genmask);
if (IS_ERR(chain)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]);
return PTR_ERR(chain);
@@ -1391,13 +1442,16 @@ struct nft_chain_hook {
static int nft_chain_parse_hook(struct net *net,
const struct nlattr * const nla[],
struct nft_chain_hook *hook, u8 family,
- bool create)
+ bool autoload)
{
struct nlattr *ha[NFTA_HOOK_MAX + 1];
const struct nft_chain_type *type;
struct net_device *dev;
int err;
+ lockdep_assert_held(&net->nft.commit_mutex);
+ lockdep_nfnl_nft_mutex_not_held();
+
err = nla_parse_nested(ha, NFTA_HOOK_MAX, nla[NFTA_CHAIN_HOOK],
nft_hook_policy, NULL);
if (err < 0)
@@ -1412,8 +1466,8 @@ static int nft_chain_parse_hook(struct net *net,
type = chain_type[family][NFT_CHAIN_T_DEFAULT];
if (nla[NFTA_CHAIN_TYPE]) {
- type = nf_tables_chain_type_lookup(nla[NFTA_CHAIN_TYPE],
- family, create);
+ type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
+ family, autoload);
if (IS_ERR(type))
return PTR_ERR(type);
}
@@ -1480,7 +1534,7 @@ static struct nft_rule **nf_tables_chain_alloc_rules(const struct nft_chain *cha
}
static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
- u8 policy, bool create)
+ u8 policy)
{
const struct nlattr * const *nla = ctx->nla;
struct nft_table *table = ctx->table;
@@ -1498,7 +1552,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
struct nft_chain_hook hook;
struct nf_hook_ops *ops;
- err = nft_chain_parse_hook(net, nla, &hook, family, create);
+ err = nft_chain_parse_hook(net, nla, &hook, family, true);
if (err < 0)
return err;
@@ -1589,8 +1643,7 @@ err1:
return err;
}
-static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
- bool create)
+static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy)
{
const struct nlattr * const *nla = ctx->nla;
struct nft_table *table = ctx->table;
@@ -1607,7 +1660,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
return -EBUSY;
err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family,
- create);
+ false);
if (err < 0)
return err;
@@ -1631,7 +1684,8 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
nla[NFTA_CHAIN_NAME]) {
struct nft_chain *chain2;
- chain2 = nft_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask);
+ chain2 = nft_chain_lookup(ctx->net, table,
+ nla[NFTA_CHAIN_NAME], genmask);
if (!IS_ERR(chain2))
return -EEXIST;
}
@@ -1706,9 +1760,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
u8 policy = NF_ACCEPT;
struct nft_ctx ctx;
u64 handle = 0;
- bool create;
- create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
+ lockdep_assert_held(&net->nft.commit_mutex);
table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, genmask);
if (IS_ERR(table)) {
@@ -1728,7 +1781,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
}
attr = nla[NFTA_CHAIN_HANDLE];
} else {
- chain = nft_chain_lookup(table, attr, genmask);
+ chain = nft_chain_lookup(net, table, attr, genmask);
if (IS_ERR(chain)) {
if (PTR_ERR(chain) != -ENOENT) {
NL_SET_BAD_ATTR(extack, attr);
@@ -1771,10 +1824,10 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
if (nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
- return nf_tables_updchain(&ctx, genmask, policy, create);
+ return nf_tables_updchain(&ctx, genmask, policy);
}
- return nf_tables_addchain(&ctx, family, genmask, policy, create);
+ return nf_tables_addchain(&ctx, family, genmask, policy);
}
static int nf_tables_delchain(struct net *net, struct sock *nlsk,
@@ -1806,7 +1859,7 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
chain = nft_chain_lookup_byhandle(table, handle, genmask);
} else {
attr = nla[NFTA_CHAIN_NAME];
- chain = nft_chain_lookup(table, attr, genmask);
+ chain = nft_chain_lookup(net, table, attr, genmask);
}
if (IS_ERR(chain)) {
NL_SET_BAD_ATTR(extack, attr);
@@ -1891,7 +1944,8 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
return NULL;
}
-static const struct nft_expr_type *nft_expr_type_get(u8 family,
+static const struct nft_expr_type *nft_expr_type_get(struct net *net,
+ u8 family,
struct nlattr *nla)
{
const struct nft_expr_type *type;
@@ -1903,19 +1957,16 @@ static const struct nft_expr_type *nft_expr_type_get(u8 family,
if (type != NULL && try_module_get(type->owner))
return type;
+ lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (type == NULL) {
- nfnl_unlock(NFNL_SUBSYS_NFTABLES);
- request_module("nft-expr-%u-%.*s", family,
- nla_len(nla), (char *)nla_data(nla));
- nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ nft_request_module(net, "nft-expr-%u-%.*s", family,
+ nla_len(nla), (char *)nla_data(nla));
if (__nft_expr_type_get(family, nla))
return ERR_PTR(-EAGAIN);
- nfnl_unlock(NFNL_SUBSYS_NFTABLES);
- request_module("nft-expr-%.*s",
- nla_len(nla), (char *)nla_data(nla));
- nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ nft_request_module(net, "nft-expr-%.*s",
+ nla_len(nla), (char *)nla_data(nla));
if (__nft_expr_type_get(family, nla))
return ERR_PTR(-EAGAIN);
}
@@ -1984,7 +2035,7 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
if (err < 0)
return err;
- type = nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]);
+ type = nft_expr_type_get(ctx->net, ctx->family, tb[NFTA_EXPR_NAME]);
if (IS_ERR(type))
return PTR_ERR(type);
@@ -2349,7 +2400,7 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
return PTR_ERR(table);
}
- chain = nft_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask);
+ chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], genmask);
if (IS_ERR(chain)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
return PTR_ERR(chain);
@@ -2383,6 +2434,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
{
struct nft_expr *expr;
+ lockdep_assert_held(&ctx->net->nft.commit_mutex);
/*
* Careful: some expressions might not be initialized in case this
* is called on error from nf_tables_newrule().
@@ -2454,8 +2506,6 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
#define NFT_RULE_MAXEXPRS 128
-static struct nft_expr_info *info;
-
static int nf_tables_newrule(struct net *net, struct sock *nlsk,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[],
@@ -2463,6 +2513,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
{
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_next(net);
+ struct nft_expr_info *info = NULL;
int family = nfmsg->nfgen_family;
struct nft_table *table;
struct nft_chain *chain;
@@ -2474,10 +2525,9 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
struct nlattr *tmp;
unsigned int size, i, n, ulen = 0, usize = 0;
int err, rem;
- bool create;
u64 handle, pos_handle;
- create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
+ lockdep_assert_held(&net->nft.commit_mutex);
table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask);
if (IS_ERR(table)) {
@@ -2485,7 +2535,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
return PTR_ERR(table);
}
- chain = nft_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask);
+ chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], genmask);
if (IS_ERR(chain)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
return PTR_ERR(chain);
@@ -2508,7 +2558,8 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
else
return -EOPNOTSUPP;
} else {
- if (!create || nlh->nlmsg_flags & NLM_F_REPLACE)
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE) ||
+ nlh->nlmsg_flags & NLM_F_REPLACE)
return -EINVAL;
handle = nf_tables_alloc_handle(table);
@@ -2533,6 +2584,12 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
n = 0;
size = 0;
if (nla[NFTA_RULE_EXPRESSIONS]) {
+ info = kvmalloc_array(NFT_RULE_MAXEXPRS,
+ sizeof(struct nft_expr_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
nla_for_each_nested(tmp, nla[NFTA_RULE_EXPRESSIONS], rem) {
err = -EINVAL;
if (nla_type(tmp) != NFTA_LIST_ELEM)
@@ -2625,6 +2682,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
list_add_rcu(&rule->list, &chain->rules);
}
}
+ kvfree(info);
chain->use++;
if (net->nft.validate_state == NFT_VALIDATE_DO)
@@ -2638,6 +2696,7 @@ err1:
if (info[i].ops != NULL)
module_put(info[i].ops->type->owner);
}
+ kvfree(info);
return err;
}
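The scratch array of expression descriptors used to be a single static allocation, which was only safe while every batch serialized on the global nfnl mutex. With commit_mutex being per-netns, two namespaces can run newrule concurrently, so the scratch space moves onto each request. In outline:

    struct nft_expr_info *info = NULL;

    if (nla[NFTA_RULE_EXPRESSIONS]) {
            /* 128 entries is too large for the stack; kvmalloc_array
             * tolerates the size and checks multiplication overflow. */
            info = kvmalloc_array(NFT_RULE_MAXEXPRS, sizeof(*info),
                                  GFP_KERNEL);
            if (!info)
                    return -ENOMEM;
    }
    /* ... build the rule ... */
    kvfree(info);   /* on success and on the error path alike */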
@@ -2677,7 +2736,8 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk,
}
if (nla[NFTA_RULE_CHAIN]) {
- chain = nft_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask);
+ chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN],
+ genmask);
if (IS_ERR(chain)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
return PTR_ERR(chain);
@@ -2769,11 +2829,11 @@ nft_select_set_ops(const struct nft_ctx *ctx,
const struct nft_set_type *type;
u32 flags = 0;
+ lockdep_assert_held(&ctx->net->nft.commit_mutex);
+ lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (list_empty(&nf_tables_set_types)) {
- nfnl_unlock(NFNL_SUBSYS_NFTABLES);
- request_module("nft-set");
- nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ nft_request_module(ctx->net, "nft-set");
if (!list_empty(&nf_tables_set_types))
return ERR_PTR(-EAGAIN);
}
@@ -3295,7 +3355,6 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
struct nft_ctx ctx;
char *name;
unsigned int size;
- bool create;
u64 timeout;
u32 ktype, dtype, flags, policy, gc_int, objtype;
struct nft_set_desc desc;
@@ -3396,8 +3455,6 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
return err;
}
- create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
-
table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family, genmask);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]);
@@ -3963,7 +4020,6 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr)
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
- const struct nft_set_ext *ext;
struct nft_data_desc desc;
struct nft_set_elem elem;
struct sk_buff *skb;
@@ -3997,7 +4053,6 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
return PTR_ERR(priv);
elem.priv = priv;
- ext = nft_set_elem_ext(set, &elem);
err = -ENOMEM;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -4818,7 +4873,8 @@ static const struct nft_object_type *__nft_obj_type_get(u32 objtype)
return NULL;
}
-static const struct nft_object_type *nft_obj_type_get(u32 objtype)
+static const struct nft_object_type *
+nft_obj_type_get(struct net *net, u32 objtype)
{
const struct nft_object_type *type;
@@ -4826,11 +4882,10 @@ static const struct nft_object_type *nft_obj_type_get(u32 objtype)
if (type != NULL && try_module_get(type->owner))
return type;
+ lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (type == NULL) {
- nfnl_unlock(NFNL_SUBSYS_NFTABLES);
- request_module("nft-obj-%u", objtype);
- nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ nft_request_module(net, "nft-obj-%u", objtype);
if (__nft_obj_type_get(objtype))
return ERR_PTR(-EAGAIN);
}
@@ -4882,7 +4937,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
- type = nft_obj_type_get(objtype);
+ type = nft_obj_type_get(net, objtype);
if (IS_ERR(type))
return PTR_ERR(type);
@@ -5372,7 +5427,8 @@ static const struct nf_flowtable_type *__nft_flowtable_type_get(u8 family)
return NULL;
}
-static const struct nf_flowtable_type *nft_flowtable_type_get(u8 family)
+static const struct nf_flowtable_type *
+nft_flowtable_type_get(struct net *net, u8 family)
{
const struct nf_flowtable_type *type;
@@ -5380,11 +5436,10 @@ static const struct nf_flowtable_type *nft_flowtable_type_get(u8 family)
if (type != NULL && try_module_get(type->owner))
return type;
+ lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (type == NULL) {
- nfnl_unlock(NFNL_SUBSYS_NFTABLES);
- request_module("nf-flowtable-%u", family);
- nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ nft_request_module(net, "nf-flowtable-%u", family);
if (__nft_flowtable_type_get(family))
return ERR_PTR(-EAGAIN);
}
@@ -5464,7 +5519,7 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
goto err1;
}
- type = nft_flowtable_type_get(family);
+ type = nft_flowtable_type_get(net, family);
if (IS_ERR(type)) {
err = PTR_ERR(type);
goto err2;
@@ -5874,13 +5929,13 @@ static int nf_tables_flowtable_event(struct notifier_block *this,
if (!net)
return 0;
- nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ mutex_lock(&net->nft.commit_mutex);
list_for_each_entry(table, &net->nft.tables, list) {
list_for_each_entry(flowtable, &table->flowtables, list) {
nft_flowtable_event(event, dev, flowtable);
}
}
- nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ mutex_unlock(&net->nft.commit_mutex);
put_net(net);
return NOTIFY_DONE;
}
@@ -6232,9 +6287,9 @@ static void nf_tables_commit_chain_active(struct net *net, struct nft_chain *cha
next_genbit = nft_gencursor_next(net);
g0 = rcu_dereference_protected(chain->rules_gen_0,
- lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
+ lockdep_commit_lock_is_held(net));
g1 = rcu_dereference_protected(chain->rules_gen_1,
- lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
+ lockdep_commit_lock_is_held(net));
/* No changes to this chain? */
if (chain->rules_next == NULL) {
@@ -6444,6 +6499,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nf_tables_commit_release(net);
nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
+ mutex_unlock(&net->nft.commit_mutex);
return 0;
}
@@ -6595,12 +6651,25 @@ static void nf_tables_cleanup(struct net *net)
static int nf_tables_abort(struct net *net, struct sk_buff *skb)
{
- return __nf_tables_abort(net);
+ int ret = __nf_tables_abort(net);
+
+ mutex_unlock(&net->nft.commit_mutex);
+
+ return ret;
}
static bool nf_tables_valid_genid(struct net *net, u32 genid)
{
- return net->nft.base_seq == genid;
+ bool genid_ok;
+
+ mutex_lock(&net->nft.commit_mutex);
+
+ genid_ok = genid == 0 || net->nft.base_seq == genid;
+ if (!genid_ok)
+ mutex_unlock(&net->nft.commit_mutex);
+
+ /* else, commit mutex has to be released by commit or abort function */
+ return genid_ok;
}
static const struct nfnetlink_subsystem nf_tables_subsys = {
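valid_genid() now doubles as the lock acquisition point: it takes commit_mutex and, on success, deliberately returns with the mutex still held; commit() and abort() are the only paths that release it. A genid of 0 means the client did not request generation checking. The handshake, sketched as a timeline:

    /*
     * nfnetlink batch:               nf_tables:
     *   ss->valid_genid(net, genid)    mutex_lock(&net->nft.commit_mutex);
     *                                  ok = !genid || base_seq == genid;
     *                                  if (!ok) mutex_unlock(...);
     *                                  return ok;     <-- mutex kept if ok
     *   ...parse/apply messages...
     *   ss->commit(net, oskb) or       ...work...
     *   ss->abort(net, oskb)           mutex_unlock(&net->nft.commit_mutex);
     */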
@@ -6612,6 +6681,7 @@ static const struct nfnetlink_subsystem nf_tables_subsys = {
.abort = nf_tables_abort,
.cleanup = nf_tables_cleanup,
.valid_genid = nf_tables_valid_genid,
+ .owner = THIS_MODULE,
};
int nft_chain_validate_dependency(const struct nft_chain *chain,
@@ -6931,8 +7001,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
case NFT_GOTO:
if (!tb[NFTA_VERDICT_CHAIN])
return -EINVAL;
- chain = nft_chain_lookup(ctx->table, tb[NFTA_VERDICT_CHAIN],
- genmask);
+ chain = nft_chain_lookup(ctx->net, ctx->table,
+ tb[NFTA_VERDICT_CHAIN], genmask);
if (IS_ERR(chain))
return PTR_ERR(chain);
if (nft_is_base_chain(chain))
@@ -7177,6 +7247,7 @@ static int __net_init nf_tables_init_net(struct net *net)
{
INIT_LIST_HEAD(&net->nft.tables);
INIT_LIST_HEAD(&net->nft.commit_list);
+ mutex_init(&net->nft.commit_mutex);
net->nft.base_seq = 1;
net->nft.validate_state = NFT_VALIDATE_SKIP;
@@ -7185,11 +7256,11 @@ static int __net_init nf_tables_init_net(struct net *net)
static void __net_exit nf_tables_exit_net(struct net *net)
{
- nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ mutex_lock(&net->nft.commit_mutex);
if (!list_empty(&net->nft.commit_list))
__nf_tables_abort(net);
__nft_release_tables(net);
- nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ mutex_unlock(&net->nft.commit_mutex);
WARN_ON_ONCE(!list_empty(&net->nft.tables));
}
@@ -7204,29 +7275,19 @@ static int __init nf_tables_module_init(void)
nft_chain_filter_init();
- info = kmalloc_array(NFT_RULE_MAXEXPRS, sizeof(struct nft_expr_info),
- GFP_KERNEL);
- if (info == NULL) {
- err = -ENOMEM;
- goto err1;
- }
-
err = nf_tables_core_module_init();
if (err < 0)
- goto err2;
+ return err;
err = nfnetlink_subsys_register(&nf_tables_subsys);
if (err < 0)
- goto err3;
+ goto err;
register_netdevice_notifier(&nf_tables_flowtable_notifier);
return register_pernet_subsys(&nf_tables_net_ops);
-err3:
+err:
nf_tables_core_module_exit();
-err2:
- kfree(info);
-err1:
return err;
}
@@ -7238,7 +7299,6 @@ static void __exit nf_tables_module_exit(void)
unregister_pernet_subsys(&nf_tables_net_ops);
rcu_barrier();
nf_tables_core_module_exit();
- kfree(info);
}
module_init(nf_tables_module_init);
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 8de912ca53d3..ffd5c0f9412b 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -120,6 +120,20 @@ struct nft_jumpstack {
struct nft_rule *const *rules;
};
+static void expr_call_ops_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ struct nft_pktinfo *pkt)
+{
+ unsigned long e = (unsigned long)expr->ops->eval;
+
+ if (e == (unsigned long)nft_meta_get_eval)
+ nft_meta_get_eval(expr, regs, pkt);
+ else if (e == (unsigned long)nft_lookup_eval)
+ nft_lookup_eval(expr, regs, pkt);
+ else
+ expr->ops->eval(expr, regs, pkt);
+}
+
unsigned int
nft_do_chain(struct nft_pktinfo *pkt, void *priv)
{
@@ -153,7 +167,7 @@ next_rule:
nft_cmp_fast_eval(expr, &regs);
else if (expr->ops != &nft_payload_fast_ops ||
!nft_payload_fast_eval(expr, &regs, pkt))
- expr->ops->eval(expr, &regs, pkt);
+ expr_call_ops_eval(expr, &regs, pkt);
if (regs.verdict.code != NFT_CONTINUE)
break;
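expr_call_ops_eval() is a hand-rolled devirtualization: on retpoline-enabled kernels every expr->ops->eval() is a costly indirect branch, so the most frequent handlers are compared for and called directly. The shape of the trick:

    static void call_eval(const struct nft_expr *expr, struct nft_regs *regs,
                          struct nft_pktinfo *pkt)
    {
            /* A pointer compare is cheap; a direct call avoids the
             * retpoline thunk in the common cases. */
            if (expr->ops->eval == nft_meta_get_eval)
                    nft_meta_get_eval(expr, regs, pkt);
            else
                    expr->ops->eval(expr, regs, pkt);
    }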
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index e1b6be29848d..916913454624 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -331,18 +331,27 @@ replay:
}
}
- if (!ss->commit || !ss->abort) {
+ if (!ss->valid_genid || !ss->commit || !ss->abort) {
nfnl_unlock(subsys_id);
netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
return kfree_skb(skb);
}
- if (genid && ss->valid_genid && !ss->valid_genid(net, genid)) {
+ if (!try_module_get(ss->owner)) {
+ nfnl_unlock(subsys_id);
+ netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
+ return kfree_skb(skb);
+ }
+
+ if (!ss->valid_genid(net, genid)) {
+ module_put(ss->owner);
nfnl_unlock(subsys_id);
netlink_ack(oskb, nlh, -ERESTART, NULL);
return kfree_skb(skb);
}
+ nfnl_unlock(subsys_id);
+
while (skb->len >= nlmsg_total_size(0)) {
int msglen, type;
@@ -464,14 +473,10 @@ ack:
}
done:
if (status & NFNL_BATCH_REPLAY) {
- const struct nfnetlink_subsystem *ss2;
-
- ss2 = nfnl_dereference_protected(subsys_id);
- if (ss2 == ss)
- ss->abort(net, oskb);
+ ss->abort(net, oskb);
nfnl_err_reset(&err_list);
- nfnl_unlock(subsys_id);
kfree_skb(skb);
+ module_put(ss->owner);
goto replay;
} else if (status == NFNL_BATCH_DONE) {
err = ss->commit(net, oskb);
@@ -489,8 +494,8 @@ done:
ss->cleanup(net);
nfnl_err_deliver(&err_list, oskb);
- nfnl_unlock(subsys_id);
kfree_skb(skb);
+ module_put(ss->owner);
}
static const struct nla_policy nfnl_batch_policy[NFNL_BATCH_MAX + 1] = {
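Because the batch now runs with the nfnl mutex dropped, nothing else pins the nf_tables module while messages are being processed; try_module_get() takes over that role, and the reference is only dropped after commit or abort has run. Reduced to its essentials:

    if (!try_module_get(ss->owner))
            return -EOPNOTSUPP;     /* subsystem is unloading */
    nfnl_unlock(subsys_id);         /* safe: the module cannot go away now */
    /* ... long-running batch processing ... */
    module_put(ss->owner);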
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 9ee5fa551fa6..d46a236cdf31 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -26,7 +26,6 @@
#include <net/sock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_timeout.h>
@@ -47,7 +46,7 @@ static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
};
static int
-ctnl_timeout_parse_policy(void *timeouts,
+ctnl_timeout_parse_policy(void *timeout,
const struct nf_conntrack_l4proto *l4proto,
struct net *net, const struct nlattr *attr)
{
@@ -68,7 +67,7 @@ ctnl_timeout_parse_policy(void *timeouts,
if (ret < 0)
goto err;
- ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net, timeouts);
+ ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net, timeout);
err:
kfree(tb);
@@ -114,13 +113,13 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
/* You cannot replace one timeout policy by another of
* different kind, sorry.
*/
- if (matching->l3num != l3num ||
- matching->l4proto->l4proto != l4num)
+ if (matching->timeout.l3num != l3num ||
+ matching->timeout.l4proto->l4proto != l4num)
return -EINVAL;
- return ctnl_timeout_parse_policy(&matching->data,
- matching->l4proto, net,
- cda[CTA_TIMEOUT_DATA]);
+ return ctnl_timeout_parse_policy(&matching->timeout.data,
+ matching->timeout.l4proto,
+ net, cda[CTA_TIMEOUT_DATA]);
}
return -EBUSY;
@@ -141,14 +140,14 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
goto err_proto_put;
}
- ret = ctnl_timeout_parse_policy(&timeout->data, l4proto, net,
+ ret = ctnl_timeout_parse_policy(&timeout->timeout.data, l4proto, net,
cda[CTA_TIMEOUT_DATA]);
if (ret < 0)
goto err;
strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME]));
- timeout->l3num = l3num;
- timeout->l4proto = l4proto;
+ timeout->timeout.l3num = l3num;
+ timeout->timeout.l4proto = l4proto;
refcount_set(&timeout->refcnt, 1);
list_add_tail_rcu(&timeout->head, &net->nfct_timeout_list);
@@ -167,7 +166,7 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
unsigned int flags = portid ? NLM_F_MULTI : 0;
- const struct nf_conntrack_l4proto *l4proto = timeout->l4proto;
+ const struct nf_conntrack_l4proto *l4proto = timeout->timeout.l4proto;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event);
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
@@ -180,8 +179,9 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
nfmsg->res_id = 0;
if (nla_put_string(skb, CTA_TIMEOUT_NAME, timeout->name) ||
- nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) ||
- nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) ||
+ nla_put_be16(skb, CTA_TIMEOUT_L3PROTO,
+ htons(timeout->timeout.l3num)) ||
+ nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto) ||
nla_put_be32(skb, CTA_TIMEOUT_USE,
htonl(refcount_read(&timeout->refcnt))))
goto nla_put_failure;
@@ -195,7 +195,8 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
if (!nest_parms)
goto nla_put_failure;
- ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->data);
+ ret = l4proto->ctnl_timeout.obj_to_nlattr(skb,
+ &timeout->timeout.data);
if (ret < 0)
goto nla_put_failure;
@@ -298,22 +299,6 @@ static int cttimeout_get_timeout(struct net *net, struct sock *ctnl,
return ret;
}
-static int untimeout(struct nf_conn *ct, void *timeout)
-{
- struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);
-
- if (timeout_ext && (!timeout || timeout_ext->timeout == timeout))
- RCU_INIT_POINTER(timeout_ext->timeout, NULL);
-
- /* We are not intended to delete this conntrack. */
- return 0;
-}
-
-static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
-{
- nf_ct_iterate_cleanup_net(net, untimeout, timeout, 0, 0);
-}
-
/* try to delete object, fail if it is still in use. */
static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout)
{
@@ -325,8 +310,8 @@ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout)
if (refcount_dec_if_one(&timeout->refcnt)) {
/* We are protected by nfnl mutex. */
list_del_rcu(&timeout->head);
- nf_ct_l4proto_put(timeout->l4proto);
- ctnl_untimeout(net, timeout);
+ nf_ct_l4proto_put(timeout->timeout.l4proto);
+ nf_ct_untimeout(net, &timeout->timeout);
kfree_rcu(timeout, rcu_head);
} else {
ret = -EBUSY;
@@ -373,7 +358,6 @@ static int cttimeout_default_set(struct net *net, struct sock *ctnl,
struct netlink_ext_ack *extack)
{
const struct nf_conntrack_l4proto *l4proto;
- unsigned int *timeouts;
__u16 l3num;
__u8 l4num;
int ret;
@@ -393,9 +377,7 @@ static int cttimeout_default_set(struct net *net, struct sock *ctnl,
goto err;
}
- timeouts = l4proto->get_timeouts(net);
-
- ret = ctnl_timeout_parse_policy(timeouts, l4proto, net,
+ ret = ctnl_timeout_parse_policy(NULL, l4proto, net,
cda[CTA_TIMEOUT_DATA]);
if (ret < 0)
goto err;
@@ -432,7 +414,6 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
struct nlattr *nest_parms;
- unsigned int *timeouts = l4proto->get_timeouts(net);
int ret;
nest_parms = nla_nest_start(skb,
@@ -440,7 +421,7 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
if (!nest_parms)
goto nla_put_failure;
- ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
+ ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
if (ret < 0)
goto nla_put_failure;
@@ -508,7 +489,6 @@ err:
return err;
}
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
static struct ctnl_timeout *
ctnl_timeout_find_get(struct net *net, const char *name)
{
@@ -532,14 +512,16 @@ err:
return matching;
}
-static void ctnl_timeout_put(struct ctnl_timeout *timeout)
+static void ctnl_timeout_put(struct nf_ct_timeout *t)
{
+ struct ctnl_timeout *timeout =
+ container_of(t, struct ctnl_timeout, timeout);
+
if (refcount_dec_and_test(&timeout->refcnt))
kfree_rcu(timeout, rcu_head);
module_put(THIS_MODULE);
}
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
static const struct nfnl_callback cttimeout_cb[IPCTNL_MSG_TIMEOUT_MAX] = {
[IPCTNL_MSG_TIMEOUT_NEW] = { .call = cttimeout_new_timeout,
@@ -580,11 +562,11 @@ static void __net_exit cttimeout_net_exit(struct net *net)
struct ctnl_timeout *cur, *tmp;
nf_ct_unconfirmed_destroy(net);
- ctnl_untimeout(net, NULL);
+ nf_ct_untimeout(net, NULL);
list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list, head) {
list_del_rcu(&cur->head);
- nf_ct_l4proto_put(cur->l4proto);
+ nf_ct_l4proto_put(cur->timeout.l4proto);
if (refcount_dec_and_test(&cur->refcnt))
kfree_rcu(cur, rcu_head);
@@ -610,10 +592,8 @@ static int __init cttimeout_init(void)
"nfnetlink.\n");
goto err_out;
}
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, ctnl_timeout_find_get);
RCU_INIT_POINTER(nf_ct_timeout_put_hook, ctnl_timeout_put);
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
return 0;
err_out:
@@ -626,11 +606,9 @@ static void __exit cttimeout_exit(void)
nfnetlink_subsys_unregister(&cttimeout_subsys);
unregister_pernet_subsys(&cttimeout_ops);
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
synchronize_rcu();
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
}
module_init(cttimeout_init);
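Embedding a generic struct nf_ct_timeout inside struct ctnl_timeout lets code outside this module (the timeout extension, and later nft_ct timeout objects) deal only with the generic part, while cttimeout recovers its wrapper with container_of(). Abridged layout and conversion:

    struct ctnl_timeout {
            struct list_head        head;
            struct rcu_head         rcu_head;
            refcount_t              refcnt;
            char                    name[CTNL_TIMEOUT_NAME_MAX];
            struct nf_ct_timeout    timeout;        /* generic part */
    };

    static struct ctnl_timeout *to_ctnl_timeout(struct nf_ct_timeout *t)
    {
            return container_of(t, struct ctnl_timeout, timeout);
    }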
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
new file mode 100644
index 000000000000..00db27dfd2ff
--- /dev/null
+++ b/net/netfilter/nfnetlink_osf.c
@@ -0,0 +1,436 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <linux/capability.h>
+#include <linux/if.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+
+#include <net/ip.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_log.h>
+#include <linux/netfilter/nfnetlink_osf.h>
+
+/*
+ * Indexed by the don't-fragment bit.
+ * It is the only constant value in the fingerprint.
+ */
+struct list_head nf_osf_fingers[2];
+EXPORT_SYMBOL_GPL(nf_osf_fingers);
+
+static inline int nf_osf_ttl(const struct sk_buff *skb,
+ int ttl_check, unsigned char f_ttl)
+{
+ const struct iphdr *ip = ip_hdr(skb);
+
+ if (ttl_check != -1) {
+ if (ttl_check == NF_OSF_TTL_TRUE)
+ return ip->ttl == f_ttl;
+ if (ttl_check == NF_OSF_TTL_NOCHECK)
+ return 1;
+ else if (ip->ttl <= f_ttl)
+ return 1;
+ else {
+ struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
+ int ret = 0;
+
+ for_ifa(in_dev) {
+ if (inet_ifa_match(ip->saddr, ifa)) {
+ ret = (ip->ttl == f_ttl);
+ break;
+ }
+ }
+ endfor_ifa(in_dev);
+
+ return ret;
+ }
+ }
+
+ return ip->ttl == f_ttl;
+}
+
+struct nf_osf_hdr_ctx {
+ bool df;
+ u16 window;
+ u16 totlen;
+ const unsigned char *optp;
+ unsigned int optsize;
+};
+
+static bool nf_osf_match_one(const struct sk_buff *skb,
+ const struct nf_osf_user_finger *f,
+ int ttl_check,
+ struct nf_osf_hdr_ctx *ctx)
+{
+ unsigned int check_WSS = 0;
+ int fmatch = FMATCH_WRONG;
+ int foptsize, optnum;
+ u16 mss = 0;
+
+ if (ctx->totlen != f->ss || !nf_osf_ttl(skb, ttl_check, f->ttl))
+ return false;
+
+ /*
+ * Should not happen if userspace parser was written correctly.
+ */
+ if (f->wss.wc >= OSF_WSS_MAX)
+ return false;
+
+ /* Check options */
+
+ foptsize = 0;
+ for (optnum = 0; optnum < f->opt_num; ++optnum)
+ foptsize += f->opt[optnum].length;
+
+ if (foptsize > MAX_IPOPTLEN ||
+ ctx->optsize > MAX_IPOPTLEN ||
+ ctx->optsize != foptsize)
+ return false;
+
+ check_WSS = f->wss.wc;
+
+ for (optnum = 0; optnum < f->opt_num; ++optnum) {
+ if (f->opt[optnum].kind == *ctx->optp) {
+ __u32 len = f->opt[optnum].length;
+ const __u8 *optend = ctx->optp + len;
+
+ fmatch = FMATCH_OK;
+
+ switch (*ctx->optp) {
+ case OSFOPT_MSS:
+ mss = ctx->optp[3];
+ mss <<= 8;
+ mss |= ctx->optp[2];
+
+ mss = ntohs((__force __be16)mss);
+ break;
+ case OSFOPT_TS:
+ break;
+ }
+
+ ctx->optp = optend;
+ } else
+ fmatch = FMATCH_OPT_WRONG;
+
+ if (fmatch != FMATCH_OK)
+ break;
+ }
+
+ if (fmatch != FMATCH_OPT_WRONG) {
+ fmatch = FMATCH_WRONG;
+
+ switch (check_WSS) {
+ case OSF_WSS_PLAIN:
+ if (f->wss.val == 0 || ctx->window == f->wss.val)
+ fmatch = FMATCH_OK;
+ break;
+ case OSF_WSS_MSS:
+ /*
+ * Some smart modems decrease (mangle) the MSS to
+ * SMART_MSS_2, so we check the standard and
+ * decreased values as well as the one provided
+ * in the fingerprint.
+ */
+#define SMART_MSS_1 1460
+#define SMART_MSS_2 1448
+ if (ctx->window == f->wss.val * mss ||
+ ctx->window == f->wss.val * SMART_MSS_1 ||
+ ctx->window == f->wss.val * SMART_MSS_2)
+ fmatch = FMATCH_OK;
+ break;
+ case OSF_WSS_MTU:
+ if (ctx->window == f->wss.val * (mss + 40) ||
+ ctx->window == f->wss.val * (SMART_MSS_1 + 40) ||
+ ctx->window == f->wss.val * (SMART_MSS_2 + 40))
+ fmatch = FMATCH_OK;
+ break;
+ case OSF_WSS_MODULO:
+ if ((ctx->window % f->wss.val) == 0)
+ fmatch = FMATCH_OK;
+ break;
+ }
+ }
+
+ return fmatch == FMATCH_OK;
+}
+
+static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
+ const struct sk_buff *skb,
+ const struct iphdr *ip,
+ unsigned char *opts)
+{
+ const struct tcphdr *tcp;
+ struct tcphdr _tcph;
+
+ tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph);
+ if (!tcp)
+ return NULL;
+
+ if (!tcp->syn)
+ return NULL;
+
+ ctx->totlen = ntohs(ip->tot_len);
+ ctx->df = ntohs(ip->frag_off) & IP_DF;
+ ctx->window = ntohs(tcp->window);
+
+ if (tcp->doff * 4 > sizeof(struct tcphdr)) {
+ ctx->optsize = tcp->doff * 4 - sizeof(struct tcphdr);
+
+ ctx->optp = skb_header_pointer(skb, ip_hdrlen(skb) +
+ sizeof(struct tcphdr), ctx->optsize, opts);
+ }
+
+ return tcp;
+}
+
+bool
+nf_osf_match(const struct sk_buff *skb, u_int8_t family,
+ int hooknum, struct net_device *in, struct net_device *out,
+ const struct nf_osf_info *info, struct net *net,
+ const struct list_head *nf_osf_fingers)
+{
+ const struct iphdr *ip = ip_hdr(skb);
+ const struct nf_osf_user_finger *f;
+ unsigned char opts[MAX_IPOPTLEN];
+ const struct nf_osf_finger *kf;
+ int fcount = 0, ttl_check;
+ int fmatch = FMATCH_WRONG;
+ struct nf_osf_hdr_ctx ctx;
+ const struct tcphdr *tcp;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
+ if (!tcp)
+ return false;
+
+ ttl_check = (info->flags & NF_OSF_TTL) ? info->ttl : -1;
+
+ list_for_each_entry_rcu(kf, &nf_osf_fingers[ctx.df], finger_entry) {
+
+ f = &kf->finger;
+
+ if (!(info->flags & NF_OSF_LOG) && strcmp(info->genre, f->genre))
+ continue;
+
+ if (!nf_osf_match_one(skb, f, ttl_check, &ctx))
+ continue;
+
+ fmatch = FMATCH_OK;
+
+ fcount++;
+
+ if (info->flags & NF_OSF_LOG)
+ nf_log_packet(net, family, hooknum, skb,
+ in, out, NULL,
+ "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n",
+ f->genre, f->version, f->subtype,
+ &ip->saddr, ntohs(tcp->source),
+ &ip->daddr, ntohs(tcp->dest),
+ f->ttl - ip->ttl);
+
+ if ((info->flags & NF_OSF_LOG) &&
+ info->loglevel == NF_OSF_LOGLEVEL_FIRST)
+ break;
+ }
+
+ if (!fcount && (info->flags & NF_OSF_LOG))
+ nf_log_packet(net, family, hooknum, skb, in, out, NULL,
+ "Remote OS is not known: %pI4:%u -> %pI4:%u\n",
+ &ip->saddr, ntohs(tcp->source),
+ &ip->daddr, ntohs(tcp->dest));
+
+ if (fcount)
+ fmatch = FMATCH_OK;
+
+ return fmatch == FMATCH_OK;
+}
+EXPORT_SYMBOL_GPL(nf_osf_match);
+
+const char *nf_osf_find(const struct sk_buff *skb,
+ const struct list_head *nf_osf_fingers)
+{
+ const struct iphdr *ip = ip_hdr(skb);
+ const struct nf_osf_user_finger *f;
+ unsigned char opts[MAX_IPOPTLEN];
+ const struct nf_osf_finger *kf;
+ struct nf_osf_hdr_ctx ctx;
+ const struct tcphdr *tcp;
+ const char *genre = NULL;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
+ if (!tcp)
+ return NULL;
+
+ list_for_each_entry_rcu(kf, &nf_osf_fingers[ctx.df], finger_entry) {
+ f = &kf->finger;
+ if (!nf_osf_match_one(skb, f, -1, &ctx))
+ continue;
+
+ genre = f->genre;
+ break;
+ }
+
+ return genre;
+}
+EXPORT_SYMBOL_GPL(nf_osf_find);
+
+static const struct nla_policy nfnl_osf_policy[OSF_ATTR_MAX + 1] = {
+ [OSF_ATTR_FINGER] = { .len = sizeof(struct nf_osf_user_finger) },
+};
+
+static int nfnl_osf_add_callback(struct net *net, struct sock *ctnl,
+ struct sk_buff *skb, const struct nlmsghdr *nlh,
+ const struct nlattr * const osf_attrs[],
+ struct netlink_ext_ack *extack)
+{
+ struct nf_osf_user_finger *f;
+ struct nf_osf_finger *kf = NULL, *sf;
+ int err = 0;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!osf_attrs[OSF_ATTR_FINGER])
+ return -EINVAL;
+
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+ return -EINVAL;
+
+ f = nla_data(osf_attrs[OSF_ATTR_FINGER]);
+
+ kf = kmalloc(sizeof(struct nf_osf_finger), GFP_KERNEL);
+ if (!kf)
+ return -ENOMEM;
+
+ memcpy(&kf->finger, f, sizeof(struct nf_osf_user_finger));
+
+ list_for_each_entry(sf, &nf_osf_fingers[!!f->df], finger_entry) {
+ if (memcmp(&sf->finger, f, sizeof(struct nf_osf_user_finger)))
+ continue;
+
+ kfree(kf);
+ kf = NULL;
+
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ err = -EEXIST;
+ break;
+ }
+
+ /*
+ * We are protected by nfnl mutex.
+ */
+ if (kf)
+ list_add_tail_rcu(&kf->finger_entry, &nf_osf_fingers[!!f->df]);
+
+ return err;
+}
+
+static int nfnl_osf_remove_callback(struct net *net, struct sock *ctnl,
+ struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const osf_attrs[],
+ struct netlink_ext_ack *extack)
+{
+ struct nf_osf_user_finger *f;
+ struct nf_osf_finger *sf;
+ int err = -ENOENT;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!osf_attrs[OSF_ATTR_FINGER])
+ return -EINVAL;
+
+ f = nla_data(osf_attrs[OSF_ATTR_FINGER]);
+
+ list_for_each_entry(sf, &nf_osf_fingers[!!f->df], finger_entry) {
+ if (memcmp(&sf->finger, f, sizeof(struct nf_osf_user_finger)))
+ continue;
+
+ /*
+ * We are protected by nfnl mutex.
+ */
+ list_del_rcu(&sf->finger_entry);
+ kfree_rcu(sf, rcu_head);
+
+ err = 0;
+ break;
+ }
+
+ return err;
+}
+
+static const struct nfnl_callback nfnl_osf_callbacks[OSF_MSG_MAX] = {
+ [OSF_MSG_ADD] = {
+ .call = nfnl_osf_add_callback,
+ .attr_count = OSF_ATTR_MAX,
+ .policy = nfnl_osf_policy,
+ },
+ [OSF_MSG_REMOVE] = {
+ .call = nfnl_osf_remove_callback,
+ .attr_count = OSF_ATTR_MAX,
+ .policy = nfnl_osf_policy,
+ },
+};
+
+static const struct nfnetlink_subsystem nfnl_osf_subsys = {
+ .name = "osf",
+ .subsys_id = NFNL_SUBSYS_OSF,
+ .cb_count = OSF_MSG_MAX,
+ .cb = nfnl_osf_callbacks,
+};
+
+static int __init nfnl_osf_init(void)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nf_osf_fingers); ++i)
+ INIT_LIST_HEAD(&nf_osf_fingers[i]);
+
+ err = nfnetlink_subsys_register(&nfnl_osf_subsys);
+ if (err < 0)
+ pr_err("Failed to register OSF nfnetlink helper (%d)\n", err);
+
+ return err;
+}
+
+static void __exit nfnl_osf_fini(void)
+{
+ struct nf_osf_finger *f;
+ int i;
+
+ nfnetlink_subsys_unregister(&nfnl_osf_subsys);
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(nf_osf_fingers); ++i) {
+ list_for_each_entry_rcu(f, &nf_osf_fingers[i], finger_entry) {
+ list_del_rcu(&f->finger_entry);
+ kfree_rcu(f, rcu_head);
+ }
+ }
+ rcu_read_unlock();
+
+ rcu_barrier();
+}
+
+module_init(nfnl_osf_init);
+module_exit(nfnl_osf_fini);
+
+MODULE_LICENSE("GPL");
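An aside on the update pattern above: both callbacks run under the nfnl mutex, publish with list_add_tail_rcu()/list_del_rcu(), and defer frees with kfree_rcu(), so lockless readers such as nf_osf_match() and nf_osf_find() never see an entry being reclaimed. A minimal, self-contained sketch of that writer side (all names here are hypothetical, not part of the patch):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_finger {
        struct list_head list;
        struct rcu_head rcu;
        /* fingerprint payload would live here */
};

static LIST_HEAD(demo_fingers);
static DEFINE_MUTEX(demo_mutex);        /* stand-in for the nfnl mutex */

static void demo_finger_remove(struct demo_finger *f)
{
        mutex_lock(&demo_mutex);
        list_del_rcu(&f->list);         /* unpublish for new readers */
        mutex_unlock(&demo_mutex);
        kfree_rcu(f, rcu);              /* freed once readers drain */
}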
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index d21834bed805..ea5b7c4944f6 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -322,7 +322,7 @@ static int nf_tables_netdev_event(struct notifier_block *this,
if (!ctx.net)
return NOTIFY_DONE;
- nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ mutex_lock(&ctx.net->nft.commit_mutex);
list_for_each_entry(table, &ctx.net->nft.tables, list) {
if (table->family != NFPROTO_NETDEV)
continue;
@@ -337,7 +337,7 @@ static int nf_tables_netdev_event(struct notifier_block *this,
nft_netdev_event(event, dev, &ctx);
}
}
- nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ mutex_unlock(&ctx.net->nft.commit_mutex);
put_net(ctx.net);
return NOTIFY_DONE;
diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
index a832c59f0a9c..b90d96ba4a12 100644
--- a/net/netfilter/nft_connlimit.c
+++ b/net/netfilter/nft_connlimit.c
@@ -14,10 +14,9 @@
#include <net/netfilter/nf_conntrack_zones.h>
struct nft_connlimit {
- spinlock_t lock;
- struct hlist_head hhead;
- u32 limit;
- bool invert;
+ struct nf_conncount_list list;
+ u32 limit;
+ bool invert;
};
static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
@@ -45,21 +44,19 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
return;
}
- spin_lock_bh(&priv->lock);
- count = nf_conncount_lookup(nft_net(pkt), &priv->hhead, tuple_ptr, zone,
- &addit);
+ nf_conncount_lookup(nft_net(pkt), &priv->list, tuple_ptr, zone,
+ &addit);
+ count = priv->list.count;
if (!addit)
goto out;
- if (!nf_conncount_add(&priv->hhead, tuple_ptr, zone)) {
+ if (nf_conncount_add(&priv->list, tuple_ptr, zone) == NF_CONNCOUNT_ERR) {
regs->verdict.code = NF_DROP;
- spin_unlock_bh(&priv->lock);
return;
}
count++;
out:
- spin_unlock_bh(&priv->lock);
if ((count > priv->limit) ^ priv->invert) {
regs->verdict.code = NFT_BREAK;
@@ -87,8 +84,7 @@ static int nft_connlimit_do_init(const struct nft_ctx *ctx,
invert = true;
}
- spin_lock_init(&priv->lock);
- INIT_HLIST_HEAD(&priv->hhead);
+ nf_conncount_list_init(&priv->list);
priv->limit = limit;
priv->invert = invert;
@@ -99,7 +95,7 @@ static void nft_connlimit_do_destroy(const struct nft_ctx *ctx,
struct nft_connlimit *priv)
{
nf_ct_netns_put(ctx->net, ctx->family);
- nf_conncount_cache_free(&priv->hhead);
+ nf_conncount_cache_free(&priv->list);
}
static int nft_connlimit_do_dump(struct sk_buff *skb,
@@ -212,8 +208,7 @@ static int nft_connlimit_clone(struct nft_expr *dst, const struct nft_expr *src)
struct nft_connlimit *priv_dst = nft_expr_priv(dst);
struct nft_connlimit *priv_src = nft_expr_priv(src);
- spin_lock_init(&priv_dst->lock);
- INIT_HLIST_HEAD(&priv_dst->hhead);
+ nf_conncount_list_init(&priv_dst->list);
priv_dst->limit = priv_src->limit;
priv_dst->invert = priv_src->invert;
@@ -225,21 +220,14 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx,
{
struct nft_connlimit *priv = nft_expr_priv(expr);
- nf_conncount_cache_free(&priv->hhead);
+ nf_conncount_cache_free(&priv->list);
}
static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
{
struct nft_connlimit *priv = nft_expr_priv(expr);
- bool addit, ret;
- spin_lock_bh(&priv->lock);
- nf_conncount_lookup(net, &priv->hhead, NULL, &nf_ct_zone_dflt, &addit);
-
- ret = hlist_empty(&priv->hhead);
- spin_unlock_bh(&priv->lock);
-
- return ret;
+ return nf_conncount_gc_list(net, &priv->list);
}
static struct nft_expr_type nft_connlimit_type;
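The hunk above drops the per-expression spinlock and hlist in favour of the reworked nf_conncount API, where the list carries its own lock and running count. A hedged sketch of the resulting consumer-side sequence (demo_count() is illustrative only, mirroring the eval path above):

static unsigned int demo_count(struct net *net,
                               struct nf_conncount_list *list,
                               const struct nf_conntrack_tuple *tuple,
                               const struct nf_conntrack_zone *zone)
{
        bool addit;

        nf_conncount_lookup(net, list, tuple, zone, &addit);
        if (!addit)
                return list->count;

        /* NF_CONNCOUNT_ERR tells the caller to drop the packet */
        if (nf_conncount_add(list, tuple, zone) == NF_CONNCOUNT_ERR)
                return UINT_MAX;

        return list->count + 1;
}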
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 1435ffc5f57e..4855d4ce1c8f 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -22,6 +22,8 @@
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
struct nft_ct {
enum nft_ct_keys key:8;
@@ -765,6 +767,194 @@ static struct nft_expr_type nft_notrack_type __read_mostly = {
.owner = THIS_MODULE,
};
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+static int
+nft_ct_timeout_parse_policy(void *timeouts,
+ const struct nf_conntrack_l4proto *l4proto,
+ struct net *net, const struct nlattr *attr)
+{
+ struct nlattr **tb;
+ int ret = 0;
+
+ if (!l4proto->ctnl_timeout.nlattr_to_obj)
+ return 0;
+
+ tb = kcalloc(l4proto->ctnl_timeout.nlattr_max + 1, sizeof(*tb),
+ GFP_KERNEL);
+
+ if (!tb)
+ return -ENOMEM;
+
+ ret = nla_parse_nested(tb, l4proto->ctnl_timeout.nlattr_max,
+ attr, l4proto->ctnl_timeout.nla_policy,
+ NULL);
+ if (ret < 0)
+ goto err;
+
+ ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net, timeouts);
+
+err:
+ kfree(tb);
+ return ret;
+}
+
+struct nft_ct_timeout_obj {
+ struct nf_conn *tmpl;
+ u8 l4proto;
+};
+
+static void nft_ct_timeout_obj_eval(struct nft_object *obj,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
+ struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
+ struct sk_buff *skb = pkt->skb;
+
+ if (ct ||
+ priv->l4proto != pkt->tprot)
+ return;
+
+ nf_ct_set(skb, priv->tmpl, IP_CT_NEW);
+}
+
+static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[],
+ struct nft_object *obj)
+{
+ const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
+ struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
+ const struct nf_conntrack_l4proto *l4proto;
+ struct nf_conn_timeout *timeout_ext;
+ struct nf_ct_timeout *timeout;
+ int l3num = ctx->family;
+ struct nf_conn *tmpl;
+ __u8 l4num;
+ int ret;
+
+ if (!tb[NFTA_CT_TIMEOUT_L3PROTO] ||
+ !tb[NFTA_CT_TIMEOUT_L4PROTO] ||
+ !tb[NFTA_CT_TIMEOUT_DATA])
+ return -EINVAL;
+
+ l3num = ntohs(nla_get_be16(tb[NFTA_CT_TIMEOUT_L3PROTO]));
+ l4num = nla_get_u8(tb[NFTA_CT_TIMEOUT_L4PROTO]);
+ priv->l4proto = l4num;
+
+ l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+
+ if (l4proto->l4proto != l4num) {
+ ret = -EOPNOTSUPP;
+ goto err_proto_put;
+ }
+
+ timeout = kzalloc(sizeof(struct nf_ct_timeout) +
+ l4proto->ctnl_timeout.obj_size, GFP_KERNEL);
+ if (timeout == NULL) {
+ ret = -ENOMEM;
+ goto err_proto_put;
+ }
+
+ ret = nft_ct_timeout_parse_policy(&timeout->data, l4proto, ctx->net,
+ tb[NFTA_CT_TIMEOUT_DATA]);
+ if (ret < 0)
+ goto err_free_timeout;
+
+ timeout->l3num = l3num;
+ timeout->l4proto = l4proto;
+ tmpl = nf_ct_tmpl_alloc(ctx->net, zone, GFP_ATOMIC);
+ if (!tmpl) {
+ ret = -ENOMEM;
+ goto err_free_timeout;
+ }
+
+ timeout_ext = nf_ct_timeout_ext_add(tmpl, timeout, GFP_ATOMIC);
+ if (!timeout_ext) {
+ ret = -ENOMEM;
+ goto err_free_tmpl;
+ }
+
+ ret = nf_ct_netns_get(ctx->net, ctx->family);
+ if (ret < 0)
+ goto err_free_tmpl;
+
+ priv->tmpl = tmpl;
+
+ return 0;
+
+err_free_tmpl:
+ nf_ct_tmpl_free(tmpl);
+err_free_timeout:
+ kfree(timeout);
+err_proto_put:
+ nf_ct_l4proto_put(l4proto);
+ return ret;
+}
+
+static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx,
+ struct nft_object *obj)
+{
+ struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
+ struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl);
+ struct nf_ct_timeout *timeout;
+
+ timeout = rcu_dereference_raw(t->timeout);
+ nf_ct_untimeout(ctx->net, timeout);
+ nf_ct_l4proto_put(timeout->l4proto);
+ nf_ct_netns_put(ctx->net, ctx->family);
+ nf_ct_tmpl_free(priv->tmpl);
+}
+
+static int nft_ct_timeout_obj_dump(struct sk_buff *skb,
+ struct nft_object *obj, bool reset)
+{
+ const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
+ const struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl);
+ const struct nf_ct_timeout *timeout = rcu_dereference_raw(t->timeout);
+ struct nlattr *nest_params;
+ int ret;
+
+ if (nla_put_u8(skb, NFTA_CT_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) ||
+ nla_put_be16(skb, NFTA_CT_TIMEOUT_L3PROTO, htons(timeout->l3num)))
+ return -1;
+
+ nest_params = nla_nest_start(skb, NFTA_CT_TIMEOUT_DATA | NLA_F_NESTED);
+ if (!nest_params)
+ return -1;
+
+ ret = timeout->l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->data);
+ if (ret < 0)
+ return -1;
+ nla_nest_end(skb, nest_params);
+ return 0;
+}
+
+static const struct nla_policy nft_ct_timeout_policy[NFTA_CT_TIMEOUT_MAX + 1] = {
+ [NFTA_CT_TIMEOUT_L3PROTO] = {.type = NLA_U16 },
+ [NFTA_CT_TIMEOUT_L4PROTO] = {.type = NLA_U8 },
+ [NFTA_CT_TIMEOUT_DATA] = {.type = NLA_NESTED },
+};
+
+static struct nft_object_type nft_ct_timeout_obj_type;
+
+static const struct nft_object_ops nft_ct_timeout_obj_ops = {
+ .type = &nft_ct_timeout_obj_type,
+ .size = sizeof(struct nft_ct_timeout_obj),
+ .eval = nft_ct_timeout_obj_eval,
+ .init = nft_ct_timeout_obj_init,
+ .destroy = nft_ct_timeout_obj_destroy,
+ .dump = nft_ct_timeout_obj_dump,
+};
+
+static struct nft_object_type nft_ct_timeout_obj_type __read_mostly = {
+ .type = NFT_OBJECT_CT_TIMEOUT,
+ .ops = &nft_ct_timeout_obj_ops,
+ .maxattr = NFTA_CT_TIMEOUT_MAX,
+ .policy = nft_ct_timeout_policy,
+ .owner = THIS_MODULE,
+};
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+
static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
@@ -773,6 +963,7 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
struct nf_conntrack_helper *help4, *help6;
char name[NF_CT_HELPER_NAME_LEN];
int family = ctx->family;
+ int err;
if (!tb[NFTA_CT_HELPER_NAME] || !tb[NFTA_CT_HELPER_L4PROTO])
return -EINVAL;
@@ -823,7 +1014,18 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
priv->helper4 = help4;
priv->helper6 = help6;
+ err = nf_ct_netns_get(ctx->net, ctx->family);
+ if (err < 0)
+ goto err_put_helper;
+
return 0;
+
+err_put_helper:
+ if (priv->helper4)
+ nf_conntrack_helper_put(priv->helper4);
+ if (priv->helper6)
+ nf_conntrack_helper_put(priv->helper6);
+ return err;
}
static void nft_ct_helper_obj_destroy(const struct nft_ctx *ctx,
@@ -835,6 +1037,8 @@ static void nft_ct_helper_obj_destroy(const struct nft_ctx *ctx,
nf_conntrack_helper_put(priv->helper4);
if (priv->helper6)
nf_conntrack_helper_put(priv->helper6);
+
+ nf_ct_netns_put(ctx->net, ctx->family);
}
static void nft_ct_helper_obj_eval(struct nft_object *obj,
@@ -870,7 +1074,7 @@ static void nft_ct_helper_obj_eval(struct nft_object *obj,
if (test_bit(IPS_HELPER_BIT, &ct->status))
return;
- help = nf_ct_helper_ext_add(ct, to_assign, GFP_ATOMIC);
+ help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (help) {
rcu_assign_pointer(help->helper, to_assign);
set_bit(IPS_HELPER_BIT, &ct->status);
@@ -949,9 +1153,17 @@ static int __init nft_ct_module_init(void)
err = nft_register_obj(&nft_ct_helper_obj_type);
if (err < 0)
goto err2;
-
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ err = nft_register_obj(&nft_ct_timeout_obj_type);
+ if (err < 0)
+ goto err3;
+#endif
return 0;
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+err3:
+ nft_unregister_obj(&nft_ct_helper_obj_type);
+#endif
err2:
nft_unregister_expr(&nft_notrack_type);
err1:
@@ -961,6 +1173,9 @@ err1:
static void __exit nft_ct_module_exit(void)
{
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ nft_unregister_obj(&nft_ct_timeout_obj_type);
+#endif
nft_unregister_obj(&nft_ct_helper_obj_type);
nft_unregister_expr(&nft_notrack_type);
nft_unregister_expr(&nft_ct_type);
@@ -974,3 +1189,4 @@ MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_EXPR("ct");
MODULE_ALIAS_NFT_EXPR("notrack");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_HELPER);
+MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_TIMEOUT);
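nft_ct_timeout_parse_policy() above only drives the generic nested parse; the real conversion happens in each l4proto's ctnl_timeout.nlattr_to_obj() callback. A hedged sketch of what such a callback does, converting per-state seconds into jiffies inside the object's data area (the DEMO_* attribute names and the two-state layout are hypothetical; real protocols map many more states):

enum { DEMO_TCP_UNSPEC, DEMO_TCP_SYN_SENT, DEMO_TCP_ESTABLISHED,
       __DEMO_TCP_MAX };

static int demo_tcp_nlattr_to_obj(struct nlattr *tb[], struct net *net,
                                  void *data)
{
        unsigned int *timeouts = data;  /* seconds in, jiffies out */

        if (tb[DEMO_TCP_SYN_SENT])
                timeouts[DEMO_TCP_SYN_SENT] =
                        ntohl(nla_get_be32(tb[DEMO_TCP_SYN_SENT])) * HZ;
        if (tb[DEMO_TCP_ESTABLISHED])
                timeouts[DEMO_TCP_ESTABLISHED] =
                        ntohl(nla_get_be32(tb[DEMO_TCP_ESTABLISHED])) * HZ;
        return 0;
}

In nft(8) terms this is the kernel side of a ct timeout object with a policy block such as { established : 2m }, though the userspace grammar is outside this patch.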
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 27d7e4598ab6..81184c244d1a 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -118,6 +118,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
u64 timeout;
int err;
+ lockdep_assert_held(&ctx->net->nft.commit_mutex);
+
if (tb[NFTA_DYNSET_SET_NAME] == NULL ||
tb[NFTA_DYNSET_OP] == NULL ||
tb[NFTA_DYNSET_SREG_KEY] == NULL)
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index c2a1d84cdfc4..ad13e8643599 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -26,9 +26,9 @@ struct nft_lookup {
struct nft_set_binding binding;
};
-static void nft_lookup_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_lookup_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
const struct nft_lookup *priv = nft_expr_priv(expr);
const struct nft_set *set = priv->set;
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 1105a23bda5e..297fe7d97c18 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -41,9 +41,9 @@ static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state);
#include "../bridge/br_private.h"
#endif
-static void nft_meta_get_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_meta_get_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
const struct nft_meta *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
@@ -107,7 +107,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
break;
case NFT_META_SKUID:
sk = skb_to_full_sk(skb);
- if (!sk || !sk_fullsock(sk))
+ if (!sk || !sk_fullsock(sk) ||
+ !net_eq(nft_net(pkt), sock_net(sk)))
goto err;
read_lock_bh(&sk->sk_callback_lock);
@@ -123,7 +124,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
break;
case NFT_META_SKGID:
sk = skb_to_full_sk(skb);
- if (!sk || !sk_fullsock(sk))
+ if (!sk || !sk_fullsock(sk) ||
+ !net_eq(nft_net(pkt), sock_net(sk)))
goto err;
read_lock_bh(&sk->sk_callback_lock);
@@ -214,7 +216,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
#ifdef CONFIG_CGROUP_NET_CLASSID
case NFT_META_CGROUP:
sk = skb_to_full_sk(skb);
- if (!sk || !sk_fullsock(sk))
+ if (!sk || !sk_fullsock(sk) ||
+ !net_eq(nft_net(pkt), sock_net(sk)))
goto err;
*dest = sock_cgroup_classid(&sk->sk_cgrp_data);
break;
diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
index 1f4d0854cf70..649d1700ec5b 100644
--- a/net/netfilter/nft_numgen.c
+++ b/net/netfilter/nft_numgen.c
@@ -237,10 +237,8 @@ static int nft_ng_random_map_init(const struct nft_ctx *ctx,
priv->map = nft_set_lookup_global(ctx->net, ctx->table,
tb[NFTA_NG_SET_NAME],
tb[NFTA_NG_SET_ID], genmask);
- if (IS_ERR(priv->map))
- return PTR_ERR(priv->map);
- return 0;
+ return PTR_ERR_OR_ZERO(priv->map);
}
static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
new file mode 100644
index 000000000000..5af74b37f423
--- /dev/null
+++ b/net/netfilter/nft_osf.c
@@ -0,0 +1,104 @@
+#include <net/ip.h>
+#include <net/tcp.h>
+
+#include <net/netfilter/nf_tables.h>
+#include <linux/netfilter/nfnetlink_osf.h>
+
+struct nft_osf {
+ enum nft_registers dreg:8;
+};
+
+static const struct nla_policy nft_osf_policy[NFTA_OSF_MAX + 1] = {
+ [NFTA_OSF_DREG] = { .type = NLA_U32 },
+};
+
+static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_osf *priv = nft_expr_priv(expr);
+ u32 *dest = &regs->data[priv->dreg];
+ struct sk_buff *skb = pkt->skb;
+ const struct tcphdr *tcp;
+ struct tcphdr _tcph;
+ const char *os_name;
+
+ tcp = skb_header_pointer(skb, ip_hdrlen(skb),
+ sizeof(struct tcphdr), &_tcph);
+ if (!tcp) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+ if (!tcp->syn) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+
+ os_name = nf_osf_find(skb, nf_osf_fingers);
+ if (!os_name)
+ strncpy((char *)dest, "unknown", NFT_OSF_MAXGENRELEN);
+ else
+ strncpy((char *)dest, os_name, NFT_OSF_MAXGENRELEN);
+}
+
+static int nft_osf_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_osf *priv = nft_expr_priv(expr);
+ int err;
+
+ priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
+ err = nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFTA_DATA_VALUE, NFT_OSF_MAXGENRELEN);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int nft_osf_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_osf *priv = nft_expr_priv(expr);
+
+ if (nft_dump_register(skb, NFTA_OSF_DREG, priv->dreg))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_osf_type;
+static const struct nft_expr_ops nft_osf_op = {
+ .eval = nft_osf_eval,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_osf)),
+ .init = nft_osf_init,
+ .dump = nft_osf_dump,
+ .type = &nft_osf_type,
+};
+
+static struct nft_expr_type nft_osf_type __read_mostly = {
+ .ops = &nft_osf_op,
+ .name = "osf",
+ .owner = THIS_MODULE,
+ .policy = nft_osf_policy,
+ .maxattr = NFTA_OSF_MAX,
+};
+
+static int __init nft_osf_module_init(void)
+{
+ return nft_register_expr(&nft_osf_type);
+}
+
+static void __exit nft_osf_module_exit(void)
+{
+ return nft_unregister_expr(&nft_osf_type);
+}
+
+module_init(nft_osf_module_init);
+module_exit(nft_osf_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>");
+MODULE_ALIAS_NFT_EXPR("osf");
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index 74e1b3bd6954..d7f3776dfd71 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -23,12 +23,15 @@ static void nft_socket_eval(const struct nft_expr *expr,
struct sock *sk = skb->sk;
u32 *dest = &regs->data[priv->dreg];
+ if (sk && !net_eq(nft_net(pkt), sock_net(sk)))
+ sk = NULL;
+
if (!sk)
switch(nft_pf(pkt)) {
case NFPROTO_IPV4:
sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, nft_in(pkt));
break;
-#if IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
case NFPROTO_IPV6:
sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, nft_in(pkt));
break;
@@ -39,8 +42,8 @@ static void nft_socket_eval(const struct nft_expr *expr,
return;
}
- if(!sk) {
- nft_reg_store8(dest, 0);
+ if (!sk) {
+ regs->verdict.code = NFT_BREAK;
return;
}
@@ -51,6 +54,14 @@ static void nft_socket_eval(const struct nft_expr *expr,
case NFT_SOCKET_TRANSPARENT:
nft_reg_store8(dest, inet_sk_transparent(sk));
break;
+ case NFT_SOCKET_MARK:
+ if (sk_fullsock(sk)) {
+ *dest = sk->sk_mark;
+ } else {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+ break;
default:
WARN_ON(1);
regs->verdict.code = NFT_BREAK;
@@ -74,7 +85,7 @@ static int nft_socket_init(const struct nft_ctx *ctx,
switch(ctx->family) {
case NFPROTO_IPV4:
-#if IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
case NFPROTO_IPV6:
#endif
case NFPROTO_INET:
@@ -88,6 +99,9 @@ static int nft_socket_init(const struct nft_ctx *ctx,
case NFT_SOCKET_TRANSPARENT:
len = sizeof(u8);
break;
+ case NFT_SOCKET_MARK:
+ len = sizeof(u32);
+ break;
default:
return -EOPNOTSUPP;
}
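The new NFT_SOCKET_MARK key exposes the 32-bit sk->sk_mark of the full socket owning the packet, i.e. whatever the owner set via SO_MARK or inherited. A hedged user-space counterpart that produces a mark this key would then observe (demo_set_mark() is illustrative; SO_MARK needs CAP_NET_ADMIN):

#include <sys/socket.h>

static int demo_set_mark(int fd, unsigned int mark)
{
        return setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
}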
diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
new file mode 100644
index 000000000000..eff99dffc842
--- /dev/null
+++ b/net/netfilter/nft_tproxy.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tproxy.h>
+#include <net/inet_sock.h>
+#include <net/tcp.h>
+#include <linux/if_ether.h>
+#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+#endif
+
+struct nft_tproxy {
+ enum nft_registers sreg_addr:8;
+ enum nft_registers sreg_port:8;
+ u8 family;
+};
+
+static void nft_tproxy_eval_v4(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_tproxy *priv = nft_expr_priv(expr);
+ struct sk_buff *skb = pkt->skb;
+ const struct iphdr *iph = ip_hdr(skb);
+ struct udphdr _hdr, *hp;
+ __be32 taddr = 0;
+ __be16 tport = 0;
+ struct sock *sk;
+
+ hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
+ if (!hp) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+
+ /* check if there's an ongoing connection on the packet addresses;
+ * this happens if the redirect already happened and the current
+ * packet belongs to an already established connection
+ */
+ sk = nf_tproxy_get_sock_v4(nft_net(pkt), skb, iph->protocol,
+ iph->saddr, iph->daddr,
+ hp->source, hp->dest,
+ skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED);
+
+ if (priv->sreg_addr)
+ taddr = regs->data[priv->sreg_addr];
+ taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr);
+
+ if (priv->sreg_port)
+ tport = regs->data[priv->sreg_port];
+ if (!tport)
+ tport = hp->dest;
+
+ /* UDP has no TCP_TIME_WAIT state, so we never enter here */
+ if (sk && sk->sk_state == TCP_TIME_WAIT) {
+ /* reopening a TIME_WAIT connection needs special handling */
+ sk = nf_tproxy_handle_time_wait4(nft_net(pkt), skb, taddr, tport, sk);
+ } else if (!sk) {
+ /* no, there's no established connection; check if
+ * there's a listener on the redirected addr/port
+ */
+ sk = nf_tproxy_get_sock_v4(nft_net(pkt), skb, iph->protocol,
+ iph->saddr, taddr,
+ hp->source, tport,
+ skb->dev, NF_TPROXY_LOOKUP_LISTENER);
+ }
+
+ if (sk && nf_tproxy_sk_is_transparent(sk))
+ nf_tproxy_assign_sock(skb, sk);
+ else
+ regs->verdict.code = NFT_BREAK;
+}
+
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
+static void nft_tproxy_eval_v6(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_tproxy *priv = nft_expr_priv(expr);
+ struct sk_buff *skb = pkt->skb;
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct in6_addr taddr = {0};
+ int thoff = pkt->xt.thoff;
+ struct udphdr _hdr, *hp;
+ __be16 tport = 0;
+ struct sock *sk;
+ int l4proto;
+
+ if (!pkt->tprot_set) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+ l4proto = pkt->tprot;
+
+ hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
+ if (hp == NULL) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+
+ /* check if there's an ongoing connection on the packet addresses;
+ * this happens if the redirect already happened and the current
+ * packet belongs to an already established connection
+ */
+ sk = nf_tproxy_get_sock_v6(nft_net(pkt), skb, thoff, l4proto,
+ &iph->saddr, &iph->daddr,
+ hp->source, hp->dest,
+ nft_in(pkt), NF_TPROXY_LOOKUP_ESTABLISHED);
+
+ if (priv->sreg_addr)
+ memcpy(&taddr, &regs->data[priv->sreg_addr], sizeof(taddr));
+ taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr);
+
+ if (priv->sreg_port)
+ tport = regs->data[priv->sreg_port];
+ if (!tport)
+ tport = hp->dest;
+
+ /* UDP has no TCP_TIME_WAIT state, so we never enter here */
+ if (sk && sk->sk_state == TCP_TIME_WAIT) {
+ /* reopening a TIME_WAIT connection needs special handling */
+ sk = nf_tproxy_handle_time_wait6(skb, l4proto, thoff,
+ nft_net(pkt),
+ &taddr,
+ tport,
+ sk);
+ } else if (!sk) {
+ /* no, there's no established connection; check if
+ * there's a listener on the redirected addr/port
+ */
+ sk = nf_tproxy_get_sock_v6(nft_net(pkt), skb, thoff,
+ l4proto, &iph->saddr, &taddr,
+ hp->source, tport,
+ nft_in(pkt), NF_TPROXY_LOOKUP_LISTENER);
+ }
+
+ /* NOTE: assign_sock consumes our sk reference */
+ if (sk && nf_tproxy_sk_is_transparent(sk))
+ nf_tproxy_assign_sock(skb, sk);
+ else
+ regs->verdict.code = NFT_BREAK;
+}
+#endif
+
+static void nft_tproxy_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_tproxy *priv = nft_expr_priv(expr);
+
+ switch (nft_pf(pkt)) {
+ case NFPROTO_IPV4:
+ switch (priv->family) {
+ case NFPROTO_IPV4:
+ case NFPROTO_UNSPEC:
+ nft_tproxy_eval_v4(expr, regs, pkt);
+ return;
+ }
+ break;
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
+ case NFPROTO_IPV6:
+ switch (priv->family) {
+ case NFPROTO_IPV6:
+ case NFPROTO_UNSPEC:
+ nft_tproxy_eval_v6(expr, regs, pkt);
+ return;
+ }
+#endif
+ }
+ regs->verdict.code = NFT_BREAK;
+}
+
+static const struct nla_policy nft_tproxy_policy[NFTA_TPROXY_MAX + 1] = {
+ [NFTA_TPROXY_FAMILY] = { .type = NLA_U32 },
+ [NFTA_TPROXY_REG_ADDR] = { .type = NLA_U32 },
+ [NFTA_TPROXY_REG_PORT] = { .type = NLA_U32 },
+};
+
+static int nft_tproxy_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_tproxy *priv = nft_expr_priv(expr);
+ unsigned int alen = 0;
+ int err;
+
+ if (!tb[NFTA_TPROXY_FAMILY] ||
+ (!tb[NFTA_TPROXY_REG_ADDR] && !tb[NFTA_TPROXY_REG_PORT]))
+ return -EINVAL;
+
+ priv->family = ntohl(nla_get_be32(tb[NFTA_TPROXY_FAMILY]));
+
+ switch (ctx->family) {
+ case NFPROTO_IPV4:
+ if (priv->family != NFPROTO_IPV4)
+ return -EINVAL;
+ break;
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
+ case NFPROTO_IPV6:
+ if (priv->family != NFPROTO_IPV6)
+ return -EINVAL;
+ break;
+#endif
+ case NFPROTO_INET:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Address is specified but the rule family is not set accordingly */
+ if (priv->family == NFPROTO_UNSPEC && tb[NFTA_TPROXY_REG_ADDR])
+ return -EINVAL;
+
+ switch (priv->family) {
+ case NFPROTO_IPV4:
+ alen = FIELD_SIZEOF(union nf_inet_addr, in);
+ err = nf_defrag_ipv4_enable(ctx->net);
+ if (err)
+ return err;
+ break;
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
+ case NFPROTO_IPV6:
+ alen = FIELD_SIZEOF(union nf_inet_addr, in6);
+ err = nf_defrag_ipv6_enable(ctx->net);
+ if (err)
+ return err;
+ break;
+#endif
+ case NFPROTO_UNSPEC:
+ /* No address is specified here */
+ err = nf_defrag_ipv4_enable(ctx->net);
+ if (err)
+ return err;
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
+ err = nf_defrag_ipv6_enable(ctx->net);
+ if (err)
+ return err;
+#endif
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (tb[NFTA_TPROXY_REG_ADDR]) {
+ priv->sreg_addr = nft_parse_register(tb[NFTA_TPROXY_REG_ADDR]);
+ err = nft_validate_register_load(priv->sreg_addr, alen);
+ if (err < 0)
+ return err;
+ }
+
+ if (tb[NFTA_TPROXY_REG_PORT]) {
+ priv->sreg_port = nft_parse_register(tb[NFTA_TPROXY_REG_PORT]);
+ err = nft_validate_register_load(priv->sreg_port, sizeof(u16));
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int nft_tproxy_dump(struct sk_buff *skb,
+ const struct nft_expr *expr)
+{
+ const struct nft_tproxy *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_TPROXY_FAMILY, htonl(priv->family)))
+ return -1;
+
+ if (priv->sreg_addr &&
+ nft_dump_register(skb, NFTA_TPROXY_REG_ADDR, priv->sreg_addr))
+ return -1;
+
+ if (priv->sreg_port &&
+ nft_dump_register(skb, NFTA_TPROXY_REG_PORT, priv->sreg_port))
+ return -1;
+
+ return 0;
+}
+
+static struct nft_expr_type nft_tproxy_type;
+static const struct nft_expr_ops nft_tproxy_ops = {
+ .type = &nft_tproxy_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_tproxy)),
+ .eval = nft_tproxy_eval,
+ .init = nft_tproxy_init,
+ .dump = nft_tproxy_dump,
+};
+
+static struct nft_expr_type nft_tproxy_type __read_mostly = {
+ .name = "tproxy",
+ .ops = &nft_tproxy_ops,
+ .policy = nft_tproxy_policy,
+ .maxattr = NFTA_TPROXY_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_tproxy_module_init(void)
+{
+ return nft_register_expr(&nft_tproxy_type);
+}
+
+static void __exit nft_tproxy_module_exit(void)
+{
+ nft_unregister_expr(&nft_tproxy_type);
+}
+
+module_init(nft_tproxy_module_init);
+module_exit(nft_tproxy_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Máté Eckl");
+MODULE_DESCRIPTION("nf_tables tproxy support module");
+MODULE_ALIAS_NFT_EXPR("tproxy");
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
new file mode 100644
index 000000000000..3a15f219e4e7
--- /dev/null
+++ b/net/netfilter/nft_tunnel.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/seqlock.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/dst_metadata.h>
+#include <net/ip_tunnels.h>
+#include <net/vxlan.h>
+#include <net/erspan.h>
+
+struct nft_tunnel {
+ enum nft_tunnel_keys key:8;
+ enum nft_registers dreg:8;
+};
+
+static void nft_tunnel_get_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_tunnel *priv = nft_expr_priv(expr);
+ u32 *dest = &regs->data[priv->dreg];
+ struct ip_tunnel_info *tun_info;
+
+ tun_info = skb_tunnel_info(pkt->skb);
+
+ switch (priv->key) {
+ case NFT_TUNNEL_PATH:
+ nft_reg_store8(dest, !!tun_info);
+ break;
+ case NFT_TUNNEL_ID:
+ if (!tun_info) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+ *dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
+ break;
+ default:
+ WARN_ON(1);
+ regs->verdict.code = NFT_BREAK;
+ }
+}
+
+static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
+ [NFTA_TUNNEL_KEY] = { .type = NLA_U32 },
+ [NFTA_TUNNEL_DREG] = { .type = NLA_U32 },
+};
+
+static int nft_tunnel_get_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_tunnel *priv = nft_expr_priv(expr);
+ u32 len;
+
+ if (!tb[NFTA_TUNNEL_KEY] &&
+ !tb[NFTA_TUNNEL_DREG])
+ return -EINVAL;
+
+ priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
+ switch (priv->key) {
+ case NFT_TUNNEL_PATH:
+ len = sizeof(u8);
+ break;
+ case NFT_TUNNEL_ID:
+ len = sizeof(u32);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]);
+
+ return nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE, len);
+}
+
+static int nft_tunnel_get_dump(struct sk_buff *skb,
+ const struct nft_expr *expr)
+{
+ const struct nft_tunnel *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
+ goto nla_put_failure;
+ if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_tunnel_type;
+static const struct nft_expr_ops nft_tunnel_get_ops = {
+ .type = &nft_tunnel_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
+ .eval = nft_tunnel_get_eval,
+ .init = nft_tunnel_get_init,
+ .dump = nft_tunnel_get_dump,
+};
+
+static struct nft_expr_type nft_tunnel_type __read_mostly = {
+ .name = "tunnel",
+ .ops = &nft_tunnel_get_ops,
+ .policy = nft_tunnel_policy,
+ .maxattr = NFTA_TUNNEL_MAX,
+ .owner = THIS_MODULE,
+};
+
+struct nft_tunnel_opts {
+ union {
+ struct vxlan_metadata vxlan;
+ struct erspan_metadata erspan;
+ } u;
+ u32 len;
+ __be16 flags;
+};
+
+struct nft_tunnel_obj {
+ struct metadata_dst *md;
+ struct nft_tunnel_opts opts;
+};
+
+static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
+ [NFTA_TUNNEL_KEY_IP_SRC] = { .type = NLA_U32 },
+ [NFTA_TUNNEL_KEY_IP_DST] = { .type = NLA_U32 },
+};
+
+static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
+ const struct nlattr *attr,
+ struct ip_tunnel_info *info)
+{
+ struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
+ nft_tunnel_ip_policy, NULL);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFTA_TUNNEL_KEY_IP_DST])
+ return -EINVAL;
+
+ if (tb[NFTA_TUNNEL_KEY_IP_SRC])
+ info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
+ if (tb[NFTA_TUNNEL_KEY_IP_DST])
+ info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);
+
+ return 0;
+}
+
+static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
+ [NFTA_TUNNEL_KEY_IP6_SRC] = { .len = sizeof(struct in6_addr), },
+ [NFTA_TUNNEL_KEY_IP6_DST] = { .len = sizeof(struct in6_addr), },
+ [NFTA_TUNNEL_KEY_IP6_FLOWLABEL] = { .type = NLA_U32, }
+};
+
+static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
+ const struct nlattr *attr,
+ struct ip_tunnel_info *info)
+{
+ struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
+ nft_tunnel_ip6_policy, NULL);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
+ return -EINVAL;
+
+ if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
+ memcpy(&info->key.u.ipv6.src,
+ nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
+ sizeof(struct in6_addr));
+ }
+ if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
+ memcpy(&info->key.u.ipv6.dst,
+ nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
+ sizeof(struct in6_addr));
+ }
+ if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
+ info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);
+
+ info->mode |= IP_TUNNEL_INFO_IPV6;
+
+ return 0;
+}
+
+static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
+ [NFTA_TUNNEL_KEY_VXLAN_GBP] = { .type = NLA_U32 },
+};
+
+static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
+ struct nft_tunnel_opts *opts)
+{
+ struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
+ nft_tunnel_opts_vxlan_policy, NULL);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
+ return -EINVAL;
+
+ opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));
+
+ opts->len = sizeof(struct vxlan_metadata);
+ opts->flags = TUNNEL_VXLAN_OPT;
+
+ return 0;
+}
+
+static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
+ [NFTA_TUNNEL_KEY_ERSPAN_VERSION] = { .type = NLA_U32 },
+ [NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX] = { .type = NLA_U32 },
+ [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 },
+ [NFTA_TUNNEL_KEY_ERSPAN_V2_HWID] = { .type = NLA_U8 },
+};
+
+static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
+ struct nft_tunnel_opts *opts)
+{
+ struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
+ uint8_t hwid, dir;
+ int err, version;
+
+ err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX, attr,
+ nft_tunnel_opts_erspan_policy, NULL);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
+ return -EINVAL;
+
+ version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
+ switch (version) {
+ case ERSPAN_VERSION:
+ if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
+ return -EINVAL;
+
+ opts->u.erspan.u.index =
+ nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
+ break;
+ case ERSPAN_VERSION2:
+ if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
+ !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
+ return -EINVAL;
+
+ hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
+ dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);
+
+ set_hwid(&opts->u.erspan.u.md2, hwid);
+ opts->u.erspan.u.md2.dir = dir;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ opts->u.erspan.version = version;
+
+ opts->len = sizeof(struct erspan_metadata);
+ opts->flags = TUNNEL_ERSPAN_OPT;
+
+ return 0;
+}
+
+static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
+ [NFTA_TUNNEL_KEY_OPTS_VXLAN] = { .type = NLA_NESTED, },
+ [NFTA_TUNNEL_KEY_OPTS_ERSPAN] = { .type = NLA_NESTED, },
+};
+
+static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
+ const struct nlattr *attr,
+ struct ip_tunnel_info *info,
+ struct nft_tunnel_opts *opts)
+{
+ struct nlattr *tb[NFTA_TUNNEL_KEY_OPTS_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_OPTS_MAX, attr,
+ nft_tunnel_opts_policy, NULL);
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_TUNNEL_KEY_OPTS_VXLAN]) {
+ err = nft_tunnel_obj_vxlan_init(tb[NFTA_TUNNEL_KEY_OPTS_VXLAN],
+ opts);
+ } else if (tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN]) {
+ err = nft_tunnel_obj_erspan_init(tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN],
+ opts);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
+ [NFTA_TUNNEL_KEY_IP] = { .type = NLA_NESTED, },
+ [NFTA_TUNNEL_KEY_IP6] = { .type = NLA_NESTED, },
+ [NFTA_TUNNEL_KEY_ID] = { .type = NLA_U32, },
+ [NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
+ [NFTA_TUNNEL_KEY_TOS] = { .type = NLA_U8, },
+ [NFTA_TUNNEL_KEY_TTL] = { .type = NLA_U8, },
+ [NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
+ [NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
+ [NFTA_TUNNEL_KEY_OPTS] = { .type = NLA_NESTED, },
+};
+
+static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[],
+ struct nft_object *obj)
+{
+ struct nft_tunnel_obj *priv = nft_obj_data(obj);
+ struct ip_tunnel_info info;
+ struct metadata_dst *md;
+ int err;
+
+ if (!tb[NFTA_TUNNEL_KEY_ID])
+ return -EINVAL;
+
+ memset(&info, 0, sizeof(info));
+ info.mode = IP_TUNNEL_INFO_TX;
+ info.key.tun_id = key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
+ info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
+
+ if (tb[NFTA_TUNNEL_KEY_IP]) {
+ err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
+ if (err < 0)
+ return err;
+ } else if (tb[NFTA_TUNNEL_KEY_IP6]) {
+ err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ if (tb[NFTA_TUNNEL_KEY_SPORT])
+ info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
+ if (tb[NFTA_TUNNEL_KEY_DPORT])
+ info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
+
+ if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
+ u32 tun_flags;
+
+ tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
+ if (tun_flags & ~NFT_TUNNEL_F_MASK)
+ return -EOPNOTSUPP;
+
+ if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
+ info.key.tun_flags &= ~TUNNEL_CSUM;
+ if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
+ info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
+ if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
+ info.key.tun_flags |= TUNNEL_SEQ;
+ }
+ if (tb[NFTA_TUNNEL_KEY_TOS])
+ info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
+ if (tb[NFTA_TUNNEL_KEY_TTL])
+ info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
+ else
+ info.key.ttl = U8_MAX;
+
+ if (tb[NFTA_TUNNEL_KEY_OPTS]) {
+ err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
+ &info, &priv->opts);
+ if (err < 0)
+ return err;
+ }
+
+ md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
+ if (!md)
+ return -ENOMEM;
+
+ memcpy(&md->u.tun_info, &info, sizeof(info));
+ ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
+ priv->opts.flags);
+ priv->md = md;
+
+ return 0;
+}
+
+static inline void nft_tunnel_obj_eval(struct nft_object *obj,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_tunnel_obj *priv = nft_obj_data(obj);
+ struct sk_buff *skb = pkt->skb;
+
+ skb_dst_drop(skb);
+ dst_hold((struct dst_entry *) priv->md);
+ skb_dst_set(skb, (struct dst_entry *) priv->md);
+}
+
+static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
+{
+ struct nlattr *nest;
+
+ if (info->mode & IP_TUNNEL_INFO_IPV6) {
+ nest = nla_nest_start(skb, NFTA_TUNNEL_KEY_IP6);
+ if (!nest)
+ return -1;
+
+ if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC, &info->key.u.ipv6.src) < 0 ||
+ nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST, &info->key.u.ipv6.dst) < 0 ||
+ nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL, info->key.label))
+ return -1;
+
+ nla_nest_end(skb, nest);
+ } else {
+ nest = nla_nest_start(skb, NFTA_TUNNEL_KEY_IP);
+ if (!nest)
+ return -1;
+
+ if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC, info->key.u.ipv4.src) < 0 ||
+ nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST, info->key.u.ipv4.dst) < 0)
+ return -1;
+
+ nla_nest_end(skb, nest);
+ }
+
+ return 0;
+}
+
+static int nft_tunnel_opts_dump(struct sk_buff *skb,
+ struct nft_tunnel_obj *priv)
+{
+ struct nft_tunnel_opts *opts = &priv->opts;
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, NFTA_TUNNEL_KEY_OPTS);
+ if (!nest)
+ return -1;
+
+ if (opts->flags & TUNNEL_VXLAN_OPT) {
+ if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
+ htonl(opts->u.vxlan.gbp)))
+ return -1;
+ } else if (opts->flags & TUNNEL_ERSPAN_OPT) {
+ switch (opts->u.erspan.version) {
+ case ERSPAN_VERSION:
+ if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
+ opts->u.erspan.u.index))
+ return -1;
+ break;
+ case ERSPAN_VERSION2:
+ if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
+ get_hwid(&opts->u.erspan.u.md2)) ||
+ nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
+ opts->u.erspan.u.md2.dir))
+ return -1;
+ break;
+ }
+ }
+ nla_nest_end(skb, nest);
+
+ return 0;
+}
+
+static int nft_tunnel_ports_dump(struct sk_buff *skb,
+ struct ip_tunnel_info *info)
+{
+ if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, htons(info->key.tp_src)) < 0 ||
+ nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, htons(info->key.tp_dst)) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int nft_tunnel_flags_dump(struct sk_buff *skb,
+ struct ip_tunnel_info *info)
+{
+ u32 flags = 0;
+
+ if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+ flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
+ if (!(info->key.tun_flags & TUNNEL_CSUM))
+ flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
+ if (info->key.tun_flags & TUNNEL_SEQ)
+ flags |= NFT_TUNNEL_F_SEQ_NUMBER;
+
+ if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int nft_tunnel_obj_dump(struct sk_buff *skb,
+ struct nft_object *obj, bool reset)
+{
+ struct nft_tunnel_obj *priv = nft_obj_data(obj);
+ struct ip_tunnel_info *info = &priv->md->u.tun_info;
+
+ if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
+ tunnel_id_to_key32(info->key.tun_id)) ||
+ nft_tunnel_ip_dump(skb, info) < 0 ||
+ nft_tunnel_ports_dump(skb, info) < 0 ||
+ nft_tunnel_flags_dump(skb, info) < 0 ||
+ nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
+ nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
+ nft_tunnel_opts_dump(skb, priv) < 0)
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
+ struct nft_object *obj)
+{
+ struct nft_tunnel_obj *priv = nft_obj_data(obj);
+
+ metadata_dst_free(priv->md);
+}
+
+static struct nft_object_type nft_tunnel_obj_type;
+static const struct nft_object_ops nft_tunnel_obj_ops = {
+ .type = &nft_tunnel_obj_type,
+ .size = sizeof(struct nft_tunnel_obj),
+ .eval = nft_tunnel_obj_eval,
+ .init = nft_tunnel_obj_init,
+ .destroy = nft_tunnel_obj_destroy,
+ .dump = nft_tunnel_obj_dump,
+};
+
+static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
+ .type = NFT_OBJECT_TUNNEL,
+ .ops = &nft_tunnel_obj_ops,
+ .maxattr = NFTA_TUNNEL_KEY_MAX,
+ .policy = nft_tunnel_key_policy,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_tunnel_module_init(void)
+{
+ int err;
+
+ err = nft_register_expr(&nft_tunnel_type);
+ if (err < 0)
+ return err;
+
+ err = nft_register_obj(&nft_tunnel_obj_type);
+ if (err < 0)
+ nft_unregister_expr(&nft_tunnel_type);
+
+ return err;
+}
+
+static void __exit nft_tunnel_module_exit(void)
+{
+ nft_unregister_obj(&nft_tunnel_obj_type);
+ nft_unregister_expr(&nft_tunnel_type);
+}
+
+module_init(nft_tunnel_module_init);
+module_exit(nft_tunnel_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_EXPR("tunnel");
+MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
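A note on the eval path above: the object holds one long-lived reference on its preallocated metadata_dst (released in ->destroy via metadata_dst_free()), and every packet takes an extra reference before the skb adopts the dst, because skb_dst_set() consumes one when the skb is freed. The same step in isolation, as a hedged sketch (demo_attach_tunnel_dst() is hypothetical; metadata_dst embeds its dst_entry as the first member, which is what the casts above rely on):

static void demo_attach_tunnel_dst(struct sk_buff *skb,
                                   struct metadata_dst *md)
{
        skb_dst_drop(skb);              /* drop any previous dst */
        dst_hold(&md->dst);             /* reference owned by the skb */
        skb_dst_set(skb, &md->dst);     /* released when skb is freed */
}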
diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
index 0b660c568156..e8da9a9bba73 100644
--- a/net/netfilter/utils.c
+++ b/net/netfilter/utils.c
@@ -1,14 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_queue.h>
+#include <net/ip6_checksum.h>
+
+#ifdef CONFIG_INET
+__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u8 protocol)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ __sum16 csum = 0;
+
+ switch (skb->ip_summed) {
+ case CHECKSUM_COMPLETE:
+ if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
+ break;
+ if ((protocol == 0 && !csum_fold(skb->csum)) ||
+ !csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - dataoff, protocol,
+ skb->csum)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ }
+ /* fall through */
+ case CHECKSUM_NONE:
+ if (protocol == 0)
+ skb->csum = 0;
+ else
+ skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
+ skb->len - dataoff,
+ protocol, 0);
+ csum = __skb_checksum_complete(skb);
+ }
+ return csum;
+}
+EXPORT_SYMBOL(nf_ip_checksum);
+#endif
+
+static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, unsigned int len,
+ u8 protocol)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ __sum16 csum = 0;
+
+ switch (skb->ip_summed) {
+ case CHECKSUM_COMPLETE:
+ if (len == skb->len - dataoff)
+ return nf_ip_checksum(skb, hook, dataoff, protocol);
+ /* fall through */
+ case CHECKSUM_NONE:
+ skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
+ skb->len - dataoff, protocol, 0);
+ skb->ip_summed = CHECKSUM_NONE;
+ return __skb_checksum_complete_head(skb, dataoff + len);
+ }
+ return csum;
+}
+
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u8 protocol)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ __sum16 csum = 0;
+
+ switch (skb->ip_summed) {
+ case CHECKSUM_COMPLETE:
+ if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
+ break;
+ if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ skb->len - dataoff, protocol,
+ csum_sub(skb->csum,
+ skb_checksum(skb, 0,
+ dataoff, 0)))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ }
+ /* fall through */
+ case CHECKSUM_NONE:
+ skb->csum = ~csum_unfold(
+ csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ skb->len - dataoff,
+ protocol,
+ csum_sub(0,
+ skb_checksum(skb, 0,
+ dataoff, 0))));
+ csum = __skb_checksum_complete(skb);
+ }
+ return csum;
+}
+EXPORT_SYMBOL(nf_ip6_checksum);
+
+static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, unsigned int len,
+ u8 protocol)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ __wsum hsum;
+ __sum16 csum = 0;
+
+ switch (skb->ip_summed) {
+ case CHECKSUM_COMPLETE:
+ if (len == skb->len - dataoff)
+ return nf_ip6_checksum(skb, hook, dataoff, protocol);
+ /* fall through */
+ case CHECKSUM_NONE:
+ hsum = skb_checksum(skb, 0, dataoff, 0);
+ skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
+ &ip6h->daddr,
+ skb->len - dataoff,
+ protocol,
+ csum_sub(0, hsum)));
+ skb->ip_summed = CHECKSUM_NONE;
+ return __skb_checksum_complete_head(skb, dataoff + len);
+ }
+ return csum;
+}
__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol,
+ unsigned int dataoff, u8 protocol,
unsigned short family)
{
- const struct nf_ipv6_ops *v6ops;
__sum16 csum = 0;
switch (family) {
@@ -16,9 +130,7 @@ __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
csum = nf_ip_checksum(skb, hook, dataoff, protocol);
break;
case AF_INET6:
- v6ops = rcu_dereference(nf_ipv6_ops);
- if (v6ops)
- csum = v6ops->checksum(skb, hook, dataoff, protocol);
+ csum = nf_ip6_checksum(skb, hook, dataoff, protocol);
break;
}
@@ -28,9 +140,8 @@ EXPORT_SYMBOL_GPL(nf_checksum);
__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, unsigned int len,
- u_int8_t protocol, unsigned short family)
+ u8 protocol, unsigned short family)
{
- const struct nf_ipv6_ops *v6ops;
__sum16 csum = 0;
switch (family) {
@@ -39,10 +150,8 @@ __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
protocol);
break;
case AF_INET6:
- v6ops = rcu_dereference(nf_ipv6_ops);
- if (v6ops)
- csum = v6ops->checksum_partial(skb, hook, dataoff, len,
- protocol);
+ csum = nf_ip6_checksum_partial(skb, hook, dataoff, len,
+ protocol);
break;
}
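One detail worth spelling out, since it motivates the argument-order fix in nf_ip_checksum_partial() above: csum_tcpudp_nofold() takes len as a __u32 but proto as a __u8, so a payload length passed in the proto slot is silently truncated to 8 bits. A user-space model of the pseudo-header accumulation and the final one's-complement fold (values hypothetical):

#include <stdint.h>
#include <stdio.h>

static uint16_t demo_fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);     /* fold carries */
        return (uint16_t)~sum;                          /* one's complement */
}

int main(void)
{
        uint32_t sum = 0;

        sum += 0xc0a8 + 0x0001;         /* saddr 192.168.0.1 */
        sum += 0xc0a8 + 0x0002;         /* daddr 192.168.0.2 */
        sum += 6;                       /* protocol (TCP) */
        sum += 40;                      /* transport length in bytes */

        printf("pseudo-header checksum: 0x%04x\n", demo_fold(sum));
        return 0;
}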
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
index f368ee6741db..af883f1b64f9 100644
--- a/net/netfilter/xt_AUDIT.c
+++ b/net/netfilter/xt_AUDIT.c
@@ -72,7 +72,7 @@ audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
struct audit_buffer *ab;
int fam = -1;
- if (audit_enabled == 0)
+ if (audit_enabled == AUDIT_OFF)
goto errout;
ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
if (ab == NULL)
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 03b9a50ec93b..89457efd2e00 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -93,7 +93,7 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
return -ENOENT;
}
- help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
+ help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
if (help == NULL) {
nf_conntrack_helper_put(helper);
return -ENOMEM;
@@ -104,7 +104,7 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-static void __xt_ct_tg_timeout_put(struct ctnl_timeout *timeout)
+static void __xt_ct_tg_timeout_put(struct nf_ct_timeout *timeout)
{
typeof(nf_ct_timeout_put_hook) timeout_put;
@@ -121,7 +121,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
const struct nf_conntrack_l4proto *l4proto;
- struct ctnl_timeout *timeout;
+ struct nf_ct_timeout *timeout;
struct nf_conn_timeout *timeout_ext;
const char *errmsg = NULL;
int ret = 0;
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 475957cfcf50..0d0d68c989df 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -38,7 +38,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
-#if IS_ENABLED(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static unsigned int
tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
@@ -141,7 +141,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
.destroy = tee_tg_destroy,
.me = THIS_MODULE,
},
-#if IS_ENABLED(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
{
.name = "TEE",
.revision = 1,
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index d76550a8b642..ad7420cdc439 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -36,15 +36,6 @@
#include <net/netfilter/nf_tproxy.h>
#include <linux/netfilter/xt_TPROXY.h>
-/* assign a socket to the skb -- consumes sk */
-static void
-nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
-{
- skb_orphan(skb);
- skb->sk = sk;
- skb->destructor = sock_edemux;
-}
-
static unsigned int
tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
u_int32_t mark_mask, u_int32_t mark_value)
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
index 7df2dece57d3..5d92e1781980 100644
--- a/net/netfilter/xt_cgroup.c
+++ b/net/netfilter/xt_cgroup.c
@@ -72,8 +72,9 @@ static bool
cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_cgroup_info_v0 *info = par->matchinfo;
+ struct sock *sk = skb->sk;
- if (skb->sk == NULL || !sk_fullsock(skb->sk))
+ if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
return false;
return (info->id == sock_cgroup_classid(&skb->sk->sk_cgrp_data)) ^
@@ -85,8 +86,9 @@ static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
const struct xt_cgroup_info_v1 *info = par->matchinfo;
struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
struct cgroup *ancestor = info->priv;
+ struct sock *sk = skb->sk;
- if (!skb->sk || !sk_fullsock(skb->sk))
+ if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
return false;
if (ancestor)
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 6275106ccf50..bc6c8ab0fa62 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -93,10 +93,8 @@ static int connlimit_mt_check(const struct xt_mtchk_param *par)
/* init private data */
info->data = nf_conncount_init(par->net, par->family, keylen);
- if (IS_ERR(info->data))
- return PTR_ERR(info->data);
- return 0;
+ return PTR_ERR_OR_ZERO(info->data);
}
static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
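The PTR_ERR_OR_ZERO() conversions here and in the nft_numgen hunk are behavior-preserving shorthand; expanded, the idiom reads:

static inline int demo_ptr_err_or_zero(const void *ptr)
{
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);    /* negative errno encoded in ptr */

        return 0;
}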
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 9cfef73b4107..bf7bba80e24c 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -37,118 +37,6 @@
#include <net/netfilter/nf_log.h>
#include <linux/netfilter/xt_osf.h>
-/*
- * Indexed by dont-fragment bit.
- * It is the only constant value in the fingerprint.
- */
-static struct list_head xt_osf_fingers[2];
-
-static const struct nla_policy xt_osf_policy[OSF_ATTR_MAX + 1] = {
- [OSF_ATTR_FINGER] = { .len = sizeof(struct xt_osf_user_finger) },
-};
-
-static int xt_osf_add_callback(struct net *net, struct sock *ctnl,
- struct sk_buff *skb, const struct nlmsghdr *nlh,
- const struct nlattr * const osf_attrs[],
- struct netlink_ext_ack *extack)
-{
- struct xt_osf_user_finger *f;
- struct xt_osf_finger *kf = NULL, *sf;
- int err = 0;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (!osf_attrs[OSF_ATTR_FINGER])
- return -EINVAL;
-
- if (!(nlh->nlmsg_flags & NLM_F_CREATE))
- return -EINVAL;
-
- f = nla_data(osf_attrs[OSF_ATTR_FINGER]);
-
- kf = kmalloc(sizeof(struct xt_osf_finger), GFP_KERNEL);
- if (!kf)
- return -ENOMEM;
-
- memcpy(&kf->finger, f, sizeof(struct xt_osf_user_finger));
-
- list_for_each_entry(sf, &xt_osf_fingers[!!f->df], finger_entry) {
- if (memcmp(&sf->finger, f, sizeof(struct xt_osf_user_finger)))
- continue;
-
- kfree(kf);
- kf = NULL;
-
- if (nlh->nlmsg_flags & NLM_F_EXCL)
- err = -EEXIST;
- break;
- }
-
- /*
- * We are protected by nfnl mutex.
- */
- if (kf)
- list_add_tail_rcu(&kf->finger_entry, &xt_osf_fingers[!!f->df]);
-
- return err;
-}
-
-static int xt_osf_remove_callback(struct net *net, struct sock *ctnl,
- struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- const struct nlattr * const osf_attrs[],
- struct netlink_ext_ack *extack)
-{
- struct xt_osf_user_finger *f;
- struct xt_osf_finger *sf;
- int err = -ENOENT;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (!osf_attrs[OSF_ATTR_FINGER])
- return -EINVAL;
-
- f = nla_data(osf_attrs[OSF_ATTR_FINGER]);
-
- list_for_each_entry(sf, &xt_osf_fingers[!!f->df], finger_entry) {
- if (memcmp(&sf->finger, f, sizeof(struct xt_osf_user_finger)))
- continue;
-
- /*
- * We are protected by nfnl mutex.
- */
- list_del_rcu(&sf->finger_entry);
- kfree_rcu(sf, rcu_head);
-
- err = 0;
- break;
- }
-
- return err;
-}
-
-static const struct nfnl_callback xt_osf_nfnetlink_callbacks[OSF_MSG_MAX] = {
- [OSF_MSG_ADD] = {
- .call = xt_osf_add_callback,
- .attr_count = OSF_ATTR_MAX,
- .policy = xt_osf_policy,
- },
- [OSF_MSG_REMOVE] = {
- .call = xt_osf_remove_callback,
- .attr_count = OSF_ATTR_MAX,
- .policy = xt_osf_policy,
- },
-};
-
-static const struct nfnetlink_subsystem xt_osf_nfnetlink = {
- .name = "osf",
- .subsys_id = NFNL_SUBSYS_OSF,
- .cb_count = OSF_MSG_MAX,
- .cb = xt_osf_nfnetlink_callbacks,
-};
-
static bool
xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
{
@@ -159,7 +47,7 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
return false;
return nf_osf_match(skb, xt_family(p), xt_hooknum(p), xt_in(p),
- xt_out(p), info, net, xt_osf_fingers);
+ xt_out(p), info, net, nf_osf_fingers);
}
static struct xt_match xt_osf_match = {
@@ -177,52 +65,21 @@ static struct xt_match xt_osf_match = {
static int __init xt_osf_init(void)
{
- int err = -EINVAL;
- int i;
-
- for (i=0; i<ARRAY_SIZE(xt_osf_fingers); ++i)
- INIT_LIST_HEAD(&xt_osf_fingers[i]);
-
- err = nfnetlink_subsys_register(&xt_osf_nfnetlink);
- if (err < 0) {
- pr_err("Failed to register OSF nsfnetlink helper (%d)\n", err);
- goto err_out_exit;
- }
+ int err;
err = xt_register_match(&xt_osf_match);
if (err) {
pr_err("Failed to register OS fingerprint "
"matching module (%d)\n", err);
- goto err_out_remove;
+ return err;
}
return 0;
-
-err_out_remove:
- nfnetlink_subsys_unregister(&xt_osf_nfnetlink);
-err_out_exit:
- return err;
}
static void __exit xt_osf_fini(void)
{
- struct xt_osf_finger *f;
- int i;
-
- nfnetlink_subsys_unregister(&xt_osf_nfnetlink);
xt_unregister_match(&xt_osf_match);
-
- rcu_read_lock();
- for (i=0; i<ARRAY_SIZE(xt_osf_fingers); ++i) {
-
- list_for_each_entry_rcu(f, &xt_osf_fingers[i], finger_entry) {
- list_del_rcu(&f->finger_entry);
- kfree_rcu(f, rcu_head);
- }
- }
- rcu_read_unlock();
-
- rcu_barrier();
}
module_init(xt_osf_init);
diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c
index 3d705c688a27..46686fb73784 100644
--- a/net/netfilter/xt_owner.c
+++ b/net/netfilter/xt_owner.c
@@ -67,7 +67,7 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
struct sock *sk = skb_to_full_sk(skb);
struct net *net = xt_net(par);
- if (sk == NULL || sk->sk_socket == NULL)
+ if (!sk || !sk->sk_socket || !net_eq(net, sock_net(sk)))
return (info->match ^ info->invert) == 0;
else if (info->match & info->invert & XT_OWNER_SOCKET)
/*
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 07085c22b19c..f44de4bc2100 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -265,7 +265,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
}
/* use TTL as seen before forwarding */
- if (xt_out(par) != NULL && skb->sk == NULL)
+ if (xt_out(par) != NULL &&
+ (!skb->sk || !net_eq(net, sock_net(skb->sk))))
ttl++;
spin_lock_bh(&recent_lock);
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 5c0779c4fa3c..0472f3472842 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -56,8 +56,12 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
struct sk_buff *pskb = (struct sk_buff *)skb;
struct sock *sk = skb->sk;
+ if (!net_eq(xt_net(par), sock_net(sk)))
+ sk = NULL;
+
if (!sk)
sk = nf_sk_lookup_slow_v4(xt_net(par), skb, xt_in(par));
+
if (sk) {
bool wildcard;
bool transparent = true;
@@ -113,8 +117,12 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
struct sk_buff *pskb = (struct sk_buff *)skb;
struct sock *sk = skb->sk;
+ if (!net_eq(xt_net(par), sock_net(sk)))
+ sk = NULL;
+
if (!sk)
sk = nf_sk_lookup_slow_v6(xt_net(par), skb, xt_in(par));
+
if (sk) {
bool wildcard;
bool transparent = true;
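Editor's note: the same cross-netns guard recurs in the xt_owner, xt_recent and xt_socket hunks above — a socket owned by a foreign network namespace is treated as if no socket were attached, forcing a fresh lookup in the current namespace. A minimal sketch of the pattern, with a hypothetical helper name (not part of the patch):

	/* Hypothetical helper; mirrors the open-coded checks above. */
	static inline struct sock *sk_if_same_netns(struct net *net,
						    struct sock *sk)
	{
		return (sk && net_eq(net, sock_net(sk))) ? sk : NULL;
	}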
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index 2f328af91a52..4676f5bb16ae 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -101,7 +101,7 @@ struct audit_buffer *netlbl_audit_start_common(int type,
char *secctx;
u32 secctx_len;
- if (audit_enabled == 0)
+ if (audit_enabled == AUDIT_OFF)
return NULL;
audit_buf = audit_log_start(audit_context(), GFP_ATOMIC, type);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 56704d95f82d..930d17fa906c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2307,7 +2307,6 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
cb = &nlk->cb;
memset(cb, 0, sizeof(*cb));
- cb->start = control->start;
cb->dump = control->dump;
cb->done = control->done;
cb->nlh = nlh;
@@ -2316,8 +2315,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
cb->min_dump_alloc = control->min_dump_alloc;
cb->skb = skb;
- if (cb->start) {
- ret = cb->start(cb);
+ if (control->start) {
+ ret = control->start(cb);
if (ret)
goto error_put;
}
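Editor's note: for context, a minimal sketch of how a dump with a start() hook is wired through struct netlink_dump_control; after this hunk, start() is invoked through the control structure rather than from the stored callback (my_start/my_dump/my_done are hypothetical names):

	{
		struct netlink_dump_control c = {
			.start = my_start,	/* run once, via control->start(cb) */
			.dump  = my_dump,	/* called repeatedly until it returns 0 */
			.done  = my_done,
		};

		return netlink_dump_start(ssk, skb, nlh, &c);
	}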
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index ea0c0c6f1874..dd4adf8b1167 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -556,7 +556,7 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
pr_debug("%p\n", sk);
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, wait);
if (sk->sk_state == LLCP_LISTEN)
return llcp_accept_poll(sk);
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 30a5df27116e..85ae53d8fd09 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1057,6 +1057,28 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
clone_flow_key);
}
+/* When 'last' is true, clone() should always consume the 'skb'.
+ * Otherwise, clone() should keep 'skb' intact regardless of what
+ * actions are executed within clone().
+ */
+static int clone(struct datapath *dp, struct sk_buff *skb,
+ struct sw_flow_key *key, const struct nlattr *attr,
+ bool last)
+{
+ struct nlattr *actions;
+ struct nlattr *clone_arg;
+ int rem = nla_len(attr);
+ bool dont_clone_flow_key;
+
+ /* The first action is always 'OVS_CLONE_ATTR_ARG'. */
+ clone_arg = nla_data(attr);
+ dont_clone_flow_key = nla_get_u32(clone_arg);
+ actions = nla_next(clone_arg, &rem);
+
+ return clone_execute(dp, skb, key, 0, actions, rem, last,
+ !dont_clone_flow_key);
+}
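Editor's note: a hedged reading of the nested attribute layout consumed above (inferred from this code, not quoted from the UAPI header):

	/*
	 * OVS_ACTION_ATTR_CLONE (nested)
	 *   attr[0]:    u32 "don't clone flow key" flag (OVS_CLONE_ATTR_ARG)
	 *   attr[1..n]: the copied actions handed to clone_execute()
	 */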
+
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
const struct nlattr *attr)
{
@@ -1336,6 +1358,17 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
consume_skb(skb);
return 0;
}
+ break;
+
+ case OVS_ACTION_ATTR_CLONE: {
+ bool last = nla_is_last(a, rem);
+
+ err = clone(dp, skb, key, a, last);
+ if (last)
+ return err;
+
+ break;
+ }
}
if (unlikely(err)) {
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 284aca2a252d..86a75105af1a 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -26,6 +26,7 @@
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+#include <net/ipv6_frag.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <linux/netfilter/nf_nat.h>
@@ -607,23 +608,12 @@ static struct nf_conn *
ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
u8 l3num, struct sk_buff *skb, bool natted)
{
- const struct nf_conntrack_l3proto *l3proto;
- const struct nf_conntrack_l4proto *l4proto;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
- unsigned int dataoff;
- u8 protonum;
- l3proto = __nf_ct_l3proto_find(l3num);
- if (l3proto->get_l4proto(skb, skb_network_offset(skb), &dataoff,
- &protonum) <= 0) {
- pr_debug("ovs_ct_find_existing: Can't get protonum\n");
- return NULL;
- }
- l4proto = __nf_ct_l4proto_find(l3num, protonum);
- if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
- protonum, net, &tuple, l3proto, l4proto)) {
+ if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num,
+ net, &tuple)) {
pr_debug("ovs_ct_find_existing: Can't get tuple\n");
return NULL;
}
@@ -632,7 +622,7 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
if (natted) {
struct nf_conntrack_tuple inverse;
- if (!nf_ct_invert_tuple(&inverse, &tuple, l3proto, l4proto)) {
+ if (!nf_ct_invert_tuplepr(&inverse, &tuple)) {
pr_debug("ovs_ct_find_existing: Inversion failed!\n");
return NULL;
}
@@ -1314,7 +1304,7 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
return -EINVAL;
}
- help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL);
+ help = nf_ct_helper_ext_add(info->ct, GFP_KERNEL);
if (!help) {
nf_conntrack_helper_put(helper);
return -ENOMEM;
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 492ab0c36f7c..a70097ecf33c 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2460,6 +2460,40 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
return 0;
}
+static int validate_and_copy_clone(struct net *net,
+ const struct nlattr *attr,
+ const struct sw_flow_key *key,
+ struct sw_flow_actions **sfa,
+ __be16 eth_type, __be16 vlan_tci,
+ bool log, bool last)
+{
+ int start, err;
+ u32 exec;
+
+ if (nla_len(attr) && nla_len(attr) < NLA_HDRLEN)
+ return -EINVAL;
+
+ start = add_nested_action_start(sfa, OVS_ACTION_ATTR_CLONE, log);
+ if (start < 0)
+ return start;
+
+ exec = last || !actions_may_change_flow(attr);
+
+ err = ovs_nla_add_action(sfa, OVS_CLONE_ATTR_EXEC, &exec,
+ sizeof(exec), log);
+ if (err)
+ return err;
+
+ err = __ovs_nla_copy_actions(net, attr, key, sfa,
+ eth_type, vlan_tci, log);
+ if (err)
+ return err;
+
+ add_nested_action_end(*sfa, start);
+
+ return 0;
+}
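Editor's note on the 'exec' flag computed above: the clone body may run directly on the caller's flow key when the clone is the last action, or when its nested actions cannot modify the key — that is what actions_may_change_flow() guards.

	/* exec == true  => run on the original key (no clone needed)
	 * exec == false => clone_execute() must copy the flow key first
	 */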
+
void ovs_match_init(struct sw_flow_match *match,
struct sw_flow_key *key,
bool reset_key,
@@ -2516,7 +2550,9 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
struct ovs_tunnel_info *ovs_tun;
struct nlattr *a;
int err = 0, start, opts_type;
+ __be16 dst_opt_type;
+ dst_opt_type = 0;
ovs_match_init(&match, &key, true, NULL);
opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
if (opts_type < 0)
@@ -2528,10 +2564,13 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
err = validate_geneve_opts(&key);
if (err < 0)
return err;
+ dst_opt_type = TUNNEL_GENEVE_OPT;
break;
case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
+ dst_opt_type = TUNNEL_VXLAN_OPT;
break;
case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
+ dst_opt_type = TUNNEL_ERSPAN_OPT;
break;
}
}
@@ -2574,7 +2613,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
*/
ip_tunnel_info_opts_set(tun_info,
TUN_METADATA_OPTS(&key, key.tun_opts_len),
- key.tun_opts_len);
+ key.tun_opts_len, dst_opt_type);
add_nested_action_end(*sfa, start);
return err;
@@ -2844,6 +2883,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
[OVS_ACTION_ATTR_PUSH_NSH] = (u32)-1,
[OVS_ACTION_ATTR_POP_NSH] = 0,
[OVS_ACTION_ATTR_METER] = sizeof(u32),
+ [OVS_ACTION_ATTR_CLONE] = (u32)-1,
};
const struct ovs_action_push_vlan *vlan;
int type = nla_type(a);
@@ -3033,6 +3073,18 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
/* Non-existent meters are simply ignored. */
break;
+ case OVS_ACTION_ATTR_CLONE: {
+ bool last = nla_is_last(a, rem);
+
+ err = validate_and_copy_clone(net, a, key, sfa,
+ eth_type, vlan_tci,
+ log, last);
+ if (err)
+ return err;
+ skip_copy = true;
+ break;
+ }
+
default:
OVS_NLERR(log, "Unknown Action type %d", type);
return -EINVAL;
@@ -3111,6 +3163,26 @@ out:
return err;
}
+static int clone_action_to_attr(const struct nlattr *attr,
+ struct sk_buff *skb)
+{
+ struct nlattr *start;
+ int err = 0, rem = nla_len(attr);
+
+ start = nla_nest_start(skb, OVS_ACTION_ATTR_CLONE);
+ if (!start)
+ return -EMSGSIZE;
+
+ err = ovs_nla_put_actions(nla_data(attr), rem, skb);
+
+ if (err)
+ nla_nest_cancel(skb, start);
+ else
+ nla_nest_end(skb, start);
+
+ return err;
+}
+
static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
{
const struct nlattr *ovs_key = nla_data(a);
@@ -3199,6 +3271,12 @@ int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
return err;
break;
+ case OVS_ACTION_ATTR_CLONE:
+ err = clone_action_to_attr(a, skb);
+ if (err)
+ return err;
+ break;
+
default:
if (nla_put(skb, type, nla_len(a), nla_data(a)))
return -EMSGSIZE;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e6445d8f3f57..5610061e7f2e 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -275,9 +275,10 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
return po->xmit == packet_direct_xmit;
}
-static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
{
- return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
+ return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL);
}
static u16 packet_pick_tx_queue(struct sk_buff *skb)
@@ -291,7 +292,7 @@ static u16 packet_pick_tx_queue(struct sk_buff *skb)
__packet_pick_tx_queue);
queue_index = netdev_cap_txqueue(dev, queue_index);
} else {
- queue_index = __packet_pick_tx_queue(dev, skb);
+ queue_index = __packet_pick_tx_queue(dev, skb, NULL);
}
return queue_index;
@@ -1581,7 +1582,7 @@ static int fanout_set_data(struct packet_sock *po, char __user *data,
return fanout_set_data_ebpf(po, data, len);
default:
return -EINVAL;
- };
+ }
}
static void fanout_release_data(struct packet_fanout *f)
@@ -1590,7 +1591,7 @@ static void fanout_release_data(struct packet_fanout *f)
case PACKET_FANOUT_CBPF:
case PACKET_FANOUT_EBPF:
__fanout_set_data_bpf(f, NULL);
- };
+ }
}
static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
@@ -1951,7 +1952,7 @@ retry:
goto out_unlock;
}
- sockc.tsflags = sk->sk_tsflags;
+ sockcm_init(&sockc, sk);
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
if (unlikely(err))
@@ -1962,6 +1963,7 @@ retry:
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
+ skb->tstamp = sockc.transmit_time;
sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
@@ -2457,6 +2459,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
skb->dev = dev;
skb->priority = po->sk.sk_priority;
skb->mark = po->sk.sk_mark;
+ skb->tstamp = sockc->transmit_time;
sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
skb_shinfo(skb)->destructor_arg = ph.raw;
@@ -2633,7 +2636,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
if (unlikely(!(dev->flags & IFF_UP)))
goto out_put;
- sockc.tsflags = po->sk.sk_tsflags;
+ sockcm_init(&sockc, &po->sk);
if (msg->msg_controllen) {
err = sock_cmsg_send(&po->sk, msg, &sockc);
if (unlikely(err))
@@ -2829,7 +2832,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (unlikely(!(dev->flags & IFF_UP)))
goto out_unlock;
- sockc.tsflags = sk->sk_tsflags;
+ sockcm_init(&sockc, sk);
sockc.mark = sk->sk_mark;
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
@@ -2905,6 +2908,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sockc.mark;
+ skb->tstamp = sockc.transmit_time;
if (has_vnet_hdr) {
err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
@@ -4133,52 +4137,36 @@ static const struct vm_operations_struct packet_mmap_ops = {
.close = packet_mm_close,
};
-static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
- unsigned int len)
+static void free_pg_vec(struct pgv *pg_vec, unsigned int len)
{
int i;
for (i = 0; i < len; i++) {
if (likely(pg_vec[i].buffer)) {
- if (is_vmalloc_addr(pg_vec[i].buffer))
- vfree(pg_vec[i].buffer);
- else
- free_pages((unsigned long)pg_vec[i].buffer,
- order);
+ kvfree(pg_vec[i].buffer);
pg_vec[i].buffer = NULL;
}
}
kfree(pg_vec);
}
-static char *alloc_one_pg_vec_page(unsigned long order)
+static char *alloc_one_pg_vec_page(unsigned long size)
{
char *buffer;
- gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
- __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
- buffer = (char *) __get_free_pages(gfp_flags, order);
+ buffer = kvzalloc(size, GFP_KERNEL);
if (buffer)
return buffer;
- /* __get_free_pages failed, fall back to vmalloc */
- buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
- if (buffer)
- return buffer;
+ buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
- /* vmalloc failed, lets dig into swap here */
- gfp_flags &= ~__GFP_NORETRY;
- buffer = (char *) __get_free_pages(gfp_flags, order);
- if (buffer)
- return buffer;
-
- /* complete and utter failure */
- return NULL;
+ return buffer;
}
-static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
+static struct pgv *alloc_pg_vec(struct tpacket_req *req)
{
unsigned int block_nr = req->tp_block_nr;
+ unsigned long size = req->tp_block_size;
struct pgv *pg_vec;
int i;
@@ -4187,7 +4175,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
goto out;
for (i = 0; i < block_nr; i++) {
- pg_vec[i].buffer = alloc_one_pg_vec_page(order);
+ pg_vec[i].buffer = alloc_one_pg_vec_page(size);
if (unlikely(!pg_vec[i].buffer))
goto out_free_pgvec;
}
@@ -4196,7 +4184,7 @@ out:
return pg_vec;
out_free_pgvec:
- free_pg_vec(pg_vec, order, block_nr);
+ free_pg_vec(pg_vec, block_nr);
pg_vec = NULL;
goto out;
}
@@ -4206,9 +4194,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
{
struct pgv *pg_vec = NULL;
struct packet_sock *po = pkt_sk(sk);
- int was_running, order = 0;
struct packet_ring_buffer *rb;
struct sk_buff_head *rb_queue;
+ int was_running;
__be16 num;
int err = -EINVAL;
/* Added to avoid minimal code churn */
@@ -4270,8 +4258,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
goto out;
err = -ENOMEM;
- order = get_order(req->tp_block_size);
- pg_vec = alloc_pg_vec(req, order);
+ pg_vec = alloc_pg_vec(req);
if (unlikely(!pg_vec))
goto out;
switch (po->tp_version) {
@@ -4325,7 +4312,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
rb->frame_size = req->tp_frame_size;
spin_unlock_bh(&rb_queue->lock);
- swap(rb->pg_vec_order, order);
swap(rb->pg_vec_len, req->tp_block_nr);
rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
@@ -4351,7 +4337,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
}
if (pg_vec)
- free_pg_vec(pg_vec, order, req->tp_block_nr);
+ free_pg_vec(pg_vec, req->tp_block_nr);
out:
return err;
}
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 3bb7c5fb3bff..8f50036f62f0 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -64,7 +64,6 @@ struct packet_ring_buffer {
unsigned int frame_size;
unsigned int frame_max;
- unsigned int pg_vec_order;
unsigned int pg_vec_pages;
unsigned int pg_vec_len;
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index bffde4b46c5d..01b3bd6a3708 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -16,6 +16,7 @@ config RDS_RDMA
config RDS_TCP
tristate "RDS over TCP"
depends on RDS
+ depends on IPV6 || !IPV6
---help---
Allow RDS to use TCP as a transport.
This transport does not support RDMA operations.
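Editor's note: the added "depends on IPV6 || !IPV6" is the usual Kconfig idiom for tying one tristate to another. It is vacuously true when IPV6 is y or n, but when IPV6=m it caps RDS_TCP at m as well, so rds_tcp can always link against ipv6 module symbols.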
@@ -24,4 +25,3 @@ config RDS_DEBUG
bool "RDS debugging messages"
depends on RDS
default n
-
diff --git a/net/rds/Makefile b/net/rds/Makefile
index b5d568bd479c..e647f9de104a 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -15,4 +15,3 @@ rds_tcp-y := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \
tcp_send.o tcp_stats.o
ccflags-$(CONFIG_RDS_DEBUG) := -DRDS_DEBUG
-
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index ab751a150f70..65387e1e6964 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
+#include <linux/ipv6.h>
#include <linux/poll.h>
#include <net/sock.h>
@@ -113,26 +114,82 @@ void rds_wake_sk_sleep(struct rds_sock *rs)
static int rds_getname(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
- struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
struct rds_sock *rs = rds_sk_to_rs(sock->sk);
-
- memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_in *sin;
+ int uaddr_len;
/* racey, don't care */
if (peer) {
- if (!rs->rs_conn_addr)
+ if (ipv6_addr_any(&rs->rs_conn_addr))
return -ENOTCONN;
- sin->sin_port = rs->rs_conn_port;
- sin->sin_addr.s_addr = rs->rs_conn_addr;
+ if (ipv6_addr_v4mapped(&rs->rs_conn_addr)) {
+ sin = (struct sockaddr_in *)uaddr;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+ sin->sin_family = AF_INET;
+ sin->sin_port = rs->rs_conn_port;
+ sin->sin_addr.s_addr = rs->rs_conn_addr_v4;
+ uaddr_len = sizeof(*sin);
+ } else {
+ sin6 = (struct sockaddr_in6 *)uaddr;
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = rs->rs_conn_port;
+ sin6->sin6_addr = rs->rs_conn_addr;
+ sin6->sin6_flowinfo = 0;
+ /* scope_id is the same as in the bound address. */
+ sin6->sin6_scope_id = rs->rs_bound_scope_id;
+ uaddr_len = sizeof(*sin6);
+ }
} else {
- sin->sin_port = rs->rs_bound_port;
- sin->sin_addr.s_addr = rs->rs_bound_addr;
+ /* If the socket is not yet bound but is connected,
+ * set the return address family to be the same as the
+ * connected address, but with 0 address value. If it is not
+ * connected, set the family to be AF_UNSPEC (value 0) and
+ * the address size to be that of an IPv4 address.
+ */
+ if (ipv6_addr_any(&rs->rs_bound_addr)) {
+ if (ipv6_addr_any(&rs->rs_conn_addr)) {
+ sin = (struct sockaddr_in *)uaddr;
+ memset(sin, 0, sizeof(*sin));
+ sin->sin_family = AF_UNSPEC;
+ return sizeof(*sin);
+ }
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!(ipv6_addr_type(&rs->rs_conn_addr) &
+ IPV6_ADDR_MAPPED)) {
+ sin6 = (struct sockaddr_in6 *)uaddr;
+ memset(sin6, 0, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+ return sizeof(*sin6);
+ }
+#endif
+
+ sin = (struct sockaddr_in *)uaddr;
+ memset(sin, 0, sizeof(*sin));
+ sin->sin_family = AF_INET;
+ return sizeof(*sin);
+ }
+ if (ipv6_addr_v4mapped(&rs->rs_bound_addr)) {
+ sin = (struct sockaddr_in *)uaddr;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+ sin->sin_family = AF_INET;
+ sin->sin_port = rs->rs_bound_port;
+ sin->sin_addr.s_addr = rs->rs_bound_addr_v4;
+ uaddr_len = sizeof(*sin);
+ } else {
+ sin6 = (struct sockaddr_in6 *)uaddr;
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = rs->rs_bound_port;
+ sin6->sin6_addr = rs->rs_bound_addr;
+ sin6->sin6_flowinfo = 0;
+ sin6->sin6_scope_id = rs->rs_bound_scope_id;
+ uaddr_len = sizeof(*sin6);
+ }
}
- sin->sin_family = AF_INET;
-
- return sizeof(*sin);
+ return uaddr_len;
}
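Editor's note: the two branches above share one shape; a condensed sketch with a hypothetical helper (fill_addr and its parameters are not from the patch), assuming the caller passes a buffer large enough for a sockaddr_in6:

	static int fill_addr(struct sockaddr *ua, const struct in6_addr *a,
			     __be16 port, __u32 scope_id)	/* hypothetical */
	{
		if (ipv6_addr_v4mapped(a)) {
			struct sockaddr_in *sin = (struct sockaddr_in *)ua;

			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
			sin->sin_family = AF_INET;
			sin->sin_port = port;
			sin->sin_addr.s_addr = a->s6_addr32[3];
			return sizeof(*sin);
		} else {
			struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)ua;

			sin6->sin6_family = AF_INET6;
			sin6->sin6_port = port;
			sin6->sin6_addr = *a;
			sin6->sin6_flowinfo = 0;
			sin6->sin6_scope_id = scope_id;
			return sizeof(*sin6);
		}
	}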
/*
@@ -203,11 +260,12 @@ static int rds_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval,
int len)
{
+ struct sockaddr_in6 sin6;
struct sockaddr_in sin;
int ret = 0;
/* racing with another thread binding seems ok here */
- if (rs->rs_bound_addr == 0) {
+ if (ipv6_addr_any(&rs->rs_bound_addr)) {
ret = -ENOTCONN; /* XXX not a great errno */
goto out;
}
@@ -215,14 +273,23 @@ static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval,
if (len < sizeof(struct sockaddr_in)) {
ret = -EINVAL;
goto out;
+ } else if (len < sizeof(struct sockaddr_in6)) {
+ /* Assume IPv4 */
+ if (copy_from_user(&sin, optval, sizeof(struct sockaddr_in))) {
+ ret = -EFAULT;
+ goto out;
+ }
+ ipv6_addr_set_v4mapped(sin.sin_addr.s_addr, &sin6.sin6_addr);
+ sin6.sin6_port = sin.sin_port;
+ } else {
+ if (copy_from_user(&sin6, optval,
+ sizeof(struct sockaddr_in6))) {
+ ret = -EFAULT;
+ goto out;
+ }
}
- if (copy_from_user(&sin, optval, sizeof(sin))) {
- ret = -EFAULT;
- goto out;
- }
-
- rds_send_drop_to(rs, &sin);
+ rds_send_drop_to(rs, &sin6);
out:
return ret;
}
@@ -435,31 +502,91 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
- struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
+ struct sockaddr_in *sin;
struct rds_sock *rs = rds_sk_to_rs(sk);
int ret = 0;
lock_sock(sk);
- if (addr_len != sizeof(struct sockaddr_in)) {
- ret = -EINVAL;
- goto out;
- }
+ switch (uaddr->sa_family) {
+ case AF_INET:
+ sin = (struct sockaddr_in *)uaddr;
+ if (addr_len < sizeof(struct sockaddr_in)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) {
+ ret = -EDESTADDRREQ;
+ break;
+ }
+ if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) ||
+ sin->sin_addr.s_addr == htonl(INADDR_BROADCAST)) {
+ ret = -EINVAL;
+ break;
+ }
+ ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &rs->rs_conn_addr);
+ rs->rs_conn_port = sin->sin_port;
+ break;
- if (sin->sin_family != AF_INET) {
- ret = -EAFNOSUPPORT;
- goto out;
- }
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6: {
+ struct sockaddr_in6 *sin6;
+ int addr_type;
- if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) {
- ret = -EDESTADDRREQ;
- goto out;
+ sin6 = (struct sockaddr_in6 *)uaddr;
+ if (addr_len < sizeof(struct sockaddr_in6)) {
+ ret = -EINVAL;
+ break;
+ }
+ addr_type = ipv6_addr_type(&sin6->sin6_addr);
+ if (!(addr_type & IPV6_ADDR_UNICAST)) {
+ __be32 addr4;
+
+ if (!(addr_type & IPV6_ADDR_MAPPED)) {
+ ret = -EPROTOTYPE;
+ break;
+ }
+
+ /* It is a mapped address. Need to do some sanity
+ * checks.
+ */
+ addr4 = sin6->sin6_addr.s6_addr32[3];
+ if (addr4 == htonl(INADDR_ANY) ||
+ addr4 == htonl(INADDR_BROADCAST) ||
+ IN_MULTICAST(ntohl(addr4))) {
+ ret = -EPROTOTYPE;
+ break;
+ }
+ }
+
+ if (addr_type & IPV6_ADDR_LINKLOCAL) {
+ /* If socket is already bound to a link local address,
+ * the peer address must be on the same link.
+ */
+ if (sin6->sin6_scope_id == 0 ||
+ (!ipv6_addr_any(&rs->rs_bound_addr) &&
+ rs->rs_bound_scope_id &&
+ sin6->sin6_scope_id != rs->rs_bound_scope_id)) {
+ ret = -EINVAL;
+ break;
+ }
+ /* Remember the connected address scope ID. It will
+ * be checked against the binding local address when
+ * the socket is bound.
+ */
+ rs->rs_bound_scope_id = sin6->sin6_scope_id;
+ }
+ rs->rs_conn_addr = sin6->sin6_addr;
+ rs->rs_conn_port = sin6->sin6_port;
+ break;
}
+#endif
- rs->rs_conn_addr = sin->sin_addr.s_addr;
- rs->rs_conn_port = sin->sin_port;
+ default:
+ ret = -EAFNOSUPPORT;
+ break;
+ }
-out:
release_sock(sk);
return ret;
}
@@ -578,8 +705,10 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
list_for_each_entry(inc, &rs->rs_recv_queue, i_item) {
total++;
if (total <= len)
- rds_inc_info_copy(inc, iter, inc->i_saddr,
- rs->rs_bound_addr, 1);
+ rds_inc_info_copy(inc, iter,
+ inc->i_saddr.s6_addr32[3],
+ rs->rs_bound_addr_v4,
+ 1);
}
read_unlock(&rs->rs_recv_lock);
@@ -608,8 +737,8 @@ static void rds_sock_info(struct socket *sock, unsigned int len,
list_for_each_entry(rs, &rds_sock_list, rs_item) {
sinfo.sndbuf = rds_sk_sndbuf(rs);
sinfo.rcvbuf = rds_sk_rcvbuf(rs);
- sinfo.bound_addr = rs->rs_bound_addr;
- sinfo.connected_addr = rs->rs_conn_addr;
+ sinfo.bound_addr = rs->rs_bound_addr_v4;
+ sinfo.connected_addr = rs->rs_conn_addr_v4;
sinfo.bound_port = rs->rs_bound_port;
sinfo.connected_port = rs->rs_conn_port;
sinfo.inum = sock_i_ino(rds_rs_to_sk(rs));
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 5aa3a64aa4f0..3ab55784b637 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <net/sock.h>
#include <linux/in.h>
+#include <linux/ipv6.h>
#include <linux/if_arp.h>
#include <linux/jhash.h>
#include <linux/ratelimit.h>
@@ -42,42 +43,58 @@ static struct rhashtable bind_hash_table;
static const struct rhashtable_params ht_parms = {
.nelem_hint = 768,
- .key_len = sizeof(u64),
+ .key_len = RDS_BOUND_KEY_LEN,
.key_offset = offsetof(struct rds_sock, rs_bound_key),
.head_offset = offsetof(struct rds_sock, rs_bound_node),
.max_size = 16384,
.min_size = 1024,
};
+/* Create a key for the bind hash table manipulation. Port is in network byte
+ * order.
+ */
+static inline void __rds_create_bind_key(u8 *key, const struct in6_addr *addr,
+ __be16 port, __u32 scope_id)
+{
+ memcpy(key, addr, sizeof(*addr));
+ key += sizeof(*addr);
+ memcpy(key, &port, sizeof(port));
+ key += sizeof(port);
+ memcpy(key, &scope_id, sizeof(scope_id));
+}
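Editor's note: assuming RDS_BOUND_KEY_LEN is defined as the sum of the three fields copied above (a 16-byte in6_addr, a 2-byte port and a 4-byte scope ID), a compile-time check along these lines would pin that layout down:

	BUILD_BUG_ON(RDS_BOUND_KEY_LEN !=
		     sizeof(struct in6_addr) + sizeof(__be16) + sizeof(__u32));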
+
/*
* Return the rds_sock bound at the given local address.
*
* The rx path can race with rds_release. We notice if rds_release() has
* marked this socket and don't return a rs ref to the rx path.
*/
-struct rds_sock *rds_find_bound(__be32 addr, __be16 port)
+struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
+ __u32 scope_id)
{
- u64 key = ((u64)addr << 32) | port;
+ u8 key[RDS_BOUND_KEY_LEN];
struct rds_sock *rs;
- rs = rhashtable_lookup_fast(&bind_hash_table, &key, ht_parms);
+ __rds_create_bind_key(key, addr, port, scope_id);
+ rs = rhashtable_lookup_fast(&bind_hash_table, key, ht_parms);
if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
rds_sock_addref(rs);
else
rs = NULL;
- rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr,
- ntohs(port));
+ rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
+ ntohs(port));
return rs;
}
/* returns -ve errno or +ve port */
-static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
+static int rds_add_bound(struct rds_sock *rs, const struct in6_addr *addr,
+ __be16 *port, __u32 scope_id)
{
int ret = -EADDRINUSE;
u16 rover, last;
- u64 key;
+ u8 key[RDS_BOUND_KEY_LEN];
if (*port != 0) {
rover = be16_to_cpu(*port);
@@ -95,12 +112,13 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
if (rover == RDS_FLAG_PROBE_PORT)
continue;
- key = ((u64)addr << 32) | cpu_to_be16(rover);
- if (rhashtable_lookup_fast(&bind_hash_table, &key, ht_parms))
+ __rds_create_bind_key(key, addr, cpu_to_be16(rover),
+ scope_id);
+ if (rhashtable_lookup_fast(&bind_hash_table, key, ht_parms))
continue;
- rs->rs_bound_key = key;
- rs->rs_bound_addr = addr;
+ memcpy(rs->rs_bound_key, key, sizeof(rs->rs_bound_key));
+ rs->rs_bound_addr = *addr;
net_get_random_once(&rs->rs_hash_initval,
sizeof(rs->rs_hash_initval));
rs->rs_bound_port = cpu_to_be16(rover);
@@ -109,12 +127,13 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
if (!rhashtable_insert_fast(&bind_hash_table,
&rs->rs_bound_node, ht_parms)) {
*port = rs->rs_bound_port;
+ rs->rs_bound_scope_id = scope_id;
ret = 0;
- rdsdebug("rs %p binding to %pI4:%d\n",
- rs, &addr, (int)ntohs(*port));
+ rdsdebug("rs %p binding to %pI6c:%d\n",
+ rs, addr, (int)ntohs(*port));
break;
} else {
- rs->rs_bound_addr = 0;
+ rs->rs_bound_addr = in6addr_any;
rds_sock_put(rs);
ret = -ENOMEM;
break;
@@ -127,44 +146,103 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
void rds_remove_bound(struct rds_sock *rs)
{
- if (!rs->rs_bound_addr)
+ if (ipv6_addr_any(&rs->rs_bound_addr))
return;
- rdsdebug("rs %p unbinding from %pI4:%d\n",
+ rdsdebug("rs %p unbinding from %pI6c:%d\n",
rs, &rs->rs_bound_addr,
ntohs(rs->rs_bound_port));
rhashtable_remove_fast(&bind_hash_table, &rs->rs_bound_node, ht_parms);
rds_sock_put(rs);
- rs->rs_bound_addr = 0;
+ rs->rs_bound_addr = in6addr_any;
}
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
- struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
struct rds_sock *rs = rds_sk_to_rs(sk);
+ struct in6_addr v6addr, *binding_addr;
struct rds_transport *trans;
+ __u32 scope_id = 0;
int ret = 0;
+ __be16 port;
+
+ /* We allow an RDS socket to be bound to either an IPv4 or an
+ * IPv6 address.
+ */
+ if (uaddr->sa_family == AF_INET) {
+ struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
+
+ if (addr_len < sizeof(struct sockaddr_in) ||
+ sin->sin_addr.s_addr == htonl(INADDR_ANY) ||
+ sin->sin_addr.s_addr == htonl(INADDR_BROADCAST) ||
+ IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
+ return -EINVAL;
+ ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &v6addr);
+ binding_addr = &v6addr;
+ port = sin->sin_port;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (uaddr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)uaddr;
+ int addr_type;
+
+ if (addr_len < sizeof(struct sockaddr_in6))
+ return -EINVAL;
+ addr_type = ipv6_addr_type(&sin6->sin6_addr);
+ if (!(addr_type & IPV6_ADDR_UNICAST)) {
+ __be32 addr4;
+ if (!(addr_type & IPV6_ADDR_MAPPED))
+ return -EINVAL;
+
+ /* It is a mapped address. Need to do some sanity
+ * checks.
+ */
+ addr4 = sin6->sin6_addr.s6_addr32[3];
+ if (addr4 == htonl(INADDR_ANY) ||
+ addr4 == htonl(INADDR_BROADCAST) ||
+ IN_MULTICAST(ntohl(addr4)))
+ return -EINVAL;
+ }
+ /* The scope ID must be specified for a link-local address. */
+ if (addr_type & IPV6_ADDR_LINKLOCAL) {
+ if (sin6->sin6_scope_id == 0)
+ return -EINVAL;
+ scope_id = sin6->sin6_scope_id;
+ }
+ binding_addr = &sin6->sin6_addr;
+ port = sin6->sin6_port;
+#endif
+ } else {
+ return -EINVAL;
+ }
lock_sock(sk);
- if (addr_len != sizeof(struct sockaddr_in) ||
- sin->sin_family != AF_INET ||
- rs->rs_bound_addr ||
- sin->sin_addr.s_addr == htonl(INADDR_ANY)) {
+ /* RDS socket does not allow re-binding. */
+ if (!ipv6_addr_any(&rs->rs_bound_addr)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ /* If the socket is already connected, the binding address should
+ * have the same scope ID as the connected address, except when one
+ * of them is a non-link-local address (scope_id is 0).
+ */
+ if (!ipv6_addr_any(&rs->rs_conn_addr) && scope_id &&
+ rs->rs_bound_scope_id &&
+ scope_id != rs->rs_bound_scope_id) {
ret = -EINVAL;
goto out;
}
- ret = rds_add_bound(rs, sin->sin_addr.s_addr, &sin->sin_port);
+ ret = rds_add_bound(rs, binding_addr, &port, scope_id);
if (ret)
goto out;
if (rs->rs_transport) { /* previously bound */
trans = rs->rs_transport;
if (trans->laddr_check(sock_net(sock->sk),
- sin->sin_addr.s_addr) != 0) {
+ binding_addr, scope_id) != 0) {
ret = -ENOPROTOOPT;
rds_remove_bound(rs);
} else {
@@ -172,13 +250,13 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
}
goto out;
}
- trans = rds_trans_get_preferred(sock_net(sock->sk),
- sin->sin_addr.s_addr);
+ trans = rds_trans_get_preferred(sock_net(sock->sk), binding_addr,
+ scope_id);
if (!trans) {
ret = -EADDRNOTAVAIL;
rds_remove_bound(rs);
- pr_info_ratelimited("RDS: %s could not find a transport for %pI4, load rds_tcp or rds_rdma?\n",
- __func__, &sin->sin_addr.s_addr);
+ pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
+ __func__, binding_addr);
goto out;
}
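Editor's note: a hedged user-space sketch of the new bind rules — IPv4 and IPv6 sockaddrs are both accepted, and a link-local IPv6 bind must carry a non-zero scope ID (the device name and port below are placeholders):

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>
	#include <net/if.h>
	#include <stdio.h>

	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);	/* AF_RDS socket */
	struct sockaddr_in6 sin6 = {
		.sin6_family   = AF_INET6,
		.sin6_port     = htons(4000),			/* example port */
		.sin6_scope_id = if_nametoindex("eth0"),	/* required for fe80:: */
	};

	inet_pton(AF_INET6, "fe80::1", &sin6.sin6_addr);
	if (bind(fd, (struct sockaddr *)&sin6, sizeof(sin6)) < 0)
		perror("bind");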
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 63da9d2f142d..ccdff09a79c8 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007 Oracle. All rights reserved.
+ * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -101,7 +101,7 @@ static DEFINE_RWLOCK(rds_cong_monitor_lock);
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;
-static struct rds_cong_map *rds_cong_tree_walk(__be32 addr,
+static struct rds_cong_map *rds_cong_tree_walk(const struct in6_addr *addr,
struct rds_cong_map *insert)
{
struct rb_node **p = &rds_cong_tree.rb_node;
@@ -109,12 +109,15 @@ static struct rds_cong_map *rds_cong_tree_walk(__be32 addr,
struct rds_cong_map *map;
while (*p) {
+ int diff;
+
parent = *p;
map = rb_entry(parent, struct rds_cong_map, m_rb_node);
- if (addr < map->m_addr)
+ diff = rds_addr_cmp(addr, &map->m_addr);
+ if (diff < 0)
p = &(*p)->rb_left;
- else if (addr > map->m_addr)
+ else if (diff > 0)
p = &(*p)->rb_right;
else
return map;
@@ -132,7 +135,7 @@ static struct rds_cong_map *rds_cong_tree_walk(__be32 addr,
* these bitmaps in the process getting pointers to them. The bitmaps are only
* ever freed as the module is removed after all connections have been freed.
*/
-static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
+static struct rds_cong_map *rds_cong_from_addr(const struct in6_addr *addr)
{
struct rds_cong_map *map;
struct rds_cong_map *ret = NULL;
@@ -144,7 +147,7 @@ static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
if (!map)
return NULL;
- map->m_addr = addr;
+ map->m_addr = *addr;
init_waitqueue_head(&map->m_waitq);
INIT_LIST_HEAD(&map->m_conn_list);
@@ -171,7 +174,7 @@ out:
kfree(map);
}
- rdsdebug("map %p for addr %x\n", ret, be32_to_cpu(addr));
+ rdsdebug("map %p for addr %pI6c\n", ret, addr);
return ret;
}
@@ -202,8 +205,8 @@ void rds_cong_remove_conn(struct rds_connection *conn)
int rds_cong_get_maps(struct rds_connection *conn)
{
- conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
- conn->c_fcong = rds_cong_from_addr(conn->c_faddr);
+ conn->c_lcong = rds_cong_from_addr(&conn->c_laddr);
+ conn->c_fcong = rds_cong_from_addr(&conn->c_faddr);
if (!(conn->c_lcong && conn->c_fcong))
return -ENOMEM;
@@ -353,7 +356,7 @@ void rds_cong_remove_socket(struct rds_sock *rs)
/* update congestion map for now-closed port */
spin_lock_irqsave(&rds_cong_lock, flags);
- map = rds_cong_tree_walk(rs->rs_bound_addr, NULL);
+ map = rds_cong_tree_walk(&rs->rs_bound_addr, NULL);
spin_unlock_irqrestore(&rds_cong_lock, flags);
if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
diff --git a/net/rds/connection.c b/net/rds/connection.c
index cfb05953b0e5..3bd2f4a5a30d 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +34,9 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
-#include <net/inet_hashtables.h>
+#include <net/ipv6.h>
+#include <net/inet6_hashtables.h>
+#include <net/addrconf.h>
#include "rds.h"
#include "loop.h"
@@ -49,18 +51,25 @@ static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;
-static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
+static struct hlist_head *rds_conn_bucket(const struct in6_addr *laddr,
+ const struct in6_addr *faddr)
{
+ static u32 rds6_hash_secret __read_mostly;
static u32 rds_hash_secret __read_mostly;
- unsigned long hash;
+ u32 lhash, fhash, hash;
net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));
+ net_get_random_once(&rds6_hash_secret, sizeof(rds6_hash_secret));
+
+ lhash = (__force u32)laddr->s6_addr32[3];
+#if IS_ENABLED(CONFIG_IPV6)
+ fhash = __ipv6_addr_jhash(faddr, rds6_hash_secret);
+#else
+ fhash = (__force u32)faddr->s6_addr32[3];
+#endif
+ hash = __inet_ehashfn(lhash, 0, fhash, 0, rds_hash_secret);
- /* Pass NULL, don't need struct net for hash */
- hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
- be32_to_cpu(faddr), 0,
- rds_hash_secret);
return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}
@@ -72,20 +81,25 @@ static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct net *net,
struct hlist_head *head,
- __be32 laddr, __be32 faddr,
- struct rds_transport *trans)
+ const struct in6_addr *laddr,
+ const struct in6_addr *faddr,
+ struct rds_transport *trans,
+ int dev_if)
{
struct rds_connection *conn, *ret = NULL;
hlist_for_each_entry_rcu(conn, head, c_hash_node) {
- if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
- conn->c_trans == trans && net == rds_conn_net(conn)) {
+ if (ipv6_addr_equal(&conn->c_faddr, faddr) &&
+ ipv6_addr_equal(&conn->c_laddr, laddr) &&
+ conn->c_trans == trans &&
+ net == rds_conn_net(conn) &&
+ conn->c_dev_if == dev_if) {
ret = conn;
break;
}
}
- rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
- &laddr, &faddr);
+ rdsdebug("returning conn %p for %pI6c -> %pI6c\n", ret,
+ laddr, faddr);
return ret;
}
@@ -99,8 +113,8 @@ static void rds_conn_path_reset(struct rds_conn_path *cp)
{
struct rds_connection *conn = cp->cp_conn;
- rdsdebug("connection %pI4 to %pI4 reset\n",
- &conn->c_laddr, &conn->c_faddr);
+ rdsdebug("connection %pI6c to %pI6c reset\n",
+ &conn->c_laddr, &conn->c_faddr);
rds_stats_inc(s_conn_reset);
rds_send_path_reset(cp);
@@ -142,9 +156,12 @@ static void __rds_conn_path_init(struct rds_connection *conn,
* are torn down as the module is removed, if ever.
*/
static struct rds_connection *__rds_conn_create(struct net *net,
- __be32 laddr, __be32 faddr,
- struct rds_transport *trans, gfp_t gfp,
- int is_outgoing)
+ const struct in6_addr *laddr,
+ const struct in6_addr *faddr,
+ struct rds_transport *trans,
+ gfp_t gfp,
+ int is_outgoing,
+ int dev_if)
{
struct rds_connection *conn, *parent = NULL;
struct hlist_head *head = rds_conn_bucket(laddr, faddr);
@@ -154,9 +171,12 @@ static struct rds_connection *__rds_conn_create(struct net *net,
int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);
rcu_read_lock();
- conn = rds_conn_lookup(net, head, laddr, faddr, trans);
- if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
- laddr == faddr && !is_outgoing) {
+ conn = rds_conn_lookup(net, head, laddr, faddr, trans, dev_if);
+ if (conn &&
+ conn->c_loopback &&
+ conn->c_trans != &rds_loop_transport &&
+ ipv6_addr_equal(laddr, faddr) &&
+ !is_outgoing) {
/* This is a looped back IB connection, and we're
* called by the code handling the incoming connect.
* We need a second connection object into which we
@@ -181,8 +201,22 @@ static struct rds_connection *__rds_conn_create(struct net *net,
}
INIT_HLIST_NODE(&conn->c_hash_node);
- conn->c_laddr = laddr;
- conn->c_faddr = faddr;
+ conn->c_laddr = *laddr;
+ conn->c_isv6 = !ipv6_addr_v4mapped(laddr);
+ conn->c_faddr = *faddr;
+ conn->c_dev_if = dev_if;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ /* If the local address is link local, set c_bound_if to be the
+ * index used for this connection. Otherwise, set it to 0 as
+ * the socket is not bound to an interface. c_bound_if is used
+ * to look up a socket when a packet is received
+ */
+ if (ipv6_addr_type(laddr) & IPV6_ADDR_LINKLOCAL)
+ conn->c_bound_if = dev_if;
+ else
+#endif
+ conn->c_bound_if = 0;
rds_conn_net_set(conn, net);
@@ -199,7 +233,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
* can bind to the destination address then we'd rather the messages
* flow through loopback rather than either transport.
*/
- loop_trans = rds_trans_get_preferred(net, faddr);
+ loop_trans = rds_trans_get_preferred(net, faddr, conn->c_dev_if);
if (loop_trans) {
rds_trans_put(loop_trans);
conn->c_loopback = 1;
@@ -233,10 +267,10 @@ static struct rds_connection *__rds_conn_create(struct net *net,
goto out;
}
- rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
- conn, &laddr, &faddr,
- strnlen(trans->t_name, sizeof(trans->t_name)) ? trans->t_name :
- "[unknown]", is_outgoing ? "(outgoing)" : "");
+ rdsdebug("allocated conn %p for %pI6c -> %pI6c over %s %s\n",
+ conn, laddr, faddr,
+ strnlen(trans->t_name, sizeof(trans->t_name)) ?
+ trans->t_name : "[unknown]", is_outgoing ? "(outgoing)" : "");
/*
* Since we ran without holding the conn lock, someone could
@@ -262,7 +296,8 @@ static struct rds_connection *__rds_conn_create(struct net *net,
/* Creating normal conn */
struct rds_connection *found;
- found = rds_conn_lookup(net, head, laddr, faddr, trans);
+ found = rds_conn_lookup(net, head, laddr, faddr, trans,
+ dev_if);
if (found) {
struct rds_conn_path *cp;
int i;
@@ -295,18 +330,22 @@ out:
}
struct rds_connection *rds_conn_create(struct net *net,
- __be32 laddr, __be32 faddr,
- struct rds_transport *trans, gfp_t gfp)
+ const struct in6_addr *laddr,
+ const struct in6_addr *faddr,
+ struct rds_transport *trans, gfp_t gfp,
+ int dev_if)
{
- return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
+ return __rds_conn_create(net, laddr, faddr, trans, gfp, 0, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
- __be32 laddr, __be32 faddr,
- struct rds_transport *trans, gfp_t gfp)
+ const struct in6_addr *laddr,
+ const struct in6_addr *faddr,
+ struct rds_transport *trans,
+ gfp_t gfp, int dev_if)
{
- return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
+ return __rds_conn_create(net, laddr, faddr, trans, gfp, 1, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
@@ -464,10 +503,23 @@ void rds_conn_destroy(struct rds_connection *conn)
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);
-static void rds_conn_message_info(struct socket *sock, unsigned int len,
- struct rds_info_iterator *iter,
- struct rds_info_lengths *lens,
- int want_send)
+static void __rds_inc_msg_cp(struct rds_incoming *inc,
+ struct rds_info_iterator *iter,
+ void *saddr, void *daddr, int flip, bool isv6)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ if (isv6)
+ rds6_inc_info_copy(inc, iter, saddr, daddr, flip);
+ else
+#endif
+ rds_inc_info_copy(inc, iter, *(__be32 *)saddr,
+ *(__be32 *)daddr, flip);
+}
+
+static void rds_conn_message_info_cmn(struct socket *sock, unsigned int len,
+ struct rds_info_iterator *iter,
+ struct rds_info_lengths *lens,
+ int want_send, bool isv6)
{
struct hlist_head *head;
struct list_head *list;
@@ -478,7 +530,10 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
size_t i;
int j;
- len /= sizeof(struct rds_info_message);
+ if (isv6)
+ len /= sizeof(struct rds6_info_message);
+ else
+ len /= sizeof(struct rds_info_message);
rcu_read_lock();
@@ -488,6 +543,9 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
struct rds_conn_path *cp;
int npaths;
+ if (!isv6 && conn->c_isv6)
+ continue;
+
npaths = (conn->c_trans->t_mp_capable ?
RDS_MPATH_WORKERS : 1);
@@ -504,11 +562,11 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
list_for_each_entry(rm, list, m_conn_item) {
total++;
if (total <= len)
- rds_inc_info_copy(&rm->m_inc,
- iter,
- conn->c_laddr,
- conn->c_faddr,
- 0);
+ __rds_inc_msg_cp(&rm->m_inc,
+ iter,
+ &conn->c_laddr,
+ &conn->c_faddr,
+ 0, isv6);
}
spin_unlock_irqrestore(&cp->cp_lock, flags);
@@ -518,9 +576,30 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
rcu_read_unlock();
lens->nr = total;
- lens->each = sizeof(struct rds_info_message);
+ if (isv6)
+ lens->each = sizeof(struct rds6_info_message);
+ else
+ lens->each = sizeof(struct rds_info_message);
}
+static void rds_conn_message_info(struct socket *sock, unsigned int len,
+ struct rds_info_iterator *iter,
+ struct rds_info_lengths *lens,
+ int want_send)
+{
+ rds_conn_message_info_cmn(sock, len, iter, lens, want_send, false);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void rds6_conn_message_info(struct socket *sock, unsigned int len,
+ struct rds_info_iterator *iter,
+ struct rds_info_lengths *lens,
+ int want_send)
+{
+ rds_conn_message_info_cmn(sock, len, iter, lens, want_send, true);
+}
+#endif
+
static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
@@ -528,6 +607,15 @@ static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
rds_conn_message_info(sock, len, iter, lens, 1);
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void rds6_conn_message_info_send(struct socket *sock, unsigned int len,
+ struct rds_info_iterator *iter,
+ struct rds_info_lengths *lens)
+{
+ rds6_conn_message_info(sock, len, iter, lens, 1);
+}
+#endif
+
static void rds_conn_message_info_retrans(struct socket *sock,
unsigned int len,
struct rds_info_iterator *iter,
@@ -536,6 +624,16 @@ static void rds_conn_message_info_retrans(struct socket *sock,
rds_conn_message_info(sock, len, iter, lens, 0);
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void rds6_conn_message_info_retrans(struct socket *sock,
+ unsigned int len,
+ struct rds_info_iterator *iter,
+ struct rds_info_lengths *lens)
+{
+ rds6_conn_message_info(sock, len, iter, lens, 0);
+}
+#endif
+
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens,
@@ -584,7 +682,6 @@ static void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
struct hlist_head *head;
struct rds_connection *conn;
size_t i;
- int j;
rcu_read_lock();
@@ -595,17 +692,20 @@ static void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
i++, head++) {
hlist_for_each_entry_rcu(conn, head, c_hash_node) {
struct rds_conn_path *cp;
- int npaths;
- npaths = (conn->c_trans->t_mp_capable ?
- RDS_MPATH_WORKERS : 1);
- for (j = 0; j < npaths; j++) {
- cp = &conn->c_path[j];
+ /* XXX We only copy the information from the first
+ * path for now. The problem is that if there is
+ * more than one underlying path, we cannot report
+ * information about all of them using the existing
+ * API. For example, there is only one next_tx_seq;
+ * which path's next_tx_seq should we report? It is
+ * a bug in the design of MPRDS.
+ */
+ cp = conn->c_path;
- /* XXX no cp_lock usage.. */
- if (!visitor(cp, buffer))
- continue;
- }
+ /* XXX no cp_lock usage.. */
+ if (!visitor(cp, buffer))
+ continue;
/* We copy as much as we can fit in the buffer,
* but we count all items so that the caller
@@ -624,12 +724,16 @@ static void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
struct rds_info_connection *cinfo = buffer;
+ struct rds_connection *conn = cp->cp_conn;
+
+ if (conn->c_isv6)
+ return 0;
cinfo->next_tx_seq = cp->cp_next_tx_seq;
cinfo->next_rx_seq = cp->cp_next_rx_seq;
- cinfo->laddr = cp->cp_conn->c_laddr;
- cinfo->faddr = cp->cp_conn->c_faddr;
- strncpy(cinfo->transport, cp->cp_conn->c_trans->t_name,
+ cinfo->laddr = conn->c_laddr.s6_addr32[3];
+ cinfo->faddr = conn->c_faddr.s6_addr32[3];
+ strncpy(cinfo->transport, conn->c_trans->t_name,
sizeof(cinfo->transport));
cinfo->flags = 0;
@@ -645,6 +749,36 @@ static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
return 1;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static int rds6_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
+{
+ struct rds6_info_connection *cinfo6 = buffer;
+ struct rds_connection *conn = cp->cp_conn;
+
+ cinfo6->next_tx_seq = cp->cp_next_tx_seq;
+ cinfo6->next_rx_seq = cp->cp_next_rx_seq;
+ cinfo6->laddr = conn->c_laddr;
+ cinfo6->faddr = conn->c_faddr;
+ strncpy(cinfo6->transport, conn->c_trans->t_name,
+ sizeof(cinfo6->transport));
+ cinfo6->flags = 0;
+
+ rds_conn_info_set(cinfo6->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
+ SENDING);
+ /* XXX Future: return the state rather than these funky bits */
+ rds_conn_info_set(cinfo6->flags,
+ atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
+ CONNECTING);
+ rds_conn_info_set(cinfo6->flags,
+ atomic_read(&cp->cp_state) == RDS_CONN_UP,
+ CONNECTED);
+ /* Just return 1 as there is no error case. This is a helper function
+ * for rds_walk_conn_path_info() and it wants a return value.
+ */
+ return 1;
+}
+#endif
+
static void rds_conn_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
@@ -657,6 +791,20 @@ static void rds_conn_info(struct socket *sock, unsigned int len,
sizeof(struct rds_info_connection));
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void rds6_conn_info(struct socket *sock, unsigned int len,
+ struct rds_info_iterator *iter,
+ struct rds_info_lengths *lens)
+{
+ u64 buffer[(sizeof(struct rds6_info_connection) + 7) / 8];
+
+ rds_walk_conn_path_info(sock, len, iter, lens,
+ rds6_conn_info_visitor,
+ buffer,
+ sizeof(struct rds6_info_connection));
+}
+#endif
+
int rds_conn_init(void)
{
int ret;
@@ -678,7 +826,13 @@ int rds_conn_init(void)
rds_conn_message_info_send);
rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
rds_conn_message_info_retrans);
-
+#if IS_ENABLED(CONFIG_IPV6)
+ rds_info_register_func(RDS6_INFO_CONNECTIONS, rds6_conn_info);
+ rds_info_register_func(RDS6_INFO_SEND_MESSAGES,
+ rds6_conn_message_info_send);
+ rds_info_register_func(RDS6_INFO_RETRANS_MESSAGES,
+ rds6_conn_message_info_retrans);
+#endif
return 0;
}
@@ -696,6 +850,13 @@ void rds_conn_exit(void)
rds_conn_message_info_send);
rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
rds_conn_message_info_retrans);
+#if IS_ENABLED(CONFIG_IPV6)
+ rds_info_deregister_func(RDS6_INFO_CONNECTIONS, rds6_conn_info);
+ rds_info_deregister_func(RDS6_INFO_SEND_MESSAGES,
+ rds6_conn_message_info_send);
+ rds_info_deregister_func(RDS6_INFO_RETRANS_MESSAGES,
+ rds6_conn_message_info_retrans);
+#endif
}
/*
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 683b55d4e2b0..c1d97640c0be 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -39,6 +39,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <net/addrconf.h>
#include "rds_single_path.h"
#include "rds.h"
@@ -295,9 +296,11 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
/* We will only ever look at IB transports */
if (conn->c_trans != &rds_ib_transport)
return 0;
+ if (conn->c_isv6)
+ return 0;
- iinfo->src_addr = conn->c_laddr;
- iinfo->dst_addr = conn->c_faddr;
+ iinfo->src_addr = conn->c_laddr.s6_addr32[3];
+ iinfo->dst_addr = conn->c_faddr.s6_addr32[3];
memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
@@ -318,6 +321,45 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
return 1;
}
+#if IS_ENABLED(CONFIG_IPV6)
+/* IPv6 version of rds_ib_conn_info_visitor(). */
+static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
+ void *buffer)
+{
+ struct rds6_info_rdma_connection *iinfo6 = buffer;
+ struct rds_ib_connection *ic;
+
+ /* We will only ever look at IB transports */
+ if (conn->c_trans != &rds_ib_transport)
+ return 0;
+
+ iinfo6->src_addr = conn->c_laddr;
+ iinfo6->dst_addr = conn->c_faddr;
+
+ memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid));
+ memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid));
+
+ if (rds_conn_state(conn) == RDS_CONN_UP) {
+ struct rds_ib_device *rds_ibdev;
+ struct rdma_dev_addr *dev_addr;
+
+ ic = conn->c_transport_data;
+ dev_addr = &ic->i_cm_id->route.addr.dev_addr;
+ rdma_addr_get_sgid(dev_addr,
+ (union ib_gid *)&iinfo6->src_gid);
+ rdma_addr_get_dgid(dev_addr,
+ (union ib_gid *)&iinfo6->dst_gid);
+
+ rds_ibdev = ic->rds_ibdev;
+ iinfo6->max_send_wr = ic->i_send_ring.w_nr;
+ iinfo6->max_recv_wr = ic->i_recv_ring.w_nr;
+ iinfo6->max_send_sge = rds_ibdev->max_sge;
+ rds6_ib_get_mr_info(rds_ibdev, iinfo6);
+ }
+ return 1;
+}
+#endif
+
static void rds_ib_ic_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
@@ -330,6 +372,20 @@ static void rds_ib_ic_info(struct socket *sock, unsigned int len,
sizeof(struct rds_info_rdma_connection));
}
+#if IS_ENABLED(CONFIG_IPV6)
+/* IPv6 version of rds_ib_ic_info(). */
+static void rds6_ib_ic_info(struct socket *sock, unsigned int len,
+ struct rds_info_iterator *iter,
+ struct rds_info_lengths *lens)
+{
+ u64 buffer[(sizeof(struct rds6_info_rdma_connection) + 7) / 8];
+
+ rds_for_each_conn_info(sock, len, iter, lens,
+ rds6_ib_conn_info_visitor,
+ buffer,
+ sizeof(struct rds6_info_rdma_connection));
+}
+#endif
/*
* Early RDS/IB was built to only bind to an address if there is an IPoIB
@@ -341,12 +397,19 @@ static void rds_ib_ic_info(struct socket *sock, unsigned int len,
* allowed to influence which paths have priority. We could call userspace
* asserting this policy "routing".
*/
-static int rds_ib_laddr_check(struct net *net, __be32 addr)
+static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr,
+ __u32 scope_id)
{
int ret;
struct rdma_cm_id *cm_id;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct sockaddr_in6 sin6;
+#endif
struct sockaddr_in sin;
+ struct sockaddr *sa;
+ bool isv4;
+ isv4 = ipv6_addr_v4mapped(addr);
/* Create a CMA ID and try to bind it. This catches both
* IB and iWARP capable NICs.
*/
@@ -355,22 +418,66 @@ static int rds_ib_laddr_check(struct net *net, __be32 addr)
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
- memset(&sin, 0, sizeof(sin));
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = addr;
+ if (isv4) {
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = addr->s6_addr32[3];
+ sa = (struct sockaddr *)&sin;
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_addr = *addr;
+ sin6.sin6_scope_id = scope_id;
+ sa = (struct sockaddr *)&sin6;
+
+ /* XXX Do a special IPv6 link local address check here. The
+ * reason is that rdma_bind_addr() always succeeds with IPv6
+ * link local address regardless it is indeed configured in a
+ * system.
+ */
+ if (ipv6_addr_type(addr) & IPV6_ADDR_LINKLOCAL) {
+ struct net_device *dev;
+
+ if (scope_id == 0) {
+ ret = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ /* Use init_net for now as RDS is not network
+ * namespace aware.
+ */
+ dev = dev_get_by_index(&init_net, scope_id);
+ if (!dev) {
+ ret = -EADDRNOTAVAIL;
+ goto out;
+ }
+ if (!ipv6_chk_addr(&init_net, addr, dev, 1)) {
+ dev_put(dev);
+ ret = -EADDRNOTAVAIL;
+ goto out;
+ }
+ dev_put(dev);
+ }
+#else
+ ret = -EADDRNOTAVAIL;
+ goto out;
+#endif
+ }
/* rdma_bind_addr will only succeed for IB & iWARP devices */
- ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
+ ret = rdma_bind_addr(cm_id, sa);
/* due to this, we will claim to support iWARP devices unless we
check node_type. */
if (ret || !cm_id->device ||
cm_id->device->node_type != RDMA_NODE_IB_CA)
ret = -EADDRNOTAVAIL;
- rdsdebug("addr %pI4 ret %d node type %d\n",
- &addr, ret,
- cm_id->device ? cm_id->device->node_type : -1);
+ rdsdebug("addr %pI6c%%%u ret %d node type %d\n",
+ addr, scope_id, ret,
+ cm_id->device ? cm_id->device->node_type : -1);
+out:
rdma_destroy_id(cm_id);
return ret;
@@ -401,6 +508,9 @@ void rds_ib_exit(void)
rds_ib_set_unloading();
synchronize_rcu();
rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
+#if IS_ENABLED(CONFIG_IPV6)
+ rds_info_deregister_func(RDS6_INFO_IB_CONNECTIONS, rds6_ib_ic_info);
+#endif
rds_ib_unregister_client();
rds_ib_destroy_nodev_conns();
rds_ib_sysctl_exit();
@@ -462,6 +572,9 @@ int rds_ib_init(void)
rds_trans_register(&rds_ib_transport);
rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
+#if IS_ENABLED(CONFIG_IPV6)
+ rds_info_register_func(RDS6_INFO_IB_CONNECTIONS, rds6_ib_ic_info);
+#endif
goto out;
@@ -476,4 +589,3 @@ out:
}
MODULE_LICENSE("GPL");
-
diff --git a/net/rds/ib.h b/net/rds/ib.h
index a6f4d7d68e95..73427ff439f9 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -57,16 +57,44 @@ struct rds_ib_refill_cache {
struct list_head *ready;
};
+/* This is the common structure for the IB private data exchange in setting up
+ * an RDS connection. The exchange is different for IPv4 and IPv6 connections.
+ * The reason is that the address size is different and the addresses
+ * exchanged are at the beginning of the structure. Hence the same
+ * structure cannot be used interoperably for both.
+ */
+struct rds_ib_conn_priv_cmn {
+ u8 ricpc_protocol_major;
+ u8 ricpc_protocol_minor;
+ __be16 ricpc_protocol_minor_mask; /* bitmask */
+ __be32 ricpc_reserved1;
+ __be64 ricpc_ack_seq;
+ __be32 ricpc_credit; /* non-zero enables flow ctl */
+};
+
struct rds_ib_connect_private {
/* Add new fields at the end, and don't permute existing fields. */
- __be32 dp_saddr;
- __be32 dp_daddr;
- u8 dp_protocol_major;
- u8 dp_protocol_minor;
- __be16 dp_protocol_minor_mask; /* bitmask */
- __be32 dp_reserved1;
- __be64 dp_ack_seq;
- __be32 dp_credit; /* non-zero enables flow ctl */
+ __be32 dp_saddr;
+ __be32 dp_daddr;
+ struct rds_ib_conn_priv_cmn dp_cmn;
+};
+
+struct rds6_ib_connect_private {
+ /* Add new fields at the end, and don't permute existing fields. */
+ struct in6_addr dp_saddr;
+ struct in6_addr dp_daddr;
+ struct rds_ib_conn_priv_cmn dp_cmn;
+};
+
+#define dp_protocol_major dp_cmn.ricpc_protocol_major
+#define dp_protocol_minor dp_cmn.ricpc_protocol_minor
+#define dp_protocol_minor_mask dp_cmn.ricpc_protocol_minor_mask
+#define dp_ack_seq dp_cmn.ricpc_ack_seq
+#define dp_credit dp_cmn.ricpc_credit
+
+union rds_ib_conn_priv {
+ struct rds_ib_connect_private ricp_v4;
+ struct rds6_ib_connect_private ricp_v6;
};
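
The dp_* macros above alias the old v4 field names onto the shared common block, so existing v4 code keeps compiling unchanged. A minimal sketch of what the preprocessor produces:

	struct rds_ib_connect_private dp;

	dp.dp_credit = cpu_to_be32(16);
	/* expands to: dp.dp_cmn.ricpc_credit = cpu_to_be32(16); */
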
struct rds_ib_send_work {
@@ -351,8 +379,8 @@ void rds_ib_listen_stop(void);
__printf(2, 3)
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
- struct rdma_cm_event *event);
-int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
+ struct rdma_cm_event *event, bool isv6);
+int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
struct rdma_cm_event *event);
@@ -361,7 +389,8 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn,
__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
/* ib_rdma.c */
-int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
+int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
+ struct in6_addr *ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
@@ -371,7 +400,7 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv_path(struct rds_conn_path *conn);
-int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
+int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index f1684ae6abfd..bfbb31f0c7fd 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>
+#include <net/addrconf.h>
#include "rds_single_path.h"
#include "rds.h"
@@ -95,25 +96,45 @@ rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
*/
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
- const struct rds_ib_connect_private *dp = NULL;
struct rds_ib_connection *ic = conn->c_transport_data;
+ const union rds_ib_conn_priv *dp = NULL;
struct ib_qp_attr qp_attr;
+ __be64 ack_seq = 0;
+ __be32 credit = 0;
+ u8 major = 0;
+ u8 minor = 0;
int err;
- if (event->param.conn.private_data_len >= sizeof(*dp)) {
- dp = event->param.conn.private_data;
-
- /* make sure it isn't empty data */
- if (dp->dp_protocol_major) {
- rds_ib_set_protocol(conn,
- RDS_PROTOCOL(dp->dp_protocol_major,
- dp->dp_protocol_minor));
- rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
+ dp = event->param.conn.private_data;
+ if (conn->c_isv6) {
+ if (event->param.conn.private_data_len >=
+ sizeof(struct rds6_ib_connect_private)) {
+ major = dp->ricp_v6.dp_protocol_major;
+ minor = dp->ricp_v6.dp_protocol_minor;
+ credit = dp->ricp_v6.dp_credit;
+ /* The dp structure start is not guaranteed to be 8-byte
+ * aligned. Since dp_ack_seq is 64 bits wide, extended
+ * load operations could be used on it, so go through
+ * get_unaligned to avoid unaligned access faults.
+ */
+ ack_seq = get_unaligned(&dp->ricp_v6.dp_ack_seq);
}
+ } else if (event->param.conn.private_data_len >=
+ sizeof(struct rds_ib_connect_private)) {
+ major = dp->ricp_v4.dp_protocol_major;
+ minor = dp->ricp_v4.dp_protocol_minor;
+ credit = dp->ricp_v4.dp_credit;
+ ack_seq = get_unaligned(&dp->ricp_v4.dp_ack_seq);
+ }
+
+ /* make sure it isn't empty data */
+ if (major) {
+ rds_ib_set_protocol(conn, RDS_PROTOCOL(major, minor));
+ rds_ib_set_flow_control(conn, be32_to_cpu(credit));
}
if (conn->c_version < RDS_PROTOCOL(3, 1)) {
- pr_notice("RDS/IB: Connection <%pI4,%pI4> version %u.%u no longer supported\n",
+ pr_notice("RDS/IB: Connection <%pI6c,%pI6c> version %u.%u no longer supported\n",
&conn->c_laddr, &conn->c_faddr,
RDS_PROTOCOL_MAJOR(conn->c_version),
RDS_PROTOCOL_MINOR(conn->c_version));
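
A minimal sketch of the unaligned read described in the comment above (function name hypothetical; get_unaligned() comes from <asm/unaligned.h>):

#include <asm/unaligned.h>

/* Read the 64-bit big-endian ack field from CM private data that may not
 * be 8-byte aligned. */
static u64 example_read_ack_seq(const struct rds6_ib_connect_private *dp)
{
	__be64 raw = get_unaligned(&dp->dp_ack_seq);

	return be64_to_cpu(raw);
}
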
@@ -121,7 +142,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
rds_conn_destroy(conn);
return;
} else {
- pr_notice("RDS/IB: %s conn connected <%pI4,%pI4> version %u.%u%s\n",
+ pr_notice("RDS/IB: %s conn connected <%pI6c,%pI6c> version %u.%u%s\n",
ic->i_active_side ? "Active" : "Passive",
&conn->c_laddr, &conn->c_faddr,
RDS_PROTOCOL_MAJOR(conn->c_version),
@@ -150,7 +171,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);
/* update ib_device with this local ipaddr */
- err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
+ err = rds_ib_update_ipaddr(ic->rds_ibdev, &conn->c_laddr);
if (err)
printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
err);
@@ -158,14 +179,8 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
/* If the peer gave us the last packet it saw, process this as if
* we had received a regular ACK. */
if (dp) {
- /* dp structure start is not guaranteed to be 8 bytes aligned.
- * Since dp_ack_seq is 64-bit extended load operations can be
- * used so go through get_unaligned to avoid unaligned errors.
- */
- __be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq);
-
- if (dp_ack_seq)
- rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq),
+ if (ack_seq)
+ rds_send_drop_acked(conn, be64_to_cpu(ack_seq),
NULL);
}
@@ -173,11 +188,12 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
}
static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
- struct rdma_conn_param *conn_param,
- struct rds_ib_connect_private *dp,
- u32 protocol_version,
- u32 max_responder_resources,
- u32 max_initiator_depth)
+ struct rdma_conn_param *conn_param,
+ union rds_ib_conn_priv *dp,
+ u32 protocol_version,
+ u32 max_responder_resources,
+ u32 max_initiator_depth,
+ bool isv6)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
@@ -193,24 +209,49 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
if (dp) {
memset(dp, 0, sizeof(*dp));
- dp->dp_saddr = conn->c_laddr;
- dp->dp_daddr = conn->c_faddr;
- dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
- dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
- dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
- dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));
+ if (isv6) {
+ dp->ricp_v6.dp_saddr = conn->c_laddr;
+ dp->ricp_v6.dp_daddr = conn->c_faddr;
+ dp->ricp_v6.dp_protocol_major =
+ RDS_PROTOCOL_MAJOR(protocol_version);
+ dp->ricp_v6.dp_protocol_minor =
+ RDS_PROTOCOL_MINOR(protocol_version);
+ dp->ricp_v6.dp_protocol_minor_mask =
+ cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
+ dp->ricp_v6.dp_ack_seq =
+ cpu_to_be64(rds_ib_piggyb_ack(ic));
+
+ conn_param->private_data = &dp->ricp_v6;
+ conn_param->private_data_len = sizeof(dp->ricp_v6);
+ } else {
+ dp->ricp_v4.dp_saddr = conn->c_laddr.s6_addr32[3];
+ dp->ricp_v4.dp_daddr = conn->c_faddr.s6_addr32[3];
+ dp->ricp_v4.dp_protocol_major =
+ RDS_PROTOCOL_MAJOR(protocol_version);
+ dp->ricp_v4.dp_protocol_minor =
+ RDS_PROTOCOL_MINOR(protocol_version);
+ dp->ricp_v4.dp_protocol_minor_mask =
+ cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
+ dp->ricp_v4.dp_ack_seq =
+ cpu_to_be64(rds_ib_piggyb_ack(ic));
+
+ conn_param->private_data = &dp->ricp_v4;
+ conn_param->private_data_len = sizeof(dp->ricp_v4);
+ }
/* Advertise flow control */
if (ic->i_flowctl) {
unsigned int credits;
- credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
- dp->dp_credit = cpu_to_be32(credits);
- atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
+ credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
+ if (isv6)
+ dp->ricp_v6.dp_credit = cpu_to_be32(credits);
+ else
+ dp->ricp_v4.dp_credit = cpu_to_be32(credits);
+ atomic_sub(IB_SET_POST_CREDITS(credits),
+ &ic->i_credits);
}
-
- conn_param->private_data = dp;
- conn_param->private_data_len = sizeof(*dp);
}
}
@@ -349,7 +390,7 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
break;
default:
rdsdebug("Fatal QP Event %u (%s) "
- "- connection %pI4->%pI4, reconnecting\n",
+ "- connection %pI6c->%pI6c, reconnecting\n",
event->event, ib_event_msg(event->event),
&conn->c_laddr, &conn->c_faddr);
rds_conn_drop(conn);
@@ -580,11 +621,13 @@ out:
return ret;
}
-static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
+static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event, bool isv6)
{
- const struct rds_ib_connect_private *dp = event->param.conn.private_data;
- u16 common;
+ const union rds_ib_conn_priv *dp = event->param.conn.private_data;
+ u8 data_len, major, minor;
u32 version = 0;
+ __be16 mask;
+ u16 common;
/*
* rdma_cm private data is odd - when there is any private data in the
@@ -603,51 +646,140 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
return 0;
}
+ if (isv6) {
+ data_len = sizeof(struct rds6_ib_connect_private);
+ major = dp->ricp_v6.dp_protocol_major;
+ minor = dp->ricp_v6.dp_protocol_minor;
+ mask = dp->ricp_v6.dp_protocol_minor_mask;
+ } else {
+ data_len = sizeof(struct rds_ib_connect_private);
+ major = dp->ricp_v4.dp_protocol_major;
+ minor = dp->ricp_v4.dp_protocol_minor;
+ mask = dp->ricp_v4.dp_protocol_minor_mask;
+ }
+
/* Even if len is crap *now* I still want to check it. -ASG */
- if (event->param.conn.private_data_len < sizeof (*dp) ||
- dp->dp_protocol_major == 0)
+ if (event->param.conn.private_data_len < data_len || major == 0)
return RDS_PROTOCOL_3_0;
- common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
- if (dp->dp_protocol_major == 3 && common) {
+ common = be16_to_cpu(mask) & RDS_IB_SUPPORTED_PROTOCOLS;
+ if (major == 3 && common) {
version = RDS_PROTOCOL_3_0;
while ((common >>= 1) != 0)
version++;
- } else
- printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
- &dp->dp_saddr,
- dp->dp_protocol_major,
- dp->dp_protocol_minor);
+ } else {
+ if (isv6)
+ printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI6c using incompatible protocol version %u.%u\n",
+ &dp->ricp_v6.dp_saddr, major, minor);
+ else
+ printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
+ &dp->ricp_v4.dp_saddr, major, minor);
+ }
return version;
}
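
A worked example of the minor-version negotiation above (mask values assumed): if the peer advertises minor mask 0x0003 and RDS_IB_SUPPORTED_PROTOCOLS is also 0x0003, then:

	/* common = 0x0003, version starts at RDS_PROTOCOL_3_0 (0x0300)
	 * common >>= 1  ->  0x0001 (non-zero), version++  ->  0x0301
	 * common >>= 1  ->  0x0000, loop ends
	 * Result: RDS_PROTOCOL(3, 1), the highest minor both sides support.
	 */
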
+#if IS_ENABLED(CONFIG_IPV6)
+/* Given an IPv6 address, find the net_device which hosts that address and
+ * return its index. This is used by the rds_ib_cm_handle_connect() code to
+ * find the index of the interface an incoming request arrives on when
+ * the request uses a link local address.
+ *
+ * Note one problem with this search: it is possible for two interfaces to
+ * have the same link local address. Unfortunately, this cannot be solved
+ * unless the underlying layer tells us the interface on which an incoming
+ * RDMA connect request arrives.
+ */
+static u32 __rds_find_ifindex(struct net *net, const struct in6_addr *addr)
+{
+ struct net_device *dev;
+ int idx = 0;
+
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
+ if (ipv6_chk_addr(net, addr, dev, 1)) {
+ idx = dev->ifindex;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return idx;
+}
+#endif
+
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
- struct rdma_cm_event *event)
+ struct rdma_cm_event *event, bool isv6)
{
__be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
__be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
- const struct rds_ib_connect_private *dp = event->param.conn.private_data;
- struct rds_ib_connect_private dp_rep;
+ const struct rds_ib_conn_priv_cmn *dp_cmn;
struct rds_connection *conn = NULL;
struct rds_ib_connection *ic = NULL;
struct rdma_conn_param conn_param;
+ const union rds_ib_conn_priv *dp;
+ union rds_ib_conn_priv dp_rep;
+ struct in6_addr s_mapped_addr;
+ struct in6_addr d_mapped_addr;
+ const struct in6_addr *saddr6;
+ const struct in6_addr *daddr6;
+ int destroy = 1;
+ u32 ifindex = 0;
u32 version;
- int err = 1, destroy = 1;
+ int err = 1;
/* Check whether the remote protocol version matches ours. */
- version = rds_ib_protocol_compatible(event);
+ version = rds_ib_protocol_compatible(event, isv6);
if (!version)
goto out;
- rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid "
- "0x%llx\n", &dp->dp_saddr, &dp->dp_daddr,
+ dp = event->param.conn.private_data;
+ if (isv6) {
+#if IS_ENABLED(CONFIG_IPV6)
+ dp_cmn = &dp->ricp_v6.dp_cmn;
+ saddr6 = &dp->ricp_v6.dp_saddr;
+ daddr6 = &dp->ricp_v6.dp_daddr;
+ /* If either address is link local, we need to find the
+ * interface index in order to create a proper RDS
+ * connection.
+ */
+ if (ipv6_addr_type(daddr6) & IPV6_ADDR_LINKLOCAL) {
+ /* Using init_net for now .. */
+ ifindex = __rds_find_ifindex(&init_net, daddr6);
+ /* No index found... Need to bail out. */
+ if (ifindex == 0) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ } else if (ipv6_addr_type(saddr6) & IPV6_ADDR_LINKLOCAL) {
+ /* Use our address to find the correct index. */
+ ifindex = __rds_find_ifindex(&init_net, daddr6);
+ /* No index found... Need to bail out. */
+ if (ifindex == 0) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+#else
+ err = -EOPNOTSUPP;
+ goto out;
+#endif
+ } else {
+ dp_cmn = &dp->ricp_v4.dp_cmn;
+ ipv6_addr_set_v4mapped(dp->ricp_v4.dp_saddr, &s_mapped_addr);
+ ipv6_addr_set_v4mapped(dp->ricp_v4.dp_daddr, &d_mapped_addr);
+ saddr6 = &s_mapped_addr;
+ daddr6 = &d_mapped_addr;
+ }
+
+ rdsdebug("saddr %pI6c daddr %pI6c RDSv%u.%u lguid 0x%llx fguid "
+ "0x%llx\n", saddr6, daddr6,
RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
(unsigned long long)be64_to_cpu(lguid),
(unsigned long long)be64_to_cpu(fguid));
/* RDS/IB is not currently netns aware, thus init_net */
- conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
- &rds_ib_transport, GFP_KERNEL);
+ conn = rds_conn_create(&init_net, daddr6, saddr6,
+ &rds_ib_transport, GFP_KERNEL, ifindex);
if (IS_ERR(conn)) {
rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
conn = NULL;
@@ -678,12 +810,13 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
ic = conn->c_transport_data;
rds_ib_set_protocol(conn, version);
- rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
+ rds_ib_set_flow_control(conn, be32_to_cpu(dp_cmn->ricpc_credit));
/* If the peer gave us the last packet it saw, process this as if
* we had received a regular ACK. */
- if (dp->dp_ack_seq)
- rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);
+ if (dp_cmn->ricpc_ack_seq)
+ rds_send_drop_acked(conn, be64_to_cpu(dp_cmn->ricpc_ack_seq),
+ NULL);
BUG_ON(cm_id->context);
BUG_ON(ic->i_cm_id);
@@ -702,8 +835,8 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
}
rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
- event->param.conn.responder_resources,
- event->param.conn.initiator_depth);
+ event->param.conn.responder_resources,
+ event->param.conn.initiator_depth, isv6);
/* rdma_accept() calls rdma_reject() internally if it fails */
if (rdma_accept(cm_id, &conn_param))
@@ -718,12 +851,12 @@ out:
}
-int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
+int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
{
struct rds_connection *conn = cm_id->context;
struct rds_ib_connection *ic = conn->c_transport_data;
struct rdma_conn_param conn_param;
- struct rds_ib_connect_private dp;
+ union rds_ib_conn_priv dp;
int ret;
/* If the peer doesn't do protocol negotiation, we must
@@ -738,7 +871,7 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
}
rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
- UINT_MAX, UINT_MAX);
+ UINT_MAX, UINT_MAX, isv6);
ret = rdma_connect(cm_id, &conn_param);
if (ret)
rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);
@@ -758,13 +891,22 @@ out:
int rds_ib_conn_path_connect(struct rds_conn_path *cp)
{
struct rds_connection *conn = cp->cp_conn;
- struct rds_ib_connection *ic = conn->c_transport_data;
- struct sockaddr_in src, dest;
+ struct sockaddr_storage src, dest;
+ rdma_cm_event_handler handler;
+ struct rds_ib_connection *ic;
int ret;
+ ic = conn->c_transport_data;
+
/* XXX I wonder what effect the port space has */
/* delegate cm event handler to rdma_transport */
- ic->i_cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, conn,
+#if IS_ENABLED(CONFIG_IPV6)
+ if (conn->c_isv6)
+ handler = rds6_rdma_cm_event_handler;
+ else
+#endif
+ handler = rds_rdma_cm_event_handler;
+ ic->i_cm_id = rdma_create_id(&init_net, handler, conn,
RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(ic->i_cm_id)) {
ret = PTR_ERR(ic->i_cm_id);
@@ -775,13 +917,33 @@ int rds_ib_conn_path_connect(struct rds_conn_path *cp)
rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);
- src.sin_family = AF_INET;
- src.sin_addr.s_addr = (__force u32)conn->c_laddr;
- src.sin_port = (__force u16)htons(0);
+ if (ipv6_addr_v4mapped(&conn->c_faddr)) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)&src;
+ sin->sin_family = AF_INET;
+ sin->sin_addr.s_addr = conn->c_laddr.s6_addr32[3];
+ sin->sin_port = 0;
- dest.sin_family = AF_INET;
- dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
- dest.sin_port = (__force u16)htons(RDS_PORT);
+ sin = (struct sockaddr_in *)&dest;
+ sin->sin_family = AF_INET;
+ sin->sin_addr.s_addr = conn->c_faddr.s6_addr32[3];
+ sin->sin_port = htons(RDS_PORT);
+ } else {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&src;
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_addr = conn->c_laddr;
+ sin6->sin6_port = 0;
+ sin6->sin6_scope_id = conn->c_dev_if;
+
+ sin6 = (struct sockaddr_in6 *)&dest;
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_addr = conn->c_faddr;
+ sin6->sin6_port = htons(RDS_CM_PORT);
+ sin6->sin6_scope_id = conn->c_dev_if;
+ }
ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
(struct sockaddr *)&dest,
@@ -949,7 +1111,7 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
if (!ic)
return -ENOMEM;
- ret = rds_ib_recv_alloc_caches(ic);
+ ret = rds_ib_recv_alloc_caches(ic, gfp);
if (ret) {
kfree(ic);
return ret;
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index b371cf08b1fc..6431a023ac89 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -61,6 +61,7 @@ static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
pool->fmr_attr.max_pages);
if (IS_ERR(frmr->mr)) {
pr_warn("RDS/IB: %s failed to allocate MR", __func__);
+ err = PTR_ERR(frmr->mr);
goto out_no_cigar;
}
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
index 655f01d427fe..5da12c248431 100644
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -113,6 +113,8 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev,
int npages);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
struct rds_info_rdma_connection *iinfo);
+void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
+ struct rds6_info_rdma_connection *iinfo6);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
struct rds_sock *rs, u32 *key_ret,
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 2e49a40a5e11..63c8d107adcf 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -100,18 +100,19 @@ static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
kfree_rcu(to_free, rcu);
}
-int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
+int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
+ struct in6_addr *ipaddr)
{
struct rds_ib_device *rds_ibdev_old;
- rds_ibdev_old = rds_ib_get_device(ipaddr);
+ rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]);
if (!rds_ibdev_old)
- return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
+ return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
if (rds_ibdev_old != rds_ibdev) {
- rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
+ rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]);
rds_ib_dev_put(rds_ibdev_old);
- return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
+ return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
}
rds_ib_dev_put(rds_ibdev_old);
@@ -179,6 +180,17 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_co
iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}
+#if IS_ENABLED(CONFIG_IPV6)
+void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
+ struct rds6_info_rdma_connection *iinfo6)
+{
+ struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;
+
+ iinfo6->rdma_mr_max = pool_1m->max_items;
+ iinfo6->rdma_mr_size = pool_1m->fmr_attr.max_pages;
+}
+#endif
+
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
struct rds_ib_mr *ibmr = NULL;
@@ -545,7 +557,7 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
struct rds_ib_connection *ic = NULL;
int ret;
- rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
+ rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
if (!rds_ibdev) {
ret = -ENODEV;
goto out;
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 4c5a937304b2..2f16146e4ec9 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -98,12 +98,12 @@ static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
}
}
-static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
+static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp)
{
struct rds_ib_cache_head *head;
int cpu;
- cache->percpu = alloc_percpu(struct rds_ib_cache_head);
+ cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);
if (!cache->percpu)
return -ENOMEM;
@@ -118,13 +118,13 @@ static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
return 0;
}
-int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
+int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
{
int ret;
- ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
+ ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
if (!ret) {
- ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
+ ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
if (ret)
free_percpu(ic->i_cache_incs.percpu);
}
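
The new gfp argument lets the caller's allocation context reach alloc_percpu_gfp() instead of being hard-coded. A minimal sketch (caller name hypothetical):

/* Hypothetical caller: forward whatever gfp mask the connection
 * allocator was itself invoked with (e.g. GFP_KERNEL). */
static int example_conn_cache_alloc(struct rds_ib_connection *ic, gfp_t gfp)
{
	return rds_ib_recv_alloc_caches(ic, gfp);
}
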
@@ -266,7 +266,7 @@ static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *i
rds_ib_stats_inc(s_ib_rx_total_incs);
}
INIT_LIST_HEAD(&ibinc->ii_frags);
- rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);
+ rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr);
return ibinc;
}
@@ -376,8 +376,6 @@ static void release_refill(struct rds_connection *conn)
* This tries to allocate and post unused work requests after making sure that
* they have all the allocations they need to queue received fragments into
* sockets.
- *
- * -1 is returned if posting fails due to temporary resource exhaustion.
*/
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
{
@@ -419,7 +417,7 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
if (ret) {
rds_ib_conn_error(conn, "recv post on "
- "%pI4 returned %d, disconnecting and "
+ "%pI6c returned %d, disconnecting and "
"reconnecting\n", &conn->c_faddr,
ret);
break;
@@ -848,7 +846,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
if (data_len < sizeof(struct rds_header)) {
rds_ib_conn_error(conn, "incoming message "
- "from %pI4 didn't include a "
+ "from %pI6c didn't include a "
"header, disconnecting and "
"reconnecting\n",
&conn->c_faddr);
@@ -861,7 +859,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
/* Validate the checksum. */
if (!rds_message_verify_checksum(ihdr)) {
rds_ib_conn_error(conn, "incoming message "
- "from %pI4 has corrupted header - "
+ "from %pI6c has corrupted header - "
"forcing a reconnect\n",
&conn->c_faddr);
rds_stats_inc(s_recv_drop_bad_checksum);
@@ -941,10 +939,10 @@ static void rds_ib_process_recv(struct rds_connection *conn,
ic->i_recv_data_rem = 0;
ic->i_ibinc = NULL;
- if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
+ if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) {
rds_ib_cong_recv(conn, ibinc);
- else {
- rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
+ } else {
+ rds_recv_incoming(conn, &conn->c_faddr, &conn->c_laddr,
&ibinc->ii_inc, GFP_ATOMIC);
state->ack_next = be64_to_cpu(hdr->h_sequence);
state->ack_next_valid = 1;
@@ -988,7 +986,7 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
} else {
/* We expect errors as the qp is drained during shutdown */
if (rds_conn_up(conn) || rds_conn_connecting(conn))
- rds_ib_conn_error(conn, "recv completion on <%pI4,%pI4> had status %u (%s), disconnecting and reconnecting\n",
+ rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c> had status %u (%s), disconnecting and reconnecting\n",
&conn->c_laddr, &conn->c_faddr,
wc->status,
ib_wc_status_msg(wc->status));
@@ -1023,7 +1021,6 @@ int rds_ib_recv_path(struct rds_conn_path *cp)
{
struct rds_connection *conn = cp->cp_conn;
struct rds_ib_connection *ic = conn->c_transport_data;
- int ret = 0;
rdsdebug("conn %p\n", conn);
if (rds_conn_up(conn)) {
@@ -1032,7 +1029,7 @@ int rds_ib_recv_path(struct rds_conn_path *cp)
rds_ib_stats_inc(s_ib_rx_refill_from_thread);
}
- return ret;
+ return 0;
}
int rds_ib_recv_init(void)
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 8ac80c1b051e..2dcb555e6350 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -305,7 +305,7 @@ void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
/* We expect errors as the qp is drained during shutdown */
if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
- rds_ib_conn_error(conn, "send completion on <%pI4,%pI4> had status %u (%s), disconnecting and reconnecting\n",
+ rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c> had status %u (%s), disconnecting and reconnecting\n",
&conn->c_laddr, &conn->c_faddr, wc->status,
ib_wc_status_msg(wc->status));
}
@@ -730,7 +730,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
first, &first->s_wr, ret, failed_wr);
BUG_ON(failed_wr != &first->s_wr);
if (ret) {
- printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
+ printk(KERN_WARNING "RDS/IB: ib_post_send to %pI6c "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
rds_ib_sub_signaled(ic, nr_sig);
@@ -759,14 +759,11 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
struct rds_ib_connection *ic = conn->c_transport_data;
struct rds_ib_send_work *send = NULL;
const struct ib_send_wr *failed_wr;
- struct rds_ib_device *rds_ibdev;
u32 pos;
u32 work_alloc;
int ret;
int nr_sig = 0;
- rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
-
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
if (work_alloc != 1) {
rds_ib_stats_inc(s_ib_tx_ring_full);
@@ -827,7 +824,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
send, &send->s_atomic_wr, ret, failed_wr);
BUG_ON(failed_wr != &send->s_atomic_wr.wr);
if (ret) {
- printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
+ printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI6c "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
rds_ib_sub_signaled(ic, nr_sig);
@@ -967,7 +964,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
first, &first->s_rdma_wr.wr, ret, failed_wr);
BUG_ON(failed_wr != &first->s_rdma_wr.wr);
if (ret) {
- printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
+ printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI6c "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
rds_ib_sub_signaled(ic, nr_sig);
diff --git a/net/rds/loop.c b/net/rds/loop.c
index feea1f96ee2a..1d73ad79c847 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -35,6 +35,7 @@
#include <linux/in.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <linux/ipv6.h>
#include "rds_single_path.h"
#include "rds.h"
@@ -88,11 +89,11 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
BUG_ON(hdr_off || sg || off);
- rds_inc_init(&rm->m_inc, conn, conn->c_laddr);
+ rds_inc_init(&rm->m_inc, conn, &conn->c_laddr);
/* For the embedded inc. Matching put is in loop_inc_free() */
rds_message_addref(rm);
- rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc,
+ rds_recv_incoming(conn, &conn->c_laddr, &conn->c_faddr, &rm->m_inc,
GFP_KERNEL);
rds_send_drop_acked(conn, be64_to_cpu(rm->m_inc.i_hdr.h_sequence),
diff --git a/net/rds/message.c b/net/rds/message.c
index a35f76971984..4b00b1152a5f 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -514,4 +514,3 @@ void rds_message_unmapped(struct rds_message *rm)
wake_up_interruptible(&rm->m_flush_wait);
}
EXPORT_SYMBOL_GPL(rds_message_unmapped);
-
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 80920e47f2c7..98237feb607a 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007 Oracle. All rights reserved.
+ * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -184,7 +184,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
long i;
int ret;
- if (rs->rs_bound_addr == 0 || !rs->rs_transport) {
+ if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
ret = -ENOTCONN; /* XXX not a great errno */
goto out;
}
@@ -576,7 +576,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
args = CMSG_DATA(cmsg);
- if (rs->rs_bound_addr == 0) {
+ if (ipv6_addr_any(&rs->rs_bound_addr)) {
ret = -ENOTCONN; /* XXX not a great errno */
goto out_ret;
}
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index fc59821f0a27..6b0f57c83a2a 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009 Oracle. All rights reserved.
+ * Copyright (c) 2009, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -37,10 +37,15 @@
#include "rdma_transport.h"
#include "ib.h"
+/* Global IPv4 and IPv6 RDS RDMA listener cm_id */
static struct rdma_cm_id *rds_rdma_listen_id;
+#if IS_ENABLED(CONFIG_IPV6)
+static struct rdma_cm_id *rds6_rdma_listen_id;
+#endif
-int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
- struct rdma_cm_event *event)
+static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event,
+ bool isv6)
{
/* this can be null in the listening path */
struct rds_connection *conn = cm_id->context;
@@ -72,7 +77,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
- ret = trans->cm_handle_connect(cm_id, event);
+ ret = trans->cm_handle_connect(cm_id, event, isv6);
break;
case RDMA_CM_EVENT_ADDR_RESOLVED:
@@ -90,7 +95,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
ibic = conn->c_transport_data;
if (ibic && ibic->i_cm_id == cm_id)
- ret = trans->cm_initiate_connect(cm_id);
+ ret = trans->cm_initiate_connect(cm_id, isv6);
else
rds_conn_drop(conn);
}
@@ -116,14 +121,14 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_DISCONNECTED:
rdsdebug("DISCONNECT event - dropping connection "
- "%pI4->%pI4\n", &conn->c_laddr,
+ "%pI6c->%pI6c\n", &conn->c_laddr,
&conn->c_faddr);
rds_conn_drop(conn);
break;
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
if (conn) {
- pr_info("RDS: RDMA_CM_EVENT_TIMEWAIT_EXIT event: dropping connection %pI4->%pI4\n",
+ pr_info("RDS: RDMA_CM_EVENT_TIMEWAIT_EXIT event: dropping connection %pI6c->%pI6c\n",
&conn->c_laddr, &conn->c_faddr);
rds_conn_drop(conn);
}
@@ -146,13 +151,28 @@ out:
return ret;
}
-static int rds_rdma_listen_init(void)
+int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ return rds_rdma_cm_event_handler_cmn(cm_id, event, false);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+int rds6_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ return rds_rdma_cm_event_handler_cmn(cm_id, event, true);
+}
+#endif
+
+static int rds_rdma_listen_init_common(rdma_cm_event_handler handler,
+ struct sockaddr *sa,
+ struct rdma_cm_id **ret_cm_id)
{
- struct sockaddr_in sin;
struct rdma_cm_id *cm_id;
int ret;
- cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, NULL,
+ cm_id = rdma_create_id(&init_net, handler, NULL,
RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id)) {
ret = PTR_ERR(cm_id);
@@ -161,15 +181,11 @@ static int rds_rdma_listen_init(void)
return ret;
}
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
- sin.sin_port = (__force u16)htons(RDS_PORT);
-
/*
* XXX I bet this binds the cm_id to a device. If we want to support
* fail-over we'll have to take this into consideration.
*/
- ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
+ ret = rdma_bind_addr(cm_id, sa);
if (ret) {
printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
"rdma_bind_addr() returned %d\n", ret);
@@ -185,7 +201,7 @@ static int rds_rdma_listen_init(void)
rdsdebug("cm %p listening on port %u\n", cm_id, RDS_PORT);
- rds_rdma_listen_id = cm_id;
+ *ret_cm_id = cm_id;
cm_id = NULL;
out:
if (cm_id)
@@ -193,6 +209,45 @@ out:
return ret;
}
+/* Initialize the RDS RDMA listeners. We create two listeners for
+ * compatibility reasons. The one on RDS_PORT is used for IPv4
+ * requests only. The one on RDS_CM_PORT is used for IPv6 requests
+ * only, so only IPv6-enabled RDS modules will communicate over this
+ * port.
+ */
+static int rds_rdma_listen_init(void)
+{
+ int ret;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct sockaddr_in6 sin6;
+#endif
+ struct sockaddr_in sin;
+
+ sin.sin_family = PF_INET;
+ sin.sin_addr.s_addr = htonl(INADDR_ANY);
+ sin.sin_port = htons(RDS_PORT);
+ ret = rds_rdma_listen_init_common(rds_rdma_cm_event_handler,
+ (struct sockaddr *)&sin,
+ &rds_rdma_listen_id);
+ if (ret != 0)
+ return ret;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ sin6.sin6_family = PF_INET6;
+ sin6.sin6_addr = in6addr_any;
+ sin6.sin6_port = htons(RDS_CM_PORT);
+ sin6.sin6_scope_id = 0;
+ sin6.sin6_flowinfo = 0;
+ ret = rds_rdma_listen_init_common(rds6_rdma_cm_event_handler,
+ (struct sockaddr *)&sin6,
+ &rds6_rdma_listen_id);
+ /* Keep going even when IPv6 is not enabled in the system. */
+ if (ret != 0)
+ rdsdebug("Cannot set up IPv6 RDMA listener\n");
+#endif
+ return 0;
+}
+
static void rds_rdma_listen_stop(void)
{
if (rds_rdma_listen_id) {
@@ -200,6 +255,13 @@ static void rds_rdma_listen_stop(void)
rdma_destroy_id(rds_rdma_listen_id);
rds_rdma_listen_id = NULL;
}
+#if IS_ENABLED(CONFIG_IPV6)
+ if (rds6_rdma_listen_id) {
+ rdsdebug("cm %p\n", rds6_rdma_listen_id);
+ rdma_destroy_id(rds6_rdma_listen_id);
+ rds6_rdma_listen_id = NULL;
+ }
+#endif
}
static int rds_rdma_init(void)
@@ -229,4 +291,3 @@ module_exit(rds_rdma_exit);
MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: IB transport");
MODULE_LICENSE("Dual BSD/GPL");
-
diff --git a/net/rds/rdma_transport.h b/net/rds/rdma_transport.h
index d309c4430124..200d3134aaae 100644
--- a/net/rds/rdma_transport.h
+++ b/net/rds/rdma_transport.h
@@ -6,11 +6,16 @@
#include <rdma/rdma_cm.h>
#include "rds.h"
+/* RDMA_CM also uses 16385 as the listener port. */
+#define RDS_CM_PORT 16385
+
#define RDS_RDMA_RESOLVE_TIMEOUT_MS 5000
int rds_rdma_conn_connect(struct rds_connection *conn);
int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event);
+int rds6_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event);
/* from ib.c */
extern struct rds_transport rds_ib_transport;
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 60b3b787fbdb..c4dcf654d8fe 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -10,6 +10,7 @@
#include <linux/rds.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
+#include <linux/in6.h>
#include "info.h"
@@ -23,11 +24,13 @@
#define RDS_PROTOCOL_MINOR(v) ((v) & 255)
#define RDS_PROTOCOL(maj, min) (((maj) << 8) | min)
-/*
- * XXX randomly chosen, but at least seems to be unused:
- * # 18464-18768 Unassigned
- * We should do better. We want a reserved port to discourage unpriv'ed
- * userspace from listening.
+/* The following ports, 16385, 18634 and 18635, are registered with IANA as
+ * the ports to be used for RDS over TCP and UDP. Currently, only RDS over
+ * TCP and RDS over IB/RDMA are implemented. 18634 is the historical value
+ * used for the RDMA_CM listener port. RDS/TCP uses port 16385. After the
+ * IPv6 work, RDMA_CM also uses 16385 as the listener port. 18634 is kept
+ * to ensure compatibility with older RDS modules. These ports are defined
+ * in each transport's header file.
+ */
#define RDS_PORT 18634
@@ -61,7 +64,7 @@ void rdsdebug(char *fmt, ...)
struct rds_cong_map {
struct rb_node m_rb_node;
- __be32 m_addr;
+ struct in6_addr m_addr;
wait_queue_head_t m_waitq;
struct list_head m_conn_list;
unsigned long m_page_addrs[RDS_CONG_MAP_PAGES];
@@ -136,11 +139,14 @@ struct rds_conn_path {
/* One rds_connection per RDS address pair */
struct rds_connection {
struct hlist_node c_hash_node;
- __be32 c_laddr;
- __be32 c_faddr;
+ struct in6_addr c_laddr;
+ struct in6_addr c_faddr;
+ int c_dev_if; /* ifindex used for this conn */
+ int c_bound_if; /* ifindex of c_laddr */
unsigned int c_loopback:1,
+ c_isv6:1,
c_ping_triggered:1,
- c_pad_to_32:30;
+ c_pad_to_32:29;
int c_npaths;
struct rds_connection *c_passive;
struct rds_transport *c_trans;
@@ -269,7 +275,7 @@ struct rds_incoming {
struct rds_conn_path *i_conn_path;
struct rds_header i_hdr;
unsigned long i_rx_jiffies;
- __be32 i_saddr;
+ struct in6_addr i_saddr;
rds_rdma_cookie_t i_rdma_cookie;
struct timeval i_rx_tstamp;
@@ -386,7 +392,7 @@ struct rds_message {
struct list_head m_conn_item;
struct rds_incoming m_inc;
u64 m_ack_seq;
- __be32 m_daddr;
+ struct in6_addr m_daddr;
unsigned long m_flags;
/* Never access m_rs without holding m_rs_lock.
@@ -521,7 +527,8 @@ struct rds_transport {
t_mp_capable:1;
unsigned int t_type;
- int (*laddr_check)(struct net *net, __be32 addr);
+ int (*laddr_check)(struct net *net, const struct in6_addr *addr,
+ __u32 scope_id);
int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
void (*conn_free)(void *data);
int (*conn_path_connect)(struct rds_conn_path *cp);
@@ -537,8 +544,8 @@ struct rds_transport {
void (*inc_free)(struct rds_incoming *inc);
int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
- struct rdma_cm_event *event);
- int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
+ struct rdma_cm_event *event, bool isv6);
+ int (*cm_initiate_connect)(struct rdma_cm_id *cm_id, bool isv6);
void (*cm_connect_complete)(struct rds_connection *conn,
struct rdma_cm_event *event);
@@ -554,6 +561,12 @@ struct rds_transport {
bool (*t_unloading)(struct rds_connection *conn);
};
+/* Bind hash table key length. It is the sum of the size of a struct
+ * in6_addr, a scope_id and a port.
+ */
+#define RDS_BOUND_KEY_LEN \
+ (sizeof(struct in6_addr) + sizeof(__u32) + sizeof(__be16))
+
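
A minimal sketch (helper name and packing order assumed) of filling rs_bound_key so the rhashtable can hash and compare the three components as one flat blob:

static void example_make_bound_key(u8 *key, const struct in6_addr *addr,
				   __be16 port, __u32 scope_id)
{
	memcpy(key, addr, sizeof(*addr));
	memcpy(key + sizeof(*addr), &port, sizeof(port));
	/* 16 + 2 + 4 bytes == RDS_BOUND_KEY_LEN */
	memcpy(key + sizeof(*addr) + sizeof(port), &scope_id,
	       sizeof(scope_id));
}
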
struct rds_sock {
struct sock rs_sk;
@@ -565,10 +578,14 @@ struct rds_sock {
* support.
*/
struct rhash_head rs_bound_node;
- u64 rs_bound_key;
- __be32 rs_bound_addr;
- __be32 rs_conn_addr;
- __be16 rs_bound_port;
+ u8 rs_bound_key[RDS_BOUND_KEY_LEN];
+ struct sockaddr_in6 rs_bound_sin6;
+#define rs_bound_addr rs_bound_sin6.sin6_addr
+#define rs_bound_addr_v4 rs_bound_sin6.sin6_addr.s6_addr32[3]
+#define rs_bound_port rs_bound_sin6.sin6_port
+#define rs_bound_scope_id rs_bound_sin6.sin6_scope_id
+ struct in6_addr rs_conn_addr;
+#define rs_conn_addr_v4 rs_conn_addr.s6_addr32[3]
__be16 rs_conn_port;
struct rds_transport *rs_transport;
@@ -704,7 +721,8 @@ extern wait_queue_head_t rds_poll_waitq;
/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
-struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
+struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
+ __u32 scope_id);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);
@@ -723,16 +741,20 @@ void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);
-/* conn.c */
+/* connection.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
- __be32 laddr, __be32 faddr,
- struct rds_transport *trans, gfp_t gfp);
+ const struct in6_addr *laddr,
+ const struct in6_addr *faddr,
+ struct rds_transport *trans, gfp_t gfp,
+ int dev_if);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
- __be32 laddr, __be32 faddr,
- struct rds_transport *trans, gfp_t gfp);
+ const struct in6_addr *laddr,
+ const struct in6_addr *faddr,
+ struct rds_transport *trans,
+ gfp_t gfp, int dev_if);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
@@ -843,11 +865,12 @@ void rds_page_exit(void);
/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
- __be32 saddr);
+ struct in6_addr *saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
- __be32 saddr);
+ struct in6_addr *saddr);
void rds_inc_put(struct rds_incoming *inc);
-void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
+void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
+ struct in6_addr *daddr,
struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int msg_flags);
@@ -856,13 +879,17 @@ int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
struct rds_info_iterator *iter,
__be32 saddr, __be32 daddr, int flip);
+void rds6_inc_info_copy(struct rds_incoming *inc,
+ struct rds_info_iterator *iter,
+ struct in6_addr *saddr, struct in6_addr *daddr,
+ int flip);
/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
-void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
+void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
is_acked_func is_acked);
@@ -949,11 +976,14 @@ void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);
+int rds_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2);
/* transport.c */
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
-struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
+struct rds_transport *rds_trans_get_preferred(struct net *net,
+ const struct in6_addr *addr,
+ __u32 scope_id);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail);
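
rds_addr_cmp(), declared above, only has to impose a total order on in6_addr values so that both ends of a connection agree on which side is canonical (see its use in rds_start_mprds() below). A minimal sketch; the in-tree implementation may compare word-by-word instead:

static int example_addr_cmp(const struct in6_addr *a1,
			    const struct in6_addr *a2)
{
	/* Byte-wise comparison of network-order addresses is a total order. */
	return memcmp(a1, a2, sizeof(*a1));
}
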
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 192ac6f78ded..504cd6bcc54c 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -41,14 +41,14 @@
#include "rds.h"
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
- __be32 saddr)
+ struct in6_addr *saddr)
{
int i;
refcount_set(&inc->i_refcount, 1);
INIT_LIST_HEAD(&inc->i_item);
inc->i_conn = conn;
- inc->i_saddr = saddr;
+ inc->i_saddr = *saddr;
inc->i_rdma_cookie = 0;
inc->i_rx_tstamp.tv_sec = 0;
inc->i_rx_tstamp.tv_usec = 0;
@@ -59,13 +59,13 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
EXPORT_SYMBOL_GPL(rds_inc_init);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
- __be32 saddr)
+ struct in6_addr *saddr)
{
refcount_set(&inc->i_refcount, 1);
INIT_LIST_HEAD(&inc->i_item);
inc->i_conn = cp->cp_conn;
inc->i_conn_path = cp;
- inc->i_saddr = saddr;
+ inc->i_saddr = *saddr;
inc->i_rdma_cookie = 0;
inc->i_rx_tstamp.tv_sec = 0;
inc->i_rx_tstamp.tv_usec = 0;
@@ -110,7 +110,7 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
- rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
+ rdsdebug("rs %p (%pI6c:%u) recv bytes %d buf %d "
"now_cong %d delta %d\n",
rs, &rs->rs_bound_addr,
ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
@@ -260,7 +260,7 @@ static void rds_start_mprds(struct rds_connection *conn)
struct rds_conn_path *cp;
if (conn->c_npaths > 1 &&
- IS_CANONICAL(conn->c_laddr, conn->c_faddr)) {
+ rds_addr_cmp(&conn->c_laddr, &conn->c_faddr) < 0) {
for (i = 0; i < conn->c_npaths; i++) {
cp = &conn->c_path[i];
rds_conn_path_connect_if_down(cp);
@@ -284,7 +284,8 @@ static void rds_start_mprds(struct rds_connection *conn)
* conn. This lets loopback, who only has one conn for both directions,
* tell us which roles the addrs in the conn are playing for this message.
*/
-void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
+void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
+ struct in6_addr *daddr,
struct rds_incoming *inc, gfp_t gfp)
{
struct rds_sock *rs = NULL;
@@ -339,7 +340,8 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
if (inc->i_hdr.h_sport == 0) {
- rdsdebug("ignore ping with 0 sport from 0x%x\n", saddr);
+ rdsdebug("ignore ping with 0 sport from %pI6c\n",
+ saddr);
goto out;
}
rds_stats_inc(s_recv_ping);
@@ -362,7 +364,7 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
goto out;
}
- rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
+ rs = rds_find_bound(daddr, inc->i_hdr.h_dport, conn->c_bound_if);
if (!rs) {
rds_stats_inc(s_recv_drop_no_sock);
goto out;
@@ -625,6 +627,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
struct rds_sock *rs = rds_sk_to_rs(sk);
long timeo;
int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
+ DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
struct rds_incoming *inc = NULL;
@@ -673,7 +676,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
break;
}
- rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
+ rdsdebug("copying inc %p from %pI6c:%u to user\n", inc,
&inc->i_conn->c_faddr,
ntohs(inc->i_hdr.h_sport));
ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
@@ -707,12 +710,26 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
rds_stats_inc(s_recv_delivered);
- if (sin) {
- sin->sin_family = AF_INET;
- sin->sin_port = inc->i_hdr.h_sport;
- sin->sin_addr.s_addr = inc->i_saddr;
- memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
- msg->msg_namelen = sizeof(*sin);
+ if (msg->msg_name) {
+ if (ipv6_addr_v4mapped(&inc->i_saddr)) {
+ sin = (struct sockaddr_in *)msg->msg_name;
+
+ sin->sin_family = AF_INET;
+ sin->sin_port = inc->i_hdr.h_sport;
+ sin->sin_addr.s_addr =
+ inc->i_saddr.s6_addr32[3];
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+ msg->msg_namelen = sizeof(*sin);
+ } else {
+ sin6 = (struct sockaddr_in6 *)msg->msg_name;
+
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = inc->i_hdr.h_sport;
+ sin6->sin6_addr = inc->i_saddr;
+ sin6->sin6_flowinfo = 0;
+ sin6->sin6_scope_id = rs->rs_bound_scope_id;
+ msg->msg_namelen = sizeof(*sin6);
+ }
}
break;
}
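
From userspace, the dual fill-in above means a receiver should supply a msg_name buffer large enough for either family and dispatch on the returned family. A sketch (socket setup assumed; fd is a bound PF_RDS socket):

#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

static ssize_t example_recv(int fd, struct iovec *iov, size_t iovlen)
{
	struct sockaddr_storage ss;
	struct msghdr msg = {
		.msg_name	= &ss,
		.msg_namelen	= sizeof(ss),
		.msg_iov	= iov,
		.msg_iovlen	= iovlen,
	};
	ssize_t n = recvmsg(fd, &msg, 0);

	if (n >= 0) {
		if (((struct sockaddr *)&ss)->sa_family == AF_INET6) {
			/* sender: ((struct sockaddr_in6 *)&ss)->sin6_addr */
		} else {
			/* sender: ((struct sockaddr_in *)&ss)->sin_addr */
		}
	}
	return n;
}
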
@@ -775,3 +792,30 @@ void rds_inc_info_copy(struct rds_incoming *inc,
rds_info_copy(iter, &minfo, sizeof(minfo));
}
+
+#if IS_ENABLED(CONFIG_IPV6)
+void rds6_inc_info_copy(struct rds_incoming *inc,
+ struct rds_info_iterator *iter,
+ struct in6_addr *saddr, struct in6_addr *daddr,
+ int flip)
+{
+ struct rds6_info_message minfo6;
+
+ minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
+ minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
+
+ if (flip) {
+ minfo6.laddr = *daddr;
+ minfo6.faddr = *saddr;
+ minfo6.lport = inc->i_hdr.h_dport;
+ minfo6.fport = inc->i_hdr.h_sport;
+ } else {
+ minfo6.laddr = *saddr;
+ minfo6.faddr = *daddr;
+ minfo6.lport = inc->i_hdr.h_sport;
+ minfo6.fport = inc->i_hdr.h_dport;
+ }
+
+ rds_info_copy(iter, &minfo6, sizeof(minfo6));
+}
+#endif
diff --git a/net/rds/send.c b/net/rds/send.c
index 59f17a2335f4..57b3d5a8b2db 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -709,7 +709,7 @@ void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);
-void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
+void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest)
{
struct rds_message *rm, *tmp;
struct rds_connection *conn;
@@ -721,8 +721,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
spin_lock_irqsave(&rs->rs_lock, flags);
list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
- if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
- dest->sin_port != rm->m_inc.i_hdr.h_dport))
+ if (dest &&
+ (!ipv6_addr_equal(&dest->sin6_addr, &rm->m_daddr) ||
+ dest->sin6_port != rm->m_inc.i_hdr.h_dport))
continue;
list_move(&rm->m_sock_item, &list);
@@ -1059,8 +1060,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
+ DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
- __be32 daddr;
__be16 dport;
struct rds_message *rm = NULL;
struct rds_connection *conn;
@@ -1069,10 +1070,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
int nonblock = msg->msg_flags & MSG_DONTWAIT;
long timeo = sock_sndtimeo(sk, nonblock);
struct rds_conn_path *cpath;
+ struct in6_addr daddr;
+ __u32 scope_id = 0;
size_t total_payload_len = payload_len, rdma_payload_len = 0;
bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
int num_sgs = ceil(payload_len, PAGE_SIZE);
+ int namelen;
/* Mirror Linux UDP mirror of BSD error message compatibility */
/* XXX: Perhaps MSG_MORE someday */
@@ -1081,27 +1085,108 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
goto out;
}
- if (msg->msg_namelen) {
- /* XXX fail non-unicast destination IPs? */
- if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
+ namelen = msg->msg_namelen;
+ if (namelen != 0) {
+ if (namelen < sizeof(*usin)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ switch (usin->sin_family) {
+ case AF_INET:
+ if (usin->sin_addr.s_addr == htonl(INADDR_ANY) ||
+ usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) ||
+ IN_MULTICAST(ntohl(usin->sin_addr.s_addr))) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ipv6_addr_set_v4mapped(usin->sin_addr.s_addr, &daddr);
+ dport = usin->sin_port;
+ break;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6: {
+ int addr_type;
+
+ if (namelen < sizeof(*sin6)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ addr_type = ipv6_addr_type(&sin6->sin6_addr);
+ if (!(addr_type & IPV6_ADDR_UNICAST)) {
+ __be32 addr4;
+
+ if (!(addr_type & IPV6_ADDR_MAPPED)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* It is a mapped address. Need to do some
+ * sanity checks.
+ */
+ addr4 = sin6->sin6_addr.s6_addr32[3];
+ if (addr4 == htonl(INADDR_ANY) ||
+ addr4 == htonl(INADDR_BROADCAST) ||
+ IN_MULTICAST(ntohl(addr4))) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ if (addr_type & IPV6_ADDR_LINKLOCAL) {
+ if (sin6->sin6_scope_id == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ scope_id = sin6->sin6_scope_id;
+ }
+
+ daddr = sin6->sin6_addr;
+ dport = sin6->sin6_port;
+ break;
+ }
+#endif
+
+ default:
ret = -EINVAL;
goto out;
}
- daddr = usin->sin_addr.s_addr;
- dport = usin->sin_port;
} else {
/* We only care about consistency with ->connect() */
lock_sock(sk);
daddr = rs->rs_conn_addr;
dport = rs->rs_conn_port;
+ scope_id = rs->rs_bound_scope_id;
release_sock(sk);
}
lock_sock(sk);
- if (daddr == 0 || rs->rs_bound_addr == 0) {
+ if (ipv6_addr_any(&rs->rs_bound_addr) || ipv6_addr_any(&daddr)) {
release_sock(sk);
- ret = -ENOTCONN; /* XXX not a great errno */
+ ret = -ENOTCONN;
goto out;
+ } else if (namelen != 0) {
+ /* Cannot send to an IPv4 address using an IPv6 source
+ * address and cannot send to an IPv6 address using an
+ * IPv4 source address.
+ */
+ if (ipv6_addr_v4mapped(&daddr) ^
+ ipv6_addr_v4mapped(&rs->rs_bound_addr)) {
+ release_sock(sk);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+ /* If the socket is already bound to a link local address,
+ * it can only send to peers on the same link. But allow
+ * communicating between link local and non-link local addresses.
+ */
+ if (scope_id != rs->rs_bound_scope_id) {
+ if (!scope_id) {
+ scope_id = rs->rs_bound_scope_id;
+ } else if (rs->rs_bound_scope_id) {
+ release_sock(sk);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
}
release_sock(sk);
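
On the send side, a userspace sketch of addressing an IPv6 peer (fd is assumed to be a bound PF_RDS socket): per the checks above, a link-local destination must carry a non-zero sin6_scope_id, and the destination family must match the bound address family.

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

static ssize_t example_send_v6(int fd, const struct in6_addr *peer,
			       in_port_t port, uint32_t ifindex,
			       const void *buf, size_t len)
{
	struct sockaddr_in6 dst;

	memset(&dst, 0, sizeof(dst));
	dst.sin6_family = AF_INET6;
	dst.sin6_addr = *peer;
	dst.sin6_port = htons(port);
	dst.sin6_scope_id = ifindex;	/* must be non-zero for fe80::/10 */
	return sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
}
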
@@ -1155,13 +1240,14 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
/* rds_conn_create has a spinlock that runs with IRQ off.
* Caching the conn in the socket helps a lot. */
- if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
+ if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr))
conn = rs->rs_conn;
else {
conn = rds_conn_create_outgoing(sock_net(sock->sk),
- rs->rs_bound_addr, daddr,
- rs->rs_transport,
- sock->sk->sk_allocation);
+ &rs->rs_bound_addr, &daddr,
+ rs->rs_transport,
+ sock->sk->sk_allocation,
+ scope_id);
if (IS_ERR(conn)) {
ret = PTR_ERR(conn);
goto out;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 351a28474667..2c7b7c352d3e 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -37,6 +37,7 @@
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/addrconf.h>
#include "rds.h"
#include "tcp.h"
@@ -44,7 +46,14 @@
/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);
+
+/* rds_tcp_tc_count counts only IPv4 connections.
+ * rds6_tcp_tc_count counts both IPv4 and IPv6 connections.
+ */
static unsigned int rds_tcp_tc_count;
+#if IS_ENABLED(CONFIG_IPV6)
+static unsigned int rds6_tcp_tc_count;
+#endif
/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
@@ -111,7 +120,11 @@ void rds_tcp_restore_callbacks(struct socket *sock,
/* done under the callback_lock to serialize with write_space */
spin_lock(&rds_tcp_tc_list_lock);
list_del_init(&tc->t_list_item);
- rds_tcp_tc_count--;
+#if IS_ENABLED(CONFIG_IPV6)
+ rds6_tcp_tc_count--;
+#endif
+ if (!tc->t_cpath->cp_conn->c_isv6)
+ rds_tcp_tc_count--;
spin_unlock(&rds_tcp_tc_list_lock);
tc->t_sock = NULL;
@@ -198,7 +211,11 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp)
/* done under the callback_lock to serialize with write_space */
spin_lock(&rds_tcp_tc_list_lock);
list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
- rds_tcp_tc_count++;
+#if IS_ENABLED(CONFIG_IPV6)
+ rds6_tcp_tc_count++;
+#endif
+ if (!tc->t_cpath->cp_conn->c_isv6)
+ rds_tcp_tc_count++;
spin_unlock(&rds_tcp_tc_list_lock);
/* accepted sockets need our listen data ready undone */
@@ -219,6 +236,9 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp)
write_unlock_bh(&sock->sk->sk_callback_lock);
}
+/* Handle RDS_INFO_TCP_SOCKETS socket option. It only returns IPv4
+ * connections for backward compatibility.
+ */
static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
@@ -226,8 +246,6 @@ static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len,
struct rds_info_tcp_socket tsinfo;
struct rds_tcp_connection *tc;
unsigned long flags;
- struct sockaddr_in sin;
- struct socket *sock;
spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);
@@ -235,16 +253,15 @@ static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len,
goto out;
list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
+ struct inet_sock *inet = inet_sk(tc->t_sock->sk);
- sock = tc->t_sock;
- if (sock) {
- sock->ops->getname(sock, (struct sockaddr *)&sin, 0);
- tsinfo.local_addr = sin.sin_addr.s_addr;
- tsinfo.local_port = sin.sin_port;
- sock->ops->getname(sock, (struct sockaddr *)&sin, 1);
- tsinfo.peer_addr = sin.sin_addr.s_addr;
- tsinfo.peer_port = sin.sin_port;
- }
+ if (tc->t_cpath->cp_conn->c_isv6)
+ continue;
+
+ tsinfo.local_addr = inet->inet_saddr;
+ tsinfo.local_port = inet->inet_sport;
+ tsinfo.peer_addr = inet->inet_daddr;
+ tsinfo.peer_port = inet->inet_dport;
tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
tsinfo.data_rem = tc->t_tinc_data_rem;
@@ -262,10 +279,82 @@ out:
spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}
-static int rds_tcp_laddr_check(struct net *net, __be32 addr)
+#if IS_ENABLED(CONFIG_IPV6)
+/* Handle RDS6_INFO_TCP_SOCKETS socket option. It returns both IPv4 and
+ * IPv6 connections. An IPv4 connection's addresses are returned as
+ * IPv4-mapped IPv6 addresses.
+ */
+static void rds6_tcp_tc_info(struct socket *sock, unsigned int len,
+ struct rds_info_iterator *iter,
+ struct rds_info_lengths *lens)
{
- if (inet_addr_type(net, addr) == RTN_LOCAL)
+ struct rds6_info_tcp_socket tsinfo6;
+ struct rds_tcp_connection *tc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);
+
+ if (len / sizeof(tsinfo6) < rds6_tcp_tc_count)
+ goto out;
+
+ list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
+ struct sock *sk = tc->t_sock->sk;
+ struct inet_sock *inet = inet_sk(sk);
+
+ tsinfo6.local_addr = sk->sk_v6_rcv_saddr;
+ tsinfo6.local_port = inet->inet_sport;
+ tsinfo6.peer_addr = sk->sk_v6_daddr;
+ tsinfo6.peer_port = inet->inet_dport;
+
+ tsinfo6.hdr_rem = tc->t_tinc_hdr_rem;
+ tsinfo6.data_rem = tc->t_tinc_data_rem;
+ tsinfo6.last_sent_nxt = tc->t_last_sent_nxt;
+ tsinfo6.last_expected_una = tc->t_last_expected_una;
+ tsinfo6.last_seen_una = tc->t_last_seen_una;
+
+ rds_info_copy(iter, &tsinfo6, sizeof(tsinfo6));
+ }
+
+out:
+ lens->nr = rds6_tcp_tc_count;
+ lens->each = sizeof(tsinfo6);
+
+ spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
+}
+#endif
+
+static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
+ __u32 scope_id)
+{
+ struct net_device *dev = NULL;
+#if IS_ENABLED(CONFIG_IPV6)
+ int ret;
+#endif
+
+ if (ipv6_addr_v4mapped(addr)) {
+ if (inet_addr_type(net, addr->s6_addr32[3]) == RTN_LOCAL)
+ return 0;
+ return -EADDRNOTAVAIL;
+ }
+
+ /* If the scope_id is specified, check only those addresses
+ * hosted on the specified interface.
+ */
+ if (scope_id != 0) {
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, scope_id);
+ /* scope_id is not valid... */
+ if (!dev) {
+ rcu_read_unlock();
+ return -EADDRNOTAVAIL;
+ }
+ rcu_read_unlock();
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ ret = ipv6_chk_addr(net, addr, dev, 0);
+ if (ret)
return 0;
+#endif
return -EADDRNOTAVAIL;
}
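
For reference, ipv6_addr_v4mapped() above tests for the RFC 4291
::ffff:a.b.c.d form; a standalone sketch of the same layout check
(illustrative only, not the kernel helper):

#include <stdbool.h>
#include <string.h>
#include <netinet/in.h>

/* 80 zero bits, 16 one bits, then the IPv4 address; the IPv4 part
 * lives in s6_addr[12..15] (s6_addr32[3] in the kernel code above). */
static bool is_v4_mapped(const struct in6_addr *a)
{
	static const unsigned char prefix[12] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff
	};

	return memcmp(a->s6_addr, prefix, sizeof(prefix)) == 0;
}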
@@ -468,13 +557,27 @@ static __net_init int rds_tcp_init_net(struct net *net)
err = -ENOMEM;
goto fail;
}
- rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, true);
+#else
+ rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false);
+#endif
if (!rtn->rds_tcp_listen_sock) {
- pr_warn("could not set up listen sock\n");
- unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
- rtn->rds_tcp_sysctl = NULL;
- err = -EAFNOSUPPORT;
- goto fail;
+ pr_warn("could not set up IPv6 listen sock\n");
+
+#if IS_ENABLED(CONFIG_IPV6)
+ /* Try IPv4 as some systems disable IPv6 */
+ rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false);
+ if (!rtn->rds_tcp_listen_sock) {
+#endif
+ unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
+ rtn->rds_tcp_sysctl = NULL;
+ err = -EAFNOSUPPORT;
+ goto fail;
+#if IS_ENABLED(CONFIG_IPV6)
+ }
+#endif
}
INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
return 0;
@@ -588,6 +691,9 @@ static void rds_tcp_exit(void)
rds_tcp_set_unloading();
synchronize_rcu();
rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
+#if IS_ENABLED(CONFIG_IPV6)
+ rds_info_deregister_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info);
+#endif
unregister_pernet_device(&rds_tcp_net_ops);
rds_tcp_destroy_conns();
rds_trans_unregister(&rds_tcp_transport);
@@ -619,6 +725,9 @@ static int rds_tcp_init(void)
rds_trans_register(&rds_tcp_transport);
rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
+#if IS_ENABLED(CONFIG_IPV6)
+ rds_info_register_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info);
+#endif
goto out;
out_recv:
@@ -633,4 +742,3 @@ module_init(rds_tcp_init);
MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");
-
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index c6fa080e9b6d..3c69361d21c7 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -67,7 +67,7 @@ void rds_tcp_conn_path_shutdown(struct rds_conn_path *conn);
void rds_tcp_state_change(struct sock *sk);
/* tcp_listen.c */
-struct socket *rds_tcp_listen_init(struct net *);
+struct socket *rds_tcp_listen_init(struct net *net, bool isv6);
void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor);
void rds_tcp_listen_data_ready(struct sock *sk);
int rds_tcp_accept_one(struct socket *sock);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index d999e7075645..008f50fb25dd 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -66,7 +66,8 @@ void rds_tcp_state_change(struct sock *sk)
* RDS connection as RDS_CONN_UP until the reconnect,
* to avoid RDS datagram loss.
*/
- if (!IS_CANONICAL(cp->cp_conn->c_laddr, cp->cp_conn->c_faddr) &&
+ if (rds_addr_cmp(&cp->cp_conn->c_laddr,
+ &cp->cp_conn->c_faddr) >= 0 &&
rds_conn_path_transition(cp, RDS_CONN_CONNECTING,
RDS_CONN_ERROR)) {
rds_conn_path_drop(cp, false);
@@ -88,7 +89,11 @@ out:
int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
{
struct socket *sock = NULL;
- struct sockaddr_in src, dest;
+ struct sockaddr_in6 sin6;
+ struct sockaddr_in sin;
+ struct sockaddr *addr;
+ int addrlen;
+ bool isv6;
int ret;
struct rds_connection *conn = cp->cp_conn;
struct rds_tcp_connection *tc = cp->cp_transport_data;
@@ -105,37 +110,68 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
mutex_unlock(&tc->t_conn_path_lock);
return 0;
}
- ret = sock_create_kern(rds_conn_net(conn), PF_INET,
- SOCK_STREAM, IPPROTO_TCP, &sock);
+ if (ipv6_addr_v4mapped(&conn->c_laddr)) {
+ ret = sock_create_kern(rds_conn_net(conn), PF_INET,
+ SOCK_STREAM, IPPROTO_TCP, &sock);
+ isv6 = false;
+ } else {
+ ret = sock_create_kern(rds_conn_net(conn), PF_INET6,
+ SOCK_STREAM, IPPROTO_TCP, &sock);
+ isv6 = true;
+ }
+
if (ret < 0)
goto out;
rds_tcp_tune(sock);
- src.sin_family = AF_INET;
- src.sin_addr.s_addr = (__force u32)conn->c_laddr;
- src.sin_port = (__force u16)htons(0);
+ if (isv6) {
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_addr = conn->c_laddr;
+ sin6.sin6_port = 0;
+ sin6.sin6_flowinfo = 0;
+ sin6.sin6_scope_id = conn->c_dev_if;
+ addr = (struct sockaddr *)&sin6;
+ addrlen = sizeof(sin6);
+ } else {
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = conn->c_laddr.s6_addr32[3];
+ sin.sin_port = 0;
+ addr = (struct sockaddr *)&sin;
+ addrlen = sizeof(sin);
+ }
- ret = sock->ops->bind(sock, (struct sockaddr *)&src, sizeof(src));
+ ret = sock->ops->bind(sock, addr, addrlen);
if (ret) {
- rdsdebug("bind failed with %d at address %pI4\n",
+ rdsdebug("bind failed with %d at address %pI6c\n",
ret, &conn->c_laddr);
goto out;
}
- dest.sin_family = AF_INET;
- dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
- dest.sin_port = (__force u16)htons(RDS_TCP_PORT);
+ if (isv6) {
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_addr = conn->c_faddr;
+ sin6.sin6_port = htons(RDS_TCP_PORT);
+ sin6.sin6_flowinfo = 0;
+ sin6.sin6_scope_id = conn->c_dev_if;
+ addr = (struct sockaddr *)&sin6;
+ addrlen = sizeof(sin6);
+ } else {
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = conn->c_faddr.s6_addr32[3];
+ sin.sin_port = htons(RDS_TCP_PORT);
+ addr = (struct sockaddr *)&sin;
+ addrlen = sizeof(sin);
+ }
/*
* once we call connect() we can start getting callbacks and they
* own the socket
*/
rds_tcp_set_callbacks(sock, cp);
- ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest),
- O_NONBLOCK);
+ ret = sock->ops->connect(sock, addr, addrlen, O_NONBLOCK);
- rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
+ rdsdebug("connect to address %pI6c returned %d\n", &conn->c_faddr, ret);
if (ret == -EINPROGRESS)
ret = 0;
if (ret == 0) {
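
As in the kernel path above, a userspace connect to a link-local RDS/TCP
peer must supply sin6_scope_id. A sketch assuming RDS_TCP_PORT's value of
16385 and a hypothetical interface name:

#include <string.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>

static int fill_linklocal_dest(struct sockaddr_in6 *sin6)
{
	memset(sin6, 0, sizeof(*sin6));
	sin6->sin6_family = AF_INET6;
	sin6->sin6_port = htons(16385);			/* RDS_TCP_PORT */
	sin6->sin6_scope_id = if_nametoindex("eth0");	/* hypothetical */
	return inet_pton(AF_INET6, "fe80::1", &sin6->sin6_addr) == 1 ? 0 : -1;
}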
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 22571189f21e..c12203f646da 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2018 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -83,13 +83,12 @@ static
struct rds_tcp_connection *rds_tcp_accept_one_path(struct rds_connection *conn)
{
int i;
- bool peer_is_smaller = IS_CANONICAL(conn->c_faddr, conn->c_laddr);
int npaths = max_t(int, 1, conn->c_npaths);
/* for mprds, all paths MUST be initiated by the peer
* with the smaller address.
*/
- if (!peer_is_smaller) {
+ if (rds_addr_cmp(&conn->c_faddr, &conn->c_laddr) >= 0) {
/* Make sure we initiate at least one path if this
* has not already been done; rds_start_mprds() will
* take care of additional paths, if necessary.
@@ -132,6 +131,11 @@ int rds_tcp_accept_one(struct socket *sock)
struct rds_tcp_connection *rs_tcp = NULL;
int conn_state;
struct rds_conn_path *cp;
+ struct in6_addr *my_addr, *peer_addr;
+#if !IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr saddr, daddr;
+#endif
+ int dev_if = 0;
if (!sock) /* module unload or netns delete in progress */
return -ENETUNREACH;
@@ -164,13 +168,40 @@ int rds_tcp_accept_one(struct socket *sock)
inet = inet_sk(new_sock->sk);
- rdsdebug("accepted tcp %pI4:%u -> %pI4:%u\n",
- &inet->inet_saddr, ntohs(inet->inet_sport),
- &inet->inet_daddr, ntohs(inet->inet_dport));
+#if IS_ENABLED(CONFIG_IPV6)
+ my_addr = &new_sock->sk->sk_v6_rcv_saddr;
+ peer_addr = &new_sock->sk->sk_v6_daddr;
+#else
+ ipv6_addr_set_v4mapped(inet->inet_saddr, &saddr);
+ ipv6_addr_set_v4mapped(inet->inet_daddr, &daddr);
+ my_addr = &saddr;
+ peer_addr = &daddr;
+#endif
+ rdsdebug("accepted family %d tcp %pI6c:%u -> %pI6c:%u\n",
+ sock->sk->sk_family,
+ my_addr, ntohs(inet->inet_sport),
+ peer_addr, ntohs(inet->inet_dport));
+
+#if IS_ENABLED(CONFIG_IPV6)
+	/* sk_bound_dev_if is not set if the peer address is not a
+	 * link-local address. In that case mcast_oif happens to be set,
+	 * so just use it.
+	 */
+ if ((ipv6_addr_type(my_addr) & IPV6_ADDR_LINKLOCAL) &&
+ !(ipv6_addr_type(peer_addr) & IPV6_ADDR_LINKLOCAL)) {
+ struct ipv6_pinfo *inet6;
+
+ inet6 = inet6_sk(new_sock->sk);
+ dev_if = inet6->mcast_oif;
+ } else {
+ dev_if = new_sock->sk->sk_bound_dev_if;
+ }
+#endif
conn = rds_conn_create(sock_net(sock->sk),
- inet->inet_saddr, inet->inet_daddr,
- &rds_tcp_transport, GFP_KERNEL);
+ my_addr, peer_addr,
+ &rds_tcp_transport, GFP_KERNEL, dev_if);
+
if (IS_ERR(conn)) {
ret = PTR_ERR(conn);
goto out;
@@ -254,15 +285,22 @@ out:
ready(sk);
}
-struct socket *rds_tcp_listen_init(struct net *net)
+struct socket *rds_tcp_listen_init(struct net *net, bool isv6)
{
- struct sockaddr_in sin;
struct socket *sock = NULL;
+ struct sockaddr_storage ss;
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_in *sin;
+ int addr_len;
int ret;
- ret = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
- if (ret < 0)
+ ret = sock_create_kern(net, isv6 ? PF_INET6 : PF_INET, SOCK_STREAM,
+ IPPROTO_TCP, &sock);
+ if (ret < 0) {
+ rdsdebug("could not create %s listener socket: %d\n",
+ isv6 ? "IPv6" : "IPv4", ret);
goto out;
+ }
sock->sk->sk_reuse = SK_CAN_REUSE;
rds_tcp_nonagle(sock);
@@ -272,13 +310,28 @@ struct socket *rds_tcp_listen_init(struct net *net)
sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
write_unlock_bh(&sock->sk->sk_callback_lock);
- sin.sin_family = PF_INET;
- sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
- sin.sin_port = (__force u16)htons(RDS_TCP_PORT);
+ if (isv6) {
+ sin6 = (struct sockaddr_in6 *)&ss;
+ sin6->sin6_family = PF_INET6;
+ sin6->sin6_addr = in6addr_any;
+ sin6->sin6_port = (__force u16)htons(RDS_TCP_PORT);
+ sin6->sin6_scope_id = 0;
+ sin6->sin6_flowinfo = 0;
+ addr_len = sizeof(*sin6);
+ } else {
+ sin = (struct sockaddr_in *)&ss;
+ sin->sin_family = PF_INET;
+ sin->sin_addr.s_addr = INADDR_ANY;
+ sin->sin_port = (__force u16)htons(RDS_TCP_PORT);
+ addr_len = sizeof(*sin);
+ }
- ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
- if (ret < 0)
+ ret = sock->ops->bind(sock, (struct sockaddr *)&ss, addr_len);
+ if (ret < 0) {
+ rdsdebug("could not bind %s listener socket: %d\n",
+ isv6 ? "IPv6" : "IPv4", ret);
goto out;
+ }
ret = sock->ops->listen(sock, 64);
if (ret < 0)
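
The listener above binds in6addr_any on a PF_INET6 socket, which (with
bindv6only off) also accepts IPv4 peers as v4-mapped addresses. The
userspace analogue of that dual-stack setup, as a sketch:

#include <sys/socket.h>
#include <netinet/in.h>

static int make_dual_stack_listener(in_port_t port)
{
	struct sockaddr_in6 sin6 = {
		.sin6_family	= AF_INET6,
		.sin6_addr	= in6addr_any,
		.sin6_port	= port,
	};
	int off = 0;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* IPv4 peers then appear as ::ffff:a.b.c.d. */
	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off));
	if (bind(fd, (struct sockaddr *)&sin6, sizeof(sin6)) < 0 ||
	    listen(fd, 64) < 0)
		return -1;
	return fd;
}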
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index b9fbd2ee74ef..42c5ff1eda95 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -179,7 +179,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
tc->t_tinc = tinc;
rdsdebug("alloced tinc %p\n", tinc);
rds_inc_path_init(&tinc->ti_inc, cp,
- cp->cp_conn->c_faddr);
+ &cp->cp_conn->c_faddr);
tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
local_clock();
@@ -239,8 +239,9 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
rds_tcp_cong_recv(conn, tinc);
else
- rds_recv_incoming(conn, conn->c_faddr,
- conn->c_laddr, &tinc->ti_inc,
+ rds_recv_incoming(conn, &conn->c_faddr,
+ &conn->c_laddr,
+ &tinc->ti_inc,
arg->gfp);
tc->t_tinc_hdr_rem = sizeof(struct rds_header);
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 7df869d37afd..78a2554a4497 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -153,7 +153,7 @@ out:
* an incoming RST.
*/
if (rds_conn_path_up(cp)) {
- pr_warn("RDS/tcp: send to %pI4 on cp [%d]"
+ pr_warn("RDS/tcp: send to %pI6c on cp [%d]"
"returned %d, "
"disconnecting and reconnecting\n",
&conn->c_faddr, cp->cp_index, ret);
diff --git a/net/rds/threads.c b/net/rds/threads.c
index c52861d77a59..e64f9e4c3cda 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -82,8 +82,8 @@ void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
return;
}
- rdsdebug("conn %p for %pI4 to %pI4 complete\n",
- cp->cp_conn, &cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr);
+ rdsdebug("conn %p for %pI6c to %pI6c complete\n",
+ cp->cp_conn, &cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr);
cp->cp_reconnect_jiffies = 0;
set_bit(0, &cp->cp_conn->c_map_queued);
@@ -125,13 +125,13 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
unsigned long rand;
struct rds_connection *conn = cp->cp_conn;
- rdsdebug("conn %p for %pI4 to %pI4 reconnect jiffies %lu\n",
- conn, &conn->c_laddr, &conn->c_faddr,
- cp->cp_reconnect_jiffies);
+ rdsdebug("conn %p for %pI6c to %pI6c reconnect jiffies %lu\n",
+ conn, &conn->c_laddr, &conn->c_faddr,
+ cp->cp_reconnect_jiffies);
/* let peer with smaller addr initiate reconnect, to avoid duels */
if (conn->c_trans->t_type == RDS_TRANS_TCP &&
- !IS_CANONICAL(conn->c_laddr, conn->c_faddr))
+ rds_addr_cmp(&conn->c_laddr, &conn->c_faddr) >= 0)
return;
set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
@@ -145,7 +145,7 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
}
get_random_bytes(&rand, sizeof(rand));
- rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n",
+ rdsdebug("%lu delay %lu ceil conn %p for %pI6c -> %pI6c\n",
rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies,
conn, &conn->c_laddr, &conn->c_faddr);
rcu_read_lock();
@@ -167,14 +167,14 @@ void rds_connect_worker(struct work_struct *work)
int ret;
if (cp->cp_index > 0 &&
- !IS_CANONICAL(cp->cp_conn->c_laddr, cp->cp_conn->c_faddr))
+ rds_addr_cmp(&cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr) >= 0)
return;
clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
ret = rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
if (ret) {
ret = conn->c_trans->conn_path_connect(cp);
- rdsdebug("conn %p for %pI4 to %pI4 dispatched, ret %d\n",
- conn, &conn->c_laddr, &conn->c_faddr, ret);
+ rdsdebug("conn %p for %pI6c to %pI6c dispatched, ret %d\n",
+ conn, &conn->c_laddr, &conn->c_faddr, ret);
if (ret) {
if (rds_conn_path_transition(cp,
@@ -259,3 +259,50 @@ int rds_threads_init(void)
return 0;
}
+
+/* Compare two IPv6 addresses. Return 0 if the two addresses are equal.
+ * Return 1 if the first is greater. Return -1 if the second is greater.
+ */
+int rds_addr_cmp(const struct in6_addr *addr1,
+ const struct in6_addr *addr2)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const __be64 *a1, *a2;
+ u64 x, y;
+
+ a1 = (__be64 *)addr1;
+ a2 = (__be64 *)addr2;
+
+ if (*a1 != *a2) {
+ if (be64_to_cpu(*a1) < be64_to_cpu(*a2))
+ return -1;
+ else
+ return 1;
+ } else {
+ x = be64_to_cpu(*++a1);
+ y = be64_to_cpu(*++a2);
+ if (x < y)
+ return -1;
+ else if (x > y)
+ return 1;
+ else
+ return 0;
+ }
+#else
+ u32 a, b;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (addr1->s6_addr32[i] != addr2->s6_addr32[i]) {
+ a = ntohl(addr1->s6_addr32[i]);
+ b = ntohl(addr2->s6_addr32[i]);
+ if (a < b)
+ return -1;
+ else if (a > b)
+ return 1;
+ }
+ }
+ return 0;
+#endif
+}
+EXPORT_SYMBOL_GPL(rds_addr_cmp);
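
Since in6_addr bytes are stored in network (big-endian) order, the
fallback loop above yields the same total order as a plain 16-byte
memcmp(); a userspace sketch of the equivalence:

#include <string.h>
#include <netinet/in.h>

static int addr_cmp(const struct in6_addr *a, const struct in6_addr *b)
{
	int r = memcmp(a->s6_addr, b->s6_addr, sizeof(a->s6_addr));

	/* Collapse to -1/0/1, matching rds_addr_cmp()'s contract. */
	return (r > 0) - (r < 0);
}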
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 0b188dd0a344..46f709a4b577 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/in.h>
+#include <linux/ipv6.h>
#include "rds.h"
#include "loop.h"
@@ -75,20 +76,26 @@ void rds_trans_put(struct rds_transport *trans)
module_put(trans->t_owner);
}
-struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr)
+struct rds_transport *rds_trans_get_preferred(struct net *net,
+ const struct in6_addr *addr,
+ __u32 scope_id)
{
struct rds_transport *ret = NULL;
struct rds_transport *trans;
unsigned int i;
- if (IN_LOOPBACK(ntohl(addr)))
+ if (ipv6_addr_v4mapped(addr)) {
+ if (*(u_int8_t *)&addr->s6_addr32[3] == IN_LOOPBACKNET)
+ return &rds_loop_transport;
+ } else if (ipv6_addr_loopback(addr)) {
return &rds_loop_transport;
+ }
down_read(&rds_trans_sem);
for (i = 0; i < RDS_TRANS_COUNT; i++) {
trans = transports[i];
- if (trans && (trans->laddr_check(net, addr) == 0) &&
+ if (trans && (trans->laddr_check(net, addr, scope_id) == 0) &&
(!trans->t_owner || try_module_get(trans->t_owner))) {
ret = trans;
break;
@@ -152,4 +159,3 @@ unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
return total;
}
-
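
The IN_LOOPBACKNET test above matches the whole 127/8 network rather than
just 127.0.0.1, by inspecting only the first embedded IPv4 octet. A
byte-level sketch (assumes the caller already verified the address is
v4-mapped, as the hunk does):

#include <stdbool.h>
#include <netinet/in.h>

static bool v4mapped_loopback(const struct in6_addr *a)
{
	/* The embedded IPv4 address starts at byte 12 and
	 * IN_LOOPBACKNET is 127, so 127.x.y.z all match. */
	return a->s6_addr[12] == 127;
}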
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index a7a4e6ff9be2..1355f5ca8d22 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -141,13 +141,15 @@ static void rfkill_led_trigger_event(struct rfkill *rfkill)
led_trigger_event(trigger, LED_FULL);
}
-static void rfkill_led_trigger_activate(struct led_classdev *led)
+static int rfkill_led_trigger_activate(struct led_classdev *led)
{
struct rfkill *rfkill;
rfkill = container_of(led->trigger, struct rfkill, led_trigger);
rfkill_led_trigger_event(rfkill);
+
+ return 0;
}
const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 2b463047dd7b..ac44d8afffb1 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -741,7 +741,7 @@ static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
struct rxrpc_sock *rx = rxrpc_sk(sk);
__poll_t mask;
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, wait);
mask = 0;
/* the socket is readable if there are any messages waiting on the Rx
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 707630ab4713..c97558710421 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -420,6 +420,7 @@ struct rxrpc_connection {
struct rxrpc_channel {
unsigned long final_ack_at; /* Time at which to issue final ACK */
struct rxrpc_call __rcu *call; /* Active call */
+ unsigned int call_debug_id; /* call->debug_id */
u32 call_id; /* ID of current call */
u32 call_counter; /* Call ID counter */
u32 last_call; /* ID of last call */
@@ -478,6 +479,7 @@ enum rxrpc_call_flag {
RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */
RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */
+ RXRPC_CALL_RX_UNDERRUN, /* Got data underrun */
};
/*
@@ -588,7 +590,7 @@ struct rxrpc_call {
*/
#define RXRPC_RXTX_BUFF_SIZE 64
#define RXRPC_RXTX_BUFF_MASK (RXRPC_RXTX_BUFF_SIZE - 1)
-#define RXRPC_INIT_RX_WINDOW_SIZE 32
+#define RXRPC_INIT_RX_WINDOW_SIZE 63
struct sk_buff **rxtx_buffer;
u8 *rxtx_annotations;
#define RXRPC_TX_ANNO_ACK 0
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 20210418904b..8e7434e92097 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -162,7 +162,6 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
*/
static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
{
- struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
unsigned long resend_at;
rxrpc_seq_t cursor, seq, top;
@@ -207,7 +206,6 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
skb = call->rxtx_buffer[ix];
rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
- sp = rxrpc_skb(skb);
if (anno_type == RXRPC_TX_ANNO_UNACK) {
if (ktime_after(skb->tstamp, max_age)) {
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index f6734d8cb01a..9486293fef5c 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -415,7 +415,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
bool rxrpc_queue_call(struct rxrpc_call *call)
{
const void *here = __builtin_return_address(0);
- int n = __atomic_add_unless(&call->usage, 1, 0);
+ int n = atomic_fetch_add_unless(&call->usage, 1, 0);
if (n == 0)
return false;
if (rxrpc_queue_work(&call->processor))
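
The __atomic_add_unless() -> atomic_fetch_add_unless() conversions here
and below keep the same semantics: add unless the counter holds a
forbidden value, and return the old value, so n == 0 means the object was
already dead. A C11 sketch of that primitive:

#include <stdatomic.h>

/* Add a to *v unless *v == u; return the old value either way. */
static int fetch_add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
		;	/* the failed CAS reloaded c; retry */
	return c;
}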
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 5736f643c516..f8f37188a932 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -590,6 +590,7 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
*/
smp_wmb();
chan->call_id = call_id;
+ chan->call_debug_id = call->debug_id;
rcu_assign_pointer(chan->call, call);
wake_up(&call->waitq);
}
@@ -1051,7 +1052,6 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work)
container_of(work, struct rxrpc_net, client_conn_reaper);
unsigned long expiry, conn_expires_at, now;
unsigned int nr_conns;
- bool did_discard = false;
_enter("");
@@ -1113,7 +1113,6 @@ next:
* If someone re-sets the flag and re-gets the ref, that's fine.
*/
rxrpc_put_connection(conn);
- did_discard = true;
nr_conns--;
goto next;
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 3fde001fcc39..6df56ce68861 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -129,8 +129,10 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
_proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort);
break;
case RXRPC_PACKET_TYPE_ACK:
- trace_rxrpc_tx_ack(NULL, serial, chan->last_seq, 0,
- RXRPC_ACK_DUPLICATE, 0);
+ trace_rxrpc_tx_ack(chan->call_debug_id, serial,
+ ntohl(pkt.ack.firstPacket),
+ ntohl(pkt.ack.serial),
+ pkt.ack.reason, 0);
_proto("Tx ACK %%%u [re]", serial);
break;
}
@@ -138,8 +140,11 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
conn->params.peer->last_tx_at = ktime_get_seconds();
if (ret < 0)
- trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
- rxrpc_tx_fail_call_final_resend);
+ trace_rxrpc_tx_fail(chan->call_debug_id, serial, ret,
+ rxrpc_tx_point_call_final_resend);
+ else
+ trace_rxrpc_tx_packet(chan->call_debug_id, &pkt.whdr,
+ rxrpc_tx_point_call_final_resend);
_leave("");
}
@@ -240,11 +245,13 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
if (ret < 0) {
trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
- rxrpc_tx_fail_conn_abort);
+ rxrpc_tx_point_conn_abort);
_debug("sendmsg failed: %d", ret);
return -EAGAIN;
}
+ trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);
+
conn->params.peer->last_tx_at = ktime_get_seconds();
_leave(" = 0");
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 4c77a78a252a..77440a356b14 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -266,7 +266,7 @@ void rxrpc_kill_connection(struct rxrpc_connection *conn)
bool rxrpc_queue_conn(struct rxrpc_connection *conn)
{
const void *here = __builtin_return_address(0);
- int n = __atomic_add_unless(&conn->usage, 1, 0);
+ int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
if (n == 0)
return false;
if (rxrpc_queue_work(&conn->processor))
@@ -309,7 +309,7 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
const void *here = __builtin_return_address(0);
if (conn) {
- int n = __atomic_add_unless(&conn->usage, 1, 0);
+ int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
if (n > 0)
trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
else
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 608d078a4981..cfdc199c6351 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -496,7 +496,7 @@ next_subpacket:
return rxrpc_proto_abort("LSA", call, seq);
}
- trace_rxrpc_rx_data(call, seq, serial, flags, annotation);
+ trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
if (before_eq(seq, hard_ack)) {
ack = RXRPC_ACK_DUPLICATE;
ack_serial = serial;
@@ -592,9 +592,15 @@ ack:
rxrpc_propose_ACK(call, ack, skew, ack_serial,
immediate_ack, true,
rxrpc_propose_ack_input_data);
+ else
+ rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial,
+ false, true,
+ rxrpc_propose_ack_input_data);
- if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1)
+ if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) {
+ trace_rxrpc_notify_socket(call->debug_id, serial);
rxrpc_notify_socket(call);
+ }
_leave(" [queued]");
}
@@ -1262,6 +1268,11 @@ void rxrpc_data_ready(struct sock *udp_sk)
/* But otherwise we need to retransmit the final packet
* from data cached in the connection record.
*/
+ if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
+ trace_rxrpc_rx_data(chan->call_debug_id,
+ sp->hdr.seq,
+ sp->hdr.serial,
+ sp->hdr.flags, 0);
rxrpc_post_packet_to_conn(conn, skb);
goto out_unlock;
}
diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c
index 8325f1b86840..13bd8a4dfac7 100644
--- a/net/rxrpc/local_event.c
+++ b/net/rxrpc/local_event.c
@@ -72,7 +72,10 @@ static void rxrpc_send_version_request(struct rxrpc_local *local,
ret = kernel_sendmsg(local->socket, &msg, iov, 2, len);
if (ret < 0)
trace_rxrpc_tx_fail(local->debug_id, 0, ret,
- rxrpc_tx_fail_version_reply);
+ rxrpc_tx_point_version_reply);
+ else
+ trace_rxrpc_tx_packet(local->debug_id, &whdr,
+ rxrpc_tx_point_version_reply);
_leave("");
}
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index b493e6b62740..777c3ed4cfc0 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -305,7 +305,7 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
const void *here = __builtin_return_address(0);
if (local) {
- int n = __atomic_add_unless(&local->usage, 1, 0);
+ int n = atomic_fetch_add_unless(&local->usage, 1, 0);
if (n > 0)
trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
else
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 4774c8f5634d..ccf5de160444 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -183,7 +183,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
serial = atomic_inc_return(&conn->serial);
pkt->whdr.serial = htonl(serial);
- trace_rxrpc_tx_ack(call, serial,
+ trace_rxrpc_tx_ack(call->debug_id, serial,
ntohl(pkt->ack.firstPacket),
ntohl(pkt->ack.serial),
pkt->ack.reason, pkt->ack.nAcks);
@@ -212,7 +212,10 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
conn->params.peer->last_tx_at = ktime_get_seconds();
if (ret < 0)
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
- rxrpc_tx_fail_call_ack);
+ rxrpc_tx_point_call_ack);
+ else
+ trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
+ rxrpc_tx_point_call_ack);
if (call->state < RXRPC_CALL_COMPLETE) {
if (ret < 0) {
@@ -299,7 +302,10 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
conn->params.peer->last_tx_at = ktime_get_seconds();
if (ret < 0)
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
- rxrpc_tx_fail_call_abort);
+ rxrpc_tx_point_call_abort);
+ else
+ trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
+ rxrpc_tx_point_call_abort);
rxrpc_put_connection(conn);
@@ -396,7 +402,10 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
up_read(&conn->params.local->defrag_sem);
if (ret < 0)
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
- rxrpc_tx_fail_call_data_nofrag);
+ rxrpc_tx_point_call_data_nofrag);
+ else
+ trace_rxrpc_tx_packet(call->debug_id, &whdr,
+ rxrpc_tx_point_call_data_nofrag);
if (ret == -EMSGSIZE)
goto send_fragmentable;
@@ -488,7 +497,10 @@ send_fragmentable:
if (ret < 0)
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
- rxrpc_tx_fail_call_data_frag);
+ rxrpc_tx_point_call_data_frag);
+ else
+ trace_rxrpc_tx_packet(call->debug_id, &whdr,
+ rxrpc_tx_point_call_data_frag);
up_write(&conn->params.local->defrag_sem);
goto done;
@@ -545,7 +557,10 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
ret = kernel_sendmsg(local->socket, &msg, iov, 2, size);
if (ret < 0)
trace_rxrpc_tx_fail(local->debug_id, 0, ret,
- rxrpc_tx_fail_reject);
+ rxrpc_tx_point_reject);
+ else
+ trace_rxrpc_tx_packet(local->debug_id, &whdr,
+ rxrpc_tx_point_reject);
}
rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
@@ -597,7 +612,10 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len);
if (ret < 0)
trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
- rxrpc_tx_fail_version_keepalive);
+ rxrpc_tx_point_version_keepalive);
+ else
+ trace_rxrpc_tx_packet(peer->debug_id, &whdr,
+ rxrpc_tx_point_version_keepalive);
peer->last_tx_at = ktime_get_seconds();
_leave("");
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 24ec7cdcf332..1dc7648e3eff 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -406,7 +406,7 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
const void *here = __builtin_return_address(0);
if (peer) {
- int n = __atomic_add_unless(&peer->usage, 1, 0);
+ int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
if (n > 0)
trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here);
else
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index d9fca8c4bcdc..9805e3b85c36 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -63,6 +63,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
struct rxrpc_peer *peer;
struct rxrpc_call *call;
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+ unsigned long timeout = 0;
rxrpc_seq_t tx_hard_ack, rx_hard_ack;
char lbuff[50], rbuff[50];
@@ -71,7 +72,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
"Proto Local "
" Remote "
" SvID ConnID CallID End Use State Abort "
- " UserID\n");
+ " UserID TxSeq TW RxSeq RW RxSerial RxTimo\n");
return 0;
}
@@ -94,11 +95,16 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
else
strcpy(rbuff, "no_connection");
+ if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
+ timeout = READ_ONCE(call->expect_rx_by);
+ timeout -= jiffies;
+ }
+
tx_hard_ack = READ_ONCE(call->tx_hard_ack);
rx_hard_ack = READ_ONCE(call->rx_hard_ack);
seq_printf(seq,
"UDP %-47.47s %-47.47s %4x %08x %08x %s %3u"
- " %-8.8s %08x %lx %08x %02x %08x %02x\n",
+ " %-8.8s %08x %lx %08x %02x %08x %02x %08x %06lx\n",
lbuff,
rbuff,
call->service_id,
@@ -110,7 +116,9 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
call->abort_code,
call->user_call_ID,
tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack,
- rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack);
+ rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack,
+ call->rx_serial,
+ timeout);
return 0;
}
@@ -179,7 +187,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
print:
seq_printf(seq,
"UDP %-47.47s %-47.47s %4x %08x %s %3u"
- " %s %08x %08x %08x\n",
+ " %s %08x %08x %08x %08x %08x %08x %08x\n",
lbuff,
rbuff,
conn->service_id,
@@ -189,7 +197,11 @@ print:
rxrpc_conn_states[conn->state],
key_serial(conn->params.key),
atomic_read(&conn->serial),
- conn->hi_serial);
+ conn->hi_serial,
+ conn->channels[0].call_id,
+ conn->channels[1].call_id,
+ conn->channels[2].call_id,
+ conn->channels[3].call_id);
return 0;
}
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 7bff716e911e..816b19a78809 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -144,13 +144,11 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
-#if 0 // TODO: May want to transmit final ACK under some circumstances anyway
if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
- rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
+ rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, false, true,
rxrpc_propose_ack_terminal_ack);
- rxrpc_send_ack_packet(call, false, NULL);
+ //rxrpc_send_ack_packet(call, false, NULL);
}
-#endif
write_lock_bh(&call->state_lock);
@@ -315,6 +313,10 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
unsigned int rx_pkt_offset, rx_pkt_len;
int ix, copy, ret = -EAGAIN, ret2;
+ if (test_and_clear_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags) &&
+ call->ackr_reason)
+ rxrpc_send_ack_packet(call, false, NULL);
+
rx_pkt_offset = call->rx_pkt_offset;
rx_pkt_len = call->rx_pkt_len;
@@ -414,6 +416,8 @@ out:
done:
trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
rx_pkt_offset, rx_pkt_len, ret);
+ if (ret == -EAGAIN)
+ set_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags);
return ret;
}
@@ -607,9 +611,7 @@ wait_error:
* rxrpc_kernel_recv_data - Allow a kernel service to receive data/info
* @sock: The socket that the call exists on
* @call: The call to send data through
- * @buf: The buffer to receive into
- * @size: The size of the buffer, including data already read
- * @_offset: The running offset into the buffer.
+ * @iter: The buffer to receive into
* @want_more: True if more data is expected to be read
* @_abort: Where the abort code is stored if -ECONNABORTED is returned
* @_service: Where to store the actual service ID (may be upgraded)
@@ -622,39 +624,30 @@ wait_error:
* Note that we may return -EAGAIN to drain empty packets at the end of the
* data, even if we've already copied over the requested data.
*
- * This function adds the amount it transfers to *_offset, so this should be
- * precleared as appropriate. Note that the amount remaining in the buffer is
- * taken to be size - *_offset.
- *
* *_abort should also be initialised to 0.
*/
int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
- void *buf, size_t size, size_t *_offset,
+ struct iov_iter *iter,
bool want_more, u32 *_abort, u16 *_service)
{
- struct iov_iter iter;
- struct kvec iov;
+ size_t offset = 0;
int ret;
- _enter("{%d,%s},%zu/%zu,%d",
+ _enter("{%d,%s},%zu,%d",
call->debug_id, rxrpc_call_states[call->state],
- *_offset, size, want_more);
+ iov_iter_count(iter), want_more);
- ASSERTCMP(*_offset, <=, size);
ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING);
- iov.iov_base = buf + *_offset;
- iov.iov_len = size - *_offset;
- iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, size - *_offset);
-
mutex_lock(&call->user_mutex);
switch (READ_ONCE(call->state)) {
case RXRPC_CALL_CLIENT_RECV_REPLY:
case RXRPC_CALL_SERVER_RECV_REQUEST:
case RXRPC_CALL_SERVER_ACK_REQUEST:
- ret = rxrpc_recvmsg_data(sock, call, NULL, &iter, size, 0,
- _offset);
+ ret = rxrpc_recvmsg_data(sock, call, NULL, iter,
+ iov_iter_count(iter), 0,
+ &offset);
if (ret < 0)
goto out;
@@ -663,7 +656,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
* full buffer or have been given -EAGAIN.
*/
if (ret == 1) {
- if (*_offset < size)
+ if (iov_iter_count(iter) > 0)
goto short_data;
if (!want_more)
goto read_phase_complete;
@@ -686,10 +679,21 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
read_phase_complete:
ret = 1;
out:
+ switch (call->ackr_reason) {
+ case RXRPC_ACK_IDLE:
+ break;
+ case RXRPC_ACK_DELAY:
+ if (ret != -EAGAIN)
+ break;
+ /* Fall through */
+ default:
+ rxrpc_send_ack_packet(call, false, NULL);
+ }
+
if (_service)
*_service = call->service_id;
mutex_unlock(&call->user_mutex);
- _leave(" = %d [%zu,%d]", ret, *_offset, *_abort);
+ _leave(" = %d [%zu,%d]", ret, iov_iter_count(iter), *_abort);
return ret;
short_data:
@@ -705,7 +709,7 @@ call_complete:
ret = call->error;
if (call->completion == RXRPC_CALL_SUCCEEDED) {
ret = 1;
- if (size > 0)
+ if (iov_iter_count(iter) > 0)
ret = -ECONNRESET;
}
goto out;
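
With the new signature, a kernel-service caller builds the iov_iter
itself rather than passing buf/size/offset. A sketch of a converted call
site (hypothetical helper, assuming the usual af_rxrpc declarations),
reusing the iov_iter_kvec() form removed from the function body:

static int recv_reply(struct socket *sock, struct rxrpc_call *call,
		      void *buf, size_t len, u32 *abort_code)
{
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, len);
	/* On return, iov_iter_count(&iter) is the space left unfilled. */
	return rxrpc_kernel_recv_data(sock, call, &iter, false,
				      abort_code, NULL);
}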
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 47cb019c521a..cea16838d588 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -146,10 +146,10 @@ static int rxkad_prime_packet_security(struct rxrpc_connection *conn)
static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
struct sk_buff *skb,
u32 data_size,
- void *sechdr)
+ void *sechdr,
+ struct skcipher_request *req)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
struct rxkad_level1_hdr hdr;
struct rxrpc_crypt iv;
struct scatterlist sg;
@@ -183,12 +183,12 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
struct sk_buff *skb,
u32 data_size,
- void *sechdr)
+ void *sechdr,
+ struct skcipher_request *req)
{
const struct rxrpc_key_token *token;
struct rxkad_level2_hdr rxkhdr;
struct rxrpc_skb_priv *sp;
- SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
struct rxrpc_crypt iv;
struct scatterlist sg[16];
struct sk_buff *trailer;
@@ -296,11 +296,12 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
ret = 0;
break;
case RXRPC_SECURITY_AUTH:
- ret = rxkad_secure_packet_auth(call, skb, data_size, sechdr);
+ ret = rxkad_secure_packet_auth(call, skb, data_size, sechdr,
+ req);
break;
case RXRPC_SECURITY_ENCRYPT:
ret = rxkad_secure_packet_encrypt(call, skb, data_size,
- sechdr);
+ sechdr, req);
break;
default:
ret = -EPERM;
@@ -316,10 +317,10 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
*/
static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
unsigned int offset, unsigned int len,
- rxrpc_seq_t seq)
+ rxrpc_seq_t seq,
+ struct skcipher_request *req)
{
struct rxkad_level1_hdr sechdr;
- SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
struct rxrpc_crypt iv;
struct scatterlist sg[16];
struct sk_buff *trailer;
@@ -402,11 +403,11 @@ nomem:
*/
static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
unsigned int offset, unsigned int len,
- rxrpc_seq_t seq)
+ rxrpc_seq_t seq,
+ struct skcipher_request *req)
{
const struct rxrpc_key_token *token;
struct rxkad_level2_hdr sechdr;
- SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
struct rxrpc_crypt iv;
struct scatterlist _sg[4], *sg;
struct sk_buff *trailer;
@@ -549,9 +550,9 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
case RXRPC_SECURITY_PLAIN:
return 0;
case RXRPC_SECURITY_AUTH:
- return rxkad_verify_packet_1(call, skb, offset, len, seq);
+ return rxkad_verify_packet_1(call, skb, offset, len, seq, req);
case RXRPC_SECURITY_ENCRYPT:
- return rxkad_verify_packet_2(call, skb, offset, len, seq);
+ return rxkad_verify_packet_2(call, skb, offset, len, seq, req);
default:
return -ENOANO;
}
@@ -665,11 +666,13 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
if (ret < 0) {
trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
- rxrpc_tx_fail_conn_challenge);
+ rxrpc_tx_point_rxkad_challenge);
return -EAGAIN;
}
conn->params.peer->last_tx_at = ktime_get_seconds();
+ trace_rxrpc_tx_packet(conn->debug_id, &whdr,
+ rxrpc_tx_point_rxkad_challenge);
_leave(" = 0");
return 0;
}
@@ -721,7 +724,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 3, len);
if (ret < 0) {
trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
- rxrpc_tx_fail_conn_response);
+ rxrpc_tx_point_rxkad_response);
return -EAGAIN;
}
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index 4a7af7aff37d..d75bd15151e6 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -15,7 +15,6 @@
#include "ar-internal.h"
static struct ctl_table_header *rxrpc_sysctl_reg_table;
-static const unsigned int zero = 0;
static const unsigned int one = 1;
static const unsigned int four = 4;
static const unsigned int thirtytwo = 32;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index a01169fb5325..e95741388311 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -1,6 +1,6 @@
#
# Traffic control configuration.
-#
+#
menuconfig NET_SCHED
bool "QoS and/or fair queueing"
@@ -183,6 +183,17 @@ config NET_SCH_CBS
To compile this code as a module, choose M here: the
module will be called sch_cbs.
+config NET_SCH_ETF
+ tristate "Earliest TxTime First (ETF)"
+ help
+ Say Y here if you want to use the Earliest TxTime First (ETF) packet
+ scheduling algorithm.
+
+ See the top of <file:net/sched/sch_etf.c> for more details.
+
+ To compile this code as a module, choose M here: the
+ module will be called sch_etf.
+
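
ETF releases packets by a per-packet transmit time that applications
attach through the SO_TXTIME socket option from the same kernel cycle.
A userspace sketch (field names per linux/net_tstamp.h; the clockid must
match the one configured on the qdisc):

#include <time.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int enable_txtime(int fd)
{
	struct sock_txtime st = {
		.clockid	= CLOCK_TAI,
		.flags		= 0,
	};

	return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &st, sizeof(st));
}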
config NET_SCH_GRED
tristate "Generic Random Early Detection (GRED)"
---help---
@@ -240,6 +251,19 @@ config NET_SCH_MQPRIO
If unsure, say N.
+config NET_SCH_SKBPRIO
+ tristate "SKB priority queue scheduler (SKBPRIO)"
+ help
+ Say Y here if you want to use the SKB priority queue
+ scheduler. This schedules packets according to skb->priority,
+ which is useful for request packets in DoS mitigation systems such
+ as Gatekeeper.
+
+ To compile this driver as a module, choose M here: the module will
+ be called sch_skbprio.
+
+ If unsure, say N.
+
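
SKBPRIO keys strictly on skb->priority, which a local sender can set per
socket; a minimal sketch:

#include <sys/socket.h>

static int set_priority(int fd, int prio)
{
	/* 0..6 are available to unprivileged processes; higher
	 * values need CAP_NET_ADMIN. */
	return setsockopt(fd, SOL_SOCKET, SO_PRIORITY,
			  &prio, sizeof(prio));
}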
config NET_SCH_CHOKE
tristate "CHOose and Keep responsive flow scheduler (CHOKE)"
help
@@ -284,6 +308,17 @@ config NET_SCH_FQ_CODEL
If unsure, say N.
+config NET_SCH_CAKE
+ tristate "Common Applications Kept Enhanced (CAKE)"
+ help
+ Say Y here if you want to use the Common Applications Kept Enhanced
+ (CAKE) queue management algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_cake.
+
+ If unsure, say N.
+
config NET_SCH_FQ
tristate "Fair Queue"
help
@@ -684,7 +719,7 @@ config NET_CLS_ACT
config NET_ACT_POLICE
tristate "Traffic Policing"
- depends on NET_CLS_ACT
+ depends on NET_CLS_ACT
---help---
Say Y here if you want to do traffic policing, i.e. strict
bandwidth limiting. This action replaces the existing policing
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 8811d3804878..f0403f49edcb 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -33,7 +33,7 @@ obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
obj-$(CONFIG_NET_SCH_HFSC) += sch_hfsc.o
obj-$(CONFIG_NET_SCH_RED) += sch_red.o
obj-$(CONFIG_NET_SCH_GRED) += sch_gred.o
-obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o
+obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o
obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
obj-$(CONFIG_NET_SCH_SFB) += sch_sfb.o
obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
@@ -46,14 +46,17 @@ obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o
obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
+obj-$(CONFIG_NET_SCH_SKBPRIO) += sch_skbprio.o
obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o
+obj-$(CONFIG_NET_SCH_CAKE) += sch_cake.o
obj-$(CONFIG_NET_SCH_FQ) += sch_fq.o
obj-$(CONFIG_NET_SCH_HHF) += sch_hhf.o
obj-$(CONFIG_NET_SCH_PIE) += sch_pie.o
obj-$(CONFIG_NET_SCH_CBS) += sch_cbs.o
+obj-$(CONFIG_NET_SCH_ETF) += sch_etf.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 3f4cf930f809..229d63c99be2 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -36,7 +36,7 @@ static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
if (!tp)
return -EINVAL;
- a->goto_chain = tcf_chain_get(tp->chain->block, chain_index, true);
+ a->goto_chain = tcf_chain_get_by_act(tp->chain->block, chain_index);
if (!a->goto_chain)
return -ENOMEM;
return 0;
@@ -44,7 +44,7 @@ static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
static void tcf_action_goto_chain_fini(struct tc_action *a)
{
- tcf_chain_put(a->goto_chain);
+ tcf_chain_put_by_act(a->goto_chain);
}
static void tcf_action_goto_chain_exec(const struct tc_action *a,
@@ -55,6 +55,24 @@ static void tcf_action_goto_chain_exec(const struct tc_action *a,
res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}
+static void tcf_free_cookie_rcu(struct rcu_head *p)
+{
+ struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);
+
+ kfree(cookie->data);
+ kfree(cookie);
+}
+
+static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
+ struct tc_cookie *new_cookie)
+{
+ struct tc_cookie *old;
+
+ old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
+ if (old)
+ call_rcu(&old->rcu, tcf_free_cookie_rcu);
+}
+
/* XXX: For standalone actions, we don't need a RCU grace period either, because
* actions are always connected to filters and filters are already destroyed in
* RCU callbacks, so after a RCU grace period actions are already disconnected
@@ -65,44 +83,64 @@ static void free_tcf(struct tc_action *p)
free_percpu(p->cpu_bstats);
free_percpu(p->cpu_qstats);
- if (p->act_cookie) {
- kfree(p->act_cookie->data);
- kfree(p->act_cookie);
- }
+ tcf_set_action_cookie(&p->act_cookie, NULL);
if (p->goto_chain)
tcf_action_goto_chain_fini(p);
kfree(p);
}
-static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
+static void tcf_action_cleanup(struct tc_action *p)
{
- spin_lock(&idrinfo->lock);
- idr_remove(&idrinfo->action_idr, p->tcfa_index);
- spin_unlock(&idrinfo->lock);
+ if (p->ops->cleanup)
+ p->ops->cleanup(p);
+
gen_kill_estimator(&p->tcfa_rate_est);
free_tcf(p);
}
+static int __tcf_action_put(struct tc_action *p, bool bind)
+{
+ struct tcf_idrinfo *idrinfo = p->idrinfo;
+
+ if (refcount_dec_and_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
+ if (bind)
+ atomic_dec(&p->tcfa_bindcnt);
+ idr_remove(&idrinfo->action_idr, p->tcfa_index);
+ spin_unlock(&idrinfo->lock);
+
+ tcf_action_cleanup(p);
+ return 1;
+ }
+
+ if (bind)
+ atomic_dec(&p->tcfa_bindcnt);
+
+ return 0;
+}
+
int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
int ret = 0;
- ASSERT_RTNL();
-
+	/* Release with strict==1 and bind==0 is only called through the act
+	 * API interface (classifiers always bind). The only case when an
+	 * action with a positive reference count and a zero bind count can
+	 * exist is when it was also created via the act API (unbinding the
+	 * last classifier destroys an action that a classifier created). So
+	 * the only case when the bind count can change after the initial
+	 * check is when an unbound action is destroyed by the act API while
+	 * a classifier concurrently binds to an action with the same id.
+	 * This results either in the creation of a new action (same behavior
+	 * as before) or in reuse of the existing action, if the concurrent
+	 * process increments the reference count before the action is
+	 * deleted. Both scenarios are acceptable.
+	 */
if (p) {
- if (bind)
- p->tcfa_bindcnt--;
- else if (strict && p->tcfa_bindcnt > 0)
+ if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
return -EPERM;
- p->tcfa_refcnt--;
- if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
- if (p->ops->cleanup)
- p->ops->cleanup(p);
- tcf_idr_remove(p->idrinfo, p);
+ if (__tcf_action_put(p, bind))
ret = ACT_P_DELETED;
- }
}
return ret;
@@ -111,10 +149,15 @@ EXPORT_SYMBOL(__tcf_idr_release);
static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
+ struct tc_cookie *act_cookie;
u32 cookie_len = 0;
- if (act->act_cookie)
- cookie_len = nla_total_size(act->act_cookie->len);
+ rcu_read_lock();
+ act_cookie = rcu_dereference(act->act_cookie);
+
+ if (act_cookie)
+ cookie_len = nla_total_size(act_cookie->len);
+ rcu_read_unlock();
return nla_total_size(0) /* action number nested */
+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
@@ -257,46 +300,77 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
}
EXPORT_SYMBOL(tcf_generic_walker);
-static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
+static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
+ struct tc_action **a, int bind)
{
- struct tc_action *p = NULL;
+ struct tcf_idrinfo *idrinfo = tn->idrinfo;
+ struct tc_action *p;
spin_lock(&idrinfo->lock);
p = idr_find(&idrinfo->action_idr, index);
+ if (IS_ERR(p)) {
+ p = NULL;
+ } else if (p) {
+ refcount_inc(&p->tcfa_refcnt);
+ if (bind)
+ atomic_inc(&p->tcfa_bindcnt);
+ }
spin_unlock(&idrinfo->lock);
- return p;
+ if (p) {
+ *a = p;
+ return true;
+ }
+ return false;
}
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
- struct tcf_idrinfo *idrinfo = tn->idrinfo;
- struct tc_action *p = tcf_idr_lookup(index, idrinfo);
-
- if (p) {
- *a = p;
- return 1;
- }
- return 0;
+ return __tcf_idr_check(tn, index, a, 0);
}
EXPORT_SYMBOL(tcf_idr_search);
bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
int bind)
{
+ return __tcf_idr_check(tn, index, a, bind);
+}
+EXPORT_SYMBOL(tcf_idr_check);
+
+int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
+{
struct tcf_idrinfo *idrinfo = tn->idrinfo;
- struct tc_action *p = tcf_idr_lookup(index, idrinfo);
+ struct tc_action *p;
+ int ret = 0;
- if (index && p) {
- if (bind)
- p->tcfa_bindcnt++;
- p->tcfa_refcnt++;
- *a = p;
- return true;
+ spin_lock(&idrinfo->lock);
+ p = idr_find(&idrinfo->action_idr, index);
+ if (!p) {
+ spin_unlock(&idrinfo->lock);
+ return -ENOENT;
}
- return false;
+
+ if (!atomic_read(&p->tcfa_bindcnt)) {
+ if (refcount_dec_and_test(&p->tcfa_refcnt)) {
+ struct module *owner = p->ops->owner;
+
+ WARN_ON(p != idr_remove(&idrinfo->action_idr,
+ p->tcfa_index));
+ spin_unlock(&idrinfo->lock);
+
+ tcf_action_cleanup(p);
+ module_put(owner);
+ return 0;
+ }
+ ret = 0;
+ } else {
+ ret = -EPERM;
+ }
+
+ spin_unlock(&idrinfo->lock);
+ return ret;
}
-EXPORT_SYMBOL(tcf_idr_check);
+EXPORT_SYMBOL(tcf_idr_delete_index);
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
struct tc_action **a, const struct tc_action_ops *ops,
@@ -304,14 +378,13 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
{
struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
struct tcf_idrinfo *idrinfo = tn->idrinfo;
- struct idr *idr = &idrinfo->action_idr;
int err = -ENOMEM;
if (unlikely(!p))
return -ENOMEM;
- p->tcfa_refcnt = 1;
+ refcount_set(&p->tcfa_refcnt, 1);
if (bind)
- p->tcfa_bindcnt = 1;
+ atomic_set(&p->tcfa_bindcnt, 1);
if (cpustats) {
p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
@@ -322,20 +395,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
goto err2;
}
spin_lock_init(&p->tcfa_lock);
- idr_preload(GFP_KERNEL);
- spin_lock(&idrinfo->lock);
- /* user doesn't specify an index */
- if (!index) {
- index = 1;
- err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC);
- } else {
- err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);
- }
- spin_unlock(&idrinfo->lock);
- idr_preload_end();
- if (err)
- goto err3;
-
p->tcfa_index = index;
p->tcfa_tm.install = jiffies;
p->tcfa_tm.lastuse = jiffies;
@@ -345,7 +404,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
&p->tcfa_rate_est,
&p->tcfa_lock, NULL, est);
if (err)
- goto err4;
+ goto err3;
}
p->idrinfo = idrinfo;
@@ -353,8 +412,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
INIT_LIST_HEAD(&p->list);
*a = p;
return 0;
-err4:
- idr_remove(idr, index);
err3:
free_percpu(p->cpu_qstats);
err2:
@@ -370,11 +427,78 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
struct tcf_idrinfo *idrinfo = tn->idrinfo;
spin_lock(&idrinfo->lock);
- idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
+ /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
+ WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index)));
spin_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_insert);
+/* Clean up an idr index that was allocated but not initialized. */
+
+void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
+{
+ struct tcf_idrinfo *idrinfo = tn->idrinfo;
+
+ spin_lock(&idrinfo->lock);
+ /* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
+ WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
+ spin_unlock(&idrinfo->lock);
+}
+EXPORT_SYMBOL(tcf_idr_cleanup);
+
+/* Check if action with specified index exists. If the action is found,
+ * increment its reference and bind counters and return 1. Otherwise insert
+ * a temporary error pointer (to prevent concurrent users from inserting
+ * actions with the same index) and return 0.
+ */
+
+int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+ struct tc_action **a, int bind)
+{
+ struct tcf_idrinfo *idrinfo = tn->idrinfo;
+ struct tc_action *p;
+ int ret;
+
+again:
+ spin_lock(&idrinfo->lock);
+ if (*index) {
+ p = idr_find(&idrinfo->action_idr, *index);
+ if (IS_ERR(p)) {
+ /* This means that another process allocated
+ * index but did not assign the pointer yet.
+ */
+ spin_unlock(&idrinfo->lock);
+ goto again;
+ }
+
+ if (p) {
+ refcount_inc(&p->tcfa_refcnt);
+ if (bind)
+ atomic_inc(&p->tcfa_bindcnt);
+ *a = p;
+ ret = 1;
+ } else {
+ *a = NULL;
+ ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
+ *index, GFP_ATOMIC);
+ if (!ret)
+ idr_replace(&idrinfo->action_idr,
+ ERR_PTR(-EBUSY), *index);
+ }
+ } else {
+ *index = 1;
+ *a = NULL;
+ ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
+ UINT_MAX, GFP_ATOMIC);
+ if (!ret)
+ idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
+ *index);
+ }
+ spin_unlock(&idrinfo->lock);
+ return ret;
+}
+EXPORT_SYMBOL(tcf_idr_check_alloc);
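/* Editor's sketch, not part of the patch: the three-way result of
 * tcf_idr_check_alloc() drives the init pattern that all the act_*
 * conversions below follow ("foo" and its ops are hypothetical):
 *
 *	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
 *	if (err < 0)		// lookup raced or allocation failed
 *		return err;
 *	if (err) {		// existing action found, reference taken
 *		if (bind)
 *			return 0;
 *		if (!ovr) {
 *			tcf_idr_release(*a, bind);
 *			return -EEXIST;
 *		}
 *	} else {		// index reserved with ERR_PTR(-EBUSY)
 *		err = tcf_idr_create(tn, parm->index, est, a,
 *				     &act_foo_ops, bind, true);
 *		if (err) {
 *			tcf_idr_cleanup(tn, parm->index);
 *			return err;
 *		}
 *	}
 */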
+
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
struct tcf_idrinfo *idrinfo)
{
@@ -538,13 +662,15 @@ repeat:
}
EXPORT_SYMBOL(tcf_action_exec);
-int tcf_action_destroy(struct list_head *actions, int bind)
+int tcf_action_destroy(struct tc_action *actions[], int bind)
{
const struct tc_action_ops *ops;
- struct tc_action *a, *tmp;
- int ret = 0;
+ struct tc_action *a;
+ int ret = 0, i;
- list_for_each_entry_safe(a, tmp, actions, list) {
+ for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+ a = actions[i];
+ actions[i] = NULL;
ops = a->ops;
ret = __tcf_idr_release(a, bind, true);
if (ret == ACT_P_DELETED)
@@ -555,6 +681,24 @@ int tcf_action_destroy(struct list_head *actions, int bind)
return ret;
}
+static int tcf_action_put(struct tc_action *p)
+{
+ return __tcf_action_put(p, false);
+}
+
+static void tcf_action_put_many(struct tc_action *actions[])
+{
+ int i;
+
+ for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+ struct tc_action *a = actions[i];
+ const struct tc_action_ops *ops = a->ops;
+
+ if (tcf_action_put(a))
+ module_put(ops->owner);
+ }
+}
+
int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
@@ -567,16 +711,22 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
int err = -EINVAL;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
+ struct tc_cookie *cookie;
if (nla_put_string(skb, TCA_KIND, a->ops->kind))
goto nla_put_failure;
if (tcf_action_copy_stats(skb, a, 0))
goto nla_put_failure;
- if (a->act_cookie) {
- if (nla_put(skb, TCA_ACT_COOKIE, a->act_cookie->len,
- a->act_cookie->data))
+
+ rcu_read_lock();
+ cookie = rcu_dereference(a->act_cookie);
+ if (cookie) {
+ if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
+ rcu_read_unlock();
goto nla_put_failure;
+ }
}
+ rcu_read_unlock();
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
@@ -593,14 +743,15 @@ nla_put_failure:
}
EXPORT_SYMBOL(tcf_action_dump_1);
-int tcf_action_dump(struct sk_buff *skb, struct list_head *actions,
+int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
int bind, int ref)
{
struct tc_action *a;
- int err = -EINVAL;
+ int err = -EINVAL, i;
struct nlattr *nest;
- list_for_each_entry(a, actions, list) {
+ for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+ a = actions[i];
nest = nla_nest_start(skb, a->order);
if (nest == NULL)
goto nla_put_failure;
@@ -635,9 +786,19 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
return c;
}
+static bool tcf_action_valid(int action)
+{
+ int opcode = TC_ACT_EXT_OPCODE(action);
+
+ if (!opcode)
+ return action <= TC_ACT_VALUE_MAX;
+ return opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC;
+}
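/* Editor's note: plain values (opcode 0) are accepted up to
 * TC_ACT_VALUE_MAX (e.g. TC_ACT_SHOT or TC_ACT_TRAP), and extended values
 * up to TC_ACT_EXT_OPCODE_MAX - e.g. TC_ACT_JUMP | 3 or
 * TC_ACT_GOTO_CHAIN | chain_index. An out-of-range value is not rejected;
 * it is rewritten to TC_ACT_UNSPEC in tcf_action_init_1() below.
 */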
+
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
struct nlattr *nla, struct nlattr *est,
char *name, int ovr, int bind,
+ bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct tc_action *a;
@@ -688,9 +849,11 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
a_o = tc_lookup_action_n(act_name);
if (a_o == NULL) {
#ifdef CONFIG_MODULES
- rtnl_unlock();
+ if (rtnl_held)
+ rtnl_unlock();
request_module("act_%s", act_name);
- rtnl_lock();
+ if (rtnl_held)
+ rtnl_lock();
a_o = tc_lookup_action_n(act_name);
@@ -713,19 +876,15 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
/* backward compatibility for policer */
if (name == NULL)
err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
- extack);
+ rtnl_held, extack);
else
- err = a_o->init(net, nla, est, &a, ovr, bind, extack);
+ err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
+ extack);
if (err < 0)
goto err_mod;
- if (name == NULL && tb[TCA_ACT_COOKIE]) {
- if (a->act_cookie) {
- kfree(a->act_cookie->data);
- kfree(a->act_cookie);
- }
- a->act_cookie = cookie;
- }
+ if (!name && tb[TCA_ACT_COOKIE])
+ tcf_set_action_cookie(&a->act_cookie, cookie);
/* module count goes up only when brand new policy is created
* if it exists and is only bound to in a_o->init() then
@@ -737,15 +896,19 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
err = tcf_action_goto_chain_init(a, tp);
if (err) {
- LIST_HEAD(actions);
+ struct tc_action *actions[] = { a, NULL };
- list_add_tail(&a->list, &actions);
- tcf_action_destroy(&actions, bind);
+ tcf_action_destroy(actions, bind);
NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
return ERR_PTR(err);
}
}
+ if (!tcf_action_valid(a->tcfa_action)) {
+ NL_SET_ERR_MSG(extack, "invalid action value, using TC_ACT_UNSPEC instead");
+ a->tcfa_action = TC_ACT_UNSPEC;
+ }
+
return a;
err_mod:
@@ -758,21 +921,12 @@ err_out:
return ERR_PTR(err);
}
-static void cleanup_a(struct list_head *actions, int ovr)
-{
- struct tc_action *a;
-
- if (!ovr)
- return;
-
- list_for_each_entry(a, actions, list)
- a->tcfa_refcnt--;
-}
+/* Returns the number of initialized actions or a negative error. */
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
struct nlattr *est, char *name, int ovr, int bind,
- struct list_head *actions, size_t *attr_size,
- struct netlink_ext_ack *extack)
+ struct tc_action *actions[], size_t *attr_size,
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *act;
@@ -786,25 +940,19 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
- extack);
+ rtnl_held, extack);
if (IS_ERR(act)) {
err = PTR_ERR(act);
goto err;
}
act->order = i;
sz += tcf_action_fill_size(act);
- if (ovr)
- act->tcfa_refcnt++;
- list_add_tail(&act->list, actions);
+ /* Start from index 0 */
+ actions[i - 1] = act;
}
*attr_size = tcf_action_full_attrs_size(sz);
-
- /* Remove the temp refcnt which was necessary to protect against
- * destroying an existing action which was being replaced
- */
- cleanup_a(actions, ovr);
- return 0;
+ return i - 1;
err:
tcf_action_destroy(actions, bind);
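/* Editor's note: because tcf_action_init() now returns the count of
 * initialized actions, callers must check for ret < 0 rather than a bare
 * "if (ret)", as the tcf_action_add() hunk below does - otherwise a
 * successful "one or more actions initialized" result would be treated
 * as an error.
 */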
@@ -855,7 +1003,7 @@ errout:
return -1;
}
-static int tca_get_fill(struct sk_buff *skb, struct list_head *actions,
+static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
u32 portid, u32 seq, u16 flags, int event, int bind,
int ref)
{
@@ -891,7 +1039,7 @@ out_nlmsg_trim:
static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
- struct list_head *actions, int event,
+ struct tc_action *actions[], int event,
struct netlink_ext_ack *extack)
{
struct sk_buff *skb;
@@ -900,7 +1048,7 @@ tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
if (!skb)
return -ENOBUFS;
if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
- 0, 0) <= 0) {
+ 0, 1) <= 0) {
NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
kfree_skb(skb);
return -EINVAL;
@@ -1027,9 +1175,41 @@ err_out:
return err;
}
+static int tcf_action_delete(struct net *net, struct tc_action *actions[],
+ int *acts_deleted, struct netlink_ext_ack *extack)
+{
+ u32 act_index;
+ int ret, i;
+
+ for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+ struct tc_action *a = actions[i];
+ const struct tc_action_ops *ops = a->ops;
+
+ /* Actions can be deleted concurrently, so save the type and index
+ * now; they are needed to complete the deletion once our own
+ * reference is released.
+ */
+ act_index = a->tcfa_index;
+
+ if (tcf_action_put(a)) {
+ /* last reference, action was deleted concurrently */
+ module_put(ops->owner);
+ } else {
+ /* now do the delete */
+ ret = ops->delete(net, act_index);
+ if (ret < 0) {
+ *acts_deleted = i + 1;
+ return ret;
+ }
+ }
+ }
+ *acts_deleted = i;
+ return 0;
+}
+
static int
-tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
- u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
+tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
+ int *acts_deleted, u32 portid, size_t attr_size,
+ struct netlink_ext_ack *extack)
{
int ret;
struct sk_buff *skb;
@@ -1040,14 +1220,14 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
return -ENOBUFS;
if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
- 0, 1) <= 0) {
+ 0, 2) <= 0) {
NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
kfree_skb(skb);
return -EINVAL;
}
/* now do the delete */
- ret = tcf_action_destroy(actions, 0);
+ ret = tcf_action_delete(net, actions, acts_deleted, extack);
if (ret < 0) {
NL_SET_ERR_MSG(extack, "Failed to delete TC action");
kfree_skb(skb);
@@ -1069,7 +1249,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *act;
size_t attr_size = 0;
- LIST_HEAD(actions);
+ struct tc_action *actions[TCA_ACT_MAX_PRIO + 1] = {};
+ int acts_deleted = 0;
ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
if (ret < 0)
@@ -1091,27 +1272,27 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
}
act->order = i;
attr_size += tcf_action_fill_size(act);
- list_add_tail(&act->list, &actions);
+ actions[i - 1] = act;
}
attr_size = tcf_action_full_attrs_size(attr_size);
if (event == RTM_GETACTION)
- ret = tcf_get_notify(net, portid, n, &actions, event, extack);
+ ret = tcf_get_notify(net, portid, n, actions, event, extack);
else { /* delete */
- ret = tcf_del_notify(net, n, &actions, portid, attr_size, extack);
+ ret = tcf_del_notify(net, n, actions, &acts_deleted, portid,
+ attr_size, extack);
if (ret)
goto err;
return ret;
}
err:
- if (event != RTM_GETACTION)
- tcf_action_destroy(&actions, 0);
+ tcf_action_put_many(&actions[acts_deleted]);
return ret;
}
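/* Editor's note: acts_deleted lets the error path release only the
 * actions that tcf_action_delete() did not handle - references up to and
 * including the failing entry were already dropped inside it, so
 * tcf_action_put_many() starts at &actions[acts_deleted].
 */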
static int
-tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
+tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
struct sk_buff *skb;
@@ -1142,14 +1323,17 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
{
size_t attr_size = 0;
int ret = 0;
- LIST_HEAD(actions);
+ struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
- ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions,
- &attr_size, extack);
- if (ret)
+ ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
+ &attr_size, true, extack);
+ if (ret < 0)
return ret;
+ ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
+ if (ovr)
+ tcf_action_put_many(actions);
- return tcf_add_notify(net, n, &actions, portid, attr_size, extack);
+ return ret;
}
static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 18089c02e557..9b30e62805c7 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -34,8 +34,8 @@ struct tcf_bpf_cfg {
static unsigned int bpf_net_id;
static struct tc_action_ops act_bpf_ops;
-static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
- struct tcf_result *res)
+static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
+ struct tcf_result *res)
{
bool at_ingress = skb_at_tc_ingress(skb);
struct tcf_bpf *prog = to_bpf(act);
@@ -141,13 +141,14 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
struct tcf_bpf *prog = to_bpf(act);
struct tc_act_bpf opt = {
.index = prog->tcf_index,
- .refcnt = prog->tcf_refcnt - ref,
- .bindcnt = prog->tcf_bindcnt - bind,
- .action = prog->tcf_action,
+ .refcnt = refcount_read(&prog->tcf_refcnt) - ref,
+ .bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
};
struct tcf_t tm;
int ret;
+ spin_lock(&prog->tcf_lock);
+ opt.action = prog->tcf_action;
if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -163,9 +164,11 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
TCA_ACT_BPF_PAD))
goto nla_put_failure;
+ spin_unlock(&prog->tcf_lock);
return skb->len;
nla_put_failure:
+ spin_unlock(&prog->tcf_lock);
nlmsg_trim(skb, tp);
return -1;
}
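/* Editor's note: this is the recurring conversion in every dump() below -
 * fields that can change after init (here tcf_action) are no longer read
 * under RTNL but snapshotted under the per-action tcf_lock, and both the
 * success path and the nla_put_failure path must drop the lock before
 * returning.
 */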
@@ -196,12 +199,10 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
return -EINVAL;
- bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
+ bpf_ops = kmemdup(nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size, GFP_KERNEL);
if (bpf_ops == NULL)
return -ENOMEM;
- memcpy(bpf_ops, nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size);
-
fprog_tmp.len = bpf_num_ops;
fprog_tmp.filter = bpf_ops;
@@ -266,7 +267,7 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
{
cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
/* updates to prog->filter are prevented, since it's called either
- * with rtnl lock or during final cleanup in rcu callback
+ * with tcf lock or during final cleanup in rcu callback
*/
cfg->filter = rcu_dereference_protected(prog->filter, 1);
@@ -276,7 +277,8 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act,
- int replace, int bind, struct netlink_ext_ack *extack)
+ int replace, int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, bpf_net_id);
struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
@@ -298,21 +300,27 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
- if (!tcf_idr_check(tn, parm->index, act, bind)) {
+ ret = tcf_idr_check_alloc(tn, &parm->index, act, bind);
+ if (!ret) {
ret = tcf_idr_create(tn, parm->index, est, act,
&act_bpf_ops, bind, true);
- if (ret < 0)
+ if (ret < 0) {
+ tcf_idr_cleanup(tn, parm->index);
return ret;
+ }
res = ACT_P_CREATED;
- } else {
+ } else if (ret > 0) {
/* Don't override defaults. */
if (bind)
return 0;
- tcf_idr_release(*act, bind);
- if (!replace)
+ if (!replace) {
+ tcf_idr_release(*act, bind);
return -EEXIST;
+ }
+ } else {
+ return ret;
}
is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
@@ -331,8 +339,8 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
goto out;
prog = to_bpf(*act);
- ASSERT_RTNL();
+ spin_lock(&prog->tcf_lock);
if (res != ACT_P_CREATED)
tcf_bpf_prog_fill_cfg(prog, &old);
@@ -344,6 +352,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
prog->tcf_action = parm->action;
rcu_assign_pointer(prog->filter, cfg.filter);
+ spin_unlock(&prog->tcf_lock);
if (res == ACT_P_CREATED) {
tcf_idr_insert(tn, *act);
@@ -355,8 +364,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
return res;
out:
- if (res == ACT_P_CREATED)
- tcf_idr_release(*act, bind);
+ tcf_idr_release(*act, bind);
return ret;
}
@@ -387,16 +395,24 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static int tcf_bpf_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, bpf_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_bpf_ops __read_mostly = {
.kind = "bpf",
.type = TCA_ACT_BPF,
.owner = THIS_MODULE,
- .act = tcf_bpf,
+ .act = tcf_bpf_act,
.dump = tcf_bpf_dump,
.cleanup = tcf_bpf_cleanup,
.init = tcf_bpf_init,
.walk = tcf_bpf_walker,
.lookup = tcf_bpf_search,
+ .delete = tcf_bpf_delete,
.size = sizeof(struct tcf_bpf),
};
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index e4b880fa51fe..54c0bf54f2ac 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -31,8 +31,8 @@
static unsigned int connmark_net_id;
static struct tc_action_ops act_connmark_ops;
-static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
- struct tcf_result *res)
+static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
{
const struct nf_conntrack_tuple_hash *thash;
struct nf_conntrack_tuple tuple;
@@ -96,7 +96,7 @@ static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = {
static int tcf_connmark_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
- int ovr, int bind,
+ int ovr, int bind, bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, connmark_net_id);
@@ -118,11 +118,14 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
parm = nla_data(tb[TCA_CONNMARK_PARMS]);
- if (!tcf_idr_check(tn, parm->index, a, bind)) {
+ ret = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ if (!ret) {
ret = tcf_idr_create(tn, parm->index, est, a,
&act_connmark_ops, bind, false);
- if (ret)
+ if (ret) {
+ tcf_idr_cleanup(tn, parm->index);
return ret;
+ }
ci = to_connmark(*a);
ci->tcf_action = parm->action;
@@ -131,16 +134,18 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
tcf_idr_insert(tn, *a);
ret = ACT_P_CREATED;
- } else {
+ } else if (ret > 0) {
ci = to_connmark(*a);
if (bind)
return 0;
- tcf_idr_release(*a, bind);
- if (!ovr)
+ if (!ovr) {
+ tcf_idr_release(*a, bind);
return -EEXIST;
+ }
/* replacing action and zone */
ci->tcf_action = parm->action;
ci->zone = parm->zone;
+ ret = 0;
}
return ret;
@@ -154,8 +159,8 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
struct tc_connmark opt = {
.index = ci->tcf_index,
- .refcnt = ci->tcf_refcnt - ref,
- .bindcnt = ci->tcf_bindcnt - bind,
+ .refcnt = refcount_read(&ci->tcf_refcnt) - ref,
+ .bindcnt = atomic_read(&ci->tcf_bindcnt) - bind,
.action = ci->tcf_action,
.zone = ci->zone,
};
@@ -193,15 +198,23 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static int tcf_connmark_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, connmark_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_connmark_ops = {
.kind = "connmark",
.type = TCA_ACT_CONNMARK,
.owner = THIS_MODULE,
- .act = tcf_connmark,
+ .act = tcf_connmark_act,
.dump = tcf_connmark_dump,
.init = tcf_connmark_init,
.walk = tcf_connmark_walker,
.lookup = tcf_connmark_search,
+ .delete = tcf_connmark_delete,
.size = sizeof(struct tcf_connmark_info),
};
@@ -239,4 +252,3 @@ module_exit(connmark_cleanup_module);
MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
MODULE_DESCRIPTION("Connection tracking mark restoring");
MODULE_LICENSE("GPL");
-
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 6e7124e57918..5596fae4e478 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -46,10 +46,11 @@ static struct tc_action_ops act_csum_ops;
static int tcf_csum_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
- int bind, struct netlink_ext_ack *extack)
+ int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, csum_net_id);
- struct tcf_csum_params *params_old, *params_new;
+ struct tcf_csum_params *params_new;
struct nlattr *tb[TCA_CSUM_MAX + 1];
struct tc_csum *parm;
struct tcf_csum *p;
@@ -66,36 +67,43 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
return -EINVAL;
parm = nla_data(tb[TCA_CSUM_PARMS]);
- if (!tcf_idr_check(tn, parm->index, a, bind)) {
+ err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ if (!err) {
ret = tcf_idr_create(tn, parm->index, est, a,
&act_csum_ops, bind, true);
- if (ret)
+ if (ret) {
+ tcf_idr_cleanup(tn, parm->index);
return ret;
+ }
ret = ACT_P_CREATED;
- } else {
+ } else if (err > 0) {
if (bind)/* dont override defaults */
return 0;
- tcf_idr_release(*a, bind);
- if (!ovr)
+ if (!ovr) {
+ tcf_idr_release(*a, bind);
return -EEXIST;
+ }
+ } else {
+ return err;
}
p = to_tcf_csum(*a);
- ASSERT_RTNL();
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
if (unlikely(!params_new)) {
- if (ret == ACT_P_CREATED)
- tcf_idr_release(*a, bind);
+ tcf_idr_release(*a, bind);
return -ENOMEM;
}
- params_old = rtnl_dereference(p->params);
+ params_new->update_flags = parm->update_flags;
+ spin_lock(&p->tcf_lock);
p->tcf_action = parm->action;
- params_new->update_flags = parm->update_flags;
- rcu_assign_pointer(p->params, params_new);
- if (params_old)
- kfree_rcu(params_old, rcu);
+ rcu_swap_protected(p->params, params_new,
+ lockdep_is_held(&p->tcf_lock));
+ spin_unlock(&p->tcf_lock);
+
+ if (params_new)
+ kfree_rcu(params_new, rcu);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
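/* Editor's note: rcu_swap_protected() exchanges the published pointer
 * with the new params while tcf_lock is held, so after the call
 * "params_new" holds the *old* block, which is then freed after a grace
 * period via kfree_rcu(). Roughly (see <linux/rcupdate.h> for the real
 * macro):
 *
 *	#define rcu_swap_protected(rcu_ptr, ptr, c) do {		\
 *		typeof(ptr) __tmp =					\
 *			rcu_dereference_protected((rcu_ptr), (c));	\
 *		rcu_assign_pointer((rcu_ptr), (ptr));			\
 *		(ptr) = __tmp;						\
 *	} while (0)
 */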
@@ -547,23 +555,22 @@ fail:
return 0;
}
-static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
- struct tcf_result *res)
+static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
{
struct tcf_csum *p = to_tcf_csum(a);
struct tcf_csum_params *params;
u32 update_flags;
int action;
- rcu_read_lock();
- params = rcu_dereference(p->params);
+ params = rcu_dereference_bh(p->params);
tcf_lastuse_update(&p->tcf_tm);
bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
action = READ_ONCE(p->tcf_action);
if (unlikely(action == TC_ACT_SHOT))
- goto drop_stats;
+ goto drop;
update_flags = params->update_flags;
switch (tc_skb_protocol(skb)) {
@@ -577,16 +584,11 @@ static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
break;
}
-unlock:
- rcu_read_unlock();
return action;
drop:
- action = TC_ACT_SHOT;
-
-drop_stats:
qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
- goto unlock;
+ return TC_ACT_SHOT;
}
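/* Editor's note: the explicit rcu_read_lock()/unlock() pair is gone
 * because the action datapath already runs inside an RCU-bh read-side
 * critical section (softirq); rcu_dereference_bh() documents exactly that
 * assumption and keeps lockdep satisfied.
 */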
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
@@ -597,13 +599,15 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
struct tcf_csum_params *params;
struct tc_csum opt = {
.index = p->tcf_index,
- .refcnt = p->tcf_refcnt - ref,
- .bindcnt = p->tcf_bindcnt - bind,
- .action = p->tcf_action,
+ .refcnt = refcount_read(&p->tcf_refcnt) - ref,
+ .bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
};
struct tcf_t t;
- params = rtnl_dereference(p->params);
+ spin_lock(&p->tcf_lock);
+ params = rcu_dereference_protected(p->params,
+ lockdep_is_held(&p->tcf_lock));
+ opt.action = p->tcf_action;
opt.update_flags = params->update_flags;
if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
@@ -612,10 +616,12 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
tcf_tm_dump(&t, &p->tcf_tm);
if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
goto nla_put_failure;
+ spin_unlock(&p->tcf_lock);
return skb->len;
nla_put_failure:
+ spin_unlock(&p->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -653,17 +659,25 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act)
return nla_total_size(sizeof(struct tc_csum));
}
+static int tcf_csum_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, csum_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_csum_ops = {
.kind = "csum",
.type = TCA_ACT_CSUM,
.owner = THIS_MODULE,
- .act = tcf_csum,
+ .act = tcf_csum_act,
.dump = tcf_csum_dump,
.init = tcf_csum_init,
.cleanup = tcf_csum_cleanup,
.walk = tcf_csum_walker,
.lookup = tcf_csum_search,
.get_fill_size = tcf_csum_get_fill_size,
+ .delete = tcf_csum_delete,
.size = sizeof(struct tcf_csum),
};
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 4dc4f153cad8..52a3e474d822 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -56,7 +56,8 @@ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
static int tcf_gact_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
- int ovr, int bind, struct netlink_ext_ack *extack)
+ int ovr, int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, gact_net_id);
struct nlattr *tb[TCA_GACT_MAX + 1];
@@ -90,23 +91,29 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
}
#endif
- if (!tcf_idr_check(tn, parm->index, a, bind)) {
+ err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ if (!err) {
ret = tcf_idr_create(tn, parm->index, est, a,
&act_gact_ops, bind, true);
- if (ret)
+ if (ret) {
+ tcf_idr_cleanup(tn, parm->index);
return ret;
+ }
ret = ACT_P_CREATED;
- } else {
+ } else if (err > 0) {
if (bind)/* dont override defaults */
return 0;
- tcf_idr_release(*a, bind);
- if (!ovr)
+ if (!ovr) {
+ tcf_idr_release(*a, bind);
return -EEXIST;
+ }
+ } else {
+ return err;
}
gact = to_gact(*a);
- ASSERT_RTNL();
+ spin_lock(&gact->tcf_lock);
gact->tcf_action = parm->action;
#ifdef CONFIG_GACT_PROB
if (p_parm) {
@@ -119,13 +126,15 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
gact->tcfg_ptype = p_parm->ptype;
}
#endif
+ spin_unlock(&gact->tcf_lock);
+
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
return ret;
}
-static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
- struct tcf_result *res)
+static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
{
struct tcf_gact *gact = to_gact(a);
int action = READ_ONCE(gact->tcf_action);
@@ -169,12 +178,13 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
struct tcf_gact *gact = to_gact(a);
struct tc_gact opt = {
.index = gact->tcf_index,
- .refcnt = gact->tcf_refcnt - ref,
- .bindcnt = gact->tcf_bindcnt - bind,
- .action = gact->tcf_action,
+ .refcnt = refcount_read(&gact->tcf_refcnt) - ref,
+ .bindcnt = atomic_read(&gact->tcf_bindcnt) - bind,
};
struct tcf_t t;
+ spin_lock(&gact->tcf_lock);
+ opt.action = gact->tcf_action;
if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
#ifdef CONFIG_GACT_PROB
@@ -192,9 +202,12 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
tcf_tm_dump(&t, &gact->tcf_tm);
if (nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t, TCA_GACT_PAD))
goto nla_put_failure;
+ spin_unlock(&gact->tcf_lock);
+
return skb->len;
nla_put_failure:
+ spin_unlock(&gact->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -230,17 +243,25 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act)
return sz;
}
+static int tcf_gact_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, gact_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_gact_ops = {
.kind = "gact",
.type = TCA_ACT_GACT,
.owner = THIS_MODULE,
- .act = tcf_gact,
+ .act = tcf_gact_act,
.stats_update = tcf_gact_stats_update,
.dump = tcf_gact_dump,
.init = tcf_gact_init,
.walk = tcf_gact_walker,
.lookup = tcf_gact_search,
.get_fill_size = tcf_gact_get_fill_size,
+ .delete = tcf_gact_delete,
.size = sizeof(struct tcf_gact),
};
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 20d7d36b2fc9..fdb928ca81bb 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -167,16 +167,16 @@ static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
{
struct tcf_meta_ops *o;
- read_lock(&ife_mod_lock);
+ read_lock_bh(&ife_mod_lock);
list_for_each_entry(o, &ifeoplist, list) {
if (o->metaid == metaid) {
if (!try_module_get(o->owner))
o = NULL;
- read_unlock(&ife_mod_lock);
+ read_unlock_bh(&ife_mod_lock);
return o;
}
}
- read_unlock(&ife_mod_lock);
+ read_unlock_bh(&ife_mod_lock);
return NULL;
}
@@ -190,12 +190,12 @@ int register_ife_op(struct tcf_meta_ops *mops)
!mops->get || !mops->alloc)
return -EINVAL;
- write_lock(&ife_mod_lock);
+ write_lock_bh(&ife_mod_lock);
list_for_each_entry(m, &ifeoplist, list) {
if (m->metaid == mops->metaid ||
(strcmp(mops->name, m->name) == 0)) {
- write_unlock(&ife_mod_lock);
+ write_unlock_bh(&ife_mod_lock);
return -EEXIST;
}
}
@@ -204,7 +204,7 @@ int register_ife_op(struct tcf_meta_ops *mops)
mops->release = ife_release_meta_gen;
list_add_tail(&mops->list, &ifeoplist);
- write_unlock(&ife_mod_lock);
+ write_unlock_bh(&ife_mod_lock);
return 0;
}
EXPORT_SYMBOL_GPL(unregister_ife_op);
@@ -214,7 +214,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
struct tcf_meta_ops *m;
int err = -ENOENT;
- write_lock(&ife_mod_lock);
+ write_lock_bh(&ife_mod_lock);
list_for_each_entry(m, &ifeoplist, list) {
if (m->metaid == mops->metaid) {
list_del(&mops->list);
@@ -222,7 +222,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
break;
}
}
- write_unlock(&ife_mod_lock);
+ write_unlock_bh(&ife_mod_lock);
return err;
}
@@ -268,7 +268,8 @@ static const char *ife_meta_id2name(u32 metaid)
* under ife->tcf_lock for existing action
*/
static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
- void *val, int len, bool exists)
+ void *val, int len, bool exists,
+ bool rtnl_held)
{
struct tcf_meta_ops *ops = find_ife_oplist(metaid);
int ret = 0;
@@ -278,9 +279,11 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
#ifdef CONFIG_MODULES
if (exists)
spin_unlock_bh(&ife->tcf_lock);
- rtnl_unlock();
+ if (rtnl_held)
+ rtnl_unlock();
request_module("ife-meta-%s", ife_meta_id2name(metaid));
- rtnl_lock();
+ if (rtnl_held)
+ rtnl_lock();
if (exists)
spin_lock_bh(&ife->tcf_lock);
ops = find_ife_oplist(metaid);
@@ -340,13 +343,13 @@ static int use_all_metadata(struct tcf_ife_info *ife)
int rc = 0;
int installed = 0;
- read_lock(&ife_mod_lock);
+ read_lock_bh(&ife_mod_lock);
list_for_each_entry(o, &ifeoplist, list) {
rc = add_metainfo(ife, o->metaid, NULL, 0, true);
if (rc == 0)
installed += 1;
}
- read_unlock(&ife_mod_lock);
+ read_unlock_bh(&ife_mod_lock);
if (installed)
return 0;
@@ -421,7 +424,7 @@ static void tcf_ife_cleanup(struct tc_action *a)
/* under ife->tcf_lock for existing action */
static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
- bool exists)
+ bool exists, bool rtnl_held)
{
int len = 0;
int rc = 0;
@@ -433,7 +436,8 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
val = nla_data(tb[i]);
len = nla_len(tb[i]);
- rc = load_metaops_and_vet(ife, i, val, len, exists);
+ rc = load_metaops_and_vet(ife, i, val, len, exists,
+ rtnl_held);
if (rc != 0)
return rc;
@@ -448,12 +452,13 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
static int tcf_ife_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
- int ovr, int bind, struct netlink_ext_ack *extack)
+ int ovr, int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, ife_net_id);
struct nlattr *tb[TCA_IFE_MAX + 1];
struct nlattr *tb2[IFE_META_MAX + 1];
- struct tcf_ife_params *p, *p_old;
+ struct tcf_ife_params *p;
struct tcf_ife_info *ife;
u16 ife_type = ETH_P_IFE;
struct tc_ife *parm;
@@ -483,7 +488,12 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
if (!p)
return -ENOMEM;
- exists = tcf_idr_check(tn, parm->index, a, bind);
+ err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ if (err < 0) {
+ kfree(p);
+ return err;
+ }
+ exists = err;
if (exists && bind) {
kfree(p);
return 0;
@@ -493,16 +503,15 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
bind, true);
if (ret) {
+ tcf_idr_cleanup(tn, parm->index);
kfree(p);
return ret;
}
ret = ACT_P_CREATED;
- } else {
+ } else if (!ovr) {
tcf_idr_release(*a, bind);
- if (!ovr) {
- kfree(p);
- return -EEXIST;
- }
+ kfree(p);
+ return -EEXIST;
}
ife = to_ife(*a);
@@ -547,11 +556,13 @@ metadata_parse_err:
if (exists)
spin_unlock_bh(&ife->tcf_lock);
+ tcf_idr_release(*a, bind);
+
kfree(p);
return err;
}
- err = populate_metalist(ife, tb2, exists);
+ err = populate_metalist(ife, tb2, exists, rtnl_held);
if (err)
goto metadata_parse_err;
@@ -574,13 +585,13 @@ metadata_parse_err:
}
ife->tcf_action = parm->action;
+ /* protected by tcf_lock when modifying existing action */
+ rcu_swap_protected(ife->params, p, 1);
+
if (exists)
spin_unlock_bh(&ife->tcf_lock);
-
- p_old = rtnl_dereference(ife->params);
- rcu_assign_pointer(ife->params, p);
- if (p_old)
- kfree_rcu(p_old, rcu);
+ if (p)
+ kfree_rcu(p, rcu);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
@@ -593,16 +604,20 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_ife_info *ife = to_ife(a);
- struct tcf_ife_params *p = rtnl_dereference(ife->params);
+ struct tcf_ife_params *p;
struct tc_ife opt = {
.index = ife->tcf_index,
- .refcnt = ife->tcf_refcnt - ref,
- .bindcnt = ife->tcf_bindcnt - bind,
- .action = ife->tcf_action,
- .flags = p->flags,
+ .refcnt = refcount_read(&ife->tcf_refcnt) - ref,
+ .bindcnt = atomic_read(&ife->tcf_bindcnt) - bind,
};
struct tcf_t t;
+ spin_lock_bh(&ife->tcf_lock);
+ opt.action = ife->tcf_action;
+ p = rcu_dereference_protected(ife->params,
+ lockdep_is_held(&ife->tcf_lock));
+ opt.flags = p->flags;
+
if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -628,9 +643,11 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
pr_info("Failed to dump metalist\n");
}
+ spin_unlock_bh(&ife->tcf_lock);
return skb->len;
nla_put_failure:
+ spin_unlock_bh(&ife->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -813,14 +830,11 @@ static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_ife_params *p;
int ret;
- rcu_read_lock();
- p = rcu_dereference(ife->params);
+ p = rcu_dereference_bh(ife->params);
if (p->flags & IFE_ENCODE) {
ret = tcf_ife_encode(skb, a, res, p);
- rcu_read_unlock();
return ret;
}
- rcu_read_unlock();
return tcf_ife_decode(skb, a, res);
}
@@ -843,6 +857,13 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static int tcf_ife_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, ife_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_ife_ops = {
.kind = "ife",
.type = TCA_ACT_IFE,
@@ -853,6 +874,7 @@ static struct tc_action_ops act_ife_ops = {
.init = tcf_ife_init,
.walk = tcf_ife_walker,
.lookup = tcf_ife_search,
+ .delete = tcf_ife_delete,
.size = sizeof(struct tcf_ife_info),
};
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 14c312d7908f..51f235bbeb5b 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -119,13 +119,18 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
if (tb[TCA_IPT_INDEX] != NULL)
index = nla_get_u32(tb[TCA_IPT_INDEX]);
- exists = tcf_idr_check(tn, index, a, bind);
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
if (exists && bind)
return 0;
if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
if (exists)
tcf_idr_release(*a, bind);
+ else
+ tcf_idr_cleanup(tn, index);
return -EINVAL;
}
@@ -133,22 +138,27 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
if (exists)
tcf_idr_release(*a, bind);
+ else
+ tcf_idr_cleanup(tn, index);
return -EINVAL;
}
if (!exists) {
ret = tcf_idr_create(tn, index, est, a, ops, bind,
false);
- if (ret)
+ if (ret) {
+ tcf_idr_cleanup(tn, index);
return ret;
+ }
ret = ACT_P_CREATED;
} else {
if (bind)/* dont override defaults */
return 0;
- tcf_idr_release(*a, bind);
- if (!ovr)
+ if (!ovr) {
+ tcf_idr_release(*a, bind);
return -EEXIST;
+ }
}
hook = nla_get_u32(tb[TCA_IPT_HOOK]);
@@ -196,7 +206,8 @@ err1:
static int tcf_ipt_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
- int bind, struct netlink_ext_ack *extack)
+ int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
bind);
@@ -204,14 +215,15 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla,
static int tcf_xt_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
- int bind, struct netlink_ext_ack *extack)
+ int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
bind);
}
-static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
- struct tcf_result *res)
+static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
{
int ret = 0, result = 0;
struct tcf_ipt *ipt = to_ipt(a);
@@ -276,12 +288,13 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
* for foolproof you need to not assume this
*/
+ spin_lock_bh(&ipt->tcf_lock);
t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
if (unlikely(!t))
goto nla_put_failure;
- c.bindcnt = ipt->tcf_bindcnt - bind;
- c.refcnt = ipt->tcf_refcnt - ref;
+ c.bindcnt = atomic_read(&ipt->tcf_bindcnt) - bind;
+ c.refcnt = refcount_read(&ipt->tcf_refcnt) - ref;
strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);
if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
@@ -295,10 +308,12 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
goto nla_put_failure;
+ spin_unlock_bh(&ipt->tcf_lock);
kfree(t);
return skb->len;
nla_put_failure:
+ spin_unlock_bh(&ipt->tcf_lock);
nlmsg_trim(skb, b);
kfree(t);
return -1;
@@ -322,16 +337,24 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static int tcf_ipt_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, ipt_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_ipt_ops = {
.kind = "ipt",
.type = TCA_ACT_IPT,
.owner = THIS_MODULE,
- .act = tcf_ipt,
+ .act = tcf_ipt_act,
.dump = tcf_ipt_dump,
.cleanup = tcf_ipt_release,
.init = tcf_ipt_init,
.walk = tcf_ipt_walker,
.lookup = tcf_ipt_search,
+ .delete = tcf_ipt_delete,
.size = sizeof(struct tcf_ipt),
};
@@ -372,16 +395,24 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static int tcf_xt_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, xt_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_xt_ops = {
.kind = "xt",
.type = TCA_ACT_XT,
.owner = THIS_MODULE,
- .act = tcf_ipt,
+ .act = tcf_ipt_act,
.dump = tcf_ipt_dump,
.cleanup = tcf_ipt_release,
.init = tcf_xt_init,
.walk = tcf_xt_walker,
.lookup = tcf_xt_search,
+ .delete = tcf_xt_delete,
.size = sizeof(struct tcf_ipt),
};
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index fd34015331ab..8ec216001077 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -25,10 +25,12 @@
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>
static LIST_HEAD(mirred_list);
+static DEFINE_SPINLOCK(mirred_list_lock);
static bool tcf_mirred_is_act_redirect(int action)
{
@@ -49,13 +51,35 @@ static bool tcf_mirred_act_wants_ingress(int action)
}
}
+static bool tcf_mirred_can_reinsert(int action)
+{
+ switch (action) {
+ case TC_ACT_SHOT:
+ case TC_ACT_STOLEN:
+ case TC_ACT_QUEUED:
+ case TC_ACT_TRAP:
+ return true;
+ }
+ return false;
+}
+
+static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
+{
+ return rcu_dereference_protected(m->tcfm_dev,
+ lockdep_is_held(&m->tcf_lock));
+}
+
static void tcf_mirred_release(struct tc_action *a)
{
struct tcf_mirred *m = to_mirred(a);
struct net_device *dev;
+ spin_lock(&mirred_list_lock);
list_del(&m->tcfm_list);
- dev = rtnl_dereference(m->tcfm_dev);
+ spin_unlock(&mirred_list_lock);
+
+ /* last reference to action, no need to lock */
+ dev = rcu_dereference_protected(m->tcfm_dev, 1);
if (dev)
dev_put(dev);
}
@@ -68,8 +92,9 @@ static unsigned int mirred_net_id;
static struct tc_action_ops act_mirred_ops;
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
- struct nlattr *est, struct tc_action **a, int ovr,
- int bind, struct netlink_ext_ack *extack)
+ struct nlattr *est, struct tc_action **a,
+ int ovr, int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, mirred_net_id);
struct nlattr *tb[TCA_MIRRED_MAX + 1];
@@ -78,7 +103,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
struct tcf_mirred *m;
struct net_device *dev;
bool exists = false;
- int ret;
+ int ret, err;
if (!nla) {
NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
@@ -93,7 +118,10 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
}
parm = nla_data(tb[TCA_MIRRED_PARMS]);
- exists = tcf_idr_check(tn, parm->index, a, bind);
+ err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
if (exists && bind)
return 0;
@@ -106,76 +134,83 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
default:
if (exists)
tcf_idr_release(*a, bind);
+ else
+ tcf_idr_cleanup(tn, parm->index);
NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
return -EINVAL;
}
- if (parm->ifindex) {
- dev = __dev_get_by_index(net, parm->ifindex);
- if (dev == NULL) {
- if (exists)
- tcf_idr_release(*a, bind);
- return -ENODEV;
- }
- mac_header_xmit = dev_is_mac_header_xmit(dev);
- } else {
- dev = NULL;
- }
if (!exists) {
- if (!dev) {
+ if (!parm->ifindex) {
+ tcf_idr_cleanup(tn, parm->index);
NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
return -EINVAL;
}
ret = tcf_idr_create(tn, parm->index, est, a,
&act_mirred_ops, bind, true);
- if (ret)
+ if (ret) {
+ tcf_idr_cleanup(tn, parm->index);
return ret;
+ }
ret = ACT_P_CREATED;
- } else {
+ } else if (!ovr) {
tcf_idr_release(*a, bind);
- if (!ovr)
- return -EEXIST;
+ return -EEXIST;
}
m = to_mirred(*a);
- ASSERT_RTNL();
+ spin_lock(&m->tcf_lock);
m->tcf_action = parm->action;
m->tcfm_eaction = parm->eaction;
- if (dev != NULL) {
- if (ret != ACT_P_CREATED)
- dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
- dev_hold(dev);
- rcu_assign_pointer(m->tcfm_dev, dev);
+
+ if (parm->ifindex) {
+ dev = dev_get_by_index(net, parm->ifindex);
+ if (!dev) {
+ spin_unlock(&m->tcf_lock);
+ tcf_idr_release(*a, bind);
+ return -ENODEV;
+ }
+ mac_header_xmit = dev_is_mac_header_xmit(dev);
+ rcu_swap_protected(m->tcfm_dev, dev,
+ lockdep_is_held(&m->tcf_lock));
+ if (dev)
+ dev_put(dev);
m->tcfm_mac_header_xmit = mac_header_xmit;
}
+ spin_unlock(&m->tcf_lock);
if (ret == ACT_P_CREATED) {
+ spin_lock(&mirred_list_lock);
list_add(&m->tcfm_list, &mirred_list);
+ spin_unlock(&mirred_list_lock);
+
tcf_idr_insert(tn, *a);
}
return ret;
}
-static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
- struct tcf_result *res)
+static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
{
struct tcf_mirred *m = to_mirred(a);
+ struct sk_buff *skb2 = skb;
bool m_mac_header_xmit;
struct net_device *dev;
- struct sk_buff *skb2;
int retval, err = 0;
+ bool use_reinsert;
+ bool want_ingress;
+ bool is_redirect;
int m_eaction;
int mac_len;
tcf_lastuse_update(&m->tcf_tm);
bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
- rcu_read_lock();
m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
m_eaction = READ_ONCE(m->tcfm_eaction);
retval = READ_ONCE(m->tcf_action);
- dev = rcu_dereference(m->tcfm_dev);
+ dev = rcu_dereference_bh(m->tcfm_dev);
if (unlikely(!dev)) {
pr_notice_once("tc mirred: target device is gone\n");
goto out;
@@ -187,16 +222,25 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
goto out;
}
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (!skb2)
- goto out;
+ /* We could avoid the clone when called from ingress or clsact, but
+ * since we can't easily detect the clsact caller, skip the clone only
+ * for ingress - that covers the TC S/W datapath.
+ */
+ is_redirect = tcf_mirred_is_act_redirect(m_eaction);
+ use_reinsert = skb_at_tc_ingress(skb) && is_redirect &&
+ tcf_mirred_can_reinsert(retval);
+ if (!use_reinsert) {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (!skb2)
+ goto out;
+ }
/* If action's target direction differs than filter's direction,
* and devices expect a mac header on xmit, then mac push/pull is
* needed.
*/
- if (skb_at_tc_ingress(skb) != tcf_mirred_act_wants_ingress(m_eaction) &&
- m_mac_header_xmit) {
+ want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
+ if (skb_at_tc_ingress(skb) != want_ingress && m_mac_header_xmit) {
if (!skb_at_tc_ingress(skb)) {
/* caught at egress, act ingress: pull mac */
mac_len = skb_network_header(skb) - skb_mac_header(skb);
@@ -207,15 +251,23 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
}
}
+ skb2->skb_iif = skb->dev->ifindex;
+ skb2->dev = dev;
+
/* mirror is always swallowed */
- if (tcf_mirred_is_act_redirect(m_eaction)) {
+ if (is_redirect) {
skb2->tc_redirected = 1;
skb2->tc_from_ingress = skb2->tc_at_ingress;
+
+ /* let the caller reinsert the packet, if possible */
+ if (use_reinsert) {
+ res->ingress = want_ingress;
+ res->qstats = this_cpu_ptr(m->common.cpu_qstats);
+ return TC_ACT_REINSERT;
+ }
}
- skb2->skb_iif = skb->dev->ifindex;
- skb2->dev = dev;
- if (!tcf_mirred_act_wants_ingress(m_eaction))
+ if (!want_ingress)
err = dev_queue_xmit(skb2);
else
err = netif_receive_skb(skb2);
@@ -226,7 +278,6 @@ out:
if (tcf_mirred_is_act_redirect(m_eaction))
retval = TC_ACT_SHOT;
}
- rcu_read_unlock();
return retval;
}
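/* Editor's note: TC_ACT_REINSERT is the new fast path - for a redirect
 * caught at ingress, the original skb is handed back to the caller (which
 * re-injects it using res->ingress and res->qstats) instead of being
 * cloned with the clone forwarded. The clone remains necessary for
 * mirroring and for egress, where the caller keeps consuming the original
 * skb.
 */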
@@ -246,26 +297,33 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_mirred *m = to_mirred(a);
- struct net_device *dev = rtnl_dereference(m->tcfm_dev);
struct tc_mirred opt = {
.index = m->tcf_index,
- .action = m->tcf_action,
- .refcnt = m->tcf_refcnt - ref,
- .bindcnt = m->tcf_bindcnt - bind,
- .eaction = m->tcfm_eaction,
- .ifindex = dev ? dev->ifindex : 0,
+ .refcnt = refcount_read(&m->tcf_refcnt) - ref,
+ .bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
};
+ struct net_device *dev;
struct tcf_t t;
+ spin_lock(&m->tcf_lock);
+ opt.action = m->tcf_action;
+ opt.eaction = m->tcfm_eaction;
+ dev = tcf_mirred_dev_dereference(m);
+ if (dev)
+ opt.ifindex = dev->ifindex;
+
if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
tcf_tm_dump(&t, &m->tcf_tm);
if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
goto nla_put_failure;
+ spin_unlock(&m->tcf_lock);
+
return skb->len;
nla_put_failure:
+ spin_unlock(&m->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -296,15 +354,19 @@ static int mirred_device_event(struct notifier_block *unused,
ASSERT_RTNL();
if (event == NETDEV_UNREGISTER) {
+ spin_lock(&mirred_list_lock);
list_for_each_entry(m, &mirred_list, tcfm_list) {
- if (rcu_access_pointer(m->tcfm_dev) == dev) {
+ spin_lock(&m->tcf_lock);
+ if (tcf_mirred_dev_dereference(m) == dev) {
dev_put(dev);
/* Note : no rcu grace period necessary, as
* net_device are already rcu protected.
*/
RCU_INIT_POINTER(m->tcfm_dev, NULL);
}
+ spin_unlock(&m->tcf_lock);
}
+ spin_unlock(&mirred_list_lock);
}
return NOTIFY_DONE;
@@ -317,15 +379,34 @@ static struct notifier_block mirred_device_notifier = {
static struct net_device *tcf_mirred_get_dev(const struct tc_action *a)
{
struct tcf_mirred *m = to_mirred(a);
+ struct net_device *dev;
+
+ rcu_read_lock();
+ dev = rcu_dereference(m->tcfm_dev);
+ if (dev)
+ dev_hold(dev);
+ rcu_read_unlock();
+
+ return dev;
+}
+
+static void tcf_mirred_put_dev(struct net_device *dev)
+{
+ dev_put(dev);
+}
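/* Editor's sketch, not part of the patch: get_dev() now takes its own
 * device reference under RCU, so an offload caller pairs it with the new
 * put_dev() instead of relying on RTNL pinning the device:
 *
 *	struct net_device *dev = a->ops->get_dev(a);
 *
 *	if (dev) {
 *		// ... set up hardware offload against dev ...
 *		a->ops->put_dev(dev);	// drop the get_dev() reference
 *	}
 */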
+
+static int tcf_mirred_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, mirred_net_id);
- return rtnl_dereference(m->tcfm_dev);
+ return tcf_idr_delete_index(tn, index);
}
static struct tc_action_ops act_mirred_ops = {
.kind = "mirred",
.type = TCA_ACT_MIRRED,
.owner = THIS_MODULE,
- .act = tcf_mirred,
+ .act = tcf_mirred_act,
.stats_update = tcf_stats_update,
.dump = tcf_mirred_dump,
.cleanup = tcf_mirred_release,
@@ -334,6 +415,8 @@ static struct tc_action_ops act_mirred_ops = {
.lookup = tcf_mirred_search,
.size = sizeof(struct tcf_mirred),
.get_dev = tcf_mirred_get_dev,
+ .put_dev = tcf_mirred_put_dev,
+ .delete = tcf_mirred_delete,
};
static __net_init int mirred_init_net(struct net *net)
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 4b5848b6c252..822e903bfc25 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -38,7 +38,7 @@ static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
struct tc_action **a, int ovr, int bind,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, nat_net_id);
struct nlattr *tb[TCA_NAT_MAX + 1];
@@ -57,18 +57,24 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
return -EINVAL;
parm = nla_data(tb[TCA_NAT_PARMS]);
- if (!tcf_idr_check(tn, parm->index, a, bind)) {
+ err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ if (!err) {
ret = tcf_idr_create(tn, parm->index, est, a,
&act_nat_ops, bind, false);
- if (ret)
+ if (ret) {
+ tcf_idr_cleanup(tn, parm->index);
return ret;
+ }
ret = ACT_P_CREATED;
- } else {
+ } else if (err > 0) {
if (bind)
return 0;
- tcf_idr_release(*a, bind);
- if (!ovr)
+ if (!ovr) {
+ tcf_idr_release(*a, bind);
return -EEXIST;
+ }
+ } else {
+ return err;
}
p = to_tcf_nat(*a);
@@ -87,8 +93,8 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
return ret;
}
-static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
- struct tcf_result *res)
+static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
{
struct tcf_nat *p = to_tcf_nat(a);
struct iphdr *iph;
@@ -257,8 +263,8 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
.index = p->tcf_index,
.action = p->tcf_action,
- .refcnt = p->tcf_refcnt - ref,
- .bindcnt = p->tcf_bindcnt - bind,
+ .refcnt = refcount_read(&p->tcf_refcnt) - ref,
+ .bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
};
struct tcf_t t;
@@ -294,15 +300,23 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static int tcf_nat_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, nat_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_nat_ops = {
.kind = "nat",
.type = TCA_ACT_NAT,
.owner = THIS_MODULE,
- .act = tcf_nat,
+ .act = tcf_nat_act,
.dump = tcf_nat_dump,
.init = tcf_nat_init,
.walk = tcf_nat_walker,
.lookup = tcf_nat_search,
+ .delete = tcf_nat_delete,
.size = sizeof(struct tcf_nat),
};
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 8a925c72db5f..8a7a7cb94e83 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -132,20 +132,23 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
static int tcf_pedit_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
- int ovr, int bind, struct netlink_ext_ack *extack)
+ int ovr, int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, pedit_net_id);
struct nlattr *tb[TCA_PEDIT_MAX + 1];
- struct nlattr *pattr;
- struct tc_pedit *parm;
- int ret = 0, err;
- struct tcf_pedit *p;
struct tc_pedit_key *keys = NULL;
struct tcf_pedit_key_ex *keys_ex;
+ struct tc_pedit *parm;
+ struct nlattr *pattr;
+ struct tcf_pedit *p;
+ int ret = 0, err;
int ksize;
- if (nla == NULL)
+ if (!nla) {
+ NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed");
return -EINVAL;
+ }
err = nla_parse_nested(tb, TCA_PEDIT_MAX, nla, pedit_policy, NULL);
if (err < 0)
@@ -154,59 +157,68 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
pattr = tb[TCA_PEDIT_PARMS];
if (!pattr)
pattr = tb[TCA_PEDIT_PARMS_EX];
- if (!pattr)
+ if (!pattr) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing required TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute");
return -EINVAL;
+ }
parm = nla_data(pattr);
ksize = parm->nkeys * sizeof(struct tc_pedit_key);
- if (nla_len(pattr) < sizeof(*parm) + ksize)
+ if (nla_len(pattr) < sizeof(*parm) + ksize) {
+ NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid");
return -EINVAL;
+ }
keys_ex = tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys);
if (IS_ERR(keys_ex))
return PTR_ERR(keys_ex);
- if (!tcf_idr_check(tn, parm->index, a, bind)) {
- if (!parm->nkeys)
- return -EINVAL;
+ err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ if (!err) {
+ if (!parm->nkeys) {
+ tcf_idr_cleanup(tn, parm->index);
+ NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
+ ret = -EINVAL;
+ goto out_free;
+ }
ret = tcf_idr_create(tn, parm->index, est, a,
&act_pedit_ops, bind, false);
- if (ret)
- return ret;
- p = to_pedit(*a);
- keys = kmalloc(ksize, GFP_KERNEL);
- if (keys == NULL) {
- tcf_idr_release(*a, bind);
- kfree(keys_ex);
- return -ENOMEM;
+ if (ret) {
+ tcf_idr_cleanup(tn, parm->index);
+ goto out_free;
}
ret = ACT_P_CREATED;
- } else {
+ } else if (err > 0) {
if (bind)
- return 0;
- tcf_idr_release(*a, bind);
- if (!ovr)
- return -EEXIST;
- p = to_pedit(*a);
- if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
- keys = kmalloc(ksize, GFP_KERNEL);
- if (!keys) {
- kfree(keys_ex);
- return -ENOMEM;
- }
+ goto out_free;
+ if (!ovr) {
+ ret = -EEXIST;
+ goto out_release;
}
+ } else {
+ return err;
}
+ p = to_pedit(*a);
spin_lock_bh(&p->tcf_lock);
- p->tcfp_flags = parm->flags;
- p->tcf_action = parm->action;
- if (keys) {
+
+ if (ret == ACT_P_CREATED ||
+ (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys)) {
+ keys = kmalloc(ksize, GFP_ATOMIC);
+ if (!keys) {
+ spin_unlock_bh(&p->tcf_lock);
+ ret = -ENOMEM;
+ goto out_release;
+ }
kfree(p->tcfp_keys);
p->tcfp_keys = keys;
p->tcfp_nkeys = parm->nkeys;
}
memcpy(p->tcfp_keys, parm->keys, ksize);
+ p->tcfp_flags = parm->flags;
+ p->tcf_action = parm->action;
+
kfree(p->tcfp_keys_ex);
p->tcfp_keys_ex = keys_ex;
@@ -214,12 +226,20 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
return ret;
+
+out_release:
+ tcf_idr_release(*a, bind);
+out_free:
+ kfree(keys_ex);
+ return ret;
+
}
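/* Editor's note: the key array is now (re)allocated with GFP_ATOMIC
 * rather than GFP_KERNEL because the allocation moved inside the
 * spin_lock_bh(&p->tcf_lock) critical section, where sleeping allocations
 * are not allowed.
 */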
static void tcf_pedit_cleanup(struct tc_action *a)
{
struct tcf_pedit *p = to_pedit(a);
struct tc_pedit_key *keys = p->tcfp_keys;
+
kfree(keys);
kfree(p->tcfp_keys_ex);
}
@@ -263,13 +283,13 @@ static int pedit_skb_hdr_offset(struct sk_buff *skb,
default:
ret = -EINVAL;
break;
- };
+ }
return ret;
}
-static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
- struct tcf_result *res)
+static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
{
struct tcf_pedit *p = to_pedit(a);
int i;
@@ -284,11 +304,12 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
if (p->tcfp_nkeys > 0) {
struct tc_pedit_key *tkey = p->tcfp_keys;
struct tcf_pedit_key_ex *tkey_ex = p->tcfp_keys_ex;
- enum pedit_header_type htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+ enum pedit_header_type htype =
+ TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET;
for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
- u32 *ptr, _data;
+ u32 *ptr, hdata;
int offset = tkey->off;
int hoffset;
u32 val;
@@ -303,39 +324,39 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
if (rc) {
- pr_info("tc filter pedit bad header type specified (0x%x)\n",
+ pr_info("tc action pedit bad header type specified (0x%x)\n",
htype);
goto bad;
}
if (tkey->offmask) {
- char *d, _d;
+ u8 *d, _d;
if (!offset_valid(skb, hoffset + tkey->at)) {
- pr_info("tc filter pedit 'at' offset %d out of bounds\n",
+ pr_info("tc action pedit 'at' offset %d out of bounds\n",
hoffset + tkey->at);
goto bad;
}
- d = skb_header_pointer(skb, hoffset + tkey->at, 1,
- &_d);
+ d = skb_header_pointer(skb, hoffset + tkey->at,
+ sizeof(_d), &_d);
if (!d)
goto bad;
offset += (*d & tkey->offmask) >> tkey->shift;
}
if (offset % 4) {
- pr_info("tc filter pedit"
- " offset must be on 32 bit boundaries\n");
+ pr_info("tc action pedit offset must be on 32 bit boundaries\n");
goto bad;
}
if (!offset_valid(skb, hoffset + offset)) {
- pr_info("tc filter pedit offset %d out of bounds\n",
+ pr_info("tc action pedit offset %d out of bounds\n",
hoffset + offset);
goto bad;
}
- ptr = skb_header_pointer(skb, hoffset + offset, 4, &_data);
+ ptr = skb_header_pointer(skb, hoffset + offset,
+ sizeof(hdata), &hdata);
if (!ptr)
goto bad;
/* just do it, baby */
@@ -347,19 +368,20 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
val = (*ptr + tkey->val) & ~tkey->mask;
break;
default:
- pr_info("tc filter pedit bad command (%d)\n",
+ pr_info("tc action pedit bad command (%d)\n",
cmd);
goto bad;
}
*ptr = ((*ptr & tkey->mask) ^ val);
- if (ptr == &_data)
+ if (ptr == &hdata)
skb_store_bits(skb, hoffset + offset, ptr, 4);
}
goto done;
- } else
+ } else {
WARN(1, "pedit BUG: index %d\n", p->tcf_index);
+ }
bad:
p->tcf_qstats.overlimits++;
@@ -385,14 +407,15 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
if (unlikely(!opt))
return -ENOBUFS;
+ spin_lock_bh(&p->tcf_lock);
memcpy(opt->keys, p->tcfp_keys,
p->tcfp_nkeys * sizeof(struct tc_pedit_key));
opt->index = p->tcf_index;
opt->nkeys = p->tcfp_nkeys;
opt->flags = p->tcfp_flags;
opt->action = p->tcf_action;
- opt->refcnt = p->tcf_refcnt - ref;
- opt->bindcnt = p->tcf_bindcnt - bind;
+ opt->refcnt = refcount_read(&p->tcf_refcnt) - ref;
+ opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind;
if (p->tcfp_keys_ex) {
tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys);
@@ -407,11 +430,13 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
tcf_tm_dump(&t, &p->tcf_tm);
if (nla_put_64bit(skb, TCA_PEDIT_TM, sizeof(t), &t, TCA_PEDIT_PAD))
goto nla_put_failure;
+ spin_unlock_bh(&p->tcf_lock);
kfree(opt);
return skb->len;
nla_put_failure:
+ spin_unlock_bh(&p->tcf_lock);
nlmsg_trim(skb, b);
kfree(opt);
return -1;
@@ -435,16 +460,24 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static int tcf_pedit_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, pedit_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_pedit_ops = {
.kind = "pedit",
.type = TCA_ACT_PEDIT,
.owner = THIS_MODULE,
- .act = tcf_pedit,
+ .act = tcf_pedit_act,
.dump = tcf_pedit_dump,
.cleanup = tcf_pedit_cleanup,
.init = tcf_pedit_init,
.walk = tcf_pedit_walker,
.lookup = tcf_pedit_search,
+ .delete = tcf_pedit_delete,
.size = sizeof(struct tcf_pedit),
};
@@ -483,4 +516,3 @@ static void __exit pedit_cleanup_module(void)
module_init(pedit_init_module);
module_exit(pedit_cleanup_module);
-
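
The act_pedit hunks above set the template that the rest of this series follows: tcf_idr_check_alloc() returns a negative errno on failure, 0 when it reserved a fresh index, or a positive value when an existing action was found (taking a reference on it), so every later error path must either release that reference or undo the reservation. A minimal sketch of the resulting control flow, with my_act_ops standing in for a concrete action's ops (tn, parm, est, a, ovr and bind as in the functions above):

	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;			/* lookup/reservation failed */
	if (!err) {				/* index reserved, no existing action */
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &my_act_ops, bind, false);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index); /* undo reservation */
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!ovr) {			/* found, overwrite not allowed */
		tcf_idr_release(*a, bind);	/* drop ref from check_alloc */
		return -EEXIST;
	}
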
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 4e72bc2a0dfb..06f0742db593 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -56,7 +56,7 @@ struct tc_police_compat {
static unsigned int police_net_id;
static struct tc_action_ops act_police_ops;
-static int tcf_act_police_walker(struct net *net, struct sk_buff *skb,
+static int tcf_police_walker(struct net *net, struct sk_buff *skb,
struct netlink_callback *cb, int type,
const struct tc_action_ops *ops,
struct netlink_ext_ack *extack)
@@ -73,9 +73,9 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
[TCA_POLICE_RESULT] = { .type = NLA_U32 },
};
-static int tcf_act_police_init(struct net *net, struct nlattr *nla,
+static int tcf_police_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
- int ovr, int bind,
+ int ovr, int bind, bool rtnl_held,
struct netlink_ext_ack *extack)
{
int ret = 0, err;
@@ -101,20 +101,24 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
return -EINVAL;
parm = nla_data(tb[TCA_POLICE_TBF]);
- exists = tcf_idr_check(tn, parm->index, a, bind);
+ err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
if (exists && bind)
return 0;
if (!exists) {
ret = tcf_idr_create(tn, parm->index, NULL, a,
&act_police_ops, bind, false);
- if (ret)
+ if (ret) {
+ tcf_idr_cleanup(tn, parm->index);
return ret;
+ }
ret = ACT_P_CREATED;
- } else {
+ } else if (!ovr) {
tcf_idr_release(*a, bind);
- if (!ovr)
- return -EEXIST;
+ return -EEXIST;
}
police = to_police(*a);
@@ -195,12 +199,11 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
failure:
qdisc_put_rtab(P_tab);
qdisc_put_rtab(R_tab);
- if (ret == ACT_P_CREATED)
- tcf_idr_release(*a, bind);
+ tcf_idr_release(*a, bind);
return err;
}
-static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
+static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_police *police = to_police(a);
@@ -264,21 +267,22 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
return police->tcf_action;
}
-static int tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a,
+static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_police *police = to_police(a);
struct tc_police opt = {
.index = police->tcf_index,
- .action = police->tcf_action,
- .mtu = police->tcfp_mtu,
- .burst = PSCHED_NS2TICKS(police->tcfp_burst),
- .refcnt = police->tcf_refcnt - ref,
- .bindcnt = police->tcf_bindcnt - bind,
+ .refcnt = refcount_read(&police->tcf_refcnt) - ref,
+ .bindcnt = atomic_read(&police->tcf_bindcnt) - bind,
};
struct tcf_t t;
+ spin_lock_bh(&police->tcf_lock);
+ opt.action = police->tcf_action;
+ opt.mtu = police->tcfp_mtu;
+ opt.burst = PSCHED_NS2TICKS(police->tcfp_burst);
if (police->rate_present)
psched_ratecfg_getrate(&opt.rate, &police->rate);
if (police->peak_present)
@@ -298,10 +302,12 @@ static int tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a,
t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
goto nla_put_failure;
+ spin_unlock_bh(&police->tcf_lock);
return skb->len;
nla_put_failure:
+ spin_unlock_bh(&police->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -314,6 +320,13 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static int tcf_police_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, police_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");
@@ -322,11 +335,12 @@ static struct tc_action_ops act_police_ops = {
.kind = "police",
.type = TCA_ID_POLICE,
.owner = THIS_MODULE,
- .act = tcf_act_police,
- .dump = tcf_act_police_dump,
- .init = tcf_act_police_init,
- .walk = tcf_act_police_walker,
+ .act = tcf_police_act,
+ .dump = tcf_police_dump,
+ .init = tcf_police_init,
+ .walk = tcf_police_walker,
.lookup = tcf_police_search,
+ .delete = tcf_police_delete,
.size = sizeof(struct tcf_police),
};
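
As in pedit, the police dump now runs under the per-action tcf_lock so a concurrent init() rewriting the action cannot be observed halfway through, and the counters are read through their new refcount_t/atomic_t accessors. A sketch of the locking shape (opt being the netlink payload struct, b the saved tail pointer):

	spin_lock_bh(&police->tcf_lock);
	opt.action = police->tcf_action;	/* fields init() may rewrite */
	opt.mtu = police->tcfp_mtu;
	if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
		goto nla_put_failure;
	spin_unlock_bh(&police->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&police->tcf_lock);	/* both exits must unlock */
	nlmsg_trim(skb, b);
	return -1;
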
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 5db358497c9e..81071afe1b43 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -37,7 +37,8 @@ static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
static int tcf_sample_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a, int ovr,
- int bind, struct netlink_ext_ack *extack)
+ int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, sample_net_id);
struct nlattr *tb[TCA_SAMPLE_MAX + 1];
@@ -45,7 +46,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
struct tc_sample *parm;
struct tcf_sample *s;
bool exists = false;
- int ret;
+ int ret, err;
if (!nla)
return -EINVAL;
@@ -58,30 +59,35 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
parm = nla_data(tb[TCA_SAMPLE_PARMS]);
- exists = tcf_idr_check(tn, parm->index, a, bind);
+ err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
if (exists && bind)
return 0;
if (!exists) {
ret = tcf_idr_create(tn, parm->index, est, a,
&act_sample_ops, bind, false);
- if (ret)
+ if (ret) {
+ tcf_idr_cleanup(tn, parm->index);
return ret;
+ }
ret = ACT_P_CREATED;
- } else {
+ } else if (!ovr) {
tcf_idr_release(*a, bind);
- if (!ovr)
- return -EEXIST;
+ return -EEXIST;
}
s = to_sample(*a);
+ spin_lock(&s->tcf_lock);
s->tcf_action = parm->action;
s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
psample_group = psample_group_get(net, s->psample_group_num);
if (!psample_group) {
- if (ret == ACT_P_CREATED)
- tcf_idr_release(*a, bind);
+ spin_unlock(&s->tcf_lock);
+ tcf_idr_release(*a, bind);
return -ENOMEM;
}
RCU_INIT_POINTER(s->psample_group, psample_group);
@@ -90,6 +96,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
s->truncate = true;
s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
}
+ spin_unlock(&s->tcf_lock);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
@@ -101,7 +108,8 @@ static void tcf_sample_cleanup(struct tc_action *a)
struct tcf_sample *s = to_sample(a);
struct psample_group *psample_group;
- psample_group = rtnl_dereference(s->psample_group);
+ /* last reference to action, no need to lock */
+ psample_group = rcu_dereference_protected(s->psample_group, 1);
RCU_INIT_POINTER(s->psample_group, NULL);
if (psample_group)
psample_group_put(psample_group);
@@ -136,8 +144,7 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
bstats_cpu_update(this_cpu_ptr(s->common.cpu_bstats), skb);
retval = READ_ONCE(s->tcf_action);
- rcu_read_lock();
- psample_group = rcu_dereference(s->psample_group);
+ psample_group = rcu_dereference_bh(s->psample_group);
/* randomly sample packets according to rate */
if (psample_group && (prandom_u32() % s->rate == 0)) {
@@ -161,7 +168,6 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
skb_pull(skb, skb->mac_len);
}
- rcu_read_unlock();
return retval;
}
@@ -172,12 +178,13 @@ static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a,
struct tcf_sample *s = to_sample(a);
struct tc_sample opt = {
.index = s->tcf_index,
- .action = s->tcf_action,
- .refcnt = s->tcf_refcnt - ref,
- .bindcnt = s->tcf_bindcnt - bind,
+ .refcnt = refcount_read(&s->tcf_refcnt) - ref,
+ .bindcnt = atomic_read(&s->tcf_bindcnt) - bind,
};
struct tcf_t t;
+ spin_lock(&s->tcf_lock);
+ opt.action = s->tcf_action;
if (nla_put(skb, TCA_SAMPLE_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -194,9 +201,12 @@ static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put_u32(skb, TCA_SAMPLE_PSAMPLE_GROUP, s->psample_group_num))
goto nla_put_failure;
+ spin_unlock(&s->tcf_lock);
+
return skb->len;
nla_put_failure:
+ spin_unlock(&s->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -219,6 +229,13 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static int tcf_sample_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, sample_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_sample_ops = {
.kind = "sample",
.type = TCA_ACT_SAMPLE,
@@ -229,6 +246,7 @@ static struct tc_action_ops act_sample_ops = {
.cleanup = tcf_sample_cleanup,
.walk = tcf_sample_walker,
.lookup = tcf_sample_search,
+ .delete = tcf_sample_delete,
.size = sizeof(struct tcf_sample),
};
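
act_sample's fast path can drop its explicit rcu_read_lock()/rcu_read_unlock() pair because ->act() is always entered from the qdisc layer with bottom halves disabled, which is itself an RCU-BH read-side critical section; rcu_dereference_bh() both documents and lockdep-verifies that assumption. Reader-side sketch:

	/* BH already disabled here: no explicit rcu_read_lock() needed */
	psample_group = rcu_dereference_bh(s->psample_group);
	retval = READ_ONCE(s->tcf_action);
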
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 98c4afe7c15b..e616523ba3c1 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -28,8 +28,8 @@ static unsigned int simp_net_id;
static struct tc_action_ops act_simp_ops;
#define SIMP_MAX_DATA 32
-static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
- struct tcf_result *res)
+static int tcf_simp_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
{
struct tcf_defact *d = to_defact(a);
@@ -79,7 +79,8 @@ static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
static int tcf_simp_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
- int ovr, int bind, struct netlink_ext_ack *extack)
+ int ovr, int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, simp_net_id);
struct nlattr *tb[TCA_DEF_MAX + 1];
@@ -99,21 +100,28 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
return -EINVAL;
parm = nla_data(tb[TCA_DEF_PARMS]);
- exists = tcf_idr_check(tn, parm->index, a, bind);
+ err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
if (exists && bind)
return 0;
if (tb[TCA_DEF_DATA] == NULL) {
if (exists)
tcf_idr_release(*a, bind);
+ else
+ tcf_idr_cleanup(tn, parm->index);
return -EINVAL;
}
if (!exists) {
ret = tcf_idr_create(tn, parm->index, est, a,
&act_simp_ops, bind, false);
- if (ret)
+ if (ret) {
+ tcf_idr_cleanup(tn, parm->index);
return ret;
+ }
d = to_defact(*a);
ret = alloc_defdata(d, tb[TCA_DEF_DATA]);
@@ -126,9 +134,10 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
} else {
d = to_defact(*a);
- tcf_idr_release(*a, bind);
- if (!ovr)
+ if (!ovr) {
+ tcf_idr_release(*a, bind);
return -EEXIST;
+ }
reset_policy(d, tb[TCA_DEF_DATA], parm);
}
@@ -145,12 +154,13 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
struct tcf_defact *d = to_defact(a);
struct tc_defact opt = {
.index = d->tcf_index,
- .refcnt = d->tcf_refcnt - ref,
- .bindcnt = d->tcf_bindcnt - bind,
- .action = d->tcf_action,
+ .refcnt = refcount_read(&d->tcf_refcnt) - ref,
+ .bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
};
struct tcf_t t;
+ spin_lock_bh(&d->tcf_lock);
+ opt.action = d->tcf_action;
if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) ||
nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata))
goto nla_put_failure;
@@ -158,9 +168,12 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
tcf_tm_dump(&t, &d->tcf_tm);
if (nla_put_64bit(skb, TCA_DEF_TM, sizeof(t), &t, TCA_DEF_PAD))
goto nla_put_failure;
+ spin_unlock_bh(&d->tcf_lock);
+
return skb->len;
nla_put_failure:
+ spin_unlock_bh(&d->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -183,16 +196,24 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static int tcf_simp_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, simp_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_simp_ops = {
.kind = "simple",
.type = TCA_ACT_SIMP,
.owner = THIS_MODULE,
- .act = tcf_simp,
+ .act = tcf_simp_act,
.dump = tcf_simp_dump,
.cleanup = tcf_simp_release,
.init = tcf_simp_init,
.walk = tcf_simp_walker,
.lookup = tcf_simp_search,
+ .delete = tcf_simp_delete,
.size = sizeof(struct tcf_defact),
};
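
Every action converted in this series gains a structurally identical .delete callback so the core can remove an action by index without going through the action's init path; only the per-net ID differs. The template, with my_act_delete and my_net_id as hypothetical names:

static int my_act_delete(struct net *net, u32 index)
{
	struct tc_action_net *tn = net_generic(net, my_net_id);

	return tcf_idr_delete_index(tn, index);
}
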
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 6138d1d71900..926d7bc4a89d 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -23,6 +23,9 @@
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/dsfield.h>
#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>
@@ -30,29 +33,54 @@
static unsigned int skbedit_net_id;
static struct tc_action_ops act_skbedit_ops;
-static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
- struct tcf_result *res)
+static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
{
struct tcf_skbedit *d = to_skbedit(a);
+ struct tcf_skbedit_params *params;
+ int action;
- spin_lock(&d->tcf_lock);
tcf_lastuse_update(&d->tcf_tm);
- bstats_update(&d->tcf_bstats, skb);
-
- if (d->flags & SKBEDIT_F_PRIORITY)
- skb->priority = d->priority;
- if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
- skb->dev->real_num_tx_queues > d->queue_mapping)
- skb_set_queue_mapping(skb, d->queue_mapping);
- if (d->flags & SKBEDIT_F_MARK) {
- skb->mark &= ~d->mask;
- skb->mark |= d->mark & d->mask;
+ bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
+
+ params = rcu_dereference_bh(d->params);
+ action = READ_ONCE(d->tcf_action);
+
+ if (params->flags & SKBEDIT_F_PRIORITY)
+ skb->priority = params->priority;
+ if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
+ int wlen = skb_network_offset(skb);
+
+ switch (tc_skb_protocol(skb)) {
+ case htons(ETH_P_IP):
+ wlen += sizeof(struct iphdr);
+ if (!pskb_may_pull(skb, wlen))
+ goto err;
+ skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ break;
+
+ case htons(ETH_P_IPV6):
+ wlen += sizeof(struct ipv6hdr);
+ if (!pskb_may_pull(skb, wlen))
+ goto err;
+ skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+ break;
+ }
}
- if (d->flags & SKBEDIT_F_PTYPE)
- skb->pkt_type = d->ptype;
+ if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
+ skb->dev->real_num_tx_queues > params->queue_mapping)
+ skb_set_queue_mapping(skb, params->queue_mapping);
+ if (params->flags & SKBEDIT_F_MARK) {
+ skb->mark &= ~params->mask;
+ skb->mark |= params->mark & params->mask;
+ }
+ if (params->flags & SKBEDIT_F_PTYPE)
+ skb->pkt_type = params->ptype;
+ return action;
- spin_unlock(&d->tcf_lock);
- return d->tcf_action;
+err:
+ qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
+ return TC_ACT_SHOT;
}
static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
@@ -62,13 +90,16 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
[TCA_SKBEDIT_MARK] = { .len = sizeof(u32) },
[TCA_SKBEDIT_PTYPE] = { .len = sizeof(u16) },
[TCA_SKBEDIT_MASK] = { .len = sizeof(u32) },
+ [TCA_SKBEDIT_FLAGS] = { .len = sizeof(u64) },
};
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
- int ovr, int bind, struct netlink_ext_ack *extack)
+ int ovr, int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, skbedit_net_id);
+ struct tcf_skbedit_params *params_old, *params_new;
struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
struct tc_skbedit *parm;
struct tcf_skbedit *d;
@@ -114,52 +145,76 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
mask = nla_data(tb[TCA_SKBEDIT_MASK]);
}
+ if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
+ u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);
+
+ if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
+ flags |= SKBEDIT_F_INHERITDSFIELD;
+ }
+
parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
- exists = tcf_idr_check(tn, parm->index, a, bind);
+ err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
if (exists && bind)
return 0;
if (!flags) {
if (exists)
tcf_idr_release(*a, bind);
+ else
+ tcf_idr_cleanup(tn, parm->index);
return -EINVAL;
}
if (!exists) {
ret = tcf_idr_create(tn, parm->index, est, a,
- &act_skbedit_ops, bind, false);
- if (ret)
+ &act_skbedit_ops, bind, true);
+ if (ret) {
+ tcf_idr_cleanup(tn, parm->index);
return ret;
+ }
d = to_skbedit(*a);
ret = ACT_P_CREATED;
} else {
d = to_skbedit(*a);
- tcf_idr_release(*a, bind);
- if (!ovr)
+ if (!ovr) {
+ tcf_idr_release(*a, bind);
return -EEXIST;
+ }
}
- spin_lock_bh(&d->tcf_lock);
+ ASSERT_RTNL();
+
+ params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
+ if (unlikely(!params_new)) {
+ if (ret == ACT_P_CREATED)
+ tcf_idr_release(*a, bind);
+ return -ENOMEM;
+ }
- d->flags = flags;
+ params_new->flags = flags;
if (flags & SKBEDIT_F_PRIORITY)
- d->priority = *priority;
+ params_new->priority = *priority;
if (flags & SKBEDIT_F_QUEUE_MAPPING)
- d->queue_mapping = *queue_mapping;
+ params_new->queue_mapping = *queue_mapping;
if (flags & SKBEDIT_F_MARK)
- d->mark = *mark;
+ params_new->mark = *mark;
if (flags & SKBEDIT_F_PTYPE)
- d->ptype = *ptype;
+ params_new->ptype = *ptype;
/* default behaviour is to use all the bits */
- d->mask = 0xffffffff;
+ params_new->mask = 0xffffffff;
if (flags & SKBEDIT_F_MASK)
- d->mask = *mask;
+ params_new->mask = *mask;
d->tcf_action = parm->action;
-
- spin_unlock_bh(&d->tcf_lock);
+ params_old = rtnl_dereference(d->params);
+ rcu_assign_pointer(d->params, params_new);
+ if (params_old)
+ kfree_rcu(params_old, rcu);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
@@ -171,30 +226,39 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_skbedit *d = to_skbedit(a);
+ struct tcf_skbedit_params *params;
struct tc_skbedit opt = {
.index = d->tcf_index,
- .refcnt = d->tcf_refcnt - ref,
- .bindcnt = d->tcf_bindcnt - bind,
+ .refcnt = refcount_read(&d->tcf_refcnt) - ref,
+ .bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
.action = d->tcf_action,
};
+ u64 pure_flags = 0;
struct tcf_t t;
+ params = rtnl_dereference(d->params);
+
if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
- if ((d->flags & SKBEDIT_F_PRIORITY) &&
- nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, d->priority))
+ if ((params->flags & SKBEDIT_F_PRIORITY) &&
+ nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
goto nla_put_failure;
- if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
- nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, d->queue_mapping))
+ if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
+ nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
goto nla_put_failure;
- if ((d->flags & SKBEDIT_F_MARK) &&
- nla_put_u32(skb, TCA_SKBEDIT_MARK, d->mark))
+ if ((params->flags & SKBEDIT_F_MARK) &&
+ nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
goto nla_put_failure;
- if ((d->flags & SKBEDIT_F_PTYPE) &&
- nla_put_u16(skb, TCA_SKBEDIT_PTYPE, d->ptype))
+ if ((params->flags & SKBEDIT_F_PTYPE) &&
+ nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
goto nla_put_failure;
- if ((d->flags & SKBEDIT_F_MASK) &&
- nla_put_u32(skb, TCA_SKBEDIT_MASK, d->mask))
+ if ((params->flags & SKBEDIT_F_MASK) &&
+ nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
+ goto nla_put_failure;
+ if (params->flags & SKBEDIT_F_INHERITDSFIELD)
+ pure_flags |= SKBEDIT_F_INHERITDSFIELD;
+ if (pure_flags != 0 &&
+ nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
goto nla_put_failure;
tcf_tm_dump(&t, &d->tcf_tm);
@@ -207,6 +271,16 @@ nla_put_failure:
return -1;
}
+static void tcf_skbedit_cleanup(struct tc_action *a)
+{
+ struct tcf_skbedit *d = to_skbedit(a);
+ struct tcf_skbedit_params *params;
+
+ params = rcu_dereference_protected(d->params, 1);
+ if (params)
+ kfree_rcu(params, rcu);
+}
+
static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
struct netlink_callback *cb, int type,
const struct tc_action_ops *ops,
@@ -225,15 +299,24 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static int tcf_skbedit_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, skbedit_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_skbedit_ops = {
.kind = "skbedit",
.type = TCA_ACT_SKBEDIT,
.owner = THIS_MODULE,
- .act = tcf_skbedit,
+ .act = tcf_skbedit_act,
.dump = tcf_skbedit_dump,
.init = tcf_skbedit_init,
+ .cleanup = tcf_skbedit_cleanup,
.walk = tcf_skbedit_walker,
.lookup = tcf_skbedit_search,
+ .delete = tcf_skbedit_delete,
.size = sizeof(struct tcf_skbedit),
};
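
skbedit moves every field its datapath reads into a separately allocated tcf_skbedit_params published via RCU, so ->act() runs lock-free while init() replaces the whole parameter set atomically. Both halves of that pattern, condensed from the hunks above:

	/* writer (init, under RTNL): publish new set, retire old one */
	params_old = rtnl_dereference(d->params);
	rcu_assign_pointer(d->params, params_new);
	if (params_old)
		kfree_rcu(params_old, rcu);

	/* reader (->act, BH disabled): sees old or new set, never a mix */
	params = rcu_dereference_bh(d->params);
	action = READ_ONCE(d->tcf_action);
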
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index ad050d7d4b46..d6a1af0c4171 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -24,7 +24,7 @@ static unsigned int skbmod_net_id;
static struct tc_action_ops act_skbmod_ops;
#define MAX_EDIT_LEN ETH_HLEN
-static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a,
+static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_skbmod *d = to_skbmod(a);
@@ -41,20 +41,14 @@ static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a,
* then MAX_EDIT_LEN needs to change appropriately
*/
err = skb_ensure_writable(skb, MAX_EDIT_LEN);
- if (unlikely(err)) { /* best policy is to drop on the floor */
- qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
- return TC_ACT_SHOT;
- }
+ if (unlikely(err)) /* best policy is to drop on the floor */
+ goto drop;
- rcu_read_lock();
action = READ_ONCE(d->tcf_action);
- if (unlikely(action == TC_ACT_SHOT)) {
- qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
- rcu_read_unlock();
- return action;
- }
+ if (unlikely(action == TC_ACT_SHOT))
+ goto drop;
- p = rcu_dereference(d->skbmod_p);
+ p = rcu_dereference_bh(d->skbmod_p);
flags = p->flags;
if (flags & SKBMOD_F_DMAC)
ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst);
@@ -62,7 +56,6 @@ static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a,
ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src);
if (flags & SKBMOD_F_ETYPE)
eth_hdr(skb)->h_proto = p->eth_type;
- rcu_read_unlock();
if (flags & SKBMOD_F_SWAPMAC) {
u16 tmpaddr[ETH_ALEN / 2]; /* ether_addr_copy() requirement */
@@ -73,6 +66,10 @@ static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a,
}
return action;
+
+drop:
+ qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
+ return TC_ACT_SHOT;
}
static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
@@ -84,7 +81,8 @@ static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
- int ovr, int bind, struct netlink_ext_ack *extack)
+ int ovr, int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, skbmod_net_id);
struct nlattr *tb[TCA_SKBMOD_MAX + 1];
@@ -127,46 +125,50 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
if (parm->flags & SKBMOD_F_SWAPMAC)
lflags = SKBMOD_F_SWAPMAC;
- exists = tcf_idr_check(tn, parm->index, a, bind);
+ err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
if (exists && bind)
return 0;
if (!lflags) {
if (exists)
tcf_idr_release(*a, bind);
+ else
+ tcf_idr_cleanup(tn, parm->index);
return -EINVAL;
}
if (!exists) {
ret = tcf_idr_create(tn, parm->index, est, a,
&act_skbmod_ops, bind, true);
- if (ret)
+ if (ret) {
+ tcf_idr_cleanup(tn, parm->index);
return ret;
+ }
ret = ACT_P_CREATED;
- } else {
+ } else if (!ovr) {
tcf_idr_release(*a, bind);
- if (!ovr)
- return -EEXIST;
+ return -EEXIST;
}
d = to_skbmod(*a);
- ASSERT_RTNL();
p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
if (unlikely(!p)) {
- if (ret == ACT_P_CREATED)
- tcf_idr_release(*a, bind);
+ tcf_idr_release(*a, bind);
return -ENOMEM;
}
p->flags = lflags;
d->tcf_action = parm->action;
- p_old = rtnl_dereference(d->skbmod_p);
-
if (ovr)
spin_lock_bh(&d->tcf_lock);
+ /* Protected by tcf_lock if overwriting existing action. */
+ p_old = rcu_dereference_protected(d->skbmod_p, 1);
if (lflags & SKBMOD_F_DMAC)
ether_addr_copy(p->eth_dst, daddr);
@@ -202,15 +204,18 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
{
struct tcf_skbmod *d = to_skbmod(a);
unsigned char *b = skb_tail_pointer(skb);
- struct tcf_skbmod_params *p = rtnl_dereference(d->skbmod_p);
+ struct tcf_skbmod_params *p;
struct tc_skbmod opt = {
.index = d->tcf_index,
- .refcnt = d->tcf_refcnt - ref,
- .bindcnt = d->tcf_bindcnt - bind,
- .action = d->tcf_action,
+ .refcnt = refcount_read(&d->tcf_refcnt) - ref,
+ .bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
};
struct tcf_t t;
+ spin_lock_bh(&d->tcf_lock);
+ opt.action = d->tcf_action;
+ p = rcu_dereference_protected(d->skbmod_p,
+ lockdep_is_held(&d->tcf_lock));
opt.flags = p->flags;
if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -228,8 +233,10 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
goto nla_put_failure;
+ spin_unlock_bh(&d->tcf_lock);
return skb->len;
nla_put_failure:
+ spin_unlock_bh(&d->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -252,16 +259,24 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static int tcf_skbmod_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_skbmod_ops = {
.kind = "skbmod",
.type = TCA_ACT_SKBMOD,
.owner = THIS_MODULE,
- .act = tcf_skbmod_run,
+ .act = tcf_skbmod_act,
.dump = tcf_skbmod_dump,
.init = tcf_skbmod_init,
.cleanup = tcf_skbmod_cleanup,
.walk = tcf_skbmod_walker,
.lookup = tcf_skbmod_search,
+ .delete = tcf_skbmod_delete,
.size = sizeof(struct tcf_skbmod),
};
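
In skbmod's init(), a freshly created action is not yet visible to any reader, while an overwrite of an existing one now takes tcf_lock around the pointer swap; either way the old parameter block can be fetched with a constant-true protection condition. Sketch of the swap, part of which lies outside the hunks shown (the rcu_assign_pointer() step is assumed):

	if (ovr)
		spin_lock_bh(&d->tcf_lock);
	/* new actions are unpublished; overwrites hold tcf_lock */
	p_old = rcu_dereference_protected(d->skbmod_p, 1);
	rcu_assign_pointer(d->skbmod_p, p);
	if (ovr)
		spin_unlock_bh(&d->tcf_lock);
	if (p_old)
		kfree_rcu(p_old, rcu);
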
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 9bc6c2ae98a5..ba2ae9f75ef5 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
+#include <net/geneve.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
@@ -30,9 +31,7 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_tunnel_key_params *params;
int action;
- rcu_read_lock();
-
- params = rcu_dereference(t->params);
+ params = rcu_dereference_bh(t->params);
tcf_lastuse_update(&t->tcf_tm);
bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
@@ -52,11 +51,138 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
break;
}
- rcu_read_unlock();
-
return action;
}
+static const struct nla_policy
+enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
+ [TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy
+geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
+ [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
+ [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
+ [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
+ .len = 128 },
+};
+
+static int
+tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
+ int err, data_len, opt_len;
+ u8 *data;
+
+ err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
+ nla, geneve_opt_policy, extack);
+ if (err < 0)
+ return err;
+
+ if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
+ !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
+ !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
+ NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
+ return -EINVAL;
+ }
+
+ data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
+ data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
+ if (data_len < 4) {
+ NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
+ return -ERANGE;
+ }
+ if (data_len % 4) {
+ NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
+ return -ERANGE;
+ }
+
+ opt_len = sizeof(struct geneve_opt) + data_len;
+ if (dst) {
+ struct geneve_opt *opt = dst;
+
+ WARN_ON(dst_len < opt_len);
+
+ opt->opt_class =
+ nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
+ opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
+ opt->length = data_len / 4; /* length is in units of 4 bytes */
+ opt->r1 = 0;
+ opt->r2 = 0;
+ opt->r3 = 0;
+
+ memcpy(opt + 1, data, data_len);
+ }
+
+ return opt_len;
+}
+
+static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
+ int dst_len, struct netlink_ext_ack *extack)
+{
+ int err, rem, opt_len, len = nla_len(nla), opts_len = 0;
+ const struct nlattr *attr, *head = nla_data(nla);
+
+ err = nla_validate(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
+ enc_opts_policy, extack);
+ if (err)
+ return err;
+
+ nla_for_each_attr(attr, head, len, rem) {
+ switch (nla_type(attr)) {
+ case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
+ opt_len = tunnel_key_copy_geneve_opt(attr, dst,
+ dst_len, extack);
+ if (opt_len < 0)
+ return opt_len;
+ opts_len += opt_len;
+ if (dst) {
+ dst_len -= opt_len;
+ dst += opt_len;
+ }
+ break;
+ }
+ }
+
+ if (!opts_len) {
+ NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
+ return -EINVAL;
+ }
+
+ if (rem > 0) {
+ NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
+ return -EINVAL;
+ }
+
+ return opts_len;
+}
+
+static int tunnel_key_get_opts_len(struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ return tunnel_key_copy_opts(nla, NULL, 0, extack);
+}
+
+static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
+ int opts_len, struct netlink_ext_ack *extack)
+{
+ info->options_len = opts_len;
+ switch (nla_type(nla_data(nla))) {
+ case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
+#if IS_ENABLED(CONFIG_INET)
+ info->key.tun_flags |= TUNNEL_GENEVE_OPT;
+ return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
+ opts_len, extack);
+#else
+ return -EAFNOSUPPORT;
+#endif
+ default:
+ NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
+ return -EINVAL;
+ }
+}
+
static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
[TCA_TUNNEL_KEY_PARMS] = { .len = sizeof(struct tc_tunnel_key) },
[TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
@@ -66,39 +192,53 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
[TCA_TUNNEL_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
[TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16},
[TCA_TUNNEL_KEY_NO_CSUM] = { .type = NLA_U8 },
+ [TCA_TUNNEL_KEY_ENC_OPTS] = { .type = NLA_NESTED },
+ [TCA_TUNNEL_KEY_ENC_TOS] = { .type = NLA_U8 },
+ [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
};
static int tunnel_key_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
- int ovr, int bind, struct netlink_ext_ack *extack)
+ int ovr, int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
- struct tcf_tunnel_key_params *params_old;
struct tcf_tunnel_key_params *params_new;
struct metadata_dst *metadata = NULL;
struct tc_tunnel_key *parm;
struct tcf_tunnel_key *t;
bool exists = false;
__be16 dst_port = 0;
+ int opts_len = 0;
__be64 key_id;
__be16 flags;
+ u8 tos, ttl;
int ret = 0;
int err;
- if (!nla)
+ if (!nla) {
+ NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
return -EINVAL;
+ }
err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy,
- NULL);
- if (err < 0)
+ extack);
+ if (err < 0) {
+ NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
return err;
+ }
- if (!tb[TCA_TUNNEL_KEY_PARMS])
+ if (!tb[TCA_TUNNEL_KEY_PARMS]) {
+ NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
return -EINVAL;
+ }
parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
- exists = tcf_idr_check(tn, parm->index, a, bind);
+ err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
if (exists && bind)
return 0;
@@ -107,6 +247,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
break;
case TCA_TUNNEL_KEY_ACT_SET:
if (!tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
+ NL_SET_ERR_MSG(extack, "Missing tunnel key id");
ret = -EINVAL;
goto err_out;
}
@@ -121,6 +262,22 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);
+ if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
+ opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
+ extack);
+ if (opts_len < 0) {
+ ret = opts_len;
+ goto err_out;
+ }
+ }
+
+ tos = 0;
+ if (tb[TCA_TUNNEL_KEY_ENC_TOS])
+ tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
+ ttl = 0;
+ if (tb[TCA_TUNNEL_KEY_ENC_TTL])
+ ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);
+
if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
__be32 saddr;
@@ -129,9 +286,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);
- metadata = __ip_tun_set_dst(saddr, daddr, 0, 0,
+ metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
dst_port, flags,
- key_id, 0);
+ key_id, opts_len);
} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
struct in6_addr saddr;
@@ -140,19 +297,33 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);
- metadata = __ipv6_tun_set_dst(&saddr, &daddr, 0, 0, dst_port,
+ metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
0, flags,
key_id, 0);
+ } else {
+ NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
+ ret = -EINVAL;
+ goto err_out;
}
if (!metadata) {
- ret = -EINVAL;
+ NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
+ ret = -ENOMEM;
goto err_out;
}
+ if (opts_len) {
+ ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
+ &metadata->u.tun_info,
+ opts_len, extack);
+ if (ret < 0)
+ goto err_out;
+ }
+
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
break;
default:
+ NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
ret = -EINVAL;
goto err_out;
}
@@ -160,36 +331,36 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, parm->index, est, a,
&act_tunnel_key_ops, bind, true);
- if (ret)
- return ret;
+ if (ret) {
+ NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
+ goto err_out;
+ }
ret = ACT_P_CREATED;
- } else {
+ } else if (!ovr) {
tcf_idr_release(*a, bind);
- if (!ovr)
- return -EEXIST;
+ NL_SET_ERR_MSG(extack, "TC IDR already exists");
+ return -EEXIST;
}
t = to_tunnel_key(*a);
- ASSERT_RTNL();
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
if (unlikely(!params_new)) {
- if (ret == ACT_P_CREATED)
- tcf_idr_release(*a, bind);
+ tcf_idr_release(*a, bind);
+ NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
return -ENOMEM;
}
-
- params_old = rtnl_dereference(t->params);
-
- t->tcf_action = parm->action;
params_new->tcft_action = parm->t_action;
params_new->tcft_enc_metadata = metadata;
- rcu_assign_pointer(t->params, params_new);
-
- if (params_old)
- kfree_rcu(params_old, rcu);
+ spin_lock(&t->tcf_lock);
+ t->tcf_action = parm->action;
+ rcu_swap_protected(t->params, params_new,
+ lockdep_is_held(&t->tcf_lock));
+ spin_unlock(&t->tcf_lock);
+ if (params_new)
+ kfree_rcu(params_new, rcu);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
@@ -199,6 +370,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
err_out:
if (exists)
tcf_idr_release(*a, bind);
+ else
+ tcf_idr_cleanup(tn, parm->index);
return ret;
}
@@ -216,6 +389,61 @@ static void tunnel_key_release(struct tc_action *a)
}
}
+static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
+ const struct ip_tunnel_info *info)
+{
+ int len = info->options_len;
+ u8 *src = (u8 *)(info + 1);
+ struct nlattr *start;
+
+ start = nla_nest_start(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
+ if (!start)
+ return -EMSGSIZE;
+
+ while (len > 0) {
+ struct geneve_opt *opt = (struct geneve_opt *)src;
+
+ if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
+ opt->opt_class) ||
+ nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
+ opt->type) ||
+ nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
+ opt->length * 4, opt + 1))
+ return -EMSGSIZE;
+
+ len -= sizeof(struct geneve_opt) + opt->length * 4;
+ src += sizeof(struct geneve_opt) + opt->length * 4;
+ }
+
+ nla_nest_end(skb, start);
+ return 0;
+}
+
+static int tunnel_key_opts_dump(struct sk_buff *skb,
+ const struct ip_tunnel_info *info)
+{
+ struct nlattr *start;
+ int err;
+
+ if (!info->options_len)
+ return 0;
+
+ start = nla_nest_start(skb, TCA_TUNNEL_KEY_ENC_OPTS);
+ if (!start)
+ return -EMSGSIZE;
+
+ if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
+ err = tunnel_key_geneve_opts_dump(skb, info);
+ if (err)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ nla_nest_end(skb, start);
+ return 0;
+}
+
static int tunnel_key_dump_addresses(struct sk_buff *skb,
const struct ip_tunnel_info *info)
{
@@ -252,22 +480,24 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
struct tcf_tunnel_key_params *params;
struct tc_tunnel_key opt = {
.index = t->tcf_index,
- .refcnt = t->tcf_refcnt - ref,
- .bindcnt = t->tcf_bindcnt - bind,
- .action = t->tcf_action,
+ .refcnt = refcount_read(&t->tcf_refcnt) - ref,
+ .bindcnt = atomic_read(&t->tcf_bindcnt) - bind,
};
struct tcf_t tm;
- params = rtnl_dereference(t->params);
-
+ spin_lock(&t->tcf_lock);
+ params = rcu_dereference_protected(t->params,
+ lockdep_is_held(&t->tcf_lock));
+ opt.action = t->tcf_action;
opt.t_action = params->tcft_action;
if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
- struct ip_tunnel_key *key =
- &params->tcft_enc_metadata->u.tun_info.key;
+ struct ip_tunnel_info *info =
+ &params->tcft_enc_metadata->u.tun_info;
+ struct ip_tunnel_key *key = &info->key;
__be32 key_id = tunnel_id_to_key32(key->tun_id);
if (nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id) ||
@@ -275,7 +505,14 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
&params->tcft_enc_metadata->u.tun_info) ||
nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT, key->tp_dst) ||
nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
- !(key->tun_flags & TUNNEL_CSUM)))
+ !(key->tun_flags & TUNNEL_CSUM)) ||
+ tunnel_key_opts_dump(skb, info))
+ goto nla_put_failure;
+
+ if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
+ goto nla_put_failure;
+
+ if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
goto nla_put_failure;
}
@@ -283,10 +520,12 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
&tm, TCA_TUNNEL_KEY_PAD))
goto nla_put_failure;
+ spin_unlock(&t->tcf_lock);
return skb->len;
nla_put_failure:
+ spin_unlock(&t->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -309,6 +548,13 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static int tunnel_key_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_tunnel_key_ops = {
.kind = "tunnel_key",
.type = TCA_ACT_TUNNEL_KEY,
@@ -319,6 +565,7 @@ static struct tc_action_ops act_tunnel_key_ops = {
.cleanup = tunnel_key_release,
.walk = tunnel_key_walker,
.lookup = tunnel_key_search,
+ .delete = tunnel_key_delete,
.size = sizeof(struct tcf_tunnel_key),
};
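
tunnel_key parses the new Geneve options with a single helper used in two passes: first with a NULL destination purely to validate the nested TLVs and total their length, then again to copy them into the metadata dst that was sized from that first pass. Calling-convention sketch (info assumed to point at the tun_info inside the freshly allocated metadata dst):

	opts_len = tunnel_key_copy_opts(nla, NULL, 0, extack);	/* measure */
	if (opts_len < 0)
		return opts_len;
	/* metadata dst was allocated with opts_len bytes of option room */
	info->key.tun_flags |= TUNNEL_GENEVE_OPT;
	err = tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
				   opts_len, extack);		/* copy */
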
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 1fb39e1f9d07..d1f5028384c9 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -22,8 +22,8 @@
static unsigned int vlan_net_id;
static struct tc_action_ops act_vlan_ops;
-static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
- struct tcf_result *res)
+static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
{
struct tcf_vlan *v = to_vlan(a);
struct tcf_vlan_params *p;
@@ -40,11 +40,9 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
if (skb_at_tc_ingress(skb))
skb_push_rcsum(skb, skb->mac_len);
- rcu_read_lock();
-
action = READ_ONCE(v->tcf_action);
- p = rcu_dereference(v->vlan_p);
+ p = rcu_dereference_bh(v->vlan_p);
switch (p->tcfv_action) {
case TCA_VLAN_ACT_POP:
@@ -61,7 +59,7 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
case TCA_VLAN_ACT_MODIFY:
/* No-op if no vlan tag (either hw-accel or in-payload) */
if (!skb_vlan_tagged(skb))
- goto unlock;
+ goto out;
/* extract existing tag (and guarantee no hw-accel tag) */
if (skb_vlan_tag_present(skb)) {
tci = skb_vlan_tag_get(skb);
@@ -86,18 +84,15 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
BUG();
}
- goto unlock;
-
-drop:
- action = TC_ACT_SHOT;
- qstats_drop_inc(this_cpu_ptr(v->common.cpu_qstats));
-
-unlock:
- rcu_read_unlock();
+out:
if (skb_at_tc_ingress(skb))
skb_pull_rcsum(skb, skb->mac_len);
return action;
+
+drop:
+ qstats_drop_inc(this_cpu_ptr(v->common.cpu_qstats));
+ return TC_ACT_SHOT;
}
static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
@@ -109,11 +104,12 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
static int tcf_vlan_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
- int ovr, int bind, struct netlink_ext_ack *extack)
+ int ovr, int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, vlan_net_id);
struct nlattr *tb[TCA_VLAN_MAX + 1];
- struct tcf_vlan_params *p, *p_old;
+ struct tcf_vlan_params *p;
struct tc_vlan *parm;
struct tcf_vlan *v;
int action;
@@ -133,7 +129,10 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
if (!tb[TCA_VLAN_PARMS])
return -EINVAL;
parm = nla_data(tb[TCA_VLAN_PARMS]);
- exists = tcf_idr_check(tn, parm->index, a, bind);
+ err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
if (exists && bind)
return 0;
@@ -145,12 +144,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
if (exists)
tcf_idr_release(*a, bind);
+ else
+ tcf_idr_cleanup(tn, parm->index);
return -EINVAL;
}
push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
if (push_vid >= VLAN_VID_MASK) {
if (exists)
tcf_idr_release(*a, bind);
+ else
+ tcf_idr_cleanup(tn, parm->index);
return -ERANGE;
}
@@ -163,6 +166,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
default:
if (exists)
tcf_idr_release(*a, bind);
+ else
+ tcf_idr_cleanup(tn, parm->index);
return -EPROTONOSUPPORT;
}
} else {
@@ -175,6 +180,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
default:
if (exists)
tcf_idr_release(*a, bind);
+ else
+ tcf_idr_cleanup(tn, parm->index);
return -EINVAL;
}
action = parm->v_action;
@@ -182,39 +189,37 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, parm->index, est, a,
&act_vlan_ops, bind, true);
- if (ret)
+ if (ret) {
+ tcf_idr_cleanup(tn, parm->index);
return ret;
+ }
ret = ACT_P_CREATED;
- } else {
+ } else if (!ovr) {
tcf_idr_release(*a, bind);
- if (!ovr)
- return -EEXIST;
+ return -EEXIST;
}
v = to_vlan(*a);
- ASSERT_RTNL();
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
- if (ret == ACT_P_CREATED)
- tcf_idr_release(*a, bind);
+ tcf_idr_release(*a, bind);
return -ENOMEM;
}
- v->tcf_action = parm->action;
-
- p_old = rtnl_dereference(v->vlan_p);
-
p->tcfv_action = action;
p->tcfv_push_vid = push_vid;
p->tcfv_push_prio = push_prio;
p->tcfv_push_proto = push_proto;
- rcu_assign_pointer(v->vlan_p, p);
+ spin_lock(&v->tcf_lock);
+ v->tcf_action = parm->action;
+ rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
+ spin_unlock(&v->tcf_lock);
- if (p_old)
- kfree_rcu(p_old, rcu);
+ if (p)
+ kfree_rcu(p, rcu);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
@@ -236,16 +241,18 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_vlan *v = to_vlan(a);
- struct tcf_vlan_params *p = rtnl_dereference(v->vlan_p);
+ struct tcf_vlan_params *p;
struct tc_vlan opt = {
.index = v->tcf_index,
- .refcnt = v->tcf_refcnt - ref,
- .bindcnt = v->tcf_bindcnt - bind,
- .action = v->tcf_action,
- .v_action = p->tcfv_action,
+ .refcnt = refcount_read(&v->tcf_refcnt) - ref,
+ .bindcnt = atomic_read(&v->tcf_bindcnt) - bind,
};
struct tcf_t t;
+ spin_lock(&v->tcf_lock);
+ opt.action = v->tcf_action;
+ p = rcu_dereference_protected(v->vlan_p, lockdep_is_held(&v->tcf_lock));
+ opt.v_action = p->tcfv_action;
if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -261,9 +268,12 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
tcf_tm_dump(&t, &v->tcf_tm);
if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
goto nla_put_failure;
+ spin_unlock(&v->tcf_lock);
+
return skb->len;
nla_put_failure:
+ spin_unlock(&v->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -286,16 +296,24 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static int tcf_vlan_delete(struct net *net, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, vlan_net_id);
+
+ return tcf_idr_delete_index(tn, index);
+}
+
static struct tc_action_ops act_vlan_ops = {
.kind = "vlan",
.type = TCA_ACT_VLAN,
.owner = THIS_MODULE,
- .act = tcf_vlan,
+ .act = tcf_vlan_act,
.dump = tcf_vlan_dump,
.init = tcf_vlan_init,
.cleanup = tcf_vlan_cleanup,
.walk = tcf_vlan_walker,
.lookup = tcf_vlan_search,
+ .delete = tcf_vlan_delete,
.size = sizeof(struct tcf_vlan),
};
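
act_vlan takes the same writer-side approach but exchanges the live parameter pointer in one step with rcu_swap_protected() under the action's tcf_lock; after the swap the local p holds the previous params (or NULL on first attach) and is retired through kfree_rcu(). Condensed from the hunks above:

	spin_lock(&v->tcf_lock);
	v->tcf_action = parm->action;
	rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
	spin_unlock(&v->tcf_lock);

	if (p)		/* p now points at the old params, if any */
		kfree_rcu(p, rcu);
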
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f74513a7c7a8..31bd1439cf60 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -39,7 +39,7 @@ static DEFINE_RWLOCK(cls_mod_lock);
/* Find classifier type by string name */
-static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
+static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
const struct tcf_proto_ops *t, *res = NULL;
@@ -57,6 +57,33 @@ static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
return res;
}
+static const struct tcf_proto_ops *
+tcf_proto_lookup_ops(const char *kind, struct netlink_ext_ack *extack)
+{
+ const struct tcf_proto_ops *ops;
+
+ ops = __tcf_proto_lookup_ops(kind);
+ if (ops)
+ return ops;
+#ifdef CONFIG_MODULES
+ rtnl_unlock();
+ request_module("cls_%s", kind);
+ rtnl_lock();
+ ops = __tcf_proto_lookup_ops(kind);
+ /* We dropped the RTNL semaphore in order to perform
+ * the module load. So, even if we succeeded in loading
+ * the module we have to replay the request. We indicate
+ * this using -EAGAIN.
+ */
+ if (ops) {
+ module_put(ops->owner);
+ return ERR_PTR(-EAGAIN);
+ }
+#endif
+ NL_SET_ERR_MSG(extack, "TC classifier not found");
+ return ERR_PTR(-ENOENT);
+}
+
/* Register(unregister) new classifier type */
int register_tcf_proto_ops(struct tcf_proto_ops *ops)
@@ -133,27 +160,9 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
if (!tp)
return ERR_PTR(-ENOBUFS);
- err = -ENOENT;
- tp->ops = tcf_proto_lookup_ops(kind);
- if (!tp->ops) {
-#ifdef CONFIG_MODULES
- rtnl_unlock();
- request_module("cls_%s", kind);
- rtnl_lock();
- tp->ops = tcf_proto_lookup_ops(kind);
- /* We dropped the RTNL semaphore in order to perform
- * the module load. So, even if we succeeded in loading
- * the module we have to replay the request. We indicate
- * this using -EAGAIN.
- */
- if (tp->ops) {
- module_put(tp->ops->owner);
- err = -EAGAIN;
- } else {
- NL_SET_ERR_MSG(extack, "TC classifier not found");
- err = -ENOENT;
- }
-#endif
+ tp->ops = tcf_proto_lookup_ops(kind, extack);
+ if (IS_ERR(tp->ops)) {
+ err = PTR_ERR(tp->ops);
goto errout;
}
tp->classify = tp->ops->classify;
@@ -195,11 +204,12 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
if (!chain)
return NULL;
- INIT_LIST_HEAD(&chain->filter_chain_list);
list_add_tail(&chain->list, &block->chain_list);
chain->block = block;
chain->index = chain_index;
chain->refcnt = 1;
+ if (!chain->index)
+ block->chain0.chain = chain;
return chain;
}
@@ -209,35 +219,28 @@ static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
if (item->chain_head_change)
item->chain_head_change(tp_head, item->chain_head_change_priv);
}
-static void tcf_chain_head_change(struct tcf_chain *chain,
- struct tcf_proto *tp_head)
+
+static void tcf_chain0_head_change(struct tcf_chain *chain,
+ struct tcf_proto *tp_head)
{
struct tcf_filter_chain_list_item *item;
+ struct tcf_block *block = chain->block;
- list_for_each_entry(item, &chain->filter_chain_list, list)
+ if (chain->index)
+ return;
+ list_for_each_entry(item, &block->chain0.filter_chain_list, list)
tcf_chain_head_change_item(item, tp_head);
}
-static void tcf_chain_flush(struct tcf_chain *chain)
-{
- struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);
-
- tcf_chain_head_change(chain, NULL);
- while (tp) {
- RCU_INIT_POINTER(chain->filter_chain, tp->next);
- tcf_proto_destroy(tp, NULL);
- tp = rtnl_dereference(chain->filter_chain);
- tcf_chain_put(chain);
- }
-}
-
static void tcf_chain_destroy(struct tcf_chain *chain)
{
struct tcf_block *block = chain->block;
list_del(&chain->list);
+ if (!chain->index)
+ block->chain0.chain = NULL;
kfree(chain);
- if (list_empty(&block->chain_list))
+ if (list_empty(&block->chain_list) && block->refcnt == 0)
kfree(block);
}
@@ -246,28 +249,119 @@ static void tcf_chain_hold(struct tcf_chain *chain)
++chain->refcnt;
}
-struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
- bool create)
+static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
+{
+ /* In case all the references are action references, this
+ * chain should not be shown to the user.
+ */
+ return chain->refcnt == chain->action_refcnt;
+}
+
+static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
+ u32 chain_index)
{
struct tcf_chain *chain;
list_for_each_entry(chain, &block->chain_list, list) {
- if (chain->index == chain_index) {
- tcf_chain_hold(chain);
+ if (chain->index == chain_index)
return chain;
- }
}
+ return NULL;
+}
- return create ? tcf_chain_create(block, chain_index) : NULL;
+static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
+ u32 seq, u16 flags, int event, bool unicast);
+
+static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
+ u32 chain_index, bool create,
+ bool by_act)
+{
+ struct tcf_chain *chain = tcf_chain_lookup(block, chain_index);
+
+ if (chain) {
+ tcf_chain_hold(chain);
+ } else {
+ if (!create)
+ return NULL;
+ chain = tcf_chain_create(block, chain_index);
+ if (!chain)
+ return NULL;
+ }
+
+ if (by_act)
+ ++chain->action_refcnt;
+
+ /* Send notification only in case we got the first
+ * non-action reference. Until then, the chain acts only as
+ * a placeholder for actions pointing to it and user ought
+ * not know about them.
+ */
+ if (chain->refcnt - chain->action_refcnt == 1 && !by_act)
+ tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
+ RTM_NEWCHAIN, false);
+
+ return chain;
}
-EXPORT_SYMBOL(tcf_chain_get);
-void tcf_chain_put(struct tcf_chain *chain)
+static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
+ bool create)
{
- if (--chain->refcnt == 0)
+ return __tcf_chain_get(block, chain_index, create, false);
+}
+
+struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
+{
+ return __tcf_chain_get(block, chain_index, true, true);
+}
+EXPORT_SYMBOL(tcf_chain_get_by_act);
+
+static void tc_chain_tmplt_del(struct tcf_chain *chain);
+
+static void __tcf_chain_put(struct tcf_chain *chain, bool by_act)
+{
+ if (by_act)
+ chain->action_refcnt--;
+ chain->refcnt--;
+
+ /* The last dropped non-action reference will trigger notification. */
+ if (chain->refcnt - chain->action_refcnt == 0 && !by_act)
+ tc_chain_notify(chain, NULL, 0, 0, RTM_DELCHAIN, false);
+
+ if (chain->refcnt == 0) {
+ tc_chain_tmplt_del(chain);
tcf_chain_destroy(chain);
+ }
+}
+
+static void tcf_chain_put(struct tcf_chain *chain)
+{
+ __tcf_chain_put(chain, false);
+}
+
+void tcf_chain_put_by_act(struct tcf_chain *chain)
+{
+ __tcf_chain_put(chain, true);
+}
+EXPORT_SYMBOL(tcf_chain_put_by_act);
+
+static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
+{
+ if (chain->explicitly_created)
+ tcf_chain_put(chain);
+}
+
+static void tcf_chain_flush(struct tcf_chain *chain)
+{
+ struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);
+
+ tcf_chain0_head_change(chain, NULL);
+ while (tp) {
+ RCU_INIT_POINTER(chain->filter_chain, tp->next);
+ tcf_proto_destroy(tp, NULL);
+ tp = rtnl_dereference(chain->filter_chain);
+ tcf_chain_put(chain);
+ }
}
-EXPORT_SYMBOL(tcf_chain_put);
static bool tcf_block_offload_in_use(struct tcf_block *block)
{
@@ -277,18 +371,21 @@ static bool tcf_block_offload_in_use(struct tcf_block *block)
static int tcf_block_offload_cmd(struct tcf_block *block,
struct net_device *dev,
struct tcf_block_ext_info *ei,
- enum tc_block_command command)
+ enum tc_block_command command,
+ struct netlink_ext_ack *extack)
{
struct tc_block_offload bo = {};
bo.command = command;
bo.binder_type = ei->binder_type;
bo.block = block;
+ bo.extack = extack;
return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
}
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
- struct tcf_block_ext_info *ei)
+ struct tcf_block_ext_info *ei,
+ struct netlink_ext_ack *extack)
{
struct net_device *dev = q->dev_queue->dev;
int err;
@@ -299,10 +396,12 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
/* If the tc offload feature is disabled and the block we try to
* bind to already has some offloaded filters, refuse to bind.
*/
- if (!tc_can_offload(dev) && tcf_block_offload_in_use(block))
+ if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
+ NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
return -EOPNOTSUPP;
+ }
- err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND);
+ err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
if (err == -EOPNOTSUPP)
goto no_offload_dev_inc;
return err;
@@ -322,7 +421,7 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
if (!dev->netdev_ops->ndo_setup_tc)
goto no_offload_dev_dec;
- err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND);
+ err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
if (err == -EOPNOTSUPP)
goto no_offload_dev_dec;
return;
@@ -332,10 +431,11 @@ no_offload_dev_dec:
}
static int
-tcf_chain_head_change_cb_add(struct tcf_chain *chain,
- struct tcf_block_ext_info *ei,
- struct netlink_ext_ack *extack)
+tcf_chain0_head_change_cb_add(struct tcf_block *block,
+ struct tcf_block_ext_info *ei,
+ struct netlink_ext_ack *extack)
{
+ struct tcf_chain *chain0 = block->chain0.chain;
struct tcf_filter_chain_list_item *item;
item = kmalloc(sizeof(*item), GFP_KERNEL);
@@ -345,23 +445,25 @@ tcf_chain_head_change_cb_add(struct tcf_chain *chain,
}
item->chain_head_change = ei->chain_head_change;
item->chain_head_change_priv = ei->chain_head_change_priv;
- if (chain->filter_chain)
- tcf_chain_head_change_item(item, chain->filter_chain);
- list_add(&item->list, &chain->filter_chain_list);
+ if (chain0 && chain0->filter_chain)
+ tcf_chain_head_change_item(item, chain0->filter_chain);
+ list_add(&item->list, &block->chain0.filter_chain_list);
return 0;
}
static void
-tcf_chain_head_change_cb_del(struct tcf_chain *chain,
- struct tcf_block_ext_info *ei)
+tcf_chain0_head_change_cb_del(struct tcf_block *block,
+ struct tcf_block_ext_info *ei)
{
+ struct tcf_chain *chain0 = block->chain0.chain;
struct tcf_filter_chain_list_item *item;
- list_for_each_entry(item, &chain->filter_chain_list, list) {
+ list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
(item->chain_head_change == ei->chain_head_change &&
item->chain_head_change_priv == ei->chain_head_change_priv)) {
- tcf_chain_head_change_item(item, NULL);
+ if (chain0)
+ tcf_chain_head_change_item(item, NULL);
list_del(&item->list);
kfree(item);
return;
@@ -397,8 +499,6 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
struct netlink_ext_ack *extack)
{
struct tcf_block *block;
- struct tcf_chain *chain;
- int err;
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (!block) {
@@ -408,14 +508,8 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
INIT_LIST_HEAD(&block->chain_list);
INIT_LIST_HEAD(&block->cb_list);
INIT_LIST_HEAD(&block->owner_list);
+ INIT_LIST_HEAD(&block->chain0.filter_chain_list);
- /* Create chain 0 by default, it has to be always present. */
- chain = tcf_chain_create(block, 0);
- if (!chain) {
- NL_SET_ERR_MSG(extack, "Failed to create new tcf chain");
- err = -ENOMEM;
- goto err_chain_create;
- }
block->refcnt = 1;
block->net = net;
block->index = block_index;
@@ -424,10 +518,6 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
if (!tcf_block_shared(block))
block->q = q;
return block;
-
-err_chain_create:
- kfree(block);
- return ERR_PTR(err);
}
static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
@@ -509,11 +599,6 @@ static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
return block;
}
-static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block)
-{
- return list_first_entry(&block->chain_list, struct tcf_chain, list);
-}
-
struct tcf_block_owner_item {
struct list_head list;
struct Qdisc *q;
@@ -607,12 +692,11 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
- err = tcf_chain_head_change_cb_add(tcf_block_chain_zero(block),
- ei, extack);
+ err = tcf_chain0_head_change_cb_add(block, ei, extack);
if (err)
- goto err_chain_head_change_cb_add;
+ goto err_chain0_head_change_cb_add;
- err = tcf_block_offload_bind(block, q, ei);
+ err = tcf_block_offload_bind(block, q, ei, extack);
if (err)
goto err_block_offload_bind;
@@ -620,15 +704,14 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
return 0;
err_block_offload_bind:
- tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
-err_chain_head_change_cb_add:
+ tcf_chain0_head_change_cb_del(block, ei);
+err_chain0_head_change_cb_add:
tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
if (created) {
if (tcf_block_shared(block))
tcf_block_remove(block, net);
err_block_insert:
- kfree(tcf_block_chain_zero(block));
kfree(block);
} else {
block->refcnt--;
@@ -668,10 +751,10 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
if (!block)
return;
- tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
+ tcf_chain0_head_change_cb_del(block, ei);
tcf_block_owner_del(block, q, ei->binder_type);
- if (--block->refcnt == 0) {
+ if (block->refcnt == 1) {
if (tcf_block_shared(block))
tcf_block_remove(block, block->net);
@@ -687,13 +770,18 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
tcf_block_offload_unbind(block, q, ei);
- if (block->refcnt == 0) {
+ if (block->refcnt == 1) {
/* At this point, all the chains should have refcnt >= 1. */
- list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+ list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
+ tcf_chain_put_explicitly_created(chain);
tcf_chain_put(chain);
+ }
- /* Finally, put chain 0 and allow block to be freed. */
- tcf_chain_put(tcf_block_chain_zero(block));
+ block->refcnt--;
+ if (list_empty(&block->chain_list))
+ kfree(block);
+ } else {
+ block->refcnt--;
}
}
EXPORT_SYMBOL(tcf_block_put_ext);
@@ -746,18 +834,53 @@ unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
}
EXPORT_SYMBOL(tcf_block_cb_decref);
+static int
+tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
+ void *cb_priv, bool add, bool offload_in_use,
+ struct netlink_ext_ack *extack)
+{
+ struct tcf_chain *chain;
+ struct tcf_proto *tp;
+ int err;
+
+ list_for_each_entry(chain, &block->chain_list, list) {
+ for (tp = rtnl_dereference(chain->filter_chain); tp;
+ tp = rtnl_dereference(tp->next)) {
+ if (tp->ops->reoffload) {
+ err = tp->ops->reoffload(tp, add, cb, cb_priv,
+ extack);
+ if (err && add)
+ goto err_playback_remove;
+ } else if (add && offload_in_use) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
+ goto err_playback_remove;
+ }
+ }
+ }
+
+ return 0;
+
+err_playback_remove:
+ tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
+ extack);
+ return err;
+}
+
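Because __tcf_block_cb_register() now replays already-present rules through the new callback, drivers may bind to blocks that already carry offloaded filters. A hedged driver-side sketch (example_setup_cb and struct example_priv are assumed names, not from this patch):

static int example_setup_tc_block(struct example_priv *priv,
				  struct tc_block_offload *bo)
{
	switch (bo->command) {
	case TC_BLOCK_BIND:
		/* Existing rules are played back through example_setup_cb
		 * before the callback is added to the block's cb_list.
		 */
		return tcf_block_cb_register(bo->block, example_setup_cb,
					     priv, priv, bo->extack);
	case TC_BLOCK_UNBIND:
		/* Unregistering replays the rules with add == false. */
		tcf_block_cb_unregister(bo->block, example_setup_cb, priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}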
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident,
- void *cb_priv)
+ void *cb_priv,
+ struct netlink_ext_ack *extack)
{
struct tcf_block_cb *block_cb;
+ int err;
- /* At this point, playback of previous block cb calls is not supported,
- * so forbid to register to block which already has some offloaded
- * filters present.
- */
- if (tcf_block_offload_in_use(block))
- return ERR_PTR(-EOPNOTSUPP);
+ /* Replay any already present rules */
+ err = tcf_block_playback_offloads(block, cb, cb_priv, true,
+ tcf_block_offload_in_use(block),
+ extack);
+ if (err)
+ return ERR_PTR(err);
block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
if (!block_cb)
@@ -772,17 +895,22 @@ EXPORT_SYMBOL(__tcf_block_cb_register);
int tcf_block_cb_register(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident,
- void *cb_priv)
+ void *cb_priv, struct netlink_ext_ack *extack)
{
struct tcf_block_cb *block_cb;
- block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
- return IS_ERR(block_cb) ? PTR_ERR(block_cb) : 0;
+ block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv,
+ extack);
+ return PTR_ERR_OR_ZERO(block_cb);
}
EXPORT_SYMBOL(tcf_block_cb_register);
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
+void __tcf_block_cb_unregister(struct tcf_block *block,
+ struct tcf_block_cb *block_cb)
{
+ tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv,
+ false, tcf_block_offload_in_use(block),
+ NULL);
list_del(&block_cb->list);
kfree(block_cb);
}
@@ -796,7 +924,7 @@ void tcf_block_cb_unregister(struct tcf_block *block,
block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
if (!block_cb)
return;
- __tcf_block_cb_unregister(block_cb);
+ __tcf_block_cb_unregister(block, block_cb);
}
EXPORT_SYMBOL(tcf_block_cb_unregister);
@@ -893,7 +1021,7 @@ static void tcf_chain_tp_insert(struct tcf_chain *chain,
struct tcf_proto *tp)
{
if (*chain_info->pprev == chain->filter_chain)
- tcf_chain_head_change(chain, tp);
+ tcf_chain0_head_change(chain, tp);
RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
rcu_assign_pointer(*chain_info->pprev, tp);
tcf_chain_hold(chain);
@@ -906,7 +1034,7 @@ static void tcf_chain_tp_remove(struct tcf_chain *chain,
struct tcf_proto *next = rtnl_dereference(chain_info->next);
if (tp == chain->filter_chain)
- tcf_chain_head_change(chain, next);
+ tcf_chain0_head_change(chain, next);
RCU_INIT_POINTER(*chain_info->pprev, next);
tcf_chain_put(chain);
}
@@ -1182,6 +1310,12 @@ replay:
goto errout;
}
+ if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
+ NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
+ err = -EINVAL;
+ goto errout;
+ }
+
err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
extack);
@@ -1257,6 +1391,13 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
}
chain = tcf_chain_get(block, chain_index, false);
if (!chain) {
+ /* User requested flush on non-existent chain. Nothing to do,
+ * so just return success.
+ */
+ if (prio == 0) {
+ err = 0;
+ goto errout;
+ }
NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
err = -EINVAL;
goto errout;
@@ -1463,7 +1604,9 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
arg.w.stop = 0;
arg.w.skip = cb->args[1] - 1;
arg.w.count = 0;
+ arg.w.cookie = cb->args[2];
tp->ops->walk(tp, &arg.w);
+ cb->args[2] = arg.w.cookie;
cb->args[1] = arg.w.count + 1;
if (arg.w.stop)
return false;
@@ -1561,14 +1704,334 @@ out:
return skb->len;
}
+static int tc_chain_fill_node(struct tcf_chain *chain, struct net *net,
+ struct sk_buff *skb, struct tcf_block *block,
+ u32 portid, u32 seq, u16 flags, int event)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ const struct tcf_proto_ops *ops;
+ struct nlmsghdr *nlh;
+ struct tcmsg *tcm;
+ void *priv;
+
+ ops = chain->tmplt_ops;
+ priv = chain->tmplt_priv;
+
+ nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
+ if (!nlh)
+ goto out_nlmsg_trim;
+ tcm = nlmsg_data(nlh);
+ tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm__pad1 = 0;
+ tcm->tcm__pad2 = 0;
+ tcm->tcm_handle = 0;
+ if (block->q) {
+ tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
+ tcm->tcm_parent = block->q->handle;
+ } else {
+ tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
+ tcm->tcm_block_index = block->index;
+ }
+
+ if (nla_put_u32(skb, TCA_CHAIN, chain->index))
+ goto nla_put_failure;
+
+ if (ops) {
+ if (nla_put_string(skb, TCA_KIND, ops->kind))
+ goto nla_put_failure;
+ if (ops->tmplt_dump(skb, net, priv) < 0)
+ goto nla_put_failure;
+ }
+
+ nlh->nlmsg_len = skb_tail_pointer(skb) - b;
+ return skb->len;
+
+out_nlmsg_trim:
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -EMSGSIZE;
+}
+
+static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
+ u32 seq, u16 flags, int event, bool unicast)
+{
+ u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+ struct tcf_block *block = chain->block;
+ struct net *net = block->net;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ if (tc_chain_fill_node(chain, net, skb, block, portid,
+ seq, flags, event) <= 0) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if (unicast)
+ return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
+
+ return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
+}
+
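For reference, the chain message assembled above has roughly the following layout (a sketch reconstructed from tc_chain_fill_node(), attributes in emission order):

	struct nlmsghdr		/* RTM_NEWCHAIN, RTM_DELCHAIN or a RTM_GETCHAIN reply */
	struct tcmsg		/* qdisc ifindex/parent, or TCM_IFINDEX_MAGIC_BLOCK
				 * plus tcm_block_index for shared blocks */
	TCA_CHAIN		/* u32 chain index */
	TCA_KIND		/* template classifier kind, only if a template is set */
	...			/* classifier-specific template attrs via ->tmplt_dump() */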
+static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
+ struct nlattr **tca,
+ struct netlink_ext_ack *extack)
+{
+ const struct tcf_proto_ops *ops;
+ void *tmplt_priv;
+
+ /* If kind is not set, the user did not specify a template. */
+ if (!tca[TCA_KIND])
+ return 0;
+
+ ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), extack);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+ if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
+ NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
+ return -EOPNOTSUPP;
+ }
+
+ tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
+ if (IS_ERR(tmplt_priv)) {
+ module_put(ops->owner);
+ return PTR_ERR(tmplt_priv);
+ }
+ chain->tmplt_ops = ops;
+ chain->tmplt_priv = tmplt_priv;
+ return 0;
+}
+
+static void tc_chain_tmplt_del(struct tcf_chain *chain)
+{
+ const struct tcf_proto_ops *ops = chain->tmplt_ops;
+
+ /* If template ops are not set, there is no work to do. */
+ if (!ops)
+ return;
+
+ ops->tmplt_destroy(chain->tmplt_priv);
+ module_put(ops->owner);
+}
+
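A classifier opts into chain templates by implementing all three hooks; tc_chain_tmplt_add() above rejects partial support. A sketch of the required wiring (the example_* helpers are assumed names, mirroring the flower hookup later in this series):

static struct tcf_proto_ops example_cls_ops __read_mostly = {
	.kind		= "example",
	/* ...mandatory classifier ops elided... */
	.tmplt_create	= example_tmplt_create,		/* parse attrs, return priv */
	.tmplt_destroy	= example_tmplt_destroy,	/* free priv */
	.tmplt_dump	= example_tmplt_dump,		/* fill template attrs */
	.owner		= THIS_MODULE,
};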
+/* Add/delete/get a chain */
+
+static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = sock_net(skb->sk);
+ struct nlattr *tca[TCA_MAX + 1];
+ struct tcmsg *t;
+ u32 parent;
+ u32 chain_index;
+ struct Qdisc *q = NULL;
+ struct tcf_chain *chain = NULL;
+ struct tcf_block *block;
+ unsigned long cl;
+ int err;
+
+ if (n->nlmsg_type != RTM_GETCHAIN &&
+ !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
+
+replay:
+ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+ if (err < 0)
+ return err;
+
+ t = nlmsg_data(n);
+ parent = t->tcm_parent;
+ cl = 0;
+
+ block = tcf_block_find(net, &q, &parent, &cl,
+ t->tcm_ifindex, t->tcm_block_index, extack);
+ if (IS_ERR(block))
+ return PTR_ERR(block);
+
+ chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ if (chain_index > TC_ACT_EXT_VAL_MASK) {
+ NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
+ return -EINVAL;
+ }
+ chain = tcf_chain_lookup(block, chain_index);
+ if (n->nlmsg_type == RTM_NEWCHAIN) {
+ if (chain) {
+ if (tcf_chain_held_by_acts_only(chain)) {
+ /* The chain exists only because there is
+ * some action referencing it.
+ */
+ tcf_chain_hold(chain);
+ } else {
+ NL_SET_ERR_MSG(extack, "Filter chain already exists");
+ return -EEXIST;
+ }
+ } else {
+ if (!(n->nlmsg_flags & NLM_F_CREATE)) {
+ NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
+ return -ENOENT;
+ }
+ chain = tcf_chain_create(block, chain_index);
+ if (!chain) {
+ NL_SET_ERR_MSG(extack, "Failed to create filter chain");
+ return -ENOMEM;
+ }
+ }
+ } else {
+ if (!chain || tcf_chain_held_by_acts_only(chain)) {
+ NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
+ return -EINVAL;
+ }
+ tcf_chain_hold(chain);
+ }
+
+ switch (n->nlmsg_type) {
+ case RTM_NEWCHAIN:
+ err = tc_chain_tmplt_add(chain, net, tca, extack);
+ if (err)
+ goto errout;
+ /* In case the chain was successfully added, take a reference
+ * to the chain. This ensures that an empty chain
+ * does not disappear at the end of this function.
+ */
+ tcf_chain_hold(chain);
+ chain->explicitly_created = true;
+ tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
+ RTM_NEWCHAIN, false);
+ break;
+ case RTM_DELCHAIN:
+ /* Flush the chain first as the user requested chain removal. */
+ tcf_chain_flush(chain);
+ /* In case the chain was successfully deleted, put a reference
+ * to the chain previously taken during addition.
+ */
+ tcf_chain_put_explicitly_created(chain);
+ chain->explicitly_created = false;
+ break;
+ case RTM_GETCHAIN:
+ err = tc_chain_notify(chain, skb, n->nlmsg_seq,
+ n->nlmsg_seq, n->nlmsg_type, true);
+ if (err < 0)
+ NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG(extack, "Unsupported message type");
+ goto errout;
+ }
+
+errout:
+ tcf_chain_put(chain);
+ if (err == -EAGAIN)
+ /* Replay the request. */
+ goto replay;
+ return err;
+}
+
+/* called with RTNL */
+static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct nlattr *tca[TCA_MAX + 1];
+ struct Qdisc *q = NULL;
+ struct tcf_block *block;
+ struct tcf_chain *chain;
+ struct tcmsg *tcm = nlmsg_data(cb->nlh);
+ long index_start;
+ long index;
+ u32 parent;
+ int err;
+
+ if (nlmsg_len(cb->nlh) < sizeof(*tcm))
+ return skb->len;
+
+ err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
+ if (err)
+ return err;
+
+ if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
+ block = tcf_block_lookup(net, tcm->tcm_block_index);
+ if (!block)
+ goto out;
+ /* If we work with a block index, q is NULL and the parent
+ * value is never used in the following code, so set parent
+ * to zero just to silence the compiler's warning about it
+ * being uninitialized.
+ */
+ parent = 0;
+ } else {
+ const struct Qdisc_class_ops *cops;
+ struct net_device *dev;
+ unsigned long cl = 0;
+
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
+ return skb->len;
+
+ parent = tcm->tcm_parent;
+ if (!parent) {
+ q = dev->qdisc;
+ parent = q->handle;
+ } else {
+ q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
+ }
+ if (!q)
+ goto out;
+ cops = q->ops->cl_ops;
+ if (!cops)
+ goto out;
+ if (!cops->tcf_block)
+ goto out;
+ if (TC_H_MIN(tcm->tcm_parent)) {
+ cl = cops->find(q, tcm->tcm_parent);
+ if (cl == 0)
+ goto out;
+ }
+ block = cops->tcf_block(q, cl, NULL);
+ if (!block)
+ goto out;
+ if (tcf_block_shared(block))
+ q = NULL;
+ }
+
+ index_start = cb->args[0];
+ index = 0;
+
+ list_for_each_entry(chain, &block->chain_list, list) {
+ if ((tca[TCA_CHAIN] &&
+ nla_get_u32(tca[TCA_CHAIN]) != chain->index))
+ continue;
+ if (index < index_start) {
+ index++;
+ continue;
+ }
+ if (tcf_chain_held_by_acts_only(chain))
+ continue;
+ err = tc_chain_fill_node(chain, net, skb, block,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ RTM_NEWCHAIN);
+ if (err <= 0)
+ break;
+ index++;
+ }
+
+ cb->args[0] = index;
+
+out:
+ /* If we made no progress, the error (EMSGSIZE) is real. */
+ if (skb->len == 0 && err)
+ return err;
+ return skb->len;
+}
+
void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
- LIST_HEAD(actions);
-
- ASSERT_RTNL();
- tcf_exts_to_list(exts, &actions);
- tcf_action_destroy(&actions, TCA_ACT_UNBIND);
+ tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
kfree(exts->actions);
exts->nr_actions = 0;
#endif
@@ -1587,7 +2050,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
if (exts->police && tb[exts->police]) {
act = tcf_action_init_1(net, tp, tb[exts->police],
rate_tlv, "police", ovr,
- TCA_ACT_BIND, extack);
+ TCA_ACT_BIND, true, extack);
if (IS_ERR(act))
return PTR_ERR(act);
@@ -1595,17 +2058,15 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
exts->actions[0] = act;
exts->nr_actions = 1;
} else if (exts->action && tb[exts->action]) {
- LIST_HEAD(actions);
- int err, i = 0;
+ int err;
err = tcf_action_init(net, tp, tb[exts->action],
rate_tlv, NULL, ovr, TCA_ACT_BIND,
- &actions, &attr_size, extack);
- if (err)
+ exts->actions, &attr_size, true,
+ extack);
+ if (err < 0)
return err;
- list_for_each_entry(act, &actions, list)
- exts->actions[i++] = act;
- exts->nr_actions = i;
+ exts->nr_actions = err;
}
exts->net = net;
}
@@ -1654,14 +2115,11 @@ int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
* tc data even if iproute2 was newer - jhs
*/
if (exts->type != TCA_OLD_COMPAT) {
- LIST_HEAD(actions);
-
nest = nla_nest_start(skb, exts->action);
if (nest == NULL)
goto nla_put_failure;
- tcf_exts_to_list(exts, &actions);
- if (tcf_action_dump(skb, &actions, 0, 0) < 0)
+ if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
} else if (exts->police) {
@@ -1718,6 +2176,7 @@ static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts,
if (!dev)
continue;
ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop);
+ a->ops->put_dev(dev);
if (ret < 0)
return ret;
ok_count += ret;
@@ -1786,6 +2245,10 @@ static int __init tc_filter_init(void)
rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
tc_dump_tfilter, 0);
+ rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
+ rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
+ rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
+ tc_dump_chain, 0);
return 0;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 95367f37098d..6a5dce8baf19 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -324,4 +324,3 @@ static void __exit exit_basic(void)
module_init(init_basic)
module_exit(exit_basic)
MODULE_LICENSE("GPL");
-
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 1aa7f6511065..fa6fe2fe0f32 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -43,6 +43,7 @@ struct cls_bpf_prog {
struct tcf_result res;
bool exts_integrated;
u32 gen_flags;
+ unsigned int in_hw_count;
struct tcf_exts exts;
u32 handle;
u16 bpf_num_ops;
@@ -174,6 +175,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
cls_bpf_offload_cmd(tp, oldprog, prog, extack);
return err;
} else if (err > 0) {
+ prog->in_hw_count = err;
tcf_block_offload_inc(block, &prog->gen_flags);
}
}
@@ -347,12 +349,10 @@ static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
return -EINVAL;
- bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
+ bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
if (bpf_ops == NULL)
return -ENOMEM;
- memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
-
fprog_tmp.len = bpf_num_ops;
fprog_tmp.filter = bpf_ops;
@@ -652,6 +652,42 @@ skip:
}
}
+static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+ void *cb_priv, struct netlink_ext_ack *extack)
+{
+ struct cls_bpf_head *head = rtnl_dereference(tp->root);
+ struct tcf_block *block = tp->chain->block;
+ struct tc_cls_bpf_offload cls_bpf = {};
+ struct cls_bpf_prog *prog;
+ int err;
+
+ list_for_each_entry(prog, &head->plist, link) {
+ if (tc_skip_hw(prog->gen_flags))
+ continue;
+
+ tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
+ extack);
+ cls_bpf.command = TC_CLSBPF_OFFLOAD;
+ cls_bpf.exts = &prog->exts;
+ cls_bpf.prog = add ? prog->filter : NULL;
+ cls_bpf.oldprog = add ? NULL : prog->filter;
+ cls_bpf.name = prog->bpf_name;
+ cls_bpf.exts_integrated = prog->exts_integrated;
+
+ err = cb(TC_SETUP_CLSBPF, &cls_bpf, cb_priv);
+ if (err) {
+ if (add && tc_skip_sw(prog->gen_flags))
+ return err;
+ continue;
+ }
+
+ tc_cls_offload_cnt_update(block, &prog->in_hw_count,
+ &prog->gen_flags, add);
+ }
+
+ return 0;
+}
+
static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
.kind = "bpf",
.owner = THIS_MODULE,
@@ -662,6 +698,7 @@ static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
.change = cls_bpf_change,
.delete = cls_bpf_delete,
.walk = cls_bpf_walk,
+ .reoffload = cls_bpf_reoffload,
.dump = cls_bpf_dump,
.bind_class = cls_bpf_bind_class,
};
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 9e8b26a80fb3..6fd9bdd93796 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -24,6 +24,7 @@
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
+#include <net/geneve.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
@@ -35,6 +36,7 @@ struct fl_flow_key {
struct flow_dissector_key_basic basic;
struct flow_dissector_key_eth_addrs eth;
struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_vlan cvlan;
union {
struct flow_dissector_key_ipv4_addrs ipv4;
struct flow_dissector_key_ipv6_addrs ipv6;
@@ -51,6 +53,8 @@ struct fl_flow_key {
struct flow_dissector_key_mpls mpls;
struct flow_dissector_key_tcp tcp;
struct flow_dissector_key_ip ip;
+ struct flow_dissector_key_ip enc_ip;
+ struct flow_dissector_key_enc_opts enc_opts;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct fl_flow_mask_range {
@@ -70,6 +74,13 @@ struct fl_flow_mask {
struct list_head list;
};
+struct fl_flow_tmplt {
+ struct fl_flow_key dummy_key;
+ struct fl_flow_key mask;
+ struct flow_dissector dissector;
+ struct tcf_chain *chain;
+};
+
struct cls_fl_head {
struct rhashtable ht;
struct list_head masks;
@@ -87,6 +98,7 @@ struct cls_fl_filter {
struct list_head list;
u32 handle;
u32 flags;
+ unsigned int in_hw_count;
struct rcu_work rwork;
struct net_device *hw_dev;
};
@@ -144,6 +156,23 @@ static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
*lmkey++ = *lkey++ & *lmask++;
}
+static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
+ struct fl_flow_mask *mask)
+{
+ const long *lmask = fl_key_get_start(&mask->key, mask);
+ const long *ltmplt;
+ int i;
+
+ if (!tmplt)
+ return true;
+ ltmplt = fl_key_get_start(&tmplt->mask, mask);
+ for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
+ if (~*ltmplt++ & *lmask++)
+ return false;
+ }
+ return true;
+}
+
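The fit check walks both masks one long at a time: any bit set in the filter mask but clear in the template mask makes ~tmplt & mask nonzero. A worked illustration with small, purely illustrative values:

	template mask:	0x00ff
	filter mask:	0x000f	->	~0x00ff & 0x000f == 0x0000	(fits)
	filter mask:	0x0f0f	->	~0x00ff & 0x0f0f == 0x0f00	(rejected)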
static void fl_clear_masked_range(struct fl_flow_key *key,
struct fl_flow_mask *mask)
{
@@ -289,6 +318,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
fl_hw_destroy_filter(tp, f, NULL);
return err;
} else if (err > 0) {
+ f->in_hw_count = err;
tcf_block_offload_inc(block, &f->flags);
}
@@ -447,6 +477,28 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
[TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED },
+ [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy
+enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
+ [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy
+geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
+ [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
+ .len = 128 },
};
static void fl_set_key_val(struct nlattr **tb,
@@ -498,22 +550,26 @@ static int fl_set_key_mpls(struct nlattr **tb,
}
static void fl_set_key_vlan(struct nlattr **tb,
+ __be16 ethertype,
+ int vlan_id_key, int vlan_prio_key,
struct flow_dissector_key_vlan *key_val,
struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK 0x7
- if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
+ if (tb[vlan_id_key]) {
key_val->vlan_id =
- nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
+ nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
key_mask->vlan_id = VLAN_VID_MASK;
}
- if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
+ if (tb[vlan_prio_key]) {
key_val->vlan_priority =
- nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
+ nla_get_u8(tb[vlan_prio_key]) &
VLAN_PRIORITY_MASK;
key_mask->vlan_priority = VLAN_PRIORITY_MASK;
}
+ key_val->vlan_tpid = ethertype;
+ key_mask->vlan_tpid = cpu_to_be16(~0);
}
static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
@@ -551,17 +607,156 @@ static int fl_set_key_flags(struct nlattr **tb,
return 0;
}
-static void fl_set_key_ip(struct nlattr **tb,
+static void fl_set_key_ip(struct nlattr **tb, bool encap,
struct flow_dissector_key_ip *key,
struct flow_dissector_key_ip *mask)
{
- fl_set_key_val(tb, &key->tos, TCA_FLOWER_KEY_IP_TOS,
- &mask->tos, TCA_FLOWER_KEY_IP_TOS_MASK,
- sizeof(key->tos));
+ int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
+ int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
+ int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
+ int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
+
+ fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
+ fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
+}
+
+static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
+ int depth, int option_len,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
+ struct nlattr *class = NULL, *type = NULL, *data = NULL;
+ struct geneve_opt *opt;
+ int err, data_len = 0;
+
+ if (option_len > sizeof(struct geneve_opt))
+ data_len = option_len - sizeof(struct geneve_opt);
+
+ opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
+ memset(opt, 0xff, option_len);
+ opt->length = data_len / 4;
+ opt->r1 = 0;
+ opt->r2 = 0;
+ opt->r3 = 0;
+
+ /* If no mask has been provided, we assume an exact match. */
+ if (!depth)
+ return sizeof(struct geneve_opt) + data_len;
+
+ if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
+ NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
+ return -EINVAL;
+ }
+
+ err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
+ nla, geneve_opt_policy, extack);
+ if (err < 0)
+ return err;
+
+ /* We are not allowed to omit any of CLASS, TYPE or DATA
+ * fields from the key.
+ */
+ if (!option_len &&
+ (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
+ !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
+ !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
+ NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
+ return -EINVAL;
+ }
+
+ /* Omitting any of CLASS, TYPE or DATA fields is allowed
+ * for the mask.
+ */
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
+ int new_len = key->enc_opts.len;
+
+ data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
+ data_len = nla_len(data);
+ if (data_len < 4) {
+ NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
+ return -ERANGE;
+ }
+ if (data_len % 4) {
+ NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
+ return -ERANGE;
+ }
+
+ new_len += sizeof(struct geneve_opt) + data_len;
+ BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
+ if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
+ NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
+ return -ERANGE;
+ }
+ opt->length = data_len / 4;
+ memcpy(opt->opt_data, nla_data(data), data_len);
+ }
+
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
+ class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
+ opt->opt_class = nla_get_be16(class);
+ }
+
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
+ type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
+ opt->type = nla_get_u8(type);
+ }
+
+ return sizeof(struct geneve_opt) + data_len;
+}
+
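The options accumulate in enc_opts.data as consecutive TLVs: each entry is a struct geneve_opt header followed by option data in 4-byte multiples, with opt->length counting that data in 4-byte words. The return value sizeof(struct geneve_opt) + data_len therefore advances enc_opts.len past exactly one TLV (layout sketch, for illustration):

	enc_opts.data:	[geneve_opt hdr][data...][geneve_opt hdr][data...] ...
	enc_opts.len:	sum of (sizeof(struct geneve_opt) + 4 * opt->length)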
+static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
+ struct fl_flow_key *mask,
+ struct netlink_ext_ack *extack)
+{
+ const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
+ int option_len, key_depth, msk_depth = 0;
+
+ nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
+
+ if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
+ nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
+ msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
+ }
- fl_set_key_val(tb, &key->ttl, TCA_FLOWER_KEY_IP_TTL,
- &mask->ttl, TCA_FLOWER_KEY_IP_TTL_MASK,
- sizeof(key->ttl));
+ nla_for_each_attr(nla_opt_key, nla_enc_key,
+ nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
+ switch (nla_type(nla_opt_key)) {
+ case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
+ option_len = 0;
+ key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
+ option_len = fl_set_geneve_opt(nla_opt_key, key,
+ key_depth, option_len,
+ extack);
+ if (option_len < 0)
+ return option_len;
+
+ key->enc_opts.len += option_len;
+ /* At the same time we need to parse through the mask
+ * in order to verify that the key and mask attribute
+ * lengths match.
+ */
+ mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
+ option_len = fl_set_geneve_opt(nla_opt_msk, mask,
+ msk_depth, option_len,
+ extack);
+ if (option_len < 0)
+ return option_len;
+
+ mask->enc_opts.len += option_len;
+ if (key->enc_opts.len != mask->enc_opts.len) {
+ NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
+ return -EINVAL;
+ }
+
+ if (msk_depth)
+ nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
}
static int fl_set_key(struct net *net, struct nlattr **tb,
@@ -590,12 +785,28 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
- if (ethertype == htons(ETH_P_8021Q)) {
- fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
- fl_set_key_val(tb, &key->basic.n_proto,
- TCA_FLOWER_KEY_VLAN_ETH_TYPE,
- &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
- sizeof(key->basic.n_proto));
+ if (eth_type_vlan(ethertype)) {
+ fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
+ TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
+ &mask->vlan);
+
+ if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
+ ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
+ if (eth_type_vlan(ethertype)) {
+ fl_set_key_vlan(tb, ethertype,
+ TCA_FLOWER_KEY_CVLAN_ID,
+ TCA_FLOWER_KEY_CVLAN_PRIO,
+ &key->cvlan, &mask->cvlan);
+ fl_set_key_val(tb, &key->basic.n_proto,
+ TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
+ &mask->basic.n_proto,
+ TCA_FLOWER_UNSPEC,
+ sizeof(key->basic.n_proto));
+ } else {
+ key->basic.n_proto = ethertype;
+ mask->basic.n_proto = cpu_to_be16(~0);
+ }
+ }
} else {
key->basic.n_proto = ethertype;
mask->basic.n_proto = cpu_to_be16(~0);
@@ -607,7 +818,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
&mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
sizeof(key->basic.ip_proto));
- fl_set_key_ip(tb, &key->ip, &mask->ip);
+ fl_set_key_ip(tb, false, &key->ip, &mask->ip);
}
if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
@@ -742,6 +953,14 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
&mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
sizeof(key->enc_tp.dst));
+ fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
+
+ if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
+ ret = fl_set_enc_opt(tb, key, mask, extack);
+ if (ret)
+ return ret;
+ }
+
if (tb[TCA_FLOWER_KEY_FLAGS])
ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
@@ -793,47 +1012,54 @@ static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
FL_KEY_SET(keys, cnt, id, member); \
} while(0);
-static void fl_init_dissector(struct fl_flow_mask *mask)
+static void fl_init_dissector(struct flow_dissector *dissector,
+ struct fl_flow_key *mask)
{
struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
size_t cnt = 0;
FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
- FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
- FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
- FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
- FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_PORTS, tp);
- FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_IP, ip);
- FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_TCP, tcp);
- FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_ICMP, icmp);
- FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_ARP, arp);
- FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_MPLS, mpls);
- FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_VLAN, vlan);
- FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+ FLOW_DISSECTOR_KEY_CVLAN, cvlan);
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
- FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
- FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
- if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
- FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
+ if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
+ FL_KEY_IS_MASKED(mask, enc_ipv6))
FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
enc_control);
- FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+ FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+ FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
- skb_flow_dissector_init(&mask->dissector, keys, cnt);
+ skb_flow_dissector_init(dissector, keys, cnt);
}
static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
@@ -852,7 +1078,7 @@ static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
if (err)
goto errout_free;
- fl_init_dissector(newmask);
+ fl_init_dissector(&newmask->dissector, &newmask->key);
INIT_LIST_HEAD_RCU(&newmask->filters);
@@ -901,6 +1127,7 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp,
struct cls_fl_filter *f, struct fl_flow_mask *mask,
unsigned long base, struct nlattr **tb,
struct nlattr *est, bool ovr,
+ struct fl_flow_tmplt *tmplt,
struct netlink_ext_ack *extack)
{
int err;
@@ -921,6 +1148,11 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp,
fl_mask_update_range(mask);
fl_set_masked_key(&f->mkey, &f->key, mask);
+ if (!fl_mask_fits_tmplt(tmplt, mask)) {
+ NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -986,7 +1218,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
- extack);
+ tp->chain->tmplt_priv, extack);
if (err)
goto errout_idr;
@@ -1071,20 +1303,146 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *f;
+
+ arg->count = arg->skip;
+
+ while ((f = idr_get_next_ul(&head->handle_idr,
+ &arg->cookie)) != NULL) {
+ if (arg->fn(tp, f, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->cookie = f->handle + 1;
+ arg->count++;
+ }
+}
+
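Together with the new walker cookie (saved into cb->args[2] by tcf_chain_dump() earlier in this patch), the idr-based walk makes interrupted dumps resumable by handle rather than by re-counting skipped entries. Roughly, as a sketch of the resume flow rather than literal code from this patch:

	/* part 1 of the dump stops at some filter */
	arg->cookie = f->handle + 1;	/* preserved in cb->args[2] */

	/* part 2 restarts the walk exactly there */
	f = idr_get_next_ul(&head->handle_idr, &arg->cookie);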
+static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+ void *cb_priv, struct netlink_ext_ack *extack)
+{
+ struct cls_fl_head *head = rtnl_dereference(tp->root);
+ struct tc_cls_flower_offload cls_flower = {};
+ struct tcf_block *block = tp->chain->block;
struct fl_flow_mask *mask;
+ struct cls_fl_filter *f;
+ int err;
- list_for_each_entry_rcu(mask, &head->masks, list) {
- list_for_each_entry_rcu(f, &mask->filters, list) {
- if (arg->count < arg->skip)
- goto skip;
- if (arg->fn(tp, f, arg) < 0) {
- arg->stop = 1;
- break;
+ list_for_each_entry(mask, &head->masks, list) {
+ list_for_each_entry(f, &mask->filters, list) {
+ if (tc_skip_hw(f->flags))
+ continue;
+
+ tc_cls_common_offload_init(&cls_flower.common, tp,
+ f->flags, extack);
+ cls_flower.command = add ?
+ TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
+ cls_flower.cookie = (unsigned long)f;
+ cls_flower.dissector = &mask->dissector;
+ cls_flower.mask = &mask->key;
+ cls_flower.key = &f->mkey;
+ cls_flower.exts = &f->exts;
+ cls_flower.classid = f->res.classid;
+
+ err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+ if (err) {
+ if (add && tc_skip_sw(f->flags))
+ return err;
+ continue;
}
-skip:
- arg->count++;
+
+ tc_cls_offload_cnt_update(block, &f->in_hw_count,
+ &f->flags, add);
}
}
+
+ return 0;
+}
+
+static void fl_hw_create_tmplt(struct tcf_chain *chain,
+ struct fl_flow_tmplt *tmplt)
+{
+ struct tc_cls_flower_offload cls_flower = {};
+ struct tcf_block *block = chain->block;
+ struct tcf_exts dummy_exts = { 0, };
+
+ cls_flower.common.chain_index = chain->index;
+ cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
+ cls_flower.cookie = (unsigned long) tmplt;
+ cls_flower.dissector = &tmplt->dissector;
+ cls_flower.mask = &tmplt->mask;
+ cls_flower.key = &tmplt->dummy_key;
+ cls_flower.exts = &dummy_exts;
+
+ /* We don't care if any of the drivers fails to handle
+ * this call; it serves merely as a hint to them.
+ */
+ tc_setup_cb_call(block, NULL, TC_SETUP_CLSFLOWER,
+ &cls_flower, false);
+}
+
+static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
+ struct fl_flow_tmplt *tmplt)
+{
+ struct tc_cls_flower_offload cls_flower = {};
+ struct tcf_block *block = chain->block;
+
+ cls_flower.common.chain_index = chain->index;
+ cls_flower.command = TC_CLSFLOWER_TMPLT_DESTROY;
+ cls_flower.cookie = (unsigned long) tmplt;
+
+ tc_setup_cb_call(block, NULL, TC_SETUP_CLSFLOWER,
+ &cls_flower, false);
+}
+
+static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
+ struct nlattr **tca,
+ struct netlink_ext_ack *extack)
+{
+ struct fl_flow_tmplt *tmplt;
+ struct nlattr **tb;
+ int err;
+
+ if (!tca[TCA_OPTIONS])
+ return ERR_PTR(-EINVAL);
+
+ tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
+ if (!tb)
+ return ERR_PTR(-ENOBUFS);
+ err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
+ fl_policy, NULL);
+ if (err)
+ goto errout_tb;
+
+ tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
+ if (!tmplt) {
+ err = -ENOMEM;
+ goto errout_tb;
+ }
+ tmplt->chain = chain;
+ err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
+ if (err)
+ goto errout_tmplt;
+ kfree(tb);
+
+ fl_init_dissector(&tmplt->dissector, &tmplt->mask);
+
+ fl_hw_create_tmplt(chain, tmplt);
+
+ return tmplt;
+
+errout_tmplt:
+ kfree(tmplt);
+errout_tb:
+ kfree(tb);
+ return ERR_PTR(err);
+}
+
+static void fl_tmplt_destroy(void *tmplt_priv)
+{
+ struct fl_flow_tmplt *tmplt = tmplt_priv;
+
+ fl_hw_destroy_tmplt(tmplt->chain, tmplt);
+ kfree(tmplt);
}
static int fl_dump_key_val(struct sk_buff *skb,
@@ -1141,20 +1499,24 @@ static int fl_dump_key_mpls(struct sk_buff *skb,
return 0;
}
-static int fl_dump_key_ip(struct sk_buff *skb,
+static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
struct flow_dissector_key_ip *key,
struct flow_dissector_key_ip *mask)
{
- if (fl_dump_key_val(skb, &key->tos, TCA_FLOWER_KEY_IP_TOS, &mask->tos,
- TCA_FLOWER_KEY_IP_TOS_MASK, sizeof(key->tos)) ||
- fl_dump_key_val(skb, &key->ttl, TCA_FLOWER_KEY_IP_TTL, &mask->ttl,
- TCA_FLOWER_KEY_IP_TTL_MASK, sizeof(key->ttl)))
+ int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
+ int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
+ int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
+ int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
+
+ if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
+ fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
return -1;
return 0;
}
static int fl_dump_key_vlan(struct sk_buff *skb,
+ int vlan_id_key, int vlan_prio_key,
struct flow_dissector_key_vlan *vlan_key,
struct flow_dissector_key_vlan *vlan_mask)
{
@@ -1163,13 +1525,13 @@ static int fl_dump_key_vlan(struct sk_buff *skb,
if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
return 0;
if (vlan_mask->vlan_id) {
- err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
+ err = nla_put_u16(skb, vlan_id_key,
vlan_key->vlan_id);
if (err)
return err;
}
if (vlan_mask->vlan_priority) {
- err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
+ err = nla_put_u8(skb, vlan_prio_key,
vlan_key->vlan_priority);
if (err)
return err;
@@ -1216,29 +1578,86 @@ static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}
-static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t)
+static int fl_dump_key_geneve_opt(struct sk_buff *skb,
+ struct flow_dissector_key_enc_opts *enc_opts)
{
- struct cls_fl_filter *f = fh;
+ struct geneve_opt *opt;
struct nlattr *nest;
- struct fl_flow_key *key, *mask;
+ int opt_off = 0;
- if (!f)
- return skb->len;
+ nest = nla_nest_start(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
+ if (!nest)
+ goto nla_put_failure;
- t->tcm_handle = f->handle;
+ while (enc_opts->len > opt_off) {
+ opt = (struct geneve_opt *)&enc_opts->data[opt_off];
- nest = nla_nest_start(skb, TCA_OPTIONS);
+ if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
+ opt->opt_class))
+ goto nla_put_failure;
+ if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
+ opt->type))
+ goto nla_put_failure;
+ if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
+ opt->length * 4, opt->opt_data))
+ goto nla_put_failure;
+
+ opt_off += sizeof(struct geneve_opt) + opt->length * 4;
+ }
+ nla_nest_end(skb, nest);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
+ struct flow_dissector_key_enc_opts *enc_opts)
+{
+ struct nlattr *nest;
+ int err;
+
+ if (!enc_opts->len)
+ return 0;
+
+ nest = nla_nest_start(skb, enc_opt_type);
if (!nest)
goto nla_put_failure;
- if (f->res.classid &&
- nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
+ switch (enc_opts->dst_opt_type) {
+ case TUNNEL_GENEVE_OPT:
+ err = fl_dump_key_geneve_opt(skb, enc_opts);
+ if (err)
+ goto nla_put_failure;
+ break;
+ default:
goto nla_put_failure;
+ }
+ nla_nest_end(skb, nest);
+ return 0;
- key = &f->key;
- mask = &f->mask->key;
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+static int fl_dump_key_enc_opt(struct sk_buff *skb,
+ struct flow_dissector_key_enc_opts *key_opts,
+ struct flow_dissector_key_enc_opts *msk_opts)
+{
+ int err;
+
+ err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
+ if (err)
+ return err;
+
+ return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
+}
+static int fl_dump_key(struct sk_buff *skb, struct net *net,
+ struct fl_flow_key *key, struct fl_flow_key *mask)
+{
if (mask->indev_ifindex) {
struct net_device *dev;
@@ -1247,9 +1666,6 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
goto nla_put_failure;
}
- if (!tc_skip_hw(f->flags))
- fl_hw_update_stats(tp, f);
-
if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
sizeof(key->eth.dst)) ||
@@ -1264,15 +1680,36 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
goto nla_put_failure;
- if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
+ if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
+ TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
goto nla_put_failure;
+ if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
+ TCA_FLOWER_KEY_CVLAN_PRIO,
+ &key->cvlan, &mask->cvlan) ||
+ (mask->cvlan.vlan_tpid &&
+ nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+ key->cvlan.vlan_tpid)))
+ goto nla_put_failure;
+
+ if (mask->basic.n_proto) {
+ if (mask->cvlan.vlan_tpid) {
+ if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
+ key->basic.n_proto))
+ goto nla_put_failure;
+ } else if (mask->vlan.vlan_tpid) {
+ if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+ key->basic.n_proto))
+ goto nla_put_failure;
+ }
+ }
+
if ((key->basic.n_proto == htons(ETH_P_IP) ||
key->basic.n_proto == htons(ETH_P_IPV6)) &&
(fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
&mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
sizeof(key->basic.ip_proto)) ||
- fl_dump_key_ip(skb, &key->ip, &mask->ip)))
+ fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
goto nla_put_failure;
if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
@@ -1397,12 +1834,49 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
&mask->enc_tp.dst,
TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
- sizeof(key->enc_tp.dst)))
+ sizeof(key->enc_tp.dst)) ||
+ fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
+ fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
goto nla_put_failure;
if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
+ struct sk_buff *skb, struct tcmsg *t)
+{
+ struct cls_fl_filter *f = fh;
+ struct nlattr *nest;
+ struct fl_flow_key *key, *mask;
+
+ if (!f)
+ return skb->len;
+
+ t->tcm_handle = f->handle;
+
+ nest = nla_nest_start(skb, TCA_OPTIONS);
+ if (!nest)
+ goto nla_put_failure;
+
+ if (f->res.classid &&
+ nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
+ goto nla_put_failure;
+
+ key = &f->key;
+ mask = &f->mask->key;
+
+ if (fl_dump_key(skb, net, key, mask))
+ goto nla_put_failure;
+
+ if (!tc_skip_hw(f->flags))
+ fl_hw_update_stats(tp, f);
+
if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
goto nla_put_failure;
@@ -1421,6 +1895,31 @@ nla_put_failure:
return -1;
}
+static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
+{
+ struct fl_flow_tmplt *tmplt = tmplt_priv;
+ struct fl_flow_key *key, *mask;
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, TCA_OPTIONS);
+ if (!nest)
+ goto nla_put_failure;
+
+ key = &tmplt->dummy_key;
+ mask = &tmplt->mask;
+
+ if (fl_dump_key(skb, net, key, mask))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+
+ return skb->len;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
{
struct cls_fl_filter *f = fh;
@@ -1438,8 +1937,12 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
.change = fl_change,
.delete = fl_delete,
.walk = fl_walk,
+ .reoffload = fl_reoffload,
.dump = fl_dump,
.bind_class = fl_bind_class,
+ .tmplt_create = fl_tmplt_create,
+ .tmplt_destroy = fl_tmplt_destroy,
+ .tmplt_dump = fl_tmplt_dump,
.owner = THIS_MODULE,
};
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 47b207ef7762..af16f36ed578 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -21,6 +21,7 @@ struct cls_mall_head {
struct tcf_result res;
u32 handle;
u32 flags;
+ unsigned int in_hw_count;
struct rcu_work rwork;
};
@@ -95,6 +96,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
mall_destroy_hw_filter(tp, head, cookie, NULL);
return err;
} else if (err > 0) {
+ head->in_hw_count = err;
tcf_block_offload_inc(block, &head->flags);
}
@@ -235,6 +237,35 @@ skip:
arg->count++;
}
+static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+ void *cb_priv, struct netlink_ext_ack *extack)
+{
+ struct cls_mall_head *head = rtnl_dereference(tp->root);
+ struct tc_cls_matchall_offload cls_mall = {};
+ struct tcf_block *block = tp->chain->block;
+ int err;
+
+ if (tc_skip_hw(head->flags))
+ return 0;
+
+ tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
+ cls_mall.command = add ?
+ TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
+ cls_mall.exts = &head->exts;
+ cls_mall.cookie = (unsigned long)head;
+
+ err = cb(TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv);
+ if (err) {
+ if (add && tc_skip_sw(head->flags))
+ return err;
+ return 0;
+ }
+
+ tc_cls_offload_cnt_update(block, &head->in_hw_count, &head->flags, add);
+
+ return 0;
+}
+
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
struct sk_buff *skb, struct tcmsg *t)
{
@@ -289,6 +320,7 @@ static struct tcf_proto_ops cls_mall_ops __read_mostly = {
.change = mall_change,
.delete = mall_delete,
.walk = mall_walk,
+ .reoffload = mall_reoffload,
.dump = mall_dump,
.bind_class = mall_bind_class,
.owner = THIS_MODULE,
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 32f4bbd82f35..9ccc93f257db 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -447,11 +447,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
tcf_bind_filter(tp, &cr.res, base);
}
- if (old_r)
- tcf_exts_change(&r->exts, &e);
- else
- tcf_exts_change(&cr.exts, &e);
-
if (old_r && old_r != r) {
err = tcindex_filter_result_init(old_r);
if (err < 0) {
@@ -462,12 +457,15 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
oldp = p;
r->res = cr.res;
+ tcf_exts_change(&r->exts, &e);
+
rcu_assign_pointer(tp->root, cp);
if (r == &new_filter_result) {
struct tcindex_filter *nfp;
struct tcindex_filter __rcu **fp;
+ f->result.res = r->res;
tcf_exts_change(&f->result.exts, &r->exts);
fp = cp->h + (handle % cp->hash);
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index fb861f90fde6..d5d2a6dc3921 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -62,6 +62,7 @@ struct tc_u_knode {
struct tc_u32_pcnt __percpu *pf;
#endif
u32 flags;
+ unsigned int in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
u32 val;
u32 mask;
@@ -571,6 +572,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
u32_remove_hw_knode(tp, n, NULL);
return err;
} else if (err > 0) {
+ n->in_hw_count = err;
tcf_block_offload_inc(block, &n->flags);
}
@@ -1199,6 +1201,114 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
}
}
+static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
+ bool add, tc_setup_cb_t *cb, void *cb_priv,
+ struct netlink_ext_ack *extack)
+{
+ struct tc_cls_u32_offload cls_u32 = {};
+ int err;
+
+ tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
+ cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
+ cls_u32.hnode.divisor = ht->divisor;
+ cls_u32.hnode.handle = ht->handle;
+ cls_u32.hnode.prio = ht->prio;
+
+ err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
+ if (err && add && tc_skip_sw(ht->flags))
+ return err;
+
+ return 0;
+}
+
+static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
+ bool add, tc_setup_cb_t *cb, void *cb_priv,
+ struct netlink_ext_ack *extack)
+{
+ struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
+ struct tcf_block *block = tp->chain->block;
+ struct tc_cls_u32_offload cls_u32 = {};
+ int err;
+
+ tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
+ cls_u32.command = add ?
+ TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
+ cls_u32.knode.handle = n->handle;
+
+ if (add) {
+ cls_u32.knode.fshift = n->fshift;
+#ifdef CONFIG_CLS_U32_MARK
+ cls_u32.knode.val = n->val;
+ cls_u32.knode.mask = n->mask;
+#else
+ cls_u32.knode.val = 0;
+ cls_u32.knode.mask = 0;
+#endif
+ cls_u32.knode.sel = &n->sel;
+ cls_u32.knode.exts = &n->exts;
+ if (n->ht_down)
+ cls_u32.knode.link_handle = ht->handle;
+ }
+
+ err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
+ if (err) {
+ if (add && tc_skip_sw(n->flags))
+ return err;
+ return 0;
+ }
+
+ tc_cls_offload_cnt_update(block, &n->in_hw_count, &n->flags, add);
+
+ return 0;
+}
+
+static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+ void *cb_priv, struct netlink_ext_ack *extack)
+{
+ struct tc_u_common *tp_c = tp->data;
+ struct tc_u_hnode *ht;
+ struct tc_u_knode *n;
+ unsigned int h;
+ int err;
+
+ for (ht = rtnl_dereference(tp_c->hlist);
+ ht;
+ ht = rtnl_dereference(ht->next)) {
+ if (ht->prio != tp->prio)
+ continue;
+
+ /* When adding filters to a new dev, try to offload the
+		 * hashtable first. When removing, remove the filters before the
+ * hashtable.
+ */
+ if (add && !tc_skip_hw(ht->flags)) {
+ err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
+ extack);
+ if (err)
+ return err;
+ }
+
+ for (h = 0; h <= ht->divisor; h++) {
+ for (n = rtnl_dereference(ht->ht[h]);
+ n;
+ n = rtnl_dereference(n->next)) {
+ if (tc_skip_hw(n->flags))
+ continue;
+
+ err = u32_reoffload_knode(tp, n, add, cb,
+ cb_priv, extack);
+ if (err)
+ return err;
+ }
+ }
+
+ if (!add && !tc_skip_hw(ht->flags))
+ u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
+ }
+
+ return 0;
+}
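+
+/* Illustrative sketch (not part of this patch): drivers consume these
+ * replays through an ordinary block callback; the "mydrv" names below
+ * are hypothetical.
+ *
+ *	static int mydrv_setup_tc_block_cb(enum tc_setup_type type,
+ *					   void *type_data, void *cb_priv)
+ *	{
+ *		if (type == TC_SETUP_CLSU32)
+ *			return mydrv_setup_clsu32(cb_priv, type_data);
+ *		return -EOPNOTSUPP;
+ *	}
+ *
+ * Registering such a callback on a block that already holds u32 filters
+ * replays them via u32_reoffload() with add=true; unregistering replays
+ * them again with add=false.
+ */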
+
static void u32_bind_class(void *fh, u32 classid, unsigned long cl)
{
struct tc_u_knode *n = fh;
@@ -1336,6 +1446,7 @@ static struct tcf_proto_ops cls_u32_ops __read_mostly = {
.change = u32_change,
.delete = u32_delete,
.walk = u32_walk,
+ .reoffload = u32_reoffload,
.dump = u32_dump,
.bind_class = u32_bind_class,
.owner = THIS_MODULE,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 54eca685420f..98541c6399db 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -596,12 +596,19 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
+void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
+ clockid_t clockid)
{
- hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+ hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
wd->timer.function = qdisc_watchdog;
wd->qdisc = qdisc;
}
+EXPORT_SYMBOL(qdisc_watchdog_init_clockid);
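+
+/* Illustrative sketch (not part of this patch): a qdisc whose schedule
+ * lives in another time base can now pass its own clockid from its init
+ * path ("my_qdisc"/"my_sched_data" are hypothetical names):
+ *
+ *	static int my_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
+ *	{
+ *		struct my_sched_data *q = qdisc_priv(sch);
+ *
+ *		qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_TAI);
+ *		return 0;
+ *	}
+ */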
+
+void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
+{
+ qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
+}
EXPORT_SYMBOL(qdisc_watchdog_init);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
new file mode 100644
index 000000000000..35fc7252187c
--- /dev/null
+++ b/net/sched/sch_cake.c
@@ -0,0 +1,3020 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* COMMON Applications Kept Enhanced (CAKE) discipline
+ *
+ * Copyright (C) 2014-2018 Jonathan Morton <chromatix99@gmail.com>
+ * Copyright (C) 2015-2018 Toke Høiland-Jørgensen <toke@toke.dk>
+ * Copyright (C) 2014-2018 Dave Täht <dave.taht@gmail.com>
+ * Copyright (C) 2015-2018 Sebastian Moeller <moeller0@gmx.de>
+ * (C) 2015-2018 Kevin Darbyshire-Bryant <kevin@darbyshire-bryant.me.uk>
+ * Copyright (C) 2017-2018 Ryan Mounce <ryan@mounce.com.au>
+ *
+ * The CAKE Principles:
+ * (or, how to have your cake and eat it too)
+ *
+ * This is a combination of several shaping, AQM and FQ techniques into one
+ * easy-to-use package:
+ *
+ * - An overall bandwidth shaper, to move the bottleneck away from dumb CPE
+ * equipment and bloated MACs. This operates in deficit mode (as in sch_fq),
+ * eliminating the need for any sort of burst parameter (eg. token bucket
+ * depth). Burst support is limited to that necessary to overcome scheduling
+ * latency.
+ *
+ * - A Diffserv-aware priority queue, giving more priority to certain classes,
+ * up to a specified fraction of bandwidth. Above that bandwidth threshold,
+ * the priority is reduced to avoid starving other tins.
+ *
+ * - Each priority tin has a separate Flow Queue system, to isolate traffic
+ * flows from each other. This prevents a burst on one flow from increasing
+ * the delay to another. Flows are distributed to queues using a
+ * set-associative hash function.
+ *
+ * - Each queue is actively managed by Cobalt, which is a combination of the
+ * Codel and Blue AQM algorithms. This serves flows fairly, and signals
+ * congestion early via ECN (if available) and/or packet drops, to keep
+ * latency low. The codel parameters are auto-tuned based on the bandwidth
+ * setting, as is necessary at low bandwidths.
+ *
+ * The configuration parameters are kept deliberately simple for ease of use.
+ * Everything has sane defaults. Complete generality of configuration is *not*
+ * a goal.
+ *
+ * The priority queue operates according to a weighted DRR scheme, combined with
+ * a bandwidth tracker which reuses the shaper logic to detect which side of the
+ * bandwidth sharing threshold the tin is operating. This determines whether a
+ * priority-based weight (high) or a bandwidth-based weight (low) is used for
+ * that tin in the current pass.
+ *
+ * This qdisc was inspired by Eric Dumazet's fq_codel code, which he kindly
+ * granted us permission to leverage.
+ */
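+
+/* Typical usage (illustrative; assumes a tc binary with cake support):
+ *
+ *	tc qdisc replace dev eth0 root cake bandwidth 20Mbit diffserv3 nat
+ *
+ * Everything beyond the bandwidth is optional and defaults sanely.
+ */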
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/reciprocal_div.h>
+#include <net/netlink.h>
+#include <linux/version.h>
+#include <linux/if_vlan.h>
+#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
+#include <net/tcp.h>
+#include <net/flow_dissector.h>
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack_core.h>
+#endif
+
+#define CAKE_SET_WAYS (8)
+#define CAKE_MAX_TINS (8)
+#define CAKE_QUEUES (1024)
+#define CAKE_FLOW_MASK 63
+#define CAKE_FLOW_NAT_FLAG 64
+
+/* struct cobalt_params - contains codel and blue parameters
+ * @interval: codel initial drop spacing
+ * @target: maximum persistent sojourn time & blue update rate
+ * @mtu_time: serialisation delay of maximum-size packet
+ * @p_inc: increment of blue drop probability (0.32 fxp)
+ * @p_dec: decrement of blue drop probability (0.32 fxp)
+ */
+struct cobalt_params {
+ u64 interval;
+ u64 target;
+ u64 mtu_time;
+ u32 p_inc;
+ u32 p_dec;
+};
+
+/* struct cobalt_vars - contains codel and blue variables
+ * @count: codel dropping frequency
+ * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1
+ * @drop_next: time to drop next packet, or when we dropped last
+ * @blue_timer: Blue time to next drop
+ * @p_drop: BLUE drop probability (0.32 fxp)
+ * @dropping: set if in dropping state
+ * @ecn_marked: set if marked
+ */
+struct cobalt_vars {
+ u32 count;
+ u32 rec_inv_sqrt;
+ ktime_t drop_next;
+ ktime_t blue_timer;
+ u32 p_drop;
+ bool dropping;
+ bool ecn_marked;
+};
+
+enum {
+ CAKE_SET_NONE = 0,
+ CAKE_SET_SPARSE,
+ CAKE_SET_SPARSE_WAIT, /* counted in SPARSE, actually in BULK */
+ CAKE_SET_BULK,
+ CAKE_SET_DECAYING
+};
+
+struct cake_flow {
+ /* this stuff is all needed per-flow at dequeue time */
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ struct list_head flowchain;
+ s32 deficit;
+ u32 dropped;
+ struct cobalt_vars cvars;
+ u16 srchost; /* index into cake_host table */
+ u16 dsthost;
+ u8 set;
+}; /* please try to keep this structure <= 64 bytes */
+
+struct cake_host {
+ u32 srchost_tag;
+ u32 dsthost_tag;
+ u16 srchost_refcnt;
+ u16 dsthost_refcnt;
+};
+
+struct cake_heap_entry {
+ u16 t:3, b:10;
+};
+
+struct cake_tin_data {
+ struct cake_flow flows[CAKE_QUEUES];
+ u32 backlogs[CAKE_QUEUES];
+ u32 tags[CAKE_QUEUES]; /* for set association */
+ u16 overflow_idx[CAKE_QUEUES];
+ struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */
+ u16 flow_quantum;
+
+ struct cobalt_params cparams;
+ u32 drop_overlimit;
+ u16 bulk_flow_count;
+ u16 sparse_flow_count;
+ u16 decaying_flow_count;
+ u16 unresponsive_flow_count;
+
+ u32 max_skblen;
+
+ struct list_head new_flows;
+ struct list_head old_flows;
+ struct list_head decaying_flows;
+
+ /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
+ ktime_t time_next_packet;
+ u64 tin_rate_ns;
+ u64 tin_rate_bps;
+ u16 tin_rate_shft;
+
+ u16 tin_quantum_prio;
+ u16 tin_quantum_band;
+ s32 tin_deficit;
+ u32 tin_backlog;
+ u32 tin_dropped;
+ u32 tin_ecn_mark;
+
+ u32 packets;
+ u64 bytes;
+
+ u32 ack_drops;
+
+ /* moving averages */
+ u64 avge_delay;
+ u64 peak_delay;
+ u64 base_delay;
+
+ /* hash function stats */
+ u32 way_directs;
+ u32 way_hits;
+ u32 way_misses;
+ u32 way_collisions;
+}; /* number of tins is small, so size of this struct doesn't matter much */
+
+struct cake_sched_data {
+ struct tcf_proto __rcu *filter_list; /* optional external classifier */
+ struct tcf_block *block;
+ struct cake_tin_data *tins;
+
+ struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
+ u16 overflow_timeout;
+
+ u16 tin_cnt;
+ u8 tin_mode;
+ u8 flow_mode;
+ u8 ack_filter;
+ u8 atm_mode;
+
+ /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
+ u16 rate_shft;
+ ktime_t time_next_packet;
+ ktime_t failsafe_next_packet;
+ u64 rate_ns;
+ u64 rate_bps;
+ u16 rate_flags;
+ s16 rate_overhead;
+ u16 rate_mpu;
+ u64 interval;
+ u64 target;
+
+ /* resource tracking */
+ u32 buffer_used;
+ u32 buffer_max_used;
+ u32 buffer_limit;
+ u32 buffer_config_limit;
+
+ /* indices for dequeue */
+ u16 cur_tin;
+ u16 cur_flow;
+
+ struct qdisc_watchdog watchdog;
+ const u8 *tin_index;
+ const u8 *tin_order;
+
+ /* bandwidth capacity estimate */
+ ktime_t last_packet_time;
+ ktime_t avg_window_begin;
+ u64 avg_packet_interval;
+ u64 avg_window_bytes;
+ u64 avg_peak_bandwidth;
+ ktime_t last_reconfig_time;
+
+ /* packet length stats */
+ u32 avg_netoff;
+ u16 max_netlen;
+ u16 max_adjlen;
+ u16 min_netlen;
+ u16 min_adjlen;
+};
+
+enum {
+ CAKE_FLAG_OVERHEAD = BIT(0),
+ CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
+ CAKE_FLAG_INGRESS = BIT(2),
+ CAKE_FLAG_WASH = BIT(3),
+ CAKE_FLAG_SPLIT_GSO = BIT(4)
+};
+
+/* COBALT operates the Codel and BLUE algorithms in parallel, in order to
+ * obtain the best features of each. Codel is excellent on flows which
+ * respond to congestion signals in a TCP-like way. BLUE is more effective on
+ * unresponsive flows.
+ */
+
+struct cobalt_skb_cb {
+ ktime_t enqueue_time;
+ u32 adjusted_len;
+};
+
+static u64 us_to_ns(u64 us)
+{
+ return us * NSEC_PER_USEC;
+}
+
+static struct cobalt_skb_cb *get_cobalt_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct cobalt_skb_cb));
+ return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static ktime_t cobalt_get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_cobalt_cb(skb)->enqueue_time;
+}
+
+static void cobalt_set_enqueue_time(struct sk_buff *skb,
+ ktime_t now)
+{
+ get_cobalt_cb(skb)->enqueue_time = now;
+}
+
+static u16 quantum_div[CAKE_QUEUES + 1] = {0};
+
+/* Diffserv lookup tables */
+
+static const u8 precedence[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+};
+
+static const u8 diffserv8[] = {
+ 2, 5, 1, 2, 4, 2, 2, 2,
+ 0, 2, 1, 2, 1, 2, 1, 2,
+ 5, 2, 4, 2, 4, 2, 4, 2,
+ 3, 2, 3, 2, 3, 2, 3, 2,
+ 6, 2, 3, 2, 3, 2, 3, 2,
+ 6, 2, 2, 2, 6, 2, 6, 2,
+ 7, 2, 2, 2, 2, 2, 2, 2,
+ 7, 2, 2, 2, 2, 2, 2, 2,
+};
+
+static const u8 diffserv4[] = {
+ 0, 2, 0, 0, 2, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0, 0, 0,
+ 2, 0, 2, 0, 2, 0, 2, 0,
+ 2, 0, 2, 0, 2, 0, 2, 0,
+ 3, 0, 2, 0, 2, 0, 2, 0,
+ 3, 0, 0, 0, 3, 0, 3, 0,
+ 3, 0, 0, 0, 0, 0, 0, 0,
+ 3, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 diffserv3[] = {
+ 0, 0, 0, 0, 2, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 2, 0, 2, 0,
+ 2, 0, 0, 0, 0, 0, 0, 0,
+ 2, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 besteffort[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* tin priority order for stats dumping */
+
+static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
+static const u8 bulk_order[] = {1, 0, 2, 3};
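+
+/* Worked example (illustrative): with the diffserv3 table above, DSCP
+ * CS1 (8) maps to tin 1 (bulk), EF (46) maps to tin 2 (voice), and all
+ * other codepoints map to tin 0 (best effort). bulk_order[] then puts
+ * tin 1 first, so the lowest-priority (bulk) tin is dumped first.
+ */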
+
+#define REC_INV_SQRT_CACHE (16)
+static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
+
+/* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
+ * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
+ *
+ * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
+ */
+
+static void cobalt_newton_step(struct cobalt_vars *vars)
+{
+ u32 invsqrt, invsqrt2;
+ u64 val;
+
+ invsqrt = vars->rec_inv_sqrt;
+ invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
+ val = (3LL << 32) - ((u64)vars->count * invsqrt2);
+
+ val >>= 2; /* avoid overflow in following multiply */
+ val = (val * invsqrt) >> (32 - 2 + 1);
+
+ vars->rec_inv_sqrt = val;
+}
+
+static void cobalt_invsqrt(struct cobalt_vars *vars)
+{
+ if (vars->count < REC_INV_SQRT_CACHE)
+ vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count];
+ else
+ cobalt_newton_step(vars);
+}
+
+/* There is a big difference in timing between the accurate values placed in
+ * the cache and the approximations given by a single Newton step for small
+ * count values, particularly when stepping from count 1 to 2 or vice versa.
+ * Above 16, a single Newton step gives sufficient accuracy in either
+ * direction, given the precision stored.
+ *
+ * The magnitude of the error when stepping up to count 2 is large enough to
+ * give the value that *should* have been produced at count 4.
+ */
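+
+/* Worked example (illustrative) of that overshoot, in Q0.32 arithmetic:
+ * one Newton step from invsqrt = 0xFFFFFFFF (~1.0) with count = 2 gives
+ *
+ *	invsqrt2 = (0xFFFFFFFF * 0xFFFFFFFF) >> 32	~= 0xFFFFFFFE
+ *	val	 = (3 << 32) - 2 * invsqrt2		~= 1 << 32
+ *	result	 = ((val >> 2) * invsqrt) >> 31		~= 0x80000000
+ *
+ * i.e. ~0.5, which is 1/sqrt(4) rather than 1/sqrt(2) ~= 0.707; hence
+ * the four Newton steps per entry when priming the cache below.
+ */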
+
+static void cobalt_cache_init(void)
+{
+ struct cobalt_vars v;
+
+ memset(&v, 0, sizeof(v));
+ v.rec_inv_sqrt = ~0U;
+ cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt;
+
+ for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) {
+ cobalt_newton_step(&v);
+ cobalt_newton_step(&v);
+ cobalt_newton_step(&v);
+ cobalt_newton_step(&v);
+
+ cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt;
+ }
+}
+
+static void cobalt_vars_init(struct cobalt_vars *vars)
+{
+ memset(vars, 0, sizeof(*vars));
+
+ if (!cobalt_rec_inv_sqrt_cache[0]) {
+ cobalt_cache_init();
+ cobalt_rec_inv_sqrt_cache[0] = ~0;
+ }
+}
+
+/* CoDel control_law is t + interval/sqrt(count)
+ * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
+ * both sqrt() and divide operation.
+ */
+static ktime_t cobalt_control(ktime_t t,
+ u64 interval,
+ u32 rec_inv_sqrt)
+{
+ return ktime_add_ns(t, reciprocal_scale(interval,
+ rec_inv_sqrt));
+}
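+
+/* Worked example (illustrative): with interval = 100 ms and count = 4,
+ * rec_inv_sqrt ~= 0x80000000 (0.5 in Q0.32), so reciprocal_scale()
+ * yields ~50 ms: the drop spacing decays as interval/sqrt(count).
+ */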
+
+/* Call this when a packet had to be dropped due to queue overflow. Returns
+ * true if the BLUE state was quiescent before but active after this call.
+ */
+static bool cobalt_queue_full(struct cobalt_vars *vars,
+ struct cobalt_params *p,
+ ktime_t now)
+{
+ bool up = false;
+
+ if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
+ up = !vars->p_drop;
+ vars->p_drop += p->p_inc;
+ if (vars->p_drop < p->p_inc)
+ vars->p_drop = ~0;
+ vars->blue_timer = now;
+ }
+ vars->dropping = true;
+ vars->drop_next = now;
+ if (!vars->count)
+ vars->count = 1;
+
+ return up;
+}
+
+/* Call this when the queue was serviced but turned out to be empty. Returns
+ * true if the BLUE state was active before but quiescent after this call.
+ */
+static bool cobalt_queue_empty(struct cobalt_vars *vars,
+ struct cobalt_params *p,
+ ktime_t now)
+{
+ bool down = false;
+
+ if (vars->p_drop &&
+ ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
+ if (vars->p_drop < p->p_dec)
+ vars->p_drop = 0;
+ else
+ vars->p_drop -= p->p_dec;
+ vars->blue_timer = now;
+ down = !vars->p_drop;
+ }
+ vars->dropping = false;
+
+ if (vars->count && ktime_to_ns(ktime_sub(now, vars->drop_next)) >= 0) {
+ vars->count--;
+ cobalt_invsqrt(vars);
+ vars->drop_next = cobalt_control(vars->drop_next,
+ p->interval,
+ vars->rec_inv_sqrt);
+ }
+
+ return down;
+}
+
+/* Call this with a freshly dequeued packet for possible congestion marking.
+ * Returns true as an instruction to drop the packet, false for delivery.
+ */
+static bool cobalt_should_drop(struct cobalt_vars *vars,
+ struct cobalt_params *p,
+ ktime_t now,
+ struct sk_buff *skb,
+ u32 bulk_flows)
+{
+ bool next_due, over_target, drop = false;
+ ktime_t schedule;
+ u64 sojourn;
+
+/* The 'schedule' variable records, in its sign, whether 'now' is before or
+ * after 'drop_next'. This allows 'drop_next' to be updated before the next
+ * scheduling decision is actually taken, without destroying that
+ * information. Similarly, the first 'schedule' value calculated is preserved
+ * in the boolean 'next_due'.
+ *
+ * As for 'drop_next', we take advantage of the fact that 'interval' is both
+ * the delay between first exceeding 'target' and the first signalling event,
+ * *and* the scaling factor for the signalling frequency. It's therefore very
+ * natural to use a single mechanism for both purposes, which also eliminates
+ * a significant amount of the reference Codel's spaghetti code. To help with this,
+ * both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close
+ * as possible to 1.0 in fixed-point.
+ */
+
+ sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
+ schedule = ktime_sub(now, vars->drop_next);
+ over_target = sojourn > p->target &&
+ sojourn > p->mtu_time * bulk_flows * 2 &&
+ sojourn > p->mtu_time * 4;
+ next_due = vars->count && ktime_to_ns(schedule) >= 0;
+
+ vars->ecn_marked = false;
+
+ if (over_target) {
+ if (!vars->dropping) {
+ vars->dropping = true;
+ vars->drop_next = cobalt_control(now,
+ p->interval,
+ vars->rec_inv_sqrt);
+ }
+ if (!vars->count)
+ vars->count = 1;
+ } else if (vars->dropping) {
+ vars->dropping = false;
+ }
+
+ if (next_due && vars->dropping) {
+ /* Use ECN mark if possible, otherwise drop */
+ drop = !(vars->ecn_marked = INET_ECN_set_ce(skb));
+
+ vars->count++;
+ if (!vars->count)
+ vars->count--;
+ cobalt_invsqrt(vars);
+ vars->drop_next = cobalt_control(vars->drop_next,
+ p->interval,
+ vars->rec_inv_sqrt);
+ schedule = ktime_sub(now, vars->drop_next);
+ } else {
+ while (next_due) {
+ vars->count--;
+ cobalt_invsqrt(vars);
+ vars->drop_next = cobalt_control(vars->drop_next,
+ p->interval,
+ vars->rec_inv_sqrt);
+ schedule = ktime_sub(now, vars->drop_next);
+ next_due = vars->count && ktime_to_ns(schedule) >= 0;
+ }
+ }
+
+ /* Simple BLUE implementation. Lack of ECN is deliberate. */
+ if (vars->p_drop)
+ drop |= (prandom_u32() < vars->p_drop);
+
+ /* Overload the drop_next field as an activity timeout */
+ if (!vars->count)
+ vars->drop_next = ktime_add_ns(now, p->interval);
+ else if (ktime_to_ns(schedule) > 0 && !drop)
+ vars->drop_next = now;
+
+ return drop;
+}
+
+static void cake_update_flowkeys(struct flow_keys *keys,
+ const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ struct nf_conntrack_tuple tuple = {};
+ bool rev = !skb->_nfct;
+
+ if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+ return;
+
+ if (!nf_ct_get_tuple_skb(&tuple, skb))
+ return;
+
+ keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
+ keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
+
+ if (keys->ports.ports) {
+ keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all;
+ keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all;
+ }
+#endif
+}
+
+/* Cake has several flow modes that set multiple flow_mode bits, so the
+ * dual-mode checks below also match triple isolate mode.
+ */
+
+static bool cake_dsrc(int flow_mode)
+{
+ return (flow_mode & CAKE_FLOW_DUAL_SRC) == CAKE_FLOW_DUAL_SRC;
+}
+
+static bool cake_ddst(int flow_mode)
+{
+ return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
+}
+
+static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+ int flow_mode)
+{
+ u32 flow_hash = 0, srchost_hash, dsthost_hash;
+ u16 reduced_hash, srchost_idx, dsthost_idx;
+ struct flow_keys keys, host_keys;
+
+ if (unlikely(flow_mode == CAKE_FLOW_NONE))
+ return 0;
+
+ skb_flow_dissect_flow_keys(skb, &keys,
+ FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+
+ if (flow_mode & CAKE_FLOW_NAT_FLAG)
+ cake_update_flowkeys(&keys, skb);
+
+ /* flow_hash_from_keys() sorts the addresses by value, so we have
+ * to preserve their order in a separate data structure to treat
+ * src and dst host addresses as independently selectable.
+ */
+ host_keys = keys;
+ host_keys.ports.ports = 0;
+ host_keys.basic.ip_proto = 0;
+ host_keys.keyid.keyid = 0;
+ host_keys.tags.flow_label = 0;
+
+ switch (host_keys.control.addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ host_keys.addrs.v4addrs.src = 0;
+ dsthost_hash = flow_hash_from_keys(&host_keys);
+ host_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
+ host_keys.addrs.v4addrs.dst = 0;
+ srchost_hash = flow_hash_from_keys(&host_keys);
+ break;
+
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ memset(&host_keys.addrs.v6addrs.src, 0,
+ sizeof(host_keys.addrs.v6addrs.src));
+ dsthost_hash = flow_hash_from_keys(&host_keys);
+ host_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
+ memset(&host_keys.addrs.v6addrs.dst, 0,
+ sizeof(host_keys.addrs.v6addrs.dst));
+ srchost_hash = flow_hash_from_keys(&host_keys);
+ break;
+
+ default:
+ dsthost_hash = 0;
+ srchost_hash = 0;
+ }
+
+ /* This *must* be after the above switch, since as a
+ * side-effect it sorts the src and dst addresses.
+ */
+ if (flow_mode & CAKE_FLOW_FLOWS)
+ flow_hash = flow_hash_from_keys(&keys);
+
+ if (!(flow_mode & CAKE_FLOW_FLOWS)) {
+ if (flow_mode & CAKE_FLOW_SRC_IP)
+ flow_hash ^= srchost_hash;
+
+ if (flow_mode & CAKE_FLOW_DST_IP)
+ flow_hash ^= dsthost_hash;
+ }
+
+ reduced_hash = flow_hash % CAKE_QUEUES;
+
+ /* set-associative hashing */
+ /* fast path if no hash collision (direct lookup succeeds) */
+ if (likely(q->tags[reduced_hash] == flow_hash &&
+ q->flows[reduced_hash].set)) {
+ q->way_directs++;
+ } else {
+ u32 inner_hash = reduced_hash % CAKE_SET_WAYS;
+ u32 outer_hash = reduced_hash - inner_hash;
+ bool allocate_src = false;
+ bool allocate_dst = false;
+ u32 i, k;
+
+ /* check if any active queue in the set is reserved for
+ * this flow.
+ */
+ for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
+ i++, k = (k + 1) % CAKE_SET_WAYS) {
+ if (q->tags[outer_hash + k] == flow_hash) {
+ if (i)
+ q->way_hits++;
+
+ if (!q->flows[outer_hash + k].set) {
+ /* need to increment host refcnts */
+ allocate_src = cake_dsrc(flow_mode);
+ allocate_dst = cake_ddst(flow_mode);
+ }
+
+ goto found;
+ }
+ }
+
+ /* no queue is reserved for this flow, look for an
+ * empty one.
+ */
+ for (i = 0; i < CAKE_SET_WAYS;
+ i++, k = (k + 1) % CAKE_SET_WAYS) {
+ if (!q->flows[outer_hash + k].set) {
+ q->way_misses++;
+ allocate_src = cake_dsrc(flow_mode);
+ allocate_dst = cake_ddst(flow_mode);
+ goto found;
+ }
+ }
+
+ /* With no empty queues, default to the original
+ * queue, accept the collision, update the host tags.
+ */
+ q->way_collisions++;
+ q->hosts[q->flows[reduced_hash].srchost].srchost_refcnt--;
+ q->hosts[q->flows[reduced_hash].dsthost].dsthost_refcnt--;
+ allocate_src = cake_dsrc(flow_mode);
+ allocate_dst = cake_ddst(flow_mode);
+found:
+ /* reserve queue for future packets in same flow */
+ reduced_hash = outer_hash + k;
+ q->tags[reduced_hash] = flow_hash;
+
+ if (allocate_src) {
+ srchost_idx = srchost_hash % CAKE_QUEUES;
+ inner_hash = srchost_idx % CAKE_SET_WAYS;
+ outer_hash = srchost_idx - inner_hash;
+ for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
+ i++, k = (k + 1) % CAKE_SET_WAYS) {
+ if (q->hosts[outer_hash + k].srchost_tag ==
+ srchost_hash)
+ goto found_src;
+ }
+ for (i = 0; i < CAKE_SET_WAYS;
+ i++, k = (k + 1) % CAKE_SET_WAYS) {
+ if (!q->hosts[outer_hash + k].srchost_refcnt)
+ break;
+ }
+ q->hosts[outer_hash + k].srchost_tag = srchost_hash;
+found_src:
+ srchost_idx = outer_hash + k;
+ q->hosts[srchost_idx].srchost_refcnt++;
+ q->flows[reduced_hash].srchost = srchost_idx;
+ }
+
+ if (allocate_dst) {
+ dsthost_idx = dsthost_hash % CAKE_QUEUES;
+ inner_hash = dsthost_idx % CAKE_SET_WAYS;
+ outer_hash = dsthost_idx - inner_hash;
+ for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
+ i++, k = (k + 1) % CAKE_SET_WAYS) {
+ if (q->hosts[outer_hash + k].dsthost_tag ==
+ dsthost_hash)
+ goto found_dst;
+ }
+ for (i = 0; i < CAKE_SET_WAYS;
+ i++, k = (k + 1) % CAKE_SET_WAYS) {
+ if (!q->hosts[outer_hash + k].dsthost_refcnt)
+ break;
+ }
+ q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
+found_dst:
+ dsthost_idx = outer_hash + k;
+ q->hosts[dsthost_idx].dsthost_refcnt++;
+ q->flows[reduced_hash].dsthost = dsthost_idx;
+ }
+ }
+
+ return reduced_hash;
+}
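+
+/* Worked example (illustrative) of the set-associative probing above:
+ * with CAKE_SET_WAYS = 8, reduced_hash = 43 gives inner_hash = 3 and
+ * outer_hash = 40, so queues are probed in the order
+ * 43, 44, 45, 46, 47, 40, 41, 42, staying within one 8-way set.
+ */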
+
+/* helper functions: might be changed when/if skb uses a standard list_head */
+/* remove one skb from head of slot queue */
+
+static struct sk_buff *dequeue_head(struct cake_flow *flow)
+{
+ struct sk_buff *skb = flow->head;
+
+ if (skb) {
+ flow->head = skb->next;
+ skb->next = NULL;
+ }
+
+ return skb;
+}
+
+/* add skb to flow queue (tail add) */
+
+static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb)
+{
+ if (!flow->head)
+ flow->head = skb;
+ else
+ flow->tail->next = skb;
+ flow->tail = skb;
+ skb->next = NULL;
+}
+
+static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
+ struct ipv6hdr *buf)
+{
+ unsigned int offset = skb_network_offset(skb);
+ struct iphdr *iph;
+
+ iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
+
+ if (!iph)
+ return NULL;
+
+ if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
+ return skb_header_pointer(skb, offset + iph->ihl * 4,
+ sizeof(struct ipv6hdr), buf);
+
+ else if (iph->version == 4)
+ return iph;
+
+ else if (iph->version == 6)
+ return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
+ buf);
+
+ return NULL;
+}
+
+static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+ void *buf, unsigned int bufsize)
+{
+ unsigned int offset = skb_network_offset(skb);
+ const struct ipv6hdr *ipv6h;
+ const struct tcphdr *tcph;
+ const struct iphdr *iph;
+ struct ipv6hdr _ipv6h;
+ struct tcphdr _tcph;
+
+ ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+
+ if (!ipv6h)
+ return NULL;
+
+ if (ipv6h->version == 4) {
+ iph = (struct iphdr *)ipv6h;
+ offset += iph->ihl * 4;
+
+ /* special-case 6in4 tunnelling, as that is a common way to get
+ * v6 connectivity in the home
+ */
+ if (iph->protocol == IPPROTO_IPV6) {
+ ipv6h = skb_header_pointer(skb, offset,
+ sizeof(_ipv6h), &_ipv6h);
+
+ if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
+ return NULL;
+
+ offset += sizeof(struct ipv6hdr);
+
+ } else if (iph->protocol != IPPROTO_TCP) {
+ return NULL;
+ }
+
+ } else if (ipv6h->version == 6) {
+ if (ipv6h->nexthdr != IPPROTO_TCP)
+ return NULL;
+
+ offset += sizeof(struct ipv6hdr);
+ } else {
+ return NULL;
+ }
+
+ tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+ if (!tcph)
+ return NULL;
+
+ return skb_header_pointer(skb, offset,
+ min(__tcp_hdrlen(tcph), bufsize), buf);
+}
+
+static const void *cake_get_tcpopt(const struct tcphdr *tcph,
+ int code, int *oplen)
+{
+ /* inspired by tcp_parse_options in tcp_input.c */
+ int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
+ const u8 *ptr = (const u8 *)(tcph + 1);
+
+ while (length > 0) {
+ int opcode = *ptr++;
+ int opsize;
+
+ if (opcode == TCPOPT_EOL)
+ break;
+ if (opcode == TCPOPT_NOP) {
+ length--;
+ continue;
+ }
+ opsize = *ptr++;
+ if (opsize < 2 || opsize > length)
+ break;
+
+ if (opcode == code) {
+ *oplen = opsize;
+ return ptr;
+ }
+
+ ptr += opsize - 2;
+ length -= opsize;
+ }
+
+ return NULL;
+}
+
+/* Compare two SACK sequences. A sequence is considered greater if it SACKs more
+ * bytes than the other. In the case where both sequences SACK bytes that the
+ * other doesn't, A is considered greater. DSACKs in A also cause A to be
+ * considered greater.
+ *
+ * @return -1, 0 or 1 as normal compare functions
+ */
+static int cake_tcph_sack_compare(const struct tcphdr *tcph_a,
+ const struct tcphdr *tcph_b)
+{
+ const struct tcp_sack_block_wire *sack_a, *sack_b;
+ u32 ack_seq_a = ntohl(tcph_a->ack_seq);
+ u32 bytes_a = 0, bytes_b = 0;
+ int oplen_a, oplen_b;
+ bool first = true;
+
+ sack_a = cake_get_tcpopt(tcph_a, TCPOPT_SACK, &oplen_a);
+ sack_b = cake_get_tcpopt(tcph_b, TCPOPT_SACK, &oplen_b);
+
+ /* pointers point to option contents */
+ oplen_a -= TCPOLEN_SACK_BASE;
+ oplen_b -= TCPOLEN_SACK_BASE;
+
+ if (sack_a && oplen_a >= sizeof(*sack_a) &&
+ (!sack_b || oplen_b < sizeof(*sack_b)))
+ return -1;
+ else if (sack_b && oplen_b >= sizeof(*sack_b) &&
+ (!sack_a || oplen_a < sizeof(*sack_a)))
+ return 1;
+ else if ((!sack_a || oplen_a < sizeof(*sack_a)) &&
+ (!sack_b || oplen_b < sizeof(*sack_b)))
+ return 0;
+
+ while (oplen_a >= sizeof(*sack_a)) {
+ const struct tcp_sack_block_wire *sack_tmp = sack_b;
+ u32 start_a = get_unaligned_be32(&sack_a->start_seq);
+ u32 end_a = get_unaligned_be32(&sack_a->end_seq);
+ int oplen_tmp = oplen_b;
+ bool found = false;
+
+ /* DSACK; always considered greater to prevent dropping */
+ if (before(start_a, ack_seq_a))
+ return -1;
+
+ bytes_a += end_a - start_a;
+
+ while (oplen_tmp >= sizeof(*sack_tmp)) {
+ u32 start_b = get_unaligned_be32(&sack_tmp->start_seq);
+ u32 end_b = get_unaligned_be32(&sack_tmp->end_seq);
+
+ /* first time through we count the total size */
+ if (first)
+ bytes_b += end_b - start_b;
+
+ if (!after(start_b, start_a) && !before(end_b, end_a)) {
+ found = true;
+ if (!first)
+ break;
+ }
+ oplen_tmp -= sizeof(*sack_tmp);
+ sack_tmp++;
+ }
+
+ if (!found)
+ return -1;
+
+ oplen_a -= sizeof(*sack_a);
+ sack_a++;
+ first = false;
+ }
+
+ /* If we made it this far, all ranges SACKed by A are covered by B, so
+ * either the SACKs are equal, or B SACKs more bytes.
+ */
+ return bytes_b > bytes_a ? 1 : 0;
+}
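+
+/* Worked example (illustrative): if A SACKs only [100,200) and B SACKs
+ * [100,300), A's range is covered by B's, bytes_a = 100 < bytes_b = 200,
+ * so the function returns 1 (B is greater) and an ACK carrying only A
+ * may safely be dropped in favour of one carrying B.
+ */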
+
+static void cake_tcph_get_tstamp(const struct tcphdr *tcph,
+ u32 *tsval, u32 *tsecr)
+{
+ const u8 *ptr;
+ int opsize;
+
+ ptr = cake_get_tcpopt(tcph, TCPOPT_TIMESTAMP, &opsize);
+
+ if (ptr && opsize == TCPOLEN_TIMESTAMP) {
+ *tsval = get_unaligned_be32(ptr);
+ *tsecr = get_unaligned_be32(ptr + 4);
+ }
+}
+
+static bool cake_tcph_may_drop(const struct tcphdr *tcph,
+ u32 tstamp_new, u32 tsecr_new)
+{
+ /* inspired by tcp_parse_options in tcp_input.c */
+ int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
+ const u8 *ptr = (const u8 *)(tcph + 1);
+ u32 tstamp, tsecr;
+
+ /* 3 reserved flags must be unset to avoid future breakage
+ * ACK must be set
+ * ECE/CWR are handled separately
+ * All other flags URG/PSH/RST/SYN/FIN must be unset
+ * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero)
+ * 0x00C00000 = CWR/ECE (handled separately)
+ * 0x0F3F0000 = 0x0FFF0000 & ~0x00C00000
+ */
+ if (((tcp_flag_word(tcph) &
+ cpu_to_be32(0x0F3F0000)) != TCP_FLAG_ACK))
+ return false;
+
+ while (length > 0) {
+ int opcode = *ptr++;
+ int opsize;
+
+ if (opcode == TCPOPT_EOL)
+ break;
+ if (opcode == TCPOPT_NOP) {
+ length--;
+ continue;
+ }
+ opsize = *ptr++;
+ if (opsize < 2 || opsize > length)
+ break;
+
+ switch (opcode) {
+ case TCPOPT_MD5SIG: /* doesn't influence state */
+ break;
+
+ case TCPOPT_SACK: /* stricter checking performed later */
+ if (opsize % 8 != 2)
+ return false;
+ break;
+
+ case TCPOPT_TIMESTAMP:
+ /* only drop timestamps lower than new */
+ if (opsize != TCPOLEN_TIMESTAMP)
+ return false;
+ tstamp = get_unaligned_be32(ptr);
+ tsecr = get_unaligned_be32(ptr + 4);
+ if (after(tstamp, tstamp_new) ||
+ after(tsecr, tsecr_new))
+ return false;
+ break;
+
+ case TCPOPT_MSS: /* these should only be set on SYN */
+ case TCPOPT_WINDOW:
+ case TCPOPT_SACK_PERM:
+ case TCPOPT_FASTOPEN:
+ case TCPOPT_EXP:
+ default: /* don't drop if any unknown options are present */
+ return false;
+ }
+
+ ptr += opsize - 2;
+ length -= opsize;
+ }
+
+ return true;
+}
+
+static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
+ struct cake_flow *flow)
+{
+ bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
+ struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL;
+ struct sk_buff *skb_check, *skb_prev = NULL;
+ const struct ipv6hdr *ipv6h, *ipv6h_check;
+ unsigned char _tcph[64], _tcph_check[64];
+ const struct tcphdr *tcph, *tcph_check;
+ const struct iphdr *iph, *iph_check;
+ struct ipv6hdr _iph, _iph_check;
+ const struct sk_buff *skb;
+ int seglen, num_found = 0;
+ u32 tstamp = 0, tsecr = 0;
+ __be32 elig_flags = 0;
+ int sack_comp;
+
+ /* no other possible ACKs to filter */
+ if (flow->head == flow->tail)
+ return NULL;
+
+ skb = flow->tail;
+ tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph));
+ iph = cake_get_iphdr(skb, &_iph);
+ if (!tcph)
+ return NULL;
+
+ cake_tcph_get_tstamp(tcph, &tstamp, &tsecr);
+
+	/* The 'triggering' packet need only have the ACK flag set.
+	 * Also check that SYN is not set, as there won't be any previous ACKs.
+ */
+ if ((tcp_flag_word(tcph) &
+ (TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK)
+ return NULL;
+
+	/* The 'triggering' ACK is at the tail of the queue; we have already
+	 * returned if it is the only packet in the flow. Loop through the rest
+ * of the queue looking for pure ACKs with the same 5-tuple as the
+ * triggering one.
+ */
+ for (skb_check = flow->head;
+ skb_check && skb_check != skb;
+ skb_prev = skb_check, skb_check = skb_check->next) {
+ iph_check = cake_get_iphdr(skb_check, &_iph_check);
+ tcph_check = cake_get_tcphdr(skb_check, &_tcph_check,
+ sizeof(_tcph_check));
+
+ /* only TCP packets with matching 5-tuple are eligible, and only
+ * drop safe headers
+ */
+ if (!tcph_check || iph->version != iph_check->version ||
+ tcph_check->source != tcph->source ||
+ tcph_check->dest != tcph->dest)
+ continue;
+
+ if (iph_check->version == 4) {
+ if (iph_check->saddr != iph->saddr ||
+ iph_check->daddr != iph->daddr)
+ continue;
+
+ seglen = ntohs(iph_check->tot_len) -
+ (4 * iph_check->ihl);
+ } else if (iph_check->version == 6) {
+ ipv6h = (struct ipv6hdr *)iph;
+ ipv6h_check = (struct ipv6hdr *)iph_check;
+
+ if (ipv6_addr_cmp(&ipv6h_check->saddr, &ipv6h->saddr) ||
+ ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr))
+ continue;
+
+ seglen = ntohs(ipv6h_check->payload_len);
+ } else {
+ WARN_ON(1); /* shouldn't happen */
+ continue;
+ }
+
+ /* If the ECE/CWR flags changed from the previous eligible
+ * packet in the same flow, we should no longer be dropping that
+ * previous packet as this would lose information.
+ */
+ if (elig_ack && (tcp_flag_word(tcph_check) &
+ (TCP_FLAG_ECE | TCP_FLAG_CWR)) != elig_flags) {
+ elig_ack = NULL;
+ elig_ack_prev = NULL;
+ num_found--;
+ }
+
+ /* Check TCP options and flags, don't drop ACKs with segment
+ * data, and don't drop ACKs with a higher cumulative ACK
+ * counter than the triggering packet. Check ACK seqno here to
+ * avoid parsing SACK options of packets we are going to exclude
+ * anyway.
+ */
+ if (!cake_tcph_may_drop(tcph_check, tstamp, tsecr) ||
+ (seglen - __tcp_hdrlen(tcph_check)) != 0 ||
+ after(ntohl(tcph_check->ack_seq), ntohl(tcph->ack_seq)))
+ continue;
+
+ /* Check SACK options. The triggering packet must SACK more data
+ * than the ACK under consideration, or SACK the same range but
+ * have a larger cumulative ACK counter. The latter is a
+ * pathological case, but is contained in the following check
+ * anyway, just to be safe.
+ */
+ sack_comp = cake_tcph_sack_compare(tcph_check, tcph);
+
+ if (sack_comp < 0 ||
+ (ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq) &&
+ sack_comp == 0))
+ continue;
+
+ /* At this point we have found an eligible pure ACK to drop; if
+ * we are in aggressive mode, we are done. Otherwise, keep
+ * searching unless this is the second eligible ACK we
+ * found.
+ *
+ * Since we want to drop ACK closest to the head of the queue,
+ * save the first eligible ACK we find, even if we need to loop
+ * again.
+ */
+ if (!elig_ack) {
+ elig_ack = skb_check;
+ elig_ack_prev = skb_prev;
+ elig_flags = (tcp_flag_word(tcph_check)
+ & (TCP_FLAG_ECE | TCP_FLAG_CWR));
+ }
+
+ if (num_found++ > 0)
+ goto found;
+ }
+
+	/* We made it through the queue without finding two eligible ACKs. If
+ * we found a single eligible ACK we can drop it in aggressive mode if
+ * we can guarantee that this does not interfere with ECN flag
+ * information. We ensure this by dropping it only if the enqueued
+ * packet is consecutive with the eligible ACK, and their flags match.
+ */
+ if (elig_ack && aggressive && elig_ack->next == skb &&
+ (elig_flags == (tcp_flag_word(tcph) &
+ (TCP_FLAG_ECE | TCP_FLAG_CWR))))
+ goto found;
+
+ return NULL;
+
+found:
+ if (elig_ack_prev)
+ elig_ack_prev->next = elig_ack->next;
+ else
+ flow->head = elig_ack->next;
+
+ elig_ack->next = NULL;
+
+ return elig_ack;
+}
+
+static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
+{
+ avg -= avg >> shift;
+ avg += sample >> shift;
+ return avg;
+}
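+
+/* Worked example (illustrative): cake_ewma() is an EWMA with
+ * alpha = 1/2^shift. With avg = 1000, sample = 2000, shift = 3 it
+ * returns 1000 - 125 + 250 = 1125, moving 1/8 of the way to the sample.
+ */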
+
+static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
+{
+ if (q->rate_flags & CAKE_FLAG_OVERHEAD)
+ len -= off;
+
+ if (q->max_netlen < len)
+ q->max_netlen = len;
+ if (q->min_netlen > len)
+ q->min_netlen = len;
+
+ len += q->rate_overhead;
+
+ if (len < q->rate_mpu)
+ len = q->rate_mpu;
+
+ if (q->atm_mode == CAKE_ATM_ATM) {
+ len += 47;
+ len /= 48;
+ len *= 53;
+ } else if (q->atm_mode == CAKE_ATM_PTM) {
+ /* Add one byte per 64 bytes or part thereof.
+ * This is conservative and easier to calculate than the
+ * precise value.
+ */
+ len += (len + 63) / 64;
+ }
+
+ if (q->max_adjlen < len)
+ q->max_adjlen = len;
+ if (q->min_adjlen > len)
+ q->min_adjlen = len;
+
+ return len;
+}
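+
+/* Worked example (illustrative): for a 1500 byte packet with no
+ * rate_overhead configured, CAKE_ATM_ATM accounts (1500 + 47) / 48 = 32
+ * cells, i.e. 32 * 53 = 1696 bytes on the wire; CAKE_ATM_PTM instead
+ * adds (1500 + 63) / 64 = 24 bytes, giving 1524.
+ */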
+
+static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ unsigned int hdr_len, last_len = 0;
+ u32 off = skb_network_offset(skb);
+ u32 len = qdisc_pkt_len(skb);
+ u16 segs = 1;
+
+ q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
+
+ if (!shinfo->gso_size)
+ return cake_calc_overhead(q, len, off);
+
+ /* borrowed from qdisc_pkt_len_init() */
+ hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+ /* + transport layer */
+ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
+ SKB_GSO_TCPV6))) {
+ const struct tcphdr *th;
+ struct tcphdr _tcphdr;
+
+ th = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_tcphdr), &_tcphdr);
+ if (likely(th))
+ hdr_len += __tcp_hdrlen(th);
+ } else {
+ struct udphdr _udphdr;
+
+ if (skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_udphdr), &_udphdr))
+ hdr_len += sizeof(struct udphdr);
+ }
+
+ if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
+ segs = DIV_ROUND_UP(skb->len - hdr_len,
+ shinfo->gso_size);
+ else
+ segs = shinfo->gso_segs;
+
+ len = shinfo->gso_size + hdr_len;
+ last_len = skb->len - shinfo->gso_size * (segs - 1);
+
+ return (cake_calc_overhead(q, len, off) * (segs - 1) +
+ cake_calc_overhead(q, last_len, off));
+}
+
+static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
+{
+ struct cake_heap_entry ii = q->overflow_heap[i];
+ struct cake_heap_entry jj = q->overflow_heap[j];
+
+ q->overflow_heap[i] = jj;
+ q->overflow_heap[j] = ii;
+
+ q->tins[ii.t].overflow_idx[ii.b] = j;
+ q->tins[jj.t].overflow_idx[jj.b] = i;
+}
+
+static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
+{
+ struct cake_heap_entry ii = q->overflow_heap[i];
+
+ return q->tins[ii.t].backlogs[ii.b];
+}
+
+static void cake_heapify(struct cake_sched_data *q, u16 i)
+{
+ static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES;
+ u32 mb = cake_heap_get_backlog(q, i);
+ u32 m = i;
+
+ while (m < a) {
+ u32 l = m + m + 1;
+ u32 r = l + 1;
+
+ if (l < a) {
+ u32 lb = cake_heap_get_backlog(q, l);
+
+ if (lb > mb) {
+ m = l;
+ mb = lb;
+ }
+ }
+
+ if (r < a) {
+ u32 rb = cake_heap_get_backlog(q, r);
+
+ if (rb > mb) {
+ m = r;
+ mb = rb;
+ }
+ }
+
+ if (m != i) {
+ cake_heap_swap(q, i, m);
+ i = m;
+ } else {
+ break;
+ }
+ }
+}
+
+static void cake_heapify_up(struct cake_sched_data *q, u16 i)
+{
+ while (i > 0 && i < CAKE_MAX_TINS * CAKE_QUEUES) {
+ u16 p = (i - 1) >> 1;
+ u32 ib = cake_heap_get_backlog(q, i);
+ u32 pb = cake_heap_get_backlog(q, p);
+
+ if (ib > pb) {
+ cake_heap_swap(q, i, p);
+ i = p;
+ } else {
+ break;
+ }
+ }
+}
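+
+/* Note (illustrative): the overflow heap is an implicit binary max-heap
+ * keyed on per-queue backlog: node i has parent (i - 1) / 2 and children
+ * 2i + 1 and 2i + 2, e.g. node 5's parent is 2 and its children are 11
+ * and 12. overflow_idx[] records each queue's heap slot so enqueue and
+ * dequeue can re-heapify from exactly that point.
+ */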
+
+static int cake_advance_shaper(struct cake_sched_data *q,
+ struct cake_tin_data *b,
+ struct sk_buff *skb,
+ ktime_t now, bool drop)
+{
+ u32 len = get_cobalt_cb(skb)->adjusted_len;
+
+ /* charge packet bandwidth to this tin
+ * and to the global shaper.
+ */
+ if (q->rate_ns) {
+ u64 tin_dur = (len * b->tin_rate_ns) >> b->tin_rate_shft;
+ u64 global_dur = (len * q->rate_ns) >> q->rate_shft;
+ u64 failsafe_dur = global_dur + (global_dur >> 1);
+
+ if (ktime_before(b->time_next_packet, now))
+ b->time_next_packet = ktime_add_ns(b->time_next_packet,
+ tin_dur);
+
+ else if (ktime_before(b->time_next_packet,
+ ktime_add_ns(now, tin_dur)))
+ b->time_next_packet = ktime_add_ns(now, tin_dur);
+
+ q->time_next_packet = ktime_add_ns(q->time_next_packet,
+ global_dur);
+ if (!drop)
+ q->failsafe_next_packet = \
+ ktime_add_ns(q->failsafe_next_packet,
+ failsafe_dur);
+ }
+ return len;
+}
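+
+/* Worked example (illustrative, taking rate_bps as bytes/sec, which is
+ * what the arithmetic above consumes): at 12500000 bytes/sec (100
+ * Mbit/s), a 1514 byte packet advances time_next_packet by about
+ * 1514 * 80 ns ~= 121 us, and the failsafe clock by 1.5x that, ~182 us.
+ */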
+
+static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+ ktime_t now = ktime_get();
+ u32 idx = 0, tin = 0, len;
+ struct cake_heap_entry qq;
+ struct cake_tin_data *b;
+ struct cake_flow *flow;
+ struct sk_buff *skb;
+
+ if (!q->overflow_timeout) {
+ int i;
+ /* Build fresh max-heap */
+ for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2; i >= 0; i--)
+ cake_heapify(q, i);
+ }
+ q->overflow_timeout = 65535;
+
+ /* select longest queue for pruning */
+ qq = q->overflow_heap[0];
+ tin = qq.t;
+ idx = qq.b;
+
+ b = &q->tins[tin];
+ flow = &b->flows[idx];
+ skb = dequeue_head(flow);
+ if (unlikely(!skb)) {
+ /* heap has gone wrong, rebuild it next time */
+ q->overflow_timeout = 0;
+ return idx + (tin << 16);
+ }
+
+ if (cobalt_queue_full(&flow->cvars, &b->cparams, now))
+ b->unresponsive_flow_count++;
+
+ len = qdisc_pkt_len(skb);
+ q->buffer_used -= skb->truesize;
+ b->backlogs[idx] -= len;
+ b->tin_backlog -= len;
+ sch->qstats.backlog -= len;
+ qdisc_tree_reduce_backlog(sch, 1, len);
+
+ flow->dropped++;
+ b->tin_dropped++;
+ sch->qstats.drops++;
+
+ if (q->rate_flags & CAKE_FLAG_INGRESS)
+ cake_advance_shaper(q, b, skb, now, true);
+
+ __qdisc_drop(skb, to_free);
+ sch->q.qlen--;
+
+ cake_heapify(q, 0);
+
+ return idx + (tin << 16);
+}
+
+static void cake_wash_diffserv(struct sk_buff *skb)
+{
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+ break;
+ case htons(ETH_P_IPV6):
+ ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+ break;
+ default:
+ break;
+ }
+}
+
+static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+{
+ u8 dscp;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ if (wash && dscp)
+ ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+ return dscp;
+
+ case htons(ETH_P_IPV6):
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+ if (wash && dscp)
+ ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+ return dscp;
+
+ case htons(ETH_P_ARP):
+ return 0x38; /* CS7 - Net Control */
+
+ default:
+ /* If there is no Diffserv field, treat as best-effort */
+ return 0;
+ }
+}
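+
+/* Worked example (illustrative): an IPv4 dsfield of 0xB8 (EF) yields
+ * dscp = 0xB8 >> 2 = 0x2E (46); with "wash" enabled the DSCP bits are
+ * then zeroed in place while the two ECN bits are preserved.
+ */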
+
+static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
+ struct sk_buff *skb)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+ u32 tin;
+
+ if (TC_H_MAJ(skb->priority) == sch->handle &&
+ TC_H_MIN(skb->priority) > 0 &&
+ TC_H_MIN(skb->priority) <= q->tin_cnt) {
+ tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
+
+ if (q->rate_flags & CAKE_FLAG_WASH)
+ cake_wash_diffserv(skb);
+ } else if (q->tin_mode != CAKE_DIFFSERV_BESTEFFORT) {
+ /* extract the Diffserv Precedence field, if it exists */
+ /* and clear DSCP bits if washing */
+ tin = q->tin_index[cake_handle_diffserv(skb,
+ q->rate_flags & CAKE_FLAG_WASH)];
+ if (unlikely(tin >= q->tin_cnt))
+ tin = 0;
+ } else {
+ tin = 0;
+ if (q->rate_flags & CAKE_FLAG_WASH)
+ cake_wash_diffserv(skb);
+ }
+
+ return &q->tins[tin];
+}
+
+static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
+ struct sk_buff *skb, int flow_mode, int *qerr)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+ struct tcf_proto *filter;
+ struct tcf_result res;
+ u32 flow = 0;
+ int result;
+
+ filter = rcu_dereference_bh(q->filter_list);
+ if (!filter)
+ goto hash;
+
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ result = tcf_classify(skb, filter, &res, false);
+
+ if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_STOLEN:
+ case TC_ACT_QUEUED:
+ case TC_ACT_TRAP:
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+ /* fall through */
+ case TC_ACT_SHOT:
+ return 0;
+ }
+#endif
+ if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
+ flow = TC_H_MIN(res.classid);
+ }
+hash:
+ *t = cake_select_tin(sch, skb);
+ return flow ?: cake_hash(*t, skb, flow_mode) + 1;
+}
+
+static void cake_reconfigure(struct Qdisc *sch);
+
+static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+ int len = qdisc_pkt_len(skb);
+ int uninitialized_var(ret);
+ struct sk_buff *ack = NULL;
+ ktime_t now = ktime_get();
+ struct cake_tin_data *b;
+ struct cake_flow *flow;
+ u32 idx;
+
+ /* choose flow to insert into */
+ idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
+ if (idx == 0) {
+ if (ret & __NET_XMIT_BYPASS)
+ qdisc_qstats_drop(sch);
+ __qdisc_drop(skb, to_free);
+ return ret;
+ }
+ idx--;
+ flow = &b->flows[idx];
+
+ /* ensure shaper state isn't stale */
+ if (!b->tin_backlog) {
+ if (ktime_before(b->time_next_packet, now))
+ b->time_next_packet = now;
+
+ if (!sch->q.qlen) {
+ if (ktime_before(q->time_next_packet, now)) {
+ q->failsafe_next_packet = now;
+ q->time_next_packet = now;
+ } else if (ktime_after(q->time_next_packet, now) &&
+ ktime_after(q->failsafe_next_packet, now)) {
+ u64 next = \
+ min(ktime_to_ns(q->time_next_packet),
+ ktime_to_ns(
+ q->failsafe_next_packet));
+ sch->qstats.overlimits++;
+ qdisc_watchdog_schedule_ns(&q->watchdog, next);
+ }
+ }
+ }
+
+ if (unlikely(len > b->max_skblen))
+ b->max_skblen = len;
+
+ if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+ struct sk_buff *segs, *nskb;
+ netdev_features_t features = netif_skb_features(skb);
+ unsigned int slen = 0;
+
+ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+ if (IS_ERR_OR_NULL(segs))
+ return qdisc_drop(skb, sch, to_free);
+
+ while (segs) {
+ nskb = segs->next;
+ segs->next = NULL;
+ qdisc_skb_cb(segs)->pkt_len = segs->len;
+ cobalt_set_enqueue_time(segs, now);
+ get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
+ segs);
+ flow_queue_add(flow, segs);
+
+ sch->q.qlen++;
+ slen += segs->len;
+ q->buffer_used += segs->truesize;
+ b->packets++;
+ segs = nskb;
+ }
+
+ /* stats */
+ b->bytes += slen;
+ b->backlogs[idx] += slen;
+ b->tin_backlog += slen;
+ sch->qstats.backlog += slen;
+ q->avg_window_bytes += slen;
+
+ qdisc_tree_reduce_backlog(sch, 1, len);
+ consume_skb(skb);
+ } else {
+ /* not splitting */
+ cobalt_set_enqueue_time(skb, now);
+ get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
+ flow_queue_add(flow, skb);
+
+ if (q->ack_filter)
+ ack = cake_ack_filter(q, flow);
+
+ if (ack) {
+ b->ack_drops++;
+ sch->qstats.drops++;
+ b->bytes += qdisc_pkt_len(ack);
+ len -= qdisc_pkt_len(ack);
+ q->buffer_used += skb->truesize - ack->truesize;
+ if (q->rate_flags & CAKE_FLAG_INGRESS)
+ cake_advance_shaper(q, b, ack, now, true);
+
+ qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+ consume_skb(ack);
+ } else {
+ sch->q.qlen++;
+ q->buffer_used += skb->truesize;
+ }
+
+ /* stats */
+ b->packets++;
+ b->bytes += len;
+ b->backlogs[idx] += len;
+ b->tin_backlog += len;
+ sch->qstats.backlog += len;
+ q->avg_window_bytes += len;
+ }
+
+ if (q->overflow_timeout)
+ cake_heapify_up(q, b->overflow_idx[idx]);
+
+ /* incoming bandwidth capacity estimate */
+ if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+ u64 packet_interval = \
+ ktime_to_ns(ktime_sub(now, q->last_packet_time));
+
+ if (packet_interval > NSEC_PER_SEC)
+ packet_interval = NSEC_PER_SEC;
+
+ /* filter out short-term bursts, eg. wifi aggregation */
+ q->avg_packet_interval = \
+ cake_ewma(q->avg_packet_interval,
+ packet_interval,
+ (packet_interval > q->avg_packet_interval ?
+ 2 : 8));
+
+ q->last_packet_time = now;
+
+ if (packet_interval > q->avg_packet_interval) {
+ u64 window_interval = \
+ ktime_to_ns(ktime_sub(now,
+ q->avg_window_begin));
+ u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
+
+ do_div(b, window_interval);
+ q->avg_peak_bandwidth =
+ cake_ewma(q->avg_peak_bandwidth, b,
+ b > q->avg_peak_bandwidth ? 2 : 8);
+ q->avg_window_bytes = 0;
+ q->avg_window_begin = now;
+
+ if (ktime_after(now,
+ ktime_add_ms(q->last_reconfig_time,
+ 250))) {
+ q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+ cake_reconfigure(sch);
+ }
+ }
+ } else {
+ q->avg_window_bytes = 0;
+ q->last_packet_time = now;
+ }
+
+ /* flowchain */
+ if (!flow->set || flow->set == CAKE_SET_DECAYING) {
+ struct cake_host *srchost = &b->hosts[flow->srchost];
+ struct cake_host *dsthost = &b->hosts[flow->dsthost];
+ u16 host_load = 1;
+
+ if (!flow->set) {
+ list_add_tail(&flow->flowchain, &b->new_flows);
+ } else {
+ b->decaying_flow_count--;
+ list_move_tail(&flow->flowchain, &b->new_flows);
+ }
+ flow->set = CAKE_SET_SPARSE;
+ b->sparse_flow_count++;
+
+ if (cake_dsrc(q->flow_mode))
+ host_load = max(host_load, srchost->srchost_refcnt);
+
+ if (cake_ddst(q->flow_mode))
+ host_load = max(host_load, dsthost->dsthost_refcnt);
+
+ flow->deficit = (b->flow_quantum *
+ quantum_div[host_load]) >> 16;
+ } else if (flow->set == CAKE_SET_SPARSE_WAIT) {
+ /* this flow was empty, accounted as a sparse flow, but actually
+ * in the bulk rotation.
+ */
+ flow->set = CAKE_SET_BULK;
+ b->sparse_flow_count--;
+ b->bulk_flow_count++;
+ }
+
+ if (q->buffer_used > q->buffer_max_used)
+ q->buffer_max_used = q->buffer_used;
+
+ if (q->buffer_used > q->buffer_limit) {
+ u32 dropped = 0;
+
+ while (q->buffer_used > q->buffer_limit) {
+ dropped++;
+ cake_drop(sch, to_free);
+ }
+ b->drop_overlimit += dropped;
+ }
+ return NET_XMIT_SUCCESS;
+}
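+
+/* Note (illustrative): with CAKE_FLAG_SPLIT_GSO set above, a ~64 KB GSO
+ * super-packet is segmented on enqueue into ~43 MTU-sized skbs, each
+ * timestamped and shaped individually, trading CPU for shaping accuracy
+ * at low rates.
+ */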
+
+static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+ struct cake_tin_data *b = &q->tins[q->cur_tin];
+ struct cake_flow *flow = &b->flows[q->cur_flow];
+ struct sk_buff *skb = NULL;
+ u32 len;
+
+ if (flow->head) {
+ skb = dequeue_head(flow);
+ len = qdisc_pkt_len(skb);
+ b->backlogs[q->cur_flow] -= len;
+ b->tin_backlog -= len;
+ sch->qstats.backlog -= len;
+ q->buffer_used -= skb->truesize;
+ sch->q.qlen--;
+
+ if (q->overflow_timeout)
+ cake_heapify(q, b->overflow_idx[q->cur_flow]);
+ }
+ return skb;
+}
+
+/* Discard leftover packets from a tin no longer in use. */
+static void cake_clear_tin(struct Qdisc *sch, u16 tin)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ q->cur_tin = tin;
+ for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
+ while (!!(skb = cake_dequeue_one(sch)))
+ kfree_skb(skb);
+}
+
+static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+ struct cake_tin_data *b = &q->tins[q->cur_tin];
+ struct cake_host *srchost, *dsthost;
+ ktime_t now = ktime_get();
+ struct cake_flow *flow;
+ struct list_head *head;
+ bool first_flow = true;
+ struct sk_buff *skb;
+ u16 host_load;
+ u64 delay;
+ u32 len;
+
+begin:
+ if (!sch->q.qlen)
+ return NULL;
+
+ /* global hard shaper */
+ if (ktime_after(q->time_next_packet, now) &&
+ ktime_after(q->failsafe_next_packet, now)) {
+ u64 next = min(ktime_to_ns(q->time_next_packet),
+ ktime_to_ns(q->failsafe_next_packet));
+
+ sch->qstats.overlimits++;
+ qdisc_watchdog_schedule_ns(&q->watchdog, next);
+ return NULL;
+ }
+
+ /* Choose a class to work on. */
+ if (!q->rate_ns) {
+ /* In unlimited mode, can't rely on shaper timings, just balance
+ * with DRR
+ */
+ bool wrapped = false, empty = true;
+
+ while (b->tin_deficit < 0 ||
+ !(b->sparse_flow_count + b->bulk_flow_count)) {
+ if (b->tin_deficit <= 0)
+ b->tin_deficit += b->tin_quantum_band;
+ if (b->sparse_flow_count + b->bulk_flow_count)
+ empty = false;
+
+ q->cur_tin++;
+ b++;
+ if (q->cur_tin >= q->tin_cnt) {
+ q->cur_tin = 0;
+ b = q->tins;
+
+ if (wrapped) {
+ /* It's possible for q->qlen to be
+ * nonzero when we actually have no
+ * packets anywhere.
+ */
+ if (empty)
+ return NULL;
+ } else {
+ wrapped = true;
+ }
+ }
+ }
+ } else {
+ /* In shaped mode, choose:
+ * - Highest-priority tin with queue and meeting schedule, or
+ * - The earliest-scheduled tin with queue.
+ */
+ ktime_t best_time = KTIME_MAX;
+ int tin, best_tin = 0;
+
+ for (tin = 0; tin < q->tin_cnt; tin++) {
+ b = q->tins + tin;
+ if ((b->sparse_flow_count + b->bulk_flow_count) > 0) {
+ ktime_t time_to_pkt = \
+ ktime_sub(b->time_next_packet, now);
+
+ if (ktime_to_ns(time_to_pkt) <= 0 ||
+ ktime_compare(time_to_pkt,
+ best_time) <= 0) {
+ best_time = time_to_pkt;
+ best_tin = tin;
+ }
+ }
+ }
+
+ q->cur_tin = best_tin;
+ b = q->tins + best_tin;
+
+ /* No point in going further if no packets to deliver. */
+ if (unlikely(!(b->sparse_flow_count + b->bulk_flow_count)))
+ return NULL;
+ }
+
+retry:
+ /* service this class */
+ head = &b->decaying_flows;
+ if (!first_flow || list_empty(head)) {
+ head = &b->new_flows;
+ if (list_empty(head)) {
+ head = &b->old_flows;
+ if (unlikely(list_empty(head))) {
+ head = &b->decaying_flows;
+ if (unlikely(list_empty(head)))
+ goto begin;
+ }
+ }
+ }
+ flow = list_first_entry(head, struct cake_flow, flowchain);
+ q->cur_flow = flow - b->flows;
+ first_flow = false;
+
+ /* triple isolation (modified DRR++) */
+ srchost = &b->hosts[flow->srchost];
+ dsthost = &b->hosts[flow->dsthost];
+ host_load = 1;
+
+ if (cake_dsrc(q->flow_mode))
+ host_load = max(host_load, srchost->srchost_refcnt);
+
+ if (cake_ddst(q->flow_mode))
+ host_load = max(host_load, dsthost->dsthost_refcnt);
+
+ WARN_ON(host_load > CAKE_QUEUES);
+
+ /* flow isolation (DRR++) */
+ if (flow->deficit <= 0) {
+ /* The shifted prandom_u32() is a way to apply dithering to
+ * avoid accumulating roundoff errors
+ */
+ flow->deficit += (b->flow_quantum * quantum_div[host_load] +
+ (prandom_u32() >> 16)) >> 16;
+ list_move_tail(&flow->flowchain, &b->old_flows);
+
+ /* Keep all flows with deficits out of the sparse and decaying
+ * rotations. No non-empty flow can go into the decaying
+		 * rotation, so they can't get deficits.
+ */
+ if (flow->set == CAKE_SET_SPARSE) {
+ if (flow->head) {
+ b->sparse_flow_count--;
+ b->bulk_flow_count++;
+ flow->set = CAKE_SET_BULK;
+ } else {
+ /* we've moved it to the bulk rotation for
+ * correct deficit accounting but we still want
+ * to count it as a sparse flow, not a bulk one.
+ */
+ flow->set = CAKE_SET_SPARSE_WAIT;
+ }
+ }
+ goto retry;
+ }
+
+ /* Retrieve a packet via the AQM */
+ while (1) {
+ skb = cake_dequeue_one(sch);
+ if (!skb) {
+ /* this queue was actually empty */
+ if (cobalt_queue_empty(&flow->cvars, &b->cparams, now))
+ b->unresponsive_flow_count--;
+
+ if (flow->cvars.p_drop || flow->cvars.count ||
+ ktime_before(now, flow->cvars.drop_next)) {
+ /* keep in the flowchain until the state has
+ * decayed to rest
+ */
+ list_move_tail(&flow->flowchain,
+ &b->decaying_flows);
+ if (flow->set == CAKE_SET_BULK) {
+ b->bulk_flow_count--;
+ b->decaying_flow_count++;
+ } else if (flow->set == CAKE_SET_SPARSE ||
+ flow->set == CAKE_SET_SPARSE_WAIT) {
+ b->sparse_flow_count--;
+ b->decaying_flow_count++;
+ }
+ flow->set = CAKE_SET_DECAYING;
+ } else {
+ /* remove empty queue from the flowchain */
+ list_del_init(&flow->flowchain);
+ if (flow->set == CAKE_SET_SPARSE ||
+ flow->set == CAKE_SET_SPARSE_WAIT)
+ b->sparse_flow_count--;
+ else if (flow->set == CAKE_SET_BULK)
+ b->bulk_flow_count--;
+ else
+ b->decaying_flow_count--;
+
+ flow->set = CAKE_SET_NONE;
+ srchost->srchost_refcnt--;
+ dsthost->dsthost_refcnt--;
+ }
+ goto begin;
+ }
+
+ /* Last packet in queue may be marked, shouldn't be dropped */
+ if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
+ (b->bulk_flow_count *
+ !!(q->rate_flags &
+ CAKE_FLAG_INGRESS))) ||
+ !flow->head)
+ break;
+
+ /* drop this packet, get another one */
+ if (q->rate_flags & CAKE_FLAG_INGRESS) {
+ len = cake_advance_shaper(q, b, skb,
+ now, true);
+ flow->deficit -= len;
+ b->tin_deficit -= len;
+ }
+ flow->dropped++;
+ b->tin_dropped++;
+ qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
+ qdisc_qstats_drop(sch);
+ kfree_skb(skb);
+ if (q->rate_flags & CAKE_FLAG_INGRESS)
+ goto retry;
+ }
+
+ b->tin_ecn_mark += !!flow->cvars.ecn_marked;
+ qdisc_bstats_update(sch, skb);
+
+ /* collect delay stats */
+ delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
+ b->avge_delay = cake_ewma(b->avge_delay, delay, 8);
+ b->peak_delay = cake_ewma(b->peak_delay, delay,
+ delay > b->peak_delay ? 2 : 8);
+ b->base_delay = cake_ewma(b->base_delay, delay,
+ delay < b->base_delay ? 2 : 8);
+
+ len = cake_advance_shaper(q, b, skb, now, false);
+ flow->deficit -= len;
+ b->tin_deficit -= len;
+
+ if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
+ u64 next = min(ktime_to_ns(q->time_next_packet),
+ ktime_to_ns(q->failsafe_next_packet));
+
+ qdisc_watchdog_schedule_ns(&q->watchdog, next);
+ } else if (!sch->q.qlen) {
+ int i;
+
+ for (i = 0; i < q->tin_cnt; i++) {
+ if (q->tins[i].decaying_flow_count) {
+ ktime_t next = ktime_add_ns(now,
+ q->tins[i].cparams.target);
+
+ qdisc_watchdog_schedule_ns(&q->watchdog,
+ ktime_to_ns(next));
+ break;
+ }
+ }
+ }
+
+ if (q->overflow_timeout)
+ q->overflow_timeout--;
+
+ return skb;
+}
+
+static void cake_reset(struct Qdisc *sch)
+{
+ u32 c;
+
+ for (c = 0; c < CAKE_MAX_TINS; c++)
+ cake_clear_tin(sch, c);
+}
+
+static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
+ [TCA_CAKE_BASE_RATE64] = { .type = NLA_U64 },
+ [TCA_CAKE_DIFFSERV_MODE] = { .type = NLA_U32 },
+ [TCA_CAKE_ATM] = { .type = NLA_U32 },
+ [TCA_CAKE_FLOW_MODE] = { .type = NLA_U32 },
+ [TCA_CAKE_OVERHEAD] = { .type = NLA_S32 },
+ [TCA_CAKE_RTT] = { .type = NLA_U32 },
+ [TCA_CAKE_TARGET] = { .type = NLA_U32 },
+ [TCA_CAKE_AUTORATE] = { .type = NLA_U32 },
+ [TCA_CAKE_MEMORY] = { .type = NLA_U32 },
+ [TCA_CAKE_NAT] = { .type = NLA_U32 },
+ [TCA_CAKE_RAW] = { .type = NLA_U32 },
+ [TCA_CAKE_WASH] = { .type = NLA_U32 },
+ [TCA_CAKE_MPU] = { .type = NLA_U32 },
+ [TCA_CAKE_INGRESS] = { .type = NLA_U32 },
+ [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 },
+};
+
+static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
+ u64 target_ns, u64 rtt_est_ns)
+{
+ /* convert byte-rate into time-per-byte, flooring the rate at
+ * MIN_RATE so that even a tiny configured rate unwedges the
+ * queue in reasonable time.
+ */
+ static const u64 MIN_RATE = 64;
+ u32 byte_target = mtu;
+ u64 byte_target_ns;
+ u8 rate_shft = 0;
+ u64 rate_ns = 0;
+
+ b->flow_quantum = 1514;
+ if (rate) {
+ b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL);
+ rate_shft = 34;
+ rate_ns = ((u64)NSEC_PER_SEC) << rate_shft;
+ rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate));
+ while (!!(rate_ns >> 34)) {
+ rate_ns >>= 1;
+ rate_shft--;
+ }
+ } /* else unlimited, i.e. zero delay */
+
+ b->tin_rate_bps = rate;
+ b->tin_rate_ns = rate_ns;
+ b->tin_rate_shft = rate_shft;
+
+ byte_target_ns = (byte_target * rate_ns) >> rate_shft;
+
+ b->cparams.target = max((byte_target_ns * 3) / 2, target_ns);
+ b->cparams.interval = max(rtt_est_ns +
+ b->cparams.target - target_ns,
+ b->cparams.target * 2);
+ b->cparams.mtu_time = byte_target_ns;
+ b->cparams.p_inc = 1 << 24; /* 1/256 */
+ b->cparams.p_dec = 1 << 20; /* 1/4096 */
+}
+
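+/* Put differently: rate_ns / 2^rate_shft approximates the time per
+ * byte, NSEC_PER_SEC / rate, so the shaper can charge a packet of
+ * len bytes in (len * rate_ns) >> rate_shft nanoseconds with a
+ * multiply and a shift instead of a 64-bit division per dequeue.
+ */
+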
+static int cake_config_besteffort(struct Qdisc *sch)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+ struct cake_tin_data *b = &q->tins[0];
+ u32 mtu = psched_mtu(qdisc_dev(sch));
+ u64 rate = q->rate_bps;
+
+ q->tin_cnt = 1;
+
+ q->tin_index = besteffort;
+ q->tin_order = normal_order;
+
+ cake_set_rate(b, rate, mtu,
+ us_to_ns(q->target), us_to_ns(q->interval));
+ b->tin_quantum_band = 65535;
+ b->tin_quantum_prio = 65535;
+
+ return 0;
+}
+
+static int cake_config_precedence(struct Qdisc *sch)
+{
+ /* convert high-level (user visible) parameters into internal format */
+ struct cake_sched_data *q = qdisc_priv(sch);
+ u32 mtu = psched_mtu(qdisc_dev(sch));
+ u64 rate = q->rate_bps;
+ u32 quantum1 = 256;
+ u32 quantum2 = 256;
+ u32 i;
+
+ q->tin_cnt = 8;
+ q->tin_index = precedence;
+ q->tin_order = normal_order;
+
+ for (i = 0; i < q->tin_cnt; i++) {
+ struct cake_tin_data *b = &q->tins[i];
+
+ cake_set_rate(b, rate, mtu, us_to_ns(q->target),
+ us_to_ns(q->interval));
+
+ b->tin_quantum_prio = max_t(u16, 1U, quantum1);
+ b->tin_quantum_band = max_t(u16, 1U, quantum2);
+
+ /* calculate next class's parameters */
+ rate *= 7;
+ rate >>= 3;
+
+ quantum1 *= 3;
+ quantum1 >>= 1;
+
+ quantum2 *= 7;
+ quantum2 >>= 3;
+ }
+
+ return 0;
+}
+
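+/* The loop above derives each tin from its predecessor: the rate
+ * decays by 7/8 per tin (rate * 7 >> 3), quantum1 grows by 3/2 and
+ * quantum2 decays by 7/8, so e.g. tin 3 is shaped to (7/8)^3, about
+ * 67% of the base rate (illustrative arithmetic).
+ */
+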
+/* List of known Diffserv codepoints:
+ *
+ * Least Effort (CS1)
+ * Best Effort (CS0)
+ * Max Reliability & LLT "Lo" (TOS1)
+ * Max Throughput (TOS2)
+ * Min Delay (TOS4)
+ * LLT "La" (TOS5)
+ * Assured Forwarding 1 (AF1x) - x3
+ * Assured Forwarding 2 (AF2x) - x3
+ * Assured Forwarding 3 (AF3x) - x3
+ * Assured Forwarding 4 (AF4x) - x3
+ * Precedence Class 2 (CS2)
+ * Precedence Class 3 (CS3)
+ * Precedence Class 4 (CS4)
+ * Precedence Class 5 (CS5)
+ * Precedence Class 6 (CS6)
+ * Precedence Class 7 (CS7)
+ * Voice Admit (VA)
+ * Expedited Forwarding (EF)
+ *
+ * Total 25 codepoints.
+ */
+
+/* List of traffic classes in RFC 4594:
+ * (roughly descending order of contended priority)
+ * (roughly ascending order of uncontended throughput)
+ *
+ * Network Control (CS6,CS7) - routing traffic
+ * Telephony (EF,VA) - aka VoIP streams
+ * Signalling (CS5) - VoIP setup
+ * Multimedia Conferencing (AF4x) - aka video calls
+ * Realtime Interactive (CS4) - e.g. games
+ * Multimedia Streaming (AF3x) - e.g. YouTube, Netflix, Twitch
+ * Broadcast Video (CS3)
+ * Low Latency Data (AF2x,TOS4) - e.g. database
+ * Ops, Admin, Management (CS2,TOS1) - e.g. ssh
+ * Standard Service (CS0 & unrecognised codepoints)
+ * High Throughput Data (AF1x,TOS2) - e.g. web traffic
+ * Low Priority Data (CS1) - e.g. BitTorrent
+ *
+ * Total 12 traffic classes.
+ */
+
+static int cake_config_diffserv8(struct Qdisc *sch)
+{
+/* Pruned list of traffic classes for typical applications:
+ *
+ * Network Control (CS6, CS7)
+ * Minimum Latency (EF, VA, CS5, CS4)
+ * Interactive Shell (CS2, TOS1)
+ * Low Latency Transactions (AF2x, TOS4)
+ * Video Streaming (AF4x, AF3x, CS3)
+ * Bog Standard (CS0 etc.)
+ * High Throughput (AF1x, TOS2)
+ * Background Traffic (CS1)
+ *
+ * Total 8 traffic classes.
+ */
+
+ struct cake_sched_data *q = qdisc_priv(sch);
+ u32 mtu = psched_mtu(qdisc_dev(sch));
+ u64 rate = q->rate_bps;
+ u32 quantum1 = 256;
+ u32 quantum2 = 256;
+ u32 i;
+
+ q->tin_cnt = 8;
+
+ /* codepoint to class mapping */
+ q->tin_index = diffserv8;
+ q->tin_order = normal_order;
+
+ /* class characteristics */
+ for (i = 0; i < q->tin_cnt; i++) {
+ struct cake_tin_data *b = &q->tins[i];
+
+ cake_set_rate(b, rate, mtu, us_to_ns(q->target),
+ us_to_ns(q->interval));
+
+ b->tin_quantum_prio = max_t(u16, 1U, quantum1);
+ b->tin_quantum_band = max_t(u16, 1U, quantum2);
+
+ /* calculate next class's parameters */
+ rate *= 7;
+ rate >>= 3;
+
+ quantum1 *= 3;
+ quantum1 >>= 1;
+
+ quantum2 *= 7;
+ quantum2 >>= 3;
+ }
+
+ return 0;
+}
+
+static int cake_config_diffserv4(struct Qdisc *sch)
+{
+/* Further pruned list of traffic classes for four-class system:
+ *
+ * Latency Sensitive (CS7, CS6, EF, VA, CS5, CS4)
+ * Streaming Media (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
+ * Best Effort (CS0, AF1x, TOS2, and those not specified)
+ * Background Traffic (CS1)
+ *
+ * Total 4 traffic classes.
+ */
+
+ struct cake_sched_data *q = qdisc_priv(sch);
+ u32 mtu = psched_mtu(qdisc_dev(sch));
+ u64 rate = q->rate_bps;
+ u32 quantum = 1024;
+
+ q->tin_cnt = 4;
+
+ /* codepoint to class mapping */
+ q->tin_index = diffserv4;
+ q->tin_order = bulk_order;
+
+ /* class characteristics */
+ cake_set_rate(&q->tins[0], rate, mtu,
+ us_to_ns(q->target), us_to_ns(q->interval));
+ cake_set_rate(&q->tins[1], rate >> 4, mtu,
+ us_to_ns(q->target), us_to_ns(q->interval));
+ cake_set_rate(&q->tins[2], rate >> 1, mtu,
+ us_to_ns(q->target), us_to_ns(q->interval));
+ cake_set_rate(&q->tins[3], rate >> 2, mtu,
+ us_to_ns(q->target), us_to_ns(q->interval));
+
+ /* priority weights */
+ q->tins[0].tin_quantum_prio = quantum;
+ q->tins[1].tin_quantum_prio = quantum >> 4;
+ q->tins[2].tin_quantum_prio = quantum << 2;
+ q->tins[3].tin_quantum_prio = quantum << 4;
+
+ /* bandwidth-sharing weights */
+ q->tins[0].tin_quantum_band = quantum;
+ q->tins[1].tin_quantum_band = quantum >> 4;
+ q->tins[2].tin_quantum_band = quantum >> 1;
+ q->tins[3].tin_quantum_band = quantum >> 2;
+
+ return 0;
+}
+
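+/* Reading the shifts above as shares of the base rate: tin 1 is
+ * shaped to 1/16, tin 2 to 1/2 and tin 3 to 1/4 of it, while tin 0
+ * keeps the full rate; the tin_quantum_band values encode the same
+ * 16:1:8:4 ratio for bandwidth sharing when tins contend.
+ */
+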
+static int cake_config_diffserv3(struct Qdisc *sch)
+{
+/* Simplified Diffserv structure with 3 tins.
+ * Low Priority (CS1)
+ * Best Effort
+ * Latency Sensitive (TOS4, VA, EF, CS6, CS7)
+ */
+ struct cake_sched_data *q = qdisc_priv(sch);
+ u32 mtu = psched_mtu(qdisc_dev(sch));
+ u64 rate = q->rate_bps;
+ u32 quantum = 1024;
+
+ q->tin_cnt = 3;
+
+ /* codepoint to class mapping */
+ q->tin_index = diffserv3;
+ q->tin_order = bulk_order;
+
+ /* class characteristics */
+ cake_set_rate(&q->tins[0], rate, mtu,
+ us_to_ns(q->target), us_to_ns(q->interval));
+ cake_set_rate(&q->tins[1], rate >> 4, mtu,
+ us_to_ns(q->target), us_to_ns(q->interval));
+ cake_set_rate(&q->tins[2], rate >> 2, mtu,
+ us_to_ns(q->target), us_to_ns(q->interval));
+
+ /* priority weights */
+ q->tins[0].tin_quantum_prio = quantum;
+ q->tins[1].tin_quantum_prio = quantum >> 4;
+ q->tins[2].tin_quantum_prio = quantum << 4;
+
+ /* bandwidth-sharing weights */
+ q->tins[0].tin_quantum_band = quantum;
+ q->tins[1].tin_quantum_band = quantum >> 4;
+ q->tins[2].tin_quantum_band = quantum >> 2;
+
+ return 0;
+}
+
+static void cake_reconfigure(struct Qdisc *sch)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+ int c, ft;
+
+ switch (q->tin_mode) {
+ case CAKE_DIFFSERV_BESTEFFORT:
+ ft = cake_config_besteffort(sch);
+ break;
+
+ case CAKE_DIFFSERV_PRECEDENCE:
+ ft = cake_config_precedence(sch);
+ break;
+
+ case CAKE_DIFFSERV_DIFFSERV8:
+ ft = cake_config_diffserv8(sch);
+ break;
+
+ case CAKE_DIFFSERV_DIFFSERV4:
+ ft = cake_config_diffserv4(sch);
+ break;
+
+ case CAKE_DIFFSERV_DIFFSERV3:
+ default:
+ ft = cake_config_diffserv3(sch);
+ break;
+ }
+
+ for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) {
+ cake_clear_tin(sch, c);
+ q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time;
+ }
+
+ q->rate_ns = q->tins[ft].tin_rate_ns;
+ q->rate_shft = q->tins[ft].tin_rate_shft;
+
+ if (q->buffer_config_limit) {
+ q->buffer_limit = q->buffer_config_limit;
+ } else if (q->rate_bps) {
+ u64 t = q->rate_bps * q->interval;
+
+ do_div(t, USEC_PER_SEC / 4);
+ q->buffer_limit = max_t(u32, t, 4U << 20);
+ } else {
+ q->buffer_limit = ~0;
+ }
+
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+
+ q->buffer_limit = min(q->buffer_limit,
+ max(sch->limit * psched_mtu(qdisc_dev(sch)),
+ q->buffer_config_limit));
+}
+
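+/* Worked example for the sizing above (illustrative numbers, using
+ * the byte-rate convention noted in cake_set_rate()): at rate_bps =
+ * 1048576 bytes/s and the default interval of 100000 us,
+ * t = 1048576 * 100000 / 250000 = 419430, i.e. four intervals'
+ * worth of bytes, which max_t() then raises to the 4 MB floor.
+ */
+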
+static int cake_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_CAKE_MAX + 1];
+ int err;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_CAKE_MAX, opt, cake_policy, extack);
+ if (err < 0)
+ return err;
+
+ if (tb[TCA_CAKE_NAT]) {
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+ q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+ !!nla_get_u32(tb[TCA_CAKE_NAT]);
+#else
+ NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
+ "No conntrack support in kernel");
+ return -EOPNOTSUPP;
+#endif
+ }
+
+ if (tb[TCA_CAKE_BASE_RATE64])
+ q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
+
+ if (tb[TCA_CAKE_DIFFSERV_MODE])
+ q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]);
+
+ if (tb[TCA_CAKE_WASH]) {
+ if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
+ q->rate_flags |= CAKE_FLAG_WASH;
+ else
+ q->rate_flags &= ~CAKE_FLAG_WASH;
+ }
+
+ if (tb[TCA_CAKE_FLOW_MODE])
+ q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
+ (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
+ CAKE_FLOW_MASK));
+
+ if (tb[TCA_CAKE_ATM])
+ q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]);
+
+ if (tb[TCA_CAKE_OVERHEAD]) {
+ q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]);
+ q->rate_flags |= CAKE_FLAG_OVERHEAD;
+
+ q->max_netlen = 0;
+ q->max_adjlen = 0;
+ q->min_netlen = ~0;
+ q->min_adjlen = ~0;
+ }
+
+ if (tb[TCA_CAKE_RAW]) {
+ q->rate_flags &= ~CAKE_FLAG_OVERHEAD;
+
+ q->max_netlen = 0;
+ q->max_adjlen = 0;
+ q->min_netlen = ~0;
+ q->min_adjlen = ~0;
+ }
+
+ if (tb[TCA_CAKE_MPU])
+ q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]);
+
+ if (tb[TCA_CAKE_RTT]) {
+ q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
+
+ if (!q->interval)
+ q->interval = 1;
+ }
+
+ if (tb[TCA_CAKE_TARGET]) {
+ q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
+
+ if (!q->target)
+ q->target = 1;
+ }
+
+ if (tb[TCA_CAKE_AUTORATE]) {
+ if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
+ q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
+ else
+ q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
+ }
+
+ if (tb[TCA_CAKE_INGRESS]) {
+ if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
+ q->rate_flags |= CAKE_FLAG_INGRESS;
+ else
+ q->rate_flags &= ~CAKE_FLAG_INGRESS;
+ }
+
+ if (tb[TCA_CAKE_ACK_FILTER])
+ q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
+
+ if (tb[TCA_CAKE_MEMORY])
+ q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
+
+ if (tb[TCA_CAKE_SPLIT_GSO]) {
+ if (!!nla_get_u32(tb[TCA_CAKE_SPLIT_GSO]))
+ q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
+ else
+ q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
+ }
+
+ if (q->tins) {
+ sch_tree_lock(sch);
+ cake_reconfigure(sch);
+ sch_tree_unlock(sch);
+ }
+
+ return 0;
+}
+
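+/* Example of driving these attributes from userspace, assuming the
+ * matching iproute2 cake support (the keywords map onto the
+ * TCA_CAKE_* attributes parsed above):
+ *
+ *   tc qdisc replace dev eth0 root cake bandwidth 20Mbit diffserv4 nat
+ */
+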
+static void cake_destroy(struct Qdisc *sch)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+
+ qdisc_watchdog_cancel(&q->watchdog);
+ tcf_block_put(q->block);
+ kvfree(q->tins);
+}
+
+static int cake_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+ int i, j, err;
+
+ sch->limit = 10240;
+ q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
+ q->flow_mode = CAKE_FLOW_TRIPLE;
+
+ q->rate_bps = 0; /* unlimited by default */
+
+ q->interval = 100000; /* 100ms default */
+ q->target = 5000; /* 5ms: codel RFC argues
+ * for 5 to 10% of interval
+ */
+ q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
+ q->cur_tin = 0;
+ q->cur_flow = 0;
+
+ qdisc_watchdog_init(&q->watchdog, sch);
+
+ if (opt) {
+ int err = cake_change(sch, opt, extack);
+
+ if (err)
+ return err;
+ }
+
+ err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
+ if (err)
+ return err;
+
+ quantum_div[0] = ~0;
+ for (i = 1; i <= CAKE_QUEUES; i++)
+ quantum_div[i] = 65535 / i;
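+ /* quantum_div[] is a reciprocal table: with it, dividing a quantum
+ * by host_load in the dequeue hot path becomes a multiply by
+ * 65535 / host_load plus a 16-bit shift (see cake_dequeue()).
+ */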
+
+ q->tins = kvzalloc(CAKE_MAX_TINS * sizeof(struct cake_tin_data),
+ GFP_KERNEL);
+ if (!q->tins)
+ goto nomem;
+
+ for (i = 0; i < CAKE_MAX_TINS; i++) {
+ struct cake_tin_data *b = q->tins + i;
+
+ INIT_LIST_HEAD(&b->new_flows);
+ INIT_LIST_HEAD(&b->old_flows);
+ INIT_LIST_HEAD(&b->decaying_flows);
+ b->sparse_flow_count = 0;
+ b->bulk_flow_count = 0;
+ b->decaying_flow_count = 0;
+
+ for (j = 0; j < CAKE_QUEUES; j++) {
+ struct cake_flow *flow = b->flows + j;
+ u32 k = j * CAKE_MAX_TINS + i;
+
+ INIT_LIST_HEAD(&flow->flowchain);
+ cobalt_vars_init(&flow->cvars);
+
+ q->overflow_heap[k].t = i;
+ q->overflow_heap[k].b = j;
+ b->overflow_idx[j] = k;
+ }
+ }
+
+ cake_reconfigure(sch);
+ q->avg_peak_bandwidth = q->rate_bps;
+ q->min_netlen = ~0;
+ q->min_adjlen = ~0;
+ return 0;
+
+nomem:
+ cake_destroy(sch);
+ return -ENOMEM;
+}
+
+static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (!opts)
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
+ TCA_CAKE_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
+ q->flow_mode & CAKE_FLOW_MASK))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
+ !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_INGRESS,
+ !!(q->rate_flags & CAKE_FLAG_INGRESS)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_NAT,
+ !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_WASH,
+ !!(q->rate_flags & CAKE_FLAG_WASH)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead))
+ goto nla_put_failure;
+
+ if (!(q->rate_flags & CAKE_FLAG_OVERHEAD))
+ if (nla_put_u32(skb, TCA_CAKE_RAW, 0))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
+ !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
+ goto nla_put_failure;
+
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ return -1;
+}
+
+static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct nlattr *stats = nla_nest_start(d->skb, TCA_STATS_APP);
+ struct cake_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tstats, *ts;
+ int i;
+
+ if (!stats)
+ return -1;
+
+#define PUT_STAT_U32(attr, data) do { \
+ if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
+ goto nla_put_failure; \
+ } while (0)
+#define PUT_STAT_U64(attr, data) do { \
+ if (nla_put_u64_64bit(d->skb, TCA_CAKE_STATS_ ## attr, \
+ data, TCA_CAKE_STATS_PAD)) \
+ goto nla_put_failure; \
+ } while (0)
+
+ PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth);
+ PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit);
+ PUT_STAT_U32(MEMORY_USED, q->buffer_max_used);
+ PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16));
+ PUT_STAT_U32(MAX_NETLEN, q->max_netlen);
+ PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen);
+ PUT_STAT_U32(MIN_NETLEN, q->min_netlen);
+ PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen);
+
+#undef PUT_STAT_U32
+#undef PUT_STAT_U64
+
+ tstats = nla_nest_start(d->skb, TCA_CAKE_STATS_TIN_STATS);
+ if (!tstats)
+ goto nla_put_failure;
+
+#define PUT_TSTAT_U32(attr, data) do { \
+ if (nla_put_u32(d->skb, TCA_CAKE_TIN_STATS_ ## attr, data)) \
+ goto nla_put_failure; \
+ } while (0)
+#define PUT_TSTAT_U64(attr, data) do { \
+ if (nla_put_u64_64bit(d->skb, TCA_CAKE_TIN_STATS_ ## attr, \
+ data, TCA_CAKE_TIN_STATS_PAD)) \
+ goto nla_put_failure; \
+ } while (0)
+
+ for (i = 0; i < q->tin_cnt; i++) {
+ struct cake_tin_data *b = &q->tins[q->tin_order[i]];
+
+ ts = nla_nest_start(d->skb, i + 1);
+ if (!ts)
+ goto nla_put_failure;
+
+ PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps);
+ PUT_TSTAT_U64(SENT_BYTES64, b->bytes);
+ PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog);
+
+ PUT_TSTAT_U32(TARGET_US,
+ ktime_to_us(ns_to_ktime(b->cparams.target)));
+ PUT_TSTAT_U32(INTERVAL_US,
+ ktime_to_us(ns_to_ktime(b->cparams.interval)));
+
+ PUT_TSTAT_U32(SENT_PACKETS, b->packets);
+ PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped);
+ PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark);
+ PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops);
+
+ PUT_TSTAT_U32(PEAK_DELAY_US,
+ ktime_to_us(ns_to_ktime(b->peak_delay)));
+ PUT_TSTAT_U32(AVG_DELAY_US,
+ ktime_to_us(ns_to_ktime(b->avge_delay)));
+ PUT_TSTAT_U32(BASE_DELAY_US,
+ ktime_to_us(ns_to_ktime(b->base_delay)));
+
+ PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits);
+ PUT_TSTAT_U32(WAY_MISSES, b->way_misses);
+ PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions);
+
+ PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count +
+ b->decaying_flow_count);
+ PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count);
+ PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count);
+ PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen);
+
+ PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum);
+ nla_nest_end(d->skb, ts);
+ }
+
+#undef PUT_TSTAT_U32
+#undef PUT_TSTAT_U64
+
+ nla_nest_end(d->skb, tstats);
+ return nla_nest_end(d->skb, stats);
+
+nla_put_failure:
+ nla_nest_cancel(d->skb, stats);
+ return -1;
+}
+
+static struct Qdisc *cake_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ return NULL;
+}
+
+static unsigned long cake_find(struct Qdisc *sch, u32 classid)
+{
+ return 0;
+}
+
+static unsigned long cake_bind(struct Qdisc *sch, unsigned long parent,
+ u32 classid)
+{
+ return 0;
+}
+
+static void cake_unbind(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static struct tcf_block *cake_tcf_block(struct Qdisc *sch, unsigned long cl,
+ struct netlink_ext_ack *extack)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+
+ if (cl)
+ return NULL;
+ return q->block;
+}
+
+static int cake_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ tcm->tcm_handle |= TC_H_MIN(cl);
+ return 0;
+}
+
+static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ struct gnet_dump *d)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+ const struct cake_flow *flow = NULL;
+ struct gnet_stats_queue qs = { 0 };
+ struct nlattr *stats;
+ u32 idx = cl - 1;
+
+ if (idx < CAKE_QUEUES * q->tin_cnt) {
+ const struct cake_tin_data *b =
+ &q->tins[q->tin_order[idx / CAKE_QUEUES]];
+ const struct sk_buff *skb;
+
+ flow = &b->flows[idx % CAKE_QUEUES];
+
+ if (flow->head) {
+ sch_tree_lock(sch);
+ skb = flow->head;
+ while (skb) {
+ qs.qlen++;
+ skb = skb->next;
+ }
+ sch_tree_unlock(sch);
+ }
+ qs.backlog = b->backlogs[idx % CAKE_QUEUES];
+ qs.drops = flow->dropped;
+ }
+ if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
+ return -1;
+ if (flow) {
+ ktime_t now = ktime_get();
+
+ stats = nla_nest_start(d->skb, TCA_STATS_APP);
+ if (!stats)
+ return -1;
+
+#define PUT_STAT_U32(attr, data) do { \
+ if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
+ goto nla_put_failure; \
+ } while (0)
+#define PUT_STAT_S32(attr, data) do { \
+ if (nla_put_s32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
+ goto nla_put_failure; \
+ } while (0)
+
+ PUT_STAT_S32(DEFICIT, flow->deficit);
+ PUT_STAT_U32(DROPPING, flow->cvars.dropping);
+ PUT_STAT_U32(COBALT_COUNT, flow->cvars.count);
+ PUT_STAT_U32(P_DROP, flow->cvars.p_drop);
+ if (flow->cvars.p_drop) {
+ PUT_STAT_S32(BLUE_TIMER_US,
+ ktime_to_us(
+ ktime_sub(now,
+ flow->cvars.blue_timer)));
+ }
+ if (flow->cvars.dropping) {
+ PUT_STAT_S32(DROP_NEXT_US,
+ ktime_to_us(
+ ktime_sub(now,
+ flow->cvars.drop_next)));
+ }
+
+ if (nla_nest_end(d->skb, stats) < 0)
+ return -1;
+ }
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(d->skb, stats);
+ return -1;
+}
+
+static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ struct cake_sched_data *q = qdisc_priv(sch);
+ unsigned int i, j;
+
+ if (arg->stop)
+ return;
+
+ for (i = 0; i < q->tin_cnt; i++) {
+ struct cake_tin_data *b = &q->tins[q->tin_order[i]];
+
+ for (j = 0; j < CAKE_QUEUES; j++) {
+ if (list_empty(&b->flows[j].flowchain) ||
+ arg->count < arg->skip) {
+ arg->count++;
+ continue;
+ }
+ if (arg->fn(sch, i * CAKE_QUEUES + j + 1, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->count++;
+ }
+ }
+}
+
+static const struct Qdisc_class_ops cake_class_ops = {
+ .leaf = cake_leaf,
+ .find = cake_find,
+ .tcf_block = cake_tcf_block,
+ .bind_tcf = cake_bind,
+ .unbind_tcf = cake_unbind,
+ .dump = cake_dump_class,
+ .dump_stats = cake_dump_class_stats,
+ .walk = cake_walk,
+};
+
+static struct Qdisc_ops cake_qdisc_ops __read_mostly = {
+ .cl_ops = &cake_class_ops,
+ .id = "cake",
+ .priv_size = sizeof(struct cake_sched_data),
+ .enqueue = cake_enqueue,
+ .dequeue = cake_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .init = cake_init,
+ .reset = cake_reset,
+ .destroy = cake_destroy,
+ .change = cake_change,
+ .dump = cake_dump,
+ .dump_stats = cake_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init cake_module_init(void)
+{
+ return register_qdisc(&cake_qdisc_ops);
+}
+
+static void __exit cake_module_exit(void)
+{
+ unregister_qdisc(&cake_qdisc_ops);
+}
+
+module_init(cake_module_init)
+module_exit(cake_module_exit)
+MODULE_AUTHOR("Jonathan Morton");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("The CAKE shaper.");
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index cdd96b9a27bc..e26a24017faa 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -78,18 +78,42 @@ struct cbs_sched_data {
s64 sendslope; /* in bytes/s */
s64 idleslope; /* in bytes/s */
struct qdisc_watchdog watchdog;
- int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch);
+ int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free);
struct sk_buff *(*dequeue)(struct Qdisc *sch);
+ struct Qdisc *qdisc;
};
-static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch)
+static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct Qdisc *child,
+ struct sk_buff **to_free)
{
- return qdisc_enqueue_tail(skb, sch);
+ int err;
+
+ err = child->ops->enqueue(skb, child, to_free);
+ if (err != NET_XMIT_SUCCESS)
+ return err;
+
+ qdisc_qstats_backlog_inc(sch, skb);
+ sch->q.qlen++;
+
+ return NET_XMIT_SUCCESS;
}
-static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch)
+static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
{
struct cbs_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *qdisc = q->qdisc;
+
+ return cbs_child_enqueue(skb, sch, qdisc, to_free);
+}
+
+static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ struct cbs_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *qdisc = q->qdisc;
if (sch->q.qlen == 0 && q->credits > 0) {
/* We need to stop accumulating credits when there's
@@ -99,7 +123,7 @@ static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch)
q->last = ktime_get_ns();
}
- return qdisc_enqueue_tail(skb, sch);
+ return cbs_child_enqueue(skb, sch, qdisc, to_free);
}
static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch,
@@ -107,7 +131,7 @@ static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch,
{
struct cbs_sched_data *q = qdisc_priv(sch);
- return q->enqueue(skb, sch);
+ return q->enqueue(skb, sch, to_free);
}
/* timediff is in ns, slope is in bytes/s */
@@ -132,9 +156,25 @@ static s64 credits_from_len(unsigned int len, s64 slope, s64 port_rate)
return div64_s64(len * slope, port_rate);
}
+static struct sk_buff *cbs_child_dequeue(struct Qdisc *sch, struct Qdisc *child)
+{
+ struct sk_buff *skb;
+
+ skb = child->ops->dequeue(child);
+ if (!skb)
+ return NULL;
+
+ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_bstats_update(sch, skb);
+ sch->q.qlen--;
+
+ return skb;
+}
+
static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
{
struct cbs_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *qdisc = q->qdisc;
s64 now = ktime_get_ns();
struct sk_buff *skb;
s64 credits;
@@ -157,8 +197,7 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
return NULL;
}
}
-
- skb = qdisc_dequeue_head(sch);
+ skb = cbs_child_dequeue(sch, qdisc);
if (!skb)
return NULL;
@@ -178,7 +217,10 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
static struct sk_buff *cbs_dequeue_offload(struct Qdisc *sch)
{
- return qdisc_dequeue_head(sch);
+ struct cbs_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *qdisc = q->qdisc;
+
+ return cbs_child_dequeue(sch, qdisc);
}
static struct sk_buff *cbs_dequeue(struct Qdisc *sch)
@@ -310,6 +352,13 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
return -EINVAL;
}
+ q->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ sch->handle, extack);
+ if (!q->qdisc)
+ return -ENOMEM;
+
+ qdisc_hash_add(q->qdisc, false);
+
q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
q->enqueue = cbs_enqueue_soft;
@@ -328,6 +377,9 @@ static void cbs_destroy(struct Qdisc *sch)
qdisc_watchdog_cancel(&q->watchdog);
cbs_disable_offload(dev, q);
+
+ if (q->qdisc)
+ qdisc_destroy(q->qdisc);
}
static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -356,8 +408,72 @@ nla_put_failure:
return -1;
}
+static int cbs_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ struct cbs_sched_data *q = qdisc_priv(sch);
+
+ if (cl != 1 || !q->qdisc) /* only one class */
+ return -ENOENT;
+
+ tcm->tcm_handle |= TC_H_MIN(1);
+ tcm->tcm_info = q->qdisc->handle;
+
+ return 0;
+}
+
+static int cbs_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ struct Qdisc **old, struct netlink_ext_ack *extack)
+{
+ struct cbs_sched_data *q = qdisc_priv(sch);
+
+ if (!new) {
+ new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ sch->handle, NULL);
+ if (!new)
+ new = &noop_qdisc;
+ }
+
+ *old = qdisc_replace(sch, new, &q->qdisc);
+ return 0;
+}
+
+static struct Qdisc *cbs_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ struct cbs_sched_data *q = qdisc_priv(sch);
+
+ return q->qdisc;
+}
+
+static unsigned long cbs_find(struct Qdisc *sch, u32 classid)
+{
+ return 1;
+}
+
+static void cbs_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+ if (!walker->stop) {
+ if (walker->count >= walker->skip) {
+ if (walker->fn(sch, 1, walker) < 0) {
+ walker->stop = 1;
+ return;
+ }
+ }
+ walker->count++;
+ }
+}
+
+static const struct Qdisc_class_ops cbs_class_ops = {
+ .graft = cbs_graft,
+ .leaf = cbs_leaf,
+ .find = cbs_find,
+ .walk = cbs_walk,
+ .dump = cbs_dump_class,
+};
+
static struct Qdisc_ops cbs_qdisc_ops __read_mostly = {
.id = "cbs",
+ .cl_ops = &cbs_class_ops,
.priv_size = sizeof(struct cbs_sched_data),
.enqueue = cbs_enqueue,
.dequeue = cbs_dequeue,
diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
new file mode 100644
index 000000000000..1538d6fa8165
--- /dev/null
+++ b/net/sched/sch_etf.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* net/sched/sch_etf.c Earliest TxTime First queueing discipline.
+ *
+ * Authors: Jesus Sanchez-Palencia <jesus.sanchez-palencia@intel.com>
+ * Vinicius Costa Gomes <vinicius.gomes@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/errqueue.h>
+#include <linux/rbtree.h>
+#include <linux/skbuff.h>
+#include <linux/posix-timers.h>
+#include <net/netlink.h>
+#include <net/sch_generic.h>
+#include <net/pkt_sched.h>
+#include <net/sock.h>
+
+#define DEADLINE_MODE_IS_ON(x) ((x)->flags & TC_ETF_DEADLINE_MODE_ON)
+#define OFFLOAD_IS_ON(x) ((x)->flags & TC_ETF_OFFLOAD_ON)
+
+struct etf_sched_data {
+ bool offload;
+ bool deadline_mode;
+ int clockid;
+ int queue;
+ s32 delta; /* in ns */
+ ktime_t last; /* The txtime of the last skb sent to the netdevice. */
+ struct rb_root head;
+ struct qdisc_watchdog watchdog;
+ ktime_t (*get_time)(void);
+};
+
+static const struct nla_policy etf_policy[TCA_ETF_MAX + 1] = {
+ [TCA_ETF_PARMS] = { .len = sizeof(struct tc_etf_qopt) },
+};
+
+static inline int validate_input_params(struct tc_etf_qopt *qopt,
+ struct netlink_ext_ack *extack)
+{
+ /* Check that the params comply with the following rules:
+ * * Clockid and delta must be valid.
+ *
+ * * Dynamic clockids are not supported.
+ *
+ * * Delta must not be negative.
+ *
+ * Also note that for the HW offload case, we assume the system
+ * clock has been synchronized to the PHC.
+ */
+ if (qopt->clockid < 0) {
+ NL_SET_ERR_MSG(extack, "Dynamic clockids are not supported");
+ return -ENOTSUPP;
+ }
+
+ if (qopt->clockid != CLOCK_TAI) {
+ NL_SET_ERR_MSG(extack, "Invalid clockid. CLOCK_TAI must be used");
+ return -EINVAL;
+ }
+
+ if (qopt->delta < 0) {
+ NL_SET_ERR_MSG(extack, "Delta must be positive");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb)
+{
+ struct etf_sched_data *q = qdisc_priv(sch);
+ ktime_t txtime = nskb->tstamp;
+ struct sock *sk = nskb->sk;
+ ktime_t now;
+
+ if (!sk)
+ return false;
+
+ if (!sock_flag(sk, SOCK_TXTIME))
+ return false;
+
+ /* We don't perform crosstimestamping.
+ * Drop if packet's clockid differs from qdisc's.
+ */
+ if (sk->sk_clockid != q->clockid)
+ return false;
+
+ if (sk->sk_txtime_deadline_mode != q->deadline_mode)
+ return false;
+
+ now = q->get_time();
+ if (ktime_before(txtime, now) || ktime_before(txtime, q->last))
+ return false;
+
+ return true;
+}
+
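+/* Sketch of the userspace side these checks expect, using the
+ * SO_TXTIME socket API added alongside this qdisc (error handling
+ * omitted):
+ *
+ *   struct sock_txtime st = { .clockid = CLOCK_TAI, .flags = 0 };
+ *   setsockopt(fd, SOL_SOCKET, SO_TXTIME, &st, sizeof(st));
+ *
+ * and then attach an SCM_TXTIME cmsg carrying the u64 txtime to
+ * each sendmsg() call.
+ */
+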
+static struct sk_buff *etf_peek_timesortedlist(struct Qdisc *sch)
+{
+ struct etf_sched_data *q = qdisc_priv(sch);
+ struct rb_node *p;
+
+ p = rb_first(&q->head);
+ if (!p)
+ return NULL;
+
+ return rb_to_skb(p);
+}
+
+static void reset_watchdog(struct Qdisc *sch)
+{
+ struct etf_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb = etf_peek_timesortedlist(sch);
+ ktime_t next;
+
+ if (!skb)
+ return;
+
+ next = ktime_sub_ns(skb->tstamp, q->delta);
+ qdisc_watchdog_schedule_ns(&q->watchdog, ktime_to_ns(next));
+}
+
+static void report_sock_error(struct sk_buff *skb, u32 err, u8 code)
+{
+ struct sock_exterr_skb *serr;
+ struct sk_buff *clone;
+ ktime_t txtime = skb->tstamp;
+
+ if (!skb->sk || !(skb->sk->sk_txtime_report_errors))
+ return;
+
+ clone = skb_clone(skb, GFP_ATOMIC);
+ if (!clone)
+ return;
+
+ serr = SKB_EXT_ERR(clone);
+ serr->ee.ee_errno = err;
+ serr->ee.ee_origin = SO_EE_ORIGIN_TXTIME;
+ serr->ee.ee_type = 0;
+ serr->ee.ee_code = code;
+ serr->ee.ee_pad = 0;
+ serr->ee.ee_data = (txtime >> 32); /* high part of tstamp */
+ serr->ee.ee_info = txtime; /* low part of tstamp */
+
+ if (sock_queue_err_skb(skb->sk, clone))
+ kfree_skb(clone);
+}
+
+static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ struct etf_sched_data *q = qdisc_priv(sch);
+ struct rb_node **p = &q->head.rb_node, *parent = NULL;
+ ktime_t txtime = nskb->tstamp;
+
+ if (!is_packet_valid(sch, nskb)) {
+ report_sock_error(nskb, EINVAL,
+ SO_EE_CODE_TXTIME_INVALID_PARAM);
+ return qdisc_drop(nskb, sch, to_free);
+ }
+
+ while (*p) {
+ struct sk_buff *skb;
+
+ parent = *p;
+ skb = rb_to_skb(parent);
+ if (ktime_after(txtime, skb->tstamp))
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&nskb->rbnode, parent, p);
+ rb_insert_color(&nskb->rbnode, &q->head);
+
+ qdisc_qstats_backlog_inc(sch, nskb);
+ sch->q.qlen++;
+
+ /* Now we may need to re-arm the qdisc watchdog for the next packet. */
+ reset_watchdog(sch);
+
+ return NET_XMIT_SUCCESS;
+}
+
+static void timesortedlist_erase(struct Qdisc *sch, struct sk_buff *skb,
+ bool drop)
+{
+ struct etf_sched_data *q = qdisc_priv(sch);
+
+ rb_erase(&skb->rbnode, &q->head);
+
+ /* The rbnode field in the skb re-uses these fields; now that
+ * we are done with the rbnode, reset them.
+ */
+ skb->next = NULL;
+ skb->prev = NULL;
+ skb->dev = qdisc_dev(sch);
+
+ qdisc_qstats_backlog_dec(sch, skb);
+
+ if (drop) {
+ struct sk_buff *to_free = NULL;
+
+ report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED);
+
+ qdisc_drop(skb, sch, &to_free);
+ kfree_skb_list(to_free);
+ qdisc_qstats_overlimit(sch);
+ } else {
+ qdisc_bstats_update(sch, skb);
+
+ q->last = skb->tstamp;
+ }
+
+ sch->q.qlen--;
+}
+
+static struct sk_buff *etf_dequeue_timesortedlist(struct Qdisc *sch)
+{
+ struct etf_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+ ktime_t now, next;
+
+ skb = etf_peek_timesortedlist(sch);
+ if (!skb)
+ return NULL;
+
+ now = q->get_time();
+
+ /* Drop if packet has expired while in queue. */
+ if (ktime_before(skb->tstamp, now)) {
+ timesortedlist_erase(sch, skb, true);
+ skb = NULL;
+ goto out;
+ }
+
+ /* When in deadline mode, dequeue as soon as possible and change the
+ * txtime from deadline to (now + delta).
+ */
+ if (q->deadline_mode) {
+ timesortedlist_erase(sch, skb, false);
+ skb->tstamp = now;
+ goto out;
+ }
+
+ next = ktime_sub_ns(skb->tstamp, q->delta);
+
+ /* Dequeue only if now is within the [txtime - delta, txtime] range. */
+ if (ktime_after(now, next))
+ timesortedlist_erase(sch, skb, false);
+ else
+ skb = NULL;
+
+out:
+ /* Now we may need to re-arm the qdisc watchdog for the next packet. */
+ reset_watchdog(sch);
+
+ return skb;
+}
+
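+/* Timeline for the non-deadline path above, with an illustrative
+ * delta = 300us and a packet whose txtime is T: the watchdog is
+ * armed for T - 300us, the packet is dequeueable anywhere in
+ * [T - 300us, T], and once now passes T it is dropped with
+ * ECANCELED / SO_EE_CODE_TXTIME_MISSED rather than sent late.
+ */
+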
+static void etf_disable_offload(struct net_device *dev,
+ struct etf_sched_data *q)
+{
+ struct tc_etf_qopt_offload etf = { };
+ const struct net_device_ops *ops;
+ int err;
+
+ if (!q->offload)
+ return;
+
+ ops = dev->netdev_ops;
+ if (!ops->ndo_setup_tc)
+ return;
+
+ etf.queue = q->queue;
+ etf.enable = 0;
+
+ err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETF, &etf);
+ if (err < 0)
+ pr_warn("Couldn't disable ETF offload for queue %d\n",
+ etf.queue);
+}
+
+static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q,
+ struct netlink_ext_ack *extack)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ struct tc_etf_qopt_offload etf = { };
+ int err;
+
+ if (q->offload)
+ return 0;
+
+ if (!ops->ndo_setup_tc) {
+ NL_SET_ERR_MSG(extack, "Specified device does not support ETF offload");
+ return -EOPNOTSUPP;
+ }
+
+ etf.queue = q->queue;
+ etf.enable = 1;
+
+ err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETF, &etf);
+ if (err < 0) {
+ NL_SET_ERR_MSG(extack, "Specified device failed to setup ETF hardware offload");
+ return err;
+ }
+
+ return 0;
+}
+
+static int etf_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct etf_sched_data *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ struct nlattr *tb[TCA_ETF_MAX + 1];
+ struct tc_etf_qopt *qopt;
+ int err;
+
+ if (!opt) {
+ NL_SET_ERR_MSG(extack,
+ "Missing ETF qdisc options which are mandatory");
+ return -EINVAL;
+ }
+
+ err = nla_parse_nested(tb, TCA_ETF_MAX, opt, etf_policy, extack);
+ if (err < 0)
+ return err;
+
+ if (!tb[TCA_ETF_PARMS]) {
+ NL_SET_ERR_MSG(extack, "Missing mandatory ETF parameters");
+ return -EINVAL;
+ }
+
+ qopt = nla_data(tb[TCA_ETF_PARMS]);
+
+ pr_debug("delta %d clockid %d offload %s deadline %s\n",
+ qopt->delta, qopt->clockid,
+ OFFLOAD_IS_ON(qopt) ? "on" : "off",
+ DEADLINE_MODE_IS_ON(qopt) ? "on" : "off");
+
+ err = validate_input_params(qopt, extack);
+ if (err < 0)
+ return err;
+
+ q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
+
+ if (OFFLOAD_IS_ON(qopt)) {
+ err = etf_enable_offload(dev, q, extack);
+ if (err < 0)
+ return err;
+ }
+
+ /* Everything went OK, save the parameters used. */
+ q->delta = qopt->delta;
+ q->clockid = qopt->clockid;
+ q->offload = OFFLOAD_IS_ON(qopt);
+ q->deadline_mode = DEADLINE_MODE_IS_ON(qopt);
+
+ switch (q->clockid) {
+ case CLOCK_REALTIME:
+ q->get_time = ktime_get_real;
+ break;
+ case CLOCK_MONOTONIC:
+ q->get_time = ktime_get;
+ break;
+ case CLOCK_BOOTTIME:
+ q->get_time = ktime_get_boottime;
+ break;
+ case CLOCK_TAI:
+ q->get_time = ktime_get_clocktai;
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Clockid is not supported");
+ return -ENOTSUPP;
+ }
+
+ qdisc_watchdog_init_clockid(&q->watchdog, sch, q->clockid);
+
+ return 0;
+}
+
+static void timesortedlist_clear(struct Qdisc *sch)
+{
+ struct etf_sched_data *q = qdisc_priv(sch);
+ struct rb_node *p = rb_first(&q->head);
+
+ while (p) {
+ struct sk_buff *skb = rb_to_skb(p);
+
+ p = rb_next(p);
+
+ rb_erase(&skb->rbnode, &q->head);
+ rtnl_kfree_skbs(skb, skb);
+ sch->q.qlen--;
+ }
+}
+
+static void etf_reset(struct Qdisc *sch)
+{
+ struct etf_sched_data *q = qdisc_priv(sch);
+
+ /* Only cancel watchdog if it's been initialized. */
+ if (q->watchdog.qdisc == sch)
+ qdisc_watchdog_cancel(&q->watchdog);
+
+ /* No matter which mode we are in, it's safe to clear both lists. */
+ timesortedlist_clear(sch);
+ __qdisc_reset_queue(&sch->q);
+
+ sch->qstats.backlog = 0;
+ sch->q.qlen = 0;
+
+ q->last = 0;
+}
+
+static void etf_destroy(struct Qdisc *sch)
+{
+ struct etf_sched_data *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+
+ /* Only cancel watchdog if it's been initialized. */
+ if (q->watchdog.qdisc == sch)
+ qdisc_watchdog_cancel(&q->watchdog);
+
+ etf_disable_offload(dev, q);
+}
+
+static int etf_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct etf_sched_data *q = qdisc_priv(sch);
+ struct tc_etf_qopt opt = { };
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, TCA_OPTIONS);
+ if (!nest)
+ goto nla_put_failure;
+
+ opt.delta = q->delta;
+ opt.clockid = q->clockid;
+ if (q->offload)
+ opt.flags |= TC_ETF_OFFLOAD_ON;
+
+ if (q->deadline_mode)
+ opt.flags |= TC_ETF_DEADLINE_MODE_ON;
+
+ if (nla_put(skb, TCA_ETF_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
+
+ return nla_nest_end(skb, nest);
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -1;
+}
+
+static struct Qdisc_ops etf_qdisc_ops __read_mostly = {
+ .id = "etf",
+ .priv_size = sizeof(struct etf_sched_data),
+ .enqueue = etf_enqueue_timesortedlist,
+ .dequeue = etf_dequeue_timesortedlist,
+ .peek = etf_peek_timesortedlist,
+ .init = etf_init,
+ .reset = etf_reset,
+ .destroy = etf_destroy,
+ .dump = etf_dump,
+ .owner = THIS_MODULE,
+};
+
+static int __init etf_module_init(void)
+{
+ return register_qdisc(&etf_qdisc_ops);
+}
+
+static void __exit etf_module_exit(void)
+{
+ unregister_qdisc(&etf_qdisc_ops);
+}
+module_init(etf_module_init)
+module_exit(etf_module_exit)
+MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 2a4ab7caf553..43c4bfe625a9 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -126,7 +126,6 @@ struct htb_class {
union {
struct htb_class_leaf {
- struct list_head drop_list;
int deficit[TC_HTB_MAXDEPTH];
struct Qdisc *q;
} leaf;
@@ -171,7 +170,6 @@ struct htb_sched {
struct qdisc_watchdog watchdog;
s64 now; /* cached dequeue time */
- struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
/* time of nearest event per level (row) */
s64 near_ev_cache[TC_HTB_MAXDEPTH];
@@ -562,8 +560,6 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
if (!cl->prio_activity) {
cl->prio_activity = 1 << cl->prio;
htb_activate_prios(q, cl);
- list_add_tail(&cl->un.leaf.drop_list,
- q->drops + cl->prio);
}
}
@@ -579,7 +575,6 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
htb_deactivate_prios(q, cl);
cl->prio_activity = 0;
- list_del_init(&cl->un.leaf.drop_list);
}
static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
@@ -981,7 +976,6 @@ static void htb_reset(struct Qdisc *sch)
else {
if (cl->un.leaf.q)
qdisc_reset(cl->un.leaf.q);
- INIT_LIST_HEAD(&cl->un.leaf.drop_list);
}
cl->prio_activity = 0;
cl->cmode = HTB_CAN_SEND;
@@ -993,8 +987,6 @@ static void htb_reset(struct Qdisc *sch)
sch->qstats.backlog = 0;
memset(q->hlevel, 0, sizeof(q->hlevel));
memset(q->row_mask, 0, sizeof(q->row_mask));
- for (i = 0; i < TC_HTB_NUMPRIO; i++)
- INIT_LIST_HEAD(q->drops + i);
}
static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
@@ -1024,7 +1016,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
struct nlattr *tb[TCA_HTB_MAX + 1];
struct tc_htb_glob *gopt;
int err;
- int i;
qdisc_watchdog_init(&q->watchdog, sch);
INIT_WORK(&q->work, htb_work_func);
@@ -1050,8 +1041,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
err = qdisc_class_hash_init(&q->clhash);
if (err < 0)
return err;
- for (i = 0; i < TC_HTB_NUMPRIO; i++)
- INIT_LIST_HEAD(q->drops + i);
qdisc_skb_head_init(&q->direct_queue);
@@ -1224,7 +1213,6 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
parent->level = 0;
memset(&parent->un.inner, 0, sizeof(parent->un.inner));
- INIT_LIST_HEAD(&parent->un.leaf.drop_list);
parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
parent->tokens = parent->buffer;
parent->ctokens = parent->cbuffer;
@@ -1418,7 +1406,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
}
cl->children = 0;
- INIT_LIST_HEAD(&cl->un.leaf.drop_list);
RB_CLEAR_NODE(&cl->pq_node);
for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 7d6801fc5340..ad18a2052416 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -68,6 +68,11 @@
Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
+struct disttable {
+ u32 size;
+ s16 table[0];
+};
+
struct netem_sched_data {
/* internal t(ime)fifo qdisc uses t_root and sch->limit */
struct rb_root t_root;
@@ -99,10 +104,7 @@ struct netem_sched_data {
u32 rho;
} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
- struct disttable {
- u32 size;
- s16 table[0];
- } *delay_dist;
+ struct disttable *delay_dist;
enum {
CLG_RANDOM,
@@ -142,6 +144,7 @@ struct netem_sched_data {
s32 bytes_left;
} slot;
+ struct disttable *slot_dist;
};
/* Time stamp put into socket buffer control block
@@ -180,7 +183,7 @@ static u32 get_crandom(struct crndstate *state)
u64 value, rho;
unsigned long answer;
- if (state->rho == 0) /* no correlation */
+ if (!state || state->rho == 0) /* no correlation */
return prandom_u32();
value = prandom_u32();
@@ -601,10 +604,19 @@ finish_segs:
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
- q->slot.slot_next = now + q->slot_config.min_delay +
- (prandom_u32() *
- (q->slot_config.max_delay -
- q->slot_config.min_delay) >> 32);
+ s64 next_delay;
+
+ if (!q->slot_dist)
+ next_delay = q->slot_config.min_delay +
+ (prandom_u32() *
+ (q->slot_config.max_delay -
+ q->slot_config.min_delay) >> 32);
+ else
+ next_delay = tabledist(q->slot_config.dist_delay,
+ (s32)(q->slot_config.dist_jitter),
+ NULL, q->slot_dist);
+
+ q->slot.slot_next = now + next_delay;
q->slot.packets_left = q->slot_config.max_packets;
q->slot.bytes_left = q->slot_config.max_bytes;
}
@@ -721,9 +733,9 @@ static void dist_free(struct disttable *d)
* signed 16 bit values.
*/
-static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
+static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
+ const struct nlattr *attr)
{
- struct netem_sched_data *q = qdisc_priv(sch);
size_t n = nla_len(attr)/sizeof(__s16);
const __s16 *data = nla_data(attr);
spinlock_t *root_lock;
@@ -744,7 +756,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
root_lock = qdisc_root_sleeping_lock(sch);
spin_lock_bh(root_lock);
- swap(q->delay_dist, d);
+ swap(*tbl, d);
spin_unlock_bh(root_lock);
dist_free(d);
@@ -762,7 +774,8 @@ static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
q->slot_config.max_bytes = INT_MAX;
q->slot.packets_left = q->slot_config.max_packets;
q->slot.bytes_left = q->slot_config.max_bytes;
- if (q->slot_config.min_delay | q->slot_config.max_delay)
+ if (q->slot_config.min_delay | q->slot_config.max_delay |
+ q->slot_config.dist_jitter)
q->slot.slot_next = ktime_get_ns();
else
q->slot.slot_next = 0;
@@ -926,16 +939,17 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
}
if (tb[TCA_NETEM_DELAY_DIST]) {
- ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
- if (ret) {
- /* recover clg and loss_model, in case of
- * q->clg and q->loss_model were modified
- * in get_loss_clg()
- */
- q->clg = old_clg;
- q->loss_model = old_loss_model;
- return ret;
- }
+ ret = get_dist_table(sch, &q->delay_dist,
+ tb[TCA_NETEM_DELAY_DIST]);
+ if (ret)
+ goto get_table_failure;
+ }
+
+ if (tb[TCA_NETEM_SLOT_DIST]) {
+ ret = get_dist_table(sch, &q->slot_dist,
+ tb[TCA_NETEM_SLOT_DIST]);
+ if (ret)
+ goto get_table_failure;
}
sch->limit = qopt->limit;
@@ -983,6 +997,15 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
get_slot(q, tb[TCA_NETEM_SLOT]);
return ret;
+
+get_table_failure:
+ /* recover clg and loss_model, in case of
+ * q->clg and q->loss_model were modified
+ * in get_loss_clg()
+ */
+ q->clg = old_clg;
+ q->loss_model = old_loss_model;
+ return ret;
}
static int netem_init(struct Qdisc *sch, struct nlattr *opt,
@@ -1011,6 +1034,7 @@ static void netem_destroy(struct Qdisc *sch)
if (q->qdisc)
qdisc_destroy(q->qdisc);
dist_free(q->delay_dist);
+ dist_free(q->slot_dist);
}
static int dump_loss_model(const struct netem_sched_data *q,
@@ -1127,7 +1151,8 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
if (dump_loss_model(q, skb) != 0)
goto nla_put_failure;
- if (q->slot_config.min_delay | q->slot_config.max_delay) {
+ if (q->slot_config.min_delay | q->slot_config.max_delay |
+ q->slot_config.dist_jitter) {
slot = q->slot_config;
if (slot.max_packets == INT_MAX)
slot.max_packets = 0;
diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
new file mode 100644
index 000000000000..52c0b6d8f1d7
--- /dev/null
+++ b/net/sched/sch_skbprio.c
@@ -0,0 +1,320 @@
+/*
+ * net/sched/sch_skbprio.c SKB Priority Queue.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Nishanth Devarajan, <ndev2021@gmail.com>
+ * Cody Doucette, <doucette@bu.edu>
+ * original idea by Michel Machado, Cody Doucette, and Qiaobin Fu
+ */
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/sch_generic.h>
+#include <net/inet_ecn.h>
+
+/* SKB Priority Queue
+ * =================================
+ *
+ * Skbprio (SKB Priority Queue) is a queueing discipline that prioritizes
+ * packets according to their skb->priority field. Under congestion,
+ * Skbprio drops already-enqueued lower priority packets to make space
+ * available for higher priority packets; it was conceived as a solution
+ * for denial-of-service defenses that need to route packets with different
+ * priorities as a means of overcoming DoS attacks.
+ */
+
+struct skbprio_sched_data {
+ /* Queue state. */
+ struct sk_buff_head qdiscs[SKBPRIO_MAX_PRIORITY];
+ struct gnet_stats_queue qstats[SKBPRIO_MAX_PRIORITY];
+ u16 highest_prio;
+ u16 lowest_prio;
+};
+
+static u16 calc_new_high_prio(const struct skbprio_sched_data *q)
+{
+ int prio;
+
+ for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) {
+ if (!skb_queue_empty(&q->qdiscs[prio]))
+ return prio;
+ }
+
+ /* SKB queue is empty, return 0 (default highest priority setting). */
+ return 0;
+}
+
+static u16 calc_new_low_prio(const struct skbprio_sched_data *q)
+{
+ int prio;
+
+ for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) {
+ if (!skb_queue_empty(&q->qdiscs[prio]))
+ return prio;
+ }
+
+ /* SKB queue is empty, return SKBPRIO_MAX_PRIORITY - 1
+ * (default lowest priority setting).
+ */
+ return SKBPRIO_MAX_PRIORITY - 1;
+}
+
+static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1;
+ struct skbprio_sched_data *q = qdisc_priv(sch);
+ struct sk_buff_head *qdisc;
+ struct sk_buff_head *lp_qdisc;
+ struct sk_buff *to_drop;
+ u16 prio, lp;
+
+ /* Obtain the priority of @skb. */
+ prio = min(skb->priority, max_priority);
+
+ qdisc = &q->qdiscs[prio];
+ if (sch->q.qlen < sch->limit) {
+ __skb_queue_tail(qdisc, skb);
+ qdisc_qstats_backlog_inc(sch, skb);
+ q->qstats[prio].backlog += qdisc_pkt_len(skb);
+
+ /* Check to update highest and lowest priorities. */
+ if (prio > q->highest_prio)
+ q->highest_prio = prio;
+
+ if (prio < q->lowest_prio)
+ q->lowest_prio = prio;
+
+ sch->q.qlen++;
+ return NET_XMIT_SUCCESS;
+ }
+
+ /* If this packet has the lowest priority, drop it. */
+ lp = q->lowest_prio;
+ if (prio <= lp) {
+ q->qstats[prio].drops++;
+ q->qstats[prio].overlimits++;
+ return qdisc_drop(skb, sch, to_free);
+ }
+
+ __skb_queue_tail(qdisc, skb);
+ qdisc_qstats_backlog_inc(sch, skb);
+ q->qstats[prio].backlog += qdisc_pkt_len(skb);
+
+ /* Drop the packet at the tail of the lowest priority qdisc. */
+ lp_qdisc = &q->qdiscs[lp];
+ to_drop = __skb_dequeue_tail(lp_qdisc);
+ BUG_ON(!to_drop);
+ qdisc_qstats_backlog_dec(sch, to_drop);
+ qdisc_drop(to_drop, sch, to_free);
+
+ q->qstats[lp].backlog -= qdisc_pkt_len(to_drop);
+ q->qstats[lp].drops++;
+ q->qstats[lp].overlimits++;
+
+ /* Check to update highest and lowest priorities. */
+ if (skb_queue_empty(lp_qdisc)) {
+ if (q->lowest_prio == q->highest_prio) {
+ /* The incoming packet is the only packet in queue. */
+ BUG_ON(sch->q.qlen != 1);
+ q->lowest_prio = prio;
+ q->highest_prio = prio;
+ } else {
+ q->lowest_prio = calc_new_low_prio(q);
+ }
+ }
+
+ if (prio > q->highest_prio)
+ q->highest_prio = prio;
+
+ return NET_XMIT_CN;
+}
+
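+/* Congestion example for the path above (illustrative numbers): with
+ * sch->limit = 64 and all 64 slots holding priority-0 packets, an
+ * arriving skb->priority = 10 packet is enqueued, the tail of the
+ * priority-0 queue is dropped in its place, and NET_XMIT_CN tells
+ * the caller the qdisc is congested even though this packet got in.
+ */
+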
+static struct sk_buff *skbprio_dequeue(struct Qdisc *sch)
+{
+ struct skbprio_sched_data *q = qdisc_priv(sch);
+ struct sk_buff_head *hpq = &q->qdiscs[q->highest_prio];
+ struct sk_buff *skb = __skb_dequeue(hpq);
+
+ if (unlikely(!skb))
+ return NULL;
+
+ sch->q.qlen--;
+ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_bstats_update(sch, skb);
+
+ q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);
+
+ /* Update highest priority field. */
+ if (skb_queue_empty(hpq)) {
+ if (q->lowest_prio == q->highest_prio) {
+ BUG_ON(sch->q.qlen);
+ q->highest_prio = 0;
+ q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
+ } else {
+ q->highest_prio = calc_new_high_prio(q);
+ }
+ }
+ return skb;
+}
+
+static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct tc_skbprio_qopt *ctl = nla_data(opt);
+
+ sch->limit = ctl->limit;
+ return 0;
+}
+
+static int skbprio_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct skbprio_sched_data *q = qdisc_priv(sch);
+ int prio;
+
+ /* Initialise all queues, one for each possible priority. */
+ for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
+ __skb_queue_head_init(&q->qdiscs[prio]);
+
+ memset(&q->qstats, 0, sizeof(q->qstats));
+ q->highest_prio = 0;
+ q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
+ sch->limit = 64;
+ if (!opt)
+ return 0;
+
+ return skbprio_change(sch, opt, extack);
+}
+
+static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct tc_skbprio_qopt opt;
+
+ opt.limit = sch->limit;
+
+ if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+ return -1;
+
+ return skb->len;
+}
+
+static void skbprio_reset(struct Qdisc *sch)
+{
+ struct skbprio_sched_data *q = qdisc_priv(sch);
+ int prio;
+
+ sch->qstats.backlog = 0;
+ sch->q.qlen = 0;
+
+ for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
+ __skb_queue_purge(&q->qdiscs[prio]);
+
+ memset(&q->qstats, 0, sizeof(q->qstats));
+ q->highest_prio = 0;
+ q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
+}
+
+static void skbprio_destroy(struct Qdisc *sch)
+{
+ struct skbprio_sched_data *q = qdisc_priv(sch);
+ int prio;
+
+ for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
+ __skb_queue_purge(&q->qdiscs[prio]);
+}
+
+static struct Qdisc *skbprio_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ return NULL;
+}
+
+static unsigned long skbprio_find(struct Qdisc *sch, u32 classid)
+{
+ return 0;
+}
+
+static int skbprio_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ tcm->tcm_handle |= TC_H_MIN(cl);
+ return 0;
+}
+
+static int skbprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ struct gnet_dump *d)
+{
+ struct skbprio_sched_data *q = qdisc_priv(sch);
+
+ if (gnet_stats_copy_queue(d, NULL, &q->qstats[cl - 1],
+ q->qstats[cl - 1].qlen) < 0)
+ return -1;
+ return 0;
+}
+
+static void skbprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ unsigned int i;
+
+ if (arg->stop)
+ return;
+
+ for (i = 0; i < SKBPRIO_MAX_PRIORITY; i++) {
+ if (arg->count < arg->skip) {
+ arg->count++;
+ continue;
+ }
+ if (arg->fn(sch, i + 1, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->count++;
+ }
+}
+
+static const struct Qdisc_class_ops skbprio_class_ops = {
+ .leaf = skbprio_leaf,
+ .find = skbprio_find,
+ .dump = skbprio_dump_class,
+ .dump_stats = skbprio_dump_class_stats,
+ .walk = skbprio_walk,
+};
+
+static struct Qdisc_ops skbprio_qdisc_ops __read_mostly = {
+ .cl_ops = &skbprio_class_ops,
+ .id = "skbprio",
+ .priv_size = sizeof(struct skbprio_sched_data),
+ .enqueue = skbprio_enqueue,
+ .dequeue = skbprio_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .init = skbprio_init,
+ .reset = skbprio_reset,
+ .change = skbprio_change,
+ .dump = skbprio_dump,
+ .destroy = skbprio_destroy,
+ .owner = THIS_MODULE,
+};
+
+static int __init skbprio_module_init(void)
+{
+ return register_qdisc(&skbprio_qdisc_ops);
+}
+
+static void __exit skbprio_module_exit(void)
+{
+ unregister_qdisc(&skbprio_qdisc_ops);
+}
+
+module_init(skbprio_module_init)
+module_exit(skbprio_module_exit)
+
+MODULE_LICENSE("GPL");
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index c740b189d4ba..950ecf6e7439 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -41,8 +41,8 @@ config SCTP_DBG_OBJCNT
bool "SCTP: Debug object counts"
depends on PROC_FS
help
- If you say Y, this will enable debugging support for counting the
- type of objects that are currently allocated. This is useful for
+ If you say Y, this will enable debugging support for counting the
+ type of objects that are currently allocated. This is useful for
identifying memory leaks. This debug information can be viewed by
'cat /proc/net/sctp/sctp_dbg_objcnt'
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 5d5a16204d50..297d9cf960b9 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -115,6 +115,9 @@ static struct sctp_association *sctp_association_init(
/* Initialize path max retrans value. */
asoc->pathmaxrxt = sp->pathmaxrxt;
+ asoc->flowlabel = sp->flowlabel;
+ asoc->dscp = sp->dscp;
+
/* Initialize default path MTU. */
asoc->pathmtu = sp->pathmtu;
@@ -647,6 +650,18 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
peer->sackdelay = asoc->sackdelay;
peer->sackfreq = asoc->sackfreq;
+ if (addr->sa.sa_family == AF_INET6) {
+ __be32 info = addr->v6.sin6_flowinfo;
+
+ if (info) {
+ peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
+ peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+ } else {
+ peer->flowlabel = asoc->flowlabel;
+ }
+ }
+ peer->dscp = asoc->dscp;
+
/* Enable/disable heartbeat, SACK delay, and path MTU discovery
* based on association setting.
*/
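
The hunk above makes a non-zero sin6_flowinfo on the peer address override the association's default flow label. A hedged sketch of how a caller could carry a label into an sctp_connectx()-style address list (the label value 0x12345 is illustrative):

	#include <string.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>

	/* peer address whose flow label takes precedence over the
	 * association default; low 20 bits of sin6_flowinfo carry it */
	static void fill_peer(struct sockaddr_in6 *sa, const char *ip, int port)
	{
		memset(sa, 0, sizeof(*sa));
		sa->sin6_family = AF_INET6;
		sa->sin6_port = htons(port);
		sa->sin6_flowinfo = htonl(0x12345);
		inet_pton(AF_INET6, ip, &sa->sin6_addr);
	}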
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index bfb9f812e2ef..ce8087846f05 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -325,7 +325,8 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
time_after(jiffies, chunk->msg->expires_at)) {
struct sctp_stream_out *streamout =
- &chunk->asoc->stream.out[chunk->sinfo.sinfo_stream];
+ SCTP_SO(&chunk->asoc->stream,
+ chunk->sinfo.sinfo_stream);
if (chunk->sent_count) {
chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
@@ -339,7 +340,8 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
} else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
struct sctp_stream_out *streamout =
- &chunk->asoc->stream.out[chunk->sinfo.sinfo_stream];
+ SCTP_SO(&chunk->asoc->stream,
+ chunk->sinfo.sinfo_stream);
chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
streamout->ext->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index ba8a6e6c36fa..9bbc5f92c941 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -56,6 +56,7 @@
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>
#include <net/net_namespace.h>
+#include <linux/rhashtable.h>
/* Forward declarations for internal helpers. */
static int sctp_rcv_ootb(struct sk_buff *);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0cd2e764f47f..fc6c5e4bffa5 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -209,12 +209,17 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
struct sock *sk = skb->sk;
struct ipv6_pinfo *np = inet6_sk(sk);
struct flowi6 *fl6 = &transport->fl.u.ip6;
+ __u8 tclass = np->tclass;
int res;
pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb,
skb->len, &fl6->saddr, &fl6->daddr);
- IP6_ECN_flow_xmit(sk, fl6->flowlabel);
+ if (transport->dscp & SCTP_DSCP_SET_MASK)
+ tclass = transport->dscp & SCTP_DSCP_VAL_MASK;
+
+ if (INET_ECN_is_capable(tclass))
+ IP6_ECN_flow_xmit(sk, fl6->flowlabel);
if (!(transport->param_flags & SPP_PMTUD_ENABLE))
skb->ignore_df = 1;
@@ -223,7 +228,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
rcu_read_lock();
res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
- np->tclass);
+ tclass);
rcu_read_unlock();
return res;
}
@@ -254,6 +259,17 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
fl6->flowi6_oif = daddr->v6.sin6_scope_id;
else if (asoc)
fl6->flowi6_oif = asoc->base.sk->sk_bound_dev_if;
+ if (t->flowlabel & SCTP_FLOWLABEL_SET_MASK)
+ fl6->flowlabel = htonl(t->flowlabel & SCTP_FLOWLABEL_VAL_MASK);
+
+ if (np->sndflow && (fl6->flowlabel & IPV6_FLOWLABEL_MASK)) {
+ struct ip6_flowlabel *flowlabel;
+
+ flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
+ if (!flowlabel)
+ goto out;
+ fl6_sock_release(flowlabel);
+ }
pr_debug("%s: dst=%pI6 ", __func__, &fl6->daddr);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index d68aa33485a9..d74d00b29942 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -80,7 +80,7 @@ static inline void sctp_outq_head_data(struct sctp_outq *q,
q->out_qlen += ch->skb->len;
stream = sctp_chunk_stream_no(ch);
- oute = q->asoc->stream.out[stream].ext;
+ oute = SCTP_SO(&q->asoc->stream, stream)->ext;
list_add(&ch->stream_list, &oute->outq);
}
@@ -101,7 +101,7 @@ static inline void sctp_outq_tail_data(struct sctp_outq *q,
q->out_qlen += ch->skb->len;
stream = sctp_chunk_stream_no(ch);
- oute = q->asoc->stream.out[stream].ext;
+ oute = SCTP_SO(&q->asoc->stream, stream)->ext;
list_add_tail(&ch->stream_list, &oute->outq);
}
@@ -372,7 +372,7 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
sctp_insert_list(&asoc->outqueue.abandoned,
&chk->transmitted_list);
- streamout = &asoc->stream.out[chk->sinfo.sinfo_stream];
+ streamout = SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);
asoc->sent_cnt_removable--;
asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
@@ -416,7 +416,7 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) {
struct sctp_stream_out *streamout =
- &asoc->stream.out[chk->sinfo.sinfo_stream];
+ SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);
streamout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
}
@@ -1082,6 +1082,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
/* Finally, transmit new packets. */
while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) {
__u32 sid = ntohs(chunk->subh.data_hdr->stream);
+ __u8 stream_state = SCTP_SO(&ctx->asoc->stream, sid)->state;
/* Has this chunk expired? */
if (sctp_chunk_abandoned(chunk)) {
@@ -1091,7 +1092,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
continue;
}
- if (ctx->asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) {
+ if (stream_state == SCTP_STREAM_CLOSED) {
sctp_outq_head_data(ctx->q, chunk);
break;
}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 67f73d3a1356..e948db29ab53 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -426,13 +426,16 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
struct dst_entry *dst = NULL;
union sctp_addr *daddr = &t->ipaddr;
union sctp_addr dst_saddr;
+ __u8 tos = inet_sk(sk)->tos;
+ if (t->dscp & SCTP_DSCP_SET_MASK)
+ tos = t->dscp & SCTP_DSCP_VAL_MASK;
memset(fl4, 0x0, sizeof(struct flowi4));
fl4->daddr = daddr->v4.sin_addr.s_addr;
fl4->fl4_dport = daddr->v4.sin_port;
fl4->flowi4_proto = IPPROTO_SCTP;
if (asoc) {
- fl4->flowi4_tos = RT_CONN_FLAGS(asoc->base.sk);
+ fl4->flowi4_tos = RT_CONN_FLAGS_TOS(asoc->base.sk, tos);
fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if;
fl4->fl4_sport = htons(asoc->base.bind_addr.port);
}
@@ -495,7 +498,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
fl4->fl4_sport = laddr->a.v4.sin_port;
flowi4_update_output(fl4,
asoc->base.sk->sk_bound_dev_if,
- RT_CONN_FLAGS(asoc->base.sk),
+ RT_CONN_FLAGS_TOS(asoc->base.sk, tos),
daddr->v4.sin_addr.s_addr,
laddr->a.v4.sin_addr.s_addr);
@@ -971,16 +974,21 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
struct sctp_transport *transport)
{
struct inet_sock *inet = inet_sk(skb->sk);
+ __u8 dscp = inet->tos;
pr_debug("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", __func__, skb,
- skb->len, &transport->fl.u.ip4.saddr, &transport->fl.u.ip4.daddr);
+ skb->len, &transport->fl.u.ip4.saddr,
+ &transport->fl.u.ip4.daddr);
+
+ if (transport->dscp & SCTP_DSCP_SET_MASK)
+ dscp = transport->dscp & SCTP_DSCP_VAL_MASK;
inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS);
- return ip_queue_xmit(&inet->sk, skb, &transport->fl);
+ return __ip_queue_xmit(&inet->sk, skb, &transport->fl, dscp);
}
static struct sctp_af sctp_af_inet;
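
On the IPv4 side the same pattern feeds the per-transport DSCP into the route lookup and into __ip_queue_xmit() in place of the socket's inet->tos. A hedged sketch of the selection (SCTP_DSCP_SET_MASK is assumed to be the low "set" bit and SCTP_DSCP_VAL_MASK the upper six tos bits, per this series' header change):

	#include <stdint.h>

	/* effective IPv4 tos for one transport; mirrors the logic above */
	static uint8_t sctp_transport_tos(uint8_t sock_tos, uint32_t dscp)
	{
		if (dscp & SCTP_DSCP_SET_MASK)	/* per-transport override */
			return dscp & SCTP_DSCP_VAL_MASK;
		return sock_tos;		/* fall back to inet->tos */
	}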
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 298112ca8c06..85d393090238 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1827,4 +1827,3 @@ nomem:
error = -ENOMEM;
goto out;
}
-
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ce620e878538..e96b15a66aba 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -66,6 +66,7 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/compat.h>
+#include <linux/rhashtable.h>
#include <net/ip.h>
#include <net/icmp.h>
@@ -1696,6 +1697,7 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags,
struct sctp_association *asoc;
enum sctp_scope scope;
struct cmsghdr *cmsg;
+ __be32 flowinfo = 0;
struct sctp_af *af;
int err;
@@ -1780,6 +1782,9 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags,
if (!cmsgs->addrs_msg)
return 0;
+ if (daddr->sa.sa_family == AF_INET6)
+ flowinfo = daddr->v6.sin6_flowinfo;
+
/* sendv addr list parse */
for_each_cmsghdr(cmsg, cmsgs->addrs_msg) {
struct sctp_transport *transport;
@@ -1812,6 +1817,7 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags,
}
dlen = sizeof(struct in6_addr);
+ daddr->v6.sin6_flowinfo = flowinfo;
daddr->v6.sin6_family = AF_INET6;
daddr->v6.sin6_port = htons(asoc->peer.port);
memcpy(&daddr->v6.sin6_addr, CMSG_DATA(cmsg), dlen);
@@ -1905,7 +1911,7 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
goto err;
}
- if (unlikely(!asoc->stream.out[sinfo->sinfo_stream].ext)) {
+ if (unlikely(!SCTP_SO(&asoc->stream, sinfo->sinfo_stream)->ext)) {
err = sctp_stream_init_ext(&asoc->stream, sinfo->sinfo_stream);
if (err)
goto err;
@@ -2392,6 +2398,8 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
* uint32_t spp_pathmtu;
* uint32_t spp_sackdelay;
* uint32_t spp_flags;
+ * uint32_t spp_ipv6_flowlabel;
+ * uint8_t spp_dscp;
* };
*
* spp_assoc_id - (one-to-many style socket) This is filled in the
@@ -2471,6 +2479,45 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
* also that this field is mutually exclusive to
* SPP_SACKDELAY_ENABLE, setting both will have undefined
* results.
+ *
+ * SPP_IPV6_FLOWLABEL: Setting this flag enables the
+ * setting of the IPv6 flow label value. The value is
+ * contained in the spp_ipv6_flowlabel field.
+ * Upon retrieval, this flag will be set to indicate that
+ * the spp_ipv6_flowlabel field has a valid value returned.
+ * If a specific destination address is set (in the
+ * spp_address field), then the value returned is that of
+ * the address. If just an association is specified (and
+ * no address), then the association's default flow label
+ * is returned. If neither an association nor a destination
+ * is specified, then the socket's default flow label is
+ * returned. For non-IPv6 sockets, this flag will be left
+ * cleared.
+ *
+ * SPP_DSCP: Setting this flag enables the setting of the
+ * Differentiated Services Code Point (DSCP) value
+ * associated with either the association or a specific
+ * address. The value is obtained in the spp_dscp field.
+ * Upon retrieval, this flag will be set to indicate that
+ * the spp_dscp field has a valid value returned. If a
+ * specific destination address is set when called (in the
+ * spp_address field), then that specific destination
+ * address's DSCP value is returned. If just an association
+ * is specified, then the association's default DSCP is
+ * returned. If neither an association nor a destination is
+ * specified, then the socket's default DSCP is returned.
+ *
+ * spp_ipv6_flowlabel
+ * - This field is used in conjunction with the
+ * SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label.
+ * The 20 least significant bits are used for the flow
+ * label. This setting has precedence over any IPv6-layer
+ * setting.
+ *
+ * spp_dscp - This field is used in conjunction with the SPP_DSCP flag
+ * and contains the DSCP. The 6 most significant bits are
+ * used for the DSCP. This setting has precedence over any
+ * IPv4- or IPv6-layer setting.
*/
static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
struct sctp_transport *trans,
@@ -2610,6 +2657,51 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
}
}
+ if (params->spp_flags & SPP_IPV6_FLOWLABEL) {
+ if (trans && trans->ipaddr.sa.sa_family == AF_INET6) {
+ trans->flowlabel = params->spp_ipv6_flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+ } else if (asoc) {
+ list_for_each_entry(trans,
+ &asoc->peer.transport_addr_list,
+ transports) {
+ if (trans->ipaddr.sa.sa_family != AF_INET6)
+ continue;
+ trans->flowlabel = params->spp_ipv6_flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+ }
+ asoc->flowlabel = params->spp_ipv6_flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+ } else if (sctp_opt2sk(sp)->sk_family == AF_INET6) {
+ sp->flowlabel = params->spp_ipv6_flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ sp->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+ }
+ }
+
+ if (params->spp_flags & SPP_DSCP) {
+ if (trans) {
+ trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
+ trans->dscp |= SCTP_DSCP_SET_MASK;
+ } else if (asoc) {
+ list_for_each_entry(trans,
+ &asoc->peer.transport_addr_list,
+ transports) {
+ trans->dscp = params->spp_dscp &
+ SCTP_DSCP_VAL_MASK;
+ trans->dscp |= SCTP_DSCP_SET_MASK;
+ }
+ asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
+ asoc->dscp |= SCTP_DSCP_SET_MASK;
+ } else {
+ sp->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
+ sp->dscp |= SCTP_DSCP_SET_MASK;
+ }
+ }
+
return 0;
}
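
A userspace view of the new knobs, as a hedged example: set an association-wide DSCP of EF (0x2e, shifted into the top six bits per the comment above) and a default flow label. struct sctp_paddrparams and SCTP_PEER_ADDR_PARAMS come from <netinet/sctp.h>; the SPP_DSCP/SPP_IPV6_FLOWLABEL flags and the two new fields exist only with headers carrying this series' uapi change.

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	/* illustrative only: zero ids target the socket/endpoint defaults */
	static int set_dscp_flowlabel(int fd)
	{
		struct sctp_paddrparams p;

		memset(&p, 0, sizeof(p));
		p.spp_flags = SPP_DSCP | SPP_IPV6_FLOWLABEL;
		p.spp_dscp = 0x2e << 2;		/* EF in the upper six bits */
		p.spp_ipv6_flowlabel = 0x12345;	/* 20 significant bits */
		return setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
				  &p, sizeof(p));
	}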
@@ -2624,11 +2716,18 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
int error;
int hb_change, pmtud_change, sackdelay_change;
- if (optlen != sizeof(struct sctp_paddrparams))
+ if (optlen == sizeof(params)) {
+ if (copy_from_user(&params, optval, optlen))
+ return -EFAULT;
+ } else if (optlen == ALIGN(offsetof(struct sctp_paddrparams,
+ spp_ipv6_flowlabel), 4)) {
+ if (copy_from_user(&params, optval, optlen))
+ return -EFAULT;
+ if (params.spp_flags & (SPP_DSCP | SPP_IPV6_FLOWLABEL))
+ return -EINVAL;
+ } else {
return -EINVAL;
-
- if (copy_from_user(&params, optval, optlen))
- return -EFAULT;
+ }
/* Validate flags and value parameters. */
hb_change = params.spp_flags & SPP_HB;
@@ -4169,6 +4268,28 @@ out:
return retval;
}
+static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval,
+ unsigned int optlen)
+{
+ int val;
+
+ if (!sctp_style(sk, TCP))
+ return -EOPNOTSUPP;
+
+ if (sctp_sk(sk)->ep->base.bind_addr.port)
+ return -EFAULT;
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (get_user(val, (int __user *)optval))
+ return -EFAULT;
+
+ sctp_sk(sk)->reuse = !!val;
+
+ return 0;
+}
+
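
SCTP_REUSE_PORT is TCP-style (one-to-one) only and must be set before a port is bound; once bound, the kernel rejects it, as the code above shows. Hedged usage (the option name assumes headers carrying this series' uapi addition):

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	static int sctp_listener_with_reuse(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
		int on = 1;

		/* must precede bind(); fails once a port is attached */
		setsockopt(fd, IPPROTO_SCTP, SCTP_REUSE_PORT, &on, sizeof(on));
		return fd;
	}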
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
@@ -4363,6 +4484,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
retval = sctp_setsockopt_interleaving_supported(sk, optval,
optlen);
break;
+ case SCTP_REUSE_PORT:
+ retval = sctp_setsockopt_reuse_port(sk, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -5427,6 +5551,45 @@ out:
* also that this field is mutually exclusive to
* SPP_SACKDELAY_ENABLE, setting both will have undefined
* results.
+ *
+ * SPP_IPV6_FLOWLABEL: Setting this flag enables the
+ * setting of the IPv6 flow label value. The value is
+ * contained in the spp_ipv6_flowlabel field.
+ * Upon retrieval, this flag will be set to indicate that
+ * the spp_ipv6_flowlabel field has a valid value returned.
+ * If a specific destination address is set (in the
+ * spp_address field), then the value returned is that of
+ * the address. If just an association is specified (and
+ * no address), then the association's default flow label
+ * is returned. If neither an association nor a destination
+ * is specified, then the socket's default flow label is
+ * returned. For non-IPv6 sockets, this flag will be left
+ * cleared.
+ *
+ * SPP_DSCP: Setting this flag enables the setting of the
+ * Differentiated Services Code Point (DSCP) value
+ * associated with either the association or a specific
+ * address. The value is obtained in the spp_dscp field.
+ * Upon retrieval, this flag will be set to indicate that
+ * the spp_dscp field has a valid value returned. If a
+ * specific destination address is set when called (in the
+ * spp_address field), then that specific destination
+ * address's DSCP value is returned. If just an association
+ * is specified, then the association's default DSCP is
+ * returned. If neither an association nor a destination is
+ * specified, then the socket's default DSCP is returned.
+ *
+ * spp_ipv6_flowlabel
+ * - This field is used in conjunction with the
+ * SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label.
+ * The 20 least significant bits are used for the flow
+ * label. This setting has precedence over any IPv6-layer
+ * setting.
+ *
+ * spp_dscp - This field is used in conjunction with the SPP_DSCP flag
+ * and contains the DSCP. The 6 most significant bits are
+ * used for the DSCP. This setting has precedence over any
+ * IPv4- or IPv6-layer setting.
*/
static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
char __user *optval, int __user *optlen)
@@ -5436,9 +5599,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
struct sctp_association *asoc = NULL;
struct sctp_sock *sp = sctp_sk(sk);
- if (len < sizeof(struct sctp_paddrparams))
+ if (len >= sizeof(params))
+ len = sizeof(params);
+ else if (len >= ALIGN(offsetof(struct sctp_paddrparams,
+ spp_ipv6_flowlabel), 4))
+ len = ALIGN(offsetof(struct sctp_paddrparams,
+ spp_ipv6_flowlabel), 4);
+ else
return -EINVAL;
- len = sizeof(struct sctp_paddrparams);
+
if (copy_from_user(&params, optval, len))
return -EFAULT;
@@ -5473,6 +5642,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
/*draft-11 doesn't say what to return in spp_flags*/
params.spp_flags = trans->param_flags;
+ if (trans->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
+ params.spp_ipv6_flowlabel = trans->flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ params.spp_flags |= SPP_IPV6_FLOWLABEL;
+ }
+ if (trans->dscp & SCTP_DSCP_SET_MASK) {
+ params.spp_dscp = trans->dscp & SCTP_DSCP_VAL_MASK;
+ params.spp_flags |= SPP_DSCP;
+ }
} else if (asoc) {
/* Fetch association values. */
params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
@@ -5482,6 +5660,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
/*draft-11 doesn't say what to return in spp_flags*/
params.spp_flags = asoc->param_flags;
+ if (asoc->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
+ params.spp_ipv6_flowlabel = asoc->flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ params.spp_flags |= SPP_IPV6_FLOWLABEL;
+ }
+ if (asoc->dscp & SCTP_DSCP_SET_MASK) {
+ params.spp_dscp = asoc->dscp & SCTP_DSCP_VAL_MASK;
+ params.spp_flags |= SPP_DSCP;
+ }
} else {
/* Fetch socket values. */
params.spp_hbinterval = sp->hbinterval;
@@ -5491,6 +5678,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
/*draft-11 doesn't say what to return in spp_flags*/
params.spp_flags = sp->param_flags;
+ if (sp->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
+ params.spp_ipv6_flowlabel = sp->flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ params.spp_flags |= SPP_IPV6_FLOWLABEL;
+ }
+ if (sp->dscp & SCTP_DSCP_SET_MASK) {
+ params.spp_dscp = sp->dscp & SCTP_DSCP_VAL_MASK;
+ params.spp_flags |= SPP_DSCP;
+ }
}
if (copy_to_user(optval, &params, len))
@@ -6958,7 +7154,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
if (!asoc || params.sprstat_sid >= asoc->stream.outcnt)
goto out;
- streamoute = asoc->stream.out[params.sprstat_sid].ext;
+ streamoute = SCTP_SO(&asoc->stream, params.sprstat_sid)->ext;
if (!streamoute) {
/* Not allocated yet, means all stats are 0 */
params.sprstat_abandoned_unsent = 0;
@@ -7196,6 +7392,26 @@ out:
return retval;
}
+static int sctp_getsockopt_reuse_port(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ int val;
+
+ if (len < sizeof(int))
+ return -EINVAL;
+
+ len = sizeof(int);
+ val = sctp_sk(sk)->reuse;
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
+
static int sctp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -7391,6 +7607,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
retval = sctp_getsockopt_interleaving_supported(sk, len, optval,
optlen);
break;
+ case SCTP_REUSE_PORT:
+ retval = sctp_getsockopt_reuse_port(sk, len, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -7428,6 +7647,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
+ bool reuse = (sk->sk_reuse || sctp_sk(sk)->reuse);
struct sctp_bind_hashbucket *head; /* hash list */
struct sctp_bind_bucket *pp;
unsigned short snum;
@@ -7500,13 +7720,11 @@ pp_found:
* used by other socket (pp->owner not empty); that other
* socket is going to be sk2.
*/
- int reuse = sk->sk_reuse;
struct sock *sk2;
pr_debug("%s: found a possible match\n", __func__);
- if (pp->fastreuse && sk->sk_reuse &&
- sk->sk_state != SCTP_SS_LISTENING)
+ if (pp->fastreuse && reuse && sk->sk_state != SCTP_SS_LISTENING)
goto success;
/* Run through the list of sockets bound to the port
@@ -7524,7 +7742,7 @@ pp_found:
ep2 = sctp_sk(sk2)->ep;
if (sk == sk2 ||
- (reuse && sk2->sk_reuse &&
+ (reuse && (sk2->sk_reuse || sctp_sk(sk2)->reuse) &&
sk2->sk_state != SCTP_SS_LISTENING))
continue;
@@ -7548,12 +7766,12 @@ pp_not_found:
* SO_REUSEADDR on this socket -sk-).
*/
if (hlist_empty(&pp->owner)) {
- if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING)
+ if (reuse && sk->sk_state != SCTP_SS_LISTENING)
pp->fastreuse = 1;
else
pp->fastreuse = 0;
} else if (pp->fastreuse &&
- (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING))
+ (!reuse || sk->sk_state == SCTP_SS_LISTENING))
pp->fastreuse = 0;
/* We are set, so fill up all the data in the hash table
@@ -7684,7 +7902,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
err = 0;
sctp_unhash_endpoint(ep);
sk->sk_state = SCTP_SS_CLOSED;
- if (sk->sk_reuse)
+ if (sk->sk_reuse || sctp_sk(sk)->reuse)
sctp_sk(sk)->bind_hash->fastreuse = 1;
goto out;
}
@@ -8551,6 +8769,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
newsk->sk_no_check_tx = sk->sk_no_check_tx;
newsk->sk_no_check_rx = sk->sk_no_check_rx;
newsk->sk_reuse = sk->sk_reuse;
+ sctp_sk(newsk)->reuse = sp->reuse;
newsk->sk_shutdown = sk->sk_shutdown;
newsk->sk_destruct = sctp_destruct_sock;
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index f1f1d1b232ba..ffb940d3b57c 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -37,6 +37,53 @@
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
+static struct flex_array *fa_alloc(size_t elem_size, size_t elem_count,
+ gfp_t gfp)
+{
+ struct flex_array *result;
+ int err;
+
+ result = flex_array_alloc(elem_size, elem_count, gfp);
+ if (result) {
+ err = flex_array_prealloc(result, 0, elem_count, gfp);
+ if (err) {
+ flex_array_free(result);
+ result = NULL;
+ }
+ }
+
+ return result;
+}
+
+static void fa_free(struct flex_array *fa)
+{
+ if (fa)
+ flex_array_free(fa);
+}
+
+static void fa_copy(struct flex_array *fa, struct flex_array *from,
+ size_t index, size_t count)
+{
+ void *elem;
+
+ while (count--) {
+ elem = flex_array_get(from, index);
+ flex_array_put(fa, index, elem, 0);
+ index++;
+ }
+}
+
+static void fa_zero(struct flex_array *fa, size_t index, size_t count)
+{
+ void *elem;
+
+ while (count--) {
+ elem = flex_array_get(fa, index);
+ memset(elem, 0, fa->element_size);
+ index++;
+ }
+}
+
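
The fa_* helpers above move the stream arrays off one large kmalloc and onto the kernel's flex_array API: elements live in page-sized parts, and flex_array_prealloc() up front guarantees that later flex_array_get()/flex_array_put() cannot fail. Because elements are no longer contiguous, plain pointer arithmetic such as memcpy(out + oldcnt, ...) stops working, which is why fa_copy() and fa_zero() walk element by element. A sketch of the access pattern:

	/* &stream->out[sid] no longer exists; each element is fetched
	 * through the flex_array, which is what SCTP_SO() wraps */
	struct sctp_stream_out *so = flex_array_get(stream->out, sid);

	memset(so, 0, sizeof(*so));	/* per-element, as in fa_zero() */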
/* Migrates chunks from stream queues to new stream queues if needed,
* but not across associations. Also, removes those chunks to streams
* higher than the new max.
@@ -78,34 +125,33 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
* sctp_stream_update will swap ->out pointers.
*/
for (i = 0; i < outcnt; i++) {
- kfree(new->out[i].ext);
- new->out[i].ext = stream->out[i].ext;
- stream->out[i].ext = NULL;
+ kfree(SCTP_SO(new, i)->ext);
+ SCTP_SO(new, i)->ext = SCTP_SO(stream, i)->ext;
+ SCTP_SO(stream, i)->ext = NULL;
}
}
for (i = outcnt; i < stream->outcnt; i++)
- kfree(stream->out[i].ext);
+ kfree(SCTP_SO(stream, i)->ext);
}
static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
gfp_t gfp)
{
- struct sctp_stream_out *out;
+ struct flex_array *out;
+ size_t elem_size = sizeof(struct sctp_stream_out);
- out = kmalloc_array(outcnt, sizeof(*out), gfp);
+ out = fa_alloc(elem_size, outcnt, gfp);
if (!out)
return -ENOMEM;
if (stream->out) {
- memcpy(out, stream->out, min(outcnt, stream->outcnt) *
- sizeof(*out));
- kfree(stream->out);
+ fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt));
+ fa_free(stream->out);
}
if (outcnt > stream->outcnt)
- memset(out + stream->outcnt, 0,
- (outcnt - stream->outcnt) * sizeof(*out));
+ fa_zero(out, stream->outcnt, (outcnt - stream->outcnt));
stream->out = out;
@@ -115,22 +161,20 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
gfp_t gfp)
{
- struct sctp_stream_in *in;
-
- in = kmalloc_array(incnt, sizeof(*stream->in), gfp);
+ struct flex_array *in;
+ size_t elem_size = sizeof(struct sctp_stream_in);
+ in = fa_alloc(elem_size, incnt, gfp);
if (!in)
return -ENOMEM;
if (stream->in) {
- memcpy(in, stream->in, min(incnt, stream->incnt) *
- sizeof(*in));
- kfree(stream->in);
+ fa_copy(in, stream->in, 0, min(incnt, stream->incnt));
+ fa_free(stream->in);
}
if (incnt > stream->incnt)
- memset(in + stream->incnt, 0,
- (incnt - stream->incnt) * sizeof(*in));
+ fa_zero(in, stream->incnt, (incnt - stream->incnt));
stream->in = in;
@@ -162,7 +206,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
stream->outcnt = outcnt;
for (i = 0; i < stream->outcnt; i++)
- stream->out[i].state = SCTP_STREAM_OPEN;
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
sched->init(stream);
@@ -174,7 +218,7 @@ in:
ret = sctp_stream_alloc_in(stream, incnt, gfp);
if (ret) {
sched->free(stream);
- kfree(stream->out);
+ fa_free(stream->out);
stream->out = NULL;
stream->outcnt = 0;
goto out;
@@ -193,7 +237,7 @@ int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
soute = kzalloc(sizeof(*soute), GFP_KERNEL);
if (!soute)
return -ENOMEM;
- stream->out[sid].ext = soute;
+ SCTP_SO(stream, sid)->ext = soute;
return sctp_sched_init_sid(stream, sid, GFP_KERNEL);
}
@@ -205,9 +249,9 @@ void sctp_stream_free(struct sctp_stream *stream)
sched->free(stream);
for (i = 0; i < stream->outcnt; i++)
- kfree(stream->out[i].ext);
- kfree(stream->out);
- kfree(stream->in);
+ kfree(SCTP_SO(stream, i)->ext);
+ fa_free(stream->out);
+ fa_free(stream->in);
}
void sctp_stream_clear(struct sctp_stream *stream)
@@ -215,12 +259,12 @@ void sctp_stream_clear(struct sctp_stream *stream)
int i;
for (i = 0; i < stream->outcnt; i++) {
- stream->out[i].mid = 0;
- stream->out[i].mid_uo = 0;
+ SCTP_SO(stream, i)->mid = 0;
+ SCTP_SO(stream, i)->mid_uo = 0;
}
for (i = 0; i < stream->incnt; i++)
- stream->in[i].mid = 0;
+ SCTP_SI(stream, i)->mid = 0;
}
void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
@@ -273,8 +317,8 @@ static bool sctp_stream_outq_is_empty(struct sctp_stream *stream,
for (i = 0; i < str_nums; i++) {
__u16 sid = ntohs(str_list[i]);
- if (stream->out[sid].ext &&
- !list_empty(&stream->out[sid].ext->outq))
+ if (SCTP_SO(stream, sid)->ext &&
+ !list_empty(&SCTP_SO(stream, sid)->ext->outq))
return false;
}
@@ -361,11 +405,11 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
if (out) {
if (str_nums)
for (i = 0; i < str_nums; i++)
- stream->out[str_list[i]].state =
+ SCTP_SO(stream, str_list[i])->state =
SCTP_STREAM_CLOSED;
else
for (i = 0; i < stream->outcnt; i++)
- stream->out[i].state = SCTP_STREAM_CLOSED;
+ SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;
}
asoc->strreset_chunk = chunk;
@@ -380,11 +424,11 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
if (str_nums)
for (i = 0; i < str_nums; i++)
- stream->out[str_list[i]].state =
+ SCTP_SO(stream, str_list[i])->state =
SCTP_STREAM_OPEN;
else
for (i = 0; i < stream->outcnt; i++)
- stream->out[i].state = SCTP_STREAM_OPEN;
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
goto out;
}
@@ -418,7 +462,7 @@ int sctp_send_reset_assoc(struct sctp_association *asoc)
/* Block further xmit of data until this request is completed */
for (i = 0; i < stream->outcnt; i++)
- stream->out[i].state = SCTP_STREAM_CLOSED;
+ SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;
asoc->strreset_chunk = chunk;
sctp_chunk_hold(asoc->strreset_chunk);
@@ -429,7 +473,7 @@ int sctp_send_reset_assoc(struct sctp_association *asoc)
asoc->strreset_chunk = NULL;
for (i = 0; i < stream->outcnt; i++)
- stream->out[i].state = SCTP_STREAM_OPEN;
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
return retval;
}
@@ -609,10 +653,10 @@ struct sctp_chunk *sctp_process_strreset_outreq(
}
for (i = 0; i < nums; i++)
- stream->in[ntohs(str_p[i])].mid = 0;
+ SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
} else {
for (i = 0; i < stream->incnt; i++)
- stream->in[i].mid = 0;
+ SCTP_SI(stream, i)->mid = 0;
}
result = SCTP_STRRESET_PERFORMED;
@@ -683,11 +727,11 @@ struct sctp_chunk *sctp_process_strreset_inreq(
if (nums)
for (i = 0; i < nums; i++)
- stream->out[ntohs(str_p[i])].state =
+ SCTP_SO(stream, ntohs(str_p[i]))->state =
SCTP_STREAM_CLOSED;
else
for (i = 0; i < stream->outcnt; i++)
- stream->out[i].state = SCTP_STREAM_CLOSED;
+ SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;
asoc->strreset_chunk = chunk;
asoc->strreset_outstanding = 1;
@@ -786,11 +830,11 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
* incoming and outgoing streams.
*/
for (i = 0; i < stream->outcnt; i++) {
- stream->out[i].mid = 0;
- stream->out[i].mid_uo = 0;
+ SCTP_SO(stream, i)->mid = 0;
+ SCTP_SO(stream, i)->mid_uo = 0;
}
for (i = 0; i < stream->incnt; i++)
- stream->in[i].mid = 0;
+ SCTP_SI(stream, i)->mid = 0;
result = SCTP_STRRESET_PERFORMED;
@@ -979,15 +1023,18 @@ struct sctp_chunk *sctp_process_strreset_resp(
sizeof(__u16);
if (result == SCTP_STRRESET_PERFORMED) {
+ struct sctp_stream_out *sout;
if (nums) {
for (i = 0; i < nums; i++) {
- stream->out[ntohs(str_p[i])].mid = 0;
- stream->out[ntohs(str_p[i])].mid_uo = 0;
+ sout = SCTP_SO(stream, ntohs(str_p[i]));
+ sout->mid = 0;
+ sout->mid_uo = 0;
}
} else {
for (i = 0; i < stream->outcnt; i++) {
- stream->out[i].mid = 0;
- stream->out[i].mid_uo = 0;
+ sout = SCTP_SO(stream, i);
+ sout->mid = 0;
+ sout->mid_uo = 0;
}
}
@@ -995,7 +1042,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
}
for (i = 0; i < stream->outcnt; i++)
- stream->out[i].state = SCTP_STREAM_OPEN;
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
*evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
nums, str_p, GFP_ATOMIC);
@@ -1050,15 +1097,15 @@ struct sctp_chunk *sctp_process_strreset_resp(
asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
for (i = 0; i < stream->outcnt; i++) {
- stream->out[i].mid = 0;
- stream->out[i].mid_uo = 0;
+ SCTP_SO(stream, i)->mid = 0;
+ SCTP_SO(stream, i)->mid_uo = 0;
}
for (i = 0; i < stream->incnt; i++)
- stream->in[i].mid = 0;
+ SCTP_SI(stream, i)->mid = 0;
}
for (i = 0; i < stream->outcnt; i++)
- stream->out[i].state = SCTP_STREAM_OPEN;
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
*evp = sctp_ulpevent_make_assoc_reset_event(asoc, flags,
stsn, rtsn, GFP_ATOMIC);
@@ -1072,7 +1119,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
if (result == SCTP_STRRESET_PERFORMED)
for (i = number; i < stream->outcnt; i++)
- stream->out[i].state = SCTP_STREAM_OPEN;
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
else
stream->outcnt = number;
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index d3764c181299..0a78cdf86463 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -197,7 +197,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_partial(
__u32 next_fsn = 0;
int is_last = 0;
- sin = sctp_stream_in(ulpq->asoc, event->stream);
+ sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
skb_queue_walk(&ulpq->reasm, pos) {
struct sctp_ulpevent *cevent = sctp_skb2event(pos);
@@ -278,7 +278,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
__u32 pd_len = 0;
__u32 mid = 0;
- sin = sctp_stream_in(ulpq->asoc, event->stream);
+ sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
skb_queue_walk(&ulpq->reasm, pos) {
struct sctp_ulpevent *cevent = sctp_skb2event(pos);
@@ -368,7 +368,7 @@ static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
sctp_intl_store_reasm(ulpq, event);
- sin = sctp_stream_in(ulpq->asoc, event->stream);
+ sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
if (sin->pd_mode && event->mid == sin->mid &&
event->fsn == sin->fsn)
retval = sctp_intl_retrieve_partial(ulpq, event);
@@ -575,7 +575,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
__u32 next_fsn = 0;
int is_last = 0;
- sin = sctp_stream_in(ulpq->asoc, event->stream);
+ sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
skb_queue_walk(&ulpq->reasm_uo, pos) {
struct sctp_ulpevent *cevent = sctp_skb2event(pos);
@@ -659,7 +659,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
__u32 pd_len = 0;
__u32 mid = 0;
- sin = sctp_stream_in(ulpq->asoc, event->stream);
+ sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
skb_queue_walk(&ulpq->reasm_uo, pos) {
struct sctp_ulpevent *cevent = sctp_skb2event(pos);
@@ -750,7 +750,7 @@ static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
sctp_intl_store_reasm_uo(ulpq, event);
- sin = sctp_stream_in(ulpq->asoc, event->stream);
+ sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
event->fsn == sin->fsn_uo)
retval = sctp_intl_retrieve_partial_uo(ulpq, event);
@@ -774,7 +774,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
skb_queue_walk(&ulpq->reasm_uo, pos) {
struct sctp_ulpevent *cevent = sctp_skb2event(pos);
- csin = sctp_stream_in(ulpq->asoc, cevent->stream);
+ csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
if (csin->pd_mode_uo)
continue;
@@ -875,7 +875,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
skb_queue_walk(&ulpq->reasm, pos) {
struct sctp_ulpevent *cevent = sctp_skb2event(pos);
- csin = sctp_stream_in(ulpq->asoc, cevent->stream);
+ csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
if (csin->pd_mode)
continue;
@@ -1053,7 +1053,7 @@ static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
__u16 sid;
for (sid = 0; sid < stream->incnt; sid++) {
- struct sctp_stream_in *sin = &stream->in[sid];
+ struct sctp_stream_in *sin = SCTP_SI(stream, sid);
__u32 mid;
if (sin->pd_mode_uo) {
@@ -1247,7 +1247,7 @@ static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
__u8 flags)
{
- struct sctp_stream_in *sin = sctp_stream_in(ulpq->asoc, sid);
+ struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
struct sctp_stream *stream = &ulpq->asoc->stream;
if (flags & SCTP_FTSN_U_BIT) {
diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c
index f5fcd425232a..a6c04a94b08f 100644
--- a/net/sctp/stream_sched.c
+++ b/net/sctp/stream_sched.c
@@ -161,7 +161,7 @@ int sctp_sched_set_sched(struct sctp_association *asoc,
/* Give the next scheduler a clean slate. */
for (i = 0; i < asoc->stream.outcnt; i++) {
- void *p = asoc->stream.out[i].ext;
+ void *p = SCTP_SO(&asoc->stream, i)->ext;
if (!p)
continue;
@@ -175,7 +175,7 @@ int sctp_sched_set_sched(struct sctp_association *asoc,
asoc->outqueue.sched = n;
n->init(&asoc->stream);
for (i = 0; i < asoc->stream.outcnt; i++) {
- if (!asoc->stream.out[i].ext)
+ if (!SCTP_SO(&asoc->stream, i)->ext)
continue;
ret = n->init_sid(&asoc->stream, i, GFP_KERNEL);
@@ -217,7 +217,7 @@ int sctp_sched_set_value(struct sctp_association *asoc, __u16 sid,
if (sid >= asoc->stream.outcnt)
return -EINVAL;
- if (!asoc->stream.out[sid].ext) {
+ if (!SCTP_SO(&asoc->stream, sid)->ext) {
int ret;
ret = sctp_stream_init_ext(&asoc->stream, sid);
@@ -234,7 +234,7 @@ int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
if (sid >= asoc->stream.outcnt)
return -EINVAL;
- if (!asoc->stream.out[sid].ext)
+ if (!SCTP_SO(&asoc->stream, sid)->ext)
return 0;
return asoc->outqueue.sched->get(&asoc->stream, sid, value);
@@ -252,7 +252,7 @@ void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
* priority stream comes in.
*/
sid = sctp_chunk_stream_no(ch);
- sout = &q->asoc->stream.out[sid];
+ sout = SCTP_SO(&q->asoc->stream, sid);
q->asoc->stream.out_curr = sout;
return;
}
@@ -272,8 +272,9 @@ void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch)
int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp)
{
struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
+ struct sctp_stream_out_ext *ext = SCTP_SO(stream, sid)->ext;
- INIT_LIST_HEAD(&stream->out[sid].ext->outq);
+ INIT_LIST_HEAD(&ext->outq);
return sched->init_sid(stream, sid, gfp);
}
diff --git a/net/sctp/stream_sched_prio.c b/net/sctp/stream_sched_prio.c
index 7997d35dd0fd..2245083a98f2 100644
--- a/net/sctp/stream_sched_prio.c
+++ b/net/sctp/stream_sched_prio.c
@@ -75,10 +75,10 @@ static struct sctp_stream_priorities *sctp_sched_prio_get_head(
/* No luck. So we search on all streams now. */
for (i = 0; i < stream->outcnt; i++) {
- if (!stream->out[i].ext)
+ if (!SCTP_SO(stream, i)->ext)
continue;
- p = stream->out[i].ext->prio_head;
+ p = SCTP_SO(stream, i)->ext->prio_head;
if (!p)
/* Means all other streams won't be initialized
* as well.
@@ -165,7 +165,7 @@ static void sctp_sched_prio_sched(struct sctp_stream *stream,
static int sctp_sched_prio_set(struct sctp_stream *stream, __u16 sid,
__u16 prio, gfp_t gfp)
{
- struct sctp_stream_out *sout = &stream->out[sid];
+ struct sctp_stream_out *sout = SCTP_SO(stream, sid);
struct sctp_stream_out_ext *soute = sout->ext;
struct sctp_stream_priorities *prio_head, *old;
bool reschedule = false;
@@ -186,7 +186,7 @@ static int sctp_sched_prio_set(struct sctp_stream *stream, __u16 sid,
return 0;
for (i = 0; i < stream->outcnt; i++) {
- soute = stream->out[i].ext;
+ soute = SCTP_SO(stream, i)->ext;
if (soute && soute->prio_head == old)
/* It's still in use, nothing else to do here. */
return 0;
@@ -201,7 +201,7 @@ static int sctp_sched_prio_set(struct sctp_stream *stream, __u16 sid,
static int sctp_sched_prio_get(struct sctp_stream *stream, __u16 sid,
__u16 *value)
{
- *value = stream->out[sid].ext->prio_head->prio;
+ *value = SCTP_SO(stream, sid)->ext->prio_head->prio;
return 0;
}
@@ -215,7 +215,7 @@ static int sctp_sched_prio_init(struct sctp_stream *stream)
static int sctp_sched_prio_init_sid(struct sctp_stream *stream, __u16 sid,
gfp_t gfp)
{
- INIT_LIST_HEAD(&stream->out[sid].ext->prio_list);
+ INIT_LIST_HEAD(&SCTP_SO(stream, sid)->ext->prio_list);
return sctp_sched_prio_set(stream, sid, 0, gfp);
}
@@ -233,9 +233,9 @@ static void sctp_sched_prio_free(struct sctp_stream *stream)
*/
sctp_sched_prio_unsched_all(stream);
for (i = 0; i < stream->outcnt; i++) {
- if (!stream->out[i].ext)
+ if (!SCTP_SO(stream, i)->ext)
continue;
- prio = stream->out[i].ext->prio_head;
+ prio = SCTP_SO(stream, i)->ext->prio_head;
if (prio && list_empty(&prio->prio_sched))
list_add(&prio->prio_sched, &list);
}
@@ -255,7 +255,7 @@ static void sctp_sched_prio_enqueue(struct sctp_outq *q,
ch = list_first_entry(&msg->chunks, struct sctp_chunk, frag_list);
sid = sctp_chunk_stream_no(ch);
stream = &q->asoc->stream;
- sctp_sched_prio_sched(stream, stream->out[sid].ext);
+ sctp_sched_prio_sched(stream, SCTP_SO(stream, sid)->ext);
}
static struct sctp_chunk *sctp_sched_prio_dequeue(struct sctp_outq *q)
@@ -297,7 +297,7 @@ static void sctp_sched_prio_dequeue_done(struct sctp_outq *q,
* this priority.
*/
sid = sctp_chunk_stream_no(ch);
- soute = q->asoc->stream.out[sid].ext;
+ soute = SCTP_SO(&q->asoc->stream, sid)->ext;
prio = soute->prio_head;
sctp_sched_prio_next_stream(prio);
@@ -317,7 +317,7 @@ static void sctp_sched_prio_sched_all(struct sctp_stream *stream)
__u16 sid;
sid = sctp_chunk_stream_no(ch);
- sout = &stream->out[sid];
+ sout = SCTP_SO(stream, sid);
if (sout->ext)
sctp_sched_prio_sched(stream, sout->ext);
}
diff --git a/net/sctp/stream_sched_rr.c b/net/sctp/stream_sched_rr.c
index 1155692448f1..52ba743fa7a7 100644
--- a/net/sctp/stream_sched_rr.c
+++ b/net/sctp/stream_sched_rr.c
@@ -100,7 +100,7 @@ static int sctp_sched_rr_init(struct sctp_stream *stream)
static int sctp_sched_rr_init_sid(struct sctp_stream *stream, __u16 sid,
gfp_t gfp)
{
- INIT_LIST_HEAD(&stream->out[sid].ext->rr_list);
+ INIT_LIST_HEAD(&SCTP_SO(stream, sid)->ext->rr_list);
return 0;
}
@@ -120,7 +120,7 @@ static void sctp_sched_rr_enqueue(struct sctp_outq *q,
ch = list_first_entry(&msg->chunks, struct sctp_chunk, frag_list);
sid = sctp_chunk_stream_no(ch);
stream = &q->asoc->stream;
- sctp_sched_rr_sched(stream, stream->out[sid].ext);
+ sctp_sched_rr_sched(stream, SCTP_SO(stream, sid)->ext);
}
static struct sctp_chunk *sctp_sched_rr_dequeue(struct sctp_outq *q)
@@ -154,7 +154,7 @@ static void sctp_sched_rr_dequeue_done(struct sctp_outq *q,
/* Last chunk on that msg, move to the next stream */
sid = sctp_chunk_stream_no(ch);
- soute = q->asoc->stream.out[sid].ext;
+ soute = SCTP_SO(&q->asoc->stream, sid)->ext;
sctp_sched_rr_next_stream(&q->asoc->stream);
@@ -173,7 +173,7 @@ static void sctp_sched_rr_sched_all(struct sctp_stream *stream)
__u16 sid;
sid = sctp_chunk_stream_no(ch);
- soute = stream->out[sid].ext;
+ soute = SCTP_SO(stream, sid)->ext;
if (soute)
sctp_sched_rr_sched(stream, soute);
}
diff --git a/net/smc/Makefile b/net/smc/Makefile
index 188104654b54..4df96b4b8130 100644
--- a/net/smc/Makefile
+++ b/net/smc/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_SMC) += smc.o
obj-$(CONFIG_SMC_DIAG) += smc_diag.o
smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o
-smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o
+smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index e7de5f282722..2d8a1e15e4f9 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -23,6 +23,7 @@
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>
+#include <linux/if_vlan.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -35,6 +36,7 @@
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
+#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_tx.h"
#include "smc_rx.h"
@@ -342,20 +344,17 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc)
rc = smc_ib_modify_qp_rts(link);
if (rc)
- return SMC_CLC_DECL_INTERR;
+ return SMC_CLC_DECL_ERR_RDYLNK;
smc_wr_remember_qp_attr(link);
if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
- return SMC_CLC_DECL_INTERR;
+ return SMC_CLC_DECL_ERR_REGRMB;
/* send CONFIRM LINK response over RoCE fabric */
- rc = smc_llc_send_confirm_link(link,
- link->smcibdev->mac[link->ibport - 1],
- &link->smcibdev->gid[link->ibport - 1],
- SMC_LLC_RESP);
+ rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
if (rc < 0)
- return SMC_CLC_DECL_TCL;
+ return SMC_CLC_DECL_TIMEOUT_CL;
/* receive ADD LINK request from server over RoCE fabric */
rest = wait_for_completion_interruptible_timeout(&link->llc_add,
@@ -371,18 +370,17 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc)
/* send add link reject message, only one link supported for now */
rc = smc_llc_send_add_link(link,
link->smcibdev->mac[link->ibport - 1],
- &link->smcibdev->gid[link->ibport - 1],
- SMC_LLC_RESP);
+ link->gid, SMC_LLC_RESP);
if (rc < 0)
- return SMC_CLC_DECL_TCL;
+ return SMC_CLC_DECL_TIMEOUT_AL;
smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);
return 0;
}
-static void smc_conn_save_peer_info(struct smc_sock *smc,
- struct smc_clc_msg_accept_confirm *clc)
+static void smcr_conn_save_peer_info(struct smc_sock *smc,
+ struct smc_clc_msg_accept_confirm *clc)
{
int bufsize = smc_uncompress_bufsize(clc->rmbe_size);
@@ -393,6 +391,28 @@ static void smc_conn_save_peer_info(struct smc_sock *smc,
smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}
+static void smcd_conn_save_peer_info(struct smc_sock *smc,
+ struct smc_clc_msg_accept_confirm *clc)
+{
+ int bufsize = smc_uncompress_bufsize(clc->dmbe_size);
+
+ smc->conn.peer_rmbe_idx = clc->dmbe_idx;
+ smc->conn.peer_token = clc->token;
+ /* msg header takes up space in the buffer */
+ smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
+ atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
+ smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
+}
+
+static void smc_conn_save_peer_info(struct smc_sock *smc,
+ struct smc_clc_msg_accept_confirm *clc)
+{
+ if (smc->conn.lgr->is_smcd)
+ smcd_conn_save_peer_info(smc, clc);
+ else
+ smcr_conn_save_peer_info(smc, clc);
+}
+
static void smc_link_save_peer_info(struct smc_link *link,
struct smc_clc_msg_accept_confirm *clc)
{
@@ -404,9 +424,10 @@ static void smc_link_save_peer_info(struct smc_link *link,
}
/* fall back during connect */
-static int smc_connect_fallback(struct smc_sock *smc)
+static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
smc->use_fallback = true;
+ smc->fallback_rsn = reason_code;
smc_copy_sock_settings_to_clc(smc);
if (smc->sk.sk_state == SMC_INIT)
smc->sk.sk_state = SMC_ACTIVE;
@@ -423,7 +444,7 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
sock_put(&smc->sk); /* passive closing */
return reason_code;
}
- if (reason_code != SMC_CLC_DECL_REPLY) {
+ if (reason_code != SMC_CLC_DECL_PEERDECL) {
rc = smc_clc_send_decline(smc, reason_code);
if (rc < 0) {
if (smc->sk.sk_state == SMC_INIT)
@@ -431,7 +452,7 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
return rc;
}
}
- return smc_connect_fallback(smc);
+ return smc_connect_fallback(smc, reason_code);
}
/* abort connecting */
@@ -448,7 +469,7 @@ static int smc_connect_abort(struct smc_sock *smc, int reason_code,
/* check if there is a rdma device available for this connection. */
/* called for connect and listen */
static int smc_check_rdma(struct smc_sock *smc, struct smc_ib_device **ibdev,
- u8 *ibport)
+ u8 *ibport, unsigned short vlan_id, u8 gid[])
{
int reason_code = 0;
@@ -456,22 +477,59 @@ static int smc_check_rdma(struct smc_sock *smc, struct smc_ib_device **ibdev,
* within same PNETID that also contains the ethernet device
* used for the internal TCP socket
*/
- smc_pnet_find_roce_resource(smc->clcsock->sk, ibdev, ibport);
+ smc_pnet_find_roce_resource(smc->clcsock->sk, ibdev, ibport, vlan_id,
+ gid);
if (!(*ibdev))
reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
return reason_code;
}
+/* check if there is an ISM device available for this connection. */
+/* called for connect and listen */
+static int smc_check_ism(struct smc_sock *smc, struct smcd_dev **ismdev)
+{
+ /* Find ISM device with same PNETID as connecting interface */
+ smc_pnet_find_ism_resource(smc->clcsock->sk, ismdev);
+ if (!(*ismdev))
+ return SMC_CLC_DECL_CNFERR; /* configuration error */
+ return 0;
+}
+
+/* Check for VLAN ID and register it on ISM device just for CLC handshake */
+static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
+ struct smcd_dev *ismdev,
+ unsigned short vlan_id)
+{
+ if (vlan_id && smc_ism_get_vlan(ismdev, vlan_id))
+ return SMC_CLC_DECL_CNFERR;
+ return 0;
+}
+
+/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
+ * used, the VLAN ID will be registered again during the connection setup.
+ */
+static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd,
+ struct smcd_dev *ismdev,
+ unsigned short vlan_id)
+{
+ if (!is_smcd)
+ return 0;
+ if (vlan_id && smc_ism_put_vlan(ismdev, vlan_id))
+ return SMC_CLC_DECL_CNFERR;
+ return 0;
+}
+
/* CLC handshake during connect */
-static int smc_connect_clc(struct smc_sock *smc,
+static int smc_connect_clc(struct smc_sock *smc, int smc_type,
struct smc_clc_msg_accept_confirm *aclc,
- struct smc_ib_device *ibdev, u8 ibport)
+ struct smc_ib_device *ibdev, u8 ibport,
+ u8 gid[], struct smcd_dev *ismdev)
{
int rc = 0;
/* do inband token exchange */
- rc = smc_clc_send_proposal(smc, ibdev, ibport);
+ rc = smc_clc_send_proposal(smc, smc_type, ibdev, ibport, gid, ismdev);
if (rc)
return rc;
/* receive SMC Accept CLC message */
@@ -488,8 +546,8 @@ static int smc_connect_rdma(struct smc_sock *smc,
int reason_code = 0;
mutex_lock(&smc_create_lgr_pending);
- local_contact = smc_conn_create(smc, ibdev, ibport, &aclc->lcl,
- aclc->hdr.flag);
+ local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev,
+ ibport, &aclc->lcl, NULL, 0);
if (local_contact < 0) {
if (local_contact == -ENOMEM)
reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
@@ -504,14 +562,14 @@ static int smc_connect_rdma(struct smc_sock *smc,
smc_conn_save_peer_info(smc, aclc);
/* create send buffer and rmb */
- if (smc_buf_create(smc))
+ if (smc_buf_create(smc, false))
return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact);
if (local_contact == SMC_FIRST_CONTACT)
smc_link_save_peer_info(link, aclc);
if (smc_rmb_rtoken_handling(&smc->conn, aclc))
- return smc_connect_abort(smc, SMC_CLC_DECL_INTERR,
+ return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RTOK,
local_contact);
smc_close_init(smc);
@@ -519,12 +577,12 @@ static int smc_connect_rdma(struct smc_sock *smc,
if (local_contact == SMC_FIRST_CONTACT) {
if (smc_ib_ready_link(link))
- return smc_connect_abort(smc, SMC_CLC_DECL_INTERR,
+ return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RDYLNK,
local_contact);
} else {
if (!smc->conn.rmb_desc->reused &&
smc_reg_rmb(link, smc->conn.rmb_desc, true))
- return smc_connect_abort(smc, SMC_CLC_DECL_INTERR,
+ return smc_connect_abort(smc, SMC_CLC_DECL_ERR_REGRMB,
local_contact);
}
smc_rmb_sync_sg_for_device(&smc->conn);
@@ -551,41 +609,113 @@ static int smc_connect_rdma(struct smc_sock *smc,
return 0;
}
+/* setup for ISM connection of client */
+static int smc_connect_ism(struct smc_sock *smc,
+ struct smc_clc_msg_accept_confirm *aclc,
+ struct smcd_dev *ismdev)
+{
+ int local_contact = SMC_FIRST_CONTACT;
+ int rc = 0;
+
+ mutex_lock(&smc_create_lgr_pending);
+ local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0,
+ NULL, ismdev, aclc->gid);
+ if (local_contact < 0)
+ return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0);
+
+ /* Create send and receive buffers */
+ if (smc_buf_create(smc, true))
+ return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact);
+
+ smc_conn_save_peer_info(smc, aclc);
+ smc_close_init(smc);
+ smc_rx_init(smc);
+ smc_tx_init(smc);
+
+ rc = smc_clc_send_confirm(smc);
+ if (rc)
+ return smc_connect_abort(smc, rc, local_contact);
+ mutex_unlock(&smc_create_lgr_pending);
+
+ smc_copy_sock_settings_to_clc(smc);
+ if (smc->sk.sk_state == SMC_INIT)
+ smc->sk.sk_state = SMC_ACTIVE;
+
+ return 0;
+}
+
/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
+ bool ism_supported = false, rdma_supported = false;
struct smc_clc_msg_accept_confirm aclc;
struct smc_ib_device *ibdev;
+ struct smcd_dev *ismdev;
+ u8 gid[SMC_GID_SIZE];
+ unsigned short vlan;
+ int smc_type;
int rc = 0;
u8 ibport;
sock_hold(&smc->sk); /* sock put in passive closing */
if (smc->use_fallback)
- return smc_connect_fallback(smc);
+ return smc_connect_fallback(smc, smc->fallback_rsn);
/* if peer has not signalled SMC-capability, fall back */
if (!tcp_sk(smc->clcsock->sk)->syn_smc)
- return smc_connect_fallback(smc);
+ return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);
/* IPSec connections opt out of SMC-R optimizations */
if (using_ipsec(smc))
return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC);
- /* check if a RDMA device is available; if not, fall back */
- if (smc_check_rdma(smc, &ibdev, &ibport))
+ /* check for VLAN ID */
+ if (smc_vlan_by_tcpsk(smc->clcsock, &vlan))
return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR);
+ /* check if there is an ISM device available */
+ if (!smc_check_ism(smc, &ismdev) &&
+ !smc_connect_ism_vlan_setup(smc, ismdev, vlan)) {
+ /* ISM is supported for this connection */
+ ism_supported = true;
+ smc_type = SMC_TYPE_D;
+ }
+
+ /* check if there is an RDMA device available */
+ if (!smc_check_rdma(smc, &ibdev, &ibport, vlan, gid)) {
+ /* RDMA is supported for this connection */
+ rdma_supported = true;
+ if (ism_supported)
+ smc_type = SMC_TYPE_B; /* both */
+ else
+ smc_type = SMC_TYPE_R; /* only RDMA */
+ }
+
+ /* if neither ISM nor RDMA are supported, fallback */
+ if (!rdma_supported && !ism_supported)
+ return smc_connect_decline_fallback(smc, SMC_CLC_DECL_NOSMCDEV);
+
/* perform CLC handshake */
- rc = smc_connect_clc(smc, &aclc, ibdev, ibport);
- if (rc)
+ rc = smc_connect_clc(smc, smc_type, &aclc, ibdev, ibport, gid, ismdev);
+ if (rc) {
+ smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
return smc_connect_decline_fallback(smc, rc);
+ }
- /* connect using rdma */
- rc = smc_connect_rdma(smc, &aclc, ibdev, ibport);
- if (rc)
+ /* depending on previous steps, connect using rdma or ism */
+ if (rdma_supported && aclc.hdr.path == SMC_TYPE_R)
+ rc = smc_connect_rdma(smc, &aclc, ibdev, ibport);
+ else if (ism_supported && aclc.hdr.path == SMC_TYPE_D)
+ rc = smc_connect_ism(smc, &aclc, ismdev);
+ else
+ rc = SMC_CLC_DECL_MODEUNSUPP;
+ if (rc) {
+ smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
return smc_connect_decline_fallback(smc, rc);
+ }
+ smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
return 0;
}
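
The connect path now advertises what the client can use and trusts the server's pick: SMC_TYPE_D (ISM/SMC-D), SMC_TYPE_R (RDMA/SMC-R), or SMC_TYPE_B for both; any other answer is declined as SMC_CLC_DECL_MODEUNSUPP. A compact sketch of the client-side proposal choice (type constants assumed from smc_clc.h):

	/* sketch of the smc_type selection in __smc_connect() above */
	static int propose_type(bool ism_ok, bool rdma_ok)
	{
		if (ism_ok && rdma_ok)
			return SMC_TYPE_B;	/* both; server decides */
		if (ism_ok)
			return SMC_TYPE_D;	/* ISM / SMC-D only */
		if (rdma_ok)
			return SMC_TYPE_R;	/* RDMA / SMC-R only */
		return -1;			/* neither: TCP fallback */
	}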
@@ -817,15 +947,12 @@ static int smc_serv_conf_first_link(struct smc_sock *smc)
link = &lgr->lnk[SMC_SINGLE_LINK];
if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
- return SMC_CLC_DECL_INTERR;
+ return SMC_CLC_DECL_ERR_REGRMB;
/* send CONFIRM LINK request to client over the RoCE fabric */
- rc = smc_llc_send_confirm_link(link,
- link->smcibdev->mac[link->ibport - 1],
- &link->smcibdev->gid[link->ibport - 1],
- SMC_LLC_REQ);
+ rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
if (rc < 0)
- return SMC_CLC_DECL_TCL;
+ return SMC_CLC_DECL_TIMEOUT_CL;
/* receive CONFIRM LINK response from client over the RoCE fabric */
rest = wait_for_completion_interruptible_timeout(
@@ -845,10 +972,9 @@ static int smc_serv_conf_first_link(struct smc_sock *smc)
/* send ADD LINK request to client over the RoCE fabric */
rc = smc_llc_send_add_link(link,
link->smcibdev->mac[link->ibport - 1],
- &link->smcibdev->gid[link->ibport - 1],
- SMC_LLC_REQ);
+ link->gid, SMC_LLC_REQ);
if (rc < 0)
- return SMC_CLC_DECL_TCL;
+ return SMC_CLC_DECL_TIMEOUT_AL;
/* receive ADD LINK response from client over the RoCE fabric */
rest = wait_for_completion_interruptible_timeout(&link->llc_add_resp,
@@ -923,7 +1049,8 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
}
smc_conn_free(&new_smc->conn);
new_smc->use_fallback = true;
- if (reason_code && reason_code != SMC_CLC_DECL_REPLY) {
+ new_smc->fallback_rsn = reason_code;
+ if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
if (smc_clc_send_decline(new_smc, reason_code) < 0) {
smc_listen_out_err(new_smc);
return;
@@ -953,7 +1080,8 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
int *local_contact)
{
/* allocate connection / link group */
- *local_contact = smc_conn_create(new_smc, ibdev, ibport, &pclc->lcl, 0);
+ *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport,
+ &pclc->lcl, NULL, 0);
if (*local_contact < 0) {
if (*local_contact == -ENOMEM)
return SMC_CLC_DECL_MEM;/* insufficient memory*/
@@ -961,12 +1089,50 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
}
/* create send buffer and rmb */
- if (smc_buf_create(new_smc))
+ if (smc_buf_create(new_smc, false))
return SMC_CLC_DECL_MEM;
return 0;
}
+/* listen worker: initialize connection and buffers for SMC-D */
+static int smc_listen_ism_init(struct smc_sock *new_smc,
+ struct smc_clc_msg_proposal *pclc,
+ struct smcd_dev *ismdev,
+ int *local_contact)
+{
+ struct smc_clc_msg_smcd *pclc_smcd;
+
+ pclc_smcd = smc_get_clc_msg_smcd(pclc);
+ *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL,
+ ismdev, pclc_smcd->gid);
+ if (*local_contact < 0) {
+ if (*local_contact == -ENOMEM)
+ return SMC_CLC_DECL_MEM;/* insufficient memory*/
+ return SMC_CLC_DECL_INTERR; /* other error */
+ }
+
+ /* Check if peer can be reached via ISM device */
+ if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid,
+ new_smc->conn.lgr->vlan_id,
+ new_smc->conn.lgr->smcd)) {
+ if (*local_contact == SMC_FIRST_CONTACT)
+ smc_lgr_forget(new_smc->conn.lgr);
+ smc_conn_free(&new_smc->conn);
+ return SMC_CLC_DECL_CNFERR;
+ }
+
+ /* Create send and receive buffers */
+ if (smc_buf_create(new_smc, true)) {
+ if (*local_contact == SMC_FIRST_CONTACT)
+ smc_lgr_forget(new_smc->conn.lgr);
+ smc_conn_free(&new_smc->conn);
+ return SMC_CLC_DECL_MEM;
+ }
+
+ return 0;
+}
+
/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
{
@@ -975,7 +1141,7 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
if (local_contact != SMC_FIRST_CONTACT) {
if (!new_smc->conn.rmb_desc->reused) {
if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true))
- return SMC_CLC_DECL_INTERR;
+ return SMC_CLC_DECL_ERR_REGRMB;
}
}
smc_rmb_sync_sg_for_device(&new_smc->conn);
@@ -995,13 +1161,13 @@ static void smc_listen_rdma_finish(struct smc_sock *new_smc,
smc_link_save_peer_info(link, cclc);
if (smc_rmb_rtoken_handling(&new_smc->conn, cclc)) {
- reason_code = SMC_CLC_DECL_INTERR;
+ reason_code = SMC_CLC_DECL_ERR_RTOK;
goto decline;
}
if (local_contact == SMC_FIRST_CONTACT) {
if (smc_ib_ready_link(link)) {
- reason_code = SMC_CLC_DECL_INTERR;
+ reason_code = SMC_CLC_DECL_ERR_RDYLNK;
goto decline;
}
/* QP confirmation over RoCE fabric */
@@ -1025,8 +1191,11 @@ static void smc_listen_work(struct work_struct *work)
struct smc_clc_msg_accept_confirm cclc;
struct smc_clc_msg_proposal *pclc;
struct smc_ib_device *ibdev;
+ bool ism_supported = false;
+ struct smcd_dev *ismdev;
u8 buf[SMC_CLC_MAX_LEN];
int local_contact = 0;
+ unsigned short vlan;
int reason_code = 0;
int rc = 0;
u8 ibport;
@@ -1039,6 +1208,7 @@ static void smc_listen_work(struct work_struct *work)
/* check if peer is smc capable */
if (!tcp_sk(newclcsock->sk)->syn_smc) {
new_smc->use_fallback = true;
+ new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
smc_listen_out_connected(new_smc);
return;
}
@@ -1065,15 +1235,26 @@ static void smc_listen_work(struct work_struct *work)
smc_rx_init(new_smc);
smc_tx_init(new_smc);
+ /* check if ISM is available */
+ if ((pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) &&
+ !smc_check_ism(new_smc, &ismdev) &&
+ !smc_listen_ism_init(new_smc, pclc, ismdev, &local_contact)) {
+ ism_supported = true;
+ }
+
/* check if RDMA is available */
- if (smc_check_rdma(new_smc, &ibdev, &ibport) ||
- smc_listen_rdma_check(new_smc, pclc) ||
- smc_listen_rdma_init(new_smc, pclc, ibdev, ibport,
- &local_contact) ||
- smc_listen_rdma_reg(new_smc, local_contact)) {
+ if (!ism_supported &&
+ ((pclc->hdr.path != SMC_TYPE_R && pclc->hdr.path != SMC_TYPE_B) ||
+ smc_vlan_by_tcpsk(new_smc->clcsock, &vlan) ||
+ smc_check_rdma(new_smc, &ibdev, &ibport, vlan, NULL) ||
+ smc_listen_rdma_check(new_smc, pclc) ||
+ smc_listen_rdma_init(new_smc, pclc, ibdev, ibport,
+ &local_contact) ||
+ smc_listen_rdma_reg(new_smc, local_contact))) {
/* SMC not supported, decline */
mutex_unlock(&smc_create_lgr_pending);
- smc_listen_decline(new_smc, SMC_CLC_DECL_CNFERR, local_contact);
+ smc_listen_decline(new_smc, SMC_CLC_DECL_MODEUNSUPP,
+ local_contact);
return;
}
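
The listen worker now tries the two transports in order: SMC-D first when the proposal offers it (SMC_TYPE_D or SMC_TYPE_B) and ISM initialization succeeds, SMC-R second, and a decline with SMC_CLC_DECL_MODEUNSUPP otherwise. A minimal userspace sketch of that decision (choose_mode() and its arguments are hypothetical, not kernel API):

    #include <stdio.h>

    enum { SMC_TYPE_R = 0, SMC_TYPE_D = 1, SMC_TYPE_B = 3 };

    /* hypothetical helper mirroring the listen-side flow: SMC-D is
     * preferred when proposed and usable, then SMC-R, else decline
     */
    static int choose_mode(int path, int ism_ok, int rdma_ok)
    {
        if ((path == SMC_TYPE_D || path == SMC_TYPE_B) && ism_ok)
            return SMC_TYPE_D;
        if ((path == SMC_TYPE_R || path == SMC_TYPE_B) && rdma_ok)
            return SMC_TYPE_R;
        return -1; /* maps to SMC_CLC_DECL_MODEUNSUPP */
    }

    int main(void)
    {
        printf("path B, ISM ok  -> %d\n", choose_mode(SMC_TYPE_B, 1, 1));
        printf("path R, no RDMA -> %d\n", choose_mode(SMC_TYPE_R, 1, 0));
        return 0;
    }
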
@@ -1095,7 +1276,8 @@ static void smc_listen_work(struct work_struct *work)
}
/* finish worker */
- smc_listen_rdma_finish(new_smc, &cclc, local_contact);
+ if (!ism_supported)
+ smc_listen_rdma_finish(new_smc, &cclc, local_contact);
smc_conn_save_peer_info(new_smc, &cclc);
mutex_unlock(&smc_create_lgr_pending);
smc_listen_out_connected(new_smc);
@@ -1119,6 +1301,7 @@ static void smc_tcp_listen_work(struct work_struct *work)
new_smc->listen_smc = lsmc;
new_smc->use_fallback = lsmc->use_fallback;
+ new_smc->fallback_rsn = lsmc->fallback_rsn;
sock_hold(lsk); /* sock_put in smc_listen_work */
INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
smc_copy_sock_settings_to_smc(new_smc);
@@ -1275,6 +1458,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_flags & MSG_FASTOPEN) {
if (sk->sk_state == SMC_INIT) {
smc->use_fallback = true;
+ smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
} else {
rc = -EINVAL;
goto out;
@@ -1353,7 +1537,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
mask |= EPOLLERR;
} else {
if (sk->sk_state != SMC_CLOSED)
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, wait);
if (sk->sk_err)
mask |= EPOLLERR;
if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
@@ -1471,6 +1655,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
/* option not supported by SMC */
if (sk->sk_state == SMC_INIT) {
smc->use_fallback = true;
+ smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
} else {
if (!smc->use_fallback)
rc = -EINVAL;
@@ -1578,12 +1763,8 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
smc->sk.sk_state == SMC_CLOSED) {
answ = 0;
} else {
- smc_curs_write(&cons,
- smc_curs_read(&conn->local_tx_ctrl.cons, conn),
- conn);
- smc_curs_write(&urg,
- smc_curs_read(&conn->urg_curs, conn),
- conn);
+ smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
+ smc_curs_copy(&urg, &conn->urg_curs, conn);
answ = smc_curs_diff(conn->rmb_desc->len,
&cons, &urg) == 1;
}
@@ -1716,6 +1897,7 @@ static int smc_create(struct net *net, struct socket *sock, int protocol,
/* create internal TCP socket for CLC handshake and fallback */
smc = smc_sk(sk);
smc->use_fallback = false; /* assume rdma capability first */
+ smc->fallback_rsn = 0;
rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
&smc->clcsock);
if (rc) {
diff --git a/net/smc/smc.h b/net/smc/smc.h
index d7ca26570482..08786ace6010 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -21,8 +21,6 @@
#define SMCPROTO_SMC 0 /* SMC protocol, IPv4 */
#define SMCPROTO_SMC6 1 /* SMC protocol, IPv6 */
-#define SMC_MAX_PORTS 2 /* Max # of ports */
-
extern struct proto smc_proto;
extern struct proto smc_proto6;
@@ -185,6 +183,11 @@ struct smc_connection {
spinlock_t acurs_lock; /* protect cursors */
#endif
struct work_struct close_work; /* peer sent some closing */
+ struct tasklet_struct rx_tsklet; /* Receiver tasklet for SMC-D */
+ u8 rx_off; /* receive offset:
+ * 0 for SMC-R, 32 for SMC-D
+ */
+ u64 peer_token; /* SMC-D token of peer */
};
struct smc_connect_info {
@@ -205,6 +208,8 @@ struct smc_sock { /* smc sock container */
struct list_head accept_q; /* sockets to be accepted */
spinlock_t accept_q_lock; /* protects accept_q */
bool use_fallback; /* fallback to tcp */
+ int fallback_rsn; /* reason for fallback */
+ u32 peer_diagnosis; /* decline reason from peer */
int sockopt_defer_accept;
/* sockopt TCP_DEFER_ACCEPT
* value
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 9bde1e4ca288..ed5dcf03fe0b 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -34,14 +34,15 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
enum ib_wc_status wc_status)
{
struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
+ struct smc_connection *conn = cdcpend->conn;
struct smc_sock *smc;
int diff;
- if (!cdcpend->conn)
+ if (!conn)
/* already dismissed */
return;
- smc = container_of(cdcpend->conn, struct smc_sock, conn);
+ smc = container_of(conn, struct smc_sock, conn);
bh_lock_sock(&smc->sk);
if (!wc_status) {
diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
@@ -52,9 +53,7 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
atomic_add(diff, &cdcpend->conn->sndbuf_space);
/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
smp_mb__after_atomic();
- smc_curs_write(&cdcpend->conn->tx_curs_fin,
- smc_curs_read(&cdcpend->cursor, cdcpend->conn),
- cdcpend->conn);
+ smc_curs_copy(&conn->tx_curs_fin, &cdcpend->cursor, conn);
}
smc_tx_sndbuf_nonfull(smc);
bh_unlock_sock(&smc->sk);
@@ -110,14 +109,13 @@ int smc_cdc_msg_send(struct smc_connection *conn,
&conn->local_tx_ctrl, conn);
rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
if (!rc)
- smc_curs_write(&conn->rx_curs_confirmed,
- smc_curs_read(&conn->local_tx_ctrl.cons, conn),
- conn);
+ smc_curs_copy(&conn->rx_curs_confirmed,
+ &conn->local_tx_ctrl.cons, conn);
return rc;
}
-int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
+static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
struct smc_cdc_tx_pend *pend;
struct smc_wr_buf *wr_buf;
@@ -130,6 +128,21 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
return smc_cdc_msg_send(conn, wr_buf, pend);
}
+int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
+{
+ int rc;
+
+ if (conn->lgr->is_smcd) {
+ spin_lock_bh(&conn->send_lock);
+ rc = smcd_cdc_msg_send(conn);
+ spin_unlock_bh(&conn->send_lock);
+ } else {
+ rc = smcr_cdc_get_slot_and_msg_send(conn);
+ }
+
+ return rc;
+}
+
static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
unsigned long data)
{
@@ -157,6 +170,44 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
(unsigned long)conn);
}
+/* Send an SMC-D CDC header.
+ * This increments the free space available in our send buffer.
+ * It also records the consumer cursor last confirmed to the peer.
+ */
+int smcd_cdc_msg_send(struct smc_connection *conn)
+{
+ struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+ struct smcd_cdc_msg cdc;
+ int rc, diff;
+
+ memset(&cdc, 0, sizeof(cdc));
+ cdc.common.type = SMC_CDC_MSG_TYPE;
+ cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap;
+ cdc.prod_count = conn->local_tx_ctrl.prod.count;
+
+ cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap;
+ cdc.cons_count = conn->local_tx_ctrl.cons.count;
+ cdc.prod_flags = conn->local_tx_ctrl.prod_flags;
+ cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
+ rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
+ if (rc)
+ return rc;
+ smc_curs_copy(&conn->rx_curs_confirmed, &conn->local_tx_ctrl.cons,
+ conn);
+ /* Calculate transmitted data and increment free send buffer space */
+ diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
+ &conn->tx_curs_sent);
+ /* increased by confirmed number of bytes */
+ smp_mb__before_atomic();
+ atomic_add(diff, &conn->sndbuf_space);
+ /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
+ smp_mb__after_atomic();
+ smc_curs_copy(&conn->tx_curs_fin, &conn->tx_curs_sent, conn);
+
+ smc_tx_sndbuf_nonfull(smc);
+ return rc;
+}
+
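
smcd_cdc_msg_send() reclaims send-buffer space by taking the ring distance between tx_curs_fin and tx_curs_sent. A standalone sketch of that cursor arithmetic, assuming smc_curs_diff()'s at-most-one-wrap semantics:

    #include <stdint.h>
    #include <stdio.h>

    struct curs { uint16_t wrap; uint32_t count; };

    /* distance from old to cur in a ring of the given size,
     * assuming at most one wrap between the two snapshots
     */
    static uint32_t curs_diff(uint32_t size, struct curs old, struct curs cur)
    {
        if (old.wrap != cur.wrap)
            return size - old.count + cur.count;
        return cur.count - old.count;
    }

    int main(void)
    {
        struct curs fin  = { .wrap = 3, .count = 60000 };
        struct curs sent = { .wrap = 4, .count = 100 };

        /* 5636 bytes were acknowledged and can be handed back */
        printf("%u bytes freed\n", curs_diff(65536, fin, sent));
        return 0;
    }
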
/********************************* receive ***********************************/
static inline bool smc_cdc_before(u16 seq1, u16 seq2)
@@ -171,14 +222,12 @@ static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc,
char *base;
/* new data included urgent business */
- smc_curs_write(&conn->urg_curs,
- smc_curs_read(&conn->local_rx_ctrl.prod, conn),
- conn);
+ smc_curs_copy(&conn->urg_curs, &conn->local_rx_ctrl.prod, conn);
conn->urg_state = SMC_URG_VALID;
if (!sock_flag(&smc->sk, SOCK_URGINLINE))
/* we'll skip the urgent byte, so don't account for it */
(*diff_prod)--;
- base = (char *)conn->rmb_desc->cpu_addr;
+ base = (char *)conn->rmb_desc->cpu_addr + conn->rx_off;
if (conn->urg_curs.count)
conn->urg_rx_byte = *(base + conn->urg_curs.count - 1);
else
@@ -193,12 +242,8 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
struct smc_connection *conn = &smc->conn;
int diff_cons, diff_prod;
- smc_curs_write(&prod_old,
- smc_curs_read(&conn->local_rx_ctrl.prod, conn),
- conn);
- smc_curs_write(&cons_old,
- smc_curs_read(&conn->local_rx_ctrl.cons, conn),
- conn);
+ smc_curs_copy(&prod_old, &conn->local_rx_ctrl.prod, conn);
+ smc_curs_copy(&cons_old, &conn->local_rx_ctrl.cons, conn);
smc_cdc_msg_to_host(&conn->local_rx_ctrl, cdc, conn);
diff_cons = smc_curs_diff(conn->peer_rmbe_size, &cons_old,
@@ -277,6 +322,34 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
sock_put(&smc->sk); /* no free sk in softirq-context */
}
+/* Tasklet handler for this connection. Scheduled by the ISM device IRQ
+ * handler to signal an update in the DMBE.
+ *
+ * Context:
+ * - tasklet context
+ */
+static void smcd_cdc_rx_tsklet(unsigned long data)
+{
+ struct smc_connection *conn = (struct smc_connection *)data;
+ struct smcd_cdc_msg cdc;
+ struct smc_sock *smc;
+
+ if (!conn)
+ return;
+
+ memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc));
+ smc = container_of(conn, struct smc_sock, conn);
+ smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
+}
+
+/* Initialize the receive tasklet. Called at connection creation, so the
+ * ISM device IRQ handler can later schedule receive processing.
+ */
+void smcd_cdc_rx_init(struct smc_connection *conn)
+{
+ tasklet_init(&conn->rx_tsklet, smcd_cdc_rx_tsklet, (unsigned long)conn);
+}
+
/***************************** init, exit, misc ******************************/
static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
@@ -293,7 +366,7 @@ static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
return; /* invalid message */
/* lookup connection */
- lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]);
+ lgr = smc_get_lgr(link);
read_lock_bh(&lgr->conns_lock);
conn = smc_lgr_find_conn(ntohl(cdc->token), lgr);
read_unlock_bh(&lgr->conns_lock);
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index f60082fee5b8..934df4473a7c 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -50,6 +50,20 @@ struct smc_cdc_msg {
u8 reserved[18];
} __packed; /* format defined in RFC7609 */
+/* CDC message for SMC-D */
+struct smcd_cdc_msg {
+ struct smc_wr_rx_hdr common; /* Type = 0xFE */
+ u8 res1[7];
+ u16 prod_wrap;
+ u32 prod_count;
+ u8 res2[2];
+ u16 cons_wrap;
+ u32 cons_count;
+ struct smc_cdc_producer_flags prod_flags;
+ struct smc_cdc_conn_state_flags conn_state_flags;
+ u8 res3[8];
+} __packed;
+
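
This header occupies the first bytes of the receive buffer, which is why conn->rx_off is 32 for SMC-D. A userspace mirror to sanity-check the packed layout, assuming smc_wr_rx_hdr and each of the two flags structs contribute a single byte:

    #include <stdint.h>
    #include <stdio.h>

    struct smcd_cdc_mirror {            /* field sizes as declared above */
        uint8_t  type;                  /* smc_wr_rx_hdr: 0xFE */
        uint8_t  res1[7];
        uint16_t prod_wrap;
        uint32_t prod_count;
        uint8_t  res2[2];
        uint16_t cons_wrap;
        uint32_t cons_count;
        uint8_t  prod_flags;            /* one-byte flag structs assumed */
        uint8_t  conn_state_flags;
        uint8_t  res3[8];
    } __attribute__((packed));

    int main(void)
    {
        _Static_assert(sizeof(struct smcd_cdc_mirror) == 32,
                       "SMC-D CDC header must be 32 bytes");
        printf("SMC-D CDC header: %zu bytes\n",
               sizeof(struct smcd_cdc_mirror));
        return 0;
    }
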
static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
{
return conn->local_rx_ctrl.conn_state_flags.peer_conn_abort ||
@@ -90,47 +104,34 @@ static inline u64 smc_curs_read(union smc_host_cursor *curs,
#endif
}
-static inline u64 smc_curs_read_net(union smc_cdc_cursor *curs,
- struct smc_connection *conn)
-{
-#ifndef KERNEL_HAS_ATOMIC64
- unsigned long flags;
- u64 ret;
-
- spin_lock_irqsave(&conn->acurs_lock, flags);
- ret = curs->acurs;
- spin_unlock_irqrestore(&conn->acurs_lock, flags);
- return ret;
-#else
- return atomic64_read(&curs->acurs);
-#endif
-}
-
-static inline void smc_curs_write(union smc_host_cursor *curs, u64 val,
- struct smc_connection *conn)
+/* Copy cursor src into tgt */
+static inline void smc_curs_copy(union smc_host_cursor *tgt,
+ union smc_host_cursor *src,
+ struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
unsigned long flags;
spin_lock_irqsave(&conn->acurs_lock, flags);
- curs->acurs = val;
+ tgt->acurs = src->acurs;
spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
- atomic64_set(&curs->acurs, val);
+ atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#endif
}
-static inline void smc_curs_write_net(union smc_cdc_cursor *curs, u64 val,
- struct smc_connection *conn)
+static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt,
+ union smc_cdc_cursor *src,
+ struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
unsigned long flags;
spin_lock_irqsave(&conn->acurs_lock, flags);
- curs->acurs = val;
+ tgt->acurs = src->acurs;
spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
- atomic64_set(&curs->acurs, val);
+ atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#endif
}
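
smc_curs_copy() folds the old smc_curs_read()/smc_curs_write() pair into one helper; the point is that wrap and count always travel together in a single 64-bit atomic. A userspace sketch of the same idea, type-punning through the union as the kernel does (the kernel's union smc_host_cursor also carries a reserved field, omitted here):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    union host_cursor {
        struct {
            uint16_t wrap;
            uint32_t count;
        };
        _Atomic uint64_t acurs;  /* whole cursor in one atomic word */
    };

    /* copy src into tgt without ever exposing a torn wrap/count pair */
    static void curs_copy(union host_cursor *tgt, union host_cursor *src)
    {
        atomic_store(&tgt->acurs, atomic_load(&src->acurs));
    }

    int main(void)
    {
        union host_cursor a = { .wrap = 2, .count = 4096 };
        union host_cursor b = { 0 };

        curs_copy(&b, &a);
        printf("wrap=%u count=%u\n", b.wrap, b.count);
        return 0;
    }
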
@@ -165,7 +166,7 @@ static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
{
union smc_host_cursor temp;
- smc_curs_write(&temp, smc_curs_read(local, conn), conn);
+ smc_curs_copy(&temp, local, conn);
peer->count = htonl(temp.count);
peer->wrap = htons(temp.wrap);
/* peer->reserved = htons(0); must be ensured by caller */
@@ -192,8 +193,8 @@ static inline void smc_cdc_cursor_to_host(union smc_host_cursor *local,
union smc_host_cursor temp, old;
union smc_cdc_cursor net;
- smc_curs_write(&old, smc_curs_read(local, conn), conn);
- smc_curs_write_net(&net, smc_curs_read_net(peer, conn), conn);
+ smc_curs_copy(&old, local, conn);
+ smc_curs_copy_net(&net, peer, conn);
temp.count = ntohl(net.count);
temp.wrap = ntohs(net.wrap);
if ((old.wrap > temp.wrap) && temp.wrap)
@@ -201,12 +202,12 @@ static inline void smc_cdc_cursor_to_host(union smc_host_cursor *local,
if ((old.wrap == temp.wrap) &&
(old.count > temp.count))
return;
- smc_curs_write(local, smc_curs_read(&temp, conn), conn);
+ smc_curs_copy(local, &temp, conn);
}
-static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
- struct smc_cdc_msg *peer,
- struct smc_connection *conn)
+static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
+ struct smc_cdc_msg *peer,
+ struct smc_connection *conn)
{
local->common.type = peer->common.type;
local->len = peer->len;
@@ -218,6 +219,27 @@ static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
local->conn_state_flags = peer->conn_state_flags;
}
+static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local,
+ struct smcd_cdc_msg *peer)
+{
+ local->prod.wrap = peer->prod_wrap;
+ local->prod.count = peer->prod_count;
+ local->cons.wrap = peer->cons_wrap;
+ local->cons.count = peer->cons_count;
+ local->prod_flags = peer->prod_flags;
+ local->conn_state_flags = peer->conn_state_flags;
+}
+
+static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
+ struct smc_cdc_msg *peer,
+ struct smc_connection *conn)
+{
+ if (conn->lgr->is_smcd)
+ smcd_cdc_msg_to_host(local, (struct smcd_cdc_msg *)peer);
+ else
+ smcr_cdc_msg_to_host(local, peer, conn);
+}
+
struct smc_cdc_tx_pend;
int smc_cdc_get_free_slot(struct smc_connection *conn,
@@ -227,6 +249,8 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
struct smc_cdc_tx_pend *pend);
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
+int smcd_cdc_msg_send(struct smc_connection *conn);
int smc_cdc_init(void) __init;
+void smcd_cdc_rx_init(struct smc_connection *conn);
#endif /* SMC_CDC_H */
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index ae5d168653ce..83aba9ade060 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -23,9 +23,15 @@
#include "smc_core.h"
#include "smc_clc.h"
#include "smc_ib.h"
+#include "smc_ism.h"
+
+#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
+#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
/* eye catcher "SMCR" EBCDIC for CLC messages */
static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
+/* eye catcher "SMCD" EBCDIC for CLC messages */
+static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'};
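
The eyecatchers look opaque in ASCII because CLC messages carry EBCDIC text on the wire: 0xe2 0xd4 0xc3 0xc4 spells "SMCD", and the trailing byte of the existing SMC_EYECATCHER, 0xd9, is 'R'. A trivial check:

    #include <stdio.h>

    int main(void)
    {
        /* EBCDIC code points: S=0xe2, M=0xd4, C=0xc3, D=0xc4, R=0xd9 */
        const unsigned char smcd[4] = { 0xe2, 0xd4, 0xc3, 0xc4 };
        const char ascii[4] = { 'S', 'M', 'C', 'D' };

        for (int i = 0; i < 4; i++)
            printf("0x%02x -> %c\n", smcd[i], ascii[i]);
        return 0;
    }
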
/* check if received message has a correct header length and contains valid
* heading and trailing eyecatchers
@@ -38,10 +44,14 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
struct smc_clc_msg_decline *dclc;
struct smc_clc_msg_trail *trl;
- if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
+ if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
+ memcmp(clcm->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
return false;
switch (clcm->type) {
case SMC_CLC_PROPOSAL:
+ if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
+ clcm->path != SMC_TYPE_B)
+ return false;
pclc = (struct smc_clc_msg_proposal *)clcm;
pclc_prfx = smc_clc_proposal_get_prefix(pclc);
if (ntohs(pclc->hdr.length) !=
@@ -56,10 +66,16 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
break;
case SMC_CLC_ACCEPT:
case SMC_CLC_CONFIRM:
+ if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D)
+ return false;
clc = (struct smc_clc_msg_accept_confirm *)clcm;
- if (ntohs(clc->hdr.length) != sizeof(*clc))
+ if ((clcm->path == SMC_TYPE_R &&
+ ntohs(clc->hdr.length) != SMCR_CLC_ACCEPT_CONFIRM_LEN) ||
+ (clcm->path == SMC_TYPE_D &&
+ ntohs(clc->hdr.length) != SMCD_CLC_ACCEPT_CONFIRM_LEN))
return false;
- trl = &clc->trl;
+ trl = (struct smc_clc_msg_trail *)
+ ((u8 *)clc + ntohs(clc->hdr.length) - sizeof(*trl));
break;
case SMC_CLC_DECLINE:
dclc = (struct smc_clc_msg_decline *)clcm;
@@ -70,7 +86,8 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
default:
return false;
}
- if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
+ if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
+ memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
return false;
return true;
}
@@ -296,6 +313,9 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
datlen = ntohs(clcm->length);
if ((len < sizeof(struct smc_clc_msg_hdr)) ||
(datlen > buflen) ||
+ (clcm->version != SMC_CLC_V1) ||
+ (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
+ clcm->path != SMC_TYPE_B) ||
((clcm->type != SMC_CLC_DECLINE) &&
(clcm->type != expected_type))) {
smc->sk.sk_err = EPROTO;
@@ -314,7 +334,11 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
goto out;
}
if (clcm->type == SMC_CLC_DECLINE) {
- reason_code = SMC_CLC_DECL_REPLY;
+ struct smc_clc_msg_decline *dclc;
+
+ dclc = (struct smc_clc_msg_decline *)clcm;
+ reason_code = SMC_CLC_DECL_PEERDECL;
+ smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
smc->conn.lgr->sync_err = 1;
smc_lgr_terminate(smc->conn.lgr);
@@ -357,17 +381,18 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
}
/* send CLC PROPOSAL message across internal TCP socket */
-int smc_clc_send_proposal(struct smc_sock *smc,
- struct smc_ib_device *smcibdev,
- u8 ibport)
+int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
+ struct smc_ib_device *ibdev, u8 ibport, u8 gid[],
+ struct smcd_dev *ismdev)
{
struct smc_clc_ipv6_prefix ipv6_prfx[SMC_CLC_MAX_V6_PREFIX];
struct smc_clc_msg_proposal_prefix pclc_prfx;
+ struct smc_clc_msg_smcd pclc_smcd;
struct smc_clc_msg_proposal pclc;
struct smc_clc_msg_trail trl;
int len, i, plen, rc;
int reason_code = 0;
- struct kvec vec[4];
+ struct kvec vec[5];
struct msghdr msg;
/* retrieve ip prefixes for CLC proposal msg */
@@ -382,18 +407,34 @@ int smc_clc_send_proposal(struct smc_sock *smc,
memset(&pclc, 0, sizeof(pclc));
memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
pclc.hdr.type = SMC_CLC_PROPOSAL;
- pclc.hdr.length = htons(plen);
pclc.hdr.version = SMC_CLC_V1; /* SMC version */
- memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
- memcpy(&pclc.lcl.gid, &smcibdev->gid[ibport - 1], SMC_GID_SIZE);
- memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], ETH_ALEN);
- pclc.iparea_offset = htons(0);
+ pclc.hdr.path = smc_type;
+ if (smc_type == SMC_TYPE_R || smc_type == SMC_TYPE_B) {
+ /* add SMC-R specifics */
+ memcpy(pclc.lcl.id_for_peer, local_systemid,
+ sizeof(local_systemid));
+ memcpy(&pclc.lcl.gid, gid, SMC_GID_SIZE);
+ memcpy(&pclc.lcl.mac, &ibdev->mac[ibport - 1], ETH_ALEN);
+ pclc.iparea_offset = htons(0);
+ }
+ if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
+ /* add SMC-D specifics */
+ memset(&pclc_smcd, 0, sizeof(pclc_smcd));
+ plen += sizeof(pclc_smcd);
+ pclc.iparea_offset = htons(SMC_CLC_PROPOSAL_MAX_OFFSET);
+ pclc_smcd.gid = ismdev->local_gid;
+ }
+ pclc.hdr.length = htons(plen);
memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
memset(&msg, 0, sizeof(msg));
i = 0;
vec[i].iov_base = &pclc;
vec[i++].iov_len = sizeof(pclc);
+ if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
+ vec[i].iov_base = &pclc_smcd;
+ vec[i++].iov_len = sizeof(pclc_smcd);
+ }
vec[i].iov_base = &pclc_prfx;
vec[i++].iov_len = sizeof(pclc_prfx);
if (pclc_prfx.ipv6_prefixes_cnt > 0) {
@@ -429,35 +470,55 @@ int smc_clc_send_confirm(struct smc_sock *smc)
struct kvec vec;
int len;
- link = &conn->lgr->lnk[SMC_SINGLE_LINK];
/* send SMC Confirm CLC msg */
memset(&cclc, 0, sizeof(cclc));
- memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
cclc.hdr.type = SMC_CLC_CONFIRM;
- cclc.hdr.length = htons(sizeof(cclc));
cclc.hdr.version = SMC_CLC_V1; /* SMC version */
- memcpy(cclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
- memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
- SMC_GID_SIZE);
- memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
- hton24(cclc.qpn, link->roce_qp->qp_num);
- cclc.rmb_rkey =
- htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
- cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
- cclc.rmbe_alert_token = htonl(conn->alert_token_local);
- cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
- cclc.rmbe_size = conn->rmbe_size_short;
- cclc.rmb_dma_addr = cpu_to_be64(
- (u64)sg_dma_address(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
- hton24(cclc.psn, link->psn_initial);
-
- memcpy(cclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+ if (smc->conn.lgr->is_smcd) {
+ /* SMC-D specific settings */
+ memcpy(cclc.hdr.eyecatcher, SMCD_EYECATCHER,
+ sizeof(SMCD_EYECATCHER));
+ cclc.hdr.path = SMC_TYPE_D;
+ cclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
+ cclc.gid = conn->lgr->smcd->local_gid;
+ cclc.token = conn->rmb_desc->token;
+ cclc.dmbe_size = conn->rmbe_size_short;
+ cclc.dmbe_idx = 0;
+ memcpy(&cclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
+ memcpy(cclc.smcd_trl.eyecatcher, SMCD_EYECATCHER,
+ sizeof(SMCD_EYECATCHER));
+ } else {
+ /* SMC-R specific settings */
+ link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+ memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER,
+ sizeof(SMC_EYECATCHER));
+ cclc.hdr.path = SMC_TYPE_R;
+ cclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
+ memcpy(cclc.lcl.id_for_peer, local_systemid,
+ sizeof(local_systemid));
+ memcpy(&cclc.lcl.gid, link->gid, SMC_GID_SIZE);
+ memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1],
+ ETH_ALEN);
+ hton24(cclc.qpn, link->roce_qp->qp_num);
+ cclc.rmb_rkey =
+ htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
+ cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
+ cclc.rmbe_alert_token = htonl(conn->alert_token_local);
+ cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
+ cclc.rmbe_size = conn->rmbe_size_short;
+ cclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
+ (conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
+ hton24(cclc.psn, link->psn_initial);
+ memcpy(cclc.smcr_trl.eyecatcher, SMC_EYECATCHER,
+ sizeof(SMC_EYECATCHER));
+ }
memset(&msg, 0, sizeof(msg));
vec.iov_base = &cclc;
- vec.iov_len = sizeof(cclc);
- len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(cclc));
- if (len < sizeof(cclc)) {
+ vec.iov_len = ntohs(cclc.hdr.length);
+ len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
+ ntohs(cclc.hdr.length));
+ if (len < ntohs(cclc.hdr.length)) {
if (len >= 0) {
reason_code = -ENETUNREACH;
smc->sk.sk_err = -reason_code;
@@ -480,35 +541,57 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
int rc = 0;
int len;
- link = &conn->lgr->lnk[SMC_SINGLE_LINK];
memset(&aclc, 0, sizeof(aclc));
- memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
aclc.hdr.type = SMC_CLC_ACCEPT;
- aclc.hdr.length = htons(sizeof(aclc));
aclc.hdr.version = SMC_CLC_V1; /* SMC version */
if (srv_first_contact)
aclc.hdr.flag = 1;
- memcpy(aclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
- memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
- SMC_GID_SIZE);
- memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
- hton24(aclc.qpn, link->roce_qp->qp_num);
- aclc.rmb_rkey =
- htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
- aclc.rmbe_idx = 1; /* as long as 1 RMB = 1 RMBE */
- aclc.rmbe_alert_token = htonl(conn->alert_token_local);
- aclc.qp_mtu = link->path_mtu;
- aclc.rmbe_size = conn->rmbe_size_short,
- aclc.rmb_dma_addr = cpu_to_be64(
- (u64)sg_dma_address(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
- hton24(aclc.psn, link->psn_initial);
- memcpy(aclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+
+ if (new_smc->conn.lgr->is_smcd) {
+ /* SMC-D specific settings */
+ aclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
+ memcpy(aclc.hdr.eyecatcher, SMCD_EYECATCHER,
+ sizeof(SMCD_EYECATCHER));
+ aclc.hdr.path = SMC_TYPE_D;
+ aclc.gid = conn->lgr->smcd->local_gid;
+ aclc.token = conn->rmb_desc->token;
+ aclc.dmbe_size = conn->rmbe_size_short;
+ aclc.dmbe_idx = 0;
+ memcpy(&aclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
+ memcpy(aclc.smcd_trl.eyecatcher, SMCD_EYECATCHER,
+ sizeof(SMCD_EYECATCHER));
+ } else {
+ /* SMC-R specific settings */
+ aclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
+ memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER,
+ sizeof(SMC_EYECATCHER));
+ aclc.hdr.path = SMC_TYPE_R;
+ link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+ memcpy(aclc.lcl.id_for_peer, local_systemid,
+ sizeof(local_systemid));
+ memcpy(&aclc.lcl.gid, link->gid, SMC_GID_SIZE);
+ memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1],
+ ETH_ALEN);
+ hton24(aclc.qpn, link->roce_qp->qp_num);
+ aclc.rmb_rkey =
+ htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
+ aclc.rmbe_idx = 1; /* as long as 1 RMB = 1 RMBE */
+ aclc.rmbe_alert_token = htonl(conn->alert_token_local);
+ aclc.qp_mtu = link->path_mtu;
+ aclc.rmbe_size = conn->rmbe_size_short,
+ aclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
+ (conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
+ hton24(aclc.psn, link->psn_initial);
+ memcpy(aclc.smcr_trl.eyecatcher, SMC_EYECATCHER,
+ sizeof(SMC_EYECATCHER));
+ }
memset(&msg, 0, sizeof(msg));
vec.iov_base = &aclc;
- vec.iov_len = sizeof(aclc);
- len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1, sizeof(aclc));
- if (len < sizeof(aclc)) {
+ vec.iov_len = ntohs(aclc.hdr.length);
+ len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1,
+ ntohs(aclc.hdr.length));
+ if (len < ntohs(aclc.hdr.length)) {
if (len >= 0)
new_smc->sk.sk_err = EPROTO;
else
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 41ff9ea96139..18da89b681c2 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -23,17 +23,26 @@
#define SMC_CLC_DECLINE 0x04
#define SMC_CLC_V1 0x1 /* SMC version */
+#define SMC_TYPE_R 0 /* SMC-R only */
+#define SMC_TYPE_D 1 /* SMC-D only */
+#define SMC_TYPE_B 3 /* SMC-R and SMC-D */
#define CLC_WAIT_TIME (6 * HZ) /* max. wait time on clcsock */
#define SMC_CLC_DECL_MEM 0x01010000 /* insufficient memory resources */
-#define SMC_CLC_DECL_TIMEOUT 0x02000000 /* timeout */
+#define SMC_CLC_DECL_TIMEOUT_CL 0x02010000 /* timeout w4 QP confirm link */
+#define SMC_CLC_DECL_TIMEOUT_AL 0x02020000 /* timeout w4 QP add link */
#define SMC_CLC_DECL_CNFERR 0x03000000 /* configuration error */
-#define SMC_CLC_DECL_IPSEC 0x03030000 /* IPsec usage */
+#define SMC_CLC_DECL_PEERNOSMC 0x03010000 /* peer did not indicate SMC */
+#define SMC_CLC_DECL_IPSEC 0x03020000 /* IPsec usage */
+#define SMC_CLC_DECL_NOSMCDEV 0x03030000 /* no SMC device found */
+#define SMC_CLC_DECL_MODEUNSUPP 0x03040000 /* smc modes do not match (R or D) */
+#define SMC_CLC_DECL_RMBE_EC 0x03050000 /* peer has eyecatcher in RMBE */
+#define SMC_CLC_DECL_OPTUNSUPP 0x03060000 /* fastopen sockopt not supported */
#define SMC_CLC_DECL_SYNCERR 0x04000000 /* synchronization error */
-#define SMC_CLC_DECL_REPLY 0x06000000 /* reply to a received decline */
+#define SMC_CLC_DECL_PEERDECL 0x05000000 /* peer declined during handshake */
#define SMC_CLC_DECL_INTERR 0x99990000 /* internal error */
-#define SMC_CLC_DECL_TCL 0x02040000 /* timeout w4 QP confirm */
-#define SMC_CLC_DECL_SEND 0x07000000 /* sending problem */
-#define SMC_CLC_DECL_RMBE_EC 0x08000000 /* peer has eyecatcher in RMBE */
+#define SMC_CLC_DECL_ERR_RTOK 0x99990001 /* rtoken handling failed */
+#define SMC_CLC_DECL_ERR_RDYLNK 0x99990002 /* ib ready link failed */
+#define SMC_CLC_DECL_ERR_REGRMB 0x99990003 /* reg rmb failed */
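
The reworked codes keep the high byte as a rough class (0x01 memory, 0x02 timeouts, 0x03 configuration, 0x99 internal) and use the next byte to disambiguate. A hypothetical decoder for a few of the values above:

    #include <stdint.h>
    #include <stdio.h>

    static const char *smc_decl_str(uint32_t code)
    {
        switch (code) {
        case 0x01010000: return "insufficient memory";        /* DECL_MEM */
        case 0x02010000: return "timeout: CONFIRM LINK";      /* TIMEOUT_CL */
        case 0x02020000: return "timeout: ADD LINK";          /* TIMEOUT_AL */
        case 0x03010000: return "peer did not indicate SMC";  /* PEERNOSMC */
        case 0x03040000: return "SMC modes do not match";     /* MODEUNSUPP */
        case 0x05000000: return "peer declined";              /* PEERDECL */
        case 0x99990003: return "RMB registration failed";    /* ERR_REGRMB */
        default:         return "unknown";
        }
    }

    int main(void)
    {
        printf("0x03040000 -> %s\n", smc_decl_str(0x03040000));
        return 0;
    }
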
struct smc_clc_msg_hdr { /* header1 of clc messages */
u8 eyecatcher[4]; /* eye catcher */
@@ -42,9 +51,11 @@ struct smc_clc_msg_hdr { /* header1 of clc messages */
#if defined(__BIG_ENDIAN_BITFIELD)
u8 version : 4,
flag : 1,
- rsvd : 3;
+ rsvd : 1,
+ path : 2;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u8 rsvd : 3,
+ u8 path : 2,
+ rsvd : 1,
flag : 1,
version : 4;
#endif
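
Declaring the bitfield twice keeps the on-wire layout identical on both endiannesses: version always lands in the top nibble of the byte and the new path type in its low two bits. Extracting the fields from a raw header byte:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t b = 0x13; /* version 1, flag 0, path 3 (SMC_TYPE_B) */

        printf("version=%u flag=%u path=%u\n",
               b >> 4, (b >> 3) & 1, b & 3);
        return 0;
    }
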
@@ -77,6 +88,11 @@ struct smc_clc_msg_proposal_prefix { /* prefix part of clc proposal message*/
u8 ipv6_prefixes_cnt; /* number of IPv6 prefixes in prefix array */
} __aligned(4);
+struct smc_clc_msg_smcd { /* SMC-D GID information */
+ u64 gid; /* ISM GID of requestor */
+ u8 res[32];
+};
+
struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */
struct smc_clc_msg_hdr hdr;
struct smc_clc_msg_local lcl;
@@ -94,23 +110,45 @@ struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */
struct smc_clc_msg_accept_confirm { /* clc accept / confirm message */
struct smc_clc_msg_hdr hdr;
- struct smc_clc_msg_local lcl;
- u8 qpn[3]; /* QP number */
- __be32 rmb_rkey; /* RMB rkey */
- u8 rmbe_idx; /* Index of RMBE in RMB */
- __be32 rmbe_alert_token;/* unique connection id */
+ union {
+ struct { /* SMC-R */
+ struct smc_clc_msg_local lcl;
+ u8 qpn[3]; /* QP number */
+ __be32 rmb_rkey; /* RMB rkey */
+ u8 rmbe_idx; /* Index of RMBE in RMB */
+ __be32 rmbe_alert_token;/* unique connection id */
#if defined(__BIG_ENDIAN_BITFIELD)
- u8 rmbe_size : 4, /* RMBE buf size (compressed notation) */
- qp_mtu : 4; /* QP mtu */
+ u8 rmbe_size : 4, /* buf size (compressed) */
+ qp_mtu : 4; /* QP mtu */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u8 qp_mtu : 4,
- rmbe_size : 4;
+ u8 qp_mtu : 4,
+ rmbe_size : 4;
#endif
- u8 reserved;
- __be64 rmb_dma_addr; /* RMB virtual address */
- u8 reserved2;
- u8 psn[3]; /* initial packet sequence number */
- struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */
+ u8 reserved;
+ __be64 rmb_dma_addr; /* RMB virtual address */
+ u8 reserved2;
+ u8 psn[3]; /* packet sequence number */
+ struct smc_clc_msg_trail smcr_trl;
+ /* eye catcher "SMCR" EBCDIC */
+ } __packed;
+ struct { /* SMC-D */
+ u64 gid; /* Sender GID */
+ u64 token; /* DMB token */
+ u8 dmbe_idx; /* DMBE index */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 dmbe_size : 4, /* buf size (compressed) */
+ reserved3 : 4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 reserved3 : 4,
+ dmbe_size : 4;
+#endif
+ u16 reserved4;
+ u32 linkid; /* Link identifier */
+ u32 reserved5[3];
+ struct smc_clc_msg_trail smcd_trl;
+ /* eye catcher "SMCD" EBCDIC */
+ } __packed;
+ };
} __packed; /* format defined in RFC7609 */
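
The union makes the accept/confirm message variable-length: 68 bytes for SMC-R and 48 for SMC-D, matching the SMCR_/SMCD_CLC_ACCEPT_CONFIRM_LEN constants in smc_clc.c. A back-of-envelope check of those totals, assuming an 8-byte CLC header and a 30-byte smc_clc_msg_local (8-byte system id, 16-byte GID, 6-byte MAC):

    #include <stdio.h>

    int main(void)
    {
        int hdr  = 4 + 1 + 2 + 1;  /* eyecatcher, type, length, version byte */
        int lcl  = 8 + 16 + 6;     /* id_for_peer, gid, mac (assumed sizes) */
        int smcr = lcl + 3 + 4 + 1 + 4 + 1 + 1 + 8 + 1 + 3 + 4;
        int smcd = 8 + 8 + 1 + 1 + 2 + 4 + 12 + 4;

        printf("SMC-R accept/confirm: %d\n", hdr + smcr); /* 68 */
        printf("SMC-D accept/confirm: %d\n", hdr + smcd); /* 48 */
        return 0;
    }
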
struct smc_clc_msg_decline { /* clc decline message */
@@ -129,13 +167,26 @@ smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc)
((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset));
}
+/* get SMC-D info from proposal message */
+static inline struct smc_clc_msg_smcd *
+smc_get_clc_msg_smcd(struct smc_clc_msg_proposal *prop)
+{
+ if (ntohs(prop->iparea_offset) != sizeof(struct smc_clc_msg_smcd))
+ return NULL;
+
+ return (struct smc_clc_msg_smcd *)(prop + 1);
+}
+
+struct smcd_dev;
+
int smc_clc_prfx_match(struct socket *clcsock,
struct smc_clc_msg_proposal_prefix *prop);
int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
u8 expected_type);
int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info);
-int smc_clc_send_proposal(struct smc_sock *smc, struct smc_ib_device *smcibdev,
- u8 ibport);
+int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
+ struct smc_ib_device *smcibdev, u8 ibport, u8 gid[],
+ struct smcd_dev *ismdev);
int smc_clc_send_confirm(struct smc_sock *smc);
int smc_clc_send_accept(struct smc_sock *smc, int srv_first_contact);
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 15bad268f37d..e871368500e3 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -26,10 +26,12 @@
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
+#include "smc_ism.h"
#define SMC_LGR_NUM_INCR 256
#define SMC_LGR_FREE_DELAY_SERV (600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT (SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
+#define SMC_LGR_FREE_DELAY_FAST (8 * HZ)
static struct smc_lgr_list smc_lgr_list = { /* established link groups */
.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
@@ -47,8 +49,13 @@ static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
* otherwise there is a risk of out-of-sync link groups.
*/
mod_delayed_work(system_wq, &lgr->free_work,
- lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT :
- SMC_LGR_FREE_DELAY_SERV);
+ (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
+ SMC_LGR_FREE_DELAY_CLNT : SMC_LGR_FREE_DELAY_SERV);
+}
+
+void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
+{
+ mod_delayed_work(system_wq, &lgr->free_work, SMC_LGR_FREE_DELAY_FAST);
}
/* Register connection's alert token in our lookup structure.
@@ -133,6 +140,20 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
smc_lgr_schedule_free_work(lgr);
}
+/* Send a DELETE LINK request: as client, to ask the server to initiate
+ * the DELETE LINK sequence; as server, to start the delete processing
+ * directly. See smc_llc_rx_delete_link().
+ */
+static int smc_link_send_delete(struct smc_link *lnk)
+{
+ if (lnk->state == SMC_LNK_ACTIVE &&
+ !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, true)) {
+ smc_llc_link_deleting(lnk);
+ return 0;
+ }
+ return -ENOTCONN;
+}
+
static void smc_lgr_free_work(struct work_struct *work)
{
struct smc_link_group *lgr = container_of(to_delayed_work(work),
@@ -153,17 +174,30 @@ static void smc_lgr_free_work(struct work_struct *work)
list_del_init(&lgr->list); /* remove from smc_lgr_list */
free:
spin_unlock_bh(&smc_lgr_list.lock);
+
+ if (!lgr->is_smcd && !lgr->terminating) {
+ /* try to send del link msg, on error free lgr immediately */
+ if (!smc_link_send_delete(&lgr->lnk[SMC_SINGLE_LINK])) {
+ /* reschedule in case we never receive a response */
+ smc_lgr_schedule_free_work(lgr);
+ return;
+ }
+ }
+
if (!delayed_work_pending(&lgr->free_work)) {
- if (lgr->lnk[SMC_SINGLE_LINK].state != SMC_LNK_INACTIVE)
- smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
+ struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+
+ if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
+ smc_llc_link_inactive(lnk);
smc_lgr_free(lgr);
}
}
/* create a new SMC link group */
-static int smc_lgr_create(struct smc_sock *smc,
+static int smc_lgr_create(struct smc_sock *smc, bool is_smcd,
struct smc_ib_device *smcibdev, u8 ibport,
- char *peer_systemid, unsigned short vlan_id)
+ char *peer_systemid, unsigned short vlan_id,
+ struct smcd_dev *smcismdev, u64 peer_gid)
{
struct smc_link_group *lgr;
struct smc_link *lnk;
@@ -171,17 +205,23 @@ static int smc_lgr_create(struct smc_sock *smc,
int rc = 0;
int i;
+ if (is_smcd && vlan_id) {
+ rc = smc_ism_get_vlan(smcismdev, vlan_id);
+ if (rc)
+ goto out;
+ }
+
lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
if (!lgr) {
rc = -ENOMEM;
goto out;
}
- lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
+ lgr->is_smcd = is_smcd;
lgr->sync_err = 0;
- memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);
lgr->vlan_id = vlan_id;
rwlock_init(&lgr->sndbufs_lock);
rwlock_init(&lgr->rmbs_lock);
+ rwlock_init(&lgr->conns_lock);
for (i = 0; i < SMC_RMBE_SIZES; i++) {
INIT_LIST_HEAD(&lgr->sndbufs[i]);
INIT_LIST_HEAD(&lgr->rmbs[i]);
@@ -190,36 +230,48 @@ static int smc_lgr_create(struct smc_sock *smc,
memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
lgr->conns_all = RB_ROOT;
-
- lnk = &lgr->lnk[SMC_SINGLE_LINK];
- /* initialize link */
- lnk->state = SMC_LNK_ACTIVATING;
- lnk->link_id = SMC_SINGLE_LINK;
- lnk->smcibdev = smcibdev;
- lnk->ibport = ibport;
- lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
- if (!smcibdev->initialized)
- smc_ib_setup_per_ibdev(smcibdev);
- get_random_bytes(rndvec, sizeof(rndvec));
- lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16);
- rc = smc_llc_link_init(lnk);
- if (rc)
- goto free_lgr;
- rc = smc_wr_alloc_link_mem(lnk);
- if (rc)
- goto clear_llc_lnk;
- rc = smc_ib_create_protection_domain(lnk);
- if (rc)
- goto free_link_mem;
- rc = smc_ib_create_queue_pair(lnk);
- if (rc)
- goto dealloc_pd;
- rc = smc_wr_create_link(lnk);
- if (rc)
- goto destroy_qp;
-
+ if (is_smcd) {
+ /* SMC-D specific settings */
+ lgr->peer_gid = peer_gid;
+ lgr->smcd = smcismdev;
+ } else {
+ /* SMC-R specific settings */
+ lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
+ memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);
+
+ lnk = &lgr->lnk[SMC_SINGLE_LINK];
+ /* initialize link */
+ lnk->state = SMC_LNK_ACTIVATING;
+ lnk->link_id = SMC_SINGLE_LINK;
+ lnk->smcibdev = smcibdev;
+ lnk->ibport = ibport;
+ lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
+ if (!smcibdev->initialized)
+ smc_ib_setup_per_ibdev(smcibdev);
+ get_random_bytes(rndvec, sizeof(rndvec));
+ lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
+ (rndvec[2] << 16);
+ rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
+ vlan_id, lnk->gid, &lnk->sgid_index);
+ if (rc)
+ goto free_lgr;
+ rc = smc_llc_link_init(lnk);
+ if (rc)
+ goto free_lgr;
+ rc = smc_wr_alloc_link_mem(lnk);
+ if (rc)
+ goto clear_llc_lnk;
+ rc = smc_ib_create_protection_domain(lnk);
+ if (rc)
+ goto free_link_mem;
+ rc = smc_ib_create_queue_pair(lnk);
+ if (rc)
+ goto dealloc_pd;
+ rc = smc_wr_create_link(lnk);
+ if (rc)
+ goto destroy_qp;
+ }
smc->conn.lgr = lgr;
- rwlock_init(&lgr->conns_lock);
spin_lock_bh(&smc_lgr_list.lock);
list_add(&lgr->list, &smc_lgr_list.list);
spin_unlock_bh(&smc_lgr_list.lock);
@@ -265,7 +317,12 @@ void smc_conn_free(struct smc_connection *conn)
{
if (!conn->lgr)
return;
- smc_cdc_tx_dismiss_slots(conn);
+ if (conn->lgr->is_smcd) {
+ smc_ism_unset_conn(conn);
+ tasklet_kill(&conn->rx_tsklet);
+ } else {
+ smc_cdc_tx_dismiss_slots(conn);
+ }
smc_lgr_unregister_conn(conn);
smc_buf_unuse(conn);
}
@@ -281,8 +338,8 @@ static void smc_link_clear(struct smc_link *lnk)
smc_wr_free_link_mem(lnk);
}
-static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
- struct smc_buf_desc *buf_desc)
+static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
+ struct smc_buf_desc *buf_desc)
{
struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
@@ -302,6 +359,28 @@ static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
kfree(buf_desc);
}
+static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
+ struct smc_buf_desc *buf_desc)
+{
+ if (is_dmb) {
+ /* restore original buf len */
+ buf_desc->len += sizeof(struct smcd_cdc_msg);
+ smc_ism_unregister_dmb(lgr->smcd, buf_desc);
+ } else {
+ kfree(buf_desc->cpu_addr);
+ }
+ kfree(buf_desc);
+}
+
+static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
+ struct smc_buf_desc *buf_desc)
+{
+ if (lgr->is_smcd)
+ smcd_buf_free(lgr, is_rmb, buf_desc);
+ else
+ smcr_buf_free(lgr, is_rmb, buf_desc);
+}
+
static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
struct smc_buf_desc *buf_desc, *bf_desc;
@@ -333,7 +412,10 @@ static void smc_lgr_free_bufs(struct smc_link_group *lgr)
void smc_lgr_free(struct smc_link_group *lgr)
{
smc_lgr_free_bufs(lgr);
- smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
+ if (lgr->is_smcd)
+ smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
+ else
+ smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
kfree(lgr);
}
@@ -358,7 +440,8 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr)
lgr->terminating = 1;
if (!list_empty(&lgr->list)) /* forget lgr */
list_del_init(&lgr->list);
- smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
+ if (!lgr->is_smcd)
+ smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
write_lock_bh(&lgr->conns_lock);
node = rb_first(&lgr->conns_all);
@@ -375,7 +458,8 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr)
node = rb_first(&lgr->conns_all);
}
write_unlock_bh(&lgr->conns_lock);
- wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
+ if (!lgr->is_smcd)
+ wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
smc_lgr_schedule_free_work(lgr);
}
@@ -393,17 +477,44 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
spin_lock_bh(&smc_lgr_list.lock);
list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
- if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
+ if (!lgr->is_smcd &&
+ lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
__smc_lgr_terminate(lgr);
}
spin_unlock_bh(&smc_lgr_list.lock);
}
+/* Called when an SMC-D device is terminated or a peer is lost */
+void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
+{
+ struct smc_link_group *lgr, *l;
+ LIST_HEAD(lgr_free_list);
+
+ /* run common cleanup function and build free list */
+ spin_lock_bh(&smc_lgr_list.lock);
+ list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
+ if (lgr->is_smcd && lgr->smcd == dev &&
+ (!peer_gid || lgr->peer_gid == peer_gid) &&
+ !list_empty(&lgr->list)) {
+ __smc_lgr_terminate(lgr);
+ list_move(&lgr->list, &lgr_free_list);
+ }
+ }
+ spin_unlock_bh(&smc_lgr_list.lock);
+
+ /* cancel the regular free workers and actually free lgrs */
+ list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
+ list_del_init(&lgr->list);
+ cancel_delayed_work_sync(&lgr->free_work);
+ smc_lgr_free(lgr);
+ }
+}
+
/* Determine vlan of internal TCP socket.
* @vlan_id: address to store the determined vlan id into
*/
-static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
+int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
{
struct dst_entry *dst = sk_dst_get(clcsock->sk);
struct net_device *ndev;
@@ -447,41 +558,30 @@ out:
return rc;
}
-/* determine the link gid matching the vlan id of the link group */
-static int smc_link_determine_gid(struct smc_link_group *lgr)
+static bool smcr_lgr_match(struct smc_link_group *lgr,
+ struct smc_clc_msg_local *lcl,
+ enum smc_lgr_role role)
{
- struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
- struct ib_gid_attr gattr;
- union ib_gid gid;
- int i;
-
- if (!lgr->vlan_id) {
- lnk->gid = lnk->smcibdev->gid[lnk->ibport - 1];
- return 0;
- }
+ return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
+ SMC_SYSTEMID_LEN) &&
+ !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
+ SMC_GID_SIZE) &&
+ !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
+ sizeof(lcl->mac)) &&
+ lgr->role == role;
+}
- for (i = 0; i < lnk->smcibdev->pattr[lnk->ibport - 1].gid_tbl_len;
- i++) {
- if (ib_query_gid(lnk->smcibdev->ibdev, lnk->ibport, i, &gid,
- &gattr))
- continue;
- if (gattr.ndev) {
- if (is_vlan_dev(gattr.ndev) &&
- vlan_dev_vlan_id(gattr.ndev) == lgr->vlan_id) {
- lnk->gid = gid;
- dev_put(gattr.ndev);
- return 0;
- }
- dev_put(gattr.ndev);
- }
- }
- return -ENODEV;
+static bool smcd_lgr_match(struct smc_link_group *lgr,
+ struct smcd_dev *smcismdev, u64 peer_gid)
+{
+ return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
}
/* create a new SMC connection (and a new link group if necessary) */
-int smc_conn_create(struct smc_sock *smc,
+int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
struct smc_ib_device *smcibdev, u8 ibport,
- struct smc_clc_msg_local *lcl, int srv_first_contact)
+ struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
+ u64 peer_gid)
{
struct smc_connection *conn = &smc->conn;
int local_contact = SMC_FIRST_CONTACT;
@@ -503,17 +603,12 @@ int smc_conn_create(struct smc_sock *smc,
spin_lock_bh(&smc_lgr_list.lock);
list_for_each_entry(lgr, &smc_lgr_list.list, list) {
write_lock_bh(&lgr->conns_lock);
- if (!memcmp(lgr->peer_systemid, lcl->id_for_peer,
- SMC_SYSTEMID_LEN) &&
- !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
- SMC_GID_SIZE) &&
- !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
- sizeof(lcl->mac)) &&
+ if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) :
+ smcr_lgr_match(lgr, lcl, role)) &&
!lgr->sync_err &&
- (lgr->role == role) &&
- (lgr->vlan_id == vlan_id) &&
- ((role == SMC_CLNT) ||
- (lgr->conns_num < SMC_RMBS_PER_LGR_MAX))) {
+ lgr->vlan_id == vlan_id &&
+ (role == SMC_CLNT ||
+ lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
/* link group found */
local_contact = SMC_REUSE_CONTACT;
conn->lgr = lgr;
@@ -536,16 +631,19 @@ int smc_conn_create(struct smc_sock *smc,
create:
if (local_contact == SMC_FIRST_CONTACT) {
- rc = smc_lgr_create(smc, smcibdev, ibport,
- lcl->id_for_peer, vlan_id);
+ rc = smc_lgr_create(smc, is_smcd, smcibdev, ibport,
+ lcl->id_for_peer, vlan_id, smcd, peer_gid);
if (rc)
goto out;
smc_lgr_register_conn(conn); /* add smc conn to lgr */
- rc = smc_link_determine_gid(conn->lgr);
}
conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
conn->urg_state = SMC_URG_READ;
+ if (is_smcd) {
+ conn->rx_off = sizeof(struct smcd_cdc_msg);
+ smcd_cdc_rx_init(conn); /* init tasklet for this conn */
+ }
#ifndef KERNEL_HAS_ATOMIC64
spin_lock_init(&conn->acurs_lock);
#endif
@@ -610,8 +708,8 @@ static inline int smc_rmb_wnd_update_limit(int rmbe_size)
return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}
-static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr,
- bool is_rmb, int bufsize)
+static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
+ bool is_rmb, int bufsize)
{
struct smc_buf_desc *buf_desc;
struct smc_link *lnk;
@@ -669,7 +767,44 @@ static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr,
return buf_desc;
}
-static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
+#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
+
+static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
+ bool is_dmb, int bufsize)
+{
+ struct smc_buf_desc *buf_desc;
+ int rc;
+
+ if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES)
+ return ERR_PTR(-EAGAIN);
+
+ /* try to alloc a new DMB */
+ buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
+ if (!buf_desc)
+ return ERR_PTR(-ENOMEM);
+ if (is_dmb) {
+ rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
+ if (rc) {
+ kfree(buf_desc);
+ return ERR_PTR(-EAGAIN);
+ }
+ buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
+ /* the CDC header is stored in the buffer, so pretend it is smaller */
+ buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
+ } else {
+ buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
+ __GFP_NOWARN | __GFP_NORETRY |
+ __GFP_NOMEMALLOC);
+ if (!buf_desc->cpu_addr) {
+ kfree(buf_desc);
+ return ERR_PTR(-EAGAIN);
+ }
+ buf_desc->len = bufsize;
+ }
+ return buf_desc;
+}
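
smcd_new_buf_create() rejects requests whose compressed size exceeds SMCD_DMBE_SIZES. The compressed notation encodes a power-of-two buffer size; a sketch of the encoding, assuming the 16KB minimum buffer used by SMC (the kernel's smc_compress_bufsize() computes this via fls()):

    #include <stdio.h>

    /* compressed notation: value n stands for a 16KB << n buffer */
    static int compress_bufsize(int size)
    {
        int compressed = 0;

        while ((16384 << compressed) < size && compressed < 15)
            compressed++;
        return compressed;
    }

    static int uncompress_bufsize(int compressed)
    {
        return 16384 << compressed;
    }

    int main(void)
    {
        printf("1MB -> %d\n", compress_bufsize(1 << 20));     /* 6 */
        printf("6   -> %d bytes\n", uncompress_bufsize(6));   /* 1048576 */
        return 0;
    }
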
+
+static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
struct smc_connection *conn = &smc->conn;
@@ -707,7 +842,11 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
break; /* found reusable slot */
}
- buf_desc = smc_new_buf_create(lgr, is_rmb, bufsize);
+ if (is_smcd)
+ buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
+ else
+ buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);
+
if (PTR_ERR(buf_desc) == -ENOMEM)
break;
if (IS_ERR(buf_desc))
@@ -728,7 +867,10 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
conn->rmbe_size_short = bufsize_short;
smc->sk.sk_rcvbuf = bufsize * 2;
atomic_set(&conn->bytes_to_rcv, 0);
- conn->rmbe_update_limit = smc_rmb_wnd_update_limit(bufsize);
+ conn->rmbe_update_limit =
+ smc_rmb_wnd_update_limit(buf_desc->len);
+ if (is_smcd)
+ smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
} else {
conn->sndbuf_desc = buf_desc;
smc->sk.sk_sndbuf = bufsize * 2;
@@ -741,6 +883,8 @@ void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
struct smc_link_group *lgr = conn->lgr;
+ if (!conn->lgr || conn->lgr->is_smcd)
+ return;
smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
conn->sndbuf_desc, DMA_TO_DEVICE);
}
@@ -749,6 +893,8 @@ void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
struct smc_link_group *lgr = conn->lgr;
+ if (!conn->lgr || conn->lgr->is_smcd)
+ return;
smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
conn->sndbuf_desc, DMA_TO_DEVICE);
}
@@ -757,6 +903,8 @@ void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
struct smc_link_group *lgr = conn->lgr;
+ if (!conn->lgr || conn->lgr->is_smcd)
+ return;
smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
conn->rmb_desc, DMA_FROM_DEVICE);
}
@@ -765,6 +913,8 @@ void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
struct smc_link_group *lgr = conn->lgr;
+ if (!conn->lgr || conn->lgr->is_smcd)
+ return;
smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
conn->rmb_desc, DMA_FROM_DEVICE);
}
@@ -775,16 +925,16 @@ void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
* the Linux implementation uses just one RMB-element per RMB, i.e. uses an
* extra RMB for every connection in a link group
*/
-int smc_buf_create(struct smc_sock *smc)
+int smc_buf_create(struct smc_sock *smc, bool is_smcd)
{
int rc;
/* create send buffer */
- rc = __smc_buf_create(smc, false);
+ rc = __smc_buf_create(smc, is_smcd, false);
if (rc)
return rc;
/* create rmb */
- rc = __smc_buf_create(smc, true);
+ rc = __smc_buf_create(smc, is_smcd, true);
if (rc)
smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
return rc;
@@ -866,7 +1016,14 @@ void smc_core_exit(void)
spin_unlock_bh(&smc_lgr_list.lock);
list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
list_del_init(&lgr->list);
- smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
+ if (!lgr->is_smcd) {
+ struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+
+ if (lnk->state == SMC_LNK_ACTIVE)
+ smc_llc_send_delete_link(lnk, SMC_LLC_REQ,
+ false);
+ smc_llc_link_inactive(lnk);
+ }
cancel_delayed_work_sync(&lgr->free_work);
smc_lgr_free(lgr); /* free link group */
}
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 93cb3523bf50..c156674733c9 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -34,7 +34,8 @@ enum smc_lgr_role { /* possible roles of a link group */
enum smc_link_state { /* possible states of a link */
SMC_LNK_INACTIVE, /* link is inactive */
SMC_LNK_ACTIVATING, /* link is being activated */
- SMC_LNK_ACTIVE /* link is active */
+ SMC_LNK_ACTIVE, /* link is active */
+ SMC_LNK_DELETING, /* link is being deleted */
};
#define SMC_WR_BUF_SIZE 48 /* size of work request buffer */
@@ -84,14 +85,15 @@ struct smc_link {
wait_queue_head_t wr_reg_wait; /* wait for wr_reg result */
enum smc_wr_reg_state wr_reg_state; /* state of wr_reg request */
- union ib_gid gid; /* gid matching used vlan id */
+ u8 gid[SMC_GID_SIZE];/* gid matching used vlan id*/
+ u8 sgid_index; /* gid index for vlan id */
u32 peer_qpn; /* QP number of peer */
enum ib_mtu path_mtu; /* used mtu */
enum ib_mtu peer_mtu; /* mtu size of peer */
u32 psn_initial; /* QP tx initial packet seqno */
u32 peer_psn; /* QP rx initial packet seqno */
u8 peer_mac[ETH_ALEN]; /* = gid[8:10||13:15] */
- u8 peer_gid[sizeof(union ib_gid)]; /* gid of peer*/
+ u8 peer_gid[SMC_GID_SIZE]; /* gid of peer*/
u8 link_id; /* unique # within link group */
enum smc_link_state state; /* state of link */
@@ -124,15 +126,28 @@ struct smc_buf_desc {
void *cpu_addr; /* virtual address of buffer */
struct page *pages;
int len; /* length of buffer */
- struct sg_table sgt[SMC_LINKS_PER_LGR_MAX];/* virtual buffer */
- struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX];
- /* for rmb only: memory region
- * incl. rkey provided to peer
- */
- u32 order; /* allocation order */
u32 used; /* currently used / unused */
u8 reused : 1; /* new created / reused */
u8 regerr : 1; /* err during registration */
+ union {
+ struct { /* SMC-R */
+ struct sg_table sgt[SMC_LINKS_PER_LGR_MAX];
+ /* virtual buffer */
+ struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX];
+ /* for rmb only: memory region
+ * incl. rkey provided to peer
+ */
+ u32 order; /* allocation order */
+ };
+ struct { /* SMC-D */
+ unsigned short sba_idx;
+ /* SBA index number */
+ u64 token;
+ /* DMB token number */
+ dma_addr_t dma_addr;
+ /* DMA address */
+ };
+ };
};
struct smc_rtoken { /* address/key of remote RMB */
@@ -148,12 +163,10 @@ struct smc_rtoken { /* address/key of remote RMB */
* struct smc_clc_msg_accept_confirm.rmbe_size being a 4 bit value (0..15)
*/
+struct smcd_dev;
+
struct smc_link_group {
struct list_head list;
- enum smc_lgr_role role; /* client or server */
- struct smc_link lnk[SMC_LINKS_PER_LGR_MAX]; /* smc link */
- char peer_systemid[SMC_SYSTEMID_LEN];
- /* unique system_id of peer */
struct rb_root conns_all; /* connection tree */
rwlock_t conns_lock; /* protects conns_all */
unsigned int conns_num; /* current # of connections */
@@ -163,17 +176,34 @@ struct smc_link_group {
rwlock_t sndbufs_lock; /* protects tx buffers */
struct list_head rmbs[SMC_RMBE_SIZES]; /* rx buffers */
rwlock_t rmbs_lock; /* protects rx buffers */
- struct smc_rtoken rtokens[SMC_RMBS_PER_LGR_MAX]
- [SMC_LINKS_PER_LGR_MAX];
- /* remote addr/key pairs */
- unsigned long rtokens_used_mask[BITS_TO_LONGS(
- SMC_RMBS_PER_LGR_MAX)];
- /* used rtoken elements */
u8 id[SMC_LGR_ID_SIZE]; /* unique lgr id */
struct delayed_work free_work; /* delayed freeing of an lgr */
u8 sync_err : 1; /* lgr no longer fits to peer */
u8 terminating : 1;/* lgr is terminating */
+
+ bool is_smcd; /* SMC-R or SMC-D */
+ union {
+ struct { /* SMC-R */
+ enum smc_lgr_role role;
+ /* client or server */
+ struct smc_link lnk[SMC_LINKS_PER_LGR_MAX];
+ /* smc link */
+ char peer_systemid[SMC_SYSTEMID_LEN];
+ /* unique system_id of peer */
+ struct smc_rtoken rtokens[SMC_RMBS_PER_LGR_MAX]
+ [SMC_LINKS_PER_LGR_MAX];
+ /* remote addr/key pairs */
+ DECLARE_BITMAP(rtokens_used_mask, SMC_RMBS_PER_LGR_MAX);
+ /* used rtoken elements */
+ };
+ struct { /* SMC-D */
+ u64 peer_gid;
+ /* Peer GID (remote) */
+ struct smcd_dev *smcd;
+ /* ISM device for VLAN reg. */
+ };
+ };
};
/* Find the connection associated with the given alert token in the link group.
@@ -217,7 +247,8 @@ void smc_lgr_free(struct smc_link_group *lgr);
void smc_lgr_forget(struct smc_link_group *lgr);
void smc_lgr_terminate(struct smc_link_group *lgr);
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
-int smc_buf_create(struct smc_sock *smc);
+void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid);
+int smc_buf_create(struct smc_sock *smc, bool is_smcd);
int smc_uncompress_bufsize(u8 compressed);
int smc_rmb_rtoken_handling(struct smc_connection *conn,
struct smc_clc_msg_accept_confirm *clc);
@@ -227,9 +258,19 @@ void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn);
void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn);
void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn);
void smc_rmb_sync_sg_for_device(struct smc_connection *conn);
+int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id);
+
void smc_conn_free(struct smc_connection *conn);
-int smc_conn_create(struct smc_sock *smc,
+int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
struct smc_ib_device *smcibdev, u8 ibport,
- struct smc_clc_msg_local *lcl, int srv_first_contact);
+ struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
+ u64 peer_gid);
+void smcd_conn_free(struct smc_connection *conn);
+void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr);
void smc_core_exit(void);
+
+static inline struct smc_link_group *smc_get_lgr(struct smc_link *link)
+{
+ return container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]);
+}
#endif
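
The smc_get_lgr() helper added above hides a container_of() computation: from a pointer to the smc_link embedded in a link group it recovers the enclosing smc_link_group. A minimal user-space sketch of that pattern, with illustrative names rather than the kernel structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct link { int id; };

struct link_group {
	int lgr_id;
	struct link lnk[1];		/* link embedded in the group */
};

static struct link_group *get_lgr(struct link *l)
{
	/* valid only when l really points at some_group->lnk[0] */
	return container_of(l, struct link_group, lnk[0]);
}

int main(void)
{
	struct link_group g = { .lgr_id = 7 };

	printf("%d\n", get_lgr(&g.lnk[0])->lgr_id);	/* prints 7 */
	return 0;
}
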
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index 839354402215..dbf64a93d68a 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -79,6 +79,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
struct nlattr *bc)
{
struct smc_sock *smc = smc_sk(sk);
+ struct smc_diag_fallback fallback;
struct user_namespace *user_ns;
struct smc_diag_msg *r;
struct nlmsghdr *nlh;
@@ -91,11 +92,21 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
r = nlmsg_data(nlh);
smc_diag_msg_common_fill(r, sk);
r->diag_state = sk->sk_state;
- r->diag_fallback = smc->use_fallback;
+ if (smc->use_fallback)
+ r->diag_mode = SMC_DIAG_MODE_FALLBACK_TCP;
+ else if (smc->conn.lgr && smc->conn.lgr->is_smcd)
+ r->diag_mode = SMC_DIAG_MODE_SMCD;
+ else
+ r->diag_mode = SMC_DIAG_MODE_SMCR;
user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk);
if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
goto errout;
+ fallback.reason = smc->fallback_rsn;
+ fallback.peer_diagnosis = smc->peer_diagnosis;
+ if (nla_put(skb, SMC_DIAG_FALLBACK, sizeof(fallback), &fallback) < 0)
+ goto errout;
+
if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) &&
smc->conn.alert_token_local) {
struct smc_connection *conn = &smc->conn;
@@ -136,7 +147,8 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
goto errout;
}
- if ((req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && smc->conn.lgr &&
+ if (smc->conn.lgr && !smc->conn.lgr->is_smcd &&
+ (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) &&
!list_empty(&smc->conn.lgr->list)) {
struct smc_diag_lgrinfo linfo = {
.role = smc->conn.lgr->role,
@@ -148,13 +160,28 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name));
smc_gid_be16_convert(linfo.lnk[0].gid,
- smc->conn.lgr->lnk[0].gid.raw);
+ smc->conn.lgr->lnk[0].gid);
smc_gid_be16_convert(linfo.lnk[0].peer_gid,
smc->conn.lgr->lnk[0].peer_gid);
if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
goto errout;
}
+ if (smc->conn.lgr && smc->conn.lgr->is_smcd &&
+ (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
+ !list_empty(&smc->conn.lgr->list)) {
+ struct smc_connection *conn = &smc->conn;
+ struct smcd_diag_dmbinfo dinfo = {
+ .linkid = *((u32 *)conn->lgr->id),
+ .peer_gid = conn->lgr->peer_gid,
+ .my_gid = conn->lgr->smcd->local_gid,
+ .token = conn->rmb_desc->token,
+ .peer_token = conn->peer_token
+ };
+
+ if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
+ goto errout;
+ }
nlmsg_end(skb, nlh);
return 0;
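
For context, the single diag_fallback flag is replaced here by a three-valued diag_mode. A sketch of how a user-space diag consumer might decode it; the numeric values below follow the apparent enum order (SMCR, FALLBACK_TCP, SMCD) and are an assumption for illustration, not a verified ABI:

#include <stdio.h>

/* assumed ordering of the SMC_DIAG_MODE_* values from this patch */
enum smc_diag_mode {
	SMC_DIAG_MODE_SMCR = 0,
	SMC_DIAG_MODE_FALLBACK_TCP = 1,
	SMC_DIAG_MODE_SMCD = 2,
};

static const char *smc_mode_str(unsigned char mode)
{
	switch (mode) {
	case SMC_DIAG_MODE_SMCR:		return "smc-r";
	case SMC_DIAG_MODE_SMCD:		return "smc-d";
	case SMC_DIAG_MODE_FALLBACK_TCP:	return "tcp fallback";
	default:				return "unknown";
	}
}

int main(void)
{
	printf("%s\n", smc_mode_str(2));	/* prints "smc-d" */
	return 0;
}
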
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 117b05f1a494..9bb5274a244e 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -69,7 +69,7 @@ static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
- rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, 0, 1, 0);
+ rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0);
rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
sizeof(lnk->peer_mac));
@@ -113,8 +113,7 @@ int smc_ib_modify_qp_reset(struct smc_link *lnk)
int smc_ib_ready_link(struct smc_link *lnk)
{
- struct smc_link_group *lgr =
- container_of(lnk, struct smc_link_group, lnk[0]);
+ struct smc_link_group *lgr = smc_get_lgr(lnk);
int rc = 0;
rc = smc_ib_modify_qp_init(lnk);
@@ -144,6 +143,93 @@ out:
return rc;
}
+static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
+{
+ struct ib_gid_attr gattr;
+ union ib_gid gid;
+ int rc;
+
+ rc = ib_query_gid(smcibdev->ibdev, ibport, 0, &gid, &gattr);
+ if (rc || !gattr.ndev)
+ return -ENODEV;
+
+ memcpy(smcibdev->mac[ibport - 1], gattr.ndev->dev_addr, ETH_ALEN);
+ dev_put(gattr.ndev);
+ return 0;
+}
+
+/* Create an identifier unique for this instance of SMC-R.
+ * The MAC-address of the first active registered IB device
+ * plus a random 2-byte number is used to create this identifier.
+ * This name is delivered to the peer during connection initialization.
+ */
+static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
+ u8 ibport)
+{
+ memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
+ sizeof(smcibdev->mac[ibport - 1]));
+ get_random_bytes(&local_systemid[0], 2);
+}
+
+bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
+{
+ return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
+}
+
+/* determine the gid for an ib-device port and vlan id */
+int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
+ unsigned short vlan_id, u8 gid[], u8 *sgid_index)
+{
+ struct ib_gid_attr gattr;
+ union ib_gid _gid;
+ int i;
+
+ for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
+ memset(&_gid, 0, SMC_GID_SIZE);
+ memset(&gattr, 0, sizeof(gattr));
+ if (ib_query_gid(smcibdev->ibdev, ibport, i, &_gid, &gattr))
+ continue;
+ if (!gattr.ndev)
+ continue;
+ if (((!vlan_id && !is_vlan_dev(gattr.ndev)) ||
+ (vlan_id && is_vlan_dev(gattr.ndev) &&
+ vlan_dev_vlan_id(gattr.ndev) == vlan_id)) &&
+ gattr.gid_type == IB_GID_TYPE_IB) {
+ if (gid)
+ memcpy(gid, &_gid, SMC_GID_SIZE);
+ if (sgid_index)
+ *sgid_index = i;
+ dev_put(gattr.ndev);
+ return 0;
+ }
+ dev_put(gattr.ndev);
+ }
+ return -ENODEV;
+}
+
+static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
+{
+ int rc;
+
+ memset(&smcibdev->pattr[ibport - 1], 0,
+ sizeof(smcibdev->pattr[ibport - 1]));
+ rc = ib_query_port(smcibdev->ibdev, ibport,
+ &smcibdev->pattr[ibport - 1]);
+ if (rc)
+ goto out;
+ /* the SMC protocol requires specification of the RoCE MAC address */
+ rc = smc_ib_fill_mac(smcibdev, ibport);
+ if (rc)
+ goto out;
+ if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
+ sizeof(local_systemid)) &&
+ smc_ib_port_active(smcibdev, ibport))
+ /* create unique system identifier */
+ smc_ib_define_local_systemid(smcibdev, ibport);
+out:
+ return rc;
+}
+
/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
@@ -371,62 +457,6 @@ void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev,
buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0;
}
-static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport)
-{
- struct ib_gid_attr gattr;
- int rc;
-
- rc = ib_query_gid(smcibdev->ibdev, ibport, 0,
- &smcibdev->gid[ibport - 1], &gattr);
- if (rc || !gattr.ndev)
- return -ENODEV;
-
- memcpy(smcibdev->mac[ibport - 1], gattr.ndev->dev_addr, ETH_ALEN);
- dev_put(gattr.ndev);
- return 0;
-}
-
-/* Create an identifier unique for this instance of SMC-R.
- * The MAC-address of the first active registered IB device
- * plus a random 2-byte number is used to create this identifier.
- * This name is delivered to the peer during connection initialization.
- */
-static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
- u8 ibport)
-{
- memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
- sizeof(smcibdev->mac[ibport - 1]));
- get_random_bytes(&local_systemid[0], 2);
-}
-
-bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
-{
- return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
-}
-
-int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
-{
- int rc;
-
- memset(&smcibdev->pattr[ibport - 1], 0,
- sizeof(smcibdev->pattr[ibport - 1]));
- rc = ib_query_port(smcibdev->ibdev, ibport,
- &smcibdev->pattr[ibport - 1]);
- if (rc)
- goto out;
- /* the SMC protocol requires specification of the RoCE MAC address */
- rc = smc_ib_fill_gid_and_mac(smcibdev, ibport);
- if (rc)
- goto out;
- if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
- sizeof(local_systemid)) &&
- smc_ib_port_active(smcibdev, ibport))
- /* create unique system identifier */
- smc_ib_define_local_systemid(smcibdev, ibport);
-out:
- return rc;
-}
-
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
struct ib_cq_init_attr cqattr = {
@@ -455,9 +485,6 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
smcibdev->roce_cq_recv = NULL;
goto err;
}
- INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
- smc_ib_global_event_handler);
- ib_register_event_handler(&smcibdev->event_handler);
smc_wr_add_dev(smcibdev);
smcibdev->initialized = 1;
return rc;
@@ -473,7 +500,6 @@ static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
return;
smcibdev->initialized = 0;
smc_wr_remove_dev(smcibdev);
- ib_unregister_event_handler(&smcibdev->event_handler);
ib_destroy_cq(smcibdev->roce_cq_recv);
ib_destroy_cq(smcibdev->roce_cq_send);
}
@@ -484,6 +510,8 @@ static struct ib_client smc_ib_client;
static void smc_ib_add_dev(struct ib_device *ibdev)
{
struct smc_ib_device *smcibdev;
+ u8 port_cnt;
+ int i;
if (ibdev->node_type != RDMA_NODE_IB_CA)
return;
@@ -499,6 +527,21 @@ static void smc_ib_add_dev(struct ib_device *ibdev)
list_add_tail(&smcibdev->list, &smc_ib_devices.list);
spin_unlock(&smc_ib_devices.lock);
ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
+ INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
+ smc_ib_global_event_handler);
+ ib_register_event_handler(&smcibdev->event_handler);
+
+ /* trigger reading of the port attributes */
+ port_cnt = smcibdev->ibdev->phys_port_cnt;
+ for (i = 0;
+ i < min_t(size_t, port_cnt, SMC_MAX_PORTS);
+ i++) {
+ set_bit(i, &smcibdev->port_event_mask);
+ /* determine pnetids of the port */
+ smc_pnetid_by_dev_port(ibdev->dev.parent, i,
+ smcibdev->pnetid[i]);
+ }
+ schedule_work(&smcibdev->port_event_work);
}
/* callback function for ib_register_client() */
@@ -513,6 +556,7 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
spin_unlock(&smc_ib_devices.lock);
smc_pnet_remove_by_ibdev(smcibdev);
smc_ib_cleanup_per_ibdev(smcibdev);
+ ib_unregister_event_handler(&smcibdev->event_handler);
kfree(smcibdev);
}
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index e90630dadf8e..bac7fd65a4c0 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <rdma/ib_verbs.h>
+#include <net/smc.h>
#define SMC_MAX_PORTS 2 /* Max # of ports */
#define SMC_GID_SIZE sizeof(union ib_gid)
@@ -39,7 +40,8 @@ struct smc_ib_device { /* ib-device infos for smc */
struct tasklet_struct recv_tasklet; /* called by recv cq handler */
char mac[SMC_MAX_PORTS][ETH_ALEN];
/* mac address per port*/
- union ib_gid gid[SMC_MAX_PORTS]; /* gid per port */
+ u8 pnetid[SMC_MAX_PORTS][SMC_MAX_PNETID_LEN];
+ /* pnetid per port */
u8 initialized : 1; /* ib dev CQ, evthdl done */
struct work_struct port_event_work;
unsigned long port_event_mask;
@@ -51,7 +53,6 @@ struct smc_link;
int smc_ib_register_client(void) __init;
void smc_ib_unregister_client(void);
bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport);
-int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport);
int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
struct smc_buf_desc *buf_slot,
enum dma_data_direction data_direction);
@@ -75,4 +76,6 @@ void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
struct smc_buf_desc *buf_slot,
enum dma_data_direction data_direction);
+int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
+ unsigned short vlan_id, u8 gid[], u8 *sgid_index);
#endif
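
The GID selection loop in smc_ib_determine_gid() boils down to one predicate: the GID entry's netdev must agree with the requested VLAN. A standalone restatement of that check, simplified and with illustrative parameter names:

#include <stdbool.h>

/* entry is usable when its VLAN state matches the requested vlan_id */
static bool gid_vlan_matches(bool ndev_is_vlan, unsigned short ndev_vlan_id,
			     unsigned short wanted_vlan_id)
{
	if (!wanted_vlan_id)			/* untagged traffic wanted */
		return !ndev_is_vlan;
	return ndev_is_vlan && ndev_vlan_id == wanted_vlan_id;
}
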
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
new file mode 100644
index 000000000000..e36f21ce7252
--- /dev/null
+++ b/net/smc/smc_ism.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Shared Memory Communications Direct over ISM devices (SMC-D)
+ *
+ * Functions for ISM device.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+
+#include "smc.h"
+#include "smc_core.h"
+#include "smc_ism.h"
+#include "smc_pnet.h"
+
+struct smcd_dev_list smcd_dev_list = {
+ .list = LIST_HEAD_INIT(smcd_dev_list.list),
+ .lock = __SPIN_LOCK_UNLOCKED(smcd_dev_list.lock)
+};
+
+/* Test if an ISM communication is possible. */
+int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd)
+{
+ return smcd->ops->query_remote_gid(smcd, peer_gid, vlan_id ? 1 : 0,
+ vlan_id);
+}
+
+int smc_ism_write(struct smcd_dev *smcd, const struct smc_ism_position *pos,
+ void *data, size_t len)
+{
+ int rc;
+
+ rc = smcd->ops->move_data(smcd, pos->token, pos->index, pos->signal,
+ pos->offset, data, len);
+
+ return rc < 0 ? rc : 0;
+}
+
+/* Set a connection using this DMBE. */
+void smc_ism_set_conn(struct smc_connection *conn)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
+ conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn;
+ spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
+}
+
+/* Unset a connection using this DMBE. */
+void smc_ism_unset_conn(struct smc_connection *conn)
+{
+ unsigned long flags;
+
+ if (!conn->rmb_desc)
+ return;
+
+ spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
+ conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL;
+ spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
+}
+
+/* Register a VLAN identifier with the ISM device. Use a reference count
+ * and add a VLAN identifier only when the first DMB using this VLAN is
+ * registered.
+ */
+int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid)
+{
+ struct smc_ism_vlanid *new_vlan, *vlan;
+ unsigned long flags;
+ int rc = 0;
+
+ if (!vlanid) /* No valid vlan id */
+ return -EINVAL;
+
+ /* create new vlan entry, in case we need it */
+ new_vlan = kzalloc(sizeof(*new_vlan), GFP_KERNEL);
+ if (!new_vlan)
+ return -ENOMEM;
+ new_vlan->vlanid = vlanid;
+ refcount_set(&new_vlan->refcnt, 1);
+
+ /* if there is an existing entry, increase count and return */
+ spin_lock_irqsave(&smcd->lock, flags);
+ list_for_each_entry(vlan, &smcd->vlan, list) {
+ if (vlan->vlanid == vlanid) {
+ refcount_inc(&vlan->refcnt);
+ kfree(new_vlan);
+ goto out;
+ }
+ }
+
+ /* no existing entry found.
+ * add new entry to device; might fail, e.g., if HW limit reached
+ */
+ if (smcd->ops->add_vlan_id(smcd, vlanid)) {
+ kfree(new_vlan);
+ rc = -EIO;
+ goto out;
+ }
+ list_add_tail(&new_vlan->list, &smcd->vlan);
+out:
+ spin_unlock_irqrestore(&smcd->lock, flags);
+ return rc;
+}
+
+/* Unregister a VLAN identifier with the ISM device. Use a reference count
+ * and remove a VLAN identifier only when the last DMB using this VLAN is
+ * unregistered.
+ */
+int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid)
+{
+ struct smc_ism_vlanid *vlan;
+ unsigned long flags;
+ bool found = false;
+ int rc = 0;
+
+ if (!vlanid) /* No valid vlan id */
+ return -EINVAL;
+
+ spin_lock_irqsave(&smcd->lock, flags);
+ list_for_each_entry(vlan, &smcd->vlan, list) {
+ if (vlan->vlanid == vlanid) {
+ if (!refcount_dec_and_test(&vlan->refcnt))
+ goto out;
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ rc = -ENOENT;
+ goto out; /* VLAN id not in table */
+ }
+
+ /* Found and the last reference just gone */
+ if (smcd->ops->del_vlan_id(smcd, vlanid))
+ rc = -EIO;
+ list_del(&vlan->list);
+ kfree(vlan);
+out:
+ spin_unlock_irqrestore(&smcd->lock, flags);
+ return rc;
+}
+
+int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
+{
+ struct smcd_dmb dmb;
+
+ memset(&dmb, 0, sizeof(dmb));
+ dmb.dmb_tok = dmb_desc->token;
+ dmb.sba_idx = dmb_desc->sba_idx;
+ dmb.cpu_addr = dmb_desc->cpu_addr;
+ dmb.dma_addr = dmb_desc->dma_addr;
+ dmb.dmb_len = dmb_desc->len;
+ return smcd->ops->unregister_dmb(smcd, &dmb);
+}
+
+int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
+ struct smc_buf_desc *dmb_desc)
+{
+ struct smcd_dmb dmb;
+ int rc;
+
+ memset(&dmb, 0, sizeof(dmb));
+ dmb.dmb_len = dmb_len;
+ dmb.sba_idx = dmb_desc->sba_idx;
+ dmb.vlan_id = lgr->vlan_id;
+ dmb.rgid = lgr->peer_gid;
+ rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb);
+ if (!rc) {
+ dmb_desc->sba_idx = dmb.sba_idx;
+ dmb_desc->token = dmb.dmb_tok;
+ dmb_desc->cpu_addr = dmb.cpu_addr;
+ dmb_desc->dma_addr = dmb.dma_addr;
+ dmb_desc->len = dmb.dmb_len;
+ }
+ return rc;
+}
+
+struct smc_ism_event_work {
+ struct work_struct work;
+ struct smcd_dev *smcd;
+ struct smcd_event event;
+};
+
+#define ISM_EVENT_REQUEST 0x0001
+#define ISM_EVENT_RESPONSE 0x0002
+#define ISM_EVENT_REQUEST_IR 0x00000001
+#define ISM_EVENT_CODE_TESTLINK 0x83
+
+static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
+{
+ union {
+ u64 info;
+ struct {
+ u32 uid;
+ unsigned short vlanid;
+ u16 code;
+ };
+ } ev_info;
+
+ switch (wrk->event.code) {
+ case ISM_EVENT_CODE_TESTLINK: /* Activity timer */
+ ev_info.info = wrk->event.info;
+ if (ev_info.code == ISM_EVENT_REQUEST) {
+ ev_info.code = ISM_EVENT_RESPONSE;
+ wrk->smcd->ops->signal_event(wrk->smcd,
+ wrk->event.tok,
+ ISM_EVENT_REQUEST_IR,
+ ISM_EVENT_CODE_TESTLINK,
+ ev_info.info);
+ }
+ break;
+ }
+}
+
+/* worker for SMC-D events */
+static void smc_ism_event_work(struct work_struct *work)
+{
+ struct smc_ism_event_work *wrk =
+ container_of(work, struct smc_ism_event_work, work);
+
+ switch (wrk->event.type) {
+ case ISM_EVENT_GID: /* GID event, token is peer GID */
+ smc_smcd_terminate(wrk->smcd, wrk->event.tok);
+ break;
+ case ISM_EVENT_DMB:
+ break;
+ case ISM_EVENT_SWR: /* Software defined event */
+ smcd_handle_sw_event(wrk);
+ break;
+ }
+ kfree(wrk);
+}
+
+static void smcd_release(struct device *dev)
+{
+ struct smcd_dev *smcd = container_of(dev, struct smcd_dev, dev);
+
+ kfree(smcd->conn);
+ kfree(smcd);
+}
+
+struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
+ const struct smcd_ops *ops, int max_dmbs)
+{
+ struct smcd_dev *smcd;
+
+ smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);
+ if (!smcd)
+ return NULL;
+ smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
+ GFP_KERNEL);
+ if (!smcd->conn) {
+ kfree(smcd);
+ return NULL;
+ }
+
+ smcd->dev.parent = parent;
+ smcd->dev.release = smcd_release;
+ device_initialize(&smcd->dev);
+ dev_set_name(&smcd->dev, name);
+ smcd->ops = ops;
+ smc_pnetid_by_dev_port(parent, 0, smcd->pnetid);
+
+ spin_lock_init(&smcd->lock);
+ INIT_LIST_HEAD(&smcd->vlan);
+ smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
+ WQ_MEM_RECLAIM, name);
+ return smcd;
+}
+EXPORT_SYMBOL_GPL(smcd_alloc_dev);
+
+int smcd_register_dev(struct smcd_dev *smcd)
+{
+ spin_lock(&smcd_dev_list.lock);
+ list_add_tail(&smcd->list, &smcd_dev_list.list);
+ spin_unlock(&smcd_dev_list.lock);
+
+ return device_add(&smcd->dev);
+}
+EXPORT_SYMBOL_GPL(smcd_register_dev);
+
+void smcd_unregister_dev(struct smcd_dev *smcd)
+{
+ spin_lock(&smcd_dev_list.lock);
+ list_del(&smcd->list);
+ spin_unlock(&smcd_dev_list.lock);
+ flush_workqueue(smcd->event_wq);
+ destroy_workqueue(smcd->event_wq);
+ smc_smcd_terminate(smcd, 0);
+
+ device_del(&smcd->dev);
+}
+EXPORT_SYMBOL_GPL(smcd_unregister_dev);
+
+void smcd_free_dev(struct smcd_dev *smcd)
+{
+ put_device(&smcd->dev);
+}
+EXPORT_SYMBOL_GPL(smcd_free_dev);
+
+/* SMCD Device event handler. Called from ISM device interrupt handler.
+ * Parameters are smcd device pointer,
+ * - event->type (0 --> DMB, 1 --> GID),
+ * - event->code (event code),
+ * - event->tok (either DMB token when event type 0, or GID when event type 1)
+ * - event->time (time of day)
+ * - event->info (debug info).
+ *
+ * Context:
+ * - Function called in IRQ context from ISM device driver event handler.
+ */
+void smcd_handle_event(struct smcd_dev *smcd, struct smcd_event *event)
+{
+ struct smc_ism_event_work *wrk;
+
+ /* copy event to event work queue, and let it be handled there */
+ wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);
+ if (!wrk)
+ return;
+ INIT_WORK(&wrk->work, smc_ism_event_work);
+ wrk->smcd = smcd;
+ wrk->event = *event;
+ queue_work(smcd->event_wq, &wrk->work);
+}
+EXPORT_SYMBOL_GPL(smcd_handle_event);
+
+/* SMCD Device interrupt handler. Called from ISM device interrupt handler.
+ * Parameters are smcd device pointer and DMB number. Find the connection and
+ * schedule the tasklet for this connection.
+ *
+ * Context:
+ * - Function called in IRQ context from ISM device driver IRQ handler.
+ */
+void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno)
+{
+ struct smc_connection *conn = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&smcd->lock, flags);
+ conn = smcd->conn[dmbno];
+ if (conn)
+ tasklet_schedule(&conn->rx_tsklet);
+ spin_unlock_irqrestore(&smcd->lock, flags);
+}
+EXPORT_SYMBOL_GPL(smcd_handle_irq);
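
smc_ism_get_vlan()/smc_ism_put_vlan() above implement a classic refcounted registry: the device's add_vlan_id/del_vlan_id hooks fire only on the first get and the last put. A compact user-space sketch of the same scheme, with the locking omitted and illustrative names:

#include <stdlib.h>

struct vlan_ref {
	unsigned short vlanid;
	unsigned int refcnt;
	struct vlan_ref *next;
};

static struct vlan_ref *vlans;

static int vlan_get(unsigned short vlanid)
{
	struct vlan_ref *v;

	for (v = vlans; v; v = v->next) {
		if (v->vlanid == vlanid) {
			v->refcnt++;		/* existing registration */
			return 0;
		}
	}
	v = calloc(1, sizeof(*v));
	if (!v)
		return -1;
	v->vlanid = vlanid;
	v->refcnt = 1;
	v->next = vlans;
	vlans = v;	/* first user: register vlanid with the device here */
	return 0;
}

static void vlan_put(unsigned short vlanid)
{
	struct vlan_ref **pv, *v;

	for (pv = &vlans; (v = *pv); pv = &v->next) {
		if (v->vlanid != vlanid)
			continue;
		if (--v->refcnt)
			return;		/* still in use */
		*pv = v->next;	/* last user: unregister from device here */
		free(v);
		return;
	}
}
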
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
new file mode 100644
index 000000000000..aee45b860b79
--- /dev/null
+++ b/net/smc/smc_ism.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Shared Memory Communications Direct over ISM devices (SMC-D)
+ *
+ * SMC-D ISM device structure definitions.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#ifndef SMCD_ISM_H
+#define SMCD_ISM_H
+
+#include <linux/uio.h>
+
+#include "smc.h"
+
+struct smcd_dev_list { /* List of SMCD devices */
+ struct list_head list;
+ spinlock_t lock; /* Protects list of devices */
+};
+
+extern struct smcd_dev_list smcd_dev_list; /* list of smcd devices */
+
+struct smc_ism_vlanid { /* VLAN id set on ISM device */
+ struct list_head list;
+ unsigned short vlanid; /* Vlan id */
+ refcount_t refcnt; /* Reference count */
+};
+
+struct smc_ism_position { /* ISM device position to write to */
+ u64 token; /* Token of DMB */
+ u32 offset; /* Offset into DMBE */
+ u8 index; /* Index of DMBE */
+ u8 signal; /* Generate interrupt on owner side */
+};
+
+struct smcd_dev;
+
+int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *dev);
+void smc_ism_set_conn(struct smc_connection *conn);
+void smc_ism_unset_conn(struct smc_connection *conn);
+int smc_ism_get_vlan(struct smcd_dev *dev, unsigned short vlan_id);
+int smc_ism_put_vlan(struct smcd_dev *dev, unsigned short vlan_id);
+int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size,
+ struct smc_buf_desc *dmb_desc);
+int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc);
+int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos,
+ void *data, size_t len);
+#endif
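
smcd_handle_sw_event() in smc_ism.c above decodes the 64-bit event info word through an anonymous-struct union. The field layout is endianness-dependent, which is acceptable since ISM devices exist only on s390; a standalone sketch of the trick, using fixed-width types and an illustrative value:

#include <stdint.h>
#include <stdio.h>

union ev_info {
	uint64_t info;
	struct {
		uint32_t uid;
		uint16_t vlanid;
		uint16_t code;
	};
};

int main(void)
{
	union ev_info ev = { .info = 0 };

	ev.uid = 1;
	ev.vlanid = 5;
	ev.code = 0x83;		/* e.g. ISM_EVENT_CODE_TESTLINK */
	printf("info word: %#llx\n", (unsigned long long)ev.info);
	return 0;
}
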
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 5800a6b43d83..9c916c709ca7 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -182,12 +182,10 @@ static int smc_llc_add_pending_send(struct smc_link *link,
}
/* high-level API to send LLC confirm link */
-int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[],
- union ib_gid *gid,
+int smc_llc_send_confirm_link(struct smc_link *link,
enum smc_llc_reqresp reqresp)
{
- struct smc_link_group *lgr = container_of(link, struct smc_link_group,
- lnk[SMC_SINGLE_LINK]);
+ struct smc_link_group *lgr = smc_get_lgr(link);
struct smc_llc_msg_confirm_link *confllc;
struct smc_wr_tx_pend_priv *pend;
struct smc_wr_buf *wr_buf;
@@ -203,8 +201,9 @@ int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[],
confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
if (reqresp == SMC_LLC_RESP)
confllc->hd.flags |= SMC_LLC_FLAG_RESP;
- memcpy(confllc->sender_mac, mac, ETH_ALEN);
- memcpy(confllc->sender_gid, gid, SMC_GID_SIZE);
+ memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1],
+ ETH_ALEN);
+ memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE);
hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
confllc->link_num = link->link_id;
memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE);
@@ -241,8 +240,7 @@ static int smc_llc_send_confirm_rkey(struct smc_link *link,
/* prepare an add link message */
static void smc_llc_prep_add_link(struct smc_llc_msg_add_link *addllc,
- struct smc_link *link, u8 mac[],
- union ib_gid *gid,
+ struct smc_link *link, u8 mac[], u8 gid[],
enum smc_llc_reqresp reqresp)
{
memset(addllc, 0, sizeof(*addllc));
@@ -259,8 +257,7 @@ static void smc_llc_prep_add_link(struct smc_llc_msg_add_link *addllc,
}
/* send ADD LINK request or response */
-int smc_llc_send_add_link(struct smc_link *link, u8 mac[],
- union ib_gid *gid,
+int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
enum smc_llc_reqresp reqresp)
{
struct smc_llc_msg_add_link *addllc;
@@ -281,7 +278,7 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[],
/* prepare a delete link message */
static void smc_llc_prep_delete_link(struct smc_llc_msg_del_link *delllc,
struct smc_link *link,
- enum smc_llc_reqresp reqresp)
+ enum smc_llc_reqresp reqresp, bool orderly)
{
memset(delllc, 0, sizeof(*delllc));
delllc->hd.common.type = SMC_LLC_DELETE_LINK;
@@ -290,13 +287,14 @@ static void smc_llc_prep_delete_link(struct smc_llc_msg_del_link *delllc,
delllc->hd.flags |= SMC_LLC_FLAG_RESP;
/* DEL_LINK_ALL because only 1 link supported */
delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
- delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
+ if (orderly)
+ delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
delllc->link_num = link->link_id;
}
/* send DELETE LINK request or response */
int smc_llc_send_delete_link(struct smc_link *link,
- enum smc_llc_reqresp reqresp)
+ enum smc_llc_reqresp reqresp, bool orderly)
{
struct smc_llc_msg_del_link *delllc;
struct smc_wr_tx_pend_priv *pend;
@@ -307,7 +305,7 @@ int smc_llc_send_delete_link(struct smc_link *link,
if (rc)
return rc;
delllc = (struct smc_llc_msg_del_link *)wr_buf;
- smc_llc_prep_delete_link(delllc, link, reqresp);
+ smc_llc_prep_delete_link(delllc, link, reqresp, orderly);
/* send llc message */
rc = smc_wr_tx_send(link, pend);
return rc;
@@ -381,11 +379,9 @@ static int smc_llc_send_message(struct smc_link *link, void *llcbuf, int llclen)
static void smc_llc_rx_confirm_link(struct smc_link *link,
struct smc_llc_msg_confirm_link *llc)
{
- struct smc_link_group *lgr;
+ struct smc_link_group *lgr = smc_get_lgr(link);
int conf_rc;
- lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]);
-
/* RMBE eyecatchers are not supported */
if (llc->hd.flags & SMC_LLC_FLAG_NO_RMBE_EYEC)
conf_rc = 0;
@@ -411,8 +407,7 @@ static void smc_llc_rx_confirm_link(struct smc_link *link,
static void smc_llc_rx_add_link(struct smc_link *link,
struct smc_llc_msg_add_link *llc)
{
- struct smc_link_group *lgr = container_of(link, struct smc_link_group,
- lnk[SMC_SINGLE_LINK]);
+ struct smc_link_group *lgr = smc_get_lgr(link);
if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
if (link->state == SMC_LNK_ACTIVATING)
@@ -426,14 +421,12 @@ static void smc_llc_rx_add_link(struct smc_link *link,
if (lgr->role == SMC_SERV) {
smc_llc_prep_add_link(llc, link,
link->smcibdev->mac[link->ibport - 1],
- &link->smcibdev->gid[link->ibport - 1],
- SMC_LLC_REQ);
+ link->gid, SMC_LLC_REQ);
} else {
smc_llc_prep_add_link(llc, link,
link->smcibdev->mac[link->ibport - 1],
- &link->smcibdev->gid[link->ibport - 1],
- SMC_LLC_RESP);
+ link->gid, SMC_LLC_RESP);
}
smc_llc_send_message(link, llc, sizeof(*llc));
}
@@ -442,22 +435,23 @@ static void smc_llc_rx_add_link(struct smc_link *link,
static void smc_llc_rx_delete_link(struct smc_link *link,
struct smc_llc_msg_del_link *llc)
{
- struct smc_link_group *lgr = container_of(link, struct smc_link_group,
- lnk[SMC_SINGLE_LINK]);
+ struct smc_link_group *lgr = smc_get_lgr(link);
if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
if (lgr->role == SMC_SERV)
- smc_lgr_terminate(lgr);
+ smc_lgr_schedule_free_work_fast(lgr);
} else {
+ smc_lgr_forget(lgr);
+ smc_llc_link_deleting(link);
if (lgr->role == SMC_SERV) {
- smc_lgr_forget(lgr);
- smc_llc_prep_delete_link(llc, link, SMC_LLC_REQ);
- smc_llc_send_message(link, llc, sizeof(*llc));
+ /* client asks to delete this link, send request */
+ smc_llc_prep_delete_link(llc, link, SMC_LLC_REQ, true);
} else {
- smc_llc_prep_delete_link(llc, link, SMC_LLC_RESP);
- smc_llc_send_message(link, llc, sizeof(*llc));
- smc_lgr_terminate(lgr);
+ /* server requests to delete this link, send response */
+ smc_llc_prep_delete_link(llc, link, SMC_LLC_RESP, true);
}
+ smc_llc_send_message(link, llc, sizeof(*llc));
+ smc_lgr_schedule_free_work_fast(lgr);
}
}
@@ -476,17 +470,14 @@ static void smc_llc_rx_test_link(struct smc_link *link,
static void smc_llc_rx_confirm_rkey(struct smc_link *link,
struct smc_llc_msg_confirm_rkey *llc)
{
- struct smc_link_group *lgr;
int rc;
- lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]);
-
if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
link->llc_confirm_rkey_rc = llc->hd.flags &
SMC_LLC_FLAG_RKEY_NEG;
complete(&link->llc_confirm_rkey);
} else {
- rc = smc_rtoken_add(lgr,
+ rc = smc_rtoken_add(smc_get_lgr(link),
llc->rtoken[0].rmb_vaddr,
llc->rtoken[0].rmb_key);
@@ -514,18 +505,15 @@ static void smc_llc_rx_confirm_rkey_cont(struct smc_link *link,
static void smc_llc_rx_delete_rkey(struct smc_link *link,
struct smc_llc_msg_delete_rkey *llc)
{
- struct smc_link_group *lgr;
u8 err_mask = 0;
int i, max;
- lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]);
-
if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
/* unused as long as we don't send this type of msg */
} else {
max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
for (i = 0; i < max; i++) {
- if (smc_rtoken_delete(lgr, llc->rkey[i]))
+ if (smc_rtoken_delete(smc_get_lgr(link), llc->rkey[i]))
err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i);
}
@@ -583,12 +571,10 @@ static void smc_llc_testlink_work(struct work_struct *work)
struct smc_link *link = container_of(to_delayed_work(work),
struct smc_link, llc_testlink_wrk);
unsigned long next_interval;
- struct smc_link_group *lgr;
unsigned long expire_time;
u8 user_data[16] = { 0 };
int rc;
- lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]);
if (link->state != SMC_LNK_ACTIVE)
return; /* don't reschedule worker */
expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
@@ -602,7 +588,7 @@ static void smc_llc_testlink_work(struct work_struct *work)
rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
SMC_LLC_WAIT_TIME);
if (rc <= 0) {
- smc_lgr_terminate(lgr);
+ smc_lgr_terminate(smc_get_lgr(link));
return;
}
next_interval = link->llc_testlink_time;
@@ -613,8 +599,7 @@ out:
int smc_llc_link_init(struct smc_link *link)
{
- struct smc_link_group *lgr = container_of(link, struct smc_link_group,
- lnk[SMC_SINGLE_LINK]);
+ struct smc_link_group *lgr = smc_get_lgr(link);
link->llc_wq = alloc_ordered_workqueue("llc_wq-%x:%x)", WQ_MEM_RECLAIM,
*((u32 *)lgr->id),
link->link_id);
@@ -640,6 +625,11 @@ void smc_llc_link_active(struct smc_link *link, int testlink_time)
}
}
+void smc_llc_link_deleting(struct smc_link *link)
+{
+ link->state = SMC_LNK_DELETING;
+}
+
/* called in tasklet context */
void smc_llc_link_inactive(struct smc_link *link)
{
diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h
index 65c8645e96a1..9e2ff088e301 100644
--- a/net/smc/smc_llc.h
+++ b/net/smc/smc_llc.h
@@ -36,14 +36,15 @@ enum smc_llc_msg_type {
};
/* transmit */
-int smc_llc_send_confirm_link(struct smc_link *lnk, u8 mac[], union ib_gid *gid,
+int smc_llc_send_confirm_link(struct smc_link *lnk,
enum smc_llc_reqresp reqresp);
-int smc_llc_send_add_link(struct smc_link *link, u8 mac[], union ib_gid *gid,
+int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
enum smc_llc_reqresp reqresp);
int smc_llc_send_delete_link(struct smc_link *link,
- enum smc_llc_reqresp reqresp);
+ enum smc_llc_reqresp reqresp, bool orderly);
int smc_llc_link_init(struct smc_link *link);
void smc_llc_link_active(struct smc_link *link, int testlink_time);
+void smc_llc_link_deleting(struct smc_link *link);
void smc_llc_link_inactive(struct smc_link *link);
void smc_llc_link_clear(struct smc_link *link);
int smc_llc_do_confirm_rkey(struct smc_link *link,
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index d7b88b2d1b22..01c6ce042a1c 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -22,13 +22,12 @@
#include "smc_pnet.h"
#include "smc_ib.h"
-
-#define SMC_MAX_PNET_ID_LEN 16 /* Max. length of PNET id */
+#include "smc_ism.h"
static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = {
[SMC_PNETID_NAME] = {
.type = NLA_NUL_STRING,
- .len = SMC_MAX_PNET_ID_LEN - 1
+ .len = SMC_MAX_PNETID_LEN - 1
},
[SMC_PNETID_ETHNAME] = {
.type = NLA_NUL_STRING,
@@ -65,7 +64,7 @@ static struct smc_pnettable {
*/
struct smc_pnetentry {
struct list_head list;
- char pnet_name[SMC_MAX_PNET_ID_LEN + 1];
+ char pnet_name[SMC_MAX_PNETID_LEN + 1];
struct net_device *ndev;
struct smc_ib_device *smcibdev;
u8 ib_port;
@@ -209,7 +208,7 @@ static bool smc_pnetid_valid(const char *pnet_name, char *pnetid)
return false;
while (--end >= bf && isspace(*end))
;
- if (end - bf >= SMC_MAX_PNET_ID_LEN)
+ if (end - bf >= SMC_MAX_PNETID_LEN)
return false;
while (bf <= end) {
if (!isalnum(*bf))
@@ -358,9 +357,6 @@ static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info)
kfree(pnetelem);
return rc;
}
- rc = smc_ib_remember_port_attr(pnetelem->smcibdev, pnetelem->ib_port);
- if (rc)
- smc_pnet_remove_by_pnetid(pnetelem->pnet_name);
return rc;
}
@@ -485,10 +481,10 @@ static int smc_pnet_netdev_event(struct notifier_block *this,
case NETDEV_REBOOT:
case NETDEV_UNREGISTER:
smc_pnet_remove_by_ndev(event_dev);
+ return NOTIFY_OK;
default:
- break;
+ return NOTIFY_DONE;
}
- return NOTIFY_DONE;
}
static struct notifier_block smc_netdev_notifier = {
@@ -515,28 +511,104 @@ void smc_pnet_exit(void)
genl_unregister_family(&smc_pnet_nl_family);
}
-/* PNET table analysis for a given sock:
- * determine ib_device and port belonging to used internal TCP socket
- * ethernet interface.
+/* Determine one base device for stacked net devices.
+ * If the lower device level contains more than one device
+ * (for instance with bonding slaves), just the first device
+ * is used to reach a base device.
*/
-void smc_pnet_find_roce_resource(struct sock *sk,
- struct smc_ib_device **smcibdev, u8 *ibport)
+static struct net_device *pnet_find_base_ndev(struct net_device *ndev)
{
- struct dst_entry *dst = sk_dst_get(sk);
- struct smc_pnetentry *pnetelem;
+ int i, nest_lvl;
- *smcibdev = NULL;
- *ibport = 0;
+ rtnl_lock();
+ nest_lvl = dev_get_nest_level(ndev);
+ for (i = 0; i < nest_lvl; i++) {
+ struct list_head *lower = &ndev->adj_list.lower;
+
+ if (list_empty(lower))
+ break;
+ lower = lower->next;
+ ndev = netdev_lower_get_next(ndev, &lower);
+ }
+ rtnl_unlock();
+ return ndev;
+}
+
+/* Determine the corresponding IB device port based on the hardware PNETID.
+ * Searching stops at the first matching active IB device port with vlan_id
+ * configured.
+ */
+static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev,
+ struct smc_ib_device **smcibdev,
+ u8 *ibport, unsigned short vlan_id,
+ u8 gid[])
+{
+ u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
+ struct smc_ib_device *ibdev;
+ int i;
+
+ ndev = pnet_find_base_ndev(ndev);
+ if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port,
+ ndev_pnetid))
+ return; /* pnetid could not be determined */
+
+ spin_lock(&smc_ib_devices.lock);
+ list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
+ for (i = 1; i <= SMC_MAX_PORTS; i++) {
+ if (!rdma_is_port_valid(ibdev->ibdev, i))
+ continue;
+ if (!memcmp(ibdev->pnetid[i - 1], ndev_pnetid,
+ SMC_MAX_PNETID_LEN) &&
+ smc_ib_port_active(ibdev, i) &&
+ !smc_ib_determine_gid(ibdev, i, vlan_id, gid,
+ NULL)) {
+ *smcibdev = ibdev;
+ *ibport = i;
+ goto out;
+ }
+ }
+ }
+out:
+ spin_unlock(&smc_ib_devices.lock);
+}
+
+static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
+ struct smcd_dev **smcismdev)
+{
+ u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
+ struct smcd_dev *ismdev;
+
+ ndev = pnet_find_base_ndev(ndev);
+ if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port,
+ ndev_pnetid))
+ return; /* pnetid could not be determined */
+
+ spin_lock(&smcd_dev_list.lock);
+ list_for_each_entry(ismdev, &smcd_dev_list.list, list) {
+ if (!memcmp(ismdev->pnetid, ndev_pnetid, SMC_MAX_PNETID_LEN)) {
+ *smcismdev = ismdev;
+ break;
+ }
+ }
+ spin_unlock(&smcd_dev_list.lock);
+}
+
+/* Lookup of coupled ib_device via SMC pnet table */
+static void smc_pnet_find_roce_by_table(struct net_device *netdev,
+ struct smc_ib_device **smcibdev,
+ u8 *ibport, unsigned short vlan_id,
+ u8 gid[])
+{
+ struct smc_pnetentry *pnetelem;
- if (!dst)
- return;
- if (!dst->dev)
- goto out_rel;
read_lock(&smc_pnettable.lock);
list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) {
- if (dst->dev == pnetelem->ndev) {
+ if (netdev == pnetelem->ndev) {
if (smc_ib_port_active(pnetelem->smcibdev,
- pnetelem->ib_port)) {
+ pnetelem->ib_port) &&
+ !smc_ib_determine_gid(pnetelem->smcibdev,
+ pnetelem->ib_port, vlan_id,
+ gid, NULL)) {
*smcibdev = pnetelem->smcibdev;
*ibport = pnetelem->ib_port;
}
@@ -544,6 +616,55 @@ void smc_pnet_find_roce_resource(struct sock *sk,
}
}
read_unlock(&smc_pnettable.lock);
+}
+
+/* PNET table analysis for a given sock:
+ * determine ib_device and port belonging to used internal TCP socket
+ * ethernet interface.
+ */
+void smc_pnet_find_roce_resource(struct sock *sk,
+ struct smc_ib_device **smcibdev, u8 *ibport,
+ unsigned short vlan_id, u8 gid[])
+{
+ struct dst_entry *dst = sk_dst_get(sk);
+
+ *smcibdev = NULL;
+ *ibport = 0;
+
+ if (!dst)
+ goto out;
+ if (!dst->dev)
+ goto out_rel;
+
+ /* if possible, lookup via hardware-defined pnetid */
+ smc_pnet_find_roce_by_pnetid(dst->dev, smcibdev, ibport, vlan_id, gid);
+ if (*smcibdev)
+ goto out_rel;
+
+ /* lookup via SMC PNET table */
+ smc_pnet_find_roce_by_table(dst->dev, smcibdev, ibport, vlan_id, gid);
+
+out_rel:
+ dst_release(dst);
+out:
+ return;
+}
+
+void smc_pnet_find_ism_resource(struct sock *sk, struct smcd_dev **smcismdev)
+{
+ struct dst_entry *dst = sk_dst_get(sk);
+
+ *smcismdev = NULL;
+ if (!dst)
+ goto out;
+ if (!dst->dev)
+ goto out_rel;
+
+ /* if possible, lookup via hardware-defined pnetid */
+ smc_pnet_find_ism_by_pnetid(dst->dev, smcismdev);
+
out_rel:
dst_release(dst);
+out:
+ return;
}
diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h
index 5a29519db976..8ff777636e32 100644
--- a/net/smc/smc_pnet.h
+++ b/net/smc/smc_pnet.h
@@ -12,12 +12,29 @@
#ifndef _SMC_PNET_H
#define _SMC_PNET_H
+#if IS_ENABLED(CONFIG_HAVE_PNETID)
+#include <asm/pnet.h>
+#endif
+
struct smc_ib_device;
+struct smcd_dev;
+
+static inline int smc_pnetid_by_dev_port(struct device *dev,
+ unsigned short port, u8 *pnetid)
+{
+#if IS_ENABLED(CONFIG_HAVE_PNETID)
+ return pnet_id_by_dev_port(dev, port, pnetid);
+#else
+ return -ENOENT;
+#endif
+}
int smc_pnet_init(void) __init;
void smc_pnet_exit(void);
int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev);
void smc_pnet_find_roce_resource(struct sock *sk,
- struct smc_ib_device **smcibdev, u8 *ibport);
+ struct smc_ib_device **smcibdev, u8 *ibport,
+ unsigned short vlan_id, u8 gid[]);
+void smc_pnet_find_ism_resource(struct sock *sk, struct smcd_dev **smcismdev);
#endif
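
The smc_pnetid_by_dev_port() wrapper above compiles down to a constant -ENOENT stub when CONFIG_HAVE_PNETID is off, so callers need no #ifdefs. A sketch of that pattern outside the kernel; real_pnetid_lookup() is a hypothetical backend, not part of the patch:

#include <errno.h>

/* #define HAVE_PNETID 1 */	/* define to compile the real lookup in */

#ifdef HAVE_PNETID
int real_pnetid_lookup(unsigned short port, unsigned char *pnetid);
#endif

static inline int pnetid_lookup(unsigned short port, unsigned char *pnetid)
{
#ifdef HAVE_PNETID
	return real_pnetid_lookup(port, pnetid);	/* hypothetical */
#else
	(void)port;
	(void)pnetid;
	return -ENOENT;		/* no pnetid support compiled in */
#endif
}
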
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index 3d77b383cccd..bbcf0fe4ae10 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -82,8 +82,7 @@ static int smc_rx_update_consumer(struct smc_sock *smc,
}
}
- smc_curs_write(&conn->local_tx_ctrl.cons, smc_curs_read(&cons, conn),
- conn);
+ smc_curs_copy(&conn->local_tx_ctrl.cons, &cons, conn);
/* send consumer cursor update if required */
/* similar to advertising new TCP rcv_wnd if required */
@@ -97,8 +96,7 @@ static void smc_rx_update_cons(struct smc_sock *smc, size_t len)
struct smc_connection *conn = &smc->conn;
union smc_host_cursor cons;
- smc_curs_write(&cons, smc_curs_read(&conn->local_tx_ctrl.cons, conn),
- conn);
+ smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
smc_rx_update_consumer(smc, cons, len);
}
@@ -157,10 +155,8 @@ static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len,
struct splice_pipe_desc spd;
struct partial_page partial;
struct smc_spd_priv *priv;
- struct page *page;
int bytes;
- page = virt_to_page(smc->conn.rmb_desc->cpu_addr);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -172,7 +168,7 @@ static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len,
spd.nr_pages_max = 1;
spd.nr_pages = 1;
- spd.pages = &page;
+ spd.pages = &smc->conn.rmb_desc->pages;
spd.partial = &partial;
spd.ops = &smc_pipe_ops;
spd.spd_release = smc_rx_spd_release;
@@ -245,10 +241,7 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
if (!(flags & MSG_TRUNC))
rc = memcpy_to_msg(msg, &conn->urg_rx_byte, 1);
len = 1;
- smc_curs_write(&cons,
- smc_curs_read(&conn->local_tx_ctrl.cons,
- conn),
- conn);
+ smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
if (smc_curs_diff(conn->rmb_desc->len, &cons,
&conn->urg_curs) > 1)
conn->urg_rx_skip_pend = true;
@@ -305,7 +298,7 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
/* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
- rcvbuf_base = conn->rmb_desc->cpu_addr;
+ rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr;
do { /* while (read_remaining) */
if (read_done >= target || (pipe && read_done))
@@ -370,9 +363,7 @@ copy:
continue;
}
- smc_curs_write(&cons,
- smc_curs_read(&conn->local_tx_ctrl.cons, conn),
- conn);
+ smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
/* subsequent splice() calls pick up where previous left */
if (splbytes)
smc_curs_add(conn->rmb_desc->len, &cons, splbytes);
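
The smc_curs_copy() conversions in this file (and in smc_tx.c below) operate on (wrap, count) cursors; the distance between two cursors in a ring of size bytes depends on whether the newer cursor has wrapped. A hedged standalone sketch of that arithmetic, modeled on smc_curs_diff():

/* distance from the 'old' to the 'new' cursor in a ring of 'size' bytes */
static int curs_diff(unsigned int size,
		     unsigned int old_wrap, unsigned int old_count,
		     unsigned int new_wrap, unsigned int new_count)
{
	if (old_wrap != new_wrap)	/* newer cursor wrapped past old */
		return (int)(size - old_count + new_count);
	return (int)(new_count - old_count);
}
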
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 72e1a2782fe8..d8366ed51757 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -24,6 +24,7 @@
#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
+#include "smc_ism.h"
#include "smc_tx.h"
#define SMC_TX_WORK_DELAY HZ
@@ -180,9 +181,7 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
copylen = min_t(size_t, send_remaining, writespace);
/* determine start of sndbuf */
sndbuf_base = conn->sndbuf_desc->cpu_addr;
- smc_curs_write(&prep,
- smc_curs_read(&conn->tx_curs_prep, conn),
- conn);
+ smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
tx_cnt_prep = prep.count;
/* determine chunks where to write into sndbuf */
/* either unwrapped case, or 1st chunk of wrapped case */
@@ -213,9 +212,7 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
smc_sndbuf_sync_sg_for_device(conn);
/* update cursors */
smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
- smc_curs_write(&conn->tx_curs_prep,
- smc_curs_read(&prep, conn),
- conn);
+ smc_curs_copy(&conn->tx_curs_prep, &prep, conn);
/* increased in send tasklet smc_cdc_tx_handler() */
smp_mb__before_atomic();
atomic_sub(copylen, &conn->sndbuf_space);
@@ -250,6 +247,24 @@ out_err:
/***************************** sndbuf consumer *******************************/
+/* sndbuf consumer: actual data transfer of one target chunk with ISM write */
+int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
+ u32 offset, int signal)
+{
+ struct smc_ism_position pos;
+ int rc;
+
+ memset(&pos, 0, sizeof(pos));
+ pos.token = conn->peer_token;
+ pos.index = conn->peer_rmbe_idx;
+ pos.offset = conn->tx_off + offset;
+ pos.signal = signal;
+ rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);
+ if (rc)
+ conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+ return rc;
+}
+
/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
int num_sges, struct ib_sge sges[])
@@ -296,26 +311,109 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn,
smc_curs_add(conn->sndbuf_desc->len, sent, len);
}
+/* SMC-R helper for smc_tx_rdma_writes() */
+static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
+ size_t src_off, size_t src_len,
+ size_t dst_off, size_t dst_len)
+{
+ dma_addr_t dma_addr =
+ sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
+ struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+ int src_len_sum = src_len, dst_len_sum = dst_len;
+ struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
+ int sent_count = src_off;
+ int srcchunk, dstchunk;
+ int num_sges;
+ int rc;
+
+ for (dstchunk = 0; dstchunk < 2; dstchunk++) {
+ num_sges = 0;
+ for (srcchunk = 0; srcchunk < 2; srcchunk++) {
+ sges[srcchunk].addr = dma_addr + src_off;
+ sges[srcchunk].length = src_len;
+ sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
+ num_sges++;
+
+ src_off += src_len;
+ if (src_off >= conn->sndbuf_desc->len)
+ src_off -= conn->sndbuf_desc->len;
+ /* modulo in send ring */
+ if (src_len_sum == dst_len)
+ break; /* either on 1st or 2nd iteration */
+ /* prepare next (== 2nd) iteration */
+ src_len = dst_len - src_len; /* remainder */
+ src_len_sum += src_len;
+ }
+ rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
+ if (rc)
+ return rc;
+ if (dst_len_sum == len)
+ break; /* either on 1st or 2nd iteration */
+ /* prepare next (== 2nd) iteration */
+ dst_off = 0; /* modulo offset in RMBE ring buffer */
+ dst_len = len - dst_len; /* remainder */
+ dst_len_sum += dst_len;
+ src_len = min_t(int, dst_len, conn->sndbuf_desc->len -
+ sent_count);
+ src_len_sum = src_len;
+ }
+ return 0;
+}
+
+/* SMC-D helper for smc_tx_rdma_writes() */
+static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
+ size_t src_off, size_t src_len,
+ size_t dst_off, size_t dst_len)
+{
+ int src_len_sum = src_len, dst_len_sum = dst_len;
+ int srcchunk, dstchunk;
+ int rc;
+
+ for (dstchunk = 0; dstchunk < 2; dstchunk++) {
+ for (srcchunk = 0; srcchunk < 2; srcchunk++) {
+ void *data = conn->sndbuf_desc->cpu_addr + src_off;
+
+ rc = smcd_tx_ism_write(conn, data, src_len, dst_off +
+ sizeof(struct smcd_cdc_msg), 0);
+ if (rc)
+ return rc;
+ dst_off += src_len;
+ src_off += src_len;
+ if (src_off >= conn->sndbuf_desc->len)
+ src_off -= conn->sndbuf_desc->len;
+ /* modulo in send ring */
+ if (src_len_sum == dst_len)
+ break; /* either on 1st or 2nd iteration */
+ /* prepare next (== 2nd) iteration */
+ src_len = dst_len - src_len; /* remainder */
+ src_len_sum += src_len;
+ }
+ if (dst_len_sum == len)
+ break; /* either on 1st or 2nd iteration */
+ /* prepare next (== 2nd) iteration */
+ dst_off = 0; /* modulo offset in RMBE ring buffer */
+ dst_len = len - dst_len; /* remainder */
+ dst_len_sum += dst_len;
+ src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off);
+ src_len_sum = src_len;
+ }
+ return 0;
+}
+
/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
* usable snd_wnd as max transmit
*/
static int smc_tx_rdma_writes(struct smc_connection *conn)
{
- size_t src_off, src_len, dst_off, dst_len; /* current chunk values */
- size_t len, dst_len_sum, src_len_sum, dstchunk, srcchunk;
+ size_t len, src_len, dst_off, dst_len; /* current chunk values */
union smc_host_cursor sent, prep, prod, cons;
- struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
- struct smc_link_group *lgr = conn->lgr;
struct smc_cdc_producer_flags *pflags;
int to_send, rmbespace;
- struct smc_link *link;
- dma_addr_t dma_addr;
- int num_sges;
int rc;
/* source: sndbuf */
- smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn);
- smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn);
+ smc_curs_copy(&sent, &conn->tx_curs_sent, conn);
+ smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
/* cf. wmem_alloc - (snd_max - snd_una) */
to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
if (to_send <= 0)
@@ -326,12 +424,8 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
rmbespace = atomic_read(&conn->peer_rmbe_space);
if (rmbespace <= 0)
return 0;
- smc_curs_write(&prod,
- smc_curs_read(&conn->local_tx_ctrl.prod, conn),
- conn);
- smc_curs_write(&cons,
- smc_curs_read(&conn->local_rx_ctrl.cons, conn),
- conn);
+ smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
+ smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
/* if usable snd_wnd closes ask peer to advertise once it opens again */
pflags = &conn->local_tx_ctrl.prod_flags;
@@ -340,7 +434,6 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
len = min(to_send, rmbespace);
/* initialize variables for first iteration of subsequent nested loop */
- link = &lgr->lnk[SMC_SINGLE_LINK];
dst_off = prod.count;
if (prod.wrap == cons.wrap) {
/* the filled destination area is unwrapped,
@@ -357,8 +450,6 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
*/
dst_len = len;
}
- dst_len_sum = dst_len;
- src_off = sent.count;
/* dst_len determines the maximum src_len */
if (sent.count + dst_len <= conn->sndbuf_desc->len) {
/* unwrapped src case: single chunk of entire dst_len */
@@ -367,51 +458,23 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
src_len = conn->sndbuf_desc->len - sent.count;
}
- src_len_sum = src_len;
- dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
- for (dstchunk = 0; dstchunk < 2; dstchunk++) {
- num_sges = 0;
- for (srcchunk = 0; srcchunk < 2; srcchunk++) {
- sges[srcchunk].addr = dma_addr + src_off;
- sges[srcchunk].length = src_len;
- sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
- num_sges++;
- src_off += src_len;
- if (src_off >= conn->sndbuf_desc->len)
- src_off -= conn->sndbuf_desc->len;
- /* modulo in send ring */
- if (src_len_sum == dst_len)
- break; /* either on 1st or 2nd iteration */
- /* prepare next (== 2nd) iteration */
- src_len = dst_len - src_len; /* remainder */
- src_len_sum += src_len;
- }
- rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
- if (rc)
- return rc;
- if (dst_len_sum == len)
- break; /* either on 1st or 2nd iteration */
- /* prepare next (== 2nd) iteration */
- dst_off = 0; /* modulo offset in RMBE ring buffer */
- dst_len = len - dst_len; /* remainder */
- dst_len_sum += dst_len;
- src_len = min_t(int,
- dst_len, conn->sndbuf_desc->len - sent.count);
- src_len_sum = src_len;
- }
+
+ if (conn->lgr->is_smcd)
+ rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len,
+ dst_off, dst_len);
+ else
+ rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
+ dst_off, dst_len);
+ if (rc)
+ return rc;
if (conn->urg_tx_pend && len == to_send)
pflags->urg_data_present = 1;
smc_tx_advance_cursors(conn, &prod, &sent, len);
/* update connection's cursors with advanced local cursors */
- smc_curs_write(&conn->local_tx_ctrl.prod,
- smc_curs_read(&prod, conn),
- conn);
+ smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn);
/* dst: peer RMBE */
- smc_curs_write(&conn->tx_curs_sent,
- smc_curs_read(&sent, conn),
- conn);
- /* src: local sndbuf */
+ smc_curs_copy(&conn->tx_curs_sent, &sent, conn);/* src: local sndbuf */
return 0;
}
@@ -419,7 +482,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
/* Wakeup sndbuf consumers from any context (IRQ or process)
* since there is more data to transmit; usable snd_wnd as max transmit
*/
-int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
+static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
struct smc_cdc_producer_flags *pflags;
struct smc_cdc_tx_pend *pend;
@@ -466,6 +529,37 @@ out_unlock:
return rc;
}
+static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
+{
+ struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
+ int rc = 0;
+
+ spin_lock_bh(&conn->send_lock);
+ if (!pflags->urg_data_present)
+ rc = smc_tx_rdma_writes(conn);
+ if (!rc)
+ rc = smcd_cdc_msg_send(conn);
+
+ if (!rc && pflags->urg_data_present) {
+ pflags->urg_data_pending = 0;
+ pflags->urg_data_present = 0;
+ }
+ spin_unlock_bh(&conn->send_lock);
+ return rc;
+}
+
+int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
+{
+ int rc;
+
+ if (conn->lgr->is_smcd)
+ rc = smcd_tx_sndbuf_nonempty(conn);
+ else
+ rc = smcr_tx_sndbuf_nonempty(conn);
+
+ return rc;
+}
+
/* Wakeup sndbuf consumers from process context
* since there is more data to transmit
*/
@@ -498,17 +592,11 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
int sender_free = conn->rmb_desc->len;
int to_confirm;
- smc_curs_write(&cons,
- smc_curs_read(&conn->local_tx_ctrl.cons, conn),
- conn);
- smc_curs_write(&cfed,
- smc_curs_read(&conn->rx_curs_confirmed, conn),
- conn);
+ smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
+ smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn);
to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
if (to_confirm > conn->rmbe_update_limit) {
- smc_curs_write(&prod,
- smc_curs_read(&conn->local_rx_ctrl.prod, conn),
- conn);
+ smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
sender_free = conn->rmb_desc->len -
smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
}
@@ -524,9 +612,8 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
SMC_TX_WORK_DELAY);
return;
}
- smc_curs_write(&conn->rx_curs_confirmed,
- smc_curs_read(&conn->local_tx_ctrl.cons, conn),
- conn);
+ smc_curs_copy(&conn->rx_curs_confirmed,
+ &conn->local_tx_ctrl.cons, conn);
conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
}
if (conn->local_rx_ctrl.prod_flags.write_blocked &&
diff --git a/net/smc/smc_tx.h b/net/smc/smc_tx.h
index 9d2238909fa0..07e6ad76224a 100644
--- a/net/smc/smc_tx.h
+++ b/net/smc/smc_tx.h
@@ -22,8 +22,8 @@ static inline int smc_tx_prepared_sends(struct smc_connection *conn)
{
union smc_host_cursor sent, prep;
- smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn);
- smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn);
+ smc_curs_copy(&sent, &conn->tx_curs_sent, conn);
+ smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
return smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
}
@@ -33,5 +33,7 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len);
int smc_tx_sndbuf_nonempty(struct smc_connection *conn);
void smc_tx_sndbuf_nonfull(struct smc_sock *smc);
void smc_tx_consumer_update(struct smc_connection *conn, bool force);
+int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
+ u32 offset, int signal);
#endif /* SMC_TX_H */
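
Both smcr_tx_rdma_writes() and smcd_tx_rdma_writes() in smc_tx.c rest on the same invariant: a transfer inside a ring buffer wraps at most once, so it always splits into at most two linear chunks. A standalone sketch of that split as a plain memcpy version, under that assumption and with illustrative names:

#include <stddef.h>
#include <string.h>

/* copy len bytes out of a ring of ring_len bytes, starting at src_off */
static void ring_copy_out(char *dst, const char *ring, size_t ring_len,
			  size_t src_off, size_t len)
{
	size_t first = len;

	if (src_off + len > ring_len)
		first = ring_len - src_off;	/* 1st chunk, up to wrap */
	memcpy(dst, ring + src_off, first);
	if (first < len)			/* 2nd chunk, from start */
		memcpy(dst + first, ring, len - first);
}
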
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index de1a438cf977..3c458d279855 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -92,8 +92,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
return;
if (wc->status) {
- struct smc_link_group *lgr;
-
for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
/* clear full struct smc_wr_tx_pend including .priv */
memset(&link->wr_tx_pends[i], 0,
@@ -103,9 +101,7 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
clear_bit(i, link->wr_tx_mask);
}
/* terminate connections of this link group abnormally */
- lgr = container_of(link, struct smc_link_group,
- lnk[SMC_SINGLE_LINK]);
- smc_lgr_terminate(lgr);
+ smc_lgr_terminate(smc_get_lgr(link));
}
if (pnd_snd.handler)
pnd_snd.handler(&pnd_snd.priv, link, wc->status);
@@ -186,18 +182,14 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
if (rc)
return rc;
} else {
- struct smc_link_group *lgr;
-
- lgr = container_of(link, struct smc_link_group,
- lnk[SMC_SINGLE_LINK]);
rc = wait_event_timeout(
link->wr_tx_wait,
- list_empty(&lgr->list) || /* lgr terminated */
+ link->state == SMC_LNK_INACTIVE ||
(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
SMC_WR_TX_WAIT_FREE_SLOT_TIME);
if (!rc) {
/* timeout - terminate connections */
- smc_lgr_terminate(lgr);
+ smc_lgr_terminate(smc_get_lgr(link));
return -EPIPE;
}
if (idx == link->wr_tx_cnt)
@@ -248,12 +240,8 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
pend = container_of(priv, struct smc_wr_tx_pend, priv);
rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], NULL);
if (rc) {
- struct smc_link_group *lgr =
- container_of(link, struct smc_link_group,
- lnk[SMC_SINGLE_LINK]);
-
smc_wr_tx_put_slot(link, priv);
- smc_lgr_terminate(lgr);
+ smc_lgr_terminate(smc_get_lgr(link));
}
return rc;
}
@@ -278,11 +266,7 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
SMC_WR_REG_MR_WAIT_TIME);
if (!rc) {
/* timeout - terminate connections */
- struct smc_link_group *lgr;
-
- lgr = container_of(link, struct smc_link_group,
- lnk[SMC_SINGLE_LINK]);
- smc_lgr_terminate(lgr);
+ smc_lgr_terminate(smc_get_lgr(link));
return -EPIPE;
}
if (rc == -ERESTARTSYS)
@@ -375,8 +359,6 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
smc_wr_rx_demultiplex(&wc[i]);
smc_wr_rx_post(link); /* refill WR RX */
} else {
- struct smc_link_group *lgr;
-
/* handle status errors */
switch (wc[i].status) {
case IB_WC_RETRY_EXC_ERR:
@@ -385,9 +367,7 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
/* terminate connections of this link group
* abnormally
*/
- lgr = container_of(link, struct smc_link_group,
- lnk[SMC_SINGLE_LINK]);
- smc_lgr_terminate(lgr);
+ smc_lgr_terminate(smc_get_lgr(link));
break;
default:
smc_wr_rx_post(link); /* refill WR RX */
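
Every hunk in smc_wr.c above collapses the same open-coded container_of() into smc_get_lgr(). A sketch of that accessor, assuming it is defined in net/smc/smc_core.h, directly mirroring the removed lines:

static inline struct smc_link_group *smc_get_lgr(struct smc_link *link)
{
	/* links are embedded in the link group's lnk[] array */
	return container_of(link, struct smc_link_group,
			    lnk[SMC_SINGLE_LINK]);
}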
diff --git a/net/socket.c b/net/socket.c
index 8c24d5dc4bc8..e6945e318f02 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -252,7 +252,7 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
init_waitqueue_head(&wq->wait);
wq->fasync_list = NULL;
wq->flags = 0;
- RCU_INIT_POINTER(ei->socket.wq, wq);
+ ei->socket.wq = wq;
ei->socket.state = SS_UNCONNECTED;
ei->socket.flags = 0;
@@ -266,11 +266,9 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
static void sock_destroy_inode(struct inode *inode)
{
struct socket_alloc *ei;
- struct socket_wq *wq;
ei = container_of(inode, struct socket_alloc, vfs_inode);
- wq = rcu_dereference_protected(ei->socket.wq, 1);
- kfree_rcu(wq, rcu);
+ kfree_rcu(ei->socket.wq, rcu);
kmem_cache_free(sock_inode_cachep, ei);
}
@@ -388,39 +386,20 @@ static struct file_system_type sock_fs_type = {
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
{
- struct qstr name = { .name = "" };
- struct path path;
struct file *file;
- if (dname) {
- name.name = dname;
- name.len = strlen(name.name);
- } else if (sock->sk) {
- name.name = sock->sk->sk_prot_creator->name;
- name.len = strlen(name.name);
- }
- path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
- if (unlikely(!path.dentry)) {
- sock_release(sock);
- return ERR_PTR(-ENOMEM);
- }
- path.mnt = mntget(sock_mnt);
-
- d_instantiate(path.dentry, SOCK_INODE(sock));
+ if (!dname)
+ dname = sock->sk ? sock->sk->sk_prot_creator->name : "";
- file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
- &socket_file_ops);
+ file = alloc_file_pseudo(SOCK_INODE(sock), sock_mnt, dname,
+ O_RDWR | (flags & O_NONBLOCK),
+ &socket_file_ops);
if (IS_ERR(file)) {
- /* drop dentry, keep inode for a bit */
- ihold(d_inode(path.dentry));
- path_put(&path);
- /* ... and now kill it properly */
sock_release(sock);
return file;
}
sock->file = file;
- file->f_flags = O_RDWR | (flags & O_NONBLOCK);
file->private_data = sock;
return file;
}
@@ -604,7 +583,7 @@ static void __sock_release(struct socket *sock, struct inode *inode)
module_put(owner);
}
- if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
+ if (sock->wq->fasync_list)
pr_err("%s: fasync list not empty!\n", __func__);
if (!sock->file) {
@@ -1131,12 +1110,21 @@ EXPORT_SYMBOL(sock_create_lite);
static __poll_t sock_poll(struct file *file, poll_table *wait)
{
struct socket *sock = file->private_data;
- __poll_t events = poll_requested_events(wait);
+ __poll_t events = poll_requested_events(wait), flag = 0;
- sock_poll_busy_loop(sock, events);
if (!sock->ops->poll)
return 0;
- return sock->ops->poll(file, sock, wait) | sock_poll_busy_flag(sock);
+
+ if (sk_can_busy_loop(sock->sk)) {
+ /* poll once if requested by the syscall */
+ if (events & POLL_BUSY_LOOP)
+ sk_busy_loop(sock->sk, 1);
+
+ /* if this socket can poll_ll, tell the system call */
+ flag = POLL_BUSY_LOOP;
+ }
+
+ return sock->ops->poll(file, sock, wait) | flag;
}
static int sock_mmap(struct file *file, struct vm_area_struct *vma)
@@ -1173,7 +1161,7 @@ static int sock_fasync(int fd, struct file *filp, int on)
return -EINVAL;
lock_sock(sk);
- wq = rcu_dereference_protected(sock->wq, lockdep_sock_is_held(sk));
+ wq = sock->wq;
fasync_helper(fd, filp, on, &wq->fasync_list);
if (!wq->fasync_list)
@@ -2690,8 +2678,7 @@ EXPORT_SYMBOL(sock_unregister);
bool sock_is_registered(int family)
{
- return family < NPROTO &&
- rcu_access_pointer(net_families[array_index_nospec(family, NPROTO)]);
+ return family < NPROTO && rcu_access_pointer(net_families[family]);
}
static int __init sock_init(void)
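
The sock_poll() rewrite above inlines the old busy-loop helpers: the socket busy-polls at most once per call, and only when the syscall requested it, while still advertising POLL_BUSY_LOOP for capable sockets. A sketch of the decision logic in isolation, assuming the usual sk_can_busy_loop()/sk_busy_loop() semantics from include/net/busy_poll.h:

static __poll_t busy_poll_once(struct socket *sock, __poll_t events)
{
	__poll_t flag = 0;

	if (sk_can_busy_loop(sock->sk)) {
		/* poll the NAPI queue once if the caller asked for it */
		if (events & POLL_BUSY_LOOP)
			sk_busy_loop(sock->sk, 1 /* nonblock */);
		/* always report busy-poll capability back to the core */
		flag = POLL_BUSY_LOOP;
	}
	return flag;
}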
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 625acb27efcc..da1a676860ca 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -140,11 +140,13 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
/* We are going to append to the frags_list of head.
* Need to unshare the frag_list.
*/
- err = skb_unclone(head, GFP_ATOMIC);
- if (err) {
- STRP_STATS_INCR(strp->stats.mem_fail);
- desc->error = err;
- return 0;
+ if (skb_has_frag_list(head)) {
+ err = skb_unclone(head, GFP_ATOMIC);
+ if (err) {
+ STRP_STATS_INCR(strp->stats.mem_fail);
+ desc->error = err;
+ return 0;
+ }
}
if (unlikely(skb_shinfo(head)->frag_list)) {
@@ -201,14 +203,16 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
memset(stm, 0, sizeof(*stm));
stm->strp.offset = orig_offset + eaten;
} else {
- /* Unclone since we may be appending to an skb that we
+ /* Unclone if we are appending to an skb that we
* already share a frag_list with.
*/
- err = skb_unclone(skb, GFP_ATOMIC);
- if (err) {
- STRP_STATS_INCR(strp->stats.mem_fail);
- desc->error = err;
- break;
+ if (skb_has_frag_list(skb)) {
+ err = skb_unclone(skb, GFP_ATOMIC);
+ if (err) {
+ STRP_STATS_INCR(strp->stats.mem_fail);
+ desc->error = err;
+ break;
+ }
}
stm = _strp_msg(head);
@@ -404,8 +408,6 @@ EXPORT_SYMBOL_GPL(strp_data_ready);
static void do_strp_work(struct strparser *strp)
{
- read_descriptor_t rd_desc;
-
/* We need the read lock to synchronize with strp_data_ready. We
* need the socket lock for calling strp_read_sock.
*/
@@ -417,8 +419,6 @@ static void do_strp_work(struct strparser *strp)
if (strp->paused)
goto out;
- rd_desc.arg.data = strp;
-
if (strp_read_sock(strp) == -ENOMEM)
queue_work(strp_wq, &strp->work);
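
Both strparser hunks above make skb_unclone() conditional: uncloning is only needed when a frag_list is actually shared, so the common no-frag-list path skips a needless copy. For reference, the guard is the trivial test from include/linux/skbuff.h (sketch of the upstream definition):

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}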
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index be8f103d22fd..0fc397fae42b 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -517,7 +517,7 @@ gss_alloc_msg(struct gss_auth *gss_auth,
err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
if (err)
goto err_put_pipe_version;
- };
+ }
kref_get(&gss_auth->kref);
return gss_msg;
err_put_pipe_version:
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index f3711176be45..9ee6cfea56dd 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -512,7 +512,7 @@ int tipc_bcast_init(struct net *net)
struct tipc_bc_base *bb = NULL;
struct tipc_link *l = NULL;
- bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
+ bb = kzalloc(sizeof(*bb), GFP_KERNEL);
if (!bb)
goto enomem;
tn->bcbase = bb;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 2dfb492a7c94..418f03d0be90 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -395,6 +395,7 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
tipc_net_init(net, node_id, 0);
}
if (!tipc_own_id(net)) {
+ dev_put(dev);
pr_warn("Failed to obtain node identity\n");
return -EINVAL;
}
@@ -610,6 +611,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
case NETDEV_CHANGE:
if (netif_carrier_ok(dev))
break;
+ /* else: fall through */
case NETDEV_UP:
test_and_set_bit_lock(0, &b->up);
break;
diff --git a/net/tipc/group.c b/net/tipc/group.c
index d7a7befeddd4..e82f13cb2dc5 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -159,11 +159,6 @@ u32 tipc_group_exclude(struct tipc_group *grp)
return 0;
}
-int tipc_group_size(struct tipc_group *grp)
-{
- return grp->member_cnt;
-}
-
struct tipc_group *tipc_group_create(struct net *net, u32 portid,
struct tipc_group_req *mreq,
bool *group_is_open)
@@ -232,8 +227,8 @@ void tipc_group_delete(struct net *net, struct tipc_group *grp)
kfree(grp);
}
-struct tipc_member *tipc_group_find_member(struct tipc_group *grp,
- u32 node, u32 port)
+static struct tipc_member *tipc_group_find_member(struct tipc_group *grp,
+ u32 node, u32 port)
{
struct rb_node *n = grp->members.rb_node;
u64 nkey, key = (u64)node << 32 | port;
@@ -918,3 +913,35 @@ void tipc_group_member_evt(struct tipc_group *grp,
}
*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
}
+
+int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb)
+{
+ struct nlattr *group = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP);
+
+ if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID,
+ grp->type) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE,
+ grp->instance) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT,
+ grp->bc_snd_nxt))
+ goto group_msg_cancel;
+
+ if (grp->scope == TIPC_NODE_SCOPE)
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_NODE_SCOPE))
+ goto group_msg_cancel;
+
+ if (grp->scope == TIPC_CLUSTER_SCOPE)
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE))
+ goto group_msg_cancel;
+
+ if (*grp->open)
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_OPEN))
+ goto group_msg_cancel;
+
+ nla_nest_end(skb, group);
+ return 0;
+
+group_msg_cancel:
+ nla_nest_cancel(skb, group);
+ return -1;
+}
diff --git a/net/tipc/group.h b/net/tipc/group.h
index 5996af6e9f1d..76b4e5a7b39d 100644
--- a/net/tipc/group.h
+++ b/net/tipc/group.h
@@ -72,4 +72,5 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
u32 port, struct sk_buff_head *xmitq);
u16 tipc_group_bc_snd_nxt(struct tipc_group *grp);
void tipc_group_update_member(struct tipc_member *m, int len);
+int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb);
#endif
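
tipc_group_fill_sock_diag() follows the standard netlink nesting pattern: open a nest, emit attributes, and cancel the nest on any failure so partial output is rolled back. A self-contained sketch of the pattern (the attribute ids here are hypothetical):

static int fill_nested_example(struct sk_buff *skb, int nest_type)
{
	struct nlattr *nest = nla_nest_start(skb, nest_type);

	if (!nest)
		return -EMSGSIZE;
	if (nla_put_u32(skb, 1 /* hypothetical attr id */, 42))
		goto cancel;
	nla_nest_end(skb, nest);	/* commit the nest */
	return 0;

cancel:
	nla_nest_cancel(skb, nest);	/* roll back partial attributes */
	return -EMSGSIZE;
}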
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 695acb783969..b1f0bee54eac 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -106,7 +106,8 @@ struct tipc_stats {
* @backlogq: queue for messages waiting to be sent
* @snt_nxt: next sequence number to use for outbound messages
* @last_retransmitted: sequence number of most recently retransmitted message
- * @stale_count: # of identical retransmit requests made by peer
+ * @stale_cnt: counter for number of identical retransmit attempts
+ * @stale_limit: time when repeated identical retransmits must force link reset
* @ackers: # of peers that needs to ack each packet before it can be released
* @acked: # last packet acked by a certain peer. Used for broadcast.
* @rcv_nxt: next sequence number to expect for inbound messages
@@ -127,14 +128,17 @@ struct tipc_link {
struct net *net;
/* Management and link supervision data */
- u32 peer_session;
- u32 session;
+ u16 peer_session;
+ u16 session;
+ u16 snd_nxt_state;
+ u16 rcv_nxt_state;
u32 peer_bearer_id;
u32 bearer_id;
u32 tolerance;
u32 abort_limit;
u32 state;
u16 peer_caps;
+ bool in_session;
bool active;
u32 silent_intv_cnt;
char if_name[TIPC_MAX_IF_NAME];
@@ -161,7 +165,8 @@ struct tipc_link {
u16 snd_nxt;
u16 last_retransm;
u16 window;
- u32 stale_count;
+ u16 stale_cnt;
+ unsigned long stale_limit;
/* Reception */
u16 rcv_nxt;
@@ -212,11 +217,6 @@ enum {
*/
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
-/* Wildcard value for link session numbers. When it is known that
- * peer endpoint is down, any session number must be accepted.
- */
-#define ANY_SESSION 0x10000
-
/* Link FSM states:
*/
enum {
@@ -297,11 +297,6 @@ static bool link_is_bc_rcvlink(struct tipc_link *l)
return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}
-int tipc_link_is_active(struct tipc_link *l)
-{
- return l->active;
-}
-
void tipc_link_set_active(struct tipc_link *l, bool active)
{
l->active = active;
@@ -337,6 +332,11 @@ char tipc_link_plane(struct tipc_link *l)
return l->net_plane;
}
+void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
+{
+ l->peer_caps = capabilities;
+}
+
void tipc_link_add_bc_peer(struct tipc_link *snd_l,
struct tipc_link *uc_l,
struct sk_buff_head *xmitq)
@@ -373,7 +373,7 @@ int tipc_link_bc_peers(struct tipc_link *l)
return l->ackers;
}
-u16 link_bc_rcv_gap(struct tipc_link *l)
+static u16 link_bc_rcv_gap(struct tipc_link *l)
{
struct sk_buff *skb = skb_peek(&l->deferdq);
u16 gap = 0;
@@ -469,7 +469,7 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
l->addr = peer;
l->peer_caps = peer_caps;
l->net = net;
- l->peer_session = ANY_SESSION;
+ l->in_session = false;
l->bearer_id = bearer_id;
l->tolerance = tolerance;
l->net_plane = net_plane;
@@ -820,7 +820,7 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
* Wake up a number of waiting users, as permitted by available space
* in the send queue
*/
-void link_prepare_wakeup(struct tipc_link *l)
+static void link_prepare_wakeup(struct tipc_link *l)
{
struct sk_buff *skb, *tmp;
int imp, i = 0;
@@ -838,7 +838,7 @@ void link_prepare_wakeup(struct tipc_link *l)
void tipc_link_reset(struct tipc_link *l)
{
- l->peer_session = ANY_SESSION;
+ l->in_session = false;
l->session++;
l->mtu = l->advertised_mtu;
__skb_queue_purge(&l->transmq);
@@ -857,10 +857,12 @@ void tipc_link_reset(struct tipc_link *l)
l->rcv_unacked = 0;
l->snd_nxt = 1;
l->rcv_nxt = 1;
+ l->snd_nxt_state = 1;
+ l->rcv_nxt_state = 1;
l->acked = 0;
l->silent_intv_cnt = 0;
l->rst_cnt = 0;
- l->stale_count = 0;
+ l->stale_cnt = 0;
l->bc_peer_is_up = false;
memset(&l->mon_state, 0, sizeof(l->mon_state));
tipc_link_reset_stats(l);
@@ -954,7 +956,8 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
return rc;
}
-void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
+static void tipc_link_advance_backlog(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
{
struct sk_buff *skb, *_skb;
struct tipc_msg *hdr;
@@ -997,39 +1000,41 @@ static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}
-int tipc_link_retrans(struct tipc_link *l, struct tipc_link *nacker,
- u16 from, u16 to, struct sk_buff_head *xmitq)
+/* tipc_link_retrans() - retransmit one or more packets
+ * @l: the link to transmit on
+ * @r: the receiving link ordering the retransmit. Same as l if unicast
+ * @from: retransmit from (inclusive) this sequence number
+ * @to: retransmit to (inclusive) this sequence number
+ * @xmitq: queue for accumulating the retransmitted packets
+ */
+static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
+ u16 from, u16 to, struct sk_buff_head *xmitq)
{
struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
- struct tipc_msg *hdr;
- u16 ack = l->rcv_nxt - 1;
u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+ u16 ack = l->rcv_nxt - 1;
+ struct tipc_msg *hdr;
if (!skb)
return 0;
/* Detect repeated retransmit failures on same packet */
- if (nacker->last_retransm != buf_seqno(skb)) {
- nacker->last_retransm = buf_seqno(skb);
- nacker->stale_count = 1;
- } else if (++nacker->stale_count > 100) {
+ if (r->last_retransm != buf_seqno(skb)) {
+ r->last_retransm = buf_seqno(skb);
+ r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance);
+ } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
link_retransmit_failure(l, skb);
- nacker->stale_count = 0;
if (link_is_bc_sndlink(l))
return TIPC_LINK_DOWN_EVT;
return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
}
- /* Move forward to where retransmission should start */
skb_queue_walk(&l->transmq, skb) {
- if (!less(buf_seqno(skb), from))
- break;
- }
-
- skb_queue_walk_from(&l->transmq, skb) {
- if (more(buf_seqno(skb), to))
- break;
hdr = buf_msg(skb);
+ if (less(msg_seqno(hdr), from))
+ continue;
+ if (more(msg_seqno(hdr), to))
+ break;
_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
if (!_skb)
return 0;
@@ -1063,6 +1068,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
skb_queue_tail(mc_inputq, skb);
return true;
}
+ /* else: fall through */
case CONN_MANAGER:
skb_queue_tail(inputq, skb);
return true;
@@ -1271,6 +1277,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
/* Forward queues and wake up waiting users */
if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
+ l->stale_cnt = 0;
tipc_link_advance_backlog(l, xmitq);
if (unlikely(!skb_queue_empty(&l->wakeupq)))
link_prepare_wakeup(l);
@@ -1347,6 +1354,8 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
if (mtyp == STATE_MSG) {
+ if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
+ msg_set_seqno(hdr, l->snd_nxt_state++);
msg_set_seq_gap(hdr, rcvgap);
msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
msg_set_probe(hdr, probe);
@@ -1438,6 +1447,44 @@ tnl:
}
}
+/* tipc_link_validate_msg(): validate message against current link state
+ * Returns true if message should be accepted, otherwise false
+ */
+bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
+{
+ u16 curr_session = l->peer_session;
+ u16 session = msg_session(hdr);
+ int mtyp = msg_type(hdr);
+
+ if (msg_user(hdr) != LINK_PROTOCOL)
+ return true;
+
+ switch (mtyp) {
+ case RESET_MSG:
+ if (!l->in_session)
+ return true;
+ /* Accept only RESET with new session number */
+ return more(session, curr_session);
+ case ACTIVATE_MSG:
+ if (!l->in_session)
+ return true;
+ /* Accept only ACTIVATE with new or current session number */
+ return !less(session, curr_session);
+ case STATE_MSG:
+ /* Accept only STATE with current session number */
+ if (!l->in_session)
+ return false;
+ if (session != curr_session)
+ return false;
+ if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
+ return true;
+ /* Accept only STATE with new sequence number */
+ return !less(msg_seqno(hdr), l->rcv_nxt_state);
+ default:
+ return false;
+ }
+}
+
/* tipc_link_proto_rcv(): receive link level protocol message :
* Note that network plane id propagates through the network, and may
* change at any time. The node with lowest numerical id determines
@@ -1471,17 +1518,12 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
hdr = buf_msg(skb);
data = msg_data(hdr);
+ if (!tipc_link_validate_msg(l, hdr))
+ goto exit;
+
switch (mtyp) {
case RESET_MSG:
-
- /* Ignore duplicate RESET with old session number */
- if ((less_eq(msg_session(hdr), l->peer_session)) &&
- (l->peer_session != ANY_SESSION))
- break;
- /* fall thru' */
-
case ACTIVATE_MSG:
-
/* Complete own link name with peer's interface name */
if_name = strrchr(l->name, ':') + 1;
if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
@@ -1509,12 +1551,14 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
rc = TIPC_LINK_UP_EVT;
l->peer_session = msg_session(hdr);
+ l->in_session = true;
l->peer_bearer_id = msg_bearer_id(hdr);
if (l->mtu > msg_max_pkt(hdr))
l->mtu = msg_max_pkt(hdr);
break;
case STATE_MSG:
+ l->rcv_nxt_state = msg_seqno(hdr) + 1;
/* Update own tolerance if peer indicates a non-zero value */
if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
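
The retransmit-failure hunk above changes the give-up rule: instead of 100 identical retransmit requests alone, the link is reset only once both the count threshold and the peer's tolerance window have been exceeded. A toy restatement of the new test (not from the patch):

#include <linux/jiffies.h>

static bool link_retrans_gave_up(u16 stale_cnt, unsigned long stale_limit)
{
	/* time_after() is wraparound-safe on jiffies */
	return stale_cnt > 99 && time_after(jiffies, stale_limit);
}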
diff --git a/net/tipc/link.h b/net/tipc/link.h
index ec59348a81e8..7bc494a33fdf 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -110,6 +110,8 @@ char *tipc_link_name(struct tipc_link *l);
char tipc_link_plane(struct tipc_link *l);
int tipc_link_prio(struct tipc_link *l);
int tipc_link_window(struct tipc_link *l);
+void tipc_link_update_caps(struct tipc_link *l, u16 capabilities);
+bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr);
unsigned long tipc_link_tolerance(struct tipc_link *l);
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
struct sk_buff_head *xmitq);
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 5453e564da82..67f69389ec17 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -684,7 +684,8 @@ int tipc_nl_monitor_get_threshold(struct net *net)
return tn->mon_threshold;
}
-int __tipc_nl_add_monitor_peer(struct tipc_peer *peer, struct tipc_nl_msg *msg)
+static int __tipc_nl_add_monitor_peer(struct tipc_peer *peer,
+ struct tipc_nl_msg *msg)
{
struct tipc_mon_domain *dom = peer->domain;
struct nlattr *attrs;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index b6c45dccba3d..b61891054709 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -416,26 +416,31 @@ bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
*/
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
- struct tipc_msg *msg;
- int imsz, offset;
+ struct tipc_msg *hdr, *ihdr;
+ int imsz;
*iskb = NULL;
if (unlikely(skb_linearize(skb)))
goto none;
- msg = buf_msg(skb);
- offset = msg_hdr_sz(msg) + *pos;
- if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE)))
+ hdr = buf_msg(skb);
+ if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
goto none;
- *iskb = skb_clone(skb, GFP_ATOMIC);
- if (unlikely(!*iskb))
+ ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
+ imsz = msg_size(ihdr);
+
+ if ((*pos + imsz) > msg_data_sz(hdr))
goto none;
- skb_pull(*iskb, offset);
- imsz = msg_size(buf_msg(*iskb));
- skb_trim(*iskb, imsz);
+
+ *iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
+ if (!*iskb)
+ goto none;
+
+ skb_copy_to_linear_data(*iskb, ihdr, imsz);
if (unlikely(!tipc_msg_validate(iskb)))
goto none;
+
*pos += align(imsz);
return true;
none:
@@ -531,12 +536,6 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
msg_set_hdr_sz(hdr, BASIC_H_SIZE);
}
- if (skb_cloned(_skb) &&
- pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
- goto exit;
-
- /* reassign after skb header modifications */
- hdr = buf_msg(_skb);
/* Now reverse the concerned fields */
msg_set_errcode(hdr, err);
msg_set_non_seq(hdr, 0);
@@ -595,10 +594,6 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
if (!skb_cloned(skb))
return true;
- /* Unclone buffer in case it was bundled */
- if (pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
- return false;
-
return true;
}
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index bebe88cae07b..88f027b502f6 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -735,7 +735,7 @@ int tipc_nametbl_init(struct net *net)
struct name_table *nt;
int i;
- nt = kzalloc(sizeof(*nt), GFP_ATOMIC);
+ nt = kzalloc(sizeof(*nt), GFP_KERNEL);
if (!nt)
return -ENOMEM;
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 0453bd451ce8..68014f1b6976 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -45,6 +45,7 @@
#include "netlink.h"
#define INVALID_NODE_SIG 0x10000
+#define NODE_CLEANUP_AFTER 300000
/* Flags used to take different actions according to flag type
* TIPC_NOTIFY_NODE_DOWN: notify node is down
@@ -96,6 +97,7 @@ struct tipc_bclink_entry {
* @link_id: local and remote bearer ids of changing link, if any
* @publ_list: list of publications
* @rcu: rcu struct for tipc_node
+ * @delete_at: time after which a node that has stayed down is deleted
*/
struct tipc_node {
u32 addr;
@@ -121,6 +123,7 @@ struct tipc_node {
unsigned long keepalive_intv;
struct timer_list timer;
struct rcu_head rcu;
+ unsigned long delete_at;
};
/* Node FSM states and events:
@@ -160,6 +163,7 @@ static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static void tipc_node_put(struct tipc_node *node);
static bool node_is_up(struct tipc_node *n);
+static void tipc_node_delete_from_list(struct tipc_node *node);
struct tipc_sock_conn {
u32 port;
@@ -359,13 +363,24 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *n, *temp_node;
+ struct tipc_link *l;
+ int bearer_id;
int i;
spin_lock_bh(&tn->node_list_lock);
n = tipc_node_find(net, addr);
if (n) {
+ if (n->capabilities == capabilities)
+ goto exit;
/* Same node may come back with new capabilities */
+ write_lock_bh(&n->lock);
n->capabilities = capabilities;
+ for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ l = n->links[bearer_id].link;
+ if (l)
+ tipc_link_update_caps(l, capabilities);
+ }
+ write_unlock_bh(&n->lock);
goto exit;
}
n = kzalloc(sizeof(*n), GFP_ATOMIC);
@@ -390,6 +405,7 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
for (i = 0; i < MAX_BEARERS; i++)
spin_lock_init(&n->links[i].lock);
n->state = SELF_DOWN_PEER_LEAVING;
+ n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
n->signature = INVALID_NODE_SIG;
n->active_links[0] = INVALID_BEARER_ID;
n->active_links[1] = INVALID_BEARER_ID;
@@ -433,11 +449,16 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}
-static void tipc_node_delete(struct tipc_node *node)
+static void tipc_node_delete_from_list(struct tipc_node *node)
{
list_del_rcu(&node->list);
hlist_del_rcu(&node->hash);
tipc_node_put(node);
+}
+
+static void tipc_node_delete(struct tipc_node *node)
+{
+ tipc_node_delete_from_list(node);
del_timer_sync(&node->timer);
tipc_node_put(node);
@@ -544,6 +565,42 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
tipc_node_put(node);
}
+static void tipc_node_clear_links(struct tipc_node *node)
+{
+ int i;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ struct tipc_link_entry *le = &node->links[i];
+
+ if (le->link) {
+ kfree(le->link);
+ le->link = NULL;
+ node->link_cnt--;
+ }
+ }
+}
+
+/* tipc_node_cleanup - delete nodes that do not
+ * have active links for NODE_CLEANUP_AFTER time
+ */
+static int tipc_node_cleanup(struct tipc_node *peer)
+{
+ struct tipc_net *tn = tipc_net(peer->net);
+ bool deleted = false;
+
+ spin_lock_bh(&tn->node_list_lock);
+ tipc_node_write_lock(peer);
+
+ if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
+ tipc_node_clear_links(peer);
+ tipc_node_delete_from_list(peer);
+ deleted = true;
+ }
+ tipc_node_write_unlock(peer);
+ spin_unlock_bh(&tn->node_list_lock);
+ return deleted;
+}
+
/* tipc_node_timeout - handle expiration of node timer
*/
static void tipc_node_timeout(struct timer_list *t)
@@ -551,21 +608,29 @@ static void tipc_node_timeout(struct timer_list *t)
struct tipc_node *n = from_timer(n, t, timer);
struct tipc_link_entry *le;
struct sk_buff_head xmitq;
+ int remains = n->link_cnt;
int bearer_id;
int rc = 0;
+ if (!node_is_up(n) && tipc_node_cleanup(n)) {
+ /* Remove the timer reference */
+ tipc_node_put(n);
+ return;
+ }
+
__skb_queue_head_init(&xmitq);
- for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
tipc_node_read_lock(n);
le = &n->links[bearer_id];
- spin_lock_bh(&le->lock);
if (le->link) {
+ spin_lock_bh(&le->lock);
/* Link tolerance may change asynchronously: */
tipc_node_calculate_timer(n, le->link);
rc = tipc_link_timeout(le->link, &xmitq);
+ spin_unlock_bh(&le->lock);
+ remains--;
}
- spin_unlock_bh(&le->lock);
tipc_node_read_unlock(n);
tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
if (rc & TIPC_LINK_DOWN_EVT)
@@ -1174,6 +1239,7 @@ static void node_lost_contact(struct tipc_node *n,
uint i;
pr_debug("Lost contact with %x\n", n->addr);
+ n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
/* Clean up broadcast state */
tipc_bcast_remove_peer(n->net, n->bc_entry.link);
@@ -1481,7 +1547,7 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
* tipc_node_check_state - check and if necessary update node state
* @skb: TIPC packet
* @bearer_id: identity of bearer delivering the packet
- * Returns true if state is ok, otherwise consumes buffer and returns false
+ * Returns true if state and msg are ok, otherwise false
*/
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
int bearer_id, struct sk_buff_head *xmitq)
@@ -1515,6 +1581,9 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
}
}
+ if (!tipc_link_validate_msg(l, hdr))
+ return false;
+
/* Check and update node accessibility if applicable */
if (state == SELF_UP_PEER_COMING) {
if (!tipc_link_is_up(l))
@@ -1743,7 +1812,6 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
struct tipc_node *peer;
u32 addr;
int err;
- int i;
/* We identify the peer by its net */
if (!info->attrs[TIPC_NLA_NET])
@@ -1778,15 +1846,7 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
goto err_out;
}
- for (i = 0; i < MAX_BEARERS; i++) {
- struct tipc_link_entry *le = &peer->links[i];
-
- if (le->link) {
- kfree(le->link);
- le->link = NULL;
- peer->link_cnt--;
- }
- }
+ tipc_node_clear_links(peer);
tipc_node_write_unlock(peer);
tipc_node_delete(peer);
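
The node.c changes above add lazy deletion of down nodes: node_lost_contact() re-arms a five-minute deadline (NODE_CLEANUP_AFTER), and the periodic node timer removes the node only once it has stayed down past it. A condensed sketch of that flow, with the locking in tipc_node_cleanup() elided:

static void node_tick(struct tipc_node *n)
{
	/* delete only nodes that stayed down past their deadline */
	if (!node_is_up(n) && time_after(jiffies, n->delete_at))
		tipc_node_delete_from_list(n);	/* drops the list reference */
}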
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 846c8f240872..48b3298a248d 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -49,14 +49,16 @@ enum {
TIPC_BCAST_STATE_NACK = (1 << 2),
TIPC_BLOCK_FLOWCTL = (1 << 3),
TIPC_BCAST_RCAST = (1 << 4),
- TIPC_NODE_ID128 = (1 << 5)
+ TIPC_NODE_ID128 = (1 << 5),
+ TIPC_LINK_PROTO_SEQNO = (1 << 6)
};
-#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \
- TIPC_BCAST_STATE_NACK | \
- TIPC_BCAST_RCAST | \
- TIPC_BLOCK_FLOWCTL | \
- TIPC_NODE_ID128)
+#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \
+ TIPC_BCAST_STATE_NACK | \
+ TIPC_BCAST_RCAST | \
+ TIPC_BLOCK_FLOWCTL | \
+ TIPC_NODE_ID128 | \
+ TIPC_LINK_PROTO_SEQNO)
#define INVALID_BEARER_ID -1
void tipc_node_stop(struct net *net);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 930852c54d7a..c1e93c9515bc 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -411,7 +411,6 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
static int tipc_sk_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
- struct tipc_net *tn;
const struct proto_ops *ops;
struct sock *sk;
struct tipc_sock *tsk;
@@ -446,7 +445,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
INIT_LIST_HEAD(&tsk->publications);
INIT_LIST_HEAD(&tsk->cong_links);
msg = &tsk->phdr;
- tn = net_generic(sock_net(sk), tipc_net_id);
/* Finish initializing socket data structures */
sock->ops = ops;
@@ -716,7 +714,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
struct tipc_sock *tsk = tipc_sk(sk);
__poll_t revents = 0;
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, wait);
if (sk->sk_shutdown & RCV_SHUTDOWN)
revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
@@ -1117,7 +1115,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
u32 self = tipc_own_addr(net);
u32 type, lower, upper, scope;
struct sk_buff *skb, *_skb;
- u32 portid, oport, onode;
+ u32 portid, onode;
struct sk_buff_head tmpq;
struct list_head dports;
struct tipc_msg *hdr;
@@ -1133,7 +1131,6 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
user = msg_user(hdr);
mtyp = msg_type(hdr);
hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
- oport = msg_origport(hdr);
onode = msg_orignode(hdr);
type = msg_nametype(hdr);
@@ -3320,6 +3317,11 @@ int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
goto stat_msg_cancel;
nla_nest_end(skb, stat);
+
+ if (tsk->group)
+ if (tipc_group_fill_sock_diag(tsk->group, skb))
+ goto stat_msg_cancel;
+
nla_nest_end(skb, attrs);
return 0;
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index a7a8f8e20ff3..292742e50bfa 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -52,9 +52,12 @@ static DEFINE_SPINLOCK(tls_device_lock);
static void tls_device_free_ctx(struct tls_context *ctx)
{
- struct tls_offload_context *offload_ctx = tls_offload_ctx(ctx);
+ if (ctx->tx_conf == TLS_HW)
+ kfree(tls_offload_ctx_tx(ctx));
+
+ if (ctx->rx_conf == TLS_HW)
+ kfree(tls_offload_ctx_rx(ctx));
- kfree(offload_ctx);
kfree(ctx);
}
@@ -71,10 +74,11 @@ static void tls_device_gc_task(struct work_struct *work)
list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
struct net_device *netdev = ctx->netdev;
- if (netdev) {
+ if (netdev && ctx->tx_conf == TLS_HW) {
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
TLS_OFFLOAD_CTX_DIR_TX);
dev_put(netdev);
+ ctx->netdev = NULL;
}
list_del(&ctx->list);
@@ -82,6 +86,22 @@ static void tls_device_gc_task(struct work_struct *work)
}
}
+static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
+ struct net_device *netdev)
+{
+ if (sk->sk_destruct != tls_device_sk_destruct) {
+ refcount_set(&ctx->refcount, 1);
+ dev_hold(netdev);
+ ctx->netdev = netdev;
+ spin_lock_irq(&tls_device_lock);
+ list_add_tail(&ctx->list, &tls_device_list);
+ spin_unlock_irq(&tls_device_lock);
+
+ ctx->sk_destruct = sk->sk_destruct;
+ sk->sk_destruct = tls_device_sk_destruct;
+ }
+}
+
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
unsigned long flags;
@@ -125,7 +145,7 @@ static void destroy_record(struct tls_record_info *record)
kfree(record);
}
-static void delete_all_records(struct tls_offload_context *offload_ctx)
+static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
struct tls_record_info *info, *temp;
@@ -141,14 +161,14 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_record_info *info, *temp;
- struct tls_offload_context *ctx;
+ struct tls_offload_context_tx *ctx;
u64 deleted_records = 0;
unsigned long flags;
if (!tls_ctx)
return;
- ctx = tls_offload_ctx(tls_ctx);
+ ctx = tls_offload_ctx_tx(tls_ctx);
spin_lock_irqsave(&ctx->lock, flags);
info = ctx->retransmit_hint;
@@ -179,15 +199,17 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
void tls_device_sk_destruct(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);
+ struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
- if (ctx->open_record)
- destroy_record(ctx->open_record);
+ tls_ctx->sk_destruct(sk);
- delete_all_records(ctx);
- crypto_free_aead(ctx->aead_send);
- ctx->sk_destruct(sk);
- clean_acked_data_disable(inet_csk(sk));
+ if (tls_ctx->tx_conf == TLS_HW) {
+ if (ctx->open_record)
+ destroy_record(ctx->open_record);
+ delete_all_records(ctx);
+ crypto_free_aead(ctx->aead_send);
+ clean_acked_data_disable(inet_csk(sk));
+ }
if (refcount_dec_and_test(&tls_ctx->refcount))
tls_device_queue_ctx_destruction(tls_ctx);
@@ -219,7 +241,7 @@ static void tls_append_frag(struct tls_record_info *record,
static int tls_push_record(struct sock *sk,
struct tls_context *ctx,
- struct tls_offload_context *offload_ctx,
+ struct tls_offload_context_tx *offload_ctx,
struct tls_record_info *record,
struct page_frag *pfrag,
int flags,
@@ -264,7 +286,7 @@ static int tls_push_record(struct sock *sk,
return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}
-static int tls_create_new_record(struct tls_offload_context *offload_ctx,
+static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
struct page_frag *pfrag,
size_t prepend_size)
{
@@ -290,7 +312,7 @@ static int tls_create_new_record(struct tls_offload_context *offload_ctx,
}
static int tls_do_allocation(struct sock *sk,
- struct tls_offload_context *offload_ctx,
+ struct tls_offload_context_tx *offload_ctx,
struct page_frag *pfrag,
size_t prepend_size)
{
@@ -324,7 +346,7 @@ static int tls_push_data(struct sock *sk,
unsigned char record_type)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);
+ struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
struct tls_record_info *record = ctx->open_record;
@@ -477,7 +499,7 @@ out:
return rc;
}
-struct tls_record_info *tls_get_record(struct tls_offload_context *context,
+struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn)
{
u64 record_sn = context->hint_record_sn;
@@ -520,11 +542,123 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}
+void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct net_device *netdev = tls_ctx->netdev;
+ struct tls_offload_context_rx *rx_ctx;
+ u32 is_req_pending;
+ s64 resync_req;
+ u32 req_seq;
+
+ if (tls_ctx->rx_conf != TLS_HW)
+ return;
+
+ rx_ctx = tls_offload_ctx_rx(tls_ctx);
+ resync_req = atomic64_read(&rx_ctx->resync_req);
+ req_seq = ntohl(resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
+ is_req_pending = resync_req;
+
+ if (unlikely(is_req_pending) && req_seq == seq &&
+ atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
+ netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk,
+ seq + TLS_HEADER_SIZE - 1,
+ rcd_sn);
+}
+
+static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
+{
+ struct strp_msg *rxm = strp_msg(skb);
+ int err = 0, offset = rxm->offset, copy, nsg;
+ struct sk_buff *skb_iter, *unused;
+ struct scatterlist sg[1];
+ char *orig_buf, *buf;
+
+ orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
+ TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
+ if (!orig_buf)
+ return -ENOMEM;
+ buf = orig_buf;
+
+ nsg = skb_cow_data(skb, 0, &unused);
+ if (unlikely(nsg < 0)) {
+ err = nsg;
+ goto free_buf;
+ }
+
+ sg_init_table(sg, 1);
+ sg_set_buf(&sg[0], buf,
+ rxm->full_len + TLS_HEADER_SIZE +
+ TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ skb_copy_bits(skb, offset, buf,
+ TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
+
+ /* We are interested only in the decrypted data, not the auth */
+ err = decrypt_skb(sk, skb, sg);
+ if (err != -EBADMSG)
+ goto free_buf;
+ else
+ err = 0;
+
+ copy = min_t(int, skb_pagelen(skb) - offset,
+ rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+
+ if (skb->decrypted)
+ skb_store_bits(skb, offset, buf, copy);
+
+ offset += copy;
+ buf += copy;
+
+ skb_walk_frags(skb, skb_iter) {
+ copy = min_t(int, skb_iter->len,
+ rxm->full_len - offset + rxm->offset -
+ TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+
+ if (skb_iter->decrypted)
+ skb_store_bits(skb_iter, offset, buf, copy);
+
+ offset += copy;
+ buf += copy;
+ }
+
+free_buf:
+ kfree(orig_buf);
+ return err;
+}
+
+int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
+ int is_decrypted = skb->decrypted;
+ int is_encrypted = !is_decrypted;
+ struct sk_buff *skb_iter;
+
+ /* Skip if it is already decrypted */
+ if (ctx->sw.decrypted)
+ return 0;
+
+ /* Check if all the data is decrypted already */
+ skb_walk_frags(skb, skb_iter) {
+ is_decrypted &= skb_iter->decrypted;
+ is_encrypted &= !skb_iter->decrypted;
+ }
+
+ ctx->sw.decrypted |= is_decrypted;
+
+ /* Return immediately if the record is either entirely plaintext or
+ * entirely ciphertext. Otherwise reencrypt the partially decrypted
+ * record.
+ */
+ return (is_encrypted || is_decrypted) ? 0 :
+ tls_device_reencrypt(sk, skb);
+}
+
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
u16 nonce_size, tag_size, iv_size, rec_seq_size;
struct tls_record_info *start_marker_record;
- struct tls_offload_context *offload_ctx;
+ struct tls_offload_context_tx *offload_ctx;
struct tls_crypto_info *crypto_info;
struct net_device *netdev;
char *iv, *rec_seq;
@@ -546,7 +680,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
goto out;
}
- offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE, GFP_KERNEL);
+ offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
if (!offload_ctx) {
rc = -ENOMEM;
goto free_marker_record;
@@ -582,12 +716,11 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
ctx->tx.rec_seq_size = rec_seq_size;
- ctx->tx.rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
+ ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
if (!ctx->tx.rec_seq) {
rc = -ENOMEM;
goto free_iv;
}
- memcpy(ctx->tx.rec_seq, rec_seq, rec_seq_size);
rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
if (rc)
@@ -609,7 +742,6 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
ctx->push_pending_record = tls_device_push_pending_record;
- offload_ctx->sk_destruct = sk->sk_destruct;
/* TLS offload is greatly simplified if we don't send
* SKBs where only part of the payload needs to be encrypted.
@@ -619,8 +751,6 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
if (skb)
TCP_SKB_CB(skb)->eor = 1;
- refcount_set(&ctx->refcount, 1);
-
/* We support starting offload on multiple sockets
* concurrently, so we only need a read lock here.
* This lock must precede get_netdev_for_sock to prevent races between
@@ -655,19 +785,14 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
if (rc)
goto release_netdev;
- ctx->netdev = netdev;
-
- spin_lock_irq(&tls_device_lock);
- list_add_tail(&ctx->list, &tls_device_list);
- spin_unlock_irq(&tls_device_lock);
+ tls_device_attach(ctx, sk, netdev);
- sk->sk_validate_xmit_skb = tls_validate_xmit_skb;
/* following this assignment tls_is_sk_tx_device_offloaded
* will return true and the context might be accessed
* by the netdev's xmit function.
*/
- smp_store_release(&sk->sk_destruct,
- &tls_device_sk_destruct);
+ smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
+ dev_put(netdev);
up_read(&device_offload_lock);
goto out;
@@ -690,6 +815,105 @@ out:
return rc;
}
+int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
+{
+ struct tls_offload_context_rx *context;
+ struct net_device *netdev;
+ int rc = 0;
+
+ /* We support starting offload on multiple sockets
+ * concurrently, so we only need a read lock here.
+ * This lock must precede get_netdev_for_sock to prevent races between
+ * NETDEV_DOWN and setsockopt.
+ */
+ down_read(&device_offload_lock);
+ netdev = get_netdev_for_sock(sk);
+ if (!netdev) {
+ pr_err_ratelimited("%s: netdev not found\n", __func__);
+ rc = -EINVAL;
+ goto release_lock;
+ }
+
+ if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
+ pr_err_ratelimited("%s: netdev %s with no TLS offload\n",
+ __func__, netdev->name);
+ rc = -ENOTSUPP;
+ goto release_netdev;
+ }
+
+ /* Avoid offloading if the device is down
+ * We don't want to offload new flows after
+ * the NETDEV_DOWN event
+ */
+ if (!(netdev->flags & IFF_UP)) {
+ rc = -EINVAL;
+ goto release_netdev;
+ }
+
+ context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
+ if (!context) {
+ rc = -ENOMEM;
+ goto release_netdev;
+ }
+
+ ctx->priv_ctx_rx = context;
+ rc = tls_set_sw_offload(sk, ctx, 0);
+ if (rc)
+ goto release_ctx;
+
+ rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
+ &ctx->crypto_recv,
+ tcp_sk(sk)->copied_seq);
+ if (rc) {
+ pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
+ __func__);
+ goto free_sw_resources;
+ }
+
+ tls_device_attach(ctx, sk, netdev);
+ goto release_netdev;
+
+free_sw_resources:
+ tls_sw_free_resources_rx(sk);
+release_ctx:
+ ctx->priv_ctx_rx = NULL;
+release_netdev:
+ dev_put(netdev);
+release_lock:
+ up_read(&device_offload_lock);
+ return rc;
+}
+
+void tls_device_offload_cleanup_rx(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct net_device *netdev;
+
+ down_read(&device_offload_lock);
+ netdev = tls_ctx->netdev;
+ if (!netdev)
+ goto out;
+
+ if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
+ pr_err_ratelimited("%s: device is missing NETIF_F_HW_TLS_RX cap\n",
+ __func__);
+ goto out;
+ }
+
+ netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
+ TLS_OFFLOAD_CTX_DIR_RX);
+
+ if (tls_ctx->tx_conf != TLS_HW) {
+ dev_put(netdev);
+ tls_ctx->netdev = NULL;
+ }
+out:
+ up_read(&device_offload_lock);
+ kfree(tls_ctx->rx.rec_seq);
+ kfree(tls_ctx->rx.iv);
+ tls_sw_release_resources_rx(sk);
+}
+
static int tls_device_down(struct net_device *netdev)
{
struct tls_context *ctx, *tmp;
@@ -710,8 +934,12 @@ static int tls_device_down(struct net_device *netdev)
spin_unlock_irqrestore(&tls_device_lock, flags);
list_for_each_entry_safe(ctx, tmp, &list, list) {
- netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
- TLS_OFFLOAD_CTX_DIR_TX);
+ if (ctx->tx_conf == TLS_HW)
+ netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
+ TLS_OFFLOAD_CTX_DIR_TX);
+ if (ctx->rx_conf == TLS_HW)
+ netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
+ TLS_OFFLOAD_CTX_DIR_RX);
ctx->netdev = NULL;
dev_put(netdev);
list_del_init(&ctx->list);
@@ -732,12 +960,16 @@ static int tls_dev_event(struct notifier_block *this, unsigned long event,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- if (!(dev->features & NETIF_F_HW_TLS_TX))
+ if (!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
return NOTIFY_DONE;
switch (event) {
case NETDEV_REGISTER:
case NETDEV_FEAT_CHANGE:
+ if ((dev->features & NETIF_F_HW_TLS_RX) &&
+ !dev->tlsdev_ops->tls_dev_resync_rx)
+ return NOTIFY_BAD;
+
if (dev->tlsdev_ops &&
dev->tlsdev_ops->tls_dev_add &&
dev->tlsdev_ops->tls_dev_del)
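
The notifier change above refuses NETDEV_REGISTER/NETDEV_FEAT_CHANGE for devices that advertise NETIF_F_HW_TLS_RX without a resync hook. A sketch of the driver-side contract implied here (struct tlsdev_ops lives in include/net/tls.h; exact signatures may differ between kernel versions):

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	/* mandatory once NETIF_F_HW_TLS_RX is advertised */
	void (*tls_dev_resync_rx)(struct net_device *netdev,
				  struct sock *sk, u32 seq, u64 rcd_sn);
};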
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 748914abdb60..6102169239d1 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -42,7 +42,7 @@ static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
sg_set_page(sg, sg_page(src),
src->length - diff, walk->offset);
- scatterwalk_crypto_chain(sg, sg_next(src), 0, 2);
+ scatterwalk_crypto_chain(sg, sg_next(src), 2);
}
static int tls_enc_record(struct aead_request *aead_req,
@@ -214,7 +214,7 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
static int fill_sg_in(struct scatterlist *sg_in,
struct sk_buff *skb,
- struct tls_offload_context *ctx,
+ struct tls_offload_context_tx *ctx,
u64 *rcd_sn,
s32 *sync_size,
int *resync_sgs)
@@ -299,7 +299,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
s32 sync_size, u64 rcd_sn)
{
int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
- struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);
+ struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
int payload_len = skb->len - tcp_payload_offset;
void *buf, *iv, *aad, *dummy_buf;
struct aead_request *aead_req;
@@ -361,7 +361,7 @@ static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);
+ struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
int payload_len = skb->len - tcp_payload_offset;
struct scatterlist *sg_in, sg_out[3];
struct sk_buff *nskb = NULL;
@@ -413,9 +413,10 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
return tls_sw_fallback(sk, skb);
}
+EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
int tls_sw_fallback_init(struct sock *sk,
- struct tls_offload_context *offload_ctx,
+ struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info)
{
const u8 *key;
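
The scatterwalk_crypto_chain() call above drops its former third argument, reflecting an API cleanup in crypto/scatterwalk.h. A sketch of the two-case helper after that cleanup (assumed shape):

static inline void scatterwalk_crypto_chain(struct scatterlist *head,
					    struct scatterlist *sg, int num)
{
	if (sg)
		sg_chain(head, num, sg);	/* link to the next sg table */
	else
		sg_mark_end(head);		/* terminate the chain */
}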
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 301f22430469..b09867c8b817 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -51,15 +51,6 @@ enum {
TLSV6,
TLS_NUM_PROTS,
};
-enum {
- TLS_BASE,
- TLS_SW,
-#ifdef CONFIG_TLS_DEVICE
- TLS_HW,
-#endif
- TLS_HW_RECORD,
- TLS_NUM_CONFIG,
-};
static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
@@ -290,7 +281,10 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
}
#ifdef CONFIG_TLS_DEVICE
- if (ctx->tx_conf != TLS_HW) {
+ if (ctx->rx_conf == TLS_HW)
+ tls_device_offload_cleanup_rx(sk);
+
+ if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
#else
{
#endif
@@ -470,8 +464,16 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
conf = TLS_SW;
}
} else {
- rc = tls_set_sw_offload(sk, ctx, 0);
- conf = TLS_SW;
+#ifdef CONFIG_TLS_DEVICE
+ rc = tls_set_device_offload_rx(sk, ctx);
+ conf = TLS_HW;
+ if (rc) {
+#else
+ {
+#endif
+ rc = tls_set_sw_offload(sk, ctx, 0);
+ conf = TLS_SW;
+ }
}
if (rc)
@@ -629,6 +631,12 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;
+
+ prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];
+
+ prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];
+
+ prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif
prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
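
The enum removed from tls_main.c above presumably moves to a shared header so tls_device.c can use the same configuration states; TLS_HW is no longer conditional on CONFIG_TLS_DEVICE, and the prot[][] matrix is now indexed by [tx_conf][rx_conf]. A sketch of the relocated enum (assumed to land in include/net/tls.h):

enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,		/* now valid for both tx_conf and rx_conf */
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};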
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 1f3d9789af30..52fbe727d7c1 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -48,21 +48,11 @@ static int tls_do_decryption(struct sock *sk,
struct scatterlist *sgout,
char *iv_recv,
size_t data_len,
- struct sk_buff *skb,
- gfp_t flags)
+ struct aead_request *aead_req)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
- struct strp_msg *rxm = strp_msg(skb);
- struct aead_request *aead_req;
-
int ret;
- unsigned int req_size = sizeof(struct aead_request) +
- crypto_aead_reqsize(ctx->aead_recv);
-
- aead_req = kzalloc(req_size, flags);
- if (!aead_req)
- return -ENOMEM;
aead_request_set_tfm(aead_req, ctx->aead_recv);
aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
@@ -73,20 +63,6 @@ static int tls_do_decryption(struct sock *sk,
crypto_req_done, &ctx->async_wait);
ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
-
- if (ret < 0)
- goto out;
-
- rxm->offset += tls_ctx->rx.prepend_size;
- rxm->full_len -= tls_ctx->rx.overhead_size;
- tls_advance_record_sn(sk, &tls_ctx->rx);
-
- ctx->decrypted = true;
-
- ctx->saved_data_ready(sk);
-
-out:
- kfree(aead_req);
return ret;
}
@@ -224,8 +200,7 @@ static int tls_push_record(struct sock *sk, int flags,
struct aead_request *req;
int rc;
- req = kzalloc(sizeof(struct aead_request) +
- crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
+ req = aead_request_alloc(ctx->aead_send, sk->sk_allocation);
if (!req)
return -ENOMEM;
@@ -267,7 +242,7 @@ static int tls_push_record(struct sock *sk, int flags,
tls_advance_record_sn(sk, &tls_ctx->tx);
out_req:
- kfree(req);
+ aead_request_free(req);
return rc;
}
@@ -328,7 +303,12 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
}
}
+ /* Mark the end in the last sg entry if newly added */
+ if (num_elem > *pages_used)
+ sg_mark_end(&to[num_elem - 1]);
out:
+ if (rc)
+ iov_iter_revert(from, size - *size_used);
*size_used = size;
*pages_used = num_elem;
@@ -377,6 +357,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
int record_room;
bool full_record;
int orig_size;
+ bool is_kvec = msg->msg_iter.type & ITER_KVEC;
if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
return -ENOTSUPP;
@@ -425,8 +406,7 @@ alloc_encrypted:
try_to_copy -= required_size - ctx->sg_encrypted_size;
full_record = true;
}
-
- if (full_record || eor) {
+ if (!is_kvec && (full_record || eor)) {
ret = zerocopy_from_iter(sk, &msg->msg_iter,
try_to_copy, &ctx->sg_plaintext_num_elem,
&ctx->sg_plaintext_size,
@@ -438,15 +418,11 @@ alloc_encrypted:
copied += try_to_copy;
ret = tls_push_record(sk, msg->msg_flags, record_type);
- if (!ret)
- continue;
- if (ret < 0)
+ if (ret)
goto send_end;
+ continue;
- copied -= try_to_copy;
fallback_to_reg_send:
- iov_iter_revert(&msg->msg_iter,
- ctx->sg_plaintext_size - orig_size);
trim_sg(sk, ctx->sg_plaintext_data,
&ctx->sg_plaintext_num_elem,
&ctx->sg_plaintext_size,
@@ -673,57 +649,167 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
return skb;
}
-static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
- struct scatterlist *sgout)
+/* This function decrypts the input skb into either out_iov or out_sg,
+ * or into the skb buffers themselves. The input parameter 'zc' indicates
+ * whether zero-copy mode should be tried. With zero-copy mode, either
+ * out_iov or out_sg must be non-NULL. If both out_iov and out_sg are
+ * NULL, the decryption happens inside the skb buffers themselves, i.e.
+ * zero-copy is disabled and 'zc' is updated accordingly.
+ */
+
+static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
+ struct iov_iter *out_iov,
+ struct scatterlist *out_sg,
+ int *chunk, bool *zc)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
- char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE];
- struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
- struct scatterlist *sgin = &sgin_arr[0];
struct strp_msg *rxm = strp_msg(skb);
- int ret, nsg = ARRAY_SIZE(sgin_arr);
+ int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
+ struct aead_request *aead_req;
struct sk_buff *unused;
+ u8 *aad, *iv, *mem = NULL;
+ struct scatterlist *sgin = NULL;
+ struct scatterlist *sgout = NULL;
+ const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;
+
+ if (*zc && (out_iov || out_sg)) {
+ if (out_iov)
+ n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
+ else
+ n_sgout = sg_nents(out_sg);
+ } else {
+ n_sgout = 0;
+ *zc = false;
+ }
- ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
+ n_sgin = skb_cow_data(skb, 0, &unused);
+ if (n_sgin < 1)
+ return -EBADMSG;
+
+ /* Increment to accommodate AAD */
+ n_sgin = n_sgin + 1;
+
+ nsg = n_sgin + n_sgout;
+
+ aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
+ mem_size = aead_size + (nsg * sizeof(struct scatterlist));
+ mem_size = mem_size + TLS_AAD_SPACE_SIZE;
+ mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
+
+ /* Allocate a single block of memory which contains
+ * aead_req || sgin[] || sgout[] || aad || iv.
+ * This order achieves correct alignment for aead_req, sgin, sgout.
+ */
+ mem = kmalloc(mem_size, sk->sk_allocation);
+ if (!mem)
+ return -ENOMEM;
+
+ /* Segment the allocated memory */
+ aead_req = (struct aead_request *)mem;
+ sgin = (struct scatterlist *)(mem + aead_size);
+ sgout = sgin + n_sgin;
+ aad = (u8 *)(sgout + n_sgout);
+ iv = aad + TLS_AAD_SPACE_SIZE;
+
+ /* Prepare IV */
+ err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
tls_ctx->rx.iv_size);
- if (ret < 0)
- return ret;
-
- memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
- if (!sgout) {
- nsg = skb_cow_data(skb, 0, &unused) + 1;
- sgin = kmalloc_array(nsg, sizeof(*sgin), sk->sk_allocation);
- sgout = sgin;
+ if (err < 0) {
+ kfree(mem);
+ return err;
}
+ memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
- sg_init_table(sgin, nsg);
- sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE);
+ /* Prepare AAD */
+ tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
+ tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
+ ctx->control);
- nsg = skb_to_sgvec(skb, &sgin[1],
+ /* Prepare sgin */
+ sg_init_table(sgin, n_sgin);
+ sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
+ err = skb_to_sgvec(skb, &sgin[1],
rxm->offset + tls_ctx->rx.prepend_size,
rxm->full_len - tls_ctx->rx.prepend_size);
- if (nsg < 0) {
- ret = nsg;
- goto out;
+ if (err < 0) {
+ kfree(mem);
+ return err;
}
- tls_make_aad(ctx->rx_aad_ciphertext,
- rxm->full_len - tls_ctx->rx.overhead_size,
- tls_ctx->rx.rec_seq,
- tls_ctx->rx.rec_seq_size,
- ctx->control);
+ if (n_sgout) {
+ if (out_iov) {
+ sg_init_table(sgout, n_sgout);
+ sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);
- ret = tls_do_decryption(sk, sgin, sgout, iv,
- rxm->full_len - tls_ctx->rx.overhead_size,
- skb, sk->sk_allocation);
+ *chunk = 0;
+ err = zerocopy_from_iter(sk, out_iov, data_len, &pages,
+ chunk, &sgout[1],
+ (n_sgout - 1), false);
+ if (err < 0)
+ goto fallback_to_reg_recv;
+ } else if (out_sg) {
+ memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
+ } else {
+ goto fallback_to_reg_recv;
+ }
+ } else {
+fallback_to_reg_recv:
+ sgout = sgin;
+ pages = 0;
+ *chunk = 0;
+ *zc = false;
+ }
-out:
- if (sgin != &sgin_arr[0])
- kfree(sgin);
+ /* Prepare and submit AEAD request */
+ err = tls_do_decryption(sk, sgin, sgout, iv, data_len, aead_req);
- return ret;
+ /* Release the pages in case iov was mapped to pages */
+ for (; pages > 0; pages--)
+ put_page(sg_page(&sgout[pages]));
+
+ kfree(mem);
+ return err;
+}
+
+static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
+ struct iov_iter *dest, int *chunk, bool *zc)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ struct strp_msg *rxm = strp_msg(skb);
+ int err = 0;
+
+#ifdef CONFIG_TLS_DEVICE
+ err = tls_device_decrypted(sk, skb);
+ if (err < 0)
+ return err;
+#endif
+ if (!ctx->decrypted) {
+ err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
+ if (err < 0)
+ return err;
+ } else {
+ *zc = false;
+ }
+
+ rxm->offset += tls_ctx->rx.prepend_size;
+ rxm->full_len -= tls_ctx->rx.overhead_size;
+ tls_advance_record_sn(sk, &tls_ctx->rx);
+ ctx->decrypted = true;
+ ctx->saved_data_ready(sk);
+
+ return err;
+}
+
+int decrypt_skb(struct sock *sk, struct sk_buff *skb,
+ struct scatterlist *sgout)
+{
+ bool zc = true;
+ int chunk;
+
+ return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
}
static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
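
decrypt_internal() above replaces several small allocations with one kmalloc carved into regions; a sketch of that carving in isolation, mirroring the layout comment in the hunk:

/* Layout: one allocation carved as
 *   | aead_req | sgin[n_sgin] | sgout[n_sgout] | aad | iv |
 */
static void carve_decrypt_mem(u8 *mem, int aead_size, int n_sgin,
			      int n_sgout, struct aead_request **aead_req,
			      struct scatterlist **sgin,
			      struct scatterlist **sgout, u8 **aad, u8 **iv)
{
	*aead_req = (struct aead_request *)mem;
	*sgin = (struct scatterlist *)(mem + aead_size);
	*sgout = *sgin + n_sgin;
	*aad = (u8 *)(*sgout + n_sgout);
	*iv = *aad + TLS_AAD_SPACE_SIZE;
}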
@@ -764,6 +850,7 @@ int tls_sw_recvmsg(struct sock *sk,
bool cmsg = false;
int target, err = 0;
long timeo;
+ bool is_kvec = msg->msg_iter.type & ITER_KVEC;
flags |= nonblock;
@@ -801,43 +888,17 @@ int tls_sw_recvmsg(struct sock *sk,
}
if (!ctx->decrypted) {
- int page_count;
- int to_copy;
-
- page_count = iov_iter_npages(&msg->msg_iter,
- MAX_SKB_FRAGS);
- to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
- if (to_copy <= len && page_count < MAX_SKB_FRAGS &&
- likely(!(flags & MSG_PEEK))) {
- struct scatterlist sgin[MAX_SKB_FRAGS + 1];
- int pages = 0;
+ int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
+ if (!is_kvec && to_copy <= len &&
+ likely(!(flags & MSG_PEEK)))
zc = true;
- sg_init_table(sgin, MAX_SKB_FRAGS + 1);
- sg_set_buf(&sgin[0], ctx->rx_aad_plaintext,
- TLS_AAD_SPACE_SIZE);
-
- err = zerocopy_from_iter(sk, &msg->msg_iter,
- to_copy, &pages,
- &chunk, &sgin[1],
- MAX_SKB_FRAGS, false);
- if (err < 0)
- goto fallback_to_reg_recv;
-
- err = decrypt_skb(sk, skb, sgin);
- for (; pages > 0; pages--)
- put_page(sg_page(&sgin[pages]));
- if (err < 0) {
- tls_err_abort(sk, EBADMSG);
- goto recv_end;
- }
- } else {
-fallback_to_reg_recv:
- err = decrypt_skb(sk, skb, NULL);
- if (err < 0) {
- tls_err_abort(sk, EBADMSG);
- goto recv_end;
- }
+
+ err = decrypt_skb_update(sk, skb, &msg->msg_iter,
+ &chunk, &zc);
+ if (err < 0) {
+ tls_err_abort(sk, EBADMSG);
+ goto recv_end;
}
ctx->decrypted = true;
}
@@ -888,6 +949,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
int err = 0;
long timeo;
int chunk;
+ bool zc = false;
lock_sock(sk);
@@ -904,7 +966,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
}
if (!ctx->decrypted) {
- err = decrypt_skb(sk, skb, NULL);
+ err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);
if (err < 0) {
tls_err_abort(sk, EBADMSG);
@@ -950,7 +1012,7 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
- char header[tls_ctx->rx.prepend_size];
+ char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
struct strp_msg *rxm = strp_msg(skb);
size_t cipher_overhead;
size_t data_len = 0;
@@ -960,6 +1022,12 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
return 0;
+ /* Sanity-check size of on-stack buffer. */
+ if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
+ ret = -EINVAL;
+ goto read_failure;
+ }
+
/* Linearize header to local buffer */
ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);
@@ -987,6 +1055,10 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
goto read_failure;
}
+#ifdef CONFIG_TLS_DEVICE
+ handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
+ *(u64 *)tls_ctx->rx.rec_seq);
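(placeholder anchor; see combined edit below)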
+#endif
return data_len + TLS_HEADER_SIZE;
read_failure:
@@ -999,16 +1071,13 @@ static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
- struct strp_msg *rxm;
-
- rxm = strp_msg(skb);
ctx->decrypted = false;
ctx->recv_pkt = skb;
strp_pause(strp);
- strp->sk->sk_state_change(strp->sk);
+ ctx->saved_data_ready(strp->sk);
}
static void tls_data_ready(struct sock *sk)
@@ -1024,23 +1093,20 @@ void tls_sw_free_resources_tx(struct sock *sk)
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
- if (ctx->aead_send)
- crypto_free_aead(ctx->aead_send);
+ crypto_free_aead(ctx->aead_send);
tls_free_both_sg(sk);
kfree(ctx);
}
-void tls_sw_free_resources_rx(struct sock *sk)
+void tls_sw_release_resources_rx(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
if (ctx->aead_recv) {
- if (ctx->recv_pkt) {
- kfree_skb(ctx->recv_pkt);
- ctx->recv_pkt = NULL;
- }
+ kfree_skb(ctx->recv_pkt);
+ ctx->recv_pkt = NULL;
crypto_free_aead(ctx->aead_recv);
strp_stop(&ctx->strp);
write_lock_bh(&sk->sk_callback_lock);
@@ -1050,6 +1116,14 @@ void tls_sw_free_resources_rx(struct sock *sk)
strp_done(&ctx->strp);
lock_sock(sk);
}
+}
+
+void tls_sw_free_resources_rx(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+
+ tls_sw_release_resources_rx(sk);
kfree(ctx);
}
@@ -1074,28 +1148,38 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
}
if (tx) {
- sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
- if (!sw_ctx_tx) {
- rc = -ENOMEM;
- goto out;
+ if (!ctx->priv_ctx_tx) {
+ sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
+ if (!sw_ctx_tx) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ ctx->priv_ctx_tx = sw_ctx_tx;
+ } else {
+ sw_ctx_tx =
+ (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
}
- crypto_init_wait(&sw_ctx_tx->async_wait);
- ctx->priv_ctx_tx = sw_ctx_tx;
} else {
- sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
- if (!sw_ctx_rx) {
- rc = -ENOMEM;
- goto out;
+ if (!ctx->priv_ctx_rx) {
+ sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
+ if (!sw_ctx_rx) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ ctx->priv_ctx_rx = sw_ctx_rx;
+ } else {
+ sw_ctx_rx =
+ (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
}
- crypto_init_wait(&sw_ctx_rx->async_wait);
- ctx->priv_ctx_rx = sw_ctx_rx;
}
if (tx) {
+ crypto_init_wait(&sw_ctx_tx->async_wait);
crypto_info = &ctx->crypto_send;
cctx = &ctx->tx;
aead = &sw_ctx_tx->aead_send;
} else {
+ crypto_init_wait(&sw_ctx_rx->async_wait);
crypto_info = &ctx->crypto_recv;
cctx = &ctx->rx;
aead = &sw_ctx_rx->aead_recv;
@@ -1120,7 +1204,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
}
/* Sanity-check the IV size for stack allocations. */
- if (iv_size > MAX_IV_SIZE) {
+ if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
rc = -EINVAL;
goto free_priv;
}
@@ -1138,12 +1222,11 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
cctx->rec_seq_size = rec_seq_size;
- cctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
+ cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
if (!cctx->rec_seq) {
rc = -ENOMEM;
goto free_iv;
}
- memcpy(cctx->rec_seq, rec_seq, rec_seq_size);
if (sw_ctx_tx) {
sg_init_table(sw_ctx_tx->sg_encrypted_data,
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e5473c03d667..d1edfa3cad61 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -430,7 +430,12 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
connected = unix_dgram_peer_wake_connect(sk, other);
- if (unix_recvq_full(other))
+ /* If other is SOCK_DEAD, we want to make sure we signal
+ * POLLOUT, such that a subsequent write() can get a
+ * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
+ * to other and its queue is full, we will hang waiting for
+ * POLLOUT.
+ */
+ if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD))
return 1;
if (connected)
@@ -2635,7 +2640,7 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
struct sock *sk = sock->sk;
__poll_t mask;
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, wait);
mask = 0;
/* exceptional events? */
@@ -2672,7 +2677,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
unsigned int writable;
__poll_t mask;
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, wait);
mask = 0;
/* exceptional events? */
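Editorial note: the SOCK_DEAD hunk above closes a race where a connected dgram sender sleeps in poll() waiting for POLLOUT, the peer dies with its queue still full, and no further wakeup ever arrives. A hedged, timing-dependent reproducer sketch (paths and timeouts are illustrative):

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un sun = { .sun_family = AF_UNIX };
	char buf[1024] = { 0 };
	struct pollfd pfd;
	int srv, a, b;

	strcpy(sun.sun_path, "/tmp/dgram-demo");  /* illustrative path */
	unlink(sun.sun_path);

	srv = socket(AF_UNIX, SOCK_DGRAM, 0);
	a = socket(AF_UNIX, SOCK_DGRAM, 0);
	b = socket(AF_UNIX, SOCK_DGRAM, 0);
	if (bind(srv, (struct sockaddr *)&sun, sizeof(sun)) ||
	    connect(a, (struct sockaddr *)&sun, sizeof(sun)) ||
	    connect(b, (struct sockaddr *)&sun, sizeof(sun)))
		return 1;

	/* a fills the server's receive queue; b has queued nothing */
	fcntl(a, F_SETFL, O_NONBLOCK);
	while (write(a, buf, sizeof(buf)) > 0)
		;

	if (fork() == 0) {	/* child holds the last reference */
		sleep(1);
		close(srv);	/* server dies while b sleeps in poll() */
		_exit(0);
	}
	close(srv);		/* parent drops its copy right away */

	/* b is registered on the soon-to-be-dead peer with a full
	 * queue; on a fixed kernel poll() reports POLLOUT and the
	 * write() fails with ECONNREFUSED instead of b hanging */
	pfd.fd = b;
	pfd.events = POLLOUT;
	printf("poll=%d errno=%d\n", poll(&pfd, 1, 5000),
	       write(b, buf, sizeof(buf)) < 0 ? errno : 0);
	return 0;
}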
diff --git a/net/wimax/Makefile b/net/wimax/Makefile
index eb2db0d3b880..c2a71ae487ac 100644
--- a/net/wimax/Makefile
+++ b/net/wimax/Makefile
@@ -11,5 +11,3 @@ wimax-y := \
stack.o
wimax-$(CONFIG_DEBUG_FS) += debugfs.o
-
-
diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c
index 6c9bedb7431e..24514840746e 100644
--- a/net/wimax/debugfs.c
+++ b/net/wimax/debugfs.c
@@ -76,5 +76,3 @@ void wimax_debugfs_rm(struct wimax_dev *wimax_dev)
{
debugfs_remove_recursive(wimax_dev->debugfs_dentry);
}
-
-
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index 54aa146930bd..101b2fa3f32e 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -404,4 +404,3 @@ error_no_wimax_dev:
d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
return result;
}
-
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index 5db731512014..a6307813b6d5 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -486,7 +486,8 @@ int wimax_dev_add(struct wimax_dev *wimax_dev, struct net_device *net_dev)
d_fnstart(3, dev, "(wimax_dev %p net_dev %p)\n", wimax_dev, net_dev);
/* Do the RFKILL setup before locking, as RFKILL will call
- * into our functions. */
+ * into our functions.
+ */
wimax_dev->net_dev = net_dev;
result = wimax_rfkill_add(wimax_dev);
if (result < 0)
@@ -629,4 +630,3 @@ module_exit(wimax_subsys_exit);
MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
MODULE_DESCRIPTION("Linux WiMAX stack");
MODULE_LICENSE("GPL");
-
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 48e8097339ab..a88551f3bc43 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -3,7 +3,7 @@
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2015 Intel Deutschland GmbH
+ * Copyright 2015-2017 Intel Deutschland GmbH
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -744,6 +744,8 @@ int wiphy_register(struct wiphy *wiphy)
/* sanity check supported bands/channels */
for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ u16 types = 0;
+
sband = wiphy->bands[band];
if (!sband)
continue;
@@ -788,6 +790,23 @@ int wiphy_register(struct wiphy *wiphy)
sband->channels[i].band = band;
}
+ for (i = 0; i < sband->n_iftype_data; i++) {
+ const struct ieee80211_sband_iftype_data *iftd;
+
+ iftd = &sband->iftype_data[i];
+
+ if (WARN_ON(!iftd->types_mask))
+ return -EINVAL;
+ if (WARN_ON(types & iftd->types_mask))
+ return -EINVAL;
+
+ /* at least one piece of information must be present */
+ if (WARN_ON(!iftd->he_cap.has_he))
+ return -EINVAL;
+
+ types |= iftd->types_mask;
+ }
+
have_band = true;
}
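Editorial note: the validation loop added to wiphy_register() above insists that each entry in sband->iftype_data names at least one interface type, that no two entries claim the same type, and that every entry carries HE data. A hedged sketch of a driver-side table that satisfies those rules (capability values are placeholders, not real hardware data):

/* sketch only: per-iftype HE capabilities with non-overlapping masks */
static const struct ieee80211_sband_iftype_data my_iftype_data[] = {
	{
		.types_mask = BIT(NL80211_IFTYPE_STATION),
		.he_cap = {
			.has_he = true,
			/* .he_cap_elem, .he_mcs_nss_supp, .ppe_thres ... */
		},
	},
	{
		.types_mask = BIT(NL80211_IFTYPE_AP),	/* no overlap */
		.he_cap = { .has_he = true },
	},
};

/* wired up before wiphy_register():
 *	sband->iftype_data = my_iftype_data;
 *	sband->n_iftype_data = ARRAY_SIZE(my_iftype_data);
 */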
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 63eb1b5fdd04..7f52ef569320 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -76,7 +76,7 @@ struct cfg80211_registered_device {
struct cfg80211_scan_request *scan_req; /* protected by RTNL */
struct sk_buff *scan_msg;
struct list_head sched_scan_req_list;
- unsigned long suspend_at;
+ time64_t suspend_at;
struct work_struct scan_done_wk;
struct genl_info *cur_cmd_info;
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index ba0a1f398ce5..e6bce1f130c9 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -65,9 +65,9 @@ struct lib80211_tkip_data {
int key_idx;
struct crypto_skcipher *rx_tfm_arc4;
- struct crypto_ahash *rx_tfm_michael;
+ struct crypto_shash *rx_tfm_michael;
struct crypto_skcipher *tx_tfm_arc4;
- struct crypto_ahash *tx_tfm_michael;
+ struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16], tx_hdr[16];
@@ -106,8 +106,7 @@ static void *lib80211_tkip_init(int key_idx)
goto fail;
}
- priv->tx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(priv->tx_tfm_michael)) {
priv->tx_tfm_michael = NULL;
goto fail;
@@ -120,8 +119,7 @@ static void *lib80211_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(priv->rx_tfm_michael)) {
priv->rx_tfm_michael = NULL;
goto fail;
@@ -131,9 +129,9 @@ static void *lib80211_tkip_init(int key_idx)
fail:
if (priv) {
- crypto_free_ahash(priv->tx_tfm_michael);
+ crypto_free_shash(priv->tx_tfm_michael);
crypto_free_skcipher(priv->tx_tfm_arc4);
- crypto_free_ahash(priv->rx_tfm_michael);
+ crypto_free_shash(priv->rx_tfm_michael);
crypto_free_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -145,9 +143,9 @@ static void lib80211_tkip_deinit(void *priv)
{
struct lib80211_tkip_data *_priv = priv;
if (_priv) {
- crypto_free_ahash(_priv->tx_tfm_michael);
+ crypto_free_shash(_priv->tx_tfm_michael);
crypto_free_skcipher(_priv->tx_tfm_arc4);
- crypto_free_ahash(_priv->rx_tfm_michael);
+ crypto_free_shash(_priv->rx_tfm_michael);
crypto_free_skcipher(_priv->rx_tfm_arc4);
}
kfree(priv);
@@ -510,29 +508,36 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return keyidx;
}
-static int michael_mic(struct crypto_ahash *tfm_michael, u8 * key, u8 * hdr,
- u8 * data, size_t data_len, u8 * mic)
+static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
+ u8 *data, size_t data_len, u8 *mic)
{
- AHASH_REQUEST_ON_STACK(req, tfm_michael);
- struct scatterlist sg[2];
+ SHASH_DESC_ON_STACK(desc, tfm_michael);
int err;
if (tfm_michael == NULL) {
pr_warn("%s(): tfm_michael == NULL\n", __func__);
return -1;
}
- sg_init_table(sg, 2);
- sg_set_buf(&sg[0], hdr, 16);
- sg_set_buf(&sg[1], data, data_len);
- if (crypto_ahash_setkey(tfm_michael, key, 8))
+ desc->tfm = tfm_michael;
+ desc->flags = 0;
+
+ if (crypto_shash_setkey(tfm_michael, key, 8))
return -1;
- ahash_request_set_tfm(req, tfm_michael);
- ahash_request_set_callback(req, 0, NULL, NULL);
- ahash_request_set_crypt(req, sg, mic, data_len + 16);
- err = crypto_ahash_digest(req);
- ahash_request_zero(req);
+ err = crypto_shash_init(desc);
+ if (err)
+ goto out;
+ err = crypto_shash_update(desc, hdr, 16);
+ if (err)
+ goto out;
+ err = crypto_shash_update(desc, data, data_len);
+ if (err)
+ goto out;
+ err = crypto_shash_final(desc, mic);
+
+out:
+ shash_desc_zero(desc);
return err;
}
@@ -654,9 +659,9 @@ static int lib80211_tkip_set_key(void *key, int len, u8 * seq, void *priv)
{
struct lib80211_tkip_data *tkey = priv;
int keyidx;
- struct crypto_ahash *tfm = tkey->tx_tfm_michael;
+ struct crypto_shash *tfm = tkey->tx_tfm_michael;
struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
- struct crypto_ahash *tfm3 = tkey->rx_tfm_michael;
+ struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
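Editorial note: michael_mic() above needs the incremental init/update/update/final sequence because the MIC covers two discontiguous buffers (the 16-byte pseudo header and the payload). For a single contiguous buffer the synchronous hash API also offers a one-shot helper; a hedged kernel-side sketch (mic_oneshot is hypothetical, the crypto_shash calls are real):

#include <crypto/hash.h>

static int mic_oneshot(struct crypto_shash *tfm, const u8 *key,
		       const u8 *buf, size_t len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_setkey(tfm, key, 8);
	if (err)
		return err;

	/* init + update + final collapsed into one call */
	err = crypto_shash_digest(desc, buf, len, out);
	shash_desc_zero(desc);
	return err;
}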
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 80bc986c79e5..5fb9b7dd9831 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -428,6 +428,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 },
[NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 },
[NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 },
+ [NL80211_ATTR_HE_CAPABILITY] = { .type = NLA_BINARY,
+ .len = NL80211_HE_MAX_CAPABILITY_LEN },
};
/* policy for the key attributes */
@@ -1324,6 +1326,34 @@ static int nl80211_send_coalesce(struct sk_buff *msg,
return 0;
}
+static int
+nl80211_send_iftype_data(struct sk_buff *msg,
+ const struct ieee80211_sband_iftype_data *iftdata)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &iftdata->he_cap;
+
+ if (nl80211_put_iftypes(msg, NL80211_BAND_IFTYPE_ATTR_IFTYPES,
+ iftdata->types_mask))
+ return -ENOBUFS;
+
+ if (he_cap->has_he) {
+ if (nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC,
+ sizeof(he_cap->he_cap_elem.mac_cap_info),
+ he_cap->he_cap_elem.mac_cap_info) ||
+ nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY,
+ sizeof(he_cap->he_cap_elem.phy_cap_info),
+ he_cap->he_cap_elem.phy_cap_info) ||
+ nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET,
+ sizeof(he_cap->he_mcs_nss_supp),
+ &he_cap->he_mcs_nss_supp) ||
+ nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE,
+ sizeof(he_cap->ppe_thres), he_cap->ppe_thres))
+ return -ENOBUFS;
+ }
+
+ return 0;
+}
+
static int nl80211_send_band_rateinfo(struct sk_buff *msg,
struct ieee80211_supported_band *sband)
{
@@ -1353,6 +1383,32 @@ static int nl80211_send_band_rateinfo(struct sk_buff *msg,
sband->vht_cap.cap)))
return -ENOBUFS;
+ if (sband->n_iftype_data) {
+ struct nlattr *nl_iftype_data =
+ nla_nest_start(msg, NL80211_BAND_ATTR_IFTYPE_DATA);
+ int err;
+
+ if (!nl_iftype_data)
+ return -ENOBUFS;
+
+ for (i = 0; i < sband->n_iftype_data; i++) {
+ struct nlattr *iftdata;
+
+ iftdata = nla_nest_start(msg, i + 1);
+ if (!iftdata)
+ return -ENOBUFS;
+
+ err = nl80211_send_iftype_data(msg,
+ &sband->iftype_data[i]);
+ if (err)
+ return err;
+
+ nla_nest_end(msg, iftdata);
+ }
+
+ nla_nest_end(msg, nl_iftype_data);
+ }
+
/* add bitrates */
nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES);
if (!nl_rates)
@@ -2757,7 +2813,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) ||
nla_put_u32(msg, NL80211_ATTR_GENERATION,
rdev->devlist_generation ^
- (cfg80211_rdev_list_generation << 2)))
+ (cfg80211_rdev_list_generation << 2)) ||
+ nla_put_u8(msg, NL80211_ATTR_4ADDR, wdev->use_4addr))
goto nla_put_failure;
if (rdev->ops->get_channel) {
@@ -4472,6 +4529,9 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
case RATE_INFO_BW_160:
rate_flg = NL80211_RATE_INFO_160_MHZ_WIDTH;
break;
+ case RATE_INFO_BW_HE_RU:
+ rate_flg = 0;
+ WARN_ON(!(info->flags & RATE_INFO_FLAGS_HE_MCS));
}
if (rate_flg && nla_put_flag(msg, rate_flg))
@@ -4491,6 +4551,19 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
if (info->flags & RATE_INFO_FLAGS_SHORT_GI &&
nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))
return false;
+ } else if (info->flags & RATE_INFO_FLAGS_HE_MCS) {
+ if (nla_put_u8(msg, NL80211_RATE_INFO_HE_MCS, info->mcs))
+ return false;
+ if (nla_put_u8(msg, NL80211_RATE_INFO_HE_NSS, info->nss))
+ return false;
+ if (nla_put_u8(msg, NL80211_RATE_INFO_HE_GI, info->he_gi))
+ return false;
+ if (nla_put_u8(msg, NL80211_RATE_INFO_HE_DCM, info->he_dcm))
+ return false;
+ if (info->bw == RATE_INFO_BW_HE_RU &&
+ nla_put_u8(msg, NL80211_RATE_INFO_HE_RU_ALLOC,
+ info->he_ru_alloc))
+ return false;
}
nla_nest_end(msg, rate);
@@ -4547,13 +4620,13 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
#define PUT_SINFO(attr, memb, type) do { \
BUILD_BUG_ON(sizeof(type) == sizeof(u64)); \
- if (sinfo->filled & (1ULL << NL80211_STA_INFO_ ## attr) && \
+ if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \
nla_put_ ## type(msg, NL80211_STA_INFO_ ## attr, \
sinfo->memb)) \
goto nla_put_failure; \
} while (0)
#define PUT_SINFO_U64(attr, memb) do { \
- if (sinfo->filled & (1ULL << NL80211_STA_INFO_ ## attr) && \
+ if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \
nla_put_u64_64bit(msg, NL80211_STA_INFO_ ## attr, \
sinfo->memb, NL80211_STA_INFO_PAD)) \
goto nla_put_failure; \
@@ -4562,14 +4635,14 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
PUT_SINFO(CONNECTED_TIME, connected_time, u32);
PUT_SINFO(INACTIVE_TIME, inactive_time, u32);
- if (sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES) |
- BIT(NL80211_STA_INFO_RX_BYTES64)) &&
+ if (sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_RX_BYTES64)) &&
nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES,
(u32)sinfo->rx_bytes))
goto nla_put_failure;
- if (sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES) |
- BIT(NL80211_STA_INFO_TX_BYTES64)) &&
+ if (sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_TX_BYTES64)) &&
nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
(u32)sinfo->tx_bytes))
goto nla_put_failure;
@@ -4589,24 +4662,24 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
default:
break;
}
- if (sinfo->filled & BIT(NL80211_STA_INFO_CHAIN_SIGNAL)) {
+ if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) {
if (!nl80211_put_signal(msg, sinfo->chains,
sinfo->chain_signal,
NL80211_STA_INFO_CHAIN_SIGNAL))
goto nla_put_failure;
}
- if (sinfo->filled & BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) {
+ if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) {
if (!nl80211_put_signal(msg, sinfo->chains,
sinfo->chain_signal_avg,
NL80211_STA_INFO_CHAIN_SIGNAL_AVG))
goto nla_put_failure;
}
- if (sinfo->filled & BIT(NL80211_STA_INFO_TX_BITRATE)) {
+ if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) {
if (!nl80211_put_sta_rate(msg, &sinfo->txrate,
NL80211_STA_INFO_TX_BITRATE))
goto nla_put_failure;
}
- if (sinfo->filled & BIT(NL80211_STA_INFO_RX_BITRATE)) {
+ if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) {
if (!nl80211_put_sta_rate(msg, &sinfo->rxrate,
NL80211_STA_INFO_RX_BITRATE))
goto nla_put_failure;
@@ -4622,7 +4695,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
PUT_SINFO(PEER_PM, peer_pm, u32);
PUT_SINFO(NONPEER_PM, nonpeer_pm, u32);
- if (sinfo->filled & BIT(NL80211_STA_INFO_BSS_PARAM)) {
+ if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM)) {
bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
if (!bss_param)
goto nla_put_failure;
@@ -4641,7 +4714,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
nla_nest_end(msg, bss_param);
}
- if ((sinfo->filled & BIT(NL80211_STA_INFO_STA_FLAGS)) &&
+ if ((sinfo->filled & BIT_ULL(NL80211_STA_INFO_STA_FLAGS)) &&
nla_put(msg, NL80211_STA_INFO_STA_FLAGS,
sizeof(struct nl80211_sta_flag_update),
&sinfo->sta_flags))
@@ -4887,7 +4960,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
return -EINVAL;
if (params->supported_rates)
return -EINVAL;
- if (params->ext_capab || params->ht_capa || params->vht_capa)
+ if (params->ext_capab || params->ht_capa || params->vht_capa ||
+ params->he_capa)
return -EINVAL;
}
@@ -5093,6 +5167,15 @@ static int nl80211_set_station_tdls(struct genl_info *info,
if (info->attrs[NL80211_ATTR_VHT_CAPABILITY])
params->vht_capa =
nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
+ if (info->attrs[NL80211_ATTR_HE_CAPABILITY]) {
+ params->he_capa =
+ nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
+ params->he_capa_len =
+ nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
+
+ if (params->he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN)
+ return -EINVAL;
+ }
err = nl80211_parse_sta_channel_info(info, params);
if (err)
@@ -5320,6 +5403,17 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.vht_capa =
nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
+ if (info->attrs[NL80211_ATTR_HE_CAPABILITY]) {
+ params.he_capa =
+ nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
+ params.he_capa_len =
+ nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
+
+ /* max len is validated in nla policy */
+ if (params.he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN)
+ return -EINVAL;
+ }
+
if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) {
params.opmode_notif_used = true;
params.opmode_notif =
@@ -5352,6 +5446,10 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) {
params.ht_capa = NULL;
params.vht_capa = NULL;
+
+ /* HE requires WME */
+ if (params.he_capa_len)
+ return -EINVAL;
}
/* When you run into this, adjust the code below for the new flag */
@@ -6849,6 +6947,16 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev)
return regulatory_pre_cac_allowed(wdev->wiphy);
}
+static bool nl80211_check_scan_feat(struct wiphy *wiphy, u32 flags, u32 flag,
+ enum nl80211_ext_feature_index feat)
+{
+ if (!(flags & flag))
+ return true;
+ if (wiphy_ext_feature_isset(wiphy, feat))
+ return true;
+ return false;
+}
+
static int
nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev,
void *request, struct nlattr **attrs,
@@ -6883,15 +6991,33 @@ nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev,
if (((*flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
!(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) ||
- ((*flags & NL80211_SCAN_FLAG_LOW_SPAN) &&
- !wiphy_ext_feature_isset(wiphy,
- NL80211_EXT_FEATURE_LOW_SPAN_SCAN)) ||
- ((*flags & NL80211_SCAN_FLAG_LOW_POWER) &&
- !wiphy_ext_feature_isset(wiphy,
- NL80211_EXT_FEATURE_LOW_POWER_SCAN)) ||
- ((*flags & NL80211_SCAN_FLAG_HIGH_ACCURACY) &&
- !wiphy_ext_feature_isset(wiphy,
- NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN)))
+ !nl80211_check_scan_feat(wiphy, *flags,
+ NL80211_SCAN_FLAG_LOW_SPAN,
+ NL80211_EXT_FEATURE_LOW_SPAN_SCAN) ||
+ !nl80211_check_scan_feat(wiphy, *flags,
+ NL80211_SCAN_FLAG_LOW_POWER,
+ NL80211_EXT_FEATURE_LOW_POWER_SCAN) ||
+ !nl80211_check_scan_feat(wiphy, *flags,
+ NL80211_SCAN_FLAG_HIGH_ACCURACY,
+ NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN) ||
+ !nl80211_check_scan_feat(wiphy, *flags,
+ NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME,
+ NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME) ||
+ !nl80211_check_scan_feat(wiphy, *flags,
+ NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP,
+ NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP) ||
+ !nl80211_check_scan_feat(wiphy, *flags,
+ NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION,
+ NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) ||
+ !nl80211_check_scan_feat(wiphy, *flags,
+ NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE,
+ NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE) ||
+ !nl80211_check_scan_feat(wiphy, *flags,
+ NL80211_SCAN_FLAG_RANDOM_SN,
+ NL80211_EXT_FEATURE_SCAN_RANDOM_SN) ||
+ !nl80211_check_scan_feat(wiphy, *flags,
+ NL80211_SCAN_FLAG_MIN_PREQ_CONTENT,
+ NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT))
return -EOPNOTSUPP;
if (*flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
@@ -6906,26 +7032,6 @@ nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev,
return err;
}
- if ((*flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME) &&
- !wiphy_ext_feature_isset(wiphy,
- NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME))
- return -EOPNOTSUPP;
-
- if ((*flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP) &&
- !wiphy_ext_feature_isset(wiphy,
- NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP))
- return -EOPNOTSUPP;
-
- if ((*flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) &&
- !wiphy_ext_feature_isset(wiphy,
- NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION))
- return -EOPNOTSUPP;
-
- if ((*flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE) &&
- !wiphy_ext_feature_isset(wiphy,
- NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE))
- return -EOPNOTSUPP;
-
return 0;
}
@@ -10148,7 +10254,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
if (err)
return err;
- if (sinfo.filled & BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG))
+ if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG))
wdev->cqm_config->last_rssi_event_value =
(s8) sinfo.rx_beacon_signal_avg;
}
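Editorial note: the BIT() to BIT_ULL() conversions in this file matter because sinfo->filled is a u64. BIT(n) expands to 1UL << n, and on 32-bit architectures unsigned long is 32 bits wide, so any NL80211_STA_INFO_* index of 32 or higher would shift out of range (undefined behaviour) instead of setting the intended bit. A minimal standalone illustration:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)     (1UL  << (n))	/* 32 bits wide on ILP32 builds */
#define BIT_ULL(n) (1ULL << (n))	/* always 64 bits wide */

int main(void)
{
	uint64_t filled = BIT_ULL(40);	/* a flag above bit 31 */

	/* on a 32-bit build BIT(40) is undefined; BIT_ULL(40) is not */
	printf("bit 40 set: %d\n", (filled & BIT_ULL(40)) != 0);
	return 0;
}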
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 570a2b67ca10..6ab32f6a1961 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -102,7 +102,7 @@ static int wiphy_suspend(struct device *dev)
struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
int ret = 0;
- rdev->suspend_at = get_seconds();
+ rdev->suspend_at = ktime_get_boottime_seconds();
rtnl_lock();
if (rdev->wiphy.registered) {
@@ -130,7 +130,7 @@ static int wiphy_resume(struct device *dev)
int ret = 0;
/* Age scan results with time spent in suspend */
- cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
+ cfg80211_bss_age(rdev, ktime_get_boottime_seconds() - rdev->suspend_at);
rtnl_lock();
if (rdev->wiphy.registered && rdev->ops->resume)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 3c654cd7ba56..e0825a019e9f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -4,6 +4,7 @@
*
* Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2017 Intel Deutschland GmbH
*/
#include <linux/export.h>
#include <linux/bitops.h>
@@ -1142,6 +1143,85 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
return 0;
}
+static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
+{
+#define SCALE 2048
+ u16 mcs_divisors[12] = {
+ 34133, /* 16.666666... */
+ 17067, /* 8.333333... */
+ 11378, /* 5.555555... */
+ 8533, /* 4.166666... */
+ 5689, /* 2.777777... */
+ 4267, /* 2.083333... */
+ 3923, /* 1.851851... */
+ 3413, /* 1.666666... */
+ 2844, /* 1.388888... */
+ 2560, /* 1.250000... */
+ 2276, /* 1.111111... */
+ 2048, /* 1.000000... */
+ };
+ u32 rates_160M[3] = { 960777777, 907400000, 816666666 };
+ u32 rates_969[3] = { 480388888, 453700000, 408333333 };
+ u32 rates_484[3] = { 229411111, 216666666, 195000000 };
+ u32 rates_242[3] = { 114711111, 108333333, 97500000 };
+ u32 rates_106[3] = { 40000000, 37777777, 34000000 };
+ u32 rates_52[3] = { 18820000, 17777777, 16000000 };
+ u32 rates_26[3] = { 9411111, 8888888, 8000000 };
+ u64 tmp;
+ u32 result;
+
+ if (WARN_ON_ONCE(rate->mcs > 11))
+ return 0;
+
+ if (WARN_ON_ONCE(rate->he_gi > NL80211_RATE_INFO_HE_GI_3_2))
+ return 0;
+ if (WARN_ON_ONCE(rate->he_ru_alloc >
+ NL80211_RATE_INFO_HE_RU_ALLOC_2x996))
+ return 0;
+ if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8))
+ return 0;
+
+ if (rate->bw == RATE_INFO_BW_160)
+ result = rates_160M[rate->he_gi];
+ else if (rate->bw == RATE_INFO_BW_80 ||
+ (rate->bw == RATE_INFO_BW_HE_RU &&
+ rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_996))
+ result = rates_969[rate->he_gi];
+ else if (rate->bw == RATE_INFO_BW_40 ||
+ (rate->bw == RATE_INFO_BW_HE_RU &&
+ rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_484))
+ result = rates_484[rate->he_gi];
+ else if (rate->bw == RATE_INFO_BW_20 ||
+ (rate->bw == RATE_INFO_BW_HE_RU &&
+ rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_242))
+ result = rates_242[rate->he_gi];
+ else if (rate->bw == RATE_INFO_BW_HE_RU &&
+ rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_106)
+ result = rates_106[rate->he_gi];
+ else if (rate->bw == RATE_INFO_BW_HE_RU &&
+ rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_52)
+ result = rates_52[rate->he_gi];
+ else if (rate->bw == RATE_INFO_BW_HE_RU &&
+ rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26)
+ result = rates_26[rate->he_gi];
+ else if (WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
+ rate->bw, rate->he_ru_alloc))
+ return 0;
+
+ /* now scale to the appropriate MCS */
+ tmp = result;
+ tmp *= SCALE;
+ do_div(tmp, mcs_divisors[rate->mcs]);
+ result = tmp;
+
+ /* and take NSS, DCM into account */
+ result = (result * rate->nss) / 8;
+ if (rate->he_dcm)
+ result /= 2;
+
+ return result;
+}
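Editorial note: cfg80211_calculate_bitrate_he() above stores the per-MCS factor as a fixed-point divisor — mcs_divisors[m] is SCALE (2048) times the ratio between the MCS 11 rate and the MCS m rate — so scaling is one 64-bit multiply and one divide. A small standalone check of the arithmetic (same numbers as the tables above; the output is in whatever units the rate tables use):

#include <stdint.h>
#include <stdio.h>

#define SCALE 2048

int main(void)
{
	uint64_t base = 480388888;	/* rates_969[0]: 80 MHz, 0.8us GI */
	uint32_t mcs0_divisor = 34133;	/* == SCALE * 16.666... */
	unsigned int nss = 2;

	uint64_t r = base * SCALE / mcs0_divisor; /* scale MCS 11 -> MCS 0 */
	r = r * nss / 8;		/* tables hold the 8-stream rate */
	/* DCM, when set, would halve the result here */

	printf("MCS0, 2SS, 80 MHz, GI 0.8: %llu\n", (unsigned long long)r);
	return 0;
}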
+
u32 cfg80211_calculate_bitrate(struct rate_info *rate)
{
if (rate->flags & RATE_INFO_FLAGS_MCS)
@@ -1150,6 +1230,8 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate)
return cfg80211_calculate_bitrate_60g(rate);
if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
return cfg80211_calculate_bitrate_vht(rate);
+ if (rate->flags & RATE_INFO_FLAGS_HE_MCS)
+ return cfg80211_calculate_bitrate_he(rate);
return rate->legacy;
}
@@ -1791,8 +1873,9 @@ bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp)
{
- sinfo->pertid = kcalloc(sizeof(*(sinfo->pertid)),
- IEEE80211_NUM_TIDS + 1, gfp);
+ sinfo->pertid = kcalloc(IEEE80211_NUM_TIDS + 1,
+ sizeof(*(sinfo->pertid)),
+ gfp);
if (!sinfo->pertid)
return -ENOMEM;
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 05186a47878f..167f7025ac98 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1278,7 +1278,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
if (err)
return err;
- if (!(sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE)))
+ if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)))
return -EOPNOTSUPP;
rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
@@ -1320,7 +1320,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
switch (rdev->wiphy.signal_type) {
case CFG80211_SIGNAL_TYPE_MBM:
- if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL)) {
+ if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) {
int sig = sinfo.signal;
wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
@@ -1334,7 +1334,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
break;
}
case CFG80211_SIGNAL_TYPE_UNSPEC:
- if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL)) {
+ if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) {
wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
wstats.qual.level = sinfo.signal;
@@ -1347,9 +1347,9 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
}
wstats.qual.updated |= IW_QUAL_NOISE_INVALID;
- if (sinfo.filled & BIT(NL80211_STA_INFO_RX_DROP_MISC))
+ if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC))
wstats.discard.misc = sinfo.rx_dropped_misc;
- if (sinfo.filled & BIT(NL80211_STA_INFO_TX_FAILED))
+ if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))
wstats.discard.retries = sinfo.tx_failed;
return &wstats;
diff --git a/net/x25/Kconfig b/net/x25/Kconfig
index e2fa133f9fba..59fcb41fc5e6 100644
--- a/net/x25/Kconfig
+++ b/net/x25/Kconfig
@@ -31,5 +31,3 @@ config X25
To compile this driver as a module, choose M here: the module
will be called x25. If unsure, say N.
-
-
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 9c214ec681ac..743103786652 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -381,4 +381,3 @@ void x25_check_rbuf(struct sock *sk)
x25_stop_timer(sk);
}
}
-
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index f47abb46c587..911ca6d3cb5a 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -11,6 +11,8 @@
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
#include "xdp_umem.h"
#include "xsk_queue.h"
@@ -40,6 +42,21 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
}
}
+int xdp_umem_query(struct net_device *dev, u16 queue_id)
+{
+ struct netdev_bpf bpf;
+
+ ASSERT_RTNL();
+
+ memset(&bpf, 0, sizeof(bpf));
+ bpf.command = XDP_QUERY_XSK_UMEM;
+ bpf.xsk.queue_id = queue_id;
+
+ if (!dev->netdev_ops->ndo_bpf)
+ return 0;
+ return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem;
+}
+
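Editorial note: xdp_umem_query() above relies on the GNU `?:` operator — the driver's (negative) return code is passed through when non-zero, otherwise the result is whether a umem is already bound to the queue. A standalone illustration of that return convention (query() is a hypothetical stand-in, not a kernel function):

#include <stdio.h>

/* hypothetical stand-in mirroring xdp_umem_query()'s return */
static int query(int driver_err, void *bound_umem)
{
	/* GNU a ?: b yields a when non-zero, else b,
	 * evaluating a only once */
	return driver_err ?: (bound_umem != NULL);
}

int main(void)
{
	printf("%d %d %d\n",
	       query(-95, NULL),	/* driver error wins: -95 */
	       query(0, (void *)1),	/* queue already bound: 1 */
	       query(0, NULL));		/* free to bind: 0 */
	return 0;
}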
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
u32 queue_id, u16 flags)
{
@@ -56,41 +73,36 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
if (force_copy)
return 0;
- dev_hold(dev);
-
- if (dev->netdev_ops->ndo_bpf && dev->netdev_ops->ndo_xsk_async_xmit) {
- bpf.command = XDP_QUERY_XSK_UMEM;
+ if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
+ return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
- rtnl_lock();
- err = dev->netdev_ops->ndo_bpf(dev, &bpf);
- rtnl_unlock();
+ bpf.command = XDP_QUERY_XSK_UMEM;
- if (err) {
- dev_put(dev);
- return force_zc ? -ENOTSUPP : 0;
- }
-
- bpf.command = XDP_SETUP_XSK_UMEM;
- bpf.xsk.umem = umem;
- bpf.xsk.queue_id = queue_id;
+ rtnl_lock();
+ err = xdp_umem_query(dev, queue_id);
+ if (err) {
+ err = err < 0 ? -ENOTSUPP : -EBUSY;
+ goto err_rtnl_unlock;
+ }
- rtnl_lock();
- err = dev->netdev_ops->ndo_bpf(dev, &bpf);
- rtnl_unlock();
+ bpf.command = XDP_SETUP_XSK_UMEM;
+ bpf.xsk.umem = umem;
+ bpf.xsk.queue_id = queue_id;
- if (err) {
- dev_put(dev);
- return force_zc ? err : 0; /* fail or fallback */
- }
+ err = dev->netdev_ops->ndo_bpf(dev, &bpf);
+ if (err)
+ goto err_rtnl_unlock;
+ rtnl_unlock();
- umem->dev = dev;
- umem->queue_id = queue_id;
- umem->zc = true;
- return 0;
- }
+ dev_hold(dev);
+ umem->dev = dev;
+ umem->queue_id = queue_id;
+ umem->zc = true;
+ return 0;
- dev_put(dev);
- return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
+err_rtnl_unlock:
+ rtnl_unlock();
+ return force_zc ? err : 0; /* fail or fallback */
}
static void xdp_umem_clear_dev(struct xdp_umem *umem)
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index 286ed25c1a69..4a9ee2d83158 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -25,6 +25,14 @@ config XFRM_USER
If unsure, say Y.
+config XFRM_INTERFACE
+ tristate "Transformation virtual interface"
+ depends on XFRM && IPV6
+ ---help---
+ This provides a virtual interface to route IPsec traffic.
+
+ If unsure, say N.
+
config XFRM_SUB_POLICY
bool "Transformation sub policy support"
depends on XFRM
@@ -87,4 +95,3 @@ config NET_KEY_MIGRATE
<draft-sugimoto-mip6-pfkey-migrate>.
If unsure, say N.
-
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index 0bd2465a8c5a..fbc4552d17b8 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
obj-$(CONFIG_XFRM_USER) += xfrm_user.o
obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o
+obj-$(CONFIG_XFRM_INTERFACE) += xfrm_interface.o
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 175941e15a6e..5611b7521020 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -56,7 +56,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
if (skb_is_gso(skb)) {
struct net_device *dev = skb->dev;
- if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
+ if (unlikely(x->xso.dev != dev)) {
struct sk_buff *segs;
/* Packet got rerouted, fixup features and segment it. */
@@ -162,7 +162,8 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
}
dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
- x->props.family, x->props.output_mark);
+ x->props.family,
+ xfrm_smark_get(0, x));
if (IS_ERR(dst))
return 0;
@@ -210,8 +211,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
if (!x->type_offload || x->encap)
return false;
- if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) &&
- (!xdst->child->xfrm && x->type->get_mtu)) {
+ if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
+ (!xdst->child->xfrm && x->type->get_mtu)) {
mtu = x->type->get_mtu(x, xdst->child_mtu_cached);
if (skb->len <= mtu)
@@ -306,12 +307,6 @@ static int xfrm_dev_register(struct net_device *dev)
return xfrm_api_check(dev);
}
-static int xfrm_dev_unregister(struct net_device *dev)
-{
- xfrm_policy_cache_flush();
- return NOTIFY_DONE;
-}
-
static int xfrm_dev_feat_change(struct net_device *dev)
{
return xfrm_api_check(dev);
@@ -322,7 +317,6 @@ static int xfrm_dev_down(struct net_device *dev)
if (dev->features & NETIF_F_HW_ESP)
xfrm_dev_state_flush(dev_net(dev), dev, true);
- xfrm_policy_cache_flush();
return NOTIFY_DONE;
}
@@ -334,9 +328,6 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
case NETDEV_REGISTER:
return xfrm_dev_register(dev);
- case NETDEV_UNREGISTER:
- return xfrm_dev_unregister(dev);
-
case NETDEV_FEAT_CHANGE:
return xfrm_dev_feat_change(dev);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 352abca2605f..b89c9c7f8c5c 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -320,6 +320,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
seq = 0;
if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
+ secpath_reset(skb);
XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
goto drop;
}
@@ -328,17 +329,21 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
XFRM_SPI_SKB_CB(skb)->daddroff);
do {
if (skb->sp->len == XFRM_MAX_DEPTH) {
+ secpath_reset(skb);
XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
goto drop;
}
x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
if (x == NULL) {
+ secpath_reset(skb);
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
xfrm_audit_state_notfound(skb, family, spi, seq);
goto drop;
}
+ skb->mark = xfrm_smark_get(skb->mark, x);
+
skb->sp->xvec[skb->sp->len++] = x;
lock:
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
new file mode 100644
index 000000000000..31acc6f33d98
--- /dev/null
+++ b/net/xfrm/xfrm_interface.c
@@ -0,0 +1,975 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * XFRM virtual interface
+ *
+ * Copyright (C) 2018 secunet Security Networks AG
+ *
+ * Author:
+ * Steffen Klassert <steffen.klassert@secunet.com>
+ */
+
+#include <linux/module.h>
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/sockios.h>
+#include <linux/icmp.h>
+#include <linux/if.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/net.h>
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <linux/if_link.h>
+#include <linux/if_arp.h>
+#include <linux/icmpv6.h>
+#include <linux/init.h>
+#include <linux/route.h>
+#include <linux/rtnetlink.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/slab.h>
+#include <linux/hash.h>
+
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <linux/etherdevice.h>
+
+static int xfrmi_dev_init(struct net_device *dev);
+static void xfrmi_dev_setup(struct net_device *dev);
+static struct rtnl_link_ops xfrmi_link_ops __read_mostly;
+static unsigned int xfrmi_net_id __read_mostly;
+
+struct xfrmi_net {
+ /* lists for storing interfaces in use */
+ struct xfrm_if __rcu *xfrmi[1];
+};
+
+#define for_each_xfrmi_rcu(start, xi) \
+ for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next))
+
+static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
+{
+ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+ struct xfrm_if *xi;
+
+ for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
+ if (x->if_id == xi->p.if_id &&
+ (xi->dev->flags & IFF_UP))
+ return xi;
+ }
+
+ return NULL;
+}
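Editorial note: xfrmi_lookup() above is an RCU reader — the nodes it walks may be unlinked concurrently, so it is only safe inside an RCU read-side critical section (the receive-path callbacks below run in softirq context, which provides one). A hedged sketch of explicit caller usage; since xi is the netdev's private area, holding the device also pins xi:

/* sketch: pinning the lookup result across rcu_read_unlock() */
rcu_read_lock();
xi = xfrmi_lookup(net, x);
if (xi)
	dev_hold(xi->dev);	/* keep the device (and xi) alive */
rcu_read_unlock();

if (xi) {
	/* ... use xi->dev ... */
	dev_put(xi->dev);
}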
+
+static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
+{
+ struct xfrmi_net *xfrmn;
+ int ifindex;
+ struct xfrm_if *xi;
+
+ if (!skb->dev)
+ return NULL;
+
+ xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id);
+ ifindex = skb->dev->ifindex;
+
+ for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
+ if (ifindex == xi->dev->ifindex &&
+ (xi->dev->flags & IFF_UP))
+ return xi;
+ }
+
+ return NULL;
+}
+
+static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
+{
+ struct xfrm_if __rcu **xip = &xfrmn->xfrmi[0];
+
+ rcu_assign_pointer(xi->next, rtnl_dereference(*xip));
+ rcu_assign_pointer(*xip, xi);
+}
+
+static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
+{
+ struct xfrm_if __rcu **xip;
+ struct xfrm_if *iter;
+
+ for (xip = &xfrmn->xfrmi[0];
+ (iter = rtnl_dereference(*xip)) != NULL;
+ xip = &iter->next) {
+ if (xi == iter) {
+ rcu_assign_pointer(*xip, xi->next);
+ break;
+ }
+ }
+}
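Editorial note: xfrmi_unlink() above uses the classic pointer-to-pointer idiom — xip always points at the link that references the current node, so removal is a single assignment with no special case for the list head. A self-contained userspace version of the same idiom:

#include <stdio.h>

struct node { int id; struct node *next; };

static void unlink_node(struct node **head, struct node *victim)
{
	struct node **xip;

	/* walk the links, not the nodes: no head special-case */
	for (xip = head; *xip; xip = &(*xip)->next) {
		if (*xip == victim) {
			*xip = victim->next;
			break;
		}
	}
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a;

	unlink_node(&head, &a);		/* removing the head just works */
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->id);	/* prints: 2 3 */
	printf("\n");
	return 0;
}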
+
+static void xfrmi_dev_free(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+}
+
+static int xfrmi_create2(struct net_device *dev)
+{
+ struct xfrm_if *xi = netdev_priv(dev);
+ struct net *net = dev_net(dev);
+ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+ int err;
+
+ dev->rtnl_link_ops = &xfrmi_link_ops;
+ err = register_netdevice(dev);
+ if (err < 0)
+ goto out;
+
+ strcpy(xi->p.name, dev->name);
+
+ dev_hold(dev);
+ xfrmi_link(xfrmn, xi);
+
+ return 0;
+
+out:
+ return err;
+}
+
+static struct xfrm_if *xfrmi_create(struct net *net, struct xfrm_if_parms *p)
+{
+ struct net_device *dev;
+ struct xfrm_if *xi;
+ char name[IFNAMSIZ];
+ int err;
+
+ if (p->name[0]) {
+ strlcpy(name, p->name, IFNAMSIZ);
+ } else {
+ err = -EINVAL;
+ goto failed;
+ }
+
+ dev = alloc_netdev(sizeof(*xi), name, NET_NAME_UNKNOWN, xfrmi_dev_setup);
+ if (!dev) {
+ err = -EAGAIN;
+ goto failed;
+ }
+
+ dev_net_set(dev, net);
+
+ xi = netdev_priv(dev);
+ xi->p = *p;
+ xi->net = net;
+ xi->dev = dev;
+ xi->phydev = dev_get_by_index(net, p->link);
+ if (!xi->phydev) {
+ err = -ENODEV;
+ goto failed_free;
+ }
+
+ err = xfrmi_create2(dev);
+ if (err < 0)
+ goto failed_dev_put;
+
+ return xi;
+
+failed_dev_put:
+ dev_put(xi->phydev);
+failed_free:
+ free_netdev(dev);
+failed:
+ return ERR_PTR(err);
+}
+
+static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p,
+ int create)
+{
+ struct xfrm_if __rcu **xip;
+ struct xfrm_if *xi;
+ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+
+ for (xip = &xfrmn->xfrmi[0];
+ (xi = rtnl_dereference(*xip)) != NULL;
+ xip = &xi->next) {
+ if (xi->p.if_id == p->if_id) {
+ if (create)
+ return ERR_PTR(-EEXIST);
+
+ return xi;
+ }
+ }
+ if (!create)
+ return ERR_PTR(-ENODEV);
+ return xfrmi_create(net, p);
+}
+
+static void xfrmi_dev_uninit(struct net_device *dev)
+{
+ struct xfrm_if *xi = netdev_priv(dev);
+ struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);
+
+ xfrmi_unlink(xfrmn, xi);
+ dev_put(xi->phydev);
+ dev_put(dev);
+}
+
+static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
+{
+ skb->tstamp = 0;
+ skb->pkt_type = PACKET_HOST;
+ skb->skb_iif = 0;
+ skb->ignore_df = 0;
+ skb_dst_drop(skb);
+ nf_reset(skb);
+ nf_reset_trace(skb);
+
+ if (!xnet)
+ return;
+
+ ipvs_reset(skb);
+ secpath_reset(skb);
+ skb_orphan(skb);
+ skb->mark = 0;
+}
+
+static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
+{
+ struct pcpu_sw_netstats *tstats;
+ struct xfrm_mode *inner_mode;
+ struct net_device *dev;
+ struct xfrm_state *x;
+ struct xfrm_if *xi;
+ bool xnet;
+
+ if (err && !skb->sp)
+ return 0;
+
+ x = xfrm_input_state(skb);
+
+ xi = xfrmi_lookup(xs_net(x), x);
+ if (!xi)
+ return 1;
+
+ dev = xi->dev;
+ skb->dev = dev;
+
+ if (err) {
+ dev->stats.rx_errors++;
+ dev->stats.rx_dropped++;
+
+ return 0;
+ }
+
+ xnet = !net_eq(xi->net, dev_net(skb->dev));
+
+ if (xnet) {
+ inner_mode = x->inner_mode;
+
+ if (x->sel.family == AF_UNSPEC) {
+ inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
+ if (inner_mode == NULL) {
+ XFRM_INC_STATS(dev_net(skb->dev),
+ LINUX_MIB_XFRMINSTATEMODEERROR);
+ return -EINVAL;
+ }
+ }
+
+ if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb,
+ inner_mode->afinfo->family))
+ return -EPERM;
+ }
+
+ xfrmi_scrub_packet(skb, xnet);
+
+ tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->rx_packets++;
+ tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
+
+ return 0;
+}
+
+static int
+xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+{
+ struct xfrm_if *xi = netdev_priv(dev);
+ struct net_device_stats *stats = &xi->dev->stats;
+ struct dst_entry *dst = skb_dst(skb);
+ unsigned int length = skb->len;
+ struct net_device *tdev;
+ struct xfrm_state *x;
+ int err = -1;
+ int mtu;
+
+ if (!dst)
+ goto tx_err_link_failure;
+
+ dst_hold(dst);
+ dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ dst = NULL;
+ goto tx_err_link_failure;
+ }
+
+ x = dst->xfrm;
+ if (!x)
+ goto tx_err_link_failure;
+
+ if (x->if_id != xi->p.if_id)
+ goto tx_err_link_failure;
+
+ tdev = dst->dev;
+
+ if (tdev == dev) {
+ stats->collisions++;
+ net_warn_ratelimited("%s: Local routing loop detected!\n",
+ xi->p.name);
+ goto tx_err_dst_release;
+ }
+
+ mtu = dst_mtu(dst);
+ if (!skb->ignore_df && skb->len > mtu) {
+ skb_dst_update_pmtu(skb, mtu);
+
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ } else {
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
+ }
+
+ dst_release(dst);
+ return -EMSGSIZE;
+ }
+
+ xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
+ skb_dst_set(skb, dst);
+ skb->dev = tdev;
+
+ err = dst_output(xi->net, skb->sk, skb);
+ if (net_xmit_eval(err) == 0) {
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->tx_bytes += length;
+ tstats->tx_packets++;
+ u64_stats_update_end(&tstats->syncp);
+ } else {
+ stats->tx_errors++;
+ stats->tx_aborted_errors++;
+ }
+
+ return 0;
+tx_err_link_failure:
+ stats->tx_carrier_errors++;
+ dst_link_failure(skb);
+tx_err_dst_release:
+ dst_release(dst);
+ return err;
+}
+
+static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xfrm_if *xi = netdev_priv(dev);
+ struct net_device_stats *stats = &xi->dev->stats;
+ struct flowi fl;
+ int ret;
+
+ memset(&fl, 0, sizeof(fl));
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IPV6):
+ xfrm_decode_session(skb, &fl, AF_INET6);
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ break;
+ case htons(ETH_P_IP):
+ xfrm_decode_session(skb, &fl, AF_INET);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ break;
+ default:
+ goto tx_err;
+ }
+
+ fl.flowi_oif = xi->phydev->ifindex;
+
+ ret = xfrmi_xmit2(skb, dev, &fl);
+ if (ret < 0)
+ goto tx_err;
+
+ return NETDEV_TX_OK;
+
+tx_err:
+ stats->tx_errors++;
+ stats->tx_dropped++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static int xfrmi4_err(struct sk_buff *skb, u32 info)
+{
+ const struct iphdr *iph = (const struct iphdr *)skb->data;
+ struct net *net = dev_net(skb->dev);
+ int protocol = iph->protocol;
+ struct ip_comp_hdr *ipch;
+ struct ip_esp_hdr *esph;
+ struct ip_auth_hdr *ah;
+ struct xfrm_state *x;
+ struct xfrm_if *xi;
+ __be32 spi;
+
+ switch (protocol) {
+ case IPPROTO_ESP:
+ esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
+ spi = esph->spi;
+ break;
+ case IPPROTO_AH:
+ ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
+ spi = ah->spi;
+ break;
+ case IPPROTO_COMP:
+ ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
+ spi = htonl(ntohs(ipch->cpi));
+ break;
+ default:
+ return 0;
+ }
+
+ switch (icmp_hdr(skb)->type) {
+ case ICMP_DEST_UNREACH:
+ if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+ return 0;
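+ /* fall through */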
+ case ICMP_REDIRECT:
+ break;
+ default:
+ return 0;
+ }
+
+ x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+ spi, protocol, AF_INET);
+ if (!x)
+ return 0;
+
+ xi = xfrmi_lookup(net, x);
+ if (!xi) {
+ xfrm_state_put(x);
+ return -1;
+ }
+
+ if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+ ipv4_update_pmtu(skb, net, info, 0, 0, protocol, 0);
+ else
+ ipv4_redirect(skb, net, 0, 0, protocol, 0);
+ xfrm_state_put(x);
+
+ return 0;
+}
+
+static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, __be32 info)
+{
+ const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
+ struct net *net = dev_net(skb->dev);
+ int protocol = iph->nexthdr;
+ struct ip_comp_hdr *ipch;
+ struct ip_esp_hdr *esph;
+ struct ip_auth_hdr *ah;
+ struct xfrm_state *x;
+ struct xfrm_if *xi;
+ __be32 spi;
+
+ switch (protocol) {
+ case IPPROTO_ESP:
+ esph = (struct ip_esp_hdr *)(skb->data + offset);
+ spi = esph->spi;
+ break;
+ case IPPROTO_AH:
+ ah = (struct ip_auth_hdr *)(skb->data + offset);
+ spi = ah->spi;
+ break;
+ case IPPROTO_COMP:
+ ipch = (struct ip_comp_hdr *)(skb->data + offset);
+ spi = htonl(ntohs(ipch->cpi));
+ break;
+ default:
+ return 0;
+ }
+
+ if (type != ICMPV6_PKT_TOOBIG &&
+ type != NDISC_REDIRECT)
+ return 0;
+
+ x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+ spi, protocol, AF_INET6);
+ if (!x)
+ return 0;
+
+ xi = xfrmi_lookup(net, x);
+ if (!xi) {
+ xfrm_state_put(x);
+ return -1;
+ }
+
+ if (type == NDISC_REDIRECT)
+ ip6_redirect(skb, net, skb->dev->ifindex, 0,
+ sock_net_uid(net, NULL));
+ else
+ ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
+ xfrm_state_put(x);
+
+ return 0;
+}
+
+static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
+{
+ if (xi->p.link != p->link)
+ return -EINVAL;
+
+ xi->p.if_id = p->if_id;
+
+ return 0;
+}
+
+static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
+{
+ struct net *net = dev_net(xi->dev);
+ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+ int err;
+
+ xfrmi_unlink(xfrmn, xi);
+ synchronize_net();
+ err = xfrmi_change(xi, p);
+ xfrmi_link(xfrmn, xi);
+ netdev_state_change(xi->dev);
+ return err;
+}
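Editorial note: xfrmi_update() above is the writer side of the RCU scheme — unlink makes the interface unreachable to new readers, synchronize_net() waits out readers that already found it (a full grace period), and only then is the now-private xi->p rewritten and relinked. In outline:

/* the update recipe used above, annotated */
xfrmi_unlink(xfrmn, xi);	/* unpublish: new RCU readers miss xi */
synchronize_net();		/* grace period: old readers are gone */
err = xfrmi_change(xi, p);	/* xi is private now; mutate safely   */
xfrmi_link(xfrmn, xi);		/* republish the updated interface    */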
+
+static void xfrmi_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *s)
+{
+ int cpu;
+
+ if (!dev->tstats)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ struct pcpu_sw_netstats *stats;
+ struct pcpu_sw_netstats tmp;
+ int start;
+
+ stats = per_cpu_ptr(dev->tstats, cpu);
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ tmp.rx_packets = stats->rx_packets;
+ tmp.rx_bytes = stats->rx_bytes;
+ tmp.tx_packets = stats->tx_packets;
+ tmp.tx_bytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ s->rx_packets += tmp.rx_packets;
+ s->rx_bytes += tmp.rx_bytes;
+ s->tx_packets += tmp.tx_packets;
+ s->tx_bytes += tmp.tx_bytes;
+ }
+
+ s->rx_dropped = dev->stats.rx_dropped;
+ s->tx_dropped = dev->stats.tx_dropped;
+}
+
+static int xfrmi_get_iflink(const struct net_device *dev)
+{
+ struct xfrm_if *xi = netdev_priv(dev);
+
+ return xi->phydev->ifindex;
+}
+
+static const struct net_device_ops xfrmi_netdev_ops = {
+ .ndo_init = xfrmi_dev_init,
+ .ndo_uninit = xfrmi_dev_uninit,
+ .ndo_start_xmit = xfrmi_xmit,
+ .ndo_get_stats64 = xfrmi_get_stats64,
+ .ndo_get_iflink = xfrmi_get_iflink,
+};
+
+static void xfrmi_dev_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &xfrmi_netdev_ops;
+ dev->type = ARPHRD_NONE;
+ dev->hard_header_len = ETH_HLEN;
+ dev->min_header_len = ETH_HLEN;
+ dev->mtu = ETH_DATA_LEN;
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = ETH_DATA_LEN;
+ dev->addr_len = ETH_ALEN;
+ dev->flags = IFF_NOARP;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = xfrmi_dev_free;
+ netif_keep_dst(dev);
+}
+
+static int xfrmi_dev_init(struct net_device *dev)
+{
+ struct xfrm_if *xi = netdev_priv(dev);
+ struct net_device *phydev = xi->phydev;
+ int err;
+
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ err = gro_cells_init(&xi->gro_cells, dev);
+ if (err) {
+ free_percpu(dev->tstats);
+ return err;
+ }
+
+ dev->features |= NETIF_F_LLTX;
+
+ dev->needed_headroom = phydev->needed_headroom;
+ dev->needed_tailroom = phydev->needed_tailroom;
+
+ if (is_zero_ether_addr(dev->dev_addr))
+ eth_hw_addr_inherit(dev, phydev);
+ if (is_zero_ether_addr(dev->broadcast))
+ memcpy(dev->broadcast, phydev->broadcast, dev->addr_len);
+
+ return 0;
+}
+
+static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static void xfrmi_netlink_parms(struct nlattr *data[],
+ struct xfrm_if_parms *parms)
+{
+ memset(parms, 0, sizeof(*parms));
+
+ if (!data)
+ return;
+
+ if (data[IFLA_XFRM_LINK])
+ parms->link = nla_get_u32(data[IFLA_XFRM_LINK]);
+
+ if (data[IFLA_XFRM_IF_ID])
+ parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]);
+}
+
+static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = dev_net(dev);
+ struct xfrm_if_parms *p;
+ struct xfrm_if *xi;
+
+ xi = netdev_priv(dev);
+ p = &xi->p;
+
+ xfrmi_netlink_parms(data, p);
+
+ if (!tb[IFLA_IFNAME])
+ return -EINVAL;
+
+ nla_strlcpy(p->name, tb[IFLA_IFNAME], IFNAMSIZ);
+
+ xi = xfrmi_locate(net, p, 1);
+ return PTR_ERR_OR_ZERO(xi);
+}
+
+static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
+{
+ unregister_netdevice_queue(dev, head);
+}
+
+static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct xfrm_if *xi = netdev_priv(dev);
+ struct net *net = dev_net(dev);
+
+ xfrmi_netlink_parms(data, &xi->p);
+
+ xi = xfrmi_locate(net, &xi->p, 0);
+
+ if (IS_ERR_OR_NULL(xi)) {
+ xi = netdev_priv(dev);
+ } else {
+ if (xi->dev != dev)
+ return -EEXIST;
+ }
+
+ return xfrmi_update(xi, &xi->p);
+}
+
+static size_t xfrmi_get_size(const struct net_device *dev)
+{
+ return
+ /* IFLA_XFRM_LINK */
+ nla_total_size(4) +
+ /* IFLA_XFRM_IF_ID */
+ nla_total_size(4) +
+ 0;
+}
+
+static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct xfrm_if *xi = netdev_priv(dev);
+ struct xfrm_if_parms *parm = &xi->p;
+
+ if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
+ nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+struct net *xfrmi_get_link_net(const struct net_device *dev)
+{
+ struct xfrm_if *xi = netdev_priv(dev);
+
+ return dev_net(xi->phydev);
+}
+
+static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
+ [IFLA_XFRM_LINK] = { .type = NLA_U32 },
+ [IFLA_XFRM_IF_ID] = { .type = NLA_U32 },
+};
+
+static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
+ .kind = "xfrm",
+ .maxtype = IFLA_XFRM_MAX,
+ .policy = xfrmi_policy,
+ .priv_size = sizeof(struct xfrm_if),
+ .setup = xfrmi_dev_setup,
+ .validate = xfrmi_validate,
+ .newlink = xfrmi_newlink,
+ .dellink = xfrmi_dellink,
+ .changelink = xfrmi_changelink,
+ .get_size = xfrmi_get_size,
+ .fill_info = xfrmi_fill_info,
+ .get_link_net = xfrmi_get_link_net,
+};
+
+static void __net_exit xfrmi_destroy_interfaces(struct xfrmi_net *xfrmn)
+{
+ struct xfrm_if *xi;
+ LIST_HEAD(list);
+
+ xi = rtnl_dereference(xfrmn->xfrmi[0]);
+ if (!xi)
+ return;
+
+ unregister_netdevice_queue(xi->dev, &list);
+ unregister_netdevice_many(&list);
+}
+
+static int __net_init xfrmi_init_net(struct net *net)
+{
+ return 0;
+}
+
+static void __net_exit xfrmi_exit_net(struct net *net)
+{
+ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+
+ rtnl_lock();
+ xfrmi_destroy_interfaces(xfrmn);
+ rtnl_unlock();
+}
+
+static struct pernet_operations xfrmi_net_ops = {
+ .init = xfrmi_init_net,
+ .exit = xfrmi_exit_net,
+ .id = &xfrmi_net_id,
+ .size = sizeof(struct xfrmi_net),
+};
+
+static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
+ .handler = xfrm6_rcv,
+ .cb_handler = xfrmi_rcv_cb,
+ .err_handler = xfrmi6_err,
+ .priority = 10,
+};
+
+static struct xfrm6_protocol xfrmi_ah6_protocol __read_mostly = {
+ .handler = xfrm6_rcv,
+ .cb_handler = xfrmi_rcv_cb,
+ .err_handler = xfrmi6_err,
+ .priority = 10,
+};
+
+static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = {
+ .handler = xfrm6_rcv,
+ .cb_handler = xfrmi_rcv_cb,
+ .err_handler = xfrmi6_err,
+ .priority = 10,
+};
+
+static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
+ .handler = xfrm4_rcv,
+ .input_handler = xfrm_input,
+ .cb_handler = xfrmi_rcv_cb,
+ .err_handler = xfrmi4_err,
+ .priority = 10,
+};
+
+static struct xfrm4_protocol xfrmi_ah4_protocol __read_mostly = {
+ .handler = xfrm4_rcv,
+ .input_handler = xfrm_input,
+ .cb_handler = xfrmi_rcv_cb,
+ .err_handler = xfrmi4_err,
+ .priority = 10,
+};
+
+static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = {
+ .handler = xfrm4_rcv,
+ .input_handler = xfrm_input,
+ .cb_handler = xfrmi_rcv_cb,
+ .err_handler = xfrmi4_err,
+ .priority = 10,
+};
+
+static int __init xfrmi4_init(void)
+{
+ int err;
+
+ err = xfrm4_protocol_register(&xfrmi_esp4_protocol, IPPROTO_ESP);
+ if (err < 0)
+ goto xfrm_proto_esp_failed;
+ err = xfrm4_protocol_register(&xfrmi_ah4_protocol, IPPROTO_AH);
+ if (err < 0)
+ goto xfrm_proto_ah_failed;
+ err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
+ if (err < 0)
+ goto xfrm_proto_comp_failed;
+
+ return 0;
+
+xfrm_proto_comp_failed:
+ xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
+xfrm_proto_ah_failed:
+ xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
+xfrm_proto_esp_failed:
+ return err;
+}
+
+static void xfrmi4_fini(void)
+{
+ xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
+ xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
+ xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
+}
+
+static int __init xfrmi6_init(void)
+{
+ int err;
+
+ err = xfrm6_protocol_register(&xfrmi_esp6_protocol, IPPROTO_ESP);
+ if (err < 0)
+ goto xfrm_proto_esp_failed;
+ err = xfrm6_protocol_register(&xfrmi_ah6_protocol, IPPROTO_AH);
+ if (err < 0)
+ goto xfrm_proto_ah_failed;
+ err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
+ if (err < 0)
+ goto xfrm_proto_comp_failed;
+
+ return 0;
+
+xfrm_proto_comp_failed:
+ xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
+xfrm_proto_ah_failed:
+ xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
+xfrm_proto_esp_failed:
+ return err;
+}
+
+static void xfrmi6_fini(void)
+{
+ xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
+ xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
+ xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
+}
+
+static const struct xfrm_if_cb xfrm_if_cb = {
+ .decode_session = xfrmi_decode_session,
+};
+
+static int __init xfrmi_init(void)
+{
+ const char *msg;
+ int err;
+
+ pr_info("IPsec XFRM device driver\n");
+
+ msg = "tunnel device";
+ err = register_pernet_device(&xfrmi_net_ops);
+ if (err < 0)
+ goto pernet_dev_failed;
+
+ msg = "xfrm4 protocols";
+ err = xfrmi4_init();
+ if (err < 0)
+ goto xfrmi4_failed;
+
+ msg = "xfrm6 protocols";
+ err = xfrmi6_init();
+ if (err < 0)
+ goto xfrmi6_failed;
+
+ msg = "netlink interface";
+ err = rtnl_link_register(&xfrmi_link_ops);
+ if (err < 0)
+ goto rtnl_link_failed;
+
+ xfrm_if_register_cb(&xfrm_if_cb);
+
+ return err;
+
+rtnl_link_failed:
+ xfrmi6_fini();
+xfrmi6_failed:
+ xfrmi4_fini();
+xfrmi4_failed:
+ unregister_pernet_device(&xfrmi_net_ops);
+pernet_dev_failed:
+ pr_err("xfrmi init: failed to register %s\n", msg);
+ return err;
+}
+
+static void __exit xfrmi_fini(void)
+{
+ xfrm_if_unregister_cb();
+ rtnl_link_unregister(&xfrmi_link_ops);
+ xfrmi4_fini();
+ xfrmi6_fini();
+ unregister_pernet_device(&xfrmi_net_ops);
+}
+
+module_init(xfrmi_init);
+module_exit(xfrmi_fini);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("xfrm");
+MODULE_ALIAS_NETDEV("xfrm0");
+MODULE_AUTHOR("Steffen Klassert");
+MODULE_DESCRIPTION("XFRM virtual interface");
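
Note: user space instantiates this link type over RTM_NEWLINK; with a
sufficiently recent iproute2 that is roughly "ip link add ipsec0 type
xfrm dev eth0 if_id 42" (exact syntax may differ). A minimal raw-netlink
sketch follows, assuming libmnl and the IFLA_XFRM_* uapi additions from
earlier in this series; the name "ipsec0", ifindex 2 and if_id 42 are
illustrative only.

	#include <libmnl/libmnl.h>
	#include <linux/if_link.h>
	#include <linux/rtnetlink.h>
	#include <stdio.h>
	#include <sys/socket.h>

	int main(void)
	{
		char buf[MNL_SOCKET_BUFFER_SIZE];
		struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
		struct ifinfomsg *ifm;
		struct nlattr *linkinfo, *data;

		nlh->nlmsg_type = RTM_NEWLINK;
		nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
		ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
		ifm->ifi_family = AF_UNSPEC;

		/* xfrmi_newlink() above rejects a request without IFLA_IFNAME */
		mnl_attr_put_strz(nlh, IFLA_IFNAME, "ipsec0");
		linkinfo = mnl_attr_nest_start(nlh, IFLA_LINKINFO);
		mnl_attr_put_strz(nlh, IFLA_INFO_KIND, "xfrm");
		data = mnl_attr_nest_start(nlh, IFLA_INFO_DATA);
		mnl_attr_put_u32(nlh, IFLA_XFRM_LINK, 2);   /* underlay ifindex */
		mnl_attr_put_u32(nlh, IFLA_XFRM_IF_ID, 42); /* if_id */
		mnl_attr_nest_end(nlh, data);
		mnl_attr_nest_end(nlh, linkinfo);

		printf("built RTM_NEWLINK, %u bytes\n", nlh->nlmsg_len);
		return 0;	/* sending via mnl_socket_sendto() omitted */
	}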
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 89b178a78dc7..45ba07ab3e4f 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -66,8 +66,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
goto error_nolock;
}
- if (x->props.output_mark)
- skb->mark = x->props.output_mark;
+ skb->mark = xfrm_smark_get(skb->mark, x);
err = x->outer_mode->output(x, skb);
if (err) {
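
Note: xfrm_smark_get() is the helper this series adds to the xfrm
headers; the sketch below shows its intended semantics (reproduced here
for readability, not part of this hunk). Because an unconfigured smark
has v = m = 0, the unconditional assignment above is equivalent to the
old output_mark test while also supporting partial masks.

	static inline u32 xfrm_smark_get(u32 mark, struct xfrm_state *x)
	{
		struct xfrm_mark *m = &x->props.smark;

		/* override only the masked bits of skb->mark */
		return (m->v & m->m) | (mark & ~m->m);
	}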
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 7c5e8978aeaa..3110c3fbee20 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -45,8 +45,9 @@ struct xfrm_flo {
u8 flags;
};
-static DEFINE_PER_CPU(struct xfrm_dst *, xfrm_last_dst);
-static struct work_struct *xfrm_pcpu_work __read_mostly;
+static DEFINE_SPINLOCK(xfrm_if_cb_lock);
+static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
+
static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
__read_mostly;
@@ -119,6 +120,12 @@ static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short fa
return afinfo;
}
+/* Called with rcu_read_lock(). */
+static const struct xfrm_if_cb *xfrm_if_get_cb(void)
+{
+ return rcu_dereference(xfrm_if_cb);
+}
+
struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr,
@@ -182,8 +189,8 @@ static inline unsigned long make_jiffies(long secs)
static void xfrm_policy_timer(struct timer_list *t)
{
struct xfrm_policy *xp = from_timer(xp, t, timer);
- unsigned long now = get_seconds();
- long next = LONG_MAX;
+ time64_t now = ktime_get_real_seconds();
+ time64_t next = TIME64_MAX;
int warn = 0;
int dir;
@@ -195,7 +202,7 @@ static void xfrm_policy_timer(struct timer_list *t)
dir = xfrm_policy_id2dir(xp->index);
if (xp->lft.hard_add_expires_seconds) {
- long tmo = xp->lft.hard_add_expires_seconds +
+ time64_t tmo = xp->lft.hard_add_expires_seconds +
xp->curlft.add_time - now;
if (tmo <= 0)
goto expired;
@@ -203,7 +210,7 @@ static void xfrm_policy_timer(struct timer_list *t)
next = tmo;
}
if (xp->lft.hard_use_expires_seconds) {
- long tmo = xp->lft.hard_use_expires_seconds +
+ time64_t tmo = xp->lft.hard_use_expires_seconds +
(xp->curlft.use_time ? : xp->curlft.add_time) - now;
if (tmo <= 0)
goto expired;
@@ -211,7 +218,7 @@ static void xfrm_policy_timer(struct timer_list *t)
next = tmo;
}
if (xp->lft.soft_add_expires_seconds) {
- long tmo = xp->lft.soft_add_expires_seconds +
+ time64_t tmo = xp->lft.soft_add_expires_seconds +
xp->curlft.add_time - now;
if (tmo <= 0) {
warn = 1;
@@ -221,7 +228,7 @@ static void xfrm_policy_timer(struct timer_list *t)
next = tmo;
}
if (xp->lft.soft_use_expires_seconds) {
- long tmo = xp->lft.soft_use_expires_seconds +
+ time64_t tmo = xp->lft.soft_use_expires_seconds +
(xp->curlft.use_time ? : xp->curlft.add_time) - now;
if (tmo <= 0) {
warn = 1;
@@ -233,7 +240,7 @@ static void xfrm_policy_timer(struct timer_list *t)
if (warn)
km_policy_expired(xp, dir, 0, 0);
- if (next != LONG_MAX &&
+ if (next != TIME64_MAX &&
!mod_timer(&xp->timer, jiffies + make_jiffies(next)))
xfrm_pol_hold(xp);
@@ -747,6 +754,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
newpos = NULL;
hlist_for_each_entry(pol, chain, bydst) {
if (pol->type == policy->type &&
+ pol->if_id == policy->if_id &&
!selector_cmp(&pol->selector, &policy->selector) &&
xfrm_policy_mark_match(policy, pol) &&
xfrm_sec_ctx_match(pol->security, policy->security) &&
@@ -783,7 +791,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
}
policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
- policy->curlft.add_time = get_seconds();
+ policy->curlft.add_time = ktime_get_real_seconds();
policy->curlft.use_time = 0;
if (!mod_timer(&policy->timer, jiffies + HZ))
xfrm_pol_hold(policy);
@@ -798,8 +806,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
}
EXPORT_SYMBOL(xfrm_policy_insert);
-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
- int dir, struct xfrm_selector *sel,
+struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
+ u8 type, int dir,
+ struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete,
int *err)
{
@@ -812,6 +821,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
ret = NULL;
hlist_for_each_entry(pol, chain, bydst) {
if (pol->type == type &&
+ pol->if_id == if_id &&
(mark & pol->mark.m) == pol->mark.v &&
!selector_cmp(sel, &pol->selector) &&
xfrm_sec_ctx_match(ctx, pol->security)) {
@@ -837,8 +847,9 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
- int dir, u32 id, int delete, int *err)
+struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
+ u8 type, int dir, u32 id, int delete,
+ int *err)
{
struct xfrm_policy *pol, *ret;
struct hlist_head *chain;
@@ -853,6 +864,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
ret = NULL;
hlist_for_each_entry(pol, chain, byidx) {
if (pol->type == type && pol->index == id &&
+ pol->if_id == if_id &&
(mark & pol->mark.m) == pol->mark.v) {
xfrm_pol_hold(pol);
if (delete) {
@@ -1056,13 +1068,14 @@ EXPORT_SYMBOL(xfrm_policy_walk_done);
*/
static int xfrm_policy_match(const struct xfrm_policy *pol,
const struct flowi *fl,
- u8 type, u16 family, int dir)
+ u8 type, u16 family, int dir, u32 if_id)
{
const struct xfrm_selector *sel = &pol->selector;
int ret = -ESRCH;
bool match;
if (pol->family != family ||
+ pol->if_id != if_id ||
(fl->flowi_mark & pol->mark.m) != pol->mark.v ||
pol->type != type)
return ret;
@@ -1077,7 +1090,8 @@ static int xfrm_policy_match(const struct xfrm_policy *pol,
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
const struct flowi *fl,
- u16 family, u8 dir)
+ u16 family, u8 dir,
+ u32 if_id)
{
int err;
struct xfrm_policy *pol, *ret;
@@ -1101,7 +1115,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
priority = ~0U;
ret = NULL;
hlist_for_each_entry_rcu(pol, chain, bydst) {
- err = xfrm_policy_match(pol, fl, type, family, dir);
+ err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
if (err) {
if (err == -ESRCH)
continue;
@@ -1120,7 +1134,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
if ((pol->priority >= priority) && ret)
break;
- err = xfrm_policy_match(pol, fl, type, family, dir);
+ err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
if (err) {
if (err == -ESRCH)
continue;
@@ -1145,21 +1159,25 @@ fail:
return ret;
}
-static struct xfrm_policy *
-xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
+static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
+ const struct flowi *fl,
+ u16 family, u8 dir, u32 if_id)
{
#ifdef CONFIG_XFRM_SUB_POLICY
struct xfrm_policy *pol;
- pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
+ pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
+ dir, if_id);
if (pol != NULL)
return pol;
#endif
- return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
+ return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
+ dir, if_id);
}
static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
- const struct flowi *fl, u16 family)
+ const struct flowi *fl,
+ u16 family, u32 if_id)
{
struct xfrm_policy *pol;
@@ -1177,7 +1195,8 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
match = xfrm_selector_match(&pol->selector, fl, family);
if (match) {
- if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
+ if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
+ pol->if_id != if_id) {
pol = NULL;
goto out;
}
@@ -1268,7 +1287,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
old_pol = rcu_dereference_protected(sk->sk_policy[dir],
lockdep_is_held(&net->xfrm.xfrm_policy_lock));
if (pol) {
- pol->curlft.add_time = get_seconds();
+ pol->curlft.add_time = ktime_get_real_seconds();
pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
xfrm_sk_policy_link(pol, dir);
}
@@ -1305,6 +1324,7 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
newp->lft = old->lft;
newp->curlft = old->curlft;
newp->mark = old->mark;
+ newp->if_id = old->if_id;
newp->action = old->action;
newp->flags = old->flags;
newp->xfrm_nr = old->xfrm_nr;
@@ -1390,7 +1410,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
}
}
- x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
+ x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
+ family, policy->if_id);
if (x && x->km.state == XFRM_STATE_VALID) {
xfrm[nx++] = x;
@@ -1607,10 +1628,11 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
dst_copy_metrics(dst1, dst);
if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
+ __u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
+
family = xfrm[i]->props.family;
dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
- &saddr, &daddr, family,
- xfrm[i]->props.output_mark);
+ &saddr, &daddr, family, mark);
err = PTR_ERR(dst);
if (IS_ERR(dst))
goto put_states;
@@ -1692,7 +1714,8 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family,
pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
XFRM_POLICY_TYPE_MAIN,
fl, family,
- XFRM_POLICY_OUT);
+ XFRM_POLICY_OUT,
+ pols[0]->if_id);
if (pols[1]) {
if (IS_ERR(pols[1])) {
xfrm_pols_put(pols, *num_pols);
@@ -1714,108 +1737,6 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family,
}
-static void xfrm_last_dst_update(struct xfrm_dst *xdst, struct xfrm_dst *old)
-{
- this_cpu_write(xfrm_last_dst, xdst);
- if (old)
- dst_release(&old->u.dst);
-}
-
-static void __xfrm_pcpu_work_fn(void)
-{
- struct xfrm_dst *old;
-
- old = this_cpu_read(xfrm_last_dst);
- if (old && !xfrm_bundle_ok(old))
- xfrm_last_dst_update(NULL, old);
-}
-
-static void xfrm_pcpu_work_fn(struct work_struct *work)
-{
- local_bh_disable();
- rcu_read_lock();
- __xfrm_pcpu_work_fn();
- rcu_read_unlock();
- local_bh_enable();
-}
-
-void xfrm_policy_cache_flush(void)
-{
- struct xfrm_dst *old;
- bool found = false;
- int cpu;
-
- might_sleep();
-
- local_bh_disable();
- rcu_read_lock();
- for_each_possible_cpu(cpu) {
- old = per_cpu(xfrm_last_dst, cpu);
- if (old && !xfrm_bundle_ok(old)) {
- if (smp_processor_id() == cpu) {
- __xfrm_pcpu_work_fn();
- continue;
- }
- found = true;
- break;
- }
- }
-
- rcu_read_unlock();
- local_bh_enable();
-
- if (!found)
- return;
-
- get_online_cpus();
-
- for_each_possible_cpu(cpu) {
- bool bundle_release;
-
- rcu_read_lock();
- old = per_cpu(xfrm_last_dst, cpu);
- bundle_release = old && !xfrm_bundle_ok(old);
- rcu_read_unlock();
-
- if (!bundle_release)
- continue;
-
- if (cpu_online(cpu)) {
- schedule_work_on(cpu, &xfrm_pcpu_work[cpu]);
- continue;
- }
-
- rcu_read_lock();
- old = per_cpu(xfrm_last_dst, cpu);
- if (old && !xfrm_bundle_ok(old)) {
- per_cpu(xfrm_last_dst, cpu) = NULL;
- dst_release(&old->u.dst);
- }
- rcu_read_unlock();
- }
-
- put_online_cpus();
-}
-
-static bool xfrm_xdst_can_reuse(struct xfrm_dst *xdst,
- struct xfrm_state * const xfrm[],
- int num)
-{
- const struct dst_entry *dst = &xdst->u.dst;
- int i;
-
- if (xdst->num_xfrms != num)
- return false;
-
- for (i = 0; i < num; i++) {
- if (!dst || dst->xfrm != xfrm[i])
- return false;
- dst = xfrm_dst_child(dst);
- }
-
- return xfrm_bundle_ok(xdst);
-}
-
static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
const struct flowi *fl, u16 family,
@@ -1824,34 +1745,21 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
struct net *net = xp_net(pols[0]);
struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
- struct xfrm_dst *xdst, *old;
+ struct xfrm_dst *xdst;
struct dst_entry *dst;
int err;
/* Try to instantiate a bundle */
err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
if (err <= 0) {
- if (err != 0 && err != -EAGAIN)
+ if (err == 0)
+ return NULL;
+
+ if (err != -EAGAIN)
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
return ERR_PTR(err);
}
- xdst = this_cpu_read(xfrm_last_dst);
- if (xdst &&
- xdst->u.dst.dev == dst_orig->dev &&
- xdst->num_pols == num_pols &&
- memcmp(xdst->pols, pols,
- sizeof(struct xfrm_policy *) * num_pols) == 0 &&
- xfrm_xdst_can_reuse(xdst, xfrm, err)) {
- dst_hold(&xdst->u.dst);
- xfrm_pols_put(pols, num_pols);
- while (err > 0)
- xfrm_state_put(xfrm[--err]);
- return xdst;
- }
-
- old = xdst;
-
dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
if (IS_ERR(dst)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
@@ -1864,9 +1772,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
xdst->policy_genid = atomic_read(&pols[0]->genid);
- atomic_set(&xdst->u.dst.__refcnt, 2);
- xfrm_last_dst_update(xdst, old);
-
return xdst;
}
@@ -2047,8 +1952,10 @@ free_dst:
goto out;
}
-static struct xfrm_dst *
-xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, struct xfrm_flo *xflo)
+static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
+ const struct flowi *fl,
+ u16 family, u8 dir,
+ struct xfrm_flo *xflo, u32 if_id)
{
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
int num_pols = 0, num_xfrms = 0, err;
@@ -2057,7 +1964,7 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
/* Resolve policies to use if we couldn't get them from
* previous cache entry */
num_pols = 1;
- pols[0] = xfrm_policy_lookup(net, fl, family, dir);
+ pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
err = xfrm_expand_policies(fl, family, pols,
&num_pols, &num_xfrms);
if (err < 0)
@@ -2067,13 +1974,15 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
if (num_xfrms <= 0)
goto make_dummy_bundle;
- local_bh_disable();
xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
xflo->dst_orig);
- local_bh_enable();
-
if (IS_ERR(xdst)) {
err = PTR_ERR(xdst);
+ if (err == -EREMOTE) {
+ xfrm_pols_put(pols, num_pols);
+ return NULL;
+ }
+
if (err != -EAGAIN)
goto error;
goto make_dummy_bundle;
@@ -2123,14 +2032,19 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family,
return ret;
}
-/* Main function: finds/creates a bundle for given flow.
+/* Finds/creates a bundle for given flow and if_id
*
* At the moment we eat a raw IP route. Mostly to speed up lookups
* on interfaces with disabled IPsec.
+ *
+ * xfrm_lookup uses an if_id of 0 by default, and is provided for
+ * compatibility
*/
-struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
- const struct flowi *fl,
- const struct sock *sk, int flags)
+struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
+ struct dst_entry *dst_orig,
+ const struct flowi *fl,
+ const struct sock *sk,
+ int flags, u32 if_id)
{
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
struct xfrm_dst *xdst;
@@ -2146,7 +2060,8 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
sk = sk_const_to_full_sk(sk);
if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
num_pols = 1;
- pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
+ pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
+ if_id);
err = xfrm_expand_policies(fl, family, pols,
&num_pols, &num_xfrms);
if (err < 0)
@@ -2158,15 +2073,16 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
goto no_transform;
}
- local_bh_disable();
xdst = xfrm_resolve_and_create_bundle(
pols, num_pols, fl,
family, dst_orig);
- local_bh_enable();
if (IS_ERR(xdst)) {
xfrm_pols_put(pols, num_pols);
err = PTR_ERR(xdst);
+ if (err == -EREMOTE)
+ goto nopol;
+
goto dropdst;
} else if (xdst == NULL) {
num_xfrms = 0;
@@ -2189,7 +2105,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
!net->xfrm.policy_count[XFRM_POLICY_OUT])
goto nopol;
- xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo);
+ xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
if (xdst == NULL)
goto nopol;
if (IS_ERR(xdst)) {
@@ -2234,7 +2150,7 @@ no_transform:
}
for (i = 0; i < num_pols; i++)
- pols[i]->curlft.use_time = get_seconds();
+ pols[i]->curlft.use_time = ktime_get_real_seconds();
if (num_xfrms < 0) {
/* Prohibit the flow */
@@ -2270,6 +2186,19 @@ dropdst:
xfrm_pols_put(pols, drop_pols);
return ERR_PTR(err);
}
+EXPORT_SYMBOL(xfrm_lookup_with_ifid);
+
+/* Main function: finds/creates a bundle for given flow.
+ *
+ * At the moment we eat a raw IP route. Mostly to speed up lookups
+ * on interfaces with disabled IPsec.
+ */
+struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+ const struct flowi *fl, const struct sock *sk,
+ int flags)
+{
+ return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
+}
EXPORT_SYMBOL(xfrm_lookup);
/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
@@ -2368,6 +2297,7 @@ int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
return -EAFNOSUPPORT;
afinfo->decode_session(skb, fl, reverse);
+
err = security_xfrm_decode_session(skb, &fl->flowi_secid);
rcu_read_unlock();
return err;
@@ -2398,6 +2328,19 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
int reverse;
struct flowi fl;
int xerr_idx = -1;
+ const struct xfrm_if_cb *ifcb;
+ struct xfrm_if *xi;
+ u32 if_id = 0;
+
+ rcu_read_lock();
+ ifcb = xfrm_if_get_cb();
+
+ if (ifcb) {
+ xi = ifcb->decode_session(skb);
+ if (xi)
+ if_id = xi->p.if_id;
+ }
+ rcu_read_unlock();
reverse = dir & ~XFRM_POLICY_MASK;
dir &= XFRM_POLICY_MASK;
@@ -2425,7 +2368,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
pol = NULL;
sk = sk_to_full_sk(sk);
if (sk && sk->sk_policy[dir]) {
- pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
+ pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
if (IS_ERR(pol)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
return 0;
@@ -2433,7 +2376,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
}
if (!pol)
- pol = xfrm_policy_lookup(net, &fl, family, dir);
+ pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
if (IS_ERR(pol)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
@@ -2449,7 +2392,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
return 1;
}
- pol->curlft.use_time = get_seconds();
+ pol->curlft.use_time = ktime_get_real_seconds();
pols[0] = pol;
npols++;
@@ -2457,13 +2400,13 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
&fl, family,
- XFRM_POLICY_IN);
+ XFRM_POLICY_IN, if_id);
if (pols[1]) {
if (IS_ERR(pols[1])) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
return 0;
}
- pols[1]->curlft.use_time = get_seconds();
+ pols[1]->curlft.use_time = ktime_get_real_seconds();
npols++;
}
}
@@ -2822,6 +2765,21 @@ void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
+void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
+{
+ spin_lock(&xfrm_if_cb_lock);
+ rcu_assign_pointer(xfrm_if_cb, ifcb);
+ spin_unlock(&xfrm_if_cb_lock);
+}
+EXPORT_SYMBOL(xfrm_if_register_cb);
+
+void xfrm_if_unregister_cb(void)
+{
+ RCU_INIT_POINTER(xfrm_if_cb, NULL);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(xfrm_if_unregister_cb);
+
#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
@@ -2989,19 +2947,13 @@ static struct pernet_operations __net_initdata xfrm_net_ops = {
void __init xfrm_init(void)
{
- int i;
-
- xfrm_pcpu_work = kmalloc_array(NR_CPUS, sizeof(*xfrm_pcpu_work),
- GFP_KERNEL);
- BUG_ON(!xfrm_pcpu_work);
-
- for (i = 0; i < NR_CPUS; i++)
- INIT_WORK(&xfrm_pcpu_work[i], xfrm_pcpu_work_fn);
-
register_pernet_subsys(&xfrm_net_ops);
xfrm_dev_init();
seqcount_init(&xfrm_policy_hash_generation);
xfrm_input_init();
+
+ RCU_INIT_POINTER(xfrm_if_cb, NULL);
+ synchronize_rcu();
}
#ifdef CONFIG_AUDITSYSCALL
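
Note: the get_seconds()/long to ktime_get_real_seconds()/time64_t
conversions in this file and in xfrm_state.c below are y2038 fixes: on
32-bit targets a signed long stops holding seconds-since-epoch in
January 2038, so the lifetime arithmetic above could wrap. A standalone
illustration of the truncation (made-up values):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t after_2038 = 2147483648LL;	/* 2038-01-19 03:14:08 UTC */
		int32_t as_32bit_long = (int32_t)after_2038;	/* wraps negative */

		printf("%lld -> %d\n", (long long)after_2038, as_32bit_long);
		return 0;
	}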
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 8308281f3253..b669262682c9 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -475,8 +475,8 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
- unsigned long now = get_seconds();
- long next = LONG_MAX;
+ time64_t now = ktime_get_real_seconds();
+ time64_t next = TIME64_MAX;
int warn = 0;
int err = 0;
@@ -537,7 +537,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
if (warn)
km_state_expired(x, 0, 0);
resched:
- if (next != LONG_MAX) {
+ if (next != TIME64_MAX) {
tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL);
}
@@ -577,7 +577,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
- x->curlft.add_time = get_seconds();
+ x->curlft.add_time = ktime_get_real_seconds();
x->lft.soft_byte_limit = XFRM_INF;
x->lft.soft_packet_limit = XFRM_INF;
x->lft.hard_byte_limit = XFRM_INF;
@@ -735,10 +735,9 @@ restart:
}
out:
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
- if (cnt) {
+ if (cnt)
err = 0;
- xfrm_policy_cache_flush();
- }
+
return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
@@ -931,7 +930,7 @@ struct xfrm_state *
xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
const struct flowi *fl, struct xfrm_tmpl *tmpl,
struct xfrm_policy *pol, int *err,
- unsigned short family)
+ unsigned short family, u32 if_id)
{
static xfrm_address_t saddr_wildcard = { };
struct net *net = xp_net(pol);
@@ -955,6 +954,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
+ x->if_id == if_id &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
tmpl->mode == x->props.mode &&
@@ -971,6 +971,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
+ x->if_id == if_id &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
tmpl->mode == x->props.mode &&
@@ -1010,6 +1011,7 @@ found:
* to current session. */
xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
memcpy(&x->mark, &pol->mark, sizeof(x->mark));
+ x->if_id = if_id;
error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
if (error) {
@@ -1067,7 +1069,7 @@ out:
}
struct xfrm_state *
-xfrm_stateonly_find(struct net *net, u32 mark,
+xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
xfrm_address_t *daddr, xfrm_address_t *saddr,
unsigned short family, u8 mode, u8 proto, u32 reqid)
{
@@ -1080,6 +1082,7 @@ xfrm_stateonly_find(struct net *net, u32 mark,
if (x->props.family == family &&
x->props.reqid == reqid &&
(mark & x->mark.m) == x->mark.v &&
+ x->if_id == if_id &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
xfrm_state_addr_check(x, daddr, saddr, family) &&
mode == x->props.mode &&
@@ -1160,11 +1163,13 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
struct xfrm_state *x;
unsigned int h;
u32 mark = xnew->mark.v & xnew->mark.m;
+ u32 if_id = xnew->if_id;
h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == family &&
x->props.reqid == reqid &&
+ x->if_id == if_id &&
(mark & x->mark.m) == x->mark.v &&
xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
@@ -1187,7 +1192,7 @@ EXPORT_SYMBOL(xfrm_state_insert);
static struct xfrm_state *__find_acq_core(struct net *net,
const struct xfrm_mark *m,
unsigned short family, u8 mode,
- u32 reqid, u8 proto,
+ u32 reqid, u32 if_id, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
int create)
@@ -1242,6 +1247,7 @@ static struct xfrm_state *__find_acq_core(struct net *net,
x->props.family = family;
x->props.mode = mode;
x->props.reqid = reqid;
+ x->if_id = if_id;
x->mark.v = m->v;
x->mark.m = m->m;
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
@@ -1296,7 +1302,7 @@ int xfrm_state_add(struct xfrm_state *x)
if (use_spi && !x1)
x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
- x->props.reqid, x->id.proto,
+ x->props.reqid, x->if_id, x->id.proto,
&x->id.daddr, &x->props.saddr, 0);
__xfrm_state_bump_genids(x);
@@ -1395,6 +1401,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
x->props.flags = orig->props.flags;
x->props.extra_flags = orig->props.extra_flags;
+ x->if_id = orig->if_id;
x->tfcpad = orig->tfcpad;
x->replay_maxdiff = orig->replay_maxdiff;
x->replay_maxage = orig->replay_maxage;
@@ -1554,6 +1561,19 @@ out:
if (x1->curlft.use_time)
xfrm_state_check_expire(x1);
+ if (x->props.smark.m || x->props.smark.v || x->if_id) {
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+
+ if (x->props.smark.m || x->props.smark.v)
+ x1->props.smark = x->props.smark;
+
+ if (x->if_id)
+ x1->if_id = x->if_id;
+
+ __xfrm_state_bump_genids(x1);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ }
+
err = 0;
x->km.state = XFRM_STATE_DEAD;
__xfrm_state_put(x);
@@ -1571,7 +1591,7 @@ EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
if (!x->curlft.use_time)
- x->curlft.use_time = get_seconds();
+ x->curlft.use_time = ktime_get_real_seconds();
if (x->curlft.bytes >= x->lft.hard_byte_limit ||
x->curlft.packets >= x->lft.hard_packet_limit) {
@@ -1619,13 +1639,13 @@ EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
- u8 proto, const xfrm_address_t *daddr,
+ u32 if_id, u8 proto, const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create, unsigned short family)
{
struct xfrm_state *x;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
- x = __find_acq_core(net, mark, family, mode, reqid, proto, daddr, saddr, create);
+ x = __find_acq_core(net, mark, family, mode, reqid, if_id, proto, daddr, saddr, create);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return x;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 33878e6e0d0a..4791aa8b8185 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -527,6 +527,19 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
x->replay_maxdiff = nla_get_u32(rt);
}
+static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m)
+{
+ if (attrs[XFRMA_SET_MARK]) {
+ m->v = nla_get_u32(attrs[XFRMA_SET_MARK]);
+ if (attrs[XFRMA_SET_MARK_MASK])
+ m->m = nla_get_u32(attrs[XFRMA_SET_MARK_MASK]);
+ else
+ m->m = 0xffffffff;
+ } else {
+ m->v = m->m = 0;
+ }
+}
+
static struct xfrm_state *xfrm_state_construct(struct net *net,
struct xfrm_usersa_info *p,
struct nlattr **attrs,
@@ -579,8 +592,10 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
xfrm_mark_get(attrs, &x->mark);
- if (attrs[XFRMA_OUTPUT_MARK])
- x->props.output_mark = nla_get_u32(attrs[XFRMA_OUTPUT_MARK]);
+ xfrm_smark_init(attrs, &x->props.smark);
+
+ if (attrs[XFRMA_IF_ID])
+ x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]);
if (err)
@@ -824,6 +839,18 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
return 0;
}
+static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
+{
+ int ret = 0;
+
+ if (m->v | m->m) {
+ ret = nla_put_u32(skb, XFRMA_SET_MARK, m->v);
+ if (!ret)
+ ret = nla_put_u32(skb, XFRMA_SET_MARK_MASK, m->m);
+ }
+ return ret;
+}
+
/* Don't change this without updating xfrm_sa_len! */
static int copy_to_user_state_extra(struct xfrm_state *x,
struct xfrm_usersa_info *p,
@@ -887,6 +914,11 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
ret = xfrm_mark_put(skb, &x->mark);
if (ret)
goto out;
+
+ ret = xfrm_smark_put(skb, &x->props.smark);
+ if (ret)
+ goto out;
+
if (x->replay_esn)
ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
xfrm_replay_state_esn_len(x->replay_esn),
@@ -900,8 +932,8 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
ret = copy_user_offload(&x->xso, skb);
if (ret)
goto out;
- if (x->props.output_mark) {
- ret = nla_put_u32(skb, XFRMA_OUTPUT_MARK, x->props.output_mark);
+ if (x->if_id) {
+ ret = nla_put_u32(skb, XFRMA_IF_ID, x->if_id);
if (ret)
goto out;
}
@@ -1255,6 +1287,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
u32 mark;
struct xfrm_mark m;
+ u32 if_id = 0;
p = nlmsg_data(nlh);
err = verify_spi_info(p->info.id.proto, p->min, p->max);
@@ -1267,6 +1300,10 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
x = NULL;
mark = xfrm_mark_get(attrs, &m);
+
+ if (attrs[XFRMA_IF_ID])
+ if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+
if (p->info.seq) {
x = xfrm_find_acq_byseq(net, mark, p->info.seq);
if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
@@ -1277,7 +1314,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!x)
x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
- p->info.id.proto, daddr,
+ if_id, p->info.id.proto, daddr,
&p->info.saddr, 1,
family);
err = -ENOENT;
@@ -1565,6 +1602,9 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us
xfrm_mark_get(attrs, &xp->mark);
+ if (attrs[XFRMA_IF_ID])
+ xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+
return xp;
error:
*errp = err;
@@ -1712,6 +1752,8 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
+ if (!err)
+ err = xfrm_if_id_put(skb, xp->if_id);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
@@ -1793,6 +1835,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
int delete;
struct xfrm_mark m;
u32 mark = xfrm_mark_get(attrs, &m);
+ u32 if_id = 0;
p = nlmsg_data(nlh);
delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
@@ -1805,8 +1848,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
return err;
+ if (attrs[XFRMA_IF_ID])
+ if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+
if (p->index)
- xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
+ xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, delete, &err);
else {
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
@@ -1823,7 +1869,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
return err;
}
- xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
+ xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel,
ctx, delete, &err);
security_xfrm_policy_free(ctx);
}
@@ -1946,6 +1992,10 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
if (err)
goto out_cancel;
+ err = xfrm_if_id_put(skb, x->if_id);
+ if (err)
+ goto out_cancel;
+
nlmsg_end(skb, nlh);
return 0;
@@ -2088,6 +2138,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
int err = -ENOENT;
struct xfrm_mark m;
u32 mark = xfrm_mark_get(attrs, &m);
+ u32 if_id = 0;
err = copy_from_user_policy_type(&type, attrs);
if (err)
@@ -2097,8 +2148,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
return err;
+ if (attrs[XFRMA_IF_ID])
+ if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+
if (p->index)
- xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
+ xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err);
else {
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
@@ -2115,7 +2169,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
return err;
}
- xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
+ xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir,
&p->sel, ctx, 0, &err);
security_xfrm_policy_free(ctx);
}
@@ -2497,7 +2551,9 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_PROTO] = { .type = NLA_U8 },
[XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
[XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) },
- [XFRMA_OUTPUT_MARK] = { .type = NLA_U32 },
+ [XFRMA_SET_MARK] = { .type = NLA_U32 },
+ [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 },
+ [XFRMA_IF_ID] = { .type = NLA_U32 },
};
static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
@@ -2629,6 +2685,10 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
if (err)
return err;
+ err = xfrm_if_id_put(skb, x->if_id);
+ if (err)
+ return err;
+
nlmsg_end(skb, nlh);
return 0;
}
@@ -2723,8 +2783,12 @@ static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
l += nla_total_size(sizeof(x->props.extra_flags));
if (x->xso.dev)
l += nla_total_size(sizeof(x->xso));
- if (x->props.output_mark)
- l += nla_total_size(sizeof(x->props.output_mark));
+ if (x->props.smark.v | x->props.smark.m) {
+ l += nla_total_size(sizeof(x->props.smark.v));
+ l += nla_total_size(sizeof(x->props.smark.m));
+ }
+ if (x->if_id)
+ l += nla_total_size(sizeof(x->if_id));
/* Must count x->lastused as it may become non-zero behind our back. */
l += nla_total_size_64bit(sizeof(u64));
@@ -2854,6 +2918,8 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
+ if (!err)
+ err = xfrm_if_id_put(skb, xp->if_id);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
@@ -2970,6 +3036,8 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
+ if (!err)
+ err = xfrm_if_id_put(skb, xp->if_id);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
@@ -3051,6 +3119,8 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
err = copy_to_user_policy_type(xp->type, skb);
if (!err)
err = xfrm_mark_put(skb, &xp->mark);
+ if (!err)
+ err = xfrm_if_id_put(skb, xp->if_id);
if (err)
goto out_free_skb;
@@ -3284,4 +3354,3 @@ module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
-
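
Note: xfrm_smark_init() above makes XFRMA_SET_MARK_MASK optional -- a
bare XFRMA_SET_MARK behaves as a full-mask override. The standalone demo
below applies the same combine rule used by xfrm_smark_get() in
xfrm_output.c; smark_apply() and all values are illustrative stand-ins.

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t smark_apply(uint32_t mark, uint32_t v, uint32_t m)
	{
		return (v & m) | (mark & ~m);	/* replace only masked bits */
	}

	int main(void)
	{
		/* mask omitted by user space -> kernel defaults to 0xffffffff */
		printf("%#x\n", smark_apply(0x1234, 0xdead0000, 0xffffffff));
		/* partial mask: low 16 bits of the original mark survive */
		printf("%#x\n", smark_apply(0x1234, 0xdead0000, 0xffff0000));
		return 0;
	}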
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 1303af10e54d..36f9f41d094b 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -52,6 +52,7 @@ hostprogs-y += xdp_adjust_tail
hostprogs-y += xdpsock
hostprogs-y += xdp_fwd
hostprogs-y += task_fd_query
+hostprogs-y += xdp_sample_pkts
# Libbpf dependencies
LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a
@@ -104,9 +105,10 @@ xdp_rxq_info-objs := xdp_rxq_info_user.o
syscall_tp-objs := bpf_load.o syscall_tp_user.o
cpustat-objs := bpf_load.o cpustat_user.o
xdp_adjust_tail-objs := xdp_adjust_tail_user.o
-xdpsock-objs := bpf_load.o xdpsock_user.o
-xdp_fwd-objs := bpf_load.o xdp_fwd_user.o
+xdpsock-objs := xdpsock_user.o
+xdp_fwd-objs := xdp_fwd_user.o
task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS)
+xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
# Tell kbuild to always build the programs
always := $(hostprogs-y)
@@ -163,12 +165,13 @@ always += xdp_adjust_tail_kern.o
always += xdpsock_kern.o
always += xdp_fwd_kern.o
always += task_fd_query_kern.o
+always += xdp_sample_pkts_kern.o
-HOSTCFLAGS += -I$(objtree)/usr/include
-HOSTCFLAGS += -I$(srctree)/tools/lib/
-HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
-HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include
-HOSTCFLAGS += -I$(srctree)/tools/perf
+KBUILD_HOSTCFLAGS += -I$(objtree)/usr/include
+KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/
+KBUILD_HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
+KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include
+KBUILD_HOSTCFLAGS += -I$(srctree)/tools/perf
HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
HOSTCFLAGS_trace_helpers.o += -I$(srctree)/tools/lib/bpf/
@@ -179,18 +182,21 @@ HOSTCFLAGS_spintest_user.o += -I$(srctree)/tools/lib/bpf/
HOSTCFLAGS_trace_event_user.o += -I$(srctree)/tools/lib/bpf/
HOSTCFLAGS_sampleip_user.o += -I$(srctree)/tools/lib/bpf/
HOSTCFLAGS_task_fd_query_user.o += -I$(srctree)/tools/lib/bpf/
+HOSTCFLAGS_xdp_sample_pkts_user.o += -I$(srctree)/tools/lib/bpf/
-HOST_LOADLIBES += $(LIBBPF) -lelf
-HOSTLOADLIBES_tracex4 += -lrt
-HOSTLOADLIBES_trace_output += -lrt
-HOSTLOADLIBES_map_perf_test += -lrt
-HOSTLOADLIBES_test_overhead += -lrt
-HOSTLOADLIBES_xdpsock += -pthread
+KBUILD_HOSTLDLIBS += $(LIBBPF) -lelf
+HOSTLDLIBS_tracex4 += -lrt
+HOSTLDLIBS_trace_output += -lrt
+HOSTLDLIBS_map_perf_test += -lrt
+HOSTLDLIBS_test_overhead += -lrt
+HOSTLDLIBS_xdpsock += -pthread
# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
LLC ?= llc
CLANG ?= clang
+LLVM_OBJCOPY ?= llvm-objcopy
+BTF_PAHOLE ?= pahole
# Detect that we're cross compiling and use the cross compiler
ifdef CROSS_COMPILE
@@ -198,6 +204,16 @@ HOSTCC = $(CROSS_COMPILE)gcc
CLANG_ARCH_ARGS = -target $(ARCH)
endif
+BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
+BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
+BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
+
+ifneq ($(and $(BTF_LLC_PROBE),$(BTF_PAHOLE_PROBE),$(BTF_OBJCOPY_PROBE)),)
+ EXTRA_CFLAGS += -g
+ LLC_FLAGS += -mattr=dwarfris
+ DWARF2BTF = y
+endif
+
# Trick to allow make to be run from this directory
all:
$(MAKE) -C ../../ $(CURDIR)/ BPF_SAMPLES_PATH=$(CURDIR)
@@ -256,4 +272,7 @@ $(obj)/%.o: $(src)/%.c
-Wno-gnu-variable-sized-type-not-at-end \
-Wno-address-of-packed-member -Wno-tautological-compare \
-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
- -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
+ -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
+ifeq ($(DWARF2BTF),y)
+ $(BTF_PAHOLE) -J $@
+endif
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 89161c9ed466..904e775d1a44 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -107,6 +107,9 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
return -1;
}
+ if (prog_cnt == MAX_PROGS)
+ return -1;
+
fd = bpf_load_program(prog_type, prog, insns_cnt, license, kern_version,
bpf_log_buf, BPF_LOG_BUF_SIZE);
if (fd < 0) {
diff --git a/samples/bpf/hash_func01.h b/samples/bpf/hash_func01.h
new file mode 100644
index 000000000000..38255812e376
--- /dev/null
+++ b/samples/bpf/hash_func01.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: LGPL-2.1
+ *
+ * Based on Paul Hsieh's (LGPL 2.1) hash function
+ * From: http://www.azillionmonkeys.com/qed/hash.html
+ */
+
+#define get16bits(d) (*((const __u16 *) (d)))
+
+static __always_inline
+__u32 SuperFastHash (const char *data, int len, __u32 initval) {
+ __u32 hash = initval;
+ __u32 tmp;
+ int rem;
+
+ if (len <= 0 || data == NULL) return 0;
+
+ rem = len & 3;
+ len >>= 2;
+
+ /* Main loop */
+#pragma clang loop unroll(full)
+ for (;len > 0; len--) {
+ hash += get16bits (data);
+ tmp = (get16bits (data+2) << 11) ^ hash;
+ hash = (hash << 16) ^ tmp;
+ data += 2*sizeof (__u16);
+ hash += hash >> 11;
+ }
+
+ /* Handle end cases */
+ switch (rem) {
+ case 3: hash += get16bits (data);
+ hash ^= hash << 16;
+ hash ^= ((signed char)data[sizeof (__u16)]) << 18;
+ hash += hash >> 11;
+ break;
+ case 2: hash += get16bits (data);
+ hash ^= hash << 11;
+ hash += hash >> 17;
+ break;
+ case 1: hash += (signed char)*data;
+ hash ^= hash << 10;
+ hash += hash >> 1;
+ }
+
+ /* Force "avalanching" of final 127 bits */
+ hash ^= hash << 3;
+ hash += hash >> 5;
+ hash ^= hash << 4;
+ hash += hash >> 17;
+ hash ^= hash << 25;
+ hash += hash >> 6;
+
+ return hash;
+}
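
Note: a hypothetical user-space smoke test for the header above; it
shows the symmetry the XDP load balancer later in this patch relies on
(ip1 + ip2 == ip2 + ip1, so both directions feed the same bytes to the
hash). Assumes it is compiled from samples/bpf so hash_func01.h is
found; the __always_inline define is a host-build shim, and gcc will
only warn about the clang unroll pragma.

	#include <linux/types.h>
	#include <stdio.h>

	#define __always_inline inline __attribute__((always_inline))
	#include "hash_func01.h"

	int main(void)
	{
		__u32 ip1 = 0x0a000001, ip2 = 0x0a000002;	/* 10.0.0.1/.2 */
		__u32 fwd = ip1 + ip2, rev = ip2 + ip1;

		/* identical input words -> identical hashes */
		printf("%u %u\n", SuperFastHash((char *)&fwd, 4, 17),
		       SuperFastHash((char *)&rev, 4, 17));
		return 0;
	}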
diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c
index b453e6a161be..180f9d813bca 100644
--- a/samples/bpf/test_cgrp2_attach2.c
+++ b/samples/bpf/test_cgrp2_attach2.c
@@ -8,7 +8,8 @@
* information. The number of invocations of the program, which maps
* to the number of packets received, is stored to key 0. Key 1 is
* incremented on each iteration by the number of bytes stored in
- * the skb.
+ * the skb. The program also stores the number of received bytes
+ * in the cgroup storage.
*
* - Attaches the new program to a cgroup using BPF_PROG_ATTACH
*
@@ -21,12 +22,15 @@
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
+#include <sys/resource.h>
+#include <sys/time.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include "bpf_insn.h"
+#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
#define FOO "/foo"
@@ -205,6 +209,8 @@ static int map_fd = -1;
static int prog_load_cnt(int verdict, int val)
{
+ int cgroup_storage_fd;
+
if (map_fd < 0)
map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
if (map_fd < 0) {
@@ -212,6 +218,13 @@ static int prog_load_cnt(int verdict, int val)
return -1;
}
+ cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE,
+ sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
+ if (cgroup_storage_fd < 0) {
+ printf("failed to create map '%s'\n", strerror(errno));
+ return -1;
+ }
+
struct bpf_insn prog[] = {
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
@@ -222,6 +235,11 @@ static int prog_load_cnt(int verdict, int val)
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+ BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_MOV64_IMM(BPF_REG_1, val),
+ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
@@ -237,6 +255,7 @@ static int prog_load_cnt(int verdict, int val)
printf("Output from verifier:\n%s\n-------\n", bpf_log_buf);
return 0;
}
+ close(cgroup_storage_fd);
return ret;
}
diff --git a/samples/bpf/test_cgrp2_sock2.c b/samples/bpf/test_cgrp2_sock2.c
index 3b5be2364975..a9277b118c33 100644
--- a/samples/bpf/test_cgrp2_sock2.c
+++ b/samples/bpf/test_cgrp2_sock2.c
@@ -51,7 +51,7 @@ int main(int argc, char **argv)
if (argc > 3)
filter_id = atoi(argv[3]);
- if (filter_id > prog_cnt) {
+ if (filter_id >= prog_cnt) {
printf("Invalid program id; program not found in file\n");
return EXIT_FAILURE;
}
diff --git a/samples/bpf/xdp_fwd_user.c b/samples/bpf/xdp_fwd_user.c
index a87a2048ed32..f88e1d7093d6 100644
--- a/samples/bpf/xdp_fwd_user.c
+++ b/samples/bpf/xdp_fwd_user.c
@@ -24,8 +24,7 @@
#include <fcntl.h>
#include <libgen.h>
-#include "bpf_load.h"
-#include "bpf_util.h"
+#include "bpf/libbpf.h"
#include <bpf/bpf.h>
@@ -63,9 +62,15 @@ static void usage(const char *prog)
int main(int argc, char **argv)
{
+ struct bpf_prog_load_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_XDP,
+ };
+ const char *prog_name = "xdp_fwd";
+ struct bpf_program *prog;
char filename[PATH_MAX];
+ struct bpf_object *obj;
int opt, i, idx, err;
- int prog_id = 0;
+ int prog_fd, map_fd;
int attach = 1;
int ret = 0;
@@ -75,7 +80,7 @@ int main(int argc, char **argv)
attach = 0;
break;
case 'D':
- prog_id = 1;
+ prog_name = "xdp_fwd_direct";
break;
default:
usage(basename(argv[0]));
@@ -90,6 +95,7 @@ int main(int argc, char **argv)
if (attach) {
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ prog_load_attr.file = filename;
if (access(filename, O_RDONLY) < 0) {
printf("error accessing file %s: %s\n",
@@ -97,19 +103,25 @@ int main(int argc, char **argv)
return 1;
}
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
+ if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
return 1;
- }
- if (!prog_fd[prog_id]) {
- printf("load_bpf_file: %s\n", strerror(errno));
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog_fd = bpf_program__fd(prog);
+ if (prog_fd < 0) {
+ printf("program not found: %s\n", strerror(prog_fd));
+ return 1;
+ }
+ map_fd = bpf_map__fd(bpf_object__find_map_by_name(obj,
+ "tx_port"));
+ if (map_fd < 0) {
+ printf("map not found: %s\n", strerror(map_fd));
return 1;
}
}
if (attach) {
for (i = 1; i < 64; ++i)
- bpf_map_update_elem(map_fd[0], &i, &i, 0);
+ bpf_map_update_elem(map_fd, &i, &i, 0);
}
for (i = optind; i < argc; ++i) {
@@ -126,7 +138,7 @@ int main(int argc, char **argv)
if (err)
ret = err;
} else {
- err = do_attach(idx, prog_fd[prog_id], argv[i]);
+ err = do_attach(idx, prog_fd, argv[i]);
if (err)
ret = err;
}
diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
index 4938dcbaecbf..a306d1c75622 100644
--- a/samples/bpf/xdp_redirect_cpu_kern.c
+++ b/samples/bpf/xdp_redirect_cpu_kern.c
@@ -13,6 +13,7 @@
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
+#include "hash_func01.h"
#define MAX_CPUS 64 /* WARNING - sync with _user.c */
@@ -134,7 +135,16 @@ bool parse_eth(struct ethhdr *eth, void *data_end,
return false;
eth_type = vlan_hdr->h_vlan_encapsulated_proto;
}
- /* TODO: Handle double VLAN tagged packet */
+ /* Handle double VLAN tagged packet */
+ if (eth_type == htons(ETH_P_8021Q) || eth_type == htons(ETH_P_8021AD)) {
+ struct vlan_hdr *vlan_hdr;
+
+ vlan_hdr = (void *)eth + offset;
+ offset += sizeof(*vlan_hdr);
+ if ((void *)eth + offset > data_end)
+ return false;
+ eth_type = vlan_hdr->h_vlan_encapsulated_proto;
+ }
*eth_proto = ntohs(eth_type);
*l3_offset = offset;
@@ -452,6 +462,108 @@ int xdp_prognum4_ddos_filter_pktgen(struct xdp_md *ctx)
return bpf_redirect_map(&cpu_map, cpu_dest, 0);
}
+/* Hashing initval */
+#define INITVAL 15485863
+
+static __always_inline
+u32 get_ipv4_hash_ip_pair(struct xdp_md *ctx, u64 nh_off)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct iphdr *iph = data + nh_off;
+ u32 cpu_hash;
+
+ if (iph + 1 > data_end)
+ return 0;
+
+ cpu_hash = iph->saddr + iph->daddr;
+ cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + iph->protocol);
+
+ return cpu_hash;
+}
+
+static __always_inline
+u32 get_ipv6_hash_ip_pair(struct xdp_md *ctx, u64 nh_off)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct ipv6hdr *ip6h = data + nh_off;
+ u32 cpu_hash;
+
+ if (ip6h + 1 > data_end)
+ return 0;
+
+ cpu_hash = ip6h->saddr.s6_addr32[0] + ip6h->daddr.s6_addr32[0];
+ cpu_hash += ip6h->saddr.s6_addr32[1] + ip6h->daddr.s6_addr32[1];
+ cpu_hash += ip6h->saddr.s6_addr32[2] + ip6h->daddr.s6_addr32[2];
+ cpu_hash += ip6h->saddr.s6_addr32[3] + ip6h->daddr.s6_addr32[3];
+ cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + ip6h->nexthdr);
+
+ return cpu_hash;
+}
+
+/* Load-Balance traffic based on hashing IP-addrs + L4-proto. The
+ * hashing scheme is symmetric, meaning swapping IP src/dest still
+ * hits the same CPU.
+ */
+SEC("xdp_cpu_map5_lb_hash_ip_pairs")
+int xdp_prognum5_lb_hash_ip_pairs(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct ethhdr *eth = data;
+ u8 ip_proto = IPPROTO_UDP;
+ struct datarec *rec;
+ u16 eth_proto = 0;
+ u64 l3_offset = 0;
+ u32 cpu_dest = 0;
+ u32 cpu_idx = 0;
+ u32 *cpu_lookup;
+ u32 *cpu_max;
+ u32 cpu_hash;
+ u32 key = 0;
+
+ /* Count RX packet in map */
+ rec = bpf_map_lookup_elem(&rx_cnt, &key);
+ if (!rec)
+ return XDP_ABORTED;
+ rec->processed++;
+
+ cpu_max = bpf_map_lookup_elem(&cpus_count, &key);
+ if (!cpu_max)
+ return XDP_ABORTED;
+
+ if (!(parse_eth(eth, data_end, &eth_proto, &l3_offset)))
+ return XDP_PASS; /* Just skip */
+
+ /* Hash for IPv4 and IPv6 */
+ switch (eth_proto) {
+ case ETH_P_IP:
+ cpu_hash = get_ipv4_hash_ip_pair(ctx, l3_offset);
+ break;
+ case ETH_P_IPV6:
+ cpu_hash = get_ipv6_hash_ip_pair(ctx, l3_offset);
+ break;
+ case ETH_P_ARP: /* ARP packet handled on CPU idx 0 */
+ default:
+ cpu_hash = 0;
+ }
+
+ /* Choose CPU based on hash */
+ cpu_idx = cpu_hash % *cpu_max;
+
+ cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx);
+ if (!cpu_lookup)
+ return XDP_ABORTED;
+ cpu_dest = *cpu_lookup;
+
+ if (cpu_dest >= MAX_CPUS) {
+ rec->issue++;
+ return XDP_ABORTED;
+ }
+
+ return bpf_redirect_map(&cpu_map, cpu_dest, 0);
+}
char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index 4b4d78fffe30..9a6c7e0a6dd1 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -22,7 +22,7 @@ static const char *__doc__ =
#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
/* How many xdp_progs are defined in _kern.c */
-#define MAX_PROG 5
+#define MAX_PROG 6
/* Wanted to get rid of bpf_load.h and fake-"libbpf.h" (and instead
* use bpf/libbpf.h), but cannot as (currently) needed for XDP
@@ -567,7 +567,7 @@ int main(int argc, char **argv)
int added_cpus = 0;
int longindex = 0;
int interval = 2;
- int prog_num = 0;
+ int prog_num = 5;
int add_cpu = -1;
__u32 qsize;
int opt;
diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c
index 3fd209291653..222a83eed1cb 100644
--- a/samples/bpf/xdp_rxq_info_kern.c
+++ b/samples/bpf/xdp_rxq_info_kern.c
@@ -4,6 +4,8 @@
* Example howto extract XDP RX-queue info
*/
#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/in.h>
#include "bpf_helpers.h"
/* Config setup from with userspace
@@ -14,6 +16,12 @@
struct config {
__u32 action;
int ifindex;
+ __u32 options;
+};
+enum cfg_options_flags {
+ NO_TOUCH = 0x0U,
+ READ_MEM = 0x1U,
+ SWAP_MAC = 0x2U,
};
struct bpf_map_def SEC("maps") config_map = {
.type = BPF_MAP_TYPE_ARRAY,
@@ -45,6 +53,23 @@ struct bpf_map_def SEC("maps") rx_queue_index_map = {
.max_entries = MAX_RXQs + 1,
};
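+/* Swap source/destination MAC in place; needed before XDP_TX since
+ * hardware may drop frames that still carry the received MAC pair.
+ */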
+static __always_inline
+void swap_src_dst_mac(void *data)
+{
+ unsigned short *p = data;
+ unsigned short dst[3];
+
+ dst[0] = p[0];
+ dst[1] = p[1];
+ dst[2] = p[2];
+ p[0] = p[3];
+ p[1] = p[4];
+ p[2] = p[5];
+ p[3] = dst[0];
+ p[4] = dst[1];
+ p[5] = dst[2];
+}
+
SEC("xdp_prog0")
int xdp_prognum0(struct xdp_md *ctx)
{
@@ -90,6 +115,24 @@ int xdp_prognum0(struct xdp_md *ctx)
if (key == MAX_RXQs)
rxq_rec->issue++;
+ /* Default: Don't touch packet data, only count packets */
+ if (unlikely(config->options & (READ_MEM|SWAP_MAC))) {
+ struct ethhdr *eth = data;
+
+ if (eth + 1 > data_end)
+ return XDP_ABORTED;
+
+ /* Avoid compiler removing this: Drop non 802.3 Ethertypes */
+ if (ntohs(eth->h_proto) < ETH_P_802_3_MIN)
+ return XDP_ABORTED;
+
+ /* XDP_TX requires changing MAC-addrs, else HW may drop.
+ * Can also be enabled with --swapmac (for test purposes)
+ */
+ if (unlikely(config->options & SWAP_MAC))
+ swap_src_dst_mac(data);
+ }
+
return config->action;
}
diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
index e4e9ba52bff0..248a7eab9531 100644
--- a/samples/bpf/xdp_rxq_info_user.c
+++ b/samples/bpf/xdp_rxq_info_user.c
@@ -50,6 +50,8 @@ static const struct option long_options[] = {
{"sec", required_argument, NULL, 's' },
{"no-separators", no_argument, NULL, 'z' },
{"action", required_argument, NULL, 'a' },
+ {"readmem", no_argument, NULL, 'r' },
+ {"swapmac", no_argument, NULL, 'm' },
{0, 0, NULL, 0 }
};
@@ -66,6 +68,12 @@ static void int_exit(int sig)
struct config {
__u32 action;
int ifindex;
+ __u32 options;
+};
+enum cfg_options_flags {
+ NO_TOUCH = 0x0U,
+ READ_MEM = 0x1U,
+ SWAP_MAC = 0x2U,
};
#define XDP_ACTION_MAX (XDP_TX + 1)
#define XDP_ACTION_MAX_STRLEN 11
@@ -109,6 +117,18 @@ static void list_xdp_actions(void)
printf("\n");
}
+static char *options2str(enum cfg_options_flags flag)
+{
+ if (flag == NO_TOUCH)
+ return "no_touch";
+ if (flag & SWAP_MAC)
+ return "swapmac";
+ if (flag & READ_MEM)
+ return "read";
+ fprintf(stderr, "ERR: Unknown config option flags");
+ exit(EXIT_FAIL);
+}
+
static void usage(char *argv[])
{
int i;
@@ -305,7 +325,7 @@ static __u64 calc_errs_pps(struct datarec *r,
static void stats_print(struct stats_record *stats_rec,
struct stats_record *stats_prev,
- int action)
+ int action, __u32 cfg_opt)
{
unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
unsigned int nr_cpus = bpf_num_possible_cpus();
@@ -316,8 +336,8 @@ static void stats_print(struct stats_record *stats_rec,
int i;
/* Header */
- printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s\n",
- ifname, ifindex, action2str(action));
+ printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s options:%s\n",
+ ifname, ifindex, action2str(action), options2str(cfg_opt));
/* stats_global_map */
{
@@ -399,7 +419,7 @@ static inline void swap(struct stats_record **a, struct stats_record **b)
*b = tmp;
}
-static void stats_poll(int interval, int action)
+static void stats_poll(int interval, int action, __u32 cfg_opt)
{
struct stats_record *record, *prev;
@@ -410,7 +430,7 @@ static void stats_poll(int interval, int action)
while (1) {
swap(&prev, &record);
stats_collect(record);
- stats_print(record, prev, action);
+ stats_print(record, prev, action, cfg_opt);
sleep(interval);
}
@@ -421,6 +441,7 @@ static void stats_poll(int interval, int action)
int main(int argc, char **argv)
{
+ __u32 cfg_options = NO_TOUCH; /* Default: Don't touch packet memory */
struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
struct bpf_prog_load_attr prog_load_attr = {
.prog_type = BPF_PROG_TYPE_XDP,
@@ -435,6 +456,7 @@ int main(int argc, char **argv)
int interval = 2;
__u32 key = 0;
+
char action_str_buf[XDP_ACTION_MAX_STRLEN + 1 /* for \0 */] = { 0 };
int action = XDP_PASS; /* Default action */
char *action_str = NULL;
@@ -496,6 +518,12 @@ int main(int argc, char **argv)
action_str = (char *)&action_str_buf;
strncpy(action_str, optarg, XDP_ACTION_MAX_STRLEN);
break;
+ case 'r':
+ cfg_options |= READ_MEM;
+ break;
+ case 'm':
+ cfg_options |= SWAP_MAC;
+ break;
case 'h':
error:
default:
@@ -523,6 +551,11 @@ int main(int argc, char **argv)
}
cfg.action = action;
+ /* XDP_TX requires changing MAC-addrs, else HW may drop */
+ if (action == XDP_TX)
+ cfg_options |= SWAP_MAC;
+ cfg.options = cfg_options;
+
 /* Trick to pretty-print with thousands separators: use %' */
if (use_separators)
setlocale(LC_NUMERIC, "en_US");
@@ -542,6 +575,6 @@ int main(int argc, char **argv)
return EXIT_FAIL_XDP;
}
- stats_poll(interval, action);
+ stats_poll(interval, action, cfg_options);
return EXIT_OK;
}
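
The struct config and enum cfg_options_flags definitions are duplicated from _kern.c above because the kernel program reads the struct byte-for-byte out of config_map at key 0. A hedged sketch of pushing the parsed options to the map, assuming config_map has been resolved to a struct bpf_map * the same way rx_queue_index_map is in this file:

	struct config cfg = {
		.action = XDP_PASS,
		.options = READ_MEM | SWAP_MAC,
	};
	__u32 key = 0;

	if (bpf_map_update_elem(bpf_map__fd(config_map), &key, &cfg, 0)) {
		fprintf(stderr, "ERR: updating config_map failed\n");
		exit(EXIT_FAIL);
	}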
diff --git a/samples/bpf/xdp_sample_pkts_kern.c b/samples/bpf/xdp_sample_pkts_kern.c
new file mode 100644
index 000000000000..f7ca8b850978
--- /dev/null
+++ b/samples/bpf/xdp_sample_pkts_kern.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+#define SAMPLE_SIZE 64ul
+#define MAX_CPUS 128
+
+#define bpf_printk(fmt, ...) \
+({ \
+ char ____fmt[] = fmt; \
+ bpf_trace_printk(____fmt, sizeof(____fmt), \
+ ##__VA_ARGS__); \
+})
+
+struct bpf_map_def SEC("maps") my_map = {
+ .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(u32),
+ .max_entries = MAX_CPUS,
+};
+
+SEC("xdp_sample")
+int xdp_sample_prog(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+
+ /* Metadata will be in the perf event before the packet data. */
+ struct S {
+ u16 cookie;
+ u16 pkt_len;
+ } __packed metadata;
+
+ if (data < data_end) {
+ /* The XDP perf_event_output handler will use the upper 32 bits
+ * of the flags argument as a number of bytes to include of the
+ * packet payload in the event data. If the size is too big, the
+ * call to bpf_perf_event_output will fail and return -EFAULT.
+ *
+ * See bpf_xdp_event_output in net/core/filter.c.
+ *
+ * The BPF_F_CURRENT_CPU flag means that the event output fd
+ * will be indexed by the CPU number in the event map.
+ */
+ u64 flags = BPF_F_CURRENT_CPU;
+ u16 sample_size;
+ int ret;
+
+ metadata.cookie = 0xdead;
+ metadata.pkt_len = (u16)(data_end - data);
+ sample_size = min(metadata.pkt_len, SAMPLE_SIZE);
+ flags |= (u64)sample_size << 32;
+
+ ret = bpf_perf_event_output(ctx, &my_map, flags,
+ &metadata, sizeof(metadata));
+ if (ret)
+ bpf_printk("perf_event_output failed: %d\n", ret);
+ }
+
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c
new file mode 100644
index 000000000000..8dd87c1eb560
--- /dev/null
+++ b/samples/bpf/xdp_sample_pkts_user.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include <net/if.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/sysinfo.h>
+#include <sys/ioctl.h>
+#include <signal.h>
+#include <libbpf.h>
+#include <bpf/bpf.h>
+
+#include "perf-sys.h"
+#include "trace_helpers.h"
+
+#define MAX_CPUS 128
+static int pmu_fds[MAX_CPUS], if_idx;
+static struct perf_event_mmap_page *headers[MAX_CPUS];
+static char *if_name;
+
+static int do_attach(int idx, int fd, const char *name)
+{
+ int err;
+
+ err = bpf_set_link_xdp_fd(idx, fd, 0);
+ if (err < 0)
+ printf("ERROR: failed to attach program to %s\n", name);
+
+ return err;
+}
+
+static int do_detach(int idx, const char *name)
+{
+ int err;
+
+ err = bpf_set_link_xdp_fd(idx, -1, 0);
+ if (err < 0)
+ printf("ERROR: failed to detach program from %s\n", name);
+
+ return err;
+}
+
+#define SAMPLE_SIZE 64
+
+static int print_bpf_output(void *data, int size)
+{
+ struct {
+ __u16 cookie;
+ __u16 pkt_len;
+ __u8 pkt_data[SAMPLE_SIZE];
+ } __packed *e = data;
+ int i;
+
+ if (e->cookie != 0xdead) {
+ printf("BUG cookie %x sized %d\n",
+ e->cookie, size);
+ return LIBBPF_PERF_EVENT_ERROR;
+ }
+
+ printf("Pkt len: %-5d bytes. Ethernet hdr: ", e->pkt_len);
+ for (i = 0; i < 14 && i < e->pkt_len; i++)
+ printf("%02x ", e->pkt_data[i]);
+ printf("\n");
+
+ return LIBBPF_PERF_EVENT_CONT;
+}
+
+static void test_bpf_perf_event(int map_fd, int num)
+{
+ struct perf_event_attr attr = {
+ .sample_type = PERF_SAMPLE_RAW,
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_BPF_OUTPUT,
+ .wakeup_events = 1, /* get an fd notification for every event */
+ };
+ int i;
+
+ for (i = 0; i < num; i++) {
+ int key = i;
+
+ pmu_fds[i] = sys_perf_event_open(&attr, -1/*pid*/, i/*cpu*/,
+ -1/*group_fd*/, 0);
+
+ assert(pmu_fds[i] >= 0);
+ assert(bpf_map_update_elem(map_fd, &key,
+ &pmu_fds[i], BPF_ANY) == 0);
+ ioctl(pmu_fds[i], PERF_EVENT_IOC_ENABLE, 0);
+ }
+}
+
+static void sig_handler(int signo)
+{
+ do_detach(if_idx, if_name);
+ exit(0);
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_prog_load_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_XDP,
+ };
+ struct bpf_object *obj;
+ struct bpf_map *map;
+ int prog_fd, map_fd;
+ char filename[256];
+ int ret, err, i;
+ int numcpus;
+
+ if (argc < 2) {
+ printf("Usage: %s <ifname>\n", argv[0]);
+ return 1;
+ }
+
+ numcpus = get_nprocs();
+ if (numcpus > MAX_CPUS)
+ numcpus = MAX_CPUS;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ prog_load_attr.file = filename;
+
+ if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+ return 1;
+
+ if (!prog_fd) {
+ printf("load_bpf_file: %s\n", strerror(errno));
+ return 1;
+ }
+
+ map = bpf_map__next(NULL, obj);
+ if (!map) {
+ printf("finding a map in obj file failed\n");
+ return 1;
+ }
+ map_fd = bpf_map__fd(map);
+
+ if_idx = if_nametoindex(argv[1]);
+ if (!if_idx)
+ if_idx = strtoul(argv[1], NULL, 0);
+
+ if (!if_idx) {
+ fprintf(stderr, "Invalid ifname\n");
+ return 1;
+ }
+ if_name = argv[1];
+ err = do_attach(if_idx, prog_fd, argv[1]);
+ if (err)
+ return err;
+
+ if (signal(SIGINT, sig_handler) ||
+ signal(SIGHUP, sig_handler) ||
+ signal(SIGTERM, sig_handler)) {
+ perror("signal");
+ return 1;
+ }
+
+ test_bpf_perf_event(map_fd, numcpus);
+
+ for (i = 0; i < numcpus; i++)
+ if (perf_event_mmap_header(pmu_fds[i], &headers[i]) < 0)
+ return 1;
+
+ ret = perf_event_poller_multi(pmu_fds, headers, numcpus,
+ print_bpf_output);
+ kill(0, SIGINT);
+ return ret;
+}
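
Per the argv check above the only argument is an interface, so a run would look like ./xdp_sample_pkts eth0 (interface name illustrative): each event prints the packet length plus the first 14 header bytes, and SIGINT/SIGHUP/SIGTERM detach the XDP program before exiting.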
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index 5904b1543831..4914788b6727 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -26,7 +26,7 @@
#include <sys/types.h>
#include <poll.h>
-#include "bpf_load.h"
+#include "bpf/libbpf.h"
#include "bpf_util.h"
#include <bpf/bpf.h>
@@ -145,8 +145,13 @@ static void dump_stats(void);
} while (0)
#define barrier() __asm__ __volatile__("": : :"memory")
+#ifdef __aarch64__
+#define u_smp_rmb() __asm__ __volatile__("dmb ishld": : :"memory")
+#define u_smp_wmb() __asm__ __volatile__("dmb ishst": : :"memory")
+#else
#define u_smp_rmb() barrier()
#define u_smp_wmb() barrier()
+#endif
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
@@ -886,7 +891,13 @@ static void l2fwd(struct xdpsock *xsk)
int main(int argc, char **argv)
{
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ struct bpf_prog_load_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_XDP,
+ };
+ int prog_fd, qidconf_map, xsks_map;
+ struct bpf_object *obj;
char xdp_filename[256];
+ struct bpf_map *map;
int i, ret, key = 0;
pthread_t pt;
@@ -899,24 +910,38 @@ int main(int argc, char **argv)
}
snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
+ prog_load_attr.file = xdp_filename;
- if (load_bpf_file(xdp_filename)) {
- fprintf(stderr, "ERROR: load_bpf_file %s\n", bpf_log_buf);
+ if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+ exit(EXIT_FAILURE);
+ if (prog_fd < 0) {
+ fprintf(stderr, "ERROR: no program found: %s\n",
+ strerror(prog_fd));
exit(EXIT_FAILURE);
}
- if (!prog_fd[0]) {
- fprintf(stderr, "ERROR: load_bpf_file: \"%s\"\n",
- strerror(errno));
+ map = bpf_object__find_map_by_name(obj, "qidconf_map");
+ qidconf_map = bpf_map__fd(map);
+ if (qidconf_map < 0) {
+ fprintf(stderr, "ERROR: no qidconf map found: %s\n",
+ strerror(qidconf_map));
+ exit(EXIT_FAILURE);
+ }
+
+ map = bpf_object__find_map_by_name(obj, "xsks_map");
+ xsks_map = bpf_map__fd(map);
+ if (xsks_map < 0) {
+ fprintf(stderr, "ERROR: no xsks map found: %s\n",
+ strerror(xsks_map));
exit(EXIT_FAILURE);
}
- if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd[0], opt_xdp_flags) < 0) {
+ if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd, opt_xdp_flags) < 0) {
fprintf(stderr, "ERROR: link set xdp fd failed\n");
exit(EXIT_FAILURE);
}
- ret = bpf_map_update_elem(map_fd[0], &key, &opt_queue, 0);
+ ret = bpf_map_update_elem(qidconf_map, &key, &opt_queue, 0);
if (ret) {
fprintf(stderr, "ERROR: bpf_map_update_elem qidconf\n");
exit(EXIT_FAILURE);
@@ -933,7 +958,7 @@ int main(int argc, char **argv)
/* ...and insert them into the map. */
for (i = 0; i < num_socks; i++) {
key = i;
- ret = bpf_map_update_elem(map_fd[1], &key, &xsks[i]->sfd, 0);
+ ret = bpf_map_update_elem(xsks_map, &key, &xsks[i]->sfd, 0);
if (ret) {
fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
exit(EXIT_FAILURE);
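
The dmb ishld/ishst barriers added for aarch64 order the ring-index accesses against the descriptor accesses, where x86's stronger memory model needs only the compiler barrier(). A hedged sketch of how such barriers pair up on a single-producer/single-consumer ring (hypothetical layout, not the AF_XDP structures):

struct spsc_ring { /* hypothetical; mask == size - 1 */
	unsigned int prod, cons, mask;
	unsigned long long slots[64];
};

static void ring_put(struct spsc_ring *r, unsigned long long v)
{
	r->slots[r->prod & r->mask] = v;
	u_smp_wmb(); /* publish the slot before the index */
	r->prod++;
}

static int ring_get(struct spsc_ring *r, unsigned long long *v)
{
	if (r->cons == r->prod) /* loads the producer index */
		return -1;
	u_smp_rmb(); /* index load ordered before the slot load */
	*v = r->slots[r->cons & r->mask];
	r->cons++;
	return 0;
}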
diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile
index ba942e3ead89..cf34ff6b4065 100644
--- a/samples/seccomp/Makefile
+++ b/samples/seccomp/Makefile
@@ -30,9 +30,9 @@ HOSTCFLAGS_bpf-direct.o += $(MFLAG)
HOSTCFLAGS_dropper.o += $(MFLAG)
HOSTCFLAGS_bpf-helper.o += $(MFLAG)
HOSTCFLAGS_bpf-fancy.o += $(MFLAG)
-HOSTLOADLIBES_bpf-direct += $(MFLAG)
-HOSTLOADLIBES_bpf-fancy += $(MFLAG)
-HOSTLOADLIBES_dropper += $(MFLAG)
+HOSTLDLIBS_bpf-direct += $(MFLAG)
+HOSTLDLIBS_bpf-fancy += $(MFLAG)
+HOSTLDLIBS_dropper += $(MFLAG)
endif
always := $(hostprogs-m)
endif
diff --git a/scripts/.gitignore b/scripts/.gitignore
index 0442c06eefcb..12d302d70128 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -1,6 +1,7 @@
#
# Generated files
#
+bin2c
conmakehash
kallsyms
pnmtologo
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 86321f06461e..7c98f60e2266 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -56,7 +56,7 @@ kecho := $($(quiet)kecho)
define filechk
$(Q)set -e; \
mkdir -p $(dir $@); \
- $(filechk_$(1)) < $< > $@.tmp; \
+ $(filechk_$(1)) > $@.tmp; \
if [ -r $@ ] && cmp -s $@ $@.tmp; then \
rm -f $@.tmp; \
else \
@@ -126,7 +126,7 @@ cc-option = $(call __cc-option, $(CC),\
# hostcc-option
# Usage: cflags-y += $(call hostcc-option,-march=winchip-c6,-march=i586)
hostcc-option = $(call __cc-option, $(HOSTCC),\
- $(HOSTCFLAGS) $(HOST_EXTRACFLAGS),$(1),$(2))
+ $(KBUILD_HOSTCFLAGS) $(HOST_EXTRACFLAGS),$(1),$(2))
# cc-option-yn
# Usage: flag := $(call cc-option-yn,-march=winchip-c6)
@@ -163,8 +163,8 @@ cc-ldoption = $(call try-run,\
$(CC) $(1) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
# ld-option
-# Usage: LDFLAGS += $(call ld-option, -X)
-ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2))
+# Usage: LDFLAGS += $(call ld-option, -X, -Y)
+ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2),$(3))
# ar-option
# Usage: KBUILD_ARFLAGS := $(call ar-option,D)
@@ -400,3 +400,6 @@ endif
endef
#
###############################################################################
+
+# delete partially updated (i.e. corrupted) files on error
+.DELETE_ON_ERROR:
diff --git a/scripts/Makefile b/scripts/Makefile
index 25ab143cbe14..61affa300d25 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -10,6 +10,7 @@
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+hostprogs-$(CONFIG_BUILD_BIN2C) += bin2c
hostprogs-$(CONFIG_KALLSYMS) += kallsyms
hostprogs-$(CONFIG_LOGO) += pnmtologo
hostprogs-$(CONFIG_VT) += conmakehash
@@ -22,8 +23,8 @@ hostprogs-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert
HOSTCFLAGS_sortextable.o = -I$(srctree)/tools/include
HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
-HOSTLOADLIBES_sign-file = -lcrypto
-HOSTLOADLIBES_extract-cert = -lcrypto
+HOSTLDLIBS_sign-file = -lcrypto
+HOSTLDLIBS_extract-cert = -lcrypto
always := $(hostprogs-y) $(hostprogs-m)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 514ed63ff571..1adaac4e10d8 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -53,13 +53,6 @@ endif
include scripts/Makefile.lib
-ifdef host-progs
-ifneq ($(hostprogs-y),$(host-progs))
-$(warning kbuild: $(obj)/Makefile - Usage of host-progs is deprecated. Please replace with hostprogs-y!)
-hostprogs-y += $(host-progs)
-endif
-endif
-
# Do not include host rules unless needed
ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
include scripts/Makefile.host
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
index 17ef94c635cd..0b80e3207b20 100644
--- a/scripts/Makefile.clean
+++ b/scripts/Makefile.clean
@@ -38,7 +38,6 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
__clean-files := $(extra-y) $(extra-m) $(extra-) \
$(always) $(targets) $(clean-files) \
- $(host-progs) \
$(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
$(hostlibs-y) $(hostlibs-m) $(hostlibs-) \
$(hostcxxlibs-y) $(hostcxxlibs-m)
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index c961b9a65d11..0a482f341576 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -1,33 +1,44 @@
# SPDX-License-Identifier: GPL-2.0
+
gcc-plugin-$(CONFIG_GCC_PLUGIN_CYC_COMPLEXITY) += cyc_complexity_plugin.so
gcc-plugin-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) += latent_entropy_plugin.so
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) += -DLATENT_ENTROPY_PLUGIN
+gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) \
+ += -DLATENT_ENTROPY_PLUGIN
ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
- DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable
+ DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable
endif
+export DISABLE_LATENT_ENTROPY_PLUGIN
gcc-plugin-$(CONFIG_GCC_PLUGIN_SANCOV) += sancov_plugin.so
+
gcc-plugin-$(CONFIG_GCC_PLUGIN_STRUCTLEAK) += structleak_plugin.so
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_VERBOSE) += -fplugin-arg-structleak_plugin-verbose
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL) += -fplugin-arg-structleak_plugin-byref-all
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK) += -DSTRUCTLEAK_PLUGIN
+gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_VERBOSE) \
+ += -fplugin-arg-structleak_plugin-verbose
+gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL) \
+ += -fplugin-arg-structleak_plugin-byref-all
+gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK) \
+ += -DSTRUCTLEAK_PLUGIN
gcc-plugin-$(CONFIG_GCC_PLUGIN_RANDSTRUCT) += randomize_layout_plugin.so
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT) += -DRANDSTRUCT_PLUGIN
-gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT_PERFORMANCE) += -fplugin-arg-randomize_layout_plugin-performance-mode
+gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT) \
+ += -DRANDSTRUCT_PLUGIN
+gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT_PERFORMANCE) \
+ += -fplugin-arg-randomize_layout_plugin-performance-mode
+# All the plugin CFLAGS are collected here in case a build target needs to
+# filter them out of the KBUILD_CFLAGS.
GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
-
-export GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR
-export DISABLE_LATENT_ENTROPY_PLUGIN
-
-# sancov_plugin.so can be only in CFLAGS_KCOV because avoid duplication.
+# The sancov_plugin.so is included via CFLAGS_KCOV, so it is removed here.
GCC_PLUGINS_CFLAGS := $(filter-out %/sancov_plugin.so, $(GCC_PLUGINS_CFLAGS))
+export GCC_PLUGINS_CFLAGS
+# Add the flags to the build!
KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+
+# All enabled GCC plugins are collected here for building below.
GCC_PLUGIN := $(gcc-plugin-y)
-GCC_PLUGIN_SUBDIR := $(gcc-plugin-subdir-y)
+export GCC_PLUGIN
# Actually do the build, if requested.
PHONY += gcc-plugins
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index aa971cc3f339..0393f75db4d4 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -62,9 +62,9 @@ host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
#####
# Handle options to gcc. Support building with separate output directory
-_hostc_flags = $(HOSTCFLAGS) $(HOST_EXTRACFLAGS) \
+_hostc_flags = $(KBUILD_HOSTCFLAGS) $(HOST_EXTRACFLAGS) \
$(HOSTCFLAGS_$(basetarget).o)
-_hostcxx_flags = $(HOSTCXXFLAGS) $(HOST_EXTRACXXFLAGS) \
+_hostcxx_flags = $(KBUILD_HOSTCXXFLAGS) $(HOST_EXTRACXXFLAGS) \
$(HOSTCXXFLAGS_$(basetarget).o)
ifeq ($(KBUILD_SRC),)
@@ -84,17 +84,17 @@ hostcxx_flags = -Wp,-MD,$(depfile) $(__hostcxx_flags)
# Create executable from a single .c file
# host-csingle -> Executable
quiet_cmd_host-csingle = HOSTCC $@
- cmd_host-csingle = $(HOSTCC) $(hostc_flags) $(HOSTLDFLAGS) -o $@ $< \
- $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
+ cmd_host-csingle = $(HOSTCC) $(hostc_flags) $(KBUILD_HOSTLDFLAGS) -o $@ $< \
+ $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(@F))
$(host-csingle): $(obj)/%: $(src)/%.c FORCE
$(call if_changed_dep,host-csingle)
# Link an executable based on list of .o files, all plain c
# host-cmulti -> executable
quiet_cmd_host-cmulti = HOSTLD $@
- cmd_host-cmulti = $(HOSTCC) $(HOSTLDFLAGS) -o $@ \
+ cmd_host-cmulti = $(HOSTCC) $(KBUILD_HOSTLDFLAGS) -o $@ \
$(addprefix $(obj)/,$($(@F)-objs)) \
- $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
+ $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(@F))
$(host-cmulti): FORCE
$(call if_changed,host-cmulti)
$(call multi_depend, $(host-cmulti), , -objs)
@@ -109,10 +109,10 @@ $(host-cobjs): $(obj)/%.o: $(src)/%.c FORCE
# Link an executable based on list of .o files, a mixture of .c and .cc
# host-cxxmulti -> executable
quiet_cmd_host-cxxmulti = HOSTLD $@
- cmd_host-cxxmulti = $(HOSTCXX) $(HOSTLDFLAGS) -o $@ \
+ cmd_host-cxxmulti = $(HOSTCXX) $(KBUILD_HOSTLDFLAGS) -o $@ \
$(foreach o,objs cxxobjs,\
$(addprefix $(obj)/,$($(@F)-$(o)))) \
- $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
+ $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(@F))
$(host-cxxmulti): FORCE
$(call if_changed,host-cxxmulti)
$(call multi_depend, $(host-cxxmulti), , -objs -cxxobjs)
@@ -143,9 +143,9 @@ $(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
# Link a shared library, based on position independent .o files
# *.o -> .so shared library (host-cshlib)
quiet_cmd_host-cshlib = HOSTLLD -shared $@
- cmd_host-cshlib = $(HOSTCC) $(HOSTLDFLAGS) -shared -o $@ \
+ cmd_host-cshlib = $(HOSTCC) $(KBUILD_HOSTLDFLAGS) -shared -o $@ \
$(addprefix $(obj)/,$($(@F:.so=-objs))) \
- $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
+ $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(@F))
$(host-cshlib): FORCE
$(call if_changed,host-cshlib)
$(call multi_depend, $(host-cshlib), .so, -objs)
@@ -153,9 +153,9 @@ $(call multi_depend, $(host-cshlib), .so, -objs)
# Link a shared library, based on position independent .o files
# *.o -> .so shared library (host-cxxshlib)
quiet_cmd_host-cxxshlib = HOSTLLD -shared $@
- cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
+ cmd_host-cxxshlib = $(HOSTCXX) $(KBUILD_HOSTLDFLAGS) -shared -o $@ \
$(addprefix $(obj)/,$($(@F:.so=-objs))) \
- $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
+ $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(@F))
$(host-cxxshlib): FORCE
$(call if_changed,host-cxxshlib)
$(call multi_depend, $(host-cxxshlib), .so, -objs)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 1bb594fcfe12..df0fff252619 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -162,7 +162,7 @@ a_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
cpp_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
$(__cpp_flags)
-ld_flags = $(LDFLAGS) $(ldflags-y)
+ld_flags = $(LDFLAGS) $(ldflags-y) $(LDFLAGS_$(@F))
DTC_INCLUDE := $(srctree)/scripts/dtc/include-prefixes
@@ -225,8 +225,7 @@ $(obj)/%: $(src)/%_shipped
# ---------------------------------------------------------------------------
quiet_cmd_ld = LD $@
-cmd_ld = $(LD) $(LDFLAGS) $(ldflags-y) $(LDFLAGS_$(@F)) \
- $(filter-out FORCE,$^) -o $@
+cmd_ld = $(LD) $(ld_flags) $(filter-out FORCE,$^) -o $@
# Objcopy
# ---------------------------------------------------------------------------
@@ -416,7 +415,7 @@ define filechk_offsets
echo " * This file was generated by Kbuild"; \
echo " */"; \
echo ""; \
- sed -ne $(sed-offsets); \
+ sed -ne $(sed-offsets) < $<; \
echo ""; \
echo "#endif" )
endef
diff --git a/scripts/Makefile.modbuiltin b/scripts/Makefile.modbuiltin
index 40867a41615b..a072a4267746 100644
--- a/scripts/Makefile.modbuiltin
+++ b/scripts/Makefile.modbuiltin
@@ -8,10 +8,10 @@ src := $(obj)
PHONY := __modbuiltin
__modbuiltin:
--include include/config/auto.conf
+include include/config/auto.conf
# tristate.conf sets tristate variables to uppercase 'Y' or 'M'
# That way, we get the list of built-in modules in obj-Y
--include include/config/tristate.conf
+include include/config/tristate.conf
include scripts/Kbuild.include
diff --git a/scripts/basic/.gitignore b/scripts/basic/.gitignore
index 9528ec9e5adc..a776371a3502 100644
--- a/scripts/basic/.gitignore
+++ b/scripts/basic/.gitignore
@@ -1,2 +1 @@
fixdep
-bin2c
diff --git a/scripts/basic/Makefile b/scripts/basic/Makefile
index 0372b33febe5..af49b446f17d 100644
--- a/scripts/basic/Makefile
+++ b/scripts/basic/Makefile
@@ -9,7 +9,6 @@
# fixdep: Used to generate dependency information during build process
hostprogs-y := fixdep
-hostprogs-$(CONFIG_BUILD_BIN2C) += bin2c
always := $(hostprogs-y)
# fixdep is needed to compile other host programs
diff --git a/scripts/basic/bin2c.c b/scripts/bin2c.c
index c3d7eef3ad06..c3d7eef3ad06 100644
--- a/scripts/basic/bin2c.c
+++ b/scripts/bin2c.c
diff --git a/scripts/coccicheck b/scripts/coccicheck
index 9fedca611b7f..e04d328210ac 100755
--- a/scripts/coccicheck
+++ b/scripts/coccicheck
@@ -128,9 +128,10 @@ run_cmd_parmap() {
fi
echo $@ >>$DEBUG_FILE
$@ 2>>$DEBUG_FILE
- if [[ $? -ne 0 ]]; then
+ err=$?
+ if [[ $err -ne 0 ]]; then
echo "coccicheck failed"
- exit $?
+ exit $err
fi
}
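
The temporary err is needed because $? is reset by every command: in the old version the echo inside the branch had already set $? back to 0, so the script exited successfully even when the coccinelle run failed.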
diff --git a/scripts/coccinelle/api/atomic_as_refcounter.cocci b/scripts/coccinelle/api/atomic_as_refcounter.cocci
new file mode 100644
index 000000000000..988120e0fd67
--- /dev/null
+++ b/scripts/coccinelle/api/atomic_as_refcounter.cocci
@@ -0,0 +1,129 @@
+// Check if refcount_t type and API should be used
+// instead of atomic_t type when dealing with refcounters
+//
+// Copyright (c) 2016-2017, Elena Reshetova, Intel Corporation
+//
+// Confidence: Moderate
+// URL: http://coccinelle.lip6.fr/
+// Options: --include-headers --very-quiet
+
+virtual report
+
+@r1 exists@
+identifier a, x;
+position p1, p2;
+identifier fname =~ ".*free.*";
+identifier fname2 =~ ".*destroy.*";
+identifier fname3 =~ ".*del.*";
+identifier fname4 =~ ".*queue_work.*";
+identifier fname5 =~ ".*schedule_work.*";
+identifier fname6 =~ ".*call_rcu.*";
+
+@@
+
+(
+ atomic_dec_and_test@p1(&(a)->x)
+|
+ atomic_dec_and_lock@p1(&(a)->x, ...)
+|
+ atomic_long_dec_and_lock@p1(&(a)->x, ...)
+|
+ atomic_long_dec_and_test@p1(&(a)->x)
+|
+ atomic64_dec_and_test@p1(&(a)->x)
+|
+ local_dec_and_test@p1(&(a)->x)
+)
+...
+(
+ fname@p2(a, ...);
+|
+ fname2@p2(...);
+|
+ fname3@p2(...);
+|
+ fname4@p2(...);
+|
+ fname5@p2(...);
+|
+ fname6@p2(...);
+)
+
+
+@script:python depends on report@
+p1 << r1.p1;
+p2 << r1.p2;
+@@
+msg = "atomic_dec_and_test variation before object free at line %s."
+coccilib.report.print_report(p1[0], msg % (p2[0].line))
+
+@r4 exists@
+identifier a, x, y;
+position p1, p2;
+identifier fname =~ ".*free.*";
+
+@@
+
+(
+ atomic_dec_and_test@p1(&(a)->x)
+|
+ atomic_dec_and_lock@p1(&(a)->x, ...)
+|
+ atomic_long_dec_and_lock@p1(&(a)->x, ...)
+|
+ atomic_long_dec_and_test@p1(&(a)->x)
+|
+ atomic64_dec_and_test@p1(&(a)->x)
+|
+ local_dec_and_test@p1(&(a)->x)
+)
+...
+y=a
+...
+fname@p2(y, ...);
+
+
+@script:python depends on report@
+p1 << r4.p1;
+p2 << r4.p2;
+@@
+msg = "atomic_dec_and_test variation before object free at line %s."
+coccilib.report.print_report(p1[0], msg % (p2[0].line))
+
+@r2 exists@
+identifier a, x;
+position p1;
+@@
+
+(
+atomic_add_unless(&(a)->x,-1,1)@p1
+|
+atomic_long_add_unless(&(a)->x,-1,1)@p1
+|
+atomic64_add_unless(&(a)->x,-1,1)@p1
+)
+
+@script:python depends on report@
+p1 << r2.p1;
+@@
+msg = "atomic_add_unless"
+coccilib.report.print_report(p1[0], msg)
+
+@r3 exists@
+identifier x;
+position p1;
+@@
+
+(
+x = atomic_add_return@p1(-1, ...);
+|
+x = atomic_long_add_return@p1(-1, ...);
+|
+x = atomic64_add_return@p1(-1, ...);
+)
+
+@script:python depends on report@
+p1 << r3.p1;
+@@
+msg = "x = atomic_add_return(-1, ...)"
+coccilib.report.print_report(p1[0], msg)
diff --git a/scripts/coccinelle/tests/doubletest.cocci b/scripts/coccinelle/tests/doubletest.cocci
index 78d74c22ca12..7af2ce7eb9bf 100644
--- a/scripts/coccinelle/tests/doubletest.cocci
+++ b/scripts/coccinelle/tests/doubletest.cocci
@@ -1,6 +1,7 @@
/// Find &&/|| operations that include the same argument more than once
-//# A common source of false positives is when the argument performs a side
-//# effect.
+//# A common source of false positives is when the expression, or
+//# another expression in the same && or || operation, performs a
+//# side effect.
///
// Confidence: Moderate
// Copyright: (C) 2010 Nicolas Palix, DIKU. GPLv2.
@@ -20,20 +21,37 @@ position p;
@@
(
-* E@p
- || ... || E
+ E@p || ... || E
|
-* E@p
- && ... && E
+ E@p && ... && E
)
-@script:python depends on org@
+@bad@
+expression r.E,e1,e2,fn;
+position r.p;
+assignment operator op;
+@@
+
+(
+E@p
+&
+ <+... \(fn(...)\|e1 op e2\|e1++\|e1--\|++e1\|--e1\) ...+>
+)
+
+@depends on context && !bad@
+expression r.E;
+position r.p;
+@@
+
+*E@p
+
+@script:python depends on org && !bad@
p << r.p;
@@
cocci.print_main("duplicated argument to && or ||",p)
-@script:python depends on report@
+@script:python depends on report && !bad@
p << r.p;
@@
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
index 1a6f85e0e6e1..999d585eaa73 100755
--- a/scripts/depmod.sh
+++ b/scripts/depmod.sh
@@ -10,10 +10,16 @@ fi
DEPMOD=$1
KERNELRELEASE=$2
-if ! test -r System.map -a -x "$DEPMOD"; then
+if ! test -r System.map ; then
exit 0
fi
+if [ -z "$(command -v "$DEPMOD")" ]; then
+ echo "'make modules_install' requires $DEPMOD. Please install it." >&2
+ echo "This is probably in the kmod package." >&2
+ exit 1
+fi
+
# older versions of depmod require the version string to start with three
# numbers, so we cheat with a symlink here
depmod_hack_needed=true
diff --git a/scripts/documentation-file-ref-check b/scripts/documentation-file-ref-check
index 078999a3fdff..ad9db6821824 100755
--- a/scripts/documentation-file-ref-check
+++ b/scripts/documentation-file-ref-check
@@ -75,6 +75,12 @@ while (<IN>) {
# Remove URL false-positives
next if ($fulref =~ m/^http/);
+ # Remove sched-pelt false-positive
+ next if ($fulref =~ m,^Documentation/scheduler/sched-pelt$,);
+
+ # Discard some build examples from Documentation/target/tcm_mod_builder.txt
+ next if ($fulref =~ m,mnt/sdb/lio-core-2.6.git/Documentation/target,);
+
# Check if exists, evaluating wildcards
next if (grep -e, glob("$ref $fulref"));
diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig
new file mode 100644
index 000000000000..7430a7c77a4a
--- /dev/null
+++ b/scripts/gcc-plugins/Kconfig
@@ -0,0 +1,142 @@
+preferred-plugin-hostcc := $(if-success,[ $(gcc-version) -ge 40800 ],$(HOSTCXX),$(HOSTCC))
+
+config PLUGIN_HOSTCC
+ string
+ default "$(shell,$(srctree)/scripts/gcc-plugin.sh "$(preferred-plugin-hostcc)" "$(HOSTCXX)" "$(CC)")"
+ help
+ Host compiler used to build GCC plugins. This can be $(HOSTCXX),
+ $(HOSTCC), or a null string if GCC plugin is unsupported.
+
+config HAVE_GCC_PLUGINS
+ bool
+ help
+ An arch should select this symbol if it supports building with
+ GCC plugins.
+
+menuconfig GCC_PLUGINS
+ bool "GCC plugins"
+ depends on HAVE_GCC_PLUGINS
+ depends on PLUGIN_HOSTCC != ""
+ help
+ GCC plugins are loadable modules that provide extra features to the
+ compiler. They are useful for runtime instrumentation and static analysis.
+
+ See Documentation/gcc-plugins.txt for details.
+
+if GCC_PLUGINS
+
+config GCC_PLUGIN_CYC_COMPLEXITY
+ bool "Compute the cyclomatic complexity of a function" if EXPERT
+ depends on !COMPILE_TEST # too noisy
+ help
+ The complexity M of a function's control flow graph is defined as:
+ M = E - N + 2P
+ where
+
+ E = the number of edges
+ N = the number of nodes
+ P = the number of connected components (exit nodes).
+
+ Enabling this plugin reports the complexity to stderr during the
+ build. It mainly serves as a simple example of how to create a
+ gcc plugin for the kernel.
+
+config GCC_PLUGIN_SANCOV
+ bool
+ help
+ This plugin inserts a __sanitizer_cov_trace_pc() call at the start of
+ basic blocks. It supports all gcc versions with plugin support (from
+ gcc-4.5 on). It is based on the commit "Add fuzzing coverage support"
+ by Dmitry Vyukov <dvyukov@google.com>.
+
+config GCC_PLUGIN_LATENT_ENTROPY
+ bool "Generate some entropy during boot and runtime"
+ help
+ By saying Y here the kernel will instrument some kernel code to
+ extract some entropy from both original and artificially created
+ program state. This will help especially embedded systems where
+ there is little 'natural' source of entropy normally. The cost
+ is some slowdown of the boot process (about 0.5%) and fork and
+ irq processing.
+
+ Note that entropy extracted this way is not cryptographically
+ secure!
+
+ This plugin was ported from grsecurity/PaX. More information at:
+ * https://grsecurity.net/
+ * https://pax.grsecurity.net/
+
+config GCC_PLUGIN_STRUCTLEAK
+ bool "Force initialization of variables containing userspace addresses"
+ # Currently STRUCTLEAK inserts initialization out of live scope of
+ # variables from KASAN point of view. This leads to KASAN false
+ # positive reports. Prohibit this combination for now.
+ depends on !KASAN_EXTRA
+ help
+ This plugin zero-initializes any structures containing a
+ __user attribute. This can prevent some classes of information
+ exposures.
+
+ This plugin was ported from grsecurity/PaX. More information at:
+ * https://grsecurity.net/
+ * https://pax.grsecurity.net/
+
+config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
+ bool "Force initialize all struct type variables passed by reference"
+ depends on GCC_PLUGIN_STRUCTLEAK
+ depends on !COMPILE_TEST
+ help
+ Zero initialize any struct type local variable that may be passed by
+ reference without having been initialized.
+
+config GCC_PLUGIN_STRUCTLEAK_VERBOSE
+ bool "Report forcefully initialized variables"
+ depends on GCC_PLUGIN_STRUCTLEAK
+ depends on !COMPILE_TEST # too noisy
+ help
+ This option will cause a warning to be printed each time the
+ structleak plugin finds a variable it thinks needs to be
+ initialized. Since not all existing initializers are detected
+ by the plugin, this can produce false positive warnings.
+
+config GCC_PLUGIN_RANDSTRUCT
+ bool "Randomize layout of sensitive kernel structures"
+ select MODVERSIONS if MODULES
+ help
+ If you say Y here, the layouts of structures that are entirely
+ function pointers (and have not been manually annotated with
+ __no_randomize_layout), or structures that have been explicitly
+ marked with __randomize_layout, will be randomized at compile-time.
+ This can force exploits that target these structure types to
+ additionally require an information exposure vulnerability.
+
+ Enabling this feature will introduce some performance impact,
+ slightly increase memory usage, and prevent the use of forensic
+ tools like Volatility against the system (unless the kernel
+ source tree isn't cleaned after kernel installation).
+
+ The seed used for compilation is located at
+ scripts/gcc-plugins/randomize_layout_seed.h. It remains after
+ a make clean to allow for external modules to be compiled with
+ the existing seed and will be removed by a make mrproper or
+ make distclean.
+
+ Note that the implementation requires gcc 4.7 or newer.
+
+ This plugin was ported from grsecurity/PaX. More information at:
+ * https://grsecurity.net/
+ * https://pax.grsecurity.net/
+
+config GCC_PLUGIN_RANDSTRUCT_PERFORMANCE
+ bool "Use cacheline-aware structure randomization"
+ depends on GCC_PLUGIN_RANDSTRUCT
+ depends on !COMPILE_TEST # do not reduce test coverage
+ help
+ If you say Y here, the RANDSTRUCT randomization will make a
+ best effort at restricting randomization to cacheline-sized
+ groups of elements. It will further not randomize bitfields
+ in structures. This reduces the performance hit of RANDSTRUCT
+ at the cost of weakened randomization.
+
+endif
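
As a worked example of the M = E - N + 2P formula in GCC_PLUGIN_CYC_COMPLEXITY above: a function whose body is a single if/else has N = 4 nodes (entry, the two branches, the join), E = 4 edges and P = 1, so M = 4 - 4 + 2 = 2, matching the one decision point in the function.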
diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile
index 326254653bd0..aa0d0ec6936d 100644
--- a/scripts/gcc-plugins/Makefile
+++ b/scripts/gcc-plugins/Makefile
@@ -14,8 +14,6 @@ else
export HOST_EXTRACXXFLAGS
endif
-export HOSTLIBS
-
$(obj)/randomize_layout_plugin.o: $(objtree)/$(obj)/randomize_layout_seed.h
quiet_cmd_create_randomize_layout_seed = GENSEED $@
cmd_create_randomize_layout_seed = \
@@ -29,7 +27,4 @@ always := $($(HOSTLIBS)-y)
$(foreach p,$($(HOSTLIBS)-y:%.so=%),$(eval $(p)-objs := $(p).o))
-subdir-y := $(GCC_PLUGIN_SUBDIR)
-subdir- += $(GCC_PLUGIN_SUBDIR)
-
clean-files += *.so
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index f46750053377..552d5efd7cb7 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -392,13 +392,6 @@ static inline struct cgraph_node *cgraph_alias_target(struct cgraph_node *n)
}
#endif
-#if BUILDING_GCC_VERSION >= 4007 && BUILDING_GCC_VERSION <= 4009
-#define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
- cgraph_create_edge((caller), (callee), (call_stmt), (count), (freq))
-#define cgraph_create_edge_including_clones(caller, callee, old_call_stmt, call_stmt, count, freq, nest, reason) \
- cgraph_create_edge_including_clones((caller), (callee), (old_call_stmt), (call_stmt), (count), (freq), (reason))
-#endif
-
#if BUILDING_GCC_VERSION <= 4008
#define ENTRY_BLOCK_PTR_FOR_FN(FN) ENTRY_BLOCK_PTR_FOR_FUNCTION(FN)
#define EXIT_BLOCK_PTR_FOR_FN(FN) EXIT_BLOCK_PTR_FOR_FUNCTION(FN)
@@ -723,10 +716,23 @@ static inline const char *get_decl_section_name(const_tree decl)
#define varpool_get_node(decl) varpool_node::get(decl)
#define dump_varpool_node(file, node) (node)->dump(file)
-#define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
+#if BUILDING_GCC_VERSION >= 8000
+#define cgraph_create_edge(caller, callee, call_stmt, count, freq) \
+ (caller)->create_edge((callee), (call_stmt), (count))
+
+#define cgraph_create_edge_including_clones(caller, callee, \
+ old_call_stmt, call_stmt, count, freq, reason) \
+ (caller)->create_edge_including_clones((callee), \
+ (old_call_stmt), (call_stmt), (count), (reason))
+#else
+#define cgraph_create_edge(caller, callee, call_stmt, count, freq) \
(caller)->create_edge((callee), (call_stmt), (count), (freq))
-#define cgraph_create_edge_including_clones(caller, callee, old_call_stmt, call_stmt, count, freq, nest, reason) \
- (caller)->create_edge_including_clones((callee), (old_call_stmt), (call_stmt), (count), (freq), (reason))
+
+#define cgraph_create_edge_including_clones(caller, callee, \
+ old_call_stmt, call_stmt, count, freq, reason) \
+ (caller)->create_edge_including_clones((callee), \
+ (old_call_stmt), (call_stmt), (count), (freq), (reason))
+#endif
typedef struct cgraph_node *cgraph_node_ptr;
typedef struct cgraph_edge *cgraph_edge_p;
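
With the versioned macro definitions above, plugin code can keep one spelling across GCC releases; a hedged call-site sketch (variable names illustrative):

	/* freq is silently dropped when building against gcc >= 8 */
	struct cgraph_edge *edge = cgraph_create_edge(caller_node,
					callee_node, call_stmt, count, freq);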
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index a3ac2c91331c..8d8791069abf 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -3,8 +3,7 @@
# Kernel configuration targets
# These targets are used from top-level makefile
-PHONY += xconfig gconfig menuconfig config syncconfig \
- localmodconfig localyesconfig
+PHONY += xconfig gconfig menuconfig config localmodconfig localyesconfig
ifdef KBUILD_KCONFIG
Kconfig := $(KBUILD_KCONFIG)
@@ -34,14 +33,7 @@ config: $(obj)/conf
nconfig: $(obj)/nconf
$< $(silent) $(Kconfig)
-# This has become an internal implementation detail and is now deprecated
-# for external use.
-syncconfig: $(obj)/conf
- $(Q)mkdir -p include/config include/generated
- $< $(silent) --$@ $(Kconfig)
-
localyesconfig localmodconfig: $(obj)/conf
- $(Q)mkdir -p include/config include/generated
$(Q)perl $(srctree)/$(src)/streamline_config.pl --$@ $(srctree) $(Kconfig) > .tmp.config
$(Q)if [ -f .config ]; then \
cmp -s .tmp.config .config || \
@@ -56,8 +48,12 @@ localyesconfig localmodconfig: $(obj)/conf
$(Q)rm -f .tmp.config
# These targets map 1:1 to the commandline options of 'conf'
+#
+# Note:
+# syncconfig has become an internal implementation detail and is now
+# deprecated for external use
simple-targets := oldconfig allnoconfig allyesconfig allmodconfig \
- alldefconfig randconfig listnewconfig olddefconfig
+ alldefconfig randconfig listnewconfig olddefconfig syncconfig
PHONY += $(simple-targets)
$(simple-targets): $(obj)/conf
@@ -169,7 +165,7 @@ HOSTCFLAGS_zconf.tab.o := -I$(src)
hostprogs-y += nconf
nconf-objs := nconf.o zconf.tab.o nconf.gui.o
-HOSTLOADLIBES_nconf = $(shell . $(obj)/.nconf-cfg && echo $$libs)
+HOSTLDLIBS_nconf = $(shell . $(obj)/.nconf-cfg && echo $$libs)
HOSTCFLAGS_nconf.o = $(shell . $(obj)/.nconf-cfg && echo $$cflags)
HOSTCFLAGS_nconf.gui.o = $(shell . $(obj)/.nconf-cfg && echo $$cflags)
@@ -180,7 +176,7 @@ hostprogs-y += mconf
lxdialog := checklist.o inputbox.o menubox.o textbox.o util.o yesno.o
mconf-objs := mconf.o zconf.tab.o $(addprefix lxdialog/, $(lxdialog))
-HOSTLOADLIBES_mconf = $(shell . $(obj)/.mconf-cfg && echo $$libs)
+HOSTLDLIBS_mconf = $(shell . $(obj)/.mconf-cfg && echo $$libs)
$(foreach f, mconf.o $(lxdialog), \
$(eval HOSTCFLAGS_$f = $$(shell . $(obj)/.mconf-cfg && echo $$$$cflags)))
@@ -191,7 +187,7 @@ hostprogs-y += qconf
qconf-cxxobjs := qconf.o
qconf-objs := zconf.tab.o
-HOSTLOADLIBES_qconf = $(shell . $(obj)/.qconf-cfg && echo $$libs)
+HOSTLDLIBS_qconf = $(shell . $(obj)/.qconf-cfg && echo $$libs)
HOSTCXXFLAGS_qconf.o = $(shell . $(obj)/.qconf-cfg && echo $$cflags)
$(obj)/qconf.o: $(obj)/.qconf-cfg $(obj)/qconf.moc
@@ -206,7 +202,7 @@ $(obj)/%.moc: $(src)/%.h $(obj)/.qconf-cfg
hostprogs-y += gconf
gconf-objs := gconf.o zconf.tab.o
-HOSTLOADLIBES_gconf = $(shell . $(obj)/.gconf-cfg && echo $$libs)
+HOSTLDLIBS_gconf = $(shell . $(obj)/.gconf-cfg && echo $$libs)
HOSTCFLAGS_gconf.o = $(shell . $(obj)/.gconf-cfg && echo $$cflags)
$(obj)/gconf.o: $(obj)/.gconf-cfg
@@ -215,6 +211,7 @@ $(obj)/zconf.tab.o: $(obj)/zconf.lex.c
# check if necessary packages are available, and configure build flags
define filechk_conf_cfg
+ $(CONFIG_SHELL) $(srctree)/scripts/kconfig/check-pkgconfig.sh; \
$(CONFIG_SHELL) $<
endef
diff --git a/scripts/kconfig/check-pkgconfig.sh b/scripts/kconfig/check-pkgconfig.sh
new file mode 100644
index 000000000000..7a1c40bfb58c
--- /dev/null
+++ b/scripts/kconfig/check-pkgconfig.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# Check for pkg-config presence
+
+if [ -z "$(command -v pkg-config)" ]; then
+ echo "'make *config' requires 'pkg-config'. Please install it." 1>&2
+ exit 1
+fi
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 671ff5364497..b35cc9303979 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -496,6 +496,7 @@ int main(int ac, char **av)
int opt;
const char *name, *defconfig_file = NULL /* gcc uninit */;
struct stat tmpstat;
+ int no_conf_write = 0;
tty_stdio = isatty(0) && isatty(1);
@@ -633,13 +634,14 @@ int main(int ac, char **av)
}
if (sync_kconfig) {
- if (conf_get_changed()) {
- name = getenv("KCONFIG_NOSILENTUPDATE");
- if (name && *name) {
+ name = getenv("KCONFIG_NOSILENTUPDATE");
+ if (name && *name) {
+ if (conf_get_changed()) {
fprintf(stderr,
"\n*** The configuration requires explicit update.\n\n");
return 1;
}
+ no_conf_write = 1;
}
}
@@ -684,29 +686,32 @@ int main(int ac, char **av)
break;
}
- if (sync_kconfig) {
- /* syncconfig is used during the build so we shall update autoconf.
- * All other commands are only used to generate a config.
- */
- if (conf_get_changed() && conf_write(NULL)) {
- fprintf(stderr, "\n*** Error during writing of the configuration.\n\n");
- exit(1);
- }
- if (conf_write_autoconf()) {
- fprintf(stderr, "\n*** Error during update of the configuration.\n\n");
- return 1;
- }
- } else if (input_mode == savedefconfig) {
+ if (input_mode == savedefconfig) {
if (conf_write_defconfig(defconfig_file)) {
fprintf(stderr, "n*** Error while saving defconfig to: %s\n\n",
defconfig_file);
return 1;
}
} else if (input_mode != listnewconfig) {
- if (conf_write(NULL)) {
+ if (!no_conf_write && conf_write(NULL)) {
fprintf(stderr, "\n*** Error during writing of the configuration.\n\n");
exit(1);
}
+
+ /*
+ * Create auto.conf if it does not exist.
+ * This prevents GNU Make 4.1 or older from emitting
+ * "include/config/auto.conf: No such file or directory"
+ * in the top-level Makefile
+ *
+ * syncconfig always creates or updates auto.conf because it is
+ * used during the build.
+ */
+ if (conf_write_autoconf(sync_kconfig) && sync_kconfig) {
+ fprintf(stderr,
+ "\n*** Error during sync of the configuration.\n\n");
+ return 1;
+ }
}
return 0;
}
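
Net effect of the reordering above: with KCONFIG_NOSILENTUPDATE set, syncconfig still errors out when the configuration needs an explicit update, but otherwise only skips rewriting .config (no_conf_write), while auto.conf handling moves into the shared conf_write_autoconf(sync_kconfig) call so that even the pure config targets create it when it is missing.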
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 39e20974f4a3..91d0a5c014ac 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -16,6 +16,64 @@
#include "lkc.h"
+/* return true if 'path' exists, false otherwise */
+static bool is_present(const char *path)
+{
+ struct stat st;
+
+ return !stat(path, &st);
+}
+
+/* return true if 'path' exists and it is a directory, false otherwise */
+static bool is_dir(const char *path)
+{
+ struct stat st;
+
+ if (stat(path, &st))
+ return 0;
+
+ return S_ISDIR(st.st_mode);
+}
+
+/*
+ * Create the parent directory of the given path.
+ *
+ * For example, if 'include/config/auto.conf' is given, create 'include/config'.
+ */
+static int make_parent_dir(const char *path)
+{
+ char tmp[PATH_MAX + 1];
+ char *p;
+
+ strncpy(tmp, path, sizeof(tmp));
+ tmp[sizeof(tmp) - 1] = 0;
+
+ /* Remove the base name. Just return if nothing is left */
+ p = strrchr(tmp, '/');
+ if (!p)
+ return 0;
+ *(p + 1) = 0;
+
+ /* Just in case it is an absolute path */
+ p = tmp;
+ while (*p == '/')
+ p++;
+
+ while ((p = strchr(p, '/'))) {
+ *p = 0;
+
+ /* skip if the directory exists */
+ if (!is_dir(tmp) && mkdir(tmp, 0755))
+ return -1;
+
+ *p = '/';
+ while (*p == '/')
+ p++;
+ }
+
+ return 0;
+}
+
struct conf_printer {
void (*print_symbol)(FILE *, struct symbol *, const char *, void *);
void (*print_comment)(FILE *, const char *, void *);
@@ -43,16 +101,16 @@ static void conf_warning(const char *fmt, ...)
conf_warnings++;
}
-static void conf_default_message_callback(const char *fmt, va_list ap)
+static void conf_default_message_callback(const char *s)
{
printf("#\n# ");
- vprintf(fmt, ap);
+ printf("%s", s);
printf("\n#\n");
}
-static void (*conf_message_callback) (const char *fmt, va_list ap) =
+static void (*conf_message_callback)(const char *s) =
conf_default_message_callback;
-void conf_set_message_callback(void (*fn) (const char *fmt, va_list ap))
+void conf_set_message_callback(void (*fn)(const char *s))
{
conf_message_callback = fn;
}
@@ -60,10 +118,15 @@ void conf_set_message_callback(void (*fn) (const char *fmt, va_list ap))
static void conf_message(const char *fmt, ...)
{
va_list ap;
+ char buf[4096];
+
+ if (!conf_message_callback)
+ return;
va_start(ap, fmt);
- if (conf_message_callback)
- conf_message_callback(fmt, ap);
+
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ conf_message_callback(buf);
va_end(ap);
}
@@ -83,7 +146,6 @@ const char *conf_get_autoconfig_name(void)
char *conf_get_default_confname(void)
{
- struct stat buf;
static char fullname[PATH_MAX+1];
char *env, *name;
@@ -91,7 +153,7 @@ char *conf_get_default_confname(void)
env = getenv(SRCTREE);
if (env) {
sprintf(fullname, "%s/%s", env, name);
- if (!stat(fullname, &buf))
+ if (is_present(fullname))
return fullname;
}
return name;
@@ -397,7 +459,7 @@ int conf_read(const char *name)
for_all_symbols(i, sym) {
sym_calc_value(sym);
- if (sym_is_choice(sym) || (sym->flags & SYMBOL_AUTO))
+ if (sym_is_choice(sym) || (sym->flags & SYMBOL_NO_WRITE))
continue;
if (sym_has_value(sym) && (sym->flags & SYMBOL_WRITE)) {
/* check that calculated value agrees with saved value */
@@ -725,10 +787,9 @@ int conf_write(const char *name)
dirname[0] = 0;
if (name && name[0]) {
- struct stat st;
char *slash;
- if (!stat(name, &st) && S_ISDIR(st.st_mode)) {
+ if (is_dir(name)) {
strcpy(dirname, name);
strcat(dirname, "/");
basename = conf_get_configname();
@@ -813,26 +874,59 @@ next:
return 0;
}
+/* write a dependency file as used by kbuild to track dependencies */
+static int conf_write_dep(const char *name)
+{
+ struct file *file;
+ FILE *out;
+
+ if (!name)
+ name = ".kconfig.d";
+ out = fopen("..config.tmp", "w");
+ if (!out)
+ return 1;
+ fprintf(out, "deps_config := \\\n");
+ for (file = file_list; file; file = file->next) {
+ if (file->next)
+ fprintf(out, "\t%s \\\n", file->name);
+ else
+ fprintf(out, "\t%s\n", file->name);
+ }
+ fprintf(out, "\n%s: \\\n"
+ "\t$(deps_config)\n\n", conf_get_autoconfig_name());
+
+ env_write_dep(out, conf_get_autoconfig_name());
+
+ fprintf(out, "\n$(deps_config): ;\n");
+ fclose(out);
+
+ if (make_parent_dir(name))
+ return 1;
+ rename("..config.tmp", name);
+ return 0;
+}
+
static int conf_split_config(void)
{
const char *name;
char path[PATH_MAX+1];
char *s, *d, c;
struct symbol *sym;
- struct stat sb;
int res, i, fd;
name = conf_get_autoconfig_name();
conf_read_simple(name, S_DEF_AUTO);
sym_calc_value(modules_sym);
+ if (make_parent_dir("include/config/foo.h"))
+ return 1;
if (chdir("include/config"))
return 1;
res = 0;
for_all_symbols(i, sym) {
sym_calc_value(sym);
- if ((sym->flags & SYMBOL_AUTO) || !sym->name)
+ if ((sym->flags & SYMBOL_NO_WRITE) || !sym->name)
continue;
if (sym->flags & SYMBOL_WRITE) {
if (sym->flags & SYMBOL_DEF_AUTO) {
@@ -897,19 +991,12 @@ static int conf_split_config(void)
res = 1;
break;
}
- /*
- * Create directory components,
- * unless they exist already.
- */
- d = path;
- while ((d = strchr(d, '/'))) {
- *d = 0;
- if (stat(path, &sb) && mkdir(path, 0755)) {
- res = 1;
- goto out;
- }
- *d++ = '/';
+
+ if (make_parent_dir(path)) {
+ res = 1;
+ goto out;
}
+
/* Try it again. */
fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (fd == -1) {
@@ -926,16 +1013,20 @@ out:
return res;
}
-int conf_write_autoconf(void)
+int conf_write_autoconf(int overwrite)
{
struct symbol *sym;
const char *name;
+ const char *autoconf_name = conf_get_autoconfig_name();
FILE *out, *tristate, *out_h;
int i;
+ if (!overwrite && is_present(autoconf_name))
+ return 0;
+
sym_clear_all_valid();
- file_write_dep("include/config/auto.conf.cmd");
+ conf_write_dep("include/config/auto.conf.cmd");
if (conf_split_config())
return 1;
@@ -982,19 +1073,26 @@ int conf_write_autoconf(void)
name = getenv("KCONFIG_AUTOHEADER");
if (!name)
name = "include/generated/autoconf.h";
+ if (make_parent_dir(name))
+ return 1;
if (rename(".tmpconfig.h", name))
return 1;
+
name = getenv("KCONFIG_TRISTATE");
if (!name)
name = "include/config/tristate.conf";
+ if (make_parent_dir(name))
+ return 1;
if (rename(".tmpconfig_tristate", name))
return 1;
- name = conf_get_autoconfig_name();
+
+ if (make_parent_dir(autoconf_name))
+ return 1;
/*
* This must be the last step, kbuild has a dependency on auto.conf
* and this marks the successful completion of the previous steps.
*/
- if (rename(".tmpconfig", name))
+ if (rename(".tmpconfig", autoconf_name))
return 1;
return 0;
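
The callback now receives a pre-formatted string instead of a (fmt, va_list) pair, which is what lets mconf and nconf below drop their vsnprintf() buffers. A minimal sketch of a conforming frontend callback, using only the API visible in this patch:

static void my_message_callback(const char *s)
{
	fprintf(stderr, "kconfig: %s\n", s); /* s is already formatted */
}

	/* during frontend init: */
	conf_set_message_callback(my_message_callback);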
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index f63b41b0dd49..7c329e179007 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -141,7 +141,7 @@ struct symbol {
#define SYMBOL_OPTIONAL 0x0100 /* choice is optional - values can be 'n' */
#define SYMBOL_WRITE 0x0200 /* write symbol to file (KCONFIG_CONFIG) */
#define SYMBOL_CHANGED 0x0400 /* ? */
-#define SYMBOL_AUTO 0x1000 /* value from environment variable */
+#define SYMBOL_NO_WRITE 0x1000 /* Symbol for internal use only; it will not be written */
#define SYMBOL_CHECKED 0x2000 /* used during dependency checking */
#define SYMBOL_WARNED 0x8000 /* warning has been issued */
@@ -185,7 +185,6 @@ enum prop_type {
P_SELECT, /* select BAR */
P_IMPLY, /* imply BAR */
P_RANGE, /* range 7..100 (for a symbol) */
- P_ENV, /* value from environment variable */
P_SYMBOL, /* where a symbol is defined */
};
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index 610c4ab54d76..36f578415c4a 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -101,8 +101,8 @@ const char *dbg_sym_flags(int val)
strcat(buf, "write/");
if (val & SYMBOL_CHANGED)
strcat(buf, "changed/");
- if (val & SYMBOL_AUTO)
- strcat(buf, "auto/");
+ if (val & SYMBOL_NO_WRITE)
+ strcat(buf, "no_write/");
buf[strlen(buf) - 1] = '\0';
@@ -525,6 +525,7 @@ void on_save_activate(GtkMenuItem * menuitem, gpointer user_data)
{
if (conf_write(NULL))
text_insert_msg("Error", "Unable to save configuration !");
+ conf_write_autoconf(0);
}
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index ed3ff88e60ba..9eb7c837cd8f 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -97,7 +97,6 @@ void menu_set_type(int type);
/* util.c */
struct file *file_lookup(const char *name);
-int file_write_dep(const char *name);
void *xmalloc(size_t size);
void *xcalloc(size_t nmemb, size_t size);
void *xrealloc(void *p, size_t size);
@@ -126,7 +125,6 @@ const char *sym_get_string_default(struct symbol *sym);
struct symbol *sym_check_deps(struct symbol *sym);
struct property *prop_alloc(enum prop_type type, struct symbol *sym);
struct symbol *prop_get_symbol(struct property *prop);
-struct property *sym_get_env_prop(struct symbol *sym);
static inline tristate sym_get_tristate_value(struct symbol *sym)
{
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index a8b7a330587e..86c267540ccc 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -7,10 +7,10 @@ int conf_read(const char *name);
int conf_read_simple(const char *name, int);
int conf_write_defconfig(const char *name);
int conf_write(const char *name);
-int conf_write_autoconf(void);
+int conf_write_autoconf(int overwrite);
bool conf_get_changed(void);
void conf_set_changed_callback(void (*fn)(void));
-void conf_set_message_callback(void (*fn)(const char *fmt, va_list ap));
+void conf_set_message_callback(void (*fn)(const char *s));
/* menu.c */
extern struct menu rootmenu;
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 5294ed159b98..83b5836615fb 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -772,16 +772,13 @@ static void show_helptext(const char *title, const char *text)
show_textbox(title, text, 0, 0);
}
-static void conf_message_callback(const char *fmt, va_list ap)
+static void conf_message_callback(const char *s)
{
- char buf[PATH_MAX+1];
-
- vsnprintf(buf, sizeof(buf), fmt, ap);
if (save_and_exit) {
if (!silent)
- printf("%s", buf);
+ printf("%s", s);
} else {
- show_textbox(NULL, buf, 6, 60);
+ show_textbox(NULL, s, 6, 60);
}
}
@@ -977,6 +974,7 @@ static int handle_exit(void)
"\n\n");
return 1;
}
+ conf_write_autoconf(0);
/* fall through */
case -1:
if (!silent)
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 379a119dcd1e..4cf15d449c05 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -212,7 +212,7 @@ void menu_add_option(int token, char *arg)
sym_defconfig_list = current_entry->sym;
else if (sym_defconfig_list != current_entry->sym)
zconf_error("trying to redefine defconfig symbol");
- sym_defconfig_list->flags |= SYMBOL_AUTO;
+ sym_defconfig_list->flags |= SYMBOL_NO_WRITE;
break;
case T_OPT_ALLNOCONFIG_Y:
current_entry->sym->flags |= SYMBOL_ALLNOCONFIG_Y;
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index 97b78445584b..1ef232ae5ab9 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -674,6 +674,7 @@ static int do_exit(void)
"Your configuration changes were NOT saved.",
1,
"<OK>");
+ conf_write_autoconf(0);
break;
default:
btn_dialog(
@@ -1210,12 +1211,9 @@ static void conf(struct menu *menu)
}
}
-static void conf_message_callback(const char *fmt, va_list ap)
+static void conf_message_callback(const char *s)
{
- char buf[1024];
-
- vsnprintf(buf, sizeof(buf), fmt, ap);
- btn_dialog(main_window, buf, 1, "<OK>");
+ btn_dialog(main_window, s, 1, "<OK>");
}
static void show_help(struct menu *menu)
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index ad9c22dd04f5..ef4310f2558b 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -1149,7 +1149,6 @@ QString ConfigInfoView::debug_info(struct symbol *sym)
case P_DEFAULT:
case P_SELECT:
case P_RANGE:
- case P_ENV:
debug += prop_get_type_name(prop->type);
debug += ": ";
expr_print(prop->expr, expr_print_help, &debug, E_NONE);
@@ -1535,6 +1534,8 @@ bool ConfigMainWindow::saveConfig(void)
QMessageBox::information(this, "qconf", "Unable to save configuration!");
return false;
}
+ conf_write_autoconf(0);
+
return true;
}
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 7c9a88e91cfa..4ec8b1f0d42c 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -76,15 +76,6 @@ struct property *sym_get_choice_prop(struct symbol *sym)
return NULL;
}
-struct property *sym_get_env_prop(struct symbol *sym)
-{
- struct property *prop;
-
- for_all_properties(sym, prop, P_ENV)
- return prop;
- return NULL;
-}
-
static struct property *sym_get_default_prop(struct symbol *sym)
{
struct property *prop;
@@ -463,7 +454,7 @@ void sym_calc_value(struct symbol *sym)
}
}
- if (sym->flags & SYMBOL_AUTO)
+ if (sym->flags & SYMBOL_NO_WRITE)
sym->flags &= ~SYMBOL_WRITE;
if (sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES)
@@ -1298,8 +1289,6 @@ const char *prop_get_type_name(enum prop_type type)
switch (type) {
case P_PROMPT:
return "prompt";
- case P_ENV:
- return "env";
case P_COMMENT:
return "comment";
case P_MENU:
diff --git a/scripts/kconfig/util.c b/scripts/kconfig/util.c
index a365594770d9..d999683bb2a7 100644
--- a/scripts/kconfig/util.c
+++ b/scripts/kconfig/util.c
@@ -29,36 +29,6 @@ struct file *file_lookup(const char *name)
return file;
}
-/* write a dependency file as used by kbuild to track dependencies */
-int file_write_dep(const char *name)
-{
- struct file *file;
- FILE *out;
-
- if (!name)
- name = ".kconfig.d";
- out = fopen("..config.tmp", "w");
- if (!out)
- return 1;
- fprintf(out, "deps_config := \\\n");
- for (file = file_list; file; file = file->next) {
- if (file->next)
- fprintf(out, "\t%s \\\n", file->name);
- else
- fprintf(out, "\t%s\n", file->name);
- }
- fprintf(out, "\n%s: \\\n"
- "\t$(deps_config)\n\n", conf_get_autoconfig_name());
-
- env_write_dep(out, conf_get_autoconfig_name());
-
- fprintf(out, "\n$(deps_config): ;\n");
- fclose(out);
- rename("..config.tmp", name);
- return 0;
-}
-
-
/* Allocate initial growable string */
struct gstr str_new(void)
{
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 4b68272ebdb9..22fceb264cf5 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -31,7 +31,7 @@ struct symbol *symbol_hash[SYMBOL_HASHSIZE];
static struct menu *current_menu, *current_entry;
%}
-%expect 31
+%expect 30
%union
{
@@ -117,7 +117,7 @@ start: mainmenu_stmt stmt_list | stmt_list;
/* mainmenu entry */
-mainmenu_stmt: T_MAINMENU prompt nl
+mainmenu_stmt: T_MAINMENU prompt T_EOL
{
menu_add_prompt(P_MENU, $2, NULL);
};
@@ -265,7 +265,7 @@ symbol_option_arg:
choice: T_CHOICE word_opt T_EOL
{
struct symbol *sym = sym_lookup($2, SYMBOL_CHOICE);
- sym->flags |= SYMBOL_AUTO;
+ sym->flags |= SYMBOL_NO_WRITE;
menu_add_entry(sym);
menu_add_expr(P_CHOICE, NULL, NULL);
free($2);
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 0057d8eafcc1..8f0f508a78e9 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1062,7 +1062,7 @@ sub dump_struct($$) {
my $x = shift;
my $file = shift;
- if ($x =~ /(struct|union)\s+(\w+)\s*{(.*)}/) {
+ if ($x =~ /(struct|union)\s+(\w+)\s*\{(.*)\}/) {
my $decl_type = $1;
$declaration_name = $2;
my $members = $3;
@@ -1148,20 +1148,20 @@ sub dump_struct($$) {
}
}
}
- $members =~ s/(struct|union)([^\{\};]+)\{([^\{\}]*)}([^\{\}\;]*)\;/$newmember/;
+ $members =~ s/(struct|union)([^\{\};]+)\{([^\{\}]*)\}([^\{\}\;]*)\;/$newmember/;
}
# Ignore other nested elements, like enums
- $members =~ s/({[^\{\}]*})//g;
+ $members =~ s/(\{[^\{\}]*\})//g;
create_parameterlist($members, ';', $file, $declaration_name);
check_sections($file, $declaration_name, $decl_type, $sectcheck, $struct_actual);
# Adjust declaration for better display
- $declaration =~ s/([{;])/$1\n/g;
- $declaration =~ s/}\s+;/};/g;
+ $declaration =~ s/([\{;])/$1\n/g;
+ $declaration =~ s/\}\s+;/};/g;
# Better handle inlined enums
- do {} while ($declaration =~ s/(enum\s+{[^}]+),([^\n])/$1,\n$2/);
+ do {} while ($declaration =~ s/(enum\s+\{[^\}]+),([^\n])/$1,\n$2/);
my @def_args = split /\n/, $declaration;
my $level = 1;
@@ -1171,12 +1171,12 @@ sub dump_struct($$) {
$clause =~ s/\s+$//;
$clause =~ s/\s+/ /;
next if (!$clause);
- $level-- if ($clause =~ m/(})/ && $level > 1);
+ $level-- if ($clause =~ m/(\})/ && $level > 1);
if (!($clause =~ m/^\s*#/)) {
$declaration .= "\t" x $level;
}
$declaration .= "\t" . $clause . "\n";
- $level++ if ($clause =~ m/({)/ && !($clause =~m/}/));
+ $level++ if ($clause =~ m/(\{)/ && !($clause =~m/\}/));
}
output_declaration($declaration_name,
'struct',
@@ -1244,7 +1244,7 @@ sub dump_enum($$) {
# strip #define macros inside enums
$x =~ s@#\s*((define|ifdef)\s+|endif)[^;]*;@@gos;
- if ($x =~ /enum\s+(\w+)\s*{(.*)}/) {
+ if ($x =~ /enum\s+(\w+)\s*\{(.*)\}/) {
$declaration_name = $1;
my $members = $2;
my %_members;
@@ -1785,7 +1785,7 @@ sub process_proto_type($$) {
}
while (1) {
- if ( $x =~ /([^{};]*)([{};])(.*)/ ) {
+ if ( $x =~ /([^\{\};]*)([\{\};])(.*)/ ) {
if( length $prototype ) {
$prototype .= " "
}
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 1663fb19343a..dc6d714e4dcb 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -2125,10 +2125,13 @@ static int check_modname_len(struct module *mod)
**/
static void add_header(struct buffer *b, struct module *mod)
{
+ buf_printf(b, "#include <linux/build-salt.h>\n");
buf_printf(b, "#include <linux/module.h>\n");
buf_printf(b, "#include <linux/vermagic.h>\n");
buf_printf(b, "#include <linux/compiler.h>\n");
buf_printf(b, "\n");
+ buf_printf(b, "BUILD_SALT;\n");
+ buf_printf(b, "\n");
buf_printf(b, "MODULE_INFO(vermagic, VERMAGIC_STRING);\n");
buf_printf(b, "MODULE_INFO(name, KBUILD_MODNAME);\n");
buf_printf(b, "\n");
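Aside: reconstructed from the buf_printf() calls above, the prologue that modpost now emits into every generated *.mod.c looks like the following. BUILD_SALT (from <linux/build-salt.h>) embeds the configured build salt as an ELF note, intended to tie the module's debug build-id to the kernel build it was compiled against:

	#include <linux/build-salt.h>
	#include <linux/module.h>
	#include <linux/vermagic.h>
	#include <linux/compiler.h>

	BUILD_SALT;

	MODULE_INFO(vermagic, VERMAGIC_STRING);
	MODULE_INFO(name, KBUILD_MODNAME);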
diff --git a/scripts/package/buildtar b/scripts/package/buildtar
index e8cc72a51b32..d624a07a4e77 100755
--- a/scripts/package/buildtar
+++ b/scripts/package/buildtar
@@ -84,10 +84,6 @@ case "${ARCH}" in
[ -f "${KBUILD_IMAGE}" ] && cp -v -- "${KBUILD_IMAGE}" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}"
[ -f "${objtree}/lifimage" ] && cp -v -- "${objtree}/lifimage" "${tmpdir}/boot/lifimage-${KERNELRELEASE}"
;;
- vax)
- [ -f "${objtree}/vmlinux.SYS" ] && cp -v -- "${objtree}/vmlinux.SYS" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}.SYS"
- [ -f "${objtree}/vmlinux.dsk" ] && cp -v -- "${objtree}/vmlinux.dsk" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}.dsk"
- ;;
mips)
if [ -f "${objtree}/arch/mips/boot/compressed/vmlinux.bin" ]; then
cp -v -- "${objtree}/arch/mips/boot/compressed/vmlinux.bin" "${tmpdir}/boot/vmlinuz-${KERNELRELEASE}"
@@ -109,6 +105,14 @@ case "${ARCH}" in
cp -v -- "${objtree}/vmlinux" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}"
fi
;;
+ arm64)
+ for i in Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo ; do
+ if [ -f "${objtree}/arch/arm64/boot/${i}" ] ; then
+ cp -v -- "${objtree}/arch/arm64/boot/${i}" "${tmpdir}/boot/vmlinuz-${KERNELRELEASE}"
+ break
+ fi
+ done
+ ;;
*)
[ -f "${KBUILD_IMAGE}" ] && cp -v -- "${KBUILD_IMAGE}" "${tmpdir}/boot/vmlinux-kbuild-${KERNELRELEASE}"
echo "" >&2
diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
index 985d72d1ab34..663a7f343b42 100755
--- a/scripts/package/mkdebian
+++ b/scripts/package/mkdebian
@@ -6,49 +6,81 @@
set -e
+is_enabled() {
+ grep -q "^CONFIG_$1=y" $KCONFIG_CONFIG
+}
+
+if_enabled_echo() {
+ if is_enabled "$1"; then
+ echo -n "$2"
+ elif [ $# -ge 3 ]; then
+ echo -n "$3"
+ fi
+}
+
set_debarch() {
+ if [ -n "$KBUILD_DEBARCH" ] ; then
+ debarch="$KBUILD_DEBARCH"
+ return
+ fi
+
# Attempt to find the correct Debian architecture
case "$UTS_MACHINE" in
- i386|ia64|alpha)
+ i386|ia64|alpha|m68k|riscv*)
debarch="$UTS_MACHINE" ;;
x86_64)
debarch=amd64 ;;
sparc*)
- debarch=sparc ;;
+ debarch=sparc$(if_enabled_echo 64BIT 64) ;;
s390*)
- debarch=s390$(grep -q CONFIG_64BIT=y $KCONFIG_CONFIG && echo x || true) ;;
+ debarch=s390x ;;
ppc*)
- debarch=$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo ppc64el || echo powerpc) ;;
+ if is_enabled 64BIT; then
+ debarch=ppc64$(if_enabled_echo CPU_LITTLE_ENDIAN el)
+ else
+ debarch=powerpc$(if_enabled_echo SPE spe)
+ fi
+ ;;
parisc*)
debarch=hppa ;;
mips*)
- debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el || true) ;;
+ if is_enabled CPU_LITTLE_ENDIAN; then
+ debarch=mips$(if_enabled_echo 64BIT 64)$(if_enabled_echo CPU_MIPSR6 r6)el
+ elif is_enabled CPU_MIPSR6; then
+ debarch=mips$(if_enabled_echo 64BIT 64)r6
+ else
+ debarch=mips
+ fi
+ ;;
aarch64|arm64)
debarch=arm64 ;;
arm*)
- if grep -q CONFIG_AEABI=y $KCONFIG_CONFIG; then
- if grep -q CONFIG_VFP=y $KCONFIG_CONFIG; then
- debarch=armhf
- else
- debarch=armel
- fi
+ if is_enabled AEABI; then
+ debarch=arm$(if_enabled_echo VFP hf el)
else
- debarch=arm
+ debarch=arm
+ fi
+ ;;
+ openrisc)
+ debarch=or1k ;;
+ sh)
+ if is_enabled CPU_SH3; then
+ debarch=sh3$(if_enabled_echo CPU_BIG_ENDIAN eb)
+ elif is_enabled CPU_SH4; then
+ debarch=sh4$(if_enabled_echo CPU_BIG_ENDIAN eb)
fi
;;
- *)
- debarch=$(dpkg --print-architecture)
+ esac
+ if [ -z "$debarch" ]; then
+ debarch=$(dpkg-architecture -qDEB_HOST_ARCH)
echo "" >&2
echo "** ** ** WARNING ** ** **" >&2
echo "" >&2
echo "Your architecture doesn't have its equivalent" >&2
echo "Debian userspace architecture defined!" >&2
- echo "Falling back to using your current userspace instead!" >&2
+ echo "Falling back to the current host architecture ($debarch)." >&2
echo "Please add support for $UTS_MACHINE to ${0} ..." >&2
echo "" >&2
- esac
- if [ -n "$KBUILD_DEBARCH" ] ; then
- debarch="$KBUILD_DEBARCH"
fi
}
diff --git a/scripts/tracing/draw_functrace.py b/scripts/tracing/draw_functrace.py
index db40fa04cd51..9b6dd4f36335 100755
--- a/scripts/tracing/draw_functrace.py
+++ b/scripts/tracing/draw_functrace.py
@@ -123,7 +123,7 @@ def main():
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
- print CallTree.ROOT
+ print(CallTree.ROOT)
if __name__ == "__main__":
main()
diff --git a/security/Kconfig b/security/Kconfig
index c4302067a3ad..27d8b2688f75 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -57,7 +57,7 @@ config SECURITY_NETWORK
config PAGE_TABLE_ISOLATION
bool "Remove the kernel mapping in user mode"
default y
- depends on X86_64 && !UML
+ depends on X86 && !UML
help
This feature reduces the number of hardware side channels by
ensuring that the majority of kernel addresses are not mapped
@@ -153,7 +153,6 @@ config HAVE_HARDENED_USERCOPY_ALLOCATOR
config HARDENED_USERCOPY
bool "Harden memory copies between kernel and userspace"
depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
- select BUG
imply STRICT_DEVMEM
help
This option checks for obviously wrong memory regions when
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 74f17376202b..8b8b70620bbe 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -395,7 +395,7 @@ static int apparmor_inode_getattr(const struct path *path)
return common_perm_cond(OP_GETATTR, path, AA_MAY_GETATTR);
}
-static int apparmor_file_open(struct file *file, const struct cred *cred)
+static int apparmor_file_open(struct file *file)
{
struct aa_file_ctx *fctx = file_ctx(file);
struct aa_label *label;
@@ -414,7 +414,7 @@ static int apparmor_file_open(struct file *file, const struct cred *cred)
return 0;
}
- label = aa_get_newest_cred_label(cred);
+ label = aa_get_newest_cred_label(file->f_cred);
if (!unconfined(label)) {
struct inode *inode = file_inode(file);
struct path_cond cond = { inode->i_uid, inode->i_mode };
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index ab6a029062a1..6dc075144508 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -115,3 +115,26 @@ int asymmetric_verify(struct key *keyring, const char *sig,
pr_debug("%s() = %d\n", __func__, ret);
return ret;
}
+
+/**
+ * integrity_kernel_module_request - prevent crypto-pkcs1pad(rsa,*) requests
+ * @kmod_name: kernel module name
+ *
+ * In the RSA case, public_key_verify_signature() uses alg_name to store
+ * internal information in order to construct an algorithm on the fly,
+ * but crypto_larval_lookup() will try to use alg_name to load a kernel
+ * module with the same name.
+ * Since there are no real "crypto-pkcs1pad(rsa,*)" kernel modules,
+ * it is safe to fail such a module request from crypto_larval_lookup().
+ *
+ * This prevents modprobe execution during digsig verification and
+ * avoids a possible deadlock if modprobe and/or its dependencies are
+ * also signed with digsig.
+ */
+int integrity_kernel_module_request(char *kmod_name)
+{
+ if (strncmp(kmod_name, "crypto-pkcs1pad(rsa,", 20) == 0)
+ return -EINVAL;
+
+ return 0;
+}
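For illustration, a minimal user-space sketch of the same prefix test (the check() helper is hypothetical and -22 stands in for -EINVAL; the prefix and its length match the strncmp() above):

	#include <stdio.h>
	#include <string.h>

	static int check(const char *kmod_name)
	{
		/* "crypto-pkcs1pad(rsa," is exactly 20 characters */
		if (strncmp(kmod_name, "crypto-pkcs1pad(rsa,", 20) == 0)
			return -22; /* stand-in for -EINVAL */
		return 0;
	}

	int main(void)
	{
		printf("%d\n", check("crypto-pkcs1pad(rsa,sha256)")); /* -22 */
		printf("%d\n", check("crypto-sha256"));               /* 0 */
		return 0;
	}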
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
index d593346d0bba..60221852b26a 100644
--- a/security/integrity/evm/Kconfig
+++ b/security/integrity/evm/Kconfig
@@ -4,6 +4,7 @@ config EVM
select ENCRYPTED_KEYS
select CRYPTO_HMAC
select CRYPTO_SHA1
+ select CRYPTO_HASH_INFO
default n
help
EVM protects a file's security extended attributes against
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
index 1257c3c24723..c3f437f5db10 100644
--- a/security/integrity/evm/evm.h
+++ b/security/integrity/evm/evm.h
@@ -47,6 +47,11 @@ extern struct crypto_shash *hash_tfm;
/* List of EVM protected security xattrs */
extern struct list_head evm_config_xattrnames;
+struct evm_digest {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+} __packed;
+
int evm_init_key(void);
int evm_update_evmxattr(struct dentry *dentry,
const char *req_xattr_name,
@@ -54,10 +59,11 @@ int evm_update_evmxattr(struct dentry *dentry,
size_t req_xattr_value_len);
int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
const char *req_xattr_value,
- size_t req_xattr_value_len, char *digest);
+ size_t req_xattr_value_len, struct evm_digest *data);
int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
const char *req_xattr_value,
- size_t req_xattr_value_len, char type, char *digest);
+ size_t req_xattr_value_len, char type,
+ struct evm_digest *data);
int evm_init_hmac(struct inode *inode, const struct xattr *xattr,
char *hmac_val);
int evm_init_secfs(void);
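A user-space sketch of the new calling convention introduced by struct evm_digest: the digest buffer now carries its algorithm and length in a header instead of being a bare char array. All types, sizes, and values below are illustrative stand-ins, not the kernel definitions:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	#define IMA_MAX_DIGEST_SIZE 64
	#define SHA1_DIGEST_SIZE 20

	/* simplified stand-in for the kernel's ima_digest_data */
	struct ima_digest_data {
		uint8_t algo;
		uint16_t length;
	};

	struct evm_digest {
		struct ima_digest_data hdr;
		char digest[IMA_MAX_DIGEST_SIZE];
	};

	/* stand-in for evm_calc_hmac(): the callee records the length */
	static int calc_hmac(struct evm_digest *data)
	{
		data->hdr.length = SHA1_DIGEST_SIZE;
		memset(data->digest, 0xab, data->hdr.length);
		return 0;
	}

	int main(void)
	{
		struct evm_digest data;

		data.hdr.algo = 2; /* e.g. HASH_ALGO_SHA1 */
		if (!calc_hmac(&data))
			printf("algo=%u len=%u\n", data.hdr.algo,
			       data.hdr.length);
		return 0;
	}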
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index b60524310855..8a3905bb02c7 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -21,6 +21,7 @@
#include <linux/evm.h>
#include <keys/encrypted-type.h>
#include <crypto/hash.h>
+#include <crypto/hash_info.h>
#include "evm.h"
#define EVMKEY "evm-key"
@@ -29,7 +30,7 @@ static unsigned char evmkey[MAX_KEY_SIZE];
static int evmkey_len = MAX_KEY_SIZE;
struct crypto_shash *hmac_tfm;
-struct crypto_shash *hash_tfm;
+static struct crypto_shash *evm_tfm[HASH_ALGO__LAST];
static DEFINE_MUTEX(mutex);
@@ -38,7 +39,6 @@ static DEFINE_MUTEX(mutex);
static unsigned long evm_set_key_flags;
static char * const evm_hmac = "hmac(sha1)";
-static char * const evm_hash = "sha1";
/**
* evm_set_key() - set EVM HMAC key from the kernel
@@ -74,10 +74,10 @@ busy:
}
EXPORT_SYMBOL_GPL(evm_set_key);
-static struct shash_desc *init_desc(char type)
+static struct shash_desc *init_desc(char type, uint8_t hash_algo)
{
long rc;
- char *algo;
+ const char *algo;
struct crypto_shash **tfm;
struct shash_desc *desc;
@@ -89,15 +89,16 @@ static struct shash_desc *init_desc(char type)
tfm = &hmac_tfm;
algo = evm_hmac;
} else {
- tfm = &hash_tfm;
- algo = evm_hash;
+ tfm = &evm_tfm[hash_algo];
+ algo = hash_algo_name[hash_algo];
}
if (*tfm == NULL) {
mutex_lock(&mutex);
if (*tfm)
goto out;
- *tfm = crypto_alloc_shash(algo, 0, CRYPTO_ALG_ASYNC);
+ *tfm = crypto_alloc_shash(algo, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_NOLOAD);
if (IS_ERR(*tfm)) {
rc = PTR_ERR(*tfm);
pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
@@ -186,10 +187,10 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
* each xattr, but attempt to re-use the previously allocated memory.
*/
static int evm_calc_hmac_or_hash(struct dentry *dentry,
- const char *req_xattr_name,
- const char *req_xattr_value,
- size_t req_xattr_value_len,
- char type, char *digest)
+ const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len,
+ uint8_t type, struct evm_digest *data)
{
struct inode *inode = d_backing_inode(dentry);
struct xattr_list *xattr;
@@ -204,10 +205,12 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
inode->i_sb->s_user_ns != &init_user_ns)
return -EOPNOTSUPP;
- desc = init_desc(type);
+ desc = init_desc(type, data->hdr.algo);
if (IS_ERR(desc))
return PTR_ERR(desc);
+ data->hdr.length = crypto_shash_digestsize(desc->tfm);
+
error = -ENODATA;
list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
bool is_ima = false;
@@ -239,7 +242,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
if (is_ima)
ima_present = true;
}
- hmac_add_misc(desc, inode, type, digest);
+ hmac_add_misc(desc, inode, type, data->digest);
/* Portable EVM signatures must include an IMA hash */
if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present)
@@ -252,18 +255,18 @@ out:
int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
const char *req_xattr_value, size_t req_xattr_value_len,
- char *digest)
+ struct evm_digest *data)
{
return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
- req_xattr_value_len, EVM_XATTR_HMAC, digest);
+ req_xattr_value_len, EVM_XATTR_HMAC, data);
}
int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
const char *req_xattr_value, size_t req_xattr_value_len,
- char type, char *digest)
+ char type, struct evm_digest *data)
{
return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
- req_xattr_value_len, type, digest);
+ req_xattr_value_len, type, data);
}
static int evm_is_immutable(struct dentry *dentry, struct inode *inode)
@@ -303,7 +306,7 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
const char *xattr_value, size_t xattr_value_len)
{
struct inode *inode = d_backing_inode(dentry);
- struct evm_ima_xattr_data xattr_data;
+ struct evm_digest data;
int rc = 0;
/*
@@ -316,13 +319,14 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
if (rc)
return -EPERM;
+ data.hdr.algo = HASH_ALGO_SHA1;
rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
- xattr_value_len, xattr_data.digest);
+ xattr_value_len, &data);
if (rc == 0) {
- xattr_data.type = EVM_XATTR_HMAC;
+ data.hdr.xattr.sha1.type = EVM_XATTR_HMAC;
rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
- &xattr_data,
- sizeof(xattr_data), 0);
+ &data.hdr.xattr.data[1],
+ SHA1_DIGEST_SIZE + 1, 0);
} else if (rc == -ENODATA && (inode->i_opflags & IOP_XATTR)) {
rc = __vfs_removexattr(dentry, XATTR_NAME_EVM);
}
@@ -334,7 +338,7 @@ int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr,
{
struct shash_desc *desc;
- desc = init_desc(EVM_XATTR_HMAC);
+ desc = init_desc(EVM_XATTR_HMAC, HASH_ALGO_SHA1);
if (IS_ERR(desc)) {
pr_info("init_desc failed\n");
return PTR_ERR(desc);
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index f9eff5041e4c..7f3f54d89a6e 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -25,6 +25,7 @@
#include <linux/magic.h>
#include <crypto/hash.h>
+#include <crypto/hash_info.h>
#include <crypto/algapi.h>
#include "evm.h"
@@ -134,8 +135,9 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
struct integrity_iint_cache *iint)
{
struct evm_ima_xattr_data *xattr_data = NULL;
- struct evm_ima_xattr_data calc;
+ struct signature_v2_hdr *hdr;
enum integrity_status evm_status = INTEGRITY_PASS;
+ struct evm_digest digest;
struct inode *inode;
int rc, xattr_len;
@@ -171,25 +173,28 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
evm_status = INTEGRITY_FAIL;
goto out;
}
+
+ digest.hdr.algo = HASH_ALGO_SHA1;
rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
- xattr_value_len, calc.digest);
+ xattr_value_len, &digest);
if (rc)
break;
- rc = crypto_memneq(xattr_data->digest, calc.digest,
- sizeof(calc.digest));
+ rc = crypto_memneq(xattr_data->digest, digest.digest,
+ SHA1_DIGEST_SIZE);
if (rc)
rc = -EINVAL;
break;
case EVM_IMA_XATTR_DIGSIG:
case EVM_XATTR_PORTABLE_DIGSIG:
+ hdr = (struct signature_v2_hdr *)xattr_data;
+ digest.hdr.algo = hdr->hash_algo;
rc = evm_calc_hash(dentry, xattr_name, xattr_value,
- xattr_value_len, xattr_data->type,
- calc.digest);
+ xattr_value_len, xattr_data->type, &digest);
if (rc)
break;
rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM,
(const char *)xattr_data, xattr_len,
- calc.digest, sizeof(calc.digest));
+ digest.digest, digest.hdr.length);
if (!rc) {
inode = d_backing_inode(dentry);
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
index 637eb999e340..77de71b7794c 100644
--- a/security/integrity/evm/evm_secfs.c
+++ b/security/integrity/evm/evm_secfs.c
@@ -193,8 +193,8 @@ static ssize_t evm_write_xattrs(struct file *file, const char __user *buf,
return -E2BIG;
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_EVM_XATTR);
- if (IS_ERR(ab))
- return PTR_ERR(ab);
+ if (!ab)
+ return -ENOMEM;
xattr = kmalloc(sizeof(struct xattr_list), GFP_KERNEL);
if (!xattr) {
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index 149faa81f6f0..5a6810041e5c 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -219,10 +219,13 @@ static int __init integrity_fs_init(void)
{
integrity_dir = securityfs_create_dir("integrity", NULL);
if (IS_ERR(integrity_dir)) {
- pr_err("Unable to create integrity sysfs dir: %ld\n",
- PTR_ERR(integrity_dir));
+ int ret = PTR_ERR(integrity_dir);
+
+ if (ret != -ENODEV)
+ pr_err("Unable to create integrity sysfs dir: %d\n",
+ ret);
integrity_dir = NULL;
- return PTR_ERR(integrity_dir);
+ return ret;
}
return 0;
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 6a8f67714c83..13b446328dda 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -12,6 +12,7 @@ config IMA
select TCG_TIS if TCG_TPM && X86
select TCG_CRB if TCG_TPM && ACPI
select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
+ select INTEGRITY_AUDIT if AUDIT
help
The Trusted Computing Group (TCG) runtime Integrity
Measurement Architecture (IMA) maintains a list of hash
@@ -156,6 +157,64 @@ config IMA_APPRAISE
<http://linux-ima.sourceforge.net>
If unsure, say N.
+config IMA_APPRAISE_BUILD_POLICY
+ bool "IMA build time configured policy rules"
+ depends on IMA_APPRAISE && INTEGRITY_ASYMMETRIC_KEYS
+ default n
+ help
+ This option defines an IMA appraisal policy at build time, which
+ is enforced at run time without having to specify a builtin
+ policy name on the boot command line. The build time appraisal
+ policy rules persist after loading a custom policy.
+
+ Depending on the rules configured, this policy may require kernel
+ modules, firmware, the kexec kernel image, and/or the IMA policy
+ to be signed. Unsigned files might prevent the system from
+ booting or applications from working properly.
+
+config IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS
+ bool "Appraise firmware signatures"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ This option defines a policy requiring all firmware to be signed,
+ including the regulatory.db. If both this option and
+ CFG80211_REQUIRE_SIGNED_REGDB are enabled, then both signature
+ verification methods are necessary.
+
+config IMA_APPRAISE_REQUIRE_KEXEC_SIGS
+ bool "Appraise kexec kernel image signatures"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ Enabling this rule will require all kexec'ed kernel images to
+ be signed and verified by a public key on the trusted IMA
+ keyring.
+
+ Kernel image signatures can not be verified by the original
+ kexec_load syscall. Enabling this rule will prevent its
+ usage.
+
+config IMA_APPRAISE_REQUIRE_MODULE_SIGS
+ bool "Appraise kernel modules signatures"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ Enabling this rule will require all kernel modules to be signed
+ and verified by a public key on the trusted IMA keyring.
+
+ Kernel module signatures can only be verified by IMA-appraisal,
+ via the finit_module syscall. Enabling this rule will prevent
+ the usage of the init_module syscall.
+
+config IMA_APPRAISE_REQUIRE_POLICY_SIGS
+ bool "Appraise IMA policy signature"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ Enabling this rule will require the IMA policy to be signed
+ and verified by a key on the trusted IMA keyring.
+
config IMA_APPRAISE_BOOTPARAM
bool "ima_appraise boot parameter"
depends on IMA_APPRAISE
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 354bb5716ce3..67db9d9454ca 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -53,9 +53,9 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
extern int ima_policy_flag;
/* set during initialization */
-extern int ima_used_chip;
extern int ima_hash_algo;
extern int ima_appraise;
+extern struct tpm_chip *ima_tpm_chip;
/* IMA event related data */
struct ima_event_data {
@@ -232,13 +232,14 @@ int ima_policy_show(struct seq_file *m, void *v);
#define IMA_APPRAISE_MODULES 0x08
#define IMA_APPRAISE_FIRMWARE 0x10
#define IMA_APPRAISE_POLICY 0x20
+#define IMA_APPRAISE_KEXEC 0x40
#ifdef CONFIG_IMA_APPRAISE
int ima_appraise_measurement(enum ima_hooks func,
struct integrity_iint_cache *iint,
struct file *file, const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len, int opened);
+ int xattr_len);
int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
@@ -254,7 +255,7 @@ static inline int ima_appraise_measurement(enum ima_hooks func,
struct file *file,
const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len, int opened)
+ int xattr_len)
{
return INTEGRITY_UNKNOWN;
}
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 8bd7a0733e51..deec1804a00a 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -212,7 +212,7 @@ int ima_appraise_measurement(enum ima_hooks func,
struct integrity_iint_cache *iint,
struct file *file, const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len, int opened)
+ int xattr_len)
{
static const char op[] = "appraise_data";
const char *cause = "unknown";
@@ -231,7 +231,7 @@ int ima_appraise_measurement(enum ima_hooks func,
cause = iint->flags & IMA_DIGSIG_REQUIRED ?
"IMA-signature-required" : "missing-hash";
status = INTEGRITY_NOLABEL;
- if (opened & FILE_CREATED)
+ if (file->f_mode & FMODE_CREATED)
iint->flags |= IMA_NEW_FILE;
if ((iint->flags & IMA_NEW_FILE) &&
(!(iint->flags & IMA_DIGSIG_REQUIRED) ||
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 4e085a17124f..7e7e7e7c250a 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -631,10 +631,10 @@ int ima_calc_buffer_hash(const void *buf, loff_t len,
static void __init ima_pcrread(int idx, u8 *pcr)
{
- if (!ima_used_chip)
+ if (!ima_tpm_chip)
return;
- if (tpm_pcr_read(NULL, idx, pcr) != 0)
+ if (tpm_pcr_read(ima_tpm_chip, idx, pcr) != 0)
pr_err("Error Communicating to TPM chip\n");
}
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 29b72cd2502e..faac9ecaa0ae 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -26,7 +26,7 @@
/* name for boot aggregate entry */
static const char *boot_aggregate_name = "boot_aggregate";
-int ima_used_chip;
+struct tpm_chip *ima_tpm_chip;
/* Add the boot aggregate to the IMA measurement list and extend
* the PCR register.
@@ -64,7 +64,7 @@ static int __init ima_add_boot_aggregate(void)
iint->ima_hash->algo = HASH_ALGO_SHA1;
iint->ima_hash->length = SHA1_DIGEST_SIZE;
- if (ima_used_chip) {
+ if (ima_tpm_chip) {
result = ima_calc_boot_aggregate(&hash.hdr);
if (result < 0) {
audit_cause = "hashing_error";
@@ -106,17 +106,11 @@ void __init ima_load_x509(void)
int __init ima_init(void)
{
- u8 pcr_i[TPM_DIGEST_SIZE];
int rc;
- ima_used_chip = 0;
- rc = tpm_pcr_read(NULL, 0, pcr_i);
- if (rc == 0)
- ima_used_chip = 1;
-
- if (!ima_used_chip)
- pr_info("No TPM chip found, activating TPM-bypass! (rc=%d)\n",
- rc);
+ ima_tpm_chip = tpm_default_chip();
+ if (!ima_tpm_chip)
+ pr_info("No TPM chip found, activating TPM-bypass!\n");
rc = integrity_init_keyring(INTEGRITY_KEYRING_IMA);
if (rc)
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index dca44cf7838e..2d31921fbda4 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -168,7 +168,7 @@ void ima_file_free(struct file *file)
static int process_measurement(struct file *file, const struct cred *cred,
u32 secid, char *buf, loff_t size, int mask,
- enum ima_hooks func, int opened)
+ enum ima_hooks func)
{
struct inode *inode = file_inode(file);
struct integrity_iint_cache *iint = NULL;
@@ -294,7 +294,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) {
inode_lock(inode);
rc = ima_appraise_measurement(func, iint, file, pathname,
- xattr_value, xattr_len, opened);
+ xattr_value, xattr_len);
inode_unlock(inode);
}
if (action & IMA_AUDIT)
@@ -338,7 +338,7 @@ int ima_file_mmap(struct file *file, unsigned long prot)
if (file && (prot & PROT_EXEC)) {
security_task_getsecid(current, &secid);
return process_measurement(file, current_cred(), secid, NULL,
- 0, MAY_EXEC, MMAP_CHECK, 0);
+ 0, MAY_EXEC, MMAP_CHECK);
}
return 0;
@@ -364,13 +364,13 @@ int ima_bprm_check(struct linux_binprm *bprm)
security_task_getsecid(current, &secid);
ret = process_measurement(bprm->file, current_cred(), secid, NULL, 0,
- MAY_EXEC, BPRM_CHECK, 0);
+ MAY_EXEC, BPRM_CHECK);
if (ret)
return ret;
security_cred_getsecid(bprm->cred, &secid);
return process_measurement(bprm->file, bprm->cred, secid, NULL, 0,
- MAY_EXEC, CREDS_CHECK, 0);
+ MAY_EXEC, CREDS_CHECK);
}
/**
@@ -383,14 +383,14 @@ int ima_bprm_check(struct linux_binprm *bprm)
* On success return 0. On integrity appraisal error, assuming the file
* is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
*/
-int ima_file_check(struct file *file, int mask, int opened)
+int ima_file_check(struct file *file, int mask)
{
u32 secid;
security_task_getsecid(current, &secid);
return process_measurement(file, current_cred(), secid, NULL, 0,
mask & (MAY_READ | MAY_WRITE | MAY_EXEC |
- MAY_APPEND), FILE_CHECK, opened);
+ MAY_APPEND), FILE_CHECK);
}
EXPORT_SYMBOL_GPL(ima_file_check);
@@ -429,16 +429,14 @@ void ima_post_path_mknod(struct dentry *dentry)
*/
int ima_read_file(struct file *file, enum kernel_read_file_id read_id)
{
- bool sig_enforce = is_module_sig_enforced();
-
- if (!file && read_id == READING_MODULE) {
- if (!sig_enforce && (ima_appraise & IMA_APPRAISE_MODULES) &&
- (ima_appraise & IMA_APPRAISE_ENFORCE)) {
- pr_err("impossible to appraise a module without a file descriptor. sig_enforce kernel parameter might help\n");
- return -EACCES; /* INTEGRITY_UNKNOWN */
- }
- return 0; /* We rely on module signature checking */
- }
+ /*
+ * READING_FIRMWARE_PREALLOC_BUFFER
+ *
+ * Do devices using pre-allocated memory run the risk of the
+ * firmware being accessible to the device prior to the completion
+ * of IMA's signature verification any more than when using two
+ * buffers?
+ */
return 0;
}
@@ -472,14 +470,13 @@ int ima_post_read_file(struct file *file, void *buf, loff_t size,
if (!file && read_id == READING_FIRMWARE) {
if ((ima_appraise & IMA_APPRAISE_FIRMWARE) &&
- (ima_appraise & IMA_APPRAISE_ENFORCE))
+ (ima_appraise & IMA_APPRAISE_ENFORCE)) {
+ pr_err("Prevent firmware loading_store.\n");
return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
return 0;
}
- if (!file && read_id == READING_MODULE) /* MODULE_SIG_FORCE enabled */
- return 0;
-
/* permit signed certs */
if (!file && read_id == READING_X509_CERTIFICATE)
return 0;
@@ -493,7 +490,50 @@ int ima_post_read_file(struct file *file, void *buf, loff_t size,
func = read_idmap[read_id] ?: FILE_CHECK;
security_task_getsecid(current, &secid);
return process_measurement(file, current_cred(), secid, buf, size,
- MAY_READ, func, 0);
+ MAY_READ, func);
+}
+
+/**
+ * ima_load_data - appraise decision based on policy
+ * @id: kernel load data caller identifier
+ *
+ * Callers of this LSM hook can not measure, appraise, or audit the
+ * data provided by userspace. Enforce policy rules requiring a file
+ * signature (e.g. a kexec'ed kernel image).
+ *
+ * For permission return 0, otherwise return -EACCES.
+ */
+int ima_load_data(enum kernel_load_data_id id)
+{
+ bool sig_enforce;
+
+ if ((ima_appraise & IMA_APPRAISE_ENFORCE) != IMA_APPRAISE_ENFORCE)
+ return 0;
+
+ switch (id) {
+ case LOADING_KEXEC_IMAGE:
+ if (ima_appraise & IMA_APPRAISE_KEXEC) {
+ pr_err("impossible to appraise a kernel image without a file descriptor; try using kexec_file_load syscall.\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ break;
+ case LOADING_FIRMWARE:
+ if (ima_appraise & IMA_APPRAISE_FIRMWARE) {
+ pr_err("Prevent firmware sysfs fallback loading.\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ break;
+ case LOADING_MODULE:
+ sig_enforce = is_module_sig_enforced();
+
+ if (!sig_enforce && (ima_appraise & IMA_APPRAISE_MODULES)) {
+ pr_err("impossible to appraise a module without a file descriptor. sig_enforce kernel parameter might help\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ default:
+ break;
+ }
+ return 0;
}
static int __init init_ima(void)
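The gate at the top of ima_load_data() is a plain flag test. A runnable user-space sketch of the decision follows; the IMA_APPRAISE_ENFORCE value is an assumption, since only the 0x08-0x40 flags appear in this patch:

	#include <stdio.h>

	#define IMA_APPRAISE_ENFORCE 0x01 /* assumed; not shown in this hunk */
	#define IMA_APPRAISE_KEXEC   0x40 /* matches the define added in ima.h */

	int main(void)
	{
		int ima_appraise = IMA_APPRAISE_ENFORCE | IMA_APPRAISE_KEXEC;

		if ((ima_appraise & IMA_APPRAISE_ENFORCE) != IMA_APPRAISE_ENFORCE)
			puts("not enforcing: permit the syscall");
		else if (ima_appraise & IMA_APPRAISE_KEXEC)
			puts("deny kexec_load: image cannot be appraised");

		return 0;
	}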
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index cdcc9a7b4e24..8c9499867c91 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -49,6 +49,7 @@
int ima_policy_flag;
static int temp_ima_appraise;
+static int build_ima_appraise __ro_after_init;
#define MAX_LSM_RULES 6
enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
@@ -162,6 +163,25 @@ static struct ima_rule_entry default_appraise_rules[] __ro_after_init = {
#endif
};
+static struct ima_rule_entry build_appraise_rules[] __ro_after_init = {
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS
+ {.action = APPRAISE, .func = MODULE_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS
+ {.action = APPRAISE, .func = FIRMWARE_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS
+ {.action = APPRAISE, .func = KEXEC_KERNEL_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS
+ {.action = APPRAISE, .func = POLICY_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+};
+
static struct ima_rule_entry secure_boot_rules[] __ro_after_init = {
{.action = APPRAISE, .func = MODULE_CHECK,
.flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
@@ -435,7 +455,7 @@ void ima_update_policy_flag(void)
ima_policy_flag |= entry->action;
}
- ima_appraise |= temp_ima_appraise;
+ ima_appraise |= (build_ima_appraise | temp_ima_appraise);
if (!ima_appraise)
ima_policy_flag &= ~IMA_APPRAISE;
}
@@ -448,6 +468,8 @@ static int ima_appraise_flag(enum ima_hooks func)
return IMA_APPRAISE_FIRMWARE;
else if (func == POLICY_CHECK)
return IMA_APPRAISE_POLICY;
+ else if (func == KEXEC_KERNEL_CHECK)
+ return IMA_APPRAISE_KEXEC;
return 0;
}
@@ -486,8 +508,8 @@ void __init ima_init_policy(void)
}
/*
- * Insert the appraise rules requiring file signatures, prior to
- * any other appraise rules.
+ * Insert the builtin "secure_boot" policy rules requiring file
+ * signatures, prior to any other appraise rules.
*/
for (i = 0; i < secure_boot_entries; i++) {
list_add_tail(&secure_boot_rules[i].list, &ima_default_rules);
@@ -495,6 +517,26 @@ void __init ima_init_policy(void)
ima_appraise_flag(secure_boot_rules[i].func);
}
+ /*
+ * Insert the build time appraise rules requiring file signatures
+ * for both the initial and custom policies, prior to other appraise
+ * rules.
+ */
+ for (i = 0; i < ARRAY_SIZE(build_appraise_rules); i++) {
+ struct ima_rule_entry *entry;
+
+ if (!secure_boot_entries)
+ list_add_tail(&build_appraise_rules[i].list,
+ &ima_default_rules);
+
+ entry = kmemdup(&build_appraise_rules[i], sizeof(*entry),
+ GFP_KERNEL);
+ if (entry)
+ list_add_tail(&entry->list, &ima_policy_rules);
+ build_ima_appraise |=
+ ima_appraise_flag(build_appraise_rules[i].func);
+ }
+
for (i = 0; i < appraise_entries; i++) {
list_add_tail(&default_appraise_rules[i].list,
&ima_default_rules);
@@ -615,14 +657,16 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
static void ima_log_string_op(struct audit_buffer *ab, char *key, char *value,
bool (*rule_operator)(kuid_t, kuid_t))
{
+ if (!ab)
+ return;
+
if (rule_operator == &uid_gt)
audit_log_format(ab, "%s>", key);
else if (rule_operator == &uid_lt)
audit_log_format(ab, "%s<", key);
else
audit_log_format(ab, "%s=", key);
- audit_log_untrustedstring(ab, value);
- audit_log_format(ab, " ");
+ audit_log_format(ab, "%s ", value);
}
static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
{
@@ -637,7 +681,8 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
bool uid_token;
int result = 0;
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_RULE);
+ ab = integrity_audit_log_start(audit_context(), GFP_KERNEL,
+ AUDIT_INTEGRITY_POLICY_RULE);
entry->uid = INVALID_UID;
entry->fowner = INVALID_UID;
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 418f35e38015..b186819bd5aa 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -142,10 +142,10 @@ static int ima_pcr_extend(const u8 *hash, int pcr)
{
int result = 0;
- if (!ima_used_chip)
+ if (!ima_tpm_chip)
return result;
- result = tpm_pcr_extend(NULL, pcr, hash);
+ result = tpm_pcr_extend(ima_tpm_chip, pcr, hash);
if (result != 0)
pr_err("Error Communicating to TPM chip, result: %d\n", result);
return result;
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index 0bb372eed62a..e60473b13a8d 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -15,6 +15,7 @@
#include <linux/integrity.h>
#include <crypto/sha.h>
#include <linux/key.h>
+#include <linux/audit.h>
/* iint action cache flags */
#define IMA_MEASURE 0x00000001
@@ -199,6 +200,13 @@ static inline void evm_load_x509(void)
void integrity_audit_msg(int audit_msgno, struct inode *inode,
const unsigned char *fname, const char *op,
const char *cause, int result, int info);
+
+static inline struct audit_buffer *
+integrity_audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type)
+{
+ return audit_log_start(ctx, gfp_mask, type);
+}
+
#else
static inline void integrity_audit_msg(int audit_msgno, struct inode *inode,
const unsigned char *fname,
@@ -206,4 +214,11 @@ static inline void integrity_audit_msg(int audit_msgno, struct inode *inode,
int result, int info)
{
}
+
+static inline struct audit_buffer *
+integrity_audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type)
+{
+ return NULL;
+}
+
#endif
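The !ab guard added to ima_log_string_op() earlier in this patch pairs with the NULL-returning integrity_audit_log_start() stub above: when audit support is compiled out, callers get a NULL buffer and each logging helper quietly becomes a no-op. A user-space sketch of the pattern, with all names as local stand-ins:

	#include <stdio.h>
	#include <stdarg.h>

	struct audit_buffer {
		char data[256];
		size_t len;
	};

	static struct audit_buffer *audit_log_start(int audit_enabled)
	{
		static struct audit_buffer ab;

		return audit_enabled ? &ab : NULL; /* NULL: audit compiled out */
	}

	static void audit_log_format(struct audit_buffer *ab,
				     const char *fmt, ...)
	{
		va_list ap;

		if (!ab)
			return; /* mirrors the new "if (!ab) return;" guard */

		va_start(ap, fmt);
		ab->len += vsnprintf(ab->data + ab->len,
				     sizeof(ab->data) - ab->len, fmt, ap);
		va_end(ap);
	}

	int main(void)
	{
		struct audit_buffer *ab = audit_log_start(0);

		audit_log_format(ab, "func=%s ", "MODULE_CHECK"); /* safe no-op */
		return 0;
	}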
diff --git a/security/integrity/integrity_audit.c b/security/integrity/integrity_audit.c
index ab10a25310a1..82c98f7d217e 100644
--- a/security/integrity/integrity_audit.c
+++ b/security/integrity/integrity_audit.c
@@ -45,11 +45,7 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
audit_log_task_context(ab);
- audit_log_format(ab, " op=");
- audit_log_string(ab, op);
- audit_log_format(ab, " cause=");
- audit_log_string(ab, cause);
- audit_log_format(ab, " comm=");
+ audit_log_format(ab, " op=%s cause=%s comm=", op, cause);
audit_log_untrustedstring(ab, get_task_comm(name, current));
if (fname) {
audit_log_format(ab, " name=");
diff --git a/security/keys/dh.c b/security/keys/dh.c
index b203f7758f97..711e89d8c415 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -317,7 +317,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
if (ret)
goto out3;
- tfm = crypto_alloc_kpp("dh", CRYPTO_ALG_TYPE_KPP, 0);
+ tfm = crypto_alloc_kpp("dh", 0, 0);
if (IS_ERR(tfm)) {
ret = PTR_ERR(tfm);
goto out3;
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
index 5fa191252c8f..0716af28808a 100644
--- a/security/loadpin/loadpin.c
+++ b/security/loadpin/loadpin.c
@@ -173,9 +173,15 @@ static int loadpin_read_file(struct file *file, enum kernel_read_file_id id)
return 0;
}
+static int loadpin_load_data(enum kernel_load_data_id id)
+{
+ return loadpin_read_file(NULL, (enum kernel_read_file_id) id);
+}
+
static struct security_hook_list loadpin_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(sb_free_security, loadpin_sb_free_security),
LSM_HOOK_INIT(kernel_read_file, loadpin_read_file),
+ LSM_HOOK_INIT(kernel_load_data, loadpin_load_data),
};
void __init loadpin_add_hooks(void)
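loadpin_load_data() reuses the read_file logic through an enum cast, which only holds because kernel_load_data_id and kernel_read_file_id keep their members in lockstep (in the kernel both enums are generated from the same id list). A user-space sketch of that aliasing, with illustrative enum values:

	#include <stdio.h>

	/* illustrative values only; the kernel derives both from one list */
	enum kernel_read_file_id { READING_FIRMWARE = 1, READING_MODULE = 2 };
	enum kernel_load_data_id { LOADING_FIRMWARE = 1, LOADING_MODULE = 2 };

	static int read_file_check(enum kernel_read_file_id id)
	{
		return id == READING_MODULE ? -13 /* -EACCES */ : 0;
	}

	static int load_data_check(enum kernel_load_data_id id)
	{
		/* same cast as loadpin_load_data() above */
		return read_file_check((enum kernel_read_file_id)id);
	}

	int main(void)
	{
		printf("%d\n", load_data_check(LOADING_MODULE));   /* -13 */
		printf("%d\n", load_data_check(LOADING_FIRMWARE)); /* 0 */
		return 0;
	}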
diff --git a/security/security.c b/security/security.c
index 68f46d849abe..47cfff01d7ec 100644
--- a/security/security.c
+++ b/security/security.c
@@ -118,6 +118,8 @@ static int lsm_append(char *new, char **result)
if (*result == NULL) {
*result = kstrdup(new, GFP_KERNEL);
+ if (*result == NULL)
+ return -ENOMEM;
} else {
/* Check if it is the last registered name */
if (match_last_lsm(*result, new))
@@ -970,11 +972,11 @@ int security_file_receive(struct file *file)
return call_int_hook(file_receive, 0, file);
}
-int security_file_open(struct file *file, const struct cred *cred)
+int security_file_open(struct file *file)
{
int ret;
- ret = call_int_hook(file_open, 0, file, cred);
+ ret = call_int_hook(file_open, 0, file);
if (ret)
return ret;
@@ -1030,7 +1032,12 @@ int security_kernel_create_files_as(struct cred *new, struct inode *inode)
int security_kernel_module_request(char *kmod_name)
{
- return call_int_hook(kernel_module_request, 0, kmod_name);
+ int ret;
+
+ ret = call_int_hook(kernel_module_request, 0, kmod_name);
+ if (ret)
+ return ret;
+ return integrity_kernel_module_request(kmod_name);
}
int security_kernel_read_file(struct file *file, enum kernel_read_file_id id)
@@ -1056,6 +1063,17 @@ int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
}
EXPORT_SYMBOL_GPL(security_kernel_post_read_file);
+int security_kernel_load_data(enum kernel_load_data_id id)
+{
+ int ret;
+
+ ret = call_int_hook(kernel_load_data, 0, id);
+ if (ret)
+ return ret;
+ return ima_load_data(id);
+}
+EXPORT_SYMBOL_GPL(security_kernel_load_data);
+
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags)
{
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index f3aedf077509..635e5c1e3e48 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -650,7 +650,7 @@ static int avc_latest_notif_update(struct selinux_avc *avc,
spin_lock_irqsave(&notif_lock, flag);
if (is_insert) {
if (seqno < avc->avc_cache.latest_notif) {
- printk(KERN_WARNING "SELinux: avc: seqno %d < latest_notif %d\n",
+ pr_warn("SELinux: avc: seqno %d < latest_notif %d\n",
seqno, avc->avc_cache.latest_notif);
ret = -EAGAIN;
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 2b5ee5fbd652..ad9a9b8e9979 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -530,7 +530,7 @@ static int sb_finish_set_opts(struct super_block *sb)
the first boot of the SELinux kernel before we have
assigned xattr values to the filesystem. */
if (!(root_inode->i_opflags & IOP_XATTR)) {
- printk(KERN_WARNING "SELinux: (dev %s, type %s) has no "
+ pr_warn("SELinux: (dev %s, type %s) has no "
"xattr support\n", sb->s_id, sb->s_type->name);
rc = -EOPNOTSUPP;
goto out;
@@ -539,11 +539,11 @@ static int sb_finish_set_opts(struct super_block *sb)
rc = __vfs_getxattr(root, root_inode, XATTR_NAME_SELINUX, NULL, 0);
if (rc < 0 && rc != -ENODATA) {
if (rc == -EOPNOTSUPP)
- printk(KERN_WARNING "SELinux: (dev %s, type "
+ pr_warn("SELinux: (dev %s, type "
"%s) has no security xattr handler\n",
sb->s_id, sb->s_type->name);
else
- printk(KERN_WARNING "SELinux: (dev %s, type "
+ pr_warn("SELinux: (dev %s, type "
"%s) getxattr errno %d\n", sb->s_id,
sb->s_type->name, -rc);
goto out;
@@ -742,7 +742,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
goto out;
}
rc = -EINVAL;
- printk(KERN_WARNING "SELinux: Unable to set superblock options "
+ pr_warn("SELinux: Unable to set superblock options "
"before the security server is initialized\n");
goto out;
}
@@ -784,7 +784,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
mount_options[i], &sid,
GFP_KERNEL);
if (rc) {
- printk(KERN_WARNING "SELinux: security_context_str_to_sid"
+ pr_warn("SELinux: security_context_str_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
mount_options[i], sb->s_id, name, rc);
goto out;
@@ -860,8 +860,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
*/
rc = security_fs_use(&selinux_state, sb);
if (rc) {
- printk(KERN_WARNING
- "%s: security_fs_use(%s) returned %d\n",
+ pr_warn("%s: security_fs_use(%s) returned %d\n",
__func__, sb->s_type->name, rc);
goto out;
}
@@ -947,7 +946,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
if (sbsec->behavior != SECURITY_FS_USE_XATTR &&
sbsec->behavior != SECURITY_FS_USE_NATIVE) {
rc = -EINVAL;
- printk(KERN_WARNING "SELinux: defcontext option is "
+ pr_warn("SELinux: defcontext option is "
"invalid for this filesystem type\n");
goto out;
}
@@ -969,7 +968,7 @@ out:
return rc;
out_double_mount:
rc = -EINVAL;
- printk(KERN_WARNING "SELinux: mount invalid. Same superblock, different "
+ pr_warn("SELinux: mount invalid. Same superblock, different "
"security settings for (dev %s, type %s)\n", sb->s_id, name);
goto out;
}
@@ -998,7 +997,7 @@ static int selinux_cmp_sb_context(const struct super_block *oldsb,
}
return 0;
mismatch:
- printk(KERN_WARNING "SELinux: mount invalid. Same superblock, "
+ pr_warn("SELinux: mount invalid. Same superblock, "
"different security settings for (dev %s, "
"type %s)\n", newsb->s_id, newsb->s_type->name);
return -EBUSY;
@@ -1106,7 +1105,7 @@ static int selinux_parse_opts_str(char *options,
case Opt_context:
if (context || defcontext) {
rc = -EINVAL;
- printk(KERN_WARNING SEL_MOUNT_FAIL_MSG);
+ pr_warn(SEL_MOUNT_FAIL_MSG);
goto out_err;
}
context = match_strdup(&args[0]);
@@ -1119,7 +1118,7 @@ static int selinux_parse_opts_str(char *options,
case Opt_fscontext:
if (fscontext) {
rc = -EINVAL;
- printk(KERN_WARNING SEL_MOUNT_FAIL_MSG);
+ pr_warn(SEL_MOUNT_FAIL_MSG);
goto out_err;
}
fscontext = match_strdup(&args[0]);
@@ -1132,7 +1131,7 @@ static int selinux_parse_opts_str(char *options,
case Opt_rootcontext:
if (rootcontext) {
rc = -EINVAL;
- printk(KERN_WARNING SEL_MOUNT_FAIL_MSG);
+ pr_warn(SEL_MOUNT_FAIL_MSG);
goto out_err;
}
rootcontext = match_strdup(&args[0]);
@@ -1145,7 +1144,7 @@ static int selinux_parse_opts_str(char *options,
case Opt_defcontext:
if (context || defcontext) {
rc = -EINVAL;
- printk(KERN_WARNING SEL_MOUNT_FAIL_MSG);
+ pr_warn(SEL_MOUNT_FAIL_MSG);
goto out_err;
}
defcontext = match_strdup(&args[0]);
@@ -1158,7 +1157,7 @@ static int selinux_parse_opts_str(char *options,
break;
default:
rc = -EINVAL;
- printk(KERN_WARNING "SELinux: unknown mount option\n");
+ pr_warn("SELinux: unknown mount option\n");
goto out_err;
}
@@ -1623,7 +1622,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
dput(dentry);
if (rc < 0) {
if (rc != -ENODATA) {
- printk(KERN_WARNING "SELinux: %s: getxattr returned "
+ pr_warn("SELinux: %s: getxattr returned "
"%d for dev=%s ino=%ld\n", __func__,
-rc, inode->i_sb->s_id, inode->i_ino);
kfree(context);
@@ -1643,11 +1642,11 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
if (rc == -EINVAL) {
if (printk_ratelimit())
- printk(KERN_NOTICE "SELinux: inode=%lu on dev=%s was found to have an invalid "
+ pr_notice("SELinux: inode=%lu on dev=%s was found to have an invalid "
"context=%s. This indicates you may need to relabel the inode or the "
"filesystem in question.\n", ino, dev, context);
} else {
- printk(KERN_WARNING "SELinux: %s: context_to_sid(%s) "
+ pr_warn("SELinux: %s: context_to_sid(%s) "
"returned %d for dev=%s ino=%ld\n",
__func__, context, -rc, dev, ino);
}
@@ -1785,8 +1784,7 @@ static int cred_has_capability(const struct cred *cred,
sclass = initns ? SECCLASS_CAPABILITY2 : SECCLASS_CAP2_USERNS;
break;
default:
- printk(KERN_ERR
- "SELinux: out of range capability %d\n", cap);
+ pr_err("SELinux: out of range capability %d\n", cap);
BUG();
return -EINVAL;
}
@@ -2029,7 +2027,7 @@ static int may_link(struct inode *dir,
av = DIR__RMDIR;
break;
default:
- printk(KERN_WARNING "SELinux: %s: unrecognized kind %d\n",
+ pr_warn("SELinux: %s: unrecognized kind %d\n",
__func__, kind);
return 0;
}
@@ -2875,7 +2873,7 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
mount_options[i], &sid,
GFP_KERNEL);
if (rc) {
- printk(KERN_WARNING "SELinux: security_context_str_to_sid"
+ pr_warn("SELinux: security_context_str_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
mount_options[i], sb->s_id, sb->s_type->name, rc);
goto out_free_opts;
@@ -2914,7 +2912,7 @@ out_free_secdata:
free_secdata(secdata);
return rc;
out_bad_option:
- printk(KERN_WARNING "SELinux: unable to change security options "
+ pr_warn("SELinux: unable to change security options "
"during remount (dev %s, type=%s)\n", sb->s_id,
sb->s_type->name);
goto out_free_opts;
@@ -3357,7 +3355,7 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
rc = security_context_to_sid_force(&selinux_state, value, size,
&newsid);
if (rc) {
- printk(KERN_ERR "SELinux: unable to map context to SID"
+ pr_err("SELinux: unable to map context to SID"
"for (%s, %lu), rc=%d\n",
inode->i_sb->s_id, inode->i_ino, -rc);
return;
@@ -3862,7 +3860,7 @@ static int selinux_file_receive(struct file *file)
return file_has_perm(cred, file, file_to_av(file));
}
-static int selinux_file_open(struct file *file, const struct cred *cred)
+static int selinux_file_open(struct file *file)
{
struct file_security_struct *fsec;
struct inode_security_struct *isec;
@@ -3886,7 +3884,7 @@ static int selinux_file_open(struct file *file, const struct cred *cred)
* new inode label or new policy.
* This check is not redundant - do not remove.
*/
- return file_path_has_perm(cred, file, open_file_to_av(file));
+ return file_path_has_perm(file->f_cred, file, open_file_to_av(file));
}
/* task security operations */
@@ -4073,6 +4071,20 @@ static int selinux_kernel_read_file(struct file *file,
return rc;
}
+static int selinux_kernel_load_data(enum kernel_load_data_id id)
+{
+ int rc = 0;
+
+ switch (id) {
+ case LOADING_MODULE:
+ rc = selinux_kernel_module_from_file(NULL);
+ default:
+ break;
+ }
+
+ return rc;
+}
+
static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
{
return avc_has_perm(&selinux_state,
@@ -4420,7 +4432,7 @@ static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad,
}
parse_error:
- printk(KERN_WARNING
+ pr_warn(
"SELinux: failure in selinux_parse_skb(),"
" unable to parse packet\n");
return ret;
@@ -4463,7 +4475,7 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
err = security_net_peersid_resolve(&selinux_state, nlbl_sid,
nlbl_type, xfrm_sid, sid);
if (unlikely(err)) {
- printk(KERN_WARNING
+ pr_warn(
"SELinux: failure in selinux_skb_peerlbl_sid(),"
" unable to determine packet's peer label\n");
return -EACCES;
@@ -6972,6 +6984,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(kernel_act_as, selinux_kernel_act_as),
LSM_HOOK_INIT(kernel_create_files_as, selinux_kernel_create_files_as),
LSM_HOOK_INIT(kernel_module_request, selinux_kernel_module_request),
+ LSM_HOOK_INIT(kernel_load_data, selinux_kernel_load_data),
LSM_HOOK_INIT(kernel_read_file, selinux_kernel_read_file),
LSM_HOOK_INIT(task_setpgid, selinux_task_setpgid),
LSM_HOOK_INIT(task_getpgid, selinux_task_getpgid),
@@ -7126,11 +7139,11 @@ static __init int selinux_init(void)
}
if (!selinux_enabled) {
- printk(KERN_INFO "SELinux: Disabled at boot.\n");
+ pr_info("SELinux: Disabled at boot.\n");
return 0;
}
- printk(KERN_INFO "SELinux: Initializing.\n");
+ pr_info("SELinux: Initializing.\n");
memset(&selinux_state, 0, sizeof(selinux_state));
enforcing_set(&selinux_state, selinux_enforcing_boot);
@@ -7166,9 +7179,9 @@ static __init int selinux_init(void)
panic("SELinux: Unable to register AVC LSM notifier callback\n");
if (selinux_enforcing_boot)
- printk(KERN_DEBUG "SELinux: Starting in enforcing mode\n");
+ pr_debug("SELinux: Starting in enforcing mode\n");
else
- printk(KERN_DEBUG "SELinux: Starting in permissive mode\n");
+ pr_debug("SELinux: Starting in permissive mode\n");
return 0;
}
@@ -7180,10 +7193,10 @@ static void delayed_superblock_init(struct super_block *sb, void *unused)
void selinux_complete_init(void)
{
- printk(KERN_DEBUG "SELinux: Completing initialization.\n");
+ pr_debug("SELinux: Completing initialization.\n");
/* Set up any superblocks initialized prior to the policy load. */
- printk(KERN_DEBUG "SELinux: Setting up existing superblocks.\n");
+ pr_debug("SELinux: Setting up existing superblocks.\n");
iterate_supers(delayed_superblock_init, NULL);
}
@@ -7258,7 +7271,7 @@ static int __init selinux_nf_ip_init(void)
if (!selinux_enabled)
return 0;
- printk(KERN_DEBUG "SELinux: Registering netfilter hooks\n");
+ pr_debug("SELinux: Registering netfilter hooks\n");
err = register_pernet_subsys(&selinux_net_ops);
if (err)
@@ -7271,7 +7284,7 @@ __initcall(selinux_nf_ip_init);
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
static void selinux_nf_ip_exit(void)
{
- printk(KERN_DEBUG "SELinux: Unregistering netfilter hooks\n");
+ pr_debug("SELinux: Unregistering netfilter hooks\n");
unregister_pernet_subsys(&selinux_net_ops);
}
@@ -7300,7 +7313,7 @@ int selinux_disable(struct selinux_state *state)
state->disabled = 1;
- printk(KERN_INFO "SELinux: Disabled at runtime.\n");
+ pr_info("SELinux: Disabled at runtime.\n");
selinux_enabled = 0;
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index ac65f7417413..8c738c189942 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -145,9 +145,8 @@ static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
dev = dev_get_by_index(ns, ifindex);
if (unlikely(dev == NULL)) {
- printk(KERN_WARNING
- "SELinux: failure in sel_netif_sid_slow(),"
- " invalid network interface (%d)\n", ifindex);
+ pr_warn("SELinux: failure in %s(), invalid network interface (%d)\n",
+ __func__, ifindex);
return -ENOENT;
}
@@ -177,10 +176,8 @@ out:
spin_unlock_bh(&sel_netif_lock);
dev_put(dev);
if (unlikely(ret)) {
- printk(KERN_WARNING
- "SELinux: failure in sel_netif_sid_slow(),"
- " unable to determine network interface label (%d)\n",
- ifindex);
+ pr_warn("SELinux: failure in %s(), unable to determine network interface label (%d)\n",
+ __func__, ifindex);
kfree(new);
}
return ret;
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index 828fb6a4e941..8a8a72507437 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -94,7 +94,7 @@ out:
out_kfree_skb:
kfree_skb(skb);
oom:
- printk(KERN_ERR "SELinux: OOM in %s\n", __func__);
+ pr_err("SELinux: OOM in %s\n", __func__);
goto out;
}
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index 6dd89b89bc1f..afa0d432436b 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -238,9 +238,8 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
out:
spin_unlock_bh(&sel_netnode_lock);
if (unlikely(ret)) {
- printk(KERN_WARNING
- "SELinux: failure in sel_netnode_sid_slow(),"
- " unable to determine network node label\n");
+ pr_warn("SELinux: failure in %s(), unable to determine network node label\n",
+ __func__);
kfree(new);
}
return ret;
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index 9ed4c5064a5e..7a141cadbffc 100644
--- a/security/selinux/netport.c
+++ b/security/selinux/netport.c
@@ -173,9 +173,8 @@ static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
out:
spin_unlock_bh(&sel_netport_lock);
if (unlikely(ret)) {
- printk(KERN_WARNING
- "SELinux: failure in sel_netport_sid_slow(),"
- " unable to determine network port label\n");
+ pr_warn("SELinux: failure in %s(), unable to determine network port label\n",
+ __func__);
kfree(new);
}
return ret;
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 7b7433a1a34c..74b951f55608 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -159,7 +159,7 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
switch (sclass) {
case SECCLASS_NETLINK_ROUTE_SOCKET:
/* RTM_MAX always points to RTM_SETxxxx, i.e. RTM_NEWxxx + 3 */
- BUILD_BUG_ON(RTM_MAX != (RTM_NEWCACHEREPORT + 3));
+ BUILD_BUG_ON(RTM_MAX != (RTM_NEWCHAIN + 3));
err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
sizeof(nlmsg_route_perms));
break;
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 79d3709b0671..f3a5a138a096 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -614,7 +614,7 @@ static ssize_t sel_write_context(struct file *file, char *buf, size_t size)
length = -ERANGE;
if (len > SIMPLE_TRANSACTION_LIMIT) {
- printk(KERN_ERR "SELinux: %s: context size (%u) exceeds "
+ pr_err("SELinux: %s: context size (%u) exceeds "
"payload max\n", __func__, len);
goto out;
}
@@ -767,7 +767,7 @@ static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size);
static ssize_t sel_write_user(struct file *file, char *buf, size_t size);
static ssize_t sel_write_member(struct file *file, char *buf, size_t size);
-static ssize_t (*write_op[])(struct file *, char *, size_t) = {
+static ssize_t (*const write_op[])(struct file *, char *, size_t) = {
[SEL_ACCESS] = sel_write_access,
[SEL_CREATE] = sel_write_create,
[SEL_RELABEL] = sel_write_relabel,
@@ -950,7 +950,7 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
length = -ERANGE;
if (len > SIMPLE_TRANSACTION_LIMIT) {
- printk(KERN_ERR "SELinux: %s: context size (%u) exceeds "
+ pr_err("SELinux: %s: context size (%u) exceeds "
"payload max\n", __func__, len);
goto out;
}
@@ -1141,7 +1141,7 @@ static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
length = -ERANGE;
if (len > SIMPLE_TRANSACTION_LIMIT) {
- printk(KERN_ERR "SELinux: %s: context size (%u) exceeds "
+ pr_err("SELinux: %s: context size (%u) exceeds "
"payload max\n", __func__, len);
goto out;
}
@@ -1365,13 +1365,18 @@ static int sel_make_bools(struct selinux_fs_info *fsi)
ret = -ENOMEM;
inode = sel_make_inode(dir->d_sb, S_IFREG | S_IRUGO | S_IWUSR);
- if (!inode)
+ if (!inode) {
+ dput(dentry);
goto out;
+ }
ret = -ENAMETOOLONG;
len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
- if (len >= PAGE_SIZE)
+ if (len >= PAGE_SIZE) {
+ dput(dentry);
+ iput(inode);
goto out;
+ }
isec = (struct inode_security_struct *)inode->i_security;
ret = security_genfs_sid(fsi->state, "selinuxfs", page,
@@ -1586,8 +1591,10 @@ static int sel_make_avc_files(struct dentry *dir)
return -ENOMEM;
inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode);
- if (!inode)
+ if (!inode) {
+ dput(dentry);
return -ENOMEM;
+ }
inode->i_fop = files[i].ops;
inode->i_ino = ++fsi->last_ino;
@@ -1632,8 +1639,10 @@ static int sel_make_initcon_files(struct dentry *dir)
return -ENOMEM;
inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
- if (!inode)
+ if (!inode) {
+ dput(dentry);
return -ENOMEM;
+ }
inode->i_fop = &sel_initcon_ops;
inode->i_ino = i|SEL_INITCON_INO_OFFSET;
@@ -1733,8 +1742,10 @@ static int sel_make_perm_files(char *objclass, int classvalue,
rc = -ENOMEM;
inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
- if (!inode)
+ if (!inode) {
+ dput(dentry);
goto out;
+ }
inode->i_fop = &sel_perm_ops;
/* i+1 since perm values are 1-indexed */
@@ -1763,8 +1774,10 @@ static int sel_make_class_dir_entries(char *classname, int index,
return -ENOMEM;
inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
- if (!inode)
+ if (!inode) {
+ dput(dentry);
return -ENOMEM;
+ }
inode->i_fop = &sel_class_ops;
inode->i_ino = sel_class_to_ino(index);
@@ -1838,8 +1851,10 @@ static int sel_make_policycap(struct selinux_fs_info *fsi)
return -ENOMEM;
inode = sel_make_inode(fsi->sb, S_IFREG | 0444);
- if (inode == NULL)
+ if (inode == NULL) {
+ dput(dentry);
return -ENOMEM;
+ }
inode->i_fop = &sel_policycap_ops;
inode->i_ino = iter | SEL_POLICYCAP_INO_OFFSET;
@@ -1932,8 +1947,10 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
ret = -ENOMEM;
inode = sel_make_inode(sb, S_IFCHR | S_IRUGO | S_IWUGO);
- if (!inode)
+ if (!inode) {
+ dput(dentry);
goto err;
+ }
inode->i_ino = ++fsi->last_ino;
isec = (struct inode_security_struct *)inode->i_security;
@@ -1984,7 +2001,7 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
goto err;
return 0;
err:
- printk(KERN_ERR "SELinux: %s: failed while creating inodes\n",
+ pr_err("SELinux: %s: failed while creating inodes\n",
__func__);
selinux_fs_info_free(sb);
@@ -2034,7 +2051,7 @@ static int __init init_sel_fs(void)
selinux_null.mnt = selinuxfs_mount = kern_mount(&sel_fs_type);
if (IS_ERR(selinuxfs_mount)) {
- printk(KERN_ERR "selinuxfs: could not mount!\n");
+ pr_err("selinuxfs: could not mount!\n");
err = PTR_ERR(selinuxfs_mount);
selinuxfs_mount = NULL;
}
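
The selinuxfs hunks above all plug the same leak: sel_make_inode() failing after d_alloc_name() succeeded left the new dentry (and, in the ENAMETOOLONG case of sel_make_bools(), the inode too) with an elevated refcount forever. The fix is the usual acquire-in-order, release-in-reverse shape on every early exit. A toy sketch of that shape, with invented obj_get()/obj_put() standing in for the dentry and inode reference helpers:

#include <stdlib.h>

struct obj { int refs; };

static struct obj *obj_get(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (o)
                o->refs = 1;
        return o;
}

static void obj_put(struct obj *o)
{
        if (o && --o->refs == 0)
                free(o);
}

static int make_entry(void)
{
        struct obj *dentry, *inode;

        dentry = obj_get();
        if (!dentry)
                return -1;

        inode = obj_get();
        if (!inode) {
                obj_put(dentry);        /* the fix: drop it on every early exit */
                return -1;
        }

        /* success: in selinuxfs the tree would now own both references;
         * the toy just drops them so the demo stays leak-free */
        obj_put(inode);
        obj_put(dentry);
        return 0;
}

int main(void)
{
        return make_entry() ? 1 : 0;
}
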
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index a2c9148b0662..c0417cf17fee 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -338,7 +338,7 @@ int avtab_alloc(struct avtab *h, u32 nrules)
h->nel = 0;
h->nslot = nslot;
h->mask = mask;
- printk(KERN_DEBUG "SELinux: %d avtab hash slots, %d rules.\n",
+ pr_debug("SELinux: %d avtab hash slots, %d rules.\n",
h->nslot, nrules);
return 0;
}
@@ -368,7 +368,7 @@ void avtab_hash_eval(struct avtab *h, char *tag)
}
}
- printk(KERN_DEBUG "SELinux: %s: %d entries and %d/%d buckets used, "
+ pr_debug("SELinux: %s: %d entries and %d/%d buckets used, "
"longest chain length %d sum of chain length^2 %llu\n",
tag, h->nel, slots_used, h->nslot, max_chain_len,
chain2_len_sum);
@@ -407,18 +407,18 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
if (vers < POLICYDB_VERSION_AVTAB) {
rc = next_entry(buf32, fp, sizeof(u32));
if (rc) {
- printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+ pr_err("SELinux: avtab: truncated entry\n");
return rc;
}
items2 = le32_to_cpu(buf32[0]);
if (items2 > ARRAY_SIZE(buf32)) {
- printk(KERN_ERR "SELinux: avtab: entry overflow\n");
+ pr_err("SELinux: avtab: entry overflow\n");
return -EINVAL;
}
rc = next_entry(buf32, fp, sizeof(u32)*items2);
if (rc) {
- printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+ pr_err("SELinux: avtab: truncated entry\n");
return rc;
}
items = 0;
@@ -426,19 +426,19 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
val = le32_to_cpu(buf32[items++]);
key.source_type = (u16)val;
if (key.source_type != val) {
- printk(KERN_ERR "SELinux: avtab: truncated source type\n");
+ pr_err("SELinux: avtab: truncated source type\n");
return -EINVAL;
}
val = le32_to_cpu(buf32[items++]);
key.target_type = (u16)val;
if (key.target_type != val) {
- printk(KERN_ERR "SELinux: avtab: truncated target type\n");
+ pr_err("SELinux: avtab: truncated target type\n");
return -EINVAL;
}
val = le32_to_cpu(buf32[items++]);
key.target_class = (u16)val;
if (key.target_class != val) {
- printk(KERN_ERR "SELinux: avtab: truncated target class\n");
+ pr_err("SELinux: avtab: truncated target class\n");
return -EINVAL;
}
@@ -446,16 +446,16 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
enabled = (val & AVTAB_ENABLED_OLD) ? AVTAB_ENABLED : 0;
if (!(val & (AVTAB_AV | AVTAB_TYPE))) {
- printk(KERN_ERR "SELinux: avtab: null entry\n");
+ pr_err("SELinux: avtab: null entry\n");
return -EINVAL;
}
if ((val & AVTAB_AV) &&
(val & AVTAB_TYPE)) {
- printk(KERN_ERR "SELinux: avtab: entry has both access vectors and types\n");
+ pr_err("SELinux: avtab: entry has both access vectors and types\n");
return -EINVAL;
}
if (val & AVTAB_XPERMS) {
- printk(KERN_ERR "SELinux: avtab: entry has extended permissions\n");
+ pr_err("SELinux: avtab: entry has extended permissions\n");
return -EINVAL;
}
@@ -470,7 +470,8 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
}
if (items != items2) {
- printk(KERN_ERR "SELinux: avtab: entry only had %d items, expected %d\n", items2, items);
+ pr_err("SELinux: avtab: entry only had %d items, expected %d\n",
+ items2, items);
return -EINVAL;
}
return 0;
@@ -478,7 +479,7 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
rc = next_entry(buf16, fp, sizeof(u16)*4);
if (rc) {
- printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+ pr_err("SELinux: avtab: truncated entry\n");
return rc;
}
@@ -491,7 +492,7 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
if (!policydb_type_isvalid(pol, key.source_type) ||
!policydb_type_isvalid(pol, key.target_type) ||
!policydb_class_isvalid(pol, key.target_class)) {
- printk(KERN_ERR "SELinux: avtab: invalid type or class\n");
+ pr_err("SELinux: avtab: invalid type or class\n");
return -EINVAL;
}
@@ -501,13 +502,13 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
set++;
}
if (!set || set > 1) {
- printk(KERN_ERR "SELinux: avtab: more than one specifier\n");
+ pr_err("SELinux: avtab: more than one specifier\n");
return -EINVAL;
}
if ((vers < POLICYDB_VERSION_XPERMS_IOCTL) &&
(key.specified & AVTAB_XPERMS)) {
- printk(KERN_ERR "SELinux: avtab: policy version %u does not "
+ pr_err("SELinux: avtab: policy version %u does not "
"support extended permissions rules and one "
"was specified\n", vers);
return -EINVAL;
@@ -515,17 +516,17 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
memset(&xperms, 0, sizeof(struct avtab_extended_perms));
rc = next_entry(&xperms.specified, fp, sizeof(u8));
if (rc) {
- printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+ pr_err("SELinux: avtab: truncated entry\n");
return rc;
}
rc = next_entry(&xperms.driver, fp, sizeof(u8));
if (rc) {
- printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+ pr_err("SELinux: avtab: truncated entry\n");
return rc;
}
rc = next_entry(buf32, fp, sizeof(u32)*ARRAY_SIZE(xperms.perms.p));
if (rc) {
- printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+ pr_err("SELinux: avtab: truncated entry\n");
return rc;
}
for (i = 0; i < ARRAY_SIZE(xperms.perms.p); i++)
@@ -534,14 +535,14 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
} else {
rc = next_entry(buf32, fp, sizeof(u32));
if (rc) {
- printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+ pr_err("SELinux: avtab: truncated entry\n");
return rc;
}
datum.u.data = le32_to_cpu(*buf32);
}
if ((key.specified & AVTAB_TYPE) &&
!policydb_type_isvalid(pol, datum.u.data)) {
- printk(KERN_ERR "SELinux: avtab: invalid type\n");
+ pr_err("SELinux: avtab: invalid type\n");
return -EINVAL;
}
return insertf(a, &key, &datum, p);
@@ -562,12 +563,12 @@ int avtab_read(struct avtab *a, void *fp, struct policydb *pol)
rc = next_entry(buf, fp, sizeof(u32));
if (rc < 0) {
- printk(KERN_ERR "SELinux: avtab: truncated table\n");
+ pr_err("SELinux: avtab: truncated table\n");
goto bad;
}
nel = le32_to_cpu(buf[0]);
if (!nel) {
- printk(KERN_ERR "SELinux: avtab: table is empty\n");
+ pr_err("SELinux: avtab: table is empty\n");
rc = -EINVAL;
goto bad;
}
@@ -580,9 +581,9 @@ int avtab_read(struct avtab *a, void *fp, struct policydb *pol)
rc = avtab_read_item(a, fp, pol, avtab_insertf, NULL);
if (rc) {
if (rc == -ENOMEM)
- printk(KERN_ERR "SELinux: avtab: out of memory\n");
+ pr_err("SELinux: avtab: out of memory\n");
else if (rc == -EEXIST)
- printk(KERN_ERR "SELinux: avtab: duplicate entry\n");
+ pr_err("SELinux: avtab: duplicate entry\n");
goto bad;
}
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index c91543a617ac..f49e522e932d 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -96,7 +96,7 @@ int evaluate_cond_node(struct policydb *p, struct cond_node *node)
if (new_state != node->cur_state) {
node->cur_state = new_state;
if (new_state == -1)
- printk(KERN_ERR "SELinux: expression result was undefined - disabling all rules.\n");
+ pr_err("SELinux: expression result was undefined - disabling all rules.\n");
/* turn the rules on or off */
for (cur = node->true_list; cur; cur = cur->next) {
if (new_state <= 0)
@@ -287,7 +287,7 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum
*/
if (k->specified & AVTAB_TYPE) {
if (avtab_search(&p->te_avtab, k)) {
- printk(KERN_ERR "SELinux: type rule already exists outside of a conditional.\n");
+ pr_err("SELinux: type rule already exists outside of a conditional.\n");
goto err;
}
/*
@@ -302,7 +302,7 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum
node_ptr = avtab_search_node(&p->te_cond_avtab, k);
if (node_ptr) {
if (avtab_search_node_next(node_ptr, k->specified)) {
- printk(KERN_ERR "SELinux: too many conflicting type rules.\n");
+ pr_err("SELinux: too many conflicting type rules.\n");
goto err;
}
found = 0;
@@ -313,13 +313,13 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum
}
}
if (!found) {
- printk(KERN_ERR "SELinux: conflicting type rules.\n");
+ pr_err("SELinux: conflicting type rules.\n");
goto err;
}
}
} else {
if (avtab_search(&p->te_cond_avtab, k)) {
- printk(KERN_ERR "SELinux: conflicting type rules when adding type rule for true.\n");
+ pr_err("SELinux: conflicting type rules when adding type rule for true.\n");
goto err;
}
}
@@ -327,7 +327,7 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum
node_ptr = avtab_insert_nonunique(&p->te_cond_avtab, k, d);
if (!node_ptr) {
- printk(KERN_ERR "SELinux: could not insert rule.\n");
+ pr_err("SELinux: could not insert rule.\n");
rc = -ENOMEM;
goto err;
}
@@ -387,12 +387,12 @@ static int cond_read_av_list(struct policydb *p, void *fp, struct cond_av_list *
static int expr_isvalid(struct policydb *p, struct cond_expr *expr)
{
if (expr->expr_type <= 0 || expr->expr_type > COND_LAST) {
- printk(KERN_ERR "SELinux: conditional expressions uses unknown operator.\n");
+ pr_err("SELinux: conditional expressions uses unknown operator.\n");
return 0;
}
if (expr->bool > p->p_bools.nprim) {
- printk(KERN_ERR "SELinux: conditional expressions uses unknown bool.\n");
+ pr_err("SELinux: conditional expressions uses unknown bool.\n");
return 0;
}
return 1;
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 5ae8c61b75bf..8f624f80055b 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -362,7 +362,7 @@ int ebitmap_read(struct ebitmap *e, void *fp)
count = le32_to_cpu(buf[2]);
if (mapunit != BITS_PER_U64) {
- printk(KERN_ERR "SELinux: ebitmap: map size %u does not "
+ pr_err("SELinux: ebitmap: map size %u does not "
"match my size %zd (high bit was %d)\n",
mapunit, BITS_PER_U64, e->highbit);
goto bad;
@@ -383,19 +383,19 @@ int ebitmap_read(struct ebitmap *e, void *fp)
for (i = 0; i < count; i++) {
rc = next_entry(&startbit, fp, sizeof(u32));
if (rc < 0) {
- printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
+ pr_err("SELinux: ebitmap: truncated map\n");
goto bad;
}
startbit = le32_to_cpu(startbit);
if (startbit & (mapunit - 1)) {
- printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
+ pr_err("SELinux: ebitmap start bit (%d) is "
"not a multiple of the map unit size (%u)\n",
startbit, mapunit);
goto bad;
}
if (startbit > e->highbit - mapunit) {
- printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
+ pr_err("SELinux: ebitmap start bit (%d) is "
"beyond the end of the bitmap (%u)\n",
startbit, (e->highbit - mapunit));
goto bad;
@@ -405,8 +405,7 @@ int ebitmap_read(struct ebitmap *e, void *fp)
struct ebitmap_node *tmp;
tmp = kmem_cache_zalloc(ebitmap_node_cachep, GFP_KERNEL);
if (!tmp) {
- printk(KERN_ERR
- "SELinux: ebitmap: out of memory\n");
+ pr_err("SELinux: ebitmap: out of memory\n");
rc = -ENOMEM;
goto bad;
}
@@ -418,7 +417,7 @@ int ebitmap_read(struct ebitmap *e, void *fp)
e->node = tmp;
n = tmp;
} else if (startbit <= n->startbit) {
- printk(KERN_ERR "SELinux: ebitmap: start bit %d"
+ pr_err("SELinux: ebitmap: start bit %d"
" comes after start bit %d\n",
startbit, n->startbit);
goto bad;
@@ -426,7 +425,7 @@ int ebitmap_read(struct ebitmap *e, void *fp)
rc = next_entry(&map, fp, sizeof(u64));
if (rc < 0) {
- printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
+ pr_err("SELinux: ebitmap: truncated map\n");
goto bad;
}
map = le64_to_cpu(map);
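
The startbit & (mapunit - 1) test above is the standard power-of-two alignment check: when mapunit is a power of two (here BITS_PER_U64), x is a multiple of mapunit exactly when its low log2(mapunit) bits are clear. A one-function sketch:

#include <stdbool.h>
#include <stdint.h>

/* Valid only for power-of-two units: 64 -> mask 0x3f, so any set low
 * bit proves startbit is not a multiple of the unit. */
bool is_unit_aligned(uint32_t startbit, uint32_t mapunit)
{
        return (startbit & (mapunit - 1)) == 0;
}

For example, is_unit_aligned(128, 64) is true while is_unit_aligned(100, 64) is false, which is precisely what lets ebitmap_read() reject maps whose nodes would straddle a map unit.
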
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 6e8c8056d7ad..e9394e7adc84 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -504,7 +504,7 @@ static void hash_eval(struct hashtab *h, const char *hash_name)
struct hashtab_info info;
hashtab_stat(h, &info);
- printk(KERN_DEBUG "SELinux: %s: %d entries and %d/%d buckets used, "
+ pr_debug("SELinux: %s: %d entries and %d/%d buckets used, "
"longest chain length %d\n", hash_name, h->nel,
info.slots_used, h->size, info.max_chain_len);
}
@@ -533,15 +533,17 @@ static int policydb_index(struct policydb *p)
{
int i, rc;
- printk(KERN_DEBUG "SELinux: %d users, %d roles, %d types, %d bools",
- p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim);
if (p->mls_enabled)
- printk(KERN_CONT ", %d sens, %d cats", p->p_levels.nprim,
- p->p_cats.nprim);
- printk(KERN_CONT "\n");
+ pr_debug("SELinux: %d users, %d roles, %d types, %d bools, %d sens, %d cats\n",
+ p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim,
+ p->p_bools.nprim, p->p_levels.nprim, p->p_cats.nprim);
+ else
+ pr_debug("SELinux: %d users, %d roles, %d types, %d bools\n",
+ p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim,
+ p->p_bools.nprim);
- printk(KERN_DEBUG "SELinux: %d classes, %d rules\n",
- p->p_classes.nprim, p->te_avtab.nel);
+ pr_debug("SELinux: %d classes, %d rules\n",
+ p->p_classes.nprim, p->te_avtab.nel);
#ifdef DEBUG_HASHES
avtab_hash_eval(&p->te_avtab, "rules");
@@ -897,7 +899,7 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
rc = sidtab_init(s);
if (rc) {
- printk(KERN_ERR "SELinux: out of memory on SID table init\n");
+ pr_err("SELinux: out of memory on SID table init\n");
goto out;
}
@@ -905,14 +907,14 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
for (c = head; c; c = c->next) {
rc = -EINVAL;
if (!c->context[0].user) {
- printk(KERN_ERR "SELinux: SID %s was never defined.\n",
+ pr_err("SELinux: SID %s was never defined.\n",
c->u.name);
goto out;
}
rc = sidtab_insert(s, c->sid[0], &c->context[0]);
if (rc) {
- printk(KERN_ERR "SELinux: unable to load initial SID %s.\n",
+ pr_err("SELinux: unable to load initial SID %s.\n",
c->u.name);
goto out;
}
@@ -1005,13 +1007,13 @@ static int mls_read_range_helper(struct mls_range *r, void *fp)
rc = -EINVAL;
items = le32_to_cpu(buf[0]);
if (items > ARRAY_SIZE(buf)) {
- printk(KERN_ERR "SELinux: mls: range overflow\n");
+ pr_err("SELinux: mls: range overflow\n");
goto out;
}
rc = next_entry(buf, fp, sizeof(u32) * items);
if (rc) {
- printk(KERN_ERR "SELinux: mls: truncated range\n");
+ pr_err("SELinux: mls: truncated range\n");
goto out;
}
@@ -1023,19 +1025,19 @@ static int mls_read_range_helper(struct mls_range *r, void *fp)
rc = ebitmap_read(&r->level[0].cat, fp);
if (rc) {
- printk(KERN_ERR "SELinux: mls: error reading low categories\n");
+ pr_err("SELinux: mls: error reading low categories\n");
goto out;
}
if (items > 1) {
rc = ebitmap_read(&r->level[1].cat, fp);
if (rc) {
- printk(KERN_ERR "SELinux: mls: error reading high categories\n");
+ pr_err("SELinux: mls: error reading high categories\n");
goto bad_high;
}
} else {
rc = ebitmap_cpy(&r->level[1].cat, &r->level[0].cat);
if (rc) {
- printk(KERN_ERR "SELinux: mls: out of memory\n");
+ pr_err("SELinux: mls: out of memory\n");
goto bad_high;
}
}
@@ -1060,7 +1062,7 @@ static int context_read_and_validate(struct context *c,
rc = next_entry(buf, fp, sizeof buf);
if (rc) {
- printk(KERN_ERR "SELinux: context truncated\n");
+ pr_err("SELinux: context truncated\n");
goto out;
}
c->user = le32_to_cpu(buf[0]);
@@ -1069,14 +1071,14 @@ static int context_read_and_validate(struct context *c,
if (p->policyvers >= POLICYDB_VERSION_MLS) {
rc = mls_read_range_helper(&c->range, fp);
if (rc) {
- printk(KERN_ERR "SELinux: error reading MLS range of context\n");
+ pr_err("SELinux: error reading MLS range of context\n");
goto out;
}
}
rc = -EINVAL;
if (!policydb_context_isvalid(p, c)) {
- printk(KERN_ERR "SELinux: invalid security context\n");
+ pr_err("SELinux: invalid security context\n");
context_destroy(c);
goto out;
}
@@ -1352,7 +1354,8 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp)
rc = -EINVAL;
cladatum->comdatum = hashtab_search(p->p_commons.table, cladatum->comkey);
if (!cladatum->comdatum) {
- printk(KERN_ERR "SELinux: unknown common %s\n", cladatum->comkey);
+ pr_err("SELinux: unknown common %s\n",
+ cladatum->comkey);
goto bad;
}
}
@@ -1444,7 +1447,7 @@ static int role_read(struct policydb *p, struct hashtab *h, void *fp)
if (strcmp(key, OBJECT_R) == 0) {
rc = -EINVAL;
if (role->value != OBJECT_R_VAL) {
- printk(KERN_ERR "SELinux: Role %s has wrong value %d\n",
+ pr_err("SELinux: Role %s has wrong value %d\n",
OBJECT_R, role->value);
goto bad;
}
@@ -1522,14 +1525,14 @@ static int mls_read_level(struct mls_level *lp, void *fp)
rc = next_entry(buf, fp, sizeof buf);
if (rc) {
- printk(KERN_ERR "SELinux: mls: truncated level\n");
+ pr_err("SELinux: mls: truncated level\n");
return rc;
}
lp->sens = le32_to_cpu(buf[0]);
rc = ebitmap_read(&lp->cat, fp);
if (rc) {
- printk(KERN_ERR "SELinux: mls: error reading level categories\n");
+ pr_err("SELinux: mls: error reading level categories\n");
return rc;
}
return 0;
@@ -1683,7 +1686,7 @@ static int user_bounds_sanity_check(void *key, void *datum, void *datap)
unsigned long bit;
if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
- printk(KERN_ERR "SELinux: user %s: "
+ pr_err("SELinux: user %s: "
"too deep or looped boundary",
(char *) key);
return -EINVAL;
@@ -1694,8 +1697,7 @@ static int user_bounds_sanity_check(void *key, void *datum, void *datap)
if (ebitmap_get_bit(&upper->roles, bit))
continue;
- printk(KERN_ERR
- "SELinux: boundary violated policy: "
+ pr_err("SELinux: boundary violated policy: "
"user=%s role=%s bounds=%s\n",
sym_name(p, SYM_USERS, user->value - 1),
sym_name(p, SYM_ROLES, bit),
@@ -1720,7 +1722,7 @@ static int role_bounds_sanity_check(void *key, void *datum, void *datap)
unsigned long bit;
if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
- printk(KERN_ERR "SELinux: role %s: "
+ pr_err("SELinux: role %s: "
"too deep or looped bounds\n",
(char *) key);
return -EINVAL;
@@ -1731,8 +1733,7 @@ static int role_bounds_sanity_check(void *key, void *datum, void *datap)
if (ebitmap_get_bit(&upper->types, bit))
continue;
- printk(KERN_ERR
- "SELinux: boundary violated policy: "
+ pr_err("SELinux: boundary violated policy: "
"role=%s type=%s bounds=%s\n",
sym_name(p, SYM_ROLES, role->value - 1),
sym_name(p, SYM_TYPES, bit),
@@ -1754,7 +1755,7 @@ static int type_bounds_sanity_check(void *key, void *datum, void *datap)
upper = datum;
while (upper->bounds) {
if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
- printk(KERN_ERR "SELinux: type %s: "
+ pr_err("SELinux: type %s: "
"too deep or looped boundary\n",
(char *) key);
return -EINVAL;
@@ -1765,7 +1766,7 @@ static int type_bounds_sanity_check(void *key, void *datum, void *datap)
BUG_ON(!upper);
if (upper->attribute) {
- printk(KERN_ERR "SELinux: type %s: "
+ pr_err("SELinux: type %s: "
"bounded by attribute %s",
(char *) key,
sym_name(p, SYM_TYPES, upper->value - 1));
@@ -1888,7 +1889,7 @@ static int range_read(struct policydb *p, void *fp)
rc = -EINVAL;
if (!mls_range_isvalid(p, r)) {
- printk(KERN_WARNING "SELinux: rangetrans: invalid range\n");
+ pr_warn("SELinux: rangetrans: invalid range\n");
goto out;
}
@@ -2023,7 +2024,7 @@ static int genfs_read(struct policydb *p, void *fp)
genfs_p = genfs, genfs = genfs->next) {
rc = -EINVAL;
if (strcmp(newgenfs->fstype, genfs->fstype) == 0) {
- printk(KERN_ERR "SELinux: dup genfs fstype %s\n",
+ pr_err("SELinux: dup genfs fstype %s\n",
newgenfs->fstype);
goto out;
}
@@ -2073,7 +2074,7 @@ static int genfs_read(struct policydb *p, void *fp)
if (!strcmp(newc->u.name, c->u.name) &&
(!c->v.sclass || !newc->v.sclass ||
newc->v.sclass == c->v.sclass)) {
- printk(KERN_ERR "SELinux: dup genfs entry (%s,%s)\n",
+ pr_err("SELinux: dup genfs entry (%s,%s)\n",
genfs->fstype, c->u.name);
goto out;
}
@@ -2295,7 +2296,7 @@ int policydb_read(struct policydb *p, void *fp)
rc = -EINVAL;
if (le32_to_cpu(buf[0]) != POLICYDB_MAGIC) {
- printk(KERN_ERR "SELinux: policydb magic number 0x%x does "
+ pr_err("SELinux: policydb magic number 0x%x does "
"not match expected magic number 0x%x\n",
le32_to_cpu(buf[0]), POLICYDB_MAGIC);
goto bad;
@@ -2304,7 +2305,7 @@ int policydb_read(struct policydb *p, void *fp)
rc = -EINVAL;
len = le32_to_cpu(buf[1]);
if (len != strlen(POLICYDB_STRING)) {
- printk(KERN_ERR "SELinux: policydb string length %d does not "
+ pr_err("SELinux: policydb string length %d does not "
"match expected length %zu\n",
len, strlen(POLICYDB_STRING));
goto bad;
@@ -2313,14 +2314,14 @@ int policydb_read(struct policydb *p, void *fp)
rc = -ENOMEM;
policydb_str = kmalloc(len + 1, GFP_KERNEL);
if (!policydb_str) {
- printk(KERN_ERR "SELinux: unable to allocate memory for policydb "
+ pr_err("SELinux: unable to allocate memory for policydb "
"string of length %d\n", len);
goto bad;
}
rc = next_entry(policydb_str, fp, len);
if (rc) {
- printk(KERN_ERR "SELinux: truncated policydb string identifier\n");
+ pr_err("SELinux: truncated policydb string identifier\n");
kfree(policydb_str);
goto bad;
}
@@ -2328,7 +2329,7 @@ int policydb_read(struct policydb *p, void *fp)
rc = -EINVAL;
policydb_str[len] = '\0';
if (strcmp(policydb_str, POLICYDB_STRING)) {
- printk(KERN_ERR "SELinux: policydb string %s does not match "
+ pr_err("SELinux: policydb string %s does not match "
"my string %s\n", policydb_str, POLICYDB_STRING);
kfree(policydb_str);
goto bad;
@@ -2346,7 +2347,7 @@ int policydb_read(struct policydb *p, void *fp)
p->policyvers = le32_to_cpu(buf[0]);
if (p->policyvers < POLICYDB_VERSION_MIN ||
p->policyvers > POLICYDB_VERSION_MAX) {
- printk(KERN_ERR "SELinux: policydb version %d does not match "
+ pr_err("SELinux: policydb version %d does not match "
"my version range %d-%d\n",
le32_to_cpu(buf[0]), POLICYDB_VERSION_MIN, POLICYDB_VERSION_MAX);
goto bad;
@@ -2357,7 +2358,7 @@ int policydb_read(struct policydb *p, void *fp)
rc = -EINVAL;
if (p->policyvers < POLICYDB_VERSION_MLS) {
- printk(KERN_ERR "SELinux: security policydb version %d "
+ pr_err("SELinux: security policydb version %d "
"(MLS) not backwards compatible\n",
p->policyvers);
goto bad;
@@ -2381,7 +2382,7 @@ int policydb_read(struct policydb *p, void *fp)
rc = -EINVAL;
info = policydb_lookup_compat(p->policyvers);
if (!info) {
- printk(KERN_ERR "SELinux: unable to find policy compat info "
+ pr_err("SELinux: unable to find policy compat info "
"for version %d\n", p->policyvers);
goto bad;
}
@@ -2389,7 +2390,7 @@ int policydb_read(struct policydb *p, void *fp)
rc = -EINVAL;
if (le32_to_cpu(buf[2]) != info->sym_num ||
le32_to_cpu(buf[3]) != info->ocon_num) {
- printk(KERN_ERR "SELinux: policydb table sizes (%d,%d) do "
+ pr_err("SELinux: policydb table sizes (%d,%d) do "
"not match mine (%d,%d)\n", le32_to_cpu(buf[2]),
le32_to_cpu(buf[3]),
info->sym_num, info->ocon_num);
@@ -3417,7 +3418,7 @@ int policydb_write(struct policydb *p, void *fp)
* careful if you ever try to remove this restriction
*/
if (p->policyvers < POLICYDB_VERSION_AVTAB) {
- printk(KERN_ERR "SELinux: refusing to write policy version %d."
+ pr_err("SELinux: refusing to write policy version %d."
" Because it is less than version %d\n", p->policyvers,
POLICYDB_VERSION_AVTAB);
return -EINVAL;
@@ -3446,7 +3447,7 @@ int policydb_write(struct policydb *p, void *fp)
/* Write the version, config, and table sizes. */
info = policydb_lookup_compat(p->policyvers);
if (!info) {
- printk(KERN_ERR "SELinux: compatibility lookup failed for policy "
+ pr_err("SELinux: compatibility lookup failed for policy "
"version %d", p->policyvers);
return -EINVAL;
}
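
policydb_read() layers its sanity checks in cost order: the fixed magic number first, then the identifier string's length and bytes, then the version range, and only afterwards the table sizes. A hedged sketch of that gatekeeping with illustrative constants (the real POLICYDB_MAGIC and version bounds live in the SELinux headers):

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAGIC        0xf97cff8cu   /* illustrative */
#define DEMO_VERSION_MIN  15u           /* illustrative */
#define DEMO_VERSION_MAX  31u           /* illustrative */

int check_header(uint32_t magic, uint32_t version)
{
        if (magic != DEMO_MAGIC) {
                fprintf(stderr, "magic 0x%x does not match expected 0x%x\n",
                        magic, DEMO_MAGIC);
                return -1;
        }
        if (version < DEMO_VERSION_MIN || version > DEMO_VERSION_MAX) {
                fprintf(stderr, "version %u outside supported range %u-%u\n",
                        version, DEMO_VERSION_MIN, DEMO_VERSION_MAX);
                return -1;
        }
        return 0;
}

Rejecting on the cheap fixed fields first means a corrupt or hostile policy file is thrown out before any allocation is attempted, which is why the kmalloc() for the identifier string happens only after the length check.
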
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index dd2ceec06fef..f3def298a90e 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -136,8 +136,7 @@ static int selinux_set_mapping(struct policydb *pol,
p_out->value = string_to_security_class(pol, p_in->name);
if (!p_out->value) {
- printk(KERN_INFO
- "SELinux: Class %s not defined in policy.\n",
+ pr_info("SELinux: Class %s not defined in policy.\n",
p_in->name);
if (pol->reject_unknown)
goto err;
@@ -156,8 +155,7 @@ static int selinux_set_mapping(struct policydb *pol,
p_out->perms[k] = string_to_av_perm(pol, p_out->value,
p_in->perms[k]);
if (!p_out->perms[k]) {
- printk(KERN_INFO
- "SELinux: Permission %s in class %s not defined in policy.\n",
+ pr_info("SELinux: Permission %s in class %s not defined in policy.\n",
p_in->perms[k], p_in->name);
if (pol->reject_unknown)
goto err;
@@ -170,7 +168,7 @@ static int selinux_set_mapping(struct policydb *pol,
}
if (print_unknown_handle)
- printk(KERN_INFO "SELinux: the above unknown classes and permissions will be %s\n",
+ pr_info("SELinux: the above unknown classes and permissions will be %s\n",
pol->allow_unknown ? "allowed" : "denied");
out_map->size = i;
@@ -644,7 +642,7 @@ static void context_struct_compute_av(struct policydb *policydb,
if (unlikely(!tclass || tclass > policydb->p_classes.nprim)) {
if (printk_ratelimit())
- printk(KERN_WARNING "SELinux: Invalid class %hu\n", tclass);
+ pr_warn("SELinux: Invalid class %hu\n", tclass);
return;
}
@@ -793,7 +791,7 @@ static int security_compute_validatetrans(struct selinux_state *state,
ocontext = sidtab_search(sidtab, oldsid);
if (!ocontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, oldsid);
rc = -EINVAL;
goto out;
@@ -801,7 +799,7 @@ static int security_compute_validatetrans(struct selinux_state *state,
ncontext = sidtab_search(sidtab, newsid);
if (!ncontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, newsid);
rc = -EINVAL;
goto out;
@@ -809,7 +807,7 @@ static int security_compute_validatetrans(struct selinux_state *state,
tcontext = sidtab_search(sidtab, tasksid);
if (!tcontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, tasksid);
rc = -EINVAL;
goto out;
@@ -883,7 +881,7 @@ int security_bounded_transition(struct selinux_state *state,
rc = -EINVAL;
old_context = sidtab_search(sidtab, old_sid);
if (!old_context) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n",
+ pr_err("SELinux: %s: unrecognized SID %u\n",
__func__, old_sid);
goto out;
}
@@ -891,7 +889,7 @@ int security_bounded_transition(struct selinux_state *state,
rc = -EINVAL;
new_context = sidtab_search(sidtab, new_sid);
if (!new_context) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n",
+ pr_err("SELinux: %s: unrecognized SID %u\n",
__func__, new_sid);
goto out;
}
@@ -1040,14 +1038,14 @@ void security_compute_xperms_decision(struct selinux_state *state,
scontext = sidtab_search(sidtab, ssid);
if (!scontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, ssid);
goto out;
}
tcontext = sidtab_search(sidtab, tsid);
if (!tcontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, tsid);
goto out;
}
@@ -1129,7 +1127,7 @@ void security_compute_av(struct selinux_state *state,
scontext = sidtab_search(sidtab, ssid);
if (!scontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, ssid);
goto out;
}
@@ -1140,7 +1138,7 @@ void security_compute_av(struct selinux_state *state,
tcontext = sidtab_search(sidtab, tsid);
if (!tcontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, tsid);
goto out;
}
@@ -1183,7 +1181,7 @@ void security_compute_av_user(struct selinux_state *state,
scontext = sidtab_search(sidtab, ssid);
if (!scontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, ssid);
goto out;
}
@@ -1194,7 +1192,7 @@ void security_compute_av_user(struct selinux_state *state,
tcontext = sidtab_search(sidtab, tsid);
if (!tcontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, tsid);
goto out;
}
@@ -1310,7 +1308,7 @@ static int security_sid_to_context_core(struct selinux_state *state,
*scontext = scontextp;
goto out;
}
- printk(KERN_ERR "SELinux: %s: called before initial "
+ pr_err("SELinux: %s: called before initial "
"load_policy on unknown SID %d\n", __func__, sid);
rc = -EINVAL;
goto out;
@@ -1323,7 +1321,7 @@ static int security_sid_to_context_core(struct selinux_state *state,
else
context = sidtab_search(sidtab, sid);
if (!context) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, sid);
rc = -EINVAL;
goto out_unlock;
@@ -1678,14 +1676,14 @@ static int security_compute_sid(struct selinux_state *state,
scontext = sidtab_search(sidtab, ssid);
if (!scontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, ssid);
rc = -EINVAL;
goto out_unlock;
}
tcontext = sidtab_search(sidtab, tsid);
if (!tcontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, tsid);
rc = -EINVAL;
goto out_unlock;
@@ -1911,7 +1909,8 @@ static inline int convert_context_handle_invalid_context(
return -EINVAL;
if (!context_struct_to_string(policydb, context, &s, &len)) {
- printk(KERN_WARNING "SELinux: Context %s would be invalid if enforcing\n", s);
+ pr_warn("SELinux: Context %s would be invalid if enforcing\n",
+ s);
kfree(s);
}
return 0;
@@ -1962,7 +1961,7 @@ static int convert_context(u32 key,
c->len, &ctx, SECSID_NULL);
kfree(s);
if (!rc) {
- printk(KERN_INFO "SELinux: Context %s became valid (mapped).\n",
+ pr_info("SELinux: Context %s became valid (mapped).\n",
c->str);
/* Replace string with mapped representation. */
kfree(c->str);
@@ -1974,7 +1973,7 @@ static int convert_context(u32 key,
goto out;
} else {
/* Other error condition, e.g. ENOMEM. */
- printk(KERN_ERR "SELinux: Unable to map context %s, rc = %d.\n",
+ pr_err("SELinux: Unable to map context %s, rc = %d.\n",
c->str, -rc);
goto out;
}
@@ -2033,7 +2032,7 @@ static int convert_context(u32 key,
oc = oc->next;
rc = -EINVAL;
if (!oc) {
- printk(KERN_ERR "SELinux: unable to look up"
+ pr_err("SELinux: unable to look up"
" the initial SIDs list\n");
goto bad;
}
@@ -2065,7 +2064,7 @@ bad:
context_destroy(c);
c->str = s;
c->len = len;
- printk(KERN_INFO "SELinux: Context %s became invalid (unmapped).\n",
+ pr_info("SELinux: Context %s became invalid (unmapped).\n",
c->str);
rc = 0;
goto out;
@@ -2170,13 +2169,13 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
newpolicydb->len = len;
/* If switching between different policy types, log MLS status */
if (policydb->mls_enabled && !newpolicydb->mls_enabled)
- printk(KERN_INFO "SELinux: Disabling MLS support...\n");
+ pr_info("SELinux: Disabling MLS support...\n");
else if (!policydb->mls_enabled && newpolicydb->mls_enabled)
- printk(KERN_INFO "SELinux: Enabling MLS support...\n");
+ pr_info("SELinux: Enabling MLS support...\n");
rc = policydb_load_isids(newpolicydb, &newsidtab);
if (rc) {
- printk(KERN_ERR "SELinux: unable to load the initial SIDs\n");
+ pr_err("SELinux: unable to load the initial SIDs\n");
policydb_destroy(newpolicydb);
goto out;
}
@@ -2187,7 +2186,7 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
rc = security_preserve_bools(state, newpolicydb);
if (rc) {
- printk(KERN_ERR "SELinux: unable to preserve booleans\n");
+ pr_err("SELinux: unable to preserve booleans\n");
goto err;
}
@@ -2207,7 +2206,7 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
args.newp = newpolicydb;
rc = sidtab_map(&newsidtab, convert_context, &args);
if (rc) {
- printk(KERN_ERR "SELinux: unable to convert the internal"
+ pr_err("SELinux: unable to convert the internal"
" representation of contexts in the new SID"
" table\n");
goto err;
@@ -2999,7 +2998,7 @@ int security_sid_mls_copy(struct selinux_state *state,
rc = -EINVAL;
context1 = sidtab_search(sidtab, sid);
if (!context1) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, sid);
goto out_unlock;
}
@@ -3007,7 +3006,7 @@ int security_sid_mls_copy(struct selinux_state *state,
rc = -EINVAL;
context2 = sidtab_search(sidtab, mls_sid);
if (!context2) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, mls_sid);
goto out_unlock;
}
@@ -3104,14 +3103,14 @@ int security_net_peersid_resolve(struct selinux_state *state,
rc = -EINVAL;
nlbl_ctx = sidtab_search(sidtab, nlbl_sid);
if (!nlbl_ctx) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, nlbl_sid);
goto out;
}
rc = -EINVAL;
xfrm_ctx = sidtab_search(sidtab, xfrm_sid);
if (!xfrm_ctx) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, xfrm_sid);
goto out;
}
@@ -3202,7 +3201,7 @@ int security_get_permissions(struct selinux_state *state,
rc = -EINVAL;
match = hashtab_search(policydb->p_classes.table, class);
if (!match) {
- printk(KERN_ERR "SELinux: %s: unrecognized class %s\n",
+ pr_err("SELinux: %s: unrecognized class %s\n",
__func__, class);
goto out;
}
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index 5be31b7af225..fd75a12fa8fc 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -214,8 +214,7 @@ int sidtab_context_to_sid(struct sidtab *s,
}
sid = s->next_sid++;
if (context->len)
- printk(KERN_INFO
- "SELinux: Context %s is not valid (left unmapped).\n",
+ pr_info("SELinux: Context %s is not valid (left unmapped).\n",
context->str);
ret = sidtab_insert(s, sid, context);
if (ret)
@@ -253,7 +252,7 @@ void sidtab_hash_eval(struct sidtab *h, char *tag)
}
}
- printk(KERN_DEBUG "%s: %d entries and %d/%d buckets used, longest "
+ pr_debug("%s: %d entries and %d/%d buckets used, longest "
"chain length %d\n", tag, h->nel, slots_used, SIDTAB_SIZE,
max_chain_len);
}
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 19de675d4504..340fc30ad85d 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -28,6 +28,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/dccp.h>
+#include <linux/icmpv6.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/pipe_fs_i.h>
@@ -1927,9 +1928,9 @@ static int smack_file_receive(struct file *file)
*
* Returns 0
*/
-static int smack_file_open(struct file *file, const struct cred *cred)
+static int smack_file_open(struct file *file)
{
- struct task_smack *tsp = cred->security;
+ struct task_smack *tsp = file->f_cred->security;
struct inode *inode = file_inode(file);
struct smk_audit_info ad;
int rc;
@@ -1937,7 +1938,7 @@ static int smack_file_open(struct file *file, const struct cred *cred)
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
rc = smk_tskacc(tsp, smk_of_inode(inode), MAY_READ, &ad);
- rc = smk_bu_credfile(cred, file, MAY_READ, rc);
+ rc = smk_bu_credfile(file->f_cred, file, MAY_READ, rc);
return rc;
}
@@ -3896,6 +3897,7 @@ static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
sip->sin6_port = th->source;
break;
case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
if (uh != NULL)
sip->sin6_port = uh->source;
@@ -3924,15 +3926,19 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
struct smack_known *skp = NULL;
int rc = 0;
struct smk_audit_info ad;
+ u16 family = sk->sk_family;
#ifdef CONFIG_AUDIT
struct lsm_network_audit net;
#endif
#if IS_ENABLED(CONFIG_IPV6)
struct sockaddr_in6 sadd;
int proto;
+
+ if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
+ family = PF_INET;
#endif /* CONFIG_IPV6 */
- switch (sk->sk_family) {
+ switch (family) {
case PF_INET:
#ifdef CONFIG_SECURITY_SMACK_NETFILTER
/*
@@ -3950,7 +3956,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
*/
netlbl_secattr_init(&secattr);
- rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr);
+ rc = netlbl_skbuff_getattr(skb, family, &secattr);
if (rc == 0)
skp = smack_from_secattr(&secattr, ssp);
else
@@ -3963,7 +3969,7 @@ access_check:
#endif
#ifdef CONFIG_AUDIT
smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
- ad.a.u.net->family = sk->sk_family;
+ ad.a.u.net->family = family;
ad.a.u.net->netif = skb->skb_iif;
ipv4_skb_to_auditdata(skb, &ad.a, NULL);
#endif
@@ -3977,12 +3983,13 @@ access_check:
rc = smk_bu_note("IPv4 delivery", skp, ssp->smk_in,
MAY_WRITE, rc);
if (rc != 0)
- netlbl_skbuff_err(skb, sk->sk_family, rc, 0);
+ netlbl_skbuff_err(skb, family, rc, 0);
break;
#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6:
proto = smk_skb_to_addr_ipv6(skb, &sadd);
- if (proto != IPPROTO_UDP && proto != IPPROTO_TCP)
+ if (proto != IPPROTO_UDP && proto != IPPROTO_UDPLITE &&
+ proto != IPPROTO_TCP && proto != IPPROTO_DCCP)
break;
#ifdef SMACK_IPV6_SECMARK_LABELING
if (skb && skb->secmark != 0)
@@ -3993,7 +4000,7 @@ access_check:
skp = smack_net_ambient;
#ifdef CONFIG_AUDIT
smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
- ad.a.u.net->family = sk->sk_family;
+ ad.a.u.net->family = family;
ad.a.u.net->netif = skb->skb_iif;
ipv6_skb_to_auditdata(skb, &ad.a, NULL);
#endif /* CONFIG_AUDIT */
@@ -4004,6 +4011,9 @@ access_check:
#ifdef SMACK_IPV6_PORT_LABELING
rc = smk_ipv6_port_check(sk, &sadd, SMK_RECEIVING);
#endif /* SMACK_IPV6_PORT_LABELING */
+ if (rc != 0)
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH,
+ ICMPV6_ADM_PROHIBITED, 0);
break;
#endif /* CONFIG_IPV6 */
}
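
The interesting part of the smack_lsm.c change is the family fixup: a PF_INET6 socket can legitimately receive plain IPv4 frames (v4-mapped traffic), and those packets must be audited and access-checked under the IPv4 rules, so the code normalizes the family once and switches on the normalized value everywhere, including the netlbl_skbuff_getattr()/netlbl_skbuff_err() calls. A sketch of just the normalization step, with stand-in constants:

#include <stdint.h>

#define DEMO_PF_INET    2       /* stand-ins for PF_INET/PF_INET6/ETH_P_IP */
#define DEMO_PF_INET6  10
#define DEMO_ETH_P_IP  0x0800

uint16_t effective_family(uint16_t sk_family, uint16_t skb_protocol)
{
        if (sk_family == DEMO_PF_INET6 && skb_protocol == DEMO_ETH_P_IP)
                return DEMO_PF_INET;
        return sk_family;
}

The same hunk also widens the IPv6 protocol filter to UDP-Lite and DCCP and sends an ICMPv6 administratively-prohibited error when delivery is refused, so a blocked peer sees a clean rejection instead of a silent drop.
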
diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
index b7c6a7ffc058..cca5a3012fee 100644
--- a/security/tomoyo/Makefile
+++ b/security/tomoyo/Makefile
@@ -4,7 +4,7 @@ obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load
targets += builtin-policy.h
define do_policy
echo "static char tomoyo_builtin_$(1)[] __initdata ="; \
-$(objtree)/scripts/basic/bin2c <$(firstword $(wildcard $(obj)/policy/$(1).conf $(srctree)/$(src)/policy/$(1).conf.default) /dev/null); \
+$(objtree)/scripts/bin2c <$(firstword $(wildcard $(obj)/policy/$(1).conf $(srctree)/$(src)/policy/$(1).conf.default) /dev/null); \
echo ";"
endef
quiet_cmd_policy = POLICY $@
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 213b8c593668..9f932e2d6852 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -320,7 +320,7 @@ static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
*
* Returns 0 on success, negative value otherwise.
*/
-static int tomoyo_file_open(struct file *f, const struct cred *cred)
+static int tomoyo_file_open(struct file *f)
{
int flags = f->f_flags;
/* Don't check read permission here if called from do_execve(). */
diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
index 31f858eceffc..7a0dfca03a57 100644
--- a/sound/ac97/bus.c
+++ b/sound/ac97/bus.c
@@ -13,6 +13,7 @@
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -68,6 +69,27 @@ ac97_codec_find(struct ac97_controller *ac97_ctrl, unsigned int codec_num)
return ac97_ctrl->codecs[codec_num];
}
+static struct device_node *
+ac97_of_get_child_device(struct ac97_controller *ac97_ctrl, int idx,
+ unsigned int vendor_id)
+{
+ struct device_node *node;
+ u32 reg;
+ char compat[] = "ac97,0000,0000";
+
+ snprintf(compat, sizeof(compat), "ac97,%04x,%04x",
+ vendor_id >> 16, vendor_id & 0xffff);
+
+ for_each_child_of_node(ac97_ctrl->parent->of_node, node) {
+ if ((idx != of_property_read_u32(node, "reg", &reg)) ||
+ !of_device_is_compatible(node, compat))
+ continue;
+ return of_node_get(node);
+ }
+
+ return NULL;
+}
+
static void ac97_codec_release(struct device *dev)
{
struct ac97_codec_device *adev;
@@ -76,6 +98,7 @@ static void ac97_codec_release(struct device *dev)
adev = to_ac97_device(dev);
ac97_ctrl = adev->ac97_ctrl;
ac97_ctrl->codecs[adev->num] = NULL;
+ of_node_put(dev->of_node);
kfree(adev);
}
@@ -98,6 +121,8 @@ static int ac97_codec_add(struct ac97_controller *ac97_ctrl, int idx,
device_initialize(&codec->dev);
dev_set_name(&codec->dev, "%s:%u", dev_name(ac97_ctrl->parent), idx);
+ codec->dev.of_node = ac97_of_get_child_device(ac97_ctrl, idx,
+ vendor_id);
ret = device_add(&codec->dev);
if (ret)
@@ -105,6 +130,7 @@ static int ac97_codec_add(struct ac97_controller *ac97_ctrl, int idx,
return 0;
err_free_codec:
+ of_node_put(codec->dev.of_node);
put_device(&codec->dev);
kfree(codec);
ac97_ctrl->codecs[idx] = NULL;
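
ac97_of_get_child_device() matches a codec's DT node by rebuilding the canonical compatible string from the probed 16+16 bit vendor id and comparing it against each child node that also carries the matching "reg" index. A sketch of just the formatting step, with an invented example id:

#include <stdio.h>
#include <stdint.h>

void ac97_compat_string(uint32_t vendor_id, char *buf, size_t len)
{
        /* split the 32-bit id into vendor (high) and device (low) halves */
        snprintf(buf, len, "ac97,%04x,%04x",
                 (unsigned int)(vendor_id >> 16),
                 (unsigned int)(vendor_id & 0xffff));
}

int main(void)
{
        char compat[sizeof("ac97,0000,0000")];

        ac97_compat_string(0x414c4720, compat, sizeof(compat)); /* hypothetical id */
        puts(compat);   /* prints "ac97,414c,4720" */
        return 0;
}

Note the matching of_node_put() added in ac97_codec_release() and on the error path: every of_node_get() taken when the codec device is created must be dropped exactly once when it dies.
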
diff --git a/sound/aoa/core/gpio-feature.c b/sound/aoa/core/gpio-feature.c
index 71960089e207..65557421fe0b 100644
--- a/sound/aoa/core/gpio-feature.c
+++ b/sound/aoa/core/gpio-feature.c
@@ -88,8 +88,10 @@ static struct device_node *get_gpio(char *name,
}
reg = of_get_property(np, "reg", NULL);
- if (!reg)
+ if (!reg) {
+ of_node_put(np);
return NULL;
+ }
*gpioptr = *reg;
diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
index 65171f6657a2..5fbd47a9177e 100644
--- a/sound/arm/Kconfig
+++ b/sound/arm/Kconfig
@@ -17,14 +17,9 @@ config SND_ARMAACI
select SND_PCM
select SND_AC97_CODEC
-config SND_PXA2XX_PCM
- tristate
- select SND_PCM
-
config SND_PXA2XX_AC97
tristate "AC97 driver for the Intel PXA2xx chip"
depends on ARCH_PXA
- select SND_PXA2XX_PCM
select SND_AC97_CODEC
select SND_PXA2XX_LIB
select SND_PXA2XX_LIB_AC97
diff --git a/sound/arm/Makefile b/sound/arm/Makefile
index e10d5b169565..34c769489877 100644
--- a/sound/arm/Makefile
+++ b/sound/arm/Makefile
@@ -6,9 +6,6 @@
obj-$(CONFIG_SND_ARMAACI) += snd-aaci.o
snd-aaci-objs := aaci.o
-obj-$(CONFIG_SND_PXA2XX_PCM) += snd-pxa2xx-pcm.o
-snd-pxa2xx-pcm-objs := pxa2xx-pcm.o
-
obj-$(CONFIG_SND_PXA2XX_LIB) += snd-pxa2xx-lib.o
snd-pxa2xx-lib-y := pxa2xx-pcm-lib.o
snd-pxa2xx-lib-$(CONFIG_SND_PXA2XX_LIB_AC97) += pxa2xx-ac97-lib.o
diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c
index 5950a9e218d9..8eafd3d3dff6 100644
--- a/sound/arm/pxa2xx-ac97-lib.c
+++ b/sound/arm/pxa2xx-ac97-lib.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <sound/pxa2xx-lib.h>
@@ -337,6 +338,17 @@ int pxa2xx_ac97_hw_probe(struct platform_device *dev)
dev_err(&dev->dev, "Invalid reset GPIO %d\n",
pdata->reset_gpio);
}
+ } else if (!pdata && dev->dev.of_node) {
+ pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ pdata->reset_gpio = of_get_named_gpio(dev->dev.of_node,
+ "reset-gpios", 0);
+ if (pdata->reset_gpio == -ENOENT)
+ pdata->reset_gpio = -1;
+ else if (pdata->reset_gpio < 0)
+ return pdata->reset_gpio;
+ reset_gpio = pdata->reset_gpio;
} else {
if (cpu_is_pxa27x())
reset_gpio = 113;
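
The new DT branch treats the reset line as optional: of_get_named_gpio() returning -ENOENT (property absent) is mapped to -1, the driver's "no reset GPIO" sentinel, while any other negative value is a genuine error and is propagated. A sketch of that normalization, assuming only <errno.h>:

#include <errno.h>

/* -ENOENT means "property not present", which is fine for an optional
 * line; every other negative value is a real failure to propagate. */
int normalize_optional_gpio(int gpio)
{
        if (gpio == -ENOENT)
                return -1;
        return gpio;    /* a valid GPIO number, or an error code */
}
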
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 4bc244c40f80..1f72672262d0 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
-#include <linux/dma/pxa-dma.h>
+#include <linux/dma-mapping.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -27,8 +27,6 @@
#include <mach/regs-ac97.h>
#include <mach/audio.h>
-#include "pxa2xx-pcm.h"
-
static void pxa2xx_ac97_legacy_reset(struct snd_ac97 *ac97)
{
if (!pxa2xx_ac97_try_cold_reset())
@@ -63,61 +61,46 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
.reset = pxa2xx_ac97_legacy_reset,
};
-static struct pxad_param pxa2xx_ac97_pcm_out_req = {
- .prio = PXAD_PRIO_LOWEST,
- .drcmr = 12,
-};
-
-static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_out = {
- .addr = __PREG(PCDR),
- .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
- .maxburst = 32,
- .filter_data = &pxa2xx_ac97_pcm_out_req,
-};
-
-static struct pxad_param pxa2xx_ac97_pcm_in_req = {
- .prio = PXAD_PRIO_LOWEST,
- .drcmr = 11,
-};
-
-static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_in = {
- .addr = __PREG(PCDR),
- .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
- .maxburst = 32,
- .filter_data = &pxa2xx_ac97_pcm_in_req,
-};
-
static struct snd_pcm *pxa2xx_ac97_pcm;
static struct snd_ac97 *pxa2xx_ac97_ac97;
-static int pxa2xx_ac97_pcm_startup(struct snd_pcm_substream *substream)
+static int pxa2xx_ac97_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
pxa2xx_audio_ops_t *platform_ops;
- int r;
+ int ret, i;
+
+ ret = pxa2xx_pcm_open(substream);
+ if (ret)
+ return ret;
runtime->hw.channels_min = 2;
runtime->hw.channels_max = 2;
- r = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
- AC97_RATES_FRONT_DAC : AC97_RATES_ADC;
- runtime->hw.rates = pxa2xx_ac97_ac97->rates[r];
+ i = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+ AC97_RATES_FRONT_DAC : AC97_RATES_ADC;
+ runtime->hw.rates = pxa2xx_ac97_ac97->rates[i];
snd_pcm_limit_hw_rates(runtime);
- platform_ops = substream->pcm->card->dev->platform_data;
- if (platform_ops && platform_ops->startup)
- return platform_ops->startup(substream, platform_ops->priv);
- else
- return 0;
+ platform_ops = substream->pcm->card->dev->platform_data;
+ if (platform_ops && platform_ops->startup) {
+ ret = platform_ops->startup(substream, platform_ops->priv);
+ if (ret < 0)
+ pxa2xx_pcm_close(substream);
+ }
+
+ return ret;
}
-static void pxa2xx_ac97_pcm_shutdown(struct snd_pcm_substream *substream)
+static int pxa2xx_ac97_pcm_close(struct snd_pcm_substream *substream)
{
pxa2xx_audio_ops_t *platform_ops;
- platform_ops = substream->pcm->card->dev->platform_data;
+ platform_ops = substream->pcm->card->dev->platform_data;
if (platform_ops && platform_ops->shutdown)
platform_ops->shutdown(substream, platform_ops->priv);
+
+ return 0;
}
static int pxa2xx_ac97_pcm_prepare(struct snd_pcm_substream *substream)
@@ -125,17 +108,15 @@ static int pxa2xx_ac97_pcm_prepare(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
int reg = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
AC97_PCM_FRONT_DAC_RATE : AC97_PCM_LR_ADC_RATE;
+ int ret;
+
+ ret = pxa2xx_pcm_prepare(substream);
+ if (ret < 0)
+ return ret;
+
return snd_ac97_set_rate(pxa2xx_ac97_ac97, reg, runtime->rate);
}
-static struct pxa2xx_pcm_client pxa2xx_ac97_pcm_client = {
- .playback_params = &pxa2xx_ac97_pcm_out,
- .capture_params = &pxa2xx_ac97_pcm_in,
- .startup = pxa2xx_ac97_pcm_startup,
- .shutdown = pxa2xx_ac97_pcm_shutdown,
- .prepare = pxa2xx_ac97_pcm_prepare,
-};
-
#ifdef CONFIG_PM_SLEEP
static int pxa2xx_ac97_do_suspend(struct snd_card *card)
@@ -193,6 +174,53 @@ static int pxa2xx_ac97_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pxa2xx_ac97_pm_ops, pxa2xx_ac97_suspend, pxa2xx_ac97_resume);
#endif
+static const struct snd_pcm_ops pxa2xx_ac97_pcm_ops = {
+ .open = pxa2xx_ac97_pcm_open,
+ .close = pxa2xx_ac97_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = pxa2xx_pcm_hw_params,
+ .hw_free = pxa2xx_pcm_hw_free,
+ .prepare = pxa2xx_ac97_pcm_prepare,
+ .trigger = pxa2xx_pcm_trigger,
+ .pointer = pxa2xx_pcm_pointer,
+ .mmap = pxa2xx_pcm_mmap,
+};
+
+
+static int pxa2xx_ac97_pcm_new(struct snd_card *card)
+{
+ struct snd_pcm *pcm;
+ int stream, ret;
+
+ ret = snd_pcm_new(card, "PXA2xx-PCM", 0, 1, 1, &pcm);
+ if (ret)
+ goto out;
+
+ pcm->private_free = pxa2xx_pcm_free_dma_buffers;
+
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto out;
+
+ stream = SNDRV_PCM_STREAM_PLAYBACK;
+ snd_pcm_set_ops(pcm, stream, &pxa2xx_ac97_pcm_ops);
+ ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, stream);
+ if (ret)
+ goto out;
+
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+ snd_pcm_set_ops(pcm, stream, &pxa2xx_ac97_pcm_ops);
+ ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, stream);
+ if (ret)
+ goto out;
+
+ pxa2xx_ac97_pcm = pcm;
+ ret = 0;
+
+ out:
+ return ret;
+}
+
static int pxa2xx_ac97_probe(struct platform_device *dev)
{
struct snd_card *card;
@@ -214,7 +242,7 @@ static int pxa2xx_ac97_probe(struct platform_device *dev)
strlcpy(card->driver, dev->dev.driver->name, sizeof(card->driver));
- ret = pxa2xx_pcm_new(card, &pxa2xx_ac97_pcm_client, &pxa2xx_ac97_pcm);
+ ret = pxa2xx_ac97_pcm_new(card);
if (ret)
goto err;
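
The rewritten pxa2xx_ac97_pcm_open() shows the standard way to layer device-specific setup over a library: run the generic pxa2xx_pcm_open() first, apply the AC97 rate and channel constraints, then call the board's startup hook, and unwind the generic half with pxa2xx_pcm_close() if the hook fails, so close() never runs against a half-opened stream. A skeleton of that open/undo shape (hypothetical names; only the structure mirrors the code above):

static int base_open(void)      { /* generic library setup */ return 0; }
static void base_close(void)    { /* generic library teardown */ }
static int board_startup(void)  { /* device-specific hook */ return 0; }

static int layered_open(void)
{
        int ret;

        ret = base_open();
        if (ret)
                return ret;

        ret = board_startup();
        if (ret < 0)
                base_close();   /* undo the generic half on failure */
        return ret;
}

int main(void)
{
        return layered_open();
}
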
diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
index e8da3b8ee721..7931789d4a9f 100644
--- a/sound/arm/pxa2xx-pcm-lib.c
+++ b/sound/arm/pxa2xx-pcm-lib.c
@@ -16,8 +16,6 @@
#include <sound/pxa2xx-lib.h>
#include <sound/dmaengine_pcm.h>
-#include "pxa2xx-pcm.h"
-
static const struct snd_pcm_hardware pxa2xx_pcm_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
@@ -25,8 +23,8 @@ static const struct snd_pcm_hardware pxa2xx_pcm_hardware = {
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_RESUME,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_S32_LE,
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
.period_bytes_min = 32,
.period_bytes_max = 8192 - 32,
.periods_min = 1,
@@ -35,8 +33,8 @@ static const struct snd_pcm_hardware pxa2xx_pcm_hardware = {
.fifo_size = 32,
};
-int __pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -64,14 +62,14 @@ int __pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-EXPORT_SYMBOL(__pxa2xx_pcm_hw_params);
+EXPORT_SYMBOL(pxa2xx_pcm_hw_params);
-int __pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream)
+int pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream)
{
snd_pcm_set_runtime_buffer(substream, NULL);
return 0;
}
-EXPORT_SYMBOL(__pxa2xx_pcm_hw_free);
+EXPORT_SYMBOL(pxa2xx_pcm_hw_free);
int pxa2xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
@@ -86,13 +84,13 @@ pxa2xx_pcm_pointer(struct snd_pcm_substream *substream)
}
EXPORT_SYMBOL(pxa2xx_pcm_pointer);
-int __pxa2xx_pcm_prepare(struct snd_pcm_substream *substream)
+int pxa2xx_pcm_prepare(struct snd_pcm_substream *substream)
{
return 0;
}
-EXPORT_SYMBOL(__pxa2xx_pcm_prepare);
+EXPORT_SYMBOL(pxa2xx_pcm_prepare);
-int __pxa2xx_pcm_open(struct snd_pcm_substream *substream)
+int pxa2xx_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -125,17 +123,17 @@ int __pxa2xx_pcm_open(struct snd_pcm_substream *substream)
if (ret < 0)
return ret;
- return snd_dmaengine_pcm_open_request_chan(substream,
- pxad_filter_fn,
- dma_params->filter_data);
+ return snd_dmaengine_pcm_open(
+ substream, dma_request_slave_channel(rtd->cpu_dai->dev,
+ dma_params->chan_name));
}
-EXPORT_SYMBOL(__pxa2xx_pcm_open);
+EXPORT_SYMBOL(pxa2xx_pcm_open);
-int __pxa2xx_pcm_close(struct snd_pcm_substream *substream)
+int pxa2xx_pcm_close(struct snd_pcm_substream *substream)
{
return snd_dmaengine_pcm_close_release_chan(substream);
}
-EXPORT_SYMBOL(__pxa2xx_pcm_close);
+EXPORT_SYMBOL(pxa2xx_pcm_close);
int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
@@ -181,6 +179,47 @@ void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
}
EXPORT_SYMBOL(pxa2xx_pcm_free_dma_buffers);
+int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_pcm *pcm = rtd->pcm;
+ int ret;
+
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+ ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_PLAYBACK);
+ if (ret)
+ goto out;
+ }
+
+ if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
+ ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_CAPTURE);
+ if (ret)
+ goto out;
+ }
+ out:
+ return ret;
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_new);
+
+const struct snd_pcm_ops pxa2xx_pcm_ops = {
+ .open = pxa2xx_pcm_open,
+ .close = pxa2xx_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = pxa2xx_pcm_hw_params,
+ .hw_free = pxa2xx_pcm_hw_free,
+ .prepare = pxa2xx_pcm_prepare,
+ .trigger = pxa2xx_pcm_trigger,
+ .pointer = pxa2xx_pcm_pointer,
+ .mmap = pxa2xx_pcm_mmap,
+};
+EXPORT_SYMBOL(pxa2xx_pcm_ops);
+
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("Intel PXA2xx sound library");
MODULE_LICENSE("GPL");
diff --git a/sound/arm/pxa2xx-pcm.c b/sound/arm/pxa2xx-pcm.c
deleted file mode 100644
index 1c6f4b436de3..000000000000
--- a/sound/arm/pxa2xx-pcm.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * linux/sound/arm/pxa2xx-pcm.c -- ALSA PCM interface for the Intel PXA2xx chip
- *
- * Author: Nicolas Pitre
- * Created: Nov 30, 2004
- * Copyright: (C) 2004 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-
-#include <mach/dma.h>
-
-#include <sound/core.h>
-#include <sound/pxa2xx-lib.h>
-#include <sound/dmaengine_pcm.h>
-
-#include "pxa2xx-pcm.h"
-
-static int pxa2xx_pcm_prepare(struct snd_pcm_substream *substream)
-{
- struct pxa2xx_pcm_client *client = substream->private_data;
-
- __pxa2xx_pcm_prepare(substream);
-
- return client->prepare(substream);
-}
-
-static int pxa2xx_pcm_open(struct snd_pcm_substream *substream)
-{
- struct pxa2xx_pcm_client *client = substream->private_data;
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct pxa2xx_runtime_data *rtd;
- int ret;
-
- ret = __pxa2xx_pcm_open(substream);
- if (ret)
- goto out;
-
- rtd = runtime->private_data;
-
- rtd->params = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
- client->playback_params : client->capture_params;
-
- ret = client->startup(substream);
- if (!ret)
- goto err2;
-
- return 0;
-
- err2:
- __pxa2xx_pcm_close(substream);
- out:
- return ret;
-}
-
-static int pxa2xx_pcm_close(struct snd_pcm_substream *substream)
-{
- struct pxa2xx_pcm_client *client = substream->private_data;
-
- client->shutdown(substream);
-
- return __pxa2xx_pcm_close(substream);
-}
-
-static const struct snd_pcm_ops pxa2xx_pcm_ops = {
- .open = pxa2xx_pcm_open,
- .close = pxa2xx_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = __pxa2xx_pcm_hw_params,
- .hw_free = __pxa2xx_pcm_hw_free,
- .prepare = pxa2xx_pcm_prepare,
- .trigger = pxa2xx_pcm_trigger,
- .pointer = pxa2xx_pcm_pointer,
- .mmap = pxa2xx_pcm_mmap,
-};
-
-int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
- struct snd_pcm **rpcm)
-{
- struct snd_pcm *pcm;
- int play = client->playback_params ? 1 : 0;
- int capt = client->capture_params ? 1 : 0;
- int ret;
-
- ret = snd_pcm_new(card, "PXA2xx-PCM", 0, play, capt, &pcm);
- if (ret)
- goto out;
-
- pcm->private_data = client;
- pcm->private_free = pxa2xx_pcm_free_dma_buffers;
-
- ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
- if (ret)
- goto out;
-
- if (play) {
- int stream = SNDRV_PCM_STREAM_PLAYBACK;
- snd_pcm_set_ops(pcm, stream, &pxa2xx_pcm_ops);
- ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, stream);
- if (ret)
- goto out;
- }
- if (capt) {
- int stream = SNDRV_PCM_STREAM_CAPTURE;
- snd_pcm_set_ops(pcm, stream, &pxa2xx_pcm_ops);
- ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, stream);
- if (ret)
- goto out;
- }
-
- if (rpcm)
- *rpcm = pcm;
- ret = 0;
-
- out:
- return ret;
-}
-
-EXPORT_SYMBOL(pxa2xx_pcm_new);
-
-MODULE_AUTHOR("Nicolas Pitre");
-MODULE_DESCRIPTION("Intel PXA2xx PCM DMA module");
-MODULE_LICENSE("GPL");
diff --git a/sound/arm/pxa2xx-pcm.h b/sound/arm/pxa2xx-pcm.h
deleted file mode 100644
index 8fa2b7c9e6b8..000000000000
--- a/sound/arm/pxa2xx-pcm.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * linux/sound/arm/pxa2xx-pcm.h -- ALSA PCM interface for the Intel PXA2xx chip
- *
- * Author: Nicolas Pitre
- * Created: Nov 30, 2004
- * Copyright: MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-struct pxa2xx_runtime_data {
- int dma_ch;
- struct snd_dmaengine_dai_dma_data *params;
-};
-
-struct pxa2xx_pcm_client {
- struct snd_dmaengine_dai_dma_data *playback_params;
- struct snd_dmaengine_dai_dma_data *capture_params;
- int (*startup)(struct snd_pcm_substream *);
- void (*shutdown)(struct snd_pcm_substream *);
- int (*prepare)(struct snd_pcm_substream *);
-};
-
-extern int pxa2xx_pcm_new(struct snd_card *, struct pxa2xx_pcm_client *, struct snd_pcm **);
-
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 4b01a37c836e..26b5e245b074 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -1160,18 +1160,6 @@ int snd_compress_deregister(struct snd_compr *device)
}
EXPORT_SYMBOL_GPL(snd_compress_deregister);
-static int __init snd_compress_init(void)
-{
- return 0;
-}
-
-static void __exit snd_compress_exit(void)
-{
-}
-
-module_init(snd_compress_init);
-module_exit(snd_compress_exit);
-
MODULE_DESCRIPTION("ALSA Compressed offload framework");
MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
MODULE_LICENSE("GPL v2");
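This hunk removes a do-nothing module_init()/module_exit() pair, a cleanup repeated across this series (seq_midi_emul, seq_midi_event, virmidi, mpu401_uart, opl3, opl4 and vx_core below): the module loader copes fine with modules that define no init or exit hook, so a library-style module needs nothing beyond its exports and metadata. A minimal sketch:

#include <linux/module.h>

int example_lib_call(void)	/* the module's real API lives in its exports */
{
	return 0;
}
EXPORT_SYMBOL_GPL(example_lib_call);

/* no module_init()/module_exit(): neither is required for a pure library */
MODULE_DESCRIPTION("example library-only module");
MODULE_LICENSE("GPL v2");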
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 7f89d3c79a4b..753d5fc4b284 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -242,16 +242,12 @@ int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
int err;
while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
- size_t aligned_size;
if (err != -ENOMEM)
return err;
if (size <= PAGE_SIZE)
return -ENOMEM;
- aligned_size = PAGE_SIZE << get_order(size);
- if (size != aligned_size)
- size = aligned_size;
- else
- size >>= 1;
+ size >>= 1;
+ size = PAGE_SIZE << get_order(size);
}
if (! dmab->area)
return -ENOMEM;
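The rewritten fallback loop halves the failed request and only then rounds back up to a whole power-of-two page order. The old code rounded up first, so a 36 KiB request would be retried at 64 KiB, larger than the size that had just failed; with the new order every retry is strictly smaller. A userspace model of the progression, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

static unsigned int get_order(unsigned long size)	/* round up to 2^n pages */
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long size = 9 * PAGE_SIZE;	/* a 36 KiB request that "fails" */

	while (size > PAGE_SIZE) {
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
		printf("retrying with %lu KiB\n", size / 1024);
	}
	/* prints 32, 16, 8, 4: strictly decreasing */
	return 0;
}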
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 905a53c1cde5..f8d4a419f3af 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1851,7 +1851,7 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
format_mask = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
for (fmt = 0; fmt < 32; ++fmt) {
if (snd_mask_test(format_mask, fmt)) {
- int f = snd_pcm_oss_format_to(fmt);
+ int f = snd_pcm_oss_format_to((__force snd_pcm_format_t)fmt);
if (f >= 0)
formats |= f;
}
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index 85a56af104bd..0391cb1a4f19 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -281,10 +281,10 @@ static int snd_pcm_plug_formats(const struct snd_mask *mask,
SNDRV_PCM_FMTBIT_U32_BE | SNDRV_PCM_FMTBIT_S32_BE);
snd_mask_set(&formats, (__force int)SNDRV_PCM_FORMAT_MU_LAW);
- if (formats.bits[0] & (u32)linfmts)
- formats.bits[0] |= (u32)linfmts;
- if (formats.bits[1] & (u32)(linfmts >> 32))
- formats.bits[1] |= (u32)(linfmts >> 32);
+ if (formats.bits[0] & lower_32_bits(linfmts))
+ formats.bits[0] |= lower_32_bits(linfmts);
+ if (formats.bits[1] & upper_32_bits(linfmts))
+ formats.bits[1] |= upper_32_bits(linfmts);
return snd_mask_test(&formats, (__force int)format);
}
@@ -353,6 +353,7 @@ snd_pcm_format_t snd_pcm_plug_slave_format(snd_pcm_format_t format,
if (snd_mask_test(format_mask, (__force int)format1))
return format1;
}
+ /* fall through */
default:
return (__force snd_pcm_format_t)-EINVAL;
}
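lower_32_bits()/upper_32_bits() from <linux/kernel.h> replace the open-coded casts in the first hunk above; besides reading better, upper_32_bits() performs its shift as two 16-bit shifts so it compiles cleanly even when handed a 32-bit value. A minimal equivalent of the split:

#include <linux/kernel.h>
#include <linux/types.h>

/* sketch: split a 64-bit format bitmap into the two mask words */
static void split_fmt_bits(u64 linfmts, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(linfmts);	/* bits 31..0  */
	*hi = upper_32_bits(linfmts);	/* bits 63..32 */
}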
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index c352bfb973cc..fdb9b92fc8d6 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -492,13 +492,8 @@ static void snd_pcm_xrun_injection_write(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_pcm_substream *substream = entry->private_data;
- struct snd_pcm_runtime *runtime;
- snd_pcm_stream_lock_irq(substream);
- runtime = substream->runtime;
- if (runtime && runtime->status->state == SNDRV_PCM_STATE_RUNNING)
- snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock_irq(substream);
+ snd_pcm_stop_xrun(substream);
}
static void snd_pcm_xrun_debug_read(struct snd_info_entry *entry,
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 44b5ae833082..4e6110d778bd 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -153,7 +153,8 @@ EXPORT_SYMBOL(snd_pcm_debug_name);
dump_stack(); \
} while (0)
-static void xrun(struct snd_pcm_substream *substream)
+/* call with stream lock held */
+void __snd_pcm_xrun(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -201,7 +202,7 @@ int snd_pcm_update_state(struct snd_pcm_substream *substream,
}
} else {
if (avail >= runtime->stop_threshold) {
- xrun(substream);
+ __snd_pcm_xrun(substream);
return -EPIPE;
}
}
@@ -297,7 +298,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
}
if (pos == SNDRV_PCM_POS_XRUN) {
- xrun(substream);
+ __snd_pcm_xrun(substream);
return -EPIPE;
}
if (pos >= runtime->buffer_size) {
@@ -626,27 +627,33 @@ EXPORT_SYMBOL(snd_interval_refine);
static int snd_interval_refine_first(struct snd_interval *i)
{
+ const unsigned int last_max = i->max;
+
if (snd_BUG_ON(snd_interval_empty(i)))
return -EINVAL;
if (snd_interval_single(i))
return 0;
i->max = i->min;
- i->openmax = i->openmin;
- if (i->openmax)
+ if (i->openmin)
i->max++;
+ /* only exclude max value if also excluded before refine */
+ i->openmax = (i->openmax && i->max >= last_max);
return 1;
}
static int snd_interval_refine_last(struct snd_interval *i)
{
+ const unsigned int last_min = i->min;
+
if (snd_BUG_ON(snd_interval_empty(i)))
return -EINVAL;
if (snd_interval_single(i))
return 0;
i->min = i->max;
- i->openmin = i->openmax;
- if (i->openmin)
+ if (i->openmax)
i->min--;
+ /* only exclude min value if also excluded before refine */
+ i->openmin = (i->openmin && i->min <= last_min);
return 1;
}
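The two refine fixes are about open endpoints: picking the first (or last) value of an interval must not blindly copy the opposite endpoint's openness, or a half-open interval can collapse to an empty one. For example, refining (5, 10] to its first value should yield {6}, i.e. (5, 6]; the old "i->openmax = i->openmin" produced (5, 6), which contains nothing. A userspace model of the corrected refine_first(), mirroring the hunk above:

#include <assert.h>

struct interval { unsigned int min, max; int openmin, openmax; };

/* mirrors the fixed snd_interval_refine_first() for multi-value intervals */
static void refine_first(struct interval *i)
{
	const unsigned int last_max = i->max;

	i->max = i->min;
	if (i->openmin)
		i->max++;
	/* only exclude the max value if it was excluded before the refine */
	i->openmax = (i->openmax && i->max >= last_max);
}

int main(void)
{
	struct interval i = { .min = 5, .max = 10, .openmin = 1, .openmax = 0 };

	refine_first(&i);		/* (5, 10] -> (5, 6] == {6} */
	assert(i.max == 6 && !i.openmax);
	return 0;
}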
@@ -1832,12 +1839,19 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
if (runtime->no_period_wakeup)
wait_time = MAX_SCHEDULE_TIMEOUT;
else {
- wait_time = 10;
- if (runtime->rate) {
- long t = runtime->period_size * 2 / runtime->rate;
- wait_time = max(t, wait_time);
+ /* use wait time from substream if available */
+ if (substream->wait_time) {
+ wait_time = substream->wait_time;
+ } else {
+ wait_time = 10;
+
+ if (runtime->rate) {
+ long t = runtime->period_size * 2 /
+ runtime->rate;
+ wait_time = max(t, wait_time);
+ }
+ wait_time = msecs_to_jiffies(wait_time * 1000);
}
- wait_time = msecs_to_jiffies(wait_time * 1000);
}
for (;;) {
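This hunk lets a driver override the heuristic period-based timeout by setting substream->wait_time before the stream waits for space. Note that the override is consumed as-is, while only the computed default goes through msecs_to_jiffies(), so under this reading the field holds a jiffies count and the conversion is the caller's job. A hypothetical open() callback:

#include <linux/jiffies.h>
#include <sound/pcm.h>

/* hypothetical driver open(): cap the avail wait at 500 ms */
static int example_pcm_open(struct snd_pcm_substream *substream)
{
	substream->wait_time = msecs_to_jiffies(500);
	return 0;
}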
diff --git a/sound/core/pcm_local.h b/sound/core/pcm_local.h
index 7a499d02df6c..c515612969a4 100644
--- a/sound/core/pcm_local.h
+++ b/sound/core/pcm_local.h
@@ -65,4 +65,6 @@ static inline void snd_pcm_timer_init(struct snd_pcm_substream *substream) {}
static inline void snd_pcm_timer_done(struct snd_pcm_substream *substream) {}
#endif
+void __snd_pcm_xrun(struct snd_pcm_substream *substream);
+
#endif /* __SOUND_CORE_PCM_LOCAL_H */
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index cecc79772c94..66c90f486af9 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1337,13 +1337,12 @@ int snd_pcm_drain_done(struct snd_pcm_substream *substream)
int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
{
unsigned long flags;
- int ret = 0;
snd_pcm_stream_lock_irqsave(substream, flags);
- if (snd_pcm_running(substream))
- ret = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+ if (substream->runtime && snd_pcm_running(substream))
+ __snd_pcm_xrun(substream);
snd_pcm_stream_unlock_irqrestore(substream, flags);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
@@ -1591,7 +1590,8 @@ static int snd_pcm_xrun(struct snd_pcm_substream *substream)
result = 0; /* already there */
break;
case SNDRV_PCM_STATE_RUNNING:
- result = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+ __snd_pcm_xrun(substream);
+ result = 0;
break;
default:
result = -EBADFD;
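After this change every XRUN report funnels through __snd_pcm_xrun(), and snd_pcm_stop_xrun(), which now also tolerates a vanished runtime and always returns 0, remains the helper drivers call from outside the stream lock. A sketch of the typical call site, with example_fifo_overrun() standing in for a device-specific check:

#include <linux/interrupt.h>
#include <sound/pcm.h>

static bool example_fifo_overrun(void *dev_id);	/* hypothetical check */

static irqreturn_t example_dma_irq(int irq, void *dev_id)
{
	struct snd_pcm_substream *substream = dev_id;

	/* takes the stream lock itself, so it is safe from IRQ context */
	if (example_fifo_overrun(dev_id))
		snd_pcm_stop_xrun(substream);
	return IRQ_HANDLED;
}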
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index b53026a72e73..69517e18ef07 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -29,6 +29,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/mm.h>
#include <sound/rawmidi.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -88,6 +89,7 @@ static inline unsigned short snd_rawmidi_file_flags(struct file *file)
static inline int snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
{
struct snd_rawmidi_runtime *runtime = substream->runtime;
+
return runtime->avail >= runtime->avail_min;
}
@@ -95,6 +97,7 @@ static inline int snd_rawmidi_ready_append(struct snd_rawmidi_substream *substre
size_t count)
{
struct snd_rawmidi_runtime *runtime = substream->runtime;
+
return runtime->avail >= runtime->avail_min &&
(!substream->append || runtime->avail >= count);
}
@@ -103,6 +106,7 @@ static void snd_rawmidi_input_event_work(struct work_struct *work)
{
struct snd_rawmidi_runtime *runtime =
container_of(work, struct snd_rawmidi_runtime, event_work);
+
if (runtime->event)
runtime->event(runtime->substream);
}
@@ -111,7 +115,8 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
{
struct snd_rawmidi_runtime *runtime;
- if ((runtime = kzalloc(sizeof(*runtime), GFP_KERNEL)) == NULL)
+ runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
+ if (!runtime)
return -ENOMEM;
runtime->substream = substream;
spin_lock_init(&runtime->lock);
@@ -124,7 +129,8 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
runtime->avail = 0;
else
runtime->avail = runtime->buffer_size;
- if ((runtime->buffer = kmalloc(runtime->buffer_size, GFP_KERNEL)) == NULL) {
+ runtime->buffer = kvmalloc(runtime->buffer_size, GFP_KERNEL);
+ if (!runtime->buffer) {
kfree(runtime);
return -ENOMEM;
}
@@ -137,13 +143,13 @@ static int snd_rawmidi_runtime_free(struct snd_rawmidi_substream *substream)
{
struct snd_rawmidi_runtime *runtime = substream->runtime;
- kfree(runtime->buffer);
+ kvfree(runtime->buffer);
kfree(runtime);
substream->runtime = NULL;
return 0;
}
-static inline void snd_rawmidi_output_trigger(struct snd_rawmidi_substream *substream,int up)
+static inline void snd_rawmidi_output_trigger(struct snd_rawmidi_substream *substream, int up)
{
if (!substream->opened)
return;
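Switching the rawmidi buffer to kvmalloc() matters because the buffer can be resized up to 1 MiB (see the bounds in resize_runtime_buffer() below): when a physically contiguous allocation of that size fails, kvmalloc() transparently falls back to vmalloc(), which is fine for a CPU-only buffer. The one rule is symmetry:

#include <linux/mm.h>

/* sketch: kvmalloc'ed memory may be vmalloc-backed, so it must be
 * released with kvfree(), never plain kfree() */
static char *example_alloc_buf(size_t size)
{
	return kvmalloc(size, GFP_KERNEL);
}

static void example_free_buf(char *buf)
{
	kvfree(buf);
}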
@@ -159,17 +165,28 @@ static void snd_rawmidi_input_trigger(struct snd_rawmidi_substream *substream, i
cancel_work_sync(&substream->runtime->event_work);
}
-int snd_rawmidi_drop_output(struct snd_rawmidi_substream *substream)
+static void __reset_runtime_ptrs(struct snd_rawmidi_runtime *runtime,
+ bool is_input)
+{
+ runtime->drain = 0;
+ runtime->appl_ptr = runtime->hw_ptr = 0;
+ runtime->avail = is_input ? 0 : runtime->buffer_size;
+}
+
+static void reset_runtime_ptrs(struct snd_rawmidi_runtime *runtime,
+ bool is_input)
{
unsigned long flags;
- struct snd_rawmidi_runtime *runtime = substream->runtime;
- snd_rawmidi_output_trigger(substream, 0);
- runtime->drain = 0;
spin_lock_irqsave(&runtime->lock, flags);
- runtime->appl_ptr = runtime->hw_ptr = 0;
- runtime->avail = runtime->buffer_size;
+ __reset_runtime_ptrs(runtime, is_input);
spin_unlock_irqrestore(&runtime->lock, flags);
+}
+
+int snd_rawmidi_drop_output(struct snd_rawmidi_substream *substream)
+{
+ snd_rawmidi_output_trigger(substream, 0);
+ reset_runtime_ptrs(substream->runtime, false);
return 0;
}
EXPORT_SYMBOL(snd_rawmidi_drop_output);
@@ -208,15 +225,8 @@ EXPORT_SYMBOL(snd_rawmidi_drain_output);
int snd_rawmidi_drain_input(struct snd_rawmidi_substream *substream)
{
- unsigned long flags;
- struct snd_rawmidi_runtime *runtime = substream->runtime;
-
snd_rawmidi_input_trigger(substream, 0);
- runtime->drain = 0;
- spin_lock_irqsave(&runtime->lock, flags);
- runtime->appl_ptr = runtime->hw_ptr = 0;
- runtime->avail = 0;
- spin_unlock_irqrestore(&runtime->lock, flags);
+ reset_runtime_ptrs(substream->runtime, true);
return 0;
}
EXPORT_SYMBOL(snd_rawmidi_drain_input);
@@ -330,25 +340,23 @@ static int rawmidi_open_priv(struct snd_rawmidi *rmidi, int subdevice, int mode,
/* called from sound/core/seq/seq_midi.c */
int snd_rawmidi_kernel_open(struct snd_card *card, int device, int subdevice,
- int mode, struct snd_rawmidi_file * rfile)
+ int mode, struct snd_rawmidi_file *rfile)
{
struct snd_rawmidi *rmidi;
- int err;
+ int err = 0;
if (snd_BUG_ON(!rfile))
return -EINVAL;
mutex_lock(&register_mutex);
rmidi = snd_rawmidi_search(card, device);
- if (rmidi == NULL) {
- mutex_unlock(&register_mutex);
- return -ENODEV;
- }
- if (!try_module_get(rmidi->card->module)) {
- mutex_unlock(&register_mutex);
- return -ENXIO;
- }
+ if (!rmidi)
+ err = -ENODEV;
+ else if (!try_module_get(rmidi->card->module))
+ err = -ENXIO;
mutex_unlock(&register_mutex);
+ if (err < 0)
+ return err;
mutex_lock(&rmidi->open_mutex);
err = rawmidi_open_priv(rmidi, subdevice, mode, rfile);
@@ -370,7 +378,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
struct snd_rawmidi_file *rawmidi_file = NULL;
wait_queue_entry_t wait;
- if ((file->f_flags & O_APPEND) && !(file->f_flags & O_NONBLOCK))
+ if ((file->f_flags & O_APPEND) && !(file->f_flags & O_NONBLOCK))
return -EINVAL; /* invalid combination */
err = nonseekable_open(inode, file);
@@ -520,7 +528,7 @@ int snd_rawmidi_kernel_release(struct snd_rawmidi_file *rfile)
if (snd_BUG_ON(!rfile))
return -ENXIO;
-
+
rmidi = rfile->rmidi;
rawmidi_release_priv(rfile);
module_put(rmidi->card->module);
@@ -548,7 +556,7 @@ static int snd_rawmidi_info(struct snd_rawmidi_substream *substream,
struct snd_rawmidi_info *info)
{
struct snd_rawmidi *rmidi;
-
+
if (substream == NULL)
return -ENODEV;
rmidi = substream->rmidi;
@@ -568,11 +576,13 @@ static int snd_rawmidi_info(struct snd_rawmidi_substream *substream,
}
static int snd_rawmidi_info_user(struct snd_rawmidi_substream *substream,
- struct snd_rawmidi_info __user * _info)
+ struct snd_rawmidi_info __user *_info)
{
struct snd_rawmidi_info info;
int err;
- if ((err = snd_rawmidi_info(substream, &info)) < 0)
+
+ err = snd_rawmidi_info(substream, &info);
+ if (err < 0)
return err;
if (copy_to_user(_info, &info, sizeof(struct snd_rawmidi_info)))
return -EFAULT;
@@ -619,85 +629,68 @@ static int snd_rawmidi_info_select_user(struct snd_card *card,
{
int err;
struct snd_rawmidi_info info;
+
if (get_user(info.device, &_info->device))
return -EFAULT;
if (get_user(info.stream, &_info->stream))
return -EFAULT;
if (get_user(info.subdevice, &_info->subdevice))
return -EFAULT;
- if ((err = snd_rawmidi_info_select(card, &info)) < 0)
+ err = snd_rawmidi_info_select(card, &info);
+ if (err < 0)
return err;
if (copy_to_user(_info, &info, sizeof(struct snd_rawmidi_info)))
return -EFAULT;
return 0;
}
-int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
- struct snd_rawmidi_params * params)
+static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
+ struct snd_rawmidi_params *params,
+ bool is_input)
{
char *newbuf, *oldbuf;
- struct snd_rawmidi_runtime *runtime = substream->runtime;
-
- if (substream->append && substream->use_count > 1)
- return -EBUSY;
- snd_rawmidi_drain_output(substream);
- if (params->buffer_size < 32 || params->buffer_size > 1024L * 1024L) {
+
+ if (params->buffer_size < 32 || params->buffer_size > 1024L * 1024L)
return -EINVAL;
- }
- if (params->avail_min < 1 || params->avail_min > params->buffer_size) {
+ if (params->avail_min < 1 || params->avail_min > params->buffer_size)
return -EINVAL;
- }
if (params->buffer_size != runtime->buffer_size) {
- newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
+ newbuf = kvmalloc(params->buffer_size, GFP_KERNEL);
if (!newbuf)
return -ENOMEM;
spin_lock_irq(&runtime->lock);
oldbuf = runtime->buffer;
runtime->buffer = newbuf;
runtime->buffer_size = params->buffer_size;
- runtime->avail = runtime->buffer_size;
- runtime->appl_ptr = runtime->hw_ptr = 0;
+ __reset_runtime_ptrs(runtime, is_input);
spin_unlock_irq(&runtime->lock);
- kfree(oldbuf);
+ kvfree(oldbuf);
}
runtime->avail_min = params->avail_min;
- substream->active_sensing = !params->no_active_sensing;
return 0;
}
+
+int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
+ struct snd_rawmidi_params *params)
+{
+ if (substream->append && substream->use_count > 1)
+ return -EBUSY;
+ snd_rawmidi_drain_output(substream);
+ substream->active_sensing = !params->no_active_sensing;
+ return resize_runtime_buffer(substream->runtime, params, false);
+}
EXPORT_SYMBOL(snd_rawmidi_output_params);
int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
- struct snd_rawmidi_params * params)
+ struct snd_rawmidi_params *params)
{
- char *newbuf, *oldbuf;
- struct snd_rawmidi_runtime *runtime = substream->runtime;
-
snd_rawmidi_drain_input(substream);
- if (params->buffer_size < 32 || params->buffer_size > 1024L * 1024L) {
- return -EINVAL;
- }
- if (params->avail_min < 1 || params->avail_min > params->buffer_size) {
- return -EINVAL;
- }
- if (params->buffer_size != runtime->buffer_size) {
- newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
- if (!newbuf)
- return -ENOMEM;
- spin_lock_irq(&runtime->lock);
- oldbuf = runtime->buffer;
- runtime->buffer = newbuf;
- runtime->buffer_size = params->buffer_size;
- runtime->appl_ptr = runtime->hw_ptr = 0;
- spin_unlock_irq(&runtime->lock);
- kfree(oldbuf);
- }
- runtime->avail_min = params->avail_min;
- return 0;
+ return resize_runtime_buffer(substream->runtime, params, true);
}
EXPORT_SYMBOL(snd_rawmidi_input_params);
static int snd_rawmidi_output_status(struct snd_rawmidi_substream *substream,
- struct snd_rawmidi_status * status)
+ struct snd_rawmidi_status *status)
{
struct snd_rawmidi_runtime *runtime = substream->runtime;
@@ -710,7 +703,7 @@ static int snd_rawmidi_output_status(struct snd_rawmidi_substream *substream,
}
static int snd_rawmidi_input_status(struct snd_rawmidi_substream *substream,
- struct snd_rawmidi_status * status)
+ struct snd_rawmidi_status *status)
{
struct snd_rawmidi_runtime *runtime = substream->runtime;
@@ -739,6 +732,7 @@ static long snd_rawmidi_ioctl(struct file *file, unsigned int cmd, unsigned long
{
int stream;
struct snd_rawmidi_info __user *info = argp;
+
if (get_user(stream, &info->stream))
return -EFAULT;
switch (stream) {
@@ -753,6 +747,7 @@ static long snd_rawmidi_ioctl(struct file *file, unsigned int cmd, unsigned long
case SNDRV_RAWMIDI_IOCTL_PARAMS:
{
struct snd_rawmidi_params params;
+
if (copy_from_user(&params, argp, sizeof(struct snd_rawmidi_params)))
return -EFAULT;
switch (params.stream) {
@@ -772,6 +767,7 @@ static long snd_rawmidi_ioctl(struct file *file, unsigned int cmd, unsigned long
{
int err = 0;
struct snd_rawmidi_status status;
+
if (copy_from_user(&status, argp, sizeof(struct snd_rawmidi_status)))
return -EFAULT;
switch (status.stream) {
@@ -797,6 +793,7 @@ static long snd_rawmidi_ioctl(struct file *file, unsigned int cmd, unsigned long
case SNDRV_RAWMIDI_IOCTL_DROP:
{
int val;
+
if (get_user(val, (int __user *) argp))
return -EFAULT;
switch (val) {
@@ -811,6 +808,7 @@ static long snd_rawmidi_ioctl(struct file *file, unsigned int cmd, unsigned long
case SNDRV_RAWMIDI_IOCTL_DRAIN:
{
int val;
+
if (get_user(val, (int __user *) argp))
return -EFAULT;
switch (val) {
@@ -844,7 +842,7 @@ static int snd_rawmidi_control_ioctl(struct snd_card *card,
case SNDRV_CTL_IOCTL_RAWMIDI_NEXT_DEVICE:
{
int device;
-
+
if (get_user(device, (int __user *)argp))
return -EFAULT;
if (device >= SNDRV_RAWMIDI_DEVICES) /* next device is -1 */
@@ -866,7 +864,7 @@ static int snd_rawmidi_control_ioctl(struct snd_card *card,
case SNDRV_CTL_IOCTL_RAWMIDI_PREFER_SUBDEVICE:
{
int val;
-
+
if (get_user(val, (int __user *)argp))
return -EFAULT;
control->preferred_subdevice[SND_CTL_SUBDEV_RAWMIDI] = val;
@@ -1020,6 +1018,7 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
spin_lock_irq(&runtime->lock);
while (!snd_rawmidi_ready(substream)) {
wait_queue_entry_t wait;
+
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
spin_unlock_irq(&runtime->lock);
return result > 0 ? result : -EAGAIN;
@@ -1072,7 +1071,7 @@ int snd_rawmidi_transmit_empty(struct snd_rawmidi_substream *substream)
spin_lock_irqsave(&runtime->lock, flags);
result = runtime->avail >= runtime->buffer_size;
spin_unlock_irqrestore(&runtime->lock, flags);
- return result;
+ return result;
}
EXPORT_SYMBOL(snd_rawmidi_transmit_empty);
@@ -1210,7 +1209,7 @@ EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
* @substream: the rawmidi substream
* @buffer: the buffer pointer
* @count: the data size to transfer
- *
+ *
* Copies data from the buffer to the device and advances the pointer.
*
* Return: The copied size if successful, or a negative error code on failure.
@@ -1324,6 +1323,7 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
spin_lock_irq(&runtime->lock);
while (!snd_rawmidi_ready_append(substream, count)) {
wait_queue_entry_t wait;
+
if (file->f_flags & O_NONBLOCK) {
spin_unlock_irq(&runtime->lock);
return result > 0 ? result : -EAGAIN;
@@ -1357,6 +1357,7 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
while (runtime->avail != runtime->buffer_size) {
wait_queue_entry_t wait;
unsigned int last_avail = runtime->avail;
+
init_waitqueue_entry(&wait, current);
add_wait_queue(&runtime->sleep, &wait);
set_current_state(TASK_INTERRUPTIBLE);
@@ -1374,7 +1375,7 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
return result;
}
-static __poll_t snd_rawmidi_poll(struct file *file, poll_table * wait)
+static __poll_t snd_rawmidi_poll(struct file *file, poll_table *wait)
{
struct snd_rawmidi_file *rfile;
struct snd_rawmidi_runtime *runtime;
@@ -1411,7 +1412,6 @@ static __poll_t snd_rawmidi_poll(struct file *file, poll_table * wait)
#endif
/*
-
*/
static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
@@ -1479,8 +1479,7 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
* Register functions
*/
-static const struct file_operations snd_rawmidi_f_ops =
-{
+static const struct file_operations snd_rawmidi_f_ops = {
.owner = THIS_MODULE,
.read = snd_rawmidi_read,
.write = snd_rawmidi_write,
@@ -1535,7 +1534,7 @@ static void release_rawmidi_device(struct device *dev)
*/
int snd_rawmidi_new(struct snd_card *card, char *id, int device,
int output_count, int input_count,
- struct snd_rawmidi ** rrawmidi)
+ struct snd_rawmidi **rrawmidi)
{
struct snd_rawmidi *rmidi;
int err;
@@ -1566,27 +1565,29 @@ int snd_rawmidi_new(struct snd_card *card, char *id, int device,
rmidi->dev.release = release_rawmidi_device;
dev_set_name(&rmidi->dev, "midiC%iD%i", card->number, device);
- if ((err = snd_rawmidi_alloc_substreams(rmidi,
- &rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT],
- SNDRV_RAWMIDI_STREAM_INPUT,
- input_count)) < 0) {
- snd_rawmidi_free(rmidi);
- return err;
- }
- if ((err = snd_rawmidi_alloc_substreams(rmidi,
- &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT],
- SNDRV_RAWMIDI_STREAM_OUTPUT,
- output_count)) < 0) {
- snd_rawmidi_free(rmidi);
- return err;
- }
- if ((err = snd_device_new(card, SNDRV_DEV_RAWMIDI, rmidi, &ops)) < 0) {
- snd_rawmidi_free(rmidi);
- return err;
- }
+ err = snd_rawmidi_alloc_substreams(rmidi,
+ &rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT],
+ SNDRV_RAWMIDI_STREAM_INPUT,
+ input_count);
+ if (err < 0)
+ goto error;
+ err = snd_rawmidi_alloc_substreams(rmidi,
+ &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT],
+ SNDRV_RAWMIDI_STREAM_OUTPUT,
+ output_count);
+ if (err < 0)
+ goto error;
+ err = snd_device_new(card, SNDRV_DEV_RAWMIDI, rmidi, &ops);
+ if (err < 0)
+ goto error;
+
if (rrawmidi)
*rrawmidi = rmidi;
return 0;
+
+ error:
+ snd_rawmidi_free(rmidi);
+ return err;
}
EXPORT_SYMBOL(snd_rawmidi_new);
@@ -1624,6 +1625,7 @@ static int snd_rawmidi_free(struct snd_rawmidi *rmidi)
static int snd_rawmidi_dev_free(struct snd_device *device)
{
struct snd_rawmidi *rmidi = device->device_data;
+
return snd_rawmidi_free(rmidi);
}
@@ -1631,6 +1633,7 @@ static int snd_rawmidi_dev_free(struct snd_device *device)
static void snd_rawmidi_dev_seq_free(struct snd_seq_device *device)
{
struct snd_rawmidi *rmidi = device->private_data;
+
rmidi->seq_dev = NULL;
}
#endif
@@ -1644,30 +1647,27 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
if (rmidi->device >= SNDRV_RAWMIDI_DEVICES)
return -ENOMEM;
+ err = 0;
mutex_lock(&register_mutex);
- if (snd_rawmidi_search(rmidi->card, rmidi->device)) {
- mutex_unlock(&register_mutex);
- return -EBUSY;
- }
- list_add_tail(&rmidi->list, &snd_rawmidi_devices);
+ if (snd_rawmidi_search(rmidi->card, rmidi->device))
+ err = -EBUSY;
+ else
+ list_add_tail(&rmidi->list, &snd_rawmidi_devices);
mutex_unlock(&register_mutex);
+ if (err < 0)
+ return err;
+
err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI,
rmidi->card, rmidi->device,
&snd_rawmidi_f_ops, rmidi, &rmidi->dev);
if (err < 0) {
rmidi_err(rmidi, "unable to register\n");
- mutex_lock(&register_mutex);
- list_del(&rmidi->list);
- mutex_unlock(&register_mutex);
- return err;
+ goto error;
}
- if (rmidi->ops && rmidi->ops->dev_register &&
- (err = rmidi->ops->dev_register(rmidi)) < 0) {
- snd_unregister_device(&rmidi->dev);
- mutex_lock(&register_mutex);
- list_del(&rmidi->list);
- mutex_unlock(&register_mutex);
- return err;
+ if (rmidi->ops && rmidi->ops->dev_register) {
+ err = rmidi->ops->dev_register(rmidi);
+ if (err < 0)
+ goto error_unregister;
}
#ifdef CONFIG_SND_OSSEMUL
rmidi->ossreg = 0;
@@ -1719,6 +1719,14 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
}
#endif
return 0;
+
+ error_unregister:
+ snd_unregister_device(&rmidi->dev);
+ error:
+ mutex_lock(&register_mutex);
+ list_del(&rmidi->list);
+ mutex_unlock(&register_mutex);
+ return err;
}
static int snd_rawmidi_dev_disconnect(struct snd_device *device)
@@ -1732,6 +1740,7 @@ static int snd_rawmidi_dev_disconnect(struct snd_device *device)
list_del_init(&rmidi->list);
for (dir = 0; dir < 2; dir++) {
struct snd_rawmidi_substream *s;
+
list_for_each_entry(s, &rmidi->streams[dir].substreams, list) {
if (s->runtime)
wake_up(&s->runtime->sleep);
@@ -1769,7 +1778,7 @@ void snd_rawmidi_set_ops(struct snd_rawmidi *rmidi, int stream,
const struct snd_rawmidi_ops *ops)
{
struct snd_rawmidi_substream *substream;
-
+
list_for_each_entry(substream, &rmidi->streams[stream].substreams, list)
substream->ops = ops;
}
diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
index 5f64d0d88320..e1f44fc86885 100644
--- a/sound/core/seq/oss/seq_oss.c
+++ b/sound/core/seq/oss/seq_oss.c
@@ -203,7 +203,7 @@ odev_poll(struct file *file, poll_table * wait)
struct seq_oss_devinfo *dp;
dp = file->private_data;
if (snd_BUG_ON(!dp))
- return -ENXIO;
+ return EPOLLERR;
return snd_seq_oss_poll(dp, file, wait);
}
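Returning -ENXIO from a poll handler became a type error once poll switched to the unsigned __poll_t mask: a negative errno turns into a bogus event set. The fix reports EPOLLERR instead; the same correction appears in snd_seq_poll() below. A sketch of the corrected shape:

#include <linux/poll.h>

/* a poll handler reports failure through the event mask, not -errno */
static __poll_t example_poll(struct file *file, poll_table *wait)
{
	void *priv = file->private_data;

	if (!priv)
		return EPOLLERR;	/* __poll_t is unsigned: no -ENXIO here */
	return EPOLLIN | EPOLLRDNORM;	/* placeholder for the real query */
}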
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index 9debd1b8fd28..0d5f8b16d057 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -637,7 +637,7 @@ snd_seq_oss_midi_putc(struct seq_oss_devinfo *dp, int dev, unsigned char c, stru
if ((mdev = get_mididev(dp, dev)) == NULL)
return -ENODEV;
- if (snd_midi_event_encode_byte(mdev->coder, c, ev) > 0) {
+ if (snd_midi_event_encode_byte(mdev->coder, c, ev)) {
snd_seq_oss_fill_addr(dp, ev, mdev->client, mdev->port);
snd_use_lock_free(&mdev->use_lock);
return 0;
diff --git a/sound/core/seq/oss/seq_oss_timer.c b/sound/core/seq/oss/seq_oss_timer.c
index 4f24ea9fad93..ba127c22539a 100644
--- a/sound/core/seq/oss/seq_oss_timer.c
+++ b/sound/core/seq/oss/seq_oss_timer.c
@@ -92,7 +92,7 @@ snd_seq_oss_process_timer_event(struct seq_oss_timer *rec, union evrec *ev)
case TMR_WAIT_REL:
parm += rec->cur_tick;
rec->realtime = 0;
- /* continue to next */
+ /* fall through and continue to next */
case TMR_WAIT_ABS:
if (parm == 0) {
rec->realtime = 1;
diff --git a/sound/core/seq/seq.c b/sound/core/seq/seq.c
index 639544b4fb04..7de98d71f2aa 100644
--- a/sound/core/seq/seq.c
+++ b/sound/core/seq/seq.c
@@ -84,30 +84,32 @@ static int __init alsa_seq_init(void)
{
int err;
- if ((err = client_init_data()) < 0)
- goto error;
-
- /* init memory, room for selected events */
- if ((err = snd_sequencer_memory_init()) < 0)
- goto error;
-
- /* init event queues */
- if ((err = snd_seq_queues_init()) < 0)
+ err = client_init_data();
+ if (err < 0)
goto error;
/* register sequencer device */
- if ((err = snd_sequencer_device_init()) < 0)
+ err = snd_sequencer_device_init();
+ if (err < 0)
goto error;
/* register proc interface */
- if ((err = snd_seq_info_init()) < 0)
- goto error;
+ err = snd_seq_info_init();
+ if (err < 0)
+ goto error_device;
/* register our internal client */
- if ((err = snd_seq_system_client_init()) < 0)
- goto error;
+ err = snd_seq_system_client_init();
+ if (err < 0)
+ goto error_info;
snd_seq_autoload_init();
+ return 0;
+
+ error_info:
+ snd_seq_info_done();
+ error_device:
+ snd_sequencer_device_done();
error:
return err;
}
@@ -126,9 +128,6 @@ static void __exit alsa_seq_exit(void)
/* unregister sequencer device */
snd_sequencer_device_done();
- /* release event memory */
- snd_sequencer_memory_done();
-
snd_seq_autoload_exit();
}
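Besides dropping the empty memory/queue init stubs, alsa_seq_init() now unwinds on failure instead of leaking whatever had already been registered: each later step jumps to a label that tears down the earlier ones in reverse order. A skeleton of the pattern, with hypothetical register/unregister pairs:

#include <linux/init.h>

static int example_register_device(void);	/* hypothetical pair #1 */
static void example_unregister_device(void);
static int example_register_info(void);	/* hypothetical step #2 */

static int __init example_init(void)
{
	int err;

	err = example_register_device();
	if (err < 0)
		goto error;
	err = example_register_info();
	if (err < 0)
		goto error_device;
	return 0;

 error_device:
	example_unregister_device();	/* undo in reverse order */
 error:
	return err;
}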
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 56ca78423040..92e6524a3a9d 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -311,10 +311,9 @@ static int snd_seq_open(struct inode *inode, struct file *file)
if (err < 0)
return err;
- if (mutex_lock_interruptible(&register_mutex))
- return -ERESTARTSYS;
+ mutex_lock(&register_mutex);
client = seq_create_client1(-1, SNDRV_SEQ_DEFAULT_EVENTS);
- if (client == NULL) {
+ if (!client) {
mutex_unlock(&register_mutex);
return -ENOMEM; /* failure code */
}
@@ -1101,7 +1100,7 @@ static __poll_t snd_seq_poll(struct file *file, poll_table * wait)
/* check client structures are in place */
if (snd_BUG_ON(!client))
- return -ENXIO;
+ return EPOLLERR;
if ((snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT) &&
client->data.user.fifo) {
@@ -1704,10 +1703,7 @@ static int snd_seq_ioctl_get_queue_timer(struct snd_seq_client *client,
if (queue == NULL)
return -EINVAL;
- if (mutex_lock_interruptible(&queue->timer_mutex)) {
- queuefree(queue);
- return -ERESTARTSYS;
- }
+ mutex_lock(&queue->timer_mutex);
tmr = queue->timer;
memset(timer, 0, sizeof(*timer));
timer->queue = queue->queue;
@@ -1741,10 +1737,7 @@ static int snd_seq_ioctl_set_queue_timer(struct snd_seq_client *client,
q = queueptr(timer->queue);
if (q == NULL)
return -ENXIO;
- if (mutex_lock_interruptible(&q->timer_mutex)) {
- queuefree(q);
- return -ERESTARTSYS;
- }
+ mutex_lock(&q->timer_mutex);
tmr = q->timer;
snd_seq_queue_timer_close(timer->queue);
tmr->type = timer->type;
@@ -2180,8 +2173,7 @@ int snd_seq_create_kernel_client(struct snd_card *card, int client_index,
if (card == NULL && client_index >= SNDRV_SEQ_GLOBAL_CLIENTS)
return -EINVAL;
- if (mutex_lock_interruptible(&register_mutex))
- return -ERESTARTSYS;
+ mutex_lock(&register_mutex);
if (card) {
client_index += SNDRV_SEQ_GLOBAL_CLIENTS
@@ -2522,19 +2514,15 @@ int __init snd_sequencer_device_init(void)
snd_device_initialize(&seq_dev, NULL);
dev_set_name(&seq_dev, "seq");
- if (mutex_lock_interruptible(&register_mutex))
- return -ERESTARTSYS;
-
+ mutex_lock(&register_mutex);
err = snd_register_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0,
&snd_seq_f_ops, NULL, &seq_dev);
+ mutex_unlock(&register_mutex);
if (err < 0) {
- mutex_unlock(&register_mutex);
put_device(&seq_dev);
return err;
}
- mutex_unlock(&register_mutex);
-
return 0;
}
@@ -2543,7 +2531,7 @@ int __init snd_sequencer_device_init(void)
/*
* unregister sequencer device
*/
-void __exit snd_sequencer_device_done(void)
+void snd_sequencer_device_done(void)
{
snd_unregister_device(&seq_dev);
put_device(&seq_dev);
diff --git a/sound/core/seq/seq_info.c b/sound/core/seq/seq_info.c
index 97015447b9b3..b27fedd435b6 100644
--- a/sound/core/seq/seq_info.c
+++ b/sound/core/seq/seq_info.c
@@ -50,7 +50,7 @@ create_info_entry(char *name, void (*read)(struct snd_info_entry *,
return entry;
}
-static void free_info_entries(void)
+void snd_seq_info_done(void)
{
snd_info_free_entry(queues_entry);
snd_info_free_entry(clients_entry);
@@ -70,12 +70,6 @@ int __init snd_seq_info_init(void)
return 0;
error:
- free_info_entries();
+ snd_seq_info_done();
return -ENOMEM;
}
-
-int __exit snd_seq_info_done(void)
-{
- free_info_entries();
- return 0;
-}
diff --git a/sound/core/seq/seq_info.h b/sound/core/seq/seq_info.h
index f8549f81a645..2cdf8f6e63f5 100644
--- a/sound/core/seq/seq_info.h
+++ b/sound/core/seq/seq_info.h
@@ -30,11 +30,11 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry, struct snd_info_buff
#ifdef CONFIG_SND_PROC_FS
-int snd_seq_info_init( void );
-int snd_seq_info_done( void );
+int snd_seq_info_init(void);
+void snd_seq_info_done(void);
#else
static inline int snd_seq_info_init(void) { return 0; }
-static inline int snd_seq_info_done(void) { return 0; }
+static inline void snd_seq_info_done(void) {}
#endif
#endif
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index a4c8543176b2..5b0388202bac 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -504,18 +504,6 @@ int snd_seq_pool_delete(struct snd_seq_pool **ppool)
return 0;
}
-/* initialize sequencer memory */
-int __init snd_sequencer_memory_init(void)
-{
- return 0;
-}
-
-/* release sequencer memory */
-void __exit snd_sequencer_memory_done(void)
-{
-}
-
-
/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
struct snd_seq_pool *pool, char *space)
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 3abe306c394a..1292fe91f02e 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -94,12 +94,6 @@ struct snd_seq_pool *snd_seq_pool_new(int poolsize);
/* remove pool */
int snd_seq_pool_delete(struct snd_seq_pool **pool);
-/* init memory */
-int snd_sequencer_memory_init(void);
-
-/* release event memory */
-void snd_sequencer_memory_done(void);
-
/* polling */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file, poll_table *wait);
diff --git a/sound/core/seq/seq_midi.c b/sound/core/seq/seq_midi.c
index 5dd0ee258359..9e0dabd3ce5f 100644
--- a/sound/core/seq/seq_midi.c
+++ b/sound/core/seq/seq_midi.c
@@ -78,7 +78,7 @@ static void snd_midi_input_event(struct snd_rawmidi_substream *substream)
struct seq_midisynth *msynth;
struct snd_seq_event ev;
char buf[16], *pbuf;
- long res, count;
+ long res;
if (substream == NULL)
return;
@@ -94,19 +94,15 @@ static void snd_midi_input_event(struct snd_rawmidi_substream *substream)
if (msynth->parser == NULL)
continue;
pbuf = buf;
- while (res > 0) {
- count = snd_midi_event_encode(msynth->parser, pbuf, res, &ev);
- if (count < 0)
- break;
- pbuf += count;
- res -= count;
- if (ev.type != SNDRV_SEQ_EVENT_NONE) {
- ev.source.port = msynth->seq_port;
- ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
- snd_seq_kernel_client_dispatch(msynth->seq_client, &ev, 1, 0);
- /* clear event and reset header */
- memset(&ev, 0, sizeof(ev));
- }
+ while (res-- > 0) {
+ if (!snd_midi_event_encode_byte(msynth->parser,
+ *pbuf++, &ev))
+ continue;
+ ev.source.port = msynth->seq_port;
+ ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
+ snd_seq_kernel_client_dispatch(msynth->seq_client, &ev, 1, 0);
+ /* clear event and reset header */
+ memset(&ev, 0, sizeof(ev));
}
}
}
diff --git a/sound/core/seq/seq_midi_emul.c b/sound/core/seq/seq_midi_emul.c
index 288f839a554b..c1975dd31871 100644
--- a/sound/core/seq/seq_midi_emul.c
+++ b/sound/core/seq/seq_midi_emul.c
@@ -318,7 +318,7 @@ do_control(struct snd_midi_op *ops, void *drv, struct snd_midi_channel_set *chse
break;
case MIDI_CTL_MSB_DATA_ENTRY:
chan->control[MIDI_CTL_LSB_DATA_ENTRY] = 0;
- /* go through here */
+ /* fall through */
case MIDI_CTL_LSB_DATA_ENTRY:
if (chan->param_type == SNDRV_MIDI_PARAM_TYPE_REGISTERED)
rpn(ops, drv, chan, chset);
@@ -728,15 +728,3 @@ void snd_midi_channel_free_set(struct snd_midi_channel_set *chset)
kfree(chset);
}
EXPORT_SYMBOL(snd_midi_channel_free_set);
-
-static int __init alsa_seq_midi_emul_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_seq_midi_emul_exit(void)
-{
-}
-
-module_init(alsa_seq_midi_emul_init)
-module_exit(alsa_seq_midi_emul_exit)
diff --git a/sound/core/seq/seq_midi_event.c b/sound/core/seq/seq_midi_event.c
index 90bbbdbeba03..b11419537062 100644
--- a/sound/core/seq/seq_midi_event.c
+++ b/sound/core/seq/seq_midi_event.c
@@ -175,14 +175,6 @@ void snd_midi_event_reset_decode(struct snd_midi_event *dev)
}
EXPORT_SYMBOL(snd_midi_event_reset_decode);
-#if 0
-void snd_midi_event_init(struct snd_midi_event *dev)
-{
- snd_midi_event_reset_encode(dev);
- snd_midi_event_reset_decode(dev);
-}
-#endif /* 0 */
-
void snd_midi_event_no_status(struct snd_midi_event *dev, int on)
{
dev->nostat = on ? 1 : 0;
@@ -190,69 +182,16 @@ void snd_midi_event_no_status(struct snd_midi_event *dev, int on)
EXPORT_SYMBOL(snd_midi_event_no_status);
/*
- * resize buffer
- */
-#if 0
-int snd_midi_event_resize_buffer(struct snd_midi_event *dev, int bufsize)
-{
- unsigned char *new_buf, *old_buf;
- unsigned long flags;
-
- if (bufsize == dev->bufsize)
- return 0;
- new_buf = kmalloc(bufsize, GFP_KERNEL);
- if (new_buf == NULL)
- return -ENOMEM;
- spin_lock_irqsave(&dev->lock, flags);
- old_buf = dev->buf;
- dev->buf = new_buf;
- dev->bufsize = bufsize;
- reset_encode(dev);
- spin_unlock_irqrestore(&dev->lock, flags);
- kfree(old_buf);
- return 0;
-}
-#endif /* 0 */
-
-/*
- * read bytes and encode to sequencer event if finished
- * return the size of encoded bytes
- */
-long snd_midi_event_encode(struct snd_midi_event *dev, unsigned char *buf, long count,
- struct snd_seq_event *ev)
-{
- long result = 0;
- int rc;
-
- ev->type = SNDRV_SEQ_EVENT_NONE;
-
- while (count-- > 0) {
- rc = snd_midi_event_encode_byte(dev, *buf++, ev);
- result++;
- if (rc < 0)
- return rc;
- else if (rc > 0)
- return result;
- }
-
- return result;
-}
-EXPORT_SYMBOL(snd_midi_event_encode);
-
-/*
* read one byte and encode to sequencer event:
- * return 1 if MIDI bytes are encoded to an event
- * 0 data is not finished
- * negative for error
+ * return true if MIDI bytes are encoded to an event
+ *        false if the data is not finished yet
*/
-int snd_midi_event_encode_byte(struct snd_midi_event *dev, int c,
- struct snd_seq_event *ev)
+bool snd_midi_event_encode_byte(struct snd_midi_event *dev, unsigned char c,
+ struct snd_seq_event *ev)
{
- int rc = 0;
+ bool rc = false;
unsigned long flags;
- c &= 0xff;
-
if (c >= MIDI_CMD_COMMON_CLOCK) {
/* real-time event */
ev->type = status_event[ST_SPECIAL + c - 0xf0].event;
@@ -293,7 +232,7 @@ int snd_midi_event_encode_byte(struct snd_midi_event *dev, int c,
status_event[dev->type].encode(dev, ev);
if (dev->type >= ST_SPECIAL)
dev->type = ST_INVALID;
- rc = 1;
+ rc = true;
} else if (dev->type == ST_SYSEX) {
if (c == MIDI_CMD_COMMON_SYSEX_END ||
dev->read >= dev->bufsize) {
@@ -306,7 +245,7 @@ int snd_midi_event_encode_byte(struct snd_midi_event *dev, int c,
dev->read = 0; /* continue to parse */
else
reset_encode(dev); /* all parsed */
- rc = 1;
+ rc = true;
}
}
@@ -531,15 +470,3 @@ static int extra_decode_xrpn(struct snd_midi_event *dev, unsigned char *buf,
}
return idx;
}
-
-static int __init alsa_seq_midi_event_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_seq_midi_event_exit(void)
-{
-}
-
-module_init(alsa_seq_midi_event_init)
-module_exit(alsa_seq_midi_event_exit)
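With the bulk snd_midi_event_encode() gone, all in-tree callers (see the seq_midi.c hunk above and seq_virmidi.c below) feed the encoder one byte at a time, and snd_midi_event_encode_byte() now simply returns true once a complete sequencer event is ready. A sketch of the byte-wise consumption loop, with example_dispatch() standing in for the caller's sink:

#include <linux/string.h>
#include <sound/seq_midi_event.h>

static void example_dispatch(struct snd_seq_event *ev);	/* hypothetical */

static void example_encode_buf(struct snd_midi_event *dev,
			       const unsigned char *buf, long count)
{
	struct snd_seq_event ev = {};

	while (count-- > 0) {
		if (!snd_midi_event_encode_byte(dev, *buf++, &ev))
			continue;	/* event not complete yet */
		example_dispatch(&ev);
		memset(&ev, 0, sizeof(ev));	/* reset for the next event */
	}
}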
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index b377f5048352..3b3ac96f1f5f 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -159,18 +159,8 @@ static void queue_delete(struct snd_seq_queue *q)
/*----------------------------------------------------------------*/
-/* setup queues */
-int __init snd_seq_queues_init(void)
-{
- /*
- memset(queue_list, 0, sizeof(queue_list));
- num_queues = 0;
- */
- return 0;
-}
-
/* delete all existing queues */
-void __exit snd_seq_queues_delete(void)
+void snd_seq_queues_delete(void)
{
int i;
diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
index 719093489a2c..e006fc8e3a36 100644
--- a/sound/core/seq/seq_queue.h
+++ b/sound/core/seq/seq_queue.h
@@ -63,9 +63,6 @@ struct snd_seq_queue {
/* get the number of current queues */
int snd_seq_queue_get_cur_queues(void);
-/* init queues structure */
-int snd_seq_queues_init(void);
-
/* delete queues */
void snd_seq_queues_delete(void);
@@ -112,28 +109,4 @@ int snd_seq_queue_is_used(int queueid, int client);
int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop);
-/*
- * 64bit division - for sync stuff..
- */
-#if defined(i386) || defined(i486)
-
-#define udiv_qrnnd(q, r, n1, n0, d) \
- __asm__ ("divl %4" \
- : "=a" ((u32)(q)), \
- "=d" ((u32)(r)) \
- : "0" ((u32)(n0)), \
- "1" ((u32)(n1)), \
- "rm" ((u32)(d)))
-
-#define u64_div(x,y,q) do {u32 __tmp; udiv_qrnnd(q, __tmp, (x)>>32, x, y);} while (0)
-#define u64_mod(x,y,r) do {u32 __tmp; udiv_qrnnd(__tmp, q, (x)>>32, x, y);} while (0)
-#define u64_divmod(x,y,q,r) udiv_qrnnd(q, r, (x)>>32, x, y)
-
-#else
-#define u64_div(x,y,q) ((q) = (u32)((u64)(x) / (u64)(y)))
-#define u64_mod(x,y,r) ((r) = (u32)((u64)(x) % (u64)(y)))
-#define u64_divmod(x,y,q,r) (u64_div(x,y,q), u64_mod(x,y,r))
-#endif
-
-
#endif
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 289ae6bb81d9..a2f1c6b58693 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -89,7 +89,7 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
else
down_read(&rdev->filelist_sem);
list_for_each_entry(vmidi, &rdev->filelist, list) {
- if (!vmidi->trigger)
+ if (!READ_ONCE(vmidi->trigger))
continue;
if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
@@ -110,23 +110,6 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
}
/*
- * receive an event from the remote virmidi port
- *
- * for rawmidi inputs, you can call this function from the event
- * handler of a remote port which is attached to the virmidi via
- * SNDRV_VIRMIDI_SEQ_ATTACH.
- */
-#if 0
-int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev)
-{
- struct snd_virmidi_dev *rdev;
-
- rdev = rmidi->private_data;
- return snd_virmidi_dev_receive_event(rdev, ev, true);
-}
-#endif /* 0 */
-
-/*
* event handler of virmidi port
*/
static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct,
@@ -147,68 +130,62 @@ static void snd_virmidi_input_trigger(struct snd_rawmidi_substream *substream, i
{
struct snd_virmidi *vmidi = substream->runtime->private_data;
- if (up) {
- vmidi->trigger = 1;
- } else {
- vmidi->trigger = 0;
- }
+ WRITE_ONCE(vmidi->trigger, !!up);
}
-/*
- * trigger rawmidi stream for output
+/* process rawmidi bytes and send events;
+ * no lock is needed on vmidi->event since it is only touched from this work
*/
-static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream, int up)
+static void snd_vmidi_output_work(struct work_struct *work)
{
- struct snd_virmidi *vmidi = substream->runtime->private_data;
- int count, res;
- unsigned char buf[32], *pbuf;
- unsigned long flags;
-
- if (up) {
- vmidi->trigger = 1;
- if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
- !(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
- while (snd_rawmidi_transmit(substream, buf,
- sizeof(buf)) > 0) {
- /* ignored */
- }
- return;
- }
- spin_lock_irqsave(&substream->runtime->lock, flags);
+ struct snd_virmidi *vmidi;
+ struct snd_rawmidi_substream *substream;
+ unsigned char input;
+ int ret;
+
+ vmidi = container_of(work, struct snd_virmidi, output_work);
+ substream = vmidi->substream;
+
+ /* discard the outputs in dispatch mode unless subscribed */
+ if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
+ !(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
+ while (!snd_rawmidi_transmit_empty(substream))
+ snd_rawmidi_transmit_ack(substream, 1);
+ return;
+ }
+
+ while (READ_ONCE(vmidi->trigger)) {
+ if (snd_rawmidi_transmit(substream, &input, 1) != 1)
+ break;
+ if (!snd_midi_event_encode_byte(vmidi->parser, input,
+ &vmidi->event))
+ continue;
if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
- if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
- goto out;
+ ret = snd_seq_kernel_client_dispatch(vmidi->client,
+ &vmidi->event,
+ false, 0);
vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
- }
- while (1) {
- count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
- if (count <= 0)
+ if (ret < 0)
break;
- pbuf = buf;
- while (count > 0) {
- res = snd_midi_event_encode(vmidi->parser, pbuf, count, &vmidi->event);
- if (res < 0) {
- snd_midi_event_reset_encode(vmidi->parser);
- continue;
- }
- __snd_rawmidi_transmit_ack(substream, res);
- pbuf += res;
- count -= res;
- if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
- if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
- goto out;
- vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
- }
- }
}
- out:
- spin_unlock_irqrestore(&substream->runtime->lock, flags);
- } else {
- vmidi->trigger = 0;
+		/* the queued rawmidi data might be huge; give other tasks a chance to run */
+ cond_resched();
}
}
/*
+ * trigger rawmidi stream for output
+ */
+static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream, int up)
+{
+ struct snd_virmidi *vmidi = substream->runtime->private_data;
+
+ WRITE_ONCE(vmidi->trigger, !!up);
+ if (up)
+ queue_work(system_highpri_wq, &vmidi->output_work);
+}
+
+/*
* open rawmidi handle for input
*/
static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
@@ -260,6 +237,7 @@ static int snd_virmidi_output_open(struct snd_rawmidi_substream *substream)
vmidi->port = rdev->port;
snd_virmidi_init_event(vmidi, &vmidi->event);
vmidi->rdev = rdev;
+ INIT_WORK(&vmidi->output_work, snd_vmidi_output_work);
runtime->private_data = vmidi;
return 0;
}
@@ -289,6 +267,9 @@ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
static int snd_virmidi_output_close(struct snd_rawmidi_substream *substream)
{
struct snd_virmidi *vmidi = substream->runtime->private_data;
+
+ WRITE_ONCE(vmidi->trigger, false); /* to be sure */
+ cancel_work_sync(&vmidi->output_work);
snd_midi_event_free(vmidi->parser);
substream->runtime->private_data = NULL;
kfree(vmidi);
@@ -546,19 +527,3 @@ int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmi
return 0;
}
EXPORT_SYMBOL(snd_virmidi_new);
-
-/*
- * ENTRY functions
- */
-
-static int __init alsa_virmidi_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_virmidi_exit(void)
-{
-}
-
-module_init(alsa_virmidi_init)
-module_exit(alsa_virmidi_exit)
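The virmidi rework moves output off the trigger callback, which may run in atomic context, into a work item: trigger just flips a lockless flag and queues the work, the worker drains the rawmidi buffer with cond_resched() between bytes, and close stops the flag before flushing the work. The stop side is the delicate half; a compact sketch of that ordering, assuming the trigger/output_work fields added by this patch:

#include <linux/workqueue.h>
#include <sound/seq_virmidi.h>

/* stop-then-flush: clear the flag the worker polls with READ_ONCE(),
 * then wait for any in-flight execution to finish */
static void example_stop_output(struct snd_virmidi *vmidi)
{
	WRITE_ONCE(vmidi->trigger, false);
	cancel_work_sync(&vmidi->output_work);
}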
diff --git a/sound/core/timer.c b/sound/core/timer.c
index b6f076bbc72d..61a0cec6e1f6 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -883,6 +883,11 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
if (snd_BUG_ON(!tid))
return -EINVAL;
+ if (tid->dev_class == SNDRV_TIMER_CLASS_CARD ||
+ tid->dev_class == SNDRV_TIMER_CLASS_PCM) {
+ if (WARN_ON(!card))
+ return -EINVAL;
+ }
if (rtimer)
*rtimer = NULL;
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 78a2fdc38531..1e34e6381baa 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -778,7 +778,6 @@ static const struct snd_pcm_ops loopback_pcm_ops = {
.trigger = loopback_trigger,
.pointer = loopback_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static int loopback_pcm_new(struct loopback *loopback,
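The ".mmap = snd_pcm_lib_mmap_vmalloc" lines removed here (and in the vx and firewire drivers below) were redundant: once a driver provides a .page callback, the PCM core's default mmap fault handler resolves pages through it, so no explicit .mmap is needed for vmalloc-backed buffers. Sketch:

#include <sound/pcm.h>

/* vmalloc-backed PCM: .page is enough; the core's default mmap path
 * (snd_pcm_lib_default_mmap) faults pages in via this callback */
static const struct snd_pcm_ops example_ops = {
	.page = snd_pcm_lib_get_vmalloc_page,
	/* no .mmap needed */
};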
diff --git a/sound/drivers/mpu401/mpu401_uart.c b/sound/drivers/mpu401/mpu401_uart.c
index 3e745f47dd2f..dae26e856b26 100644
--- a/sound/drivers/mpu401/mpu401_uart.c
+++ b/sound/drivers/mpu401/mpu401_uart.c
@@ -617,19 +617,3 @@ free_device:
}
EXPORT_SYMBOL(snd_mpu401_uart_new);
-
-/*
- * INIT part
- */
-
-static int __init alsa_mpu401_uart_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_mpu401_uart_exit(void)
-{
-}
-
-module_init(alsa_mpu401_uart_init)
-module_exit(alsa_mpu401_uart_exit)
diff --git a/sound/drivers/opl3/opl3_drums.c b/sound/drivers/opl3/opl3_drums.c
index 73694380734a..14929822956c 100644
--- a/sound/drivers/opl3/opl3_drums.c
+++ b/sound/drivers/opl3/opl3_drums.c
@@ -21,8 +21,6 @@
#include "opl3_voice.h"
-extern char snd_opl3_regmap[MAX_OPL2_VOICES][4];
-
static char snd_opl3_drum_table[47] =
{
OPL3_BASSDRUM_ON, OPL3_BASSDRUM_ON, OPL3_HIHAT_ON, /* 35 - 37 */
diff --git a/sound/drivers/opl3/opl3_lib.c b/sound/drivers/opl3/opl3_lib.c
index 588963d6be28..cf86c36c7c3b 100644
--- a/sound/drivers/opl3/opl3_lib.c
+++ b/sound/drivers/opl3/opl3_lib.c
@@ -31,13 +31,12 @@
#include <linux/slab.h>
#include <linux/ioport.h>
#include <sound/minors.h>
+#include "opl3_voice.h"
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Hannu Savolainen 1993-1996, Rob Hooft");
MODULE_DESCRIPTION("Routines for control of AdLib FM cards (OPL2/OPL3/OPL4 chips)");
MODULE_LICENSE("GPL");
-extern char snd_opl3_regmap[MAX_OPL2_VOICES][4];
-
static void snd_opl2_command(struct snd_opl3 * opl3, unsigned short cmd, unsigned char val)
{
unsigned long flags;
@@ -539,19 +538,3 @@ int snd_opl3_hwdep_new(struct snd_opl3 * opl3,
}
EXPORT_SYMBOL(snd_opl3_hwdep_new);
-
-/*
- * INIT part
- */
-
-static int __init alsa_opl3_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_opl3_exit(void)
-{
-}
-
-module_init(alsa_opl3_init)
-module_exit(alsa_opl3_exit)
diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c
index bb3f3a5a6951..a33cb744e96c 100644
--- a/sound/drivers/opl3/opl3_midi.c
+++ b/sound/drivers/opl3/opl3_midi.c
@@ -25,10 +25,6 @@
#include "opl3_voice.h"
#include <sound/asoundef.h>
-extern char snd_opl3_regmap[MAX_OPL2_VOICES][4];
-
-extern bool use_internal_drums;
-
static void snd_opl3_note_off_unsafe(void *p, int note, int vel,
struct snd_midi_channel *chan);
/*
@@ -372,6 +368,7 @@ void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan)
instr_4op = 1;
break;
}
+ /* fall through */
default:
spin_unlock_irqrestore(&opl3->voice_lock, flags);
return;
@@ -721,9 +718,6 @@ void snd_opl3_note_off(void *p, int note, int vel,
*/
void snd_opl3_key_press(void *p, int note, int vel, struct snd_midi_channel *chan)
{
- struct snd_opl3 *opl3;
-
- opl3 = p;
#ifdef DEBUG_MIDI
snd_printk(KERN_DEBUG "Key pressure, ch#: %i, inst#: %i\n",
chan->number, chan->midi_program);
@@ -735,9 +729,6 @@ void snd_opl3_key_press(void *p, int note, int vel, struct snd_midi_channel *cha
*/
void snd_opl3_terminate_note(void *p, int note, struct snd_midi_channel *chan)
{
- struct snd_opl3 *opl3;
-
- opl3 = p;
#ifdef DEBUG_MIDI
snd_printk(KERN_DEBUG "Terminate note, ch#: %i, inst#: %i\n",
chan->number, chan->midi_program);
@@ -861,9 +852,6 @@ void snd_opl3_control(void *p, int type, struct snd_midi_channel *chan)
void snd_opl3_nrpn(void *p, struct snd_midi_channel *chan,
struct snd_midi_channel_set *chset)
{
- struct snd_opl3 *opl3;
-
- opl3 = p;
#ifdef DEBUG_MIDI
snd_printk(KERN_DEBUG "NRPN, ch#: %i, inst#: %i\n",
chan->number, chan->midi_program);
@@ -876,9 +864,6 @@ void snd_opl3_nrpn(void *p, struct snd_midi_channel *chan,
void snd_opl3_sysex(void *p, unsigned char *buf, int len,
int parsed, struct snd_midi_channel_set *chset)
{
- struct snd_opl3 *opl3;
-
- opl3 = p;
#ifdef DEBUG_MIDI
snd_printk(KERN_DEBUG "SYSEX\n");
#endif
diff --git a/sound/drivers/opl3/opl3_oss.c b/sound/drivers/opl3/opl3_oss.c
index 22c3e4bca220..869220ced4ed 100644
--- a/sound/drivers/opl3/opl3_oss.c
+++ b/sound/drivers/opl3/opl3_oss.c
@@ -29,8 +29,6 @@ static int snd_opl3_reset_seq_oss(struct snd_seq_oss_arg *arg);
/* operators */
-extern struct snd_midi_op opl3_ops;
-
static struct snd_seq_oss_callback oss_callback = {
.owner = THIS_MODULE,
.open = snd_opl3_open_seq_oss,
@@ -233,11 +231,8 @@ static int snd_opl3_load_patch_seq_oss(struct snd_seq_oss_arg *arg, int format,
static int snd_opl3_ioctl_seq_oss(struct snd_seq_oss_arg *arg, unsigned int cmd,
unsigned long ioarg)
{
- struct snd_opl3 *opl3;
-
if (snd_BUG_ON(!arg))
return -ENXIO;
- opl3 = arg->private_data;
switch (cmd) {
case SNDCTL_FM_LOAD_INSTR:
snd_printk(KERN_ERR "OPL3: "
@@ -261,11 +256,8 @@ static int snd_opl3_ioctl_seq_oss(struct snd_seq_oss_arg *arg, unsigned int cmd,
/* reset device */
static int snd_opl3_reset_seq_oss(struct snd_seq_oss_arg *arg)
{
- struct snd_opl3 *opl3;
-
if (snd_BUG_ON(!arg))
return -ENXIO;
- opl3 = arg->private_data;
return 0;
}
diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
index 42920a243328..d522925fc5c0 100644
--- a/sound/drivers/opl3/opl3_synth.c
+++ b/sound/drivers/opl3/opl3_synth.c
@@ -24,6 +24,7 @@
#include <linux/nospec.h>
#include <sound/opl3.h>
#include <sound/asound_fm.h>
+#include "opl3_voice.h"
#if IS_ENABLED(CONFIG_SND_SEQUENCER)
#define OPL3_SUPPORT_SYNTH
diff --git a/sound/drivers/opl3/opl3_voice.h b/sound/drivers/opl3/opl3_voice.h
index a2445163008e..5b02bd49fde4 100644
--- a/sound/drivers/opl3/opl3_voice.h
+++ b/sound/drivers/opl3/opl3_voice.h
@@ -52,4 +52,8 @@ void snd_opl3_free_seq_oss(struct snd_opl3 *opl3);
#define snd_opl3_free_seq_oss(opl3) /* NOP */
#endif
+extern char snd_opl3_regmap[MAX_OPL2_VOICES][4];
+extern bool use_internal_drums;
+extern struct snd_midi_op opl3_ops;
+
#endif
diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
index db76a5bf2bd2..819d2dce2a19 100644
--- a/sound/drivers/opl4/opl4_lib.c
+++ b/sound/drivers/opl4/opl4_lib.c
@@ -263,15 +263,3 @@ int snd_opl4_create(struct snd_card *card,
}
EXPORT_SYMBOL(snd_opl4_create);
-
-static int __init alsa_opl4_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_opl4_exit(void)
-{
-}
-
-module_init(alsa_opl4_init)
-module_exit(alsa_opl4_exit)
diff --git a/sound/drivers/vx/vx_core.c b/sound/drivers/vx/vx_core.c
index 121357397a6d..04368dd59a4c 100644
--- a/sound/drivers/vx/vx_core.c
+++ b/sound/drivers/vx/vx_core.c
@@ -815,18 +815,3 @@ struct vx_core *snd_vx_create(struct snd_card *card, struct snd_vx_hardware *hw,
}
EXPORT_SYMBOL(snd_vx_create);
-
-/*
- * module entries
- */
-static int __init alsa_vx_core_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_vx_core_exit(void)
-{
-}
-
-module_init(alsa_vx_core_init)
-module_exit(alsa_vx_core_exit)
diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c
index 380a028469c4..ba80f459bdc5 100644
--- a/sound/drivers/vx/vx_pcm.c
+++ b/sound/drivers/vx/vx_pcm.c
@@ -883,7 +883,6 @@ static const struct snd_pcm_ops vx_pcm_playback_ops = {
.trigger = vx_pcm_trigger,
.pointer = vx_pcm_playback_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
@@ -1105,7 +1104,6 @@ static const struct snd_pcm_ops vx_pcm_capture_ops = {
.trigger = vx_pcm_trigger,
.pointer = vx_pcm_capture_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
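
Note: this removal (repeated for every firewire PCM below) works because in this kernel series snd_pcm_lib_mmap_vmalloc is a macro defined as NULL; once .page is set to snd_pcm_lib_get_vmalloc_page, the PCM core's default mmap path already maps the vmalloc buffer page by page, so the explicit .mmap assignment was a no-op. A hedged sketch of the trimmed ops table (the demo_* callbacks are placeholders, not real driver functions):

#include <sound/pcm.h>

static const struct snd_pcm_ops demo_playback_ops = {
	.open		= demo_open,		/* placeholder callbacks */
	.close		= demo_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= demo_hw_params,
	.hw_free	= demo_hw_free,
	.prepare	= demo_prepare,
	.trigger	= demo_trigger,
	.pointer	= demo_pointer,
	/* .page alone suffices; the core's default mmap handler uses it */
	.page		= snd_pcm_lib_get_vmalloc_page,
};
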
diff --git a/sound/firewire/bebob/bebob_pcm.c b/sound/firewire/bebob/bebob_pcm.c
index e6adab3ef42e..ea9b86450580 100644
--- a/sound/firewire/bebob/bebob_pcm.c
+++ b/sound/firewire/bebob/bebob_pcm.c
@@ -373,7 +373,6 @@ int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
struct snd_pcm *pcm;
int err;
diff --git a/sound/firewire/dice/dice-alesis.c b/sound/firewire/dice/dice-alesis.c
index b2efb1c71a98..218292bdace6 100644
--- a/sound/firewire/dice/dice-alesis.c
+++ b/sound/firewire/dice/dice-alesis.c
@@ -37,7 +37,7 @@ int snd_dice_detect_alesis_formats(struct snd_dice *dice)
MAX_STREAMS * SND_DICE_RATE_MODE_COUNT *
sizeof(unsigned int));
} else {
- memcpy(dice->rx_pcm_chs, alesis_io26_tx_pcm_chs,
+ memcpy(dice->tx_pcm_chs, alesis_io26_tx_pcm_chs,
MAX_STREAMS * SND_DICE_RATE_MODE_COUNT *
sizeof(unsigned int));
}
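
Note: the one-character fix above repairs a copy/paste slip — for the iO 26 the tx channel table was copied into dice->rx_pcm_chs, leaving tx_pcm_chs unset. A mismatched direction buried in a three-line memcpy() is easy to miss; a hypothetical wrapper that states the direction once per call site makes such slips stand out in review:

#include <linux/string.h>

/* Hypothetical helper: dst and src appear side by side in one short
 * call, so an rx/tx mismatch is visually obvious. */
static void demo_copy_chs(unsigned int *dst, const unsigned int *src,
			  size_t entries)
{
	memcpy(dst, src, entries * sizeof(*dst));
}

/* usage, with the names from the driver above:
 *   demo_copy_chs(dice->tx_pcm_chs[0], alesis_io26_tx_pcm_chs[0],
 *                 MAX_STREAMS * SND_DICE_RATE_MODE_COUNT);
 */
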
diff --git a/sound/firewire/dice/dice-pcm.c b/sound/firewire/dice/dice-pcm.c
index 80351b29fe0d..bb3ef5ff3488 100644
--- a/sound/firewire/dice/dice-pcm.c
+++ b/sound/firewire/dice/dice-pcm.c
@@ -412,7 +412,6 @@ int snd_dice_create_pcm(struct snd_dice *dice)
.pointer = capture_pointer,
.ack = capture_ack,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -425,7 +424,6 @@ int snd_dice_create_pcm(struct snd_dice *dice)
.pointer = playback_pointer,
.ack = playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
struct snd_pcm *pcm;
unsigned int capture, playback;
diff --git a/sound/firewire/digi00x/digi00x-pcm.c b/sound/firewire/digi00x/digi00x-pcm.c
index 796f4b4645f5..fdcff0460c53 100644
--- a/sound/firewire/digi00x/digi00x-pcm.c
+++ b/sound/firewire/digi00x/digi00x-pcm.c
@@ -352,7 +352,6 @@ int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
struct snd_pcm *pcm;
int err;
diff --git a/sound/firewire/fireface/ff-pcm.c b/sound/firewire/fireface/ff-pcm.c
index e3c16308363d..bf47f9ec8703 100644
--- a/sound/firewire/fireface/ff-pcm.c
+++ b/sound/firewire/fireface/ff-pcm.c
@@ -383,7 +383,6 @@ int snd_ff_create_pcm_devices(struct snd_ff *ff)
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
struct snd_pcm *pcm;
int err;
diff --git a/sound/firewire/fireworks/fireworks_pcm.c b/sound/firewire/fireworks/fireworks_pcm.c
index 40faed5e6968..aed566d82726 100644
--- a/sound/firewire/fireworks/fireworks_pcm.c
+++ b/sound/firewire/fireworks/fireworks_pcm.c
@@ -397,7 +397,6 @@ int snd_efw_create_pcm_devices(struct snd_efw *efw)
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
struct snd_pcm *pcm;
int err;
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index 3919e186a30b..30957477e005 100644
--- a/sound/firewire/isight.c
+++ b/sound/firewire/isight.c
@@ -454,7 +454,6 @@ static int isight_create_pcm(struct isight *isight)
.trigger = isight_trigger,
.pointer = isight_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
struct snd_pcm *pcm;
int err;
diff --git a/sound/firewire/motu/motu-pcm.c b/sound/firewire/motu/motu-pcm.c
index 4330220890e8..ab69d7e6ac05 100644
--- a/sound/firewire/motu/motu-pcm.c
+++ b/sound/firewire/motu/motu-pcm.c
@@ -363,7 +363,6 @@ int snd_motu_create_pcm_devices(struct snd_motu *motu)
.pointer = capture_pointer,
.ack = capture_ack,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -376,7 +375,6 @@ int snd_motu_create_pcm_devices(struct snd_motu *motu)
.pointer = playback_pointer,
.ack = playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
struct snd_pcm *pcm;
int err;
diff --git a/sound/firewire/motu/motu-protocol-v2.c b/sound/firewire/motu/motu-protocol-v2.c
index 525b746330be..453fc29fade7 100644
--- a/sound/firewire/motu/motu-protocol-v2.c
+++ b/sound/firewire/motu/motu-protocol-v2.c
@@ -13,6 +13,8 @@
#define V2_CLOCK_RATE_SHIFT 3
#define V2_CLOCK_SRC_MASK 0x00000007
#define V2_CLOCK_SRC_SHIFT 0
+#define V2_CLOCK_TRAVELER_FETCH_DISABLE 0x04000000
+#define V2_CLOCK_TRAVELER_FETCH_ENABLE 0x03000000
#define V2_IN_OUT_CONF_OFFSET 0x0c04
#define V2_OPT_OUT_IFACE_MASK 0x00000c00
@@ -66,6 +68,11 @@ static int v2_set_clock_rate(struct snd_motu *motu, unsigned int rate)
data &= ~V2_CLOCK_RATE_MASK;
data |= i << V2_CLOCK_RATE_SHIFT;
+ if (motu->spec == &snd_motu_spec_traveler) {
+ data &= ~V2_CLOCK_TRAVELER_FETCH_ENABLE;
+ data |= V2_CLOCK_TRAVELER_FETCH_DISABLE;
+ }
+
reg = cpu_to_be32(data);
return snd_motu_transaction_write(motu, V2_CLOCK_STATUS_OFFSET, &reg,
sizeof(reg));
@@ -121,8 +128,31 @@ static int v2_get_clock_source(struct snd_motu *motu,
static int v2_switch_fetching_mode(struct snd_motu *motu, bool enable)
{
- /* V2 protocol doesn't have this feature. */
- return 0;
+ __be32 reg;
+ u32 data;
+ int err = 0;
+
+ if (motu->spec == &snd_motu_spec_traveler) {
+ err = snd_motu_transaction_read(motu, V2_CLOCK_STATUS_OFFSET,
+ &reg, sizeof(reg));
+ if (err < 0)
+ return err;
+ data = be32_to_cpu(reg);
+
+ data &= ~(V2_CLOCK_TRAVELER_FETCH_DISABLE |
+ V2_CLOCK_TRAVELER_FETCH_ENABLE);
+
+ if (enable)
+ data |= V2_CLOCK_TRAVELER_FETCH_ENABLE;
+ else
+ data |= V2_CLOCK_TRAVELER_FETCH_DISABLE;
+
+ reg = cpu_to_be32(data);
+ err = snd_motu_transaction_write(motu, V2_CLOCK_STATUS_OFFSET,
+ &reg, sizeof(reg));
+ }
+
+ return err;
}
static void calculate_fixed_part(struct snd_motu_packet_format *formats,
@@ -149,11 +179,20 @@ static void calculate_fixed_part(struct snd_motu_packet_format *formats,
pcm_chunks[1] += 2;
}
} else {
- /*
- * Packets to v2 units transfer main-out-1/2 and phone-out-1/2.
- */
- pcm_chunks[0] += 4;
- pcm_chunks[1] += 4;
+ if (flags & SND_MOTU_SPEC_RX_SEPARETED_MAIN) {
+ pcm_chunks[0] += 2;
+ pcm_chunks[1] += 2;
+ }
+
+ // Packets to v2 units include 2 chunks for phone 1/2, except
+ // for 176.4/192.0 kHz.
+ pcm_chunks[0] += 2;
+ pcm_chunks[1] += 2;
+ }
+
+ if (flags & SND_MOTU_SPEC_HAS_AESEBU_IFACE) {
+ pcm_chunks[0] += 2;
+ pcm_chunks[1] += 2;
}
/*
@@ -164,19 +203,16 @@ static void calculate_fixed_part(struct snd_motu_packet_format *formats,
pcm_chunks[0] += 2;
pcm_chunks[1] += 2;
- /* This part should be multiples of 4. */
- formats->fixed_part_pcm_chunks[0] = round_up(2 + pcm_chunks[0], 4) - 2;
- formats->fixed_part_pcm_chunks[1] = round_up(2 + pcm_chunks[1], 4) - 2;
- if (flags & SND_MOTU_SPEC_SUPPORT_CLOCK_X4)
- formats->fixed_part_pcm_chunks[2] =
- round_up(2 + pcm_chunks[2], 4) - 2;
+ formats->fixed_part_pcm_chunks[0] = pcm_chunks[0];
+ formats->fixed_part_pcm_chunks[1] = pcm_chunks[1];
+ formats->fixed_part_pcm_chunks[2] = pcm_chunks[2];
}
static void calculate_differed_part(struct snd_motu_packet_format *formats,
enum snd_motu_spec_flags flags,
u32 data, u32 mask, u32 shift)
{
- unsigned char pcm_chunks[3] = {0, 0};
+ unsigned char pcm_chunks[2] = {0, 0};
/*
* When optical interfaces are configured for S/PDIF (TOSLINK),
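
Note: the new v2_switch_fetching_mode() body above is a classic big-endian read-modify-write: fetch the clock-status register, clear both Traveler fetch bits, raise exactly one, write back. A condensed sketch of that pattern, reusing the register constants defined in the hunk above (the helper name is hypothetical, and the Traveler-only guard is left to the caller):

/* Hedged sketch of the read-modify-write sequence used above */
static int demo_rmw_clock_status(struct snd_motu *motu, u32 clear, u32 set)
{
	__be32 reg;
	u32 data;
	int err;

	err = snd_motu_transaction_read(motu, V2_CLOCK_STATUS_OFFSET,
					&reg, sizeof(reg));
	if (err < 0)
		return err;

	data = be32_to_cpu(reg);	/* bus order -> CPU order */
	data &= ~clear;			/* drop both fetch flags */
	data |= set;			/* raise exactly one of them */
	reg = cpu_to_be32(data);	/* CPU order -> bus order */

	return snd_motu_transaction_write(motu, V2_CLOCK_STATUS_OFFSET,
					  &reg, sizeof(reg));
}
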
diff --git a/sound/firewire/motu/motu-protocol-v3.c b/sound/firewire/motu/motu-protocol-v3.c
index c7cd9864dc4d..7cc80a05e91f 100644
--- a/sound/firewire/motu/motu-protocol-v3.c
+++ b/sound/firewire/motu/motu-protocol-v3.c
@@ -188,11 +188,20 @@ static void calculate_fixed_part(struct snd_motu_packet_format *formats,
pcm_chunks[1] += 2;
}
} else {
- /*
- * Packets to v2 units transfer main-out-1/2 and phone-out-1/2.
- */
- pcm_chunks[0] += 4;
- pcm_chunks[1] += 4;
+ if (flags & SND_MOTU_SPEC_RX_SEPARETED_MAIN) {
+ pcm_chunks[0] += 2;
+ pcm_chunks[1] += 2;
+ }
+
+ // Packets to v3 units include 2 chunks for phone 1/2, except
+ // for 176.4/192.0 kHz.
+ pcm_chunks[0] += 2;
+ pcm_chunks[1] += 2;
+ }
+
+ if (flags & SND_MOTU_SPEC_HAS_AESEBU_IFACE) {
+ pcm_chunks[0] += 2;
+ pcm_chunks[1] += 2;
}
/*
diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
index 0d6b526105ab..300d31b6f191 100644
--- a/sound/firewire/motu/motu.c
+++ b/sound/firewire/motu/motu.c
@@ -200,6 +200,22 @@ static const struct snd_motu_spec motu_828mk2 = {
.flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
SND_MOTU_SPEC_TX_MICINST_CHUNK |
SND_MOTU_SPEC_TX_RETURN_CHUNK |
+ SND_MOTU_SPEC_RX_SEPARETED_MAIN |
+ SND_MOTU_SPEC_HAS_OPT_IFACE_A |
+ SND_MOTU_SPEC_RX_MIDI_2ND_Q |
+ SND_MOTU_SPEC_TX_MIDI_2ND_Q,
+
+ .analog_in_ports = 8,
+ .analog_out_ports = 8,
+};
+
+const struct snd_motu_spec snd_motu_spec_traveler = {
+ .name = "Traveler",
+ .protocol = &snd_motu_protocol_v2,
+ .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
+ SND_MOTU_SPEC_SUPPORT_CLOCK_X4 |
+ SND_MOTU_SPEC_TX_RETURN_CHUNK |
+ SND_MOTU_SPEC_HAS_AESEBU_IFACE |
SND_MOTU_SPEC_HAS_OPT_IFACE_A |
SND_MOTU_SPEC_RX_MIDI_2ND_Q |
SND_MOTU_SPEC_TX_MIDI_2ND_Q,
@@ -216,6 +232,7 @@ static const struct snd_motu_spec motu_828mk3 = {
SND_MOTU_SPEC_TX_MICINST_CHUNK |
SND_MOTU_SPEC_TX_RETURN_CHUNK |
SND_MOTU_SPEC_TX_REVERB_CHUNK |
+ SND_MOTU_SPEC_RX_SEPARETED_MAIN |
SND_MOTU_SPEC_HAS_OPT_IFACE_A |
SND_MOTU_SPEC_HAS_OPT_IFACE_B |
SND_MOTU_SPEC_RX_MIDI_3RD_Q |
@@ -231,6 +248,7 @@ static const struct snd_motu_spec motu_audio_express = {
.flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
SND_MOTU_SPEC_TX_MICINST_CHUNK |
SND_MOTU_SPEC_TX_RETURN_CHUNK |
+ SND_MOTU_SPEC_RX_SEPARETED_MAIN |
SND_MOTU_SPEC_RX_MIDI_2ND_Q |
SND_MOTU_SPEC_TX_MIDI_3RD_Q,
.analog_in_ports = 2,
@@ -250,6 +268,7 @@ static const struct snd_motu_spec motu_audio_express = {
static const struct ieee1394_device_id motu_id_table[] = {
SND_MOTU_DEV_ENTRY(0x101800, &motu_828mk2),
+ SND_MOTU_DEV_ENTRY(0x107800, &snd_motu_spec_traveler),
SND_MOTU_DEV_ENTRY(0x106800, &motu_828mk3), /* FireWire only. */
SND_MOTU_DEV_ENTRY(0x100800, &motu_828mk3), /* Hybrid. */
SND_MOTU_DEV_ENTRY(0x104800, &motu_audio_express),
diff --git a/sound/firewire/motu/motu.h b/sound/firewire/motu/motu.h
index 4b23cf337c4b..fd5327d30ab1 100644
--- a/sound/firewire/motu/motu.h
+++ b/sound/firewire/motu/motu.h
@@ -79,13 +79,14 @@ enum snd_motu_spec_flags {
SND_MOTU_SPEC_TX_MICINST_CHUNK = 0x0004,
SND_MOTU_SPEC_TX_RETURN_CHUNK = 0x0008,
SND_MOTU_SPEC_TX_REVERB_CHUNK = 0x0010,
- SND_MOTU_SPEC_TX_AESEBU_CHUNK = 0x0020,
+ SND_MOTU_SPEC_HAS_AESEBU_IFACE = 0x0020,
SND_MOTU_SPEC_HAS_OPT_IFACE_A = 0x0040,
SND_MOTU_SPEC_HAS_OPT_IFACE_B = 0x0080,
SND_MOTU_SPEC_RX_MIDI_2ND_Q = 0x0100,
SND_MOTU_SPEC_RX_MIDI_3RD_Q = 0x0200,
SND_MOTU_SPEC_TX_MIDI_2ND_Q = 0x0400,
SND_MOTU_SPEC_TX_MIDI_3RD_Q = 0x0800,
+ SND_MOTU_SPEC_RX_SEPARETED_MAIN = 0x1000,
};
#define SND_MOTU_CLOCK_RATE_COUNT 6
@@ -128,6 +129,8 @@ struct snd_motu_spec {
extern const struct snd_motu_protocol snd_motu_protocol_v2;
extern const struct snd_motu_protocol snd_motu_protocol_v3;
+extern const struct snd_motu_spec snd_motu_spec_traveler;
+
int amdtp_motu_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir,
const struct snd_motu_protocol *const protocol);
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 3dd46285c0e2..b3f6503dd34d 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -389,7 +389,6 @@ int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
.pointer = pcm_capture_pointer,
.ack = pcm_capture_ack,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -402,7 +401,6 @@ int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
struct snd_pcm *pcm;
unsigned int cap = 0;
diff --git a/sound/firewire/tascam/tascam-pcm.c b/sound/firewire/tascam/tascam-pcm.c
index 6ec8ec634d4d..e4cc8990e195 100644
--- a/sound/firewire/tascam/tascam-pcm.c
+++ b/sound/firewire/tascam/tascam-pcm.c
@@ -279,7 +279,6 @@ int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
struct snd_pcm *pcm;
int err;
diff --git a/sound/hda/Kconfig b/sound/hda/Kconfig
index 3129546398d0..2d90e11b3eaa 100644
--- a/sound/hda/Kconfig
+++ b/sound/hda/Kconfig
@@ -5,11 +5,12 @@ config SND_HDA_CORE
config SND_HDA_DSP_LOADER
bool
+config SND_HDA_COMPONENT
+ bool
+
config SND_HDA_I915
bool
- default y
- depends on DRM_I915
- depends on SND_HDA_CORE
+ select SND_HDA_COMPONENT
config SND_HDA_EXT_CORE
tristate
diff --git a/sound/hda/Makefile b/sound/hda/Makefile
index e4e726f2ce98..2160202e2dc1 100644
--- a/sound/hda/Makefile
+++ b/sound/hda/Makefile
@@ -6,6 +6,7 @@ snd-hda-core-objs += trace.o
CFLAGS_trace.o := -I$(src)
# for sync with i915 gfx driver
+snd-hda-core-$(CONFIG_SND_HDA_COMPONENT) += hdac_component.o
snd-hda-core-$(CONFIG_SND_HDA_I915) += hdac_i915.o
obj-$(CONFIG_SND_HDA_CORE) += snd-hda-core.o
diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c
index 0daf31383084..9c37d9af3023 100644
--- a/sound/hda/ext/hdac_ext_bus.c
+++ b/sound/hda/ext/hdac_ext_bus.c
@@ -87,9 +87,10 @@ static const struct hdac_io_ops hdac_ext_default_io = {
*
* Returns 0 if successful, or a negative error code.
*/
-int snd_hdac_ext_bus_init(struct hdac_ext_bus *ebus, struct device *dev,
+int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
const struct hdac_bus_ops *ops,
- const struct hdac_io_ops *io_ops)
+ const struct hdac_io_ops *io_ops,
+ const struct hdac_ext_bus_ops *ext_ops)
{
int ret;
static int idx;
@@ -98,15 +99,16 @@ int snd_hdac_ext_bus_init(struct hdac_ext_bus *ebus, struct device *dev,
if (io_ops == NULL)
io_ops = &hdac_ext_default_io;
- ret = snd_hdac_bus_init(&ebus->bus, dev, ops, io_ops);
+ ret = snd_hdac_bus_init(bus, dev, ops, io_ops);
if (ret < 0)
return ret;
- INIT_LIST_HEAD(&ebus->hlink_list);
- ebus->idx = idx++;
+ bus->ext_ops = ext_ops;
+ INIT_LIST_HEAD(&bus->hlink_list);
+ bus->idx = idx++;
- mutex_init(&ebus->lock);
- ebus->cmd_dma_state = true;
+ mutex_init(&bus->lock);
+ bus->cmd_dma_state = true;
return 0;
}
@@ -116,10 +118,10 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_init);
* snd_hdac_ext_bus_exit - clean up a HD-audio extended bus
* @ebus: the pointer to extended bus object
*/
-void snd_hdac_ext_bus_exit(struct hdac_ext_bus *ebus)
+void snd_hdac_ext_bus_exit(struct hdac_bus *bus)
{
- snd_hdac_bus_exit(&ebus->bus);
- WARN_ON(!list_empty(&ebus->hlink_list));
+ snd_hdac_bus_exit(bus);
+ WARN_ON(!list_empty(&bus->hlink_list));
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_exit);
@@ -135,21 +137,15 @@ static void default_release(struct device *dev)
*
* Returns zero for success or a negative error code.
*/
-int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *ebus, int addr)
+int snd_hdac_ext_bus_device_init(struct hdac_bus *bus, int addr,
+ struct hdac_device *hdev)
{
- struct hdac_ext_device *edev;
- struct hdac_device *hdev = NULL;
- struct hdac_bus *bus = ebus_to_hbus(ebus);
char name[15];
int ret;
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
- if (!edev)
- return -ENOMEM;
- hdev = &edev->hdev;
- edev->ebus = ebus;
+ hdev->bus = bus;
- snprintf(name, sizeof(name), "ehdaudio%dD%d", ebus->idx, addr);
+ snprintf(name, sizeof(name), "ehdaudio%dD%d", bus->idx, addr);
ret = snd_hdac_device_init(hdev, bus, name, addr);
if (ret < 0) {
@@ -176,10 +172,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_init);
*/
void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev)
{
- struct hdac_ext_device *edev = to_ehdac_device(hdev);
-
snd_hdac_device_exit(hdev);
- kfree(edev);
+ kfree(hdev);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_exit);
@@ -188,14 +182,14 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_exit);
*
* @ebus: HD-audio extended bus
*/
-void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus)
+void snd_hdac_ext_bus_device_remove(struct hdac_bus *bus)
{
struct hdac_device *codec, *__codec;
/*
* we need to remove all the codec devices objects created in the
* snd_hdac_ext_bus_device_init
*/
- list_for_each_entry_safe(codec, __codec, &ebus->bus.codec_list, list) {
+ list_for_each_entry_safe(codec, __codec, &bus->codec_list, list) {
snd_hdac_device_unregister(codec);
put_device(&codec->dev);
}
@@ -204,35 +198,31 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_remove);
#define dev_to_hdac(dev) (container_of((dev), \
struct hdac_device, dev))
-static inline struct hdac_ext_driver *get_edrv(struct device *dev)
+static inline struct hdac_driver *get_hdrv(struct device *dev)
{
struct hdac_driver *hdrv = drv_to_hdac_driver(dev->driver);
- struct hdac_ext_driver *edrv = to_ehdac_driver(hdrv);
-
- return edrv;
+ return hdrv;
}
-static inline struct hdac_ext_device *get_edev(struct device *dev)
+static inline struct hdac_device *get_hdev(struct device *dev)
{
struct hdac_device *hdev = dev_to_hdac_dev(dev);
- struct hdac_ext_device *edev = to_ehdac_device(hdev);
-
- return edev;
+ return hdev;
}
static int hda_ext_drv_probe(struct device *dev)
{
- return (get_edrv(dev))->probe(get_edev(dev));
+ return (get_hdrv(dev))->probe(get_hdev(dev));
}
static int hdac_ext_drv_remove(struct device *dev)
{
- return (get_edrv(dev))->remove(get_edev(dev));
+ return (get_hdrv(dev))->remove(get_hdev(dev));
}
static void hdac_ext_drv_shutdown(struct device *dev)
{
- return (get_edrv(dev))->shutdown(get_edev(dev));
+ return (get_hdrv(dev))->shutdown(get_hdev(dev));
}
/**
@@ -240,20 +230,20 @@ static void hdac_ext_drv_shutdown(struct device *dev)
*
* @drv: ext hda driver structure
*/
-int snd_hda_ext_driver_register(struct hdac_ext_driver *drv)
+int snd_hda_ext_driver_register(struct hdac_driver *drv)
{
- drv->hdac.type = HDA_DEV_ASOC;
- drv->hdac.driver.bus = &snd_hda_bus_type;
+ drv->type = HDA_DEV_ASOC;
+ drv->driver.bus = &snd_hda_bus_type;
/* we use default match */
if (drv->probe)
- drv->hdac.driver.probe = hda_ext_drv_probe;
+ drv->driver.probe = hda_ext_drv_probe;
if (drv->remove)
- drv->hdac.driver.remove = hdac_ext_drv_remove;
+ drv->driver.remove = hdac_ext_drv_remove;
if (drv->shutdown)
- drv->hdac.driver.shutdown = hdac_ext_drv_shutdown;
+ drv->driver.shutdown = hdac_ext_drv_shutdown;
- return driver_register(&drv->hdac.driver);
+ return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(snd_hda_ext_driver_register);
@@ -262,8 +252,8 @@ EXPORT_SYMBOL_GPL(snd_hda_ext_driver_register);
*
* @drv: ext hda driver structure
*/
-void snd_hda_ext_driver_unregister(struct hdac_ext_driver *drv)
+void snd_hda_ext_driver_unregister(struct hdac_driver *drv)
{
- driver_unregister(&drv->hdac.driver);
+ driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(snd_hda_ext_driver_unregister);
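
Note: this file and the hdac_ext_* files that follow all make the same move. struct hdac_ext_bus used to embed struct hdac_bus, dragging ebus_to_hbus()/&ebus->bus conversions through every function; after the refactor its fields (hlink_list, idx, lock, cmd_dma_state, plus the new ext_ops) live directly in struct hdac_bus, and every exported function takes struct hdac_bus *. A structural sketch with hypothetical names:

#include <linux/list.h>

struct demo_bus_v1 { struct device *dev; };

/* Before: a wrapper embeds the core bus; helpers convert back and
 * forth between the two pointer types at every layer boundary. */
struct demo_ext_bus {
	struct demo_bus_v1 bus;
	struct list_head hlink_list;
};
#define demo_ebus_to_bus(e)	(&(e)->bus)

/* After: the extended fields are folded into the one bus object,
 * so the wrapper type and all conversions disappear. */
struct demo_bus_v2 {
	struct device *dev;
	struct list_head hlink_list;	/* formerly in the wrapper */
};

This is why the remaining hunks in these files are almost entirely mechanical ebus-to-bus renames.
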
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index 84f3b8168716..5bc4a1d587d4 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -39,9 +39,8 @@
* @ebus: HD-audio extended core bus
* @enable: flag to turn on/off the capability
*/
-void snd_hdac_ext_bus_ppcap_enable(struct hdac_ext_bus *ebus, bool enable)
+void snd_hdac_ext_bus_ppcap_enable(struct hdac_bus *bus, bool enable)
{
- struct hdac_bus *bus = &ebus->bus;
if (!bus->ppcap) {
dev_err(bus->dev, "Address of PP capability is NULL");
@@ -60,9 +59,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_ppcap_enable);
* @ebus: HD-audio extended core bus
* @enable: flag to enable/disable interrupt
*/
-void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_ext_bus *ebus, bool enable)
+void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_bus *bus, bool enable)
{
- struct hdac_bus *bus = &ebus->bus;
if (!bus->ppcap) {
dev_err(bus->dev, "Address of PP capability is NULL\n");
@@ -89,12 +87,11 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_ppcap_int_enable);
* in hlink_list of extended hdac bus
* Note: this will be freed on bus exit by driver
*/
-int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_ext_bus *ebus)
+int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_bus *bus)
{
int idx;
u32 link_count;
struct hdac_ext_link *hlink;
- struct hdac_bus *bus = &ebus->bus;
link_count = readl(bus->mlcap + AZX_REG_ML_MLCD) + 1;
@@ -114,7 +111,7 @@ int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_ext_bus *ebus)
/* since link in On, update the ref */
hlink->ref_count = 1;
- list_add_tail(&hlink->list, &ebus->hlink_list);
+ list_add_tail(&hlink->list, &bus->hlink_list);
}
return 0;
@@ -127,12 +124,12 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_get_ml_capabilities);
* @ebus: HD-audio ext core bus
*/
-void snd_hdac_link_free_all(struct hdac_ext_bus *ebus)
+void snd_hdac_link_free_all(struct hdac_bus *bus)
{
struct hdac_ext_link *l;
- while (!list_empty(&ebus->hlink_list)) {
- l = list_first_entry(&ebus->hlink_list, struct hdac_ext_link, list);
+ while (!list_empty(&bus->hlink_list)) {
+ l = list_first_entry(&bus->hlink_list, struct hdac_ext_link, list);
list_del(&l->list);
kfree(l);
}
@@ -144,7 +141,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_link_free_all);
* @ebus: HD-audio extended core bus
* @codec_name: codec name
*/
-struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_ext_bus *ebus,
+struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_bus *bus,
const char *codec_name)
{
int i;
@@ -153,10 +150,10 @@ struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_ext_bus *ebus,
if (sscanf(codec_name, "ehdaudio%dD%d", &bus_idx, &addr) != 2)
return NULL;
- if (ebus->idx != bus_idx)
+ if (bus->idx != bus_idx)
return NULL;
- list_for_each_entry(hlink, &ebus->hlink_list, list) {
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
for (i = 0; i < HDA_MAX_CODECS; i++) {
if (hlink->lsdiid & (0x1 << addr))
return hlink;
@@ -219,12 +216,12 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down);
* snd_hdac_ext_bus_link_power_up_all -power up all hda link
* @ebus: HD-audio extended bus
*/
-int snd_hdac_ext_bus_link_power_up_all(struct hdac_ext_bus *ebus)
+int snd_hdac_ext_bus_link_power_up_all(struct hdac_bus *bus)
{
struct hdac_ext_link *hlink = NULL;
int ret;
- list_for_each_entry(hlink, &ebus->hlink_list, list) {
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
snd_hdac_updatel(hlink->ml_addr,
AZX_REG_ML_LCTL, 0, AZX_MLCTL_SPA);
ret = check_hdac_link_power_active(hlink, true);
@@ -240,12 +237,12 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_up_all);
* snd_hdac_ext_bus_link_power_down_all -power down all hda link
* @ebus: HD-audio extended bus
*/
-int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus)
+int snd_hdac_ext_bus_link_power_down_all(struct hdac_bus *bus)
{
struct hdac_ext_link *hlink = NULL;
int ret;
- list_for_each_entry(hlink, &ebus->hlink_list, list) {
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL, AZX_MLCTL_SPA, 0);
ret = check_hdac_link_power_active(hlink, false);
if (ret < 0)
@@ -256,39 +253,48 @@ int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus)
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down_all);
-int snd_hdac_ext_bus_link_get(struct hdac_ext_bus *ebus,
+int snd_hdac_ext_bus_link_get(struct hdac_bus *bus,
struct hdac_ext_link *link)
{
int ret = 0;
- mutex_lock(&ebus->lock);
+ mutex_lock(&bus->lock);
/*
* if we move from 0 to 1, count will be 1 so power up this link
* as well, also check the dma status and trigger that
*/
if (++link->ref_count == 1) {
- if (!ebus->cmd_dma_state) {
- snd_hdac_bus_init_cmd_io(&ebus->bus);
- ebus->cmd_dma_state = true;
+ if (!bus->cmd_dma_state) {
+ snd_hdac_bus_init_cmd_io(bus);
+ bus->cmd_dma_state = true;
}
ret = snd_hdac_ext_bus_link_power_up(link);
+
+ /*
+ * wait for 521usec for codec to report status
+ * HDA spec section 4.3 - Codec Discovery
+ */
+ udelay(521);
+ bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+ dev_dbg(bus->dev, "codec_mask = 0x%lx\n", bus->codec_mask);
+ snd_hdac_chip_writew(bus, STATESTS, bus->codec_mask);
}
- mutex_unlock(&ebus->lock);
+ mutex_unlock(&bus->lock);
return ret;
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_get);
-int snd_hdac_ext_bus_link_put(struct hdac_ext_bus *ebus,
+int snd_hdac_ext_bus_link_put(struct hdac_bus *bus,
struct hdac_ext_link *link)
{
int ret = 0;
struct hdac_ext_link *hlink;
bool link_up = false;
- mutex_lock(&ebus->lock);
+ mutex_lock(&bus->lock);
/*
* if we move from 1 to 0, count will be 0
@@ -301,7 +307,7 @@ int snd_hdac_ext_bus_link_put(struct hdac_ext_bus *ebus,
* now check if all links are off, if so turn off
* cmd dma as well
*/
- list_for_each_entry(hlink, &ebus->hlink_list, list) {
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
if (hlink->ref_count) {
link_up = true;
break;
@@ -309,12 +315,12 @@ int snd_hdac_ext_bus_link_put(struct hdac_ext_bus *ebus,
}
if (!link_up) {
- snd_hdac_bus_stop_cmd_io(&ebus->bus);
- ebus->cmd_dma_state = false;
+ snd_hdac_bus_stop_cmd_io(bus);
+ bus->cmd_dma_state = false;
}
}
- mutex_unlock(&ebus->lock);
+ mutex_unlock(&bus->lock);
return ret;
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_put);
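
Note: the code added to snd_hdac_ext_bus_link_get() above implements codec discovery per HDA spec section 4.3: after a link powers up, codecs have 521 usec to flag their presence in STATESTS, so the driver waits that long, latches the mask, then acknowledges the write-1-to-clear bits. A hedged sketch of just that sequence (the function name is hypothetical; bus is an initialized struct hdac_bus):

static void demo_discover_codecs(struct hdac_bus *bus)
{
	/* HDA spec 4.3 - Codec Discovery: codecs must assert their
	 * STATESTS bit within 521 usec of link power-up */
	udelay(521);
	bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
	/* STATESTS bits are write-1-to-clear; ack what we latched */
	snd_hdac_chip_writew(bus, STATESTS, bus->codec_mask);
}
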
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index c96d7a7a36af..1bd27576db98 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -25,7 +25,7 @@
/**
* snd_hdac_ext_stream_init - initialize each stream (aka device)
- * @ebus: HD-audio ext core bus
+ * @bus: HD-audio core bus
* @stream: HD-audio ext core stream object to initialize
* @idx: stream index number
* @direction: stream direction (SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE)
@@ -34,18 +34,16 @@
* initialize the stream, if ppcap is enabled then init those and then
* invoke hdac stream initialization routine
*/
-void snd_hdac_ext_stream_init(struct hdac_ext_bus *ebus,
+void snd_hdac_ext_stream_init(struct hdac_bus *bus,
struct hdac_ext_stream *stream,
int idx, int direction, int tag)
{
- struct hdac_bus *bus = &ebus->bus;
-
if (bus->ppcap) {
stream->pphc_addr = bus->ppcap + AZX_PPHC_BASE +
AZX_PPHC_INTERVAL * idx;
stream->pplc_addr = bus->ppcap + AZX_PPLC_BASE +
- AZX_PPLC_MULTI * ebus->num_streams +
+ AZX_PPLC_MULTI * bus->num_streams +
AZX_PPLC_INTERVAL * idx;
}
@@ -71,12 +69,12 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_init);
/**
* snd_hdac_ext_stream_init_all - create and initialize the stream objects
* for an extended hda bus
- * @ebus: HD-audio ext core bus
+ * @bus: HD-audio core bus
* @start_idx: start index for streams
* @num_stream: number of streams to initialize
* @dir: direction of streams
*/
-int snd_hdac_ext_stream_init_all(struct hdac_ext_bus *ebus, int start_idx,
+int snd_hdac_ext_stream_init_all(struct hdac_bus *bus, int start_idx,
int num_stream, int dir)
{
int stream_tag = 0;
@@ -88,7 +86,7 @@ int snd_hdac_ext_stream_init_all(struct hdac_ext_bus *ebus, int start_idx,
if (!stream)
return -ENOMEM;
tag = ++stream_tag;
- snd_hdac_ext_stream_init(ebus, stream, idx, dir, tag);
+ snd_hdac_ext_stream_init(bus, stream, idx, dir, tag);
idx++;
}
@@ -100,17 +98,16 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_init_all);
/**
* snd_hdac_stream_free_all - free hdac extended stream objects
*
- * @ebus: HD-audio ext core bus
+ * @bus: HD-audio core bus
*/
-void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus)
+void snd_hdac_stream_free_all(struct hdac_bus *bus)
{
struct hdac_stream *s, *_s;
struct hdac_ext_stream *stream;
- struct hdac_bus *bus = ebus_to_hbus(ebus);
list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
stream = stream_to_hdac_ext_stream(s);
- snd_hdac_ext_stream_decouple(ebus, stream, false);
+ snd_hdac_ext_stream_decouple(bus, stream, false);
list_del(&s->list);
kfree(stream);
}
@@ -119,15 +116,14 @@ EXPORT_SYMBOL_GPL(snd_hdac_stream_free_all);
/**
* snd_hdac_ext_stream_decouple - decouple the hdac stream
- * @ebus: HD-audio ext core bus
+ * @bus: HD-audio core bus
* @stream: HD-audio ext core stream object to initialize
* @decouple: flag to decouple
*/
-void snd_hdac_ext_stream_decouple(struct hdac_ext_bus *ebus,
+void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
struct hdac_ext_stream *stream, bool decouple)
{
struct hdac_stream *hstream = &stream->hstream;
- struct hdac_bus *bus = &ebus->bus;
u32 val;
int mask = AZX_PPCTL_PROCEN(hstream->index);
@@ -251,19 +247,18 @@ void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
EXPORT_SYMBOL_GPL(snd_hdac_ext_link_clear_stream_id);
static struct hdac_ext_stream *
-hdac_ext_link_stream_assign(struct hdac_ext_bus *ebus,
+hdac_ext_link_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream)
{
struct hdac_ext_stream *res = NULL;
struct hdac_stream *stream = NULL;
- struct hdac_bus *hbus = &ebus->bus;
- if (!hbus->ppcap) {
- dev_err(hbus->dev, "stream type not supported\n");
+ if (!bus->ppcap) {
+ dev_err(bus->dev, "stream type not supported\n");
return NULL;
}
- list_for_each_entry(stream, &hbus->stream_list, list) {
+ list_for_each_entry(stream, &bus->stream_list, list) {
struct hdac_ext_stream *hstream = container_of(stream,
struct hdac_ext_stream,
hstream);
@@ -277,34 +272,33 @@ hdac_ext_link_stream_assign(struct hdac_ext_bus *ebus,
}
if (!hstream->link_locked) {
- snd_hdac_ext_stream_decouple(ebus, hstream, true);
+ snd_hdac_ext_stream_decouple(bus, hstream, true);
res = hstream;
break;
}
}
if (res) {
- spin_lock_irq(&hbus->reg_lock);
+ spin_lock_irq(&bus->reg_lock);
res->link_locked = 1;
res->link_substream = substream;
- spin_unlock_irq(&hbus->reg_lock);
+ spin_unlock_irq(&bus->reg_lock);
}
return res;
}
static struct hdac_ext_stream *
-hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
+hdac_ext_host_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream)
{
struct hdac_ext_stream *res = NULL;
struct hdac_stream *stream = NULL;
- struct hdac_bus *hbus = &ebus->bus;
- if (!hbus->ppcap) {
- dev_err(hbus->dev, "stream type not supported\n");
+ if (!bus->ppcap) {
+ dev_err(bus->dev, "stream type not supported\n");
return NULL;
}
- list_for_each_entry(stream, &hbus->stream_list, list) {
+ list_for_each_entry(stream, &bus->stream_list, list) {
struct hdac_ext_stream *hstream = container_of(stream,
struct hdac_ext_stream,
hstream);
@@ -313,17 +307,17 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
if (!stream->opened) {
if (!hstream->decoupled)
- snd_hdac_ext_stream_decouple(ebus, hstream, true);
+ snd_hdac_ext_stream_decouple(bus, hstream, true);
res = hstream;
break;
}
}
if (res) {
- spin_lock_irq(&hbus->reg_lock);
+ spin_lock_irq(&bus->reg_lock);
res->hstream.opened = 1;
res->hstream.running = 0;
res->hstream.substream = substream;
- spin_unlock_irq(&hbus->reg_lock);
+ spin_unlock_irq(&bus->reg_lock);
}
return res;
@@ -331,7 +325,7 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
/**
* snd_hdac_ext_stream_assign - assign a stream for the PCM
- * @ebus: HD-audio ext core bus
+ * @bus: HD-audio core bus
* @substream: PCM substream to assign
* @type: type of stream (coupled, host or link stream)
*
@@ -346,27 +340,26 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
* the same stream object when it's used beforehand. when a stream is
* decoupled, it becomes a host stream and link stream.
*/
-struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_ext_bus *ebus,
+struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream,
int type)
{
struct hdac_ext_stream *hstream = NULL;
struct hdac_stream *stream = NULL;
- struct hdac_bus *hbus = &ebus->bus;
switch (type) {
case HDAC_EXT_STREAM_TYPE_COUPLED:
- stream = snd_hdac_stream_assign(hbus, substream);
+ stream = snd_hdac_stream_assign(bus, substream);
if (stream)
hstream = container_of(stream,
struct hdac_ext_stream, hstream);
return hstream;
case HDAC_EXT_STREAM_TYPE_HOST:
- return hdac_ext_host_stream_assign(ebus, substream);
+ return hdac_ext_host_stream_assign(bus, substream);
case HDAC_EXT_STREAM_TYPE_LINK:
- return hdac_ext_link_stream_assign(ebus, substream);
+ return hdac_ext_link_stream_assign(bus, substream);
default:
return NULL;
@@ -384,7 +377,6 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_assign);
void snd_hdac_ext_stream_release(struct hdac_ext_stream *stream, int type)
{
struct hdac_bus *bus = stream->hstream.bus;
- struct hdac_ext_bus *ebus = hbus_to_ebus(bus);
switch (type) {
case HDAC_EXT_STREAM_TYPE_COUPLED:
@@ -393,13 +385,13 @@ void snd_hdac_ext_stream_release(struct hdac_ext_stream *stream, int type)
case HDAC_EXT_STREAM_TYPE_HOST:
if (stream->decoupled && !stream->link_locked)
- snd_hdac_ext_stream_decouple(ebus, stream, false);
+ snd_hdac_ext_stream_decouple(bus, stream, false);
snd_hdac_stream_release(&stream->hstream);
break;
case HDAC_EXT_STREAM_TYPE_LINK:
if (stream->decoupled && !stream->hstream.opened)
- snd_hdac_ext_stream_decouple(ebus, stream, false);
+ snd_hdac_ext_stream_decouple(bus, stream, false);
spin_lock_irq(&bus->reg_lock);
stream->link_locked = 0;
stream->link_substream = NULL;
@@ -415,16 +407,15 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_release);
/**
* snd_hdac_ext_stream_spbcap_enable - enable SPIB for a stream
- * @ebus: HD-audio ext core bus
+ * @bus: HD-audio core bus
* @enable: flag to enable/disable SPIB
* @index: stream index for which SPIB need to be enabled
*/
-void snd_hdac_ext_stream_spbcap_enable(struct hdac_ext_bus *ebus,
+void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus,
bool enable, int index)
{
u32 mask = 0;
u32 register_mask = 0;
- struct hdac_bus *bus = &ebus->bus;
if (!bus->spbcap) {
dev_err(bus->dev, "Address of SPB capability is NULL\n");
@@ -446,14 +437,13 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_spbcap_enable);
/**
* snd_hdac_ext_stream_set_spib - sets the spib value of a stream
- * @ebus: HD-audio ext core bus
+ * @bus: HD-audio core bus
* @stream: hdac_ext_stream
* @value: spib value to set
*/
-int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus,
+int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus,
struct hdac_ext_stream *stream, u32 value)
{
- struct hdac_bus *bus = &ebus->bus;
if (!bus->spbcap) {
dev_err(bus->dev, "Address of SPB capability is NULL\n");
@@ -468,15 +458,14 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_set_spib);
/**
* snd_hdac_ext_stream_get_spbmaxfifo - gets the spib value of a stream
- * @ebus: HD-audio ext core bus
+ * @bus: HD-audio core bus
* @stream: hdac_ext_stream
*
* Return maxfifo for the stream
*/
-int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus,
+int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_bus *bus,
struct hdac_ext_stream *stream)
{
- struct hdac_bus *bus = &ebus->bus;
if (!bus->spbcap) {
dev_err(bus->dev, "Address of SPB capability is NULL\n");
@@ -490,11 +479,10 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_get_spbmaxfifo);
/**
* snd_hdac_ext_stop_streams - stop all stream if running
- * @ebus: HD-audio ext core bus
+ * @bus: HD-audio core bus
*/
-void snd_hdac_ext_stop_streams(struct hdac_ext_bus *ebus)
+void snd_hdac_ext_stop_streams(struct hdac_bus *bus)
{
- struct hdac_bus *bus = ebus_to_hbus(ebus);
struct hdac_stream *stream;
if (bus->chip_init) {
@@ -507,16 +495,15 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stop_streams);
/**
* snd_hdac_ext_stream_drsm_enable - enable DMA resume for a stream
- * @ebus: HD-audio ext core bus
+ * @bus: HD-audio core bus
* @enable: flag to enable/disable DRSM
* @index: stream index for which DRSM need to be enabled
*/
-void snd_hdac_ext_stream_drsm_enable(struct hdac_ext_bus *ebus,
+void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
bool enable, int index)
{
u32 mask = 0;
u32 register_mask = 0;
- struct hdac_bus *bus = &ebus->bus;
if (!bus->drsmcap) {
dev_err(bus->dev, "Address of DRSM capability is NULL\n");
@@ -538,14 +525,13 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_drsm_enable);
/**
* snd_hdac_ext_stream_set_dpibr - sets the dpibr value of a stream
- * @ebus: HD-audio ext core bus
+ * @bus: HD-audio core bus
* @stream: hdac_ext_stream
* @value: dpib value to set
*/
-int snd_hdac_ext_stream_set_dpibr(struct hdac_ext_bus *ebus,
+int snd_hdac_ext_stream_set_dpibr(struct hdac_bus *bus,
struct hdac_ext_stream *stream, u32 value)
{
- struct hdac_bus *bus = &ebus->bus;
if (!bus->drsmcap) {
dev_err(bus->dev, "Address of DRSM capability is NULL\n");
@@ -560,7 +546,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_set_dpibr);
/**
* snd_hdac_ext_stream_set_lpib - sets the lpib value of a stream
- * @ebus: HD-audio ext core bus
+ * @bus: HD-audio core bus
* @stream: hdac_ext_stream
* @value: lpib value to set
*/
diff --git a/sound/hda/hdac_component.c b/sound/hda/hdac_component.c
new file mode 100644
index 000000000000..6e46a9c73aed
--- /dev/null
+++ b/sound/hda/hdac_component.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+// hdac_component.c - routines for sync between HD-A core and DRM driver
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/component.h>
+#include <sound/core.h>
+#include <sound/hdaudio.h>
+#include <sound/hda_component.h>
+#include <sound/hda_register.h>
+
+static void hdac_acomp_release(struct device *dev, void *res)
+{
+}
+
+static struct drm_audio_component *hdac_get_acomp(struct device *dev)
+{
+ return devres_find(dev, hdac_acomp_release, NULL, NULL);
+}
+
+/**
+ * snd_hdac_set_codec_wakeup - Enable / disable HDMI/DP codec wakeup
+ * @bus: HDA core bus
+ * @enable: enable or disable the wakeup
+ *
+ * This function is supposed to be used only by a HD-audio controller
+ * driver that needs the interaction with graphics driver.
+ *
+ * This function should be called during the chip reset, also called at
+ * resume for updating STATESTS register read.
+ *
+ * Returns zero for success or a negative error code.
+ */
+int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
+{
+ struct drm_audio_component *acomp = bus->audio_component;
+
+ if (!acomp || !acomp->ops)
+ return -ENODEV;
+
+ if (!acomp->ops->codec_wake_override)
+ return 0;
+
+ dev_dbg(bus->dev, "%s codec wakeup\n",
+ enable ? "enable" : "disable");
+
+ acomp->ops->codec_wake_override(acomp->dev, enable);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_set_codec_wakeup);
+
+/**
+ * snd_hdac_display_power - Power up / down the power refcount
+ * @bus: HDA core bus
+ * @enable: power up or down
+ *
+ * This function is supposed to be used only by a HD-audio controller
+ * driver that needs the interaction with graphics driver.
+ *
+ * This function manages a refcount and calls the get_power() and
+ * put_power() ops accordingly, toggling the codec wakeup, too.
+ *
+ * Returns zero for success or a negative error code.
+ */
+int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
+{
+ struct drm_audio_component *acomp = bus->audio_component;
+
+ if (!acomp || !acomp->ops)
+ return -ENODEV;
+
+ dev_dbg(bus->dev, "display power %s\n",
+ enable ? "enable" : "disable");
+
+ if (enable) {
+ if (!bus->drm_power_refcount++) {
+ if (acomp->ops->get_power)
+ acomp->ops->get_power(acomp->dev);
+ snd_hdac_set_codec_wakeup(bus, true);
+ snd_hdac_set_codec_wakeup(bus, false);
+ }
+ } else {
+ WARN_ON(!bus->drm_power_refcount);
+ if (!--bus->drm_power_refcount)
+ if (acomp->ops->put_power)
+ acomp->ops->put_power(acomp->dev);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_display_power);
+
+/**
+ * snd_hdac_sync_audio_rate - Set N/CTS based on the sample rate
+ * @codec: HDA codec
+ * @nid: the pin widget NID
+ * @dev_id: device identifier
+ * @rate: the sample rate to set
+ *
+ * This function is supposed to be used only by a HD-audio controller
+ * driver that needs the interaction with graphics driver.
+ *
+ * This function sets N/CTS value based on the given sample rate.
+ * Returns zero for success, or a negative error code.
+ */
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
+ int dev_id, int rate)
+{
+ struct hdac_bus *bus = codec->bus;
+ struct drm_audio_component *acomp = bus->audio_component;
+ int port, pipe;
+
+ if (!acomp || !acomp->ops || !acomp->ops->sync_audio_rate)
+ return -ENODEV;
+ port = nid;
+ if (acomp->audio_ops && acomp->audio_ops->pin2port) {
+ port = acomp->audio_ops->pin2port(codec, nid);
+ if (port < 0)
+ return -EINVAL;
+ }
+ pipe = dev_id;
+ return acomp->ops->sync_audio_rate(acomp->dev, port, pipe, rate);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate);
+
+/**
+ * snd_hdac_acomp_get_eld - Get the audio state and ELD via component
+ * @codec: HDA codec
+ * @nid: the pin widget NID
+ * @dev_id: device identifier
+ * @audio_enabled: the pointer to store the current audio state
+ * @buffer: the buffer pointer to store ELD bytes
+ * @max_bytes: the max bytes to be stored on @buffer
+ *
+ * This function is supposed to be used only by a HD-audio controller
+ * driver that needs the interaction with graphics driver.
+ *
+ * This function queries the current state of the audio on the given
+ * digital port and fetches the ELD bytes onto the given buffer.
+ * It returns the number of bytes for the total ELD data, zero for
+ * invalid ELD, or a negative error code.
+ *
+ * The return size is the total bytes required for the whole ELD bytes,
+ * thus it may be over @max_bytes. If it's over @max_bytes, it implies
+ * that only a part of ELD bytes have been fetched.
+ */
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
+ bool *audio_enabled, char *buffer, int max_bytes)
+{
+ struct hdac_bus *bus = codec->bus;
+ struct drm_audio_component *acomp = bus->audio_component;
+ int port, pipe;
+
+ if (!acomp || !acomp->ops || !acomp->ops->get_eld)
+ return -ENODEV;
+
+ port = nid;
+ if (acomp->audio_ops && acomp->audio_ops->pin2port) {
+ port = acomp->audio_ops->pin2port(codec, nid);
+ if (port < 0)
+ return -EINVAL;
+ }
+ pipe = dev_id;
+ return acomp->ops->get_eld(acomp->dev, port, pipe, audio_enabled,
+ buffer, max_bytes);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_acomp_get_eld);
+
+static int hdac_component_master_bind(struct device *dev)
+{
+ struct drm_audio_component *acomp = hdac_get_acomp(dev);
+ int ret;
+
+ if (WARN_ON(!acomp))
+ return -EINVAL;
+
+ ret = component_bind_all(dev, acomp);
+ if (ret < 0)
+ return ret;
+
+ if (WARN_ON(!(acomp->dev && acomp->ops))) {
+ ret = -EINVAL;
+ goto out_unbind;
+ }
+
+ /* pin the module to avoid dynamic unbinding, but only if given */
+ if (!try_module_get(acomp->ops->owner)) {
+ ret = -ENODEV;
+ goto out_unbind;
+ }
+
+ if (acomp->audio_ops && acomp->audio_ops->master_bind) {
+ ret = acomp->audio_ops->master_bind(dev, acomp);
+ if (ret < 0)
+ goto module_put;
+ }
+
+ return 0;
+
+ module_put:
+ module_put(acomp->ops->owner);
+out_unbind:
+ component_unbind_all(dev, acomp);
+
+ return ret;
+}
+
+static void hdac_component_master_unbind(struct device *dev)
+{
+ struct drm_audio_component *acomp = hdac_get_acomp(dev);
+
+ if (acomp->audio_ops && acomp->audio_ops->master_unbind)
+ acomp->audio_ops->master_unbind(dev, acomp);
+ module_put(acomp->ops->owner);
+ component_unbind_all(dev, acomp);
+ WARN_ON(acomp->ops || acomp->dev);
+}
+
+static const struct component_master_ops hdac_component_master_ops = {
+ .bind = hdac_component_master_bind,
+ .unbind = hdac_component_master_unbind,
+};
+
+/**
+ * snd_hdac_acomp_register_notifier - Register audio component ops
+ * @bus: HDA core bus
+ * @aops: audio component ops
+ *
+ * This function is supposed to be used only by a HD-audio controller
+ * driver that needs the interaction with graphics driver.
+ *
+ * This function sets the given ops to be called by the graphics driver.
+ *
+ * Returns zero for success or a negative error code.
+ */
+int snd_hdac_acomp_register_notifier(struct hdac_bus *bus,
+ const struct drm_audio_component_audio_ops *aops)
+{
+ if (!bus->audio_component)
+ return -ENODEV;
+
+ bus->audio_component->audio_ops = aops;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_acomp_register_notifier);
+
+/**
+ * snd_hdac_acomp_init - Initialize audio component
+ * @bus: HDA core bus
+ * @match_master: match function for finding components
+ * @extra_size: Extra bytes to allocate
+ *
+ * This function is supposed to be used only by a HD-audio controller
+ * driver that needs the interaction with graphics driver.
+ *
+ * This function initializes and sets up the audio component to communicate
+ * with graphics driver.
+ *
+ * Unlike snd_hdac_i915_init(), this function doesn't synchronize with the
+ * binding with the DRM component. Each caller needs to sync via master_bind
+ * audio_ops.
+ *
+ * Returns zero for success or a negative error code.
+ */
+int snd_hdac_acomp_init(struct hdac_bus *bus,
+ const struct drm_audio_component_audio_ops *aops,
+ int (*match_master)(struct device *, void *),
+ size_t extra_size)
+{
+ struct component_match *match = NULL;
+ struct device *dev = bus->dev;
+ struct drm_audio_component *acomp;
+ int ret;
+
+ if (WARN_ON(hdac_get_acomp(dev)))
+ return -EBUSY;
+
+ acomp = devres_alloc(hdac_acomp_release, sizeof(*acomp) + extra_size,
+ GFP_KERNEL);
+ if (!acomp)
+ return -ENOMEM;
+ acomp->audio_ops = aops;
+ bus->audio_component = acomp;
+ devres_add(dev, acomp);
+
+ component_match_add(dev, &match, match_master, bus);
+ ret = component_master_add_with_match(dev, &hdac_component_master_ops,
+ match);
+ if (ret < 0)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ bus->audio_component = NULL;
+ devres_destroy(dev, hdac_acomp_release, NULL, NULL);
+ dev_info(dev, "failed to add audio component master (%d)\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_acomp_init);
+
+/**
+ * snd_hdac_acomp_exit - Finalize audio component
+ * @bus: HDA core bus
+ *
+ * This function is supposed to be used only by a HD-audio controller
+ * driver that needs the interaction with graphics driver.
+ *
+ * This function releases the audio component that has been used.
+ *
+ * Returns zero for success or a negative error code.
+ */
+int snd_hdac_acomp_exit(struct hdac_bus *bus)
+{
+ struct device *dev = bus->dev;
+ struct drm_audio_component *acomp = bus->audio_component;
+
+ if (!acomp)
+ return 0;
+
+ WARN_ON(bus->drm_power_refcount);
+ if (bus->drm_power_refcount > 0 && acomp->ops)
+ acomp->ops->put_power(acomp->dev);
+
+ component_master_del(dev, &hdac_component_master_ops);
+
+ bus->audio_component = NULL;
+ devres_destroy(dev, hdac_acomp_release, NULL, NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_acomp_exit);
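
Note: from a controller driver's side, the new generic layer above is used in three steps: register a match function (plus optional audio_ops) via snd_hdac_acomp_init(), bracket display-dependent work with the refcounted snd_hdac_display_power(), and tear down with snd_hdac_acomp_exit(). A hedged usage sketch — demo_gpu_match, demo_controller_probe and the "demo-gpu" driver name are all hypothetical:

#include <linux/string.h>
#include <sound/hdaudio.h>
#include <sound/hda_component.h>

static int demo_gpu_match(struct device *dev, void *data)
{
	/* bind only against the GPU driver we cooperate with */
	return !strcmp(dev->driver->name, "demo-gpu");
}

static int demo_controller_probe(struct hdac_bus *bus)
{
	int err;

	err = snd_hdac_acomp_init(bus, NULL /* no audio_ops needed */,
				  demo_gpu_match, 0 /* no extra bytes */);
	if (err < 0)
		return err;

	snd_hdac_display_power(bus, true);	/* refcounted power-up */
	/* ... probe codecs behind the display link ... */
	snd_hdac_display_power(bus, false);
	return 0;
}
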
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index 7ba100bb1c3f..dbf02a3a8d2f 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -738,7 +738,7 @@ static struct hda_rate_tbl rate_bits[] = {
*/
unsigned int snd_hdac_calc_stream_format(unsigned int rate,
unsigned int channels,
- unsigned int format,
+ snd_pcm_format_t format,
unsigned int maxbps,
unsigned short spdif_ctls)
{
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index cbe818eda336..b5282cbbe489 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -15,88 +15,12 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/component.h>
-#include <drm/i915_component.h>
#include <sound/core.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
#include <sound/hda_register.h>
-static struct i915_audio_component *hdac_acomp;
-
-/**
- * snd_hdac_set_codec_wakeup - Enable / disable HDMI/DP codec wakeup
- * @bus: HDA core bus
- * @enable: enable or disable the wakeup
- *
- * This function is supposed to be used only by a HD-audio controller
- * driver that needs the interaction with i915 graphics.
- *
- * This function should be called during the chip reset, also called at
- * resume for updating STATESTS register read.
- *
- * Returns zero for success or a negative error code.
- */
-int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
-{
- struct i915_audio_component *acomp = bus->audio_component;
-
- if (!acomp || !acomp->ops)
- return -ENODEV;
-
- if (!acomp->ops->codec_wake_override) {
- dev_warn(bus->dev,
- "Invalid codec wake callback\n");
- return 0;
- }
-
- dev_dbg(bus->dev, "%s codec wakeup\n",
- enable ? "enable" : "disable");
-
- acomp->ops->codec_wake_override(acomp->dev, enable);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hdac_set_codec_wakeup);
-
-/**
- * snd_hdac_display_power - Power up / down the power refcount
- * @bus: HDA core bus
- * @enable: power up or down
- *
- * This function is supposed to be used only by a HD-audio controller
- * driver that needs the interaction with i915 graphics.
- *
- * This function manages a refcount and calls the i915 get_power() and
- * put_power() ops accordingly, toggling the codec wakeup, too.
- *
- * Returns zero for success or a negative error code.
- */
-int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
-{
- struct i915_audio_component *acomp = bus->audio_component;
-
- if (!acomp || !acomp->ops)
- return -ENODEV;
-
- dev_dbg(bus->dev, "display power %s\n",
- enable ? "enable" : "disable");
-
- if (enable) {
- if (!bus->i915_power_refcount++) {
- acomp->ops->get_power(acomp->dev);
- snd_hdac_set_codec_wakeup(bus, true);
- snd_hdac_set_codec_wakeup(bus, false);
- }
- } else {
- WARN_ON(!bus->i915_power_refcount);
- if (!--bus->i915_power_refcount)
- acomp->ops->put_power(acomp->dev);
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hdac_display_power);
+static struct completion bind_complete;
#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
((pci)->device == 0x0c0c) || \
@@ -119,7 +43,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_display_power);
*/
void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
- struct i915_audio_component *acomp = bus->audio_component;
+ struct drm_audio_component *acomp = bus->audio_component;
struct pci_dev *pci = to_pci_dev(bus->dev);
int cdclk_freq;
unsigned int bclk_m, bclk_n;
@@ -158,181 +82,11 @@ void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
}
EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
-/* There is a fixed mapping between audio pin node and display port.
- * on SNB, IVY, HSW, BSW, SKL, BXT, KBL:
- * Pin Widget 5 - PORT B (port = 1 in i915 driver)
- * Pin Widget 6 - PORT C (port = 2 in i915 driver)
- * Pin Widget 7 - PORT D (port = 3 in i915 driver)
- *
- * on VLV, ILK:
- * Pin Widget 4 - PORT B (port = 1 in i915 driver)
- * Pin Widget 5 - PORT C (port = 2 in i915 driver)
- * Pin Widget 6 - PORT D (port = 3 in i915 driver)
- */
-static int pin2port(struct hdac_device *codec, hda_nid_t pin_nid)
-{
- int base_nid;
-
- switch (codec->vendor_id) {
- case 0x80860054: /* ILK */
- case 0x80862804: /* ILK */
- case 0x80862882: /* VLV */
- base_nid = 3;
- break;
- default:
- base_nid = 4;
- break;
- }
-
- if (WARN_ON(pin_nid <= base_nid || pin_nid > base_nid + 3))
- return -1;
- return pin_nid - base_nid;
-}
-
-/**
- * snd_hdac_sync_audio_rate - Set N/CTS based on the sample rate
- * @codec: HDA codec
- * @nid: the pin widget NID
- * @dev_id: device identifier
- * @rate: the sample rate to set
- *
- * This function is supposed to be used only by a HD-audio controller
- * driver that needs the interaction with i915 graphics.
- *
- * This function sets N/CTS value based on the given sample rate.
- * Returns zero for success, or a negative error code.
- */
-int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
- int dev_id, int rate)
-{
- struct hdac_bus *bus = codec->bus;
- struct i915_audio_component *acomp = bus->audio_component;
- int port, pipe;
-
- if (!acomp || !acomp->ops || !acomp->ops->sync_audio_rate)
- return -ENODEV;
- port = pin2port(codec, nid);
- if (port < 0)
- return -EINVAL;
- pipe = dev_id;
- return acomp->ops->sync_audio_rate(acomp->dev, port, pipe, rate);
-}
-EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate);
-
-/**
- * snd_hdac_acomp_get_eld - Get the audio state and ELD via component
- * @codec: HDA codec
- * @nid: the pin widget NID
- * @dev_id: device identifier
- * @audio_enabled: the pointer to store the current audio state
- * @buffer: the buffer pointer to store ELD bytes
- * @max_bytes: the max bytes to be stored on @buffer
- *
- * This function is supposed to be used only by a HD-audio controller
- * driver that needs the interaction with i915 graphics.
- *
- * This function queries the current state of the audio on the given
- * digital port and fetches the ELD bytes onto the given buffer.
- * It returns the number of bytes for the total ELD data, zero for
- * invalid ELD, or a negative error code.
- *
- * The return size is the total bytes required for the whole ELD bytes,
- * thus it may be over @max_bytes. If it's over @max_bytes, it implies
- * that only a part of ELD bytes have been fetched.
- */
-int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
- bool *audio_enabled, char *buffer, int max_bytes)
-{
- struct hdac_bus *bus = codec->bus;
- struct i915_audio_component *acomp = bus->audio_component;
- int port, pipe;
-
- if (!acomp || !acomp->ops || !acomp->ops->get_eld)
- return -ENODEV;
-
- port = pin2port(codec, nid);
- if (port < 0)
- return -EINVAL;
-
- pipe = dev_id;
- return acomp->ops->get_eld(acomp->dev, port, pipe, audio_enabled,
- buffer, max_bytes);
-}
-EXPORT_SYMBOL_GPL(snd_hdac_acomp_get_eld);
-
-static int hdac_component_master_bind(struct device *dev)
-{
- struct i915_audio_component *acomp = hdac_acomp;
- int ret;
-
- ret = component_bind_all(dev, acomp);
- if (ret < 0)
- return ret;
-
- if (WARN_ON(!(acomp->dev && acomp->ops && acomp->ops->get_power &&
- acomp->ops->put_power && acomp->ops->get_cdclk_freq))) {
- ret = -EINVAL;
- goto out_unbind;
- }
-
- /*
- * Atm, we don't support dynamic unbinding initiated by the child
- * component, so pin its containing module until we unbind.
- */
- if (!try_module_get(acomp->ops->owner)) {
- ret = -ENODEV;
- goto out_unbind;
- }
-
- return 0;
-
-out_unbind:
- component_unbind_all(dev, acomp);
-
- return ret;
-}
-
-static void hdac_component_master_unbind(struct device *dev)
-{
- struct i915_audio_component *acomp = hdac_acomp;
-
- module_put(acomp->ops->owner);
- component_unbind_all(dev, acomp);
- WARN_ON(acomp->ops || acomp->dev);
-}
-
-static const struct component_master_ops hdac_component_master_ops = {
- .bind = hdac_component_master_bind,
- .unbind = hdac_component_master_unbind,
-};
-
-static int hdac_component_master_match(struct device *dev, void *data)
+static int i915_component_master_match(struct device *dev, void *data)
{
- /* i915 is the only supported component */
return !strcmp(dev->driver->name, "i915");
}
-/**
- * snd_hdac_i915_register_notifier - Register i915 audio component ops
- * @aops: i915 audio component ops
- *
- * This function is supposed to be used only by a HD-audio controller
- * driver that needs the interaction with i915 graphics.
- *
- * This function sets the given ops to be called by the i915 graphics driver.
- *
- * Returns zero for success or a negative error code.
- */
-int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops)
-{
- if (!hdac_acomp)
- return -ENODEV;
-
- hdac_acomp->audio_ops = aops;
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hdac_i915_register_notifier);
-
/* check whether intel graphics is present */
static bool i915_gfx_present(void)
{
@@ -345,6 +99,19 @@ static bool i915_gfx_present(void)
return pci_dev_present(ids);
}
+static int i915_master_bind(struct device *dev,
+ struct drm_audio_component *acomp)
+{
+ complete_all(&bind_complete);
+ /* clear audio_ops here as it was needed only for completion call */
+ acomp->audio_ops = NULL;
+ return 0;
+}
+
+static const struct drm_audio_component_audio_ops i915_init_ops = {
+ .master_bind = i915_master_bind
+};
+
/**
* snd_hdac_i915_init - Initialize i915 audio component
* @bus: HDA core bus
@@ -359,83 +126,31 @@ static bool i915_gfx_present(void)
*/
int snd_hdac_i915_init(struct hdac_bus *bus)
{
- struct component_match *match = NULL;
- struct device *dev = bus->dev;
- struct i915_audio_component *acomp;
- int ret;
-
- if (WARN_ON(hdac_acomp))
- return -EBUSY;
+ struct drm_audio_component *acomp;
+ int err;
if (!i915_gfx_present())
return -ENODEV;
- acomp = kzalloc(sizeof(*acomp), GFP_KERNEL);
- if (!acomp)
- return -ENOMEM;
- bus->audio_component = acomp;
- hdac_acomp = acomp;
-
- component_match_add(dev, &match, hdac_component_master_match, bus);
- ret = component_master_add_with_match(dev, &hdac_component_master_ops,
- match);
- if (ret < 0)
- goto out_err;
-
- /*
- * Atm, we don't support deferring the component binding, so make sure
- * i915 is loaded and that the binding successfully completes.
- */
- request_module("i915");
+ init_completion(&bind_complete);
+ err = snd_hdac_acomp_init(bus, &i915_init_ops,
+ i915_component_master_match,
+ sizeof(struct i915_audio_component) - sizeof(*acomp));
+ if (err < 0)
+ return err;
+ acomp = bus->audio_component;
+ if (!acomp)
+ return -ENODEV;
if (!acomp->ops) {
- ret = -ENODEV;
- goto out_master_del;
+ request_module("i915");
+ /* 10s timeout */
+ wait_for_completion_timeout(&bind_complete, 10 * 1000);
+ }
+ if (!acomp->ops) {
+ snd_hdac_acomp_exit(bus);
+ return -ENODEV;
}
- dev_dbg(dev, "bound to i915 component master\n");
-
return 0;
-out_master_del:
- component_master_del(dev, &hdac_component_master_ops);
-out_err:
- kfree(acomp);
- bus->audio_component = NULL;
- hdac_acomp = NULL;
- dev_info(dev, "failed to add i915 component master (%d)\n", ret);
-
- return ret;
}
EXPORT_SYMBOL_GPL(snd_hdac_i915_init);
-
-/**
- * snd_hdac_i915_exit - Finalize i915 audio component
- * @bus: HDA core bus
- *
- * This function is supposed to be used only by a HD-audio controller
- * driver that needs the interaction with i915 graphics.
- *
- * This function releases the i915 audio component that has been used.
- *
- * Returns zero for success or a negative error code.
- */
-int snd_hdac_i915_exit(struct hdac_bus *bus)
-{
- struct device *dev = bus->dev;
- struct i915_audio_component *acomp = bus->audio_component;
-
- if (!acomp)
- return 0;
-
- WARN_ON(bus->i915_power_refcount);
- if (bus->i915_power_refcount > 0 && acomp->ops)
- acomp->ops->put_power(acomp->dev);
-
- component_master_del(dev, &hdac_component_master_ops);
-
- kfree(acomp);
- bus->audio_component = NULL;
- hdac_acomp = NULL;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hdac_i915_exit);
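The hdac_i915.c rewrite above moves the component plumbing into the generic
snd_hdac_acomp_init() helper and keeps only the i915-specific pieces: a match
callback and a one-shot master_bind hook that signals a completion, so that
snd_hdac_i915_init() can request_module("i915") and then wait for the freshly
loaded driver to bind instead of failing immediately. A minimal sketch of that
handshake follows; the names my_bind_done, my_master_bind and
wait_for_component are illustrative, not part of the patch:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/kmod.h>

    static DECLARE_COMPLETION(my_bind_done);

    static int my_master_bind(struct device *dev,
                              struct drm_audio_component *acomp)
    {
            complete_all(&my_bind_done);    /* wake the waiter below */
            return 0;
    }

    /* caller side: kick the provider module and wait up to 10s */
    static int wait_for_component(void)
    {
            request_module("i915");
            if (!wait_for_completion_timeout(&my_bind_done,
                                             msecs_to_jiffies(10 * 1000)))
                    return -ENODEV;         /* i915 never bound */
            return 0;
    }

Using complete_all() rather than complete() means a waiter that arrives after
the bind has already happened still returns immediately rather than blocking.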
diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
index e1472c7ab6c1..eee422390d8e 100644
--- a/sound/hda/hdac_stream.c
+++ b/sound/hda/hdac_stream.c
@@ -621,7 +621,7 @@ int snd_hdac_dsp_prepare(struct hdac_stream *azx_dev, unsigned int format,
unsigned int byte_size, struct snd_dma_buffer *bufp)
{
struct hdac_bus *bus = azx_dev->bus;
- u32 *bdl;
+ __le32 *bdl;
int err;
snd_hdac_dsp_lock(azx_dev);
@@ -651,7 +651,7 @@ int snd_hdac_dsp_prepare(struct hdac_stream *azx_dev, unsigned int format,
snd_hdac_stream_writel(azx_dev, SD_BDLPU, 0);
azx_dev->frags = 0;
- bdl = (u32 *)azx_dev->bdl.area;
+ bdl = (__le32 *)azx_dev->bdl.area;
err = setup_bdle(bus, bufp, azx_dev, &bdl, 0, byte_size, 0);
if (err < 0)
goto error;
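The u32 -> __le32 change in hdac_stream.c is a sparse annotation, not a
behavior change: BDL entries are consumed by the controller in little-endian
byte order, so typing the descriptor pointer as __le32 * makes sparse reject
any store that does not go through cpu_to_le32(). Roughly what setup_bdle()
does with such a pointer (addr, len and ioc stand in for its real parameters):

    static __le32 *fill_bdl_entry(__le32 *bdl, dma_addr_t addr,
                                  u32 len, bool ioc)
    {
            *bdl++ = cpu_to_le32((u32)addr);           /* address, low half  */
            *bdl++ = cpu_to_le32(upper_32_bits(addr)); /* high half, 64-bit  */
            *bdl++ = cpu_to_le32(len);                 /* length in bytes    */
            *bdl++ = cpu_to_le32(ioc ? 0x01 : 0);      /* IRQ on completion  */
            return bdl;
    }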
diff --git a/sound/i2c/cs8427.c b/sound/i2c/cs8427.c
index 7e21621e492a..2647309bc675 100644
--- a/sound/i2c/cs8427.c
+++ b/sound/i2c/cs8427.c
@@ -621,15 +621,3 @@ int snd_cs8427_iec958_pcm(struct snd_i2c_device *cs8427, unsigned int rate)
}
EXPORT_SYMBOL(snd_cs8427_iec958_pcm);
-
-static int __init alsa_cs8427_module_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_cs8427_module_exit(void)
-{
-}
-
-module_init(alsa_cs8427_module_init)
-module_exit(alsa_cs8427_module_exit)
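This is the first of many hunks in this series deleting empty
module_init()/module_exit() stubs; the same pattern repeats below for i2c.c,
ak4xxx-adda.c, tea6330t.c, es1688_lib.c, gus_main.c, the SB and WSS libraries
and ac97_codec.c. A module with no initcall is perfectly legal: a pure library
module that only exports symbols can omit both hooks entirely, e.g.:

    /* Hypothetical library module: no module_init()/module_exit() needed. */
    #include <linux/module.h>

    int mylib_do_work(void)
    {
            return 0;
    }
    EXPORT_SYMBOL(mylib_do_work);

    MODULE_LICENSE("GPL");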
diff --git a/sound/i2c/i2c.c b/sound/i2c/i2c.c
index ef2a9afe9e19..c4a232f18a79 100644
--- a/sound/i2c/i2c.c
+++ b/sound/i2c/i2c.c
@@ -338,16 +338,3 @@ static int snd_i2c_bit_probeaddr(struct snd_i2c_bus *bus, unsigned short addr)
snd_i2c_bit_stop(bus);
return err;
}
-
-
-static int __init alsa_i2c_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_i2c_exit(void)
-{
-}
-
-module_init(alsa_i2c_init)
-module_exit(alsa_i2c_exit)
diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
index bf377dc192aa..7f2761a2e7c8 100644
--- a/sound/i2c/other/ak4xxx-adda.c
+++ b/sound/i2c/other/ak4xxx-adda.c
@@ -911,15 +911,3 @@ int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak)
return 0;
}
EXPORT_SYMBOL(snd_akm4xxx_build_controls);
-
-static int __init alsa_akm4xxx_module_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_akm4xxx_module_exit(void)
-{
-}
-
-module_init(alsa_akm4xxx_module_init)
-module_exit(alsa_akm4xxx_module_exit)
diff --git a/sound/i2c/tea6330t.c b/sound/i2c/tea6330t.c
index 2d22310dce05..239c4822427f 100644
--- a/sound/i2c/tea6330t.c
+++ b/sound/i2c/tea6330t.c
@@ -368,19 +368,3 @@ int snd_tea6330t_update_mixer(struct snd_card *card,
EXPORT_SYMBOL(snd_tea6330t_detect);
EXPORT_SYMBOL(snd_tea6330t_update_mixer);
-
-/*
- * INIT part
- */
-
-static int __init alsa_tea6330t_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_tea6330t_exit(void)
-{
-}
-
-module_init(alsa_tea6330t_init)
-module_exit(alsa_tea6330t_exit)
diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
index 43b35a873d78..d7db1eeebc84 100644
--- a/sound/isa/Kconfig
+++ b/sound/isa/Kconfig
@@ -459,7 +459,7 @@ config SND_MSND_CLASSIC
Say M here if you have a Turtle Beach MultiSound Classic, Tahiti or
Monterey (not for the Pinnacle or Fiji).
- See <file:Documentation/sound/oss/MultiSound> for important information
+ See <file:Documentation/sound/cards/multisound.sh> for important information
about this driver. Note that it has been discontinued, but the
Voyetra Turtle Beach knowledge base entry for it is still available
at <http://www.turtlebeach.com/site/kb_ftp/790.asp>.
diff --git a/sound/isa/ad1816a/ad1816a_lib.c b/sound/isa/ad1816a/ad1816a_lib.c
index 923201414469..fba6d22f7f4b 100644
--- a/sound/isa/ad1816a/ad1816a_lib.c
+++ b/sound/isa/ad1816a/ad1816a_lib.c
@@ -85,7 +85,8 @@ static void snd_ad1816a_write_mask(struct snd_ad1816a *chip, unsigned char reg,
static unsigned char snd_ad1816a_get_format(struct snd_ad1816a *chip,
- unsigned int format, int channels)
+ snd_pcm_format_t format,
+ int channels)
{
unsigned char retval = AD1816A_FMT_LINEAR_8;
diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c
index a826c138e7f5..3dfe7e592c25 100644
--- a/sound/isa/es1688/es1688.c
+++ b/sound/isa/es1688/es1688.c
@@ -260,7 +260,6 @@ static int snd_es968_pnp_detect(struct pnp_card_link *pcard,
struct snd_card *card;
static unsigned int dev;
int error;
- struct snd_es1688 *chip;
if (snd_es968_pnp_is_probed)
return -EBUSY;
@@ -276,7 +275,6 @@ static int snd_es968_pnp_detect(struct pnp_card_link *pcard,
sizeof(struct snd_es1688), &card);
if (error < 0)
return error;
- chip = card->private_data;
error = snd_card_es968_pnp(card, dev, pcard, pid);
if (error < 0) {
diff --git a/sound/isa/es1688/es1688_lib.c b/sound/isa/es1688/es1688_lib.c
index f9c0662e9a22..50cdce0e8946 100644
--- a/sound/isa/es1688/es1688_lib.c
+++ b/sound/isa/es1688/es1688_lib.c
@@ -1029,19 +1029,3 @@ EXPORT_SYMBOL(snd_es1688_mixer_write);
EXPORT_SYMBOL(snd_es1688_create);
EXPORT_SYMBOL(snd_es1688_pcm);
EXPORT_SYMBOL(snd_es1688_mixer);
-
-/*
- * INIT part
- */
-
-static int __init alsa_es1688_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_es1688_exit(void)
-{
-}
-
-module_init(alsa_es1688_init)
-module_exit(alsa_es1688_exit)
diff --git a/sound/isa/es18xx.c b/sound/isa/es18xx.c
index 2a6960c3e2a4..0d103d6f805e 100644
--- a/sound/isa/es18xx.c
+++ b/sound/isa/es18xx.c
@@ -1024,6 +1024,7 @@ static int snd_es18xx_put_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem
val = 3;
} else
retVal = snd_es18xx_mixer_bits(chip, 0x7a, 0x08, 0x00) != 0x00;
+ /* fall through */
/* 4 source chips */
case 0x1868:
case 0x1878:
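The added /* fall through */ comments in this and the following hunks
(galaxy.c, miro.c, opti92x-ad1848.c, echoaudio.c, emupcm.c) are not
decoration: with -Wimplicit-fallthrough enabled, gcc treats a comment matching
its fall-through regex as an explicit annotation and suppresses the warning
for that case label. A compilable sketch of the annotated shape, with a
hypothetical config bit:

    static int pick_irq_config(int irq)
    {
            int cfg = 0;

            switch (irq) {
            case 2:
                    irq = 9;        /* legacy ISA: IRQ 2 is routed to IRQ 9 */
                    /* fall through */
            case 9:
                    cfg |= 0x01;    /* hypothetical IRQ-9 config bit */
                    break;
            default:
                    return -EINVAL;
            }
            return cfg;
    }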
diff --git a/sound/isa/galaxy/galaxy.c b/sound/isa/galaxy/galaxy.c
index b9994cc9f5fb..af9eea41379f 100644
--- a/sound/isa/galaxy/galaxy.c
+++ b/sound/isa/galaxy/galaxy.c
@@ -260,6 +260,7 @@ static int snd_galaxy_match(struct device *dev, unsigned int n)
break;
case 2:
irq[n] = 9;
+ /* Fall through */
case 9:
wss_config[n] |= WSS_CONFIG_IRQ_9;
break;
@@ -304,6 +305,7 @@ static int snd_galaxy_match(struct device *dev, unsigned int n)
case 1:
if (dma1[n] == 0)
break;
+ /* Fall through */
default:
dev_err(dev, "invalid capture DMA %d\n", dma2[n]);
return 0;
@@ -333,6 +335,7 @@ mpu:
break;
case 2:
mpu_irq[n] = 9;
+ /* Fall through */
case 9:
config[n] |= GALAXY_CONFIG_MPUIRQ_2;
break;
diff --git a/sound/isa/gus/gus_io.c b/sound/isa/gus/gus_io.c
index ca79878d8d8c..2fd32ef22c30 100644
--- a/sound/isa/gus/gus_io.c
+++ b/sound/isa/gus/gus_io.c
@@ -461,7 +461,7 @@ void snd_gf1_print_voice_registers(struct snd_gus_card * gus)
printk(KERN_INFO " -%i- GFA1 effect address = 0x%x\n", voice, snd_gf1_i_read_addr(gus, 0x11, ctrl & 4));
printk(KERN_INFO " -%i- GFA1 effect volume = 0x%x\n", voice, snd_gf1_i_read16(gus, 0x16));
printk(KERN_INFO " -%i- GFA1 effect volume final = 0x%x\n", voice, snd_gf1_i_read16(gus, 0x1d));
- printk(KERN_INFO " -%i- GFA1 effect acumulator = 0x%x\n", voice, snd_gf1_i_read8(gus, 0x14));
+ printk(KERN_INFO " -%i- GFA1 effect accumulator = 0x%x\n", voice, snd_gf1_i_read8(gus, 0x14));
}
if (mode & 0x20) {
printk(KERN_INFO " -%i- GFA1 left offset = 0x%x (%i)\n", voice, snd_gf1_i_read16(gus, 0x13), snd_gf1_i_read16(gus, 0x13) >> 4);
diff --git a/sound/isa/gus/gus_main.c b/sound/isa/gus/gus_main.c
index 3cf9b13c780a..3b8a0c880db5 100644
--- a/sound/isa/gus/gus_main.c
+++ b/sound/isa/gus/gus_main.c
@@ -465,19 +465,3 @@ EXPORT_SYMBOL(snd_gf1_mem_alloc);
EXPORT_SYMBOL(snd_gf1_mem_xfree);
EXPORT_SYMBOL(snd_gf1_mem_free);
EXPORT_SYMBOL(snd_gf1_mem_lock);
-
-/*
- * INIT part
- */
-
-static int __init alsa_gus_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_gus_exit(void)
-{
-}
-
-module_init(alsa_gus_init)
-module_exit(alsa_gus_exit)
diff --git a/sound/isa/gus/gus_reset.c b/sound/isa/gus/gus_reset.c
index 3d1fed0c2620..59b3f683d49b 100644
--- a/sound/isa/gus/gus_reset.c
+++ b/sound/isa/gus/gus_reset.c
@@ -292,7 +292,6 @@ void snd_gf1_free_voice(struct snd_gus_card * gus, struct snd_gus_voice *voice)
{
unsigned long flags;
void (*private_free)(struct snd_gus_voice *voice);
- void *private_data;
if (voice == NULL || !voice->use)
return;
@@ -300,7 +299,6 @@ void snd_gf1_free_voice(struct snd_gus_card * gus, struct snd_gus_voice *voice)
snd_gf1_clear_voices(gus, voice->number, voice->number);
spin_lock_irqsave(&gus->voice_alloc, flags);
private_free = voice->private_free;
- private_data = voice->private_data;
voice->private_free = NULL;
voice->private_data = NULL;
if (voice->pcm)
diff --git a/sound/isa/msnd/msnd.c b/sound/isa/msnd/msnd.c
index 569897f64fda..7c3203fe4869 100644
--- a/sound/isa/msnd/msnd.c
+++ b/sound/isa/msnd/msnd.c
@@ -54,7 +54,7 @@
#define LOGNAME "msnd"
-void snd_msnd_init_queue(void *base, int start, int size)
+void snd_msnd_init_queue(void __iomem *base, int start, int size)
{
writew(PCTODSP_BASED(start), base + JQS_wStart);
writew(PCTODSP_OFFSET(size) - 1, base + JQS_wSize);
@@ -270,7 +270,7 @@ int snd_msnd_DARQ(struct snd_msnd *chip, int bank)
udelay(1);
if (chip->capturePeriods == 2) {
- void *pDAQ = chip->mappedbase + DARQ_DATA_BUFF +
+ void __iomem *pDAQ = chip->mappedbase + DARQ_DATA_BUFF +
bank * DAQDS__size + DAQDS_wStart;
unsigned short offset = 0x3000 + chip->capturePeriodBytes;
@@ -309,7 +309,7 @@ int snd_msnd_DAPQ(struct snd_msnd *chip, int start)
{
u16 DAPQ_tail;
int protect = start, nbanks = 0;
- void *DAQD;
+ void __iomem *DAQD;
static int play_banks_submitted;
/* unsigned long flags;
spin_lock_irqsave(&chip->lock, flags); not necessary */
@@ -370,7 +370,7 @@ static void snd_msnd_play_reset_queue(struct snd_msnd *chip,
unsigned int pcm_count)
{
int n;
- void *pDAQ = chip->mappedbase + DAPQ_DATA_BUFF;
+ void __iomem *pDAQ = chip->mappedbase + DAPQ_DATA_BUFF;
chip->last_playbank = -1;
chip->playLimit = pcm_count * (pcm_periods - 1);
@@ -398,7 +398,7 @@ static void snd_msnd_capture_reset_queue(struct snd_msnd *chip,
unsigned int pcm_count)
{
int n;
- void *pDAQ;
+ void __iomem *pDAQ;
/* unsigned long flags; */
/* snd_msnd_init_queue(chip->DARQ, DARQ_DATA_BUFF, DARQ_BUFF_SIZE); */
@@ -485,7 +485,7 @@ static int snd_msnd_playback_open(struct snd_pcm_substream *substream)
clear_bit(F_WRITING, &chip->flags);
snd_msnd_enable_irq(chip);
- runtime->dma_area = chip->mappedbase;
+ runtime->dma_area = (__force void *)chip->mappedbase;
runtime->dma_bytes = 0x3000;
chip->playback_substream = substream;
@@ -508,7 +508,7 @@ static int snd_msnd_playback_hw_params(struct snd_pcm_substream *substream,
{
int i;
struct snd_msnd *chip = snd_pcm_substream_chip(substream);
- void *pDAQ = chip->mappedbase + DAPQ_DATA_BUFF;
+ void __iomem *pDAQ = chip->mappedbase + DAPQ_DATA_BUFF;
chip->play_sample_size = snd_pcm_format_width(params_format(params));
chip->play_channels = params_channels(params);
@@ -589,7 +589,7 @@ static int snd_msnd_capture_open(struct snd_pcm_substream *substream)
set_bit(F_AUDIO_READ_INUSE, &chip->flags);
snd_msnd_enable_irq(chip);
- runtime->dma_area = chip->mappedbase + 0x3000;
+ runtime->dma_area = (__force void *)chip->mappedbase + 0x3000;
runtime->dma_bytes = 0x3000;
memset(runtime->dma_area, 0, runtime->dma_bytes);
chip->capture_substream = substream;
@@ -654,7 +654,7 @@ static int snd_msnd_capture_hw_params(struct snd_pcm_substream *substream,
{
int i;
struct snd_msnd *chip = snd_pcm_substream_chip(substream);
- void *pDAQ = chip->mappedbase + DARQ_DATA_BUFF;
+ void __iomem *pDAQ = chip->mappedbase + DARQ_DATA_BUFF;
chip->capture_sample_size = snd_pcm_format_width(params_format(params));
chip->capture_channels = params_channels(params);
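The msnd changes above retype every pointer into the card's ioremapped window
as void __iomem *, which is again a sparse annotation: __iomem memory may only
be touched through accessors like readw()/writew(), and handing it to a
plain-pointer consumer (runtime->dma_area) requires an explicit __force cast
that documents the intentional address-space crossing. A sketch of both rules,
assuming the surrounding ALSA types:

    #include <linux/io.h>
    #include <sound/pcm.h>

    static void queue_init(void __iomem *base, u16 start, u16 size)
    {
            writew(start, base);            /* MMIO accessor, not *base = ... */
            writew(size, base + 2);
    }

    static void attach_dma_area(struct snd_pcm_runtime *runtime,
                                void __iomem *mapped)
    {
            runtime->dma_area = (__force void *)mapped;  /* deliberate cast */
    }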
diff --git a/sound/isa/msnd/msnd.h b/sound/isa/msnd/msnd.h
index 5f3c7dcd9f9d..80c718757eef 100644
--- a/sound/isa/msnd/msnd.h
+++ b/sound/isa/msnd/msnd.h
@@ -283,7 +283,7 @@ struct snd_msnd {
};
-void snd_msnd_init_queue(void *base, int start, int size);
+void snd_msnd_init_queue(void __iomem *base, int start, int size);
int snd_msnd_send_dsp_cmd(struct snd_msnd *chip, u8 cmd);
int snd_msnd_send_word(struct snd_msnd *chip,
diff --git a/sound/isa/msnd/msnd_midi.c b/sound/isa/msnd/msnd_midi.c
index 013d8d1170fe..42876b0cb68b 100644
--- a/sound/isa/msnd/msnd_midi.c
+++ b/sound/isa/msnd/msnd_midi.c
@@ -119,7 +119,7 @@ void snd_msndmidi_input_read(void *mpuv)
{
unsigned long flags;
struct snd_msndmidi *mpu = mpuv;
- void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF;
+ void __iomem *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF;
u16 head, tail, size;
spin_lock_irqsave(&mpu->input_lock, flags);
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
index 6c584d9b6c42..11af9c40bc05 100644
--- a/sound/isa/msnd/msnd_pinnacle.c
+++ b/sound/isa/msnd/msnd_pinnacle.c
@@ -82,10 +82,10 @@
static void set_default_audio_parameters(struct snd_msnd *chip)
{
- chip->play_sample_size = DEFSAMPLESIZE;
+ chip->play_sample_size = snd_pcm_format_width(DEFSAMPLESIZE);
chip->play_sample_rate = DEFSAMPLERATE;
chip->play_channels = DEFCHANNELS;
- chip->capture_sample_size = DEFSAMPLESIZE;
+ chip->capture_sample_size = snd_pcm_format_width(DEFSAMPLESIZE);
chip->capture_sample_rate = DEFSAMPLERATE;
chip->capture_channels = DEFCHANNELS;
}
@@ -169,7 +169,7 @@ static void snd_msnd_eval_dsp_msg(struct snd_msnd *chip, u16 wMessage)
static irqreturn_t snd_msnd_interrupt(int irq, void *dev_id)
{
struct snd_msnd *chip = dev_id;
- void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF;
+ void __iomem *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF;
u16 head, tail, size;
/* Send ack to DSP */
@@ -810,7 +810,7 @@ module_param(calibrate_signal, int, 0444);
#ifndef MSND_CLASSIC
module_param_array(digital, int, NULL, 0444);
module_param_hw_array(cfg, long, ioport, NULL, 0444);
-module_param_array(reset, int, 0, 0444);
+module_param_array(reset, int, NULL, 0444);
module_param_hw_array(mpu_io, long, ioport, NULL, 0444);
module_param_hw_array(mpu_irq, int, irq, NULL, 0444);
module_param_hw_array(ide_io0, long, ioport, NULL, 0444);
diff --git a/sound/isa/opti9xx/miro.c b/sound/isa/opti9xx/miro.c
index 8894c7c18ad6..c6136c6b0214 100644
--- a/sound/isa/opti9xx/miro.c
+++ b/sound/isa/opti9xx/miro.c
@@ -176,10 +176,13 @@ static int aci_busy_wait(struct snd_miro_aci *aci)
switch (timeout-ACI_MINTIME) {
case 0 ... 9:
out /= 10;
+ /* fall through */
case 10 ... 19:
out /= 10;
+ /* fall through */
case 20 ... 30:
out /= 10;
+ /* fall through */
default:
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(out);
@@ -834,6 +837,7 @@ static unsigned char snd_miro_read(struct snd_miro *chip,
retval = inb(chip->mc_base + 9);
break;
}
+ /* fall through */
case OPTi9XX_HW_82C929:
retval = inb(chip->mc_base + reg);
@@ -863,6 +867,7 @@ static void snd_miro_write(struct snd_miro *chip, unsigned char reg,
outb(value, chip->mc_base + 9);
break;
}
+ /* fall through */
case OPTi9XX_HW_82C929:
outb(value, chip->mc_base + reg);
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index 505cd81e19fa..ac0ab6eb40f0 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -261,6 +261,7 @@ static unsigned char snd_opti9xx_read(struct snd_opti9xx *chip,
retval = inb(chip->mc_base + 9);
break;
}
+ /* Fall through */
case OPTi9XX_HW_82C928:
case OPTi9XX_HW_82C929:
@@ -303,6 +304,7 @@ static void snd_opti9xx_write(struct snd_opti9xx *chip, unsigned char reg,
outb(value, chip->mc_base + 9);
break;
}
+ /* Fall through */
case OPTi9XX_HW_82C928:
case OPTi9XX_HW_82C929:
@@ -350,6 +352,7 @@ static int snd_opti9xx_configure(struct snd_opti9xx *chip,
snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(4), 0xf0, 0xfc);
/* enable wave audio */
snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(6), 0x02, 0x02);
+ /* Fall through */
case OPTi9XX_HW_82C925:
/* enable WSS mode */
diff --git a/sound/isa/sb/emu8000_patch.c b/sound/isa/sb/emu8000_patch.c
index c2e41d2762f7..d45a6b9d6437 100644
--- a/sound/isa/sb/emu8000_patch.c
+++ b/sound/isa/sb/emu8000_patch.c
@@ -165,11 +165,8 @@ snd_emu8000_sample_new(struct snd_emux *rec, struct snd_sf_sample *sp,
return 0;
/* be sure loop points start < end */
- if (sp->v.loopstart > sp->v.loopend) {
- int tmp = sp->v.loopstart;
- sp->v.loopstart = sp->v.loopend;
- sp->v.loopend = tmp;
- }
+ if (sp->v.loopstart > sp->v.loopend)
+ swap(sp->v.loopstart, sp->v.loopend);
/* compute true data size to be loaded */
truesize = sp->v.size;
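The emu8000 hunk above (and the identical one in emu10k1_patch.c below)
replaces an open-coded three-line exchange with the kernel's swap() macro,
which expands to a correctly typed temporary. A toy use matching the hunk:

    #include <linux/kernel.h>   /* swap() lives here in this kernel era */

    static void order_loop_points(unsigned int *start, unsigned int *end)
    {
            if (*start > *end)
                    swap(*start, *end);     /* typed temporary, no casts */
    }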
diff --git a/sound/isa/sb/emu8000_pcm.c b/sound/isa/sb/emu8000_pcm.c
index bc5af71d3bdb..f46f6ec3ea0c 100644
--- a/sound/isa/sb/emu8000_pcm.c
+++ b/sound/isa/sb/emu8000_pcm.c
@@ -470,7 +470,7 @@ static int emu8k_pcm_copy(struct snd_pcm_substream *subs,
/* convert to word unit */
pos = (pos << 1) + rec->loop_start[voice];
count <<= 1;
- LOOP_WRITE(rec, pos, src, count, COPY_UESR);
+ LOOP_WRITE(rec, pos, src, count, COPY_USER);
return 0;
}
diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
index fa5780bb0c68..bf3db0d2ea12 100644
--- a/sound/isa/sb/sb16_csp.c
+++ b/sound/isa/sb/sb16_csp.c
@@ -60,18 +60,18 @@ MODULE_FIRMWARE("sb16/ima_adpcm_capture.csp");
* RIFF data format
*/
struct riff_header {
- __u32 name;
- __u32 len;
+ __le32 name;
+ __le32 len;
};
struct desc_header {
struct riff_header info;
- __u16 func_nr;
- __u16 VOC_type;
- __u16 flags_play_rec;
- __u16 flags_16bit_8bit;
- __u16 flags_stereo_mono;
- __u16 flags_rates;
+ __le16 func_nr;
+ __le16 VOC_type;
+ __le16 flags_play_rec;
+ __le16 flags_16bit_8bit;
+ __le16 flags_stereo_mono;
+ __le16 flags_rates;
};
/*
@@ -93,7 +93,7 @@ static int snd_sb_csp_riff_load(struct snd_sb_csp * p,
struct snd_sb_csp_microcode __user * code);
static int snd_sb_csp_unload(struct snd_sb_csp * p);
static int snd_sb_csp_load_user(struct snd_sb_csp * p, const unsigned char __user *buf, int size, int load_flags);
-static int snd_sb_csp_autoload(struct snd_sb_csp * p, int pcm_sfmt, int play_rec_mode);
+static int snd_sb_csp_autoload(struct snd_sb_csp * p, snd_pcm_format_t pcm_sfmt, int play_rec_mode);
static int snd_sb_csp_check_version(struct snd_sb_csp * p);
static int snd_sb_csp_use(struct snd_sb_csp * p);
@@ -314,7 +314,7 @@ static int snd_sb_csp_riff_load(struct snd_sb_csp * p,
unsigned short func_nr = 0;
struct riff_header file_h, item_h, code_h;
- __u32 item_type;
+ __le32 item_type;
struct desc_header funcdesc_h;
unsigned long flags;
@@ -326,7 +326,7 @@ static int snd_sb_csp_riff_load(struct snd_sb_csp * p,
if (copy_from_user(&file_h, data_ptr, sizeof(file_h)))
return -EFAULT;
- if ((file_h.name != RIFF_HEADER) ||
+ if ((le32_to_cpu(file_h.name) != RIFF_HEADER) ||
(le32_to_cpu(file_h.len) >= SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE - sizeof(file_h))) {
snd_printd("%s: Invalid RIFF header\n", __func__);
return -EINVAL;
@@ -336,7 +336,7 @@ static int snd_sb_csp_riff_load(struct snd_sb_csp * p,
if (copy_from_user(&item_type, data_ptr, sizeof(item_type)))
return -EFAULT;
- if (item_type != CSP__HEADER) {
+ if (le32_to_cpu(item_type) != CSP__HEADER) {
snd_printd("%s: Invalid RIFF file type\n", __func__);
return -EINVAL;
}
@@ -346,12 +346,12 @@ static int snd_sb_csp_riff_load(struct snd_sb_csp * p,
if (copy_from_user(&item_h, data_ptr, sizeof(item_h)))
return -EFAULT;
data_ptr += sizeof(item_h);
- if (item_h.name != LIST_HEADER)
+ if (le32_to_cpu(item_h.name) != LIST_HEADER)
continue;
if (copy_from_user(&item_type, data_ptr, sizeof(item_type)))
return -EFAULT;
- switch (item_type) {
+ switch (le32_to_cpu(item_type)) {
case FUNC_HEADER:
if (copy_from_user(&funcdesc_h, data_ptr + sizeof(item_type), sizeof(funcdesc_h)))
return -EFAULT;
@@ -378,7 +378,7 @@ static int snd_sb_csp_riff_load(struct snd_sb_csp * p,
return -EFAULT;
/* init microcode blocks */
- if (code_h.name != INIT_HEADER)
+ if (le32_to_cpu(code_h.name) != INIT_HEADER)
break;
data_ptr += sizeof(code_h);
err = snd_sb_csp_load_user(p, data_ptr, le32_to_cpu(code_h.len),
@@ -391,7 +391,7 @@ static int snd_sb_csp_riff_load(struct snd_sb_csp * p,
if (copy_from_user(&code_h, data_ptr, sizeof(code_h)))
return -EFAULT;
- if (code_h.name != MAIN_HEADER) {
+ if (le32_to_cpu(code_h.name) != MAIN_HEADER) {
snd_printd("%s: Missing 'main' microcode\n", __func__);
return -EINVAL;
}
@@ -726,7 +726,7 @@ static int snd_sb_csp_firmware_load(struct snd_sb_csp *p, int index, int flags)
* autoload hardware codec if necessary
* return 0 if CSP is loaded and ready to run (p->running != 0)
*/
-static int snd_sb_csp_autoload(struct snd_sb_csp * p, int pcm_sfmt, int play_rec_mode)
+static int snd_sb_csp_autoload(struct snd_sb_csp * p, snd_pcm_format_t pcm_sfmt, int play_rec_mode)
{
unsigned long flags;
int err = 0;
@@ -736,7 +736,7 @@ static int snd_sb_csp_autoload(struct snd_sb_csp * p, int pcm_sfmt, int play_rec
return -EBUSY;
/* autoload microcode only if requested hardware codec is not already loaded */
- if (((1 << pcm_sfmt) & p->acc_format) && (play_rec_mode & p->mode)) {
+ if (((1U << (__force int)pcm_sfmt) & p->acc_format) && (play_rec_mode & p->mode)) {
p->running = SNDRV_SB_CSP_ST_AUTO;
} else {
switch (pcm_sfmt) {
@@ -1185,19 +1185,3 @@ static void info_read(struct snd_info_entry *entry, struct snd_info_buffer *buff
/* */
EXPORT_SYMBOL(snd_sb_csp_new);
-
-/*
- * INIT part
- */
-
-static int __init alsa_sb_csp_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_sb_csp_exit(void)
-{
-}
-
-module_init(alsa_sb_csp_init)
-module_exit(alsa_sb_csp_exit)
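The sb16_csp changes above show the standard pattern for wire- or file-format
data: declare the on-disk fields as __le32/__le16 and convert exactly once at
the point of use, so sparse flags any direct comparison against a CPU-endian
constant. The same retyping recurs below in atiixp.c, atiixp_modem.c,
cs5535audio.h, bt87x.c and the echoaudio comm_page. A self-contained sketch:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct wire_header {        /* layout fixed by the file format */
            __le32 magic;
            __le32 len;
    };

    static int check_header(const struct wire_header *h, u32 expected_magic)
    {
            if (le32_to_cpu(h->magic) != expected_magic)
                    return -EINVAL;
            return le32_to_cpu(h->len) > 0 ? 0 : -EINVAL;
    }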
diff --git a/sound/isa/sb/sb16_main.c b/sound/isa/sb/sb16_main.c
index 3e39ba220c39..37e6ce7b0b13 100644
--- a/sound/isa/sb/sb16_main.c
+++ b/sound/isa/sb/sb16_main.c
@@ -49,6 +49,9 @@ MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Routines for control of 16-bit SoundBlaster cards and clones");
MODULE_LICENSE("GPL");
+#define runtime_format_bits(runtime) \
+ ((unsigned int)pcm_format_to_bits((runtime)->format))
+
#ifdef CONFIG_SND_SB16_CSP
static void snd_sb16_csp_playback_prepare(struct snd_sb *chip, struct snd_pcm_runtime *runtime)
{
@@ -58,7 +61,7 @@ static void snd_sb16_csp_playback_prepare(struct snd_sb *chip, struct snd_pcm_ru
if (csp->running & SNDRV_SB_CSP_ST_LOADED) {
/* manually loaded codec */
if ((csp->mode & SNDRV_SB_CSP_MODE_DSP_WRITE) &&
- ((1U << runtime->format) == csp->acc_format)) {
+ (runtime_format_bits(runtime) == csp->acc_format)) {
/* Supported runtime PCM format for playback */
if (csp->ops.csp_use(csp) == 0) {
/* If CSP was successfully acquired */
@@ -66,7 +69,7 @@ static void snd_sb16_csp_playback_prepare(struct snd_sb *chip, struct snd_pcm_ru
}
} else if ((csp->mode & SNDRV_SB_CSP_MODE_QSOUND) && (csp->q_enabled)) {
/* QSound decoder is loaded and enabled */
- if ((1 << runtime->format) & (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 |
+ if (runtime_format_bits(runtime) & (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 |
SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE)) {
/* Only for simple PCM formats */
if (csp->ops.csp_use(csp) == 0) {
@@ -106,7 +109,7 @@ static void snd_sb16_csp_capture_prepare(struct snd_sb *chip, struct snd_pcm_run
if (csp->running & SNDRV_SB_CSP_ST_LOADED) {
/* manually loaded codec */
if ((csp->mode & SNDRV_SB_CSP_MODE_DSP_READ) &&
- ((1U << runtime->format) == csp->acc_format)) {
+ (runtime_format_bits(runtime) == csp->acc_format)) {
/* Supported runtime PCM format for capture */
if (csp->ops.csp_use(csp) == 0) {
/* If CSP was successfully acquired */
@@ -897,19 +900,3 @@ EXPORT_SYMBOL(snd_sb16dsp_pcm);
EXPORT_SYMBOL(snd_sb16dsp_get_pcm_ops);
EXPORT_SYMBOL(snd_sb16dsp_configure);
EXPORT_SYMBOL(snd_sb16dsp_interrupt);
-
-/*
- * INIT part
- */
-
-static int __init alsa_sb16_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_sb16_exit(void)
-{
-}
-
-module_init(alsa_sb16_init)
-module_exit(alsa_sb16_exit)
diff --git a/sound/isa/sb/sb8_main.c b/sound/isa/sb/sb8_main.c
index d45df5c54423..481797744b3c 100644
--- a/sound/isa/sb/sb8_main.c
+++ b/sound/isa/sb/sb8_main.c
@@ -381,7 +381,6 @@ static int snd_sb8_capture_trigger(struct snd_pcm_substream *substream,
irqreturn_t snd_sb8dsp_interrupt(struct snd_sb *chip)
{
struct snd_pcm_substream *substream;
- struct snd_pcm_runtime *runtime;
snd_sb_ack_8bit(chip);
switch (chip->mode) {
@@ -391,7 +390,6 @@ irqreturn_t snd_sb8dsp_interrupt(struct snd_sb *chip)
/* fallthru */
case SB_MODE_PLAYBACK_8:
substream = chip->playback_substream;
- runtime = substream->runtime;
if (chip->playback_format == SB_DSP_OUTPUT)
snd_sb8_playback_trigger(substream, SNDRV_PCM_TRIGGER_START);
snd_pcm_period_elapsed(substream);
@@ -402,7 +400,6 @@ irqreturn_t snd_sb8dsp_interrupt(struct snd_sb *chip)
/* fallthru */
case SB_MODE_CAPTURE_8:
substream = chip->capture_substream;
- runtime = substream->runtime;
if (chip->capture_format == SB_DSP_INPUT)
snd_sb8_capture_trigger(substream, SNDRV_PCM_TRIGGER_START);
snd_pcm_period_elapsed(substream);
@@ -624,19 +621,3 @@ EXPORT_SYMBOL(snd_sb8dsp_interrupt);
/* sb8_midi.c */
EXPORT_SYMBOL(snd_sb8dsp_midi_interrupt);
EXPORT_SYMBOL(snd_sb8dsp_midi);
-
-/*
- * INIT part
- */
-
-static int __init alsa_sb8_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_sb8_exit(void)
-{
-}
-
-module_init(alsa_sb8_init)
-module_exit(alsa_sb8_exit)
diff --git a/sound/isa/sb/sb_common.c b/sound/isa/sb/sb_common.c
index 787a4ade4afd..90b254aaef74 100644
--- a/sound/isa/sb/sb_common.c
+++ b/sound/isa/sb/sb_common.c
@@ -305,19 +305,3 @@ EXPORT_SYMBOL(snd_sbmixer_add_ctl);
EXPORT_SYMBOL(snd_sbmixer_suspend);
EXPORT_SYMBOL(snd_sbmixer_resume);
#endif
-
-/*
- * INIT part
- */
-
-static int __init alsa_sb_common_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_sb_common_exit(void)
-{
-}
-
-module_init(alsa_sb_common_init)
-module_exit(alsa_sb_common_exit)
diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
index 8a852042a066..32453f81b95a 100644
--- a/sound/isa/wss/wss_lib.c
+++ b/sound/isa/wss/wss_lib.c
@@ -541,7 +541,7 @@ static unsigned char snd_wss_get_rate(unsigned int rate)
}
static unsigned char snd_wss_get_format(struct snd_wss *chip,
- int format,
+ snd_pcm_format_t format,
int channels)
{
unsigned char rformat;
@@ -2279,19 +2279,3 @@ const struct snd_pcm_ops *snd_wss_get_pcm_ops(int direction)
&snd_wss_playback_ops : &snd_wss_capture_ops;
}
EXPORT_SYMBOL(snd_wss_get_pcm_ops);
-
-/*
- * INIT part
- */
-
-static int __init alsa_wss_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_wss_exit(void)
-{
-}
-
-module_init(alsa_wss_init);
-module_exit(alsa_wss_exit);
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index 9fb68b35de5a..3ec9391a4736 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -685,7 +685,6 @@ static const struct snd_pcm_ops snd_sgio2audio_playback1_ops = {
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static const struct snd_pcm_ops snd_sgio2audio_playback2_ops = {
@@ -698,7 +697,6 @@ static const struct snd_pcm_ops snd_sgio2audio_playback2_ops = {
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static const struct snd_pcm_ops snd_sgio2audio_capture_ops = {
@@ -711,7 +709,6 @@ static const struct snd_pcm_ops snd_sgio2audio_capture_ops = {
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
/*
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 1ef7cdf1d3e8..f4459d1a9d67 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -2941,19 +2941,3 @@ int snd_ac97_tune_hardware(struct snd_ac97 *ac97,
}
EXPORT_SYMBOL(snd_ac97_tune_hardware);
-
-/*
- * INIT part
- */
-
-static int __init alsa_ac97_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_ac97_exit(void)
-{
-}
-
-module_init(alsa_ac97_init)
-module_exit(alsa_ac97_exit)
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index 39547e32e584..9f569379b77e 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -1484,12 +1484,9 @@ static struct snd_pcm_hardware snd_ali_capture =
static void snd_ali_pcm_free_substream(struct snd_pcm_runtime *runtime)
{
struct snd_ali_voice *pvoice = runtime->private_data;
- struct snd_ali *codec;
- if (pvoice) {
- codec = pvoice->codec;
+ if (pvoice)
snd_ali_free_voice(pvoice->codec, pvoice);
- }
}
static int snd_ali_open(struct snd_pcm_substream *substream, int rec,
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index 64e0961f93ba..a31fe1550903 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -311,27 +311,29 @@ static void print_hwparams(struct snd_pcm_substream *substream,
snd_pcm_format_width(params_format(p)) / 8);
}
+#define INVALID_FORMAT (__force snd_pcm_format_t)(-1)
+
static snd_pcm_format_t hpi_to_alsa_formats[] = {
- -1, /* INVALID */
+ INVALID_FORMAT, /* INVALID */
SNDRV_PCM_FORMAT_U8, /* HPI_FORMAT_PCM8_UNSIGNED 1 */
SNDRV_PCM_FORMAT_S16, /* HPI_FORMAT_PCM16_SIGNED 2 */
- -1, /* HPI_FORMAT_MPEG_L1 3 */
+ INVALID_FORMAT, /* HPI_FORMAT_MPEG_L1 3 */
SNDRV_PCM_FORMAT_MPEG, /* HPI_FORMAT_MPEG_L2 4 */
SNDRV_PCM_FORMAT_MPEG, /* HPI_FORMAT_MPEG_L3 5 */
- -1, /* HPI_FORMAT_DOLBY_AC2 6 */
- -1, /* HPI_FORMAT_DOLBY_AC3 7 */
+ INVALID_FORMAT, /* HPI_FORMAT_DOLBY_AC2 6 */
+ INVALID_FORMAT, /* HPI_FORMAT_DOLBY_AC3 7 */
SNDRV_PCM_FORMAT_S16_BE,/* HPI_FORMAT_PCM16_BIGENDIAN 8 */
- -1, /* HPI_FORMAT_AA_TAGIT1_HITS 9 */
- -1, /* HPI_FORMAT_AA_TAGIT1_INSERTS 10 */
+ INVALID_FORMAT, /* HPI_FORMAT_AA_TAGIT1_HITS 9 */
+ INVALID_FORMAT, /* HPI_FORMAT_AA_TAGIT1_INSERTS 10 */
SNDRV_PCM_FORMAT_S32, /* HPI_FORMAT_PCM32_SIGNED 11 */
- -1, /* HPI_FORMAT_RAW_BITSTREAM 12 */
- -1, /* HPI_FORMAT_AA_TAGIT1_HITS_EX1 13 */
+ INVALID_FORMAT, /* HPI_FORMAT_RAW_BITSTREAM 12 */
+ INVALID_FORMAT, /* HPI_FORMAT_AA_TAGIT1_HITS_EX1 13 */
SNDRV_PCM_FORMAT_FLOAT, /* HPI_FORMAT_PCM32_FLOAT 14 */
#if 1
/* ALSA can't handle 3 byte sample size together with power-of-2
* constraint on buffer_bytes, so disable this format
*/
- -1
+ INVALID_FORMAT
#else
/* SNDRV_PCM_FORMAT_S24_3LE */ /* HPI_FORMAT_PCM24_SIGNED 15 */
#endif
@@ -1023,7 +1025,7 @@ static u64 snd_card_asihpi_playback_formats(struct snd_card_asihpi *asihpi,
format, sample_rate, 128000, 0);
if (!err)
err = hpi_outstream_query_format(h_stream, &hpi_format);
- if (!err && (hpi_to_alsa_formats[format] != -1))
+ if (!err && (hpi_to_alsa_formats[format] != INVALID_FORMAT))
formats |= pcm_format_to_bits(hpi_to_alsa_formats[format]);
}
return formats;
@@ -1205,7 +1207,7 @@ static u64 snd_card_asihpi_capture_formats(struct snd_card_asihpi *asihpi,
format, sample_rate, 128000, 0);
if (!err)
err = hpi_instream_query_format(h_stream, &hpi_format);
- if (!err && (hpi_to_alsa_formats[format] != -1))
+ if (!err && (hpi_to_alsa_formats[format] != INVALID_FORMAT))
formats |= pcm_format_to_bits(hpi_to_alsa_formats[format]);
}
return formats;
diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
index 8d5abfa4e24b..2864698436a5 100644
--- a/sound/pci/asihpi/hpi6205.c
+++ b/sound/pci/asihpi/hpi6205.c
@@ -635,7 +635,6 @@ static u16 create_adapter_obj(struct hpi_adapter_obj *pao,
{
struct hpi_message hm;
struct hpi_response hr;
- u32 max_streams;
HPI_DEBUG_LOG(VERBOSE, "init ADAPTER_GET_INFO\n");
memset(&hm, 0, sizeof(hm));
@@ -660,10 +659,6 @@ static u16 create_adapter_obj(struct hpi_adapter_obj *pao,
pao->type = hr.u.ax.info.adapter_type;
pao->index = hr.u.ax.info.adapter_index;
- max_streams =
- hr.u.ax.info.num_outstreams +
- hr.u.ax.info.num_instreams;
-
HPI_DEBUG_LOG(VERBOSE,
"got adapter info type %x index %d serial %d\n",
hr.u.ax.info.adapter_type, hr.u.ax.info.adapter_index,
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index 7ae63d452bba..a1e4944dcfe8 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -207,10 +207,10 @@ struct atiixp;
*/
struct atiixp_dma_desc {
- u32 addr; /* DMA buffer address */
+ __le32 addr; /* DMA buffer address */
u16 status; /* status bits */
u16 size; /* size of the packet in dwords */
- u32 next; /* address of the next packet descriptor */
+ __le32 next; /* address of the next packet descriptor */
};
/*
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index a586635664e0..dc1de860cedf 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -183,10 +183,10 @@ struct atiixp_modem;
*/
struct atiixp_dma_desc {
- u32 addr; /* DMA buffer address */
+ __le32 addr; /* DMA buffer address */
u16 status; /* status bits */
u16 size; /* size of the packet in dwords */
- u32 next; /* address of the next packet descriptor */
+ __le32 next; /* address of the next packet descriptor */
};
/*
diff --git a/sound/pci/au88x0/au88x0.h b/sound/pci/au88x0/au88x0.h
index bcc648bf6478..e3e31f07d766 100644
--- a/sound/pci/au88x0/au88x0.h
+++ b/sound/pci/au88x0/au88x0.h
@@ -241,7 +241,7 @@ static int vortex_core_init(vortex_t * card);
static int vortex_core_shutdown(vortex_t * card);
static void vortex_enable_int(vortex_t * card);
static irqreturn_t vortex_interrupt(int irq, void *dev_id);
-static int vortex_alsafmt_aspfmt(int alsafmt, vortex_t *v);
+static int vortex_alsafmt_aspfmt(snd_pcm_format_t alsafmt, vortex_t *v);
/* Connection stuff. */
static void vortex_connect_default(vortex_t * vortex, int en);
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 4083c8b01619..2e5b460a847c 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -2770,7 +2770,7 @@ static int vortex_core_shutdown(vortex_t * vortex)
/* Alsa support. */
-static int vortex_alsafmt_aspfmt(int alsafmt, vortex_t *v)
+static int vortex_alsafmt_aspfmt(snd_pcm_format_t alsafmt, vortex_t *v)
{
int fmt;
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index d8ade8771a32..ba971042f871 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -228,14 +228,14 @@ static int snd_bt87x_create_risc(struct snd_bt87x *chip, struct snd_pcm_substrea
unsigned int periods, unsigned int period_bytes)
{
unsigned int i, offset;
- u32 *risc;
+ __le32 *risc;
if (chip->dma_risc.area == NULL) {
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
PAGE_ALIGN(MAX_RISC_SIZE), &chip->dma_risc) < 0)
return -ENOMEM;
}
- risc = (u32 *)chip->dma_risc.area;
+ risc = (__le32 *)chip->dma_risc.area;
offset = 0;
*risc++ = cpu_to_le32(RISC_SYNC | RISC_SYNC_FM1);
*risc++ = cpu_to_le32(0);
diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c
index abb01ce66983..8d0a3d357345 100644
--- a/sound/pci/cs46xx/dsp_spos_scb_lib.c
+++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c
@@ -73,13 +73,10 @@ static void cs46xx_dsp_proc_scb_info_read (struct snd_info_entry *entry,
{
struct proc_scb_info * scb_info = entry->private_data;
struct dsp_scb_descriptor * scb = scb_info->scb_desc;
- struct dsp_spos_instance * ins;
struct snd_cs46xx *chip = scb_info->chip;
int j,col;
void __iomem *dst = chip->region.idx[1].remap_addr + DSP_PARAMETER_BYTE_OFFSET;
- ins = chip->dsp_spos_instance;
-
mutex_lock(&chip->spos_mutex);
snd_iprintf(buffer,"%04x %s:\n",scb->address,scb->scb_name);
diff --git a/sound/pci/cs5535audio/cs5535audio.c b/sound/pci/cs5535audio/cs5535audio.c
index de409cda50aa..4590086d9cd8 100644
--- a/sound/pci/cs5535audio/cs5535audio.c
+++ b/sound/pci/cs5535audio/cs5535audio.c
@@ -192,8 +192,6 @@ static void process_bm0_irq(struct cs5535audio *cs5535au)
bm_stat = cs_readb(cs5535au, ACC_BM0_STATUS);
spin_unlock(&cs5535au->reg_lock);
if (bm_stat & EOP) {
- struct cs5535audio_dma *dma;
- dma = cs5535au->playback_substream->runtime->private_data;
snd_pcm_period_elapsed(cs5535au->playback_substream);
} else {
dev_err(cs5535au->card->dev,
@@ -208,11 +206,8 @@ static void process_bm1_irq(struct cs5535audio *cs5535au)
spin_lock(&cs5535au->reg_lock);
bm_stat = cs_readb(cs5535au, ACC_BM1_STATUS);
spin_unlock(&cs5535au->reg_lock);
- if (bm_stat & EOP) {
- struct cs5535audio_dma *dma;
- dma = cs5535au->capture_substream->runtime->private_data;
+ if (bm_stat & EOP)
snd_pcm_period_elapsed(cs5535au->capture_substream);
- }
}
static irqreturn_t snd_cs5535audio_interrupt(int irq, void *dev_id)
diff --git a/sound/pci/cs5535audio/cs5535audio.h b/sound/pci/cs5535audio/cs5535audio.h
index f4fcdf93f3c8..d84620a0c26c 100644
--- a/sound/pci/cs5535audio/cs5535audio.h
+++ b/sound/pci/cs5535audio/cs5535audio.h
@@ -67,9 +67,9 @@ struct cs5535audio_dma_ops {
};
struct cs5535audio_dma_desc {
- u32 addr;
- u16 size;
- u16 ctlreserved;
+ __le32 addr;
+ __le16 size;
+ __le16 ctlreserved;
};
struct cs5535audio_dma {
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index ee7065f6e162..326caec854e1 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -158,8 +158,8 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
lastdesc->addr = cpu_to_le32((u32) dma->desc_buf.addr);
lastdesc->size = 0;
lastdesc->ctlreserved = cpu_to_le16(PRD_JMP);
- jmpprd_addr = cpu_to_le32(lastdesc->addr +
- (sizeof(struct cs5535audio_dma_desc)*periods));
+ jmpprd_addr = (u32)dma->desc_buf.addr +
+ sizeof(struct cs5535audio_dma_desc) * periods;
dma->substream = substream;
dma->period_bytes = period_bytes;
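The cs5535audio hunk above fixes a double byte swap: lastdesc->addr had
already been stored via cpu_to_le32(), so adding an offset to it and feeding
the sum through cpu_to_le32() again would un-swap the address on big-endian
hosts. The fix recomputes the jump address from the CPU-endian
dma->desc_buf.addr instead. In sketch form, with a hypothetical descriptor
type:

    struct my_desc {
            __le32 next;        /* hardware reads this in little-endian */
    };

    static void set_jump_desc(struct my_desc *desc, u32 cpu_base, u32 offset)
    {
            /*
             * Do the arithmetic on the CPU-endian source and convert once,
             * where the hardware-visible field is written. The removed code
             * added the offset to an already-__le32 value and swapped again,
             * corrupting the address on big-endian machines.
             */
            desc->next = cpu_to_le32(cpu_base + offset);
    }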
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
index 8e6eb9d7984b..6a051a1c3724 100644
--- a/sound/pci/ctxfi/cthw20k1.c
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -1319,7 +1319,7 @@ static int hw_pll_init(struct hw *hw, unsigned int rsr)
break;
hw_write_20kx(hw, PLLCTL, pllctl);
- mdelay(40);
+ msleep(40);
}
if (i >= 3) {
dev_alert(hw->card->dev, "PLL initialization failed!!!\n");
@@ -1407,7 +1407,7 @@ static int hw_reset_dac(struct hw *hw)
/* To be effective, need to reset the DAC twice. */
for (i = 0; i < 2; i++) {
/* set gpio */
- mdelay(100);
+ msleep(100);
gpioorg = (u16)hw_read_20kx(hw, GPIO);
gpioorg &= 0xfffd;
hw_write_20kx(hw, GPIO, gpioorg);
@@ -2030,7 +2030,7 @@ static int hw_card_init(struct hw *hw, struct card_conf *info)
hw_write_20kx(hw, GIE, 0);
/* Reset all SRC pending interrupts */
hw_write_20kx(hw, SRCIP, 0);
- mdelay(30);
+ msleep(30);
/* Detect the card ID and configure GPIO accordingly. */
switch (hw->model) {
diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
index b866d6b2c923..3c966fafc754 100644
--- a/sound/pci/ctxfi/cthw20k2.c
+++ b/sound/pci/ctxfi/cthw20k2.c
@@ -1316,12 +1316,12 @@ static int hw_pll_init(struct hw *hw, unsigned int rsr)
set_field(&pllctl, PLLCTL_FD, 48000 == rsr ? 16 - 4 : 147 - 4);
set_field(&pllctl, PLLCTL_RD, 48000 == rsr ? 1 - 1 : 10 - 1);
hw_write_20kx(hw, PLL_CTL, pllctl);
- mdelay(40);
+ msleep(40);
pllctl = hw_read_20kx(hw, PLL_CTL);
set_field(&pllctl, PLLCTL_FD, 48000 == rsr ? 16 - 2 : 147 - 2);
hw_write_20kx(hw, PLL_CTL, pllctl);
- mdelay(40);
+ msleep(40);
for (i = 0; i < 1000; i++) {
pllstat = hw_read_20kx(hw, PLL_STAT);
@@ -1584,7 +1584,7 @@ static void hw_dac_stop(struct hw *hw)
data = hw_read_20kx(hw, GPIO_DATA);
data &= 0xFFFFFFFD;
hw_write_20kx(hw, GPIO_DATA, data);
- mdelay(10);
+ usleep_range(10000, 11000);
}
static void hw_dac_start(struct hw *hw)
@@ -1593,7 +1593,7 @@ static void hw_dac_start(struct hw *hw)
data = hw_read_20kx(hw, GPIO_DATA);
data |= 0x2;
hw_write_20kx(hw, GPIO_DATA, data);
- mdelay(50);
+ msleep(50);
}
static void hw_dac_reset(struct hw *hw)
@@ -1864,11 +1864,11 @@ static int hw_adc_init(struct hw *hw, const struct adc_conf *info)
hw_write_20kx(hw, GPIO_DATA, data);
}
- mdelay(10);
+ usleep_range(10000, 11000);
/* Return the ADC to normal operation. */
data |= (0x1 << 15);
hw_write_20kx(hw, GPIO_DATA, data);
- mdelay(50);
+ msleep(50);
/* I2C write to register offset 0x0B to set ADC LRCLK polarity */
/* invert bit, interface format to I2S, word length to 24-bit, */
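The ctxfi conversions above follow the kernel's timers-howto guidance: in
sleepable context a busy-wait like mdelay() wastes CPU, so delays of roughly
10 ms and up become msleep(), while usleep_range() covers shorter waits with
an explicit slack window for the hrtimer. A sketch of the rule of thumb:

    #include <linux/delay.h>

    static void settle_hw(void)
    {
            usleep_range(10000, 11000);  /* ~10 ms: hrtimer-backed sleep  */
            msleep(50);                  /* tens of ms: jiffy sleep is fine */
            /*
             * mdelay() remains appropriate only in atomic context,
             * where sleeping is forbidden.
             */
    }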
diff --git a/sound/pci/ctxfi/ctmixer.c b/sound/pci/ctxfi/ctmixer.c
index db710d0a609f..4777d50fbbf8 100644
--- a/sound/pci/ctxfi/ctmixer.c
+++ b/sound/pci/ctxfi/ctmixer.c
@@ -938,17 +938,18 @@ static int ct_mixer_topology_build(struct ct_mixer *mixer)
struct sum *sum;
struct amixer *amix_d, *amix_s;
enum CT_AMIXER_CTL i, j;
+ enum CT_SUM_CTL k;
/* Build topology from destination to source */
/* Set up Master mixer */
- for (i = AMIXER_MASTER_F, j = SUM_IN_F;
- i <= AMIXER_MASTER_S; i++, j++) {
+ for (i = AMIXER_MASTER_F, k = SUM_IN_F;
+ i <= AMIXER_MASTER_S; i++, k++) {
amix_d = mixer->amixers[i*CHN_NUM];
- sum = mixer->sums[j*CHN_NUM];
+ sum = mixer->sums[k*CHN_NUM];
amix_d->ops->setup(amix_d, &sum->rsc, INIT_VOL, NULL);
amix_d = mixer->amixers[i*CHN_NUM+1];
- sum = mixer->sums[j*CHN_NUM+1];
+ sum = mixer->sums[k*CHN_NUM+1];
amix_d->ops->setup(amix_d, &sum->rsc, INIT_VOL, NULL);
}
@@ -972,12 +973,12 @@ static int ct_mixer_topology_build(struct ct_mixer *mixer)
amix_d->ops->setup(amix_d, &amix_s->rsc, INIT_VOL, NULL);
/* Set up PCM-in mixer */
- for (i = AMIXER_PCM_F, j = SUM_IN_F; i <= AMIXER_PCM_S; i++, j++) {
+ for (i = AMIXER_PCM_F, k = SUM_IN_F; i <= AMIXER_PCM_S; i++, k++) {
amix_d = mixer->amixers[i*CHN_NUM];
- sum = mixer->sums[j*CHN_NUM];
+ sum = mixer->sums[k*CHN_NUM];
amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
amix_d = mixer->amixers[i*CHN_NUM+1];
- sum = mixer->sums[j*CHN_NUM+1];
+ sum = mixer->sums[k*CHN_NUM+1];
amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
}
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 358ef7dcf410..907cf1a46712 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -713,6 +713,7 @@ static int pcm_prepare(struct snd_pcm_substream *substream)
break;
case SNDRV_PCM_FORMAT_S32_BE:
format.data_are_bigendian = 1;
+ /* fall through */
case SNDRV_PCM_FORMAT_S32_LE:
format.bits_per_sample = 32;
break;
@@ -764,6 +765,7 @@ static int pcm_trigger(struct snd_pcm_substream *substream, int cmd)
pipe->last_counter = 0;
pipe->position = 0;
*pipe->dma_counter = 0;
+ /* fall through */
case PIPE_STATE_PAUSED:
pipe->state = PIPE_STATE_STARTED;
break;
diff --git a/sound/pci/echoaudio/echoaudio.h b/sound/pci/echoaudio/echoaudio.h
index 44b390a667d5..be4d0489394a 100644
--- a/sound/pci/echoaudio/echoaudio.h
+++ b/sound/pci/echoaudio/echoaudio.h
@@ -294,7 +294,7 @@
struct audiopipe {
- volatile u32 *dma_counter; /* Commpage register that contains
+ volatile __le32 *dma_counter; /* Commpage register that contains
* the current dma position
* (lower 32 bits only)
*/
diff --git a/sound/pci/echoaudio/echoaudio_3g.c b/sound/pci/echoaudio/echoaudio_3g.c
index 22c786b8a889..cc3c79387194 100644
--- a/sound/pci/echoaudio/echoaudio_3g.c
+++ b/sound/pci/echoaudio/echoaudio_3g.c
@@ -73,19 +73,21 @@ register. write_control_reg sends the new control register value to the DSP. */
static int write_control_reg(struct echoaudio *chip, u32 ctl, u32 frq,
char force)
{
+ __le32 ctl_reg, frq_reg;
+
if (wait_handshake(chip))
return -EIO;
dev_dbg(chip->card->dev,
"WriteControlReg: Setting 0x%x, 0x%x\n", ctl, frq);
- ctl = cpu_to_le32(ctl);
- frq = cpu_to_le32(frq);
+ ctl_reg = cpu_to_le32(ctl);
+ frq_reg = cpu_to_le32(frq);
- if (ctl != chip->comm_page->control_register ||
- frq != chip->comm_page->e3g_frq_register || force) {
- chip->comm_page->e3g_frq_register = frq;
- chip->comm_page->control_register = ctl;
+ if (ctl_reg != chip->comm_page->control_register ||
+ frq_reg != chip->comm_page->e3g_frq_register || force) {
+ chip->comm_page->e3g_frq_register = frq_reg;
+ chip->comm_page->control_register = ctl_reg;
clear_handshake(chip);
return send_vector(chip, DSP_VC_WRITE_CONTROL_REG);
}
diff --git a/sound/pci/echoaudio/echoaudio_dsp.c b/sound/pci/echoaudio/echoaudio_dsp.c
index 15aae2fad8e4..b181752b8481 100644
--- a/sound/pci/echoaudio/echoaudio_dsp.c
+++ b/sound/pci/echoaudio/echoaudio_dsp.c
@@ -679,7 +679,7 @@ static int restore_dsp_rettings(struct echoaudio *chip)
/* Gina20/Darla20 only. Should be harmless for other cards. */
chip->comm_page->gd_clock_state = GD_CLOCK_UNDEF;
chip->comm_page->gd_spdif_status = GD_SPDIF_STATUS_UNDEF;
- chip->comm_page->handshake = 0xffffffff;
+ chip->comm_page->handshake = cpu_to_le32(0xffffffff);
/* Restore output busses */
for (i = 0; i < num_busses_out(chip); i++) {
@@ -989,7 +989,7 @@ static int init_dsp_comm_page(struct echoaudio *chip)
/* Init the comm page */
chip->comm_page->comm_size =
cpu_to_le32(sizeof(struct comm_page));
- chip->comm_page->handshake = 0xffffffff;
+ chip->comm_page->handshake = cpu_to_le32(0xffffffff);
chip->comm_page->midi_out_free_count =
cpu_to_le32(DSP_MIDI_OUT_FIFO_SIZE);
chip->comm_page->sample_rate = cpu_to_le32(44100);
@@ -1087,7 +1087,7 @@ static int allocate_pipes(struct echoaudio *chip, struct audiopipe *pipe,
/* The counter register is where the DSP writes the 32 bit DMA
position for a pipe. The DSP is constantly updating this value as
it moves data. The DMA counter is in units of bytes, not samples. */
- pipe->dma_counter = &chip->comm_page->position[pipe_index];
+ pipe->dma_counter = (__le32 *)&chip->comm_page->position[pipe_index];
*pipe->dma_counter = 0;
return pipe_index;
}
diff --git a/sound/pci/echoaudio/echoaudio_dsp.h b/sound/pci/echoaudio/echoaudio_dsp.h
index cb7d75a0a503..aa9129519795 100644
--- a/sound/pci/echoaudio/echoaudio_dsp.h
+++ b/sound/pci/echoaudio/echoaudio_dsp.h
@@ -627,8 +627,8 @@ sg_entry struct is read by the DSP, so all values must be little-endian. */
#define MAX_SGLIST_ENTRIES 512
struct sg_entry {
- u32 addr;
- u32 size;
+ __le32 addr;
+ __le32 size;
};
@@ -643,18 +643,18 @@ struct sg_entry {
****************************************************************************/
struct comm_page { /* Base Length*/
- u32 comm_size; /* size of this object 0x000 4 */
- u32 flags; /* See Appendix A below 0x004 4 */
- u32 unused; /* Unused entry 0x008 4 */
- u32 sample_rate; /* Card sample rate in Hz 0x00c 4 */
- u32 handshake; /* DSP command handshake 0x010 4 */
- u32 cmd_start; /* Chs. to start mask 0x014 4 */
- u32 cmd_stop; /* Chs. to stop mask 0x018 4 */
- u32 cmd_reset; /* Chs. to reset mask 0x01c 4 */
- u16 audio_format[DSP_MAXPIPES]; /* Chs. audio format 0x020 32*2 */
+ __le32 comm_size; /* size of this object 0x000 4 */
+ __le32 flags; /* See Appendix A below 0x004 4 */
+ __le32 unused; /* Unused entry 0x008 4 */
+ __le32 sample_rate; /* Card sample rate in Hz 0x00c 4 */
+ __le32 handshake; /* DSP command handshake 0x010 4 */
+ __le32 cmd_start; /* Chs. to start mask 0x014 4 */
+ __le32 cmd_stop; /* Chs. to stop mask 0x018 4 */
+ __le32 cmd_reset; /* Chs. to reset mask 0x01c 4 */
+ __le16 audio_format[DSP_MAXPIPES]; /* Chs. audio format 0x020 32*2 */
struct sg_entry sglist_addr[DSP_MAXPIPES];
/* Chs. Physical sglist addrs 0x060 32*8 */
- u32 position[DSP_MAXPIPES];
+ __le32 position[DSP_MAXPIPES];
/* Positions for ea. ch. 0x160 32*4 */
s8 vu_meter[DSP_MAXPIPES];
/* VU meters 0x1e0 32*1 */
@@ -666,28 +666,28 @@ struct comm_page { /* Base Length*/
/* Input gain 0x230 16*1 */
s8 monitors[MONITOR_ARRAY_SIZE];
/* Monitor map 0x240 0x180 */
- u32 play_coeff[MAX_PLAY_TAPS];
+ __le32 play_coeff[MAX_PLAY_TAPS];
/* Gina/Darla play filters - obsolete 0x3c0 168*4 */
- u32 rec_coeff[MAX_REC_TAPS];
+ __le32 rec_coeff[MAX_REC_TAPS];
/* Gina/Darla record filters - obsolete 0x660 192*4 */
- u16 midi_input[MIDI_IN_BUFFER_SIZE];
+ __le16 midi_input[MIDI_IN_BUFFER_SIZE];
/* MIDI input data transfer buffer 0x960 256*2 */
u8 gd_clock_state; /* Chg Gina/Darla clock state 0xb60 1 */
u8 gd_spdif_status; /* Chg. Gina/Darla S/PDIF state 0xb61 1 */
u8 gd_resampler_state; /* Should always be 3 0xb62 1 */
u8 filler2; /* 0xb63 1 */
- u32 nominal_level_mask; /* -10 level enable mask 0xb64 4 */
- u16 input_clock; /* Chg. Input clock state 0xb68 2 */
- u16 output_clock; /* Chg. Output clock state 0xb6a 2 */
- u32 status_clocks; /* Current Input clock state 0xb6c 4 */
- u32 ext_box_status; /* External box status 0xb70 4 */
- u32 cmd_add_buffer; /* Pipes to add (obsolete) 0xb74 4 */
- u32 midi_out_free_count;
+ __le32 nominal_level_mask; /* -10 level enable mask 0xb64 4 */
+ __le16 input_clock; /* Chg. Input clock state 0xb68 2 */
+ __le16 output_clock; /* Chg. Output clock state 0xb6a 2 */
+ __le32 status_clocks; /* Current Input clock state 0xb6c 4 */
+ __le32 ext_box_status; /* External box status 0xb70 4 */
+ __le32 cmd_add_buffer; /* Pipes to add (obsolete) 0xb74 4 */
+ __le32 midi_out_free_count;
/* # of bytes free in MIDI output FIFO 0xb78 4 */
- u32 unused2; /* Cyclic pipes 0xb7c 4 */
- u32 control_register;
+ __le32 unused2; /* Cyclic pipes 0xb7c 4 */
+ __le32 control_register;
/* Mona, Gina24, Layla24, 3G ctrl reg 0xb80 4 */
- u32 e3g_frq_register; /* 3G frequency register 0xb84 4 */
+ __le32 e3g_frq_register; /* 3G frequency register 0xb84 4 */
u8 filler[24]; /* filler 0xb88 24*1 */
s8 vmixer[VMIXER_ARRAY_SIZE];
/* Vmixer levels 0xba0 64*1 */
diff --git a/sound/pci/echoaudio/echoaudio_gml.c b/sound/pci/echoaudio/echoaudio_gml.c
index 834b39e97db7..eea6fe530ab4 100644
--- a/sound/pci/echoaudio/echoaudio_gml.c
+++ b/sound/pci/echoaudio/echoaudio_gml.c
@@ -63,6 +63,8 @@ the control register. write_control_reg sends the new control register
value to the DSP. */
static int write_control_reg(struct echoaudio *chip, u32 value, char force)
{
+ __le32 reg_value;
+
/* Handle the digital input auto-mute */
if (chip->digital_in_automute)
value |= GML_DIGITAL_IN_AUTO_MUTE;
@@ -72,11 +74,11 @@ static int write_control_reg(struct echoaudio *chip, u32 value, char force)
dev_dbg(chip->card->dev, "write_control_reg: 0x%x\n", value);
/* Write the control register */
- value = cpu_to_le32(value);
- if (value != chip->comm_page->control_register || force) {
+ reg_value = cpu_to_le32(value);
+ if (reg_value != chip->comm_page->control_register || force) {
if (wait_handshake(chip))
return -EIO;
- chip->comm_page->control_register = value;
+ chip->comm_page->control_register = reg_value;
clear_handshake(chip);
return send_vector(chip, DSP_VC_WRITE_CONTROL_REG);
}
diff --git a/sound/pci/emu10k1/emu10k1_patch.c b/sound/pci/emu10k1/emu10k1_patch.c
index 0e069aeab86d..c32eb7053715 100644
--- a/sound/pci/emu10k1/emu10k1_patch.c
+++ b/sound/pci/emu10k1/emu10k1_patch.c
@@ -70,11 +70,8 @@ snd_emu10k1_sample_new(struct snd_emux *rec, struct snd_sf_sample *sp,
loopend = sampleend;
/* be sure loop points start < end */
- if (sp->v.loopstart >= sp->v.loopend) {
- int tmp = sp->v.loopstart;
- sp->v.loopstart = sp->v.loopend;
- sp->v.loopend = tmp;
- }
+ if (sp->v.loopstart >= sp->v.loopend)
+ swap(sp->v.loopstart, sp->v.loopend);
/* compute true data size to be loaded */
truesize = sp->v.size + BLANK_HEAD_SIZE;
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index de2ecbe95d6c..90713741c2dc 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -526,7 +526,7 @@ static int snd_emu10k1_gpr_poke(struct snd_emu10k1 *emu,
if (!test_bit(gpr, icode->gpr_valid))
continue;
if (in_kernel)
- val = *(u32 *)&icode->gpr_map[gpr];
+ val = *(__force u32 *)&icode->gpr_map[gpr];
else if (get_user(val, &icode->gpr_map[gpr]))
return -EFAULT;
snd_emu10k1_ptr_write(emu, emu->gpr_base + gpr, 0, val);
@@ -560,8 +560,8 @@ static int snd_emu10k1_tram_poke(struct snd_emu10k1 *emu,
if (!test_bit(tram, icode->tram_valid))
continue;
if (in_kernel) {
- val = *(u32 *)&icode->tram_data_map[tram];
- addr = *(u32 *)&icode->tram_addr_map[tram];
+ val = *(__force u32 *)&icode->tram_data_map[tram];
+ addr = *(__force u32 *)&icode->tram_addr_map[tram];
} else {
if (get_user(val, &icode->tram_data_map[tram]) ||
get_user(addr, &icode->tram_addr_map[tram]))
@@ -611,8 +611,8 @@ static int snd_emu10k1_code_poke(struct snd_emu10k1 *emu,
if (!test_bit(pc / 2, icode->code_valid))
continue;
if (in_kernel) {
- lo = *(u32 *)&icode->code[pc + 0];
- hi = *(u32 *)&icode->code[pc + 1];
+ lo = *(__force u32 *)&icode->code[pc + 0];
+ hi = *(__force u32 *)&icode->code[pc + 1];
} else {
if (get_user(lo, &icode->code[pc + 0]) ||
get_user(hi, &icode->code[pc + 1]))
@@ -666,7 +666,7 @@ static unsigned int *copy_tlv(const unsigned int __user *_tlv, bool in_kernel)
if (!_tlv)
return NULL;
if (in_kernel)
- memcpy(data, (void *)_tlv, sizeof(data));
+ memcpy(data, (__force void *)_tlv, sizeof(data));
else if (copy_from_user(data, _tlv, sizeof(data)))
return NULL;
if (data[1] >= MAX_TLV_SIZE)
@@ -676,7 +676,7 @@ static unsigned int *copy_tlv(const unsigned int __user *_tlv, bool in_kernel)
return NULL;
memcpy(tlv, data, sizeof(data));
if (in_kernel) {
- memcpy(tlv + 2, (void *)(_tlv + 2), data[1]);
+ memcpy(tlv + 2, (__force void *)(_tlv + 2), data[1]);
} else if (copy_from_user(tlv + 2, _tlv + 2, data[1])) {
kfree(tlv);
return NULL;
@@ -693,7 +693,7 @@ static int copy_gctl(struct snd_emu10k1 *emu,
if (emu->support_tlv) {
if (in_kernel)
- memcpy(gctl, (void *)&_gctl[idx], sizeof(*gctl));
+ memcpy(gctl, (__force void *)&_gctl[idx], sizeof(*gctl));
else if (copy_from_user(gctl, &_gctl[idx], sizeof(*gctl)))
return -EFAULT;
return 0;
@@ -701,7 +701,7 @@ static int copy_gctl(struct snd_emu10k1 *emu,
octl = (struct snd_emu10k1_fx8010_control_old_gpr __user *)_gctl;
if (in_kernel)
- memcpy(gctl, (void *)&octl[idx], sizeof(*octl));
+ memcpy(gctl, (__force void *)&octl[idx], sizeof(*octl));
else if (copy_from_user(gctl, &octl[idx], sizeof(*octl)))
return -EFAULT;
gctl->tlv = NULL;
@@ -735,7 +735,7 @@ static int snd_emu10k1_verify_controls(struct snd_emu10k1 *emu,
for (i = 0, _id = icode->gpr_del_controls;
i < icode->gpr_del_control_count; i++, _id++) {
if (in_kernel)
- id = *(struct snd_ctl_elem_id *)_id;
+ id = *(__force struct snd_ctl_elem_id *)_id;
else if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
if (snd_emu10k1_look_for_ctl(emu, &id) == NULL)
@@ -833,7 +833,7 @@ static int snd_emu10k1_add_controls(struct snd_emu10k1 *emu,
knew.device = gctl->id.device;
knew.subdevice = gctl->id.subdevice;
knew.info = snd_emu10k1_gpr_ctl_info;
- knew.tlv.p = copy_tlv(gctl->tlv, in_kernel);
+ knew.tlv.p = copy_tlv((__force const unsigned int __user *)gctl->tlv, in_kernel);
if (knew.tlv.p)
knew.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ;
@@ -897,7 +897,7 @@ static int snd_emu10k1_del_controls(struct snd_emu10k1 *emu,
for (i = 0, _id = icode->gpr_del_controls;
i < icode->gpr_del_control_count; i++, _id++) {
if (in_kernel)
- id = *(struct snd_ctl_elem_id *)_id;
+ id = *(__force struct snd_ctl_elem_id *)_id;
else if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
down_write(&card->controls_rwsem);
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 69f9b100bd24..9f2b6097f486 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -290,7 +290,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int silent_page, tmp;
int voice, stereo, w_16;
- unsigned char attn, send_amount[8];
+ unsigned char send_amount[8];
unsigned char send_routing[8];
unsigned long flags;
unsigned int pitch_target;
@@ -313,7 +313,6 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
/* volume parameters */
if (extra) {
- attn = 0;
memset(send_routing, 0, sizeof(send_routing));
send_routing[0] = 0;
send_routing[1] = 1;
@@ -779,7 +778,7 @@ static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_START:
snd_emu10k1_playback_invalidate_cache(emu, 1, epcm->extra); /* do we need this? */
snd_emu10k1_playback_invalidate_cache(emu, 0, epcm->voices[0]);
- /* follow thru */
+ /* fall through */
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE)
@@ -929,7 +928,7 @@ static int snd_emu10k1_efx_playback_trigger(struct snd_pcm_substream *substream,
}
snd_emu10k1_playback_invalidate_cache(emu, 1, epcm->extra);
- /* follow thru */
+ /* fall through */
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
snd_emu10k1_playback_prepare_voice(emu, epcm->extra, 1, 1, NULL);
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 39f79a6b5283..727eb3da1fda 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -2392,7 +2392,7 @@ static int snd_audiopci_probe(struct pci_dev *pci,
static int dev;
struct snd_card *card;
struct ensoniq *ensoniq;
- int err, pcm_devs[2];
+ int err;
if (dev >= SNDRV_CARDS)
return -ENODEV;
@@ -2412,7 +2412,6 @@ static int snd_audiopci_probe(struct pci_dev *pci,
}
card->private_data = ensoniq;
- pcm_devs[0] = 0; pcm_devs[1] = 1;
#ifdef CHIP1370
if ((err = snd_ensoniq_1370_mixer(ensoniq)) < 0) {
snd_card_free(card);
diff --git a/sound/pci/hda/dell_wmi_helper.c b/sound/pci/hda/dell_wmi_helper.c
index 1b48a8c19d28..bbd6c87a4ed6 100644
--- a/sound/pci/hda/dell_wmi_helper.c
+++ b/sound/pci/hda/dell_wmi_helper.c
@@ -6,111 +6,18 @@
#if IS_ENABLED(CONFIG_DELL_LAPTOP)
#include <linux/dell-led.h>
-enum {
- MICMUTE_LED_ON,
- MICMUTE_LED_OFF,
- MICMUTE_LED_FOLLOW_CAPTURE,
- MICMUTE_LED_FOLLOW_MUTE,
-};
-
-static int dell_led_mode = MICMUTE_LED_FOLLOW_MUTE;
-static int dell_capture;
-static int dell_led_value;
static int (*dell_micmute_led_set_func)(int);
-static void (*dell_old_cap_hook)(struct hda_codec *,
- struct snd_kcontrol *,
- struct snd_ctl_elem_value *);
-
-static void call_micmute_led_update(void)
-{
- int val;
-
- switch (dell_led_mode) {
- case MICMUTE_LED_ON:
- val = 1;
- break;
- case MICMUTE_LED_OFF:
- val = 0;
- break;
- case MICMUTE_LED_FOLLOW_CAPTURE:
- val = dell_capture;
- break;
- case MICMUTE_LED_FOLLOW_MUTE:
- default:
- val = !dell_capture;
- break;
- }
-
- if (val == dell_led_value)
- return;
- dell_led_value = val;
- dell_micmute_led_set_func(dell_led_value);
-}
-
-static void update_dell_wmi_micmute_led(struct hda_codec *codec,
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- if (dell_old_cap_hook)
- dell_old_cap_hook(codec, kcontrol, ucontrol);
-
- if (!ucontrol || !dell_micmute_led_set_func)
- return;
- if (strcmp("Capture Switch", ucontrol->id.name) == 0 && ucontrol->id.index == 0) {
- /* TODO: How do I verify if it's a mono or stereo here? */
- dell_capture = (ucontrol->value.integer.value[0] ||
- ucontrol->value.integer.value[1]);
- call_micmute_led_update();
- }
-}
-static int dell_mic_mute_led_mode_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
+static void dell_micmute_update(struct hda_codec *codec)
{
- static const char * const texts[] = {
- "On", "Off", "Follow Capture", "Follow Mute",
- };
-
- return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
-}
+ struct hda_gen_spec *spec = codec->spec;
-static int dell_mic_mute_led_mode_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- ucontrol->value.enumerated.item[0] = dell_led_mode;
- return 0;
+ dell_micmute_led_set_func(spec->micmute_led.led_value);
}
-static int dell_mic_mute_led_mode_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- unsigned int mode;
-
- mode = ucontrol->value.enumerated.item[0];
- if (mode > MICMUTE_LED_FOLLOW_MUTE)
- mode = MICMUTE_LED_FOLLOW_MUTE;
- if (mode == dell_led_mode)
- return 0;
- dell_led_mode = mode;
- call_micmute_led_update();
- return 1;
-}
-
-static const struct snd_kcontrol_new dell_mic_mute_mode_ctls[] = {
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Mic Mute-LED Mode",
- .info = dell_mic_mute_led_mode_info,
- .get = dell_mic_mute_led_mode_get,
- .put = dell_mic_mute_led_mode_put,
- },
- {}
-};
-
static void alc_fixup_dell_wmi(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
- struct alc_spec *spec = codec->spec;
bool removefunc = false;
if (action == HDA_FIXUP_ACT_PROBE) {
@@ -121,25 +28,14 @@ static void alc_fixup_dell_wmi(struct hda_codec *codec,
return;
}
- removefunc = true;
- if (dell_micmute_led_set_func(false) >= 0) {
- dell_led_value = 0;
- if (spec->gen.num_adc_nids > 1 && !spec->gen.dyn_adc_switch)
- codec_dbg(codec, "Skipping micmute LED control due to several ADCs");
- else {
- dell_old_cap_hook = spec->gen.cap_sync_hook;
- spec->gen.cap_sync_hook = update_dell_wmi_micmute_led;
- removefunc = false;
- add_mixer(spec, dell_mic_mute_mode_ctls);
- }
- }
-
+ removefunc = (dell_micmute_led_set_func(false) < 0) ||
+ (snd_hda_gen_add_micmute_led(codec,
+ dell_micmute_update) < 0);
}
if (dell_micmute_led_set_func && (action == HDA_FIXUP_ACT_FREE || removefunc)) {
symbol_put(dell_micmute_led_set);
dell_micmute_led_set_func = NULL;
- dell_old_cap_hook = NULL;
}
}
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 20a171ac4bb2..0a5085537034 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -37,15 +37,8 @@
#include "hda_jack.h"
#include <sound/hda_hwdep.h>
-#ifdef CONFIG_PM
-#define codec_in_pm(codec) atomic_read(&(codec)->core.in_pm)
-#define hda_codec_is_power_on(codec) \
- (!pm_runtime_suspended(hda_codec_dev(codec)))
-#else
-#define codec_in_pm(codec) 0
-#define hda_codec_is_power_on(codec) 1
-#endif
-
+#define codec_in_pm(codec) snd_hdac_is_in_pm(&codec->core)
+#define hda_codec_is_power_on(codec) snd_hdac_is_power_on(&codec->core)
#define codec_has_epss(codec) \
((codec)->core.power_caps & AC_PWRST_EPSS)
#define codec_has_clkstop(codec) \
@@ -858,6 +851,39 @@ static void snd_hda_codec_dev_release(struct device *dev)
kfree(codec);
}
+#define DEV_NAME_LEN 31
+
+static int snd_hda_codec_device_init(struct hda_bus *bus, struct snd_card *card,
+ unsigned int codec_addr, struct hda_codec **codecp)
+{
+ char name[DEV_NAME_LEN];
+ struct hda_codec *codec;
+ int err;
+
+ dev_dbg(card->dev, "%s: entry\n", __func__);
+
+ if (snd_BUG_ON(!bus))
+ return -EINVAL;
+ if (snd_BUG_ON(codec_addr > HDA_MAX_CODEC_ADDRESS))
+ return -EINVAL;
+
+ codec = kzalloc(sizeof(*codec), GFP_KERNEL);
+ if (!codec)
+ return -ENOMEM;
+
+ sprintf(name, "hdaudioC%dD%d", card->number, codec_addr);
+ err = snd_hdac_device_init(&codec->core, &bus->core, name, codec_addr);
+ if (err < 0) {
+ kfree(codec);
+ return err;
+ }
+
+ codec->core.type = HDA_DEV_LEGACY;
+ *codecp = codec;
+
+ return err;
+}
+
/**
* snd_hda_codec_new - create a HDA codec
* @bus: the bus to assign
@@ -869,7 +895,19 @@ static void snd_hda_codec_dev_release(struct device *dev)
int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card,
unsigned int codec_addr, struct hda_codec **codecp)
{
- struct hda_codec *codec;
+ int ret;
+
+ ret = snd_hda_codec_device_init(bus, card, codec_addr, codecp);
+ if (ret < 0)
+ return ret;
+
+ return snd_hda_codec_device_new(bus, card, codec_addr, *codecp);
+}
+EXPORT_SYMBOL_GPL(snd_hda_codec_new);
+
+int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card,
+ unsigned int codec_addr, struct hda_codec *codec)
+{
char component[31];
hda_nid_t fg;
int err;
@@ -879,25 +917,14 @@ int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card,
.dev_free = snd_hda_codec_dev_free,
};
+ dev_dbg(card->dev, "%s: entry\n", __func__);
+
if (snd_BUG_ON(!bus))
return -EINVAL;
if (snd_BUG_ON(codec_addr > HDA_MAX_CODEC_ADDRESS))
return -EINVAL;
- codec = kzalloc(sizeof(*codec), GFP_KERNEL);
- if (!codec)
- return -ENOMEM;
-
- sprintf(component, "hdaudioC%dD%d", card->number, codec_addr);
- err = snd_hdac_device_init(&codec->core, &bus->core, component,
- codec_addr);
- if (err < 0) {
- kfree(codec);
- return err;
- }
-
codec->core.dev.release = snd_hda_codec_dev_release;
- codec->core.type = HDA_DEV_LEGACY;
codec->core.exec_verb = codec_exec_verb;
codec->bus = bus;
@@ -957,15 +984,13 @@ int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card,
if (err < 0)
goto error;
- if (codecp)
- *codecp = codec;
return 0;
error:
put_device(hda_codec_dev(codec));
return err;
}
-EXPORT_SYMBOL_GPL(snd_hda_codec_new);
+EXPORT_SYMBOL_GPL(snd_hda_codec_device_new);
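The net effect of this split: snd_hda_codec_new() keeps its one-shot
allocate-and-register behavior by chaining the two steps, while the newly
exported snd_hda_codec_device_new() lets a caller that already holds the
codec object produced by the init step complete registration separately.
Note that only the second half is exported here; snd_hda_codec_device_init()
remains static in this file.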
/**
* snd_hda_codec_update_widgets - Refresh widget caps and pin defaults
@@ -2846,14 +2871,13 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec)
{
unsigned int state;
- atomic_inc(&codec->core.in_pm);
-
+ snd_hdac_enter_pm(&codec->core);
if (codec->patch_ops.suspend)
codec->patch_ops.suspend(codec);
hda_cleanup_all_streams(codec);
state = hda_set_power_state(codec, AC_PWRST_D3);
update_power_acct(codec, true);
- atomic_dec(&codec->core.in_pm);
+ snd_hdac_leave_pm(&codec->core);
return state;
}
@@ -2862,8 +2886,7 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec)
*/
static void hda_call_codec_resume(struct hda_codec *codec)
{
- atomic_inc(&codec->core.in_pm);
-
+ snd_hdac_enter_pm(&codec->core);
if (codec->core.regmap)
regcache_mark_dirty(codec->core.regmap);
@@ -2886,7 +2909,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
hda_jackpoll_work(&codec->jackpoll_work.work);
else
snd_hda_jack_report_sync(codec);
- atomic_dec(&codec->core.in_pm);
+ snd_hdac_leave_pm(&codec->core);
}
static int hda_codec_runtime_suspend(struct device *dev)
@@ -2992,6 +3015,7 @@ int snd_hda_codec_build_controls(struct hda_codec *codec)
sync_power_up_states(codec);
return 0;
}
+EXPORT_SYMBOL_GPL(snd_hda_codec_build_controls);
/*
* PCM stuff
@@ -3197,6 +3221,7 @@ int snd_hda_codec_parse_pcms(struct hda_codec *codec)
return 0;
}
+EXPORT_SYMBOL_GPL(snd_hda_codec_parse_pcms);
/* assign all PCMs of the given codec */
int snd_hda_codec_build_pcms(struct hda_codec *codec)
@@ -3252,8 +3277,8 @@ int snd_hda_add_new_ctls(struct hda_codec *codec,
for (; knew->name; knew++) {
struct snd_kcontrol *kctl;
int addr = 0, idx = 0;
- if (knew->iface == -1) /* skip this codec private value */
- continue;
+ if (knew->iface == (__force snd_ctl_elem_iface_t)-1)
+ continue; /* skip this codec private value */
for (;;) {
kctl = snd_ctl_new1(knew, codec);
if (!kctl)
@@ -3843,7 +3868,7 @@ EXPORT_SYMBOL_GPL(snd_hda_correct_pin_ctl);
* This function is a helper to set a pin ctl value more safely.
* It corrects the pin ctl value via snd_hda_correct_pin_ctl(), stores the
* value in pin target array via snd_hda_codec_set_pin_target(), then
- * actually writes the value via either snd_hda_codec_update_cache() or
+ * actually writes the value via either snd_hda_codec_write_cache() or
* snd_hda_codec_write() depending on @cached flag.
*/
int _snd_hda_set_pin_ctl(struct hda_codec *codec, hda_nid_t pin,
@@ -3852,7 +3877,7 @@ int _snd_hda_set_pin_ctl(struct hda_codec *codec, hda_nid_t pin,
val = snd_hda_correct_pin_ctl(codec, pin, val);
snd_hda_codec_set_pin_target(codec, pin, val);
if (cached)
- return snd_hda_codec_update_cache(codec, pin, 0,
+ return snd_hda_codec_write_cache(codec, pin, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL, val);
else
return snd_hda_codec_write(codec, pin, 0,
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index a8b1b31f161c..0d98bb9068b1 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -84,6 +84,7 @@ struct hda_bus {
*/
typedef int (*hda_codec_patch_t)(struct hda_codec *);
+#define HDA_CODEC_ID_SKIP_PROBE 0x00000001
#define HDA_CODEC_ID_GENERIC_HDMI 0x00000101
#define HDA_CODEC_ID_GENERIC 0x00000201
@@ -308,6 +309,8 @@ struct hda_codec {
*/
int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card,
unsigned int codec_addr, struct hda_codec **codecp);
+int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card,
+ unsigned int codec_addr, struct hda_codec *codec);
int snd_hda_codec_configure(struct hda_codec *codec);
int snd_hda_codec_update_widgets(struct hda_codec *codec);
@@ -382,9 +385,6 @@ snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid,
return snd_hdac_regmap_write(&codec->core, nid, verb, parm);
}
-#define snd_hda_codec_update_cache(codec, nid, flags, verb, parm) \
- snd_hda_codec_write_cache(codec, nid, flags, verb, parm)
-
/* the struct for codec->pin_configs */
struct hda_pincfg {
hda_nid_t nid;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index db773e219aaa..579984ecdec3 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -209,7 +209,7 @@ static void parse_user_hints(struct hda_codec *codec)
*/
#define update_pin_ctl(codec, pin, val) \
- snd_hda_codec_update_cache(codec, pin, 0, \
+ snd_hda_codec_write_cache(codec, pin, 0, \
AC_VERB_SET_PIN_WIDGET_CONTROL, val)
/* restore the pinctl based on the cached value */
@@ -898,7 +898,7 @@ void snd_hda_activate_path(struct hda_codec *codec, struct nid_path *path,
hda_nid_t nid = path->path[i];
if (enable && path->multi[i])
- snd_hda_codec_update_cache(codec, nid, 0,
+ snd_hda_codec_write_cache(codec, nid, 0,
AC_VERB_SET_CONNECT_SEL,
path->idx[i]);
if (has_amp_in(codec, path, i))
@@ -930,7 +930,7 @@ static void set_pin_eapd(struct hda_codec *codec, hda_nid_t pin, bool enable)
return;
if (codec->inv_eapd)
enable = !enable;
- snd_hda_codec_update_cache(codec, pin, 0,
+ snd_hda_codec_write_cache(codec, pin, 0,
AC_VERB_SET_EAPD_BTLENABLE,
enable ? 0x02 : 0x00);
}
@@ -3900,6 +3900,142 @@ static int parse_mic_boost(struct hda_codec *codec)
}
/*
+ * mic mute LED hook helpers
+ */
+enum {
+ MICMUTE_LED_ON,
+ MICMUTE_LED_OFF,
+ MICMUTE_LED_FOLLOW_CAPTURE,
+ MICMUTE_LED_FOLLOW_MUTE,
+};
+
+static void call_micmute_led_update(struct hda_codec *codec)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ unsigned int val;
+
+ switch (spec->micmute_led.led_mode) {
+ case MICMUTE_LED_ON:
+ val = 1;
+ break;
+ case MICMUTE_LED_OFF:
+ val = 0;
+ break;
+ case MICMUTE_LED_FOLLOW_CAPTURE:
+ val = !!spec->micmute_led.capture;
+ break;
+ case MICMUTE_LED_FOLLOW_MUTE:
+ default:
+ val = !spec->micmute_led.capture;
+ break;
+ }
+
+ if (val == spec->micmute_led.led_value)
+ return;
+ spec->micmute_led.led_value = val;
+ if (spec->micmute_led.update)
+ spec->micmute_led.update(codec);
+}
+
+static void update_micmute_led(struct hda_codec *codec,
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ unsigned int mask;
+
+ if (spec->micmute_led.old_hook)
+ spec->micmute_led.old_hook(codec, kcontrol, ucontrol);
+
+ if (!ucontrol)
+ return;
+ mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+ if (!strcmp("Capture Switch", ucontrol->id.name)) {
+ /* TODO: How do I verify whether it's mono or stereo here? */
+ if (ucontrol->value.integer.value[0] ||
+ ucontrol->value.integer.value[1])
+ spec->micmute_led.capture |= mask;
+ else
+ spec->micmute_led.capture &= ~mask;
+ call_micmute_led_update(codec);
+ }
+}
+
+static int micmute_led_mode_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char * const texts[] = {
+ "On", "Off", "Follow Capture", "Follow Mute",
+ };
+
+ return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
+}
+
+static int micmute_led_mode_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct hda_gen_spec *spec = codec->spec;
+
+ ucontrol->value.enumerated.item[0] = spec->micmute_led.led_mode;
+ return 0;
+}
+
+static int micmute_led_mode_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct hda_gen_spec *spec = codec->spec;
+ unsigned int mode;
+
+ mode = ucontrol->value.enumerated.item[0];
+ if (mode > MICMUTE_LED_FOLLOW_MUTE)
+ mode = MICMUTE_LED_FOLLOW_MUTE;
+ if (mode == spec->micmute_led.led_mode)
+ return 0;
+ spec->micmute_led.led_mode = mode;
+ call_micmute_led_update(codec);
+ return 1;
+}
+
+static const struct snd_kcontrol_new micmute_led_mode_ctl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Mic Mute-LED Mode",
+ .info = micmute_led_mode_info,
+ .get = micmute_led_mode_get,
+ .put = micmute_led_mode_put,
+};
+
+/**
+ * snd_hda_gen_add_micmute_led - helper for setting up mic mute LED hook
+ * @codec: the HDA codec
+ * @hook: the callback for updating LED
+ *
+ * Called from codec drivers to offer the mic mute LED control.
+ * Once established, it sets up cap_sync_hook and invokes the callback
+ * each time the capture mixer switch changes. The callback is expected
+ * to update the LED accordingly.
+ *
+ * Returns 0 if the hook is established, or a negative error code.
+ */
+int snd_hda_gen_add_micmute_led(struct hda_codec *codec,
+ void (*hook)(struct hda_codec *))
+{
+ struct hda_gen_spec *spec = codec->spec;
+
+ spec->micmute_led.led_mode = MICMUTE_LED_FOLLOW_MUTE;
+ spec->micmute_led.capture = 0;
+ spec->micmute_led.led_value = 0;
+ spec->micmute_led.old_hook = spec->cap_sync_hook;
+ spec->micmute_led.update = hook;
+ spec->cap_sync_hook = update_micmute_led;
+ if (!snd_hda_gen_add_kctl(spec, NULL, &micmute_led_mode_ctl))
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_add_micmute_led);
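A usage sketch, modeled on the Dell helper conversion earlier in this patch;
my_led_set() stands in for whatever platform-specific LED accessor a driver
actually has:

static void my_micmute_update(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;

	/* push the value maintained by the generic parser to the LED */
	my_led_set(spec->micmute_led.led_value);
}

	/* in the fixup/probe path: */
	err = snd_hda_gen_add_micmute_led(codec, my_micmute_update);
	if (err < 0)
		return err;

The mode handling, the "Mic Mute-LED Mode" mixer control, and the capture
state tracking all live in the generic code; the driver supplies only the
hardware write-back.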
+
+/*
* parse digital I/Os and set up NIDs in BIOS auto-parse mode
*/
static void parse_digital(struct hda_codec *codec)
@@ -5837,7 +5973,7 @@ static void clear_unsol_on_unused_pins(struct hda_codec *codec)
hda_nid_t nid = pin->nid;
if (is_jack_detectable(codec, nid) &&
!snd_hda_jack_tbl_get(codec, nid))
- snd_hda_codec_update_cache(codec, nid, 0,
+ snd_hda_codec_write_cache(codec, nid, 0,
AC_VERB_SET_UNSOLICITED_ENABLE, 0);
}
}
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 61772317de46..10123664fa61 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -86,6 +86,16 @@ struct badness_table {
extern const struct badness_table hda_main_out_badness;
extern const struct badness_table hda_extra_out_badness;
+struct hda_micmute_hook {
+ unsigned int led_mode;
+ unsigned int capture;
+ unsigned int led_value;
+ void (*update)(struct hda_codec *codec);
+ void (*old_hook)(struct hda_codec *codec,
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+};
+
struct hda_gen_spec {
char stream_name_analog[32]; /* analog PCM stream */
const struct hda_pcm_stream *stream_analog_playback;
@@ -276,6 +286,9 @@ struct hda_gen_spec {
struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
+ /* mic mute LED hook; called via cap_sync_hook */
+ struct hda_micmute_hook micmute_led;
+
/* PCM hooks */
void (*pcm_playback_hook)(struct hda_pcm_stream *hinfo,
struct hda_codec *codec,
@@ -342,4 +355,7 @@ unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
void snd_hda_gen_stream_pm(struct hda_codec *codec, hda_nid_t nid, bool on);
int snd_hda_gen_fix_pin_power(struct hda_codec *codec, hda_nid_t pin);
+int snd_hda_gen_add_micmute_led(struct hda_codec *codec,
+ void (*hook)(struct hda_codec *));
+
#endif /* __SOUND_HDA_GENERIC_H */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1ae1850b3bfd..1b2ce304152a 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1319,15 +1319,16 @@ static const struct vga_switcheroo_client_ops azx_vs_ops = {
static int register_vga_switcheroo(struct azx *chip)
{
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+ struct pci_dev *p;
int err;
if (!hda->use_vga_switcheroo)
return 0;
- /* FIXME: currently only handling DIS controller
- * is there any machine with two switchable HDMI audio controllers?
- */
- err = vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
- VGA_SWITCHEROO_DIS);
+
+ p = get_bound_vga(chip->pci);
+ err = vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops, p);
+ pci_dev_put(p);
+
if (err < 0)
return err;
hda->vga_switcheroo_registered = 1;
@@ -1429,7 +1430,7 @@ static struct pci_dev *get_bound_vga(struct pci_dev *pci)
p = pci_get_domain_bus_and_slot(pci_domain_nr(pci->bus),
pci->bus->number, 0);
if (p) {
- if ((p->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ if ((p->class >> 16) == PCI_BASE_CLASS_DISPLAY)
return p;
pci_dev_put(p);
}
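For context on this check: the PCI class register packs the base class in
bits 23:16, the subclass in bits 15:8, and the programming interface in
bits 7:0. Shifting right by 8 compares base class plus subclass and matches
only VGA-compatible controllers, while shifting by 16 compares the base
class alone and also catches non-VGA display functions:

	/* e.g. class 0x038000 ("other display controller"):
	 *   (class >> 8)  == 0x0380 != PCI_CLASS_DISPLAY_VGA (0x0300)
	 *   (class >> 16) == 0x03   == PCI_BASE_CLASS_DISPLAY
	 */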
@@ -2207,7 +2208,7 @@ out_free:
*/
static struct snd_pci_quirk power_save_blacklist[] = {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
- SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
+ SND_PCI_QUIRK(0x1849, 0xc892, "Asrock B85M-ITX", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
@@ -2535,7 +2536,8 @@ static const struct pci_device_id azx_ids[] = {
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
/* AMD Raven */
{ PCI_DEVICE(0x1022, 0x15e3),
- .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+ AZX_DCAPS_PM_RUNTIME },
/* ATI HDMI */
{ PCI_DEVICE(0x1002, 0x0002),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 757857313426..fd476fb40e1b 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -148,7 +148,7 @@ static void ad_vmaster_eapd_hook(void *private_data, int enabled)
return;
if (codec->inv_eapd)
enabled = !enabled;
- snd_hda_codec_update_cache(codec, spec->eapd_nid, 0,
+ snd_hda_codec_write_cache(codec, spec->eapd_nid, 0,
AC_VERB_SET_EAPD_BTLENABLE,
enabled ? 0x02 : 0x00);
}
@@ -991,7 +991,7 @@ static void ad1884_vmaster_hp_gpio_hook(void *private_data, int enabled)
if (spec->eapd_nid)
ad_vmaster_eapd_hook(private_data, enabled);
- snd_hda_codec_update_cache(codec, 0x01, 0,
+ snd_hda_codec_write_cache(codec, 0x01, 0,
AC_VERB_SET_GPIO_DATA,
enabled ? 0x00 : 0x02);
}
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 321e95c409c1..0166a3d7cd55 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -897,7 +897,7 @@ struct ca0132_spec {
const struct hda_verb *base_init_verbs;
const struct hda_verb *base_exit_verbs;
const struct hda_verb *chip_init_verbs;
- const struct hda_verb *sbz_init_verbs;
+ const struct hda_verb *desktop_init_verbs;
struct hda_verb *spec_init_verbs;
struct auto_pin_cfg autocfg;
@@ -965,9 +965,11 @@ struct ca0132_spec {
long cur_ctl_vals[TUNING_CTLS_COUNT];
#endif
/*
- * Sound Blaster Z PCI region 2 iomem, used for input and output
- * switching, and other unknown commands.
+ * The Recon3D, Sound Blaster Z, Sound Blaster ZxR, and Sound Blaster
+ * AE-5 all use PCI region 2 to toggle GPIO and other currently unknown
+ * things.
*/
+ bool use_pci_mmio;
void __iomem *mem_base;
/*
@@ -994,6 +996,7 @@ enum {
QUIRK_ALIENWARE_M17XR4,
QUIRK_SBZ,
QUIRK_R3DI,
+ QUIRK_R3D,
};
static const struct hda_pintbl alienware_pincfgs[] = {
@@ -1025,6 +1028,21 @@ static const struct hda_pintbl sbz_pincfgs[] = {
{}
};
+/* Recon3D pin configs taken from Windows Driver */
+static const struct hda_pintbl r3d_pincfgs[] = {
+ { 0x0b, 0x01014110 }, /* Port G -- Lineout FRONT L/R */
+ { 0x0c, 0x014510f0 }, /* SPDIF Out 1 */
+ { 0x0d, 0x014510f0 }, /* Digital Out */
+ { 0x0e, 0x01c520f0 }, /* SPDIF In */
+ { 0x0f, 0x0221401f }, /* Port A -- BackPanel HP */
+ { 0x10, 0x01016011 }, /* Port D -- Center/LFE or FP Hp */
+ { 0x11, 0x01011014 }, /* Port B -- LineMicIn2 / Rear L/R */
+ { 0x12, 0x02a090f0 }, /* Port C -- LineIn1 */
+ { 0x13, 0x908700f0 }, /* What U Hear In */
+ { 0x18, 0x50d000f0 }, /* N/A */
+ {}
+};
+
/* Recon3D integrated pin configs taken from Windows Driver */
static const struct hda_pintbl r3di_pincfgs[] = {
{ 0x0b, 0x01014110 }, /* Port G -- Lineout FRONT L/R */
@@ -1050,6 +1068,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
+ SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
{}
};
@@ -3073,6 +3092,24 @@ static bool dspload_wait_loaded(struct hda_codec *codec)
*/
/*
+ * For cards with PCI-E region2 (Sound Blaster Z/ZxR, Recon3D, and AE-5),
+ * the mmio address 0x320 is used to set GPIO pins. The data format is:
+ * the low bits select the pin number (only values up to 7 seen so far),
+ * and bit 8 drives the pin high or low.
+ */
+static void ca0132_mmio_gpio_set(struct hda_codec *codec, unsigned int gpio_pin,
+ bool enable)
+{
+ struct ca0132_spec *spec = codec->spec;
+ unsigned short gpio_data;
+
+ gpio_data = gpio_pin & 0xF;
+ gpio_data |= ((enable << 8) & 0x100);
+
+ writew(gpio_data, spec->mem_base + 0x320);
+}
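As a worked example of this encoding, the raw writes replaced below decode
as follows: writew(0x0104, ...) is pin 4 with bit 8 set, i.e.
ca0132_mmio_gpio_set(codec, 4, true); writew(0x0007, ...) is pin 7 with
bit 8 clear, i.e. ca0132_mmio_gpio_set(codec, 7, false).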
+
+/*
* Sets up the GPIO pins so that they are discoverable. If this isn't done,
* the card shows as having no GPIO pins.
*/
@@ -3947,15 +3984,19 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
/*speaker out config*/
switch (spec->quirk) {
case QUIRK_SBZ:
- writew(0x0007, spec->mem_base + 0x320);
- writew(0x0104, spec->mem_base + 0x320);
- writew(0x0101, spec->mem_base + 0x320);
+ ca0132_mmio_gpio_set(codec, 7, false);
+ ca0132_mmio_gpio_set(codec, 4, true);
+ ca0132_mmio_gpio_set(codec, 1, true);
chipio_set_control_param(codec, 0x0D, 0x18);
break;
case QUIRK_R3DI:
chipio_set_control_param(codec, 0x0D, 0x24);
r3di_gpio_out_set(codec, R3DI_LINE_OUT);
break;
+ case QUIRK_R3D:
+ chipio_set_control_param(codec, 0x0D, 0x24);
+ ca0132_mmio_gpio_set(codec, 1, true);
+ break;
}
/* disable headphone node */
@@ -3983,15 +4024,19 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
/* Headphone out config*/
switch (spec->quirk) {
case QUIRK_SBZ:
- writew(0x0107, spec->mem_base + 0x320);
- writew(0x0104, spec->mem_base + 0x320);
- writew(0x0001, spec->mem_base + 0x320);
+ ca0132_mmio_gpio_set(codec, 7, true);
+ ca0132_mmio_gpio_set(codec, 4, true);
+ ca0132_mmio_gpio_set(codec, 1, false);
chipio_set_control_param(codec, 0x0D, 0x12);
break;
case QUIRK_R3DI:
chipio_set_control_param(codec, 0x0D, 0x21);
r3di_gpio_out_set(codec, R3DI_HEADPHONE_OUT);
break;
+ case QUIRK_R3D:
+ chipio_set_control_param(codec, 0x0D, 0x21);
+ ca0132_mmio_gpio_set(codec, 0x1, false);
+ break;
}
snd_hda_codec_write(codec, spec->out_pins[0], 0,
@@ -4025,15 +4070,19 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
/* Surround out config*/
switch (spec->quirk) {
case QUIRK_SBZ:
- writew(0x0007, spec->mem_base + 0x320);
- writew(0x0104, spec->mem_base + 0x320);
- writew(0x0101, spec->mem_base + 0x320);
+ ca0132_mmio_gpio_set(codec, 7, false);
+ ca0132_mmio_gpio_set(codec, 4, true);
+ ca0132_mmio_gpio_set(codec, 1, true);
chipio_set_control_param(codec, 0x0D, 0x18);
break;
case QUIRK_R3DI:
chipio_set_control_param(codec, 0x0D, 0x24);
r3di_gpio_out_set(codec, R3DI_LINE_OUT);
break;
+ case QUIRK_R3D:
+ ca0132_mmio_gpio_set(codec, 1, true);
+ chipio_set_control_param(codec, 0x0D, 0x24);
+ break;
}
/* enable line out node */
pin_ctl = snd_hda_codec_read(codec, spec->out_pins[0], 0,
@@ -4291,7 +4340,8 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
case REAR_MIC:
switch (spec->quirk) {
case QUIRK_SBZ:
- writew(0x0000, spec->mem_base + 0x320);
+ case QUIRK_R3D:
+ ca0132_mmio_gpio_set(codec, 0, false);
tmp = FLOAT_THREE;
break;
case QUIRK_R3DI:
@@ -4323,7 +4373,8 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
ca0132_mic_boost_set(codec, 0);
switch (spec->quirk) {
case QUIRK_SBZ:
- writew(0x0000, spec->mem_base + 0x320);
+ case QUIRK_R3D:
+ ca0132_mmio_gpio_set(codec, 0, false);
break;
case QUIRK_R3DI:
r3di_gpio_mic_set(codec, R3DI_REAR_MIC);
@@ -4349,8 +4400,9 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
case FRONT_MIC:
switch (spec->quirk) {
case QUIRK_SBZ:
- writew(0x0100, spec->mem_base + 0x320);
- writew(0x0005, spec->mem_base + 0x320);
+ case QUIRK_R3D:
+ ca0132_mmio_gpio_set(codec, 0, true);
+ ca0132_mmio_gpio_set(codec, 5, false);
tmp = FLOAT_THREE;
break;
case QUIRK_R3DI:
@@ -5516,8 +5568,7 @@ static int ca0132_alt_add_effect_slider(struct hda_codec *codec, hda_nid_t nid,
sprintf(namestr, "FX: %s %s Volume", pfx, dirstr[dir]);
- knew.tlv.c = 0;
- knew.tlv.p = 0;
+ knew.tlv.c = NULL;
switch (nid) {
case XBASS_XOVER:
@@ -5729,11 +5780,11 @@ static const struct snd_kcontrol_new ca0132_mixer[] = {
};
/*
- * SBZ specific control mixer. Removes auto-detect for mic, and adds surround
- * controls. Also sets both the Front Playback and Capture Volume controls to
- * alt so they set the DSP's decibel level.
+ * Desktop specific control mixer. Removes auto-detect for mic, and adds
+ * surround controls. Also sets both the Front Playback and Capture Volume
+ * controls to alt so they set the DSP's decibel level.
*/
-static const struct snd_kcontrol_new sbz_mixer[] = {
+static const struct snd_kcontrol_new desktop_mixer[] = {
CA0132_ALT_CODEC_VOL("Front Playback Volume", 0x02, HDA_OUTPUT),
CA0132_CODEC_MUTE("Front Playback Switch", VNID_SPK, HDA_OUTPUT),
HDA_CODEC_VOLUME("Surround Playback Volume", 0x04, 0, HDA_OUTPUT),
@@ -5804,8 +5855,8 @@ static int ca0132_build_controls(struct hda_codec *codec)
*/
num_fx = OUT_EFFECTS_COUNT + IN_EFFECTS_COUNT;
for (i = 0; i < num_fx; i++) {
- /* SBZ breaks if Echo Cancellation is used */
- if (spec->quirk == QUIRK_SBZ) {
+ /* SBZ and R3D break if Echo Cancellation is used. */
+ if (spec->quirk == QUIRK_SBZ || spec->quirk == QUIRK_R3D) {
if (i == (ECHO_CANCELLATION - IN_EFFECT_START_NID +
OUT_EFFECTS_COUNT))
continue;
@@ -6187,10 +6238,10 @@ static void ca0132_refresh_widget_caps(struct hda_codec *codec)
}
/*
- * Recon3Di r3di_setup_defaults sub functions.
+ * Recon3D r3d_setup_defaults sub functions.
*/
-static void r3di_dsp_scp_startup(struct hda_codec *codec)
+static void r3d_dsp_scp_startup(struct hda_codec *codec)
{
unsigned int tmp;
@@ -6211,7 +6262,7 @@ static void r3di_dsp_scp_startup(struct hda_codec *codec)
}
-static void r3di_dsp_initial_mic_setup(struct hda_codec *codec)
+static void r3d_dsp_initial_mic_setup(struct hda_codec *codec)
{
unsigned int tmp;
@@ -6421,10 +6472,10 @@ static void ca0132_setup_defaults(struct hda_codec *codec)
}
/*
- * Setup default parameters for Recon3Di DSP.
+ * Setup default parameters for Recon3D/Recon3Di DSP.
*/
-static void r3di_setup_defaults(struct hda_codec *codec)
+static void r3d_setup_defaults(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
unsigned int tmp;
@@ -6434,9 +6485,9 @@ static void r3di_setup_defaults(struct hda_codec *codec)
if (spec->dsp_state != DSP_DOWNLOADED)
return;
- r3di_dsp_scp_startup(codec);
+ r3d_dsp_scp_startup(codec);
- r3di_dsp_initial_mic_setup(codec);
+ r3d_dsp_initial_mic_setup(codec);
/*remove DSP headroom*/
tmp = FLOAT_ZERO;
@@ -6450,7 +6501,8 @@ static void r3di_setup_defaults(struct hda_codec *codec)
/* Set speaker source? */
dspio_set_uint_param(codec, 0x32, 0x00, tmp);
- r3di_gpio_dsp_status_set(codec, R3DI_DSP_DOWNLOADED);
+ if (spec->quirk == QUIRK_R3DI)
+ r3di_gpio_dsp_status_set(codec, R3DI_DSP_DOWNLOADED);
/* Setup effect defaults */
num_fx = OUT_EFFECTS_COUNT + IN_EFFECTS_COUNT + 1;
@@ -6462,7 +6514,6 @@ static void r3di_setup_defaults(struct hda_codec *codec)
ca0132_effects[idx].def_vals[i]);
}
}
-
}
/*
@@ -6727,7 +6778,12 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
{
- ca0132_select_mic(codec);
+ struct ca0132_spec *spec = codec->spec;
+
+ if (spec->use_alt_functions)
+ ca0132_alt_select_in(codec);
+ else
+ ca0132_select_mic(codec);
}
static void ca0132_init_unsol(struct hda_codec *codec)
@@ -6798,8 +6854,8 @@ static struct hda_verb ca0132_init_verbs0[] = {
{}
};
-/* Extra init verbs for SBZ */
-static struct hda_verb sbz_init_verbs[] = {
+/* Extra init verbs for desktop cards. */
+static struct hda_verb ca0132_init_verbs1[] = {
{0x15, 0x70D, 0x20},
{0x15, 0x70E, 0x19},
{0x15, 0x707, 0x00},
@@ -6891,16 +6947,12 @@ static void sbz_region2_exit(struct hda_codec *codec)
writeb(0x0, spec->mem_base + 0x100);
for (i = 0; i < 8; i++)
writeb(0xb3, spec->mem_base + 0x304);
- /*
- * I believe these are GPIO, with the right most hex digit being the
- * gpio pin, and the second digit being on or off. We see this more in
- * the input/output select functions.
- */
- writew(0x0000, spec->mem_base + 0x320);
- writew(0x0001, spec->mem_base + 0x320);
- writew(0x0104, spec->mem_base + 0x320);
- writew(0x0005, spec->mem_base + 0x320);
- writew(0x0007, spec->mem_base + 0x320);
+
+ ca0132_mmio_gpio_set(codec, 0, false);
+ ca0132_mmio_gpio_set(codec, 1, false);
+ ca0132_mmio_gpio_set(codec, 4, true);
+ ca0132_mmio_gpio_set(codec, 5, false);
+ ca0132_mmio_gpio_set(codec, 7, false);
}
static void sbz_set_pin_ctl_default(struct hda_codec *codec)
@@ -6916,7 +6968,7 @@ static void sbz_set_pin_ctl_default(struct hda_codec *codec)
AC_VERB_SET_PIN_WIDGET_CONTROL, 0x00);
}
-static void sbz_clear_unsolicited(struct hda_codec *codec)
+static void ca0132_clear_unsolicited(struct hda_codec *codec)
{
hda_nid_t pins[7] = {0x0B, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13};
unsigned int i;
@@ -6969,21 +7021,22 @@ static void sbz_exit_chip(struct hda_codec *codec)
chipio_set_control_param(codec, 0x0D, 0x24);
- sbz_clear_unsolicited(codec);
+ ca0132_clear_unsolicited(codec);
sbz_set_pin_ctl_default(codec);
snd_hda_codec_write(codec, 0x0B, 0,
AC_VERB_SET_EAPD_BTLENABLE, 0x00);
- if (dspload_is_loaded(codec))
- dsp_reset(codec);
-
- snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
- VENDOR_CHIPIO_CT_EXTENSIONS_ENABLE, 0x00);
-
sbz_region2_exit(codec);
}
+static void r3d_exit_chip(struct hda_codec *codec)
+{
+ ca0132_clear_unsolicited(codec);
+ snd_hda_codec_write(codec, 0x01, 0, 0x793, 0x00);
+ snd_hda_codec_write(codec, 0x01, 0, 0x794, 0x5b);
+}
+
static void ca0132_exit_chip(struct hda_codec *codec)
{
/* put any chip cleanup stuffs here. */
@@ -7098,9 +7151,27 @@ static void sbz_pre_dsp_setup(struct hda_codec *codec)
AC_VERB_SET_PIN_WIDGET_CONTROL, 0x44);
}
-/*
- * Extra commands that don't really fit anywhere else.
- */
+static void r3d_pre_dsp_setup(struct hda_codec *codec)
+{
+
+ snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfc);
+ snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfd);
+ snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfe);
+ snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xff);
+
+ chipio_write(codec, 0x18b0a4, 0x000000c2);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x1E);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_HIGH, 0x1C);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_DATA_WRITE, 0x5B);
+
+ snd_hda_codec_write(codec, 0x11, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x44);
+}
+
static void r3di_pre_dsp_setup(struct hda_codec *codec)
{
chipio_write(codec, 0x18b0a4, 0x000000c2);
@@ -7125,13 +7196,12 @@ static void r3di_pre_dsp_setup(struct hda_codec *codec)
AC_VERB_SET_PIN_WIDGET_CONTROL, 0x04);
}
-
/*
* These are sent before the DSP is downloaded. It is unclear
* what they do or whether they're necessary; they could possibly
* be removed, but it seems safer to leave them in.
*/
-static void sbz_region2_startup(struct hda_codec *codec)
+static void ca0132_mmio_init(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
@@ -7171,7 +7241,7 @@ static void ca0132_alt_init(struct hda_codec *codec)
ca0132_gpio_init(codec);
sbz_pre_dsp_setup(codec);
snd_hda_sequence_write(codec, spec->chip_init_verbs);
- snd_hda_sequence_write(codec, spec->sbz_init_verbs);
+ snd_hda_sequence_write(codec, spec->desktop_init_verbs);
break;
case QUIRK_R3DI:
codec_dbg(codec, "R3DI alt_init");
@@ -7182,6 +7252,11 @@ static void ca0132_alt_init(struct hda_codec *codec)
snd_hda_sequence_write(codec, spec->chip_init_verbs);
snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0, 0x6FF, 0xC4);
break;
+ case QUIRK_R3D:
+ r3d_pre_dsp_setup(codec);
+ snd_hda_sequence_write(codec, spec->chip_init_verbs);
+ snd_hda_sequence_write(codec, spec->desktop_init_verbs);
+ break;
}
}
@@ -7218,8 +7293,8 @@ static int ca0132_init(struct hda_codec *codec)
spec->dsp_state = DSP_DOWNLOAD_INIT;
spec->curr_chip_addx = INVALID_CHIP_ADDRESS;
- if (spec->quirk == QUIRK_SBZ)
- sbz_region2_startup(codec);
+ if (spec->use_pci_mmio)
+ ca0132_mmio_init(codec);
snd_hda_power_up_pm(codec);
@@ -7236,14 +7311,13 @@ static int ca0132_init(struct hda_codec *codec)
ca0132_refresh_widget_caps(codec);
- if (spec->quirk == QUIRK_SBZ)
- writew(0x0107, spec->mem_base + 0x320);
-
switch (spec->quirk) {
case QUIRK_R3DI:
- r3di_setup_defaults(codec);
+ case QUIRK_R3D:
+ r3d_setup_defaults(codec);
break;
case QUIRK_SBZ:
+ sbz_setup_defaults(codec);
break;
default:
ca0132_setup_defaults(codec);
@@ -7274,20 +7348,12 @@ static int ca0132_init(struct hda_codec *codec)
ca0132_gpio_setup(codec);
snd_hda_sequence_write(codec, spec->spec_init_verbs);
- switch (spec->quirk) {
- case QUIRK_SBZ:
- sbz_setup_defaults(codec);
- ca0132_alt_select_out(codec);
- ca0132_alt_select_in(codec);
- break;
- case QUIRK_R3DI:
+ if (spec->use_alt_functions) {
ca0132_alt_select_out(codec);
ca0132_alt_select_in(codec);
- break;
- default:
+ } else {
ca0132_select_out(codec);
ca0132_select_mic(codec);
- break;
}
snd_hda_jack_report_sync(codec);
@@ -7316,16 +7382,17 @@ static void ca0132_free(struct hda_codec *codec)
case QUIRK_SBZ:
sbz_exit_chip(codec);
break;
+ case QUIRK_R3D:
+ r3d_exit_chip(codec);
+ break;
case QUIRK_R3DI:
r3di_gpio_shutdown(codec);
- snd_hda_sequence_write(codec, spec->base_exit_verbs);
- ca0132_exit_chip(codec);
- break;
- default:
- snd_hda_sequence_write(codec, spec->base_exit_verbs);
- ca0132_exit_chip(codec);
break;
}
+
+ snd_hda_sequence_write(codec, spec->base_exit_verbs);
+ ca0132_exit_chip(codec);
+
snd_hda_power_down(codec);
if (spec->mem_base)
iounmap(spec->mem_base);
@@ -7386,8 +7453,15 @@ static void ca0132_config(struct hda_codec *codec)
spec->unsol_tag_amic1 = 0x11;
break;
case QUIRK_SBZ:
- codec_dbg(codec, "%s: QUIRK_SBZ applied.\n", __func__);
- snd_hda_apply_pincfgs(codec, sbz_pincfgs);
+ case QUIRK_R3D:
+ if (spec->quirk == QUIRK_SBZ) {
+ codec_dbg(codec, "%s: QUIRK_SBZ applied.\n", __func__);
+ snd_hda_apply_pincfgs(codec, sbz_pincfgs);
+ }
+ if (spec->quirk == QUIRK_R3D) {
+ codec_dbg(codec, "%s: QUIRK_R3D applied.\n", __func__);
+ snd_hda_apply_pincfgs(codec, r3d_pincfgs);
+ }
spec->num_outputs = 2;
spec->out_pins[0] = 0x0B; /* Line out */
@@ -7473,8 +7547,8 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
struct ca0132_spec *spec = codec->spec;
spec->chip_init_verbs = ca0132_init_verbs0;
- if (spec->quirk == QUIRK_SBZ)
- spec->sbz_init_verbs = sbz_init_verbs;
+ if (spec->quirk == QUIRK_SBZ || spec->quirk == QUIRK_R3D)
+ spec->desktop_init_verbs = ca0132_init_verbs1;
spec->spec_init_verbs = kcalloc(NUM_SPEC_VERBS,
sizeof(struct hda_verb),
GFP_KERNEL);
@@ -7530,25 +7604,19 @@ static int patch_ca0132(struct hda_codec *codec)
else
spec->quirk = QUIRK_NONE;
- /* Setup BAR Region 2 for Sound Blaster Z */
- if (spec->quirk == QUIRK_SBZ) {
- spec->mem_base = pci_iomap(codec->bus->pci, 2, 0xC20);
- if (spec->mem_base == NULL) {
- codec_warn(codec, "pci_iomap failed!");
- codec_info(codec, "perhaps this is not an SBZ?");
- spec->quirk = QUIRK_NONE;
- }
- }
-
spec->dsp_state = DSP_DOWNLOAD_INIT;
spec->num_mixers = 1;
/* Set which mixers each quirk uses. */
switch (spec->quirk) {
case QUIRK_SBZ:
- spec->mixers[0] = sbz_mixer;
+ spec->mixers[0] = desktop_mixer;
snd_hda_codec_set_name(codec, "Sound Blaster Z");
break;
+ case QUIRK_R3D:
+ spec->mixers[0] = desktop_mixer;
+ snd_hda_codec_set_name(codec, "Recon3D");
+ break;
case QUIRK_R3DI:
spec->mixers[0] = r3di_mixer;
snd_hda_codec_set_name(codec, "Recon3Di");
@@ -7558,19 +7626,34 @@ static int patch_ca0132(struct hda_codec *codec)
break;
}
- /* Setup whether or not to use alt functions/controls */
+ /* Set up whether to use alt functions/controls/pci_mmio */
switch (spec->quirk) {
case QUIRK_SBZ:
+ case QUIRK_R3D:
+ spec->use_alt_controls = true;
+ spec->use_alt_functions = true;
+ spec->use_pci_mmio = true;
+ break;
case QUIRK_R3DI:
spec->use_alt_controls = true;
spec->use_alt_functions = true;
+ spec->use_pci_mmio = false;
break;
default:
spec->use_alt_controls = false;
spec->use_alt_functions = false;
+ spec->use_pci_mmio = false;
break;
}
+ if (spec->use_pci_mmio) {
+ spec->mem_base = pci_iomap(codec->bus->pci, 2, 0xC20);
+ if (spec->mem_base == NULL) {
+ codec_warn(codec, "pci_iomap failed! Setting quirk to QUIRK_NONE.");
+ spec->quirk = QUIRK_NONE;
+ }
+ }
+
spec->base_init_verbs = ca0132_base_init_verbs;
spec->base_exit_verbs = ca0132_base_exit_verbs;
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index d6e079f4ec09..a7f91be45194 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -1096,25 +1096,6 @@ static int cs421x_init(struct hda_codec *codec)
return 0;
}
-static int cs421x_build_controls(struct hda_codec *codec)
-{
- struct cs_spec *spec = codec->spec;
- int err;
-
- err = snd_hda_gen_build_controls(codec);
- if (err < 0)
- return err;
-
- if (spec->gen.autocfg.speaker_outs &&
- spec->vendor_nid == CS4210_VENDOR_NID) {
- err = snd_hda_ctl_add(codec, 0,
- snd_ctl_new1(&cs421x_speaker_boost_ctl, codec));
- if (err < 0)
- return err;
- }
- return 0;
-}
-
static void fix_volume_caps(struct hda_codec *codec, hda_nid_t dac)
{
unsigned int caps;
@@ -1144,6 +1125,14 @@ static int cs421x_parse_auto_config(struct hda_codec *codec)
return err;
parse_cs421x_digital(codec);
+
+ if (spec->gen.autocfg.speaker_outs &&
+ spec->vendor_nid == CS4210_VENDOR_NID) {
+ if (!snd_hda_gen_add_kctl(&spec->gen, NULL,
+ &cs421x_speaker_boost_ctl))
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -1175,7 +1164,7 @@ static int cs421x_suspend(struct hda_codec *codec)
#endif
static const struct hda_codec_ops cs421x_patch_ops = {
- .build_controls = cs421x_build_controls,
+ .build_controls = snd_hda_gen_build_controls,
.build_pcms = snd_hda_gen_build_pcms,
.init = cs421x_init,
.free = cs_free,
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index f641c20095f7..cfd4e4f97f8f 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -37,8 +37,6 @@
struct conexant_spec {
struct hda_gen_spec gen;
- unsigned int beep_amp;
-
/* extra EAPD pins */
unsigned int num_eapds;
hda_nid_t eapds[4];
@@ -62,65 +60,48 @@ struct conexant_spec {
#ifdef CONFIG_SND_HDA_INPUT_BEEP
-static inline void set_beep_amp(struct conexant_spec *spec, hda_nid_t nid,
- int idx, int dir)
-{
- spec->gen.beep_nid = nid;
- spec->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 1, idx, dir);
-}
-/* additional beep mixers; the actual parameters are overwritten at build */
+/* additional beep mixers; private_value will be overwritten */
static const struct snd_kcontrol_new cxt_beep_mixer[] = {
HDA_CODEC_VOLUME_MONO("Beep Playback Volume", 0, 1, 0, HDA_OUTPUT),
HDA_CODEC_MUTE_BEEP_MONO("Beep Playback Switch", 0, 1, 0, HDA_OUTPUT),
- { } /* end */
};
-/* create beep controls if needed */
-static int add_beep_ctls(struct hda_codec *codec)
+static int set_beep_amp(struct conexant_spec *spec, hda_nid_t nid,
+ int idx, int dir)
{
- struct conexant_spec *spec = codec->spec;
- int err;
+ struct snd_kcontrol_new *knew;
+ unsigned int beep_amp = HDA_COMPOSE_AMP_VAL(nid, 1, idx, dir);
+ int i;
- if (spec->beep_amp) {
- const struct snd_kcontrol_new *knew;
- for (knew = cxt_beep_mixer; knew->name; knew++) {
- struct snd_kcontrol *kctl;
- kctl = snd_ctl_new1(knew, codec);
- if (!kctl)
- return -ENOMEM;
- kctl->private_value = spec->beep_amp;
- err = snd_hda_ctl_add(codec, 0, kctl);
- if (err < 0)
- return err;
- }
+ spec->gen.beep_nid = nid;
+ for (i = 0; i < ARRAY_SIZE(cxt_beep_mixer); i++) {
+ knew = snd_hda_gen_add_kctl(&spec->gen, NULL,
+ &cxt_beep_mixer[i]);
+ if (!knew)
+ return -ENOMEM;
+ knew->private_value = beep_amp;
}
return 0;
}
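The switch to snd_hda_gen_add_kctl() hands ownership of the beep controls
to the generic parser: they are instantiated with the rest of the kctls in
snd_hda_gen_build_controls() and released together with the spec. That is
what allows the cx_auto_build_controls() wrapper to be dropped below and
build_controls to point straight at snd_hda_gen_build_controls.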
-#else
-#define set_beep_amp(spec, nid, idx, dir) /* NOP */
-#define add_beep_ctls(codec) 0
-#endif
-
-/*
- * Automatic parser for CX20641 & co
- */
-#ifdef CONFIG_SND_HDA_INPUT_BEEP
-static void cx_auto_parse_beep(struct hda_codec *codec)
+static int cx_auto_parse_beep(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
hda_nid_t nid;
for_each_hda_codec_node(nid, codec)
- if (get_wcaps_type(get_wcaps(codec, nid)) == AC_WID_BEEP) {
- set_beep_amp(spec, nid, 0, HDA_OUTPUT);
- break;
- }
+ if (get_wcaps_type(get_wcaps(codec, nid)) == AC_WID_BEEP)
+ return set_beep_amp(spec, nid, 0, HDA_OUTPUT);
+ return 0;
}
#else
-#define cx_auto_parse_beep(codec)
+#define cx_auto_parse_beep(codec) 0
#endif
+/*
+ * Automatic parser for CX20641 & co
+ */
+
/* parse EAPDs */
static void cx_auto_parse_eapd(struct hda_codec *codec)
{
@@ -179,21 +160,6 @@ static void cx_auto_vmaster_hook_mute_led(void *private_data, int enabled)
enabled ? 0x00 : 0x02);
}
-static int cx_auto_build_controls(struct hda_codec *codec)
-{
- int err;
-
- err = snd_hda_gen_build_controls(codec);
- if (err < 0)
- return err;
-
- err = add_beep_ctls(codec);
- if (err < 0)
- return err;
-
- return 0;
-}
-
static int cx_auto_init(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
@@ -211,6 +177,7 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
struct conexant_spec *spec = codec->spec;
switch (codec->core.vendor_id) {
+ case 0x14f12008: /* CX8200 */
case 0x14f150f2: /* CX20722 */
case 0x14f150f4: /* CX20724 */
break;
@@ -218,13 +185,14 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
return;
}
- /* Turn the CX20722 codec into D3 to avoid spurious noises
+ /* Turn the problematic codec into D3 to avoid spurious noises
from the internal speaker during (and after) reboot */
cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
snd_hda_codec_write(codec, codec->core.afg, 0,
AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+ msleep(10);
}
static void cx_auto_free(struct hda_codec *codec)
@@ -234,7 +202,7 @@ static void cx_auto_free(struct hda_codec *codec)
}
static const struct hda_codec_ops cx_auto_patch_ops = {
- .build_controls = cx_auto_build_controls,
+ .build_controls = snd_hda_gen_build_controls,
.build_pcms = snd_hda_gen_build_pcms,
.init = cx_auto_init,
.reboot_notify = cx_auto_reboot_notify,
@@ -343,6 +311,7 @@ static void cxt_fixup_headphone_mic(struct hda_codec *codec,
snd_hdac_regmap_add_vendor_verb(&codec->core, 0x410);
break;
case HDA_FIXUP_ACT_PROBE:
+ WARN_ON(spec->gen.cap_sync_hook);
spec->gen.cap_sync_hook = cxt_update_headset_mode_hook;
spec->gen.automute_hook = cxt_update_headset_mode;
break;
@@ -374,7 +343,7 @@ static void cxt_fixup_headset_mic(struct hda_codec *codec,
* control. */
#define update_mic_pin(codec, nid, val) \
- snd_hda_codec_update_cache(codec, nid, 0, \
+ snd_hda_codec_write_cache(codec, nid, 0, \
AC_VERB_SET_PIN_WIDGET_CONTROL, val)
static const struct hda_input_mux olpc_xo_dc_bias = {
@@ -695,16 +664,12 @@ static void cxt_fixup_gpio_mute_hook(void *private_data, int enabled)
}
/* turn on/off mic-mute LED via GPIO per capture hook */
-static void cxt_fixup_gpio_mic_mute_hook(struct hda_codec *codec,
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static void cxt_gpio_micmute_update(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
- if (ucontrol)
- cxt_update_gpio_led(codec, spec->gpio_mic_led_mask,
- ucontrol->value.integer.value[0] ||
- ucontrol->value.integer.value[1]);
+ cxt_update_gpio_led(codec, spec->gpio_mic_led_mask,
+ spec->gen.micmute_led.led_value);
}
@@ -721,11 +686,11 @@ static void cxt_fixup_mute_led_gpio(struct hda_codec *codec,
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook;
- spec->gen.cap_sync_hook = cxt_fixup_gpio_mic_mute_hook;
spec->gpio_led = 0;
spec->mute_led_polarity = 0;
spec->gpio_mute_led_mask = 0x01;
spec->gpio_mic_led_mask = 0x02;
+ snd_hda_gen_add_micmute_led(codec, cxt_gpio_micmute_update);
}
snd_hda_add_verbs(codec, gpio_init);
if (spec->gpio_led)
@@ -1037,7 +1002,6 @@ static int patch_conexant_auto(struct hda_codec *codec)
codec->spec = spec;
codec->patch_ops = cx_auto_patch_ops;
- cx_auto_parse_beep(codec);
cx_auto_parse_eapd(codec);
spec->gen.own_eapd_ctl = 1;
if (spec->dynamic_eapd)
@@ -1097,6 +1061,10 @@ static int patch_conexant_auto(struct hda_codec *codec)
if (err < 0)
goto error;
+ err = cx_auto_parse_beep(codec);
+ if (err < 0)
+ goto error;
+
/* Some laptops with Conexant chips show stalls in S3 resume,
* which falls into the single-cmd mode.
* Better to make reset, then.
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 8a49415aebac..cb587dce67a9 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -177,13 +177,13 @@ struct hdmi_spec {
/* i915/powerwell (Haswell+/Valleyview+) specific */
bool use_acomp_notifier; /* use i915 eld_notify callback for hotplug */
- struct i915_audio_component_audio_ops i915_audio_ops;
+ struct drm_audio_component_audio_ops drm_audio_ops;
struct hdac_chmap chmap;
hda_nid_t vendor_nid;
};
-#ifdef CONFIG_SND_HDA_I915
+#ifdef CONFIG_SND_HDA_COMPONENT
static inline bool codec_has_acomp(struct hda_codec *codec)
{
struct hdmi_spec *spec = codec->spec;
@@ -339,13 +339,13 @@ static int hdmi_eld_ctl_info(struct snd_kcontrol *kcontrol,
if (!per_pin) {
/* no pin is bound to the pcm */
uinfo->count = 0;
- mutex_unlock(&spec->pcm_lock);
- return 0;
+ goto unlock;
}
eld = &per_pin->sink_eld;
uinfo->count = eld->eld_valid ? eld->eld_size : 0;
- mutex_unlock(&spec->pcm_lock);
+ unlock:
+ mutex_unlock(&spec->pcm_lock);
return 0;
}
@@ -357,6 +357,7 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
struct hdmi_spec_per_pin *per_pin;
struct hdmi_eld *eld;
int pcm_idx;
+ int err = 0;
pcm_idx = kcontrol->private_value;
mutex_lock(&spec->pcm_lock);
@@ -365,16 +366,15 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
/* no pin is bound to the pcm */
memset(ucontrol->value.bytes.data, 0,
ARRAY_SIZE(ucontrol->value.bytes.data));
- mutex_unlock(&spec->pcm_lock);
- return 0;
+ goto unlock;
}
- eld = &per_pin->sink_eld;
+ eld = &per_pin->sink_eld;
if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) ||
eld->eld_size > ELD_MAX_SIZE) {
- mutex_unlock(&spec->pcm_lock);
snd_BUG();
- return -EINVAL;
+ err = -EINVAL;
+ goto unlock;
}
memset(ucontrol->value.bytes.data, 0,
@@ -382,9 +382,10 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
if (eld->eld_valid)
memcpy(ucontrol->value.bytes.data, eld->eld_buffer,
eld->eld_size);
- mutex_unlock(&spec->pcm_lock);
- return 0;
+ unlock:
+ mutex_unlock(&spec->pcm_lock);
+ return err;
}
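The same conversion pattern repeats throughout this file: every early
"mutex_unlock(); return val;" pair becomes "err = val; goto unlock;" so a
single unlock/return sequence at the end covers every exit path. A minimal
sketch of the shape:

	int err = 0;

	mutex_lock(&lock);
	if (bad_input) {
		err = -EINVAL;
		goto unlock;	/* no exit can skip the unlock below */
	}
	err = do_work();
 unlock:
	mutex_unlock(&lock);
	return err;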
static const struct snd_kcontrol_new eld_bytes_ctl = {
@@ -1209,8 +1210,8 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
pin_idx = hinfo_to_pin_index(codec, hinfo);
if (!spec->dyn_pcm_assign) {
if (snd_BUG_ON(pin_idx < 0)) {
- mutex_unlock(&spec->pcm_lock);
- return -EINVAL;
+ err = -EINVAL;
+ goto unlock;
}
} else {
/* no pin is assigned to the PCM
@@ -1218,16 +1219,13 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
*/
if (pin_idx < 0) {
err = hdmi_pcm_open_no_pin(hinfo, codec, substream);
- mutex_unlock(&spec->pcm_lock);
- return err;
+ goto unlock;
}
}
err = hdmi_choose_cvt(codec, pin_idx, &cvt_idx);
- if (err < 0) {
- mutex_unlock(&spec->pcm_lock);
- return err;
- }
+ if (err < 0)
+ goto unlock;
per_cvt = get_cvt(spec, cvt_idx);
/* Claim converter */
@@ -1264,12 +1262,11 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
per_cvt->assigned = 0;
hinfo->nid = 0;
snd_hda_spdif_ctls_unassign(codec, pcm_idx);
- mutex_unlock(&spec->pcm_lock);
- return -ENODEV;
+ err = -ENODEV;
+ goto unlock;
}
}
- mutex_unlock(&spec->pcm_lock);
/* Store the updated parameters */
runtime->hw.channels_min = hinfo->channels_min;
runtime->hw.channels_max = hinfo->channels_max;
@@ -1278,7 +1275,9 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
snd_pcm_hw_constraint_step(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_CHANNELS, 2);
- return 0;
+ unlock:
+ mutex_unlock(&spec->pcm_lock);
+ return err;
}
/*
@@ -1867,7 +1866,7 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
struct snd_pcm_runtime *runtime = substream->runtime;
bool non_pcm;
int pinctl;
- int err;
+ int err = 0;
mutex_lock(&spec->pcm_lock);
pin_idx = hinfo_to_pin_index(codec, hinfo);
@@ -1879,13 +1878,12 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
pin_cvt_fixup(codec, NULL, cvt_nid);
snd_hda_codec_setup_stream(codec, cvt_nid,
stream_tag, 0, format);
- mutex_unlock(&spec->pcm_lock);
- return 0;
+ goto unlock;
}
if (snd_BUG_ON(pin_idx < 0)) {
- mutex_unlock(&spec->pcm_lock);
- return -EINVAL;
+ err = -EINVAL;
+ goto unlock;
}
per_pin = get_pin(spec, pin_idx);
pin_nid = per_pin->pin_nid;
@@ -1924,6 +1922,7 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
/* snd_hda_set_dev_select() has been called before */
err = spec->ops.setup_stream(codec, cvt_nid, pin_nid,
stream_tag, format);
+ unlock:
mutex_unlock(&spec->pcm_lock);
return err;
}
@@ -1945,6 +1944,7 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
struct hdmi_spec_per_cvt *per_cvt;
struct hdmi_spec_per_pin *per_pin;
int pinctl;
+ int err = 0;
if (hinfo->nid) {
pcm_idx = hinfo_to_pcm_index(codec, hinfo);
@@ -1963,14 +1963,12 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
snd_hda_spdif_ctls_unassign(codec, pcm_idx);
clear_bit(pcm_idx, &spec->pcm_in_use);
pin_idx = hinfo_to_pin_index(codec, hinfo);
- if (spec->dyn_pcm_assign && pin_idx < 0) {
- mutex_unlock(&spec->pcm_lock);
- return 0;
- }
+ if (spec->dyn_pcm_assign && pin_idx < 0)
+ goto unlock;
if (snd_BUG_ON(pin_idx < 0)) {
- mutex_unlock(&spec->pcm_lock);
- return -EINVAL;
+ err = -EINVAL;
+ goto unlock;
}
per_pin = get_pin(spec, pin_idx);
@@ -1989,10 +1987,11 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
per_pin->setup = false;
per_pin->channels = 0;
mutex_unlock(&per_pin->lock);
+ unlock:
mutex_unlock(&spec->pcm_lock);
}
- return 0;
+ return err;
}
static const struct hda_pcm_ops generic_ops = {
@@ -2288,7 +2287,7 @@ static void generic_hdmi_free(struct hda_codec *codec)
int pin_idx, pcm_idx;
if (codec_has_acomp(codec))
- snd_hdac_i915_register_notifier(NULL);
+ snd_hdac_acomp_register_notifier(&codec->bus->core, NULL);
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
@@ -2471,6 +2470,38 @@ static void haswell_set_power_state(struct hda_codec *codec, hda_nid_t fg,
snd_hda_codec_set_power_to_all(codec, fg, power_state);
}
+/* There is a fixed mapping between audio pin node and display port.
+ * On SNB, IVY, HSW, BSW, SKL, BXT, KBL:
+ * Pin Widget 5 - PORT B (port = 1 in i915 driver)
+ * Pin Widget 6 - PORT C (port = 2 in i915 driver)
+ * Pin Widget 7 - PORT D (port = 3 in i915 driver)
+ *
+ * On VLV, ILK:
+ * Pin Widget 4 - PORT B (port = 1 in i915 driver)
+ * Pin Widget 5 - PORT C (port = 2 in i915 driver)
+ * Pin Widget 6 - PORT D (port = 3 in i915 driver)
+ */
+static int intel_base_nid(struct hda_codec *codec)
+{
+ switch (codec->core.vendor_id) {
+ case 0x80860054: /* ILK */
+ case 0x80862804: /* ILK */
+ case 0x80862882: /* VLV */
+ return 4;
+ default:
+ return 5;
+ }
+}
+
+static int intel_pin2port(void *audio_ptr, int pin_nid)
+{
+ int base_nid = intel_base_nid(audio_ptr);
+
+ if (WARN_ON(pin_nid < base_nid || pin_nid >= base_nid + 3))
+ return -1;
+ return pin_nid - base_nid + 1; /* intel port is 1-based */
+}
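Worked example: on HSW-class codecs intel_base_nid() returns 5, so pin
widget 6 maps to port 6 - 5 + 1 = 2 (PORT C); on VLV or ILK the base is 4,
and the same widget would map to port 3 (PORT D).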
+
static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
{
struct hda_codec *codec = audio_ptr;
@@ -2481,16 +2512,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
if (port < 1 || port > 3)
return;
- switch (codec->core.vendor_id) {
- case 0x80860054: /* ILK */
- case 0x80862804: /* ILK */
- case 0x80862882: /* VLV */
- pin_nid = port + 0x03;
- break;
- default:
- pin_nid = port + 0x04;
- break;
- }
+ pin_nid = port + intel_base_nid(codec) - 1; /* intel port is 1-based */
/* skip notification during system suspend (but not in runtime PM);
* the state will be updated at resume
@@ -2498,7 +2520,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
return;
/* ditto during suspend/resume process itself */
- if (atomic_read(&(codec)->core.in_pm))
+ if (snd_hdac_is_in_pm(&codec->core))
return;
snd_hdac_i915_set_bclk(&codec->bus->core);
@@ -2511,14 +2533,16 @@ static void register_i915_notifier(struct hda_codec *codec)
struct hdmi_spec *spec = codec->spec;
spec->use_acomp_notifier = true;
- spec->i915_audio_ops.audio_ptr = codec;
+ spec->drm_audio_ops.audio_ptr = codec;
/* intel_audio_codec_enable() or intel_audio_codec_disable()
* will call pin_eld_notify using the audio_ptr pointer,
* so we need to make sure audio_ptr is really set up beforehand
*/
wmb();
- spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify;
- snd_hdac_i915_register_notifier(&spec->i915_audio_ops);
+ spec->drm_audio_ops.pin2port = intel_pin2port;
+ spec->drm_audio_ops.pin_eld_notify = intel_pin_eld_notify;
+ snd_hdac_acomp_register_notifier(&codec->bus->core,
+ &spec->drm_audio_ops);
}
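Unlike the old i915-only notifier, the generic DRM audio component keys its
callbacks by port rather than pin, so the codec now registers the
intel_pin2port() translation above alongside pin_eld_notify; the wmb()
ensures audio_ptr is visible before either callback can fire.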
/* setup_stream ops override for HSW+ */
@@ -2551,6 +2575,8 @@ static int alloc_intel_hdmi(struct hda_codec *codec)
/* requires i915 binding */
if (!codec->bus->core.audio_component) {
codec_info(codec, "No i915 binding for Intel HDMI/DP codec\n");
+ /* set probe_id here to prevent generic fallback binding */
+ codec->probe_id = HDA_CODEC_ID_SKIP_PROBE;
return -ENODEV;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f6af3e1c2b93..b20974ef1e13 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -43,11 +43,9 @@
/* extra amp-initialization sequence types */
enum {
+ ALC_INIT_UNDEFINED,
ALC_INIT_NONE,
ALC_INIT_DEFAULT,
- ALC_INIT_GPIO1,
- ALC_INIT_GPIO2,
- ALC_INIT_GPIO3,
};
enum {
@@ -85,19 +83,20 @@ struct alc_spec {
struct hda_gen_spec gen; /* must be at head */
/* codec parameterization */
- const struct snd_kcontrol_new *mixers[5]; /* mixer arrays */
- unsigned int num_mixers;
- unsigned int beep_amp; /* beep amp value, set via set_beep_amp() */
-
struct alc_customize_define cdefine;
unsigned int parse_flags; /* flag for snd_hda_parse_pin_defcfg() */
+ /* GPIO bits */
+ unsigned int gpio_mask;
+ unsigned int gpio_dir;
+ unsigned int gpio_data;
+ bool gpio_write_delay; /* add a delay before writing gpio_data */
+
/* mute LED for HP laptops, see alc269_fixup_mic_mute_hook() */
int mute_led_polarity;
hda_nid_t mute_led_nid;
hda_nid_t cap_mute_led_nid;
- unsigned int gpio_led; /* used for alc269_fixup_hp_gpio_led() */
unsigned int gpio_mute_led_mask;
unsigned int gpio_mic_led_mask;
@@ -205,41 +204,87 @@ static void alc_process_coef_fw(struct hda_codec *codec,
}
/*
- * Append the given mixer and verb elements for the later use
- * The mixer array is referred in build_controls(), and init_verbs are
- * called in init().
+ * GPIO setup helpers, used in initialization
*/
-static void add_mixer(struct alc_spec *spec, const struct snd_kcontrol_new *mix)
+
+/* Enable GPIO mask and set output */
+static void alc_setup_gpio(struct hda_codec *codec, unsigned int mask)
+{
+ struct alc_spec *spec = codec->spec;
+
+ spec->gpio_mask |= mask;
+ spec->gpio_dir |= mask;
+ spec->gpio_data |= mask;
+}
+
+static void alc_write_gpio_data(struct hda_codec *codec)
{
- if (snd_BUG_ON(spec->num_mixers >= ARRAY_SIZE(spec->mixers)))
+ struct alc_spec *spec = codec->spec;
+
+ snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
+ spec->gpio_data);
+}
+
+static void alc_update_gpio_data(struct hda_codec *codec, unsigned int mask,
+ bool on)
+{
+ struct alc_spec *spec = codec->spec;
+ unsigned int oldval = spec->gpio_data;
+
+ if (on)
+ spec->gpio_data |= mask;
+ else
+ spec->gpio_data &= ~mask;
+ if (oldval != spec->gpio_data)
+ alc_write_gpio_data(codec);
+}
+
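/* Editor's note (illustrative, not part of this patch): because gpio_data
 * is cached in alc_spec, repeated updates are cheap:
 *
 *   alc_update_gpio_data(codec, 0x02, true);   // writes GPIO data once
 *   alc_update_gpio_data(codec, 0x02, true);   // no-op, value unchanged
 *   alc_update_gpio_data(codec, 0x02, false);  // bit cleared, written again
 */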
+static void alc_write_gpio(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (!spec->gpio_mask)
return;
- spec->mixers[spec->num_mixers++] = mix;
+
+ snd_hda_codec_write(codec, codec->core.afg, 0,
+ AC_VERB_SET_GPIO_MASK, spec->gpio_mask);
+ snd_hda_codec_write(codec, codec->core.afg, 0,
+ AC_VERB_SET_GPIO_DIRECTION, spec->gpio_dir);
+ if (spec->gpio_write_delay)
+ msleep(1);
+ alc_write_gpio_data(codec);
}
-/*
- * GPIO setup tables, used in initialization
- */
-/* Enable GPIO mask and set output */
-static const struct hda_verb alc_gpio1_init_verbs[] = {
- {0x01, AC_VERB_SET_GPIO_MASK, 0x01},
- {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x01},
- {0x01, AC_VERB_SET_GPIO_DATA, 0x01},
- { }
-};
+static void alc_fixup_gpio(struct hda_codec *codec, int action,
+ unsigned int mask)
+{
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ alc_setup_gpio(codec, mask);
+}
-static const struct hda_verb alc_gpio2_init_verbs[] = {
- {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
- {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
- {0x01, AC_VERB_SET_GPIO_DATA, 0x02},
- { }
-};
+static void alc_fixup_gpio1(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ alc_fixup_gpio(codec, action, 0x01);
+}
-static const struct hda_verb alc_gpio3_init_verbs[] = {
- {0x01, AC_VERB_SET_GPIO_MASK, 0x03},
- {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03},
- {0x01, AC_VERB_SET_GPIO_DATA, 0x03},
- { }
-};
+static void alc_fixup_gpio2(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ alc_fixup_gpio(codec, action, 0x02);
+}
+
+static void alc_fixup_gpio3(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ alc_fixup_gpio(codec, action, 0x03);
+}
+
+static void alc_fixup_gpio4(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ alc_fixup_gpio(codec, action, 0x04);
+}
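/* Editor's note: the wrapper names follow the old verb tables' mask values,
 * not GPIO line indices. alc_fixup_gpio3 drives mask 0x03 (two lines),
 * while alc_fixup_gpio4 drives mask 0x04, the single line at bit 2; that is
 * why ALC256_FIXUP_ASUS_AIO_GPIO2 below pairs a "GPIO2 for the speaker amp"
 * comment with alc_fixup_gpio4.
 */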
/*
* Fix hardware PLL issue
@@ -447,16 +492,8 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
{
alc_fill_eapd_coef(codec);
alc_auto_setup_eapd(codec, true);
+ alc_write_gpio(codec);
switch (type) {
- case ALC_INIT_GPIO1:
- snd_hda_sequence_write(codec, alc_gpio1_init_verbs);
- break;
- case ALC_INIT_GPIO2:
- snd_hda_sequence_write(codec, alc_gpio2_init_verbs);
- break;
- case ALC_INIT_GPIO3:
- snd_hda_sequence_write(codec, alc_gpio3_init_verbs);
- break;
case ALC_INIT_DEFAULT:
switch (codec->core.vendor_id) {
case 0x10ec0260:
@@ -656,20 +693,22 @@ do_sku:
* 7~6 : Reserved
*/
tmp = (ass & 0x38) >> 3; /* external Amp control */
- switch (tmp) {
- case 1:
- spec->init_amp = ALC_INIT_GPIO1;
- break;
- case 3:
- spec->init_amp = ALC_INIT_GPIO2;
- break;
- case 7:
- spec->init_amp = ALC_INIT_GPIO3;
- break;
- case 5:
- default:
- spec->init_amp = ALC_INIT_DEFAULT;
- break;
+ if (spec->init_amp == ALC_INIT_UNDEFINED) {
+ switch (tmp) {
+ case 1:
+ alc_setup_gpio(codec, 0x01);
+ break;
+ case 3:
+ alc_setup_gpio(codec, 0x02);
+ break;
+ case 7:
+ alc_setup_gpio(codec, 0x03);
+ break;
+ case 5:
+ default:
+ spec->init_amp = ALC_INIT_DEFAULT;
+ break;
+ }
}
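/* Editor's note: the ALC_INIT_UNDEFINED guard above lets a fixup claim
 * init_amp (or set up its own GPIO bits) before this SKU auto-detection
 * runs; only a still-undefined spec falls back to the SKU's external-amp
 * GPIO selection.
 */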
/* is laptop or Desktop and enable the function "Mute internal speaker
@@ -722,47 +761,14 @@ static void alc_fixup_inv_dmic(struct hda_codec *codec,
}
-#ifdef CONFIG_SND_HDA_INPUT_BEEP
-/* additional beep mixers; the actual parameters are overwritten at build */
-static const struct snd_kcontrol_new alc_beep_mixer[] = {
- HDA_CODEC_VOLUME("Beep Playback Volume", 0, 0, HDA_INPUT),
- HDA_CODEC_MUTE_BEEP("Beep Playback Switch", 0, 0, HDA_INPUT),
- { } /* end */
-};
-#endif
-
static int alc_build_controls(struct hda_codec *codec)
{
- struct alc_spec *spec = codec->spec;
- int i, err;
+ int err;
err = snd_hda_gen_build_controls(codec);
if (err < 0)
return err;
- for (i = 0; i < spec->num_mixers; i++) {
- err = snd_hda_add_new_ctls(codec, spec->mixers[i]);
- if (err < 0)
- return err;
- }
-
-#ifdef CONFIG_SND_HDA_INPUT_BEEP
- /* create beep controls if needed */
- if (spec->beep_amp) {
- const struct snd_kcontrol_new *knew;
- for (knew = alc_beep_mixer; knew->name; knew++) {
- struct snd_kcontrol *kctl;
- kctl = snd_ctl_new1(knew, codec);
- if (!kctl)
- return -ENOMEM;
- kctl->private_value = spec->beep_amp;
- err = snd_hda_ctl_add(codec, 0, kctl);
- if (err < 0)
- return err;
- }
- }
-#endif
-
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_BUILD);
return 0;
}
@@ -973,8 +979,30 @@ static int alc_codec_rename_from_preset(struct hda_codec *codec)
* Digital-beep handlers
*/
#ifdef CONFIG_SND_HDA_INPUT_BEEP
-#define set_beep_amp(spec, nid, idx, dir) \
- ((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 3, idx, dir))
+
+/* additional beep mixers; private_value will be overwritten */
+static const struct snd_kcontrol_new alc_beep_mixer[] = {
+ HDA_CODEC_VOLUME("Beep Playback Volume", 0, 0, HDA_INPUT),
+ HDA_CODEC_MUTE_BEEP("Beep Playback Switch", 0, 0, HDA_INPUT),
+};
+
+/* set up and create beep controls */
+static int set_beep_amp(struct alc_spec *spec, hda_nid_t nid,
+ int idx, int dir)
+{
+ struct snd_kcontrol_new *knew;
+ unsigned int beep_amp = HDA_COMPOSE_AMP_VAL(nid, 3, idx, dir);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(alc_beep_mixer); i++) {
+ knew = snd_hda_gen_add_kctl(&spec->gen, NULL,
+ &alc_beep_mixer[i]);
+ if (!knew)
+ return -ENOMEM;
+ knew->private_value = beep_amp;
+ }
+ return 0;
+}
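/* Editor's note: a minimal usage sketch, not part of the patch. The beep
 * controls are now owned by the generic parser, so callers only have to
 * propagate errors:
 *
 *   if (!spec->gen.no_analog) {
 *           err = set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
 *           if (err < 0)
 *                   goto error;
 *   }
 */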
static const struct snd_pci_quirk beep_white_list[] = {
SND_PCI_QUIRK(0x1043, 0x103c, "ASUS", 1),
@@ -999,7 +1027,7 @@ static inline int has_cdefine_beep(struct hda_codec *codec)
return spec->cdefine.enable_pcbeep;
}
#else
-#define set_beep_amp(spec, nid, idx, dir) /* NOP */
+#define set_beep_amp(spec, nid, idx, dir) 0
#define has_cdefine_beep(codec) 0
#endif
@@ -1104,12 +1132,12 @@ static void alc880_fixup_vol_knob(struct hda_codec *codec,
static const struct hda_fixup alc880_fixups[] = {
[ALC880_FIXUP_GPIO1] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = alc_gpio1_init_verbs,
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_gpio1,
},
[ALC880_FIXUP_GPIO2] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = alc_gpio2_init_verbs,
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_gpio2,
},
[ALC880_FIXUP_MEDION_RIM] = {
.type = HDA_FIXUP_VERBS,
@@ -1501,8 +1529,11 @@ static int patch_alc880(struct hda_codec *codec)
if (err < 0)
goto error;
- if (!spec->gen.no_analog)
- set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
+ if (!spec->gen.no_analog) {
+ err = set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
+ if (err < 0)
+ goto error;
+ }
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
@@ -1544,8 +1575,8 @@ enum {
static void alc260_gpio1_automute(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
- spec->gen.hp_jack_present);
+
+ alc_update_gpio_data(codec, 0x01, spec->gen.hp_jack_present);
}
static void alc260_fixup_gpio1_toggle(struct hda_codec *codec,
@@ -1562,7 +1593,7 @@ static void alc260_fixup_gpio1_toggle(struct hda_codec *codec,
spec->gen.autocfg.hp_pins[0] = 0x0f; /* copy it for automute */
snd_hda_jack_detect_enable_callback(codec, 0x0f,
snd_hda_gen_hp_automute);
- snd_hda_add_verbs(codec, alc_gpio1_init_verbs);
+ alc_setup_gpio(codec, 0x01);
}
}
@@ -1589,8 +1620,6 @@ static void alc260_fixup_kn1(struct hda_codec *codec,
switch (action) {
case HDA_FIXUP_ACT_PRE_PROBE:
snd_hda_apply_pincfgs(codec, pincfgs);
- break;
- case HDA_FIXUP_ACT_PROBE:
spec->init_amp = ALC_INIT_NONE;
break;
}
@@ -1600,7 +1629,7 @@ static void alc260_fixup_fsc_s7020(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
struct alc_spec *spec = codec->spec;
- if (action == HDA_FIXUP_ACT_PROBE)
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
spec->init_amp = ALC_INIT_NONE;
}
@@ -1638,8 +1667,8 @@ static const struct hda_fixup alc260_fixups[] = {
},
},
[ALC260_FIXUP_GPIO1] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = alc_gpio1_init_verbs,
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_gpio1,
},
[ALC260_FIXUP_GPIO1_TOGGLE] = {
.type = HDA_FIXUP_FUNC,
@@ -1751,8 +1780,11 @@ static int patch_alc260(struct hda_codec *codec)
if (err < 0)
goto error;
- if (!spec->gen.no_analog)
- set_beep_amp(spec, 0x07, 0x05, HDA_INPUT);
+ if (!spec->gen.no_analog) {
+ err = set_beep_amp(spec, 0x07, 0x05, HDA_INPUT);
+ if (err < 0)
+ goto error;
+ }
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
@@ -1824,47 +1856,14 @@ static void alc889_fixup_coef(struct hda_codec *codec,
alc_update_coef_idx(codec, 7, 0, 0x2030);
}
-/* toggle speaker-output according to the hp-jack state */
-static void alc882_gpio_mute(struct hda_codec *codec, int pin, int muted)
-{
- unsigned int gpiostate, gpiomask, gpiodir;
-
- gpiostate = snd_hda_codec_read(codec, codec->core.afg, 0,
- AC_VERB_GET_GPIO_DATA, 0);
-
- if (!muted)
- gpiostate |= (1 << pin);
- else
- gpiostate &= ~(1 << pin);
-
- gpiomask = snd_hda_codec_read(codec, codec->core.afg, 0,
- AC_VERB_GET_GPIO_MASK, 0);
- gpiomask |= (1 << pin);
-
- gpiodir = snd_hda_codec_read(codec, codec->core.afg, 0,
- AC_VERB_GET_GPIO_DIRECTION, 0);
- gpiodir |= (1 << pin);
-
-
- snd_hda_codec_write(codec, codec->core.afg, 0,
- AC_VERB_SET_GPIO_MASK, gpiomask);
- snd_hda_codec_write(codec, codec->core.afg, 0,
- AC_VERB_SET_GPIO_DIRECTION, gpiodir);
-
- msleep(1);
-
- snd_hda_codec_write(codec, codec->core.afg, 0,
- AC_VERB_SET_GPIO_DATA, gpiostate);
-}
-
/* set up GPIO at initialization */
static void alc885_fixup_macpro_gpio(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
- if (action != HDA_FIXUP_ACT_INIT)
- return;
- alc882_gpio_mute(codec, 0, 0);
- alc882_gpio_mute(codec, 1, 0);
+ struct alc_spec *spec = codec->spec;
+
+ spec->gpio_write_delay = true;
+ alc_fixup_gpio3(codec, fix, action);
}
/* Fix the connection of some pins for ALC889:
@@ -2143,20 +2142,20 @@ static const struct hda_fixup alc882_fixups[] = {
}
},
[ALC882_FIXUP_GPIO1] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = alc_gpio1_init_verbs,
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_gpio1,
},
[ALC882_FIXUP_GPIO2] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = alc_gpio2_init_verbs,
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_gpio2,
},
[ALC882_FIXUP_GPIO3] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = alc_gpio3_init_verbs,
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_gpio3,
},
[ALC882_FIXUP_ASUS_W2JC] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = alc_gpio1_init_verbs,
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_gpio1,
.chained = true,
.chain_id = ALC882_FIXUP_EAPD,
},
@@ -2376,12 +2375,37 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
};
static const struct hda_model_fixup alc882_fixup_models[] = {
+ {.id = ALC882_FIXUP_ABIT_AW9D_MAX, .name = "abit-aw9d"},
+ {.id = ALC882_FIXUP_LENOVO_Y530, .name = "lenovo-y530"},
+ {.id = ALC882_FIXUP_ACER_ASPIRE_7736, .name = "acer-aspire-7736"},
+ {.id = ALC882_FIXUP_ASUS_W90V, .name = "asus-w90v"},
+ {.id = ALC889_FIXUP_CD, .name = "cd"},
+ {.id = ALC889_FIXUP_FRONT_HP_NO_PRESENCE, .name = "no-front-hp"},
+ {.id = ALC889_FIXUP_VAIO_TT, .name = "vaio-tt"},
+ {.id = ALC888_FIXUP_EEE1601, .name = "eee1601"},
+ {.id = ALC882_FIXUP_EAPD, .name = "alc882-eapd"},
+ {.id = ALC883_FIXUP_EAPD, .name = "alc883-eapd"},
+ {.id = ALC882_FIXUP_GPIO1, .name = "gpio1"},
+ {.id = ALC882_FIXUP_GPIO2, .name = "gpio2"},
+ {.id = ALC882_FIXUP_GPIO3, .name = "gpio3"},
+ {.id = ALC889_FIXUP_COEF, .name = "alc889-coef"},
+ {.id = ALC882_FIXUP_ASUS_W2JC, .name = "asus-w2jc"},
{.id = ALC882_FIXUP_ACER_ASPIRE_4930G, .name = "acer-aspire-4930g"},
{.id = ALC882_FIXUP_ACER_ASPIRE_8930G, .name = "acer-aspire-8930g"},
{.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"},
+ {.id = ALC885_FIXUP_MACPRO_GPIO, .name = "macpro-gpio"},
+ {.id = ALC889_FIXUP_DAC_ROUTE, .name = "dac-route"},
+ {.id = ALC889_FIXUP_MBP_VREF, .name = "mbp-vref"},
+ {.id = ALC889_FIXUP_IMAC91_VREF, .name = "imac91-vref"},
+ {.id = ALC889_FIXUP_MBA11_VREF, .name = "mba11-vref"},
+ {.id = ALC889_FIXUP_MBA21_VREF, .name = "mba21-vref"},
+ {.id = ALC889_FIXUP_MP11_VREF, .name = "mp11-vref"},
+ {.id = ALC889_FIXUP_MP41_VREF, .name = "mp41-vref"},
{.id = ALC882_FIXUP_INV_DMIC, .name = "inv-dmic"},
{.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"},
+ {.id = ALC887_FIXUP_ASUS_BASS, .name = "asus-bass"},
{.id = ALC1220_FIXUP_GB_DUAL_CODECS, .name = "dual-codecs"},
+ {.id = ALC1220_FIXUP_CLEVO_P950, .name = "clevo-p950"},
{}
};
@@ -2435,8 +2459,11 @@ static int patch_alc882(struct hda_codec *codec)
if (err < 0)
goto error;
- if (!spec->gen.no_analog && spec->gen.beep_nid)
- set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
+ if (!spec->gen.no_analog && spec->gen.beep_nid) {
+ err = set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
+ if (err < 0)
+ goto error;
+ }
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
@@ -2557,6 +2584,14 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
static const struct hda_model_fixup alc262_fixup_models[] = {
{.id = ALC262_FIXUP_INV_DMIC, .name = "inv-dmic"},
+ {.id = ALC262_FIXUP_FSC_H270, .name = "fsc-h270"},
+ {.id = ALC262_FIXUP_FSC_S7110, .name = "fsc-s7110"},
+ {.id = ALC262_FIXUP_HP_Z200, .name = "hp-z200"},
+ {.id = ALC262_FIXUP_TYAN, .name = "tyan"},
+ {.id = ALC262_FIXUP_LENOVO_3000, .name = "lenovo-3000"},
+ {.id = ALC262_FIXUP_BENQ, .name = "benq"},
+ {.id = ALC262_FIXUP_BENQ_T31, .name = "benq-t31"},
+ {.id = ALC262_FIXUP_INTEL_BAYLEYBAY, .name = "bayleybay"},
{}
};
@@ -2598,8 +2633,11 @@ static int patch_alc262(struct hda_codec *codec)
if (err < 0)
goto error;
- if (!spec->gen.no_analog && spec->gen.beep_nid)
- set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
+ if (!spec->gen.no_analog && spec->gen.beep_nid) {
+ err = set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
+ if (err < 0)
+ goto error;
+ }
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
@@ -2645,7 +2683,6 @@ static const struct snd_kcontrol_new alc268_beep_mixer[] = {
.put = alc268_beep_switch_put,
.private_value = HDA_COMPOSE_AMP_VAL(0x0f, 3, 1, HDA_INPUT)
},
- { }
};
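/* Editor's note: the {} terminator is gone because patch_alc268() now walks
 * this array by ARRAY_SIZE() and feeds each entry to snd_hda_gen_add_kctl()
 * (see the hunk below).
 */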
/* set PCBEEP vol = 0, mute connections */
@@ -2686,6 +2723,7 @@ static const struct hda_fixup alc268_fixups[] = {
static const struct hda_model_fixup alc268_fixup_models[] = {
{.id = ALC268_FIXUP_INV_DMIC, .name = "inv-dmic"},
{.id = ALC268_FIXUP_HP_EAPD, .name = "hp-eapd"},
+ {.id = ALC268_FIXUP_SPDIF, .name = "spdif"},
{}
};
@@ -2713,7 +2751,7 @@ static int alc268_parse_auto_config(struct hda_codec *codec)
static int patch_alc268(struct hda_codec *codec)
{
struct alc_spec *spec;
- int err;
+ int i, err;
/* ALC268 has no aa-loopback mixer */
err = alc_alloc_spec(codec, 0);
@@ -2735,7 +2773,13 @@ static int patch_alc268(struct hda_codec *codec)
if (err > 0 && !spec->gen.no_analog &&
spec->gen.autocfg.speaker_pins[0] != 0x1d) {
- add_mixer(spec, alc268_beep_mixer);
+ for (i = 0; i < ARRAY_SIZE(alc268_beep_mixer); i++) {
+ if (!snd_hda_gen_add_kctl(&spec->gen, NULL,
+ &alc268_beep_mixer[i])) {
+ err = -ENOMEM;
+ goto error;
+ }
+ }
snd_hda_add_verbs(codec, alc268_beep_init_verbs);
if (!query_amp_caps(codec, 0x1d, HDA_INPUT))
/* override the amp caps for beep generator */
@@ -3454,9 +3498,8 @@ static int alc269_resume(struct hda_codec *codec)
* suspend, and won't restore the data after resume, so we restore it
* in the driver.
*/
- if (spec->gpio_led)
- snd_hda_codec_write(codec, codec->core.afg, 0, AC_VERB_SET_GPIO_DATA,
- spec->gpio_led);
+ if (spec->gpio_data)
+ alc_write_gpio_data(codec);
if (spec->has_alc5505_dsp)
alc5505_dsp_resume(codec);
@@ -3696,18 +3739,10 @@ static void alc_update_gpio_led(struct hda_codec *codec, unsigned int mask,
bool enabled)
{
struct alc_spec *spec = codec->spec;
- unsigned int oldval = spec->gpio_led;
if (spec->mute_led_polarity)
enabled = !enabled;
-
- if (enabled)
- spec->gpio_led &= ~mask;
- else
- spec->gpio_led |= mask;
- if (spec->gpio_led != oldval)
- snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
- spec->gpio_led);
+ alc_update_gpio_data(codec, mask, !enabled); /* muted -> LED on */
}
/* turn on/off mute LED via GPIO per vmaster hook */
@@ -3720,104 +3755,79 @@ static void alc_fixup_gpio_mute_hook(void *private_data, int enabled)
}
/* turn on/off mic-mute LED via GPIO per capture hook */
-static void alc_fixup_gpio_mic_mute_hook(struct hda_codec *codec,
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static void alc_gpio_micmute_update(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- if (ucontrol)
- alc_update_gpio_led(codec, spec->gpio_mic_led_mask,
- ucontrol->value.integer.value[0] ||
- ucontrol->value.integer.value[1]);
+ alc_update_gpio_led(codec, spec->gpio_mic_led_mask,
+ spec->gen.micmute_led.led_value);
}
-static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
- const struct hda_fixup *fix, int action)
+/* setup mute and mic-mute GPIO bits, add hooks appropriately */
+static void alc_fixup_hp_gpio_led(struct hda_codec *codec,
+ int action,
+ unsigned int mute_mask,
+ unsigned int micmute_mask)
{
struct alc_spec *spec = codec->spec;
- static const struct hda_verb gpio_init[] = {
- { 0x01, AC_VERB_SET_GPIO_MASK, 0x18 },
- { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x18 },
- {}
- };
- if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ alc_fixup_gpio(codec, action, mute_mask | micmute_mask);
+
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+ if (mute_mask) {
+ spec->gpio_mute_led_mask = mute_mask;
spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
- spec->gen.cap_sync_hook = alc_fixup_gpio_mic_mute_hook;
- spec->gpio_led = 0;
- spec->mute_led_polarity = 0;
- spec->gpio_mute_led_mask = 0x08;
- spec->gpio_mic_led_mask = 0x10;
- snd_hda_add_verbs(codec, gpio_init);
+ }
+ if (micmute_mask) {
+ spec->gpio_mic_led_mask = micmute_mask;
+ snd_hda_gen_add_micmute_led(codec, alc_gpio_micmute_update);
}
}
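/* Editor's note: a usage sketch, not part of the patch. A machine with the
 * mute LED on GPIO bit 0x08 and the mic-mute LED on bit 0x10 needs only:
 *
 *   alc_fixup_hp_gpio_led(codec, action, 0x08, 0x10);
 *
 * which claims both bits, hooks vmaster mute, and registers the generic
 * mic-mute LED helper, exactly as alc269_fixup_hp_gpio_led() below does.
 */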
-static void alc286_fixup_hp_gpio_led(struct hda_codec *codec,
+static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
- struct alc_spec *spec = codec->spec;
- static const struct hda_verb gpio_init[] = {
- { 0x01, AC_VERB_SET_GPIO_MASK, 0x22 },
- { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x22 },
- {}
- };
+ alc_fixup_hp_gpio_led(codec, action, 0x08, 0x10);
+}
- if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
- spec->gen.cap_sync_hook = alc_fixup_gpio_mic_mute_hook;
- spec->gpio_led = 0;
- spec->mute_led_polarity = 0;
- spec->gpio_mute_led_mask = 0x02;
- spec->gpio_mic_led_mask = 0x20;
- snd_hda_add_verbs(codec, gpio_init);
- }
+static void alc286_fixup_hp_gpio_led(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ alc_fixup_hp_gpio_led(codec, action, 0x02, 0x20);
}
/* turn on/off mic-mute LED per capture hook */
-static void alc269_fixup_hp_cap_mic_mute_hook(struct hda_codec *codec,
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static void alc_cap_micmute_update(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- unsigned int pinval, enable, disable;
+ unsigned int pinval;
+ if (!spec->cap_mute_led_nid)
+ return;
pinval = snd_hda_codec_get_pin_target(codec, spec->cap_mute_led_nid);
pinval &= ~AC_PINCTL_VREFEN;
- enable = pinval | AC_PINCTL_VREF_80;
- disable = pinval | AC_PINCTL_VREF_HIZ;
-
- if (!ucontrol)
- return;
-
- if (ucontrol->value.integer.value[0] ||
- ucontrol->value.integer.value[1])
- pinval = disable;
+ if (spec->gen.micmute_led.led_value)
+ pinval |= AC_PINCTL_VREF_80;
else
- pinval = enable;
-
- if (spec->cap_mute_led_nid)
- snd_hda_set_pin_ctl_cache(codec, spec->cap_mute_led_nid, pinval);
+ pinval |= AC_PINCTL_VREF_HIZ;
+ snd_hda_set_pin_ctl_cache(codec, spec->cap_mute_led_nid, pinval);
}
static void alc269_fixup_hp_gpio_mic1_led(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
struct alc_spec *spec = codec->spec;
- static const struct hda_verb gpio_init[] = {
- { 0x01, AC_VERB_SET_GPIO_MASK, 0x08 },
- { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x08 },
- {}
- };
+ alc_fixup_hp_gpio_led(codec, action, 0x08, 0);
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
- spec->gen.cap_sync_hook = alc269_fixup_hp_cap_mic_mute_hook;
- spec->gpio_led = 0;
- spec->mute_led_polarity = 0;
- spec->gpio_mute_led_mask = 0x08;
+ /* Like hp_gpio_mic1_led, but also needs GPIO4 low to
+ * enable headphone amp
+ */
+ spec->gpio_mask |= 0x10;
+ spec->gpio_dir |= 0x10;
spec->cap_mute_led_nid = 0x18;
- snd_hda_add_verbs(codec, gpio_init);
+ snd_hda_gen_add_micmute_led(codec, alc_cap_micmute_update);
codec->power_filter = led_power_filter;
}
}
@@ -3825,22 +3835,12 @@ static void alc269_fixup_hp_gpio_mic1_led(struct hda_codec *codec,
static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
- /* Like hp_gpio_mic1_led, but also needs GPIO4 low to enable headphone amp */
struct alc_spec *spec = codec->spec;
- static const struct hda_verb gpio_init[] = {
- { 0x01, AC_VERB_SET_GPIO_MASK, 0x18 },
- { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x18 },
- {}
- };
+ alc_fixup_hp_gpio_led(codec, action, 0x08, 0);
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
- spec->gen.cap_sync_hook = alc269_fixup_hp_cap_mic_mute_hook;
- spec->gpio_led = 0;
- spec->mute_led_polarity = 0;
- spec->gpio_mute_led_mask = 0x08;
spec->cap_mute_led_nid = 0x18;
- snd_hda_add_verbs(codec, gpio_init);
+ snd_hda_gen_add_micmute_led(codec, alc_cap_micmute_update);
codec->power_filter = led_power_filter;
}
}
@@ -3890,38 +3890,29 @@ static int alc_register_micmute_input_device(struct hda_codec *codec)
return 0;
}
+/* GPIO1 = set according to SKU external amp
+ * GPIO2 = mic mute hotkey
+ * GPIO3 = mute LED
+ * GPIO4 = mic mute LED
+ */
static void alc280_fixup_hp_gpio2_mic_hotkey(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
- /* GPIO1 = set according to SKU external amp
- GPIO2 = mic mute hotkey
- GPIO3 = mute LED
- GPIO4 = mic mute LED */
- static const struct hda_verb gpio_init[] = {
- { 0x01, AC_VERB_SET_GPIO_MASK, 0x1e },
- { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x1a },
- { 0x01, AC_VERB_SET_GPIO_DATA, 0x02 },
- {}
- };
-
struct alc_spec *spec = codec->spec;
+ alc_fixup_hp_gpio_led(codec, action, 0x08, 0x10);
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->init_amp = ALC_INIT_DEFAULT;
if (alc_register_micmute_input_device(codec) != 0)
return;
- snd_hda_add_verbs(codec, gpio_init);
+ spec->gpio_mask |= 0x06;
+ spec->gpio_dir |= 0x02;
+ spec->gpio_data |= 0x02;
snd_hda_codec_write_cache(codec, codec->core.afg, 0,
AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x04);
snd_hda_jack_detect_enable_callback(codec, codec->core.afg,
gpio2_mic_hotkey_event);
-
- spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
- spec->gen.cap_sync_hook = alc_fixup_gpio_mic_mute_hook;
- spec->gpio_led = 0;
- spec->mute_led_polarity = 0;
- spec->gpio_mute_led_mask = 0x08;
- spec->gpio_mic_led_mask = 0x10;
return;
}
@@ -3929,40 +3920,28 @@ static void alc280_fixup_hp_gpio2_mic_hotkey(struct hda_codec *codec,
return;
switch (action) {
- case HDA_FIXUP_ACT_PROBE:
- spec->init_amp = ALC_INIT_DEFAULT;
- break;
case HDA_FIXUP_ACT_FREE:
input_unregister_device(spec->kb_dev);
spec->kb_dev = NULL;
}
}
+/* Line2 = mic mute hotkey
+ * GPIO2 = mic mute LED
+ */
static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
- /* Line2 = mic mute hotkey
- GPIO2 = mic mute LED */
- static const struct hda_verb gpio_init[] = {
- { 0x01, AC_VERB_SET_GPIO_MASK, 0x04 },
- { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04 },
- {}
- };
-
struct alc_spec *spec = codec->spec;
+ alc_fixup_hp_gpio_led(codec, action, 0, 0x04);
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->init_amp = ALC_INIT_DEFAULT;
if (alc_register_micmute_input_device(codec) != 0)
return;
- snd_hda_add_verbs(codec, gpio_init);
snd_hda_jack_detect_enable_callback(codec, 0x1b,
gpio2_mic_hotkey_event);
-
- spec->gen.cap_sync_hook = alc_fixup_gpio_mic_mute_hook;
- spec->gpio_led = 0;
- spec->mute_led_polarity = 0;
- spec->gpio_mic_led_mask = 0x04;
return;
}
@@ -3970,9 +3949,6 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
return;
switch (action) {
- case HDA_FIXUP_ACT_PROBE:
- spec->init_amp = ALC_INIT_DEFAULT;
- break;
case HDA_FIXUP_ACT_FREE:
input_unregister_device(spec->kb_dev);
spec->kb_dev = NULL;
@@ -3988,14 +3964,10 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
{
struct alc_spec *spec = codec->spec;
+ alc269_fixup_hp_mute_led_micx(codec, fix, action, 0x1a);
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->gen.vmaster_mute.hook = alc269_fixup_mic_mute_hook;
- spec->gen.cap_sync_hook = alc269_fixup_hp_cap_mic_mute_hook;
- spec->mute_led_polarity = 0;
- spec->mute_led_nid = 0x1a;
spec->cap_mute_led_nid = 0x18;
- spec->gen.vmaster_mute_enum = 1;
- codec->power_filter = led_power_filter;
+ snd_hda_gen_add_micmute_led(codec, alc_cap_micmute_update);
}
}
@@ -4843,6 +4815,7 @@ static void alc_probe_headset_mode(struct hda_codec *codec)
spec->headphone_mic_pin = cfg->inputs[i].pin;
}
+ WARN_ON(spec->gen.cap_sync_hook);
spec->gen.cap_sync_hook = alc_update_headset_mode_hook;
spec->gen.automute_hook = alc_update_headset_mode;
spec->gen.hp_automute_hook = alc_update_headset_jack_cb;
@@ -4934,13 +4907,10 @@ static void alc288_update_headset_jack_cb(struct hda_codec *codec,
struct hda_jack_callback *jack)
{
struct alc_spec *spec = codec->spec;
- int present;
alc_update_headset_jack_cb(codec, jack);
/* Headset Mic enable or disable, only for Dell Dino */
- present = spec->gen.hp_jack_present ? 0x40 : 0;
- snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
- present);
+ alc_update_gpio_data(codec, 0x40, spec->gen.hp_jack_present);
}
static void alc_fixup_headset_mode_dell_alc288(struct hda_codec *codec,
@@ -4949,6 +4919,9 @@ static void alc_fixup_headset_mode_dell_alc288(struct hda_codec *codec,
alc_fixup_headset_mode(codec, fix, action);
if (action == HDA_FIXUP_ACT_PROBE) {
struct alc_spec *spec = codec->spec;
+ /* toggled via hp_automute_hook */
+ spec->gpio_mask |= 0x40;
+ spec->gpio_dir |= 0x40;
spec->gen.hp_automute_hook = alc288_update_headset_jack_cb;
}
}
@@ -4969,7 +4942,7 @@ static void alc_no_shutup(struct hda_codec *codec)
static void alc_fixup_no_shutup(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
- if (action == HDA_FIXUP_ACT_PROBE) {
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
struct alc_spec *spec = codec->spec;
spec->shutup = alc_no_shutup;
}
@@ -5051,10 +5024,9 @@ static void alc_fixup_dell_xps13(struct hda_codec *codec,
* it causes a click noise at start up
*/
snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
+ spec->shutup = alc_shutup_dell_xps13;
break;
case HDA_FIXUP_ACT_PROBE:
- spec->shutup = alc_shutup_dell_xps13;
-
/* Make the internal mic the default input source. */
for (i = 0; i < imux->num_items; i++) {
if (spec->gen.imux_pins[i] == 0x12) {
@@ -5231,13 +5203,6 @@ static void alc282_fixup_asus_tx300(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
struct alc_spec *spec = codec->spec;
- /* TX300 needs to set up GPIO2 for the speaker amp */
- static const struct hda_verb gpio2_verbs[] = {
- { 0x01, AC_VERB_SET_GPIO_MASK, 0x04 },
- { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04 },
- { 0x01, AC_VERB_SET_GPIO_DATA, 0x04 },
- {}
- };
static const struct hda_pintbl dock_pins[] = {
{ 0x1b, 0x21114000 }, /* dock speaker pin */
{}
@@ -5245,13 +5210,18 @@ static void alc282_fixup_asus_tx300(struct hda_codec *codec,
switch (action) {
case HDA_FIXUP_ACT_PRE_PROBE:
- snd_hda_add_verbs(codec, gpio2_verbs);
+ spec->init_amp = ALC_INIT_DEFAULT;
+ /* TX300 needs to set up GPIO2 for the speaker amp */
+ alc_setup_gpio(codec, 0x04);
snd_hda_apply_pincfgs(codec, dock_pins);
spec->gen.auto_mute_via_amp = 1;
spec->gen.automute_hook = asus_tx300_automute;
snd_hda_jack_detect_enable_callback(codec, 0x1b,
snd_hda_gen_hp_automute);
break;
+ case HDA_FIXUP_ACT_PROBE:
+ spec->init_amp = ALC_INIT_DEFAULT;
+ break;
case HDA_FIXUP_ACT_BUILD:
/* this is a bit tricky; give more sane names for the main
* (tablet) speaker and the dock speaker, respectively
@@ -5325,30 +5295,26 @@ static void alc280_fixup_hp_9480m(struct hda_codec *codec,
int action)
{
struct alc_spec *spec = codec->spec;
- static const struct hda_verb gpio_init[] = {
- { 0x01, AC_VERB_SET_GPIO_MASK, 0x18 },
- { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x18 },
- {}
- };
+ alc_fixup_hp_gpio_led(codec, action, 0x08, 0);
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- /* Set the hooks to turn the headphone amp on/off
- * as needed
- */
- spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
+ /* amp at GPIO4; toggled via alc280_hp_gpio4_automute_hook() */
+ spec->gpio_mask |= 0x10;
+ spec->gpio_dir |= 0x10;
spec->gen.hp_automute_hook = alc280_hp_gpio4_automute_hook;
+ }
+}
- /* The GPIOs are currently off */
- spec->gpio_led = 0;
-
- /* GPIO3 is connected to the output mute LED,
- * high is on, low is off
- */
- spec->mute_led_polarity = 0;
- spec->gpio_mute_led_mask = 0x08;
+static void alc275_fixup_gpio4_off(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ struct alc_spec *spec = codec->spec;
- /* Initialize GPIO configuration */
- snd_hda_add_verbs(codec, gpio_init);
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->gpio_mask |= 0x04;
+ spec->gpio_dir |= 0x04;
+ /* set data bit low */
}
}
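/* Editor's note: despite touching only mask and direction, this fixup still
 * drives the line low; the bit is deliberately left clear in gpio_data, so
 * alc_write_gpio() writes 0 for it at init, replacing the old
 * AC_VERB_SET_GPIO_DATA 0x00 verb of ALC275_FIXUP_SONY_VAIO_GPIO2 below.
 */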
@@ -5492,7 +5458,6 @@ enum {
ALC280_FIXUP_HP_9480M,
ALC288_FIXUP_DELL_HEADSET_MODE,
ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
- ALC288_FIXUP_DELL_XPS_13_GPIO6,
ALC288_FIXUP_DELL_XPS_13,
ALC288_FIXUP_DISABLE_AAMIX,
ALC292_FIXUP_DELL_E7X,
@@ -5540,13 +5505,8 @@ static const struct hda_fixup alc269_fixups[] = {
}
},
[ALC275_FIXUP_SONY_VAIO_GPIO2] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = (const struct hda_verb[]) {
- {0x01, AC_VERB_SET_GPIO_MASK, 0x04},
- {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04},
- {0x01, AC_VERB_SET_GPIO_DATA, 0x00},
- { }
- },
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc275_fixup_gpio4_off,
.chained = true,
.chain_id = ALC269_FIXUP_SONY_VAIO
},
@@ -6113,22 +6073,11 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC288_FIXUP_DELL_HEADSET_MODE
},
- [ALC288_FIXUP_DELL_XPS_13_GPIO6] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = (const struct hda_verb[]) {
- {0x01, AC_VERB_SET_GPIO_MASK, 0x40},
- {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x40},
- {0x01, AC_VERB_SET_GPIO_DATA, 0x00},
- { }
- },
- .chained = true,
- .chain_id = ALC288_FIXUP_DELL1_MIC_NO_PRESENCE
- },
[ALC288_FIXUP_DISABLE_AAMIX] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_disable_aamix,
.chained = true,
- .chain_id = ALC288_FIXUP_DELL_XPS_13_GPIO6
+ .chain_id = ALC288_FIXUP_DELL1_MIC_NO_PRESENCE
},
[ALC288_FIXUP_DELL_XPS_13] = {
.type = HDA_FIXUP_FUNC,
@@ -6291,14 +6240,9 @@ static const struct hda_fixup alc269_fixups[] = {
.chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
},
[ALC256_FIXUP_ASUS_AIO_GPIO2] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = (const struct hda_verb[]) {
- /* Set up GPIO2 for the speaker amp */
- { 0x01, AC_VERB_SET_GPIO_MASK, 0x04 },
- { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04 },
- { 0x01, AC_VERB_SET_GPIO_DATA, 0x04 },
- {}
- },
+ .type = HDA_FIXUP_FUNC,
+ /* Set up GPIO2 for the speaker amp */
+ .v.func = alc_fixup_gpio4,
},
[ALC233_FIXUP_ASUS_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_PINS,
@@ -6530,6 +6474,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x82c0, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -6713,13 +6658,95 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
{.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
+ {.id = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, .name = "dell-headset3"},
+ {.id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, .name = "dell-headset4"},
{.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
{.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
{.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
+ {.id = ALC298_FIXUP_TPT470_DOCK, .name = "tpt470-dock"},
{.id = ALC233_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
{.id = ALC700_FIXUP_INTEL_REFERENCE, .name = "alc700-ref"},
+ {.id = ALC269_FIXUP_SONY_VAIO, .name = "vaio"},
+ {.id = ALC269_FIXUP_DELL_M101Z, .name = "dell-m101z"},
+ {.id = ALC269_FIXUP_ASUS_G73JW, .name = "asus-g73jw"},
+ {.id = ALC269_FIXUP_LENOVO_EAPD, .name = "lenovo-eapd"},
+ {.id = ALC275_FIXUP_SONY_HWEQ, .name = "sony-hweq"},
+ {.id = ALC269_FIXUP_PCM_44K, .name = "pcm44k"},
+ {.id = ALC269_FIXUP_LIFEBOOK, .name = "lifebook"},
+ {.id = ALC269_FIXUP_LIFEBOOK_EXTMIC, .name = "lifebook-extmic"},
+ {.id = ALC269_FIXUP_LIFEBOOK_HP_PIN, .name = "lifebook-hp-pin"},
+ {.id = ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC, .name = "lifebook-u7x7"},
+ {.id = ALC269VB_FIXUP_AMIC, .name = "alc269vb-amic"},
+ {.id = ALC269VB_FIXUP_DMIC, .name = "alc269vb-dmic"},
+ {.id = ALC269_FIXUP_HP_MUTE_LED_MIC1, .name = "hp-mute-led-mic1"},
+ {.id = ALC269_FIXUP_HP_MUTE_LED_MIC2, .name = "hp-mute-led-mic2"},
+ {.id = ALC269_FIXUP_HP_MUTE_LED_MIC3, .name = "hp-mute-led-mic3"},
+ {.id = ALC269_FIXUP_HP_GPIO_MIC1_LED, .name = "hp-gpio-mic1"},
+ {.id = ALC269_FIXUP_HP_LINE1_MIC1_LED, .name = "hp-line1-mic1"},
+ {.id = ALC269_FIXUP_NO_SHUTUP, .name = "noshutup"},
+ {.id = ALC286_FIXUP_SONY_MIC_NO_PRESENCE, .name = "sony-nomic"},
+ {.id = ALC269_FIXUP_ASPIRE_HEADSET_MIC, .name = "aspire-headset-mic"},
+ {.id = ALC269_FIXUP_ASUS_X101, .name = "asus-x101"},
+ {.id = ALC271_FIXUP_HP_GATE_MIC_JACK, .name = "acer-ao7xx"},
+ {.id = ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572, .name = "acer-aspire-e1"},
+ {.id = ALC269_FIXUP_ACER_AC700, .name = "acer-ac700"},
+ {.id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST, .name = "limit-mic-boost"},
+ {.id = ALC269VB_FIXUP_ASUS_ZENBOOK, .name = "asus-zenbook"},
+ {.id = ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A, .name = "asus-zenbook-ux31a"},
+ {.id = ALC269VB_FIXUP_ORDISSIMO_EVE2, .name = "ordissimo"},
+ {.id = ALC282_FIXUP_ASUS_TX300, .name = "asus-tx300"},
+ {.id = ALC283_FIXUP_INT_MIC, .name = "alc283-int-mic"},
+ {.id = ALC290_FIXUP_MONO_SPEAKERS_HSJACK, .name = "mono-speakers"},
+ {.id = ALC290_FIXUP_SUBWOOFER_HSJACK, .name = "alc290-subwoofer"},
+ {.id = ALC269_FIXUP_THINKPAD_ACPI, .name = "thinkpad"},
+ {.id = ALC269_FIXUP_DMIC_THINKPAD_ACPI, .name = "dmic-thinkpad"},
+ {.id = ALC255_FIXUP_ACER_MIC_NO_PRESENCE, .name = "alc255-acer"},
+ {.id = ALC255_FIXUP_ASUS_MIC_NO_PRESENCE, .name = "alc255-asus"},
+ {.id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"},
+ {.id = ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "alc255-dell2"},
+ {.id = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc293-dell1"},
+ {.id = ALC283_FIXUP_HEADSET_MIC, .name = "alc283-headset"},
+ {.id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED, .name = "alc255-dell-mute"},
+ {.id = ALC282_FIXUP_ASPIRE_V5_PINS, .name = "aspire-v5"},
+ {.id = ALC280_FIXUP_HP_GPIO4, .name = "hp-gpio4"},
+ {.id = ALC286_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
+ {.id = ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, .name = "hp-gpio2-hotkey"},
+ {.id = ALC280_FIXUP_HP_DOCK_PINS, .name = "hp-dock-pins"},
+ {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic"},
+ {.id = ALC280_FIXUP_HP_9480M, .name = "hp-9480m"},
+ {.id = ALC288_FIXUP_DELL_HEADSET_MODE, .name = "alc288-dell-headset"},
+ {.id = ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc288-dell1"},
+ {.id = ALC288_FIXUP_DELL_XPS_13, .name = "alc288-dell-xps13"},
+ {.id = ALC292_FIXUP_DELL_E7X, .name = "dell-e7x"},
+ {.id = ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK, .name = "alc293-dell"},
+ {.id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc298-dell1"},
+ {.id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, .name = "alc298-dell-aio"},
+ {.id = ALC275_FIXUP_DELL_XPS, .name = "alc275-dell-xps"},
+ {.id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, .name = "alc256-dell-xps13"},
+ {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
+ {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
+ {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
+ {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"},
+ {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"},
+ {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"},
+ {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"},
+ {.id = ALC298_FIXUP_SPK_VOLUME, .name = "alc298-spk-volume"},
+ {.id = ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER, .name = "dell-inspiron-7559"},
+ {.id = ALC269_FIXUP_ATIV_BOOK_8, .name = "ativ-book"},
+ {.id = ALC221_FIXUP_HP_MIC_NO_PRESENCE, .name = "alc221-hp-mic"},
+ {.id = ALC256_FIXUP_ASUS_HEADSET_MODE, .name = "alc256-asus-headset"},
+ {.id = ALC256_FIXUP_ASUS_MIC, .name = "alc256-asus-mic"},
+ {.id = ALC256_FIXUP_ASUS_AIO_GPIO2, .name = "alc256-asus-aio"},
+ {.id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE, .name = "alc233-asus"},
+ {.id = ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE, .name = "alc233-eapd"},
+ {.id = ALC294_FIXUP_LENOVO_MIC_LOCATION, .name = "alc294-lenovo-mic"},
+ {.id = ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE, .name = "alc225-wyse"},
+ {.id = ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, .name = "alc274-dell-aio"},
+ {.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"},
+ {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
+ {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
{}
};
#define ALC225_STANDARD_PINS \
@@ -6983,7 +7010,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60130},
{0x19, 0x03a11020},
{0x21, 0x0321101f}),
- SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL_XPS_13_GPIO6,
+ SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60120},
{0x14, 0x90170110},
{0x21, 0x0321101f}),
@@ -7140,18 +7167,6 @@ static int patch_alc269(struct hda_codec *codec)
spec->shutup = alc_default_shutup;
spec->init_hook = alc_default_init;
- snd_hda_pick_fixup(codec, alc269_fixup_models,
- alc269_fixup_tbl, alc269_fixups);
- snd_hda_pick_pin_fixup(codec, alc269_pin_fixup_tbl, alc269_fixups);
- snd_hda_pick_fixup(codec, NULL, alc269_fixup_vendor_tbl,
- alc269_fixups);
- snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
-
- alc_auto_parse_customize_define(codec);
-
- if (has_cdefine_beep(codec))
- spec->gen.beep_nid = 0x01;
-
switch (codec->core.vendor_id) {
case 0x10ec0269:
spec->codec_variant = ALC269_TYPE_ALC269VA;
@@ -7271,13 +7286,28 @@ static int patch_alc269(struct hda_codec *codec)
spec->init_hook = alc5505_dsp_init;
}
+ snd_hda_pick_fixup(codec, alc269_fixup_models,
+ alc269_fixup_tbl, alc269_fixups);
+ snd_hda_pick_pin_fixup(codec, alc269_pin_fixup_tbl, alc269_fixups);
+ snd_hda_pick_fixup(codec, NULL, alc269_fixup_vendor_tbl,
+ alc269_fixups);
+ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
+
+ alc_auto_parse_customize_define(codec);
+
+ if (has_cdefine_beep(codec))
+ spec->gen.beep_nid = 0x01;
+
/* automatic parse from the BIOS config */
err = alc269_parse_auto_config(codec);
if (err < 0)
goto error;
- if (!spec->gen.no_analog && spec->gen.beep_nid && spec->gen.mixer_nid)
- set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT);
+ if (!spec->gen.no_analog && spec->gen.beep_nid && spec->gen.mixer_nid) {
+ err = set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT);
+ if (err < 0)
+ goto error;
+ }
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
@@ -7406,8 +7436,11 @@ static int patch_alc861(struct hda_codec *codec)
if (err < 0)
goto error;
- if (!spec->gen.no_analog)
- set_beep_amp(spec, 0x23, 0, HDA_OUTPUT);
+ if (!spec->gen.no_analog) {
+ err = set_beep_amp(spec, 0x23, 0, HDA_OUTPUT);
+ if (err < 0)
+ goto error;
+ }
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
@@ -7447,16 +7480,21 @@ static void alc861vd_fixup_dallas(struct hda_codec *codec,
}
}
+/* reset GPIO1 */
+static void alc660vd_fixup_asus_gpio1(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ spec->gpio_mask |= 0x02;
+ alc_fixup_gpio(codec, action, 0x01);
+}
+
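/* Editor's note: the extra gpio_mask |= 0x02 reproduces the old verb
 * sequence exactly (MASK 0x03, DIRECTION 0x01, DATA 0x01); bit 1 is claimed
 * in the mask but left as an input, which is what "reset GPIO1" amounts to.
 */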
static const struct hda_fixup alc861vd_fixups[] = {
[ALC660VD_FIX_ASUS_GPIO1] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = (const struct hda_verb[]) {
- /* reset GPIO1 */
- {0x01, AC_VERB_SET_GPIO_MASK, 0x03},
- {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x01},
- {0x01, AC_VERB_SET_GPIO_DATA, 0x01},
- { }
- }
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc660vd_fixup_asus_gpio1,
},
[ALC861VD_FIX_DALLAS] = {
.type = HDA_FIXUP_FUNC,
@@ -7495,8 +7533,11 @@ static int patch_alc861vd(struct hda_codec *codec)
if (err < 0)
goto error;
- if (!spec->gen.no_analog)
- set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
+ if (!spec->gen.no_analog) {
+ err = set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
+ if (err < 0)
+ goto error;
+ }
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
@@ -7577,7 +7618,7 @@ static unsigned int gpio_led_power_filter(struct hda_codec *codec,
unsigned int power_state)
{
struct alc_spec *spec = codec->spec;
- if (nid == codec->core.afg && power_state == AC_PWRST_D3 && spec->gpio_led)
+ if (nid == codec->core.afg && power_state == AC_PWRST_D3 && spec->gpio_data)
return AC_PWRST_D0;
return power_state;
}
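/* Editor's note: keying the filter on spec->gpio_data means any active GPIO
 * bit keeps the AFG in D0 at suspend; for users such as
 * alc662_fixup_led_gpio1() below (LED on bit 0x01) this matches the old
 * spec->gpio_led behaviour.
 */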
@@ -7586,18 +7627,10 @@ static void alc662_fixup_led_gpio1(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
struct alc_spec *spec = codec->spec;
- static const struct hda_verb gpio_init[] = {
- { 0x01, AC_VERB_SET_GPIO_MASK, 0x01 },
- { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x01 },
- {}
- };
+ alc_fixup_hp_gpio_led(codec, action, 0x01, 0);
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
- spec->gpio_led = 0;
spec->mute_led_polarity = 1;
- spec->gpio_mute_led_mask = 0x01;
- snd_hda_add_verbs(codec, gpio_init);
codec->power_filter = gpio_led_power_filter;
}
}
@@ -8110,7 +8143,10 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
};
static const struct hda_model_fixup alc662_fixup_models[] = {
+ {.id = ALC662_FIXUP_ASPIRE, .name = "aspire"},
+ {.id = ALC662_FIXUP_IDEAPAD, .name = "ideapad"},
{.id = ALC272_FIXUP_MARIO, .name = "mario"},
+ {.id = ALC662_FIXUP_HP_RP5800, .name = "hp-rp5800"},
{.id = ALC662_FIXUP_ASUS_MODE1, .name = "asus-mode1"},
{.id = ALC662_FIXUP_ASUS_MODE2, .name = "asus-mode2"},
{.id = ALC662_FIXUP_ASUS_MODE3, .name = "asus-mode3"},
@@ -8119,8 +8155,23 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
{.id = ALC662_FIXUP_ASUS_MODE6, .name = "asus-mode6"},
{.id = ALC662_FIXUP_ASUS_MODE7, .name = "asus-mode7"},
{.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"},
+ {.id = ALC662_FIXUP_ZOTAC_Z68, .name = "zotac-z68"},
{.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"},
+ {.id = ALC662_FIXUP_DELL_MIC_NO_PRESENCE, .name = "alc662-headset-multi"},
{.id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
+ {.id = ALC662_FIXUP_HEADSET_MODE, .name = "alc662-headset"},
+ {.id = ALC668_FIXUP_HEADSET_MODE, .name = "alc668-headset"},
+ {.id = ALC662_FIXUP_BASS_16, .name = "bass16"},
+ {.id = ALC662_FIXUP_BASS_1A, .name = "bass1a"},
+ {.id = ALC668_FIXUP_AUTO_MUTE, .name = "automute"},
+ {.id = ALC668_FIXUP_DELL_XPS13, .name = "dell-xps13"},
+ {.id = ALC662_FIXUP_ASUS_Nx50, .name = "asus-nx50"},
+ {.id = ALC668_FIXUP_ASUS_Nx51, .name = "asus-nx51"},
+ {.id = ALC891_FIXUP_HEADSET_MODE, .name = "alc891-headset"},
+ {.id = ALC891_FIXUP_DELL_MIC_NO_PRESENCE, .name = "alc891-headset-multi"},
+ {.id = ALC662_FIXUP_ACER_VERITON, .name = "acer-veriton"},
+ {.id = ALC892_FIXUP_ASROCK_MOBO, .name = "asrock-mobo"},
+ {.id = ALC662_FIXUP_USI_HEADSET_MODE, .name = "usi-headset"},
{.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
{}
};
@@ -8214,18 +8265,20 @@ static int patch_alc662(struct hda_codec *codec)
if (!spec->gen.no_analog && spec->gen.beep_nid) {
switch (codec->core.vendor_id) {
case 0x10ec0662:
- set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
+ err = set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
break;
case 0x10ec0272:
case 0x10ec0663:
case 0x10ec0665:
case 0x10ec0668:
- set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
+ err = set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
break;
case 0x10ec0273:
- set_beep_amp(spec, 0x0b, 0x03, HDA_INPUT);
+ err = set_beep_amp(spec, 0x0b, 0x03, HDA_INPUT);
break;
}
+ if (err < 0)
+ goto error;
}
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 63d15b545b33..046705b4691a 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -332,33 +332,15 @@ static void stac_gpio_set(struct hda_codec *codec, unsigned int mask,
}
/* hook for controlling mic-mute LED GPIO */
-static void stac_capture_led_hook(struct hda_codec *codec,
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static void stac_capture_led_update(struct hda_codec *codec)
{
struct sigmatel_spec *spec = codec->spec;
- unsigned int mask;
- bool cur_mute, prev_mute;
- if (!kcontrol || !ucontrol)
- return;
-
- mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
- prev_mute = !spec->mic_enabled;
- if (ucontrol->value.integer.value[0] ||
- ucontrol->value.integer.value[1])
- spec->mic_enabled |= mask;
+ if (spec->gen.micmute_led.led_value)
+ spec->gpio_data |= spec->mic_mute_led_gpio;
else
- spec->mic_enabled &= ~mask;
- cur_mute = !spec->mic_enabled;
- if (cur_mute != prev_mute) {
- if (cur_mute)
- spec->gpio_data |= spec->mic_mute_led_gpio;
- else
- spec->gpio_data &= ~spec->mic_mute_led_gpio;
- stac_gpio_set(codec, spec->gpio_mask,
- spec->gpio_dir, spec->gpio_data);
- }
+ spec->gpio_data &= ~spec->mic_mute_led_gpio;
+ stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data);
}
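/* Editor's note: with snd_hda_gen_add_micmute_led() the generic parser
 * tracks capture-switch state in spec->gen.micmute_led.led_value; the
 * driver callback above only has to translate that value into the GPIO
 * bits, without the old per-kcontrol mask bookkeeping.
 */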
static int stac_vrefout_set(struct hda_codec *codec,
@@ -4656,8 +4638,7 @@ static void stac_setup_gpio(struct hda_codec *codec)
spec->gpio_dir |= spec->mic_mute_led_gpio;
spec->mic_enabled = 0;
spec->gpio_data |= spec->mic_mute_led_gpio;
-
- spec->gen.cap_sync_hook = stac_capture_led_hook;
+ snd_hda_gen_add_micmute_led(codec, stac_capture_led_update);
}
}
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index fc30d1e8aa76..6b9617aee0e6 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -90,13 +90,6 @@ enum VIA_HDA_CODEC {
struct via_spec {
struct hda_gen_spec gen;
- /* codec parameterization */
- const struct snd_kcontrol_new *mixers[6];
- unsigned int num_mixers;
-
- const struct hda_verb *init_verbs[5];
- unsigned int num_iverbs;
-
/* HP mode source */
unsigned int dmic_enabled;
enum VIA_HDA_CODEC codec_type;
@@ -107,8 +100,6 @@ struct via_spec {
/* work to check hp jack state */
int hp_work_active;
int vt1708_jack_detect;
-
- unsigned int beep_amp;
};
static enum VIA_HDA_CODEC get_codec_type(struct hda_codec *codec);
@@ -262,69 +253,51 @@ static int via_pin_power_ctl_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static const struct snd_kcontrol_new via_pin_power_ctl_enum[] = {
- {
+static const struct snd_kcontrol_new via_pin_power_ctl_enum = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Dynamic Power-Control",
.info = via_pin_power_ctl_info,
.get = via_pin_power_ctl_get,
.put = via_pin_power_ctl_put,
- },
- {} /* terminator */
};
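/* Editor's note: the control template becomes a singleton because
 * snd_hda_gen_add_kctl() copies one snd_kcontrol_new at a time; the old
 * {} terminator entry was only needed by snd_hda_add_new_ctls().
 */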
#ifdef CONFIG_SND_HDA_INPUT_BEEP
-static inline void set_beep_amp(struct via_spec *spec, hda_nid_t nid,
- int idx, int dir)
-{
- spec->gen.beep_nid = nid;
- spec->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 1, idx, dir);
-}
-
/* additional beep mixers; private_value is overwritten in set_beep_amp() */
-static const struct snd_kcontrol_new cxt_beep_mixer[] = {
+static const struct snd_kcontrol_new via_beep_mixer[] = {
HDA_CODEC_VOLUME_MONO("Beep Playback Volume", 0, 1, 0, HDA_OUTPUT),
HDA_CODEC_MUTE_BEEP_MONO("Beep Playback Switch", 0, 1, 0, HDA_OUTPUT),
- { } /* end */
};
-/* create beep controls if needed */
-static int add_beep_ctls(struct hda_codec *codec)
+static int set_beep_amp(struct via_spec *spec, hda_nid_t nid,
+ int idx, int dir)
{
- struct via_spec *spec = codec->spec;
- int err;
+ struct snd_kcontrol_new *knew;
+ unsigned int beep_amp = HDA_COMPOSE_AMP_VAL(nid, 1, idx, dir);
+ int i;
- if (spec->beep_amp) {
- const struct snd_kcontrol_new *knew;
- for (knew = cxt_beep_mixer; knew->name; knew++) {
- struct snd_kcontrol *kctl;
- kctl = snd_ctl_new1(knew, codec);
- if (!kctl)
- return -ENOMEM;
- kctl->private_value = spec->beep_amp;
- err = snd_hda_ctl_add(codec, 0, kctl);
- if (err < 0)
- return err;
- }
+ spec->gen.beep_nid = nid;
+ for (i = 0; i < ARRAY_SIZE(via_beep_mixer); i++) {
+ knew = snd_hda_gen_add_kctl(&spec->gen, NULL,
+ &via_beep_mixer[i]);
+ if (!knew)
+ return -ENOMEM;
+ knew->private_value = beep_amp;
}
return 0;
}
-static void auto_parse_beep(struct hda_codec *codec)
+static int auto_parse_beep(struct hda_codec *codec)
{
struct via_spec *spec = codec->spec;
hda_nid_t nid;
for_each_hda_codec_node(nid, codec)
- if (get_wcaps_type(get_wcaps(codec, nid)) == AC_WID_BEEP) {
- set_beep_amp(spec, nid, 0, HDA_OUTPUT);
- break;
- }
+ if (get_wcaps_type(get_wcaps(codec, nid)) == AC_WID_BEEP)
+ return set_beep_amp(spec, nid, 0, HDA_OUTPUT);
+ return 0;
}
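/* Editor's note: auto_parse_beep() now returns an error instead of silently
 * ignoring a failed control allocation; via_parse_auto_config() below
 * checks the result and propagates it.
 */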
#else
-#define set_beep_amp(spec, nid, idx, dir) /* NOP */
-#define add_beep_ctls(codec) 0
-#define auto_parse_beep(codec)
+#define auto_parse_beep(codec) 0
#endif
/* check AA path's mute status */
@@ -403,30 +376,6 @@ static void analog_low_current_mode(struct hda_codec *codec)
return __analog_low_current_mode(codec, false);
}
-static int via_build_controls(struct hda_codec *codec)
-{
- struct via_spec *spec = codec->spec;
- int err, i;
-
- err = snd_hda_gen_build_controls(codec);
- if (err < 0)
- return err;
-
- err = add_beep_ctls(codec);
- if (err < 0)
- return err;
-
- spec->mixers[spec->num_mixers++] = via_pin_power_ctl_enum;
-
- for (i = 0; i < spec->num_mixers; i++) {
- err = snd_hda_add_new_ctls(codec, spec->mixers[i]);
- if (err < 0)
- return err;
- }
-
- return 0;
-}
-
static void via_playback_pcm_hook(struct hda_pcm_stream *hinfo,
struct hda_codec *codec,
struct snd_pcm_substream *substream,
@@ -481,7 +430,7 @@ static int via_check_power_status(struct hda_codec *codec, hda_nid_t nid)
static int via_init(struct hda_codec *codec);
static const struct hda_codec_ops via_patch_ops = {
- .build_controls = via_build_controls,
+ .build_controls = snd_hda_gen_build_controls,
.build_pcms = snd_hda_gen_build_pcms,
.init = via_init,
.free = via_free,
@@ -545,16 +494,13 @@ static int vt1708_jack_detect_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static const struct snd_kcontrol_new vt1708_jack_detect_ctl[] = {
- {
+static const struct snd_kcontrol_new vt1708_jack_detect_ctl = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Jack Detect",
.count = 1,
.info = snd_ctl_boolean_mono_info,
.get = vt1708_jack_detect_get,
.put = vt1708_jack_detect_put,
- },
- {} /* terminator */
};
static const struct badness_table via_main_out_badness = {
@@ -586,12 +532,17 @@ static int via_parse_auto_config(struct hda_codec *codec)
if (err < 0)
return err;
- auto_parse_beep(codec);
-
err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
if (err < 0)
return err;
+ err = auto_parse_beep(codec);
+ if (err < 0)
+ return err;
+
+ if (!snd_hda_gen_add_kctl(&spec->gen, NULL, &via_pin_power_ctl_enum))
+ return -ENOMEM;
+
/* disable widget PM at start for compatibility */
codec->power_save_node = 0;
spec->gen.power_down_unused = 0;
@@ -600,12 +551,6 @@ static int via_parse_auto_config(struct hda_codec *codec)
static int via_init(struct hda_codec *codec)
{
- struct via_spec *spec = codec->spec;
- int i;
-
- for (i = 0; i < spec->num_iverbs; i++)
- snd_hda_sequence_write(codec, spec->init_verbs[i]);
-
/* init power states */
__analog_low_current_mode(codec, true);
@@ -623,7 +568,7 @@ static int vt1708_build_controls(struct hda_codec *codec)
int err;
int old_interval = codec->jackpoll_interval;
codec->jackpoll_interval = msecs_to_jiffies(100);
- err = via_build_controls(codec);
+ err = snd_hda_gen_build_controls(codec);
codec->jackpoll_interval = old_interval;
return err;
}
@@ -684,22 +629,29 @@ static int patch_vt1708(struct hda_codec *codec)
vt1708_set_pinconfig_connect(codec, VT1708_HP_PIN_NID);
vt1708_set_pinconfig_connect(codec, VT1708_CD_PIN_NID);
+ err = snd_hda_add_verbs(codec, vt1708_init_verbs);
+ if (err < 0)
+ goto error;
+
/* automatic parse from the BIOS config */
err = via_parse_auto_config(codec);
- if (err < 0) {
- via_free(codec);
- return err;
- }
+ if (err < 0)
+ goto error;
/* add jack detect on/off control */
- spec->mixers[spec->num_mixers++] = vt1708_jack_detect_ctl;
-
- spec->init_verbs[spec->num_iverbs++] = vt1708_init_verbs;
+ if (!snd_hda_gen_add_kctl(&spec->gen, NULL, &vt1708_jack_detect_ctl)) {
+ err = -ENOMEM;
+ goto error;
+ }
/* clear jackpoll_interval again; it's set dynamically */
codec->jackpoll_interval = 0;
return 0;
+
+ error:
+ via_free(codec);
+ return err;
}
static int patch_vt1709(struct hda_codec *codec)
@@ -715,12 +667,14 @@ static int patch_vt1709(struct hda_codec *codec)
spec->gen.mixer_nid = 0x18;
err = via_parse_auto_config(codec);
- if (err < 0) {
- via_free(codec);
- return err;
- }
+ if (err < 0)
+ goto error;
return 0;
+
+ error:
+ via_free(codec);
+ return err;
}
static int patch_vt1708S(struct hda_codec *codec);
@@ -741,12 +695,14 @@ static int patch_vt1708B(struct hda_codec *codec)
/* automatic parse from the BIOS config */
err = via_parse_auto_config(codec);
- if (err < 0) {
- via_free(codec);
- return err;
- }
+ if (err < 0)
+ goto error;
return 0;
+
+ error:
+ via_free(codec);
+ return err;
}
/* Patch for VT1708S */
@@ -791,16 +747,20 @@ static int patch_vt1708S(struct hda_codec *codec)
if (codec->core.vendor_id == 0x11064397)
snd_hda_codec_set_name(codec, "VT1705");
+ err = snd_hda_add_verbs(codec, vt1708S_init_verbs);
+ if (err < 0)
+ goto error;
+
/* automatic parse from the BIOS config */
err = via_parse_auto_config(codec);
- if (err < 0) {
- via_free(codec);
- return err;
- }
-
- spec->init_verbs[spec->num_iverbs++] = vt1708S_init_verbs;
+ if (err < 0)
+ goto error;
return 0;
+
+ error:
+ via_free(codec);
+ return err;
}
/* Patch for VT1702 */
@@ -832,16 +792,20 @@ static int patch_vt1702(struct hda_codec *codec)
(0x5 << AC_AMPCAP_STEP_SIZE_SHIFT) |
(1 << AC_AMPCAP_MUTE_SHIFT));
+ err = snd_hda_add_verbs(codec, vt1702_init_verbs);
+ if (err < 0)
+ goto error;
+
/* automatic parse from the BIOS config */
err = via_parse_auto_config(codec);
- if (err < 0) {
- via_free(codec);
- return err;
- }
-
- spec->init_verbs[spec->num_iverbs++] = vt1702_init_verbs;
+ if (err < 0)
+ goto error;
return 0;
+
+ error:
+ via_free(codec);
+ return err;
}
/* Patch for VT1718S */
@@ -904,16 +868,20 @@ static int patch_vt1718S(struct hda_codec *codec)
override_mic_boost(codec, 0x29, 0, 3, 40);
add_secret_dac_path(codec);
+ err = snd_hda_add_verbs(codec, vt1718S_init_verbs);
+ if (err < 0)
+ goto error;
+
/* automatic parse from the BIOS config */
err = via_parse_auto_config(codec);
- if (err < 0) {
- via_free(codec);
- return err;
- }
-
- spec->init_verbs[spec->num_iverbs++] = vt1718S_init_verbs;
+ if (err < 0)
+ goto error;
return 0;
+
+ error:
+ via_free(codec);
+ return err;
}
/* Patch for VT1716S */
@@ -955,9 +923,9 @@ static int vt1716s_dmic_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static const struct snd_kcontrol_new vt1716s_dmic_mixer[] = {
- HDA_CODEC_VOLUME("Digital Mic Capture Volume", 0x22, 0x0, HDA_INPUT),
- {
+static const struct snd_kcontrol_new vt1716s_dmic_mixer_vol =
+ HDA_CODEC_VOLUME("Digital Mic Capture Volume", 0x22, 0x0, HDA_INPUT);
+static const struct snd_kcontrol_new vt1716s_dmic_mixer_sw = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Digital Mic Capture Switch",
.subdevice = HDA_SUBDEV_NID_FLAG | 0x26,
@@ -965,16 +933,12 @@ static const struct snd_kcontrol_new vt1716s_dmic_mixer[] = {
.info = vt1716s_dmic_info,
.get = vt1716s_dmic_get,
.put = vt1716s_dmic_put,
- },
- {} /* end */
};
/* mono-out mixer elements */
-static const struct snd_kcontrol_new vt1716S_mono_out_mixer[] = {
- HDA_CODEC_MUTE("Mono Playback Switch", 0x2a, 0x0, HDA_OUTPUT),
- { } /* end */
-};
+static const struct snd_kcontrol_new vt1716S_mono_out_mixer =
+ HDA_CODEC_MUTE("Mono Playback Switch", 0x2a, 0x0, HDA_OUTPUT);
static const struct hda_verb vt1716S_init_verbs[] = {
/* Enable Boost Volume backdoor */
@@ -1000,19 +964,27 @@ static int patch_vt1716S(struct hda_codec *codec)
override_mic_boost(codec, 0x1a, 0, 3, 40);
override_mic_boost(codec, 0x1e, 0, 3, 40);
+ err = snd_hda_add_verbs(codec, vt1716S_init_verbs);
+ if (err < 0)
+ goto error;
+
/* automatic parse from the BIOS config */
err = via_parse_auto_config(codec);
- if (err < 0) {
- via_free(codec);
- return err;
- }
-
- spec->init_verbs[spec->num_iverbs++] = vt1716S_init_verbs;
+ if (err < 0)
+ goto error;
- spec->mixers[spec->num_mixers++] = vt1716s_dmic_mixer;
- spec->mixers[spec->num_mixers++] = vt1716S_mono_out_mixer;
+ if (!snd_hda_gen_add_kctl(&spec->gen, NULL, &vt1716s_dmic_mixer_vol) ||
+ !snd_hda_gen_add_kctl(&spec->gen, NULL, &vt1716s_dmic_mixer_sw) ||
+ !snd_hda_gen_add_kctl(&spec->gen, NULL, &vt1716S_mono_out_mixer)) {
+ err = -ENOMEM;
+ goto error;
+ }
return 0;
+
+ error:
+ via_free(codec);
+ return err;
}
/* for vt2002P */
@@ -1107,19 +1079,23 @@ static int patch_vt2002P(struct hda_codec *codec)
snd_hda_pick_fixup(codec, NULL, vt2002p_fixups, via_fixups);
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
- /* automatic parse from the BIOS config */
- err = via_parse_auto_config(codec);
- if (err < 0) {
- via_free(codec);
- return err;
- }
-
if (spec->codec_type == VT1802)
- spec->init_verbs[spec->num_iverbs++] = vt1802_init_verbs;
+ err = snd_hda_add_verbs(codec, vt1802_init_verbs);
else
- spec->init_verbs[spec->num_iverbs++] = vt2002P_init_verbs;
+ err = snd_hda_add_verbs(codec, vt2002P_init_verbs);
+ if (err < 0)
+ goto error;
+
+ /* automatic parse from the BIOS config */
+ err = via_parse_auto_config(codec);
+ if (err < 0)
+ goto error;
return 0;
+
+ error:
+ via_free(codec);
+ return err;
}
/* for vt1812 */
@@ -1148,16 +1124,20 @@ static int patch_vt1812(struct hda_codec *codec)
override_mic_boost(codec, 0x29, 0, 3, 40);
add_secret_dac_path(codec);
+ err = snd_hda_add_verbs(codec, vt1812_init_verbs);
+ if (err < 0)
+ goto error;
+
/* automatic parse from the BIOS config */
err = via_parse_auto_config(codec);
- if (err < 0) {
- via_free(codec);
- return err;
- }
-
- spec->init_verbs[spec->num_iverbs++] = vt1812_init_verbs;
+ if (err < 0)
+ goto error;
return 0;
+
+ error:
+ via_free(codec);
+ return err;
}
/* patch for vt3476 */
@@ -1185,16 +1165,20 @@ static int patch_vt3476(struct hda_codec *codec)
spec->gen.mixer_nid = 0x3f;
add_secret_dac_path(codec);
+ err = snd_hda_add_verbs(codec, vt3476_init_verbs);
+ if (err < 0)
+ goto error;
+
/* automatic parse from the BIOS config */
err = via_parse_auto_config(codec);
- if (err < 0) {
- via_free(codec);
- return err;
- }
-
- spec->init_verbs[spec->num_iverbs++] = vt3476_init_verbs;
+ if (err < 0)
+ goto error;
return 0;
+
+ error:
+ via_free(codec);
+ return err;
}
/*
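Editor's note: the recurring change across these patch_vt* functions is the consolidation of repeated `via_free(codec); return err;` pairs into a single error label. A minimal sketch of the single-exit unwind idiom, with hypothetical helper names standing in for the driver's real calls:

/* Sketch only: alloc_spec/parse_config/build_controls/free_spec are
 * illustrative names, not the via driver's actual API. */
static int example_patch(struct hda_codec *codec)
{
	int err;

	err = alloc_spec(codec);	/* nothing to unwind if this fails */
	if (err < 0)
		return err;

	err = parse_config(codec);
	if (err < 0)
		goto error;

	err = build_controls(codec);
	if (err < 0)
		goto error;

	return 0;

 error:
	free_spec(codec);		/* one unwind path for all failures */
	return err;
}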
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 65bb3ac6af4c..97f49b751e6e 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -27,17 +27,11 @@ static void update_tpacpi_mute_led(void *private_data, int enabled)
led_set_func(TPACPI_LED_MUTE, !enabled);
}
-static void update_tpacpi_micmute_led(struct hda_codec *codec,
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static void update_tpacpi_micmute(struct hda_codec *codec)
{
- if (!ucontrol || !led_set_func)
- return;
- if (strcmp("Capture Switch", ucontrol->id.name) == 0 && ucontrol->id.index == 0) {
- /* TODO: How do I verify if it's a mono or stereo here? */
- bool val = ucontrol->value.integer.value[0] || ucontrol->value.integer.value[1];
- led_set_func(TPACPI_LED_MICMUTE, !val);
- }
+ struct hda_gen_spec *spec = codec->spec;
+
+ led_set_func(TPACPI_LED_MICMUTE, spec->micmute_led.led_value);
}
static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
@@ -63,15 +57,10 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
spec->vmaster_mute.hook = update_tpacpi_mute_led;
removefunc = false;
}
- if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
- if (spec->num_adc_nids > 1 && !spec->dyn_adc_switch)
- codec_dbg(codec,
- "Skipping micmute LED control due to several ADCs");
- else {
- spec->cap_sync_hook = update_tpacpi_micmute_led;
- removefunc = false;
- }
- }
+ if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0 &&
+ snd_hda_gen_add_micmute_led(codec,
+ update_tpacpi_micmute) > 0)
+ removefunc = false;
}
if (led_set_func && (action == HDA_FIXUP_ACT_FREE || removefunc)) {
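Editor's note: the thinkpad_helper change drops the per-control capture hook in favor of the generic micmute-LED helper; the driver now only supplies a callback that forwards the cached LED state. A sketch of that shape, assuming the one-argument hook signature shown in the hunk above:

/* Sketch: the hook receives only the codec and reads the state the
 * generic parser maintains in spec->micmute_led.led_value.
 * set_platform_led() is a hypothetical platform setter. */
static void my_micmute_hook(struct hda_codec *codec)
{
	struct hda_gen_spec *spec = codec->spec;

	set_platform_led(spec->micmute_led.led_value);
}

Per the hunk, snd_hda_gen_add_micmute_led(codec, my_micmute_hook) returns a positive value when the LED control was attached.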
diff --git a/sound/pci/ice1712/ak4xxx.c b/sound/pci/ice1712/ak4xxx.c
index 179ef7a5f0d1..a553897a4c4f 100644
--- a/sound/pci/ice1712/ak4xxx.c
+++ b/sound/pci/ice1712/ak4xxx.c
@@ -179,18 +179,6 @@ int snd_ice1712_akm4xxx_build_controls(struct snd_ice1712 *ice)
return 0;
}
-static int __init alsa_ice1712_akm4xxx_module_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_ice1712_akm4xxx_module_exit(void)
-{
-}
-
-module_init(alsa_ice1712_akm4xxx_module_init)
-module_exit(alsa_ice1712_akm4xxx_module_exit)
-
EXPORT_SYMBOL(snd_ice1712_akm4xxx_init);
EXPORT_SYMBOL(snd_ice1712_akm4xxx_free);
EXPORT_SYMBOL(snd_ice1712_akm4xxx_build_controls);
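Editor's note: the deleted alsa_ice1712_akm4xxx_module_init/exit pair did nothing. A module that only exports symbols needs no init or exit hooks at all; the kernel treats a missing module_init() as a no-op success. Minimal sketch of such a "library" module:

/* Sketch: exports only, no module_init()/module_exit() required. */
#include <linux/module.h>

int my_helper(int x)
{
	return x * 2;
}
EXPORT_SYMBOL(my_helper);

MODULE_LICENSE("GPL");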
diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
index d7366ade5a25..c97b5528e4b8 100644
--- a/sound/pci/ice1712/prodigy_hifi.c
+++ b/sound/pci/ice1712/prodigy_hifi.c
@@ -314,26 +314,7 @@ static struct snd_kcontrol_new prodigy_hd2_controls[] = {
/* --------------- */
-/*
- * Logarithmic volume values for WM87*6
- * Computed as 20 * Log10(255 / x)
- */
-static const unsigned char wm_vol[256] = {
- 127, 48, 42, 39, 36, 34, 33, 31, 30, 29, 28, 27, 27, 26, 25, 25, 24, 24, 23,
- 23, 22, 22, 21, 21, 21, 20, 20, 20, 19, 19, 19, 18, 18, 18, 18, 17, 17, 17,
- 17, 16, 16, 16, 16, 15, 15, 15, 15, 15, 15, 14, 14, 14, 14, 14, 13, 13, 13,
- 13, 13, 13, 13, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8,
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0
-};
-
-#define WM_VOL_MAX (sizeof(wm_vol) - 1)
+#define WM_VOL_MAX 255
#define WM_VOL_MUTE 0x8000
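Editor's note: the removed 256-entry table encoded the attenuation curve 20 * log10(255 / x); with WM_VOL_MAX now simply 255, the same values can be computed on demand. A runnable userspace sketch of the formula the table's comment documented:

#include <math.h>
#include <stdio.h>

/* Attenuation in dB for a linear volume x in 1..255, per the removed
 * table's comment: 20 * log10(255 / x).  x == 0 was pinned to 127. */
static unsigned char wm_vol_db(unsigned int x)
{
	if (x == 0)
		return 127;
	return (unsigned char)(20.0 * log10(255.0 / x) + 0.5);
}

int main(void)
{
	printf("x=1   -> %u dB\n", wm_vol_db(1));	/* 48, matching the old table */
	printf("x=255 -> %u dB\n", wm_vol_db(255));	/* 0 */
	return 0;
}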
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 4c24346340f4..5ee468d1aefe 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -351,7 +351,7 @@ enum {
struct ichdev {
unsigned int ichd; /* ich device number */
unsigned long reg_offset; /* offset to bmaddr */
- u32 *bdbar; /* CPU address (32bit) */
+ __le32 *bdbar; /* CPU address (32bit) */
unsigned int bdbar_addr; /* PCI bus address (32bit) */
struct snd_pcm_substream *substream;
unsigned int physbuf; /* physical address (32bit) */
@@ -677,7 +677,7 @@ static void snd_intel8x0_ali_codec_write(struct snd_ac97 *ac97, unsigned short r
static void snd_intel8x0_setup_periods(struct intel8x0 *chip, struct ichdev *ichdev)
{
int idx;
- u32 *bdbar = ichdev->bdbar;
+ __le32 *bdbar = ichdev->bdbar;
unsigned long port = ichdev->reg_offset;
iputdword(chip, port + ICH_REG_OFF_BDBAR, ichdev->bdbar_addr);
@@ -3143,7 +3143,7 @@ static int snd_intel8x0_create(struct snd_card *card,
int_sta_masks = 0;
for (i = 0; i < chip->bdbars_count; i++) {
ichdev = &chip->ichd[i];
- ichdev->bdbar = ((u32 *)chip->bdbars.area) +
+ ichdev->bdbar = ((__le32 *)chip->bdbars.area) +
(i * ICH_MAX_FRAGS * 2);
ichdev->bdbar_addr = chip->bdbars.addr +
(i * sizeof(u32) * ICH_MAX_FRAGS * 2);
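Editor's note: the u32 -> __le32 conversions in this file (and in several drivers below) are sparse annotations: the BDL entries live in DMA memory that the device reads little-endian, so every store must go through cpu_to_le32(). A kernel-style sketch of the pattern, with a hypothetical descriptor layout:

/* Sketch: filling a little-endian descriptor table.  With __le32 fields,
 * sparse (make C=1) flags any direct assignment of a CPU-endian value. */
struct my_bd {
	__le32 addr;	/* bus address; device reads little-endian */
	__le32 len;	/* length/flags word; device reads little-endian */
};

static void fill_bd(struct my_bd *bd, u32 addr, u32 len)
{
	bd->addr = cpu_to_le32(addr);	/* explicit swap on big-endian hosts */
	bd->len  = cpu_to_le32(len);
}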
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index 3a4769a97d29..943a726b1c1b 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -168,7 +168,7 @@ enum { ALID_MDMIN, ALID_MDMOUT, ALID_MDMLAST = ALID_MDMOUT };
struct ichdev {
unsigned int ichd; /* ich device number */
unsigned long reg_offset; /* offset to bmaddr */
- u32 *bdbar; /* CPU address (32bit) */
+ __le32 *bdbar; /* CPU address (32bit) */
unsigned int bdbar_addr; /* PCI bus address (32bit) */
struct snd_pcm_substream *substream;
unsigned int physbuf; /* physical address (32bit) */
@@ -395,7 +395,7 @@ static unsigned short snd_intel8x0m_codec_read(struct snd_ac97 *ac97,
static void snd_intel8x0m_setup_periods(struct intel8x0m *chip, struct ichdev *ichdev)
{
int idx;
- u32 *bdbar = ichdev->bdbar;
+ __le32 *bdbar = ichdev->bdbar;
unsigned long port = ichdev->reg_offset;
iputdword(chip, port + ICH_REG_OFF_BDBAR, ichdev->bdbar_addr);
@@ -1217,7 +1217,7 @@ static int snd_intel8x0m_create(struct snd_card *card,
int_sta_masks = 0;
for (i = 0; i < chip->bdbars_count; i++) {
ichdev = &chip->ichd[i];
- ichdev->bdbar = ((u32 *)chip->bdbars.area) + (i * ICH_MAX_FRAGS * 2);
+ ichdev->bdbar = ((__le32 *)chip->bdbars.area) + (i * ICH_MAX_FRAGS * 2);
ichdev->bdbar_addr = chip->bdbars.addr + (i * sizeof(u32) * ICH_MAX_FRAGS * 2);
int_sta_masks |= ichdev->int_sta_mask;
}
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 4206ba44d8bb..4e189a93f475 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -1326,7 +1326,7 @@ static int snd_korg1212_copy_to(struct snd_pcm_substream *substream,
}
#endif
if (in_kernel)
- memcpy((void *)dst, src, size);
+ memcpy((__force void *)dst, src, size);
else if (copy_to_user(dst, src, size))
return -EFAULT;
src++;
@@ -1365,7 +1365,7 @@ static int snd_korg1212_copy_from(struct snd_pcm_substream *substream,
}
#endif
if (in_kernel)
- memcpy((void *)dst, src, size);
+ memcpy(dst, (__force void *)src, size);
else if (copy_from_user(dst, src, size))
return -EFAULT;
dst++;
diff --git a/sound/pci/lola/lola.c b/sound/pci/lola/lola.c
index 9ff600084973..254f24366892 100644
--- a/sound/pci/lola/lola.c
+++ b/sound/pci/lola/lola.c
@@ -369,9 +369,9 @@ static int setup_corb_rirb(struct lola *chip)
return err;
chip->corb.addr = chip->rb.addr;
- chip->corb.buf = (u32 *)chip->rb.area;
+ chip->corb.buf = (__le32 *)chip->rb.area;
chip->rirb.addr = chip->rb.addr + 2048;
- chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
+ chip->rirb.buf = (__le32 *)(chip->rb.area + 2048);
/* disable ringbuffer DMAs */
lola_writeb(chip, BAR0, RIRBCTL, 0);
diff --git a/sound/pci/lola/lola.h b/sound/pci/lola/lola.h
index f0b100059efd..bd852fed8bb6 100644
--- a/sound/pci/lola/lola.h
+++ b/sound/pci/lola/lola.h
@@ -220,7 +220,7 @@ struct lola_bar {
/* CORB/RIRB */
struct lola_rb {
- u32 *buf; /* CORB/RIRB buffer, 8 byte per each entry */
+ __le32 *buf; /* CORB/RIRB buffer, 8 byte per each entry */
dma_addr_t addr; /* physical address of CORB/RIRB buffer */
unsigned short rp, wp; /* read/write pointers */
int cmds; /* number of pending requests */
@@ -275,7 +275,7 @@ struct lola_mixer_array {
struct lola_mixer_widget {
unsigned int nid;
unsigned int caps;
- struct lola_mixer_array __user *array;
+ struct lola_mixer_array __iomem *array;
struct lola_mixer_array *array_saved;
unsigned int src_stream_outs;
unsigned int src_phys_ins;
diff --git a/sound/pci/lola/lola_pcm.c b/sound/pci/lola/lola_pcm.c
index 310b26a756c9..e70276c3ea20 100644
--- a/sound/pci/lola/lola_pcm.c
+++ b/sound/pci/lola/lola_pcm.c
@@ -316,10 +316,10 @@ static int lola_pcm_hw_free(struct snd_pcm_substream *substream)
* set up a BDL entry
*/
static int setup_bdle(struct snd_pcm_substream *substream,
- struct lola_stream *str, u32 **bdlp,
+ struct lola_stream *str, __le32 **bdlp,
int ofs, int size)
{
- u32 *bdl = *bdlp;
+ __le32 *bdl = *bdlp;
while (size > 0) {
dma_addr_t addr;
@@ -355,14 +355,14 @@ static int lola_setup_periods(struct lola *chip, struct lola_pcm *pcm,
struct snd_pcm_substream *substream,
struct lola_stream *str)
{
- u32 *bdl;
+ __le32 *bdl;
int i, ofs, periods, period_bytes;
period_bytes = str->period_bytes;
periods = str->bufsize / period_bytes;
/* program the initial BDL entries */
- bdl = (u32 *)(pcm->bdl.area + LOLA_BDL_ENTRY_SIZE * str->index);
+ bdl = (__le32 *)(pcm->bdl.area + LOLA_BDL_ENTRY_SIZE * str->index);
ofs = 0;
str->frags = 0;
for (i = 0; i < periods; i++) {
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index 224e942f556d..62962178a9d7 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -2103,7 +2103,7 @@ static const u16 minisrc_lpf[MINISRC_LPF_LEN] = {
static void snd_m3_assp_init(struct snd_m3 *chip)
{
unsigned int i;
- const u16 *data;
+ const __le16 *data;
/* zero kernel data */
for (i = 0; i < (REV_B_DATA_MEMORY_UNIT_LENGTH * NUM_UNITS_KERNEL_DATA) / 2; i++)
@@ -2121,7 +2121,7 @@ static void snd_m3_assp_init(struct snd_m3 *chip)
KDATA_DMA_XFER0);
/* write kernel into code memory.. */
- data = (const u16 *)chip->assp_kernel_image->data;
+ data = (const __le16 *)chip->assp_kernel_image->data;
for (i = 0 ; i * 2 < chip->assp_kernel_image->size; i++) {
snd_m3_assp_write(chip, MEMTYPE_INTERNAL_CODE,
REV_B_CODE_MEMORY_BEGIN + i,
@@ -2134,7 +2134,7 @@ static void snd_m3_assp_init(struct snd_m3 *chip)
* drop it there. It seems that the minisrc doesn't
* need vectors, so we won't bother with them..
*/
- data = (const u16 *)chip->assp_minisrc_image->data;
+ data = (const __le16 *)chip->assp_minisrc_image->data;
for (i = 0; i * 2 < chip->assp_minisrc_image->size; i++) {
snd_m3_assp_write(chip, MEMTYPE_INTERNAL_CODE,
0x400 + i, le16_to_cpu(data[i]));
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index a74f1ad7e7b8..9cd297a42f24 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -182,6 +182,7 @@ static int mixart_set_clock(struct mixart_mgr *mgr,
case PIPE_RUNNING:
if(rate != 0)
break;
+ /* fall through */
default:
if(rate == 0)
return 0; /* nothing to do */
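Editor's note: the added /* fall through */ comment marks the PIPE_RUNNING case's deliberate drop into default when rate != 0; GCC's -Wimplicit-fallthrough accepts such comments as suppression. A compact, self-contained illustration with illustrative names:

/* Sketch: an annotated fall-through that -Wimplicit-fallthrough accepts. */
static int example(int state, int rate)
{
	switch (state) {
	case 1:				/* e.g. PIPE_RUNNING */
		if (rate != 0)
			break;		/* running and rate set: done */
		/* fall through */
	default:
		if (rate == 0)
			return 0;	/* nothing to do */
		break;
	}
	return rate;
}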
diff --git a/sound/pci/mixart/mixart_core.c b/sound/pci/mixart/mixart_core.c
index 8bf2ce32d4a8..71776bfe0485 100644
--- a/sound/pci/mixart/mixart_core.c
+++ b/sound/pci/mixart/mixart_core.c
@@ -107,7 +107,7 @@ static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp,
#ifndef __BIG_ENDIAN
size /= 4; /* u32 size */
for(i=0; i < size; i++) {
- ((u32*)resp->data)[i] = be32_to_cpu(((u32*)resp->data)[i]);
+ ((u32*)resp->data)[i] = be32_to_cpu(((__be32*)resp->data)[i]);
}
#endif
@@ -519,7 +519,7 @@ irqreturn_t snd_mixart_threaded_irq(int irq, void *dev_id)
/* Traces are text: the swapped msg_data has to be swapped back ! */
int i;
for(i=0; i<(resp.size/4); i++) {
- (mixart_msg_data)[i] = cpu_to_be32((mixart_msg_data)[i]);
+ ((__be32*)mixart_msg_data)[i] = cpu_to_be32((mixart_msg_data)[i]);
}
#endif
((char*)mixart_msg_data)[resp.size - 1] = 0;
@@ -540,7 +540,7 @@ irqreturn_t snd_mixart_threaded_irq(int irq, void *dev_id)
dev_err(&mgr->pci->dev,
"canceled notification %x !\n", msg);
}
- /* no break, continue ! */
+ /* fall through */
case MSG_TYPE_ANSWER:
/* answer or notification to a message we are waiting for*/
mutex_lock(&mgr->msg_lock);
diff --git a/sound/pci/mixart/mixart_hwdep.c b/sound/pci/mixart/mixart_hwdep.c
index 5bfd3ac80db5..bc92758de82c 100644
--- a/sound/pci/mixart/mixart_hwdep.c
+++ b/sound/pci/mixart/mixart_hwdep.c
@@ -73,30 +73,30 @@ static int mixart_wait_nice_for_register_value(struct mixart_mgr *mgr,
*/
struct snd_mixart_elf32_ehdr {
u8 e_ident[16];
- u16 e_type;
- u16 e_machine;
- u32 e_version;
- u32 e_entry;
- u32 e_phoff;
- u32 e_shoff;
- u32 e_flags;
- u16 e_ehsize;
- u16 e_phentsize;
- u16 e_phnum;
- u16 e_shentsize;
- u16 e_shnum;
- u16 e_shstrndx;
+ __be16 e_type;
+ __be16 e_machine;
+ __be32 e_version;
+ __be32 e_entry;
+ __be32 e_phoff;
+ __be32 e_shoff;
+ __be32 e_flags;
+ __be16 e_ehsize;
+ __be16 e_phentsize;
+ __be16 e_phnum;
+ __be16 e_shentsize;
+ __be16 e_shnum;
+ __be16 e_shstrndx;
};
struct snd_mixart_elf32_phdr {
- u32 p_type;
- u32 p_offset;
- u32 p_vaddr;
- u32 p_paddr;
- u32 p_filesz;
- u32 p_memsz;
- u32 p_flags;
- u32 p_align;
+ __be32 p_type;
+ __be32 p_offset;
+ __be32 p_vaddr;
+ __be32 p_paddr;
+ __be32 p_filesz;
+ __be32 p_memsz;
+ __be32 p_flags;
+ __be32 p_align;
};
static int mixart_load_elf(struct mixart_mgr *mgr, const struct firmware *dsp )
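Editor's note: marking the ELF header fields __be16/__be32 documents that the miXart firmware image is big-endian on disk, so every field read must be converted explicitly. A sketch of validating such a header; the specific checks are illustrative, not the driver's actual logic:

/* Sketch: reading the big-endian header fields declared above. */
static int check_elf(const struct snd_mixart_elf32_ehdr *h)
{
	/* ET_EXEC == 2 per the ELF spec; the conversion makes the
	 * comparison host-endian on any platform. */
	if (be16_to_cpu(h->e_type) != 2)
		return -EINVAL;
	if (be32_to_cpu(h->e_phoff) == 0)
		return -EINVAL;		/* no program headers to load */
	return 0;
}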
diff --git a/sound/pci/mixart/mixart_hwdep.h b/sound/pci/mixart/mixart_hwdep.h
index 812e288ef2e7..2794cd385b8e 100644
--- a/sound/pci/mixart/mixart_hwdep.h
+++ b/sound/pci/mixart/mixart_hwdep.h
@@ -26,19 +26,19 @@
#include <sound/hwdep.h>
#ifndef readl_be
-#define readl_be(x) be32_to_cpu(__raw_readl(x))
+#define readl_be(x) be32_to_cpu((__force __be32)__raw_readl(x))
#endif
#ifndef writel_be
-#define writel_be(data,addr) __raw_writel(cpu_to_be32(data),addr)
+#define writel_be(data,addr) __raw_writel((__force u32)cpu_to_be32(data),addr)
#endif
#ifndef readl_le
-#define readl_le(x) le32_to_cpu(__raw_readl(x))
+#define readl_le(x) le32_to_cpu((__force __le32)__raw_readl(x))
#endif
#ifndef writel_le
-#define writel_le(data,addr) __raw_writel(cpu_to_le32(data),addr)
+#define writel_le(data,addr) __raw_writel((__force u32)cpu_to_le32(data),addr)
#endif
#define MIXART_MEM(mgr,x) ((mgr)->mem[0].virt + (x))
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index 44f3b48d47b1..23017e3bc76c 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -470,10 +470,10 @@ struct snd_riptide {
};
struct sgd { /* scatter gather descriptor */

- u32 dwNextLink;
- u32 dwSegPtrPhys;
- u32 dwSegLen;
- u32 dwStat_Ctl;
+ __le32 dwNextLink;
+ __le32 dwSegPtrPhys;
+ __le32 dwSegLen;
+ __le32 dwStat_Ctl;
};
struct pcmhw { /* pcm descriptor */
@@ -1017,7 +1017,7 @@ getsamplerate(struct cmdif *cif, unsigned char *intdec, unsigned int *rate)
static int
setsampleformat(struct cmdif *cif,
unsigned char mixer, unsigned char id,
- unsigned char channels, unsigned char format)
+ unsigned char channels, snd_pcm_format_t format)
{
unsigned char w, ch, sig, order;
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index 7fbdb703bfcd..7218f38b59db 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -1433,14 +1433,12 @@ static int snd_sonicvibes_midi(struct sonicvibes *sonic,
{
struct snd_mpu401 * mpu = rmidi->private_data;
struct snd_card *card = sonic->card;
- struct snd_rawmidi_str *dir;
unsigned int idx;
int err;
mpu->private_data = sonic;
mpu->open_input = snd_sonicvibes_midi_input_open;
mpu->close_input = snd_sonicvibes_midi_input_close;
- dir = &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT];
for (idx = 0; idx < ARRAY_SIZE(snd_sonicvibes_midi_controls); idx++)
if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_sonicvibes_midi_controls[idx], sonic))) < 0)
return err;
diff --git a/sound/pci/trident/trident.c b/sound/pci/trident/trident.c
index cedf13b64803..2f18b1cdc2cd 100644
--- a/sound/pci/trident/trident.c
+++ b/sound/pci/trident/trident.c
@@ -123,7 +123,7 @@ static int snd_trident_probe(struct pci_dev *pci,
} else {
strcpy(card->shortname, "Trident ");
}
- strcat(card->shortname, card->driver);
+ strcat(card->shortname, str);
sprintf(card->longname, "%s PCI Audio at 0x%lx, irq %d",
card->shortname, trident->port, trident->irq);
diff --git a/sound/pci/trident/trident.h b/sound/pci/trident/trident.h
index 9624e5937719..2d62c1921255 100644
--- a/sound/pci/trident/trident.h
+++ b/sound/pci/trident/trident.h
@@ -264,7 +264,7 @@ struct snd_trident_memblk_arg {
};
struct snd_trident_tlb {
- unsigned int * entries; /* 16k-aligned TLB table */
+ __le32 *entries; /* 16k-aligned TLB table */
dma_addr_t entries_dmaaddr; /* 16k-aligned PCI address to TLB table */
unsigned long * shadow_entries; /* shadow entries with virtual addresses */
struct snd_dma_buffer buffer;
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 49c64fae3466..5523e193d556 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -3359,7 +3359,7 @@ static int snd_trident_tlb_alloc(struct snd_trident *trident)
dev_err(trident->card->dev, "unable to allocate TLB buffer\n");
return -ENOMEM;
}
- trident->tlb.entries = (unsigned int*)ALIGN((unsigned long)trident->tlb.buffer.area, SNDRV_TRIDENT_MAX_PAGES * 4);
+ trident->tlb.entries = (__le32 *)ALIGN((unsigned long)trident->tlb.buffer.area, SNDRV_TRIDENT_MAX_PAGES * 4);
trident->tlb.entries_dmaaddr = ALIGN(trident->tlb.buffer.addr, SNDRV_TRIDENT_MAX_PAGES * 4);
/* allocate shadow TLB page table (virtual addresses) */
trident->tlb.shadow_entries =
diff --git a/sound/pci/vx222/vx222_ops.c b/sound/pci/vx222/vx222_ops.c
index d4298af6d3ee..c0d0bf44f365 100644
--- a/sound/pci/vx222/vx222_ops.c
+++ b/sound/pci/vx222/vx222_ops.c
@@ -275,7 +275,7 @@ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
length >>= 2; /* in 32bit words */
/* Transfer using pseudo-dma. */
for (; length > 0; length--) {
- outl(cpu_to_le32(*addr), port);
+ outl(*addr, port);
addr++;
}
addr = (u32 *)runtime->dma_area;
@@ -285,7 +285,7 @@ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
count >>= 2; /* in 32bit words */
/* Transfer using pseudo-dma. */
for (; count > 0; count--) {
- outl(cpu_to_le32(*addr), port);
+ outl(*addr, port);
addr++;
}
@@ -313,7 +313,7 @@ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
length >>= 2; /* in 32bit words */
/* Transfer using pseudo-dma. */
for (; length > 0; length--)
- *addr++ = le32_to_cpu(inl(port));
+ *addr++ = inl(port);
addr = (u32 *)runtime->dma_area;
pipe->hw_ptr = 0;
}
@@ -321,7 +321,7 @@ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
count >>= 2; /* in 32bit words */
/* Transfer using pseudo-dma. */
for (; count > 0; count--)
- *addr++ = le32_to_cpu(inl(port));
+ *addr++ = inl(port);
vx2_release_pseudo_dma(chip);
}
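Editor's note: dropping cpu_to_le32()/le32_to_cpu() around outl()/inl() fixes a latent double swap. The port accessors take and return CPU-endian values and perform any bus-order conversion themselves, so the extra conversion was a no-op on little-endian hosts and wrong on big-endian ones. Sketch of the corrected pseudo-DMA loop shape:

/* Sketch: pseudo-DMA out of a CPU-endian buffer.  outl() handles bus
 * byte order internally, so the words are written unconverted. */
static void push_words(const u32 *buf, unsigned int words, unsigned long port)
{
	while (words--)
		outl(*buf++, port);
}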
diff --git a/sound/pci/ymfpci/ymfpci.h b/sound/pci/ymfpci/ymfpci.h
index aa9bb065f385..e2fa7e360d79 100644
--- a/sound/pci/ymfpci/ymfpci.h
+++ b/sound/pci/ymfpci/ymfpci.h
@@ -185,50 +185,50 @@
*/
struct snd_ymfpci_playback_bank {
- u32 format;
- u32 loop_default;
- u32 base; /* 32-bit address */
- u32 loop_start; /* 32-bit offset */
- u32 loop_end; /* 32-bit offset */
- u32 loop_frac; /* 8-bit fraction - loop_start */
- u32 delta_end; /* pitch delta end */
- u32 lpfK_end;
- u32 eg_gain_end;
- u32 left_gain_end;
- u32 right_gain_end;
- u32 eff1_gain_end;
- u32 eff2_gain_end;
- u32 eff3_gain_end;
- u32 lpfQ;
- u32 status;
- u32 num_of_frames;
- u32 loop_count;
- u32 start;
- u32 start_frac;
- u32 delta;
- u32 lpfK;
- u32 eg_gain;
- u32 left_gain;
- u32 right_gain;
- u32 eff1_gain;
- u32 eff2_gain;
- u32 eff3_gain;
- u32 lpfD1;
- u32 lpfD2;
+ __le32 format;
+ __le32 loop_default;
+ __le32 base; /* 32-bit address */
+ __le32 loop_start; /* 32-bit offset */
+ __le32 loop_end; /* 32-bit offset */
+ __le32 loop_frac; /* 8-bit fraction - loop_start */
+ __le32 delta_end; /* pitch delta end */
+ __le32 lpfK_end;
+ __le32 eg_gain_end;
+ __le32 left_gain_end;
+ __le32 right_gain_end;
+ __le32 eff1_gain_end;
+ __le32 eff2_gain_end;
+ __le32 eff3_gain_end;
+ __le32 lpfQ;
+ __le32 status;
+ __le32 num_of_frames;
+ __le32 loop_count;
+ __le32 start;
+ __le32 start_frac;
+ __le32 delta;
+ __le32 lpfK;
+ __le32 eg_gain;
+ __le32 left_gain;
+ __le32 right_gain;
+ __le32 eff1_gain;
+ __le32 eff2_gain;
+ __le32 eff3_gain;
+ __le32 lpfD1;
+ __le32 lpfD2;
};
struct snd_ymfpci_capture_bank {
- u32 base; /* 32-bit address */
- u32 loop_end; /* 32-bit offset */
- u32 start; /* 32-bit offset */
- u32 num_of_loops; /* counter */
+ __le32 base; /* 32-bit address */
+ __le32 loop_end; /* 32-bit offset */
+ __le32 start; /* 32-bit offset */
+ __le32 num_of_loops; /* counter */
};
struct snd_ymfpci_effect_bank {
- u32 base; /* 32-bit address */
- u32 loop_end; /* 32-bit offset */
- u32 start; /* 32-bit offset */
- u32 temp;
+ __le32 base; /* 32-bit address */
+ __le32 loop_end; /* 32-bit offset */
+ __le32 start; /* 32-bit offset */
+ __le32 temp;
};
struct snd_ymfpci_pcm;
@@ -316,7 +316,7 @@ struct snd_ymfpci {
dma_addr_t work_base_addr;
struct snd_dma_buffer ac3_tmp_base;
- u32 *ctrl_playback;
+ __le32 *ctrl_playback;
struct snd_ymfpci_playback_bank *bank_playback[YDSXG_PLAYBACK_VOICES][2];
struct snd_ymfpci_capture_bank *bank_capture[YDSXG_CAPTURE_VOICES][2];
struct snd_ymfpci_effect_bank *bank_effect[YDSXG_EFFECT_VOICES][2];
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index 6f81396aadc9..a4926fb03991 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -336,7 +336,7 @@ static void snd_ymfpci_pcm_interrupt(struct snd_ymfpci *chip, struct snd_ymfpci_
unsigned int subs = ypcm->substream->number;
unsigned int next_bank = 1 - chip->active_bank;
struct snd_ymfpci_playback_bank *bank;
- u32 volume;
+ __le32 volume;
bank = &voice->bank[next_bank];
volume = cpu_to_le32(chip->pcm_mixer[subs].left << 15);
@@ -505,7 +505,7 @@ static void snd_ymfpci_pcm_init_voice(struct snd_ymfpci_pcm *ypcm, unsigned int
u32 lpfK = snd_ymfpci_calc_lpfK(runtime->rate);
struct snd_ymfpci_playback_bank *bank;
unsigned int nbank;
- u32 vol_left, vol_right;
+ __le32 vol_left, vol_right;
u8 use_left, use_right;
unsigned long flags;
@@ -2135,7 +2135,7 @@ static int snd_ymfpci_memalloc(struct snd_ymfpci *chip)
chip->bank_base_playback = ptr;
chip->bank_base_playback_addr = ptr_addr;
- chip->ctrl_playback = (u32 *)ptr;
+ chip->ctrl_playback = (__le32 *)ptr;
chip->ctrl_playback[0] = cpu_to_le32(YDSXG_PLAYBACK_VOICES);
ptr += ALIGN(playback_ctrl_size, 0x100);
ptr_addr += ALIGN(playback_ctrl_size, 0x100);
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
index 4a2354b5ae00..98a6863e933c 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
@@ -276,7 +276,6 @@ static const struct snd_pcm_ops pdacf_pcm_capture_ops = {
.trigger = pdacf_pcm_trigger,
.pointer = pdacf_pcm_capture_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
diff --git a/sound/pcmcia/vx/vxp_ops.c b/sound/pcmcia/vx/vxp_ops.c
index 8cde40226355..4c4ef1fec69f 100644
--- a/sound/pcmcia/vx/vxp_ops.c
+++ b/sound/pcmcia/vx/vxp_ops.c
@@ -375,7 +375,7 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
length >>= 1; /* in 16bit words */
/* Transfer using pseudo-dma. */
for (; length > 0; length--) {
- outw(cpu_to_le16(*addr), port);
+ outw(*addr, port);
addr++;
}
addr = (unsigned short *)runtime->dma_area;
@@ -385,7 +385,7 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
count >>= 1; /* in 16bit words */
/* Transfer using pseudo-dma. */
for (; count > 0; count--) {
- outw(cpu_to_le16(*addr), port);
+ outw(*addr, port);
addr++;
}
vx_release_pseudo_dma(chip);
@@ -417,7 +417,7 @@ static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
length >>= 1; /* in 16bit words */
/* Transfer using pseudo-dma. */
for (; length > 0; length--)
- *addr++ = le16_to_cpu(inw(port));
+ *addr++ = inw(port);
addr = (unsigned short *)runtime->dma_area;
pipe->hw_ptr = 0;
}
@@ -425,12 +425,12 @@ static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
count >>= 1; /* in 16bit words */
/* Transfer using pseudo-dma. */
for (; count > 1; count--)
- *addr++ = le16_to_cpu(inw(port));
+ *addr++ = inw(port);
/* Disable DMA */
pchip->regDIALOG &= ~VXP_DLG_DMAREAD_SEL_MASK;
vx_outb(chip, DIALOG, pchip->regDIALOG);
/* Read the last word (16 bits) */
- *addr = le16_to_cpu(inw(port));
+ *addr = inw(port);
/* Disable 16-bit accesses */
pchip->regDIALOG &= ~VXP_DLG_DMA16_SEL_MASK;
vx_outb(chip, DIALOG, pchip->regDIALOG);
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 41af6b9cc350..1cf11cf51e1d 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -57,6 +57,7 @@ source "sound/soc/kirkwood/Kconfig"
source "sound/soc/img/Kconfig"
source "sound/soc/intel/Kconfig"
source "sound/soc/mediatek/Kconfig"
+source "sound/soc/meson/Kconfig"
source "sound/soc/mxs/Kconfig"
source "sound/soc/pxa/Kconfig"
source "sound/soc/qcom/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 06389a5385d7..62a5f87c3cfc 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_SND_SOC) += jz4740/
obj-$(CONFIG_SND_SOC) += img/
obj-$(CONFIG_SND_SOC) += intel/
obj-$(CONFIG_SND_SOC) += mediatek/
+obj-$(CONFIG_SND_SOC) += meson/
obj-$(CONFIG_SND_SOC) += mxs/
obj-$(CONFIG_SND_SOC) += nuc900/
obj-$(CONFIG_SND_SOC) += omap/
diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
index 6cbf9cf4d1a4..58c1dcb4d255 100644
--- a/sound/soc/amd/Kconfig
+++ b/sound/soc/amd/Kconfig
@@ -8,6 +8,7 @@ config SND_SOC_AMD_CZ_DA7219MX98357_MACH
select SND_SOC_DA7219
select SND_SOC_MAX98357A
select SND_SOC_ADAU7002
+ select REGULATOR
depends on SND_SOC_AMD_ACP && I2C
help
This option enables the machine driver for the DA7219 and MAX98357A.
diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c
index ccddc6650b9c..8e3275a96a82 100644
--- a/sound/soc/amd/acp-da7219-max98357a.c
+++ b/sound/soc/amd/acp-da7219-max98357a.c
@@ -32,6 +32,8 @@
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/module.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/driver.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/acpi.h>
@@ -148,7 +150,8 @@ static int cz_da7219_startup(struct snd_pcm_substream *substream)
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&constraints_rates);
- machine->i2s_instance = I2S_BT_INSTANCE;
+ machine->i2s_instance = I2S_SP_INSTANCE;
+ machine->capture_channel = CAP_CHANNEL1;
return da7219_clk_enable(substream);
}
@@ -163,7 +166,7 @@ static int cz_max_startup(struct snd_pcm_substream *substream)
struct snd_soc_card *card = rtd->card;
struct acp_platform_info *machine = snd_soc_card_get_drvdata(card);
- machine->i2s_instance = I2S_SP_INSTANCE;
+ machine->i2s_instance = I2S_BT_INSTANCE;
return da7219_clk_enable(substream);
}
@@ -172,13 +175,24 @@ static void cz_max_shutdown(struct snd_pcm_substream *substream)
da7219_clk_disable();
}
-static int cz_dmic_startup(struct snd_pcm_substream *substream)
+static int cz_dmic0_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct acp_platform_info *machine = snd_soc_card_get_drvdata(card);
+
+ machine->i2s_instance = I2S_BT_INSTANCE;
+ return da7219_clk_enable(substream);
+}
+
+static int cz_dmic1_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_card *card = rtd->card;
struct acp_platform_info *machine = snd_soc_card_get_drvdata(card);
machine->i2s_instance = I2S_SP_INSTANCE;
+ machine->capture_channel = CAP_CHANNEL0;
return da7219_clk_enable(substream);
}
@@ -197,23 +211,39 @@ static const struct snd_soc_ops cz_max_play_ops = {
.shutdown = cz_max_shutdown,
};
-static const struct snd_soc_ops cz_dmic_cap_ops = {
- .startup = cz_dmic_startup,
+static const struct snd_soc_ops cz_dmic0_cap_ops = {
+ .startup = cz_dmic0_startup,
+ .shutdown = cz_dmic_shutdown,
+};
+
+static const struct snd_soc_ops cz_dmic1_cap_ops = {
+ .startup = cz_dmic1_startup,
.shutdown = cz_dmic_shutdown,
};
static struct snd_soc_dai_link cz_dai_7219_98357[] = {
{
- .name = "amd-da7219-play-cap",
- .stream_name = "Playback and Capture",
+ .name = "amd-da7219-play",
+ .stream_name = "Playback",
.platform_name = "acp_audio_dma.0.auto",
- .cpu_dai_name = "designware-i2s.3.auto",
+ .cpu_dai_name = "designware-i2s.1.auto",
.codec_dai_name = "da7219-hifi",
.codec_name = "i2c-DLGS7219:00",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
.init = cz_da7219_init,
.dpcm_playback = 1,
+ .ops = &cz_da7219_cap_ops,
+ },
+ {
+ .name = "amd-da7219-cap",
+ .stream_name = "Capture",
+ .platform_name = "acp_audio_dma.0.auto",
+ .cpu_dai_name = "designware-i2s.2.auto",
+ .codec_dai_name = "da7219-hifi",
+ .codec_name = "i2c-DLGS7219:00",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
.dpcm_capture = 1,
.ops = &cz_da7219_cap_ops,
},
@@ -221,7 +251,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
.name = "amd-max98357-play",
.stream_name = "HiFi Playback",
.platform_name = "acp_audio_dma.0.auto",
- .cpu_dai_name = "designware-i2s.1.auto",
+ .cpu_dai_name = "designware-i2s.3.auto",
.codec_dai_name = "HiFi",
.codec_name = "MX98357A:00",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
@@ -230,8 +260,22 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
.ops = &cz_max_play_ops,
},
{
- .name = "dmic",
- .stream_name = "DMIC Capture",
+ /* C panel DMIC */
+ .name = "dmic0",
+ .stream_name = "DMIC0 Capture",
+ .platform_name = "acp_audio_dma.0.auto",
+ .cpu_dai_name = "designware-i2s.3.auto",
+ .codec_dai_name = "adau7002-hifi",
+ .codec_name = "ADAU7002:00",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ .dpcm_capture = 1,
+ .ops = &cz_dmic0_cap_ops,
+ },
+ {
+ /* A/B panel DMIC */
+ .name = "dmic1",
+ .stream_name = "DMIC1 Capture",
.platform_name = "acp_audio_dma.0.auto",
.cpu_dai_name = "designware-i2s.2.auto",
.codec_dai_name = "adau7002-hifi",
@@ -239,7 +283,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
.dpcm_capture = 1,
- .ops = &cz_dmic_cap_ops,
+ .ops = &cz_dmic1_cap_ops,
},
};
@@ -278,11 +322,52 @@ static struct snd_soc_card cz_card = {
.num_controls = ARRAY_SIZE(cz_mc_controls),
};
+static struct regulator_consumer_supply acp_da7219_supplies[] = {
+ REGULATOR_SUPPLY("VDD", "i2c-DLGS7219:00"),
+ REGULATOR_SUPPLY("VDDMIC", "i2c-DLGS7219:00"),
+ REGULATOR_SUPPLY("VDDIO", "i2c-DLGS7219:00"),
+ REGULATOR_SUPPLY("IOVDD", "ADAU7002:00"),
+};
+
+static struct regulator_init_data acp_da7219_data = {
+ .constraints = {
+ .always_on = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(acp_da7219_supplies),
+ .consumer_supplies = acp_da7219_supplies,
+};
+
+static struct regulator_config acp_da7219_cfg = {
+ .init_data = &acp_da7219_data,
+};
+
+static struct regulator_ops acp_da7219_ops = {
+};
+
+static struct regulator_desc acp_da7219_desc = {
+ .name = "reg-fixed-1.8V",
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .ops = &acp_da7219_ops,
+ .fixed_uV = 1800000, /* 1.8V */
+ .n_voltages = 1,
+};
+
static int cz_probe(struct platform_device *pdev)
{
int ret;
struct snd_soc_card *card;
struct acp_platform_info *machine;
+ struct regulator_dev *rdev;
+
+ acp_da7219_cfg.dev = &pdev->dev;
+ rdev = devm_regulator_register(&pdev->dev, &acp_da7219_desc,
+ &acp_da7219_cfg);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "Failed to register regulator: %d\n",
+ (int)PTR_ERR(rdev));
+ return -EINVAL;
+ }
machine = devm_kzalloc(&pdev->dev, sizeof(struct acp_platform_info),
GFP_KERNEL);
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index 77203841c535..e359938e3d7e 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -224,13 +224,11 @@ static void set_acp_sysmem_dma_descriptors(void __iomem *acp_mmio,
switch (asic_type) {
case CHIP_STONEY:
dmadscr[i].xfer_val |=
- BIT(22) |
(ACP_DMA_ATTR_SHARED_MEM_TO_DAGB_GARLIC << 16) |
(size / 2);
break;
default:
dmadscr[i].xfer_val |=
- BIT(22) |
(ACP_DMA_ATTR_SHAREDMEM_TO_DAGB_ONION << 16) |
(size / 2);
}
@@ -322,22 +320,87 @@ static void config_acp_dma(void __iomem *acp_mmio,
struct audio_substream_data *rtd,
u32 asic_type)
{
+ u16 ch_acp_sysmem, ch_acp_i2s;
+
acp_pte_config(acp_mmio, rtd->pg, rtd->num_of_pages,
rtd->pte_offset);
+
+ if (rtd->direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ ch_acp_sysmem = rtd->ch1;
+ ch_acp_i2s = rtd->ch2;
+ } else {
+ ch_acp_i2s = rtd->ch1;
+ ch_acp_sysmem = rtd->ch2;
+ }
/* Configure System memory <-> ACP SRAM DMA descriptors */
set_acp_sysmem_dma_descriptors(acp_mmio, rtd->size,
rtd->direction, rtd->pte_offset,
- rtd->ch1, rtd->sram_bank,
+ ch_acp_sysmem, rtd->sram_bank,
rtd->dma_dscr_idx_1, asic_type);
/* Configure ACP SRAM <-> I2S DMA descriptors */
set_acp_to_i2s_dma_descriptors(acp_mmio, rtd->size,
rtd->direction, rtd->sram_bank,
- rtd->destination, rtd->ch2,
+ rtd->destination, ch_acp_i2s,
rtd->dma_dscr_idx_2, asic_type);
}
+static void acp_dma_cap_channel_enable(void __iomem *acp_mmio,
+ u16 cap_channel)
+{
+ u32 val, ch_reg, imr_reg, res_reg;
+
+ switch (cap_channel) {
+ case CAP_CHANNEL1:
+ ch_reg = mmACP_I2SMICSP_RER1;
+ res_reg = mmACP_I2SMICSP_RCR1;
+ imr_reg = mmACP_I2SMICSP_IMR1;
+ break;
+ case CAP_CHANNEL0:
+ default:
+ ch_reg = mmACP_I2SMICSP_RER0;
+ res_reg = mmACP_I2SMICSP_RCR0;
+ imr_reg = mmACP_I2SMICSP_IMR0;
+ break;
+ }
+ val = acp_reg_read(acp_mmio,
+ mmACP_I2S_16BIT_RESOLUTION_EN);
+ if (val & ACP_I2S_MIC_16BIT_RESOLUTION_EN) {
+ acp_reg_write(0x0, acp_mmio, ch_reg);
+ /* Set 16bit resolution on capture */
+ acp_reg_write(0x2, acp_mmio, res_reg);
+ }
+ val = acp_reg_read(acp_mmio, imr_reg);
+ val &= ~ACP_I2SMICSP_IMR1__I2SMICSP_RXDAM_MASK;
+ val &= ~ACP_I2SMICSP_IMR1__I2SMICSP_RXFOM_MASK;
+ acp_reg_write(val, acp_mmio, imr_reg);
+ acp_reg_write(0x1, acp_mmio, ch_reg);
+}
+
+static void acp_dma_cap_channel_disable(void __iomem *acp_mmio,
+ u16 cap_channel)
+{
+ u32 val, ch_reg, imr_reg;
+
+ switch (cap_channel) {
+ case CAP_CHANNEL1:
+ imr_reg = mmACP_I2SMICSP_IMR1;
+ ch_reg = mmACP_I2SMICSP_RER1;
+ break;
+ case CAP_CHANNEL0:
+ default:
+ imr_reg = mmACP_I2SMICSP_IMR0;
+ ch_reg = mmACP_I2SMICSP_RER0;
+ break;
+ }
+ val = acp_reg_read(acp_mmio, imr_reg);
+ val |= ACP_I2SMICSP_IMR1__I2SMICSP_RXDAM_MASK;
+ val |= ACP_I2SMICSP_IMR1__I2SMICSP_RXFOM_MASK;
+ acp_reg_write(val, acp_mmio, imr_reg);
+ acp_reg_write(0x0, acp_mmio, ch_reg);
+}
+
/* Start a given DMA channel transfer */
-static void acp_dma_start(void __iomem *acp_mmio, u16 ch_num)
+static void acp_dma_start(void __iomem *acp_mmio, u16 ch_num, bool is_circular)
{
u32 dma_ctrl;
@@ -356,10 +419,8 @@ static void acp_dma_start(void __iomem *acp_mmio, u16 ch_num)
switch (ch_num) {
case ACP_TO_I2S_DMA_CH_NUM:
- case ACP_TO_SYSRAM_CH_NUM:
case I2S_TO_ACP_DMA_CH_NUM:
case ACP_TO_I2S_DMA_BT_INSTANCE_CH_NUM:
- case ACP_TO_SYSRAM_BT_INSTANCE_CH_NUM:
case I2S_TO_ACP_DMA_BT_INSTANCE_CH_NUM:
dma_ctrl |= ACP_DMA_CNTL_0__DMAChIOCEn_MASK;
break;
@@ -368,8 +429,11 @@ static void acp_dma_start(void __iomem *acp_mmio, u16 ch_num)
break;
}
- /* circular for both DMA channel */
- dma_ctrl |= ACP_DMA_CNTL_0__Circular_DMA_En_MASK;
+	/* enable or disable circular mode for this DMA channel */
+	if (is_circular)
+ dma_ctrl |= ACP_DMA_CNTL_0__Circular_DMA_En_MASK;
+ else
+ dma_ctrl &= ~ACP_DMA_CNTL_0__Circular_DMA_En_MASK;
acp_reg_write(dma_ctrl, acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
}
@@ -613,6 +677,7 @@ static int acp_deinit(void __iomem *acp_mmio)
/* ACP DMA irq handler routine for playback, capture usecases */
static irqreturn_t dma_irq_handler(int irq, void *arg)
{
+ u16 dscr_idx;
u32 intr_flag, ext_intr_status;
struct audio_drv_data *irq_data;
void __iomem *acp_mmio;
@@ -644,32 +709,39 @@ static irqreturn_t dma_irq_handler(int irq, void *arg)
if ((intr_flag & BIT(I2S_TO_ACP_DMA_CH_NUM)) != 0) {
valid_irq = true;
+ if (acp_reg_read(acp_mmio, mmACP_DMA_CUR_DSCR_14) ==
+ CAPTURE_START_DMA_DESCR_CH15)
+ dscr_idx = CAPTURE_END_DMA_DESCR_CH14;
+ else
+ dscr_idx = CAPTURE_START_DMA_DESCR_CH14;
+ config_acp_dma_channel(acp_mmio, ACP_TO_SYSRAM_CH_NUM, dscr_idx,
+ 1, 0);
+ acp_dma_start(acp_mmio, ACP_TO_SYSRAM_CH_NUM, false);
+
snd_pcm_period_elapsed(irq_data->capture_i2ssp_stream);
acp_reg_write((intr_flag & BIT(I2S_TO_ACP_DMA_CH_NUM)) << 16,
acp_mmio, mmACP_EXTERNAL_INTR_STAT);
}
- if ((intr_flag & BIT(ACP_TO_SYSRAM_CH_NUM)) != 0) {
- valid_irq = true;
- acp_reg_write((intr_flag & BIT(ACP_TO_SYSRAM_CH_NUM)) << 16,
- acp_mmio, mmACP_EXTERNAL_INTR_STAT);
- }
-
if ((intr_flag & BIT(I2S_TO_ACP_DMA_BT_INSTANCE_CH_NUM)) != 0) {
valid_irq = true;
+ if (acp_reg_read(acp_mmio, mmACP_DMA_CUR_DSCR_10) ==
+ CAPTURE_START_DMA_DESCR_CH11)
+ dscr_idx = CAPTURE_END_DMA_DESCR_CH10;
+ else
+ dscr_idx = CAPTURE_START_DMA_DESCR_CH10;
+ config_acp_dma_channel(acp_mmio,
+ ACP_TO_SYSRAM_BT_INSTANCE_CH_NUM,
+ dscr_idx, 1, 0);
+ acp_dma_start(acp_mmio, ACP_TO_SYSRAM_BT_INSTANCE_CH_NUM,
+ false);
+
snd_pcm_period_elapsed(irq_data->capture_i2sbt_stream);
acp_reg_write((intr_flag &
BIT(I2S_TO_ACP_DMA_BT_INSTANCE_CH_NUM)) << 16,
acp_mmio, mmACP_EXTERNAL_INTR_STAT);
}
- if ((intr_flag & BIT(ACP_TO_SYSRAM_BT_INSTANCE_CH_NUM)) != 0) {
- valid_irq = true;
- acp_reg_write((intr_flag &
- BIT(ACP_TO_SYSRAM_BT_INSTANCE_CH_NUM)) << 16,
- acp_mmio, mmACP_EXTERNAL_INTR_STAT);
- }
-
if (valid_irq)
return IRQ_HANDLED;
else
@@ -773,7 +845,10 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
if (WARN_ON(!rtd))
return -EINVAL;
- rtd->i2s_instance = pinfo->i2s_instance;
+ if (pinfo) {
+ rtd->i2s_instance = pinfo->i2s_instance;
+ rtd->capture_channel = pinfo->capture_channel;
+ }
if (adata->asic_type == CHIP_STONEY) {
val = acp_reg_read(adata->acp_mmio,
mmACP_I2S_16BIT_RESOLUTION_EN);
@@ -841,8 +916,8 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
switch (rtd->i2s_instance) {
case I2S_BT_INSTANCE:
rtd->pte_offset = ACP_ST_BT_CAPTURE_PTE_OFFSET;
- rtd->ch1 = ACP_TO_SYSRAM_BT_INSTANCE_CH_NUM;
- rtd->ch2 = I2S_TO_ACP_DMA_BT_INSTANCE_CH_NUM;
+ rtd->ch1 = I2S_TO_ACP_DMA_BT_INSTANCE_CH_NUM;
+ rtd->ch2 = ACP_TO_SYSRAM_BT_INSTANCE_CH_NUM;
rtd->sram_bank = ACP_SRAM_BANK_4_ADDRESS;
rtd->destination = FROM_BLUETOOTH;
rtd->dma_dscr_idx_1 = CAPTURE_START_DMA_DESCR_CH10;
@@ -851,13 +926,14 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
mmACP_I2S_BT_RECEIVE_BYTE_CNT_HIGH;
rtd->byte_cnt_low_reg_offset =
mmACP_I2S_BT_RECEIVE_BYTE_CNT_LOW;
+ rtd->dma_curr_dscr = mmACP_DMA_CUR_DSCR_11;
adata->capture_i2sbt_stream = substream;
break;
case I2S_SP_INSTANCE:
default:
rtd->pte_offset = ACP_CAPTURE_PTE_OFFSET;
- rtd->ch1 = ACP_TO_SYSRAM_CH_NUM;
- rtd->ch2 = I2S_TO_ACP_DMA_CH_NUM;
+ rtd->ch1 = I2S_TO_ACP_DMA_CH_NUM;
+ rtd->ch2 = ACP_TO_SYSRAM_CH_NUM;
switch (adata->asic_type) {
case CHIP_STONEY:
rtd->pte_offset = ACP_ST_CAPTURE_PTE_OFFSET;
@@ -874,6 +950,7 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
mmACP_I2S_RECEIVED_BYTE_CNT_HIGH;
rtd->byte_cnt_low_reg_offset =
mmACP_I2S_RECEIVED_BYTE_CNT_LOW;
+ rtd->dma_curr_dscr = mmACP_DMA_CUR_DSCR_15;
adata->capture_i2ssp_stream = substream;
}
}
@@ -927,6 +1004,8 @@ static snd_pcm_uframes_t acp_dma_pointer(struct snd_pcm_substream *substream)
u32 buffersize;
u32 pos = 0;
u64 bytescount = 0;
+ u16 dscr;
+ u32 period_bytes, delay;
struct snd_pcm_runtime *runtime = substream->runtime;
struct audio_substream_data *rtd = runtime->private_data;
@@ -934,12 +1013,25 @@ static snd_pcm_uframes_t acp_dma_pointer(struct snd_pcm_substream *substream)
if (!rtd)
return -EINVAL;
- buffersize = frames_to_bytes(runtime, runtime->buffer_size);
- bytescount = acp_get_byte_count(rtd);
-
- if (bytescount > rtd->bytescount)
- bytescount -= rtd->bytescount;
- pos = do_div(bytescount, buffersize);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ period_bytes = frames_to_bytes(runtime, runtime->period_size);
+ dscr = acp_reg_read(rtd->acp_mmio, rtd->dma_curr_dscr);
+ if (dscr == rtd->dma_dscr_idx_1)
+ pos = period_bytes;
+ else
+ pos = 0;
+ bytescount = acp_get_byte_count(rtd);
+ if (bytescount > rtd->bytescount)
+ bytescount -= rtd->bytescount;
+ delay = do_div(bytescount, period_bytes);
+ runtime->delay = bytes_to_frames(runtime, delay);
+ } else {
+ buffersize = frames_to_bytes(runtime, runtime->buffer_size);
+ bytescount = acp_get_byte_count(rtd);
+ if (bytescount > rtd->bytescount)
+ bytescount -= rtd->bytescount;
+ pos = do_div(bytescount, buffersize);
+ }
return bytes_to_frames(runtime, pos);
}
@@ -953,16 +1045,24 @@ static int acp_dma_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct audio_substream_data *rtd = runtime->private_data;
+ u16 ch_acp_sysmem, ch_acp_i2s;
if (!rtd)
return -EINVAL;
+ if (rtd->direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ ch_acp_sysmem = rtd->ch1;
+ ch_acp_i2s = rtd->ch2;
+ } else {
+ ch_acp_i2s = rtd->ch1;
+ ch_acp_sysmem = rtd->ch2;
+ }
config_acp_dma_channel(rtd->acp_mmio,
- rtd->ch1,
+ ch_acp_sysmem,
rtd->dma_dscr_idx_1,
NUM_DSCRS_PER_CHANNEL, 0);
config_acp_dma_channel(rtd->acp_mmio,
- rtd->ch2,
+ ch_acp_i2s,
rtd->dma_dscr_idx_2,
NUM_DSCRS_PER_CHANNEL, 0);
return 0;
@@ -971,7 +1071,6 @@ static int acp_dma_prepare(struct snd_pcm_substream *substream)
static int acp_dma_trigger(struct snd_pcm_substream *substream, int cmd)
{
int ret;
- u64 bytescount = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
struct audio_substream_data *rtd = runtime->private_data;
@@ -982,37 +1081,32 @@ static int acp_dma_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
- bytescount = acp_get_byte_count(rtd);
- if (rtd->bytescount == 0)
- rtd->bytescount = bytescount;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- acp_dma_start(rtd->acp_mmio, rtd->ch1);
- acp_dma_start(rtd->acp_mmio, rtd->ch2);
+ rtd->bytescount = acp_get_byte_count(rtd);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ if (rtd->capture_channel == CAP_CHANNEL0) {
+ acp_dma_cap_channel_disable(rtd->acp_mmio,
+ CAP_CHANNEL1);
+ acp_dma_cap_channel_enable(rtd->acp_mmio,
+ CAP_CHANNEL0);
+ }
+ if (rtd->capture_channel == CAP_CHANNEL1) {
+ acp_dma_cap_channel_disable(rtd->acp_mmio,
+ CAP_CHANNEL0);
+ acp_dma_cap_channel_enable(rtd->acp_mmio,
+ CAP_CHANNEL1);
+ }
+ acp_dma_start(rtd->acp_mmio, rtd->ch1, true);
} else {
- acp_dma_start(rtd->acp_mmio, rtd->ch2);
- acp_dma_start(rtd->acp_mmio, rtd->ch1);
+ acp_dma_start(rtd->acp_mmio, rtd->ch1, true);
+ acp_dma_start(rtd->acp_mmio, rtd->ch2, true);
}
ret = 0;
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
- /* For playback, non circular dma should be stopped first
- * i.e Sysram to acp dma transfer channel(rtd->ch1) should be
- * stopped before stopping cirular dma which is acp sram to i2s
- * fifo dma transfer channel(rtd->ch2). Where as in Capture
- * scenario, i2s fifo to acp sram dma channel(rtd->ch2) stopped
- * first before stopping acp sram to sysram which is circular
- * dma(rtd->ch1).
- */
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- acp_dma_stop(rtd->acp_mmio, rtd->ch1);
- ret = acp_dma_stop(rtd->acp_mmio, rtd->ch2);
- } else {
- acp_dma_stop(rtd->acp_mmio, rtd->ch2);
- ret = acp_dma_stop(rtd->acp_mmio, rtd->ch1);
- }
- rtd->bytescount = 0;
+ acp_dma_stop(rtd->acp_mmio, rtd->ch2);
+ ret = acp_dma_stop(rtd->acp_mmio, rtd->ch1);
break;
default:
ret = -EINVAL;
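Editor's note: the reworked pointer callback above leans on do_div(), whose semantics are easy to misread: it divides the 64-bit dividend in place and returns the 32-bit remainder. A sketch of how the capture branch derives the in-period delay; get_byte_count() and the period size are illustrative:

/* Sketch: do_div(x, y) sets x = x / y and *returns* x % y. */
u64 bytescount = get_byte_count();		/* hypothetical counter read */
u32 period_bytes = 4096;			/* illustrative period size */
u32 delay;

delay = do_div(bytescount, period_bytes);	/* remainder: bytes into the period */
/* bytescount now holds the number of whole periods elapsed */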
diff --git a/sound/soc/amd/acp.h b/sound/soc/amd/acp.h
index 9cd3e96c84d4..be3963e8f4fa 100644
--- a/sound/soc/amd/acp.h
+++ b/sound/soc/amd/acp.h
@@ -55,6 +55,8 @@
#define I2S_SP_INSTANCE 0x01
#define I2S_BT_INSTANCE 0x02
+#define CAP_CHANNEL0 0x00
+#define CAP_CHANNEL1 0x01
#define ACP_TILE_ON_MASK 0x03
#define ACP_TILE_OFF_MASK 0x02
@@ -72,16 +74,16 @@
#define ACP_TO_I2S_DMA_CH_NUM 13
/* Capture DMA channels */
-#define ACP_TO_SYSRAM_CH_NUM 14
-#define I2S_TO_ACP_DMA_CH_NUM 15
+#define I2S_TO_ACP_DMA_CH_NUM 14
+#define ACP_TO_SYSRAM_CH_NUM 15
/* Playback DMA Channels for I2S BT instance */
#define SYSRAM_TO_ACP_BT_INSTANCE_CH_NUM 8
#define ACP_TO_I2S_DMA_BT_INSTANCE_CH_NUM 9
/* Capture DMA Channels for I2S BT Instance */
-#define ACP_TO_SYSRAM_BT_INSTANCE_CH_NUM 10
-#define I2S_TO_ACP_DMA_BT_INSTANCE_CH_NUM 11
+#define I2S_TO_ACP_DMA_BT_INSTANCE_CH_NUM 10
+#define ACP_TO_SYSRAM_BT_INSTANCE_CH_NUM 11
#define NUM_DSCRS_PER_CHANNEL 2
@@ -125,6 +127,7 @@ struct audio_substream_data {
unsigned int order;
u16 num_of_pages;
u16 i2s_instance;
+ u16 capture_channel;
u16 direction;
u16 ch1;
u16 ch2;
@@ -135,6 +138,7 @@ struct audio_substream_data {
u32 sram_bank;
u32 byte_cnt_high_reg_offset;
u32 byte_cnt_low_reg_offset;
+ u32 dma_curr_dscr;
uint64_t size;
u64 bytescount;
void __iomem *acp_mmio;
@@ -155,6 +159,7 @@ struct audio_drv_data {
*/
struct acp_platform_info {
u16 i2s_instance;
+ u16 capture_channel;
};
union acp_dma_count {
diff --git a/sound/soc/atmel/atmel-i2s.c b/sound/soc/atmel/atmel-i2s.c
index 5d3b5af9fd92..d88c1d995036 100644
--- a/sound/soc/atmel/atmel-i2s.c
+++ b/sound/soc/atmel/atmel-i2s.c
@@ -206,7 +206,6 @@ struct atmel_i2s_dev {
struct regmap *regmap;
struct clk *pclk;
struct clk *gclk;
- struct clk *aclk;
struct snd_dmaengine_dai_dma_data playback;
struct snd_dmaengine_dai_dma_data capture;
unsigned int fmt;
@@ -303,7 +302,7 @@ static int atmel_i2s_get_gck_param(struct atmel_i2s_dev *dev, int fs)
{
int i, best;
- if (!dev->gclk || !dev->aclk) {
+ if (!dev->gclk) {
dev_err(dev->dev, "cannot generate the I2S Master Clock\n");
return -EINVAL;
}
@@ -421,7 +420,7 @@ static int atmel_i2s_switch_mck_generator(struct atmel_i2s_dev *dev,
bool enabled)
{
unsigned int mr, mr_mask;
- unsigned long aclk_rate;
+ unsigned long gclk_rate;
int ret;
mr = 0;
@@ -445,35 +444,18 @@ static int atmel_i2s_switch_mck_generator(struct atmel_i2s_dev *dev,
/* Disable/unprepare the PMC generated clock. */
clk_disable_unprepare(dev->gclk);
- /* Disable/unprepare the PLL audio clock. */
- clk_disable_unprepare(dev->aclk);
return 0;
}
if (!dev->gck_param)
return -EINVAL;
- aclk_rate = dev->gck_param->mck * (dev->gck_param->imckdiv + 1);
+ gclk_rate = dev->gck_param->mck * (dev->gck_param->imckdiv + 1);
- /* Fist change the PLL audio clock frequency ... */
- ret = clk_set_rate(dev->aclk, aclk_rate);
+ ret = clk_set_rate(dev->gclk, gclk_rate);
if (ret)
return ret;
- /*
- * ... then set the PMC generated clock rate to the very same frequency
- * to set the gclk parent to aclk.
- */
- ret = clk_set_rate(dev->gclk, aclk_rate);
- if (ret)
- return ret;
-
- /* Prepare and enable the PLL audio clock first ... */
- ret = clk_prepare_enable(dev->aclk);
- if (ret)
- return ret;
-
- /* ... then prepare and enable the PMC generated clock. */
ret = clk_prepare_enable(dev->gclk);
if (ret)
return ret;
@@ -668,28 +650,14 @@ static int atmel_i2s_probe(struct platform_device *pdev)
return err;
}
- /* Get audio clocks to generate the I2S Master Clock (I2S_MCK) */
- dev->aclk = devm_clk_get(&pdev->dev, "aclk");
+ /* Get audio clock to generate the I2S Master Clock (I2S_MCK) */
dev->gclk = devm_clk_get(&pdev->dev, "gclk");
- if (IS_ERR(dev->aclk) && IS_ERR(dev->gclk)) {
- if (PTR_ERR(dev->aclk) == -EPROBE_DEFER ||
- PTR_ERR(dev->gclk) == -EPROBE_DEFER)
+ if (IS_ERR(dev->gclk)) {
+ if (PTR_ERR(dev->gclk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
/* Master Mode not supported */
- dev->aclk = NULL;
dev->gclk = NULL;
- } else if (IS_ERR(dev->gclk)) {
- err = PTR_ERR(dev->gclk);
- dev_err(&pdev->dev,
- "failed to get the PMC generated clock: %d\n", err);
- return err;
- } else if (IS_ERR(dev->aclk)) {
- err = PTR_ERR(dev->aclk);
- dev_err(&pdev->dev,
- "failed to get the PLL audio clock: %d\n", err);
- return err;
}
-
dev->dev = &pdev->dev;
dev->regmap = regmap;
platform_set_drvdata(pdev, dev);
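Editor's note: with the aclk intermediary removed, the driver programs the PMC generated clock directly: set the rate, then prepare and enable, and later balance with clk_disable_unprepare(). A sketch of that ordering, mirroring atmel_i2s_switch_mck_generator() above (error handling trimmed):

/* Sketch: rate is programmed before the clock is enabled; every
 * clk_prepare_enable() must be paired with clk_disable_unprepare(). */
ret = clk_set_rate(dev->gclk, gclk_rate);
if (ret)
	return ret;

ret = clk_prepare_enable(dev->gclk);
if (ret)
	return ret;
/* ... on teardown: clk_disable_unprepare(dev->gclk); */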
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 63cf62e9c9aa..efb095dbcd71 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -74,12 +74,12 @@ config SND_SOC_ALL_CODECS
select SND_SOC_DA7219 if I2C
select SND_SOC_DA732X if I2C
select SND_SOC_DA9055 if I2C
- select SND_SOC_DIO2125
select SND_SOC_DMIC if GPIOLIB
select SND_SOC_ES8316 if I2C
select SND_SOC_ES8328_SPI if SPI_MASTER
select SND_SOC_ES8328_I2C if I2C
select SND_SOC_ES7134
+ select SND_SOC_ES7241
select SND_SOC_GTM601
select SND_SOC_HDAC_HDMI
select SND_SOC_ICS43432
@@ -141,8 +141,10 @@ config SND_SOC_ALL_CODECS
select SND_SOC_RT5668 if I2C
select SND_SOC_RT5670 if I2C
select SND_SOC_RT5677 if I2C && SPI_MASTER
+ select SND_SOC_RT5682 if I2C
select SND_SOC_SGTL5000 if I2C
select SND_SOC_SI476X if MFD_SI476X_CORE
+ select SND_SOC_SIMPLE_AMPLIFIER
select SND_SOC_SIRF_AUDIO_CODEC
select SND_SOC_SPDIF
select SND_SOC_SSM2305
@@ -572,10 +574,6 @@ config SND_SOC_DA732X
config SND_SOC_DA9055
tristate
-config SND_SOC_DIO2125
- tristate "Dioo DIO2125 Amplifier"
- select GPIOLIB
-
config SND_SOC_DMIC
tristate
@@ -588,6 +586,9 @@ config SND_SOC_HDMI_CODEC
config SND_SOC_ES7134
tristate "Everest Semi ES7134 CODEC"
+config SND_SOC_ES7241
+ tristate "Everest Semi ES7241 CODEC"
+
config SND_SOC_ES8316
tristate "Everest Semi ES8316 CODEC"
depends on I2C
@@ -778,6 +779,7 @@ config SND_SOC_RL6231
default y if SND_SOC_RT5668=y
default y if SND_SOC_RT5670=y
default y if SND_SOC_RT5677=y
+ default y if SND_SOC_RT5682=y
default y if SND_SOC_RT1305=y
default m if SND_SOC_RT5514=m
default m if SND_SOC_RT5616=m
@@ -791,6 +793,7 @@ config SND_SOC_RL6231
default m if SND_SOC_RT5668=m
default m if SND_SOC_RT5670=m
default m if SND_SOC_RT5677=m
+ default m if SND_SOC_RT5682=m
default m if SND_SOC_RT1305=m
config SND_SOC_RL6347A
@@ -871,6 +874,9 @@ config SND_SOC_RT5677_SPI
tristate
default SND_SOC_RT5677 && SPI
+config SND_SOC_RT5682
+ tristate
+
#Freescale sgtl5000 codec
config SND_SOC_SGTL5000
tristate "Freescale SGTL5000 CODEC"
@@ -891,6 +897,10 @@ config SND_SOC_SIGMADSP_REGMAP
tristate
select SND_SOC_SIGMADSP
+config SND_SOC_SIMPLE_AMPLIFIER
+ tristate "Simple Audio Amplifier"
+ select GPIOLIB
+
config SND_SOC_SIRF_AUDIO_CODEC
tristate "SiRF SoC internal audio codec"
select REGMAP_MMIO
@@ -953,8 +963,11 @@ config SND_SOC_TAS5086
depends on I2C
config SND_SOC_TAS571X
- tristate "Texas Instruments TAS5711/TAS5717/TAS5719/TAS5721 power amplifiers"
+ tristate "Texas Instruments TAS571x power amplifiers"
depends on I2C
+ help
+ Enable support for Texas Instruments TAS5707, TAS5711, TAS5717,
+ TAS5719 and TAS5721 power amplifiers
config SND_SOC_TAS5720
tristate "Texas Instruments TAS5720 Mono Audio amplifier"
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index e023fdf85221..7ae7c85e8219 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -71,6 +71,7 @@ snd-soc-da732x-objs := da732x.o
snd-soc-da9055-objs := da9055.o
snd-soc-dmic-objs := dmic.o
snd-soc-es7134-objs := es7134.o
+snd-soc-es7241-objs := es7241.o
snd-soc-es8316-objs := es8316.o
snd-soc-es8328-objs := es8328.o
snd-soc-es8328-i2c-objs := es8328-i2c.o
@@ -146,6 +147,7 @@ snd-soc-rt5668-objs := rt5668.o
snd-soc-rt5670-objs := rt5670.o
snd-soc-rt5677-objs := rt5677.o
snd-soc-rt5677-spi-objs := rt5677-spi.o
+snd-soc-rt5682-objs := rt5682.o
snd-soc-sgtl5000-objs := sgtl5000.o
snd-soc-alc5623-objs := alc5623.o
snd-soc-alc5632-objs := alc5632.o
@@ -249,9 +251,9 @@ snd-soc-wm9713-objs := wm9713.o
snd-soc-wm-hubs-objs := wm_hubs.o
snd-soc-zx-aud96p22-objs := zx_aud96p22.o
# Amp
-snd-soc-dio2125-objs := dio2125.o
snd-soc-max9877-objs := max9877.o
snd-soc-max98504-objs := max98504.o
+snd-soc-simple-amplifier-objs := simple-amplifier.o
snd-soc-tpa6130a2-objs := tpa6130a2.o
snd-soc-tas2552-objs := tas2552.o
@@ -329,6 +331,7 @@ obj-$(CONFIG_SND_SOC_DA732X) += snd-soc-da732x.o
obj-$(CONFIG_SND_SOC_DA9055) += snd-soc-da9055.o
obj-$(CONFIG_SND_SOC_DMIC) += snd-soc-dmic.o
obj-$(CONFIG_SND_SOC_ES7134) += snd-soc-es7134.o
+obj-$(CONFIG_SND_SOC_ES7241) += snd-soc-es7241.o
obj-$(CONFIG_SND_SOC_ES8316) += snd-soc-es8316.o
obj-$(CONFIG_SND_SOC_ES8328) += snd-soc-es8328.o
obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o
@@ -405,6 +408,7 @@ obj-$(CONFIG_SND_SOC_RT5668) += snd-soc-rt5668.o
obj-$(CONFIG_SND_SOC_RT5670) += snd-soc-rt5670.o
obj-$(CONFIG_SND_SOC_RT5677) += snd-soc-rt5677.o
obj-$(CONFIG_SND_SOC_RT5677_SPI) += snd-soc-rt5677-spi.o
+obj-$(CONFIG_SND_SOC_RT5682) += snd-soc-rt5682.o
obj-$(CONFIG_SND_SOC_SGTL5000) += snd-soc-sgtl5000.o
obj-$(CONFIG_SND_SOC_SIGMADSP) += snd-soc-sigmadsp.o
obj-$(CONFIG_SND_SOC_SIGMADSP_I2C) += snd-soc-sigmadsp-i2c.o
@@ -507,7 +511,7 @@ obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o
obj-$(CONFIG_SND_SOC_ZX_AUD96P22) += snd-soc-zx-aud96p22.o
# Amp
-obj-$(CONFIG_SND_SOC_DIO2125) += snd-soc-dio2125.o
obj-$(CONFIG_SND_SOC_MAX9877) += snd-soc-max9877.o
obj-$(CONFIG_SND_SOC_MAX98504) += snd-soc-max98504.o
+obj-$(CONFIG_SND_SOC_SIMPLE_AMPLIFIER) += snd-soc-simple-amplifier.o
obj-$(CONFIG_SND_SOC_TPA6130A2) += snd-soc-tpa6130a2.o
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index ae41edd1c406..57169b8ff14e 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -299,6 +299,7 @@ static const struct snd_soc_dapm_route adau17x1_dsp_dapm_routes[] = {
{ "DSP", NULL, "Left Decimator" },
{ "DSP", NULL, "Right Decimator" },
+ { "DSP", NULL, "Playback" },
};
static const struct snd_soc_dapm_route adau17x1_no_dsp_dapm_routes[] = {
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index db21ecbe0762..8b9ca7e7a682 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -648,6 +648,7 @@ static int adav80x_set_pll(struct snd_soc_component *component, int pll_id,
pll_ctrl1 |= ADAV80X_PLL_CTRL1_PLLDIV;
break;
}
+ /* fall through */
default:
return -EINVAL;
}
diff --git a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c
index 31ec0ba2e639..299ada4dfaa0 100644
--- a/sound/soc/codecs/ak4458.c
+++ b/sound/soc/codecs/ak4458.c
@@ -558,7 +558,7 @@ static int __maybe_unused ak4458_runtime_resume(struct device *dev)
}
#endif /* CONFIG_PM */
-struct snd_soc_component_driver soc_codec_dev_ak4458 = {
+static const struct snd_soc_component_driver soc_codec_dev_ak4458 = {
.probe = ak4458_probe,
.remove = ak4458_remove,
.controls = ak4458_snd_controls,
diff --git a/sound/soc/codecs/ak4554.c b/sound/soc/codecs/ak4554.c
index b7ee13406d93..2fa83a1a84cf 100644
--- a/sound/soc/codecs/ak4554.c
+++ b/sound/soc/codecs/ak4554.c
@@ -1,13 +1,8 @@
-/*
- * ak4554.c
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+// ak4554.c
+//
+// Copyright (C) 2013 Renesas Solutions Corp.
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
#include <linux/module.h>
#include <sound/soc.h>
@@ -97,6 +92,6 @@ static struct platform_driver ak4554_driver = {
};
module_platform_driver(ak4554_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SoC AK4554 driver");
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c
index 8523ff9351cf..c1181a20714d 100644
--- a/sound/soc/codecs/ak4613.c
+++ b/sound/soc/codecs/ak4613.c
@@ -1,18 +1,14 @@
-/*
- * ak4613.c -- Asahi Kasei ALSA Soc Audio driver
- *
- * Copyright (C) 2015 Renesas Electronics Corporation
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * Based on ak4642.c by Kuninori Morimoto
- * Based on wm8731.c by Richard Purdie
- * Based on ak4535.c by Richard Purdie
- * Based on wm8753.c by Liam Girdwood
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// ak4613.c -- Asahi Kasei ALSA Soc Audio driver
+//
+// Copyright (C) 2015 Renesas Electronics Corporation
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+//
+// Based on ak4642.c by Kuninori Morimoto
+// Based on wm8731.c by Richard Purdie
+// Based on ak4535.c by Richard Purdie
+// Based on wm8753.c by Liam Girdwood
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 605055964529..353237025514 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -1,17 +1,13 @@
-/*
- * ak4642.c -- AK4642/AK4643 ALSA Soc Audio driver
- *
- * Copyright (C) 2009 Renesas Solutions Corp.
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * Based on wm8731.c by Richard Purdie
- * Based on ak4535.c by Richard Purdie
- * Based on wm8753.c by Liam Girdwood
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// ak4642.c -- AK4642/AK4643 ALSA Soc Audio driver
+//
+// Copyright (C) 2009 Renesas Solutions Corp.
+// Kuninori Morimoto <morimoto.kuninori@renesas.com>
+//
+// Based on wm8731.c by Richard Purdie
+// Based on ak4535.c by Richard Purdie
+// Based on wm8753.c by Liam Girdwood
/* ** CAUTION **
*
@@ -709,4 +705,4 @@ module_i2c_driver(ak4642_i2c_driver);
MODULE_DESCRIPTION("Soc AK4642 driver");
MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
index f4ed5cc40661..448bb90c9c8e 100644
--- a/sound/soc/codecs/ak5558.c
+++ b/sound/soc/codecs/ak5558.c
@@ -322,13 +322,13 @@ static int __maybe_unused ak5558_runtime_resume(struct device *dev)
return regcache_sync(ak5558->regmap);
}
-const struct dev_pm_ops ak5558_pm = {
+static const struct dev_pm_ops ak5558_pm = {
SET_RUNTIME_PM_OPS(ak5558_runtime_suspend, ak5558_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
-struct snd_soc_component_driver soc_codec_dev_ak5558 = {
+static const struct snd_soc_component_driver soc_codec_dev_ak5558 = {
.probe = ak5558_probe,
.remove = ak5558_remove,
.controls = ak5558_snd_controls,
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 2a7a4168c072..3c266eeb89bf 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -219,7 +219,7 @@ static bool cs4270_reg_is_volatile(struct device *dev, unsigned int reg)
{
/* Unreadable registers are considered volatile */
if ((reg < CS4270_FIRSTREG) || (reg > CS4270_LASTREG))
- return 1;
+ return true;
return reg == CS4270_CHIPID;
}
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 196e9c343aeb..45e50fe3bf25 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -235,6 +235,9 @@ ARIZONA_MIXER_CONTROLS("AIF2TX6", ARIZONA_AIF2TX6MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("AIF3TX1", ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("AIF3TX2", ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE),
+
+WM_ADSP_FW_CONTROL("DSP2", 1),
+WM_ADSP_FW_CONTROL("DSP3", 2),
};
ARIZONA_MIXER_ENUMS(EQ1, ARIZONA_EQ1MIX_INPUT_1_SOURCE);
@@ -1283,6 +1286,12 @@ static int cs47l24_probe(struct platform_device *pdev)
return ret;
}
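+	/* Set the compressed stream DSP IRQ as a wake source */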
+ ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_DSP_IRQ1, 1);
+ if (ret != 0)
+ dev_warn(&pdev->dev,
+ "Failed to set compressed IRQ as a wake source: %d\n",
+ ret);
+
arizona_init_common(arizona);
ret = arizona_init_vol_limit(arizona);
@@ -1306,6 +1315,7 @@ static int cs47l24_probe(struct platform_device *pdev)
err_spk_irqs:
arizona_free_spk_irqs(arizona);
err_dsp_irq:
+ arizona_set_irq_wake(arizona, ARIZONA_IRQ_DSP_IRQ1, 0);
arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, cs47l24);
return ret;
@@ -1323,6 +1333,7 @@ static int cs47l24_remove(struct platform_device *pdev)
arizona_free_spk_irqs(arizona);
+ arizona_set_irq_wake(arizona, ARIZONA_IRQ_DSP_IRQ1, 0);
arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, cs47l24);
return 0;
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index 07dd33b09596..ab174b5114dc 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -362,8 +362,27 @@ static int cx20442_component_probe(struct snd_soc_component *component)
return -ENOMEM;
cx20442->por = regulator_get(component->dev, "POR");
- if (IS_ERR(cx20442->por))
- dev_warn(component->dev, "failed to get the regulator");
+ if (IS_ERR(cx20442->por)) {
+ int err = PTR_ERR(cx20442->por);
+
+ dev_warn(component->dev, "failed to get POR supply (%d)", err);
+ /*
+ * When running on a non-dt platform and requested regulator
+ * is not available, regulator_get() never returns
+ * -EPROBE_DEFER as it is not able to justify if the regulator
+ * may still appear later. On the other hand, the board can
+ * still set full constraints flag at late_initcall in order
+ * to instruct regulator_get() to return a dummy one if
+ * sufficient. Hence, if we get -ENODEV here, let's convert
+ * it to -EPROBE_DEFER and wait for the board to decide or
+ * let Deferred Probe infrastructure handle this error.
+ */
+ if (err == -ENODEV)
+ err = -EPROBE_DEFER;
+ kfree(cx20442);
+ return err;
+ }
+
cx20442->tty = NULL;
snd_soc_component_set_drvdata(component, cx20442);
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
index a664111b7184..e172913d04a4 100644
--- a/sound/soc/codecs/da7210.c
+++ b/sound/soc/codecs/da7210.c
@@ -1,19 +1,14 @@
-/*
- * DA7210 ALSA Soc codec driver
- *
- * Copyright (c) 2009 Dialog Semiconductor
- * Written by David Chen <Dajun.chen@diasemi.com>
- *
- * Copyright (C) 2009 Renesas Solutions Corp.
- * Cleanups by Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * Tested on SuperH Ecovec24 board with S16/S24 LE in 48KHz using I2S
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// DA7210 ALSA Soc codec driver
+//
+// Copyright (c) 2009 Dialog Semiconductor
+// Written by David Chen <Dajun.chen@diasemi.com>
+//
+// Copyright (C) 2009 Renesas Solutions Corp.
+// Cleanups by Kuninori Morimoto <morimoto.kuninori@renesas.com>
+//
+// Tested on SuperH Ecovec24 board with S16/S24 LE in 48KHz using I2S
#include <linux/delay.h>
#include <linux/i2c.h>
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 54cb5f24969f..92d006a5283e 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -1140,9 +1140,9 @@ static bool da7213_volatile_register(struct device *dev, unsigned int reg)
case DA7213_ALC_OFFSET_AUTO_M_R:
case DA7213_ALC_OFFSET_AUTO_U_R:
case DA7213_ALC_CIC_OP_LVL_DATA:
- return 1;
+ return true;
default:
- return 0;
+ return false;
}
}
diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
index a49ab751a036..2c7d5088e6f2 100644
--- a/sound/soc/codecs/da7219-aad.c
+++ b/sound/soc/codecs/da7219-aad.c
@@ -59,6 +59,7 @@ static void da7219_aad_btn_det_work(struct work_struct *work)
container_of(work, struct da7219_aad_priv, btn_det_work);
struct snd_soc_component *component = da7219_aad->component;
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
+ struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component);
u8 statusa, micbias_ctrl;
bool micbias_up = false;
int retries = 0;
@@ -86,6 +87,8 @@ static void da7219_aad_btn_det_work(struct work_struct *work)
if (retries >= DA7219_AAD_MICBIAS_CHK_RETRIES)
dev_warn(component->dev, "Mic bias status check timed out");
+ da7219->micbias_on_event = true;
+
/*
* Mic bias pulse required to enable mic, must be done before enabling
* button detection to prevent erroneous button readings.
@@ -439,6 +442,8 @@ static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
snd_soc_component_update_bits(component, DA7219_ACCDET_CONFIG_1,
DA7219_BUTTON_CONFIG_MASK, 0);
+ da7219->micbias_on_event = false;
+
/* Disable mic bias */
snd_soc_dapm_disable_pin(dapm, "Mic Bias");
snd_soc_dapm_sync(dapm);
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index 980a6a8bf56d..e46e9f4bc994 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -768,6 +768,30 @@ static const struct snd_kcontrol_new da7219_st_out_filtr_mix_controls[] = {
* DAPM Events
*/
+static int da7219_mic_pga_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (da7219->micbias_on_event) {
+ /*
+ * Delay only for first capture after bias enabled to
+ * avoid possible DC offset related noise.
+ */
+ da7219->micbias_on_event = false;
+ msleep(da7219->mic_pga_delay);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int da7219_dai_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -937,12 +961,12 @@ static const struct snd_soc_dapm_widget da7219_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("MIC"),
/* Input PGAs */
- SND_SOC_DAPM_PGA("Mic PGA", DA7219_MIC_1_CTRL,
- DA7219_MIC_1_AMP_EN_SHIFT, DA7219_NO_INVERT,
- NULL, 0),
- SND_SOC_DAPM_PGA("Mixin PGA", DA7219_MIXIN_L_CTRL,
- DA7219_MIXIN_L_AMP_EN_SHIFT, DA7219_NO_INVERT,
- NULL, 0),
+ SND_SOC_DAPM_PGA_E("Mic PGA", DA7219_MIC_1_CTRL,
+ DA7219_MIC_1_AMP_EN_SHIFT, DA7219_NO_INVERT,
+ NULL, 0, da7219_mic_pga_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PGA_E("Mixin PGA", DA7219_MIXIN_L_CTRL,
+ DA7219_MIXIN_L_AMP_EN_SHIFT, DA7219_NO_INVERT,
+ NULL, 0, da7219_settling_event, SND_SOC_DAPM_POST_PMU),
/* Input Filters */
SND_SOC_DAPM_ADC("ADC", NULL, DA7219_ADC_L_CTRL, DA7219_ADC_L_EN_SHIFT,
@@ -1847,6 +1871,14 @@ static void da7219_handle_pdata(struct snd_soc_component *component)
snd_soc_component_write(component, DA7219_MICBIAS_CTRL, micbias_lvl);
+ /*
+ * Calculate delay required to compensate for DC offset in
+ * Mic PGA, based on Mic Bias voltage.
+ */
+ da7219->mic_pga_delay = DA7219_MIC_PGA_BASE_DELAY +
+ (pdata->micbias_lvl *
+ DA7219_MIC_PGA_OFFSET_DELAY);
+
/* Mic */
switch (pdata->mic_amp_in_sel) {
case DA7219_MIC_AMP_IN_SEL_DIFF:
@@ -2143,9 +2175,9 @@ static bool da7219_volatile_register(struct device *dev, unsigned int reg)
case DA7219_ACCDET_IRQ_EVENT_B:
case DA7219_ACCDET_CONFIG_8:
case DA7219_SYSTEM_STATUS:
- return 1;
+ return true;
default:
- return 0;
+ return false;
}
}
diff --git a/sound/soc/codecs/da7219.h b/sound/soc/codecs/da7219.h
index 1b00023e33cd..3a006862f0e7 100644
--- a/sound/soc/codecs/da7219.h
+++ b/sound/soc/codecs/da7219.h
@@ -781,8 +781,10 @@
#define DA7219_SYS_STAT_CHECK_DELAY 50
/* Power up/down Delays */
-#define DA7219_SETTLING_DELAY 40
-#define DA7219_MIN_GAIN_DELAY 30
+#define DA7219_SETTLING_DELAY 40
+#define DA7219_MIN_GAIN_DELAY 30
+#define DA7219_MIC_PGA_BASE_DELAY 100
+#define DA7219_MIC_PGA_OFFSET_DELAY 40
enum da7219_clk_src {
DA7219_CLKSRC_MCLK = 0,
@@ -828,6 +830,8 @@ struct da7219_priv {
bool master;
bool alc_en;
+ bool micbias_on_event;
+ unsigned int mic_pga_delay;
u8 gain_ramp_ctrl;
};
diff --git a/sound/soc/codecs/da9055.c b/sound/soc/codecs/da9055.c
index afdf90c78884..f6a7bf9560e7 100644
--- a/sound/soc/codecs/da9055.c
+++ b/sound/soc/codecs/da9055.c
@@ -1041,9 +1041,9 @@ static bool da9055_volatile_register(struct device *dev,
case DA9055_HP_R_GAIN_STATUS:
case DA9055_LINE_GAIN_STATUS:
case DA9055_ALC_CIC_OP_LVL_DATA:
- return 1;
+ return true;
default:
- return 0;
+ return false;
}
}
diff --git a/sound/soc/codecs/es7134.c b/sound/soc/codecs/es7134.c
index 58515bb1a303..6d7bca7b78ca 100644
--- a/sound/soc/codecs/es7134.c
+++ b/sound/soc/codecs/es7134.c
@@ -17,6 +17,7 @@
* in the file called COPYING.
*/
+#include <linux/of_platform.h>
#include <linux/module.h>
#include <sound/soc.h>
@@ -24,6 +25,82 @@
 * The Everest 7134 is a very simple DA converter with no registers
*/
+struct es7134_clock_mode {
+ unsigned int rate_min;
+ unsigned int rate_max;
+ unsigned int *mclk_fs;
+ unsigned int mclk_fs_num;
+};
+
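+/*
+ * Per-variant description: the DAI driver, the supported clock modes
+ * and any extra DAPM widgets/routes the variant needs.
+ */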
+struct es7134_chip {
+ struct snd_soc_dai_driver *dai_drv;
+ const struct es7134_clock_mode *modes;
+ unsigned int mode_num;
+ const struct snd_soc_dapm_widget *extra_widgets;
+ unsigned int extra_widget_num;
+ const struct snd_soc_dapm_route *extra_routes;
+ unsigned int extra_route_num;
+};
+
+struct es7134_data {
+ unsigned int mclk;
+ const struct es7134_chip *chip;
+};
+
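+/*
+ * Check that the mclk/lrclk ratio of the stream is one the chip
+ * supports in the speed mode covering the requested rate.
+ */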
+static int es7134_check_mclk(struct snd_soc_dai *dai,
+ struct es7134_data *priv,
+ unsigned int rate)
+{
+ unsigned int mfs = priv->mclk / rate;
+ int i, j;
+
+ for (i = 0; i < priv->chip->mode_num; i++) {
+ const struct es7134_clock_mode *mode = &priv->chip->modes[i];
+
+ if (rate < mode->rate_min || rate > mode->rate_max)
+ continue;
+
+ for (j = 0; j < mode->mclk_fs_num; j++) {
+ if (mode->mclk_fs[j] == mfs)
+ return 0;
+ }
+
+ dev_err(dai->dev, "unsupported mclk_fs %u for rate %u\n",
+ mfs, rate);
+ return -EINVAL;
+ }
+
+ /* should not happen */
+ dev_err(dai->dev, "unsupported rate: %u\n", rate);
+ return -EINVAL;
+}
+
+static int es7134_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct es7134_data *priv = snd_soc_dai_get_drvdata(dai);
+
+ /* mclk has not been provided, assume it is OK */
+ if (!priv->mclk)
+ return 0;
+
+ return es7134_check_mclk(dai, priv, params_rate(params));
+}
+
+static int es7134_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct es7134_data *priv = snd_soc_dai_get_drvdata(dai);
+
+ if (dir == SND_SOC_CLOCK_IN && clk_id == 0) {
+ priv->mclk = freq;
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
static int es7134_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
{
fmt &= (SND_SOC_DAIFMT_FORMAT_MASK | SND_SOC_DAIFMT_INV_MASK |
@@ -38,8 +115,38 @@ static int es7134_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
return 0;
}
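+/* Add the chip specific extra DAPM widgets and routes, if any */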
+static int es7134_component_probe(struct snd_soc_component *c)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(c);
+ struct es7134_data *priv = snd_soc_component_get_drvdata(c);
+ const struct es7134_chip *chip = priv->chip;
+ int ret;
+
+ if (chip->extra_widget_num) {
+ ret = snd_soc_dapm_new_controls(dapm, chip->extra_widgets,
+ chip->extra_widget_num);
+ if (ret) {
+ dev_err(c->dev, "failed to add extra widgets\n");
+ return ret;
+ }
+ }
+
+ if (chip->extra_route_num) {
+ ret = snd_soc_dapm_add_routes(dapm, chip->extra_routes,
+ chip->extra_route_num);
+ if (ret) {
+ dev_err(c->dev, "failed to add extra routes\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static const struct snd_soc_dai_ops es7134_dai_ops = {
.set_fmt = es7134_set_fmt,
+ .hw_params = es7134_hw_params,
+ .set_sysclk = es7134_set_sysclk,
};
static struct snd_soc_dai_driver es7134_dai = {
@@ -48,7 +155,11 @@ static struct snd_soc_dai_driver es7134_dai = {
.stream_name = "Playback",
.channels_min = 2,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000,
+ .rates = (SNDRV_PCM_RATE_8000_48000 |
+ SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_176400 |
+ SNDRV_PCM_RATE_192000),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S18_3LE |
SNDRV_PCM_FMTBIT_S20_3LE |
@@ -58,18 +169,56 @@ static struct snd_soc_dai_driver es7134_dai = {
.ops = &es7134_dai_ops,
};
+static const struct es7134_clock_mode es7134_modes[] = {
+ {
+ /* Single speed mode */
+ .rate_min = 8000,
+ .rate_max = 50000,
+ .mclk_fs = (unsigned int[]) { 256, 384, 512, 768, 1024 },
+ .mclk_fs_num = 5,
+ }, {
+ /* Double speed mode */
+ .rate_min = 84000,
+ .rate_max = 100000,
+ .mclk_fs = (unsigned int[]) { 128, 192, 256, 384, 512 },
+ .mclk_fs_num = 5,
+ }, {
+ /* Quad speed mode */
+ .rate_min = 167000,
+ .rate_max = 192000,
+ .mclk_fs = (unsigned int[]) { 128, 192, 256 },
+ .mclk_fs_num = 3,
+ },
+};
+
+/* Digital I/O is also supplied by VDD on the es7134 */
+static const struct snd_soc_dapm_route es7134_extra_routes[] = {
+ { "Playback", NULL, "VDD", }
+};
+
+static const struct es7134_chip es7134_chip = {
+ .dai_drv = &es7134_dai,
+ .modes = es7134_modes,
+ .mode_num = ARRAY_SIZE(es7134_modes),
+ .extra_routes = es7134_extra_routes,
+ .extra_route_num = ARRAY_SIZE(es7134_extra_routes),
+};
+
static const struct snd_soc_dapm_widget es7134_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("AOUTL"),
SND_SOC_DAPM_OUTPUT("AOUTR"),
SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_REGULATOR_SUPPLY("VDD", 0, 0),
};
static const struct snd_soc_dapm_route es7134_dapm_routes[] = {
{ "AOUTL", NULL, "DAC" },
{ "AOUTR", NULL, "DAC" },
+ { "DAC", NULL, "VDD" },
};
static const struct snd_soc_component_driver es7134_component_driver = {
+ .probe = es7134_component_probe,
.dapm_widgets = es7134_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(es7134_dapm_widgets),
.dapm_routes = es7134_dapm_routes,
@@ -80,17 +229,87 @@ static const struct snd_soc_component_driver es7134_component_driver = {
.non_legacy_dai_naming = 1,
};
+static struct snd_soc_dai_driver es7154_dai = {
+ .name = "es7154-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = (SNDRV_PCM_RATE_8000_48000 |
+ SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S18_3LE |
+ SNDRV_PCM_FMTBIT_S20_3LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ },
+ .ops = &es7134_dai_ops,
+};
+
+static const struct es7134_clock_mode es7154_modes[] = {
+ {
+ /* Single speed mode */
+ .rate_min = 8000,
+ .rate_max = 50000,
+ .mclk_fs = (unsigned int[]) { 32, 64, 128, 192, 256,
+ 384, 512, 768, 1024 },
+ .mclk_fs_num = 9,
+ }, {
+ /* Double speed mode */
+ .rate_min = 84000,
+ .rate_max = 100000,
+ .mclk_fs = (unsigned int[]) { 128, 192, 256, 384, 512,
+ 768, 1024},
+ .mclk_fs_num = 7,
+ }
+};
+
+/* The ES7154 has a separate supply for digital I/O */
+static const struct snd_soc_dapm_widget es7154_extra_widgets[] = {
+ SND_SOC_DAPM_REGULATOR_SUPPLY("PVDD", 0, 0),
+};
+
+static const struct snd_soc_dapm_route es7154_extra_routes[] = {
+ { "Playback", NULL, "PVDD", }
+};
+
+static const struct es7134_chip es7154_chip = {
+ .dai_drv = &es7154_dai,
+ .modes = es7154_modes,
+ .mode_num = ARRAY_SIZE(es7154_modes),
+ .extra_routes = es7154_extra_routes,
+ .extra_route_num = ARRAY_SIZE(es7154_extra_routes),
+ .extra_widgets = es7154_extra_widgets,
+ .extra_widget_num = ARRAY_SIZE(es7154_extra_widgets),
+};
+
static int es7134_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct es7134_data *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
+ priv->chip = of_device_get_match_data(dev);
+ if (!priv->chip) {
+ dev_err(dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
return devm_snd_soc_register_component(&pdev->dev,
&es7134_component_driver,
- &es7134_dai, 1);
+ priv->chip->dai_drv, 1);
}
#ifdef CONFIG_OF
static const struct of_device_id es7134_ids[] = {
- { .compatible = "everest,es7134", },
- { .compatible = "everest,es7144", },
+ { .compatible = "everest,es7134", .data = &es7134_chip },
+ { .compatible = "everest,es7144", .data = &es7134_chip },
+ { .compatible = "everest,es7154", .data = &es7154_chip },
{ }
};
MODULE_DEVICE_TABLE(of, es7134_ids);
diff --git a/sound/soc/codecs/es7241.c b/sound/soc/codecs/es7241.c
new file mode 100644
index 000000000000..87991bd4acef
--- /dev/null
+++ b/sound/soc/codecs/es7241.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/gpio/consumer.h>
+#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <sound/soc.h>
+
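+/*
+ * For each supported rate range: the mclk/fs ratios accepted in slave
+ * mode, the single ratio used in master mode, and the M0/M1 pin
+ * levels selecting that speed mode.
+ */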
+struct es7241_clock_mode {
+ unsigned int rate_min;
+ unsigned int rate_max;
+ unsigned int *slv_mfs;
+ unsigned int slv_mfs_num;
+ unsigned int mst_mfs;
+ unsigned int mst_m0:1;
+ unsigned int mst_m1:1;
+};
+
+struct es7241_chip {
+ const struct es7241_clock_mode *modes;
+ unsigned int mode_num;
+};
+
+struct es7241_data {
+ struct gpio_desc *reset;
+ struct gpio_desc *m0;
+ struct gpio_desc *m1;
+ unsigned int fmt;
+ unsigned int mclk;
+ bool is_slave;
+ const struct es7241_chip *chip;
+};
+
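+/* The mode pins are changed while the device is held in reset */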
+static void es7241_set_mode(struct es7241_data *priv, int m0, int m1)
+{
+ /* put the device in reset */
+ gpiod_set_value_cansleep(priv->reset, 0);
+
+ /* set the mode */
+ gpiod_set_value_cansleep(priv->m0, m0);
+ gpiod_set_value_cansleep(priv->m1, m1);
+
+ /* take the device out of reset - datasheet does not specify a delay */
+ gpiod_set_value_cansleep(priv->reset, 1);
+}
+
+static int es7241_set_slave_mode(struct es7241_data *priv,
+ const struct es7241_clock_mode *mode,
+ unsigned int mfs)
+{
+ int j;
+
+ if (!mfs)
+ goto out_ok;
+
+ for (j = 0; j < mode->slv_mfs_num; j++) {
+ if (mode->slv_mfs[j] == mfs)
+ goto out_ok;
+ }
+
+ return -EINVAL;
+
+out_ok:
+ es7241_set_mode(priv, 1, 1);
+ return 0;
+}
+
+static int es7241_set_master_mode(struct es7241_data *priv,
+ const struct es7241_clock_mode *mode,
+ unsigned int mfs)
+{
+ /*
+ * We can't really set the clock ratio: in master mode the chip derives
+ * lrclk from mclk at a fixed ratio, so error out if the requested
+ * ratio is different.
+ */
+ if (mfs && mfs != mode->mst_mfs)
+ return -EINVAL;
+
+ es7241_set_mode(priv, mode->mst_m0, mode->mst_m1);
+
+ return 0;
+}
+
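+/*
+ * Pick the speed mode matching the requested rate and program it,
+ * validating the mclk/fs ratio when an mclk rate has been provided.
+ */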
+static int es7241_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct es7241_data *priv = snd_soc_dai_get_drvdata(dai);
+ unsigned int rate = params_rate(params);
+ unsigned int mfs = priv->mclk / rate;
+ int i;
+
+ for (i = 0; i < priv->chip->mode_num; i++) {
+ const struct es7241_clock_mode *mode = &priv->chip->modes[i];
+
+ if (rate < mode->rate_min || rate >= mode->rate_max)
+ continue;
+
+ if (priv->is_slave)
+ return es7241_set_slave_mode(priv, mode, mfs);
+ else
+ return es7241_set_master_mode(priv, mode, mfs);
+ }
+
+ /* should not happen */
+ dev_err(dai->dev, "unsupported rate: %u\n", rate);
+ return -EINVAL;
+}
+
+static int es7241_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct es7241_data *priv = snd_soc_dai_get_drvdata(dai);
+
+ if (dir == SND_SOC_CLOCK_IN && clk_id == 0) {
+ priv->mclk = freq;
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
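+/*
+ * Format and clock polarity are fixed by the wiring: just check them
+ * here and record whether the codec is bit clock slave or master.
+ */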
+static int es7241_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct es7241_data *priv = snd_soc_dai_get_drvdata(dai);
+
+ if ((fmt & SND_SOC_DAIFMT_INV_MASK) != SND_SOC_DAIFMT_NB_NF) {
+ dev_err(dai->dev, "Unsupported dai clock inversion\n");
+ return -EINVAL;
+ }
+
+ if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) != priv->fmt) {
+ dev_err(dai->dev, "Invalid dai format\n");
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ priv->is_slave = true;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ priv->is_slave = false;
+ break;
+
+ default:
+ dev_err(dai->dev, "Unsupported clock configuration\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops es7241_dai_ops = {
+ .set_fmt = es7241_set_fmt,
+ .hw_params = es7241_hw_params,
+ .set_sysclk = es7241_set_sysclk,
+};
+
+static struct snd_soc_dai_driver es7241_dai = {
+ .name = "es7241-hifi",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ },
+ .ops = &es7241_dai_ops,
+};
+
+static const struct es7241_clock_mode es7241_modes[] = {
+ {
+ /* Single speed mode */
+ .rate_min = 8000,
+ .rate_max = 50000,
+ .slv_mfs = (unsigned int[]) { 256, 384, 512, 768, 1024 },
+ .slv_mfs_num = 5,
+ .mst_mfs = 256,
+ .mst_m0 = 0,
+ .mst_m1 = 0,
+ }, {
+ /* Double speed mode */
+ .rate_min = 50000,
+ .rate_max = 100000,
+ .slv_mfs = (unsigned int[]) { 128, 192 },
+ .slv_mfs_num = 2,
+ .mst_mfs = 128,
+ .mst_m0 = 1,
+ .mst_m1 = 0,
+ }, {
+ /* Quad speed mode */
+ .rate_min = 100000,
+ .rate_max = 200000,
+ .slv_mfs = (unsigned int[]) { 64 },
+ .slv_mfs_num = 1,
+ .mst_mfs = 64,
+ .mst_m0 = 0,
+ .mst_m1 = 1,
+ },
+};
+
+static const struct es7241_chip es7241_chip = {
+ .modes = es7241_modes,
+ .mode_num = ARRAY_SIZE(es7241_modes),
+};
+
+static const struct snd_soc_dapm_widget es7241_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("AINL"),
+ SND_SOC_DAPM_INPUT("AINR"),
+	SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_REGULATOR_SUPPLY("VDDP", 0, 0),
+ SND_SOC_DAPM_REGULATOR_SUPPLY("VDDD", 0, 0),
+ SND_SOC_DAPM_REGULATOR_SUPPLY("VDDA", 0, 0),
+};
+
+static const struct snd_soc_dapm_route es7241_dapm_routes[] = {
+ { "ADC", NULL, "AINL", },
+ { "ADC", NULL, "AINR", },
+ { "ADC", NULL, "VDDA", },
+ { "Capture", NULL, "VDDP", },
+ { "Capture", NULL, "VDDD", },
+};
+
+static const struct snd_soc_component_driver es7241_component_driver = {
+ .dapm_widgets = es7241_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(es7241_dapm_widgets),
+ .dapm_routes = es7241_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(es7241_dapm_routes),
+ .idle_bias_on = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static void es7241_parse_fmt(struct device *dev, struct es7241_data *priv)
+{
+ bool is_leftj;
+
+ /*
+ * The format is given by a pull resistor on the SDOUT pin:
+ * pull-up for i2s, pull-down for left justified.
+ */
+ is_leftj = of_property_read_bool(dev->of_node,
+ "everest,sdout-pull-down");
+ if (is_leftj)
+ priv->fmt = SND_SOC_DAIFMT_LEFT_J;
+ else
+ priv->fmt = SND_SOC_DAIFMT_I2S;
+}
+
+static int es7241_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct es7241_data *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
+ priv->chip = of_device_get_match_data(dev);
+ if (!priv->chip) {
+ dev_err(dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
+ es7241_parse_fmt(dev, priv);
+
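+	/*
+	 * The reset, m0 and m1 gpios are optional: the corresponding
+	 * pins may be hardwired on the board.
+	 */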
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ err = PTR_ERR(priv->reset);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get 'reset' gpio: %d", err);
+ return err;
+ }
+
+ priv->m0 = devm_gpiod_get_optional(dev, "m0", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->m0)) {
+ err = PTR_ERR(priv->m0);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get 'm0' gpio: %d", err);
+ return err;
+ }
+
+ priv->m1 = devm_gpiod_get_optional(dev, "m1", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->m1)) {
+ err = PTR_ERR(priv->m1);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get 'm1' gpio: %d", err);
+ return err;
+ }
+
+ return devm_snd_soc_register_component(&pdev->dev,
+ &es7241_component_driver,
+ &es7241_dai, 1);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id es7241_ids[] = {
+ { .compatible = "everest,es7241", .data = &es7241_chip },
+ { }
+};
+MODULE_DEVICE_TABLE(of, es7241_ids);
+#endif
+
+static struct platform_driver es7241_driver = {
+ .driver = {
+ .name = "es7241",
+ .of_match_table = of_match_ptr(es7241_ids),
+ },
+ .probe = es7241_probe,
+};
+
+module_platform_driver(es7241_driver);
+
+MODULE_DESCRIPTION("ASoC ES7241 audio codec driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 84f7a7a36e4b..7b8533abf637 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -85,7 +85,7 @@ struct hdac_hdmi_pin {
bool mst_capable;
struct hdac_hdmi_port *ports;
int num_ports;
- struct hdac_ext_device *edev;
+ struct hdac_device *hdev;
};
struct hdac_hdmi_port {
@@ -126,6 +126,9 @@ struct hdac_hdmi_drv_data {
};
struct hdac_hdmi_priv {
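+	/* back-pointers to the codec device and its component/card */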
+ struct hdac_device *hdev;
+ struct snd_soc_component *component;
+ struct snd_card *card;
struct hdac_hdmi_dai_port_map dai_map[HDA_MAX_CVTS];
struct list_head pin_list;
struct list_head cvt_list;
@@ -139,7 +142,7 @@ struct hdac_hdmi_priv {
struct snd_soc_dai_driver *dai_drv;
};
-#define hdev_to_hdmi_priv(_hdev) ((to_ehdac_device(_hdev))->private_data)
+#define hdev_to_hdmi_priv(_hdev) dev_get_drvdata(&(_hdev)->dev)
static struct hdac_hdmi_pcm *
hdac_hdmi_get_pcm_from_cvt(struct hdac_hdmi_priv *hdmi,
@@ -158,7 +161,7 @@ hdac_hdmi_get_pcm_from_cvt(struct hdac_hdmi_priv *hdmi,
static void hdac_hdmi_jack_report(struct hdac_hdmi_pcm *pcm,
struct hdac_hdmi_port *port, bool is_connect)
{
- struct hdac_ext_device *edev = port->pin->edev;
+ struct hdac_device *hdev = port->pin->hdev;
if (is_connect)
snd_soc_dapm_enable_pin(port->dapm, port->jack_pin);
@@ -172,7 +175,7 @@ static void hdac_hdmi_jack_report(struct hdac_hdmi_pcm *pcm,
* ports.
*/
if (pcm->jack_event == 0) {
- dev_dbg(&edev->hdev.dev,
+ dev_dbg(&hdev->dev,
"jack report for pcm=%d\n",
pcm->pcm_id);
snd_soc_jack_report(pcm->jack, SND_JACK_AVOUT,
@@ -198,19 +201,18 @@ static void hdac_hdmi_jack_report(struct hdac_hdmi_pcm *pcm,
/*
 * Get the number of devices that can be connected to a port on the Pin widget.
*/
-static int hdac_hdmi_get_port_len(struct hdac_ext_device *edev, hda_nid_t nid)
+static int hdac_hdmi_get_port_len(struct hdac_device *hdev, hda_nid_t nid)
{
unsigned int caps;
unsigned int type, param;
- caps = get_wcaps(&edev->hdev, nid);
+ caps = get_wcaps(hdev, nid);
type = get_wcaps_type(caps);
if (!(caps & AC_WCAP_DIGITAL) || (type != AC_WID_PIN))
return 0;
- param = snd_hdac_read_parm_uncached(&edev->hdev, nid,
- AC_PAR_DEVLIST_LEN);
+ param = snd_hdac_read_parm_uncached(hdev, nid, AC_PAR_DEVLIST_LEN);
if (param == -1)
return param;
@@ -222,10 +224,10 @@ static int hdac_hdmi_get_port_len(struct hdac_ext_device *edev, hda_nid_t nid)
* id selected on the pin. Return 0 means the first port entry
* is selected or MST is not supported.
*/
-static int hdac_hdmi_port_select_get(struct hdac_ext_device *edev,
+static int hdac_hdmi_port_select_get(struct hdac_device *hdev,
struct hdac_hdmi_port *port)
{
- return snd_hdac_codec_read(&edev->hdev, port->pin->nid,
+ return snd_hdac_codec_read(hdev, port->pin->nid,
0, AC_VERB_GET_DEVICE_SEL, 0);
}
@@ -233,7 +235,7 @@ static int hdac_hdmi_port_select_get(struct hdac_ext_device *edev,
* Sets the selected port entry for the configuring Pin widget verb.
* returns error if port set is not equal to port get otherwise success
*/
-static int hdac_hdmi_port_select_set(struct hdac_ext_device *edev,
+static int hdac_hdmi_port_select_set(struct hdac_device *hdev,
struct hdac_hdmi_port *port)
{
int num_ports;
@@ -242,8 +244,7 @@ static int hdac_hdmi_port_select_set(struct hdac_ext_device *edev,
return 0;
/* AC_PAR_DEVLIST_LEN is 0 based. */
- num_ports = hdac_hdmi_get_port_len(edev, port->pin->nid);
-
+ num_ports = hdac_hdmi_get_port_len(hdev, port->pin->nid);
if (num_ports < 0)
return -EIO;
/*
@@ -253,13 +254,13 @@ static int hdac_hdmi_port_select_set(struct hdac_ext_device *edev,
if (num_ports + 1 < port->id)
return 0;
- snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
+ snd_hdac_codec_write(hdev, port->pin->nid, 0,
AC_VERB_SET_DEVICE_SEL, port->id);
- if (port->id != hdac_hdmi_port_select_get(edev, port))
+ if (port->id != hdac_hdmi_port_select_get(hdev, port))
return -EIO;
- dev_dbg(&edev->hdev.dev, "Selected the port=%d\n", port->id);
+ dev_dbg(&hdev->dev, "Selected the port=%d\n", port->id);
return 0;
}
@@ -277,13 +278,6 @@ static struct hdac_hdmi_pcm *get_hdmi_pcm_from_id(struct hdac_hdmi_priv *hdmi,
return NULL;
}
-static inline struct hdac_ext_device *to_hda_ext_device(struct device *dev)
-{
- struct hdac_device *hdev = dev_to_hdac_dev(dev);
-
- return to_ehdac_device(hdev);
-}
-
static unsigned int sad_format(const u8 *sad)
{
return ((sad[0] >> 0x3) & 0x1f);
@@ -324,15 +318,13 @@ format_constraint:
}
static void
-hdac_hdmi_set_dip_index(struct hdac_ext_device *edev, hda_nid_t pin_nid,
+hdac_hdmi_set_dip_index(struct hdac_device *hdev, hda_nid_t pin_nid,
int packet_index, int byte_index)
{
int val;
val = (packet_index << 5) | (byte_index & 0x1f);
-
- snd_hdac_codec_write(&edev->hdev, pin_nid, 0,
- AC_VERB_SET_HDMI_DIP_INDEX, val);
+ snd_hdac_codec_write(hdev, pin_nid, 0, AC_VERB_SET_HDMI_DIP_INDEX, val);
}
struct dp_audio_infoframe {
@@ -347,14 +339,14 @@ struct dp_audio_infoframe {
u8 LFEPBL01_LSV36_DM_INH7;
};
-static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *edev,
+static int hdac_hdmi_setup_audio_infoframe(struct hdac_device *hdev,
struct hdac_hdmi_pcm *pcm, struct hdac_hdmi_port *port)
{
uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
struct hdmi_audio_infoframe frame;
struct hdac_hdmi_pin *pin = port->pin;
struct dp_audio_infoframe dp_ai;
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_cvt *cvt = pcm->cvt;
u8 *dip;
int ret;
@@ -363,11 +355,11 @@ static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *edev,
u8 conn_type;
int channels, ca;
- ca = snd_hdac_channel_allocation(&edev->hdev, port->eld.info.spk_alloc,
+ ca = snd_hdac_channel_allocation(hdev, port->eld.info.spk_alloc,
pcm->channels, pcm->chmap_set, true, pcm->chmap);
channels = snd_hdac_get_active_channels(ca);
- hdmi->chmap.ops.set_channel_count(&edev->hdev, cvt->nid, channels);
+ hdmi->chmap.ops.set_channel_count(hdev, cvt->nid, channels);
snd_hdac_setup_channel_mapping(&hdmi->chmap, pin->nid, false, ca,
pcm->channels, pcm->chmap, pcm->chmap_set);
@@ -400,32 +392,31 @@ static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *edev,
break;
default:
- dev_err(&edev->hdev.dev, "Invalid connection type: %d\n",
- conn_type);
+ dev_err(&hdev->dev, "Invalid connection type: %d\n", conn_type);
return -EIO;
}
/* stop infoframe transmission */
- hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
- snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
+ hdac_hdmi_set_dip_index(hdev, pin->nid, 0x0, 0x0);
+ snd_hdac_codec_write(hdev, pin->nid, 0,
AC_VERB_SET_HDMI_DIP_XMIT, AC_DIPXMIT_DISABLE);
/* Fill infoframe. Index auto-incremented */
- hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
+ hdac_hdmi_set_dip_index(hdev, pin->nid, 0x0, 0x0);
if (conn_type == DRM_ELD_CONN_TYPE_HDMI) {
for (i = 0; i < sizeof(buffer); i++)
- snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
+ snd_hdac_codec_write(hdev, pin->nid, 0,
AC_VERB_SET_HDMI_DIP_DATA, buffer[i]);
} else {
for (i = 0; i < sizeof(dp_ai); i++)
- snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
+ snd_hdac_codec_write(hdev, pin->nid, 0,
AC_VERB_SET_HDMI_DIP_DATA, dip[i]);
}
/* Start infoframe */
- hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
- snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
+ hdac_hdmi_set_dip_index(hdev, pin->nid, 0x0, 0x0);
+ snd_hdac_codec_write(hdev, pin->nid, 0,
AC_VERB_SET_HDMI_DIP_XMIT, AC_DIPXMIT_BEST);
return 0;
@@ -435,12 +426,12 @@ static int hdac_hdmi_set_tdm_slot(struct snd_soc_dai *dai,
unsigned int tx_mask, unsigned int rx_mask,
int slots, int slot_width)
{
- struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = snd_soc_dai_get_drvdata(dai);
+ struct hdac_device *hdev = hdmi->hdev;
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_pcm *pcm;
- dev_dbg(&edev->hdev.dev, "%s: strm_tag: %d\n", __func__, tx_mask);
+ dev_dbg(&hdev->dev, "%s: strm_tag: %d\n", __func__, tx_mask);
dai_map = &hdmi->dai_map[dai->id];
@@ -455,8 +446,8 @@ static int hdac_hdmi_set_tdm_slot(struct snd_soc_dai *dai,
static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hparams, struct snd_soc_dai *dai)
{
- struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = snd_soc_dai_get_drvdata(dai);
+ struct hdac_device *hdev = hdmi->hdev;
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_port *port;
struct hdac_hdmi_pcm *pcm;
@@ -469,7 +460,7 @@ static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
return -ENODEV;
if ((!port->eld.monitor_present) || (!port->eld.eld_valid)) {
- dev_err(&edev->hdev.dev,
+ dev_err(&hdev->dev,
"device is not configured for this pin:port%d:%d\n",
port->pin->nid, port->id);
return -ENODEV;
@@ -489,28 +480,28 @@ static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int hdac_hdmi_query_port_connlist(struct hdac_ext_device *edev,
+static int hdac_hdmi_query_port_connlist(struct hdac_device *hdev,
struct hdac_hdmi_pin *pin,
struct hdac_hdmi_port *port)
{
- if (!(get_wcaps(&edev->hdev, pin->nid) & AC_WCAP_CONN_LIST)) {
- dev_warn(&edev->hdev.dev,
+ if (!(get_wcaps(hdev, pin->nid) & AC_WCAP_CONN_LIST)) {
+ dev_warn(&hdev->dev,
"HDMI: pin %d wcaps %#x does not support connection list\n",
- pin->nid, get_wcaps(&edev->hdev, pin->nid));
+ pin->nid, get_wcaps(hdev, pin->nid));
return -EINVAL;
}
- if (hdac_hdmi_port_select_set(edev, port) < 0)
+ if (hdac_hdmi_port_select_set(hdev, port) < 0)
return -EIO;
- port->num_mux_nids = snd_hdac_get_connections(&edev->hdev, pin->nid,
+ port->num_mux_nids = snd_hdac_get_connections(hdev, pin->nid,
port->mux_nids, HDA_MAX_CONNECTIONS);
if (port->num_mux_nids == 0)
- dev_warn(&edev->hdev.dev,
+ dev_warn(&hdev->dev,
"No connections found for pin:port %d:%d\n",
pin->nid, port->id);
- dev_dbg(&edev->hdev.dev, "num_mux_nids %d for pin:port %d:%d\n",
+ dev_dbg(&hdev->dev, "num_mux_nids %d for pin:port %d:%d\n",
port->num_mux_nids, pin->nid, port->id);
return port->num_mux_nids;
@@ -526,7 +517,7 @@ static int hdac_hdmi_query_port_connlist(struct hdac_ext_device *edev,
* connected.
*/
static struct hdac_hdmi_port *hdac_hdmi_get_port_from_cvt(
- struct hdac_ext_device *edev,
+ struct hdac_device *hdev,
struct hdac_hdmi_priv *hdmi,
struct hdac_hdmi_cvt *cvt)
{
@@ -541,7 +532,7 @@ static struct hdac_hdmi_port *hdac_hdmi_get_port_from_cvt(
list_for_each_entry(port, &pcm->port_list, head) {
mutex_lock(&pcm->lock);
- ret = hdac_hdmi_query_port_connlist(edev,
+ ret = hdac_hdmi_query_port_connlist(hdev,
port->pin, port);
mutex_unlock(&pcm->lock);
if (ret < 0)
@@ -568,8 +559,8 @@ static struct hdac_hdmi_port *hdac_hdmi_get_port_from_cvt(
static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = snd_soc_dai_get_drvdata(dai);
+ struct hdac_device *hdev = hdmi->hdev;
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_cvt *cvt;
struct hdac_hdmi_port *port;
@@ -578,7 +569,7 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
dai_map = &hdmi->dai_map[dai->id];
cvt = dai_map->cvt;
- port = hdac_hdmi_get_port_from_cvt(edev, hdmi, cvt);
+ port = hdac_hdmi_get_port_from_cvt(hdev, hdmi, cvt);
/*
* To make PA and other userland happy.
@@ -589,7 +580,7 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
if ((!port->eld.monitor_present) ||
(!port->eld.eld_valid)) {
- dev_warn(&edev->hdev.dev,
+ dev_warn(&hdev->dev,
"Failed: present?:%d ELD valid?:%d pin:port: %d:%d\n",
port->eld.monitor_present, port->eld.eld_valid,
port->pin->nid, port->id);
@@ -611,8 +602,7 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
static void hdac_hdmi_pcm_close(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = snd_soc_dai_get_drvdata(dai);
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_pcm *pcm;
@@ -695,10 +685,10 @@ static void hdac_hdmi_fill_route(struct snd_soc_dapm_route *route,
route->connected = handler;
}
-static struct hdac_hdmi_pcm *hdac_hdmi_get_pcm(struct hdac_ext_device *edev,
+static struct hdac_hdmi_pcm *hdac_hdmi_get_pcm(struct hdac_device *hdev,
struct hdac_hdmi_port *port)
{
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pcm *pcm = NULL;
struct hdac_hdmi_port *p;
@@ -715,33 +705,32 @@ static struct hdac_hdmi_pcm *hdac_hdmi_get_pcm(struct hdac_ext_device *edev,
return NULL;
}
-static void hdac_hdmi_set_power_state(struct hdac_ext_device *edev,
+static void hdac_hdmi_set_power_state(struct hdac_device *hdev,
hda_nid_t nid, unsigned int pwr_state)
{
int count;
unsigned int state;
- if (get_wcaps(&edev->hdev, nid) & AC_WCAP_POWER) {
- if (!snd_hdac_check_power_state(&edev->hdev, nid, pwr_state)) {
+ if (get_wcaps(hdev, nid) & AC_WCAP_POWER) {
+ if (!snd_hdac_check_power_state(hdev, nid, pwr_state)) {
for (count = 0; count < 10; count++) {
- snd_hdac_codec_read(&edev->hdev, nid, 0,
+ snd_hdac_codec_read(hdev, nid, 0,
AC_VERB_SET_POWER_STATE,
pwr_state);
- state = snd_hdac_sync_power_state(&edev->hdev,
+ state = snd_hdac_sync_power_state(hdev,
nid, pwr_state);
if (!(state & AC_PWRST_ERROR))
break;
}
}
-
}
}
-static void hdac_hdmi_set_amp(struct hdac_ext_device *edev,
+static void hdac_hdmi_set_amp(struct hdac_device *hdev,
hda_nid_t nid, int val)
{
- if (get_wcaps(&edev->hdev, nid) & AC_WCAP_OUT_AMP)
- snd_hdac_codec_write(&edev->hdev, nid, 0,
+ if (get_wcaps(hdev, nid) & AC_WCAP_OUT_AMP)
+ snd_hdac_codec_write(hdev, nid, 0,
AC_VERB_SET_AMP_GAIN_MUTE, val);
}
@@ -750,40 +739,40 @@ static int hdac_hdmi_pin_output_widget_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kc, int event)
{
struct hdac_hdmi_port *port = w->priv;
- struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
+ struct hdac_device *hdev = dev_to_hdac_dev(w->dapm->dev);
struct hdac_hdmi_pcm *pcm;
- dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
+ dev_dbg(&hdev->dev, "%s: widget: %s event: %x\n",
__func__, w->name, event);
- pcm = hdac_hdmi_get_pcm(edev, port);
+ pcm = hdac_hdmi_get_pcm(hdev, port);
if (!pcm)
return -EIO;
/* set the device if pin is mst_capable */
- if (hdac_hdmi_port_select_set(edev, port) < 0)
+ if (hdac_hdmi_port_select_set(hdev, port) < 0)
return -EIO;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- hdac_hdmi_set_power_state(edev, port->pin->nid, AC_PWRST_D0);
+ hdac_hdmi_set_power_state(hdev, port->pin->nid, AC_PWRST_D0);
/* Enable out path for this pin widget */
- snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
+ snd_hdac_codec_write(hdev, port->pin->nid, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
- hdac_hdmi_set_amp(edev, port->pin->nid, AMP_OUT_UNMUTE);
+ hdac_hdmi_set_amp(hdev, port->pin->nid, AMP_OUT_UNMUTE);
- return hdac_hdmi_setup_audio_infoframe(edev, pcm, port);
+ return hdac_hdmi_setup_audio_infoframe(hdev, pcm, port);
case SND_SOC_DAPM_POST_PMD:
- hdac_hdmi_set_amp(edev, port->pin->nid, AMP_OUT_MUTE);
+ hdac_hdmi_set_amp(hdev, port->pin->nid, AMP_OUT_MUTE);
/* Disable out path for this pin widget */
- snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
+ snd_hdac_codec_write(hdev, port->pin->nid, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
- hdac_hdmi_set_power_state(edev, port->pin->nid, AC_PWRST_D3);
+ hdac_hdmi_set_power_state(hdev, port->pin->nid, AC_PWRST_D3);
break;
}
@@ -795,11 +784,11 @@ static int hdac_hdmi_cvt_output_widget_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kc, int event)
{
struct hdac_hdmi_cvt *cvt = w->priv;
- struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_device *hdev = dev_to_hdac_dev(w->dapm->dev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pcm *pcm;
- dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
+ dev_dbg(&hdev->dev, "%s: widget: %s event: %x\n",
__func__, w->name, event);
pcm = hdac_hdmi_get_pcm_from_cvt(hdmi, cvt);
@@ -808,29 +797,29 @@ static int hdac_hdmi_cvt_output_widget_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- hdac_hdmi_set_power_state(edev, cvt->nid, AC_PWRST_D0);
+ hdac_hdmi_set_power_state(hdev, cvt->nid, AC_PWRST_D0);
/* Enable transmission */
- snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
+ snd_hdac_codec_write(hdev, cvt->nid, 0,
AC_VERB_SET_DIGI_CONVERT_1, 1);
/* Category Code (CC) to zero */
- snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
+ snd_hdac_codec_write(hdev, cvt->nid, 0,
AC_VERB_SET_DIGI_CONVERT_2, 0);
- snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
+ snd_hdac_codec_write(hdev, cvt->nid, 0,
AC_VERB_SET_CHANNEL_STREAMID, pcm->stream_tag);
- snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
+ snd_hdac_codec_write(hdev, cvt->nid, 0,
AC_VERB_SET_STREAM_FORMAT, pcm->format);
break;
case SND_SOC_DAPM_POST_PMD:
- snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
+ snd_hdac_codec_write(hdev, cvt->nid, 0,
AC_VERB_SET_CHANNEL_STREAMID, 0);
- snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
+ snd_hdac_codec_write(hdev, cvt->nid, 0,
AC_VERB_SET_STREAM_FORMAT, 0);
- hdac_hdmi_set_power_state(edev, cvt->nid, AC_PWRST_D3);
+ hdac_hdmi_set_power_state(hdev, cvt->nid, AC_PWRST_D3);
break;
}
@@ -842,10 +831,10 @@ static int hdac_hdmi_pin_mux_widget_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kc, int event)
{
struct hdac_hdmi_port *port = w->priv;
- struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
+ struct hdac_device *hdev = dev_to_hdac_dev(w->dapm->dev);
int mux_idx;
- dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
+ dev_dbg(&hdev->dev, "%s: widget: %s event: %x\n",
__func__, w->name, event);
if (!kc)
@@ -854,11 +843,11 @@ static int hdac_hdmi_pin_mux_widget_event(struct snd_soc_dapm_widget *w,
mux_idx = dapm_kcontrol_get_value(kc);
/* set the device if pin is mst_capable */
- if (hdac_hdmi_port_select_set(edev, port) < 0)
+ if (hdac_hdmi_port_select_set(hdev, port) < 0)
return -EIO;
if (mux_idx > 0) {
- snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
+ snd_hdac_codec_write(hdev, port->pin->nid, 0,
AC_VERB_SET_CONNECT_SEL, (mux_idx - 1));
}
@@ -877,8 +866,8 @@ static int hdac_hdmi_set_pin_port_mux(struct snd_kcontrol *kcontrol,
struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_context *dapm = w->dapm;
struct hdac_hdmi_port *port = w->priv;
- struct hdac_ext_device *edev = to_hda_ext_device(dapm->dev);
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_device *hdev = dev_to_hdac_dev(dapm->dev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pcm *pcm = NULL;
const char *cvt_name = e->texts[ucontrol->value.enumerated.item[0]];
@@ -931,12 +920,12 @@ static int hdac_hdmi_set_pin_port_mux(struct snd_kcontrol *kcontrol,
* care of selecting the right one and leaving all other inputs selected to
* "NONE"
*/
-static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
+static int hdac_hdmi_create_pin_port_muxs(struct hdac_device *hdev,
struct hdac_hdmi_port *port,
struct snd_soc_dapm_widget *widget,
const char *widget_name)
{
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pin *pin = port->pin;
struct snd_kcontrol_new *kc;
struct hdac_hdmi_cvt *cvt;
@@ -948,17 +937,17 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
int i = 0;
int num_items = hdmi->num_cvt + 1;
- kc = devm_kzalloc(&edev->hdev.dev, sizeof(*kc), GFP_KERNEL);
+ kc = devm_kzalloc(&hdev->dev, sizeof(*kc), GFP_KERNEL);
if (!kc)
return -ENOMEM;
- se = devm_kzalloc(&edev->hdev.dev, sizeof(*se), GFP_KERNEL);
+ se = devm_kzalloc(&hdev->dev, sizeof(*se), GFP_KERNEL);
if (!se)
return -ENOMEM;
snprintf(kc_name, NAME_SIZE, "Pin %d port %d Input",
pin->nid, port->id);
- kc->name = devm_kstrdup(&edev->hdev.dev, kc_name, GFP_KERNEL);
+ kc->name = devm_kstrdup(&hdev->dev, kc_name, GFP_KERNEL);
if (!kc->name)
return -ENOMEM;
@@ -976,35 +965,35 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
se->mask = roundup_pow_of_two(se->items) - 1;
sprintf(mux_items, "NONE");
- items[i] = devm_kstrdup(&edev->hdev.dev, mux_items, GFP_KERNEL);
+ items[i] = devm_kstrdup(&hdev->dev, mux_items, GFP_KERNEL);
if (!items[i])
return -ENOMEM;
list_for_each_entry(cvt, &hdmi->cvt_list, head) {
i++;
sprintf(mux_items, "cvt %d", cvt->nid);
- items[i] = devm_kstrdup(&edev->hdev.dev, mux_items, GFP_KERNEL);
+ items[i] = devm_kstrdup(&hdev->dev, mux_items, GFP_KERNEL);
if (!items[i])
return -ENOMEM;
}
- se->texts = devm_kmemdup(&edev->hdev.dev, items,
+ se->texts = devm_kmemdup(&hdev->dev, items,
(num_items * sizeof(char *)), GFP_KERNEL);
if (!se->texts)
return -ENOMEM;
- return hdac_hdmi_fill_widget_info(&edev->hdev.dev, widget,
+ return hdac_hdmi_fill_widget_info(&hdev->dev, widget,
snd_soc_dapm_mux, port, widget_name, NULL, kc, 1,
hdac_hdmi_pin_mux_widget_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_REG);
}
/* Add cvt <- input <- mux route map */
-static void hdac_hdmi_add_pinmux_cvt_route(struct hdac_ext_device *edev,
+static void hdac_hdmi_add_pinmux_cvt_route(struct hdac_device *hdev,
struct snd_soc_dapm_widget *widgets,
struct snd_soc_dapm_route *route, int rindex)
{
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
const struct snd_kcontrol_new *kc;
struct soc_enum *se;
int mux_index = hdmi->num_cvt + hdmi->num_ports;
@@ -1046,8 +1035,8 @@ static int create_fill_widget_route_map(struct snd_soc_dapm_context *dapm)
{
struct snd_soc_dapm_widget *widgets;
struct snd_soc_dapm_route *route;
- struct hdac_ext_device *edev = to_hda_ext_device(dapm->dev);
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_device *hdev = dev_to_hdac_dev(dapm->dev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct snd_soc_dai_driver *dai_drv = hdmi->dai_drv;
char widget_name[NAME_SIZE];
struct hdac_hdmi_cvt *cvt;
@@ -1099,7 +1088,7 @@ static int create_fill_widget_route_map(struct snd_soc_dapm_context *dapm)
for (j = 0; j < pin->num_ports; j++) {
sprintf(widget_name, "Pin%d-Port%d Mux",
pin->nid, pin->ports[j].id);
- ret = hdac_hdmi_create_pin_port_muxs(edev,
+ ret = hdac_hdmi_create_pin_port_muxs(hdev,
&pin->ports[j], &widgets[i],
widget_name);
if (ret < 0)
@@ -1134,7 +1123,7 @@ static int create_fill_widget_route_map(struct snd_soc_dapm_context *dapm)
}
}
- hdac_hdmi_add_pinmux_cvt_route(edev, widgets, route, i);
+ hdac_hdmi_add_pinmux_cvt_route(hdev, widgets, route, i);
snd_soc_dapm_new_controls(dapm, widgets,
((2 * hdmi->num_ports) + hdmi->num_cvt));
@@ -1146,9 +1135,9 @@ static int create_fill_widget_route_map(struct snd_soc_dapm_context *dapm)
}
-static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
+static int hdac_hdmi_init_dai_map(struct hdac_device *hdev)
{
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_cvt *cvt;
int dai_id = 0;
@@ -1164,7 +1153,7 @@ static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
dai_id++;
if (dai_id == HDA_MAX_CVTS) {
- dev_warn(&edev->hdev.dev,
+ dev_warn(&hdev->dev,
"Max dais supported: %d\n", dai_id);
break;
}
@@ -1173,9 +1162,9 @@ static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
return 0;
}
-static int hdac_hdmi_add_cvt(struct hdac_ext_device *edev, hda_nid_t nid)
+static int hdac_hdmi_add_cvt(struct hdac_device *hdev, hda_nid_t nid)
{
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_cvt *cvt;
char name[NAME_SIZE];
@@ -1190,10 +1179,10 @@ static int hdac_hdmi_add_cvt(struct hdac_ext_device *edev, hda_nid_t nid)
list_add_tail(&cvt->head, &hdmi->cvt_list);
hdmi->num_cvt++;
- return hdac_hdmi_query_cvt_params(&edev->hdev, cvt);
+ return hdac_hdmi_query_cvt_params(hdev, cvt);
}
-static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
+static int hdac_hdmi_parse_eld(struct hdac_device *hdev,
struct hdac_hdmi_port *port)
{
unsigned int ver, mnl;
@@ -1202,7 +1191,7 @@ static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
>> DRM_ELD_VER_SHIFT;
if (ver != ELD_VER_CEA_861D && ver != ELD_VER_PARTIAL) {
- dev_err(&edev->hdev.dev, "HDMI: Unknown ELD version %d\n", ver);
+ dev_err(&hdev->dev, "HDMI: Unknown ELD version %d\n", ver);
return -EINVAL;
}
@@ -1210,7 +1199,7 @@ static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
if (mnl > ELD_MAX_MNL) {
- dev_err(&edev->hdev.dev, "HDMI: MNL Invalid %d\n", mnl);
+ dev_err(&hdev->dev, "HDMI: MNL Invalid %d\n", mnl);
return -EINVAL;
}
@@ -1222,8 +1211,8 @@ static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
struct hdac_hdmi_port *port)
{
- struct hdac_ext_device *edev = pin->edev;
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_device *hdev = pin->hdev;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pcm *pcm;
int size = 0;
int port_id = -1;
@@ -1241,14 +1230,14 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
if (pin->mst_capable)
port_id = port->id;
- size = snd_hdac_acomp_get_eld(&edev->hdev, pin->nid, port_id,
+ size = snd_hdac_acomp_get_eld(hdev, pin->nid, port_id,
&port->eld.monitor_present,
port->eld.eld_buffer,
ELD_MAX_SIZE);
if (size > 0) {
size = min(size, ELD_MAX_SIZE);
- if (hdac_hdmi_parse_eld(edev, port) < 0)
+ if (hdac_hdmi_parse_eld(hdev, port) < 0)
size = -EINVAL;
}
@@ -1260,11 +1249,11 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
port->eld.eld_size = 0;
}
- pcm = hdac_hdmi_get_pcm(edev, port);
+ pcm = hdac_hdmi_get_pcm(hdev, port);
if (!port->eld.monitor_present || !port->eld.eld_valid) {
- dev_err(&edev->hdev.dev, "%s: disconnect for pin:port %d:%d\n",
+ dev_err(&hdev->dev, "%s: disconnect for pin:port %d:%d\n",
__func__, pin->nid, port->id);
/*
@@ -1316,9 +1305,9 @@ static int hdac_hdmi_add_ports(struct hdac_hdmi_priv *hdmi,
return 0;
}
-static int hdac_hdmi_add_pin(struct hdac_ext_device *edev, hda_nid_t nid)
+static int hdac_hdmi_add_pin(struct hdac_device *hdev, hda_nid_t nid)
{
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pin *pin;
int ret;
@@ -1328,7 +1317,7 @@ static int hdac_hdmi_add_pin(struct hdac_ext_device *edev, hda_nid_t nid)
pin->nid = nid;
pin->mst_capable = false;
- pin->edev = edev;
+ pin->hdev = hdev;
ret = hdac_hdmi_add_ports(hdmi, pin);
if (ret < 0)
return ret;
@@ -1459,15 +1448,14 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdev,
 * Parse all nodes and store the cvt/pin nids in an array
 * Add one-time initialization for pin and cvt widgets
*/
-static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
+static int hdac_hdmi_parse_and_map_nid(struct hdac_device *hdev,
struct snd_soc_dai_driver **dais, int *num_dais)
{
hda_nid_t nid;
int i, num_nodes;
struct hdac_hdmi_cvt *temp_cvt, *cvt_next;
struct hdac_hdmi_pin *temp_pin, *pin_next;
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
- struct hdac_device *hdev = &edev->hdev;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
int ret;
hdac_hdmi_skl_enable_all_pins(hdev);
@@ -1492,13 +1480,13 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
switch (type) {
case AC_WID_AUD_OUT:
- ret = hdac_hdmi_add_cvt(edev, nid);
+ ret = hdac_hdmi_add_cvt(hdev, nid);
if (ret < 0)
goto free_widgets;
break;
case AC_WID_PIN:
- ret = hdac_hdmi_add_pin(edev, nid);
+ ret = hdac_hdmi_add_pin(hdev, nid);
if (ret < 0)
goto free_widgets;
break;
@@ -1518,7 +1506,7 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
}
*num_dais = hdmi->num_cvt;
- ret = hdac_hdmi_init_dai_map(edev);
+ ret = hdac_hdmi_init_dai_map(hdev);
if (ret < 0)
goto free_widgets;
@@ -1542,19 +1530,24 @@ free_widgets:
return ret;
}
+static int hdac_hdmi_pin2port(void *aptr, int pin)
+{
+ return pin - 4; /* map NID 0x05 -> port #1 */
+}
+
static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
{
- struct hdac_ext_device *edev = aptr;
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_device *hdev = aptr;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pin *pin = NULL;
struct hdac_hdmi_port *hport = NULL;
- struct snd_soc_component *component = edev->scodec;
+ struct snd_soc_component *component = hdmi->component;
int i;
/* Don't know how this mapping is derived */
hda_nid_t pin_nid = port + 0x04;
- dev_dbg(&edev->hdev.dev, "%s: for pin:%d port=%d\n", __func__,
+ dev_dbg(&hdev->dev, "%s: for pin:%d port=%d\n", __func__,
pin_nid, pipe);
/*
@@ -1567,7 +1560,7 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
SNDRV_CTL_POWER_D0)
return;
- if (atomic_read(&edev->hdev.in_pm))
+ if (atomic_read(&hdev->in_pm))
return;
list_for_each_entry(pin, &hdmi->pin_list, head) {
@@ -1595,7 +1588,8 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
}
-static struct i915_audio_component_audio_ops aops = {
+static struct drm_audio_component_audio_ops aops = {
+ .pin2port = hdac_hdmi_pin2port,
.pin_eld_notify = hdac_hdmi_eld_notify_cb,
};
@@ -1614,15 +1608,15 @@ static struct snd_pcm *hdac_hdmi_get_pcm_from_id(struct snd_soc_card *card,
/* create jack pin kcontrols */
static int create_fill_jack_kcontrols(struct snd_soc_card *card,
- struct hdac_ext_device *edev)
+ struct hdac_device *hdev)
{
struct hdac_hdmi_pin *pin;
struct snd_kcontrol_new *kc;
char kc_name[NAME_SIZE], xname[NAME_SIZE];
char *name;
int i = 0, j;
- struct snd_soc_component *component = edev->scodec;
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
+ struct snd_soc_component *component = hdmi->component;
kc = devm_kcalloc(component->dev, hdmi->num_ports,
sizeof(*kc), GFP_KERNEL);
@@ -1659,8 +1653,8 @@ static int create_fill_jack_kcontrols(struct snd_soc_card *card,
int hdac_hdmi_jack_port_init(struct snd_soc_component *component,
struct snd_soc_dapm_context *dapm)
{
- struct hdac_ext_device *edev = snd_soc_component_get_drvdata(component);
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = snd_soc_component_get_drvdata(component);
+ struct hdac_device *hdev = hdmi->hdev;
struct hdac_hdmi_pin *pin;
struct snd_soc_dapm_widget *widgets;
struct snd_soc_dapm_route *route;
@@ -1715,7 +1709,7 @@ int hdac_hdmi_jack_port_init(struct snd_soc_component *component,
return ret;
/* Add Jack Pin switch Kcontrol */
- ret = create_fill_jack_kcontrols(dapm->card, edev);
+ ret = create_fill_jack_kcontrols(dapm->card, hdev);
if (ret < 0)
return ret;
@@ -1735,8 +1729,8 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device,
struct snd_soc_jack *jack)
{
struct snd_soc_component *component = dai->component;
- struct hdac_ext_device *edev = snd_soc_component_get_drvdata(component);
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = snd_soc_component_get_drvdata(component);
+ struct hdac_device *hdev = hdmi->hdev;
struct hdac_hdmi_pcm *pcm;
struct snd_pcm *snd_pcm;
int err;
@@ -1758,7 +1752,7 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device,
if (snd_pcm) {
err = snd_hdac_add_chmap_ctls(snd_pcm, device, &hdmi->chmap);
if (err < 0) {
- dev_err(&edev->hdev.dev,
+ dev_err(&hdev->dev,
"chmap control add failed with err: %d for pcm: %d\n",
err, device);
kfree(pcm);
@@ -1772,7 +1766,7 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device,
}
EXPORT_SYMBOL_GPL(hdac_hdmi_jack_init);
-static void hdac_hdmi_present_sense_all_pins(struct hdac_ext_device *edev,
+static void hdac_hdmi_present_sense_all_pins(struct hdac_device *hdev,
struct hdac_hdmi_priv *hdmi, bool detect_pin_caps)
{
int i;
@@ -1781,7 +1775,7 @@ static void hdac_hdmi_present_sense_all_pins(struct hdac_ext_device *edev,
list_for_each_entry(pin, &hdmi->pin_list, head) {
if (detect_pin_caps) {
- if (hdac_hdmi_get_port_len(edev, pin->nid) == 0)
+ if (hdac_hdmi_get_port_len(hdev, pin->nid) == 0)
pin->mst_capable = false;
else
pin->mst_capable = true;
@@ -1798,68 +1792,67 @@ static void hdac_hdmi_present_sense_all_pins(struct hdac_ext_device *edev,
static int hdmi_codec_probe(struct snd_soc_component *component)
{
- struct hdac_ext_device *edev = snd_soc_component_get_drvdata(component);
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = snd_soc_component_get_drvdata(component);
+ struct hdac_device *hdev = hdmi->hdev;
struct snd_soc_dapm_context *dapm =
snd_soc_component_get_dapm(component);
struct hdac_ext_link *hlink = NULL;
int ret;
- edev->scodec = component;
+ hdmi->component = component;
/*
 * Hold the ref while we probe; no need to drop it on exit since
 * we call pm_runtime_suspend(), which will do that for us
*/
- hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdev.dev));
+ hlink = snd_hdac_ext_bus_get_link(hdev->bus, dev_name(&hdev->dev));
if (!hlink) {
- dev_err(&edev->hdev.dev, "hdac link not found\n");
+ dev_err(&hdev->dev, "hdac link not found\n");
return -EIO;
}
- snd_hdac_ext_bus_link_get(edev->ebus, hlink);
+ snd_hdac_ext_bus_link_get(hdev->bus, hlink);
ret = create_fill_widget_route_map(dapm);
if (ret < 0)
return ret;
- aops.audio_ptr = edev;
- ret = snd_hdac_i915_register_notifier(&aops);
+ aops.audio_ptr = hdev;
+ ret = snd_hdac_acomp_register_notifier(hdev->bus, &aops);
if (ret < 0) {
- dev_err(&edev->hdev.dev, "notifier register failed: err: %d\n",
- ret);
+ dev_err(&hdev->dev, "notifier register failed: err: %d\n", ret);
return ret;
}
- hdac_hdmi_present_sense_all_pins(edev, hdmi, true);
+ hdac_hdmi_present_sense_all_pins(hdev, hdmi, true);
/* Imp: Store the card pointer in hda_codec */
- edev->card = dapm->card->snd_card;
+ hdmi->card = dapm->card->snd_card;
/*
* hdac_device core already sets the state to active and calls
* get_noresume. So enable runtime and set the device to suspend.
*/
- pm_runtime_enable(&edev->hdev.dev);
- pm_runtime_put(&edev->hdev.dev);
- pm_runtime_suspend(&edev->hdev.dev);
+ pm_runtime_enable(&hdev->dev);
+ pm_runtime_put(&hdev->dev);
+ pm_runtime_suspend(&hdev->dev);
return 0;
}
static void hdmi_codec_remove(struct snd_soc_component *component)
{
- struct hdac_ext_device *edev = snd_soc_component_get_drvdata(component);
+ struct hdac_hdmi_priv *hdmi = snd_soc_component_get_drvdata(component);
+ struct hdac_device *hdev = hdmi->hdev;
- pm_runtime_disable(&edev->hdev.dev);
+ pm_runtime_disable(&hdev->dev);
}
#ifdef CONFIG_PM
static int hdmi_codec_prepare(struct device *dev)
{
- struct hdac_ext_device *edev = to_hda_ext_device(dev);
- struct hdac_device *hdev = &edev->hdev;
+ struct hdac_device *hdev = dev_to_hdac_dev(dev);
- pm_runtime_get_sync(&edev->hdev.dev);
+ pm_runtime_get_sync(&hdev->dev);
/*
* Power down afg.
@@ -1876,16 +1869,15 @@ static int hdmi_codec_prepare(struct device *dev)
static void hdmi_codec_complete(struct device *dev)
{
- struct hdac_ext_device *edev = to_hda_ext_device(dev);
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
- struct hdac_device *hdev = &edev->hdev;
+ struct hdac_device *hdev = dev_to_hdac_dev(dev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
/* Power up afg */
snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
AC_PWRST_D0);
- hdac_hdmi_skl_enable_all_pins(&edev->hdev);
- hdac_hdmi_skl_enable_dp12(&edev->hdev);
+ hdac_hdmi_skl_enable_all_pins(hdev);
+ hdac_hdmi_skl_enable_dp12(hdev);
/*
* As the ELD notify callback request is not entertained while the
@@ -1893,9 +1885,9 @@ static void hdmi_codec_complete(struct device *dev)
 * all pins here. Pin capability change is not supported, so use the
 * already set pin caps.
*/
- hdac_hdmi_present_sense_all_pins(edev, hdmi, false);
+ hdac_hdmi_present_sense_all_pins(hdev, hdmi, false);
- pm_runtime_put_sync(&edev->hdev.dev);
+ pm_runtime_put_sync(&hdev->dev);
}
#else
#define hdmi_codec_prepare NULL
@@ -1922,7 +1914,6 @@ static void hdac_hdmi_get_chmap(struct hdac_device *hdev, int pcm_idx,
static void hdac_hdmi_set_chmap(struct hdac_device *hdev, int pcm_idx,
unsigned char *chmap, int prepared)
{
- struct hdac_ext_device *edev = to_ehdac_device(hdev);
struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
struct hdac_hdmi_port *port;
@@ -1938,7 +1929,7 @@ static void hdac_hdmi_set_chmap(struct hdac_device *hdev, int pcm_idx,
memcpy(pcm->chmap, chmap, ARRAY_SIZE(pcm->chmap));
list_for_each_entry(port, &pcm->port_list, head)
if (prepared)
- hdac_hdmi_setup_audio_infoframe(edev, pcm, port);
+ hdac_hdmi_setup_audio_infoframe(hdev, pcm, port);
mutex_unlock(&pcm->lock);
}
@@ -1987,10 +1978,9 @@ static struct hdac_hdmi_drv_data intel_drv_data = {
.vendor_nid = INTEL_VENDOR_NID,
};
-static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
+static int hdac_hdmi_dev_probe(struct hdac_device *hdev)
{
- struct hdac_device *hdev = &edev->hdev;
- struct hdac_hdmi_priv *hdmi_priv;
+ struct hdac_hdmi_priv *hdmi_priv = NULL;
struct snd_soc_dai_driver *hdmi_dais = NULL;
struct hdac_ext_link *hlink = NULL;
int num_dais = 0;
@@ -1999,24 +1989,24 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
const struct hda_device_id *hdac_id = hdac_get_device_id(hdev, hdrv);
/* hold the ref while we probe */
- hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdev.dev));
+ hlink = snd_hdac_ext_bus_get_link(hdev->bus, dev_name(&hdev->dev));
if (!hlink) {
- dev_err(&edev->hdev.dev, "hdac link not found\n");
+ dev_err(&hdev->dev, "hdac link not found\n");
return -EIO;
}
- snd_hdac_ext_bus_link_get(edev->ebus, hlink);
+ snd_hdac_ext_bus_link_get(hdev->bus, hlink);
hdmi_priv = devm_kzalloc(&hdev->dev, sizeof(*hdmi_priv), GFP_KERNEL);
if (hdmi_priv == NULL)
return -ENOMEM;
- edev->private_data = hdmi_priv;
snd_hdac_register_chmap_ops(hdev, &hdmi_priv->chmap);
hdmi_priv->chmap.ops.get_chmap = hdac_hdmi_get_chmap;
hdmi_priv->chmap.ops.set_chmap = hdac_hdmi_set_chmap;
hdmi_priv->chmap.ops.is_pcm_attached = is_hdac_hdmi_pcm_attached;
hdmi_priv->chmap.ops.get_spk_alloc = hdac_hdmi_get_spk_alloc;
+ hdmi_priv->hdev = hdev;
if (!hdac_id)
return -ENODEV;
@@ -2027,7 +2017,7 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
else
hdmi_priv->drv_data = &intel_drv_data;
- dev_set_drvdata(&hdev->dev, edev);
+ dev_set_drvdata(&hdev->dev, hdmi_priv);
INIT_LIST_HEAD(&hdmi_priv->pin_list);
INIT_LIST_HEAD(&hdmi_priv->cvt_list);
@@ -2038,15 +2028,15 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
* Turned off in the runtime_suspend during the first explicit
* pm_runtime_suspend call.
*/
- ret = snd_hdac_display_power(edev->hdev.bus, true);
+ ret = snd_hdac_display_power(hdev->bus, true);
if (ret < 0) {
- dev_err(&edev->hdev.dev,
+ dev_err(&hdev->dev,
"Cannot turn on display power on i915 err: %d\n",
ret);
return ret;
}
- ret = hdac_hdmi_parse_and_map_nid(edev, &hdmi_dais, &num_dais);
+ ret = hdac_hdmi_parse_and_map_nid(hdev, &hdmi_dais, &num_dais);
if (ret < 0) {
dev_err(&hdev->dev,
"Failed in parse and map nid with err: %d\n", ret);
@@ -2058,14 +2048,14 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
ret = devm_snd_soc_register_component(&hdev->dev, &hdmi_hda_codec,
hdmi_dais, num_dais);
- snd_hdac_ext_bus_link_put(edev->ebus, hlink);
+ snd_hdac_ext_bus_link_put(hdev->bus, hlink);
return ret;
}
-static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
+static int hdac_hdmi_dev_remove(struct hdac_device *hdev)
{
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pin *pin, *pin_next;
struct hdac_hdmi_cvt *cvt, *cvt_next;
struct hdac_hdmi_pcm *pcm, *pcm_next;
@@ -2103,12 +2093,79 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
}
#ifdef CONFIG_PM
+/*
+ * Power management sequences
+ * ==========================
+ *
+ * The following explains the PM handling of HDAC HDMI with its parent
+ * device SKL and display power usage
+ *
+ * Probe
+ * -----
+ * In SKL probe,
+ * 1. skl_probe_work() powers up the display (refcount++ -> 1)
+ * 2. enumerates the codecs on the link
+ * 3. powers down the display (refcount-- -> 0)
+ *
+ * In HDAC HDMI probe,
+ * 1. hdac_hdmi_dev_probe() powers up the display (refcount++ -> 1)
+ * 2. probe the codec
+ * 3. put the HDAC HDMI device to runtime suspend
+ * 4. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
+ *
+ * Once the children are runtime suspended, the SKL device also goes to
+ * runtime suspend
+ *
+ * HDMI Playback
+ * -------------
+ * Open HDMI device,
+ * 1. skl_runtime_resume() invoked
+ * 2. hdac_hdmi_runtime_resume() powers up the display (refcount++ -> 1)
+ *
+ * Close HDMI device,
+ * 1. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
+ * 2. skl_runtime_suspend() invoked
+ *
+ * S0/S3 Cycle with playback in progress
+ * -------------------------------------
+ * When the device is opened for playback, the device is runtime active
+ * already and the display refcount is 1 as explained above.
+ *
+ * Entering S3,
+ * 1. hdmi_codec_prepare() invokes the runtime resume of the codec, which
+ * just increments the PM runtime usage count of the codec since the
+ * device is already in use
+ * 2. skl_suspend() powers down the display (refcount-- -> 0)
+ *
+ * Wakeup from S3,
+ * 1. skl_resume() powers up the display (refcount++ -> 1)
+ * 2. hdmi_codec_complete() invokes the runtime suspend of the codec,
+ * which just decrements the PM runtime usage count of the codec since
+ * the device is already in use
+ *
+ * Once playback is stopped, the display refcount drops to 0 as explained
+ * above in the HDMI playback sequence. The PM handling is designed to
+ * keep the display power refcount balanced when the codec device enters
+ * S3 while playback is in progress.
+ *
+ * S0/S3 Cycle without playback in progress
+ * ----------------------------------------
+ * Entering S3,
+ * 1. hdmi_codec_prepare() invokes the runtime resume of the codec
+ * 2. skl_runtime_resume() invoked
+ * 3. hdac_hdmi_runtime_resume() powers up the display (refcount++ -> 1)
+ * 4. skl_suspend() powers down the display (refcount-- -> 0)
+ *
+ * Wakeup from S3,
+ * 1. skl_resume() powers up the display (refcount++ -> 1)
+ * 2. hdmi_codec_complete() invokes the runtime suspend of the codec
+ * 3. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
+ * 4. skl_runtime_suspend() invoked
+ */
static int hdac_hdmi_runtime_suspend(struct device *dev)
{
- struct hdac_ext_device *edev = to_hda_ext_device(dev);
- struct hdac_device *hdev = &edev->hdev;
+ struct hdac_device *hdev = dev_to_hdac_dev(dev);
struct hdac_bus *bus = hdev->bus;
- struct hdac_ext_bus *ebus = hbus_to_ebus(bus);
struct hdac_ext_link *hlink = NULL;
int err;
@@ -2129,27 +2186,25 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
AC_PWRST_D3);
err = snd_hdac_display_power(bus, false);
if (err < 0) {
- dev_err(bus->dev, "Cannot turn on display power on i915\n");
+ dev_err(dev, "Cannot turn on display power on i915\n");
return err;
}
- hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+ hlink = snd_hdac_ext_bus_get_link(bus, dev_name(dev));
if (!hlink) {
dev_err(dev, "hdac link not found\n");
return -EIO;
}
- snd_hdac_ext_bus_link_put(ebus, hlink);
+ snd_hdac_ext_bus_link_put(bus, hlink);
return 0;
}
static int hdac_hdmi_runtime_resume(struct device *dev)
{
- struct hdac_ext_device *edev = to_hda_ext_device(dev);
- struct hdac_device *hdev = &edev->hdev;
+ struct hdac_device *hdev = dev_to_hdac_dev(dev);
struct hdac_bus *bus = hdev->bus;
- struct hdac_ext_bus *ebus = hbus_to_ebus(bus);
struct hdac_ext_link *hlink = NULL;
int err;
@@ -2159,22 +2214,22 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
if (!bus)
return 0;
- hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+ hlink = snd_hdac_ext_bus_get_link(bus, dev_name(dev));
if (!hlink) {
dev_err(dev, "hdac link not found\n");
return -EIO;
}
- snd_hdac_ext_bus_link_get(ebus, hlink);
+ snd_hdac_ext_bus_link_get(bus, hlink);
err = snd_hdac_display_power(bus, true);
if (err < 0) {
- dev_err(bus->dev, "Cannot turn on display power on i915\n");
+ dev_err(dev, "Cannot turn on display power on i915\n");
return err;
}
- hdac_hdmi_skl_enable_all_pins(&edev->hdev);
- hdac_hdmi_skl_enable_dp12(&edev->hdev);
+ hdac_hdmi_skl_enable_all_pins(hdev);
+ hdac_hdmi_skl_enable_dp12(hdev);
/* Power up afg */
snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
@@ -2206,14 +2261,12 @@ static const struct hda_device_id hdmi_list[] = {
MODULE_DEVICE_TABLE(hdaudio, hdmi_list);
-static struct hdac_ext_driver hdmi_driver = {
- . hdac = {
- .driver = {
- .name = "HDMI HDA Codec",
- .pm = &hdac_hdmi_pm,
- },
- .id_table = hdmi_list,
+static struct hdac_driver hdmi_driver = {
+ .driver = {
+ .name = "HDMI HDA Codec",
+ .pm = &hdac_hdmi_pm,
},
+ .id_table = hdmi_list,
.probe = hdac_hdmi_dev_probe,
.remove = hdac_hdmi_dev_remove,
};
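
Every hdac_hdmi.c hunk above is one refactor: the driver private data is keyed off the plain struct hdac_device instead of the hdac_ext_device wrapper, so each helper takes hdev directly and hdev_to_hdmi_priv()/dev_get_drvdata() recover the rest. A minimal sketch of the pattern, using hypothetical my_* names rather than the driver's real types:

	struct my_codec {
		struct device dev;		/* embedded core device */
	};

	struct my_priv {
		struct my_codec *hdev;		/* back-pointer, set at probe */
	};

	static int my_probe(struct my_codec *hdev)
	{
		struct my_priv *priv;

		priv = devm_kzalloc(&hdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->hdev = hdev;
		/* later callbacks recover priv from the device alone */
		dev_set_drvdata(&hdev->dev, priv);
		return 0;
	}

	static struct my_priv *my_dev_to_priv(struct device *dev)
	{
		return dev_get_drvdata(dev);	/* e.g. from PM callbacks */
	}

With the back-pointer stored in the private data, dropping the wrapper costs nothing: any place that previously needed &edev->hdev either already has hdev or can reach it via priv->hdev.
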
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index 38e4a8515709..d00734d31e04 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -291,10 +291,6 @@ static const struct snd_soc_dapm_widget hdmi_widgets[] = {
SND_SOC_DAPM_OUTPUT("TX"),
};
-static const struct snd_soc_dapm_route hdmi_routes[] = {
- { "TX", NULL, "Playback" },
-};
-
enum {
DAI_ID_I2S = 0,
DAI_ID_SPDIF,
@@ -689,9 +685,23 @@ static int hdmi_codec_pcm_new(struct snd_soc_pcm_runtime *rtd,
return snd_ctl_add(rtd->card->snd_card, kctl);
}
+static int hdmi_dai_probe(struct snd_soc_dai *dai)
+{
+ struct snd_soc_dapm_context *dapm;
+ struct snd_soc_dapm_route route = {
+ .sink = "TX",
+ .source = dai->driver->playback.stream_name,
+ };
+
+ dapm = snd_soc_component_get_dapm(dai->component);
+
+ return snd_soc_dapm_add_routes(dapm, &route, 1);
+}
+
static const struct snd_soc_dai_driver hdmi_i2s_dai = {
.name = "i2s-hifi",
.id = DAI_ID_I2S,
+ .probe = hdmi_dai_probe,
.playback = {
.stream_name = "I2S Playback",
.channels_min = 2,
@@ -707,6 +717,7 @@ static const struct snd_soc_dai_driver hdmi_i2s_dai = {
static const struct snd_soc_dai_driver hdmi_spdif_dai = {
.name = "spdif-hifi",
.id = DAI_ID_SPDIF,
+ .probe = hdmi_dai_probe,
.playback = {
.stream_name = "SPDIF Playback",
.channels_min = 2,
@@ -733,8 +744,6 @@ static int hdmi_of_xlate_dai_id(struct snd_soc_component *component,
static const struct snd_soc_component_driver hdmi_driver = {
.dapm_widgets = hdmi_widgets,
.num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
- .dapm_routes = hdmi_routes,
- .num_dapm_routes = ARRAY_SIZE(hdmi_routes),
.of_xlate_dai_id = hdmi_of_xlate_dai_id,
.idle_bias_on = 1,
.use_pmdown_time = 1,
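
The hdmi-codec.c change replaces the static "TX" <- "Playback" route with a route added from each DAI's probe, presumably because the route source must name the DAI's actual playback stream. Given the stream names in this diff, the DAPM graph ends up with the equivalent of the following (a sketch, not code from the driver):

	static const struct snd_soc_dapm_route hdmi_routes_sketch[] = {
		{ "TX", NULL, "I2S Playback" },
		{ "TX", NULL, "SPDIF Playback" },
	};
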
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index a92586106932..92b7125ea169 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -488,6 +488,7 @@ static const DECLARE_TLV_DB_RANGE(max98373_bde_gain_tlv,
static bool max98373_readable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
+ case MAX98373_R2000_SW_RESET:
case MAX98373_R2001_INT_RAW1 ... MAX98373_R200C_INT_EN3:
case MAX98373_R2010_IRQ_CTRL:
case MAX98373_R2014_THERM_WARN_THRESH
diff --git a/sound/soc/codecs/max9850.c b/sound/soc/codecs/max9850.c
index 74d7f52c7e73..6e6134589588 100644
--- a/sound/soc/codecs/max9850.c
+++ b/sound/soc/codecs/max9850.c
@@ -52,9 +52,9 @@ static bool max9850_volatile_register(struct device *dev, unsigned int reg)
switch (reg) {
case MAX9850_STATUSA:
case MAX9850_STATUSB:
- return 1;
+ return true;
default:
- return 0;
+ return false;
}
}
diff --git a/sound/soc/codecs/nau8540.c b/sound/soc/codecs/nau8540.c
index 17104f8dc1a9..e3c8cd17daf2 100644
--- a/sound/soc/codecs/nau8540.c
+++ b/sound/soc/codecs/nau8540.c
@@ -362,11 +362,8 @@ static const struct snd_soc_dapm_route nau8540_dapm_routes[] = {
static int nau8540_clock_check(struct nau8540 *nau8540, int rate, int osr)
{
- int osrate;
-
if (osr >= ARRAY_SIZE(osr_adc_sel))
return -EINVAL;
- osrate = osr_adc_sel[osr].osr;
if (rate * osr > CLK_ADC_MAX) {
dev_err(nau8540->dev, "exceed the maximum frequency of CLK_ADC\n");
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index 6bd14453f06e..468d5143e2c4 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -1274,7 +1274,7 @@ static int nau8824_calc_fll_param(unsigned int fll_in,
fvco_max = 0;
fvco_sel = ARRAY_SIZE(mclk_src_scaling);
for (i = 0; i < ARRAY_SIZE(mclk_src_scaling); i++) {
- fvco = 256 * fs * 2 * mclk_src_scaling[i].param;
+ fvco = 256ULL * fs * 2 * mclk_src_scaling[i].param;
if (fvco > NAU_FVCO_MIN && fvco < NAU_FVCO_MAX &&
fvco_max < fvco) {
fvco_max = fvco;
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index dc6ea4987b7d..b9fed99d8b5e 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -2016,7 +2016,7 @@ static int nau8825_calc_fll_param(unsigned int fll_in, unsigned int fs,
fvco_max = 0;
fvco_sel = ARRAY_SIZE(mclk_src_scaling);
for (i = 0; i < ARRAY_SIZE(mclk_src_scaling); i++) {
- fvco = 256 * fs * 2 * mclk_src_scaling[i].param;
+ fvco = 256ULL * fs * 2 * mclk_src_scaling[i].param;
if (fvco > NAU_FVCO_MIN && fvco < NAU_FVCO_MAX &&
fvco_max < fvco) {
fvco_max = fvco;
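
The nau8824 and nau8825 hunks fix the same 32-bit overflow (the nau8540 hunk simply drops a dead assignment): 256 * fs * 2 * param is evaluated entirely in int before being widened for the fvco comparison. A standalone illustration, with fs = 48000 and param = 512 chosen purely for the demonstration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int fs = 48000, param = 512;

		/* 32-bit arithmetic wraps first, then the result widens */
		unsigned long long bad  = 256 * fs * 2 * param;
		/* the ULL operand forces 64-bit arithmetic throughout */
		unsigned long long good = 256ULL * fs * 2 * param;

		printf("bad=%llu good=%llu\n", bad, good);
		return 0;
	}

bad prints 3992977408 while good prints the intended 12582912000.
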
diff --git a/sound/soc/codecs/pcm1789.c b/sound/soc/codecs/pcm1789.c
index 21f15219b3ad..8df6447c76a6 100644
--- a/sound/soc/codecs/pcm1789.c
+++ b/sound/soc/codecs/pcm1789.c
@@ -262,8 +262,7 @@ int pcm1789_common_exit(struct device *dev)
{
struct pcm1789_private *priv = dev_get_drvdata(dev);
- if (&priv->work)
- flush_work(&priv->work);
+ flush_work(&priv->work);
return 0;
}
diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c
index 88fde70b1e9e..690c26e7389e 100644
--- a/sound/soc/codecs/pcm186x.c
+++ b/sound/soc/codecs/pcm186x.c
@@ -265,7 +265,7 @@ static int pcm186x_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_component *component = dai->component;
struct pcm186x_priv *priv = snd_soc_component_get_drvdata(component);
unsigned int rate = params_rate(params);
- unsigned int format = params_format(params);
+ snd_pcm_format_t format = params_format(params);
unsigned int width = params_width(params);
unsigned int channels = params_channels(params);
unsigned int div_lrck;
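
The pcm186x.c change is a sparse type fix: params_format() returns snd_pcm_format_t, a __bitwise type, so storing it in a plain unsigned int draws a sparse warning. The typed value should only be compared against SNDRV_PCM_FORMAT_* constants; a hypothetical excerpt of that shape:

	static int my_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
	{
		snd_pcm_format_t format = params_format(params);

		/* compare only against typed SNDRV_PCM_FORMAT_* constants */
		if (format == SNDRV_PCM_FORMAT_S24_LE)
			return 0;

		return -EINVAL;
	}
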
diff --git a/sound/soc/codecs/rt1305.c b/sound/soc/codecs/rt1305.c
index f4c8c45f4010..c4452efc7970 100644
--- a/sound/soc/codecs/rt1305.c
+++ b/sound/soc/codecs/rt1305.c
@@ -1066,7 +1066,7 @@ static void rt1305_calibrate(struct rt1305_priv *rt1305)
pr_debug("Left_rhl = 0x%x rh=0x%x rl=0x%x\n", rhl, rh, rl);
pr_info("Left channel %d.%dohm\n", (r0ohm/10), (r0ohm%10));
- r0l = 562949953421312;
+ r0l = 562949953421312ULL;
if (rhl != 0)
do_div(r0l, rhl);
pr_debug("Left_r0 = 0x%llx\n", r0l);
@@ -1083,7 +1083,7 @@ static void rt1305_calibrate(struct rt1305_priv *rt1305)
pr_debug("Right_rhl = 0x%x rh=0x%x rl=0x%x\n", rhl, rh, rl);
pr_info("Right channel %d.%dohm\n", (r0ohm/10), (r0ohm%10));
- r0r = 562949953421312;
+ r0r = 562949953421312ULL;
if (rhl != 0)
do_div(r0r, rhl);
pr_debug("Right_r0 = 0x%llx\n", r0r);
@@ -1150,17 +1150,11 @@ static int rt1305_i2c_probe(struct i2c_client *i2c,
rt1305_reset(rt1305->regmap);
rt1305_calibrate(rt1305);
- return snd_soc_register_component(&i2c->dev, &soc_component_dev_rt1305,
+ return devm_snd_soc_register_component(&i2c->dev,
+ &soc_component_dev_rt1305,
rt1305_dai, ARRAY_SIZE(rt1305_dai));
}
-static int rt1305_i2c_remove(struct i2c_client *i2c)
-{
- snd_soc_unregister_component(&i2c->dev);
-
- return 0;
-}
-
static void rt1305_i2c_shutdown(struct i2c_client *client)
{
struct rt1305_priv *rt1305 = i2c_get_clientdata(client);
@@ -1180,7 +1174,6 @@ static struct i2c_driver rt1305_i2c_driver = {
#endif
},
.probe = rt1305_i2c_probe,
- .remove = rt1305_i2c_remove,
.shutdown = rt1305_i2c_shutdown,
.id_table = rt1305_i2c_id,
};
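
rt1305.c carries two cleanups. The ULL suffix makes the 64-bit unsigned type of the r0 constant explicit for do_div(), which divides a u64 in place by a 32-bit divisor; and devm_snd_soc_register_component() ties the component's lifetime to the device, which is what lets the hand-written .remove callback disappear. The do_div() contract, sketched with hypothetical values:

	u64 r0 = 562949953421312ULL;	/* the 2^49 constant from the diff */
	u32 rhl = 0x1234;		/* hypothetical measured value */
	u32 rem;

	if (rhl != 0)
		rem = do_div(r0, rhl);	/* r0 becomes the quotient;
					 * do_div() returns the remainder */
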
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 18686ffb0cd5..6478d10c4f4a 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -268,7 +268,6 @@ static const struct snd_pcm_ops rt5514_spi_pcm_ops = {
.hw_params = rt5514_spi_hw_params,
.hw_free = rt5514_spi_hw_free,
.pointer = rt5514_spi_pcm_pointer,
- .mmap = snd_pcm_lib_mmap_vmalloc,
.page = snd_pcm_lib_get_vmalloc_page,
};
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index 1570b91bf018..dca82dd6e3bf 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -64,8 +64,8 @@ static const struct reg_sequence rt5514_patch[] = {
{RT5514_ANA_CTRL_LDO10, 0x00028604},
{RT5514_ANA_CTRL_ADCFED, 0x00000800},
{RT5514_ASRC_IN_CTRL1, 0x00000003},
- {RT5514_DOWNFILTER0_CTRL3, 0x10000362},
- {RT5514_DOWNFILTER1_CTRL3, 0x10000362},
+ {RT5514_DOWNFILTER0_CTRL3, 0x10000352},
+ {RT5514_DOWNFILTER1_CTRL3, 0x10000352},
};
static const struct reg_default rt5514_reg[] = {
@@ -92,10 +92,10 @@ static const struct reg_default rt5514_reg[] = {
{RT5514_ASRC_IN_CTRL1, 0x00000003},
{RT5514_DOWNFILTER0_CTRL1, 0x00020c2f},
{RT5514_DOWNFILTER0_CTRL2, 0x00020c2f},
- {RT5514_DOWNFILTER0_CTRL3, 0x10000362},
+ {RT5514_DOWNFILTER0_CTRL3, 0x10000352},
{RT5514_DOWNFILTER1_CTRL1, 0x00020c2f},
{RT5514_DOWNFILTER1_CTRL2, 0x00020c2f},
- {RT5514_DOWNFILTER1_CTRL3, 0x10000362},
+ {RT5514_DOWNFILTER1_CTRL3, 0x10000352},
{RT5514_ANA_CTRL_LDO10, 0x00028604},
{RT5514_ANA_CTRL_LDO18_16, 0x02000345},
{RT5514_ANA_CTRL_ADC12, 0x0000a2a8},
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index cf6dce69eb2a..865f49ac38dd 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -105,9 +105,9 @@ static bool rt5631_volatile_register(struct device *dev, unsigned int reg)
case RT5631_INDEX_ADD:
case RT5631_INDEX_DATA:
case RT5631_EQ_CTRL:
- return 1;
+ return true;
default:
- return 0;
+ return false;
}
}
@@ -164,9 +164,9 @@ static bool rt5631_readable_register(struct device *dev, unsigned int reg)
case RT5631_VENDOR_ID:
case RT5631_VENDOR_ID1:
case RT5631_VENDOR_ID2:
- return 1;
+ return true;
default:
- return 0;
+ return false;
}
}
@@ -229,10 +229,10 @@ static SOC_ENUM_SINGLE_DECL(rt5631_spk_ratio_enum, RT5631_GEN_PUR_CTRL_REG,
static const struct snd_kcontrol_new rt5631_snd_controls[] = {
/* MIC */
SOC_ENUM("MIC1 Mode Control", rt5631_mic1_mode_enum),
- SOC_SINGLE_TLV("MIC1 Boost", RT5631_MIC_CTRL_2,
+ SOC_SINGLE_TLV("MIC1 Boost Volume", RT5631_MIC_CTRL_2,
RT5631_MIC1_BOOST_SHIFT, 8, 0, mic_bst_tlv),
SOC_ENUM("MIC2 Mode Control", rt5631_mic2_mode_enum),
- SOC_SINGLE_TLV("MIC2 Boost", RT5631_MIC_CTRL_2,
+ SOC_SINGLE_TLV("MIC2 Boost Volume", RT5631_MIC_CTRL_2,
RT5631_MIC2_BOOST_SHIFT, 8, 0, mic_bst_tlv),
/* MONO IN */
SOC_ENUM("MONOIN Mode Control", rt5631_monoin_mode_enum),
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 8bf8d360c25f..27770143ae8f 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -1665,6 +1665,7 @@ static int get_sdp_info(struct snd_soc_component *component, int dai_id)
break;
case RT5640_IF_113:
ret |= RT5640_U_IF1;
+ /* fall through */
case RT5640_IF_312:
case RT5640_IF_213:
ret |= RT5640_U_IF2;
@@ -1680,6 +1681,7 @@ static int get_sdp_info(struct snd_soc_component *component, int dai_id)
break;
case RT5640_IF_223:
ret |= RT5640_U_IF1;
+ /* fall through */
case RT5640_IF_123:
case RT5640_IF_321:
ret |= RT5640_U_IF2;
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index 6b5669f3e85d..985852fd9723 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -331,11 +331,13 @@ static const struct snd_kcontrol_new rt5651_snd_controls[] = {
SOC_DOUBLE_TLV("Mono DAC Playback Volume", RT5651_DAC2_DIG_VOL,
RT5651_L_VOL_SFT, RT5651_R_VOL_SFT,
175, 0, dac_vol_tlv),
- /* IN1/IN2 Control */
+ /* IN1/IN2/IN3 Control */
SOC_SINGLE_TLV("IN1 Boost", RT5651_IN1_IN2,
RT5651_BST_SFT1, 8, 0, bst_tlv),
SOC_SINGLE_TLV("IN2 Boost", RT5651_IN1_IN2,
RT5651_BST_SFT2, 8, 0, bst_tlv),
+ SOC_SINGLE_TLV("IN3 Boost", RT5651_IN3,
+ RT5651_BST_SFT1, 8, 0, bst_tlv),
/* INL/INR Volume Control */
SOC_DOUBLE_TLV("IN Capture Volume", RT5651_INL1_INR1_VOL,
RT5651_INL_VOL_SFT, RT5651_INR_VOL_SFT,
@@ -1581,6 +1583,24 @@ static void rt5651_disable_micbias1_for_ovcd(struct snd_soc_component *component
snd_soc_dapm_mutex_unlock(dapm);
}
+static void rt5651_enable_micbias1_ovcd_irq(struct snd_soc_component *component)
+{
+ struct rt5651_priv *rt5651 = snd_soc_component_get_drvdata(component);
+
+ snd_soc_component_update_bits(component, RT5651_IRQ_CTRL2,
+ RT5651_IRQ_MB1_OC_MASK, RT5651_IRQ_MB1_OC_NOR);
+ rt5651->ovcd_irq_enabled = true;
+}
+
+static void rt5651_disable_micbias1_ovcd_irq(struct snd_soc_component *component)
+{
+ struct rt5651_priv *rt5651 = snd_soc_component_get_drvdata(component);
+
+ snd_soc_component_update_bits(component, RT5651_IRQ_CTRL2,
+ RT5651_IRQ_MB1_OC_MASK, RT5651_IRQ_MB1_OC_BP);
+ rt5651->ovcd_irq_enabled = false;
+}
+
static void rt5651_clear_micbias1_ovcd(struct snd_soc_component *component)
{
snd_soc_component_update_bits(component, RT5651_IRQ_CTRL2,
@@ -1622,10 +1642,80 @@ static bool rt5651_jack_inserted(struct snd_soc_component *component)
return val == 0;
}
-/* Jack detect timings */
+/* Jack detect and button-press timings */
#define JACK_SETTLE_TIME 100 /* milliseconds */
#define JACK_DETECT_COUNT 5
#define JACK_DETECT_MAXCOUNT 20 /* Approx. 2 seconds worth of tries */
+#define JACK_UNPLUG_TIME 80 /* milliseconds */
+#define BP_POLL_TIME 10 /* milliseconds */
+#define BP_POLL_MAXCOUNT 200 /* assume something is wrong after this */
+#define BP_THRESHOLD 3
+
+static void rt5651_start_button_press_work(struct snd_soc_component *component)
+{
+ struct rt5651_priv *rt5651 = snd_soc_component_get_drvdata(component);
+
+ rt5651->poll_count = 0;
+ rt5651->press_count = 0;
+ rt5651->release_count = 0;
+ rt5651->pressed = false;
+ rt5651->press_reported = false;
+ rt5651_clear_micbias1_ovcd(component);
+ schedule_delayed_work(&rt5651->bp_work, msecs_to_jiffies(BP_POLL_TIME));
+}
+
+static void rt5651_button_press_work(struct work_struct *work)
+{
+ struct rt5651_priv *rt5651 =
+ container_of(work, struct rt5651_priv, bp_work.work);
+ struct snd_soc_component *component = rt5651->component;
+
+ /* Check the jack was not removed underneath us */
+ if (!rt5651_jack_inserted(component))
+ return;
+
+ if (rt5651_micbias1_ovcd(component)) {
+ rt5651->release_count = 0;
+ rt5651->press_count++;
+ /* Remember until after the JACK_UNPLUG_TIME wait */
+ if (rt5651->press_count >= BP_THRESHOLD)
+ rt5651->pressed = true;
+ rt5651_clear_micbias1_ovcd(component);
+ } else {
+ rt5651->press_count = 0;
+ rt5651->release_count++;
+ }
+
+ /*
+ * The pins get temporarily shorted on jack unplug, so we poll for
+ * at least JACK_UNPLUG_TIME milliseconds before reporting a press.
+ */
+ rt5651->poll_count++;
+ if (rt5651->poll_count < (JACK_UNPLUG_TIME / BP_POLL_TIME)) {
+ schedule_delayed_work(&rt5651->bp_work,
+ msecs_to_jiffies(BP_POLL_TIME));
+ return;
+ }
+
+ if (rt5651->pressed && !rt5651->press_reported) {
+ dev_dbg(component->dev, "headset button press\n");
+ snd_soc_jack_report(rt5651->hp_jack, SND_JACK_BTN_0,
+ SND_JACK_BTN_0);
+ rt5651->press_reported = true;
+ }
+
+ if (rt5651->release_count >= BP_THRESHOLD) {
+ if (rt5651->press_reported) {
+ dev_dbg(component->dev, "headset button release\n");
+ snd_soc_jack_report(rt5651->hp_jack, 0, SND_JACK_BTN_0);
+ }
+ /* Re-enable OVCD IRQ to detect next press */
+ rt5651_enable_micbias1_ovcd_irq(component);
+ return; /* Stop polling */
+ }
+
+ schedule_delayed_work(&rt5651->bp_work, msecs_to_jiffies(BP_POLL_TIME));
+}
static int rt5651_detect_headset(struct snd_soc_component *component)
{
@@ -1676,15 +1766,58 @@ static void rt5651_jack_detect_work(struct work_struct *work)
{
struct rt5651_priv *rt5651 =
container_of(work, struct rt5651_priv, jack_detect_work);
+ struct snd_soc_component *component = rt5651->component;
int report = 0;
- if (rt5651_jack_inserted(rt5651->component)) {
- rt5651_enable_micbias1_for_ovcd(rt5651->component);
- report = rt5651_detect_headset(rt5651->component);
- rt5651_disable_micbias1_for_ovcd(rt5651->component);
+ if (!rt5651_jack_inserted(component)) {
+ /* Jack removed, or spurious IRQ? */
+ if (rt5651->hp_jack->status & SND_JACK_HEADPHONE) {
+ if (rt5651->hp_jack->status & SND_JACK_MICROPHONE) {
+ cancel_delayed_work_sync(&rt5651->bp_work);
+ rt5651_disable_micbias1_ovcd_irq(component);
+ rt5651_disable_micbias1_for_ovcd(component);
+ }
+ snd_soc_jack_report(rt5651->hp_jack, 0,
+ SND_JACK_HEADSET | SND_JACK_BTN_0);
+ dev_dbg(component->dev, "jack unplugged\n");
+ }
+ } else if (!(rt5651->hp_jack->status & SND_JACK_HEADPHONE)) {
+ /* Jack inserted */
+ WARN_ON(rt5651->ovcd_irq_enabled);
+ rt5651_enable_micbias1_for_ovcd(component);
+ report = rt5651_detect_headset(component);
+ if (report == SND_JACK_HEADSET) {
+ /* Enable ovcd IRQ for button press detect. */
+ rt5651_enable_micbias1_ovcd_irq(component);
+ } else {
+ /* No more need for overcurrent detect. */
+ rt5651_disable_micbias1_for_ovcd(component);
+ }
+ dev_dbg(component->dev, "detect report %#02x\n", report);
+ snd_soc_jack_report(rt5651->hp_jack, report, SND_JACK_HEADSET);
+ } else if (rt5651->ovcd_irq_enabled && rt5651_micbias1_ovcd(component)) {
+ dev_dbg(component->dev, "OVCD IRQ\n");
+
+ /*
+ * The ovcd IRQ keeps firing while the button is pressed, so
+ * we disable it and start polling the button until released.
+ *
+ * The disable will make the IRQ pin 0 again, and since we get
+ * IRQs on both edges (so as to detect both jack plug-in and
+ * unplug), this means we will immediately get another IRQ.
+ * The ovcd_irq_enabled check above makes the second IRQ a no-op.
+ */
+ rt5651_disable_micbias1_ovcd_irq(component);
+ rt5651_start_button_press_work(component);
+
+ /*
+ * If the jack-detect IRQ flag goes high (unplug) after our
+ * above rt5651_jack_inserted() check and before we have
+ * disabled the OVCD IRQ, the IRQ pin will stay high and as
+ * we react to edges, we miss the unplug event -> recheck.
+ */
+ queue_work(system_long_wq, &rt5651->jack_detect_work);
}
-
- snd_soc_jack_report(rt5651->hp_jack, report, SND_JACK_HEADSET);
}
static irqreturn_t rt5651_irq(int irq, void *data)
@@ -1696,14 +1829,18 @@ static irqreturn_t rt5651_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int rt5651_set_jack(struct snd_soc_component *component,
- struct snd_soc_jack *hp_jack, void *data)
+static void rt5651_cancel_work(void *data)
{
- struct rt5651_priv *rt5651 = snd_soc_component_get_drvdata(component);
- int ret;
+ struct rt5651_priv *rt5651 = data;
- if (!rt5651->irq)
- return -EINVAL;
+ cancel_work_sync(&rt5651->jack_detect_work);
+ cancel_delayed_work_sync(&rt5651->bp_work);
+}
+
+static void rt5651_enable_jack_detect(struct snd_soc_component *component,
+ struct snd_soc_jack *hp_jack)
+{
+ struct rt5651_priv *rt5651 = snd_soc_component_get_drvdata(component);
/* IRQ output on GPIO1 */
snd_soc_component_update_bits(component, RT5651_GPIO_CTRL1,
@@ -1730,10 +1867,10 @@ static int rt5651_set_jack(struct snd_soc_component *component,
RT5651_JD2_IRQ_EN, RT5651_JD2_IRQ_EN);
break;
case RT5651_JD_NULL:
- return 0;
+ return;
default:
dev_err(component->dev, "Currently only JD1_1 / JD1_2 / JD2 are supported\n");
- return -EINVAL;
+ return;
}
/* Enable jack detect power */
@@ -1767,19 +1904,39 @@ static int rt5651_set_jack(struct snd_soc_component *component,
RT5651_MB1_OC_STKY_MASK, RT5651_MB1_OC_STKY_EN);
rt5651->hp_jack = hp_jack;
-
- ret = devm_request_threaded_irq(component->dev, rt5651->irq, NULL,
- rt5651_irq,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT, "rt5651", rt5651);
- if (ret) {
- dev_err(component->dev, "Failed to reguest IRQ: %d\n", ret);
- return ret;
+ if (rt5651->hp_jack->status & SND_JACK_MICROPHONE) {
+ rt5651_enable_micbias1_for_ovcd(component);
+ rt5651_enable_micbias1_ovcd_irq(component);
}
+ enable_irq(rt5651->irq);
/* sync initial jack state */
queue_work(system_power_efficient_wq, &rt5651->jack_detect_work);
+}
+
+static void rt5651_disable_jack_detect(struct snd_soc_component *component)
+{
+ struct rt5651_priv *rt5651 = snd_soc_component_get_drvdata(component);
+
+ disable_irq(rt5651->irq);
+ rt5651_cancel_work(rt5651);
+
+ if (rt5651->hp_jack->status & SND_JACK_MICROPHONE) {
+ rt5651_disable_micbias1_ovcd_irq(component);
+ rt5651_disable_micbias1_for_ovcd(component);
+ snd_soc_jack_report(rt5651->hp_jack, 0, SND_JACK_BTN_0);
+ }
+
+ rt5651->hp_jack = NULL;
+}
+
+static int rt5651_set_jack(struct snd_soc_component *component,
+ struct snd_soc_jack *jack, void *data)
+{
+ if (jack)
+ rt5651_enable_jack_detect(component, jack);
+ else
+ rt5651_disable_jack_detect(component);
return 0;
}
@@ -2034,8 +2191,26 @@ static int rt5651_i2c_probe(struct i2c_client *i2c,
rt5651->irq = i2c->irq;
rt5651->hp_mute = 1;
+ INIT_DELAYED_WORK(&rt5651->bp_work, rt5651_button_press_work);
INIT_WORK(&rt5651->jack_detect_work, rt5651_jack_detect_work);
+ /* Make sure work is stopped on probe-error / remove */
+ ret = devm_add_action_or_reset(&i2c->dev, rt5651_cancel_work, rt5651);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(&i2c->dev, rt5651->irq, rt5651_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+ | IRQF_ONESHOT, "rt5651", rt5651);
+ if (ret == 0) {
+ /* Gets re-enabled by rt5651_set_jack() */
+ disable_irq(rt5651->irq);
+ } else {
+ dev_warn(&i2c->dev, "Failed to request IRQ %d: %d\n",
+ rt5651->irq, ret);
+ rt5651->irq = -ENXIO;
+ }
+
ret = devm_snd_soc_register_component(&i2c->dev,
&soc_component_dev_rt5651,
rt5651_dai, ARRAY_SIZE(rt5651_dai));
@@ -2043,15 +2218,6 @@ static int rt5651_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static int rt5651_i2c_remove(struct i2c_client *i2c)
-{
- struct rt5651_priv *rt5651 = i2c_get_clientdata(i2c);
-
- cancel_work_sync(&rt5651->jack_detect_work);
-
- return 0;
-}
-
static struct i2c_driver rt5651_i2c_driver = {
.driver = {
.name = "rt5651",
@@ -2059,7 +2225,6 @@ static struct i2c_driver rt5651_i2c_driver = {
.of_match_table = of_match_ptr(rt5651_of_match),
},
.probe = rt5651_i2c_probe,
- .remove = rt5651_i2c_remove,
.id_table = rt5651_i2c_id,
};
module_i2c_driver(rt5651_i2c_driver);
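
Two probe-time changes carry most of the rt5651 rework. First, devm_add_action_or_reset() registers rt5651_cancel_work() so the jack-detect and button-press work are cancelled both on a probe error and on unbind, replacing the hand-written .remove. Second, the IRQ is requested once at probe and immediately disabled; enable_irq()/disable_irq() then toggle it from rt5651_set_jack(). A sketch of the first pattern only, with hypothetical my_* names:

	struct my_priv {
		struct work_struct work;
	};

	static void my_cancel_work(void *data)
	{
		struct my_priv *priv = data;

		cancel_work_sync(&priv->work);
	}

	static int my_probe(struct i2c_client *i2c)
	{
		struct my_priv *priv;
		int ret;

		priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* the action runs on probe failure and on unbind alike,
		 * so no .remove callback is needed just to cancel work */
		ret = devm_add_action_or_reset(&i2c->dev, my_cancel_work,
					       priv);
		if (ret)
			return ret;

		return 0;
	}
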
diff --git a/sound/soc/codecs/rt5651.h b/sound/soc/codecs/rt5651.h
index 3a0968c53fde..ac6de6fb5414 100644
--- a/sound/soc/codecs/rt5651.h
+++ b/sound/soc/codecs/rt5651.h
@@ -2071,8 +2071,16 @@ struct rt5651_pll_code {
struct rt5651_priv {
struct snd_soc_component *component;
struct regmap *regmap;
+ /* Jack and button detect data */
struct snd_soc_jack *hp_jack;
struct work_struct jack_detect_work;
+ struct delayed_work bp_work;
+ bool ovcd_irq_enabled;
+ bool pressed;
+ bool press_reported;
+ int press_count;
+ int release_count;
+ int poll_count;
unsigned int jd_src;
unsigned int ovcd_th;
unsigned int ovcd_sf;
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 8a0181a2db08..9b7a1833d331 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -4417,6 +4417,7 @@ static int rt5677_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
break;
case 25:
slot_width_25 = 0x8080;
+ /* fall through */
case 24:
val |= (2 << 8);
break;
@@ -5007,7 +5008,7 @@ static const struct regmap_config rt5677_regmap = {
};
static const struct of_device_id rt5677_of_match[] = {
- { .compatible = "realtek,rt5677", RT5677 },
+ { .compatible = "realtek,rt5677", .data = (const void *)RT5677 },
{ }
};
MODULE_DEVICE_TABLE(of, rt5677_of_match);
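
The one-line rt5677_of_match fix is about initializer semantics: the bare RT5677 value was positionally initializing the member after .compatible, which is the const void *data pointer, without a cast. The explicit .data assignment keeps the same behaviour but makes it well-typed. The consuming side typically looks like this sketch (assuming an enum whose values start at a non-zero constant):

	static int my_probe(struct platform_device *pdev)
	{
		unsigned long type;

		type = (unsigned long)of_device_get_match_data(&pdev->dev);
		if (!type)
			return -ENODEV;

		/* select per-variant behaviour based on 'type' */
		return 0;
	}
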
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
new file mode 100644
index 000000000000..640d400ca013
--- /dev/null
+++ b/sound/soc/codecs/rt5682.c
@@ -0,0 +1,2681 @@
+/*
+ * rt5682.c -- RT5682 ALSA SoC audio component driver
+ *
+ * Copyright 2018 Realtek Semiconductor Corp.
+ * Author: Bard Liao <bardliao@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mutex.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/jack.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/rt5682.h>
+
+#include "rl6231.h"
+#include "rt5682.h"
+
+#define RT5682_NUM_SUPPLIES 3
+
+static const char *rt5682_supply_names[RT5682_NUM_SUPPLIES] = {
+ "AVDD",
+ "MICVDD",
+ "VBAT",
+};
+
+struct rt5682_priv {
+ struct snd_soc_component *component;
+ struct rt5682_platform_data pdata;
+ struct regmap *regmap;
+ struct snd_soc_jack *hs_jack;
+ struct regulator_bulk_data supplies[RT5682_NUM_SUPPLIES];
+ struct delayed_work jack_detect_work;
+ struct delayed_work jd_check_work;
+ struct mutex calibrate_mutex;
+
+ int sysclk;
+ int sysclk_src;
+ int lrck[RT5682_AIFS];
+ int bclk[RT5682_AIFS];
+ int master[RT5682_AIFS];
+
+ int pll_src;
+ int pll_in;
+ int pll_out;
+
+ int jack_type;
+};
+
+static const struct reg_sequence patch_list[] = {
+ {0x01c1, 0x1000},
+};
+
+static const struct reg_default rt5682_reg[] = {
+ {0x0002, 0x8080},
+ {0x0003, 0x8000},
+ {0x0005, 0x0000},
+ {0x0006, 0x0000},
+ {0x0008, 0x800f},
+ {0x000b, 0x0000},
+ {0x0010, 0x4040},
+ {0x0011, 0x0000},
+ {0x0012, 0x1404},
+ {0x0013, 0x1000},
+ {0x0014, 0xa00a},
+ {0x0015, 0x0404},
+ {0x0016, 0x0404},
+ {0x0019, 0xafaf},
+ {0x001c, 0x2f2f},
+ {0x001f, 0x0000},
+ {0x0022, 0x5757},
+ {0x0023, 0x0039},
+ {0x0024, 0x000b},
+ {0x0026, 0xc0c4},
+ {0x0029, 0x8080},
+ {0x002a, 0xa0a0},
+ {0x002b, 0x0300},
+ {0x0030, 0x0000},
+ {0x003c, 0x0080},
+ {0x0044, 0x0c0c},
+ {0x0049, 0x0000},
+ {0x0061, 0x0000},
+ {0x0062, 0x0000},
+ {0x0063, 0x003f},
+ {0x0064, 0x0000},
+ {0x0065, 0x0000},
+ {0x0066, 0x0030},
+ {0x0067, 0x0000},
+ {0x006b, 0x0000},
+ {0x006c, 0x0000},
+ {0x006d, 0x2200},
+ {0x006e, 0x0a10},
+ {0x0070, 0x8000},
+ {0x0071, 0x8000},
+ {0x0073, 0x0000},
+ {0x0074, 0x0000},
+ {0x0075, 0x0002},
+ {0x0076, 0x0001},
+ {0x0079, 0x0000},
+ {0x007a, 0x0000},
+ {0x007b, 0x0000},
+ {0x007c, 0x0100},
+ {0x007e, 0x0000},
+ {0x0080, 0x0000},
+ {0x0081, 0x0000},
+ {0x0082, 0x0000},
+ {0x0083, 0x0000},
+ {0x0084, 0x0000},
+ {0x0085, 0x0000},
+ {0x0086, 0x0005},
+ {0x0087, 0x0000},
+ {0x0088, 0x0000},
+ {0x008c, 0x0003},
+ {0x008d, 0x0000},
+ {0x008e, 0x0060},
+ {0x008f, 0x1000},
+ {0x0091, 0x0c26},
+ {0x0092, 0x0073},
+ {0x0093, 0x0000},
+ {0x0094, 0x0080},
+ {0x0098, 0x0000},
+ {0x009a, 0x0000},
+ {0x009b, 0x0000},
+ {0x009c, 0x0000},
+ {0x009d, 0x0000},
+ {0x009e, 0x100c},
+ {0x009f, 0x0000},
+ {0x00a0, 0x0000},
+ {0x00a3, 0x0002},
+ {0x00a4, 0x0001},
+ {0x00ae, 0x2040},
+ {0x00af, 0x0000},
+ {0x00b6, 0x0000},
+ {0x00b7, 0x0000},
+ {0x00b8, 0x0000},
+ {0x00b9, 0x0002},
+ {0x00be, 0x0000},
+ {0x00c0, 0x0160},
+ {0x00c1, 0x82a0},
+ {0x00c2, 0x0000},
+ {0x00d0, 0x0000},
+ {0x00d1, 0x2244},
+ {0x00d2, 0x3300},
+ {0x00d3, 0x2200},
+ {0x00d4, 0x0000},
+ {0x00d9, 0x0009},
+ {0x00da, 0x0000},
+ {0x00db, 0x0000},
+ {0x00dc, 0x00c0},
+ {0x00dd, 0x2220},
+ {0x00de, 0x3131},
+ {0x00df, 0x3131},
+ {0x00e0, 0x3131},
+ {0x00e2, 0x0000},
+ {0x00e3, 0x4000},
+ {0x00e4, 0x0aa0},
+ {0x00e5, 0x3131},
+ {0x00e6, 0x3131},
+ {0x00e7, 0x3131},
+ {0x00e8, 0x3131},
+ {0x00ea, 0xb320},
+ {0x00eb, 0x0000},
+ {0x00f0, 0x0000},
+ {0x00f1, 0x00d0},
+ {0x00f2, 0x00d0},
+ {0x00f6, 0x0000},
+ {0x00fa, 0x0000},
+ {0x00fb, 0x0000},
+ {0x00fc, 0x0000},
+ {0x00fd, 0x0000},
+ {0x00fe, 0x10ec},
+ {0x00ff, 0x6530},
+ {0x0100, 0xa0a0},
+ {0x010b, 0x0000},
+ {0x010c, 0xae00},
+ {0x010d, 0xaaa0},
+ {0x010e, 0x8aa2},
+ {0x010f, 0x02a2},
+ {0x0110, 0xc000},
+ {0x0111, 0x04a2},
+ {0x0112, 0x2800},
+ {0x0113, 0x0000},
+ {0x0117, 0x0100},
+ {0x0125, 0x0410},
+ {0x0132, 0x6026},
+ {0x0136, 0x5555},
+ {0x0138, 0x3700},
+ {0x013a, 0x2000},
+ {0x013b, 0x2000},
+ {0x013c, 0x2005},
+ {0x013f, 0x0000},
+ {0x0142, 0x0000},
+ {0x0145, 0x0002},
+ {0x0146, 0x0000},
+ {0x0147, 0x0000},
+ {0x0148, 0x0000},
+ {0x0149, 0x0000},
+ {0x0150, 0x79a1},
+ {0x0151, 0x0000},
+ {0x0160, 0x4ec0},
+ {0x0161, 0x0080},
+ {0x0162, 0x0200},
+ {0x0163, 0x0800},
+ {0x0164, 0x0000},
+ {0x0165, 0x0000},
+ {0x0166, 0x0000},
+ {0x0167, 0x000f},
+ {0x0168, 0x000f},
+ {0x0169, 0x0021},
+ {0x0190, 0x413d},
+ {0x0194, 0x0000},
+ {0x0195, 0x0000},
+ {0x0197, 0x0022},
+ {0x0198, 0x0000},
+ {0x0199, 0x0000},
+ {0x01af, 0x0000},
+ {0x01b0, 0x0400},
+ {0x01b1, 0x0000},
+ {0x01b2, 0x0000},
+ {0x01b3, 0x0000},
+ {0x01b4, 0x0000},
+ {0x01b5, 0x0000},
+ {0x01b6, 0x01c3},
+ {0x01b7, 0x02a0},
+ {0x01b8, 0x03e9},
+ {0x01b9, 0x1389},
+ {0x01ba, 0xc351},
+ {0x01bb, 0x0009},
+ {0x01bc, 0x0018},
+ {0x01bd, 0x002a},
+ {0x01be, 0x004c},
+ {0x01bf, 0x0097},
+ {0x01c0, 0x433d},
+ {0x01c2, 0x0000},
+ {0x01c3, 0x0000},
+ {0x01c4, 0x0000},
+ {0x01c5, 0x0000},
+ {0x01c6, 0x0000},
+ {0x01c7, 0x0000},
+ {0x01c8, 0x40af},
+ {0x01c9, 0x0702},
+ {0x01ca, 0x0000},
+ {0x01cb, 0x0000},
+ {0x01cc, 0x5757},
+ {0x01cd, 0x5757},
+ {0x01ce, 0x5757},
+ {0x01cf, 0x5757},
+ {0x01d0, 0x5757},
+ {0x01d1, 0x5757},
+ {0x01d2, 0x5757},
+ {0x01d3, 0x5757},
+ {0x01d4, 0x5757},
+ {0x01d5, 0x5757},
+ {0x01d6, 0x0000},
+ {0x01d7, 0x0008},
+ {0x01d8, 0x0029},
+ {0x01d9, 0x3333},
+ {0x01da, 0x0000},
+ {0x01db, 0x0004},
+ {0x01dc, 0x0000},
+ {0x01de, 0x7c00},
+ {0x01df, 0x0320},
+ {0x01e0, 0x06a1},
+ {0x01e1, 0x0000},
+ {0x01e2, 0x0000},
+ {0x01e3, 0x0000},
+ {0x01e4, 0x0000},
+ {0x01e6, 0x0001},
+ {0x01e7, 0x0000},
+ {0x01e8, 0x0000},
+ {0x01ea, 0x0000},
+ {0x01eb, 0x0000},
+ {0x01ec, 0x0000},
+ {0x01ed, 0x0000},
+ {0x01ee, 0x0000},
+ {0x01ef, 0x0000},
+ {0x01f0, 0x0000},
+ {0x01f1, 0x0000},
+ {0x01f2, 0x0000},
+ {0x01f3, 0x0000},
+ {0x01f4, 0x0000},
+ {0x0210, 0x6297},
+ {0x0211, 0xa005},
+ {0x0212, 0x824c},
+ {0x0213, 0xf7ff},
+ {0x0214, 0xf24c},
+ {0x0215, 0x0102},
+ {0x0216, 0x00a3},
+ {0x0217, 0x0048},
+ {0x0218, 0xa2c0},
+ {0x0219, 0x0400},
+ {0x021a, 0x00c8},
+ {0x021b, 0x00c0},
+ {0x021c, 0x0000},
+ {0x0250, 0x4500},
+ {0x0251, 0x40b3},
+ {0x0252, 0x0000},
+ {0x0253, 0x0000},
+ {0x0254, 0x0000},
+ {0x0255, 0x0000},
+ {0x0256, 0x0000},
+ {0x0257, 0x0000},
+ {0x0258, 0x0000},
+ {0x0259, 0x0000},
+ {0x025a, 0x0005},
+ {0x0270, 0x0000},
+ {0x02ff, 0x0110},
+ {0x0300, 0x001f},
+ {0x0301, 0x032c},
+ {0x0302, 0x5f21},
+ {0x0303, 0x4000},
+ {0x0304, 0x4000},
+ {0x0305, 0x06d5},
+ {0x0306, 0x8000},
+ {0x0307, 0x0700},
+ {0x0310, 0x4560},
+ {0x0311, 0xa4a8},
+ {0x0312, 0x7418},
+ {0x0313, 0x0000},
+ {0x0314, 0x0006},
+ {0x0315, 0xffff},
+ {0x0316, 0xc400},
+ {0x0317, 0x0000},
+ {0x03c0, 0x7e00},
+ {0x03c1, 0x8000},
+ {0x03c2, 0x8000},
+ {0x03c3, 0x8000},
+ {0x03c4, 0x8000},
+ {0x03c5, 0x8000},
+ {0x03c6, 0x8000},
+ {0x03c7, 0x8000},
+ {0x03c8, 0x8000},
+ {0x03c9, 0x8000},
+ {0x03ca, 0x8000},
+ {0x03cb, 0x8000},
+ {0x03cc, 0x8000},
+ {0x03d0, 0x0000},
+ {0x03d1, 0x0000},
+ {0x03d2, 0x0000},
+ {0x03d3, 0x0000},
+ {0x03d4, 0x2000},
+ {0x03d5, 0x2000},
+ {0x03d6, 0x0000},
+ {0x03d7, 0x0000},
+ {0x03d8, 0x2000},
+ {0x03d9, 0x2000},
+ {0x03da, 0x2000},
+ {0x03db, 0x2000},
+ {0x03dc, 0x0000},
+ {0x03dd, 0x0000},
+ {0x03de, 0x0000},
+ {0x03df, 0x2000},
+ {0x03e0, 0x0000},
+ {0x03e1, 0x0000},
+ {0x03e2, 0x0000},
+ {0x03e3, 0x0000},
+ {0x03e4, 0x0000},
+ {0x03e5, 0x0000},
+ {0x03e6, 0x0000},
+ {0x03e7, 0x0000},
+ {0x03e8, 0x0000},
+ {0x03e9, 0x0000},
+ {0x03ea, 0x0000},
+ {0x03eb, 0x0000},
+ {0x03ec, 0x0000},
+ {0x03ed, 0x0000},
+ {0x03ee, 0x0000},
+ {0x03ef, 0x0000},
+ {0x03f0, 0x0800},
+ {0x03f1, 0x0800},
+ {0x03f2, 0x0800},
+ {0x03f3, 0x0800},
+};
+
+static bool rt5682_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RT5682_RESET:
+ case RT5682_CBJ_CTRL_2:
+ case RT5682_INT_ST_1:
+ case RT5682_4BTN_IL_CMD_1:
+ case RT5682_AJD1_CTRL:
+ case RT5682_HP_CALIB_CTRL_1:
+ case RT5682_DEVICE_ID:
+ case RT5682_I2C_MODE:
+ case RT5682_HP_CALIB_CTRL_10:
+ case RT5682_EFUSE_CTRL_2:
+ case RT5682_JD_TOP_VC_VTRL:
+ case RT5682_HP_IMP_SENS_CTRL_19:
+ case RT5682_IL_CMD_1:
+ case RT5682_SAR_IL_CMD_2:
+ case RT5682_SAR_IL_CMD_4:
+ case RT5682_SAR_IL_CMD_10:
+ case RT5682_SAR_IL_CMD_11:
+ case RT5682_EFUSE_CTRL_6...RT5682_EFUSE_CTRL_11:
+ case RT5682_HP_CALIB_STA_1...RT5682_HP_CALIB_STA_11:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rt5682_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RT5682_RESET:
+ case RT5682_VERSION_ID:
+ case RT5682_VENDOR_ID:
+ case RT5682_DEVICE_ID:
+ case RT5682_HP_CTRL_1:
+ case RT5682_HP_CTRL_2:
+ case RT5682_HPL_GAIN:
+ case RT5682_HPR_GAIN:
+ case RT5682_I2C_CTRL:
+ case RT5682_CBJ_BST_CTRL:
+ case RT5682_CBJ_CTRL_1:
+ case RT5682_CBJ_CTRL_2:
+ case RT5682_CBJ_CTRL_3:
+ case RT5682_CBJ_CTRL_4:
+ case RT5682_CBJ_CTRL_5:
+ case RT5682_CBJ_CTRL_6:
+ case RT5682_CBJ_CTRL_7:
+ case RT5682_DAC1_DIG_VOL:
+ case RT5682_STO1_ADC_DIG_VOL:
+ case RT5682_STO1_ADC_BOOST:
+ case RT5682_HP_IMP_GAIN_1:
+ case RT5682_HP_IMP_GAIN_2:
+ case RT5682_SIDETONE_CTRL:
+ case RT5682_STO1_ADC_MIXER:
+ case RT5682_AD_DA_MIXER:
+ case RT5682_STO1_DAC_MIXER:
+ case RT5682_A_DAC1_MUX:
+ case RT5682_DIG_INF2_DATA:
+ case RT5682_REC_MIXER:
+ case RT5682_CAL_REC:
+ case RT5682_ALC_BACK_GAIN:
+ case RT5682_PWR_DIG_1:
+ case RT5682_PWR_DIG_2:
+ case RT5682_PWR_ANLG_1:
+ case RT5682_PWR_ANLG_2:
+ case RT5682_PWR_ANLG_3:
+ case RT5682_PWR_MIXER:
+ case RT5682_PWR_VOL:
+ case RT5682_CLK_DET:
+ case RT5682_RESET_LPF_CTRL:
+ case RT5682_RESET_HPF_CTRL:
+ case RT5682_DMIC_CTRL_1:
+ case RT5682_I2S1_SDP:
+ case RT5682_I2S2_SDP:
+ case RT5682_ADDA_CLK_1:
+ case RT5682_ADDA_CLK_2:
+ case RT5682_I2S1_F_DIV_CTRL_1:
+ case RT5682_I2S1_F_DIV_CTRL_2:
+ case RT5682_TDM_CTRL:
+ case RT5682_TDM_ADDA_CTRL_1:
+ case RT5682_TDM_ADDA_CTRL_2:
+ case RT5682_DATA_SEL_CTRL_1:
+ case RT5682_TDM_TCON_CTRL:
+ case RT5682_GLB_CLK:
+ case RT5682_PLL_CTRL_1:
+ case RT5682_PLL_CTRL_2:
+ case RT5682_PLL_TRACK_1:
+ case RT5682_PLL_TRACK_2:
+ case RT5682_PLL_TRACK_3:
+ case RT5682_PLL_TRACK_4:
+ case RT5682_PLL_TRACK_5:
+ case RT5682_PLL_TRACK_6:
+ case RT5682_PLL_TRACK_11:
+ case RT5682_SDW_REF_CLK:
+ case RT5682_DEPOP_1:
+ case RT5682_DEPOP_2:
+ case RT5682_HP_CHARGE_PUMP_1:
+ case RT5682_HP_CHARGE_PUMP_2:
+ case RT5682_MICBIAS_1:
+ case RT5682_MICBIAS_2:
+ case RT5682_PLL_TRACK_12:
+ case RT5682_PLL_TRACK_14:
+ case RT5682_PLL2_CTRL_1:
+ case RT5682_PLL2_CTRL_2:
+ case RT5682_PLL2_CTRL_3:
+ case RT5682_PLL2_CTRL_4:
+ case RT5682_RC_CLK_CTRL:
+ case RT5682_I2S_M_CLK_CTRL_1:
+ case RT5682_I2S2_F_DIV_CTRL_1:
+ case RT5682_I2S2_F_DIV_CTRL_2:
+ case RT5682_EQ_CTRL_1:
+ case RT5682_EQ_CTRL_2:
+ case RT5682_IRQ_CTRL_1:
+ case RT5682_IRQ_CTRL_2:
+ case RT5682_IRQ_CTRL_3:
+ case RT5682_IRQ_CTRL_4:
+ case RT5682_INT_ST_1:
+ case RT5682_GPIO_CTRL_1:
+ case RT5682_GPIO_CTRL_2:
+ case RT5682_GPIO_CTRL_3:
+ case RT5682_HP_AMP_DET_CTRL_1:
+ case RT5682_HP_AMP_DET_CTRL_2:
+ case RT5682_MID_HP_AMP_DET:
+ case RT5682_LOW_HP_AMP_DET:
+ case RT5682_DELAY_BUF_CTRL:
+ case RT5682_SV_ZCD_1:
+ case RT5682_SV_ZCD_2:
+ case RT5682_IL_CMD_1:
+ case RT5682_IL_CMD_2:
+ case RT5682_IL_CMD_3:
+ case RT5682_IL_CMD_4:
+ case RT5682_IL_CMD_5:
+ case RT5682_IL_CMD_6:
+ case RT5682_4BTN_IL_CMD_1:
+ case RT5682_4BTN_IL_CMD_2:
+ case RT5682_4BTN_IL_CMD_3:
+ case RT5682_4BTN_IL_CMD_4:
+ case RT5682_4BTN_IL_CMD_5:
+ case RT5682_4BTN_IL_CMD_6:
+ case RT5682_4BTN_IL_CMD_7:
+ case RT5682_ADC_STO1_HP_CTRL_1:
+ case RT5682_ADC_STO1_HP_CTRL_2:
+ case RT5682_AJD1_CTRL:
+ case RT5682_JD1_THD:
+ case RT5682_JD2_THD:
+ case RT5682_JD_CTRL_1:
+ case RT5682_DUMMY_1:
+ case RT5682_DUMMY_2:
+ case RT5682_DUMMY_3:
+ case RT5682_DAC_ADC_DIG_VOL1:
+ case RT5682_BIAS_CUR_CTRL_2:
+ case RT5682_BIAS_CUR_CTRL_3:
+ case RT5682_BIAS_CUR_CTRL_4:
+ case RT5682_BIAS_CUR_CTRL_5:
+ case RT5682_BIAS_CUR_CTRL_6:
+ case RT5682_BIAS_CUR_CTRL_7:
+ case RT5682_BIAS_CUR_CTRL_8:
+ case RT5682_BIAS_CUR_CTRL_9:
+ case RT5682_BIAS_CUR_CTRL_10:
+ case RT5682_VREF_REC_OP_FB_CAP_CTRL:
+ case RT5682_CHARGE_PUMP_1:
+ case RT5682_DIG_IN_CTRL_1:
+ case RT5682_PAD_DRIVING_CTRL:
+ case RT5682_SOFT_RAMP_DEPOP:
+ case RT5682_CHOP_DAC:
+ case RT5682_CHOP_ADC:
+ case RT5682_CALIB_ADC_CTRL:
+ case RT5682_VOL_TEST:
+ case RT5682_SPKVDD_DET_STA:
+ case RT5682_TEST_MODE_CTRL_1:
+ case RT5682_TEST_MODE_CTRL_2:
+ case RT5682_TEST_MODE_CTRL_3:
+ case RT5682_TEST_MODE_CTRL_4:
+ case RT5682_TEST_MODE_CTRL_5:
+ case RT5682_PLL1_INTERNAL:
+ case RT5682_PLL2_INTERNAL:
+ case RT5682_STO_NG2_CTRL_1:
+ case RT5682_STO_NG2_CTRL_2:
+ case RT5682_STO_NG2_CTRL_3:
+ case RT5682_STO_NG2_CTRL_4:
+ case RT5682_STO_NG2_CTRL_5:
+ case RT5682_STO_NG2_CTRL_6:
+ case RT5682_STO_NG2_CTRL_7:
+ case RT5682_STO_NG2_CTRL_8:
+ case RT5682_STO_NG2_CTRL_9:
+ case RT5682_STO_NG2_CTRL_10:
+ case RT5682_STO1_DAC_SIL_DET:
+ case RT5682_SIL_PSV_CTRL1:
+ case RT5682_SIL_PSV_CTRL2:
+ case RT5682_SIL_PSV_CTRL3:
+ case RT5682_SIL_PSV_CTRL4:
+ case RT5682_SIL_PSV_CTRL5:
+ case RT5682_HP_IMP_SENS_CTRL_01:
+ case RT5682_HP_IMP_SENS_CTRL_02:
+ case RT5682_HP_IMP_SENS_CTRL_03:
+ case RT5682_HP_IMP_SENS_CTRL_04:
+ case RT5682_HP_IMP_SENS_CTRL_05:
+ case RT5682_HP_IMP_SENS_CTRL_06:
+ case RT5682_HP_IMP_SENS_CTRL_07:
+ case RT5682_HP_IMP_SENS_CTRL_08:
+ case RT5682_HP_IMP_SENS_CTRL_09:
+ case RT5682_HP_IMP_SENS_CTRL_10:
+ case RT5682_HP_IMP_SENS_CTRL_11:
+ case RT5682_HP_IMP_SENS_CTRL_12:
+ case RT5682_HP_IMP_SENS_CTRL_13:
+ case RT5682_HP_IMP_SENS_CTRL_14:
+ case RT5682_HP_IMP_SENS_CTRL_15:
+ case RT5682_HP_IMP_SENS_CTRL_16:
+ case RT5682_HP_IMP_SENS_CTRL_17:
+ case RT5682_HP_IMP_SENS_CTRL_18:
+ case RT5682_HP_IMP_SENS_CTRL_19:
+ case RT5682_HP_IMP_SENS_CTRL_20:
+ case RT5682_HP_IMP_SENS_CTRL_21:
+ case RT5682_HP_IMP_SENS_CTRL_22:
+ case RT5682_HP_IMP_SENS_CTRL_23:
+ case RT5682_HP_IMP_SENS_CTRL_24:
+ case RT5682_HP_IMP_SENS_CTRL_25:
+ case RT5682_HP_IMP_SENS_CTRL_26:
+ case RT5682_HP_IMP_SENS_CTRL_27:
+ case RT5682_HP_IMP_SENS_CTRL_28:
+ case RT5682_HP_IMP_SENS_CTRL_29:
+ case RT5682_HP_IMP_SENS_CTRL_30:
+ case RT5682_HP_IMP_SENS_CTRL_31:
+ case RT5682_HP_IMP_SENS_CTRL_32:
+ case RT5682_HP_IMP_SENS_CTRL_33:
+ case RT5682_HP_IMP_SENS_CTRL_34:
+ case RT5682_HP_IMP_SENS_CTRL_35:
+ case RT5682_HP_IMP_SENS_CTRL_36:
+ case RT5682_HP_IMP_SENS_CTRL_37:
+ case RT5682_HP_IMP_SENS_CTRL_38:
+ case RT5682_HP_IMP_SENS_CTRL_39:
+ case RT5682_HP_IMP_SENS_CTRL_40:
+ case RT5682_HP_IMP_SENS_CTRL_41:
+ case RT5682_HP_IMP_SENS_CTRL_42:
+ case RT5682_HP_IMP_SENS_CTRL_43:
+ case RT5682_HP_LOGIC_CTRL_1:
+ case RT5682_HP_LOGIC_CTRL_2:
+ case RT5682_HP_LOGIC_CTRL_3:
+ case RT5682_HP_CALIB_CTRL_1:
+ case RT5682_HP_CALIB_CTRL_2:
+ case RT5682_HP_CALIB_CTRL_3:
+ case RT5682_HP_CALIB_CTRL_4:
+ case RT5682_HP_CALIB_CTRL_5:
+ case RT5682_HP_CALIB_CTRL_6:
+ case RT5682_HP_CALIB_CTRL_7:
+ case RT5682_HP_CALIB_CTRL_9:
+ case RT5682_HP_CALIB_CTRL_10:
+ case RT5682_HP_CALIB_CTRL_11:
+ case RT5682_HP_CALIB_STA_1:
+ case RT5682_HP_CALIB_STA_2:
+ case RT5682_HP_CALIB_STA_3:
+ case RT5682_HP_CALIB_STA_4:
+ case RT5682_HP_CALIB_STA_5:
+ case RT5682_HP_CALIB_STA_6:
+ case RT5682_HP_CALIB_STA_7:
+ case RT5682_HP_CALIB_STA_8:
+ case RT5682_HP_CALIB_STA_9:
+ case RT5682_HP_CALIB_STA_10:
+ case RT5682_HP_CALIB_STA_11:
+ case RT5682_SAR_IL_CMD_1:
+ case RT5682_SAR_IL_CMD_2:
+ case RT5682_SAR_IL_CMD_3:
+ case RT5682_SAR_IL_CMD_4:
+ case RT5682_SAR_IL_CMD_5:
+ case RT5682_SAR_IL_CMD_6:
+ case RT5682_SAR_IL_CMD_7:
+ case RT5682_SAR_IL_CMD_8:
+ case RT5682_SAR_IL_CMD_9:
+ case RT5682_SAR_IL_CMD_10:
+ case RT5682_SAR_IL_CMD_11:
+ case RT5682_SAR_IL_CMD_12:
+ case RT5682_SAR_IL_CMD_13:
+ case RT5682_EFUSE_CTRL_1:
+ case RT5682_EFUSE_CTRL_2:
+ case RT5682_EFUSE_CTRL_3:
+ case RT5682_EFUSE_CTRL_4:
+ case RT5682_EFUSE_CTRL_5:
+ case RT5682_EFUSE_CTRL_6:
+ case RT5682_EFUSE_CTRL_7:
+ case RT5682_EFUSE_CTRL_8:
+ case RT5682_EFUSE_CTRL_9:
+ case RT5682_EFUSE_CTRL_10:
+ case RT5682_EFUSE_CTRL_11:
+ case RT5682_JD_TOP_VC_VTRL:
+ case RT5682_DRC1_CTRL_0:
+ case RT5682_DRC1_CTRL_1:
+ case RT5682_DRC1_CTRL_2:
+ case RT5682_DRC1_CTRL_3:
+ case RT5682_DRC1_CTRL_4:
+ case RT5682_DRC1_CTRL_5:
+ case RT5682_DRC1_CTRL_6:
+ case RT5682_DRC1_HARD_LMT_CTRL_1:
+ case RT5682_DRC1_HARD_LMT_CTRL_2:
+ case RT5682_DRC1_PRIV_1:
+ case RT5682_DRC1_PRIV_2:
+ case RT5682_DRC1_PRIV_3:
+ case RT5682_DRC1_PRIV_4:
+ case RT5682_DRC1_PRIV_5:
+ case RT5682_DRC1_PRIV_6:
+ case RT5682_DRC1_PRIV_7:
+ case RT5682_DRC1_PRIV_8:
+ case RT5682_EQ_AUTO_RCV_CTRL1:
+ case RT5682_EQ_AUTO_RCV_CTRL2:
+ case RT5682_EQ_AUTO_RCV_CTRL3:
+ case RT5682_EQ_AUTO_RCV_CTRL4:
+ case RT5682_EQ_AUTO_RCV_CTRL5:
+ case RT5682_EQ_AUTO_RCV_CTRL6:
+ case RT5682_EQ_AUTO_RCV_CTRL7:
+ case RT5682_EQ_AUTO_RCV_CTRL8:
+ case RT5682_EQ_AUTO_RCV_CTRL9:
+ case RT5682_EQ_AUTO_RCV_CTRL10:
+ case RT5682_EQ_AUTO_RCV_CTRL11:
+ case RT5682_EQ_AUTO_RCV_CTRL12:
+ case RT5682_EQ_AUTO_RCV_CTRL13:
+ case RT5682_ADC_L_EQ_LPF1_A1:
+ case RT5682_R_EQ_LPF1_A1:
+ case RT5682_L_EQ_LPF1_H0:
+ case RT5682_R_EQ_LPF1_H0:
+ case RT5682_L_EQ_BPF1_A1:
+ case RT5682_R_EQ_BPF1_A1:
+ case RT5682_L_EQ_BPF1_A2:
+ case RT5682_R_EQ_BPF1_A2:
+ case RT5682_L_EQ_BPF1_H0:
+ case RT5682_R_EQ_BPF1_H0:
+ case RT5682_L_EQ_BPF2_A1:
+ case RT5682_R_EQ_BPF2_A1:
+ case RT5682_L_EQ_BPF2_A2:
+ case RT5682_R_EQ_BPF2_A2:
+ case RT5682_L_EQ_BPF2_H0:
+ case RT5682_R_EQ_BPF2_H0:
+ case RT5682_L_EQ_BPF3_A1:
+ case RT5682_R_EQ_BPF3_A1:
+ case RT5682_L_EQ_BPF3_A2:
+ case RT5682_R_EQ_BPF3_A2:
+ case RT5682_L_EQ_BPF3_H0:
+ case RT5682_R_EQ_BPF3_H0:
+ case RT5682_L_EQ_BPF4_A1:
+ case RT5682_R_EQ_BPF4_A1:
+ case RT5682_L_EQ_BPF4_A2:
+ case RT5682_R_EQ_BPF4_A2:
+ case RT5682_L_EQ_BPF4_H0:
+ case RT5682_R_EQ_BPF4_H0:
+ case RT5682_L_EQ_HPF1_A1:
+ case RT5682_R_EQ_HPF1_A1:
+ case RT5682_L_EQ_HPF1_H0:
+ case RT5682_R_EQ_HPF1_H0:
+ case RT5682_L_EQ_PRE_VOL:
+ case RT5682_R_EQ_PRE_VOL:
+ case RT5682_L_EQ_POST_VOL:
+ case RT5682_R_EQ_POST_VOL:
+ case RT5682_I2C_MODE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0);
+static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
+
+/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
+ 2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
+ 3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
+ 6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
+ 7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
+ 8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
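+
+/*
+ * Worked example for the 3..5 range item above: TLV values are in
+ * 0.01 dB units, so index 4 maps to 3000 + (4 - 3) * 500 = 3500,
+ * i.e. +35 dB, matching the table in the comment.
+ */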
+
+/* Interface data select */
+static const char * const rt5682_data_select[] = {
+ "L/R", "R/L", "L/L", "R/R"
+};
+
+static SOC_ENUM_SINGLE_DECL(rt5682_if2_adc_enum,
+ RT5682_DIG_INF2_DATA, RT5682_IF2_ADC_SEL_SFT, rt5682_data_select);
+
+static SOC_ENUM_SINGLE_DECL(rt5682_if1_01_adc_enum,
+ RT5682_TDM_ADDA_CTRL_1, RT5682_IF1_ADC1_SEL_SFT, rt5682_data_select);
+
+static SOC_ENUM_SINGLE_DECL(rt5682_if1_23_adc_enum,
+ RT5682_TDM_ADDA_CTRL_1, RT5682_IF1_ADC2_SEL_SFT, rt5682_data_select);
+
+static SOC_ENUM_SINGLE_DECL(rt5682_if1_45_adc_enum,
+ RT5682_TDM_ADDA_CTRL_1, RT5682_IF1_ADC3_SEL_SFT, rt5682_data_select);
+
+static SOC_ENUM_SINGLE_DECL(rt5682_if1_67_adc_enum,
+ RT5682_TDM_ADDA_CTRL_1, RT5682_IF1_ADC4_SEL_SFT, rt5682_data_select);
+
+static const struct snd_kcontrol_new rt5682_if2_adc_swap_mux =
+ SOC_DAPM_ENUM("IF2 ADC Swap Mux", rt5682_if2_adc_enum);
+
+static const struct snd_kcontrol_new rt5682_if1_01_adc_swap_mux =
+ SOC_DAPM_ENUM("IF1 01 ADC Swap Mux", rt5682_if1_01_adc_enum);
+
+static const struct snd_kcontrol_new rt5682_if1_23_adc_swap_mux =
+ SOC_DAPM_ENUM("IF1 23 ADC Swap Mux", rt5682_if1_23_adc_enum);
+
+static const struct snd_kcontrol_new rt5682_if1_45_adc_swap_mux =
+ SOC_DAPM_ENUM("IF1 45 ADC Swap Mux", rt5682_if1_45_adc_enum);
+
+static const struct snd_kcontrol_new rt5682_if1_67_adc_swap_mux =
+ SOC_DAPM_ENUM("IF1 67 ADC Swap Mux", rt5682_if1_67_adc_enum);
+
+static void rt5682_reset(struct regmap *regmap)
+{
+ regmap_write(regmap, RT5682_RESET, 0);
+ regmap_write(regmap, RT5682_I2C_MODE, 1);
+}
+
+/**
+ * rt5682_sel_asrc_clk_src - select ASRC clock source for a set of filters
+ * @component: SoC audio component device.
+ * @filter_mask: mask of filters.
+ * @clk_src: clock source.
+ *
+ * The ASRC handles asynchronous MCLK and LRCK. Since the RT5682 natively
+ * supports only the standard 32fs and 64fs I2S formats, the ASRC must be
+ * enabled to support special I2S clock ratios such as Intel's 100fs
+ * (100 * sampling rate): it tracks the I2S clock and generates a matching
+ * system clock for the codec. This function selects the clock source for
+ * the set of filters specified by @filter_mask; the component driver turns
+ * on the ASRC for those filters whenever an ASRC clock source is selected.
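+ *
+ * A machine-driver usage sketch (the filter/source combination is
+ * illustrative only):
+ *
+ *   rt5682_sel_asrc_clk_src(component,
+ *       RT5682_DA_STEREO1_FILTER | RT5682_AD_STEREO1_FILTER,
+ *       RT5682_CLK_SEL_I2S1_ASRC);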
+ */
+int rt5682_sel_asrc_clk_src(struct snd_soc_component *component,
+ unsigned int filter_mask, unsigned int clk_src)
+{
+ switch (clk_src) {
+ case RT5682_CLK_SEL_SYS:
+ case RT5682_CLK_SEL_I2S1_ASRC:
+ case RT5682_CLK_SEL_I2S2_ASRC:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (filter_mask & RT5682_DA_STEREO1_FILTER) {
+ snd_soc_component_update_bits(component, RT5682_PLL_TRACK_2,
+ RT5682_FILTER_CLK_SEL_MASK,
+ clk_src << RT5682_FILTER_CLK_SEL_SFT);
+ }
+
+ if (filter_mask & RT5682_AD_STEREO1_FILTER) {
+ snd_soc_component_update_bits(component, RT5682_PLL_TRACK_3,
+ RT5682_FILTER_CLK_SEL_MASK,
+ clk_src << RT5682_FILTER_CLK_SEL_SFT);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt5682_sel_asrc_clk_src);
+
+static int rt5682_button_detect(struct snd_soc_component *component)
+{
+ int btn_type, val;
+
+ val = snd_soc_component_read32(component, RT5682_4BTN_IL_CMD_1);
+ btn_type = val & 0xfff0;
+ snd_soc_component_write(component, RT5682_4BTN_IL_CMD_1, val);
+ pr_debug("%s btn_type=%x\n", __func__, btn_type);
+ snd_soc_component_update_bits(component,
+ RT5682_SAR_IL_CMD_2, 0x10, 0x10);
+
+ return btn_type;
+}
+
+static void rt5682_enable_push_button_irq(struct snd_soc_component *component,
+ bool enable)
+{
+ if (enable) {
+ snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
+ RT5682_SAR_BUTT_DET_MASK, RT5682_SAR_BUTT_DET_EN);
+ snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_13,
+ RT5682_SAR_SOUR_MASK, RT5682_SAR_SOUR_BTN);
+ snd_soc_component_write(component, RT5682_IL_CMD_1, 0x0040);
+ snd_soc_component_update_bits(component, RT5682_4BTN_IL_CMD_2,
+ RT5682_4BTN_IL_MASK | RT5682_4BTN_IL_RST_MASK,
+ RT5682_4BTN_IL_EN | RT5682_4BTN_IL_NOR);
+ snd_soc_component_update_bits(component, RT5682_IRQ_CTRL_3,
+ RT5682_IL_IRQ_MASK, RT5682_IL_IRQ_EN);
+ } else {
+ snd_soc_component_update_bits(component, RT5682_IRQ_CTRL_3,
+ RT5682_IL_IRQ_MASK, RT5682_IL_IRQ_DIS);
+ snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
+ RT5682_SAR_BUTT_DET_MASK, RT5682_SAR_BUTT_DET_DIS);
+ snd_soc_component_update_bits(component, RT5682_4BTN_IL_CMD_2,
+ RT5682_4BTN_IL_MASK, RT5682_4BTN_IL_DIS);
+ snd_soc_component_update_bits(component, RT5682_4BTN_IL_CMD_2,
+ RT5682_4BTN_IL_RST_MASK, RT5682_4BTN_IL_RST);
+ snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_13,
+ RT5682_SAR_SOUR_MASK, RT5682_SAR_SOUR_TYPE);
+ }
+}
+
+/**
+ * rt5682_headset_detect - Detect headset.
+ * @component: SoC audio component device.
+ * @jack_insert: Jack insert or not.
+ *
+ * Detect whether a headset is present when the jack is inserted.
+ *
+ * Return: the detected jack type.
+ */
+static int rt5682_headset_detect(struct snd_soc_component *component,
+ int jack_insert)
+{
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ unsigned int val, count;
+
+ if (jack_insert) {
+ snd_soc_dapm_force_enable_pin(dapm, "CBJ Power");
+ snd_soc_dapm_sync(dapm);
+ snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
+ RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH);
+
+ count = 0;
+ val = snd_soc_component_read32(component, RT5682_CBJ_CTRL_2)
+ & RT5682_JACK_TYPE_MASK;
+ while (val == 0 && count < 50) {
+ usleep_range(10000, 15000);
+ val = snd_soc_component_read32(component,
+ RT5682_CBJ_CTRL_2) & RT5682_JACK_TYPE_MASK;
+ count++;
+ }
+
+ switch (val) {
+ case 0x1:
+ case 0x2:
+ rt5682->jack_type = SND_JACK_HEADSET;
+ rt5682_enable_push_button_irq(component, true);
+ break;
+ default:
+ rt5682->jack_type = SND_JACK_HEADPHONE;
+ }
+
+ } else {
+ rt5682_enable_push_button_irq(component, false);
+ snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
+ RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW);
+ snd_soc_dapm_disable_pin(dapm, "CBJ Power");
+ snd_soc_dapm_sync(dapm);
+
+ rt5682->jack_type = 0;
+ }
+
+ dev_dbg(component->dev, "jack_type = %d\n", rt5682->jack_type);
+ return rt5682->jack_type;
+}
+
+static irqreturn_t rt5682_irq(int irq, void *data)
+{
+ struct rt5682_priv *rt5682 = data;
+
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5682->jack_detect_work, msecs_to_jiffies(250));
+
+ return IRQ_HANDLED;
+}
+
+static void rt5682_jd_check_handler(struct work_struct *work)
+{
+ struct rt5682_priv *rt5682 = container_of(work, struct rt5682_priv,
+ jd_check_work.work);
+
+ if (snd_soc_component_read32(rt5682->component, RT5682_AJD1_CTRL)
+ & RT5682_JDH_RS_MASK) {
+ /* jack out */
+ rt5682->jack_type = rt5682_headset_detect(rt5682->component, 0);
+
+ snd_soc_jack_report(rt5682->hs_jack, rt5682->jack_type,
+ SND_JACK_HEADSET |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3);
+ } else {
+ schedule_delayed_work(&rt5682->jd_check_work, 500);
+ }
+}
+
+static int rt5682_set_jack_detect(struct snd_soc_component *component,
+ struct snd_soc_jack *hs_jack, void *data)
+{
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+
+ switch (rt5682->pdata.jd_src) {
+ case RT5682_JD1:
+ snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_2,
+ RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
+ snd_soc_component_write(component, RT5682_CBJ_CTRL_1, 0xd042);
+ snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_3,
+ RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
+ snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
+ RT5682_SAR_POW_MASK, RT5682_SAR_POW_EN);
+ regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
+ RT5682_GP1_PIN_MASK, RT5682_GP1_PIN_IRQ);
+ regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
+ RT5682_POW_IRQ | RT5682_POW_JDH |
+ RT5682_POW_ANA, RT5682_POW_IRQ |
+ RT5682_POW_JDH | RT5682_POW_ANA);
+ regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_2,
+ RT5682_PWR_JDH | RT5682_PWR_JDL,
+ RT5682_PWR_JDH | RT5682_PWR_JDL);
+ regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
+ RT5682_JD1_EN_MASK | RT5682_JD1_POL_MASK,
+ RT5682_JD1_EN | RT5682_JD1_POL_NOR);
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5682->jack_detect_work, msecs_to_jiffies(250));
+ break;
+
+ case RT5682_JD_NULL:
+ regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
+ RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
+ regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
+ RT5682_POW_JDH | RT5682_POW_JDL, 0);
+ break;
+
+ default:
+ dev_warn(component->dev, "Wrong JD source\n");
+ break;
+ }
+
+ rt5682->hs_jack = hs_jack;
+
+ return 0;
+}
+
+static void rt5682_jack_detect_handler(struct work_struct *work)
+{
+ struct rt5682_priv *rt5682 =
+ container_of(work, struct rt5682_priv, jack_detect_work.work);
+ int val, btn_type;
+
+ while (!rt5682->component)
+ usleep_range(10000, 15000);
+
+ while (!rt5682->component->card->instantiated)
+ usleep_range(10000, 15000);
+
+ mutex_lock(&rt5682->calibrate_mutex);
+
+ val = snd_soc_component_read32(rt5682->component, RT5682_AJD1_CTRL)
+ & RT5682_JDH_RS_MASK;
+ if (!val) {
+ /* jack in */
+ if (rt5682->jack_type == 0) {
+ /* jack was out, report jack type */
+ rt5682->jack_type =
+ rt5682_headset_detect(rt5682->component, 1);
+ } else {
+ /* jack is already in, report button event */
+ rt5682->jack_type = SND_JACK_HEADSET;
+ btn_type = rt5682_button_detect(rt5682->component);
+ /*
+ * The rt5682 can report three kinds of button behavior:
+ * single click, double click and hold. However, we
+ * currently only report a button pressed/released event,
+ * so all three behaviors are treated as a button press.
+ */
+ switch (btn_type) {
+ case 0x8000:
+ case 0x4000:
+ case 0x2000:
+ rt5682->jack_type |= SND_JACK_BTN_0;
+ break;
+ case 0x1000:
+ case 0x0800:
+ case 0x0400:
+ rt5682->jack_type |= SND_JACK_BTN_1;
+ break;
+ case 0x0200:
+ case 0x0100:
+ case 0x0080:
+ rt5682->jack_type |= SND_JACK_BTN_2;
+ break;
+ case 0x0040:
+ case 0x0020:
+ case 0x0010:
+ rt5682->jack_type |= SND_JACK_BTN_3;
+ break;
+ case 0x0000: /* unpressed */
+ break;
+ default:
+ btn_type = 0;
+ dev_err(rt5682->component->dev,
+ "Unexpected button code 0x%04x\n",
+ btn_type);
+ break;
+ }
+ }
+ } else {
+ /* jack out */
+ rt5682->jack_type = rt5682_headset_detect(rt5682->component, 0);
+ }
+
+ snd_soc_jack_report(rt5682->hs_jack, rt5682->jack_type,
+ SND_JACK_HEADSET |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3);
+
+ if (rt5682->jack_type & (SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3))
+ schedule_delayed_work(&rt5682->jd_check_work, 0);
+ else
+ cancel_delayed_work_sync(&rt5682->jd_check_work);
+
+ mutex_unlock(&rt5682->calibrate_mutex);
+}
+
+static const struct snd_kcontrol_new rt5682_snd_controls[] = {
+ /* Headphone Output Volume */
+ SOC_DOUBLE_R_TLV("Headphone Playback Volume", RT5682_HPL_GAIN,
+ RT5682_HPR_GAIN, RT5682_G_HP_SFT, 15, 1, hp_vol_tlv),
+
+ /* DAC Digital Volume */
+ SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682_DAC1_DIG_VOL,
+ RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 175, 0, dac_vol_tlv),
+
+ /* IN Boost Volume */
+ SOC_SINGLE_TLV("CBJ Boost Volume", RT5682_CBJ_BST_CTRL,
+ RT5682_BST_CBJ_SFT, 8, 0, bst_tlv),
+
+ /* ADC Digital Volume Control */
+ SOC_DOUBLE("STO1 ADC Capture Switch", RT5682_STO1_ADC_DIG_VOL,
+ RT5682_L_MUTE_SFT, RT5682_R_MUTE_SFT, 1, 1),
+ SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5682_STO1_ADC_DIG_VOL,
+ RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 127, 0, adc_vol_tlv),
+
+ /* ADC Boost Volume Control */
+ SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5682_STO1_ADC_BOOST,
+ RT5682_STO1_ADC_L_BST_SFT, RT5682_STO1_ADC_R_BST_SFT,
+ 3, 0, adc_bst_tlv),
+};
+
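+/*
+ * Pick an index into @div such that rt5682->sysclk / div[i] matches
+ * @target exactly; failing that, return the last index whose divider
+ * still keeps the divided rate at or above @target.
+ */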
+static int rt5682_div_sel(struct rt5682_priv *rt5682,
+ int target, const int div[], int size)
+{
+ int i;
+
+ if (rt5682->sysclk < target) {
+ pr_err("sysclk rate %d is too low\n",
+ rt5682->sysclk);
+ return 0;
+ }
+
+ for (i = 0; i < size - 1; i++) {
+ pr_info("div[%d]=%d\n", i, div[i]);
+ if (target * div[i] == rt5682->sysclk)
+ return i;
+ if (target * div[i + 1] > rt5682->sysclk) {
+ pr_err("can't find div for sysclk %d\n",
+ rt5682->sysclk);
+ return i;
+ }
+ }
+
+ if (target * div[i] < rt5682->sysclk)
+ pr_err("sysclk rate %d is too high\n",
+ rt5682->sysclk);
+
+ return size - 1;
+}
+
+/**
+ * set_dmic_clk - Set the DMIC clock divider.
+ *
+ * @w: DAPM widget.
+ * @kcontrol: The kcontrol of this widget.
+ * @event: Event id.
+ *
+ * Choose a DMIC clock between 1 MHz and 3 MHz; the closer the clock
+ * approximates 3 MHz, the better.
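+ *
+ * Worked example (illustrative numbers): with a 12.288 MHz system
+ * clock, rt5682_div_sel() picks a divider of 8, since 1.5 MHz * 8 is
+ * the largest product that does not exceed 12.288 MHz; the resulting
+ * DMIC clock is 12.288 MHz / 8 = 1.536 MHz.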
+ */
+static int set_dmic_clk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ int idx = -EINVAL;
+ static const int div[] = {2, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128};
+
+ idx = rt5682_div_sel(rt5682, 1500000, div, ARRAY_SIZE(div));
+
+ snd_soc_component_update_bits(component, RT5682_DMIC_CTRL_1,
+ RT5682_DMIC_CLK_MASK, idx << RT5682_DMIC_CLK_SFT);
+
+ return 0;
+}
+
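+/*
+ * Set the stereo1 filter clock divider so that sysclk / divider comes
+ * out at 256 * LRCK of the interface feeding the filter, then choose
+ * the ADC/DAC oversample-rate setting according to how many multiples
+ * of 12.288 MHz the system clock spans.
+ */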
+static int set_filter_clk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ int ref, val, reg, sft, mask, idx = -EINVAL;
+ static const int div_f[] = {1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48};
+ static const int div_o[] = {1, 2, 4, 6, 8, 12, 16, 24, 32, 48};
+
+ val = snd_soc_component_read32(component, RT5682_GPIO_CTRL_1) &
+ RT5682_GP4_PIN_MASK;
+ if (w->shift == RT5682_PWR_ADC_S1F_BIT &&
+ val == RT5682_GP4_PIN_ADCDAT2)
+ ref = 256 * rt5682->lrck[RT5682_AIF2];
+ else
+ ref = 256 * rt5682->lrck[RT5682_AIF1];
+
+ idx = rt5682_div_sel(rt5682, ref, div_f, ARRAY_SIZE(div_f));
+
+ if (w->shift == RT5682_PWR_ADC_S1F_BIT) {
+ reg = RT5682_PLL_TRACK_3;
+ sft = RT5682_ADC_OSR_SFT;
+ mask = RT5682_ADC_OSR_MASK;
+ } else {
+ reg = RT5682_PLL_TRACK_2;
+ sft = RT5682_DAC_OSR_SFT;
+ mask = RT5682_DAC_OSR_MASK;
+ }
+
+ snd_soc_component_update_bits(component, reg,
+ RT5682_FILTER_CLK_DIV_MASK, idx << RT5682_FILTER_CLK_DIV_SFT);
+
+ /* Select the oversampling rate */
+ for (idx = 0; idx < ARRAY_SIZE(div_o); idx++) {
+ if (rt5682->sysclk <= 12288000 * div_o[idx])
+ break;
+ }
+
+ snd_soc_component_update_bits(component, RT5682_ADDA_CLK_1,
+ mask, idx << sft);
+
+ return 0;
+}
+
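+/*
+ * DAPM route-connected callback: keep the PLL1 supply in the path only
+ * while the system clock is actually sourced from PLL1.
+ */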
+static int is_sys_clk_from_pll1(struct snd_soc_dapm_widget *w,
+ struct snd_soc_dapm_widget *sink)
+{
+ unsigned int val;
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+
+ val = snd_soc_component_read32(component, RT5682_GLB_CLK);
+ val &= RT5682_SCLK_SRC_MASK;
+ if (val == RT5682_SCLK_SRC_PLL1)
+ return 1;
+ else
+ return 0;
+}
+
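+/*
+ * DAPM route-connected callback: keep an ASRC supply in the path only
+ * when the corresponding filter clock is set to one of the I2S ASRC
+ * sources.
+ */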
+static int is_using_asrc(struct snd_soc_dapm_widget *w,
+ struct snd_soc_dapm_widget *sink)
+{
+ unsigned int reg, shift, val;
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+
+ switch (w->shift) {
+ case RT5682_ADC_STO1_ASRC_SFT:
+ reg = RT5682_PLL_TRACK_3;
+ shift = RT5682_FILTER_CLK_SEL_SFT;
+ break;
+ case RT5682_DAC_STO1_ASRC_SFT:
+ reg = RT5682_PLL_TRACK_2;
+ shift = RT5682_FILTER_CLK_SEL_SFT;
+ break;
+ default:
+ return 0;
+ }
+
+ val = (snd_soc_component_read32(component, reg) >> shift) & 0xf;
+ switch (val) {
+ case RT5682_CLK_SEL_I2S1_ASRC:
+ case RT5682_CLK_SEL_I2S2_ASRC:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Digital Mixer */
+static const struct snd_kcontrol_new rt5682_sto1_adc_l_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5682_STO1_ADC_MIXER,
+ RT5682_M_STO1_ADC_L1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5682_STO1_ADC_MIXER,
+ RT5682_M_STO1_ADC_L2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5682_sto1_adc_r_mix[] = {
+ SOC_DAPM_SINGLE("ADC1 Switch", RT5682_STO1_ADC_MIXER,
+ RT5682_M_STO1_ADC_R1_SFT, 1, 1),
+ SOC_DAPM_SINGLE("ADC2 Switch", RT5682_STO1_ADC_MIXER,
+ RT5682_M_STO1_ADC_R2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5682_dac_l_mix[] = {
+ SOC_DAPM_SINGLE("Stereo ADC Switch", RT5682_AD_DA_MIXER,
+ RT5682_M_ADCMIX_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC1 Switch", RT5682_AD_DA_MIXER,
+ RT5682_M_DAC1_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5682_dac_r_mix[] = {
+ SOC_DAPM_SINGLE("Stereo ADC Switch", RT5682_AD_DA_MIXER,
+ RT5682_M_ADCMIX_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC1 Switch", RT5682_AD_DA_MIXER,
+ RT5682_M_DAC1_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5682_sto1_dac_l_mix[] = {
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5682_STO1_DAC_MIXER,
+ RT5682_M_DAC_L1_STO_L_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5682_STO1_DAC_MIXER,
+ RT5682_M_DAC_R1_STO_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5682_sto1_dac_r_mix[] = {
+ SOC_DAPM_SINGLE("DAC L1 Switch", RT5682_STO1_DAC_MIXER,
+ RT5682_M_DAC_L1_STO_R_SFT, 1, 1),
+ SOC_DAPM_SINGLE("DAC R1 Switch", RT5682_STO1_DAC_MIXER,
+ RT5682_M_DAC_R1_STO_R_SFT, 1, 1),
+};
+
+/* Analog Input Mixer */
+static const struct snd_kcontrol_new rt5682_rec1_l_mix[] = {
+ SOC_DAPM_SINGLE("CBJ Switch", RT5682_REC_MIXER,
+ RT5682_M_CBJ_RM1_L_SFT, 1, 1),
+};
+
+/* STO1 ADC1 Source */
+/* MX-26 [13] [5] */
+static const char * const rt5682_sto1_adc1_src[] = {
+ "DAC MIX", "ADC"
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5682_sto1_adc1l_enum, RT5682_STO1_ADC_MIXER,
+ RT5682_STO1_ADC1L_SRC_SFT, rt5682_sto1_adc1_src);
+
+static const struct snd_kcontrol_new rt5682_sto1_adc1l_mux =
+ SOC_DAPM_ENUM("Stereo1 ADC1L Source", rt5682_sto1_adc1l_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5682_sto1_adc1r_enum, RT5682_STO1_ADC_MIXER,
+ RT5682_STO1_ADC1R_SRC_SFT, rt5682_sto1_adc1_src);
+
+static const struct snd_kcontrol_new rt5682_sto1_adc1r_mux =
+ SOC_DAPM_ENUM("Stereo1 ADC1L Source", rt5682_sto1_adc1r_enum);
+
+/* STO1 ADC Source */
+/* MX-26 [11:10] [3:2] */
+static const char * const rt5682_sto1_adc_src[] = {
+ "ADC1 L", "ADC1 R"
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5682_sto1_adcl_enum, RT5682_STO1_ADC_MIXER,
+ RT5682_STO1_ADCL_SRC_SFT, rt5682_sto1_adc_src);
+
+static const struct snd_kcontrol_new rt5682_sto1_adcl_mux =
+ SOC_DAPM_ENUM("Stereo1 ADCL Source", rt5682_sto1_adcl_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5682_sto1_adcr_enum, RT5682_STO1_ADC_MIXER,
+ RT5682_STO1_ADCR_SRC_SFT, rt5682_sto1_adc_src);
+
+static const struct snd_kcontrol_new rt5682_sto1_adcr_mux =
+ SOC_DAPM_ENUM("Stereo1 ADCR Source", rt5682_sto1_adcr_enum);
+
+/* STO1 ADC2 Source */
+/* MX-26 [12] [4] */
+static const char * const rt5682_sto1_adc2_src[] = {
+ "DAC MIX", "DMIC"
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5682_sto1_adc2l_enum, RT5682_STO1_ADC_MIXER,
+ RT5682_STO1_ADC2L_SRC_SFT, rt5682_sto1_adc2_src);
+
+static const struct snd_kcontrol_new rt5682_sto1_adc2l_mux =
+ SOC_DAPM_ENUM("Stereo1 ADC2L Source", rt5682_sto1_adc2l_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5682_sto1_adc2r_enum, RT5682_STO1_ADC_MIXER,
+ RT5682_STO1_ADC2R_SRC_SFT, rt5682_sto1_adc2_src);
+
+static const struct snd_kcontrol_new rt5682_sto1_adc2r_mux =
+ SOC_DAPM_ENUM("Stereo1 ADC2R Source", rt5682_sto1_adc2r_enum);
+
+/* MX-79 [6:4] I2S1 ADC data location */
+static const unsigned int rt5682_if1_adc_slot_values[] = {
+ 0,
+ 2,
+ 4,
+ 6,
+};
+
+static const char * const rt5682_if1_adc_slot_src[] = {
+ "Slot 0", "Slot 2", "Slot 4", "Slot 6"
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(rt5682_if1_adc_slot_enum,
+ RT5682_TDM_CTRL, RT5682_TDM_ADC_LCA_SFT, RT5682_TDM_ADC_LCA_MASK,
+ rt5682_if1_adc_slot_src, rt5682_if1_adc_slot_values);
+
+static const struct snd_kcontrol_new rt5682_if1_adc_slot_mux =
+ SOC_DAPM_ENUM("IF1 ADC Slot location", rt5682_if1_adc_slot_enum);
+
+/* Analog DAC L1 Source, Analog DAC R1 Source */
+/* MX-2B [4], MX-2B [0] */
+static const char * const rt5682_alg_dac1_src[] = {
+ "Stereo1 DAC Mixer", "DAC1"
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5682_alg_dac_l1_enum, RT5682_A_DAC1_MUX,
+ RT5682_A_DACL1_SFT, rt5682_alg_dac1_src);
+
+static const struct snd_kcontrol_new rt5682_alg_dac_l1_mux =
+ SOC_DAPM_ENUM("Analog DAC L1 Source", rt5682_alg_dac_l1_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5682_alg_dac_r1_enum, RT5682_A_DAC1_MUX,
+ RT5682_A_DACR1_SFT, rt5682_alg_dac1_src);
+
+static const struct snd_kcontrol_new rt5682_alg_dac_r1_mux =
+ SOC_DAPM_ENUM("Analog DAC R1 Source", rt5682_alg_dac_r1_enum);
+
+/* Out Switch */
+static const struct snd_kcontrol_new hpol_switch =
+ SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5682_HP_CTRL_1,
+ RT5682_L_MUTE_SFT, 1, 1);
+static const struct snd_kcontrol_new hpor_switch =
+ SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5682_HP_CTRL_1,
+ RT5682_R_MUTE_SFT, 1, 1);
+
+static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_write(component,
+ RT5682_HP_LOGIC_CTRL_2, 0x0012);
+ snd_soc_component_write(component,
+ RT5682_HP_CTRL_2, 0x6000);
+ snd_soc_component_update_bits(component, RT5682_STO_NG2_CTRL_1,
+ RT5682_NG2_EN_MASK, RT5682_NG2_EN);
+ snd_soc_component_update_bits(component,
+ RT5682_DEPOP_1, 0x60, 0x60);
+ break;
+
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_component_update_bits(component,
+ RT5682_DEPOP_1, 0x60, 0x0);
+ snd_soc_component_write(component,
+ RT5682_HP_CTRL_2, 0x0000);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int set_dmic_power(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ /* Add delay to avoid pop noise */
+ msleep(150);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int rt5682_set_verf(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ switch (w->shift) {
+ case RT5682_PWR_VREF1_BIT:
+ snd_soc_component_update_bits(component,
+ RT5682_PWR_ANLG_1, RT5682_PWR_FV1, 0);
+ break;
+
+ case RT5682_PWR_VREF2_BIT:
+ snd_soc_component_update_bits(component,
+ RT5682_PWR_ANLG_1, RT5682_PWR_FV2, 0);
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case SND_SOC_DAPM_POST_PMU:
+ usleep_range(15000, 20000);
+ switch (w->shift) {
+ case RT5682_PWR_VREF1_BIT:
+ snd_soc_component_update_bits(component,
+ RT5682_PWR_ANLG_1, RT5682_PWR_FV1,
+ RT5682_PWR_FV1);
+ break;
+
+ case RT5682_PWR_VREF2_BIT:
+ snd_soc_component_update_bits(component,
+ RT5682_PWR_ANLG_1, RT5682_PWR_FV2,
+ RT5682_PWR_FV2);
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static const unsigned int rt5682_adcdat_pin_values[] = {
+ 1,
+ 3,
+};
+
+static const char * const rt5682_adcdat_pin_select[] = {
+ "ADCDAT1",
+ "ADCDAT2",
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(rt5682_adcdat_pin_enum,
+ RT5682_GPIO_CTRL_1, RT5682_GP4_PIN_SFT, RT5682_GP4_PIN_MASK,
+ rt5682_adcdat_pin_select, rt5682_adcdat_pin_values);
+
+static const struct snd_kcontrol_new rt5682_adcdat_pin_ctrl =
+ SOC_DAPM_ENUM("ADCDAT", rt5682_adcdat_pin_enum);
+
+static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("LDO2", RT5682_PWR_ANLG_3, RT5682_PWR_LDO2_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PLL1", RT5682_PWR_ANLG_3, RT5682_PWR_PLL_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PLL2B", RT5682_PWR_ANLG_3, RT5682_PWR_PLL2B_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PLL2F", RT5682_PWR_ANLG_3, RT5682_PWR_PLL2F_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Vref1", RT5682_PWR_ANLG_1, RT5682_PWR_VREF1_BIT, 0,
+ rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_SUPPLY("Vref2", RT5682_PWR_ANLG_1, RT5682_PWR_VREF2_BIT, 0,
+ rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+
+ /* ASRC */
+ SND_SOC_DAPM_SUPPLY_S("DAC STO1 ASRC", 1, RT5682_PLL_TRACK_1,
+ RT5682_DAC_STO1_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("ADC STO1 ASRC", 1, RT5682_PLL_TRACK_1,
+ RT5682_ADC_STO1_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("AD ASRC", 1, RT5682_PLL_TRACK_1,
+ RT5682_AD_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DA ASRC", 1, RT5682_PLL_TRACK_1,
+ RT5682_DA_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DMIC ASRC", 1, RT5682_PLL_TRACK_1,
+ RT5682_DMIC_ASRC_SFT, 0, NULL, 0),
+
+ /* Input Side */
+ SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5682_PWR_ANLG_2, RT5682_PWR_MB1_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5682_PWR_ANLG_2, RT5682_PWR_MB2_BIT,
+ 0, NULL, 0),
+
+ /* Input Lines */
+ SND_SOC_DAPM_INPUT("DMIC L1"),
+ SND_SOC_DAPM_INPUT("DMIC R1"),
+
+ SND_SOC_DAPM_INPUT("IN1P"),
+
+ SND_SOC_DAPM_SUPPLY("DMIC CLK", SND_SOC_NOPM, 0, 0,
+ set_dmic_clk, SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_SUPPLY("DMIC1 Power", RT5682_DMIC_CTRL_1,
+ RT5682_DMIC_1_EN_SFT, 0, set_dmic_power, SND_SOC_DAPM_POST_PMU),
+
+ /* Boost */
+ SND_SOC_DAPM_PGA("BST1 CBJ", SND_SOC_NOPM,
+ 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("CBJ Power", RT5682_PWR_ANLG_3,
+ RT5682_PWR_CBJ_BIT, 0, NULL, 0),
+
+ /* REC Mixer */
+ SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5682_rec1_l_mix,
+ ARRAY_SIZE(rt5682_rec1_l_mix)),
+ SND_SOC_DAPM_SUPPLY("RECMIX1L Power", RT5682_PWR_ANLG_2,
+ RT5682_PWR_RM1_L_BIT, 0, NULL, 0),
+
+ /* ADCs */
+ SND_SOC_DAPM_ADC("ADC1 L", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_ADC("ADC1 R", NULL, SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_SUPPLY("ADC1 L Power", RT5682_PWR_DIG_1,
+ RT5682_PWR_ADC_L1_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC1 R Power", RT5682_PWR_DIG_1,
+ RT5682_PWR_ADC_R1_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC1 clock", RT5682_CHOP_ADC,
+ RT5682_CKGEN_ADC1_SFT, 0, NULL, 0),
+
+ /* ADC Mux */
+ SND_SOC_DAPM_MUX("Stereo1 ADC L1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_sto1_adc1l_mux),
+ SND_SOC_DAPM_MUX("Stereo1 ADC R1 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_sto1_adc1r_mux),
+ SND_SOC_DAPM_MUX("Stereo1 ADC L2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_sto1_adc2l_mux),
+ SND_SOC_DAPM_MUX("Stereo1 ADC R2 Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_sto1_adc2r_mux),
+ SND_SOC_DAPM_MUX("Stereo1 ADC L Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_sto1_adcl_mux),
+ SND_SOC_DAPM_MUX("Stereo1 ADC R Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_sto1_adcr_mux),
+ SND_SOC_DAPM_MUX("IF1_ADC Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_if1_adc_slot_mux),
+
+ /* ADC Mixer */
+ SND_SOC_DAPM_SUPPLY("ADC Stereo1 Filter", RT5682_PWR_DIG_2,
+ RT5682_PWR_ADC_S1F_BIT, 0, set_filter_clk,
+ SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_MIXER("Stereo1 ADC MIXL", RT5682_STO1_ADC_DIG_VOL,
+ RT5682_L_MUTE_SFT, 1, rt5682_sto1_adc_l_mix,
+ ARRAY_SIZE(rt5682_sto1_adc_l_mix)),
+ SND_SOC_DAPM_MIXER("Stereo1 ADC MIXR", RT5682_STO1_ADC_DIG_VOL,
+ RT5682_R_MUTE_SFT, 1, rt5682_sto1_adc_r_mix,
+ ARRAY_SIZE(rt5682_sto1_adc_r_mix)),
+ SND_SOC_DAPM_SUPPLY("BTN Detection Mode", RT5682_SAR_IL_CMD_1,
+ 14, 1, NULL, 0),
+
+ /* ADC PGA */
+ SND_SOC_DAPM_PGA("Stereo1 ADC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Digital Interface */
+ SND_SOC_DAPM_SUPPLY("I2S1", RT5682_PWR_DIG_1, RT5682_PWR_I2S1_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("I2S2", RT5682_PWR_DIG_1, RT5682_PWR_I2S2_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC1 L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("IF1 DAC1 R", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Digital Interface Select */
+ SND_SOC_DAPM_MUX("IF1 01 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_if1_01_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1 23 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_if1_23_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1 45 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_if1_45_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF1 67 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_if1_67_adc_swap_mux),
+ SND_SOC_DAPM_MUX("IF2 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_if2_adc_swap_mux),
+
+ SND_SOC_DAPM_MUX("ADCDAT Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_adcdat_pin_ctrl),
+
+ /* Audio Interface */
+ SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0,
+ RT5682_I2S1_SDP, RT5682_SEL_ADCDAT_SFT, 1),
+ SND_SOC_DAPM_AIF_OUT("AIF2TX", "AIF2 Capture", 0,
+ RT5682_I2S2_SDP, RT5682_I2S2_PIN_CFG_SFT, 1),
+ SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+
+ /* Output Side */
+ /* DAC mixer before sound effect */
+ SND_SOC_DAPM_MIXER("DAC1 MIXL", SND_SOC_NOPM, 0, 0,
+ rt5682_dac_l_mix, ARRAY_SIZE(rt5682_dac_l_mix)),
+ SND_SOC_DAPM_MIXER("DAC1 MIXR", SND_SOC_NOPM, 0, 0,
+ rt5682_dac_r_mix, ARRAY_SIZE(rt5682_dac_r_mix)),
+
+ /* DAC channel Mux */
+ SND_SOC_DAPM_MUX("DAC L1 Source", SND_SOC_NOPM, 0, 0,
+ &rt5682_alg_dac_l1_mux),
+ SND_SOC_DAPM_MUX("DAC R1 Source", SND_SOC_NOPM, 0, 0,
+ &rt5682_alg_dac_r1_mux),
+
+ /* DAC Mixer */
+ SND_SOC_DAPM_SUPPLY("DAC Stereo1 Filter", RT5682_PWR_DIG_2,
+ RT5682_PWR_DAC_S1F_BIT, 0, set_filter_clk,
+ SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_MIXER("Stereo1 DAC MIXL", SND_SOC_NOPM, 0, 0,
+ rt5682_sto1_dac_l_mix, ARRAY_SIZE(rt5682_sto1_dac_l_mix)),
+ SND_SOC_DAPM_MIXER("Stereo1 DAC MIXR", SND_SOC_NOPM, 0, 0,
+ rt5682_sto1_dac_r_mix, ARRAY_SIZE(rt5682_sto1_dac_r_mix)),
+
+ /* DACs */
+ SND_SOC_DAPM_DAC("DAC L1", NULL, RT5682_PWR_DIG_1,
+ RT5682_PWR_DAC_L1_BIT, 0),
+ SND_SOC_DAPM_DAC("DAC R1", NULL, RT5682_PWR_DIG_1,
+ RT5682_PWR_DAC_R1_BIT, 0),
+ SND_SOC_DAPM_SUPPLY_S("DAC 1 Clock", 3, RT5682_CHOP_DAC,
+ RT5682_CKGEN_DAC1_SFT, 0, NULL, 0),
+
+ /* HPO */
+ SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0, rt5682_hp_event,
+ SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU),
+
+ SND_SOC_DAPM_SUPPLY("HP Amp L", RT5682_PWR_ANLG_1,
+ RT5682_PWR_HA_L_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("HP Amp R", RT5682_PWR_ANLG_1,
+ RT5682_PWR_HA_R_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("Charge Pump", 1, RT5682_DEPOP_1,
+ RT5682_PUMP_EN_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("Capless", 2, RT5682_DEPOP_1,
+ RT5682_CAPLESS_EN_SFT, 0, NULL, 0),
+
+ SND_SOC_DAPM_SWITCH("HPOL Playback", SND_SOC_NOPM, 0, 0,
+ &hpol_switch),
+ SND_SOC_DAPM_SWITCH("HPOR Playback", SND_SOC_NOPM, 0, 0,
+ &hpor_switch),
+
+ /* CLK DET */
+ SND_SOC_DAPM_SUPPLY("CLKDET SYS", RT5682_CLK_DET,
+ RT5682_SYS_CLK_DET_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("CLKDET PLL1", RT5682_CLK_DET,
+ RT5682_PLL1_CLK_DET_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("CLKDET PLL2", RT5682_CLK_DET,
+ RT5682_PLL2_CLK_DET_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("CLKDET", RT5682_CLK_DET,
+ RT5682_POW_CLK_DET_SFT, 0, NULL, 0),
+
+ /* Output Lines */
+ SND_SOC_DAPM_OUTPUT("HPOL"),
+ SND_SOC_DAPM_OUTPUT("HPOR"),
+
+};
+
+static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
+ /* PLL */
+ {"ADC Stereo1 Filter", NULL, "PLL1", is_sys_clk_from_pll1},
+ {"DAC Stereo1 Filter", NULL, "PLL1", is_sys_clk_from_pll1},
+
+ /* ASRC */
+ {"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc},
+ {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc},
+ {"ADC STO1 ASRC", NULL, "AD ASRC"},
+ {"ADC STO1 ASRC", NULL, "CLKDET"},
+ {"DAC STO1 ASRC", NULL, "DA ASRC"},
+ {"DAC STO1 ASRC", NULL, "CLKDET"},
+
+ /* Vref */
+ {"MICBIAS1", NULL, "Vref1"},
+ {"MICBIAS1", NULL, "Vref2"},
+ {"MICBIAS2", NULL, "Vref1"},
+ {"MICBIAS2", NULL, "Vref2"},
+
+ {"CLKDET SYS", NULL, "CLKDET"},
+
+ {"IN1P", NULL, "LDO2"},
+
+ {"BST1 CBJ", NULL, "IN1P"},
+ {"BST1 CBJ", NULL, "CBJ Power"},
+ {"CBJ Power", NULL, "Vref2"},
+
+ {"RECMIX1L", "CBJ Switch", "BST1 CBJ"},
+ {"RECMIX1L", NULL, "RECMIX1L Power"},
+
+ {"ADC1 L", NULL, "RECMIX1L"},
+ {"ADC1 L", NULL, "ADC1 L Power"},
+ {"ADC1 L", NULL, "ADC1 clock"},
+
+ {"DMIC L1", NULL, "DMIC CLK"},
+ {"DMIC L1", NULL, "DMIC1 Power"},
+ {"DMIC R1", NULL, "DMIC CLK"},
+ {"DMIC R1", NULL, "DMIC1 Power"},
+ {"DMIC CLK", NULL, "DMIC ASRC"},
+
+ {"Stereo1 ADC L Mux", "ADC1 L", "ADC1 L"},
+ {"Stereo1 ADC L Mux", "ADC1 R", "ADC1 R"},
+ {"Stereo1 ADC R Mux", "ADC1 L", "ADC1 L"},
+ {"Stereo1 ADC R Mux", "ADC1 R", "ADC1 R"},
+
+ {"Stereo1 ADC L1 Mux", "ADC", "Stereo1 ADC L Mux"},
+ {"Stereo1 ADC L1 Mux", "DAC MIX", "Stereo1 DAC MIXL"},
+ {"Stereo1 ADC L2 Mux", "DMIC", "DMIC L1"},
+ {"Stereo1 ADC L2 Mux", "DAC MIX", "Stereo1 DAC MIXL"},
+
+ {"Stereo1 ADC R1 Mux", "ADC", "Stereo1 ADC R Mux"},
+ {"Stereo1 ADC R1 Mux", "DAC MIX", "Stereo1 DAC MIXR"},
+ {"Stereo1 ADC R2 Mux", "DMIC", "DMIC R1"},
+ {"Stereo1 ADC R2 Mux", "DAC MIX", "Stereo1 DAC MIXR"},
+
+ {"Stereo1 ADC MIXL", "ADC1 Switch", "Stereo1 ADC L1 Mux"},
+ {"Stereo1 ADC MIXL", "ADC2 Switch", "Stereo1 ADC L2 Mux"},
+ {"Stereo1 ADC MIXL", NULL, "ADC Stereo1 Filter"},
+
+ {"Stereo1 ADC MIXR", "ADC1 Switch", "Stereo1 ADC R1 Mux"},
+ {"Stereo1 ADC MIXR", "ADC2 Switch", "Stereo1 ADC R2 Mux"},
+ {"Stereo1 ADC MIXR", NULL, "ADC Stereo1 Filter"},
+
+ {"ADC Stereo1 Filter", NULL, "BTN Detection Mode"},
+
+ {"Stereo1 ADC MIX", NULL, "Stereo1 ADC MIXL"},
+ {"Stereo1 ADC MIX", NULL, "Stereo1 ADC MIXR"},
+
+ {"IF1 01 ADC Swap Mux", "L/R", "Stereo1 ADC MIX"},
+ {"IF1 01 ADC Swap Mux", "L/L", "Stereo1 ADC MIX"},
+ {"IF1 01 ADC Swap Mux", "R/L", "Stereo1 ADC MIX"},
+ {"IF1 01 ADC Swap Mux", "R/R", "Stereo1 ADC MIX"},
+ {"IF1 23 ADC Swap Mux", "L/R", "Stereo1 ADC MIX"},
+ {"IF1 23 ADC Swap Mux", "R/L", "Stereo1 ADC MIX"},
+ {"IF1 23 ADC Swap Mux", "L/L", "Stereo1 ADC MIX"},
+ {"IF1 23 ADC Swap Mux", "R/R", "Stereo1 ADC MIX"},
+ {"IF1 45 ADC Swap Mux", "L/R", "Stereo1 ADC MIX"},
+ {"IF1 45 ADC Swap Mux", "R/L", "Stereo1 ADC MIX"},
+ {"IF1 45 ADC Swap Mux", "L/L", "Stereo1 ADC MIX"},
+ {"IF1 45 ADC Swap Mux", "R/R", "Stereo1 ADC MIX"},
+ {"IF1 67 ADC Swap Mux", "L/R", "Stereo1 ADC MIX"},
+ {"IF1 67 ADC Swap Mux", "R/L", "Stereo1 ADC MIX"},
+ {"IF1 67 ADC Swap Mux", "L/L", "Stereo1 ADC MIX"},
+ {"IF1 67 ADC Swap Mux", "R/R", "Stereo1 ADC MIX"},
+
+ {"IF1_ADC Mux", "Slot 0", "IF1 01 ADC Swap Mux"},
+ {"IF1_ADC Mux", "Slot 2", "IF1 23 ADC Swap Mux"},
+ {"IF1_ADC Mux", "Slot 4", "IF1 45 ADC Swap Mux"},
+ {"IF1_ADC Mux", "Slot 6", "IF1 67 ADC Swap Mux"},
+ {"IF1_ADC Mux", NULL, "I2S1"},
+ {"ADCDAT Mux", "ADCDAT1", "IF1_ADC Mux"},
+ {"AIF1TX", NULL, "ADCDAT Mux"},
+ {"IF2 ADC Swap Mux", "L/R", "Stereo1 ADC MIX"},
+ {"IF2 ADC Swap Mux", "R/L", "Stereo1 ADC MIX"},
+ {"IF2 ADC Swap Mux", "L/L", "Stereo1 ADC MIX"},
+ {"IF2 ADC Swap Mux", "R/R", "Stereo1 ADC MIX"},
+ {"ADCDAT Mux", "ADCDAT2", "IF2 ADC Swap Mux"},
+ {"AIF2TX", NULL, "ADCDAT Mux"},
+
+ {"IF1 DAC1 L", NULL, "AIF1RX"},
+ {"IF1 DAC1 L", NULL, "I2S1"},
+ {"IF1 DAC1 L", NULL, "DAC Stereo1 Filter"},
+ {"IF1 DAC1 R", NULL, "AIF1RX"},
+ {"IF1 DAC1 R", NULL, "I2S1"},
+ {"IF1 DAC1 R", NULL, "DAC Stereo1 Filter"},
+
+ {"DAC1 MIXL", "Stereo ADC Switch", "Stereo1 ADC MIXL"},
+ {"DAC1 MIXL", "DAC1 Switch", "IF1 DAC1 L"},
+ {"DAC1 MIXR", "Stereo ADC Switch", "Stereo1 ADC MIXR"},
+ {"DAC1 MIXR", "DAC1 Switch", "IF1 DAC1 R"},
+
+ {"Stereo1 DAC MIXL", "DAC L1 Switch", "DAC1 MIXL"},
+ {"Stereo1 DAC MIXL", "DAC R1 Switch", "DAC1 MIXR"},
+
+ {"Stereo1 DAC MIXR", "DAC R1 Switch", "DAC1 MIXR"},
+ {"Stereo1 DAC MIXR", "DAC L1 Switch", "DAC1 MIXL"},
+
+ {"DAC L1 Source", "DAC1", "DAC1 MIXL"},
+ {"DAC L1 Source", "Stereo1 DAC Mixer", "Stereo1 DAC MIXL"},
+ {"DAC R1 Source", "DAC1", "DAC1 MIXR"},
+ {"DAC R1 Source", "Stereo1 DAC Mixer", "Stereo1 DAC MIXR"},
+
+ {"DAC L1", NULL, "DAC L1 Source"},
+ {"DAC R1", NULL, "DAC R1 Source"},
+
+ {"DAC L1", NULL, "DAC 1 Clock"},
+ {"DAC R1", NULL, "DAC 1 Clock"},
+
+ {"HP Amp", NULL, "DAC L1"},
+ {"HP Amp", NULL, "DAC R1"},
+ {"HP Amp", NULL, "HP Amp L"},
+ {"HP Amp", NULL, "HP Amp R"},
+ {"HP Amp", NULL, "Capless"},
+ {"HP Amp", NULL, "Charge Pump"},
+ {"HP Amp", NULL, "CLKDET SYS"},
+ {"HP Amp", NULL, "CBJ Power"},
+ {"HP Amp", NULL, "Vref2"},
+ {"HPOL Playback", "Switch", "HP Amp"},
+ {"HPOR Playback", "Switch", "HP Amp"},
+ {"HPOL", NULL, "HPOL Playback"},
+ {"HPOR", NULL, "HPOR Playback"},
+};
+
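+/*
+ * A machine-driver usage sketch (hypothetical slot layout): eight slots
+ * of 32 bits with the first two slots active in each direction would be
+ * requested as
+ *
+ *   snd_soc_dai_set_tdm_slot(dai, 0x3, 0x3, 8, 32);
+ */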
+static int rt5682_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots, int slot_width)
+{
+ struct snd_soc_component *component = dai->component;
+ unsigned int cl, val = 0;
+
+ if (tx_mask || rx_mask)
+ snd_soc_component_update_bits(component, RT5682_TDM_ADDA_CTRL_2,
+ RT5682_TDM_EN, RT5682_TDM_EN);
+ else
+ snd_soc_component_update_bits(component, RT5682_TDM_ADDA_CTRL_2,
+ RT5682_TDM_EN, 0);
+
+ switch (slots) {
+ case 4:
+ val |= RT5682_TDM_TX_CH_4;
+ val |= RT5682_TDM_RX_CH_4;
+ break;
+ case 6:
+ val |= RT5682_TDM_TX_CH_6;
+ val |= RT5682_TDM_RX_CH_6;
+ break;
+ case 8:
+ val |= RT5682_TDM_TX_CH_8;
+ val |= RT5682_TDM_RX_CH_8;
+ break;
+ case 2:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component, RT5682_TDM_CTRL,
+ RT5682_TDM_TX_CH_MASK | RT5682_TDM_RX_CH_MASK, val);
+
+ switch (slot_width) {
+ case 8:
+ if (tx_mask || rx_mask)
+ return -EINVAL;
+ cl = RT5682_I2S1_TX_CHL_8 | RT5682_I2S1_RX_CHL_8;
+ break;
+ case 16:
+ val = RT5682_TDM_CL_16;
+ cl = RT5682_I2S1_TX_CHL_16 | RT5682_I2S1_RX_CHL_16;
+ break;
+ case 20:
+ val = RT5682_TDM_CL_20;
+ cl = RT5682_I2S1_TX_CHL_20 | RT5682_I2S1_RX_CHL_20;
+ break;
+ case 24:
+ val = RT5682_TDM_CL_24;
+ cl = RT5682_I2S1_TX_CHL_24 | RT5682_I2S1_RX_CHL_24;
+ break;
+ case 32:
+ val = RT5682_TDM_CL_32;
+ cl = RT5682_I2S1_TX_CHL_32 | RT5682_I2S1_RX_CHL_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component, RT5682_TDM_TCON_CTRL,
+ RT5682_TDM_CL_MASK, val);
+ snd_soc_component_update_bits(component, RT5682_I2S1_SDP,
+ RT5682_I2S1_TX_CHL_MASK | RT5682_I2S1_RX_CHL_MASK, cl);
+
+ return 0;
+}
+
+static int rt5682_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ unsigned int len_1 = 0, len_2 = 0;
+ int pre_div, frame_size;
+
+ rt5682->lrck[dai->id] = params_rate(params);
+ pre_div = rl6231_get_clk_info(rt5682->sysclk, rt5682->lrck[dai->id]);
+
+ frame_size = snd_soc_params_to_frame_size(params);
+ if (frame_size < 0) {
+ dev_err(component->dev, "Unsupported frame size: %d\n",
+ frame_size);
+ return -EINVAL;
+ }
+
+ dev_dbg(dai->dev, "lrck is %dHz and pre_div is %d for iis %d\n",
+ rt5682->lrck[dai->id], pre_div, dai->id);
+
+ switch (params_width(params)) {
+ case 16:
+ break;
+ case 20:
+ len_1 |= RT5682_I2S1_DL_20;
+ len_2 |= RT5682_I2S2_DL_20;
+ break;
+ case 24:
+ len_1 |= RT5682_I2S1_DL_24;
+ len_2 |= RT5682_I2S2_DL_24;
+ break;
+ case 32:
+ len_1 |= RT5682_I2S1_DL_32;
+ len_2 |= RT5682_I2S2_DL_24;
+ break;
+ case 8:
+ len_1 |= RT5682_I2S1_DL_8;
+ len_2 |= RT5682_I2S2_DL_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dai->id) {
+ case RT5682_AIF1:
+ snd_soc_component_update_bits(component, RT5682_I2S1_SDP,
+ RT5682_I2S1_DL_MASK, len_1);
+ if (rt5682->master[RT5682_AIF1]) {
+ snd_soc_component_update_bits(component,
+ RT5682_ADDA_CLK_1, RT5682_I2S_M_DIV_MASK,
+ pre_div << RT5682_I2S_M_DIV_SFT);
+ }
+ if (params_channels(params) == 1) /* mono mode */
+ snd_soc_component_update_bits(component,
+ RT5682_I2S1_SDP, RT5682_I2S1_MONO_MASK,
+ RT5682_I2S1_MONO_EN);
+ else
+ snd_soc_component_update_bits(component,
+ RT5682_I2S1_SDP, RT5682_I2S1_MONO_MASK,
+ RT5682_I2S1_MONO_DIS);
+ break;
+ case RT5682_AIF2:
+ snd_soc_component_update_bits(component, RT5682_I2S2_SDP,
+ RT5682_I2S2_DL_MASK, len_2);
+ if (rt5682->master[RT5682_AIF2]) {
+ snd_soc_component_update_bits(component,
+ RT5682_I2S_M_CLK_CTRL_1, RT5682_I2S2_M_PD_MASK,
+ pre_div << RT5682_I2S2_M_PD_SFT);
+ }
+ if (params_channels(params) == 1) /* mono mode */
+ snd_soc_component_update_bits(component,
+ RT5682_I2S2_SDP, RT5682_I2S2_MONO_MASK,
+ RT5682_I2S2_MONO_EN);
+ else
+ snd_soc_component_update_bits(component,
+ RT5682_I2S2_SDP, RT5682_I2S2_MONO_MASK,
+ RT5682_I2S2_MONO_DIS);
+ break;
+ default:
+ dev_err(component->dev, "Invalid dai->id: %d\n", dai->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rt5682_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_component *component = dai->component;
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ unsigned int reg_val = 0, tdm_ctrl = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ rt5682->master[dai->id] = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ rt5682->master[dai->id] = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ reg_val |= RT5682_I2S_BP_INV;
+ tdm_ctrl |= RT5682_TDM_S_BP_INV;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ if (dai->id == RT5682_AIF1)
+ tdm_ctrl |= RT5682_TDM_S_LP_INV | RT5682_TDM_M_BP_INV;
+ else
+ return -EINVAL;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ if (dai->id == RT5682_AIF1)
+ tdm_ctrl |= RT5682_TDM_S_BP_INV | RT5682_TDM_S_LP_INV |
+ RT5682_TDM_M_BP_INV | RT5682_TDM_M_LP_INV;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ reg_val |= RT5682_I2S_DF_LEFT;
+ tdm_ctrl |= RT5682_TDM_DF_LEFT;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ reg_val |= RT5682_I2S_DF_PCM_A;
+ tdm_ctrl |= RT5682_TDM_DF_PCM_A;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ reg_val |= RT5682_I2S_DF_PCM_B;
+ tdm_ctrl |= RT5682_TDM_DF_PCM_B;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dai->id) {
+ case RT5682_AIF1:
+ snd_soc_component_update_bits(component, RT5682_I2S1_SDP,
+ RT5682_I2S_DF_MASK, reg_val);
+ snd_soc_component_update_bits(component, RT5682_TDM_TCON_CTRL,
+ RT5682_TDM_MS_MASK | RT5682_TDM_S_BP_MASK |
+ RT5682_TDM_DF_MASK | RT5682_TDM_M_BP_MASK |
+ RT5682_TDM_M_LP_MASK | RT5682_TDM_S_LP_MASK,
+ tdm_ctrl | rt5682->master[dai->id]);
+ break;
+ case RT5682_AIF2:
+ if (rt5682->master[dai->id] == 0)
+ reg_val |= RT5682_I2S2_MS_S;
+ snd_soc_component_update_bits(component, RT5682_I2S2_SDP,
+ RT5682_I2S2_MS_MASK | RT5682_I2S_BP_MASK |
+ RT5682_I2S_DF_MASK, reg_val);
+ break;
+ default:
+ dev_err(component->dev, "Invalid dai->id: %d\n", dai->id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
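+/*
+ * A machine-driver usage sketch (hypothetical frequency): selecting PLL1
+ * as the 24.576 MHz system clock would be requested as
+ *
+ *   snd_soc_component_set_sysclk(component, RT5682_SCLK_S_PLL1, 0,
+ *       24576000, SND_SOC_CLOCK_IN);
+ */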
+static int rt5682_set_component_sysclk(struct snd_soc_component *component,
+ int clk_id, int source, unsigned int freq, int dir)
+{
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ unsigned int reg_val = 0, src = 0;
+
+ if (freq == rt5682->sysclk && clk_id == rt5682->sysclk_src)
+ return 0;
+
+ switch (clk_id) {
+ case RT5682_SCLK_S_MCLK:
+ reg_val |= RT5682_SCLK_SRC_MCLK;
+ src = RT5682_CLK_SRC_MCLK;
+ break;
+ case RT5682_SCLK_S_PLL1:
+ reg_val |= RT5682_SCLK_SRC_PLL1;
+ src = RT5682_CLK_SRC_PLL1;
+ break;
+ case RT5682_SCLK_S_PLL2:
+ reg_val |= RT5682_SCLK_SRC_PLL2;
+ src = RT5682_CLK_SRC_PLL2;
+ break;
+ case RT5682_SCLK_S_RCCLK:
+ reg_val |= RT5682_SCLK_SRC_RCCLK;
+ src = RT5682_CLK_SRC_RCCLK;
+ break;
+ default:
+ dev_err(component->dev, "Invalid clock id (%d)\n", clk_id);
+ return -EINVAL;
+ }
+ snd_soc_component_update_bits(component, RT5682_GLB_CLK,
+ RT5682_SCLK_SRC_MASK, reg_val);
+
+ if (rt5682->master[RT5682_AIF2]) {
+ snd_soc_component_update_bits(component,
+ RT5682_I2S_M_CLK_CTRL_1, RT5682_I2S2_SRC_MASK,
+ src << RT5682_I2S2_SRC_SFT);
+ }
+
+ rt5682->sysclk = freq;
+ rt5682->sysclk_src = clk_id;
+
+ dev_dbg(component->dev, "Sysclk is %dHz and clock id is %d\n",
+ freq, clk_id);
+
+ return 0;
+}
+
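+/*
+ * A machine-driver usage sketch (hypothetical rates): deriving
+ * 24.576 MHz (512 * 48 kHz) from a 3.072 MHz BCLK1 would be requested as
+ *
+ *   snd_soc_component_set_pll(component, 0, RT5682_PLL1_S_BCLK1,
+ *       3072000, 24576000);
+ */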
+static int rt5682_set_component_pll(struct snd_soc_component *component,
+ int pll_id, int source, unsigned int freq_in,
+ unsigned int freq_out)
+{
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ struct rl6231_pll_code pll_code;
+ int ret;
+
+ if (source == rt5682->pll_src && freq_in == rt5682->pll_in &&
+ freq_out == rt5682->pll_out)
+ return 0;
+
+ if (!freq_in || !freq_out) {
+ dev_dbg(component->dev, "PLL disabled\n");
+
+ rt5682->pll_in = 0;
+ rt5682->pll_out = 0;
+ snd_soc_component_update_bits(component, RT5682_GLB_CLK,
+ RT5682_SCLK_SRC_MASK, RT5682_SCLK_SRC_MCLK);
+ return 0;
+ }
+
+ switch (source) {
+ case RT5682_PLL1_S_MCLK:
+ snd_soc_component_update_bits(component, RT5682_GLB_CLK,
+ RT5682_PLL1_SRC_MASK, RT5682_PLL1_SRC_MCLK);
+ break;
+ case RT5682_PLL1_S_BCLK1:
+ snd_soc_component_update_bits(component, RT5682_GLB_CLK,
+ RT5682_PLL1_SRC_MASK, RT5682_PLL1_SRC_BCLK1);
+ break;
+ default:
+ dev_err(component->dev, "Unknown PLL Source %d\n", source);
+ return -EINVAL;
+ }
+
+ ret = rl6231_pll_calc(freq_in, freq_out, &pll_code);
+ if (ret < 0) {
+ dev_err(component->dev, "Unsupport input clock %d\n", freq_in);
+ return ret;
+ }
+
+ dev_dbg(component->dev, "bypass=%d m=%d n=%d k=%d\n",
+ pll_code.m_bp, (pll_code.m_bp ? 0 : pll_code.m_code),
+ pll_code.n_code, pll_code.k_code);
+
+ snd_soc_component_write(component, RT5682_PLL_CTRL_1,
+ pll_code.n_code << RT5682_PLL_N_SFT | pll_code.k_code);
+ snd_soc_component_write(component, RT5682_PLL_CTRL_2,
+ (pll_code.m_bp ? 0 : pll_code.m_code) << RT5682_PLL_M_SFT |
+ pll_code.m_bp << RT5682_PLL_M_BP_SFT | RT5682_PLL_RST);
+
+ rt5682->pll_in = freq_in;
+ rt5682->pll_out = freq_out;
+ rt5682->pll_src = source;
+
+ return 0;
+}
+
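+/*
+ * A machine-driver usage sketch: fixing the AIF2 bit clock at 64 * fs
+ * would be requested as
+ *
+ *   snd_soc_dai_set_bclk_ratio(dai, 64);
+ */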
+static int rt5682_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+ struct snd_soc_component *component = dai->component;
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+
+ rt5682->bclk[dai->id] = ratio;
+
+ switch (ratio) {
+ case 64:
+ snd_soc_component_update_bits(component, RT5682_ADDA_CLK_2,
+ RT5682_I2S2_BCLK_MS2_MASK,
+ RT5682_I2S2_BCLK_MS2_64);
+ break;
+ case 32:
+ snd_soc_component_update_bits(component, RT5682_ADDA_CLK_2,
+ RT5682_I2S2_BCLK_MS2_MASK,
+ RT5682_I2S2_BCLK_MS2_32);
+ break;
+ default:
+ dev_err(dai->dev, "Invalid bclk ratio %d\n", ratio);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rt5682_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+
+ switch (level) {
+ case SND_SOC_BIAS_PREPARE:
+ regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
+ RT5682_PWR_MB | RT5682_PWR_BG,
+ RT5682_PWR_MB | RT5682_PWR_BG);
+ regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
+ RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO,
+ RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
+ RT5682_PWR_MB, RT5682_PWR_MB);
+ regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
+ RT5682_DIG_GATE_CTRL, RT5682_DIG_GATE_CTRL);
+ break;
+ case SND_SOC_BIAS_OFF:
+ regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
+ RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO, 0);
+ regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
+ RT5682_PWR_MB | RT5682_PWR_BG, 0);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int rt5682_probe(struct snd_soc_component *component)
+{
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+
+ rt5682->component = component;
+
+ return 0;
+}
+
+static void rt5682_remove(struct snd_soc_component *component)
+{
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+
+ rt5682_reset(rt5682->regmap);
+}
+
+#ifdef CONFIG_PM
+static int rt5682_suspend(struct snd_soc_component *component)
+{
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+
+ regcache_cache_only(rt5682->regmap, true);
+ regcache_mark_dirty(rt5682->regmap);
+ return 0;
+}
+
+static int rt5682_resume(struct snd_soc_component *component)
+{
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+
+ regcache_cache_only(rt5682->regmap, false);
+ regcache_sync(rt5682->regmap);
+
+ return 0;
+}
+#else
+#define rt5682_suspend NULL
+#define rt5682_resume NULL
+#endif
+
+#define RT5682_STEREO_RATES SNDRV_PCM_RATE_8000_192000
+#define RT5682_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
+
+static const struct snd_soc_dai_ops rt5682_aif1_dai_ops = {
+ .hw_params = rt5682_hw_params,
+ .set_fmt = rt5682_set_dai_fmt,
+ .set_tdm_slot = rt5682_set_tdm_slot,
+};
+
+static const struct snd_soc_dai_ops rt5682_aif2_dai_ops = {
+ .hw_params = rt5682_hw_params,
+ .set_fmt = rt5682_set_dai_fmt,
+ .set_bclk_ratio = rt5682_set_bclk_ratio,
+};
+
+static struct snd_soc_dai_driver rt5682_dai[] = {
+ {
+ .name = "rt5682-aif1",
+ .id = RT5682_AIF1,
+ .playback = {
+ .stream_name = "AIF1 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5682_STEREO_RATES,
+ .formats = RT5682_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF1 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5682_STEREO_RATES,
+ .formats = RT5682_FORMATS,
+ },
+ .ops = &rt5682_aif1_dai_ops,
+ },
+ {
+ .name = "rt5682-aif2",
+ .id = RT5682_AIF2,
+ .capture = {
+ .stream_name = "AIF2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5682_STEREO_RATES,
+ .formats = RT5682_FORMATS,
+ },
+ .ops = &rt5682_aif2_dai_ops,
+ },
+};
+
+static const struct snd_soc_component_driver soc_component_dev_rt5682 = {
+ .probe = rt5682_probe,
+ .remove = rt5682_remove,
+ .suspend = rt5682_suspend,
+ .resume = rt5682_resume,
+ .set_bias_level = rt5682_set_bias_level,
+ .controls = rt5682_snd_controls,
+ .num_controls = ARRAY_SIZE(rt5682_snd_controls),
+ .dapm_widgets = rt5682_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5682_dapm_widgets),
+ .dapm_routes = rt5682_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt5682_dapm_routes),
+ .set_sysclk = rt5682_set_component_sysclk,
+ .set_pll = rt5682_set_component_pll,
+ .set_jack = rt5682_set_jack_detect,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static const struct regmap_config rt5682_regmap = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .max_register = RT5682_I2C_MODE,
+ .volatile_reg = rt5682_volatile_register,
+ .readable_reg = rt5682_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = rt5682_reg,
+ .num_reg_defaults = ARRAY_SIZE(rt5682_reg),
+ .use_single_rw = true,
+};
+
+static const struct i2c_device_id rt5682_i2c_id[] = {
+ {"rt5682", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, rt5682_i2c_id);
+
+static int rt5682_parse_dt(struct rt5682_priv *rt5682, struct device *dev)
+{
+ device_property_read_u32(dev, "realtek,dmic1-data-pin",
+ &rt5682->pdata.dmic1_data_pin);
+ device_property_read_u32(dev, "realtek,dmic1-clk-pin",
+ &rt5682->pdata.dmic1_clk_pin);
+ device_property_read_u32(dev, "realtek,jd-src",
+ &rt5682->pdata.jd_src);
+
+ rt5682->pdata.ldo1_en = of_get_named_gpio(dev->of_node,
+ "realtek,ldo1-en-gpios", 0);
+
+ return 0;
+}
+
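+/*
+ * One-shot headphone calibration, run once at probe time. The register
+ * writes below look like an opaque vendor-provided recipe; the only part
+ * the surrounding flow depends on is the completion poll of bit 15 in
+ * RT5682_HP_CALIB_STA_1.
+ */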
+static void rt5682_calibrate(struct rt5682_priv *rt5682)
+{
+ int value, count;
+
+ mutex_lock(&rt5682->calibrate_mutex);
+
+ rt5682_reset(rt5682->regmap);
+ regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xa2bf);
+ usleep_range(15000, 20000);
+ regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xf2bf);
+ regmap_write(rt5682->regmap, RT5682_MICBIAS_2, 0x0380);
+ regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x8001);
+ regmap_write(rt5682->regmap, RT5682_TEST_MODE_CTRL_1, 0x0000);
+ regmap_write(rt5682->regmap, RT5682_STO1_DAC_MIXER, 0x2080);
+ regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0x4040);
+ regmap_write(rt5682->regmap, RT5682_DEPOP_1, 0x0069);
+ regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x3000);
+ regmap_write(rt5682->regmap, RT5682_HP_CTRL_2, 0x6000);
+ regmap_write(rt5682->regmap, RT5682_HP_CHARGE_PUMP_1, 0x0f26);
+ regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x7f05);
+ regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0x686c);
+ regmap_write(rt5682->regmap, RT5682_CAL_REC, 0x0d0d);
+ regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_9, 0x000f);
+ regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x8d01);
+ regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_2, 0x0321);
+ regmap_write(rt5682->regmap, RT5682_HP_LOGIC_CTRL_2, 0x0004);
+ regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_1, 0x7c00);
+ regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_3, 0x06a1);
+ regmap_write(rt5682->regmap, RT5682_A_DAC1_MUX, 0x0311);
+ regmap_write(rt5682->regmap, RT5682_RESET_HPF_CTRL, 0x0000);
+ regmap_write(rt5682->regmap, RT5682_ADC_STO1_HP_CTRL_1, 0x3320);
+
+ regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_1, 0xfc00);
+
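+ /* Poll the status register; bit 15 clears on completion (up to ~600 ms) */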
+ for (count = 0; count < 60; count++) {
+ regmap_read(rt5682->regmap, RT5682_HP_CALIB_STA_1, &value);
+ if (!(value & 0x8000))
+ break;
+
+ usleep_range(10000, 10005);
+ }
+
+ if (count >= 60)
+ pr_err("HP Calibration Failure\n");
+
+ /* restore settings */
+ regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0xc0c4);
+ regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0000);
+
+ mutex_unlock(&rt5682->calibrate_mutex);
+}
+
+static int rt5682_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct rt5682_platform_data *pdata = dev_get_platdata(&i2c->dev);
+ struct rt5682_priv *rt5682;
+ int i, ret;
+ unsigned int val;
+
+ rt5682 = devm_kzalloc(&i2c->dev, sizeof(struct rt5682_priv),
+ GFP_KERNEL);
+ if (!rt5682)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, rt5682);
+
+ if (pdata)
+ rt5682->pdata = *pdata;
+ else
+ rt5682_parse_dt(rt5682, &i2c->dev);
+
+ rt5682->regmap = devm_regmap_init_i2c(i2c, &rt5682_regmap);
+ if (IS_ERR(rt5682->regmap)) {
+ ret = PTR_ERR(rt5682->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rt5682->supplies); i++)
+ rt5682->supplies[i].supply = rt5682_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(rt5682->supplies),
+ rt5682->supplies);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(rt5682->supplies),
+ rt5682->supplies);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ if (gpio_is_valid(rt5682->pdata.ldo1_en)) {
+ if (devm_gpio_request_one(&i2c->dev, rt5682->pdata.ldo1_en,
+ GPIOF_OUT_INIT_HIGH, "rt5682"))
+ dev_err(&i2c->dev, "Fail gpio_request gpio_ldo\n");
+ }
+
+ /* Sleep for 300 ms minimum */
+ usleep_range(300000, 350000);
+
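+ /* Presumably selects I2C control mode (reg 0xffff) before the ID read */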
+ regmap_write(rt5682->regmap, RT5682_I2C_MODE, 0x1);
+ usleep_range(10000, 15000);
+
+ regmap_read(rt5682->regmap, RT5682_DEVICE_ID, &val);
+ if (val != DEVICE_ID) {
+ dev_err(&i2c->dev, "Device with ID register %x is not rt5682\n", val);
+ return -ENODEV;
+ }
+
+ rt5682_reset(rt5682->regmap);
+
+ rt5682_calibrate(rt5682);
+
+ ret = regmap_register_patch(rt5682->regmap, patch_list,
+ ARRAY_SIZE(patch_list));
+ if (ret != 0)
+ dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
+
+ regmap_write(rt5682->regmap, RT5682_DEPOP_1, 0x0000);
+
+ /* DMIC pin */
+ if (rt5682->pdata.dmic1_data_pin != RT5682_DMIC1_NULL) {
+ switch (rt5682->pdata.dmic1_data_pin) {
+ case RT5682_DMIC1_DATA_GPIO2: /* share with LRCK2 */
+ regmap_update_bits(rt5682->regmap, RT5682_DMIC_CTRL_1,
+ RT5682_DMIC_1_DP_MASK, RT5682_DMIC_1_DP_GPIO2);
+ regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
+ RT5682_GP2_PIN_MASK, RT5682_GP2_PIN_DMIC_SDA);
+ break;
+
+ case RT5682_DMIC1_DATA_GPIO5: /* share with DACDAT1 */
+ regmap_update_bits(rt5682->regmap, RT5682_DMIC_CTRL_1,
+ RT5682_DMIC_1_DP_MASK, RT5682_DMIC_1_DP_GPIO5);
+ regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
+ RT5682_GP5_PIN_MASK, RT5682_GP5_PIN_DMIC_SDA);
+ break;
+
+ default:
+ dev_warn(&i2c->dev, "invalid DMIC_DAT pin\n");
+ break;
+ }
+
+ switch (rt5682->pdata.dmic1_clk_pin) {
+ case RT5682_DMIC1_CLK_GPIO1: /* share with IRQ */
+ regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
+ RT5682_GP1_PIN_MASK, RT5682_GP1_PIN_DMIC_CLK);
+ break;
+
+ case RT5682_DMIC1_CLK_GPIO3: /* share with BCLK2 */
+ regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
+ RT5682_GP3_PIN_MASK, RT5682_GP3_PIN_DMIC_CLK);
+ break;
+
+ default:
+ dev_warn(&i2c->dev, "invalid DMIC_CLK pin\n");
+ break;
+ }
+ }
+
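+ /* LDO1 output (DVO_12, presumably 1.2 V) and 5x headphone driver gain */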
+ regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
+ RT5682_LDO1_DVO_MASK | RT5682_HP_DRIVER_MASK,
+ RT5682_LDO1_DVO_12 | RT5682_HP_DRIVER_5X);
+ regmap_write(rt5682->regmap, RT5682_MICBIAS_2, 0x0380);
+ regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
+ RT5682_GP4_PIN_MASK | RT5682_GP5_PIN_MASK,
+ RT5682_GP4_PIN_ADCDAT1 | RT5682_GP5_PIN_DACDAT1);
+ regmap_write(rt5682->regmap, RT5682_TEST_MODE_CTRL_1, 0x0000);
+
+ INIT_DELAYED_WORK(&rt5682->jack_detect_work,
+ rt5682_jack_detect_handler);
+ INIT_DELAYED_WORK(&rt5682->jd_check_work,
+ rt5682_jd_check_handler);
+
+ mutex_init(&rt5682->calibrate_mutex);
+
+ if (i2c->irq) {
+ ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+ rt5682_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+ | IRQF_ONESHOT, "rt5682", rt5682);
+ if (ret)
+ dev_err(&i2c->dev, "Failed to request IRQ: %d\n", ret);
+ }
+
+ return devm_snd_soc_register_component(&i2c->dev,
+ &soc_component_dev_rt5682,
+ rt5682_dai, ARRAY_SIZE(rt5682_dai));
+}
+
+static void rt5682_i2c_shutdown(struct i2c_client *client)
+{
+ struct rt5682_priv *rt5682 = i2c_get_clientdata(client);
+
+ rt5682_reset(rt5682->regmap);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id rt5682_of_match[] = {
+ {.compatible = "realtek,rt5682i"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, rt5682_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id rt5682_acpi_match[] = {
+ {"10EC5682", 0,},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, rt5682_acpi_match);
+#endif
+
+static struct i2c_driver rt5682_i2c_driver = {
+ .driver = {
+ .name = "rt5682",
+ .of_match_table = of_match_ptr(rt5682_of_match),
+ .acpi_match_table = ACPI_PTR(rt5682_acpi_match),
+ },
+ .probe = rt5682_i2c_probe,
+ .shutdown = rt5682_i2c_shutdown,
+ .id_table = rt5682_i2c_id,
+};
+module_i2c_driver(rt5682_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC RT5682 driver");
+MODULE_AUTHOR("Bard Liao <bardliao@realtek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
new file mode 100644
index 000000000000..8068140ebe3f
--- /dev/null
+++ b/sound/soc/codecs/rt5682.h
@@ -0,0 +1,1317 @@
+/*
+ * rt5682.h -- RT5682/RT5658 ALSA SoC audio driver
+ *
+ * Copyright 2018 Realtek Microelectronics
+ * Author: Bard Liao <bardliao@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5682_H__
+#define __RT5682_H__
+
+#include <sound/rt5682.h>
+
+#define DEVICE_ID 0x6530
+
+/* Info */
+#define RT5682_RESET 0x0000
+#define RT5682_VERSION_ID 0x00fd
+#define RT5682_VENDOR_ID 0x00fe
+#define RT5682_DEVICE_ID 0x00ff
+/* I/O - Output */
+#define RT5682_HP_CTRL_1 0x0002
+#define RT5682_HP_CTRL_2 0x0003
+#define RT5682_HPL_GAIN 0x0005
+#define RT5682_HPR_GAIN 0x0006
+
+#define RT5682_I2C_CTRL 0x0008
+
+/* I/O - Input */
+#define RT5682_CBJ_BST_CTRL 0x000b
+#define RT5682_CBJ_CTRL_1 0x0010
+#define RT5682_CBJ_CTRL_2 0x0011
+#define RT5682_CBJ_CTRL_3 0x0012
+#define RT5682_CBJ_CTRL_4 0x0013
+#define RT5682_CBJ_CTRL_5 0x0014
+#define RT5682_CBJ_CTRL_6 0x0015
+#define RT5682_CBJ_CTRL_7 0x0016
+/* I/O - ADC/DAC/DMIC */
+#define RT5682_DAC1_DIG_VOL 0x0019
+#define RT5682_STO1_ADC_DIG_VOL 0x001c
+#define RT5682_STO1_ADC_BOOST 0x001f
+#define RT5682_HP_IMP_GAIN_1 0x0022
+#define RT5682_HP_IMP_GAIN_2 0x0023
+/* Mixer - D-D */
+#define RT5682_SIDETONE_CTRL 0x0024
+#define RT5682_STO1_ADC_MIXER 0x0026
+#define RT5682_AD_DA_MIXER 0x0029
+#define RT5682_STO1_DAC_MIXER 0x002a
+#define RT5682_A_DAC1_MUX 0x002b
+#define RT5682_DIG_INF2_DATA 0x0030
+/* Mixer - ADC */
+#define RT5682_REC_MIXER 0x003c
+#define RT5682_CAL_REC 0x0044
+#define RT5682_ALC_BACK_GAIN 0x0049
+/* Power */
+#define RT5682_PWR_DIG_1 0x0061
+#define RT5682_PWR_DIG_2 0x0062
+#define RT5682_PWR_ANLG_1 0x0063
+#define RT5682_PWR_ANLG_2 0x0064
+#define RT5682_PWR_ANLG_3 0x0065
+#define RT5682_PWR_MIXER 0x0066
+#define RT5682_PWR_VOL 0x0067
+/* Clock Detect */
+#define RT5682_CLK_DET 0x006b
+/* Filter Auto Reset */
+#define RT5682_RESET_LPF_CTRL 0x006c
+#define RT5682_RESET_HPF_CTRL 0x006d
+/* DMIC */
+#define RT5682_DMIC_CTRL_1 0x006e
+/* Format - ADC/DAC */
+#define RT5682_I2S1_SDP 0x0070
+#define RT5682_I2S2_SDP 0x0071
+#define RT5682_ADDA_CLK_1 0x0073
+#define RT5682_ADDA_CLK_2 0x0074
+#define RT5682_I2S1_F_DIV_CTRL_1 0x0075
+#define RT5682_I2S1_F_DIV_CTRL_2 0x0076
+/* Format - TDM Control */
+#define RT5682_TDM_CTRL 0x0079
+#define RT5682_TDM_ADDA_CTRL_1 0x007a
+#define RT5682_TDM_ADDA_CTRL_2 0x007b
+#define RT5682_DATA_SEL_CTRL_1 0x007c
+#define RT5682_TDM_TCON_CTRL 0x007e
+/* Function - Analog */
+#define RT5682_GLB_CLK 0x0080
+#define RT5682_PLL_CTRL_1 0x0081
+#define RT5682_PLL_CTRL_2 0x0082
+#define RT5682_PLL_TRACK_1 0x0083
+#define RT5682_PLL_TRACK_2 0x0084
+#define RT5682_PLL_TRACK_3 0x0085
+#define RT5682_PLL_TRACK_4 0x0086
+#define RT5682_PLL_TRACK_5 0x0087
+#define RT5682_PLL_TRACK_6 0x0088
+#define RT5682_PLL_TRACK_11 0x008c
+#define RT5682_SDW_REF_CLK 0x008d
+#define RT5682_DEPOP_1 0x008e
+#define RT5682_DEPOP_2 0x008f
+#define RT5682_HP_CHARGE_PUMP_1 0x0091
+#define RT5682_HP_CHARGE_PUMP_2 0x0092
+#define RT5682_MICBIAS_1 0x0093
+#define RT5682_MICBIAS_2 0x0094
+#define RT5682_PLL_TRACK_12 0x0098
+#define RT5682_PLL_TRACK_14 0x009a
+#define RT5682_PLL2_CTRL_1 0x009b
+#define RT5682_PLL2_CTRL_2 0x009c
+#define RT5682_PLL2_CTRL_3 0x009d
+#define RT5682_PLL2_CTRL_4 0x009e
+#define RT5682_RC_CLK_CTRL 0x009f
+#define RT5682_I2S_M_CLK_CTRL_1 0x00a0
+#define RT5682_I2S2_F_DIV_CTRL_1 0x00a3
+#define RT5682_I2S2_F_DIV_CTRL_2 0x00a4
+/* Function - Digital */
+#define RT5682_EQ_CTRL_1 0x00ae
+#define RT5682_EQ_CTRL_2 0x00af
+#define RT5682_IRQ_CTRL_1 0x00b6
+#define RT5682_IRQ_CTRL_2 0x00b7
+#define RT5682_IRQ_CTRL_3 0x00b8
+#define RT5682_IRQ_CTRL_4 0x00b9
+#define RT5682_INT_ST_1 0x00be
+#define RT5682_GPIO_CTRL_1 0x00c0
+#define RT5682_GPIO_CTRL_2 0x00c1
+#define RT5682_GPIO_CTRL_3 0x00c2
+#define RT5682_HP_AMP_DET_CTRL_1 0x00d0
+#define RT5682_HP_AMP_DET_CTRL_2 0x00d1
+#define RT5682_MID_HP_AMP_DET 0x00d2
+#define RT5682_LOW_HP_AMP_DET 0x00d3
+#define RT5682_DELAY_BUF_CTRL 0x00d4
+#define RT5682_SV_ZCD_1 0x00d9
+#define RT5682_SV_ZCD_2 0x00da
+#define RT5682_IL_CMD_1 0x00db
+#define RT5682_IL_CMD_2 0x00dc
+#define RT5682_IL_CMD_3 0x00dd
+#define RT5682_IL_CMD_4 0x00de
+#define RT5682_IL_CMD_5 0x00df
+#define RT5682_IL_CMD_6 0x00e0
+#define RT5682_4BTN_IL_CMD_1 0x00e2
+#define RT5682_4BTN_IL_CMD_2 0x00e3
+#define RT5682_4BTN_IL_CMD_3 0x00e4
+#define RT5682_4BTN_IL_CMD_4 0x00e5
+#define RT5682_4BTN_IL_CMD_5 0x00e6
+#define RT5682_4BTN_IL_CMD_6 0x00e7
+#define RT5682_4BTN_IL_CMD_7 0x00e8
+
+#define RT5682_ADC_STO1_HP_CTRL_1 0x00ea
+#define RT5682_ADC_STO1_HP_CTRL_2 0x00eb
+#define RT5682_AJD1_CTRL 0x00f0
+#define RT5682_JD1_THD 0x00f1
+#define RT5682_JD2_THD 0x00f2
+#define RT5682_JD_CTRL_1 0x00f6
+/* General Control */
+#define RT5682_DUMMY_1 0x00fa
+#define RT5682_DUMMY_2 0x00fb
+#define RT5682_DUMMY_3 0x00fc
+
+#define RT5682_DAC_ADC_DIG_VOL1 0x0100
+#define RT5682_BIAS_CUR_CTRL_2 0x010b
+#define RT5682_BIAS_CUR_CTRL_3 0x010c
+#define RT5682_BIAS_CUR_CTRL_4 0x010d
+#define RT5682_BIAS_CUR_CTRL_5 0x010e
+#define RT5682_BIAS_CUR_CTRL_6 0x010f
+#define RT5682_BIAS_CUR_CTRL_7 0x0110
+#define RT5682_BIAS_CUR_CTRL_8 0x0111
+#define RT5682_BIAS_CUR_CTRL_9 0x0112
+#define RT5682_BIAS_CUR_CTRL_10 0x0113
+#define RT5682_VREF_REC_OP_FB_CAP_CTRL 0x0117
+#define RT5682_CHARGE_PUMP_1 0x0125
+#define RT5682_DIG_IN_CTRL_1 0x0132
+#define RT5682_PAD_DRIVING_CTRL 0x0136
+#define RT5682_SOFT_RAMP_DEPOP 0x0138
+#define RT5682_CHOP_DAC 0x013a
+#define RT5682_CHOP_ADC 0x013b
+#define RT5682_CALIB_ADC_CTRL 0x013c
+#define RT5682_VOL_TEST 0x013f
+#define RT5682_SPKVDD_DET_STA 0x0142
+#define RT5682_TEST_MODE_CTRL_1 0x0145
+#define RT5682_TEST_MODE_CTRL_2 0x0146
+#define RT5682_TEST_MODE_CTRL_3 0x0147
+#define RT5682_TEST_MODE_CTRL_4 0x0148
+#define RT5682_TEST_MODE_CTRL_5 0x0149
+#define RT5682_PLL1_INTERNAL 0x0150
+#define RT5682_PLL2_INTERNAL 0x0151
+#define RT5682_STO_NG2_CTRL_1 0x0160
+#define RT5682_STO_NG2_CTRL_2 0x0161
+#define RT5682_STO_NG2_CTRL_3 0x0162
+#define RT5682_STO_NG2_CTRL_4 0x0163
+#define RT5682_STO_NG2_CTRL_5 0x0164
+#define RT5682_STO_NG2_CTRL_6 0x0165
+#define RT5682_STO_NG2_CTRL_7 0x0166
+#define RT5682_STO_NG2_CTRL_8 0x0167
+#define RT5682_STO_NG2_CTRL_9 0x0168
+#define RT5682_STO_NG2_CTRL_10 0x0169
+#define RT5682_STO1_DAC_SIL_DET 0x0190
+#define RT5682_SIL_PSV_CTRL1 0x0194
+#define RT5682_SIL_PSV_CTRL2 0x0195
+#define RT5682_SIL_PSV_CTRL3 0x0197
+#define RT5682_SIL_PSV_CTRL4 0x0198
+#define RT5682_SIL_PSV_CTRL5 0x0199
+#define RT5682_HP_IMP_SENS_CTRL_01 0x01af
+#define RT5682_HP_IMP_SENS_CTRL_02 0x01b0
+#define RT5682_HP_IMP_SENS_CTRL_03 0x01b1
+#define RT5682_HP_IMP_SENS_CTRL_04 0x01b2
+#define RT5682_HP_IMP_SENS_CTRL_05 0x01b3
+#define RT5682_HP_IMP_SENS_CTRL_06 0x01b4
+#define RT5682_HP_IMP_SENS_CTRL_07 0x01b5
+#define RT5682_HP_IMP_SENS_CTRL_08 0x01b6
+#define RT5682_HP_IMP_SENS_CTRL_09 0x01b7
+#define RT5682_HP_IMP_SENS_CTRL_10 0x01b8
+#define RT5682_HP_IMP_SENS_CTRL_11 0x01b9
+#define RT5682_HP_IMP_SENS_CTRL_12 0x01ba
+#define RT5682_HP_IMP_SENS_CTRL_13 0x01bb
+#define RT5682_HP_IMP_SENS_CTRL_14 0x01bc
+#define RT5682_HP_IMP_SENS_CTRL_15 0x01bd
+#define RT5682_HP_IMP_SENS_CTRL_16 0x01be
+#define RT5682_HP_IMP_SENS_CTRL_17 0x01bf
+#define RT5682_HP_IMP_SENS_CTRL_18 0x01c0
+#define RT5682_HP_IMP_SENS_CTRL_19 0x01c1
+#define RT5682_HP_IMP_SENS_CTRL_20 0x01c2
+#define RT5682_HP_IMP_SENS_CTRL_21 0x01c3
+#define RT5682_HP_IMP_SENS_CTRL_22 0x01c4
+#define RT5682_HP_IMP_SENS_CTRL_23 0x01c5
+#define RT5682_HP_IMP_SENS_CTRL_24 0x01c6
+#define RT5682_HP_IMP_SENS_CTRL_25 0x01c7
+#define RT5682_HP_IMP_SENS_CTRL_26 0x01c8
+#define RT5682_HP_IMP_SENS_CTRL_27 0x01c9
+#define RT5682_HP_IMP_SENS_CTRL_28 0x01ca
+#define RT5682_HP_IMP_SENS_CTRL_29 0x01cb
+#define RT5682_HP_IMP_SENS_CTRL_30 0x01cc
+#define RT5682_HP_IMP_SENS_CTRL_31 0x01cd
+#define RT5682_HP_IMP_SENS_CTRL_32 0x01ce
+#define RT5682_HP_IMP_SENS_CTRL_33 0x01cf
+#define RT5682_HP_IMP_SENS_CTRL_34 0x01d0
+#define RT5682_HP_IMP_SENS_CTRL_35 0x01d1
+#define RT5682_HP_IMP_SENS_CTRL_36 0x01d2
+#define RT5682_HP_IMP_SENS_CTRL_37 0x01d3
+#define RT5682_HP_IMP_SENS_CTRL_38 0x01d4
+#define RT5682_HP_IMP_SENS_CTRL_39 0x01d5
+#define RT5682_HP_IMP_SENS_CTRL_40 0x01d6
+#define RT5682_HP_IMP_SENS_CTRL_41 0x01d7
+#define RT5682_HP_IMP_SENS_CTRL_42 0x01d8
+#define RT5682_HP_IMP_SENS_CTRL_43 0x01d9
+#define RT5682_HP_LOGIC_CTRL_1 0x01da
+#define RT5682_HP_LOGIC_CTRL_2 0x01db
+#define RT5682_HP_LOGIC_CTRL_3 0x01dc
+#define RT5682_HP_CALIB_CTRL_1 0x01de
+#define RT5682_HP_CALIB_CTRL_2 0x01df
+#define RT5682_HP_CALIB_CTRL_3 0x01e0
+#define RT5682_HP_CALIB_CTRL_4 0x01e1
+#define RT5682_HP_CALIB_CTRL_5 0x01e2
+#define RT5682_HP_CALIB_CTRL_6 0x01e3
+#define RT5682_HP_CALIB_CTRL_7 0x01e4
+#define RT5682_HP_CALIB_CTRL_9 0x01e6
+#define RT5682_HP_CALIB_CTRL_10 0x01e7
+#define RT5682_HP_CALIB_CTRL_11 0x01e8
+#define RT5682_HP_CALIB_STA_1 0x01ea
+#define RT5682_HP_CALIB_STA_2 0x01eb
+#define RT5682_HP_CALIB_STA_3 0x01ec
+#define RT5682_HP_CALIB_STA_4 0x01ed
+#define RT5682_HP_CALIB_STA_5 0x01ee
+#define RT5682_HP_CALIB_STA_6 0x01ef
+#define RT5682_HP_CALIB_STA_7 0x01f0
+#define RT5682_HP_CALIB_STA_8 0x01f1
+#define RT5682_HP_CALIB_STA_9 0x01f2
+#define RT5682_HP_CALIB_STA_10 0x01f3
+#define RT5682_HP_CALIB_STA_11 0x01f4
+#define RT5682_SAR_IL_CMD_1 0x0210
+#define RT5682_SAR_IL_CMD_2 0x0211
+#define RT5682_SAR_IL_CMD_3 0x0212
+#define RT5682_SAR_IL_CMD_4 0x0213
+#define RT5682_SAR_IL_CMD_5 0x0214
+#define RT5682_SAR_IL_CMD_6 0x0215
+#define RT5682_SAR_IL_CMD_7 0x0216
+#define RT5682_SAR_IL_CMD_8 0x0217
+#define RT5682_SAR_IL_CMD_9 0x0218
+#define RT5682_SAR_IL_CMD_10 0x0219
+#define RT5682_SAR_IL_CMD_11 0x021a
+#define RT5682_SAR_IL_CMD_12 0x021b
+#define RT5682_SAR_IL_CMD_13 0x021c
+#define RT5682_EFUSE_CTRL_1 0x0250
+#define RT5682_EFUSE_CTRL_2 0x0251
+#define RT5682_EFUSE_CTRL_3 0x0252
+#define RT5682_EFUSE_CTRL_4 0x0253
+#define RT5682_EFUSE_CTRL_5 0x0254
+#define RT5682_EFUSE_CTRL_6 0x0255
+#define RT5682_EFUSE_CTRL_7 0x0256
+#define RT5682_EFUSE_CTRL_8 0x0257
+#define RT5682_EFUSE_CTRL_9 0x0258
+#define RT5682_EFUSE_CTRL_10 0x0259
+#define RT5682_EFUSE_CTRL_11 0x025a
+#define RT5682_JD_TOP_VC_VTRL 0x0270
+#define RT5682_DRC1_CTRL_0 0x02ff
+#define RT5682_DRC1_CTRL_1 0x0300
+#define RT5682_DRC1_CTRL_2 0x0301
+#define RT5682_DRC1_CTRL_3 0x0302
+#define RT5682_DRC1_CTRL_4 0x0303
+#define RT5682_DRC1_CTRL_5 0x0304
+#define RT5682_DRC1_CTRL_6 0x0305
+#define RT5682_DRC1_HARD_LMT_CTRL_1 0x0306
+#define RT5682_DRC1_HARD_LMT_CTRL_2 0x0307
+#define RT5682_DRC1_PRIV_1 0x0310
+#define RT5682_DRC1_PRIV_2 0x0311
+#define RT5682_DRC1_PRIV_3 0x0312
+#define RT5682_DRC1_PRIV_4 0x0313
+#define RT5682_DRC1_PRIV_5 0x0314
+#define RT5682_DRC1_PRIV_6 0x0315
+#define RT5682_DRC1_PRIV_7 0x0316
+#define RT5682_DRC1_PRIV_8 0x0317
+#define RT5682_EQ_AUTO_RCV_CTRL1 0x03c0
+#define RT5682_EQ_AUTO_RCV_CTRL2 0x03c1
+#define RT5682_EQ_AUTO_RCV_CTRL3 0x03c2
+#define RT5682_EQ_AUTO_RCV_CTRL4 0x03c3
+#define RT5682_EQ_AUTO_RCV_CTRL5 0x03c4
+#define RT5682_EQ_AUTO_RCV_CTRL6 0x03c5
+#define RT5682_EQ_AUTO_RCV_CTRL7 0x03c6
+#define RT5682_EQ_AUTO_RCV_CTRL8 0x03c7
+#define RT5682_EQ_AUTO_RCV_CTRL9 0x03c8
+#define RT5682_EQ_AUTO_RCV_CTRL10 0x03c9
+#define RT5682_EQ_AUTO_RCV_CTRL11 0x03ca
+#define RT5682_EQ_AUTO_RCV_CTRL12 0x03cb
+#define RT5682_EQ_AUTO_RCV_CTRL13 0x03cc
+#define RT5682_ADC_L_EQ_LPF1_A1 0x03d0
+#define RT5682_R_EQ_LPF1_A1 0x03d1
+#define RT5682_L_EQ_LPF1_H0 0x03d2
+#define RT5682_R_EQ_LPF1_H0 0x03d3
+#define RT5682_L_EQ_BPF1_A1 0x03d4
+#define RT5682_R_EQ_BPF1_A1 0x03d5
+#define RT5682_L_EQ_BPF1_A2 0x03d6
+#define RT5682_R_EQ_BPF1_A2 0x03d7
+#define RT5682_L_EQ_BPF1_H0 0x03d8
+#define RT5682_R_EQ_BPF1_H0 0x03d9
+#define RT5682_L_EQ_BPF2_A1 0x03da
+#define RT5682_R_EQ_BPF2_A1 0x03db
+#define RT5682_L_EQ_BPF2_A2 0x03dc
+#define RT5682_R_EQ_BPF2_A2 0x03dd
+#define RT5682_L_EQ_BPF2_H0 0x03de
+#define RT5682_R_EQ_BPF2_H0 0x03df
+#define RT5682_L_EQ_BPF3_A1 0x03e0
+#define RT5682_R_EQ_BPF3_A1 0x03e1
+#define RT5682_L_EQ_BPF3_A2 0x03e2
+#define RT5682_R_EQ_BPF3_A2 0x03e3
+#define RT5682_L_EQ_BPF3_H0 0x03e4
+#define RT5682_R_EQ_BPF3_H0 0x03e5
+#define RT5682_L_EQ_BPF4_A1 0x03e6
+#define RT5682_R_EQ_BPF4_A1 0x03e7
+#define RT5682_L_EQ_BPF4_A2 0x03e8
+#define RT5682_R_EQ_BPF4_A2 0x03e9
+#define RT5682_L_EQ_BPF4_H0 0x03ea
+#define RT5682_R_EQ_BPF4_H0 0x03eb
+#define RT5682_L_EQ_HPF1_A1 0x03ec
+#define RT5682_R_EQ_HPF1_A1 0x03ed
+#define RT5682_L_EQ_HPF1_H0 0x03ee
+#define RT5682_R_EQ_HPF1_H0 0x03ef
+#define RT5682_L_EQ_PRE_VOL 0x03f0
+#define RT5682_R_EQ_PRE_VOL 0x03f1
+#define RT5682_L_EQ_POST_VOL 0x03f2
+#define RT5682_R_EQ_POST_VOL 0x03f3
+#define RT5682_I2C_MODE 0xffff
+
+/* global definition */
+#define RT5682_L_MUTE (0x1 << 15)
+#define RT5682_L_MUTE_SFT 15
+#define RT5682_VOL_L_MUTE (0x1 << 14)
+#define RT5682_VOL_L_SFT 14
+#define RT5682_R_MUTE (0x1 << 7)
+#define RT5682_R_MUTE_SFT 7
+#define RT5682_VOL_R_MUTE (0x1 << 6)
+#define RT5682_VOL_R_SFT 6
+#define RT5682_L_VOL_MASK (0x3f << 8)
+#define RT5682_L_VOL_SFT 8
+#define RT5682_R_VOL_MASK (0x3f)
+#define RT5682_R_VOL_SFT 0
+
+/* Headphone Amp L/R Analog Gain and Digital NG2 Gain Control (0x0005, 0x0006) */
+#define RT5682_G_HP (0xf << 8)
+#define RT5682_G_HP_SFT 8
+#define RT5682_G_STO_DA_DMIX (0xf)
+#define RT5682_G_STO_DA_SFT 0
+
+/* CBJ Control (0x000b) */
+#define RT5682_BST_CBJ_MASK (0xf << 8)
+#define RT5682_BST_CBJ_SFT 8
+
+/* Embedded Jack and Type Detection Control 1 (0x0010) */
+#define RT5682_EMB_JD_EN (0x1 << 15)
+#define RT5682_EMB_JD_EN_SFT 15
+#define RT5682_EMB_JD_RST (0x1 << 14)
+#define RT5682_JD_MODE (0x1 << 13)
+#define RT5682_JD_MODE_SFT 13
+#define RT5682_DET_TYPE (0x1 << 12)
+#define RT5682_DET_TYPE_SFT 12
+#define RT5682_POLA_EXT_JD_MASK (0x1 << 11)
+#define RT5682_POLA_EXT_JD_LOW (0x1 << 11)
+#define RT5682_POLA_EXT_JD_HIGH (0x0 << 11)
+#define RT5682_EXT_JD_DIG (0x1 << 9)
+#define RT5682_POL_FAST_OFF_MASK (0x1 << 8)
+#define RT5682_POL_FAST_OFF_HIGH (0x1 << 8)
+#define RT5682_POL_FAST_OFF_LOW (0x0 << 8)
+#define RT5682_FAST_OFF_MASK (0x1 << 7)
+#define RT5682_FAST_OFF_EN (0x1 << 7)
+#define RT5682_FAST_OFF_DIS (0x0 << 7)
+#define RT5682_VREF_POW_MASK (0x1 << 6)
+#define RT5682_VREF_POW_FSM (0x0 << 6)
+#define RT5682_VREF_POW_REG (0x1 << 6)
+#define RT5682_MB1_PATH_MASK (0x1 << 5)
+#define RT5682_CTRL_MB1_REG (0x1 << 5)
+#define RT5682_CTRL_MB1_FSM (0x0 << 5)
+#define RT5682_MB2_PATH_MASK (0x1 << 4)
+#define RT5682_CTRL_MB2_REG (0x1 << 4)
+#define RT5682_CTRL_MB2_FSM (0x0 << 4)
+#define RT5682_TRIG_JD_MASK (0x1 << 3)
+#define RT5682_TRIG_JD_HIGH (0x1 << 3)
+#define RT5682_TRIG_JD_LOW (0x0 << 3)
+#define RT5682_MIC_CAP_MASK (0x1 << 1)
+#define RT5682_MIC_CAP_HS (0x1 << 1)
+#define RT5682_MIC_CAP_HP (0x0 << 1)
+#define RT5682_MIC_CAP_SRC_MASK (0x1)
+#define RT5682_MIC_CAP_SRC_REG (0x1)
+#define RT5682_MIC_CAP_SRC_ANA (0x0)
+
+/* Embedded Jack and Type Detection Control 2 (0x0011) */
+#define RT5682_EXT_JD_SRC (0x7 << 4)
+#define RT5682_EXT_JD_SRC_SFT 4
+#define RT5682_EXT_JD_SRC_GPIO_JD1 (0x0 << 4)
+#define RT5682_EXT_JD_SRC_GPIO_JD2 (0x1 << 4)
+#define RT5682_EXT_JD_SRC_JDH (0x2 << 4)
+#define RT5682_EXT_JD_SRC_JDL (0x3 << 4)
+#define RT5682_EXT_JD_SRC_MANUAL (0x4 << 4)
+#define RT5682_JACK_TYPE_MASK (0x3)
+
+/* Combo Jack and Type Detection Control 3 (0x0012) */
+#define RT5682_CBJ_IN_BUF_EN (0x1 << 7)
+
+/* Combo Jack and Type Detection Control 4 (0x0013) */
+#define RT5682_SEL_SHT_MID_TON_MASK (0x3 << 12)
+#define RT5682_SEL_SHT_MID_TON_2 (0x0 << 12)
+#define RT5682_SEL_SHT_MID_TON_3 (0x1 << 12)
+#define RT5682_CBJ_JD_TEST_MASK (0x1 << 6)
+#define RT5682_CBJ_JD_TEST_NORM (0x0 << 6)
+#define RT5682_CBJ_JD_TEST_MODE (0x1 << 6)
+
+/* DAC1 Digital Volume (0x0019) */
+#define RT5682_DAC_L1_VOL_MASK (0xff << 8)
+#define RT5682_DAC_L1_VOL_SFT 8
+#define RT5682_DAC_R1_VOL_MASK (0xff)
+#define RT5682_DAC_R1_VOL_SFT 0
+
+/* ADC Digital Volume Control (0x001c) */
+#define RT5682_ADC_L_VOL_MASK (0x7f << 8)
+#define RT5682_ADC_L_VOL_SFT 8
+#define RT5682_ADC_R_VOL_MASK (0x7f)
+#define RT5682_ADC_R_VOL_SFT 0
+
+/* Stereo1 ADC Boost Gain Control (0x001f) */
+#define RT5682_STO1_ADC_L_BST_MASK (0x3 << 14)
+#define RT5682_STO1_ADC_L_BST_SFT 14
+#define RT5682_STO1_ADC_R_BST_MASK (0x3 << 12)
+#define RT5682_STO1_ADC_R_BST_SFT 12
+
+/* Sidetone Control (0x0024) */
+#define RT5682_ST_SRC_SEL (0x1 << 8)
+#define RT5682_ST_SRC_SFT 8
+#define RT5682_ST_EN_MASK (0x1 << 6)
+#define RT5682_ST_DIS (0x0 << 6)
+#define RT5682_ST_EN (0x1 << 6)
+#define RT5682_ST_EN_SFT 6
+
+/* Stereo1 ADC Mixer Control (0x0026) */
+#define RT5682_M_STO1_ADC_L1 (0x1 << 15)
+#define RT5682_M_STO1_ADC_L1_SFT 15
+#define RT5682_M_STO1_ADC_L2 (0x1 << 14)
+#define RT5682_M_STO1_ADC_L2_SFT 14
+#define RT5682_STO1_ADC1L_SRC_MASK (0x1 << 13)
+#define RT5682_STO1_ADC1L_SRC_SFT 13
+#define RT5682_STO1_ADC1_SRC_ADC (0x1 << 13)
+#define RT5682_STO1_ADC1_SRC_DACMIX (0x0 << 13)
+#define RT5682_STO1_ADC2L_SRC_MASK (0x1 << 12)
+#define RT5682_STO1_ADC2L_SRC_SFT 12
+#define RT5682_STO1_ADCL_SRC_MASK (0x3 << 10)
+#define RT5682_STO1_ADCL_SRC_SFT 10
+#define RT5682_STO1_DD_L_SRC_MASK (0x1 << 9)
+#define RT5682_STO1_DD_L_SRC_SFT 9
+#define RT5682_STO1_DMIC_SRC_MASK (0x1 << 8)
+#define RT5682_STO1_DMIC_SRC_SFT 8
+#define RT5682_STO1_DMIC_SRC_DMIC2 (0x1 << 8)
+#define RT5682_STO1_DMIC_SRC_DMIC1 (0x0 << 8)
+#define RT5682_M_STO1_ADC_R1 (0x1 << 7)
+#define RT5682_M_STO1_ADC_R1_SFT 7
+#define RT5682_M_STO1_ADC_R2 (0x1 << 6)
+#define RT5682_M_STO1_ADC_R2_SFT 6
+#define RT5682_STO1_ADC1R_SRC_MASK (0x1 << 5)
+#define RT5682_STO1_ADC1R_SRC_SFT 5
+#define RT5682_STO1_ADC2R_SRC_MASK (0x1 << 4)
+#define RT5682_STO1_ADC2R_SRC_SFT 4
+#define RT5682_STO1_ADCR_SRC_MASK (0x3 << 2)
+#define RT5682_STO1_ADCR_SRC_SFT 2
+
+/* ADC Mixer to DAC Mixer Control (0x0029) */
+#define RT5682_M_ADCMIX_L (0x1 << 15)
+#define RT5682_M_ADCMIX_L_SFT 15
+#define RT5682_M_DAC1_L (0x1 << 14)
+#define RT5682_M_DAC1_L_SFT 14
+#define RT5682_DAC1_R_SEL_MASK (0x1 << 10)
+#define RT5682_DAC1_R_SEL_SFT 10
+#define RT5682_DAC1_L_SEL_MASK (0x1 << 8)
+#define RT5682_DAC1_L_SEL_SFT 8
+#define RT5682_M_ADCMIX_R (0x1 << 7)
+#define RT5682_M_ADCMIX_R_SFT 7
+#define RT5682_M_DAC1_R (0x1 << 6)
+#define RT5682_M_DAC1_R_SFT 6
+
+/* Stereo1 DAC Mixer Control (0x002a) */
+#define RT5682_M_DAC_L1_STO_L (0x1 << 15)
+#define RT5682_M_DAC_L1_STO_L_SFT 15
+#define RT5682_G_DAC_L1_STO_L_MASK (0x1 << 14)
+#define RT5682_G_DAC_L1_STO_L_SFT 14
+#define RT5682_M_DAC_R1_STO_L (0x1 << 13)
+#define RT5682_M_DAC_R1_STO_L_SFT 13
+#define RT5682_G_DAC_R1_STO_L_MASK (0x1 << 12)
+#define RT5682_G_DAC_R1_STO_L_SFT 12
+#define RT5682_M_DAC_L1_STO_R (0x1 << 7)
+#define RT5682_M_DAC_L1_STO_R_SFT 7
+#define RT5682_G_DAC_L1_STO_R_MASK (0x1 << 6)
+#define RT5682_G_DAC_L1_STO_R_SFT 6
+#define RT5682_M_DAC_R1_STO_R (0x1 << 5)
+#define RT5682_M_DAC_R1_STO_R_SFT 5
+#define RT5682_G_DAC_R1_STO_R_MASK (0x1 << 4)
+#define RT5682_G_DAC_R1_STO_R_SFT 4
+
+/* Analog DAC1 Input Source Control (0x002b) */
+#define RT5682_M_ST_STO_L (0x1 << 9)
+#define RT5682_M_ST_STO_L_SFT 9
+#define RT5682_M_ST_STO_R (0x1 << 8)
+#define RT5682_M_ST_STO_R_SFT 8
+#define RT5682_DAC_L1_SRC_MASK (0x3 << 4)
+#define RT5682_A_DACL1_SFT 4
+#define RT5682_DAC_R1_SRC_MASK (0x3)
+#define RT5682_A_DACR1_SFT 0
+
+/* Digital Interface Data Control (0x0030) */
+#define RT5682_IF2_ADC_SEL_MASK (0x3 << 0)
+#define RT5682_IF2_ADC_SEL_SFT 0
+
+/* REC Left Mixer Control 2 (0x003c) */
+#define RT5682_G_CBJ_RM1_L (0x7 << 10)
+#define RT5682_G_CBJ_RM1_L_SFT 10
+#define RT5682_M_CBJ_RM1_L (0x1 << 7)
+#define RT5682_M_CBJ_RM1_L_SFT 7
+
+/* Power Management for Digital 1 (0x0061) */
+#define RT5682_PWR_I2S1 (0x1 << 15)
+#define RT5682_PWR_I2S1_BIT 15
+#define RT5682_PWR_I2S2 (0x1 << 14)
+#define RT5682_PWR_I2S2_BIT 14
+#define RT5682_PWR_DAC_L1 (0x1 << 11)
+#define RT5682_PWR_DAC_L1_BIT 11
+#define RT5682_PWR_DAC_R1 (0x1 << 10)
+#define RT5682_PWR_DAC_R1_BIT 10
+#define RT5682_PWR_LDO (0x1 << 8)
+#define RT5682_PWR_LDO_BIT 8
+#define RT5682_PWR_ADC_L1 (0x1 << 4)
+#define RT5682_PWR_ADC_L1_BIT 4
+#define RT5682_PWR_ADC_R1 (0x1 << 3)
+#define RT5682_PWR_ADC_R1_BIT 3
+#define RT5682_DIG_GATE_CTRL (0x1 << 0)
+#define RT5682_DIG_GATE_CTRL_SFT 0
+
+/* Power Management for Digital 2 (0x0062) */
+#define RT5682_PWR_ADC_S1F (0x1 << 15)
+#define RT5682_PWR_ADC_S1F_BIT 15
+#define RT5682_PWR_DAC_S1F (0x1 << 10)
+#define RT5682_PWR_DAC_S1F_BIT 10
+
+/* Power Management for Analog 1 (0x0063) */
+#define RT5682_PWR_VREF1 (0x1 << 15)
+#define RT5682_PWR_VREF1_BIT 15
+#define RT5682_PWR_FV1 (0x1 << 14)
+#define RT5682_PWR_FV1_BIT 14
+#define RT5682_PWR_VREF2 (0x1 << 13)
+#define RT5682_PWR_VREF2_BIT 13
+#define RT5682_PWR_FV2 (0x1 << 12)
+#define RT5682_PWR_FV2_BIT 12
+#define RT5682_LDO1_DBG_MASK (0x3 << 10)
+#define RT5682_PWR_MB (0x1 << 9)
+#define RT5682_PWR_MB_BIT 9
+#define RT5682_PWR_BG (0x1 << 7)
+#define RT5682_PWR_BG_BIT 7
+#define RT5682_LDO1_BYPASS_MASK (0x1 << 6)
+#define RT5682_LDO1_BYPASS (0x1 << 6)
+#define RT5682_LDO1_NOT_BYPASS (0x0 << 6)
+#define RT5682_PWR_MA_BIT 6
+#define RT5682_LDO1_DVO_MASK (0x3 << 4)
+#define RT5682_LDO1_DVO_09 (0x0 << 4)
+#define RT5682_LDO1_DVO_10 (0x1 << 4)
+#define RT5682_LDO1_DVO_12 (0x2 << 4)
+#define RT5682_LDO1_DVO_14 (0x3 << 4)
+#define RT5682_HP_DRIVER_MASK (0x3 << 2)
+#define RT5682_HP_DRIVER_1X (0x0 << 2)
+#define RT5682_HP_DRIVER_3X (0x1 << 2)
+#define RT5682_HP_DRIVER_5X (0x3 << 2)
+#define RT5682_PWR_HA_L (0x1 << 1)
+#define RT5682_PWR_HA_L_BIT 1
+#define RT5682_PWR_HA_R (0x1 << 0)
+#define RT5682_PWR_HA_R_BIT 0
+
+/* Power Management for Analog 2 (0x0064) */
+#define RT5682_PWR_MB1 (0x1 << 11)
+#define RT5682_PWR_MB1_PWR_DOWN (0x0 << 11)
+#define RT5682_PWR_MB1_BIT 11
+#define RT5682_PWR_MB2 (0x1 << 10)
+#define RT5682_PWR_MB2_PWR_DOWN (0x0 << 10)
+#define RT5682_PWR_MB2_BIT 10
+#define RT5682_PWR_JDH (0x1 << 3)
+#define RT5682_PWR_JDH_BIT 3
+#define RT5682_PWR_JDL (0x1 << 2)
+#define RT5682_PWR_JDL_BIT 2
+#define RT5682_PWR_RM1_L (0x1 << 1)
+#define RT5682_PWR_RM1_L_BIT 1
+
+/* Power Management for Analog 3 (0x0065) */
+#define RT5682_PWR_CBJ (0x1 << 9)
+#define RT5682_PWR_CBJ_BIT 9
+#define RT5682_PWR_PLL (0x1 << 6)
+#define RT5682_PWR_PLL_BIT 6
+#define RT5682_PWR_PLL2B (0x1 << 5)
+#define RT5682_PWR_PLL2B_BIT 5
+#define RT5682_PWR_PLL2F (0x1 << 4)
+#define RT5682_PWR_PLL2F_BIT 4
+#define RT5682_PWR_LDO2 (0x1 << 2)
+#define RT5682_PWR_LDO2_BIT 2
+#define RT5682_PWR_DET_SPKVDD (0x1 << 1)
+#define RT5682_PWR_DET_SPKVDD_BIT 1
+
+/* Power Management for Mixer (0x0066) */
+#define RT5682_PWR_STO1_DAC_L (0x1 << 5)
+#define RT5682_PWR_STO1_DAC_L_BIT 5
+#define RT5682_PWR_STO1_DAC_R (0x1 << 4)
+#define RT5682_PWR_STO1_DAC_R_BIT 4
+
+/* MCLK and System Clock Detection Control (0x006b) */
+#define RT5682_SYS_CLK_DET (0x1 << 15)
+#define RT5682_SYS_CLK_DET_SFT 15
+#define RT5682_PLL1_CLK_DET (0x1 << 14)
+#define RT5682_PLL1_CLK_DET_SFT 14
+#define RT5682_PLL2_CLK_DET (0x1 << 13)
+#define RT5682_PLL2_CLK_DET_SFT 13
+#define RT5682_POW_CLK_DET2_SFT 8
+#define RT5682_POW_CLK_DET_SFT 0
+
+/* Digital Microphone Control 1 (0x006e) */
+#define RT5682_DMIC_1_EN_MASK (0x1 << 15)
+#define RT5682_DMIC_1_EN_SFT 15
+#define RT5682_DMIC_1_DIS (0x0 << 15)
+#define RT5682_DMIC_1_EN (0x1 << 15)
+#define RT5682_DMIC_1_DP_MASK (0x3 << 4)
+#define RT5682_DMIC_1_DP_SFT 4
+#define RT5682_DMIC_1_DP_GPIO2 (0x0 << 4)
+#define RT5682_DMIC_1_DP_GPIO5 (0x1 << 4)
+#define RT5682_DMIC_CLK_MASK (0xf << 0)
+#define RT5682_DMIC_CLK_SFT 0
+
+/* I2S1 Audio Serial Data Port Control (0x0070) */
+#define RT5682_SEL_ADCDAT_MASK (0x1 << 15)
+#define RT5682_SEL_ADCDAT_OUT (0x0 << 15)
+#define RT5682_SEL_ADCDAT_IN (0x1 << 15)
+#define RT5682_SEL_ADCDAT_SFT 15
+#define RT5682_I2S1_TX_CHL_MASK (0x7 << 12)
+#define RT5682_I2S1_TX_CHL_SFT 12
+#define RT5682_I2S1_TX_CHL_16 (0x0 << 12)
+#define RT5682_I2S1_TX_CHL_20 (0x1 << 12)
+#define RT5682_I2S1_TX_CHL_24 (0x2 << 12)
+#define RT5682_I2S1_TX_CHL_32 (0x3 << 12)
+#define RT5682_I2S1_TX_CHL_8 (0x4 << 12)
+#define RT5682_I2S1_RX_CHL_MASK (0x7 << 8)
+#define RT5682_I2S1_RX_CHL_SFT 8
+#define RT5682_I2S1_RX_CHL_16 (0x0 << 8)
+#define RT5682_I2S1_RX_CHL_20 (0x1 << 8)
+#define RT5682_I2S1_RX_CHL_24 (0x2 << 8)
+#define RT5682_I2S1_RX_CHL_32 (0x3 << 8)
+#define RT5682_I2S1_RX_CHL_8 (0x4 << 8)
+#define RT5682_I2S1_MONO_MASK (0x1 << 7)
+#define RT5682_I2S1_MONO_EN (0x1 << 7)
+#define RT5682_I2S1_MONO_DIS (0x0 << 7)
+#define RT5682_I2S2_MONO_MASK (0x1 << 6)
+#define RT5682_I2S2_MONO_EN (0x1 << 6)
+#define RT5682_I2S2_MONO_DIS (0x0 << 6)
+#define RT5682_I2S1_DL_MASK (0x7 << 4)
+#define RT5682_I2S1_DL_SFT 4
+#define RT5682_I2S1_DL_16 (0x0 << 4)
+#define RT5682_I2S1_DL_20 (0x1 << 4)
+#define RT5682_I2S1_DL_24 (0x2 << 4)
+#define RT5682_I2S1_DL_32 (0x3 << 4)
+#define RT5682_I2S1_DL_8 (0x4 << 4)
+
+/* I2S1/2 Audio Serial Data Port Control (0x0070)(0x0071) */
+#define RT5682_I2S2_MS_MASK (0x1 << 15)
+#define RT5682_I2S2_MS_SFT 15
+#define RT5682_I2S2_MS_M (0x0 << 15)
+#define RT5682_I2S2_MS_S (0x1 << 15)
+#define RT5682_I2S2_PIN_CFG_MASK (0x1 << 14)
+#define RT5682_I2S2_PIN_CFG_SFT 14
+#define RT5682_I2S2_CLK_SEL_MASK (0x1 << 11)
+#define RT5682_I2S2_CLK_SEL_SFT 11
+#define RT5682_I2S2_OUT_MASK (0x1 << 9)
+#define RT5682_I2S2_OUT_SFT 9
+#define RT5682_I2S2_OUT_UM (0x0 << 9)
+#define RT5682_I2S2_OUT_M (0x1 << 9)
+#define RT5682_I2S_BP_MASK (0x1 << 8)
+#define RT5682_I2S_BP_SFT 8
+#define RT5682_I2S_BP_NOR (0x0 << 8)
+#define RT5682_I2S_BP_INV (0x1 << 8)
+#define RT5682_I2S2_MONO_EN (0x1 << 6)
+#define RT5682_I2S2_MONO_DIS (0x0 << 6)
+#define RT5682_I2S2_DL_MASK (0x3 << 4)
+#define RT5682_I2S2_DL_SFT 4
+#define RT5682_I2S2_DL_16 (0x0 << 4)
+#define RT5682_I2S2_DL_20 (0x1 << 4)
+#define RT5682_I2S2_DL_24 (0x2 << 4)
+#define RT5682_I2S2_DL_8 (0x3 << 4)
+#define RT5682_I2S_DF_MASK (0x7)
+#define RT5682_I2S_DF_SFT 0
+#define RT5682_I2S_DF_I2S (0x0)
+#define RT5682_I2S_DF_LEFT (0x1)
+#define RT5682_I2S_DF_PCM_A (0x2)
+#define RT5682_I2S_DF_PCM_B (0x3)
+#define RT5682_I2S_DF_PCM_A_N (0x6)
+#define RT5682_I2S_DF_PCM_B_N (0x7)
+
+/* ADC/DAC Clock Control 1 (0x0073) */
+#define RT5682_ADC_OSR_MASK (0xf << 12)
+#define RT5682_ADC_OSR_SFT 12
+#define RT5682_ADC_OSR_D_1 (0x0 << 12)
+#define RT5682_ADC_OSR_D_2 (0x1 << 12)
+#define RT5682_ADC_OSR_D_4 (0x2 << 12)
+#define RT5682_ADC_OSR_D_6 (0x3 << 12)
+#define RT5682_ADC_OSR_D_8 (0x4 << 12)
+#define RT5682_ADC_OSR_D_12 (0x5 << 12)
+#define RT5682_ADC_OSR_D_16 (0x6 << 12)
+#define RT5682_ADC_OSR_D_24 (0x7 << 12)
+#define RT5682_ADC_OSR_D_32 (0x8 << 12)
+#define RT5682_ADC_OSR_D_48 (0x9 << 12)
+#define RT5682_I2S_M_DIV_MASK (0xf << 8)
+#define RT5682_I2S_M_DIV_SFT 8
+#define RT5682_I2S_M_D_1 (0x0 << 8)
+#define RT5682_I2S_M_D_2 (0x1 << 8)
+#define RT5682_I2S_M_D_3 (0x2 << 8)
+#define RT5682_I2S_M_D_4 (0x3 << 8)
+#define RT5682_I2S_M_D_6 (0x4 << 8)
+#define RT5682_I2S_M_D_8 (0x5 << 8)
+#define RT5682_I2S_M_D_12 (0x6 << 8)
+#define RT5682_I2S_M_D_16 (0x7 << 8)
+#define RT5682_I2S_M_D_24 (0x8 << 8)
+#define RT5682_I2S_M_D_32 (0x9 << 8)
+#define RT5682_I2S_M_D_48 (0xa << 8)
+#define RT5682_I2S_CLK_SRC_MASK (0x7 << 4)
+#define RT5682_I2S_CLK_SRC_SFT 4
+#define RT5682_I2S_CLK_SRC_MCLK (0x0 << 4)
+#define RT5682_I2S_CLK_SRC_PLL1 (0x1 << 4)
+#define RT5682_I2S_CLK_SRC_PLL2 (0x2 << 4)
+#define RT5682_I2S_CLK_SRC_SDW (0x3 << 4)
+#define RT5682_I2S_CLK_SRC_RCCLK (0x4 << 4) /* 25M */
+#define RT5682_DAC_OSR_MASK (0xf << 0)
+#define RT5682_DAC_OSR_SFT 0
+#define RT5682_DAC_OSR_D_1 (0x0 << 0)
+#define RT5682_DAC_OSR_D_2 (0x1 << 0)
+#define RT5682_DAC_OSR_D_4 (0x2 << 0)
+#define RT5682_DAC_OSR_D_6 (0x3 << 0)
+#define RT5682_DAC_OSR_D_8 (0x4 << 0)
+#define RT5682_DAC_OSR_D_12 (0x5 << 0)
+#define RT5682_DAC_OSR_D_16 (0x6 << 0)
+#define RT5682_DAC_OSR_D_24 (0x7 << 0)
+#define RT5682_DAC_OSR_D_32 (0x8 << 0)
+#define RT5682_DAC_OSR_D_48 (0x9 << 0)
+
+/* ADC/DAC Clock Control 2 (0x0074) */
+#define RT5682_I2S2_BCLK_MS2_MASK (0x1 << 11)
+#define RT5682_I2S2_BCLK_MS2_SFT 11
+#define RT5682_I2S2_BCLK_MS2_32 (0x0 << 11)
+#define RT5682_I2S2_BCLK_MS2_64 (0x1 << 11)
+
+/* TDM control 1 (0x0079) */
+#define RT5682_TDM_TX_CH_MASK (0x3 << 12)
+#define RT5682_TDM_TX_CH_2 (0x0 << 12)
+#define RT5682_TDM_TX_CH_4 (0x1 << 12)
+#define RT5682_TDM_TX_CH_6 (0x2 << 12)
+#define RT5682_TDM_TX_CH_8 (0x3 << 12)
+#define RT5682_TDM_RX_CH_MASK (0x3 << 8)
+#define RT5682_TDM_RX_CH_2 (0x0 << 8)
+#define RT5682_TDM_RX_CH_4 (0x1 << 8)
+#define RT5682_TDM_RX_CH_6 (0x2 << 8)
+#define RT5682_TDM_RX_CH_8 (0x3 << 8)
+#define RT5682_TDM_ADC_LCA_MASK (0xf << 4)
+#define RT5682_TDM_ADC_LCA_SFT 4
+#define RT5682_TDM_ADC_DL_SFT 0
+
+/* TDM control 2 (0x007a) */
+#define RT5682_IF1_ADC1_SEL_SFT 14
+#define RT5682_IF1_ADC2_SEL_SFT 12
+#define RT5682_IF1_ADC3_SEL_SFT 10
+#define RT5682_IF1_ADC4_SEL_SFT 8
+#define RT5682_TDM_ADC_SEL_SFT 4
+
+/* TDM control 3 (0x007b) */
+#define RT5682_TDM_EN (0x1 << 7)
+
+/* TDM/I2S control (0x007e) */
+#define RT5682_TDM_S_BP_MASK (0x1 << 15)
+#define RT5682_TDM_S_BP_SFT 15
+#define RT5682_TDM_S_BP_NOR (0x0 << 15)
+#define RT5682_TDM_S_BP_INV (0x1 << 15)
+#define RT5682_TDM_S_LP_MASK (0x1 << 14)
+#define RT5682_TDM_S_LP_SFT 14
+#define RT5682_TDM_S_LP_NOR (0x0 << 14)
+#define RT5682_TDM_S_LP_INV (0x1 << 14)
+#define RT5682_TDM_DF_MASK (0x7 << 11)
+#define RT5682_TDM_DF_SFT 11
+#define RT5682_TDM_DF_I2S (0x0 << 11)
+#define RT5682_TDM_DF_LEFT (0x1 << 11)
+#define RT5682_TDM_DF_PCM_A (0x2 << 11)
+#define RT5682_TDM_DF_PCM_B (0x3 << 11)
+#define RT5682_TDM_DF_PCM_A_N (0x6 << 11)
+#define RT5682_TDM_DF_PCM_B_N (0x7 << 11)
+#define RT5682_TDM_CL_MASK (0x3 << 4)
+#define RT5682_TDM_CL_16 (0x0 << 4)
+#define RT5682_TDM_CL_20 (0x1 << 4)
+#define RT5682_TDM_CL_24 (0x2 << 4)
+#define RT5682_TDM_CL_32 (0x3 << 4)
+#define RT5682_TDM_M_BP_MASK (0x1 << 2)
+#define RT5682_TDM_M_BP_SFT 2
+#define RT5682_TDM_M_BP_NOR (0x0 << 2)
+#define RT5682_TDM_M_BP_INV (0x1 << 2)
+#define RT5682_TDM_M_LP_MASK (0x1 << 1)
+#define RT5682_TDM_M_LP_SFT 1
+#define RT5682_TDM_M_LP_NOR (0x0 << 1)
+#define RT5682_TDM_M_LP_INV (0x1 << 1)
+#define RT5682_TDM_MS_MASK (0x1 << 0)
+#define RT5682_TDM_MS_SFT 0
+#define RT5682_TDM_MS_M (0x0 << 0)
+#define RT5682_TDM_MS_S (0x1 << 0)
+
+/* Global Clock Control (0x0080) */
+#define RT5682_SCLK_SRC_MASK (0x7 << 13)
+#define RT5682_SCLK_SRC_SFT 13
+#define RT5682_SCLK_SRC_MCLK (0x0 << 13)
+#define RT5682_SCLK_SRC_PLL1 (0x1 << 13)
+#define RT5682_SCLK_SRC_PLL2 (0x2 << 13)
+#define RT5682_SCLK_SRC_SDW (0x3 << 13)
+#define RT5682_SCLK_SRC_RCCLK (0x4 << 13)
+#define RT5682_PLL1_SRC_MASK (0x3 << 10)
+#define RT5682_PLL1_SRC_SFT 10
+#define RT5682_PLL1_SRC_MCLK (0x0 << 10)
+#define RT5682_PLL1_SRC_BCLK1 (0x1 << 10)
+#define RT5682_PLL1_SRC_SDW (0x2 << 10)
+#define RT5682_PLL1_SRC_RC (0x3 << 10)
+#define RT5682_PLL2_SRC_MASK (0x3 << 8)
+#define RT5682_PLL2_SRC_SFT 8
+#define RT5682_PLL2_SRC_MCLK (0x0 << 8)
+#define RT5682_PLL2_SRC_BCLK1 (0x1 << 8)
+#define RT5682_PLL2_SRC_SDW (0x2 << 8)
+#define RT5682_PLL2_SRC_RC (0x3 << 8)
+
+#define RT5682_PLL_INP_MAX 40000000
+#define RT5682_PLL_INP_MIN 256000
+/* PLL M/N/K Code Control 1 (0x0081) */
+#define RT5682_PLL_N_MAX 0x001ff
+#define RT5682_PLL_N_MASK (RT5682_PLL_N_MAX << 7)
+#define RT5682_PLL_N_SFT 7
+#define RT5682_PLL_K_MAX 0x001f
+#define RT5682_PLL_K_MASK (RT5682_PLL_K_MAX)
+#define RT5682_PLL_K_SFT 0
+
+/* PLL M/N/K Code Control 2 (0x0082) */
+#define RT5682_PLL_M_MAX 0x00f
+#define RT5682_PLL_M_MASK (RT5682_PLL_M_MAX << 12)
+#define RT5682_PLL_M_SFT 12
+#define RT5682_PLL_M_BP (0x1 << 11)
+#define RT5682_PLL_M_BP_SFT 11
+#define RT5682_PLL_K_BP (0x1 << 10)
+#define RT5682_PLL_K_BP_SFT 10
+#define RT5682_PLL_RST (0x1 << 1)
+
+/* PLL tracking mode 1 (0x0083) */
+#define RT5682_DA_ASRC_MASK (0x1 << 13)
+#define RT5682_DA_ASRC_SFT 13
+#define RT5682_DAC_STO1_ASRC_MASK (0x1 << 12)
+#define RT5682_DAC_STO1_ASRC_SFT 12
+#define RT5682_AD_ASRC_MASK (0x1 << 8)
+#define RT5682_AD_ASRC_SFT 8
+#define RT5682_AD_ASRC_SEL_MASK (0x1 << 4)
+#define RT5682_AD_ASRC_SEL_SFT 4
+#define RT5682_DMIC_ASRC_MASK (0x1 << 3)
+#define RT5682_DMIC_ASRC_SFT 3
+#define RT5682_ADC_STO1_ASRC_MASK (0x1 << 2)
+#define RT5682_ADC_STO1_ASRC_SFT 2
+#define RT5682_DA_ASRC_SEL_MASK (0x1 << 0)
+#define RT5682_DA_ASRC_SEL_SFT 0
+
+/* PLL tracking modes 2 and 3 (0x0084, 0x0085) */
+#define RT5682_FILTER_CLK_SEL_MASK (0x7 << 12)
+#define RT5682_FILTER_CLK_SEL_SFT 12
+#define RT5682_FILTER_CLK_DIV_MASK (0xf << 8)
+#define RT5682_FILTER_CLK_DIV_SFT 8
+
+/* ASRC Control 4 (0x0086) */
+#define RT5682_ASRCIN_FTK_N1_MASK (0x3 << 14)
+#define RT5682_ASRCIN_FTK_N1_SFT 14
+#define RT5682_ASRCIN_FTK_N2_MASK (0x3 << 12)
+#define RT5682_ASRCIN_FTK_N2_SFT 12
+#define RT5682_ASRCIN_FTK_M1_MASK (0x7 << 8)
+#define RT5682_ASRCIN_FTK_M1_SFT 8
+#define RT5682_ASRCIN_FTK_M2_MASK (0x7 << 4)
+#define RT5682_ASRCIN_FTK_M2_SFT 4
+
+/* SoundWire reference clk (0x008d) */
+#define RT5682_PLL2_OUT_MASK (0x1 << 8)
+#define RT5682_PLL2_OUT_98M (0x0 << 8)
+#define RT5682_PLL2_OUT_49M (0x1 << 8)
+#define RT5682_SDW_REF_2_MASK (0xf << 4)
+#define RT5682_SDW_REF_2_SFT 4
+#define RT5682_SDW_REF_2_48K (0x0 << 4)
+#define RT5682_SDW_REF_2_96K (0x1 << 4)
+#define RT5682_SDW_REF_2_192K (0x2 << 4)
+#define RT5682_SDW_REF_2_32K (0x3 << 4)
+#define RT5682_SDW_REF_2_24K (0x4 << 4)
+#define RT5682_SDW_REF_2_16K (0x5 << 4)
+#define RT5682_SDW_REF_2_12K (0x6 << 4)
+#define RT5682_SDW_REF_2_8K (0x7 << 4)
+#define RT5682_SDW_REF_2_44K (0x8 << 4)
+#define RT5682_SDW_REF_2_88K (0x9 << 4)
+#define RT5682_SDW_REF_2_176K (0xa << 4)
+#define RT5682_SDW_REF_2_353K (0xb << 4)
+#define RT5682_SDW_REF_2_22K (0xc << 4)
+#define RT5682_SDW_REF_2_384K (0xd << 4)
+#define RT5682_SDW_REF_2_11K (0xe << 4)
+#define RT5682_SDW_REF_1_MASK (0xf << 0)
+#define RT5682_SDW_REF_1_SFT 0
+#define RT5682_SDW_REF_1_48K (0x0 << 0)
+#define RT5682_SDW_REF_1_96K (0x1 << 0)
+#define RT5682_SDW_REF_1_192K (0x2 << 0)
+#define RT5682_SDW_REF_1_32K (0x3 << 0)
+#define RT5682_SDW_REF_1_24K (0x4 << 0)
+#define RT5682_SDW_REF_1_16K (0x5 << 0)
+#define RT5682_SDW_REF_1_12K (0x6 << 0)
+#define RT5682_SDW_REF_1_8K (0x7 << 0)
+#define RT5682_SDW_REF_1_44K (0x8 << 0)
+#define RT5682_SDW_REF_1_88K (0x9 << 0)
+#define RT5682_SDW_REF_1_176K (0xa << 0)
+#define RT5682_SDW_REF_1_353K (0xb << 0)
+#define RT5682_SDW_REF_1_22K (0xc << 0)
+#define RT5682_SDW_REF_1_384K (0xd << 0)
+#define RT5682_SDW_REF_1_11K (0xe << 0)
+
+/* Depop Mode Control 1 (0x008e) */
+#define RT5682_PUMP_EN (0x1 << 3)
+#define RT5682_PUMP_EN_SFT 3
+#define RT5682_CAPLESS_EN (0x1 << 0)
+#define RT5682_CAPLESS_EN_SFT 0
+
+/* Depop Mode Control 2 (0x008f) */
+#define RT5682_RAMP_MASK (0x1 << 12)
+#define RT5682_RAMP_SFT 12
+#define RT5682_RAMP_DIS (0x0 << 12)
+#define RT5682_RAMP_EN (0x1 << 12)
+#define RT5682_BPS_MASK (0x1 << 11)
+#define RT5682_BPS_SFT 11
+#define RT5682_BPS_DIS (0x0 << 11)
+#define RT5682_BPS_EN (0x1 << 11)
+#define RT5682_FAST_UPDN_MASK (0x1 << 10)
+#define RT5682_FAST_UPDN_SFT 10
+#define RT5682_FAST_UPDN_DIS (0x0 << 10)
+#define RT5682_FAST_UPDN_EN (0x1 << 10)
+#define RT5682_VLO_MASK (0x1 << 7)
+#define RT5682_VLO_SFT 7
+#define RT5682_VLO_3V (0x0 << 7)
+#define RT5682_VLO_33V (0x1 << 7)
+
+/* HPOUT charge pump 1 (0x0091) */
+#define RT5682_OSW_L_MASK (0x1 << 11)
+#define RT5682_OSW_L_SFT 11
+#define RT5682_OSW_L_DIS (0x0 << 11)
+#define RT5682_OSW_L_EN (0x1 << 11)
+#define RT5682_OSW_R_MASK (0x1 << 10)
+#define RT5682_OSW_R_SFT 10
+#define RT5682_OSW_R_DIS (0x0 << 10)
+#define RT5682_OSW_R_EN (0x1 << 10)
+#define RT5682_PM_HP_MASK (0x3 << 8)
+#define RT5682_PM_HP_SFT 8
+#define RT5682_PM_HP_LV (0x0 << 8)
+#define RT5682_PM_HP_MV (0x1 << 8)
+#define RT5682_PM_HP_HV (0x2 << 8)
+#define RT5682_IB_HP_MASK (0x3 << 6)
+#define RT5682_IB_HP_SFT 6
+#define RT5682_IB_HP_125IL (0x0 << 6)
+#define RT5682_IB_HP_25IL (0x1 << 6)
+#define RT5682_IB_HP_5IL (0x2 << 6)
+#define RT5682_IB_HP_1IL (0x3 << 6)
+
+/* Micbias Control 1 (0x0093) */
+#define RT5682_MIC1_OV_MASK (0x3 << 14)
+#define RT5682_MIC1_OV_SFT 14
+#define RT5682_MIC1_OV_2V7 (0x0 << 14)
+#define RT5682_MIC1_OV_2V4 (0x1 << 14)
+#define RT5682_MIC1_OV_2V25 (0x3 << 14)
+#define RT5682_MIC1_OV_1V8 (0x4 << 14)
+#define RT5682_MIC1_CLK_MASK (0x1 << 13)
+#define RT5682_MIC1_CLK_SFT 13
+#define RT5682_MIC1_CLK_DIS (0x0 << 13)
+#define RT5682_MIC1_CLK_EN (0x1 << 13)
+#define RT5682_MIC1_OVCD_MASK (0x1 << 12)
+#define RT5682_MIC1_OVCD_SFT 12
+#define RT5682_MIC1_OVCD_DIS (0x0 << 12)
+#define RT5682_MIC1_OVCD_EN (0x1 << 12)
+#define RT5682_MIC1_OVTH_MASK (0x3 << 10)
+#define RT5682_MIC1_OVTH_SFT 10
+#define RT5682_MIC1_OVTH_768UA (0x0 << 10)
+#define RT5682_MIC1_OVTH_960UA (0x1 << 10)
+#define RT5682_MIC1_OVTH_1152UA (0x2 << 10)
+#define RT5682_MIC1_OVTH_1960UA (0x3 << 10)
+#define RT5682_MIC2_OV_MASK (0x3 << 8)
+#define RT5682_MIC2_OV_SFT 8
+#define RT5682_MIC2_OV_2V7 (0x0 << 8)
+#define RT5682_MIC2_OV_2V4 (0x1 << 8)
+#define RT5682_MIC2_OV_2V25 (0x3 << 8)
+#define RT5682_MIC2_OV_1V8 (0x4 << 8)
+#define RT5682_MIC2_CLK_MASK (0x1 << 7)
+#define RT5682_MIC2_CLK_SFT 7
+#define RT5682_MIC2_CLK_DIS (0x0 << 7)
+#define RT5682_MIC2_CLK_EN (0x1 << 7)
+#define RT5682_MIC2_OVTH_MASK (0x3 << 4)
+#define RT5682_MIC2_OVTH_SFT 4
+#define RT5682_MIC2_OVTH_768UA (0x0 << 4)
+#define RT5682_MIC2_OVTH_960UA (0x1 << 4)
+#define RT5682_MIC2_OVTH_1152UA (0x2 << 4)
+#define RT5682_MIC2_OVTH_1960UA (0x3 << 4)
+#define RT5682_PWR_MB_MASK (0x1 << 3)
+#define RT5682_PWR_MB_SFT 3
+#define RT5682_PWR_MB_PD (0x0 << 3)
+#define RT5682_PWR_MB_PU (0x1 << 3)
+
+/* Micbias Control 2 (0x0094) */
+#define RT5682_PWR_CLK25M_MASK (0x1 << 9)
+#define RT5682_PWR_CLK25M_SFT 9
+#define RT5682_PWR_CLK25M_PD (0x0 << 9)
+#define RT5682_PWR_CLK25M_PU (0x1 << 9)
+#define RT5682_PWR_CLK1M_MASK (0x1 << 8)
+#define RT5682_PWR_CLK1M_SFT 8
+#define RT5682_PWR_CLK1M_PD (0x0 << 8)
+#define RT5682_PWR_CLK1M_PU (0x1 << 8)
+
+/* RC Clock Control (0x009f) */
+#define RT5682_POW_IRQ (0x1 << 15)
+#define RT5682_POW_JDH (0x1 << 14)
+#define RT5682_POW_JDL (0x1 << 13)
+#define RT5682_POW_ANA (0x1 << 12)
+
+/* I2S Master Mode Clock Control 1 (0x00a0) */
+#define RT5682_CLK_SRC_MCLK (0x0)
+#define RT5682_CLK_SRC_PLL1 (0x1)
+#define RT5682_CLK_SRC_PLL2 (0x2)
+#define RT5682_CLK_SRC_SDW (0x3)
+#define RT5682_CLK_SRC_RCCLK (0x4)
+#define RT5682_I2S_PD_1 (0x0)
+#define RT5682_I2S_PD_2 (0x1)
+#define RT5682_I2S_PD_3 (0x2)
+#define RT5682_I2S_PD_4 (0x3)
+#define RT5682_I2S_PD_6 (0x4)
+#define RT5682_I2S_PD_8 (0x5)
+#define RT5682_I2S_PD_12 (0x6)
+#define RT5682_I2S_PD_16 (0x7)
+#define RT5682_I2S_PD_24 (0x8)
+#define RT5682_I2S_PD_32 (0x9)
+#define RT5682_I2S_PD_48 (0xa)
+#define RT5682_I2S2_SRC_MASK (0x3 << 4)
+#define RT5682_I2S2_SRC_SFT 4
+#define RT5682_I2S2_M_PD_MASK (0xf << 0)
+#define RT5682_I2S2_M_PD_SFT 0
+
+/* IRQ Control 1 (0x00b6) */
+#define RT5682_JD1_PULSE_EN_MASK (0x1 << 10)
+#define RT5682_JD1_PULSE_EN_SFT 10
+#define RT5682_JD1_PULSE_DIS (0x0 << 10)
+#define RT5682_JD1_PULSE_EN (0x1 << 10)
+
+/* IRQ Control 2 (0x00b7) */
+#define RT5682_JD1_EN_MASK (0x1 << 15)
+#define RT5682_JD1_EN_SFT 15
+#define RT5682_JD1_DIS (0x0 << 15)
+#define RT5682_JD1_EN (0x1 << 15)
+#define RT5682_JD1_POL_MASK (0x1 << 13)
+#define RT5682_JD1_POL_NOR (0x0 << 13)
+#define RT5682_JD1_POL_INV (0x1 << 13)
+
+/* IRQ Control 3 (0x00b8) */
+#define RT5682_IL_IRQ_MASK (0x1 << 7)
+#define RT5682_IL_IRQ_DIS (0x0 << 7)
+#define RT5682_IL_IRQ_EN (0x1 << 7)
+
+/* GPIO Control 1 (0x00c0) */
+#define RT5682_GP1_PIN_MASK (0x3 << 14)
+#define RT5682_GP1_PIN_SFT 14
+#define RT5682_GP1_PIN_GPIO1 (0x0 << 14)
+#define RT5682_GP1_PIN_IRQ (0x1 << 14)
+#define RT5682_GP1_PIN_DMIC_CLK (0x2 << 14)
+#define RT5682_GP2_PIN_MASK (0x3 << 12)
+#define RT5682_GP2_PIN_SFT 12
+#define RT5682_GP2_PIN_GPIO2 (0x0 << 12)
+#define RT5682_GP2_PIN_LRCK2 (0x1 << 12)
+#define RT5682_GP2_PIN_DMIC_SDA (0x2 << 12)
+#define RT5682_GP3_PIN_MASK (0x3 << 10)
+#define RT5682_GP3_PIN_SFT 10
+#define RT5682_GP3_PIN_GPIO3 (0x0 << 10)
+#define RT5682_GP3_PIN_BCLK2 (0x1 << 10)
+#define RT5682_GP3_PIN_DMIC_CLK (0x2 << 10)
+#define RT5682_GP4_PIN_MASK (0x3 << 8)
+#define RT5682_GP4_PIN_SFT 8
+#define RT5682_GP4_PIN_GPIO4 (0x0 << 8)
+#define RT5682_GP4_PIN_ADCDAT1 (0x1 << 8)
+#define RT5682_GP4_PIN_DMIC_CLK (0x2 << 8)
+#define RT5682_GP4_PIN_ADCDAT2 (0x3 << 8)
+#define RT5682_GP5_PIN_MASK (0x3 << 6)
+#define RT5682_GP5_PIN_SFT 6
+#define RT5682_GP5_PIN_GPIO5 (0x0 << 6)
+#define RT5682_GP5_PIN_DACDAT1 (0x1 << 6)
+#define RT5682_GP5_PIN_DMIC_SDA (0x2 << 6)
+#define RT5682_GP6_PIN_MASK (0x1 << 5)
+#define RT5682_GP6_PIN_SFT 5
+#define RT5682_GP6_PIN_GPIO6 (0x0 << 5)
+#define RT5682_GP6_PIN_LRCK1 (0x1 << 5)
+
+/* GPIO Control 2 (0x00c1) */
+#define RT5682_GP1_PF_MASK (0x1 << 15)
+#define RT5682_GP1_PF_IN (0x0 << 15)
+#define RT5682_GP1_PF_OUT (0x1 << 15)
+#define RT5682_GP1_OUT_MASK (0x1 << 14)
+#define RT5682_GP1_OUT_L (0x0 << 14)
+#define RT5682_GP1_OUT_H (0x1 << 14)
+#define RT5682_GP2_PF_MASK (0x1 << 13)
+#define RT5682_GP2_PF_IN (0x0 << 13)
+#define RT5682_GP2_PF_OUT (0x1 << 13)
+#define RT5682_GP2_OUT_MASK (0x1 << 12)
+#define RT5682_GP2_OUT_L (0x0 << 12)
+#define RT5682_GP2_OUT_H (0x1 << 12)
+#define RT5682_GP3_PF_MASK (0x1 << 11)
+#define RT5682_GP3_PF_IN (0x0 << 11)
+#define RT5682_GP3_PF_OUT (0x1 << 11)
+#define RT5682_GP3_OUT_MASK (0x1 << 10)
+#define RT5682_GP3_OUT_L (0x0 << 10)
+#define RT5682_GP3_OUT_H (0x1 << 10)
+#define RT5682_GP4_PF_MASK (0x1 << 9)
+#define RT5682_GP4_PF_IN (0x0 << 9)
+#define RT5682_GP4_PF_OUT (0x1 << 9)
+#define RT5682_GP4_OUT_MASK (0x1 << 8)
+#define RT5682_GP4_OUT_L (0x0 << 8)
+#define RT5682_GP4_OUT_H (0x1 << 8)
+#define RT5682_GP5_PF_MASK (0x1 << 7)
+#define RT5682_GP5_PF_IN (0x0 << 7)
+#define RT5682_GP5_PF_OUT (0x1 << 7)
+#define RT5682_GP5_OUT_MASK (0x1 << 6)
+#define RT5682_GP5_OUT_L (0x0 << 6)
+#define RT5682_GP5_OUT_H (0x1 << 6)
+#define RT5682_GP6_PF_MASK (0x1 << 5)
+#define RT5682_GP6_PF_IN (0x0 << 5)
+#define RT5682_GP6_PF_OUT (0x1 << 5)
+#define RT5682_GP6_OUT_MASK (0x1 << 4)
+#define RT5682_GP6_OUT_L (0x0 << 4)
+#define RT5682_GP6_OUT_H (0x1 << 4)
+
+/* GPIO Status (0x00c2) */
+#define RT5682_GP6_STA (0x1 << 6)
+#define RT5682_GP5_STA (0x1 << 5)
+#define RT5682_GP4_STA (0x1 << 4)
+#define RT5682_GP3_STA (0x1 << 3)
+#define RT5682_GP2_STA (0x1 << 2)
+#define RT5682_GP1_STA (0x1 << 1)
+
+/* Soft volume and zero cross control 1 (0x00d9) */
+#define RT5682_SV_MASK (0x1 << 15)
+#define RT5682_SV_SFT 15
+#define RT5682_SV_DIS (0x0 << 15)
+#define RT5682_SV_EN (0x1 << 15)
+#define RT5682_ZCD_MASK (0x1 << 10)
+#define RT5682_ZCD_SFT 10
+#define RT5682_ZCD_PD (0x0 << 10)
+#define RT5682_ZCD_PU (0x1 << 10)
+#define RT5682_SV_DLY_MASK (0xf)
+#define RT5682_SV_DLY_SFT 0
+
+/* Soft volume and zero cross control 2 (0x00da) */
+#define RT5682_ZCD_BST1_CBJ_MASK (0x1 << 7)
+#define RT5682_ZCD_BST1_CBJ_SFT 7
+#define RT5682_ZCD_BST1_CBJ_DIS (0x0 << 7)
+#define RT5682_ZCD_BST1_CBJ_EN (0x1 << 7)
+#define RT5682_ZCD_RECMIX_MASK (0x1)
+#define RT5682_ZCD_RECMIX_SFT 0
+#define RT5682_ZCD_RECMIX_DIS (0x0)
+#define RT5682_ZCD_RECMIX_EN (0x1)
+
+/* 4 Button Inline Command Control 2 (0x00e3) */
+#define RT5682_4BTN_IL_MASK (0x1 << 15)
+#define RT5682_4BTN_IL_EN (0x1 << 15)
+#define RT5682_4BTN_IL_DIS (0x0 << 15)
+#define RT5682_4BTN_IL_RST_MASK (0x1 << 14)
+#define RT5682_4BTN_IL_NOR (0x1 << 14)
+#define RT5682_4BTN_IL_RST (0x0 << 14)
+
+/* Analog JD Control (0x00f0) */
+#define RT5682_JDH_RS_MASK (0x1 << 4)
+#define RT5682_JDH_NO_PLUG (0x1 << 4)
+#define RT5682_JDH_PLUG (0x0 << 4)
+
+/* Chopper and Clock control for DAC (0x013a) */
+#define RT5682_CKXEN_DAC1_MASK (0x1 << 13)
+#define RT5682_CKXEN_DAC1_SFT 13
+#define RT5682_CKGEN_DAC1_MASK (0x1 << 12)
+#define RT5682_CKGEN_DAC1_SFT 12
+
+/* Chopper and Clock control for ADC (0x013b) */
+#define RT5682_CKXEN_ADC1_MASK (0x1 << 13)
+#define RT5682_CKXEN_ADC1_SFT 13
+#define RT5682_CKGEN_ADC1_MASK (0x1 << 12)
+#define RT5682_CKGEN_ADC1_SFT 12
+
+/* Volume test (0x013f) */
+#define RT5682_SEL_CLK_VOL_MASK (0x1 << 15)
+#define RT5682_SEL_CLK_VOL_EN (0x1 << 15)
+#define RT5682_SEL_CLK_VOL_DIS (0x0 << 15)
+
+/* Test Mode Control 1 (0x0145) */
+#define RT5682_AD2DA_LB_MASK (0x1 << 10)
+#define RT5682_AD2DA_LB_SFT 10
+
+/* Stereo Noise Gate Control 1 (0x0160) */
+#define RT5682_NG2_EN_MASK (0x1 << 15)
+#define RT5682_NG2_EN (0x1 << 15)
+#define RT5682_NG2_DIS (0x0 << 15)
+
+/* Stereo1 DAC Silence Detection Control (0x0190) */
+#define RT5682_DEB_STO_DAC_MASK (0x7 << 4)
+#define RT5682_DEB_80_MS (0x0 << 4)
+
+/* SAR ADC Inline Command Control 1 (0x0210) */
+#define RT5682_SAR_BUTT_DET_MASK (0x1 << 15)
+#define RT5682_SAR_BUTT_DET_EN (0x1 << 15)
+#define RT5682_SAR_BUTT_DET_DIS (0x0 << 15)
+#define RT5682_SAR_BUTDET_MODE_MASK (0x1 << 14)
+#define RT5682_SAR_BUTDET_POW_SAV (0x1 << 14)
+#define RT5682_SAR_BUTDET_POW_NORM (0x0 << 14)
+#define RT5682_SAR_BUTDET_RST_MASK (0x1 << 13)
+#define RT5682_SAR_BUTDET_RST_NORMAL (0x1 << 13)
+#define RT5682_SAR_BUTDET_RST (0x0 << 13)
+#define RT5682_SAR_POW_MASK (0x1 << 12)
+#define RT5682_SAR_POW_EN (0x1 << 12)
+#define RT5682_SAR_POW_DIS (0x0 << 12)
+#define RT5682_SAR_RST_MASK (0x1 << 11)
+#define RT5682_SAR_RST_NORMAL (0x1 << 11)
+#define RT5682_SAR_RST (0x0 << 11)
+#define RT5682_SAR_BYPASS_MASK (0x1 << 10)
+#define RT5682_SAR_BYPASS_EN (0x1 << 10)
+#define RT5682_SAR_BYPASS_DIS (0x0 << 10)
+#define RT5682_SAR_SEL_MB1_MASK (0x1 << 9)
+#define RT5682_SAR_SEL_MB1_SEL (0x1 << 9)
+#define RT5682_SAR_SEL_MB1_NOSEL (0x0 << 9)
+#define RT5682_SAR_SEL_MB2_MASK (0x1 << 8)
+#define RT5682_SAR_SEL_MB2_SEL (0x1 << 8)
+#define RT5682_SAR_SEL_MB2_NOSEL (0x0 << 8)
+#define RT5682_SAR_SEL_MODE_MASK (0x1 << 7)
+#define RT5682_SAR_SEL_MODE_CMP (0x1 << 7)
+#define RT5682_SAR_SEL_MODE_ADC (0x0 << 7)
+#define RT5682_SAR_SEL_MB1_MB2_MASK (0x1 << 5)
+#define RT5682_SAR_SEL_MB1_MB2_AUTO (0x1 << 5)
+#define RT5682_SAR_SEL_MB1_MB2_MANU (0x0 << 5)
+#define RT5682_SAR_SEL_SIGNAL_MASK (0x1 << 4)
+#define RT5682_SAR_SEL_SIGNAL_AUTO (0x1 << 4)
+#define RT5682_SAR_SEL_SIGNAL_MANU (0x0 << 4)
+
+/* SAR ADC Inline Command Control 13 (0x021c) */
+#define RT5682_SAR_SOUR_MASK (0x3f)
+#define RT5682_SAR_SOUR_BTN (0x3f)
+#define RT5682_SAR_SOUR_TYPE (0x0)
+
+/* System Clock Source */
+enum {
+ RT5682_SCLK_S_MCLK,
+ RT5682_SCLK_S_PLL1,
+ RT5682_SCLK_S_PLL2,
+ RT5682_SCLK_S_RCCLK,
+};
+
+/* PLL Source */
+enum {
+ RT5682_PLL1_S_MCLK,
+ RT5682_PLL1_S_BCLK1,
+ RT5682_PLL1_S_RCCLK,
+};
+
+enum {
+ RT5682_AIF1,
+ RT5682_AIF2,
+ RT5682_AIFS
+};
+
+/* filter mask */
+enum {
+ RT5682_DA_STEREO1_FILTER = 0x1,
+ RT5682_AD_STEREO1_FILTER = (0x1 << 1),
+};
+
+enum {
+ RT5682_CLK_SEL_SYS,
+ RT5682_CLK_SEL_I2S1_ASRC,
+ RT5682_CLK_SEL_I2S2_ASRC,
+};
+
+int rt5682_sel_asrc_clk_src(struct snd_soc_component *component,
+ unsigned int filter_mask, unsigned int clk_src);
+
+#endif /* __RT5682_H__ */
diff --git a/sound/soc/codecs/dio2125.c b/sound/soc/codecs/simple-amplifier.c
index 09451cd44f9b..85524acf3e9c 100644
--- a/sound/soc/codecs/dio2125.c
+++ b/sound/soc/codecs/simple-amplifier.c
@@ -21,9 +21,9 @@
#include <linux/module.h>
#include <sound/soc.h>
-#define DRV_NAME "dio2125"
+#define DRV_NAME "simple-amplifier"
-struct dio2125 {
+struct simple_amp {
struct gpio_desc *gpiod_enable;
};
@@ -31,7 +31,7 @@ static int drv_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *control, int event)
{
struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);
- struct dio2125 *priv = snd_soc_component_get_drvdata(c);
+ struct simple_amp *priv = snd_soc_component_get_drvdata(c);
int val;
switch (event) {
@@ -51,7 +51,7 @@ static int drv_event(struct snd_soc_dapm_widget *w,
return 0;
}
-static const struct snd_soc_dapm_widget dio2125_dapm_widgets[] = {
+static const struct snd_soc_dapm_widget simple_amp_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("INL"),
SND_SOC_DAPM_INPUT("INR"),
SND_SOC_DAPM_OUT_DRV_E("DRV", SND_SOC_NOPM, 0, 0, NULL, 0, drv_event,
@@ -60,24 +60,24 @@ static const struct snd_soc_dapm_widget dio2125_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("OUTR"),
};
-static const struct snd_soc_dapm_route dio2125_dapm_routes[] = {
+static const struct snd_soc_dapm_route simple_amp_dapm_routes[] = {
{ "DRV", NULL, "INL" },
{ "DRV", NULL, "INR" },
{ "OUTL", NULL, "DRV" },
{ "OUTR", NULL, "DRV" },
};
-static const struct snd_soc_component_driver dio2125_component_driver = {
- .dapm_widgets = dio2125_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(dio2125_dapm_widgets),
- .dapm_routes = dio2125_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(dio2125_dapm_routes),
+static const struct snd_soc_component_driver simple_amp_component_driver = {
+ .dapm_widgets = simple_amp_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(simple_amp_dapm_widgets),
+ .dapm_routes = simple_amp_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(simple_amp_dapm_routes),
};
-static int dio2125_probe(struct platform_device *pdev)
+static int simple_amp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct dio2125 *priv;
+ struct simple_amp *priv;
int err;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -93,28 +93,30 @@ static int dio2125_probe(struct platform_device *pdev)
return err;
}
- return devm_snd_soc_register_component(dev, &dio2125_component_driver,
+ return devm_snd_soc_register_component(dev,
+ &simple_amp_component_driver,
NULL, 0);
}
#ifdef CONFIG_OF
-static const struct of_device_id dio2125_ids[] = {
+static const struct of_device_id simple_amp_ids[] = {
{ .compatible = "dioo,dio2125", },
+ { .compatible = "simple-audio-amplifier", },
{ }
};
-MODULE_DEVICE_TABLE(of, dio2125_ids);
+MODULE_DEVICE_TABLE(of, simple_amp_ids);
#endif
-static struct platform_driver dio2125_driver = {
+static struct platform_driver simple_amp_driver = {
.driver = {
.name = DRV_NAME,
- .of_match_table = of_match_ptr(dio2125_ids),
+ .of_match_table = of_match_ptr(simple_amp_ids),
},
- .probe = dio2125_probe,
+ .probe = simple_amp_probe,
};
-module_platform_driver(dio2125_driver);
+module_platform_driver(simple_amp_driver);
-MODULE_DESCRIPTION("ASoC DIO2125 output driver");
+MODULE_DESCRIPTION("ASoC Simple Audio Amplifier driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index 52f34c94ec25..ca2dfe12344e 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -7,6 +7,9 @@
* TAS5721 support:
* Copyright (C) 2016 Petr Kulhavy, Barix AG <petr@barix.com>
*
+ * TAS5707 support:
+ * Copyright (C) 2018 Jerome Brunet, Baylibre SAS <jbrunet@baylibre.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -444,6 +447,111 @@ static const struct tas571x_chip tas5711_chip = {
.vol_reg_size = 1,
};
+static const struct regmap_range tas5707_volatile_regs_range[] = {
+ regmap_reg_range(TAS571X_CLK_CTRL_REG, TAS571X_ERR_STATUS_REG),
+ regmap_reg_range(TAS571X_OSC_TRIM_REG, TAS571X_OSC_TRIM_REG),
+ regmap_reg_range(TAS5707_CH1_BQ0_REG, TAS5707_CH2_BQ6_REG),
+};
+
+static const struct regmap_access_table tas5707_volatile_regs = {
+ .yes_ranges = tas5707_volatile_regs_range,
+ .n_yes_ranges = ARRAY_SIZE(tas5707_volatile_regs_range),
+};
+
+static const DECLARE_TLV_DB_SCALE(tas5707_volume_tlv, -7900, 50, 1);
+
+static const char * const tas5707_volume_slew_step_txt[] = {
+ "256", "512", "1024", "2048",
+};
+
+static const unsigned int tas5707_volume_slew_step_values[] = {
+ 3, 0, 1, 2,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(tas5707_volume_slew_step_enum,
+ TAS571X_VOL_CFG_REG, 0, 0x3,
+ tas5707_volume_slew_step_txt,
+ tas5707_volume_slew_step_values);
+
+static const struct snd_kcontrol_new tas5707_controls[] = {
+ SOC_SINGLE_TLV("Master Volume",
+ TAS571X_MVOL_REG,
+ 0, 0xff, 1, tas5707_volume_tlv),
+ SOC_DOUBLE_R_TLV("Speaker Volume",
+ TAS571X_CH1_VOL_REG,
+ TAS571X_CH2_VOL_REG,
+ 0, 0xff, 1, tas5707_volume_tlv),
+ SOC_DOUBLE("Speaker Switch",
+ TAS571X_SOFT_MUTE_REG,
+ TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT,
+ 1, 1),
+
+ SOC_ENUM("Slew Rate Steps", tas5707_volume_slew_step_enum),
+
+ BIQUAD_COEFS("CH1 - Biquad 0", TAS5707_CH1_BQ0_REG),
+ BIQUAD_COEFS("CH1 - Biquad 1", TAS5707_CH1_BQ1_REG),
+ BIQUAD_COEFS("CH1 - Biquad 2", TAS5707_CH1_BQ2_REG),
+ BIQUAD_COEFS("CH1 - Biquad 3", TAS5707_CH1_BQ3_REG),
+ BIQUAD_COEFS("CH1 - Biquad 4", TAS5707_CH1_BQ4_REG),
+ BIQUAD_COEFS("CH1 - Biquad 5", TAS5707_CH1_BQ5_REG),
+ BIQUAD_COEFS("CH1 - Biquad 6", TAS5707_CH1_BQ6_REG),
+
+ BIQUAD_COEFS("CH2 - Biquad 0", TAS5707_CH2_BQ0_REG),
+ BIQUAD_COEFS("CH2 - Biquad 1", TAS5707_CH2_BQ1_REG),
+ BIQUAD_COEFS("CH2 - Biquad 2", TAS5707_CH2_BQ2_REG),
+ BIQUAD_COEFS("CH2 - Biquad 3", TAS5707_CH2_BQ3_REG),
+ BIQUAD_COEFS("CH2 - Biquad 4", TAS5707_CH2_BQ4_REG),
+ BIQUAD_COEFS("CH2 - Biquad 5", TAS5707_CH2_BQ5_REG),
+ BIQUAD_COEFS("CH2 - Biquad 6", TAS5707_CH2_BQ6_REG),
+};
+
+static const struct reg_default tas5707_reg_defaults[] = {
+ {TAS571X_CLK_CTRL_REG, 0x6c},
+ {TAS571X_DEV_ID_REG, 0x70},
+ {TAS571X_ERR_STATUS_REG, 0x00},
+ {TAS571X_SYS_CTRL_1_REG, 0xa0},
+ {TAS571X_SDI_REG, 0x05},
+ {TAS571X_SYS_CTRL_2_REG, 0x40},
+ {TAS571X_SOFT_MUTE_REG, 0x00},
+ {TAS571X_MVOL_REG, 0xff},
+ {TAS571X_CH1_VOL_REG, 0x30},
+ {TAS571X_CH2_VOL_REG, 0x30},
+ {TAS571X_VOL_CFG_REG, 0x91},
+ {TAS571X_MODULATION_LIMIT_REG, 0x02},
+ {TAS571X_IC_DELAY_CH1_REG, 0xac},
+ {TAS571X_IC_DELAY_CH2_REG, 0x54},
+ {TAS571X_IC_DELAY_CH3_REG, 0xac},
+ {TAS571X_IC_DELAY_CH4_REG, 0x54},
+ {TAS571X_START_STOP_PERIOD_REG, 0x0f},
+ {TAS571X_OSC_TRIM_REG, 0x82},
+ {TAS571X_BKND_ERR_REG, 0x02},
+ {TAS571X_INPUT_MUX_REG, 0x17772},
+ {TAS571X_PWM_MUX_REG, 0x1021345},
+};
+
+static const struct regmap_config tas5707_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .max_register = 0xff,
+ .reg_read = tas571x_reg_read,
+ .reg_write = tas571x_reg_write,
+ .reg_defaults = tas5707_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tas5707_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+ .wr_table = &tas571x_write_regs,
+ .volatile_table = &tas5707_volatile_regs,
+};
+
+static const struct tas571x_chip tas5707_chip = {
+ .supply_names = tas5711_supply_names,
+ .num_supply_names = ARRAY_SIZE(tas5711_supply_names),
+ .controls = tas5707_controls,
+ .num_controls = ARRAY_SIZE(tas5707_controls),
+ .regmap_config = &tas5707_regmap_config,
+ .vol_reg_size = 1,
+};
+
static const char *const tas5717_supply_names[] = {
"AVDD",
"DVDD",
@@ -775,6 +883,7 @@ static int tas571x_i2c_remove(struct i2c_client *client)
}
static const struct of_device_id tas571x_of_match[] = {
+ { .compatible = "ti,tas5707", .data = &tas5707_chip, },
{ .compatible = "ti,tas5711", .data = &tas5711_chip, },
{ .compatible = "ti,tas5717", .data = &tas5717_chip, },
{ .compatible = "ti,tas5719", .data = &tas5717_chip, },
@@ -784,6 +893,7 @@ static const struct of_device_id tas571x_of_match[] = {
MODULE_DEVICE_TABLE(of, tas571x_of_match);
static const struct i2c_device_id tas571x_i2c_id[] = {
+ { "tas5707", (kernel_ulong_t) &tas5707_chip },
{ "tas5711", (kernel_ulong_t) &tas5711_chip },
{ "tas5717", (kernel_ulong_t) &tas5717_chip },
{ "tas5719", (kernel_ulong_t) &tas5717_chip },
diff --git a/sound/soc/codecs/tas571x.h b/sound/soc/codecs/tas571x.h
index c45677bc26ad..bd23e89cfe79 100644
--- a/sound/soc/codecs/tas571x.h
+++ b/sound/soc/codecs/tas571x.h
@@ -53,6 +53,22 @@
#define TAS571X_PWM_MUX_REG 0x25
/* 20-byte biquad registers */
+#define TAS5707_CH1_BQ0_REG 0x29
+#define TAS5707_CH1_BQ1_REG 0x2a
+#define TAS5707_CH1_BQ2_REG 0x2b
+#define TAS5707_CH1_BQ3_REG 0x2c
+#define TAS5707_CH1_BQ4_REG 0x2d
+#define TAS5707_CH1_BQ5_REG 0x2e
+#define TAS5707_CH1_BQ6_REG 0x2f
+
+#define TAS5707_CH2_BQ0_REG 0x30
+#define TAS5707_CH2_BQ1_REG 0x31
+#define TAS5707_CH2_BQ2_REG 0x32
+#define TAS5707_CH2_BQ3_REG 0x33
+#define TAS5707_CH2_BQ4_REG 0x34
+#define TAS5707_CH2_BQ5_REG 0x35
+#define TAS5707_CH2_BQ6_REG 0x36
+
#define TAS5717_CH1_BQ0_REG 0x26
#define TAS5717_CH1_BQ1_REG 0x27
#define TAS5717_CH1_BQ2_REG 0x28
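Each biquad register added above is 20 bytes wide, which matches the usual five 32-bit biquad coefficients; the layout below is an illustrative assumption, not taken from the datasheet:

/* assumed layout: 5 x 32-bit coefficients = 20 bytes per biquad */
struct example_biquad {
	__be32 b0, b1, b2;	/* feed-forward coefficients */
	__be32 a1, a2;		/* feedback coefficients */
};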
diff --git a/sound/soc/codecs/tda7419.c b/sound/soc/codecs/tda7419.c
index 225c210ac38f..7f3b79c5a563 100644
--- a/sound/soc/codecs/tda7419.c
+++ b/sound/soc/codecs/tda7419.c
@@ -142,9 +142,9 @@ struct tda7419_vol_control {
static inline bool tda7419_vol_is_stereo(struct tda7419_vol_control *tvc)
{
if (tvc->reg == tvc->rreg)
- return 0;
+ return false;
- return 1;
+ return true;
}
static int tda7419_vol_info(struct snd_kcontrol *kcontrol,
diff --git a/sound/soc/codecs/tscs42xx.c b/sound/soc/codecs/tscs42xx.c
index d18ff17719cc..7396a6e5277e 100644
--- a/sound/soc/codecs/tscs42xx.c
+++ b/sound/soc/codecs/tscs42xx.c
@@ -625,25 +625,34 @@ static int bytes_info_ext(struct snd_kcontrol *kcontrol,
static const struct snd_kcontrol_new tscs42xx_snd_controls[] = {
/* Volumes */
- SOC_DOUBLE_R_TLV("Headphone Playback Volume", R_HPVOLL, R_HPVOLR,
+ SOC_DOUBLE_R_TLV("Headphone Volume", R_HPVOLL, R_HPVOLR,
FB_HPVOLL, 0x7F, 0, hpvol_scale),
- SOC_DOUBLE_R_TLV("Speaker Playback Volume", R_SPKVOLL, R_SPKVOLR,
+ SOC_DOUBLE_R_TLV("Speaker Volume", R_SPKVOLL, R_SPKVOLR,
FB_SPKVOLL, 0x7F, 0, spkvol_scale),
- SOC_DOUBLE_R_TLV("Master Playback Volume", R_DACVOLL, R_DACVOLR,
+ SOC_DOUBLE_R_TLV("Master Volume", R_DACVOLL, R_DACVOLR,
FB_DACVOLL, 0xFF, 0, dacvol_scale),
- SOC_DOUBLE_R_TLV("PCM Capture Volume", R_ADCVOLL, R_ADCVOLR,
+ SOC_DOUBLE_R_TLV("PCM Volume", R_ADCVOLL, R_ADCVOLR,
FB_ADCVOLL, 0xFF, 0, adcvol_scale),
- SOC_DOUBLE_R_TLV("Master Capture Volume", R_INVOLL, R_INVOLR,
+ SOC_DOUBLE_R_TLV("Input Volume", R_INVOLL, R_INVOLR,
FB_INVOLL, 0x3F, 0, invol_scale),
/* INSEL */
- SOC_DOUBLE_R_TLV("Mic Boost Capture Volume", R_INSELL, R_INSELR,
+ SOC_DOUBLE_R_TLV("Mic Boost Volume", R_INSELL, R_INSELR,
FB_INSELL_MICBSTL, FV_INSELL_MICBSTL_30DB,
0, mic_boost_scale),
/* Input Channel Map */
SOC_ENUM("Input Channel Map", ch_map_select_enum),
+ /* Mic Bias */
+ SOC_SINGLE("Mic Bias Boost Switch", 0x71, 0x07, 1, 0),
+
+ /* Headphone Auto Switching */
+ SOC_SINGLE("Headphone Auto Switching Switch",
+ R_CTL, FB_CTL_HPSWEN, 1, 0),
+ SOC_SINGLE("Headphone Detect Polarity Toggle Switch",
+ R_CTL, FB_CTL_HPSWPOL, 1, 0),
+
/* Coefficient Ram */
COEFF_RAM_CTL("Cascade1L BiQuad1", BIQUAD_SIZE, 0x00),
COEFF_RAM_CTL("Cascade1L BiQuad2", BIQUAD_SIZE, 0x05),
@@ -733,9 +742,9 @@ static const struct snd_kcontrol_new tscs42xx_snd_controls[] = {
R_CLECTL, FB_CLECTL_LIMIT_EN, 1, 0),
SOC_SINGLE("Comp Switch",
R_CLECTL, FB_CLECTL_COMP_EN, 1, 0),
- SOC_SINGLE_TLV("CLE Make-Up Gain Playback Volume",
+ SOC_SINGLE_TLV("CLE Make-Up Gain Volume",
R_MUGAIN, FB_MUGAIN_CLEMUG, 0x1f, 0, mugain_scale),
- SOC_SINGLE_TLV("Comp Thresh Playback Volume",
+ SOC_SINGLE_TLV("Comp Thresh Volume",
R_COMPTH, FB_COMPTH, 0xff, 0, compth_scale),
SOC_ENUM("Comp Ratio", compressor_ratio_enum),
SND_SOC_BYTES("Comp Atk Time", R_CATKTCL, 2),
@@ -766,9 +775,9 @@ static const struct snd_kcontrol_new tscs42xx_snd_controls[] = {
SOC_SINGLE("MBC1 Phase Invert Switch",
R_DACMBCMUG1, FB_DACMBCMUG1_PHASE, 1, 0),
- SOC_SINGLE_TLV("DAC MBC1 Make-Up Gain Playback Volume",
+ SOC_SINGLE_TLV("DAC MBC1 Make-Up Gain Volume",
R_DACMBCMUG1, FB_DACMBCMUG1_MUGAIN, 0x1f, 0, mugain_scale),
- SOC_SINGLE_TLV("DAC MBC1 Comp Thresh Playback Volume",
+ SOC_SINGLE_TLV("DAC MBC1 Comp Thresh Volume",
R_DACMBCTHR1, FB_DACMBCTHR1_THRESH, 0xff, 0, compth_scale),
SOC_ENUM("DAC MBC1 Comp Ratio",
dac_mbc1_compressor_ratio_enum),
@@ -778,9 +787,9 @@ static const struct snd_kcontrol_new tscs42xx_snd_controls[] = {
SOC_SINGLE("MBC2 Phase Invert Switch",
R_DACMBCMUG2, FB_DACMBCMUG2_PHASE, 1, 0),
- SOC_SINGLE_TLV("DAC MBC2 Make-Up Gain Playback Volume",
+ SOC_SINGLE_TLV("DAC MBC2 Make-Up Gain Volume",
R_DACMBCMUG2, FB_DACMBCMUG2_MUGAIN, 0x1f, 0, mugain_scale),
- SOC_SINGLE_TLV("DAC MBC2 Comp Thresh Playback Volume",
+ SOC_SINGLE_TLV("DAC MBC2 Comp Thresh Volume",
R_DACMBCTHR2, FB_DACMBCTHR2_THRESH, 0xff, 0, compth_scale),
SOC_ENUM("DAC MBC2 Comp Ratio",
dac_mbc2_compressor_ratio_enum),
@@ -790,9 +799,9 @@ static const struct snd_kcontrol_new tscs42xx_snd_controls[] = {
SOC_SINGLE("MBC3 Phase Invert Switch",
R_DACMBCMUG3, FB_DACMBCMUG3_PHASE, 1, 0),
- SOC_SINGLE_TLV("DAC MBC3 Make-Up Gain Playback Volume",
+ SOC_SINGLE_TLV("DAC MBC3 Make-Up Gain Volume",
R_DACMBCMUG3, FB_DACMBCMUG3_MUGAIN, 0x1f, 0, mugain_scale),
- SOC_SINGLE_TLV("DAC MBC3 Comp Thresh Playback Volume",
+ SOC_SINGLE_TLV("DAC MBC3 Comp Thresh Volume",
R_DACMBCTHR3, FB_DACMBCTHR3_THRESH, 0xff, 0, compth_scale),
SOC_ENUM("DAC MBC3 Comp Ratio",
dac_mbc3_compressor_ratio_enum),
diff --git a/sound/soc/codecs/tscs42xx.h b/sound/soc/codecs/tscs42xx.h
index 814c8f3c4a68..6b3a21081635 100644
--- a/sound/soc/codecs/tscs42xx.h
+++ b/sound/soc/codecs/tscs42xx.h
@@ -34,6 +34,7 @@ enum {
#define R_DACSR 0x19
#define R_PWRM1 0x1A
#define R_PWRM2 0x1B
+#define R_CTL 0x1C
#define R_CONFIG0 0x1F
#define R_CONFIG1 0x20
#define R_DMICCTL 0x24
@@ -1110,6 +1111,13 @@ enum {
#define RV_PWRM2_VREF_DISABLE \
RV(FV_PWRM2_VREF_DISABLE, FB_PWRM2_VREF)
+/******************************
+ * R_CTL (0x1C) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CTL_HPSWEN 7
+#define FB_CTL_HPSWPOL 6
/******************************
* R_CONFIG0 (0x1F) *
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index bfd1abd72253..94675da514c8 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -148,7 +148,7 @@ static bool twl6040_can_write_to_chip(struct snd_soc_component *component,
case TWL6040_REG_HFRCTL:
return priv->dl2_unmuted;
default:
- return 1;
+ return true;
}
}
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index 3663b9fd4d65..deff65161504 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -1180,6 +1180,9 @@ SOC_DOUBLE_R_TLV("OUT2 Digital Volume", WM2200_DAC_DIGITAL_VOLUME_2L,
SOC_DOUBLE("OUT2 Switch", WM2200_PDM_1, WM2200_SPK1L_MUTE_SHIFT,
WM2200_SPK1R_MUTE_SHIFT, 1, 1),
SOC_ENUM("RxANC Src", wm2200_rxanc_input_sel),
+
+WM_ADSP_FW_CONTROL("DSP1", 0),
+WM_ADSP_FW_CONTROL("DSP2", 1),
};
WM2200_MIXER_ENUMS(OUT1L, WM2200_OUT1LMIX_INPUT_1_SOURCE);
@@ -1553,15 +1556,10 @@ static const struct snd_soc_dapm_route wm2200_dapm_routes[] = {
static int wm2200_probe(struct snd_soc_component *component)
{
struct wm2200_priv *wm2200 = snd_soc_component_get_drvdata(component);
- int ret;
wm2200->component = component;
- ret = snd_soc_add_component_controls(component, wm_adsp_fw_controls, 2);
- if (ret != 0)
- return ret;
-
- return ret;
+ return 0;
}
static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
diff --git a/sound/soc/codecs/wm5100-tables.c b/sound/soc/codecs/wm5100-tables.c
index e239f4bf2460..9e987cf07450 100644
--- a/sound/soc/codecs/wm5100-tables.c
+++ b/sound/soc/codecs/wm5100-tables.c
@@ -30,7 +30,7 @@ bool wm5100_volatile_register(struct device *dev, unsigned int reg)
case WM5100_OUTPUT_STATUS_2:
case WM5100_INPUT_ENABLES_STATUS:
case WM5100_MIC_DETECT_3:
- return 1;
+ return true;
default:
if ((reg >= WM5100_DSP1_PM_0 && reg <= WM5100_DSP1_PM_1535) ||
(reg >= WM5100_DSP1_ZM_0 && reg <= WM5100_DSP1_ZM_2047) ||
@@ -41,9 +41,9 @@ bool wm5100_volatile_register(struct device *dev, unsigned int reg)
(reg >= WM5100_DSP3_PM_0 && reg <= WM5100_DSP3_PM_1535) ||
(reg >= WM5100_DSP3_ZM_0 && reg <= WM5100_DSP3_ZM_2047) ||
(reg >= WM5100_DSP3_DM_0 && reg <= WM5100_DSP3_DM_511))
- return 1;
+ return true;
else
- return 0;
+ return false;
}
}
@@ -798,7 +798,7 @@ bool wm5100_readable_register(struct device *dev, unsigned int reg)
case WM5100_DSP3_CONTROL_28:
case WM5100_DSP3_CONTROL_29:
case WM5100_DSP3_CONTROL_30:
- return 1;
+ return true;
default:
if ((reg >= WM5100_DSP1_PM_0 && reg <= WM5100_DSP1_PM_1535) ||
(reg >= WM5100_DSP1_ZM_0 && reg <= WM5100_DSP1_ZM_2047) ||
@@ -809,9 +809,9 @@ bool wm5100_readable_register(struct device *dev, unsigned int reg)
(reg >= WM5100_DSP3_PM_0 && reg <= WM5100_DSP3_PM_1535) ||
(reg >= WM5100_DSP3_ZM_0 && reg <= WM5100_DSP3_ZM_2047) ||
(reg >= WM5100_DSP3_DM_0 && reg <= WM5100_DSP3_DM_511))
- return 1;
+ return true;
else
- return 0;
+ return false;
}
}
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 1ac83388d1b8..7e817e1877c2 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -985,6 +985,8 @@ ARIZONA_MIXER_CONTROLS("SLIMTX5", ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("SLIMTX6", ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("SLIMTX7", ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("SLIMTX8", ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE),
+
+WM_ADSP_FW_CONTROL("DSP1", 0),
};
ARIZONA_MIXER_ENUMS(EQ1, ARIZONA_EQ1MIX_INPUT_1_SOURCE);
@@ -2094,6 +2096,12 @@ static int wm5102_probe(struct platform_device *pdev)
return ret;
}
+ ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_DSP_IRQ1, 1);
+ if (ret != 0)
+ dev_warn(&pdev->dev,
+ "Failed to set compressed IRQ as a wake source: %d\n",
+ ret);
+
arizona_init_common(arizona);
ret = arizona_init_vol_limit(arizona);
@@ -2117,6 +2125,7 @@ static int wm5102_probe(struct platform_device *pdev)
err_spk_irqs:
arizona_free_spk_irqs(arizona);
err_dsp_irq:
+ arizona_set_irq_wake(arizona, ARIZONA_IRQ_DSP_IRQ1, 0);
arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, wm5102);
return ret;
@@ -2133,6 +2142,7 @@ static int wm5102_remove(struct platform_device *pdev)
arizona_free_spk_irqs(arizona);
+ arizona_set_irq_wake(arizona, ARIZONA_IRQ_DSP_IRQ1, 0);
arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, wm5102);
return 0;
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index fb9835dcd836..b0789a03d699 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -927,6 +927,11 @@ ARIZONA_MIXER_CONTROLS("SLIMTX5", ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("SLIMTX6", ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("SLIMTX7", ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("SLIMTX8", ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE),
+
+WM_ADSP_FW_CONTROL("DSP1", 0),
+WM_ADSP_FW_CONTROL("DSP2", 1),
+WM_ADSP_FW_CONTROL("DSP3", 2),
+WM_ADSP_FW_CONTROL("DSP4", 3),
};
ARIZONA_MIXER_ENUMS(EQ1, ARIZONA_EQ1MIX_INPUT_1_SOURCE);
@@ -2455,6 +2460,12 @@ static int wm5110_probe(struct platform_device *pdev)
return ret;
}
+ ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_DSP_IRQ1, 1);
+ if (ret != 0)
+ dev_warn(&pdev->dev,
+ "Failed to set compressed IRQ as a wake source: %d\n",
+ ret);
+
arizona_init_common(arizona);
ret = arizona_init_vol_limit(arizona);
@@ -2478,6 +2489,7 @@ static int wm5110_probe(struct platform_device *pdev)
err_spk_irqs:
arizona_free_spk_irqs(arizona);
err_dsp_irq:
+ arizona_set_irq_wake(arizona, ARIZONA_IRQ_DSP_IRQ1, 0);
arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, wm5110);
return ret;
@@ -2496,6 +2508,7 @@ static int wm5110_remove(struct platform_device *pdev)
arizona_free_spk_irqs(arizona);
+ arizona_set_irq_wake(arizona, ARIZONA_IRQ_DSP_IRQ1, 0);
arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, wm5110);
return 0;
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 7b8b6ef2f632..6cb3c153ba19 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -251,10 +251,10 @@ static bool wm8903_volatile_register(struct device *dev, unsigned int reg)
case WM8903_DC_SERVO_READBACK_2:
case WM8903_DC_SERVO_READBACK_3:
case WM8903_DC_SERVO_READBACK_4:
- return 1;
+ return true;
default:
- return 0;
+ return false;
}
}
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 9037a35b931d..1965635ec07c 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1455,6 +1455,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_B:
aif1 |= 0x3 | WM8904_AIF_LRCLK_INV;
+ /* fall through */
case SND_SOC_DAIFMT_DSP_A:
aif1 |= 0x3;
break;
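These `/* fall through */` annotations mark intentional case fall-through for GCC's -Wimplicit-fallthrough warning: DSP mode B is mode A with inverted LRCLK, so case B sets the invert bit and then shares mode A's format bits. A minimal sketch of the pattern, with a hypothetical bit name:

switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_B:
	aif |= EXAMPLE_LRCLK_INV;	/* hypothetical invert bit */
	/* fall through */		/* B is A plus inverted LRCLK */
case SND_SOC_DAIFMT_DSP_A:
	aif |= 0x3;			/* shared DSP format bits */
	break;
default:
	return -EINVAL;
}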
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index ba44e3d6c1e0..cd204f79647d 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -686,6 +686,7 @@ static int wm8955_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_B:
aif |= WM8955_LRP;
+ /* fall through */
case SND_SOC_DAIFMT_DSP_A:
aif |= 0x3;
break;
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index c30f5aa392c6..8dc1f3d6a988 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -839,6 +839,7 @@ static int wm8960_hw_params(struct snd_pcm_substream *substream,
iface |= 0x000c;
break;
}
+ /* fall through */
default:
dev_err(component->dev, "unsupported width %d\n",
params_width(params));
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index f70f563d59f3..68b4cadc308f 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -653,6 +653,7 @@ static int wm8961_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
case SND_SOC_DAIFMT_DSP_B:
aif |= WM8961_LRP;
+ /* fall through */
case SND_SOC_DAIFMT_DSP_A:
aif |= 3;
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index a11e9d6bf950..efd8910b1ff7 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2649,6 +2649,7 @@ static int wm8962_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_B:
aif0 |= WM8962_LRCLK_INV | 3;
+ /* fall through */
case SND_SOC_DAIFMT_DSP_A:
aif0 |= 3;
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index 62200117444b..6e52c6a8bab3 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -522,7 +522,7 @@ static inline int get_coeff(int mclk, int rate)
/* The set of rates we can generate from the above for each SYSCLK */
static const unsigned int rates_12288[] = {
- 8000, 12000, 16000, 24000, 24000, 32000, 48000, 96000,
+ 8000, 12000, 16000, 24000, 32000, 48000, 96000,
};
static const struct snd_pcm_hw_constraint_list constraints_12288 = {
@@ -540,7 +540,7 @@ static const struct snd_pcm_hw_constraint_list constraints_112896 = {
};
static const unsigned int rates_12[] = {
- 8000, 11025, 12000, 16000, 22050, 2400, 32000, 41100, 48000,
+ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 41100, 48000,
48000, 88235, 96000,
};
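The fix above matters because these arrays are handed straight to the PCM core as rate constraints; a stray value like 2400 is advertised to userspace as a supported rate. A hedged sketch of how such a list is typically wired up:

static const unsigned int example_rates[] = {
	8000, 16000, 32000, 48000,
};

static const struct snd_pcm_hw_constraint_list example_constraints = {
	.count	= ARRAY_SIZE(example_rates),
	.list	= example_rates,
};

/* usually applied from the DAI startup callback */
static int example_startup(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_constraint_list(substream->runtime, 0,
					  SNDRV_PCM_HW_PARAM_RATE,
					  &example_constraints);
}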
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 411b9eee88c2..457bc437ce54 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -40,9 +40,9 @@ static bool wm8990_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case WM8990_RESET:
- return 1;
+ return true;
default:
- return 0;
+ return false;
}
}
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 7fdfdf3f6e67..14f1b0c0d286 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -2432,6 +2432,7 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
snd_soc_component_update_bits(component, WM8994_POWER_MANAGEMENT_2,
WM8994_OPCLK_ENA, 0);
}
+ break;
default:
return -EINVAL;
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index 60e227832331..68c99fe37097 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -1465,6 +1465,7 @@ static int wm8995_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_B:
aif |= WM8995_AIF1_LRCLK_INV;
+ /* fall through */
case SND_SOC_DAIFMT_DSP_A:
aif |= (0x3 << WM8995_AIF1_FMT_SHIFT);
break;
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index d9d206046f8c..91711f8958c5 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -1498,9 +1498,9 @@ static bool wm8996_readable_register(struct device *dev, unsigned int reg)
case WM8996_RIGHT_PDM_SPEAKER:
case WM8996_PDM_SPEAKER_MUTE_SEQUENCE:
case WM8996_PDM_SPEAKER_VOLUME:
- return 1;
+ return true;
default:
- return 0;
+ return false;
}
}
@@ -1522,9 +1522,9 @@ static bool wm8996_volatile_register(struct device *dev, unsigned int reg)
case WM8996_MIC_DETECT_3:
case WM8996_HEADPHONE_DETECT_1:
case WM8996_HEADPHONE_DETECT_2:
- return 1;
+ return true;
default:
- return 0;
+ return false;
}
}
@@ -1858,6 +1858,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
case 24576000:
ratediv = WM8996_SYSCLK_DIV;
wm8996->sysclk /= 2;
+ /* fall through */
case 11289600:
case 12288000:
snd_soc_component_update_bits(component, WM8996_AIF_RATE,
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 5a0ea7b3c149..399255d1f78a 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -933,6 +933,7 @@ static int wm9081_set_dai_fmt(struct snd_soc_dai *dai,
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_B:
aif2 |= WM9081_AIF_LRCLK_INV;
+ /* fall through */
case SND_SOC_DAIFMT_DSP_A:
aif2 |= 0x3;
break;
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 2fcdd84021a5..f61656070225 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -35,15 +36,15 @@
#include "wm_adsp.h"
#define adsp_crit(_dsp, fmt, ...) \
- dev_crit(_dsp->dev, "DSP%d: " fmt, _dsp->num, ##__VA_ARGS__)
+ dev_crit(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
#define adsp_err(_dsp, fmt, ...) \
- dev_err(_dsp->dev, "DSP%d: " fmt, _dsp->num, ##__VA_ARGS__)
+ dev_err(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
#define adsp_warn(_dsp, fmt, ...) \
- dev_warn(_dsp->dev, "DSP%d: " fmt, _dsp->num, ##__VA_ARGS__)
+ dev_warn(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
#define adsp_info(_dsp, fmt, ...) \
- dev_info(_dsp->dev, "DSP%d: " fmt, _dsp->num, ##__VA_ARGS__)
+ dev_info(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
#define adsp_dbg(_dsp, fmt, ...) \
- dev_dbg(_dsp->dev, "DSP%d: " fmt, _dsp->num, ##__VA_ARGS__)
+ dev_dbg(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
#define ADSP1_CONTROL_1 0x00
#define ADSP1_CONTROL_2 0x02
@@ -418,7 +419,7 @@ static const struct wm_adsp_fw_caps ctrl_caps[] = {
{
.id = SND_AUDIOCODEC_BESPOKE,
.desc = {
- .max_ch = 1,
+ .max_ch = 8,
.sample_rates = { 16000 },
.num_sample_rates = 1,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -608,7 +609,6 @@ static void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
struct snd_soc_component *component)
{
struct dentry *root = NULL;
- char *root_name;
int i;
if (!component->debugfs_root) {
@@ -616,13 +616,7 @@ static void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
goto err;
}
- root_name = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!root_name)
- goto err;
-
- snprintf(root_name, PAGE_SIZE, "dsp%d", dsp->num);
- root = debugfs_create_dir(root_name, component->debugfs_root);
- kfree(root_name);
+ root = debugfs_create_dir(dsp->name, component->debugfs_root);
if (!root)
goto err;
@@ -684,8 +678,8 @@ static inline void wm_adsp_debugfs_clear(struct wm_adsp *dsp)
}
#endif
-static int wm_adsp_fw_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+int wm_adsp_fw_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
@@ -695,9 +689,10 @@ static int wm_adsp_fw_get(struct snd_kcontrol *kcontrol,
return 0;
}
+EXPORT_SYMBOL_GPL(wm_adsp_fw_get);
-static int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
@@ -721,8 +716,9 @@ static int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
return ret;
}
+EXPORT_SYMBOL_GPL(wm_adsp_fw_put);
-static const struct soc_enum wm_adsp_fw_enum[] = {
+const struct soc_enum wm_adsp_fw_enum[] = {
SOC_ENUM_SINGLE(0, 0, ARRAY_SIZE(wm_adsp_fw_text), wm_adsp_fw_text),
SOC_ENUM_SINGLE(0, 1, ARRAY_SIZE(wm_adsp_fw_text), wm_adsp_fw_text),
SOC_ENUM_SINGLE(0, 2, ARRAY_SIZE(wm_adsp_fw_text), wm_adsp_fw_text),
@@ -731,24 +727,7 @@ static const struct soc_enum wm_adsp_fw_enum[] = {
SOC_ENUM_SINGLE(0, 5, ARRAY_SIZE(wm_adsp_fw_text), wm_adsp_fw_text),
SOC_ENUM_SINGLE(0, 6, ARRAY_SIZE(wm_adsp_fw_text), wm_adsp_fw_text),
};
-
-const struct snd_kcontrol_new wm_adsp_fw_controls[] = {
- SOC_ENUM_EXT("DSP1 Firmware", wm_adsp_fw_enum[0],
- wm_adsp_fw_get, wm_adsp_fw_put),
- SOC_ENUM_EXT("DSP2 Firmware", wm_adsp_fw_enum[1],
- wm_adsp_fw_get, wm_adsp_fw_put),
- SOC_ENUM_EXT("DSP3 Firmware", wm_adsp_fw_enum[2],
- wm_adsp_fw_get, wm_adsp_fw_put),
- SOC_ENUM_EXT("DSP4 Firmware", wm_adsp_fw_enum[3],
- wm_adsp_fw_get, wm_adsp_fw_put),
- SOC_ENUM_EXT("DSP5 Firmware", wm_adsp_fw_enum[4],
- wm_adsp_fw_get, wm_adsp_fw_put),
- SOC_ENUM_EXT("DSP6 Firmware", wm_adsp_fw_enum[5],
- wm_adsp_fw_get, wm_adsp_fw_put),
- SOC_ENUM_EXT("DSP7 Firmware", wm_adsp_fw_enum[6],
- wm_adsp_fw_get, wm_adsp_fw_put),
-};
-EXPORT_SYMBOL_GPL(wm_adsp_fw_controls);
+EXPORT_SYMBOL_GPL(wm_adsp_fw_enum);
static struct wm_adsp_region const *wm_adsp_find_region(struct wm_adsp *dsp,
int type)
@@ -1330,12 +1309,12 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
switch (dsp->fw_ver) {
case 0:
case 1:
- snprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "DSP%d %s %x",
- dsp->num, region_name, alg_region->alg);
+ snprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s %x",
+ dsp->name, region_name, alg_region->alg);
break;
default:
ret = snprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN,
- "DSP%d%c %.12s %x", dsp->num, *region_name,
+ "%s%c %.12s %x", dsp->name, *region_name,
wm_adsp_fw_text[dsp->fw], alg_region->alg);
/* Truncate the subname from the start if it is too long */
@@ -1343,6 +1322,9 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
int avail = SNDRV_CTL_ELEM_ID_NAME_MAXLEN - ret - 2;
int skip = 0;
+ if (dsp->component->name_prefix)
+ avail -= strlen(dsp->component->name_prefix) + 1;
+
if (subname_len > avail)
skip = subname_len - avail;
@@ -1604,6 +1586,15 @@ static int wm_adsp_parse_coeff(struct wm_adsp *dsp,
if (ret)
return -EINVAL;
break;
+ case WMFW_CTL_TYPE_HOST_BUFFER:
+ ret = wm_adsp_check_coeff_flags(dsp, &coeff_blk,
+ WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_READABLE,
+ 0);
+ if (ret)
+ return -EINVAL;
+ break;
default:
adsp_err(dsp, "Unknown control type: %d\n",
coeff_blk.ctl_type);
@@ -1651,7 +1642,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
if (file == NULL)
return -ENOMEM;
- snprintf(file, PAGE_SIZE, "%s-dsp%d-%s.wmfw", dsp->part, dsp->num,
+ snprintf(file, PAGE_SIZE, "%s-%s-%s.wmfw", dsp->part, dsp->fwf_name,
wm_adsp_fw[dsp->fw].file);
file[PAGE_SIZE - 1] = '\0';
@@ -1871,9 +1862,11 @@ static void wm_adsp_ctl_fixup_base(struct wm_adsp *dsp,
}
static void *wm_adsp_read_algs(struct wm_adsp *dsp, size_t n_algs,
+ const struct wm_adsp_region *mem,
unsigned int pos, unsigned int len)
{
void *alg;
+ unsigned int reg;
int ret;
__be32 val;
@@ -1888,7 +1881,9 @@ static void *wm_adsp_read_algs(struct wm_adsp *dsp, size_t n_algs,
}
/* Read the terminator first to validate the length */
- ret = regmap_raw_read(dsp->regmap, pos + len, &val, sizeof(val));
+ reg = wm_adsp_region_to_reg(mem, pos + len);
+
+ ret = regmap_raw_read(dsp->regmap, reg, &val, sizeof(val));
if (ret != 0) {
adsp_err(dsp, "Failed to read algorithm list end: %d\n",
ret);
@@ -1897,13 +1892,18 @@ static void *wm_adsp_read_algs(struct wm_adsp *dsp, size_t n_algs,
if (be32_to_cpu(val) != 0xbedead)
adsp_warn(dsp, "Algorithm list end %x 0x%x != 0xbedead\n",
- pos + len, be32_to_cpu(val));
+ reg, be32_to_cpu(val));
+
+ /* Convert length from DSP words to bytes */
+ len *= sizeof(u32);
- alg = kcalloc(len, 2, GFP_KERNEL | GFP_DMA);
+ alg = kzalloc(len, GFP_KERNEL | GFP_DMA);
if (!alg)
return ERR_PTR(-ENOMEM);
- ret = regmap_raw_read(dsp->regmap, pos, alg, len * 2);
+ reg = wm_adsp_region_to_reg(mem, pos);
+
+ ret = regmap_raw_read(dsp->regmap, reg, alg, len);
if (ret != 0) {
adsp_err(dsp, "Failed to read algorithm list: %d\n", ret);
kfree(alg);
@@ -2002,10 +2002,11 @@ static int wm_adsp1_setup_algs(struct wm_adsp *dsp)
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
- pos = sizeof(adsp1_id) / 2;
- len = (sizeof(*adsp1_alg) * n_algs) / 2;
+ /* Calculate offset and length in DSP words */
+ pos = sizeof(adsp1_id) / sizeof(u32);
+ len = (sizeof(*adsp1_alg) * n_algs) / sizeof(u32);
- adsp1_alg = wm_adsp_read_algs(dsp, n_algs, mem->base + pos, len);
+ adsp1_alg = wm_adsp_read_algs(dsp, n_algs, mem, pos, len);
if (IS_ERR(adsp1_alg))
return PTR_ERR(adsp1_alg);
@@ -2113,10 +2114,11 @@ static int wm_adsp2_setup_algs(struct wm_adsp *dsp)
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
- pos = sizeof(adsp2_id) / 2;
- len = (sizeof(*adsp2_alg) * n_algs) / 2;
+ /* Calculate offset and length in DSP words */
+ pos = sizeof(adsp2_id) / sizeof(u32);
+ len = (sizeof(*adsp2_alg) * n_algs) / sizeof(u32);
- adsp2_alg = wm_adsp_read_algs(dsp, n_algs, mem->base + pos, len);
+ adsp2_alg = wm_adsp_read_algs(dsp, n_algs, mem, pos, len);
if (IS_ERR(adsp2_alg))
return PTR_ERR(adsp2_alg);
@@ -2218,7 +2220,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
if (file == NULL)
return -ENOMEM;
- snprintf(file, PAGE_SIZE, "%s-dsp%d-%s.bin", dsp->part, dsp->num,
+ snprintf(file, PAGE_SIZE, "%s-%s-%s.bin", dsp->part, dsp->fwf_name,
wm_adsp_fw[dsp->fw].file);
file[PAGE_SIZE - 1] = '\0';
@@ -2390,8 +2392,38 @@ out:
return ret;
}
+static int wm_adsp_create_name(struct wm_adsp *dsp)
+{
+ char *p;
+
+ if (!dsp->name) {
+ dsp->name = devm_kasprintf(dsp->dev, GFP_KERNEL, "DSP%d",
+ dsp->num);
+ if (!dsp->name)
+ return -ENOMEM;
+ }
+
+ if (!dsp->fwf_name) {
+ p = devm_kstrdup(dsp->dev, dsp->name, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ dsp->fwf_name = p;
+ for (; *p != 0; ++p)
+ *p = tolower(*p);
+ }
+
+ return 0;
+}
+
int wm_adsp1_init(struct wm_adsp *dsp)
{
+ int ret;
+
+ ret = wm_adsp_create_name(dsp);
+ if (ret)
+ return ret;
+
INIT_LIST_HEAD(&dsp->alg_regions);
mutex_init(&dsp->pwr_lock);
@@ -2642,7 +2674,10 @@ int wm_adsp2_preloader_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
- struct wm_adsp *dsp = snd_soc_component_get_drvdata(component);
+ struct wm_adsp *dsps = snd_soc_component_get_drvdata(component);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct wm_adsp *dsp = &dsps[mc->shift - 1];
ucontrol->value.integer.value[0] = dsp->preloaded;
@@ -2654,13 +2689,14 @@ int wm_adsp2_preloader_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
- struct wm_adsp *dsp = snd_soc_component_get_drvdata(component);
+ struct wm_adsp *dsps = snd_soc_component_get_drvdata(component);
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
+ struct wm_adsp *dsp = &dsps[mc->shift - 1];
char preload[32];
- snprintf(preload, ARRAY_SIZE(preload), "DSP%u Preload", mc->shift);
+ snprintf(preload, ARRAY_SIZE(preload), "%s Preload", dsp->name);
dsp->preloaded = ucontrol->value.integer.value[0];
@@ -2671,6 +2707,8 @@ int wm_adsp2_preloader_put(struct snd_kcontrol *kcontrol,
snd_soc_dapm_sync(dapm);
+ flush_work(&dsp->boot_work);
+
return 0;
}
EXPORT_SYMBOL_GPL(wm_adsp2_preloader_put);
@@ -2853,17 +2891,14 @@ int wm_adsp2_component_probe(struct wm_adsp *dsp, struct snd_soc_component *comp
{
char preload[32];
- snprintf(preload, ARRAY_SIZE(preload), "DSP%d Preload", dsp->num);
-
+ snprintf(preload, ARRAY_SIZE(preload), "%s Preload", dsp->name);
snd_soc_component_disable_pin(component, preload);
wm_adsp2_init_debugfs(dsp, component);
dsp->component = component;
- return snd_soc_add_component_controls(component,
- &wm_adsp_fw_controls[dsp->num - 1],
- 1);
+ return 0;
}
EXPORT_SYMBOL_GPL(wm_adsp2_component_probe);
@@ -2879,6 +2914,10 @@ int wm_adsp2_init(struct wm_adsp *dsp)
{
int ret;
+ ret = wm_adsp_create_name(dsp);
+ if (ret)
+ return ret;
+
switch (dsp->rev) {
case 0:
/*
@@ -3186,7 +3225,7 @@ static inline int wm_adsp_buffer_write(struct wm_adsp_compr_buf *buf,
buf->host_buf_ptr + field_offset, data);
}
-static int wm_adsp_buffer_locate(struct wm_adsp_compr_buf *buf)
+static int wm_adsp_legacy_host_buf_addr(struct wm_adsp_compr_buf *buf)
{
struct wm_adsp_alg_region *alg_region;
struct wm_adsp *dsp = buf->dsp;
@@ -3225,6 +3264,61 @@ static int wm_adsp_buffer_locate(struct wm_adsp_compr_buf *buf)
return 0;
}
+static struct wm_coeff_ctl *
+wm_adsp_find_host_buffer_ctrl(struct wm_adsp_compr_buf *buf)
+{
+ struct wm_adsp *dsp = buf->dsp;
+ struct wm_coeff_ctl *ctl;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (ctl->type != WMFW_CTL_TYPE_HOST_BUFFER)
+ continue;
+
+ if (!ctl->enabled)
+ continue;
+
+ return ctl;
+ }
+
+ return NULL;
+}
+
+static int wm_adsp_buffer_locate(struct wm_adsp_compr_buf *buf)
+{
+ struct wm_adsp *dsp = buf->dsp;
+ struct wm_coeff_ctl *ctl;
+ unsigned int reg;
+ u32 val;
+ int i, ret;
+
+ ctl = wm_adsp_find_host_buffer_ctrl(buf);
+ if (!ctl)
+ return wm_adsp_legacy_host_buf_addr(buf);
+
+ ret = wm_coeff_base_reg(ctl, &reg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 5; ++i) {
+ ret = regmap_raw_read(dsp->regmap, reg, &val, sizeof(val));
+ if (ret < 0)
+ return ret;
+
+ if (val)
+ break;
+
+ usleep_range(1000, 2000);
+ }
+
+ if (!val)
+ return -EIO;
+
+ buf->host_buf_ptr = be32_to_cpu(val);
+ adsp_dbg(dsp, "host_buf_ptr=%x\n", buf->host_buf_ptr);
+
+ return 0;
+}
+
static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
{
const struct wm_adsp_fw_caps *caps = wm_adsp_fw[buf->dsp->fw].caps;
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
index bc6d359f0533..4b8778b0b06c 100644
--- a/sound/soc/codecs/wm_adsp.h
+++ b/sound/soc/codecs/wm_adsp.h
@@ -57,6 +57,8 @@ struct wm_adsp_compr_buf;
struct wm_adsp {
const char *part;
+ const char *name;
+ const char *fwf_name;
int rev;
int num;
int type;
@@ -121,7 +123,11 @@ struct wm_adsp {
.reg = SND_SOC_NOPM, .shift = num, .event = wm_adsp2_event, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD }
-extern const struct snd_kcontrol_new wm_adsp_fw_controls[];
+#define WM_ADSP_FW_CONTROL(dspname, num) \
+ SOC_ENUM_EXT(dspname " Firmware", wm_adsp_fw_enum[num], \
+ wm_adsp_fw_get, wm_adsp_fw_put)
+
+extern const struct soc_enum wm_adsp_fw_enum[];
int wm_adsp1_init(struct wm_adsp *dsp);
int wm_adsp2_init(struct wm_adsp *dsp);
@@ -144,6 +150,10 @@ int wm_adsp2_preloader_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
int wm_adsp2_preloader_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
+int wm_adsp_fw_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
int wm_adsp_compr_open(struct wm_adsp *dsp, struct snd_compr_stream *stream);
int wm_adsp_compr_free(struct snd_compr_stream *stream);
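With wm_adsp_fw_enum and the get/put handlers now exported, codec drivers declare their firmware selectors inline through the new WM_ADSP_FW_CONTROL() macro instead of registering entries from the old fixed table at probe time, as the wm2200/wm5102/wm5110 hunks above do. A usage sketch with a hypothetical codec control array:

static const struct snd_kcontrol_new example_codec_controls[] = {
	/* ... codec-specific controls ... */
	WM_ADSP_FW_CONTROL("DSP1", 0),
	WM_ADSP_FW_CONTROL("DSP2", 1),
};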
diff --git a/sound/soc/codecs/wmfw.h b/sound/soc/codecs/wmfw.h
index ec78b9da020f..0c3f50acb8b1 100644
--- a/sound/soc/codecs/wmfw.h
+++ b/sound/soc/codecs/wmfw.h
@@ -29,6 +29,7 @@
/* Non-ALSA coefficient types start at 0x1000 */
#define WMFW_CTL_TYPE_ACKED 0x1000 /* acked control */
#define WMFW_CTL_TYPE_HOSTEVENT 0x1001 /* event control */
+#define WMFW_CTL_TYPE_HOST_BUFFER 0x1002 /* host buffer pointer */
struct wmfw_header {
char magic[4];
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c
index 807040bb3921..a3206e65e5e5 100644
--- a/sound/soc/davinci/davinci-i2s.c
+++ b/sound/soc/davinci/davinci-i2s.c
@@ -340,6 +340,7 @@ static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
* rate is lowered.
*/
inv_fs = true;
+ /* fall through */
case SND_SOC_DAIFMT_DSP_A:
dev->mode = MOD_DSP_A;
break;
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 47c0c821d325..f70db8412c7c 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -320,12 +320,8 @@ static irqreturn_t davinci_mcasp_tx_irq_handler(int irq, void *data)
handled_mask |= XUNDRN;
substream = mcasp->substreams[SNDRV_PCM_STREAM_PLAYBACK];
- if (substream) {
- snd_pcm_stream_lock_irq(substream);
- if (snd_pcm_running(substream))
- snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock_irq(substream);
- }
+ if (substream)
+ snd_pcm_stop_xrun(substream);
}
if (!handled_mask)
@@ -355,12 +351,8 @@ static irqreturn_t davinci_mcasp_rx_irq_handler(int irq, void *data)
handled_mask |= ROVRN;
substream = mcasp->substreams[SNDRV_PCM_STREAM_CAPTURE];
- if (substream) {
- snd_pcm_stream_lock_irq(substream);
- if (snd_pcm_running(substream))
- snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock_irq(substream);
- }
+ if (substream)
+ snd_pcm_stop_xrun(substream);
}
if (!handled_mask)
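snd_pcm_stop_xrun() takes the stream lock and checks the running state internally, so both IRQ handlers above shrink from the open-coded lock/check/stop sequence to a single call. Sketch of the resulting idiom:

static void example_report_xrun(struct snd_pcm_substream *substream)
{
	if (substream)
		snd_pcm_stop_xrun(substream);	/* locks, checks, stops */
}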
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 4a6750aa3637..44433b20435c 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -1,14 +1,10 @@
-/*
- * Freescale Generic ASoC Sound Card driver with ASRC
- *
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
- *
- * Author: Nicolin Chen <nicoleotsuka@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale Generic ASoC Sound Card driver with ASRC
+//
+// Copyright (C) 2014 Freescale Semiconductor, Inc.
+//
+// Author: Nicolin Chen <nicoleotsuka@gmail.com>
#include <linux/clk.h>
#include <linux/i2c.h>
@@ -199,7 +195,7 @@ static int be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
snd_mask_none(mask);
- snd_mask_set(mask, (__force int)priv->asrc_format);
+ snd_mask_set_format(mask, priv->asrc_format);
return 0;
}
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index adfb8135d739..528e8b108422 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -1,14 +1,10 @@
-/*
- * Freescale ASRC ALSA SoC Digital Audio Interface (DAI) driver
- *
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
- *
- * Author: Nicolin Chen <nicoleotsuka@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale ASRC ALSA SoC Digital Audio Interface (DAI) driver
+//
+// Copyright (C) 2014 Freescale Semiconductor, Inc.
+//
+// Author: Nicolin Chen <nicoleotsuka@gmail.com>
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/sound/soc/fsl/fsl_asrc.h b/sound/soc/fsl/fsl_asrc.h
index d558dd5499a5..c60075112570 100644
--- a/sound/soc/fsl/fsl_asrc.h
+++ b/sound/soc/fsl/fsl_asrc.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fsl_asrc.h - Freescale ASRC ALSA SoC header file
*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
*
* Author: Nicolin Chen <nicoleotsuka@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#ifndef _FSL_ASRC_H
diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
index 565e16d8fe85..1033ac6631b0 100644
--- a/sound/soc/fsl/fsl_asrc_dma.c
+++ b/sound/soc/fsl/fsl_asrc_dma.c
@@ -1,14 +1,10 @@
-/*
- * Freescale ASRC ALSA SoC Platform (DMA) driver
- *
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
- *
- * Author: Nicolin Chen <nicoleotsuka@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale ASRC ALSA SoC Platform (DMA) driver
+//
+// Copyright (C) 2014 Freescale Semiconductor, Inc.
+//
+// Author: Nicolin Chen <nicoleotsuka@gmail.com>
#include <linux/dma-mapping.h>
#include <linux/module.h>
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 8f43110373b8..c1d1d06783e5 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -249,6 +249,7 @@ static int fsl_esai_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
break;
case ESAI_HCKT_EXTAL:
ecr |= ESAI_ECR_ETI;
+ /* fall through */
case ESAI_HCKR_EXTAL:
ecr |= ESAI_ECR_ERI;
break;
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 9b59d87b61bf..740b90df44bb 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -1118,7 +1118,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
for (sysclk_df = sysclk_dfmin; sysclk_df <= sysclk_dfmax; sysclk_df++) {
for (txclk_df = 1; txclk_df <= 128; txclk_df++) {
- rate_ideal = rate[index] * txclk_df * 64;
+ rate_ideal = rate[index] * txclk_df * 64ULL;
if (round)
rate_actual = clk_round_rate(clk, rate_ideal);
else
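The 64ULL suffix above forces the whole product into 64-bit arithmetic; without it the multiplication is done in 32 bits and can wrap before the result ever reaches a 64-bit variable. A self-contained illustration:

u32 a = 100000, b = 50000;
u64 wrong = a * b;	/* 32-bit multiply wraps: 5e9 mod 2^32 */
u64 right = (u64)a * b;	/* promote first, as the 64ULL constant does above */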
diff --git a/sound/soc/fsl/fsl_utils.c b/sound/soc/fsl/fsl_utils.c
index 7592b0406370..7f0fa4b52223 100644
--- a/sound/soc/fsl/fsl_utils.c
+++ b/sound/soc/fsl/fsl_utils.c
@@ -1,14 +1,10 @@
-/**
- * Freescale ALSA SoC Machine driver utility
- *
- * Author: Timur Tabi <timur@freescale.com>
- *
- * Copyright 2010 Freescale Semiconductor, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale ALSA SoC Machine driver utility
+//
+// Author: Timur Tabi <timur@freescale.com>
+//
+// Copyright 2010 Freescale Semiconductor, Inc.
#include <linux/module.h>
#include <linux/of_address.h>
diff --git a/sound/soc/fsl/fsl_utils.h b/sound/soc/fsl/fsl_utils.h
index 1687b66ef18e..c5dc2a14b492 100644
--- a/sound/soc/fsl/fsl_utils.h
+++ b/sound/soc/fsl/fsl_utils.h
@@ -1,13 +1,10 @@
-/**
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Freescale ALSA SoC Machine driver utility
*
* Author: Timur Tabi <timur@freescale.com>
*
* Copyright 2010 Freescale Semiconductor, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#ifndef _FSL_UTILS_H
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index b99e0b5e00e9..c29200cf755a 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -1,14 +1,7 @@
-/*
- * Copyright 2012 Freescale Semiconductor, Inc.
- * Copyright 2012 Linaro Ltd.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2012 Freescale Semiconductor, Inc.
+// Copyright 2012 Linaro Ltd.
#include <linux/module.h>
#include <linux/of.h>
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index d93bacacbd5b..2094d2c8919f 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -1,15 +1,12 @@
-/*
- * ASoC audio graph sound card support
- *
- * Copyright (C) 2016 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * based on ${LINUX}/sound/soc/generic/simple-card.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// ASoC audio graph sound card support
+//
+// Copyright (C) 2016 Renesas Solutions Corp.
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+//
+// based on ${LINUX}/sound/soc/generic/simple-card.c
+
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/gpio.h>
@@ -21,7 +18,6 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/string.h>
-#include <sound/jack.h>
#include <sound/simple_card_utils.h>
struct graph_card_data {
@@ -32,6 +28,8 @@ struct graph_card_data {
unsigned int mclk_fs;
} *dai_props;
unsigned int mclk_fs;
+ struct asoc_simple_jack hp_jack;
+ struct asoc_simple_jack mic_jack;
struct snd_soc_dai_link *dai_link;
struct gpio_desc *pa_gpio;
};
@@ -278,6 +276,22 @@ static int asoc_graph_get_dais_count(struct device *dev)
return count;
}
+static int asoc_graph_soc_card_probe(struct snd_soc_card *card)
+{
+ struct graph_card_data *priv = snd_soc_card_get_drvdata(card);
+ int ret;
+
+ ret = asoc_simple_card_init_hp(card, &priv->hp_jack, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_init_mic(card, &priv->mic_jack, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int asoc_graph_card_probe(struct platform_device *pdev)
{
struct graph_card_data *priv;
@@ -319,6 +333,7 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
card->num_links = num;
card->dapm_widgets = asoc_graph_card_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(asoc_graph_card_dapm_widgets);
+ card->probe = asoc_graph_soc_card_probe;
ret = asoc_graph_card_parse_of(priv);
if (ret < 0) {
diff --git a/sound/soc/generic/audio-graph-scu-card.c b/sound/soc/generic/audio-graph-scu-card.c
index 095ef6426d42..92882e392d6c 100644
--- a/sound/soc/generic/audio-graph-scu-card.c
+++ b/sound/soc/generic/audio-graph-scu-card.c
@@ -1,17 +1,14 @@
-/*
- * ASoC audio graph SCU sound card support
- *
- * Copyright (C) 2017 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * based on
- * ${LINUX}/sound/soc/generic/simple-scu-card.c
- * ${LINUX}/sound/soc/generic/audio-graph-card.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// ASoC audio graph SCU sound card support
+//
+// Copyright (C) 2017 Renesas Solutions Corp.
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+//
+// based on
+// ${LINUX}/sound/soc/generic/simple-scu-card.c
+// ${LINUX}/sound/soc/generic/audio-graph-card.c
+
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/gpio.h>
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 3751a07de6aa..d3f3f0fec74c 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -1,16 +1,17 @@
-/*
- * simple-card-utils.c
- *
- * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// simple-card-utils.c
+//
+// Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/of_graph.h>
+#include <sound/jack.h>
#include <sound/simple_card_utils.h>
void asoc_simple_card_convert_fixup(struct asoc_simple_card_data *data,
@@ -419,6 +420,61 @@ int asoc_simple_card_of_parse_widgets(struct snd_soc_card *card,
}
EXPORT_SYMBOL_GPL(asoc_simple_card_of_parse_widgets);
+int asoc_simple_card_init_jack(struct snd_soc_card *card,
+ struct asoc_simple_jack *sjack,
+ int is_hp, char *prefix)
+{
+ struct device *dev = card->dev;
+ enum of_gpio_flags flags;
+ char prop[128];
+ char *pin_name;
+ char *gpio_name;
+ int mask;
+ int det;
+
+ if (!prefix)
+ prefix = "";
+
+ sjack->gpio.gpio = -ENOENT;
+
+ if (is_hp) {
+ snprintf(prop, sizeof(prop), "%shp-det-gpio", prefix);
+ pin_name = "Headphones";
+ gpio_name = "Headphone detection";
+ mask = SND_JACK_HEADPHONE;
+ } else {
+ snprintf(prop, sizeof(prop), "%smic-det-gpio", prefix);
+ pin_name = "Mic Jack";
+ gpio_name = "Mic detection";
+ mask = SND_JACK_MICROPHONE;
+ }
+
+ det = of_get_named_gpio_flags(dev->of_node, prop, 0, &flags);
+ if (det == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (gpio_is_valid(det)) {
+ sjack->pin.pin = pin_name;
+ sjack->pin.mask = mask;
+
+ sjack->gpio.name = gpio_name;
+ sjack->gpio.report = mask;
+ sjack->gpio.gpio = det;
+ sjack->gpio.invert = !!(flags & OF_GPIO_ACTIVE_LOW);
+ sjack->gpio.debounce_time = 150;
+
+ snd_soc_card_jack_new(card, pin_name, mask,
+ &sjack->jack,
+ &sjack->pin, 1);
+
+ snd_soc_jack_add_gpios(&sjack->jack, 1,
+ &sjack->gpio);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_init_jack);
+
/* Module information */
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_DESCRIPTION("ALSA SoC Simple Card Utils");
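With the helper now shared, both the simple and graph cards set up jack detection from their snd_soc_card probe callback through the asoc_simple_card_init_hp()/_mic() wrappers, as the surrounding hunks show. A condensed usage sketch with a hypothetical private struct:

static int example_card_probe(struct snd_soc_card *card)
{
	struct example_priv *priv = snd_soc_card_get_drvdata(card);
	int ret;

	ret = asoc_simple_card_init_hp(card, &priv->hp_jack, NULL);
	if (ret < 0)
		return ret;

	return asoc_simple_card_init_mic(card, &priv->mic_jack, NULL);
}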
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 8b374af86a6e..64bf3560c1d1 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -1,32 +1,20 @@
-/*
- * ASoC simple sound card support
- *
- * Copyright (C) 2012 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// ASoC simple sound card support
+//
+// Copyright (C) 2012 Renesas Solutions Corp.
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
#include <linux/clk.h>
#include <linux/device.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/string.h>
-#include <sound/jack.h>
#include <sound/simple_card.h>
#include <sound/soc-dai.h>
#include <sound/soc.h>
-struct asoc_simple_jack {
- struct snd_soc_jack jack;
- struct snd_soc_jack_pin pin;
- struct snd_soc_jack_gpio gpio;
-};
-
struct simple_card_data {
struct snd_soc_card snd_card;
struct simple_dai_props {
@@ -49,61 +37,6 @@ struct simple_card_data {
#define CELL "#sound-dai-cells"
#define PREFIX "simple-audio-card,"
-#define asoc_simple_card_init_hp(card, sjack, prefix)\
- asoc_simple_card_init_jack(card, sjack, 1, prefix)
-#define asoc_simple_card_init_mic(card, sjack, prefix)\
- asoc_simple_card_init_jack(card, sjack, 0, prefix)
-static int asoc_simple_card_init_jack(struct snd_soc_card *card,
- struct asoc_simple_jack *sjack,
- int is_hp, char *prefix)
-{
- struct device *dev = card->dev;
- enum of_gpio_flags flags;
- char prop[128];
- char *pin_name;
- char *gpio_name;
- int mask;
- int det;
-
- sjack->gpio.gpio = -ENOENT;
-
- if (is_hp) {
- snprintf(prop, sizeof(prop), "%shp-det-gpio", prefix);
- pin_name = "Headphones";
- gpio_name = "Headphone detection";
- mask = SND_JACK_HEADPHONE;
- } else {
- snprintf(prop, sizeof(prop), "%smic-det-gpio", prefix);
- pin_name = "Mic Jack";
- gpio_name = "Mic detection";
- mask = SND_JACK_MICROPHONE;
- }
-
- det = of_get_named_gpio_flags(dev->of_node, prop, 0, &flags);
- if (det == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- if (gpio_is_valid(det)) {
- sjack->pin.pin = pin_name;
- sjack->pin.mask = mask;
-
- sjack->gpio.name = gpio_name;
- sjack->gpio.report = mask;
- sjack->gpio.gpio = det;
- sjack->gpio.invert = !!(flags & OF_GPIO_ACTIVE_LOW);
- sjack->gpio.debounce_time = 150;
-
- snd_soc_card_jack_new(card, pin_name, mask,
- &sjack->jack,
- &sjack->pin, 1);
-
- snd_soc_jack_add_gpios(&sjack->jack, 1,
- &sjack->gpio);
- }
-
- return 0;
-}
-
static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -213,14 +146,6 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
if (ret < 0)
return ret;
- ret = asoc_simple_card_init_hp(rtd->card, &priv->hp_jack, PREFIX);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX);
- if (ret < 0)
- return ret;
-
return 0;
}
@@ -414,6 +339,22 @@ card_parse_end:
return ret;
}
+static int asoc_simple_soc_card_probe(struct snd_soc_card *card)
+{
+ struct simple_card_data *priv = snd_soc_card_get_drvdata(card);
+ int ret;
+
+ ret = asoc_simple_card_init_hp(card, &priv->hp_jack, PREFIX);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_init_mic(card, &priv->mic_jack, PREFIX);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int asoc_simple_card_probe(struct platform_device *pdev)
{
struct simple_card_data *priv;
@@ -449,6 +390,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
card->dev = dev;
card->dai_link = priv->dai_link;
card->num_links = num;
+ card->probe = asoc_simple_soc_card_probe;
if (np && of_device_is_available(np)) {
diff --git a/sound/soc/generic/simple-scu-card.c b/sound/soc/generic/simple-scu-card.c
index 487716559deb..16a83bc51e0e 100644
--- a/sound/soc/generic/simple-scu-card.c
+++ b/sound/soc/generic/simple-scu-card.c
@@ -1,15 +1,12 @@
-/*
- * ASoC simple SCU sound card support
- *
- * Copyright (C) 2015 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * based on ${LINUX}/sound/soc/generic/simple-card.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// ASoC simple SCU sound card support
+//
+// Copyright (C) 2015 Renesas Solutions Corp.
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+//
+// based on ${LINUX}/sound/soc/generic/simple-card.c
+
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/module.h>
diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
index 6a8b253c58d2..5455d6e0ab53 100644
--- a/sound/soc/intel/atom/sst/sst_drv_interface.c
+++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
@@ -266,17 +266,15 @@ static int sst_cdev_ack(struct device *dev, unsigned int str_id,
stream->cumm_bytes += bytes;
dev_dbg(dev, "bytes copied %d inc by %ld\n", stream->cumm_bytes, bytes);
- memcpy_fromio(&fw_tstamp,
- ((void *)(ctx->mailbox + ctx->tstamp)
- +(str_id * sizeof(fw_tstamp))),
- sizeof(fw_tstamp));
+ addr = ((void __iomem *)(ctx->mailbox + ctx->tstamp)) +
+ (str_id * sizeof(fw_tstamp));
+
+ memcpy_fromio(&fw_tstamp, addr, sizeof(fw_tstamp));
fw_tstamp.bytes_copied = stream->cumm_bytes;
dev_dbg(dev, "bytes sent to fw %llu inc by %ld\n",
fw_tstamp.bytes_copied, bytes);
- addr = ((void *)(ctx->mailbox + ctx->tstamp)) +
- (str_id * sizeof(fw_tstamp));
offset = offsetof(struct snd_sst_tstamp, bytes_copied);
sst_shim_write(addr, offset, fw_tstamp.bytes_copied);
return 0;
@@ -360,11 +358,12 @@ static int sst_cdev_tstamp(struct device *dev, unsigned int str_id,
struct snd_sst_tstamp fw_tstamp = {0,};
struct stream_info *stream;
struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+ void __iomem *addr;
+
+ addr = (void __iomem *)(ctx->mailbox + ctx->tstamp) +
+ (str_id * sizeof(fw_tstamp));
- memcpy_fromio(&fw_tstamp,
- ((void *)(ctx->mailbox + ctx->tstamp)
- +(str_id * sizeof(fw_tstamp))),
- sizeof(fw_tstamp));
+ memcpy_fromio(&fw_tstamp, addr, sizeof(fw_tstamp));
stream = get_stream_info(ctx, str_id);
if (!stream)
@@ -530,6 +529,7 @@ static int sst_read_timestamp(struct device *dev, struct pcm_stream_info *info)
struct snd_sst_tstamp fw_tstamp;
unsigned int str_id;
struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+ void __iomem *addr;
str_id = info->str_id;
stream = get_stream_info(ctx, str_id);
@@ -540,10 +540,11 @@ static int sst_read_timestamp(struct device *dev, struct pcm_stream_info *info)
return -EINVAL;
substream = stream->pcm_substream;
- memcpy_fromio(&fw_tstamp,
- ((void *)(ctx->mailbox + ctx->tstamp)
- + (str_id * sizeof(fw_tstamp))),
- sizeof(fw_tstamp));
+ addr = (void __iomem *)(ctx->mailbox + ctx->tstamp) +
+ (str_id * sizeof(fw_tstamp));
+
+ memcpy_fromio(&fw_tstamp, addr, sizeof(fw_tstamp));
+
return sst_calc_tstamp(ctx, info, substream, &fw_tstamp);
}
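All three hunks above apply the same fix: the mailbox offset arithmetic is computed once into a void __iomem * variable, so memcpy_fromio() (whose source argument is const volatile void __iomem *) receives a correctly annotated pointer instead of an open-coded cast, keeping sparse quiet about address-space mismatches. The resulting pattern, as a sketch:

	void __iomem *addr;
	struct snd_sst_tstamp fw_tstamp;

	/* one annotated pointer computation, then the actual copy */
	addr = (void __iomem *)(ctx->mailbox + ctx->tstamp) +
		str_id * sizeof(fw_tstamp);
	memcpy_fromio(&fw_tstamp, addr, sizeof(fw_tstamp));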
diff --git a/sound/soc/intel/atom/sst/sst_loader.c b/sound/soc/intel/atom/sst/sst_loader.c
index a686eef2cf7f..27413ebae956 100644
--- a/sound/soc/intel/atom/sst/sst_loader.c
+++ b/sound/soc/intel/atom/sst/sst_loader.c
@@ -44,15 +44,15 @@ void memcpy32_toio(void __iomem *dst, const void *src, int count)
/* __iowrite32_copy uses 32-bit count values so divide by 4 to get
* the right count in words
*/
- __iowrite32_copy(dst, src, count/4);
+ __iowrite32_copy(dst, src, count / 4);
}
void memcpy32_fromio(void *dst, const void __iomem *src, int count)
{
- /* __iowrite32_copy uses 32-bit count values so divide by 4 for
+ /* __ioread32_copy uses 32-bit count values so divide by 4 to get
* the right count in words
*/
- __iowrite32_copy(dst, src, count/4);
+ __ioread32_copy(dst, src, count / 4);
}
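Both helpers take a byte count but delegate to primitives that count 32-bit words, hence the division by four; the hunk also makes the read path use __ioread32_copy() rather than reusing the write-direction __iowrite32_copy(). A hedged usage sketch (io_base is an assumed, already-mapped region):

	u32 buf[16];

	memcpy32_fromio(buf, io_base, sizeof(buf));	/* reads 16 words */
	memcpy32_toio(io_base, buf, sizeof(buf));	/* writes them back */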
/**
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 24797482a3d2..cccda87f4b34 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -281,6 +281,20 @@ config SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH
Say Y if you have such a device.
If unsure select "N".
+config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
+ tristate "GLK with RT5682 and MAX98357A in I2S Mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_RT5682
+ select SND_SOC_MAX98357A
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ select SND_HDA_DSP_LOADER
+ help
+ This adds support for the ASoC machine driver for Geminilake platforms
+ with the RT5682 + MAX98357A I2S audio codecs.
+ Say Y if you have such a device.
+ If unsure select "N".
+
endif ## SND_SOC_INTEL_SKYLAKE
endif ## SND_SOC_INTEL_MACH
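With the new entry above, the Geminilake machine driver can be built as a module through a config fragment such as the following (illustrative; the surrounding SND_SOC_INTEL_SKYLAKE and SND_SOC_INTEL_MACH options must already be enabled):

	CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH=m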
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index 92b5507291af..87ef8b4058e5 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -6,6 +6,7 @@ snd-soc-sst-bdw-rt5677-mach-objs := bdw-rt5677.o
snd-soc-sst-broadwell-objs := broadwell.o
snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o
snd-soc-sst-bxt-rt298-objs := bxt_rt298.o
+snd-soc-sst-glk-rt5682_max98357a-objs := glk_rt5682_max98357a.o
snd-soc-sst-bytcr-rt5640-objs := bytcr_rt5640.o
snd-soc-sst-bytcr-rt5651-objs := bytcr_rt5651.o
snd-soc-sst-cht-bsw-rt5672-objs := cht_bsw_rt5672.o
@@ -27,6 +28,7 @@ obj-$(CONFIG_SND_SOC_INTEL_BYT_RT5640_MACH) += snd-soc-sst-byt-rt5640-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_MAX98090_MACH) += snd-soc-sst-byt-max98090-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH) += snd-soc-sst-bxt-da7219_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_BXT_RT298_MACH) += snd-soc-sst-bxt-rt298.o
+obj-$(CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH) += snd-soc-sst-glk-rt5682_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_BROADWELL_MACH) += snd-soc-sst-broadwell.o
obj-$(CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH) += snd-soc-sst-bdw-rt5677-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH) += snd-soc-sst-bytcr-rt5640.o
diff --git a/sound/soc/intel/boards/bdw-rt5677.c b/sound/soc/intel/boards/bdw-rt5677.c
index 6ea360f33575..efcfd906c856 100644
--- a/sound/soc/intel/boards/bdw-rt5677.c
+++ b/sound/soc/intel/boards/bdw-rt5677.c
@@ -154,9 +154,7 @@ static int broadwell_ssp0_fixup(struct snd_soc_pcm_runtime *rtd,
channels->min = channels->max = 2;
/* set SSP0 to 16 bit */
- snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
- SNDRV_PCM_HW_PARAM_FIRST_MASK],
- SNDRV_PCM_FORMAT_S16_LE);
+ params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
return 0;
}
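params_set_format() is the type-safe helper that replaces the open-coded snd_mask_set() on the format mask. A minimal backend fixup built around it might look like this sketch (hypothetical function name):

	static int my_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
				struct snd_pcm_hw_params *params)
	{
		/* pin the backend to 16-bit little-endian samples */
		params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
		return 0;
	}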
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 40eb979d5ac1..6f052fc8d1e2 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -160,7 +160,7 @@ static int broxton_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
/* set SSP to 24 bit */
snd_mask_none(fmt);
- snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
return 0;
}
@@ -324,8 +324,22 @@ static const struct snd_pcm_hw_constraint_list constraints_16000 = {
.list = rates_16000,
};
+static const unsigned int ch_mono[] = {
+ 1,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_refcap = {
+ .count = ARRAY_SIZE(ch_mono),
+ .list = ch_mono,
+};
+
static int broxton_refcap_startup(struct snd_pcm_substream *substream)
{
+ substream->runtime->hw.channels_max = 1;
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_refcap);
+
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
&constraints_16000);
@@ -586,7 +600,7 @@ static int broxton_audio_probe(struct platform_device *pdev)
static struct platform_driver broxton_audio = {
.probe = broxton_audio_probe,
.driver = {
- .name = "bxt_da7219_max98357a_i2s",
+ .name = "bxt_da7219_max98357a",
.pm = &snd_soc_pm_ops,
},
};
@@ -599,4 +613,4 @@ MODULE_AUTHOR("Rohit Ainapure <rohit.m.ainapure@intel.com>");
MODULE_AUTHOR("Harsha Priya <harshapriya.n@intel.com>");
MODULE_AUTHOR("Conrad Cooke <conrad.cooke@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:bxt_da7219_max98357a_i2s");
+MODULE_ALIAS("platform:bxt_da7219_max98357a");
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index b68c289558a8..27308337ab12 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -221,7 +221,7 @@ static int broxton_ssp5_fixup(struct snd_soc_pcm_runtime *rtd,
/* set SSP5 to 24 bit */
snd_mask_none(fmt);
- snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
return 0;
}
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 33065ba294a9..d32844f94d74 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -404,7 +404,7 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
},
.driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
BYT_RT5640_JD_SRC_JD1_IN4P |
- BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_TH_1500UA |
BYT_RT5640_OVCD_SF_0P75 |
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
@@ -464,12 +464,38 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_MCLK_EN),
},
{
+ /* Chuwi Vi10 (CWI505) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-PF02"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "S165"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Max B3 PLATFORM"),
},
.driver_data = (void *)(BYT_RT5640_DMIC1_MAP),
},
+ { /* Connect Tablet 9 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Connect"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Tablet 9"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -536,6 +562,19 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
},
+ { /* Lenovo Miix 2 8 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "20326"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Hiking"),
+ },
+ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* MSI S100 tablet */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
@@ -549,6 +588,20 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_DIFF_MIC |
BYT_RT5640_MCLK_EN),
},
+ { /* Nuvison/TMax TM800W560 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TMAX"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TM800W560L"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_JD_NOT_INV |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* Pipo W4 */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
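Every entry added above follows the same mechanism: dmi_check_system() walks the table at probe time and, on a DMI match, invokes a callback that latches driver_data into the module-wide quirk mask. The callback itself is outside this hunk, so the following sketch uses assumed names:

	static unsigned long quirks;

	static int quirk_cb(const struct dmi_system_id *id)
	{
		quirks = (unsigned long)id->driver_data;
		return 1;
	}

	/* in the machine driver's probe: */
	dmi_check_system(byt_rt5640_quirk_table);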
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 987720e203f9..f8a68bdb3885 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -26,8 +26,12 @@
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dmi.h>
+#include <linux/input.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/platform_sst_audio.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -42,8 +46,6 @@ enum {
BYT_RT5651_IN1_MAP,
BYT_RT5651_IN2_MAP,
BYT_RT5651_IN1_IN2_MAP,
- BYT_RT5651_IN1_HS_IN3_MAP,
- BYT_RT5651_IN2_HS_IN3_MAP,
};
enum {
@@ -76,21 +78,26 @@ enum {
#define BYT_RT5651_SSP2_AIF2 BIT(19) /* default is using AIF1 */
#define BYT_RT5651_SSP0_AIF1 BIT(20)
#define BYT_RT5651_SSP0_AIF2 BIT(21)
+#define BYT_RT5651_HP_LR_SWAPPED BIT(22)
+#define BYT_RT5651_MONO_SPEAKER BIT(23)
+
+#define BYT_RT5651_DEFAULT_QUIRKS (BYT_RT5651_MCLK_EN | \
+ BYT_RT5651_JD1_1 | \
+ BYT_RT5651_OVCD_TH_2000UA | \
+ BYT_RT5651_OVCD_SF_0P75)
/* jack-detect-source + dmic-en + ovcd-th + -sf + terminating empty entry */
#define MAX_NO_PROPS 5
struct byt_rt5651_private {
struct clk *mclk;
+ struct gpio_desc *ext_amp_gpio;
struct snd_soc_jack jack;
};
/* Default: jack-detect on JD1_1, internal mic on in2, headsetmic on in3 */
-static unsigned long byt_rt5651_quirk = BYT_RT5651_MCLK_EN |
- BYT_RT5651_JD1_1 |
- BYT_RT5651_OVCD_TH_2000UA |
- BYT_RT5651_OVCD_SF_0P75 |
- BYT_RT5651_IN2_HS_IN3_MAP;
+static unsigned long byt_rt5651_quirk = BYT_RT5651_DEFAULT_QUIRKS |
+ BYT_RT5651_IN2_MAP;
static void log_quirks(struct device *dev)
{
@@ -100,10 +107,8 @@ static void log_quirks(struct device *dev)
dev_info(dev, "quirk IN1_MAP enabled");
if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN2_MAP)
dev_info(dev, "quirk IN2_MAP enabled");
- if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN1_HS_IN3_MAP)
- dev_info(dev, "quirk IN1_HS_IN3_MAP enabled");
- if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN2_HS_IN3_MAP)
- dev_info(dev, "quirk IN2_HS_IN3_MAP enabled");
+ if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN1_IN2_MAP)
+ dev_info(dev, "quirk IN1_IN2_MAP enabled");
if (BYT_RT5651_JDSRC(byt_rt5651_quirk)) {
dev_info(dev, "quirk realtek,jack-detect-source %ld\n",
BYT_RT5651_JDSRC(byt_rt5651_quirk));
@@ -124,6 +129,8 @@ static void log_quirks(struct device *dev)
dev_info(dev, "quirk SSP0_AIF1 enabled\n");
if (byt_rt5651_quirk & BYT_RT5651_SSP0_AIF2)
dev_info(dev, "quirk SSP0_AIF2 enabled\n");
+ if (byt_rt5651_quirk & BYT_RT5651_MONO_SPEAKER)
+ dev_info(dev, "quirk MONO_SPEAKER enabled\n");
}
#define BYT_CODEC_DAI1 "rt5651-aif1"
@@ -211,6 +218,20 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
return 0;
}
+static int rt5651_ext_amp_power_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_card *card = w->dapm->card;
+ struct byt_rt5651_private *priv = snd_soc_card_get_drvdata(card);
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ gpiod_set_value_cansleep(priv->ext_amp_gpio, 1);
+ else
+ gpiod_set_value_cansleep(priv->ext_amp_gpio, 0);
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget byt_rt5651_widgets[] = {
SND_SOC_DAPM_HP("Headphone", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
@@ -220,7 +241,9 @@ static const struct snd_soc_dapm_widget byt_rt5651_widgets[] = {
SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
platform_clock_control, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMD),
-
+ SND_SOC_DAPM_SUPPLY("Ext Amp Power", SND_SOC_NOPM, 0, 0,
+ rt5651_ext_amp_power_event,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
};
static const struct snd_soc_dapm_route byt_rt5651_audio_map[] = {
@@ -228,6 +251,7 @@ static const struct snd_soc_dapm_route byt_rt5651_audio_map[] = {
{"Headset Mic", NULL, "Platform Clock"},
{"Internal Mic", NULL, "Platform Clock"},
{"Speaker", NULL, "Platform Clock"},
+ {"Speaker", NULL, "Ext Amp Power"},
{"Line In", NULL, "Platform Clock"},
{"Headset Mic", NULL, "micbias1"}, /* lowercase for rt5651 */
@@ -241,38 +265,26 @@ static const struct snd_soc_dapm_route byt_rt5651_audio_map[] = {
};
static const struct snd_soc_dapm_route byt_rt5651_intmic_dmic_map[] = {
- {"IN2P", NULL, "Headset Mic"},
{"DMIC L1", NULL, "Internal Mic"},
{"DMIC R1", NULL, "Internal Mic"},
+ {"IN3P", NULL, "Headset Mic"},
};
static const struct snd_soc_dapm_route byt_rt5651_intmic_in1_map[] = {
{"Internal Mic", NULL, "micbias1"},
{"IN1P", NULL, "Internal Mic"},
- {"IN2P", NULL, "Headset Mic"},
+ {"IN3P", NULL, "Headset Mic"},
};
static const struct snd_soc_dapm_route byt_rt5651_intmic_in2_map[] = {
{"Internal Mic", NULL, "micbias1"},
- {"IN1P", NULL, "Headset Mic"},
- {"IN2P", NULL, "Internal Mic"},
-};
-
-static const struct snd_soc_dapm_route byt_rt5651_intmic_in1_in2_map[] = {
- {"Internal Mic", NULL, "micbias1"},
- {"IN1P", NULL, "Internal Mic"},
{"IN2P", NULL, "Internal Mic"},
{"IN3P", NULL, "Headset Mic"},
};
-static const struct snd_soc_dapm_route byt_rt5651_intmic_in1_hs_in3_map[] = {
+static const struct snd_soc_dapm_route byt_rt5651_intmic_in1_in2_map[] = {
{"Internal Mic", NULL, "micbias1"},
{"IN1P", NULL, "Internal Mic"},
- {"IN3P", NULL, "Headset Mic"},
-};
-
-static const struct snd_soc_dapm_route byt_rt5651_intmic_in2_hs_in3_map[] = {
- {"Internal Mic", NULL, "micbias1"},
{"IN2P", NULL, "Internal Mic"},
{"IN3P", NULL, "Headset Mic"},
};
@@ -357,46 +369,72 @@ static int byt_rt5651_quirk_cb(const struct dmi_system_id *id)
static const struct dmi_system_id byt_rt5651_quirk_table[] = {
{
+ /* Chuwi Hi8 Pro (CWI513) */
.callback = byt_rt5651_quirk_cb,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Max B3 PLATFORM"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X1D3_C806N"),
},
- .driver_data = (void *)(BYT_RT5651_IN1_HS_IN3_MAP),
+ .driver_data = (void *)(BYT_RT5651_DEFAULT_QUIRKS |
+ BYT_RT5651_IN2_MAP |
+ BYT_RT5651_HP_LR_SWAPPED |
+ BYT_RT5651_MONO_SPEAKER),
},
{
+ /* Chuwi Vi8 Plus (CWI519) */
.callback = byt_rt5651_quirk_cb,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ADI"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Turbot"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "D2D3_Vi8A1"),
},
- .driver_data = (void *)(BYT_RT5651_MCLK_EN |
- BYT_RT5651_IN1_HS_IN3_MAP),
+ .driver_data = (void *)(BYT_RT5651_DEFAULT_QUIRKS |
+ BYT_RT5651_IN2_MAP |
+ BYT_RT5651_HP_LR_SWAPPED |
+ BYT_RT5651_MONO_SPEAKER),
+ },
+ {
+ /*
+ * I.T.Works TW701, Ployer Momo7w and Trekstor ST70416-6
+ * (these all use the same mainboard)
+ */
+ .callback = byt_rt5651_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "INSYDE Corp."),
+ /*
+ * Partial match for all of itWORKS.G.WI71C.JGBMRBA,
+ * TREK.G.WI71C.JGBMRBA0x and MOMO.G.WI71C.MABMRBA02
+ */
+ DMI_MATCH(DMI_BIOS_VERSION, ".G.WI71C."),
+ },
+ .driver_data = (void *)(BYT_RT5651_DEFAULT_QUIRKS |
+ BYT_RT5651_IN2_MAP |
+ BYT_RT5651_SSP0_AIF1 |
+ BYT_RT5651_MONO_SPEAKER),
},
{
+ /* KIANO SlimNote 14.2 */
.callback = byt_rt5651_quirk_cb,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "KIANO"),
DMI_MATCH(DMI_PRODUCT_NAME, "KIANO SlimNote 14.2"),
},
- .driver_data = (void *)(BYT_RT5651_MCLK_EN |
- BYT_RT5651_JD1_1 |
- BYT_RT5651_OVCD_TH_2000UA |
- BYT_RT5651_OVCD_SF_0P75 |
+ .driver_data = (void *)(BYT_RT5651_DEFAULT_QUIRKS |
BYT_RT5651_IN1_IN2_MAP),
},
{
- /* Chuwi Vi8 Plus (CWI519) */
+ /* Minnowboard Max B3 */
.callback = byt_rt5651_quirk_cb,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"),
- DMI_MATCH(DMI_PRODUCT_NAME, "D2D3_Vi8A1"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Max B3 PLATFORM"),
+ },
+ .driver_data = (void *)(BYT_RT5651_IN1_MAP),
+ },
+ {
+ /* Minnowboard Turbot */
+ .callback = byt_rt5651_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ADI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Turbot"),
},
.driver_data = (void *)(BYT_RT5651_MCLK_EN |
- BYT_RT5651_JD1_1 |
- BYT_RT5651_OVCD_TH_2000UA |
- BYT_RT5651_OVCD_SF_0P75 |
- BYT_RT5651_IN2_HS_IN3_MAP),
+ BYT_RT5651_IN1_MAP),
},
{
/* VIOS LTH17 */
@@ -405,11 +443,24 @@ static const struct dmi_system_id byt_rt5651_quirk_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "VIOS"),
DMI_MATCH(DMI_PRODUCT_NAME, "LTH17"),
},
- .driver_data = (void *)(BYT_RT5651_MCLK_EN |
+ .driver_data = (void *)(BYT_RT5651_IN1_IN2_MAP |
BYT_RT5651_JD1_1 |
BYT_RT5651_OVCD_TH_2000UA |
BYT_RT5651_OVCD_SF_1P0 |
- BYT_RT5651_IN1_IN2_MAP),
+ BYT_RT5651_MCLK_EN),
+ },
+ {
+ /* Yours Y8W81 (and others using the same mainboard) */
+ .callback = byt_rt5651_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "INSYDE Corp."),
+ /* Partial match for all devs with a W86C mainboard */
+ DMI_MATCH(DMI_BIOS_VERSION, ".F.W86C."),
+ },
+ .driver_data = (void *)(BYT_RT5651_DEFAULT_QUIRKS |
+ BYT_RT5651_IN2_MAP |
+ BYT_RT5651_SSP0_AIF1 |
+ BYT_RT5651_MONO_SPEAKER),
},
{}
};
@@ -418,15 +469,10 @@ static const struct dmi_system_id byt_rt5651_quirk_table[] = {
* Note this MUST be called before snd_soc_register_card(), so that the props
* are in place before the codec component driver's probe function parses them.
*/
-static int byt_rt5651_add_codec_device_props(const char *i2c_dev_name)
+static int byt_rt5651_add_codec_device_props(struct device *i2c_dev)
{
struct property_entry props[MAX_NO_PROPS] = {};
- struct device *i2c_dev;
- int ret, cnt = 0;
-
- i2c_dev = bus_find_device_by_name(&i2c_bus_type, NULL, i2c_dev_name);
- if (!i2c_dev)
- return -EPROBE_DEFER;
+ int cnt = 0;
props[cnt++] = PROPERTY_ENTRY_U32("realtek,jack-detect-source",
BYT_RT5651_JDSRC(byt_rt5651_quirk));
@@ -440,10 +486,7 @@ static int byt_rt5651_add_codec_device_props(const char *i2c_dev_name)
if (byt_rt5651_quirk & BYT_RT5651_DMIC_EN)
props[cnt++] = PROPERTY_ENTRY_BOOL("realtek,dmic-en");
- ret = device_add_properties(i2c_dev, props);
- put_device(i2c_dev);
-
- return ret;
+ return device_add_properties(i2c_dev, props);
}
static int byt_rt5651_init(struct snd_soc_pcm_runtime *runtime)
@@ -475,14 +518,6 @@ static int byt_rt5651_init(struct snd_soc_pcm_runtime *runtime)
custom_map = byt_rt5651_intmic_in1_in2_map;
num_routes = ARRAY_SIZE(byt_rt5651_intmic_in1_in2_map);
break;
- case BYT_RT5651_IN1_HS_IN3_MAP:
- custom_map = byt_rt5651_intmic_in1_hs_in3_map;
- num_routes = ARRAY_SIZE(byt_rt5651_intmic_in1_hs_in3_map);
- break;
- case BYT_RT5651_IN2_HS_IN3_MAP:
- custom_map = byt_rt5651_intmic_in2_hs_in3_map;
- num_routes = ARRAY_SIZE(byt_rt5651_intmic_in2_hs_in3_map);
- break;
default:
custom_map = byt_rt5651_intmic_dmic_map;
num_routes = ARRAY_SIZE(byt_rt5651_intmic_dmic_map);
@@ -546,13 +581,17 @@ static int byt_rt5651_init(struct snd_soc_pcm_runtime *runtime)
if (BYT_RT5651_JDSRC(byt_rt5651_quirk)) {
ret = snd_soc_card_jack_new(runtime->card, "Headset",
- SND_JACK_HEADSET, &priv->jack,
- bytcr_jack_pins, ARRAY_SIZE(bytcr_jack_pins));
+ SND_JACK_HEADSET | SND_JACK_BTN_0,
+ &priv->jack, bytcr_jack_pins,
+ ARRAY_SIZE(bytcr_jack_pins));
if (ret) {
dev_err(runtime->dev, "jack creation failed %d\n", ret);
return ret;
}
+ snd_jack_set_key(priv->jack.jack, SND_JACK_BTN_0,
+ KEY_PLAYPAUSE);
+
ret = snd_soc_component_set_jack(codec, &priv->jack, NULL);
if (ret)
return ret;
@@ -691,6 +730,48 @@ static struct snd_soc_dai_link byt_rt5651_dais[] = {
};
/* SoC card */
+static char byt_rt5651_codec_name[SND_ACPI_I2C_ID_LEN];
+static char byt_rt5651_codec_aif_name[12]; /* = "rt5651-aif[1|2]" */
+static char byt_rt5651_cpu_dai_name[10]; /* = "ssp[0|2]-port" */
+static char byt_rt5651_long_name[50]; /* = "bytcr-rt5651-*-spk-*-mic[-swapped-hp]" */
+
+static int byt_rt5651_suspend(struct snd_soc_card *card)
+{
+ struct snd_soc_component *component;
+
+ if (!BYT_RT5651_JDSRC(byt_rt5651_quirk))
+ return 0;
+
+ list_for_each_entry(component, &card->component_dev_list, card_list) {
+ if (!strcmp(component->name, byt_rt5651_codec_name)) {
+ dev_dbg(component->dev, "disabling jack detect before suspend\n");
+ snd_soc_component_set_jack(component, NULL, NULL);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int byt_rt5651_resume(struct snd_soc_card *card)
+{
+ struct byt_rt5651_private *priv = snd_soc_card_get_drvdata(card);
+ struct snd_soc_component *component;
+
+ if (!BYT_RT5651_JDSRC(byt_rt5651_quirk))
+ return 0;
+
+ list_for_each_entry(component, &card->component_dev_list, card_list) {
+ if (!strcmp(component->name, byt_rt5651_codec_name)) {
+ dev_dbg(component->dev, "re-enabling jack detect after resume\n");
+ snd_soc_component_set_jack(component, &priv->jack, NULL);
+ break;
+ }
+ }
+
+ return 0;
+}
+
static struct snd_soc_card byt_rt5651_card = {
.name = "bytcr-rt5651",
.owner = THIS_MODULE,
@@ -701,23 +782,86 @@ static struct snd_soc_card byt_rt5651_card = {
.dapm_routes = byt_rt5651_audio_map,
.num_dapm_routes = ARRAY_SIZE(byt_rt5651_audio_map),
.fully_routed = true,
+ .suspend_pre = byt_rt5651_suspend,
+ .resume_post = byt_rt5651_resume,
};
-static char byt_rt5651_codec_name[SND_ACPI_I2C_ID_LEN];
-static char byt_rt5651_codec_aif_name[12]; /* = "rt5651-aif[1|2]" */
-static char byt_rt5651_cpu_dai_name[10]; /* = "ssp[0|2]-port" */
-static char byt_rt5651_long_name[40]; /* = "bytcr-rt5651-*-spk-*-mic" */
+static const struct x86_cpu_id baytrail_cpu_ids[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, /* Valleyview */
+ {}
+};
+
+static const struct x86_cpu_id cherrytrail_cpu_ids[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, /* Braswell */
+ {}
+};
+
+static const struct acpi_gpio_params first_gpio = { 0, 0, false };
+static const struct acpi_gpio_params second_gpio = { 1, 0, false };
+
+static const struct acpi_gpio_mapping byt_rt5651_amp_en_first[] = {
+ { "ext-amp-enable-gpios", &first_gpio, 1 },
+ { },
+};
-static bool is_valleyview(void)
+static const struct acpi_gpio_mapping byt_rt5651_amp_en_second[] = {
+ { "ext-amp-enable-gpios", &second_gpio, 1 },
+ { },
+};
+
+/*
+ * Some boards have I2cSerialBusV2, GpioIo, GpioInt as ACPI resources, other
+ * boards may have I2cSerialBusV2, GpioInt, GpioIo instead. We want the
+ * GpioIo one for the ext-amp-enable-gpio, and both GPIO resource types
+ * count toward the index used in acpi_gpio_params. So we have 2 different
+ * mappings and the code below figures out which one to use.
+ */
+struct byt_rt5651_acpi_resource_data {
+ int gpio_count;
+ int gpio_int_idx;
+};
+
+static int snd_byt_rt5651_acpi_resource(struct acpi_resource *ares, void *arg)
{
- static const struct x86_cpu_id cpu_ids[] = {
- { X86_VENDOR_INTEL, 6, 55 }, /* Valleyview, Bay Trail */
- {}
- };
-
- if (!x86_match_cpu(cpu_ids))
- return false;
- return true;
+ struct byt_rt5651_acpi_resource_data *data = arg;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
+ return 0;
+
+ if (ares->data.gpio.connection_type == ACPI_RESOURCE_GPIO_TYPE_INT)
+ data->gpio_int_idx = data->gpio_count;
+
+ data->gpio_count++;
+ return 0;
+}
+
+static void snd_byt_rt5651_mc_add_amp_en_gpio_mapping(struct device *codec)
+{
+ struct byt_rt5651_acpi_resource_data data = { 0, -1 };
+ LIST_HEAD(resources);
+ int ret;
+
+ ret = acpi_dev_get_resources(ACPI_COMPANION(codec), &resources,
+ snd_byt_rt5651_acpi_resource, &data);
+ if (ret < 0) {
+ dev_warn(codec, "Failed to get ACPI resources, not adding external amplifier GPIO mapping\n");
+ return;
+ }
+
+ /* All info we need is gathered during the walk */
+ acpi_dev_free_resource_list(&resources);
+
+ switch (data.gpio_int_idx) {
+ case 0:
+ devm_acpi_dev_add_driver_gpios(codec, byt_rt5651_amp_en_second);
+ break;
+ case 1:
+ devm_acpi_dev_add_driver_gpios(codec, byt_rt5651_amp_en_first);
+ break;
+ default:
+ dev_warn(codec, "Unknown GpioInt index %d, not adding external amplifier GPIO mapping\n",
+ data.gpio_int_idx);
+ }
}
struct acpi_chan_package { /* ACPICA seems to require 64 bit integers */
@@ -727,13 +871,12 @@ struct acpi_chan_package { /* ACPICA seems to require 64 bit integers */
static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
{
- const char * const intmic_name[] =
- { "dmic", "in1", "in2", "in12", "in1", "in2" };
- const char * const hsmic_name[] =
- { "in2", "in2", "in1", "in3", "in3", "in3" };
+ const char * const mic_name[] = { "dmic", "in1", "in2", "in12" };
struct byt_rt5651_private *priv;
struct snd_soc_acpi_mach *mach;
+ struct device *codec_dev;
const char *i2c_name = NULL;
+ const char *hp_swapped;
bool is_bytcr = false;
int ret_val = 0;
int dai_index = 0;
@@ -767,11 +910,16 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
"%s%s", "i2c-", i2c_name);
byt_rt5651_dais[dai_index].codec_name = byt_rt5651_codec_name;
+ codec_dev = bus_find_device_by_name(&i2c_bus_type, NULL,
+ byt_rt5651_codec_name);
+ if (!codec_dev)
+ return -EPROBE_DEFER;
+
/*
* swap SSP0 if bytcr is detected
* (will be overridden if DMI quirk is detected)
*/
- if (is_valleyview()) {
+ if (x86_match_cpu(baytrail_cpu_ids)) {
struct sst_platform_info *p_info = mach->pdata;
const struct sst_res_info *res_info = p_info->res_info;
@@ -830,9 +978,37 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
dmi_check_system(byt_rt5651_quirk_table);
/* Must be called before register_card, also see declaration comment. */
- ret_val = byt_rt5651_add_codec_device_props(byt_rt5651_codec_name);
- if (ret_val)
+ ret_val = byt_rt5651_add_codec_device_props(codec_dev);
+ if (ret_val) {
+ put_device(codec_dev);
return ret_val;
+ }
+
+ /* Cherry Trail devices use an external amplifier enable gpio */
+ if (x86_match_cpu(cherrytrail_cpu_ids)) {
+ snd_byt_rt5651_mc_add_amp_en_gpio_mapping(codec_dev);
+ priv->ext_amp_gpio = devm_fwnode_get_index_gpiod_from_child(
+ &pdev->dev, "ext-amp-enable", 0,
+ codec_dev->fwnode,
+ GPIOD_OUT_LOW, "speaker-amp");
+ if (IS_ERR(priv->ext_amp_gpio)) {
+ ret_val = PTR_ERR(priv->ext_amp_gpio);
+ switch (ret_val) {
+ case -ENOENT:
+ priv->ext_amp_gpio = NULL;
+ break;
+ default:
+ dev_err(&pdev->dev, "Failed to get ext-amp-enable GPIO: %d\n",
+ ret_val);
+ /* fall through */
+ case -EPROBE_DEFER:
+ put_device(codec_dev);
+ return ret_val;
+ }
+ }
+ }
+
+ put_device(codec_dev);
log_quirks(&pdev->dev);
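The refactor above also tightens reference counting: bus_find_device_by_name() returns its device with a reference held, so every exit path, error or success, must drop it with put_device(). The pairing, sketched (do_setup is a hypothetical stand-in for the props/GPIO setup):

	struct device *codec_dev;
	int ret;

	codec_dev = bus_find_device_by_name(&i2c_bus_type, NULL, name);
	if (!codec_dev)
		return -EPROBE_DEFER;	/* codec not instantiated yet */

	ret = do_setup(codec_dev);

	put_device(codec_dev);		/* drop the reference in all cases */
	return ret;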
@@ -876,10 +1052,16 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
}
}
+ if (byt_rt5651_quirk & BYT_RT5651_HP_LR_SWAPPED)
+ hp_swapped = "-hp-swapped";
+ else
+ hp_swapped = "";
+
snprintf(byt_rt5651_long_name, sizeof(byt_rt5651_long_name),
- "bytcr-rt5651-%s-intmic-%s-hsmic",
- intmic_name[BYT_RT5651_MAP(byt_rt5651_quirk)],
- hsmic_name[BYT_RT5651_MAP(byt_rt5651_quirk)]);
+ "bytcr-rt5651-%s-spk-%s-mic%s",
+ (byt_rt5651_quirk & BYT_RT5651_MONO_SPEAKER) ?
+ "mono" : "stereo",
+ mic_name[BYT_RT5651_MAP(byt_rt5651_quirk)], hp_swapped);
byt_rt5651_card.long_name = byt_rt5651_long_name;
ret_val = devm_snd_soc_register_card(&pdev->dev, &byt_rt5651_card);
diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
new file mode 100644
index 000000000000..c4b94e2617c5
--- /dev/null
+++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2018 Intel Corporation.
+
+/*
+ * Intel Geminilake I2S Machine Driver with MAX98357A & RT5682 Codecs
+ *
+ * Modified from:
+ * Intel Apollolake I2S Machine driver
+ */
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../skylake/skl.h"
+#include "../../codecs/rt5682.h"
+#include "../../codecs/hdac_hdmi.h"
+
+/* The platform clock outputs a 19.2 MHz clock to the codec as I2S MCLK */
+#define GLK_PLAT_CLK_FREQ 19200000
+#define RT5682_PLL_FREQ (48000 * 512)
+#define GLK_REALTEK_CODEC_DAI "rt5682-aif1"
+#define GLK_MAXIM_CODEC_DAI "HiFi"
+#define MAXIM_DEV0_NAME "MX98357A:00"
+#define DUAL_CHANNEL 2
+#define QUAD_CHANNEL 4
+#define NAME_SIZE 32
+
+static struct snd_soc_jack geminilake_hdmi[3];
+
+struct glk_hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ int device;
+};
+
+struct glk_card_private {
+ struct snd_soc_jack geminilake_headset;
+ struct list_head hdmi_pcm_list;
+};
+
+enum {
+ GLK_DPCM_AUDIO_PB = 0,
+ GLK_DPCM_AUDIO_CP,
+ GLK_DPCM_AUDIO_HS_PB,
+ GLK_DPCM_AUDIO_ECHO_REF_CP,
+ GLK_DPCM_AUDIO_REF_CP,
+ GLK_DPCM_AUDIO_DMIC_CP,
+ GLK_DPCM_AUDIO_HDMI1_PB,
+ GLK_DPCM_AUDIO_HDMI2_PB,
+ GLK_DPCM_AUDIO_HDMI3_PB,
+};
+
+static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
+ int ret = 0;
+
+ codec_dai = snd_soc_card_get_codec_dai(card, GLK_REALTEK_CODEC_DAI);
+ if (!codec_dai) {
+ dev_err(card->dev, "Codec dai not found; Unable to set/unset codec pll\n");
+ return -EIO;
+ }
+
+ if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, 0, 0);
+ if (ret)
+ dev_err(card->dev, "failed to stop sysclk: %d\n", ret);
+ } else if (SND_SOC_DAPM_EVENT_ON(event)) {
+ ret = snd_soc_dai_set_pll(codec_dai, 0, RT5682_PLL1_S_MCLK,
+ GLK_PLAT_CLK_FREQ, RT5682_PLL_FREQ);
+ if (ret < 0) {
+ dev_err(card->dev, "can't set codec pll: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (ret)
+ dev_err(card->dev, "failed to start internal clk: %d\n", ret);
+
+ return ret;
+}
+
+static const struct snd_kcontrol_new geminilake_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Spk"),
+};
+
+static const struct snd_soc_dapm_widget geminilake_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SPK("Spk", NULL),
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+ SND_SOC_DAPM_SPK("HDMI1", NULL),
+ SND_SOC_DAPM_SPK("HDMI2", NULL),
+ SND_SOC_DAPM_SPK("HDMI3", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ platform_clock_control, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route geminilake_map[] = {
+ /* HP jack connectors - unknown if we have jack detection */
+ { "Headphone Jack", NULL, "Platform Clock" },
+ { "Headphone Jack", NULL, "HPOL" },
+ { "Headphone Jack", NULL, "HPOR" },
+
+ /* speaker */
+ { "Spk", NULL, "Speaker" },
+
+ /* other jacks */
+ { "Headset Mic", NULL, "Platform Clock" },
+ { "IN1P", NULL, "Headset Mic" },
+
+ /* digital mics */
+ { "DMic", NULL, "SoC DMIC" },
+
+ /* CODEC BE connections */
+ { "HiFi Playback", NULL, "ssp1 Tx" },
+ { "ssp1 Tx", NULL, "codec0_out" },
+
+ { "AIF1 Playback", NULL, "ssp2 Tx" },
+ { "ssp2 Tx", NULL, "codec1_out" },
+
+ { "codec0_in", NULL, "ssp2 Rx" },
+ { "ssp2 Rx", NULL, "AIF1 Capture" },
+
+ { "HDMI1", NULL, "hif5-0 Output" },
+ { "HDMI2", NULL, "hif6-0 Output" },
+ { "HDMI2", NULL, "hif7-0 Output" },
+
+ { "hifi3", NULL, "iDisp3 Tx" },
+ { "iDisp3 Tx", NULL, "iDisp3_out" },
+ { "hifi2", NULL, "iDisp2 Tx" },
+ { "iDisp2 Tx", NULL, "iDisp2_out" },
+ { "hifi1", NULL, "iDisp1 Tx" },
+ { "iDisp1 Tx", NULL, "iDisp1_out" },
+
+ /* DMIC */
+ { "dmic01_hifi", NULL, "DMIC01 Rx" },
+ { "DMIC01 Rx", NULL, "DMIC AIF" },
+};
+
+static int geminilake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = DUAL_CHANNEL;
+
+ /* set SSP to 24 bit */
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+
+ return 0;
+}
+
+static int geminilake_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct glk_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_jack *jack;
+ int ret;
+
+ /* Configure sysclk for codec */
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1,
+ RT5682_PLL_FREQ, SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_err(rtd->dev, "snd_soc_dai_set_sysclk err = %d\n", ret);
+
+ /*
+ * Headset buttons map to the Google Reference headset.
+ * These can be configured by userspace.
+ */
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
+ &ctx->geminilake_headset, NULL, 0);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ jack = &ctx->geminilake_headset;
+
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+ ret = snd_soc_component_set_jack(component, jack, NULL);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack call-back failed: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int geminilake_rt5682_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ /* Set valid bitmask & configuration for I2S in 24 bit */
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x0, 0x0, 2, 24);
+ if (ret < 0) {
+ dev_err(rtd->dev, "set TDM slot err:%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct snd_soc_ops geminilake_rt5682_ops = {
+ .hw_params = geminilake_rt5682_hw_params,
+};
+
+static int geminilake_hdmi_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct glk_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = rtd->codec_dai;
+ struct glk_hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ pcm->device = GLK_DPCM_AUDIO_HDMI1_PB + dai->id;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+static int geminilake_rt5682_fe_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_component *component = rtd->cpu_dai->component;
+ struct snd_soc_dapm_context *dapm;
+ int ret;
+
+ dapm = snd_soc_component_get_dapm(component);
+ ret = snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
+ if (ret) {
+ dev_err(rtd->dev, "Ref Cap ignore suspend failed %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static const unsigned int rates[] = {
+ 48000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+};
+
+static const unsigned int channels[] = {
+ DUAL_CHANNEL,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static unsigned int channels_quad[] = {
+ QUAD_CHANNEL,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_channels_quad = {
+ .count = ARRAY_SIZE(channels_quad),
+ .list = channels_quad,
+ .mask = 0,
+};
+
+static int geminilake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ /*
+ * set the BE channel constraint to match the FE channel count
+ */
+ channels->min = channels->max = 4;
+
+ return 0;
+}
+
+static int geminilake_dmic_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ runtime->hw.channels_min = runtime->hw.channels_max = QUAD_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels_quad);
+
+ return snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+}
+
+static const struct snd_soc_ops geminilake_dmic_ops = {
+ .startup = geminilake_dmic_startup,
+};
+
+static const unsigned int rates_16000[] = {
+ 16000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_16000 = {
+ .count = ARRAY_SIZE(rates_16000),
+ .list = rates_16000,
+};
+
+static int geminilake_refcap_startup(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_16000);
+}
+
+static const struct snd_soc_ops geminilake_refcap_ops = {
+ .startup = geminilake_refcap_startup,
+};
+
+/* geminilake digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link geminilake_dais[] = {
+ /* Front End DAI links */
+ [GLK_DPCM_AUDIO_PB] = {
+ .name = "Glk Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:0e.0",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .init = geminilake_rt5682_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ },
+ [GLK_DPCM_AUDIO_CP] = {
+ .name = "Glk Audio Capture Port",
+ .stream_name = "Audio Record",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:0e.0",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ },
+ [GLK_DPCM_AUDIO_HS_PB] = {
+ .name = "Glk Audio Headset Playback",
+ .stream_name = "Headset Audio",
+ .cpu_dai_name = "System Pin2",
+ .platform_name = "0000:00:0e.0",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .dpcm_playback = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [GLK_DPCM_AUDIO_ECHO_REF_CP] = {
+ .name = "Glk Audio Echo Reference cap",
+ .stream_name = "Echoreference Capture",
+ .cpu_dai_name = "Echoref Pin",
+ .platform_name = "0000:00:0e.0",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .init = NULL,
+ .capture_only = 1,
+ .nonatomic = 1,
+ },
+ [GLK_DPCM_AUDIO_REF_CP] = {
+ .name = "Glk Audio Reference cap",
+ .stream_name = "Refcap",
+ .cpu_dai_name = "Reference Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &geminilake_refcap_ops,
+ },
+ [GLK_DPCM_AUDIO_DMIC_CP] = {
+ .name = "Glk Audio DMIC cap",
+ .stream_name = "dmiccap",
+ .cpu_dai_name = "DMIC Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &geminilake_dmic_ops,
+ },
+ [GLK_DPCM_AUDIO_HDMI1_PB] = {
+ .name = "Glk HDMI Port1",
+ .stream_name = "Hdmi1",
+ .cpu_dai_name = "HDMI1 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [GLK_DPCM_AUDIO_HDMI2_PB] = {
+ .name = "Glk HDMI Port2",
+ .stream_name = "Hdmi2",
+ .cpu_dai_name = "HDMI2 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [GLK_DPCM_AUDIO_HDMI3_PB] = {
+ .name = "Glk HDMI Port3",
+ .stream_name = "Hdmi3",
+ .cpu_dai_name = "HDMI3 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ /* Back End DAI links */
+ {
+ /* SSP1 - Codec */
+ .name = "SSP1-Codec",
+ .id = 0,
+ .cpu_dai_name = "SSP1 Pin",
+ .platform_name = "0000:00:0e.0",
+ .no_pcm = 1,
+ .codec_name = MAXIM_DEV0_NAME,
+ .codec_dai_name = GLK_MAXIM_CODEC_DAI,
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = geminilake_ssp_fixup,
+ .dpcm_playback = 1,
+ },
+ {
+ /* SSP2 - Codec */
+ .name = "SSP2-Codec",
+ .id = 1,
+ .cpu_dai_name = "SSP2 Pin",
+ .platform_name = "0000:00:0e.0",
+ .no_pcm = 1,
+ .codec_name = "i2c-10EC5682:00",
+ .codec_dai_name = GLK_REALTEK_CODEC_DAI,
+ .init = geminilake_rt5682_codec_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = geminilake_ssp_fixup,
+ .ops = &geminilake_rt5682_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ {
+ .name = "dmic01",
+ .id = 2,
+ .cpu_dai_name = "DMIC01 Pin",
+ .codec_name = "dmic-codec",
+ .codec_dai_name = "dmic-hifi",
+ .platform_name = "0000:00:0e.0",
+ .ignore_suspend = 1,
+ .be_hw_params_fixup = geminilake_dmic_fixup,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp1",
+ .id = 3,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .platform_name = "0000:00:0e.0",
+ .init = geminilake_hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 4,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .platform_name = "0000:00:0e.0",
+ .init = geminilake_hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 5,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .platform_name = "0000:00:0e.0",
+ .init = geminilake_hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+};
+
+static int glk_card_late_probe(struct snd_soc_card *card)
+{
+ struct glk_card_private *ctx = snd_soc_card_get_drvdata(card);
+ struct snd_soc_component *component = NULL;
+ char jack_name[NAME_SIZE];
+ struct glk_hdmi_pcm *pcm;
+ int err = 0;
+ int i = 0;
+
+ list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ component = pcm->codec_dai->component;
+ snprintf(jack_name, sizeof(jack_name),
+ "HDMI/DP, pcm=%d Jack", pcm->device);
+ err = snd_soc_card_jack_new(card, jack_name,
+ SND_JACK_AVOUT, &geminilake_hdmi[i],
+ NULL, 0);
+
+ if (err)
+ return err;
+
+ err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device,
+ &geminilake_hdmi[i]);
+ if (err < 0)
+ return err;
+
+ i++;
+ }
+
+ if (!component)
+ return -EINVAL;
+
+ return hdac_hdmi_jack_port_init(component, &card->dapm);
+}
+
+/* geminilake audio machine driver for RT5682 + MAX98357A */
+static struct snd_soc_card glk_audio_card_rt5682_m98357a = {
+ .name = "glkrt5682max",
+ .owner = THIS_MODULE,
+ .dai_link = geminilake_dais,
+ .num_links = ARRAY_SIZE(geminilake_dais),
+ .controls = geminilake_controls,
+ .num_controls = ARRAY_SIZE(geminilake_controls),
+ .dapm_widgets = geminilake_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(geminilake_widgets),
+ .dapm_routes = geminilake_map,
+ .num_dapm_routes = ARRAY_SIZE(geminilake_map),
+ .fully_routed = true,
+ .late_probe = glk_card_late_probe,
+};
+
+static int geminilake_audio_probe(struct platform_device *pdev)
+{
+ struct glk_card_private *ctx;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
+ if (!ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
+ glk_audio_card_rt5682_m98357a.dev = &pdev->dev;
+ snd_soc_card_set_drvdata(&glk_audio_card_rt5682_m98357a, ctx);
+
+ return devm_snd_soc_register_card(&pdev->dev,
+ &glk_audio_card_rt5682_m98357a);
+}
+
+static const struct platform_device_id glk_board_ids[] = {
+ {
+ .name = "glk_rt5682_max98357a",
+ .driver_data =
+ (kernel_ulong_t)&glk_audio_card_rt5682_m98357a,
+ },
+ { }
+};
+
+static struct platform_driver geminilake_audio = {
+ .probe = geminilake_audio_probe,
+ .driver = {
+ .name = "glk_rt5682_max98357a",
+ .pm = &snd_soc_pm_ops,
+ },
+ .id_table = glk_board_ids,
+};
+module_platform_driver(geminilake_audio);
+
+/* Module information */
+MODULE_DESCRIPTION("Geminilake Audio Machine driver-RT5682 & MAX98357A in I2S mode");
+MODULE_AUTHOR("Naveen Manohar <naveen.m@intel.com>");
+MODULE_AUTHOR("Harsha Priya <harshapriya.n@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:glk_rt5682_max98357a");
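The platform_device_id table and MODULE_ALIAS above let this module autoload as soon as a platform device named "glk_rt5682_max98357a" appears; on these systems that device is created by the parent SST/Skylake platform code. A hedged sketch of how such a device could be instantiated:

	struct platform_device *pdev;

	pdev = platform_device_register_simple("glk_rt5682_max98357a",
					       -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);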
diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
index 94294c27d1db..38f6ab74709d 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
@@ -152,7 +152,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
/* set SSP to 24 bit */
snd_mask_none(fmt);
- snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
return 0;
}
@@ -380,6 +380,7 @@ static struct snd_soc_dai_link kabylake_dais[] = {
.trigger = {
SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
.dpcm_capture = 1,
+ .ops = &kabylake_da7219_fe_ops,
},
[KBL_DPCM_AUDIO_DMIC_CP] = {
.name = "Kbl Audio DMIC cap",
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
index 3a61252fe450..21a6490746a6 100644
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -434,14 +434,14 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
rate->min = rate->max = 48000;
channels->min = channels->max = 2;
snd_mask_none(fmt);
- snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
}
/*
* The speaker on the SSP0 supports S16_LE and not S24_LE.
* thus changing the mask here
*/
if (!strcmp(be_dai_link->name, "SSP0-Codec"))
- snd_mask_set(fmt, SNDRV_PCM_FORMAT_S16_LE);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
return 0;
}
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index 92f5fb2ae0a3..a892b37eab7c 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -307,7 +307,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
rate->min = rate->max = 48000;
channels->min = channels->max = 2;
snd_mask_none(fmt);
- snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
} else if (!strcmp(fe_dai_link->name, "Kbl Audio DMIC cap")) {
if (params_channels(params) == 2 ||
DMIC_CH(dmic_constraints) == 2)
@@ -320,7 +320,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
* thus changing the mask here
*/
if (!strcmp(be_dai_link->name, "SSP0-Codec"))
- snd_mask_set(fmt, SNDRV_PCM_FORMAT_S16_LE);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
return 0;
}
diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
index 3ff6646cfa21..d31482b8c9bb 100644
--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
@@ -157,7 +157,7 @@ static int skylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
/* set SSP0 to 24 bit */
snd_mask_none(fmt);
- snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
return 0;
}
diff --git a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
index b0610bba3cfa..e877bb60beb1 100644
--- a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
+++ b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
@@ -346,7 +346,7 @@ static int skylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
/* set SSP0 to 24 bit */
snd_mask_none(fmt);
- snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
return 0;
}
diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
index 38a1495c29cf..0e1818dd4cc6 100644
--- a/sound/soc/intel/boards/skl_rt286.c
+++ b/sound/soc/intel/boards/skl_rt286.c
@@ -229,7 +229,7 @@ static int skylake_ssp0_fixup(struct snd_soc_pcm_runtime *rtd,
/* set SSP0 to 24 bit */
snd_mask_none(fmt);
- snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
return 0;
}
diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
index 7379d8830c39..915a34cdc8ac 100644
--- a/sound/soc/intel/common/Makefile
+++ b/sound/soc/intel/common/Makefile
@@ -3,7 +3,11 @@ snd-soc-sst-dsp-objs := sst-dsp.o
snd-soc-sst-acpi-objs := sst-acpi.o
snd-soc-sst-ipc-objs := sst-ipc.o
snd-soc-sst-firmware-objs := sst-firmware.o
-snd-soc-acpi-intel-match-objs := soc-acpi-intel-byt-match.o soc-acpi-intel-cht-match.o soc-acpi-intel-hsw-bdw-match.o
+snd-soc-acpi-intel-match-objs := soc-acpi-intel-byt-match.o soc-acpi-intel-cht-match.o \
+ soc-acpi-intel-hsw-bdw-match.o \
+ soc-acpi-intel-skl-match.o soc-acpi-intel-kbl-match.o \
+ soc-acpi-intel-bxt-match.o soc-acpi-intel-glk-match.o \
+ soc-acpi-intel-cnl-match.o
obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o
diff --git a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
new file mode 100644
index 000000000000..f39386e540d3
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * soc-acpi-intel-bxt-match.c - tables and support for BXT ACPI enumeration.
+ *
+ * Copyright (c) 2018, Intel Corporation.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+
+static struct snd_soc_acpi_codecs bxt_codecs = {
+ .num_codecs = 1,
+ .codecs = {"MX98357A"}
+};
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[] = {
+ {
+ .id = "INT343A",
+ .drv_name = "bxt_alc298s_i2s",
+ .fw_filename = "intel/dsp_fw_bxtn.bin",
+ },
+ {
+ .id = "DLGS7219",
+ .drv_name = "bxt_da7219_max98357a",
+ .fw_filename = "intel/dsp_fw_bxtn.bin",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &bxt_codecs,
+ .sof_fw_filename = "intel/sof-apl.ri",
+ .sof_tplg_filename = "intel/sof-apl-da7219.tplg",
+ .asoc_plat_name = "0000:00:0e.0",
+ },
+ {
+ .id = "104C5122",
+ .drv_name = "bxt-pcm512x",
+ .sof_fw_filename = "intel/sof-apl.ri",
+ .sof_tplg_filename = "intel/sof-apl-pcm512x.tplg",
+ .asoc_plat_name = "0000:00:0e.0",
+ },
+ {
+ .id = "1AEC8804",
+ .drv_name = "bxt-wm8804",
+ .sof_fw_filename = "intel/sof-apl.ri",
+ .sof_tplg_filename = "intel/sof-apl-wm8804.tplg",
+ .asoc_plat_name = "0000:00:0e.0",
+ },
+ {
+ .id = "INT34C3",
+ .drv_name = "bxt_tdf8532",
+ .sof_fw_filename = "intel/sof-apl.ri",
+ .sof_tplg_filename = "intel/sof-apl-tdf8532.tplg",
+ .asoc_plat_name = "0000:00:0e.0",
+ },
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_bxt_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
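Tables like snd_soc_acpi_intel_bxt_machines are walked by the platform driver, which checks each entry's .id ACPI HID against devices present on the system and hands the winning entry's drv_name to the machine platform device. A sketch of that lookup (dev is an assumed struct device *):

	struct snd_soc_acpi_mach *mach;

	mach = snd_soc_acpi_find_machine(snd_soc_acpi_intel_bxt_machines);
	if (mach)
		dev_info(dev, "matched machine driver %s\n", mach->drv_name);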
diff --git a/sound/soc/intel/common/soc-acpi-intel-byt-match.c b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
index bfe1ca68a542..4daa8a4f0c0c 100644
--- a/sound/soc/intel/common/soc-acpi-intel-byt-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
@@ -59,8 +59,8 @@ static struct snd_soc_acpi_mach byt_thinkpad_10 = {
.drv_name = "cht-bsw-rt5672",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/reef-byt.ri",
- .sof_tplg_filename = "intel/reef-byt-rt5670.tplg",
+ .sof_fw_filename = "intel/sof-byt.ri",
+ .sof_tplg_filename = "intel/sof-byt-rt5670.tplg",
.asoc_plat_name = "sst-mfld-platform",
};
@@ -98,8 +98,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = {
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "bytcr_rt5640",
.machine_quirk = byt_quirk,
- .sof_fw_filename = "intel/reef-byt.ri",
- .sof_tplg_filename = "intel/reef-byt-rt5640.tplg",
+ .sof_fw_filename = "intel/sof-byt.ri",
+ .sof_tplg_filename = "intel/sof-byt-rt5640.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -107,8 +107,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = {
.drv_name = "bytcr_rt5640",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "bytcr_rt5640",
- .sof_fw_filename = "intel/reef-byt.ri",
- .sof_tplg_filename = "intel/reef-byt-rt5640.tplg",
+ .sof_fw_filename = "intel/sof-byt.ri",
+ .sof_tplg_filename = "intel/sof-byt-rt5640.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -116,8 +116,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = {
.drv_name = "bytcr_rt5640",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "bytcr_rt5640",
- .sof_fw_filename = "intel/reef-byt.ri",
- .sof_tplg_filename = "intel/reef-byt-rt5640.tplg",
+ .sof_fw_filename = "intel/sof-byt.ri",
+ .sof_tplg_filename = "intel/sof-byt-rt5640.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -125,8 +125,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = {
.drv_name = "bytcr_rt5651",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "bytcr_rt5651",
- .sof_fw_filename = "intel/reef-byt.ri",
- .sof_tplg_filename = "intel/reef-byt-rt5651.tplg",
+ .sof_fw_filename = "intel/sof-byt.ri",
+ .sof_tplg_filename = "intel/sof-byt-rt5651.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -134,8 +134,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = {
.drv_name = "bytcht_da7213",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "bytcht_da7213",
- .sof_fw_filename = "intel/reef-byt.ri",
- .sof_tplg_filename = "intel/reef-byt-da7213.tplg",
+ .sof_fw_filename = "intel/sof-byt.ri",
+ .sof_tplg_filename = "intel/sof-byt-da7213.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -143,8 +143,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = {
.drv_name = "bytcht_da7213",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "bytcht_da7213",
- .sof_fw_filename = "intel/reef-byt.ri",
- .sof_tplg_filename = "intel/reef-byt-da7213.tplg",
+ .sof_fw_filename = "intel/sof-byt.ri",
+ .sof_tplg_filename = "intel/sof-byt-da7213.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
/* some Baytrail platforms rely on RT5645, use CHT machine driver */
@@ -153,8 +153,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = {
.drv_name = "cht-bsw-rt5645",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/reef-byt.ri",
- .sof_tplg_filename = "intel/reef-byt-rt5645.tplg",
+ .sof_fw_filename = "intel/sof-byt.ri",
+ .sof_tplg_filename = "intel/sof-byt-rt5645.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -162,8 +162,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = {
.drv_name = "cht-bsw-rt5645",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/reef-byt.ri",
- .sof_tplg_filename = "intel/reef-byt-rt5645.tplg",
+ .sof_fw_filename = "intel/sof-byt.ri",
+ .sof_tplg_filename = "intel/sof-byt-rt5645.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
/* use CHT driver for Baytrail Chromebooks */
@@ -172,8 +172,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = {
.drv_name = "cht-bsw-max98090",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/reef-byt.ri",
- .sof_tplg_filename = "intel/reef-byt-max98090.tplg",
+ .sof_fw_filename = "intel/sof-byt.ri",
+ .sof_tplg_filename = "intel/sof-byt-max98090.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
index ad1eb2d644be..91bb99b69601 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
@@ -44,8 +44,8 @@ static struct snd_soc_acpi_mach cht_surface_mach = {
.drv_name = "cht-bsw-rt5645",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/reef-cht.ri",
- .sof_tplg_filename = "intel/reef-cht-rt5645.tplg",
+ .sof_fw_filename = "intel/sof-cht.ri",
+ .sof_tplg_filename = "intel/sof-cht-rt5645.tplg",
.asoc_plat_name = "sst-mfld-platform",
};
@@ -68,8 +68,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.drv_name = "cht-bsw-rt5672",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/reef-cht.ri",
- .sof_tplg_filename = "intel/reef-cht-rt5670.tplg",
+ .sof_fw_filename = "intel/sof-cht.ri",
+ .sof_tplg_filename = "intel/sof-cht-rt5670.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -77,8 +77,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.drv_name = "cht-bsw-rt5672",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/reef-cht.ri",
- .sof_tplg_filename = "intel/reef-cht-rt5670.tplg",
+ .sof_fw_filename = "intel/sof-cht.ri",
+ .sof_tplg_filename = "intel/sof-cht-rt5670.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -86,8 +86,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.drv_name = "cht-bsw-rt5645",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/reef-cht.ri",
- .sof_tplg_filename = "intel/reef-cht-rt5645.tplg",
+ .sof_fw_filename = "intel/sof-cht.ri",
+ .sof_tplg_filename = "intel/sof-cht-rt5645.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -95,8 +95,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.drv_name = "cht-bsw-rt5645",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/reef-cht.ri",
- .sof_tplg_filename = "intel/reef-cht-rt5645.tplg",
+ .sof_fw_filename = "intel/sof-cht.ri",
+ .sof_tplg_filename = "intel/sof-cht-rt5645.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -104,8 +104,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.drv_name = "cht-bsw-rt5645",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/reef-cht.ri",
- .sof_tplg_filename = "intel/reef-cht-rt5645.tplg",
+ .sof_fw_filename = "intel/sof-cht.ri",
+ .sof_tplg_filename = "intel/sof-cht-rt5645.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -113,8 +113,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.drv_name = "cht-bsw-max98090",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/reef-cht.ri",
- .sof_tplg_filename = "intel/reef-cht-max98090.tplg",
+ .sof_fw_filename = "intel/sof-cht.ri",
+ .sof_tplg_filename = "intel/sof-cht-max98090.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -122,8 +122,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.drv_name = "cht-bsw-nau8824",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/reef-cht.ri",
- .sof_tplg_filename = "intel/reef-cht-nau8824.tplg",
+ .sof_fw_filename = "intel/sof-cht.ri",
+ .sof_tplg_filename = "intel/sof-cht-nau8824.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -131,8 +131,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.drv_name = "bytcht_da7213",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "bytcht_da7213",
- .sof_fw_filename = "intel/reef-cht.ri",
- .sof_tplg_filename = "intel/reef-cht-da7213.tplg",
+ .sof_fw_filename = "intel/sof-cht.ri",
+ .sof_tplg_filename = "intel/sof-cht-da7213.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -140,8 +140,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.drv_name = "bytcht_da7213",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "bytcht_da7213",
- .sof_fw_filename = "intel/reef-cht.ri",
- .sof_tplg_filename = "intel/reef-cht-da7213.tplg",
+ .sof_fw_filename = "intel/sof-cht.ri",
+ .sof_tplg_filename = "intel/sof-cht-da7213.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -149,8 +149,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.drv_name = "bytcht_es8316",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "bytcht_es8316",
- .sof_fw_filename = "intel/reef-cht.ri",
- .sof_tplg_filename = "intel/reef-cht-es8316.tplg",
+ .sof_fw_filename = "intel/sof-cht.ri",
+ .sof_tplg_filename = "intel/sof-cht-es8316.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
/* some CHT-T platforms rely on RT5640, use Baytrail machine driver */
@@ -160,8 +160,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "bytcr_rt5640",
.machine_quirk = cht_quirk,
- .sof_fw_filename = "intel/reef-cht.ri",
- .sof_tplg_filename = "intel/reef-cht-rt5640.tplg",
+ .sof_fw_filename = "intel/sof-cht.ri",
+ .sof_tplg_filename = "intel/sof-cht-rt5640.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
{
@@ -169,8 +169,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.drv_name = "bytcr_rt5640",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "bytcr_rt5640",
- .sof_fw_filename = "intel/reef-cht.ri",
- .sof_tplg_filename = "intel/reef-cht-rt5640.tplg",
+ .sof_fw_filename = "intel/sof-cht.ri",
+ .sof_tplg_filename = "intel/sof-cht-rt5640.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
/* some CHT-T platforms rely on RT5651, use Baytrail machine driver */
@@ -179,8 +179,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.drv_name = "bytcr_rt5651",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "bytcr_rt5651",
- .sof_fw_filename = "intel/reef-cht.ri",
- .sof_tplg_filename = "intel/reef-cht-rt5651.tplg",
+ .sof_fw_filename = "intel/sof-cht.ri",
+ .sof_tplg_filename = "intel/sof-cht-rt5651.tplg",
.asoc_plat_name = "sst-mfld-platform",
},
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
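
cht_surface_mach at the top of this file is reached through the machine_quirk hook on the 10EC5640 entry: the callback may return a different descriptor after inspecting the platform (a DMI match for Surface 3). A small standalone sketch of that indirection, with the DMI probe stubbed out and all names invented:

#include <stdio.h>

struct mach;
typedef const struct mach *(*quirk_fn)(const struct mach *m);

struct mach {
	const char *id;
	const char *board;
	quirk_fn machine_quirk;	/* may substitute the matched entry */
};

static const struct mach cht_surface = { "10EC5640", "cht-bsw", NULL };

static const struct mach *surface_quirk(const struct mach *m)
{
	(void)m;
	/* the real hook checks DMI; pretend it matched "Surface 3" */
	return &cht_surface;
}

static const struct mach cht_rt5640 = {
	"10EC5640", "bytcr_rt5640", surface_quirk
};

int main(void)
{
	const struct mach *m = &cht_rt5640;

	if (m->machine_quirk)
		m = m->machine_quirk(m);
	printf("board=%s\n", m->board);
	return 0;
}
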
diff --git a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
new file mode 100644
index 000000000000..ec8e28e7b937
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * soc-acpi-intel-cnl-match.c - tables and support for CNL ACPI enumeration.
+ *
+ * Copyright (c) 2018, Intel Corporation.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include "../skylake/skl.h"
+
+static struct skl_machine_pdata cnl_pdata = {
+ .use_tplg_pcm = true,
+};
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[] = {
+ {
+ .id = "INT34C2",
+ .drv_name = "cnl_rt274",
+ .fw_filename = "intel/dsp_fw_cnl.bin",
+ .pdata = &cnl_pdata,
+ .sof_fw_filename = "intel/sof-cnl.ri",
+ .sof_tplg_filename = "intel/sof-cnl-rt274.tplg",
+ .asoc_plat_name = "0000:00:1f.3",
+ },
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cnl_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
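
Besides the SOF names, the CNL entry carries driver-private platform data: .pdata points at cnl_pdata, and the matched driver casts it back to read use_tplg_pcm. A tiny standalone sketch of that opaque-pointer pattern, with invented type names:

#include <stdbool.h>
#include <stdio.h>

struct skl_pdata { bool use_tplg_pcm; };	/* illustrative */

struct mach { const char *id; const void *pdata; };

static const struct skl_pdata cnl_pdata = { .use_tplg_pcm = true };
static const struct mach cnl_machines[] = {
	{ "INT34C2", &cnl_pdata },
	{ NULL, NULL },
};

int main(void)
{
	/* the consumer knows the concrete pdata type and casts back */
	const struct skl_pdata *p = cnl_machines[0].pdata;

	printf("use_tplg_pcm=%d\n", p->use_tplg_pcm);
	return 0;
}
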
diff --git a/sound/soc/intel/common/soc-acpi-intel-glk-match.c b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
new file mode 100644
index 000000000000..305875af71ca
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * soc-acpi-intel-glk-match.c - tables and support for GLK ACPI enumeration.
+ *
+ * Copyright (c) 2018, Intel Corporation.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+
+static struct snd_soc_acpi_codecs glk_codecs = {
+ .num_codecs = 1,
+ .codecs = {"MX98357A"}
+};
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = {
+ {
+ .id = "INT343A",
+ .drv_name = "glk_alc298s_i2s",
+ .fw_filename = "intel/dsp_fw_glk.bin",
+ .sof_fw_filename = "intel/sof-glk.ri",
+ .sof_tplg_filename = "intel/sof-glk-alc298.tplg",
+ .asoc_plat_name = "0000:00:0e.0",
+ },
+ {
+ .id = "DLGS7219",
+ .drv_name = "glk_da7219_max98357a",
+ .fw_filename = "intel/dsp_fw_glk.bin",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &glk_codecs,
+ .sof_fw_filename = "intel/sof-glk.ri",
+ .sof_tplg_filename = "intel/sof-glk-da7219.tplg",
+ .asoc_plat_name = "0000:00:0e.0",
+ },
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_glk_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
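
The DLGS7219 entry matches only when its companion amplifier is also enumerated: machine_quirk = snd_soc_acpi_codec_list checks each HID in quirk_data against the ACPI namespace. A hedged userspace approximation of that check — the real helper lives in the soc-acpi core, everything below is illustrative:

#include <stdio.h>
#include <string.h>

#define MAX_CODECS 3

struct codecs { int num; const char *ids[MAX_CODECS]; };

/* return 1 only if every required companion codec HID is present */
static int codec_list_ok(const struct codecs *req,
			 const char *const *present, int n_present)
{
	int i, j, found;

	for (i = 0; i < req->num; i++) {
		found = 0;
		for (j = 0; j < n_present; j++)
			if (!strcmp(req->ids[i], present[j]))
				found = 1;
		if (!found)
			return 0;
	}
	return 1;
}

int main(void)
{
	static const struct codecs glk_codecs = { 1, { "MX98357A" } };
	const char *present[] = { "DLGS7219", "MX98357A" };

	printf("da7219+max98357a board matches: %d\n",
	       codec_list_ok(&glk_codecs, present, 2));
	return 0;
}
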
diff --git a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c
index e0e8c8c27528..494a0ea9b029 100644
--- a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c
@@ -23,8 +23,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_haswell_machines[] = {
.id = "INT33CA",
.drv_name = "haswell-audio",
.fw_filename = "intel/IntcSST1.bin",
- .sof_fw_filename = "intel/reef-hsw.ri",
- .sof_tplg_filename = "intel/reef-hsw.tplg",
+ .sof_fw_filename = "intel/sof-hsw.ri",
+ .sof_tplg_filename = "intel/sof-hsw.tplg",
.asoc_plat_name = "haswell-pcm-audio",
},
{}
@@ -36,24 +36,24 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[] = {
.id = "INT343A",
.drv_name = "broadwell-audio",
.fw_filename = "intel/IntcSST2.bin",
- .sof_fw_filename = "intel/reef-bdw.ri",
- .sof_tplg_filename = "intel/reef-bdw-rt286.tplg",
+ .sof_fw_filename = "intel/sof-bdw.ri",
+ .sof_tplg_filename = "intel/sof-bdw-rt286.tplg",
.asoc_plat_name = "haswell-pcm-audio",
},
{
.id = "RT5677CE",
.drv_name = "bdw-rt5677",
.fw_filename = "intel/IntcSST2.bin",
- .sof_fw_filename = "intel/reef-bdw.ri",
- .sof_tplg_filename = "intel/reef-bdw-rt286.tplg",
+ .sof_fw_filename = "intel/sof-bdw.ri",
+ .sof_tplg_filename = "intel/sof-bdw-rt5677.tplg",
.asoc_plat_name = "haswell-pcm-audio",
},
{
.id = "INT33CA",
.drv_name = "haswell-audio",
.fw_filename = "intel/IntcSST2.bin",
- .sof_fw_filename = "intel/reef-bdw.ri",
- .sof_tplg_filename = "intel/reef-bdw-rt5640.tplg",
+ .sof_fw_filename = "intel/sof-bdw.ri",
+ .sof_tplg_filename = "intel/sof-bdw-rt5640.tplg",
.asoc_plat_name = "haswell-pcm-audio",
},
{}
diff --git a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
new file mode 100644
index 000000000000..0ee173ca437d
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * soc-acpi-intel-kbl-match.c - tables and support for KBL ACPI enumeration.
+ *
+ * Copyright (c) 2018, Intel Corporation.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include "../skylake/skl.h"
+
+static struct skl_machine_pdata skl_dmic_data;
+
+static struct snd_soc_acpi_codecs kbl_codecs = {
+ .num_codecs = 1,
+ .codecs = {"10508825"}
+};
+
+static struct snd_soc_acpi_codecs kbl_poppy_codecs = {
+ .num_codecs = 1,
+ .codecs = {"10EC5663"}
+};
+
+static struct snd_soc_acpi_codecs kbl_5663_5514_codecs = {
+ .num_codecs = 2,
+ .codecs = {"10EC5663", "10EC5514"}
+};
+
+static struct snd_soc_acpi_codecs kbl_7219_98357_codecs = {
+ .num_codecs = 1,
+ .codecs = {"MX98357A"}
+};
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = {
+ {
+ .id = "INT343A",
+ .drv_name = "kbl_alc286s_i2s",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ },
+ {
+ .id = "INT343B",
+ .drv_name = "kbl_n88l25_s4567",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &kbl_codecs,
+ .pdata = &skl_dmic_data,
+ },
+ {
+ .id = "MX98357A",
+ .drv_name = "kbl_n88l25_m98357a",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &kbl_codecs,
+ .pdata = &skl_dmic_data,
+ },
+ {
+ .id = "MX98927",
+ .drv_name = "kbl_r5514_5663_max",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &kbl_5663_5514_codecs,
+ .pdata = &skl_dmic_data,
+ },
+ {
+ .id = "MX98927",
+ .drv_name = "kbl_rt5663_m98927",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &kbl_poppy_codecs,
+ .pdata = &skl_dmic_data,
+ },
+ {
+ .id = "10EC5663",
+ .drv_name = "kbl_rt5663",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ },
+ {
+ .id = "DLGS7219",
+ .drv_name = "kbl_da7219_max98357a",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &kbl_7219_98357_codecs,
+ .pdata = &skl_dmic_data,
+ },
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_kbl_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/common/soc-acpi-intel-skl-match.c b/sound/soc/intel/common/soc-acpi-intel-skl-match.c
new file mode 100644
index 000000000000..0c9c0edd35b3
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-skl-match.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * soc-acpi-intel-skl-match.c - tables and support for SKL ACPI enumeration.
+ *
+ * Copyright (c) 2018, Intel Corporation.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include "../skylake/skl.h"
+
+static struct skl_machine_pdata skl_dmic_data;
+
+static struct snd_soc_acpi_codecs skl_codecs = {
+ .num_codecs = 1,
+ .codecs = {"10508825"}
+};
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_skl_machines[] = {
+ {
+ .id = "INT343A",
+ .drv_name = "skl_alc286s_i2s",
+ .fw_filename = "intel/dsp_fw_release.bin",
+ },
+ {
+ .id = "INT343B",
+ .drv_name = "skl_n88l25_s4567",
+ .fw_filename = "intel/dsp_fw_release.bin",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &skl_codecs,
+ .pdata = &skl_dmic_data,
+ },
+ {
+ .id = "MX98357A",
+ .drv_name = "skl_n88l25_m98357a",
+ .fw_filename = "intel/dsp_fw_release.bin",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &skl_codecs,
+ .pdata = &skl_dmic_data,
+ },
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_skl_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index 657afc02f1c4..11041aedea31 100644
--- a/sound/soc/intel/common/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -270,7 +270,7 @@ void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);
-int sst_dma_new(struct sst_dsp *sst)
+static int sst_dma_new(struct sst_dsp *sst)
{
struct sst_pdata *sst_pdata = sst->pdata;
struct sst_dma *dma;
@@ -320,9 +320,8 @@ err_dma_dev:
devm_kfree(sst->dev, dma);
return ret;
}
-EXPORT_SYMBOL(sst_dma_new);
-void sst_dma_free(struct sst_dma *dma)
+static void sst_dma_free(struct sst_dma *dma)
{
if (dma == NULL)
@@ -335,7 +334,6 @@ void sst_dma_free(struct sst_dma *dma)
dw_remove(dma->chip);
}
-EXPORT_SYMBOL(sst_dma_free);
/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
diff --git a/sound/soc/intel/haswell/sst-haswell-dsp.c b/sound/soc/intel/haswell/sst-haswell-dsp.c
index b2bec36d074c..a28220e67cdf 100644
--- a/sound/soc/intel/haswell/sst-haswell-dsp.c
+++ b/sound/soc/intel/haswell/sst-haswell-dsp.c
@@ -93,29 +93,31 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
struct sst_module_template template;
int count, ret;
void __iomem *ram;
+ int type = le16_to_cpu(module->type);
+ int entry_point = le32_to_cpu(module->entry_point);
/* TODO: allowed module types need to be configurable */
- if (module->type != SST_HSW_MODULE_BASE_FW
- && module->type != SST_HSW_MODULE_PCM_SYSTEM
- && module->type != SST_HSW_MODULE_PCM
- && module->type != SST_HSW_MODULE_PCM_REFERENCE
- && module->type != SST_HSW_MODULE_PCM_CAPTURE
- && module->type != SST_HSW_MODULE_WAVES
- && module->type != SST_HSW_MODULE_LPAL)
+ if (type != SST_HSW_MODULE_BASE_FW &&
+ type != SST_HSW_MODULE_PCM_SYSTEM &&
+ type != SST_HSW_MODULE_PCM &&
+ type != SST_HSW_MODULE_PCM_REFERENCE &&
+ type != SST_HSW_MODULE_PCM_CAPTURE &&
+ type != SST_HSW_MODULE_WAVES &&
+ type != SST_HSW_MODULE_LPAL)
return 0;
dev_dbg(dsp->dev, "new module sign 0x%s size 0x%x blocks 0x%x type 0x%x\n",
module->signature, module->mod_size,
- module->blocks, module->type);
- dev_dbg(dsp->dev, " entrypoint 0x%x\n", module->entry_point);
+ module->blocks, type);
+ dev_dbg(dsp->dev, " entrypoint 0x%x\n", entry_point);
dev_dbg(dsp->dev, " persistent 0x%x scratch 0x%x\n",
module->info.persistent_size, module->info.scratch_size);
memset(&template, 0, sizeof(template));
- template.id = module->type;
- template.entry = module->entry_point - 4;
- template.persistent_size = module->info.persistent_size;
- template.scratch_size = module->info.scratch_size;
+ template.id = type;
+ template.entry = entry_point - 4;
+ template.persistent_size = le32_to_cpu(module->info.persistent_size);
+ template.scratch_size = le32_to_cpu(module->info.scratch_size);
mod = sst_module_new(fw, &template, NULL);
if (mod == NULL)
@@ -123,26 +125,26 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
block = (void *)module + sizeof(*module);
- for (count = 0; count < module->blocks; count++) {
+ for (count = 0; count < le32_to_cpu(module->blocks); count++) {
- if (block->size <= 0) {
+ if (le32_to_cpu(block->size) <= 0) {
dev_err(dsp->dev,
"error: block %d size invalid\n", count);
sst_module_free(mod);
return -EINVAL;
}
- switch (block->type) {
+ switch (le32_to_cpu(block->type)) {
case SST_HSW_IRAM:
ram = dsp->addr.lpe;
- mod->offset =
- block->ram_offset + dsp->addr.iram_offset;
+ mod->offset = le32_to_cpu(block->ram_offset) +
+ dsp->addr.iram_offset;
mod->type = SST_MEM_IRAM;
break;
case SST_HSW_DRAM:
case SST_HSW_REGS:
ram = dsp->addr.lpe;
- mod->offset = block->ram_offset;
+ mod->offset = le32_to_cpu(block->ram_offset);
mod->type = SST_MEM_DRAM;
break;
default:
@@ -152,7 +154,7 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
return -EINVAL;
}
- mod->size = block->size;
+ mod->size = le32_to_cpu(block->size);
mod->data = (void *)block + sizeof(*block);
mod->data_offset = mod->data - fw->dma_buf;
@@ -169,7 +171,8 @@ static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
return ret;
}
- block = (void *)block + sizeof(*block) + block->size;
+ block = (void *)block + sizeof(*block) +
+ le32_to_cpu(block->size);
}
mod->state = SST_MODULE_STATE_LOADED;
@@ -188,7 +191,8 @@ static int hsw_parse_fw_image(struct sst_fw *sst_fw)
/* verify FW */
if ((strncmp(header->signature, SST_HSW_FW_SIGN, 4) != 0) ||
- (sst_fw->size != header->file_size + sizeof(*header))) {
+ (sst_fw->size !=
+ le32_to_cpu(header->file_size) + sizeof(*header))) {
dev_err(dsp->dev, "error: invalid fw sign/filesize mismatch\n");
return -EINVAL;
}
@@ -199,7 +203,7 @@ static int hsw_parse_fw_image(struct sst_fw *sst_fw)
/* parse each module */
module = (void *)sst_fw->dma_buf + sizeof(*header);
- for (count = 0; count < header->modules; count++) {
+ for (count = 0; count < le32_to_cpu(header->modules); count++) {
/* module */
ret = hsw_parse_module(dsp, sst_fw, module);
@@ -207,7 +211,8 @@ static int hsw_parse_fw_image(struct sst_fw *sst_fw)
dev_err(dsp->dev, "error: invalid module %d\n", count);
return ret;
}
- module = (void *)module + sizeof(*module) + module->mod_size;
+ module = (void *)module + sizeof(*module) +
+ le32_to_cpu(module->mod_size);
}
return 0;
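
The le32_to_cpu() conversions added throughout this parser matter because the firmware image is little-endian on disk while the kernel may run big-endian; unconverted reads only happened to work on x86. A standalone sketch of endian-safe parsing and the same next = cur + sizeof(hdr) + le32(size) record walk (get_le32 stands in for the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* read a 32-bit little-endian value regardless of host endianness;
 * this is the guarantee le32_to_cpu() provides on big-endian hosts */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* two variable-sized records, {le32 size}{payload}, mimicking
	 * the module walk above */
	uint8_t img[] = { 2, 0, 0, 0, 0xAA, 0xBB,  1, 0, 0, 0, 0xCC };
	size_t off = 0;

	while (off + 4 <= sizeof(img)) {
		uint32_t size = get_le32(&img[off]);

		printf("record at %zu, payload %u bytes\n", off, size);
		off += 4 + size;	/* header + converted size */
	}
	return 0;
}
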
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index d5f9c30eba32..8bfb8b0fa3d5 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -33,8 +33,7 @@
static int skl_alloc_dma_buf(struct device *dev,
struct snd_dma_buffer *dmab, size_t size)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = dev_get_drvdata(dev);
if (!bus)
return -ENODEV;
@@ -44,8 +43,7 @@ static int skl_alloc_dma_buf(struct device *dev,
static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = dev_get_drvdata(dev);
if (!bus)
return -ENODEV;
@@ -89,8 +87,7 @@ void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable)
static int skl_dsp_setup_spib(struct device *dev, unsigned int size,
int stream_tag, int enable)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = dev_get_drvdata(dev);
struct hdac_stream *stream = snd_hdac_get_stream(bus,
SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
struct hdac_ext_stream *estream;
@@ -100,10 +97,10 @@ static int skl_dsp_setup_spib(struct device *dev, unsigned int size,
estream = stream_to_hdac_ext_stream(stream);
/* enable/disable SPIB for this hdac stream */
- snd_hdac_ext_stream_spbcap_enable(ebus, enable, stream->index);
+ snd_hdac_ext_stream_spbcap_enable(bus, enable, stream->index);
/* set the spib value */
- snd_hdac_ext_stream_set_spib(ebus, estream, size);
+ snd_hdac_ext_stream_set_spib(bus, estream, size);
return 0;
}
@@ -111,8 +108,7 @@ static int skl_dsp_setup_spib(struct device *dev, unsigned int size,
static int skl_dsp_prepare(struct device *dev, unsigned int format,
unsigned int size, struct snd_dma_buffer *dmab)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = dev_get_drvdata(dev);
struct hdac_ext_stream *estream;
struct hdac_stream *stream;
struct snd_pcm_substream substream;
@@ -124,7 +120,7 @@ static int skl_dsp_prepare(struct device *dev, unsigned int format,
memset(&substream, 0, sizeof(substream));
substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
- estream = snd_hdac_ext_stream_assign(ebus, &substream,
+ estream = snd_hdac_ext_stream_assign(bus, &substream,
HDAC_EXT_STREAM_TYPE_HOST);
if (!estream)
return -ENODEV;
@@ -143,9 +139,8 @@ static int skl_dsp_prepare(struct device *dev, unsigned int format,
static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
+ struct hdac_bus *bus = dev_get_drvdata(dev);
struct hdac_stream *stream;
- struct hdac_bus *bus = ebus_to_hbus(ebus);
if (!bus)
return -ENODEV;
@@ -163,10 +158,9 @@ static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag)
static int skl_dsp_cleanup(struct device *dev,
struct snd_dma_buffer *dmab, int stream_tag)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
+ struct hdac_bus *bus = dev_get_drvdata(dev);
struct hdac_stream *stream;
struct hdac_ext_stream *estream;
- struct hdac_bus *bus = ebus_to_hbus(ebus);
if (!bus)
return -ENODEV;
@@ -270,8 +264,7 @@ const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id)
int skl_init_dsp(struct skl *skl)
{
void __iomem *mmio_base;
- struct hdac_ext_bus *ebus = &skl->ebus;
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = skl_to_bus(skl);
struct skl_dsp_loader_ops loader_ops;
int irq = bus->irq;
const struct skl_dsp_ops *ops;
@@ -279,8 +272,8 @@ int skl_init_dsp(struct skl *skl)
int ret;
/* enable ppcap interrupt */
- snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
- snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
+ snd_hdac_ext_bus_ppcap_enable(bus, true);
+ snd_hdac_ext_bus_ppcap_int_enable(bus, true);
/* read the BAR of the ADSP MMIO */
mmio_base = pci_ioremap_bar(skl->pci, 4);
@@ -335,12 +328,11 @@ unmap_mmio:
int skl_free_dsp(struct skl *skl)
{
- struct hdac_ext_bus *ebus = &skl->ebus;
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = skl_to_bus(skl);
struct skl_sst *ctx = skl->skl_sst;
/* disable ppcap interrupt */
- snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
+ snd_hdac_ext_bus_ppcap_int_enable(bus, false);
ctx->dsp_ops->cleanup(bus->dev, ctx);
@@ -383,10 +375,11 @@ int skl_suspend_late_dsp(struct skl *skl)
int skl_suspend_dsp(struct skl *skl)
{
struct skl_sst *ctx = skl->skl_sst;
+ struct hdac_bus *bus = skl_to_bus(skl);
int ret;
/* if ppcap is not supported return 0 */
- if (!skl->ebus.bus.ppcap)
+ if (!bus->ppcap)
return 0;
ret = skl_dsp_sleep(ctx->dsp);
@@ -394,8 +387,8 @@ int skl_suspend_dsp(struct skl *skl)
return ret;
/* disable ppcap interrupt */
- snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
- snd_hdac_ext_bus_ppcap_enable(&skl->ebus, false);
+ snd_hdac_ext_bus_ppcap_int_enable(bus, false);
+ snd_hdac_ext_bus_ppcap_enable(bus, false);
return 0;
}
@@ -403,15 +396,16 @@ int skl_suspend_dsp(struct skl *skl)
int skl_resume_dsp(struct skl *skl)
{
struct skl_sst *ctx = skl->skl_sst;
+ struct hdac_bus *bus = skl_to_bus(skl);
int ret;
/* if ppcap is not supported return 0 */
- if (!skl->ebus.bus.ppcap)
+ if (!bus->ppcap)
return 0;
/* enable ppcap interrupt */
- snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
- snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
+ snd_hdac_ext_bus_ppcap_enable(bus, true);
+ snd_hdac_ext_bus_ppcap_int_enable(bus, true);
/* check if DSP 1st boot is done */
if (skl->skl_sst->is_first_boot == true)
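
These hunks work because struct skl embeds its hdac_bus directly, so skl_to_bus()/bus_to_skl() reduce to plain pointer arithmetic and the hdac_ext_bus wrapper can go away. A compilable sketch of that embedding idiom, with stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hdac_bus_like { int irq; };

/* the driver context embeds the bus; converting between the two is
 * just an offset, so no intermediate ext-bus object is required */
struct skl_like {
	struct hdac_bus_like bus;
	const char *fw_name;
};

#define skl_to_bus(s)  (&(s)->bus)
#define bus_to_skl(b)  container_of(b, struct skl_like, bus)

int main(void)
{
	struct skl_like skl = { .bus = { .irq = 11 }, .fw_name = "sof" };
	struct hdac_bus_like *bus = skl_to_bus(&skl);

	printf("irq=%d fw=%s\n", bus->irq, bus_to_skl(bus)->fw_name);
	return 0;
}
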
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index b9b140275be0..01a050cf8775 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -141,7 +141,7 @@ struct nhlt_specific_cfg
{
struct nhlt_fmt *fmt;
struct nhlt_endpoint *epnt;
- struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+ struct hdac_bus *bus = skl_to_bus(skl);
struct device *dev = bus->dev;
struct nhlt_specific_cfg *sp_config;
struct nhlt_acpi_table *nhlt = skl->nhlt;
@@ -228,7 +228,7 @@ static void skl_nhlt_trim_space(char *trim)
int skl_nhlt_update_topology_bin(struct skl *skl)
{
struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
- struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+ struct hdac_bus *bus = skl_to_bus(skl);
struct device *dev = bus->dev;
dev_dbg(dev, "oem_id %.6s, oem_table_id %8s oem_revision %d\n",
@@ -248,8 +248,8 @@ static ssize_t skl_nhlt_platform_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pci = to_pci_dev(dev);
- struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
- struct skl *skl = ebus_to_skl(ebus);
+ struct hdac_bus *bus = pci_get_drvdata(pci);
+ struct skl *skl = bus_to_skl(bus);
struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
char platform_id[32];
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index afa86b9e4dcf..823e39103edd 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -67,16 +67,15 @@ struct hdac_ext_stream *get_hdac_ext_stream(struct snd_pcm_substream *substream)
return substream->runtime->private_data;
}
-static struct hdac_ext_bus *get_bus_ctx(struct snd_pcm_substream *substream)
+static struct hdac_bus *get_bus_ctx(struct snd_pcm_substream *substream)
{
struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
struct hdac_stream *hstream = hdac_stream(stream);
struct hdac_bus *bus = hstream->bus;
-
- return hbus_to_ebus(bus);
+ return bus;
}
-static int skl_substream_alloc_pages(struct hdac_ext_bus *ebus,
+static int skl_substream_alloc_pages(struct hdac_bus *bus,
struct snd_pcm_substream *substream,
size_t size)
{
@@ -95,7 +94,7 @@ static int skl_substream_free_pages(struct hdac_bus *bus,
return snd_pcm_lib_free_pages(substream);
}
-static void skl_set_pcm_constrains(struct hdac_ext_bus *ebus,
+static void skl_set_pcm_constrains(struct hdac_bus *bus,
struct snd_pcm_runtime *runtime)
{
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
@@ -105,9 +104,9 @@ static void skl_set_pcm_constrains(struct hdac_ext_bus *ebus,
20, 178000000);
}
-static enum hdac_ext_stream_type skl_get_host_stream_type(struct hdac_ext_bus *ebus)
+static enum hdac_ext_stream_type skl_get_host_stream_type(struct hdac_bus *bus)
{
- if ((ebus_to_hbus(ebus))->ppcap)
+ if (bus->ppcap)
return HDAC_EXT_STREAM_TYPE_HOST;
else
return HDAC_EXT_STREAM_TYPE_COUPLED;
@@ -123,9 +122,9 @@ static enum hdac_ext_stream_type skl_get_host_stream_type(struct hdac_ext_bus *e
static void skl_set_suspend_active(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai, bool enable)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+ struct hdac_bus *bus = dev_get_drvdata(dai->dev);
struct snd_soc_dapm_widget *w;
- struct skl *skl = ebus_to_skl(ebus);
+ struct skl *skl = bus_to_skl(bus);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
w = dai->playback_widget;
@@ -140,8 +139,7 @@ static void skl_set_suspend_active(struct snd_pcm_substream *substream,
int skl_pcm_host_dma_prepare(struct device *dev, struct skl_pipe_params *params)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = dev_get_drvdata(dev);
unsigned int format_val;
struct hdac_stream *hstream;
struct hdac_ext_stream *stream;
@@ -153,7 +151,7 @@ int skl_pcm_host_dma_prepare(struct device *dev, struct skl_pipe_params *params)
return -EINVAL;
stream = stream_to_hdac_ext_stream(hstream);
- snd_hdac_ext_stream_decouple(ebus, stream, true);
+ snd_hdac_ext_stream_decouple(bus, stream, true);
format_val = snd_hdac_calc_stream_format(params->s_freq,
params->ch, params->format, params->host_bps, 0);
@@ -177,8 +175,7 @@ int skl_pcm_host_dma_prepare(struct device *dev, struct skl_pipe_params *params)
int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = dev_get_drvdata(dev);
unsigned int format_val;
struct hdac_stream *hstream;
struct hdac_ext_stream *stream;
@@ -190,7 +187,7 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
return -EINVAL;
stream = stream_to_hdac_ext_stream(hstream);
- snd_hdac_ext_stream_decouple(ebus, stream, true);
+ snd_hdac_ext_stream_decouple(bus, stream, true);
format_val = snd_hdac_calc_stream_format(params->s_freq, params->ch,
params->format, params->link_bps, 0);
@@ -201,7 +198,7 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
snd_hdac_ext_link_stream_setup(stream, format_val);
- list_for_each_entry(link, &ebus->hlink_list, list) {
+ list_for_each_entry(link, &bus->hlink_list, list) {
if (link->index == params->link_index)
snd_hdac_ext_link_set_stream_id(link,
hstream->stream_tag);
@@ -215,7 +212,7 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
static int skl_pcm_open(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+ struct hdac_bus *bus = dev_get_drvdata(dai->dev);
struct hdac_ext_stream *stream;
struct snd_pcm_runtime *runtime = substream->runtime;
struct skl_dma_params *dma_params;
@@ -224,12 +221,12 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
- stream = snd_hdac_ext_stream_assign(ebus, substream,
- skl_get_host_stream_type(ebus));
+ stream = snd_hdac_ext_stream_assign(bus, substream,
+ skl_get_host_stream_type(bus));
if (stream == NULL)
return -EBUSY;
- skl_set_pcm_constrains(ebus, runtime);
+ skl_set_pcm_constrains(bus, runtime);
/*
* disable WALLCLOCK timestamps for capture streams
@@ -301,7 +298,7 @@ static int skl_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+ struct hdac_bus *bus = dev_get_drvdata(dai->dev);
struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
struct skl_pipe_params p_params = {0};
@@ -309,7 +306,7 @@ static int skl_pcm_hw_params(struct snd_pcm_substream *substream,
int ret, dma_id;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
- ret = skl_substream_alloc_pages(ebus, substream,
+ ret = skl_substream_alloc_pages(bus, substream,
params_buffer_bytes(params));
if (ret < 0)
return ret;
@@ -343,14 +340,14 @@ static void skl_pcm_close(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
- struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+ struct hdac_bus *bus = dev_get_drvdata(dai->dev);
struct skl_dma_params *dma_params = NULL;
- struct skl *skl = ebus_to_skl(ebus);
+ struct skl *skl = bus_to_skl(bus);
struct skl_module_cfg *mconfig;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
- snd_hdac_ext_stream_release(stream, skl_get_host_stream_type(ebus));
+ snd_hdac_ext_stream_release(stream, skl_get_host_stream_type(bus));
dma_params = snd_soc_dai_get_dma_data(dai, substream);
/*
@@ -380,7 +377,7 @@ static void skl_pcm_close(struct snd_pcm_substream *substream,
static int skl_pcm_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+ struct hdac_bus *bus = dev_get_drvdata(dai->dev);
struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
struct skl *skl = get_skl_ctx(dai->dev);
struct skl_module_cfg *mconfig;
@@ -400,7 +397,7 @@ static int skl_pcm_hw_free(struct snd_pcm_substream *substream,
snd_hdac_stream_cleanup(hdac_stream(stream));
hdac_stream(stream)->prepared = 0;
- return skl_substream_free_pages(ebus_to_hbus(ebus), substream);
+ return skl_substream_free_pages(bus, substream);
}
static int skl_be_hw_params(struct snd_pcm_substream *substream,
@@ -420,8 +417,7 @@ static int skl_be_hw_params(struct snd_pcm_substream *substream,
static int skl_decoupled_trigger(struct snd_pcm_substream *substream,
int cmd)
{
- struct hdac_ext_bus *ebus = get_bus_ctx(substream);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = get_bus_ctx(substream);
struct hdac_ext_stream *stream;
int start;
unsigned long cookie;
@@ -470,7 +466,7 @@ static int skl_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
struct skl *skl = get_skl_ctx(dai->dev);
struct skl_sst *ctx = skl->skl_sst;
struct skl_module_cfg *mconfig;
- struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+ struct hdac_bus *bus = get_bus_ctx(substream);
struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
struct snd_soc_dapm_widget *w;
int ret;
@@ -492,9 +488,9 @@ static int skl_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
* dpib & lpib position to resume before starting the
* DMA
*/
- snd_hdac_ext_stream_drsm_enable(ebus, true,
+ snd_hdac_ext_stream_drsm_enable(bus, true,
hdac_stream(stream)->index);
- snd_hdac_ext_stream_set_dpibr(ebus, stream,
+ snd_hdac_ext_stream_set_dpibr(bus, stream,
stream->lpib);
snd_hdac_ext_stream_set_lpib(stream, stream->lpib);
}
@@ -528,14 +524,14 @@ static int skl_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
ret = skl_decoupled_trigger(substream, cmd);
if ((cmd == SNDRV_PCM_TRIGGER_SUSPEND) && !w->ignore_suspend) {
/* save the dpib and lpib positions */
- stream->dpib = readl(ebus->bus.remap_addr +
+ stream->dpib = readl(bus->remap_addr +
AZX_REG_VS_SDXDPIB_XBASE +
(AZX_REG_VS_SDXDPIB_XINTERVAL *
hdac_stream(stream)->index));
stream->lpib = snd_hdac_stream_get_pos_lpib(
hdac_stream(stream));
- snd_hdac_ext_stream_decouple(ebus, stream, false);
+ snd_hdac_ext_stream_decouple(bus, stream, false);
}
break;
@@ -546,11 +542,12 @@ static int skl_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
return 0;
}
+
static int skl_link_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+ struct hdac_bus *bus = dev_get_drvdata(dai->dev);
struct hdac_ext_stream *link_dev;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
struct snd_soc_dai *codec_dai = rtd->codec_dai;
@@ -558,14 +555,14 @@ static int skl_link_hw_params(struct snd_pcm_substream *substream,
struct hdac_ext_link *link;
int stream_tag;
- link_dev = snd_hdac_ext_stream_assign(ebus, substream,
+ link_dev = snd_hdac_ext_stream_assign(bus, substream,
HDAC_EXT_STREAM_TYPE_LINK);
if (!link_dev)
return -EBUSY;
snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
- link = snd_hdac_ext_bus_get_link(ebus, codec_dai->component->name);
+ link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
if (!link)
return -EINVAL;
@@ -610,7 +607,7 @@ static int skl_link_pcm_trigger(struct snd_pcm_substream *substream,
{
struct hdac_ext_stream *link_dev =
snd_soc_dai_get_dma_data(dai, substream);
- struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+ struct hdac_bus *bus = get_bus_ctx(substream);
struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
dev_dbg(dai->dev, "In %s cmd=%d\n", __func__, cmd);
@@ -626,7 +623,7 @@ static int skl_link_pcm_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_STOP:
snd_hdac_ext_link_stream_clear(link_dev);
if (cmd == SNDRV_PCM_TRIGGER_SUSPEND)
- snd_hdac_ext_stream_decouple(ebus, stream, false);
+ snd_hdac_ext_stream_decouple(bus, stream, false);
break;
default:
@@ -638,7 +635,7 @@ static int skl_link_pcm_trigger(struct snd_pcm_substream *substream,
static int skl_link_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+ struct hdac_bus *bus = dev_get_drvdata(dai->dev);
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
struct hdac_ext_stream *link_dev =
snd_soc_dai_get_dma_data(dai, substream);
@@ -648,7 +645,7 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
link_dev->link_prepared = 0;
- link = snd_hdac_ext_bus_get_link(ebus, rtd->codec_dai->component->name);
+ link = snd_hdac_ext_bus_get_link(bus, rtd->codec_dai->component->name);
if (!link)
return -EINVAL;
@@ -1017,10 +1014,11 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
},
};
-int skl_dai_load(struct snd_soc_component *cmp,
- struct snd_soc_dai_driver *pcm_dai)
+int skl_dai_load(struct snd_soc_component *cmp, int index,
+ struct snd_soc_dai_driver *dai_drv,
+ struct snd_soc_tplg_pcm *pcm, struct snd_soc_dai *dai)
{
- pcm_dai->ops = &skl_pcm_dai_ops;
+ dai_drv->ops = &skl_pcm_dai_ops;
return 0;
}
@@ -1041,8 +1039,7 @@ static int skl_platform_open(struct snd_pcm_substream *substream)
static int skl_coupled_trigger(struct snd_pcm_substream *substream,
int cmd)
{
- struct hdac_ext_bus *ebus = get_bus_ctx(substream);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = get_bus_ctx(substream);
struct hdac_ext_stream *stream;
struct snd_pcm_substream *s;
bool start;
@@ -1115,9 +1112,9 @@ static int skl_coupled_trigger(struct snd_pcm_substream *substream,
static int skl_platform_pcm_trigger(struct snd_pcm_substream *substream,
int cmd)
{
- struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+ struct hdac_bus *bus = get_bus_ctx(substream);
- if (!(ebus_to_hbus(ebus))->ppcap)
+ if (!bus->ppcap)
return skl_coupled_trigger(substream, cmd);
return 0;
@@ -1127,7 +1124,7 @@ static snd_pcm_uframes_t skl_platform_pcm_pointer
(struct snd_pcm_substream *substream)
{
struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
- struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+ struct hdac_bus *bus = get_bus_ctx(substream);
unsigned int pos;
/*
@@ -1152,12 +1149,12 @@ static snd_pcm_uframes_t skl_platform_pcm_pointer
*/
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- pos = readl(ebus->bus.remap_addr + AZX_REG_VS_SDXDPIB_XBASE +
+ pos = readl(bus->remap_addr + AZX_REG_VS_SDXDPIB_XBASE +
(AZX_REG_VS_SDXDPIB_XINTERVAL *
hdac_stream(hstream)->index));
} else {
udelay(20);
- readl(ebus->bus.remap_addr +
+ readl(bus->remap_addr +
AZX_REG_VS_SDXDPIB_XBASE +
(AZX_REG_VS_SDXDPIB_XINTERVAL *
hdac_stream(hstream)->index));
@@ -1242,11 +1239,11 @@ static void skl_pcm_free(struct snd_pcm *pcm)
static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *dai = rtd->cpu_dai;
- struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+ struct hdac_bus *bus = dev_get_drvdata(dai->dev);
struct snd_pcm *pcm = rtd->pcm;
unsigned int size;
int retval = 0;
- struct skl *skl = ebus_to_skl(ebus);
+ struct skl *skl = bus_to_skl(bus);
if (dai->driver->playback.channels_min ||
dai->driver->capture.channels_min) {
@@ -1356,19 +1353,19 @@ static int skl_populate_modules(struct skl *skl)
static int skl_platform_soc_probe(struct snd_soc_component *component)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(component->dev);
- struct skl *skl = ebus_to_skl(ebus);
+ struct hdac_bus *bus = dev_get_drvdata(component->dev);
+ struct skl *skl = bus_to_skl(bus);
const struct skl_dsp_ops *ops;
int ret;
pm_runtime_get_sync(component->dev);
- if ((ebus_to_hbus(ebus))->ppcap) {
+ if (bus->ppcap) {
skl->component = component;
/* init debugfs */
skl->debugfs = skl_debugfs_init(skl);
- ret = skl_tplg_init(component, ebus);
+ ret = skl_tplg_init(component, bus);
if (ret < 0) {
dev_err(component->dev, "Failed to init topology!\n");
return ret;
@@ -1425,10 +1422,10 @@ static const struct snd_soc_component_driver skl_component = {
int skl_platform_register(struct device *dev)
{
int ret;
- struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
- struct skl *skl = ebus_to_skl(ebus);
struct snd_soc_dai_driver *dais;
int num_dais = ARRAY_SIZE(skl_platform_dai);
+ struct hdac_bus *bus = dev_get_drvdata(dev);
+ struct skl *skl = bus_to_skl(bus);
INIT_LIST_HEAD(&skl->ppl_list);
INIT_LIST_HEAD(&skl->bind_list);
@@ -1464,8 +1461,8 @@ err:
int skl_platform_unregister(struct device *dev)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
- struct skl *skl = ebus_to_skl(ebus);
+ struct hdac_bus *bus = dev_get_drvdata(dev);
+ struct skl *skl = bus_to_skl(bus);
struct skl_module_deferred_bind *modules, *tmp;
if (!list_empty(&skl->bind_list)) {
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.c b/sound/soc/intel/skylake/skl-sst-cldma.c
index d2b1d60fec02..5bc0d38da7e3 100644
--- a/sound/soc/intel/skylake/skl-sst-cldma.c
+++ b/sound/soc/intel/skylake/skl-sst-cldma.c
@@ -83,9 +83,9 @@ static void skl_cldma_stream_clear(struct sst_dsp *ctx)
/* Code loader helper APIs */
static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
struct snd_dma_buffer *dmab_data,
- u32 **bdlp, int size, int with_ioc)
+ __le32 **bdlp, int size, int with_ioc)
{
- u32 *bdl = *bdlp;
+ __le32 *bdl = *bdlp;
ctx->cl_dev.frags = 0;
while (size > 0) {
@@ -330,7 +330,7 @@ void skl_cldma_process_intr(struct sst_dsp *ctx)
int skl_cldma_prepare(struct sst_dsp *ctx)
{
int ret;
- u32 *bdl;
+ __le32 *bdl;
ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;
@@ -359,7 +359,7 @@ int skl_cldma_prepare(struct sst_dsp *ctx)
ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
return ret;
}
- bdl = (u32 *)ctx->cl_dev.dmab_bdl.area;
+ bdl = (__le32 *)ctx->cl_dev.dmab_bdl.area;
/* Allocate BDLs */
ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
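
Retyping the BDL as __le32 documents that these descriptors are consumed by the DMA engine in little-endian layout, so a big-endian CPU must byte-swap as it fills them (cpu_to_le32 on the write side). A userspace sketch building one entry with explicit little-endian stores — the 4-dword shape mirrors an HDA BDL entry, but treat the layout as illustrative:

#include <stdint.h>
#include <stdio.h>

/* store a CPU value as little-endian bytes, as cpu_to_le32() would
 * before the value is written into a __le32 descriptor field */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

int main(void)
{
	uint8_t bdl[4 * 4];	/* one entry: addr lo, addr hi, size, ioc */
	uint64_t addr = 0x12345678;
	uint32_t size = 4096;

	put_le32(&bdl[0],  (uint32_t)addr);
	put_le32(&bdl[4],  (uint32_t)(addr >> 32));
	put_le32(&bdl[8],  size);
	put_le32(&bdl[12], 1);	/* interrupt-on-completion flag */

	printf("size field bytes: %02x %02x %02x %02x\n",
	       bdl[8], bdl[9], bdl[10], bdl[11]);
	return 0;
}
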
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.h b/sound/soc/intel/skylake/skl-sst-cldma.h
index 5b730a1a0ae4..ec736921a083 100644
--- a/sound/soc/intel/skylake/skl-sst-cldma.h
+++ b/sound/soc/intel/skylake/skl-sst-cldma.h
@@ -203,7 +203,7 @@ struct sst_dsp;
struct skl_cl_dev_ops {
void (*cl_setup_bdle)(struct sst_dsp *ctx,
struct snd_dma_buffer *dmab_data,
- u32 **bdlp, int size, int with_ioc);
+ __le32 **bdlp, int size, int with_ioc);
void (*cl_setup_controller)(struct sst_dsp *ctx,
struct snd_dma_buffer *dmab_bdl,
unsigned int max_size, u32 page_count);
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index fcdc716754b6..2620d77729c5 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -108,6 +108,9 @@ static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
case snd_soc_dapm_aif_out:
case snd_soc_dapm_dai_out:
case snd_soc_dapm_switch:
+ case snd_soc_dapm_output:
+ case snd_soc_dapm_mux:
+
return false;
default:
return true;
@@ -934,7 +937,7 @@ static int skl_tplg_find_moduleid_from_uuid(struct skl *skl,
struct soc_bytes_ext *sb = (void *) k->private_value;
struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
struct skl_kpb_params *uuid_params, *params;
- struct hdac_bus *bus = ebus_to_hbus(skl_to_ebus(skl));
+ struct hdac_bus *bus = skl_to_bus(skl);
int i, size, module_id;
if (bc->set_params == SKL_PARAM_BIND && bc->max) {
@@ -3024,14 +3027,13 @@ void skl_cleanup_resources(struct skl *skl)
* information to the driver about module and pipeline parameters which DSP
* FW expects like ids, resource values, formats etc
*/
-static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
+static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index,
struct snd_soc_dapm_widget *w,
struct snd_soc_tplg_dapm_widget *tplg_w)
{
int ret;
- struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
- struct skl *skl = ebus_to_skl(ebus);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
+ struct skl *skl = bus_to_skl(bus);
struct skl_module_cfg *mconfig;
if (!tplg_w->priv.size)
@@ -3131,14 +3133,14 @@ static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
}
static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
+ int index,
struct snd_kcontrol_new *kctl,
struct snd_soc_tplg_ctl_hdr *hdr)
{
struct soc_bytes_ext *sb;
struct snd_soc_tplg_bytes_control *tplg_bc;
struct snd_soc_tplg_enum_control *tplg_ec;
- struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
struct soc_enum *se;
switch (hdr->ops.info) {
@@ -3619,12 +3621,11 @@ static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
return 0;
}
-static int skl_manifest_load(struct snd_soc_component *cmpnt,
+static int skl_manifest_load(struct snd_soc_component *cmpnt, int index,
struct snd_soc_tplg_manifest *manifest)
{
- struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
- struct skl *skl = ebus_to_skl(ebus);
+ struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
+ struct skl *skl = bus_to_skl(bus);
/* proceed only if we have private data defined */
if (manifest->priv.size == 0)
@@ -3713,12 +3714,11 @@ static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
/*
* SKL topology init routine
*/
-int skl_tplg_init(struct snd_soc_component *component, struct hdac_ext_bus *ebus)
+int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus)
{
int ret;
const struct firmware *fw;
- struct hdac_bus *bus = ebus_to_hbus(ebus);
- struct skl *skl = ebus_to_skl(ebus);
+ struct skl *skl = bus_to_skl(bus);
struct skl_pipeline *ppl;
ret = request_firmware(&fw, skl->tplg_name, bus->dev);
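
All the topology callbacks touched here (widget_load, control_load, manifest_load, dai_load) gain an index argument so one component can host several topology loads and tell them apart. A minimal sketch of threading such an index through an ops table — the struct below is invented, not the ASoC API:

#include <stdio.h>

/* illustrative ops table with the extra index parameter */
struct tplg_ops {
	int (*widget_load)(void *comp, int index, const char *name);
	int (*manifest_load)(void *comp, int index);
};

static int my_widget_load(void *comp, int index, const char *name)
{
	(void)comp;
	printf("tplg %d: widget %s\n", index, name);
	return 0;
}

static int my_manifest_load(void *comp, int index)
{
	(void)comp;
	printf("tplg %d: manifest\n", index);
	return 0;
}

int main(void)
{
	struct tplg_ops ops = { my_widget_load, my_manifest_load };

	/* the caller passes the load index through every callback */
	ops.manifest_load(NULL, 0);
	ops.widget_load(NULL, 0, "cpr");
	return 0;
}
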
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index 6d7e0569695f..82282cac9751 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -458,9 +458,9 @@ enum skl_channel {
static inline struct skl *get_skl_ctx(struct device *dev)
{
- struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
+ struct hdac_bus *bus = dev_get_drvdata(dev);
- return ebus_to_skl(ebus);
+ return bus_to_skl(bus);
}
int skl_tplg_be_update_params(struct snd_soc_dai *dai,
@@ -470,7 +470,7 @@ int skl_dsp_set_dma_control(struct skl_sst *ctx, u32 *caps,
void skl_tplg_set_be_dmic_config(struct snd_soc_dai *dai,
struct skl_pipe_params *params, int stream);
int skl_tplg_init(struct snd_soc_component *component,
- struct hdac_ext_bus *ebus);
+		struct hdac_bus *bus);
struct skl_module_cfg *skl_tplg_fe_get_cpr_module(
struct snd_soc_dai *dai, int stream);
int skl_tplg_update_pipe_params(struct device *dev,
@@ -512,8 +512,9 @@ int skl_pcm_host_dma_prepare(struct device *dev,
int skl_pcm_link_dma_prepare(struct device *dev,
struct skl_pipe_params *params);
-int skl_dai_load(struct snd_soc_component *cmp,
- struct snd_soc_dai_driver *pcm_dai);
+int skl_dai_load(struct snd_soc_component *cmp, int index,
+ struct snd_soc_dai_driver *dai_drv,
+ struct snd_soc_tplg_pcm *pcm, struct snd_soc_dai *dai);
void skl_tplg_add_moduleid_in_bind_params(struct skl *skl,
struct snd_soc_dapm_widget *w);
#endif
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index f0d9793f872a..dce649485649 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -29,6 +29,7 @@
#include <linux/delay.h>
#include <sound/pcm.h>
#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
#include <sound/hda_register.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
@@ -36,8 +37,6 @@
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
-static struct skl_machine_pdata skl_dmic_data;
-
/*
* initialize the PCI registers
*/
@@ -54,7 +53,7 @@ static void skl_update_pci_byte(struct pci_dev *pci, unsigned int reg,
static void skl_init_pci(struct skl *skl)
{
- struct hdac_ext_bus *ebus = &skl->ebus;
+ struct hdac_bus *bus = skl_to_bus(skl);
/*
* Clear bits 0-2 of PCI register TCSEL (at offset 0x44)
@@ -63,7 +62,7 @@ static void skl_init_pci(struct skl *skl)
* codecs.
* The PCI register TCSEL is defined in the Intel manuals.
*/
- dev_dbg(ebus_to_hbus(ebus)->dev, "Clearing TCSEL\n");
+ dev_dbg(bus->dev, "Clearing TCSEL\n");
skl_update_pci_byte(skl->pci, AZX_PCIREG_TCSEL, 0x07, 0);
}
@@ -103,8 +102,7 @@ static void skl_enable_miscbdcge(struct device *dev, bool enable)
static void skl_clock_power_gating(struct device *dev, bool enable)
{
struct pci_dev *pci = to_pci_dev(dev);
- struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = pci_get_drvdata(pci);
u32 val;
/* Update PDCGE bit of CGCTL register */
@@ -127,7 +125,6 @@ static void skl_clock_power_gating(struct device *dev, bool enable)
*/
static int skl_init_chip(struct hdac_bus *bus, bool full_reset)
{
- struct hdac_ext_bus *ebus = hbus_to_ebus(bus);
struct hdac_ext_link *hlink;
int ret;
@@ -135,7 +132,7 @@ static int skl_init_chip(struct hdac_bus *bus, bool full_reset)
ret = snd_hdac_bus_init_chip(bus, full_reset);
/* Reset stream-to-link mapping */
- list_for_each_entry(hlink, &ebus->hlink_list, list)
+ list_for_each_entry(hlink, &bus->hlink_list, list)
bus->io_ops->reg_writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);
skl_enable_miscbdcge(bus->dev, true);
@@ -146,8 +143,7 @@ static int skl_init_chip(struct hdac_bus *bus, bool full_reset)
void skl_update_d0i3c(struct device *dev, bool enable)
{
struct pci_dev *pci = to_pci_dev(dev);
- struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = pci_get_drvdata(pci);
u8 reg;
int timeout = 50;
@@ -197,8 +193,7 @@ static void skl_stream_update(struct hdac_bus *bus, struct hdac_stream *hstr)
static irqreturn_t skl_interrupt(int irq, void *dev_id)
{
- struct hdac_ext_bus *ebus = dev_id;
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = dev_id;
u32 status;
if (!pm_runtime_active(bus->dev))
@@ -227,8 +222,7 @@ static irqreturn_t skl_interrupt(int irq, void *dev_id)
static irqreturn_t skl_threaded_handler(int irq, void *dev_id)
{
- struct hdac_ext_bus *ebus = dev_id;
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = dev_id;
u32 status;
status = snd_hdac_chip_readl(bus, INTSTS);
@@ -238,16 +232,15 @@ static irqreturn_t skl_threaded_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int skl_acquire_irq(struct hdac_ext_bus *ebus, int do_disconnect)
+static int skl_acquire_irq(struct hdac_bus *bus, int do_disconnect)
{
- struct skl *skl = ebus_to_skl(ebus);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct skl *skl = bus_to_skl(bus);
int ret;
ret = request_threaded_irq(skl->pci->irq, skl_interrupt,
skl_threaded_handler,
IRQF_SHARED,
- KBUILD_MODNAME, ebus);
+ KBUILD_MODNAME, bus);
if (ret) {
dev_err(bus->dev,
"unable to grab IRQ %d, disabling device\n",
@@ -264,21 +257,20 @@ static int skl_acquire_irq(struct hdac_ext_bus *ebus, int do_disconnect)
static int skl_suspend_late(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
- struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
- struct skl *skl = ebus_to_skl(ebus);
+ struct hdac_bus *bus = pci_get_drvdata(pci);
+ struct skl *skl = bus_to_skl(bus);
return skl_suspend_late_dsp(skl);
}
#ifdef CONFIG_PM
-static int _skl_suspend(struct hdac_ext_bus *ebus)
+static int _skl_suspend(struct hdac_bus *bus)
{
- struct skl *skl = ebus_to_skl(ebus);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct skl *skl = bus_to_skl(bus);
struct pci_dev *pci = to_pci_dev(bus->dev);
int ret;
- snd_hdac_ext_bus_link_power_down_all(ebus);
+ snd_hdac_ext_bus_link_power_down_all(bus);
ret = skl_suspend_dsp(skl);
if (ret < 0)
@@ -295,10 +287,9 @@ static int _skl_suspend(struct hdac_ext_bus *ebus)
return 0;
}
-static int _skl_resume(struct hdac_ext_bus *ebus)
+static int _skl_resume(struct hdac_bus *bus)
{
- struct skl *skl = ebus_to_skl(ebus);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct skl *skl = bus_to_skl(bus);
skl_init_pci(skl);
skl_init_chip(bus, true);
@@ -314,9 +305,8 @@ static int _skl_resume(struct hdac_ext_bus *ebus)
static int skl_suspend(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
- struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
- struct skl *skl = ebus_to_skl(ebus);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = pci_get_drvdata(pci);
+ struct skl *skl = bus_to_skl(bus);
int ret = 0;
/*
@@ -325,15 +315,15 @@ static int skl_suspend(struct device *dev)
*/
if (skl->supend_active) {
/* turn off the links and stop the CORB/RIRB DMA if it is On */
- snd_hdac_ext_bus_link_power_down_all(ebus);
+ snd_hdac_ext_bus_link_power_down_all(bus);
- if (ebus->cmd_dma_state)
- snd_hdac_bus_stop_cmd_io(&ebus->bus);
+ if (bus->cmd_dma_state)
+ snd_hdac_bus_stop_cmd_io(bus);
enable_irq_wake(bus->irq);
pci_save_state(pci);
} else {
- ret = _skl_suspend(ebus);
+ ret = _skl_suspend(bus);
if (ret < 0)
return ret;
skl->skl_sst->fw_loaded = false;
@@ -352,9 +342,8 @@ static int skl_suspend(struct device *dev)
static int skl_resume(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
- struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
- struct skl *skl = ebus_to_skl(ebus);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = pci_get_drvdata(pci);
+ struct skl *skl = bus_to_skl(bus);
struct hdac_ext_link *hlink = NULL;
int ret;
@@ -374,32 +363,32 @@ static int skl_resume(struct device *dev)
*/
if (skl->supend_active) {
pci_restore_state(pci);
- snd_hdac_ext_bus_link_power_up_all(ebus);
+ snd_hdac_ext_bus_link_power_up_all(bus);
disable_irq_wake(bus->irq);
/*
* turn On the links which are On before active suspend
* and start the CORB/RIRB DMA if On before
* active suspend.
*/
- list_for_each_entry(hlink, &ebus->hlink_list, list) {
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
if (hlink->ref_count)
snd_hdac_ext_bus_link_power_up(hlink);
}
- if (ebus->cmd_dma_state)
- snd_hdac_bus_init_cmd_io(&ebus->bus);
ret = 0;
+ if (bus->cmd_dma_state)
+ snd_hdac_bus_init_cmd_io(bus);
} else {
- ret = _skl_resume(ebus);
+ ret = _skl_resume(bus);
/* turn off the links which are off before suspend */
- list_for_each_entry(hlink, &ebus->hlink_list, list) {
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
if (!hlink->ref_count)
snd_hdac_ext_bus_link_power_down(hlink);
}
- if (!ebus->cmd_dma_state)
- snd_hdac_bus_stop_cmd_io(&ebus->bus);
+ if (!bus->cmd_dma_state)
+ snd_hdac_bus_stop_cmd_io(bus);
}
return ret;
@@ -410,23 +399,21 @@ static int skl_resume(struct device *dev)
static int skl_runtime_suspend(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
- struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = pci_get_drvdata(pci);
dev_dbg(bus->dev, "in %s\n", __func__);
- return _skl_suspend(ebus);
+ return _skl_suspend(bus);
}
static int skl_runtime_resume(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
- struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = pci_get_drvdata(pci);
dev_dbg(bus->dev, "in %s\n", __func__);
- return _skl_resume(ebus);
+ return _skl_resume(bus);
}
#endif /* CONFIG_PM */
@@ -439,20 +426,19 @@ static const struct dev_pm_ops skl_pm = {
/*
* destructor
*/
-static int skl_free(struct hdac_ext_bus *ebus)
+static int skl_free(struct hdac_bus *bus)
{
- struct skl *skl = ebus_to_skl(ebus);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct skl *skl = bus_to_skl(bus);
skl->init_done = 0; /* to be sure */
- snd_hdac_ext_stop_streams(ebus);
+ snd_hdac_ext_stop_streams(bus);
if (bus->irq >= 0)
- free_irq(bus->irq, (void *)ebus);
+ free_irq(bus->irq, (void *)bus);
snd_hdac_bus_free_stream_pages(bus);
- snd_hdac_stream_free_all(ebus);
- snd_hdac_link_free_all(ebus);
+ snd_hdac_stream_free_all(bus);
+ snd_hdac_link_free_all(bus);
if (bus->remap_addr)
iounmap(bus->remap_addr);
@@ -460,11 +446,11 @@ static int skl_free(struct hdac_ext_bus *ebus)
pci_release_regions(skl->pci);
pci_disable_device(skl->pci);
- snd_hdac_ext_bus_exit(ebus);
+ snd_hdac_ext_bus_exit(bus);
cancel_work_sync(&skl->probe_work);
if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
- snd_hdac_i915_exit(&ebus->bus);
+ snd_hdac_i915_exit(bus);
return 0;
}
@@ -488,8 +474,8 @@ static struct skl_ssp_clk skl_ssp_clks[] = {
static int skl_find_machine(struct skl *skl, void *driver_data)
{
+ struct hdac_bus *bus = skl_to_bus(skl);
struct snd_soc_acpi_mach *mach = driver_data;
- struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
struct skl_machine_pdata *pdata;
mach = snd_soc_acpi_find_machine(mach);
@@ -500,17 +486,19 @@ static int skl_find_machine(struct skl *skl, void *driver_data)
skl->mach = mach;
skl->fw_name = mach->fw_filename;
- pdata = skl->mach->pdata;
+ pdata = mach->pdata;
- if (mach->pdata)
+ if (pdata) {
skl->use_tplg_pcm = pdata->use_tplg_pcm;
+ pdata->dmic_num = skl_get_dmic_geo(skl);
+ }
return 0;
}
static int skl_machine_device_register(struct skl *skl)
{
- struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+ struct hdac_bus *bus = skl_to_bus(skl);
struct snd_soc_acpi_mach *mach = skl->mach;
struct platform_device *pdev;
int ret;
@@ -544,7 +532,7 @@ static void skl_machine_device_unregister(struct skl *skl)
static int skl_dmic_device_register(struct skl *skl)
{
- struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+ struct hdac_bus *bus = skl_to_bus(skl);
struct platform_device *pdev;
int ret;
@@ -643,12 +631,13 @@ static void skl_clock_device_unregister(struct skl *skl)
/*
* Probe the given codec address
*/
-static int probe_codec(struct hdac_ext_bus *ebus, int addr)
+static int probe_codec(struct hdac_bus *bus, int addr)
{
- struct hdac_bus *bus = ebus_to_hbus(ebus);
unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
unsigned int res = -1;
+ struct skl *skl = bus_to_skl(bus);
+ struct hdac_device *hdev;
mutex_lock(&bus->cmd_mutex);
snd_hdac_bus_send_cmd(bus, cmd);
@@ -658,13 +647,16 @@ static int probe_codec(struct hdac_ext_bus *ebus, int addr)
return -EIO;
dev_dbg(bus->dev, "codec #%d probed OK\n", addr);
- return snd_hdac_ext_bus_device_init(ebus, addr);
+ hdev = devm_kzalloc(&skl->pci->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ return snd_hdac_ext_bus_device_init(bus, addr, hdev);
}
/* Codec initialization */
-static void skl_codec_create(struct hdac_ext_bus *ebus)
+static void skl_codec_create(struct hdac_bus *bus)
{
- struct hdac_bus *bus = ebus_to_hbus(ebus);
int c, max_slots;
max_slots = HDA_MAX_CODECS;
@@ -672,7 +664,7 @@ static void skl_codec_create(struct hdac_ext_bus *ebus)
/* First try to probe all given codec slots */
for (c = 0; c < max_slots; c++) {
if ((bus->codec_mask & (1 << c))) {
- if (probe_codec(ebus, c) < 0) {
+ if (probe_codec(bus, c) < 0) {
/*
* Some BIOSen give you wrong codec addresses
* that don't exist
@@ -722,8 +714,7 @@ static int skl_i915_init(struct hdac_bus *bus)
static void skl_probe_work(struct work_struct *work)
{
struct skl *skl = container_of(work, struct skl, probe_work);
- struct hdac_ext_bus *ebus = &skl->ebus;
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = skl_to_bus(skl);
struct hdac_ext_link *hlink = NULL;
int err;
@@ -744,7 +735,7 @@ static void skl_probe_work(struct work_struct *work)
dev_info(bus->dev, "no hda codecs found!\n");
/* create codec instances */
- skl_codec_create(ebus);
+ skl_codec_create(bus);
/* register platform dai and controls */
err = skl_platform_register(bus->dev);
@@ -773,8 +764,8 @@ static void skl_probe_work(struct work_struct *work)
/*
* we are done probing so decrement link counts
*/
- list_for_each_entry(hlink, &ebus->hlink_list, list)
- snd_hdac_ext_bus_link_put(ebus, hlink);
+ list_for_each_entry(hlink, &bus->hlink_list, list)
+ snd_hdac_ext_bus_link_put(bus, hlink);
/* configure PM */
pm_runtime_put_noidle(bus->dev);
@@ -796,7 +787,7 @@ static int skl_create(struct pci_dev *pci,
struct skl **rskl)
{
struct skl *skl;
- struct hdac_ext_bus *ebus;
+ struct hdac_bus *bus;
int err;
@@ -811,23 +802,22 @@ static int skl_create(struct pci_dev *pci,
pci_disable_device(pci);
return -ENOMEM;
}
- ebus = &skl->ebus;
- snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops);
- ebus->bus.use_posbuf = 1;
+
+ bus = skl_to_bus(skl);
+ snd_hdac_ext_bus_init(bus, &pci->dev, &bus_core_ops, io_ops, NULL);
+ bus->use_posbuf = 1;
skl->pci = pci;
INIT_WORK(&skl->probe_work, skl_probe_work);
-
- ebus->bus.bdl_pos_adj = 0;
+ bus->bdl_pos_adj = 0;
*rskl = skl;
return 0;
}
-static int skl_first_init(struct hdac_ext_bus *ebus)
+static int skl_first_init(struct hdac_bus *bus)
{
- struct skl *skl = ebus_to_skl(ebus);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct skl *skl = bus_to_skl(bus);
struct pci_dev *pci = skl->pci;
int err;
unsigned short gcap;
@@ -848,7 +838,7 @@ static int skl_first_init(struct hdac_ext_bus *ebus)
snd_hdac_bus_parse_capabilities(bus);
- if (skl_acquire_irq(ebus, 0) < 0)
+ if (skl_acquire_irq(bus, 0) < 0)
return -EBUSY;
pci_set_master(pci);
@@ -872,14 +862,14 @@ static int skl_first_init(struct hdac_ext_bus *ebus)
if (!pb_streams && !cp_streams)
return -EIO;
- ebus->num_streams = cp_streams + pb_streams;
+ bus->num_streams = cp_streams + pb_streams;
/* initialize streams */
snd_hdac_ext_stream_init_all
- (ebus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
+ (bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
start_idx = cp_streams;
snd_hdac_ext_stream_init_all
- (ebus, start_idx, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);
+ (bus, start_idx, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);
err = snd_hdac_bus_alloc_stream_pages(bus);
if (err < 0)
@@ -895,7 +885,6 @@ static int skl_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
struct skl *skl;
- struct hdac_ext_bus *ebus = NULL;
struct hdac_bus *bus = NULL;
int err;
@@ -904,10 +893,9 @@ static int skl_probe(struct pci_dev *pci,
if (err < 0)
return err;
- ebus = &skl->ebus;
- bus = ebus_to_hbus(ebus);
+ bus = skl_to_bus(skl);
- err = skl_first_init(ebus);
+ err = skl_first_init(bus);
if (err < 0)
goto out_free;
@@ -928,9 +916,7 @@ static int skl_probe(struct pci_dev *pci,
skl_nhlt_update_topology_bin(skl);
- pci_set_drvdata(skl->pci, ebus);
-
- skl_dmic_data.dmic_num = skl_get_dmic_geo(skl);
+ pci_set_drvdata(skl->pci, bus);
/* check if dsp is there */
if (bus->ppcap) {
@@ -952,7 +938,7 @@ static int skl_probe(struct pci_dev *pci,
skl->skl_sst->clock_power_gating = skl_clock_power_gating;
}
if (bus->mlcap)
- snd_hdac_ext_bus_get_ml_capabilities(ebus);
+ snd_hdac_ext_bus_get_ml_capabilities(bus);
snd_hdac_bus_stop_chip(bus);
@@ -972,31 +958,30 @@ out_clk_free:
out_nhlt_free:
skl_nhlt_free(skl->nhlt);
out_free:
- skl_free(ebus);
+ skl_free(bus);
return err;
}
static void skl_shutdown(struct pci_dev *pci)
{
- struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
- struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct hdac_bus *bus = pci_get_drvdata(pci);
struct hdac_stream *s;
struct hdac_ext_stream *stream;
struct skl *skl;
- if (ebus == NULL)
+ if (!bus)
return;
- skl = ebus_to_skl(ebus);
+ skl = bus_to_skl(bus);
if (!skl->init_done)
return;
- snd_hdac_ext_stop_streams(ebus);
+ snd_hdac_ext_stop_streams(bus);
list_for_each_entry(s, &bus->stream_list, list) {
stream = stream_to_hdac_ext_stream(s);
- snd_hdac_ext_stream_decouple(ebus, stream, false);
+ snd_hdac_ext_stream_decouple(bus, stream, false);
}
snd_hdac_bus_stop_chip(bus);
@@ -1004,15 +989,15 @@ static void skl_shutdown(struct pci_dev *pci)
static void skl_remove(struct pci_dev *pci)
{
- struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
- struct skl *skl = ebus_to_skl(ebus);
+ struct hdac_bus *bus = pci_get_drvdata(pci);
+ struct skl *skl = bus_to_skl(bus);
release_firmware(skl->tplg);
pm_runtime_get_noresume(&pci->dev);
/* codec removal, invoke bus_device_remove */
- snd_hdac_ext_bus_device_remove(ebus);
+ snd_hdac_ext_bus_device_remove(bus);
skl->debugfs = NULL;
skl_platform_unregister(&pci->dev);
@@ -1022,176 +1007,27 @@ static void skl_remove(struct pci_dev *pci)
skl_clock_device_unregister(skl);
skl_nhlt_remove_sysfs(skl);
skl_nhlt_free(skl->nhlt);
- skl_free(ebus);
+ skl_free(bus);
dev_set_drvdata(&pci->dev, NULL);
}
-static struct snd_soc_acpi_codecs skl_codecs = {
- .num_codecs = 1,
- .codecs = {"10508825"}
-};
-
-static struct snd_soc_acpi_codecs kbl_codecs = {
- .num_codecs = 1,
- .codecs = {"10508825"}
-};
-
-static struct snd_soc_acpi_codecs bxt_codecs = {
- .num_codecs = 1,
- .codecs = {"MX98357A"}
-};
-
-static struct snd_soc_acpi_codecs kbl_poppy_codecs = {
- .num_codecs = 1,
- .codecs = {"10EC5663"}
-};
-
-static struct snd_soc_acpi_codecs kbl_5663_5514_codecs = {
- .num_codecs = 2,
- .codecs = {"10EC5663", "10EC5514"}
-};
-
-static struct snd_soc_acpi_codecs kbl_7219_98357_codecs = {
- .num_codecs = 1,
- .codecs = {"MX98357A"}
-};
-
-static struct skl_machine_pdata cnl_pdata = {
- .use_tplg_pcm = true,
-};
-
-static struct snd_soc_acpi_mach sst_skl_devdata[] = {
- {
- .id = "INT343A",
- .drv_name = "skl_alc286s_i2s",
- .fw_filename = "intel/dsp_fw_release.bin",
- },
- {
- .id = "INT343B",
- .drv_name = "skl_n88l25_s4567",
- .fw_filename = "intel/dsp_fw_release.bin",
- .machine_quirk = snd_soc_acpi_codec_list,
- .quirk_data = &skl_codecs,
- .pdata = &skl_dmic_data
- },
- {
- .id = "MX98357A",
- .drv_name = "skl_n88l25_m98357a",
- .fw_filename = "intel/dsp_fw_release.bin",
- .machine_quirk = snd_soc_acpi_codec_list,
- .quirk_data = &skl_codecs,
- .pdata = &skl_dmic_data
- },
- {}
-};
-
-static struct snd_soc_acpi_mach sst_bxtp_devdata[] = {
- {
- .id = "INT343A",
- .drv_name = "bxt_alc298s_i2s",
- .fw_filename = "intel/dsp_fw_bxtn.bin",
- },
- {
- .id = "DLGS7219",
- .drv_name = "bxt_da7219_max98357a_i2s",
- .fw_filename = "intel/dsp_fw_bxtn.bin",
- .machine_quirk = snd_soc_acpi_codec_list,
- .quirk_data = &bxt_codecs,
- },
- {}
-};
-
-static struct snd_soc_acpi_mach sst_kbl_devdata[] = {
- {
- .id = "INT343A",
- .drv_name = "kbl_alc286s_i2s",
- .fw_filename = "intel/dsp_fw_kbl.bin",
- },
- {
- .id = "INT343B",
- .drv_name = "kbl_n88l25_s4567",
- .fw_filename = "intel/dsp_fw_kbl.bin",
- .machine_quirk = snd_soc_acpi_codec_list,
- .quirk_data = &kbl_codecs,
- .pdata = &skl_dmic_data
- },
- {
- .id = "MX98357A",
- .drv_name = "kbl_n88l25_m98357a",
- .fw_filename = "intel/dsp_fw_kbl.bin",
- .machine_quirk = snd_soc_acpi_codec_list,
- .quirk_data = &kbl_codecs,
- .pdata = &skl_dmic_data
- },
- {
- .id = "MX98927",
- .drv_name = "kbl_r5514_5663_max",
- .fw_filename = "intel/dsp_fw_kbl.bin",
- .machine_quirk = snd_soc_acpi_codec_list,
- .quirk_data = &kbl_5663_5514_codecs,
- .pdata = &skl_dmic_data
- },
- {
- .id = "MX98927",
- .drv_name = "kbl_rt5663_m98927",
- .fw_filename = "intel/dsp_fw_kbl.bin",
- .machine_quirk = snd_soc_acpi_codec_list,
- .quirk_data = &kbl_poppy_codecs,
- .pdata = &skl_dmic_data
- },
- {
- .id = "10EC5663",
- .drv_name = "kbl_rt5663",
- .fw_filename = "intel/dsp_fw_kbl.bin",
- },
- {
- .id = "DLGS7219",
- .drv_name = "kbl_da7219_max98357a",
- .fw_filename = "intel/dsp_fw_kbl.bin",
- .machine_quirk = snd_soc_acpi_codec_list,
- .quirk_data = &kbl_7219_98357_codecs,
- .pdata = &skl_dmic_data
- },
-
- {}
-};
-
-static struct snd_soc_acpi_mach sst_glk_devdata[] = {
- {
- .id = "INT343A",
- .drv_name = "glk_alc298s_i2s",
- .fw_filename = "intel/dsp_fw_glk.bin",
- },
- {}
-};
-
-static const struct snd_soc_acpi_mach sst_cnl_devdata[] = {
- {
- .id = "INT34C2",
- .drv_name = "cnl_rt274",
- .fw_filename = "intel/dsp_fw_cnl.bin",
- .pdata = &cnl_pdata,
- },
- {}
-};
-
/* PCI IDs */
static const struct pci_device_id skl_ids[] = {
/* Sunrise Point-LP */
{ PCI_DEVICE(0x8086, 0x9d70),
- .driver_data = (unsigned long)&sst_skl_devdata},
+ .driver_data = (unsigned long)&snd_soc_acpi_intel_skl_machines},
/* BXT-P */
{ PCI_DEVICE(0x8086, 0x5a98),
- .driver_data = (unsigned long)&sst_bxtp_devdata},
+ .driver_data = (unsigned long)&snd_soc_acpi_intel_bxt_machines},
/* KBL */
{ PCI_DEVICE(0x8086, 0x9D71),
- .driver_data = (unsigned long)&sst_kbl_devdata},
+ .driver_data = (unsigned long)&snd_soc_acpi_intel_kbl_machines},
/* GLK */
{ PCI_DEVICE(0x8086, 0x3198),
- .driver_data = (unsigned long)&sst_glk_devdata},
+ .driver_data = (unsigned long)&snd_soc_acpi_intel_glk_machines},
/* CNL */
{ PCI_DEVICE(0x8086, 0x9dc8),
- .driver_data = (unsigned long)&sst_cnl_devdata},
+ .driver_data = (unsigned long)&snd_soc_acpi_intel_cnl_machines},
{ 0, }
};
MODULE_DEVICE_TABLE(pci, skl_ids);
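A note on the table above: the shared snd_soc_acpi_intel_*_machines arrays are handed to the driver through driver_data, stored as an unsigned long. A hedged sketch of how that pointer round-trips in the probe path (illustrative, not the driver's literal code; pci_id is the probe argument):

	/* Sketch: recover the machine table stashed in the PCI id entry.
	 * The cast mirrors what skl_find_machine() receives as driver_data. */
	const struct snd_soc_acpi_mach *mach =
		(const struct snd_soc_acpi_mach *)pci_id->driver_data;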
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index 0d5375cbcf6e..78aa8bdcb619 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -71,7 +71,7 @@ struct skl_fw_config {
};
struct skl {
- struct hdac_ext_bus ebus;
+ struct hdac_bus hbus;
struct pci_dev *pci;
unsigned int init_done:1; /* delayed init status */
@@ -105,9 +105,8 @@ struct skl {
struct snd_soc_acpi_mach *mach;
};
-#define skl_to_ebus(s) (&(s)->ebus)
-#define ebus_to_skl(sbus) \
- container_of(sbus, struct skl, sbus)
+#define skl_to_bus(s) (&(s)->hbus)
+#define bus_to_skl(bus) container_of(bus, struct skl, hbus)
/* to pass dai dma data */
struct skl_dma_params {
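The new skl_to_bus()/bus_to_skl() pair works because struct hdac_bus is embedded directly in struct skl, so both conversions are pure pointer arithmetic. A self-contained sketch of the container_of pattern, with illustrative names:

	#include <stddef.h>

	struct bus  { int irq; };
	struct card { struct bus hbus; int init_done; };

	/* outer -> inner: just take the member's address */
	#define card_to_bus(c) (&(c)->hbus)
	/* inner -> outer: subtract the member's offset, as container_of() does */
	#define bus_to_card(b) \
		((struct card *)((char *)(b) - offsetof(struct card, hbus)))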
diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.c b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
index 51ec4ff6ed95..697aa50aff9a 100644
--- a/sound/soc/mediatek/common/mtk-afe-platform-driver.c
+++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
@@ -15,20 +15,12 @@
int mtk_afe_combine_sub_dai(struct mtk_base_afe *afe)
{
- struct snd_soc_dai_driver *sub_dai_drivers;
+ struct mtk_base_afe_dai *dai;
size_t num_dai_drivers = 0, dai_idx = 0;
- int i;
-
- if (!afe->sub_dais) {
- dev_err(afe->dev, "%s(), sub_dais == NULL\n", __func__);
- return -EINVAL;
- }
/* calculate total dai driver size */
- for (i = 0; i < afe->num_sub_dais; i++) {
- if (afe->sub_dais[i].dai_drivers &&
- afe->sub_dais[i].num_dai_drivers != 0)
- num_dai_drivers += afe->sub_dais[i].num_dai_drivers;
+ list_for_each_entry(dai, &afe->sub_dais, list) {
+ num_dai_drivers += dai->num_dai_drivers;
}
dev_info(afe->dev, "%s(), num of dai %zd\n", __func__, num_dai_drivers);
@@ -42,19 +34,14 @@ int mtk_afe_combine_sub_dai(struct mtk_base_afe *afe)
if (!afe->dai_drivers)
return -ENOMEM;
- for (i = 0; i < afe->num_sub_dais; i++) {
- if (afe->sub_dais[i].dai_drivers &&
- afe->sub_dais[i].num_dai_drivers != 0) {
- sub_dai_drivers = afe->sub_dais[i].dai_drivers;
- /* dai driver */
- memcpy(&afe->dai_drivers[dai_idx],
- sub_dai_drivers,
- afe->sub_dais[i].num_dai_drivers *
- sizeof(struct snd_soc_dai_driver));
- dai_idx += afe->sub_dais[i].num_dai_drivers;
- }
+ list_for_each_entry(dai, &afe->sub_dais, list) {
+ /* dai driver */
+ memcpy(&afe->dai_drivers[dai_idx],
+ dai->dai_drivers,
+ dai->num_dai_drivers *
+ sizeof(struct snd_soc_dai_driver));
+ dai_idx += dai->num_dai_drivers;
}
-
return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_combine_sub_dai);
@@ -62,28 +49,25 @@ EXPORT_SYMBOL_GPL(mtk_afe_combine_sub_dai);
int mtk_afe_add_sub_dai_control(struct snd_soc_component *component)
{
struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
- int i;
+ struct mtk_base_afe_dai *dai;
- if (!afe->sub_dais) {
- dev_err(afe->dev, "%s(), sub_dais == NULL\n", __func__);
- return -EINVAL;
- }
-
- for (i = 0; i < afe->num_sub_dais; i++) {
- if (afe->sub_dais[i].controls)
+ list_for_each_entry(dai, &afe->sub_dais, list) {
+ if (dai->controls)
snd_soc_add_component_controls(component,
- afe->sub_dais[i].controls,
- afe->sub_dais[i].num_controls);
+ dai->controls,
+ dai->num_controls);
- if (afe->sub_dais[i].dapm_widgets)
+ if (dai->dapm_widgets)
snd_soc_dapm_new_controls(&component->dapm,
- afe->sub_dais[i].dapm_widgets,
- afe->sub_dais[i].num_dapm_widgets);
-
- if (afe->sub_dais[i].dapm_routes)
+ dai->dapm_widgets,
+ dai->num_dapm_widgets);
+ }
+ /* add routes after all widgets are added */
+ list_for_each_entry(dai, &afe->sub_dais, list) {
+ if (dai->dapm_routes)
snd_soc_dapm_add_routes(&component->dapm,
- afe->sub_dais[i].dapm_routes,
- afe->sub_dais[i].num_dapm_routes);
+ dai->dapm_routes,
+ dai->num_dapm_routes);
}
snd_soc_dapm_new_widgets(component->dapm.card);
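mtk_afe_combine_sub_dai() now walks the sub-DAI list twice: one pass to size the flat dai_drivers array, one pass to fill it. A standalone sketch of that count-then-copy idiom, with illustrative types:

	#include <stdlib.h>
	#include <string.h>

	struct sub { const int *items; size_t n; struct sub *next; };

	static int *combine(const struct sub *head, size_t *total)
	{
		size_t n = 0, idx = 0;
		const struct sub *s;
		int *flat;

		for (s = head; s; s = s->next)	/* pass 1: size */
			n += s->n;
		flat = calloc(n, sizeof(*flat));
		if (!flat)
			return NULL;
		for (s = head; s; s = s->next) {	/* pass 2: copy */
			memcpy(flat + idx, s->items, s->n * sizeof(*flat));
			idx += s->n;
		}
		*total = n;
		return flat;
	}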
diff --git a/sound/soc/mediatek/common/mtk-base-afe.h b/sound/soc/mediatek/common/mtk-base-afe.h
index bcf562f029b6..bd8d5e0c6843 100644
--- a/sound/soc/mediatek/common/mtk-base-afe.h
+++ b/sound/soc/mediatek/common/mtk-base-afe.h
@@ -46,6 +46,7 @@ struct mtk_base_irq_data {
};
struct device;
+struct list_head;
struct mtk_base_afe_memif;
struct mtk_base_afe_irq;
struct mtk_base_afe_dai;
@@ -72,8 +73,7 @@ struct mtk_base_afe {
struct mtk_base_afe_irq *irqs;
int irqs_size;
- struct mtk_base_afe_dai *sub_dais;
- int num_sub_dais;
+ struct list_head sub_dais;
struct snd_soc_dai_driver *dai_drivers;
unsigned int num_dai_drivers;
@@ -110,6 +110,8 @@ struct mtk_base_afe_dai {
unsigned int num_dapm_widgets;
const struct snd_soc_dapm_route *dapm_routes;
unsigned int num_dapm_routes;
+
+ struct list_head list;
};
#endif
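One subtlety of the new struct list_head sub_dais: the register helpers use list_add(), which inserts at the head, so iteration visits sub-DAIs in reverse registration order. A tiny standalone sketch of that LIFO behavior:

	struct node { int id; struct node *next; };

	/* Mirrors list_add(): push at the head, so the last node
	 * registered is the first one visited when iterating. */
	static void push(struct node **head, struct node *n)
	{
		n->next = *head;
		*head = n;
	}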
diff --git a/sound/soc/mediatek/mt6797/mt6797-afe-common.h b/sound/soc/mediatek/mt6797/mt6797-afe-common.h
index 22eb7b455cf1..4eac9977b2b0 100644
--- a/sound/soc/mediatek/mt6797/mt6797-afe-common.h
+++ b/sound/soc/mediatek/mt6797/mt6797-afe-common.h
@@ -10,6 +10,7 @@
#define _MT_6797_AFE_COMMON_H_
#include <sound/soc.h>
+#include <linux/list.h>
#include <linux/regmap.h>
#include "../common/mtk-base-afe.h"
diff --git a/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c b/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
index 6c5dd9fc9976..192f4d7b37b6 100644
--- a/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
+++ b/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
@@ -733,6 +733,34 @@ static const struct snd_soc_component_driver mt6797_afe_component = {
.probe = mt6797_afe_component_probe,
};
+static int mt6797_dai_memif_register(struct mtk_base_afe *afe)
+{
+ struct mtk_base_afe_dai *dai;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mt6797_memif_dai_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mt6797_memif_dai_driver);
+
+ dai->dapm_widgets = mt6797_memif_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mt6797_memif_widgets);
+ dai->dapm_routes = mt6797_memif_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mt6797_memif_routes);
+ return 0;
+}
+
+typedef int (*dai_register_cb)(struct mtk_base_afe *);
+static const dai_register_cb dai_register_cbs[] = {
+ mt6797_dai_adda_register,
+ mt6797_dai_pcm_register,
+ mt6797_dai_hostless_register,
+ mt6797_dai_memif_register,
+};
+
static int mt6797_afe_pcm_dev_probe(struct platform_device *pdev)
{
struct mtk_base_afe *afe;
@@ -811,29 +839,24 @@ static int mt6797_afe_pcm_dev_probe(struct platform_device *pdev)
}
/* init sub_dais */
- afe->num_sub_dais = MT6797_DAI_NUM;
- afe->sub_dais = devm_kcalloc(dev, afe->num_sub_dais,
- sizeof(*afe->sub_dais),
- GFP_KERNEL);
- if (!afe->sub_dais)
- return -ENOMEM;
-
- mt6797_dai_adda_register(afe);
- mt6797_dai_pcm_register(afe);
- mt6797_dai_hostless_register(afe);
-
- afe->sub_dais[MT6797_MEMIF_DL1].dai_drivers = mt6797_memif_dai_driver;
- afe->sub_dais[MT6797_MEMIF_DL1].num_dai_drivers =
- ARRAY_SIZE(mt6797_memif_dai_driver);
- afe->sub_dais[MT6797_MEMIF_DL1].dapm_widgets = mt6797_memif_widgets;
- afe->sub_dais[MT6797_MEMIF_DL1].num_dapm_widgets =
- ARRAY_SIZE(mt6797_memif_widgets);
- afe->sub_dais[MT6797_MEMIF_DL1].dapm_routes = mt6797_memif_routes;
- afe->sub_dais[MT6797_MEMIF_DL1].num_dapm_routes =
- ARRAY_SIZE(mt6797_memif_routes);
+ INIT_LIST_HEAD(&afe->sub_dais);
+
+ for (i = 0; i < ARRAY_SIZE(dai_register_cbs); i++) {
+ ret = dai_register_cbs[i](afe);
+ if (ret) {
+ dev_warn(afe->dev, "dai register i %d fail, ret %d\n",
+ i, ret);
+ return ret;
+ }
+ }
/* init dai_driver and component_driver */
- mtk_afe_combine_sub_dai(afe);
+ ret = mtk_afe_combine_sub_dai(afe);
+ if (ret) {
+ dev_warn(afe->dev, "mtk_afe_combine_sub_dai fail, ret %d\n",
+ ret);
+ return ret;
+ }
afe->mtk_afe_hardware = &mt6797_afe_hardware;
afe->memif_fs = mt6797_memif_fs;
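The probe path above replaces the fixed-index sub_dais array with a table of register callbacks run in order, aborting on the first failure. A minimal standalone sketch of that table-of-callbacks pattern, with illustrative names:

	#include <stddef.h>

	typedef int (*reg_cb)(void *ctx);

	static int reg_adda(void *ctx)  { return 0; }	/* stand-ins for the */
	static int reg_memif(void *ctx) { return 0; }	/* *_register() calls */

	static const reg_cb cbs[] = { reg_adda, reg_memif };

	static int register_all(void *ctx)
	{
		size_t i;
		int ret;

		for (i = 0; i < sizeof(cbs) / sizeof(cbs[0]); i++) {
			ret = cbs[i](ctx);
			if (ret)
				return ret;	/* abort on first failure */
		}
		return 0;
	}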
diff --git a/sound/soc/mediatek/mt6797/mt6797-dai-adda.c b/sound/soc/mediatek/mt6797/mt6797-dai-adda.c
index ad083265ce94..0ac6409c6d61 100644
--- a/sound/soc/mediatek/mt6797/mt6797-dai-adda.c
+++ b/sound/soc/mediatek/mt6797/mt6797-dai-adda.c
@@ -383,14 +383,20 @@ static struct snd_soc_dai_driver mtk_dai_adda_driver[] = {
int mt6797_dai_adda_register(struct mtk_base_afe *afe)
{
- int id = MT6797_DAI_ADDA;
+ struct mtk_base_afe_dai *dai;
- afe->sub_dais[id].dai_drivers = mtk_dai_adda_driver;
- afe->sub_dais[id].num_dai_drivers = ARRAY_SIZE(mtk_dai_adda_driver);
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
- afe->sub_dais[id].dapm_widgets = mtk_dai_adda_widgets;
- afe->sub_dais[id].num_dapm_widgets = ARRAY_SIZE(mtk_dai_adda_widgets);
- afe->sub_dais[id].dapm_routes = mtk_dai_adda_routes;
- afe->sub_dais[id].num_dapm_routes = ARRAY_SIZE(mtk_dai_adda_routes);
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mtk_dai_adda_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_adda_driver);
+
+ dai->dapm_widgets = mtk_dai_adda_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_adda_widgets);
+ dai->dapm_routes = mtk_dai_adda_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_adda_routes);
return 0;
}
diff --git a/sound/soc/mediatek/mt6797/mt6797-dai-hostless.c b/sound/soc/mediatek/mt6797/mt6797-dai-hostless.c
index 4cf985b15a11..ed23e6a53b08 100644
--- a/sound/soc/mediatek/mt6797/mt6797-dai-hostless.c
+++ b/sound/soc/mediatek/mt6797/mt6797-dai-hostless.c
@@ -100,13 +100,19 @@ static struct snd_soc_dai_driver mtk_dai_hostless_driver[] = {
int mt6797_dai_hostless_register(struct mtk_base_afe *afe)
{
- int id = MT6797_DAI_HOSTLESS_LPBK;
+ struct mtk_base_afe_dai *dai;
- afe->sub_dais[id].dai_drivers = mtk_dai_hostless_driver;
- afe->sub_dais[id].num_dai_drivers = ARRAY_SIZE(mtk_dai_hostless_driver);
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
- afe->sub_dais[id].dapm_routes = mtk_dai_hostless_routes;
- afe->sub_dais[id].num_dapm_routes = ARRAY_SIZE(mtk_dai_hostless_routes);
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mtk_dai_hostless_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_hostless_driver);
+
+ dai->dapm_routes = mtk_dai_hostless_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_hostless_routes);
return 0;
}
diff --git a/sound/soc/mediatek/mt6797/mt6797-dai-pcm.c b/sound/soc/mediatek/mt6797/mt6797-dai-pcm.c
index 16d5b5067204..3136f0bc7827 100644
--- a/sound/soc/mediatek/mt6797/mt6797-dai-pcm.c
+++ b/sound/soc/mediatek/mt6797/mt6797-dai-pcm.c
@@ -298,15 +298,20 @@ static struct snd_soc_dai_driver mtk_dai_pcm_driver[] = {
int mt6797_dai_pcm_register(struct mtk_base_afe *afe)
{
- int id = MT6797_DAI_PCM_1;
+ struct mtk_base_afe_dai *dai;
- afe->sub_dais[id].dai_drivers = mtk_dai_pcm_driver;
- afe->sub_dais[id].num_dai_drivers = ARRAY_SIZE(mtk_dai_pcm_driver);
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
- afe->sub_dais[id].dapm_widgets = mtk_dai_pcm_widgets;
- afe->sub_dais[id].num_dapm_widgets = ARRAY_SIZE(mtk_dai_pcm_widgets);
- afe->sub_dais[id].dapm_routes = mtk_dai_pcm_routes;
- afe->sub_dais[id].num_dapm_routes = ARRAY_SIZE(mtk_dai_pcm_routes);
+ list_add(&dai->list, &afe->sub_dais);
+ dai->dai_drivers = mtk_dai_pcm_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_pcm_driver);
+
+ dai->dapm_widgets = mtk_dai_pcm_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_pcm_widgets);
+ dai->dapm_routes = mtk_dai_pcm_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_pcm_routes);
return 0;
}
diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig
new file mode 100644
index 000000000000..8af8bc358a90
--- /dev/null
+++ b/sound/soc/meson/Kconfig
@@ -0,0 +1,65 @@
+menu "ASoC support for Amlogic platforms"
+ depends on ARCH_MESON || COMPILE_TEST
+
+config SND_MESON_AXG_FIFO
+ tristate
+ select REGMAP_MMIO
+
+config SND_MESON_AXG_FRDDR
+ tristate "Amlogic AXG Playback FIFO support"
+ select SND_MESON_AXG_FIFO
+ help
+ Select Y or M to add support for the frontend playback interfaces
+ embedded in the Amlogic AXG SoC family
+
+config SND_MESON_AXG_TODDR
+ tristate "Amlogic AXG Capture FIFO support"
+ select SND_MESON_AXG_FIFO
+ help
+ Select Y or M to add support for the frontend capture interfaces
+ embedded in the Amlogic AXG SoC family
+
+config SND_MESON_AXG_TDM_FORMATTER
+ tristate
+ select REGMAP_MMIO
+
+config SND_MESON_AXG_TDM_INTERFACE
+ tristate
+ select SND_MESON_AXG_TDM_FORMATTER
+
+config SND_MESON_AXG_TDMIN
+ tristate "Amlogic AXG TDM Input Support"
+ select SND_MESON_AXG_TDM_FORMATTER
+ select SND_MESON_AXG_TDM_INTERFACE
+ help
+ Select Y or M to add support for TDM input formatter embedded
+ in the Amlogic AXG SoC family
+
+config SND_MESON_AXG_TDMOUT
+ tristate "Amlogic AXG TDM Output Support"
+ select SND_MESON_AXG_TDM_FORMATTER
+ select SND_MESON_AXG_TDM_INTERFACE
+ help
+ Select Y or M to add support for TDM output formatter embedded
+ in the Amlogic AXG SoC family
+
+config SND_MESON_AXG_SOUND_CARD
+ tristate "Amlogic AXG Sound Card Support"
+ select SND_MESON_AXG_TDM_INTERFACE
+ imply SND_MESON_AXG_FRDDR
+ imply SND_MESON_AXG_TODDR
+ imply SND_MESON_AXG_TDMIN
+ imply SND_MESON_AXG_TDMOUT
+ imply SND_MESON_AXG_SPDIFOUT
+ help
+ Select Y or M to add support for the AXG SoC sound card
+
+config SND_MESON_AXG_SPDIFOUT
+ tristate "Amlogic AXG SPDIF Output Support"
+ select SND_PCM_IEC958
+ imply SND_SOC_SPDIF
+ help
+ Select Y or M to add support for SPDIF output serializer embedded
+ in the Amlogic AXG SoC family
+
+endmenu
diff --git a/sound/soc/meson/Makefile b/sound/soc/meson/Makefile
new file mode 100644
index 000000000000..c5e003b093db
--- /dev/null
+++ b/sound/soc/meson/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: (GPL-2.0 OR MIT)
+
+snd-soc-meson-axg-fifo-objs := axg-fifo.o
+snd-soc-meson-axg-frddr-objs := axg-frddr.o
+snd-soc-meson-axg-toddr-objs := axg-toddr.o
+snd-soc-meson-axg-tdm-formatter-objs := axg-tdm-formatter.o
+snd-soc-meson-axg-tdm-interface-objs := axg-tdm-interface.o
+snd-soc-meson-axg-tdmin-objs := axg-tdmin.o
+snd-soc-meson-axg-tdmout-objs := axg-tdmout.o
+snd-soc-meson-axg-sound-card-objs := axg-card.o
+snd-soc-meson-axg-spdifout-objs := axg-spdifout.o
+
+obj-$(CONFIG_SND_MESON_AXG_FIFO) += snd-soc-meson-axg-fifo.o
+obj-$(CONFIG_SND_MESON_AXG_FRDDR) += snd-soc-meson-axg-frddr.o
+obj-$(CONFIG_SND_MESON_AXG_TODDR) += snd-soc-meson-axg-toddr.o
+obj-$(CONFIG_SND_MESON_AXG_TDM_FORMATTER) += snd-soc-meson-axg-tdm-formatter.o
+obj-$(CONFIG_SND_MESON_AXG_TDM_INTERFACE) += snd-soc-meson-axg-tdm-interface.o
+obj-$(CONFIG_SND_MESON_AXG_TDMIN) += snd-soc-meson-axg-tdmin.o
+obj-$(CONFIG_SND_MESON_AXG_TDMOUT) += snd-soc-meson-axg-tdmout.o
+obj-$(CONFIG_SND_MESON_AXG_SOUND_CARD) += snd-soc-meson-axg-sound-card.o
+obj-$(CONFIG_SND_MESON_AXG_SPDIFOUT) += snd-soc-meson-axg-spdifout.o
diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
new file mode 100644
index 000000000000..2914ba0d965b
--- /dev/null
+++ b/sound/soc/meson/axg-card.c
@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "axg-tdm.h"
+
+struct axg_card {
+ struct snd_soc_card card;
+ void **link_data;
+};
+
+struct axg_dai_link_tdm_mask {
+ u32 tx;
+ u32 rx;
+};
+
+struct axg_dai_link_tdm_data {
+ unsigned int mclk_fs;
+ unsigned int slots;
+ unsigned int slot_width;
+ u32 *tx_mask;
+ u32 *rx_mask;
+ struct axg_dai_link_tdm_mask *codec_masks;
+};
+
+#define PREFIX "amlogic,"
+
+static int axg_card_reallocate_links(struct axg_card *priv,
+ unsigned int num_links)
+{
+ struct snd_soc_dai_link *links;
+ void **ldata;
+
+ links = krealloc(priv->card.dai_link,
+ num_links * sizeof(*priv->card.dai_link),
+ GFP_KERNEL | __GFP_ZERO);
+ ldata = krealloc(priv->link_data,
+ num_links * sizeof(*priv->link_data),
+ GFP_KERNEL | __GFP_ZERO);
+
+ if (!links || !ldata) {
+ dev_err(priv->card.dev, "failed to allocate links\n");
+ return -ENOMEM;
+ }
+
+ priv->card.dai_link = links;
+ priv->link_data = ldata;
+ priv->card.num_links = num_links;
+ return 0;
+}
+
+static int axg_card_parse_dai(struct snd_soc_card *card,
+ struct device_node *node,
+ struct device_node **dai_of_node,
+ const char **dai_name)
+{
+ struct of_phandle_args args;
+ int ret;
+
+ if (!dai_name || !dai_of_node || !node)
+ return -EINVAL;
+
+ ret = of_parse_phandle_with_args(node, "sound-dai",
+ "#sound-dai-cells", 0, &args);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(card->dev, "can't parse dai %d\n", ret);
+ return ret;
+ }
+ *dai_of_node = args.np;
+
+ return snd_soc_get_dai_name(&args, dai_name);
+}
+
+static int axg_card_set_link_name(struct snd_soc_card *card,
+ struct snd_soc_dai_link *link,
+ const char *prefix)
+{
+ char *name = devm_kasprintf(card->dev, GFP_KERNEL, "%s.%s",
+ prefix, link->cpu_of_node->full_name);
+ if (!name)
+ return -ENOMEM;
+
+ link->name = name;
+ link->stream_name = name;
+
+ return 0;
+}
+
+static void axg_card_clean_references(struct axg_card *priv)
+{
+ struct snd_soc_card *card = &priv->card;
+ struct snd_soc_dai_link *link;
+ int i, j;
+
+ if (card->dai_link) {
+ for (i = 0; i < card->num_links; i++) {
+ link = &card->dai_link[i];
+ of_node_put(link->cpu_of_node);
+ for (j = 0; j < link->num_codecs; j++)
+ of_node_put(link->codecs[j].of_node);
+ }
+ }
+
+ if (card->aux_dev) {
+ for (i = 0; i < card->num_aux_devs; i++)
+ of_node_put(card->aux_dev[i].codec_of_node);
+ }
+
+ kfree(card->dai_link);
+ kfree(priv->link_data);
+}
+
+static int axg_card_add_aux_devices(struct snd_soc_card *card)
+{
+ struct device_node *node = card->dev->of_node;
+ struct snd_soc_aux_dev *aux;
+ int num, i;
+
+ num = of_count_phandle_with_args(node, "audio-aux-devs", NULL);
+ if (num == -ENOENT) {
+ /*
+ * It is ok to have no auxiliary devices, but for this card that
+ * is a strange situation. Let's warn the user about it.
+ */
+ dev_warn(card->dev, "card has no auxiliary devices\n");
+ return 0;
+ } else if (num < 0) {
+ dev_err(card->dev, "error getting auxiliary devices: %d\n",
+ num);
+ return num;
+ }
+
+ aux = devm_kcalloc(card->dev, num, sizeof(*aux), GFP_KERNEL);
+ if (!aux)
+ return -ENOMEM;
+ card->aux_dev = aux;
+ card->num_aux_devs = num;
+
+ for (i = 0; i < card->num_aux_devs; i++, aux++) {
+ aux->codec_of_node =
+ of_parse_phandle(node, "audio-aux-devs", i);
+ if (!aux->codec_of_node)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int axg_card_tdm_be_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct axg_card *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct axg_dai_link_tdm_data *be =
+ (struct axg_dai_link_tdm_data *)priv->link_data[rtd->num];
+ struct snd_soc_dai *codec_dai;
+ unsigned int mclk;
+ int ret, i;
+
+ if (be->mclk_fs) {
+ mclk = params_rate(params) * be->mclk_fs;
+
+ for (i = 0; i < rtd->num_codecs; i++) {
+ codec_dai = rtd->codec_dais[i];
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+ SND_SOC_CLOCK_IN);
+ if (ret && ret != -ENOTSUPP)
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_sysclk(rtd->cpu_dai, 0, mclk,
+ SND_SOC_CLOCK_OUT);
+ if (ret && ret != -ENOTSUPP)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_ops axg_card_tdm_be_ops = {
+ .hw_params = axg_card_tdm_be_hw_params,
+};
+
+static int axg_card_tdm_dai_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct axg_card *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct axg_dai_link_tdm_data *be =
+ (struct axg_dai_link_tdm_data *)priv->link_data[rtd->num];
+ struct snd_soc_dai *codec_dai;
+ int ret, i;
+
+ for (i = 0; i < rtd->num_codecs; i++) {
+ codec_dai = rtd->codec_dais[i];
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ be->codec_masks[i].tx,
+ be->codec_masks[i].rx,
+ be->slots, be->slot_width);
+ if (ret && ret != -ENOTSUPP) {
+ dev_err(codec_dai->dev,
+ "setting tdm link slots failed\n");
+ return ret;
+ }
+ }
+
+ ret = axg_tdm_set_tdm_slots(rtd->cpu_dai, be->tx_mask, be->rx_mask,
+ be->slots, be->slot_width);
+ if (ret) {
+ dev_err(rtd->cpu_dai->dev, "setting tdm link slots failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axg_card_tdm_dai_lb_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct axg_card *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct axg_dai_link_tdm_data *be =
+ (struct axg_dai_link_tdm_data *)priv->link_data[rtd->num];
+ int ret;
+
+ /* The loopback rx_mask is the pad tx_mask */
+ ret = axg_tdm_set_tdm_slots(rtd->cpu_dai, NULL, be->tx_mask,
+ be->slots, be->slot_width);
+ if (ret) {
+ dev_err(rtd->cpu_dai->dev, "setting tdm link slots failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axg_card_add_tdm_loopback(struct snd_soc_card *card,
+ int *index)
+{
+ struct axg_card *priv = snd_soc_card_get_drvdata(card);
+ struct snd_soc_dai_link *pad = &card->dai_link[*index];
+ struct snd_soc_dai_link *lb;
+ int ret;
+
+ /* extend links */
+ ret = axg_card_reallocate_links(priv, card->num_links + 1);
+ if (ret)
+ return ret;
+
+ lb = &card->dai_link[*index + 1];
+
+ lb->name = kasprintf(GFP_KERNEL, "%s-lb", pad->name);
+ if (!lb->name)
+ return -ENOMEM;
+
+ lb->stream_name = lb->name;
+ lb->cpu_of_node = pad->cpu_of_node;
+ lb->cpu_dai_name = "TDM Loopback";
+ lb->codec_name = "snd-soc-dummy";
+ lb->codec_dai_name = "snd-soc-dummy-dai";
+ lb->dpcm_capture = 1;
+ lb->no_pcm = 1;
+ lb->ops = &axg_card_tdm_be_ops;
+ lb->init = axg_card_tdm_dai_lb_init;
+
+ /* Provide the same link data to the loopback */
+ priv->link_data[*index + 1] = priv->link_data[*index];
+
+ /*
+ * axg_card_clean_references() will iterate over this link,
+ * make sure the node count is balanced
+ */
+ of_node_get(lb->cpu_of_node);
+
+ /* Let add_links continue where it should */
+ *index += 1;
+
+ return 0;
+}
+
+static unsigned int axg_card_parse_daifmt(struct device_node *node,
+ struct device_node *cpu_node)
+{
+ struct device_node *bitclkmaster = NULL;
+ struct device_node *framemaster = NULL;
+ unsigned int daifmt;
+
+ daifmt = snd_soc_of_parse_daifmt(node, PREFIX,
+ &bitclkmaster, &framemaster);
+ daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
+
+ /* If no master is provided, default to cpu master */
+ if (!bitclkmaster || bitclkmaster == cpu_node) {
+ daifmt |= (!framemaster || framemaster == cpu_node) ?
+ SND_SOC_DAIFMT_CBS_CFS : SND_SOC_DAIFMT_CBS_CFM;
+ } else {
+ daifmt |= (!framemaster || framemaster == cpu_node) ?
+ SND_SOC_DAIFMT_CBM_CFS : SND_SOC_DAIFMT_CBM_CFM;
+ }
+
+ of_node_put(bitclkmaster);
+ of_node_put(framemaster);
+
+ return daifmt;
+}
+
+static int axg_card_parse_cpu_tdm_slots(struct snd_soc_card *card,
+ struct snd_soc_dai_link *link,
+ struct device_node *node,
+ struct axg_dai_link_tdm_data *be)
+{
+ char propname[32];
+ u32 tx, rx;
+ int i;
+
+ be->tx_mask = devm_kcalloc(card->dev, AXG_TDM_NUM_LANES,
+ sizeof(*be->tx_mask), GFP_KERNEL);
+ be->rx_mask = devm_kcalloc(card->dev, AXG_TDM_NUM_LANES,
+ sizeof(*be->rx_mask), GFP_KERNEL);
+ if (!be->tx_mask || !be->rx_mask)
+ return -ENOMEM;
+
+ for (i = 0, tx = 0; i < AXG_TDM_NUM_LANES; i++) {
+ snprintf(propname, 32, "dai-tdm-slot-tx-mask-%d", i);
+ snd_soc_of_get_slot_mask(node, propname, &be->tx_mask[i]);
+ tx = max(tx, be->tx_mask[i]);
+ }
+
+ /* Disable playback if the interface has no tx slots */
+ if (!tx)
+ link->dpcm_playback = 0;
+
+ for (i = 0, rx = 0; i < AXG_TDM_NUM_LANES; i++) {
+ snprintf(propname, 32, "dai-tdm-slot-rx-mask-%d", i);
+ snd_soc_of_get_slot_mask(node, propname, &be->rx_mask[i]);
+ rx = max(rx, be->rx_mask[i]);
+ }
+
+ /* Disable capture if the interface has no rx slots */
+ if (!rx)
+ link->dpcm_capture = 0;
+
+ /* ... but the interface should at least have one of them */
+ if (!tx && !rx) {
+ dev_err(card->dev, "tdm link has no cpu slots\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32(node, "dai-tdm-slot-num", &be->slots);
+ if (!be->slots) {
+ /*
+ * If the slot number is not provided, set it so that it
+ * accommodates the largest mask
+ */
+ be->slots = fls(max(tx, rx));
+ } else if (be->slots < fls(max(tx, rx)) || be->slots > 32) {
+ /*
+ * Error if the slots can't accommodate the largest mask or
+ * if it is just too big
+ */
+ dev_err(card->dev, "bad slot number\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32(node, "dai-tdm-slot-width", &be->slot_width);
+
+ return 0;
+}
+
+static int axg_card_parse_codecs_masks(struct snd_soc_card *card,
+ struct snd_soc_dai_link *link,
+ struct device_node *node,
+ struct axg_dai_link_tdm_data *be)
+{
+ struct axg_dai_link_tdm_mask *codec_mask;
+ struct device_node *np;
+
+ codec_mask = devm_kcalloc(card->dev, link->num_codecs,
+ sizeof(*codec_mask), GFP_KERNEL);
+ if (!codec_mask)
+ return -ENOMEM;
+
+ be->codec_masks = codec_mask;
+
+ for_each_child_of_node(node, np) {
+ snd_soc_of_get_slot_mask(np, "dai-tdm-slot-rx-mask",
+ &codec_mask->rx);
+ snd_soc_of_get_slot_mask(np, "dai-tdm-slot-tx-mask",
+ &codec_mask->tx);
+
+ codec_mask++;
+ }
+
+ return 0;
+}
+
+static int axg_card_parse_tdm(struct snd_soc_card *card,
+ struct device_node *node,
+ int *index)
+{
+ struct axg_card *priv = snd_soc_card_get_drvdata(card);
+ struct snd_soc_dai_link *link = &card->dai_link[*index];
+ struct axg_dai_link_tdm_data *be;
+ int ret;
+
+ /* Allocate tdm link parameters */
+ be = devm_kzalloc(card->dev, sizeof(*be), GFP_KERNEL);
+ if (!be)
+ return -ENOMEM;
+ priv->link_data[*index] = be;
+
+ /* Setup tdm link */
+ link->ops = &axg_card_tdm_be_ops;
+ link->init = axg_card_tdm_dai_init;
+ link->dai_fmt = axg_card_parse_daifmt(node, link->cpu_of_node);
+
+ of_property_read_u32(node, "mclk-fs", &be->mclk_fs);
+
+ ret = axg_card_parse_cpu_tdm_slots(card, link, node, be);
+ if (ret) {
+ dev_err(card->dev, "error parsing tdm link slots\n");
+ return ret;
+ }
+
+ ret = axg_card_parse_codecs_masks(card, link, node, be);
+ if (ret)
+ return ret;
+
+ /* Add loopback if the pad dai has playback */
+ if (link->dpcm_playback) {
+ ret = axg_card_add_tdm_loopback(card, index);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axg_card_set_be_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *link,
+ struct device_node *node)
+{
+ struct snd_soc_dai_link_component *codec;
+ struct device_node *np;
+ int ret, num_codecs;
+
+ link->no_pcm = 1;
+ link->dpcm_playback = 1;
+ link->dpcm_capture = 1;
+
+ num_codecs = of_get_child_count(node);
+ if (!num_codecs) {
+ dev_err(card->dev, "be link %s has no codec\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ codec = devm_kcalloc(card->dev, num_codecs, sizeof(*codec), GFP_KERNEL);
+ if (!codec)
+ return -ENOMEM;
+
+ link->codecs = codec;
+ link->num_codecs = num_codecs;
+
+ for_each_child_of_node(node, np) {
+ ret = axg_card_parse_dai(card, np, &codec->of_node,
+ &codec->dai_name);
+ if (ret) {
+ of_node_put(np);
+ return ret;
+ }
+
+ codec++;
+ }
+
+ ret = axg_card_set_link_name(card, link, "be");
+ if (ret)
+ dev_err(card->dev, "error setting %s link name\n", np->name);
+
+ return ret;
+}
+
+static int axg_card_set_fe_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *link,
+ bool is_playback)
+{
+ link->dynamic = 1;
+ link->dpcm_merged_format = 1;
+ link->dpcm_merged_chan = 1;
+ link->dpcm_merged_rate = 1;
+ link->codec_dai_name = "snd-soc-dummy-dai";
+ link->codec_name = "snd-soc-dummy";
+
+ if (is_playback)
+ link->dpcm_playback = 1;
+ else
+ link->dpcm_capture = 1;
+
+ return axg_card_set_link_name(card, link, "fe");
+}
+
+static int axg_card_cpu_is_capture_fe(struct device_node *np)
+{
+ return of_device_is_compatible(np, PREFIX "axg-toddr");
+}
+
+static int axg_card_cpu_is_playback_fe(struct device_node *np)
+{
+ return of_device_is_compatible(np, PREFIX "axg-frddr");
+}
+
+static int axg_card_cpu_is_tdm_iface(struct device_node *np)
+{
+ return of_device_is_compatible(np, PREFIX "axg-tdm-iface");
+}
+
+static int axg_card_add_link(struct snd_soc_card *card, struct device_node *np,
+ int *index)
+{
+ struct snd_soc_dai_link *dai_link = &card->dai_link[*index];
+ int ret;
+
+ ret = axg_card_parse_dai(card, np, &dai_link->cpu_of_node,
+ &dai_link->cpu_dai_name);
+ if (ret)
+ return ret;
+
+ if (axg_card_cpu_is_playback_fe(dai_link->cpu_of_node))
+ ret = axg_card_set_fe_link(card, dai_link, true);
+ else if (axg_card_cpu_is_capture_fe(dai_link->cpu_of_node))
+ ret = axg_card_set_fe_link(card, dai_link, false);
+ else
+ ret = axg_card_set_be_link(card, dai_link, np);
+
+ if (ret)
+ return ret;
+
+ if (axg_card_cpu_is_tdm_iface(dai_link->cpu_of_node))
+ ret = axg_card_parse_tdm(card, np, index);
+
+ return ret;
+}
+
+static int axg_card_add_links(struct snd_soc_card *card)
+{
+ struct axg_card *priv = snd_soc_card_get_drvdata(card);
+ struct device_node *node = card->dev->of_node;
+ struct device_node *np;
+ int num, i, ret;
+
+ num = of_get_child_count(node);
+ if (!num) {
+ dev_err(card->dev, "card has no links\n");
+ return -EINVAL;
+ }
+
+ ret = axg_card_reallocate_links(priv, num);
+ if (ret)
+ return ret;
+
+ i = 0;
+ for_each_child_of_node(node, np) {
+ ret = axg_card_add_link(card, np, &i);
+ if (ret) {
+ of_node_put(np);
+ return ret;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int axg_card_parse_of_optional(struct snd_soc_card *card,
+ const char *propname,
+ int (*func)(struct snd_soc_card *c,
+ const char *p))
+{
+ /* If property is not provided, don't fail ... */
+ if (!of_property_read_bool(card->dev->of_node, propname))
+ return 0;
+
+ /* ... but do fail if it is provided and the parsing fails */
+ return func(card, propname);
+}
+
+static const struct of_device_id axg_card_of_match[] = {
+ { .compatible = "amlogic,axg-sound-card", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, axg_card_of_match);
+
+static int axg_card_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct axg_card *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+ snd_soc_card_set_drvdata(&priv->card, priv);
+
+ priv->card.owner = THIS_MODULE;
+ priv->card.dev = dev;
+
+ ret = snd_soc_of_parse_card_name(&priv->card, "model");
+ if (ret < 0)
+ return ret;
+
+ ret = axg_card_parse_of_optional(&priv->card, "audio-routing",
+ snd_soc_of_parse_audio_routing);
+ if (ret) {
+ dev_err(dev, "error while parsing routing\n");
+ return ret;
+ }
+
+ ret = axg_card_parse_of_optional(&priv->card, "audio-widgets",
+ snd_soc_of_parse_audio_simple_widgets);
+ if (ret) {
+ dev_err(dev, "error while parsing widgets\n");
+ return ret;
+ }
+
+ ret = axg_card_add_links(&priv->card);
+ if (ret)
+ goto out_err;
+
+ ret = axg_card_add_aux_devices(&priv->card);
+ if (ret)
+ goto out_err;
+
+ ret = devm_snd_soc_register_card(dev, &priv->card);
+ if (ret)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ axg_card_clean_references(priv);
+ return ret;
+}
+
+static int axg_card_remove(struct platform_device *pdev)
+{
+ struct axg_card *priv = platform_get_drvdata(pdev);
+
+ axg_card_clean_references(priv);
+
+ return 0;
+}
+
+static struct platform_driver axg_card_pdrv = {
+ .probe = axg_card_probe,
+ .remove = axg_card_remove,
+ .driver = {
+ .name = "axg-sound-card",
+ .of_match_table = axg_card_of_match,
+ },
+};
+module_platform_driver(axg_card_pdrv);
+
+MODULE_DESCRIPTION("Amlogic AXG ALSA machine driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
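For the TDM back-ends, axg_card_tdm_be_hw_params() derives the master clock from the stream rate and the DT-provided mclk-fs ratio. A worked example, assuming a typical mclk-fs of 256:

	unsigned int rate = 48000;		/* params_rate(params) */
	unsigned int mclk_fs = 256;		/* from the "mclk-fs" property */
	unsigned int mclk = rate * mclk_fs;	/* 12288000 Hz, i.e. 12.288 MHz */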
diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c
new file mode 100644
index 000000000000..30262550e37b
--- /dev/null
+++ b/sound/soc/meson/axg-fifo.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/clk.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "axg-fifo.h"
+
+/*
+ * This file implements the platform operations common to the playback and
+ * capture frontend DAIs. The logic behind these two types of fifo is
+ * very similar, but some differences exist.
+ * Those differences are handled in the respective DAI drivers.
+ */
+
+static struct snd_pcm_hardware axg_fifo_hw = {
+ .info = (SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_PAUSE),
+
+ .formats = AXG_FIFO_FORMATS,
+ .rate_min = 5512,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = AXG_FIFO_CH_MAX,
+ .period_bytes_min = AXG_FIFO_MIN_DEPTH,
+ .period_bytes_max = UINT_MAX,
+ .periods_min = 2,
+ .periods_max = UINT_MAX,
+
+ /* No real justification for this */
+ .buffer_bytes_max = 1 * 1024 * 1024,
+};
+
+static struct snd_soc_dai *axg_fifo_dai(struct snd_pcm_substream *ss)
+{
+ struct snd_soc_pcm_runtime *rtd = ss->private_data;
+
+ return rtd->cpu_dai;
+}
+
+static struct axg_fifo *axg_fifo_data(struct snd_pcm_substream *ss)
+{
+ struct snd_soc_dai *dai = axg_fifo_dai(ss);
+
+ return snd_soc_dai_get_drvdata(dai);
+}
+
+static struct device *axg_fifo_dev(struct snd_pcm_substream *ss)
+{
+ struct snd_soc_dai *dai = axg_fifo_dai(ss);
+
+ return dai->dev;
+}
+
+static void __dma_enable(struct axg_fifo *fifo, bool enable)
+{
+ regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN,
+ enable ? CTRL0_DMA_EN : 0);
+}
+
+static int axg_fifo_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
+{
+ struct axg_fifo *fifo = axg_fifo_data(ss);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ __dma_enable(fifo, true);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_STOP:
+ __dma_enable(fifo, false);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_pcm_substream *ss)
+{
+ struct axg_fifo *fifo = axg_fifo_data(ss);
+ struct snd_pcm_runtime *runtime = ss->runtime;
+ unsigned int addr;
+
+ regmap_read(fifo->map, FIFO_STATUS2, &addr);
+
+ return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr);
+}
+
+static int axg_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_pcm_runtime *runtime = ss->runtime;
+ struct axg_fifo *fifo = axg_fifo_data(ss);
+ dma_addr_t end_ptr;
+ unsigned int burst_num;
+ int ret;
+
+ ret = snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(params));
+ if (ret < 0)
+ return ret;
+
+ /* Setup dma memory pointers */
+ end_ptr = runtime->dma_addr + runtime->dma_bytes - AXG_FIFO_BURST;
+ regmap_write(fifo->map, FIFO_START_ADDR, runtime->dma_addr);
+ regmap_write(fifo->map, FIFO_FINISH_ADDR, end_ptr);
+
+ /* Setup interrupt periodicity */
+ burst_num = params_period_bytes(params) / AXG_FIFO_BURST;
+ regmap_write(fifo->map, FIFO_INT_ADDR, burst_num);
+
+ /* Enable block count irq */
+ regmap_update_bits(fifo->map, FIFO_CTRL0,
+ CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT),
+ CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT));
+
+ return 0;
+}
+
+static int axg_fifo_pcm_hw_free(struct snd_pcm_substream *ss)
+{
+ struct axg_fifo *fifo = axg_fifo_data(ss);
+
+ /* Disable the block count irq */
+ regmap_update_bits(fifo->map, FIFO_CTRL0,
+ CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), 0);
+
+ return snd_pcm_lib_free_pages(ss);
+}
+
+static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
+{
+ regmap_update_bits(fifo->map, FIFO_CTRL1,
+ CTRL1_INT_CLR(FIFO_INT_MASK),
+ CTRL1_INT_CLR(mask));
+
+ /* The clear bits must also be cleared back to zero */
+ regmap_update_bits(fifo->map, FIFO_CTRL1,
+ CTRL1_INT_CLR(FIFO_INT_MASK),
+ 0);
+}
+
+static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
+{
+ struct snd_pcm_substream *ss = dev_id;
+ struct axg_fifo *fifo = axg_fifo_data(ss);
+ unsigned int status;
+
+ regmap_read(fifo->map, FIFO_STATUS1, &status);
+
+ status = STATUS1_INT_STS(status) & FIFO_INT_MASK;
+ if (status & FIFO_INT_COUNT_REPEAT)
+ snd_pcm_period_elapsed(ss);
+ else
+ dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
+ status);
+
+ /* Ack irqs */
+ axg_fifo_ack_irq(fifo, status);
+
+ return IRQ_RETVAL(status);
+}
+
+static int axg_fifo_pcm_open(struct snd_pcm_substream *ss)
+{
+ struct axg_fifo *fifo = axg_fifo_data(ss);
+ struct device *dev = axg_fifo_dev(ss);
+ int ret;
+
+ snd_soc_set_runtime_hwparams(ss, &axg_fifo_hw);
+
+ /*
+ * Make sure the buffer and period sizes are multiples of the
+ * FIFO minimum depth
+ */
+ ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+ AXG_FIFO_MIN_DEPTH);
+ if (ret)
+ return ret;
+
+ ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ AXG_FIFO_MIN_DEPTH);
+ if (ret)
+ return ret;
+
+ ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
+ dev_name(dev), ss);
+ if (ret)
+ return ret;
+
+ /* Enable pclk to access registers and clock the fifo ip */
+ ret = clk_prepare_enable(fifo->pclk);
+ if (ret)
+ return ret;
+
+ /* Setup status2 so it reports the memory pointer */
+ regmap_update_bits(fifo->map, FIFO_CTRL1,
+ CTRL1_STATUS2_SEL_MASK,
+ CTRL1_STATUS2_SEL(STATUS2_SEL_DDR_READ));
+
+ /* Make sure the dma is initially disabled */
+ __dma_enable(fifo, false);
+
+ /* Disable irqs until params are ready */
+ regmap_update_bits(fifo->map, FIFO_CTRL0,
+ CTRL0_INT_EN(FIFO_INT_MASK), 0);
+
+ /* Clear any pending interrupt */
+ axg_fifo_ack_irq(fifo, FIFO_INT_MASK);
+
+ /* Take memory arbiter out of reset */
+ ret = reset_control_deassert(fifo->arb);
+ if (ret)
+ clk_disable_unprepare(fifo->pclk);
+
+ return ret;
+}
+
+static int axg_fifo_pcm_close(struct snd_pcm_substream *ss)
+{
+ struct axg_fifo *fifo = axg_fifo_data(ss);
+ int ret;
+
+ /* Put the memory arbiter back in reset */
+ ret = reset_control_assert(fifo->arb);
+
+ /* Disable fifo ip and register access */
+ clk_disable_unprepare(fifo->pclk);
+
+ /* remove IRQ */
+ free_irq(fifo->irq, ss);
+
+ return ret;
+}
+
+const struct snd_pcm_ops axg_fifo_pcm_ops = {
+ .open = axg_fifo_pcm_open,
+ .close = axg_fifo_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = axg_fifo_pcm_hw_params,
+ .hw_free = axg_fifo_pcm_hw_free,
+ .pointer = axg_fifo_pcm_pointer,
+ .trigger = axg_fifo_pcm_trigger,
+};
+EXPORT_SYMBOL_GPL(axg_fifo_pcm_ops);
+
+int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type)
+{
+ struct snd_card *card = rtd->card->snd_card;
+ size_t size = axg_fifo_hw.buffer_bytes_max;
+
+ return snd_pcm_lib_preallocate_pages(rtd->pcm->streams[type].substream,
+ SNDRV_DMA_TYPE_DEV, card->dev,
+ size, size);
+}
+EXPORT_SYMBOL_GPL(axg_fifo_pcm_new);
+
+static const struct regmap_config axg_fifo_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = FIFO_STATUS2,
+};
+
+int axg_fifo_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct axg_fifo_match_data *data;
+ struct axg_fifo *fifo;
+ struct resource *res;
+ void __iomem *regs;
+
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ dev_err(dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
+ fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
+ if (!fifo)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, fifo);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ fifo->map = devm_regmap_init_mmio(dev, regs, &axg_fifo_regmap_cfg);
+ if (IS_ERR(fifo->map)) {
+ dev_err(dev, "failed to init regmap: %ld\n",
+ PTR_ERR(fifo->map));
+ return PTR_ERR(fifo->map);
+ }
+
+ fifo->pclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(fifo->pclk)) {
+ if (PTR_ERR(fifo->pclk) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get pclk: %ld\n",
+ PTR_ERR(fifo->pclk));
+ return PTR_ERR(fifo->pclk);
+ }
+
+ fifo->arb = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(fifo->arb)) {
+ if (PTR_ERR(fifo->arb) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get arb reset: %ld\n",
+ PTR_ERR(fifo->arb));
+ return PTR_ERR(fifo->arb);
+ }
+
+ fifo->irq = of_irq_get(dev->of_node, 0);
+ if (fifo->irq <= 0) {
+ dev_err(dev, "failed to get irq: %d\n", fifo->irq);
+ return fifo->irq;
+ }
+
+ return devm_snd_soc_register_component(dev, data->component_drv,
+ data->dai_drv, 1);
+}
+EXPORT_SYMBOL_GPL(axg_fifo_probe);
+
+MODULE_DESCRIPTION("Amlogic AXG fifo driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
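The FIFO raises its period interrupt after a programmed number of 8-byte bursts, which is why axg_fifo_pcm_open() constrains buffer and period sizes to multiples of AXG_FIFO_MIN_DEPTH (8 * 64 = 512 bytes). A worked example for the hw_params path:

	unsigned int period_bytes = 4096;		/* params_period_bytes() */
	unsigned int burst_num = period_bytes / 8;	/* AXG_FIFO_BURST = 8 -> 512 */
	/* burst_num is the value written to FIFO_INT_ADDR */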
diff --git a/sound/soc/meson/axg-fifo.h b/sound/soc/meson/axg-fifo.h
new file mode 100644
index 000000000000..cb6c4013ca33
--- /dev/null
+++ b/sound/soc/meson/axg-fifo.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef _MESON_AXG_FIFO_H
+#define _MESON_AXG_FIFO_H
+
+struct clk;
+struct platform_device;
+struct regmap;
+struct reset_control;
+
+struct snd_soc_component_driver;
+struct snd_soc_dai;
+struct snd_soc_dai_driver;
+struct snd_pcm_ops;
+struct snd_soc_pcm_runtime;
+
+#define AXG_FIFO_CH_MAX 128
+#define AXG_FIFO_RATES (SNDRV_PCM_RATE_5512 | \
+ SNDRV_PCM_RATE_8000_192000)
+#define AXG_FIFO_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
+ SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+#define AXG_FIFO_BURST 8
+#define AXG_FIFO_MIN_CNT 64
+#define AXG_FIFO_MIN_DEPTH (AXG_FIFO_BURST * AXG_FIFO_MIN_CNT)
+
+#define FIFO_INT_ADDR_FINISH BIT(0)
+#define FIFO_INT_ADDR_INT BIT(1)
+#define FIFO_INT_COUNT_REPEAT BIT(2)
+#define FIFO_INT_COUNT_ONCE BIT(3)
+#define FIFO_INT_FIFO_ZERO BIT(4)
+#define FIFO_INT_FIFO_DEPTH BIT(5)
+#define FIFO_INT_MASK GENMASK(7, 0)
+
+#define FIFO_CTRL0 0x00
+#define CTRL0_DMA_EN BIT(31)
+#define CTRL0_INT_EN(x) ((x) << 16)
+#define CTRL0_SEL_MASK GENMASK(2, 0)
+#define CTRL0_SEL_SHIFT 0
+#define FIFO_CTRL1 0x04
+#define CTRL1_INT_CLR(x) ((x) << 0)
+#define CTRL1_STATUS2_SEL_MASK GENMASK(11, 8)
+#define CTRL1_STATUS2_SEL(x) ((x) << 8)
+#define STATUS2_SEL_DDR_READ 0
+#define CTRL1_THRESHOLD_MASK GENMASK(23, 16)
+#define CTRL1_THRESHOLD(x) ((x) << 16)
+#define CTRL1_FRDDR_DEPTH_MASK GENMASK(31, 24)
+#define CTRL1_FRDDR_DEPTH(x) ((x) << 24)
+#define FIFO_START_ADDR 0x08
+#define FIFO_FINISH_ADDR 0x0c
+#define FIFO_INT_ADDR 0x10
+#define FIFO_STATUS1 0x14
+#define STATUS1_INT_STS(x) ((x) << 0)
+#define FIFO_STATUS2 0x18
+
+struct axg_fifo {
+ struct regmap *map;
+ struct clk *pclk;
+ struct reset_control *arb;
+ int irq;
+};
+
+struct axg_fifo_match_data {
+ const struct snd_soc_component_driver *component_drv;
+ struct snd_soc_dai_driver *dai_drv;
+};
+
+extern const struct snd_pcm_ops axg_fifo_pcm_ops;
+
+int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type);
+int axg_fifo_probe(struct platform_device *pdev);
+
+#endif /* _MESON_AXG_FIFO_H */
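Most register writes in these drivers go through regmap_update_bits(), a read-modify-write under a mask. Its effect, sketched as plain C:

	/* new = (old & ~mask) | (val & mask): only bits under the mask change */
	static unsigned int update_bits(unsigned int old, unsigned int mask,
					unsigned int val)
	{
		return (old & ~mask) | (val & mask);
	}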
diff --git a/sound/soc/meson/axg-frddr.c b/sound/soc/meson/axg-frddr.c
new file mode 100644
index 000000000000..a6f6f6a2eca8
--- /dev/null
+++ b/sound/soc/meson/axg-frddr.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+/* This driver implements the frontend playback DAI of AXG based SoCs */
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "axg-fifo.h"
+
+#define CTRL0_FRDDR_PP_MODE BIT(30)
+
+static int axg_frddr_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_fifo *fifo = snd_soc_dai_get_drvdata(dai);
+ unsigned int fifo_depth, fifo_threshold;
+ int ret;
+
+ /* Enable pclk to access registers and clock the fifo ip */
+ ret = clk_prepare_enable(fifo->pclk);
+ if (ret)
+ return ret;
+
+ /* Apply single buffer mode to the interface */
+ regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_FRDDR_PP_MODE, 0);
+
+ /*
+ * TODO: We could adapt the fifo depth and the fifo threshold
+ * depending on the expected memory throughput and latencies.
+ * For now, we'll just use the same values as the vendor kernel.
+ * Depth and threshold are zero based.
+ */
+ fifo_depth = AXG_FIFO_MIN_CNT - 1;
+ fifo_threshold = (AXG_FIFO_MIN_CNT / 2) - 1;
+ regmap_update_bits(fifo->map, FIFO_CTRL1,
+ CTRL1_FRDDR_DEPTH_MASK | CTRL1_THRESHOLD_MASK,
+ CTRL1_FRDDR_DEPTH(fifo_depth) |
+ CTRL1_THRESHOLD(fifo_threshold));
+
+ return 0;
+}
+
+static void axg_frddr_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_fifo *fifo = snd_soc_dai_get_drvdata(dai);
+
+ clk_disable_unprepare(fifo->pclk);
+}
+
+static int axg_frddr_pcm_new(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *dai)
+{
+ return axg_fifo_pcm_new(rtd, SNDRV_PCM_STREAM_PLAYBACK);
+}
+
+static const struct snd_soc_dai_ops axg_frddr_ops = {
+ .startup = axg_frddr_dai_startup,
+ .shutdown = axg_frddr_dai_shutdown,
+};
+
+static struct snd_soc_dai_driver axg_frddr_dai_drv = {
+ .name = "FRDDR",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = AXG_FIFO_CH_MAX,
+ .rates = AXG_FIFO_RATES,
+ .formats = AXG_FIFO_FORMATS,
+ },
+ .ops = &axg_frddr_ops,
+ .pcm_new = axg_frddr_pcm_new,
+};
+
+static const char * const axg_frddr_sel_texts[] = {
+ "OUT 0", "OUT 1", "OUT 2", "OUT 3"
+};
+
+static SOC_ENUM_SINGLE_DECL(axg_frddr_sel_enum, FIFO_CTRL0, CTRL0_SEL_SHIFT,
+ axg_frddr_sel_texts);
+
+static const struct snd_kcontrol_new axg_frddr_out_demux =
+ SOC_DAPM_ENUM("Output Sink", axg_frddr_sel_enum);
+
+static const struct snd_soc_dapm_widget axg_frddr_dapm_widgets[] = {
+ SND_SOC_DAPM_DEMUX("SINK SEL", SND_SOC_NOPM, 0, 0,
+ &axg_frddr_out_demux),
+ SND_SOC_DAPM_AIF_OUT("OUT 0", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("OUT 1", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("OUT 2", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("OUT 3", NULL, 0, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_route axg_frddr_dapm_routes[] = {
+ { "SINK SEL", NULL, "Playback" },
+ { "OUT 0", "OUT 0", "SINK SEL" },
+ { "OUT 1", "OUT 1", "SINK SEL" },
+ { "OUT 2", "OUT 2", "SINK SEL" },
+ { "OUT 3", "OUT 3", "SINK SEL" },
+};
+
+static const struct snd_soc_component_driver axg_frddr_component_drv = {
+ .dapm_widgets = axg_frddr_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(axg_frddr_dapm_widgets),
+ .dapm_routes = axg_frddr_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(axg_frddr_dapm_routes),
+ .ops = &axg_fifo_pcm_ops
+};
+
+static const struct axg_fifo_match_data axg_frddr_match_data = {
+ .component_drv = &axg_frddr_component_drv,
+ .dai_drv = &axg_frddr_dai_drv
+};
+
+static const struct of_device_id axg_frddr_of_match[] = {
+ {
+ .compatible = "amlogic,axg-frddr",
+ .data = &axg_frddr_match_data,
+ }, {}
+};
+MODULE_DEVICE_TABLE(of, axg_frddr_of_match);
+
+static struct platform_driver axg_frddr_pdrv = {
+ .probe = axg_fifo_probe,
+ .driver = {
+ .name = "axg-frddr",
+ .of_match_table = axg_frddr_of_match,
+ },
+};
+module_platform_driver(axg_frddr_pdrv);
+
+MODULE_DESCRIPTION("Amlogic AXG playback fifo driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/meson/axg-spdifout.c b/sound/soc/meson/axg-spdifout.c
new file mode 100644
index 000000000000..9dea528053ad
--- /dev/null
+++ b/sound/soc/meson/axg-spdifout.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/pcm_params.h>
+#include <sound/pcm_iec958.h>
+
+/*
+ * NOTE:
+ * The meaning of bits SPDIFOUT_CTRL0_XXX_SEL is actually the opposite
+ * of what the documentation says. Manual control on V, U and C bits is
+ * applied when the related sel bits are cleared
+ */
+
+#define SPDIFOUT_STAT 0x00
+#define SPDIFOUT_GAIN0 0x04
+#define SPDIFOUT_GAIN1 0x08
+#define SPDIFOUT_CTRL0 0x0c
+#define SPDIFOUT_CTRL0_EN BIT(31)
+#define SPDIFOUT_CTRL0_RST_OUT BIT(29)
+#define SPDIFOUT_CTRL0_RST_IN BIT(28)
+#define SPDIFOUT_CTRL0_USEL BIT(26)
+#define SPDIFOUT_CTRL0_USET BIT(25)
+#define SPDIFOUT_CTRL0_CHSTS_SEL BIT(24)
+#define SPDIFOUT_CTRL0_DATA_SEL BIT(20)
+#define SPDIFOUT_CTRL0_MSB_FIRST BIT(19)
+#define SPDIFOUT_CTRL0_VSEL BIT(18)
+#define SPDIFOUT_CTRL0_VSET BIT(17)
+#define SPDIFOUT_CTRL0_MASK_MASK GENMASK(11, 4)
+#define SPDIFOUT_CTRL0_MASK(x) ((x) << 4)
+#define SPDIFOUT_CTRL1 0x10
+#define SPDIFOUT_CTRL1_MSB_POS_MASK GENMASK(12, 8)
+#define SPDIFOUT_CTRL1_MSB_POS(x) ((x) << 8)
+#define SPDIFOUT_CTRL1_TYPE_MASK GENMASK(6, 4)
+#define SPDIFOUT_CTRL1_TYPE(x) ((x) << 4)
+#define SPDIFOUT_PREAMB 0x14
+#define SPDIFOUT_SWAP 0x18
+#define SPDIFOUT_CHSTS0 0x1c
+#define SPDIFOUT_CHSTS1 0x20
+#define SPDIFOUT_CHSTS2 0x24
+#define SPDIFOUT_CHSTS3 0x28
+#define SPDIFOUT_CHSTS4 0x2c
+#define SPDIFOUT_CHSTS5 0x30
+#define SPDIFOUT_CHSTS6 0x34
+#define SPDIFOUT_CHSTS7 0x38
+#define SPDIFOUT_CHSTS8 0x3c
+#define SPDIFOUT_CHSTS9 0x40
+#define SPDIFOUT_CHSTSA 0x44
+#define SPDIFOUT_CHSTSB 0x48
+#define SPDIFOUT_MUTE_VAL 0x4c
+
+struct axg_spdifout {
+ struct regmap *map;
+ struct clk *mclk;
+ struct clk *pclk;
+};
+
+static void axg_spdifout_enable(struct regmap *map)
+{
+ /* Apply both resets */
+ regmap_update_bits(map, SPDIFOUT_CTRL0,
+ SPDIFOUT_CTRL0_RST_OUT | SPDIFOUT_CTRL0_RST_IN,
+ 0);
+
+ /* Clear the out reset before the in reset */
+ regmap_update_bits(map, SPDIFOUT_CTRL0,
+ SPDIFOUT_CTRL0_RST_OUT, SPDIFOUT_CTRL0_RST_OUT);
+ regmap_update_bits(map, SPDIFOUT_CTRL0,
+ SPDIFOUT_CTRL0_RST_IN, SPDIFOUT_CTRL0_RST_IN);
+
+ /* Enable spdifout */
+ regmap_update_bits(map, SPDIFOUT_CTRL0, SPDIFOUT_CTRL0_EN,
+ SPDIFOUT_CTRL0_EN);
+}
+
+static void axg_spdifout_disable(struct regmap *map)
+{
+ regmap_update_bits(map, SPDIFOUT_CTRL0, SPDIFOUT_CTRL0_EN, 0);
+}
+
+static int axg_spdifout_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct axg_spdifout *priv = snd_soc_dai_get_drvdata(dai);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ axg_spdifout_enable(priv->map);
+ return 0;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ axg_spdifout_disable(priv->map);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int axg_spdifout_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct axg_spdifout *priv = snd_soc_dai_get_drvdata(dai);
+
+ /* Use spdif valid bit to perform digital mute */
+ regmap_update_bits(priv->map, SPDIFOUT_CTRL0, SPDIFOUT_CTRL0_VSET,
+ mute ? SPDIFOUT_CTRL0_VSET : 0);
+
+ return 0;
+}
+
+static int axg_spdifout_sample_fmt(struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct axg_spdifout *priv = snd_soc_dai_get_drvdata(dai);
+ unsigned int val;
+
+ /* Set the samples spdifout will pull from the FIFO */
+ switch (params_channels(params)) {
+ case 1:
+ val = SPDIFOUT_CTRL0_MASK(0x1);
+ break;
+ case 2:
+ val = SPDIFOUT_CTRL0_MASK(0x3);
+ break;
+ default:
+ dev_err(dai->dev, "too many channels for spdif dai: %u\n",
+ params_channels(params));
+ return -EINVAL;
+ }
+
+ regmap_update_bits(priv->map, SPDIFOUT_CTRL0,
+ SPDIFOUT_CTRL0_MASK_MASK, val);
+
+ /* FIFO data are arranged in chunks of 64 bits */
+ switch (params_physical_width(params)) {
+ case 8:
+ /* 8 samples of 8 bits */
+ val = SPDIFOUT_CTRL1_TYPE(0);
+ break;
+ case 16:
+ /* 4 samples of 16 bits - right justified */
+ val = SPDIFOUT_CTRL1_TYPE(2);
+ break;
+ case 32:
+ /* 2 samples of 32 bits - right justified */
+ val = SPDIFOUT_CTRL1_TYPE(4);
+ break;
+ default:
+ dev_err(dai->dev, "Unsupported physical width: %u\n",
+ params_physical_width(params));
+ return -EINVAL;
+ }
+
+ /* Position of the MSB in FIFO samples */
+ val |= SPDIFOUT_CTRL1_MSB_POS(params_width(params) - 1);
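+ /* e.g. S24_LE in a 32 bit physical sample: type 4, MSB at bit 23 */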
+
+ regmap_update_bits(priv->map, SPDIFOUT_CTRL1,
+ SPDIFOUT_CTRL1_MSB_POS_MASK |
+ SPDIFOUT_CTRL1_TYPE_MASK, val);
+
+ regmap_update_bits(priv->map, SPDIFOUT_CTRL0,
+ SPDIFOUT_CTRL0_MSB_FIRST | SPDIFOUT_CTRL0_DATA_SEL,
+ 0);
+
+ return 0;
+}
+
+static int axg_spdifout_set_chsts(struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct axg_spdifout *priv = snd_soc_dai_get_drvdata(dai);
+ unsigned int offset;
+ int ret;
+ u8 cs[4];
+ u32 val;
+
+ ret = snd_pcm_create_iec958_consumer_hw_params(params, cs, 4);
+ if (ret < 0) {
+ dev_err(dai->dev, "Creating IEC958 channel status failed %d\n",
+ ret);
+ return ret;
+ }
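+ /* Pack the first four status bytes, LSB first, into one register word */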
+ val = cs[0] | cs[1] << 8 | cs[2] << 16 | cs[3] << 24;
+
+ /* Setup channel status A bits [31 - 0] */
+ regmap_write(priv->map, SPDIFOUT_CHSTS0, val);
+
+ /* Clear channel status A bits [191 - 32] */
+ for (offset = SPDIFOUT_CHSTS1; offset <= SPDIFOUT_CHSTS5;
+ offset += regmap_get_reg_stride(priv->map))
+ regmap_write(priv->map, offset, 0);
+
+ /* Setup channel status B bits [31 - 0] */
+ regmap_write(priv->map, SPDIFOUT_CHSTS6, val);
+
+ /* Clear channel status B bits [191 - 32] */
+ for (offset = SPDIFOUT_CHSTS7; offset <= SPDIFOUT_CHSTSB;
+ offset += regmap_get_reg_stride(priv->map))
+ regmap_write(priv->map, offset, 0);
+
+ return 0;
+}
+
+static int axg_spdifout_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct axg_spdifout *priv = snd_soc_dai_get_drvdata(dai);
+ unsigned int rate = params_rate(params);
+ int ret;
+
+ /* 2 * 32bits per subframe * 2 channels = 128 */
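+ /* e.g. a 48kHz stream needs a 6.144MHz master clock */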
+ ret = clk_set_rate(priv->mclk, rate * 128);
+ if (ret) {
+ dev_err(dai->dev, "failed to set spdif clock\n");
+ return ret;
+ }
+
+ ret = axg_spdifout_sample_fmt(params, dai);
+ if (ret) {
+ dev_err(dai->dev, "failed to setup sample format\n");
+ return ret;
+ }
+
+ ret = axg_spdifout_set_chsts(params, dai);
+ if (ret) {
+ dev_err(dai->dev, "failed to setup channel status words\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axg_spdifout_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_spdifout *priv = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ /* Clock the spdif output block */
+ ret = clk_prepare_enable(priv->pclk);
+ if (ret) {
+ dev_err(dai->dev, "failed to enable pclk\n");
+ return ret;
+ }
+
+ /* Make sure the block is initially stopped */
+ axg_spdifout_disable(priv->map);
+
+ /* Insert data from bit 27 lsb first */
+ regmap_update_bits(priv->map, SPDIFOUT_CTRL0,
+ SPDIFOUT_CTRL0_MSB_FIRST | SPDIFOUT_CTRL0_DATA_SEL,
+ 0);
+
+ /* Manual control of V, C and U, U = 0 */
+ regmap_update_bits(priv->map, SPDIFOUT_CTRL0,
+ SPDIFOUT_CTRL0_CHSTS_SEL | SPDIFOUT_CTRL0_VSEL |
+ SPDIFOUT_CTRL0_USEL | SPDIFOUT_CTRL0_USET,
+ 0);
+
+ /* Static SWAP configuration ATM */
+ regmap_write(priv->map, SPDIFOUT_SWAP, 0x10);
+
+ return 0;
+}
+
+static void axg_spdifout_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_spdifout *priv = snd_soc_dai_get_drvdata(dai);
+
+ clk_disable_unprepare(priv->pclk);
+}
+
+static const struct snd_soc_dai_ops axg_spdifout_ops = {
+ .trigger = axg_spdifout_trigger,
+ .digital_mute = axg_spdifout_digital_mute,
+ .hw_params = axg_spdifout_hw_params,
+ .startup = axg_spdifout_startup,
+ .shutdown = axg_spdifout_shutdown,
+};
+
+static struct snd_soc_dai_driver axg_spdifout_dai_drv[] = {
+ {
+ .name = "SPDIF Output",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = (SNDRV_PCM_RATE_32000 |
+ SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_176400 |
+ SNDRV_PCM_RATE_192000),
+ .formats = (SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ },
+ .ops = &axg_spdifout_ops,
+ },
+};
+
+static const char * const spdifout_sel_texts[] = {
+ "IN 0", "IN 1", "IN 2",
+};
+
+static SOC_ENUM_SINGLE_DECL(axg_spdifout_sel_enum, SPDIFOUT_CTRL1, 24,
+ spdifout_sel_texts);
+
+static const struct snd_kcontrol_new axg_spdifout_in_mux =
+ SOC_DAPM_ENUM("Input Source", axg_spdifout_sel_enum);
+
+static const struct snd_soc_dapm_widget axg_spdifout_dapm_widgets[] = {
+ SND_SOC_DAPM_AIF_IN("IN 0", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("IN 1", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("IN 2", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_MUX("SRC SEL", SND_SOC_NOPM, 0, 0, &axg_spdifout_in_mux),
+};
+
+static const struct snd_soc_dapm_route axg_spdifout_dapm_routes[] = {
+ { "SRC SEL", "IN 0", "IN 0" },
+ { "SRC SEL", "IN 1", "IN 1" },
+ { "SRC SEL", "IN 2", "IN 2" },
+ { "Playback", NULL, "SRC SEL" },
+};
+
+static const struct snd_kcontrol_new axg_spdifout_controls[] = {
+ SOC_DOUBLE("Playback Volume", SPDIFOUT_GAIN0, 0, 8, 255, 0),
+ SOC_DOUBLE("Playback Switch", SPDIFOUT_CTRL0, 22, 21, 1, 1),
+ SOC_SINGLE("Playback Gain Enable Switch",
+ SPDIFOUT_CTRL1, 26, 1, 0),
+ SOC_SINGLE("Playback Channels Mix Switch",
+ SPDIFOUT_CTRL0, 23, 1, 0),
+};
+
+static int axg_spdifout_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ struct axg_spdifout *priv = snd_soc_component_get_drvdata(component);
+ enum snd_soc_bias_level now =
+ snd_soc_component_get_bias_level(component);
+ int ret = 0;
+
+ switch (level) {
+ case SND_SOC_BIAS_PREPARE:
+ if (now == SND_SOC_BIAS_STANDBY)
+ ret = clk_prepare_enable(priv->mclk);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (now == SND_SOC_BIAS_PREPARE)
+ clk_disable_unprepare(priv->mclk);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ case SND_SOC_BIAS_ON:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct snd_soc_component_driver axg_spdifout_component_drv = {
+ .controls = axg_spdifout_controls,
+ .num_controls = ARRAY_SIZE(axg_spdifout_controls),
+ .dapm_widgets = axg_spdifout_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(axg_spdifout_dapm_widgets),
+ .dapm_routes = axg_spdifout_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(axg_spdifout_dapm_routes),
+ .set_bias_level = axg_spdifout_set_bias_level,
+};
+
+static const struct regmap_config axg_spdifout_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = SPDIFOUT_MUTE_VAL,
+};
+
+static const struct of_device_id axg_spdifout_of_match[] = {
+ { .compatible = "amlogic,axg-spdifout", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, axg_spdifout_of_match);
+
+static int axg_spdifout_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct axg_spdifout *priv;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ priv->map = devm_regmap_init_mmio(dev, regs, &axg_spdifout_regmap_cfg);
+ if (IS_ERR(priv->map)) {
+ dev_err(dev, "failed to init regmap: %ld\n",
+ PTR_ERR(priv->map));
+ return PTR_ERR(priv->map);
+ }
+
+ priv->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(priv->pclk)) {
+ ret = PTR_ERR(priv->pclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get pclk: %d\n", ret);
+ return ret;
+ }
+
+ priv->mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(priv->mclk)) {
+ ret = PTR_ERR(priv->mclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get mclk: %d\n", ret);
+ return ret;
+ }
+
+ return devm_snd_soc_register_component(dev, &axg_spdifout_component_drv,
+ axg_spdifout_dai_drv, ARRAY_SIZE(axg_spdifout_dai_drv));
+}
+
+static struct platform_driver axg_spdifout_pdrv = {
+ .probe = axg_spdifout_probe,
+ .driver = {
+ .name = "axg-spdifout",
+ .of_match_table = axg_spdifout_of_match,
+ },
+};
+module_platform_driver(axg_spdifout_pdrv);
+
+MODULE_DESCRIPTION("Amlogic AXG SPDIF Output driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c
new file mode 100644
index 000000000000..43e390f9358a
--- /dev/null
+++ b/sound/soc/meson/axg-tdm-formatter.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+
+#include "axg-tdm-formatter.h"
+
+struct axg_tdm_formatter {
+ struct list_head list;
+ struct axg_tdm_stream *stream;
+ const struct axg_tdm_formatter_driver *drv;
+ struct clk *pclk;
+ struct clk *sclk;
+ struct clk *lrclk;
+ struct clk *sclk_sel;
+ struct clk *lrclk_sel;
+ bool enabled;
+ struct regmap *map;
+};
+
+int axg_tdm_formatter_set_channel_masks(struct regmap *map,
+ struct axg_tdm_stream *ts,
+ unsigned int offset)
+{
+ unsigned int val, ch = ts->channels;
+ unsigned long mask;
+ int i, j;
+
+ /*
+ * Distribute the channels of the stream over the available slots
+ * of each TDM lane
+ */
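+ /*
+ * For example, a 4 channel stream with mask = { 0x3, 0x3, 0, 0 }
+ * enables slots 0 and 1 on lanes 0 and 1, and clears the mask
+ * registers of lanes 2 and 3.
+ */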
+ for (i = 0; i < AXG_TDM_NUM_LANES; i++) {
+ val = 0;
+ mask = ts->mask[i];
+
+ for (j = find_first_bit(&mask, 32);
+ (j < 32) && ch;
+ j = find_next_bit(&mask, 32, j + 1)) {
+ val |= 1 << j;
+ ch -= 1;
+ }
+
+ regmap_write(map, offset, val);
+ offset += regmap_get_reg_stride(map);
+ }
+
+ /*
+ * If we still have channels left at the end of the process, it means
+ * the stream has more channels than we can accommodate and we should
+ * have caught this earlier.
+ */
+ if (WARN_ON(ch != 0)) {
+ pr_err("channel mask error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks);
+
+static int axg_tdm_formatter_enable(struct axg_tdm_formatter *formatter)
+{
+ struct axg_tdm_stream *ts = formatter->stream;
+ bool invert = formatter->drv->invert_sclk;
+ int ret;
+
+ /* Do nothing if the formatter is already enabled */
+ if (formatter->enabled)
+ return 0;
+
+ /*
+ * If sclk is inverted, invert it back and provide the inversion
+ * required by the formatter
+ */
+ invert ^= axg_tdm_sclk_invert(ts->iface->fmt);
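+ /*
+ * e.g. with tdmout (invert_sclk set) and a format whose bit clock is
+ * already inverted, the two inversions cancel and the phase stays at
+ * 0 degrees.
+ */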
+ ret = clk_set_phase(formatter->sclk, invert ? 180 : 0);
+ if (ret)
+ return ret;
+
+ /* Setup the stream parameter in the formatter */
+ ret = formatter->drv->ops->prepare(formatter->map, formatter->stream);
+ if (ret)
+ return ret;
+
+ /* Enable the signal clocks feeding the formatter */
+ ret = clk_prepare_enable(formatter->sclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(formatter->lrclk);
+ if (ret) {
+ clk_disable_unprepare(formatter->sclk);
+ return ret;
+ }
+
+ /* Finally, actually enable the formatter */
+ formatter->drv->ops->enable(formatter->map);
+ formatter->enabled = true;
+
+ return 0;
+}
+
+static void axg_tdm_formatter_disable(struct axg_tdm_formatter *formatter)
+{
+ /* Do nothing if the formatter is already disabled */
+ if (!formatter->enabled)
+ return;
+
+ formatter->drv->ops->disable(formatter->map);
+ clk_disable_unprepare(formatter->lrclk);
+ clk_disable_unprepare(formatter->sclk);
+ formatter->enabled = false;
+}
+
+static int axg_tdm_formatter_attach(struct axg_tdm_formatter *formatter)
+{
+ struct axg_tdm_stream *ts = formatter->stream;
+ int ret = 0;
+
+ mutex_lock(&ts->lock);
+
+ /* Catch up if the stream is already running when we attach */
+ if (ts->ready) {
+ ret = axg_tdm_formatter_enable(formatter);
+ if (ret) {
+ pr_err("failed to enable formatter\n");
+ goto out;
+ }
+ }
+
+ list_add_tail(&formatter->list, &ts->formatter_list);
+out:
+ mutex_unlock(&ts->lock);
+ return ret;
+}
+
+static void axg_tdm_formatter_detach(struct axg_tdm_formatter *formatter)
+{
+ struct axg_tdm_stream *ts = formatter->stream;
+
+ mutex_lock(&ts->lock);
+ list_del(&formatter->list);
+ mutex_unlock(&ts->lock);
+
+ axg_tdm_formatter_disable(formatter);
+}
+
+static int axg_tdm_formatter_power_up(struct axg_tdm_formatter *formatter,
+ struct snd_soc_dapm_widget *w)
+{
+ struct axg_tdm_stream *ts = formatter->drv->ops->get_stream(w);
+ int ret;
+
+ /*
+ * If we don't get a stream at this stage, it means the widget is
+ * powering up without being attached to any backend DAI. This
+ * should never happen.
+ */
+ if (WARN_ON(!ts))
+ return -ENODEV;
+
+ /* Clock our device */
+ ret = clk_prepare_enable(formatter->pclk);
+ if (ret)
+ return ret;
+
+ /* Reparent the bit clock to the TDM interface */
+ ret = clk_set_parent(formatter->sclk_sel, ts->iface->sclk);
+ if (ret)
+ goto disable_pclk;
+
+ /* Reparent the sample clock to the TDM interface */
+ ret = clk_set_parent(formatter->lrclk_sel, ts->iface->lrclk);
+ if (ret)
+ goto disable_pclk;
+
+ formatter->stream = ts;
+ ret = axg_tdm_formatter_attach(formatter);
+ if (ret)
+ goto disable_pclk;
+
+ return 0;
+
+disable_pclk:
+ clk_disable_unprepare(formatter->pclk);
+ return ret;
+}
+
+static void axg_tdm_formatter_power_down(struct axg_tdm_formatter *formatter)
+{
+ axg_tdm_formatter_detach(formatter);
+ clk_disable_unprepare(formatter->pclk);
+ formatter->stream = NULL;
+}
+
+int axg_tdm_formatter_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *control,
+ int event)
+{
+ struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);
+ struct axg_tdm_formatter *formatter = snd_soc_component_get_drvdata(c);
+ int ret = 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ ret = axg_tdm_formatter_power_up(formatter, w);
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ axg_tdm_formatter_power_down(formatter);
+ break;
+
+ default:
+ dev_err(c->dev, "Unexpected event %d\n", event);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(axg_tdm_formatter_event);
+
+int axg_tdm_formatter_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct axg_tdm_formatter_driver *drv;
+ struct axg_tdm_formatter *formatter;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ drv = of_device_get_match_data(dev);
+ if (!drv) {
+ dev_err(dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
+ formatter = devm_kzalloc(dev, sizeof(*formatter), GFP_KERNEL);
+ if (!formatter)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, formatter);
+ formatter->drv = drv;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ formatter->map = devm_regmap_init_mmio(dev, regs, drv->regmap_cfg);
+ if (IS_ERR(formatter->map)) {
+ dev_err(dev, "failed to init regmap: %ld\n",
+ PTR_ERR(formatter->map));
+ return PTR_ERR(formatter->map);
+ }
+
+ /* Peripheral clock */
+ formatter->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(formatter->pclk)) {
+ ret = PTR_ERR(formatter->pclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get pclk: %d\n", ret);
+ return ret;
+ }
+
+ /* Formatter bit clock */
+ formatter->sclk = devm_clk_get(dev, "sclk");
+ if (IS_ERR(formatter->sclk)) {
+ ret = PTR_ERR(formatter->sclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get sclk: %d\n", ret);
+ return ret;
+ }
+
+ /* Formatter sample clock */
+ formatter->lrclk = devm_clk_get(dev, "lrclk");
+ if (IS_ERR(formatter->lrclk)) {
+ ret = PTR_ERR(formatter->lrclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get lrclk: %d\n", ret);
+ return ret;
+ }
+
+ /* Formatter bit clock input multiplexer */
+ formatter->sclk_sel = devm_clk_get(dev, "sclk_sel");
+ if (IS_ERR(formatter->sclk_sel)) {
+ ret = PTR_ERR(formatter->sclk_sel);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get sclk_sel: %d\n", ret);
+ return ret;
+ }
+
+ /* Formatter sample clock input multiplexer */
+ formatter->lrclk_sel = devm_clk_get(dev, "lrclk_sel");
+ if (IS_ERR(formatter->lrclk_sel)) {
+ ret = PTR_ERR(formatter->lrclk_sel);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get lrclk_sel: %d\n", ret);
+ return ret;
+ }
+
+ return devm_snd_soc_register_component(dev, drv->component_drv,
+ NULL, 0);
+}
+EXPORT_SYMBOL_GPL(axg_tdm_formatter_probe);
+
+int axg_tdm_stream_start(struct axg_tdm_stream *ts)
+{
+ struct axg_tdm_formatter *formatter;
+ int ret = 0;
+
+ mutex_lock(&ts->lock);
+ ts->ready = true;
+
+ /* Start all the formatters attached to the stream */
+ list_for_each_entry(formatter, &ts->formatter_list, list) {
+ ret = axg_tdm_formatter_enable(formatter);
+ if (ret) {
+ pr_err("failed to start tdm stream\n");
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&ts->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(axg_tdm_stream_start);
+
+void axg_tdm_stream_stop(struct axg_tdm_stream *ts)
+{
+ struct axg_tdm_formatter *formatter;
+
+ mutex_lock(&ts->lock);
+ ts->ready = false;
+
+ /* Stop all the formatters attached to the stream */
+ list_for_each_entry(formatter, &ts->formatter_list, list) {
+ axg_tdm_formatter_disable(formatter);
+ }
+
+ mutex_unlock(&ts->lock);
+}
+EXPORT_SYMBOL_GPL(axg_tdm_stream_stop);
+
+struct axg_tdm_stream *axg_tdm_stream_alloc(struct axg_tdm_iface *iface)
+{
+ struct axg_tdm_stream *ts;
+
+ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ if (ts) {
+ INIT_LIST_HEAD(&ts->formatter_list);
+ mutex_init(&ts->lock);
+ ts->iface = iface;
+ }
+
+ return ts;
+}
+EXPORT_SYMBOL_GPL(axg_tdm_stream_alloc);
+
+void axg_tdm_stream_free(struct axg_tdm_stream *ts)
+{
+ /*
+ * If the list is not empty, it means one of the formatter widgets
+ * is still powered and attached to the interface while we are
+ * removing the TDM DAI. This should not be possible.
+ */
+ WARN_ON(!list_empty(&ts->formatter_list));
+ mutex_destroy(&ts->lock);
+ kfree(ts);
+}
+EXPORT_SYMBOL_GPL(axg_tdm_stream_free);
+
+MODULE_DESCRIPTION("Amlogic AXG TDM formatter driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/meson/axg-tdm-formatter.h b/sound/soc/meson/axg-tdm-formatter.h
new file mode 100644
index 000000000000..cf947caf3cb1
--- /dev/null
+++ b/sound/soc/meson/axg-tdm-formatter.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ *
+ * Copyright (c) 2018 Baylibre SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef _MESON_AXG_TDM_FORMATTER_H
+#define _MESON_AXG_TDM_FORMATTER_H
+
+#include "axg-tdm.h"
+
+struct platform_device;
+struct regmap;
+struct snd_soc_dapm_widget;
+struct snd_kcontrol;
+
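+/*
+ * Formatter callbacks: prepare() programs the stream parameters into
+ * the formatter registers, while enable() and disable() gate the
+ * formatter itself. axg_tdm_formatter_enable() calls prepare() before
+ * enable().
+ */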
+struct axg_tdm_formatter_ops {
+ struct axg_tdm_stream *(*get_stream)(struct snd_soc_dapm_widget *w);
+ void (*enable)(struct regmap *map);
+ void (*disable)(struct regmap *map);
+ int (*prepare)(struct regmap *map, struct axg_tdm_stream *ts);
+};
+
+struct axg_tdm_formatter_driver {
+ const struct snd_soc_component_driver *component_drv;
+ const struct regmap_config *regmap_cfg;
+ const struct axg_tdm_formatter_ops *ops;
+ bool invert_sclk;
+};
+
+int axg_tdm_formatter_set_channel_masks(struct regmap *map,
+ struct axg_tdm_stream *ts,
+ unsigned int offset);
+int axg_tdm_formatter_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *control,
+ int event);
+int axg_tdm_formatter_probe(struct platform_device *pdev);
+
+#endif /* _MESON_AXG_TDM_FORMATTER_H */
diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
new file mode 100644
index 000000000000..7b8baf46d968
--- /dev/null
+++ b/sound/soc/meson/axg-tdm-interface.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "axg-tdm.h"
+
+enum {
+ TDM_IFACE_PAD,
+ TDM_IFACE_LOOPBACK,
+};
+
+static unsigned int axg_tdm_slots_total(u32 *mask)
+{
+ unsigned int slots = 0;
+ int i;
+
+ if (!mask)
+ return 0;
+
+ /* Count the total number of slots provided by all 4 lanes */
+ for (i = 0; i < AXG_TDM_NUM_LANES; i++)
+ slots += hweight32(mask[i]);
+
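+ /* e.g. mask = { 0xf, 0xf, 0, 0 } provides 8 slots in total */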
+ return slots;
+}
+
+int axg_tdm_set_tdm_slots(struct snd_soc_dai *dai, u32 *tx_mask,
+ u32 *rx_mask, unsigned int slots,
+ unsigned int slot_width)
+{
+ struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai);
+ struct axg_tdm_stream *tx = (struct axg_tdm_stream *)
+ dai->playback_dma_data;
+ struct axg_tdm_stream *rx = (struct axg_tdm_stream *)
+ dai->capture_dma_data;
+ unsigned int tx_slots, rx_slots;
+
+ tx_slots = axg_tdm_slots_total(tx_mask);
+ rx_slots = axg_tdm_slots_total(rx_mask);
+
+ /* We should at least have a slot for a valid interface */
+ if (!tx_slots && !rx_slots) {
+ dev_err(dai->dev, "interface has no slot\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Amend the dai driver channel count and let the dpcm channel
+ * merge do its job
+ */
+ if (tx) {
+ tx->mask = tx_mask;
+ dai->driver->playback.channels_max = tx_slots;
+ }
+
+ if (rx) {
+ rx->mask = rx_mask;
+ dai->driver->capture.channels_max = rx_slots;
+ }
+
+ iface->slots = slots;
+
+ switch (slot_width) {
+ case 0:
+ /* default to a slot width of 32 if not provided */
+ iface->slot_width = 32;
+ break;
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ iface->slot_width = slot_width;
+ break;
+ default:
+ dev_err(dai->dev, "unsupported slot width: %d\n", slot_width);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(axg_tdm_set_tdm_slots);
+
+static int axg_tdm_iface_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai);
+ int ret = -ENOTSUPP;
+
+ if (dir == SND_SOC_CLOCK_OUT && clk_id == 0) {
+ if (!iface->mclk) {
+ dev_warn(dai->dev, "master clock not provided\n");
+ } else {
+ ret = clk_set_rate(iface->mclk, freq);
+ if (!ret)
+ iface->mclk_rate = freq;
+ }
+ }
+
+ return ret;
+}
+
+static int axg_tdm_iface_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai);
+
+ /* These modes are not supported */
+ if (fmt & (SND_SOC_DAIFMT_CBS_CFM | SND_SOC_DAIFMT_CBM_CFS)) {
+ dev_err(dai->dev, "only CBS_CFS and CBM_CFM are supported\n");
+ return -EINVAL;
+ }
+
+ /* If the TDM interface is the clock master, it requires mclk */
+ if (!iface->mclk && (fmt & SND_SOC_DAIFMT_CBS_CFS)) {
+ dev_err(dai->dev, "cpu clock master: mclk missing\n");
+ return -ENODEV;
+ }
+
+ iface->fmt = fmt;
+ return 0;
+}
+
+static int axg_tdm_iface_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai);
+ struct axg_tdm_stream *ts =
+ snd_soc_dai_get_dma_data(dai, substream);
+ int ret;
+
+ if (!axg_tdm_slots_total(ts->mask)) {
+ dev_err(dai->dev, "interface has not slots\n");
+ return -EINVAL;
+ }
+
+ /* Apply component wide rate symmetry */
+ if (dai->component->active) {
+ ret = snd_pcm_hw_constraint_single(substream->runtime,
+ SNDRV_PCM_HW_PARAM_RATE,
+ iface->rate);
+ if (ret < 0) {
+ dev_err(dai->dev,
+ "can't set iface rate constraint\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int axg_tdm_iface_set_stream(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai);
+ struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream);
+ unsigned int channels = params_channels(params);
+ unsigned int width = params_width(params);
+
+ /* Save the rate for component wide symmetry */
+ iface->rate = params_rate(params);
+
+ /* Make sure this interface can cope with the stream */
+ if (axg_tdm_slots_total(ts->mask) < channels) {
+ dev_err(dai->dev, "not enough slots for channels\n");
+ return -EINVAL;
+ }
+
+ if (iface->slot_width < width) {
+ dev_err(dai->dev, "incompatible slots width for stream\n");
+ return -EINVAL;
+ }
+
+ /* Save the parameter for tdmout/tdmin widgets */
+ ts->physical_width = params_physical_width(params);
+ ts->width = params_width(params);
+ ts->channels = params_channels(params);
+
+ return 0;
+}
+
+static int axg_tdm_iface_set_lrclk(struct snd_soc_dai *dai,
+ struct snd_pcm_hw_params *params)
+{
+ struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai);
+ unsigned int ratio_num;
+ int ret;
+
+ ret = clk_set_rate(iface->lrclk, params_rate(params));
+ if (ret) {
+ dev_err(dai->dev, "setting sample clock failed: %d\n", ret);
+ return ret;
+ }
+
+ switch (iface->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_LEFT_J:
+ case SND_SOC_DAIFMT_RIGHT_J:
+ /* 50% duty cycle ratio */
+ ratio_num = 1;
+ break;
+
+ case SND_SOC_DAIFMT_DSP_A:
+ case SND_SOC_DAIFMT_DSP_B:
+ /*
+ * A zero duty cycle ratio will result in setting the minimum
+ * ratio possible which, for this clock, is one cycle of the
+ * parent bit clock high and the rest low. This is exactly
+ * what we want here.
+ */
+ ratio_num = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = clk_set_duty_cycle(iface->lrclk, ratio_num, 2);
+ if (ret) {
+ dev_err(dai->dev,
+ "setting sample clock duty cycle failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Set sample clock inversion */
+ ret = clk_set_phase(iface->lrclk,
+ axg_tdm_lrclk_invert(iface->fmt) ? 180 : 0);
+ if (ret) {
+ dev_err(dai->dev,
+ "setting sample clock phase failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axg_tdm_iface_set_sclk(struct snd_soc_dai *dai,
+ struct snd_pcm_hw_params *params)
+{
+ struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai);
+ unsigned long srate;
+ int ret;
+
+ srate = iface->slots * iface->slot_width * params_rate(params);
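+ /* e.g. 8 slots of 32 bits at 48kHz gives a 12.288MHz bit clock */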
+
+ if (!iface->mclk_rate) {
+ /* If no specific mclk is requested, default to bit clock * 4 */
+ clk_set_rate(iface->mclk, 4 * srate);
+ } else {
+ /* Check if we can actually get the bit clock from mclk */
+ if (iface->mclk_rate % srate) {
+ dev_err(dai->dev,
+ "can't derive sclk %lu from mclk %lu\n",
+ srate, iface->mclk_rate);
+ return -EINVAL;
+ }
+ }
+
+ ret = clk_set_rate(iface->sclk, srate);
+ if (ret) {
+ dev_err(dai->dev, "setting bit clock failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Set the bit clock inversion */
+ ret = clk_set_phase(iface->sclk,
+ axg_tdm_sclk_invert(iface->fmt) ? 0 : 180);
+ if (ret) {
+ dev_err(dai->dev, "setting bit clock phase failed: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int axg_tdm_iface_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ switch (iface->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_LEFT_J:
+ case SND_SOC_DAIFMT_RIGHT_J:
+ if (iface->slots > 2) {
+ dev_err(dai->dev, "bad slot number for format: %d\n",
+ iface->slots);
+ return -EINVAL;
+ }
+ break;
+
+ case SND_SOC_DAIFMT_DSP_A:
+ case SND_SOC_DAIFMT_DSP_B:
+ break;
+
+ default:
+ dev_err(dai->dev, "unsupported dai format\n");
+ return -EINVAL;
+ }
+
+ ret = axg_tdm_iface_set_stream(substream, params, dai);
+ if (ret)
+ return ret;
+
+ if (iface->fmt & SND_SOC_DAIFMT_CBS_CFS) {
+ ret = axg_tdm_iface_set_sclk(dai, params);
+ if (ret)
+ return ret;
+
+ ret = axg_tdm_iface_set_lrclk(dai, params);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axg_tdm_iface_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream);
+
+ /* Stop all attached formatters */
+ axg_tdm_stream_stop(ts);
+
+ return 0;
+}
+
+static int axg_tdm_iface_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream);
+
+ /* Force all attached formatters to update */
+ return axg_tdm_stream_reset(ts);
+}
+
+static int axg_tdm_iface_remove_dai(struct snd_soc_dai *dai)
+{
+ if (dai->capture_dma_data)
+ axg_tdm_stream_free(dai->capture_dma_data);
+
+ if (dai->playback_dma_data)
+ axg_tdm_stream_free(dai->playback_dma_data);
+
+ return 0;
+}
+
+static int axg_tdm_iface_probe_dai(struct snd_soc_dai *dai)
+{
+ struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai);
+
+ if (dai->capture_widget) {
+ dai->capture_dma_data = axg_tdm_stream_alloc(iface);
+ if (!dai->capture_dma_data)
+ return -ENOMEM;
+ }
+
+ if (dai->playback_widget) {
+ dai->playback_dma_data = axg_tdm_stream_alloc(iface);
+ if (!dai->playback_dma_data) {
+ axg_tdm_iface_remove_dai(dai);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops axg_tdm_iface_ops = {
+ .set_sysclk = axg_tdm_iface_set_sysclk,
+ .set_fmt = axg_tdm_iface_set_fmt,
+ .startup = axg_tdm_iface_startup,
+ .hw_params = axg_tdm_iface_hw_params,
+ .prepare = axg_tdm_iface_prepare,
+ .hw_free = axg_tdm_iface_hw_free,
+};
+
+/* TDM Backend DAIs */
+static const struct snd_soc_dai_driver axg_tdm_iface_dai_drv[] = {
+ [TDM_IFACE_PAD] = {
+ .name = "TDM Pad",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = AXG_TDM_CHANNEL_MAX,
+ .rates = AXG_TDM_RATES,
+ .formats = AXG_TDM_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = AXG_TDM_CHANNEL_MAX,
+ .rates = AXG_TDM_RATES,
+ .formats = AXG_TDM_FORMATS,
+ },
+ .id = TDM_IFACE_PAD,
+ .ops = &axg_tdm_iface_ops,
+ .probe = axg_tdm_iface_probe_dai,
+ .remove = axg_tdm_iface_remove_dai,
+ },
+ [TDM_IFACE_LOOPBACK] = {
+ .name = "TDM Loopback",
+ .capture = {
+ .stream_name = "Loopback",
+ .channels_min = 1,
+ .channels_max = AXG_TDM_CHANNEL_MAX,
+ .rates = AXG_TDM_RATES,
+ .formats = AXG_TDM_FORMATS,
+ },
+ .id = TDM_IFACE_LOOPBACK,
+ .ops = &axg_tdm_iface_ops,
+ .probe = axg_tdm_iface_probe_dai,
+ .remove = axg_tdm_iface_remove_dai,
+ },
+};
+
+static int axg_tdm_iface_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ struct axg_tdm_iface *iface = snd_soc_component_get_drvdata(component);
+ enum snd_soc_bias_level now =
+ snd_soc_component_get_bias_level(component);
+ int ret = 0;
+
+ switch (level) {
+ case SND_SOC_BIAS_PREPARE:
+ if (now == SND_SOC_BIAS_STANDBY)
+ ret = clk_prepare_enable(iface->mclk);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (now == SND_SOC_BIAS_PREPARE)
+ clk_disable_unprepare(iface->mclk);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ case SND_SOC_BIAS_ON:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct snd_soc_component_driver axg_tdm_iface_component_drv = {
+ .set_bias_level = axg_tdm_iface_set_bias_level,
+};
+
+static const struct of_device_id axg_tdm_iface_of_match[] = {
+ { .compatible = "amlogic,axg-tdm-iface", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, axg_tdm_iface_of_match);
+
+static int axg_tdm_iface_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct snd_soc_dai_driver *dai_drv;
+ struct axg_tdm_iface *iface;
+ int ret, i;
+
+ iface = devm_kzalloc(dev, sizeof(*iface), GFP_KERNEL);
+ if (!iface)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, iface);
+
+ /*
+ * Duplicate the dai driver: depending on the slot mask
+ * configuration, we'll change the number of channels provided by
+ * the DAI streams, so dpcm channel merge can be done properly
+ */
+ dai_drv = devm_kcalloc(dev, ARRAY_SIZE(axg_tdm_iface_dai_drv),
+ sizeof(*dai_drv), GFP_KERNEL);
+ if (!dai_drv)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(axg_tdm_iface_dai_drv); i++)
+ memcpy(&dai_drv[i], &axg_tdm_iface_dai_drv[i],
+ sizeof(*dai_drv));
+
+ /* Bit clock provided on the pad */
+ iface->sclk = devm_clk_get(dev, "sclk");
+ if (IS_ERR(iface->sclk)) {
+ ret = PTR_ERR(iface->sclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get sclk: %d\n", ret);
+ return ret;
+ }
+
+ /* Sample clock provided on the pad */
+ iface->lrclk = devm_clk_get(dev, "lrclk");
+ if (IS_ERR(iface->lrclk)) {
+ ret = PTR_ERR(iface->lrclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get lrclk: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * mclk may be missing when the cpu dai is in slave mode and
+ * the codec does not require it to provide a master clock.
+ * At this point, ignore the error if mclk is missing. We'll
+ * throw an error later if the cpu dai is master and mclk is
+ * missing.
+ */
+ iface->mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(iface->mclk)) {
+ ret = PTR_ERR(iface->mclk);
+ if (ret == -ENOENT) {
+ iface->mclk = NULL;
+ } else {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get mclk: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return devm_snd_soc_register_component(dev,
+ &axg_tdm_iface_component_drv, dai_drv,
+ ARRAY_SIZE(axg_tdm_iface_dai_drv));
+}
+
+static struct platform_driver axg_tdm_iface_pdrv = {
+ .probe = axg_tdm_iface_probe,
+ .driver = {
+ .name = "axg-tdm-iface",
+ .of_match_table = axg_tdm_iface_of_match,
+ },
+};
+module_platform_driver(axg_tdm_iface_pdrv);
+
+MODULE_DESCRIPTION("Amlogic AXG TDM interface driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/meson/axg-tdm.h b/sound/soc/meson/axg-tdm.h
new file mode 100644
index 000000000000..e578b6f40a07
--- /dev/null
+++ b/sound/soc/meson/axg-tdm.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ *
+ * Copyright (c) 2018 Baylibre SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef _MESON_AXG_TDM_H
+#define _MESON_AXG_TDM_H
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#define AXG_TDM_NUM_LANES 4
+#define AXG_TDM_CHANNEL_MAX 128
+#define AXG_TDM_RATES (SNDRV_PCM_RATE_5512 | \
+ SNDRV_PCM_RATE_8000_192000)
+#define AXG_TDM_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
+ SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+struct axg_tdm_iface {
+ struct clk *sclk;
+ struct clk *lrclk;
+ struct clk *mclk;
+ unsigned long mclk_rate;
+
+ /* format is common to all the DAIs of the iface */
+ unsigned int fmt;
+ unsigned int slots;
+ unsigned int slot_width;
+
+ /* For component wide symmetry */
+ int rate;
+};
+
+static inline bool axg_tdm_lrclk_invert(unsigned int fmt)
+{
+ return (fmt & SND_SOC_DAIFMT_I2S) ^
+ !!(fmt & (SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_NB_IF));
+}
+
+static inline bool axg_tdm_sclk_invert(unsigned int fmt)
+{
+ return fmt & (SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_IB_NF);
+}
+
+struct axg_tdm_stream {
+ struct axg_tdm_iface *iface;
+ struct list_head formatter_list;
+ struct mutex lock;
+ unsigned int channels;
+ unsigned int width;
+ unsigned int physical_width;
+ u32 *mask;
+ bool ready;
+};
+
+struct axg_tdm_stream *axg_tdm_stream_alloc(struct axg_tdm_iface *iface);
+void axg_tdm_stream_free(struct axg_tdm_stream *ts);
+int axg_tdm_stream_start(struct axg_tdm_stream *ts);
+void axg_tdm_stream_stop(struct axg_tdm_stream *ts);
+
+static inline int axg_tdm_stream_reset(struct axg_tdm_stream *ts)
+{
+ axg_tdm_stream_stop(ts);
+ return axg_tdm_stream_start(ts);
+}
+
+int axg_tdm_set_tdm_slots(struct snd_soc_dai *dai, u32 *tx_mask,
+ u32 *rx_mask, unsigned int slots,
+ unsigned int slot_width);
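+
+/*
+ * Illustrative use only: a machine or codec driver setting up a stereo
+ * link on slots 0 and 1 of lane 0, 32 bits wide, could do:
+ *
+ *	u32 tx_mask[AXG_TDM_NUM_LANES] = { 0x3, 0, 0, 0 };
+ *	axg_tdm_set_tdm_slots(dai, tx_mask, NULL, 2, 32);
+ *
+ * Note that the mask arrays are stored, not copied, so they must
+ * outlive the dai.
+ */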
+
+#endif /* _MESON_AXG_TDM_H */
diff --git a/sound/soc/meson/axg-tdmin.c b/sound/soc/meson/axg-tdmin.c
new file mode 100644
index 000000000000..bbac44c81688
--- /dev/null
+++ b/sound/soc/meson/axg-tdmin.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "axg-tdm-formatter.h"
+
+#define TDMIN_CTRL 0x00
+#define TDMIN_CTRL_ENABLE BIT(31)
+#define TDMIN_CTRL_I2S_MODE BIT(30)
+#define TDMIN_CTRL_RST_OUT BIT(29)
+#define TDMIN_CTRL_RST_IN BIT(28)
+#define TDMIN_CTRL_WS_INV BIT(25)
+#define TDMIN_CTRL_SEL_SHIFT 20
+#define TDMIN_CTRL_IN_BIT_SKEW_MASK GENMASK(18, 16)
+#define TDMIN_CTRL_IN_BIT_SKEW(x) ((x) << 16)
+#define TDMIN_CTRL_LSB_FIRST BIT(5)
+#define TDMIN_CTRL_BITNUM_MASK GENMASK(4, 0)
+#define TDMIN_CTRL_BITNUM(x) ((x) << 0)
+#define TDMIN_SWAP 0x04
+#define TDMIN_MASK0 0x08
+#define TDMIN_MASK1 0x0c
+#define TDMIN_MASK2 0x10
+#define TDMIN_MASK3 0x14
+#define TDMIN_STAT 0x18
+#define TDMIN_MUTE_VAL 0x1c
+#define TDMIN_MUTE0 0x20
+#define TDMIN_MUTE1 0x24
+#define TDMIN_MUTE2 0x28
+#define TDMIN_MUTE3 0x2c
+
+static const struct regmap_config axg_tdmin_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = TDMIN_MUTE3,
+};
+
+static const char * const axg_tdmin_sel_texts[] = {
+ "IN 0", "IN 1", "IN 2", "IN 3", "IN 4", "IN 5",
+};
+
+/* Change to special mux control to reset dapm */
+static SOC_ENUM_SINGLE_DECL(axg_tdmin_sel_enum, TDMIN_CTRL,
+ TDMIN_CTRL_SEL_SHIFT, axg_tdmin_sel_texts);
+
+static const struct snd_kcontrol_new axg_tdmin_in_mux =
+ SOC_DAPM_ENUM("Input Source", axg_tdmin_sel_enum);
+
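+/*
+ * Walk the DAPM graph upstream from the formatter widget until the
+ * backend DAI feeding it is found
+ */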
+static struct snd_soc_dai *
+axg_tdmin_get_be(struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_dapm_path *p = NULL;
+ struct snd_soc_dai *be;
+
+ snd_soc_dapm_widget_for_each_source_path(w, p) {
+ if (!p->connect)
+ continue;
+
+ if (p->source->id == snd_soc_dapm_dai_out)
+ return (struct snd_soc_dai *)p->source->priv;
+
+ be = axg_tdmin_get_be(p->source);
+ if (be)
+ return be;
+ }
+
+ return NULL;
+}
+
+static struct axg_tdm_stream *
+axg_tdmin_get_tdm_stream(struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_dai *be = axg_tdmin_get_be(w);
+
+ if (!be)
+ return NULL;
+
+ return be->capture_dma_data;
+}
+
+static void axg_tdmin_enable(struct regmap *map)
+{
+ /* Apply both resets */
+ regmap_update_bits(map, TDMIN_CTRL,
+ TDMIN_CTRL_RST_OUT | TDMIN_CTRL_RST_IN, 0);
+
+ /* Clear the out reset before the in reset */
+ regmap_update_bits(map, TDMIN_CTRL,
+ TDMIN_CTRL_RST_OUT, TDMIN_CTRL_RST_OUT);
+ regmap_update_bits(map, TDMIN_CTRL,
+ TDMIN_CTRL_RST_IN, TDMIN_CTRL_RST_IN);
+
+ /* Actually enable tdmin */
+ regmap_update_bits(map, TDMIN_CTRL,
+ TDMIN_CTRL_ENABLE, TDMIN_CTRL_ENABLE);
+}
+
+static void axg_tdmin_disable(struct regmap *map)
+{
+ regmap_update_bits(map, TDMIN_CTRL, TDMIN_CTRL_ENABLE, 0);
+}
+
+static int axg_tdmin_prepare(struct regmap *map, struct axg_tdm_stream *ts)
+{
+ unsigned int val = 0;
+
+ /* Set stream skew */
+ switch (ts->iface->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_DSP_A:
+ val |= TDMIN_CTRL_IN_BIT_SKEW(3);
+ break;
+
+ case SND_SOC_DAIFMT_LEFT_J:
+ case SND_SOC_DAIFMT_RIGHT_J:
+ case SND_SOC_DAIFMT_DSP_B:
+ val = TDMIN_CTRL_IN_BIT_SKEW(2);
+ break;
+
+ default:
+ pr_err("Unsupported format: %u\n",
+ ts->iface->fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+ return -EINVAL;
+ }
+
+ /* Set stream format mode */
+ switch (ts->iface->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_LEFT_J:
+ case SND_SOC_DAIFMT_RIGHT_J:
+ val |= TDMIN_CTRL_I2S_MODE;
+ break;
+ }
+
+ /* If the sample clock is inverted, invert it back for the formatter */
+ if (axg_tdm_lrclk_invert(ts->iface->fmt))
+ val |= TDMIN_CTRL_WS_INV;
+
+ /* Set the slot width */
+ val |= TDMIN_CTRL_BITNUM(ts->iface->slot_width - 1);
+
+ /*
+ * The following also resets LSB_FIRST, which results in the
+ * formatter placing the first bit received at bit 31
+ */
+ regmap_update_bits(map, TDMIN_CTRL,
+ (TDMIN_CTRL_IN_BIT_SKEW_MASK | TDMIN_CTRL_WS_INV |
+ TDMIN_CTRL_I2S_MODE | TDMIN_CTRL_LSB_FIRST |
+ TDMIN_CTRL_BITNUM_MASK), val);
+
+ /* Set static swap mask configuration */
+ regmap_write(map, TDMIN_SWAP, 0x76543210);
+
+ return axg_tdm_formatter_set_channel_masks(map, ts, TDMIN_MASK0);
+}
+
+static const struct snd_soc_dapm_widget axg_tdmin_dapm_widgets[] = {
+ SND_SOC_DAPM_AIF_IN("IN 0", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("IN 1", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("IN 2", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("IN 3", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("IN 4", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("IN 5", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_MUX("SRC SEL", SND_SOC_NOPM, 0, 0, &axg_tdmin_in_mux),
+ SND_SOC_DAPM_PGA_E("DEC", SND_SOC_NOPM, 0, 0, NULL, 0,
+ axg_tdm_formatter_event,
+ (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD)),
+ SND_SOC_DAPM_AIF_OUT("OUT", NULL, 0, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_route axg_tdmin_dapm_routes[] = {
+ { "SRC SEL", "IN 0", "IN 0" },
+ { "SRC SEL", "IN 1", "IN 1" },
+ { "SRC SEL", "IN 2", "IN 2" },
+ { "SRC SEL", "IN 3", "IN 3" },
+ { "SRC SEL", "IN 4", "IN 4" },
+ { "SRC SEL", "IN 5", "IN 5" },
+ { "DEC", NULL, "SRC SEL" },
+ { "OUT", NULL, "DEC" },
+};
+
+static const struct snd_soc_component_driver axg_tdmin_component_drv = {
+ .dapm_widgets = axg_tdmin_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(axg_tdmin_dapm_widgets),
+ .dapm_routes = axg_tdmin_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(axg_tdmin_dapm_routes),
+};
+
+static const struct axg_tdm_formatter_ops axg_tdmin_ops = {
+ .get_stream = axg_tdmin_get_tdm_stream,
+ .prepare = axg_tdmin_prepare,
+ .enable = axg_tdmin_enable,
+ .disable = axg_tdmin_disable,
+};
+
+static const struct axg_tdm_formatter_driver axg_tdmin_drv = {
+ .component_drv = &axg_tdmin_component_drv,
+ .regmap_cfg = &axg_tdmin_regmap_cfg,
+ .ops = &axg_tdmin_ops,
+ .invert_sclk = false,
+};
+
+static const struct of_device_id axg_tdmin_of_match[] = {
+ {
+ .compatible = "amlogic,axg-tdmin",
+ .data = &axg_tdmin_drv,
+ }, {}
+};
+MODULE_DEVICE_TABLE(of, axg_tdmin_of_match);
+
+static struct platform_driver axg_tdmin_pdrv = {
+ .probe = axg_tdm_formatter_probe,
+ .driver = {
+ .name = "axg-tdmin",
+ .of_match_table = axg_tdmin_of_match,
+ },
+};
+module_platform_driver(axg_tdmin_pdrv);
+
+MODULE_DESCRIPTION("Amlogic AXG TDM input formatter driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/meson/axg-tdmout.c b/sound/soc/meson/axg-tdmout.c
new file mode 100644
index 000000000000..f73368ee1088
--- /dev/null
+++ b/sound/soc/meson/axg-tdmout.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "axg-tdm-formatter.h"
+
+#define TDMOUT_CTRL0 0x00
+#define TDMOUT_CTRL0_BITNUM_MASK GENMASK(4, 0)
+#define TDMOUT_CTRL0_BITNUM(x) ((x) << 0)
+#define TDMOUT_CTRL0_SLOTNUM_MASK GENMASK(9, 5)
+#define TDMOUT_CTRL0_SLOTNUM(x) ((x) << 5)
+#define TDMOUT_CTRL0_INIT_BITNUM_MASK GENMASK(19, 15)
+#define TDMOUT_CTRL0_INIT_BITNUM(x) ((x) << 15)
+#define TDMOUT_CTRL0_ENABLE BIT(31)
+#define TDMOUT_CTRL0_RST_OUT BIT(29)
+#define TDMOUT_CTRL0_RST_IN BIT(28)
+#define TDMOUT_CTRL1 0x04
+#define TDMOUT_CTRL1_TYPE_MASK GENMASK(6, 4)
+#define TDMOUT_CTRL1_TYPE(x) ((x) << 4)
+#define TDMOUT_CTRL1_MSB_POS_MASK GENMASK(12, 8)
+#define TDMOUT_CTRL1_MSB_POS(x) ((x) << 8)
+#define TDMOUT_CTRL1_SEL_SHIFT 24
+#define TDMOUT_CTRL1_GAIN_EN 26
+#define TDMOUT_CTRL1_WS_INV BIT(28)
+#define TDMOUT_SWAP 0x08
+#define TDMOUT_MASK0 0x0c
+#define TDMOUT_MASK1 0x10
+#define TDMOUT_MASK2 0x14
+#define TDMOUT_MASK3 0x18
+#define TDMOUT_STAT 0x1c
+#define TDMOUT_GAIN0 0x20
+#define TDMOUT_GAIN1 0x24
+#define TDMOUT_MUTE_VAL 0x28
+#define TDMOUT_MUTE0 0x2c
+#define TDMOUT_MUTE1 0x30
+#define TDMOUT_MUTE2 0x34
+#define TDMOUT_MUTE3 0x38
+#define TDMOUT_MASK_VAL 0x3c
+
+static const struct regmap_config axg_tdmout_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = TDMOUT_MASK_VAL,
+};
+
+static const struct snd_kcontrol_new axg_tdmout_controls[] = {
+ SOC_DOUBLE("Lane 0 Volume", TDMOUT_GAIN0, 0, 8, 255, 0),
+ SOC_DOUBLE("Lane 1 Volume", TDMOUT_GAIN0, 16, 24, 255, 0),
+ SOC_DOUBLE("Lane 2 Volume", TDMOUT_GAIN1, 0, 8, 255, 0),
+ SOC_DOUBLE("Lane 3 Volume", TDMOUT_GAIN1, 16, 24, 255, 0),
+ SOC_SINGLE("Gain Enable Switch", TDMOUT_CTRL1,
+ TDMOUT_CTRL1_GAIN_EN, 1, 0),
+};
+
+static const char * const tdmout_sel_texts[] = {
+ "IN 0", "IN 1", "IN 2",
+};
+
+static SOC_ENUM_SINGLE_DECL(axg_tdmout_sel_enum, TDMOUT_CTRL1,
+ TDMOUT_CTRL1_SEL_SHIFT, tdmout_sel_texts);
+
+static const struct snd_kcontrol_new axg_tdmout_in_mux =
+ SOC_DAPM_ENUM("Input Source", axg_tdmout_sel_enum);
+
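+/*
+ * Walk the DAPM graph downstream from the formatter widget until the
+ * backend DAI it feeds is found
+ */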
+static struct snd_soc_dai *
+axg_tdmout_get_be(struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_dapm_path *p = NULL;
+ struct snd_soc_dai *be;
+
+ snd_soc_dapm_widget_for_each_sink_path(w, p) {
+ if (!p->connect)
+ continue;
+
+ if (p->sink->id == snd_soc_dapm_dai_in)
+ return (struct snd_soc_dai *)p->sink->priv;
+
+ be = axg_tdmout_get_be(p->sink);
+ if (be)
+ return be;
+ }
+
+ return NULL;
+}
+
+static struct axg_tdm_stream *
+axg_tdmout_get_tdm_stream(struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_dai *be = axg_tdmout_get_be(w);
+
+ if (!be)
+ return NULL;
+
+ return be->playback_dma_data;
+}
+
+static void axg_tdmout_enable(struct regmap *map)
+{
+ /* Apply both resets */
+ regmap_update_bits(map, TDMOUT_CTRL0,
+ TDMOUT_CTRL0_RST_OUT | TDMOUT_CTRL0_RST_IN, 0);
+
+ /* Clear the out reset before the in reset */
+ regmap_update_bits(map, TDMOUT_CTRL0,
+ TDMOUT_CTRL0_RST_OUT, TDMOUT_CTRL0_RST_OUT);
+ regmap_update_bits(map, TDMOUT_CTRL0,
+ TDMOUT_CTRL0_RST_IN, TDMOUT_CTRL0_RST_IN);
+
+ /* Actually enable tdmout */
+ regmap_update_bits(map, TDMOUT_CTRL0,
+ TDMOUT_CTRL0_ENABLE, TDMOUT_CTRL0_ENABLE);
+}
+
+static void axg_tdmout_disable(struct regmap *map)
+{
+ regmap_update_bits(map, TDMOUT_CTRL0, TDMOUT_CTRL0_ENABLE, 0);
+}
+
+static int axg_tdmout_prepare(struct regmap *map, struct axg_tdm_stream *ts)
+{
+ unsigned int val = 0;
+
+ /* Set the stream skew */
+ switch (ts->iface->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_DSP_A:
+ val |= TDMOUT_CTRL0_INIT_BITNUM(1);
+ break;
+
+ case SND_SOC_DAIFMT_LEFT_J:
+ case SND_SOC_DAIFMT_RIGHT_J:
+ case SND_SOC_DAIFMT_DSP_B:
+ val |= TDMOUT_CTRL0_INIT_BITNUM(2);
+ break;
+
+ default:
+ pr_err("Unsupported format: %u\n",
+ ts->iface->fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+ return -EINVAL;
+ }
+
+ /* Set the slot width */
+ val |= TDMOUT_CTRL0_BITNUM(ts->iface->slot_width - 1);
+
+ /* Set the slot number */
+ val |= TDMOUT_CTRL0_SLOTNUM(ts->iface->slots - 1);
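+ /* e.g. 8 slots of 32 bits: BITNUM = 31, SLOTNUM = 7 */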
+
+ regmap_update_bits(map, TDMOUT_CTRL0,
+ TDMOUT_CTRL0_INIT_BITNUM_MASK |
+ TDMOUT_CTRL0_BITNUM_MASK |
+ TDMOUT_CTRL0_SLOTNUM_MASK, val);
+
+ /* Set the sample width */
+ val = TDMOUT_CTRL1_MSB_POS(ts->width - 1);
+
+ /* FIFO data are arranged in chunks of 64 bits */
+ switch (ts->physical_width) {
+ case 8:
+ /* 8 samples of 8 bits */
+ val |= TDMOUT_CTRL1_TYPE(0);
+ break;
+ case 16:
+ /* 4 samples of 16 bits - right justified */
+ val |= TDMOUT_CTRL1_TYPE(2);
+ break;
+ case 32:
+ /* 2 samples of 32 bits - right justified */
+ val |= TDMOUT_CTRL1_TYPE(4);
+ break;
+ default:
+ pr_err("Unsupported physical width: %u\n",
+ ts->physical_width);
+ return -EINVAL;
+ }
+
+ /* If the sample clock is inverted, invert it back for the formatter */
+ if (axg_tdm_lrclk_invert(ts->iface->fmt))
+ val |= TDMOUT_CTRL1_WS_INV;
+
+ regmap_update_bits(map, TDMOUT_CTRL1,
+ (TDMOUT_CTRL1_TYPE_MASK | TDMOUT_CTRL1_MSB_POS_MASK |
+ TDMOUT_CTRL1_WS_INV), val);
+
+ /* Set static swap mask configuration */
+ regmap_write(map, TDMOUT_SWAP, 0x76543210);
+
+ return axg_tdm_formatter_set_channel_masks(map, ts, TDMOUT_MASK0);
+}
+
+static const struct snd_soc_dapm_widget axg_tdmout_dapm_widgets[] = {
+ SND_SOC_DAPM_AIF_IN("IN 0", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("IN 1", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("IN 2", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_MUX("SRC SEL", SND_SOC_NOPM, 0, 0, &axg_tdmout_in_mux),
+ SND_SOC_DAPM_PGA_E("ENC", SND_SOC_NOPM, 0, 0, NULL, 0,
+ axg_tdm_formatter_event,
+ (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD)),
+ SND_SOC_DAPM_AIF_OUT("OUT", NULL, 0, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_route axg_tdmout_dapm_routes[] = {
+ { "SRC SEL", "IN 0", "IN 0" },
+ { "SRC SEL", "IN 1", "IN 1" },
+ { "SRC SEL", "IN 2", "IN 2" },
+ { "ENC", NULL, "SRC SEL" },
+ { "OUT", NULL, "ENC" },
+};
+
+static const struct snd_soc_component_driver axg_tdmout_component_drv = {
+ .controls = axg_tdmout_controls,
+ .num_controls = ARRAY_SIZE(axg_tdmout_controls),
+ .dapm_widgets = axg_tdmout_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(axg_tdmout_dapm_widgets),
+ .dapm_routes = axg_tdmout_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(axg_tdmout_dapm_routes),
+};
+
+static const struct axg_tdm_formatter_ops axg_tdmout_ops = {
+ .get_stream = axg_tdmout_get_tdm_stream,
+ .prepare = axg_tdmout_prepare,
+ .enable = axg_tdmout_enable,
+ .disable = axg_tdmout_disable,
+};
+
+static const struct axg_tdm_formatter_driver axg_tdmout_drv = {
+ .component_drv = &axg_tdmout_component_drv,
+ .regmap_cfg = &axg_tdmout_regmap_cfg,
+ .ops = &axg_tdmout_ops,
+ .invert_sclk = true,
+};
+
+static const struct of_device_id axg_tdmout_of_match[] = {
+ {
+ .compatible = "amlogic,axg-tdmout",
+ .data = &axg_tdmout_drv,
+ }, {}
+};
+MODULE_DEVICE_TABLE(of, axg_tdmout_of_match);
+
+static struct platform_driver axg_tdmout_pdrv = {
+ .probe = axg_tdm_formatter_probe,
+ .driver = {
+ .name = "axg-tdmout",
+ .of_match_table = axg_tdmout_of_match,
+ },
+};
+module_platform_driver(axg_tdmout_pdrv);
+
+MODULE_DESCRIPTION("Amlogic AXG TDM output formatter driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/meson/axg-toddr.c b/sound/soc/meson/axg-toddr.c
new file mode 100644
index 000000000000..c2c9bb312586
--- /dev/null
+++ b/sound/soc/meson/axg-toddr.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+/* This driver implements the frontend capture DAI of AXG-based SoCs */
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "axg-fifo.h"
+
+#define CTRL0_TODDR_SEL_RESAMPLE BIT(30)
+#define CTRL0_TODDR_EXT_SIGNED BIT(29)
+#define CTRL0_TODDR_PP_MODE BIT(28)
+#define CTRL0_TODDR_TYPE_MASK GENMASK(15, 13)
+#define CTRL0_TODDR_TYPE(x) ((x) << 13)
+#define CTRL0_TODDR_MSB_POS_MASK GENMASK(12, 8)
+#define CTRL0_TODDR_MSB_POS(x) ((x) << 8)
+#define CTRL0_TODDR_LSB_POS_MASK GENMASK(7, 3)
+#define CTRL0_TODDR_LSB_POS(x) ((x) << 3)
+
+static int axg_toddr_pcm_new(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *dai)
+{
+ return axg_fifo_pcm_new(rtd, SNDRV_PCM_STREAM_CAPTURE);
+}
+
+static int axg_toddr_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct axg_fifo *fifo = snd_soc_dai_get_drvdata(dai);
+ unsigned int type, width, msb = 31;
+
+ /*
+ * NOTE:
+ * Almost all backends place the MSB at bit 31, except the SPDIF input,
+ * which puts it at index 28. When adding support for the SPDIF input,
+ * we'll need to find out which type of backend we are connected to.
+ */
+
+ switch (params_physical_width(params)) {
+ case 8:
+ type = 0; /* 8 samples of 8 bits */
+ break;
+ case 16:
+ type = 2; /* 4 samples of 16 bits - right justified */
+ break;
+ case 32:
+ type = 4; /* 2 samples of 32 bits - right justified */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ width = params_width(params);
+
+ regmap_update_bits(fifo->map, FIFO_CTRL0,
+ CTRL0_TODDR_TYPE_MASK |
+ CTRL0_TODDR_MSB_POS_MASK |
+ CTRL0_TODDR_LSB_POS_MASK,
+ CTRL0_TODDR_TYPE(type) |
+ CTRL0_TODDR_MSB_POS(msb) |
+ CTRL0_TODDR_LSB_POS(msb - (width - 1)));
+
+ return 0;
+}
+
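For reference, a worked example of the bit window programmed above (the S24_LE figures are illustrative, not taken from the driver): params_width() is 24 and params_physical_width() is 32, so type = 4, the MSB stays at bit 31 and the LSB lands at 31 - (24 - 1) = 8. A standalone sketch of the arithmetic:

	/* TODDR bit-window arithmetic; builds with any C compiler. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int msb = 31;		/* fixed for most backends */
		unsigned int width = 24;	/* e.g. S24_LE */
		unsigned int lsb = msb - (width - 1);

		printf("MSB_POS=%u LSB_POS=%u\n", msb, lsb);	/* 31 and 8 */
		return 0;
	}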
+static int axg_toddr_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_fifo *fifo = snd_soc_dai_get_drvdata(dai);
+ unsigned int fifo_threshold;
+ int ret;
+
+ /* Enable pclk to access registers and clock the FIFO IP */
+ ret = clk_prepare_enable(fifo->pclk);
+ if (ret)
+ return ret;
+
+ /* Select original data - resampling not supported ATM */
+ regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_SEL_RESAMPLE, 0);
+
+ /* Only signed formats are supported ATM */
+ regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_EXT_SIGNED,
+ CTRL0_TODDR_EXT_SIGNED);
+
+ /* Apply single buffer mode to the interface */
+ regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_PP_MODE, 0);
+
+ /* TODDR does not have a configurable fifo depth */
+ fifo_threshold = AXG_FIFO_MIN_CNT - 1;
+ regmap_update_bits(fifo->map, FIFO_CTRL1, CTRL1_THRESHOLD_MASK,
+ CTRL1_THRESHOLD(fifo_threshold));
+
+ return 0;
+}
+
+static void axg_toddr_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_fifo *fifo = snd_soc_dai_get_drvdata(dai);
+
+ clk_disable_unprepare(fifo->pclk);
+}
+
+static const struct snd_soc_dai_ops axg_toddr_ops = {
+ .hw_params = axg_toddr_dai_hw_params,
+ .startup = axg_toddr_dai_startup,
+ .shutdown = axg_toddr_dai_shutdown,
+};
+
+static struct snd_soc_dai_driver axg_toddr_dai_drv = {
+ .name = "TODDR",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = AXG_FIFO_CH_MAX,
+ .rates = AXG_FIFO_RATES,
+ .formats = AXG_FIFO_FORMATS,
+ },
+ .ops = &axg_toddr_ops,
+ .pcm_new = axg_toddr_pcm_new,
+};
+
+static const char * const axg_toddr_sel_texts[] = {
+ "IN 0", "IN 1", "IN 2", "IN 3", "IN 4", "IN 6"
+};
+
+static const unsigned int axg_toddr_sel_values[] = {
+ 0, 1, 2, 3, 4, 6
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(axg_toddr_sel_enum, FIFO_CTRL0,
+ CTRL0_SEL_SHIFT, CTRL0_SEL_MASK,
+ axg_toddr_sel_texts, axg_toddr_sel_values);
+
+static const struct snd_kcontrol_new axg_toddr_in_mux =
+ SOC_DAPM_ENUM("Input Source", axg_toddr_sel_enum);
+
+static const struct snd_soc_dapm_widget axg_toddr_dapm_widgets[] = {
+ SND_SOC_DAPM_MUX("SRC SEL", SND_SOC_NOPM, 0, 0, &axg_toddr_in_mux),
+ SND_SOC_DAPM_AIF_IN("IN 0", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("IN 1", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("IN 2", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("IN 3", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("IN 4", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("IN 6", NULL, 0, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_route axg_toddr_dapm_routes[] = {
+ { "Capture", NULL, "SRC SEL" },
+ { "SRC SEL", "IN 0", "IN 0" },
+ { "SRC SEL", "IN 1", "IN 1" },
+ { "SRC SEL", "IN 2", "IN 2" },
+ { "SRC SEL", "IN 3", "IN 3" },
+ { "SRC SEL", "IN 4", "IN 4" },
+ { "SRC SEL", "IN 6", "IN 6" },
+};
+
+static const struct snd_soc_component_driver axg_toddr_component_drv = {
+ .dapm_widgets = axg_toddr_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(axg_toddr_dapm_widgets),
+ .dapm_routes = axg_toddr_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(axg_toddr_dapm_routes),
+ .ops = &axg_fifo_pcm_ops,
+};
+
+static const struct axg_fifo_match_data axg_toddr_match_data = {
+ .component_drv = &axg_toddr_component_drv,
+ .dai_drv = &axg_toddr_dai_drv,
+};
+
+static const struct of_device_id axg_toddr_of_match[] = {
+ {
+ .compatible = "amlogic,axg-toddr",
+ .data = &axg_toddr_match_data,
+ }, {}
+};
+MODULE_DEVICE_TABLE(of, axg_toddr_of_match);
+
+static struct platform_driver axg_toddr_pdrv = {
+ .probe = axg_fifo_probe,
+ .driver = {
+ .name = "axg-toddr",
+ .of_match_table = axg_toddr_of_match,
+ },
+};
+module_platform_driver(axg_toddr_pdrv);
+
+MODULE_DESCRIPTION("Amlogic AXG capture fifo driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index 15ccbf479c96..d5ae9eb8c756 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -40,7 +40,7 @@ struct abe_twl6040 {
int mclk_freq; /* MCLK frequency speed for twl6040 */
};
-struct platform_device *dmic_codec_dev;
+static struct platform_device *dmic_codec_dev;
static int omap_abe_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index 51dd7c65096b..fe966272bd0c 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -213,8 +213,10 @@ static int omap_dmic_dai_hw_params(struct snd_pcm_substream *substream,
switch (channels) {
case 6:
dmic->ch_enabled |= OMAP_DMIC_UP3_ENABLE;
+ /* fall through */
case 4:
dmic->ch_enabled |= OMAP_DMIC_UP2_ENABLE;
+ /* fall through */
case 2:
dmic->ch_enabled |= OMAP_DMIC_UP1_ENABLE;
break;
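The new comments only document what the switch already did: each higher channel count also enables every uplink below it. An equivalent, fall-through-free sketch (assuming channels is 2, 4 or 6, as the cases above require):

	if (channels >= 6)
		dmic->ch_enabled |= OMAP_DMIC_UP3_ENABLE;
	if (channels >= 4)
		dmic->ch_enabled |= OMAP_DMIC_UP2_ENABLE;
	if (channels >= 2)
		dmic->ch_enabled |= OMAP_DMIC_UP1_ENABLE;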
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index 0e97360f9890..4c1be36c2207 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -310,15 +310,19 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
/* up to 3 channels for capture */
return -EINVAL;
link_mask |= 1 << 4;
+ /* fall through */
case 4:
if (stream == SNDRV_PCM_STREAM_CAPTURE)
/* up to 3 channels for capture */
return -EINVAL;
link_mask |= 1 << 3;
+ /* fall through */
case 3:
link_mask |= 1 << 2;
+ /* fall through */
case 2:
link_mask |= 1 << 1;
+ /* fall through */
case 1:
link_mask |= 1 << 0;
break;
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 960744e46edc..776e148b0aa2 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -24,15 +24,19 @@ config SND_PXA2XX_AC97
config SND_PXA2XX_SOC_AC97
tristate
select AC97_BUS
+ select SND_PXA2XX_LIB
select SND_PXA2XX_LIB_AC97
select SND_SOC_AC97_BUS
config SND_PXA2XX_SOC_I2S
+ select SND_PXA2XX_LIB
tristate
config SND_PXA_SOC_SSP
- tristate
+ tristate "SoC Audio via PXA2xx/PXA3xx SSP ports"
+ depends on PLAT_PXA
select PXA_SSP
+ select SND_PXA2XX_LIB
config SND_MMP_SOC_SSPA
tristate
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index 2fc012b06c43..935a248e5bf6 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -90,95 +90,9 @@ static int magician_playback_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- unsigned int acps, acds, width;
- unsigned int div4 = PXA_SSP_CLK_SCDB_4;
+ unsigned int width;
int ret = 0;
- width = snd_pcm_format_physical_width(params_format(params));
-
- /*
- * rate = SSPSCLK / (2 * width(16 or 32))
- * SSPSCLK = (ACPS / ACDS) / SSPSCLKDIV(div4 or div1)
- */
- switch (params_rate(params)) {
- case 8000:
- /* off by a factor of 2: bug in the PXA27x audio clock? */
- acps = 32842000;
- switch (width) {
- case 16:
- /* 513156 Hz ~= _2_ * 8000 Hz * 32 (+0.23%) */
- acds = PXA_SSP_CLK_AUDIO_DIV_16;
- break;
- default: /* 32 */
- /* 1026312 Hz ~= _2_ * 8000 Hz * 64 (+0.23%) */
- acds = PXA_SSP_CLK_AUDIO_DIV_8;
- }
- break;
- case 11025:
- acps = 5622000;
- switch (width) {
- case 16:
- /* 351375 Hz ~= 11025 Hz * 32 (-0.41%) */
- acds = PXA_SSP_CLK_AUDIO_DIV_4;
- break;
- default: /* 32 */
- /* 702750 Hz ~= 11025 Hz * 64 (-0.41%) */
- acds = PXA_SSP_CLK_AUDIO_DIV_2;
- }
- break;
- case 22050:
- acps = 5622000;
- switch (width) {
- case 16:
- /* 702750 Hz ~= 22050 Hz * 32 (-0.41%) */
- acds = PXA_SSP_CLK_AUDIO_DIV_2;
- break;
- default: /* 32 */
- /* 1405500 Hz ~= 22050 Hz * 64 (-0.41%) */
- acds = PXA_SSP_CLK_AUDIO_DIV_1;
- }
- break;
- case 44100:
- acps = 5622000;
- switch (width) {
- case 16:
- /* 1405500 Hz ~= 44100 Hz * 32 (-0.41%) */
- acds = PXA_SSP_CLK_AUDIO_DIV_2;
- break;
- default: /* 32 */
- /* 2811000 Hz ~= 44100 Hz * 64 (-0.41%) */
- acds = PXA_SSP_CLK_AUDIO_DIV_1;
- }
- break;
- case 48000:
- acps = 12235000;
- switch (width) {
- case 16:
- /* 1529375 Hz ~= 48000 Hz * 32 (-0.44%) */
- acds = PXA_SSP_CLK_AUDIO_DIV_2;
- break;
- default: /* 32 */
- /* 3058750 Hz ~= 48000 Hz * 64 (-0.44%) */
- acds = PXA_SSP_CLK_AUDIO_DIV_1;
- }
- break;
- case 96000:
- default:
- acps = 12235000;
- switch (width) {
- case 16:
- /* 3058750 Hz ~= 96000 Hz * 32 (-0.44%) */
- acds = PXA_SSP_CLK_AUDIO_DIV_1;
- break;
- default: /* 32 */
- /* 6117500 Hz ~= 96000 Hz * 64 (-0.44%) */
- acds = PXA_SSP_CLK_AUDIO_DIV_2;
- div4 = PXA_SSP_CLK_SCDB_1;
- break;
- }
- break;
- }
-
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_MSB |
SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
@@ -191,6 +105,7 @@ static int magician_playback_hw_params(struct snd_pcm_substream *substream,
if (ret < 0)
return ret;
+ width = snd_pcm_format_physical_width(params_format(params));
ret = snd_soc_dai_set_tdm_slot(cpu_dai, 1, 0, 1, width);
if (ret < 0)
return ret;
@@ -201,23 +116,6 @@ static int magician_playback_hw_params(struct snd_pcm_substream *substream,
if (ret < 0)
return ret;
- /* set the SSP audio system clock ACDS divider */
- ret = snd_soc_dai_set_clkdiv(cpu_dai,
- PXA_SSP_AUDIO_DIV_ACDS, acds);
- if (ret < 0)
- return ret;
-
- /* set the SSP audio system clock SCDB divider4 */
- ret = snd_soc_dai_set_clkdiv(cpu_dai,
- PXA_SSP_AUDIO_DIV_SCDB, div4);
- if (ret < 0)
- return ret;
-
- /* set SSP audio pll clock */
- ret = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, acps);
- if (ret < 0)
- return ret;
-
return 0;
}
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 6fc986080130..69033e1a84e6 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -34,7 +34,6 @@
#include <sound/pxa2xx-lib.h>
#include <sound/dmaengine_pcm.h>
-#include "../../arm/pxa2xx-pcm.h"
#include "pxa-ssp.h"
/*
@@ -42,6 +41,8 @@
*/
struct ssp_priv {
struct ssp_device *ssp;
+ struct clk *extclk;
+ unsigned long ssp_clk;
unsigned int sysclk;
unsigned int dai_fmt;
unsigned int configured_dai_fmt;
@@ -105,9 +106,8 @@ static int pxa_ssp_startup(struct snd_pcm_substream *substream,
dma = kzalloc(sizeof(struct snd_dmaengine_dai_dma_data), GFP_KERNEL);
if (!dma)
return -ENOMEM;
-
- dma->filter_data = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
- &ssp->drcmr_tx : &ssp->drcmr_rx;
+ dma->chan_name = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ "tx" : "rx";
snd_soc_dai_set_dma_data(cpu_dai, substream, dma);
@@ -194,21 +194,6 @@ static void pxa_ssp_set_scr(struct ssp_device *ssp, u32 div)
pxa_ssp_write_reg(ssp, SSCR0, sscr0);
}
-/**
- * pxa_ssp_get_clkdiv - get SSP clock divider
- */
-static u32 pxa_ssp_get_scr(struct ssp_device *ssp)
-{
- u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
- u32 div;
-
- if (ssp->type == PXA25x_SSP)
- div = ((sscr0 >> 8) & 0xff) * 2 + 2;
- else
- div = ((sscr0 >> 8) & 0xfff) + 1;
- return div;
-}
-
/*
* Set the SSP ports SYSCLK.
*/
@@ -221,6 +206,21 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0) &
~(SSCR0_ECS | SSCR0_NCS | SSCR0_MOD | SSCR0_ACS);
+ if (priv->extclk) {
+ int ret;
+
+ /*
+ * For DT-based boards, if an extclk is given, use it
+ * here and configure PXA_SSP_CLK_EXT.
+ */
+
+ ret = clk_set_rate(priv->extclk, freq);
+ if (ret < 0)
+ return ret;
+
+ clk_id = PXA_SSP_CLK_EXT;
+ }
+
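From the machine-driver side nothing changes: the sysclk is still requested through the standard DAI op, and on DT boards with an extclk the requested clk_id is transparently overridden. A caller-side sketch (clock id and rate illustrative):

	/* Any clk_id may be passed; with an "extclk" in DT the driver
	 * switches to PXA_SSP_CLK_EXT and sets extclk to the given rate.
	 */
	ret = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_AUDIO,
				     256 * params_rate(params),
				     SND_SOC_CLOCK_IN);
	if (ret < 0)
		return ret;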
dev_dbg(&ssp->pdev->dev,
"pxa_ssp_set_dai_sysclk id: %d, clk_id %d, freq %u\n",
cpu_dai->id, clk_id, freq);
@@ -265,66 +265,17 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
}
/*
- * Set the SSP clock dividers.
- */
-static int pxa_ssp_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
- int div_id, int div)
-{
- struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
- struct ssp_device *ssp = priv->ssp;
- int val;
-
- switch (div_id) {
- case PXA_SSP_AUDIO_DIV_ACDS:
- val = (pxa_ssp_read_reg(ssp, SSACD) & ~0x7) | SSACD_ACDS(div);
- pxa_ssp_write_reg(ssp, SSACD, val);
- break;
- case PXA_SSP_AUDIO_DIV_SCDB:
- val = pxa_ssp_read_reg(ssp, SSACD);
- val &= ~SSACD_SCDB;
- if (ssp->type == PXA3xx_SSP)
- val &= ~SSACD_SCDX8;
- switch (div) {
- case PXA_SSP_CLK_SCDB_1:
- val |= SSACD_SCDB;
- break;
- case PXA_SSP_CLK_SCDB_4:
- break;
- case PXA_SSP_CLK_SCDB_8:
- if (ssp->type == PXA3xx_SSP)
- val |= SSACD_SCDX8;
- else
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
- pxa_ssp_write_reg(ssp, SSACD, val);
- break;
- case PXA_SSP_DIV_SCR:
- pxa_ssp_set_scr(ssp, div);
- break;
- default:
- return -ENODEV;
- }
-
- return 0;
-}
-
-/*
 * Configure the PLL frequency (pxa27x and, afaik, pxa320 only)
*/
-static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
- int source, unsigned int freq_in, unsigned int freq_out)
+static int pxa_ssp_set_pll(struct ssp_priv *priv, unsigned int freq)
{
- struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
u32 ssacd = pxa_ssp_read_reg(ssp, SSACD) & ~0x70;
if (ssp->type == PXA3xx_SSP)
pxa_ssp_write_reg(ssp, SSACDD, 0);
- switch (freq_out) {
+ switch (freq) {
case 5622000:
break;
case 11345000:
@@ -355,7 +306,7 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
u64 tmp = 19968;
tmp *= 1000000;
- do_div(tmp, freq_out);
+ do_div(tmp, freq);
val = tmp;
val = (val << 16) | 64;
@@ -365,7 +316,7 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
dev_dbg(&ssp->pdev->dev,
"Using SSACDD %x to supply %uHz\n",
- val, freq_out);
+ val, freq);
break;
}
@@ -535,6 +486,7 @@ static int pxa_ssp_configure_dai_fmt(struct ssp_priv *priv)
case SND_SOC_DAIFMT_DSP_A:
sspsp |= SSPSP_FSRT;
+ /* fall through */
case SND_SOC_DAIFMT_DSP_B:
sscr0 |= SSCR0_MOD | SSCR0_PSP;
sscr1 |= SSCR1_TRAIL | SSCR1_RWOT;
@@ -570,6 +522,24 @@ static int pxa_ssp_configure_dai_fmt(struct ssp_priv *priv)
return 0;
}
+struct pxa_ssp_clock_mode {
+ int rate;
+ int pll;
+ u8 acds;
+ u8 scdb;
+};
+
+static const struct pxa_ssp_clock_mode pxa_ssp_clock_modes[] = {
+ { .rate = 8000, .pll = 32842000, .acds = SSACD_ACDS_32, .scdb = SSACD_SCDB_4X },
+ { .rate = 11025, .pll = 5622000, .acds = SSACD_ACDS_4, .scdb = SSACD_SCDB_4X },
+ { .rate = 16000, .pll = 32842000, .acds = SSACD_ACDS_16, .scdb = SSACD_SCDB_4X },
+ { .rate = 22050, .pll = 5622000, .acds = SSACD_ACDS_2, .scdb = SSACD_SCDB_4X },
+ { .rate = 44100, .pll = 11345000, .acds = SSACD_ACDS_2, .scdb = SSACD_SCDB_4X },
+ { .rate = 48000, .pll = 12235000, .acds = SSACD_ACDS_2, .scdb = SSACD_SCDB_4X },
+ { .rate = 96000, .pll = 12235000, .acds = SSACD_ACDS_4, .scdb = SSACD_SCDB_1X },
+ {}
+};
+
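As a sanity check on the table entries (assuming the SSACD pipeline is PLL divided by ACDS, then by the SCDB post-divider, as the removed magician.c comments describe), a standalone sketch for the 8 kHz, 16-bit stereo case:

	#include <stdio.h>

	int main(void)
	{
		double pll = 32842000.0;	/* .pll of the 8000 Hz entry */
		double bclk = pll / 32.0 / 4.0;	/* ACDS_32, then SCDB_4X */
		double target = 8000.0 * 2 * 16; /* rate * channels * width */

		/* prints bclk=256578 target=256000 error=+0.23% */
		printf("bclk=%.0f target=%.0f error=%+.2f%%\n",
		       bclk, target, (bclk / target - 1.0) * 100.0);
		return 0;
	}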
/*
* Set the SSP audio DMA parameters and sample size.
* Can be called multiple times by oss emulation.
@@ -581,11 +551,12 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
int chn = params_channels(params);
- u32 sscr0;
- u32 sspsp;
+ u32 sscr0, sspsp;
int width = snd_pcm_format_physical_width(params_format(params));
int ttsa = pxa_ssp_read_reg(ssp, SSTSA) & 0xf;
struct snd_dmaengine_dai_dma_data *dma_data;
+ int rate = params_rate(params);
+ int bclk = rate * chn * (width / 8);
int ret;
dma_data = snd_soc_dai_get_dma_data(cpu_dai, substream);
@@ -625,11 +596,57 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
}
pxa_ssp_write_reg(ssp, SSCR0, sscr0);
+ if (sscr0 & SSCR0_ACS) {
+ ret = pxa_ssp_set_pll(priv, bclk);
+
+ /*
+ * If we were able to generate the bclk directly,
+ * all is fine. Otherwise, look up the closest rate
+ * from the table and also set the dividers.
+ */
+
+ if (ret < 0) {
+ const struct pxa_ssp_clock_mode *m;
+ int ssacd, acds;
+
+ for (m = pxa_ssp_clock_modes; m->rate; m++) {
+ if (m->rate == rate)
+ break;
+ }
+
+ if (!m->rate)
+ return -EINVAL;
+
+ acds = m->acds;
+
+ /* The values in the table are for 16 bits */
+ if (width == 32)
+ acds--;
+
+ ret = pxa_ssp_set_pll(priv, m->pll);
+ if (ret < 0)
+ return ret;
+
+ ssacd = pxa_ssp_read_reg(ssp, SSACD);
+ ssacd &= ~(SSACD_ACDS(7) | SSACD_SCDB_1X);
+ ssacd |= SSACD_ACDS(acds);
+ ssacd |= m->scdb;
+ pxa_ssp_write_reg(ssp, SSACD, ssacd);
+ }
+ } else if (sscr0 & SSCR0_ECS) {
+ /*
+ * For setups with external clocking, the PLL and its dividers
+ * are not active. Instead, the SCR bits in SSCR0 can be used
+ * to divide the clock.
+ */
+ pxa_ssp_set_scr(ssp, bclk / rate);
+ }
+
switch (priv->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
sspsp = pxa_ssp_read_reg(ssp, SSPSP);
- if ((pxa_ssp_get_scr(ssp) == 4) && (width == 16)) {
+ if (((priv->sysclk / bclk) == 64) && (width == 16)) {
/* This is a special case where the bitclk is 64fs
* and we're not dealing with 2*32 bits of audio
* samples.
@@ -773,6 +790,15 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
ret = -ENODEV;
goto err_priv;
}
+
+ priv->extclk = devm_clk_get(dev, "extclk");
+ if (IS_ERR(priv->extclk)) {
+ ret = PTR_ERR(priv->extclk);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ priv->extclk = NULL;
+ }
} else {
priv->ssp = pxa_ssp_request(dai->id + 1, "SoC audio");
if (priv->ssp == NULL) {
@@ -814,8 +840,6 @@ static const struct snd_soc_dai_ops pxa_ssp_dai_ops = {
.trigger = pxa_ssp_trigger,
.hw_params = pxa_ssp_hw_params,
.set_sysclk = pxa_ssp_set_dai_sysclk,
- .set_clkdiv = pxa_ssp_set_dai_clkdiv,
- .set_pll = pxa_ssp_set_dai_pll,
.set_fmt = pxa_ssp_set_dai_fmt,
.set_tdm_slot = pxa_ssp_set_dai_tdm_slot,
.set_tristate = pxa_ssp_set_dai_tristate,
@@ -843,6 +867,9 @@ static struct snd_soc_dai_driver pxa_ssp_dai = {
static const struct snd_soc_component_driver pxa_ssp_component = {
.name = "pxa-ssp",
+ .ops = &pxa2xx_pcm_ops,
+ .pcm_new = pxa2xx_soc_pcm_new,
+ .pcm_free = pxa2xx_pcm_free_dma_buffers,
};
#ifdef CONFIG_OF
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index 803818aabee9..9f779657bc86 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -68,61 +68,39 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
.reset = pxa2xx_ac97_cold_reset,
};
-static struct pxad_param pxa2xx_ac97_pcm_stereo_in_req = {
- .prio = PXAD_PRIO_LOWEST,
- .drcmr = 11,
-};
-
static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
.addr = __PREG(PCDR),
.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .chan_name = "pcm_pcm_stereo_in",
.maxburst = 32,
- .filter_data = &pxa2xx_ac97_pcm_stereo_in_req,
-};
-
-static struct pxad_param pxa2xx_ac97_pcm_stereo_out_req = {
- .prio = PXAD_PRIO_LOWEST,
- .drcmr = 12,
};
static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
.addr = __PREG(PCDR),
.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .chan_name = "pcm_pcm_stereo_out",
.maxburst = 32,
- .filter_data = &pxa2xx_ac97_pcm_stereo_out_req,
};
-static struct pxad_param pxa2xx_ac97_pcm_aux_mono_out_req = {
- .prio = PXAD_PRIO_LOWEST,
- .drcmr = 10,
-};
static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_aux_mono_out = {
.addr = __PREG(MODR),
.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .chan_name = "pcm_aux_mono_out",
.maxburst = 16,
- .filter_data = &pxa2xx_ac97_pcm_aux_mono_out_req,
};
-static struct pxad_param pxa2xx_ac97_pcm_aux_mono_in_req = {
- .prio = PXAD_PRIO_LOWEST,
- .drcmr = 9,
-};
static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_aux_mono_in = {
.addr = __PREG(MODR),
.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .chan_name = "pcm_aux_mono_in",
.maxburst = 16,
- .filter_data = &pxa2xx_ac97_pcm_aux_mono_in_req,
};
-static struct pxad_param pxa2xx_ac97_pcm_aux_mic_mono_req = {
- .prio = PXAD_PRIO_LOWEST,
- .drcmr = 8,
-};
static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_mic_mono_in = {
.addr = __PREG(MCDR),
.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .chan_name = "pcm_aux_mic_mono",
.maxburst = 16,
- .filter_data = &pxa2xx_ac97_pcm_aux_mic_mono_req,
};
static int pxa2xx_ac97_hifi_startup(struct snd_pcm_substream *substream,
@@ -236,7 +214,21 @@ static struct snd_soc_dai_driver pxa_ac97_dai_driver[] = {
static const struct snd_soc_component_driver pxa_ac97_component = {
.name = "pxa-ac97",
+ .ops = &pxa2xx_pcm_ops,
+ .pcm_new = pxa2xx_soc_pcm_new,
+ .pcm_free = pxa2xx_pcm_free_dma_buffers,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id pxa2xx_ac97_dt_ids[] = {
+ { .compatible = "marvell,pxa250-ac97", },
+ { .compatible = "marvell,pxa270-ac97", },
+ { .compatible = "marvell,pxa300-ac97", },
+ { }
};
+MODULE_DEVICE_TABLE(of, pxa2xx_ac97_dt_ids);
+
+#endif
static int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
{
@@ -296,6 +288,7 @@ static struct platform_driver pxa2xx_ac97_driver = {
#ifdef CONFIG_PM_SLEEP
.pm = &pxa2xx_ac97_pm_ops,
#endif
+ .of_match_table = of_match_ptr(pxa2xx_ac97_dt_ids),
},
};
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 3fb60baf6eab..42820121e5b9 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -82,20 +82,18 @@ static struct pxa_i2s_port pxa_i2s;
static struct clk *clk_i2s;
static int clk_ena = 0;
-static unsigned long pxa2xx_i2s_pcm_stereo_out_req = 3;
static struct snd_dmaengine_dai_dma_data pxa2xx_i2s_pcm_stereo_out = {
.addr = __PREG(SADR),
.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .chan_name = "tx",
.maxburst = 32,
- .filter_data = &pxa2xx_i2s_pcm_stereo_out_req,
};
-static unsigned long pxa2xx_i2s_pcm_stereo_in_req = 2;
static struct snd_dmaengine_dai_dma_data pxa2xx_i2s_pcm_stereo_in = {
.addr = __PREG(SADR),
.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .chan_name = "rx",
.maxburst = 32,
- .filter_data = &pxa2xx_i2s_pcm_stereo_in_req,
};
static int pxa2xx_i2s_startup(struct snd_pcm_substream *substream,
@@ -366,6 +364,9 @@ static struct snd_soc_dai_driver pxa_i2s_dai = {
static const struct snd_soc_component_driver pxa_i2s_component = {
.name = "pxa-i2s",
+ .ops = &pxa2xx_pcm_ops,
+ .pcm_new = pxa2xx_soc_pcm_new,
+ .pcm_free = pxa2xx_pcm_free_dma_buffers,
};
static int pxa2xx_i2s_drv_probe(struct platform_device *pdev)
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 8b6a70e94c01..72eaaef1b426 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -20,70 +20,6 @@
#include <sound/pxa2xx-lib.h>
#include <sound/dmaengine_pcm.h>
-#include "../../arm/pxa2xx-pcm.h"
-
-static int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_dmaengine_dai_dma_data *dma;
-
- dma = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
-
- /* return if this is a bufferless transfer e.g.
- * codec <--> BT codec or GSM modem -- lg FIXME */
- if (!dma)
- return 0;
-
- return __pxa2xx_pcm_hw_params(substream, params);
-}
-
-static int pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- __pxa2xx_pcm_hw_free(substream);
-
- return 0;
-}
-
-static const struct snd_pcm_ops pxa2xx_pcm_ops = {
- .open = __pxa2xx_pcm_open,
- .close = __pxa2xx_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = pxa2xx_pcm_hw_params,
- .hw_free = pxa2xx_pcm_hw_free,
- .prepare = __pxa2xx_pcm_prepare,
- .trigger = pxa2xx_pcm_trigger,
- .pointer = pxa2xx_pcm_pointer,
- .mmap = pxa2xx_pcm_mmap,
-};
-
-static int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_card *card = rtd->card->snd_card;
- struct snd_pcm *pcm = rtd->pcm;
- int ret;
-
- ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
- if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
- ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
- SNDRV_PCM_STREAM_PLAYBACK);
- if (ret)
- goto out;
- }
-
- if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
- ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
- SNDRV_PCM_STREAM_CAPTURE);
- if (ret)
- goto out;
- }
- out:
- return ret;
-}
-
static const struct snd_soc_component_driver pxa2xx_soc_platform = {
.ops = &pxa2xx_pcm_ops,
.pcm_new = pxa2xx_soc_pcm_new,
@@ -96,18 +32,9 @@ static int pxa2xx_soc_platform_probe(struct platform_device *pdev)
NULL, 0);
}
-#ifdef CONFIG_OF
-static const struct of_device_id snd_soc_pxa_audio_match[] = {
- { .compatible = "mrvl,pxa-pcm-audio" },
- { }
-};
-MODULE_DEVICE_TABLE(of, snd_soc_pxa_audio_match);
-#endif
-
static struct platform_driver pxa_pcm_driver = {
.driver = {
.name = "pxa-pcm-audio",
- .of_match_table = of_match_ptr(snd_soc_pxa_audio_match),
},
.probe = pxa2xx_soc_platform_probe,
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index ba468e560dd2..230eee450f45 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -83,11 +83,9 @@ static int zylonite_voice_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- unsigned int pll_out = 0;
unsigned int wm9713_div = 0;
int ret = 0;
int rate = params_rate(params);
- int width = snd_pcm_format_physical_width(params_format(params));
/* Only support ratios that we can generate neatly from the AC97
* based master clock - in particular, this excludes 44.1kHz.
@@ -109,17 +107,10 @@ static int zylonite_voice_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- /* Add 1 to the width for the leading clock cycle */
- pll_out = rate * (width + 1) * 8;
-
ret = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_AUDIO, 0, 1);
if (ret < 0)
return ret;
- ret = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, pll_out);
- if (ret < 0)
- return ret;
-
if (clk_pout)
ret = snd_soc_dai_set_clkdiv(codec_dai, WM9713_PCMCLK_PLL_DIV,
WM9713_PCMDIV(wm9713_div));
diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
index 87838fa27997..2a4c912d1e48 100644
--- a/sound/soc/qcom/Kconfig
+++ b/sound/soc/qcom/Kconfig
@@ -41,6 +41,9 @@ config SND_SOC_APQ8016_SBC
APQ8016 SOC-based systems.
Say Y if you want to use audio devices on MI2S.
+config SND_SOC_QCOM_COMMON
+ tristate
+
config SND_SOC_QDSP6_COMMON
tristate
@@ -86,7 +89,18 @@ config SND_SOC_MSM8996
tristate "SoC Machine driver for MSM8996 and APQ8096 boards"
depends on QCOM_APR
select SND_SOC_QDSP6
+ select SND_SOC_QCOM_COMMON
help
Support for Qualcomm Technologies LPASS audio block in
APQ8096 SoC-based systems.
 Say Y if you want to use audio devices on these SoCs.
+
+config SND_SOC_SDM845
+ tristate "SoC Machine driver for SDM845 boards"
+ depends on QCOM_APR
+ select SND_SOC_QDSP6
+ select SND_SOC_QCOM_COMMON
+ help
+ Add support for audio on Qualcomm Technologies Inc.
+ SDM845 SoC-based systems.
+ Say Y if you want to use audio devices on these SoCs.
diff --git a/sound/soc/qcom/Makefile b/sound/soc/qcom/Makefile
index 206945bb9ba1..41b2c7a23a4d 100644
--- a/sound/soc/qcom/Makefile
+++ b/sound/soc/qcom/Makefile
@@ -14,10 +14,14 @@ obj-$(CONFIG_SND_SOC_LPASS_APQ8016) += snd-soc-lpass-apq8016.o
snd-soc-storm-objs := storm.o
snd-soc-apq8016-sbc-objs := apq8016_sbc.o
snd-soc-apq8096-objs := apq8096.o
+snd-soc-sdm845-objs := sdm845.o
+snd-soc-qcom-common-objs := common.o
obj-$(CONFIG_SND_SOC_STORM) += snd-soc-storm.o
obj-$(CONFIG_SND_SOC_APQ8016_SBC) += snd-soc-apq8016-sbc.o
obj-$(CONFIG_SND_SOC_MSM8996) += snd-soc-apq8096.o
+obj-$(CONFIG_SND_SOC_SDM845) += snd-soc-sdm845.o
+obj-$(CONFIG_SND_SOC_QCOM_COMMON) += snd-soc-qcom-common.o
#DSP lib
obj-$(CONFIG_SND_SOC_QDSP6) += qdsp6/
diff --git a/sound/soc/qcom/apq8096.c b/sound/soc/qcom/apq8096.c
index 561cd429e6f2..1543e85629f8 100644
--- a/sound/soc/qcom/apq8096.c
+++ b/sound/soc/qcom/apq8096.c
@@ -1,14 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018, Linaro Limited
-#include <linux/soc/qcom/apr.h>
#include <linux/module.h>
-#include <linux/component.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/pcm.h>
+#include "common.h"
static int apq8096_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
@@ -24,211 +23,57 @@ static int apq8096_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
}
-static int apq8096_sbc_parse_of(struct snd_soc_card *card)
+static void apq8096_add_be_ops(struct snd_soc_card *card)
{
- struct device_node *np;
- struct device_node *codec = NULL;
- struct device_node *platform = NULL;
- struct device_node *cpu = NULL;
- struct device *dev = card->dev;
- struct snd_soc_dai_link *link;
- int ret, num_links;
-
- ret = snd_soc_of_parse_card_name(card, "qcom,model");
- if (ret) {
- dev_err(dev, "Error parsing card name: %d\n", ret);
- return ret;
- }
-
- /* DAPM routes */
- if (of_property_read_bool(dev->of_node, "qcom,audio-routing")) {
- ret = snd_soc_of_parse_audio_routing(card,
- "qcom,audio-routing");
- if (ret)
- return ret;
- }
-
- /* Populate links */
- num_links = of_get_child_count(dev->of_node);
+ struct snd_soc_dai_link *link = card->dai_link;
+ int i, num_links = card->num_links;
- /* Allocate the DAI link array */
- card->dai_link = kcalloc(num_links, sizeof(*link), GFP_KERNEL);
- if (!card->dai_link)
- return -ENOMEM;
-
- card->num_links = num_links;
- link = card->dai_link;
-
- for_each_child_of_node(dev->of_node, np) {
- cpu = of_get_child_by_name(np, "cpu");
- if (!cpu) {
- dev_err(dev, "Can't find cpu DT node\n");
- ret = -EINVAL;
- goto err;
- }
-
- link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
- if (!link->cpu_of_node) {
- dev_err(card->dev, "error getting cpu phandle\n");
- ret = -EINVAL;
- goto err;
- }
-
- ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
- if (ret) {
- dev_err(card->dev, "error getting cpu dai name\n");
- goto err;
- }
-
- platform = of_get_child_by_name(np, "platform");
- codec = of_get_child_by_name(np, "codec");
- if (codec && platform) {
- link->platform_of_node = of_parse_phandle(platform,
- "sound-dai",
- 0);
- if (!link->platform_of_node) {
- dev_err(card->dev, "platform dai not found\n");
- ret = -EINVAL;
- goto err;
- }
-
- ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
- if (ret < 0) {
- dev_err(card->dev, "codec dai not found\n");
- goto err;
- }
- link->no_pcm = 1;
- link->ignore_pmdown_time = 1;
+ for (i = 0; i < num_links; i++) {
+ if (link->no_pcm == 1)
link->be_hw_params_fixup = apq8096_be_hw_params_fixup;
- } else {
- link->platform_of_node = link->cpu_of_node;
- link->codec_dai_name = "snd-soc-dummy-dai";
- link->codec_name = "snd-soc-dummy";
- link->dynamic = 1;
- }
-
- link->ignore_suspend = 1;
- ret = of_property_read_string(np, "link-name", &link->name);
- if (ret) {
- dev_err(card->dev, "error getting codec dai_link name\n");
- goto err;
- }
-
- link->dpcm_playback = 1;
- link->dpcm_capture = 1;
- link->stream_name = link->name;
link++;
}
-
- return 0;
-err:
- of_node_put(cpu);
- of_node_put(codec);
- of_node_put(platform);
- kfree(card->dai_link);
- return ret;
}
-static int apq8096_bind(struct device *dev)
+static int apq8096_platform_probe(struct platform_device *pdev)
{
struct snd_soc_card *card;
+ struct device *dev = &pdev->dev;
int ret;
card = kzalloc(sizeof(*card), GFP_KERNEL);
if (!card)
return -ENOMEM;
- component_bind_all(dev, card);
card->dev = dev;
- ret = apq8096_sbc_parse_of(card);
+ dev_set_drvdata(dev, card);
+ ret = qcom_snd_parse_of(card);
if (ret) {
dev_err(dev, "Error parsing OF data\n");
goto err;
}
+ apq8096_add_be_ops(card);
ret = snd_soc_register_card(card);
if (ret)
- goto err;
+ goto err_card_register;
return 0;
+err_card_register:
+ kfree(card->dai_link);
err:
- component_unbind_all(dev, card);
kfree(card);
return ret;
}
-static void apq8096_unbind(struct device *dev)
+static int apq8096_platform_remove(struct platform_device *pdev)
{
- struct snd_soc_card *card = dev_get_drvdata(dev);
+ struct snd_soc_card *card = dev_get_drvdata(&pdev->dev);
snd_soc_unregister_card(card);
- component_unbind_all(dev, card);
kfree(card->dai_link);
kfree(card);
-}
-
-static const struct component_master_ops apq8096_ops = {
- .bind = apq8096_bind,
- .unbind = apq8096_unbind,
-};
-
-static int apq8016_compare_of(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
-static void apq8016_release_of(struct device *dev, void *data)
-{
- of_node_put(data);
-}
-
-static int add_audio_components(struct device *dev,
- struct component_match **matchptr)
-{
- struct device_node *np, *platform, *cpu, *node, *dai_node;
-
- node = dev->of_node;
-
- for_each_child_of_node(node, np) {
- cpu = of_get_child_by_name(np, "cpu");
- if (cpu) {
- dai_node = of_parse_phandle(cpu, "sound-dai", 0);
- of_node_get(dai_node);
- component_match_add_release(dev, matchptr,
- apq8016_release_of,
- apq8016_compare_of,
- dai_node);
- }
-
- platform = of_get_child_by_name(np, "platform");
- if (platform) {
- dai_node = of_parse_phandle(platform, "sound-dai", 0);
- component_match_add_release(dev, matchptr,
- apq8016_release_of,
- apq8016_compare_of,
- dai_node);
- }
- }
-
- return 0;
-}
-
-static int apq8096_platform_probe(struct platform_device *pdev)
-{
- struct component_match *match = NULL;
- int ret;
-
- ret = add_audio_components(&pdev->dev, &match);
- if (ret)
- return ret;
-
- return component_master_add_with_match(&pdev->dev, &apq8096_ops, match);
-}
-
-static int apq8096_platform_remove(struct platform_device *pdev)
-{
- component_master_del(&pdev->dev, &apq8096_ops);
return 0;
}
@@ -245,7 +90,6 @@ static struct platform_driver msm_snd_apq8096_driver = {
.remove = apq8096_platform_remove,
.driver = {
.name = "msm-snd-apq8096",
- .owner = THIS_MODULE,
.of_match_table = msm_snd_apq8096_dt_match,
},
};
diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
new file mode 100644
index 000000000000..eb1b9da05dd4
--- /dev/null
+++ b/sound/soc/qcom/common.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, Linaro Limited.
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#include <linux/module.h>
+#include "common.h"
+
+int qcom_snd_parse_of(struct snd_soc_card *card)
+{
+ struct device_node *np;
+ struct device_node *codec = NULL;
+ struct device_node *platform = NULL;
+ struct device_node *cpu = NULL;
+ struct device *dev = card->dev;
+ struct snd_soc_dai_link *link;
+ int ret, num_links;
+
+ ret = snd_soc_of_parse_card_name(card, "model");
+ if (ret) {
+ dev_err(dev, "Error parsing card name: %d\n", ret);
+ return ret;
+ }
+
+ /* DAPM routes */
+ if (of_property_read_bool(dev->of_node, "audio-routing")) {
+ ret = snd_soc_of_parse_audio_routing(card,
+ "audio-routing");
+ if (ret)
+ return ret;
+ }
+
+ /* Populate links */
+ num_links = of_get_child_count(dev->of_node);
+
+ /* Allocate the DAI link array */
+ card->dai_link = kcalloc(num_links, sizeof(*link), GFP_KERNEL);
+ if (!card->dai_link)
+ return -ENOMEM;
+
+ card->num_links = num_links;
+ link = card->dai_link;
+ for_each_child_of_node(dev->of_node, np) {
+ cpu = of_get_child_by_name(np, "cpu");
+ if (!cpu) {
+ dev_err(dev, "Can't find cpu DT node\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
+ if (!link->cpu_of_node) {
+ dev_err(card->dev, "error getting cpu phandle\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
+ if (ret) {
+ dev_err(card->dev, "error getting cpu dai name\n");
+ goto err;
+ }
+
+ platform = of_get_child_by_name(np, "platform");
+ codec = of_get_child_by_name(np, "codec");
+ if (codec && platform) {
+ link->platform_of_node = of_parse_phandle(platform,
+ "sound-dai",
+ 0);
+ if (!link->platform_of_node) {
+ dev_err(card->dev, "platform dai not found\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
+ if (ret < 0) {
+ dev_err(card->dev, "codec dai not found\n");
+ goto err;
+ }
+ link->no_pcm = 1;
+ link->ignore_pmdown_time = 1;
+ } else {
+ link->platform_of_node = link->cpu_of_node;
+ link->codec_dai_name = "snd-soc-dummy-dai";
+ link->codec_name = "snd-soc-dummy";
+ link->dynamic = 1;
+ }
+
+ link->ignore_suspend = 1;
+ ret = of_property_read_string(np, "link-name", &link->name);
+ if (ret) {
+ dev_err(card->dev, "error getting codec dai_link name\n");
+ goto err;
+ }
+
+ link->dpcm_playback = 1;
+ link->dpcm_capture = 1;
+ link->stream_name = link->name;
+ link++;
+ }
+
+ return 0;
+err:
+ of_node_put(cpu);
+ of_node_put(codec);
+ of_node_put(platform);
+ kfree(card->dai_link);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_snd_parse_of);
+
+MODULE_LICENSE("GPL v2");
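The helper is meant to be called from a machine driver's probe, exactly as the apq8096 conversion above does. A condensed sketch (devm_* variants used here for brevity; the function name is illustrative):

	static int example_snd_probe(struct platform_device *pdev)
	{
		struct snd_soc_card *card;
		int ret;

		card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
		if (!card)
			return -ENOMEM;

		card->dev = &pdev->dev;
		dev_set_drvdata(&pdev->dev, card);

		ret = qcom_snd_parse_of(card);	/* fills card->dai_link[] from DT */
		if (ret)
			return ret;

		return devm_snd_soc_register_card(&pdev->dev, card);
	}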
diff --git a/sound/soc/qcom/common.h b/sound/soc/qcom/common.h
new file mode 100644
index 000000000000..f05c05b12bd7
--- /dev/null
+++ b/sound/soc/qcom/common.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#ifndef __QCOM_SND_COMMON_H__
+#define __QCOM_SND_COMMON_H__
+
+#include <sound/soc.h>
+
+int qcom_snd_parse_of(struct snd_soc_card *card);
+
+#endif
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index 31fe78aa207f..d07271ea4c45 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -458,7 +458,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
return IRQ_NONE;
}
dev_warn(soc_runtime->dev, "xrun warning\n");
- snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stop_xrun(substream);
ret = IRQ_HANDLED;
}
diff --git a/sound/soc/qcom/qdsp6/q6adm.c b/sound/soc/qcom/qdsp6/q6adm.c
index 9983c665a941..932c3ebfd252 100644
--- a/sound/soc/qcom/qdsp6/q6adm.c
+++ b/sound/soc/qcom/qdsp6/q6adm.c
@@ -64,7 +64,6 @@ struct q6adm {
struct aprv2_ibasic_rsp_result_t result;
struct mutex lock;
wait_queue_head_t matrix_map_wait;
- struct platform_device *pdev_routing;
};
struct q6adm_cmd_device_open_v5 {
@@ -588,7 +587,6 @@ EXPORT_SYMBOL_GPL(q6adm_close);
static int q6adm_probe(struct apr_device *adev)
{
struct device *dev = &adev->dev;
- struct device_node *dais_np;
struct q6adm *adm;
adm = devm_kzalloc(&adev->dev, sizeof(*adm), GFP_KERNEL);
@@ -605,22 +603,12 @@ static int q6adm_probe(struct apr_device *adev)
INIT_LIST_HEAD(&adm->copps_list);
spin_lock_init(&adm->copps_list_lock);
- dais_np = of_get_child_by_name(dev->of_node, "routing");
- if (dais_np) {
- adm->pdev_routing = of_platform_device_create(dais_np,
- "q6routing", dev);
- of_node_put(dais_np);
- }
-
- return 0;
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
}
static int q6adm_remove(struct apr_device *adev)
{
- struct q6adm *adm = dev_get_drvdata(&adev->dev);
-
- if (adm->pdev_routing)
- of_platform_device_destroy(&adm->pdev_routing->dev, NULL);
+ of_platform_depopulate(&adev->dev);
return 0;
}
diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c
index 5002dd05bf27..60ff4a2d3577 100644
--- a/sound/soc/qcom/qdsp6/q6afe-dai.c
+++ b/sound/soc/qcom/qdsp6/q6afe-dai.c
@@ -4,7 +4,6 @@
#include <linux/err.h>
#include <linux/init.h>
-#include <linux/component.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
@@ -81,7 +80,6 @@ static int q6slim_hw_params(struct snd_pcm_substream *substream,
struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
struct q6afe_slim_cfg *slim = &dai_data->port_config[dai->id].slim;
- slim->num_channels = params_channels(params);
slim->sample_rate = params_rate(params);
switch (params_format(params)) {
@@ -315,6 +313,9 @@ static void q6afe_dai_shutdown(struct snd_pcm_substream *substream,
struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev);
int rc;
+ if (!dai_data->is_port_started[dai->id])
+ return;
+
rc = q6afe_port_stop(dai_data->port[dai->id]);
if (rc < 0)
dev_err(dai->dev, "fail to close AFE port (%d)\n", rc);
@@ -382,23 +383,31 @@ static int q6slim_set_channel_map(struct snd_soc_dai *dai,
struct q6afe_port_config *pcfg = &dai_data->port_config[dai->id];
int i;
- if (!rx_slot) {
- pr_err("%s: rx slot not found\n", __func__);
- return -EINVAL;
- }
+ if (dai->id & 0x1) {
+ /* TX */
+ if (!tx_slot) {
+ pr_err("%s: tx slot not found\n", __func__);
+ return -EINVAL;
+ }
- for (i = 0; i < rx_num; i++) {
- pcfg->slim.ch_mapping[i] = rx_slot[i];
- pr_debug("%s: find number of channels[%d] ch[%d]\n",
- __func__, i, rx_slot[i]);
- }
+ for (i = 0; i < tx_num; i++)
+ pcfg->slim.ch_mapping[i] = tx_slot[i];
+
+ pcfg->slim.num_channels = tx_num;
+
+ } else {
+ if (!rx_slot) {
+ pr_err("%s: rx slot not found\n", __func__);
+ return -EINVAL;
+ }
- pcfg->slim.num_channels = rx_num;
+ for (i = 0; i < rx_num; i++)
+ pcfg->slim.ch_mapping[i] = rx_slot[i];
- pr_debug("%s: SLIMBUS_%d_RX cnt[%d] ch[%d %d]\n", __func__,
- (dai->id - SLIMBUS_0_RX) / 2, rx_num,
- pcfg->slim.ch_mapping[0],
- pcfg->slim.ch_mapping[1]);
+ pcfg->slim.num_channels = rx_num;
+
+ }
return 0;
}
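The (dai->id & 0x1) test relies on the AFE SLIMBUS ids alternating RX/TX, so odd ids are capture ports. Callers keep using the standard DAI op; a sketch for a TX dai (slot values illustrative):

	unsigned int tx_slot[2] = { 134, 135 };	/* hypothetical SLIMbus ports */
	int ret;

	/* On an odd (TX) dai id, only the tx_slot array is consumed. */
	ret = snd_soc_dai_set_channel_map(dai, 2, tx_slot, 0, NULL);
	if (ret)
		return ret;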
@@ -443,6 +452,14 @@ static const struct snd_soc_dapm_route q6afe_dapm_routes[] = {
{"Slimbus5 Playback", NULL, "SLIMBUS_5_RX"},
{"Slimbus6 Playback", NULL, "SLIMBUS_6_RX"},
+ {"SLIMBUS_0_TX", NULL, "Slimbus Capture"},
+ {"SLIMBUS_1_TX", NULL, "Slimbus1 Capture"},
+ {"SLIMBUS_2_TX", NULL, "Slimbus2 Capture"},
+ {"SLIMBUS_3_TX", NULL, "Slimbus3 Capture"},
+ {"SLIMBUS_4_TX", NULL, "Slimbus4 Capture"},
+ {"SLIMBUS_5_TX", NULL, "Slimbus5 Capture"},
+ {"SLIMBUS_6_TX", NULL, "Slimbus6 Capture"},
+
{"Primary MI2S Playback", NULL, "PRI_MI2S_RX"},
{"Secondary MI2S Playback", NULL, "SEC_MI2S_RX"},
{"Tertiary MI2S Playback", NULL, "TERT_MI2S_RX"},
@@ -637,6 +654,24 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
.rate_max = 192000,
},
}, {
+ .name = "SLIMBUS_0_TX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_0_TX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ .capture = {
+ .stream_name = "Slimbus Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ }, {
.playback = {
.stream_name = "Slimbus1 Playback",
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
@@ -655,6 +690,24 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
.probe = msm_dai_q6_dai_probe,
.remove = msm_dai_q6_dai_remove,
}, {
+ .name = "SLIMBUS_1_TX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_1_TX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ .capture = {
+ .stream_name = "Slimbus1 Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ }, {
.playback = {
.stream_name = "Slimbus2 Playback",
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
@@ -672,6 +725,25 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
.id = SLIMBUS_2_RX,
.probe = msm_dai_q6_dai_probe,
.remove = msm_dai_q6_dai_remove,
+
+ }, {
+ .name = "SLIMBUS_2_TX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_2_TX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ .capture = {
+ .stream_name = "Slimbus2 Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
}, {
.playback = {
.stream_name = "Slimbus3 Playback",
@@ -690,6 +762,25 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
.id = SLIMBUS_3_RX,
.probe = msm_dai_q6_dai_probe,
.remove = msm_dai_q6_dai_remove,
+
+ }, {
+ .name = "SLIMBUS_3_TX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_3_TX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ .capture = {
+ .stream_name = "Slimbus3 Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
}, {
.playback = {
.stream_name = "Slimbus4 Playback",
@@ -708,6 +799,25 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
.id = SLIMBUS_4_RX,
.probe = msm_dai_q6_dai_probe,
.remove = msm_dai_q6_dai_remove,
+
+ }, {
+ .name = "SLIMBUS_4_TX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_4_TX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ .capture = {
+ .stream_name = "Slimbus4 Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
}, {
.playback = {
.stream_name = "Slimbus5 Playback",
@@ -726,6 +836,25 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
.id = SLIMBUS_5_RX,
.probe = msm_dai_q6_dai_probe,
.remove = msm_dai_q6_dai_remove,
+
+ }, {
+ .name = "SLIMBUS_5_TX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_5_TX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ .capture = {
+ .stream_name = "Slimbus5 Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
}, {
.playback = {
.stream_name = "Slimbus6 Playback",
@@ -744,6 +873,25 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
.id = SLIMBUS_6_RX,
.probe = msm_dai_q6_dai_probe,
.remove = msm_dai_q6_dai_remove,
+
+ }, {
+ .name = "SLIMBUS_6_TX",
+ .ops = &q6slim_ops,
+ .id = SLIMBUS_6_TX,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ .capture = {
+ .stream_name = "Slimbus6 Capture",
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
}, {
.playback = {
.stream_name = "Primary MI2S Playback",
@@ -972,6 +1120,13 @@ static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_RX", "Slimbus5 Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_RX", "Slimbus6 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_0_TX", "Slimbus Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_1_TX", "Slimbus1 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_2_TX", "Slimbus2 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_3_TX", "Slimbus3 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_4_TX", "Slimbus4 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_5_TX", "Slimbus5 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_6_TX", "Slimbus6 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_RX", "Quaternary MI2S Playback",
0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("QUAT_MI2S_TX", "Quaternary MI2S Capture",
@@ -1180,7 +1335,7 @@ static void of_q6afe_parse_dai_data(struct device *dev,
int id, i, num_lines;
ret = of_property_read_u32(node, "reg", &id);
- if (ret || id > AFE_PORT_MAX) {
+ if (ret || id < 0 || id >= AFE_PORT_MAX) {
dev_err(dev, "valid dai id not found:%d\n", ret);
continue;
}
@@ -1249,11 +1404,12 @@ static void of_q6afe_parse_dai_data(struct device *dev,
}
}
-static int q6afe_dai_bind(struct device *dev, struct device *master, void *data)
+static int q6afe_dai_dev_probe(struct platform_device *pdev)
{
struct q6afe_dai_data *dai_data;
+ struct device *dev = &pdev->dev;
- dai_data = kzalloc(sizeof(*dai_data), GFP_KERNEL);
+ dai_data = devm_kzalloc(dev, sizeof(*dai_data), GFP_KERNEL);
if (!dai_data)
return -ENOMEM;
@@ -1261,41 +1417,22 @@ static int q6afe_dai_bind(struct device *dev, struct device *master, void *data)
of_q6afe_parse_dai_data(dev, dai_data);
- return snd_soc_register_component(dev, &q6afe_dai_component,
+ return devm_snd_soc_register_component(dev, &q6afe_dai_component,
q6afe_dais, ARRAY_SIZE(q6afe_dais));
}
-static void q6afe_dai_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct q6afe_dai_data *dai_data = dev_get_drvdata(dev);
-
- snd_soc_unregister_component(dev);
- kfree(dai_data);
-}
-
-static const struct component_ops q6afe_dai_comp_ops = {
- .bind = q6afe_dai_bind,
- .unbind = q6afe_dai_unbind,
+static const struct of_device_id q6afe_dai_device_id[] = {
+ { .compatible = "qcom,q6afe-dais" },
+ {},
};
-
-static int q6afe_dai_dev_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &q6afe_dai_comp_ops);
-}
-
-static int q6afe_dai_dev_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &q6afe_dai_comp_ops);
- return 0;
-}
+MODULE_DEVICE_TABLE(of, q6afe_dai_device_id);
static struct platform_driver q6afe_dai_platform_driver = {
.driver = {
.name = "q6afe-dai",
+ .of_match_table = of_match_ptr(q6afe_dai_device_id),
},
.probe = q6afe_dai_dev_probe,
- .remove = q6afe_dai_dev_remove,
};
module_platform_driver(q6afe_dai_platform_driver);
diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
index 01f43218984b..000775b4bba8 100644
--- a/sound/soc/qcom/qdsp6/q6afe.c
+++ b/sound/soc/qcom/qdsp6/q6afe.c
@@ -316,7 +316,6 @@ struct q6afe {
struct mutex lock;
struct list_head port_list;
spinlock_t port_list_lock;
- struct platform_device *pdev_dais;
};
struct afe_port_cmd_device_start {
@@ -515,6 +514,20 @@ static struct afe_port_map port_maps[AFE_PORT_MAX] = {
SLIMBUS_5_RX, 1, 1},
[SLIMBUS_6_RX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_RX,
SLIMBUS_6_RX, 1, 1},
+ [SLIMBUS_0_TX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_TX,
+ SLIMBUS_0_TX, 0, 1},
+ [SLIMBUS_1_TX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_TX,
+ SLIMBUS_1_TX, 0, 1},
+ [SLIMBUS_2_TX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_TX,
+ SLIMBUS_2_TX, 0, 1},
+ [SLIMBUS_3_TX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_TX,
+ SLIMBUS_3_TX, 0, 1},
+ [SLIMBUS_4_TX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_TX,
+ SLIMBUS_4_TX, 0, 1},
+ [SLIMBUS_5_TX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX,
+ SLIMBUS_5_TX, 0, 1},
+ [SLIMBUS_6_TX] = { AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_TX,
+ SLIMBUS_6_TX, 0, 1},
[PRIMARY_MI2S_RX] = { AFE_PORT_ID_PRIMARY_MI2S_RX,
PRIMARY_MI2S_RX, 1, 1},
[PRIMARY_MI2S_TX] = { AFE_PORT_ID_PRIMARY_MI2S_TX,
@@ -777,7 +790,7 @@ static int q6afe_callback(struct apr_device *adev, struct apr_resp_pkt *data)
*/
int q6afe_get_port_id(int index)
{
- if (index < 0 || index > AFE_PORT_MAX)
+ if (index < 0 || index >= AFE_PORT_MAX)
return -EINVAL;
return port_maps[index].port_id;
@@ -1014,7 +1027,7 @@ int q6afe_port_stop(struct q6afe_port *port)
port_id = port->id;
index = port->token;
- if (index < 0 || index > AFE_PORT_MAX) {
+ if (index < 0 || index >= AFE_PORT_MAX) {
dev_err(afe->dev, "AFE port index[%d] invalid!\n", index);
return -EINVAL;
}
@@ -1355,7 +1368,7 @@ struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id)
unsigned long flags;
int cfg_type;
- if (id < 0 || id > AFE_PORT_MAX) {
+ if (id < 0 || id >= AFE_PORT_MAX) {
dev_err(dev, "AFE port token[%d] invalid!\n", id);
return ERR_PTR(-EINVAL);
}
@@ -1373,6 +1386,13 @@ struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id)
case AFE_PORT_ID_MULTICHAN_HDMI_RX:
cfg_type = AFE_PARAM_ID_HDMI_CONFIG;
break;
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_TX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_TX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_TX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_3_TX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_4_TX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX:
+ case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_6_TX:
case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_RX:
case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX:
case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_2_RX:
@@ -1438,7 +1458,6 @@ static int q6afe_probe(struct apr_device *adev)
{
struct q6afe *afe;
struct device *dev = &adev->dev;
- struct device_node *dais_np;
afe = devm_kzalloc(dev, sizeof(*afe), GFP_KERNEL);
if (!afe)
@@ -1453,22 +1472,12 @@ static int q6afe_probe(struct apr_device *adev)
dev_set_drvdata(dev, afe);
- dais_np = of_get_child_by_name(dev->of_node, "dais");
- if (dais_np) {
- afe->pdev_dais = of_platform_device_create(dais_np,
- "q6afe-dai", dev);
- of_node_put(dais_np);
- }
-
- return 0;
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
}
static int q6afe_remove(struct apr_device *adev)
{
- struct q6afe *afe = dev_get_drvdata(&adev->dev);
-
- if (afe->pdev_dais)
- of_platform_device_destroy(&afe->pdev_dais->dev, NULL);
+ of_platform_depopulate(&adev->dev);
return 0;
}
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index 349c6a883c63..9db9a2944ef2 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -7,7 +7,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/component.h>
#include <sound/soc.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
@@ -390,7 +389,9 @@ static int q6asm_dai_close(struct snd_pcm_substream *substream)
struct q6asm_dai_rtd *prtd = runtime->private_data;
if (prtd->audio_client) {
- q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+ if (prtd->state)
+ q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+
q6asm_unmap_memory_regions(substream->stream,
prtd->audio_client);
q6asm_audio_client_free(prtd->audio_client);
@@ -561,14 +562,15 @@ static struct snd_soc_dai_driver q6asm_fe_dais[] = {
Q6ASM_FEDAI_DRIVER(8),
};
-static int q6asm_dai_bind(struct device *dev, struct device *master, void *data)
+static int q6asm_dai_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct of_phandle_args args;
struct q6asm_dai_data *pdata;
int rc;
- pdata = kzalloc(sizeof(struct q6asm_dai_data), GFP_KERNEL);
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -580,43 +582,23 @@ static int q6asm_dai_bind(struct device *dev, struct device *master, void *data)
dev_set_drvdata(dev, pdata);
- return snd_soc_register_component(dev, &q6asm_fe_dai_component,
+ return devm_snd_soc_register_component(dev, &q6asm_fe_dai_component,
q6asm_fe_dais,
ARRAY_SIZE(q6asm_fe_dais));
}
-static void q6asm_dai_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct q6asm_dai_data *pdata = dev_get_drvdata(dev);
-
- snd_soc_unregister_component(dev);
-
- kfree(pdata);
-
-}
-static const struct component_ops q6asm_dai_comp_ops = {
- .bind = q6asm_dai_bind,
- .unbind = q6asm_dai_unbind,
+static const struct of_device_id q6asm_dai_device_id[] = {
+ { .compatible = "qcom,q6asm-dais" },
+ {},
};
-
-static int q6asm_dai_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &q6asm_dai_comp_ops);
-}
-
-static int q6asm_dai_dev_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &q6asm_dai_comp_ops);
- return 0;
-}
+MODULE_DEVICE_TABLE(of, q6asm_dai_device_id);
static struct platform_driver q6asm_dai_platform_driver = {
.driver = {
.name = "q6asm-dai",
+ .of_match_table = of_match_ptr(q6asm_dai_device_id),
},
.probe = q6asm_dai_probe,
- .remove = q6asm_dai_dev_remove,
};
module_platform_driver(q6asm_dai_platform_driver);
diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
index 530852385cad..2b2c7233bb5f 100644
--- a/sound/soc/qcom/qdsp6/q6asm.c
+++ b/sound/soc/qcom/qdsp6/q6asm.c
@@ -174,10 +174,8 @@ struct q6asm {
struct device *dev;
struct q6core_svc_api_info ainfo;
wait_queue_head_t mem_wait;
- struct platform_device *pcmdev;
spinlock_t slock;
struct audio_client *session[MAX_SESSIONS + 1];
- struct platform_device *pdev_dais;
};
struct audio_client {
@@ -1344,7 +1342,6 @@ EXPORT_SYMBOL_GPL(q6asm_cmd_nowait);
static int q6asm_probe(struct apr_device *adev)
{
struct device *dev = &adev->dev;
- struct device_node *dais_np;
struct q6asm *q6asm;
q6asm = devm_kzalloc(dev, sizeof(*q6asm), GFP_KERNEL);
@@ -1359,22 +1356,12 @@ static int q6asm_probe(struct apr_device *adev)
spin_lock_init(&q6asm->slock);
dev_set_drvdata(dev, q6asm);
- dais_np = of_get_child_by_name(dev->of_node, "dais");
- if (dais_np) {
- q6asm->pdev_dais = of_platform_device_create(dais_np,
- "q6asm-dai", dev);
- of_node_put(dais_np);
- }
-
- return 0;
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
}
static int q6asm_remove(struct apr_device *adev)
{
- struct q6asm *q6asm = dev_get_drvdata(&adev->dev);
-
- if (q6asm->pdev_dais)
- of_platform_device_destroy(&q6asm->pdev_dais->dev, NULL);
+ of_platform_depopulate(&adev->dev);
return 0;
}
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index 593f66b8622f..dc94c5c53788 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -8,7 +8,6 @@
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/bitops.h>
-#include <linux/component.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/slab.h>
@@ -68,6 +67,13 @@
{ mix_name, "SEC_MI2S_TX", "SEC_MI2S_TX" }, \
{ mix_name, "QUAT_MI2S_TX", "QUAT_MI2S_TX" }, \
{ mix_name, "TERT_MI2S_TX", "TERT_MI2S_TX" }, \
+ { mix_name, "SLIMBUS_0_TX", "SLIMBUS_0_TX" }, \
+ { mix_name, "SLIMBUS_1_TX", "SLIMBUS_1_TX" }, \
+ { mix_name, "SLIMBUS_2_TX", "SLIMBUS_2_TX" }, \
+ { mix_name, "SLIMBUS_3_TX", "SLIMBUS_3_TX" }, \
+ { mix_name, "SLIMBUS_4_TX", "SLIMBUS_4_TX" }, \
+ { mix_name, "SLIMBUS_5_TX", "SLIMBUS_5_TX" }, \
+ { mix_name, "SLIMBUS_6_TX", "SLIMBUS_6_TX" }, \
{ mix_name, "PRIMARY_TDM_TX_0", "PRIMARY_TDM_TX_0"}, \
{ mix_name, "PRIMARY_TDM_TX_1", "PRIMARY_TDM_TX_1"}, \
{ mix_name, "PRIMARY_TDM_TX_2", "PRIMARY_TDM_TX_2"}, \
@@ -122,6 +128,27 @@
SOC_SINGLE_EXT("QUAT_MI2S_TX", QUATERNARY_MI2S_TX, \
id, 1, 0, msm_routing_get_audio_mixer, \
msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SLIMBUS_0_TX", SLIMBUS_0_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SLIMBUS_1_TX", SLIMBUS_1_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SLIMBUS_2_TX", SLIMBUS_2_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SLIMBUS_3_TX", SLIMBUS_3_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SLIMBUS_4_TX", SLIMBUS_4_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SLIMBUS_5_TX", SLIMBUS_5_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
+ SOC_SINGLE_EXT("SLIMBUS_6_TX", SLIMBUS_6_TX, \
+ id, 1, 0, msm_routing_get_audio_mixer, \
+ msm_routing_put_audio_mixer), \
SOC_SINGLE_EXT("PRIMARY_TDM_TX_0", PRIMARY_TDM_TX_0, \
id, 1, 0, msm_routing_get_audio_mixer, \
msm_routing_put_audio_mixer), \
@@ -310,7 +337,7 @@ int q6routing_stream_open(int fedai_id, int perf_mode,
session->channels, topology, perf_mode,
session->bits_per_sample, 0, 0);
- if (!copp) {
+ if (IS_ERR_OR_NULL(copp)) {
mutex_unlock(&routing_data->lock);
return -EINVAL;
}
@@ -899,7 +926,7 @@ static int routing_hw_params(struct snd_pcm_substream *substream,
else
path_type = ADM_PATH_LIVE_REC;
- if (be_id > AFE_MAX_PORTS)
+ if (be_id >= AFE_MAX_PORTS)
return -EINVAL;
session = &data->port_data[be_id];
@@ -949,9 +976,10 @@ static const struct snd_soc_component_driver msm_soc_routing_component = {
.num_dapm_routes = ARRAY_SIZE(intercon),
};
-static int q6routing_dai_bind(struct device *dev, struct device *master,
- void *data)
+static int q6pcm_routing_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+
routing_data = kzalloc(sizeof(*routing_data), GFP_KERNEL);
if (!routing_data)
return -ENOMEM;
@@ -961,41 +989,28 @@ static int q6routing_dai_bind(struct device *dev, struct device *master,
mutex_init(&routing_data->lock);
dev_set_drvdata(dev, routing_data);
- return snd_soc_register_component(dev, &msm_soc_routing_component,
+ return devm_snd_soc_register_component(dev, &msm_soc_routing_component,
NULL, 0);
}
-static void q6routing_dai_unbind(struct device *dev, struct device *master,
- void *d)
+static int q6pcm_routing_remove(struct platform_device *pdev)
{
- struct msm_routing_data *data = dev_get_drvdata(dev);
-
- snd_soc_unregister_component(dev);
-
- kfree(data);
-
+ kfree(routing_data);
routing_data = NULL;
-}
-
-static const struct component_ops q6routing_dai_comp_ops = {
- .bind = q6routing_dai_bind,
- .unbind = q6routing_dai_unbind,
-};
-static int q6pcm_routing_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &q6routing_dai_comp_ops);
-}
-
-static int q6pcm_routing_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &q6routing_dai_comp_ops);
return 0;
}
+static const struct of_device_id q6pcm_routing_device_id[] = {
+ { .compatible = "qcom,q6adm-routing" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, q6pcm_routing_device_id);
+
static struct platform_driver q6pcm_routing_platform_driver = {
.driver = {
.name = "q6routing",
+ .of_match_table = of_match_ptr(q6pcm_routing_device_id),
},
.probe = q6pcm_routing_probe,
.remove = q6pcm_routing_remove,
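
Note the one-character routing_hw_params() fix above: port_data[] has AFE_MAX_PORTS entries, so valid indices run from 0 to AFE_MAX_PORTS - 1 and the guard must reject be_id == AFE_MAX_PORTS as well. Reduced to its essentials (array size illustrative):

    #define AFE_MAX_PORTS 32                /* illustrative size */

    struct session_data { int state; };
    static struct session_data port_data[AFE_MAX_PORTS];

    static struct session_data *get_session(int be_id)
    {
        /*
         * With '>' instead of '>=', be_id == AFE_MAX_PORTS would slip
         * through and index one element past the end of port_data[].
         */
        if (be_id < 0 || be_id >= AFE_MAX_PORTS)
            return NULL;

        return &port_data[be_id];
    }
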
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
new file mode 100644
index 000000000000..2a781d87ee65
--- /dev/null
+++ b/sound/soc/qcom/sdm845.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include "common.h"
+#include "qdsp6/q6afe.h"
+
+#define DEFAULT_SAMPLE_RATE_48K 48000
+#define DEFAULT_MCLK_RATE 24576000
+#define DEFAULT_BCLK_RATE 12288000
+
+struct sdm845_snd_data {
+ struct snd_soc_card *card;
+ uint32_t pri_mi2s_clk_count;
+ uint32_t quat_tdm_clk_count;
+};
+
+static unsigned int tdm_slot_offset[8] = {0, 4, 8, 12, 16, 20, 24, 28};
+
+static int sdm845_tdm_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int ret = 0;
+ int channels, slot_width;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ slot_width = 32;
+ break;
+ default:
+ dev_err(rtd->dev, "%s: invalid param format 0x%x\n",
+ __func__, params_format(params));
+ return -EINVAL;
+ }
+
+ channels = params_channels(params);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0, 0x3,
+ 8, slot_width);
+ if (ret < 0) {
+ dev_err(rtd->dev, "%s: failed to set tdm slot, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+
+ ret = snd_soc_dai_set_channel_map(cpu_dai, 0, NULL,
+ channels, tdm_slot_offset);
+ if (ret < 0) {
+ dev_err(rtd->dev, "%s: failed to set channel map, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+ } else {
+ ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0xf, 0,
+ 8, slot_width);
+ if (ret < 0) {
+ dev_err(rtd->dev, "%s: failed to set tdm slot, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+
+ ret = snd_soc_dai_set_channel_map(cpu_dai, channels,
+ tdm_slot_offset, 0, NULL);
+ if (ret < 0) {
+ dev_err(rtd->dev, "%s: failed to set channel map, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+ }
+end:
+ return ret;
+}
+
+static int sdm845_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int ret = 0;
+
+ switch (cpu_dai->id) {
+ case QUATERNARY_TDM_RX_0:
+ case QUATERNARY_TDM_TX_0:
+ ret = sdm845_tdm_snd_hw_params(substream, params);
+ break;
+ default:
+ pr_err("%s: invalid dai id 0x%x\n", __func__, cpu_dai->id);
+ break;
+ }
+ return ret;
+}
+
+static int sdm845_snd_startup(struct snd_pcm_substream *substream)
+{
+ unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct sdm845_snd_data *data = snd_soc_card_get_drvdata(card);
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+
+ switch (cpu_dai->id) {
+ case PRIMARY_MI2S_RX:
+ case PRIMARY_MI2S_TX:
+ if (++(data->pri_mi2s_clk_count) == 1) {
+ snd_soc_dai_set_sysclk(cpu_dai,
+ Q6AFE_LPASS_CLK_ID_MCLK_1,
+ DEFAULT_MCLK_RATE, SNDRV_PCM_STREAM_PLAYBACK);
+ snd_soc_dai_set_sysclk(cpu_dai,
+ Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT,
+ DEFAULT_BCLK_RATE, SNDRV_PCM_STREAM_PLAYBACK);
+ }
+ snd_soc_dai_set_fmt(cpu_dai, fmt);
+ break;
+
+ case QUATERNARY_TDM_RX_0:
+ case QUATERNARY_TDM_TX_0:
+ if (++(data->quat_tdm_clk_count) == 1) {
+ snd_soc_dai_set_sysclk(cpu_dai,
+ Q6AFE_LPASS_CLK_ID_QUAD_TDM_IBIT,
+ DEFAULT_BCLK_RATE, SNDRV_PCM_STREAM_PLAYBACK);
+ }
+ break;
+
+ default:
+ pr_err("%s: invalid dai id 0x%x\n", __func__, cpu_dai->id);
+ break;
+ }
+ return 0;
+}
+
+static void sdm845_snd_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct sdm845_snd_data *data = snd_soc_card_get_drvdata(card);
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+
+ switch (cpu_dai->id) {
+ case PRIMARY_MI2S_RX:
+ case PRIMARY_MI2S_TX:
+ if (--(data->pri_mi2s_clk_count) == 0) {
+ snd_soc_dai_set_sysclk(cpu_dai,
+ Q6AFE_LPASS_CLK_ID_MCLK_1,
+ 0, SNDRV_PCM_STREAM_PLAYBACK);
+ snd_soc_dai_set_sysclk(cpu_dai,
+ Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT,
+ 0, SNDRV_PCM_STREAM_PLAYBACK);
+ }
+ break;
+
+ case QUATERNARY_TDM_RX_0:
+ case QUATERNARY_TDM_TX_0:
+ if (--(data->quat_tdm_clk_count) == 0) {
+ snd_soc_dai_set_sysclk(cpu_dai,
+ Q6AFE_LPASS_CLK_ID_QUAD_TDM_IBIT,
+ 0, SNDRV_PCM_STREAM_PLAYBACK);
+ }
+ break;
+
+ default:
+ pr_err("%s: invalid dai id 0x%x\n", __func__, cpu_dai->id);
+ break;
+ }
+}
+
+static struct snd_soc_ops sdm845_be_ops = {
+ .hw_params = sdm845_snd_hw_params,
+ .startup = sdm845_snd_startup,
+ .shutdown = sdm845_snd_shutdown,
+};
+
+static int sdm845_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ rate->min = rate->max = DEFAULT_SAMPLE_RATE_48K;
+ channels->min = channels->max = 2;
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+
+ return 0;
+}
+
+static void sdm845_add_be_ops(struct snd_soc_card *card)
+{
+ struct snd_soc_dai_link *link = card->dai_link;
+ int i, num_links = card->num_links;
+
+ for (i = 0; i < num_links; i++) {
+ if (link->no_pcm == 1) {
+ link->ops = &sdm845_be_ops;
+ link->be_hw_params_fixup = sdm845_be_hw_params_fixup;
+ }
+ link++;
+ }
+}
+
+static int sdm845_snd_platform_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card;
+ struct sdm845_snd_data *data;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ /* Allocate the private data */
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto data_alloc_fail;
+ }
+
+ card->dev = dev;
+ dev_set_drvdata(dev, card);
+ ret = qcom_snd_parse_of(card);
+ if (ret) {
+ dev_err(dev, "Error parsing OF data\n");
+ goto parse_dt_fail;
+ }
+
+ data->card = card;
+ snd_soc_card_set_drvdata(card, data);
+
+ sdm845_add_be_ops(card);
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(dev, "Sound card registration failed\n");
+ goto register_card_fail;
+ }
+ return ret;
+
+register_card_fail:
+ kfree(card->dai_link);
+parse_dt_fail:
+ kfree(data);
+data_alloc_fail:
+ kfree(card);
+ return ret;
+}
+
+static int sdm845_snd_platform_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = dev_get_drvdata(&pdev->dev);
+ struct sdm845_snd_data *data = snd_soc_card_get_drvdata(card);
+
+ snd_soc_unregister_card(card);
+ kfree(card->dai_link);
+ kfree(data);
+ kfree(card);
+ return 0;
+}
+
+static const struct of_device_id sdm845_snd_device_id[] = {
+ { .compatible = "qcom,sdm845-sndcard" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdm845_snd_device_id);
+
+static struct platform_driver sdm845_snd_driver = {
+ .probe = sdm845_snd_platform_probe,
+ .remove = sdm845_snd_platform_remove,
+ .driver = {
+ .name = "msm-snd-sdm845",
+ .of_match_table = sdm845_snd_device_id,
+ },
+};
+module_platform_driver(sdm845_snd_driver);
+
+MODULE_DESCRIPTION("sdm845 ASoC Machine Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/rockchip/Makefile b/sound/soc/rockchip/Makefile
index 05b078e7b87f..65e814d46006 100644
--- a/sound/soc/rockchip/Makefile
+++ b/sound/soc/rockchip/Makefile
@@ -1,10 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
# ROCKCHIP Platform Support
snd-soc-rockchip-i2s-objs := rockchip_i2s.o
+snd-soc-rockchip-pcm-objs := rockchip_pcm.o
snd-soc-rockchip-pdm-objs := rockchip_pdm.o
snd-soc-rockchip-spdif-objs := rockchip_spdif.o
-obj-$(CONFIG_SND_SOC_ROCKCHIP_I2S) += snd-soc-rockchip-i2s.o
+obj-$(CONFIG_SND_SOC_ROCKCHIP_I2S) += snd-soc-rockchip-i2s.o snd-soc-rockchip-pcm.o
obj-$(CONFIG_SND_SOC_ROCKCHIP_PDM) += snd-soc-rockchip-pdm.o
obj-$(CONFIG_SND_SOC_ROCKCHIP_SPDIF) += snd-soc-rockchip-spdif.o
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 950823d69e9c..60d43d53a8f5 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -22,6 +22,7 @@
#include <sound/dmaengine_pcm.h>
#include "rockchip_i2s.h"
+#include "rockchip_pcm.h"
#define DRV_NAME "rockchip-i2s"
@@ -674,7 +675,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
goto err_suspend;
}
- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ ret = rockchip_pcm_platform_register(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Could not register PCM\n");
return ret;
diff --git a/sound/soc/rockchip/rockchip_pcm.c b/sound/soc/rockchip/rockchip_pcm.c
new file mode 100644
index 000000000000..f77538319221
--- /dev/null
+++ b/sound/soc/rockchip/rockchip_pcm.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Rockchip Electronics Co. Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/dmaengine_pcm.h>
+
+#include "rockchip_pcm.h"
+
+static const struct snd_pcm_hardware snd_rockchip_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME,
+ .period_bytes_min = 32,
+ .period_bytes_max = 8192,
+ .periods_min = 1,
+ .periods_max = 52,
+ .buffer_bytes_max = 64 * 1024,
+ .fifo_size = 32,
+};
+
+static const struct snd_dmaengine_pcm_config rk_dmaengine_pcm_config = {
+ .pcm_hardware = &snd_rockchip_hardware,
+ .prealloc_buffer_size = 32 * 1024,
+};
+
+int rockchip_pcm_platform_register(struct device *dev)
+{
+ return devm_snd_dmaengine_pcm_register(dev, &rk_dmaengine_pcm_config,
+ SND_DMAENGINE_PCM_FLAG_COMPAT);
+}
+EXPORT_SYMBOL_GPL(rockchip_pcm_platform_register);
+
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/rockchip/rockchip_pcm.h b/sound/soc/rockchip/rockchip_pcm.h
new file mode 100644
index 000000000000..d6c36115c60a
--- /dev/null
+++ b/sound/soc/rockchip/rockchip_pcm.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2018 Rockchip Electronics Co. Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ROCKCHIP_PCM_H
+#define _ROCKCHIP_PCM_H
+
+int rockchip_pcm_platform_register(struct device *dev);
+
+#endif
diff --git a/sound/soc/rockchip/rockchip_rt5645.c b/sound/soc/rockchip/rockchip_rt5645.c
index 4db4fd56db35..881c32498808 100644
--- a/sound/soc/rockchip/rockchip_rt5645.c
+++ b/sound/soc/rockchip/rockchip_rt5645.c
@@ -181,7 +181,8 @@ static int snd_rk_mc_probe(struct platform_device *pdev)
if (!rk_dailink.cpu_of_node) {
dev_err(&pdev->dev,
"Property 'rockchip,i2s-controller' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_codec_of_node;
}
rk_dailink.platform_of_node = rk_dailink.cpu_of_node;
@@ -190,17 +191,36 @@ static int snd_rk_mc_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Soc parse card name failed %d\n", ret);
- return ret;
+ goto put_cpu_of_node;
}
ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret) {
dev_err(&pdev->dev,
"Soc register card failed %d\n", ret);
- return ret;
+ goto put_cpu_of_node;
}
return ret;
+
+put_cpu_of_node:
+ of_node_put(rk_dailink.cpu_of_node);
+ rk_dailink.cpu_of_node = NULL;
+put_codec_of_node:
+ of_node_put(rk_dailink.codec_of_node);
+ rk_dailink.codec_of_node = NULL;
+
+ return ret;
+}
+
+static int snd_rk_mc_remove(struct platform_device *pdev)
+{
+ of_node_put(rk_dailink.cpu_of_node);
+ rk_dailink.cpu_of_node = NULL;
+ of_node_put(rk_dailink.codec_of_node);
+ rk_dailink.codec_of_node = NULL;
+
+ return 0;
}
static const struct of_device_id rockchip_rt5645_of_match[] = {
@@ -212,6 +232,7 @@ MODULE_DEVICE_TABLE(of, rockchip_rt5645_of_match);
static struct platform_driver snd_rk_mc_driver = {
.probe = snd_rk_mc_probe,
+ .remove = snd_rk_mc_remove,
.driver = {
.name = DRV_NAME,
.pm = &snd_soc_pm_ops,
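
The rockchip_rt5645 change is a reference-count fix: of_parse_phandle() returns the target node with its refcount raised, so every acquisition must be balanced by exactly one of_node_put(), both on the probe error paths and, for the success case, in the new .remove callback. Schematically (property names shortened, register_card() a stand-in for the real card setup):

    #include <linux/of.h>
    #include <linux/platform_device.h>

    static int register_card(struct platform_device *pdev) { return 0; }

    static int mc_probe(struct platform_device *pdev)
    {
        struct device_node *codec_np, *cpu_np;
        int ret;

        codec_np = of_parse_phandle(pdev->dev.of_node, "codec", 0);
        if (!codec_np)                       /* nothing acquired yet */
            return -EINVAL;

        cpu_np = of_parse_phandle(pdev->dev.of_node, "i2s-controller", 0);
        if (!cpu_np) {
            ret = -EINVAL;
            goto put_codec;                  /* only the codec ref is held */
        }

        ret = register_card(pdev);
        if (ret)
            goto put_cpu;                    /* both refs are held */

        return 0;                            /* refs stay until .remove */

    put_cpu:
        of_node_put(cpu_np);
    put_codec:
        of_node_put(codec_np);
        return ret;
    }
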
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index f914ed45db7d..d6c62aa13041 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -710,6 +710,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
switch (params_channels(params)) {
case 6:
val |= MOD_DC2_EN;
+ /* fall through */
case 4:
val |= MOD_DC1_EN;
break;
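
The samsung/i2s.c hunk only adds an annotation: the 6-channel case is meant to continue into the 4-channel case so that both DC-enable bits end up set, and the /* fall through */ marker documents the missing break for readers and for -Wimplicit-fallthrough. The same construction in isolation (bit positions illustrative):

    static unsigned int mod_bits_for_channels(int channels)
    {
        unsigned int val = 0;

        switch (channels) {
        case 6:
            val |= 1 << 1;      /* second DC enable */
            /* fall through */  /* 6ch also needs the 4ch bit */
        case 4:
            val |= 1 << 0;      /* first DC enable */
            break;
        default:
            break;
        }

        return val;
    }
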
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 0ae0800bf3a8..dc20f0f7080a 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menu "SoC Audio support for Renesas SoCs"
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index 2dc3b762fdd9..922fb6aa3ed1 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -1,16 +1,14 @@
-/*
- * SH7760 ("camelot") DMABRG audio DMA unit support
- *
- * Copyright (C) 2007 Manuel Lauss <mano@roarinelk.homelinux.net>
- * licensed under the terms outlined in the file COPYING at the root
- * of the linux kernel sources.
- *
- * The SH7760 DMABRG provides 4 dma channels (2x rec, 2x play), which
- * trigger an interrupt when one half of the programmed transfer size
- * has been xmitted.
- *
- * FIXME: little-endian only for now
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// SH7760 ("camelot") DMABRG audio DMA unit support
+//
+// Copyright (C) 2007 Manuel Lauss <mano@roarinelk.homelinux.net>
+//
+// The SH7760 DMABRG provides 4 dma channels (2x rec, 2x play), which
+// trigger an interrupt when one half of the programmed transfer size
+// has been xmitted.
+//
+// FIXME: little-endian only for now
#include <linux/module.h>
#include <linux/gfp.h>
@@ -341,6 +339,6 @@ static struct platform_driver sh7760_pcm_driver = {
module_platform_driver(sh7760_pcm_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH7760 Audio DMA (DMABRG) driver");
MODULE_AUTHOR("Manuel Lauss <mano@roarinelk.homelinux.net>");
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 3bae06dd121f..aa7e902f0c02 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1,16 +1,12 @@
-/*
- * Fifo-attached Serial Interface (FSI) support for SH7724
- *
- * Copyright (C) 2009 Renesas Solutions Corp.
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * Based on ssi.c
- * Copyright (c) 2007 Manuel Lauss <mano@roarinelk.homelinux.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Fifo-attached Serial Interface (FSI) support for SH7724
+//
+// Copyright (C) 2009 Renesas Solutions Corp.
+// Kuninori Morimoto <morimoto.kuninori@renesas.com>
+//
+// Based on ssi.c
+// Copyright (c) 2007 Manuel Lauss <mano@roarinelk.homelinux.net>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
diff --git a/sound/soc/sh/hac.c b/sound/soc/sh/hac.c
index 624aaf569fef..c2b496398e6b 100644
--- a/sound/soc/sh/hac.c
+++ b/sound/soc/sh/hac.c
@@ -1,13 +1,11 @@
-/*
- * Hitachi Audio Controller (AC97) support for SH7760/SH7780
- *
- * Copyright (c) 2007 Manuel Lauss <mano@roarinelk.homelinux.net>
- * licensed under the terms outlined in the file COPYING at the root
- * of the linux kernel sources.
- *
- * dont forget to set IPSEL/OMSEL register bits (in your board code) to
- * enable HAC output pins!
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Hitachi Audio Controller (AC97) support for SH7760/SH7780
+//
+// Copyright (c) 2007 Manuel Lauss <mano@roarinelk.homelinux.net>
+//
+// don't forget to set IPSEL/OMSEL register bits (in your board code) to
+// enable HAC output pins!
/* BIG FAT FIXME: although the SH7760 has 2 independent AC97 units, only
* the FIRST can be used since ASoC does not pass any information to the
@@ -343,6 +341,6 @@ static struct platform_driver hac_pcm_driver = {
module_platform_driver(hac_pcm_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SuperH onchip HAC (AC97) audio driver");
MODULE_AUTHOR("Manuel Lauss <mano@roarinelk.homelinux.net>");
diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c
index ecb057ff9fbb..8739c9f60672 100644
--- a/sound/soc/sh/migor.c
+++ b/sound/soc/sh/migor.c
@@ -1,12 +1,8 @@
-/*
- * ALSA SoC driver for Migo-R
- *
- * Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// ALSA SoC driver for Migo-R
+//
+// Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
#include <linux/clkdev.h>
#include <linux/device.h>
diff --git a/sound/soc/sh/rcar/Makefile b/sound/soc/sh/rcar/Makefile
index 9c3d5aed99d1..5d1ff8ef26f9 100644
--- a/sound/soc/sh/rcar/Makefile
+++ b/sound/soc/sh/rcar/Makefile
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
snd-soc-rcar-objs := core.o gen.o dma.o adg.o ssi.o ssiu.o src.o ctu.o mix.o dvc.o cmd.o
obj-$(CONFIG_SND_SOC_RCAR) += snd-soc-rcar.o
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 4672688cac32..3a3064dda57f 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -1,12 +1,9 @@
-/*
- * Helper routines for R-Car sound ADG.
- *
- * Copyright (C) 2013 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Helper routines for R-Car sound ADG.
+//
+// Copyright (C) 2013 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
#include <linux/clk-provider.h>
#include "rsnd.h"
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index 5900fb535a2b..cc191cd5fb82 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -1,13 +1,10 @@
-/*
- * Renesas R-Car CMD support
- *
- * Copyright (C) 2015 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Renesas R-Car CMD support
+//
+// Copyright (C) 2015 Renesas Solutions Corp.
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
#include "rsnd.h"
struct rsnd_cmd {
@@ -89,7 +86,7 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
cmd_case[rsnd_mod_id(src)] << 16;
}
- dev_dbg(dev, "ctu/mix path = 0x%08x", data);
+ dev_dbg(dev, "ctu/mix path = 0x%08x\n", data);
rsnd_mod_write(mod, CMD_ROUTE_SLCT, data);
rsnd_mod_write(mod, CMD_BUSIF_MODE, rsnd_get_busif_shift(io, mod) | 1);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f237002180c0..f8425d8b44d2 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -1,16 +1,12 @@
-/*
- * Renesas R-Car SRU/SCU/SSIU/SSI support
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * Based on fsi.c
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Renesas R-Car SRU/SCU/SSIU/SSI support
+//
+// Copyright (C) 2013 Renesas Solutions Corp.
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+//
+// Based on fsi.c
+// Kuninori Morimoto <morimoto.kuninori@renesas.com>
/*
* Renesas R-Car sound device structure
@@ -552,6 +548,15 @@ struct rsnd_dai *rsnd_rdai_get(struct rsnd_priv *priv, int id)
return priv->rdai + id;
}
+static struct snd_soc_dai_driver
+*rsnd_daidrv_get(struct rsnd_priv *priv, int id)
+{
+ if ((id < 0) || (id >= rsnd_rdai_nr(priv)))
+ return NULL;
+
+ return priv->daidrv + id;
+}
+
#define rsnd_dai_to_priv(dai) snd_soc_dai_get_drvdata(dai)
static struct rsnd_dai *rsnd_dai_to_rdai(struct snd_soc_dai *dai)
{
@@ -1037,7 +1042,7 @@ static void __rsnd_dai_probe(struct rsnd_priv *priv,
int io_i;
rdai = rsnd_rdai_get(priv, dai_i);
- drv = priv->daidrv + dai_i;
+ drv = rsnd_daidrv_get(priv, dai_i);
io_playback = &rdai->playback;
io_capture = &rdai->capture;
@@ -1085,6 +1090,12 @@ static void __rsnd_dai_probe(struct rsnd_priv *priv,
of_node_put(capture);
}
+ if (rsnd_ssi_is_pin_sharing(io_capture) ||
+ rsnd_ssi_is_pin_sharing(io_playback)) {
+ /* should have symmetric_rates if pin sharing */
+ drv->symmetric_rates = 1;
+ }
+
dev_dbg(dev, "%s (%s/%s)\n", rdai->name,
rsnd_io_to_mod_ssi(io_playback) ? "play" : " -- ",
rsnd_io_to_mod_ssi(io_capture) ? "capture" : " -- ");
@@ -1606,7 +1617,7 @@ static struct platform_driver rsnd_driver = {
};
module_platform_driver(rsnd_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Renesas R-Car audio driver");
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_ALIAS("platform:rcar-pcm-audio");
diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
index 83be7d3ae0a8..6a55aa753003 100644
--- a/sound/soc/sh/rcar/ctu.c
+++ b/sound/soc/sh/rcar/ctu.c
@@ -1,12 +1,9 @@
-/*
- * ctu.c
- *
- * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// ctu.c
+//
+// Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
#include "rsnd.h"
#define CTU_NAME_SIZE 16
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index ef82b94d038b..fe63ef8600d0 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -1,13 +1,10 @@
-/*
- * Renesas R-Car Audio DMAC support
- *
- * Copyright (C) 2015 Renesas Electronics Corp.
- * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Renesas R-Car Audio DMAC support
+//
+// Copyright (C) 2015 Renesas Electronics Corp.
+// Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
#include <linux/delay.h>
#include <linux/of_dma.h>
#include "rsnd.h"
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index ca1780e0b830..2b16e0ce6bc5 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -1,13 +1,9 @@
-/*
- * Renesas R-Car DVC support
- *
- * Copyright (C) 2014 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Renesas R-Car DVC support
+//
+// Copyright (C) 2014 Renesas Solutions Corp.
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
/*
* Playback Volume
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 25642e92dae0..0230301fe078 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -1,13 +1,9 @@
-/*
- * Renesas R-Car Gen1 SRU/SSI support
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Renesas R-Car Gen1 SRU/SSI support
+//
+// Copyright (C) 2013 Renesas Solutions Corp.
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
/*
* #define DEBUG
diff --git a/sound/soc/sh/rcar/mix.c b/sound/soc/sh/rcar/mix.c
index 1881b2de9126..8e3b57eaa708 100644
--- a/sound/soc/sh/rcar/mix.c
+++ b/sound/soc/sh/rcar/mix.c
@@ -1,12 +1,8 @@
-/*
- * mix.c
- *
- * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// mix.c
+//
+// Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
/*
* CTUn MIXn
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 6d7280d2d9be..96d93330b1e1 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -1,13 +1,10 @@
-/*
- * Renesas R-Car
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Renesas R-Car
+//
+// Copyright (C) 2013 Renesas Solutions Corp.
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
#ifndef RSND_H
#define RSND_H
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 6c72d1a81cf5..beccfbac7581 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -1,13 +1,9 @@
-/*
- * Renesas R-Car SRC support
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Renesas R-Car SRC support
+//
+// Copyright (C) 2013 Renesas Solutions Corp.
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
/*
* you can enable below define if you don't need
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 6e1166ec24a0..8304e4ec9242 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -1,16 +1,12 @@
-/*
- * Renesas R-Car SSIU/SSI support
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * Based on fsi.c
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Renesas R-Car SSIU/SSI support
+//
+// Copyright (C) 2013 Renesas Solutions Corp.
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+//
+// Based on fsi.c
+// Kuninori Morimoto <morimoto.kuninori@renesas.com>
/*
* you can enable below define if you don't need
@@ -37,6 +33,7 @@
#define CHNL_4 (1 << 22) /* Channels */
#define CHNL_6 (2 << 22) /* Channels */
#define CHNL_8 (3 << 22) /* Channels */
+#define DWL_MASK (7 << 19) /* Data Word Length mask */
#define DWL_8 (0 << 19) /* Data Word Length */
#define DWL_16 (1 << 19) /* Data Word Length */
#define DWL_18 (2 << 19) /* Data Word Length */
@@ -353,21 +350,18 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- u32 cr_own;
- u32 cr_mode;
- u32 wsr;
+ u32 cr_own = ssi->cr_own;
+ u32 cr_mode = ssi->cr_mode;
+ u32 wsr = ssi->wsr;
int is_tdm;
- if (rsnd_ssi_is_parent(mod, io))
- return;
-
is_tdm = rsnd_runtime_is_ssi_tdm(io);
/*
* always use 32bit system word.
* see also rsnd_ssi_master_clk_enable()
*/
- cr_own = FORCE | SWL_32;
+ cr_own |= FORCE | SWL_32;
if (rdai->bit_clk_inv)
cr_own |= SCKP;
@@ -377,9 +371,18 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
cr_own |= SDTA;
if (rdai->sys_delay)
cr_own |= DEL;
+
+ /*
+ * We shouldn't exchange SWSP after running.
+ * This means, parent needs to care it.
+ */
+ if (rsnd_ssi_is_parent(mod, io))
+ goto init_end;
+
if (rsnd_io_is_play(io))
cr_own |= TRMD;
+ cr_own &= ~DWL_MASK;
switch (snd_pcm_format_width(runtime->format)) {
case 16:
cr_own |= DWL_16;
@@ -406,7 +409,7 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
wsr |= WS_MODE;
cr_own |= CHNL_8;
}
-
+init_end:
ssi->cr_own = cr_own;
ssi->cr_mode = cr_mode;
ssi->wsr = wsr;
@@ -470,15 +473,18 @@ static int rsnd_ssi_quit(struct rsnd_mod *mod,
return -EIO;
}
- if (!rsnd_ssi_is_parent(mod, io))
- ssi->cr_own = 0;
-
rsnd_ssi_master_clk_stop(mod, io);
rsnd_mod_power_off(mod);
ssi->usrcnt--;
+ if (!ssi->usrcnt) {
+ ssi->cr_own = 0;
+ ssi->cr_mode = 0;
+ ssi->wsr = 0;
+ }
+
return 0;
}
@@ -1055,9 +1061,10 @@ struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id)
int __rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod)
{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ if (!mod)
+ return 0;
- return !!(rsnd_flags_has(ssi, RSND_SSI_CLK_PIN_SHARE));
+ return !!(rsnd_flags_has(rsnd_mod_to_ssi(mod), RSND_SSI_CLK_PIN_SHARE));
}
static u32 *rsnd_ssi_get_status(struct rsnd_dai_stream *io,
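
The ssi.c rework makes rsnd_ssi_config_init() start from the cached cr_own/cr_mode/wsr values instead of rebuilding them from zero, so the data-word-length field has to be cleared with the new DWL_MASK before the width for the current stream is or-ed in; otherwise bits from a previous 24-bit run could leak into a later 16-bit one. The generic read-modify-write shape:

    #define DWL_SHIFT 19
    #define DWL_MASK  (7 << DWL_SHIFT)      /* 3-bit field, as above */
    #define DWL_16    (1 << DWL_SHIFT)
    #define DWL_24    (5 << DWL_SHIFT)      /* continuing the sequence */

    static unsigned int set_word_length(unsigned int cr, int width)
    {
        cr &= ~DWL_MASK;                    /* clear the whole field first */

        switch (width) {
        case 16:
            cr |= DWL_16;
            break;
        case 24:
            cr |= DWL_24;
            break;
        }

        return cr;
    }
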
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 47bdba9fc582..016fbf5ac242 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -1,12 +1,9 @@
-/*
- * Renesas R-Car SSIU support
- *
- * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Renesas R-Car SSIU support
+//
+// Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
#include "rsnd.h"
#define SSIU_NAME "ssiu"
diff --git a/sound/soc/sh/sh7760-ac97.c b/sound/soc/sh/sh7760-ac97.c
index 4a3568a9bf59..4bb4c13cf860 100644
--- a/sound/soc/sh/sh7760-ac97.c
+++ b/sound/soc/sh/sh7760-ac97.c
@@ -1,10 +1,8 @@
-/*
- * Generic AC97 sound support for SH7760
- *
- * (c) 2007 Manuel Lauss
- *
- * Licensed under the GPLv2.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Generic AC97 sound support for SH7760
+//
+// (c) 2007 Manuel Lauss
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -68,6 +66,6 @@ static void __exit sh7760_ac97_exit(void)
module_init(sh7760_ac97_init);
module_exit(sh7760_ac97_exit);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic SH7760 AC97 sound machine");
MODULE_AUTHOR("Manuel Lauss <mano@roarinelk.homelinux.net>");
diff --git a/sound/soc/sh/siu.h b/sound/soc/sh/siu.h
index 6088d627c0e4..63a508fdfe78 100644
--- a/sound/soc/sh/siu.h
+++ b/sound/soc/sh/siu.h
@@ -1,23 +1,9 @@
-/*
- * siu.h - ALSA SoC driver for Renesas SH7343, SH7722 SIU peripheral.
- *
- * Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- * Copyright (C) 2006 Carlos Munoz <carlos@kenati.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// siu.h - ALSA SoC driver for Renesas SH7343, SH7722 SIU peripheral.
+//
+// Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+// Copyright (C) 2006 Carlos Munoz <carlos@kenati.com>
#ifndef SIU_H
#define SIU_H
diff --git a/sound/soc/sh/siu_dai.c b/sound/soc/sh/siu_dai.c
index ee2211635e92..f2a386fcd92e 100644
--- a/sound/soc/sh/siu_dai.c
+++ b/sound/soc/sh/siu_dai.c
@@ -1,23 +1,9 @@
-/*
- * siu_dai.c - ALSA SoC driver for Renesas SH7343, SH7722 SIU peripheral.
- *
- * Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- * Copyright (C) 2006 Carlos Munoz <carlos@kenati.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// siu_dai.c - ALSA SoC driver for Renesas SH7343, SH7722 SIU peripheral.
+//
+// Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+// Copyright (C) 2006 Carlos Munoz <carlos@kenati.com>
#include <linux/delay.h>
#include <linux/firmware.h>
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index 172909570ed5..e263757e4a69 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -1,23 +1,10 @@
-/*
- * siu_pcm.c - ALSA driver for Renesas SH7343, SH7722 SIU peripheral.
- *
- * Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- * Copyright (C) 2006 Carlos Munoz <carlos@kenati.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// siu_pcm.c - ALSA driver for Renesas SH7343, SH7722 SIU peripheral.
+//
+// Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+// Copyright (C) 2006 Carlos Munoz <carlos@kenati.com>
+
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
diff --git a/sound/soc/sh/ssi.c b/sound/soc/sh/ssi.c
index 89ed1b107ac5..8125fa3840b6 100644
--- a/sound/soc/sh/ssi.c
+++ b/sound/soc/sh/ssi.c
@@ -1,14 +1,11 @@
-/*
- * Serial Sound Interface (I2S) support for SH7760/SH7780
- *
- * Copyright (c) 2007 Manuel Lauss <mano@roarinelk.homelinux.net>
- *
- * licensed under the terms outlined in the file COPYING at the root
- * of the linux kernel sources.
- *
- * dont forget to set IPSEL/OMSEL register bits (in your board code) to
- * enable SSI output pins!
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Serial Sound Interface (I2S) support for SH7760/SH7780
+//
+// Copyright (c) 2007 Manuel Lauss <mano@roarinelk.homelinux.net>
+//
+// don't forget to set IPSEL/OMSEL register bits (in your board code) to
+// enable SSI output pins!
/*
* LIMITATIONS:
@@ -400,6 +397,6 @@ static struct platform_driver sh4_ssi_driver = {
module_platform_driver(sh4_ssi_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SuperH onchip SSI (I2S) audio driver");
MODULE_AUTHOR("Manuel Lauss <mano@roarinelk.homelinux.net>");
diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c
index 77e7dcf969d0..d70fcd4a1adf 100644
--- a/sound/soc/sirf/sirf-usp.c
+++ b/sound/soc/sirf/sirf-usp.c
@@ -370,10 +370,9 @@ static int sirf_usp_pcm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, usp);
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap(&pdev->dev, mem_res->start,
- resource_size(mem_res));
- if (base == NULL)
- return -ENOMEM;
+ base = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
usp->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&sirf_usp_regmap_config);
if (IS_ERR(usp->regmap))
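
The sirf-usp change swaps devm_ioremap(), which returns NULL on failure and does not claim the memory region, for devm_ioremap_resource(), which also requests the region and reports failures as an ERR_PTR() carrying the precise error code (a NULL resource is handled inside it, too). The canonical sequence:

    static int map_registers(struct platform_device *pdev,
                             void __iomem **basep)
    {
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
            return PTR_ERR(base);           /* e.g. -EBUSY or -EINVAL */

        *basep = base;
        return 0;
    }
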
diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c
index 3f424f214bca..c086786e4471 100644
--- a/sound/soc/soc-ac97.c
+++ b/sound/soc/soc-ac97.c
@@ -1,20 +1,15 @@
-/*
- * soc-ac97.c -- ALSA SoC Audio Layer AC97 support
- *
- * Copyright 2005 Wolfson Microelectronics PLC.
- * Copyright 2005 Openedhand Ltd.
- * Copyright (C) 2010 Slimlogic Ltd.
- * Copyright (C) 2010 Texas Instruments Inc.
- *
- * Author: Liam Girdwood <lrg@slimlogic.co.uk>
- * with code, comments and ideas from :-
- * Richard Purdie <richard@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// soc-ac97.c -- ALSA SoC Audio Layer AC97 support
+//
+// Copyright 2005 Wolfson Microelectronics PLC.
+// Copyright 2005 Openedhand Ltd.
+// Copyright (C) 2010 Slimlogic Ltd.
+// Copyright (C) 2010 Texas Instruments Inc.
+//
+// Author: Liam Girdwood <lrg@slimlogic.co.uk>
+// with code, comments and ideas from :-
+// Richard Purdie <richard@openedhand.com>
#include <linux/ctype.h>
#include <linux/delay.h>
diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c
index 3d7e1ff79139..b8e72b52db30 100644
--- a/sound/soc/soc-acpi.c
+++ b/sound/soc/soc-acpi.c
@@ -1,18 +1,8 @@
-/*
- * soc-apci.c - support for ACPI enumeration.
- *
- * Copyright (c) 2013-15, Intel Corporation.
- *
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// soc-acpi.c - support for ACPI enumeration.
+//
+// Copyright (c) 2013-15, Intel Corporation.
#include <sound/soc-acpi.h>
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index e095115fa9f9..409d082e80d1 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -1,18 +1,12 @@
-/*
- * soc-compress.c -- ALSA SoC Compress
- *
- * Copyright (C) 2012 Intel Corp.
- *
- * Authors: Namarta Kohli <namartax.kohli@intel.com>
- * Ramesh Babu K V <ramesh.babu@linux.intel.com>
- * Vinod Koul <vinod.koul@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// soc-compress.c -- ALSA SoC Compress
+//
+// Copyright (C) 2012 Intel Corp.
+//
+// Authors: Namarta Kohli <namartax.kohli@intel.com>
+// Ramesh Babu K V <ramesh.babu@linux.intel.com>
+// Vinod Koul <vinod.koul@linux.intel.com>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -146,6 +140,30 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
stream = SNDRV_PCM_STREAM_CAPTURE;
mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ fe->dpcm[stream].runtime = fe_substream->runtime;
+
+ ret = dpcm_path_get(fe, stream, &list);
+ if (ret < 0)
+ goto be_err;
+ else if (ret == 0)
+ dev_dbg(fe->dev, "Compress ASoC: %s no valid %s route\n",
+ fe->dai_link->name, stream ? "capture" : "playback");
+ /* calculate valid and active FE <-> BE dpcms */
+ dpcm_process_paths(fe, stream, &list, 1);
+
+ fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+
+ ret = dpcm_be_dai_startup(fe, stream);
+ if (ret < 0) {
+ /* clean up all links */
+ list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+ dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
+
+ dpcm_be_disconnect(fe, stream);
+ fe->dpcm[stream].runtime = NULL;
+ goto out;
+ }
if (cpu_dai->driver->cops && cpu_dai->driver->cops->startup) {
ret = cpu_dai->driver->cops->startup(cstream, cpu_dai);
@@ -159,7 +177,7 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
ret = soc_compr_components_open(cstream, &component);
if (ret < 0)
- goto machine_err;
+ goto open_err;
if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->startup) {
ret = fe->dai_link->compr_ops->startup(cstream);
@@ -170,31 +188,6 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
}
}
- fe->dpcm[stream].runtime = fe_substream->runtime;
-
- ret = dpcm_path_get(fe, stream, &list);
- if (ret < 0)
- goto fe_err;
- else if (ret == 0)
- dev_dbg(fe->dev, "Compress ASoC: %s no valid %s route\n",
- fe->dai_link->name, stream ? "capture" : "playback");
-
- /* calculate valid and active FE <-> BE dpcms */
- dpcm_process_paths(fe, stream, &list, 1);
-
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
-
- ret = dpcm_be_dai_startup(fe, stream);
- if (ret < 0) {
- /* clean up all links */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
- dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
-
- dpcm_be_disconnect(fe, stream);
- fe->dpcm[stream].runtime = NULL;
- goto path_err;
- }
-
dpcm_clear_pending_state(fe, stream);
dpcm_path_put(&list);
@@ -207,17 +200,14 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
return 0;
-path_err:
- dpcm_path_put(&list);
-fe_err:
- if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
- fe->dai_link->compr_ops->shutdown(cstream);
machine_err:
soc_compr_components_free(cstream, component);
-
+open_err:
if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
cpu_dai->driver->cops->shutdown(cstream, cpu_dai);
out:
+ dpcm_path_put(&list);
+be_err:
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
mutex_unlock(&fe->card->mutex);
return ret;
@@ -557,6 +547,24 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ /*
+ * Create an empty hw_params for the BE as the machine driver must
+ * fix this up to match DSP decoder and ASRC configuration.
+ * I.e. machine driver fixup for compressed BE is mandatory.
+ */
+ memset(&fe->dpcm[fe_substream->stream].hw_params, 0,
+ sizeof(struct snd_pcm_hw_params));
+
+ fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+
+ ret = dpcm_be_dai_hw_params(fe, stream);
+ if (ret < 0)
+ goto out;
+
+ ret = dpcm_be_dai_prepare(fe, stream);
+ if (ret < 0)
+ goto out;
+
if (cpu_dai->driver->cops && cpu_dai->driver->cops->set_params) {
ret = cpu_dai->driver->cops->set_params(cstream, params, cpu_dai);
if (ret < 0)
@@ -583,24 +591,6 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
goto out;
}
- /*
- * Create an empty hw_params for the BE as the machine driver must
- * fix this up to match DSP decoder and ASRC configuration.
- * I.e. machine driver fixup for compressed BE is mandatory.
- */
- memset(&fe->dpcm[fe_substream->stream].hw_params, 0,
- sizeof(struct snd_pcm_hw_params));
-
- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
-
- ret = dpcm_be_dai_hw_params(fe, stream);
- if (ret < 0)
- goto out;
-
- ret = dpcm_be_dai_prepare(fe, stream);
- if (ret < 0)
- goto out;
-
dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
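
The soc-compress reordering brings the FE open path in line with DPCM expectations: the FE-to-BE routes are queried and the back ends started before the front-end DAI, components, and machine ops are opened, and set_params likewise runs the BE hw_params/prepare steps first. The goto ladder then unwinds strictly in reverse order of setup; a hedged sketch of that control flow, with stub helpers standing in for the dpcm_* and open calls:

    static int get_paths(void)      { return 0; }   /* dpcm_path_get() etc. */
    static int start_backends(void) { return 0; }   /* dpcm_be_dai_startup() */
    static int open_frontend(void)  { return 0; }   /* dai/component/machine */
    static void stop_backends(void) { }
    static void put_paths(void)     { }

    static int open_fe_sketch(void)
    {
        int ret;

        ret = get_paths();
        if (ret < 0)
            goto be_err;            /* nothing to undo yet */

        ret = start_backends();
        if (ret < 0)
            goto out;               /* release only the route list */

        ret = open_frontend();
        if (ret < 0)
            goto open_err;          /* undo the BE startup as well */

        put_paths();
        return 0;

    open_err:
        stop_backends();
    out:
        put_paths();
    be_err:
        return ret;
    }
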
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 4663de3cf495..9cfe10d8040c 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1,26 +1,21 @@
-/*
- * soc-core.c -- ALSA SoC Audio Layer
- *
- * Copyright 2005 Wolfson Microelectronics PLC.
- * Copyright 2005 Openedhand Ltd.
- * Copyright (C) 2010 Slimlogic Ltd.
- * Copyright (C) 2010 Texas Instruments Inc.
- *
- * Author: Liam Girdwood <lrg@slimlogic.co.uk>
- * with code, comments and ideas from :-
- * Richard Purdie <richard@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * TODO:
- * o Add hw rules to enforce rates, etc.
- * o More testing with other codecs/machines.
- * o Add more codecs and platforms to ensure good API coverage.
- * o Support TDM on PCM and I2S
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// soc-core.c -- ALSA SoC Audio Layer
+//
+// Copyright 2005 Wolfson Microelectronics PLC.
+// Copyright 2005 Openedhand Ltd.
+// Copyright (C) 2010 Slimlogic Ltd.
+// Copyright (C) 2010 Texas Instruments Inc.
+//
+// Author: Liam Girdwood <lrg@slimlogic.co.uk>
+// with code, comments and ideas from :-
+// Richard Purdie <richard@openedhand.com>
+//
+// TODO:
+// o Add hw rules to enforce rates, etc.
+// o More testing with other codecs/machines.
+// o Add more codecs and platforms to ensure good API coverage.
+// o Support TDM on PCM and I2S
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -533,6 +528,7 @@ int snd_soc_suspend(struct device *dev)
"ASoC: idle_bias_off CODEC on over suspend\n");
break;
}
+ /* fall through */
case SND_SOC_BIAS_OFF:
if (component->driver->suspend)
@@ -852,6 +848,9 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
const char *platform_name;
int i;
+ if (dai_link->ignore)
+ return 0;
+
dev_dbg(card->dev, "ASoC: binding %s\n", dai_link->name);
if (soc_is_dai_link_bound(card, dai_link)) {
@@ -1195,15 +1194,27 @@ void snd_soc_remove_dai_link(struct snd_soc_card *card,
}
EXPORT_SYMBOL_GPL(snd_soc_remove_dai_link);
+static void soc_set_of_name_prefix(struct snd_soc_component *component)
+{
+ struct device_node *component_of_node = component->dev->of_node;
+ const char *str;
+ int ret;
+
+ if (!component_of_node && component->dev->parent)
+ component_of_node = component->dev->parent->of_node;
+
+ ret = of_property_read_string(component_of_node, "sound-name-prefix",
+ &str);
+ if (!ret)
+ component->name_prefix = str;
+}
+
static void soc_set_name_prefix(struct snd_soc_card *card,
struct snd_soc_component *component)
{
int i;
- if (card->codec_conf == NULL)
- return;
-
- for (i = 0; i < card->num_configs; i++) {
+ for (i = 0; i < card->num_configs && card->codec_conf; i++) {
struct snd_soc_codec_conf *map = &card->codec_conf[i];
struct device_node *component_of_node = component->dev->of_node;
@@ -1215,8 +1226,14 @@ static void soc_set_name_prefix(struct snd_soc_card *card,
if (map->dev_name && strcmp(component->name, map->dev_name))
continue;
component->name_prefix = map->name_prefix;
- break;
+ return;
}
+
+ /*
+ * If there is no configuration table or no match in the table,
+ * check if a prefix is provided in the node
+ */
+ soc_set_of_name_prefix(component);
}
static int soc_probe_component(struct snd_soc_card *card,
@@ -1461,7 +1478,9 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
{
struct snd_soc_dai_link *dai_link = rtd->dai_link;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int i, ret;
+ struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
+ int i, ret, num;
dev_dbg(card->dev, "ASoC: probe %s dai link %d late %d\n",
card->name, rtd->num, order);
@@ -1507,9 +1526,28 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
soc_dpcm_debugfs_add(rtd);
#endif
+ num = rtd->num;
+
+ /*
+ * most drivers will register their PCMs using DAI link ordering but
+ * topology based drivers can use the DAI link id field to set PCM
+ * device number and then use rtd + a base offset of the BEs.
+ */
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ if (!component->driver->use_dai_pcm_id)
+ continue;
+
+ if (rtd->dai_link->no_pcm)
+ num += component->driver->be_pcm_base;
+ else
+ num = rtd->dai_link->id;
+ }
+
if (cpu_dai->driver->compress_new) {
/*create compress_device"*/
- ret = cpu_dai->driver->compress_new(rtd, rtd->num);
+ ret = cpu_dai->driver->compress_new(rtd, num);
if (ret < 0) {
dev_err(card->dev, "ASoC: can't create compress %s\n",
dai_link->stream_name);
@@ -1519,7 +1557,7 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
if (!dai_link->params) {
/* create the pcm */
- ret = soc_new_pcm(rtd, rtd->num);
+ ret = soc_new_pcm(rtd, num);
if (ret < 0) {
dev_err(card->dev, "ASoC: can't create pcm %s :%d\n",
dai_link->stream_name, ret);
@@ -1846,6 +1884,74 @@ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
EXPORT_SYMBOL_GPL(snd_soc_set_dmi_name);
#endif /* CONFIG_DMI */
+static void soc_check_tplg_fes(struct snd_soc_card *card)
+{
+ struct snd_soc_component *component;
+ const struct snd_soc_component_driver *comp_drv;
+ struct snd_soc_dai_link *dai_link;
+ int i;
+
+ list_for_each_entry(component, &component_list, list) {
+
+ /* does this component override FEs ? */
+ if (!component->driver->ignore_machine)
+ continue;
+
+ /* for this machine ? */
+ if (strcmp(component->driver->ignore_machine,
+ card->dev->driver->name))
+ continue;
+
+ /* machine matches, so override the rtd data */
+ for (i = 0; i < card->num_links; i++) {
+
+ dai_link = &card->dai_link[i];
+
+ /* ignore this FE */
+ if (dai_link->dynamic) {
+ dai_link->ignore = true;
+ continue;
+ }
+
+ dev_info(card->dev, "info: override FE DAI link %s\n",
+ card->dai_link[i].name);
+
+ /* override platform component */
+ dai_link->platform_name = component->name;
+
+ /* convert non BE into BE */
+ dai_link->no_pcm = 1;
+
+ /* override any BE fixups */
+ dai_link->be_hw_params_fixup =
+ component->driver->be_hw_params_fixup;
+
+ /* most BE links don't set stream name, so set it to
+ * dai link name if it's NULL to help bind widgets.
+ */
+ if (!dai_link->stream_name)
+ dai_link->stream_name = dai_link->name;
+ }
+
+ /* Inform userspace we are using alternate topology */
+ if (component->driver->topology_name_prefix) {
+
+ /* topology shortname created? */
+ if (!card->topology_shortname_created) {
+ comp_drv = component->driver;
+
+ snprintf(card->topology_shortname, 32, "%s-%s",
+ comp_drv->topology_name_prefix,
+ card->name);
+ card->topology_shortname_created = true;
+ }
+
+ /* use topology shortname */
+ card->name = card->topology_shortname;
+ }
+ }
+}
+
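
For reference, a component driver opts into this override through the driver fields tested above; a hypothetical sketch (names and values are illustrative, not from this patch):

#include <sound/soc.h>

static const struct snd_soc_component_driver example_dsp_component = {
	.name			= "example-dsp",
	/* take over the FEs of this machine driver, use topology instead */
	.ignore_machine		= "example_machine",
	.topology_name_prefix	= "example",
	/* see the PCM device numbering hunk earlier in this patch */
	.use_dai_pcm_id		= true,
	.be_pcm_base		= 16,
};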
static int snd_soc_instantiate_card(struct snd_soc_card *card)
{
struct snd_soc_pcm_runtime *rtd;
@@ -1855,6 +1961,9 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
mutex_lock(&client_mutex);
mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT);
+ /* check whether any platform is ignoring machine FE and using topology */
+ soc_check_tplg_fes(card);
+
/* bind DAIs */
for (i = 0; i < card->num_links; i++) {
ret = soc_bind_dai_link(card, &card->dai_link[i]);
@@ -2523,6 +2632,28 @@ int snd_soc_dai_set_channel_map(struct snd_soc_dai *dai,
EXPORT_SYMBOL_GPL(snd_soc_dai_set_channel_map);
/**
+ * snd_soc_dai_get_channel_map - Get DAI audio channel map
+ * @dai: DAI
+ * @tx_num: how many TX channels
+ * @tx_slot: pointer to an array which gives the TX slot number used by
+ * channels 0..num-1
+ * @rx_num: how many RX channels
+ * @rx_slot: pointer to an array which gives the RX slot number used by
+ * channels 0..num-1
+ */
+int snd_soc_dai_get_channel_map(struct snd_soc_dai *dai,
+ unsigned int *tx_num, unsigned int *tx_slot,
+ unsigned int *rx_num, unsigned int *rx_slot)
+{
+ if (dai->driver->ops->get_channel_map)
+ return dai->driver->ops->get_channel_map(dai, tx_num, tx_slot,
+ rx_num, rx_slot);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dai_get_channel_map);
+
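
A caller might query the map like this (a sketch; the array sizes are illustrative and dai is assumed to be a bound DAI):

	unsigned int tx_num, rx_num;
	unsigned int tx_slot[8], rx_slot[8];
	int ret;

	ret = snd_soc_dai_get_channel_map(dai, &tx_num, tx_slot,
					  &rx_num, rx_slot);
	if (ret == -ENOTSUPP)
		dev_dbg(dai->dev, "DAI provides no channel map\n");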
+/**
* snd_soc_dai_set_tristate - configure DAI tristate (high-impedance) state.
* @dai: DAI
* @tristate: tristate enable
@@ -3258,9 +3389,9 @@ int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
}
EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_simple_widgets);
-static int snd_soc_of_get_slot_mask(struct device_node *np,
- const char *prop_name,
- unsigned int *mask)
+int snd_soc_of_get_slot_mask(struct device_node *np,
+ const char *prop_name,
+ unsigned int *mask)
{
u32 val;
const __be32 *of_slot_mask = of_get_property(np, prop_name, &val);
@@ -3275,6 +3406,7 @@ static int snd_soc_of_get_slot_mask(struct device_node *np,
return val;
}
+EXPORT_SYMBOL_GPL(snd_soc_of_get_slot_mask);
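
With the symbol exported, drivers can parse a slot mask themselves; a sketch using the standard tdm-slot binding (np and dev are assumed to be the caller's device node and device):

	unsigned int tx_mask = 0;
	int slots;

	/* property per Documentation/devicetree/bindings/sound/tdm-slot.txt */
	slots = snd_soc_of_get_slot_mask(np, "dai-tdm-slot-tx-mask", &tx_mask);
	if (slots > 0)
		dev_dbg(dev, "tx mask 0x%x over %d slots\n", tx_mask, slots);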
int snd_soc_of_parse_tdm_slot(struct device_node *np,
unsigned int *tx_mask,
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 229c12349803..7e96793050c9 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1,27 +1,21 @@
-/*
- * soc-dapm.c -- ALSA SoC Dynamic Audio Power Management
- *
- * Copyright 2005 Wolfson Microelectronics PLC.
- * Author: Liam Girdwood <lrg@slimlogic.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * Features:
- * o Changes power status of internal codec blocks depending on the
- * dynamic configuration of codec internal audio paths and active
- * DACs/ADCs.
- * o Platform power domain - can support external components i.e. amps and
- * mic/headphone insertion events.
- * o Automatic Mic Bias support
- * o Jack insertion power event initiation - e.g. hp insertion will enable
- * sinks, dacs, etc
- * o Delayed power down of audio subsystem to reduce pops between a quick
- * device reopen.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// soc-dapm.c -- ALSA SoC Dynamic Audio Power Management
+//
+// Copyright 2005 Wolfson Microelectronics PLC.
+// Author: Liam Girdwood <lrg@slimlogic.co.uk>
+//
+// Features:
+// o Changes power status of internal codec blocks depending on the
+// dynamic configuration of codec internal audio paths and active
+// DACs/ADCs.
+// o Platform power domain - can support external components i.e. amps and
+// mic/headphone insertion events.
+// o Automatic Mic Bias support
+// o Jack insertion power event initiation - e.g. hp insertion will enable
+// sinks, dacs, etc
+// o Delayed power down of audio subsystem to reduce pops between a quick
+// device reopen.
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -3662,7 +3656,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
struct snd_pcm_substream substream;
struct snd_pcm_hw_params *params = NULL;
struct snd_pcm_runtime *runtime = NULL;
- u64 fmt;
+ unsigned int fmt;
int ret;
if (WARN_ON(!config) ||
@@ -4073,6 +4067,13 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
continue;
}
+ /* let users know there is no DAI to link */
+ if (!dai_w->priv) {
+ dev_dbg(card->dev, "dai widget %s has no DAI\n",
+ dai_w->name);
+ continue;
+ }
+
dai = dai_w->priv;
/* ...find all widgets with the same stream and link them */
diff --git a/sound/soc/soc-devres.c b/sound/soc/soc-devres.c
index 7ac745df1412..a9ea172a66a7 100644
--- a/sound/soc/soc-devres.c
+++ b/sound/soc/soc-devres.c
@@ -1,13 +1,8 @@
-/*
- * soc-devres.c -- ALSA SoC Audio Layer devres functions
- *
- * Copyright (C) 2013 Linaro Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// soc-devres.c -- ALSA SoC Audio Layer devres functions
+//
+// Copyright (C) 2013 Linaro Ltd
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 56a541b9ff9e..52fd7af952a5 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -1,17 +1,8 @@
-/*
- * Copyright (C) 2013, Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (C) 2013, Analog Devices Inc.
+// Author: Lars-Peter Clausen <lars@metafoo.de>
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dmaengine.h>
@@ -197,7 +188,7 @@ static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substrea
case 32:
case 64:
if (addr_widths & (1 << (bits / 8)))
- hw.formats |= (1LL << i);
+ hw.formats |= pcm_format_to_bits(i);
break;
default:
/* Unsupported types */
@@ -343,7 +334,7 @@ static snd_pcm_uframes_t dmaengine_pcm_pointer(
static int dmaengine_copy_user(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- void *buf, unsigned long bytes)
+ void __user *buf, unsigned long bytes)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_component *component =
@@ -359,18 +350,17 @@ static int dmaengine_copy_user(struct snd_pcm_substream *substream,
int ret;
if (is_playback)
- if (copy_from_user(dma_ptr, (void __user *)buf, bytes))
+ if (copy_from_user(dma_ptr, buf, bytes))
return -EFAULT;
if (process) {
- ret = process(substream, channel, hwoff,
- (void __user *)buf, bytes);
+ ret = process(substream, channel, hwoff, (__force void *)buf, bytes);
if (ret < 0)
return ret;
}
if (!is_playback)
- if (copy_to_user((void __user *)buf, dma_ptr, bytes))
+ if (copy_to_user(buf, dma_ptr, bytes))
return -EFAULT;
return 0;
diff --git a/sound/soc/soc-io.c b/sound/soc/soc-io.c
index 026cd5347e53..1ff9175e9d5e 100644
--- a/sound/soc/soc-io.c
+++ b/sound/soc/soc-io.c
@@ -1,15 +1,10 @@
-/*
- * soc-io.c -- ASoC register I/O helpers
- *
- * Copyright 2009-2011 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// soc-io.c -- ASoC register I/O helpers
+//
+// Copyright 2009-2011 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index b2b16044ae80..c7b990abdbaa 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -1,15 +1,10 @@
-/*
- * soc-jack.c -- ALSA SoC jack handling
- *
- * Copyright 2008 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// soc-jack.c -- ALSA SoC jack handling
+//
+// Copyright 2008 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <sound/jack.h>
#include <sound/soc.h>
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 7144a51ddfa9..592efb370c44 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -1,20 +1,15 @@
-/*
- * soc-ops.c -- Generic ASoC operations
- *
- * Copyright 2005 Wolfson Microelectronics PLC.
- * Copyright 2005 Openedhand Ltd.
- * Copyright (C) 2010 Slimlogic Ltd.
- * Copyright (C) 2010 Texas Instruments Inc.
- *
- * Author: Liam Girdwood <lrg@slimlogic.co.uk>
- * with code, comments and ideas from :-
- * Richard Purdie <richard@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// soc-ops.c -- Generic ASoC operations
+//
+// Copyright 2005 Wolfson Microelectronics PLC.
+// Copyright 2005 Openedhand Ltd.
+// Copyright (C) 2010 Slimlogic Ltd.
+// Copyright (C) 2010 Texas Instruments Inc.
+//
+// Author: Liam Girdwood <lrg@slimlogic.co.uk>
+// with code, comments and ideas from :-
+// Richard Purdie <richard@openedhand.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 5e7ae47a9658..e8b98bfd4cf1 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1,20 +1,14 @@
-/*
- * soc-pcm.c -- ALSA SoC PCM
- *
- * Copyright 2005 Wolfson Microelectronics PLC.
- * Copyright 2005 Openedhand Ltd.
- * Copyright (C) 2010 Slimlogic Ltd.
- * Copyright (C) 2010 Texas Instruments Inc.
- *
- * Authors: Liam Girdwood <lrg@ti.com>
- * Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// soc-pcm.c -- ALSA SoC PCM
+//
+// Copyright 2005 Wolfson Microelectronics PLC.
+// Copyright 2005 Openedhand Ltd.
+// Copyright (C) 2010 Slimlogic Ltd.
+// Copyright (C) 2010 Texas Instruments Inc.
+//
+// Authors: Liam Girdwood <lrg@ti.com>
+// Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -448,6 +442,29 @@ static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream)
hw->rate_max = min_not_zero(hw->rate_max, rate_max);
}
+static int soc_pcm_components_close(struct snd_pcm_substream *substream,
+ struct snd_soc_component *last)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ if (component == last)
+ break;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->close)
+ continue;
+
+ component->driver->ops->close(substream);
+ }
+
+ return 0;
+}
+
/*
* Called by ALSA when a PCM substream is opened, the runtime->hw record is
* then initialized and any private data can be allocated. This also calls
@@ -462,7 +479,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai *codec_dai;
const char *codec_dai_name = "multicodec";
- int i, ret = 0, __ret;
+ int i, ret = 0;
pinctrl_pm_select_default_state(cpu_dai->dev);
for (i = 0; i < rtd->num_codecs; i++)
@@ -486,7 +503,6 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
}
}
- ret = 0;
for_each_rtdcom(rtd, rtdcom) {
component = rtdcom->component;
@@ -494,16 +510,15 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
!component->driver->ops->open)
continue;
- __ret = component->driver->ops->open(substream);
- if (__ret < 0) {
+ ret = component->driver->ops->open(substream);
+ if (ret < 0) {
dev_err(component->dev,
"ASoC: can't open component %s: %d\n",
- component->name, __ret);
- ret = __ret;
+ component->name, ret);
+ goto component_err;
}
}
- if (ret < 0)
- goto component_err;
+ component = NULL;
for (i = 0; i < rtd->num_codecs; i++) {
codec_dai = rtd->codec_dais[i];
@@ -612,15 +627,7 @@ codec_dai_err:
}
component_err:
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- if (!component->driver->ops ||
- !component->driver->ops->close)
- continue;
-
- component->driver->ops->close(substream);
- }
+ soc_pcm_components_close(substream, component);
if (cpu_dai->driver->ops->shutdown)
cpu_dai->driver->ops->shutdown(substream, cpu_dai);
@@ -714,15 +721,7 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
if (rtd->dai_link->ops->shutdown)
rtd->dai_link->ops->shutdown(substream);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- if (!component->driver->ops ||
- !component->driver->ops->close)
- continue;
-
- component->driver->ops->close(substream);
- }
+ soc_pcm_components_close(substream, NULL);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (snd_soc_runtime_ignore_pmdown_time(rtd)) {
@@ -860,8 +859,20 @@ int soc_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
int ret;
+ /* perform any topology hw_params fixups before DAI */
+ if (rtd->dai_link->be_hw_params_fixup) {
+ ret = rtd->dai_link->be_hw_params_fixup(rtd, params);
+ if (ret < 0) {
+ dev_err(rtd->dev,
+ "ASoC: hw_params topology fixup failed %d\n",
+ ret);
+ return ret;
+ }
+ }
+
if (dai->driver->ops->hw_params) {
ret = dai->driver->ops->hw_params(substream, params, dai);
if (ret < 0) {
@@ -874,6 +885,29 @@ int soc_dai_hw_params(struct snd_pcm_substream *substream,
return 0;
}
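
With this hunk, a BE fixup now runs before every DAI hw_params callback. A typical fixup, sketched here with hypothetical constraints, simply pins the BE interface format:

#include <sound/pcm_params.h>
#include <sound/soc.h>

static int example_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
				      struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate = hw_param_interval(params,
						SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *chan = hw_param_interval(params,
						SNDRV_PCM_HW_PARAM_CHANNELS);

	/* pin the BE to 48 kHz stereo regardless of the FE parameters */
	rate->min = rate->max = 48000;
	chan->min = chan->max = 2;

	return 0;
}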
+static int soc_pcm_components_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_component *last)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ if (component == last)
+ break;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->hw_free)
+ continue;
+
+ component->driver->ops->hw_free(substream);
+ }
+
+ return 0;
+}
+
/*
* Called by ALSA when the hardware params are set by application. This
* function can also be called multiple times and can allocate buffers
@@ -886,7 +920,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int i, ret = 0, __ret;
+ int i, ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
if (rtd->dai_link->ops->hw_params) {
@@ -944,7 +978,6 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
if (ret < 0)
goto interface_err;
- ret = 0;
for_each_rtdcom(rtd, rtdcom) {
component = rtdcom->component;
@@ -952,16 +985,15 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
!component->driver->ops->hw_params)
continue;
- __ret = component->driver->ops->hw_params(substream, params);
- if (__ret < 0) {
+ ret = component->driver->ops->hw_params(substream, params);
+ if (ret < 0) {
dev_err(component->dev,
"ASoC: %s hw params failed: %d\n",
- component->name, __ret);
- ret = __ret;
+ component->name, ret);
+ goto component_err;
}
}
- if (ret < 0)
- goto component_err;
+ component = NULL;
/* store the parameters for each DAIs */
cpu_dai->rate = params_rate(params);
@@ -977,15 +1009,7 @@ out:
return ret;
component_err:
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- if (!component->driver->ops ||
- !component->driver->ops->hw_free)
- continue;
-
- component->driver->ops->hw_free(substream);
- }
+ soc_pcm_components_hw_free(substream, component);
if (cpu_dai->driver->ops->hw_free)
cpu_dai->driver->ops->hw_free(substream, cpu_dai);
@@ -1014,8 +1038,6 @@ codec_err:
static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component;
- struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai *codec_dai;
bool playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
@@ -1052,15 +1074,7 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
rtd->dai_link->ops->hw_free(substream);
/* free any component resources */
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- if (!component->driver->ops ||
- !component->driver->ops->hw_free)
- continue;
-
- component->driver->ops->hw_free(substream);
- }
+ soc_pcm_components_hw_free(substream, NULL);
/* now free hw params for the DAIs */
for (i = 0; i < rtd->num_codecs; i++) {
@@ -1165,6 +1179,9 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
snd_pcm_sframes_t codec_delay = 0;
int i;
+ /* clear the previous total delay */
+ runtime->delay = 0;
+
for_each_rtdcom(rtd, rtdcom) {
component = rtdcom->component;
@@ -1176,6 +1193,8 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
offset = component->driver->ops->pointer(substream);
break;
}
+ /* base delay if assigned in pointer callback */
+ delay = runtime->delay;
if (cpu_dai->driver->ops->delay)
delay += cpu_dai->driver->ops->delay(substream, cpu_dai);
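
After this change the reported delay composes additively from the component base plus the DAI delays; with hypothetical numbers:

	snd_pcm_sframes_t delay = 32;	/* runtime->delay set by ->pointer() */

	delay += 16;			/* cpu_dai ->delay() */
	delay += 8;			/* largest codec_dai ->delay() */
	/* runtime->delay = 56 frames is handed back to the ALSA core */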
@@ -1658,29 +1677,28 @@ unwind:
}
static void dpcm_init_runtime_hw(struct snd_pcm_runtime *runtime,
- struct snd_soc_pcm_stream *stream,
- u64 formats)
+ struct snd_soc_pcm_stream *stream)
{
runtime->hw.rate_min = stream->rate_min;
runtime->hw.rate_max = stream->rate_max;
runtime->hw.channels_min = stream->channels_min;
runtime->hw.channels_max = stream->channels_max;
if (runtime->hw.formats)
- runtime->hw.formats &= formats & stream->formats;
+ runtime->hw.formats &= stream->formats;
else
- runtime->hw.formats = formats & stream->formats;
+ runtime->hw.formats = stream->formats;
runtime->hw.rates = stream->rates;
}
-static u64 dpcm_runtime_base_format(struct snd_pcm_substream *substream)
+static void dpcm_runtime_merge_format(struct snd_pcm_substream *substream,
+ u64 *formats)
{
struct snd_soc_pcm_runtime *fe = substream->private_data;
struct snd_soc_dpcm *dpcm;
- u64 formats = ULLONG_MAX;
int stream = substream->stream;
if (!fe->dai_link->dpcm_merged_format)
- return formats;
+ return;
/*
* It returns merged BE codec format
@@ -1694,17 +1712,132 @@ static u64 dpcm_runtime_base_format(struct snd_pcm_substream *substream)
int i;
for (i = 0; i < be->num_codecs; i++) {
+ /*
+ * Skip CODECs which don't support the current stream
+ * type. See soc_pcm_init_runtime_hw() for more details
+ */
+ if (!snd_soc_dai_stream_valid(be->codec_dais[i],
+ stream))
+ continue;
+
codec_dai_drv = be->codec_dais[i]->driver;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
codec_stream = &codec_dai_drv->playback;
else
codec_stream = &codec_dai_drv->capture;
- formats &= codec_stream->formats;
+ *formats &= codec_stream->formats;
}
}
+}
+
+static void dpcm_runtime_merge_chan(struct snd_pcm_substream *substream,
+ unsigned int *channels_min,
+ unsigned int *channels_max)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ struct snd_soc_dpcm *dpcm;
+ int stream = substream->stream;
- return formats;
+ if (!fe->dai_link->dpcm_merged_chan)
+ return;
+
+ /*
+ * It returns the merged BE codec channel count
+ * if the FE wants to use it (= dpcm_merged_chan)
+ */
+
+ list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ struct snd_soc_pcm_runtime *be = dpcm->be;
+ struct snd_soc_dai_driver *cpu_dai_drv = be->cpu_dai->driver;
+ struct snd_soc_dai_driver *codec_dai_drv;
+ struct snd_soc_pcm_stream *codec_stream;
+ struct snd_soc_pcm_stream *cpu_stream;
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ cpu_stream = &cpu_dai_drv->playback;
+ else
+ cpu_stream = &cpu_dai_drv->capture;
+
+ *channels_min = max(*channels_min, cpu_stream->channels_min);
+ *channels_max = min(*channels_max, cpu_stream->channels_max);
+
+ /*
+ * chan min/max cannot be enforced if there are multiple CODEC
+ * DAIs connected to a single CPU DAI, use the CPU DAI's limits directly
+ */
+ if (be->num_codecs == 1) {
+ codec_dai_drv = be->codec_dais[0]->driver;
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ codec_stream = &codec_dai_drv->playback;
+ else
+ codec_stream = &codec_dai_drv->capture;
+
+ *channels_min = max(*channels_min,
+ codec_stream->channels_min);
+ *channels_max = min(*channels_max,
+ codec_stream->channels_max);
+ }
+ }
+}
+
+static void dpcm_runtime_merge_rate(struct snd_pcm_substream *substream,
+ unsigned int *rates,
+ unsigned int *rate_min,
+ unsigned int *rate_max)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ struct snd_soc_dpcm *dpcm;
+ int stream = substream->stream;
+
+ if (!fe->dai_link->dpcm_merged_rate)
+ return;
+
+ /*
+ * It returns the merged BE codec rates
+ * if the FE wants to use them (= dpcm_merged_rate)
+ */
+
+ list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ struct snd_soc_pcm_runtime *be = dpcm->be;
+ struct snd_soc_dai_driver *cpu_dai_drv = be->cpu_dai->driver;
+ struct snd_soc_dai_driver *codec_dai_drv;
+ struct snd_soc_pcm_stream *codec_stream;
+ struct snd_soc_pcm_stream *cpu_stream;
+ int i;
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ cpu_stream = &cpu_dai_drv->playback;
+ else
+ cpu_stream = &cpu_dai_drv->capture;
+
+ *rate_min = max(*rate_min, cpu_stream->rate_min);
+ *rate_max = min_not_zero(*rate_max, cpu_stream->rate_max);
+ *rates = snd_pcm_rate_mask_intersect(*rates, cpu_stream->rates);
+
+ for (i = 0; i < be->num_codecs; i++) {
+ /*
+ * Skip CODECs which don't support the current stream
+ * type. See soc_pcm_init_runtime_hw() for more details
+ */
+ if (!snd_soc_dai_stream_valid(be->codec_dais[i],
+ stream))
+ continue;
+
+ codec_dai_drv = be->codec_dais[i]->driver;
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ codec_stream = &codec_dai_drv->playback;
+ else
+ codec_stream = &codec_dai_drv->capture;
+
+ *rate_min = max(*rate_min, codec_stream->rate_min);
+ *rate_max = min_not_zero(*rate_max,
+ codec_stream->rate_max);
+ *rates = snd_pcm_rate_mask_intersect(*rates,
+ codec_stream->rates);
+ }
+ }
}
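
All three merge helpers only ever narrow the FE runtime; format merging, for instance, is a plain AND over the BE codec masks (toy values):

	u64 fe = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
		 SNDRV_PCM_FMTBIT_S32_LE;
	u64 be = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE;

	fe &= be;	/* only S16_LE and S32_LE survive the merge */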
static void dpcm_set_fe_runtime(struct snd_pcm_substream *substream)
@@ -1713,12 +1846,17 @@ static void dpcm_set_fe_runtime(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai_driver *cpu_dai_drv = cpu_dai->driver;
- u64 format = dpcm_runtime_base_format(substream);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- dpcm_init_runtime_hw(runtime, &cpu_dai_drv->playback, format);
+ dpcm_init_runtime_hw(runtime, &cpu_dai_drv->playback);
else
- dpcm_init_runtime_hw(runtime, &cpu_dai_drv->capture, format);
+ dpcm_init_runtime_hw(runtime, &cpu_dai_drv->capture);
+
+ dpcm_runtime_merge_format(substream, &runtime->hw.formats);
+ dpcm_runtime_merge_chan(substream, &runtime->hw.channels_min,
+ &runtime->hw.channels_max);
+ dpcm_runtime_merge_rate(substream, &runtime->hw.rates,
+ &runtime->hw.rate_min, &runtime->hw.rate_max);
}
static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd);
@@ -2543,106 +2681,113 @@ static int dpcm_run_old_update(struct snd_soc_pcm_runtime *fe, int stream)
return ret;
}
-/* Called by DAPM mixer/mux changes to update audio routing between PCMs and
- * any DAI links.
- */
-int soc_dpcm_runtime_update(struct snd_soc_card *card)
+static int soc_dpcm_fe_runtime_update(struct snd_soc_pcm_runtime *fe, int new)
{
- struct snd_soc_pcm_runtime *fe;
- int old, new, paths;
+ struct snd_soc_dapm_widget_list *list;
+ int count, paths;
- mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
- list_for_each_entry(fe, &card->rtd_list, list) {
- struct snd_soc_dapm_widget_list *list;
+ if (!fe->dai_link->dynamic)
+ return 0;
- /* make sure link is FE */
- if (!fe->dai_link->dynamic)
- continue;
+ /* only check active links */
+ if (!fe->cpu_dai->active)
+ return 0;
- /* only check active links */
- if (!fe->cpu_dai->active)
- continue;
+ /* DAPM sync will call this to update DSP paths */
+ dev_dbg(fe->dev, "ASoC: DPCM %s runtime update for FE %s\n",
+ new ? "new" : "old", fe->dai_link->name);
- /* DAPM sync will call this to update DSP paths */
- dev_dbg(fe->dev, "ASoC: DPCM runtime update for FE %s\n",
- fe->dai_link->name);
+ /* skip if FE doesn't have playback capability */
+ if (!fe->cpu_dai->driver->playback.channels_min ||
+ !fe->codec_dai->driver->playback.channels_min)
+ goto capture;
- /* skip if FE doesn't have playback capability */
- if (!fe->cpu_dai->driver->playback.channels_min
- || !fe->codec_dai->driver->playback.channels_min)
- goto capture;
-
- /* skip if FE isn't currently playing */
- if (!fe->cpu_dai->playback_active
- || !fe->codec_dai->playback_active)
- goto capture;
-
- paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list);
- if (paths < 0) {
- dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
- fe->dai_link->name, "playback");
- mutex_unlock(&card->mutex);
- return paths;
- }
+ /* skip if FE isn't currently playing */
+ if (!fe->cpu_dai->playback_active || !fe->codec_dai->playback_active)
+ goto capture;
- /* update any new playback paths */
- new = dpcm_process_paths(fe, SNDRV_PCM_STREAM_PLAYBACK, &list, 1);
- if (new) {
- dpcm_run_new_update(fe, SNDRV_PCM_STREAM_PLAYBACK);
- dpcm_clear_pending_state(fe, SNDRV_PCM_STREAM_PLAYBACK);
- dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_PLAYBACK);
- }
+ paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list);
+ if (paths < 0) {
+ dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
+ fe->dai_link->name, "playback");
+ return paths;
+ }
- /* update any old playback paths */
- old = dpcm_process_paths(fe, SNDRV_PCM_STREAM_PLAYBACK, &list, 0);
- if (old) {
+ /* update any playback paths */
+ count = dpcm_process_paths(fe, SNDRV_PCM_STREAM_PLAYBACK, &list, new);
+ if (count) {
+ if (new)
+ dpcm_run_new_update(fe, SNDRV_PCM_STREAM_PLAYBACK);
+ else
dpcm_run_old_update(fe, SNDRV_PCM_STREAM_PLAYBACK);
- dpcm_clear_pending_state(fe, SNDRV_PCM_STREAM_PLAYBACK);
- dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_PLAYBACK);
- }
- dpcm_path_put(&list);
+ dpcm_clear_pending_state(fe, SNDRV_PCM_STREAM_PLAYBACK);
+ dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_PLAYBACK);
+ }
+
+ dpcm_path_put(&list);
+
capture:
- /* skip if FE doesn't have capture capability */
- if (!fe->cpu_dai->driver->capture.channels_min
- || !fe->codec_dai->driver->capture.channels_min)
- continue;
+ /* skip if FE doesn't have capture capability */
+ if (!fe->cpu_dai->driver->capture.channels_min ||
+ !fe->codec_dai->driver->capture.channels_min)
+ return 0;
- /* skip if FE isn't currently capturing */
- if (!fe->cpu_dai->capture_active
- || !fe->codec_dai->capture_active)
- continue;
+ /* skip if FE isn't currently capturing */
+ if (!fe->cpu_dai->capture_active || !fe->codec_dai->capture_active)
+ return 0;
- paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list);
- if (paths < 0) {
- dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
- fe->dai_link->name, "capture");
- mutex_unlock(&card->mutex);
- return paths;
- }
+ paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list);
+ if (paths < 0) {
+ dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
+ fe->dai_link->name, "capture");
+ return paths;
+ }
- /* update any new capture paths */
- new = dpcm_process_paths(fe, SNDRV_PCM_STREAM_CAPTURE, &list, 1);
- if (new) {
+ /* update any capture paths */
+ count = dpcm_process_paths(fe, SNDRV_PCM_STREAM_CAPTURE, &list, new);
+ if (count) {
+ if (new)
dpcm_run_new_update(fe, SNDRV_PCM_STREAM_CAPTURE);
- dpcm_clear_pending_state(fe, SNDRV_PCM_STREAM_CAPTURE);
- dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_CAPTURE);
- }
-
- /* update any old capture paths */
- old = dpcm_process_paths(fe, SNDRV_PCM_STREAM_CAPTURE, &list, 0);
- if (old) {
+ else
dpcm_run_old_update(fe, SNDRV_PCM_STREAM_CAPTURE);
- dpcm_clear_pending_state(fe, SNDRV_PCM_STREAM_CAPTURE);
- dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_CAPTURE);
- }
- dpcm_path_put(&list);
+ dpcm_clear_pending_state(fe, SNDRV_PCM_STREAM_CAPTURE);
+ dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_CAPTURE);
}
- mutex_unlock(&card->mutex);
+ dpcm_path_put(&list);
+
return 0;
}
+
+/* Called by DAPM mixer/mux changes to update audio routing between PCMs and
+ * any DAI links.
+ */
+int soc_dpcm_runtime_update(struct snd_soc_card *card)
+{
+ struct snd_soc_pcm_runtime *fe;
+ int ret = 0;
+
+ mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ /* shutdown all old paths first */
+ list_for_each_entry(fe, &card->rtd_list, list) {
+ ret = soc_dpcm_fe_runtime_update(fe, 0);
+ if (ret)
+ goto out;
+ }
+
+ /* bring new paths up */
+ list_for_each_entry(fe, &card->rtd_list, list) {
+ ret = soc_dpcm_fe_runtime_update(fe, 1);
+ if (ret)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&card->mutex);
+ return ret;
+}
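
The rework splits the update into two passes over every FE, so all stale BE paths on the card are torn down before any new path comes up. The call site is unchanged (sketch, assuming card is the DAPM caller's card):

	int ret = soc_dpcm_runtime_update(card);	/* old pass, then new */

	if (ret < 0)
		dev_warn(card->dev, "ASoC: DPCM update failed: %d\n", ret);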
int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute)
{
struct snd_soc_dpcm *dpcm;
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 53f121a50c97..66e77e020745 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1,29 +1,24 @@
-/*
- * soc-topology.c -- ALSA SoC Topology
- *
- * Copyright (C) 2012 Texas Instruments Inc.
- * Copyright (C) 2015 Intel Corporation.
- *
- * Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
- * K, Mythri P <mythri.p.k@intel.com>
- * Prusty, Subhransu S <subhransu.s.prusty@intel.com>
- * B, Jayachandran <jayachandran.b@intel.com>
- * Abdullah, Omair M <omair.m.abdullah@intel.com>
- * Jin, Yao <yao.jin@intel.com>
- * Lin, Mengdong <mengdong.lin@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * Add support to read audio firmware topology alongside firmware text. The
- * topology data can contain kcontrols, DAPM graphs, widgets, DAIs, DAI links,
- * equalizers, firmware, coefficients etc.
- *
- * This file only manages the core ALSA and ASoC components, all other bespoke
- * firmware topology data is passed to component drivers for bespoke handling.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// soc-topology.c -- ALSA SoC Topology
+//
+// Copyright (C) 2012 Texas Instruments Inc.
+// Copyright (C) 2015 Intel Corporation.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// K, Mythri P <mythri.p.k@intel.com>
+// Prusty, Subhransu S <subhransu.s.prusty@intel.com>
+// B, Jayachandran <jayachandran.b@intel.com>
+// Abdullah, Omair M <omair.m.abdullah@intel.com>
+// Jin, Yao <yao.jin@intel.com>
+// Lin, Mengdong <mengdong.lin@intel.com>
+//
+// Add support to read audio firmware topology alongside firmware text. The
+// topology data can contain kcontrols, DAPM graphs, widgets, DAIs, DAI links,
+// equalizers, firmware, coefficients etc.
+//
+// This file only manages the core ALSA and ASoC components, all other bespoke
+// firmware topology data is passed to component drivers for bespoke handling.
#include <linux/kernel.h>
#include <linux/export.h>
@@ -259,7 +254,7 @@ static int soc_tplg_vendor_load_(struct soc_tplg *tplg,
int ret = 0;
if (tplg->comp && tplg->ops && tplg->ops->vendor_load)
- ret = tplg->ops->vendor_load(tplg->comp, hdr);
+ ret = tplg->ops->vendor_load(tplg->comp, tplg->index, hdr);
else {
dev_err(tplg->dev, "ASoC: no vendor load callback for ID %d\n",
hdr->vendor_type);
@@ -291,7 +286,8 @@ static int soc_tplg_widget_load(struct soc_tplg *tplg,
struct snd_soc_dapm_widget *w, struct snd_soc_tplg_dapm_widget *tplg_w)
{
if (tplg->comp && tplg->ops && tplg->ops->widget_load)
- return tplg->ops->widget_load(tplg->comp, w, tplg_w);
+ return tplg->ops->widget_load(tplg->comp, tplg->index, w,
+ tplg_w);
return 0;
}
@@ -302,27 +298,30 @@ static int soc_tplg_widget_ready(struct soc_tplg *tplg,
struct snd_soc_dapm_widget *w, struct snd_soc_tplg_dapm_widget *tplg_w)
{
if (tplg->comp && tplg->ops && tplg->ops->widget_ready)
- return tplg->ops->widget_ready(tplg->comp, w, tplg_w);
+ return tplg->ops->widget_ready(tplg->comp, tplg->index, w,
+ tplg_w);
return 0;
}
/* pass DAI configurations to component driver for extra initialization */
static int soc_tplg_dai_load(struct soc_tplg *tplg,
- struct snd_soc_dai_driver *dai_drv)
+ struct snd_soc_dai_driver *dai_drv,
+ struct snd_soc_tplg_pcm *pcm, struct snd_soc_dai *dai)
{
if (tplg->comp && tplg->ops && tplg->ops->dai_load)
- return tplg->ops->dai_load(tplg->comp, dai_drv);
+ return tplg->ops->dai_load(tplg->comp, tplg->index, dai_drv,
+ pcm, dai);
return 0;
}
/* pass link configurations to component driver for extra initialization */
static int soc_tplg_dai_link_load(struct soc_tplg *tplg,
- struct snd_soc_dai_link *link)
+ struct snd_soc_dai_link *link, struct snd_soc_tplg_link_config *cfg)
{
if (tplg->comp && tplg->ops && tplg->ops->link_load)
- return tplg->ops->link_load(tplg->comp, link);
+ return tplg->ops->link_load(tplg->comp, tplg->index, link, cfg);
return 0;
}
@@ -643,7 +642,8 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
struct snd_kcontrol_new *k, struct snd_soc_tplg_ctl_hdr *hdr)
{
if (tplg->comp && tplg->ops && tplg->ops->control_load)
- return tplg->ops->control_load(tplg->comp, k, hdr);
+ return tplg->ops->control_load(tplg->comp, tplg->index, k,
+ hdr);
return 0;
}
@@ -1100,6 +1100,17 @@ static int soc_tplg_kcontrol_elems_load(struct soc_tplg *tplg,
return 0;
}
+/* optionally pass new dynamic kcontrol to component driver. */
+static int soc_tplg_add_route(struct soc_tplg *tplg,
+ struct snd_soc_dapm_route *route)
+{
+ if (tplg->comp && tplg->ops && tplg->ops->dapm_route_load)
+ return tplg->ops->dapm_route_load(tplg->comp, tplg->index,
+ route);
+
+ return 0;
+}
+
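
A component can now observe DAPM routes as topology creates them; a hypothetical handler with the signature used by soc_tplg_add_route() above:

static int example_route_load(struct snd_soc_component *comp, int index,
			      struct snd_soc_dapm_route *route)
{
	dev_dbg(comp->dev, "tplg %d: route %s -> %s\n",
		index, route->source, route->sink);
	return 0;
}

static struct snd_soc_tplg_ops example_tplg_ops = {
	.dapm_route_load = example_route_load,
};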
static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
struct snd_soc_tplg_hdr *hdr)
{
@@ -1148,6 +1159,8 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
else
route.control = elem->control;
+ soc_tplg_add_route(tplg, &route);
+
/* add route, but keep going if some fail */
snd_soc_dapm_add_routes(dapm, &route, 1);
}
@@ -1702,7 +1715,7 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
dai_drv->compress_new = snd_soc_new_compress;
/* pass control to component driver for optional further init */
- ret = soc_tplg_dai_load(tplg, dai_drv);
+ ret = soc_tplg_dai_load(tplg, dai_drv, pcm, NULL);
if (ret < 0) {
dev_err(tplg->comp->dev, "ASoC: DAI loading failed\n");
kfree(dai_drv);
@@ -1772,7 +1785,7 @@ static int soc_tplg_fe_link_create(struct soc_tplg *tplg,
set_link_flags(link, pcm->flag_mask, pcm->flags);
/* pass control to component driver for optional further init */
- ret = soc_tplg_dai_link_load(tplg, link);
+ ret = soc_tplg_dai_link_load(tplg, link, NULL);
if (ret < 0) {
dev_err(tplg->comp->dev, "ASoC: FE link loading failed\n");
kfree(link);
@@ -2080,7 +2093,7 @@ static int soc_tplg_link_config(struct soc_tplg *tplg,
set_link_flags(link, cfg->flag_mask, cfg->flags);
/* pass control to component driver for optional further init */
- ret = soc_tplg_dai_link_load(tplg, link);
+ ret = soc_tplg_dai_link_load(tplg, link, cfg);
if (ret < 0) {
dev_err(tplg->dev, "ASoC: physical link loading failed\n");
return ret;
@@ -2202,7 +2215,7 @@ static int soc_tplg_dai_config(struct soc_tplg *tplg,
set_dai_flags(dai_drv, d->flag_mask, d->flags);
/* pass control to component driver for optional further init */
- ret = soc_tplg_dai_load(tplg, dai_drv);
+ ret = soc_tplg_dai_load(tplg, dai_drv, NULL, dai);
if (ret < 0) {
dev_err(tplg->comp->dev, "ASoC: DAI loading failed\n");
return ret;
@@ -2311,7 +2324,7 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
/* pass control to component driver for optional further init */
if (tplg->comp && tplg->ops && tplg->ops->manifest)
- return tplg->ops->manifest(tplg->comp, _manifest);
+ return tplg->ops->manifest(tplg->comp, tplg->index, _manifest);
if (!abi_match) /* free the duplicated one */
kfree(_manifest);
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 2d9e98bd1530..e0c93496c0cd 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -1,17 +1,11 @@
-/*
- * soc-util.c -- ALSA SoC Audio Layer utility functions
- *
- * Copyright 2009 Wolfson Microelectronics PLC.
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- * Liam Girdwood <lrg@slimlogic.co.uk>
- *
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// soc-util.c -- ALSA SoC Audio Layer utility functions
+//
+// Copyright 2009 Wolfson Microelectronics PLC.
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+// Liam Girdwood <lrg@slimlogic.co.uk>
#include <linux/platform_device.h>
#include <linux/export.h>
@@ -381,6 +375,6 @@ int __init snd_soc_util_init(void)
void __exit snd_soc_util_exit(void)
{
- platform_device_unregister(soc_dummy_dev);
platform_driver_unregister(&soc_dummy_driver);
+ platform_device_unregister(soc_dummy_dev);
}
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index d8b6936e544e..313dab2857ef 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -91,7 +91,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(player);
/* Stop the player */
- snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stop_xrun(player->substream);
}
ret = IRQ_HANDLED;
@@ -105,7 +105,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
SET_UNIPERIF_ITM_BCLR_DMA_ERROR(player);
/* Stop the player */
- snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stop_xrun(player->substream);
ret = IRQ_HANDLED;
}
@@ -138,7 +138,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
dev_err(player->dev, "Underflow recovery failed\n");
/* Stop the player */
- snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stop_xrun(player->substream);
ret = IRQ_HANDLED;
}
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index ee0055e60852..7b63d35ef428 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -65,7 +65,7 @@ static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(reader))) {
dev_err(reader->dev, "FIFO error detected\n");
- snd_pcm_stop(reader->substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stop_xrun(reader->substream);
ret = IRQ_HANDLED;
}
diff --git a/sound/soc/stm/Kconfig b/sound/soc/stm/Kconfig
index 48f9ddd94016..9b2681397dba 100644
--- a/sound/soc/stm/Kconfig
+++ b/sound/soc/stm/Kconfig
@@ -6,6 +6,7 @@ config SND_SOC_STM32_SAI
depends on SND_SOC
select SND_SOC_GENERIC_DMAENGINE_PCM
select REGMAP_MMIO
+ select SND_PCM_IEC958
help
Say Y if you want to enable SAI for STM32
diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c
index db73fef3e500..706ff005234f 100644
--- a/sound/soc/stm/stm32_adfsdm.c
+++ b/sound/soc/stm/stm32_adfsdm.c
@@ -149,7 +149,7 @@ static int stm32_afsdm_pcm_cb(const void *data, size_t size, void *private)
unsigned int old_pos = priv->pos;
unsigned int cur_size = size;
- dev_dbg(rtd->dev, "%s: buff_add :%p, pos = %d, size = %zu\n",
+ dev_dbg(rtd->dev, "%s: buff_add :%pK, pos = %d, size = %zu\n",
__func__, &pcm_buff[priv->pos], priv->pos, size);
if ((priv->pos + size) > buff_size) {
@@ -269,16 +269,10 @@ static int stm32_adfsdm_pcm_new(struct snd_soc_pcm_runtime *rtd)
static void stm32_adfsdm_pcm_free(struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
- struct snd_soc_pcm_runtime *rtd;
- struct stm32_adfsdm_priv *priv;
substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
- if (substream) {
- rtd = substream->private_data;
- priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
-
+ if (substream)
snd_pcm_lib_preallocate_free_for_all(pcm);
- }
}
static struct snd_soc_component_driver stm32_adfsdm_soc_platform = {
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index cfeb219e1d78..06fba9650ac4 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -96,7 +96,8 @@
* @slot_mask: rx or tx active slots mask. set at init or at runtime
* @data_size: PCM data width. corresponds to PCM substream width.
* @spdif_frm_cnt: S/PDIF playback frame counter
- * @spdif_status_bits: S/PDIF status bits
+ * @iec958: IEC958 channel status data
+ * @ctrl_lock: control lock
*/
struct stm32_sai_sub_data {
struct platform_device *pdev;
@@ -125,7 +126,8 @@ struct stm32_sai_sub_data {
int slot_mask;
int data_size;
unsigned int spdif_frm_cnt;
- unsigned char spdif_status_bits[SAI_IEC60958_STATUS_BYTES];
+ struct snd_aes_iec958 iec958;
+ struct mutex ctrl_lock; /* protect resources accessed by controls */
};
enum stm32_sai_fifo_th {
@@ -184,10 +186,6 @@ static bool stm32_sai_sub_writeable_reg(struct device *dev, unsigned int reg)
}
}
-static const unsigned char default_status_bits[SAI_IEC60958_STATUS_BYTES] = {
- 0, 0, 0, IEC958_AES3_CON_FS_48000,
-};
-
static const struct regmap_config stm32_sai_sub_regmap_config_f4 = {
.reg_bits = 32,
.reg_stride = 4,
@@ -210,6 +208,49 @@ static const struct regmap_config stm32_sai_sub_regmap_config_h7 = {
.fast_io = true,
};
+static int snd_pcm_iec958_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+ uinfo->count = 1;
+
+ return 0;
+}
+
+static int snd_pcm_iec958_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uctl)
+{
+ struct stm32_sai_sub_data *sai = snd_kcontrol_chip(kcontrol);
+
+ mutex_lock(&sai->ctrl_lock);
+ memcpy(uctl->value.iec958.status, sai->iec958.status, 4);
+ mutex_unlock(&sai->ctrl_lock);
+
+ return 0;
+}
+
+static int snd_pcm_iec958_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uctl)
+{
+ struct stm32_sai_sub_data *sai = snd_kcontrol_chip(kcontrol);
+
+ mutex_lock(&sai->ctrl_lock);
+ memcpy(sai->iec958.status, uctl->value.iec958.status, 4);
+ mutex_unlock(&sai->ctrl_lock);
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new iec958_ctls = {
+ .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE),
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
+ .info = snd_pcm_iec958_info,
+ .get = snd_pcm_iec958_get,
+ .put = snd_pcm_iec958_put,
+};
+
static irqreturn_t stm32_sai_isr(int irq, void *devid)
{
struct stm32_sai_sub_data *sai = (struct stm32_sai_sub_data *)devid;
@@ -259,11 +300,8 @@ static irqreturn_t stm32_sai_isr(int irq, void *devid)
status = SNDRV_PCM_STATE_XRUN;
}
- if (status != SNDRV_PCM_STATE_RUNNING) {
- snd_pcm_stream_lock(sai->substream);
- snd_pcm_stop(sai->substream, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock(sai->substream);
- }
+ if (status != SNDRV_PCM_STATE_RUNNING)
+ snd_pcm_stop_xrun(sai->substream);
return IRQ_HANDLED;
}
@@ -619,6 +657,59 @@ static void stm32_sai_set_frame(struct snd_soc_dai *cpu_dai)
}
}
+static void stm32_sai_init_iec958_status(struct stm32_sai_sub_data *sai)
+{
+ unsigned char *cs = sai->iec958.status;
+
+ cs[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_NONE;
+ cs[1] = IEC958_AES1_CON_GENERAL;
+ cs[2] = IEC958_AES2_CON_SOURCE_UNSPEC | IEC958_AES2_CON_CHANNEL_UNSPEC;
+ cs[3] = IEC958_AES3_CON_CLOCK_1000PPM | IEC958_AES3_CON_FS_NOTID;
+}
+
+static void stm32_sai_set_iec958_status(struct stm32_sai_sub_data *sai,
+ struct snd_pcm_runtime *runtime)
+{
+ if (!runtime)
+ return;
+
+ /* Force the sample rate according to runtime rate */
+ mutex_lock(&sai->ctrl_lock);
+ switch (runtime->rate) {
+ case 22050:
+ sai->iec958.status[3] = IEC958_AES3_CON_FS_22050;
+ break;
+ case 44100:
+ sai->iec958.status[3] = IEC958_AES3_CON_FS_44100;
+ break;
+ case 88200:
+ sai->iec958.status[3] = IEC958_AES3_CON_FS_88200;
+ break;
+ case 176400:
+ sai->iec958.status[3] = IEC958_AES3_CON_FS_176400;
+ break;
+ case 24000:
+ sai->iec958.status[3] = IEC958_AES3_CON_FS_24000;
+ break;
+ case 48000:
+ sai->iec958.status[3] = IEC958_AES3_CON_FS_48000;
+ break;
+ case 96000:
+ sai->iec958.status[3] = IEC958_AES3_CON_FS_96000;
+ break;
+ case 192000:
+ sai->iec958.status[3] = IEC958_AES3_CON_FS_192000;
+ break;
+ case 32000:
+ sai->iec958.status[3] = IEC958_AES3_CON_FS_32000;
+ break;
+ default:
+ sai->iec958.status[3] = IEC958_AES3_CON_FS_NOTID;
+ break;
+ }
+ mutex_unlock(&sai->ctrl_lock);
+}
+
static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
struct snd_pcm_hw_params *params)
{
@@ -709,7 +800,11 @@ static int stm32_sai_hw_params(struct snd_pcm_substream *substream,
sai->data_size = params_width(params);
- if (!STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
+ if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
+ /* Rate not already set in runtime structure */
+ substream->runtime->rate = params_rate(params);
+ stm32_sai_set_iec958_status(sai, substream->runtime);
+ } else {
ret = stm32_sai_set_slots(cpu_dai);
if (ret < 0)
return ret;
@@ -789,6 +884,20 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
sai->substream = NULL;
}
+static int stm32_sai_pcm_new(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
+
+ if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
+ dev_dbg(&sai->pdev->dev, "%s: register iec controls\n", __func__);
+ return snd_ctl_add(rtd->pcm->card,
+ snd_ctl_new1(&iec958_ctls, sai));
+ }
+
+ return 0;
+}
+
static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
{
struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
@@ -809,6 +918,10 @@ static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
else
snd_soc_dai_init_dma_data(cpu_dai, NULL, &sai->dma_params);
+ /* Next settings are not relevant for spdif mode */
+ if (STM_SAI_PROTOCOL_IS_SPDIF(sai))
+ return 0;
+
cr1_mask = SAI_XCR1_RX_TX;
if (STM_SAI_IS_CAPTURE(sai))
cr1 |= SAI_XCR1_RX_TX;
@@ -820,10 +933,6 @@ static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
sai->synco, sai->synci);
}
- if (STM_SAI_PROTOCOL_IS_SPDIF(sai))
- memcpy(sai->spdif_status_bits, default_status_bits,
- sizeof(default_status_bits));
-
cr1_mask |= SAI_XCR1_SYNCEN_MASK;
cr1 |= SAI_XCR1_SYNCEN_SET(sai->sync);
@@ -861,7 +970,7 @@ static int stm32_sai_pcm_process_spdif(struct snd_pcm_substream *substream,
/* Set channel status bit */
byte = frm_cnt >> 3;
mask = 1 << (frm_cnt - (byte << 3));
- if (sai->spdif_status_bits[byte] & mask)
+ if (sai->iec958.status[byte] & mask)
*ptr |= 0x04000000;
ptr++;
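
The byte/mask arithmetic above walks the 192-bit IEC60958 channel status block one bit per frame; for example:

	unsigned int frm_cnt = 11;			/* frame 11 of 192 */
	unsigned int byte = frm_cnt >> 3;		/* 11 / 8 = 1 */
	unsigned int mask = 1 << (frm_cnt - (byte << 3)); /* 1 << 3 = 0x08 */
	/* so bit 3 of iec958.status[1] gates the status bit in frame 11 */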
@@ -888,6 +997,7 @@ static const struct snd_pcm_hardware stm32_sai_pcm_hw = {
static struct snd_soc_dai_driver stm32_sai_playback_dai[] = {
{
.probe = stm32_sai_dai_probe,
+ .pcm_new = stm32_sai_pcm_new,
.id = 1, /* avoid call to fmt_single_name() */
.playback = {
.channels_min = 1,
@@ -998,6 +1108,7 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
dev_err(&pdev->dev, "S/PDIF IEC60958 not supported\n");
return -EINVAL;
}
+ stm32_sai_init_iec958_status(sai);
sai->spdif = true;
sai->master = true;
}
@@ -1114,6 +1225,7 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
sai->id = (uintptr_t)of_id->data;
sai->pdev = pdev;
+ mutex_init(&sai->ctrl_lock);
platform_set_drvdata(pdev, sai);
sai->pdata = dev_get_drvdata(pdev->dev.parent);
diff --git a/sound/soc/tegra/tegra20_ac97.c b/sound/soc/tegra/tegra20_ac97.c
index affad46bf188..682ef33afb5f 100644
--- a/sound/soc/tegra/tegra20_ac97.c
+++ b/sound/soc/tegra/tegra20_ac97.c
@@ -377,7 +377,7 @@ static int tegra20_ac97_platform_probe(struct platform_device *pdev)
ret = clk_prepare_enable(ac97->clk_ac97);
if (ret) {
dev_err(&pdev->dev, "clk_enable failed: %d\n", ret);
- goto err;
+ goto err_clk_put;
}
ret = snd_soc_set_ac97_ops(&tegra20_ac97_ops);
diff --git a/sound/soc/tegra/tegra30_i2s.h b/sound/soc/tegra/tegra30_i2s.h
index 774fc6ad2026..2e561e946de2 100644
--- a/sound/soc/tegra/tegra30_i2s.h
+++ b/sound/soc/tegra/tegra30_i2s.h
@@ -173,7 +173,7 @@
/* Number of slots in frame, minus 1 */
#define TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_SHIFT 16
#define TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_MASK_US 7
-#define TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_MASK (TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOT_MASK_US << TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOT_SHIFT)
+#define TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_MASK (TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_MASK_US << TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_SHIFT)
/* TDM mode slot enable bitmask */
#define TEGRA30_I2S_SLOT_CTRL_RX_SLOT_ENABLES_SHIFT 8
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index 5197d6b18cb6..98d87801d57a 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -190,14 +190,14 @@ static int tegra_alc5632_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Property 'nvidia,i2s-controller' missing or invalid\n");
ret = -EINVAL;
- goto err;
+ goto err_put_codec_of_node;
}
tegra_alc5632_dai.platform_of_node = tegra_alc5632_dai.cpu_of_node;
ret = tegra_asoc_utils_init(&alc5632->util_data, &pdev->dev);
if (ret)
- goto err;
+ goto err_put_cpu_of_node;
ret = snd_soc_register_card(card);
if (ret) {
@@ -210,6 +210,13 @@ static int tegra_alc5632_probe(struct platform_device *pdev)
err_fini_utils:
tegra_asoc_utils_fini(&alc5632->util_data);
+err_put_cpu_of_node:
+ of_node_put(tegra_alc5632_dai.cpu_of_node);
+ tegra_alc5632_dai.cpu_of_node = NULL;
+ tegra_alc5632_dai.platform_of_node = NULL;
+err_put_codec_of_node:
+ of_node_put(tegra_alc5632_dai.codec_of_node);
+ tegra_alc5632_dai.codec_of_node = NULL;
err:
return ret;
}
@@ -223,6 +230,12 @@ static int tegra_alc5632_remove(struct platform_device *pdev)
tegra_asoc_utils_fini(&machine->util_data);
+ of_node_put(tegra_alc5632_dai.cpu_of_node);
+ tegra_alc5632_dai.cpu_of_node = NULL;
+ tegra_alc5632_dai.platform_of_node = NULL;
+ of_node_put(tegra_alc5632_dai.codec_of_node);
+ tegra_alc5632_dai.codec_of_node = NULL;
+
return 0;
}
diff --git a/sound/soc/tegra/tegra_rt5677.c b/sound/soc/tegra/tegra_rt5677.c
index 0e4805c7b4ca..7081f15302cc 100644
--- a/sound/soc/tegra/tegra_rt5677.c
+++ b/sound/soc/tegra/tegra_rt5677.c
@@ -264,13 +264,13 @@ static int tegra_rt5677_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Property 'nvidia,i2s-controller' missing or invalid\n");
ret = -EINVAL;
- goto err;
+ goto err_put_codec_of_node;
}
tegra_rt5677_dai.platform_of_node = tegra_rt5677_dai.cpu_of_node;
ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
if (ret)
- goto err;
+ goto err_put_cpu_of_node;
ret = snd_soc_register_card(card);
if (ret) {
@@ -283,6 +283,13 @@ static int tegra_rt5677_probe(struct platform_device *pdev)
err_fini_utils:
tegra_asoc_utils_fini(&machine->util_data);
+err_put_cpu_of_node:
+ of_node_put(tegra_rt5677_dai.cpu_of_node);
+ tegra_rt5677_dai.cpu_of_node = NULL;
+ tegra_rt5677_dai.platform_of_node = NULL;
+err_put_codec_of_node:
+ of_node_put(tegra_rt5677_dai.codec_of_node);
+ tegra_rt5677_dai.codec_of_node = NULL;
err:
return ret;
}
@@ -296,6 +303,12 @@ static int tegra_rt5677_remove(struct platform_device *pdev)
tegra_asoc_utils_fini(&machine->util_data);
+ tegra_rt5677_dai.platform_of_node = NULL;
+ of_node_put(tegra_rt5677_dai.codec_of_node);
+ tegra_rt5677_dai.codec_of_node = NULL;
+ of_node_put(tegra_rt5677_dai.cpu_of_node);
+ tegra_rt5677_dai.cpu_of_node = NULL;
+
return 0;
}
diff --git a/sound/soc/uniphier/aio-core.c b/sound/soc/uniphier/aio-core.c
index 638cb3fc5f7b..9bcba06ba52e 100644
--- a/sound/soc/uniphier/aio-core.c
+++ b/sound/soc/uniphier/aio-core.c
@@ -265,6 +265,57 @@ void aio_port_reset(struct uniphier_aio_sub *sub)
}
/**
+ * aio_port_set_ch - set channels of LPCM
+ * @sub: the AIO substream pointer, PCM substream only; the channel count
+ * is taken from sub->params
+ *
+ * Set suitable slot selection for the input/output port block of AIO.
+ *
+ * This function may return an error for a non-PCM substream.
+ *
+ * Return: Zero if successful, otherwise a negative value on error.
+ */
+static int aio_port_set_ch(struct uniphier_aio_sub *sub)
+{
+ struct regmap *r = sub->aio->chip->regmap;
+ u32 slotsel_2ch[] = {
+ 0, 0, 0, 0, 0,
+ };
+ u32 slotsel_multi[] = {
+ OPORTMXTYSLOTCTR_SLOTSEL_SLOT0,
+ OPORTMXTYSLOTCTR_SLOTSEL_SLOT1,
+ OPORTMXTYSLOTCTR_SLOTSEL_SLOT2,
+ OPORTMXTYSLOTCTR_SLOTSEL_SLOT3,
+ OPORTMXTYSLOTCTR_SLOTSEL_SLOT4,
+ };
+ u32 mode, *slotsel;
+ int i;
+
+ switch (params_channels(&sub->params)) {
+ case 8:
+ case 6:
+ mode = OPORTMXTYSLOTCTR_MODE;
+ slotsel = slotsel_multi;
+ break;
+ case 2:
+ mode = 0;
+ slotsel = slotsel_2ch;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < AUD_MAX_SLOTSEL; i++) {
+ regmap_update_bits(r, OPORTMXTYSLOTCTR(sub->swm->oport.map, i),
+ OPORTMXTYSLOTCTR_MODE, mode);
+ regmap_update_bits(r, OPORTMXTYSLOTCTR(sub->swm->oport.map, i),
+ OPORTMXTYSLOTCTR_SLOTSEL_MASK, slotsel[i]);
+ }
+
+ return 0;
+}
+
+/**
* aio_port_set_rate - set sampling rate of LPCM
* @sub: the AIO substream pointer, PCM substream only
* @rate: Sampling rate in Hz.
@@ -276,7 +327,7 @@ void aio_port_reset(struct uniphier_aio_sub *sub)
*
* Return: Zero if successful, otherwise a negative value on error.
*/
-int aio_port_set_rate(struct uniphier_aio_sub *sub, int rate)
+static int aio_port_set_rate(struct uniphier_aio_sub *sub, int rate)
{
struct regmap *r = sub->aio->chip->regmap;
struct device *dev = &sub->aio->chip->pdev->dev;
@@ -395,7 +446,7 @@ int aio_port_set_rate(struct uniphier_aio_sub *sub, int rate)
*
* Return: Zero if successful, otherwise a negative value on error.
*/
-int aio_port_set_fmt(struct uniphier_aio_sub *sub)
+static int aio_port_set_fmt(struct uniphier_aio_sub *sub)
{
struct regmap *r = sub->aio->chip->regmap;
struct device *dev = &sub->aio->chip->pdev->dev;
@@ -460,7 +511,7 @@ int aio_port_set_fmt(struct uniphier_aio_sub *sub)
*
* Return: Zero if successful, otherwise a negative value on error.
*/
-int aio_port_set_clk(struct uniphier_aio_sub *sub)
+static int aio_port_set_clk(struct uniphier_aio_sub *sub)
{
struct uniphier_aio_chip *chip = sub->aio->chip;
struct device *dev = &sub->aio->chip->pdev->dev;
@@ -575,6 +626,10 @@ int aio_port_set_param(struct uniphier_aio_sub *sub, int pass_through,
rate = params_rate(params);
}
+ ret = aio_port_set_ch(sub);
+ if (ret)
+ return ret;
+
ret = aio_port_set_rate(sub, rate);
if (ret)
return ret;
@@ -731,15 +786,28 @@ void aio_port_set_volume(struct uniphier_aio_sub *sub, int vol)
int aio_if_set_param(struct uniphier_aio_sub *sub, int pass_through)
{
struct regmap *r = sub->aio->chip->regmap;
- u32 v;
+ u32 memfmt, v;
if (sub->swm->dir == PORT_DIR_OUTPUT) {
- if (pass_through)
+ if (pass_through) {
v = PBOUTMXCTR0_ENDIAN_0123 |
PBOUTMXCTR0_MEMFMT_STREAM;
- else
- v = PBOUTMXCTR0_ENDIAN_3210 |
- PBOUTMXCTR0_MEMFMT_2CH;
+ } else {
+ switch (params_channels(&sub->params)) {
+ case 2:
+ memfmt = PBOUTMXCTR0_MEMFMT_2CH;
+ break;
+ case 6:
+ memfmt = PBOUTMXCTR0_MEMFMT_6CH;
+ break;
+ case 8:
+ memfmt = PBOUTMXCTR0_MEMFMT_8CH;
+ break;
+ default:
+ return -EINVAL;
+ }
+ v = PBOUTMXCTR0_ENDIAN_3210 | memfmt;
+ }
regmap_write(r, PBOUTMXCTR0(sub->swm->oif.map), v);
regmap_write(r, PBOUTMXCTR1(sub->swm->oif.map), 0);
diff --git a/sound/soc/uniphier/aio-cpu.c b/sound/soc/uniphier/aio-cpu.c
index 2d9b7dde2ffa..ee90e6c3937c 100644
--- a/sound/soc/uniphier/aio-cpu.c
+++ b/sound/soc/uniphier/aio-cpu.c
@@ -219,15 +219,10 @@ static int uniphier_aio_set_pll(struct snd_soc_dai *dai, int pll_id,
unsigned int freq_out)
{
struct uniphier_aio *aio = uniphier_priv(dai);
- struct device *dev = &aio->chip->pdev->dev;
int ret;
if (!is_valid_pll(aio->chip, pll_id))
return -EINVAL;
- if (!aio->chip->plls[pll_id].enable) {
- dev_err(dev, "PLL(%d) is not implemented\n", pll_id);
- return -ENOTSUPP;
- }
ret = aio_chip_set_pll(aio->chip, pll_id, freq_out);
if (ret < 0)
diff --git a/sound/soc/uniphier/aio-ld11.c b/sound/soc/uniphier/aio-ld11.c
index ab04d3331be9..de962df245ba 100644
--- a/sound/soc/uniphier/aio-ld11.c
+++ b/sound/soc/uniphier/aio-ld11.c
@@ -286,7 +286,7 @@ static struct snd_soc_dai_driver uniphier_aio_dai_ld11[] = {
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
- .channels_max = 2,
+ .channels_max = 8,
},
.ops = &uniphier_aio_i2s_ops,
},
diff --git a/sound/soc/uniphier/aio-reg.h b/sound/soc/uniphier/aio-reg.h
index 45fdc6ae358a..734395dbcffb 100644
--- a/sound/soc/uniphier/aio-reg.h
+++ b/sound/soc/uniphier/aio-reg.h
@@ -374,6 +374,7 @@
#define OPORTMXTYVOLGAINSTATUS(n, m) (0x42108 + 0x400 * (n) + 0x20 * (m))
#define OPORTMXTYVOLGAINSTATUS_CUR_MASK GENMASK(15, 0)
#define OPORTMXTYSLOTCTR(n, m) (0x42114 + 0x400 * (n) + 0x20 * (m))
+#define OPORTMXTYSLOTCTR_MODE BIT(15)
#define OPORTMXTYSLOTCTR_SLOTSEL_MASK GENMASK(11, 8)
#define OPORTMXTYSLOTCTR_SLOTSEL_SLOT0 (0x8 << 8)
#define OPORTMXTYSLOTCTR_SLOTSEL_SLOT1 (0x9 << 8)
diff --git a/sound/soc/uniphier/aio.h b/sound/soc/uniphier/aio.h
index aa89c2f6fa24..ca6ccbae0ee8 100644
--- a/sound/soc/uniphier/aio.h
+++ b/sound/soc/uniphier/aio.h
@@ -141,6 +141,9 @@ enum IEC61937_PC {
#define AUD_MIN_FRAGMENT_SIZE (4 * 1024)
#define AUD_MAX_FRAGMENT_SIZE (16 * 1024)
+/* max 5 slots, 10 channels, 2 channels per slot */
+#define AUD_MAX_SLOTSEL 5
+
/*
* This is a selector for virtual register map of AIO.
*
@@ -322,9 +325,6 @@ int aio_chip_set_pll(struct uniphier_aio_chip *chip, int pll_id,
void aio_chip_init(struct uniphier_aio_chip *chip);
int aio_init(struct uniphier_aio_sub *sub);
void aio_port_reset(struct uniphier_aio_sub *sub);
-int aio_port_set_rate(struct uniphier_aio_sub *sub, int rate);
-int aio_port_set_fmt(struct uniphier_aio_sub *sub);
-int aio_port_set_clk(struct uniphier_aio_sub *sub);
int aio_port_set_param(struct uniphier_aio_sub *sub, int pass_through,
const struct snd_pcm_hw_params *params);
void aio_port_set_enable(struct uniphier_aio_sub *sub, int enable);
diff --git a/sound/soc/zte/zx-tdm.c b/sound/soc/zte/zx-tdm.c
index dc955272f58b..389272eeba9a 100644
--- a/sound/soc/zte/zx-tdm.c
+++ b/sound/soc/zte/zx-tdm.c
@@ -144,8 +144,8 @@ static void zx_tdm_rx_dma_en(struct zx_tdm_info *tdm, bool on)
#define ZX_TDM_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000)
#define ZX_TDM_FMTBIT \
- (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_MU_LAW | \
- SNDRV_PCM_FORMAT_A_LAW)
+ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_MU_LAW | \
+ SNDRV_PCM_FMTBIT_A_LAW)
static int zx_tdm_dai_probe(struct snd_soc_dai *dai)
{
diff --git a/sound/synth/emux/emux.c b/sound/synth/emux/emux.c
index b840ff2dcfbb..64f3141a3e1b 100644
--- a/sound/synth/emux/emux.c
+++ b/sound/synth/emux/emux.c
@@ -163,20 +163,3 @@ int snd_emux_free(struct snd_emux *emu)
}
EXPORT_SYMBOL(snd_emux_free);
-
-
-/*
- * INIT part
- */
-
-static int __init alsa_emux_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_emux_exit(void)
-{
-}
-
-module_init(alsa_emux_init)
-module_exit(alsa_emux_exit)
diff --git a/sound/synth/util_mem.c b/sound/synth/util_mem.c
index 8e34bc4e07ec..4bd1e98200d2 100644
--- a/sound/synth/util_mem.c
+++ b/sound/synth/util_mem.c
@@ -193,19 +193,3 @@ EXPORT_SYMBOL(snd_util_mem_avail);
EXPORT_SYMBOL(__snd_util_mem_alloc);
EXPORT_SYMBOL(__snd_util_mem_free);
EXPORT_SYMBOL(__snd_util_memblk_new);
-
-/*
- * INIT part
- */
-
-static int __init alsa_util_mem_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_util_mem_exit(void)
-{
-}
-
-module_init(alsa_util_mem_init)
-module_exit(alsa_util_mem_exit)
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index 2dd2518a71d3..f8ef3e2a8ca0 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -565,7 +565,6 @@ static const struct snd_pcm_ops pcm_ops = {
.trigger = usb6fire_pcm_trigger,
.pointer = usb6fire_pcm_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static void usb6fire_pcm_init_urb(struct pcm_urb *urb,
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index 05440e2df8d9..d330f74c90e6 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -13,6 +13,7 @@ snd-usb-audio-objs := card.o \
mixer_scarlett.o \
mixer_us16x08.o \
pcm.o \
+ power.o \
proc.o \
quirks.o \
stream.o
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index f35d29f49ffe..c6108a3d7f8f 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -348,7 +348,6 @@ static const struct snd_pcm_ops snd_usb_caiaq_ops = {
.trigger = snd_usb_caiaq_pcm_trigger,
.pointer = snd_usb_caiaq_pcm_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static void check_for_elapsed_periods(struct snd_usb_caiaqdev *cdev,
@@ -636,6 +635,7 @@ static void read_completed(struct urb *urb)
struct device *dev;
struct urb *out = NULL;
int i, frame, len, send_it = 0, outframe = 0;
+ unsigned long flags;
size_t offset = 0;
if (urb->status || !info)
@@ -672,10 +672,10 @@ static void read_completed(struct urb *urb)
offset += len;
if (len > 0) {
- spin_lock(&cdev->spinlock);
+ spin_lock_irqsave(&cdev->spinlock, flags);
fill_out_urb(cdev, out, &out->iso_frame_desc[outframe]);
read_in_urb(cdev, urb, &urb->iso_frame_desc[frame]);
- spin_unlock(&cdev->spinlock);
+ spin_unlock_irqrestore(&cdev->spinlock, flags);
check_for_elapsed_periods(cdev, cdev->sub_playback);
check_for_elapsed_periods(cdev, cdev->sub_capture);
send_it = 1;
diff --git a/sound/usb/card.c b/sound/usb/card.c
index a1ed798a1c6b..2bfe4e80a6b9 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -809,6 +809,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
if (!chip->num_suspended_intf++) {
list_for_each_entry(as, &chip->pcm_list, list) {
snd_pcm_suspend_all(as->pcm);
+ snd_usb_pcm_suspend(as);
as->substream[0].need_setup_ep =
as->substream[1].need_setup_ep = true;
}
@@ -824,6 +825,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
{
struct snd_usb_audio *chip = usb_get_intfdata(intf);
+ struct snd_usb_stream *as;
struct usb_mixer_interface *mixer;
struct list_head *p;
int err = 0;
@@ -834,6 +836,13 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
return 0;
atomic_inc(&chip->active); /* avoid autopm */
+
+ list_for_each_entry(as, &chip->pcm_list, list) {
+ err = snd_usb_pcm_resume(as);
+ if (err < 0)
+ goto err_out;
+ }
+
/*
* ALSA leaves material resumption to user space
* we just notify and restart the mixers
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 9b41b7dda84f..ac785d15ced4 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -37,6 +37,7 @@ struct audioformat {
struct snd_usb_substream;
struct snd_usb_endpoint;
+struct snd_usb_power_domain;
struct snd_urb_ctx {
struct urb *urb;
@@ -115,6 +116,7 @@ struct snd_usb_substream {
int interface; /* current interface */
int endpoint; /* assigned endpoint */
struct audioformat *cur_audiofmt; /* current audioformat pointer (for hw_params callback) */
+ struct snd_usb_power_domain *str_pd; /* UAC3 Power Domain for streaming path */
snd_pcm_format_t pcm_format; /* current audio format (for hw_params callback) */
unsigned int channels; /* current number of channels (for hw_params callback) */
unsigned int channels_max; /* max channels in the all audiofmts */
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index c79749613fa6..db5e39d67a90 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -513,14 +513,28 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
bool writeable;
u32 bmControls;
+ /* First, try to find a valid clock. This may trigger
+ * automatic clock selection if the current clock is not
+ * valid.
+ */
clock = snd_usb_clock_find_source(chip, fmt->protocol,
fmt->clock, true);
- if (clock < 0)
- return clock;
+ if (clock < 0) {
+ /* We did not find a valid clock, but that might be
+ * because the current sample rate does not match an
+ * external clock source. Try again without validation
+ * and validate once more after setting the rate.
+ */
+ clock = snd_usb_clock_find_source(chip, fmt->protocol,
+ fmt->clock, false);
+ if (clock < 0)
+ return clock;
+ }
prev_rate = get_sample_rate_v2v3(chip, iface, fmt->altsetting, clock);
if (prev_rate == rate)
- return 0;
+ goto validation;
if (fmt->protocol == UAC_VERSION_3) {
struct uac3_clock_source_descriptor *cs_desc;
@@ -577,6 +591,10 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
snd_usb_set_interface_quirk(dev);
}
+validation:
+ /* validate clock after rate change */
+ if (!uac_clock_source_is_valid(chip, fmt->protocol, clock))
+ return -ENXIO;
return 0;
}
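In short, set_sample_rate_v2v3() now performs a validate / set / re-validate sequence, so an external clock source that only locks once the new rate is programmed is no longer rejected up front. A hedged outline of the resulting flow (condensed; not the literal function body):

static int example_set_rate(struct snd_usb_audio *chip,
			    struct audioformat *fmt, int rate)
{
	int clock;

	clock = snd_usb_clock_find_source(chip, fmt->protocol,
					  fmt->clock, true);
	/* A failure may just mean an external clock source that does
	 * not match the current rate yet; retry without validation.
	 */
	if (clock < 0)
		clock = snd_usb_clock_find_source(chip, fmt->protocol,
						  fmt->clock, false);
	if (clock < 0)
		return clock;

	/* ... program the new sample rate on the device ... */

	if (!uac_clock_source_is_valid(chip, fmt->protocol, clock))
		return -ENXIO;	/* still invalid after the rate change */
	return 0;
}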
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index c90607ebe155..d86be8bfe412 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -325,7 +325,6 @@ static void queue_pending_output_urbs(struct snd_usb_endpoint *ep)
unsigned long flags;
struct snd_usb_packet_info *uninitialized_var(packet);
struct snd_urb_ctx *ctx = NULL;
- struct urb *urb;
int err, i;
spin_lock_irqsave(&ep->lock, flags);
@@ -345,7 +344,6 @@ static void queue_pending_output_urbs(struct snd_usb_endpoint *ep)
return;
list_del_init(&ctx->ready_list);
- urb = ctx->urb;
/* copy over the length information */
for (i = 0; i < packet->packets; i++)
diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
index 396c317115b1..e1fbb9cc9ea7 100644
--- a/sound/usb/hiface/pcm.c
+++ b/sound/usb/hiface/pcm.c
@@ -523,7 +523,6 @@ static const struct snd_pcm_ops pcm_ops = {
.trigger = hiface_pcm_trigger,
.pointer = hiface_pcm_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static int hiface_pcm_init_urb(struct pcm_urb *urb,
diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c
index 750467fb95db..f47ba94e6f4a 100644
--- a/sound/usb/line6/toneport.c
+++ b/sound/usb/line6/toneport.c
@@ -367,12 +367,13 @@ static bool toneport_has_source_select(struct usb_line6_toneport *toneport)
*/
static void toneport_setup(struct usb_line6_toneport *toneport)
{
- int ticks;
+ u32 ticks;
struct usb_line6 *line6 = &toneport->line6;
struct usb_device *usbdev = line6->usbdev;
/* sync time on device with host: */
- ticks = (int)get_seconds();
+ /* note: 32-bit timestamps overflow in year 2106 */
+ ticks = (u32)ktime_get_real_seconds();
line6_write_data(line6, 0x80c6, &ticks, 4);
/* enable device: */
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 2c1aaa3292bf..dcfc546d81b9 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -281,15 +281,16 @@ static void snd_usbmidi_out_urb_complete(struct urb *urb)
struct out_urb_context *context = urb->context;
struct snd_usb_midi_out_endpoint *ep = context->ep;
unsigned int urb_index;
+ unsigned long flags;
- spin_lock(&ep->buffer_lock);
+ spin_lock_irqsave(&ep->buffer_lock, flags);
urb_index = context - ep->urbs;
ep->active_urbs &= ~(1 << urb_index);
if (unlikely(ep->drain_urbs)) {
ep->drain_urbs &= ~(1 << urb_index);
wake_up(&ep->drain_wait);
}
- spin_unlock(&ep->buffer_lock);
+ spin_unlock_irqrestore(&ep->buffer_lock, flags);
if (urb->status < 0) {
int err = snd_usbmidi_urb_error(urb);
if (err < 0) {
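The caiaq and MIDI hunks above apply the same locking fix: URB completion callbacks are not guaranteed to run with local interrupts disabled on every host controller driver, so a plain spin_lock() there can deadlock against an interrupt path taking the same lock. A hedged sketch of the safe pattern (hypothetical function, not part of the patch):

#include <linux/spinlock.h>

static void example_urb_complete(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);	/* safe in any context */
	/* ... update state shared with IRQ/process context ... */
	spin_unlock_irqrestore(lock, flags);
}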
diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
index 386fbfd5c617..a0b6d039017f 100644
--- a/sound/usb/misc/ua101.c
+++ b/sound/usb/misc/ua101.c
@@ -900,7 +900,6 @@ static const struct snd_pcm_ops capture_pcm_ops = {
.trigger = capture_pcm_trigger,
.pointer = capture_pcm_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static const struct snd_pcm_ops playback_pcm_ops = {
@@ -913,7 +912,6 @@ static const struct snd_pcm_ops playback_pcm_ops = {
.trigger = playback_pcm_trigger,
.pointer = playback_pcm_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static const struct uac_format_type_i_discrete_descriptor *
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index ca963e94ec03..c63c84b54969 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -675,16 +675,16 @@ static int get_term_name(struct snd_usb_audio *chip, struct usb_audio_term *iter
if (term_only)
return 0;
switch (iterm->type >> 16) {
- case UAC_SELECTOR_UNIT:
+ case UAC3_SELECTOR_UNIT:
strcpy(name, "Selector");
return 8;
- case UAC1_PROCESSING_UNIT:
+ case UAC3_PROCESSING_UNIT:
strcpy(name, "Process Unit");
return 12;
- case UAC1_EXTENSION_UNIT:
+ case UAC3_EXTENSION_UNIT:
strcpy(name, "Ext Unit");
return 8;
- case UAC_MIXER_UNIT:
+ case UAC3_MIXER_UNIT:
strcpy(name, "Mixer");
return 5;
default:
@@ -832,7 +832,7 @@ static int check_input_term(struct mixer_build *state, int id,
case UAC_MIXER_UNIT: {
struct uac_mixer_unit_descriptor *d = p1;
- term->type = d->bDescriptorSubtype << 16; /* virtual type */
+ term->type = UAC3_MIXER_UNIT << 16; /* virtual type */
term->channels = uac_mixer_unit_bNrChannels(d);
term->chconfig = uac_mixer_unit_wChannelConfig(d, protocol);
term->name = uac_mixer_unit_iMixer(d);
@@ -845,15 +845,25 @@ static int check_input_term(struct mixer_build *state, int id,
err = check_input_term(state, d->baSourceID[0], term);
if (err < 0)
return err;
- term->type = d->bDescriptorSubtype << 16; /* virtual type */
+ term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
term->id = id;
term->name = uac_selector_unit_iSelector(d);
return 0;
}
case UAC1_PROCESSING_UNIT:
+ /* UAC2_EFFECT_UNIT */
+ if (protocol == UAC_VERSION_1)
+ term->type = UAC3_PROCESSING_UNIT << 16; /* virtual type */
+ else /* UAC_VERSION_2 */
+ term->type = UAC3_EFFECT_UNIT << 16; /* virtual type */
+ /* fall through */
case UAC1_EXTENSION_UNIT:
/* UAC2_PROCESSING_UNIT_V2 */
- /* UAC2_EFFECT_UNIT */
+ if (protocol == UAC_VERSION_1 && !term->type)
+ term->type = UAC3_EXTENSION_UNIT << 16; /* virtual type */
+ else if (protocol == UAC_VERSION_2 && !term->type)
+ term->type = UAC3_PROCESSING_UNIT << 16; /* virtual type */
+ /* fall through */
case UAC2_EXTENSION_UNIT_V2: {
struct uac_processing_unit_descriptor *d = p1;
@@ -869,7 +879,9 @@ static int check_input_term(struct mixer_build *state, int id,
id = d->baSourceID[0];
break; /* continue to parse */
}
- term->type = d->bDescriptorSubtype << 16; /* virtual type */
+ if (!term->type)
+ term->type = UAC3_EXTENSION_UNIT << 16; /* virtual type */
+
term->channels = uac_processing_unit_bNrChannels(d);
term->chconfig = uac_processing_unit_wChannelConfig(d, protocol);
term->name = uac_processing_unit_iProcessing(d, protocol);
@@ -878,7 +890,7 @@ static int check_input_term(struct mixer_build *state, int id,
case UAC2_CLOCK_SOURCE: {
struct uac_clock_source_descriptor *d = p1;
- term->type = d->bDescriptorSubtype << 16; /* virtual type */
+ term->type = UAC3_CLOCK_SOURCE << 16; /* virtual type */
term->id = id;
term->name = d->iClockSource;
return 0;
@@ -923,7 +935,7 @@ static int check_input_term(struct mixer_build *state, int id,
case UAC3_CLOCK_SOURCE: {
struct uac3_clock_source_descriptor *d = p1;
- term->type = d->bDescriptorSubtype << 16; /* virtual type */
+ term->type = UAC3_CLOCK_SOURCE << 16; /* virtual type */
term->id = id;
term->name = le16_to_cpu(d->wClockSourceStr);
return 0;
@@ -936,7 +948,37 @@ static int check_input_term(struct mixer_build *state, int id,
return err;
term->channels = err;
- term->type = d->bDescriptorSubtype << 16; /* virtual type */
+ term->type = UAC3_MIXER_UNIT << 16; /* virtual type */
+
+ return 0;
+ }
+ case UAC3_SELECTOR_UNIT:
+ case UAC3_CLOCK_SELECTOR: {
+ struct uac_selector_unit_descriptor *d = p1;
+ /* call recursively to retrieve the channel info */
+ err = check_input_term(state, d->baSourceID[0], term);
+ if (err < 0)
+ return err;
+ term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
+ term->id = id;
+ term->name = 0; /* TODO: UAC3 Class-specific strings */
+
+ return 0;
+ }
+ case UAC3_PROCESSING_UNIT: {
+ struct uac_processing_unit_descriptor *d = p1;
+
+ if (!d->bNrInPins)
+ return -EINVAL;
+
+ /* call recursively to retrieve the channel info */
+ err = check_input_term(state, d->baSourceID[0], term);
+ if (err < 0)
+ return err;
+
+ term->type = UAC3_PROCESSING_UNIT << 16; /* virtual type */
+ term->id = id;
+ term->name = 0; /* TODO: UAC3 Class-specific strings */
return 0;
}
@@ -2167,6 +2209,11 @@ struct procunit_info {
struct procunit_value_info *values;
};
+static struct procunit_value_info undefined_proc_info[] = {
+ { 0x00, "Control Undefined", 0 },
+ { 0 }
+};
+
static struct procunit_value_info updown_proc_info[] = {
{ UAC_UD_ENABLE, "Switch", USB_MIXER_BOOLEAN },
{ UAC_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 },
@@ -2215,6 +2262,23 @@ static struct procunit_info procunits[] = {
{ UAC_PROCESS_DYN_RANGE_COMP, "DCR", dcr_proc_info },
{ 0 },
};
+
+static struct procunit_value_info uac3_updown_proc_info[] = {
+ { UAC3_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 },
+ { 0 }
+};
+static struct procunit_value_info uac3_stereo_ext_proc_info[] = {
+ { UAC3_EXT_WIDTH_CONTROL, "Width Control", USB_MIXER_U8 },
+ { 0 }
+};
+
+static struct procunit_info uac3_procunits[] = {
+ { UAC3_PROCESS_UP_DOWNMIX, "Up Down", uac3_updown_proc_info },
+ { UAC3_PROCESS_STEREO_EXTENDER, "3D Stereo Extender", uac3_stereo_ext_proc_info },
+ { UAC3_PROCESS_MULTI_FUNCTION, "Multi-Function", undefined_proc_info },
+ { 0 },
+};
+
/*
* predefined data for extension units
*/
@@ -2287,8 +2351,16 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
for (valinfo = info->values; valinfo->control; valinfo++) {
__u8 *controls = uac_processing_unit_bmControls(desc, state->mixer->protocol);
- if (!(controls[valinfo->control / 8] & (1 << ((valinfo->control % 8) - 1))))
- continue;
+ if (state->mixer->protocol == UAC_VERSION_1) {
+ if (!(controls[valinfo->control / 8] &
+ (1 << ((valinfo->control % 8) - 1))))
+ continue;
+ } else { /* UAC_VERSION_2/3 */
+ if (!uac_v2v3_control_is_readable(controls[valinfo->control / 8],
+ valinfo->control))
+ continue;
+ }
+
map = find_map(state->map, unitid, valinfo->control);
if (check_ignored_ctl(map))
continue;
@@ -2300,26 +2372,55 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
cval->val_type = valinfo->val_type;
cval->channels = 1;
+ if (state->mixer->protocol > UAC_VERSION_1 &&
+ !uac_v2v3_control_is_writeable(controls[valinfo->control / 8],
+ valinfo->control))
+ cval->master_readonly = 1;
+
/* get min/max values */
- if (type == UAC_PROCESS_UP_DOWNMIX && cval->control == UAC_UD_MODE_SELECT) {
- __u8 *control_spec = uac_processing_unit_specific(desc, state->mixer->protocol);
- /* FIXME: hard-coded */
- cval->min = 1;
- cval->max = control_spec[0];
- cval->res = 1;
- cval->initialized = 1;
- } else {
- if (type == USB_XU_CLOCK_RATE) {
- /*
- * E-Mu USB 0404/0202/TrackerPre/0204
- * samplerate control quirk
- */
- cval->min = 0;
- cval->max = 5;
+ switch (type) {
+ case UAC_PROCESS_UP_DOWNMIX: {
+ bool mode_sel = false;
+
+ switch (state->mixer->protocol) {
+ case UAC_VERSION_1:
+ case UAC_VERSION_2:
+ default:
+ if (cval->control == UAC_UD_MODE_SELECT)
+ mode_sel = true;
+ break;
+ case UAC_VERSION_3:
+ if (cval->control == UAC3_UD_MODE_SELECT)
+ mode_sel = true;
+ break;
+ }
+
+ if (mode_sel) {
+ __u8 *control_spec = uac_processing_unit_specific(desc,
+ state->mixer->protocol);
+ cval->min = 1;
+ cval->max = control_spec[0];
cval->res = 1;
cval->initialized = 1;
- } else
- get_min_max(cval, valinfo->min_value);
+ break;
+ }
+
+ get_min_max(cval, valinfo->min_value);
+ break;
+ }
+ case USB_XU_CLOCK_RATE:
+ /*
+ * E-Mu USB 0404/0202/TrackerPre/0204
+ * samplerate control quirk
+ */
+ cval->min = 0;
+ cval->max = 5;
+ cval->res = 1;
+ cval->initialized = 1;
+ break;
+ default:
+ get_min_max(cval, valinfo->min_value);
+ break;
}
kctl = snd_ctl_new1(&mixer_procunit_ctl, cval);
@@ -2362,8 +2463,16 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
static int parse_audio_processing_unit(struct mixer_build *state, int unitid,
void *raw_desc)
{
- return build_audio_procunit(state, unitid, raw_desc,
- procunits, "Processing Unit");
+ switch (state->mixer->protocol) {
+ case UAC_VERSION_1:
+ case UAC_VERSION_2:
+ default:
+ return build_audio_procunit(state, unitid, raw_desc,
+ procunits, "Processing Unit");
+ case UAC_VERSION_3:
+ return build_audio_procunit(state, unitid, raw_desc,
+ uac3_procunits, "Processing Unit");
+ }
}
static int parse_audio_extension_unit(struct mixer_build *state, int unitid,
@@ -2509,11 +2618,20 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
cval->res = 1;
cval->initialized = 1;
- if (state->mixer->protocol == UAC_VERSION_1)
+ switch (state->mixer->protocol) {
+ case UAC_VERSION_1:
+ default:
cval->control = 0;
- else /* UAC_VERSION_2 */
- cval->control = (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) ?
- UAC2_CX_CLOCK_SELECTOR : UAC2_SU_SELECTOR;
+ break;
+ case UAC_VERSION_2:
+ case UAC_VERSION_3:
+ if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR ||
+ desc->bDescriptorSubtype == UAC3_CLOCK_SELECTOR)
+ cval->control = UAC2_CX_CLOCK_SELECTOR;
+ else /* UAC2/3_SELECTOR_UNIT */
+ cval->control = UAC2_SU_SELECTOR;
+ break;
+ }
namelist = kmalloc_array(desc->bNrInPins, sizeof(char *), GFP_KERNEL);
if (!namelist) {
@@ -2555,12 +2673,22 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
if (!len) {
/* no mapping ? */
+ switch (state->mixer->protocol) {
+ case UAC_VERSION_1:
+ case UAC_VERSION_2:
+ default:
/* if iSelector is given, use it */
- nameid = uac_selector_unit_iSelector(desc);
- if (nameid)
- len = snd_usb_copy_string_desc(state->chip, nameid,
- kctl->id.name,
- sizeof(kctl->id.name));
+ nameid = uac_selector_unit_iSelector(desc);
+ if (nameid)
+ len = snd_usb_copy_string_desc(state->chip,
+ nameid, kctl->id.name,
+ sizeof(kctl->id.name));
+ break;
+ case UAC_VERSION_3:
+ /* TODO: Class-Specific strings not yet supported */
+ break;
+ }
+
/* ... or pick up the terminal name at next */
if (!len)
len = get_term_name(state->chip, &state->oterm,
@@ -2570,7 +2698,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
/* and add the proper suffix */
- if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
+ if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR ||
+ desc->bDescriptorSubtype == UAC3_CLOCK_SELECTOR)
append_ctl_name(kctl, " Clock Source");
else if ((state->oterm.type & 0xff00) == 0x0100)
append_ctl_name(kctl, " Capture Source");
@@ -2641,6 +2770,7 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
return parse_audio_mixer_unit(state, unitid, p1);
case UAC3_CLOCK_SOURCE:
return parse_clock_source_unit(state, unitid, p1);
+ case UAC3_SELECTOR_UNIT:
case UAC3_CLOCK_SELECTOR:
return parse_audio_selector_unit(state, unitid, p1);
case UAC3_FEATURE_UNIT:
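The mixer changes above stop storing the raw bDescriptorSubtype in the upper 16 bits of term->type and instead always store a UAC3_* constant there, so consumers such as get_term_name() can switch on a single "virtual type" namespace regardless of which UAC version the descriptor came from. A hedged illustration of reading that field back (hypothetical helper):

#include <linux/usb/audio-v3.h>

static bool example_term_is_selector(unsigned int virt_type)
{
	/* the high 16 bits carry the normalized UAC3_* unit subtype */
	return (virt_type >> 16) == UAC3_SELECTOR_UNIT;
}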
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index e02653465e29..3d12af8bf191 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -109,4 +109,6 @@ int snd_usb_get_cur_mix_value(struct usb_mixer_elem_info *cval,
extern void snd_usb_mixer_elem_free(struct snd_kcontrol *kctl);
+extern struct snd_kcontrol_new *snd_usb_feature_unit_ctl;
+
#endif /* __USBMIXER_H */
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index e82a72fea9a1..cbfb48bdea51 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -47,8 +47,6 @@
#include "mixer_us16x08.h"
#include "helper.h"
-extern struct snd_kcontrol_new *snd_usb_feature_unit_ctl;
-
struct std_mono_table {
unsigned int unitid, control, cmask;
int val_type;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 160f52c4871b..382847154227 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -711,6 +711,54 @@ static int configure_endpoint(struct snd_usb_substream *subs)
return ret;
}
+static int snd_usb_pcm_change_state(struct snd_usb_substream *subs, int state)
+{
+ int ret;
+
+ if (!subs->str_pd)
+ return 0;
+
+ ret = snd_usb_power_domain_set(subs->stream->chip, subs->str_pd, state);
+ if (ret < 0) {
+ dev_err(&subs->dev->dev,
+ "Cannot change Power Domain ID: %d to state: %d. Err: %d\n",
+ subs->str_pd->pd_id, state, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int snd_usb_pcm_suspend(struct snd_usb_stream *as)
+{
+ int ret;
+
+ ret = snd_usb_pcm_change_state(&as->substream[0], UAC3_PD_STATE_D2);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_usb_pcm_change_state(&as->substream[1], UAC3_PD_STATE_D2);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int snd_usb_pcm_resume(struct snd_usb_stream *as)
+{
+ int ret;
+
+ ret = snd_usb_pcm_change_state(&as->substream[0], UAC3_PD_STATE_D1);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_usb_pcm_change_state(&as->substream[1], UAC3_PD_STATE_D1);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
/*
* hw_params callback
*
@@ -755,16 +803,22 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
ret = snd_usb_lock_shutdown(subs->stream->chip);
if (ret < 0)
return ret;
+
+ ret = snd_usb_pcm_change_state(subs, UAC3_PD_STATE_D0);
+ if (ret < 0)
+ goto unlock;
+
ret = set_format(subs, fmt);
- snd_usb_unlock_shutdown(subs->stream->chip);
if (ret < 0)
- return ret;
+ goto unlock;
subs->interface = fmt->iface;
subs->altset_idx = fmt->altset_idx;
subs->need_setup_ep = true;
- return 0;
+ unlock:
+ snd_usb_unlock_shutdown(subs->stream->chip);
+ return ret;
}
/*
@@ -821,6 +875,10 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
snd_usb_endpoint_sync_pending_stop(subs->sync_endpoint);
snd_usb_endpoint_sync_pending_stop(subs->data_endpoint);
+ ret = snd_usb_pcm_change_state(subs, UAC3_PD_STATE_D0);
+ if (ret < 0)
+ goto unlock;
+
ret = set_format(subs, subs->cur_audiofmt);
if (ret < 0)
goto unlock;
@@ -1265,6 +1323,7 @@ static int snd_usb_pcm_close(struct snd_pcm_substream *substream)
int direction = substream->stream;
struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
struct snd_usb_substream *subs = &as->substream[direction];
+ int ret;
stop_endpoints(subs, true);
@@ -1273,7 +1332,10 @@ static int snd_usb_pcm_close(struct snd_pcm_substream *substream)
!snd_usb_lock_shutdown(subs->stream->chip)) {
usb_set_interface(subs->dev, subs->interface, 0);
subs->interface = -1;
+ ret = snd_usb_pcm_change_state(subs, UAC3_PD_STATE_D1);
snd_usb_unlock_shutdown(subs->stream->chip);
+ if (ret < 0)
+ return ret;
}
subs->pcm_substream = NULL;
@@ -1632,6 +1694,7 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
subs->trigger_tstamp_pending_update = true;
+ /* fall through */
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
subs->data_endpoint->prepare_data_urb = prepare_playback_urb;
subs->data_endpoint->retire_data_urb = retire_playback_urb;
@@ -1694,7 +1757,6 @@ static const struct snd_pcm_ops snd_usb_playback_ops = {
.trigger = snd_usb_substream_playback_trigger,
.pointer = snd_usb_pcm_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static const struct snd_pcm_ops snd_usb_capture_ops = {
@@ -1707,7 +1769,6 @@ static const struct snd_pcm_ops snd_usb_capture_ops = {
.trigger = snd_usb_substream_capture_trigger,
.pointer = snd_usb_pcm_pointer,
.page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
};
static const struct snd_pcm_ops snd_usb_playback_dev_ops = {
diff --git a/sound/usb/pcm.h b/sound/usb/pcm.h
index f77ec58bf1a1..9833627c1eca 100644
--- a/sound/usb/pcm.h
+++ b/sound/usb/pcm.h
@@ -6,6 +6,8 @@ snd_pcm_uframes_t snd_usb_pcm_delay(struct snd_usb_substream *subs,
unsigned int rate);
void snd_usb_set_pcm_ops(struct snd_pcm *pcm, int stream);
+int snd_usb_pcm_suspend(struct snd_usb_stream *as);
+int snd_usb_pcm_resume(struct snd_usb_stream *as);
int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
struct usb_host_interface *alts,
diff --git a/sound/usb/power.c b/sound/usb/power.c
new file mode 100644
index 000000000000..bd303a1ba1b7
--- /dev/null
+++ b/sound/usb/power.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UAC3 Power Domain state management functions
+ */
+
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
+
+#include "usbaudio.h"
+#include "helper.h"
+#include "power.h"
+
+struct snd_usb_power_domain *
+snd_usb_find_power_domain(struct usb_host_interface *ctrl_iface,
+ unsigned char id)
+{
+ struct snd_usb_power_domain *pd;
+ void *p;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return NULL;
+
+ p = NULL;
+ while ((p = snd_usb_find_csint_desc(ctrl_iface->extra,
+ ctrl_iface->extralen,
+ p, UAC3_POWER_DOMAIN)) != NULL) {
+ struct uac3_power_domain_descriptor *pd_desc = p;
+ int i;
+
+ for (i = 0; i < pd_desc->bNrEntities; i++) {
+ if (pd_desc->baEntityID[i] == id) {
+ pd->pd_id = pd_desc->bPowerDomainID;
+ pd->pd_d1d0_rec =
+ le16_to_cpu(pd_desc->waRecoveryTime1);
+ pd->pd_d2d0_rec =
+ le16_to_cpu(pd_desc->waRecoveryTime2);
+ return pd;
+ }
+ }
+ }
+
+ kfree(pd);
+ return NULL;
+}
+
+int snd_usb_power_domain_set(struct snd_usb_audio *chip,
+ struct snd_usb_power_domain *pd,
+ unsigned char state)
+{
+ struct usb_device *dev = chip->dev;
+ unsigned char current_state;
+ int err, idx;
+
+ idx = snd_usb_ctrl_intf(chip) | (pd->pd_id << 8);
+
+ err = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0),
+ UAC2_CS_CUR,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+ UAC3_AC_POWER_DOMAIN_CONTROL << 8, idx,
+ &current_state, sizeof(current_state));
+ if (err < 0) {
+ dev_err(&dev->dev, "Can't get UAC3 power state for id %d\n",
+ pd->pd_id);
+ return err;
+ }
+
+ if (current_state == state) {
+ dev_dbg(&dev->dev, "UAC3 power domain id %d already in state %d\n",
+ pd->pd_id, state);
+ return 0;
+ }
+
+ err = snd_usb_ctl_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0), UAC2_CS_CUR,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+ UAC3_AC_POWER_DOMAIN_CONTROL << 8, idx,
+ &state, sizeof(state));
+ if (err < 0) {
+ dev_err(&dev->dev, "Can't set UAC3 power state to %d for id %d\n",
+ state, pd->pd_id);
+ return err;
+ }
+
+ if (state == UAC3_PD_STATE_D0) {
+ switch (current_state) {
+ case UAC3_PD_STATE_D2:
+ udelay(pd->pd_d2d0_rec * 50);
+ break;
+ case UAC3_PD_STATE_D1:
+ udelay(pd->pd_d1d0_rec * 50);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ dev_dbg(&dev->dev, "UAC3 power domain id %d changed to state %d\n",
+ pd->pd_id, state);
+
+ return 0;
+}
diff --git a/sound/usb/power.h b/sound/usb/power.h
index b2e25f60c5a2..6004231a7c75 100644
--- a/sound/usb/power.h
+++ b/sound/usb/power.h
@@ -2,6 +2,25 @@
#ifndef __USBAUDIO_POWER_H
#define __USBAUDIO_POWER_H
+struct snd_usb_power_domain {
+ int pd_id; /* UAC3 Power Domain ID */
+ int pd_d1d0_rec; /* D1 to D0 recovery time */
+ int pd_d2d0_rec; /* D2 to D0 recovery time */
+};
+
+enum {
+ UAC3_PD_STATE_D0,
+ UAC3_PD_STATE_D1,
+ UAC3_PD_STATE_D2,
+};
+
+int snd_usb_power_domain_set(struct snd_usb_audio *chip,
+ struct snd_usb_power_domain *pd,
+ unsigned char state);
+struct snd_usb_power_domain *
+snd_usb_find_power_domain(struct usb_host_interface *ctrl_iface,
+ unsigned char id);
+
#ifdef CONFIG_PM
int snd_usb_autoresume(struct snd_usb_audio *chip);
void snd_usb_autosuspend(struct snd_usb_audio *chip);
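A hedged usage sketch of the new power-domain API (hypothetical caller; the real users are snd_usb_init_substream() and the PCM suspend/resume and hw_params/prepare paths added in this series). The D1/D2-to-D0 recovery delays are applied inside snd_usb_power_domain_set(), using the per-domain recovery times parsed out of the descriptor:

#include <linux/slab.h>

static int example_wake_entity(struct snd_usb_audio *chip,
			       struct usb_host_interface *ctrl_iface,
			       unsigned char entity_id)
{
	struct snd_usb_power_domain *pd;
	int err;

	pd = snd_usb_find_power_domain(ctrl_iface, entity_id);
	if (!pd)
		return 0;	/* no power domain covers this entity */

	err = snd_usb_power_domain_set(chip, pd, UAC3_PD_STATE_D0);
	kfree(pd);
	return err;
}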
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 8aac48f9c322..08aa78007020 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2875,7 +2875,8 @@ YAMAHA_DEVICE(0x7010, "UB99"),
*/
#define AU0828_DEVICE(vid, pid, vname, pname) { \
- USB_DEVICE_VENDOR_SPEC(vid, pid), \
+ .idVendor = vid, \
+ .idProduct = pid, \
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
USB_DEVICE_ID_MATCH_INT_CLASS | \
USB_DEVICE_ID_MATCH_INT_SUBCLASS, \
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 02b6cc02767f..8a945ece9869 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1213,7 +1213,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
if (err < 0)
return err;
- mdelay(20); /* Delay needed after setting the interface */
+ msleep(20); /* Delay needed after setting the interface */
/* Vendor mode switch cmd is required. */
if (fmt->formats & SNDRV_PCM_FMTBIT_DSD_U32_BE) {
@@ -1234,7 +1234,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
return err;
}
- mdelay(20);
+ msleep(20);
}
return 0;
}
@@ -1281,7 +1281,7 @@ void snd_usb_set_interface_quirk(struct usb_device *dev)
switch (USB_ID_VENDOR(chip->usb_id)) {
case 0x23ba: /* Playback Design */
case 0x0644: /* TEAC Corp. */
- mdelay(50);
+ msleep(50);
break;
}
}
@@ -1301,7 +1301,7 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
*/
if (USB_ID_VENDOR(chip->usb_id) == 0x23ba &&
(requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
- mdelay(20);
+ msleep(20);
/*
* "TEAC Corp." products need a 20ms delay after each
@@ -1309,14 +1309,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
*/
if (USB_ID_VENDOR(chip->usb_id) == 0x0644 &&
(requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
- mdelay(20);
+ msleep(20);
/* ITF-USB DSD based DACs need a delay
* after each class-compliant request
*/
if (is_itf_usb_dsd_dac(chip->usb_id)
&& (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
- mdelay(20);
+ msleep(20);
/* Zoom R16/24, Logitech H650e and Jabra 550a need a tiny delay here,
* otherwise requests like get/set frequency return as failed despite
@@ -1326,7 +1326,7 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
chip->usb_id == USB_ID(0x046d, 0x0a46) ||
chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
(requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
- mdelay(1);
+ usleep_range(1000, 2000);
}
/*
@@ -1373,6 +1373,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
+ case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */
case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
@@ -1443,6 +1444,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
*/
switch (USB_ID_VENDOR(chip->usb_id)) {
case 0x20b1: /* XMOS based devices */
+ case 0x152a: /* Thesycon devices */
case 0x25ce: /* Mytek devices */
if (fp->dsd_raw)
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
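The mdelay() to msleep()/usleep_range() conversions above follow the kernel's standard delay guidance (Documentation/timers/timers-howto): busy-waiting is only required in atomic context, and these quirk delays all run in process context where sleeping is allowed. A hedged sketch of the two choices (hypothetical function):

#include <linux/delay.h>

static void example_quirk_delays(void)
{
	/* tens of milliseconds: sleep instead of spinning the CPU */
	msleep(20);

	/* around a millisecond: hrtimer-backed sleep with some slack,
	 * rather than burning a full millisecond in mdelay(1)
	 */
	usleep_range(1000, 2000);
}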
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 729afd808cc4..67cf849aa16b 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -37,6 +37,7 @@
#include "format.h"
#include "clock.h"
#include "stream.h"
+#include "power.h"
/*
* free a substream
@@ -53,6 +54,7 @@ static void free_substream(struct snd_usb_substream *subs)
kfree(fp);
}
kfree(subs->rate_list.list);
+ kfree(subs->str_pd);
}
@@ -82,7 +84,8 @@ static void snd_usb_audio_pcm_free(struct snd_pcm *pcm)
static void snd_usb_init_substream(struct snd_usb_stream *as,
int stream,
- struct audioformat *fp)
+ struct audioformat *fp,
+ struct snd_usb_power_domain *pd)
{
struct snd_usb_substream *subs = &as->substream[stream];
@@ -107,6 +110,13 @@ static void snd_usb_init_substream(struct snd_usb_stream *as,
if (fp->channels > subs->channels_max)
subs->channels_max = fp->channels;
+ if (pd) {
+ subs->str_pd = pd;
+ /* Initialize Power Domain to idle status D1 */
+ snd_usb_power_domain_set(subs->stream->chip, pd,
+ UAC3_PD_STATE_D1);
+ }
+
snd_usb_preallocate_buffer(subs);
}
@@ -468,9 +478,11 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
* fmt_list and will be freed on the chip instance release. do not free
* fp or do remove it from the substream fmt_list to avoid double-free.
*/
-int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
- int stream,
- struct audioformat *fp)
+static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip,
+ int stream,
+ struct audioformat *fp,
+ struct snd_usb_power_domain *pd)
+
{
struct snd_usb_stream *as;
struct snd_usb_substream *subs;
@@ -498,7 +510,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
err = snd_pcm_new_stream(as->pcm, stream, 1);
if (err < 0)
return err;
- snd_usb_init_substream(as, stream, fp);
+ snd_usb_init_substream(as, stream, fp, pd);
return add_chmap(as->pcm, stream, subs);
}
@@ -526,7 +538,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
else
strcpy(pcm->name, "USB Audio");
- snd_usb_init_substream(as, stream, fp);
+ snd_usb_init_substream(as, stream, fp, pd);
/*
* Keep using head insertion for M-Audio Audiophile USB (tm) which has a
@@ -544,6 +556,21 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
return add_chmap(pcm, stream, &as->substream[stream]);
}
+int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
+ int stream,
+ struct audioformat *fp)
+{
+ return __snd_usb_add_audio_stream(chip, stream, fp, NULL);
+}
+
+static int snd_usb_add_audio_stream_v3(struct snd_usb_audio *chip,
+ int stream,
+ struct audioformat *fp,
+ struct snd_usb_power_domain *pd)
+{
+ return __snd_usb_add_audio_stream(chip, stream, fp, pd);
+}
+
static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
struct usb_host_interface *alts,
int protocol, int iface_no)
@@ -819,6 +846,7 @@ found_clock:
static struct audioformat *
snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
struct usb_host_interface *alts,
+ struct snd_usb_power_domain **pd_out,
int iface_no, int altset_idx,
int altno, int stream)
{
@@ -829,6 +857,7 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
struct uac3_as_header_descriptor *as = NULL;
struct uac3_hc_descriptor_header hc_header;
struct snd_pcm_chmap_elem *chmap;
+ struct snd_usb_power_domain *pd;
unsigned char badd_profile;
u64 badd_formats = 0;
unsigned int num_channels;
@@ -1008,12 +1037,28 @@ found_clock:
fp->rate_max = UAC3_BADD_SAMPLING_RATE;
fp->rates = SNDRV_PCM_RATE_CONTINUOUS;
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ kfree(fp->rate_table);
+ kfree(fp);
+ return NULL;
+ }
+ pd->pd_id = (stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+ UAC3_BADD_PD_ID10 : UAC3_BADD_PD_ID11;
+ pd->pd_d1d0_rec = UAC3_BADD_PD_RECOVER_D1D0;
+ pd->pd_d2d0_rec = UAC3_BADD_PD_RECOVER_D2D0;
+
} else {
fp->attributes = parse_uac_endpoint_attributes(chip, alts,
UAC_VERSION_3,
iface_no);
+
+ pd = snd_usb_find_power_domain(chip->ctrl_intf,
+ as->bTerminalLink);
+
/* ok, let's parse further... */
if (snd_usb_parse_audio_format_v3(chip, fp, as, stream) < 0) {
+ kfree(pd);
kfree(fp->chmap);
kfree(fp->rate_table);
kfree(fp);
@@ -1021,6 +1066,9 @@ found_clock:
}
}
+ if (pd)
+ *pd_out = pd;
+
return fp;
}
@@ -1032,6 +1080,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
struct usb_interface_descriptor *altsd;
int i, altno, err, stream;
struct audioformat *fp = NULL;
+ struct snd_usb_power_domain *pd = NULL;
int num, protocol;
dev = chip->dev;
@@ -1114,7 +1163,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
break;
}
case UAC_VERSION_3:
- fp = snd_usb_get_audioformat_uac3(chip, alts,
+ fp = snd_usb_get_audioformat_uac3(chip, alts, &pd,
iface_no, i, altno, stream);
break;
}
@@ -1125,9 +1174,14 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
return PTR_ERR(fp);
dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint);
- err = snd_usb_add_audio_stream(chip, stream, fp);
+ if (protocol == UAC_VERSION_3)
+ err = snd_usb_add_audio_stream_v3(chip, stream, fp, pd);
+ else
+ err = snd_usb_add_audio_stream(chip, stream, fp);
+
if (err < 0) {
list_del(&fp->list); /* unlink for avoiding double-free */
+ kfree(pd);
kfree(fp->rate_table);
kfree(fp->chmap);
kfree(fp);
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index 4ed9d0c41843..fa7dca5a68c8 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -290,7 +290,6 @@ static void had_reset_audio(struct snd_intelhad *intelhaddata)
static int had_prog_status_reg(struct snd_pcm_substream *substream,
struct snd_intelhad *intelhaddata)
{
- union aud_cfg cfg_val = {.regval = 0};
union aud_ch_status_0 ch_stat0 = {.regval = 0};
union aud_ch_status_1 ch_stat1 = {.regval = 0};
@@ -298,7 +297,6 @@ static int had_prog_status_reg(struct snd_pcm_substream *substream,
IEC958_AES0_NONAUDIO) >> 1;
ch_stat0.regx.clk_acc = (intelhaddata->aes_bits &
IEC958_AES3_CON_CLOCK) >> 4;
- cfg_val.regx.val_bit = ch_stat0.regx.lpcm_id;
switch (substream->runtime->rate) {
case AUD_SAMPLE_RATE_32:
@@ -1854,7 +1852,7 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
/* setup private data which can be retrieved when required */
pcm->private_data = ctx;
pcm->info_flags = 0;
- strncpy(pcm->name, card->shortname, strlen(card->shortname));
+ strlcpy(pcm->name, card->shortname, strlen(card->shortname));
/* setup the ops for playback */
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &had_pcm_ops);
diff --git a/sound/xen/xen_snd_front_alsa.c b/sound/xen/xen_snd_front_alsa.c
index 5a2bd70a2fa1..129180e17db1 100644
--- a/sound/xen/xen_snd_front_alsa.c
+++ b/sound/xen/xen_snd_front_alsa.c
@@ -188,7 +188,7 @@ static u64 to_sndif_formats_mask(u64 alsa_formats)
mask = 0;
for (i = 0; i < ARRAY_SIZE(ALSA_SNDIF_FORMATS); i++)
- if (1 << ALSA_SNDIF_FORMATS[i].alsa & alsa_formats)
+ if (pcm_format_to_bits(ALSA_SNDIF_FORMATS[i].alsa) & alsa_formats)
mask |= 1 << ALSA_SNDIF_FORMATS[i].sndif;
return mask;
@@ -202,7 +202,7 @@ static u64 to_alsa_formats_mask(u64 sndif_formats)
mask = 0;
for (i = 0; i < ARRAY_SIZE(ALSA_SNDIF_FORMATS); i++)
if (1 << ALSA_SNDIF_FORMATS[i].sndif & sndif_formats)
- mask |= 1 << ALSA_SNDIF_FORMATS[i].alsa;
+ mask |= pcm_format_to_bits(ALSA_SNDIF_FORMATS[i].alsa);
return mask;
}
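The zx-tdm and Xen frontend fixes above correct the same mix-up: SNDRV_PCM_FORMAT_* values are enum indices, while SNDRV_PCM_FMTBIT_* are the corresponding single-bit masks. The pcm_format_to_bits() helper used in the Xen hunk converts between the two; a small sketch:

#include <sound/pcm.h>

static u64 example_format_mask(void)
{
	/* 1ULL << SNDRV_PCM_FORMAT_S16_LE, i.e. SNDRV_PCM_FMTBIT_S16_LE */
	return pcm_format_to_bits(SNDRV_PCM_FORMAT_S16_LE);
}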
diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..5072cbd15c82
--- /dev/null
+++ b/tools/arch/arm64/include/uapi/asm/unistd.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define __ARCH_WANT_RENAMEAT
+
+#include <asm-generic/unistd.h>
diff --git a/tools/arch/parisc/include/uapi/asm/errno.h b/tools/arch/parisc/include/uapi/asm/errno.h
index fc0df353ff0d..87245c584784 100644
--- a/tools/arch/parisc/include/uapi/asm/errno.h
+++ b/tools/arch/parisc/include/uapi/asm/errno.h
@@ -113,7 +113,6 @@
#define ELOOP 249 /* Too many symbolic links encountered */
#define ENOSYS 251 /* Function not implemented */
-#define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */
#define ECANCELLED 253 /* aio request was canceled before complete (POSIX.4 / HPUX) */
#define ECANCELED ECANCELLED /* SuSv3 and Solaris wants one 'L' */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 5701f5cecd31..64aaa3f5f36c 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -219,6 +219,7 @@
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -341,6 +342,7 @@
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
@@ -373,5 +375,6 @@
#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/bpf/.gitignore b/tools/bpf/.gitignore
new file mode 100644
index 000000000000..dfe2bd5a4b95
--- /dev/null
+++ b/tools/bpf/.gitignore
@@ -0,0 +1,5 @@
+FEATURE-DUMP.bpf
+bpf_asm
+bpf_dbg
+bpf_exp.yacc.*
+bpf_jit_disasm
diff --git a/tools/bpf/Makefile.helpers b/tools/bpf/Makefile.helpers
new file mode 100644
index 000000000000..c34fea77f39f
--- /dev/null
+++ b/tools/bpf/Makefile.helpers
@@ -0,0 +1,59 @@
+ifndef allow-override
+ include ../scripts/Makefile.include
+ include ../scripts/utilities.mak
+else
+ # Assume Makefile.helpers is being run from bpftool/Documentation
+ # subdirectory. Go up two more directories to fetch bpf.h header and
+ # associated script.
+ UP2DIR := ../../
+endif
+
+INSTALL ?= install
+RM ?= rm -f
+RMDIR ?= rmdir --ignore-fail-on-non-empty
+
+ifeq ($(V),1)
+ Q =
+else
+ Q = @
+endif
+
+prefix ?= /usr/local
+mandir ?= $(prefix)/man
+man7dir = $(mandir)/man7
+
+HELPERS_RST = bpf-helpers.rst
+MAN7_RST = $(HELPERS_RST)
+
+_DOC_MAN7 = $(patsubst %.rst,%.7,$(MAN7_RST))
+DOC_MAN7 = $(addprefix $(OUTPUT),$(_DOC_MAN7))
+
+helpers: man7
+man7: $(DOC_MAN7)
+
+RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
+
+$(OUTPUT)$(HELPERS_RST): $(UP2DIR)../../include/uapi/linux/bpf.h
+ $(QUIET_GEN)$(UP2DIR)../../scripts/bpf_helpers_doc.py --filename $< > $@
+
+$(OUTPUT)%.7: $(OUTPUT)%.rst
+ifndef RST2MAN_DEP
+ $(error "rst2man not found, but required to generate man pages")
+endif
+ $(QUIET_GEN)rst2man $< > $@
+
+helpers-clean:
+ $(call QUIET_CLEAN, eBPF_helpers-manpage)
+ $(Q)$(RM) $(DOC_MAN7) $(OUTPUT)$(HELPERS_RST)
+
+helpers-install: helpers
+ $(call QUIET_INSTALL, eBPF_helpers-manpage)
+ $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man7dir)
+ $(Q)$(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir)
+
+helpers-uninstall:
+ $(call QUIET_UNINST, eBPF_helpers-manpage)
+ $(Q)$(RM) $(addprefix $(DESTDIR)$(man7dir)/,$(_DOC_MAN7))
+ $(Q)$(RMDIR) $(DESTDIR)$(man7dir)
+
+.PHONY: helpers helpers-clean helpers-install helpers-uninstall
diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore
index d7e678c2d396..67167e44b726 100644
--- a/tools/bpf/bpftool/.gitignore
+++ b/tools/bpf/bpftool/.gitignore
@@ -1,3 +1,5 @@
*.d
bpftool
+bpftool*.8
+bpf-helpers.*
FEATURE-DUMP.bpftool
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index a9d47c1558bb..f7663a3e60c9 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -15,12 +15,15 @@ prefix ?= /usr/local
mandir ?= $(prefix)/man
man8dir = $(mandir)/man8
-MAN8_RST = $(wildcard *.rst)
+# Load targets for building eBPF helpers man page.
+include ../../Makefile.helpers
+
+MAN8_RST = $(filter-out $(HELPERS_RST),$(wildcard *.rst))
_DOC_MAN8 = $(patsubst %.rst,%.8,$(MAN8_RST))
DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8))
-man: man8
+man: man8 helpers
man8: $(DOC_MAN8)
RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
@@ -31,16 +34,16 @@ ifndef RST2MAN_DEP
endif
$(QUIET_GEN)rst2man $< > $@
-clean:
+clean: helpers-clean
$(call QUIET_CLEAN, Documentation)
$(Q)$(RM) $(DOC_MAN8)
-install: man
+install: man helpers-install
$(call QUIET_INSTALL, Documentation-man)
$(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man8dir)
$(Q)$(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir)
-uninstall:
+uninstall: helpers-uninstall
$(call QUIET_UNINST, Documentation-man)
$(Q)$(RM) $(addprefix $(DESTDIR)$(man8dir)/,$(_DOC_MAN8))
$(Q)$(RMDIR) $(DESTDIR)$(man8dir)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index 7b0e6d453e92..edbe81534c6d 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -15,12 +15,13 @@ SYNOPSIS
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
*COMMANDS* :=
- { **show** | **list** | **attach** | **detach** | **help** }
+ { **show** | **list** | **tree** | **attach** | **detach** | **help** }
MAP COMMANDS
=============
| **bpftool** **cgroup { show | list }** *CGROUP*
+| **bpftool** **cgroup tree** [*CGROUP_ROOT*]
| **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
| **bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
| **bpftool** **cgroup help**
@@ -39,6 +40,15 @@ DESCRIPTION
Output will start with program ID followed by attach type,
attach flags and program name.
+ **bpftool cgroup tree** [*CGROUP_ROOT*]
+ Iterate over all cgroups in *CGROUP_ROOT* and list all
+ attached programs. If *CGROUP_ROOT* is not specified,
+ bpftool uses the cgroup v2 mountpoint.
+
+ The output is similar to that of the cgroup show/list
+ commands: it starts with the absolute cgroup path, followed
+ by the program ID, attach type, attach flags and program name.
+
**bpftool cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
Attach program *PROG* to the cgroup *CGROUP* with attach type
*ATTACH_TYPE* and optional *ATTACH_FLAGS*.
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 43d34a5c3ec5..64156a16d530 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -24,10 +24,20 @@ MAP COMMANDS
| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual**}]
| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}]
| **bpftool** **prog pin** *PROG* *FILE*
-| **bpftool** **prog load** *OBJ* *FILE*
+| **bpftool** **prog load** *OBJ* *FILE* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*]
| **bpftool** **prog help**
|
+| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
+| *TYPE* := {
+| **socket** | **kprobe** | **kretprobe** | **classifier** | **action** |
+| **tracepoint** | **raw_tracepoint** | **xdp** | **perf_event** | **cgroup/skb** |
+| **cgroup/sock** | **cgroup/dev** | **lwt_in** | **lwt_out** | **lwt_xmit** |
+| **lwt_seg6local** | **sockops** | **sk_skb** | **sk_msg** | **lirc_mode2** |
+| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** |
+| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6**
+| }
+
DESCRIPTION
===========
@@ -64,8 +74,19 @@ DESCRIPTION
Note: *FILE* must be located in *bpffs* mount.
- **bpftool prog load** *OBJ* *FILE*
+ **bpftool prog load** *OBJ* *FILE* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*]
Load bpf program from binary *OBJ* and pin as *FILE*.
+ **type** is optional; if not specified, the program type is
+ inferred from section names.
+ By default bpftool will create new maps as declared in the ELF
+ object being loaded. The **map** parameter allows for the reuse
+ of existing maps. It can be specified multiple times, each
+ time for a different map. *IDX* refers to the index of the map
+ to be replaced in the ELF file, counting from 0, while *NAME*
+ allows replacing a map by name. *MAP* specifies the map to
+ use, referring to it by **id** or through a **pinned** file.
+ If **dev** *NAME* is specified, the program will be loaded onto
+ the given networking device (offload).
Note: *FILE* must be located in *bpffs* mount.
@@ -159,6 +180,14 @@ EXAMPLES
mov %rbx,0x0(%rbp)
48 89 5d 00
+|
+| **# bpftool prog load xdp1_kern.o /sys/fs/bpf/xdp1 type xdp map name rxcnt id 7**
+| **# bpftool prog show pinned /sys/fs/bpf/xdp1**
+| 9: xdp name xdp_prog1 tag 539ec6ce11b52f98 gpl
+| loaded_at 2018-06-25T16:17:31-0700 uid 0
+| xlated 488B jited 336B memlock 4096B map_ids 7
+| **# rm /sys/fs/bpf/xdp1**
+|
SEE ALSO
========
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 892dbf095bff..74288a2197ab 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -23,10 +23,10 @@ endif
LIBBPF = $(BPF_PATH)libbpf.a
-BPFTOOL_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
+BPFTOOL_VERSION := $(shell make --no-print-directory -sC ../../.. kernelversion)
$(LIBBPF): FORCE
- $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
+ $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a
$(LIBBPF)-clean:
$(call QUIET_CLEAN, libbpf)
@@ -52,7 +52,7 @@ INSTALL ?= install
RM ?= rm -f
FEATURE_USER = .bpftool
-FEATURE_TESTS = libbfd disassembler-four-args
+FEATURE_TESTS = libbfd disassembler-four-args reallocarray
FEATURE_DISPLAY = libbfd disassembler-four-args
check_feat := 1
@@ -75,6 +75,10 @@ ifeq ($(feature-disassembler-four-args), 1)
CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
endif
+ifeq ($(feature-reallocarray), 0)
+CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
+endif
+
include $(wildcard $(OUTPUT)*.d)
all: $(OUTPUT)bpftool
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 1e1083321643..598066c40191 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -99,6 +99,35 @@ _bpftool_get_prog_tags()
command sed -n 's/.*"tag": "\(.*\)",$/\1/p' )" -- "$cur" ) )
}
+_bpftool_get_obj_map_names()
+{
+ local obj
+
+ obj=$1
+
+ maps=$(objdump -j maps -t $obj 2>/dev/null | \
+ command awk '/g . maps/ {print $NF}')
+
+ COMPREPLY+=( $( compgen -W "$maps" -- "$cur" ) )
+}
+
+_bpftool_get_obj_map_idxs()
+{
+ local obj
+
+ obj=$1
+
+ nmaps=$(objdump -j maps -t $obj 2>/dev/null | grep -c 'g . maps')
+
+ COMPREPLY+=( $( compgen -W "$(seq 0 $((nmaps - 1)))" -- "$cur" ) )
+}
+
+_sysfs_get_netdevs()
+{
+ COMPREPLY+=( $( compgen -W "$( ls /sys/class/net 2>/dev/null )" -- \
+ "$cur" ) )
+}
+
# For bpftool map update: retrieve type of the map to update.
_bpftool_map_update_map_type()
{
@@ -153,6 +182,13 @@ _bpftool()
local cur prev words objword
_init_completion || return
+ # Deal with options
+ if [[ ${words[cword]} == -* ]]; then
+ local c='--version --json --pretty --bpffs'
+ COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
+ return 0
+ fi
+
# Deal with simplest keywords
case $prev in
help|hex|opcodes|visual)
@@ -172,20 +208,23 @@ _bpftool()
;;
esac
- # Search for object and command
- local object command cmdword
- for (( cmdword=1; cmdword < ${#words[@]}-1; cmdword++ )); do
- [[ -n $object ]] && command=${words[cmdword]} && break
- [[ ${words[cmdword]} != -* ]] && object=${words[cmdword]}
+ # Remove all options so completions don't have to deal with them.
+ local i
+ for (( i=1; i < ${#words[@]}; )); do
+ if [[ ${words[i]::1} == - ]]; then
+ words=( "${words[@]:0:i}" "${words[@]:i+1}" )
+ [[ $i -le $cword ]] && cword=$(( cword - 1 ))
+ else
+ i=$(( ++i ))
+ fi
done
+ cur=${words[cword]}
+ prev=${words[cword - 1]}
+
+ local object=${words[1]} command=${words[2]}
- if [[ -z $object ]]; then
+ if [[ -z $object || $cword -eq 1 ]]; then
case $cur in
- -*)
- local c='--version --json --pretty'
- COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
- return 0
- ;;
*)
COMPREPLY=( $( compgen -W "$( bpftool help 2>&1 | \
command sed \
@@ -204,12 +243,14 @@ _bpftool()
# Completion depends on object and command in use
case $object in
prog)
- case $prev in
- id)
- _bpftool_get_prog_ids
- return 0
- ;;
- esac
+ if [[ $command != "load" ]]; then
+ case $prev in
+ id)
+ _bpftool_get_prog_ids
+ return 0
+ ;;
+ esac
+ fi
local PROG_TYPE='id pinned tag'
case $command in
@@ -252,8 +293,57 @@ _bpftool()
return 0
;;
load)
- _filedir
- return 0
+ local obj
+
+ if [[ ${#words[@]} -lt 6 ]]; then
+ _filedir
+ return 0
+ fi
+
+ obj=${words[3]}
+
+ if [[ ${words[-4]} == "map" ]]; then
+ COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) )
+ return 0
+ fi
+ if [[ ${words[-3]} == "map" ]]; then
+ if [[ ${words[-2]} == "idx" ]]; then
+ _bpftool_get_obj_map_idxs $obj
+ elif [[ ${words[-2]} == "name" ]]; then
+ _bpftool_get_obj_map_names $obj
+ fi
+ return 0
+ fi
+ if [[ ${words[-2]} == "map" ]]; then
+ COMPREPLY=( $( compgen -W "idx name" -- "$cur" ) )
+ return 0
+ fi
+
+ case $prev in
+ type)
+ COMPREPLY=( $( compgen -W "socket kprobe kretprobe classifier action tracepoint raw_tracepoint xdp perf_event cgroup/skb cgroup/sock cgroup/dev lwt_in lwt_out lwt_xmit lwt_seg6local sockops sk_skb sk_msg lirc_mode2 cgroup/bind4 cgroup/bind6 cgroup/connect4 cgroup/connect6 cgroup/sendmsg4 cgroup/sendmsg6 cgroup/post_bind4 cgroup/post_bind6" -- \
+ "$cur" ) )
+ return 0
+ ;;
+ id)
+ _bpftool_get_map_ids
+ return 0
+ ;;
+ pinned)
+ _filedir
+ return 0
+ ;;
+ dev)
+ _sysfs_get_netdevs
+ return 0
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "map" -- "$cur" ) )
+ _bpftool_once_attr 'type'
+ _bpftool_once_attr 'dev'
+ return 0
+ ;;
+ esac
;;
*)
[[ $prev == $object ]] && \
@@ -404,6 +494,10 @@ _bpftool()
_filedir
return 0
;;
+ tree)
+ _filedir
+ return 0
+ ;;
attach|detach)
local ATTACH_TYPES='ingress egress sock_create sock_ops \
device bind4 bind6 post_bind4 post_bind6 connect4 \
@@ -445,7 +539,7 @@ _bpftool()
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'help attach detach \
- show list' -- "$cur" ) )
+ show list tree' -- "$cur" ) )
;;
esac
;;
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
new file mode 100644
index 000000000000..55bc512a1831
--- /dev/null
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Facebook */
+
+#include <ctype.h>
+#include <stdio.h> /* for (FILE *) used by json_writer */
+#include <string.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/btf.h>
+#include <linux/err.h>
+
+#include "btf.h"
+#include "json_writer.h"
+#include "main.h"
+
+#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
+#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
+#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
+#define BITS_ROUNDUP_BYTES(bits) \
+ (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
+
+static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
+ __u8 bit_offset, const void *data);
+
+static void btf_dumper_ptr(const void *data, json_writer_t *jw,
+ bool is_plain_text)
+{
+ if (is_plain_text)
+ jsonw_printf(jw, "%p", (void *)*(unsigned long *)data);
+ else
+ jsonw_printf(jw, "%lu", *(unsigned long *)data);
+}
+
+static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
+ const void *data)
+{
+ int actual_type_id;
+
+ actual_type_id = btf__resolve_type(d->btf, type_id);
+ if (actual_type_id < 0)
+ return actual_type_id;
+
+ return btf_dumper_do_type(d, actual_type_id, 0, data);
+}
+
+static void btf_dumper_enum(const void *data, json_writer_t *jw)
+{
+ jsonw_printf(jw, "%d", *(int *)data);
+}
+
+static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
+ const void *data)
+{
+ const struct btf_type *t = btf__type_by_id(d->btf, type_id);
+ struct btf_array *arr = (struct btf_array *)(t + 1);
+ long long elem_size;
+ int ret = 0;
+ __u32 i;
+
+ elem_size = btf__resolve_size(d->btf, arr->type);
+ if (elem_size < 0)
+ return elem_size;
+
+ jsonw_start_array(d->jw);
+ for (i = 0; i < arr->nelems; i++) {
+ ret = btf_dumper_do_type(d, arr->type, 0,
+ data + i * elem_size);
+ if (ret)
+ break;
+ }
+
+ jsonw_end_array(d->jw);
+ return ret;
+}
+
+static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
+ const void *data, json_writer_t *jw,
+ bool is_plain_text)
+{
+ int left_shift_bits, right_shift_bits;
+ int nr_bits = BTF_INT_BITS(int_type);
+ int total_bits_offset;
+ int bytes_to_copy;
+ int bits_to_copy;
+ __u64 print_num;
+
+ total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
+ data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
+ bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
+ bits_to_copy = bit_offset + nr_bits;
+ bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
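+ /*
+ * Example: bit_offset 3 and nr_bits 5 give bits_to_copy 8, so a
+ * single byte is copied; the shifts below then strip the bits
+ * around the field so only its value is left in print_num.
+ */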
+
+ print_num = 0;
+ memcpy(&print_num, data, bytes_to_copy);
+#if defined(__BIG_ENDIAN_BITFIELD)
+ left_shift_bits = bit_offset;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ left_shift_bits = 64 - bits_to_copy;
+#else
+#error neither big nor little endian
+#endif
+ right_shift_bits = 64 - nr_bits;
+
+ print_num <<= left_shift_bits;
+ print_num >>= right_shift_bits;
+ if (is_plain_text)
+ jsonw_printf(jw, "0x%llx", print_num);
+ else
+ jsonw_printf(jw, "%llu", print_num);
+}
+
+static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
+ const void *data, json_writer_t *jw,
+ bool is_plain_text)
+{
+ __u32 *int_type;
+ __u32 nr_bits;
+
+ int_type = (__u32 *)(t + 1);
+ nr_bits = BTF_INT_BITS(*int_type);
+ /* if this is bit field */
+ if (bit_offset || BTF_INT_OFFSET(*int_type) ||
+ BITS_PER_BYTE_MASKED(nr_bits)) {
+ btf_dumper_int_bits(*int_type, bit_offset, data, jw,
+ is_plain_text);
+ return 0;
+ }
+
+ switch (BTF_INT_ENCODING(*int_type)) {
+ case 0:
+ if (BTF_INT_BITS(*int_type) == 64)
+ jsonw_printf(jw, "%lu", *(__u64 *)data);
+ else if (BTF_INT_BITS(*int_type) == 32)
+ jsonw_printf(jw, "%u", *(__u32 *)data);
+ else if (BTF_INT_BITS(*int_type) == 16)
+ jsonw_printf(jw, "%hu", *(__u16 *)data);
+ else if (BTF_INT_BITS(*int_type) == 8)
+ jsonw_printf(jw, "%hhu", *(__u8 *)data);
+ else
+ btf_dumper_int_bits(*int_type, bit_offset, data, jw,
+ is_plain_text);
+ break;
+ case BTF_INT_SIGNED:
+ if (BTF_INT_BITS(*int_type) == 64)
+ jsonw_printf(jw, "%ld", *(long long *)data);
+ else if (BTF_INT_BITS(*int_type) == 32)
+ jsonw_printf(jw, "%d", *(int *)data);
+ else if (BTF_INT_BITS(*int_type) == 16)
+ jsonw_printf(jw, "%hd", *(short *)data);
+ else if (BTF_INT_BITS(*int_type) == 8)
+ jsonw_printf(jw, "%hhd", *(char *)data);
+ else
+ btf_dumper_int_bits(*int_type, bit_offset, data, jw,
+ is_plain_text);
+ break;
+ case BTF_INT_CHAR:
+ if (isprint(*(char *)data))
+ jsonw_printf(jw, "\"%c\"", *(char *)data);
+ else
+ if (is_plain_text)
+ jsonw_printf(jw, "0x%hhx", *(char *)data);
+ else
+ jsonw_printf(jw, "\"\\u00%02hhx\"",
+ *(char *)data);
+ break;
+ case BTF_INT_BOOL:
+ jsonw_bool(jw, *(int *)data);
+ break;
+ default:
+ /* shouldn't happen */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
+ const void *data)
+{
+ const struct btf_type *t;
+ struct btf_member *m;
+ const void *data_off;
+ int ret = 0;
+ int i, vlen;
+
+ t = btf__type_by_id(d->btf, type_id);
+ if (!t)
+ return -EINVAL;
+
+ vlen = BTF_INFO_VLEN(t->info);
+ jsonw_start_object(d->jw);
+ m = (struct btf_member *)(t + 1);
+
+ for (i = 0; i < vlen; i++) {
+ data_off = data + BITS_ROUNDDOWN_BYTES(m[i].offset);
+ jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off));
+ ret = btf_dumper_do_type(d, m[i].type,
+ BITS_PER_BYTE_MASKED(m[i].offset),
+ data_off);
+ if (ret)
+ break;
+ }
+
+ jsonw_end_object(d->jw);
+
+ return ret;
+}
+
+static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
+ __u8 bit_offset, const void *data)
+{
+ const struct btf_type *t = btf__type_by_id(d->btf, type_id);
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_INT:
+ return btf_dumper_int(t, bit_offset, data, d->jw,
+ d->is_plain_text);
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ return btf_dumper_struct(d, type_id, data);
+ case BTF_KIND_ARRAY:
+ return btf_dumper_array(d, type_id, data);
+ case BTF_KIND_ENUM:
+ btf_dumper_enum(data, d->jw);
+ return 0;
+ case BTF_KIND_PTR:
+ btf_dumper_ptr(data, d->jw, d->is_plain_text);
+ return 0;
+ case BTF_KIND_UNKN:
+ jsonw_printf(d->jw, "(unknown)");
+ return 0;
+ case BTF_KIND_FWD:
+ /* map key or value can't be forward */
+ jsonw_printf(d->jw, "(fwd-kind-invalid)");
+ return -EINVAL;
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ return btf_dumper_modifier(d, type_id, data);
+ default:
+ jsonw_printf(d->jw, "(unsupported-kind");
+ return -EINVAL;
+ }
+}
+
+int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
+ const void *data)
+{
+ return btf_dumper_do_type(d, type_id, 0, data);
+}
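
For orientation, a minimal caller sketch for the new dumper, mirroring the struct btf_dumper/btf_dumper_type() interface declared in main.h further down; the jsonw_*() calls are bpftool's own json_writer helpers, and the btf/value inputs are assumed to come from the usual map-introspection path:

/* Hedged caller sketch -- illustrative, not part of the patch. */
#include <stdio.h>

#include "btf.h"		/* struct btf, from tools/lib/bpf */
#include "json_writer.h"	/* json_writer_t */
#include "main.h"		/* struct btf_dumper, btf_dumper_type() */

static int dump_one_value(const struct btf *btf, __u32 value_type_id,
			  const void *value)
{
	json_writer_t *jw = jsonw_new(stdout);
	struct btf_dumper d = {
		.btf = btf,
		.jw = jw,
		.is_plain_text = true,	/* 0x ints, %p pointers */
	};
	int ret;

	if (!jw)
		return -1;
	jsonw_pretty(jw, true);
	ret = btf_dumper_type(&d, value_type_id, value);
	jsonw_destroy(&jw);
	return ret;
}
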
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index 16bee011e16c..ee7a9765c6b3 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -2,7 +2,12 @@
// Copyright (C) 2017 Facebook
// Author: Roman Gushchin <guro@fb.com>
+#define _XOPEN_SOURCE 500
+#include <errno.h>
#include <fcntl.h>
+#include <ftw.h>
+#include <mntent.h>
+#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
@@ -53,7 +58,8 @@ static enum bpf_attach_type parse_attach_type(const char *str)
}
static int show_bpf_prog(int id, const char *attach_type_str,
- const char *attach_flags_str)
+ const char *attach_flags_str,
+ int level)
{
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
@@ -78,7 +84,8 @@ static int show_bpf_prog(int id, const char *attach_type_str,
jsonw_string_field(json_wtr, "name", info.name);
jsonw_end_object(json_wtr);
} else {
- printf("%-8u %-15s %-15s %-15s\n", info.id,
+ printf("%s%-8u %-15s %-15s %-15s\n", level ? " " : "",
+ info.id,
attach_type_str,
attach_flags_str,
info.name);
@@ -88,7 +95,20 @@ static int show_bpf_prog(int id, const char *attach_type_str,
return 0;
}
-static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
+static int count_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
+{
+ __u32 prog_cnt = 0;
+ int ret;
+
+ ret = bpf_prog_query(cgroup_fd, type, 0, NULL, NULL, &prog_cnt);
+ if (ret)
+ return -1;
+
+ return prog_cnt;
+}
+
+static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
+ int level)
{
__u32 prog_ids[1024] = {0};
char *attach_flags_str;
@@ -123,7 +143,7 @@ static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
for (iter = 0; iter < prog_cnt; iter++)
show_bpf_prog(prog_ids[iter], attach_type_strings[type],
- attach_flags_str);
+ attach_flags_str, level);
return 0;
}
@@ -161,7 +181,7 @@ static int do_show(int argc, char **argv)
* If we were able to get the show for at least one
* attach type, let's return 0.
*/
- if (show_attached_bpf_progs(cgroup_fd, type) == 0)
+ if (show_attached_bpf_progs(cgroup_fd, type, 0) == 0)
ret = 0;
}
@@ -173,6 +193,143 @@ exit:
return ret;
}
+/*
+ * To distinguish nftw() errors and do_show_tree_fn() errors
+ * and avoid duplicating error messages, let's return -2
+ * from do_show_tree_fn() in case of error.
+ */
+#define NFTW_ERR -1
+#define SHOW_TREE_FN_ERR -2
+static int do_show_tree_fn(const char *fpath, const struct stat *sb,
+ int typeflag, struct FTW *ftw)
+{
+ enum bpf_attach_type type;
+ bool skip = true;
+ int cgroup_fd;
+
+ if (typeflag != FTW_D)
+ return 0;
+
+ cgroup_fd = open(fpath, O_RDONLY);
+ if (cgroup_fd < 0) {
+ p_err("can't open cgroup %s: %s", fpath, strerror(errno));
+ return SHOW_TREE_FN_ERR;
+ }
+
+ for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+ int count = count_attached_bpf_progs(cgroup_fd, type);
+
+ if (count < 0 && errno != EINVAL) {
+ p_err("can't query bpf programs attached to %s: %s",
+ fpath, strerror(errno));
+ close(cgroup_fd);
+ return SHOW_TREE_FN_ERR;
+ }
+ if (count > 0) {
+ skip = false;
+ break;
+ }
+ }
+
+ if (skip) {
+ close(cgroup_fd);
+ return 0;
+ }
+
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ jsonw_string_field(json_wtr, "cgroup", fpath);
+ jsonw_name(json_wtr, "programs");
+ jsonw_start_array(json_wtr);
+ } else {
+ printf("%s\n", fpath);
+ }
+
+ for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
+ show_attached_bpf_progs(cgroup_fd, type, ftw->level);
+
+ if (json_output) {
+ jsonw_end_array(json_wtr);
+ jsonw_end_object(json_wtr);
+ }
+
+ close(cgroup_fd);
+
+ return 0;
+}
+
+static char *find_cgroup_root(void)
+{
+ struct mntent *mnt;
+ FILE *f;
+
+ f = fopen("/proc/mounts", "r");
+ if (f == NULL)
+ return NULL;
+
+ while ((mnt = getmntent(f))) {
+ if (strcmp(mnt->mnt_type, "cgroup2") == 0) {
+ fclose(f);
+ return strdup(mnt->mnt_dir);
+ }
+ }
+
+ fclose(f);
+ return NULL;
+}
+
+static int do_show_tree(int argc, char **argv)
+{
+ char *cgroup_root;
+ int ret;
+
+ switch (argc) {
+ case 0:
+ cgroup_root = find_cgroup_root();
+ if (!cgroup_root) {
+ p_err("cgroup v2 isn't mounted");
+ return -1;
+ }
+ break;
+ case 1:
+ cgroup_root = argv[0];
+ break;
+ default:
+ p_err("too many parameters for cgroup tree");
+ return -1;
+ }
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ else
+ printf("%s\n"
+ "%-8s %-15s %-15s %-15s\n",
+ "CgroupPath",
+ "ID", "AttachType", "AttachFlags", "Name");
+
+ switch (nftw(cgroup_root, do_show_tree_fn, 1024, FTW_MOUNT)) {
+ case NFTW_ERR:
+ p_err("can't iterate over %s: %s", cgroup_root,
+ strerror(errno));
+ ret = -1;
+ break;
+ case SHOW_TREE_FN_ERR:
+ ret = -1;
+ break;
+ default:
+ ret = 0;
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ if (argc == 0)
+ free(cgroup_root);
+
+ return ret;
+}
+
static int do_attach(int argc, char **argv)
{
enum bpf_attach_type attach_type;
@@ -289,6 +446,7 @@ static int do_help(int argc, char **argv)
fprintf(stderr,
"Usage: %s %s { show | list } CGROUP\n"
+ " %s %s tree [CGROUP_ROOT]\n"
" %s %s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n"
" %s %s detach CGROUP ATTACH_TYPE PROG\n"
" %s %s help\n"
@@ -298,6 +456,7 @@ static int do_help(int argc, char **argv)
" " HELP_SPEC_PROGRAM "\n"
" " HELP_SPEC_OPTIONS "\n"
"",
+ bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2]);
@@ -307,6 +466,7 @@ static int do_help(int argc, char **argv)
static const struct cmd cmds[] = {
{ "show", do_show },
{ "list", do_show },
+ { "tree", do_show_tree },
{ "attach", do_attach },
{ "detach", do_detach },
{ "help", do_help },
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 3f140eff039f..b3a0709ea7ed 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -31,8 +31,6 @@
* SOFTWARE.
*/
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index eea7f14355f3..d15a62be6cf0 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -31,8 +31,6 @@
* SOFTWARE.
*/
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
#include <bfd.h>
#include <ctype.h>
#include <errno.h>
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 63fdb310b9a4..238e734d75b3 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -31,8 +31,6 @@
* SOFTWARE.
*/
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
#ifndef __BPF_TOOL_H
#define __BPF_TOOL_H
@@ -44,6 +42,7 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/hashtable.h>
+#include <tools/libc_compat.h>
#include "json_writer.h"
@@ -52,6 +51,21 @@
#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
#define BAD_ARG() ({ p_err("what is '%s'?", *argv); -1; })
+#define GET_ARG() ({ argc--; *argv++; })
+#define REQ_ARGS(cnt) \
+ ({ \
+ int _cnt = (cnt); \
+ bool _res; \
+ \
+ if (argc < _cnt) { \
+ p_err("'%s' needs at least %d arguments, %d found", \
+ argv[-1], _cnt, argc); \
+ _res = false; \
+ } else { \
+ _res = true; \
+ } \
+ _res; \
+ })
#define ERR_MAX_LEN 1024
@@ -61,6 +75,8 @@
"PROG := { id PROG_ID | pinned FILE | tag PROG_TAG }"
#define HELP_SPEC_OPTIONS \
"OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} }"
+#define HELP_SPEC_MAP \
+ "MAP := { id MAP_ID | pinned FILE }"
enum bpf_obj_type {
BPF_OBJ_UNKNOWN,
@@ -122,6 +138,7 @@ int do_cgroup(int argc, char **arg);
int do_perf(int argc, char **arg);
int prog_parse_fd(int *argc, char ***argv);
+int map_parse_fd(int *argc, char ***argv);
int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len);
void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
@@ -133,4 +150,19 @@ unsigned int get_page_size(void);
unsigned int get_possible_cpus(void);
const char *ifindex_to_bfd_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino);
+struct btf_dumper {
+ const struct btf *btf;
+ json_writer_t *jw;
+ bool is_plain_text;
+};
+
+/* btf_dumper_type - print data along with type information
+ * @d: an instance containing context for dumping types
+ * @type_id: index in the btf->types array; points to the type to be dumped
+ * @data: pointer to the actual data, i.e. the values to be printed
+ *
+ * Returns zero on success and negative error code otherwise
+ */
+int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
+ const void *data);
#endif
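
GET_ARG()/REQ_ARGS() above formalise bpftool's argument convention: check the remaining count once, then consume positionally. A simplified, self-contained sketch of that shape (the macros here are local stand-ins, since the real ones call p_err()):

/* Local stand-ins for GET_ARG()/REQ_ARGS() -- the real ones use p_err(). */
#include <stdbool.h>
#include <stdio.h>

#define GET_ARG() ({ argc--; *argv++; })
#define REQ_ARGS(cnt) \
	({ \
		int _cnt = (cnt); \
		bool _res = argc >= _cnt; \
		if (!_res) \
			fprintf(stderr, \
				"'%s' needs at least %d arguments, %d found\n", \
				argv[-1], _cnt, argc); \
		_res; \
	})

static int do_load_like(int argc, char **argv)
{
	const char *objfile, *pinfile;

	if (!REQ_ARGS(2))	/* OBJ and FILE, as in do_load() */
		return -1;
	objfile = GET_ARG();
	pinfile = GET_ARG();
	printf("would load %s and pin at %s (%d args left)\n",
	       objfile, pinfile, argc);
	return 0;
}

int main(int argc, char **argv)
{
	if (argc < 2)
		return 0;
	return do_load_like(argc - 1, argv + 1) ? 1 : 0;
}
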
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index f74a8bcbda87..b2ec20e562bd 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -31,11 +31,10 @@
* SOFTWARE.
*/
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <stdbool.h>
#include <stdio.h>
@@ -47,6 +46,8 @@
#include <bpf.h>
+#include "btf.h"
+#include "json_writer.h"
#include "main.h"
static const char * const map_type_name[] = {
@@ -68,6 +69,7 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_SOCKMAP] = "sockmap",
[BPF_MAP_TYPE_CPUMAP] = "cpumap",
[BPF_MAP_TYPE_SOCKHASH] = "sockhash",
+ [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
};
static bool map_is_per_cpu(__u32 type)
@@ -97,7 +99,7 @@ static void *alloc_value(struct bpf_map_info *info)
return malloc(info->value_size);
}
-static int map_parse_fd(int *argc, char ***argv)
+int map_parse_fd(int *argc, char ***argv)
{
int fd;
@@ -152,8 +154,109 @@ int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)
return fd;
}
+static int do_dump_btf(const struct btf_dumper *d,
+ struct bpf_map_info *map_info, void *key,
+ void *value)
+{
+ int ret;
+
+ /* start of key-value pair */
+ jsonw_start_object(d->jw);
+
+ jsonw_name(d->jw, "key");
+
+ ret = btf_dumper_type(d, map_info->btf_key_type_id, key);
+ if (ret)
+ goto err_end_obj;
+
+ jsonw_name(d->jw, "value");
+
+ ret = btf_dumper_type(d, map_info->btf_value_type_id, value);
+
+err_end_obj:
+ /* end of key-value pair */
+ jsonw_end_object(d->jw);
+
+ return ret;
+}
+
+static int get_btf(struct bpf_map_info *map_info, struct btf **btf)
+{
+ struct bpf_btf_info btf_info = { 0 };
+ __u32 len = sizeof(btf_info);
+ __u32 last_size;
+ int btf_fd;
+ void *ptr;
+ int err;
+
+ err = 0;
+ *btf = NULL;
+ btf_fd = bpf_btf_get_fd_by_id(map_info->btf_id);
+ if (btf_fd < 0)
+ return 0;
+
+ /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
+ * let's start with a sane default - 4KiB here - and resize it only if
+ * bpf_obj_get_info_by_fd() needs a bigger buffer.
+ */
+ btf_info.btf_size = 4096;
+ last_size = btf_info.btf_size;
+ ptr = malloc(last_size);
+ if (!ptr) {
+ err = -ENOMEM;
+ goto exit_free;
+ }
+
+ memset(ptr, 0, last_size);
+ btf_info.btf = ptr_to_u64(ptr);
+ err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
+
+ if (!err && btf_info.btf_size > last_size) {
+ void *temp_ptr;
+
+ last_size = btf_info.btf_size;
+ temp_ptr = realloc(ptr, last_size);
+ if (!temp_ptr) {
+ err = -ENOMEM;
+ goto exit_free;
+ }
+ ptr = temp_ptr;
+ memset(ptr, 0, last_size);
+ btf_info.btf = ptr_to_u64(ptr);
+ err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
+ }
+
+ if (err || btf_info.btf_size > last_size) {
+ err = errno;
+ goto exit_free;
+ }
+
+ *btf = btf__new((__u8 *)btf_info.btf, btf_info.btf_size, NULL);
+ if (IS_ERR(*btf)) {
+ err = PTR_ERR(*btf);
+ *btf = NULL;
+ }
+
+exit_free:
+ close(btf_fd);
+ free(ptr);
+
+ return err;
+}
+
+static json_writer_t *get_btf_writer(void)
+{
+ json_writer_t *jw = jsonw_new(stdout);
+
+ if (!jw)
+ return NULL;
+ jsonw_pretty(jw, true);
+
+ return jw;
+}
+
static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
- unsigned char *value)
+ unsigned char *value, struct btf *btf)
{
jsonw_start_object(json_wtr);
@@ -162,6 +265,16 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
print_hex_data_json(key, info->key_size);
jsonw_name(json_wtr, "value");
print_hex_data_json(value, info->value_size);
+ if (btf) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = json_wtr,
+ .is_plain_text = false,
+ };
+
+ jsonw_name(json_wtr, "formatted");
+ do_dump_btf(&d, info, key, value);
+ }
} else {
unsigned int i, n, step;
@@ -514,10 +627,12 @@ static int do_show(int argc, char **argv)
static int do_dump(int argc, char **argv)
{
+ struct bpf_map_info info = {};
void *key, *value, *prev_key;
unsigned int num_elems = 0;
- struct bpf_map_info info = {};
__u32 len = sizeof(info);
+ json_writer_t *btf_wtr;
+ struct btf *btf = NULL;
int err;
int fd;
@@ -543,8 +658,27 @@ static int do_dump(int argc, char **argv)
}
prev_key = NULL;
+
+ err = get_btf(&info, &btf);
+ if (err) {
+ p_err("failed to get btf");
+ goto exit_free;
+ }
+
if (json_output)
jsonw_start_array(json_wtr);
+ else
+ if (btf) {
+ btf_wtr = get_btf_writer();
+ if (!btf_wtr) {
+ p_info("failed to create json writer for btf. falling back to plain output");
+ btf__free(btf);
+ btf = NULL;
+ } else {
+ jsonw_start_array(btf_wtr);
+ }
+ }
+
while (true) {
err = bpf_map_get_next_key(fd, prev_key, key);
if (err) {
@@ -555,9 +689,19 @@ static int do_dump(int argc, char **argv)
if (!bpf_map_lookup_elem(fd, key, value)) {
if (json_output)
- print_entry_json(&info, key, value);
+ print_entry_json(&info, key, value, btf);
else
- print_entry_plain(&info, key, value);
+ if (btf) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
+
+ do_dump_btf(&d, &info, key, value);
+ } else {
+ print_entry_plain(&info, key, value);
+ }
} else {
if (json_output) {
jsonw_name(json_wtr, "key");
@@ -580,14 +724,19 @@ static int do_dump(int argc, char **argv)
if (json_output)
jsonw_end_array(json_wtr);
- else
+ else if (btf) {
+ jsonw_end_array(btf_wtr);
+ jsonw_destroy(&btf_wtr);
+ } else {
printf("Found %u element%s\n", num_elems,
num_elems != 1 ? "s" : "");
+ }
exit_free:
free(key);
free(value);
close(fd);
+ btf__free(btf);
return err;
}
@@ -643,6 +792,8 @@ static int do_lookup(int argc, char **argv)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
+ json_writer_t *btf_wtr;
+ struct btf *btf = NULL;
void *key, *value;
int err;
int fd;
@@ -667,27 +818,60 @@ static int do_lookup(int argc, char **argv)
goto exit_free;
err = bpf_map_lookup_elem(fd, key, value);
- if (!err) {
- if (json_output)
- print_entry_json(&info, key, value);
- else
+ if (err) {
+ if (errno == ENOENT) {
+ if (json_output) {
+ jsonw_null(json_wtr);
+ } else {
+ printf("key:\n");
+ fprint_hex(stdout, key, info.key_size, " ");
+ printf("\n\nNot found\n");
+ }
+ } else {
+ p_err("lookup failed: %s", strerror(errno));
+ }
+
+ goto exit_free;
+ }
+
+ /* getting here means bpf_map_lookup_elem() succeeded */
+ err = get_btf(&info, &btf);
+ if (err) {
+ p_err("failed to get btf");
+ goto exit_free;
+ }
+
+ if (json_output) {
+ print_entry_json(&info, key, value, btf);
+ } else if (btf) {
+ /* json_wtr has not been initialised at this point,
+ * so let's create a separate writer for btf
+ */
+ btf_wtr = get_btf_writer();
+ if (!btf_wtr) {
+ p_info("failed to create json writer for btf. falling back to plain output");
+ btf__free(btf);
+ btf = NULL;
print_entry_plain(&info, key, value);
- } else if (errno == ENOENT) {
- if (json_output) {
- jsonw_null(json_wtr);
} else {
- printf("key:\n");
- fprint_hex(stdout, key, info.key_size, " ");
- printf("\n\nNot found\n");
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
+
+ do_dump_btf(&d, &info, key, value);
+ jsonw_destroy(&btf_wtr);
}
} else {
- p_err("lookup failed: %s", strerror(errno));
+ print_entry_plain(&info, key, value);
}
exit_free:
free(key);
free(value);
close(fd);
+ btf__free(btf);
return err;
}
@@ -830,7 +1014,7 @@ static int do_help(int argc, char **argv)
" %s %s event_pipe MAP [cpu N index M]\n"
" %s %s help\n"
"\n"
- " MAP := { id MAP_ID | pinned FILE }\n"
+ " " HELP_SPEC_MAP "\n"
" DATA := { [hex] BYTES }\n"
" " HELP_SPEC_PROGRAM "\n"
" VALUE := { DATA | MAP | PROG }\n"
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 959aa53ab678..dce960d22106 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -31,8 +31,7 @@
* SOFTWARE.
*/
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
+#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
@@ -41,9 +40,12 @@
#include <string.h>
#include <time.h>
#include <unistd.h>
+#include <net/if.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <linux/err.h>
+
#include <bpf.h>
#include <libbpf.h>
@@ -681,31 +683,247 @@ static int do_pin(int argc, char **argv)
return err;
}
+struct map_replace {
+ int idx;
+ int fd;
+ char *name;
+};
+
+int map_replace_compar(const void *p1, const void *p2)
+{
+ const struct map_replace *a = p1, *b = p2;
+
+ return a->idx - b->idx;
+}
+
static int do_load(int argc, char **argv)
{
+ enum bpf_attach_type expected_attach_type;
+ struct bpf_object_open_attr attr = {
+ .prog_type = BPF_PROG_TYPE_UNSPEC,
+ };
+ struct map_replace *map_replace = NULL;
+ unsigned int old_map_fds = 0;
+ struct bpf_program *prog;
struct bpf_object *obj;
- int prog_fd;
-
- if (argc != 2)
- usage();
+ struct bpf_map *map;
+ const char *pinfile;
+ unsigned int i, j;
+ __u32 ifindex = 0;
+ int idx, err;
- if (bpf_prog_load(argv[0], BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) {
- p_err("failed to load program");
+ if (!REQ_ARGS(2))
return -1;
+ attr.file = GET_ARG();
+ pinfile = GET_ARG();
+
+ while (argc) {
+ if (is_prefix(*argv, "type")) {
+ char *type;
+
+ NEXT_ARG();
+
+ if (attr.prog_type != BPF_PROG_TYPE_UNSPEC) {
+ p_err("program type already specified");
+ goto err_free_reuse_maps;
+ }
+ if (!REQ_ARGS(1))
+ goto err_free_reuse_maps;
+
+ /* Put a '/' at the end of type to appease libbpf */
+ type = malloc(strlen(*argv) + 2);
+ if (!type) {
+ p_err("mem alloc failed");
+ goto err_free_reuse_maps;
+ }
+ *type = 0;
+ strcat(type, *argv);
+ strcat(type, "/");
+
+ err = libbpf_prog_type_by_name(type, &attr.prog_type,
+ &expected_attach_type);
+ free(type);
+ if (err < 0) {
+ p_err("unknown program type '%s'", *argv);
+ goto err_free_reuse_maps;
+ }
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "map")) {
+ char *endptr, *name;
+ int fd;
+
+ NEXT_ARG();
+
+ if (!REQ_ARGS(4))
+ goto err_free_reuse_maps;
+
+ if (is_prefix(*argv, "idx")) {
+ NEXT_ARG();
+
+ idx = strtoul(*argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as IDX", *argv);
+ goto err_free_reuse_maps;
+ }
+ name = NULL;
+ } else if (is_prefix(*argv, "name")) {
+ NEXT_ARG();
+
+ name = *argv;
+ idx = -1;
+ } else {
+ p_err("expected 'idx' or 'name', got: '%s'?",
+ *argv);
+ goto err_free_reuse_maps;
+ }
+ NEXT_ARG();
+
+ fd = map_parse_fd(&argc, &argv);
+ if (fd < 0)
+ goto err_free_reuse_maps;
+
+ map_replace = reallocarray(map_replace, old_map_fds + 1,
+ sizeof(*map_replace));
+ if (!map_replace) {
+ p_err("mem alloc failed");
+ goto err_free_reuse_maps;
+ }
+ map_replace[old_map_fds].idx = idx;
+ map_replace[old_map_fds].name = name;
+ map_replace[old_map_fds].fd = fd;
+ old_map_fds++;
+ } else if (is_prefix(*argv, "dev")) {
+ NEXT_ARG();
+
+ if (ifindex) {
+ p_err("offload device already specified");
+ goto err_free_reuse_maps;
+ }
+ if (!REQ_ARGS(1))
+ goto err_free_reuse_maps;
+
+ ifindex = if_nametoindex(*argv);
+ if (!ifindex) {
+ p_err("unrecognized netdevice '%s': %s",
+ *argv, strerror(errno));
+ goto err_free_reuse_maps;
+ }
+ NEXT_ARG();
+ } else {
+ p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
+ *argv);
+ goto err_free_reuse_maps;
+ }
+ }
+
+ obj = bpf_object__open_xattr(&attr);
+ if (IS_ERR_OR_NULL(obj)) {
+ p_err("failed to open object file");
+ goto err_free_reuse_maps;
+ }
+
+ prog = bpf_program__next(NULL, obj);
+ if (!prog) {
+ p_err("object file doesn't contain any bpf program");
+ goto err_close_obj;
+ }
+
+ bpf_program__set_ifindex(prog, ifindex);
+ if (attr.prog_type == BPF_PROG_TYPE_UNSPEC) {
+ const char *sec_name = bpf_program__title(prog, false);
+
+ err = libbpf_prog_type_by_name(sec_name, &attr.prog_type,
+ &expected_attach_type);
+ if (err < 0) {
+ p_err("failed to guess program type based on section name %s\n",
+ sec_name);
+ goto err_close_obj;
+ }
+ }
+ bpf_program__set_type(prog, attr.prog_type);
+ bpf_program__set_expected_attach_type(prog, expected_attach_type);
+
+ qsort(map_replace, old_map_fds, sizeof(*map_replace),
+ map_replace_compar);
+
+ /* After the sort, maps specified by name come first in the list
+ * because they have idx == -1. Resolve them.
+ */
+ j = 0;
+ while (j < old_map_fds && map_replace[j].name) {
+ i = 0;
+ bpf_map__for_each(map, obj) {
+ if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
+ map_replace[j].idx = i;
+ break;
+ }
+ i++;
+ }
+ if (map_replace[j].idx == -1) {
+ p_err("unable to find map '%s'", map_replace[j].name);
+ goto err_close_obj;
+ }
+ j++;
+ }
+ /* Resort if any names were resolved */
+ if (j)
+ qsort(map_replace, old_map_fds, sizeof(*map_replace),
+ map_replace_compar);
+
+ /* Set ifindex and name reuse */
+ j = 0;
+ idx = 0;
+ bpf_map__for_each(map, obj) {
+ if (!bpf_map__is_offload_neutral(map))
+ bpf_map__set_ifindex(map, ifindex);
+
+ if (j < old_map_fds && idx == map_replace[j].idx) {
+ err = bpf_map__reuse_fd(map, map_replace[j++].fd);
+ if (err) {
+ p_err("unable to set up map reuse: %d", err);
+ goto err_close_obj;
+ }
+
+ /* Next reuse wants to apply to the same map */
+ if (j < old_map_fds && map_replace[j].idx == idx) {
+ p_err("replacement for map idx %d specified more than once",
+ idx);
+ goto err_close_obj;
+ }
+ }
+
+ idx++;
+ }
+ if (j < old_map_fds) {
+ p_err("map idx '%d' not used", map_replace[j].idx);
+ goto err_close_obj;
+ }
+
+ err = bpf_object__load(obj);
+ if (err) {
+ p_err("failed to load object file");
+ goto err_close_obj;
}
- if (do_pin_fd(prog_fd, argv[1]))
+ if (do_pin_fd(bpf_program__fd(prog), pinfile))
goto err_close_obj;
if (json_output)
jsonw_null(json_wtr);
bpf_object__close(obj);
+ for (i = 0; i < old_map_fds; i++)
+ close(map_replace[i].fd);
+ free(map_replace);
return 0;
err_close_obj:
bpf_object__close(obj);
+err_free_reuse_maps:
+ for (i = 0; i < old_map_fds; i++)
+ close(map_replace[i].fd);
+ free(map_replace);
return -1;
}
@@ -721,10 +939,19 @@ static int do_help(int argc, char **argv)
" %s %s dump xlated PROG [{ file FILE | opcodes | visual }]\n"
" %s %s dump jited PROG [{ file FILE | opcodes }]\n"
" %s %s pin PROG FILE\n"
- " %s %s load OBJ FILE\n"
+ " %s %s load OBJ FILE [type TYPE] [dev NAME] \\\n"
+ " [map { idx IDX | name NAME } MAP]\n"
" %s %s help\n"
"\n"
+ " " HELP_SPEC_MAP "\n"
" " HELP_SPEC_PROGRAM "\n"
+ " TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
+ " tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
+ " cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
+ " lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
+ " cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
+ " cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
+ " cgroup/sendmsg4 | cgroup/sendmsg6 }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
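
One detail worth noting in do_load() above: map replacements are qsort()ed with map_replace_compar(), so name-based entries (idx == -1) float to the front, get their idx resolved, and the array is re-sorted before the single positional pass. A toy illustration of the ordering ("ctl_map" is just a made-up name):

/* Name-based entries (idx == -1) sort first, then positional ones. */
#include <stdio.h>
#include <stdlib.h>

struct map_replace {
	int idx;
	int fd;
	char *name;
};

static int map_replace_compar(const void *p1, const void *p2)
{
	const struct map_replace *a = p1, *b = p2;

	return a->idx - b->idx;
}

int main(void)
{
	struct map_replace r[] = {
		{ .idx = 2 },
		{ .idx = -1, .name = "ctl_map" },	/* hypothetical name */
		{ .idx = 0 },
	};
	unsigned int i;

	qsort(r, 3, sizeof(r[0]), map_replace_compar);
	for (i = 0; i < 3; i++)
		printf("idx=%d name=%s\n", r[i].idx,
		       r[i].name ? r[i].name : "(by idx)");
	return 0;
}
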
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index b97f1da60dd1..3284759df98a 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -35,6 +35,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#define _GNU_SOURCE
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
@@ -66,9 +67,8 @@ void kernel_syms_load(struct dump_data *dd)
while (!feof(fp)) {
if (!fgets(buff, sizeof(buff), fp))
break;
- tmp = realloc(dd->sym_mapping,
- (dd->sym_count + 1) *
- sizeof(*dd->sym_mapping));
+ tmp = reallocarray(dd->sym_mapping, dd->sym_count + 1,
+ sizeof(*dd->sym_mapping));
if (!tmp) {
out:
free(dd->sym_mapping);
diff --git a/tools/build/Build.include b/tools/build/Build.include
index 950c1504ca37..9ec01f4454f9 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -98,4 +98,4 @@ cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXX
###
## HOSTCC C flags
-host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj))
+host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(KBUILD_HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj))
diff --git a/tools/build/Makefile b/tools/build/Makefile
index 5edf65e684ab..727050c40f09 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -43,7 +43,7 @@ $(OUTPUT)fixdep-in.o: FORCE
$(Q)$(MAKE) $(build)=fixdep
$(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o
- $(QUIET_LINK)$(HOSTCC) $(HOSTLDFLAGS) -o $@ $<
+ $(QUIET_LINK)$(HOSTCC) $(KBUILD_HOSTLDFLAGS) -o $@ $<
FORCE:
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 5b6dda3b1ca8..f216b2f5c3d7 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -57,6 +57,7 @@ FEATURE_TESTS_BASIC := \
libunwind-aarch64 \
pthread-attr-setaffinity-np \
pthread-barrier \
+ reallocarray \
stackprotector-all \
timerfd \
libdw-dwarf-unwind \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index dac9563b5470..0516259be70f 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -14,6 +14,7 @@ FILES= \
test-libaudit.bin \
test-libbfd.bin \
test-disassembler-four-args.bin \
+ test-reallocarray.bin \
test-liberty.bin \
test-liberty-z.bin \
test-cplus-demangle.bin \
@@ -204,6 +205,9 @@ $(OUTPUT)test-libbfd.bin:
$(OUTPUT)test-disassembler-four-args.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
+$(OUTPUT)test-reallocarray.bin:
+ $(BUILD)
+
$(OUTPUT)test-liberty.bin:
$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
diff --git a/tools/build/feature/test-reallocarray.c b/tools/build/feature/test-reallocarray.c
new file mode 100644
index 000000000000..8170de35150d
--- /dev/null
+++ b/tools/build/feature/test-reallocarray.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <stdlib.h>
+
+int main(void)
+{
+ return !!reallocarray(NULL, 1, 1);
+}
diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h
index 70fe61295733..0d35f18006a1 100644
--- a/tools/include/linux/compiler-gcc.h
+++ b/tools/include/linux/compiler-gcc.h
@@ -36,3 +36,7 @@
#endif
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __scanf(a, b) __attribute__((format(scanf, a, b)))
+
+#if GCC_VERSION >= 50100
+#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+#endif
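
COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW keys the overflow.h implementation below off gcc >= 5.1, where the type-generic __builtin_*_overflow() builtins exist: they store the possibly wrapped result and return true on overflow. A quick standalone example:

/* The result lands in c even on overflow; the return value reports it. */
#include <stdio.h>

int main(void)
{
	unsigned int c;

	if (__builtin_mul_overflow(65536u, 65537u, &c))
		printf("overflowed, wrapped value = %u\n", c);	/* 65536 */
	else
		printf("ok: %u\n", c);
	return 0;
}
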
diff --git a/tools/include/linux/overflow.h b/tools/include/linux/overflow.h
new file mode 100644
index 000000000000..8712ff70995f
--- /dev/null
+++ b/tools/include/linux/overflow.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+#ifndef __LINUX_OVERFLOW_H
+#define __LINUX_OVERFLOW_H
+
+#include <linux/compiler.h>
+
+/*
+ * In the fallback code below, we need to compute the minimum and
+ * maximum values representable in a given type. These macros may also
+ * be useful elsewhere, so we provide them outside the
+ * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
+ *
+ * It would seem more obvious to do something like
+ *
+ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
+ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
+ *
+ * Unfortunately, the middle expressions, strictly speaking, have
+ * undefined behaviour, and at least some versions of gcc warn about
+ * the type_max expression (but not if -fsanitize=undefined is in
+ * effect; in that case, the warning is deferred to runtime...).
+ *
+ * The slightly excessive casting in type_min is to make sure the
+ * macros also produce sensible values for the exotic type _Bool. [The
+ * overflow checkers only almost work for _Bool, but that's
+ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
+ * _Bools. Besides, the gcc builtins don't allow _Bool* as third
+ * argument.]
+ *
+ * Idea stolen from
+ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
+ * credit to Christian Biere.
+ */
+#define is_signed_type(type) (((type)(-1)) < (type)1)
+#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
+#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_min(T) ((T)((T)-type_max(T)-(T)1))
+
+
+#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+/*
+ * For simplicity and code hygiene, the fallback code below insists on
+ * a, b and *d having the same type (similar to the min() and max()
+ * macros), whereas gcc's type-generic overflow checkers accept
+ * different types. Hence we don't just make check_add_overflow an
+ * alias for __builtin_add_overflow, but add type checks similar to
+ * below.
+ */
+#define check_add_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ __builtin_add_overflow(__a, __b, __d); \
+})
+
+#define check_sub_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ __builtin_sub_overflow(__a, __b, __d); \
+})
+
+#define check_mul_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ __builtin_mul_overflow(__a, __b, __d); \
+})
+
+#else
+
+
+/* Checking for unsigned overflow is relatively easy without causing UB. */
+#define __unsigned_add_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = __a + __b; \
+ *__d < __a; \
+})
+#define __unsigned_sub_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = __a - __b; \
+ __a < __b; \
+})
+/*
+ * If one of a or b is a compile-time constant, this avoids a division.
+ */
+#define __unsigned_mul_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = __a * __b; \
+ __builtin_constant_p(__b) ? \
+ __b > 0 && __a > type_max(typeof(__a)) / __b : \
+ __a > 0 && __b > type_max(typeof(__b)) / __a; \
+})
+
+/*
+ * For signed types, detecting overflow is much harder, especially if
+ * we want to avoid UB. But the interface of these macros is such that
+ * we must provide a result in *d, and in fact we must produce the
+ * result promised by gcc's builtins, which is simply the possibly
+ * wrapped-around value. Fortunately, we can just formally do the
+ * operations in the widest relevant unsigned type (u64) and then
+ * truncate the result - gcc is smart enough to generate the same code
+ * with and without the (u64) casts.
+ */
+
+/*
+ * Adding two signed integers can overflow only if they have the same
+ * sign, and overflow has happened iff the result has the opposite
+ * sign.
+ */
+#define __signed_add_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = (u64)__a + (u64)__b; \
+ (((~(__a ^ __b)) & (*__d ^ __a)) \
+ & type_min(typeof(__a))) != 0; \
+})
+
+/*
+ * Subtraction is similar, except that overflow can now happen only
+ * when the signs are opposite. In this case, overflow has happened if
+ * the result has the opposite sign of a.
+ */
+#define __signed_sub_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = (u64)__a - (u64)__b; \
+ ((((__a ^ __b)) & (*__d ^ __a)) \
+ & type_min(typeof(__a))) != 0; \
+})
+
+/*
+ * Signed multiplication is rather hard. gcc always follows C99, so
+ * division is truncated towards 0. This means that we can write the
+ * overflow check like this:
+ *
+ * (a > 0 && (b > MAX/a || b < MIN/a)) ||
+ * (a < -1 && (b > MIN/a || b < MAX/a)) ||
+ * (a == -1 && b == MIN)
+ *
+ * The redundant casts of -1 are to silence an annoying -Wtype-limits
+ * (included in -Wextra) warning: When the type is u8 or u16, the
+ * __b_c_e in check_mul_overflow obviously selects
+ * __unsigned_mul_overflow, but unfortunately gcc still parses this
+ * code and warns about the limited range of __b.
+ */
+
+#define __signed_mul_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ typeof(a) __tmax = type_max(typeof(a)); \
+ typeof(a) __tmin = type_min(typeof(a)); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = (u64)__a * (u64)__b; \
+ (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
+ (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
+ (__b == (typeof(__b))-1 && __a == __tmin); \
+})
+
+
+#define check_add_overflow(a, b, d) \
+ __builtin_choose_expr(is_signed_type(typeof(a)), \
+ __signed_add_overflow(a, b, d), \
+ __unsigned_add_overflow(a, b, d))
+
+#define check_sub_overflow(a, b, d) \
+ __builtin_choose_expr(is_signed_type(typeof(a)), \
+ __signed_sub_overflow(a, b, d), \
+ __unsigned_sub_overflow(a, b, d))
+
+#define check_mul_overflow(a, b, d) \
+ __builtin_choose_expr(is_signed_type(typeof(a)), \
+ __signed_mul_overflow(a, b, d), \
+ __unsigned_mul_overflow(a, b, d))
+
+
+#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
+
+/**
+ * array_size() - Calculate size of 2-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ *
+ * Calculates size of 2-dimensional array: @a * @b.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+static inline __must_check size_t array_size(size_t a, size_t b)
+{
+ size_t bytes;
+
+ if (check_mul_overflow(a, b, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+/**
+ * array3_size() - Calculate size of 3-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ * @c: dimension three
+ *
+ * Calculates size of 3-dimensional array: @a * @b * @c.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
+{
+ size_t bytes;
+
+ if (check_mul_overflow(a, b, &bytes))
+ return SIZE_MAX;
+ if (check_mul_overflow(bytes, c, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c)
+{
+ size_t bytes;
+
+ if (check_mul_overflow(n, size, &bytes))
+ return SIZE_MAX;
+ if (check_add_overflow(bytes, c, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+/**
+ * struct_size() - Calculate size of structure with trailing array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @n: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @p followed by an
+ * array of @n @member elements.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size(p, member, n) \
+ __ab_c_size(n, \
+ sizeof(*(p)->member) + __must_be_array((p)->member),\
+ sizeof(*(p)))
+
+#endif /* __LINUX_OVERFLOW_H */
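
A short usage sketch of the helpers above; this assumes tools/include is on the include path and that <stdint.h> is included first so SIZE_MAX is visible to array_size():

/* Hedged usage sketch of array_size() and check_add_overflow(). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include <linux/overflow.h>

int main(void)
{
	size_t n = 1 << 20;
	size_t bytes = array_size(n, sizeof(int));	/* SIZE_MAX on overflow */
	int *p;

	if (bytes == SIZE_MAX)
		return 1;
	p = malloc(bytes);
	if (!p)
		return 1;

	if (check_add_overflow(n, (size_t)1, &n)) {	/* safe increment */
		free(p);
		return 1;
	}

	printf("%zu elements, %zu bytes\n", n, bytes);
	free(p);
	return 0;
}
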
diff --git a/tools/include/tools/libc_compat.h b/tools/include/tools/libc_compat.h
new file mode 100644
index 000000000000..664ced8cb1b0
--- /dev/null
+++ b/tools/include/tools/libc_compat.h
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#ifndef __TOOLS_LIBC_COMPAT_H
+#define __TOOLS_LIBC_COMPAT_H
+
+#include <stdlib.h>
+#include <linux/overflow.h>
+
+#ifdef COMPAT_NEED_REALLOCARRAY
+static inline void *reallocarray(void *ptr, size_t nmemb, size_t size)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(nmemb, size, &bytes)))
+ return NULL;
+ return realloc(ptr, bytes);
+}
+#endif
+#endif
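
The xlated_dumper.c hunk earlier shows the intended call shape; below is a standalone example of the same grow-by-one pattern. On glibc >= 2.26, _GNU_SOURCE exposes the real reallocarray(); elsewhere the fallback above kicks in when COMPAT_NEED_REALLOCARRAY is set by the feature test:

/* Grow-by-one with reallocarray(); NULL on overflow or allocation failure. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int count = 0;
	long *vals = NULL, *tmp;
	long v;

	while (scanf("%ld", &v) == 1) {
		tmp = reallocarray(vals, count + 1, sizeof(*vals));
		if (!tmp) {	/* count * size overflowed, or OOM */
			free(vals);
			return 1;
		}
		vals = tmp;
		vals[count++] = v;
	}
	printf("read %u values\n", count);
	free(vals);
	return 0;
}
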
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
new file mode 100644
index 000000000000..42990676a55e
--- /dev/null
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -0,0 +1,783 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#include <asm/bitsperlong.h>
+
+/*
+ * This file contains the system call numbers, based on the
+ * layout of the x86-64 architecture, which embeds the
+ * pointer to the syscall in the table.
+ *
+ * As a basic principle, no duplication of functionality
+ * should be added, e.g. we don't use lseek when llseek
+ * is present. New architectures should use this file
+ * and implement the less feature-full calls in user space.
+ */
+
+#ifndef __SYSCALL
+#define __SYSCALL(x, y)
+#endif
+
+#if __BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT)
+#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32)
+#else
+#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64)
+#endif
+
+#ifdef __SYSCALL_COMPAT
+#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _comp)
+#define __SC_COMP_3264(_nr, _32, _64, _comp) __SYSCALL(_nr, _comp)
+#else
+#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _sys)
+#define __SC_COMP_3264(_nr, _32, _64, _comp) __SC_3264(_nr, _32, _64)
+#endif
+
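+/*
+ * The list below is consumed "X macro" style: an architecture defines
+ * __SYSCALL(nr, entry) to what it needs before including this header,
+ * along the lines of (sketch only):
+ *
+ * #define __SYSCALL(nr, call) [nr] = (call),
+ * const void *sys_call_table[] = {
+ * #include <asm-generic/unistd.h>
+ * };
+ *
+ * so each __SYSCALL()/__SC_COMP() entry becomes one table slot.
+ */
+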
+#define __NR_io_setup 0
+__SC_COMP(__NR_io_setup, sys_io_setup, compat_sys_io_setup)
+#define __NR_io_destroy 1
+__SYSCALL(__NR_io_destroy, sys_io_destroy)
+#define __NR_io_submit 2
+__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
+#define __NR_io_cancel 3
+__SYSCALL(__NR_io_cancel, sys_io_cancel)
+#define __NR_io_getevents 4
+__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents)
+
+/* fs/xattr.c */
+#define __NR_setxattr 5
+__SYSCALL(__NR_setxattr, sys_setxattr)
+#define __NR_lsetxattr 6
+__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
+#define __NR_fsetxattr 7
+__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
+#define __NR_getxattr 8
+__SYSCALL(__NR_getxattr, sys_getxattr)
+#define __NR_lgetxattr 9
+__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
+#define __NR_fgetxattr 10
+__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
+#define __NR_listxattr 11
+__SYSCALL(__NR_listxattr, sys_listxattr)
+#define __NR_llistxattr 12
+__SYSCALL(__NR_llistxattr, sys_llistxattr)
+#define __NR_flistxattr 13
+__SYSCALL(__NR_flistxattr, sys_flistxattr)
+#define __NR_removexattr 14
+__SYSCALL(__NR_removexattr, sys_removexattr)
+#define __NR_lremovexattr 15
+__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
+#define __NR_fremovexattr 16
+__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
+
+/* fs/dcache.c */
+#define __NR_getcwd 17
+__SYSCALL(__NR_getcwd, sys_getcwd)
+
+/* fs/cookies.c */
+#define __NR_lookup_dcookie 18
+__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
+
+/* fs/eventfd.c */
+#define __NR_eventfd2 19
+__SYSCALL(__NR_eventfd2, sys_eventfd2)
+
+/* fs/eventpoll.c */
+#define __NR_epoll_create1 20
+__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
+#define __NR_epoll_ctl 21
+__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
+#define __NR_epoll_pwait 22
+__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
+
+/* fs/fcntl.c */
+#define __NR_dup 23
+__SYSCALL(__NR_dup, sys_dup)
+#define __NR_dup3 24
+__SYSCALL(__NR_dup3, sys_dup3)
+#define __NR3264_fcntl 25
+__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64)
+
+/* fs/inotify_user.c */
+#define __NR_inotify_init1 26
+__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
+#define __NR_inotify_add_watch 27
+__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
+#define __NR_inotify_rm_watch 28
+__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
+
+/* fs/ioctl.c */
+#define __NR_ioctl 29
+__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl)
+
+/* fs/ioprio.c */
+#define __NR_ioprio_set 30
+__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
+#define __NR_ioprio_get 31
+__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
+
+/* fs/locks.c */
+#define __NR_flock 32
+__SYSCALL(__NR_flock, sys_flock)
+
+/* fs/namei.c */
+#define __NR_mknodat 33
+__SYSCALL(__NR_mknodat, sys_mknodat)
+#define __NR_mkdirat 34
+__SYSCALL(__NR_mkdirat, sys_mkdirat)
+#define __NR_unlinkat 35
+__SYSCALL(__NR_unlinkat, sys_unlinkat)
+#define __NR_symlinkat 36
+__SYSCALL(__NR_symlinkat, sys_symlinkat)
+#define __NR_linkat 37
+__SYSCALL(__NR_linkat, sys_linkat)
+#ifdef __ARCH_WANT_RENAMEAT
+/* renameat is superseded with flags by renameat2 */
+#define __NR_renameat 38
+__SYSCALL(__NR_renameat, sys_renameat)
+#endif /* __ARCH_WANT_RENAMEAT */
+
+/* fs/namespace.c */
+#define __NR_umount2 39
+__SYSCALL(__NR_umount2, sys_umount)
+#define __NR_mount 40
+__SC_COMP(__NR_mount, sys_mount, compat_sys_mount)
+#define __NR_pivot_root 41
+__SYSCALL(__NR_pivot_root, sys_pivot_root)
+
+/* fs/nfsctl.c */
+#define __NR_nfsservctl 42
+__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
+
+/* fs/open.c */
+#define __NR3264_statfs 43
+__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \
+ compat_sys_statfs64)
+#define __NR3264_fstatfs 44
+__SC_COMP_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs, \
+ compat_sys_fstatfs64)
+#define __NR3264_truncate 45
+__SC_COMP_3264(__NR3264_truncate, sys_truncate64, sys_truncate, \
+ compat_sys_truncate64)
+#define __NR3264_ftruncate 46
+__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \
+ compat_sys_ftruncate64)
+
+#define __NR_fallocate 47
+__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate)
+#define __NR_faccessat 48
+__SYSCALL(__NR_faccessat, sys_faccessat)
+#define __NR_chdir 49
+__SYSCALL(__NR_chdir, sys_chdir)
+#define __NR_fchdir 50
+__SYSCALL(__NR_fchdir, sys_fchdir)
+#define __NR_chroot 51
+__SYSCALL(__NR_chroot, sys_chroot)
+#define __NR_fchmod 52
+__SYSCALL(__NR_fchmod, sys_fchmod)
+#define __NR_fchmodat 53
+__SYSCALL(__NR_fchmodat, sys_fchmodat)
+#define __NR_fchownat 54
+__SYSCALL(__NR_fchownat, sys_fchownat)
+#define __NR_fchown 55
+__SYSCALL(__NR_fchown, sys_fchown)
+#define __NR_openat 56
+__SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
+#define __NR_close 57
+__SYSCALL(__NR_close, sys_close)
+#define __NR_vhangup 58
+__SYSCALL(__NR_vhangup, sys_vhangup)
+
+/* fs/pipe.c */
+#define __NR_pipe2 59
+__SYSCALL(__NR_pipe2, sys_pipe2)
+
+/* fs/quota.c */
+#define __NR_quotactl 60
+__SYSCALL(__NR_quotactl, sys_quotactl)
+
+/* fs/readdir.c */
+#define __NR_getdents64 61
+__SYSCALL(__NR_getdents64, sys_getdents64)
+
+/* fs/read_write.c */
+#define __NR3264_lseek 62
+__SC_3264(__NR3264_lseek, sys_llseek, sys_lseek)
+#define __NR_read 63
+__SYSCALL(__NR_read, sys_read)
+#define __NR_write 64
+__SYSCALL(__NR_write, sys_write)
+#define __NR_readv 65
+__SC_COMP(__NR_readv, sys_readv, compat_sys_readv)
+#define __NR_writev 66
+__SC_COMP(__NR_writev, sys_writev, compat_sys_writev)
+#define __NR_pread64 67
+__SC_COMP(__NR_pread64, sys_pread64, compat_sys_pread64)
+#define __NR_pwrite64 68
+__SC_COMP(__NR_pwrite64, sys_pwrite64, compat_sys_pwrite64)
+#define __NR_preadv 69
+__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv)
+#define __NR_pwritev 70
+__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
+
+/* fs/sendfile.c */
+#define __NR3264_sendfile 71
+__SYSCALL(__NR3264_sendfile, sys_sendfile64)
+
+/* fs/select.c */
+#define __NR_pselect6 72
+__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6)
+#define __NR_ppoll 73
+__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll)
+
+/* fs/signalfd.c */
+#define __NR_signalfd4 74
+__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
+
+/* fs/splice.c */
+#define __NR_vmsplice 75
+__SC_COMP(__NR_vmsplice, sys_vmsplice, compat_sys_vmsplice)
+#define __NR_splice 76
+__SYSCALL(__NR_splice, sys_splice)
+#define __NR_tee 77
+__SYSCALL(__NR_tee, sys_tee)
+
+/* fs/stat.c */
+#define __NR_readlinkat 78
+__SYSCALL(__NR_readlinkat, sys_readlinkat)
+#define __NR3264_fstatat 79
+__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
+#define __NR3264_fstat 80
+__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
+
+/* fs/sync.c */
+#define __NR_sync 81
+__SYSCALL(__NR_sync, sys_sync)
+#define __NR_fsync 82
+__SYSCALL(__NR_fsync, sys_fsync)
+#define __NR_fdatasync 83
+__SYSCALL(__NR_fdatasync, sys_fdatasync)
+#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
+#define __NR_sync_file_range2 84
+__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \
+ compat_sys_sync_file_range2)
+#else
+#define __NR_sync_file_range 84
+__SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
+ compat_sys_sync_file_range)
+#endif
+
+/* fs/timerfd.c */
+#define __NR_timerfd_create 85
+__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+#define __NR_timerfd_settime 86
+__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \
+ compat_sys_timerfd_settime)
+#define __NR_timerfd_gettime 87
+__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \
+ compat_sys_timerfd_gettime)
+
+/* fs/utimes.c */
+#define __NR_utimensat 88
+__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat)
+
+/* kernel/acct.c */
+#define __NR_acct 89
+__SYSCALL(__NR_acct, sys_acct)
+
+/* kernel/capability.c */
+#define __NR_capget 90
+__SYSCALL(__NR_capget, sys_capget)
+#define __NR_capset 91
+__SYSCALL(__NR_capset, sys_capset)
+
+/* kernel/exec_domain.c */
+#define __NR_personality 92
+__SYSCALL(__NR_personality, sys_personality)
+
+/* kernel/exit.c */
+#define __NR_exit 93
+__SYSCALL(__NR_exit, sys_exit)
+#define __NR_exit_group 94
+__SYSCALL(__NR_exit_group, sys_exit_group)
+#define __NR_waitid 95
+__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid)
+
+/* kernel/fork.c */
+#define __NR_set_tid_address 96
+__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
+#define __NR_unshare 97
+__SYSCALL(__NR_unshare, sys_unshare)
+
+/* kernel/futex.c */
+#define __NR_futex 98
+__SC_COMP(__NR_futex, sys_futex, compat_sys_futex)
+#define __NR_set_robust_list 99
+__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
+ compat_sys_set_robust_list)
+#define __NR_get_robust_list 100
+__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
+ compat_sys_get_robust_list)
+
+/* kernel/hrtimer.c */
+#define __NR_nanosleep 101
+__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep)
+
+/* kernel/itimer.c */
+#define __NR_getitimer 102
+__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer)
+#define __NR_setitimer 103
+__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer)
+
+/* kernel/kexec.c */
+#define __NR_kexec_load 104
+__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load)
+
+/* kernel/module.c */
+#define __NR_init_module 105
+__SYSCALL(__NR_init_module, sys_init_module)
+#define __NR_delete_module 106
+__SYSCALL(__NR_delete_module, sys_delete_module)
+
+/* kernel/posix-timers.c */
+#define __NR_timer_create 107
+__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
+#define __NR_timer_gettime 108
+__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime)
+#define __NR_timer_getoverrun 109
+__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+#define __NR_timer_settime 110
+__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime)
+#define __NR_timer_delete 111
+__SYSCALL(__NR_timer_delete, sys_timer_delete)
+#define __NR_clock_settime 112
+__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime)
+#define __NR_clock_gettime 113
+__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime)
+#define __NR_clock_getres 114
+__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres)
+#define __NR_clock_nanosleep 115
+__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \
+ compat_sys_clock_nanosleep)
+
+/* kernel/printk.c */
+#define __NR_syslog 116
+__SYSCALL(__NR_syslog, sys_syslog)
+
+/* kernel/ptrace.c */
+#define __NR_ptrace 117
+__SYSCALL(__NR_ptrace, sys_ptrace)
+
+/* kernel/sched/core.c */
+#define __NR_sched_setparam 118
+__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
+#define __NR_sched_setscheduler 119
+__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
+#define __NR_sched_getscheduler 120
+__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
+#define __NR_sched_getparam 121
+__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
+#define __NR_sched_setaffinity 122
+__SC_COMP(__NR_sched_setaffinity, sys_sched_setaffinity, \
+ compat_sys_sched_setaffinity)
+#define __NR_sched_getaffinity 123
+__SC_COMP(__NR_sched_getaffinity, sys_sched_getaffinity, \
+ compat_sys_sched_getaffinity)
+#define __NR_sched_yield 124
+__SYSCALL(__NR_sched_yield, sys_sched_yield)
+#define __NR_sched_get_priority_max 125
+__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
+#define __NR_sched_get_priority_min 126
+__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+#define __NR_sched_rr_get_interval 127
+__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \
+ compat_sys_sched_rr_get_interval)
+
+/* kernel/signal.c */
+#define __NR_restart_syscall 128
+__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
+#define __NR_kill 129
+__SYSCALL(__NR_kill, sys_kill)
+#define __NR_tkill 130
+__SYSCALL(__NR_tkill, sys_tkill)
+#define __NR_tgkill 131
+__SYSCALL(__NR_tgkill, sys_tgkill)
+#define __NR_sigaltstack 132
+__SC_COMP(__NR_sigaltstack, sys_sigaltstack, compat_sys_sigaltstack)
+#define __NR_rt_sigsuspend 133
+__SC_COMP(__NR_rt_sigsuspend, sys_rt_sigsuspend, compat_sys_rt_sigsuspend)
+#define __NR_rt_sigaction 134
+__SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
+#define __NR_rt_sigprocmask 135
+__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
+#define __NR_rt_sigpending 136
+__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
+#define __NR_rt_sigtimedwait 137
+__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \
+ compat_sys_rt_sigtimedwait)
+#define __NR_rt_sigqueueinfo 138
+__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
+ compat_sys_rt_sigqueueinfo)
+#define __NR_rt_sigreturn 139
+__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn)
+
+/* kernel/sys.c */
+#define __NR_setpriority 140
+__SYSCALL(__NR_setpriority, sys_setpriority)
+#define __NR_getpriority 141
+__SYSCALL(__NR_getpriority, sys_getpriority)
+#define __NR_reboot 142
+__SYSCALL(__NR_reboot, sys_reboot)
+#define __NR_setregid 143
+__SYSCALL(__NR_setregid, sys_setregid)
+#define __NR_setgid 144
+__SYSCALL(__NR_setgid, sys_setgid)
+#define __NR_setreuid 145
+__SYSCALL(__NR_setreuid, sys_setreuid)
+#define __NR_setuid 146
+__SYSCALL(__NR_setuid, sys_setuid)
+#define __NR_setresuid 147
+__SYSCALL(__NR_setresuid, sys_setresuid)
+#define __NR_getresuid 148
+__SYSCALL(__NR_getresuid, sys_getresuid)
+#define __NR_setresgid 149
+__SYSCALL(__NR_setresgid, sys_setresgid)
+#define __NR_getresgid 150
+__SYSCALL(__NR_getresgid, sys_getresgid)
+#define __NR_setfsuid 151
+__SYSCALL(__NR_setfsuid, sys_setfsuid)
+#define __NR_setfsgid 152
+__SYSCALL(__NR_setfsgid, sys_setfsgid)
+#define __NR_times 153
+__SC_COMP(__NR_times, sys_times, compat_sys_times)
+#define __NR_setpgid 154
+__SYSCALL(__NR_setpgid, sys_setpgid)
+#define __NR_getpgid 155
+__SYSCALL(__NR_getpgid, sys_getpgid)
+#define __NR_getsid 156
+__SYSCALL(__NR_getsid, sys_getsid)
+#define __NR_setsid 157
+__SYSCALL(__NR_setsid, sys_setsid)
+#define __NR_getgroups 158
+__SYSCALL(__NR_getgroups, sys_getgroups)
+#define __NR_setgroups 159
+__SYSCALL(__NR_setgroups, sys_setgroups)
+#define __NR_uname 160
+__SYSCALL(__NR_uname, sys_newuname)
+#define __NR_sethostname 161
+__SYSCALL(__NR_sethostname, sys_sethostname)
+#define __NR_setdomainname 162
+__SYSCALL(__NR_setdomainname, sys_setdomainname)
+#define __NR_getrlimit 163
+__SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
+#define __NR_setrlimit 164
+__SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
+#define __NR_getrusage 165
+__SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
+#define __NR_umask 166
+__SYSCALL(__NR_umask, sys_umask)
+#define __NR_prctl 167
+__SYSCALL(__NR_prctl, sys_prctl)
+#define __NR_getcpu 168
+__SYSCALL(__NR_getcpu, sys_getcpu)
+
+/* kernel/time.c */
+#define __NR_gettimeofday 169
+__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
+#define __NR_settimeofday 170
+__SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
+#define __NR_adjtimex 171
+__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex)
+
+/* kernel/timer.c */
+#define __NR_getpid 172
+__SYSCALL(__NR_getpid, sys_getpid)
+#define __NR_getppid 173
+__SYSCALL(__NR_getppid, sys_getppid)
+#define __NR_getuid 174
+__SYSCALL(__NR_getuid, sys_getuid)
+#define __NR_geteuid 175
+__SYSCALL(__NR_geteuid, sys_geteuid)
+#define __NR_getgid 176
+__SYSCALL(__NR_getgid, sys_getgid)
+#define __NR_getegid 177
+__SYSCALL(__NR_getegid, sys_getegid)
+#define __NR_gettid 178
+__SYSCALL(__NR_gettid, sys_gettid)
+#define __NR_sysinfo 179
+__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
+
+/* ipc/mqueue.c */
+#define __NR_mq_open 180
+__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
+#define __NR_mq_unlink 181
+__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+#define __NR_mq_timedsend 182
+__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend)
+#define __NR_mq_timedreceive 183
+__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \
+ compat_sys_mq_timedreceive)
+#define __NR_mq_notify 184
+__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
+#define __NR_mq_getsetattr 185
+__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr)
+
+/* ipc/msg.c */
+#define __NR_msgget 186
+__SYSCALL(__NR_msgget, sys_msgget)
+#define __NR_msgctl 187
+__SC_COMP(__NR_msgctl, sys_msgctl, compat_sys_msgctl)
+#define __NR_msgrcv 188
+__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv)
+#define __NR_msgsnd 189
+__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
+
+/* ipc/sem.c */
+#define __NR_semget 190
+__SYSCALL(__NR_semget, sys_semget)
+#define __NR_semctl 191
+__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+#define __NR_semtimedop 192
+__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop)
+#define __NR_semop 193
+__SYSCALL(__NR_semop, sys_semop)
+
+/* ipc/shm.c */
+#define __NR_shmget 194
+__SYSCALL(__NR_shmget, sys_shmget)
+#define __NR_shmctl 195
+__SC_COMP(__NR_shmctl, sys_shmctl, compat_sys_shmctl)
+#define __NR_shmat 196
+__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat)
+#define __NR_shmdt 197
+__SYSCALL(__NR_shmdt, sys_shmdt)
+
+/* net/socket.c */
+#define __NR_socket 198
+__SYSCALL(__NR_socket, sys_socket)
+#define __NR_socketpair 199
+__SYSCALL(__NR_socketpair, sys_socketpair)
+#define __NR_bind 200
+__SYSCALL(__NR_bind, sys_bind)
+#define __NR_listen 201
+__SYSCALL(__NR_listen, sys_listen)
+#define __NR_accept 202
+__SYSCALL(__NR_accept, sys_accept)
+#define __NR_connect 203
+__SYSCALL(__NR_connect, sys_connect)
+#define __NR_getsockname 204
+__SYSCALL(__NR_getsockname, sys_getsockname)
+#define __NR_getpeername 205
+__SYSCALL(__NR_getpeername, sys_getpeername)
+#define __NR_sendto 206
+__SYSCALL(__NR_sendto, sys_sendto)
+#define __NR_recvfrom 207
+__SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom)
+#define __NR_setsockopt 208
+__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt)
+#define __NR_getsockopt 209
+__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt)
+#define __NR_shutdown 210
+__SYSCALL(__NR_shutdown, sys_shutdown)
+#define __NR_sendmsg 211
+__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg)
+#define __NR_recvmsg 212
+__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg)
+
+/* mm/filemap.c */
+#define __NR_readahead 213
+__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead)
+
+/* mm/nommu.c, also with MMU */
+#define __NR_brk 214
+__SYSCALL(__NR_brk, sys_brk)
+#define __NR_munmap 215
+__SYSCALL(__NR_munmap, sys_munmap)
+#define __NR_mremap 216
+__SYSCALL(__NR_mremap, sys_mremap)
+
+/* security/keys/keyctl.c */
+#define __NR_add_key 217
+__SYSCALL(__NR_add_key, sys_add_key)
+#define __NR_request_key 218
+__SYSCALL(__NR_request_key, sys_request_key)
+#define __NR_keyctl 219
+__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl)
+
+/* arch/example/kernel/sys_example.c */
+#define __NR_clone 220
+__SYSCALL(__NR_clone, sys_clone)
+#define __NR_execve 221
+__SC_COMP(__NR_execve, sys_execve, compat_sys_execve)
+
+#define __NR3264_mmap 222
+__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
+/* mm/fadvise.c */
+#define __NR3264_fadvise64 223
+__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64)
+
+/* mm/, CONFIG_MMU only */
+#ifndef __ARCH_NOMMU
+#define __NR_swapon 224
+__SYSCALL(__NR_swapon, sys_swapon)
+#define __NR_swapoff 225
+__SYSCALL(__NR_swapoff, sys_swapoff)
+#define __NR_mprotect 226
+__SYSCALL(__NR_mprotect, sys_mprotect)
+#define __NR_msync 227
+__SYSCALL(__NR_msync, sys_msync)
+#define __NR_mlock 228
+__SYSCALL(__NR_mlock, sys_mlock)
+#define __NR_munlock 229
+__SYSCALL(__NR_munlock, sys_munlock)
+#define __NR_mlockall 230
+__SYSCALL(__NR_mlockall, sys_mlockall)
+#define __NR_munlockall 231
+__SYSCALL(__NR_munlockall, sys_munlockall)
+#define __NR_mincore 232
+__SYSCALL(__NR_mincore, sys_mincore)
+#define __NR_madvise 233
+__SYSCALL(__NR_madvise, sys_madvise)
+#define __NR_remap_file_pages 234
+__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
+#define __NR_mbind 235
+__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind)
+#define __NR_get_mempolicy 236
+__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy)
+#define __NR_set_mempolicy 237
+__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy)
+#define __NR_migrate_pages 238
+__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages)
+#define __NR_move_pages 239
+__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages)
+#endif
+
+#define __NR_rt_tgsigqueueinfo 240
+__SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
+ compat_sys_rt_tgsigqueueinfo)
+#define __NR_perf_event_open 241
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
+#define __NR_accept4 242
+__SYSCALL(__NR_accept4, sys_accept4)
+#define __NR_recvmmsg 243
+__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
+
+/*
+ * Architectures may provide up to 16 syscalls of their own
+ * starting with this value.
+ */
+#define __NR_arch_specific_syscall 244
+
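For illustration, an architecture claims one of these sixteen slots in its
own uapi unistd.h; a sketch of the pattern (riscv's icache flush is one
in-tree example of it, not part of this patch):

        #define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
        __SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
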
+#define __NR_wait4 260
+__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
+#define __NR_prlimit64 261
+__SYSCALL(__NR_prlimit64, sys_prlimit64)
+#define __NR_fanotify_init 262
+__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
+#define __NR_fanotify_mark 263
+__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
+#define __NR_name_to_handle_at 264
+__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
+#define __NR_open_by_handle_at 265
+__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
+ compat_sys_open_by_handle_at)
+#define __NR_clock_adjtime 266
+__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
+#define __NR_syncfs 267
+__SYSCALL(__NR_syncfs, sys_syncfs)
+#define __NR_setns 268
+__SYSCALL(__NR_setns, sys_setns)
+#define __NR_sendmmsg 269
+__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
+#define __NR_process_vm_readv 270
+__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
+ compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 271
+__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
+ compat_sys_process_vm_writev)
+#define __NR_kcmp 272
+__SYSCALL(__NR_kcmp, sys_kcmp)
+#define __NR_finit_module 273
+__SYSCALL(__NR_finit_module, sys_finit_module)
+#define __NR_sched_setattr 274
+__SYSCALL(__NR_sched_setattr, sys_sched_setattr)
+#define __NR_sched_getattr 275
+__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
+#define __NR_renameat2 276
+__SYSCALL(__NR_renameat2, sys_renameat2)
+#define __NR_seccomp 277
+__SYSCALL(__NR_seccomp, sys_seccomp)
+#define __NR_getrandom 278
+__SYSCALL(__NR_getrandom, sys_getrandom)
+#define __NR_memfd_create 279
+__SYSCALL(__NR_memfd_create, sys_memfd_create)
+#define __NR_bpf 280
+__SYSCALL(__NR_bpf, sys_bpf)
+#define __NR_execveat 281
+__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat)
+#define __NR_userfaultfd 282
+__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
+#define __NR_membarrier 283
+__SYSCALL(__NR_membarrier, sys_membarrier)
+#define __NR_mlock2 284
+__SYSCALL(__NR_mlock2, sys_mlock2)
+#define __NR_copy_file_range 285
+__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 286
+__SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 287
+__SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2)
+#define __NR_pkey_mprotect 288
+__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
+#define __NR_pkey_alloc 289
+__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
+#define __NR_pkey_free 290
+__SYSCALL(__NR_pkey_free, sys_pkey_free)
+#define __NR_statx 291
+__SYSCALL(__NR_statx, sys_statx)
+#define __NR_io_pgetevents 292
+__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+
+#undef __NR_syscalls
+#define __NR_syscalls 293
+
+/*
+ * 32 bit systems traditionally used different
+ * syscalls for off_t and loff_t arguments, while
+ * 64 bit systems only need the off_t version.
+ * For new 32 bit platforms, there is no need to
+ * implement the old 32 bit off_t syscalls, so
+ * they take different names.
+ * Here we map the numbers so that both versions
+ * use the same syscall table layout.
+ */
+#if __BITS_PER_LONG == 64 && !defined(__SYSCALL_COMPAT)
+#define __NR_fcntl __NR3264_fcntl
+#define __NR_statfs __NR3264_statfs
+#define __NR_fstatfs __NR3264_fstatfs
+#define __NR_truncate __NR3264_truncate
+#define __NR_ftruncate __NR3264_ftruncate
+#define __NR_lseek __NR3264_lseek
+#define __NR_sendfile __NR3264_sendfile
+#define __NR_newfstatat __NR3264_fstatat
+#define __NR_fstat __NR3264_fstat
+#define __NR_mmap __NR3264_mmap
+#define __NR_fadvise64 __NR3264_fadvise64
+#ifdef __NR3264_stat
+#define __NR_stat __NR3264_stat
+#define __NR_lstat __NR3264_lstat
+#endif
+#else
+#define __NR_fcntl64 __NR3264_fcntl
+#define __NR_statfs64 __NR3264_statfs
+#define __NR_fstatfs64 __NR3264_fstatfs
+#define __NR_truncate64 __NR3264_truncate
+#define __NR_ftruncate64 __NR3264_ftruncate
+#define __NR_llseek __NR3264_lseek
+#define __NR_sendfile64 __NR3264_sendfile
+#define __NR_fstatat64 __NR3264_fstatat
+#define __NR_fstat64 __NR3264_fstat
+#define __NR_mmap2 __NR3264_mmap
+#define __NR_fadvise64_64 __NR3264_fadvise64
+#ifdef __NR3264_stat
+#define __NR_stat64 __NR3264_stat
+#define __NR_lstat64 __NR3264_lstat
+#endif
+#endif
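As a hedged sketch of what this remapping buys: 64-bit user space reaches
the shared table slot under the plain name, while a new 32-bit port sees
the very same slot under the 64-suffixed name. The fragment below assumes
a 64-bit build, where __NR_lseek aliases __NR3264_lseek:

        #include <unistd.h>
        #include <sys/syscall.h>

        /* return the current file offset via the shared table slot */
        static long tell(int fd)
        {
                return syscall(__NR_lseek, fd, 0L, SEEK_CUR);
        }
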
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index b7db3261c62d..66917a4eba27 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -75,6 +75,11 @@ struct bpf_lpm_trie_key {
__u8 data[0]; /* Arbitrary size */
};
+struct bpf_cgroup_storage_key {
+ __u64 cgroup_inode_id; /* cgroup inode id */
+ __u32 attach_type; /* program attach type */
+};
+
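A hedged user-space sketch of how this key is meant to be used with the
BPF_MAP_TYPE_CGROUP_STORAGE map type added below; map_fd, cgrp_id and the
__u64 value layout are assumptions of the example, not part of the patch:

        struct bpf_cgroup_storage_key key = {
                .cgroup_inode_id = cgrp_id, /* cgroup v2 inode id */
                .attach_type     = BPF_CGROUP_INET_EGRESS,
        };
        __u64 value;

        if (bpf_map_lookup_elem(map_fd, &key, &value))
                perror("cgroup storage lookup");
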
/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
BPF_MAP_CREATE,
@@ -120,6 +125,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_CPUMAP,
BPF_MAP_TYPE_XSKMAP,
BPF_MAP_TYPE_SOCKHASH,
+ BPF_MAP_TYPE_CGROUP_STORAGE,
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
};
enum bpf_prog_type {
@@ -144,6 +151,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
BPF_PROG_TYPE_LWT_SEG6LOCAL,
BPF_PROG_TYPE_LIRC_MODE2,
+ BPF_PROG_TYPE_SK_REUSEPORT,
};
enum bpf_attach_type {
@@ -1371,6 +1379,20 @@ union bpf_attr {
 * An 8-byte long non-decreasing number on success, or 0 if the
* socket field is missing inside *skb*.
*
+ * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
+ * Description
+ * Equivalent to the bpf_get_socket_cookie() helper that accepts
+ * *skb*, but gets the socket from a **struct bpf_sock_addr** context.
+ * Return
+ * An 8-byte long non-decreasing number.
+ *
+ * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
+ * Description
+ * Equivalent to the bpf_get_socket_cookie() helper that accepts
+ * *skb*, but gets the socket from a **struct bpf_sock_ops** context.
+ * Return
+ * An 8-byte long non-decreasing number.
+ *
* u32 bpf_get_socket_uid(struct sk_buff *skb)
* Return
* The owner UID of the socket associated to *skb*. If the socket
@@ -1826,7 +1848,7 @@ union bpf_attr {
* A non-negative value equal to or less than *size* on success,
* or a negative error in case of failure.
*
- * int skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
* This helper is similar to **bpf_skb_load_bytes**\ () in that
* it provides an easy way to load *len* bytes from *offset*
@@ -1877,7 +1899,7 @@ union bpf_attr {
* * < 0 if any input argument is invalid
* * 0 on success (packet is forwarded, nexthop neighbor exists)
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
- * * packet is not forwarded or needs assist from full stack
+ * packet is not forwarded or needs assist from full stack
*
* int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
* Description
@@ -2033,7 +2055,6 @@ union bpf_attr {
 * This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
- *
* Return
* 0
*
@@ -2053,7 +2074,6 @@ union bpf_attr {
 * This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
- *
* Return
* 0
*
@@ -2073,10 +2093,54 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ * Description
+ * Return the id of the cgroup v2 ancestor, at *ancestor_level*,
+ * of the cgroup associated with the *skb*. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* equals the level of
+ * the cgroup associated with the *skb*, the return value is the
+ * same as that of **bpf_skb_cgroup_id**\ ().
+ *
+ * The helper is useful for implementing policies based on
+ * cgroups that are higher in the hierarchy than the immediate
+ * cgroup associated with the *skb*.
+ *
+ * The format of the returned id and the helper's limitations
+ * are the same as for **bpf_skb_cgroup_id**\ ().
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
* u64 bpf_get_current_cgroup_id(void)
* Return
* A 64-bit integer containing the current cgroup id based
* on the cgroup within which the current task is running.
+ *
+ * void *bpf_get_local_storage(void *map, u64 flags)
+ * Description
+ * Get a pointer to the local storage area.
+ * The type and size of the local storage are defined by the
+ * *map* argument.
+ * The meaning of *flags* is specific to each map type, and has
+ * to be 0 for cgroup local storage.
+ *
+ * Depending on the BPF program type, a local storage area can
+ * be shared between multiple instances of the BPF program
+ * running simultaneously.
+ *
+ * Synchronization is the user's responsibility, for example by
+ * using the BPF_STX_XADD instruction to alter the shared data.
+ * Return
+ * A pointer to the local storage area.
+ *
+ * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
+ * Description
+ * Select a **SO_REUSEPORT** socket from a
+ * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*, and check that
+ * the selected socket matches the incoming request in the skb.
+ * Return
+ * 0 on success, or a negative error in case of failure.
*/
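A hedged sketch of the new local-storage helper from a cgroup skb
program; the map-definition macro layout and the section name follow the
usual samples/selftests conventions and are assumptions here:

        struct bpf_map_def SEC("maps") byte_counter = {
                .type       = BPF_MAP_TYPE_CGROUP_STORAGE,
                .key_size   = sizeof(struct bpf_cgroup_storage_key),
                .value_size = sizeof(__u64),
        };

        SEC("cgroup/skb")
        int count_egress(struct __sk_buff *skb)
        {
                __u64 *bytes = bpf_get_local_storage(&byte_counter, 0);

                /* storage may be shared between running program
                 * instances, so use an atomic add */
                __sync_fetch_and_add(bytes, skb->len);
                return 1; /* allow the packet */
        }
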
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2159,7 +2223,10 @@ union bpf_attr {
FN(rc_repeat), \
FN(rc_keydown), \
FN(skb_cgroup_id), \
- FN(get_current_cgroup_id),
+ FN(get_current_cgroup_id), \
+ FN(get_local_storage), \
+ FN(sk_select_reuseport), \
+ FN(skb_ancestor_cgroup_id),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2376,6 +2443,30 @@ struct sk_msg_md {
__u32 local_port; /* stored in host byte order */
};
+struct sk_reuseport_md {
+ /*
+ * Start of directly accessible data. It begins from
+ * the tcp/udp header.
+ */
+ void *data;
+ void *data_end; /* End of directly accessible data */
+ /*
+ * Total length of packet (starting from the tcp/udp header).
+ * Note that the directly accessible bytes (data_end - data)
+ * could be less than this "len". Those bytes can still be
+ * read indirectly with the "bpf_skb_load_bytes()" helper.
+ */
+ __u32 len;
+ /*
+ * Eth protocol in the mac header (network byte order). e.g.
+ * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
+ */
+ __u32 eth_protocol;
+ __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
+ __u32 bind_inany; /* Is sock bound to an INANY address? */
+ __u32 hash; /* A hash of the packet 4 tuples */
+};
+
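A hedged sketch of a BPF_PROG_TYPE_SK_REUSEPORT program consuming this
context; the sockarray layout, the section name and the 4-socket sizing
follow the selftests conventions and are assumptions here:

        struct bpf_map_def SEC("maps") reuseport_array = {
                .type        = BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
                .key_size    = sizeof(__u32),
                .value_size  = sizeof(__u64), /* socket fd on update */
                .max_entries = 4,
        };

        SEC("sk_reuseport")
        int select_by_hash(struct sk_reuseport_md *reuse_md)
        {
                __u32 index = reuse_md->hash % 4;

                /* steer only plain TCP; let the kernel pick otherwise */
                if (reuse_md->ip_protocol != IPPROTO_TCP)
                        return SK_PASS;

                if (bpf_sk_select_reuseport(reuse_md, &reuseport_array,
                                            &index, 0))
                        return SK_DROP;

                return SK_PASS;
        }
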
#define BPF_TAG_SIZE 8
struct bpf_prog_info {
@@ -2557,6 +2648,9 @@ enum {
* Arg1: old_state
* Arg2: new_state
*/
+ BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after
+ * socket transition to LISTEN state.
+ */
};
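A hedged sketch of a sockops program keying off the new callback; the
flag request is just one plausible per-listener action:

        SEC("sockops")
        int on_sockops(struct bpf_sock_ops *skops)
        {
                if (skops->op == BPF_SOCK_OPS_TCP_LISTEN_CB)
                        /* socket just entered LISTEN: ask for state
                         * change callbacks on it from now on */
                        bpf_sock_ops_cb_flags_set(skops,
                                                  BPF_SOCK_OPS_STATE_CB_FLAG);
                return 1;
        }
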
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
new file mode 100644
index 000000000000..48e8a225b985
--- /dev/null
+++ b/tools/include/uapi/linux/in.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions of the Internet Protocol.
+ *
+ * Version: @(#)in.h 1.0.1 04/21/93
+ *
+ * Authors: Original taken from the GNU Project <netinet/in.h> file.
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UAPI_LINUX_IN_H
+#define _UAPI_LINUX_IN_H
+
+#include <linux/types.h>
+#include <linux/libc-compat.h>
+#include <linux/socket.h>
+
+#if __UAPI_DEF_IN_IPPROTO
+/* Standard well-defined IP protocols. */
+enum {
+ IPPROTO_IP = 0, /* Dummy protocol for TCP */
+#define IPPROTO_IP IPPROTO_IP
+ IPPROTO_ICMP = 1, /* Internet Control Message Protocol */
+#define IPPROTO_ICMP IPPROTO_ICMP
+ IPPROTO_IGMP = 2, /* Internet Group Management Protocol */
+#define IPPROTO_IGMP IPPROTO_IGMP
+ IPPROTO_IPIP = 4, /* IPIP tunnels (older KA9Q tunnels use 94) */
+#define IPPROTO_IPIP IPPROTO_IPIP
+ IPPROTO_TCP = 6, /* Transmission Control Protocol */
+#define IPPROTO_TCP IPPROTO_TCP
+ IPPROTO_EGP = 8, /* Exterior Gateway Protocol */
+#define IPPROTO_EGP IPPROTO_EGP
+ IPPROTO_PUP = 12, /* PUP protocol */
+#define IPPROTO_PUP IPPROTO_PUP
+ IPPROTO_UDP = 17, /* User Datagram Protocol */
+#define IPPROTO_UDP IPPROTO_UDP
+ IPPROTO_IDP = 22, /* XNS IDP protocol */
+#define IPPROTO_IDP IPPROTO_IDP
+ IPPROTO_TP = 29, /* SO Transport Protocol Class 4 */
+#define IPPROTO_TP IPPROTO_TP
+ IPPROTO_DCCP = 33, /* Datagram Congestion Control Protocol */
+#define IPPROTO_DCCP IPPROTO_DCCP
+ IPPROTO_IPV6 = 41, /* IPv6-in-IPv4 tunnelling */
+#define IPPROTO_IPV6 IPPROTO_IPV6
+ IPPROTO_RSVP = 46, /* RSVP Protocol */
+#define IPPROTO_RSVP IPPROTO_RSVP
+ IPPROTO_GRE = 47, /* Cisco GRE tunnels (rfc 1701,1702) */
+#define IPPROTO_GRE IPPROTO_GRE
+ IPPROTO_ESP = 50, /* Encapsulation Security Payload protocol */
+#define IPPROTO_ESP IPPROTO_ESP
+ IPPROTO_AH = 51, /* Authentication Header protocol */
+#define IPPROTO_AH IPPROTO_AH
+ IPPROTO_MTP = 92, /* Multicast Transport Protocol */
+#define IPPROTO_MTP IPPROTO_MTP
+ IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */
+#define IPPROTO_BEETPH IPPROTO_BEETPH
+ IPPROTO_ENCAP = 98, /* Encapsulation Header */
+#define IPPROTO_ENCAP IPPROTO_ENCAP
+ IPPROTO_PIM = 103, /* Protocol Independent Multicast */
+#define IPPROTO_PIM IPPROTO_PIM
+ IPPROTO_COMP = 108, /* Compression Header Protocol */
+#define IPPROTO_COMP IPPROTO_COMP
+ IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */
+#define IPPROTO_SCTP IPPROTO_SCTP
+ IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */
+#define IPPROTO_UDPLITE IPPROTO_UDPLITE
+ IPPROTO_MPLS = 137, /* MPLS in IP (RFC 4023) */
+#define IPPROTO_MPLS IPPROTO_MPLS
+ IPPROTO_RAW = 255, /* Raw IP packets */
+#define IPPROTO_RAW IPPROTO_RAW
+ IPPROTO_MAX
+};
+#endif
+
+#if __UAPI_DEF_IN_ADDR
+/* Internet address. */
+struct in_addr {
+ __be32 s_addr;
+};
+#endif
+
+#define IP_TOS 1
+#define IP_TTL 2
+#define IP_HDRINCL 3
+#define IP_OPTIONS 4
+#define IP_ROUTER_ALERT 5
+#define IP_RECVOPTS 6
+#define IP_RETOPTS 7
+#define IP_PKTINFO 8
+#define IP_PKTOPTIONS 9
+#define IP_MTU_DISCOVER 10
+#define IP_RECVERR 11
+#define IP_RECVTTL 12
+#define IP_RECVTOS 13
+#define IP_MTU 14
+#define IP_FREEBIND 15
+#define IP_IPSEC_POLICY 16
+#define IP_XFRM_POLICY 17
+#define IP_PASSSEC 18
+#define IP_TRANSPARENT 19
+
+/* BSD compatibility */
+#define IP_RECVRETOPTS IP_RETOPTS
+
+/* TProxy original addresses */
+#define IP_ORIGDSTADDR 20
+#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR
+
+#define IP_MINTTL 21
+#define IP_NODEFRAG 22
+#define IP_CHECKSUM 23
+#define IP_BIND_ADDRESS_NO_PORT 24
+#define IP_RECVFRAGSIZE 25
+
+/* IP_MTU_DISCOVER values */
+#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
+#define IP_PMTUDISC_WANT 1 /* Use per route hints */
+#define IP_PMTUDISC_DO 2 /* Always DF */
+#define IP_PMTUDISC_PROBE 3 /* Ignore dst pmtu */
+/* Always use interface mtu (ignores dst pmtu) but don't set DF flag.
+ * Also incoming ICMP frag_needed notifications will be ignored on
+ * this socket to prevent accepting spoofed ones.
+ */
+#define IP_PMTUDISC_INTERFACE 4
+/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get
+ * fragmented if they exceed the interface mtu
+ */
+#define IP_PMTUDISC_OMIT 5
+
+#define IP_MULTICAST_IF 32
+#define IP_MULTICAST_TTL 33
+#define IP_MULTICAST_LOOP 34
+#define IP_ADD_MEMBERSHIP 35
+#define IP_DROP_MEMBERSHIP 36
+#define IP_UNBLOCK_SOURCE 37
+#define IP_BLOCK_SOURCE 38
+#define IP_ADD_SOURCE_MEMBERSHIP 39
+#define IP_DROP_SOURCE_MEMBERSHIP 40
+#define IP_MSFILTER 41
+#define MCAST_JOIN_GROUP 42
+#define MCAST_BLOCK_SOURCE 43
+#define MCAST_UNBLOCK_SOURCE 44
+#define MCAST_LEAVE_GROUP 45
+#define MCAST_JOIN_SOURCE_GROUP 46
+#define MCAST_LEAVE_SOURCE_GROUP 47
+#define MCAST_MSFILTER 48
+#define IP_MULTICAST_ALL 49
+#define IP_UNICAST_IF 50
+
+#define MCAST_EXCLUDE 0
+#define MCAST_INCLUDE 1
+
+/* These need to appear somewhere around here */
+#define IP_DEFAULT_MULTICAST_TTL 1
+#define IP_DEFAULT_MULTICAST_LOOP 1
+
+/* Request struct for multicast socket ops */
+
+#if __UAPI_DEF_IP_MREQ
+struct ip_mreq {
+ struct in_addr imr_multiaddr; /* IP multicast address of group */
+ struct in_addr imr_interface; /* local IP address of interface */
+};
+
+struct ip_mreqn {
+ struct in_addr imr_multiaddr; /* IP multicast address of group */
+ struct in_addr imr_address; /* local IP address of interface */
+ int imr_ifindex; /* Interface index */
+};
+
+struct ip_mreq_source {
+ __be32 imr_multiaddr;
+ __be32 imr_interface;
+ __be32 imr_sourceaddr;
+};
+
+struct ip_msfilter {
+ __be32 imsf_multiaddr;
+ __be32 imsf_interface;
+ __u32 imsf_fmode;
+ __u32 imsf_numsrc;
+ __be32 imsf_slist[1];
+};
+
+#define IP_MSFILTER_SIZE(numsrc) \
+ (sizeof(struct ip_msfilter) - sizeof(__u32) \
+ + (numsrc) * sizeof(__u32))
+
+struct group_req {
+ __u32 gr_interface; /* interface index */
+ struct __kernel_sockaddr_storage gr_group; /* group address */
+};
+
+struct group_source_req {
+ __u32 gsr_interface; /* interface index */
+ struct __kernel_sockaddr_storage gsr_group; /* group address */
+ struct __kernel_sockaddr_storage gsr_source; /* source address */
+};
+
+struct group_filter {
+ __u32 gf_interface; /* interface index */
+ struct __kernel_sockaddr_storage gf_group; /* multicast address */
+ __u32 gf_fmode; /* filter mode */
+ __u32 gf_numsrc; /* number of sources */
+ struct __kernel_sockaddr_storage gf_slist[1]; /* source address list */
+};
+
+#define GROUP_FILTER_SIZE(numsrc) \
+ (sizeof(struct group_filter) - sizeof(struct __kernel_sockaddr_storage) \
+ + (numsrc) * sizeof(struct __kernel_sockaddr_storage))
+#endif
+
+#if __UAPI_DEF_IN_PKTINFO
+struct in_pktinfo {
+ int ipi_ifindex;
+ struct in_addr ipi_spec_dst;
+ struct in_addr ipi_addr;
+};
+#endif
+
+/* Structure describing an Internet (IP) socket address. */
+#if __UAPI_DEF_SOCKADDR_IN
+#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */
+struct sockaddr_in {
+ __kernel_sa_family_t sin_family; /* Address family */
+ __be16 sin_port; /* Port number */
+ struct in_addr sin_addr; /* Internet address */
+
+ /* Pad to size of `struct sockaddr'. */
+ unsigned char __pad[__SOCK_SIZE__ - sizeof(short int) -
+ sizeof(unsigned short int) - sizeof(struct in_addr)];
+};
+#define sin_zero __pad /* for BSD UNIX comp. -FvK */
+#endif
+
+#if __UAPI_DEF_IN_CLASS
+/*
+ * Definitions of the bits in an Internet address integer.
+ * On subnets, host and network parts are found according
+ * to the subnet mask, not these masks.
+ */
+#define IN_CLASSA(a) ((((long int) (a)) & 0x80000000) == 0)
+#define IN_CLASSA_NET 0xff000000
+#define IN_CLASSA_NSHIFT 24
+#define IN_CLASSA_HOST (0xffffffff & ~IN_CLASSA_NET)
+#define IN_CLASSA_MAX 128
+
+#define IN_CLASSB(a) ((((long int) (a)) & 0xc0000000) == 0x80000000)
+#define IN_CLASSB_NET 0xffff0000
+#define IN_CLASSB_NSHIFT 16
+#define IN_CLASSB_HOST (0xffffffff & ~IN_CLASSB_NET)
+#define IN_CLASSB_MAX 65536
+
+#define IN_CLASSC(a) ((((long int) (a)) & 0xe0000000) == 0xc0000000)
+#define IN_CLASSC_NET 0xffffff00
+#define IN_CLASSC_NSHIFT 8
+#define IN_CLASSC_HOST (0xffffffff & ~IN_CLASSC_NET)
+
+#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000)
+#define IN_MULTICAST(a) IN_CLASSD(a)
+#define IN_MULTICAST_NET 0xF0000000
+
+#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
+#define IN_BADCLASS(a) IN_EXPERIMENTAL((a))
+
+/* Address to accept any incoming messages. */
+#define INADDR_ANY ((unsigned long int) 0x00000000)
+
+/* Address to send to all hosts. */
+#define INADDR_BROADCAST ((unsigned long int) 0xffffffff)
+
+/* Address indicating an error return. */
+#define INADDR_NONE ((unsigned long int) 0xffffffff)
+
+/* Network number for local host loopback. */
+#define IN_LOOPBACKNET 127
+
+/* Address to loopback in software to local host. */
+#define INADDR_LOOPBACK 0x7f000001 /* 127.0.0.1 */
+#define IN_LOOPBACK(a) ((((long int) (a)) & 0xff000000) == 0x7f000000)
+
+/* Defines for Multicast INADDR */
+#define INADDR_UNSPEC_GROUP 0xe0000000U /* 224.0.0.0 */
+#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */
+#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */
+#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */
+#endif
+
+/* <asm/byteorder.h> contains the htonl type stuff. */
+#include <asm/byteorder.h>
+
+
+#endif /* _UAPI_LINUX_IN_H */
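This header only mirrors the kernel uapi for the BPF tooling, but as a
quick hedged refresher on the structures it carries, joining the
all-hosts group on a UDP socket looks like:

        #include <arpa/inet.h>
        #include <netinet/in.h>
        #include <sys/socket.h>

        static int join_allhosts(int udp_fd)
        {
                struct ip_mreq mreq = {
                        .imr_multiaddr.s_addr = htonl(INADDR_ALLHOSTS_GROUP),
                        .imr_interface.s_addr = htonl(INADDR_ANY),
                };

                return setsockopt(udp_fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                                  &mreq, sizeof(mreq));
        }
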
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 6070e655042d..13a861135127 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 5390e7725e43..d49902e818b5 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -66,7 +66,7 @@ ifndef VERBOSE
endif
FEATURE_USER = .libbpf
-FEATURE_TESTS = libelf libelf-getphdrnum libelf-mmap bpf
+FEATURE_TESTS = libelf libelf-mmap bpf reallocarray
FEATURE_DISPLAY = libelf bpf
INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi -I$(srctree)/tools/perf
@@ -116,8 +116,8 @@ ifeq ($(feature-libelf-mmap), 1)
override CFLAGS += -DHAVE_LIBELF_MMAP_SUPPORT
endif
-ifeq ($(feature-libelf-getphdrnum), 1)
- override CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT
+ifeq ($(feature-reallocarray), 0)
+ override CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
endif
# Append required CFLAGS
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 9ddc89dae962..60aa4ca8b2c5 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -92,6 +92,7 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
attr.btf_key_type_id = create_attr->btf_key_type_id;
attr.btf_value_type_id = create_attr->btf_value_type_id;
attr.map_ifindex = create_attr->map_ifindex;
+ attr.inner_map_fd = create_attr->inner_map_fd;
return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 0639a30a457d..6f38164b2618 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -39,6 +39,7 @@ struct bpf_create_map_attr {
__u32 btf_key_type_id;
__u32 btf_value_type_id;
__u32 map_ifindex;
+ __u32 inner_map_fd;
};
int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr);
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index c36a3a76986a..cf94b0770522 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -16,6 +16,11 @@
#define BTF_MAX_NR_TYPES 65535
+#define IS_MODIFIER(k) (((k) == BTF_KIND_TYPEDEF) || \
+ ((k) == BTF_KIND_VOLATILE) || \
+ ((k) == BTF_KIND_CONST) || \
+ ((k) == BTF_KIND_RESTRICT))
+
static struct btf_type btf_void;
struct btf {
@@ -32,14 +37,6 @@ struct btf {
int fd;
};
-static const char *btf_name_by_offset(const struct btf *btf, __u32 offset)
-{
- if (offset < btf->hdr->str_len)
- return &btf->strings[offset];
- else
- return NULL;
-}
-
static int btf_add_type(struct btf *btf, struct btf_type *t)
{
if (btf->types_size - btf->nr_types < 2) {
@@ -269,6 +266,26 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
return nelems * size;
}
+int btf__resolve_type(const struct btf *btf, __u32 type_id)
+{
+ const struct btf_type *t;
+ int depth = 0;
+
+ t = btf__type_by_id(btf, type_id);
+ while (depth < MAX_RESOLVE_DEPTH &&
+ !btf_type_is_void_or_null(t) &&
+ IS_MODIFIER(BTF_INFO_KIND(t->info))) {
+ type_id = t->type;
+ t = btf__type_by_id(btf, type_id);
+ depth++;
+ }
+
+ if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
+ return -EINVAL;
+
+ return type_id;
+}
+
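A hedged sketch of what the new resolver is for: peeling typedef, const,
volatile and restrict wrappers off a type id before acting on it; the
type name "bar" is a placeholder:

        __s32 id = btf__find_by_name(btf, "bar");
        __s64 size = -1;

        if (id >= 0) {
                /* e.g. "typedef const struct foo bar" resolves down
                 * to the type id of struct foo itself */
                int resolved = btf__resolve_type(btf, id);

                if (resolved >= 0)
                        size = btf__resolve_size(btf, resolved);
        }
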
__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
{
__u32 i;
@@ -278,7 +295,7 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
for (i = 1; i <= btf->nr_types; i++) {
const struct btf_type *t = btf->types[i];
- const char *name = btf_name_by_offset(btf, t->name_off);
+ const char *name = btf__name_by_offset(btf, t->name_off);
if (name && !strcmp(type_name, name))
return i;
@@ -368,3 +385,11 @@ int btf__fd(const struct btf *btf)
{
return btf->fd;
}
+
+const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
+{
+ if (offset < btf->hdr->str_len)
+ return &btf->strings[offset];
+ else
+ return NULL;
+}
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index caac3a404dc5..4897e0724d4e 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -19,6 +19,8 @@ struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
__s32 btf__find_by_name(const struct btf *btf, const char *type_name);
const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id);
__s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
+int btf__resolve_type(const struct btf *btf, __u32 type_id);
int btf__fd(const struct btf *btf);
+const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
#endif
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 1aafdbe827fe..2abd0f112627 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -22,6 +22,7 @@
* License along with this program; if not, see <http://www.gnu.org/licenses>
*/
+#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
@@ -42,6 +43,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
+#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>
@@ -96,54 +98,6 @@ void libbpf_set_print(libbpf_print_fn_t warn,
#define STRERR_BUFSIZE 128
-#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
-#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
-#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
-
-static const char *libbpf_strerror_table[NR_ERRNO] = {
- [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
- [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
- [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
- [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch",
- [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
- [ERRCODE_OFFSET(RELOC)] = "Relocation failed",
- [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
- [ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
- [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
- [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type",
- [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message",
- [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence",
-};
-
-int libbpf_strerror(int err, char *buf, size_t size)
-{
- if (!buf || !size)
- return -1;
-
- err = err > 0 ? err : -err;
-
- if (err < __LIBBPF_ERRNO__START) {
- int ret;
-
- ret = strerror_r(err, buf, size);
- buf[size - 1] = '\0';
- return ret;
- }
-
- if (err < __LIBBPF_ERRNO__END) {
- const char *msg;
-
- msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
- snprintf(buf, size, "%s", msg);
- buf[size - 1] = '\0';
- return 0;
- }
-
- snprintf(buf, size, "Unknown libbpf error %d", err);
- buf[size - 1] = '\0';
- return -1;
-}
-
#define CHECK_ERR(action, err, out) do { \
err = action; \
if (err) \
@@ -235,6 +189,7 @@ struct bpf_object {
size_t nr_maps;
bool loaded;
+ bool has_pseudo_calls;
/*
* Information when doing elf related work. Only valid if fd
@@ -369,7 +324,7 @@ bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
progs = obj->programs;
nr_progs = obj->nr_programs;
- progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
+ progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
if (!progs) {
/*
* In this case the original obj->programs
@@ -401,10 +356,6 @@ bpf_object__init_prog_names(struct bpf_object *obj)
const char *name = NULL;
prog = &obj->programs[pi];
- if (prog->idx == obj->efile.text_shndx) {
- name = ".text";
- goto skip_search;
- }
for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
si++) {
@@ -427,12 +378,15 @@ bpf_object__init_prog_names(struct bpf_object *obj)
}
}
+ if (!name && prog->idx == obj->efile.text_shndx)
+ name = ".text";
+
if (!name) {
pr_warning("failed to find sym for prog %s\n",
prog->section_name);
return -EINVAL;
}
-skip_search:
+
prog->name = strdup(name);
if (!prog->name) {
pr_warning("failed to allocate memory for prog sym %s\n",
@@ -514,8 +468,10 @@ static int bpf_object__elf_init(struct bpf_object *obj)
} else {
obj->efile.fd = open(obj->path, O_RDONLY);
if (obj->efile.fd < 0) {
- pr_warning("failed to open %s: %s\n", obj->path,
- strerror(errno));
+ char errmsg[STRERR_BUFSIZE];
+ char *cp = strerror_r(errno, errmsg, sizeof(errmsg));
+
+ pr_warning("failed to open %s: %s\n", obj->path, cp);
return -errno;
}
@@ -854,10 +810,11 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
data->d_size, name, idx);
if (err) {
char errmsg[STRERR_BUFSIZE];
+ char *cp = strerror_r(-err, errmsg,
+ sizeof(errmsg));
- strerror_r(-err, errmsg, sizeof(errmsg));
pr_warning("failed to alloc program %s (%s): %s",
- name, obj->path, errmsg);
+ name, obj->path, cp);
}
} else if (sh.sh_type == SHT_REL) {
void *reloc = obj->efile.reloc;
@@ -871,8 +828,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
continue;
}
- reloc = realloc(reloc,
- sizeof(*obj->efile.reloc) * nr_reloc);
+ reloc = reallocarray(reloc, nr_reloc,
+ sizeof(*obj->efile.reloc));
if (!reloc) {
pr_warning("realloc failed\n");
err = -ENOMEM;
@@ -920,6 +877,18 @@ bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
return NULL;
}
+struct bpf_program *
+bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
+{
+ struct bpf_program *pos;
+
+ bpf_object__for_each_program(pos, obj) {
+ if (pos->section_name && !strcmp(pos->section_name, title))
+ return pos;
+ }
+ return NULL;
+}
+
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
Elf_Data *data, struct bpf_object *obj)
@@ -982,6 +951,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
prog->reloc_desc[i].type = RELO_CALL;
prog->reloc_desc[i].insn_idx = insn_idx;
prog->reloc_desc[i].text_off = sym.st_value;
+ obj->has_pseudo_calls = true;
continue;
}
@@ -1085,6 +1055,53 @@ static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
return 0;
}
+int bpf_map__reuse_fd(struct bpf_map *map, int fd)
+{
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ int new_fd, err;
+ char *new_name;
+
+ err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ if (err)
+ return err;
+
+ new_name = strdup(info.name);
+ if (!new_name)
+ return -errno;
+
+ new_fd = open("/", O_RDONLY | O_CLOEXEC);
+ if (new_fd < 0)
+ goto err_free_new_name;
+
+ new_fd = dup3(fd, new_fd, O_CLOEXEC);
+ if (new_fd < 0)
+ goto err_close_new_fd;
+
+ err = zclose(map->fd);
+ if (err)
+ goto err_close_new_fd;
+ free(map->name);
+
+ map->fd = new_fd;
+ map->name = new_name;
+ map->def.type = info.type;
+ map->def.key_size = info.key_size;
+ map->def.value_size = info.value_size;
+ map->def.max_entries = info.max_entries;
+ map->def.map_flags = info.map_flags;
+ map->btf_key_type_id = info.btf_key_type_id;
+ map->btf_value_type_id = info.btf_value_type_id;
+
+ return 0;
+
+err_close_new_fd:
+ close(new_fd);
+err_free_new_name:
+ free(new_name);
+ return -errno;
+}
+
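The intended flow for the new bpf_map__reuse_fd() in a hedged sketch:
point one of an object's maps at a map that already exists (here a
pinned one; the pin path and object file are placeholders) before
loading, so creation is skipped and the storage is shared:

        struct bpf_object *obj = bpf_object__open("prog.o");
        struct bpf_map *map = bpf_map__next(NULL, obj);
        int fd = bpf_obj_get("/sys/fs/bpf/shared_map");

        if (fd >= 0 && map && !bpf_map__reuse_fd(map, fd))
                bpf_object__load(obj); /* map create is skipped above */
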
static int
bpf_object__create_maps(struct bpf_object *obj)
{
@@ -1095,8 +1112,15 @@ bpf_object__create_maps(struct bpf_object *obj)
for (i = 0; i < obj->nr_maps; i++) {
struct bpf_map *map = &obj->maps[i];
struct bpf_map_def *def = &map->def;
+ char *cp, errmsg[STRERR_BUFSIZE];
int *pfd = &map->fd;
+ if (map->fd >= 0) {
+ pr_debug("skip map create (preset) %s: fd=%d\n",
+ map->name, map->fd);
+ continue;
+ }
+
create_attr.name = map->name;
create_attr.map_ifindex = map->map_ifindex;
create_attr.map_type = def->type;
@@ -1116,8 +1140,9 @@ bpf_object__create_maps(struct bpf_object *obj)
*pfd = bpf_create_map_xattr(&create_attr);
if (*pfd < 0 && create_attr.btf_key_type_id) {
+ cp = strerror_r(errno, errmsg, sizeof(errmsg));
pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
- map->name, strerror(errno), errno);
+ map->name, cp, errno);
create_attr.btf_fd = 0;
create_attr.btf_key_type_id = 0;
create_attr.btf_value_type_id = 0;
@@ -1130,9 +1155,9 @@ bpf_object__create_maps(struct bpf_object *obj)
size_t j;
err = *pfd;
+ cp = strerror_r(errno, errmsg, sizeof(errmsg));
pr_warning("failed to create map (name: '%s'): %s\n",
- map->name,
- strerror(errno));
+ map->name, cp);
for (j = 0; j < i; j++)
zclose(obj->maps[j].fd);
return err;
@@ -1167,7 +1192,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
return -LIBBPF_ERRNO__RELOC;
}
new_cnt = prog->insns_cnt + text->insns_cnt;
- new_insn = realloc(prog->insns, new_cnt * sizeof(*insn));
+ new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
if (!new_insn) {
pr_warning("oom in prog realloc\n");
return -ENOMEM;
@@ -1284,6 +1309,7 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
char *license, u32 kern_version, int *pfd, int prog_ifindex)
{
struct bpf_load_program_attr load_attr;
+ char *cp, errmsg[STRERR_BUFSIZE];
char *log_buf;
int ret;
@@ -1313,7 +1339,8 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
}
ret = -LIBBPF_ERRNO__LOAD;
- pr_warning("load bpf program failed: %s\n", strerror(errno));
+ cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ pr_warning("load bpf program failed: %s\n", cp);
if (log_buf && log_buf[0] != '\0') {
ret = -LIBBPF_ERRNO__VERIFY;
@@ -1431,6 +1458,12 @@ out:
return err;
}
+static bool bpf_program__is_function_storage(struct bpf_program *prog,
+ struct bpf_object *obj)
+{
+ return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
+}
+
static int
bpf_object__load_progs(struct bpf_object *obj)
{
@@ -1438,7 +1471,7 @@ bpf_object__load_progs(struct bpf_object *obj)
int err;
for (i = 0; i < obj->nr_programs; i++) {
- if (obj->programs[i].idx == obj->efile.text_shndx)
+ if (bpf_program__is_function_storage(&obj->programs[i], obj))
continue;
err = bpf_program__load(&obj->programs[i],
obj->license,
@@ -1468,6 +1501,7 @@ static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
case BPF_PROG_TYPE_SK_MSG:
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
case BPF_PROG_TYPE_LIRC_MODE2:
+ case BPF_PROG_TYPE_SK_REUSEPORT:
return false;
case BPF_PROG_TYPE_UNSPEC:
case BPF_PROG_TYPE_KPROBE:
@@ -1518,15 +1552,26 @@ out:
return ERR_PTR(err);
}
-struct bpf_object *bpf_object__open(const char *path)
+struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
/* param validation */
- if (!path)
+ if (!attr->file)
return NULL;
- pr_debug("loading %s\n", path);
+ pr_debug("loading %s\n", attr->file);
- return __bpf_object__open(path, NULL, 0, true);
+ return __bpf_object__open(attr->file, NULL, 0,
+ bpf_prog_type__needs_kver(attr->prog_type));
+}
+
+struct bpf_object *bpf_object__open(const char *path)
+{
+ struct bpf_object_open_attr attr = {
+ .file = path,
+ .prog_type = BPF_PROG_TYPE_UNSPEC,
+ };
+
+ return bpf_object__open_xattr(&attr);
}
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
@@ -1595,6 +1640,7 @@ out:
static int check_path(const char *path)
{
+ char *cp, errmsg[STRERR_BUFSIZE];
struct statfs st_fs;
char *dname, *dir;
int err = 0;
@@ -1608,7 +1654,8 @@ static int check_path(const char *path)
dir = dirname(dname);
if (statfs(dir, &st_fs)) {
- pr_warning("failed to statfs %s: %s\n", dir, strerror(errno));
+ cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ pr_warning("failed to statfs %s: %s\n", dir, cp);
err = -errno;
}
free(dname);
@@ -1624,6 +1671,7 @@ static int check_path(const char *path)
int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
int instance)
{
+ char *cp, errmsg[STRERR_BUFSIZE];
int err;
err = check_path(path);
@@ -1642,7 +1690,8 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
}
if (bpf_obj_pin(prog->instances.fds[instance], path)) {
- pr_warning("failed to pin program: %s\n", strerror(errno));
+ cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ pr_warning("failed to pin program: %s\n", cp);
return -errno;
}
pr_debug("pinned program '%s'\n", path);
@@ -1652,13 +1701,16 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
static int make_dir(const char *path)
{
+ char *cp, errmsg[STRERR_BUFSIZE];
int err = 0;
if (mkdir(path, 0700) && errno != EEXIST)
err = -errno;
- if (err)
- pr_warning("failed to mkdir %s: %s\n", path, strerror(-err));
+ if (err) {
+ cp = strerror_r(-err, errmsg, sizeof(errmsg));
+ pr_warning("failed to mkdir %s: %s\n", path, cp);
+ }
return err;
}
@@ -1705,6 +1757,7 @@ int bpf_program__pin(struct bpf_program *prog, const char *path)
int bpf_map__pin(struct bpf_map *map, const char *path)
{
+ char *cp, errmsg[STRERR_BUFSIZE];
int err;
err = check_path(path);
@@ -1717,7 +1770,8 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
}
if (bpf_obj_pin(map->fd, path)) {
- pr_warning("failed to pin map: %s\n", strerror(errno));
+ cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ pr_warning("failed to pin map: %s\n", cp);
return -errno;
}
@@ -1863,8 +1917,8 @@ void *bpf_object__priv(struct bpf_object *obj)
return obj ? obj->priv : ERR_PTR(-EINVAL);
}
-struct bpf_program *
-bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
+static struct bpf_program *
+__bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
size_t idx;
@@ -1885,6 +1939,18 @@ bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
return &obj->programs[idx];
}
+struct bpf_program *
+bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
+{
+ struct bpf_program *prog = prev;
+
+ do {
+ prog = __bpf_program__next(prog, obj);
+ } while (prog && bpf_program__is_function_storage(prog, obj));
+
+ return prog;
+}
+
int bpf_program__set_priv(struct bpf_program *prog, void *priv,
bpf_program_clear_priv_t clear_priv)
{
@@ -1901,6 +1967,11 @@ void *bpf_program__priv(struct bpf_program *prog)
return prog ? prog->priv : ERR_PTR(-EINVAL);
}
+void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
+{
+ prog->prog_ifindex = ifindex;
+}
+
const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
const char *title;
@@ -1954,6 +2025,9 @@ int bpf_program__nth_fd(struct bpf_program *prog, int n)
{
int fd;
+ if (!prog)
+ return -EINVAL;
+
if (n >= prog->instances.nr || n < 0) {
pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
n, prog->section_name, prog->instances.nr);
@@ -2042,9 +2116,11 @@ static const struct {
BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
+ BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS),
BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB),
BPF_PROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG),
+ BPF_PROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2),
BPF_SA_PROG_SEC("cgroup/bind4", BPF_CGROUP_INET4_BIND),
BPF_SA_PROG_SEC("cgroup/bind6", BPF_CGROUP_INET6_BIND),
BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT),
@@ -2060,23 +2136,31 @@ static const struct {
#undef BPF_S_PROG_SEC
#undef BPF_SA_PROG_SEC
-static int bpf_program__identify_section(struct bpf_program *prog)
+int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
+ enum bpf_attach_type *expected_attach_type)
{
int i;
- if (!prog->section_name)
- goto err;
-
- for (i = 0; i < ARRAY_SIZE(section_names); i++)
- if (strncmp(prog->section_name, section_names[i].sec,
- section_names[i].len) == 0)
- return i;
+ if (!name)
+ return -EINVAL;
-err:
- pr_warning("failed to guess program type based on section name %s\n",
- prog->section_name);
+ for (i = 0; i < ARRAY_SIZE(section_names); i++) {
+ if (strncmp(name, section_names[i].sec, section_names[i].len))
+ continue;
+ *prog_type = section_names[i].prog_type;
+ *expected_attach_type = section_names[i].expected_attach_type;
+ return 0;
+ }
+ return -EINVAL;
+}
- return -1;
+static int
+bpf_program__identify_section(struct bpf_program *prog,
+ enum bpf_prog_type *prog_type,
+ enum bpf_attach_type *expected_attach_type)
+{
+ return libbpf_prog_type_by_name(prog->section_name, prog_type,
+ expected_attach_type);
}
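The section-name resolver is now exported; a hedged sketch of a caller:

        enum bpf_prog_type prog_type;
        enum bpf_attach_type attach_type;

        /* "cgroup/bind4" resolves via section_names[] to
         * BPF_PROG_TYPE_CGROUP_SOCK_ADDR / BPF_CGROUP_INET4_BIND */
        if (libbpf_prog_type_by_name("cgroup/bind4", &prog_type,
                                     &attach_type) < 0)
                fprintf(stderr, "unknown section name\n");
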
int bpf_map__fd(struct bpf_map *map)
@@ -2125,6 +2209,16 @@ void *bpf_map__priv(struct bpf_map *map)
return map ? map->priv : ERR_PTR(-EINVAL);
}
+bool bpf_map__is_offload_neutral(struct bpf_map *map)
+{
+ return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
+}
+
+void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
+{
+ map->map_ifindex = ifindex;
+}
+
struct bpf_map *
bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
{
@@ -2199,12 +2293,15 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
struct bpf_object **pobj, int *prog_fd)
{
+ struct bpf_object_open_attr open_attr = {
+ .file = attr->file,
+ .prog_type = attr->prog_type,
+ };
struct bpf_program *prog, *first_prog = NULL;
enum bpf_attach_type expected_attach_type;
enum bpf_prog_type prog_type;
struct bpf_object *obj;
struct bpf_map *map;
- int section_idx;
int err;
if (!attr)
@@ -2212,8 +2309,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
if (!attr->file)
return -EINVAL;
- obj = __bpf_object__open(attr->file, NULL, 0,
- bpf_prog_type__needs_kver(attr->prog_type));
+ obj = bpf_object__open_xattr(&open_attr);
if (IS_ERR_OR_NULL(obj))
return -ENOENT;
@@ -2226,26 +2322,27 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
prog->prog_ifindex = attr->ifindex;
expected_attach_type = attr->expected_attach_type;
if (prog_type == BPF_PROG_TYPE_UNSPEC) {
- section_idx = bpf_program__identify_section(prog);
- if (section_idx < 0) {
+ err = bpf_program__identify_section(prog, &prog_type,
+ &expected_attach_type);
+ if (err < 0) {
+ pr_warning("failed to guess program type based on section name %s\n",
+ prog->section_name);
bpf_object__close(obj);
return -EINVAL;
}
- prog_type = section_names[section_idx].prog_type;
- expected_attach_type =
- section_names[section_idx].expected_attach_type;
}
bpf_program__set_type(prog, prog_type);
bpf_program__set_expected_attach_type(prog,
expected_attach_type);
- if (prog->idx != obj->efile.text_shndx && !first_prog)
+ if (!bpf_program__is_function_storage(prog, obj) && !first_prog)
first_prog = prog;
}
bpf_map__for_each(map, obj) {
- map->map_ifindex = attr->ifindex;
+ if (!bpf_map__is_offload_neutral(map))
+ map->map_ifindex = attr->ifindex;
}
if (!first_prog) {
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index b33ae02f7d0e..96c55fac54c3 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -66,7 +66,13 @@ void libbpf_set_print(libbpf_print_fn_t warn,
/* Hide internal to user */
struct bpf_object;
+struct bpf_object_open_attr {
+ const char *file;
+ enum bpf_prog_type prog_type;
+};
+
struct bpf_object *bpf_object__open(const char *path);
+struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr);
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
size_t obj_buf_sz,
const char *name);
@@ -80,6 +86,9 @@ const char *bpf_object__name(struct bpf_object *obj);
unsigned int bpf_object__kversion(struct bpf_object *obj);
int bpf_object__btf_fd(const struct bpf_object *obj);
+struct bpf_program *
+bpf_object__find_program_by_title(struct bpf_object *obj, const char *title);
+
struct bpf_object *bpf_object__next(struct bpf_object *prev);
#define bpf_object__for_each_safe(pos, tmp) \
for ((pos) = bpf_object__next(NULL), \
@@ -92,6 +101,9 @@ int bpf_object__set_priv(struct bpf_object *obj, void *priv,
bpf_object_clear_priv_t clear_priv);
void *bpf_object__priv(struct bpf_object *prog);
+int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
+ enum bpf_attach_type *expected_attach_type);
+
/* Accessors of bpf_program */
struct bpf_program;
struct bpf_program *bpf_program__next(struct bpf_program *prog,
@@ -109,6 +121,7 @@ int bpf_program__set_priv(struct bpf_program *prog, void *priv,
bpf_program_clear_priv_t clear_priv);
void *bpf_program__priv(struct bpf_program *prog);
+void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex);
const char *bpf_program__title(struct bpf_program *prog, bool needs_copy);
@@ -251,6 +264,9 @@ typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
int bpf_map__set_priv(struct bpf_map *map, void *priv,
bpf_map_clear_priv_t clear_priv);
void *bpf_map__priv(struct bpf_map *map);
+int bpf_map__reuse_fd(struct bpf_map *map, int fd);
+bool bpf_map__is_offload_neutral(struct bpf_map *map);
+void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
int bpf_map__pin(struct bpf_map *map, const char *path);
long libbpf_get_error(const void *ptr);
diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c
new file mode 100644
index 000000000000..d9ba851bd7f9
--- /dev/null
+++ b/tools/lib/bpf/libbpf_errno.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: LGPL-2.1
+
+/*
+ * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
+ * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015 Huawei Inc.
+ * Copyright (C) 2017 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses>
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "libbpf.h"
+
+#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
+#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
+#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
+
+static const char *libbpf_strerror_table[NR_ERRNO] = {
+ [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
+ [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
+ [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
+ [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch",
+ [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
+ [ERRCODE_OFFSET(RELOC)] = "Relocation failed",
+ [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
+ [ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
+ [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
+ [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type",
+ [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message",
+ [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence",
+};
+
+int libbpf_strerror(int err, char *buf, size_t size)
+{
+ if (!buf || !size)
+ return -1;
+
+ err = err > 0 ? err : -err;
+
+ if (err < __LIBBPF_ERRNO__START) {
+ int ret;
+
+ ret = strerror_r(err, buf, size);
+ buf[size - 1] = '\0';
+ return ret;
+ }
+
+ if (err < __LIBBPF_ERRNO__END) {
+ const char *msg;
+
+ msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
+ snprintf(buf, size, "%s", msg);
+ buf[size - 1] = '\0';
+ return 0;
+ }
+
+ snprintf(buf, size, "Unknown libbpf error %d", err);
+ buf[size - 1] = '\0';
+ return -1;
+}
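
The strerror table above covers only libbpf's private range; a short sketch of the intended caller-side behavior, assuming the usual LIBBPF_ERRNO__* enum values from libbpf.h:

	#include <errno.h>
	#include "libbpf.h"

	static void report(void)
	{
		char buf[128];

		/* Below __LIBBPF_ERRNO__START: falls through to strerror_r(). */
		libbpf_strerror(-ENOENT, buf, sizeof(buf));

		/* Inside the private range: indexes libbpf_strerror_table[]. */
		libbpf_strerror(LIBBPF_ERRNO__FORMAT, buf, sizeof(buf));
	}

Note that both positive and negative error values are accepted; the function normalizes the sign before classifying the code.
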
diff --git a/tools/memory-model/Documentation/explanation.txt b/tools/memory-model/Documentation/explanation.txt
index 1b09f3175a1f..0cbd1ef8f86d 100644
--- a/tools/memory-model/Documentation/explanation.txt
+++ b/tools/memory-model/Documentation/explanation.txt
@@ -804,7 +804,7 @@ type of fence:
Second, some types of fence affect the way the memory subsystem
propagates stores. When a fence instruction is executed on CPU C:
- For each other CPU C', smb_wmb() forces all po-earlier stores
+ For each other CPU C', smp_wmb() forces all po-earlier stores
on C to propagate to C' before any po-later stores do.
For each other CPU C', any store which propagates to C before
diff --git a/tools/memory-model/Documentation/recipes.txt b/tools/memory-model/Documentation/recipes.txt
index ee4309a87fc4..af72700cc20a 100644
--- a/tools/memory-model/Documentation/recipes.txt
+++ b/tools/memory-model/Documentation/recipes.txt
@@ -126,7 +126,7 @@ However, it is not necessarily the case that accesses ordered by
locking will be seen as ordered by CPUs not holding that lock.
Consider this example:
- /* See Z6.0+pooncelock+pooncelock+pombonce.litmus. */
+ /* See Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus. */
void CPU0(void)
{
spin_lock(&mylock);
@@ -292,7 +292,7 @@ and to use smp_load_acquire() instead of smp_rmb(). However, the older
smp_wmb() and smp_rmb() APIs are still heavily used, so it is important
to understand their use cases. The general approach is shown below:
- /* See MP+wmbonceonce+rmbonceonce.litmus. */
+ /* See MP+fencewmbonceonce+fencermbonceonce.litmus. */
void CPU0(void)
{
WRITE_ONCE(x, 1);
@@ -322,9 +322,9 @@ the following write-side code fragment:
And the xlog_valid_lsn() function in fs/xfs/xfs_log_priv.h contains
the corresponding read-side code fragment:
- cur_cycle = ACCESS_ONCE(log->l_curr_cycle);
+ cur_cycle = READ_ONCE(log->l_curr_cycle);
smp_rmb();
- cur_block = ACCESS_ONCE(log->l_curr_block);
+ cur_block = READ_ONCE(log->l_curr_block);
Alternatively, consider the following comment in function
perf_output_put_handle() in kernel/events/ring_buffer.c:
@@ -360,7 +360,7 @@ can be seen in the LB+poonceonces.litmus litmus test.
One way of avoiding the counter-intuitive outcome is through the use of a
control dependency paired with a full memory barrier:
- /* See LB+ctrlonceonce+mbonceonce.litmus. */
+ /* See LB+fencembonceonce+ctrlonceonce.litmus. */
void CPU0(void)
{
r0 = READ_ONCE(x);
@@ -476,7 +476,7 @@ that one CPU first stores to one variable and then loads from a second,
while another CPU stores to the second variable and then loads from the
first. Preserving order requires nothing less than full barriers:
- /* See SB+mbonceonces.litmus. */
+ /* See SB+fencembonceonces.litmus. */
void CPU0(void)
{
WRITE_ONCE(x, 1);
diff --git a/tools/memory-model/README b/tools/memory-model/README
index 734f7feaa5dc..ee987ce20aae 100644
--- a/tools/memory-model/README
+++ b/tools/memory-model/README
@@ -35,13 +35,13 @@ BASIC USAGE: HERD7
The memory model is used, in conjunction with "herd7", to exhaustively
explore the state space of small litmus tests.
-For example, to run SB+mbonceonces.litmus against the memory model:
+For example, to run SB+fencembonceonces.litmus against the memory model:
- $ herd7 -conf linux-kernel.cfg litmus-tests/SB+mbonceonces.litmus
+ $ herd7 -conf linux-kernel.cfg litmus-tests/SB+fencembonceonces.litmus
Here is the corresponding output:
- Test SB+mbonceonces Allowed
+ Test SB+fencembonceonces Allowed
States 3
0:r0=0; 1:r0=1;
0:r0=1; 1:r0=0;
@@ -50,8 +50,8 @@ Here is the corresponding output:
Witnesses
Positive: 0 Negative: 3
Condition exists (0:r0=0 /\ 1:r0=0)
- Observation SB+mbonceonces Never 0 3
- Time SB+mbonceonces 0.01
+ Observation SB+fencembonceonces Never 0 3
+ Time SB+fencembonceonces 0.01
Hash=d66d99523e2cac6b06e66f4c995ebb48
The "Positive: 0 Negative: 3" and the "Never 0 3" each indicate that
@@ -67,16 +67,16 @@ BASIC USAGE: KLITMUS7
The "klitmus7" tool converts a litmus test into a Linux kernel module,
which may then be loaded and run.
-For example, to run SB+mbonceonces.litmus against hardware:
+For example, to run SB+fencembonceonces.litmus against hardware:
$ mkdir mymodules
- $ klitmus7 -o mymodules litmus-tests/SB+mbonceonces.litmus
+ $ klitmus7 -o mymodules litmus-tests/SB+fencembonceonces.litmus
$ cd mymodules ; make
$ sudo sh run.sh
The corresponding output includes:
- Test SB+mbonceonces Allowed
+ Test SB+fencembonceonces Allowed
Histogram (3 states)
644580 :>0:r0=1; 1:r0=0;
644328 :>0:r0=0; 1:r0=1;
@@ -86,8 +86,8 @@ The corresponding output includes:
Positive: 0, Negative: 2000000
Condition exists (0:r0=0 /\ 1:r0=0) is NOT validated
Hash=d66d99523e2cac6b06e66f4c995ebb48
- Observation SB+mbonceonces Never 0 2000000
- Time SB+mbonceonces 0.16
+ Observation SB+fencembonceonces Never 0 2000000
+ Time SB+fencembonceonces 0.16
The "Positive: 0 Negative: 2000000" and the "Never 0 2000000" indicate
that during two million trials, the state specified in this litmus
diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index 64f5740e0e75..b84fb2f67109 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -13,7 +13,7 @@
"Linux-kernel memory consistency model"
-enum Accesses = 'once (*READ_ONCE,WRITE_ONCE,ACCESS_ONCE*) ||
+enum Accesses = 'once (*READ_ONCE,WRITE_ONCE*) ||
'release (*smp_store_release*) ||
'acquire (*smp_load_acquire*) ||
'noreturn (* R of non-return RMW *)
diff --git a/tools/memory-model/litmus-tests/IRIW+mbonceonces+OnceOnce.litmus b/tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus
index 98a3716efa37..e729d2776e89 100644
--- a/tools/memory-model/litmus-tests/IRIW+mbonceonces+OnceOnce.litmus
+++ b/tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus
@@ -1,4 +1,4 @@
-C IRIW+mbonceonces+OnceOnce
+C IRIW+fencembonceonces+OnceOnce
(*
* Result: Never
diff --git a/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus b/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus
index 7a39a0aaa976..0f749e419b34 100644
--- a/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus
+++ b/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus
@@ -1,4 +1,4 @@
-C ISA2+pooncelock+pooncelock+pombonce.litmus
+C ISA2+pooncelock+pooncelock+pombonce
(*
* Result: Sometimes
diff --git a/tools/memory-model/litmus-tests/LB+ctrlonceonce+mbonceonce.litmus b/tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus
index de6708229dd1..4727f5aaf03b 100644
--- a/tools/memory-model/litmus-tests/LB+ctrlonceonce+mbonceonce.litmus
+++ b/tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus
@@ -1,4 +1,4 @@
-C LB+ctrlonceonce+mbonceonce
+C LB+fencembonceonce+ctrlonceonce
(*
* Result: Never
diff --git a/tools/memory-model/litmus-tests/MP+wmbonceonce+rmbonceonce.litmus b/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus
index c078f38ff27a..a273da9faa6d 100644
--- a/tools/memory-model/litmus-tests/MP+wmbonceonce+rmbonceonce.litmus
+++ b/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus
@@ -1,4 +1,4 @@
-C MP+wmbonceonce+rmbonceonce
+C MP+fencewmbonceonce+fencermbonceonce
(*
* Result: Never
diff --git a/tools/memory-model/litmus-tests/R+mbonceonces.litmus b/tools/memory-model/litmus-tests/R+fencembonceonces.litmus
index a0e884ad2132..222a0b850b4a 100644
--- a/tools/memory-model/litmus-tests/R+mbonceonces.litmus
+++ b/tools/memory-model/litmus-tests/R+fencembonceonces.litmus
@@ -1,4 +1,4 @@
-C R+mbonceonces
+C R+fencembonceonces
(*
* Result: Never
diff --git a/tools/memory-model/litmus-tests/README b/tools/memory-model/litmus-tests/README
index 17eb9a8c222d..4581ec2d3c57 100644
--- a/tools/memory-model/litmus-tests/README
+++ b/tools/memory-model/litmus-tests/README
@@ -18,7 +18,7 @@ CoWW+poonceonce.litmus
Test of write-write coherence, that is, whether or not two
successive writes to the same variable are ordered.
-IRIW+mbonceonces+OnceOnce.litmus
+IRIW+fencembonceonces+OnceOnce.litmus
Test of independent reads from independent writes with smp_mb()
	between each pair of reads. In other words, is smp_mb()
sufficient to cause two different reading processes to agree on
@@ -47,7 +47,7 @@ ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus
Can a release-acquire chain order a prior store against
a later load?
-LB+ctrlonceonce+mbonceonce.litmus
+LB+fencembonceonce+ctrlonceonce.litmus
Does a control dependency and an smp_mb() suffice for the
load-buffering litmus test, where each process reads from one
of two variables then writes to the other?
@@ -88,14 +88,14 @@ MP+porevlocks.litmus
As below, but with the first access of the writer process
and the second access of reader process protected by a lock.
-MP+wmbonceonce+rmbonceonce.litmus
+MP+fencewmbonceonce+fencermbonceonce.litmus
	Does an smp_wmb() (between the stores) and an smp_rmb() (between
the loads) suffice for the message-passing litmus test, where one
process writes data and then a flag, and the other process reads
the flag and then the data. (This is similar to the ISA2 tests,
but with two processes instead of three.)
-R+mbonceonces.litmus
+R+fencembonceonces.litmus
This is the fully ordered (via smp_mb()) version of one of
the classic counterintuitive litmus tests that illustrates the
effects of store propagation delays.
@@ -103,7 +103,7 @@ R+mbonceonces.litmus
R+poonceonces.litmus
As above, but without the smp_mb() invocations.
-SB+mbonceonces.litmus
+SB+fencembonceonces.litmus
	This is the fully ordered (again, via smp_mb()) version of store
buffering, which forms the core of Dekker's mutual-exclusion
algorithm.
@@ -111,15 +111,24 @@ SB+mbonceonces.litmus
SB+poonceonces.litmus
As above, but without the smp_mb() invocations.
+SB+rfionceonce-poonceonces.litmus
+ This litmus test demonstrates that LKMM is not fully multicopy
+	atomic. (Neither is it other-multicopy atomic.) This litmus test
+ also demonstrates the "locations" debugging aid, which designates
+ additional registers and locations to be printed out in the dump
+ of final states in the herd7 output. Without the "locations"
+ statement, only those registers and locations mentioned in the
+ "exists" clause will be printed.
+
S+poonceonces.litmus
As below, but without the smp_wmb() and acquire load.
-S+wmbonceonce+poacquireonce.litmus
+S+fencewmbonceonce+poacquireonce.litmus
	Can an smp_wmb(), instead of a release, and an acquire order
a prior store against a subsequent store?
WRC+poonceonces+Once.litmus
-WRC+pooncerelease+rmbonceonce+Once.litmus
+WRC+pooncerelease+fencermbonceonce+Once.litmus
These two are members of an extension of the MP litmus-test
class in which the first write is moved to a separate process.
The second is forbidden because smp_store_release() is
@@ -134,7 +143,7 @@ Z6.0+pooncelock+poonceLock+pombonce.litmus
As above, but with smp_mb__after_spinlock() immediately
following the spin_lock().
-Z6.0+pooncerelease+poacquirerelease+mbonceonce.litmus
+Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus
Is the ordering provided by a release-acquire chain sufficient
to make ordering apparent to accesses by a process that does
not participate in that release-acquire chain?
diff --git a/tools/memory-model/litmus-tests/S+wmbonceonce+poacquireonce.litmus b/tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus
index c53350205d28..18479823cd6c 100644
--- a/tools/memory-model/litmus-tests/S+wmbonceonce+poacquireonce.litmus
+++ b/tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus
@@ -1,4 +1,4 @@
-C S+wmbonceonce+poacquireonce
+C S+fencewmbonceonce+poacquireonce
(*
* Result: Never
diff --git a/tools/memory-model/litmus-tests/SB+mbonceonces.litmus b/tools/memory-model/litmus-tests/SB+fencembonceonces.litmus
index 74b874ffa8da..ed5fff18d223 100644
--- a/tools/memory-model/litmus-tests/SB+mbonceonces.litmus
+++ b/tools/memory-model/litmus-tests/SB+fencembonceonces.litmus
@@ -1,4 +1,4 @@
-C SB+mbonceonces
+C SB+fencembonceonces
(*
* Result: Never
diff --git a/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus b/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus
new file mode 100644
index 000000000000..04a16603660b
--- /dev/null
+++ b/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus
@@ -0,0 +1,32 @@
+C SB+rfionceonce-poonceonces
+
+(*
+ * Result: Sometimes
+ *
+ * This litmus test demonstrates that LKMM is not fully multicopy atomic.
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+ int r1;
+ int r2;
+
+ WRITE_ONCE(*x, 1);
+ r1 = READ_ONCE(*x);
+ r2 = READ_ONCE(*y);
+}
+
+P1(int *x, int *y)
+{
+ int r3;
+ int r4;
+
+ WRITE_ONCE(*y, 1);
+ r3 = READ_ONCE(*y);
+ r4 = READ_ONCE(*x);
+}
+
+locations [0:r1; 1:r3; x; y] (* Debug aid: Print things not in "exists". *)
+exists (0:r2=0 /\ 1:r4=0)
diff --git a/tools/memory-model/litmus-tests/WRC+pooncerelease+rmbonceonce+Once.litmus b/tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus
index ad3448b941e6..e9947250d7de 100644
--- a/tools/memory-model/litmus-tests/WRC+pooncerelease+rmbonceonce+Once.litmus
+++ b/tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus
@@ -1,4 +1,4 @@
-C WRC+pooncerelease+rmbonceonce+Once
+C WRC+pooncerelease+fencermbonceonce+Once
(*
* Result: Never
diff --git a/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+mbonceonce.litmus b/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus
index a20fc3fafb53..88e70b87a683 100644
--- a/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+mbonceonce.litmus
+++ b/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus
@@ -1,4 +1,4 @@
-C Z6.0+pooncerelease+poacquirerelease+mbonceonce
+C Z6.0+pooncerelease+poacquirerelease+fencembonceonce
(*
* Result: Sometimes
diff --git a/tools/memory-model/scripts/checkalllitmus.sh b/tools/memory-model/scripts/checkalllitmus.sh
index af0aa15ab84e..ca528f9a24d4 100644..100755
--- a/tools/memory-model/scripts/checkalllitmus.sh
+++ b/tools/memory-model/scripts/checkalllitmus.sh
@@ -9,7 +9,7 @@
# appended.
#
# Usage:
-# sh checkalllitmus.sh [ directory ]
+# checkalllitmus.sh [ directory ]
#
# The LINUX_HERD_OPTIONS environment variable may be used to specify
# arguments to herd, whose default is defined by the checklitmus.sh script.
diff --git a/tools/memory-model/scripts/checklitmus.sh b/tools/memory-model/scripts/checklitmus.sh
index e2e477472844..bf12a75c0719 100644..100755
--- a/tools/memory-model/scripts/checklitmus.sh
+++ b/tools/memory-model/scripts/checklitmus.sh
@@ -8,7 +8,7 @@
# with ".out" appended.
#
# Usage:
-# sh checklitmus.sh file.litmus
+# checklitmus.sh file.litmus
#
# The LINUX_HERD_OPTIONS environment variable may be used to specify
# arguments to herd, which default to "-conf linux-kernel.cfg". Thus,
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index f76d9914686a..c9d038f91af6 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -31,8 +31,8 @@ INCLUDES := -I$(srctree)/tools/include \
-I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
-I$(srctree)/tools/objtool/arch/$(ARCH)/include
WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
-CFLAGS += -Werror $(WARNINGS) $(HOSTCFLAGS) -g $(INCLUDES)
-LDFLAGS += -lelf $(LIBSUBCMD) $(HOSTLDFLAGS)
+CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES)
+LDFLAGS += -lelf $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
# Allow old libelf to be used:
elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
diff --git a/tools/objtool/arch/x86/include/asm/orc_types.h b/tools/objtool/arch/x86/include/asm/orc_types.h
index 9c9dc579bd7d..46f516dd80ce 100644
--- a/tools/objtool/arch/x86/include/asm/orc_types.h
+++ b/tools/objtool/arch/x86/include/asm/orc_types.h
@@ -88,6 +88,7 @@ struct orc_entry {
unsigned sp_reg:4;
unsigned bp_reg:4;
unsigned type:2;
+ unsigned end:1;
} __packed;
/*
@@ -101,6 +102,7 @@ struct unwind_hint {
s16 sp_offset;
u8 sp_reg;
u8 type;
+ u8 end;
};
#endif /* __ASSEMBLY__ */
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index f4a25bd1871f..2928939b98ec 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1157,6 +1157,7 @@ static int read_unwind_hints(struct objtool_file *file)
cfa->offset = hint->sp_offset;
insn->state.type = hint->type;
+ insn->state.end = hint->end;
}
return 0;
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index c6b68fcb926f..95700a2bcb7c 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -31,7 +31,7 @@ struct insn_state {
int stack_size;
unsigned char type;
bool bp_scratch;
- bool drap;
+ bool drap, end;
int drap_reg, drap_offset;
struct cfi_reg vals[CFI_NUM_REGS];
};
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index c3343820916a..faa444270ee3 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -203,7 +203,8 @@ int orc_dump(const char *_objname)
print_reg(orc[i].bp_reg, orc[i].bp_offset);
- printf(" type:%s\n", orc_type_name(orc[i].type));
+ printf(" type:%s end:%d\n",
+ orc_type_name(orc[i].type), orc[i].end);
}
elf_end(elf);
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index 18384d9be4e1..3f98dcfbc177 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -31,6 +31,8 @@ int create_orc(struct objtool_file *file)
struct cfi_reg *cfa = &insn->state.cfa;
struct cfi_reg *bp = &insn->state.regs[CFI_BP];
+ orc->end = insn->state.end;
+
if (cfa->base == CFI_UNDEFINED) {
orc->sp_reg = ORC_REG_UNDEFINED;
continue;
diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
index 9074b477bff0..af146bb03b4d 100644
--- a/tools/pci/pcitest.c
+++ b/tools/pci/pcitest.c
@@ -31,12 +31,17 @@
#define BILLION 1E9
static char *result[] = { "NOT OKAY", "OKAY" };
+static char *irq[] = { "LEGACY", "MSI", "MSI-X" };
struct pci_test {
char *device;
char barnum;
bool legacyirq;
unsigned int msinum;
+ unsigned int msixnum;
+ int irqtype;
+ bool set_irqtype;
+ bool get_irqtype;
bool read;
bool write;
bool copy;
@@ -65,6 +70,24 @@ static int run_test(struct pci_test *test)
fprintf(stdout, "%s\n", result[ret]);
}
+ if (test->set_irqtype) {
+ ret = ioctl(fd, PCITEST_SET_IRQTYPE, test->irqtype);
+ fprintf(stdout, "SET IRQ TYPE TO %s:\t\t", irq[test->irqtype]);
+ if (ret < 0)
+ fprintf(stdout, "FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
+ if (test->get_irqtype) {
+ ret = ioctl(fd, PCITEST_GET_IRQTYPE);
+ fprintf(stdout, "GET IRQ TYPE:\t\t");
+ if (ret < 0)
+ fprintf(stdout, "FAILED\n");
+ else
+ fprintf(stdout, "%s\n", irq[ret]);
+ }
+
if (test->legacyirq) {
ret = ioctl(fd, PCITEST_LEGACY_IRQ, 0);
fprintf(stdout, "LEGACY IRQ:\t");
@@ -83,6 +106,15 @@ static int run_test(struct pci_test *test)
fprintf(stdout, "%s\n", result[ret]);
}
+ if (test->msixnum > 0 && test->msixnum <= 2048) {
+ ret = ioctl(fd, PCITEST_MSIX, test->msixnum);
+ fprintf(stdout, "MSI-X%d:\t\t", test->msixnum);
+ if (ret < 0)
+ fprintf(stdout, "TEST FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
if (test->write) {
ret = ioctl(fd, PCITEST_WRITE, test->size);
fprintf(stdout, "WRITE (%7ld bytes):\t\t", test->size);
@@ -133,7 +165,7 @@ int main(int argc, char **argv)
/* set default endpoint device */
test->device = "/dev/pci-endpoint-test.0";
- while ((c = getopt(argc, argv, "D:b:m:lrwcs:")) != EOF)
+ while ((c = getopt(argc, argv, "D:b:m:x:i:Ilrwcs:")) != EOF)
switch (c) {
case 'D':
test->device = optarg;
@@ -151,6 +183,20 @@ int main(int argc, char **argv)
if (test->msinum < 1 || test->msinum > 32)
goto usage;
continue;
+ case 'x':
+ test->msixnum = atoi(optarg);
+ if (test->msixnum < 1 || test->msixnum > 2048)
+ goto usage;
+ continue;
+ case 'i':
+ test->irqtype = atoi(optarg);
+ if (test->irqtype < 0 || test->irqtype > 2)
+ goto usage;
+ test->set_irqtype = true;
+ continue;
+ case 'I':
+ test->get_irqtype = true;
+ continue;
case 'r':
test->read = true;
continue;
@@ -173,6 +219,9 @@ usage:
"\t-D <dev> PCI endpoint test device {default: /dev/pci-endpoint-test.0}\n"
"\t-b <bar num> BAR test (bar number between 0..5)\n"
"\t-m <msi num> MSI test (msi number between 1..32)\n"
+ "\t-x <msix num> \tMSI-X test (msix number between 1..2048)\n"
+ "\t-i <irq type> \tSet IRQ type (0 - Legacy, 1 - MSI, 2 - MSI-X)\n"
+ "\t-I Get current IRQ type configured\n"
"\t-l Legacy IRQ test\n"
"\t-r Read buffer test\n"
"\t-w Write buffer test\n"
diff --git a/tools/pci/pcitest.sh b/tools/pci/pcitest.sh
index 77e8c85ef744..75ed48ff2990 100644
--- a/tools/pci/pcitest.sh
+++ b/tools/pci/pcitest.sh
@@ -16,7 +16,10 @@ echo
echo "Interrupt tests"
echo
+pcitest -i 0
pcitest -l
+
+pcitest -i 1
msi=1
while [ $msi -lt 33 ]
@@ -26,9 +29,21 @@ do
done
echo
+pcitest -i 2
+msix=1
+
+while [ $msix -lt 2049 ]
+do
+ pcitest -x $msix
+ msix=`expr $msix + 1`
+done
+echo
+
echo "Read Tests"
echo
+pcitest -i 1
+
pcitest -r -s 1
pcitest -r -s 1024
pcitest -r -s 1025
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 11300dbe35c5..236b9b97dfdb 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -18,6 +18,10 @@ various perf commands with the -e option.
OPTIONS
-------
+-d::
+--desc::
+Print extra event descriptions. (default)
+
--no-desc::
Don't print descriptions.
@@ -25,11 +29,13 @@ Don't print descriptions.
--long-desc::
Print longer event descriptions.
+--debug::
+Enable debugging output.
+
--details::
Print how named events are resolved internally into perf events, and also
any extra expressions computed by perf stat.
-
[[EVENT_MODIFIERS]]
EVENT MODIFIERS
---------------
@@ -234,7 +240,7 @@ perf also supports group leader sampling using the :S specifier.
perf record -e '{cycles,instructions}:S' ...
perf report --group
-Normally all events in a event group sample, but with :S only
+Normally all events in an event group sample, but with :S only
the first event (the leader) samples, and it only reads the values of the
other events in the group.
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 04168da4268e..246dee081efd 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -94,7 +94,7 @@ OPTIONS
"perf report" to view group events together.
--filter=<filter>::
- Event filter. This option should follow a event selector (-e) which
+ Event filter. This option should follow an event selector (-e) which
selects either tracepoint event(s) or a hardware trace PMU
(e.g. Intel PT or CoreSight).
@@ -153,7 +153,7 @@ OPTIONS
--exclude-perf::
Don't record events issued by perf itself. This option should follow
- a event selector (-e) which selects tracepoint event(s). It adds a
+ an event selector (-e) which selects tracepoint event(s). It adds a
filter expression 'common_pid != $PERFPID' to filters. If other
'--filter' exists, the new filter expression will be combined with
them by '&&'.
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index f5a3b402589e..f6d1a03c7523 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -54,6 +54,8 @@ endif
ifeq ($(SRCARCH),arm64)
NO_PERF_REGS := 0
+ NO_SYSCALL_TABLE := 0
+ CFLAGS += -I$(OUTPUT)arch/arm64/include/generated
LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
endif
@@ -905,8 +907,8 @@ bindir = $(abspath $(prefix)/$(bindir_relative))
mandir = share/man
infodir = share/info
perfexecdir = libexec/perf-core
-perf_include_dir = lib/include/perf
-perf_examples_dir = lib/examples/perf
+perf_include_dir = lib/perf/include
+perf_examples_dir = lib/perf/examples
sharedir = $(prefix)/share
template_dir = share/perf-core/templates
STRACE_GROUPS_DIR = share/perf-core/strace/groups
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index ecc9fc952655..b3d1b12a5081 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -384,6 +384,8 @@ export INSTALL SHELL_PATH
SHELL = $(SHELL_PATH)
+linux_uapi_dir := $(srctree)/tools/include/uapi/linux
+
beauty_outdir := $(OUTPUT)trace/beauty/generated
beauty_ioctl_outdir := $(beauty_outdir)/ioctl
drm_ioctl_array := $(beauty_ioctl_outdir)/drm_ioctl_array.c
@@ -431,6 +433,12 @@ kvm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/kvm_ioctl.sh
$(kvm_ioctl_array): $(kvm_hdr_dir)/kvm.h $(kvm_ioctl_tbl)
$(Q)$(SHELL) '$(kvm_ioctl_tbl)' $(kvm_hdr_dir) > $@
+socket_ipproto_array := $(beauty_outdir)/socket_ipproto_array.c
+socket_ipproto_tbl := $(srctree)/tools/perf/trace/beauty/socket_ipproto.sh
+
+$(socket_ipproto_array): $(linux_uapi_dir)/in.h $(socket_ipproto_tbl)
+ $(Q)$(SHELL) '$(socket_ipproto_tbl)' $(linux_uapi_dir) > $@
+
vhost_virtio_ioctl_array := $(beauty_ioctl_outdir)/vhost_virtio_ioctl_array.c
vhost_virtio_hdr_dir := $(srctree)/tools/include/uapi/linux
vhost_virtio_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/vhost_virtio_ioctl.sh
@@ -566,6 +574,7 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
$(sndrv_ctl_ioctl_array) \
$(kcmp_type_array) \
$(kvm_ioctl_array) \
+ $(socket_ipproto_array) \
$(vhost_virtio_ioctl_array) \
$(madvise_behavior_array) \
$(perf_ioctl_array) \
@@ -860,6 +869,7 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
$(OUTPUT)$(sndrv_pcm_ioctl_array) \
$(OUTPUT)$(kvm_ioctl_array) \
$(OUTPUT)$(kcmp_type_array) \
+ $(OUTPUT)$(socket_ipproto_array) \
$(OUTPUT)$(vhost_virtio_ioctl_array) \
$(OUTPUT)$(perf_ioctl_array) \
$(OUTPUT)$(prctl_option_array) \
diff --git a/tools/perf/arch/arm64/Makefile b/tools/perf/arch/arm64/Makefile
index 91de4860faad..f013b115dc86 100644
--- a/tools/perf/arch/arm64/Makefile
+++ b/tools/perf/arch/arm64/Makefile
@@ -4,3 +4,24 @@ PERF_HAVE_DWARF_REGS := 1
endif
PERF_HAVE_JITDUMP := 1
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
+
+#
+# Syscall table generation for perf
+#
+
+out := $(OUTPUT)arch/arm64/include/generated/asm
+header := $(out)/syscalls.c
+sysdef := $(srctree)/tools/include/uapi/asm-generic/unistd.h
+sysprf := $(srctree)/tools/perf/arch/arm64/entry/syscalls/
+systbl := $(sysprf)/mksyscalltbl
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
+
+$(header): $(sysdef) $(systbl)
+ $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(sysdef) > $@
+
+clean::
+ $(call QUIET_CLEAN, arm64) $(RM) $(header)
+
+archheaders: $(header)
diff --git a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
new file mode 100755
index 000000000000..52e197317d3e
--- /dev/null
+++ b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
@@ -0,0 +1,62 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate system call table for perf. Derived from
+# powerpc script.
+#
+# Copyright IBM Corp. 2017
+# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+# Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
+# Changed by: Kim Phillips <kim.phillips@arm.com>
+
+gcc=$1
+hostcc=$2
+input=$3
+
+if ! test -r $input; then
+ echo "Could not read input file" >&2
+ exit 1
+fi
+
+create_table_from_c()
+{
+ local sc nr last_sc
+
+ create_table_exe=`mktemp /tmp/create-table-XXXXXX`
+
+ {
+
+ cat <<-_EoHEADER
+ #include <stdio.h>
+ #define __ARCH_WANT_RENAMEAT
+ #include "$input"
+ int main(int argc, char *argv[])
+ {
+ _EoHEADER
+
+ while read sc nr; do
+ printf "%s\n" " printf(\"\\t[%d] = \\\"$sc\\\",\\n\", __NR_$sc);"
+ last_sc=$sc
+ done
+
+ printf "%s\n" " printf(\"#define SYSCALLTBL_ARM64_MAX_ID %d\\n\", __NR_$last_sc);"
+ printf "}\n"
+
+ } | $hostcc -o $create_table_exe -x c -
+
+ $create_table_exe
+
+ rm -f $create_table_exe
+}
+
+create_table()
+{
+ echo "static const char *syscalltbl_arm64[] = {"
+ create_table_from_c
+ echo "};"
+}
+
+$gcc -E -dM -x c $input \
+ |sed -ne 's/^#define __NR_//p' \
+ |sort -t' ' -k2 -nu \
+ |create_table
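
For reference, the generated syscalls.c should look roughly like the sketch below; the specific entries and the maximum ID are illustrative and track whatever __NR_* definitions the processed unistd.h provides (the #define is emitted before the closing brace and is consumed by the preprocessor, so the initializer remains valid):

	static const char *syscalltbl_arm64[] = {
		[0] = "io_setup",
		[1] = "io_destroy",
		[2] = "io_submit",
		/* ... one entry per __NR_* definition ... */
	#define SYSCALLTBL_ARM64_MAX_ID 292	/* illustrative value */
	};
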
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index ef5d59a5742e..7c6eeb4633fe 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -58,9 +58,13 @@ static int check_return_reg(int ra_regno, Dwarf_Frame *frame)
}
/*
- * Check if return address is on the stack.
+ * Check if return address is on the stack. If return address
+ * is in a register (typically R0), it is yet to be saved on
+ * the stack.
*/
- if (nops != 0 || ops != NULL)
+ if ((nops != 0 || ops != NULL) &&
+ !(nops == 1 && ops[0].atom == DW_OP_regx &&
+ ops[0].number2 == 0 && ops[0].offset == 0))
return 0;
/*
@@ -246,7 +250,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
if (!chain || chain->nr < 3)
return skip_slot;
- ip = chain->ips[2];
+ ip = chain->ips[1];
thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
diff --git a/tools/perf/arch/s390/util/kvm-stat.c b/tools/perf/arch/s390/util/kvm-stat.c
index d233e2eb9592..aaabab5e2830 100644
--- a/tools/perf/arch/s390/util/kvm-stat.c
+++ b/tools/perf/arch/s390/util/kvm-stat.c
@@ -102,7 +102,7 @@ const char * const kvm_skip_events[] = {
int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
{
- if (strstr(cpuid, "IBM/S390")) {
+ if (strstr(cpuid, "IBM")) {
kvm->exit_reasons = sie_exit_reasons;
kvm->exit_reasons_isa = "SIE";
} else
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 6a8738f7ead3..f3aa9d02a5ab 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -2193,7 +2193,7 @@ static void print_cacheline(struct c2c_hists *c2c_hists,
fprintf(out, "%s\n", bf);
fprintf(out, " -------------------------------------------------------------\n");
- hists__fprintf(&c2c_hists->hists, false, 0, 0, 0, out, true);
+ hists__fprintf(&c2c_hists->hists, false, 0, 0, 0, out, false);
}
static void print_pareto(FILE *out)
@@ -2268,7 +2268,7 @@ static void perf_c2c__hists_fprintf(FILE *out, struct perf_session *session)
fprintf(out, "=================================================\n");
fprintf(out, "#\n");
- hists__fprintf(&c2c.hists.hists, true, 0, 0, 0, stdout, false);
+ hists__fprintf(&c2c.hists.hists, true, 0, 0, 0, stdout, true);
fprintf(out, "\n");
fprintf(out, "=================================================\n");
@@ -2349,6 +2349,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
" s Toggle full length of symbol and source line columns \n"
" q Return back to cacheline list \n";
+ if (!he)
+ return 0;
+
/* Display compact version first. */
c2c.symbol_full = false;
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index d660cb7b222b..39db2ee32d48 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -696,7 +696,7 @@ static void hists__process(struct hists *hists)
hists__output_resort(hists, NULL);
hists__fprintf(hists, !quiet, 0, 0, 0, stdout,
- symbol_conf.use_callchain);
+ !symbol_conf.use_callchain);
}
static void data__fprintf(void)
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index c04dc7b53797..02f7a3c27761 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -478,8 +478,8 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout,
- symbol_conf.use_callchain ||
- symbol_conf.show_branchflag_count);
+ !(symbol_conf.use_callchain ||
+ symbol_conf.show_branchflag_count));
fprintf(stdout, "\n\n");
}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 05be023c3f0e..d097b5b47eb8 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -296,18 +296,6 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
return perf_evsel__open_per_thread(evsel, evsel_list->threads);
}
-/*
- * Does the counter have nsecs as a unit?
- */
-static inline int nsec_counter(struct perf_evsel *evsel)
-{
- if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
- perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
- return 1;
-
- return 0;
-}
-
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
@@ -1058,34 +1046,6 @@ static void print_metric_header(void *ctx, const char *color __maybe_unused,
fprintf(os->fh, "%*s ", metric_only_len, unit);
}
-static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
-{
- FILE *output = stat_config.output;
- double msecs = avg / NSEC_PER_MSEC;
- const char *fmt_v, *fmt_n;
- char name[25];
-
- fmt_v = csv_output ? "%.6f%s" : "%18.6f%s";
- fmt_n = csv_output ? "%s" : "%-25s";
-
- aggr_printout(evsel, id, nr);
-
- scnprintf(name, sizeof(name), "%s%s",
- perf_evsel__name(evsel), csv_output ? "" : " (msec)");
-
- fprintf(output, fmt_v, msecs, csv_sep);
-
- if (csv_output)
- fprintf(output, "%s%s", evsel->unit, csv_sep);
- else
- fprintf(output, "%-*s%s", unit_width, evsel->unit, csv_sep);
-
- fprintf(output, fmt_n, name);
-
- if (evsel->cgrp)
- fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
-}
-
static int first_shadow_cpu(struct perf_evsel *evsel, int id)
{
int i;
@@ -1241,11 +1201,7 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval,
return;
}
- if (metric_only)
- /* nothing */;
- else if (nsec_counter(counter))
- nsec_printout(id, nr, counter, uval);
- else
+ if (!metric_only)
abs_printout(id, nr, counter, uval);
out.print_metric = pm;
@@ -1331,7 +1287,7 @@ static void collect_all_aliases(struct perf_evsel *counter,
alias->scale != counter->scale ||
alias->cgrp != counter->cgrp ||
strcmp(alias->unit, counter->unit) ||
- nsec_counter(alias) != nsec_counter(counter))
+ perf_evsel__is_clock(alias) != perf_evsel__is_clock(counter))
break;
alias->merged_stat = true;
cb(alias, data, false);
@@ -2449,6 +2405,18 @@ static int add_default_attributes(void)
return 0;
if (transaction_run) {
+		/* Handle -T as -M transaction. Once platform-specific metrics
+		 * support has been added to the json files, all architectures
+		 * will use this approach. To determine transaction support
+		 * on an architecture, test for such a metric name.
+ */
+ if (metricgroup__has_metric("transaction")) {
+ struct option opt = { .value = &evsel_list };
+
+ return metricgroup__parse_groups(&opt, "transaction",
+ &metric_events);
+ }
+
if (pmu_have_event("cpu", "cycles-ct") &&
pmu_have_event("cpu", "el-start"))
err = parse_events(evsel_list, transaction_attrs,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ffdc2769ff9f..d21d8751e749 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -307,7 +307,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
hists__output_recalc_col_len(hists, top->print_entries - printed);
putchar('\n');
hists__fprintf(hists, false, top->print_entries - printed, win_width,
- top->min_percent, stdout, symbol_conf.use_callchain);
+ top->min_percent, stdout, !symbol_conf.use_callchain);
}
static void prompt_integer(int *target, const char *msg)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 6a748eca2edb..88561eed7950 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -291,7 +291,7 @@ size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const cha
{
int idx = val - sa->offset;
- if (idx < 0 || idx >= sa->nr_entries)
+ if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL)
return scnprintf(bf, size, intfmt, val);
return scnprintf(bf, size, "%s", sa->entries[idx]);
@@ -761,10 +761,12 @@ static struct syscall_fmt {
.arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
{ .name = "socket",
.arg = { [0] = STRARRAY(family, socket_families),
- [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, }, },
+ [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
+ [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
{ .name = "socketpair",
.arg = { [0] = STRARRAY(family, socket_families),
- [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, }, },
+ [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
+ [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
{ .name = "stat", .alias = "newstat", },
{ .name = "statx",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ },
@@ -2990,6 +2992,7 @@ static int trace__parse_events_option(const struct option *opt, const char *str,
if (trace__validate_ev_qualifier(trace))
goto out;
+ trace->trace_syscalls = true;
}
err = 0;
@@ -3045,7 +3048,7 @@ int cmd_trace(int argc, const char **argv)
},
.output = stderr,
.show_comm = true,
- .trace_syscalls = true,
+ .trace_syscalls = false,
.kernel_syscallchains = false,
.max_stack = UINT_MAX,
};
@@ -3191,13 +3194,7 @@ int cmd_trace(int argc, const char **argv)
if (!trace.trace_syscalls && !trace.trace_pgfaults &&
trace.evlist->nr_entries == 0 /* Was --events used? */) {
- pr_err("Please specify something to trace.\n");
- return -1;
- }
-
- if (!trace.trace_syscalls && trace.ev_qualifier) {
- pr_err("The -e option can't be used with --no-syscalls.\n");
- goto out;
+ trace.trace_syscalls = true;
}
if (output_name != NULL) {
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 10f333e2e825..de28466c0186 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -7,6 +7,7 @@ include/uapi/drm/i915_drm.h
include/uapi/linux/fcntl.h
include/uapi/linux/kcmp.h
include/uapi/linux/kvm.h
+include/uapi/linux/in.h
include/uapi/linux/perf_event.h
include/uapi/linux/prctl.h
include/uapi/linux/sched.h
@@ -35,6 +36,7 @@ arch/s390/include/uapi/asm/ptrace.h
arch/s390/include/uapi/asm/sie.h
arch/arm/include/uapi/asm/kvm.h
arch/arm64/include/uapi/asm/kvm.h
+arch/arm64/include/uapi/asm/unistd.h
arch/alpha/include/uapi/asm/errno.h
arch/mips/include/asm/errno.h
arch/mips/include/uapi/asm/errno.h
@@ -53,6 +55,7 @@ include/uapi/asm-generic/errno.h
include/uapi/asm-generic/errno-base.h
include/uapi/asm-generic/ioctls.h
include/uapi/asm-generic/mman-common.h
+include/uapi/asm-generic/unistd.h
'
check_2 () {
diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h
index dd764ad5efdf..a63aa6241b7f 100644
--- a/tools/perf/include/bpf/bpf.h
+++ b/tools/perf/include/bpf/bpf.h
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#ifndef _PERF_BPF_H
#define _PERF_BPF_H
+
+#include <uapi/linux/bpf.h>
+
#define SEC(NAME) __attribute__((section(NAME), used))
#define probe(function, vars) \
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index d215714f48df..21bf7f5a3cf5 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -25,7 +25,9 @@ static inline unsigned long long rdclock(void)
return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
+#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 1024
+#endif
extern const char *input_name;
extern bool perf_host, perf_guest;
diff --git a/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json
index bc03c06c3918..752e47eb6977 100644
--- a/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json
+++ b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2/core-imp-def.json
@@ -12,6 +12,21 @@
"ArchStdEvent": "L1D_CACHE_REFILL_WR",
},
{
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL",
+ },
+ {
"ArchStdEvent": "L1D_TLB_REFILL_RD",
},
{
@@ -24,9 +39,75 @@
"ArchStdEvent": "L1D_TLB_WR",
},
{
+ "ArchStdEvent": "L2D_TLB_REFILL_RD",
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR",
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD",
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR",
+ },
+ {
"ArchStdEvent": "BUS_ACCESS_RD",
- },
- {
+ },
+ {
"ArchStdEvent": "BUS_ACCESS_WR",
- }
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD",
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR",
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC",
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC",
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC",
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF",
+ },
+ {
+ "ArchStdEvent": "EXC_SVC",
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ",
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ",
+ },
+ {
+ "ArchStdEvent": "EXC_SMC",
+ },
+ {
+ "ArchStdEvent": "EXC_HVC",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ",
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/basic.json b/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
index 8bf16759ca53..2dd8dafff2ef 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
@@ -1,71 +1,83 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
"BriefDescription": "CPU Cycles",
"PublicDescription": "Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
"BriefDescription": "Instructions",
"PublicDescription": "Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
"BriefDescription": "L1I Directory Writes",
"PublicDescription": "Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
"BriefDescription": "L1I Penalty Cycles",
"PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
"BriefDescription": "L1D Directory Writes",
"PublicDescription": "Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
"BriefDescription": "L1D Penalty Cycles",
"PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
"BriefDescription": "Problem-State CPU Cycles",
"PublicDescription": "Problem-State Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
"BriefDescription": "Problem-State Instructions",
"PublicDescription": "Problem-State Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
"BriefDescription": "Problem-State L1I Directory Writes",
"PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1I Penalty Cycles",
"PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
"BriefDescription": "Problem-State L1D Directory Writes",
"PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1D Penalty Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
index 7e5b72492141..db286f19e7b6 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
@@ -1,95 +1,111 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
"BriefDescription": "PRNG Functions",
"PublicDescription": "Total number of the PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
"BriefDescription": "PRNG Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
"BriefDescription": "PRNG Blocked Functions",
"PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
"BriefDescription": "PRNG Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
"BriefDescription": "SHA Functions",
"PublicDescription": "Total number of SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
"BriefDescription": "SHA Cycles",
"PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
"BriefDescription": "SHA Blocked Functions",
"PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
"BriefDescription": "SHA Bloced Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
"BriefDescription": "DEA Functions",
"PublicDescription": "Total number of the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
"BriefDescription": "DEA Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
"BriefDescription": "DEA Blocked Functions",
"PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
"BriefDescription": "DEA Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
"BriefDescription": "AES Functions",
"PublicDescription": "Total number of AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
"BriefDescription": "AES Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
"BriefDescription": "AES Blocked Functions",
"PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/extended.json b/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
index 0feedb40f30f..b6b7f29ca831 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
@@ -1,107 +1,125 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "128",
"EventName": "L1I_L2_SOURCED_WRITES",
"BriefDescription": "L1I L2 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 (L1.5) cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "L1D_L2_SOURCED_WRITES",
"BriefDescription": "L1D L2 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from the Level-2 (L1.5) cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "L1I_L3_LOCAL_WRITES",
"BriefDescription": "L1I L3 Local Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from the Level-3 cache that is on the same book as the Instruction cache (Local L2 cache)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "L1D_L3_LOCAL_WRITES",
"BriefDescription": "L1D L3 Local Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the installtion cache line was source from the Level-3 cache that is on the same book as the Data cache (Local L2 cache)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "L1I_L3_REMOTE_WRITES",
"BriefDescription": "L1I L3 Remote Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Instruction cache (Remote L2 cache)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L3_REMOTE_WRITES",
"BriefDescription": "L1D L3 Remote Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Data cache (Remote L2 cache)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the s ame book as the Instruction cache (Local Memory)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1I_CACHELINE_INVALIDATES",
"BriefDescription": "L1I Cacheline Invalidates",
"PublicDescription": "A cache line in the Level-1 I-Cache has been invalidated by a store on the same CPU as the Level-1 I-Cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
"PublicDescription": "A translation entry has been written into the Level-1 Instruction Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "ITLB1_MISSES",
"BriefDescription": "ITLB1 Misses",
"PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "DTLB1_MISSES",
"BriefDescription": "DTLB1 Misses",
"PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle an DTLB1 miss is in progress"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L2C_STORES_SENT",
"BriefDescription": "L2C Stores Sent",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/basic.json b/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
index 8bf16759ca53..2dd8dafff2ef 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
@@ -1,71 +1,83 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
"BriefDescription": "CPU Cycles",
"PublicDescription": "Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
"BriefDescription": "Instructions",
"PublicDescription": "Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
"BriefDescription": "L1I Directory Writes",
"PublicDescription": "Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
"BriefDescription": "L1I Penalty Cycles",
"PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
"BriefDescription": "L1D Directory Writes",
"PublicDescription": "Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
"BriefDescription": "L1D Penalty Cycles",
"PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
"BriefDescription": "Problem-State CPU Cycles",
"PublicDescription": "Problem-State Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
"BriefDescription": "Problem-State Instructions",
"PublicDescription": "Problem-State Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
"BriefDescription": "Problem-State L1I Directory Writes",
"PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1I Penalty Cycles",
"PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
"BriefDescription": "Problem-State L1D Directory Writes",
"PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1D Penalty Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
index 7e5b72492141..db286f19e7b6 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
@@ -1,95 +1,111 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
"BriefDescription": "PRNG Functions",
"PublicDescription": "Total number of the PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
"BriefDescription": "PRNG Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
"BriefDescription": "PRNG Blocked Functions",
"PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
"BriefDescription": "PRNG Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
"BriefDescription": "SHA Functions",
"PublicDescription": "Total number of SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
"BriefDescription": "SHA Cycles",
"PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
"BriefDescription": "SHA Blocked Functions",
"PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
"BriefDescription": "SHA Bloced Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
"BriefDescription": "DEA Functions",
"PublicDescription": "Total number of the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
"BriefDescription": "DEA Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
"BriefDescription": "DEA Blocked Functions",
"PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
"BriefDescription": "DEA Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
"BriefDescription": "AES Functions",
"PublicDescription": "Total number of AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
"BriefDescription": "AES Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
"BriefDescription": "AES Blocked Functions",
"PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
index 9a002b6967f1..436ce33f1182 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
@@ -1,335 +1,391 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "128",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
"PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "DTLB1_MISSES",
"BriefDescription": "DTLB1 Misses",
"PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle a DTLB1 miss is in progress."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "DTLB1_GPAGE_WRITES",
"BriefDescription": "DTLB1 Two-Gigabyte Page Writes",
"PublicDescription": "Counter:132 Name:DTLB1_GPAGE_WRITES A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a two-gigabyte page."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "ITLB1_MISSES",
"BriefDescription": "ITLB1 Misses",
"PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays for a one-megabyte large page translation"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
"PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
"PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1C_TLB1_MISSES",
"BriefDescription": "L1C TLB1 Misses",
"PublicDescription": "Increments by one for any cycle where a Level-1 cache or Level-1 TLB miss is in progress."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_ONNODE_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Node L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONNODE_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Node L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_ONNODE_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Node L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Drawer L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFDRAWER_SCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Same-Column L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1D_OFFDRAWER_FCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "L1D_ONNODE_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Node Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Node memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "159",
"EventName": "L1D_ONDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "160",
"EventName": "L1D_OFFDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "161",
"EventName": "L1D_ONCHIP_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "162",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "163",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "164",
"EventName": "L1I_ONNODE_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "165",
"EventName": "L1I_ONNODE_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Node L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "166",
"EventName": "L1I_ONNODE_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Node L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "167",
"EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "168",
"EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Drawer L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "169",
"EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "170",
"EventName": "L1I_OFFDRAWER_SCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Same-Column L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "171",
"EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "172",
"EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "173",
"EventName": "L1I_OFFDRAWER_FCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Far-Column L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "174",
"EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "175",
"EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "176",
"EventName": "L1I_ONNODE_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Node Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Node memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "177",
"EventName": "L1I_ONDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "178",
"EventName": "L1I_OFFDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "179",
"EventName": "L1I_ONCHIP_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "218",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
"PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "219",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
"PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "220",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
"PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "448",
"EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE",
"BriefDescription": "Cycle count with one thread active",
"PublicDescription": "Cycle count with one thread active"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "449",
"EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
"BriefDescription": "Cycle count with two threads active",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json
new file mode 100644
index 000000000000..1a0034f79f73
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json
@@ -0,0 +1,7 @@
+[
+ {
+ "BriefDescription": "Transaction count",
+ "MetricName": "transaction",
+ "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+ }
+]
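
Unlike the event files, this new transaction.json defines a derived metric: perf evaluates MetricExpr by summing the five transactional-execution counters. A hedged usage sketch, assuming a perf build with metric support:

    # reports the combined count of completed and aborted transactions
    perf stat -M transaction -a -- sleep 10
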
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/basic.json b/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
index 8f653c9d899d..17fb5241928b 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
@@ -1,47 +1,55 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
"BriefDescription": "CPU Cycles",
"PublicDescription": "Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
"BriefDescription": "Instructions",
"PublicDescription": "Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
"BriefDescription": "L1I Directory Writes",
"PublicDescription": "Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
"BriefDescription": "L1I Penalty Cycles",
"PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
"BriefDescription": "L1D Directory Writes",
"PublicDescription": "Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
"BriefDescription": "L1D Penalty Cycles",
"PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
"BriefDescription": "Problem-State CPU Cycles",
"PublicDescription": "Problem-State Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
"BriefDescription": "Problem-State Instructions",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
index 7e5b72492141..db286f19e7b6 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
@@ -1,95 +1,111 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
"BriefDescription": "PRNG Functions",
"PublicDescription": "Total number of the PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
"BriefDescription": "PRNG Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
"BriefDescription": "PRNG Blocked Functions",
"PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
"BriefDescription": "PRNG Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
"BriefDescription": "SHA Functions",
"PublicDescription": "Total number of SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
"BriefDescription": "SHA Cycles",
"PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
"BriefDescription": "SHA Blocked Functions",
"PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
"BriefDescription": "SHA Bloced Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
"BriefDescription": "DEA Functions",
"PublicDescription": "Total number of the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
"BriefDescription": "DEA Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
"BriefDescription": "DEA Blocked Functions",
"PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
"BriefDescription": "DEA Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
"BriefDescription": "AES Functions",
"PublicDescription": "Total number of AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
"BriefDescription": "AES Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
"BriefDescription": "AES Blocked Functions",
"PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
index aa4dfb46b65b..e7a3524b748f 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
@@ -1,317 +1,370 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "128",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
"PublicDescription": "Counter:128 Name:L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "DTLB2_WRITES",
"BriefDescription": "DTLB2 Writes",
"PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "DTLB2_MISSES",
"BriefDescription": "DTLB2 Misses",
"PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "DTLB2_HPAGE_WRITES",
"BriefDescription": "DTLB2 One-Megabyte Page Writes",
"PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "DTLB2_GPAGE_WRITES",
"BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
"PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "ITLB2_WRITES",
"BriefDescription": "ITLB2 Writes",
"PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "ITLB2_MISSES",
"BriefDescription": "ITLB2 Misses",
"PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
"PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
"PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "TLB2_ENGINES_BUSY",
"BriefDescription": "TLB2 Engines Busy",
"PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
"PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
"PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1C_TLB2_MISSES",
"BriefDescription": "L1C TLB2 Misses",
"PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO",
"BriefDescription": "L1D On-Chip L3 Sourced Writes read-only",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "162",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "163",
"EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "164",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "165",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "166",
"EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "167",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "168",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "169",
"EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "170",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "171",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "172",
"EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "173",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "174",
"EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "175",
"EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "224",
"EventName": "BCD_DFP_EXECUTION_SLOTS",
"BriefDescription": "BCD DFP Execution Slots",
"PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "225",
"EventName": "VX_BCD_EXECUTION_SLOTS",
"BriefDescription": "VX BCD Execution Slots",
"PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "226",
"EventName": "DECIMAL_INSTRUCTIONS",
"BriefDescription": "Decimal Instructions",
"PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "232",
"EventName": "LAST_HOST_TRANSLATIONS",
"BriefDescription": "Last host translation done",
"PublicDescription": "Last Host Translation done"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "243",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
"PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "244",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
"PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "245",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
"PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "448",
"EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE",
"BriefDescription": "Cycle count with one thread active",
"PublicDescription": "Cycle count with one thread active"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "449",
"EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
"BriefDescription": "Cycle count with two threads active",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json
new file mode 100644
index 000000000000..1a0034f79f73
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json
@@ -0,0 +1,7 @@
+[
+ {
+ "BriefDescription": "Transaction count",
+ "MetricName": "transaction",
+ "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+ }
+]
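
The z14 copy of transaction.json is identical to the z13 one. For cross-checking, the metric is equivalent to counting the raw events from extended.json above by hand (event names resolved through the pmu-events tables; the workload command is a placeholder):

    # manual equivalent of the transaction MetricExpr
    perf stat -e TX_C_TEND,TX_NC_TEND,TX_NC_TABORT,TX_C_TABORT_SPECIAL,TX_C_TABORT_NO_SPECIAL -- <workload>
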
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/basic.json b/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
index 8bf16759ca53..2dd8dafff2ef 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
@@ -1,71 +1,83 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
"BriefDescription": "CPU Cycles",
"PublicDescription": "Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
"BriefDescription": "Instructions",
"PublicDescription": "Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
"BriefDescription": "L1I Directory Writes",
"PublicDescription": "Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
"BriefDescription": "L1I Penalty Cycles",
"PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
"BriefDescription": "L1D Directory Writes",
"PublicDescription": "Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
"BriefDescription": "L1D Penalty Cycles",
"PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
"BriefDescription": "Problem-State CPU Cycles",
"PublicDescription": "Problem-State Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
"BriefDescription": "Problem-State Instructions",
"PublicDescription": "Problem-State Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
"BriefDescription": "Problem-State L1I Directory Writes",
"PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1I Penalty Cycles",
"PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
"BriefDescription": "Problem-State L1D Directory Writes",
"PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1D Penalty Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
index 7e5b72492141..db286f19e7b6 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
@@ -1,95 +1,111 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
"BriefDescription": "PRNG Functions",
"PublicDescription": "Total number of the PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
"BriefDescription": "PRNG Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
"BriefDescription": "PRNG Blocked Functions",
"PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
"BriefDescription": "PRNG Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
"BriefDescription": "SHA Functions",
"PublicDescription": "Total number of SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
"BriefDescription": "SHA Cycles",
"PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
"BriefDescription": "SHA Blocked Functions",
"PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
"BriefDescription": "SHA Bloced Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
"BriefDescription": "DEA Functions",
"PublicDescription": "Total number of the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
"BriefDescription": "DEA Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
"BriefDescription": "DEA Blocked Functions",
"PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
"BriefDescription": "DEA Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
"BriefDescription": "AES Functions",
"PublicDescription": "Total number of AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
"BriefDescription": "AES Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
"BriefDescription": "AES Blocked Functions",
"PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/extended.json b/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
index b6d7fec7c2e7..b7b42a870bb0 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
@@ -1,143 +1,167 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "128",
"EventName": "L1D_L2_SOURCED_WRITES",
"BriefDescription": "L1D L2 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from the Level-2 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "L1I_L2_SOURCED_WRITES",
"BriefDescription": "L1I L2 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "DTLB1_MISSES",
"BriefDescription": "DTLB1 Misses",
"PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle a DTLB1 miss is in progress."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "ITLB1_MISSES",
"BriefDescription": "ITLB1 Misses",
"PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle a ITLB1 miss is in progress."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L2C_STORES_SENT",
"BriefDescription": "L2C Stores Sent",
"PublicDescription": "Incremented by one for every store sent to Level-2 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1D_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes",
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json b/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
index 8bf16759ca53..2dd8dafff2ef 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
@@ -1,71 +1,83 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
"BriefDescription": "CPU Cycles",
"PublicDescription": "Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
"BriefDescription": "Instructions",
"PublicDescription": "Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
"BriefDescription": "L1I Directory Writes",
"PublicDescription": "Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
"BriefDescription": "L1I Penalty Cycles",
"PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
"BriefDescription": "L1D Directory Writes",
"PublicDescription": "Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
"BriefDescription": "L1D Penalty Cycles",
"PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
"BriefDescription": "Problem-State CPU Cycles",
"PublicDescription": "Problem-State Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
"BriefDescription": "Problem-State Instructions",
"PublicDescription": "Problem-State Instruction Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
"BriefDescription": "Problem-State L1I Directory Writes",
"PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1I Penalty Cycles",
"PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
"BriefDescription": "Problem-State L1D Directory Writes",
"PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
"BriefDescription": "Problem-State L1D Penalty Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json b/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
index 7e5b72492141..db286f19e7b6 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
@@ -1,95 +1,111 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
"BriefDescription": "PRNG Functions",
"PublicDescription": "Total number of the PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
"BriefDescription": "PRNG Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
"BriefDescription": "PRNG Blocked Functions",
"PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
"BriefDescription": "PRNG Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
"BriefDescription": "SHA Functions",
"PublicDescription": "Total number of SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
"BriefDescription": "SHA Cycles",
"PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
"BriefDescription": "SHA Blocked Functions",
"PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
"BriefDescription": "SHA Bloced Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
"BriefDescription": "DEA Functions",
"PublicDescription": "Total number of the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
"BriefDescription": "DEA Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
"BriefDescription": "DEA Blocked Functions",
"PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
"BriefDescription": "DEA Blocked Cycles",
"PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
"BriefDescription": "AES Functions",
"PublicDescription": "Total number of AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
"BriefDescription": "AES Cycles",
"PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
"BriefDescription": "AES Blocked Functions",
"PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
"BriefDescription": "AES Blocked Cycles",
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json b/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
index 8682126aabb2..162251037219 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
@@ -1,209 +1,244 @@
[
{
+ "Unit": "CPU-M-CF",
"EventCode": "128",
"EventName": "DTLB1_MISSES",
"BriefDescription": "DTLB1 Misses",
"PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle a DTLB1 miss is in progress."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "ITLB1_MISSES",
"BriefDescription": "ITLB1 Misses",
"PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle a ITLB1 miss is in progress."
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "L1D_L2I_SOURCED_WRITES",
"BriefDescription": "L1D L2I Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
"PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
"PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
"PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
"PublicDescription": "A TEND instruction has completed in a nonconstrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from a On Chip Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Book L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L3 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1I_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L4 Sourced Writes",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
"PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "159",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "160",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "161",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Book L3 Sourced Writes with Intervention",
"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "177",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
"PublicDescription": "A transaction abort has occurred in a nonconstrained transactional-execution mode"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "178",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
"PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
},
{
+ "Unit": "CPU-M-CF",
"EventCode": "179",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json b/tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json
new file mode 100644
index 000000000000..1a0034f79f73
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/transaction.json
@@ -0,0 +1,7 @@
+[
+ {
+ "BriefDescription": "Transaction count",
+ "MetricName": "transaction",
+ "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+ }
+]
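
Note: this new transaction.json defines a derived metric rather than a raw event. The MetricExpr simply sums the five transactional-execution counters, so the result counts every constrained and non-constrained transaction that either completed (TEND) or aborted. A minimal sketch of that arithmetic, with a hypothetical struct holding the raw counts:

    /*
     * Illustrative only: how the MetricExpr above combines the raw
     * counter values. The struct and function are hypothetical, not
     * part of perf.
     */
    struct tx_counts {
        unsigned long long tx_c_tend;              /* completed, constrained */
        unsigned long long tx_nc_tend;             /* completed, non-constrained */
        unsigned long long tx_nc_tabort;           /* aborted, non-constrained */
        unsigned long long tx_c_tabort_special;    /* constrained abort, special logic */
        unsigned long long tx_c_tabort_no_special; /* constrained abort, no special logic */
    };

    static unsigned long long transaction_count(const struct tx_counts *c)
    {
        return c->tx_c_tend + c->tx_nc_tend + c->tx_nc_tabort +
               c->tx_c_tabort_special + c->tx_c_tabort_no_special;
    }

With the Unit-to-PMU mapping added to jevents.c just below, selecting the metric by name (perf stat -M transaction) should resolve this expression against the five cpum_cf events.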
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index db3a594ee1e4..68c92bb599ee 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -233,6 +233,8 @@ static struct map {
{ "QPI LL", "uncore_qpi" },
{ "SBO", "uncore_sbox" },
{ "iMPH-U", "uncore_arb" },
+ { "CPU-M-CF", "cpum_cf" },
+ { "CPU-M-SF", "cpum_sf" },
{}
};
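
Note: the two new entries extend jevents' table that translates the "Unit" field now present in every s390 JSON event into the sysfs PMU name (cpum_cf, cpum_sf) used when emitting event aliases. A sketch of searching such a {}-terminated table; lookup_pmu() is a hypothetical stand-in for the jevents internals, shown only to illustrate the data structure:

    #include <string.h>

    struct map { const char *json; const char *perf; };

    static const struct map unit_to_pmu[] = {
        { "CPU-M-CF", "cpum_cf" },
        { "CPU-M-SF", "cpum_sf" },
        {}                          /* terminator, as in jevents.c */
    };

    static const char *lookup_pmu(const char *unit)
    {
        for (const struct map *m = unit_to_pmu; m->json; m++)
            if (!strcmp(m->json, unit))
                return m->perf;
        return NULL;                /* unknown unit: keep the default PMU */
    }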
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index dd850a26d579..d7a5e1b9aa6f 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -385,7 +385,7 @@ static int test_and_print(struct test *t, bool force_skip, int subtest)
if (!t->subtest.get_nr)
pr_debug("%s:", t->desc);
else
- pr_debug("%s subtest %d:", t->desc, subtest);
+ pr_debug("%s subtest %d:", t->desc, subtest + 1);
switch (err) {
case TEST_OK:
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 61211918bfba..3b97ac018d5a 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1322,6 +1322,14 @@ static int test__intel_pt(struct perf_evlist *evlist)
return 0;
}
+static int test__checkevent_complex_name(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong complex name parsing", strcmp(evsel->name, "COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks") == 0);
+ return 0;
+}
+
static int count_tracepoints(void)
{
struct dirent *events_ent;
@@ -1658,6 +1666,11 @@ static struct evlist_test test__events[] = {
.check = test__intel_pt,
.id = 52,
},
+ {
+ .name = "cycles/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks'/Duk",
+ .check = test__checkevent_complex_name,
+ .id = 53
+ }
};
static struct evlist_test test__events_pmu[] = {
@@ -1676,6 +1689,11 @@ static struct evlist_test test__events_pmu[] = {
.check = test__checkevent_pmu_partial_time_callgraph,
.id = 2,
},
+ {
+ .name = "cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp",
+ .check = test__checkevent_complex_name,
+ .id = 3,
+ }
};
struct terms_test {
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index 94e513e62b34..3013ac8f83d0 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -13,11 +13,24 @@
libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254
+event_pattern='probe_libc:inet_pton(\_[[:digit:]]+)?'
+
+add_libc_inet_pton_event() {
+
+ event_name=$(perf probe -f -x $libc -a inet_pton 2>&1 | tail -n +2 | head -n -5 | \
+ grep -P -o "$event_pattern(?=[[:space:]]\(on inet_pton in $libc\))")
+
+ if [ $? -ne 0 -o -z "$event_name" ] ; then
+ printf "FAIL: could not add event\n"
+ return 1
+ fi
+}
+
trace_libc_inet_pton_backtrace() {
expected=`mktemp -u /tmp/expected.XXX`
- echo "ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" > $expected
+ echo "ping[][0-9 \.:]+$event_name: \([[:xdigit:]]+\)" > $expected
echo ".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
case "$(uname -m)" in
s390x)
@@ -26,6 +39,12 @@ trace_libc_inet_pton_backtrace() {
echo "(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
echo "main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
;;
+ ppc64|ppc64le)
+ eventattr='max-stack=4'
+ echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+ echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+ echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+ ;;
*)
eventattr='max-stack=3'
echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
@@ -35,7 +54,7 @@ trace_libc_inet_pton_backtrace() {
perf_data=`mktemp -u /tmp/perf.data.XXX`
perf_script=`mktemp -u /tmp/perf.script.XXX`
- perf record -e probe_libc:inet_pton/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1
+ perf record -e $event_name/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1
perf script -i $perf_data > $perf_script
exec 3<$perf_script
@@ -46,7 +65,7 @@ trace_libc_inet_pton_backtrace() {
echo "$line" | egrep -q "$pattern"
if [ $? -ne 0 ] ; then
printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line"
- exit 1
+ return 1
fi
done
@@ -56,13 +75,20 @@ trace_libc_inet_pton_backtrace() {
# even if the perf script output does not match.
}
+delete_libc_inet_pton_event() {
+
+ if [ -n "$event_name" ] ; then
+ perf probe -q -d $event_name
+ fi
+}
+
# Check for IPv6 interface existence
ip a sh lo | fgrep -q inet6 || exit 2
skip_if_no_perf_probe && \
-perf probe -q $libc inet_pton && \
+add_libc_inet_pton_event && \
trace_libc_inet_pton_backtrace
err=$?
rm -f ${perf_data} ${perf_script} ${expected}
-perf probe -q -d probe_libc:inet_pton
+delete_libc_inet_pton_event
exit $err
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index 66330d4b739b..f528ba35e140 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -7,4 +7,5 @@ endif
libperf-y += kcmp.o
libperf-y += pkey_alloc.o
libperf-y += prctl.o
+libperf-y += socket.o
libperf-y += statx.o
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index 984a504d335c..9615af5d412b 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -106,6 +106,9 @@ size_t syscall_arg__scnprintf_prctl_arg2(char *bf, size_t size, struct syscall_a
size_t syscall_arg__scnprintf_prctl_arg3(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_PRCTL_ARG3 syscall_arg__scnprintf_prctl_arg3
+size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_SK_PROTO syscall_arg__scnprintf_socket_protocol
+
size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_STATX_FLAGS syscall_arg__scnprintf_statx_flags
diff --git a/tools/perf/trace/beauty/drm_ioctl.sh b/tools/perf/trace/beauty/drm_ioctl.sh
index 2149d3a98e42..9d3816815e60 100755
--- a/tools/perf/trace/beauty/drm_ioctl.sh
+++ b/tools/perf/trace/beauty/drm_ioctl.sh
@@ -1,13 +1,14 @@
#!/bin/sh
-drm_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/drm/
+
printf "#ifndef DRM_COMMAND_BASE\n"
-grep "#define DRM_COMMAND_BASE" $drm_header_dir/drm.h
+grep "#define DRM_COMMAND_BASE" $header_dir/drm.h
printf "#endif\n"
printf "static const char *drm_ioctl_cmds[] = {\n"
-grep "^#define DRM_IOCTL.*DRM_IO" $drm_header_dir/drm.h | \
+grep "^#define DRM_IOCTL.*DRM_IO" $header_dir/drm.h | \
sed -r 's/^#define +DRM_IOCTL_([A-Z0-9_]+)[ ]+DRM_IO[A-Z]* *\( *(0x[[:xdigit:]]+),*.*/ [\2] = "\1",/g'
-grep "^#define DRM_I915_[A-Z_0-9]\+[ ]\+0x" $drm_header_dir/i915_drm.h | \
+grep "^#define DRM_I915_[A-Z_0-9]\+[ ]\+0x" $header_dir/i915_drm.h | \
sed -r 's/^#define +DRM_I915_([A-Z0-9_]+)[ ]+(0x[[:xdigit:]]+)/\t[DRM_COMMAND_BASE + \2] = "I915_\1",/g'
printf "};\n"
diff --git a/tools/perf/trace/beauty/kcmp_type.sh b/tools/perf/trace/beauty/kcmp_type.sh
index 40d063b8c082..a3c304caa336 100755
--- a/tools/perf/trace/beauty/kcmp_type.sh
+++ b/tools/perf/trace/beauty/kcmp_type.sh
@@ -1,6 +1,6 @@
#!/bin/sh
-header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
printf "static const char *kcmp_types[] = {\n"
regex='^[[:space:]]+(KCMP_(\w+)),'
diff --git a/tools/perf/trace/beauty/kvm_ioctl.sh b/tools/perf/trace/beauty/kvm_ioctl.sh
index bd28817afced..c4699fd46bb6 100755
--- a/tools/perf/trace/beauty/kvm_ioctl.sh
+++ b/tools/perf/trace/beauty/kvm_ioctl.sh
@@ -1,10 +1,10 @@
#!/bin/sh
-kvm_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
printf "static const char *kvm_ioctl_cmds[] = {\n"
regex='^#[[:space:]]*define[[:space:]]+KVM_(\w+)[[:space:]]+_IO[RW]*\([[:space:]]*KVMIO[[:space:]]*,[[:space:]]*(0x[[:xdigit:]]+).*'
-egrep $regex ${kvm_header_dir}/kvm.h | \
+egrep $regex ${header_dir}/kvm.h | \
sed -r "s/$regex/\2 \1/g" | \
egrep -v " ((ARM|PPC|S390)_|[GS]ET_(DEBUGREGS|PIT2|XSAVE|TSC_KHZ)|CREATE_SPAPR_TCE_64)" | \
sort | xargs printf "\t[%s] = \"%s\",\n"
diff --git a/tools/perf/trace/beauty/madvise_behavior.sh b/tools/perf/trace/beauty/madvise_behavior.sh
index 60ef8640ee70..431639eb4d29 100755
--- a/tools/perf/trace/beauty/madvise_behavior.sh
+++ b/tools/perf/trace/beauty/madvise_behavior.sh
@@ -1,6 +1,6 @@
#!/bin/sh
-header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/asm-generic/
printf "static const char *madvise_advices[] = {\n"
regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MADV_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*'
diff --git a/tools/perf/trace/beauty/perf_ioctl.sh b/tools/perf/trace/beauty/perf_ioctl.sh
index faea4237c793..6492c74df928 100755
--- a/tools/perf/trace/beauty/perf_ioctl.sh
+++ b/tools/perf/trace/beauty/perf_ioctl.sh
@@ -1,6 +1,6 @@
#!/bin/sh
-header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
printf "static const char *perf_ioctl_cmds[] = {\n"
regex='^#[[:space:]]*define[[:space:]]+PERF_EVENT_IOC_(\w+)[[:space:]]+_IO[RW]*[[:space:]]*\([[:space:]]*.\$.[[:space:]]*,[[:space:]]*([[:digit:]]+).*'
diff --git a/tools/perf/trace/beauty/pkey_alloc_access_rights.sh b/tools/perf/trace/beauty/pkey_alloc_access_rights.sh
index 62e51a02b839..e0a51aeb20b2 100755
--- a/tools/perf/trace/beauty/pkey_alloc_access_rights.sh
+++ b/tools/perf/trace/beauty/pkey_alloc_access_rights.sh
@@ -1,6 +1,6 @@
#!/bin/sh
-header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/asm-generic/
printf "static const char *pkey_alloc_access_rights[] = {\n"
regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+PKEY_([[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*'
diff --git a/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh b/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh
index aad5ab130539..eb511bb5fbd3 100755
--- a/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh
+++ b/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh
@@ -1,8 +1,8 @@
#!/bin/sh
-sound_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/sound/
printf "static const char *sndrv_ctl_ioctl_cmds[] = {\n"
-grep "^#define[\t ]\+SNDRV_CTL_IOCTL_" $sound_header_dir/asound.h | \
+grep "^#define[\t ]\+SNDRV_CTL_IOCTL_" $header_dir/asound.h | \
sed -r 's/^#define +SNDRV_CTL_IOCTL_([A-Z0-9_]+)[\t ]+_IO[RW]*\( *.U., *(0x[[:xdigit:]]+),?.*/\t[\2] = \"\1\",/g'
printf "};\n"
diff --git a/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh b/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh
index b7e9ef6b2f55..6818392968b2 100755
--- a/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh
+++ b/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh
@@ -1,8 +1,8 @@
#!/bin/sh
-sound_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/sound/
printf "static const char *sndrv_pcm_ioctl_cmds[] = {\n"
-grep "^#define[\t ]\+SNDRV_PCM_IOCTL_" $sound_header_dir/asound.h | \
+grep "^#define[\t ]\+SNDRV_PCM_IOCTL_" $header_dir/asound.h | \
sed -r 's/^#define +SNDRV_PCM_IOCTL_([A-Z0-9_]+)[\t ]+_IO[RW]*\( *.A., *(0x[[:xdigit:]]+),?.*/\t[\2] = \"\1\",/g'
printf "};\n"
diff --git a/tools/perf/trace/beauty/socket.c b/tools/perf/trace/beauty/socket.c
new file mode 100644
index 000000000000..65227269384b
--- /dev/null
+++ b/tools/perf/trace/beauty/socket.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * trace/beauty/socket.c
+ *
+ * Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+
+#include "trace/beauty/beauty.h"
+#include <sys/types.h>
+#include <sys/socket.h>
+
+static size_t socket__scnprintf_ipproto(int protocol, char *bf, size_t size)
+{
+#include "trace/beauty/generated/socket_ipproto_array.c"
+ static DEFINE_STRARRAY(socket_ipproto);
+
+ return strarray__scnprintf(&strarray__socket_ipproto, bf, size, "%d", protocol);
+}
+
+size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct syscall_arg *arg)
+{
+ int domain = syscall_arg__val(arg, 0);
+
+ if (domain == AF_INET || domain == AF_INET6)
+ return socket__scnprintf_ipproto(arg->val, bf, size);
+
+ return syscall_arg__scnprintf_int(bf, size, arg);
+}
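
Note: the generated socket_ipproto_array.c is #included inside the function so the name table stays local to it, and the strarray printer falls back to the numeric format for protocol values outside the table. A simplified restatement of that fallback idea, with hypothetical names rather than perf's actual strarray implementation:

    #include <stdio.h>

    struct strtable { int nr_entries; const char **entries; };

    static size_t strtable_snprintf(const struct strtable *t, char *bf,
                                    size_t size, const char *intfmt, int val)
    {
        if (val >= 0 && val < t->nr_entries && t->entries[val])
            return snprintf(bf, size, "%s", t->entries[val]);

        return snprintf(bf, size, intfmt, val); /* e.g. "%d" for unknown values */
    }

With this beautifier wired up, perf trace should be able to print names such as IPPROTO_TCP instead of a bare 6 for AF_INET/AF_INET6 socket calls.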
diff --git a/tools/perf/trace/beauty/socket_ipproto.sh b/tools/perf/trace/beauty/socket_ipproto.sh
new file mode 100755
index 000000000000..a3cc24633bec
--- /dev/null
+++ b/tools/perf/trace/beauty/socket_ipproto.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
+
+printf "static const char *socket_ipproto[] = {\n"
+regex='^[[:space:]]+IPPROTO_(\w+)[[:space:]]+=[[:space:]]+([[:digit:]]+),.*'
+
+egrep $regex ${header_dir}/in.h | \
+ sed -r "s/$regex/\2 \1/g" | \
+ sort | xargs printf "\t[%s] = \"%s\",\n"
+printf "};\n"
diff --git a/tools/perf/trace/beauty/vhost_virtio_ioctl.sh b/tools/perf/trace/beauty/vhost_virtio_ioctl.sh
index 76f1de697787..0f6a5197d0be 100755
--- a/tools/perf/trace/beauty/vhost_virtio_ioctl.sh
+++ b/tools/perf/trace/beauty/vhost_virtio_ioctl.sh
@@ -1,17 +1,17 @@
#!/bin/sh
-vhost_virtio_header_dir=$1
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
printf "static const char *vhost_virtio_ioctl_cmds[] = {\n"
regex='^#[[:space:]]*define[[:space:]]+VHOST_(\w+)[[:space:]]+_IOW?\([[:space:]]*VHOST_VIRTIO[[:space:]]*,[[:space:]]*(0x[[:xdigit:]]+).*'
-egrep $regex ${vhost_virtio_header_dir}/vhost.h | \
+egrep $regex ${header_dir}/vhost.h | \
sed -r "s/$regex/\2 \1/g" | \
sort | xargs printf "\t[%s] = \"%s\",\n"
printf "};\n"
printf "static const char *vhost_virtio_ioctl_read_cmds[] = {\n"
regex='^#[[:space:]]*define[[:space:]]+VHOST_(\w+)[[:space:]]+_IOW?R\([[:space:]]*VHOST_VIRTIO[[:space:]]*,[[:space:]]*(0x[[:xdigit:]]+).*'
-egrep $regex ${vhost_virtio_header_dir}/vhost.h | \
+egrep $regex ${header_dir}/vhost.h | \
sed -r "s/$regex/\2 \1/g" | \
sort | xargs printf "\t[%s] = \"%s\",\n"
printf "};\n"
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 69b7a28f7a1c..74c4ae1f0a05 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -529,7 +529,7 @@ out:
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
char *bf, size_t bfsz, FILE *fp,
- bool use_callchain)
+ bool ignore_callchains)
{
int ret;
int callchain_ret = 0;
@@ -550,7 +550,7 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
ret = fprintf(fp, "%s\n", bf);
- if (hist_entry__has_callchains(he) && use_callchain)
+ if (hist_entry__has_callchains(he) && !ignore_callchains)
callchain_ret = hist_entry_callchain__fprintf(he, total_period,
0, fp);
@@ -755,7 +755,7 @@ int hists__fprintf_headers(struct hists *hists, FILE *fp)
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
int max_cols, float min_pcnt, FILE *fp,
- bool use_callchain)
+ bool ignore_callchains)
{
struct rb_node *nd;
size_t ret = 0;
@@ -799,7 +799,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
if (percent < min_pcnt)
continue;
- ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);
+ ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);
if (max_rows && ++nr_rows >= max_rows)
break;
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index cee658733e2c..3d02ae38ec56 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -747,7 +747,9 @@ int bpf__load(struct bpf_object *obj)
err = bpf_object__load(obj);
if (err) {
- pr_debug("bpf: load objects failed\n");
+ char bf[128];
+ libbpf_strerror(err, bf, sizeof(bf));
+ pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
return err;
}
return 0;
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
index 7798a2cc8a86..31279a7bd919 100644
--- a/tools/perf/util/comm.c
+++ b/tools/perf/util/comm.c
@@ -20,9 +20,10 @@ static struct rw_semaphore comm_str_lock = {.lock = PTHREAD_RWLOCK_INITIALIZER,}
static struct comm_str *comm_str__get(struct comm_str *cs)
{
- if (cs)
- refcount_inc(&cs->refcnt);
- return cs;
+ if (cs && refcount_inc_not_zero(&cs->refcnt))
+ return cs;
+
+ return NULL;
}
static void comm_str__put(struct comm_str *cs)
@@ -67,9 +68,14 @@ struct comm_str *__comm_str__findnew(const char *str, struct rb_root *root)
parent = *p;
iter = rb_entry(parent, struct comm_str, rb_node);
+ /*
+ * If we race with comm_str__put, iter->refcnt is 0
+ * and it will shortly be removed by that comm_str__put
+ * call, so ignore it in this search.
+ */
cmp = strcmp(str, iter->str);
- if (!cmp)
- return comm_str__get(iter);
+ if (!cmp && comm_str__get(iter))
+ return iter;
if (cmp < 0)
p = &(*p)->rb_left;
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 4d5fc374e730..938def6d0bb9 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -31,6 +31,8 @@
#endif
#endif
+#define CS_ETM_INVAL_ADDR 0xdeadbeefdeadbeefUL
+
struct cs_etm_decoder {
void *data;
void (*packet_printer)(const char *msg);
@@ -261,8 +263,8 @@ static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
decoder->tail = 0;
decoder->packet_count = 0;
for (i = 0; i < MAX_BUFFER; i++) {
- decoder->packet_buffer[i].start_addr = 0xdeadbeefdeadbeefUL;
- decoder->packet_buffer[i].end_addr = 0xdeadbeefdeadbeefUL;
+ decoder->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
+ decoder->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[i].last_instr_taken_branch = false;
decoder->packet_buffer[i].exc = false;
decoder->packet_buffer[i].exc_ret = false;
@@ -295,8 +297,8 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
decoder->packet_buffer[et].exc = false;
decoder->packet_buffer[et].exc_ret = false;
decoder->packet_buffer[et].cpu = *((int *)inode->priv);
- decoder->packet_buffer[et].start_addr = 0xdeadbeefdeadbeefUL;
- decoder->packet_buffer[et].end_addr = 0xdeadbeefdeadbeefUL;
+ decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
+ decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
if (decoder->packet_count == MAX_BUFFER - 1)
return OCSD_RESP_WAIT;
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
index 743f5f444304..612b5755f742 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
@@ -23,6 +23,7 @@ struct cs_etm_buffer {
};
enum cs_etm_sample_type {
+ CS_ETM_EMPTY = 0,
CS_ETM_RANGE = 1 << 0,
CS_ETM_TRACE_ON = 1 << 1,
};
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 822ba915d144..2ae640257fdb 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -494,6 +494,10 @@ static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq)
static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet)
{
+ /* Returns 0 for the CS_ETM_TRACE_ON packet */
+ if (packet->sample_type == CS_ETM_TRACE_ON)
+ return 0;
+
/*
* The packet records the execution range with an exclusive end address
*
@@ -505,6 +509,15 @@ static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet)
return packet->end_addr - A64_INSTR_SIZE;
}
+static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
+{
+ /* Returns 0 for the CS_ETM_TRACE_ON packet */
+ if (packet->sample_type == CS_ETM_TRACE_ON)
+ return 0;
+
+ return packet->start_addr;
+}
+
static inline u64 cs_etm__instr_count(const struct cs_etm_packet *packet)
{
/*
@@ -546,7 +559,7 @@ static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq)
be = &bs->entries[etmq->last_branch_pos];
be->from = cs_etm__last_executed_instr(etmq->prev_packet);
- be->to = etmq->packet->start_addr;
+ be->to = cs_etm__first_executed_instr(etmq->packet);
/* No support for mispredict */
be->flags.mispred = 0;
be->flags.predicted = 1;
@@ -701,7 +714,7 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
sample.ip = cs_etm__last_executed_instr(etmq->prev_packet);
sample.pid = etmq->pid;
sample.tid = etmq->tid;
- sample.addr = etmq->packet->start_addr;
+ sample.addr = cs_etm__first_executed_instr(etmq->packet);
sample.id = etmq->etm->branches_id;
sample.stream_id = etmq->etm->branches_id;
sample.period = 1;
@@ -897,13 +910,23 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
etmq->period_instructions = instrs_over;
}
- if (etm->sample_branches &&
- etmq->prev_packet &&
- etmq->prev_packet->sample_type == CS_ETM_RANGE &&
- etmq->prev_packet->last_instr_taken_branch) {
- ret = cs_etm__synth_branch_sample(etmq);
- if (ret)
- return ret;
+ if (etm->sample_branches && etmq->prev_packet) {
+ bool generate_sample = false;
+
+ /* Generate a sample for the trace-on packet */
+ if (etmq->prev_packet->sample_type == CS_ETM_TRACE_ON)
+ generate_sample = true;
+
+ /* Generate a sample for the taken-branch packet */
+ if (etmq->prev_packet->sample_type == CS_ETM_RANGE &&
+ etmq->prev_packet->last_instr_taken_branch)
+ generate_sample = true;
+
+ if (generate_sample) {
+ ret = cs_etm__synth_branch_sample(etmq);
+ if (ret)
+ return ret;
+ }
}
if (etm->sample_branches || etm->synth_opts.last_branch) {
@@ -922,10 +945,17 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
static int cs_etm__flush(struct cs_etm_queue *etmq)
{
int err = 0;
+ struct cs_etm_auxtrace *etm = etmq->etm;
struct cs_etm_packet *tmp;
+ if (!etmq->prev_packet)
+ return 0;
+
+ /* Handle the start-of-trace case: nothing to flush yet */
+ if (etmq->prev_packet->sample_type == CS_ETM_EMPTY)
+ goto swap_packet;
+
if (etmq->etm->synth_opts.last_branch &&
- etmq->prev_packet &&
etmq->prev_packet->sample_type == CS_ETM_RANGE) {
/*
* Generate a last branch event for the branches left in the
@@ -939,8 +969,22 @@ static int cs_etm__flush(struct cs_etm_queue *etmq)
err = cs_etm__synth_instruction_sample(
etmq, addr,
etmq->period_instructions);
+ if (err)
+ return err;
+
etmq->period_instructions = 0;
+ }
+
+ if (etm->sample_branches &&
+ etmq->prev_packet->sample_type == CS_ETM_RANGE) {
+ err = cs_etm__synth_branch_sample(etmq);
+ if (err)
+ return err;
+ }
+
+swap_packet:
+ if (etmq->etm->synth_opts.last_branch) {
/*
* Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
* the next incoming packet.
@@ -1020,6 +1064,13 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
*/
cs_etm__flush(etmq);
break;
+ case CS_ETM_EMPTY:
+ /*
+ * We should never receive an empty packet;
+ * report an error.
+ */
+ pr_err("CS ETM Trace: empty packet\n");
+ return -EINVAL;
default:
break;
}
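
Note: with CS_ETM_TRACE_ON packets now flowing through the sampling path, a branch sample is synthesized in two cases: after an ordinary range packet that ended in a taken branch, and after a trace-on packet marking a discontinuity (for which the address helpers above deliberately return 0). A condensed restatement of that rule; the helper is hypothetical, the types come from cs-etm-decoder.h:

    #include <stdbool.h>
    #include "cs-etm-decoder/cs-etm-decoder.h"

    static bool should_synth_branch(const struct cs_etm_packet *prev)
    {
        if (prev->sample_type == CS_ETM_TRACE_ON)
            return true;   /* discontinuity: close out the previous range */

        return prev->sample_type == CS_ETM_RANGE &&
               prev->last_instr_taken_branch;
    }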
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 94fce4f537e9..ddf84b941abf 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -260,6 +260,17 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
evsel->attr.sample_period = 1;
}
+ if (perf_evsel__is_clock(evsel)) {
+ /*
+ * The evsel->unit points to static alias->unit
+ * so it's ok to use static string in here.
+ */
+ static const char *unit = "msec";
+
+ evsel->unit = unit;
+ evsel->scale = 1e-6;
+ }
+
return evsel;
}
@@ -848,6 +859,12 @@ static void apply_config_terms(struct perf_evsel *evsel,
}
}
+static bool is_dummy_event(struct perf_evsel *evsel)
+{
+ return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
+ (evsel->attr.config == PERF_COUNT_SW_DUMMY);
+}
+
/*
* The enable_on_exec/disabled value strategy:
*
@@ -1086,6 +1103,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
else
perf_evsel__reset_sample_bit(evsel, PERIOD);
}
+
+ /*
+ * For initial_delay, a dummy event is added implicitly.
+ * The software event errors out with -EOPNOTSUPP
+ * if the BRANCH_STACK bit is set.
+ */
+ if (opts->initial_delay && is_dummy_event(evsel))
+ perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
}
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index d277930b19a1..973c03167947 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -402,10 +402,13 @@ bool perf_evsel__is_function_event(struct perf_evsel *evsel);
static inline bool perf_evsel__is_bpf_output(struct perf_evsel *evsel)
{
- struct perf_event_attr *attr = &evsel->attr;
+ return perf_evsel__match(evsel, SOFTWARE, SW_BPF_OUTPUT);
+}
- return (attr->config == PERF_COUNT_SW_BPF_OUTPUT) &&
- (attr->type == PERF_TYPE_SOFTWARE);
+static inline bool perf_evsel__is_clock(struct perf_evsel *evsel)
+{
+ return perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
+ perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}
struct perf_attr_details {
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 653ff65aa2c3..5af58aac91ad 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2587,7 +2587,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true),
FEAT_OPN(BRANCH_STACK, branch_stack, false),
FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false),
- FEAT_OPN(GROUP_DESC, group_desc, false),
+ FEAT_OPR(GROUP_DESC, group_desc, false),
FEAT_OPN(AUXTRACE, auxtrace, false),
FEAT_OPN(STAT, stat, false),
FEAT_OPN(CACHE, cache, true),
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 73049f7f0f60..3badd7f1e1b8 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -181,7 +181,7 @@ size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
int max_cols, float min_pcnt, FILE *fp,
- bool use_callchain);
+ bool ignore_callchains);
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp);
void hists__filter_by_dso(struct hists *hists);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index e7b4a8b513f2..b300a3973448 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -408,23 +408,16 @@ out_err:
}
/*
- * Caller must eventually drop thread->refcnt returned with a successful
- * lookup/new thread inserted.
+ * Front-end cache - TID lookups come in blocks,
+ * so most of the time we don't have to look up
+ * the full rbtree:
*/
-static struct thread *____machine__findnew_thread(struct machine *machine,
- struct threads *threads,
- pid_t pid, pid_t tid,
- bool create)
+static struct thread*
+__threads__get_last_match(struct threads *threads, struct machine *machine,
+ int pid, int tid)
{
- struct rb_node **p = &threads->entries.rb_node;
- struct rb_node *parent = NULL;
struct thread *th;
- /*
- * Front-end cache - TID lookups come in blocks,
- * so most of the time we dont have to look up
- * the full rbtree:
- */
th = threads->last_match;
if (th != NULL) {
if (th->tid == tid) {
@@ -435,12 +428,57 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
threads->last_match = NULL;
}
+ return NULL;
+}
+
+static struct thread*
+threads__get_last_match(struct threads *threads, struct machine *machine,
+ int pid, int tid)
+{
+ struct thread *th = NULL;
+
+ if (perf_singlethreaded)
+ th = __threads__get_last_match(threads, machine, pid, tid);
+
+ return th;
+}
+
+static void
+__threads__set_last_match(struct threads *threads, struct thread *th)
+{
+ threads->last_match = th;
+}
+
+static void
+threads__set_last_match(struct threads *threads, struct thread *th)
+{
+ if (perf_singlethreaded)
+ __threads__set_last_match(threads, th);
+}
+
+/*
+ * Caller must eventually drop thread->refcnt returned with a successful
+ * lookup/new thread inserted.
+ */
+static struct thread *____machine__findnew_thread(struct machine *machine,
+ struct threads *threads,
+ pid_t pid, pid_t tid,
+ bool create)
+{
+ struct rb_node **p = &threads->entries.rb_node;
+ struct rb_node *parent = NULL;
+ struct thread *th;
+
+ th = threads__get_last_match(threads, machine, pid, tid);
+ if (th)
+ return th;
+
while (*p != NULL) {
parent = *p;
th = rb_entry(parent, struct thread, rb_node);
if (th->tid == tid) {
- threads->last_match = th;
+ threads__set_last_match(threads, th);
machine__update_thread_pid(machine, th, pid);
return thread__get(th);
}
@@ -477,7 +515,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
* It is now in the rbtree, get a ref
*/
thread__get(th);
- threads->last_match = th;
+ threads__set_last_match(threads, th);
++threads->nr;
}
@@ -1635,7 +1673,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
struct threads *threads = machine__threads(machine, th->tid);
if (threads->last_match == th)
- threads->last_match = NULL;
+ threads__set_last_match(threads, NULL);
BUG_ON(refcount_read(&th->refcnt) == 0);
if (lock)
@@ -2272,6 +2310,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
{
struct callchain_cursor *cursor = arg;
const char *srcline = NULL;
+ u64 addr;
if (symbol_conf.hide_unresolved && entry->sym == NULL)
return 0;
@@ -2279,7 +2318,13 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
return 0;
- srcline = callchain_srcline(entry->map, entry->sym, entry->ip);
+ /*
+ * Convert entry->ip from a virtual address to an offset in
+ * its corresponding binary.
+ */
+ addr = map__map_ip(entry->map, entry->ip);
+
+ srcline = callchain_srcline(entry->map, entry->sym, addr);
return callchain_cursor_append(cursor, entry->ip,
entry->map, entry->sym,
false, NULL, 0, 0, 0, srcline);
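
Note on the srcline fix: unwind_entry() now keeps the virtual address in the callchain entry (matching the unwind-libdw and unwind-libunwind-local changes further down) and converts to a binary-relative offset only for the srcline lookup, which works on file offsets. Roughly what map__map_ip() computes — a simplified restatement, not the exact perf implementation:

    static inline unsigned long long virt_to_obj_offset(unsigned long long ip,
                                                        unsigned long long map_start,
                                                        unsigned long long pgoff)
    {
        /* virtual address -> offset within the mapped binary */
        return ip - map_start + pgoff;
    }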
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 1ddc3d1d0147..a28f9b5cc4ff 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -326,8 +326,8 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
if (raw)
s = (char *)pe->metric_name;
else {
- if (asprintf(&s, "%s\n\t[%s]",
- pe->metric_name, pe->desc) < 0)
+ if (asprintf(&s, "%s\n%*s%s]",
+ pe->metric_name, 8, "[", pe->desc) < 0)
return;
}
@@ -490,3 +490,25 @@ out:
metricgroup__free_egroups(&group_list);
return ret;
}
+
+bool metricgroup__has_metric(const char *metric)
+{
+ struct pmu_events_map *map = perf_pmu__find_map(NULL);
+ struct pmu_event *pe;
+ int i;
+
+ if (!map)
+ return false;
+
+ for (i = 0; ; i++) {
+ pe = &map->table[i];
+
+ if (!pe->name && !pe->metric_group && !pe->metric_name)
+ break;
+ if (!pe->metric_expr)
+ continue;
+ if (match_metric(pe->metric_name, metric))
+ return true;
+ }
+ return false;
+}
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
index 06854e125ee7..8a155dba0581 100644
--- a/tools/perf/util/metricgroup.h
+++ b/tools/perf/util/metricgroup.h
@@ -28,4 +28,5 @@ int metricgroup__parse_groups(const struct option *opt,
struct rblist *metric_events);
void metricgroup__print(bool metrics, bool groups, char *filter, bool raw);
+bool metricgroup__has_metric(const char *metric);
#endif
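
Note: metricgroup__has_metric() lets a caller probe whether the loaded pmu-events table defines a given metric before trying to use it; together with the transaction.json files added earlier, a plausible consumer is perf stat preferring an architecture's JSON "transaction" metric over hard-coded event lists. A hypothetical caller, not code from this patch (assumes perf's metricgroup.h and the types it references):

    #include "metricgroup.h"

    static int setup_transaction_metrics(const struct option *opt,
                                         struct rblist *metric_events)
    {
        if (!metricgroup__has_metric("transaction"))
            return -1;   /* fall back to hard-coded transaction events */

        return metricgroup__parse_groups(opt, "transaction", metric_events);
    }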
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 3ba6a1742f91..afd68524ffa9 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -652,12 +652,6 @@ static int is_arm_pmu_core(const char *name)
if (stat(path, &st) == 0)
return 1;
- /* Look for cpu sysfs (specific to s390) */
- scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s",
- sysfs, name);
- if (stat(path, &st) == 0 && !strncmp(name, "cpum_", 5))
- return 1;
-
return 0;
}
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 594d14a02b67..99990f5f2512 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -913,11 +913,10 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
ratio = total / avg;
print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
- } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
- perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
+ } else if (perf_evsel__is_clock(evsel)) {
if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
- avg / ratio);
+ avg / (ratio * evsel->scale));
else
print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
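
Note: because clock evsels are now created with unit "msec" and scale 1e-6 (see the evsel.c hunk earlier in this patch), the value reaching the shadow-stat printer is presumably already scaled to milliseconds while walltime_nsecs_stats is still in nanoseconds; dividing the walltime by the same scale keeps "CPUs utilized" dimensionless. Worked through under that assumption, with illustrative names:

    static double cpus_utilized(double avg_msec, double walltime_ns, double scale)
    {
        /* msec / (ns * 1e-6) cancels the units: a plain CPU count */
        return avg_msec / (walltime_ns * scale);
    }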
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 0ee7f568d60c..3393d7ee9401 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -38,6 +38,10 @@ static const char **syscalltbl_native = syscalltbl_powerpc_64;
#include <asm/syscalls_32.c>
const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_32_MAX_ID;
static const char **syscalltbl_native = syscalltbl_powerpc_32;
+#elif defined(__aarch64__)
+#include <asm/syscalls.c>
+const int syscalltbl_native_max_id = SYSCALLTBL_ARM64_MAX_ID;
+static const char **syscalltbl_native = syscalltbl_arm64;
#endif
struct syscall {
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 538db4e5d1e6..6f318b15950e 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -77,7 +77,7 @@ static int entry(u64 ip, struct unwind_info *ui)
if (__report_module(&al, ip, ui))
return -1;
- e->ip = al.addr;
+ e->ip = ip;
e->map = al.map;
e->sym = al.sym;
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 6a11bc7e6b27..79f521a552cf 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -575,7 +575,7 @@ static int entry(u64 ip, struct thread *thread,
struct addr_location al;
e.sym = thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
- e.ip = al.addr;
+ e.ip = ip;
e.map = al.map;
pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index a362e3d7abc6..fff7fb1285fc 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -22,7 +22,8 @@ $(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
- test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user
+ test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \
+ test_socket_cookie test_cgroup_storage test_select_reuseport
TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
@@ -33,7 +34,8 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test
test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o test_tunnel_kern.o \
test_get_stack_rawtp.o test_sockmap_kern.o test_sockhash_kern.o \
test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \
- get_cgroup_id_kern.o
+ get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \
+ test_skb_cgroup_id_kern.o
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
@@ -44,10 +46,11 @@ TEST_PROGS := test_kmod.sh \
test_sock_addr.sh \
test_tunnel.sh \
test_lwt_seg6local.sh \
- test_lirc_mode2.sh
+ test_lirc_mode2.sh \
+ test_skb_cgroup_id.sh
# Compile but not part of 'make run_tests'
-TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr
+TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user
include ../lib.mk
@@ -58,11 +61,15 @@ $(TEST_GEN_PROGS): $(BPFOBJ)
$(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/libbpf.a
$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c
+$(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c
$(OUTPUT)/test_sock: cgroup_helpers.c
$(OUTPUT)/test_sock_addr: cgroup_helpers.c
+$(OUTPUT)/test_socket_cookie: cgroup_helpers.c
$(OUTPUT)/test_sockmap: cgroup_helpers.c
+$(OUTPUT)/test_tcpbpf_user: cgroup_helpers.c
$(OUTPUT)/test_progs: trace_helpers.c
$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
+$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
.PHONY: force
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index 810de20e8e26..e4be7730222d 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -65,6 +65,8 @@ static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
(void *) BPF_FUNC_xdp_adjust_head;
static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) =
(void *) BPF_FUNC_xdp_adjust_meta;
+static int (*bpf_get_socket_cookie)(void *ctx) =
+ (void *) BPF_FUNC_get_socket_cookie;
static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
int optlen) =
(void *) BPF_FUNC_setsockopt;
@@ -109,6 +111,8 @@ static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
static int (*bpf_skb_get_xfrm_state)(void *ctx, int index, void *state,
int size, int flags) =
(void *) BPF_FUNC_skb_get_xfrm_state;
+static int (*bpf_sk_select_reuseport)(void *ctx, void *map, void *key, __u32 flags) =
+ (void *) BPF_FUNC_sk_select_reuseport;
static int (*bpf_get_stack)(void *ctx, void *buf, int size, int flags) =
(void *) BPF_FUNC_get_stack;
static int (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params,
@@ -133,6 +137,12 @@ static int (*bpf_rc_keydown)(void *ctx, unsigned int protocol,
(void *) BPF_FUNC_rc_keydown;
static unsigned long long (*bpf_get_current_cgroup_id)(void) =
(void *) BPF_FUNC_get_current_cgroup_id;
+static void *(*bpf_get_local_storage)(void *map, unsigned long long flags) =
+ (void *) BPF_FUNC_get_local_storage;
+static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) =
+ (void *) BPF_FUNC_skb_cgroup_id;
+static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) =
+ (void *) BPF_FUNC_skb_ancestor_cgroup_id;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
@@ -169,6 +179,8 @@ struct bpf_map_def {
static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
(void *) BPF_FUNC_skb_load_bytes;
+static int (*bpf_skb_load_bytes_relative)(void *ctx, int off, void *to, int len, __u32 start_header) =
+ (void *) BPF_FUNC_skb_load_bytes_relative;
static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
(void *) BPF_FUNC_skb_store_bytes;
static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index d0811b3d6a6f..315a44fa32af 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -44,4 +44,8 @@ static inline unsigned int bpf_num_possible_cpus(void)
name[bpf_num_possible_cpus()]
#define bpf_percpu(name, cpu) name[(cpu)].v
+#ifndef ARRAY_SIZE
+# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
#endif /* __BPF_UTIL__ */
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index c87b4e052ce9..cf16948aad4a 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -118,7 +118,7 @@ static int join_cgroup_from_top(char *cgroup_path)
*
* On success, it returns 0, otherwise on failure it returns 1.
*/
-int join_cgroup(char *path)
+int join_cgroup(const char *path)
{
char cgroup_path[PATH_MAX + 1];
@@ -158,7 +158,7 @@ void cleanup_cgroup_environment(void)
* On success, it returns the file descriptor. On failure it returns 0.
* If there is a failure, it prints the error to stderr.
*/
-int create_and_get_cgroup(char *path)
+int create_and_get_cgroup(const char *path)
{
char cgroup_path[PATH_MAX + 1];
int fd;
@@ -186,7 +186,7 @@ int create_and_get_cgroup(char *path)
* which is an invalid cgroup id.
* If there is a failure, it prints the error to stderr.
*/
-unsigned long long get_cgroup_id(char *path)
+unsigned long long get_cgroup_id(const char *path)
{
int dirfd, err, flags, mount_id, fhsize;
union {
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h
index 20a4a5dcd469..d64bb8957090 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.h
+++ b/tools/testing/selftests/bpf/cgroup_helpers.h
@@ -9,10 +9,10 @@
__FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
-int create_and_get_cgroup(char *path);
-int join_cgroup(char *path);
+int create_and_get_cgroup(const char *path);
+int join_cgroup(const char *path);
int setup_cgroup_environment(void);
void cleanup_cgroup_environment(void);
-unsigned long long get_cgroup_id(char *path);
+unsigned long long get_cgroup_id(const char *path);
#endif
diff --git a/tools/testing/selftests/bpf/socket_cookie_prog.c b/tools/testing/selftests/bpf/socket_cookie_prog.c
new file mode 100644
index 000000000000..9ff8ac4b0bf6
--- /dev/null
+++ b/tools/testing/selftests/bpf/socket_cookie_prog.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <linux/bpf.h>
+#include <sys/socket.h>
+
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+struct bpf_map_def SEC("maps") socket_cookies = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(__u64),
+ .value_size = sizeof(__u32),
+ .max_entries = 1 << 8,
+};
+
+SEC("cgroup/connect6")
+int set_cookie(struct bpf_sock_addr *ctx)
+{
+ __u32 cookie_value = 0xFF;
+ __u64 cookie_key;
+
+ if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6)
+ return 1;
+
+ cookie_key = bpf_get_socket_cookie(ctx);
+ if (bpf_map_update_elem(&socket_cookies, &cookie_key, &cookie_value, 0))
+ return 0;
+
+ return 1;
+}
+
+SEC("sockops")
+int update_cookie(struct bpf_sock_ops *ctx)
+{
+ __u32 new_cookie_value;
+ __u32 *cookie_value;
+ __u64 cookie_key;
+
+ if (ctx->family != AF_INET6)
+ return 1;
+
+ if (ctx->op != BPF_SOCK_OPS_TCP_CONNECT_CB)
+ return 1;
+
+ cookie_key = bpf_get_socket_cookie(ctx);
+
+ cookie_value = bpf_map_lookup_elem(&socket_cookies, &cookie_key);
+ if (!cookie_value)
+ return 1;
+
+ new_cookie_value = (ctx->local_port << 8) | *cookie_value;
+ bpf_map_update_elem(&socket_cookies, &cookie_key, &new_cookie_value, 0);
+
+ return 1;
+}
+
+int _version SEC("version") = 1;
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/tcp_client.py b/tools/testing/selftests/bpf/tcp_client.py
index 481dccdf140c..7f8200a8702b 100755
--- a/tools/testing/selftests/bpf/tcp_client.py
+++ b/tools/testing/selftests/bpf/tcp_client.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
#
# SPDX-License-Identifier: GPL-2.0
#
@@ -9,11 +9,11 @@ import subprocess
import select
def read(sock, n):
- buf = ''
+ buf = b''
while len(buf) < n:
rem = n - len(buf)
try: s = sock.recv(rem)
- except (socket.error), e: return ''
+ except (socket.error) as e: return b''
buf += s
return buf
@@ -22,7 +22,7 @@ def send(sock, s):
count = 0
while count < total:
try: n = sock.send(s)
- except (socket.error), e: n = 0
+ except (socket.error) as e: n = 0
if n == 0:
return count;
count += n
@@ -39,10 +39,10 @@ try:
except socket.error as e:
sys.exit(1)
-buf = ''
+buf = b''
n = 0
while n < 1000:
- buf += '+'
+ buf += b'+'
n += 1
sock.settimeout(1);
diff --git a/tools/testing/selftests/bpf/tcp_server.py b/tools/testing/selftests/bpf/tcp_server.py
index bc454d7d0be2..b39903fca4c8 100755
--- a/tools/testing/selftests/bpf/tcp_server.py
+++ b/tools/testing/selftests/bpf/tcp_server.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
#
# SPDX-License-Identifier: GPL-2.0
#
@@ -9,11 +9,11 @@ import subprocess
import select
def read(sock, n):
- buf = ''
+ buf = b''
while len(buf) < n:
rem = n - len(buf)
try: s = sock.recv(rem)
- except (socket.error), e: return ''
+ except (socket.error) as e: return b''
buf += s
return buf
@@ -22,7 +22,7 @@ def send(sock, s):
count = 0
while count < total:
try: n = sock.send(s)
- except (socket.error), e: n = 0
+ except (socket.error) as e: n = 0
if n == 0:
return count;
count += n
@@ -43,7 +43,7 @@ host = socket.gethostname()
try: serverSocket.bind((host, 0))
except socket.error as msg:
- print 'bind fails: ', msg
+ print('bind fails: ' + str(msg))
sn = serverSocket.getsockname()
serverPort = sn[1]
@@ -51,10 +51,10 @@ serverPort = sn[1]
cmdStr = ("./tcp_client.py %d &") % (serverPort)
os.system(cmdStr)
-buf = ''
+buf = b''
n = 0
while n < 500:
- buf += '.'
+ buf += b'.'
n += 1
serverSocket.listen(MAX_PORTS)
@@ -79,5 +79,5 @@ while True:
serverSocket.close()
sys.exit(0)
else:
- print 'Select timeout!'
+ print('Select timeout!')
sys.exit(1)
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index 6b1b302310fe..5f377ec53f2f 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -18,10 +18,7 @@
#include "../../../include/linux/filter.h"
#include "bpf_rlimit.h"
-
-#ifndef ARRAY_SIZE
-# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
+#include "bpf_util.h"
#define MAX_INSNS 512
#define MAX_MATCHES 16
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index ffdd27737c9e..6b5cfeb7a9cc 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -19,6 +19,7 @@
#include <bpf/btf.h>
#include "bpf_rlimit.h"
+#include "bpf_util.h"
static uint32_t pass_cnt;
static uint32_t error_cnt;
@@ -93,10 +94,6 @@ static int __base_pr(const char *format, ...)
#define MAX_NR_RAW_TYPES 1024
#define BTF_LOG_BUF_SIZE 65535
-#ifndef ARRAY_SIZE
-# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
static struct args {
unsigned int raw_test_num;
unsigned int file_test_num;
@@ -131,6 +128,8 @@ struct btf_raw_test {
__u32 max_entries;
bool btf_load_err;
bool map_create_err;
+ bool ordered_map;
+ bool lossless_map;
int hdr_len_delta;
int type_off_delta;
int str_off_delta;
@@ -2093,8 +2092,7 @@ struct pprint_mapv {
} aenum;
};
-static struct btf_raw_test pprint_test = {
- .descr = "BTF pretty print test #1",
+static struct btf_raw_test pprint_test_template = {
.raw_types = {
/* unsigned char */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
@@ -2146,8 +2144,6 @@ static struct btf_raw_test pprint_test = {
},
.str_sec = "\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum",
.str_sec_size = sizeof("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"),
- .map_type = BPF_MAP_TYPE_ARRAY,
- .map_name = "pprint_test",
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
@@ -2155,6 +2151,40 @@ static struct btf_raw_test pprint_test = {
.max_entries = 128 * 1024,
};
+static struct btf_pprint_test_meta {
+ const char *descr;
+ enum bpf_map_type map_type;
+ const char *map_name;
+ bool ordered_map;
+ bool lossless_map;
+} pprint_tests_meta[] = {
+{
+ .descr = "BTF pretty print array",
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "pprint_test_array",
+ .ordered_map = true,
+ .lossless_map = true,
+},
+
+{
+ .descr = "BTF pretty print hash",
+ .map_type = BPF_MAP_TYPE_HASH,
+ .map_name = "pprint_test_hash",
+ .ordered_map = false,
+ .lossless_map = true,
+},
+
+{
+ .descr = "BTF pretty print lru hash",
+ .map_type = BPF_MAP_TYPE_LRU_HASH,
+ .map_name = "pprint_test_lru_hash",
+ .ordered_map = false,
+ .lossless_map = false,
+},
+
+};
+
+
static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i)
{
v->ui32 = i;
@@ -2166,10 +2196,12 @@ static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i)
v->aenum = i & 0x03;
}
-static int test_pprint(void)
+static int do_test_pprint(void)
{
- const struct btf_raw_test *test = &pprint_test;
+ const struct btf_raw_test *test = &pprint_test_template;
struct bpf_create_map_attr create_attr = {};
+ unsigned int key, nr_read_elems;
+ bool ordered_map, lossless_map;
int map_fd = -1, btf_fd = -1;
struct pprint_mapv mapv = {};
unsigned int raw_btf_size;
@@ -2178,7 +2210,6 @@ static int test_pprint(void)
char pin_path[255];
size_t line_len = 0;
char *line = NULL;
- unsigned int key;
uint8_t *raw_btf;
ssize_t nread;
int err, ret;
@@ -2251,14 +2282,18 @@ static int test_pprint(void)
goto done;
}
- key = 0;
+ nr_read_elems = 0;
+ ordered_map = test->ordered_map;
+ lossless_map = test->lossless_map;
do {
ssize_t nexpected_line;
+ unsigned int next_key;
- set_pprint_mapv(&mapv, key);
+ next_key = ordered_map ? nr_read_elems : atoi(line);
+ set_pprint_mapv(&mapv, next_key);
nexpected_line = snprintf(expected_line, sizeof(expected_line),
"%u: {%u,0,%d,0x%x,0x%x,0x%x,{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n",
- key,
+ next_key,
mapv.ui32, mapv.si32,
mapv.unused_bits2a, mapv.bits28, mapv.unused_bits2b,
mapv.ui64,
@@ -2281,11 +2316,12 @@ static int test_pprint(void)
}
nread = getline(&line, &line_len, pin_file);
- } while (++key < test->max_entries && nread > 0);
+ } while (++nr_read_elems < test->max_entries && nread > 0);
- if (CHECK(key < test->max_entries,
- "Unexpected EOF. key:%u test->max_entries:%u",
- key, test->max_entries)) {
+ if (lossless_map &&
+ CHECK(nr_read_elems < test->max_entries,
+ "Unexpected EOF. nr_read_elems:%u test->max_entries:%u",
+ nr_read_elems, test->max_entries)) {
err = -1;
goto done;
}
@@ -2314,6 +2350,24 @@ done:
return err;
}
+static int test_pprint(void)
+{
+ unsigned int i;
+ int err = 0;
+
+ for (i = 0; i < ARRAY_SIZE(pprint_tests_meta); i++) {
+ pprint_test_template.descr = pprint_tests_meta[i].descr;
+ pprint_test_template.map_type = pprint_tests_meta[i].map_type;
+ pprint_test_template.map_name = pprint_tests_meta[i].map_name;
+ pprint_test_template.ordered_map = pprint_tests_meta[i].ordered_map;
+ pprint_test_template.lossless_map = pprint_tests_meta[i].lossless_map;
+
+ err |= count_result(do_test_pprint());
+ }
+
+ return err;
+}
+
static void usage(const char *cmd)
{
fprintf(stderr, "Usage: %s [-l] [[-r test_num (1 - %zu)] | [-g test_num (1 - %zu)] | [-f test_num (1 - %zu)] | [-p]]\n",
@@ -2409,7 +2463,7 @@ int main(int argc, char **argv)
err |= test_file();
if (args.pprint_test)
- err |= count_result(test_pprint());
+ err |= test_pprint();
if (args.raw_test || args.get_info_test || args.file_test ||
args.pprint_test)
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
new file mode 100644
index 000000000000..dc83fb2d3f27
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <assert.h>
+#include <bpf/bpf.h>
+#include <linux/filter.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cgroup_helpers.h"
+
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
+#define TEST_CGROUP "/test-bpf-cgroup-storage-buf/"
+
+int main(int argc, char **argv)
+{
+ struct bpf_insn prog[] = {
+ BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
+ BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_MOV64_IMM(BPF_REG_1, 1), /* increment */
+ BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0), /* *storage += 1, atomically */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), /* r1 = low 32 bits of counter */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1), /* keep only the parity bit */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), /* return it: 1 = pass, 0 = drop */
+ BPF_EXIT_INSN(),
+ };
+ size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+ int error = EXIT_FAILURE;
+ int map_fd, prog_fd, cgroup_fd;
+ struct bpf_cgroup_storage_key key;
+ unsigned long long value;
+
+ map_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, sizeof(key),
+ sizeof(value), 0, 0);
+ if (map_fd < 0) {
+ printf("Failed to create map: %s\n", strerror(errno));
+ goto out;
+ }
+
+ prog[0].imm = map_fd;
+ prog_fd = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
+ prog, insns_cnt, "GPL", 0,
+ bpf_log_buf, BPF_LOG_BUF_SIZE);
+ if (prog_fd < 0) {
+ printf("Failed to load bpf program: %s\n", bpf_log_buf);
+ goto out;
+ }
+
+ if (setup_cgroup_environment()) {
+ printf("Failed to setup cgroup environment\n");
+ goto err;
+ }
+
+ /* Create a cgroup, get fd, and join it */
+ cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
+ if (!cgroup_fd) {
+ printf("Failed to create test cgroup\n");
+ goto err;
+ }
+
+ if (join_cgroup(TEST_CGROUP)) {
+ printf("Failed to join cgroup\n");
+ goto err;
+ }
+
+ /* Attach the bpf program */
+ if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) {
+ printf("Failed to attach bpf program\n");
+ goto err;
+ }
+
+ if (bpf_map_get_next_key(map_fd, NULL, &key)) {
+ printf("Failed to get the first key in cgroup storage\n");
+ goto err;
+ }
+
+ if (bpf_map_lookup_elem(map_fd, &key, &value)) {
+ printf("Failed to lookup cgroup storage\n");
+ goto err;
+ }
+
+ /* Every second packet should be dropped */
+ assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
+ assert(system("ping localhost -c 1 -W 1 -q > /dev/null"));
+ assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
+
+ /* Check the counter in the cgroup local storage */
+ if (bpf_map_lookup_elem(map_fd, &key, &value)) {
+ printf("Failed to lookup cgroup storage\n");
+ goto err;
+ }
+
+ if (value != 3) {
+ printf("Unexpected data in the cgroup storage: %llu\n", value);
+ goto err;
+ }
+
+ /* Bump the counter in the cgroup local storage */
+ value++;
+ if (bpf_map_update_elem(map_fd, &key, &value, 0)) {
+ printf("Failed to update the data in the cgroup storage\n");
+ goto err;
+ }
+
+ /* Every second packet should be dropped */
+ assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
+ assert(system("ping localhost -c 1 -W 1 -q > /dev/null"));
+ assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
+
+ /* Check the final value of the counter in the cgroup local storage */
+ if (bpf_map_lookup_elem(map_fd, &key, &value)) {
+ printf("Failed to lookup the cgroup storage\n");
+ goto err;
+ }
+
+ if (value != 7) {
+ printf("Unexpected data in the cgroup storage: %llu\n", value);
+ goto err;
+ }
+
+ error = 0;
+ printf("test_cgroup_storage:PASS\n");
+
+err:
+ cleanup_cgroup_environment();
+
+out:
+ return error;
+}
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 6c253343a6f9..6f54f84144a0 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -17,7 +17,8 @@
#include <stdlib.h>
#include <sys/wait.h>
-
+#include <sys/socket.h>
+#include <netinet/in.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
@@ -26,8 +27,21 @@
#include "bpf_util.h"
#include "bpf_rlimit.h"
+#ifndef ENOTSUPP
+#define ENOTSUPP 524
+#endif
+
static int map_flags;
+#define CHECK(condition, tag, format...) ({ \
+ int __ret = !!(condition); \
+ if (__ret) { \
+ printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \
+ printf(format); \
+ exit(-1); \
+ } \
+})
+
static void test_hashmap(int task, void *data)
{
long long key, next_key, first_key, value;
@@ -1150,6 +1164,250 @@ static void test_map_wronly(void)
assert(bpf_map_get_next_key(fd, &key, &value) == -1 && errno == EPERM);
}
+static void prepare_reuseport_grp(int type, int map_fd,
+ __s64 *fds64, __u64 *sk_cookies,
+ unsigned int n)
+{
+ socklen_t optlen, addrlen;
+ struct sockaddr_in6 s6;
+ const __u32 index0 = 0;
+ const int optval = 1;
+ unsigned int i;
+ __u64 sk_cookie;
+ __s64 fd64;
+ int err;
+
+ s6.sin6_family = AF_INET6;
+ s6.sin6_addr = in6addr_any;
+ s6.sin6_port = 0;
+ addrlen = sizeof(s6);
+ optlen = sizeof(sk_cookie);
+
+ for (i = 0; i < n; i++) {
+ fd64 = socket(AF_INET6, type, 0);
+ CHECK(fd64 == -1, "socket()",
+ "sock_type:%d fd64:%lld errno:%d\n",
+ type, fd64, errno);
+
+ err = setsockopt(fd64, SOL_SOCKET, SO_REUSEPORT,
+ &optval, sizeof(optval));
+ CHECK(err == -1, "setsockopt(SO_REUSEPORT)",
+ "err:%d errno:%d\n", err, errno);
+
+ /* reuseport_array does not allow unbound sk */
+ err = bpf_map_update_elem(map_fd, &index0, &fd64,
+ BPF_ANY);
+ CHECK(err != -1 || errno != EINVAL,
+ "reuseport array update unbound sk",
+ "sock_type:%d err:%d errno:%d\n",
+ type, err, errno);
+
+ err = bind(fd64, (struct sockaddr *)&s6, sizeof(s6));
+ CHECK(err == -1, "bind()",
+ "sock_type:%d err:%d errno:%d\n", type, err, errno);
+
+ if (i == 0) {
+ err = getsockname(fd64, (struct sockaddr *)&s6,
+ &addrlen);
+ CHECK(err == -1, "getsockname()",
+ "sock_type:%d err:%d errno:%d\n",
+ type, err, errno);
+ }
+
+ err = getsockopt(fd64, SOL_SOCKET, SO_COOKIE, &sk_cookie,
+ &optlen);
+ CHECK(err == -1, "getsockopt(SO_COOKIE)",
+ "sock_type:%d err:%d errno:%d\n", type, err, errno);
+
+ if (type == SOCK_STREAM) {
+ /*
+ * reuseport_array does not allow
+ * non-listening tcp sk.
+ */
+ err = bpf_map_update_elem(map_fd, &index0, &fd64,
+ BPF_ANY);
+ CHECK(err != -1 || errno != EINVAL,
+ "reuseport array update non-listening sk",
+ "sock_type:%d err:%d errno:%d\n",
+ type, err, errno);
+ err = listen(fd64, 0);
+ CHECK(err == -1, "listen()",
+ "sock_type:%d, err:%d errno:%d\n",
+ type, err, errno);
+ }
+
+ fds64[i] = fd64;
+ sk_cookies[i] = sk_cookie;
+ }
+}
+
+static void test_reuseport_array(void)
+{
+#define REUSEPORT_FD_IDX(err, last) ({ (err) ? last : !last; })
+
+ const __u32 array_size = 4, index0 = 0, index3 = 3;
+ int types[2] = { SOCK_STREAM, SOCK_DGRAM }, type;
+ __u64 grpa_cookies[2], sk_cookie, map_cookie;
+ __s64 grpa_fds64[2] = { -1, -1 }, fd64 = -1;
+ const __u32 bad_index = array_size;
+ int map_fd, err, t, f;
+ __u32 fds_idx = 0;
+ int fd;
+
+ map_fd = bpf_create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
+ sizeof(__u32), sizeof(__u64), array_size, 0);
+ CHECK(map_fd == -1, "reuseport array create",
+ "map_fd:%d, errno:%d\n", map_fd, errno);
+
+ /* Test lookup/update/delete with invalid index */
+ err = bpf_map_delete_elem(map_fd, &bad_index);
+ CHECK(err != -1 || errno != E2BIG, "reuseport array del >=max_entries",
+ "err:%d errno:%d\n", err, errno);
+
+ err = bpf_map_update_elem(map_fd, &bad_index, &fd64, BPF_ANY);
+ CHECK(err != -1 || errno != E2BIG,
+ "reuseport array update >=max_entries",
+ "err:%d errno:%d\n", err, errno);
+
+ err = bpf_map_lookup_elem(map_fd, &bad_index, &map_cookie);
+ CHECK(err != -1 || errno != ENOENT,
+ "reuseport array update >=max_entries",
+ "err:%d errno:%d\n", err, errno);
+
+ /* Test lookup/delete non existence elem */
+ err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
+ CHECK(err != -1 || errno != ENOENT,
+ "reuseport array lookup not-exist elem",
+ "err:%d errno:%d\n", err, errno);
+ err = bpf_map_delete_elem(map_fd, &index3);
+ CHECK(err != -1 || errno != ENOENT,
+ "reuseport array del not-exist elem",
+ "err:%d errno:%d\n", err, errno);
+
+ for (t = 0; t < ARRAY_SIZE(types); t++) {
+ type = types[t];
+
+ prepare_reuseport_grp(type, map_fd, grpa_fds64,
+ grpa_cookies, ARRAY_SIZE(grpa_fds64));
+
+ /* Test BPF_* update flags */
+ /* BPF_EXIST failure case */
+ err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
+ BPF_EXIST);
+ CHECK(err != -1 || errno != ENOENT,
+ "reuseport array update empty elem BPF_EXIST",
+ "sock_type:%d err:%d errno:%d\n",
+ type, err, errno);
+ fds_idx = REUSEPORT_FD_IDX(err, fds_idx);
+
+ /* BPF_NOEXIST success case */
+ err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
+ BPF_NOEXIST);
+ CHECK(err == -1,
+ "reuseport array update empty elem BPF_NOEXIST",
+ "sock_type:%d err:%d errno:%d\n",
+ type, err, errno);
+ fds_idx = REUSEPORT_FD_IDX(err, fds_idx);
+
+ /* BPF_EXIST success case. */
+ err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
+ BPF_EXIST);
+ CHECK(err == -1,
+ "reuseport array update same elem BPF_EXIST",
+ "sock_type:%d err:%d errno:%d\n", type, err, errno);
+ fds_idx = REUSEPORT_FD_IDX(err, fds_idx);
+
+ /* BPF_NOEXIST failure case */
+ err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
+ BPF_NOEXIST);
+ CHECK(err != -1 || errno != EEXIST,
+ "reuseport array update non-empty elem BPF_NOEXIST",
+ "sock_type:%d err:%d errno:%d\n",
+ type, err, errno);
+ fds_idx = REUSEPORT_FD_IDX(err, fds_idx);
+
+ /* BPF_ANY case (always succeed) */
+ err = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],
+ BPF_ANY);
+ CHECK(err == -1,
+ "reuseport array update same sk with BPF_ANY",
+ "sock_type:%d err:%d errno:%d\n", type, err, errno);
+
+ fd64 = grpa_fds64[fds_idx];
+ sk_cookie = grpa_cookies[fds_idx];
+
+ /* The same sk cannot be added to reuseport_array twice */
+ err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_ANY);
+ CHECK(err != -1 || errno != EBUSY,
+ "reuseport array update same sk with same index",
+ "sock_type:%d err:%d errno:%d\n",
+ type, err, errno);
+
+ err = bpf_map_update_elem(map_fd, &index0, &fd64, BPF_ANY);
+ CHECK(err != -1 || errno != EBUSY,
+ "reuseport array update same sk with different index",
+ "sock_type:%d err:%d errno:%d\n",
+ type, err, errno);
+
+ /* Test delete elem */
+ err = bpf_map_delete_elem(map_fd, &index3);
+ CHECK(err == -1, "reuseport array delete sk",
+ "sock_type:%d err:%d errno:%d\n",
+ type, err, errno);
+
+ /* Add it back with BPF_NOEXIST */
+ err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_NOEXIST);
+ CHECK(err == -1,
+ "reuseport array re-add with BPF_NOEXIST after del",
+ "sock_type:%d err:%d errno:%d\n", type, err, errno);
+
+ /* Test cookie */
+ err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
+ CHECK(err == -1 || sk_cookie != map_cookie,
+ "reuseport array lookup re-added sk",
+ "sock_type:%d err:%d errno:%d sk_cookie:0x%llx map_cookie:0x%llxn",
+ type, err, errno, sk_cookie, map_cookie);
+
+ /* Test elem removed by close() */
+ for (f = 0; f < ARRAY_SIZE(grpa_fds64); f++)
+ close(grpa_fds64[f]);
+ err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
+ CHECK(err != -1 || errno != ENOENT,
+ "reuseport array lookup after close()",
+ "sock_type:%d err:%d errno:%d\n",
+ type, err, errno);
+ }
+
+ /* Test SOCK_RAW */
+ fd64 = socket(AF_INET6, SOCK_RAW, IPPROTO_UDP);
+ CHECK(fd64 == -1, "socket(SOCK_RAW)", "err:%d errno:%d\n",
+ err, errno);
+ err = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_NOEXIST);
+ CHECK(err != -1 || errno != ENOTSUPP, "reuseport array update SOCK_RAW",
+ "err:%d errno:%d\n", err, errno);
+ close(fd64);
+
+ /* Close the 64 bit value map */
+ close(map_fd);
+
+ /* Test 32 bit fd */
+ map_fd = bpf_create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
+ sizeof(__u32), sizeof(__u32), array_size, 0);
+ CHECK(map_fd == -1, "reuseport array create",
+ "map_fd:%d, errno:%d\n", map_fd, errno);
+ prepare_reuseport_grp(SOCK_STREAM, map_fd, &fd64, &sk_cookie, 1);
+ fd = fd64;
+ err = bpf_map_update_elem(map_fd, &index3, &fd, BPF_NOEXIST);
+ CHECK(err == -1, "reuseport array update 32 bit fd",
+ "err:%d errno:%d\n", err, errno);
+ err = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);
+ CHECK(err != -1 || errno != ENOSPC,
+ "reuseport array lookup 32 bit fd",
+ "err:%d errno:%d\n", err, errno);
+ close(fd);
+ close(map_fd);
+}
+
static void run_all_tests(void)
{
test_hashmap(0, NULL);
@@ -1170,6 +1428,8 @@ static void run_all_tests(void)
test_map_rdonly();
test_map_wronly();
+
+ test_reuseport_array();
}
int main(void)
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index be800d0e7a84..d59642e70f56 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -158,8 +158,9 @@ def tool(name, args, flags, JSON=True, ns="", fail=True, include_stderr=False):
else:
return ret, out
-def bpftool(args, JSON=True, ns="", fail=True):
- return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail)
+def bpftool(args, JSON=True, ns="", fail=True, include_stderr=False):
+ return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns,
+ fail=fail, include_stderr=include_stderr)
def bpftool_prog_list(expected=None, ns=""):
_, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
@@ -201,6 +202,21 @@ def bpftool_map_list_wait(expected=0, n_retry=20):
time.sleep(0.05)
raise Exception("Time out waiting for map counts to stabilize want %d, have %d" % (expected, nmaps))
+def bpftool_prog_load(sample, file_name, maps=[], prog_type="xdp", dev=None,
+ fail=True, include_stderr=False):
+ args = "prog load %s %s" % (os.path.join(bpf_test_dir, sample), file_name)
+ if prog_type is not None:
+ args += " type " + prog_type
+ if dev is not None:
+ args += " dev " + dev
+ if len(maps):
+ args += " map " + " map ".join(maps)
+
+ res = bpftool(args, fail=fail, include_stderr=include_stderr)
+ if res[0] == 0:
+ files.append(file_name)
+ return res
+
def ip(args, force=False, JSON=True, ns="", fail=True, include_stderr=False):
if force:
args = "-force " + args
@@ -307,21 +323,25 @@ class NetdevSim:
Class for netdevsim netdevice and its attributes.
"""
- def __init__(self):
+ def __init__(self, link=None):
+ self.link = link
+
self.dev = self._netdevsim_create()
devs.append(self)
self.ns = ""
self.dfs_dir = '/sys/kernel/debug/netdevsim/%s' % (self.dev['ifname'])
+ self.sdev_dir = self.dfs_dir + '/sdev/'
self.dfs_refresh()
def __getitem__(self, key):
return self.dev[key]
def _netdevsim_create(self):
+ link = "" if self.link is None else "link " + self.link.dev['ifname']
_, old = ip("link show")
- ip("link add sim%d type netdevsim")
+ ip("link add sim%d {link} type netdevsim".format(link=link))
_, new = ip("link show")
for dev in new:
@@ -339,13 +359,18 @@ class NetdevSim:
self.dfs = DebugfsDir(self.dfs_dir)
return self.dfs
+ def dfs_read(self, f):
+ path = os.path.join(self.dfs_dir, f)
+ _, data = cmd('cat %s' % (path))
+ return data.strip()
+
def dfs_num_bound_progs(self):
- path = os.path.join(self.dfs_dir, "bpf_bound_progs")
+ path = os.path.join(self.sdev_dir, "bpf_bound_progs")
_, progs = cmd('ls %s' % (path))
return len(progs.split())
def dfs_get_bound_progs(self, expected):
- progs = DebugfsDir(os.path.join(self.dfs_dir, "bpf_bound_progs"))
+ progs = DebugfsDir(os.path.join(self.sdev_dir, "bpf_bound_progs"))
if expected is not None:
if len(progs) != expected:
fail(True, "%d BPF programs bound, expected %d" %
@@ -547,11 +572,11 @@ def check_extack(output, reference, args):
if skip_extack:
return
lines = output.split("\n")
- comp = len(lines) >= 2 and lines[1] == reference
+ comp = len(lines) >= 2 and lines[1] == 'Error: ' + reference
fail(not comp, "Missing or incorrect netlink extack message")
def check_extack_nsim(output, reference, args):
- check_extack(output, "Error: netdevsim: " + reference, args)
+ check_extack(output, "netdevsim: " + reference, args)
def check_no_extack(res, needle):
fail((res[1] + res[2]).count(needle) or (res[1] + res[2]).count("Warning:"),
@@ -654,7 +679,7 @@ try:
ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True,
fail=False, include_stderr=True)
fail(ret == 0, "TC filter loaded without enabling TC offloads")
- check_extack(err, "Error: TC offload is disabled on net device.", args)
+ check_extack(err, "TC offload is disabled on net device.", args)
sim.wait_for_flush()
sim.set_ethtool_tc_offloads(True)
@@ -694,7 +719,7 @@ try:
skip_sw=True,
fail=False, include_stderr=True)
fail(ret == 0, "Offloaded a filter to chain other than 0")
- check_extack(err, "Error: Driver supports only offload of chain 0.", args)
+ check_extack(err, "Driver supports only offload of chain 0.", args)
sim.tc_flush_filters()
start_test("Test TC replace...")
@@ -814,24 +839,20 @@ try:
"Device parameters reported for non-offloaded program")
start_test("Test XDP prog replace with bad flags...")
- ret, _, err = sim.set_xdp(obj, "offload", force=True,
+ ret, _, err = sim.set_xdp(obj, "generic", force=True,
fail=False, include_stderr=True)
fail(ret == 0, "Replaced XDP program with a program in different mode")
- check_extack_nsim(err, "program loaded with different flags.", args)
+ fail(err.count("File exists") != 1, "Replaced driver XDP with generic")
ret, _, err = sim.set_xdp(obj, "", force=True,
fail=False, include_stderr=True)
fail(ret == 0, "Replaced XDP program with a program in different mode")
- check_extack_nsim(err, "program loaded with different flags.", args)
+ check_extack(err, "program loaded with different flags.", args)
start_test("Test XDP prog remove with bad flags...")
- ret, _, err = sim.unset_xdp("offload", force=True,
- fail=False, include_stderr=True)
- fail(ret == 0, "Removed program with a bad mode mode")
- check_extack_nsim(err, "program loaded with different flags.", args)
ret, _, err = sim.unset_xdp("", force=True,
fail=False, include_stderr=True)
- fail(ret == 0, "Removed program with a bad mode mode")
- check_extack_nsim(err, "program loaded with different flags.", args)
+ fail(ret == 0, "Removed program with a bad mode")
+ check_extack(err, "program loaded with different flags.", args)
start_test("Test MTU restrictions...")
ret, _ = sim.set_mtu(9000, fail=False)
@@ -846,6 +867,25 @@ try:
sim.set_mtu(1500)
sim.wait_for_flush()
+ start_test("Test non-offload XDP attaching to HW...")
+ bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/nooffload")
+ nooffload = bpf_pinned("/sys/fs/bpf/nooffload")
+ ret, _, err = sim.set_xdp(nooffload, "offload",
+ fail=False, include_stderr=True)
+ fail(ret == 0, "attached non-offloaded XDP program to HW")
+ check_extack_nsim(err, "xdpoffload of non-bound program.", args)
+ rm("/sys/fs/bpf/nooffload")
+
+ start_test("Test offload XDP attaching to drv...")
+ bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/offload",
+ dev=sim['ifname'])
+ offload = bpf_pinned("/sys/fs/bpf/offload")
+ ret, _, err = sim.set_xdp(offload, "drv", fail=False, include_stderr=True)
+ fail(ret == 0, "attached offloaded XDP program to drv")
+ check_extack(err, "using device-bound program without HW_MODE flag is not supported.", args)
+ rm("/sys/fs/bpf/offload")
+ sim.wait_for_flush()
+
start_test("Test XDP offload...")
_, _, err = sim.set_xdp(obj, "offload", verbose=True, include_stderr=True)
ipl = sim.ip_link_show(xdp=True)
@@ -891,6 +931,60 @@ try:
rm(pin_file)
bpftool_prog_list_wait(expected=0)
+ start_test("Test multi-attachment XDP - attach...")
+ sim.set_xdp(obj, "offload")
+ xdp = sim.ip_link_show(xdp=True)["xdp"]
+ offloaded = sim.dfs_read("bpf_offloaded_id")
+ fail("prog" not in xdp, "Base program not reported in single program mode")
+ fail(len(xdp["attached"]) != 1,
+ "Wrong attached program count with one program")
+
+ sim.set_xdp(obj, "")
+ two_xdps = sim.ip_link_show(xdp=True)["xdp"]
+ offloaded2 = sim.dfs_read("bpf_offloaded_id")
+
+ fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs")
+ fail("prog" in two_xdps, "Base program reported in multi program mode")
+ fail(xdp["attached"][0] not in two_xdps["attached"],
+ "Offload program not reported after driver activated")
+ fail(len(two_xdps["attached"]) != 2,
+ "Wrong attached program count with two programs")
+ fail(two_xdps["attached"][0]["prog"]["id"] ==
+ two_xdps["attached"][1]["prog"]["id"],
+ "offloaded and drv programs have the same id")
+ fail(offloaded != offloaded2,
+ "offload ID changed after loading driver program")
+
+ start_test("Test multi-attachment XDP - replace...")
+ ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
+ fail(err.count("busy") != 1, "Replaced one of programs without -force")
+
+ start_test("Test multi-attachment XDP - detach...")
+ ret, _, err = sim.unset_xdp("drv", force=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Removed program with a bad mode")
+ check_extack(err, "program loaded with different flags.", args)
+
+ sim.unset_xdp("offload")
+ xdp = sim.ip_link_show(xdp=True)["xdp"]
+ offloaded = sim.dfs_read("bpf_offloaded_id")
+
+ fail(xdp["mode"] != 1, "Bad mode reported after multiple programs")
+ fail("prog" not in xdp,
+ "Base program not reported after multi program mode")
+ fail(xdp["attached"][0] not in two_xdps["attached"],
+ "Offload program not reported after driver activated")
+ fail(len(xdp["attached"]) != 1,
+ "Wrong attached program count with remaining programs")
+ fail(offloaded != "0", "offload ID reported with only driver program left")
+
+ start_test("Test multi-attachment XDP - device remove...")
+ sim.set_xdp(obj, "offload")
+ sim.remove()
+
+ sim = NetdevSim()
+ sim.set_ethtool_tc_offloads(True)
+
start_test("Test mixing of TC and XDP...")
sim.tc_add_ingress()
sim.set_xdp(obj, "offload")
@@ -1085,6 +1179,106 @@ try:
fail(ret == 0,
"netdevsim didn't refuse to create a map with offload disabled")
+ sim.remove()
+
+ start_test("Test multi-dev ASIC program reuse...")
+ simA = NetdevSim()
+ simB1 = NetdevSim()
+ simB2 = NetdevSim(link=simB1)
+ simB3 = NetdevSim(link=simB1)
+ sims = (simA, simB1, simB2, simB3)
+ simB = (simB1, simB2, simB3)
+
+ bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimA",
+ dev=simA['ifname'])
+ progA = bpf_pinned("/sys/fs/bpf/nsimA")
+ bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB",
+ dev=simB1['ifname'])
+ progB = bpf_pinned("/sys/fs/bpf/nsimB")
+
+ simA.set_xdp(progA, "offload", JSON=False)
+ for d in simB:
+ d.set_xdp(progB, "offload", JSON=False)
+
+ start_test("Test multi-dev ASIC cross-dev replace...")
+ ret, _ = simA.set_xdp(progB, "offload", force=True, JSON=False, fail=False)
+ fail(ret == 0, "cross-ASIC program allowed")
+ for d in simB:
+ ret, _ = d.set_xdp(progA, "offload", force=True, JSON=False, fail=False)
+ fail(ret == 0, "cross-ASIC program allowed")
+
+ start_test("Test multi-dev ASIC cross-dev install...")
+ for d in sims:
+ d.unset_xdp("offload")
+
+ ret, _, err = simA.set_xdp(progB, "offload", force=True, JSON=False,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "cross-ASIC program allowed")
+ check_extack_nsim(err, "program bound to different dev.", args)
+ for d in simB:
+ ret, _, err = d.set_xdp(progA, "offload", force=True, JSON=False,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "cross-ASIC program allowed")
+ check_extack_nsim(err, "program bound to different dev.", args)
+
+ start_test("Test multi-dev ASIC cross-dev map reuse...")
+
+ mapA = bpftool("prog show %s" % (progA))[1]["map_ids"][0]
+ mapB = bpftool("prog show %s" % (progB))[1]["map_ids"][0]
+
+ ret, _ = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB_",
+ dev=simB3['ifname'],
+ maps=["idx 0 id %d" % (mapB)],
+ fail=False)
+ fail(ret != 0, "couldn't reuse a map on the same ASIC")
+ rm("/sys/fs/bpf/nsimB_")
+
+ ret, _, err = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimA_",
+ dev=simA['ifname'],
+ maps=["idx 0 id %d" % (mapB)],
+ fail=False, include_stderr=True)
+ fail(ret == 0, "could reuse a map on a different ASIC")
+ fail(err.count("offload device mismatch between prog and map") == 0,
+ "error message missing for cross-ASIC map")
+
+ ret, _, err = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB_",
+ dev=simB1['ifname'],
+ maps=["idx 0 id %d" % (mapA)],
+ fail=False, include_stderr=True)
+ fail(ret == 0, "could reuse a map on a different ASIC")
+ fail(err.count("offload device mismatch between prog and map") == 0,
+ "error message missing for cross-ASIC map")
+
+ start_test("Test multi-dev ASIC cross-dev destruction...")
+ bpftool_prog_list_wait(expected=2)
+
+ simA.remove()
+ bpftool_prog_list_wait(expected=1)
+
+ ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"]
+ fail(ifnameB != simB1['ifname'], "program not bound to original device")
+ simB1.remove()
+ bpftool_prog_list_wait(expected=1)
+
+ start_test("Test multi-dev ASIC cross-dev destruction - move...")
+ ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"]
+ fail(ifnameB not in (simB2['ifname'], simB3['ifname']),
+ "program not bound to remaining devices")
+
+ simB2.remove()
+ ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"]
+ fail(ifnameB != simB3['ifname'], "program not bound to remaining device")
+
+ simB3.remove()
+ bpftool_prog_list_wait(expected=0)
+
+ start_test("Test multi-dev ASIC cross-dev destruction - orphaned...")
+ ret, out = bpftool("prog show %s" % (progB), fail=False)
+ fail(ret == 0, "got information about orphaned program")
+ fail("error" not in out, "no error reported for get info on orphaned")
+ fail(out["error"] != "can't get prog info: No such device",
+ "wrong error for get info on orphaned")
+
print("%s: OK" % (os.path.basename(__file__)))
finally:
diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/test_select_reuseport.c
new file mode 100644
index 000000000000..75646d9b34aa
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_select_reuseport.c
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Facebook */
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <linux/bpf.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <sys/types.h>
+#include <sys/epoll.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "bpf_rlimit.h"
+#include "bpf_util.h"
+#include "test_select_reuseport_common.h"
+
+#define MIN_TCPHDR_LEN 20
+#define UDPHDR_LEN 8
+
+#define TCP_SYNCOOKIE_SYSCTL "/proc/sys/net/ipv4/tcp_syncookies"
+#define TCP_FO_SYSCTL "/proc/sys/net/ipv4/tcp_fastopen"
+#define REUSEPORT_ARRAY_SIZE 32
+
+static int result_map, tmp_index_ovr_map, linum_map, data_check_map;
+static enum result expected_results[NR_RESULTS];
+static int sk_fds[REUSEPORT_ARRAY_SIZE];
+static int reuseport_array, outer_map;
+static int select_by_skb_data_prog;
+static int saved_tcp_syncookie;
+static struct bpf_object *obj;
+static int saved_tcp_fo;
+static __u32 index_zero;
+static int epfd;
+
+static union sa46 {
+ struct sockaddr_in6 v6;
+ struct sockaddr_in v4;
+ sa_family_t family;
+} srv_sa;
+
+#define CHECK(condition, tag, format...) ({ \
+ int __ret = !!(condition); \
+ if (__ret) { \
+ printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \
+ printf(format); \
+ exit(-1); \
+ } \
+})
+
+static void create_maps(void)
+{
+ struct bpf_create_map_attr attr = {};
+
+ /* Creating reuseport_array */
+ attr.name = "reuseport_array";
+ attr.map_type = BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
+ attr.key_size = sizeof(__u32);
+ attr.value_size = sizeof(__u32);
+ attr.max_entries = REUSEPORT_ARRAY_SIZE;
+
+ reuseport_array = bpf_create_map_xattr(&attr);
+ CHECK(reuseport_array == -1, "creating reuseport_array",
+ "reuseport_array:%d errno:%d\n", reuseport_array, errno);
+
+ /* Creating outer_map */
+ attr.name = "outer_map";
+ attr.map_type = BPF_MAP_TYPE_ARRAY_OF_MAPS;
+ attr.key_size = sizeof(__u32);
+ attr.value_size = sizeof(__u32);
+ attr.max_entries = 1;
+ attr.inner_map_fd = reuseport_array;
+ outer_map = bpf_create_map_xattr(&attr);
+ CHECK(outer_map == -1, "creating outer_map",
+ "outer_map:%d errno:%d\n", outer_map, errno);
+}
+
+static void prepare_bpf_obj(void)
+{
+ struct bpf_program *prog;
+ struct bpf_map *map;
+ int err;
+ struct bpf_object_open_attr attr = {
+ .file = "test_select_reuseport_kern.o",
+ .prog_type = BPF_PROG_TYPE_SK_REUSEPORT,
+ };
+
+ obj = bpf_object__open_xattr(&attr);
+ CHECK(IS_ERR_OR_NULL(obj), "open test_select_reuseport_kern.o",
+ "obj:%p PTR_ERR(obj):%ld\n", obj, PTR_ERR(obj));
+
+ prog = bpf_program__next(NULL, obj);
+ CHECK(!prog, "get first bpf_program", "!prog\n");
+ bpf_program__set_type(prog, attr.prog_type);
+
+ map = bpf_object__find_map_by_name(obj, "outer_map");
+ CHECK(!map, "find outer_map", "!map\n");
+ err = bpf_map__reuse_fd(map, outer_map);
+ CHECK(err, "reuse outer_map", "err:%d\n", err);
+
+ err = bpf_object__load(obj);
+ CHECK(err, "load bpf_object", "err:%d\n", err);
+
+ select_by_skb_data_prog = bpf_program__fd(prog);
+ CHECK(select_by_skb_data_prog == -1, "get prog fd",
+ "select_by_skb_data_prog:%d\n", select_by_skb_data_prog);
+
+ map = bpf_object__find_map_by_name(obj, "result_map");
+ CHECK(!map, "find result_map", "!map\n");
+ result_map = bpf_map__fd(map);
+ CHECK(result_map == -1, "get result_map fd",
+ "result_map:%d\n", result_map);
+
+ map = bpf_object__find_map_by_name(obj, "tmp_index_ovr_map");
+ CHECK(!map, "find tmp_index_ovr_map", "!map\n");
+ tmp_index_ovr_map = bpf_map__fd(map);
+ CHECK(tmp_index_ovr_map == -1, "get tmp_index_ovr_map fd",
+ "tmp_index_ovr_map:%d\n", tmp_index_ovr_map);
+
+ map = bpf_object__find_map_by_name(obj, "linum_map");
+ CHECK(!map, "find linum_map", "!map\n");
+ linum_map = bpf_map__fd(map);
+ CHECK(linum_map == -1, "get linum_map fd",
+ "linum_map:%d\n", linum_map);
+
+ map = bpf_object__find_map_by_name(obj, "data_check_map");
+ CHECK(!map, "find data_check_map", "!map\n");
+ data_check_map = bpf_map__fd(map);
+ CHECK(data_check_map == -1, "get data_check_map fd",
+ "data_check_map:%d\n", data_check_map);
+}
+
+static void sa46_init_loopback(union sa46 *sa, sa_family_t family)
+{
+ memset(sa, 0, sizeof(*sa));
+ sa->family = family;
+ if (sa->family == AF_INET6)
+ sa->v6.sin6_addr = in6addr_loopback;
+ else
+ sa->v4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+}
+
+static void sa46_init_inany(union sa46 *sa, sa_family_t family)
+{
+ memset(sa, 0, sizeof(*sa));
+ sa->family = family;
+ if (sa->family == AF_INET6)
+ sa->v6.sin6_addr = in6addr_any;
+ else
+ sa->v4.sin_addr.s_addr = INADDR_ANY;
+}
+
+static int read_int_sysctl(const char *sysctl)
+{
+ char buf[16];
+ int fd, ret;
+
+ fd = open(sysctl, 0);
+ CHECK(fd == -1, "open(sysctl)", "sysctl:%s fd:%d errno:%d\n",
+ sysctl, fd, errno);
+
+ ret = read(fd, buf, sizeof(buf));
+ CHECK(ret <= 0, "read(sysctl)", "sysctl:%s ret:%d errno:%d\n",
+ sysctl, ret, errno);
+ close(fd);
+
+ return atoi(buf);
+}
+
+static void write_int_sysctl(const char *sysctl, int v)
+{
+ int fd, ret, size;
+ char buf[16];
+
+ fd = open(sysctl, O_RDWR);
+ CHECK(fd == -1, "open(sysctl)", "sysctl:%s fd:%d errno:%d\n",
+ sysctl, fd, errno);
+
+ size = snprintf(buf, sizeof(buf), "%d", v);
+ ret = write(fd, buf, size);
+ CHECK(ret != size, "write(sysctl)",
+ "sysctl:%s ret:%d size:%d errno:%d\n", sysctl, ret, size, errno);
+ close(fd);
+}
+
+static void restore_sysctls(void)
+{
+ write_int_sysctl(TCP_FO_SYSCTL, saved_tcp_fo);
+ write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, saved_tcp_syncookie);
+}
+
+static void enable_fastopen(void)
+{
+ int fo;
+
+ fo = read_int_sysctl(TCP_FO_SYSCTL);
+ write_int_sysctl(TCP_FO_SYSCTL, fo | 7);
+}
+
+static void enable_syncookie(void)
+{
+ write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 2);
+}
+
+static void disable_syncookie(void)
+{
+ write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 0);
+}
+
+static __u32 get_linum(void)
+{
+ __u32 linum;
+ int err;
+
+ err = bpf_map_lookup_elem(linum_map, &index_zero, &linum);
+ CHECK(err == -1, "lookup_elem(linum_map)", "err:%d errno:%d\n",
+ err, errno);
+
+ return linum;
+}
+
+static void check_data(int type, sa_family_t family, const struct cmd *cmd,
+ int cli_fd)
+{
+ struct data_check expected = {}, result;
+ union sa46 cli_sa;
+ socklen_t addrlen;
+ int err;
+
+ addrlen = sizeof(cli_sa);
+ err = getsockname(cli_fd, (struct sockaddr *)&cli_sa,
+ &addrlen);
+ CHECK(err == -1, "getsockname(cli_fd)", "err:%d errno:%d\n",
+ err, errno);
+
+ err = bpf_map_lookup_elem(data_check_map, &index_zero, &result);
+ CHECK(err == -1, "lookup_elem(data_check_map)", "err:%d errno:%d\n",
+ err, errno);
+
+ if (type == SOCK_STREAM) {
+ expected.len = MIN_TCPHDR_LEN;
+ expected.ip_protocol = IPPROTO_TCP;
+ } else {
+ expected.len = UDPHDR_LEN;
+ expected.ip_protocol = IPPROTO_UDP;
+ }
+
+ if (family == AF_INET6) {
+ expected.eth_protocol = htons(ETH_P_IPV6);
+ expected.bind_inany = !srv_sa.v6.sin6_addr.s6_addr32[3] &&
+ !srv_sa.v6.sin6_addr.s6_addr32[2] &&
+ !srv_sa.v6.sin6_addr.s6_addr32[1] &&
+ !srv_sa.v6.sin6_addr.s6_addr32[0];
+
+ memcpy(&expected.skb_addrs[0], cli_sa.v6.sin6_addr.s6_addr32,
+ sizeof(cli_sa.v6.sin6_addr));
+ memcpy(&expected.skb_addrs[4], &in6addr_loopback,
+ sizeof(in6addr_loopback));
+ expected.skb_ports[0] = cli_sa.v6.sin6_port;
+ expected.skb_ports[1] = srv_sa.v6.sin6_port;
+ } else {
+ expected.eth_protocol = htons(ETH_P_IP);
+ expected.bind_inany = !srv_sa.v4.sin_addr.s_addr;
+
+ expected.skb_addrs[0] = cli_sa.v4.sin_addr.s_addr;
+ expected.skb_addrs[1] = htonl(INADDR_LOOPBACK);
+ expected.skb_ports[0] = cli_sa.v4.sin_port;
+ expected.skb_ports[1] = srv_sa.v4.sin_port;
+ }
+
+ if (memcmp(&result, &expected, offsetof(struct data_check,
+ equal_check_end))) {
+ printf("unexpected data_check\n");
+ printf(" result: (0x%x, %u, %u)\n",
+ result.eth_protocol, result.ip_protocol,
+ result.bind_inany);
+ printf("expected: (0x%x, %u, %u)\n",
+ expected.eth_protocol, expected.ip_protocol,
+ expected.bind_inany);
+ CHECK(1, "data_check result != expected",
+ "bpf_prog_linum:%u\n", get_linum());
+ }
+
+ CHECK(!result.hash, "data_check result.hash empty",
+ "result.hash:%u", result.hash);
+
+ expected.len += cmd ? sizeof(*cmd) : 0;
+ if (type == SOCK_STREAM)
+ CHECK(expected.len > result.len, "expected.len > result.len",
+ "expected.len:%u result.len:%u bpf_prog_linum:%u\n",
+ expected.len, result.len, get_linum());
+ else
+ CHECK(expected.len != result.len, "expected.len != result.len",
+ "expected.len:%u result.len:%u bpf_prog_linum:%u\n",
+ expected.len, result.len, get_linum());
+}
+
+static void check_results(void)
+{
+ __u32 results[NR_RESULTS];
+ __u32 i, broken = 0;
+ int err;
+
+ for (i = 0; i < NR_RESULTS; i++) {
+ err = bpf_map_lookup_elem(result_map, &i, &results[i]);
+ CHECK(err == -1, "lookup_elem(result_map)",
+ "i:%u err:%d errno:%d\n", i, err, errno);
+ }
+
+ for (i = 0; i < NR_RESULTS; i++) {
+ if (results[i] != expected_results[i]) {
+ broken = i;
+ break;
+ }
+ }
+
+ if (i == NR_RESULTS)
+ return;
+
+ printf("unexpected result\n");
+ printf(" result: [");
+ printf("%u", results[0]);
+ for (i = 1; i < NR_RESULTS; i++)
+ printf(", %u", results[i]);
+ printf("]\n");
+
+ printf("expected: [");
+ printf("%u", expected_results[0]);
+ for (i = 1; i < NR_RESULTS; i++)
+ printf(", %u", expected_results[i]);
+ printf("]\n");
+
+ CHECK(expected_results[broken] != results[broken],
+ "unexpected result",
+ "expected_results[%u] != results[%u] bpf_prog_linum:%u\n",
+ broken, broken, get_linum());
+}
+
+static int send_data(int type, sa_family_t family, void *data, size_t len,
+ enum result expected)
+{
+ union sa46 cli_sa;
+ int fd, err;
+
+ fd = socket(family, type, 0);
+ CHECK(fd == -1, "socket()", "fd:%d errno:%d\n", fd, errno);
+
+ sa46_init_loopback(&cli_sa, family);
+ err = bind(fd, (struct sockaddr *)&cli_sa, sizeof(cli_sa));
+ CHECK(fd == -1, "bind(cli_sa)", "err:%d errno:%d\n", err, errno);
+
+ err = sendto(fd, data, len, MSG_FASTOPEN, (struct sockaddr *)&srv_sa,
+ sizeof(srv_sa));
+ CHECK(err != len && expected >= PASS,
+ "sendto()", "family:%u err:%d errno:%d expected:%d\n",
+ family, err, errno, expected);
+
+ return fd;
+}
+
+static void do_test(int type, sa_family_t family, struct cmd *cmd,
+ enum result expected)
+{
+ int nev, srv_fd, cli_fd;
+ struct epoll_event ev;
+ struct cmd rcv_cmd;
+ ssize_t nread;
+
+ cli_fd = send_data(type, family, cmd, cmd ? sizeof(*cmd) : 0,
+ expected);
+ nev = epoll_wait(epfd, &ev, 1, expected >= PASS ? 5 : 0);
+ CHECK((nev <= 0 && expected >= PASS) ||
+ (nev > 0 && expected < PASS),
+ "nev <> expected",
+ "nev:%d expected:%d type:%d family:%d data:(%d, %d)\n",
+ nev, expected, type, family,
+ cmd ? cmd->reuseport_index : -1,
+ cmd ? cmd->pass_on_failure : -1);
+ check_results();
+ check_data(type, family, cmd, cli_fd);
+
+ if (expected < PASS)
+ return;
+
+ CHECK(expected != PASS_ERR_SK_SELECT_REUSEPORT &&
+ cmd->reuseport_index != ev.data.u32,
+ "check cmd->reuseport_index",
+ "cmd:(%u, %u) ev.data.u32:%u\n",
+ cmd->pass_on_failure, cmd->reuseport_index, ev.data.u32);
+
+ srv_fd = sk_fds[ev.data.u32];
+ if (type == SOCK_STREAM) {
+ int new_fd = accept(srv_fd, NULL, 0);
+
+ CHECK(new_fd == -1, "accept(srv_fd)",
+ "ev.data.u32:%u new_fd:%d errno:%d\n",
+ ev.data.u32, new_fd, errno);
+
+ nread = recv(new_fd, &rcv_cmd, sizeof(rcv_cmd), MSG_DONTWAIT);
+ CHECK(nread != sizeof(rcv_cmd),
+ "recv(new_fd)",
+ "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n",
+ ev.data.u32, nread, sizeof(rcv_cmd), errno);
+
+ close(new_fd);
+ } else {
+ nread = recv(srv_fd, &rcv_cmd, sizeof(rcv_cmd), MSG_DONTWAIT);
+ CHECK(nread != sizeof(rcv_cmd),
+ "recv(sk_fds)",
+ "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n",
+ ev.data.u32, nread, sizeof(rcv_cmd), errno);
+ }
+
+ close(cli_fd);
+}
+
+static void test_err_inner_map(int type, sa_family_t family)
+{
+ struct cmd cmd = {
+ .reuseport_index = 0,
+ .pass_on_failure = 0,
+ };
+
+ printf("%s: ", __func__);
+ expected_results[DROP_ERR_INNER_MAP]++;
+ do_test(type, family, &cmd, DROP_ERR_INNER_MAP);
+ printf("OK\n");
+}
+
+static void test_err_skb_data(int type, sa_family_t family)
+{
+ printf("%s: ", __func__);
+ expected_results[DROP_ERR_SKB_DATA]++;
+ do_test(type, family, NULL, DROP_ERR_SKB_DATA);
+ printf("OK\n");
+}
+
+static void test_err_sk_select_port(int type, sa_family_t family)
+{
+ struct cmd cmd = {
+ .reuseport_index = REUSEPORT_ARRAY_SIZE,
+ .pass_on_failure = 0,
+ };
+
+ printf("%s: ", __func__);
+ expected_results[DROP_ERR_SK_SELECT_REUSEPORT]++;
+ do_test(type, family, &cmd, DROP_ERR_SK_SELECT_REUSEPORT);
+ printf("OK\n");
+}
+
+static void test_pass(int type, sa_family_t family)
+{
+ struct cmd cmd;
+ int i;
+
+ printf("%s: ", __func__);
+ cmd.pass_on_failure = 0;
+ for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) {
+ expected_results[PASS]++;
+ cmd.reuseport_index = i;
+ do_test(type, family, &cmd, PASS);
+ }
+ printf("OK\n");
+}
+
+static void test_syncookie(int type, sa_family_t family)
+{
+ int err, tmp_index = 1;
+ struct cmd cmd = {
+ .reuseport_index = 0,
+ .pass_on_failure = 0,
+ };
+
+ if (type != SOCK_STREAM)
+ return;
+
+ printf("%s: ", __func__);
+ /*
+ * +1 for TCP-SYN and
+ * +1 for the TCP-ACK (ack the syncookie)
+ */
+ expected_results[PASS] += 2;
+ enable_syncookie();
+ /*
+ * Simulate TCP-SYN and TCP-ACK are handled by two different sk:
+ * TCP-SYN: select sk_fds[tmp_index = 1] tmp_index is from the
+ * tmp_index_ovr_map
+ * TCP-ACK: select sk_fds[reuseport_index = 0] reuseport_index
+ * is from the cmd.reuseport_index
+ */
+ err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero,
+ &tmp_index, BPF_ANY);
+ CHECK(err == -1, "update_elem(tmp_index_ovr_map, 0, 1)",
+ "err:%d errno:%d\n", err, errno);
+ do_test(type, family, &cmd, PASS);
+ err = bpf_map_lookup_elem(tmp_index_ovr_map, &index_zero,
+ &tmp_index);
+ CHECK(err == -1 || tmp_index != -1,
+ "lookup_elem(tmp_index_ovr_map)",
+ "err:%d errno:%d tmp_index:%d\n",
+ err, errno, tmp_index);
+ disable_syncookie();
+ printf("OK\n");
+}
+
+static void test_pass_on_err(int type, sa_family_t family)
+{
+ struct cmd cmd = {
+ .reuseport_index = REUSEPORT_ARRAY_SIZE,
+ .pass_on_failure = 1,
+ };
+
+ printf("%s: ", __func__);
+ expected_results[PASS_ERR_SK_SELECT_REUSEPORT] += 1;
+ do_test(type, family, &cmd, PASS_ERR_SK_SELECT_REUSEPORT);
+ printf("OK\n");
+}
+
+static void prepare_sk_fds(int type, sa_family_t family, bool inany)
+{
+ const int first = REUSEPORT_ARRAY_SIZE - 1;
+ int i, err, optval = 1;
+ struct epoll_event ev;
+ socklen_t addrlen;
+
+ if (inany)
+ sa46_init_inany(&srv_sa, family);
+ else
+ sa46_init_loopback(&srv_sa, family);
+ addrlen = sizeof(srv_sa);
+
+ /*
+ * The sk_fds[] is filled from the back such that the order
+ * is exactly opposite to the (struct sock_reuseport *)reuse->socks[].
+ */
+ for (i = first; i >= 0; i--) {
+ sk_fds[i] = socket(family, type, 0);
+ CHECK(sk_fds[i] == -1, "socket()", "sk_fds[%d]:%d errno:%d\n",
+ i, sk_fds[i], errno);
+ err = setsockopt(sk_fds[i], SOL_SOCKET, SO_REUSEPORT,
+ &optval, sizeof(optval));
+ CHECK(err == -1, "setsockopt(SO_REUSEPORT)",
+ "sk_fds[%d] err:%d errno:%d\n",
+ i, err, errno);
+
+ if (i == first) {
+ err = setsockopt(sk_fds[i], SOL_SOCKET,
+ SO_ATTACH_REUSEPORT_EBPF,
+ &select_by_skb_data_prog,
+ sizeof(select_by_skb_data_prog));
+ CHECK(err == -1, "setsockopt(SO_ATTACH_REUEPORT_EBPF)",
+ "err:%d errno:%d\n", err, errno);
+ }
+
+ err = bind(sk_fds[i], (struct sockaddr *)&srv_sa, addrlen);
+ CHECK(err == -1, "bind()", "sk_fds[%d] err:%d errno:%d\n",
+ i, err, errno);
+
+ if (type == SOCK_STREAM) {
+ err = listen(sk_fds[i], 10);
+ CHECK(err == -1, "listen()",
+ "sk_fds[%d] err:%d errno:%d\n",
+ i, err, errno);
+ }
+
+ err = bpf_map_update_elem(reuseport_array, &i, &sk_fds[i],
+ BPF_NOEXIST);
+ CHECK(err == -1, "update_elem(reuseport_array)",
+ "sk_fds[%d] err:%d errno:%d\n", i, err, errno);
+
+ if (i == first) {
+ socklen_t addrlen = sizeof(srv_sa);
+
+ err = getsockname(sk_fds[i], (struct sockaddr *)&srv_sa,
+ &addrlen);
+ CHECK(err == -1, "getsockname()",
+ "sk_fds[%d] err:%d errno:%d\n", i, err, errno);
+ }
+ }
+
+ epfd = epoll_create(1);
+ CHECK(epfd == -1, "epoll_create(1)",
+ "epfd:%d errno:%d\n", epfd, errno);
+
+ ev.events = EPOLLIN;
+ for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) {
+ ev.data.u32 = i;
+ err = epoll_ctl(epfd, EPOLL_CTL_ADD, sk_fds[i], &ev);
+ CHECK(err, "epoll_ctl(EPOLL_CTL_ADD)", "sk_fds[%d]\n", i);
+ }
+}
+
+static void setup_per_test(int type, unsigned short family, bool inany)
+{
+ int ovr = -1, err;
+
+ prepare_sk_fds(type, family, inany);
+ err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero, &ovr,
+ BPF_ANY);
+ CHECK(err == -1, "update_elem(tmp_index_ovr_map, 0, -1)",
+ "err:%d errno:%d\n", err, errno);
+}
+
+static void cleanup_per_test(void)
+{
+ int i, err;
+
+ for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++)
+ close(sk_fds[i]);
+ close(epfd);
+
+ err = bpf_map_delete_elem(outer_map, &index_zero);
+ CHECK(err == -1, "delete_elem(outer_map)",
+ "err:%d errno:%d\n", err, errno);
+}
+
+static void cleanup(void)
+{
+ close(outer_map);
+ close(reuseport_array);
+ bpf_object__close(obj);
+}
+
+static void test_all(void)
+{
+ /* Extra SOCK_STREAM to test bind_inany==true */
+ const int types[] = { SOCK_STREAM, SOCK_DGRAM, SOCK_STREAM };
+ const char * const type_strings[] = { "TCP", "UDP", "TCP" };
+ const char * const family_strings[] = { "IPv6", "IPv4" };
+ const unsigned short families[] = { AF_INET6, AF_INET };
+ const bool bind_inany[] = { false, false, true };
+ int t, f, err;
+
+ for (f = 0; f < ARRAY_SIZE(families); f++) {
+ unsigned short family = families[f];
+
+ for (t = 0; t < ARRAY_SIZE(types); t++) {
+ bool inany = bind_inany[t];
+ int type = types[t];
+
+ printf("######## %s/%s %s ########\n",
+ family_strings[f], type_strings[t],
+ inany ? " INANY " : "LOOPBACK");
+
+ setup_per_test(type, family, inany);
+
+ test_err_inner_map(type, family);
+
+ /* Install reuseport_array to the outer_map */
+ err = bpf_map_update_elem(outer_map, &index_zero,
+ &reuseport_array, BPF_ANY);
+ CHECK(err == -1, "update_elem(outer_map)",
+ "err:%d errno:%d\n", err, errno);
+
+ test_err_skb_data(type, family);
+ test_err_sk_select_port(type, family);
+ test_pass(type, family);
+ test_syncookie(type, family);
+ test_pass_on_err(type, family);
+
+ cleanup_per_test();
+ printf("\n");
+ }
+ }
+}
+
+int main(int argc, const char **argv)
+{
+ create_maps();
+ prepare_bpf_obj();
+ saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL);
+ saved_tcp_syncookie = read_int_sysctl(TCP_SYNCOOKIE_SYSCTL);
+ enable_fastopen();
+ disable_syncookie();
+ atexit(restore_sysctls);
+
+ test_all();
+
+ cleanup();
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/test_select_reuseport_common.h b/tools/testing/selftests/bpf/test_select_reuseport_common.h
new file mode 100644
index 000000000000..08eb2a9f145f
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_select_reuseport_common.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Facebook */
+
+#ifndef __TEST_SELECT_REUSEPORT_COMMON_H
+#define __TEST_SELECT_REUSEPORT_COMMON_H
+
+#include <linux/types.h>
+
+enum result {
+ DROP_ERR_INNER_MAP,
+ DROP_ERR_SKB_DATA,
+ DROP_ERR_SK_SELECT_REUSEPORT,
+ DROP_MISC,
+ PASS,
+ PASS_ERR_SK_SELECT_REUSEPORT,
+ NR_RESULTS,
+};
+
+struct cmd {
+ __u32 reuseport_index;
+ __u32 pass_on_failure;
+};
+
+struct data_check {
+ __u32 ip_protocol;
+ __u32 skb_addrs[8];
+ __u16 skb_ports[2];
+ __u16 eth_protocol;
+ __u8 bind_inany;
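+	/* equal_check_end marks the end of the region that the user-space
+	 * test compares byte-for-byte; len and hash below it are checked
+	 * separately.
+	 */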
+ __u8 equal_check_end[0];
+
+ __u32 len;
+ __u32 hash;
+};
+
+#endif
diff --git a/tools/testing/selftests/bpf/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/test_select_reuseport_kern.c
new file mode 100644
index 000000000000..5b54ec637ada
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_select_reuseport_kern.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Facebook */
+
+#include <stdlib.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/bpf.h>
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#include "bpf_endian.h"
+#include "bpf_helpers.h"
+#include "test_select_reuseport_common.h"
+
+int _version SEC("version") = 1;
+
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+
+struct bpf_map_def SEC("maps") outer_map = {
+ .type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = 1,
+};
+
+struct bpf_map_def SEC("maps") result_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = NR_RESULTS,
+};
+
+struct bpf_map_def SEC("maps") tmp_index_ovr_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(int),
+ .max_entries = 1,
+};
+
+struct bpf_map_def SEC("maps") linum_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = 1,
+};
+
+struct bpf_map_def SEC("maps") data_check_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct data_check),
+ .max_entries = 1,
+};
+
+#define GOTO_DONE(_result) ({ \
+ result = (_result); \
+ linum = __LINE__; \
+ goto done; \
+})
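+
+/* Usage sketch: GOTO_DONE(DROP_MISC) records the result and the current
+ * source line (retrievable from linum_map) before jumping to done.
+ */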
+
+SEC("select_by_skb_data")
+int _select_by_skb_data(struct sk_reuseport_md *reuse_md)
+{
+ __u32 linum, index = 0, flags = 0, index_zero = 0;
+ __u32 *result_cnt, *linum_value;
+ struct data_check data_check = {};
+ struct cmd *cmd, cmd_copy;
+ void *data, *data_end;
+ void *reuseport_array;
+ enum result result;
+ int *index_ovr;
+ int err;
+
+ data = reuse_md->data;
+ data_end = reuse_md->data_end;
+ data_check.len = reuse_md->len;
+ data_check.eth_protocol = reuse_md->eth_protocol;
+ data_check.ip_protocol = reuse_md->ip_protocol;
+ data_check.hash = reuse_md->hash;
+ data_check.bind_inany = reuse_md->bind_inany;
+ if (data_check.eth_protocol == bpf_htons(ETH_P_IP)) {
+ if (bpf_skb_load_bytes_relative(reuse_md,
+ offsetof(struct iphdr, saddr),
+ data_check.skb_addrs, 8,
+ BPF_HDR_START_NET))
+ GOTO_DONE(DROP_MISC);
+ } else {
+ if (bpf_skb_load_bytes_relative(reuse_md,
+ offsetof(struct ipv6hdr, saddr),
+ data_check.skb_addrs, 32,
+ BPF_HDR_START_NET))
+ GOTO_DONE(DROP_MISC);
+ }
+
+	/*
+	 * The ip_protocol could be a compile-time decision
+	 * if the bpf_prog.o were dedicated to either TCP or
+	 * UDP.
+	 *
+	 * Otherwise, reuse_md->ip_protocol or the protocol
+	 * field in the iphdr can be used instead.
+	 */
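+	/* For instance, a hypothetical TCP-only build might do:
+	 *	#define DEDICATED_IP_PROTOCOL IPPROTO_TCP
+	 * and test against that constant at compile time.
+	 */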
+ if (data_check.ip_protocol == IPPROTO_TCP) {
+ struct tcphdr *th = data;
+
+ if (th + 1 > data_end)
+ GOTO_DONE(DROP_MISC);
+
+ data_check.skb_ports[0] = th->source;
+ data_check.skb_ports[1] = th->dest;
+
+ if ((th->doff << 2) + sizeof(*cmd) > data_check.len)
+ GOTO_DONE(DROP_ERR_SKB_DATA);
+ if (bpf_skb_load_bytes(reuse_md, th->doff << 2, &cmd_copy,
+ sizeof(cmd_copy)))
+ GOTO_DONE(DROP_MISC);
+ cmd = &cmd_copy;
+ } else if (data_check.ip_protocol == IPPROTO_UDP) {
+ struct udphdr *uh = data;
+
+ if (uh + 1 > data_end)
+ GOTO_DONE(DROP_MISC);
+
+ data_check.skb_ports[0] = uh->source;
+ data_check.skb_ports[1] = uh->dest;
+
+ if (sizeof(struct udphdr) + sizeof(*cmd) > data_check.len)
+ GOTO_DONE(DROP_ERR_SKB_DATA);
+ if (data + sizeof(struct udphdr) + sizeof(*cmd) > data_end) {
+ if (bpf_skb_load_bytes(reuse_md, sizeof(struct udphdr),
+ &cmd_copy, sizeof(cmd_copy)))
+ GOTO_DONE(DROP_MISC);
+ cmd = &cmd_copy;
+ } else {
+ cmd = data + sizeof(struct udphdr);
+ }
+ } else {
+ GOTO_DONE(DROP_MISC);
+ }
+
+ reuseport_array = bpf_map_lookup_elem(&outer_map, &index_zero);
+ if (!reuseport_array)
+ GOTO_DONE(DROP_ERR_INNER_MAP);
+
+ index = cmd->reuseport_index;
+ index_ovr = bpf_map_lookup_elem(&tmp_index_ovr_map, &index_zero);
+ if (!index_ovr)
+ GOTO_DONE(DROP_MISC);
+
+ if (*index_ovr != -1) {
+ index = *index_ovr;
+ *index_ovr = -1;
+ }
+ err = bpf_sk_select_reuseport(reuse_md, reuseport_array, &index,
+ flags);
+ if (!err)
+ GOTO_DONE(PASS);
+
+ if (cmd->pass_on_failure)
+ GOTO_DONE(PASS_ERR_SK_SELECT_REUSEPORT);
+ else
+ GOTO_DONE(DROP_ERR_SK_SELECT_REUSEPORT);
+
+done:
+ result_cnt = bpf_map_lookup_elem(&result_map, &result);
+ if (!result_cnt)
+ return SK_DROP;
+
+ bpf_map_update_elem(&linum_map, &index_zero, &linum, BPF_ANY);
+ bpf_map_update_elem(&data_check_map, &index_zero, &data_check, BPF_ANY);
+
+ (*result_cnt)++;
+ return result < PASS ? SK_DROP : SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id.sh b/tools/testing/selftests/bpf/test_skb_cgroup_id.sh
new file mode 100755
index 000000000000..42544a969abc
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_skb_cgroup_id.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2018 Facebook
+
+set -eu
+
+wait_for_ip()
+{
+ local _i
+ echo -n "Wait for testing link-local IP to become available "
+ for _i in $(seq ${MAX_PING_TRIES}); do
+ echo -n "."
+ if ping -6 -q -c 1 -W 1 ff02::1%${TEST_IF} >/dev/null 2>&1; then
+ echo " OK"
+ return
+ fi
+ sleep 1
+ done
+ echo 1>&2 "ERROR: Timeout waiting for test IP to become available."
+ exit 1
+}
+
+setup()
+{
+	# Create dedicated testing interfaces so as not to interfere with
+	# the current environment.
+ ip link add dev ${TEST_IF} type veth peer name ${TEST_IF_PEER}
+ ip link set ${TEST_IF} up
+ ip link set ${TEST_IF_PEER} up
+
+ wait_for_ip
+
+ tc qdisc add dev ${TEST_IF} clsact
+ tc filter add dev ${TEST_IF} egress bpf obj ${BPF_PROG_OBJ} \
+ sec ${BPF_PROG_SECTION} da
+
+ BPF_PROG_ID=$(tc filter show dev ${TEST_IF} egress | \
+ awk '/ id / {sub(/.* id /, "", $0); print($1)}')
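+	# (The awk above extracts the program id from tc output of the form
+	# "filter ... bpf ... direct-action ... id <N> tag <tag> ...".)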
+}
+
+cleanup()
+{
+ ip link del ${TEST_IF} 2>/dev/null || :
+ ip link del ${TEST_IF_PEER} 2>/dev/null || :
+}
+
+main()
+{
+ trap cleanup EXIT 2 3 6 15
+ setup
+ ${PROG} ${TEST_IF} ${BPF_PROG_ID}
+}
+
+DIR=$(dirname $0)
+TEST_IF="test_cgid_1"
+TEST_IF_PEER="test_cgid_2"
+MAX_PING_TRIES=5
+BPF_PROG_OBJ="${DIR}/test_skb_cgroup_id_kern.o"
+BPF_PROG_SECTION="cgroup_id_logger"
+BPF_PROG_ID=0
+PROG="${DIR}/test_skb_cgroup_id_user"
+
+main
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c
new file mode 100644
index 000000000000..68cf9829f5a7
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+
+#include <string.h>
+
+#include "bpf_helpers.h"
+
+#define NUM_CGROUP_LEVELS 4
+
+struct bpf_map_def SEC("maps") cgroup_ids = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u64),
+ .max_entries = NUM_CGROUP_LEVELS,
+};
+
+static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level)
+{
+ __u64 id;
+
+ /* [1] &level passed to external function that may change it, it's
+ * incompatible with loop unroll.
+ */
+ id = bpf_skb_ancestor_cgroup_id(skb, level);
+ bpf_map_update_elem(&cgroup_ids, &level, &id, 0);
+}
+
+SEC("cgroup_id_logger")
+int log_cgroup_id(struct __sk_buff *skb)
+{
+	/* Loop unrolling can't be used here due to [1], so unroll manually.
+	 * The number of calls must stay in sync with NUM_CGROUP_LEVELS.
+	 */
+ log_nth_level(skb, 0);
+ log_nth_level(skb, 1);
+ log_nth_level(skb, 2);
+ log_nth_level(skb, 3);
+
+ return TC_ACT_OK;
+}
+
+int _version SEC("version") = 1;
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
new file mode 100644
index 000000000000..c121cc59f314
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <arpa/inet.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "bpf_rlimit.h"
+#include "cgroup_helpers.h"
+
+#define CGROUP_PATH "/skb_cgroup_test"
+#define NUM_CGROUP_LEVELS 4
+
+/* RFC 4291, Section 2.7.1 */
+#define LINKLOCAL_MULTICAST "ff02::1"
+
+static int mk_dst_addr(const char *ip, const char *iface,
+ struct sockaddr_in6 *dst)
+{
+ memset(dst, 0, sizeof(*dst));
+
+ dst->sin6_family = AF_INET6;
+ dst->sin6_port = htons(1025);
+
+ if (inet_pton(AF_INET6, ip, &dst->sin6_addr) != 1) {
+ log_err("Invalid IPv6: %s", ip);
+ return -1;
+ }
+
+ dst->sin6_scope_id = if_nametoindex(iface);
+ if (!dst->sin6_scope_id) {
+ log_err("Failed to get index of iface: %s", iface);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int send_packet(const char *iface)
+{
+ struct sockaddr_in6 dst;
+ char msg[] = "msg";
+ int err = 0;
+ int fd = -1;
+
+ if (mk_dst_addr(LINKLOCAL_MULTICAST, iface, &dst))
+ goto err;
+
+ fd = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (fd == -1) {
+ log_err("Failed to create UDP socket");
+ goto err;
+ }
+
+ if (sendto(fd, &msg, sizeof(msg), 0, (const struct sockaddr *)&dst,
+ sizeof(dst)) == -1) {
+ log_err("Failed to send datagram");
+ goto err;
+ }
+
+ goto out;
+err:
+ err = -1;
+out:
+ if (fd >= 0)
+ close(fd);
+ return err;
+}
+
+int get_map_fd_by_prog_id(int prog_id)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ __u32 map_ids[1];
+ int prog_fd = -1;
+ int map_fd = -1;
+
+ prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ if (prog_fd < 0) {
+ log_err("Failed to get fd by prog id %d", prog_id);
+ goto err;
+ }
+
+ info.nr_map_ids = 1;
+ info.map_ids = (__u64) (unsigned long) map_ids;
+
+ if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
+ log_err("Failed to get info by prog fd %d", prog_fd);
+ goto err;
+ }
+
+ if (!info.nr_map_ids) {
+ log_err("No maps found for prog fd %d", prog_fd);
+ goto err;
+ }
+
+ map_fd = bpf_map_get_fd_by_id(map_ids[0]);
+ if (map_fd < 0)
+ log_err("Failed to get fd by map id %d", map_ids[0]);
+err:
+ if (prog_fd >= 0)
+ close(prog_fd);
+ return map_fd;
+}
+
+int check_ancestor_cgroup_ids(int prog_id)
+{
+ __u64 actual_ids[NUM_CGROUP_LEVELS], expected_ids[NUM_CGROUP_LEVELS];
+ __u32 level;
+ int err = 0;
+ int map_fd;
+
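+	/* A cgroup id is a kernfs node id: (generation << 32) | inode.
+	 * Both are 1 for the root cgroup, hence 0x100000001 below.
+	 */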
+ expected_ids[0] = 0x100000001; /* root cgroup */
+ expected_ids[1] = get_cgroup_id("");
+ expected_ids[2] = get_cgroup_id(CGROUP_PATH);
+ expected_ids[3] = 0; /* non-existent cgroup */
+
+ map_fd = get_map_fd_by_prog_id(prog_id);
+ if (map_fd < 0)
+ goto err;
+
+ for (level = 0; level < NUM_CGROUP_LEVELS; ++level) {
+ if (bpf_map_lookup_elem(map_fd, &level, &actual_ids[level])) {
+ log_err("Failed to lookup key %d", level);
+ goto err;
+ }
+ if (actual_ids[level] != expected_ids[level]) {
+ log_err("%llx (actual) != %llx (expected), level: %u\n",
+ actual_ids[level], expected_ids[level], level);
+ goto err;
+ }
+ }
+
+ goto out;
+err:
+ err = -1;
+out:
+ if (map_fd >= 0)
+ close(map_fd);
+ return err;
+}
+
+int main(int argc, char **argv)
+{
+ int cgfd = -1;
+ int err = 0;
+
+ if (argc < 3) {
+ fprintf(stderr, "Usage: %s iface prog_id\n", argv[0]);
+ exit(EXIT_FAILURE);
+ }
+
+ if (setup_cgroup_environment())
+ goto err;
+
+ cgfd = create_and_get_cgroup(CGROUP_PATH);
+ if (!cgfd)
+ goto err;
+
+ if (join_cgroup(CGROUP_PATH))
+ goto err;
+
+ if (send_packet(argv[1]))
+ goto err;
+
+ if (check_ancestor_cgroup_ids(atoi(argv[2])))
+ goto err;
+
+ goto out;
+err:
+ err = -1;
+out:
+ close(cgfd);
+ cleanup_cgroup_environment();
+ printf("[%s]\n", err ? "FAIL" : "PASS");
+ return err;
+}
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index f4d99fabc56d..b8ebe2f58074 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -14,10 +14,7 @@
#include "cgroup_helpers.h"
#include "bpf_rlimit.h"
-
-#ifndef ARRAY_SIZE
-# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
+#include "bpf_util.h"
#define CG_PATH "/foo"
#define MAX_INSNS 512
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index a5e76b9219b9..aeeb76a54d63 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -20,15 +20,12 @@
#include "cgroup_helpers.h"
#include "bpf_rlimit.h"
+#include "bpf_util.h"
#ifndef ENOTSUPP
# define ENOTSUPP 524
#endif
-#ifndef ARRAY_SIZE
-# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
#define CG_PATH "/foo"
#define CONNECT4_PROG_PATH "./connect4_prog.o"
#define CONNECT6_PROG_PATH "./connect6_prog.o"
@@ -998,8 +995,9 @@ int init_pktinfo(int domain, struct cmsghdr *cmsg)
return 0;
}
-static int sendmsg_to_server(const struct sockaddr_storage *addr,
- socklen_t addr_len, int set_cmsg, int *syscall_err)
+static int sendmsg_to_server(int type, const struct sockaddr_storage *addr,
+ socklen_t addr_len, int set_cmsg, int flags,
+ int *syscall_err)
{
union {
char buf[CMSG_SPACE(sizeof(struct in6_pktinfo))];
@@ -1022,7 +1020,7 @@ static int sendmsg_to_server(const struct sockaddr_storage *addr,
goto err;
}
- fd = socket(domain, SOCK_DGRAM, 0);
+ fd = socket(domain, type, 0);
if (fd == -1) {
log_err("Failed to create client socket");
goto err;
@@ -1052,7 +1050,7 @@ static int sendmsg_to_server(const struct sockaddr_storage *addr,
}
}
- if (sendmsg(fd, &hdr, 0) != sizeof(data)) {
+ if (sendmsg(fd, &hdr, flags) != sizeof(data)) {
log_err("Fail to send message to server");
*syscall_err = errno;
goto err;
@@ -1066,6 +1064,15 @@ out:
return fd;
}
+static int fastconnect_to_server(const struct sockaddr_storage *addr,
+ socklen_t addr_len)
+{
+ int sendmsg_err;
+
+ return sendmsg_to_server(SOCK_STREAM, addr, addr_len, /*set_cmsg*/0,
+ MSG_FASTOPEN, &sendmsg_err);
+}
+
static int recvmsg_from_client(int sockfd, struct sockaddr_storage *src_addr)
{
struct timeval tv;
@@ -1185,6 +1192,20 @@ static int run_connect_test_case(const struct sock_addr_test *test)
if (cmp_local_ip(clientfd, &expected_src_addr))
goto err;
+ if (test->type == SOCK_STREAM) {
+ /* Test TCP Fast Open scenario */
+ clientfd = fastconnect_to_server(&requested_addr, addr_len);
+ if (clientfd == -1)
+ goto err;
+
+ /* Make sure src and dst addrs were overridden properly */
+ if (cmp_peer_addr(clientfd, &expected_addr))
+ goto err;
+
+ if (cmp_local_ip(clientfd, &expected_src_addr))
+ goto err;
+ }
+
goto out;
err:
err = -1;
@@ -1222,8 +1243,9 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test)
if (clientfd >= 0)
close(clientfd);
- clientfd = sendmsg_to_server(&requested_addr, addr_len,
- set_cmsg, &err);
+ clientfd = sendmsg_to_server(test->type, &requested_addr,
+ addr_len, set_cmsg, /*flags*/0,
+ &err);
if (err)
goto out;
else if (clientfd == -1)
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c
new file mode 100644
index 000000000000..68e108e4687a
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_socket_cookie.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <string.h>
+#include <unistd.h>
+
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "bpf_rlimit.h"
+#include "cgroup_helpers.h"
+
+#define CG_PATH "/foo"
+#define SOCKET_COOKIE_PROG "./socket_cookie_prog.o"
+
+static int start_server(void)
+{
+ struct sockaddr_in6 addr;
+ int fd;
+
+ fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (fd == -1) {
+ log_err("Failed to create server socket");
+ goto out;
+ }
+
+ memset(&addr, 0, sizeof(addr));
+ addr.sin6_family = AF_INET6;
+ addr.sin6_addr = in6addr_loopback;
+ addr.sin6_port = 0;
+
+ if (bind(fd, (const struct sockaddr *)&addr, sizeof(addr)) == -1) {
+ log_err("Failed to bind server socket");
+ goto close_out;
+ }
+
+ if (listen(fd, 128) == -1) {
+ log_err("Failed to listen on server socket");
+ goto close_out;
+ }
+
+ goto out;
+
+close_out:
+ close(fd);
+ fd = -1;
+out:
+ return fd;
+}
+
+static int connect_to_server(int server_fd)
+{
+ struct sockaddr_storage addr;
+ socklen_t len = sizeof(addr);
+ int fd;
+
+ fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (fd == -1) {
+ log_err("Failed to create client socket");
+ goto out;
+ }
+
+ if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) {
+ log_err("Failed to get server addr");
+ goto close_out;
+ }
+
+ if (connect(fd, (const struct sockaddr *)&addr, len) == -1) {
+ log_err("Fail to connect to server");
+ goto close_out;
+ }
+
+ goto out;
+
+close_out:
+ close(fd);
+ fd = -1;
+out:
+ return fd;
+}
+
+static int validate_map(struct bpf_map *map, int client_fd)
+{
+ __u32 cookie_expected_value;
+ struct sockaddr_in6 addr;
+ socklen_t len = sizeof(addr);
+ __u32 cookie_value;
+ __u64 cookie_key;
+ int err = 0;
+ int map_fd;
+
+ if (!map) {
+ log_err("Map not found in BPF object");
+ goto err;
+ }
+
+ map_fd = bpf_map__fd(map);
+
+ err = bpf_map_get_next_key(map_fd, NULL, &cookie_key);
+ if (err) {
+ log_err("Can't get cookie key from map");
+ goto out;
+ }
+
+ err = bpf_map_lookup_elem(map_fd, &cookie_key, &cookie_value);
+ if (err) {
+ log_err("Can't get cookie value from map");
+ goto out;
+ }
+
+ err = getsockname(client_fd, (struct sockaddr *)&addr, &len);
+ if (err) {
+ log_err("Can't get client local addr");
+ goto out;
+ }
+
+ cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF;
+ if (cookie_value != cookie_expected_value) {
+ log_err("Unexpected value in map: %x != %x", cookie_value,
+ cookie_expected_value);
+ goto err;
+ }
+
+ goto out;
+err:
+ err = -1;
+out:
+ return err;
+}
+
+static int run_test(int cgfd)
+{
+ enum bpf_attach_type attach_type;
+ struct bpf_prog_load_attr attr;
+ struct bpf_program *prog;
+ struct bpf_object *pobj;
+ const char *prog_name;
+ int server_fd = -1;
+ int client_fd = -1;
+ int prog_fd = -1;
+ int err = 0;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.file = SOCKET_COOKIE_PROG;
+ attr.prog_type = BPF_PROG_TYPE_UNSPEC;
+
+ err = bpf_prog_load_xattr(&attr, &pobj, &prog_fd);
+ if (err) {
+ log_err("Failed to load %s", attr.file);
+ goto out;
+ }
+
+ bpf_object__for_each_program(prog, pobj) {
+ prog_name = bpf_program__title(prog, /*needs_copy*/ false);
+
+ if (strcmp(prog_name, "cgroup/connect6") == 0) {
+ attach_type = BPF_CGROUP_INET6_CONNECT;
+ } else if (strcmp(prog_name, "sockops") == 0) {
+ attach_type = BPF_CGROUP_SOCK_OPS;
+ } else {
+ log_err("Unexpected prog: %s", prog_name);
+ goto err;
+ }
+
+ err = bpf_prog_attach(bpf_program__fd(prog), cgfd, attach_type,
+ BPF_F_ALLOW_OVERRIDE);
+ if (err) {
+ log_err("Failed to attach prog %s", prog_name);
+ goto out;
+ }
+ }
+
+ server_fd = start_server();
+ if (server_fd == -1)
+ goto err;
+
+ client_fd = connect_to_server(server_fd);
+ if (client_fd == -1)
+ goto err;
+
+ if (validate_map(bpf_map__next(NULL, pobj), client_fd))
+ goto err;
+
+ goto out;
+err:
+ err = -1;
+out:
+ close(client_fd);
+ close(server_fd);
+ bpf_object__close(pobj);
+ printf("%s\n", err ? "FAILED" : "PASSED");
+ return err;
+}
+
+int main(int argc, char **argv)
+{
+ int cgfd = -1;
+ int err = 0;
+
+ if (setup_cgroup_environment())
+ goto err;
+
+ cgfd = create_and_get_cgroup(CG_PATH);
+ if (!cgfd)
+ goto err;
+
+ if (join_cgroup(CG_PATH))
+ goto err;
+
+ if (run_test(cgfd))
+ goto err;
+
+ goto out;
+err:
+ err = -1;
+out:
+ close(cgfd);
+ cleanup_cgroup_environment();
+ return err;
+}
diff --git a/tools/testing/selftests/bpf/test_tcpbpf.h b/tools/testing/selftests/bpf/test_tcpbpf.h
index 2fe43289943c..7bcfa6207005 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf.h
+++ b/tools/testing/selftests/bpf/test_tcpbpf.h
@@ -12,5 +12,6 @@ struct tcpbpf_globals {
__u32 good_cb_test_rv;
__u64 bytes_received;
__u64 bytes_acked;
+ __u32 num_listen;
};
#endif
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
index 3e645ee41ed5..4b7fd540cea9 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
@@ -96,15 +96,22 @@ int bpf_testcb(struct bpf_sock_ops *skops)
if (!gp)
break;
g = *gp;
- g.total_retrans = skops->total_retrans;
- g.data_segs_in = skops->data_segs_in;
- g.data_segs_out = skops->data_segs_out;
- g.bytes_received = skops->bytes_received;
- g.bytes_acked = skops->bytes_acked;
+ if (skops->args[0] == BPF_TCP_LISTEN) {
+ g.num_listen++;
+ } else {
+ g.total_retrans = skops->total_retrans;
+ g.data_segs_in = skops->data_segs_in;
+ g.data_segs_out = skops->data_segs_out;
+ g.bytes_received = skops->bytes_received;
+ g.bytes_acked = skops->bytes_acked;
+ }
bpf_map_update_elem(&global_map, &key, &g,
BPF_ANY);
}
break;
+ case BPF_SOCK_OPS_TCP_LISTEN_CB:
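+		/* Enable state-change callbacks so that the BPF_TCP_LISTEN
+		 * transition handled above can increment num_listen.
+		 */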
+ bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_STATE_CB_FLAG);
+ break;
default:
rv = -1;
}
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c
index 84ab5163c828..a275c2971376 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_user.c
+++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c
@@ -1,27 +1,59 @@
// SPDX-License-Identifier: GPL-2.0
+#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
-#include <stdio.h>
#include <unistd.h>
#include <errno.h>
-#include <signal.h>
#include <string.h>
-#include <assert.h>
-#include <linux/perf_event.h>
-#include <linux/ptrace.h>
#include <linux/bpf.h>
-#include <sys/ioctl.h>
-#include <sys/time.h>
#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_util.h"
+
#include "bpf_rlimit.h"
-#include <linux/perf_event.h>
+#include "bpf_util.h"
+#include "cgroup_helpers.h"
+
#include "test_tcpbpf.h"
+#define EXPECT_EQ(expected, actual, fmt) \
+ do { \
+ if ((expected) != (actual)) { \
+ printf(" Value of: " #actual "\n" \
+ " Actual: %" fmt "\n" \
+ " Expected: %" fmt "\n", \
+ (actual), (expected)); \
+ goto err; \
+ } \
+ } while (0)
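+
+/* Note: EXPECT_EQ jumps to a local "err" label on mismatch, so every
+ * caller must provide one (see verify_result() below).
+ */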
+
+int verify_result(const struct tcpbpf_globals *result)
+{
+ __u32 expected_events;
+
+ expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) |
+ (1 << BPF_SOCK_OPS_RWND_INIT) |
+ (1 << BPF_SOCK_OPS_TCP_CONNECT_CB) |
+ (1 << BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) |
+ (1 << BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) |
+ (1 << BPF_SOCK_OPS_NEEDS_ECN) |
+ (1 << BPF_SOCK_OPS_STATE_CB) |
+ (1 << BPF_SOCK_OPS_TCP_LISTEN_CB));
+
+ EXPECT_EQ(expected_events, result->event_map, "#" PRIx32);
+ EXPECT_EQ(501ULL, result->bytes_received, "llu");
+ EXPECT_EQ(1002ULL, result->bytes_acked, "llu");
+ EXPECT_EQ(1, result->data_segs_in, PRIu32);
+ EXPECT_EQ(1, result->data_segs_out, PRIu32);
+ EXPECT_EQ(0x80, result->bad_cb_test_rv, PRIu32);
+ EXPECT_EQ(0, result->good_cb_test_rv, PRIu32);
+ EXPECT_EQ(1, result->num_listen, PRIu32);
+
+ return 0;
+err:
+ return -1;
+}
+
static int bpf_find_map(const char *test, struct bpf_object *obj,
const char *name)
{
@@ -35,42 +67,28 @@ static int bpf_find_map(const char *test, struct bpf_object *obj,
return bpf_map__fd(map);
}
-#define SYSTEM(CMD) \
- do { \
- if (system(CMD)) { \
- printf("system(%s) FAILS!\n", CMD); \
- } \
- } while (0)
-
int main(int argc, char **argv)
{
const char *file = "test_tcpbpf_kern.o";
struct tcpbpf_globals g = {0};
- int cg_fd, prog_fd, map_fd;
- bool debug_flag = false;
+ const char *cg_path = "/foo";
int error = EXIT_FAILURE;
struct bpf_object *obj;
- char cmd[100], *dir;
- struct stat buffer;
+ int prog_fd, map_fd;
+ int cg_fd = -1;
__u32 key = 0;
- int pid;
int rv;
- if (argc > 1 && strcmp(argv[1], "-d") == 0)
- debug_flag = true;
+ if (setup_cgroup_environment())
+ goto err;
- dir = "/tmp/cgroupv2/foo";
+ cg_fd = create_and_get_cgroup(cg_path);
+ if (!cg_fd)
+ goto err;
- if (stat(dir, &buffer) != 0) {
- SYSTEM("mkdir -p /tmp/cgroupv2");
- SYSTEM("mount -t cgroup2 none /tmp/cgroupv2");
- SYSTEM("mkdir -p /tmp/cgroupv2/foo");
- }
- pid = (int) getpid();
- sprintf(cmd, "echo %d >> /tmp/cgroupv2/foo/cgroup.procs", pid);
- SYSTEM(cmd);
+ if (join_cgroup(cg_path))
+ goto err;
- cg_fd = open(dir, O_DIRECTORY, O_RDONLY);
if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
printf("FAILED: load_bpf_file failed for: %s\n", file);
goto err;
@@ -83,7 +101,10 @@ int main(int argc, char **argv)
goto err;
}
- SYSTEM("./tcp_server.py");
+ if (system("./tcp_server.py")) {
+ printf("FAILED: TCP server\n");
+ goto err;
+ }
map_fd = bpf_find_map(__func__, obj, "global_map");
if (map_fd < 0)
@@ -95,34 +116,16 @@ int main(int argc, char **argv)
goto err;
}
- if (g.bytes_received != 501 || g.bytes_acked != 1002 ||
- g.data_segs_in != 1 || g.data_segs_out != 1 ||
- (g.event_map ^ 0x47e) != 0 || g.bad_cb_test_rv != 0x80 ||
- g.good_cb_test_rv != 0) {
+ if (verify_result(&g)) {
printf("FAILED: Wrong stats\n");
- if (debug_flag) {
- printf("\n");
- printf("bytes_received: %d (expecting 501)\n",
- (int)g.bytes_received);
- printf("bytes_acked: %d (expecting 1002)\n",
- (int)g.bytes_acked);
- printf("data_segs_in: %d (expecting 1)\n",
- g.data_segs_in);
- printf("data_segs_out: %d (expecting 1)\n",
- g.data_segs_out);
- printf("event_map: 0x%x (at least 0x47e)\n",
- g.event_map);
- printf("bad_cb_test_rv: 0x%x (expecting 0x80)\n",
- g.bad_cb_test_rv);
- printf("good_cb_test_rv:0x%x (expecting 0)\n",
- g.good_cb_test_rv);
- }
goto err;
}
+
printf("PASSED!\n");
error = 0;
err:
bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
+ close(cg_fd);
+ cleanup_cgroup_environment();
return error;
-
}
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 41106d9d5cc7..67c412d19c09 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -42,15 +42,12 @@
#endif
#include "bpf_rlimit.h"
#include "bpf_rand.h"
+#include "bpf_util.h"
#include "../../../include/linux/filter.h"
-#ifndef ARRAY_SIZE
-# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
#define MAX_INSNS BPF_MAXINSNS
#define MAX_FIXUPS 8
-#define MAX_NR_MAPS 7
+#define MAX_NR_MAPS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
@@ -70,6 +67,7 @@ struct bpf_test {
int fixup_prog1[MAX_FIXUPS];
int fixup_prog2[MAX_FIXUPS];
int fixup_map_in_map[MAX_FIXUPS];
+ int fixup_cgroup_storage[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
uint32_t retval;
@@ -4631,6 +4629,121 @@ static struct bpf_test tests[] = {
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "valid cgroup storage access",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid cgroup storage access 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 1 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid cgroup storage access 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "fd 1 is not pointing to valid bpf_map",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid per-cgroup storage access 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=256 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid cgroup storage access 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid cgroup storage access 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 7),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid cgroup storage access 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
"multiple registers share map_lookup_elem result",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
@@ -6997,7 +7110,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_in_map = { 3 },
@@ -7020,7 +7133,7 @@ static struct bpf_test tests[] = {
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_in_map = { 3 },
@@ -7042,7 +7155,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_in_map = { 3 },
@@ -12372,6 +12485,32 @@ static struct bpf_test tests[] = {
.result = REJECT,
.errstr = "variable ctx access var_off=(0x0; 0x4)",
},
+ {
+ "mov64 src == dst",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
+ // Check bounds are OK
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "mov64 src != dst",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
+ // Check bounds are OK
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
};
static int probe_filter_length(const struct bpf_insn *fp)
@@ -12476,6 +12615,19 @@ static int create_map_in_map(void)
return outer_map_fd;
}
+static int create_cgroup_storage(void)
+{
+ int fd;
+
+ fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE,
+ sizeof(struct bpf_cgroup_storage_key),
+ TEST_DATA_LEN, 0, 0);
+ if (fd < 0)
+ printf("Failed to create array '%s'!\n", strerror(errno));
+
+ return fd;
+}
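+
+/* TEST_DATA_LEN (64) doubles as the storage value size, which is why
+ * the cgroup storage tests above expect "value_size=64" in the log.
+ */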
+
static char bpf_vlog[UINT_MAX >> 8];
static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
@@ -12488,6 +12640,7 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
int *fixup_prog1 = test->fixup_prog1;
int *fixup_prog2 = test->fixup_prog2;
int *fixup_map_in_map = test->fixup_map_in_map;
+ int *fixup_cgroup_storage = test->fixup_cgroup_storage;
if (test->fill_helper)
test->fill_helper(test);
@@ -12555,6 +12708,14 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
fixup_map_in_map++;
} while (*fixup_map_in_map);
}
+
+ if (*fixup_cgroup_storage) {
+ map_fds[7] = create_cgroup_storage();
+ do {
+ prog[*fixup_cgroup_storage].imm = map_fds[7];
+ fixup_cgroup_storage++;
+ } while (*fixup_cgroup_storage);
+ }
}
static void do_test_single(struct bpf_test *test, bool unpriv,
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 3868dcb63420..cabe2a3a3b30 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -88,7 +88,7 @@ static int page_size;
static int page_cnt = 8;
static struct perf_event_mmap_page *header;
-int perf_event_mmap(int fd)
+int perf_event_mmap_header(int fd, struct perf_event_mmap_page **header)
{
void *base;
int mmap_size;
@@ -102,10 +102,15 @@ int perf_event_mmap(int fd)
return -1;
}
- header = base;
+ *header = base;
return 0;
}
+int perf_event_mmap(int fd)
+{
+ return perf_event_mmap_header(fd, &header);
+}
+
static int perf_event_poll(int fd)
{
struct pollfd pfd = { .fd = fd, .events = POLLIN };
@@ -163,3 +168,42 @@ int perf_event_poller(int fd, perf_event_print_fn output_fn)
return ret;
}
+
+int perf_event_poller_multi(int *fds, struct perf_event_mmap_page **headers,
+ int num_fds, perf_event_print_fn output_fn)
+{
+ enum bpf_perf_event_ret ret;
+ struct pollfd *pfds;
+ void *buf = NULL;
+ size_t len = 0;
+ int i;
+
+ pfds = calloc(num_fds, sizeof(*pfds));
+ if (!pfds)
+ return LIBBPF_PERF_EVENT_ERROR;
+
+ for (i = 0; i < num_fds; i++) {
+ pfds[i].fd = fds[i];
+ pfds[i].events = POLLIN;
+ }
+
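+	/* Poll indefinitely: a non-CONT return only breaks out of the
+	 * inner fd scan, so callers stop this poller by exiting the
+	 * process.
+	 */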
+ for (;;) {
+ poll(pfds, num_fds, 1000);
+ for (i = 0; i < num_fds; i++) {
+ if (!pfds[i].revents)
+ continue;
+
+ ret = bpf_perf_event_read_simple(headers[i],
+ page_cnt * page_size,
+ page_size, &buf, &len,
+ bpf_perf_event_print,
+ output_fn);
+ if (ret != LIBBPF_PERF_EVENT_CONT)
+ break;
+ }
+ }
+ free(buf);
+ free(pfds);
+
+ return ret;
+}
diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h
index 3b4bcf7f5084..18924f23db1b 100644
--- a/tools/testing/selftests/bpf/trace_helpers.h
+++ b/tools/testing/selftests/bpf/trace_helpers.h
@@ -3,6 +3,7 @@
#define __TRACE_HELPER_H
#include <libbpf.h>
+#include <linux/perf_event.h>
struct ksym {
long addr;
@@ -16,6 +17,9 @@ long ksym_get_addr(const char *name);
typedef enum bpf_perf_event_ret (*perf_event_print_fn)(void *data, int size);
int perf_event_mmap(int fd);
+int perf_event_mmap_header(int fd, struct perf_event_mmap_page **header);
/* return LIBBPF_PERF_EVENT_DONE or LIBBPF_PERF_EVENT_ERROR */
int perf_event_poller(int fd, perf_event_print_fn output_fn);
+int perf_event_poller_multi(int *fds, struct perf_event_mmap_page **headers,
+ int num_fds, perf_event_print_fn output_fn);
#endif
diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh
new file mode 100755
index 000000000000..76f1ab4898d9
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh
@@ -0,0 +1,217 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test uses the standard topology for testing gretap. See
+# ../../../net/forwarding/mirror_gre_topo_lib.sh for more details.
+#
+# It tests offloading of various gretap mirror features specific to
+# mlxsw.
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+NUM_NETIFS=6
+source $lib_dir/lib.sh
+source $lib_dir/mirror_lib.sh
+source $lib_dir/mirror_gre_lib.sh
+source $lib_dir/mirror_gre_topo_lib.sh
+
+setup_keyful()
+{
+ tunnel_create gt6-key ip6gretap 2001:db8:3::1 2001:db8:3::2 \
+ ttl 100 tos inherit allow-localremote \
+ key 1234
+
+ tunnel_create h3-gt6-key ip6gretap 2001:db8:3::2 2001:db8:3::1 \
+ key 1234
+ ip link set h3-gt6-key vrf v$h3
+ matchall_sink_create h3-gt6-key
+
+ ip address add dev $swp3 2001:db8:3::1/64
+ ip address add dev $h3 2001:db8:3::2/64
+}
+
+cleanup_keyful()
+{
+ ip address del dev $h3 2001:db8:3::2/64
+ ip address del dev $swp3 2001:db8:3::1/64
+
+ tunnel_destroy h3-gt6-key
+ tunnel_destroy gt6-key
+}
+
+setup_soft()
+{
+ # Set up a topology for testing underlay routes that point at an
+ # unsupported soft device.
+
+ tunnel_create gt6-soft ip6gretap 2001:db8:4::1 2001:db8:4::2 \
+ ttl 100 tos inherit allow-localremote
+
+ tunnel_create h3-gt6-soft ip6gretap 2001:db8:4::2 2001:db8:4::1
+ ip link set h3-gt6-soft vrf v$h3
+ matchall_sink_create h3-gt6-soft
+
+ ip link add name v1 type veth peer name v2
+ ip link set dev v1 up
+ ip address add dev v1 2001:db8:4::1/64
+
+ ip link set dev v2 vrf v$h3
+ ip link set dev v2 up
+ ip address add dev v2 2001:db8:4::2/64
+}
+
+cleanup_soft()
+{
+ ip link del dev v1
+
+ tunnel_destroy h3-gt6-soft
+ tunnel_destroy gt6-soft
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ vrf_prepare
+ mirror_gre_topo_create
+
+ ip address add dev $swp3 2001:db8:2::1/64
+ ip address add dev $h3 2001:db8:2::2/64
+
+ ip address add dev $swp3 192.0.2.129/28
+ ip address add dev $h3 192.0.2.130/28
+
+ setup_keyful
+ setup_soft
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ cleanup_soft
+ cleanup_keyful
+
+ ip address del dev $h3 2001:db8:2::2/64
+ ip address del dev $swp3 2001:db8:2::1/64
+
+ ip address del dev $h3 192.0.2.130/28
+ ip address del dev $swp3 192.0.2.129/28
+
+ mirror_gre_topo_destroy
+ vrf_cleanup
+}
+
+test_span_gre_ttl_inherit()
+{
+ local tundev=$1; shift
+ local type=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ ip link set dev $tundev type $type ttl inherit
+ mirror_install $swp1 ingress $tundev "matchall $tcflags"
+ fail_test_span_gre_dir $tundev ingress
+
+ ip link set dev $tundev type $type ttl 100
+
+ quick_test_span_gre_dir $tundev ingress
+ mirror_uninstall $swp1 ingress
+
+ log_test "$what: no offload on TTL of inherit ($tcflags)"
+}
+
+test_span_gre_tos_fixed()
+{
+ local tundev=$1; shift
+ local type=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ ip link set dev $tundev type $type tos 0x10
+ mirror_install $swp1 ingress $tundev "matchall $tcflags"
+ fail_test_span_gre_dir $tundev ingress
+
+ ip link set dev $tundev type $type tos inherit
+ quick_test_span_gre_dir $tundev ingress
+ mirror_uninstall $swp1 ingress
+
+ log_test "$what: no offload on a fixed TOS ($tcflags)"
+}
+
+test_span_failable()
+{
+ local should_fail=$1; shift
+ local tundev=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ mirror_install $swp1 ingress $tundev "matchall $tcflags"
+ if ((should_fail)); then
+ fail_test_span_gre_dir $tundev ingress
+ else
+ quick_test_span_gre_dir $tundev ingress
+ fi
+ mirror_uninstall $swp1 ingress
+
+ log_test "$what: should_fail=$should_fail ($tcflags)"
+}
+
+test_failable()
+{
+ local should_fail=$1; shift
+
+ test_span_failable $should_fail gt6-key "mirror to keyful gretap"
+ test_span_failable $should_fail gt6-soft "mirror to gretap w/ soft underlay"
+}
+
+test_sw()
+{
+ slow_path_trap_install $swp1 ingress
+ slow_path_trap_install $swp1 egress
+
+ test_failable 0
+
+ slow_path_trap_uninstall $swp1 egress
+ slow_path_trap_uninstall $swp1 ingress
+}
+
+test_hw()
+{
+ test_failable 1
+
+ test_span_gre_tos_fixed gt4 gretap "mirror to gretap"
+ test_span_gre_tos_fixed gt6 ip6gretap "mirror to ip6gretap"
+
+ test_span_gre_ttl_inherit gt4 gretap "mirror to gretap"
+ test_span_gre_ttl_inherit gt6 ip6gretap "mirror to ip6gretap"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+if ! tc_offload_check; then
+ check_err 1 "Could not test offloaded functionality"
+ log_test "mlxsw-specific tests for mirror to gretap"
+ exit
+fi
+
+tcflags="skip_hw"
+test_sw
+
+tcflags="skip_sw"
+test_hw
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
new file mode 100644
index 000000000000..6f3a70df63bc
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
@@ -0,0 +1,197 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# Test offloading a number of mirrors-to-gretap. The test creates a number of
+# tunnels and adds one flower mirror for each tunnel, matching a given host
+# IP. It then generates traffic at each of the host IPs and checks that the
+# traffic has been mirrored at the appropriate tunnel.
+#
+# +--------------------------+ +--------------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 2001:db8:1:X::1/64 | | 2001:db8:1:X::2/64 | |
+# +-----|--------------------+ +--------------------|-----+
+# | |
+# +-----|-------------------------------------------------------------|-----+
+# | SW o--> mirrors | |
+# | +---|-------------------------------------------------------------|---+ |
+# | | + $swp1 BR $swp2 + | |
+# | +---------------------------------------------------------------------+ |
+# | |
+# | + $swp3 + gt6-<X> (ip6gretap) |
+# | | 2001:db8:2:X::1/64 : loc=2001:db8:2:X::1 |
+# | | : rem=2001:db8:2:X::2 |
+# | | : ttl=100 |
+# | | : tos=inherit |
+# | | : |
+# +-----|--------------------------------:----------------------------------+
+# | :
+# +-----|--------------------------------:----------------------------------+
+# | H3 + $h3 + h3-gt6-<X> (ip6gretap) |
+# | 2001:db8:2:X::2/64 loc=2001:db8:2:X::2 |
+# | rem=2001:db8:2:X::1 |
+# | ttl=100 |
+# | tos=inherit |
+# | |
+# +-------------------------------------------------------------------------+
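+#
+# For example, instance X=0 uses tunnel gt6-0 (local 2001:db8:2:0::1,
+# remote 2001:db8:2:0::2), its capture peer h3-gt6-0 at H3, and a
+# flower rule on $swp1 matching dst_ip 2001:db8:1:0::2.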
+
+source ../../../../net/forwarding/mirror_lib.sh
+
+MIRROR_NUM_NETIFS=6
+
+mirror_gre_ipv6_addr()
+{
+ local net=$1; shift
+ local num=$1; shift
+
+ printf "2001:db8:%x:%x" $net $num
+}
+
+mirror_gre_tunnels_create()
+{
+ local count=$1; shift
+ local should_fail=$1; shift
+
+ MIRROR_GRE_BATCH_FILE="$(mktemp)"
+ for ((i=0; i < count; ++i)); do
+ local match_dip=$(mirror_gre_ipv6_addr 1 $i)::2
+ local htun=h3-gt6-$i
+ local tun=gt6-$i
+
+ ((mirror_gre_tunnels++))
+
+ ip address add dev $h1 $(mirror_gre_ipv6_addr 1 $i)::1/64
+ ip address add dev $h2 $(mirror_gre_ipv6_addr 1 $i)::2/64
+
+ ip address add dev $swp3 $(mirror_gre_ipv6_addr 2 $i)::1/64
+ ip address add dev $h3 $(mirror_gre_ipv6_addr 2 $i)::2/64
+
+ tunnel_create $tun ip6gretap \
+ $(mirror_gre_ipv6_addr 2 $i)::1 \
+ $(mirror_gre_ipv6_addr 2 $i)::2 \
+ ttl 100 tos inherit allow-localremote
+
+ tunnel_create $htun ip6gretap \
+ $(mirror_gre_ipv6_addr 2 $i)::2 \
+ $(mirror_gre_ipv6_addr 2 $i)::1
+ ip link set $htun vrf v$h3
+ matchall_sink_create $htun
+
+ cat >> $MIRROR_GRE_BATCH_FILE <<-EOF
+ filter add dev $swp1 ingress pref 1000 \
+ protocol ipv6 \
+ flower $tcflags dst_ip $match_dip \
+ action mirred egress mirror dev $tun
+ EOF
+ done
+
+ tc -b $MIRROR_GRE_BATCH_FILE
+ check_err_fail $should_fail $? "Mirror rule insertion"
+}
+
+mirror_gre_tunnels_destroy()
+{
+ local count=$1; shift
+
+ for ((i=0; i < count; ++i)); do
+ local htun=h3-gt6-$i
+ local tun=gt6-$i
+
+ ip address del dev $h3 $(mirror_gre_ipv6_addr 2 $i)::2/64
+ ip address del dev $swp3 $(mirror_gre_ipv6_addr 2 $i)::1/64
+
+ ip address del dev $h2 $(mirror_gre_ipv6_addr 1 $i)::2/64
+ ip address del dev $h1 $(mirror_gre_ipv6_addr 1 $i)::1/64
+
+ tunnel_destroy $htun
+ tunnel_destroy $tun
+ done
+}
+
+__mirror_gre_test()
+{
+ local count=$1; shift
+ local should_fail=$1; shift
+
+ mirror_gre_tunnels_create $count $should_fail
+ if ((should_fail)); then
+ return
+ fi
+
+ sleep 5
+
+ for ((i = 0; i < count; ++i)); do
+ local dip=$(mirror_gre_ipv6_addr 1 $i)::2
+ local htun=h3-gt6-$i
+ local message
+
+ icmp6_capture_install $htun
+ mirror_test v$h1 "" $dip $htun 100 10
+ icmp6_capture_uninstall $htun
+ done
+}
+
+mirror_gre_test()
+{
+ local count=$1; shift
+ local should_fail=$1; shift
+
+ if ! tc_offload_check $TC_FLOWER_NUM_NETIFS; then
+ check_err 1 "Could not test offloaded functionality"
+ return
+ fi
+
+ tcflags="skip_sw"
+ __mirror_gre_test $count $should_fail
+}
+
+mirror_gre_setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ mirror_gre_tunnels=0
+
+ vrf_prepare
+
+ simple_if_init $h1
+ simple_if_init $h2
+ simple_if_init $h3
+
+ ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 up
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ tc qdisc add dev $swp1 clsact
+
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+
+ ip link set dev $swp3 up
+}
+
+mirror_gre_cleanup()
+{
+ mirror_gre_tunnels_destroy $mirror_gre_tunnels
+
+ ip link set dev $swp3 down
+
+ ip link set dev $swp2 down
+
+ tc qdisc del dev $swp1 clsact
+ ip link set dev $swp1 down
+
+ ip link del dev br1
+
+ simple_if_fini $h3
+ simple_if_fini $h2
+ simple_if_fini $h1
+
+ vrf_cleanup
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
new file mode 100755
index 000000000000..1ca631d5aaba
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
@@ -0,0 +1,189 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for DSCP prioritization and rewrite. Packets ingress $swp1 with a DSCP
+# tag and are prioritized according to the map at $swp1. They egress $swp2 and
+# the DSCP value is updated to match the map at that interface. The updated DSCP
+# tag is verified at $h2.
+#
+# ICMP responses are produced with the same DSCP tag that arrived at $h2. They
+# go through prioritization at $swp2 and DSCP retagging at $swp1. The tag is
+# verified at $h1--it should match the original tag.
+#
+# +----------------------+ +----------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 192.0.2.1/28 | | 192.0.2.2/28 | |
+# +----|-----------------+ +----------------|-----+
+# | |
+# +----|----------------------------------------------------------------|-----+
+# | SW | | |
+# | +-|----------------------------------------------------------------|-+ |
+# | | + $swp1 BR $swp2 + | |
+# | | APP=0,5,10 .. 7,5,17 APP=0,5,20 .. 7,5,27 | |
+# | +--------------------------------------------------------------------+ |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ test_dscp
+"
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+NUM_NETIFS=4
+source $lib_dir/lib.sh
+
+h1_create()
+{
+ local dscp;
+
+ simple_if_init $h1 192.0.2.1/28
+ tc qdisc add dev $h1 clsact
+ dscp_capture_install $h1 10
+}
+
+h1_destroy()
+{
+ dscp_capture_uninstall $h1 10
+ tc qdisc del dev $h1 clsact
+ simple_if_fini $h1 192.0.2.1/28
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.2/28
+ tc qdisc add dev $h2 clsact
+ dscp_capture_install $h2 20
+}
+
+h2_destroy()
+{
+ dscp_capture_uninstall $h2 20
+ tc qdisc del dev $h2 clsact
+ simple_if_fini $h2 192.0.2.2/28
+}
+
+dscp_map()
+{
+ local base=$1; shift
+
+ for prio in {0..7}; do
+ echo app=$prio,5,$((base + prio))
+ done
+}
+
+switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 up
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+
+ lldptool -T -i $swp1 -V APP $(dscp_map 10) >/dev/null
+ lldptool -T -i $swp2 -V APP $(dscp_map 20) >/dev/null
+ lldpad_app_wait_set $swp1
+ lldpad_app_wait_set $swp2
+}
+
+switch_destroy()
+{
+ lldptool -T -i $swp2 -V APP -d $(dscp_map 20) >/dev/null
+ lldptool -T -i $swp1 -V APP -d $(dscp_map 10) >/dev/null
+ lldpad_app_wait_del
+
+ ip link set dev $swp2 nomaster
+ ip link set dev $swp1 nomaster
+ ip link del dev br1
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.2
+}
+
+dscp_ping_test()
+{
+ local vrf_name=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local prio=$1; shift
+ local dev_10=$1; shift
+ local dev_20=$1; shift
+
+ local dscp_10=$(((prio + 10) << 2))
+ local dscp_20=$(((prio + 20) << 2))
+
+ RET=0
+
+ local -A t0s
+ eval "t0s=($(dscp_fetch_stats $dev_10 10)
+ $(dscp_fetch_stats $dev_20 20))"
+
+ ip vrf exec $vrf_name \
+ ${PING} -Q $dscp_10 ${sip:+-I $sip} $dip \
+ -c 10 -i 0.1 -w 2 &> /dev/null
+
+ local -A t1s
+ eval "t1s=($(dscp_fetch_stats $dev_10 10)
+ $(dscp_fetch_stats $dev_20 20))"
+
+ for key in ${!t0s[@]}; do
+ local expect
+ if ((key == prio+10 || key == prio+20)); then
+ expect=10
+ else
+ expect=0
+ fi
+
+ local delta=$((t1s[$key] - t0s[$key]))
+ ((expect == delta))
+ check_err $? "DSCP $key: Expected to capture $expect packets, got $delta."
+ done
+
+ log_test "DSCP rewrite: $dscp_10-(prio $prio)-$dscp_20"
+}
+
+test_dscp()
+{
+ for prio in {0..7}; do
+ dscp_ping_test v$h1 192.0.2.1 192.0.2.2 $prio $h1 $h2
+ done
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
new file mode 100755
index 000000000000..281d90766e12
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
@@ -0,0 +1,233 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for DSCP prioritization in the router.
+#
+# With ip_forward_update_priority disabled, the packets are expected to keep
+# their DSCP (which in this test uses only values 0..7) intact as they are
+# forwarded by the switch. That is verified at $h2. ICMP responses are formed
+# with the same DSCP as the requests, and likewise pass through the switch
+# intact, which is verified at $h1.
+#
+# With ip_forward_update_priority enabled, the router reprioritizes the packets
+# according to the table in reprioritize(). Thus, say, DSCP 7 maps to priority
+# 4, which on egress maps back to DSCP 4. The response packet then gets
+# reprioritized to 6, getting DSCP 6 on egress.
+#
+# +----------------------+ +----------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 192.0.2.1/28 | | 192.0.2.18/28 | |
+# +----|-----------------+ +----------------|-----+
+# | |
+# +----|----------------------------------------------------------------|-----+
+# | SW | | |
+# | + $swp1 $swp2 + |
+# | 192.0.2.2/28 192.0.2.17/28 |
+# | APP=0,5,0 .. 7,5,7 APP=0,5,0 .. 7,5,7 |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ test_update
+ test_no_update
+"
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+NUM_NETIFS=4
+source $lib_dir/lib.sh
+
+reprioritize()
+{
+ local in=$1; shift
+
+ # This is based on rt_tos2priority in include/net/route.h. Assuming 1:1
+ # mapping between priorities and TOS, it yields a new priority for a
+ # packet with ingress priority of $in.
+ local -a reprio=(0 0 2 2 6 6 4 4)
+
+ echo ${reprio[$in]}
+}
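+
+# For example, reprioritize 7 yields 4, and reprioritize 4 yields 6,
+# matching the DSCP 7 -> 4 -> 6 round trip described in the header
+# comment.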
+
+h1_create()
+{
+ local dscp;
+
+ simple_if_init $h1 192.0.2.1/28
+ tc qdisc add dev $h1 clsact
+ dscp_capture_install $h1 0
+ ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2
+}
+
+h1_destroy()
+{
+ ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2
+ dscp_capture_uninstall $h1 0
+ tc qdisc del dev $h1 clsact
+ simple_if_fini $h1 192.0.2.1/28
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.18/28
+ tc qdisc add dev $h2 clsact
+ dscp_capture_install $h2 0
+ ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17
+}
+
+h2_destroy()
+{
+ ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17
+ dscp_capture_uninstall $h2 0
+ tc qdisc del dev $h2 clsact
+ simple_if_fini $h2 192.0.2.18/28
+}
+
+dscp_map()
+{
+ local base=$1; shift
+
+ for prio in {0..7}; do
+ echo app=$prio,5,$((base + prio))
+ done
+}
+
+switch_create()
+{
+ simple_if_init $swp1 192.0.2.2/28
+ __simple_if_init $swp2 v$swp1 192.0.2.17/28
+
+ lldptool -T -i $swp1 -V APP $(dscp_map 0) >/dev/null
+ lldptool -T -i $swp2 -V APP $(dscp_map 0) >/dev/null
+ lldpad_app_wait_set $swp1
+ lldpad_app_wait_set $swp2
+}
+
+switch_destroy()
+{
+ lldptool -T -i $swp2 -V APP -d $(dscp_map 0) >/dev/null
+ lldptool -T -i $swp1 -V APP -d $(dscp_map 0) >/dev/null
+ lldpad_app_wait_del
+
+ __simple_if_fini $swp2 192.0.2.17/28
+ simple_if_fini $swp1 192.0.2.2/28
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+
+ sysctl_set net.ipv4.ip_forward_update_priority 1
+ h1_create
+ h2_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+ sysctl_restore net.ipv4.ip_forward_update_priority
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.18
+}
+
+dscp_ping_test()
+{
+ local vrf_name=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local prio=$1; shift
+ local reprio=$1; shift
+ local dev1=$1; shift
+ local dev2=$1; shift
+
+ local prio2=$($reprio $prio) # ICMP Request egress prio
+ local prio3=$($reprio $prio2) # ICMP Response egress prio
+
+ local dscp=$((prio << 2)) # ICMP Request ingress DSCP
+ local dscp2=$((prio2 << 2)) # ICMP Request egress DSCP
+ local dscp3=$((prio3 << 2)) # ICMP Response egress DSCP
+
+ RET=0
+
+ eval "local -A dev1_t0s=($(dscp_fetch_stats $dev1 0))"
+ eval "local -A dev2_t0s=($(dscp_fetch_stats $dev2 0))"
+
+ ip vrf exec $vrf_name \
+ ${PING} -Q $dscp ${sip:+-I $sip} $dip \
+ -c 10 -i 0.1 -w 2 &> /dev/null
+
+ eval "local -A dev1_t1s=($(dscp_fetch_stats $dev1 0))"
+ eval "local -A dev2_t1s=($(dscp_fetch_stats $dev2 0))"
+
+ for i in {0..7}; do
+ local dscpi=$((i << 2))
+ local expect2=0
+ local expect3=0
+
+ if ((i == prio2)); then
+ expect2=10
+ fi
+ if ((i == prio3)); then
+ expect3=10
+ fi
+
+ local delta=$((dev2_t1s[$i] - dev2_t0s[$i]))
+ ((expect2 == delta))
+ check_err $? "DSCP $dscpi@$dev2: Expected to capture $expect2 packets, got $delta."
+
+ delta=$((dev1_t1s[$i] - dev1_t0s[$i]))
+ ((expect3 == delta))
+ check_err $? "DSCP $dscpi@$dev1: Expected to capture $expect3 packets, got $delta."
+ done
+
+ log_test "DSCP rewrite: $dscp-(prio $prio2)-$dscp2-(prio $prio3)-$dscp3"
+}
+
+__test_update()
+{
+ local update=$1; shift
+ local reprio=$1; shift
+
+ sysctl_restore net.ipv4.ip_forward_update_priority
+ sysctl_set net.ipv4.ip_forward_update_priority $update
+
+ for prio in {0..7}; do
+ dscp_ping_test v$h1 192.0.2.1 192.0.2.18 $prio $reprio $h1 $h2
+ done
+}
+
+test_update()
+{
+ __test_update 1 reprioritize
+}
+
+test_no_update()
+{
+ __test_update 0 echo
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh
new file mode 100644
index 000000000000..d231649b4f01
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ROUTER_NUM_NETIFS=4
+
+router_h1_create()
+{
+ simple_if_init $h1 192.0.1.1/24
+ ip route add 193.0.0.0/8 via 192.0.1.2 dev $h1
+}
+
+router_h1_destroy()
+{
+ ip route del 193.0.0.0/8 via 192.0.1.2 dev $h1
+ simple_if_fini $h1 192.0.1.1/24
+}
+
+router_h2_create()
+{
+ simple_if_init $h2 192.0.2.1/24
+ tc qdisc add dev $h2 handle ffff: ingress
+}
+
+router_h2_destroy()
+{
+ tc qdisc del dev $h2 handle ffff: ingress
+ simple_if_fini $h2 192.0.2.1/24
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link set dev $rp2 up
+
+ ip address add 192.0.1.2/24 dev $rp1
+ ip address add 192.0.2.2/24 dev $rp2
+}
+
+router_destroy()
+{
+ ip address del 192.0.2.2/24 dev $rp2
+ ip address del 192.0.1.2/24 dev $rp1
+
+ ip link set dev $rp2 down
+ ip link set dev $rp1 down
+}
+
+router_setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ h1mac=$(mac_get $h1)
+ rp1mac=$(mac_get $rp1)
+
+ vrf_prepare
+
+ router_h1_create
+ router_h2_create
+
+ router_create
+}
+
+router_offload_validate()
+{
+ local route_count=$1
+ local offloaded_count
+
+ offloaded_count=$(ip route | grep -o 'offload' | wc -l)
+ [[ $offloaded_count -ge $route_count ]]
+}
+
+router_routes_create()
+{
+ local route_count=$1
+ local count=0
+
+ ROUTE_FILE="$(mktemp)"
+
+ for i in {0..255}
+ do
+ for j in {0..255}
+ do
+ for k in {0..255}
+ do
+ if [[ $count -eq $route_count ]]; then
+ break 3
+ fi
+
+ echo route add 193.${i}.${j}.${k}/32 via \
+ 192.0.2.1 dev $rp2 >> $ROUTE_FILE
+ ((count++))
+ done
+ done
+ done
+
+ ip -b $ROUTE_FILE &> /dev/null
+}
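+
+# The batch file fed to "ip -b" above holds one command per line, e.g.
+# (a sketch, with $rp2 expanded to a hypothetical port name):
+#
+#   route add 193.0.0.0/32 via 192.0.2.1 dev eth1
+#   route add 193.0.0.1/32 via 192.0.2.1 dev eth1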
+
+router_routes_destroy()
+{
+ if [[ -v ROUTE_FILE ]]; then
+ rm -f $ROUTE_FILE
+ fi
+}
+
+router_test()
+{
+ local route_count=$1
+ local should_fail=$2
+ local count=0
+
+ RET=0
+
+ router_routes_create $route_count
+
+ router_offload_validate $route_count
+ check_err_fail $should_fail $? "Offload of $route_count routes"
+ if [[ $RET -ne 0 ]] || [[ $should_fail -eq 1 ]]; then
+ return
+ fi
+
+ tc filter add dev $h2 ingress protocol ip pref 1 flower \
+ skip_sw dst_ip 193.0.0.0/8 action drop
+
+ for i in {0..255}
+ do
+ for j in {0..255}
+ do
+ for k in {0..255}
+ do
+ if [[ $count -eq $route_count ]]; then
+ break 3
+ fi
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $rp1mac \
+ -A 192.0.1.1 -B 193.${i}.${j}.${k} \
+ -t ip -q
+ ((count++))
+ done
+ done
+ done
+
+ tc_check_packets "dev $h2 ingress" 1 $route_count
+ check_err $? "Offload mismatch"
+
+ tc filter del dev $h2 ingress protocol ip pref 1 flower \
+ skip_sw dst_ip 193.0.0.0/8 action drop
+
+ router_routes_destroy
+}
+
+router_cleanup()
+{
+ pre_cleanup
+
+ router_routes_destroy
+ router_destroy
+
+ router_h2_destroy
+ router_h1_destroy
+
+ vrf_cleanup
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
new file mode 100755
index 000000000000..3b75180f455d
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
@@ -0,0 +1,366 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test checks A-TCAM and C-TCAM operation in Spectrum-2. It tries
+# to exercise as many code paths in the eRP state machine as
+# possible.
+
+lib_dir=$(dirname $0)/../../../../net/forwarding
+
+ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
+ multiple_masks_test ctcam_edge_cases_test"
+NUM_NETIFS=2
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+
+tcflags="skip_hw"
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 198.51.100.1/24
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 192.0.2.1/24 198.51.100.1/24
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.2/24 198.51.100.2/24
+ tc qdisc add dev $h2 clsact
+}
+
+h2_destroy()
+{
+ tc qdisc del dev $h2 clsact
+ simple_if_fini $h2 192.0.2.2/24 198.51.100.2/24
+}
+
+single_mask_test()
+{
+ # When only a single mask is required, the device uses the master
+ # mask and not the eRP table. Verify that in this mode the right
+ # filter is matched
+
+ RET=0
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Single filter - did not match"
+
+ tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
+ $tcflags dst_ip 198.51.100.2 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 2
+ check_err $? "Two filters - did not match highest priority"
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 198.51.100.1 -B 198.51.100.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Two filters - did not match lowest priority"
+
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 198.51.100.1 -B 198.51.100.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 102 2
+ check_err $? "Single filter - did not match after delete"
+
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+
+ log_test "single mask test ($tcflags)"
+}
+
+identical_filters_test()
+{
+ # When two filters that only differ in their priority are used,
+ # one needs to be inserted into the C-TCAM. This test verifies
+ # that filters are correctly spilled to C-TCAM and that the right
+ # filter is matched
+
+ RET=0
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+ tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Did not match A-TCAM filter"
+
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match C-TCAM filter after A-TCAM delete"
+
+ tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 102 2
+ check_err $? "Did not match C-TCAM filter after A-TCAM add"
+
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_err $? "Did not match A-TCAM filter after C-TCAM delete"
+
+ tc filter del dev $h2 ingress protocol ip pref 3 handle 103 flower
+
+ log_test "identical filters test ($tcflags)"
+}
+
+two_masks_test()
+{
+ # When more than one mask is required, the eRP table is used. This
+ # test verifies that the eRP table is correctly allocated and used
+
+ RET=0
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+ tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \
+ $tcflags dst_ip 192.0.0.0/16 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Two filters - did not match highest priority"
+
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_err $? "Single filter - did not match"
+
+ tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
+ $tcflags dst_ip 192.0.2.0/24 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Two filters - did not match highest priority after add"
+
+ tc filter del dev $h2 ingress protocol ip pref 3 handle 103 flower
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+
+ log_test "two masks test ($tcflags)"
+}
+
+multiple_masks_test()
+{
+ # The number of masks in a region is limited. Once the maximum
+ # number of masks has been reached, filters that require new
+ # masks are spilled to the C-TCAM. This test verifies that
+ # spillage is performed correctly and that the right filter is
+ # matched
+
+ local index
+
+ RET=0
+
+ NUM_MASKS=32
+ BASE_INDEX=100
+
+ for i in $(eval echo {1..$NUM_MASKS}); do
+ index=$((BASE_INDEX - i))
+
+ tc filter add dev $h2 ingress protocol ip pref $index \
+ handle $index \
+ flower $tcflags dst_ip 192.0.2.2/${i} src_ip 192.0.2.1 \
+ action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 \
+ -B 192.0.2.2 -t ip -q
+
+ tc_check_packets "dev $h2 ingress" $index 1
+ check_err $? "$i filters - did not match highest priority (add)"
+ done
+
+ for i in $(eval echo {$NUM_MASKS..1}); do
+ index=$((BASE_INDEX - i))
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 \
+ -B 192.0.2.2 -t ip -q
+
+ tc_check_packets "dev $h2 ingress" $index 2
+ check_err $? "$i filters - did not match highest priority (del)"
+
+ tc filter del dev $h2 ingress protocol ip pref $index \
+ handle $index flower
+ done
+
+ log_test "multiple masks test ($tcflags)"
+}
+
+ctcam_two_atcam_masks_test()
+{
+ RET=0
+
+ # First case: C-TCAM is disabled when there are two A-TCAM masks.
+ # We push a filter into the C-TCAM by using two identical filters
+ # as in identical_filters_test()
+
+ # Filter goes into A-TCAM
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+ # Filter goes into C-TCAM
+ tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+ # Filter goes into A-TCAM
+ tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \
+ $tcflags dst_ip 192.0.2.0/24 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Did not match A-TCAM filter"
+
+ # Delete both A-TCAM and C-TCAM filters and make sure the remaining
+ # A-TCAM filter still works
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_err $? "Did not match A-TCAM filter"
+
+ tc filter del dev $h2 ingress protocol ip pref 3 handle 103 flower
+
+ log_test "ctcam with two atcam masks test ($tcflags)"
+}
+
+ctcam_one_atcam_mask_test()
+{
+ RET=0
+
+ # Second case: C-TCAM is disabled when there is one A-TCAM mask.
+ # The test is similar to identical_filters_test()
+
+ # Filter goes into A-TCAM
+ tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+ # Filter goes into C-TCAM
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Did not match C-TCAM filter"
+
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match A-TCAM filter"
+
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+
+ log_test "ctcam with one atcam mask test ($tcflags)"
+}
+
+ctcam_no_atcam_masks_test()
+{
+ RET=0
+
+ # Third case: C-TCAM is disabled when there are no A-TCAM masks
+ # This test exercises the code path that transitions the eRP table
+ # to its initial state after deleting the last C-TCAM mask
+
+ # Filter goes into A-TCAM
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+ # Filter goes into C-TCAM
+ tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+
+ log_test "ctcam with no atcam masks test ($tcflags)"
+}
+
+ctcam_edge_cases_test()
+{
+ # When the C-TCAM is disabled after deleting the last C-TCAM
+ # mask, we want to make sure the eRP state machine is put in
+ # the correct state
+
+ ctcam_two_atcam_masks_test
+ ctcam_one_atcam_mask_test
+ ctcam_no_atcam_masks_test
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ h2=${NETIFS[p2]}
+ h1mac=$(mac_get $h1)
+ h2mac=$(mac_get $h2)
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+if ! tc_offload_check; then
+ check_err 1 "Could not test offloaded functionality"
+ log_test "mlxsw-specific tests for tc flower"
+ exit
+else
+ tcflags="skip_sw"
+ tests_run
+fi
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh
new file mode 100644
index 000000000000..73035e25085d
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh
@@ -0,0 +1,119 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source "../../../../net/forwarding/devlink_lib.sh"
+
+if [ "$DEVLINK_VIDDID" != "15b3:cb84" ]; then
+ echo "SKIP: test is tailored for Mellanox Spectrum"
+ exit 1
+fi
+
+# Needed for returning to default
+declare -A KVD_DEFAULTS
+
+KVD_CHILDREN="linear hash_single hash_double"
+KVDL_CHILDREN="singles chunks large_chunks"
+
+devlink_sp_resource_minimize()
+{
+ local size
+ local i
+
+ for i in $KVD_CHILDREN; do
+ size=$(devlink_resource_get kvd "$i" | jq '.["size_min"]')
+ devlink_resource_size_set "$size" kvd "$i"
+ done
+
+ for i in $KVDL_CHILDREN; do
+ size=$(devlink_resource_get kvd linear "$i" | \
+ jq '.["size_min"]')
+ devlink_resource_size_set "$size" kvd linear "$i"
+ done
+}
+
+devlink_sp_size_kvd_to_default()
+{
+ local need_reload=0
+ local i
+
+ for i in $KVD_CHILDREN; do
+ local size=$(echo "${KVD_DEFAULTS[kvd_$i]}" | jq '.["size"]')
+ current_size=$(devlink_resource_size_get kvd "$i")
+
+ if [ "$size" -ne "$current_size" ]; then
+ devlink_resource_size_set "$size" kvd "$i"
+ need_reload=1
+ fi
+ done
+
+ for i in $KVDL_CHILDREN; do
+ local size=$(echo "${KVD_DEFAULTS[kvd_linear_$i]}" | \
+ jq '.["size"]')
+ current_size=$(devlink_resource_size_get kvd linear "$i")
+
+ if [ "$size" -ne "$current_size" ]; then
+ devlink_resource_size_set "$size" kvd linear "$i"
+ need_reload=1
+ fi
+ done
+
+ if [ "$need_reload" -ne "0" ]; then
+ devlink_reload
+ fi
+}
+
+devlink_sp_read_kvd_defaults()
+{
+ local key
+ local i
+
+ KVD_DEFAULTS[kvd]=$(devlink_resource_get "kvd")
+ for i in $KVD_CHILDREN; do
+ key=kvd_$i
+ KVD_DEFAULTS[$key]=$(devlink_resource_get kvd "$i")
+ done
+
+ for i in $KVDL_CHILDREN; do
+ key=kvd_linear_$i
+ KVD_DEFAULTS[$key]=$(devlink_resource_get kvd linear "$i")
+ done
+}
+
+KVD_PROFILES="default scale ipv4_max"
+
+devlink_sp_resource_kvd_profile_set()
+{
+ local profile=$1
+
+ case "$profile" in
+ scale)
+ devlink_resource_size_set 64000 kvd linear
+ devlink_resource_size_set 15616 kvd linear singles
+ devlink_resource_size_set 32000 kvd linear chunks
+ devlink_resource_size_set 16384 kvd linear large_chunks
+ devlink_resource_size_set 128000 kvd hash_single
+ devlink_resource_size_set 48000 kvd hash_double
+ devlink_reload
+ ;;
+ ipv4_max)
+ devlink_resource_size_set 64000 kvd linear
+ devlink_resource_size_set 15616 kvd linear singles
+ devlink_resource_size_set 32000 kvd linear chunks
+ devlink_resource_size_set 16384 kvd linear large_chunks
+ devlink_resource_size_set 144000 kvd hash_single
+ devlink_resource_size_set 32768 kvd hash_double
+ devlink_reload
+ ;;
+ default)
+ devlink_resource_size_set 98304 kvd linear
+ devlink_resource_size_set 16384 kvd linear singles
+ devlink_resource_size_set 49152 kvd linear chunks
+ devlink_resource_size_set 32768 kvd linear large_chunks
+ devlink_resource_size_set 87040 kvd hash_single
+ devlink_resource_size_set 60416 kvd hash_double
+ devlink_reload
+ ;;
+ *)
+ check_err 1 "Unknown profile $profile"
+ esac
+}
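+
+# For example, a test that needs the IPv4-heavy partitioning would call
+# (sketch):
+#
+#   devlink_sp_resource_kvd_profile_set ipv4_max
+#
+# Each named profile repartitions the KVD and triggers a devlink reload
+# so that the new sizes take effect.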
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh
new file mode 100755
index 000000000000..b1fe960e398a
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh
@@ -0,0 +1,117 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+NUM_NETIFS=1
+source devlink_lib_spectrum.sh
+
+setup_prepare()
+{
+ devlink_sp_read_kvd_defaults
+}
+
+cleanup()
+{
+ pre_cleanup
+ devlink_sp_size_kvd_to_default
+}
+
+trap cleanup EXIT
+
+setup_prepare
+
+profiles_test()
+{
+ local i
+
+ log_info "Running profile tests"
+
+ for i in $KVD_PROFILES; do
+ RET=0
+ devlink_sp_resource_kvd_profile_set $i
+ log_test "'$i' profile"
+ done
+
+ # Default is explicitly tested at end to ensure it's actually applied
+ RET=0
+ devlink_sp_resource_kvd_profile_set "default"
+ log_test "'default' profile"
+}
+
+resources_min_test()
+{
+ local size
+ local i
+ local j
+
+ log_info "Running KVD-minimum tests"
+
+ for i in $KVD_CHILDREN; do
+ RET=0
+ size=$(devlink_resource_get kvd "$i" | jq '.["size_min"]')
+ devlink_resource_size_set "$size" kvd "$i"
+
+ # In case of linear, need to minimize sub-resources as well
+ if [[ "$i" == "linear" ]]; then
+ for j in $KVDL_CHILDREN; do
+ devlink_resource_size_set 0 kvd linear "$j"
+ done
+ fi
+
+ devlink_reload
+ devlink_sp_size_kvd_to_default
+ log_test "'$i' minimize [$size]"
+ done
+}
+
+resources_max_test()
+{
+ local min_size
+ local size
+ local i
+ local j
+
+ log_info "Running KVD-maximum tests"
+ for i in $KVD_CHILDREN; do
+ RET=0
+ devlink_sp_resource_minimize
+
+ # Calculate the maximum possible size for the given partition
+ size=$(devlink_resource_size_get kvd)
+ for j in $KVD_CHILDREN; do
+ if [ "$i" != "$j" ]; then
+ min_size=$(devlink_resource_get kvd "$j" | \
+ jq '.["size_min"]')
+ size=$((size - min_size))
+ fi
+ done
+
+ # Test almost maximum size
+ devlink_resource_size_set "$((size - 128))" kvd "$i"
+ devlink_reload
+ log_test "'$i' almost maximize [$((size - 128))]"
+
+ # Test above maximum size
+ devlink resource set "$DEVLINK_DEV" \
+ path "kvd/$i" size $((size + 128)) &> /dev/null
+ check_fail $? "Set kvd/$i to size $((size + 128)) should fail"
+ log_test "'$i' Overflow rejection [$((size + 128))]"
+
+ # Test maximum size
+ if [ "$i" == "hash_single" ] || [ "$i" == "hash_double" ]; then
+ echo "SKIP: Observed problem with exact max $i"
+ continue
+ fi
+
+ devlink_resource_size_set "$size" kvd "$i"
+ devlink_reload
+ log_test "'$i' maximize [$size]"
+
+ devlink_sp_size_kvd_to_default
+ done
+}
+
+profiles_test
+resources_min_test
+resources_max_test
+
+exit "$RET"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh
new file mode 100644
index 000000000000..8d2186c7c62b
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../mirror_gre_scale.sh
+
+mirror_gre_get_target()
+{
+ local should_fail=$1; shift
+
+ if ((! should_fail)); then
+ echo 3
+ else
+ echo 4
+ fi
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
new file mode 100755
index 000000000000..a0a80e1a69e8
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+NUM_NETIFS=6
+source ../../../../net/forwarding/lib.sh
+source ../../../../net/forwarding/tc_common.sh
+source devlink_lib_spectrum.sh
+
+current_test=""
+
+cleanup()
+{
+ pre_cleanup
+ if [ -n "$current_test" ]; then
+ ${current_test}_cleanup
+ fi
+ devlink_sp_size_kvd_to_default
+}
+
+devlink_sp_read_kvd_defaults
+trap cleanup EXIT
+
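+# Each ${current_test}_scale.sh file sourced below is expected to define
+# the hooks the loop relies on: <test>_get_target, <test>_setup_prepare,
+# <test>_test and <test>_cleanup. It may also override the number of
+# netdevs the test needs via a <TEST>_NUM_NETIFS variable.
+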
+ALL_TESTS="router tc_flower mirror_gre"
+for current_test in ${TESTS:-$ALL_TESTS}; do
+ source ${current_test}_scale.sh
+
+ num_netifs_var=${current_test^^}_NUM_NETIFS
+ num_netifs=${!num_netifs_var:-$NUM_NETIFS}
+
+ for profile in $KVD_PROFILES; do
+ RET=0
+ devlink_sp_resource_kvd_profile_set $profile
+ if [[ $RET -gt 0 ]]; then
+ log_test "'$current_test' [$profile] setting"
+ continue
+ fi
+
+ for should_fail in 0 1; do
+ RET=0
+ target=$(${current_test}_get_target "$should_fail")
+ ${current_test}_setup_prepare
+ setup_wait $num_netifs
+ ${current_test}_test "$target" "$should_fail"
+ ${current_test}_cleanup
+ if [[ "$should_fail" -eq 0 ]]; then
+ log_test "'$current_test' [$profile] $target"
+ else
+ log_test "'$current_test' [$profile] overflow $target"
+ fi
+ done
+ done
+done
+current_test=""
+
+exit "$RET"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh
new file mode 100644
index 000000000000..21c4697d5bab
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../router_scale.sh
+
+router_get_target()
+{
+ local should_fail=$1
+ local target
+
+ target=$(devlink_resource_size_get kvd hash_single)
+
+ if [[ $should_fail -eq 0 ]]; then
+ target=$((target * 85 / 100))
+ else
+ target=$((target + 1))
+ fi
+
+ echo $target
+}
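+
+# For instance, under the "default" KVD profile set up by
+# devlink_lib_spectrum.sh, hash_single holds 87040 entries, so the
+# passing target would be 87040 * 85 / 100 = 73984 routes and the
+# overflow target 87041 routes.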
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh
new file mode 100644
index 000000000000..f9bfd8937765
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../tc_flower_scale.sh
+
+tc_flower_get_target()
+{
+ local should_fail=$1; shift
+
+ # 6144 (6x1024) is the theoretical maximum.
+ # One bank of 512 rules is taken by the 18-byte MC router rule.
+ # One rule is the ACL catch-all.
+ # 6144 - 512 - 1 = 5631
+ local target=5631
+
+ if ((! should_fail)); then
+ echo $target
+ else
+ echo $((target + 1))
+ fi
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
new file mode 100644
index 000000000000..a6d733d2a4b4
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
@@ -0,0 +1,134 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for resource limit of offloaded flower rules. The test adds a given
+# number of flower matches for different IPv6 addresses, then generates traffic,
+# and ensures each was hit exactly once. This file contains functions to set up
+# a testing topology and run the test, and is meant to be sourced from a test
+# script that calls the testing routine with a given number of rules.
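+#
+# A minimal caller might look like this (a sketch; the rule count and
+# the pass/fail flag are arbitrary):
+#
+#   source tc_flower_scale.sh
+#   tc_flower_setup_prepare
+#   setup_wait $TC_FLOWER_NUM_NETIFS
+#   tc_flower_test 1024 0    # expect 1024 rules to be offloaded
+#   tc_flower_cleanup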
+
+TC_FLOWER_NUM_NETIFS=2
+
+tc_flower_h1_create()
+{
+ simple_if_init $h1
+ tc qdisc add dev $h1 clsact
+}
+
+tc_flower_h1_destroy()
+{
+ tc qdisc del dev $h1 clsact
+ simple_if_fini $h1
+}
+
+tc_flower_h2_create()
+{
+ simple_if_init $h2
+ tc qdisc add dev $h2 clsact
+}
+
+tc_flower_h2_destroy()
+{
+ tc qdisc del dev $h2 clsact
+ simple_if_fini $h2
+}
+
+tc_flower_setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ h2=${NETIFS[p2]}
+
+ vrf_prepare
+
+ tc_flower_h1_create
+ tc_flower_h2_create
+}
+
+tc_flower_cleanup()
+{
+ pre_cleanup
+
+ tc_flower_h2_destroy
+ tc_flower_h1_destroy
+
+ vrf_cleanup
+
+ if [[ -v TC_FLOWER_BATCH_FILE ]]; then
+ rm -f $TC_FLOWER_BATCH_FILE
+ fi
+}
+
+tc_flower_addr()
+{
+ local num=$1; shift
+
+ printf "2001:db8:1::%x" $num
+}
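+
+# e.g. "tc_flower_addr 10" prints 2001:db8:1::a, and
+# "tc_flower_addr 5631" prints 2001:db8:1::15ff.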
+
+tc_flower_rules_create()
+{
+ local count=$1; shift
+ local should_fail=$1; shift
+
+ TC_FLOWER_BATCH_FILE="$(mktemp)"
+
+ for ((i = 0; i < count; ++i)); do
+ cat >> $TC_FLOWER_BATCH_FILE <<-EOF
+ filter add dev $h2 ingress \
+ prot ipv6 \
+ pref 1000 \
+ flower $tcflags dst_ip $(tc_flower_addr $i) \
+ action drop
+ EOF
+ done
+
+ tc -b $TC_FLOWER_BATCH_FILE
+ check_err_fail $should_fail $? "Rule insertion"
+}
+
+__tc_flower_test()
+{
+ local count=$1; shift
+ local should_fail=$1; shift
+ local last=$((count - 1))
+
+ tc_flower_rules_create $count $should_fail
+
+ for ((i = 0; i < count; ++i)); do
+ $MZ $h1 -q -c 1 -t ip -p 20 -b bc -6 \
+ -A 2001:db8:2::1 \
+ -B $(tc_flower_addr $i)
+ done
+
+ MISMATCHES=$(
+ tc -j -s filter show dev $h2 ingress |
+ jq -r '[ .[] | select(.kind == "flower") | .options |
+ values as $rule | .actions[].stats.packets |
+ select(. != 1) | "\(.) on \($rule.keys.dst_ip)" ] |
+ join(", ")'
+ )
+
+ test -z "$MISMATCHES"
+ check_err $? "Expected to capture 1 packet for each IP, but got $MISMATCHES"
+}
+
+tc_flower_test()
+{
+ local count=$1; shift
+ local should_fail=$1; shift
+
+ # We use the lower 16 bits of the IPv6 address for the match. There are
+ # also only 16 bits of rule priority space.
+ if ((count > 65536)); then
+ check_err 1 "Invalid count of $count. At most 65536 rules supported"
+ return
+ fi
+
+ if ! tc_offload_check $TC_FLOWER_NUM_NETIFS; then
+ check_err 1 "Could not test offloaded functionality"
+ return
+ fi
+
+ tcflags="skip_sw"
+ __tc_flower_test $count $should_fail
+}
diff --git a/tools/testing/selftests/gpio/gpio-mockup-chardev.c b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
index 667e916fa7cc..f8d468f54e98 100644
--- a/tools/testing/selftests/gpio/gpio-mockup-chardev.c
+++ b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
@@ -225,10 +225,10 @@ int gpio_pin_test(struct gpiochip_info *cinfo, int line, int flag, int value)
if (flag & GPIOHANDLE_REQUEST_ACTIVE_LOW)
debugfs_value = !debugfs_value;
- if (!(debugfs_dir == OUT && value == debugfs_value))
+ if (!(debugfs_dir == OUT && value == debugfs_value)) {
errno = -EINVAL;
- ret = -errno;
-
+ ret = -errno;
+ }
}
gpiotools_release_linehandle(fd);
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 1a0ac3a29ec5..78b24cf76f40 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -13,3 +13,4 @@ udpgso
udpgso_bench_rx
udpgso_bench_tx
tcp_inq
+tls
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 663e11e85727..9cca68e440a0 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -13,7 +13,7 @@ TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd
TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
-TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict
+TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
include ../lib.mk
diff --git a/tools/testing/selftests/net/forwarding/README b/tools/testing/selftests/net/forwarding/README
index 4a0964c42860..b8a2af8fcfb7 100644
--- a/tools/testing/selftests/net/forwarding/README
+++ b/tools/testing/selftests/net/forwarding/README
@@ -46,6 +46,8 @@ Guidelines for Writing Tests
o Where possible, reuse an existing topology for different tests instead
of recreating the same topology.
+o Tests that use anything but the most trivial topologies should include
+ ASCII art showing the topology.
o Where possible, IPv6 and IPv4 addresses shall conform to RFC 3849 and
RFC 5737, respectively.
o Where possible, tests shall be written so that they can be reused by
diff --git a/tools/testing/selftests/net/forwarding/bridge_port_isolation.sh b/tools/testing/selftests/net/forwarding/bridge_port_isolation.sh
new file mode 100755
index 000000000000..a43b4645c4de
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/bridge_port_isolation.sh
@@ -0,0 +1,151 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="ping_ipv4 ping_ipv6 flooding"
+NUM_NETIFS=6
+CHECK_TC="yes"
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.2/24 2001:db8:1::2/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 192.0.2.2/24 2001:db8:1::2/64
+}
+
+h3_create()
+{
+ simple_if_init $h3 192.0.2.3/24 2001:db8:1::3/64
+}
+
+h3_destroy()
+{
+ simple_if_fini $h3 192.0.2.3/24 2001:db8:1::3/64
+}
+
+switch_create()
+{
+ ip link add dev br0 type bridge
+
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 master br0
+ ip link set dev $swp3 master br0
+
+ ip link set dev $swp1 type bridge_slave isolated on
+ check_err $? "Can't set isolation on port $swp1"
+ ip link set dev $swp2 type bridge_slave isolated on
+ check_err $? "Can't set isolation on port $swp2"
+ ip link set dev $swp3 type bridge_slave isolated off
+ check_err $? "Can't disable isolation on port $swp3"
+
+ ip link set dev br0 up
+ ip link set dev $swp1 up
+ ip link set dev $swp2 up
+ ip link set dev $swp3 up
+}
+
+switch_destroy()
+{
+ ip link set dev $swp3 down
+ ip link set dev $swp2 down
+ ip link set dev $swp1 down
+
+ ip link del dev br0
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ h3_create
+
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+
+ h3_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ RET=0
+ ping_do $h1 192.0.2.2
+ check_fail $? "Ping worked when it should not have"
+
+ RET=0
+ ping_do $h3 192.0.2.2
+ check_err $? "Ping didn't work when it should have"
+
+ log_test "Isolated port ping"
+}
+
+ping_ipv6()
+{
+ RET=0
+ ping6_do $h1 2001:db8:1::2
+ check_fail $? "Ping6 worked when it should not have"
+
+ RET=0
+ ping6_do $h3 2001:db8:1::2
+ check_err $? "Ping6 didn't work when it should have"
+
+ log_test "Isolated port ping6"
+}
+
+flooding()
+{
+ local mac=de:ad:be:ef:13:37
+ local ip=192.0.2.100
+
+ RET=0
+ flood_test_do false $mac $ip $h1 $h2
+ check_err $? "Packet was flooded when it should not have been"
+
+ RET=0
+ flood_test_do true $mac $ip $h3 $h2
+ check_err $? "Packet was not flooded when it should have been"
+
+ log_test "Isolated port flooding"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
new file mode 100644
index 000000000000..5ab1e5f43022
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+##############################################################################
+# Source library
+
+relative_path="${BASH_SOURCE%/*}"
+if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
+ relative_path="."
+fi
+
+source "$relative_path/lib.sh"
+
+##############################################################################
+# Defines
+
+DEVLINK_DEV=$(devlink port show | grep "${NETIFS[p1]}" | \
+ grep -v "${NETIFS[p1]}[0-9]" | cut -d" " -f1 | \
+ rev | cut -d"/" -f2- | rev)
+if [ -z "$DEVLINK_DEV" ]; then
+ echo "SKIP: ${NETIFS[p1]} has no devlink device registered for it"
+ exit 1
+fi
+if [[ "$(echo $DEVLINK_DEV | grep -c pci)" -eq 0 ]]; then
+ echo "SKIP: devlink device's bus is not PCI"
+ exit 1
+fi
+
+DEVLINK_VIDDID=$(lspci -s $(echo $DEVLINK_DEV | cut -d"/" -f2) \
+ -n | cut -d" " -f3)
+
+##############################################################################
+# Sanity checks
+
+devlink -j resource show "$DEVLINK_DEV" &> /dev/null
+if [ $? -ne 0 ]; then
+ echo "SKIP: iproute2 too old, missing devlink resource support"
+ exit 1
+fi
+
+##############################################################################
+# Devlink helpers
+
+devlink_resource_names_to_path()
+{
+ local resource
+ local path=""
+
+ for resource in "${@}"; do
+ if [ "$path" == "" ]; then
+ path="$resource"
+ else
+ path="${path}/$resource"
+ fi
+ done
+
+ echo "$path"
+}
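+
+# e.g. "devlink_resource_names_to_path kvd linear singles" prints
+# "kvd/linear/singles".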
+
+devlink_resource_get()
+{
+ local name=$1
+ local resource_name=.[][\"$DEVLINK_DEV\"]
+
+ resource_name="$resource_name | .[] | select (.name == \"$name\")"
+
+ shift
+ for resource in "${@}"; do
+ resource_name="${resource_name} | .[\"resources\"][] | \
+ select (.name == \"$resource\")"
+ done
+
+ devlink -j resource show "$DEVLINK_DEV" | jq "$resource_name"
+}
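+
+# e.g. "devlink_resource_get kvd linear singles" prints the JSON object
+# describing the kvd/linear/singles resource; callers extract fields from
+# it with jq, as devlink_resource_size_get does below.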
+
+devlink_resource_size_get()
+{
+ local size=$(devlink_resource_get "$@" | jq '.["size_new"]')
+
+ if [ "$size" == "null" ]; then
+ devlink_resource_get "$@" | jq '.["size"]'
+ else
+ echo "$size"
+ fi
+}
+
+devlink_resource_size_set()
+{
+ local new_size=$1
+ local path
+
+ shift
+ path=$(devlink_resource_names_to_path "$@")
+ devlink resource set "$DEVLINK_DEV" path "$path" size "$new_size"
+ check_err $? "Failed setting path $path to size $size"
+}
+
+devlink_reload()
+{
+ local still_pending
+
+ devlink dev reload "$DEVLINK_DEV" &> /dev/null
+ check_err $? "Failed reload"
+
+ still_pending=$(devlink resource show "$DEVLINK_DEV" | \
+ grep -c "size_new")
+ check_err $still_pending "Failed reload - There are still unset sizes"
+}
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh
new file mode 100755
index 000000000000..cca2baa03fb8
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh
@@ -0,0 +1,253 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test traffic distribution when a wECMP route forwards traffic to two GRE
+# tunnels.
+#
+# +-------------------------+
+# | H1 |
+# | $h1 + |
+# | 192.0.2.1/28 | |
+# +-------------------|-----+
+# |
+# +-------------------|------------------------+
+# | SW1 | |
+# | $ol1 + |
+# | 192.0.2.2/28 |
+# | |
+# | + g1a (gre) + g1b (gre) |
+# | loc=192.0.2.65 loc=192.0.2.81 |
+# | rem=192.0.2.66 --. rem=192.0.2.82 --. |
+# | tos=inherit | tos=inherit | |
+# | .------------------' | |
+# | | .------------------' |
+# | v v |
+# | + $ul1.111 (vlan) + $ul1.222 (vlan) |
+# | | 192.0.2.129/28 | 192.0.2.145/28 |
+# | \ / |
+# | \________________/ |
+# | | |
+# | + $ul1 |
+# +------------|-------------------------------+
+# |
+# +------------|-------------------------------+
+# | SW2 + $ul2 |
+# | _______|________ |
+# | / \ |
+# | / \ |
+# | + $ul2.111 (vlan) + $ul2.222 (vlan) |
+# | ^ 192.0.2.130/28 ^ 192.0.2.146/28 |
+# | | | |
+# | | '------------------. |
+# | '------------------. | |
+# | + g2a (gre) | + g2b (gre) | |
+# | loc=192.0.2.66 | loc=192.0.2.82 | |
+# | rem=192.0.2.65 --' rem=192.0.2.81 --' |
+# | tos=inherit tos=inherit |
+# | |
+# | $ol2 + |
+# | 192.0.2.17/28 | |
+# +-------------------|------------------------+
+# |
+# +-------------------|-----+
+# | H2 | |
+# | $h2 + |
+# | 192.0.2.18/28 |
+# +-------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ multipath_ipv4
+"
+
+NUM_NETIFS=6
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+ ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2
+}
+
+h1_destroy()
+{
+ ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2
+ simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+sw1_create()
+{
+ simple_if_init $ol1 192.0.2.2/28
+ __simple_if_init $ul1 v$ol1
+ vlan_create $ul1 111 v$ol1 192.0.2.129/28
+ vlan_create $ul1 222 v$ol1 192.0.2.145/28
+
+ tunnel_create g1a gre 192.0.2.65 192.0.2.66 tos inherit dev v$ol1
+ __simple_if_init g1a v$ol1 192.0.2.65/32
+ ip route add vrf v$ol1 192.0.2.66/32 via 192.0.2.130
+
+ tunnel_create g1b gre 192.0.2.81 192.0.2.82 tos inherit dev v$ol1
+ __simple_if_init g1b v$ol1 192.0.2.81/32
+ ip route add vrf v$ol1 192.0.2.82/32 via 192.0.2.146
+
+ ip route add vrf v$ol1 192.0.2.16/28 \
+ nexthop dev g1a \
+ nexthop dev g1b
+
+ tc qdisc add dev $ul1 clsact
+ tc filter add dev $ul1 egress pref 111 prot ipv4 \
+ flower dst_ip 192.0.2.66 action pass
+ tc filter add dev $ul1 egress pref 222 prot ipv4 \
+ flower dst_ip 192.0.2.82 action pass
+}
+
+sw1_destroy()
+{
+ tc qdisc del dev $ul1 clsact
+
+ ip route del vrf v$ol1 192.0.2.16/28
+
+ ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
+ __simple_if_fini g1b 192.0.2.81/32
+ tunnel_destroy g1b
+
+ ip route del vrf v$ol1 192.0.2.66/32 via 192.0.2.130
+ __simple_if_fini g1a 192.0.2.65/32
+ tunnel_destroy g1a
+
+ vlan_destroy $ul1 222
+ vlan_destroy $ul1 111
+ __simple_if_fini $ul1
+ simple_if_fini $ol1 192.0.2.2/28
+}
+
+sw2_create()
+{
+ simple_if_init $ol2 192.0.2.17/28
+ __simple_if_init $ul2 v$ol2
+ vlan_create $ul2 111 v$ol2 192.0.2.130/28
+ vlan_create $ul2 222 v$ol2 192.0.2.146/28
+
+ tunnel_create g2a gre 192.0.2.66 192.0.2.65 tos inherit dev v$ol2
+ __simple_if_init g2a v$ol2 192.0.2.66/32
+ ip route add vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+
+ tunnel_create g2b gre 192.0.2.82 192.0.2.81 tos inherit dev v$ol2
+ __simple_if_init g2b v$ol2 192.0.2.82/32
+ ip route add vrf v$ol2 192.0.2.81/32 via 192.0.2.145
+
+ ip route add vrf v$ol2 192.0.2.0/28 \
+ nexthop dev g2a \
+ nexthop dev g2b
+}
+
+sw2_destroy()
+{
+ ip route del vrf v$ol2 192.0.2.0/28
+
+ ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
+ __simple_if_fini g2b 192.0.2.82/32
+ tunnel_destroy g2b
+
+ ip route del vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+ __simple_if_fini g2a 192.0.2.66/32
+ tunnel_destroy g2a
+
+ vlan_destroy $ul2 222
+ vlan_destroy $ul2 111
+ __simple_if_fini $ul2
+ simple_if_fini $ol2 192.0.2.17/28
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.18/28
+ ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17
+}
+
+h2_destroy()
+{
+ ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17
+ simple_if_fini $h2 192.0.2.18/28
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ vrf_prepare
+ h1_create
+ sw1_create
+ sw2_create
+ h2_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ h2_destroy
+ sw2_destroy
+ sw1_destroy
+ h1_destroy
+ vrf_cleanup
+}
+
+multipath4_test()
+{
+ local what=$1; shift
+ local weight1=$1; shift
+ local weight2=$1; shift
+
+ sysctl_set net.ipv4.fib_multipath_hash_policy 1
+ ip route replace vrf v$ol1 192.0.2.16/28 \
+ nexthop dev g1a weight $weight1 \
+ nexthop dev g1b weight $weight2
+
+ local t0_111=$(tc_rule_stats_get $ul1 111 egress)
+ local t0_222=$(tc_rule_stats_get $ul1 222 egress)
+
+ ip vrf exec v$h1 \
+ $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
+ -d 1msec -t udp "sp=1024,dp=0-32768"
+
+ local t1_111=$(tc_rule_stats_get $ul1 111 egress)
+ local t1_222=$(tc_rule_stats_get $ul1 222 egress)
+
+ local d111=$((t1_111 - t0_111))
+ local d222=$((t1_222 - t0_222))
+ multipath_eval "$what" $weight1 $weight2 $d111 $d222
+
+ ip route replace vrf v$ol1 192.0.2.16/28 \
+ nexthop dev g1a \
+ nexthop dev g1b
+ sysctl_restore net.ipv4.fib_multipath_hash_policy
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.18
+}
+
+multipath_ipv4()
+{
+ log_info "Running IPv4 multipath tests"
+ multipath4_test "ECMP" 1 1
+ multipath4_test "Weighted MP 2:1" 2 1
+ multipath4_test "Weighted MP 11:45" 11 45
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 7b18a53aa556..ca53b539aa2d 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -8,14 +8,21 @@
PING=${PING:=ping}
PING6=${PING6:=ping6}
MZ=${MZ:=mausezahn}
+ARPING=${ARPING:=arping}
+TEAMD=${TEAMD:=teamd}
WAIT_TIME=${WAIT_TIME:=5}
PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
PAUSE_ON_CLEANUP=${PAUSE_ON_CLEANUP:=no}
NETIF_TYPE=${NETIF_TYPE:=veth}
NETIF_CREATE=${NETIF_CREATE:=yes}
-if [[ -f forwarding.config ]]; then
- source forwarding.config
+relative_path="${BASH_SOURCE%/*}"
+if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
+ relative_path="."
+fi
+
+if [[ -f $relative_path/forwarding.config ]]; then
+ source "$relative_path/forwarding.config"
fi
##############################################################################
@@ -28,7 +35,10 @@ check_tc_version()
echo "SKIP: iproute2 too old; tc is missing JSON support"
exit 1
fi
+}
+check_tc_shblock_support()
+{
tc filter help 2>&1 | grep block &> /dev/null
if [[ $? -ne 0 ]]; then
echo "SKIP: iproute2 too old; tc is missing shared block support"
@@ -36,6 +46,15 @@ check_tc_version()
fi
}
+check_tc_chain_support()
+{
+ tc help 2>&1 | grep chain &> /dev/null
+ if [[ $? -ne 0 ]]; then
+ echo "SKIP: iproute2 too old; tc is missing chain support"
+ exit 1
+ fi
+}
+
if [[ "$(id -u)" -ne 0 ]]; then
echo "SKIP: need root privileges"
exit 0
@@ -45,15 +64,18 @@ if [[ "$CHECK_TC" = "yes" ]]; then
check_tc_version
fi
-if [[ ! -x "$(command -v jq)" ]]; then
- echo "SKIP: jq not installed"
- exit 1
-fi
+require_command()
+{
+ local cmd=$1; shift
-if [[ ! -x "$(command -v $MZ)" ]]; then
- echo "SKIP: $MZ not installed"
- exit 1
-fi
+ if [[ ! -x "$(command -v "$cmd")" ]]; then
+ echo "SKIP: $cmd not installed"
+ exit 1
+ fi
+}
+
+require_command jq
+require_command $MZ
if [[ ! -v NUM_NETIFS ]]; then
echo "SKIP: importer does not define \"NUM_NETIFS\""
@@ -151,6 +173,19 @@ check_fail()
fi
}
+check_err_fail()
+{
+ local should_fail=$1; shift
+ local err=$1; shift
+ local what=$1; shift
+
+ if ((should_fail)); then
+ check_fail $err "$what succeeded, but should have failed"
+ else
+ check_err $err "$what failed"
+ fi
+}
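+
+# e.g. as used by the flower scale test, where insertion is expected to
+# fail when the rule count deliberately exceeds the device's capacity:
+#
+#   tc -b $TC_FLOWER_BATCH_FILE
+#   check_err_fail $should_fail $? "Rule insertion"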
+
log_test()
{
local test_name=$1
@@ -185,24 +220,54 @@ log_info()
echo "INFO: $msg"
}
+setup_wait_dev()
+{
+ local dev=$1; shift
+
+ while true; do
+ ip link show dev $dev up \
+ | grep 'state UP' &> /dev/null
+ if [[ $? -ne 0 ]]; then
+ sleep 1
+ else
+ break
+ fi
+ done
+}
+
setup_wait()
{
- for i in $(eval echo {1..$NUM_NETIFS}); do
- while true; do
- ip link show dev ${NETIFS[p$i]} up \
- | grep 'state UP' &> /dev/null
- if [[ $? -ne 0 ]]; then
- sleep 1
- else
- break
- fi
- done
+ local num_netifs=${1:-$NUM_NETIFS}
+
+ for ((i = 1; i <= num_netifs; ++i)); do
+ setup_wait_dev ${NETIFS[p$i]}
done
# Make sure links are ready.
sleep $WAIT_TIME
}
+lldpad_app_wait_set()
+{
+ local dev=$1; shift
+
+ while lldptool -t -i $dev -V APP -c app | grep -q pending; do
+ echo "$dev: waiting for lldpad to push pending APP updates"
+ sleep 5
+ done
+}
+
+lldpad_app_wait_del()
+{
+ # Give lldpad a chance to push down the changes. If the device is downed
+ # too soon, the updates will be left pending. However, they will have
+ # been struck off lldpad's DB already, so we won't be able to tell that
+ # they are pending. On the next test iteration this would cause
+ # weirdness, as newly-added APP rules would conflict with the old ones,
+ # sometimes getting stuck in an "unknown" state.
+ sleep 5
+}
+
pre_cleanup()
{
if [ "${PAUSE_ON_CLEANUP}" = "yes" ]; then
@@ -287,6 +352,29 @@ __addr_add_del()
done
}
+__simple_if_init()
+{
+ local if_name=$1; shift
+ local vrf_name=$1; shift
+ local addrs=("${@}")
+
+ ip link set dev $if_name master $vrf_name
+ ip link set dev $if_name up
+
+ __addr_add_del $if_name add "${addrs[@]}"
+}
+
+__simple_if_fini()
+{
+ local if_name=$1; shift
+ local addrs=("${@}")
+
+ __addr_add_del $if_name del "${addrs[@]}"
+
+ ip link set dev $if_name down
+ ip link set dev $if_name nomaster
+}
+
simple_if_init()
{
local if_name=$1
@@ -298,11 +386,8 @@ simple_if_init()
array=("${@}")
vrf_create $vrf_name
- ip link set dev $if_name master $vrf_name
ip link set dev $vrf_name up
- ip link set dev $if_name up
-
- __addr_add_del $if_name add "${array[@]}"
+ __simple_if_init $if_name $vrf_name "${array[@]}"
}
simple_if_fini()
@@ -315,9 +400,7 @@ simple_if_fini()
vrf_name=v$if_name
array=("${@}")
- __addr_add_del $if_name del "${array[@]}"
-
- ip link set dev $if_name down
+ __simple_if_fini $if_name "${array[@]}"
vrf_destroy $vrf_name
}
@@ -365,6 +448,28 @@ vlan_destroy()
ip link del dev $name
}
+team_create()
+{
+ local if_name=$1; shift
+ local mode=$1; shift
+
+ require_command $TEAMD
+ $TEAMD -t $if_name -d -c '{"runner": {"name": "'$mode'"}}'
+ for slave in "$@"; do
+ ip link set dev $slave down
+ ip link set dev $slave master $if_name
+ ip link set dev $slave up
+ done
+ ip link set dev $if_name up
+}
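+
+# e.g. "team_create lag loadbalance $swp3 $swp4" creates a load-balancing
+# team device named "lag" and enslaves $swp3 and $swp4 to it.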
+
+team_destroy()
+{
+ local if_name=$1; shift
+
+ $TEAMD -t $if_name -k
+}
+
master_name_get()
{
local if_name=$1
@@ -383,9 +488,10 @@ tc_rule_stats_get()
{
local dev=$1; shift
local pref=$1; shift
+ local dir=$1; shift
- tc -j -s filter show dev $dev ingress pref $pref |
- jq '.[1].options.actions[].stats.packets'
+ tc -j -s filter show dev $dev ${dir:-ingress} pref $pref \
+ | jq '.[1].options.actions[].stats.packets'
}
mac_get()
@@ -437,7 +543,9 @@ forwarding_restore()
tc_offload_check()
{
- for i in $(eval echo {1..$NUM_NETIFS}); do
+ local num_netifs=${1:-$NUM_NETIFS}
+
+ for ((i = 1; i <= num_netifs; ++i)); do
ethtool -k ${NETIFS[p$i]} \
| grep "hw-tc-offload: on" &> /dev/null
if [[ $? -ne 0 ]]; then
@@ -453,9 +561,15 @@ trap_install()
local dev=$1; shift
local direction=$1; shift
- # For slow-path testing, we need to install a trap to get to
- # slow path the packets that would otherwise be switched in HW.
- tc filter add dev $dev $direction pref 1 flower skip_sw action trap
+ # Some devices may not support, or may not need, in-hardware trapping
+ # of traffic (e.g. the veth pairs that this library creates for
+ # non-existent loopbacks). Fall back to an "action continue" filter, so
+ # that a filter is still present (some tests check its counters), and so
+ # that other filters are still processed.
+ tc filter add dev $dev $direction pref 1 \
+ flower skip_sw action trap 2>/dev/null \
+ || tc filter add dev $dev $direction pref 1 \
+ flower action continue
}
trap_uninstall()
@@ -463,11 +577,13 @@ trap_uninstall()
local dev=$1; shift
local direction=$1; shift
- tc filter del dev $dev $direction pref 1 flower skip_sw
+ tc filter del dev $dev $direction pref 1 flower
}
slow_path_trap_install()
{
+ # For slow-path testing, we need to install a trap to get to
+ # slow path the packets that would otherwise be switched in HW.
if [ "${tcflags/skip_hw}" != "$tcflags" ]; then
trap_install "$@"
fi
@@ -537,6 +653,48 @@ vlan_capture_uninstall()
__vlan_capture_add_del del 100 "$@"
}
+__dscp_capture_add_del()
+{
+ local add_del=$1; shift
+ local dev=$1; shift
+ local base=$1; shift
+ local dscp;
+
+ for prio in {0..7}; do
+ dscp=$((base + prio))
+ __icmp_capture_add_del $add_del $((dscp + 100)) "" $dev \
+ "skip_hw ip_tos $((dscp << 2))"
+ done
+}
+
+dscp_capture_install()
+{
+ local dev=$1; shift
+ local base=$1; shift
+
+ __dscp_capture_add_del add $dev $base
+}
+
+dscp_capture_uninstall()
+{
+ local dev=$1; shift
+ local base=$1; shift
+
+ __dscp_capture_add_del del $dev $base
+}
+
+dscp_fetch_stats()
+{
+ local dev=$1; shift
+ local base=$1; shift
+
+ for prio in {0..7}; do
+ local dscp=$((base + prio))
+ local t=$(tc_rule_stats_get $dev $((dscp + 100)))
+ echo "[$dscp]=$t "
+ done
+}
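+
+# The output can seed a bash associative array keyed by DSCP value
+# (base + prio), e.g. (within a function, since "local" is used):
+#
+#   eval "local -A t0s=($(dscp_fetch_stats $dev 0))"
+#   echo ${t0s[4]}    # packets captured so far for DSCP 4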
+
matchall_sink_create()
{
local dev=$1; shift
@@ -557,33 +715,86 @@ tests_run()
done
}
+multipath_eval()
+{
+ local desc="$1"
+ local weight_rp12=$2
+ local weight_rp13=$3
+ local packets_rp12=$4
+ local packets_rp13=$5
+ local weights_ratio packets_ratio diff
+
+ RET=0
+
+ if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then
+ weights_ratio=$(echo "scale=2; $weight_rp12 / $weight_rp13" \
+ | bc -l)
+ else
+ weights_ratio=$(echo "scale=2; $weight_rp13 / $weight_rp12" \
+ | bc -l)
+ fi
+
+ if [[ "$packets_rp12" -eq "0" || "$packets_rp13" -eq "0" ]]; then
+ check_err 1 "Packet difference is 0"
+ log_test "Multipath"
+ log_info "Expected ratio $weights_ratio"
+ return
+ fi
+
+ if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then
+ packets_ratio=$(echo "scale=2; $packets_rp12 / $packets_rp13" \
+ | bc -l)
+ else
+ packets_ratio=$(echo "scale=2; $packets_rp13 / $packets_rp12" \
+ | bc -l)
+ fi
+
+ diff=$(echo $weights_ratio - $packets_ratio | bc -l)
+ diff=${diff#-}
+
+ test "$(echo "$diff / $weights_ratio > 0.15" | bc -l)" -eq 0
+ check_err $? "Too large discrepancy between expected and measured ratios"
+ log_test "$desc"
+ log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio"
+}
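+
+# e.g. for a 2:1 weighted route that pushed 620 and 300 packets through
+# the respective paths (hypothetical numbers), weights_ratio=2.00,
+# packets_ratio=2.06 and diff=.06; since .06 / 2.00 = .03 <= 0.15, the
+# measured distribution is accepted.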
+
##############################################################################
# Tests
-ping_test()
+ping_do()
{
local if_name=$1
local dip=$2
local vrf_name
- RET=0
-
vrf_name=$(master_name_get $if_name)
ip vrf exec $vrf_name $PING $dip -c 10 -i 0.1 -w 2 &> /dev/null
+}
+
+ping_test()
+{
+ RET=0
+
+ ping_do $1 $2
check_err $?
log_test "ping"
}
-ping6_test()
+ping6_do()
{
local if_name=$1
local dip=$2
local vrf_name
- RET=0
-
vrf_name=$(master_name_get $if_name)
ip vrf exec $vrf_name $PING6 $dip -c 10 -i 0.1 -w 2 &> /dev/null
+}
+
+ping6_test()
+{
+ RET=0
+
+ ping6_do $1 $2
check_err $?
log_test "ping6"
}
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
new file mode 100755
index 000000000000..c5095da7f6bf
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
@@ -0,0 +1,132 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for "tc action mirred egress mirror" when the underlay route points at a
+# bridge device without vlan filtering (802.1d).
+#
+# This test uses standard topology for testing mirror-to-gretap. See
+# mirror_gre_topo_lib.sh for more details. The full topology is as follows:
+#
+# +---------------------+ +---------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 192.0.2.1/28 | | 192.0.2.2/28 | |
+# +-----|---------------+ +---------------|-----+
+# | |
+# +-----|-------------------------------------------------------------|-----+
+# | SW o---> mirror | |
+# | +---|-------------------------------------------------------------|---+ |
+# | | + $swp1 + br1 (802.1q bridge) $swp2 + | |
+# | +---------------------------------------------------------------------+ |
+# | |
+# | +---------------------------------------------------------------------+ |
+# | | + br2 (802.1d bridge) | |
+# | | 192.0.2.129/28 | |
+# | | + $swp3 2001:db8:2::1/64 | |
+# | +---|-----------------------------------------------------------------+ |
+# | | ^ ^ |
+# | | + gt6 (ip6gretap) | + gt4 (gretap) | |
+# | | : loc=2001:db8:2::1 | : loc=192.0.2.129 | |
+# | | : rem=2001:db8:2::2 -+ : rem=192.0.2.130 -+ |
+# | | : ttl=100 : ttl=100 |
+# | | : tos=inherit : tos=inherit |
+# +-----|---------------------:----------------------:----------------------+
+# | : :
+# +-----|---------------------:----------------------:----------------------+
+# | H3 + $h3 + h3-gt6(ip6gretap) + h3-gt4 (gretap) |
+# | 192.0.2.130/28 loc=2001:db8:2::2 loc=192.0.2.130 |
+# | 2001:db8:2::2/64 rem=2001:db8:2::1 rem=192.0.2.129 |
+# | ttl=100 ttl=100 |
+# | tos=inherit tos=inherit |
+# +-------------------------------------------------------------------------+
+
+ALL_TESTS="
+ test_gretap
+ test_ip6gretap
+"
+
+NUM_NETIFS=6
+source lib.sh
+source mirror_lib.sh
+source mirror_gre_lib.sh
+source mirror_gre_topo_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ vrf_prepare
+ mirror_gre_topo_create
+
+ ip link add name br2 type bridge vlan_filtering 0
+ ip link set dev br2 up
+
+ ip link set dev $swp3 master br2
+ ip route add 192.0.2.130/32 dev br2
+ ip -6 route add 2001:db8:2::2/128 dev br2
+
+ ip address add dev br2 192.0.2.129/28
+ ip address add dev br2 2001:db8:2::1/64
+
+ ip address add dev $h3 192.0.2.130/28
+ ip address add dev $h3 2001:db8:2::2/64
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ip address del dev $h3 2001:db8:2::2/64
+ ip address del dev $h3 192.0.2.130/28
+ ip link del dev br2
+
+ mirror_gre_topo_destroy
+ vrf_cleanup
+}
+
+test_gretap()
+{
+ full_test_span_gre_dir gt4 ingress 8 0 "mirror to gretap"
+ full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap"
+}
+
+test_ip6gretap()
+{
+ full_test_span_gre_dir gt6 ingress 8 0 "mirror to ip6gretap"
+ full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap"
+}
+
+test_all()
+{
+ slow_path_trap_install $swp1 ingress
+ slow_path_trap_install $swp1 egress
+
+ tests_run
+
+ slow_path_trap_uninstall $swp1 egress
+ slow_path_trap_uninstall $swp1 ingress
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tcflags="skip_hw"
+test_all
+
+if ! tc_offload_check; then
+ echo "WARN: Could not test offloaded functionality"
+else
+ tcflags="skip_sw"
+ test_all
+fi
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
index 3bb4c2ba7b14..197e769c2ed1 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
@@ -74,12 +74,14 @@ test_vlan_match()
test_gretap()
{
- test_vlan_match gt4 'vlan_id 555 vlan_ethtype ip' "mirror to gretap"
+ test_vlan_match gt4 'skip_hw vlan_id 555 vlan_ethtype ip' \
+ "mirror to gretap"
}
test_ip6gretap()
{
- test_vlan_match gt6 'vlan_id 555 vlan_ethtype ipv6' "mirror to ip6gretap"
+ test_vlan_match gt6 'skip_hw vlan_id 555 vlan_ethtype ipv6' \
+ "mirror to ip6gretap"
}
test_gretap_stp()
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
new file mode 100755
index 000000000000..a3402cd8d5b6
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
@@ -0,0 +1,126 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for "tc action mirred egress mirror" when the underlay route points at a
+# bridge device with vlan filtering (802.1q).
+#
+# This test uses standard topology for testing mirror-to-gretap. See
+# mirror_gre_topo_lib.sh for more details. The full topology is as follows:
+#
+# +---------------------+ +---------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 192.0.2.1/28 | | 192.0.2.2/28 | |
+# +-----|---------------+ +---------------|-----+
+# | |
+# +-----|---------------------------------------------------------------|-----+
+# | SW o---> mirror | |
+# | +---|---------------------------------------------------------------|---+ |
+# | | + $swp1 + br1 (802.1q bridge) $swp2 + | |
+# | | 192.0.2.129/28 | |
+# | | + $swp3 2001:db8:2::1/64 | |
+# | | | vid555 vid555[pvid,untagged] | |
+# | +---|-------------------------------------------------------------------+ |
+# | | ^ ^ |
+# | | + gt6 (ip6gretap) | + gt4 (gretap) | |
+# | | : loc=2001:db8:2::1 | : loc=192.0.2.129 | |
+# | | : rem=2001:db8:2::2 -+ : rem=192.0.2.130 -+ |
+# | | : ttl=100 : ttl=100 |
+# | | : tos=inherit : tos=inherit |
+# +-----|---------------------:------------------------:----------------------+
+# | : :
+# +-----|---------------------:------------------------:----------------------+
+# | H3 + $h3 + h3-gt6(ip6gretap) + h3-gt4 (gretap) |
+# | | loc=2001:db8:2::2 loc=192.0.2.130 |
+# | + $h3.555 rem=2001:db8:2::1 rem=192.0.2.129 |
+# | 192.0.2.130/28 ttl=100 ttl=100 |
+# | 2001:db8:2::2/64 tos=inherit tos=inherit |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ test_gretap
+ test_ip6gretap
+"
+
+NUM_NETIFS=6
+source lib.sh
+source mirror_lib.sh
+source mirror_gre_lib.sh
+source mirror_gre_topo_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ vrf_prepare
+ mirror_gre_topo_create
+
+ ip link set dev $swp3 master br1
+ bridge vlan add dev br1 vid 555 pvid untagged self
+ ip address add dev br1 192.0.2.129/28
+ ip address add dev br1 2001:db8:2::1/64
+
+ ip -4 route add 192.0.2.130/32 dev br1
+ ip -6 route add 2001:db8:2::2/128 dev br1
+
+ vlan_create $h3 555 v$h3 192.0.2.130/28 2001:db8:2::2/64
+ bridge vlan add dev $swp3 vid 555
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ip link set dev $swp3 nomaster
+ vlan_destroy $h3 555
+
+ mirror_gre_topo_destroy
+ vrf_cleanup
+}
+
+test_gretap()
+{
+ full_test_span_gre_dir gt4 ingress 8 0 "mirror to gretap"
+ full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap"
+}
+
+test_ip6gretap()
+{
+ full_test_span_gre_dir gt6 ingress 8 0 "mirror to ip6gretap"
+ full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap"
+}
+
+tests()
+{
+ slow_path_trap_install $swp1 ingress
+ slow_path_trap_install $swp1 egress
+
+ tests_run
+
+ slow_path_trap_uninstall $swp1 egress
+ slow_path_trap_uninstall $swp1 ingress
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tcflags="skip_hw"
+tests
+
+if ! tc_offload_check; then
+ echo "WARN: Could not test offloaded functionality"
+else
+ tcflags="skip_sw"
+ tests
+fi
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
new file mode 100755
index 000000000000..61844caf671e
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
@@ -0,0 +1,283 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for "tc action mirred egress mirror" when the underlay route points at a
+# bridge device with vlan filtering (802.1q), and the egress device is a team
+# device.
+#
+# +----------------------+ +----------------------+
+# | H1 | | H2 |
+# | + $h1.333 | | $h1.555 + |
+# | | 192.0.2.1/28 | | 192.0.2.18/28 | |
+# +-----|----------------+ +----------------|-----+
+# | $h1 |
+# +--------------------------------+------------------------------+
+# |
+# +--------------------------------------|------------------------------------+
+# | SW o---> mirror |
+# | | |
+# | +--------------------------------+------------------------------+ |
+# | | $swp1 | |
+# | + $swp1.333 $swp1.555 + |
+# | 192.0.2.2/28 192.0.2.17/28 |
+# | |
+# | +-----------------------------------------------------------------------+ |
+# | | BR1 (802.1q) | |
+# | | + lag (team) 192.0.2.129/28 | |
+# | | / \ 2001:db8:2::1/64 | |
+# | +---/---\---------------------------------------------------------------+ |
+# | / \ ^ |
+# | | \ + gt4 (gretap) | |
+# | | \ loc=192.0.2.129 | |
+# | | \ rem=192.0.2.130 -+ |
+# | | \ ttl=100 |
+# | | \ tos=inherit |
+# | | \ |
+# | | \_________________________________ |
+# | | \ |
+# | + $swp3 + $swp4 |
+# +---|------------------------------------------------|----------------------+
+# | |
+# +---|----------------------+ +---|----------------------+
+# | + $h3 H3 | | + $h4 H4 |
+# | 192.0.2.130/28 | | 192.0.2.130/28 |
+# | 2001:db8:2::2/64 | | 2001:db8:2::2/64 |
+# +--------------------------+ +--------------------------+
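+#
+# The mirroring rule under test is installed via mirror_install() from
+# mirror_lib.sh; with the match test_lag_slave() below passes in, it
+# expands to roughly (a sketch; pref and $tcflags elided):
+#
+#   tc filter add dev $swp1 ingress proto 802.1q flower vlan_id 333 \
+#      action mirred egress mirror dev gt4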
+
+ALL_TESTS="
+ test_mirror_gretap_first
+ test_mirror_gretap_second
+"
+
+NUM_NETIFS=6
+source lib.sh
+source mirror_lib.sh
+source mirror_gre_lib.sh
+
+require_command $ARPING
+
+vlan_host_create()
+{
+ local if_name=$1; shift
+ local vid=$1; shift
+ local vrf_name=$1; shift
+ local ips=("${@}")
+
+ vrf_create $vrf_name
+ ip link set dev $vrf_name up
+ vlan_create $if_name $vid $vrf_name "${ips[@]}"
+}
+
+vlan_host_destroy()
+{
+ local if_name=$1; shift
+ local vid=$1; shift
+ local vrf_name=$1; shift
+
+ vlan_destroy $if_name $vid
+ ip link set dev $vrf_name down
+ vrf_destroy $vrf_name
+}
+
+h1_create()
+{
+ vlan_host_create $h1 333 vrf-h1 192.0.2.1/28
+ ip -4 route add 192.0.2.16/28 vrf vrf-h1 nexthop via 192.0.2.2
+}
+
+h1_destroy()
+{
+ ip -4 route del 192.0.2.16/28 vrf vrf-h1
+ vlan_host_destroy $h1 333 vrf-h1
+}
+
+h2_create()
+{
+ vlan_host_create $h1 555 vrf-h2 192.0.2.18/28
+ ip -4 route add 192.0.2.0/28 vrf vrf-h2 nexthop via 192.0.2.17
+}
+
+h2_destroy()
+{
+ ip -4 route del 192.0.2.0/28 vrf vrf-h2
+ vlan_host_destroy $h1 555 vrf-h2
+}
+
+h3_create()
+{
+ simple_if_init $h3 192.0.2.130/28
+ tc qdisc add dev $h3 clsact
+}
+
+h3_destroy()
+{
+ tc qdisc del dev $h3 clsact
+ simple_if_fini $h3 192.0.2.130/28
+}
+
+h4_create()
+{
+ simple_if_init $h4 192.0.2.130/28
+ tc qdisc add dev $h4 clsact
+}
+
+h4_destroy()
+{
+ tc qdisc del dev $h4 clsact
+ simple_if_fini $h4 192.0.2.130/28
+}
+
+switch_create()
+{
+ ip link set dev $swp1 up
+ tc qdisc add dev $swp1 clsact
+ vlan_create $swp1 333 "" 192.0.2.2/28
+ vlan_create $swp1 555 "" 192.0.2.17/28
+
+ tunnel_create gt4 gretap 192.0.2.129 192.0.2.130 \
+ ttl 100 tos inherit
+
+ ip link set dev $swp3 up
+ ip link set dev $swp4 up
+
+ ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 up
+ __addr_add_del br1 add 192.0.2.129/32
+ ip -4 route add 192.0.2.130/32 dev br1
+
+ team_create lag loadbalance $swp3 $swp4
+ ip link set dev lag master br1
+}
+
+switch_destroy()
+{
+ ip link set dev lag nomaster
+ team_destroy lag
+
+ ip -4 route del 192.0.2.130/32 dev br1
+ __addr_add_del br1 del 192.0.2.129/32
+ ip link set dev br1 down
+ ip link del dev br1
+
+ ip link set dev $swp4 down
+ ip link set dev $swp3 down
+
+ tunnel_destroy gt4
+
+ vlan_destroy $swp1 555
+ vlan_destroy $swp1 333
+ tc qdisc del dev $swp1 clsact
+ ip link set dev $swp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp3=${NETIFS[p3]}
+ h3=${NETIFS[p4]}
+
+ swp4=${NETIFS[p5]}
+ h4=${NETIFS[p6]}
+
+ vrf_prepare
+
+ ip link set dev $h1 up
+ h1_create
+ h2_create
+ h3_create
+ h4_create
+ switch_create
+
+ trap_install $h3 ingress
+ trap_install $h4 ingress
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ trap_uninstall $h4 ingress
+ trap_uninstall $h3 ingress
+
+ switch_destroy
+ h4_destroy
+ h3_destroy
+ h2_destroy
+ h1_destroy
+ ip link set dev $h1 down
+
+ vrf_cleanup
+}
+
+test_lag_slave()
+{
+ local host_dev=$1; shift
+ local up_dev=$1; shift
+ local down_dev=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ mirror_install $swp1 ingress gt4 \
+ "proto 802.1q flower vlan_id 333 $tcflags"
+
+ # Test connectivity through $up_dev when $down_dev is set down.
+ ip link set dev $down_dev down
+ setup_wait_dev $up_dev
+ setup_wait_dev $host_dev
+ $ARPING -I br1 192.0.2.130 -qfc 1
+ sleep 2
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 $host_dev 1 10
+
+ # Test lack of connectivity when both slaves are down.
+ ip link set dev $up_dev down
+ sleep 2
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h3 1 0
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h4 1 0
+
+ ip link set dev $up_dev up
+ ip link set dev $down_dev up
+ mirror_uninstall $swp1 ingress
+
+ log_test "$what ($tcflags)"
+}
+
+test_mirror_gretap_first()
+{
+ test_lag_slave $h3 $swp3 $swp4 "mirror to gretap: LAG first slave"
+}
+
+test_mirror_gretap_second()
+{
+ test_lag_slave $h4 $swp4 $swp3 "mirror to gretap: LAG second slave"
+}
+
+test_all()
+{
+ slow_path_trap_install $swp1 ingress
+ slow_path_trap_install $swp1 egress
+
+ tests_run
+
+ slow_path_trap_uninstall $swp1 egress
+ slow_path_trap_uninstall $swp1 ingress
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tcflags="skip_hw"
+test_all
+
+if ! tc_offload_check; then
+ echo "WARN: Could not test offloaded functionality"
+else
+ tcflags="skip_sw"
+ test_all
+fi
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
index aa29d46186a8..135902aa8b11 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
@@ -122,15 +122,8 @@ test_span_gre_egress_up()
# After setting the device up, wait for neighbor to get resolved so that
# we can expect mirroring to work.
ip link set dev $swp3 up
- while true; do
- ip neigh sh dev $swp3 $remote_ip nud reachable |
- grep -q ^
- if [[ $? -ne 0 ]]; then
- sleep 1
- else
- break
- fi
- done
+ setup_wait_dev $swp3
+ ping -c 1 -I $swp3 $remote_ip &>/dev/null
quick_test_span_gre_dir $tundev ingress
mirror_uninstall $swp1 ingress
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh
new file mode 100755
index 000000000000..9edf4cb104a8
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh
@@ -0,0 +1,294 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for "tc action mirred egress mirror" when the underlay route points at a
+# team device.
+#
+# +----------------------+ +----------------------+
+# | H1 | | H2 |
+# | + $h1.333 | | $h1.555 + |
+# | | 192.0.2.1/28 | | 192.0.2.18/28 | |
+# +----|-----------------+ +----------------|-----+
+# | $h1 |
+# +---------------------------------+------------------------------+
+# |
+# +--------------------------------------|------------------------------------+
+# | SW o---> mirror |
+# | | |
+# | +----------------------------------+------------------------------+ |
+# | | $swp1 | |
+# | + $swp1.333 $swp1.555 + |
+# | 192.0.2.2/28 192.0.2.17/28 |
+# | |
+# | |
+# | + gt4 (gretap) ,-> + lag1 (team) |
+# | loc=192.0.2.129 | | 192.0.2.129/28 |
+# | rem=192.0.2.130 --' | |
+# | ttl=100 | |
+# | tos=inherit | |
+# | _____________________|______________________ |
+# | / \ |
+# | / \ |
+# | + $swp3 + $swp4 |
+# +---|------------------------------------------------|----------------------+
+# | |
+# +---|------------------------------------------------|----------------------+
+# | + $h3 + $h4 H3 |
+# | \ / |
+# | \____________________________________________/ |
+# | | |
+# | + lag2 (team) |
+# | 192.0.2.130/28 |
+# | |
+# +---------------------------------------------------------------------------+
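+#
+# The gretap underlay resolves through the /32 route installed by
+# switch_create() below; a sketch of the relevant commands:
+#
+#   team_create lag1 lacp $swp3 $swp4
+#   __addr_add_del lag1 add 192.0.2.129/32
+#   ip -4 route add 192.0.2.130/32 dev lag1
+#
+# Encapsulated packets thus egress whichever slave the LACP runner picks.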
+
+ALL_TESTS="
+ test_mirror_gretap_first
+ test_mirror_gretap_second
+"
+
+NUM_NETIFS=6
+source lib.sh
+source mirror_lib.sh
+source mirror_gre_lib.sh
+
+require_command $ARPING
+
+vlan_host_create()
+{
+ local if_name=$1; shift
+ local vid=$1; shift
+ local vrf_name=$1; shift
+ local ips=("${@}")
+
+ vrf_create $vrf_name
+ ip link set dev $vrf_name up
+ vlan_create $if_name $vid $vrf_name "${ips[@]}"
+}
+
+vlan_host_destroy()
+{
+ local if_name=$1; shift
+ local vid=$1; shift
+ local vrf_name=$1; shift
+
+ vlan_destroy $if_name $vid
+ ip link set dev $vrf_name down
+ vrf_destroy $vrf_name
+}
+
+h1_create()
+{
+ vlan_host_create $h1 333 vrf-h1 192.0.2.1/28
+ ip -4 route add 192.0.2.16/28 vrf vrf-h1 nexthop via 192.0.2.2
+}
+
+h1_destroy()
+{
+ ip -4 route del 192.0.2.16/28 vrf vrf-h1
+ vlan_host_destroy $h1 333 vrf-h1
+}
+
+h2_create()
+{
+ vlan_host_create $h1 555 vrf-h2 192.0.2.18/28
+ ip -4 route add 192.0.2.0/28 vrf vrf-h2 nexthop via 192.0.2.17
+}
+
+h2_destroy()
+{
+ ip -4 route del 192.0.2.0/28 vrf vrf-h2
+ vlan_host_destroy $h1 555 vrf-h2
+}
+
+h3_create_team()
+{
+ team_create lag2 lacp $h3 $h4
+ __simple_if_init lag2 vrf-h3 192.0.2.130/32
+ ip -4 route add vrf vrf-h3 192.0.2.129/32 dev lag2
+}
+
+h3_destroy_team()
+{
+ ip -4 route del vrf vrf-h3 192.0.2.129/32 dev lag2
+ __simple_if_fini lag2 192.0.2.130/32
+ team_destroy lag2
+
+ ip link set dev $h3 down
+ ip link set dev $h4 down
+}
+
+h3_create()
+{
+ vrf_create vrf-h3
+ ip link set dev vrf-h3 up
+ tc qdisc add dev $h3 clsact
+ tc qdisc add dev $h4 clsact
+ h3_create_team
+}
+
+h3_destroy()
+{
+ h3_destroy_team
+ tc qdisc del dev $h4 clsact
+ tc qdisc del dev $h3 clsact
+ ip link set dev vrf-h3 down
+ vrf_destroy vrf-h3
+}
+
+switch_create()
+{
+ ip link set dev $swp1 up
+ tc qdisc add dev $swp1 clsact
+ vlan_create $swp1 333 "" 192.0.2.2/28
+ vlan_create $swp1 555 "" 192.0.2.17/28
+
+ tunnel_create gt4 gretap 192.0.2.129 192.0.2.130 \
+ ttl 100 tos inherit
+
+ ip link set dev $swp3 up
+ ip link set dev $swp4 up
+ team_create lag1 lacp $swp3 $swp4
+ __addr_add_del lag1 add 192.0.2.129/32
+ ip -4 route add 192.0.2.130/32 dev lag1
+}
+
+switch_destroy()
+{
+ ip -4 route del 192.0.2.130/32 dev lag1
+ __addr_add_del lag1 del 192.0.2.129/32
+ team_destroy lag1
+
+ ip link set dev $swp4 down
+ ip link set dev $swp3 down
+
+ tunnel_destroy gt4
+
+ vlan_destroy $swp1 555
+ vlan_destroy $swp1 333
+ tc qdisc del dev $swp1 clsact
+ ip link set dev $swp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp3=${NETIFS[p3]}
+ h3=${NETIFS[p4]}
+
+ swp4=${NETIFS[p5]}
+ h4=${NETIFS[p6]}
+
+ vrf_prepare
+
+ ip link set dev $h1 up
+ h1_create
+ h2_create
+ h3_create
+ switch_create
+
+ trap_install $h3 ingress
+ trap_install $h4 ingress
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ trap_uninstall $h4 ingress
+ trap_uninstall $h3 ingress
+
+ switch_destroy
+ h3_destroy
+ h2_destroy
+ h1_destroy
+ ip link set dev $h1 down
+
+ vrf_cleanup
+}
+
+test_lag_slave()
+{
+ local up_dev=$1; shift
+ local down_dev=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ mirror_install $swp1 ingress gt4 \
+ "proto 802.1q flower vlan_id 333 $tcflags"
+
+	# Move $down_dev away from the team. That will prompt a change in the
+	# txability of the connected device, without changing its upness. The
+ # driver should notice the txability change and move the traffic to the
+ # other slave.
+ ip link set dev $down_dev nomaster
+ sleep 2
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 $up_dev 1 10
+
+ # Test lack of connectivity when neither slave is txable.
+ ip link set dev $up_dev nomaster
+ sleep 2
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h3 1 0
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h4 1 0
+ mirror_uninstall $swp1 ingress
+
+ # Recreate H3's team device, because mlxsw, which this test is
+	# predominantly meant to test, requires a bottom-up construction and
+ # doesn't allow enslavement to a device that already has an upper.
+ h3_destroy_team
+ h3_create_team
+ # Wait for ${h,swp}{3,4}.
+ setup_wait
+
+ log_test "$what ($tcflags)"
+}
+
+test_mirror_gretap_first()
+{
+ test_lag_slave $h3 $h4 "mirror to gretap: LAG first slave"
+}
+
+test_mirror_gretap_second()
+{
+ test_lag_slave $h4 $h3 "mirror to gretap: LAG second slave"
+}
+
+test_all()
+{
+ slow_path_trap_install $swp1 ingress
+ slow_path_trap_install $swp1 egress
+
+ tests_run
+
+ slow_path_trap_uninstall $swp1 egress
+ slow_path_trap_uninstall $swp1 ingress
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tcflags="skip_hw"
+test_all
+
+if ! tc_offload_check; then
+ echo "WARN: Could not test offloaded functionality"
+else
+ tcflags="skip_sw"
+ test_all
+fi
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
index 619b469365be..fac486178ef7 100644
--- a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-source mirror_lib.sh
+source "$relative_path/mirror_lib.sh"
quick_test_span_gre_dir_ips()
{
@@ -62,7 +62,7 @@ full_test_span_gre_dir_vlan_ips()
"$backward_type" "$ip1" "$ip2"
tc filter add dev $h3 ingress pref 77 prot 802.1q \
- flower $vlan_match ip_proto 0x2f \
+ flower $vlan_match \
action pass
mirror_test v$h1 $ip1 $ip2 $h3 77 10
tc filter del dev $h3 ingress pref 77
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
index 8fa681eb90e7..6f9ef1820e93 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
@@ -35,6 +35,8 @@ setup_prepare()
vrf_prepare
mirror_gre_topo_create
+ sysctl_set net.ipv4.conf.v$h3.rp_filter 0
+
ip address add dev $swp3 192.0.2.161/28
ip address add dev $h3 192.0.2.162/28
ip address add dev gt4 192.0.2.129/32
@@ -61,6 +63,8 @@ cleanup()
ip address del dev $h3 192.0.2.162/28
ip address del dev $swp3 192.0.2.161/28
+	sysctl_restore net.ipv4.conf.v$h3.rp_filter
+
mirror_gre_topo_destroy
vrf_cleanup
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
index 253419564708..39c03e2867f4 100644
--- a/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
@@ -33,7 +33,7 @@
# | |
# +-------------------------------------------------------------------------+
-source mirror_topo_lib.sh
+source "$relative_path/mirror_topo_lib.sh"
mirror_gre_topo_h3_create()
{
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
index 5dbc7a08f4bd..204b25f13934 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
@@ -28,6 +28,8 @@ source mirror_lib.sh
source mirror_gre_lib.sh
source mirror_gre_topo_lib.sh
+require_command $ARPING
+
setup_prepare()
{
h1=${NETIFS[p1]}
@@ -39,6 +41,12 @@ setup_prepare()
swp3=${NETIFS[p5]}
h3=${NETIFS[p6]}
+ # gt4's remote address is at $h3.555, not $h3. Thus the packets arriving
+	# directly at $h3 for test_gretap_untagged_egress() are rejected by
+ # rp_filter and the test spuriously fails.
+ sysctl_set net.ipv4.conf.all.rp_filter 0
+ sysctl_set net.ipv4.conf.$h3.rp_filter 0
+
vrf_prepare
mirror_gre_topo_create
@@ -65,6 +73,9 @@ cleanup()
mirror_gre_topo_destroy
vrf_cleanup
+
+ sysctl_restore net.ipv4.conf.$h3.rp_filter
+ sysctl_restore net.ipv4.conf.all.rp_filter
}
test_vlan_match()
@@ -79,12 +90,14 @@ test_vlan_match()
test_gretap()
{
- test_vlan_match gt4 'vlan_id 555 vlan_ethtype ip' "mirror to gretap"
+ test_vlan_match gt4 'skip_hw vlan_id 555 vlan_ethtype ip' \
+ "mirror to gretap"
}
test_ip6gretap()
{
- test_vlan_match gt6 'vlan_id 555 vlan_ethtype ipv6' "mirror to ip6gretap"
+	test_vlan_match gt6 'skip_hw vlan_id 555 vlan_ethtype ipv6' \
+ "mirror to ip6gretap"
}
test_span_gre_forbidden_cpu()
@@ -138,7 +151,7 @@ test_span_gre_forbidden_egress()
bridge vlan add dev $swp3 vid 555
# Re-prime FDB
- arping -I br1.555 192.0.2.130 -fqc 1
+ $ARPING -I br1.555 192.0.2.130 -fqc 1
sleep 1
quick_test_span_gre_dir $tundev ingress
@@ -212,7 +225,7 @@ test_span_gre_fdb_roaming()
bridge fdb del dev $swp2 $h3mac vlan 555 master
# Re-prime FDB
- arping -I br1.555 192.0.2.130 -fqc 1
+ $ARPING -I br1.555 192.0.2.130 -fqc 1
sleep 1
quick_test_span_gre_dir $tundev ingress
diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
index d36dc26c6c51..07991e1025c7 100644
--- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
@@ -105,7 +105,7 @@ do_test_span_vlan_dir_ips()
# Install the capture as skip_hw to avoid double-counting of packets.
# The traffic is meant for local box anyway, so will be trapped to
# kernel.
- vlan_capture_install $dev "skip_hw vlan_id $vid"
+ vlan_capture_install $dev "skip_hw vlan_id $vid vlan_ethtype ip"
mirror_test v$h1 $ip1 $ip2 $dev 100 $expect
mirror_test v$h2 $ip2 $ip1 $dev 100 $expect
vlan_capture_uninstall $dev
diff --git a/tools/testing/selftests/net/forwarding/router_bridge.sh b/tools/testing/selftests/net/forwarding/router_bridge.sh
new file mode 100755
index 000000000000..ebc596a272f7
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/router_bridge.sh
@@ -0,0 +1,113 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+"
+NUM_NETIFS=4
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+ ip -4 route add 192.0.2.128/28 vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add 2001:db8:2::/64 vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip -6 route del 2001:db8:2::/64 vrf v$h1
+ ip -4 route del 192.0.2.128/28 vrf v$h1
+ simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.130/28 2001:db8:2::2/64
+ ip -4 route add 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.129
+ ip -6 route add 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:2::1
+}
+
+h2_destroy()
+{
+ ip -6 route del 2001:db8:1::/64 vrf v$h2
+ ip -4 route del 192.0.2.0/28 vrf v$h2
+ simple_if_fini $h2 192.0.2.130/28 2001:db8:2::2/64
+}
+
+router_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 up
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ __addr_add_del br1 add 192.0.2.2/28 2001:db8:1::2/64
+
+ ip link set dev $swp2 up
+ __addr_add_del $swp2 add 192.0.2.129/28 2001:db8:2::1/64
+}
+
+router_destroy()
+{
+ __addr_add_del $swp2 del 192.0.2.129/28 2001:db8:2::1/64
+ ip link set dev $swp2 down
+
+ __addr_add_del br1 del 192.0.2.2/28 2001:db8:1::2/64
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ ip link del dev br1
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+
+ router_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ router_destroy
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.130
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::2
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
new file mode 100755
index 000000000000..fef88eb4b873
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
@@ -0,0 +1,132 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ vlan
+"
+NUM_NETIFS=4
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1
+ vlan_create $h1 555 v$h1 192.0.2.1/28 2001:db8:1::1/64
+ ip -4 route add 192.0.2.128/28 vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add 2001:db8:2::/64 vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip -6 route del 2001:db8:2::/64 vrf v$h1
+ ip -4 route del 192.0.2.128/28 vrf v$h1
+ vlan_destroy $h1 555
+ simple_if_fini $h1
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.130/28 2001:db8:2::2/64
+ ip -4 route add 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.129
+ ip -6 route add 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:2::1
+}
+
+h2_destroy()
+{
+ ip -6 route del 2001:db8:1::/64 vrf v$h2
+ ip -4 route del 192.0.2.0/28 vrf v$h2
+	simple_if_fini $h2 192.0.2.130/28 2001:db8:2::2/64
+}
+
+router_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 up
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+
+ bridge vlan add dev br1 vid 555 self pvid untagged
+ bridge vlan add dev $swp1 vid 555
+
+ __addr_add_del br1 add 192.0.2.2/28 2001:db8:1::2/64
+
+ ip link set dev $swp2 up
+ __addr_add_del $swp2 add 192.0.2.129/28 2001:db8:2::1/64
+}
+
+router_destroy()
+{
+ __addr_add_del $swp2 del 192.0.2.129/28 2001:db8:2::1/64
+ ip link set dev $swp2 down
+
+ __addr_add_del br1 del 192.0.2.2/28 2001:db8:1::2/64
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ ip link del dev br1
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+
+ router_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ router_destroy
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+vlan()
+{
+ RET=0
+
+ bridge vlan add dev br1 vid 333 self
+ check_err $? "Can't add a non-PVID VLAN"
+ bridge vlan del dev br1 vid 333 self
+ check_err $? "Can't remove a non-PVID VLAN"
+
+ log_test "vlan"
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.130
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::2
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/router_broadcast.sh b/tools/testing/selftests/net/forwarding/router_broadcast.sh
new file mode 100755
index 000000000000..7bd2ebb6e9de
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/router_broadcast.sh
@@ -0,0 +1,239 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="ping_ipv4"
+NUM_NETIFS=6
+source lib.sh
+
+h1_create()
+{
+ vrf_create "vrf-h1"
+ ip link set dev $h1 master vrf-h1
+
+ ip link set dev vrf-h1 up
+ ip link set dev $h1 up
+
+ ip address add 192.0.2.2/24 dev $h1
+
+ ip route add 198.51.100.0/24 vrf vrf-h1 nexthop via 192.0.2.1
+ ip route add 198.51.200.0/24 vrf vrf-h1 nexthop via 192.0.2.1
+}
+
+h1_destroy()
+{
+ ip route del 198.51.200.0/24 vrf vrf-h1
+ ip route del 198.51.100.0/24 vrf vrf-h1
+
+ ip address del 192.0.2.2/24 dev $h1
+
+ ip link set dev $h1 down
+ vrf_destroy "vrf-h1"
+}
+
+h2_create()
+{
+ vrf_create "vrf-h2"
+ ip link set dev $h2 master vrf-h2
+
+ ip link set dev vrf-h2 up
+ ip link set dev $h2 up
+
+ ip address add 198.51.100.2/24 dev $h2
+
+ ip route add 192.0.2.0/24 vrf vrf-h2 nexthop via 198.51.100.1
+ ip route add 198.51.200.0/24 vrf vrf-h2 nexthop via 198.51.100.1
+}
+
+h2_destroy()
+{
+ ip route del 198.51.200.0/24 vrf vrf-h2
+ ip route del 192.0.2.0/24 vrf vrf-h2
+
+ ip address del 198.51.100.2/24 dev $h2
+
+ ip link set dev $h2 down
+ vrf_destroy "vrf-h2"
+}
+
+h3_create()
+{
+ vrf_create "vrf-h3"
+ ip link set dev $h3 master vrf-h3
+
+ ip link set dev vrf-h3 up
+ ip link set dev $h3 up
+
+ ip address add 198.51.200.2/24 dev $h3
+
+ ip route add 192.0.2.0/24 vrf vrf-h3 nexthop via 198.51.200.1
+ ip route add 198.51.100.0/24 vrf vrf-h3 nexthop via 198.51.200.1
+}
+
+h3_destroy()
+{
+ ip route del 198.51.100.0/24 vrf vrf-h3
+ ip route del 192.0.2.0/24 vrf vrf-h3
+
+ ip address del 198.51.200.2/24 dev $h3
+
+ ip link set dev $h3 down
+ vrf_destroy "vrf-h3"
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link set dev $rp2 up
+ ip link set dev $rp3 up
+
+ ip address add 192.0.2.1/24 dev $rp1
+
+ ip address add 198.51.100.1/24 dev $rp2
+ ip address add 198.51.200.1/24 dev $rp3
+}
+
+router_destroy()
+{
+ ip address del 198.51.200.1/24 dev $rp3
+ ip address del 198.51.100.1/24 dev $rp2
+
+ ip address del 192.0.2.1/24 dev $rp1
+
+ ip link set dev $rp3 down
+ ip link set dev $rp2 down
+ ip link set dev $rp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ h3_create
+
+ router_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ router_destroy
+
+ h3_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+bc_forwarding_disable()
+{
+ sysctl_set net.ipv4.conf.all.bc_forwarding 0
+ sysctl_set net.ipv4.conf.$rp1.bc_forwarding 0
+}
+
+bc_forwarding_enable()
+{
+ sysctl_set net.ipv4.conf.all.bc_forwarding 1
+ sysctl_set net.ipv4.conf.$rp1.bc_forwarding 1
+}
+
+bc_forwarding_restore()
+{
+ sysctl_restore net.ipv4.conf.$rp1.bc_forwarding
+ sysctl_restore net.ipv4.conf.all.bc_forwarding
+}
+
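+# Ping a broadcast address from inside a host's VRF and grep the replies
+# for the address expected to answer: with bc_forwarding disabled the
+# router replies itself; with it enabled, hosts on the target net do.
+# The resulting invocation is roughly (simplified from ping_test_from()):
+#
+#   ip vrf exec vrf-h1 ping -I $h1 198.51.100.255 -c 10 -i 0.1 -w 2 -b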
+ping_test_from()
+{
+ local oif=$1
+ local dip=$2
+ local from=$3
+ local fail=${4:-0}
+
+ RET=0
+
+ log_info "ping $dip, expected reply from $from"
+ ip vrf exec $(master_name_get $oif) \
+ $PING -I $oif $dip -c 10 -i 0.1 -w 2 -b 2>&1 | grep $from &> /dev/null
+ check_err_fail $fail $?
+}
+
+ping_ipv4()
+{
+ sysctl_set net.ipv4.icmp_echo_ignore_broadcasts 0
+
+ bc_forwarding_disable
+ log_info "bc_forwarding disabled on r1 =>"
+ ping_test_from $h1 198.51.100.255 192.0.2.1
+ log_test "h1 -> net2: reply from r1 (not forwarding)"
+ ping_test_from $h1 198.51.200.255 192.0.2.1
+ log_test "h1 -> net3: reply from r1 (not forwarding)"
+ ping_test_from $h1 192.0.2.255 192.0.2.1
+ log_test "h1 -> net1: reply from r1 (not dropping)"
+ ping_test_from $h1 255.255.255.255 192.0.2.1
+ log_test "h1 -> 255.255.255.255: reply from r1 (not forwarding)"
+
+ ping_test_from $h2 192.0.2.255 198.51.100.1
+ log_test "h2 -> net1: reply from r1 (not forwarding)"
+ ping_test_from $h2 198.51.200.255 198.51.100.1
+ log_test "h2 -> net3: reply from r1 (not forwarding)"
+ ping_test_from $h2 198.51.100.255 198.51.100.1
+ log_test "h2 -> net2: reply from r1 (not dropping)"
+ ping_test_from $h2 255.255.255.255 198.51.100.1
+ log_test "h2 -> 255.255.255.255: reply from r1 (not forwarding)"
+ bc_forwarding_restore
+
+ bc_forwarding_enable
+ log_info "bc_forwarding enabled on r1 =>"
+ ping_test_from $h1 198.51.100.255 198.51.100.2
+ log_test "h1 -> net2: reply from h2 (forwarding)"
+ ping_test_from $h1 198.51.200.255 198.51.200.2
+ log_test "h1 -> net3: reply from h3 (forwarding)"
+ ping_test_from $h1 192.0.2.255 192.0.2.1 1
+ log_test "h1 -> net1: no reply (dropping)"
+ ping_test_from $h1 255.255.255.255 192.0.2.1
+ log_test "h1 -> 255.255.255.255: reply from r1 (not forwarding)"
+
+ ping_test_from $h2 192.0.2.255 192.0.2.2
+ log_test "h2 -> net1: reply from h1 (forwarding)"
+ ping_test_from $h2 198.51.200.255 198.51.200.2
+ log_test "h2 -> net3: reply from h3 (forwarding)"
+ ping_test_from $h2 198.51.100.255 198.51.100.1 1
+ log_test "h2 -> net2: no reply (dropping)"
+ ping_test_from $h2 255.255.255.255 198.51.100.1
+ log_test "h2 -> 255.255.255.255: reply from r1 (not forwarding)"
+ bc_forwarding_restore
+
+ sysctl_restore net.ipv4.icmp_echo_ignore_broadcasts
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/router_multipath.sh b/tools/testing/selftests/net/forwarding/router_multipath.sh
index 8b6d0fb6d604..79a209927962 100755
--- a/tools/testing/selftests/net/forwarding/router_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/router_multipath.sh
@@ -159,45 +159,6 @@ router2_destroy()
vrf_destroy "vrf-r2"
}
-multipath_eval()
-{
- local desc="$1"
- local weight_rp12=$2
- local weight_rp13=$3
- local packets_rp12=$4
- local packets_rp13=$5
- local weights_ratio packets_ratio diff
-
- RET=0
-
- if [[ "$packets_rp12" -eq "0" || "$packets_rp13" -eq "0" ]]; then
- check_err 1 "Packet difference is 0"
- log_test "Multipath"
- log_info "Expected ratio $weights_ratio"
- return
- fi
-
- if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then
- weights_ratio=$(echo "scale=2; $weight_rp12 / $weight_rp13" \
- | bc -l)
- packets_ratio=$(echo "scale=2; $packets_rp12 / $packets_rp13" \
- | bc -l)
- else
- weights_ratio=$(echo "scale=2; $weight_rp13 / $weight_rp12" | \
- bc -l)
- packets_ratio=$(echo "scale=2; $packets_rp13 / $packets_rp12" | \
- bc -l)
- fi
-
- diff=$(echo $weights_ratio - $packets_ratio | bc -l)
- diff=${diff#-}
-
- test "$(echo "$diff / $weights_ratio > 0.15" | bc -l)" -eq 0
- check_err $? "Too large discrepancy between expected and measured ratios"
- log_test "$desc"
- log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio"
-}
-
multipath4_test()
{
local desc="$1"
diff --git a/tools/testing/selftests/net/forwarding/tc_chains.sh b/tools/testing/selftests/net/forwarding/tc_chains.sh
index d2c783e94df3..2934fb5ed2a2 100755
--- a/tools/testing/selftests/net/forwarding/tc_chains.sh
+++ b/tools/testing/selftests/net/forwarding/tc_chains.sh
@@ -1,7 +1,8 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-ALL_TESTS="unreachable_chain_test gact_goto_chain_test"
+ALL_TESTS="unreachable_chain_test gact_goto_chain_test create_destroy_chain \
+ template_filter_fits"
NUM_NETIFS=2
source tc_common.sh
source lib.sh
@@ -80,6 +81,91 @@ gact_goto_chain_test()
log_test "gact goto chain ($tcflags)"
}
+create_destroy_chain()
+{
+ RET=0
+
+ tc chain add dev $h2 ingress
+ check_err $? "Failed to create default chain"
+
+ output="$(tc -j chain get dev $h2 ingress)"
+ check_err $? "Failed to get default chain"
+
+ echo $output | jq -e ".[] | select(.chain == 0)" &> /dev/null
+ check_err $? "Unexpected output for default chain"
+
+ tc chain add dev $h2 ingress chain 1
+ check_err $? "Failed to create chain 1"
+
+ output="$(tc -j chain get dev $h2 ingress chain 1)"
+ check_err $? "Failed to get chain 1"
+
+ echo $output | jq -e ".[] | select(.chain == 1)" &> /dev/null
+ check_err $? "Unexpected output for chain 1"
+
+ output="$(tc -j chain show dev $h2 ingress)"
+ check_err $? "Failed to dump chains"
+
+ echo $output | jq -e ".[] | select(.chain == 0)" &> /dev/null
+ check_err $? "Can't find default chain in dump"
+
+ echo $output | jq -e ".[] | select(.chain == 1)" &> /dev/null
+ check_err $? "Can't find chain 1 in dump"
+
+ tc chain del dev $h2 ingress
+ check_err $? "Failed to destroy default chain"
+
+ tc chain del dev $h2 ingress chain 1
+ check_err $? "Failed to destroy chain 1"
+
+ log_test "create destroy chain"
+}
+
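+# A chain template constrains which flower keys filters on that chain may
+# use.  Below, the default chain is templated on dst_mac and chain 1 on
+# src_mac, so inserting a filter that matches the other key is expected
+# to fail.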
+template_filter_fits()
+{
+ RET=0
+
+ tc chain add dev $h2 ingress protocol ip \
+ flower dst_mac 00:00:00:00:00:00/FF:FF:FF:FF:FF:FF &> /dev/null
+ tc chain add dev $h2 ingress chain 1 protocol ip \
+ flower src_mac 00:00:00:00:00:00/FF:FF:FF:FF:FF:FF &> /dev/null
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 1101 \
+ flower dst_mac $h2mac action drop
+ check_err $? "Failed to insert filter which fits template"
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 1102 \
+ flower src_mac $h2mac action drop &> /dev/null
+	check_fail $? "Incorrectly succeeded to insert filter which does not fit template"
+
+ tc filter add dev $h2 ingress chain 1 protocol ip pref 1 handle 1101 \
+ flower src_mac $h2mac action drop
+ check_err $? "Failed to insert filter which fits template"
+
+ tc filter add dev $h2 ingress chain 1 protocol ip pref 1 handle 1102 \
+ flower dst_mac $h2mac action drop &> /dev/null
+	check_fail $? "Incorrectly succeeded to insert filter which does not fit template"
+
+ tc filter del dev $h2 ingress chain 1 protocol ip pref 1 handle 1102 \
+ flower &> /dev/null
+ tc filter del dev $h2 ingress chain 1 protocol ip pref 1 handle 1101 \
+ flower &> /dev/null
+
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 1102 \
+ flower &> /dev/null
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 1101 \
+ flower &> /dev/null
+
+ tc chain del dev $h2 ingress chain 1
+ tc chain del dev $h2 ingress
+
+ log_test "template filter fits"
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
@@ -103,6 +185,8 @@ cleanup()
vrf_cleanup
}
+check_tc_chain_support
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/net/forwarding/tc_shblocks.sh b/tools/testing/selftests/net/forwarding/tc_shblocks.sh
index b5b917203815..9826a446e2c0 100755
--- a/tools/testing/selftests/net/forwarding/tc_shblocks.sh
+++ b/tools/testing/selftests/net/forwarding/tc_shblocks.sh
@@ -105,6 +105,8 @@ cleanup()
ip link set $swp2 address $swp2origmac
}
+check_tc_shblock_support
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/net/ip6_gre_headroom.sh b/tools/testing/selftests/net/ip6_gre_headroom.sh
new file mode 100755
index 000000000000..5b41e8bb6e2d
--- /dev/null
+++ b/tools/testing/selftests/net/ip6_gre_headroom.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test that enough headroom is reserved for the first packet passing through an
+# IPv6 GRE-like netdevice.
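+#
+# The tunnel devices prepend outer IPv6/GRE (and, for ip6erspan, ERSPAN)
+# headers to each mirrored packet.  If the mirred path reserved too little
+# headroom, the first packet through the tunnel can crash the kernel in
+# the skb helpers -- hence the "if it doesn't panic, it passes" check in
+# test_headroom() below.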
+
+setup_prepare()
+{
+ ip link add h1 type veth peer name swp1
+ ip link add h3 type veth peer name swp3
+
+ ip link set dev h1 up
+ ip address add 192.0.2.1/28 dev h1
+
+ ip link add dev vh3 type vrf table 20
+ ip link set dev h3 master vh3
+ ip link set dev vh3 up
+ ip link set dev h3 up
+
+ ip link set dev swp3 up
+ ip address add dev swp3 2001:db8:2::1/64
+ ip address add dev swp3 2001:db8:2::3/64
+
+ ip link set dev swp1 up
+ tc qdisc add dev swp1 clsact
+
+ ip link add name er6 type ip6erspan \
+ local 2001:db8:2::1 remote 2001:db8:2::2 oseq okey 123
+ ip link set dev er6 up
+
+ ip link add name gt6 type ip6gretap \
+ local 2001:db8:2::3 remote 2001:db8:2::4
+ ip link set dev gt6 up
+
+ sleep 1
+}
+
+cleanup()
+{
+ ip link del dev gt6
+ ip link del dev er6
+ ip link del dev swp1
+ ip link del dev swp3
+ ip link del dev vh3
+}
+
+test_headroom()
+{
+ local type=$1; shift
+ local tundev=$1; shift
+
+ tc filter add dev swp1 ingress pref 1000 matchall skip_hw \
+ action mirred egress mirror dev $tundev
+ ping -I h1 192.0.2.2 -c 1 -w 2 &> /dev/null
+ tc filter del dev swp1 ingress pref 1000
+
+ # If it doesn't panic, it passes.
+ printf "TEST: %-60s [PASS]\n" "$type headroom"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+test_headroom ip6gretap gt6
+test_headroom ip6erspan er6
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 0d7a44fa30af..08c341b49760 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -525,18 +525,21 @@ kci_test_macsec()
#-------------------------------------------------------------------
kci_test_ipsec()
{
- srcip="14.0.0.52"
- dstip="14.0.0.70"
+ ret=0
algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128"
+ srcip=192.168.123.1
+ dstip=192.168.123.2
+ spi=7
+
+ ip addr add $srcip dev $devdummy
# flush to be sure there's nothing configured
ip x s flush ; ip x p flush
check_err $?
# start the monitor in the background
- tmpfile=`mktemp ipsectestXXX`
- ip x m > $tmpfile &
- mpid=$!
+ tmpfile=`mktemp /var/run/ipsectestXXX`
+ mpid=`(ip x m > $tmpfile & echo $!) 2>/dev/null`
sleep 0.2
ipsecid="proto esp src $srcip dst $dstip spi 0x07"
@@ -599,6 +602,7 @@ kci_test_ipsec()
check_err $?
ip x p flush
check_err $?
+ ip addr del $srcip/32 dev $devdummy
if [ $ret -ne 0 ]; then
echo "FAIL: ipsec"
@@ -607,6 +611,119 @@ kci_test_ipsec()
echo "PASS: ipsec"
}
+#-------------------------------------------------------------------
+# Example commands
+# ip x s add proto esp src 14.0.0.52 dst 14.0.0.70 \
+# spi 0x07 mode transport reqid 0x07 replay-window 32 \
+# aead 'rfc4106(gcm(aes))' 1234567890123456dcba 128 \
+# sel src 14.0.0.52/24 dst 14.0.0.70/24
+# offload dev sim1 dir out
+# ip x p add dir out src 14.0.0.52/24 dst 14.0.0.70/24 \
+# tmpl proto esp src 14.0.0.52 dst 14.0.0.70 \
+# spi 0x07 mode transport reqid 0x07
+#
+#-------------------------------------------------------------------
+kci_test_ipsec_offload()
+{
+ ret=0
+ algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128"
+ srcip=192.168.123.3
+ dstip=192.168.123.4
+ dev=simx1
+ sysfsd=/sys/kernel/debug/netdevsim/$dev
+ sysfsf=$sysfsd/ipsec
+
+ # setup netdevsim since dummydev doesn't have offload support
+ modprobe netdevsim
+ check_err $?
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: ipsec_offload can't load netdevsim"
+ return 1
+ fi
+
+ ip link add $dev type netdevsim
+ ip addr add $srcip dev $dev
+ ip link set $dev up
+ if [ ! -d $sysfsd ] ; then
+ echo "FAIL: ipsec_offload can't create device $dev"
+ return 1
+ fi
+ if [ ! -f $sysfsf ] ; then
+ echo "FAIL: ipsec_offload netdevsim doesn't support IPsec offload"
+ return 1
+ fi
+
+ # flush to be sure there's nothing configured
+ ip x s flush ; ip x p flush
+
+ # create offloaded SAs, both in and out
+ ip x p add dir out src $srcip/24 dst $dstip/24 \
+ tmpl proto esp src $srcip dst $dstip spi 9 \
+ mode transport reqid 42
+ check_err $?
+ ip x p add dir out src $dstip/24 dst $srcip/24 \
+ tmpl proto esp src $dstip dst $srcip spi 9 \
+ mode transport reqid 42
+ check_err $?
+
+ ip x s add proto esp src $srcip dst $dstip spi 9 \
+ mode transport reqid 42 $algo sel src $srcip/24 dst $dstip/24 \
+ offload dev $dev dir out
+ check_err $?
+ ip x s add proto esp src $dstip dst $srcip spi 9 \
+ mode transport reqid 42 $algo sel src $dstip/24 dst $srcip/24 \
+ offload dev $dev dir in
+ check_err $?
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: ipsec_offload can't create SA"
+ return 1
+ fi
+
+ # does offload show up in ip output
+ lines=`ip x s list | grep -c "crypto offload parameters: dev $dev dir"`
+ if [ $lines -ne 2 ] ; then
+ echo "FAIL: ipsec_offload SA offload missing from list output"
+ check_err 1
+ fi
+
+ # use ping to exercise the Tx path
+ ping -I $dev -c 3 -W 1 -i 0 $dstip >/dev/null
+
+ # does driver have correct offload info
+ diff $sysfsf - << EOF
+SA count=2 tx=3
+sa[0] tx ipaddr=0x00000000 00000000 00000000 00000000
+sa[0] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1
+sa[0] key=0x34333231 38373635 32313039 36353433
+sa[1] rx ipaddr=0x00000000 00000000 00000000 037ba8c0
+sa[1] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1
+sa[1] key=0x34333231 38373635 32313039 36353433
+EOF
+ if [ $? -ne 0 ] ; then
+ echo "FAIL: ipsec_offload incorrect driver data"
+ check_err 1
+ fi
+
+ # does offload get removed from driver
+ ip x s flush
+ ip x p flush
+ lines=`grep -c "SA count=0" $sysfsf`
+ if [ $lines -ne 1 ] ; then
+ echo "FAIL: ipsec_offload SA not removed from driver"
+ check_err 1
+ fi
+
+ # clean up any leftovers
+ ip link del $dev
+ rmmod netdevsim
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: ipsec_offload"
+ return 1
+ fi
+ echo "PASS: ipsec_offload"
+}
+
kci_test_gretap()
{
testns="testns"
@@ -861,6 +978,7 @@ kci_test_rtnl()
kci_test_encap
kci_test_macsec
kci_test_ipsec
+ kci_test_ipsec_offload
kci_del_dummy
}
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
new file mode 100644
index 000000000000..b3ebf2646e52
--- /dev/null
+++ b/tools/testing/selftests/net/tls.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <linux/tls.h>
+#include <linux/tcp.h>
+#include <linux/socket.h>
+
+#include <sys/types.h>
+#include <sys/sendfile.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+
+#include "../kselftest_harness.h"
+
+#define TLS_PAYLOAD_MAX_LEN 16384
+#define SOL_TLS 282
+
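+/* Each test talks over a loopback TCP socket pair: fd is the connecting
+ * side and cfd the accepted one.  kTLS is attached by selecting the "tls"
+ * ULP via setsockopt(TCP_ULP) and then programming AES-GCM-128 keys with
+ * setsockopt(SOL_TLS, TLS_TX/TLS_RX); if the ULP is unavailable, notls is
+ * set and the tests fall back to plain TCP.
+ */
+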
+FIXTURE(tls)
+{
+ int fd, cfd;
+ bool notls;
+};
+
+FIXTURE_SETUP(tls)
+{
+ struct tls12_crypto_info_aes_gcm_128 tls12;
+ struct sockaddr_in addr;
+ socklen_t len;
+ int sfd, ret;
+
+ self->notls = false;
+ len = sizeof(addr);
+
+ memset(&tls12, 0, sizeof(tls12));
+ tls12.info.version = TLS_1_2_VERSION;
+ tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
+
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr.sin_port = 0;
+
+ self->fd = socket(AF_INET, SOCK_STREAM, 0);
+ sfd = socket(AF_INET, SOCK_STREAM, 0);
+
+ ret = bind(sfd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+ ret = listen(sfd, 10);
+ ASSERT_EQ(ret, 0);
+
+ ret = getsockname(sfd, &addr, &len);
+ ASSERT_EQ(ret, 0);
+
+ ret = connect(self->fd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ if (ret != 0) {
+ self->notls = true;
+ printf("Failure setting TCP_ULP, testing without tls\n");
+ }
+
+ if (!self->notls) {
+ ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12,
+ sizeof(tls12));
+ ASSERT_EQ(ret, 0);
+ }
+
+ self->cfd = accept(sfd, &addr, &len);
+ ASSERT_GE(self->cfd, 0);
+
+ if (!self->notls) {
+ ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls",
+ sizeof("tls"));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12,
+ sizeof(tls12));
+ ASSERT_EQ(ret, 0);
+ }
+
+ close(sfd);
+}
+
+FIXTURE_TEARDOWN(tls)
+{
+ close(self->fd);
+ close(self->cfd);
+}
+
+TEST_F(tls, sendfile)
+{
+ int filefd = open("/proc/self/exe", O_RDONLY);
+ struct stat st;
+
+ EXPECT_GE(filefd, 0);
+ fstat(filefd, &st);
+ EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0);
+}
+
+TEST_F(tls, send_then_sendfile)
+{
+ int filefd = open("/proc/self/exe", O_RDONLY);
+ char const *test_str = "test_send";
+ int to_send = strlen(test_str) + 1;
+ char recv_buf[10];
+ struct stat st;
+ char *buf;
+
+ EXPECT_GE(filefd, 0);
+ fstat(filefd, &st);
+ buf = (char *)malloc(st.st_size);
+
+ EXPECT_EQ(send(self->fd, test_str, to_send, 0), to_send);
+ EXPECT_EQ(recv(self->cfd, recv_buf, to_send, 0), to_send);
+ EXPECT_EQ(memcmp(test_str, recv_buf, to_send), 0);
+
+ EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0);
+ EXPECT_EQ(recv(self->cfd, buf, st.st_size, 0), st.st_size);
+}
+
+TEST_F(tls, recv_max)
+{
+ unsigned int send_len = TLS_PAYLOAD_MAX_LEN;
+ char recv_mem[TLS_PAYLOAD_MAX_LEN];
+ char buf[TLS_PAYLOAD_MAX_LEN];
+
+ EXPECT_GE(send(self->fd, buf, send_len, 0), 0);
+ EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1);
+ EXPECT_EQ(memcmp(buf, recv_mem, send_len), 0);
+}
+
+TEST_F(tls, recv_small)
+{
+ char const *test_str = "test_read";
+ int send_len = 10;
+ char buf[10];
+
+ send_len = strlen(test_str) + 1;
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+}
+
+TEST_F(tls, msg_more)
+{
+ char const *test_str = "test_read";
+ int send_len = 10;
+ char buf[10 * 2];
+
+ EXPECT_EQ(send(self->fd, test_str, send_len, MSG_MORE), send_len);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_DONTWAIT), -1);
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ EXPECT_EQ(recv(self->cfd, buf, send_len * 2, MSG_DONTWAIT),
+ send_len * 2);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+}
+
+TEST_F(tls, sendmsg_single)
+{
+ struct msghdr msg;
+
+ char const *test_str = "test_sendmsg";
+ size_t send_len = 13;
+ struct iovec vec;
+ char buf[13];
+
+ vec.iov_base = (char *)test_str;
+ vec.iov_len = send_len;
+ memset(&msg, 0, sizeof(struct msghdr));
+ msg.msg_iov = &vec;
+ msg.msg_iovlen = 1;
+ EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+}
+
+TEST_F(tls, sendmsg_large)
+{
+ void *mem = malloc(16384);
+ size_t send_len = 16384;
+ size_t sends = 128;
+ struct msghdr msg;
+ size_t recvs = 0;
+ size_t sent = 0;
+
+ memset(&msg, 0, sizeof(struct msghdr));
+ while (sent++ < sends) {
+ struct iovec vec = { (void *)mem, send_len };
+
+ msg.msg_iov = &vec;
+ msg.msg_iovlen = 1;
+ EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len);
+ }
+
+ while (recvs++ < sends)
+ EXPECT_NE(recv(self->fd, mem, send_len, 0), -1);
+
+ free(mem);
+}
+
+TEST_F(tls, sendmsg_multiple)
+{
+ char const *test_str = "test_sendmsg_multiple";
+ struct iovec vec[5];
+ char *test_strs[5];
+ struct msghdr msg;
+ int total_len = 0;
+ int len_cmp = 0;
+ int iov_len = 5;
+ char *buf;
+ int i;
+
+ memset(&msg, 0, sizeof(struct msghdr));
+ for (i = 0; i < iov_len; i++) {
+ test_strs[i] = (char *)malloc(strlen(test_str) + 1);
+ snprintf(test_strs[i], strlen(test_str) + 1, "%s", test_str);
+ vec[i].iov_base = (void *)test_strs[i];
+ vec[i].iov_len = strlen(test_strs[i]) + 1;
+ total_len += vec[i].iov_len;
+ }
+ msg.msg_iov = vec;
+ msg.msg_iovlen = iov_len;
+
+ EXPECT_EQ(sendmsg(self->cfd, &msg, 0), total_len);
+ buf = malloc(total_len);
+ EXPECT_NE(recv(self->fd, buf, total_len, 0), -1);
+ for (i = 0; i < iov_len; i++) {
+ EXPECT_EQ(memcmp(test_strs[i], buf + len_cmp,
+ strlen(test_strs[i])),
+ 0);
+ len_cmp += strlen(buf + len_cmp) + 1;
+ }
+ for (i = 0; i < iov_len; i++)
+ free(test_strs[i]);
+ free(buf);
+}
+
+TEST_F(tls, sendmsg_multiple_stress)
+{
+ char const *test_str = "abcdefghijklmno";
+ struct iovec vec[1024];
+ char *test_strs[1024];
+ int iov_len = 1024;
+ int total_len = 0;
+ char buf[1 << 14];
+ struct msghdr msg;
+ int len_cmp = 0;
+ int i;
+
+ memset(&msg, 0, sizeof(struct msghdr));
+ for (i = 0; i < iov_len; i++) {
+ test_strs[i] = (char *)malloc(strlen(test_str) + 1);
+ snprintf(test_strs[i], strlen(test_str) + 1, "%s", test_str);
+ vec[i].iov_base = (void *)test_strs[i];
+ vec[i].iov_len = strlen(test_strs[i]) + 1;
+ total_len += vec[i].iov_len;
+ }
+ msg.msg_iov = vec;
+ msg.msg_iovlen = iov_len;
+
+ EXPECT_EQ(sendmsg(self->fd, &msg, 0), total_len);
+ EXPECT_NE(recv(self->cfd, buf, total_len, 0), -1);
+
+ for (i = 0; i < iov_len; i++)
+ len_cmp += strlen(buf + len_cmp) + 1;
+
+ for (i = 0; i < iov_len; i++)
+ free(test_strs[i]);
+}
+
+TEST_F(tls, splice_from_pipe)
+{
+ int send_len = TLS_PAYLOAD_MAX_LEN;
+ char mem_send[TLS_PAYLOAD_MAX_LEN];
+ char mem_recv[TLS_PAYLOAD_MAX_LEN];
+ int p[2];
+
+ ASSERT_GE(pipe(p), 0);
+ EXPECT_GE(write(p[1], mem_send, send_len), 0);
+ EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), 0);
+ EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, splice_from_pipe2)
+{
+ int send_len = 16000;
+ char mem_send[16000];
+ char mem_recv[16000];
+ int p2[2];
+ int p[2];
+
+ ASSERT_GE(pipe(p), 0);
+ ASSERT_GE(pipe(p2), 0);
+ EXPECT_GE(write(p[1], mem_send, 8000), 0);
+ EXPECT_GE(splice(p[0], NULL, self->fd, NULL, 8000, 0), 0);
+ EXPECT_GE(write(p2[1], mem_send + 8000, 8000), 0);
+ EXPECT_GE(splice(p2[0], NULL, self->fd, NULL, 8000, 0), 0);
+ EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, send_and_splice)
+{
+ int send_len = TLS_PAYLOAD_MAX_LEN;
+ char mem_send[TLS_PAYLOAD_MAX_LEN];
+ char mem_recv[TLS_PAYLOAD_MAX_LEN];
+ char const *test_str = "test_read";
+ int send_len2 = 10;
+ char buf[10];
+ int p[2];
+
+ ASSERT_GE(pipe(p), 0);
+ EXPECT_EQ(send(self->fd, test_str, send_len2, 0), send_len2);
+ EXPECT_NE(recv(self->cfd, buf, send_len2, 0), -1);
+ EXPECT_EQ(memcmp(test_str, buf, send_len2), 0);
+
+ EXPECT_GE(write(p[1], mem_send, send_len), send_len);
+ EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), send_len);
+
+ EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, splice_to_pipe)
+{
+ int send_len = TLS_PAYLOAD_MAX_LEN;
+ char mem_send[TLS_PAYLOAD_MAX_LEN];
+ char mem_recv[TLS_PAYLOAD_MAX_LEN];
+ int p[2];
+
+ ASSERT_GE(pipe(p), 0);
+ EXPECT_GE(send(self->fd, mem_send, send_len, 0), 0);
+ EXPECT_GE(splice(self->cfd, NULL, p[1], NULL, send_len, 0), 0);
+ EXPECT_GE(read(p[0], mem_recv, send_len), 0);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, recvmsg_single)
+{
+ char const *test_str = "test_recvmsg_single";
+ int send_len = strlen(test_str) + 1;
+ char buf[20];
+ struct msghdr hdr;
+ struct iovec vec;
+
+ memset(&hdr, 0, sizeof(hdr));
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ vec.iov_base = (char *)buf;
+ vec.iov_len = send_len;
+ hdr.msg_iovlen = 1;
+ hdr.msg_iov = &vec;
+ EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1);
+ EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, recvmsg_single_max)
+{
+ int send_len = TLS_PAYLOAD_MAX_LEN;
+ char send_mem[TLS_PAYLOAD_MAX_LEN];
+ char recv_mem[TLS_PAYLOAD_MAX_LEN];
+ struct iovec vec;
+ struct msghdr hdr;
+
+ EXPECT_EQ(send(self->fd, send_mem, send_len, 0), send_len);
+ vec.iov_base = (char *)recv_mem;
+ vec.iov_len = TLS_PAYLOAD_MAX_LEN;
+
+	memset(&hdr, 0, sizeof(hdr));
+	hdr.msg_iovlen = 1;
+ hdr.msg_iov = &vec;
+ EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1);
+ EXPECT_EQ(memcmp(send_mem, recv_mem, send_len), 0);
+}
+
+TEST_F(tls, recvmsg_multiple)
+{
+ unsigned int msg_iovlen = 1024;
+ unsigned int len_compared = 0;
+ struct iovec vec[1024];
+ char *iov_base[1024];
+ unsigned int iov_len = 16;
+ int send_len = 1 << 14;
+ char buf[1 << 14];
+ struct msghdr hdr;
+ int i;
+
+ EXPECT_EQ(send(self->fd, buf, send_len, 0), send_len);
+ for (i = 0; i < msg_iovlen; i++) {
+ iov_base[i] = (char *)malloc(iov_len);
+ vec[i].iov_base = iov_base[i];
+ vec[i].iov_len = iov_len;
+ }
+
+	memset(&hdr, 0, sizeof(hdr));
+	hdr.msg_iovlen = msg_iovlen;
+ hdr.msg_iov = vec;
+ EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1);
+ for (i = 0; i < msg_iovlen; i++)
+ len_compared += iov_len;
+
+ for (i = 0; i < msg_iovlen; i++)
+ free(iov_base[i]);
+}
+
+TEST_F(tls, single_send_multiple_recv)
+{
+ unsigned int total_len = TLS_PAYLOAD_MAX_LEN * 2;
+ unsigned int send_len = TLS_PAYLOAD_MAX_LEN;
+ char send_mem[TLS_PAYLOAD_MAX_LEN * 2];
+ char recv_mem[TLS_PAYLOAD_MAX_LEN * 2];
+
+ EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0);
+ memset(recv_mem, 0, total_len);
+
+ EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1);
+ EXPECT_NE(recv(self->cfd, recv_mem + send_len, send_len, 0), -1);
+ EXPECT_EQ(memcmp(send_mem, recv_mem, total_len), 0);
+}
+
+TEST_F(tls, multiple_send_single_recv)
+{
+ unsigned int total_len = 2 * 10;
+ unsigned int send_len = 10;
+ char recv_mem[2 * 10];
+ char send_mem[10];
+
+ EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
+ EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
+ memset(recv_mem, 0, total_len);
+ EXPECT_EQ(recv(self->cfd, recv_mem, total_len, 0), total_len);
+
+ EXPECT_EQ(memcmp(send_mem, recv_mem, send_len), 0);
+ EXPECT_EQ(memcmp(send_mem, recv_mem + send_len, send_len), 0);
+}
+
+TEST_F(tls, recv_partial)
+{
+ char const *test_str = "test_read_partial";
+ char const *test_str_first = "test_read";
+ char const *test_str_second = "_partial";
+ int send_len = strlen(test_str) + 1;
+ char recv_mem[18];
+
+ memset(recv_mem, 0, sizeof(recv_mem));
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first), 0), -1);
+ EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0);
+ memset(recv_mem, 0, sizeof(recv_mem));
+ EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second), 0), -1);
+ EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)),
+ 0);
+}
+
+TEST_F(tls, recv_nonblock)
+{
+ char buf[4096];
+ bool err;
+
+ EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), MSG_DONTWAIT), -1);
+ err = (errno == EAGAIN || errno == EWOULDBLOCK);
+ EXPECT_EQ(err, true);
+}
+
+TEST_F(tls, recv_peek)
+{
+ char const *test_str = "test_read_peek";
+ int send_len = strlen(test_str) + 1;
+ char buf[15];
+
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ EXPECT_NE(recv(self->cfd, buf, send_len, MSG_PEEK), -1);
+ EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+ memset(buf, 0, sizeof(buf));
+ EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, recv_peek_multiple)
+{
+ char const *test_str = "test_read_peek";
+ int send_len = strlen(test_str) + 1;
+ unsigned int num_peeks = 100;
+ char buf[15];
+ int i;
+
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ for (i = 0; i < num_peeks; i++) {
+ EXPECT_NE(recv(self->cfd, buf, send_len, MSG_PEEK), -1);
+ EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+ memset(buf, 0, sizeof(buf));
+ }
+ EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, pollin)
+{
+ char const *test_str = "test_poll";
+ struct pollfd fd = { 0, 0, 0 };
+ char buf[10];
+ int send_len = 10;
+
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ fd.fd = self->cfd;
+ fd.events = POLLIN;
+
+ EXPECT_EQ(poll(&fd, 1, 20), 1);
+ EXPECT_EQ(fd.revents & POLLIN, 1);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len);
+ /* Test timing out */
+ EXPECT_EQ(poll(&fd, 1, 20), 0);
+}
+
+TEST_F(tls, poll_wait)
+{
+ char const *test_str = "test_poll_wait";
+ int send_len = strlen(test_str) + 1;
+ struct pollfd fd = { 0, 0, 0 };
+ char recv_mem[15];
+
+ fd.fd = self->cfd;
+ fd.events = POLLIN;
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+	/* Infinite timeout: block until the data arrives. */
+ EXPECT_EQ(poll(&fd, 1, -1), 1);
+ EXPECT_EQ(fd.revents & POLLIN, 1);
+ EXPECT_EQ(recv(self->cfd, recv_mem, send_len, 0), send_len);
+}
+
+TEST_F(tls, blocking)
+{
+ size_t data = 100000;
+ int res = fork();
+
+ EXPECT_NE(res, -1);
+
+ if (res) {
+ /* parent */
+ size_t left = data;
+ char buf[16384];
+ int status;
+ int pid2;
+
+ while (left) {
+ int res = send(self->fd, buf,
+ left > 16384 ? 16384 : left, 0);
+
+ EXPECT_GE(res, 0);
+ left -= res;
+ }
+
+ pid2 = wait(&status);
+ EXPECT_EQ(status, 0);
+ EXPECT_EQ(res, pid2);
+ } else {
+ /* child */
+ size_t left = data;
+ char buf[16384];
+
+ while (left) {
+ int res = recv(self->cfd, buf,
+ left > 16384 ? 16384 : left, 0);
+
+ EXPECT_GE(res, 0);
+ left -= res;
+ }
+ }
+}
+
+TEST_F(tls, nonblocking)
+{
+ size_t data = 100000;
+ int sendbuf = 100;
+ int flags;
+ int res;
+
+ flags = fcntl(self->fd, F_GETFL, 0);
+ fcntl(self->fd, F_SETFL, flags | O_NONBLOCK);
+ fcntl(self->cfd, F_SETFL, flags | O_NONBLOCK);
+
+ /* Ensure nonblocking behavior by imposing a small send
+ * buffer.
+ */
+ EXPECT_EQ(setsockopt(self->fd, SOL_SOCKET, SO_SNDBUF,
+ &sendbuf, sizeof(sendbuf)), 0);
+
+ res = fork();
+ EXPECT_NE(res, -1);
+
+ if (res) {
+ /* parent */
+ bool eagain = false;
+ size_t left = data;
+ char buf[16384];
+ int status;
+ int pid2;
+
+ while (left) {
+ int res = send(self->fd, buf,
+ left > 16384 ? 16384 : left, 0);
+
+ if (res == -1 && errno == EAGAIN) {
+ eagain = true;
+ usleep(10000);
+ continue;
+ }
+ EXPECT_GE(res, 0);
+ left -= res;
+ }
+
+ EXPECT_TRUE(eagain);
+ pid2 = wait(&status);
+
+ EXPECT_EQ(status, 0);
+ EXPECT_EQ(res, pid2);
+ } else {
+ /* child */
+ bool eagain = false;
+ size_t left = data;
+ char buf[16384];
+
+ while (left) {
+ int res = recv(self->cfd, buf,
+ left > 16384 ? 16384 : left, 0);
+
+ if (res == -1 && errno == EAGAIN) {
+ eagain = true;
+ usleep(10000);
+ continue;
+ }
+ EXPECT_GE(res, 0);
+ left -= res;
+ }
+ EXPECT_TRUE(eagain);
+ }
+}
+
+TEST_F(tls, control_msg)
+{
+ if (self->notls)
+ return;
+
+ char cbuf[CMSG_SPACE(sizeof(char))];
+ char const *test_str = "test_read";
+ int cmsg_len = sizeof(char);
+ char record_type = 100;
+ struct cmsghdr *cmsg;
+ struct msghdr msg;
+ int send_len = 10;
+ struct iovec vec;
+ char buf[10];
+
+ vec.iov_base = (char *)test_str;
+ vec.iov_len = 10;
+ memset(&msg, 0, sizeof(struct msghdr));
+ msg.msg_iov = &vec;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cbuf;
+ msg.msg_controllen = sizeof(cbuf);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_TLS;
+ /* test sending non-record types. */
+ cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
+ cmsg->cmsg_len = CMSG_LEN(cmsg_len);
+ *CMSG_DATA(cmsg) = record_type;
+ msg.msg_controllen = cmsg->cmsg_len;
+
+ EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
+ /* Should fail because we didn't provide a control message */
+ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
+
+ vec.iov_base = buf;
+ EXPECT_EQ(recvmsg(self->cfd, &msg, 0), send_len);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ EXPECT_NE(cmsg, NULL);
+ EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
+ EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
+ record_type = *((unsigned char *)CMSG_DATA(cmsg));
+ EXPECT_EQ(record_type, 100);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index c15f270e121d..65541c21a544 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -1,6 +1,7 @@
#!/bin/bash
#
-# Usage: configinit.sh config-spec-file [ build output dir ]
+# Usage: configinit.sh config-spec-file build-output-dir results-dir
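+# e.g.: configinit.sh /path/to/config-spec O=/path/to/builddir /path/to/resdir  (illustrative paths)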
#
# Create a .config file from the spec file. Run from the kernel source tree.
# Exits with 0 if all went well, with 1 if all went well but the config
@@ -40,20 +40,18 @@ mkdir $T
c=$1
buildloc=$2
+resdir=$3
builddir=
-if test -n $buildloc
+if echo $buildloc | grep -q '^O='
then
- if echo $buildloc | grep -q '^O='
+ builddir=`echo $buildloc | sed -e 's/^O=//'`
+ if test ! -d $builddir
then
- builddir=`echo $buildloc | sed -e 's/^O=//'`
- if test ! -d $builddir
- then
- mkdir $builddir
- fi
- else
- echo Bad build directory: \"$buildloc\"
- exit 2
+ mkdir $builddir
fi
+else
+ echo Bad build directory: \"$buildloc\"
+ exit 2
fi
sed -e 's/^\(CONFIG[0-9A-Z_]*\)=.*$/grep -v "^# \1" |/' < $c > $T/u.sh
@@ -61,12 +59,12 @@ sed -e 's/^\(CONFIG[0-9A-Z_]*=\).*$/grep -v \1 |/' < $c >> $T/u.sh
grep '^grep' < $T/u.sh > $T/upd.sh
echo "cat - $c" >> $T/upd.sh
make mrproper
-make $buildloc distclean > $builddir/Make.distclean 2>&1
-make $buildloc $TORTURE_DEFCONFIG > $builddir/Make.defconfig.out 2>&1
+make $buildloc distclean > $resdir/Make.distclean 2>&1
+make $buildloc $TORTURE_DEFCONFIG > $resdir/Make.defconfig.out 2>&1
mv $builddir/.config $builddir/.config.sav
sh $T/upd.sh < $builddir/.config.sav > $builddir/.config
cp $builddir/.config $builddir/.config.new
-yes '' | make $buildloc oldconfig > $builddir/Make.oldconfig.out 2> $builddir/Make.oldconfig.err
+yes '' | make $buildloc oldconfig > $resdir/Make.oldconfig.out 2> $resdir/Make.oldconfig.err
# verify new config matches specification.
configcheck.sh $builddir/.config $c
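
Net effect of the configinit.sh hunks above: the second argument still
carries the O= build location, while the Make.* logs move to the new third
argument. A sketch of the updated invocation, with hypothetical paths:

    # config spec, O= build directory, separate results directory
    configinit.sh CFcommon O=/tmp/rcutorture/b1 /tmp/rcutorture/res/run1
    # Make.distclean, Make.defconfig.out and Make.oldconfig.{out,err}
    # now land under /tmp/rcutorture/res/run1 instead of the build dir.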
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
index 34d126734cde..9115fcdb5617 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -2,7 +2,7 @@
#
# Build a kvm-ready Linux kernel from the tree in the current directory.
#
-# Usage: kvm-build.sh config-template build-dir
+# Usage: kvm-build.sh config-template build-dir resdir
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -29,6 +29,7 @@ then
exit 1
fi
builddir=${2}
+resdir=${3}
T=${TMPDIR-/tmp}/test-linux.sh.$$
trap 'rm -rf $T' 0
@@ -41,19 +42,19 @@ CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_CONSOLE=y
___EOF___
-configinit.sh $T/config O=$builddir
+configinit.sh $T/config O=$builddir $resdir
retval=$?
if test $retval -gt 1
then
exit 2
fi
ncpus=`cpus2use.sh`
-make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $builddir/Make.out 2>&1
+make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $resdir/Make.out 2>&1
retval=$?
-if test $retval -ne 0 || grep "rcu[^/]*": < $builddir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $builddir/Make.out
+if test $retval -ne 0 || grep "rcu[^/]*": < $resdir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $resdir/Make.out
then
echo Kernel build error
- egrep "Stop|Error|error:|warning:" < $builddir/Make.out
+ egrep "Stop|Error|error:|warning:" < $resdir/Make.out
echo Run aborted.
exit 3
fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index 477ecb1293ab..0fa8a61ccb7b 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -70,4 +70,5 @@ else
else
print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
fi
+ echo $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i > $i/console.log.rcu.diags
fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index c27e97824163..c9bab57a77eb 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -39,6 +39,7 @@ do
head -1 $resdir/log
fi
TORTURE_SUITE="`cat $i/../TORTURE_SUITE`"
+ rm -f $i/console.log.*.diags
kvm-recheck-${TORTURE_SUITE}.sh $i
if test -f "$i/console.log"
then
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index c5b0f94341d9..f7247ee00514 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -98,14 +98,15 @@ then
ln -s $base_resdir/.config $resdir # for kvm-recheck.sh
# Arch-independent indicator
touch $resdir/builtkernel
-elif kvm-build.sh $T/Kc2 $builddir
+elif kvm-build.sh $T/Kc2 $builddir $resdir
then
# Had to build a kernel for this test.
QEMU="`identify_qemu $builddir/vmlinux`"
BOOT_IMAGE="`identify_boot_image $QEMU`"
- cp $builddir/Make*.out $resdir
cp $builddir/vmlinux $resdir
cp $builddir/.config $resdir
+ cp $builddir/Module.symvers $resdir > /dev/null || :
+ cp $builddir/System.map $resdir > /dev/null || :
if test -n "$BOOT_IMAGE"
then
cp $builddir/$BOOT_IMAGE $resdir
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 56610dbbdf73..5a7a62d76a50 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -347,7 +347,7 @@ function dump(first, pastlast, batchnum)
print "needqemurun="
jn=1
for (j = first; j < pastlast; j++) {
- builddir=KVM "/b" jn
+ builddir=KVM "/b1"
cpusr[jn] = cpus[j];
if (cfrep[cf[j]] == "") {
cfr[jn] = cf[j];
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh
index 17293436f551..84933f6aed77 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-console.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh
@@ -163,6 +163,13 @@ then
print_warning Summary: $summary
cat $T.diags >> $file.diags
fi
+for i in $file.*.diags
+do
+ if test -f "$i"
+ then
+ cat $i >> $file.diags
+ fi
+done
if ! test -s $file.diags
then
rm -f $file.diags
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
index 5d2cc0bd50a0..5c3213cc3ad7 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
@@ -1,5 +1,5 @@
-rcutorture.onoff_interval=1 rcutorture.onoff_holdoff=30
-rcutree.gp_preinit_delay=3
+rcutorture.onoff_interval=200 rcutorture.onoff_holdoff=30
+rcutree.gp_preinit_delay=12
rcutree.gp_init_delay=3
rcutree.gp_cleanup_delay=3
rcutree.kthread_prio=2
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot
deleted file mode 100644
index 883149b5f2d1..000000000000
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot
+++ /dev/null
@@ -1 +0,0 @@
-rcutree.rcu_fanout_exact=1
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
index 24ec91041957..7bab8246392b 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
@@ -39,7 +39,7 @@ rcutorture_param_onoff () {
if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
then
echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
- echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
+ echo rcutorture.onoff_interval=1000 rcutorture.onoff_holdoff=30
fi
}
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index 615252331813..642d4e12abea 100644
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -90,6 +90,30 @@ unsigned int yield_mod_cnt, nr_abort;
#error "Unsupported architecture"
#endif
+#elif defined(__s390__)
+
+#define RSEQ_INJECT_INPUT \
+ , [loop_cnt_1]"m"(loop_cnt[1]) \
+ , [loop_cnt_2]"m"(loop_cnt[2]) \
+ , [loop_cnt_3]"m"(loop_cnt[3]) \
+ , [loop_cnt_4]"m"(loop_cnt[4]) \
+ , [loop_cnt_5]"m"(loop_cnt[5]) \
+ , [loop_cnt_6]"m"(loop_cnt[6])
+
+#define INJECT_ASM_REG "r12"
+
+#define RSEQ_INJECT_CLOBBER \
+ , INJECT_ASM_REG
+
+#define RSEQ_INJECT_ASM(n) \
+ "l %%" INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
+ "ltr %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG "\n\t" \
+ "je 333f\n\t" \
+ "222:\n\t" \
+ "ahi %%" INJECT_ASM_REG ", -1\n\t" \
+ "jnz 222b\n\t" \
+ "333:\n\t"
+
#elif defined(__ARMEL__)
#define RSEQ_INJECT_INPUT \
@@ -114,6 +138,26 @@ unsigned int yield_mod_cnt, nr_abort;
"bne 222b\n\t" \
"333:\n\t"
+#elif defined(__AARCH64EL__)
+
+#define RSEQ_INJECT_INPUT \
+ , [loop_cnt_1] "Qo" (loop_cnt[1]) \
+ , [loop_cnt_2] "Qo" (loop_cnt[2]) \
+ , [loop_cnt_3] "Qo" (loop_cnt[3]) \
+ , [loop_cnt_4] "Qo" (loop_cnt[4]) \
+ , [loop_cnt_5] "Qo" (loop_cnt[5]) \
+ , [loop_cnt_6] "Qo" (loop_cnt[6])
+
+#define INJECT_ASM_REG RSEQ_ASM_TMP_REG32
+
+#define RSEQ_INJECT_ASM(n) \
+ " ldr " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n" \
+ " cbz " INJECT_ASM_REG ", 333f\n" \
+ "222:\n" \
+ " sub " INJECT_ASM_REG ", " INJECT_ASM_REG ", #1\n" \
+ " cbnz " INJECT_ASM_REG ", 222b\n" \
+ "333:\n"
+
#elif __PPC__
#define RSEQ_INJECT_INPUT \
diff --git a/tools/testing/selftests/rseq/rseq-arm64.h b/tools/testing/selftests/rseq/rseq-arm64.h
new file mode 100644
index 000000000000..954f34671ca6
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-arm64.h
@@ -0,0 +1,594 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * rseq-arm64.h
+ *
+ * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * (C) Copyright 2018 - Will Deacon <will.deacon@arm.com>
+ */
+
+#define RSEQ_SIG 0xd428bc00 /* BRK #0x45E0 */
+
+#define rseq_smp_mb() __asm__ __volatile__ ("dmb ish" ::: "memory")
+#define rseq_smp_rmb() __asm__ __volatile__ ("dmb ishld" ::: "memory")
+#define rseq_smp_wmb() __asm__ __volatile__ ("dmb ishst" ::: "memory")
+
+#define rseq_smp_load_acquire(p) \
+__extension__ ({ \
+ __typeof(*p) ____p1; \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("ldarb %w0, %1" \
+ : "=r" (*(__u8 *)p) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("ldarh %w0, %1" \
+ : "=r" (*(__u16 *)p) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 4: \
+ asm volatile ("ldar %w0, %1" \
+ : "=r" (*(__u32 *)p) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("ldar %0, %1" \
+ : "=r" (*(__u64 *)p) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ } \
+ ____p1; \
+})
+
+#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb()
+
+#define rseq_smp_store_release(p, v) \
+do { \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("stlrb %w1, %0" \
+ : "=Q" (*p) \
+ : "r" ((__u8)v) \
+ : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("stlrh %w1, %0" \
+ : "=Q" (*p) \
+ : "r" ((__u16)v) \
+ : "memory"); \
+ break; \
+ case 4: \
+ asm volatile ("stlr %w1, %0" \
+ : "=Q" (*p) \
+ : "r" ((__u32)v) \
+ : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("stlr %1, %0" \
+ : "=Q" (*p) \
+ : "r" ((__u64)v) \
+ : "memory"); \
+ break; \
+ } \
+} while (0)
+
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+#define RSEQ_ASM_TMP_REG32 "w15"
+#define RSEQ_ASM_TMP_REG "x15"
+#define RSEQ_ASM_TMP_REG_2 "x14"
+
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
+ post_commit_offset, abort_ip) \
+ " .pushsection __rseq_table, \"aw\"\n" \
+ " .balign 32\n" \
+ __rseq_str(label) ":\n" \
+ " .long " __rseq_str(version) ", " __rseq_str(flags) "\n" \
+ " .quad " __rseq_str(start_ip) ", " \
+ __rseq_str(post_commit_offset) ", " \
+ __rseq_str(abort_ip) "\n" \
+ " .popsection\n"
+
+#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
+ (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
+ RSEQ_INJECT_ASM(1) \
+ " adrp " RSEQ_ASM_TMP_REG ", " __rseq_str(cs_label) "\n" \
+ " add " RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG \
+ ", :lo12:" __rseq_str(cs_label) "\n" \
+ " str " RSEQ_ASM_TMP_REG ", %[" __rseq_str(rseq_cs) "]\n" \
+ __rseq_str(label) ":\n"
+
+#define RSEQ_ASM_DEFINE_ABORT(label, abort_label) \
+ " b 222f\n" \
+ " .inst " __rseq_str(RSEQ_SIG) "\n" \
+ __rseq_str(label) ":\n" \
+ " b %l[" __rseq_str(abort_label) "]\n" \
+ "222:\n"
+
+#define RSEQ_ASM_OP_STORE(value, var) \
+ " str %[" __rseq_str(value) "], %[" __rseq_str(var) "]\n"
+
+#define RSEQ_ASM_OP_STORE_RELEASE(value, var) \
+ " stlr %[" __rseq_str(value) "], %[" __rseq_str(var) "]\n"
+
+#define RSEQ_ASM_OP_FINAL_STORE(value, var, post_commit_label) \
+ RSEQ_ASM_OP_STORE(value, var) \
+ __rseq_str(post_commit_label) ":\n"
+
+#define RSEQ_ASM_OP_FINAL_STORE_RELEASE(value, var, post_commit_label) \
+ RSEQ_ASM_OP_STORE_RELEASE(value, var) \
+ __rseq_str(post_commit_label) ":\n"
+
+#define RSEQ_ASM_OP_CMPEQ(var, expect, label) \
+ " ldr " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n" \
+ " sub " RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG \
+ ", %[" __rseq_str(expect) "]\n" \
+ " cbnz " RSEQ_ASM_TMP_REG ", " __rseq_str(label) "\n"
+
+#define RSEQ_ASM_OP_CMPEQ32(var, expect, label) \
+ " ldr " RSEQ_ASM_TMP_REG32 ", %[" __rseq_str(var) "]\n" \
+ " sub " RSEQ_ASM_TMP_REG32 ", " RSEQ_ASM_TMP_REG32 \
+ ", %w[" __rseq_str(expect) "]\n" \
+ " cbnz " RSEQ_ASM_TMP_REG32 ", " __rseq_str(label) "\n"
+
+#define RSEQ_ASM_OP_CMPNE(var, expect, label) \
+ " ldr " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n" \
+ " sub " RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG \
+ ", %[" __rseq_str(expect) "]\n" \
+ " cbz " RSEQ_ASM_TMP_REG ", " __rseq_str(label) "\n"
+
+#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
+ RSEQ_INJECT_ASM(2) \
+ RSEQ_ASM_OP_CMPEQ32(current_cpu_id, cpu_id, label)
+
+#define RSEQ_ASM_OP_R_LOAD(var) \
+ " ldr " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n"
+
+#define RSEQ_ASM_OP_R_STORE(var) \
+ " str " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n"
+
+#define RSEQ_ASM_OP_R_LOAD_OFF(offset) \
+ " ldr " RSEQ_ASM_TMP_REG ", [" RSEQ_ASM_TMP_REG \
+ ", %[" __rseq_str(offset) "]]\n"
+
+#define RSEQ_ASM_OP_R_ADD(count) \
+ " add " RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG \
+ ", %[" __rseq_str(count) "]\n"
+
+#define RSEQ_ASM_OP_R_FINAL_STORE(var, post_commit_label) \
+ " str " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n" \
+ __rseq_str(post_commit_label) ":\n"
+
+#define RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len) \
+ " cbz %[" __rseq_str(len) "], 333f\n" \
+ " mov " RSEQ_ASM_TMP_REG_2 ", %[" __rseq_str(len) "]\n" \
+ "222: sub " RSEQ_ASM_TMP_REG_2 ", " RSEQ_ASM_TMP_REG_2 ", #1\n" \
+ " ldrb " RSEQ_ASM_TMP_REG32 ", [%[" __rseq_str(src) "]" \
+ ", " RSEQ_ASM_TMP_REG_2 "]\n" \
+ " strb " RSEQ_ASM_TMP_REG32 ", [%[" __rseq_str(dst) "]" \
+ ", " RSEQ_ASM_TMP_REG_2 "]\n" \
+ " cbnz " RSEQ_ASM_TMP_REG_2 ", 222b\n" \
+ "333:\n"
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "Qo" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
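
Sketch of how callers typically drive the basic compare-and-store primitive
above: a hypothetical per-CPU counter increment with the customary retry
loop. RSEQ_READ_ONCE and rseq_cpu_start() come from the selftests' rseq.h;
the counter array is illustrative, not part of this patch:

    intptr_t counter[256];   /* hypothetical per-CPU slots */
    intptr_t old;
    int cpu, ret;

    do {
        cpu = rseq_cpu_start();
        old = RSEQ_READ_ONCE(counter[cpu]);
        ret = rseq_cmpeqv_storev(&counter[cpu], old, old + 1, cpu);
    } while (ret);   /* -1: aborted by preemption, 1: lost a race; retry */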
+
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+ off_t voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPNE(v, expectnot, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_OP_CMPNE(v, expectnot, %l[error2])
+#endif
+ RSEQ_ASM_OP_R_LOAD(v)
+ RSEQ_ASM_OP_R_STORE(load)
+ RSEQ_ASM_OP_R_LOAD_OFF(voffp)
+ RSEQ_ASM_OP_R_FINAL_STORE(v, 3)
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "Qo" (*v),
+ [expectnot] "r" (expectnot),
+ [load] "Qo" (*load),
+ [voffp] "r" (voffp)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+ RSEQ_ASM_OP_R_LOAD(v)
+ RSEQ_ASM_OP_R_ADD(count)
+ RSEQ_ASM_OP_R_FINAL_STORE(v, 3)
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "Qo" (*v),
+ [count] "r" (count)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ RSEQ_ASM_OP_STORE(newv2, v2)
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [expect] "r" (expect),
+ [v] "Qo" (*v),
+ [newv] "r" (newv),
+ [v2] "Qo" (*v2),
+ [newv2] "r" (newv2)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ RSEQ_ASM_OP_STORE(newv2, v2)
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_OP_FINAL_STORE_RELEASE(newv, v, 3)
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [expect] "r" (expect),
+ [v] "Qo" (*v),
+ [newv] "r" (newv),
+ [v2] "Qo" (*v2),
+ [newv2] "r" (newv2)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[cmpfail])
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+ RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[error3])
+#endif
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "Qo" (*v),
+ [expect] "r" (expect),
+ [v2] "Qo" (*v2),
+ [expect2] "r" (expect2),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+error3:
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len)
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [expect] "r" (expect),
+ [v] "Qo" (*v),
+ [newv] "r" (newv),
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG, RSEQ_ASM_TMP_REG_2
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len)
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_OP_FINAL_STORE_RELEASE(newv, v, 3)
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [expect] "r" (expect),
+ [v] "Qo" (*v),
+ [newv] "r" (newv),
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG, RSEQ_ASM_TMP_REG_2
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* !RSEQ_SKIP_FASTPATH */
diff --git a/tools/testing/selftests/rseq/rseq-s390.h b/tools/testing/selftests/rseq/rseq-s390.h
new file mode 100644
index 000000000000..1069e85258ce
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-s390.h
@@ -0,0 +1,513 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+
+#define RSEQ_SIG 0x53053053
+
+#define rseq_smp_mb() __asm__ __volatile__ ("bcr 15,0" ::: "memory")
+#define rseq_smp_rmb() rseq_smp_mb()
+#define rseq_smp_wmb() rseq_smp_mb()
+
+#define rseq_smp_load_acquire(p) \
+__extension__ ({ \
+ __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
+ rseq_barrier(); \
+ ____p1; \
+})
+
+#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb()
+
+#define rseq_smp_store_release(p, v) \
+do { \
+ rseq_barrier(); \
+ RSEQ_WRITE_ONCE(*p, v); \
+} while (0)
+
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+#ifdef __s390x__
+
+#define LONG_L "lg"
+#define LONG_S "stg"
+#define LONG_LT_R "ltgr"
+#define LONG_CMP "cg"
+#define LONG_CMP_R "cgr"
+#define LONG_ADDI "aghi"
+#define LONG_ADD_R "agr"
+
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
+ start_ip, post_commit_offset, abort_ip) \
+ ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".balign 32\n\t" \
+ __rseq_str(label) ":\n\t" \
+ ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ ".quad " __rseq_str(start_ip) ", " __rseq_str(post_commit_offset) ", " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t"
+
+#elif __s390__
+
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
+ start_ip, post_commit_offset, abort_ip) \
+ ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".balign 32\n\t" \
+ __rseq_str(label) ":\n\t" \
+ ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t"
+
+#define LONG_L "l"
+#define LONG_S "st"
+#define LONG_LT_R "ltr"
+#define LONG_CMP "c"
+#define LONG_CMP_R "cr"
+#define LONG_ADDI "ahi"
+#define LONG_ADD_R "ar"
+
+#endif
+
+#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
+ (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
+ RSEQ_INJECT_ASM(1) \
+ "larl %%r0, " __rseq_str(cs_label) "\n\t" \
+ LONG_S " %%r0, %[" __rseq_str(rseq_cs) "]\n\t" \
+ __rseq_str(label) ":\n\t"
+
+#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
+ RSEQ_INJECT_ASM(2) \
+ "c %[" __rseq_str(cpu_id) "], %[" __rseq_str(current_cpu_id) "]\n\t" \
+ "jnz " __rseq_str(label) "\n\t"
+
+#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label) \
+ ".pushsection __rseq_failure, \"ax\"\n\t" \
+ ".long " __rseq_str(RSEQ_SIG) "\n\t" \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "j %l[" __rseq_str(abort_label) "]\n\t" \
+ ".popsection\n\t"
+
+#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \
+ ".pushsection __rseq_failure, \"ax\"\n\t" \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "j %l[" __rseq_str(cmpfail_label) "]\n\t" \
+ ".popsection\n\t"
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+/*
+ * Compare @v against @expectnot. When it does _not_ match, load @v
+ * into @load, then load the value found at address (*@v + @voffp)
+ * and store that value back into @v.
+ */
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+ off_t voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " %%r1, %[v]\n\t"
+ LONG_CMP_R " %%r1, %[expectnot]\n\t"
+ "je %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " %%r1, %[v]\n\t"
+ LONG_CMP_R " %%r1, %[expectnot]\n\t"
+ "je %l[error2]\n\t"
+#endif
+ LONG_S " %%r1, %[load]\n\t"
+ LONG_ADD_R " %%r1, %[voffp]\n\t"
+ LONG_L " %%r1, 0(%%r1)\n\t"
+ /* final store */
+ LONG_S " %%r1, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [voffp] "r" (voffp),
+ [load] "m" (*load)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0", "r1"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
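
For context on the primitive above: its typical caller is a per-CPU list
pop, along these lines (a sketch assuming the percpu_list types used
elsewhere in the rseq selftests; not code from this patch):

    struct percpu_list_node *node = NULL;
    intptr_t head;
    int cpu, ret;

    cpu = rseq_cpu_start();
    /* Pop the head unless the list is empty (head == NULL). */
    ret = rseq_cmpnev_storeoffp_load((intptr_t *)&list->c[cpu].head,
                                     (intptr_t)NULL,
                                     offsetof(struct percpu_list_node, next),
                                     &head, cpu);
    if (!ret)
        node = (struct percpu_list_node *)head;   /* popped entry */
    /* ret == 1: list was empty; ret < 0: preempted/migrated, retry */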
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+ LONG_L " %%r0, %[v]\n\t"
+ LONG_ADD_R " %%r0, %[count]\n\t"
+ /* final store */
+ LONG_S " %%r0, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [count] "r" (count)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* try store */
+ LONG_S " %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+/* s390 is TSO. */
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ return rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2, newv, cpu);
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+ LONG_CMP " %[expect2], %[v2]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[error2]\n\t"
+ LONG_CMP " %[expect2], %[v2]\n\t"
+ "jnz %l[error3]\n\t"
+#endif
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* cmp2 input */
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("1st expected value comparison failed");
+error3:
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uint64_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ LONG_S " %[src], %[rseq_scratch0]\n\t"
+ LONG_S " %[dst], %[rseq_scratch1]\n\t"
+ LONG_S " %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz 7f\n\t"
+#endif
+ /* try memcpy */
+ LONG_LT_R " %[len], %[len]\n\t"
+ "jz 333f\n\t"
+ "222:\n\t"
+ "ic %%r0,0(%[src])\n\t"
+ "stc %%r0,0(%[dst])\n\t"
+ LONG_ADDI " %[src], 1\n\t"
+ LONG_ADDI " %[dst], 1\n\t"
+ LONG_ADDI " %[len], -1\n\t"
+ "jnz 222b\n\t"
+ "333:\n\t"
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t"
+ RSEQ_ASM_DEFINE_ABORT(4,
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ abort)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error2)
+#endif
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+/* s390 is TSO. */
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ return rseq_cmpeqv_trymemcpy_storev(v, expect, dst, src, len,
+ newv, cpu);
+}
+#endif /* !RSEQ_SKIP_FASTPATH */
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
index 86ce22417e0d..c72eb70f9b52 100644
--- a/tools/testing/selftests/rseq/rseq.h
+++ b/tools/testing/selftests/rseq/rseq.h
@@ -71,10 +71,14 @@ extern __thread volatile struct rseq __rseq_abi;
#include <rseq-x86.h>
#elif defined(__ARMEL__)
#include <rseq-arm.h>
+#elif defined(__AARCH64EL__)
+#include <rseq-arm64.h>
#elif defined(__PPC__)
#include <rseq-ppc.h>
#elif defined(__mips__)
#include <rseq-mips.h>
+#elif defined(__s390__)
+#include <rseq-s390.h>
#else
#error unsupported target
#endif
diff --git a/tools/testing/selftests/tc-testing/README b/tools/testing/selftests/tc-testing/README
index 3a0336782d2d..49a6f8c3fdae 100644
--- a/tools/testing/selftests/tc-testing/README
+++ b/tools/testing/selftests/tc-testing/README
@@ -17,6 +17,10 @@ REQUIREMENTS
* The kernel must have veth support available, as a veth pair is created
prior to running the tests.
+* The kernel must have the appropriate infrastructure enabled to run all tdc
+ unit tests. See the config file in this directory for the minimum required
+ features; as new tests are added, that list of config options will grow.
+ One way to apply the fragment is sketched after this list.
+
* All tc-related features being tested must be built in or available as
modules. To check what is required in current setup run:
./tdc.py -c
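
One way to fold the new config fragment into a kernel build is the in-tree
merge helper (paths relative to the kernel source tree; illustrative only):

    ./scripts/kconfig/merge_config.sh -m .config \
        tools/testing/selftests/tc-testing/config
    make olddefconfig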
@@ -109,8 +113,8 @@ COMMAND LINE ARGUMENTS
Run tdc.py -h to see the full list of available arguments.
usage: tdc.py [-h] [-p PATH] [-D DIR [DIR ...]] [-f FILE [FILE ...]]
- [-c [CATG [CATG ...]]] [-e ID [ID ...]] [-l] [-s] [-i] [-v]
- [-d DEVICE] [-n NS] [-V]
+ [-c [CATG [CATG ...]]] [-e ID [ID ...]] [-l] [-s] [-i] [-v] [-N]
+ [-d DEVICE] [-P] [-n] [-V]
Linux TC unit tests
@@ -118,8 +122,10 @@ optional arguments:
-h, --help show this help message and exit
-p PATH, --path PATH The full path to the tc executable to use
-v, --verbose Show the commands that are being run
+ -N, --notap Suppress tap results for command under test
-d DEVICE, --device DEVICE
Execute the test case in flower category
+ -P, --pause Pause execution just before post-suite stage
selection:
select which test cases: files plus directories; filtered by categories
@@ -146,10 +152,10 @@ action:
-i, --id Generate ID numbers for new test cases
netns:
- options for nsPlugin(run commands in net namespace)
+ options for nsPlugin (run commands in net namespace)
- -n NS, --namespace NS
- Run commands in namespace NS
+ -n, --namespace
+ Run commands in namespace as specified in tdc_config.py
valgrind:
options for valgrindPlugin (run command under test under Valgrind)
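
With the nsPlugin change above, -n no longer takes an argument; the
namespace name now comes from tdc_config.py. An illustrative single-test
run inside the managed namespace (7565 is one of the nat test IDs added
later in this series):

    ./tdc.py -n -e 7565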
diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config
new file mode 100644
index 000000000000..203302065458
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/config
@@ -0,0 +1,48 @@
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_INGRESS=m
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_EMATCH_IPSET=m
+CONFIG_NET_EMATCH_IPT=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_SAMPLE=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_BPF=m
+CONFIG_NET_ACT_CONNMARK=m
+CONFIG_NET_ACT_SKBMOD=m
+CONFIG_NET_ACT_IFE=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_IFE_SKBMARK=m
+CONFIG_NET_IFE_SKBPRIO=m
+CONFIG_NET_IFE_SKBTCINDEX=m
+CONFIG_NET_CLS_IND=y
+CONFIG_NET_SCH_FIFO=y
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
index 70952bd98ff9..13147a1f5731 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
@@ -17,7 +17,7 @@
"cmdUnderTest": "$TC actions add action connmark",
"expExitCode": "0",
"verifyCmd": "$TC actions list action connmark",
- "matchPattern": "action order [0-9]+: connmark zone 0 pipe",
+ "matchPattern": "action order [0-9]+: connmark zone 0 pipe",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -41,7 +41,7 @@
"cmdUnderTest": "$TC actions add action connmark pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action connmark index 1",
- "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 1 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 1 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -65,7 +65,7 @@
"cmdUnderTest": "$TC actions add action connmark drop index 100",
"expExitCode": "0",
"verifyCmd": "$TC actions get action connmark index 100",
- "matchPattern": "action order [0-9]+: connmark zone 0 drop.*index 100 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 0 drop.*index 100 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -89,7 +89,7 @@
"cmdUnderTest": "$TC actions add action connmark pipe index 455",
"expExitCode": "0",
"verifyCmd": "$TC actions get action connmark index 455",
- "matchPattern": "action order [0-9]+: connmark zone 0 pipe.*index 455 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 0 pipe.*index 455 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -113,7 +113,7 @@
"cmdUnderTest": "$TC actions add action connmark reclassify index 7",
"expExitCode": "0",
"verifyCmd": "$TC actions list action connmark",
- "matchPattern": "action order [0-9]+: connmark zone 0 reclassify.*index 7 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 0 reclassify.*index 7 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -137,7 +137,7 @@
"cmdUnderTest": "$TC actions add action connmark continue index 17",
"expExitCode": "0",
"verifyCmd": "$TC actions list action connmark",
- "matchPattern": "action order [0-9]+: connmark zone 0 continue.*index 17 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 0 continue.*index 17 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -161,7 +161,7 @@
"cmdUnderTest": "$TC actions add action connmark jump 10 index 17",
"expExitCode": "0",
"verifyCmd": "$TC actions list action connmark",
- "matchPattern": "action order [0-9]+: connmark zone 0 jump 10.*index 17 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 0 jump 10.*index 17 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -185,7 +185,7 @@
"cmdUnderTest": "$TC actions add action connmark zone 100 pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action connmark index 1",
- "matchPattern": "action order [0-9]+: connmark zone 100 pipe.*index 1 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 100 pipe.*index 1 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -209,7 +209,7 @@
"cmdUnderTest": "$TC actions add action connmark zone 65536 reclassify index 21",
"expExitCode": "255",
"verifyCmd": "$TC actions get action connmark index 1",
- "matchPattern": "action order [0-9]+: connmark zone 65536 reclassify.*index 21 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 65536 reclassify.*index 21 ref",
"matchCount": "0",
"teardown": [
"$TC actions flush action connmark"
@@ -233,7 +233,7 @@
"cmdUnderTest": "$TC actions add action connmark zone 655 unsupp_arg pass index 2",
"expExitCode": "255",
"verifyCmd": "$TC actions get action connmark index 2",
- "matchPattern": "action order [0-9]+: connmark zone 655 unsupp_arg pass.*index 2 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 655 unsupp_arg pass.*index 2 ref",
"matchCount": "0",
"teardown": [
"$TC actions flush action connmark"
@@ -258,7 +258,7 @@
"cmdUnderTest": "$TC actions replace action connmark zone 555 reclassify index 555",
"expExitCode": "0",
"verifyCmd": "$TC actions get action connmark index 555",
- "matchPattern": "action order [0-9]+: connmark zone 555 reclassify.*index 555 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 555 reclassify.*index 555 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -282,7 +282,7 @@
"cmdUnderTest": "$TC actions add action connmark zone 555 pipe index 5 cookie aabbccddeeff112233445566778800a1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action connmark index 5",
- "matchPattern": "action order [0-9]+: connmark zone 555 pipe.*index 5 ref.*cookie aabbccddeeff112233445566778800a1",
+ "matchPattern": "action order [0-9]+: connmark zone 555 pipe.*index 5 ref.*cookie aabbccddeeff112233445566778800a1",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
index 3a2f51fc7fd4..a022792d392a 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
@@ -336,6 +336,30 @@
]
},
{
+ "id": "b10b",
+ "name": "Add all 7 csum actions",
+ "category": [
+ "actions",
+ "csum"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action csum",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action csum icmp ip4h sctp igmp udplite udp tcp index 7",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action csum index 7",
+ "matchPattern": "action order [0-9]*: csum \\(iph, icmp, igmp, tcp, udp, udplite, sctp\\).*index 7 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action csum"
+ ]
+ },
+ {
"id": "ce92",
"name": "Add csum udp action with cookie",
"category": [
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
index 6e4edfae1799..db49fd0f8445 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
@@ -44,7 +44,8 @@
"matchPattern": "action order [0-9]*: mirred \\(Egress Redirect to device lo\\).*index 2 ref",
"matchCount": "1",
"teardown": [
- "$TC actions flush action mirred"
+ "$TC actions flush action mirred",
+ "$TC actions flush action gact"
]
},
{
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json b/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
new file mode 100644
index 000000000000..0080dc2fd41c
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
@@ -0,0 +1,593 @@
+[
+ {
+ "id": "7565",
+ "name": "Add nat action on ingress with default control action",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat ingress 192.168.1.1 200.200.200.1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions ls action nat",
+ "matchPattern": "action order [0-9]+: nat ingress 192.168.1.1/32 200.200.200.1 pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "fd79",
+ "name": "Add nat action on ingress with pipe control action",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat ingress 1.1.1.1 2.2.2.1 pipe index 77",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action nat index 77",
+ "matchPattern": "action order [0-9]+: nat ingress 1.1.1.1/32 2.2.2.1 pipe.*index 77 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "eab9",
+ "name": "Add nat action on ingress with continue control action",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat ingress 192.168.10.10 192.168.20.20 continue index 1000",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action nat index 1000",
+ "matchPattern": "action order [0-9]+: nat ingress 192.168.10.10/32 192.168.20.20 continue.*index 1000 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "c53a",
+ "name": "Add nat action on ingress with reclassify control action",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat ingress 192.168.10.10 192.168.20.20 reclassify index 1000",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action nat index 1000",
+ "matchPattern": "action order [0-9]+: nat ingress 192.168.10.10/32 192.168.20.20 reclassify.*index 1000 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "76c9",
+ "name": "Add nat action on ingress with jump control action",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat ingress 12.18.10.10 12.18.20.20 jump 10 index 22",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action nat index 22",
+ "matchPattern": "action order [0-9]+: nat ingress 12.18.10.10/32 12.18.20.20 jump 10.*index 22 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "24c6",
+ "name": "Add nat action on ingress with drop control action",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat ingress 1.18.1.1 1.18.2.2 drop index 722",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action nat index 722",
+ "matchPattern": "action order [0-9]+: nat ingress 1.18.1.1/32 1.18.2.2 drop.*index 722 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "2120",
+ "name": "Add nat action on ingress with maximum index value",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat ingress 1.18.1.1 1.18.2.2 index 4294967295",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action nat index 4294967295",
+ "matchPattern": "action order [0-9]+: nat ingress 1.18.1.1/32 1.18.2.2 pass.*index 4294967295 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "3e9d",
+ "name": "Add nat action on ingress with invalid index value",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat ingress 1.18.1.1 1.18.2.2 index 4294967295555",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action nat index 4294967295555",
+ "matchPattern": "action order [0-9]+: nat ingress 1.18.1.1/32 1.18.2.2 pass.*index 4294967295555 ref",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "f6c9",
+ "name": "Add nat action on ingress with invalid IP address",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat ingress 1.1.1.1 1.1888.2.2 index 7",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action nat index 7",
+ "matchPattern": "action order [0-9]+: nat ingress 1.1.1.1/32 1.1888.2.2 pass.*index 7 ref",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "be25",
+ "name": "Add nat action on ingress with invalid argument",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat ingress 1.1.1.1 1.18.2.2 another_arg index 12",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action nat index 12",
+ "matchPattern": "action order [0-9]+: nat ingress 1.1.1.1/32 1.18.2.2 pass.*another_arg.*index 12 ref",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "a7bd",
+ "name": "Add nat action on ingress with DEFAULT IP address",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat ingress default 10.10.10.1 index 12",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action nat index 12",
+ "matchPattern": "action order [0-9]+: nat ingress 0.0.0.0/32 10.10.10.1 pass.*index 12 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "ee1e",
+ "name": "Add nat action on ingress with ANY IP address",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat ingress any 10.10.10.1 index 12",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action nat index 12",
+ "matchPattern": "action order [0-9]+: nat ingress 0.0.0.0/32 10.10.10.1 pass.*index 12 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "1de8",
+ "name": "Add nat action on ingress with ALL IP address",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat ingress all 10.10.10.1 index 12",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action nat index 12",
+ "matchPattern": "action order [0-9]+: nat ingress 0.0.0.0/32 10.10.10.1 pass.*index 12 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "8dba",
+ "name": "Add nat action on egress with default control action",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat egress 10.10.10.1 20.20.20.1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions ls action nat",
+ "matchPattern": "action order [0-9]+: nat egress 10.10.10.1/32 20.20.20.1 pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "19a7",
+ "name": "Add nat action on egress with pipe control action",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat egress 10.10.10.1 20.20.20.1 pipe",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions ls action nat",
+ "matchPattern": "action order [0-9]+: nat egress 10.10.10.1/32 20.20.20.1 pipe",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "f1d9",
+ "name": "Add nat action on egress with continue control action",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat egress 10.10.10.1 20.20.20.1 continue",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions ls action nat",
+ "matchPattern": "action order [0-9]+: nat egress 10.10.10.1/32 20.20.20.1 continue",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "6d4a",
+ "name": "Add nat action on egress with reclassify control action",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat egress 10.10.10.1 20.20.20.1 reclassify",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions ls action nat",
+ "matchPattern": "action order [0-9]+: nat egress 10.10.10.1/32 20.20.20.1 reclassify",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "b313",
+ "name": "Add nat action on egress with jump control action",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat egress 10.10.10.1 20.20.20.1 jump 777",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions ls action nat",
+ "matchPattern": "action order [0-9]+: nat egress 10.10.10.1/32 20.20.20.1 jump 777",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "d9fc",
+ "name": "Add nat action on egress with drop control action",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat egress 10.10.10.1 20.20.20.1 drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions ls action nat",
+ "matchPattern": "action order [0-9]+: nat egress 10.10.10.1/32 20.20.20.1 drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "a895",
+ "name": "Add nat action on egress with DEFAULT IP address",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat egress default 20.20.20.1 pipe index 10",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action nat index 10",
+ "matchPattern": "action order [0-9]+: nat egress 0.0.0.0/32 20.20.20.1 pipe.*index 10 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "2572",
+ "name": "Add nat action on egress with ANY IP address",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat egress any 20.20.20.1 pipe index 10",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action nat index 10",
+ "matchPattern": "action order [0-9]+: nat egress 0.0.0.0/32 20.20.20.1 pipe.*index 10 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "37f3",
+ "name": "Add nat action on egress with ALL IP address",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat egress all 20.20.20.1 pipe index 10",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action nat index 10",
+ "matchPattern": "action order [0-9]+: nat egress 0.0.0.0/32 20.20.20.1 pipe.*index 10 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "6054",
+ "name": "Add nat action on egress with cookie",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat egress all 20.20.20.1 pipe index 10 cookie aa1bc2d3eeff112233445566778800a1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action nat index 10",
+ "matchPattern": "action order [0-9]+: nat egress 0.0.0.0/32 20.20.20.1 pipe.*index 10 ref.*cookie aa1bc2d3eeff112233445566778800a1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ },
+ {
+ "id": "79d6",
+ "name": "Add nat action on ingress with cookie",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action nat ingress 192.168.1.1 10.10.10.1 reclassify index 1 cookie 112233445566778899aabbccddeeff11",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action nat index 1",
+ "matchPattern": "action order [0-9]+: nat ingress 192.168.1.1/32 10.10.10.1 reclassify.*index 1 ref.*cookie 112233445566778899aabbccddeeff11",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
+ }
+]
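For orientation, every case in these files follows the same tdc schema: the "setup" commands run first (an entry written as a list carries its own set of acceptable exit codes, which is why the recurring flush appears as ["$TC actions flush action nat", 0, 1, 255]; flushing when no such action exists is allowed to fail), then "cmdUnderTest" must exit with "expExitCode", then the output of "verifyCmd" must contain exactly "matchCount" occurrences of the regular expression "matchPattern", and "teardown" cleans up. A minimal sketch of such a runner is below; it is an illustration only, not the actual tools/testing/selftests/tc-testing/tdc.py, and it expands only the $TC variable (the real framework also handles $DEV1 and friends, namespaces, and plugins):

    import re
    import shlex
    import subprocess

    def run(cmd, ok=(0,)):
        """Run one command string; fail unless the exit code is acceptable."""
        proc = subprocess.run(shlex.split(cmd), capture_output=True, text=True)
        if proc.returncode not in ok:
            raise RuntimeError("%r exited %d" % (cmd, proc.returncode))
        return proc

    def run_case(case, tc="tc"):
        """Execute one test case dict in the JSON schema used above."""
        expand = lambda s: s.replace("$TC", tc)  # simplified variable expansion
        def steps(key):
            for step in case.get(key, []):
                if isinstance(step, list):       # [command, allowed exit codes...]
                    run(expand(step[0]), ok=tuple(step[1:]))
                else:
                    run(expand(step))
        steps("setup")
        proc = subprocess.run(shlex.split(expand(case["cmdUnderTest"])),
                              capture_output=True, text=True)
        assert proc.returncode == int(case["expExitCode"])
        out = subprocess.run(shlex.split(expand(case["verifyCmd"])),
                             capture_output=True, text=True).stdout
        assert len(re.findall(case["matchPattern"], out)) == int(case["matchCount"])
        steps("teardown")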
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
index 37ecc2716fee..5aaf593b914a 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
@@ -17,7 +17,7 @@
"cmdUnderTest": "$TC actions add action skbedit mark 1",
"expExitCode": "0",
"verifyCmd": "$TC actions list action skbedit",
- "matchPattern": "action order [0-9]*: skbedit mark 1",
+ "matchPattern": "action order [0-9]*: skbedit mark 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action skbedit"
@@ -65,7 +65,7 @@
"cmdUnderTest": "$TC actions add action skbedit prio 99",
"expExitCode": "0",
"verifyCmd": "$TC actions list action skbedit",
- "matchPattern": "action order [0-9]*: skbedit priority :99",
+ "matchPattern": "action order [0-9]*: skbedit priority :99",
"matchCount": "1",
"teardown": [
"$TC actions flush action skbedit"
@@ -113,7 +113,7 @@
"cmdUnderTest": "$TC actions add action skbedit queue_mapping 909",
"expExitCode": "0",
"verifyCmd": "$TC actions list action skbedit",
- "matchPattern": "action order [0-9]*: skbedit queue_mapping 909",
+ "matchPattern": "action order [0-9]*: skbedit queue_mapping 909",
"matchCount": "1",
"teardown": [
"$TC actions flush action skbedit"
@@ -161,7 +161,7 @@
"cmdUnderTest": "$TC actions add action skbedit ptype host",
"expExitCode": "0",
"verifyCmd": "$TC actions list action skbedit",
- "matchPattern": "action order [0-9]*: skbedit ptype host",
+ "matchPattern": "action order [0-9]*: skbedit ptype host",
"matchCount": "1",
"teardown": [
"$TC actions flush action skbedit"
@@ -185,7 +185,7 @@
"cmdUnderTest": "$TC actions add action skbedit ptype otherhost",
"expExitCode": "0",
"verifyCmd": "$TC actions list action skbedit",
- "matchPattern": "action order [0-9]*: skbedit ptype otherhost",
+ "matchPattern": "action order [0-9]*: skbedit ptype otherhost",
"matchCount": "1",
"teardown": [
"$TC actions flush action skbedit"
@@ -233,7 +233,7 @@
"cmdUnderTest": "$TC actions add action skbedit ptype host pipe index 11",
"expExitCode": "0",
"verifyCmd": "$TC actions get action skbedit index 11",
- "matchPattern": "action order [0-9]*: skbedit ptype host pipe.*index 11 ref",
+ "matchPattern": "action order [0-9]*: skbedit ptype host pipe.*index 11 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action skbedit"
@@ -257,7 +257,7 @@
"cmdUnderTest": "$TC actions add action skbedit mark 56789 reclassify index 90",
"expExitCode": "0",
"verifyCmd": "$TC actions get action skbedit index 90",
- "matchPattern": "action order [0-9]*: skbedit mark 56789 reclassify.*index 90 ref",
+ "matchPattern": "action order [0-9]*: skbedit mark 56789 reclassify.*index 90 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action skbedit"
@@ -281,7 +281,7 @@
"cmdUnderTest": "$TC actions add action skbedit queue_mapping 3 pass index 271",
"expExitCode": "0",
"verifyCmd": "$TC actions get action skbedit index 271",
- "matchPattern": "action order [0-9]*: skbedit queue_mapping 3 pass.*index 271 ref",
+ "matchPattern": "action order [0-9]*: skbedit queue_mapping 3 pass.*index 271 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action skbedit"
@@ -305,7 +305,7 @@
"cmdUnderTest": "$TC actions add action skbedit queue_mapping 3 drop index 271",
"expExitCode": "0",
"verifyCmd": "$TC actions get action skbedit index 271",
- "matchPattern": "action order [0-9]*: skbedit queue_mapping 3 drop.*index 271 ref",
+ "matchPattern": "action order [0-9]*: skbedit queue_mapping 3 drop.*index 271 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action skbedit"
@@ -329,7 +329,7 @@
"cmdUnderTest": "$TC actions add action skbedit priority 8 jump 9 index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action skbedit index 2",
- "matchPattern": "action order [0-9]*: skbedit priority :8 jump 9.*index 2 ref",
+ "matchPattern": "action order [0-9]*: skbedit priority :8 jump 9.*index 2 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action skbedit"
@@ -353,7 +353,7 @@
"cmdUnderTest": "$TC actions add action skbedit priority 16 continue index 32",
"expExitCode": "0",
"verifyCmd": "$TC actions get action skbedit index 32",
- "matchPattern": "action order [0-9]*: skbedit priority :16 continue.*index 32 ref",
+ "matchPattern": "action order [0-9]*: skbedit priority :16 continue.*index 32 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action skbedit"
@@ -377,7 +377,7 @@
"cmdUnderTest": "$TC actions add action skbedit priority 16 continue index 32 cookie deadbeef",
"expExitCode": "0",
"verifyCmd": "$TC actions get action skbedit index 32",
- "matchPattern": "action order [0-9]*: skbedit priority :16 continue.*index 32 ref.*cookie deadbeef",
+ "matchPattern": "action order [0-9]*: skbedit priority :16 continue.*index 32 ref.*cookie deadbeef",
"matchCount": "1",
"teardown": [
"$TC actions flush action skbedit"
@@ -405,7 +405,7 @@
"cmdUnderTest": "$TC actions list action skbedit",
"expExitCode": "0",
"verifyCmd": "$TC actions list action skbedit",
- "matchPattern": "action order [0-9]*: skbedit",
+ "matchPattern": "action order [0-9]*: skbedit",
"matchCount": "4",
"teardown": [
"$TC actions flush action skbedit"
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
new file mode 100644
index 000000000000..10b2d894e436
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
@@ -0,0 +1,917 @@
+[
+ {
+ "id": "2b11",
+ "name": "Add tunnel_key set action with mandatory parameters",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action tunnel_key",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "dc6b",
+ "name": "Add tunnel_key set action with missing mandatory src_ip parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set dst_ip 20.20.20.2 id 100",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action tunnel_key",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*dst_ip 20.20.20.2.*key_id 100",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "7f25",
+ "name": "Add tunnel_key set action with missing mandatory dst_ip parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 id 100",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action tunnel_key",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*key_id 100",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "ba4e",
+ "name": "Add tunnel_key set action with missing mandatory id parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action tunnel_key",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "a5e0",
+ "name": "Add tunnel_key set action with invalid src_ip parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 300.168.100.1 dst_ip 192.168.200.1 id 7 index 1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 300.168.100.1.*dst_ip 192.168.200.1.*key_id 7.*index 1 ref",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "eaa8",
+ "name": "Add tunnel_key set action with invalid dst_ip parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.100.1 dst_ip 192.168.800.1 id 10 index 11",
+ "expExitCode": "1",
+ "verifyCmd": "$TC actions get action tunnel_key index 11",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 192.168.100.1.*dst_ip 192.168.800.1.*key_id 10.*index 11 ref",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "3b09",
+ "name": "Add tunnel_key set action with invalid id parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 112233445566778899 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 112233445566778899.*index 1 ref",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "9625",
+ "name": "Add tunnel_key set action with invalid dst_port parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 dst_port 998877 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 11.*dst_port 998877.*index 1 ref",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "05af",
+ "name": "Add tunnel_key set action with optional dst_port parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.100.1 dst_ip 192.168.200.1 id 789 dst_port 4000 index 10",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 10",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.100.1.*dst_ip 192.168.200.1.*key_id 789.*dst_port 4000.*index 10 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "da80",
+ "name": "Add tunnel_key set action with index at 32-bit maximum",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 index 4294967295",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 4294967295",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*id 11.*index 4294967295 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "d407",
+ "name": "Add tunnel_key set action with index exceeding 32-bit maximum",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 index 4294967295678",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 4294967295678",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*index 4294967295678 ref",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "5cba",
+ "name": "Add tunnel_key set action with id value at 32-bit maximum",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 4294967295 index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 4294967295.*index 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "e84a",
+ "name": "Add tunnel_key set action with id value exceeding 32-bit maximum",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42949672955 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 4294967295",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42949672955.*index 1",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "9c19",
+ "name": "Add tunnel_key set action with dst_port value at 16-bit maximum",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 429 dst_port 65535 index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 429.*dst_port 65535.*index 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "3bd9",
+ "name": "Add tunnel_key set action with dst_port value exceeding 16-bit maximum",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 429 dst_port 65535789 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 429.*dst_port 65535789.*index 1",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "68e2",
+ "name": "Add tunnel_key unset action",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key unset index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*unset.*index 1 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "6192",
+ "name": "Add tunnel_key unset continue action",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key unset continue index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*unset continue.*index 1 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "061d",
+ "name": "Add tunnel_key set continue action with cookie",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.10.1 dst_ip 192.168.20.2 id 123 continue index 1 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.10.1.*dst_ip 192.168.20.2.*key_id 123.*csum continue.*index 1.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "8acb",
+ "name": "Add tunnel_key set continue action with invalid cookie",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.10.1 dst_ip 192.168.20.2 id 123 continue index 1 cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.10.1.*dst_ip 192.168.20.2.*key_id 123.*csum continue.*index 1.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "a07e",
+ "name": "Add tunnel_key action with no set/unset command specified",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "b227",
+ "name": "Add tunnel_key action with csum option",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1 csum index 99",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 99",
+ "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*csum pipe.*index 99",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "58a7",
+ "name": "Add tunnel_key action with nocsum option",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7823 nocsum index 234",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 234",
+ "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7823.*nocsum pipe.*index 234",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "2575",
+ "name": "Add tunnel_key action with not-supported parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 foobar 999 index 4",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 4",
+ "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*foobar 999.*index 4",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "7a88",
+ "name": "Add tunnel_key action with cookie parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 4",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "4f20",
+ "name": "Add tunnel_key action with a single geneve option parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022 index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022.*index 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "e33d",
+ "name": "Add tunnel_key action with multiple geneve options parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344 index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "0778",
+ "name": "Add tunnel_key action with invalid class geneve option parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 824212:80:00880022 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 824212:80:00880022.*index 1",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "4ae8",
+ "name": "Add tunnel_key action with invalid type geneve option parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:4224:00880022 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:4224:00880022.*index 1",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "4039",
+ "name": "Add tunnel_key action with short data length geneve option parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288.*index 1",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "26a6",
+ "name": "Add tunnel_key action with non-multiple of 4 data length geneve option parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288428822 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288428822.*index 1",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "f44d",
+ "name": "Add tunnel_key action with incomplete geneve options parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42: index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:.*index 1",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "7afc",
+ "name": "Replace tunnel_key set action with all parameters",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 csum id 1 index 1"
+ ],
+ "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 nocsum id 11 index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*nocsum pipe.*index 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "364d",
+ "name": "Replace tunnel_key set action with all parameters and cookie",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 index 1 cookie aabbccddeeff112233445566778800a"
+ ],
+ "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie a1b1c1d1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie a1b1c1d1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "937c",
+ "name": "Fetch all existing tunnel_key actions",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pipe index 1",
+ "$TC actions add action tunnel_key set src_ip 11.10.10.1 dst_ip 21.20.20.2 dst_port 3129 csum id 2 jump 10 index 2",
+ "$TC actions add action tunnel_key set src_ip 12.10.10.1 dst_ip 22.20.20.2 dst_port 3130 csum id 3 pass index 3",
+ "$TC actions add action tunnel_key set src_ip 13.10.10.1 dst_ip 23.20.20.2 dst_port 3131 nocsum id 4 continue index 4"
+ ],
+ "cmdUnderTest": "$TC actions list action tunnel_key",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action tunnel_key",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*dst_port 3128.*nocsum pipe.*index 1.*set.*src_ip 11.10.10.1.*dst_ip 21.20.20.2.*key_id 2.*dst_port 3129.*csum jump 10.*index 2.*set.*src_ip 12.10.10.1.*dst_ip 22.20.20.2.*key_id 3.*dst_port 3130.*csum pass.*index 3.*set.*src_ip 13.10.10.1.*dst_ip 23.20.20.2.*key_id 4.*dst_port 3131.*nocsum continue.*index 4",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "6783",
+ "name": "Flush all existing tunnel_key actions",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pipe index 1",
+ "$TC actions add action tunnel_key set src_ip 11.10.10.1 dst_ip 21.20.20.2 dst_port 3129 csum id 2 reclassify index 2",
+ "$TC actions add action tunnel_key set src_ip 12.10.10.1 dst_ip 22.20.20.2 dst_port 3130 csum id 3 pass index 3",
+ "$TC actions add action tunnel_key set src_ip 13.10.10.1 dst_ip 23.20.20.2 dst_port 3131 nocsum id 4 continue index 4"
+ ],
+ "cmdUnderTest": "$TC actions flush action tunnel_key",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action tunnel_key",
+ "matchPattern": "action order [0-9]+:.*",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ }
+]
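The geneve option cases above all exercise the same CLASS:TYPE:DATA syntax: CLASS is a 16-bit hex value, TYPE an 8-bit hex value, and DATA a hex string whose length must be a non-zero multiple of 4 bytes; several options may be comma-separated. A rough validator matching what the invalid-option tests expect to be rejected (an illustration of the format only; the authoritative parsing lives in iproute2 and the kernel):

    def geneve_opt_ok(opt):
        """Check one CLASS:TYPE:DATA geneve option written in hex."""
        try:
            cls, typ, data = opt.split(":")
            return (int(cls, 16) <= 0xffff and   # class fits in 16 bits
                    int(typ, 16) <= 0xff and     # type fits in 8 bits
                    len(data) > 0 and
                    len(data) % 8 == 0 and       # 8 hex digits = 4 bytes
                    int(data, 16) >= 0)          # data is valid hex
        except ValueError:
            return False

    assert geneve_opt_ok("0102:80:00880022")
    assert not geneve_opt_ok("824212:80:00880022")  # class exceeds 16 bits
    assert not geneve_opt_ok("0102:4224:00880022")  # type exceeds 8 bits
    assert not geneve_opt_ok("0102:80:4288")        # data too short
    assert not geneve_opt_ok("0102:80:4288428822")  # not a multiple of 4 bytes
    assert not geneve_opt_ok("0408:42:")            # incomplete option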
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json b/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json
new file mode 100644
index 000000000000..3b97cfd7e0f8
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json
@@ -0,0 +1,1049 @@
+[
+ {
+ "id": "901f",
+ "name": "Add fw filter with prio at 32-bit maxixum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 65535 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 65535 protocol all fw",
+ "matchPattern": "pref 65535 fw.*handle 0x1.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "51e2",
+ "name": "Add fw filter with prio exceeding 32-bit maxixum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 65536 fw action ok",
+ "expExitCode": "255",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 65536 protocol all fw",
+ "matchPattern": "pref 65536 fw.*handle 0x1.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "d987",
+ "name": "Add fw filter with action ok",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "handle 0x1.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "affe",
+ "name": "Add fw filter with action continue",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action continue",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "handle 0x1.*gact action continue",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "28bc",
+ "name": "Add fw filter with action pipe",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action pipe",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "handle 0x1.*gact action pipe",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "8da2",
+ "name": "Add fw filter with action drop",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol all prio 1 fw",
+ "matchPattern": "handle 0x1.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "9436",
+ "name": "Add fw filter with action reclassify",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action reclassify",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "handle 0x1.*gact action reclassify",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "95bb",
+ "name": "Add fw filter with action jump 10",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action jump 10",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "handle 0x1.*gact action jump 10",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "3d74",
+ "name": "Add fw filter with action goto chain 5",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action goto chain 5",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "handle 0x1.*gact action goto chain 5",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "eb8f",
+ "name": "Add fw filter with invalid action",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action pump",
+ "expExitCode": "255",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "handle 0x1.*gact action pump",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "6a79",
+ "name": "Add fw filter with missing mandatory action",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "filter protocol all pref [0-9]+ fw.*handle 0x1",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "8298",
+ "name": "Add fw filter with cookie",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action pipe cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 2 protocol all fw",
+ "matchPattern": "pref 2 fw.*handle 0x1.*gact action pipe.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "a88c",
+ "name": "Add fw filter with invalid cookie",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action continue cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888",
+ "expExitCode": "255",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 2 protocol all fw",
+ "matchPattern": "pref 2 fw.*handle 0x1.*gact action continue.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "10f6",
+ "name": "Add fw filter with handle in hex",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0xa1b2ff prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xa1b2ff prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0xa1b2ff.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "9d51",
+ "name": "Add fw filter with handle at 32-bit maximum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967295 prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4294967295 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0xffffffff.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "d939",
+ "name": "Add fw filter with handle exceeding 32-bit maximum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967296 prio 1 fw action ok",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4294967296 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0x.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "658c",
+ "name": "Add fw filter with mask in hex",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10/0xa1b2f prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0xa/0xa1b2f",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "86be",
+ "name": "Add fw filter with mask at 32-bit maximum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10/4294967295 prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0xa[^/]",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "e635",
+ "name": "Add fw filter with mask exceeding 32-bit maximum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10/4294967296 prio 1 fw action ok",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0xa",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "6cab",
+ "name": "Add fw filter with handle/mask in hex",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0xa1b2cdff/0x1a2bffdc prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xa1b2cdff prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0xa1b2cdff/0x1a2bffdc",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "8700",
+ "name": "Add fw filter with handle/mask at 32-bit maximum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967295/4294967295 prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xffffffff prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0xffffffff[^/]",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "7d62",
+ "name": "Add fw filter with handle/mask exceeding 32-bit maximum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967296/4294967296 prio 1 fw action ok",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "7b69",
+ "name": "Add fw filter with missing mandatory handle",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 1 fw action ok",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "filter protocol all.*fw.*handle.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "d68b",
+ "name": "Add fw filter with invalid parent",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent aa11b1b2: handle 1 prio 1 fw action ok",
+ "expExitCode": "255",
+ "verifyCmd": "$TC filter dev $DEV1 parent aa11b1b2: handle 1 prio 1 protocol all fw",
+ "matchPattern": "filter protocol all pref 1 fw.*handle 0x1.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "66e0",
+ "name": "Add fw filter with missing mandatory parent id",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 handle 1 prio 1 fw action ok",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "pref [0-9]+ fw.*handle 0x1.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "0ff3",
+ "name": "Add fw filter with classid",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid 3 action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0x1 classid :3.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "9849",
+ "name": "Add fw filter with classid at root",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid ffff:ffff action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "pref 1 fw.*handle 0x1 classid root.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "b7ff",
+ "name": "Add fw filter with classid - keeps last 8 (hex) digits",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid 98765fedcb action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0x1 classid 765f:edcb.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "2b18",
+ "name": "Add fw filter with invalid classid",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid 6789defg action ok",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0x1 classid 6789:defg.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "fade",
+ "name": "Add fw filter with flowid",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10 prio 1 fw flowid 1:10 action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw",
+ "matchPattern": "filter parent ffff: protocol all pref 1 fw.*handle 0xa classid 1:10.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "33af",
+ "name": "Add fw filter with flowid then classid (same arg, takes second)",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 11 prio 1 fw flowid 10 classid 4 action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 11 prio 1 protocol all fw",
+ "matchPattern": "filter parent ffff: protocol all pref 1 fw.*handle 0xb classid :4.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "8a8c",
+ "name": "Add fw filter with classid then flowid (same arg, takes second)",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 11 prio 1 fw classid 4 flowid 10 action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 11 prio 1 protocol all fw",
+ "matchPattern": "filter parent ffff: protocol all pref 1 fw.*handle 0xb classid :10.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "b50d",
+ "name": "Add fw filter with handle val/mask and flowid 10:1000",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 3 handle 10/0xff fw flowid 10:1000 action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 3 protocol all fw",
+ "matchPattern": "filter parent ffff: protocol all pref 3 fw.*handle 0xa/0xff classid 10:1000.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "7207",
+ "name": "Add fw filter with protocol ip",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 handle 3 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 3 prio 1 protocol ip fw",
+ "matchPattern": "filter parent ffff: protocol ip pref 1 fw.*handle 0x3.*gact action pass.*index [0-9]+ ref [0-9]+ bind [0-9]+",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "306d",
+ "name": "Add fw filter with protocol ipv6",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ipv6 prio 2 handle 4 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 2 protocol ipv6 fw",
+ "matchPattern": "filter parent ffff: protocol ipv6 pref 2 fw.*handle 0x4.*gact action pass.*index [0-9]+ ref [0-9]+ bind [0-9]+",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "9a78",
+ "name": "Add fw filter with protocol arp",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol arp prio 5 handle 7 fw action drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 7 prio 5 protocol arp fw",
+ "matchPattern": "filter parent ffff: protocol arp pref 5 fw.*handle 0x7.*gact action drop.*index [0-9]+ ref [0-9]+ bind [0-9]+",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "1821",
+ "name": "Add fw filter with protocol 802_3",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol 802_3 handle 1 prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol 802_3 fw",
+ "matchPattern": "filter parent ffff: protocol 802_3 pref 1 fw.*handle 0x1.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "2260",
+ "name": "Add fw filter with invalid protocol",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol igmp handle 1 prio 1 fw action ok",
+ "expExitCode": "255",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol igmp fw",
+ "matchPattern": "filter parent ffff: protocol igmp pref 1 fw.*handle 0x1.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "09d7",
+ "name": "Add fw filters protocol 802_3 and ip with conflicting priorities",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: protocol 802_3 prio 3 handle 7 fw action ok"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 3 handle 8 fw action ok",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 8 prio 3 protocol ip fw",
+ "matchPattern": "filter parent ffff: protocol ip pref 3 fw.*handle 0x8",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "6973",
+ "name": "Add fw filters with same index, same action",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: prio 6 handle 2 fw action continue index 5"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 8 handle 4 fw action continue index 5",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 8 protocol all fw",
+ "matchPattern": "filter parent ffff: protocol all pref 8 fw.*handle 0x4.*gact action continue.*index 5 ref 2 bind 2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "fc06",
+ "name": "Add fw filters with action police",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 3 handle 4 fw action police rate 1kbit burst 10k index 5",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 3 protocol all fw",
+ "matchPattern": "filter parent ffff: protocol all pref 3 fw.*handle 0x4.*police 0x5 rate 1Kbit burst 10Kb mtu 2Kb action reclassify overhead 0b.*ref 1 bind 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "aac7",
+ "name": "Add fw filters with action police linklayer atm",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 3 handle 4 fw action police rate 2mbit burst 200k linklayer atm index 8",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 3 protocol all fw",
+ "matchPattern": "filter parent ffff: protocol all pref 3 fw.*handle 0x4.*police 0x8 rate 2Mbit burst 200Kb mtu 2Kb action reclassify overhead 0b linklayer atm.*ref 1 bind 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "5339",
+ "name": "Del entire fw filter",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 5 prio 7 fw action pass",
+ "$TC filter add dev $DEV1 parent ffff: handle 3 prio 9 fw action pass"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff:",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "protocol all pref.*handle.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "0e99",
+ "name": "Del single fw filter x1",
+ "__comment__": "First of two tests to check that one filter is there and the other isn't",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 5 prio 7 fw action pass",
+ "$TC filter add dev $DEV1 parent ffff: handle 3 prio 9 fw action pass"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 3 prio 9 fw action pass",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "protocol all pref 7.*handle 0x5.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "f54c",
+ "name": "Del single fw filter x2",
+ "__comment__": "Second of two tests to check that one filter is there and the other isn't",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 5 prio 7 fw action pass",
+ "$TC filter add dev $DEV1 parent ffff: handle 3 prio 9 fw action pass"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 3 prio 9 fw action pass",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "protocol all pref 9.*handle 0x3.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "ba94",
+ "name": "Del fw filter by prio",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 1 prio 4 fw action ok",
+ "$TC filter add dev $DEV1 parent ffff: handle 2 prio 4 fw action ok"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: prio 4",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "pref 4 fw.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "4acb",
+ "name": "Del fw filter by chain",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 4 prio 2 chain 13 fw action pipe",
+ "$TC filter add dev $DEV1 parent ffff: handle 3 prio 5 chain 13 fw action pipe"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: chain 13",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "fw chain 13 handle.*gact action pipe",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "3424",
+ "name": "Del fw filter by action (invalid)",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 2 prio 4 fw action drop"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: fw action drop",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 2 prio 4 protocol all fw",
+ "matchPattern": "handle 0x2.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "da89",
+ "name": "Del fw filter by handle (invalid)",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 3 prio 4 fw action continue"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 3 fw",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 3 prio 4 protocol all fw",
+ "matchPattern": "handle 0x3.*gact action continue",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "4d95",
+ "name": "Del fw filter by protocol (invalid)",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 4 prio 2 protocol arp fw action pipe"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: protocol arp fw",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 2 protocol arp fw",
+ "matchPattern": "filter parent ffff: protocol arp.*handle 0x4.*gact action pipe",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "4736",
+ "name": "Del fw filter by flowid (invalid)",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 4 prio 2 fw action pipe flowid 45"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: fw flowid 45",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "handle 0x4.*gact action pipe",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "3dcb",
+ "name": "Replace fw filter action",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action ok"
+ ],
+ "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 prio 2 fw action pipe",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "pref 2 fw.*handle 0x1.*gact action pipe",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "eb4d",
+ "name": "Replace fw filter classid",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action ok"
+ ],
+ "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 prio 2 fw action pipe classid 2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "pref 2 fw.*handle 0x1 classid :2.*gact action pipe",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "67ec",
+ "name": "Replace fw filter index",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action ok index 3"
+ ],
+ "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 prio 2 fw action ok index 16",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "pref 2 fw.*handle 0x1.*gact action pass.*index 16",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ }
+]
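Note on the cases above: the fw classifier matches on the packet's firewall mark (skb->mark), and the filter's handle is the mark value to match, so "handle 3 fw" matches packets carrying mark 3. As a sketch of how matching traffic could be generated (illustration only, not part of the patch; SO_MARK needs CAP_NET_ADMIN):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned int mark = 3;  /* same value as the fw filter's handle */
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_MARK,
                                 &mark, sizeof(mark)) < 0) {
            perror("SO_MARK");
            return 1;
        }
        /* Traffic sent on fd now carries skb->mark == 3 and would be
         * picked up by "filter ... handle 3 fw" above. */
        close(fd);
        return 0;
    }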
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
index 5fa02d86b35f..99a5ffca1088 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
@@ -12,8 +12,8 @@
"cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 u32 match ip src 127.0.0.1/32 flowid 1:1 action ok",
"expExitCode": "0",
"verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
- "matchPattern": "match 7f000002/ffffffff at 12",
- "matchCount": "0",
+ "matchPattern": "match 7f000001/ffffffff at 12",
+ "matchCount": "1",
"teardown": [
"$TC qdisc del dev $DEV1 ingress"
]
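The hunk above fixes the u32 selftest's verify pattern: the command under test installs a match on 127.0.0.1/32, which the u32 dump prints in network-order hex as 7f000001 ("at 12" being the source-address offset within the IPv4 header), while the old pattern only asserted the absence of 7f000002 (127.0.0.2) with matchCount 0 and so passed without checking anything useful. A quick standalone check of the encoding (illustration only):

    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
        struct in_addr a;

        if (inet_pton(AF_INET, "127.0.0.1", &a) != 1)
            return 1;
        /* Prints 7f000001, the value the fixed matchPattern expects. */
        printf("%08x\n", (unsigned int)ntohl(a.s_addr));
        return 0;
    }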
diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c
index ca6cd146aafe..dcf73c5dab6e 100644
--- a/tools/testing/selftests/timers/raw_skew.c
+++ b/tools/testing/selftests/timers/raw_skew.c
@@ -134,6 +134,11 @@ int main(int argv, char **argc)
printf(" %lld.%i(act)", ppm/1000, abs((int)(ppm%1000)));
if (llabs(eppm - ppm) > 1000) {
+ if (tx1.offset || tx2.offset ||
+ tx1.freq != tx2.freq || tx1.tick != tx2.tick) {
+ printf(" [SKIP]\n");
+ return ksft_exit_skip("The clock was adjusted externally. Shutdown NTPd or other time sync daemons\n");
+ }
printf(" [FAILED]\n");
return ksft_exit_fail();
}
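The raw_skew change above downgrades a failure to a skip when the clock was being steered externally: tx1 and tx2 are the test's adjtimex snapshots taken around the measurement window, so a nonzero offset or a changed freq/tick indicates NTP or a similar daemon interfered. A minimal sketch of such a read-only adjtimex query (illustration, not from the patch):

    #include <stdio.h>
    #include <sys/timex.h>

    int main(void)
    {
        struct timex tx = { .modes = 0 };  /* modes == 0: read-only query */

        if (adjtimex(&tx) < 0) {
            perror("adjtimex");
            return 1;
        }
        /* These are the fields the new skip condition compares. */
        printf("offset=%ld freq=%ld tick=%ld\n", tx.offset, tx.freq, tx.tick);
        return 0;
    }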
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
index efc84cbe8277..5abbe9b3c652 100644
--- a/virt/kvm/arm/aarch32.c
+++ b/virt/kvm/arm/aarch32.c
@@ -108,9 +108,9 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
unsigned long itbits, cond;
unsigned long cpsr = *vcpu_cpsr(vcpu);
- bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);
+ bool is_arm = !(cpsr & PSR_AA32_T_BIT);

- if (is_arm || !(cpsr & COMPAT_PSR_IT_MASK))
+ if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
return;

cond = (cpsr & 0xe000) >> 13;
@@ -123,7 +123,7 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
else
itbits = (itbits << 1) & 0x1f;

- cpsr &= ~COMPAT_PSR_IT_MASK;
+ cpsr &= ~PSR_AA32_IT_MASK;
cpsr |= cond << 13;
cpsr |= (itbits & 0x1c) << (10 - 2);
cpsr |= (itbits & 0x3) << 25;
@@ -138,7 +138,7 @@ void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
bool is_thumb;

- is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
+ is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
if (is_thumb && !is_wide_instr)
*vcpu_pc(vcpu) += 2;
else
@@ -164,16 +164,16 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
unsigned long cpsr;
unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
- bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+ bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

- cpsr = mode | COMPAT_PSR_I_BIT;
+ cpsr = mode | PSR_AA32_I_BIT;

if (sctlr & (1 << 30))
- cpsr |= COMPAT_PSR_T_BIT;
+ cpsr |= PSR_AA32_T_BIT;
if (sctlr & (1 << 25))
- cpsr |= COMPAT_PSR_E_BIT;
+ cpsr |= PSR_AA32_E_BIT;
*vcpu_cpsr(vcpu) = cpsr;
@@ -192,7 +192,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
- prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
+ prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
}

/*
@@ -216,7 +216,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
fsr = &vcpu_cp15(vcpu, c5_DFSR);
}

- prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
+ prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);

*far = addr;
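The aarch32.c hunks above are a mechanical rename of the COMPAT_PSR_* constants to PSR_AA32_*; no logic changes. As a hedged standalone paraphrase of the Thumb-bit logic in kvm_skip_instr32() (assuming, as the series suggests, that the renamed constants keep the architectural AArch32 CPSR encodings, with T at bit 5):

    #include <stdio.h>

    #define PSR_AA32_T_BIT 0x00000020  /* Thumb execution state (assumed value) */

    static unsigned long skip_instr32(unsigned long pc, unsigned long cpsr,
                                      int is_wide_instr)
    {
        int is_thumb = !!(cpsr & PSR_AA32_T_BIT);

        /* Narrow Thumb instructions are 2 bytes; everything else is 4. */
        return pc + ((is_thumb && !is_wide_instr) ? 2 : 4);
    }

    int main(void)
    {
        printf("%lu\n", skip_instr32(0, PSR_AA32_T_BIT, 0)); /* 2 */
        printf("%lu\n", skip_instr32(0, 0, 0));              /* 4 */
        return 0;
    }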
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 04e554cae3a2..108250e4d376 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -604,7 +604,7 @@ void kvm_arm_resume_guest(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.pause = false;
- swake_up(kvm_arch_vcpu_wq(vcpu));
+ swake_up_one(kvm_arch_vcpu_wq(vcpu));
}
}
@@ -612,7 +612,7 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);

- swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+ swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
(!vcpu->arch.pause)));

if (vcpu->arch.power_off || vcpu->arch.pause) {
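The swait changes here and in the following files rename swake_up() to swake_up_one() and swait_event_interruptible() to swait_event_interruptible_exclusive(), making explicit that simple waitqueues only ever wake a single exclusive waiter. A hypothetical module sketch of the renamed pairing (demo_wq and waiter_fn are invented names; illustration only, not part of the patch):

    #include <linux/module.h>
    #include <linux/kthread.h>
    #include <linux/delay.h>
    #include <linux/swait.h>

    static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);
    static bool demo_cond;

    static int waiter_fn(void *unused)
    {
        /* Same shape as vcpu_req_sleep() after this patch: sleep as an
         * exclusive waiter until the condition holds. */
        swait_event_interruptible_exclusive(demo_wq, READ_ONCE(demo_cond));
        pr_info("swait demo: woken\n");
        return 0;
    }

    static int __init demo_init(void)
    {
        kthread_run(waiter_fn, NULL, "swait-demo");
        msleep(100);
        WRITE_ONCE(demo_cond, true);
        swake_up_one(&demo_wq); /* wakes exactly one exclusive waiter */
        return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

The prepare_to_swait_exclusive()/finish_swait() pair seen in kvm_vcpu_block() further down is the open-coded form of the same exclusive enqueue.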
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index c95ab4c5a475..9b73d3ad918a 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -155,7 +155,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
smp_mb(); /* Make sure the above is visible */

wq = kvm_arch_vcpu_wq(vcpu);
- swake_up(wq);
+ swake_up_one(wq);

return PSCI_RET_SUCCESS;
}
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 57bcb27dcf30..23c2519c5b32 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -107,7 +107,7 @@ static void async_pf_execute(struct work_struct *work)
trace_kvm_async_pf_completed(addr, gva);

if (swq_has_sleeper(&vcpu->wq))
- swake_up(&vcpu->wq);
+ swake_up_one(&vcpu->wq);

mmput(mm);
kvm_put_kvm(vcpu->kvm);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8b47507faab5..3d233ebfbee9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2172,7 +2172,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
kvm_arch_vcpu_blocking(vcpu);

for (;;) {
- prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

if (kvm_vcpu_check_block(vcpu) < 0)
break;
@@ -2214,7 +2214,7 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
wqp = kvm_arch_vcpu_wq(vcpu);
if (swq_has_sleeper(wqp)) {
- swake_up(wqp);
+ swake_up_one(wqp);
++vcpu->stat.halt_wakeup;
return true;
}